aboutsummaryrefslogtreecommitdiff
path: root/contrib/llvm-project/clang
diff options
context:
space:
mode:
Diffstat (limited to 'contrib/llvm-project/clang')
-rw-r--r--contrib/llvm-project/clang/include/clang-c/BuildSystem.h6
-rw-r--r--contrib/llvm-project/clang/include/clang-c/CXDiagnostic.h379
-rw-r--r--contrib/llvm-project/clang/include/clang-c/CXFile.h83
-rw-r--r--contrib/llvm-project/clang/include/clang-c/CXSourceLocation.h286
-rw-r--r--contrib/llvm-project/clang/include/clang-c/Documentation.h64
-rw-r--r--contrib/llvm-project/clang/include/clang-c/Index.h1274
-rw-r--r--contrib/llvm-project/clang/include/clang-c/module.modulemap4
-rw-r--r--contrib/llvm-project/clang/include/clang/APINotes/APINotesManager.h175
-rw-r--r--contrib/llvm-project/clang/include/clang/APINotes/APINotesOptions.h34
-rw-r--r--contrib/llvm-project/clang/include/clang/APINotes/APINotesReader.h200
-rw-r--r--contrib/llvm-project/clang/include/clang/APINotes/APINotesWriter.h110
-rw-r--r--contrib/llvm-project/clang/include/clang/APINotes/APINotesYAMLCompiler.h11
-rw-r--r--contrib/llvm-project/clang/include/clang/APINotes/Types.h226
-rw-r--r--contrib/llvm-project/clang/include/clang/ARCMigrate/ARCMT.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/ARCMigrate/ARCMTActions.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/ARCMigrate/FileRemapper.h16
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/APNumericStorage.h71
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/APValue.h16
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTConcept.h146
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTConsumer.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTContext.h477
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTDiagnostic.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTDumper.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTFwd.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTImportError.h50
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTImporter.h52
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTImporterLookupTable.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTImporterSharedState.h26
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTLambda.h20
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTNodeTraverser.h67
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTStructuralEquivalence.h18
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTTypeTraits.h101
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ASTUnresolvedSet.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/AbstractBasicReader.h21
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/AbstractBasicWriter.h21
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/AbstractTypeReader.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/AbstractTypeWriter.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Attr.h57
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/AttrIterator.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/BuiltinTypes.def3
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/CXXInheritance.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/CXXRecordDeclDefinitionBits.def3
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/CanonicalType.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/CharUnits.h13
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Comment.h330
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/CommentCommands.td113
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/CommentHTMLTags.td6
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/CommentLexer.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/CommentParser.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/CommentSema.h24
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/CommentVisitor.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ComparisonCategories.h14
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ComputeDependence.h14
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/CurrentSourceLocExprScope.h13
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Decl.h706
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclBase.h372
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclCXX.h462
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclContextInternals.h13
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclFriend.h11
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclObjC.h156
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclObjCCommon.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclOpenMP.h34
-rwxr-xr-xcontrib/llvm-project/clang/include/clang/AST/DeclTemplate.h397
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DeclarationName.h46
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DependenceFlags.h24
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/DependentDiagnostic.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Expr.h908
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ExprCXX.h568
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ExprConcepts.h228
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ExprObjC.h60
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ExprOpenMP.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ExternalASTMerger.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ExternalASTSource.h14
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/FormatString.h30
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/GlobalDecl.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/IgnoreExpr.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/JSONNodeDumper.h25
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/LambdaCapture.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/LexicallyOrderedRecursiveASTVisitor.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/LocInfoType.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Mangle.h61
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/MangleNumberingContext.h10
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/NSAPI.h16
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/NestedNameSpecifier.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/NonTrivialTypeVisitor.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ODRDiagsEmitter.h203
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ODRHash.h17
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/OSLog.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/OpenMPClause.h1744
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/OperationKinds.def9
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/ParentMapContext.h20
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/PrettyDeclStackTrace.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/PrettyPrinter.h85
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/PropertiesBase.td158
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/QualTypeNames.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Randstruct.h35
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/RawCommentList.h29
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/RecursiveASTVisitor.h468
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Redeclarable.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Stmt.h403
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/StmtCXX.h39
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/StmtObjC.h41
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/StmtOpenMP.h1145
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/TemplateArgumentVisitor.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/TemplateBase.h197
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/TemplateName.h169
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/TextNodeDumper.h17
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/Type.h1137
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/TypeLoc.h316
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/TypeOrdering.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/TypeProperties.td118
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/UnresolvedSet.h14
-rw-r--r--contrib/llvm-project/clang/include/clang/AST/VTableBuilder.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchFinder.h11
-rw-r--r--contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchers.h965
-rw-r--r--contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchersInternal.h209
-rw-r--r--contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h10
-rw-r--r--contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Parser.h18
-rw-r--r--contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Registry.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/VariantValue.h16
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/CalledOnceCheck.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/Consumed.h11
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/Dominators.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/ExprMutationAnalyzer.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/IntervalPartition.h123
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/PostOrderCFGView.h10
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/ReachableCode.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafety.h17
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h68
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyTIL.h37
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyTraverse.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyUtil.h10
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/UnsafeBufferUsage.h122
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def46
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/AnalysisDeclContext.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/AnyCall.h23
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/BodyFarm.h12
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/CFG.h143
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/CFGStmtMap.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/CallGraph.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/CloneDetection.h20
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/ConstructionContext.h77
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Arena.h152
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/CFGMatchSwitch.h98
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/ControlFlowContext.h79
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysis.h332
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h304
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h739
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowLattice.h31
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowValues.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowWorklist.h35
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DebugSupport.h36
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Formula.h147
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Logger.h91
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/MapLattice.h143
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/MatchSwitch.h174
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Models/ChromiumCheckModel.h38
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.h80
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/NoopAnalysis.h41
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/NoopLattice.h41
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/RecordOps.h68
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/SimplifyConstraints.h49
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Solver.h98
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/StorageLocation.h181
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Transfer.h61
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h159
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Value.h231
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h58
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/IssueHash.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/MacroExpansionContext.h12
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/PathDiagnostic.h41
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/ProgramPoint.h82
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/RetainSummaryManager.h22
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/SelectorExtras.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Analysis/Support/BumpVector.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/AArch64SVEACLETypes.def12
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/AddressSpaces.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/AlignedAllocation.h12
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Attr.td1059
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td1949
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/AttrSubjectMatchRules.h11
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/AttributeCommonInfo.h170
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Attributes.h26
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinHeaders.def43
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Builtins.def1335
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Builtins.h107
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsAArch64.def355
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsAArch64NeonSVEBridge.def39
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsAArch64NeonSVEBridge_cg.def39
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsAMDGPU.def241
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsARM.def248
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagon.def22
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagonDep.def184
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagonMapCustomDep.def196
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsLoongArch.def28
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsLoongArchBase.def58
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsLoongArchLASX.def988
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsLoongArchLSX.def959
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsNEON.def1
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsNVPTX.def312
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsPPC.def1464
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCV.def114
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCVVector.def22
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsSME.def21
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsSVE.def2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsSystemZ.def46
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsVE.def32
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsVEVL.gen.def1257
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsWebAssembly.def64
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86.def1237
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86_64.def51
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/CLWarnings.h26
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/CharInfo.h62
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.def173
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.h123
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Cuda.h40
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/CustomizableOptional.h280
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DarwinSDKInfo.h62
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DebugInfoOptions.h60
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DebugOptions.def146
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DeclNodes.td6
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Diagnostic.h97
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Diagnostic.td19
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticAST.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticASTKinds.td462
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticAnalysis.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticCategories.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticComment.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticCommentKinds.td4
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticCommonKinds.td148
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticCrossTU.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticCrossTUKinds.td4
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticDocs.td15
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriver.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td447
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticError.h15
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontend.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontendKinds.td78
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticGroups.td306
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h63
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticLex.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticLexKinds.td194
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.def4
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.h16
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticParse.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td283
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticRefactoring.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticSema.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td1624
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticSerialization.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DiagnosticSerializationKinds.td272
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/DirectoryEntry.h137
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/ExceptionSpecificationType.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/FPOptions.def12
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Features.def28
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/FileEntry.h178
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/FileManager.h49
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/FileSystemStatCache.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/HLSLRuntime.h66
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/HeaderInclude.h73
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/IdentifierTable.h440
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/JsonSupport.h24
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/LLVM.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Lambda.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/LangOptions.def130
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/LangOptions.h333
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/LangStandard.h38
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/LangStandards.def102
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Linkage.h61
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/MSP430Target.def3
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/MakeSupport.h23
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Module.h254
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/NoSanitizeList.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/ObjCRuntime.h52
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/OpenACCKinds.h401
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensionTypes.def8
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensions.def8
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/OpenCLOptions.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.def81
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.h96
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/OperatorKinds.def4
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/OperatorPrecedence.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/ParsedAttrInfo.h168
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/PartialDiagnostic.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/PlistSupport.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/PragmaKinds.h12
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/ProfileList.h33
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/RISCVVTypes.def385
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Sanitizers.def8
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Sanitizers.h13
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Sarif.h513
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/SourceLocation.h36
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/SourceManager.h280
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/SourceMgrAdapter.h85
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Specifiers.h84
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Stack.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/StmtNodes.td23
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/SyncScope.h142
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TargetBuiltins.h60
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TargetCXXABI.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TargetID.h29
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TargetInfo.h347
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TargetOSMacros.def55
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TargetOptions.h38
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Thunk.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TokenKinds.def188
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TokenKinds.h18
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TransformTypeTraits.def29
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TypeNodes.td8
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/TypeTraits.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Version.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/Visibility.h28
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/WebAssemblyReferenceTypes.def40
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/X86Target.def110
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/arm_bf16.td2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/arm_fp16.td2
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/arm_neon.td130
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/arm_neon_incl.td1
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/arm_sme.td676
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/arm_sve.td2243
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/arm_sve_sme_incl.td295
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/riscv_sifive_vector.td211
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td3482
-rw-r--r--contrib/llvm-project/clang/include/clang/Basic/riscv_vector_common.td713
-rw-r--r--contrib/llvm-project/clang/include/clang/CodeGen/BackendUtil.h12
-rw-r--r--contrib/llvm-project/clang/include/clang/CodeGen/CGFunctionInfo.h54
-rw-r--r--contrib/llvm-project/clang/include/clang/CodeGen/CodeGenABITypes.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/CodeGen/CodeGenAction.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/CodeGen/ConstantInitBuilder.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/CodeGen/ModuleBuilder.h12
-rw-r--r--contrib/llvm-project/clang/include/clang/CodeGen/ObjectFilePCHContainerOperations.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/CodeGen/SwiftCallingConv.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/CrossTU/CrossTranslationUnit.h28
-rw-r--r--contrib/llvm-project/clang/include/clang/DirectoryWatcher/DirectoryWatcher.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Action.h77
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/ClangOptionDocs.td6
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Compilation.h41
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Distro.h13
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Driver.h262
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Job.h49
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Multilib.h134
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/MultilibBuilder.h134
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/OffloadBundler.h126
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Options.h37
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Options.td5140
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Phases.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/SanitizerArgs.h32
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Tool.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/ToolChain.h176
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Types.def26
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Types.h19
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/Util.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/Driver/XRayArgs.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/ExtractAPI/API.h1675
-rw-r--r--contrib/llvm-project/clang/include/clang/ExtractAPI/APIIgnoresList.h76
-rw-r--r--contrib/llvm-project/clang/include/clang/ExtractAPI/AvailabilityInfo.h76
-rw-r--r--contrib/llvm-project/clang/include/clang/ExtractAPI/DeclarationFragments.h454
-rw-r--r--contrib/llvm-project/clang/include/clang/ExtractAPI/ExtractAPIActionBase.h54
-rw-r--r--contrib/llvm-project/clang/include/clang/ExtractAPI/ExtractAPIVisitor.h1426
-rw-r--r--contrib/llvm-project/clang/include/clang/ExtractAPI/FrontendActions.h95
-rw-r--r--contrib/llvm-project/clang/include/clang/ExtractAPI/Serialization/SerializerBase.h314
-rw-r--r--contrib/llvm-project/clang/include/clang/ExtractAPI/Serialization/SymbolGraphSerializer.h243
-rw-r--r--contrib/llvm-project/clang/include/clang/ExtractAPI/TypedefUnderlyingTypeResolver.h48
-rwxr-xr-xcontrib/llvm-project/clang/include/clang/Format/Format.h3334
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/ASTConsumers.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/ASTUnit.h55
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/CommandLineSourceLoc.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/CompilerInstance.h142
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/CompilerInvocation.h357
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/DependencyOutputOptions.h19
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/FrontendAction.h17
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/FrontendActions.h28
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/FrontendOptions.h142
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/LayoutOverrideSource.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/MigratorOptions.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/MultiplexConsumer.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/PCHContainerOperations.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/PrecompiledPreamble.h94
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/PreprocessorOutputOptions.h20
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/SARIFDiagnostic.h74
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/SARIFDiagnosticPrinter.h76
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/SerializedDiagnostics.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/TextDiagnostic.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/TextDiagnosticPrinter.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/Utils.h83
-rw-r--r--contrib/llvm-project/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h163
-rw-r--r--contrib/llvm-project/clang/include/clang/Index/IndexSymbol.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Index/IndexingOptions.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/IndexSerialization/SerializablePathCollection.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Interpreter/CodeCompletion.h49
-rw-r--r--contrib/llvm-project/clang/include/clang/Interpreter/Interpreter.h102
-rw-r--r--contrib/llvm-project/clang/include/clang/Interpreter/Value.h208
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/DependencyDirectivesScanner.h140
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/DependencyDirectivesSourceMinimizer.h112
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/DirectoryLookup.h21
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/HeaderMap.h31
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/HeaderSearch.h386
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/HeaderSearchOptions.h73
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/Lexer.h140
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/LiteralSupport.h56
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/MacroInfo.h58
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/ModuleLoader.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/ModuleMap.h187
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/MultipleIncludeOpt.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/PPCallbacks.h116
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/Pragma.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/PreprocessingRecord.h26
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h706
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/PreprocessorExcludedConditionalDirectiveSkipMapping.h30
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/PreprocessorLexer.h21
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/PreprocessorOptions.h33
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/Token.h23
-rw-r--r--contrib/llvm-project/clang/include/clang/Lex/VariadicMacroSupport.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/Parse/LoopHint.h18
-rw-r--r--contrib/llvm-project/clang/include/clang/Parse/Parser.h831
-rw-r--r--contrib/llvm-project/clang/include/clang/Parse/RAIIObjectsForParser.h51
-rw-r--r--contrib/llvm-project/clang/include/clang/Rewrite/Core/RewriteRope.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/AnalysisBasedWarnings.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/CleanupInfo.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/CodeCompleteConsumer.h146
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h174
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/DelayedDiagnostic.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Designator.h201
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/EnterExpressionEvaluationContext.h69
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/ExternalSemaSource.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/HLSLExternalSemaSource.h55
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/IdentifierResolver.h14
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Initialization.h47
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Lookup.h134
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/MultiplexExternalSemaSource.h20
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Overload.h118
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Ownership.h276
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/ParsedAttr.h343
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/ParsedTemplate.h9
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/RISCVIntrinsicManager.h41
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Scope.h92
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/ScopeInfo.h99
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Sema.h2893
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/SemaConcept.h17
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/SemaLambda.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Template.h147
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/TemplateDeduction.h45
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/TemplateInstCallback.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/TypoCorrection.h12
-rw-r--r--contrib/llvm-project/clang/include/clang/Sema/Weak.h39
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ASTBitCodes.h80
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ASTReader.h298
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ASTRecordReader.h31
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ASTRecordWriter.h19
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h193
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/GlobalModuleIndex.h16
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/InMemoryModuleCache.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ModuleFile.h47
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ModuleFileExtension.h27
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/ModuleManager.h18
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/PCHContainerOperations.h19
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/SourceLocationEncoding.h163
-rw-r--r--contrib/llvm-project/clang/include/clang/Serialization/TypeBitCodes.def6
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/CheckerBase.td9
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td256
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/LocalCheckers.h27
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/MPIFunctionClassifier.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/SValExplainer.h49
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Taint.h128
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Analyses.def8
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def74
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h74
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h34
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h145
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugSuppression.h53
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h35
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Checker.h57
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h14
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h27
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h257
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h430
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h26
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h12
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h149
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h38
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h5
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h17
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h213
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopUnrolling.h1
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h150
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h128
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h108
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h112
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Regions.def1
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h23
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h28
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h134
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValVisitor.h55
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.def38
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h443
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SimpleConstraintManager.h21
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h19
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h133
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Symbols.def2
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/AnalysisConsumer.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/FrontendActions.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/ModelConsumer.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Support/RISCVVIntrinsicUtils.h570
-rw-r--r--contrib/llvm-project/clang/include/clang/Testing/CommandLineArgs.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/Testing/TestAST.h103
-rw-r--r--contrib/llvm-project/clang/include/clang/Testing/TestClangConfig.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiff.h33
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiffInternal.h4
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/AllTUsExecution.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/CommonOptionsParser.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/CompilationDatabase.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Core/Replacement.h17
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h437
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningService.h61
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h190
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h84
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h251
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/DiagnosticsYaml.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/FixIt.h2
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Inclusions/HeaderAnalysis.h46
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Inclusions/HeaderIncludes.h46
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Inclusions/IncludeStyle.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Inclusions/StandardLibrary.h158
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/ASTSelection.h17
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/AtomicChange.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/Extract.h18
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/SourceExtraction.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Lookup.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h15
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringAction.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRule.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRuleRequirements.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRules.h10
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRulesInternal.h18
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOption.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptionVisitor.h12
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptions.h15
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringResultConsumer.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringRuleContext.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/RenamingAction.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/SymbolName.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/SymbolOccurrences.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRFinder.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRFindingAction.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRLocFinder.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/ReplacementsYaml.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/StandaloneExecution.h3
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Syntax/BuildTree.h22
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Syntax/Mutations.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Syntax/Nodes.h7
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Syntax/TokenBufferTokenManager.h70
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Syntax/TokenManager.h47
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tokens.h10
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tree.h68
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Tooling.h31
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Transformer/MatchConsumer.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Transformer/Parsing.h6
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Transformer/RangeSelector.h8
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Transformer/RewriteRule.h159
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCode.h68
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCodeBuilders.h58
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Transformer/Stencil.h32
-rw-r--r--contrib/llvm-project/clang/include/clang/Tooling/Transformer/Transformer.h180
-rw-r--r--contrib/llvm-project/clang/include/clang/module.modulemap182
-rw-r--r--contrib/llvm-project/clang/include/module.modulemap209
-rw-r--r--contrib/llvm-project/clang/lib/APINotes/APINotesFormat.h96
-rw-r--r--contrib/llvm-project/clang/lib/APINotes/APINotesManager.cpp458
-rw-r--r--contrib/llvm-project/clang/lib/APINotes/APINotesReader.cpp2049
-rw-r--r--contrib/llvm-project/clang/lib/APINotes/APINotesWriter.cpp1384
-rw-r--r--contrib/llvm-project/clang/lib/APINotes/APINotesYAMLCompiler.cpp597
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp37
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/ARCMTActions.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/FileRemapper.cpp110
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/Internals.h16
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/ObjCMT.cpp70
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/PlistReporter.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/TransAutoreleasePool.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/TransGCAttrs.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/TransProperties.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/TransformActions.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/AST/APValue.cpp113
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTConcept.cpp108
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTContext.cpp3896
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp134
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTDumper.cpp80
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTImporter.cpp2269
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp35
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp558
-rw-r--r--contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp64
-rw-r--r--contrib/llvm-project/clang/lib/AST/AttrDocTable.cpp27
-rw-r--r--contrib/llvm-project/clang/lib/AST/AttrImpl.cpp123
-rw-r--r--contrib/llvm-project/clang/lib/AST/CXXABI.h1
-rw-r--r--contrib/llvm-project/clang/lib/AST/CXXInheritance.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/AST/Comment.cpp121
-rw-r--r--contrib/llvm-project/clang/lib/AST/CommentBriefParser.cpp17
-rw-r--r--contrib/llvm-project/clang/lib/AST/CommentCommandTraits.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/AST/CommentLexer.cpp72
-rw-r--r--contrib/llvm-project/clang/lib/AST/CommentParser.cpp76
-rw-r--r--contrib/llvm-project/clang/lib/AST/CommentSema.cpp177
-rw-r--r--contrib/llvm-project/clang/lib/AST/ComparisonCategories.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp239
-rw-r--r--contrib/llvm-project/clang/lib/AST/Decl.cpp1048
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclBase.cpp268
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclCXX.cpp621
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclObjC.cpp147
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclOpenMP.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp337
-rwxr-xr-xcontrib/llvm-project/clang/lib/AST/DeclTemplate.cpp446
-rw-r--r--contrib/llvm-project/clang/lib/AST/DeclarationName.cpp22
-rw-r--r--contrib/llvm-project/clang/lib/AST/Expr.cpp919
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExprCXX.cpp286
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExprClassification.cpp41
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp122
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExprConstShared.h59
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExprConstant.cpp2266
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExprObjC.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExternalASTMerger.cpp24
-rw-r--r--contrib/llvm-project/clang/lib/AST/ExternalASTSource.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/AST/FormatString.cpp187
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Boolean.h45
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.cpp238
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.h23
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.cpp2874
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h354
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeGenError.h12
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.cpp507
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.h34
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Context.cpp217
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Context.h44
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Descriptor.cpp216
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Descriptor.h156
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp39
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.cpp199
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.h48
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.cpp196
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.h111
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Floating.cpp22
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Floating.h218
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Frame.h4
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Function.cpp29
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Function.h143
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/FunctionPointer.h71
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Integral.h173
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/IntegralAP.h295
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp352
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Interp.h1335
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.cpp67
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h95
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpBuiltin.cpp950
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.cpp145
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h54
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpStack.cpp45
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpStack.h105
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpState.cpp39
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/InterpState.h17
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Opcodes.td314
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp246
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Pointer.h196
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/PrimType.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/PrimType.h134
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Primitives.h36
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Program.cpp196
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Program.h80
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Record.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Record.h32
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Source.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/Source.h55
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/State.cpp25
-rw-r--r--contrib/llvm-project/clang/lib/AST/Interp/State.h11
-rw-r--r--contrib/llvm-project/clang/lib/AST/ItaniumCXXABI.cpp51
-rw-r--r--contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp1779
-rw-r--r--contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp211
-rw-r--r--contrib/llvm-project/clang/lib/AST/Linkage.h8
-rw-r--r--contrib/llvm-project/clang/lib/AST/Mangle.cpp36
-rw-r--r--contrib/llvm-project/clang/lib/AST/MicrosoftCXXABI.cpp50
-rw-r--r--contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp579
-rw-r--r--contrib/llvm-project/clang/lib/AST/NSAPI.cpp28
-rw-r--r--contrib/llvm-project/clang/lib/AST/NestedNameSpecifier.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/AST/ODRDiagsEmitter.cpp2213
-rw-r--r--contrib/llvm-project/clang/lib/AST/ODRHash.cpp323
-rw-r--r--contrib/llvm-project/clang/lib/AST/OSLog.cpp17
-rw-r--r--contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp381
-rw-r--r--contrib/llvm-project/clang/lib/AST/ParentMap.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp47
-rw-r--r--contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp60
-rw-r--r--contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp30
-rw-r--r--contrib/llvm-project/clang/lib/AST/Randstruct.cpp231
-rw-r--r--contrib/llvm-project/clang/lib/AST/RawCommentList.cpp69
-rw-r--r--contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp291
-rw-r--r--contrib/llvm-project/clang/lib/AST/ScanfFormatString.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/AST/Stmt.cpp75
-rw-r--r--contrib/llvm-project/clang/lib/AST/StmtCXX.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/AST/StmtObjC.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp632
-rw-r--r--contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp303
-rw-r--r--contrib/llvm-project/clang/lib/AST/StmtProfile.cpp245
-rw-r--r--contrib/llvm-project/clang/lib/AST/TemplateBase.cpp208
-rw-r--r--contrib/llvm-project/clang/lib/AST/TemplateName.cpp154
-rw-r--r--contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp345
-rw-r--r--contrib/llvm-project/clang/lib/AST/Type.cpp907
-rw-r--r--contrib/llvm-project/clang/lib/AST/TypeLoc.cpp101
-rw-r--r--contrib/llvm-project/clang/lib/AST/TypePrinter.cpp626
-rw-r--r--contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp86
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchFinder.cpp292
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp72
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp63
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h94
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Parser.cpp43
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp61
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp33
-rw-r--r--contrib/llvm-project/clang/lib/ASTMatchers/GtestMatchers.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/AnalysisDeclContext.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/BodyFarm.cpp87
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/CFG.cpp1098
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/CFGStmtMap.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/CallGraph.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/CalledOnceCheck.cpp66
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/CloneDetection.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/CocoaConventions.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/ConstructionContext.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/Consumed.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp388
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Arena.cpp213
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp121
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp413
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp1112
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DebugSupport.cpp79
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Formula.cpp94
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.cpp565
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.css159
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.html118
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.js219
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Logger.cpp111
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/ChromiumCheckModel.cpp71
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp879
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/RecordOps.cpp118
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/SimplifyConstraints.cpp180
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp834
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp620
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Value.cpp62
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp796
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/IntervalPartition.cpp241
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/IssueHash.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/LiveVariables.cpp30
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/MacroExpansionContext.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/ObjCNoReturn.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/PathDiagnostic.cpp91
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp56
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/RetainSummaryManager.cpp127
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp782
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/ThreadSafetyCommon.cpp109
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp88
-rw-r--r--contrib/llvm-project/clang/lib/Analysis/UnsafeBufferUsage.cpp2941
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Attributes.cpp61
-rw-r--r--contrib/llvm-project/clang/lib/Basic/BuiltinTargetFeatures.h95
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Builtins.cpp141
-rw-r--r--contrib/llvm-project/clang/lib/Basic/CLWarnings.cpp29
-rw-r--r--contrib/llvm-project/clang/lib/Basic/CodeGenOptions.cpp37
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Cuda.cpp165
-rw-r--r--contrib/llvm-project/clang/lib/Basic/DarwinSDKInfo.cpp59
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp155
-rw-r--r--contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp95
-rw-r--r--contrib/llvm-project/clang/lib/Basic/DiagnosticOptions.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Basic/FileManager.cpp310
-rw-r--r--contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp339
-rw-r--r--contrib/llvm-project/clang/lib/Basic/LangOptions.cpp163
-rw-r--r--contrib/llvm-project/clang/lib/Basic/LangStandards.cpp66
-rw-r--r--contrib/llvm-project/clang/lib/Basic/MakeSupport.cpp35
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Module.cpp138
-rw-r--r--contrib/llvm-project/clang/lib/Basic/NoSanitizeList.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Basic/OpenCLOptions.cpp45
-rw-r--r--contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp259
-rw-r--r--contrib/llvm-project/clang/lib/Basic/ParsedAttrInfo.cpp32
-rw-r--r--contrib/llvm-project/clang/lib/Basic/ProfileList.cpp64
-rw-r--r--contrib/llvm-project/clang/lib/Basic/SanitizerSpecialCaseList.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Sanitizers.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Sarif.cpp425
-rw-r--r--contrib/llvm-project/clang/lib/Basic/SourceLocation.cpp17
-rw-r--r--contrib/llvm-project/clang/lib/Basic/SourceManager.cpp656
-rw-r--r--contrib/llvm-project/clang/lib/Basic/SourceMgrAdapter.cpp136
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Stack.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Basic/TargetID.cpp58
-rw-r--r--contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp166
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets.cpp524
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets.h4
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp939
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h157
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp271
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h118
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/ARC.h16
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp234
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/ARM.h32
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp714
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/AVR.h34
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/BPF.cpp43
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/BPF.h13
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/CSKY.cpp315
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h107
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/DirectX.cpp22
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/DirectX.h103
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp43
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h7
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Lanai.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Lanai.h10
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Le64.cpp1
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Le64.h10
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.cpp299
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.h154
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/M68k.cpp72
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/M68k.h10
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/MSP430.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/MSP430.h8
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Mips.cpp56
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Mips.h30
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp74
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h35
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp184
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h350
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.h10
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp210
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/PPC.h86
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp438
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h93
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/SPIR.cpp26
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h238
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Sparc.cpp82
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h17
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.cpp27
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h73
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/TCE.h18
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/VE.cpp20
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/VE.h9
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp76
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h52
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp451
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/X86.h183
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/XCore.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Targets/XCore.h11
-rw-r--r--contrib/llvm-project/clang/lib/Basic/TokenKinds.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Basic/TypeTraits.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Version.cpp24
-rw-r--r--contrib/llvm-project/clang/lib/Basic/Warnings.cpp16
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ABIInfo.cpp231
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h251
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.cpp453
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.h158
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Address.h92
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/BackendConsumer.h166
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp1386
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp512
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp549
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h73
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h135
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp7364
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp328
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCUDARuntime.h1
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCXX.cpp24
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp72
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h61
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp1398
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCall.h68
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp565
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp60
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h13
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp200
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp1612
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h134
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp311
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp323
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGException.cpp148
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp1648
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp172
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp252
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprComplex.cpp440
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp279
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp844
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGGPUBuiltin.cpp147
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.cpp454
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.h105
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.cpp41
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.h6
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp69
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp397
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp746
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp481
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp125
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.h20
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.cpp92
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.h11
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp5356
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h626
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeAMDGCN.cpp60
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeAMDGCN.h43
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp1274
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h180
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp56
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.h43
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h10
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp42
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp571
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp2037
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGVTT.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp245
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGVTables.h17
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CGValue.h99
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenABITypes.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp866
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp825
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h611
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp2498
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h340
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp318
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.h28
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.cpp70
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.h1
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenTypeCache.h16
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp292
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h41
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ConstantEmitter.h5
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ConstantInitBuilder.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp836
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h37
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/EHScopeStack.h13
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp750
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/LinkInModulesPass.cpp29
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/LinkInModulesPass.h42
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h7
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp436
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp47
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp86
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/PatternInit.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.cpp126
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h23
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/SwiftCallingConv.cpp48
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp11195
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h200
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/AArch64.cpp827
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/AMDGPU.cpp654
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/ARC.cpp158
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/ARM.cpp819
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/AVR.cpp154
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/BPF.cpp100
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/CSKY.cpp175
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/Hexagon.cpp423
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/Lanai.cpp154
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/LoongArch.cpp461
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/M68k.cpp55
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/MSP430.cpp94
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/Mips.cpp441
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/NVPTX.cpp342
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/PNaCl.cpp109
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/PPC.cpp993
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/RISCV.cpp555
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/SPIR.cpp218
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/Sparc.cpp409
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/SystemZ.cpp538
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/TCE.cpp82
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/VE.cpp71
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/WebAssembly.cpp173
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/X86.cpp3436
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/Targets/XCore.cpp662
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/VarBypassDetector.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/CodeGen/VarBypassDetector.h2
-rw-r--r--contrib/llvm-project/clang/lib/CrossTU/CrossTranslationUnit.cpp154
-rw-r--r--contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.h5
-rw-r--r--contrib/llvm-project/clang/lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/DirectoryWatcher/mac/DirectoryWatcher-mac.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/DirectoryWatcher/windows/DirectoryWatcher-windows.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Action.cpp74
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Compilation.cpp28
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Distro.cpp35
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Driver.cpp3061
-rw-r--r--contrib/llvm-project/clang/lib/Driver/DriverOptions.cpp44
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Job.cpp83
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Multilib.cpp417
-rw-r--r--contrib/llvm-project/clang/lib/Driver/MultilibBuilder.cpp197
-rw-r--r--contrib/llvm-project/clang/lib/Driver/OffloadBundler.cpp1684
-rw-r--r--contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp583
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChain.cpp485
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp300
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.h22
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp461
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h40
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp267
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.h49
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.cpp324
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.h30
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.cpp144
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.h65
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp289
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp296
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h25
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/CSKY.cpp169
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/CSKY.h47
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/LoongArch.cpp232
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/LoongArch.h37
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/M68k.cpp50
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/M68k.h8
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.cpp28
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.h5
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp140
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.h5
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp585
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.h4
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Sparc.cpp157
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Sparc.h3
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.h2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp136
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.h4
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.cpp425
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.h51
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CSKYToolChain.cpp204
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CSKYToolChain.h63
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp3920
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h52
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.cpp149
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.h70
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp1728
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h90
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Contiki.cpp27
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Contiki.h39
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.cpp17
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.h10
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp574
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h154
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp1212
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h80
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.cpp163
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.h14
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp772
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.h63
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp242
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.h34
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp238
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.h56
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp1558
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.h21
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.cpp458
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.cpp434
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.h (renamed from contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.h)50
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.cpp288
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.h102
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp179
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.h35
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.cpp259
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.h63
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Haiku.cpp257
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Haiku.h40
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp214
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.h20
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Hurd.cpp24
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Lanai.h2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/LazyDetector.h45
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp261
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h8
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.h6
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp953
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h57
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MSVCSetupApi.h514
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp371
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.h32
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.cpp113
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.h64
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.cpp16
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.h1
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.cpp293
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.h103
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/NaCl.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/NaCl.h2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp330
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.h14
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/OHOS.cpp419
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/OHOS.h95
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp187
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.h13
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/PPCFreeBSD.cpp28
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/PPCFreeBSD.h33
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/PPCLinux.cpp75
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/PPCLinux.h9
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp309
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h124
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp33
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h5
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/ROCm.h65
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.cpp93
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h89
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp239
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.h18
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/TCE.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/TCE.h2
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp55
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.h3
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp267
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h17
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/XCore.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/XCore.h7
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.cpp308
-rw-r--r--contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.h62
-rw-r--r--contrib/llvm-project/clang/lib/Driver/Types.cpp260
-rw-r--r--contrib/llvm-project/clang/lib/Driver/XRayArgs.cpp154
-rw-r--r--contrib/llvm-project/clang/lib/Edit/Commit.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Edit/EditedSource.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Edit/RewriteObjCFoundationAPI.cpp35
-rw-r--r--contrib/llvm-project/clang/lib/ExtractAPI/API.cpp566
-rw-r--r--contrib/llvm-project/clang/lib/ExtractAPI/APIIgnoresList.cpp62
-rw-r--r--contrib/llvm-project/clang/lib/ExtractAPI/AvailabilityInfo.cpp35
-rw-r--r--contrib/llvm-project/clang/lib/ExtractAPI/DeclarationFragments.cpp1477
-rw-r--r--contrib/llvm-project/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp584
-rw-r--r--contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp1302
-rw-r--r--contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp76
-rw-r--r--contrib/llvm-project/clang/lib/Format/AffectedRangeManager.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/Format/BreakableToken.cpp320
-rw-r--r--contrib/llvm-project/clang/lib/Format/BreakableToken.h44
-rw-r--r--contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp1413
-rw-r--r--contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h47
-rw-r--r--contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.cpp261
-rw-r--r--contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.h41
-rw-r--r--contrib/llvm-project/clang/lib/Format/Format.cpp2214
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatToken.cpp74
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatToken.h884
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp850
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h25
-rw-r--r--contrib/llvm-project/clang/lib/Format/FormatTokenSource.h267
-rw-r--r--contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp221
-rw-r--r--contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.h39
-rw-r--r--contrib/llvm-project/clang/lib/Format/MacroCallReconstructor.cpp569
-rw-r--r--contrib/llvm-project/clang/lib/Format/MacroExpander.cpp50
-rw-r--r--contrib/llvm-project/clang/lib/Format/Macros.h305
-rw-r--r--contrib/llvm-project/clang/lib/Format/MatchFilePath.cpp122
-rw-r--r--contrib/llvm-project/clang/lib/Format/MatchFilePath.h22
-rw-r--r--contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp156
-rw-r--r--contrib/llvm-project/clang/lib/Format/ObjCPropertyAttributeOrderFixer.cpp220
-rw-r--r--contrib/llvm-project/clang/lib/Format/ObjCPropertyAttributeOrderFixer.h51
-rw-r--r--contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp639
-rw-r--r--contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.h86
-rw-r--r--contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp116
-rw-r--r--contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp85
-rw-r--r--contrib/llvm-project/clang/lib/Format/TokenAnalyzer.h17
-rw-r--r--contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp3414
-rw-r--r--contrib/llvm-project/clang/lib/Format/TokenAnnotator.h119
-rw-r--r--contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp733
-rw-r--r--contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.h2
-rw-r--r--contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp3168
-rw-r--r--contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h177
-rw-r--r--contrib/llvm-project/clang/lib/Format/UsingDeclarationsSorter.cpp67
-rw-r--r--contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp818
-rw-r--r--contrib/llvm-project/clang/lib/Format/WhitespaceManager.h37
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/ASTConsumers.cpp32
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/ASTMerge.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp171
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/ChainedIncludesSource.cpp39
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp689
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp2510
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp44
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp89
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/DependencyGraph.cpp39
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/DiagnosticRenderer.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp306
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp389
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/FrontendOptions.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/HeaderIncludeGen.cpp186
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp400
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/LayoutOverrideSource.cpp107
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/LogDiagnosticPrinter.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp50
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/MultiplexConsumer.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp401
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp626
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/Rewrite/FixItRewriter.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/Rewrite/FrontendActions.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/Rewrite/HTMLPrint.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp139
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp81
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteObjC.cpp38
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/SARIFDiagnostic.cpp224
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/SARIFDiagnosticPrinter.cpp83
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticReader.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/TestModuleFileExtension.cpp16
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/TestModuleFileExtension.h2
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp701
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/TextDiagnosticBuffer.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp76
-rw-r--r--contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp42
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_cuda_complex_builtins.h6
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_cuda_device_functions.h4
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_cuda_intrinsics.h235
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_cuda_libdevice_declares.h10
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_cuda_math.h8
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_cuda_runtime_wrapper.h73
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_cuda_texture_intrinsics.h742
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_hip_cmath.h2
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_hip_libdevice_declares.h67
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_hip_math.h505
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_hip_runtime_wrapper.h85
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__clang_hip_stdlib.h43
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__stdarg___gnuc_va_list.h13
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__stdarg___va_copy.h12
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__stdarg_va_arg.h22
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__stdarg_va_copy.h12
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__stdarg_va_list.h13
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__stddef_max_align_t.h2
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__stddef_null.h29
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__stddef_nullptr_t.h29
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__stddef_offsetof.h17
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__stddef_ptrdiff_t.h20
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__stddef_rsize_t.h20
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__stddef_size_t.h20
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__stddef_unreachable.h21
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__stddef_wchar_t.h28
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__stddef_wint_t.h15
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__wmmintrin_aes.h2
-rw-r--r--contrib/llvm-project/clang/lib/Headers/__wmmintrin_pclmul.h20
-rw-r--r--contrib/llvm-project/clang/lib/Headers/adcintrin.h160
-rw-r--r--contrib/llvm-project/clang/lib/Headers/adxintrin.h104
-rw-r--r--contrib/llvm-project/clang/lib/Headers/altivec.h1256
-rw-r--r--contrib/llvm-project/clang/lib/Headers/ammintrin.h12
-rw-r--r--contrib/llvm-project/clang/lib/Headers/amxcomplexintrin.h169
-rw-r--r--contrib/llvm-project/clang/lib/Headers/amxfp16intrin.h58
-rw-r--r--contrib/llvm-project/clang/lib/Headers/amxintrin.h67
-rw-r--r--contrib/llvm-project/clang/lib/Headers/arm_acle.h309
-rw-r--r--contrib/llvm-project/clang/lib/Headers/arm_neon_sve_bridge.h182
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx2intrin.h4702
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512bf16intrin.h44
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512bitalgintrin.h5
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512bwintrin.h232
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512cdintrin.h4
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512dqintrin.h736
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512erintrin.h204
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512fintrin.h3281
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512fp16intrin.h3352
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512ifmaintrin.h4
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512ifmavlintrin.h56
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512pfintrin.h5
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512vbmi2intrin.h98
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512vbmiintrin.h5
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512vbmivlintrin.h11
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512vlbf16intrin.h137
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512vlbitalgintrin.h10
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512vlbwintrin.h566
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512vlcdintrin.h11
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512vldqintrin.h278
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512vlfp16intrin.h2071
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512vlintrin.h1216
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512vlvbmi2intrin.h202
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512vlvnniintrin.h58
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512vlvp2intersectintrin.h10
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512vnniintrin.h5
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512vp2intersectintrin.h5
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512vpopcntdqintrin.h4
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avx512vpopcntdqvlintrin.h8
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avxifmaintrin.h177
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avxintrin.h531
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avxneconvertintrin.h484
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avxvnniint16intrin.h473
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avxvnniint8intrin.h471
-rw-r--r--contrib/llvm-project/clang/lib/Headers/avxvnniintrin.h32
-rw-r--r--contrib/llvm-project/clang/lib/Headers/bmi2intrin.h200
-rw-r--r--contrib/llvm-project/clang/lib/Headers/bmiintrin.h277
-rw-r--r--contrib/llvm-project/clang/lib/Headers/cetintrin.h24
-rw-r--r--contrib/llvm-project/clang/lib/Headers/clflushoptintrin.h9
-rw-r--r--contrib/llvm-project/clang/lib/Headers/clzerointrin.h12
-rw-r--r--contrib/llvm-project/clang/lib/Headers/cmpccxaddintrin.h70
-rw-r--r--contrib/llvm-project/clang/lib/Headers/cpuid.h16
-rw-r--r--contrib/llvm-project/clang/lib/Headers/crc32intrin.h100
-rw-r--r--contrib/llvm-project/clang/lib/Headers/cuda_wrappers/bits/basic_string.h9
-rw-r--r--contrib/llvm-project/clang/lib/Headers/cuda_wrappers/bits/basic_string.tcc9
-rw-r--r--contrib/llvm-project/clang/lib/Headers/cuda_wrappers/bits/shared_ptr_base.h9
-rw-r--r--contrib/llvm-project/clang/lib/Headers/cuda_wrappers/cmath90
-rw-r--r--contrib/llvm-project/clang/lib/Headers/emmintrin.h1356
-rw-r--r--contrib/llvm-project/clang/lib/Headers/f16cintrin.h8
-rw-r--r--contrib/llvm-project/clang/lib/Headers/float.h28
-rw-r--r--contrib/llvm-project/clang/lib/Headers/fmaintrin.h564
-rw-r--r--contrib/llvm-project/clang/lib/Headers/gfniintrin.h136
-rw-r--r--contrib/llvm-project/clang/lib/Headers/hexagon_protos.h11
-rw-r--r--contrib/llvm-project/clang/lib/Headers/hexagon_types.h32
-rw-r--r--contrib/llvm-project/clang/lib/Headers/hlsl.h (renamed from contrib/llvm-project/clang/include/clang/Basic/DiagnosticAnalysisKinds.td)10
-rw-r--r--contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_basic_types.h67
-rw-r--r--contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_intrinsics.h624
-rw-r--r--contrib/llvm-project/clang/lib/Headers/hresetintrin.h4
-rw-r--r--contrib/llvm-project/clang/lib/Headers/hvx_hexagon_protos.h1609
-rw-r--r--contrib/llvm-project/clang/lib/Headers/ia32intrin.h888
-rw-r--r--contrib/llvm-project/clang/lib/Headers/immintrin.h279
-rw-r--r--contrib/llvm-project/clang/lib/Headers/intrin.h85
-rw-r--r--contrib/llvm-project/clang/lib/Headers/keylockerintrin.h54
-rw-r--r--contrib/llvm-project/clang/lib/Headers/larchintrin.h246
-rw-r--r--contrib/llvm-project/clang/lib/Headers/lasxintrin.h3884
-rw-r--r--contrib/llvm-project/clang/lib/Headers/limits.h27
-rw-r--r--contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/assert.h34
-rw-r--r--contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/ctype.h102
-rw-r--r--contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/inttypes.h34
-rw-r--r--contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/llvm-libc-decls/README.txt6
-rw-r--r--contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/stdio.h80
-rw-r--r--contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/stdlib.h45
-rw-r--r--contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/string.h96
-rw-r--r--contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/time.h34
-rw-r--r--contrib/llvm-project/clang/lib/Headers/lsxintrin.h3750
-rw-r--r--contrib/llvm-project/clang/lib/Headers/mm_malloc.h6
-rw-r--r--contrib/llvm-project/clang/lib/Headers/mmintrin.h16
-rw-r--r--contrib/llvm-project/clang/lib/Headers/module.modulemap157
-rw-r--r--contrib/llvm-project/clang/lib/Headers/mwaitxintrin.h29
-rw-r--r--contrib/llvm-project/clang/lib/Headers/nmmintrin.h4
-rw-r--r--contrib/llvm-project/clang/lib/Headers/opencl-c-base.h117
-rw-r--r--contrib/llvm-project/clang/lib/Headers/opencl-c.h13460
-rw-r--r--contrib/llvm-project/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h1
-rw-r--r--contrib/llvm-project/clang/lib/Headers/openmp_wrappers/cmath2
-rw-r--r--contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex11
-rw-r--r--contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex.h9
-rw-r--r--contrib/llvm-project/clang/lib/Headers/openmp_wrappers/new2
-rw-r--r--contrib/llvm-project/clang/lib/Headers/openmp_wrappers/stdlib.h29
-rw-r--r--contrib/llvm-project/clang/lib/Headers/pmmintrin.h29
-rw-r--r--contrib/llvm-project/clang/lib/Headers/ppc_wrappers/bmi2intrin.h134
-rw-r--r--contrib/llvm-project/clang/lib/Headers/ppc_wrappers/bmiintrin.h165
-rw-r--r--contrib/llvm-project/clang/lib/Headers/ppc_wrappers/emmintrin.h2925
-rw-r--r--contrib/llvm-project/clang/lib/Headers/ppc_wrappers/immintrin.h27
-rw-r--r--contrib/llvm-project/clang/lib/Headers/ppc_wrappers/mm_malloc.h29
-rw-r--r--contrib/llvm-project/clang/lib/Headers/ppc_wrappers/mmintrin.h775
-rw-r--r--contrib/llvm-project/clang/lib/Headers/ppc_wrappers/nmmintrin.h26
-rw-r--r--contrib/llvm-project/clang/lib/Headers/ppc_wrappers/pmmintrin.h153
-rw-r--r--contrib/llvm-project/clang/lib/Headers/ppc_wrappers/smmintrin.h614
-rw-r--r--contrib/llvm-project/clang/lib/Headers/ppc_wrappers/tmmintrin.h648
-rw-r--r--contrib/llvm-project/clang/lib/Headers/ppc_wrappers/x86gprintrin.h17
-rw-r--r--contrib/llvm-project/clang/lib/Headers/ppc_wrappers/x86intrin.h28
-rw-r--r--contrib/llvm-project/clang/lib/Headers/ppc_wrappers/xmmintrin.h2070
-rw-r--r--contrib/llvm-project/clang/lib/Headers/prfchiintrin.h61
-rw-r--r--contrib/llvm-project/clang/lib/Headers/prfchwintrin.h7
-rw-r--r--contrib/llvm-project/clang/lib/Headers/raointintrin.h203
-rw-r--r--contrib/llvm-project/clang/lib/Headers/rdpruintrin.h57
-rw-r--r--contrib/llvm-project/clang/lib/Headers/rdseedintrin.h73
-rw-r--r--contrib/llvm-project/clang/lib/Headers/riscv_bitmanip.h195
-rw-r--r--contrib/llvm-project/clang/lib/Headers/riscv_crypto.h170
-rw-r--r--contrib/llvm-project/clang/lib/Headers/riscv_ntlh.h26
-rw-r--r--contrib/llvm-project/clang/lib/Headers/rtmintrin.h2
-rw-r--r--contrib/llvm-project/clang/lib/Headers/sha512intrin.h200
-rw-r--r--contrib/llvm-project/clang/lib/Headers/shaintrin.h128
-rw-r--r--contrib/llvm-project/clang/lib/Headers/sifive_vector.h (renamed from contrib/llvm-project/clang/include/clang/Analysis/AnalysisDiagnostic.h)12
-rw-r--r--contrib/llvm-project/clang/lib/Headers/sm3intrin.h238
-rw-r--r--contrib/llvm-project/clang/lib/Headers/sm4intrin.h269
-rw-r--r--contrib/llvm-project/clang/lib/Headers/smmintrin.h622
-rw-r--r--contrib/llvm-project/clang/lib/Headers/stdalign.h3
-rw-r--r--contrib/llvm-project/clang/lib/Headers/stdarg.h82
-rw-r--r--contrib/llvm-project/clang/lib/Headers/stdatomic.h22
-rw-r--r--contrib/llvm-project/clang/lib/Headers/stdbool.h17
-rw-r--r--contrib/llvm-project/clang/lib/Headers/stdckdint.h42
-rw-r--r--contrib/llvm-project/clang/lib/Headers/stddef.h158
-rw-r--r--contrib/llvm-project/clang/lib/Headers/stdint.h307
-rw-r--r--contrib/llvm-project/clang/lib/Headers/stdnoreturn.h13
-rw-r--r--contrib/llvm-project/clang/lib/Headers/tmmintrin.h25
-rw-r--r--contrib/llvm-project/clang/lib/Headers/uintrintrin.h16
-rw-r--r--contrib/llvm-project/clang/lib/Headers/unwind.h19
-rw-r--r--contrib/llvm-project/clang/lib/Headers/usermsrintrin.h51
-rw-r--r--contrib/llvm-project/clang/lib/Headers/vaesintrin.h8
-rw-r--r--contrib/llvm-project/clang/lib/Headers/vecintrin.h416
-rw-r--r--contrib/llvm-project/clang/lib/Headers/velintrin.h71
-rw-r--r--contrib/llvm-project/clang/lib/Headers/velintrin_approx.h120
-rw-r--r--contrib/llvm-project/clang/lib/Headers/velintrin_gen.h1257
-rw-r--r--contrib/llvm-project/clang/lib/Headers/vpclmulqdqintrin.h12
-rw-r--r--contrib/llvm-project/clang/lib/Headers/wasm_simd128.h339
-rw-r--r--contrib/llvm-project/clang/lib/Headers/wmmintrin.h4
-rw-r--r--contrib/llvm-project/clang/lib/Headers/x86gprintrin.h47
-rw-r--r--contrib/llvm-project/clang/lib/Headers/x86intrin.h4
-rw-r--r--contrib/llvm-project/clang/lib/Headers/xmmintrin.h43
-rw-r--r--contrib/llvm-project/clang/lib/Headers/xopintrin.h62
-rw-r--r--contrib/llvm-project/clang/lib/Headers/xsavecintrin.h50
-rw-r--r--contrib/llvm-project/clang/lib/Index/CommentToXML.cpp54
-rw-r--r--contrib/llvm-project/clang/lib/Index/FileIndexRecord.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/Index/IndexBody.cpp73
-rw-r--r--contrib/llvm-project/clang/lib/Index/IndexDecl.cpp79
-rw-r--r--contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp39
-rw-r--r--contrib/llvm-project/clang/lib/Index/IndexTypeSourceInfo.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Index/IndexingContext.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/Index/IndexingContext.h14
-rw-r--r--contrib/llvm-project/clang/lib/Index/USRGeneration.cpp265
-rw-r--r--contrib/llvm-project/clang/lib/IndexSerialization/SerializablePathCollection.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/CodeCompletion.cpp388
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/DeviceOffload.cpp176
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/DeviceOffload.h51
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.cpp81
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h23
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp231
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h32
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/Interpreter.cpp688
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/InterpreterUtils.cpp111
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/InterpreterUtils.h54
-rw-r--r--contrib/llvm-project/clang/lib/Interpreter/Value.cpp267
-rw-r--r--contrib/llvm-project/clang/lib/Lex/DependencyDirectivesScanner.cpp994
-rw-r--r--contrib/llvm-project/clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp961
-rw-r--r--contrib/llvm-project/clang/lib/Lex/HeaderMap.cpp55
-rw-r--r--contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp951
-rw-r--r--contrib/llvm-project/clang/lib/Lex/InitHeaderSearch.cpp (renamed from contrib/llvm-project/clang/lib/Frontend/InitHeaderSearch.cpp)354
-rw-r--r--contrib/llvm-project/clang/lib/Lex/Lexer.cpp1275
-rw-r--r--contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp667
-rw-r--r--contrib/llvm-project/clang/lib/Lex/MacroArgs.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Lex/MacroInfo.cpp40
-rw-r--r--contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp651
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PPCaching.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PPCallbacks.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp1021
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp41
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp179
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp360
-rw-r--r--contrib/llvm-project/clang/lib/Lex/Pragma.cpp441
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PreprocessingRecord.cpp31
-rw-r--r--contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp402
-rw-r--r--contrib/llvm-project/clang/lib/Lex/PreprocessorLexer.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Lex/TokenConcatenation.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp133
-rw-r--r--contrib/llvm-project/clang/lib/Lex/UnicodeCharSets.h510
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseAST.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp74
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp1808
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp1336
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp472
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp624
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseHLSL.cpp200
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseInit.cpp49
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp229
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseOpenACC.cpp1044
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp1388
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp792
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp619
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseStmtAsm.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp242
-rw-r--r--contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp352
-rw-r--r--contrib/llvm-project/clang/lib/Parse/Parser.cpp579
-rw-r--r--contrib/llvm-project/clang/lib/Rewrite/HTMLRewrite.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/Rewrite/Rewriter.cpp85
-rw-r--r--contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp400
-rw-r--r--contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp139
-rw-r--r--contrib/llvm-project/clang/lib/Sema/DeclSpec.cpp86
-rw-r--r--contrib/llvm-project/clang/lib/Sema/HLSLExternalSemaSource.cpp532
-rw-r--r--contrib/llvm-project/clang/lib/Sema/IdentifierResolver.cpp30
-rw-r--r--contrib/llvm-project/clang/lib/Sema/JumpDiagnostics.cpp166
-rw-r--r--contrib/llvm-project/clang/lib/Sema/MultiplexExternalSemaSource.cpp30
-rw-r--r--contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td650
-rw-r--r--contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp111
-rw-r--r--contrib/llvm-project/clang/lib/Sema/Scope.cpp91
-rw-r--r--contrib/llvm-project/clang/lib/Sema/ScopeInfo.cpp20
-rw-r--r--contrib/llvm-project/clang/lib/Sema/Sema.cpp606
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaAccess.cpp52
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp311
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaAvailability.cpp75
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp177
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp151
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaCast.cpp225
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp5638
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp1074
-rwxr-xr-xcontrib/llvm-project/clang/lib/Sema/SemaConcept.cpp924
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp958
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp4681
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp2338
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp2943
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp308
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp124
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp4738
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp1418
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp175
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp202
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaFixItUtils.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaHLSL.cpp34
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaInit.cpp1632
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp1117
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp895
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaModule.cpp677
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp74
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp4935
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp3068
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaPseudoObject.cpp24
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaRISCVVectorLookup.cpp497
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp122
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp503
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp109
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp269
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp2464
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp2740
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp1339
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp1135
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp130
-rw-r--r--contrib/llvm-project/clang/lib/Sema/SemaType.cpp2249
-rw-r--r--contrib/llvm-project/clang/lib/Sema/TreeTransform.h2086
-rw-r--r--contrib/llvm-project/clang/lib/Sema/TypeLocBuilder.cpp29
-rw-r--r--contrib/llvm-project/clang/lib/Sema/TypeLocBuilder.h15
-rw-r--r--contrib/llvm-project/clang/lib/Sema/UsedDeclVisitor.h20
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp24
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp4476
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp1591
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTReaderInternals.h8
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp786
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp1644
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp1309
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp452
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/GeneratePCH.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp88
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ModuleFileExtension.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/ModuleManager.cpp120
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/MultiOnDiskHashTable.h8
-rw-r--r--contrib/llvm-project/clang/lib/Serialization/PCHContainerOperations.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp23
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp594
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp184
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BitwiseShiftChecker.cpp370
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp89
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp24
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp31
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp1068
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CXXDeleteChecker.cpp238
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp41
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp67
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp154
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp21
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CloneChecker.cpp17
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp58
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp41
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp50
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugContainerModeling.cpp24
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugIteratorModeling.cpp28
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp155
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp51
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp49
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp46
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp73
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoChecker.cpp250
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp325
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h110
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoTesterChecker.cpp185
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp176
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp41
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp30
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp1656
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp38
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InvalidatedIteratorChecker.cpp28
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp22
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.h23
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp80
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp24
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp48
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp54
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h21
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp51
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp719
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp85
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp38
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MismatchedIteratorChecker.cpp55
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp26
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp43
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp17
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp27
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp21
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp227
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp23
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp44
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp21
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp36
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp84
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp40
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h1
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp61
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp68
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp23
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp17
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp58
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp73
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp147
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp2627
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp298
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp846
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StringChecker.cpp105
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h99
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.cpp203
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.h106
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp22
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp20
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustReturnsNonnullChecker.cpp60
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp90
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp20
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp85
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp14
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedNewArraySizeChecker.cpp80
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObject.h2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp33
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp51
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp56
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp33
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp70
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp54
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp41
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h6
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp86
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h24
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp51
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLambdaCapturesChecker.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLocalVarsChecker.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Yaml.h15
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/InvalidPtrChecker.cpp353
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/PutenvWithAutoChecker.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/APSIntType.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp74
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp30
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp174
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp907
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugSuppression.cpp169
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallDescription.cpp169
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp292
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp38
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp33
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerRegistryData.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ConstraintManager.cpp80
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp94
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicExtent.cpp73
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicType.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Environment.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp30
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp1011
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp194
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp486
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp293
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp43
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp688
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopWidening.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp287
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp33
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp77
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp1224
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp86
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp932
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp980
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SVals.cpp183
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp394
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp21
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp474
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp85
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp105
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp11
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Core/WorkList.cpp16
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp189
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalyzerHelpFlags.cpp15
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp12
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp7
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelInjector.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelInjector.h3
-rw-r--r--contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp1238
-rw-r--r--contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp57
-rw-r--r--contrib/llvm-project/clang/lib/Testing/TestAST.cpp166
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/ASTDiff/ASTDiff.cpp21
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/AllTUsExecution.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/ArgumentsAdjusters.cpp16
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/CommonOptionsParser.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/CompilationDatabase.cpp10
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Core/Replacement.cpp13
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp431
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningService.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp320
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp508
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp712
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.h2
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/DumpTool/ClangSrcLocDump.cpp18
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp27
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderAnalysis.cpp118
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp60
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/CSymbolMap.inc945
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StandardLibrary.cpp323
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdSpecialSymbolMap.inc739
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdSymbolMap.inc3873
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdTsSymbolMap.inc52
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp25
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/JSONCompilationDatabase.cpp26
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Refactoring.cpp5
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Refactoring/ASTSelection.cpp19
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Refactoring/ASTSelectionRequirements.cpp8
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Refactoring/AtomicChange.cpp4
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Refactoring/Extract/Extract.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Refactoring/Extract/SourceExtraction.cpp3
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Refactoring/Lookup.cpp16
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp9
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp29
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Syntax/BuildTree.cpp70
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Syntax/ComputeReplacements.cpp49
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Syntax/Mutations.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Syntax/Nodes.cpp2
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Syntax/Synthesis.cpp34
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Syntax/TokenBufferTokenManager.cpp25
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp223
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Syntax/Tree.cpp61
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Tooling.cpp134
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Transformer/Parsing.cpp17
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Transformer/RangeSelector.cpp6
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Transformer/RewriteRule.cpp93
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCode.cpp94
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCodeBuilders.cpp113
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp162
-rw-r--r--contrib/llvm-project/clang/lib/Tooling/Transformer/Transformer.cpp58
-rw-r--r--contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArch.cpp87
-rw-r--r--contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArchByHIP.cpp96
-rw-r--r--contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArchByHSA.cpp122
-rw-r--r--contrib/llvm-project/clang/tools/clang-format/ClangFormat.cpp263
-rw-r--r--contrib/llvm-project/clang/tools/clang-repl/ClangRepl.cpp190
-rw-r--r--contrib/llvm-project/clang/tools/driver/cc1_main.cpp78
-rw-r--r--contrib/llvm-project/clang/tools/driver/cc1as_main.cpp103
-rw-r--r--contrib/llvm-project/clang/tools/driver/cc1gen_reproducer_main.cpp24
-rw-r--r--contrib/llvm-project/clang/tools/driver/driver.cpp303
-rw-r--r--contrib/llvm-project/clang/tools/nvptx-arch/NVPTXArch.cpp137
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ASTTableGen.cpp7
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ASTTableGen.h1
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangASTNodesEmitter.cpp107
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangASTPropertiesEmitter.cpp39
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangAttrEmitter.cpp1280
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangCommentCommandInfoEmitter.cpp20
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangCommentHTMLNamedCharacterReferenceEmitter.cpp6
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangCommentHTMLTagsEmitter.cpp5
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp83
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangOpcodesEmitter.cpp198
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp355
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangOptionDocEmitter.cpp89
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangSACheckersEmitter.cpp45
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangSyntaxEmitter.cpp4
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/ClangTypeNodesEmitter.cpp2
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/MveEmitter.cpp49
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/NeonEmitter.cpp261
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp1538
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/SveEmitter.cpp710
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/TableGen.cpp116
-rw-r--r--contrib/llvm-project/clang/utils/TableGen/TableGenBackends.h27
1780 files changed, 338024 insertions, 132859 deletions
diff --git a/contrib/llvm-project/clang/include/clang-c/BuildSystem.h b/contrib/llvm-project/clang/include/clang-c/BuildSystem.h
index 296e61247cef..57e16af20a70 100644
--- a/contrib/llvm-project/clang/include/clang-c/BuildSystem.h
+++ b/contrib/llvm-project/clang/include/clang-c/BuildSystem.h
@@ -95,7 +95,7 @@ CINDEX_LINKAGE void clang_free(void *buffer);
CINDEX_LINKAGE void clang_VirtualFileOverlay_dispose(CXVirtualFileOverlay);
/**
- * Object encapsulating information about a module.map file.
+ * Object encapsulating information about a module.modulemap file.
*/
typedef struct CXModuleMapDescriptorImpl *CXModuleMapDescriptor;
@@ -109,7 +109,7 @@ CINDEX_LINKAGE CXModuleMapDescriptor
clang_ModuleMapDescriptor_create(unsigned options);
/**
- * Sets the framework module name that the module.map describes.
+ * Sets the framework module name that the module.modulemap describes.
* \returns 0 for success, non-zero to indicate an error.
*/
CINDEX_LINKAGE enum CXErrorCode
@@ -117,7 +117,7 @@ clang_ModuleMapDescriptor_setFrameworkModuleName(CXModuleMapDescriptor,
const char *name);
/**
- * Sets the umbrella header name that the module.map describes.
+ * Sets the umbrella header name that the module.modulemap describes.
* \returns 0 for success, non-zero to indicate an error.
*/
CINDEX_LINKAGE enum CXErrorCode
diff --git a/contrib/llvm-project/clang/include/clang-c/CXDiagnostic.h b/contrib/llvm-project/clang/include/clang-c/CXDiagnostic.h
new file mode 100644
index 000000000000..911d001f0669
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang-c/CXDiagnostic.h
@@ -0,0 +1,379 @@
+/*===-- clang-c/CXDiagnostic.h - C Index Diagnostics --------------*- C -*-===*\
+|* *|
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM *|
+|* Exceptions. *|
+|* See https://llvm.org/LICENSE.txt for license information. *|
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header provides the interface to C Index diagnostics. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_CLANG_C_CXDIAGNOSTIC_H
+#define LLVM_CLANG_C_CXDIAGNOSTIC_H
+
+#include "clang-c/CXSourceLocation.h"
+#include "clang-c/CXString.h"
+#include "clang-c/ExternC.h"
+#include "clang-c/Platform.h"
+
+LLVM_CLANG_C_EXTERN_C_BEGIN
+
+/**
+ * \defgroup CINDEX_DIAG Diagnostic reporting
+ *
+ * @{
+ */
+
+/**
+ * Describes the severity of a particular diagnostic.
+ */
+enum CXDiagnosticSeverity {
+ /**
+ * A diagnostic that has been suppressed, e.g., by a command-line
+ * option.
+ */
+ CXDiagnostic_Ignored = 0,
+
+ /**
+ * This diagnostic is a note that should be attached to the
+ * previous (non-note) diagnostic.
+ */
+ CXDiagnostic_Note = 1,
+
+ /**
+ * This diagnostic indicates suspicious code that may not be
+ * wrong.
+ */
+ CXDiagnostic_Warning = 2,
+
+ /**
+ * This diagnostic indicates that the code is ill-formed.
+ */
+ CXDiagnostic_Error = 3,
+
+ /**
+ * This diagnostic indicates that the code is ill-formed such
+ * that future parser recovery is unlikely to produce useful
+ * results.
+ */
+ CXDiagnostic_Fatal = 4
+};
+
+/**
+ * A single diagnostic, containing the diagnostic's severity,
+ * location, text, source ranges, and fix-it hints.
+ */
+typedef void *CXDiagnostic;
+
+/**
+ * A group of CXDiagnostics.
+ */
+typedef void *CXDiagnosticSet;
+
+/**
+ * Determine the number of diagnostics in a CXDiagnosticSet.
+ */
+CINDEX_LINKAGE unsigned clang_getNumDiagnosticsInSet(CXDiagnosticSet Diags);
+
+/**
+ * Retrieve a diagnostic associated with the given CXDiagnosticSet.
+ *
+ * \param Diags the CXDiagnosticSet to query.
+ * \param Index the zero-based diagnostic number to retrieve.
+ *
+ * \returns the requested diagnostic. This diagnostic must be freed
+ * via a call to \c clang_disposeDiagnostic().
+ */
+CINDEX_LINKAGE CXDiagnostic clang_getDiagnosticInSet(CXDiagnosticSet Diags,
+ unsigned Index);
+
+/**
+ * Describes the kind of error that occurred (if any) in a call to
+ * \c clang_loadDiagnostics.
+ */
+enum CXLoadDiag_Error {
+ /**
+ * Indicates that no error occurred.
+ */
+ CXLoadDiag_None = 0,
+
+ /**
+ * Indicates that an unknown error occurred while attempting to
+ * deserialize diagnostics.
+ */
+ CXLoadDiag_Unknown = 1,
+
+ /**
+ * Indicates that the file containing the serialized diagnostics
+ * could not be opened.
+ */
+ CXLoadDiag_CannotLoad = 2,
+
+ /**
+ * Indicates that the serialized diagnostics file is invalid or
+ * corrupt.
+ */
+ CXLoadDiag_InvalidFile = 3
+};
+
+/**
+ * Deserialize a set of diagnostics from a Clang diagnostics bitcode
+ * file.
+ *
+ * \param file The name of the file to deserialize.
+ * \param error A pointer to a enum value recording if there was a problem
+ * deserializing the diagnostics.
+ * \param errorString A pointer to a CXString for recording the error string
+ * if the file was not successfully loaded.
+ *
+ * \returns A loaded CXDiagnosticSet if successful, and NULL otherwise. These
+ * diagnostics should be released using clang_disposeDiagnosticSet().
+ */
+CINDEX_LINKAGE CXDiagnosticSet clang_loadDiagnostics(
+ const char *file, enum CXLoadDiag_Error *error, CXString *errorString);
+
+/**
+ * Release a CXDiagnosticSet and all of its contained diagnostics.
+ */
+CINDEX_LINKAGE void clang_disposeDiagnosticSet(CXDiagnosticSet Diags);
+
+/**
+ * Retrieve the child diagnostics of a CXDiagnostic.
+ *
+ * This CXDiagnosticSet does not need to be released by
+ * clang_disposeDiagnosticSet.
+ */
+CINDEX_LINKAGE CXDiagnosticSet clang_getChildDiagnostics(CXDiagnostic D);
+
+/**
+ * Destroy a diagnostic.
+ */
+CINDEX_LINKAGE void clang_disposeDiagnostic(CXDiagnostic Diagnostic);
+
+/**
+ * Options to control the display of diagnostics.
+ *
+ * The values in this enum are meant to be combined to customize the
+ * behavior of \c clang_formatDiagnostic().
+ */
+enum CXDiagnosticDisplayOptions {
+ /**
+ * Display the source-location information where the
+ * diagnostic was located.
+ *
+ * When set, diagnostics will be prefixed by the file, line, and
+ * (optionally) column to which the diagnostic refers. For example,
+ *
+ * \code
+ * test.c:28: warning: extra tokens at end of #endif directive
+ * \endcode
+ *
+ * This option corresponds to the clang flag \c -fshow-source-location.
+ */
+ CXDiagnostic_DisplaySourceLocation = 0x01,
+
+ /**
+ * If displaying the source-location information of the
+ * diagnostic, also include the column number.
+ *
+ * This option corresponds to the clang flag \c -fshow-column.
+ */
+ CXDiagnostic_DisplayColumn = 0x02,
+
+ /**
+ * If displaying the source-location information of the
+ * diagnostic, also include information about source ranges in a
+ * machine-parsable format.
+ *
+ * This option corresponds to the clang flag
+ * \c -fdiagnostics-print-source-range-info.
+ */
+ CXDiagnostic_DisplaySourceRanges = 0x04,
+
+ /**
+ * Display the option name associated with this diagnostic, if any.
+ *
+ * The option name displayed (e.g., -Wconversion) will be placed in brackets
+ * after the diagnostic text. This option corresponds to the clang flag
+ * \c -fdiagnostics-show-option.
+ */
+ CXDiagnostic_DisplayOption = 0x08,
+
+ /**
+ * Display the category number associated with this diagnostic, if any.
+ *
+ * The category number is displayed within brackets after the diagnostic text.
+ * This option corresponds to the clang flag
+ * \c -fdiagnostics-show-category=id.
+ */
+ CXDiagnostic_DisplayCategoryId = 0x10,
+
+ /**
+ * Display the category name associated with this diagnostic, if any.
+ *
+ * The category name is displayed within brackets after the diagnostic text.
+ * This option corresponds to the clang flag
+ * \c -fdiagnostics-show-category=name.
+ */
+ CXDiagnostic_DisplayCategoryName = 0x20
+};
+
+/**
+ * Format the given diagnostic in a manner that is suitable for display.
+ *
+ * This routine will format the given diagnostic to a string, rendering
+ * the diagnostic according to the various options given. The
+ * \c clang_defaultDiagnosticDisplayOptions() function returns the set of
+ * options that most closely mimics the behavior of the clang compiler.
+ *
+ * \param Diagnostic The diagnostic to print.
+ *
+ * \param Options A set of options that control the diagnostic display,
+ * created by combining \c CXDiagnosticDisplayOptions values.
+ *
+ * \returns A new string containing for formatted diagnostic.
+ */
+CINDEX_LINKAGE CXString clang_formatDiagnostic(CXDiagnostic Diagnostic,
+ unsigned Options);
+
+/**
+ * Retrieve the set of display options most similar to the
+ * default behavior of the clang compiler.
+ *
+ * \returns A set of display options suitable for use with \c
+ * clang_formatDiagnostic().
+ */
+CINDEX_LINKAGE unsigned clang_defaultDiagnosticDisplayOptions(void);
+
+/**
+ * Determine the severity of the given diagnostic.
+ */
+CINDEX_LINKAGE enum CXDiagnosticSeverity
+ clang_getDiagnosticSeverity(CXDiagnostic);
+
+/**
+ * Retrieve the source location of the given diagnostic.
+ *
+ * This location is where Clang would print the caret ('^') when
+ * displaying the diagnostic on the command line.
+ */
+CINDEX_LINKAGE CXSourceLocation clang_getDiagnosticLocation(CXDiagnostic);
+
+/**
+ * Retrieve the text of the given diagnostic.
+ */
+CINDEX_LINKAGE CXString clang_getDiagnosticSpelling(CXDiagnostic);
+
+/**
+ * Retrieve the name of the command-line option that enabled this
+ * diagnostic.
+ *
+ * \param Diag The diagnostic to be queried.
+ *
+ * \param Disable If non-NULL, will be set to the option that disables this
+ * diagnostic (if any).
+ *
+ * \returns A string that contains the command-line option used to enable this
+ * warning, such as "-Wconversion" or "-pedantic".
+ */
+CINDEX_LINKAGE CXString clang_getDiagnosticOption(CXDiagnostic Diag,
+ CXString *Disable);
+
+/**
+ * Retrieve the category number for this diagnostic.
+ *
+ * Diagnostics can be categorized into groups along with other, related
+ * diagnostics (e.g., diagnostics under the same warning flag). This routine
+ * retrieves the category number for the given diagnostic.
+ *
+ * \returns The number of the category that contains this diagnostic, or zero
+ * if this diagnostic is uncategorized.
+ */
+CINDEX_LINKAGE unsigned clang_getDiagnosticCategory(CXDiagnostic);
+
+/**
+ * Retrieve the name of a particular diagnostic category. This
+ * is now deprecated. Use clang_getDiagnosticCategoryText()
+ * instead.
+ *
+ * \param Category A diagnostic category number, as returned by
+ * \c clang_getDiagnosticCategory().
+ *
+ * \returns The name of the given diagnostic category.
+ */
+CINDEX_DEPRECATED CINDEX_LINKAGE CXString
+clang_getDiagnosticCategoryName(unsigned Category);
+
+/**
+ * Retrieve the diagnostic category text for a given diagnostic.
+ *
+ * \returns The text of the given diagnostic category.
+ */
+CINDEX_LINKAGE CXString clang_getDiagnosticCategoryText(CXDiagnostic);
+
+/**
+ * Determine the number of source ranges associated with the given
+ * diagnostic.
+ */
+CINDEX_LINKAGE unsigned clang_getDiagnosticNumRanges(CXDiagnostic);
+
+/**
+ * Retrieve a source range associated with the diagnostic.
+ *
+ * A diagnostic's source ranges highlight important elements in the source
+ * code. On the command line, Clang displays source ranges by
+ * underlining them with '~' characters.
+ *
+ * \param Diagnostic the diagnostic whose range is being extracted.
+ *
+ * \param Range the zero-based index specifying which range to
+ *
+ * \returns the requested source range.
+ */
+CINDEX_LINKAGE CXSourceRange clang_getDiagnosticRange(CXDiagnostic Diagnostic,
+ unsigned Range);
+
+/**
+ * Determine the number of fix-it hints associated with the
+ * given diagnostic.
+ */
+CINDEX_LINKAGE unsigned clang_getDiagnosticNumFixIts(CXDiagnostic Diagnostic);
+
+/**
+ * Retrieve the replacement information for a given fix-it.
+ *
+ * Fix-its are described in terms of a source range whose contents
+ * should be replaced by a string. This approach generalizes over
+ * three kinds of operations: removal of source code (the range covers
+ * the code to be removed and the replacement string is empty),
+ * replacement of source code (the range covers the code to be
+ * replaced and the replacement string provides the new code), and
+ * insertion (both the start and end of the range point at the
+ * insertion location, and the replacement string provides the text to
+ * insert).
+ *
+ * \param Diagnostic The diagnostic whose fix-its are being queried.
+ *
+ * \param FixIt The zero-based index of the fix-it.
+ *
+ * \param ReplacementRange The source range whose contents will be
+ * replaced with the returned replacement string. Note that source
+ * ranges are half-open ranges [a, b), so the source code should be
+ * replaced from a and up to (but not including) b.
+ *
+ * \returns A string containing text that should be replace the source
+ * code indicated by the \c ReplacementRange.
+ */
+CINDEX_LINKAGE CXString clang_getDiagnosticFixIt(
+ CXDiagnostic Diagnostic, unsigned FixIt, CXSourceRange *ReplacementRange);
+
+/**
+ * @}
+ */
+
+LLVM_CLANG_C_EXTERN_C_END
+
+#endif
diff --git a/contrib/llvm-project/clang/include/clang-c/CXFile.h b/contrib/llvm-project/clang/include/clang-c/CXFile.h
new file mode 100644
index 000000000000..c48f58c94043
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang-c/CXFile.h
@@ -0,0 +1,83 @@
+/*===-- clang-c/CXFile.h - C Index File ---------------------------*- C -*-===*\
+|* *|
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM *|
+|* Exceptions. *|
+|* See https://llvm.org/LICENSE.txt for license information. *|
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header provides the interface to C Index files. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_CLANG_C_CXFILE_H
+#define LLVM_CLANG_C_CXFILE_H
+
+#include <time.h>
+
+#include "clang-c/CXString.h"
+#include "clang-c/ExternC.h"
+#include "clang-c/Platform.h"
+
+LLVM_CLANG_C_EXTERN_C_BEGIN
+
+/**
+ * \defgroup CINDEX_FILES File manipulation routines
+ *
+ * @{
+ */
+
+/**
+ * A particular source file that is part of a translation unit.
+ */
+typedef void *CXFile;
+
+/**
+ * Retrieve the complete file and path name of the given file.
+ */
+CINDEX_LINKAGE CXString clang_getFileName(CXFile SFile);
+
+/**
+ * Retrieve the last modification time of the given file.
+ */
+CINDEX_LINKAGE time_t clang_getFileTime(CXFile SFile);
+
+/**
+ * Uniquely identifies a CXFile, that refers to the same underlying file,
+ * across an indexing session.
+ */
+typedef struct {
+ unsigned long long data[3];
+} CXFileUniqueID;
+
+/**
+ * Retrieve the unique ID for the given \c file.
+ *
+ * \param file the file to get the ID for.
+ * \param outID stores the returned CXFileUniqueID.
+ * \returns If there was a failure getting the unique ID, returns non-zero,
+ * otherwise returns 0.
+ */
+CINDEX_LINKAGE int clang_getFileUniqueID(CXFile file, CXFileUniqueID *outID);
+
+/**
+ * Returns non-zero if the \c file1 and \c file2 point to the same file,
+ * or they are both NULL.
+ */
+CINDEX_LINKAGE int clang_File_isEqual(CXFile file1, CXFile file2);
+
+/**
+ * Returns the real path name of \c file.
+ *
+ * An empty string may be returned. Use \c clang_getFileName() in that case.
+ */
+CINDEX_LINKAGE CXString clang_File_tryGetRealPathName(CXFile file);
+
+/**
+ * @}
+ */
+
+LLVM_CLANG_C_EXTERN_C_END
+
+#endif
diff --git a/contrib/llvm-project/clang/include/clang-c/CXSourceLocation.h b/contrib/llvm-project/clang/include/clang-c/CXSourceLocation.h
new file mode 100644
index 000000000000..dcb13ba27317
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang-c/CXSourceLocation.h
@@ -0,0 +1,286 @@
+/*===-- clang-c/CXSourceLocation.h - C Index Source Location ------*- C -*-===*\
+|* *|
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM *|
+|* Exceptions. *|
+|* See https://llvm.org/LICENSE.txt for license information. *|
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header provides the interface to C Index source locations. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_CLANG_C_CXSOURCE_LOCATION_H
+#define LLVM_CLANG_C_CXSOURCE_LOCATION_H
+
+#include "clang-c/CXFile.h"
+#include "clang-c/CXString.h"
+#include "clang-c/ExternC.h"
+#include "clang-c/Platform.h"
+
+LLVM_CLANG_C_EXTERN_C_BEGIN
+
+/**
+ * \defgroup CINDEX_LOCATIONS Physical source locations
+ *
+ * Clang represents physical source locations in its abstract syntax tree in
+ * great detail, with file, line, and column information for the majority of
+ * the tokens parsed in the source code. These data types and functions are
+ * used to represent source location information, either for a particular
+ * point in the program or for a range of points in the program, and extract
+ * specific location information from those data types.
+ *
+ * @{
+ */
+
+/**
+ * Identifies a specific source location within a translation
+ * unit.
+ *
+ * Use clang_getExpansionLocation() or clang_getSpellingLocation()
+ * to map a source location to a particular file, line, and column.
+ */
+typedef struct {
+ const void *ptr_data[2];
+ unsigned int_data;
+} CXSourceLocation;
+
+/**
+ * Identifies a half-open character range in the source code.
+ *
+ * Use clang_getRangeStart() and clang_getRangeEnd() to retrieve the
+ * starting and end locations from a source range, respectively.
+ */
+typedef struct {
+ const void *ptr_data[2];
+ unsigned begin_int_data;
+ unsigned end_int_data;
+} CXSourceRange;
+
+/**
+ * Retrieve a NULL (invalid) source location.
+ */
+CINDEX_LINKAGE CXSourceLocation clang_getNullLocation(void);
+
+/**
+ * Determine whether two source locations, which must refer into
+ * the same translation unit, refer to exactly the same point in the source
+ * code.
+ *
+ * \returns non-zero if the source locations refer to the same location, zero
+ * if they refer to different locations.
+ */
+CINDEX_LINKAGE unsigned clang_equalLocations(CXSourceLocation loc1,
+ CXSourceLocation loc2);
+
+/**
+ * Returns non-zero if the given source location is in a system header.
+ */
+CINDEX_LINKAGE int clang_Location_isInSystemHeader(CXSourceLocation location);
+
+/**
+ * Returns non-zero if the given source location is in the main file of
+ * the corresponding translation unit.
+ */
+CINDEX_LINKAGE int clang_Location_isFromMainFile(CXSourceLocation location);
+
+/**
+ * Retrieve a NULL (invalid) source range.
+ */
+CINDEX_LINKAGE CXSourceRange clang_getNullRange(void);
+
+/**
+ * Retrieve a source range given the beginning and ending source
+ * locations.
+ */
+CINDEX_LINKAGE CXSourceRange clang_getRange(CXSourceLocation begin,
+ CXSourceLocation end);
+
+/**
+ * Determine whether two ranges are equivalent.
+ *
+ * \returns non-zero if the ranges are the same, zero if they differ.
+ */
+CINDEX_LINKAGE unsigned clang_equalRanges(CXSourceRange range1,
+ CXSourceRange range2);
+
+/**
+ * Returns non-zero if \p range is null.
+ */
+CINDEX_LINKAGE int clang_Range_isNull(CXSourceRange range);
+
+/**
+ * Retrieve the file, line, column, and offset represented by
+ * the given source location.
+ *
+ * If the location refers into a macro expansion, retrieves the
+ * location of the macro expansion.
+ *
+ * \param location the location within a source file that will be decomposed
+ * into its parts.
+ *
+ * \param file [out] if non-NULL, will be set to the file to which the given
+ * source location points.
+ *
+ * \param line [out] if non-NULL, will be set to the line to which the given
+ * source location points.
+ *
+ * \param column [out] if non-NULL, will be set to the column to which the given
+ * source location points.
+ *
+ * \param offset [out] if non-NULL, will be set to the offset into the
+ * buffer to which the given source location points.
+ */
+CINDEX_LINKAGE void clang_getExpansionLocation(CXSourceLocation location,
+ CXFile *file, unsigned *line,
+ unsigned *column,
+ unsigned *offset);
+
+/**
+ * Retrieve the file, line and column represented by the given source
+ * location, as specified in a # line directive.
+ *
+ * Example: given the following source code in a file somefile.c
+ *
+ * \code
+ * #123 "dummy.c" 1
+ *
+ * static int func(void)
+ * {
+ * return 0;
+ * }
+ * \endcode
+ *
+ * the location information returned by this function would be
+ *
+ * File: dummy.c Line: 124 Column: 12
+ *
+ * whereas clang_getExpansionLocation would have returned
+ *
+ * File: somefile.c Line: 3 Column: 12
+ *
+ * \param location the location within a source file that will be decomposed
+ * into its parts.
+ *
+ * \param filename [out] if non-NULL, will be set to the filename of the
+ * source location. Note that filenames returned will be for "virtual" files,
+ * which don't necessarily exist on the machine running clang - e.g. when
+ * parsing preprocessed output obtained from a different environment. If
+ * a non-NULL value is passed in, remember to dispose of the returned value
+ * using \c clang_disposeString() once you've finished with it. For an invalid
+ * source location, an empty string is returned.
+ *
+ * \param line [out] if non-NULL, will be set to the line number of the
+ * source location. For an invalid source location, zero is returned.
+ *
+ * \param column [out] if non-NULL, will be set to the column number of the
+ * source location. For an invalid source location, zero is returned.
+ */
+CINDEX_LINKAGE void clang_getPresumedLocation(CXSourceLocation location,
+ CXString *filename,
+ unsigned *line, unsigned *column);
+
+/**
+ * Legacy API to retrieve the file, line, column, and offset represented
+ * by the given source location.
+ *
+ * This interface has been replaced by the newer interface
+ * #clang_getExpansionLocation(). See that interface's documentation for
+ * details.
+ */
+CINDEX_LINKAGE void clang_getInstantiationLocation(CXSourceLocation location,
+ CXFile *file, unsigned *line,
+ unsigned *column,
+ unsigned *offset);
+
+/**
+ * Retrieve the file, line, column, and offset represented by
+ * the given source location.
+ *
+ * If the location refers into a macro instantiation, return where the
+ * location was originally spelled in the source file.
+ *
+ * \param location the location within a source file that will be decomposed
+ * into its parts.
+ *
+ * \param file [out] if non-NULL, will be set to the file to which the given
+ * source location points.
+ *
+ * \param line [out] if non-NULL, will be set to the line to which the given
+ * source location points.
+ *
+ * \param column [out] if non-NULL, will be set to the column to which the given
+ * source location points.
+ *
+ * \param offset [out] if non-NULL, will be set to the offset into the
+ * buffer to which the given source location points.
+ */
+CINDEX_LINKAGE void clang_getSpellingLocation(CXSourceLocation location,
+ CXFile *file, unsigned *line,
+ unsigned *column,
+ unsigned *offset);
+
+/**
+ * Retrieve the file, line, column, and offset represented by
+ * the given source location.
+ *
+ * If the location refers into a macro expansion, return where the macro was
+ * expanded or where the macro argument was written, if the location points at
+ * a macro argument.
+ *
+ * \param location the location within a source file that will be decomposed
+ * into its parts.
+ *
+ * \param file [out] if non-NULL, will be set to the file to which the given
+ * source location points.
+ *
+ * \param line [out] if non-NULL, will be set to the line to which the given
+ * source location points.
+ *
+ * \param column [out] if non-NULL, will be set to the column to which the given
+ * source location points.
+ *
+ * \param offset [out] if non-NULL, will be set to the offset into the
+ * buffer to which the given source location points.
+ */
+CINDEX_LINKAGE void clang_getFileLocation(CXSourceLocation location,
+ CXFile *file, unsigned *line,
+ unsigned *column, unsigned *offset);
+
+/**
+ * Retrieve a source location representing the first character within a
+ * source range.
+ */
+CINDEX_LINKAGE CXSourceLocation clang_getRangeStart(CXSourceRange range);
+
+/**
+ * Retrieve a source location representing the last character within a
+ * source range.
+ */
+CINDEX_LINKAGE CXSourceLocation clang_getRangeEnd(CXSourceRange range);
+
+/**
+ * Identifies an array of ranges.
+ */
+typedef struct {
+ /** The number of ranges in the \c ranges array. */
+ unsigned count;
+ /**
+ * An array of \c CXSourceRanges.
+ */
+ CXSourceRange *ranges;
+} CXSourceRangeList;
+
+/**
+ * Destroy the given \c CXSourceRangeList.
+ */
+CINDEX_LINKAGE void clang_disposeSourceRangeList(CXSourceRangeList *ranges);
+
+/**
+ * @}
+ */
+
+LLVM_CLANG_C_EXTERN_C_END
+
+#endif
diff --git a/contrib/llvm-project/clang/include/clang-c/Documentation.h b/contrib/llvm-project/clang/include/clang-c/Documentation.h
index 5bece2cb6758..e04c50a0e68b 100644
--- a/contrib/llvm-project/clang/include/clang-c/Documentation.h
+++ b/contrib/llvm-project/clang/include/clang-c/Documentation.h
@@ -15,6 +15,7 @@
#ifndef LLVM_CLANG_C_DOCUMENTATION_H
#define LLVM_CLANG_C_DOCUMENTATION_H
+#include "clang-c/CXErrorCode.h"
#include "clang-c/ExternC.h"
#include "clang-c/Index.h"
@@ -546,6 +547,69 @@ CINDEX_LINKAGE CXString clang_FullComment_getAsHTML(CXComment Comment);
CINDEX_LINKAGE CXString clang_FullComment_getAsXML(CXComment Comment);
/**
+ * CXAPISet is an opaque type that represents a data structure containing all
+ * the API information for a given translation unit. This can be used for a
+ * single symbol symbol graph for a given symbol.
+ */
+typedef struct CXAPISetImpl *CXAPISet;
+
+/**
+ * Traverses the translation unit to create a \c CXAPISet.
+ *
+ * \param tu is the \c CXTranslationUnit to build the \c CXAPISet for.
+ *
+ * \param out_api is a pointer to the output of this function. It is needs to be
+ * disposed of by calling clang_disposeAPISet.
+ *
+ * \returns Error code indicating success or failure of the APISet creation.
+ */
+CINDEX_LINKAGE enum CXErrorCode clang_createAPISet(CXTranslationUnit tu,
+ CXAPISet *out_api);
+
+/**
+ * Dispose of an APISet.
+ *
+ * The provided \c CXAPISet can not be used after this function is called.
+ */
+CINDEX_LINKAGE void clang_disposeAPISet(CXAPISet api);
+
+/**
+ * Generate a single symbol symbol graph for the given USR. Returns a null
+ * string if the associated symbol can not be found in the provided \c CXAPISet.
+ *
+ * The output contains the symbol graph as well as some additional information
+ * about related symbols.
+ *
+ * \param usr is a string containing the USR of the symbol to generate the
+ * symbol graph for.
+ *
+ * \param api the \c CXAPISet to look for the symbol in.
+ *
+ * \returns a string containing the serialized symbol graph representation for
+ * the symbol being queried or a null string if it can not be found in the
+ * APISet.
+ */
+CINDEX_LINKAGE CXString clang_getSymbolGraphForUSR(const char *usr,
+ CXAPISet api);
+
+/**
+ * Generate a single symbol symbol graph for the declaration at the given
+ * cursor. Returns a null string if the AST node for the cursor isn't a
+ * declaration.
+ *
+ * The output contains the symbol graph as well as some additional information
+ * about related symbols.
+ *
+ * \param cursor the declaration for which to generate the single symbol symbol
+ * graph.
+ *
+ * \returns a string containing the serialized symbol graph representation for
+ * the symbol being queried or a null string if it can not be found in the
+ * APISet.
+ */
+CINDEX_LINKAGE CXString clang_getSymbolGraphForCursor(CXCursor cursor);
+
+/**
* @}
*/
diff --git a/contrib/llvm-project/clang/include/clang-c/Index.h b/contrib/llvm-project/clang/include/clang-c/Index.h
index 26844d1c74f3..64ab3378957c 100644
--- a/contrib/llvm-project/clang/include/clang-c/Index.h
+++ b/contrib/llvm-project/clang/include/clang-c/Index.h
@@ -16,10 +16,11 @@
#ifndef LLVM_CLANG_C_INDEX_H
#define LLVM_CLANG_C_INDEX_H
-#include <time.h>
-
#include "clang-c/BuildSystem.h"
+#include "clang-c/CXDiagnostic.h"
#include "clang-c/CXErrorCode.h"
+#include "clang-c/CXFile.h"
+#include "clang-c/CXSourceLocation.h"
#include "clang-c/CXString.h"
#include "clang-c/ExternC.h"
#include "clang-c/Platform.h"
@@ -33,7 +34,7 @@
* compatible, thus CINDEX_VERSION_MAJOR is expected to remain stable.
*/
#define CINDEX_VERSION_MAJOR 0
-#define CINDEX_VERSION_MINOR 62
+#define CINDEX_VERSION_MINOR 64
#define CINDEX_VERSION_ENCODE(major, minor) (((major)*10000) + ((minor)*1))
@@ -47,6 +48,10 @@
#define CINDEX_VERSION_STRING \
CINDEX_VERSION_STRINGIZE(CINDEX_VERSION_MAJOR, CINDEX_VERSION_MINOR)
+#ifndef __has_feature
+#define __has_feature(feature) 0
+#endif
+
LLVM_CLANG_C_EXTERN_C_BEGIN
/** \defgroup CINDEX libclang: C Interface to Clang
@@ -276,6 +281,22 @@ CINDEX_LINKAGE void clang_disposeIndex(CXIndex index);
typedef enum {
/**
+ * Use the default value of an option that may depend on the process
+ * environment.
+ */
+ CXChoice_Default = 0,
+ /**
+ * Enable the option.
+ */
+ CXChoice_Enabled = 1,
+ /**
+ * Disable the option.
+ */
+ CXChoice_Disabled = 2
+} CXChoice;
+
+typedef enum {
+ /**
* Used to indicate that no special CXIndex options are needed.
*/
CXGlobalOpt_None = 0x0,
@@ -309,8 +330,130 @@ typedef enum {
} CXGlobalOptFlags;
/**
+ * Index initialization options.
+ *
+ * 0 is the default value of each member of this struct except for Size.
+ * Initialize the struct in one of the following three ways to avoid adapting
+ * code each time a new member is added to it:
+ * \code
+ * CXIndexOptions Opts;
+ * memset(&Opts, 0, sizeof(Opts));
+ * Opts.Size = sizeof(CXIndexOptions);
+ * \endcode
+ * or explicitly initialize the first data member and zero-initialize the rest:
+ * \code
+ * CXIndexOptions Opts = { sizeof(CXIndexOptions) };
+ * \endcode
+ * or to prevent the -Wmissing-field-initializers warning for the above version:
+ * \code
+ * CXIndexOptions Opts{};
+ * Opts.Size = sizeof(CXIndexOptions);
+ * \endcode
+ */
+typedef struct CXIndexOptions {
+ /**
+ * The size of struct CXIndexOptions used for option versioning.
+ *
+ * Always initialize this member to sizeof(CXIndexOptions), or assign
+ * sizeof(CXIndexOptions) to it right after creating a CXIndexOptions object.
+ */
+ unsigned Size;
+ /**
+ * A CXChoice enumerator that specifies the indexing priority policy.
+ * \sa CXGlobalOpt_ThreadBackgroundPriorityForIndexing
+ */
+ unsigned char ThreadBackgroundPriorityForIndexing;
+ /**
+ * A CXChoice enumerator that specifies the editing priority policy.
+ * \sa CXGlobalOpt_ThreadBackgroundPriorityForEditing
+ */
+ unsigned char ThreadBackgroundPriorityForEditing;
+ /**
+ * \see clang_createIndex()
+ */
+ unsigned ExcludeDeclarationsFromPCH : 1;
+ /**
+ * \see clang_createIndex()
+ */
+ unsigned DisplayDiagnostics : 1;
+ /**
+ * Store PCH in memory. If zero, PCH are stored in temporary files.
+ */
+ unsigned StorePreamblesInMemory : 1;
+ unsigned /*Reserved*/ : 13;
+
+ /**
+ * The path to a directory, in which to store temporary PCH files. If null or
+ * empty, the default system temporary directory is used. These PCH files are
+ * deleted on clean exit but stay on disk if the program crashes or is killed.
+ *
+ * This option is ignored if \a StorePreamblesInMemory is non-zero.
+ *
+ * Libclang does not create the directory at the specified path in the file
+ * system. Therefore it must exist, or storing PCH files will fail.
+ */
+ const char *PreambleStoragePath;
+ /**
+ * Specifies a path which will contain log files for certain libclang
+ * invocations. A null value implies that libclang invocations are not logged.
+ */
+ const char *InvocationEmissionPath;
+} CXIndexOptions;
+
+/**
+ * Provides a shared context for creating translation units.
+ *
+ * Call this function instead of clang_createIndex() if you need to configure
+ * the additional options in CXIndexOptions.
+ *
+ * \returns The created index or null in case of error, such as an unsupported
+ * value of options->Size.
+ *
+ * For example:
+ * \code
+ * CXIndex createIndex(const char *ApplicationTemporaryPath) {
+ * const int ExcludeDeclarationsFromPCH = 1;
+ * const int DisplayDiagnostics = 1;
+ * CXIndex Idx;
+ * #if CINDEX_VERSION_MINOR >= 64
+ * CXIndexOptions Opts;
+ * memset(&Opts, 0, sizeof(Opts));
+ * Opts.Size = sizeof(CXIndexOptions);
+ * Opts.ThreadBackgroundPriorityForIndexing = 1;
+ * Opts.ExcludeDeclarationsFromPCH = ExcludeDeclarationsFromPCH;
+ * Opts.DisplayDiagnostics = DisplayDiagnostics;
+ * Opts.PreambleStoragePath = ApplicationTemporaryPath;
+ * Idx = clang_createIndexWithOptions(&Opts);
+ * if (Idx)
+ * return Idx;
+ * fprintf(stderr,
+ * "clang_createIndexWithOptions() failed. "
+ * "CINDEX_VERSION_MINOR = %d, sizeof(CXIndexOptions) = %u\n",
+ * CINDEX_VERSION_MINOR, Opts.Size);
+ * #else
+ * (void)ApplicationTemporaryPath;
+ * #endif
+ * Idx = clang_createIndex(ExcludeDeclarationsFromPCH, DisplayDiagnostics);
+ * clang_CXIndex_setGlobalOptions(
+ * Idx, clang_CXIndex_getGlobalOptions(Idx) |
+ * CXGlobalOpt_ThreadBackgroundPriorityForIndexing);
+ * return Idx;
+ * }
+ * \endcode
+ *
+ * \sa clang_createIndex()
+ */
+CINDEX_LINKAGE CXIndex
+clang_createIndexWithOptions(const CXIndexOptions *options);
+
+/**
* Sets general options associated with a CXIndex.
*
+ * This function is DEPRECATED. Set
+ * CXIndexOptions::ThreadBackgroundPriorityForIndexing and/or
+ * CXIndexOptions::ThreadBackgroundPriorityForEditing and call
+ * clang_createIndexWithOptions() instead.
+ *
* For example:
* \code
* CXIndex idx = ...;
@@ -326,6 +469,9 @@ CINDEX_LINKAGE void clang_CXIndex_setGlobalOptions(CXIndex, unsigned options);
/**
* Gets the general options associated with a CXIndex.
*
+ * This function allows to obtain the final option values used by libclang after
+ * specifying the option policies via CXChoice enumerators.
+ *
* \returns A bitmask of options, a bitwise OR of CXGlobalOpt_XXX flags that
* are associated with the given CXIndex object.
*/
@@ -334,6 +480,9 @@ CINDEX_LINKAGE unsigned clang_CXIndex_getGlobalOptions(CXIndex);
/**
* Sets the invocation emission path option in a CXIndex.
*
+ * This function is DEPRECATED. Set CXIndexOptions::InvocationEmissionPath and
+ * call clang_createIndexWithOptions() instead.
+ *
* The invocation emission path specifies a path which will contain log
* files for certain libclang invocations. A null value (default) implies that
* libclang invocations are not logged..
@@ -342,45 +491,6 @@ CINDEX_LINKAGE void
clang_CXIndex_setInvocationEmissionPathOption(CXIndex, const char *Path);
/**
- * \defgroup CINDEX_FILES File manipulation routines
- *
- * @{
- */
-
-/**
- * A particular source file that is part of a translation unit.
- */
-typedef void *CXFile;
-
-/**
- * Retrieve the complete file and path name of the given file.
- */
-CINDEX_LINKAGE CXString clang_getFileName(CXFile SFile);
-
-/**
- * Retrieve the last modification time of the given file.
- */
-CINDEX_LINKAGE time_t clang_getFileTime(CXFile SFile);
-
-/**
- * Uniquely identifies a CXFile, that refers to the same underlying file,
- * across an indexing session.
- */
-typedef struct {
- unsigned long long data[3];
-} CXFileUniqueID;
-
-/**
- * Retrieve the unique ID for the given \c file.
- *
- * \param file the file to get the ID for.
- * \param outID stores the returned CXFileUniqueID.
- * \returns If there was a failure getting the unique ID, returns non-zero,
- * otherwise returns 0.
- */
-CINDEX_LINKAGE int clang_getFileUniqueID(CXFile file, CXFileUniqueID *outID);
-
-/**
* Determine whether the given header is guarded against
* multiple inclusions, either with the conventional
* \#ifndef/\#define/\#endif macro guards or with \#pragma once.
@@ -417,76 +527,6 @@ CINDEX_LINKAGE const char *clang_getFileContents(CXTranslationUnit tu,
CXFile file, size_t *size);
/**
- * Returns non-zero if the \c file1 and \c file2 point to the same file,
- * or they are both NULL.
- */
-CINDEX_LINKAGE int clang_File_isEqual(CXFile file1, CXFile file2);
-
-/**
- * Returns the real path name of \c file.
- *
- * An empty string may be returned. Use \c clang_getFileName() in that case.
- */
-CINDEX_LINKAGE CXString clang_File_tryGetRealPathName(CXFile file);
-
-/**
- * @}
- */
-
-/**
- * \defgroup CINDEX_LOCATIONS Physical source locations
- *
- * Clang represents physical source locations in its abstract syntax tree in
- * great detail, with file, line, and column information for the majority of
- * the tokens parsed in the source code. These data types and functions are
- * used to represent source location information, either for a particular
- * point in the program or for a range of points in the program, and extract
- * specific location information from those data types.
- *
- * @{
- */
-
-/**
- * Identifies a specific source location within a translation
- * unit.
- *
- * Use clang_getExpansionLocation() or clang_getSpellingLocation()
- * to map a source location to a particular file, line, and column.
- */
-typedef struct {
- const void *ptr_data[2];
- unsigned int_data;
-} CXSourceLocation;
-
-/**
- * Identifies a half-open character range in the source code.
- *
- * Use clang_getRangeStart() and clang_getRangeEnd() to retrieve the
- * starting and end locations from a source range, respectively.
- */
-typedef struct {
- const void *ptr_data[2];
- unsigned begin_int_data;
- unsigned end_int_data;
-} CXSourceRange;
-
-/**
- * Retrieve a NULL (invalid) source location.
- */
-CINDEX_LINKAGE CXSourceLocation clang_getNullLocation(void);
-
-/**
- * Determine whether two source locations, which must refer into
- * the same translation unit, refer to exactly the same point in the source
- * code.
- *
- * \returns non-zero if the source locations refer to the same location, zero
- * if they refer to different locations.
- */
-CINDEX_LINKAGE unsigned clang_equalLocations(CXSourceLocation loc1,
- CXSourceLocation loc2);
-
-/**
* Retrieves the source location associated with a given file/line/column
* in a particular translation unit.
*/
@@ -502,204 +542,6 @@ CINDEX_LINKAGE CXSourceLocation clang_getLocationForOffset(CXTranslationUnit tu,
unsigned offset);
/**
- * Returns non-zero if the given source location is in a system header.
- */
-CINDEX_LINKAGE int clang_Location_isInSystemHeader(CXSourceLocation location);
-
-/**
- * Returns non-zero if the given source location is in the main file of
- * the corresponding translation unit.
- */
-CINDEX_LINKAGE int clang_Location_isFromMainFile(CXSourceLocation location);
-
-/**
- * Retrieve a NULL (invalid) source range.
- */
-CINDEX_LINKAGE CXSourceRange clang_getNullRange(void);
-
-/**
- * Retrieve a source range given the beginning and ending source
- * locations.
- */
-CINDEX_LINKAGE CXSourceRange clang_getRange(CXSourceLocation begin,
- CXSourceLocation end);
-
-/**
- * Determine whether two ranges are equivalent.
- *
- * \returns non-zero if the ranges are the same, zero if they differ.
- */
-CINDEX_LINKAGE unsigned clang_equalRanges(CXSourceRange range1,
- CXSourceRange range2);
-
-/**
- * Returns non-zero if \p range is null.
- */
-CINDEX_LINKAGE int clang_Range_isNull(CXSourceRange range);
-
-/**
- * Retrieve the file, line, column, and offset represented by
- * the given source location.
- *
- * If the location refers into a macro expansion, retrieves the
- * location of the macro expansion.
- *
- * \param location the location within a source file that will be decomposed
- * into its parts.
- *
- * \param file [out] if non-NULL, will be set to the file to which the given
- * source location points.
- *
- * \param line [out] if non-NULL, will be set to the line to which the given
- * source location points.
- *
- * \param column [out] if non-NULL, will be set to the column to which the given
- * source location points.
- *
- * \param offset [out] if non-NULL, will be set to the offset into the
- * buffer to which the given source location points.
- */
-CINDEX_LINKAGE void clang_getExpansionLocation(CXSourceLocation location,
- CXFile *file, unsigned *line,
- unsigned *column,
- unsigned *offset);
-
-/**
- * Retrieve the file, line and column represented by the given source
- * location, as specified in a # line directive.
- *
- * Example: given the following source code in a file somefile.c
- *
- * \code
- * #123 "dummy.c" 1
- *
- * static int func(void)
- * {
- * return 0;
- * }
- * \endcode
- *
- * the location information returned by this function would be
- *
- * File: dummy.c Line: 124 Column: 12
- *
- * whereas clang_getExpansionLocation would have returned
- *
- * File: somefile.c Line: 3 Column: 12
- *
- * \param location the location within a source file that will be decomposed
- * into its parts.
- *
- * \param filename [out] if non-NULL, will be set to the filename of the
- * source location. Note that filenames returned will be for "virtual" files,
- * which don't necessarily exist on the machine running clang - e.g. when
- * parsing preprocessed output obtained from a different environment. If
- * a non-NULL value is passed in, remember to dispose of the returned value
- * using \c clang_disposeString() once you've finished with it. For an invalid
- * source location, an empty string is returned.
- *
- * \param line [out] if non-NULL, will be set to the line number of the
- * source location. For an invalid source location, zero is returned.
- *
- * \param column [out] if non-NULL, will be set to the column number of the
- * source location. For an invalid source location, zero is returned.
- */
-CINDEX_LINKAGE void clang_getPresumedLocation(CXSourceLocation location,
- CXString *filename,
- unsigned *line, unsigned *column);
-
-/**
- * Legacy API to retrieve the file, line, column, and offset represented
- * by the given source location.
- *
- * This interface has been replaced by the newer interface
- * #clang_getExpansionLocation(). See that interface's documentation for
- * details.
- */
-CINDEX_LINKAGE void clang_getInstantiationLocation(CXSourceLocation location,
- CXFile *file, unsigned *line,
- unsigned *column,
- unsigned *offset);
-
-/**
- * Retrieve the file, line, column, and offset represented by
- * the given source location.
- *
- * If the location refers into a macro instantiation, return where the
- * location was originally spelled in the source file.
- *
- * \param location the location within a source file that will be decomposed
- * into its parts.
- *
- * \param file [out] if non-NULL, will be set to the file to which the given
- * source location points.
- *
- * \param line [out] if non-NULL, will be set to the line to which the given
- * source location points.
- *
- * \param column [out] if non-NULL, will be set to the column to which the given
- * source location points.
- *
- * \param offset [out] if non-NULL, will be set to the offset into the
- * buffer to which the given source location points.
- */
-CINDEX_LINKAGE void clang_getSpellingLocation(CXSourceLocation location,
- CXFile *file, unsigned *line,
- unsigned *column,
- unsigned *offset);
-
-/**
- * Retrieve the file, line, column, and offset represented by
- * the given source location.
- *
- * If the location refers into a macro expansion, return where the macro was
- * expanded or where the macro argument was written, if the location points at
- * a macro argument.
- *
- * \param location the location within a source file that will be decomposed
- * into its parts.
- *
- * \param file [out] if non-NULL, will be set to the file to which the given
- * source location points.
- *
- * \param line [out] if non-NULL, will be set to the line to which the given
- * source location points.
- *
- * \param column [out] if non-NULL, will be set to the column to which the given
- * source location points.
- *
- * \param offset [out] if non-NULL, will be set to the offset into the
- * buffer to which the given source location points.
- */
-CINDEX_LINKAGE void clang_getFileLocation(CXSourceLocation location,
- CXFile *file, unsigned *line,
- unsigned *column, unsigned *offset);
-
-/**
- * Retrieve a source location representing the first character within a
- * source range.
- */
-CINDEX_LINKAGE CXSourceLocation clang_getRangeStart(CXSourceRange range);
-
-/**
- * Retrieve a source location representing the last character within a
- * source range.
- */
-CINDEX_LINKAGE CXSourceLocation clang_getRangeEnd(CXSourceRange range);
-
-/**
- * Identifies an array of ranges.
- */
-typedef struct {
- /** The number of ranges in the \c ranges array. */
- unsigned count;
- /**
- * An array of \c CXSourceRanges.
- */
- CXSourceRange *ranges;
-} CXSourceRangeList;
-
-/**
* Retrieve all ranges that were skipped by the preprocessor.
*
* The preprocessor will skip lines when they are surrounded by an
@@ -719,142 +561,6 @@ CINDEX_LINKAGE CXSourceRangeList *
clang_getAllSkippedRanges(CXTranslationUnit tu);
/**
- * Destroy the given \c CXSourceRangeList.
- */
-CINDEX_LINKAGE void clang_disposeSourceRangeList(CXSourceRangeList *ranges);
-
-/**
- * @}
- */
-
-/**
- * \defgroup CINDEX_DIAG Diagnostic reporting
- *
- * @{
- */
-
-/**
- * Describes the severity of a particular diagnostic.
- */
-enum CXDiagnosticSeverity {
- /**
- * A diagnostic that has been suppressed, e.g., by a command-line
- * option.
- */
- CXDiagnostic_Ignored = 0,
-
- /**
- * This diagnostic is a note that should be attached to the
- * previous (non-note) diagnostic.
- */
- CXDiagnostic_Note = 1,
-
- /**
- * This diagnostic indicates suspicious code that may not be
- * wrong.
- */
- CXDiagnostic_Warning = 2,
-
- /**
- * This diagnostic indicates that the code is ill-formed.
- */
- CXDiagnostic_Error = 3,
-
- /**
- * This diagnostic indicates that the code is ill-formed such
- * that future parser recovery is unlikely to produce useful
- * results.
- */
- CXDiagnostic_Fatal = 4
-};
-
-/**
- * A single diagnostic, containing the diagnostic's severity,
- * location, text, source ranges, and fix-it hints.
- */
-typedef void *CXDiagnostic;
-
-/**
- * A group of CXDiagnostics.
- */
-typedef void *CXDiagnosticSet;
-
-/**
- * Determine the number of diagnostics in a CXDiagnosticSet.
- */
-CINDEX_LINKAGE unsigned clang_getNumDiagnosticsInSet(CXDiagnosticSet Diags);
-
-/**
- * Retrieve a diagnostic associated with the given CXDiagnosticSet.
- *
- * \param Diags the CXDiagnosticSet to query.
- * \param Index the zero-based diagnostic number to retrieve.
- *
- * \returns the requested diagnostic. This diagnostic must be freed
- * via a call to \c clang_disposeDiagnostic().
- */
-CINDEX_LINKAGE CXDiagnostic clang_getDiagnosticInSet(CXDiagnosticSet Diags,
- unsigned Index);
-
-/**
- * Describes the kind of error that occurred (if any) in a call to
- * \c clang_loadDiagnostics.
- */
-enum CXLoadDiag_Error {
- /**
- * Indicates that no error occurred.
- */
- CXLoadDiag_None = 0,
-
- /**
- * Indicates that an unknown error occurred while attempting to
- * deserialize diagnostics.
- */
- CXLoadDiag_Unknown = 1,
-
- /**
- * Indicates that the file containing the serialized diagnostics
- * could not be opened.
- */
- CXLoadDiag_CannotLoad = 2,
-
- /**
- * Indicates that the serialized diagnostics file is invalid or
- * corrupt.
- */
- CXLoadDiag_InvalidFile = 3
-};
-
-/**
- * Deserialize a set of diagnostics from a Clang diagnostics bitcode
- * file.
- *
- * \param file The name of the file to deserialize.
- * \param error A pointer to a enum value recording if there was a problem
- * deserializing the diagnostics.
- * \param errorString A pointer to a CXString for recording the error string
- * if the file was not successfully loaded.
- *
- * \returns A loaded CXDiagnosticSet if successful, and NULL otherwise. These
- * diagnostics should be released using clang_disposeDiagnosticSet().
- */
-CINDEX_LINKAGE CXDiagnosticSet clang_loadDiagnostics(
- const char *file, enum CXLoadDiag_Error *error, CXString *errorString);
-
-/**
- * Release a CXDiagnosticSet and all of its contained diagnostics.
- */
-CINDEX_LINKAGE void clang_disposeDiagnosticSet(CXDiagnosticSet Diags);
-
-/**
- * Retrieve the child diagnostics of a CXDiagnostic.
- *
- * This CXDiagnosticSet does not need to be released by
- * clang_disposeDiagnosticSet.
- */
-CINDEX_LINKAGE CXDiagnosticSet clang_getChildDiagnostics(CXDiagnostic D);
-
-/**
* Determine the number of diagnostics produced for the given
* translation unit.
*/
@@ -882,232 +588,6 @@ CINDEX_LINKAGE CXDiagnosticSet
clang_getDiagnosticSetFromTU(CXTranslationUnit Unit);
/**
- * Destroy a diagnostic.
- */
-CINDEX_LINKAGE void clang_disposeDiagnostic(CXDiagnostic Diagnostic);
-
-/**
- * Options to control the display of diagnostics.
- *
- * The values in this enum are meant to be combined to customize the
- * behavior of \c clang_formatDiagnostic().
- */
-enum CXDiagnosticDisplayOptions {
- /**
- * Display the source-location information where the
- * diagnostic was located.
- *
- * When set, diagnostics will be prefixed by the file, line, and
- * (optionally) column to which the diagnostic refers. For example,
- *
- * \code
- * test.c:28: warning: extra tokens at end of #endif directive
- * \endcode
- *
- * This option corresponds to the clang flag \c -fshow-source-location.
- */
- CXDiagnostic_DisplaySourceLocation = 0x01,
-
- /**
- * If displaying the source-location information of the
- * diagnostic, also include the column number.
- *
- * This option corresponds to the clang flag \c -fshow-column.
- */
- CXDiagnostic_DisplayColumn = 0x02,
-
- /**
- * If displaying the source-location information of the
- * diagnostic, also include information about source ranges in a
- * machine-parsable format.
- *
- * This option corresponds to the clang flag
- * \c -fdiagnostics-print-source-range-info.
- */
- CXDiagnostic_DisplaySourceRanges = 0x04,
-
- /**
- * Display the option name associated with this diagnostic, if any.
- *
- * The option name displayed (e.g., -Wconversion) will be placed in brackets
- * after the diagnostic text. This option corresponds to the clang flag
- * \c -fdiagnostics-show-option.
- */
- CXDiagnostic_DisplayOption = 0x08,
-
- /**
- * Display the category number associated with this diagnostic, if any.
- *
- * The category number is displayed within brackets after the diagnostic text.
- * This option corresponds to the clang flag
- * \c -fdiagnostics-show-category=id.
- */
- CXDiagnostic_DisplayCategoryId = 0x10,
-
- /**
- * Display the category name associated with this diagnostic, if any.
- *
- * The category name is displayed within brackets after the diagnostic text.
- * This option corresponds to the clang flag
- * \c -fdiagnostics-show-category=name.
- */
- CXDiagnostic_DisplayCategoryName = 0x20
-};
-
-/**
- * Format the given diagnostic in a manner that is suitable for display.
- *
- * This routine will format the given diagnostic to a string, rendering
- * the diagnostic according to the various options given. The
- * \c clang_defaultDiagnosticDisplayOptions() function returns the set of
- * options that most closely mimics the behavior of the clang compiler.
- *
- * \param Diagnostic The diagnostic to print.
- *
- * \param Options A set of options that control the diagnostic display,
- * created by combining \c CXDiagnosticDisplayOptions values.
- *
- * \returns A new string containing for formatted diagnostic.
- */
-CINDEX_LINKAGE CXString clang_formatDiagnostic(CXDiagnostic Diagnostic,
- unsigned Options);
-
-/**
- * Retrieve the set of display options most similar to the
- * default behavior of the clang compiler.
- *
- * \returns A set of display options suitable for use with \c
- * clang_formatDiagnostic().
- */
-CINDEX_LINKAGE unsigned clang_defaultDiagnosticDisplayOptions(void);
-
-/**
- * Determine the severity of the given diagnostic.
- */
-CINDEX_LINKAGE enum CXDiagnosticSeverity
- clang_getDiagnosticSeverity(CXDiagnostic);
-
-/**
- * Retrieve the source location of the given diagnostic.
- *
- * This location is where Clang would print the caret ('^') when
- * displaying the diagnostic on the command line.
- */
-CINDEX_LINKAGE CXSourceLocation clang_getDiagnosticLocation(CXDiagnostic);
-
-/**
- * Retrieve the text of the given diagnostic.
- */
-CINDEX_LINKAGE CXString clang_getDiagnosticSpelling(CXDiagnostic);
-
-/**
- * Retrieve the name of the command-line option that enabled this
- * diagnostic.
- *
- * \param Diag The diagnostic to be queried.
- *
- * \param Disable If non-NULL, will be set to the option that disables this
- * diagnostic (if any).
- *
- * \returns A string that contains the command-line option used to enable this
- * warning, such as "-Wconversion" or "-pedantic".
- */
-CINDEX_LINKAGE CXString clang_getDiagnosticOption(CXDiagnostic Diag,
- CXString *Disable);
-
-/**
- * Retrieve the category number for this diagnostic.
- *
- * Diagnostics can be categorized into groups along with other, related
- * diagnostics (e.g., diagnostics under the same warning flag). This routine
- * retrieves the category number for the given diagnostic.
- *
- * \returns The number of the category that contains this diagnostic, or zero
- * if this diagnostic is uncategorized.
- */
-CINDEX_LINKAGE unsigned clang_getDiagnosticCategory(CXDiagnostic);
-
-/**
- * Retrieve the name of a particular diagnostic category. This
- * is now deprecated. Use clang_getDiagnosticCategoryText()
- * instead.
- *
- * \param Category A diagnostic category number, as returned by
- * \c clang_getDiagnosticCategory().
- *
- * \returns The name of the given diagnostic category.
- */
-CINDEX_DEPRECATED CINDEX_LINKAGE CXString
-clang_getDiagnosticCategoryName(unsigned Category);
-
-/**
- * Retrieve the diagnostic category text for a given diagnostic.
- *
- * \returns The text of the given diagnostic category.
- */
-CINDEX_LINKAGE CXString clang_getDiagnosticCategoryText(CXDiagnostic);
-
-/**
- * Determine the number of source ranges associated with the given
- * diagnostic.
- */
-CINDEX_LINKAGE unsigned clang_getDiagnosticNumRanges(CXDiagnostic);
-
-/**
- * Retrieve a source range associated with the diagnostic.
- *
- * A diagnostic's source ranges highlight important elements in the source
- * code. On the command line, Clang displays source ranges by
- * underlining them with '~' characters.
- *
- * \param Diagnostic the diagnostic whose range is being extracted.
- *
- * \param Range the zero-based index specifying which range to
- *
- * \returns the requested source range.
- */
-CINDEX_LINKAGE CXSourceRange clang_getDiagnosticRange(CXDiagnostic Diagnostic,
- unsigned Range);
-
-/**
- * Determine the number of fix-it hints associated with the
- * given diagnostic.
- */
-CINDEX_LINKAGE unsigned clang_getDiagnosticNumFixIts(CXDiagnostic Diagnostic);
-
-/**
- * Retrieve the replacement information for a given fix-it.
- *
- * Fix-its are described in terms of a source range whose contents
- * should be replaced by a string. This approach generalizes over
- * three kinds of operations: removal of source code (the range covers
- * the code to be removed and the replacement string is empty),
- * replacement of source code (the range covers the code to be
- * replaced and the replacement string provides the new code), and
- * insertion (both the start and end of the range point at the
- * insertion location, and the replacement string provides the text to
- * insert).
- *
- * \param Diagnostic The diagnostic whose fix-its are being queried.
- *
- * \param FixIt The zero-based index of the fix-it.
- *
- * \param ReplacementRange The source range whose contents will be
- * replaced with the returned replacement string. Note that source
- * ranges are half-open ranges [a, b), so the source code should be
- * replaced from a and up to (but not including) b.
- *
- * \returns A string containing text that should be replace the source
- * code indicated by the \c ReplacementRange.
- */
-CINDEX_LINKAGE CXString clang_getDiagnosticFixIt(
- CXDiagnostic Diagnostic, unsigned FixIt, CXSourceRange *ReplacementRange);
-
-/**
- * @}
- */
-
-/**
* \defgroup CINDEX_TRANSLATION_UNIT Translation unit manipulation
*
* The routines in this group provide the ability to create and destroy
@@ -2189,7 +1669,23 @@ enum CXCursorKind {
*/
CXCursor_CXXAddrspaceCastExpr = 152,
- CXCursor_LastExpr = CXCursor_CXXAddrspaceCastExpr,
+ /**
+ * Expression that references a C++20 concept.
+ */
+ CXCursor_ConceptSpecializationExpr = 153,
+
+ /**
+ * Expression that references a C++20 concept.
+ */
+ CXCursor_RequiresExpr = 154,
+
+ /**
+ * Expression that references a C++20 parenthesized list aggregate
+ * initializer.
+ */
+ CXCursor_CXXParenListInitExpr = 155,
+
+ CXCursor_LastExpr = CXCursor_CXXParenListInitExpr,
/* Statements */
CXCursor_FirstStmt = 200,
@@ -2592,7 +2088,59 @@ enum CXCursorKind {
*/
CXCursor_OMPUnrollDirective = 293,
- CXCursor_LastStmt = CXCursor_OMPUnrollDirective,
+ /** OpenMP metadirective directive.
+ */
+ CXCursor_OMPMetaDirective = 294,
+
+ /** OpenMP loop directive.
+ */
+ CXCursor_OMPGenericLoopDirective = 295,
+
+ /** OpenMP teams loop directive.
+ */
+ CXCursor_OMPTeamsGenericLoopDirective = 296,
+
+ /** OpenMP target teams loop directive.
+ */
+ CXCursor_OMPTargetTeamsGenericLoopDirective = 297,
+
+ /** OpenMP parallel loop directive.
+ */
+ CXCursor_OMPParallelGenericLoopDirective = 298,
+
+ /** OpenMP target parallel loop directive.
+ */
+ CXCursor_OMPTargetParallelGenericLoopDirective = 299,
+
+ /** OpenMP parallel masked directive.
+ */
+ CXCursor_OMPParallelMaskedDirective = 300,
+
+ /** OpenMP masked taskloop directive.
+ */
+ CXCursor_OMPMaskedTaskLoopDirective = 301,
+
+ /** OpenMP masked taskloop simd directive.
+ */
+ CXCursor_OMPMaskedTaskLoopSimdDirective = 302,
+
+ /** OpenMP parallel masked taskloop directive.
+ */
+ CXCursor_OMPParallelMaskedTaskLoopDirective = 303,
+
+ /** OpenMP parallel masked taskloop simd directive.
+ */
+ CXCursor_OMPParallelMaskedTaskLoopSimdDirective = 304,
+
+ /** OpenMP error directive.
+ */
+ CXCursor_OMPErrorDirective = 305,
+
+ /** OpenMP scope directive.
+ */
+ CXCursor_OMPScopeDirective = 306,
+
+ CXCursor_LastStmt = CXCursor_OMPScopeDirective,
/**
* Cursor that represents the translation unit itself.
@@ -2600,7 +2148,7 @@ enum CXCursorKind {
* The translation unit cursor exists primarily to act as the root
* cursor for traversing the contents of a translation unit.
*/
- CXCursor_TranslationUnit = 300,
+ CXCursor_TranslationUnit = 350,
/* Attributes */
CXCursor_FirstAttr = 400,
@@ -2676,8 +2224,13 @@ enum CXCursorKind {
* a friend declaration.
*/
CXCursor_FriendDecl = 603,
+ /**
+ * a concept declaration.
+ */
+ CXCursor_ConceptDecl = 604,
+
CXCursor_FirstExtraDecl = CXCursor_ModuleImportDecl,
- CXCursor_LastExtraDecl = CXCursor_FriendDecl,
+ CXCursor_LastExtraDecl = CXCursor_ConceptDecl,
/**
* A code completion overload candidate.
@@ -3298,8 +2851,9 @@ enum CXTypeKind {
CXType_UAccum = 37,
CXType_ULongAccum = 38,
CXType_BFloat16 = 39,
+ CXType_Ibm128 = 40,
CXType_FirstBuiltin = CXType_Void,
- CXType_LastBuiltin = CXType_BFloat16,
+ CXType_LastBuiltin = CXType_Ibm128,
CXType_Complex = 100,
CXType_Pointer = 101,
@@ -3385,14 +2939,20 @@ enum CXTypeKind {
CXType_OCLIntelSubgroupAVCImeResult = 169,
CXType_OCLIntelSubgroupAVCRefResult = 170,
CXType_OCLIntelSubgroupAVCSicResult = 171,
+ CXType_OCLIntelSubgroupAVCImeResultSingleReferenceStreamout = 172,
+ CXType_OCLIntelSubgroupAVCImeResultDualReferenceStreamout = 173,
+ CXType_OCLIntelSubgroupAVCImeSingleReferenceStreamin = 174,
+ CXType_OCLIntelSubgroupAVCImeDualReferenceStreamin = 175,
+
+ /* Old aliases for AVC OpenCL extension types. */
CXType_OCLIntelSubgroupAVCImeResultSingleRefStreamout = 172,
CXType_OCLIntelSubgroupAVCImeResultDualRefStreamout = 173,
CXType_OCLIntelSubgroupAVCImeSingleRefStreamin = 174,
-
CXType_OCLIntelSubgroupAVCImeDualRefStreamin = 175,
CXType_ExtVector = 176,
- CXType_Atomic = 177
+ CXType_Atomic = 177,
+ CXType_BTFTagAttributed = 178
};
/**
@@ -3419,6 +2979,8 @@ enum CXCallingConv {
CXCallingConv_PreserveAll = 15,
CXCallingConv_AArch64VectorCall = 16,
CXCallingConv_SwiftAsync = 17,
+ CXCallingConv_AArch64SVEPCS = 18,
+ CXCallingConv_M68kRTD = 19,
CXCallingConv_Invalid = 100,
CXCallingConv_Unexposed = 200
@@ -3484,9 +3046,25 @@ CINDEX_LINKAGE unsigned long long
clang_getEnumConstantDeclUnsignedValue(CXCursor C);
/**
- * Retrieve the bit width of a bit field declaration as an integer.
+ * Returns non-zero if the cursor specifies a Record member that is a bit-field.
+ */
+CINDEX_LINKAGE unsigned clang_Cursor_isBitField(CXCursor C);
+
+/**
+ * Retrieve the bit width of a bit-field declaration as an integer.
*
- * If a cursor that is not a bit field declaration is passed in, -1 is returned.
+ * If the cursor does not reference a bit-field, or if the bit-field's width
+ * expression cannot be evaluated, -1 is returned.
+ *
+ * For example:
+ * \code
+ * if (clang_Cursor_isBitField(Cursor)) {
+ * int Width = clang_getFieldDeclBitWidth(Cursor);
+ * if (Width != -1) {
+ * // The bit-field width is not value-dependent.
+ * }
+ * }
+ * \endcode
*/
CINDEX_LINKAGE int clang_getFieldDeclBitWidth(CXCursor C);
@@ -3529,8 +3107,8 @@ enum CXTemplateArgumentKind {
};
/**
- *Returns the number of template args of a function decl representing a
- * template specialization.
+ * Returns the number of template args of a function, struct, or class decl
+ * representing a template specialization.
*
* If the argument cursor cannot be converted into a template function
* declaration, -1 is returned.
@@ -3549,8 +3127,9 @@ CINDEX_LINKAGE int clang_Cursor_getNumTemplateArguments(CXCursor C);
/**
* Retrieve the kind of the I'th template argument of the CXCursor C.
*
- * If the argument CXCursor does not represent a FunctionDecl, an invalid
- * template argument kind is returned.
+ * If the argument CXCursor does not represent a FunctionDecl, StructDecl, or
+ * ClassTemplatePartialSpecialization, an invalid template argument kind is
+ * returned.
*
* For example, for the following declaration and specialization:
* template <typename T, int kInt, bool kBool>
@@ -3569,9 +3148,9 @@ clang_Cursor_getTemplateArgumentKind(CXCursor C, unsigned I);
* Retrieve a CXType representing the type of a TemplateArgument of a
* function decl representing a template specialization.
*
- * If the argument CXCursor does not represent a FunctionDecl whose I'th
- * template argument has a kind of CXTemplateArgKind_Integral, an invalid type
- * is returned.
+ * If the argument CXCursor does not represent a FunctionDecl, StructDecl,
+ * ClassDecl or ClassTemplatePartialSpecialization whose I'th template argument
+ * has a kind of CXTemplateArgKind_Integral, an invalid type is returned.
*
* For example, for the following declaration and specialization:
* template <typename T, int kInt, bool kBool>
@@ -3591,7 +3170,8 @@ CINDEX_LINKAGE CXType clang_Cursor_getTemplateArgumentType(CXCursor C,
* decl representing a template specialization) as a signed long long.
*
* It is undefined to call this function on a CXCursor that does not represent a
- * FunctionDecl or whose I'th template argument is not an integral value.
+ * FunctionDecl, StructDecl, ClassDecl or ClassTemplatePartialSpecialization
+ * whose I'th template argument is not an integral value.
*
* For example, for the following declaration and specialization:
* template <typename T, int kInt, bool kBool>
@@ -3611,7 +3191,8 @@ CINDEX_LINKAGE long long clang_Cursor_getTemplateArgumentValue(CXCursor C,
* decl representing a template specialization) as an unsigned long long.
*
* It is undefined to call this function on a CXCursor that does not represent a
- * FunctionDecl or whose I'th template argument is not an integral value.
+ * FunctionDecl, StructDecl, ClassDecl or ClassTemplatePartialSpecialization or
+ * whose I'th template argument is not an integral value.
*
* For example, for the following declaration and specialization:
* template <typename T, int kInt, bool kBool>
@@ -3699,6 +3280,54 @@ CINDEX_LINKAGE CXString clang_getTypedefName(CXType CT);
CINDEX_LINKAGE CXType clang_getPointeeType(CXType T);
/**
+ * Retrieve the unqualified variant of the given type, removing as
+ * little sugar as possible.
+ *
+ * For example, given the following series of typedefs:
+ *
+ * \code
+ * typedef int Integer;
+ * typedef const Integer CInteger;
+ * typedef CInteger DifferenceType;
+ * \endcode
+ *
+ * Executing \c clang_getUnqualifiedType() on a \c CXType that
+ * represents \c DifferenceType, will desugar to a type representing
+ * \c Integer, that has no qualifiers.
+ *
+ * And, executing \c clang_getUnqualifiedType() on the type of the
+ * first argument of the following function declaration:
+ *
+ * \code
+ * void foo(const int);
+ * \endcode
+ *
+ * Will return a type representing \c int, removing the \c const
+ * qualifier.
+ *
+ * Sugar over array types is not desugared.
+ *
+ * A type can be checked for qualifiers with \c
+ * clang_isConstQualifiedType(), \c clang_isVolatileQualifiedType()
+ * and \c clang_isRestrictQualifiedType().
+ *
+ * A type that resulted from a call to \c clang_getUnqualifiedType
+ * will return \c false for all of the above calls.
+ */
+CINDEX_LINKAGE CXType clang_getUnqualifiedType(CXType CT);
+
+/**
+ * For reference types (e.g., "const int&"), returns the type that the
+ * reference refers to (e.g "const int").
+ *
+ * Otherwise, returns the type itself.
+ *
+ * A type that has kind \c CXType_LValueReference or
+ * \c CXType_RValueReference is a reference type.
+ */
+CINDEX_LINKAGE CXType clang_getNonReferenceType(CXType CT);
+
+/**
* Return the cursor for the declaration of the given type.
*/
CINDEX_LINKAGE CXCursor clang_getTypeDeclaration(CXType T);
@@ -3891,7 +3520,7 @@ enum CXTypeNullabilityKind {
/**
* Generally behaves like Nullable, except when used in a block parameter that
* was imported into a swift async method. There, swift will assume that the
- * parameter can get null even if no error occured. _Nullable parameters are
+ * parameter can get null even if no error occurred. _Nullable parameters are
* assumed to only get null on error.
*/
CXTypeNullability_NullableResult = 4
@@ -4065,12 +3694,6 @@ CINDEX_LINKAGE CXType clang_Type_getTemplateArgumentAsType(CXType T,
CINDEX_LINKAGE enum CXRefQualifierKind clang_Type_getCXXRefQualifier(CXType T);
/**
- * Returns non-zero if the cursor specifies a Record member that is a
- * bitfield.
- */
-CINDEX_LINKAGE unsigned clang_Cursor_isBitField(CXCursor C);
-
-/**
* Returns 1 if the base class specified by the cursor with kind
* CX_CXXBaseSpecifier is virtual.
*/
@@ -4242,8 +3865,6 @@ typedef enum CXChildVisitResult (*CXCursorVisitor)(CXCursor cursor,
CINDEX_LINKAGE unsigned clang_visitChildren(CXCursor parent,
CXCursorVisitor visitor,
CXClientData client_data);
-#ifdef __has_feature
-#if __has_feature(blocks)
/**
* Visitor invoked for each cursor found by a traversal.
*
@@ -4254,8 +3875,12 @@ CINDEX_LINKAGE unsigned clang_visitChildren(CXCursor parent,
* The visitor should return one of the \c CXChildVisitResult values
* to direct clang_visitChildrenWithBlock().
*/
+#if __has_feature(blocks)
typedef enum CXChildVisitResult (^CXCursorVisitorBlock)(CXCursor cursor,
CXCursor parent);
+#else
+typedef struct _CXChildVisitResult *CXCursorVisitorBlock;
+#endif
/**
* Visits the children of a cursor using the specified block. Behaves
@@ -4263,8 +3888,6 @@ typedef enum CXChildVisitResult (^CXCursorVisitorBlock)(CXCursor cursor,
*/
CINDEX_LINKAGE unsigned
clang_visitChildrenWithBlock(CXCursor parent, CXCursorVisitorBlock block);
-#endif
-#endif
/**
* @}
@@ -4815,6 +4438,11 @@ CINDEX_LINKAGE unsigned clang_CXXField_isMutable(CXCursor C);
CINDEX_LINKAGE unsigned clang_CXXMethod_isDefaulted(CXCursor C);
/**
+ * Determine if a C++ method is declared '= delete'.
+ */
+CINDEX_LINKAGE unsigned clang_CXXMethod_isDeleted(CXCursor C);
+
+/**
* Determine if a C++ member function or member function template is
* pure virtual.
*/
@@ -4834,6 +4462,101 @@ CINDEX_LINKAGE unsigned clang_CXXMethod_isStatic(CXCursor C);
CINDEX_LINKAGE unsigned clang_CXXMethod_isVirtual(CXCursor C);
/**
+ * Determine if a C++ member function is a copy-assignment operator,
+ * returning 1 if such is the case and 0 otherwise.
+ *
+ * > A copy-assignment operator `X::operator=` is a non-static,
+ * > non-template member function of _class_ `X` with exactly one
+ * > parameter of type `X`, `X&`, `const X&`, `volatile X&` or `const
+ * > volatile X&`.
+ *
+ * That is, for example, the `operator=` in:
+ *
+ * class Foo {
+ * bool operator=(const volatile Foo&);
+ * };
+ *
+ * Is a copy-assignment operator, while the `operator=` in:
+ *
+ * class Bar {
+ * bool operator=(const int&);
+ * };
+ *
+ * Is not.
+ */
+CINDEX_LINKAGE unsigned clang_CXXMethod_isCopyAssignmentOperator(CXCursor C);
+
+/**
+ * Determine if a C++ member function is a move-assignment operator,
+ * returning 1 if such is the case and 0 otherwise.
+ *
+ * > A move-assignment operator `X::operator=` is a non-static,
+ * > non-template member function of _class_ `X` with exactly one
+ * > parameter of type `X&&`, `const X&&`, `volatile X&&` or `const
+ * > volatile X&&`.
+ *
+ * That is, for example, the `operator=` in:
+ *
+ * class Foo {
+ * bool operator=(const volatile Foo&&);
+ * };
+ *
+ * Is a move-assignment operator, while the `operator=` in:
+ *
+ * class Bar {
+ * bool operator=(const int&&);
+ * };
+ *
+ * Is not.
+ */
+CINDEX_LINKAGE unsigned clang_CXXMethod_isMoveAssignmentOperator(CXCursor C);
+
+/**
+ * Determines if a C++ constructor or conversion function was declared
+ * explicit, returning 1 if such is the case and 0 otherwise.
+ *
+ * Constructors or conversion functions are declared explicit through
+ * the use of the explicit specifier.
+ *
+ * For example, the following constructor and conversion function are
+ * not explicit as they lack the explicit specifier:
+ *
+ * class Foo {
+ * Foo();
+ * operator int();
+ * };
+ *
+ * While the following constructor and conversion function are
+ * explicit as they are declared with the explicit specifier.
+ *
+ * class Foo {
+ * explicit Foo();
+ * explicit operator int();
+ * };
+ *
+ * This function will return 0 when given a cursor pointing to one of
+ * the former declarations and it will return 1 for a cursor pointing
+ * to the latter declarations.
+ *
+ * The explicit specifier allows the user to specify a
+ * conditional compile-time expression whose value decides
+ * whether the marked element is explicit or not.
+ *
+ * For example:
+ *
+ * constexpr bool foo(int i) { return i % 2 == 0; }
+ *
+ * class Foo {
+ * explicit(foo(1)) Foo();
+ * explicit(foo(2)) operator int();
+ * }
+ *
+ * This function will return 0 for the constructor and 1 for
+ * the conversion function.
+ */
+CINDEX_LINKAGE unsigned clang_CXXMethod_isExplicit(CXCursor C);
+
+/**
* Determine if a C++ record is abstract, i.e. whether a class or struct
* has a pure virtual member function.
*/
@@ -6165,11 +5888,12 @@ CINDEX_LINKAGE CXResult clang_findReferencesInFile(
CINDEX_LINKAGE CXResult clang_findIncludesInFile(
CXTranslationUnit TU, CXFile file, CXCursorAndRangeVisitor visitor);
-#ifdef __has_feature
#if __has_feature(blocks)
-
typedef enum CXVisitorResult (^CXCursorAndRangeVisitorBlock)(CXCursor,
CXSourceRange);
+#else
+typedef struct _CXCursorAndRangeVisitorBlock *CXCursorAndRangeVisitorBlock;
+#endif
CINDEX_LINKAGE
CXResult clang_findReferencesInFileWithBlock(CXCursor, CXFile,
@@ -6179,9 +5903,6 @@ CINDEX_LINKAGE
CXResult clang_findIncludesInFileWithBlock(CXTranslationUnit, CXFile,
CXCursorAndRangeVisitorBlock);
-#endif
-#endif
-
/**
* The client's data object that is associated with a CXFile.
*/
@@ -6292,7 +6013,8 @@ typedef enum {
CXIdxEntity_CXXDestructor = 23,
CXIdxEntity_CXXConversionFunction = 24,
CXIdxEntity_CXXTypeAlias = 25,
- CXIdxEntity_CXXInterface = 26
+ CXIdxEntity_CXXInterface = 26,
+ CXIdxEntity_CXXConcept = 27
} CXIdxEntityKind;
@@ -6794,6 +6516,144 @@ CINDEX_LINKAGE unsigned clang_Type_visitFields(CXType T, CXFieldVisitor visitor,
CXClientData client_data);
/**
+ * Describes the kind of binary operators.
+ */
+enum CXBinaryOperatorKind {
+ /** This value describes cursors which are not binary operators. */
+ CXBinaryOperator_Invalid,
+ /** C++ Pointer - to - member operator. */
+ CXBinaryOperator_PtrMemD,
+ /** C++ Pointer - to - member operator. */
+ CXBinaryOperator_PtrMemI,
+ /** Multiplication operator. */
+ CXBinaryOperator_Mul,
+ /** Division operator. */
+ CXBinaryOperator_Div,
+ /** Remainder operator. */
+ CXBinaryOperator_Rem,
+ /** Addition operator. */
+ CXBinaryOperator_Add,
+ /** Subtraction operator. */
+ CXBinaryOperator_Sub,
+ /** Bitwise shift left operator. */
+ CXBinaryOperator_Shl,
+ /** Bitwise shift right operator. */
+ CXBinaryOperator_Shr,
+ /** C++ three-way comparison (spaceship) operator. */
+ CXBinaryOperator_Cmp,
+ /** Less than operator. */
+ CXBinaryOperator_LT,
+ /** Greater than operator. */
+ CXBinaryOperator_GT,
+ /** Less or equal operator. */
+ CXBinaryOperator_LE,
+ /** Greater or equal operator. */
+ CXBinaryOperator_GE,
+ /** Equal operator. */
+ CXBinaryOperator_EQ,
+ /** Not equal operator. */
+ CXBinaryOperator_NE,
+ /** Bitwise AND operator. */
+ CXBinaryOperator_And,
+ /** Bitwise XOR operator. */
+ CXBinaryOperator_Xor,
+ /** Bitwise OR operator. */
+ CXBinaryOperator_Or,
+ /** Logical AND operator. */
+ CXBinaryOperator_LAnd,
+ /** Logical OR operator. */
+ CXBinaryOperator_LOr,
+ /** Assignment operator. */
+ CXBinaryOperator_Assign,
+ /** Multiplication assignment operator. */
+ CXBinaryOperator_MulAssign,
+ /** Division assignment operator. */
+ CXBinaryOperator_DivAssign,
+ /** Remainder assignment operator. */
+ CXBinaryOperator_RemAssign,
+ /** Addition assignment operator. */
+ CXBinaryOperator_AddAssign,
+ /** Subtraction assignment operator. */
+ CXBinaryOperator_SubAssign,
+ /** Bitwise shift left assignment operator. */
+ CXBinaryOperator_ShlAssign,
+ /** Bitwise shift right assignment operator. */
+ CXBinaryOperator_ShrAssign,
+ /** Bitwise AND assignment operator. */
+ CXBinaryOperator_AndAssign,
+ /** Bitwise XOR assignment operator. */
+ CXBinaryOperator_XorAssign,
+ /** Bitwise OR assignment operator. */
+ CXBinaryOperator_OrAssign,
+ /** Comma operator. */
+ CXBinaryOperator_Comma
+};
+
+/**
+ * Retrieve the spelling of a given CXBinaryOperatorKind.
+ */
+CINDEX_LINKAGE CXString
+clang_getBinaryOperatorKindSpelling(enum CXBinaryOperatorKind kind);
+
+/**
+ * Retrieve the binary operator kind of this cursor.
+ *
+ * If this cursor is not a binary operator then returns Invalid.
+ */
+CINDEX_LINKAGE enum CXBinaryOperatorKind
+clang_getCursorBinaryOperatorKind(CXCursor cursor);
+
+/**
+ * Describes the kind of unary operators.
+ */
+enum CXUnaryOperatorKind {
+ /** This value describes cursors which are not unary operators. */
+ CXUnaryOperator_Invalid,
+ /** Postfix increment operator. */
+ CXUnaryOperator_PostInc,
+ /** Postfix decrement operator. */
+ CXUnaryOperator_PostDec,
+ /** Prefix increment operator. */
+ CXUnaryOperator_PreInc,
+ /** Prefix decrement operator. */
+ CXUnaryOperator_PreDec,
+ /** Address of operator. */
+ CXUnaryOperator_AddrOf,
+ /** Dereference operator. */
+ CXUnaryOperator_Deref,
+ /** Plus operator. */
+ CXUnaryOperator_Plus,
+ /** Minus operator. */
+ CXUnaryOperator_Minus,
+ /** Not operator. */
+ CXUnaryOperator_Not,
+ /** LNot operator. */
+ CXUnaryOperator_LNot,
+ /** "__real expr" operator. */
+ CXUnaryOperator_Real,
+ /** "__imag expr" operator. */
+ CXUnaryOperator_Imag,
+ /** __extension__ marker operator. */
+ CXUnaryOperator_Extension,
+ /** C++ co_await operator. */
+ CXUnaryOperator_Coawait
+};
+
+/**
+ * Retrieve the spelling of a given CXUnaryOperatorKind.
+ */
+CINDEX_LINKAGE CXString
+clang_getUnaryOperatorKindSpelling(enum CXUnaryOperatorKind kind);
+
+/**
+ * Retrieve the unary operator kind of this cursor.
+ *
+ * If this cursor is not a unary operator then returns Invalid.
+ */
+CINDEX_LINKAGE enum CXUnaryOperatorKind
+clang_getCursorUnaryOperatorKind(CXCursor cursor);
+
+/**
* @}
*/
diff --git a/contrib/llvm-project/clang/include/clang-c/module.modulemap b/contrib/llvm-project/clang/include/clang-c/module.modulemap
deleted file mode 100644
index 95a59d62344c..000000000000
--- a/contrib/llvm-project/clang/include/clang-c/module.modulemap
+++ /dev/null
@@ -1,4 +0,0 @@
-module Clang_C {
- umbrella "."
- module * { export * }
-}
diff --git a/contrib/llvm-project/clang/include/clang/APINotes/APINotesManager.h b/contrib/llvm-project/clang/include/clang/APINotes/APINotesManager.h
new file mode 100644
index 000000000000..18375c9e51a1
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/APINotes/APINotesManager.h
@@ -0,0 +1,175 @@
+//===--- APINotesManager.h - Manage API Notes Files -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_APINOTES_APINOTESMANAGER_H
+#define LLVM_CLANG_APINOTES_APINOTESMANAGER_H
+
+#include "clang/Basic/Module.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/VersionTuple.h"
+#include <memory>
+#include <string>
+
+namespace clang {
+
+class DirectoryEntry;
+class FileEntry;
+class LangOptions;
+class SourceManager;
+
+namespace api_notes {
+
+class APINotesReader;
+
+/// The API notes manager helps find API notes associated with declarations.
+///
+/// API notes are externally-provided annotations for declarations that can
+/// introduce new attributes (covering availability, nullability of
+/// parameters/results, and so on) for specific declarations without directly
+/// modifying the headers that contain those declarations.
+///
+/// The API notes manager is responsible for finding and loading the
+/// external API notes files that correspond to a given header. Its primary
+/// operation is \c findAPINotes(), which finds the API notes reader that
+/// provides information about the declarations at that location.
+class APINotesManager {
+ using ReaderEntry = llvm::PointerUnion<DirectoryEntryRef, APINotesReader *>;
+
+ SourceManager &SM;
+
+ /// Whether to implicitly search for API notes files based on the
+ /// source file from which an entity was declared.
+ bool ImplicitAPINotes;
+
+ /// The Swift version to use when interpreting versioned API notes.
+ llvm::VersionTuple SwiftVersion;
+
+ enum ReaderKind : unsigned { Public = 0, Private = 1 };
+
+ /// API notes readers for the current module.
+ ///
+ /// There can be up to two of these, one for public headers and one
+ /// for private headers.
+ ///
+ /// Not using std::unique_ptr to store these, since the reader pointers are
+ /// also stored in llvm::PointerUnion below.
+ APINotesReader *CurrentModuleReaders[2] = {nullptr, nullptr};
+
+ /// A mapping from header file directories to the API notes reader for
+ /// that directory, or a redirection to another directory entry that may
+ /// have more information, or NULL to indicate that there is no API notes
+ /// reader for this directory.
+ llvm::DenseMap<const DirectoryEntry *, ReaderEntry> Readers;
+
+ /// Load the API notes associated with the given file, whether it is
+ /// the binary or source form of API notes.
+ ///
+ /// \returns the API notes reader for this file, or null if there is
+ /// a failure.
+ std::unique_ptr<APINotesReader> loadAPINotes(FileEntryRef APINotesFile);
+
+ /// Load the API notes associated with the given buffer, whether it is
+ /// the binary or source form of API notes.
+ ///
+ /// \returns the API notes reader for this file, or null if there is
+ /// a failure.
+ std::unique_ptr<APINotesReader> loadAPINotes(StringRef Buffer);
+
+ /// Load the given API notes file for the given header directory.
+ ///
+ /// \param HeaderDir The directory at which we
+ ///
+ /// \returns true if an error occurred.
+ bool loadAPINotes(const DirectoryEntry *HeaderDir, FileEntryRef APINotesFile);
+
+ /// Look for API notes in the given directory.
+ ///
+ /// This might find either a binary or source API notes.
+ OptionalFileEntryRef findAPINotesFile(DirectoryEntryRef Directory,
+ StringRef FileName,
+ bool WantPublic = true);
+
+ /// Attempt to load API notes for the given framework. A framework will have
+ /// the API notes file under either {FrameworkPath}/APINotes,
+ /// {FrameworkPath}/Headers or {FrameworkPath}/PrivateHeaders, while a
+ /// library will have the API notes simply in its directory.
+ ///
+ /// \param FrameworkPath The path to the framework.
+ /// \param Public Whether to load the public API notes. Otherwise, attempt
+ /// to load the private API notes.
+ ///
+ /// \returns the header directory entry (e.g., for Headers or PrivateHeaders)
+ /// for which the API notes were successfully loaded, or NULL if API notes
+ /// could not be loaded for any reason.
+ OptionalDirectoryEntryRef loadFrameworkAPINotes(llvm::StringRef FrameworkPath,
+ llvm::StringRef FrameworkName,
+ bool Public);
+
+public:
+ APINotesManager(SourceManager &SM, const LangOptions &LangOpts);
+ ~APINotesManager();
+
+ /// Set the Swift version to use when filtering API notes.
+ void setSwiftVersion(llvm::VersionTuple Version) {
+ this->SwiftVersion = Version;
+ }
+
+ /// Load the API notes for the current module.
+ ///
+ /// \param M The current module.
+ /// \param LookInModule Whether to look inside the module itself.
+ /// \param SearchPaths The paths in which we should search for API notes
+ /// for the current module.
+ ///
+ /// \returns true if API notes were successfully loaded, \c false otherwise.
+ bool loadCurrentModuleAPINotes(Module *M, bool LookInModule,
+ ArrayRef<std::string> SearchPaths);
+
+ /// Get FileEntry for the APINotes of the module that is currently being
+ /// compiled.
+ ///
+ /// \param M The current module.
+ /// \param LookInModule Whether to look inside the directory of the current
+ /// module.
+ /// \param SearchPaths The paths in which we should search for API
+ /// notes for the current module.
+ ///
+ /// \returns a vector of FileEntry where APINotes files are.
+ llvm::SmallVector<FileEntryRef, 2>
+ getCurrentModuleAPINotes(Module *M, bool LookInModule,
+ ArrayRef<std::string> SearchPaths);
+
+ /// Load Compiled API notes for current module.
+ ///
+ /// \param Buffers Array of compiled API notes.
+ ///
+ /// \returns true if API notes were successfully loaded, \c false otherwise.
+ bool loadCurrentModuleAPINotesFromBuffer(ArrayRef<StringRef> Buffers);
+
+ /// Retrieve the set of API notes readers for the current module.
+ ArrayRef<APINotesReader *> getCurrentModuleReaders() const {
+ bool HasPublic = CurrentModuleReaders[ReaderKind::Public];
+ bool HasPrivate = CurrentModuleReaders[ReaderKind::Private];
+ assert((!HasPrivate || HasPublic) && "private module requires public module");
+ if (!HasPrivate && !HasPublic)
+ return {};
+ return ArrayRef(CurrentModuleReaders).slice(0, HasPrivate ? 2 : 1);
+ }
+
+ /// Find the API notes readers that correspond to the given source location.
+ llvm::SmallVector<APINotesReader *, 2> findAPINotes(SourceLocation Loc);
+};
+
+} // end namespace api_notes
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/include/clang/APINotes/APINotesOptions.h b/contrib/llvm-project/clang/include/clang/APINotes/APINotesOptions.h
new file mode 100644
index 000000000000..e8b8a9ed2261
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/APINotes/APINotesOptions.h
@@ -0,0 +1,34 @@
+//===--- APINotesOptions.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_APINOTES_APINOTESOPTIONS_H
+#define LLVM_CLANG_APINOTES_APINOTESOPTIONS_H
+
+#include "llvm/Support/VersionTuple.h"
+#include <string>
+#include <vector>
+
+namespace clang {
+
+/// Tracks various options which control how API notes are found and handled.
+class APINotesOptions {
+public:
+ /// The Swift version which should be used for API notes.
+ llvm::VersionTuple SwiftVersion;
+
+ /// The set of search paths where we API notes can be found for particular
+ /// modules.
+ ///
+ /// The API notes in this directory are stored as <ModuleName>.apinotes, and
+ /// are only applied when building the module <ModuleName>.
+ std::vector<std::string> ModuleSearchPaths;
+};
+
+} // namespace clang
+
+#endif // LLVM_CLANG_APINOTES_APINOTESOPTIONS_H
diff --git a/contrib/llvm-project/clang/include/clang/APINotes/APINotesReader.h b/contrib/llvm-project/clang/include/clang/APINotes/APINotesReader.h
new file mode 100644
index 000000000000..1c5aab095955
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/APINotes/APINotesReader.h
@@ -0,0 +1,200 @@
+//===--- APINotesReader.h - API Notes Reader --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the \c APINotesReader class that reads source API notes
+// data providing additional information about source code as a separate input,
+// such as the non-nil/nilable annotations for method parameters.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_APINOTES_READER_H
+#define LLVM_CLANG_APINOTES_READER_H
+
+#include "clang/APINotes/Types.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/VersionTuple.h"
+#include <memory>
+
+namespace clang {
+namespace api_notes {
+
+/// A class that reads API notes data from a binary file that was written by
+/// the \c APINotesWriter.
+class APINotesReader {
+ class Implementation;
+ std::unique_ptr<Implementation> Implementation;
+
+ APINotesReader(llvm::MemoryBuffer *InputBuffer,
+ llvm::VersionTuple SwiftVersion, bool &Failed);
+
+public:
+ /// Create a new API notes reader from the given member buffer, which
+ /// contains the contents of a binary API notes file.
+ ///
+ /// \returns the new API notes reader, or null if an error occurred.
+ static std::unique_ptr<APINotesReader>
+ Create(std::unique_ptr<llvm::MemoryBuffer> InputBuffer,
+ llvm::VersionTuple SwiftVersion);
+
+ ~APINotesReader();
+
+ APINotesReader(const APINotesReader &) = delete;
+ APINotesReader &operator=(const APINotesReader &) = delete;
+
+ /// Captures the completed versioned information for a particular part of
+ /// API notes, including both unversioned API notes and each versioned API
+ /// note for that particular entity.
+ template <typename T> class VersionedInfo {
+ /// The complete set of results.
+ llvm::SmallVector<std::pair<llvm::VersionTuple, T>, 1> Results;
+
+ /// The index of the result that is the "selected" set based on the desired
+ /// Swift version, or null if nothing matched.
+ std::optional<unsigned> Selected;
+
+ public:
+ /// Form an empty set of versioned information.
+ VersionedInfo(std::nullopt_t) : Selected(std::nullopt) {}
+
+ /// Form a versioned info set given the desired version and a set of
+ /// results.
+ VersionedInfo(
+ llvm::VersionTuple Version,
+ llvm::SmallVector<std::pair<llvm::VersionTuple, T>, 1> Results);
+
+ /// Retrieve the selected index in the result set.
+ std::optional<unsigned> getSelected() const { return Selected; }
+
+ /// Return the number of versioned results we know about.
+ unsigned size() const { return Results.size(); }
+
+ /// Access all versioned results.
+ const std::pair<llvm::VersionTuple, T> *begin() const {
+ assert(!Results.empty());
+ return Results.begin();
+ }
+ const std::pair<llvm::VersionTuple, T> *end() const {
+ return Results.end();
+ }
+
+ /// Access a specific versioned result.
+ const std::pair<llvm::VersionTuple, T> &operator[](unsigned index) const {
+ assert(index < Results.size());
+ return Results[index];
+ }
+ };
+
+ /// Look for the context ID of the given Objective-C class.
+ ///
+ /// \param Name The name of the class we're looking for.
+ ///
+ /// \returns The ID, if known.
+ std::optional<ContextID> lookupObjCClassID(llvm::StringRef Name);
+
+ /// Look for information regarding the given Objective-C class.
+ ///
+ /// \param Name The name of the class we're looking for.
+ ///
+ /// \returns The information about the class, if known.
+ VersionedInfo<ObjCContextInfo> lookupObjCClassInfo(llvm::StringRef Name);
+
+ /// Look for the context ID of the given Objective-C protocol.
+ ///
+ /// \param Name The name of the protocol we're looking for.
+ ///
+ /// \returns The ID of the protocol, if known.
+ std::optional<ContextID> lookupObjCProtocolID(llvm::StringRef Name);
+
+ /// Look for information regarding the given Objective-C protocol.
+ ///
+ /// \param Name The name of the protocol we're looking for.
+ ///
+ /// \returns The information about the protocol, if known.
+ VersionedInfo<ObjCContextInfo> lookupObjCProtocolInfo(llvm::StringRef Name);
+
+ /// Look for information regarding the given Objective-C property in
+ /// the given context.
+ ///
+ /// \param CtxID The ID that references the context we are looking for.
+ /// \param Name The name of the property we're looking for.
+ /// \param IsInstance Whether we are looking for an instance property (vs.
+ /// a class property).
+ ///
+ /// \returns Information about the property, if known.
+ VersionedInfo<ObjCPropertyInfo>
+ lookupObjCProperty(ContextID CtxID, llvm::StringRef Name, bool IsInstance);
+
+ /// Look for information regarding the given Objective-C method in
+ /// the given context.
+ ///
+ /// \param CtxID The ID that references the context we are looking for.
+ /// \param Selector The selector naming the method we're looking for.
+ /// \param IsInstanceMethod Whether we are looking for an instance method.
+ ///
+ /// \returns Information about the method, if known.
+ VersionedInfo<ObjCMethodInfo> lookupObjCMethod(ContextID CtxID,
+ ObjCSelectorRef Selector,
+ bool IsInstanceMethod);
+
+ /// Look for information regarding the given global variable.
+ ///
+ /// \param Name The name of the global variable.
+ ///
+ /// \returns information about the global variable, if known.
+ VersionedInfo<GlobalVariableInfo>
+ lookupGlobalVariable(llvm::StringRef Name,
+ std::optional<Context> Ctx = std::nullopt);
+
+ /// Look for information regarding the given global function.
+ ///
+ /// \param Name The name of the global function.
+ ///
+ /// \returns information about the global function, if known.
+ VersionedInfo<GlobalFunctionInfo>
+ lookupGlobalFunction(llvm::StringRef Name,
+ std::optional<Context> Ctx = std::nullopt);
+
+ /// Look for information regarding the given enumerator.
+ ///
+ /// \param Name The name of the enumerator.
+ ///
+ /// \returns information about the enumerator, if known.
+ VersionedInfo<EnumConstantInfo> lookupEnumConstant(llvm::StringRef Name);
+
+ /// Look for information regarding the given tag
+ /// (struct/union/enum/C++ class).
+ ///
+ /// \param Name The name of the tag.
+ ///
+ /// \returns information about the tag, if known.
+ VersionedInfo<TagInfo> lookupTag(llvm::StringRef Name,
+ std::optional<Context> Ctx = std::nullopt);
+
+ /// Look for information regarding the given typedef.
+ ///
+ /// \param Name The name of the typedef.
+ ///
+ /// \returns information about the typedef, if known.
+ VersionedInfo<TypedefInfo>
+ lookupTypedef(llvm::StringRef Name,
+ std::optional<Context> Ctx = std::nullopt);
+
+ /// Look for the context ID of the given C++ namespace.
+ ///
+ /// \param Name The name of the class we're looking for.
+ ///
+ /// \returns The ID, if known.
+ std::optional<ContextID>
+ lookupNamespaceID(llvm::StringRef Name,
+ std::optional<ContextID> ParentNamespaceID = std::nullopt);
+};
+
+} // end namespace api_notes
+} // end namespace clang
+
+#endif // LLVM_CLANG_APINOTES_READER_H
diff --git a/contrib/llvm-project/clang/include/clang/APINotes/APINotesWriter.h b/contrib/llvm-project/clang/include/clang/APINotes/APINotesWriter.h
new file mode 100644
index 000000000000..dad44623e16a
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/APINotes/APINotesWriter.h
@@ -0,0 +1,110 @@
+//===-- APINotesWriter.h - API Notes Writer ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_APINOTES_WRITER_H
+#define LLVM_CLANG_APINOTES_WRITER_H
+
+#include "clang/APINotes/Types.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/VersionTuple.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <memory>
+
+namespace clang {
+class FileEntry;
+
+namespace api_notes {
+class APINotesWriter {
+ class Implementation;
+ std::unique_ptr<Implementation> Implementation;
+
+public:
+ APINotesWriter(llvm::StringRef ModuleName, const FileEntry *SF);
+ ~APINotesWriter();
+
+ APINotesWriter(const APINotesWriter &) = delete;
+ APINotesWriter &operator=(const APINotesWriter &) = delete;
+
+ void writeToStream(llvm::raw_ostream &OS);
+
+ /// Add information about a specific Objective-C class or protocol or a C++
+ /// namespace.
+ ///
+ /// \param Name The name of this class/protocol/namespace.
+ /// \param Kind Whether this is a class, a protocol, or a namespace.
+ /// \param Info Information about this class/protocol/namespace.
+ ///
+ /// \returns the ID of the class, protocol, or namespace, which can be used to
+ /// add properties and methods to the class/protocol/namespace.
+ ContextID addObjCContext(std::optional<ContextID> ParentCtxID,
+ llvm::StringRef Name, ContextKind Kind,
+ const ObjCContextInfo &Info,
+ llvm::VersionTuple SwiftVersion);
+
+ /// Add information about a specific Objective-C property.
+ ///
+ /// \param CtxID The context in which this property resides.
+ /// \param Name The name of this property.
+ /// \param Info Information about this property.
+ void addObjCProperty(ContextID CtxID, llvm::StringRef Name,
+ bool IsInstanceProperty, const ObjCPropertyInfo &Info,
+ llvm::VersionTuple SwiftVersion);
+
+ /// Add information about a specific Objective-C method.
+ ///
+ /// \param CtxID The context in which this method resides.
+ /// \param Selector The selector that names this method.
+ /// \param IsInstanceMethod Whether this method is an instance method
+ /// (vs. a class method).
+ /// \param Info Information about this method.
+ void addObjCMethod(ContextID CtxID, ObjCSelectorRef Selector,
+ bool IsInstanceMethod, const ObjCMethodInfo &Info,
+ llvm::VersionTuple SwiftVersion);
+
+ /// Add information about a global variable.
+ ///
+ /// \param Name The name of this global variable.
+ /// \param Info Information about this global variable.
+ void addGlobalVariable(std::optional<Context> Ctx, llvm::StringRef Name,
+ const GlobalVariableInfo &Info,
+ llvm::VersionTuple SwiftVersion);
+
+ /// Add information about a global function.
+ ///
+ /// \param Name The name of this global function.
+ /// \param Info Information about this global function.
+ void addGlobalFunction(std::optional<Context> Ctx, llvm::StringRef Name,
+ const GlobalFunctionInfo &Info,
+ llvm::VersionTuple SwiftVersion);
+
+ /// Add information about an enumerator.
+ ///
+ /// \param Name The name of this enumerator.
+ /// \param Info Information about this enumerator.
+ void addEnumConstant(llvm::StringRef Name, const EnumConstantInfo &Info,
+ llvm::VersionTuple SwiftVersion);
+
+ /// Add information about a tag (struct/union/enum/C++ class).
+ ///
+ /// \param Name The name of this tag.
+ /// \param Info Information about this tag.
+ void addTag(std::optional<Context> Ctx, llvm::StringRef Name,
+ const TagInfo &Info, llvm::VersionTuple SwiftVersion);
+
+ /// Add information about a typedef.
+ ///
+ /// \param Name The name of this typedef.
+ /// \param Info Information about this typedef.
+ void addTypedef(std::optional<Context> Ctx, llvm::StringRef Name,
+ const TypedefInfo &Info, llvm::VersionTuple SwiftVersion);
+};
+} // namespace api_notes
+} // namespace clang
+
+#endif // LLVM_CLANG_APINOTES_WRITER_H
diff --git a/contrib/llvm-project/clang/include/clang/APINotes/APINotesYAMLCompiler.h b/contrib/llvm-project/clang/include/clang/APINotes/APINotesYAMLCompiler.h
index 6098d0ee36fc..9c24ed85b6a1 100644
--- a/contrib/llvm-project/clang/include/clang/APINotes/APINotesYAMLCompiler.h
+++ b/contrib/llvm-project/clang/include/clang/APINotes/APINotesYAMLCompiler.h
@@ -10,14 +10,25 @@
#define LLVM_CLANG_APINOTES_APINOTESYAMLCOMPILER_H
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
namespace clang {
+class FileEntry;
+} // namespace clang
+
+namespace clang {
namespace api_notes {
/// Parses the APINotes YAML content and writes the representation back to the
/// specified stream. This provides a means of testing the YAML processing of
/// the APINotes format.
bool parseAndDumpAPINotes(llvm::StringRef YI, llvm::raw_ostream &OS);
+
+/// Converts API notes from YAML format to binary format.
+bool compileAPINotes(llvm::StringRef YAMLInput, const FileEntry *SourceFile,
+ llvm::raw_ostream &OS,
+ llvm::SourceMgr::DiagHandlerTy DiagHandler = nullptr,
+ void *DiagHandlerCtxt = nullptr);
} // namespace api_notes
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/APINotes/Types.h b/contrib/llvm-project/clang/include/clang/APINotes/Types.h
index d9bf2f07291f..1d116becf06c 100644
--- a/contrib/llvm-project/clang/include/clang/APINotes/Types.h
+++ b/contrib/llvm-project/clang/include/clang/APINotes/Types.h
@@ -10,11 +10,16 @@
#define LLVM_CLANG_APINOTES_TYPES_H
#include "clang/Basic/Specifiers.h"
-#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include <climits>
+#include <optional>
#include <vector>
+namespace llvm {
+class raw_ostream;
+} // namespace llvm
+
namespace clang {
namespace api_notes {
enum class RetainCountConventionKind {
@@ -70,14 +75,14 @@ public:
: Unavailable(0), UnavailableInSwift(0), SwiftPrivateSpecified(0),
SwiftPrivate(0) {}
- llvm::Optional<bool> isSwiftPrivate() const {
- return SwiftPrivateSpecified ? llvm::Optional<bool>(SwiftPrivate)
- : llvm::None;
+ std::optional<bool> isSwiftPrivate() const {
+ return SwiftPrivateSpecified ? std::optional<bool>(SwiftPrivate)
+ : std::nullopt;
}
- void setSwiftPrivate(llvm::Optional<bool> Private) {
- SwiftPrivateSpecified = Private.hasValue();
- SwiftPrivate = Private.hasValue() ? *Private : 0;
+ void setSwiftPrivate(std::optional<bool> Private) {
+ SwiftPrivateSpecified = Private.has_value();
+ SwiftPrivate = Private.value_or(0);
}
friend bool operator==(const CommonEntityInfo &, const CommonEntityInfo &);
@@ -127,39 +132,33 @@ class CommonTypeInfo : public CommonEntityInfo {
/// The Swift type to which a given type is bridged.
///
/// Reflects the swift_bridge attribute.
- llvm::Optional<std::string> SwiftBridge;
+ std::optional<std::string> SwiftBridge;
/// The NS error domain for this type.
- llvm::Optional<std::string> NSErrorDomain;
+ std::optional<std::string> NSErrorDomain;
public:
- CommonTypeInfo() : CommonEntityInfo() {}
+ CommonTypeInfo() {}
- const llvm::Optional<std::string> &getSwiftBridge() const {
+ const std::optional<std::string> &getSwiftBridge() const {
return SwiftBridge;
}
- void setSwiftBridge(const llvm::Optional<std::string> &SwiftType) {
+ void setSwiftBridge(std::optional<std::string> SwiftType) {
SwiftBridge = SwiftType;
}
- void setSwiftBridge(const llvm::Optional<llvm::StringRef> &SwiftType) {
- SwiftBridge = SwiftType
- ? llvm::Optional<std::string>(std::string(*SwiftType))
- : llvm::None;
- }
-
- const llvm::Optional<std::string> &getNSErrorDomain() const {
+ const std::optional<std::string> &getNSErrorDomain() const {
return NSErrorDomain;
}
- void setNSErrorDomain(const llvm::Optional<std::string> &Domain) {
+ void setNSErrorDomain(const std::optional<std::string> &Domain) {
NSErrorDomain = Domain;
}
- void setNSErrorDomain(const llvm::Optional<llvm::StringRef> &Domain) {
- NSErrorDomain =
- Domain ? llvm::Optional<std::string>(std::string(*Domain)) : llvm::None;
+ void setNSErrorDomain(const std::optional<llvm::StringRef> &Domain) {
+ NSErrorDomain = Domain ? std::optional<std::string>(std::string(*Domain))
+ : std::nullopt;
}
friend bool operator==(const CommonTypeInfo &, const CommonTypeInfo &);
@@ -208,20 +207,20 @@ class ObjCContextInfo : public CommonTypeInfo {
public:
ObjCContextInfo()
- : CommonTypeInfo(), HasDefaultNullability(0), DefaultNullability(0),
- HasDesignatedInits(0), SwiftImportAsNonGenericSpecified(false),
- SwiftImportAsNonGeneric(false), SwiftObjCMembersSpecified(false),
- SwiftObjCMembers(false) {}
+ : HasDefaultNullability(0), DefaultNullability(0), HasDesignatedInits(0),
+ SwiftImportAsNonGenericSpecified(false), SwiftImportAsNonGeneric(false),
+ SwiftObjCMembersSpecified(false), SwiftObjCMembers(false) {}
/// Determine the default nullability for properties and methods of this
/// class.
///
- /// eturns the default nullability, if implied, or None if there is no
- llvm::Optional<NullabilityKind> getDefaultNullability() const {
+ /// Returns the default nullability, if implied, or std::nullopt if there is
+ /// none.
+ std::optional<NullabilityKind> getDefaultNullability() const {
return HasDefaultNullability
- ? llvm::Optional<NullabilityKind>(
+ ? std::optional<NullabilityKind>(
static_cast<NullabilityKind>(DefaultNullability))
- : llvm::None;
+ : std::nullopt;
}
/// Set the default nullability for properties and methods of this class.
@@ -233,23 +232,23 @@ public:
bool hasDesignatedInits() const { return HasDesignatedInits; }
void setHasDesignatedInits(bool Value) { HasDesignatedInits = Value; }
- llvm::Optional<bool> getSwiftImportAsNonGeneric() const {
+ std::optional<bool> getSwiftImportAsNonGeneric() const {
return SwiftImportAsNonGenericSpecified
- ? llvm::Optional<bool>(SwiftImportAsNonGeneric)
- : llvm::None;
+ ? std::optional<bool>(SwiftImportAsNonGeneric)
+ : std::nullopt;
}
- void setSwiftImportAsNonGeneric(llvm::Optional<bool> Value) {
- SwiftImportAsNonGenericSpecified = Value.hasValue();
- SwiftImportAsNonGeneric = Value.hasValue() ? *Value : false;
+ void setSwiftImportAsNonGeneric(std::optional<bool> Value) {
+ SwiftImportAsNonGenericSpecified = Value.has_value();
+ SwiftImportAsNonGeneric = Value.value_or(false);
}
- llvm::Optional<bool> getSwiftObjCMembers() const {
- return SwiftObjCMembersSpecified ? llvm::Optional<bool>(SwiftObjCMembers)
- : llvm::None;
+ std::optional<bool> getSwiftObjCMembers() const {
+ return SwiftObjCMembersSpecified ? std::optional<bool>(SwiftObjCMembers)
+ : std::nullopt;
}
- void setSwiftObjCMembers(llvm::Optional<bool> Value) {
- SwiftObjCMembersSpecified = Value.hasValue();
- SwiftObjCMembers = Value.hasValue() ? *Value : false;
+ void setSwiftObjCMembers(std::optional<bool> Value) {
+ SwiftObjCMembersSpecified = Value.has_value();
+ SwiftObjCMembers = Value.value_or(false);
}
/// Strip off any information within the class information structure that is
@@ -309,12 +308,12 @@ class VariableInfo : public CommonEntityInfo {
std::string Type;
public:
- VariableInfo() : CommonEntityInfo(), NullabilityAudited(false), Nullable(0) {}
+ VariableInfo() : NullabilityAudited(false), Nullable(0) {}
- llvm::Optional<NullabilityKind> getNullability() const {
- return NullabilityAudited ? llvm::Optional<NullabilityKind>(
+ std::optional<NullabilityKind> getNullability() const {
+ return NullabilityAudited ? std::optional<NullabilityKind>(
static_cast<NullabilityKind>(Nullable))
- : llvm::None;
+ : std::nullopt;
}
void setNullabilityAudited(NullabilityKind kind) {
@@ -358,17 +357,16 @@ class ObjCPropertyInfo : public VariableInfo {
public:
ObjCPropertyInfo()
- : VariableInfo(), SwiftImportAsAccessorsSpecified(false),
- SwiftImportAsAccessors(false) {}
+ : SwiftImportAsAccessorsSpecified(false), SwiftImportAsAccessors(false) {}
- llvm::Optional<bool> getSwiftImportAsAccessors() const {
+ std::optional<bool> getSwiftImportAsAccessors() const {
return SwiftImportAsAccessorsSpecified
- ? llvm::Optional<bool>(SwiftImportAsAccessors)
- : llvm::None;
+ ? std::optional<bool>(SwiftImportAsAccessors)
+ : std::nullopt;
}
- void setSwiftImportAsAccessors(llvm::Optional<bool> Value) {
- SwiftImportAsAccessorsSpecified = Value.hasValue();
- SwiftImportAsAccessors = Value.hasValue() ? *Value : false;
+ void setSwiftImportAsAccessors(std::optional<bool> Value) {
+ SwiftImportAsAccessorsSpecified = Value.has_value();
+ SwiftImportAsAccessors = Value.value_or(false);
}
friend bool operator==(const ObjCPropertyInfo &, const ObjCPropertyInfo &);
@@ -423,28 +421,26 @@ class ParamInfo : public VariableInfo {
public:
ParamInfo()
- : VariableInfo(), NoEscapeSpecified(false), NoEscape(false),
- RawRetainCountConvention() {}
+ : NoEscapeSpecified(false), NoEscape(false), RawRetainCountConvention() {}
- llvm::Optional<bool> isNoEscape() const {
+ std::optional<bool> isNoEscape() const {
if (!NoEscapeSpecified)
- return llvm::None;
+ return std::nullopt;
return NoEscape;
}
- void setNoEscape(llvm::Optional<bool> Value) {
- NoEscapeSpecified = Value.hasValue();
- NoEscape = Value.hasValue() ? *Value : false;
+ void setNoEscape(std::optional<bool> Value) {
+ NoEscapeSpecified = Value.has_value();
+ NoEscape = Value.value_or(false);
}
- llvm::Optional<RetainCountConventionKind> getRetainCountConvention() const {
+ std::optional<RetainCountConventionKind> getRetainCountConvention() const {
if (!RawRetainCountConvention)
- return llvm::None;
+ return std::nullopt;
return static_cast<RetainCountConventionKind>(RawRetainCountConvention - 1);
}
void
- setRetainCountConvention(llvm::Optional<RetainCountConventionKind> Value) {
- RawRetainCountConvention =
- Value.hasValue() ? static_cast<unsigned>(Value.getValue()) + 1 : 0;
+ setRetainCountConvention(std::optional<RetainCountConventionKind> Value) {
+ RawRetainCountConvention = Value ? static_cast<unsigned>(*Value) + 1 : 0;
assert(getRetainCountConvention() == Value && "bitfield too small");
}
@@ -481,7 +477,7 @@ inline bool operator!=(const ParamInfo &LHS, const ParamInfo &RHS) {
/// API notes for a function or method.
class FunctionInfo : public CommonEntityInfo {
private:
- static constexpr const unsigned NullabilityKindMask = 0x3;
+ static constexpr const uint64_t NullabilityKindMask = 0x3;
static constexpr const unsigned NullabilityKindSize = 2;
static constexpr const unsigned ReturnInfoIndex = 0;
@@ -514,7 +510,7 @@ public:
std::vector<ParamInfo> Params;
FunctionInfo()
- : CommonEntityInfo(), NullabilityAudited(false), NumAdjustedNullable(0),
+ : NullabilityAudited(false), NumAdjustedNullable(0),
RawRetainCountConvention() {}
static unsigned getMaxNullabilityIndex() {
@@ -555,15 +551,14 @@ public:
NullabilityKind getReturnTypeInfo() const { return getTypeInfo(0); }
- llvm::Optional<RetainCountConventionKind> getRetainCountConvention() const {
+ std::optional<RetainCountConventionKind> getRetainCountConvention() const {
if (!RawRetainCountConvention)
- return llvm::None;
+ return std::nullopt;
return static_cast<RetainCountConventionKind>(RawRetainCountConvention - 1);
}
void
- setRetainCountConvention(llvm::Optional<RetainCountConventionKind> Value) {
- RawRetainCountConvention =
- Value.hasValue() ? static_cast<unsigned>(Value.getValue()) + 1 : 0;
+ setRetainCountConvention(std::optional<RetainCountConventionKind> Value) {
+ RawRetainCountConvention = Value ? static_cast<unsigned>(*Value) + 1 : 0;
assert(getRetainCountConvention() == Value && "bitfield too small");
}
@@ -607,8 +602,7 @@ public:
/// Whether this is a required initializer.
unsigned RequiredInit : 1;
- ObjCMethodInfo()
- : FunctionInfo(), DesignatedInit(false), RequiredInit(false) {}
+ ObjCMethodInfo() : DesignatedInit(false), RequiredInit(false) {}
friend bool operator==(const ObjCMethodInfo &, const ObjCMethodInfo &);
@@ -639,19 +633,19 @@ inline bool operator!=(const ObjCMethodInfo &LHS, const ObjCMethodInfo &RHS) {
/// Describes API notes data for a global variable.
class GlobalVariableInfo : public VariableInfo {
public:
- GlobalVariableInfo() : VariableInfo() {}
+ GlobalVariableInfo() {}
};
/// Describes API notes data for a global function.
class GlobalFunctionInfo : public FunctionInfo {
public:
- GlobalFunctionInfo() : FunctionInfo() {}
+ GlobalFunctionInfo() {}
};
/// Describes API notes data for an enumerator.
class EnumConstantInfo : public CommonEntityInfo {
public:
- EnumConstantInfo() : CommonEntityInfo() {}
+ EnumConstantInfo() {}
};
/// Describes API notes data for a tag.
@@ -660,27 +654,38 @@ class TagInfo : public CommonTypeInfo {
unsigned IsFlagEnum : 1;
public:
- llvm::Optional<EnumExtensibilityKind> EnumExtensibility;
+ std::optional<std::string> SwiftImportAs;
+ std::optional<std::string> SwiftRetainOp;
+ std::optional<std::string> SwiftReleaseOp;
+
+ std::optional<EnumExtensibilityKind> EnumExtensibility;
- TagInfo() : CommonTypeInfo(), HasFlagEnum(0), IsFlagEnum(0) {}
+ TagInfo() : HasFlagEnum(0), IsFlagEnum(0) {}
- llvm::Optional<bool> isFlagEnum() const {
+ std::optional<bool> isFlagEnum() const {
if (HasFlagEnum)
return IsFlagEnum;
- return llvm::None;
+ return std::nullopt;
}
- void setFlagEnum(llvm::Optional<bool> Value) {
- HasFlagEnum = Value.hasValue();
- IsFlagEnum = Value.hasValue() ? *Value : false;
+ void setFlagEnum(std::optional<bool> Value) {
+ HasFlagEnum = Value.has_value();
+ IsFlagEnum = Value.value_or(false);
}
TagInfo &operator|=(const TagInfo &RHS) {
static_cast<CommonTypeInfo &>(*this) |= RHS;
- if (!HasFlagEnum && HasFlagEnum)
+ if (!SwiftImportAs)
+ SwiftImportAs = RHS.SwiftImportAs;
+ if (!SwiftRetainOp)
+ SwiftRetainOp = RHS.SwiftRetainOp;
+ if (!SwiftReleaseOp)
+ SwiftReleaseOp = RHS.SwiftReleaseOp;
+
+ if (!HasFlagEnum)
setFlagEnum(RHS.isFlagEnum());
- if (!EnumExtensibility.hasValue())
+ if (!EnumExtensibility)
EnumExtensibility = RHS.EnumExtensibility;
return *this;
@@ -693,6 +698,9 @@ public:
inline bool operator==(const TagInfo &LHS, const TagInfo &RHS) {
return static_cast<const CommonTypeInfo &>(LHS) == RHS &&
+ LHS.SwiftImportAs == RHS.SwiftImportAs &&
+ LHS.SwiftRetainOp == RHS.SwiftRetainOp &&
+ LHS.SwiftReleaseOp == RHS.SwiftReleaseOp &&
LHS.isFlagEnum() == RHS.isFlagEnum() &&
LHS.EnumExtensibility == RHS.EnumExtensibility;
}
@@ -704,13 +712,13 @@ inline bool operator!=(const TagInfo &LHS, const TagInfo &RHS) {
/// Describes API notes data for a typedef.
class TypedefInfo : public CommonTypeInfo {
public:
- llvm::Optional<SwiftNewTypeKind> SwiftWrapper;
+ std::optional<SwiftNewTypeKind> SwiftWrapper;
- TypedefInfo() : CommonTypeInfo() {}
+ TypedefInfo() {}
TypedefInfo &operator|=(const TypedefInfo &RHS) {
static_cast<CommonTypeInfo &>(*this) |= RHS;
- if (!SwiftWrapper.hasValue())
+ if (!SwiftWrapper)
SwiftWrapper = RHS.SwiftWrapper;
return *this;
}
@@ -728,6 +736,42 @@ inline bool operator==(const TypedefInfo &LHS, const TypedefInfo &RHS) {
inline bool operator!=(const TypedefInfo &LHS, const TypedefInfo &RHS) {
return !(LHS == RHS);
}
+
+/// The file extension used for the source representation of API notes.
+static const constexpr char SOURCE_APINOTES_EXTENSION[] = "apinotes";
+
+/// Opaque context ID used to refer to an Objective-C class or protocol or a C++
+/// namespace.
+class ContextID {
+public:
+ unsigned Value;
+
+ explicit ContextID(unsigned value) : Value(value) {}
+};
+
+enum class ContextKind : uint8_t {
+ ObjCClass = 0,
+ ObjCProtocol = 1,
+ Namespace = 2,
+};
+
+struct Context {
+ ContextID id;
+ ContextKind kind;
+
+ Context(ContextID id, ContextKind kind) : id(id), kind(kind) {}
+};
+
+/// A temporary reference to an Objective-C selector, suitable for
+/// referencing selector data on the stack.
+///
+/// Instances of this struct do not store references to any of the
+/// data they contain; it is up to the user to ensure that the data
+/// referenced by the identifier list persists.
+struct ObjCSelectorRef {
+ unsigned NumArgs;
+ llvm::ArrayRef<llvm::StringRef> Identifiers;
+};
} // namespace api_notes
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/ARCMigrate/ARCMT.h b/contrib/llvm-project/clang/include/clang/ARCMigrate/ARCMT.h
index 49e94a92cd0b..2b950e3d2cc2 100644
--- a/contrib/llvm-project/clang/include/clang/ARCMigrate/ARCMT.h
+++ b/contrib/llvm-project/clang/include/clang/ARCMigrate/ARCMT.h
@@ -102,7 +102,7 @@ class MigrationProcess {
public:
bool HadARCErrors;
- MigrationProcess(const CompilerInvocation &CI,
+ MigrationProcess(CompilerInvocation &CI,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
DiagnosticConsumer *diagClient,
StringRef outputDir = StringRef());
diff --git a/contrib/llvm-project/clang/include/clang/ARCMigrate/ARCMTActions.h b/contrib/llvm-project/clang/include/clang/ARCMigrate/ARCMTActions.h
index 641c259b3867..714f4b33db44 100644
--- a/contrib/llvm-project/clang/include/clang/ARCMigrate/ARCMTActions.h
+++ b/contrib/llvm-project/clang/include/clang/ARCMigrate/ARCMTActions.h
@@ -43,7 +43,7 @@ protected:
class MigrateAction : public WrapperFrontendAction {
std::string MigrateDir;
std::string PlistOut;
- bool EmitPremigrationARCErros;
+ bool EmitPremigrationARCErrors;
protected:
bool BeginInvocation(CompilerInstance &CI) override;
diff --git a/contrib/llvm-project/clang/include/clang/ARCMigrate/FileRemapper.h b/contrib/llvm-project/clang/include/clang/ARCMigrate/FileRemapper.h
index 4da68a678be2..afcee363516a 100644
--- a/contrib/llvm-project/clang/include/clang/ARCMigrate/FileRemapper.h
+++ b/contrib/llvm-project/clang/include/clang/ARCMigrate/FileRemapper.h
@@ -9,12 +9,13 @@
#ifndef LLVM_CLANG_ARCMIGRATE_FILEREMAPPER_H
#define LLVM_CLANG_ARCMIGRATE_FILEREMAPPER_H
+#include "clang/Basic/FileEntry.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include <memory>
+#include <variant>
namespace llvm {
class MemoryBuffer;
@@ -23,7 +24,6 @@ namespace llvm {
namespace clang {
class FileManager;
- class FileEntry;
class DiagnosticsEngine;
class PreprocessorOptions;
@@ -33,11 +33,11 @@ class FileRemapper {
// FIXME: Reuse the same FileManager for multiple ASTContexts.
std::unique_ptr<FileManager> FileMgr;
- typedef llvm::PointerUnion<const FileEntry *, llvm::MemoryBuffer *> Target;
- typedef llvm::DenseMap<const FileEntry *, Target> MappingsTy;
+ using Target = std::variant<FileEntryRef, llvm::MemoryBuffer *>;
+ using MappingsTy = llvm::DenseMap<FileEntryRef, Target>;
MappingsTy FromToMappings;
- llvm::DenseMap<const FileEntry *, const FileEntry *> ToFromMappings;
+ llvm::DenseMap<const FileEntry *, FileEntryRef> ToFromMappings;
public:
FileRemapper();
@@ -66,10 +66,10 @@ public:
void clear(StringRef outputDir = StringRef());
private:
- void remap(const FileEntry *file, std::unique_ptr<llvm::MemoryBuffer> memBuf);
- void remap(const FileEntry *file, const FileEntry *newfile);
+ void remap(FileEntryRef file, std::unique_ptr<llvm::MemoryBuffer> memBuf);
+ void remap(FileEntryRef file, FileEntryRef newfile);
- const FileEntry *getOriginalFile(StringRef filePath);
+ OptionalFileEntryRef getOriginalFile(StringRef filePath);
void resetTarget(Target &targ);
bool report(const Twine &err, DiagnosticsEngine &Diag);
diff --git a/contrib/llvm-project/clang/include/clang/AST/APNumericStorage.h b/contrib/llvm-project/clang/include/clang/AST/APNumericStorage.h
new file mode 100644
index 000000000000..95eddbcd86e8
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/AST/APNumericStorage.h
@@ -0,0 +1,71 @@
+//===--- APNumericStorage.h - Store APInt/APFloat in ASTContext -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_APNUMERICSTORAGE_H
+#define LLVM_CLANG_AST_APNUMERICSTORAGE_H
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+
+namespace clang {
+class ASTContext;
+
+/// Used by IntegerLiteral/FloatingLiteral/EnumConstantDecl to store the
+/// numeric without leaking memory.
+///
+/// For large floats/integers, APFloat/APInt will allocate memory from the heap
+/// to represent these numbers. Unfortunately, when we use a BumpPtrAllocator
+/// to allocate IntegerLiteral/FloatingLiteral nodes the memory associated with
+/// the APFloat/APInt values will never get freed. APNumericStorage uses
+/// ASTContext's allocator for memory allocation.
+class APNumericStorage {
+ union {
+ uint64_t VAL; ///< Used to store the <= 64 bits integer value.
+ uint64_t *pVal; ///< Used to store the >64 bits integer value.
+ };
+ unsigned BitWidth;
+
+ bool hasAllocation() const { return llvm::APInt::getNumWords(BitWidth) > 1; }
+
+ APNumericStorage(const APNumericStorage &) = delete;
+ void operator=(const APNumericStorage &) = delete;
+
+protected:
+ APNumericStorage() : VAL(0), BitWidth(0) {}
+
+ llvm::APInt getIntValue() const {
+ unsigned NumWords = llvm::APInt::getNumWords(BitWidth);
+ if (NumWords > 1)
+ return llvm::APInt(BitWidth, NumWords, pVal);
+ else
+ return llvm::APInt(BitWidth, VAL);
+ }
+ void setIntValue(const ASTContext &C, const llvm::APInt &Val);
+};
+
+class APIntStorage : private APNumericStorage {
+public:
+ llvm::APInt getValue() const { return getIntValue(); }
+ void setValue(const ASTContext &C, const llvm::APInt &Val) {
+ setIntValue(C, Val);
+ }
+};
+
+class APFloatStorage : private APNumericStorage {
+public:
+ llvm::APFloat getValue(const llvm::fltSemantics &Semantics) const {
+ return llvm::APFloat(Semantics, getIntValue());
+ }
+ void setValue(const ASTContext &C, const llvm::APFloat &Val) {
+ setIntValue(C, Val.bitcastToAPInt());
+ }
+};
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_AST_APNUMERICSTORAGE_H
diff --git a/contrib/llvm-project/clang/include/clang/AST/APValue.h b/contrib/llvm-project/clang/include/clang/AST/APValue.h
index 5f4ac02f53c9..c4206b73b115 100644
--- a/contrib/llvm-project/clang/include/clang/AST/APValue.h
+++ b/contrib/llvm-project/clang/include/clang/AST/APValue.h
@@ -238,7 +238,7 @@ public:
}
};
class LValuePathSerializationHelper {
- const void *ElemTy;
+ const void *Ty;
public:
ArrayRef<LValuePathEntry> Path;
@@ -267,15 +267,19 @@ private:
};
struct LV;
struct Vec {
- APValue *Elts;
- unsigned NumElts;
- Vec() : Elts(nullptr), NumElts(0) {}
+ APValue *Elts = nullptr;
+ unsigned NumElts = 0;
+ Vec() = default;
+ Vec(const Vec &) = delete;
+ Vec &operator=(const Vec &) = delete;
~Vec() { delete[] Elts; }
};
struct Arr {
APValue *Elts;
unsigned NumElts, ArrSize;
Arr(unsigned NumElts, unsigned ArrSize);
+ Arr(const Arr &) = delete;
+ Arr &operator=(const Arr &) = delete;
~Arr();
};
struct StructData {
@@ -283,12 +287,16 @@ private:
unsigned NumBases;
unsigned NumFields;
StructData(unsigned NumBases, unsigned NumFields);
+ StructData(const StructData &) = delete;
+ StructData &operator=(const StructData &) = delete;
~StructData();
};
struct UnionData {
const FieldDecl *Field;
APValue *Value;
UnionData();
+ UnionData(const UnionData &) = delete;
+ UnionData &operator=(const UnionData &) = delete;
~UnionData();
};
struct AddrLabelDiffData {
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTConcept.h b/contrib/llvm-project/clang/include/clang/AST/ASTConcept.h
index d0526f4fa5c5..5f9aa41d3e6c 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTConcept.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTConcept.h
@@ -1,9 +1,8 @@
//===--- ASTConcept.h - Concepts Related AST Data Structures ----*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -15,15 +14,21 @@
#ifndef LLVM_CLANG_AST_ASTCONCEPT_H
#define LLVM_CLANG_AST_ASTCONCEPT_H
-#include "clang/AST/Expr.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/TemplateBase.h"
#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallVector.h"
#include <utility>
namespace clang {
+
class ConceptDecl;
-class ConceptSpecializationExpr;
+class Expr;
+class NamedDecl;
+struct PrintingPolicy;
/// The result of a constraint satisfaction check, containing the necessary
/// information to diagnose an unsatisfied constraint.
@@ -46,6 +51,7 @@ public:
using Detail = llvm::PointerUnion<Expr *, SubstitutionDiagnostic *>;
bool IsSatisfied = false;
+ bool ContainsErrors = false;
/// \brief Pairs of unsatisfied atomic constraint expressions along with the
/// substituted constraint expr, if the template arguments could be
@@ -60,6 +66,13 @@ public:
static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &C,
const NamedDecl *ConstraintOwner,
ArrayRef<TemplateArgument> TemplateArgs);
+
+ bool HasSubstitutionFailure() {
+ for (const auto &Detail : Details)
+ if (Detail.second.dyn_cast<SubstitutionDiagnostic *>())
+ return true;
+ return false;
+ }
};
/// Pairs of unsatisfied atomic constraint expressions along with the
@@ -80,6 +93,7 @@ struct ASTConstraintSatisfaction final :
UnsatisfiedConstraintRecord> {
std::size_t NumRecords;
bool IsSatisfied : 1;
+ bool ContainsErrors : 1;
const UnsatisfiedConstraintRecord *begin() const {
return getTrailingObjects<UnsatisfiedConstraintRecord>();
@@ -91,15 +105,27 @@ struct ASTConstraintSatisfaction final :
ASTConstraintSatisfaction(const ASTContext &C,
const ConstraintSatisfaction &Satisfaction);
+ ASTConstraintSatisfaction(const ASTContext &C,
+ const ASTConstraintSatisfaction &Satisfaction);
static ASTConstraintSatisfaction *
Create(const ASTContext &C, const ConstraintSatisfaction &Satisfaction);
+ static ASTConstraintSatisfaction *
+ Rebuild(const ASTContext &C, const ASTConstraintSatisfaction &Satisfaction);
};
-/// \brief Common data class for constructs that reference concepts with
-/// template arguments.
+/// A reference to a concept and its template args, as it appears in the code.
+///
+/// Examples:
+/// template <int X> requires is_even<X> int half = X/2;
+/// ~~~~~~~~~~ (in ConceptSpecializationExpr)
+///
+/// std::input_iterator auto I = Container.begin();
+/// ~~~~~~~~~~~~~~~~~~~ (in AutoTypeLoc)
+///
+/// template <std::derives_from<Expr> T> void dump();
+/// ~~~~~~~~~~~~~~~~~~~~~~~ (in TemplateTypeParmDecl)
class ConceptReference {
-protected:
// \brief The optional nested name specifier used when naming the concept.
NestedNameSpecifierLoc NestedNameSpec;
@@ -123,18 +149,20 @@ protected:
/// concept.
const ASTTemplateArgumentListInfo *ArgsAsWritten;
-public:
-
ConceptReference(NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
DeclarationNameInfo ConceptNameInfo, NamedDecl *FoundDecl,
ConceptDecl *NamedConcept,
- const ASTTemplateArgumentListInfo *ArgsAsWritten) :
- NestedNameSpec(NNS), TemplateKWLoc(TemplateKWLoc),
- ConceptName(ConceptNameInfo), FoundDecl(FoundDecl),
- NamedConcept(NamedConcept), ArgsAsWritten(ArgsAsWritten) {}
+ const ASTTemplateArgumentListInfo *ArgsAsWritten)
+ : NestedNameSpec(NNS), TemplateKWLoc(TemplateKWLoc),
+ ConceptName(ConceptNameInfo), FoundDecl(FoundDecl),
+ NamedConcept(NamedConcept), ArgsAsWritten(ArgsAsWritten) {}
- ConceptReference() : NestedNameSpec(), TemplateKWLoc(), ConceptName(),
- FoundDecl(nullptr), NamedConcept(nullptr), ArgsAsWritten(nullptr) {}
+public:
+ static ConceptReference *
+ Create(const ASTContext &C, NestedNameSpecifierLoc NNS,
+ SourceLocation TemplateKWLoc, DeclarationNameInfo ConceptNameInfo,
+ NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
+ const ASTTemplateArgumentListInfo *ArgsAsWritten);
const NestedNameSpecifierLoc &getNestedNameSpecifierLoc() const {
return NestedNameSpec;
@@ -148,6 +176,26 @@ public:
SourceLocation getTemplateKWLoc() const { return TemplateKWLoc; }
+ SourceLocation getLocation() const { return getConceptNameLoc(); }
+
+ SourceLocation getBeginLoc() const LLVM_READONLY {
+ // Note that if the qualifier is null the template KW must also be null.
+ if (auto QualifierLoc = getNestedNameSpecifierLoc())
+ return QualifierLoc.getBeginLoc();
+ return getConceptNameInfo().getBeginLoc();
+ }
+
+ SourceLocation getEndLoc() const LLVM_READONLY {
+ return getTemplateArgsAsWritten() &&
+ getTemplateArgsAsWritten()->getRAngleLoc().isValid()
+ ? getTemplateArgsAsWritten()->getRAngleLoc()
+ : getConceptNameInfo().getEndLoc();
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(getBeginLoc(), getEndLoc());
+ }
+
NamedDecl *getFoundDecl() const {
return FoundDecl;
}
@@ -165,22 +213,32 @@ public:
bool hasExplicitTemplateArgs() const {
return ArgsAsWritten != nullptr;
}
+
+ void print(llvm::raw_ostream &OS, const PrintingPolicy &Policy) const;
+ void dump() const;
+ void dump(llvm::raw_ostream &) const;
};
-class TypeConstraint : public ConceptReference {
+/// Models the abbreviated syntax to constrain a template type parameter:
+/// template <convertible_to<string> T> void print(T object);
+/// ~~~~~~~~~~~~~~~~~~~~~~
+/// Semantically, this adds an "immediately-declared constraint" with extra arg:
+/// requires convertible_to<T, string>
+///
+/// In the C++ grammar, a type-constraint is also used for auto types:
+/// convertible_to<string> auto X = ...;
+/// We do *not* model these as TypeConstraints, but AutoType(Loc) directly.
+class TypeConstraint {
/// \brief The immediately-declared constraint expression introduced by this
/// type-constraint.
Expr *ImmediatelyDeclaredConstraint = nullptr;
+ ConceptReference *ConceptRef;
public:
- TypeConstraint(NestedNameSpecifierLoc NNS,
- DeclarationNameInfo ConceptNameInfo, NamedDecl *FoundDecl,
- ConceptDecl *NamedConcept,
- const ASTTemplateArgumentListInfo *ArgsAsWritten,
- Expr *ImmediatelyDeclaredConstraint) :
- ConceptReference(NNS, /*TemplateKWLoc=*/SourceLocation(), ConceptNameInfo,
- FoundDecl, NamedConcept, ArgsAsWritten),
- ImmediatelyDeclaredConstraint(ImmediatelyDeclaredConstraint) {}
+ TypeConstraint(ConceptReference *ConceptRef,
+ Expr *ImmediatelyDeclaredConstraint)
+ : ImmediatelyDeclaredConstraint(ImmediatelyDeclaredConstraint),
+ ConceptRef(ConceptRef) {}
/// \brief Get the immediately-declared constraint expression introduced by
/// this type-constraint, that is - the constraint expression that is added to
@@ -189,7 +247,41 @@ public:
return ImmediatelyDeclaredConstraint;
}
- void print(llvm::raw_ostream &OS, PrintingPolicy Policy) const;
+ ConceptReference *getConceptReference() const { return ConceptRef; }
+
+ // FIXME: Instead of using these concept related functions the callers should
+ // directly work with the corresponding ConceptReference.
+ ConceptDecl *getNamedConcept() const { return ConceptRef->getNamedConcept(); }
+
+ SourceLocation getConceptNameLoc() const {
+ return ConceptRef->getConceptNameLoc();
+ }
+
+ bool hasExplicitTemplateArgs() const {
+ return ConceptRef->hasExplicitTemplateArgs();
+ }
+
+ const ASTTemplateArgumentListInfo *getTemplateArgsAsWritten() const {
+ return ConceptRef->getTemplateArgsAsWritten();
+ }
+
+ SourceLocation getTemplateKWLoc() const {
+ return ConceptRef->getTemplateKWLoc();
+ }
+
+ NamedDecl *getFoundDecl() const { return ConceptRef->getFoundDecl(); }
+
+ const NestedNameSpecifierLoc &getNestedNameSpecifierLoc() const {
+ return ConceptRef->getNestedNameSpecifierLoc();
+ }
+
+ const DeclarationNameInfo &getConceptNameInfo() const {
+ return ConceptRef->getConceptNameInfo();
+ }
+
+ void print(llvm::raw_ostream &OS, const PrintingPolicy &Policy) const {
+ ConceptRef->print(OS, Policy);
+ }
};
} // clang
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTConsumer.h b/contrib/llvm-project/clang/include/clang/AST/ASTConsumer.h
index ecdd8e873e1e..ebcd8059284d 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTConsumer.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTConsumer.h
@@ -33,12 +33,12 @@ namespace clang {
class ASTConsumer {
/// Whether this AST consumer also requires information about
/// semantic analysis.
- bool SemaConsumer;
+ bool SemaConsumer = false;
friend class SemaConsumer;
public:
- ASTConsumer() : SemaConsumer(false) { }
+ ASTConsumer() = default;
virtual ~ASTConsumer() {}
@@ -76,7 +76,7 @@ public:
virtual void HandleTagDeclRequiredDefinition(const TagDecl *D) {}
/// Invoked when a function is implicitly instantiated.
- /// Note that at this point point it does not have a body, its body is
+ /// Note that at this point it does not have a body, its body is
/// instantiated at the end of the translation unit and passed to
/// HandleTopLevelDecl.
virtual void HandleCXXImplicitFunctionInstantiation(FunctionDecl *D) {}
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTContext.h b/contrib/llvm-project/clang/include/clang/AST/ASTContext.h
index 34299581d89d..3e46a5da3fc0 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTContext.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTContext.h
@@ -14,65 +14,32 @@
#ifndef LLVM_CLANG_AST_ASTCONTEXT_H
#define LLVM_CLANG_AST_ASTCONTEXT_H
-#include "clang/AST/ASTContextAllocate.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/CanonicalType.h"
#include "clang/AST/CommentCommandTraits.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/Decl.h"
-#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/ExternalASTSource.h"
-#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/RawCommentList.h"
#include "clang/AST/TemplateName.h"
-#include "clang/AST/Type.h"
-#include "clang/Basic/AddressSpaces.h"
-#include "clang/Basic/AttrKinds.h"
-#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
-#include "clang/Basic/LangOptions.h"
-#include "clang/Basic/Linkage.h"
-#include "clang/Basic/NoSanitizeList.h"
-#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
-#include "clang/Basic/ProfileList.h"
#include "clang/Basic/SourceLocation.h"
-#include "clang/Basic/Specifiers.h"
-#include "clang/Basic/TargetCXXABI.h"
-#include "clang/Basic/XRayLists.h"
-#include "llvm/ADT/APSInt.h"
-#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/MapVector.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/TinyPtrVector.h"
-#include "llvm/ADT/Triple.h"
-#include "llvm/ADT/iterator_range.h"
-#include "llvm/Support/AlignOf.h"
-#include "llvm/Support/Allocator.h"
-#include "llvm/Support/Casting.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/TypeSize.h"
-#include <cassert>
-#include <cstddef>
-#include <cstdint>
-#include <iterator>
-#include <memory>
-#include <string>
-#include <type_traits>
-#include <utility>
-#include <vector>
+#include <optional>
namespace llvm {
@@ -90,6 +57,7 @@ class ASTMutationListener;
class ASTRecordLayout;
class AtomicExpr;
class BlockExpr;
+struct BlockVarCopyInit;
class BuiltinTemplateDecl;
class CharUnits;
class ConceptDecl;
@@ -98,18 +66,19 @@ class CXXConstructorDecl;
class CXXMethodDecl;
class CXXRecordDecl;
class DiagnosticsEngine;
-class ParentMapContext;
-class DynTypedNode;
class DynTypedNodeList;
class Expr;
+enum class FloatModeKind;
class GlobalDecl;
-class ItaniumMangleContext;
+class IdentifierTable;
+class LangOptions;
class MangleContext;
class MangleNumberingContext;
-class MaterializeTemporaryExpr;
class MemberSpecializationInfo;
class Module;
struct MSGuidDeclParts;
+class NestedNameSpecifier;
+class NoSanitizeList;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCContainerDecl;
@@ -123,9 +92,10 @@ class ObjCPropertyImplDecl;
class ObjCProtocolDecl;
class ObjCTypeParamDecl;
class OMPTraitInfo;
+class ParentMapContext;
struct ParsedTargetAttr;
class Preprocessor;
-class Stmt;
+class ProfileList;
class StoredDeclsMap;
class TargetAttr;
class TargetInfo;
@@ -133,11 +103,12 @@ class TemplateDecl;
class TemplateParameterList;
class TemplateTemplateParmDecl;
class TemplateTypeParmDecl;
+class TypeConstraint;
class UnresolvedSetIterator;
class UsingShadowDecl;
class VarTemplateDecl;
class VTableContextBase;
-struct BlockVarCopyInit;
+class XRayFunctionFilter;
namespace Builtin {
@@ -164,24 +135,46 @@ namespace serialization {
template <class> class AbstractTypeReader;
} // namespace serialization
+enum class AlignRequirementKind {
+ /// The alignment was not explicit in code.
+ None,
+
+ /// The alignment comes from an alignment attribute on a typedef.
+ RequiredByTypedef,
+
+ /// The alignment comes from an alignment attribute on a record type.
+ RequiredByRecord,
+
+ /// The alignment comes from an alignment attribute on a enum type.
+ RequiredByEnum,
+};
+
struct TypeInfo {
uint64_t Width = 0;
unsigned Align = 0;
- bool AlignIsRequired : 1;
+ AlignRequirementKind AlignRequirement;
- TypeInfo() : AlignIsRequired(false) {}
- TypeInfo(uint64_t Width, unsigned Align, bool AlignIsRequired)
- : Width(Width), Align(Align), AlignIsRequired(AlignIsRequired) {}
+ TypeInfo() : AlignRequirement(AlignRequirementKind::None) {}
+ TypeInfo(uint64_t Width, unsigned Align,
+ AlignRequirementKind AlignRequirement)
+ : Width(Width), Align(Align), AlignRequirement(AlignRequirement) {}
+ bool isAlignRequired() {
+ return AlignRequirement != AlignRequirementKind::None;
+ }
};
struct TypeInfoChars {
CharUnits Width;
CharUnits Align;
- bool AlignIsRequired : 1;
+ AlignRequirementKind AlignRequirement;
- TypeInfoChars() : AlignIsRequired(false) {}
- TypeInfoChars(CharUnits Width, CharUnits Align, bool AlignIsRequired)
- : Width(Width), Align(Align), AlignIsRequired(AlignIsRequired) {}
+ TypeInfoChars() : AlignRequirement(AlignRequirementKind::None) {}
+ TypeInfoChars(CharUnits Width, CharUnits Align,
+ AlignRequirementKind AlignRequirement)
+ : Width(Width), Align(Align), AlignRequirement(AlignRequirement) {}
+ bool isAlignRequired() {
+ return AlignRequirement != AlignRequirementKind::None;
+ }
};
/// Holds long-lived AST nodes (such as types and decls) that can be
@@ -192,7 +185,7 @@ class ASTContext : public RefCountedBase<ASTContext> {
mutable SmallVector<Type *, 0> Types;
mutable llvm::FoldingSet<ExtQuals> ExtQualNodes;
mutable llvm::FoldingSet<ComplexType> ComplexTypes;
- mutable llvm::FoldingSet<PointerType> PointerTypes;
+ mutable llvm::FoldingSet<PointerType> PointerTypes{GeneralTypesLog2InitSize};
mutable llvm::FoldingSet<AdjustedType> AdjustedTypes;
mutable llvm::FoldingSet<BlockPointerType> BlockPointerTypes;
mutable llvm::FoldingSet<LValueReferenceType> LValueReferenceTypes;
@@ -202,20 +195,25 @@ class ASTContext : public RefCountedBase<ASTContext> {
ConstantArrayTypes;
mutable llvm::FoldingSet<IncompleteArrayType> IncompleteArrayTypes;
mutable std::vector<VariableArrayType*> VariableArrayTypes;
- mutable llvm::FoldingSet<DependentSizedArrayType> DependentSizedArrayTypes;
- mutable llvm::FoldingSet<DependentSizedExtVectorType>
- DependentSizedExtVectorTypes;
- mutable llvm::FoldingSet<DependentAddressSpaceType>
+ mutable llvm::ContextualFoldingSet<DependentSizedArrayType, ASTContext &>
+ DependentSizedArrayTypes;
+ mutable llvm::ContextualFoldingSet<DependentSizedExtVectorType, ASTContext &>
+ DependentSizedExtVectorTypes;
+ mutable llvm::ContextualFoldingSet<DependentAddressSpaceType, ASTContext &>
DependentAddressSpaceTypes;
mutable llvm::FoldingSet<VectorType> VectorTypes;
- mutable llvm::FoldingSet<DependentVectorType> DependentVectorTypes;
+ mutable llvm::ContextualFoldingSet<DependentVectorType, ASTContext &>
+ DependentVectorTypes;
mutable llvm::FoldingSet<ConstantMatrixType> MatrixTypes;
- mutable llvm::FoldingSet<DependentSizedMatrixType> DependentSizedMatrixTypes;
+ mutable llvm::ContextualFoldingSet<DependentSizedMatrixType, ASTContext &>
+ DependentSizedMatrixTypes;
mutable llvm::FoldingSet<FunctionNoProtoType> FunctionNoProtoTypes;
mutable llvm::ContextualFoldingSet<FunctionProtoType, ASTContext&>
FunctionProtoTypes;
- mutable llvm::FoldingSet<DependentTypeOfExprType> DependentTypeOfExprTypes;
- mutable llvm::FoldingSet<DependentDecltypeType> DependentDecltypeTypes;
+ mutable llvm::ContextualFoldingSet<DependentTypeOfExprType, ASTContext &>
+ DependentTypeOfExprTypes;
+ mutable llvm::ContextualFoldingSet<DependentDecltypeType, ASTContext &>
+ DependentDecltypeTypes;
mutable llvm::FoldingSet<TemplateTypeParmType> TemplateTypeParmTypes;
mutable llvm::FoldingSet<ObjCTypeParamType> ObjCTypeParamTypes;
mutable llvm::FoldingSet<SubstTemplateTypeParmType>
@@ -224,8 +222,11 @@ class ASTContext : public RefCountedBase<ASTContext> {
SubstTemplateTypeParmPackTypes;
mutable llvm::ContextualFoldingSet<TemplateSpecializationType, ASTContext&>
TemplateSpecializationTypes;
- mutable llvm::FoldingSet<ParenType> ParenTypes;
- mutable llvm::FoldingSet<ElaboratedType> ElaboratedTypes;
+ mutable llvm::FoldingSet<ParenType> ParenTypes{GeneralTypesLog2InitSize};
+ mutable llvm::FoldingSet<UsingType> UsingTypes;
+ mutable llvm::FoldingSet<TypedefType> TypedefTypes;
+ mutable llvm::FoldingSet<ElaboratedType> ElaboratedTypes{
+ GeneralTypesLog2InitSize};
mutable llvm::FoldingSet<DependentNameType> DependentNameTypes;
mutable llvm::ContextualFoldingSet<DependentTemplateSpecializationType,
ASTContext&>
@@ -239,10 +240,12 @@ class ASTContext : public RefCountedBase<ASTContext> {
mutable llvm::FoldingSet<DeducedTemplateSpecializationType>
DeducedTemplateSpecializationTypes;
mutable llvm::FoldingSet<AtomicType> AtomicTypes;
- llvm::FoldingSet<AttributedType> AttributedTypes;
+ mutable llvm::FoldingSet<AttributedType> AttributedTypes;
mutable llvm::FoldingSet<PipeType> PipeTypes;
- mutable llvm::FoldingSet<ExtIntType> ExtIntTypes;
- mutable llvm::FoldingSet<DependentExtIntType> DependentExtIntTypes;
+ mutable llvm::FoldingSet<BitIntType> BitIntTypes;
+ mutable llvm::ContextualFoldingSet<DependentBitIntType, ASTContext &>
+ DependentBitIntTypes;
+ llvm::FoldingSet<BTFTagAttributedType> BTFTagAttributedTypes;
mutable llvm::FoldingSet<QualifiedTemplateName> QualifiedTemplateNames;
mutable llvm::FoldingSet<DependentTemplateName> DependentTemplateNames;
@@ -292,6 +295,10 @@ class ASTContext : public RefCountedBase<ASTContext> {
/// Mapping from GUIDs to the corresponding MSGuidDecl.
mutable llvm::FoldingSet<MSGuidDecl> MSGuidDecls;
+ /// Mapping from APValues to the corresponding UnnamedGlobalConstantDecl.
+ mutable llvm::FoldingSet<UnnamedGlobalConstantDecl>
+ UnnamedGlobalConstantDecls;
+
/// Mapping from APValues to the corresponding TemplateParamObjects.
mutable llvm::FoldingSet<TemplateParamObjectDecl> TemplateParamObjectDecls;
@@ -446,6 +453,13 @@ class ASTContext : public RefCountedBase<ASTContext> {
};
llvm::DenseMap<Module*, PerModuleInitializers*> ModuleInitializers;
+ /// This is the top-level (C++20) Named module we are building.
+ Module *CurrentCXXNamedModule = nullptr;
+
+ static constexpr unsigned ConstantArrayTypesLog2InitSize = 8;
+ static constexpr unsigned GeneralTypesLog2InitSize = 9;
+ static constexpr unsigned FunctionProtoTypesLog2InitSize = 12;
+
ASTContext &this_() { return *this; }
public:
@@ -605,9 +619,6 @@ private:
std::unique_ptr<CXXABI> ABI;
CXXABI *createCXXABI(const TargetInfo &T);
- /// The logical -> physical address space map.
- const LangASMap *AddrSpaceMap = nullptr;
-
/// Address space map mangling must be used with language specific
/// address spaces (e.g. OpenCL/CUDA)
bool AddrSpaceMapMangling;
@@ -633,6 +644,20 @@ public:
/// Returns the clang bytecode interpreter context.
interp::Context &getInterpContext();
+ struct CUDAConstantEvalContext {
+ /// Do not allow wrong-sided variables in constant expressions.
+ bool NoWrongSidedVars = false;
+ } CUDAConstantEvalCtx;
+ struct CUDAConstantEvalContextRAII {
+ ASTContext &Ctx;
+ CUDAConstantEvalContext SavedCtx;
+ CUDAConstantEvalContextRAII(ASTContext &Ctx_, bool NoWrongSidedVars)
+ : Ctx(Ctx_), SavedCtx(Ctx_.CUDAConstantEvalCtx) {
+ Ctx_.CUDAConstantEvalCtx.NoWrongSidedVars = NoWrongSidedVars;
+ }
+ ~CUDAConstantEvalContextRAII() { Ctx.CUDAConstantEvalCtx = SavedCtx; }
+ };
+
/// Returns the dynamic AST node parent map context.
ParentMapContext &getParentMapContext();
@@ -672,6 +697,12 @@ public:
SourceManager& getSourceManager() { return SourceMgr; }
const SourceManager& getSourceManager() const { return SourceMgr; }
+ // Cleans up some of the data structures. This allows us to do cleanup
+ // normally done in the destructor earlier. Renders much of the ASTContext
+ // unusable, mostly the actual AST nodes, so should be called when we no
+ // longer need access to the AST.
+ void cleanup();
+
llvm::BumpPtrAllocator &getAllocator() const {
return BumpAlloc;
}
@@ -728,7 +759,8 @@ public:
/// getRealTypeForBitwidth -
/// sets floating point QualTy according to specified bitwidth.
/// Returns empty type if there is no appropriate target types.
- QualType getRealTypeForBitwidth(unsigned DestWidth, bool ExplicitIEEE) const;
+ QualType getRealTypeForBitwidth(unsigned DestWidth,
+ FloatModeKind ExplicitType) const;
bool AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const;
@@ -1024,6 +1056,12 @@ public:
/// Get the initializations to perform when importing a module, if any.
ArrayRef<Decl*> getModuleInitializers(Module *M);
+ /// Set the (C++20) module we are building.
+ void setCurrentNamedModule(Module *M);
+
+ /// Get module under construction, nullptr if this is not a C++20 module.
+ Module *getCurrentNamedModule() const { return CurrentCXXNamedModule; }
+
TranslationUnitDecl *getTranslationUnitDecl() const {
return TUDecl->getMostRecentDecl();
}
@@ -1054,7 +1092,7 @@ public:
CanQualType SignedCharTy, ShortTy, IntTy, LongTy, LongLongTy, Int128Ty;
CanQualType UnsignedCharTy, UnsignedShortTy, UnsignedIntTy, UnsignedLongTy;
CanQualType UnsignedLongLongTy, UnsignedInt128Ty;
- CanQualType FloatTy, DoubleTy, LongDoubleTy, Float128Ty;
+ CanQualType FloatTy, DoubleTy, LongDoubleTy, Float128Ty, Ibm128Ty;
CanQualType ShortAccumTy, AccumTy,
LongAccumTy; // ISO/IEC JTC1 SC22 WG14 N1169 Extension
CanQualType UnsignedShortAccumTy, UnsignedAccumTy, UnsignedLongAccumTy;
@@ -1069,8 +1107,6 @@ public:
CanQualType HalfTy; // [OpenCL 6.1.1.1], ARM NEON
CanQualType BFloat16Ty;
CanQualType Float16Ty; // C11 extension ISO/IEC TS 18661-3
- CanQualType FloatComplexTy, DoubleComplexTy, LongDoubleComplexTy;
- CanQualType Float128ComplexTy;
CanQualType VoidPtrTy, NullPtrTy;
CanQualType DependentTy, OverloadTy, BoundMemberTy, UnknownAnyTy;
CanQualType BuiltinFnTy;
@@ -1096,6 +1132,8 @@ public:
#define RVV_TYPE(Name, Id, SingletonId) \
CanQualType SingletonId;
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) CanQualType SingletonId;
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
// Types for deductions in C++0x [stmt.ranged]'s desugaring. Built on demand.
mutable QualType AutoDeductTy; // Deduction against 'auto'.
@@ -1109,8 +1147,19 @@ public:
mutable TagDecl *MSGuidTagDecl = nullptr;
/// Keep track of CUDA/HIP device-side variables ODR-used by host code.
+ /// This does not include extern shared variables used by device host
+ /// functions as addresses of shared variables are per warp, therefore
+ /// cannot be accessed by host code.
llvm::DenseSet<const VarDecl *> CUDADeviceVarODRUsedByHost;
+ /// Keep track of CUDA/HIP external kernels or device variables ODR-used by
+ /// host code.
+ llvm::DenseSet<const ValueDecl *> CUDAExternalDeviceDeclODRUsedByHost;
+
+ /// Keep track of CUDA/HIP implicit host device functions used on device side
+ /// in device compilation.
+ llvm::DenseSet<const FunctionDecl *> CUDAImplicitHostDeviceFunUsedByDevice;
+
ASTContext(LangOptions &LOpts, SourceManager &SM, IdentifierTable &idents,
SelectorTable &sels, Builtin::Context &builtins,
TranslationUnitKind TUKind);
@@ -1152,8 +1201,9 @@ public:
/// Create a new implicit TU-level CXXRecordDecl or RecordDecl
/// declaration.
- RecordDecl *buildImplicitRecord(StringRef Name,
- RecordDecl::TagKind TK = TTK_Struct) const;
+ RecordDecl *buildImplicitRecord(
+ StringRef Name,
+ RecordDecl::TagKind TK = RecordDecl::TagKind::Struct) const;
/// Create a new implicit TU-level typedef declaration.
TypedefDecl *buildImplicitTypedef(QualType T, StringRef Name) const;
@@ -1253,11 +1303,11 @@ public:
/// declaration of a function with an exception specification is permitted
/// and preserved. Other type sugar (for instance, typedefs) is not.
QualType getFunctionTypeWithExceptionSpec(
- QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI);
+ QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const;
/// Determine whether two function types are the same, ignoring
/// exception specifications in cases where they're part of the type.
- bool hasSameFunctionTypeIgnoringExceptionSpec(QualType T, QualType U);
+ bool hasSameFunctionTypeIgnoringExceptionSpec(QualType T, QualType U) const;
/// Change the exception specification on a function once it is
/// delay-parsed, instantiated, or computed.
@@ -1303,6 +1353,9 @@ public:
CanQualType getDecayedType(CanQualType T) const {
return CanQualType::CreateUnsafe(getDecayedType((QualType) T));
}
+ /// Return the uniqued reference to a specified decay from the original
+ /// type to the decayed type.
+ QualType getDecayedType(QualType Orig, QualType Decayed) const;
/// Return the uniqued reference to the atomic type for the specified
/// type.
@@ -1322,13 +1375,13 @@ public:
/// Return a write_only pipe type for the specified type.
QualType getWritePipeType(QualType T) const;
- /// Return an extended integer type with the specified signedness and bit
+ /// Return a bit-precise integer type with the specified signedness and bit
/// count.
- QualType getExtIntType(bool Unsigned, unsigned NumBits) const;
+ QualType getBitIntType(bool Unsigned, unsigned NumBits) const;
- /// Return a dependent extended integer type with the specified signedness and
- /// bit count.
- QualType getDependentExtIntType(bool Unsigned, Expr *BitsExpr) const;
+ /// Return a dependent bit-precise integer type with the specified signedness
+ /// and bit count.
+ QualType getDependentBitIntType(bool Unsigned, Expr *BitsExpr) const;
/// Gets the struct used to keep track of the extended descriptor for
/// pointer to blocks.
@@ -1340,6 +1393,12 @@ public:
/// Get address space for OpenCL type.
LangAS getOpenCLTypeAddrSpace(const Type *T) const;
+ /// Returns default address space based on OpenCL version and enabled features
+ inline LangAS getDefaultOpenCLPointeeAddrSpace() {
+ return LangOpts.OpenCLGenericAddressSpace ? LangAS::opencl_generic
+ : LangAS::opencl_private;
+ }
+
void setcudaConfigureCallDecl(FunctionDecl *FD) {
cudaConfigureCallDecl = FD;
}
@@ -1376,8 +1435,7 @@ public:
/// Return a non-unique reference to the type for a variable array of
/// the specified element type.
QualType getVariableArrayType(QualType EltTy, Expr *NumElts,
- ArrayType::ArraySizeModifier ASM,
- unsigned IndexTypeQuals,
+ ArraySizeModifier ASM, unsigned IndexTypeQuals,
SourceRange Brackets) const;
/// Return a non-unique reference to the type for a dependently-sized
@@ -1386,21 +1444,19 @@ public:
/// FIXME: We will need these to be uniqued, or at least comparable, at some
/// point.
QualType getDependentSizedArrayType(QualType EltTy, Expr *NumElts,
- ArrayType::ArraySizeModifier ASM,
+ ArraySizeModifier ASM,
unsigned IndexTypeQuals,
SourceRange Brackets) const;
/// Return a unique reference to the type for an incomplete array of
/// the specified element type.
- QualType getIncompleteArrayType(QualType EltTy,
- ArrayType::ArraySizeModifier ASM,
+ QualType getIncompleteArrayType(QualType EltTy, ArraySizeModifier ASM,
unsigned IndexTypeQuals) const;
/// Return the unique reference to the type for a constant array of
/// the specified element type.
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize,
- const Expr *SizeExpr,
- ArrayType::ArraySizeModifier ASM,
+ const Expr *SizeExpr, ArraySizeModifier ASM,
unsigned IndexTypeQuals) const;
/// Return a type for a constant array for a string literal of the
@@ -1427,21 +1483,27 @@ public:
/// Return the unique reference to a scalable vector type of the specified
/// element type and scalable number of elements.
+ /// For RISC-V, number of fields is also provided when it fetching for
+ /// tuple type.
///
/// \pre \p EltTy must be a built-in type.
- QualType getScalableVectorType(QualType EltTy, unsigned NumElts) const;
+ QualType getScalableVectorType(QualType EltTy, unsigned NumElts,
+ unsigned NumFields = 1) const;
+
+ /// Return a WebAssembly externref type.
+ QualType getWebAssemblyExternrefType() const;
/// Return the unique reference to a vector type of the specified
/// element type and size.
///
/// \pre \p VectorType must be a built-in type.
QualType getVectorType(QualType VectorType, unsigned NumElts,
- VectorType::VectorKind VecKind) const;
+ VectorKind VecKind) const;
/// Return the unique reference to the type for a dependently sized vector of
/// the specified element type.
QualType getDependentVectorType(QualType VectorType, Expr *SizeExpr,
SourceLocation AttrLoc,
- VectorType::VectorKind VecKind) const;
+ VectorKind VecKind) const;
/// Return the unique reference to an extended vector type
/// of the specified element type and size.
@@ -1497,6 +1559,12 @@ private:
QualType getFunctionTypeInternal(QualType ResultTy, ArrayRef<QualType> Args,
const FunctionProtoType::ExtProtoInfo &EPI,
bool OnlyWantCanonical) const;
+ QualType
+ getAutoTypeInternal(QualType DeducedType, AutoTypeKeyword Keyword,
+ bool IsDependent, bool IsPack = false,
+ ConceptDecl *TypeConstraintConcept = nullptr,
+ ArrayRef<TemplateArgument> TypeConstraintArgs = {},
+ bool IsCanon = false) const;
public:
/// Return the unique reference to the type for the specified type
@@ -1515,6 +1583,9 @@ public:
return getTypeDeclTypeSlow(Decl);
}
+ QualType getUsingType(const UsingShadowDecl *Found,
+ QualType Underlying) const;
+
/// Return the unique reference to the type for the specified
/// typedef-name decl.
QualType getTypedefType(const TypedefNameDecl *Decl,
@@ -1524,16 +1595,23 @@ public:
QualType getEnumType(const EnumDecl *Decl) const;
+ QualType
+ getUnresolvedUsingType(const UnresolvedUsingTypenameDecl *Decl) const;
+
QualType getInjectedClassNameType(CXXRecordDecl *Decl, QualType TST) const;
- QualType getAttributedType(attr::Kind attrKind,
- QualType modifiedType,
- QualType equivalentType);
+ QualType getAttributedType(attr::Kind attrKind, QualType modifiedType,
+ QualType equivalentType) const;
- QualType getSubstTemplateTypeParmType(const TemplateTypeParmType *Replaced,
- QualType Replacement) const;
- QualType getSubstTemplateTypeParmPackType(
- const TemplateTypeParmType *Replaced,
+ QualType getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr,
+ QualType Wrapped);
+
+ QualType
+ getSubstTemplateTypeParmType(QualType Replacement, Decl *AssociatedDecl,
+ unsigned Index,
+ std::optional<unsigned> PackIndex) const;
+ QualType getSubstTemplateTypeParmPackType(Decl *AssociatedDecl,
+ unsigned Index, bool Final,
const TemplateArgument &ArgPack);
QualType
@@ -1550,7 +1628,7 @@ public:
ArrayRef<TemplateArgument> Args) const;
QualType getTemplateSpecializationType(TemplateName T,
- const TemplateArgumentListInfo &Args,
+ ArrayRef<TemplateArgumentLoc> Args,
QualType Canon = QualType()) const;
TypeSourceInfo *
@@ -1571,10 +1649,9 @@ public:
const IdentifierInfo *Name,
QualType Canon = QualType()) const;
- QualType getDependentTemplateSpecializationType(ElaboratedTypeKeyword Keyword,
- NestedNameSpecifier *NNS,
- const IdentifierInfo *Name,
- const TemplateArgumentListInfo &Args) const;
+ QualType getDependentTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name, ArrayRef<TemplateArgumentLoc> Args) const;
QualType getDependentTemplateSpecializationType(
ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
const IdentifierInfo *Name, ArrayRef<TemplateArgument> Args) const;
@@ -1595,7 +1672,7 @@ public:
/// elsewhere, such as if the pattern contains a placeholder type or
/// if this is the canonical type of another pack expansion type.
QualType getPackExpansionType(QualType Pattern,
- Optional<unsigned> NumExpansions,
+ std::optional<unsigned> NumExpansions,
bool ExpectPackInType = true);
QualType getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
@@ -1627,9 +1704,11 @@ public:
/// Return a ObjCObjectPointerType type for the given ObjCObjectType.
QualType getObjCObjectPointerType(QualType OIT) const;
- /// GCC extension.
- QualType getTypeOfExprType(Expr *e) const;
- QualType getTypeOfType(QualType t) const;
+ /// C23 feature and GCC extension.
+ QualType getTypeOfExprType(Expr *E, TypeOfKind Kind) const;
+ QualType getTypeOfType(QualType QT, TypeOfKind Kind) const;
+
+ QualType getReferenceQualifiedType(const Expr *e) const;
/// C++11 decltype.
QualType getDecltypeType(Expr *e, QualType UnderlyingType) const;
@@ -1650,6 +1729,10 @@ public:
/// C++11 deduction pattern for 'auto &&' type.
QualType getAutoRRefDeductType() const;
+ /// Remove any type constraints from a template parameter type, for
+ /// equivalence comparison of template parameters.
+ QualType getUnconstrainedType(QualType T) const;
+
/// C++17 deduced class template specialization type.
QualType getDeducedTemplateSpecializationType(TemplateName Template,
QualType DeducedType,
@@ -2106,16 +2189,20 @@ public:
TemplateName getQualifiedTemplateName(NestedNameSpecifier *NNS,
bool TemplateKeyword,
- TemplateDecl *Template) const;
+ TemplateName Template) const;
TemplateName getDependentTemplateName(NestedNameSpecifier *NNS,
const IdentifierInfo *Name) const;
TemplateName getDependentTemplateName(NestedNameSpecifier *NNS,
OverloadedOperatorKind Operator) const;
- TemplateName getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param,
- TemplateName replacement) const;
- TemplateName getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param,
- const TemplateArgument &ArgPack) const;
+ TemplateName
+ getSubstTemplateTemplateParm(TemplateName replacement, Decl *AssociatedDecl,
+ unsigned Index,
+ std::optional<unsigned> PackIndex) const;
+ TemplateName getSubstTemplateTemplateParmPack(const TemplateArgument &ArgPack,
+ Decl *AssociatedDecl,
+ unsigned Index,
+ bool Final) const;
enum GetBuiltinTypeError {
/// No error
@@ -2179,6 +2266,17 @@ public:
/// false otherwise.
bool areLaxCompatibleSveTypes(QualType FirstType, QualType SecondType);
+ /// Return true if the given types are an RISC-V vector builtin type and a
+ /// VectorType that is a fixed-length representation of the RISC-V vector
+ /// builtin type for a specific vector-length.
+ bool areCompatibleRVVTypes(QualType FirstType, QualType SecondType);
+
+ /// Return true if the given vector types are lax-compatible RISC-V vector
+ /// types as defined by -flax-vector-conversions=, which permits implicit
+ /// conversions between vectors with different number of elements and/or
+ /// incompatible element types, false otherwise.
+ bool areLaxCompatibleRVVTypes(QualType FirstType, QualType SecondType);
+
/// Return true if the type has been explicitly qualified with ObjC ownership.
/// A type may be implicitly qualified with ownership under ObjC ARC, and in
/// some cases the compiler treats these differently.
@@ -2225,13 +2323,13 @@ public:
CharUnits getTypeSizeInChars(QualType T) const;
CharUnits getTypeSizeInChars(const Type *T) const;
- Optional<CharUnits> getTypeSizeInCharsIfKnown(QualType Ty) const {
+ std::optional<CharUnits> getTypeSizeInCharsIfKnown(QualType Ty) const {
if (Ty->isIncompleteType() || Ty->isDependentType())
- return None;
+ return std::nullopt;
return getTypeSizeInChars(Ty);
}
- Optional<CharUnits> getTypeSizeInCharsIfKnown(const Type *Ty) const {
+ std::optional<CharUnits> getTypeSizeInCharsIfKnown(const Type *Ty) const {
return getTypeSizeInCharsIfKnown(QualType(Ty, 0));
}
@@ -2287,6 +2385,9 @@ public:
bool isAlignmentRequired(const Type *T) const;
bool isAlignmentRequired(QualType T) const;
+ /// More type predicates useful for type checking/promotion
+ bool isPromotableIntegerType(QualType T) const; // C99 6.3.1.1p2
+
/// Return the "preferred" alignment of the specified type \p T for
/// the current target, in bits.
///
@@ -2415,7 +2516,9 @@ public:
/// Return true if the specified type has unique object representations
/// according to (C++17 [meta.unary.prop]p9)
- bool hasUniqueObjectRepresentations(QualType Ty) const;
+ bool
+ hasUniqueObjectRepresentations(QualType Ty,
+ bool CheckIfTriviallyCopyable = true) const;
//===--------------------------------------------------------------------===//
// Type Operators
@@ -2452,6 +2555,9 @@ public:
return getCanonicalType(T1) == getCanonicalType(T2);
}
+ /// Determine whether the given expressions \p X and \p Y are equivalent.
+ bool hasSameExpr(const Expr *X, const Expr *Y) const;
+
/// Return this type as a completely-unqualified array type,
/// capturing the qualifiers in \p Quals.
///
@@ -2476,9 +2582,9 @@ public:
bool hasSameNullabilityTypeQualifier(QualType SubT, QualType SuperT,
bool IsParam) const {
- auto SubTnullability = SubT->getNullability(*this);
- auto SuperTnullability = SuperT->getNullability(*this);
- if (SubTnullability.hasValue() == SuperTnullability.hasValue()) {
+ auto SubTnullability = SubT->getNullability();
+ auto SuperTnullability = SuperT->getNullability();
+ if (SubTnullability.has_value() == SuperTnullability.has_value()) {
// Neither has nullability; return true
if (!SubTnullability)
return true;
@@ -2505,8 +2611,10 @@ public:
bool ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
const ObjCMethodDecl *MethodImp);
- bool UnwrapSimilarTypes(QualType &T1, QualType &T2);
- void UnwrapSimilarArrayTypes(QualType &T1, QualType &T2);
+ bool UnwrapSimilarTypes(QualType &T1, QualType &T2,
+ bool AllowPiMismatch = true);
+ void UnwrapSimilarArrayTypes(QualType &T1, QualType &T2,
+ bool AllowPiMismatch = true);
/// Determine if two types are similar, according to the C++ rules. That is,
/// determine if they are the same other than qualifiers on the initial
@@ -2569,11 +2677,40 @@ public:
/// template name uses the shortest form of the dependent
/// nested-name-specifier, which itself contains all canonical
/// types, values, and templates.
- TemplateName getCanonicalTemplateName(TemplateName Name) const;
+ TemplateName getCanonicalTemplateName(const TemplateName &Name) const;
/// Determine whether the given template names refer to the same
/// template.
- bool hasSameTemplateName(TemplateName X, TemplateName Y);
+ bool hasSameTemplateName(const TemplateName &X, const TemplateName &Y) const;
+
+ /// Determine whether the two declarations refer to the same entity.
+ bool isSameEntity(const NamedDecl *X, const NamedDecl *Y) const;
+
+ /// Determine whether two template parameter lists are similar enough
+ /// that they may be used in declarations of the same template.
+ bool isSameTemplateParameterList(const TemplateParameterList *X,
+ const TemplateParameterList *Y) const;
+
+ /// Determine whether two template parameters are similar enough
+ /// that they may be used in declarations of the same template.
+ bool isSameTemplateParameter(const NamedDecl *X, const NamedDecl *Y) const;
+
+ /// Determine whether two 'requires' expressions are similar enough that they
+ /// may be used in re-declarations.
+ ///
+ /// Use of 'requires' isn't mandatory, works with constraints expressed in
+ /// other ways too.
+ bool isSameConstraintExpr(const Expr *XCE, const Expr *YCE) const;
+
+ /// Determine whether two type contraint are similar enough that they could
+ /// used in declarations of the same template.
+ bool isSameTypeConstraint(const TypeConstraint *XTC,
+ const TypeConstraint *YTC) const;
+
+ /// Determine whether two default template arguments are similar enough
+ /// that they may be used in declarations of the same template.
+ bool isSameDefaultTemplateArgument(const NamedDecl *X,
+ const NamedDecl *Y) const;
/// Retrieve the "canonical" template argument.
///
@@ -2614,6 +2751,10 @@ public:
/// Return number of constant array elements.
uint64_t getConstantArrayElementCount(const ConstantArrayType *CA) const;
+ /// Return number of elements initialized in an ArrayInitLoopExpr.
+ uint64_t
+ getArrayInitLoopExprElementCount(const ArrayInitLoopExpr *AILE) const;
+
/// Perform adjustment on the parameter type of a function.
///
/// This routine adjusts the given parameter type @p T to the actual
@@ -2671,22 +2812,6 @@ public:
/// long double and double on AArch64 will return 0).
int getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const;
- /// Return a real floating point or a complex type (based on
- /// \p typeDomain/\p typeSize).
- ///
- /// \param typeDomain a real floating point or complex type.
- /// \param typeSize a real floating point or complex type.
- QualType getFloatingTypeOfSizeWithinDomain(QualType typeSize,
- QualType typeDomain) const;
-
- unsigned getTargetAddressSpace(QualType T) const {
- return getTargetAddressSpace(T.getQualifiers());
- }
-
- unsigned getTargetAddressSpace(Qualifiers Q) const {
- return getTargetAddressSpace(Q.getAddressSpace());
- }
-
unsigned getTargetAddressSpace(LangAS AS) const;
LangAS getLangASForBuiltinAddressSpace(unsigned AS) const;
@@ -2699,6 +2824,23 @@ public:
return AddrSpaceMapMangling || isTargetAddressSpace(AS);
}
+ // Merges two exception specifications, such that the resulting
+ // exception spec is the union of both. For example, if either
+ // of them can throw something, the result can throw it as well.
+ FunctionProtoType::ExceptionSpecInfo
+ mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1,
+ FunctionProtoType::ExceptionSpecInfo ESI2,
+ SmallVectorImpl<QualType> &ExceptionTypeStorage,
+ bool AcceptDependent);
+
+ // For two "same" types, return a type which has
+ // the common sugar between them. If Unqualified is true,
+ // both types need only be the same unqualified type.
+ // The result will drop the qualifiers which do not occur
+ // in both types.
+ QualType getCommonSugaredType(QualType X, QualType Y,
+ bool Unqualified = false);
+
private:
// Helper for integer ordering
unsigned getIntegerRank(const Type *T) const;
@@ -2716,14 +2858,20 @@ public:
bool typesAreBlockPointerCompatible(QualType, QualType);
bool isObjCIdType(QualType T) const {
+ if (const auto *ET = dyn_cast<ElaboratedType>(T))
+ T = ET->getNamedType();
return T == getObjCIdType();
}
bool isObjCClassType(QualType T) const {
+ if (const auto *ET = dyn_cast<ElaboratedType>(T))
+ T = ET->getNamedType();
return T == getObjCClassType();
}
bool isObjCSelType(QualType T) const {
+ if (const auto *ET = dyn_cast<ElaboratedType>(T))
+ T = ET->getNamedType();
return T == getObjCSelType();
}
@@ -2749,10 +2897,12 @@ public:
bool canBindObjCObjectType(QualType To, QualType From);
// Functions for calculating composite types
- QualType mergeTypes(QualType, QualType, bool OfBlockPointer=false,
- bool Unqualified = false, bool BlockReturnType = false);
- QualType mergeFunctionTypes(QualType, QualType, bool OfBlockPointer=false,
- bool Unqualified = false, bool AllowCXX = false);
+ QualType mergeTypes(QualType, QualType, bool OfBlockPointer = false,
+ bool Unqualified = false, bool BlockReturnType = false,
+ bool IsConditionalOperator = false);
+ QualType mergeFunctionTypes(QualType, QualType, bool OfBlockPointer = false,
+ bool Unqualified = false, bool AllowCXX = false,
+ bool IsConditionalOperator = false);
QualType mergeFunctionParameterTypes(QualType, QualType,
bool OfBlockPointer = false,
bool Unqualified = false);
@@ -2923,7 +3073,7 @@ public:
}
GVALinkage GetGVALinkageForFunction(const FunctionDecl *FD) const;
- GVALinkage GetGVALinkageForVariable(const VarDecl *VD);
+ GVALinkage GetGVALinkageForVariable(const VarDecl *VD) const;
/// Determines if the decl can be CodeGen'ed or deserialized from PCH
/// lazily, only when used; this is only relevant for function or file scoped
@@ -2954,7 +3104,8 @@ public:
DeclaratorDecl *getDeclaratorForUnnamedTagDecl(const TagDecl *TD);
void setManglingNumber(const NamedDecl *ND, unsigned Number);
- unsigned getManglingNumber(const NamedDecl *ND) const;
+ unsigned getManglingNumber(const NamedDecl *ND,
+ bool ForAuxTarget = false) const;
void setStaticLocalNumber(const VarDecl *VD, unsigned Number);
unsigned getStaticLocalNumber(const VarDecl *VD) const;
@@ -2985,6 +3136,11 @@ public:
/// GUID value.
MSGuidDecl *getMSGuidDecl(MSGuidDeclParts Parts) const;
+ /// Return a declaration for a uniquified anonymous global constant
+ /// corresponding to a given APValue.
+ UnnamedGlobalConstantDecl *
+ getUnnamedGlobalConstantDecl(QualType Ty, const APValue &Value) const;
+
/// Return the template parameter object of the given type with the given
/// value.
TemplateParamObjectDecl *getTemplateParamObjectDecl(QualType T,
@@ -2994,6 +3150,9 @@ public:
/// valid feature names.
ParsedTargetAttr filterFunctionTargetAttrs(const TargetAttr *TD) const;
+ std::vector<std::string>
+ filterFunctionTargetVersionAttrs(const TargetVersionAttr *TV) const;
+
void getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
const FunctionDecl *) const;
void getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
@@ -3065,7 +3224,6 @@ private:
public:
ObjCEncOptions() : Bits(0) {}
- ObjCEncOptions(const ObjCEncOptions &RHS) : Bits(RHS.Bits) {}
#define OPT_LIST(V) \
V(ExpandPointedToStructures, 0) \
@@ -3086,11 +3244,11 @@ OPT_LIST(V)
#undef OPT_LIST
- LLVM_NODISCARD ObjCEncOptions keepingOnly(ObjCEncOptions Mask) const {
+ [[nodiscard]] ObjCEncOptions keepingOnly(ObjCEncOptions Mask) const {
return Bits & Mask.Bits;
}
- LLVM_NODISCARD ObjCEncOptions forComponentType() const {
+ [[nodiscard]] ObjCEncOptions forComponentType() const {
ObjCEncOptions Mask = ObjCEncOptions()
.setIsOutermostType()
.setIsStructField();
@@ -3201,39 +3359,18 @@ public:
/// Return a new OMPTraitInfo object owned by this context.
OMPTraitInfo &getNewOMPTraitInfo();
- /// Whether a C++ static variable may be externalized.
- bool mayExternalizeStaticVar(const Decl *D) const;
+ /// Whether a C++ static variable or CUDA/HIP kernel may be externalized.
+ bool mayExternalize(const Decl *D) const;
- /// Whether a C++ static variable should be externalized.
- bool shouldExternalizeStaticVar(const Decl *D) const;
+ /// Whether a C++ static variable or CUDA/HIP kernel should be externalized.
+ bool shouldExternalize(const Decl *D) const;
StringRef getCUIDHash() const;
- void AddSYCLKernelNamingDecl(const CXXRecordDecl *RD);
- bool IsSYCLKernelNamingDecl(const NamedDecl *RD) const;
- unsigned GetSYCLKernelNamingIndex(const NamedDecl *RD);
- /// A SourceLocation to store whether we have evaluated a kernel name already,
- /// and where it happened. If so, we need to diagnose an illegal use of the
- /// builtin.
- llvm::MapVector<const SYCLUniqueStableNameExpr *, std::string>
- SYCLUniqueStableNameEvaluatedValues;
-
private:
/// All OMPTraitInfo objects live in this collection, one per
/// `pragma omp [begin] declare variant` directive.
SmallVector<std::unique_ptr<OMPTraitInfo>, 4> OMPTraitInfoVector;
-
- /// A list of the (right now just lambda decls) declarations required to
- /// name all the SYCL kernels in the translation unit, so that we can get the
- /// correct kernel name, as well as implement
- /// __builtin_sycl_unique_stable_name.
- llvm::DenseMap<const DeclContext *,
- llvm::SmallPtrSet<const CXXRecordDecl *, 4>>
- SYCLKernelNamingTypes;
- std::unique_ptr<ItaniumMangleContext> SYCLKernelFilterContext;
- void FilterSYCLKernelNamingDecls(
- const CXXRecordDecl *RD,
- llvm::SmallVectorImpl<const CXXRecordDecl *> &Decls);
};
/// Insertion operator for diagnostics.
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTDiagnostic.h b/contrib/llvm-project/clang/include/clang/AST/ASTDiagnostic.h
index d6549e12d92a..ef2224982862 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTDiagnostic.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTDiagnostic.h
@@ -9,6 +9,7 @@
#ifndef LLVM_CLANG_AST_ASTDIAGNOSTIC_H
#define LLVM_CLANG_AST_ASTDIAGNOSTIC_H
+#include "clang/AST/Type.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticAST.h"
@@ -31,6 +32,12 @@ namespace clang {
SmallVectorImpl<char> &Output,
void *Cookie,
ArrayRef<intptr_t> QualTypeVals);
+
+ /// Returns a desugared version of the QualType, and marks ShouldAKA as true
+ /// whenever we remove significant sugar from the type. Make sure ShouldAKA
+ /// is initialized before passing it in.
+ QualType desugarForDiagnostic(ASTContext &Context, QualType QT,
+ bool &ShouldAKA);
} // end namespace clang
#endif
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTDumper.h b/contrib/llvm-project/clang/include/clang/AST/ASTDumper.h
index a154bc2db3a7..71ac467e5104 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTDumper.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTDumper.h
@@ -32,6 +32,7 @@ public:
TextNodeDumper &doGetNodeDelegate() { return NodeDumper; }
+ void dumpInvalidDeclContext(const DeclContext *DC);
void dumpLookups(const DeclContext *DC, bool DumpDecls);
template <typename SpecializationDecl>
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTFwd.h b/contrib/llvm-project/clang/include/clang/AST/ASTFwd.h
index 649b57113424..8823663386ea 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTFwd.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTFwd.h
@@ -30,6 +30,11 @@ class OMPClause;
#define GEN_CLANG_CLAUSE_CLASS
#define CLAUSE_CLASS(Enum, Str, Class) class Class;
#include "llvm/Frontend/OpenMP/OMP.inc"
+class Attr;
+#define ATTR(A) class A##Attr;
+#include "clang/Basic/AttrList.inc"
+class ObjCProtocolLoc;
+class ConceptReference;
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTImportError.h b/contrib/llvm-project/clang/include/clang/AST/ASTImportError.h
new file mode 100644
index 000000000000..728314ca0936
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTImportError.h
@@ -0,0 +1,50 @@
+//===- ASTImportError.h - Define errors while importing AST -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTImportError class which basically defines the kind
+// of error while importing AST .
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_ASTIMPORTERROR_H
+#define LLVM_CLANG_AST_ASTIMPORTERROR_H
+
+#include "llvm/Support/Error.h"
+
+namespace clang {
+
+class ASTImportError : public llvm::ErrorInfo<ASTImportError> {
+public:
+ /// \brief Kind of error when importing an AST component.
+ enum ErrorKind {
+ NameConflict, /// Naming ambiguity (likely ODR violation).
+ UnsupportedConstruct, /// Not supported node or case.
+ Unknown /// Other error.
+ };
+
+ ErrorKind Error;
+
+ static char ID;
+
+ ASTImportError() : Error(Unknown) {}
+ ASTImportError(const ASTImportError &Other) : Error(Other.Error) {}
+ ASTImportError &operator=(const ASTImportError &Other) {
+ Error = Other.Error;
+ return *this;
+ }
+ ASTImportError(ErrorKind Error) : Error(Error) {}
+
+ std::string toString() const;
+
+ void log(llvm::raw_ostream &OS) const override;
+ std::error_code convertToErrorCode() const override;
+};
+
+} // namespace clang
+
+#endif // LLVM_CLANG_AST_ASTIMPORTERROR_H
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTImporter.h b/contrib/llvm-project/clang/include/clang/AST/ASTImporter.h
index 17e673a8471a..4ffd91384657 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTImporter.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTImporter.h
@@ -14,7 +14,7 @@
#ifndef LLVM_CLANG_AST_ASTIMPORTER_H
#define LLVM_CLANG_AST_ASTIMPORTER_H
-#include "clang/AST/APValue.h"
+#include "clang/AST/ASTImportError.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/ExprCXX.h"
@@ -27,9 +27,8 @@
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/Error.h"
+#include <optional>
#include <utility>
namespace clang {
@@ -49,33 +48,6 @@ class TagDecl;
class TranslationUnitDecl;
class TypeSourceInfo;
- class ImportError : public llvm::ErrorInfo<ImportError> {
- public:
- /// \brief Kind of error when importing an AST component.
- enum ErrorKind {
- NameConflict, /// Naming ambiguity (likely ODR violation).
- UnsupportedConstruct, /// Not supported node or case.
- Unknown /// Other error.
- };
-
- ErrorKind Error;
-
- static char ID;
-
- ImportError() : Error(Unknown) {}
- ImportError(const ImportError &Other) : Error(Other.Error) {}
- ImportError &operator=(const ImportError &Other) {
- Error = Other.Error;
- return *this;
- }
- ImportError(ErrorKind Error) : Error(Error) { }
-
- std::string toString() const;
-
- void log(raw_ostream &OS) const override;
- std::error_code convertToErrorCode() const override;
- };
-
// \brief Returns with a list of declarations started from the canonical decl
// then followed by subsequent decls in the translation unit.
// This gives a canonical list for each entry in the redecl chain.
@@ -259,7 +231,7 @@ class TypeSourceInfo;
/// imported. The same declaration may or may not be included in
/// ImportedDecls. This map is updated continuously during imports and never
/// cleared (like ImportedDecls).
- llvm::DenseMap<Decl *, ImportError> ImportDeclErrors;
+ llvm::DenseMap<Decl *, ASTImportError> ImportDeclErrors;
/// Mapping from the already-imported declarations in the "to"
/// context to the corresponding declarations in the "from" context.
@@ -286,6 +258,7 @@ class TypeSourceInfo;
FoundDeclsTy findDeclsInToCtx(DeclContext *DC, DeclarationName Name);
void AddToLookupTable(Decl *ToD);
+ llvm::Error ImportAttrs(Decl *ToD, Decl *FromD);
protected:
/// Can be overwritten by subclasses to implement their own import logic.
@@ -332,7 +305,7 @@ class TypeSourceInfo;
/// \param From Object to import.
/// \return Error information (success or error).
template <typename ImportT>
- LLVM_NODISCARD llvm::Error importInto(ImportT &To, const ImportT &From) {
+ [[nodiscard]] llvm::Error importInto(ImportT &To, const ImportT &From) {
auto ToOrErr = Import(From);
if (ToOrErr)
To = *ToOrErr;
@@ -379,6 +352,9 @@ class TypeSourceInfo;
return Import(const_cast<Decl *>(FromD));
}
+ llvm::Expected<InheritedConstructor>
+ Import(const InheritedConstructor &From);
+
/// Return the copy of the given declaration in the "to" context if
/// it has already been imported from the "from" context. Otherwise return
/// nullptr.
@@ -392,7 +368,7 @@ class TypeSourceInfo;
/// in the "to" context was imported. If it was not imported or of the wrong
/// type a null value is returned.
template <typename DeclT>
- llvm::Optional<DeclT *> getImportedFromDecl(const DeclT *ToD) const {
+ std::optional<DeclT *> getImportedFromDecl(const DeclT *ToD) const {
auto FromI = ImportedFromDecls.find(ToD);
if (FromI == ImportedFromDecls.end())
return {};
@@ -507,7 +483,7 @@ class TypeSourceInfo;
/// Import the definition of the given declaration, including all of
/// the declarations it contains.
- LLVM_NODISCARD llvm::Error ImportDefinition(Decl *From);
+ [[nodiscard]] llvm::Error ImportDefinition(Decl *From);
/// Cope with a name conflict when importing a declaration into the
/// given context.
@@ -589,10 +565,10 @@ class TypeSourceInfo;
/// Return if import of the given declaration has failed and if yes
/// the kind of the problem. This gives the first error encountered with
/// the node.
- llvm::Optional<ImportError> getImportDeclErrorIfAny(Decl *FromD) const;
+ std::optional<ASTImportError> getImportDeclErrorIfAny(Decl *FromD) const;
/// Mark (newly) imported declaration with error.
- void setImportDeclError(Decl *From, ImportError Error);
+ void setImportDeclError(Decl *From, ASTImportError Error);
/// Determine whether the given types are structurally
/// equivalent.
@@ -602,8 +578,8 @@ class TypeSourceInfo;
/// Determine the index of a field in its parent record.
/// F should be a field (or indirect field) declaration.
/// \returns The index of the field in its parent context (starting from 0).
- /// On error `None` is returned (parent context is non-record).
- static llvm::Optional<unsigned> getFieldIndex(Decl *F);
+ /// On error `std::nullopt` is returned (parent context is non-record).
+ static std::optional<unsigned> getFieldIndex(Decl *F);
};
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTImporterLookupTable.h b/contrib/llvm-project/clang/include/clang/AST/ASTImporterLookupTable.h
index 47dca2033839..2dbc44c5dcd4 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTImporterLookupTable.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTImporterLookupTable.h
@@ -21,7 +21,6 @@
namespace clang {
-class ASTContext;
class NamedDecl;
class DeclContext;
@@ -75,6 +74,10 @@ public:
// The function should be called when the old context is definitely different
// from the new.
void update(NamedDecl *ND, DeclContext *OldDC);
+ // Same as 'update' but allow if 'ND' is not in the table or the old context
+ // is the same as the new.
+ // FIXME: The old redeclaration context is not handled.
+ void updateForced(NamedDecl *ND, DeclContext *OldDC);
using LookupResult = DeclList;
LookupResult lookup(DeclContext *DC, DeclarationName Name) const;
// Check if the `ND` is within the lookup table (with its current name) in
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTImporterSharedState.h b/contrib/llvm-project/clang/include/clang/AST/ASTImporterSharedState.h
index 829eb1c611c3..446d7ee61ea5 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTImporterSharedState.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTImporterSharedState.h
@@ -1,9 +1,8 @@
//===- ASTImporterSharedState.h - ASTImporter specific state --*- C++ -*---===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -15,11 +14,11 @@
#ifndef LLVM_CLANG_AST_ASTIMPORTERSHAREDSTATE_H
#define LLVM_CLANG_AST_ASTIMPORTERSHAREDSTATE_H
+#include "clang/AST/ASTImportError.h"
#include "clang/AST/ASTImporterLookupTable.h"
#include "clang/AST/Decl.h"
#include "llvm/ADT/DenseMap.h"
-// FIXME We need this because of ImportError.
-#include "clang/AST/ASTImporter.h"
+#include <optional>
namespace clang {
@@ -38,7 +37,10 @@ class ASTImporterSharedState {
/// imported. The same declaration may or may not be included in
/// ImportedFromDecls. This map is updated continuously during imports and
/// never cleared (like ImportedFromDecls).
- llvm::DenseMap<Decl *, ImportError> ImportErrors;
+ llvm::DenseMap<Decl *, ASTImportError> ImportErrors;
+
+ /// Set of the newly created declarations.
+ llvm::DenseSet<Decl *> NewDecls;
// FIXME put ImportedFromDecls here!
// And from that point we can better encapsulate the lookup table.
@@ -64,17 +66,21 @@ public:
LookupTable->remove(ND);
}
- llvm::Optional<ImportError> getImportDeclErrorIfAny(Decl *ToD) const {
+ std::optional<ASTImportError> getImportDeclErrorIfAny(Decl *ToD) const {
auto Pos = ImportErrors.find(ToD);
if (Pos != ImportErrors.end())
return Pos->second;
else
- return Optional<ImportError>();
+ return std::nullopt;
}
- void setImportDeclError(Decl *To, ImportError Error) {
+ void setImportDeclError(Decl *To, ASTImportError Error) {
ImportErrors[To] = Error;
}
+
+ bool isNewDecl(const Decl *ToD) const { return NewDecls.count(ToD); }
+
+ void markAsNewDecl(Decl *ToD) { NewDecls.insert(ToD); }
};
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTLambda.h b/contrib/llvm-project/clang/include/clang/AST/ASTLambda.h
index 6fd82d6af490..646cb574847f 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTLambda.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTLambda.h
@@ -35,6 +35,21 @@ inline bool isLambdaCallOperator(const DeclContext *DC) {
return isLambdaCallOperator(cast<CXXMethodDecl>(DC));
}
+inline bool isLambdaCallWithExplicitObjectParameter(const DeclContext *DC) {
+ return isLambdaCallOperator(DC) &&
+ cast<CXXMethodDecl>(DC)->isExplicitObjectMemberFunction();
+}
+
+inline bool isLambdaCallWithImplicitObjectParameter(const DeclContext *DC) {
+ return isLambdaCallOperator(DC) &&
+ // FIXME: Checking for a null type is not great
+ // but lambdas with invalid captures or whose closure parameter list
+ // have not fully been parsed may have a call operator whose type is
+ // null.
+ !cast<CXXMethodDecl>(DC)->getType().isNull() &&
+ !cast<CXXMethodDecl>(DC)->isExplicitObjectMemberFunction();
+}
+
inline bool isGenericLambdaCallOperatorSpecialization(const CXXMethodDecl *MD) {
if (!MD) return false;
const CXXRecordDecl *LambdaClass = MD->getParent();
@@ -65,8 +80,8 @@ inline bool isGenericLambdaCallOperatorSpecialization(DeclContext *DC) {
}
inline bool isGenericLambdaCallOperatorOrStaticInvokerSpecialization(
- DeclContext *DC) {
- CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(DC);
+ const DeclContext *DC) {
+ const auto *MD = dyn_cast<CXXMethodDecl>(DC);
if (!MD) return false;
const CXXRecordDecl *LambdaClass = MD->getParent();
if (LambdaClass && LambdaClass->isGenericLambda())
@@ -75,7 +90,6 @@ inline bool isGenericLambdaCallOperatorOrStaticInvokerSpecialization(
return false;
}
-
// This returns the parent DeclContext ensuring that the correct
// parent DeclContext is returned for Lambdas
inline DeclContext *getLambdaAwareParentOfDeclContext(DeclContext *DC) {
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTNodeTraverser.h b/contrib/llvm-project/clang/include/clang/AST/ASTNodeTraverser.h
index 18e7f491f222..cc8dab97f8b0 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTNodeTraverser.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTNodeTraverser.h
@@ -104,7 +104,7 @@ public:
Visit(Comment, Comment);
// Decls within functions are visited by the body.
- if (!isa<FunctionDecl>(*D) && !isa<ObjCMethodDecl>(*D)) {
+ if (!isa<FunctionDecl, ObjCMethodDecl, BlockDecl>(*D)) {
if (Traversal != TK_AsIs) {
if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
auto SK = CTSD->getSpecializationKind();
@@ -246,12 +246,16 @@ public:
.getTypeConstraint()
->getImmediatelyDeclaredConstraint());
} else if (auto *NR = dyn_cast<concepts::NestedRequirement>(R)) {
- if (!NR->isSubstitutionFailure())
+ if (!NR->hasInvalidConstraint())
Visit(NR->getConstraintExpr());
}
});
}
+ void Visit(const ConceptReference *R) {
+ getNodeDelegate().AddChild([=] { getNodeDelegate().Visit(R); });
+ }
+
void Visit(const APValue &Value, QualType Ty) {
getNodeDelegate().AddChild([=] { getNodeDelegate().Visit(Value, Ty); });
}
@@ -288,6 +292,8 @@ public:
Visit(C);
else if (const auto *T = N.get<TemplateArgument>())
Visit(*T);
+ else if (const auto *CR = N.get<ConceptReference>())
+ Visit(CR);
}
void dumpDeclContext(const DeclContext *DC) {
@@ -384,18 +390,19 @@ public:
}
void VisitAttributedType(const AttributedType *T) {
// FIXME: AttrKind
- Visit(T->getModifiedType());
+ if (T->getModifiedType() != T->getEquivalentType())
+ Visit(T->getModifiedType());
}
- void VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) {
- Visit(T->getReplacedParameter());
+ void VisitBTFTagAttributedType(const BTFTagAttributedType *T) {
+ Visit(T->getWrappedType());
}
+ void VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *) {}
void
VisitSubstTemplateTypeParmPackType(const SubstTemplateTypeParmPackType *T) {
- Visit(T->getReplacedParameter());
Visit(T->getArgumentPack());
}
void VisitTemplateSpecializationType(const TemplateSpecializationType *T) {
- for (const auto &Arg : *T)
+ for (const auto &Arg : T->template_arguments())
Visit(Arg);
}
void VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
@@ -419,8 +426,12 @@ public:
}
void VisitFunctionDecl(const FunctionDecl *D) {
- if (const auto *FTSI = D->getTemplateSpecializationInfo())
+ if (FunctionTemplateSpecializationInfo *FTSI =
+ D->getTemplateSpecializationInfo())
dumpTemplateArgumentList(*FTSI->TemplateArguments);
+ else if (DependentFunctionTemplateSpecializationInfo *DFTSI =
+ D->getDependentSpecializationInfo())
+ dumpASTTemplateArgumentListInfo(DFTSI->TemplateArgumentsAsWritten);
if (D->param_begin())
for (const auto *Parameter : D->parameters())
@@ -464,6 +475,10 @@ public:
void VisitBindingDecl(const BindingDecl *D) {
if (Traversal == TK_IgnoreUnlessSpelledInSource)
return;
+
+ if (const auto *V = D->getHoldingVar())
+ Visit(V);
+
if (const auto *E = D->getBinding())
Visit(E);
}
@@ -472,6 +487,8 @@ public:
Visit(D->getAsmString());
}
+ void VisitTopLevelStmtDecl(const TopLevelStmtDecl *D) { Visit(D->getStmt()); }
+
void VisitCapturedDecl(const CapturedDecl *D) { Visit(D->getBody()); }
void VisitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
@@ -565,11 +582,6 @@ public:
dumpTemplateParameters(D->getTemplateParameters());
}
- void VisitClassScopeFunctionSpecializationDecl(
- const ClassScopeFunctionSpecializationDecl *D) {
- Visit(D->getSpecialization());
- dumpASTTemplateArgumentListInfo(D->getTemplateArgsAsWritten());
- }
void VisitVarTemplateDecl(const VarTemplateDecl *D) { dumpTemplateDecl(D); }
void VisitBuiltinTemplateDecl(const BuiltinTemplateDecl *D) {
@@ -619,7 +631,14 @@ public:
Visit(D->getConstraintExpr());
}
+ void VisitImplicitConceptSpecializationDecl(
+ const ImplicitConceptSpecializationDecl *CSD) {
+ for (const TemplateArgument &Arg : CSD->getTemplateArguments())
+ Visit(Arg);
+ }
+
void VisitConceptSpecializationExpr(const ConceptSpecializationExpr *CSE) {
+ Visit(CSE->getSpecializationDecl());
if (CSE->hasExplicitTemplateArgs())
for (const auto &ArgLoc : CSE->getTemplateArgsAsWritten()->arguments())
dumpTemplateArgumentLoc(ArgLoc);
@@ -631,8 +650,15 @@ public:
}
void VisitFriendDecl(const FriendDecl *D) {
- if (!D->getFriendType())
+ if (D->getFriendType()) {
+ // Traverse any CXXRecordDecl owned by this type, since
+ // it will not be in the parent context:
+ if (auto *ET = D->getFriendType()->getType()->getAs<ElaboratedType>())
+ if (auto *TD = ET->getOwnedTagDecl())
+ Visit(TD);
+ } else {
Visit(D->getFriendDecl());
+ }
}
void VisitObjCMethodDecl(const ObjCMethodDecl *D) {
@@ -697,6 +723,12 @@ public:
}
}
+ void VisitCXXParenListInitExpr(const CXXParenListInitExpr *PLIE) {
+ if (auto *Filler = PLIE->getArrayFiller()) {
+ Visit(Filler, "array_filler");
+ }
+ }
+
void VisitBlockExpr(const BlockExpr *Node) { Visit(Node->getBlockDecl()); }
void VisitOpaqueValueExpr(const OpaqueValueExpr *Node) {
@@ -705,8 +737,11 @@ public:
}
void VisitGenericSelectionExpr(const GenericSelectionExpr *E) {
- Visit(E->getControllingExpr());
- Visit(E->getControllingExpr()->getType()); // FIXME: remove
+ if (E->isExprPredicate()) {
+ Visit(E->getControllingExpr());
+ Visit(E->getControllingExpr()->getType()); // FIXME: remove
+ } else
+ Visit(E->getControllingType()->getType());
for (const auto Assoc : E->associations()) {
Visit(Assoc);
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTStructuralEquivalence.h b/contrib/llvm-project/clang/include/clang/AST/ASTStructuralEquivalence.h
index c958a16aba21..029439c8e9a3 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTStructuralEquivalence.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTStructuralEquivalence.h
@@ -17,7 +17,7 @@
#include "clang/AST/DeclBase.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/Optional.h"
+#include <optional>
#include <queue>
#include <utility>
@@ -69,15 +69,19 @@ struct StructuralEquivalenceContext {
/// \c true if the last diagnostic came from ToCtx.
bool LastDiagFromC2 = false;
+ /// Whether to ignore comparing the depth of template param(TemplateTypeParm)
+ bool IgnoreTemplateParmDepth;
+
StructuralEquivalenceContext(
ASTContext &FromCtx, ASTContext &ToCtx,
llvm::DenseSet<std::pair<Decl *, Decl *>> &NonEquivalentDecls,
- StructuralEquivalenceKind EqKind,
- bool StrictTypeSpelling = false, bool Complain = true,
- bool ErrorOnTagTypeMismatch = false)
+ StructuralEquivalenceKind EqKind, bool StrictTypeSpelling = false,
+ bool Complain = true, bool ErrorOnTagTypeMismatch = false,
+ bool IgnoreTemplateParmDepth = false)
: FromCtx(FromCtx), ToCtx(ToCtx), NonEquivalentDecls(NonEquivalentDecls),
EqKind(EqKind), StrictTypeSpelling(StrictTypeSpelling),
- ErrorOnTagTypeMismatch(ErrorOnTagTypeMismatch), Complain(Complain) {}
+ ErrorOnTagTypeMismatch(ErrorOnTagTypeMismatch), Complain(Complain),
+ IgnoreTemplateParmDepth(IgnoreTemplateParmDepth) {}
DiagnosticBuilder Diag1(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag2(SourceLocation Loc, unsigned DiagID);
@@ -114,10 +118,10 @@ struct StructuralEquivalenceContext {
///
/// FIXME: This is needed by ASTImporter and ASTStructureEquivalence. It
/// probably makes more sense in some other common place then here.
- static llvm::Optional<unsigned>
+ static std::optional<unsigned>
findUntaggedStructOrUnionIndex(RecordDecl *Anon);
- // If ErrorOnTagTypeMismatch is set, return the the error, otherwise get the
+ // If ErrorOnTagTypeMismatch is set, return the error, otherwise get the
// relevant warning for the input error diagnostic.
unsigned getApplicableDiagnostic(unsigned ErrorDiagnostic);
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTTypeTraits.h b/contrib/llvm-project/clang/include/clang/AST/ASTTypeTraits.h
index 57195a9d6066..3988a15971db 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTTypeTraits.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTTypeTraits.h
@@ -17,6 +17,7 @@
#include "clang/AST/ASTFwd.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/LambdaCapture.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TypeLoc.h"
@@ -25,10 +26,8 @@
#include "llvm/Support/AlignOf.h"
namespace llvm {
-
class raw_ostream;
-
-}
+} // namespace llvm
namespace clang {
@@ -52,11 +51,10 @@ enum TraversalKind {
class ASTNodeKind {
public:
/// Empty identifier. It matches nothing.
- ASTNodeKind() : KindId(NKI_None) {}
+ constexpr ASTNodeKind() : KindId(NKI_None) {}
/// Construct an identifier for T.
- template <class T>
- static ASTNodeKind getFromNodeKind() {
+ template <class T> static constexpr ASTNodeKind getFromNodeKind() {
return ASTNodeKind(KindToKindId<T>::Id);
}
@@ -65,27 +63,33 @@ public:
static ASTNodeKind getFromNode(const Decl &D);
static ASTNodeKind getFromNode(const Stmt &S);
static ASTNodeKind getFromNode(const Type &T);
+ static ASTNodeKind getFromNode(const TypeLoc &T);
+ static ASTNodeKind getFromNode(const LambdaCapture &L);
static ASTNodeKind getFromNode(const OMPClause &C);
+ static ASTNodeKind getFromNode(const Attr &A);
/// \}
/// Returns \c true if \c this and \c Other represent the same kind.
- bool isSame(ASTNodeKind Other) const {
+ constexpr bool isSame(ASTNodeKind Other) const {
return KindId != NKI_None && KindId == Other.KindId;
}
/// Returns \c true only for the default \c ASTNodeKind()
- bool isNone() const { return KindId == NKI_None; }
+ constexpr bool isNone() const { return KindId == NKI_None; }
+
+ /// Returns \c true if \c this is a base kind of (or same as) \c Other.
+ bool isBaseOf(ASTNodeKind Other) const;
/// Returns \c true if \c this is a base kind of (or same as) \c Other.
/// \param Distance If non-null, used to return the distance between \c this
/// and \c Other in the class hierarchy.
- bool isBaseOf(ASTNodeKind Other, unsigned *Distance = nullptr) const;
+ bool isBaseOf(ASTNodeKind Other, unsigned *Distance) const;
/// String representation of the kind.
StringRef asStringRef() const;
/// Strict weak ordering for ASTNodeKind.
- bool operator<(const ASTNodeKind &Other) const {
+ constexpr bool operator<(const ASTNodeKind &Other) const {
return KindId < Other.KindId;
}
@@ -119,7 +123,7 @@ public:
/// Check if the given ASTNodeKind identifies a type that offers pointer
/// identity. This is useful for the fast path in DynTypedNode.
- bool hasPointerIdentity() const {
+ constexpr bool hasPointerIdentity() const {
return KindId > NKI_LastKindWithoutPointerIdentity;
}
@@ -131,9 +135,12 @@ private:
NKI_None,
NKI_TemplateArgument,
NKI_TemplateArgumentLoc,
+ NKI_LambdaCapture,
NKI_TemplateName,
NKI_NestedNameSpecifierLoc,
NKI_QualType,
+#define TYPELOC(CLASS, PARENT) NKI_##CLASS##TypeLoc,
+#include "clang/AST/TypeLocNodes.def"
NKI_TypeLoc,
NKI_LastKindWithoutPointerIdentity = NKI_TypeLoc,
NKI_CXXBaseSpecifier,
@@ -152,11 +159,20 @@ private:
#define GEN_CLANG_CLAUSE_CLASS
#define CLAUSE_CLASS(Enum, Str, Class) NKI_##Class,
#include "llvm/Frontend/OpenMP/OMP.inc"
+ NKI_Attr,
+#define ATTR(A) NKI_##A##Attr,
+#include "clang/Basic/AttrList.inc"
+ NKI_ObjCProtocolLoc,
+ NKI_ConceptReference,
NKI_NumberOfKinds
};
/// Use getFromNodeKind<T>() to construct the kind.
- ASTNodeKind(NodeKindId KindId) : KindId(KindId) {}
+ constexpr ASTNodeKind(NodeKindId KindId) : KindId(KindId) {}
+
+ /// Returns \c true if \c Base is a base kind of (or same as) \c
+ /// Derived.
+ static bool isBaseOf(NodeKindId Base, NodeKindId Derived);
/// Returns \c true if \c Base is a base kind of (or same as) \c
/// Derived.
@@ -192,16 +208,22 @@ private:
KIND_TO_KIND_ID(CXXCtorInitializer)
KIND_TO_KIND_ID(TemplateArgument)
KIND_TO_KIND_ID(TemplateArgumentLoc)
+KIND_TO_KIND_ID(LambdaCapture)
KIND_TO_KIND_ID(TemplateName)
KIND_TO_KIND_ID(NestedNameSpecifier)
KIND_TO_KIND_ID(NestedNameSpecifierLoc)
KIND_TO_KIND_ID(QualType)
+#define TYPELOC(CLASS, PARENT) KIND_TO_KIND_ID(CLASS##TypeLoc)
+#include "clang/AST/TypeLocNodes.def"
KIND_TO_KIND_ID(TypeLoc)
KIND_TO_KIND_ID(Decl)
KIND_TO_KIND_ID(Stmt)
KIND_TO_KIND_ID(Type)
KIND_TO_KIND_ID(OMPClause)
+KIND_TO_KIND_ID(Attr)
+KIND_TO_KIND_ID(ObjCProtocolLoc)
KIND_TO_KIND_ID(CXXBaseSpecifier)
+KIND_TO_KIND_ID(ConceptReference)
#define DECL(DERIVED, BASE) KIND_TO_KIND_ID(DERIVED##Decl)
#include "clang/AST/DeclNodes.inc"
#define STMT(DERIVED, BASE) KIND_TO_KIND_ID(DERIVED)
@@ -211,6 +233,8 @@ KIND_TO_KIND_ID(CXXBaseSpecifier)
#define GEN_CLANG_CLAUSE_CLASS
#define CLAUSE_CLASS(Enum, Str, Class) KIND_TO_KIND_ID(Class)
#include "llvm/Frontend/OpenMP/OMP.inc"
+#define ATTR(A) KIND_TO_KIND_ID(A##Attr)
+#include "clang/Basic/AttrList.inc"
#undef KIND_TO_KIND_ID
inline raw_ostream &operator<<(raw_ostream &OS, ASTNodeKind K) {
@@ -299,7 +323,7 @@ public:
return getUnchecked<QualType>().getAsOpaquePtr() <
Other.getUnchecked<QualType>().getAsOpaquePtr();
- if (ASTNodeKind::getFromNodeKind<TypeLoc>().isSame(NodeKind)) {
+ if (ASTNodeKind::getFromNodeKind<TypeLoc>().isBaseOf(NodeKind)) {
auto TLA = getUnchecked<TypeLoc>();
auto TLB = Other.getUnchecked<TypeLoc>();
return std::make_pair(TLA.getType().getAsOpaquePtr(),
@@ -331,7 +355,7 @@ public:
if (ASTNodeKind::getFromNodeKind<QualType>().isSame(NodeKind))
return getUnchecked<QualType>() == Other.getUnchecked<QualType>();
- if (ASTNodeKind::getFromNodeKind<TypeLoc>().isSame(NodeKind))
+ if (ASTNodeKind::getFromNodeKind<TypeLoc>().isBaseOf(NodeKind))
return getUnchecked<TypeLoc>() == Other.getUnchecked<TypeLoc>();
if (ASTNodeKind::getFromNodeKind<NestedNameSpecifierLoc>().isSame(NodeKind))
@@ -360,7 +384,7 @@ public:
}
static unsigned getHashValue(const DynTypedNode &Val) {
// FIXME: Add hashing support for the remaining types.
- if (ASTNodeKind::getFromNodeKind<TypeLoc>().isSame(Val.NodeKind)) {
+ if (ASTNodeKind::getFromNodeKind<TypeLoc>().isBaseOf(Val.NodeKind)) {
auto TL = Val.getUnchecked<TypeLoc>();
return llvm::hash_combine(TL.getType().getAsOpaquePtr(),
TL.getOpaqueData());
@@ -450,6 +474,29 @@ private:
}
};
+ /// Converter that stores nodes by value. It must be possible to dynamically
+ /// cast the stored node within a type hierarchy without breaking (especially
+ /// through slicing).
+ template <typename T, typename BaseT,
+ typename = std::enable_if_t<(sizeof(T) == sizeof(BaseT))>>
+ struct DynCastValueConverter {
+ static const T *get(ASTNodeKind NodeKind, const void *Storage) {
+ if (ASTNodeKind::getFromNodeKind<T>().isBaseOf(NodeKind))
+ return &getUnchecked(NodeKind, Storage);
+ return nullptr;
+ }
+ static const T &getUnchecked(ASTNodeKind NodeKind, const void *Storage) {
+ assert(ASTNodeKind::getFromNodeKind<T>().isBaseOf(NodeKind));
+ return *static_cast<const T *>(reinterpret_cast<const BaseT *>(Storage));
+ }
+ static DynTypedNode create(const T &Node) {
+ DynTypedNode Result;
+ Result.NodeKind = ASTNodeKind::getFromNode(Node);
+ new (&Result.Storage) T(Node);
+ return Result;
+ }
+ };
+
ASTNodeKind NodeKind;
/// Stores the data of the node.
@@ -462,7 +509,7 @@ private:
/// have storage or unique pointers and thus need to be stored by value.
llvm::AlignedCharArrayUnion<const void *, TemplateArgument,
TemplateArgumentLoc, NestedNameSpecifierLoc,
- QualType, TypeLoc>
+ QualType, TypeLoc, ObjCProtocolLoc>
Storage;
};
@@ -486,6 +533,11 @@ struct DynTypedNode::BaseConverter<
T, std::enable_if_t<std::is_base_of<OMPClause, T>::value>>
: public DynCastPtrConverter<T, OMPClause> {};
+template <typename T>
+struct DynTypedNode::BaseConverter<
+ T, std::enable_if_t<std::is_base_of<Attr, T>::value>>
+ : public DynCastPtrConverter<T, Attr> {};
+
template <>
struct DynTypedNode::BaseConverter<
NestedNameSpecifier, void> : public PtrConverter<NestedNameSpecifier> {};
@@ -503,6 +555,10 @@ struct DynTypedNode::BaseConverter<TemplateArgumentLoc, void>
: public ValueConverter<TemplateArgumentLoc> {};
template <>
+struct DynTypedNode::BaseConverter<LambdaCapture, void>
+ : public ValueConverter<LambdaCapture> {};
+
+template <>
struct DynTypedNode::BaseConverter<
TemplateName, void> : public ValueConverter<TemplateName> {};
@@ -515,14 +571,23 @@ template <>
struct DynTypedNode::BaseConverter<QualType,
void> : public ValueConverter<QualType> {};
-template <>
+template <typename T>
struct DynTypedNode::BaseConverter<
- TypeLoc, void> : public ValueConverter<TypeLoc> {};
+ T, std::enable_if_t<std::is_base_of<TypeLoc, T>::value>>
+ : public DynCastValueConverter<T, TypeLoc> {};
template <>
struct DynTypedNode::BaseConverter<CXXBaseSpecifier, void>
: public PtrConverter<CXXBaseSpecifier> {};
+template <>
+struct DynTypedNode::BaseConverter<ObjCProtocolLoc, void>
+ : public ValueConverter<ObjCProtocolLoc> {};
+
+template <>
+struct DynTypedNode::BaseConverter<ConceptReference, void>
+ : public PtrConverter<ConceptReference> {};
+
// The only operation we allow on unsupported types is \c get.
// This allows to conveniently use \c DynTypedNode when having an arbitrary
// AST node that is not supported, but prevents misuse - a user cannot create
diff --git a/contrib/llvm-project/clang/include/clang/AST/ASTUnresolvedSet.h b/contrib/llvm-project/clang/include/clang/AST/ASTUnresolvedSet.h
index 8d2b23b3539a..398ffb188c95 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ASTUnresolvedSet.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ASTUnresolvedSet.h
@@ -69,7 +69,12 @@ public:
return false;
}
- void erase(unsigned I) { Decls[I] = Decls.pop_back_val(); }
+ void erase(unsigned I) {
+ if (I == Decls.size() - 1)
+ Decls.pop_back();
+ else
+ Decls[I] = Decls.pop_back_val();
+ }
void clear() { Decls.clear(); }
diff --git a/contrib/llvm-project/clang/include/clang/AST/AbstractBasicReader.h b/contrib/llvm-project/clang/include/clang/AST/AbstractBasicReader.h
index 5505d661b44e..1f2797cc7014 100644
--- a/contrib/llvm-project/clang/include/clang/AST/AbstractBasicReader.h
+++ b/contrib/llvm-project/clang/include/clang/AST/AbstractBasicReader.h
@@ -6,22 +6,22 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_AST_ABSTRACTBASICREADER_H
-#define CLANG_AST_ABSTRACTBASICREADER_H
+#ifndef LLVM_CLANG_AST_ABSTRACTBASICREADER_H
+#define LLVM_CLANG_AST_ABSTRACTBASICREADER_H
#include "clang/AST/DeclTemplate.h"
+#include <optional>
namespace clang {
namespace serialization {
template <class T>
-inline T makeNullableFromOptional(const Optional<T> &value) {
+inline T makeNullableFromOptional(const std::optional<T> &value) {
return (value ? *value : T());
}
-template <class T>
-inline T *makePointerFromOptional(Optional<T *> value) {
- return (value ? *value : nullptr);
+template <class T> inline T *makePointerFromOptional(std::optional<T *> value) {
+ return value.value_or(nullptr);
}
// PropertyReader is a class concept that requires the following method:
@@ -49,7 +49,7 @@ inline T *makePointerFromOptional(Optional<T *> value) {
// type-specific readers for all the enum types.
//
// template <class ValueType>
-// Optional<ValueType> writeOptional();
+// std::optional<ValueType> writeOptional();
//
// Reads an optional value from the current property.
//
@@ -157,7 +157,7 @@ public:
}
template <class T, class... Args>
- llvm::Optional<T> readOptional(Args &&...args) {
+ std::optional<T> readOptional(Args &&...args) {
return UnpackOptionalValue<T>::unpack(
ReadDispatcher<T>::read(asImpl(), std::forward<Args>(args)...));
}
@@ -190,7 +190,8 @@ public:
APValue::LValuePathSerializationHelper readLValuePathSerializationHelper(
SmallVectorImpl<APValue::LValuePathEntry> &path) {
- auto elemTy = asImpl().readQualType();
+ auto origTy = asImpl().readQualType();
+ auto elemTy = origTy;
unsigned pathLength = asImpl().readUInt32();
for (unsigned i = 0; i < pathLength; ++i) {
if (elemTy->template getAs<RecordType>()) {
@@ -208,7 +209,7 @@ public:
APValue::LValuePathEntry::ArrayIndex(asImpl().readUInt32()));
}
}
- return APValue::LValuePathSerializationHelper(path, elemTy);
+ return APValue::LValuePathSerializationHelper(path, origTy);
}
Qualifiers readQualifiers() {
diff --git a/contrib/llvm-project/clang/include/clang/AST/AbstractBasicWriter.h b/contrib/llvm-project/clang/include/clang/AST/AbstractBasicWriter.h
index 75aef734ba9b..07afa388de2c 100644
--- a/contrib/llvm-project/clang/include/clang/AST/AbstractBasicWriter.h
+++ b/contrib/llvm-project/clang/include/clang/AST/AbstractBasicWriter.h
@@ -6,25 +6,23 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_AST_ABSTRACTBASICWRITER_H
-#define CLANG_AST_ABSTRACTBASICWRITER_H
+#ifndef LLVM_CLANG_AST_ABSTRACTBASICWRITER_H
+#define LLVM_CLANG_AST_ABSTRACTBASICWRITER_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclTemplate.h"
+#include <optional>
namespace clang {
namespace serialization {
template <class T>
-inline llvm::Optional<T> makeOptionalFromNullable(const T &value) {
- return (value.isNull()
- ? llvm::Optional<T>()
- : llvm::Optional<T>(value));
+inline std::optional<T> makeOptionalFromNullable(const T &value) {
+ return (value.isNull() ? std::optional<T>() : std::optional<T>(value));
}
-template <class T>
-inline llvm::Optional<T*> makeOptionalFromPointer(T *value) {
- return (value ? llvm::Optional<T*>(value) : llvm::Optional<T*>());
+template <class T> inline std::optional<T *> makeOptionalFromPointer(T *value) {
+ return (value ? std::optional<T *>(value) : std::optional<T *>());
}
// PropertyWriter is a class concept that requires the following method:
@@ -51,7 +49,7 @@ inline llvm::Optional<T*> makeOptionalFromPointer(T *value) {
// type-specific writers for all the enum types.
//
// template <class ValueType>
-// void writeOptional(Optional<ValueType> value);
+// void writeOptional(std::optional<ValueType> value);
//
// Writes an optional value as the current property.
//
@@ -148,8 +146,7 @@ public:
}
}
- template <class T>
- void writeOptional(llvm::Optional<T> value) {
+ template <class T> void writeOptional(std::optional<T> value) {
WriteDispatcher<T>::write(asImpl(), PackOptionalValue<T>::pack(value));
}
diff --git a/contrib/llvm-project/clang/include/clang/AST/AbstractTypeReader.h b/contrib/llvm-project/clang/include/clang/AST/AbstractTypeReader.h
index 9fea7b26f678..e44bbf61c0ed 100644
--- a/contrib/llvm-project/clang/include/clang/AST/AbstractTypeReader.h
+++ b/contrib/llvm-project/clang/include/clang/AST/AbstractTypeReader.h
@@ -6,11 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_AST_ABSTRACTTYPEREADER_H
-#define CLANG_AST_ABSTRACTTYPEREADER_H
+#ifndef LLVM_CLANG_AST_ABSTRACTTYPEREADER_H
+#define LLVM_CLANG_AST_ABSTRACTTYPEREADER_H
-#include "clang/AST/Type.h"
#include "clang/AST/AbstractBasicReader.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Type.h"
namespace clang {
namespace serialization {
diff --git a/contrib/llvm-project/clang/include/clang/AST/AbstractTypeWriter.h b/contrib/llvm-project/clang/include/clang/AST/AbstractTypeWriter.h
index a63cb0be099d..62006ef0f26e 100644
--- a/contrib/llvm-project/clang/include/clang/AST/AbstractTypeWriter.h
+++ b/contrib/llvm-project/clang/include/clang/AST/AbstractTypeWriter.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_AST_ABSTRACTTYPEWRITER_H
-#define CLANG_AST_ABSTRACTTYPEWRITER_H
+#ifndef LLVM_CLANG_AST_ABSTRACTTYPEWRITER_H
+#define LLVM_CLANG_AST_ABSTRACTTYPEWRITER_H
#include "clang/AST/Type.h"
#include "clang/AST/AbstractBasicWriter.h"
diff --git a/contrib/llvm-project/clang/include/clang/AST/Attr.h b/contrib/llvm-project/clang/include/clang/AST/Attr.h
index dbfecc125049..8e9b7ad8b468 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Attr.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Attr.h
@@ -19,12 +19,13 @@
#include "clang/AST/Type.h"
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/AttributeCommonInfo.h"
-#include "clang/Basic/LangOptions.h"
#include "clang/Basic/LLVM.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/Sanitizers.h"
#include "clang/Basic/SourceLocation.h"
-#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Frontend/HLSL/HLSLResource.h"
+#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/VersionTuple.h"
#include "llvm/Support/raw_ostream.h"
@@ -34,28 +35,29 @@
namespace clang {
class ASTContext;
class AttributeCommonInfo;
-class IdentifierInfo;
-class ObjCInterfaceDecl;
-class Expr;
-class QualType;
class FunctionDecl;
-class TypeSourceInfo;
class OMPTraitInfo;
/// Attr - This represents one attribute.
class Attr : public AttributeCommonInfo {
private:
+ LLVM_PREFERRED_TYPE(attr::Kind)
unsigned AttrKind : 16;
protected:
/// An index into the spelling list of an
/// attribute defined in Attr.td file.
+ LLVM_PREFERRED_TYPE(bool)
unsigned Inherited : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsPackExpansion : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned Implicit : 1;
// FIXME: These are properties of the attribute kind, not state for this
// instance of the attribute.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsLateParsed : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned InheritEvenIfAlreadyPresent : 1;
void *operator new(size_t bytes) noexcept {
@@ -109,6 +111,8 @@ public:
// Pretty print this attribute.
void printPretty(raw_ostream &OS, const PrintingPolicy &Policy) const;
+
+ static StringRef getDocumentation(attr::Kind);
};
class TypeAttr : public Attr {
@@ -193,6 +197,22 @@ public:
}
};
+class HLSLAnnotationAttr : public InheritableAttr {
+protected:
+ HLSLAnnotationAttr(ASTContext &Context, const AttributeCommonInfo &CommonInfo,
+ attr::Kind AK, bool IsLateParsed,
+ bool InheritEvenIfAlreadyPresent)
+ : InheritableAttr(Context, CommonInfo, AK, IsLateParsed,
+ InheritEvenIfAlreadyPresent) {}
+
+public:
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Attr *A) {
+ return A->getKind() >= attr::FirstHLSLAnnotationAttr &&
+ A->getKind() <= attr::LastHLSLAnnotationAttr;
+ }
+};
+
/// A parameter attribute which changes the argument-passing ABI rule
/// for the parameter.
class ParameterABIAttr : public InheritableParamAttr {
@@ -230,7 +250,9 @@ public:
class ParamIdx {
// Idx is exposed only via accessors that specify specific encodings.
unsigned Idx : 30;
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasThis : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsValid : 1;
void assertComparable(const ParamIdx &I) const {
@@ -350,30 +372,11 @@ public:
static_assert(sizeof(ParamIdx) == sizeof(ParamIdx::SerialType),
"ParamIdx does not fit its serialization type");
-/// Contains information gathered from parsing the contents of TargetAttr.
-struct ParsedTargetAttr {
- std::vector<std::string> Features;
- StringRef Architecture;
- StringRef Tune;
- StringRef BranchProtection;
- bool DuplicateArchitecture = false;
- bool DuplicateTune = false;
- bool operator ==(const ParsedTargetAttr &Other) const {
- return DuplicateArchitecture == Other.DuplicateArchitecture &&
- DuplicateTune == Other.DuplicateTune &&
- Architecture == Other.Architecture &&
- Tune == Other.Tune &&
- BranchProtection == Other.BranchProtection &&
- Features == Other.Features;
- }
-};
-
#include "clang/AST/Attrs.inc"
inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
const Attr *At) {
- DB.AddTaggedVal(reinterpret_cast<intptr_t>(At),
- DiagnosticsEngine::ak_attr);
+ DB.AddTaggedVal(reinterpret_cast<uint64_t>(At), DiagnosticsEngine::ak_attr);
return DB;
}
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/AST/AttrIterator.h b/contrib/llvm-project/clang/include/clang/AST/AttrIterator.h
index 78ce9314a2bb..66571e1cf0b8 100644
--- a/contrib/llvm-project/clang/include/clang/AST/AttrIterator.h
+++ b/contrib/llvm-project/clang/include/clang/AST/AttrIterator.h
@@ -22,7 +22,6 @@
namespace clang {
-class ASTContext;
class Attr;
/// AttrVec - A vector of Attr, which is how they are stored on the AST.
diff --git a/contrib/llvm-project/clang/include/clang/AST/BuiltinTypes.def b/contrib/llvm-project/clang/include/clang/AST/BuiltinTypes.def
index 039765dfdfea..c04f6f6f1271 100644
--- a/contrib/llvm-project/clang/include/clang/AST/BuiltinTypes.def
+++ b/contrib/llvm-project/clang/include/clang/AST/BuiltinTypes.def
@@ -218,6 +218,9 @@ FLOATING_TYPE(BFloat16, BFloat16Ty)
// '__float128'
FLOATING_TYPE(Float128, Float128Ty)
+// '__ibm128'
+FLOATING_TYPE(Ibm128, Ibm128Ty)
+
//===- Language-specific types --------------------------------------------===//
// This is the type of C++0x 'nullptr'.
diff --git a/contrib/llvm-project/clang/include/clang/AST/CXXInheritance.h b/contrib/llvm-project/clang/include/clang/AST/CXXInheritance.h
index 946b9e318baa..bbef01843e0b 100644
--- a/contrib/llvm-project/clang/include/clang/AST/CXXInheritance.h
+++ b/contrib/llvm-project/clang/include/clang/AST/CXXInheritance.h
@@ -20,7 +20,6 @@
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
@@ -131,6 +130,7 @@ class CXXBasePaths {
/// class subobjects for that class type. The key of the map is
/// the cv-unqualified canonical type of the base class subobject.
struct IsVirtBaseAndNumberNonVirtBases {
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsVirtBase : 1;
unsigned NumberOfNonVirtBases : 31;
};
@@ -315,7 +315,7 @@ public:
/// virtual function; in abstract classes, the final overrider for at
/// least one virtual function is a pure virtual function. Due to
/// multiple, virtual inheritance, it is possible for a class to have
-/// more than one final overrider. Athough this is an error (per C++
+/// more than one final overrider. Although this is an error (per C++
/// [class.virtual]p2), it is not considered an error here: the final
/// overrider map can represent multiple final overriders for a
/// method, and it is up to the client to determine whether they are
diff --git a/contrib/llvm-project/clang/include/clang/AST/CXXRecordDeclDefinitionBits.def b/contrib/llvm-project/clang/include/clang/AST/CXXRecordDeclDefinitionBits.def
index 9b270682f8cf..cdf0804680ad 100644
--- a/contrib/llvm-project/clang/include/clang/AST/CXXRecordDeclDefinitionBits.def
+++ b/contrib/llvm-project/clang/include/clang/AST/CXXRecordDeclDefinitionBits.def
@@ -112,6 +112,9 @@ FIELD(HasVariantMembers, 1, NO_MERGE)
/// True if there no non-field members declared by the user.
FIELD(HasOnlyCMembers, 1, NO_MERGE)
+/// True if there is an '__init' method defined by the user.
+FIELD(HasInitMethod, 1, NO_MERGE)
+
/// True if any field has an in-class initializer, including those
/// within anonymous unions or structs.
FIELD(HasInClassInitializer, 1, NO_MERGE)
diff --git a/contrib/llvm-project/clang/include/clang/AST/CanonicalType.h b/contrib/llvm-project/clang/include/clang/AST/CanonicalType.h
index 15d7e9efc26a..dde08f0394c9 100644
--- a/contrib/llvm-project/clang/include/clang/AST/CanonicalType.h
+++ b/contrib/llvm-project/clang/include/clang/AST/CanonicalType.h
@@ -305,7 +305,6 @@ public:
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, hasSignedIntegerRepresentation)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, hasUnsignedIntegerRepresentation)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, hasFloatingRepresentation)
- LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isPromotableIntegerType)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isSignedIntegerType)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isUnsignedIntegerType)
LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isSignedIntegerOrEnumerationType)
@@ -529,7 +528,7 @@ struct CanProxyAdaptor<FunctionProtoType>
template<>
struct CanProxyAdaptor<TypeOfType> : public CanProxyBase<TypeOfType> {
- LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getUnderlyingType)
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getUnmodifiedType)
};
template<>
diff --git a/contrib/llvm-project/clang/include/clang/AST/CharUnits.h b/contrib/llvm-project/clang/include/clang/AST/CharUnits.h
index f14d3abf71e5..c06354451dfb 100644
--- a/contrib/llvm-project/clang/include/clang/AST/CharUnits.h
+++ b/contrib/llvm-project/clang/include/clang/AST/CharUnits.h
@@ -64,6 +64,12 @@ namespace clang {
return CharUnits(Quantity);
}
+ /// fromQuantity - Construct a CharUnits quantity from an llvm::Align
+ /// quantity.
+ static CharUnits fromQuantity(llvm::Align Quantity) {
+ return CharUnits(Quantity.value());
+ }
+
// Compound assignment.
CharUnits& operator+= (const CharUnits &Other) {
Quantity += Other.Quantity;
@@ -182,6 +188,13 @@ namespace clang {
/// Beware llvm::Align assumes power of two 8-bit bytes.
llvm::Align getAsAlign() const { return llvm::Align(Quantity); }
+ /// getAsMaybeAlign - Returns Quantity as a valid llvm::Align or
+ /// std::nullopt, Beware llvm::MaybeAlign assumes power of two 8-bit
+ /// bytes.
+ llvm::MaybeAlign getAsMaybeAlign() const {
+ return llvm::MaybeAlign(Quantity);
+ }
+
/// alignTo - Returns the next integer (mod 2**64) that is
/// greater than or equal to this quantity and is a multiple of \p Align.
/// Align must be non-zero.
diff --git a/contrib/llvm-project/clang/include/clang/AST/Comment.h b/contrib/llvm-project/clang/include/clang/AST/Comment.h
index 54a4b0a9cfe6..dd9906727293 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Comment.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Comment.h
@@ -27,6 +27,8 @@ class TemplateParameterList;
namespace comments {
class FullComment;
+enum class InlineCommandRenderKind;
+enum class ParamCommandPassDirection;
/// Describes the syntax that was used in a documentation command.
///
@@ -47,6 +49,17 @@ enum CommandMarkerKind {
CMK_At = 1
};
+enum class CommentKind {
+ None = 0,
+#define COMMENT(CLASS, PARENT) CLASS,
+#define COMMENT_RANGE(BASE, FIRST, LAST) \
+ First##BASE##Constant = FIRST, Last##BASE##Constant = LAST,
+#define LAST_COMMENT_RANGE(BASE, FIRST, LAST) \
+ First##BASE##Constant = FIRST, Last##BASE##Constant = LAST
+#define ABSTRACT_COMMENT(COMMENT)
+#include "clang/AST/CommentNodes.inc"
+};
+
/// Any part of the comment.
/// Abstract class.
class Comment {
@@ -61,6 +74,7 @@ protected:
friend class Comment;
/// Type of this AST node.
+ LLVM_PREFERRED_TYPE(CommentKind)
unsigned Kind : 8;
};
enum { NumCommentBits = 8 };
@@ -68,10 +82,12 @@ protected:
class InlineContentCommentBitfields {
friend class InlineContentComment;
+ LLVM_PREFERRED_TYPE(CommentBitfields)
unsigned : NumCommentBits;
/// True if there is a newline after this inline content node.
/// (There is no separate AST node for a newline.)
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasTrailingNewline : 1;
};
enum { NumInlineContentCommentBits = NumCommentBits + 1 };
@@ -79,12 +95,15 @@ protected:
class TextCommentBitfields {
friend class TextComment;
+ LLVM_PREFERRED_TYPE(InlineContentCommentBitfields)
unsigned : NumInlineContentCommentBits;
/// True if \c IsWhitespace field contains a valid value.
+ LLVM_PREFERRED_TYPE(bool)
mutable unsigned IsWhitespaceValid : 1;
/// True if this comment AST node contains only whitespace.
+ LLVM_PREFERRED_TYPE(bool)
mutable unsigned IsWhitespace : 1;
};
enum { NumTextCommentBits = NumInlineContentCommentBits + 2 };
@@ -92,10 +111,13 @@ protected:
class InlineCommandCommentBitfields {
friend class InlineCommandComment;
+ LLVM_PREFERRED_TYPE(InlineContentCommentBitfields)
unsigned : NumInlineContentCommentBits;
+ LLVM_PREFERRED_TYPE(InlineCommandRenderKind)
unsigned RenderKind : 3;
+ LLVM_PREFERRED_TYPE(CommandTraits::KnownCommandIDs)
unsigned CommandID : CommandInfo::NumCommandIDBits;
};
enum { NumInlineCommandCommentBits = NumInlineContentCommentBits + 3 +
@@ -104,9 +126,11 @@ protected:
class HTMLTagCommentBitfields {
friend class HTMLTagComment;
+ LLVM_PREFERRED_TYPE(InlineContentCommentBitfields)
unsigned : NumInlineContentCommentBits;
/// True if we found that this tag is malformed in some way.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsMalformed : 1;
};
enum { NumHTMLTagCommentBits = NumInlineContentCommentBits + 1 };
@@ -114,10 +138,12 @@ protected:
class HTMLStartTagCommentBitfields {
friend class HTMLStartTagComment;
+ LLVM_PREFERRED_TYPE(HTMLTagCommentBitfields)
unsigned : NumHTMLTagCommentBits;
/// True if this tag is self-closing (e. g., <br />). This is based on tag
/// spelling in comment (plain <br> would not set this flag).
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsSelfClosing : 1;
};
enum { NumHTMLStartTagCommentBits = NumHTMLTagCommentBits + 1 };
@@ -125,12 +151,15 @@ protected:
class ParagraphCommentBitfields {
friend class ParagraphComment;
+ LLVM_PREFERRED_TYPE(CommentBitfields)
unsigned : NumCommentBits;
/// True if \c IsWhitespace field contains a valid value.
+ LLVM_PREFERRED_TYPE(bool)
mutable unsigned IsWhitespaceValid : 1;
/// True if this comment AST node contains only whitespace.
+ LLVM_PREFERRED_TYPE(bool)
mutable unsigned IsWhitespace : 1;
};
enum { NumParagraphCommentBits = NumCommentBits + 2 };
@@ -138,12 +167,15 @@ protected:
class BlockCommandCommentBitfields {
friend class BlockCommandComment;
+ LLVM_PREFERRED_TYPE(CommentBitfields)
unsigned : NumCommentBits;
+ LLVM_PREFERRED_TYPE(CommandTraits::KnownCommandIDs)
unsigned CommandID : CommandInfo::NumCommandIDBits;
/// Describes the syntax that was used in a documentation command.
/// Contains values from CommandMarkerKind enum.
+ LLVM_PREFERRED_TYPE(CommandMarkerKind)
unsigned CommandMarker : 1;
};
enum { NumBlockCommandCommentBits = NumCommentBits +
@@ -152,12 +184,15 @@ protected:
class ParamCommandCommentBitfields {
friend class ParamCommandComment;
+ LLVM_PREFERRED_TYPE(BlockCommandCommentBitfields)
unsigned : NumBlockCommandCommentBits;
- /// Parameter passing direction, see ParamCommandComment::PassDirection.
+ /// Parameter passing direction.
+ LLVM_PREFERRED_TYPE(ParamCommandPassDirection)
unsigned Direction : 2;
/// True if direction was specified explicitly in the comment.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsDirectionExplicit : 1;
};
enum { NumParamCommandCommentBits = NumBlockCommandCommentBits + 3 };
@@ -183,22 +218,16 @@ protected:
}
public:
- enum CommentKind {
- NoCommentKind = 0,
-#define COMMENT(CLASS, PARENT) CLASS##Kind,
-#define COMMENT_RANGE(BASE, FIRST, LAST) \
- First##BASE##Constant=FIRST##Kind, Last##BASE##Constant=LAST##Kind,
-#define LAST_COMMENT_RANGE(BASE, FIRST, LAST) \
- First##BASE##Constant=FIRST##Kind, Last##BASE##Constant=LAST##Kind
-#define ABSTRACT_COMMENT(COMMENT)
-#include "clang/AST/CommentNodes.inc"
+ struct Argument {
+ SourceRange Range;
+ StringRef Text;
};
Comment(CommentKind K,
SourceLocation LocBegin,
SourceLocation LocEnd) :
Loc(LocBegin), Range(SourceRange(LocBegin, LocEnd)) {
- CommentBits.Kind = K;
+ CommentBits.Kind = llvm::to_underlying(K);
}
CommentKind getCommentKind() const {
@@ -244,8 +273,9 @@ protected:
public:
static bool classof(const Comment *C) {
- return C->getCommentKind() >= FirstInlineContentCommentConstant &&
- C->getCommentKind() <= LastInlineContentCommentConstant;
+ return C->getCommentKind() >=
+ CommentKind::FirstInlineContentCommentConstant &&
+ C->getCommentKind() <= CommentKind::LastInlineContentCommentConstant;
}
void addTrailingNewline() {
@@ -262,16 +292,14 @@ class TextComment : public InlineContentComment {
StringRef Text;
public:
- TextComment(SourceLocation LocBegin,
- SourceLocation LocEnd,
- StringRef Text) :
- InlineContentComment(TextCommentKind, LocBegin, LocEnd),
- Text(Text) {
+ TextComment(SourceLocation LocBegin, SourceLocation LocEnd, StringRef Text)
+ : InlineContentComment(CommentKind::TextComment, LocBegin, LocEnd),
+ Text(Text) {
TextCommentBits.IsWhitespaceValid = false;
}
static bool classof(const Comment *C) {
- return C->getCommentKind() == TextCommentKind;
+ return C->getCommentKind() == CommentKind::TextComment;
}
child_iterator child_begin() const { return nullptr; }
@@ -293,44 +321,35 @@ private:
bool isWhitespaceNoCache() const;
};
+/// The most appropriate rendering mode for this command, chosen on command
+/// semantics in Doxygen.
+enum class InlineCommandRenderKind {
+ Normal,
+ Bold,
+ Monospaced,
+ Emphasized,
+ Anchor
+};
+
/// A command with word-like arguments that is considered inline content.
class InlineCommandComment : public InlineContentComment {
-public:
- struct Argument {
- SourceRange Range;
- StringRef Text;
-
- Argument(SourceRange Range, StringRef Text) : Range(Range), Text(Text) { }
- };
-
- /// The most appropriate rendering mode for this command, chosen on command
- /// semantics in Doxygen.
- enum RenderKind {
- RenderNormal,
- RenderBold,
- RenderMonospaced,
- RenderEmphasized,
- RenderAnchor
- };
-
protected:
/// Command arguments.
ArrayRef<Argument> Args;
public:
- InlineCommandComment(SourceLocation LocBegin,
- SourceLocation LocEnd,
- unsigned CommandID,
- RenderKind RK,
- ArrayRef<Argument> Args) :
- InlineContentComment(InlineCommandCommentKind, LocBegin, LocEnd),
- Args(Args) {
- InlineCommandCommentBits.RenderKind = RK;
+ InlineCommandComment(SourceLocation LocBegin, SourceLocation LocEnd,
+ unsigned CommandID, InlineCommandRenderKind RK,
+ ArrayRef<Argument> Args)
+ : InlineContentComment(CommentKind::InlineCommandComment, LocBegin,
+ LocEnd),
+ Args(Args) {
+ InlineCommandCommentBits.RenderKind = llvm::to_underlying(RK);
InlineCommandCommentBits.CommandID = CommandID;
}
static bool classof(const Comment *C) {
- return C->getCommentKind() == InlineCommandCommentKind;
+ return C->getCommentKind() == CommentKind::InlineCommandComment;
}
child_iterator child_begin() const { return nullptr; }
@@ -349,8 +368,9 @@ public:
return SourceRange(getBeginLoc().getLocWithOffset(-1), getEndLoc());
}
- RenderKind getRenderKind() const {
- return static_cast<RenderKind>(InlineCommandCommentBits.RenderKind);
+ InlineCommandRenderKind getRenderKind() const {
+ return static_cast<InlineCommandRenderKind>(
+ InlineCommandCommentBits.RenderKind);
}
unsigned getNumArgs() const {
@@ -388,8 +408,8 @@ protected:
public:
static bool classof(const Comment *C) {
- return C->getCommentKind() >= FirstHTMLTagCommentConstant &&
- C->getCommentKind() <= LastHTMLTagCommentConstant;
+ return C->getCommentKind() >= CommentKind::FirstHTMLTagCommentConstant &&
+ C->getCommentKind() <= CommentKind::LastHTMLTagCommentConstant;
}
StringRef getTagName() const LLVM_READONLY { return TagName; }
@@ -424,19 +444,13 @@ public:
Attribute() { }
- Attribute(SourceLocation NameLocBegin, StringRef Name) :
- NameLocBegin(NameLocBegin), Name(Name),
- EqualsLoc(SourceLocation()),
- ValueRange(SourceRange()), Value(StringRef())
- { }
+ Attribute(SourceLocation NameLocBegin, StringRef Name)
+ : NameLocBegin(NameLocBegin), Name(Name), EqualsLoc(SourceLocation()) {}
Attribute(SourceLocation NameLocBegin, StringRef Name,
- SourceLocation EqualsLoc,
- SourceRange ValueRange, StringRef Value) :
- NameLocBegin(NameLocBegin), Name(Name),
- EqualsLoc(EqualsLoc),
- ValueRange(ValueRange), Value(Value)
- { }
+ SourceLocation EqualsLoc, SourceRange ValueRange, StringRef Value)
+ : NameLocBegin(NameLocBegin), Name(Name), EqualsLoc(EqualsLoc),
+ ValueRange(ValueRange), Value(Value) {}
SourceLocation getNameLocEnd() const {
return NameLocBegin.getLocWithOffset(Name.size());
@@ -451,18 +465,16 @@ private:
ArrayRef<Attribute> Attributes;
public:
- HTMLStartTagComment(SourceLocation LocBegin,
- StringRef TagName) :
- HTMLTagComment(HTMLStartTagCommentKind,
- LocBegin, LocBegin.getLocWithOffset(1 + TagName.size()),
- TagName,
- LocBegin.getLocWithOffset(1),
- LocBegin.getLocWithOffset(1 + TagName.size())) {
+ HTMLStartTagComment(SourceLocation LocBegin, StringRef TagName)
+ : HTMLTagComment(CommentKind::HTMLStartTagComment, LocBegin,
+ LocBegin.getLocWithOffset(1 + TagName.size()), TagName,
+ LocBegin.getLocWithOffset(1),
+ LocBegin.getLocWithOffset(1 + TagName.size())) {
HTMLStartTagCommentBits.IsSelfClosing = false;
}
static bool classof(const Comment *C) {
- return C->getCommentKind() == HTMLStartTagCommentKind;
+ return C->getCommentKind() == CommentKind::HTMLStartTagComment;
}
child_iterator child_begin() const { return nullptr; }
@@ -506,18 +518,14 @@ public:
/// A closing HTML tag.
class HTMLEndTagComment : public HTMLTagComment {
public:
- HTMLEndTagComment(SourceLocation LocBegin,
- SourceLocation LocEnd,
- StringRef TagName) :
- HTMLTagComment(HTMLEndTagCommentKind,
- LocBegin, LocEnd,
- TagName,
- LocBegin.getLocWithOffset(2),
- LocBegin.getLocWithOffset(2 + TagName.size()))
- { }
+ HTMLEndTagComment(SourceLocation LocBegin, SourceLocation LocEnd,
+ StringRef TagName)
+ : HTMLTagComment(CommentKind::HTMLEndTagComment, LocBegin, LocEnd,
+ TagName, LocBegin.getLocWithOffset(2),
+ LocBegin.getLocWithOffset(2 + TagName.size())) {}
static bool classof(const Comment *C) {
- return C->getCommentKind() == HTMLEndTagCommentKind;
+ return C->getCommentKind() == CommentKind::HTMLEndTagComment;
}
child_iterator child_begin() const { return nullptr; }
@@ -537,8 +545,9 @@ protected:
public:
static bool classof(const Comment *C) {
- return C->getCommentKind() >= FirstBlockContentCommentConstant &&
- C->getCommentKind() <= LastBlockContentCommentConstant;
+ return C->getCommentKind() >=
+ CommentKind::FirstBlockContentCommentConstant &&
+ C->getCommentKind() <= CommentKind::LastBlockContentCommentConstant;
}
};
@@ -547,11 +556,10 @@ class ParagraphComment : public BlockContentComment {
ArrayRef<InlineContentComment *> Content;
public:
- ParagraphComment(ArrayRef<InlineContentComment *> Content) :
- BlockContentComment(ParagraphCommentKind,
- SourceLocation(),
- SourceLocation()),
- Content(Content) {
+ ParagraphComment(ArrayRef<InlineContentComment *> Content)
+ : BlockContentComment(CommentKind::ParagraphComment, SourceLocation(),
+ SourceLocation()),
+ Content(Content) {
if (Content.empty()) {
ParagraphCommentBits.IsWhitespace = true;
ParagraphCommentBits.IsWhitespaceValid = true;
@@ -566,7 +574,7 @@ public:
}
static bool classof(const Comment *C) {
- return C->getCommentKind() == ParagraphCommentKind;
+ return C->getCommentKind() == CommentKind::ParagraphComment;
}
child_iterator child_begin() const {
@@ -594,15 +602,6 @@ private:
/// arguments depends on command name) and a paragraph as an argument
/// (e. g., \\brief).
class BlockCommandComment : public BlockContentComment {
-public:
- struct Argument {
- SourceRange Range;
- StringRef Text;
-
- Argument() { }
- Argument(SourceRange Range, StringRef Text) : Range(Range), Text(Text) { }
- };
-
protected:
/// Word-like arguments.
ArrayRef<Argument> Args;
@@ -623,20 +622,19 @@ protected:
}
public:
- BlockCommandComment(SourceLocation LocBegin,
- SourceLocation LocEnd,
- unsigned CommandID,
- CommandMarkerKind CommandMarker) :
- BlockContentComment(BlockCommandCommentKind, LocBegin, LocEnd),
- Paragraph(nullptr) {
+ BlockCommandComment(SourceLocation LocBegin, SourceLocation LocEnd,
+ unsigned CommandID, CommandMarkerKind CommandMarker)
+ : BlockContentComment(CommentKind::BlockCommandComment, LocBegin, LocEnd),
+ Paragraph(nullptr) {
setLocation(getCommandNameBeginLoc());
BlockCommandCommentBits.CommandID = CommandID;
BlockCommandCommentBits.CommandMarker = CommandMarker;
}
static bool classof(const Comment *C) {
- return C->getCommentKind() >= FirstBlockCommandCommentConstant &&
- C->getCommentKind() <= LastBlockCommandCommentConstant;
+ return C->getCommentKind() >=
+ CommentKind::FirstBlockCommandCommentConstant &&
+ C->getCommentKind() <= CommentKind::LastBlockCommandCommentConstant;
}
child_iterator child_begin() const {
@@ -707,6 +705,8 @@ public:
}
};
+enum class ParamCommandPassDirection { In, Out, InOut };
+
/// Doxygen \\param command.
class ParamCommandComment : public BlockCommandComment {
private:
@@ -719,39 +719,33 @@ public:
VarArgParamIndex = ~0U/*InvalidParamIndex*/ - 1U
};
- ParamCommandComment(SourceLocation LocBegin,
- SourceLocation LocEnd,
- unsigned CommandID,
- CommandMarkerKind CommandMarker) :
- BlockCommandComment(ParamCommandCommentKind, LocBegin, LocEnd,
- CommandID, CommandMarker),
- ParamIndex(InvalidParamIndex) {
- ParamCommandCommentBits.Direction = In;
+ ParamCommandComment(SourceLocation LocBegin, SourceLocation LocEnd,
+ unsigned CommandID, CommandMarkerKind CommandMarker)
+ : BlockCommandComment(CommentKind::ParamCommandComment, LocBegin, LocEnd,
+ CommandID, CommandMarker),
+ ParamIndex(InvalidParamIndex) {
+ ParamCommandCommentBits.Direction =
+ llvm::to_underlying(ParamCommandPassDirection::In);
ParamCommandCommentBits.IsDirectionExplicit = false;
}
static bool classof(const Comment *C) {
- return C->getCommentKind() == ParamCommandCommentKind;
+ return C->getCommentKind() == CommentKind::ParamCommandComment;
}
- enum PassDirection {
- In,
- Out,
- InOut
- };
-
- static const char *getDirectionAsString(PassDirection D);
+ static const char *getDirectionAsString(ParamCommandPassDirection D);
- PassDirection getDirection() const LLVM_READONLY {
- return static_cast<PassDirection>(ParamCommandCommentBits.Direction);
+ ParamCommandPassDirection getDirection() const LLVM_READONLY {
+ return static_cast<ParamCommandPassDirection>(
+ ParamCommandCommentBits.Direction);
}
bool isDirectionExplicit() const LLVM_READONLY {
return ParamCommandCommentBits.IsDirectionExplicit;
}
- void setDirection(PassDirection Direction, bool Explicit) {
- ParamCommandCommentBits.Direction = Direction;
+ void setDirection(ParamCommandPassDirection Direction, bool Explicit) {
+ ParamCommandCommentBits.Direction = llvm::to_underlying(Direction);
ParamCommandCommentBits.IsDirectionExplicit = Explicit;
}
@@ -813,16 +807,13 @@ private:
ArrayRef<unsigned> Position;
public:
- TParamCommandComment(SourceLocation LocBegin,
- SourceLocation LocEnd,
- unsigned CommandID,
- CommandMarkerKind CommandMarker) :
- BlockCommandComment(TParamCommandCommentKind, LocBegin, LocEnd, CommandID,
- CommandMarker)
- { }
+ TParamCommandComment(SourceLocation LocBegin, SourceLocation LocEnd,
+ unsigned CommandID, CommandMarkerKind CommandMarker)
+ : BlockCommandComment(CommentKind::TParamCommandComment, LocBegin, LocEnd,
+ CommandID, CommandMarker) {}
static bool classof(const Comment *C) {
- return C->getCommentKind() == TParamCommandCommentKind;
+ return C->getCommentKind() == CommentKind::TParamCommandComment;
}
bool hasParamName() const {
@@ -864,16 +855,13 @@ class VerbatimBlockLineComment : public Comment {
StringRef Text;
public:
- VerbatimBlockLineComment(SourceLocation LocBegin,
- StringRef Text) :
- Comment(VerbatimBlockLineCommentKind,
- LocBegin,
- LocBegin.getLocWithOffset(Text.size())),
- Text(Text)
- { }
+ VerbatimBlockLineComment(SourceLocation LocBegin, StringRef Text)
+ : Comment(CommentKind::VerbatimBlockLineComment, LocBegin,
+ LocBegin.getLocWithOffset(Text.size())),
+ Text(Text) {}
static bool classof(const Comment *C) {
- return C->getCommentKind() == VerbatimBlockLineCommentKind;
+ return C->getCommentKind() == CommentKind::VerbatimBlockLineComment;
}
child_iterator child_begin() const { return nullptr; }
@@ -895,16 +883,15 @@ protected:
ArrayRef<VerbatimBlockLineComment *> Lines;
public:
- VerbatimBlockComment(SourceLocation LocBegin,
- SourceLocation LocEnd,
- unsigned CommandID) :
- BlockCommandComment(VerbatimBlockCommentKind,
- LocBegin, LocEnd, CommandID,
- CMK_At) // FIXME: improve source fidelity.
- { }
+ VerbatimBlockComment(SourceLocation LocBegin, SourceLocation LocEnd,
+ unsigned CommandID)
+ : BlockCommandComment(CommentKind::VerbatimBlockComment, LocBegin, LocEnd,
+ CommandID,
+ CMK_At) // FIXME: improve source fidelity.
+ {}
static bool classof(const Comment *C) {
- return C->getCommentKind() == VerbatimBlockCommentKind;
+ return C->getCommentKind() == CommentKind::VerbatimBlockComment;
}
child_iterator child_begin() const {
@@ -946,21 +933,16 @@ protected:
SourceLocation TextBegin;
public:
- VerbatimLineComment(SourceLocation LocBegin,
- SourceLocation LocEnd,
- unsigned CommandID,
- SourceLocation TextBegin,
- StringRef Text) :
- BlockCommandComment(VerbatimLineCommentKind,
- LocBegin, LocEnd,
- CommandID,
- CMK_At), // FIXME: improve source fidelity.
- Text(Text),
- TextBegin(TextBegin)
- { }
+ VerbatimLineComment(SourceLocation LocBegin, SourceLocation LocEnd,
+ unsigned CommandID, SourceLocation TextBegin,
+ StringRef Text)
+ : BlockCommandComment(CommentKind::VerbatimLineComment, LocBegin, LocEnd,
+ CommandID,
+ CMK_At), // FIXME: improve source fidelity.
+ Text(Text), TextBegin(TextBegin) {}
static bool classof(const Comment *C) {
- return C->getCommentKind() == VerbatimLineCommentKind;
+ return C->getCommentKind() == CommentKind::VerbatimLineComment;
}
child_iterator child_begin() const { return nullptr; }
@@ -1019,8 +1001,6 @@ struct DeclInfo {
/// \li member function template,
/// \li member function template specialization,
/// \li ObjC method,
- /// \li a typedef for a function pointer, member function pointer,
- /// ObjC block.
FunctionKind,
/// Something that we consider a "class":
@@ -1030,8 +1010,8 @@ struct DeclInfo {
ClassKind,
/// Something that we consider a "variable":
- /// \li namespace scope variables;
- /// \li static and non-static class data members;
+ /// \li namespace scope variables and variable templates;
+ /// \li static and non-static class data members and member templates;
/// \li enumerators.
VariableKind,
@@ -1055,27 +1035,37 @@ struct DeclInfo {
};
/// If false, only \c CommentDecl is valid.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsFilled : 1;
/// Simplified kind of \c CommentDecl, see \c DeclKind enum.
+ LLVM_PREFERRED_TYPE(DeclKind)
unsigned Kind : 3;
/// Is \c CommentDecl a template declaration.
+ LLVM_PREFERRED_TYPE(TemplateDeclKind)
unsigned TemplateKind : 2;
/// Is \c CommentDecl an ObjCMethodDecl.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsObjCMethod : 1;
/// Is \c CommentDecl a non-static member function of C++ class or
/// instance method of ObjC class.
/// Can be true only if \c IsFunctionDecl is true.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsInstanceMethod : 1;
/// Is \c CommentDecl a static member function of C++ class or
/// class method of ObjC class.
/// Can be true only if \c IsFunctionDecl is true.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsClassMethod : 1;
+ /// Is \c CommentDecl something we consider a "function" that's variadic.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsVariadic : 1;
+
void fill();
DeclKind getKind() const LLVM_READONLY {
@@ -1085,6 +1075,8 @@ struct DeclInfo {
TemplateDeclKind getTemplateKind() const LLVM_READONLY {
return static_cast<TemplateDeclKind>(TemplateKind);
}
+
+ bool involvesFunctionType() const { return !ReturnType.isNull(); }
};
/// A full comment attached to a declaration, contains block content.
@@ -1093,9 +1085,9 @@ class FullComment : public Comment {
DeclInfo *ThisDeclInfo;
public:
- FullComment(ArrayRef<BlockContentComment *> Blocks, DeclInfo *D) :
- Comment(FullCommentKind, SourceLocation(), SourceLocation()),
- Blocks(Blocks), ThisDeclInfo(D) {
+ FullComment(ArrayRef<BlockContentComment *> Blocks, DeclInfo *D)
+ : Comment(CommentKind::FullComment, SourceLocation(), SourceLocation()),
+ Blocks(Blocks), ThisDeclInfo(D) {
if (Blocks.empty())
return;
@@ -1105,7 +1097,7 @@ public:
}
static bool classof(const Comment *C) {
- return C->getCommentKind() == FullCommentKind;
+ return C->getCommentKind() == CommentKind::FullComment;
}
child_iterator child_begin() const {
diff --git a/contrib/llvm-project/clang/include/clang/AST/CommentCommands.td b/contrib/llvm-project/clang/include/clang/AST/CommentCommands.td
index fbbfc9f7e0b7..e839031752cd 100644
--- a/contrib/llvm-project/clang/include/clang/AST/CommentCommands.td
+++ b/contrib/llvm-project/clang/include/clang/AST/CommentCommands.td
@@ -31,6 +31,7 @@ class Command<string name> {
}
class InlineCommand<string name> : Command<name> {
+ let NumArgs = 1;
let IsInlineCommand = 1;
}
@@ -62,6 +63,11 @@ class VerbatimLineCommand<string name> : Command<name> {
let IsVerbatimLineCommand = 1;
}
+class PropertyCommand<string name> : Command<name> {
+ let NumArgs = 0;
+ let IsInlineCommand = 1;
+}
+
class DeclarationVerbatimLineCommand<string name> :
VerbatimLineCommand<name> {
let IsDeclarationCommand = 1;
@@ -86,9 +92,23 @@ def C : InlineCommand<"c">;
def P : InlineCommand<"p">;
def A : InlineCommand<"a">;
def E : InlineCommand<"e">;
+def N : InlineCommand<"n"> { let NumArgs = 0; }
def Em : InlineCommand<"em">;
-def Ref : InlineCommand<"ref">;
-def Anchor : InlineCommand<"anchor">;
+def Emoji : InlineCommand<"emoji">;
+
+def Anchor : InlineCommand<"anchor">;
+def Ref : InlineCommand<"ref">;
+def RefItem : InlineCommand<"refitem">;
+def Cite : InlineCommand<"cite">;
+
+def CopyBrief : InlineCommand<"copybrief">;
+def CopyDetails : InlineCommand<"copydetails">;
+def CopyDoc : InlineCommand<"copydoc">;
+
+// Typically not used inline, but they take a single word.
+def Extends : InlineCommand<"extends">;
+def Implements : InlineCommand<"implements">;
+def MemberOf : InlineCommand<"memberof">;
//===----------------------------------------------------------------------===//
// BlockCommand
@@ -141,13 +161,15 @@ def Post : BlockCommand<"post">;
def Pre : BlockCommand<"pre">;
def Remark : BlockCommand<"remark">;
def Remarks : BlockCommand<"remarks">;
-def Retval : BlockCommand<"retval">;
+def Retval : BlockCommand<"retval"> { let NumArgs = 1; }
def Sa : BlockCommand<"sa">;
def See : BlockCommand<"see">;
def Since : BlockCommand<"since">;
+def Test : BlockCommand<"test">;
def Todo : BlockCommand<"todo">;
def Version : BlockCommand<"version">;
def Warning : BlockCommand<"warning">;
+def XRefItem : BlockCommand<"xrefitem"> { let NumArgs = 3; }
// HeaderDoc commands
def Abstract : BlockCommand<"abstract"> { let IsBriefCommand = 1; }
def ClassDesign : RecordLikeDetailCommand<"classdesign">;
@@ -170,6 +192,8 @@ def SuperClass : RecordLikeDetailCommand<"superclass">;
defm Code : VerbatimBlockCommand<"code", "endcode">;
defm Verbatim : VerbatimBlockCommand<"verbatim", "endverbatim">;
+
+defm DocbookOnly : VerbatimBlockCommand<"docbookonly", "enddocbookonly">;
defm Htmlonly : VerbatimBlockCommand<"htmlonly", "endhtmlonly">;
defm Latexonly : VerbatimBlockCommand<"latexonly", "endlatexonly">;
defm Xmlonly : VerbatimBlockCommand<"xmlonly", "endxmlonly">;
@@ -178,10 +202,19 @@ defm Rtfonly : VerbatimBlockCommand<"rtfonly", "endrtfonly">;
defm Dot : VerbatimBlockCommand<"dot", "enddot">;
defm Msc : VerbatimBlockCommand<"msc", "endmsc">;
+defm Uml : VerbatimBlockCommand<"startuml", "enduml">;
+
+// Actually not verbatim blocks, we should also parse commands within them.
+defm Internal : VerbatimBlockCommand<"internal", "endinternal">;
+// TODO: conflicts with HeaderDoc link, /link.
+//defm Link : VerbatimBlockCommand<"link", "endlink">;
+defm ParBlock : VerbatimBlockCommand<"parblock", "endparblock">;
+defm SecRefList : VerbatimBlockCommand<"secreflist", "endsecreflist">;
// These three commands have special support in CommentLexer to recognize their
// names.
def FDollar : VerbatimBlockCommand<"f$">; // Inline LaTeX formula
+defm FParen : VerbatimBlockCommand<"f(", "f)">; // Inline LaTeX text
defm FBracket : VerbatimBlockCommand<"f[", "f]">; // Displayed LaTeX formula
defm FBrace : VerbatimBlockCommand<"f{", "f}">; // LaTeX environment
@@ -199,11 +232,18 @@ def Addtogroup : VerbatimLineCommand<"addtogroup">;
def Weakgroup : VerbatimLineCommand<"weakgroup">;
def Name : VerbatimLineCommand<"name">;
+// These actually take a single word, but it's optional.
+// And they're used on a separate line typically, not inline.
+def Dir : VerbatimLineCommand<"dir">;
+def File : VerbatimLineCommand<"file">;
+
def Section : VerbatimLineCommand<"section">;
def Subsection : VerbatimLineCommand<"subsection">;
def Subsubsection : VerbatimLineCommand<"subsubsection">;
def Paragraph : VerbatimLineCommand<"paragraph">;
+def TableOfContents : VerbatimLineCommand<"tableofcontents">;
+def Page : VerbatimLineCommand<"page">;
def Mainpage : VerbatimLineCommand<"mainpage">;
def Subpage : VerbatimLineCommand<"subpage">;
@@ -212,13 +252,80 @@ def Related : VerbatimLineCommand<"related">;
def RelatesAlso : VerbatimLineCommand<"relatesalso">;
def RelatedAlso : VerbatimLineCommand<"relatedalso">;
+def AddIndex : VerbatimLineCommand<"addindex">;
+
+// These take a single argument mostly, but since they include a file they'll
+// typically be on their own line.
+def DocbookInclude : VerbatimLineCommand<"docbookinclude">;
+def DontInclude : VerbatimLineCommand<"dontinclude">;
+def Example : VerbatimLineCommand<"example">;
+def HtmlInclude : VerbatimLineCommand<"htmlinclude">;
+def Include : VerbatimLineCommand<"include">;
+def ManInclude : VerbatimLineCommand<"maninclude">;
+def LatexInclude : VerbatimLineCommand<"latexinclude">;
+def RtfInclude : VerbatimLineCommand<"rtfinclude">;
+def Snippet : VerbatimLineCommand<"snippet">;
+def VerbInclude : VerbatimLineCommand<"verbinclude">;
+def XmlInclude : VerbatimLineCommand<"xmlinclude">;
+
+def Image : VerbatimLineCommand<"image">;
+def DotFile : VerbatimLineCommand<"dotfile">;
+def MscFile : VerbatimLineCommand<"mscfile">;
+def DiaFile : VerbatimLineCommand<"diafile">;
+
+def Line : VerbatimLineCommand<"line">;
+def Skip : VerbatimLineCommand<"skip">;
+def SkipLine : VerbatimLineCommand<"skipline">;
+def Until : VerbatimLineCommand<"until">;
+
+def NoOp : VerbatimLineCommand<"noop">;
+
+// We might also build proper support for if/ifnot/else/elseif/endif.
+def If : VerbatimLineCommand<"if">;
+def IfNot : VerbatimLineCommand<"ifnot">;
+def Else : VerbatimLineCommand<"else">;
+def ElseIf : VerbatimLineCommand<"elseif">;
+def Endif : VerbatimLineCommand<"endif">;
+
+// Not treated as VerbatimBlockCommand because it spans multiple comments.
+def Cond : VerbatimLineCommand<"cond">;
+def EndCond : VerbatimLineCommand<"endcond">;
+
+//===----------------------------------------------------------------------===//
+// PropertyCommand
+//===----------------------------------------------------------------------===//
+
+def CallGraph : PropertyCommand<"callgraph">;
+def HideCallGraph : PropertyCommand<"hidecallgraph">;
+def CallerGraph : PropertyCommand<"callergraph">;
+def HideCallerGraph : PropertyCommand<"hidecallergraph">;
+def ShowInitializer : PropertyCommand<"showinitializer">;
+def HideInitializer : PropertyCommand<"hideinitializer">;
+def ShowRefBy : PropertyCommand<"showrefby">;
+def HideRefBy : PropertyCommand<"hiderefby">;
+def ShowRefs : PropertyCommand<"showrefs">;
+def HideRefs : PropertyCommand<"hiderefs">;
+
+def Private : PropertyCommand<"private">;
+def Protected : PropertyCommand<"protected">;
+def Public : PropertyCommand<"public">;
+def Pure : PropertyCommand<"pure">;
+def Static : PropertyCommand<"static">;
+
+def NoSubgrouping : PropertyCommand<"nosubgrouping">;
+def PrivateSection : PropertyCommand<"privatesection">;
+def ProtectedSection : PropertyCommand<"protectedsection">;
+def PublicSection : PropertyCommand<"publicsection">;
+
//===----------------------------------------------------------------------===//
// DeclarationVerbatimLineCommand
//===----------------------------------------------------------------------===//
// Doxygen commands.
+def Concept : DeclarationVerbatimLineCommand<"concept">;
def Def : DeclarationVerbatimLineCommand<"def">;
def Fn : DeclarationVerbatimLineCommand<"fn">;
+def IDLExcept : DeclarationVerbatimLineCommand<"idlexcept">;
def Namespace : DeclarationVerbatimLineCommand<"namespace">;
def Overload : DeclarationVerbatimLineCommand<"overload">;
def Property : DeclarationVerbatimLineCommand<"property">;
diff --git a/contrib/llvm-project/clang/include/clang/AST/CommentHTMLTags.td b/contrib/llvm-project/clang/include/clang/AST/CommentHTMLTags.td
index 251490094940..a1ce8c6da96c 100644
--- a/contrib/llvm-project/clang/include/clang/AST/CommentHTMLTags.td
+++ b/contrib/llvm-project/clang/include/clang/AST/CommentHTMLTags.td
@@ -52,11 +52,11 @@ def Tr : Tag<"tr"> { let EndTagOptional = 1; }
def Th : Tag<"th"> { let EndTagOptional = 1; }
def Td : Tag<"td"> { let EndTagOptional = 1; }
-// Define a blacklist of attributes that are not safe to pass through to HTML
+// Define a list of attributes that are not safe to pass through to HTML
// output if the input is untrusted.
//
-// FIXME: this should be a whitelist. When changing this to a whitelist, don't
-// forget to change the default in the TableGen backend.
+// FIXME: This should be a list of attributes that _are_ safe. When changing
+// this change, don't forget to change the default in the TableGen backend.
class Attribute<string spelling> {
string Spelling = spelling;
bit IsSafeToPassThrough = 1;
diff --git a/contrib/llvm-project/clang/include/clang/AST/CommentLexer.h b/contrib/llvm-project/clang/include/clang/AST/CommentLexer.h
index 94f778501e75..9aa1681cb2c5 100644
--- a/contrib/llvm-project/clang/include/clang/AST/CommentLexer.h
+++ b/contrib/llvm-project/clang/include/clang/AST/CommentLexer.h
@@ -320,6 +320,9 @@ private:
/// Eat string matching regexp \code \s*\* \endcode.
void skipLineStartingDecorations();
+ /// Skip over pure text.
+ const char *skipTextToken();
+
/// Lex comment text, including commands if ParseCommands is set to true.
void lexCommentText(Token &T);
diff --git a/contrib/llvm-project/clang/include/clang/AST/CommentParser.h b/contrib/llvm-project/clang/include/clang/AST/CommentParser.h
index 1a0cfb06e52b..e11e818b1af0 100644
--- a/contrib/llvm-project/clang/include/clang/AST/CommentParser.h
+++ b/contrib/llvm-project/clang/include/clang/AST/CommentParser.h
@@ -97,9 +97,8 @@ public:
void parseTParamCommandArgs(TParamCommandComment *TPC,
TextTokenRetokenizer &Retokenizer);
- void parseBlockCommandArgs(BlockCommandComment *BC,
- TextTokenRetokenizer &Retokenizer,
- unsigned NumArgs);
+ ArrayRef<Comment::Argument>
+ parseCommandArgs(TextTokenRetokenizer &Retokenizer, unsigned NumArgs);
BlockCommandComment *parseBlockCommand();
InlineCommandComment *parseInlineCommand();
diff --git a/contrib/llvm-project/clang/include/clang/AST/CommentSema.h b/contrib/llvm-project/clang/include/clang/AST/CommentSema.h
index 6dfe0f4920d0..03f13283ac0d 100644
--- a/contrib/llvm-project/clang/include/clang/AST/CommentSema.h
+++ b/contrib/llvm-project/clang/include/clang/AST/CommentSema.h
@@ -80,7 +80,7 @@ public:
ArrayRef<T> copyArray(ArrayRef<T> Source) {
if (!Source.empty())
return Source.copy(Allocator);
- return None;
+ return std::nullopt;
}
ParagraphComment *actOnParagraphComment(
@@ -130,14 +130,8 @@ public:
InlineCommandComment *actOnInlineCommand(SourceLocation CommandLocBegin,
SourceLocation CommandLocEnd,
- unsigned CommandID);
-
- InlineCommandComment *actOnInlineCommand(SourceLocation CommandLocBegin,
- SourceLocation CommandLocEnd,
unsigned CommandID,
- SourceLocation ArgLocBegin,
- SourceLocation ArgLocEnd,
- StringRef Arg);
+ ArrayRef<Comment::Argument> Args);
InlineContentComment *actOnUnknownCommand(SourceLocation LocBegin,
SourceLocation LocEnd,
@@ -181,6 +175,7 @@ public:
FullComment *actOnFullComment(ArrayRef<BlockContentComment *> Blocks);
+private:
void checkBlockCommandEmptyParagraph(BlockCommandComment *Command);
void checkReturnsCommand(const BlockCommandComment *Command);
@@ -198,19 +193,19 @@ public:
void checkContainerDecl(const BlockCommandComment *Comment);
/// Resolve parameter names to parameter indexes in function declaration.
- /// Emit diagnostics about unknown parametrs.
+ /// Emit diagnostics about unknown parameters.
void resolveParamCommandIndexes(const FullComment *FC);
+ /// \returns \c true if the declaration that this comment is attached to
+ /// is a pointer to function/method/block type or has such a type.
+ bool involvesFunctionType();
+
bool isFunctionDecl();
bool isAnyFunctionDecl();
/// \returns \c true if declaration that this comment is attached to declares
/// a function pointer.
bool isFunctionPointerVarDecl();
- /// \returns \c true if the declaration that this comment is attached to
- /// declares a variable or a field whose type is a function or a block
- /// pointer.
- bool isFunctionOrBlockPointerVarLikeDecl();
bool isFunctionOrMethodVariadic();
bool isObjCMethodDecl();
bool isObjCPropertyDecl();
@@ -249,8 +244,7 @@ public:
StringRef Typo,
const TemplateParameterList *TemplateParameters);
- InlineCommandComment::RenderKind
- getInlineCommandRenderKind(StringRef Name) const;
+ InlineCommandRenderKind getInlineCommandRenderKind(StringRef Name) const;
};
} // end namespace comments
diff --git a/contrib/llvm-project/clang/include/clang/AST/CommentVisitor.h b/contrib/llvm-project/clang/include/clang/AST/CommentVisitor.h
index d9a7439f7cc0..bbb624a23e68 100644
--- a/contrib/llvm-project/clang/include/clang/AST/CommentVisitor.h
+++ b/contrib/llvm-project/clang/include/clang/AST/CommentVisitor.h
@@ -31,8 +31,9 @@ public:
switch (C->getCommentKind()) {
default: llvm_unreachable("Unknown comment kind!");
#define ABSTRACT_COMMENT(COMMENT)
-#define COMMENT(CLASS, PARENT) \
- case Comment::CLASS##Kind: DISPATCH(CLASS, CLASS);
+#define COMMENT(CLASS, PARENT) \
+ case CommentKind::CLASS: \
+ DISPATCH(CLASS, CLASS);
#include "clang/AST/CommentNodes.inc"
#undef ABSTRACT_COMMENT
#undef COMMENT
diff --git a/contrib/llvm-project/clang/include/clang/AST/ComparisonCategories.h b/contrib/llvm-project/clang/include/clang/AST/ComparisonCategories.h
index b41e934142ee..b4ad37e394ce 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ComparisonCategories.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ComparisonCategories.h
@@ -19,6 +19,7 @@
#include "llvm/ADT/DenseMap.h"
#include <array>
#include <cassert>
+#include <optional>
#include <vector>
namespace llvm {
@@ -38,9 +39,8 @@ class NamespaceDecl;
/// An enumeration representing the different comparison categories
/// types.
///
-/// C++2a [cmp.categories.pre] The types weak_equality, strong_equality,
-/// partial_ordering, weak_ordering, and strong_ordering are collectively
-/// termed the comparison category types.
+/// C++20 [cmp.categories.pre] The types partial_ordering, weak_ordering, and
+/// strong_ordering are collectively termed the comparison category types.
enum class ComparisonCategoryType : unsigned char {
PartialOrdering,
WeakOrdering,
@@ -58,7 +58,8 @@ inline ComparisonCategoryType commonComparisonType(ComparisonCategoryType A,
/// Get the comparison category that should be used when comparing values of
/// type \c T.
-Optional<ComparisonCategoryType> getComparisonCategoryForBuiltinCmp(QualType T);
+std::optional<ComparisonCategoryType>
+getComparisonCategoryForBuiltinCmp(QualType T);
/// An enumeration representing the possible results of a three-way
/// comparison. These values map onto instances of comparison category types
@@ -115,8 +116,7 @@ private:
public:
/// The declaration for the comparison category type from the
/// standard library.
- // FIXME: Make this const
- CXXRecordDecl *Record = nullptr;
+ const CXXRecordDecl *Record = nullptr;
/// The Kind of the comparison category type
ComparisonCategoryType Kind;
@@ -146,7 +146,7 @@ public:
return Kind == CCK::PartialOrdering;
}
- /// Converts the specified result kind into the the correct result kind
+ /// Converts the specified result kind into the correct result kind
/// for this category. Specifically it lowers strong equality results to
/// weak equivalence if needed.
ComparisonCategoryResult makeWeakResult(ComparisonCategoryResult Res) const {
diff --git a/contrib/llvm-project/clang/include/clang/AST/ComputeDependence.h b/contrib/llvm-project/clang/include/clang/AST/ComputeDependence.h
index 8db09e6b57d0..f62611cb4c3c 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ComputeDependence.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ComputeDependence.h
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_AST_COMPUTE_DEPENDENCE_H
-#define LLVM_CLANG_AST_COMPUTE_DEPENDENCE_H
+#ifndef LLVM_CLANG_AST_COMPUTEDEPENDENCE_H
+#define LLVM_CLANG_AST_COMPUTEDEPENDENCE_H
#include "clang/AST/DependenceFlags.h"
#include "clang/Basic/ExceptionSpecificationType.h"
@@ -30,7 +30,8 @@ class UnaryExprOrTypeTraitExpr;
class ArraySubscriptExpr;
class MatrixSubscriptExpr;
class CompoundLiteralExpr;
-class CastExpr;
+class ImplicitCastExpr;
+class ExplicitCastExpr;
class BinaryOperator;
class ConditionalOperator;
class BinaryConditionalOperator;
@@ -70,6 +71,7 @@ class CXXPseudoDestructorExpr;
class OverloadExpr;
class DependentScopeDeclRefExpr;
class CXXConstructExpr;
+class CXXTemporaryObjectExpr;
class CXXDefaultInitExpr;
class CXXDefaultArgExpr;
class LambdaExpr;
@@ -77,6 +79,7 @@ class CXXUnresolvedConstructExpr;
class CXXDependentScopeMemberExpr;
class MaterializeTemporaryExpr;
class CXXFoldExpr;
+class CXXParenListInitExpr;
class TypeTraitExpr;
class ConceptSpecializationExpr;
class SYCLUniqueStableNameExpr;
@@ -114,7 +117,8 @@ ExprDependence computeDependence(UnaryExprOrTypeTraitExpr *E);
ExprDependence computeDependence(ArraySubscriptExpr *E);
ExprDependence computeDependence(MatrixSubscriptExpr *E);
ExprDependence computeDependence(CompoundLiteralExpr *E);
-ExprDependence computeDependence(CastExpr *E);
+ExprDependence computeDependence(ImplicitCastExpr *E);
+ExprDependence computeDependence(ExplicitCastExpr *E);
ExprDependence computeDependence(BinaryOperator *E);
ExprDependence computeDependence(ConditionalOperator *E);
ExprDependence computeDependence(BinaryConditionalOperator *E);
@@ -156,6 +160,7 @@ ExprDependence computeDependence(OverloadExpr *E, bool KnownDependent,
bool KnownContainsUnexpandedParameterPack);
ExprDependence computeDependence(DependentScopeDeclRefExpr *E);
ExprDependence computeDependence(CXXConstructExpr *E);
+ExprDependence computeDependence(CXXTemporaryObjectExpr *E);
ExprDependence computeDependence(CXXDefaultInitExpr *E);
ExprDependence computeDependence(CXXDefaultArgExpr *E);
ExprDependence computeDependence(LambdaExpr *E,
@@ -164,6 +169,7 @@ ExprDependence computeDependence(CXXUnresolvedConstructExpr *E);
ExprDependence computeDependence(CXXDependentScopeMemberExpr *E);
ExprDependence computeDependence(MaterializeTemporaryExpr *E);
ExprDependence computeDependence(CXXFoldExpr *E);
+ExprDependence computeDependence(CXXParenListInitExpr *E);
ExprDependence computeDependence(TypeTraitExpr *E);
ExprDependence computeDependence(ConceptSpecializationExpr *E,
bool ValueDependent);
diff --git a/contrib/llvm-project/clang/include/clang/AST/CurrentSourceLocExprScope.h b/contrib/llvm-project/clang/include/clang/AST/CurrentSourceLocExprScope.h
index 4ebbdf63abb5..4f8343efad16 100644
--- a/contrib/llvm-project/clang/include/clang/AST/CurrentSourceLocExprScope.h
+++ b/contrib/llvm-project/clang/include/clang/AST/CurrentSourceLocExprScope.h
@@ -1,9 +1,8 @@
//===--- CurrentSourceLocExprScope.h ----------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -12,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_AST_CURRENT_SOURCE_LOC_EXPR_SCOPE_H
-#define LLVM_CLANG_AST_CURRENT_SOURCE_LOC_EXPR_SCOPE_H
+#ifndef LLVM_CLANG_AST_CURRENTSOURCELOCEXPRSCOPE_H
+#define LLVM_CLANG_AST_CURRENTSOURCELOCEXPRSCOPE_H
#include <cassert>
@@ -72,4 +71,4 @@ private:
} // end namespace clang
-#endif // LLVM_CLANG_AST_CURRENT_SOURCE_LOC_EXPR_SCOPE_H
+#endif // LLVM_CLANG_AST_CURRENTSOURCELOCEXPRSCOPE_H
diff --git a/contrib/llvm-project/clang/include/clang/AST/Decl.h b/contrib/llvm-project/clang/include/clang/AST/Decl.h
index 30a9458bc2ee..f26fb5ad5f13 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Decl.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Decl.h
@@ -13,6 +13,7 @@
#ifndef LLVM_CLANG_AST_DECL_H
#define LLVM_CLANG_AST_DECL_H
+#include "clang/AST/APNumericStorage.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContextAllocate.h"
#include "clang/AST/DeclAccessPair.h"
@@ -35,7 +36,6 @@
#include "clang/Basic/Visibility.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/StringRef.h"
@@ -46,6 +46,7 @@
#include <cassert>
#include <cstddef>
#include <cstdint>
+#include <optional>
#include <string>
#include <utility>
@@ -53,7 +54,6 @@ namespace clang {
class ASTContext;
struct ASTTemplateArgumentListInfo;
-class Attr;
class CompoundStmt;
class DependentFunctionTemplateSpecializationInfo;
class EnumDecl;
@@ -74,9 +74,9 @@ class TemplateArgumentList;
class TemplateArgumentListInfo;
class TemplateParameterList;
class TypeAliasTemplateDecl;
-class TypeLoc;
class UnresolvedSetImpl;
class VarTemplateDecl;
+enum class ImplicitParamKind;
/// The top declaration context.
class TranslationUnitDecl : public Decl,
@@ -293,7 +293,9 @@ public:
/// Pretty-print the unqualified name of this declaration. Can be overloaded
/// by derived classes to provide a more user-friendly name when appropriate.
- virtual void printName(raw_ostream &os) const;
+ virtual void printName(raw_ostream &OS, const PrintingPolicy &Policy) const;
+ /// Calls printName() with the ASTContext printing policy from the decl.
+ void printName(raw_ostream &OS) const;
/// Get the actual, stored name of the declaration, which may be a special
/// name.
@@ -357,7 +359,8 @@ public:
///
/// \param IsKnownNewer \c true if this declaration is known to be newer
/// than \p OldD (for instance, if this declaration is newly-created).
- bool declarationReplaces(NamedDecl *OldD, bool IsKnownNewer = true) const;
+ bool declarationReplaces(const NamedDecl *OldD,
+ bool IsKnownNewer = true) const;
/// Determine whether this declaration has linkage.
bool hasLinkage() const;
@@ -395,9 +398,7 @@ public:
/// Get the linkage from a semantic point of view. Entities in
/// anonymous namespaces are external (in c++98).
- Linkage getFormalLinkage() const {
- return clang::getFormalLinkage(getLinkageInternal());
- }
+ Linkage getFormalLinkage() const;
/// True if this decl has external linkage.
bool hasExternalFormalLinkage() const {
@@ -437,7 +438,7 @@ public:
/// If visibility was explicitly specified for this
/// declaration, return that visibility.
- Optional<Visibility>
+ std::optional<Visibility>
getExplicitVisibility(ExplicitVisibilityKind kind) const;
/// True if the computed linkage is valid. Used for consistency
@@ -454,6 +455,8 @@ public:
return hasCachedLinkage();
}
+ bool isPlaceholderVar(const LangOptions &LangOpts) const;
+
/// Looks through UsingDecls and ObjCCompatibleAliasDecls for
/// the underlying named decl.
NamedDecl *getUnderlyingDecl() {
@@ -542,6 +545,9 @@ public:
class NamespaceDecl : public NamedDecl, public DeclContext,
public Redeclarable<NamespaceDecl>
{
+
+ enum Flags : unsigned { F_Inline = 1 << 0, F_Nested = 1 << 1 };
+
/// The starting location of the source range, pointing
/// to either the namespace or the inline keyword.
SourceLocation LocStart;
@@ -553,11 +559,12 @@ class NamespaceDecl : public NamedDecl, public DeclContext,
/// this namespace or to the first namespace in the chain (the latter case
/// only when this is not the first in the chain), along with a
/// boolean value indicating whether this is an inline namespace.
- llvm::PointerIntPair<NamespaceDecl *, 1, bool> AnonOrFirstNamespaceAndInline;
+ llvm::PointerIntPair<NamespaceDecl *, 2, unsigned>
+ AnonOrFirstNamespaceAndFlags;
NamespaceDecl(ASTContext &C, DeclContext *DC, bool Inline,
SourceLocation StartLoc, SourceLocation IdLoc,
- IdentifierInfo *Id, NamespaceDecl *PrevDecl);
+ IdentifierInfo *Id, NamespaceDecl *PrevDecl, bool Nested);
using redeclarable_base = Redeclarable<NamespaceDecl>;
@@ -569,10 +576,10 @@ public:
friend class ASTDeclReader;
friend class ASTDeclWriter;
- static NamespaceDecl *Create(ASTContext &C, DeclContext *DC,
- bool Inline, SourceLocation StartLoc,
- SourceLocation IdLoc, IdentifierInfo *Id,
- NamespaceDecl *PrevDecl);
+ static NamespaceDecl *Create(ASTContext &C, DeclContext *DC, bool Inline,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, NamespaceDecl *PrevDecl,
+ bool Nested);
static NamespaceDecl *CreateDeserialized(ASTContext &C, unsigned ID);
@@ -601,12 +608,33 @@ public:
/// Returns true if this is an inline namespace declaration.
bool isInline() const {
- return AnonOrFirstNamespaceAndInline.getInt();
+ return AnonOrFirstNamespaceAndFlags.getInt() & F_Inline;
}
/// Set whether this is an inline namespace declaration.
void setInline(bool Inline) {
- AnonOrFirstNamespaceAndInline.setInt(Inline);
+ unsigned F = AnonOrFirstNamespaceAndFlags.getInt();
+ if (Inline)
+ AnonOrFirstNamespaceAndFlags.setInt(F | F_Inline);
+ else
+ AnonOrFirstNamespaceAndFlags.setInt(F & ~F_Inline);
+ }
+
+ /// Returns true if this is a nested namespace declaration.
+ /// \code
+ /// namespace outer::nested { }
+ /// \endcode
+ bool isNested() const {
+ return AnonOrFirstNamespaceAndFlags.getInt() & F_Nested;
+ }
+
+ /// Set whether this is a nested namespace declaration.
+ void setNested(bool Nested) {
+ unsigned F = AnonOrFirstNamespaceAndFlags.getInt();
+ if (Nested)
+ AnonOrFirstNamespaceAndFlags.setInt(F | F_Nested);
+ else
+ AnonOrFirstNamespaceAndFlags.setInt(F & ~F_Nested);
}
/// Returns true if the inline qualifier for \c Name is redundant.
@@ -614,7 +642,9 @@ public:
if (!isInline())
return false;
auto X = lookup(Name);
- auto Y = getParent()->lookup(Name);
+ // We should not perform a lookup within a transparent context, so find a
+ // non-transparent parent context.
+ auto Y = getParent()->getNonTransparentContext()->lookup(Name);
return std::distance(X.begin(), X.end()) ==
std::distance(Y.begin(), Y.end());
}
@@ -633,11 +663,11 @@ public:
/// Retrieve the anonymous namespace nested inside this namespace,
/// if any.
NamespaceDecl *getAnonymousNamespace() const {
- return getOriginalNamespace()->AnonOrFirstNamespaceAndInline.getPointer();
+ return getOriginalNamespace()->AnonOrFirstNamespaceAndFlags.getPointer();
}
void setAnonymousNamespace(NamespaceDecl *D) {
- getOriginalNamespace()->AnonOrFirstNamespaceAndInline.setPointer(D);
+ getOriginalNamespace()->AnonOrFirstNamespaceAndFlags.setPointer(D);
}
/// Retrieves the canonical declaration of this namespace.
@@ -668,6 +698,8 @@ public:
}
};
+class VarDecl;
+
/// Represent the declaration of a variable (in which case it is
/// an lvalue) a function (in which case it is a function designator) or
/// an enum constant.
@@ -689,6 +721,18 @@ public:
/// or declared with the weak or weak-ref attr.
bool isWeak() const;
+ /// Whether this variable is the implicit variable for a lambda init-capture.
+ /// Only VarDecl can be init captures, but both VarDecl and BindingDecl
+ /// can be captured.
+ bool isInitCapture() const;
+
+ // If this is a VarDecl, or a BindindDecl with an
+ // associated decomposed VarDecl, return that VarDecl.
+ VarDecl *getPotentiallyDecomposedVarDecl();
+ const VarDecl *getPotentiallyDecomposedVarDecl() const {
+ return const_cast<ValueDecl *>(this)->getPotentiallyDecomposedVarDecl();
+ }
+
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K >= firstValue && K <= lastValue; }
@@ -861,7 +905,7 @@ struct EvaluatedStmt {
bool HasICEInit : 1;
bool CheckedForICEInit : 1;
- Stmt *Value;
+ LazyDeclStmtPtr Value;
APValue Evaluated;
EvaluatedStmt()
@@ -882,7 +926,10 @@ public:
CallInit,
/// Direct list-initialization (C++11)
- ListInit
+ ListInit,
+
+ /// Parenthesized list-initialization (C++20)
+ ParenListInit
};
/// Kinds of thread-local storage.
@@ -925,12 +972,16 @@ private:
friend class ASTDeclReader;
friend class VarDecl;
+ LLVM_PREFERRED_TYPE(StorageClass)
unsigned SClass : 3;
+ LLVM_PREFERRED_TYPE(ThreadStorageClassSpecifier)
unsigned TSCSpec : 2;
+ LLVM_PREFERRED_TYPE(InitializationStyle)
unsigned InitStyle : 2;
/// Whether this variable is an ARC pseudo-__strong variable; see
/// isARCPseudoStrong() for details.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ARCPseudoStrong : 1;
};
enum { NumVarDeclBits = 8 };
@@ -951,22 +1002,27 @@ protected:
friend class ASTDeclReader;
friend class ParmVarDecl;
+ LLVM_PREFERRED_TYPE(VarDeclBitfields)
unsigned : NumVarDeclBits;
/// Whether this parameter inherits a default argument from a
/// prior declaration.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasInheritedDefaultArg : 1;
/// Describes the kind of default argument for this parameter. By default
/// this is none. If this is normal, then the default argument is stored in
/// the \c VarDecl initializer expression unless we were unable to parse
/// (even an invalid) expression for the default argument.
+ LLVM_PREFERRED_TYPE(DefaultArgKind)
unsigned DefaultArgKind : 2;
/// Whether this parameter undergoes K&R argument promotion.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsKNRPromoted : 1;
/// Whether this parameter is an ObjC method parameter or not.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsObjCMethodParam : 1;
/// If IsObjCMethodParam, a Decl::ObjCDeclQualifier.
@@ -985,51 +1041,64 @@ protected:
friend class ImplicitParamDecl;
friend class VarDecl;
+ LLVM_PREFERRED_TYPE(VarDeclBitfields)
unsigned : NumVarDeclBits;
// FIXME: We need something similar to CXXRecordDecl::DefinitionData.
/// Whether this variable is a definition which was demoted due to
/// module merge.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsThisDeclarationADemotedDefinition : 1;
/// Whether this variable is the exception variable in a C++ catch
/// or an Objective-C @catch statement.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ExceptionVar : 1;
/// Whether this local variable could be allocated in the return
/// slot of its function, enabling the named return value optimization
/// (NRVO).
+ LLVM_PREFERRED_TYPE(bool)
unsigned NRVOVariable : 1;
/// Whether this variable is the for-range-declaration in a C++0x
/// for-range statement.
+ LLVM_PREFERRED_TYPE(bool)
unsigned CXXForRangeDecl : 1;
/// Whether this variable is the for-in loop declaration in Objective-C.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ObjCForDecl : 1;
/// Whether this variable is (C++1z) inline.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsInline : 1;
/// Whether this variable has (C++1z) inline explicitly specified.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsInlineSpecified : 1;
/// Whether this variable is (C++0x) constexpr.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsConstexpr : 1;
/// Whether this variable is the implicit variable for a lambda
/// init-capture.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsInitCapture : 1;
/// Whether this local extern variable's previous declaration was
/// declared in the same block scope. This controls whether we should merge
/// the type of this declaration with its previous declaration.
+ LLVM_PREFERRED_TYPE(bool)
unsigned PreviousDeclInSameBlockScope : 1;
/// Defines kind of the ImplicitParamDecl: 'this', 'self', 'vtt', '_cmd' or
/// something else.
+ LLVM_PREFERRED_TYPE(ImplicitParamKind)
unsigned ImplicitParamKind : 3;
+ LLVM_PREFERRED_TYPE(bool)
unsigned EscapingByref : 1;
};
@@ -1041,7 +1110,7 @@ protected:
};
VarDecl(Kind DK, ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
- SourceLocation IdLoc, IdentifierInfo *Id, QualType T,
+ SourceLocation IdLoc, const IdentifierInfo *Id, QualType T,
TypeSourceInfo *TInfo, StorageClass SC);
using redeclarable_base = Redeclarable<VarDecl>;
@@ -1071,8 +1140,8 @@ public:
static VarDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
- IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
- StorageClass S);
+ const IdentifierInfo *Id, QualType T,
+ TypeSourceInfo *TInfo, StorageClass S);
static VarDecl *CreateDeserialized(ASTContext &C, unsigned ID);
@@ -1316,12 +1385,15 @@ public:
EvaluatedStmt *getEvaluatedStmt() const;
/// Attempt to evaluate the value of the initializer attached to this
- /// declaration, and produce notes explaining why it cannot be evaluated or is
- /// not a constant expression. Returns a pointer to the value if evaluation
- /// succeeded, 0 otherwise.
+ /// declaration, and produce notes explaining why it cannot be evaluated.
+ /// Returns a pointer to the value if evaluation succeeded, 0 otherwise.
APValue *evaluateValue() const;
- APValue *evaluateValue(SmallVectorImpl<PartialDiagnosticAt> &Notes) const;
+private:
+ APValue *evaluateValueImpl(SmallVectorImpl<PartialDiagnosticAt> &Notes,
+ bool IsConstantInitialization) const;
+
+public:
/// Return the already-evaluated value of this variable's
/// initializer, or NULL if the value is not yet known. Returns pointer
/// to untyped APValue if the value could not be evaluated.
@@ -1588,38 +1660,55 @@ public:
/// kind?
QualType::DestructionKind needsDestruction(const ASTContext &Ctx) const;
+ /// Whether this variable has a flexible array member initialized with one
+ /// or more elements. This can only be called for declarations where
+ /// hasInit() is true.
+ ///
+ /// (The standard doesn't allow initializing flexible array members; this is
+ /// a gcc/msvc extension.)
+ bool hasFlexibleArrayInit(const ASTContext &Ctx) const;
+
+ /// If hasFlexibleArrayInit is true, compute the number of additional bytes
+ /// necessary to store those elements. Otherwise, returns zero.
+ ///
+ /// This can only be called for declarations where hasInit() is true.
+ CharUnits getFlexibleArrayInitChars(const ASTContext &Ctx) const;
+
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K >= firstVar && K <= lastVar; }
};
-class ImplicitParamDecl : public VarDecl {
- void anchor() override;
+/// Defines the kind of the implicit parameter: is this an implicit parameter
+/// with pointer to 'this', 'self', '_cmd', virtual table pointers, captured
+/// context or something else.
+enum class ImplicitParamKind {
+ /// Parameter for Objective-C 'self' argument
+ ObjCSelf,
-public:
- /// Defines the kind of the implicit parameter: is this an implicit parameter
- /// with pointer to 'this', 'self', '_cmd', virtual table pointers, captured
- /// context or something else.
- enum ImplicitParamKind : unsigned {
- /// Parameter for Objective-C 'self' argument
- ObjCSelf,
+ /// Parameter for Objective-C '_cmd' argument
+ ObjCCmd,
- /// Parameter for Objective-C '_cmd' argument
- ObjCCmd,
+ /// Parameter for C++ 'this' argument
+ CXXThis,
- /// Parameter for C++ 'this' argument
- CXXThis,
+ /// Parameter for C++ virtual table pointers
+ CXXVTT,
- /// Parameter for C++ virtual table pointers
- CXXVTT,
+ /// Parameter for captured context
+ CapturedContext,
- /// Parameter for captured context
- CapturedContext,
+ /// Parameter for Thread private variable
+ ThreadPrivateVar,
- /// Other implicit parameter
- Other,
- };
+ /// Other implicit parameter
+ Other,
+};
+class ImplicitParamDecl : public VarDecl {
+ void anchor() override;
+
+public:
/// Create implicit parameter.
static ImplicitParamDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation IdLoc, IdentifierInfo *Id,
@@ -1634,7 +1723,7 @@ public:
ImplicitParamKind ParamKind)
: VarDecl(ImplicitParam, C, DC, IdLoc, IdLoc, Id, Type,
/*TInfo=*/nullptr, SC_None) {
- NonParmVarDeclBits.ImplicitParamKind = ParamKind;
+ NonParmVarDeclBits.ImplicitParamKind = llvm::to_underlying(ParamKind);
setImplicit();
}
@@ -1642,7 +1731,7 @@ public:
: VarDecl(ImplicitParam, C, /*DC=*/nullptr, SourceLocation(),
SourceLocation(), /*Id=*/nullptr, Type,
/*TInfo=*/nullptr, SC_None) {
- NonParmVarDeclBits.ImplicitParamKind = ParamKind;
+ NonParmVarDeclBits.ImplicitParamKind = llvm::to_underlying(ParamKind);
setImplicit();
}
@@ -1745,6 +1834,18 @@ public:
ParmVarDeclBits.IsKNRPromoted = promoted;
}
+ bool isExplicitObjectParameter() const {
+ return ExplicitObjectParameterIntroducerLoc.isValid();
+ }
+
+ void setExplicitObjectParameterLoc(SourceLocation Loc) {
+ ExplicitObjectParameterIntroducerLoc = Loc;
+ }
+
+ SourceLocation getExplicitObjectParamThisLoc() const {
+ return ExplicitObjectParameterIntroducerLoc;
+ }
+
Expr *getDefaultArg();
const Expr *getDefaultArg() const {
return const_cast<ParmVarDecl *>(this)->getDefaultArg();
@@ -1811,7 +1912,10 @@ public:
static bool classofKind(Kind K) { return K == ParmVar; }
private:
+ friend class ASTDeclReader;
+
enum { ParameterIndexSentinel = (1 << NumParameterIndexBits) - 1 };
+ SourceLocation ExplicitObjectParameterIntroducerLoc;
void setParameterIndex(unsigned parameterIndex) {
if (parameterIndex >= ParameterIndexSentinel) {
@@ -1835,7 +1939,9 @@ enum class MultiVersionKind {
None,
Target,
CPUSpecific,
- CPUDispatch
+ CPUDispatch,
+ TargetClones,
+ TargetVersion
};
/// Represents a function declaration or definition.
@@ -1869,7 +1975,10 @@ public:
TK_FunctionTemplateSpecialization,
// A function template specialization that hasn't yet been resolved to a
// particular specialized function template.
- TK_DependentFunctionTemplateSpecialization
+ TK_DependentFunctionTemplateSpecialization,
+ // A non-template function which is in a dependent scope.
+ TK_DependentNonTemplate
+
};
/// Stashed information about a defaulted function definition whose body has
@@ -1915,23 +2024,26 @@ private:
/// EndRangeLoc.
SourceLocation EndRangeLoc;
+ SourceLocation DefaultKWLoc;
+
/// The template or declaration that this declaration
/// describes or was instantiated from, respectively.
///
- /// For non-templates, this value will be NULL. For function
- /// declarations that describe a function template, this will be a
- /// pointer to a FunctionTemplateDecl. For member functions
- /// of class template specializations, this will be a MemberSpecializationInfo
+ /// For non-templates this value will be NULL, unless this declaration was
+ /// declared directly inside of a function template, in which case it will
+ /// have a pointer to a FunctionDecl, stored in the NamedDecl. For function
+ /// declarations that describe a function template, this will be a pointer to
+ /// a FunctionTemplateDecl, stored in the NamedDecl. For member functions of
+ /// class template specializations, this will be a MemberSpecializationInfo
/// pointer containing information about the specialization.
/// For function template specializations, this will be a
/// FunctionTemplateSpecializationInfo, which contains information about
/// the template being specialized and the template arguments involved in
/// that specialization.
- llvm::PointerUnion<FunctionTemplateDecl *,
- MemberSpecializationInfo *,
+ llvm::PointerUnion<NamedDecl *, MemberSpecializationInfo *,
FunctionTemplateSpecializationInfo *,
DependentFunctionTemplateSpecializationInfo *>
- TemplateOrSpecialization;
+ TemplateOrSpecialization;
/// Provides source/type location info for the declaration name embedded in
/// the DeclaratorDecl base class.
@@ -1987,8 +2099,8 @@ private:
protected:
FunctionDecl(Kind DK, ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo, QualType T,
- TypeSourceInfo *TInfo, StorageClass S, bool isInlineSpecified,
- ConstexprSpecKind ConstexprKind,
+ TypeSourceInfo *TInfo, StorageClass S, bool UsesFPIntrin,
+ bool isInlineSpecified, ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause = nullptr);
using redeclarable_base = Redeclarable<FunctionDecl>;
@@ -2022,23 +2134,23 @@ public:
static FunctionDecl *
Create(ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
SourceLocation NLoc, DeclarationName N, QualType T,
- TypeSourceInfo *TInfo, StorageClass SC, bool isInlineSpecified = false,
- bool hasWrittenPrototype = true,
+ TypeSourceInfo *TInfo, StorageClass SC, bool UsesFPIntrin = false,
+ bool isInlineSpecified = false, bool hasWrittenPrototype = true,
ConstexprSpecKind ConstexprKind = ConstexprSpecKind::Unspecified,
Expr *TrailingRequiresClause = nullptr) {
DeclarationNameInfo NameInfo(N, NLoc);
return FunctionDecl::Create(C, DC, StartLoc, NameInfo, T, TInfo, SC,
- isInlineSpecified, hasWrittenPrototype,
- ConstexprKind, TrailingRequiresClause);
+ UsesFPIntrin, isInlineSpecified,
+ hasWrittenPrototype, ConstexprKind,
+ TrailingRequiresClause);
}
- static FunctionDecl *Create(ASTContext &C, DeclContext *DC,
- SourceLocation StartLoc,
- const DeclarationNameInfo &NameInfo, QualType T,
- TypeSourceInfo *TInfo, StorageClass SC,
- bool isInlineSpecified, bool hasWrittenPrototype,
- ConstexprSpecKind ConstexprKind,
- Expr *TrailingRequiresClause);
+ static FunctionDecl *
+ Create(ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo, QualType T, TypeSourceInfo *TInfo,
+ StorageClass SC, bool UsesFPIntrin, bool isInlineSpecified,
+ bool hasWrittenPrototype, ConstexprSpecKind ConstexprKind,
+ Expr *TrailingRequiresClause);
static FunctionDecl *CreateDeserialized(ASTContext &C, unsigned ID);
@@ -2098,7 +2210,7 @@ public:
/// declaration to the declaration that is a definition (if there is one).
///
/// \param CheckForPendingFriendDefinition If \c true, also check for friend
- /// declarations that were instantiataed from function definitions.
+ /// declarations that were instantiated from function definitions.
/// Such a declaration behaves as if it is a definition for the
/// purpose of redefinition checking, but isn't actually a "real"
/// definition until its body is instantiated.
@@ -2182,8 +2294,8 @@ public:
/// Whether this virtual function is pure, i.e. makes the containing class
/// abstract.
- bool isPure() const { return FunctionDeclBits.IsPure; }
- void setPure(bool P = true);
+ bool isPureVirtual() const { return FunctionDeclBits.IsPureVirtual; }
+ void setIsPureVirtual(bool P = true);
/// Whether this templated function will be late parsed.
bool isLateTemplateParsed() const {
@@ -2220,6 +2332,16 @@ public:
FunctionDeclBits.IsExplicitlyDefaulted = ED;
}
+ SourceLocation getDefaultLoc() const {
+ return isExplicitlyDefaulted() ? DefaultKWLoc : SourceLocation();
+ }
+
+ void setDefaultLoc(SourceLocation NewLoc) {
+ assert((NewLoc.isInvalid() || isExplicitlyDefaulted()) &&
+ "Can't set default loc is function isn't explicitly defaulted");
+ DefaultKWLoc = NewLoc;
+ }
+
/// True if this method is user-declared and was not
/// deleted or defaulted on its first declaration.
bool isUserProvided() const {
@@ -2230,6 +2352,13 @@ public:
DeclAsWritten->getCanonicalDecl()->isDefaulted());
}
+ bool isIneligibleOrNotSelected() const {
+ return FunctionDeclBits.IsIneligibleOrNotSelected;
+ }
+ void setIneligibleOrNotSelected(bool II) {
+ FunctionDeclBits.IsIneligibleOrNotSelected = II;
+ }
+
/// Whether falling off this function implicitly returns null/zero.
/// If a more specific implicit return value is required, front-ends
/// should synthesize the appropriate return statements.
@@ -2291,6 +2420,21 @@ public:
return getConstexprKind() == ConstexprSpecKind::Consteval;
}
+ void setBodyContainsImmediateEscalatingExpressions(bool Set) {
+ FunctionDeclBits.BodyContainsImmediateEscalatingExpression = Set;
+ }
+
+ bool BodyContainsImmediateEscalatingExpressions() const {
+ return FunctionDeclBits.BodyContainsImmediateEscalatingExpression;
+ }
+
+ bool isImmediateEscalating() const;
+
+ // The function is a C++ immediate function.
+ // This can be either a consteval function, or an immediate escalating
+ // function containing an immediate escalating expression.
+ bool isImmediateFunction() const;
+
/// Whether the instantiation of this function is pending.
/// This bit is set when the decision to instantiate this function is made
/// and unset if and when the function body is created. That leaves out
@@ -2385,7 +2529,7 @@ public:
/// If this function is an allocation/deallocation function that takes
/// the `std::nothrow_t` tag, return true through IsNothrow,
bool isReplaceableGlobalAllocationFunction(
- Optional<unsigned> *AlignmentParam = nullptr,
+ std::optional<unsigned> *AlignmentParam = nullptr,
bool *IsNothrow = nullptr) const;
/// Determine if this function provides an inline implementation of a builtin.
@@ -2437,6 +2581,23 @@ public:
getCanonicalDecl()->FunctionDeclBits.IsMultiVersion = V;
}
+ // Sets that this is a constrained friend where the constraint refers to an
+ // enclosing template.
+ void setFriendConstraintRefersToEnclosingTemplate(bool V = true) {
+ getCanonicalDecl()
+ ->FunctionDeclBits.FriendConstraintRefersToEnclosingTemplate = V;
+ }
+ // Indicates this function is a constrained friend, where the constraint
+ // refers to an enclosing template for hte purposes of [temp.friend]p9.
+ bool FriendConstraintRefersToEnclosingTemplate() const {
+ return getCanonicalDecl()
+ ->FunctionDeclBits.FriendConstraintRefersToEnclosingTemplate;
+ }
+
+ /// Determine whether a function is a friend function that cannot be
+ /// redeclared outside of its class, per C++ [temp.friend]p9.
+ bool isMemberLikeConstrainedFriend() const;
+
/// Gets the kind of multiversioning attribute this declaration has. Note that
/// this can return a value even if the function is not multiversion, such as
/// the case of 'target'.
@@ -2454,6 +2615,10 @@ public:
/// the target functionality.
bool isTargetMultiVersion() const;
+ /// True if this function is a multiversioned dispatch function as a part of
+ /// the target-clones functionality.
+ bool isTargetClonesMultiVersion() const;
+
/// \brief Get the associated-constraints of this function declaration.
/// Currently, this will either be a vector of size 1 containing the
/// trailing-requires-clause or an empty vector.
@@ -2515,6 +2680,23 @@ public:
/// parameters have default arguments (in C++).
unsigned getMinRequiredArguments() const;
+ /// Returns the minimum number of non-object arguments needed to call this
+ /// function. This produces the same value as getMinRequiredArguments except
+ /// it does not count the explicit object argument, if any.
+ unsigned getMinRequiredExplicitArguments() const;
+
+ bool hasCXXExplicitFunctionObjectParameter() const;
+
+ unsigned getNumNonObjectParams() const;
+
+ const ParmVarDecl *getNonObjectParameter(unsigned I) const {
+ return getParamDecl(hasCXXExplicitFunctionObjectParameter() ? I + 1 : I);
+ }
+
+ ParmVarDecl *getNonObjectParameter(unsigned I) {
+ return getParamDecl(hasCXXExplicitFunctionObjectParameter() ? I + 1 : I);
+ }
+
/// Determine whether this function has a single parameter, or multiple
/// parameters where all but the first have default arguments.
///
@@ -2591,6 +2773,14 @@ public:
FunctionDeclBits.IsInline = I;
}
+ /// Determine whether the function was declared in source context
+ /// that requires constrained FP intrinsics
+ bool UsesFPIntrin() const { return FunctionDeclBits.UsesFPIntrin; }
+
+ /// Set whether the function was declared in source context
+ /// that requires constrained FP intrinsics
+ void setUsesFPIntrin(bool I) { FunctionDeclBits.UsesFPIntrin = I; }
+
/// Flag that this function is implicitly inline.
void setImplicitlyInline(bool I = true) { FunctionDeclBits.IsInline = I; }
@@ -2655,6 +2845,13 @@ public:
setInstantiationOfMemberFunction(getASTContext(), FD, TSK);
}
+ /// Specify that this function declaration was instantiated from a
+ /// FunctionDecl FD. This is only used if this is a function declaration
+ /// declared locally inside of a function template.
+ void setInstantiatedFromDecl(FunctionDecl *FD);
+
+ FunctionDecl *getInstantiatedFromDecl() const;
+
/// Retrieves the function template that is described by this
/// function declaration.
///
@@ -2673,9 +2870,7 @@ public:
/// Determine whether this function is a function template
/// specialization.
- bool isFunctionTemplateSpecialization() const {
- return getPrimaryTemplate() != nullptr;
- }
+ bool isFunctionTemplateSpecialization() const;
/// If this function is actually a function template specialization,
/// retrieve information about this function template specialization.
@@ -2758,9 +2953,9 @@ public:
/// Specifies that this function declaration is actually a
/// dependent function template specialization.
- void setDependentTemplateSpecialization(ASTContext &Context,
- const UnresolvedSetImpl &Templates,
- const TemplateArgumentListInfo &TemplateArgs);
+ void setDependentTemplateSpecialization(
+ ASTContext &Context, const UnresolvedSetImpl &Templates,
+ const TemplateArgumentListInfo *TemplateArgs);
DependentFunctionTemplateSpecializationInfo *
getDependentSpecializationInfo() const;
@@ -2820,11 +3015,7 @@ public:
/// Represents a member of a struct/union/class.
class FieldDecl : public DeclaratorDecl, public Mergeable<FieldDecl> {
- unsigned BitField : 1;
- unsigned Mutable : 1;
- mutable unsigned CachedFieldIndex : 30;
-
- /// The kinds of value we can store in InitializerOrBitWidth.
+ /// The kinds of value we can store in StorageKind.
///
/// Note that this is compatible with InClassInitStyle except for
/// ISK_CapturedVLAType.
@@ -2847,10 +3038,18 @@ class FieldDecl : public DeclaratorDecl, public Mergeable<FieldDecl> {
ISK_CapturedVLAType,
};
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned BitField : 1;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned Mutable : 1;
+ LLVM_PREFERRED_TYPE(InitStorageKind)
+ unsigned StorageKind : 2;
+ mutable unsigned CachedFieldIndex : 28;
+
/// If this is a bitfield with a default member initializer, this
/// structure is used to represent the two expressions.
- struct InitAndBitWidth {
- Expr *Init;
+ struct InitAndBitWidthStorage {
+ LazyDeclStmtPtr Init;
Expr *BitWidth;
};
@@ -2863,16 +3062,25 @@ class FieldDecl : public DeclaratorDecl, public Mergeable<FieldDecl> {
/// and attached.
// FIXME: Tail-allocate this to reduce the size of FieldDecl in the
// overwhelmingly common case that we have none of these things.
- llvm::PointerIntPair<void *, 2, InitStorageKind> InitStorage;
+ union {
+ // Active member if ISK is not ISK_CapturedVLAType and BitField is false.
+ LazyDeclStmtPtr Init;
+ // Active member if ISK is ISK_NoInit and BitField is true.
+ Expr *BitWidth;
+ // Active member if ISK is ISK_InClass*Init and BitField is true.
+ InitAndBitWidthStorage *InitAndBitWidth;
+ // Active member if ISK is ISK_CapturedVLAType.
+ const VariableArrayType *CapturedVLAType;
+ };
protected:
FieldDecl(Kind DK, DeclContext *DC, SourceLocation StartLoc,
- SourceLocation IdLoc, IdentifierInfo *Id,
- QualType T, TypeSourceInfo *TInfo, Expr *BW, bool Mutable,
+ SourceLocation IdLoc, IdentifierInfo *Id, QualType T,
+ TypeSourceInfo *TInfo, Expr *BW, bool Mutable,
InClassInitStyle InitStyle)
- : DeclaratorDecl(DK, DC, IdLoc, Id, T, TInfo, StartLoc),
- BitField(false), Mutable(Mutable), CachedFieldIndex(0),
- InitStorage(nullptr, (InitStorageKind) InitStyle) {
+ : DeclaratorDecl(DK, DC, IdLoc, Id, T, TInfo, StartLoc), BitField(false),
+ Mutable(Mutable), StorageKind((InitStorageKind)InitStyle),
+ CachedFieldIndex(0), Init() {
if (BW)
setBitWidth(BW);
}
@@ -2908,15 +3116,16 @@ public:
/// store the data for the anonymous union or struct.
bool isAnonymousStructOrUnion() const;
+ /// Returns the expression that represents the bit width, if this field
+ /// is a bit field. For non-bitfields, this returns \c nullptr.
Expr *getBitWidth() const {
if (!BitField)
return nullptr;
- void *Ptr = InitStorage.getPointer();
- if (getInClassInitStyle())
- return static_cast<InitAndBitWidth*>(Ptr)->BitWidth;
- return static_cast<Expr*>(Ptr);
+ return hasInClassInitializer() ? InitAndBitWidth->BitWidth : BitWidth;
}
+ /// Computes the bit width of this field, if this is a bit field.
+ /// May not be called on non-bitfields.
unsigned getBitWidthValue(const ASTContext &Ctx) const;
/// Set the bit-field width for this member.
@@ -2925,11 +3134,11 @@ public:
assert(!hasCapturedVLAType() && !BitField &&
"bit width or captured type already set");
assert(Width && "no bit width specified");
- InitStorage.setPointer(
- InitStorage.getInt()
- ? new (getASTContext())
- InitAndBitWidth{getInClassInitializer(), Width}
- : static_cast<void*>(Width));
+ if (hasInClassInitializer())
+ InitAndBitWidth =
+ new (getASTContext()) InitAndBitWidthStorage{Init, Width};
+ else
+ BitWidth = Width;
BitField = true;
}
@@ -2937,7 +3146,11 @@ public:
// Note: used by some clients (i.e., do not remove it).
void removeBitWidth() {
assert(isBitField() && "no bitfield width to remove");
- InitStorage.setPointer(getInClassInitializer());
+ if (hasInClassInitializer()) {
+ // Read the old initializer before we change the active union member.
+ auto ExistingInit = InitAndBitWidth->Init;
+ Init = ExistingInit;
+ }
BitField = false;
}
@@ -2951,11 +3164,14 @@ public:
/// [[no_unique_address]] attribute.
bool isZeroSize(const ASTContext &Ctx) const;
+ /// Determine if this field is of potentially-overlapping class type, that
+ /// is, subobject with the [[no_unique_address]] attribute
+ bool isPotentiallyOverlapping() const;
+
/// Get the kind of (C++11) default member initializer that this field has.
InClassInitStyle getInClassInitStyle() const {
- InitStorageKind storageKind = InitStorage.getInt();
- return (storageKind == ISK_CapturedVLAType
- ? ICIS_NoInit : (InClassInitStyle) storageKind);
+ return (StorageKind == ISK_CapturedVLAType ? ICIS_NoInit
+ : (InClassInitStyle)StorageKind);
}
/// Determine whether this member has a C++11 default member initializer.
@@ -2963,44 +3179,44 @@ public:
return getInClassInitStyle() != ICIS_NoInit;
}
+ /// Determine whether getInClassInitializer() would return a non-null pointer
+ /// without deserializing the initializer.
+ bool hasNonNullInClassInitializer() const {
+ return hasInClassInitializer() && (BitField ? InitAndBitWidth->Init : Init);
+ }
+
/// Get the C++11 default member initializer for this member, or null if one
/// has not been set. If a valid declaration has a default member initializer,
/// but this returns null, then we have not parsed and attached it yet.
- Expr *getInClassInitializer() const {
- if (!hasInClassInitializer())
- return nullptr;
- void *Ptr = InitStorage.getPointer();
- if (BitField)
- return static_cast<InitAndBitWidth*>(Ptr)->Init;
- return static_cast<Expr*>(Ptr);
- }
+ Expr *getInClassInitializer() const;
/// Set the C++11 in-class initializer for this member.
- void setInClassInitializer(Expr *Init) {
- assert(hasInClassInitializer() && !getInClassInitializer());
- if (BitField)
- static_cast<InitAndBitWidth*>(InitStorage.getPointer())->Init = Init;
- else
- InitStorage.setPointer(Init);
- }
+ void setInClassInitializer(Expr *NewInit);
+private:
+ void setLazyInClassInitializer(LazyDeclStmtPtr NewInit);
+
+public:
/// Remove the C++11 in-class initializer from this member.
void removeInClassInitializer() {
assert(hasInClassInitializer() && "no initializer to remove");
- InitStorage.setPointerAndInt(getBitWidth(), ISK_NoInit);
+ StorageKind = ISK_NoInit;
+ if (BitField) {
+ // Read the bit width before we change the active union member.
+ Expr *ExistingBitWidth = InitAndBitWidth->BitWidth;
+ BitWidth = ExistingBitWidth;
+ }
}
/// Determine whether this member captures the variable length array
/// type.
bool hasCapturedVLAType() const {
- return InitStorage.getInt() == ISK_CapturedVLAType;
+ return StorageKind == ISK_CapturedVLAType;
}
/// Get the captured variable length array type.
const VariableArrayType *getCapturedVLAType() const {
- return hasCapturedVLAType() ? static_cast<const VariableArrayType *>(
- InitStorage.getPointer())
- : nullptr;
+ return hasCapturedVLAType() ? CapturedVLAType : nullptr;
}
/// Set the captured variable length array type for this field.
@@ -3028,21 +3244,24 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K >= firstField && K <= lastField; }
+
+ void printName(raw_ostream &OS, const PrintingPolicy &Policy) const override;
};
/// An instance of this object exists for each enum constant
/// that is defined. For example, in "enum X {a,b}", each of a/b are
/// EnumConstantDecl's, X is an instance of EnumDecl, and the type of a/b is a
/// TagType for the X EnumDecl.
-class EnumConstantDecl : public ValueDecl, public Mergeable<EnumConstantDecl> {
+class EnumConstantDecl : public ValueDecl,
+ public Mergeable<EnumConstantDecl>,
+ public APIntStorage {
Stmt *Init; // an integer constant expression
- llvm::APSInt Val; // The value.
+ bool IsUnsigned;
protected:
- EnumConstantDecl(DeclContext *DC, SourceLocation L,
+ EnumConstantDecl(const ASTContext &C, DeclContext *DC, SourceLocation L,
IdentifierInfo *Id, QualType T, Expr *E,
- const llvm::APSInt &V)
- : ValueDecl(EnumConstant, DC, L, Id, T), Init((Stmt*)E), Val(V) {}
+ const llvm::APSInt &V);
public:
friend class StmtIteratorBase;
@@ -3055,10 +3274,15 @@ public:
const Expr *getInitExpr() const { return (const Expr*) Init; }
Expr *getInitExpr() { return (Expr*) Init; }
- const llvm::APSInt &getInitVal() const { return Val; }
+ llvm::APSInt getInitVal() const {
+ return llvm::APSInt(getValue(), IsUnsigned);
+ }
void setInitExpr(Expr *E) { Init = (Stmt*) E; }
- void setInitVal(const llvm::APSInt &V) { Val = V; }
+ void setInitVal(const ASTContext &C, const llvm::APSInt &V) {
+ setValue(C, V);
+ IsUnsigned = V.isUnsigned();
+ }
SourceRange getSourceRange() const override LLVM_READONLY;
@@ -3096,7 +3320,7 @@ public:
using chain_iterator = ArrayRef<NamedDecl *>::const_iterator;
ArrayRef<NamedDecl *> chain() const {
- return llvm::makeArrayRef(Chaining, ChainingSize);
+ return llvm::ArrayRef(Chaining, ChainingSize);
}
chain_iterator chain_begin() const { return chain().begin(); }
chain_iterator chain_end() const { return chain().end(); }
@@ -3470,6 +3694,24 @@ public:
/// parameters.
bool isDependentType() const { return isDependentContext(); }
+ /// Whether this declaration was a definition in some module but was forced
+ /// to be a declaration.
+ ///
+ /// Useful for clients checking if a module has a definition of a specific
+ /// symbol and not interested in the final AST with deduplicated definitions.
+ bool isThisDeclarationADemotedDefinition() const {
+ return TagDeclBits.IsThisDeclarationADemotedDefinition;
+ }
+
+ /// Mark a definition as a declaration and maintain information it _was_
+ /// a definition.
+ void demoteThisDefinitionToDeclaration() {
+ assert(isCompleteDefinition() &&
+ "Should demote definitions only, not forward declarations");
+ setCompleteDefinition(false);
+ TagDeclBits.IsThisDeclarationADemotedDefinition = true;
+ }
+
/// Starts the definition of this tag declaration.
///
/// This method should be invoked at the beginning of the definition
@@ -3495,13 +3737,15 @@ public:
return static_cast<TagKind>(TagDeclBits.TagDeclKind);
}
- void setTagKind(TagKind TK) { TagDeclBits.TagDeclKind = TK; }
+ void setTagKind(TagKind TK) {
+ TagDeclBits.TagDeclKind = llvm::to_underlying(TK);
+ }
- bool isStruct() const { return getTagKind() == TTK_Struct; }
- bool isInterface() const { return getTagKind() == TTK_Interface; }
- bool isClass() const { return getTagKind() == TTK_Class; }
- bool isUnion() const { return getTagKind() == TTK_Union; }
- bool isEnum() const { return getTagKind() == TTK_Enum; }
+ bool isStruct() const { return getTagKind() == TagTypeKind::Struct; }
+ bool isInterface() const { return getTagKind() == TagTypeKind::Interface; }
+ bool isClass() const { return getTagKind() == TagTypeKind::Class; }
+ bool isUnion() const { return getTagKind() == TagTypeKind::Union; }
+ bool isEnum() const { return getTagKind() == TagTypeKind::Enum; }
/// Is this tag type named, either directly or via being defined in
/// a typedef of this type?
@@ -3555,6 +3799,9 @@ public:
return getExtInfo()->TemplParamLists[i];
}
+ using TypeDecl::printName;
+ void printName(raw_ostream &OS, const PrintingPolicy &Policy) const override;
+
void setTemplateParameterListsInfo(ASTContext &Context,
ArrayRef<TemplateParameterList *> TPLists);
@@ -3688,6 +3935,10 @@ public:
bool IsFixed);
static EnumDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+ /// Overrides to provide correct range when there's an enum-base specifier
+ /// with forward declarations.
+ SourceRange getSourceRange() const override LLVM_READONLY;
+
/// When created, the EnumDecl corresponds to a
/// forward-declared enum. This method is used to mark the
/// declaration as being defined; its enumerators have already been
@@ -3769,6 +4020,11 @@ public:
/// -101 1001011 8
unsigned getNumNegativeBits() const { return EnumDeclBits.NumNegativeBits; }
+ /// Calculates the [Min,Max) values the enum can store based on the
+ /// NumPositiveBits and NumNegativeBits. This matters for enums that do not
+ /// have a fixed underlying type.
+ void getValueRange(llvm::APInt &Max, llvm::APInt &Min) const;
+
/// Returns true if this is a C++11 scoped enumeration.
bool isScoped() const { return EnumDeclBits.IsScoped; }
@@ -3839,6 +4095,29 @@ public:
static bool classofKind(Kind K) { return K == Enum; }
};
+/// Enum that represents the different ways arguments are passed to and
+/// returned from function calls. This takes into account the target-specific
+/// and version-specific rules along with the rules determined by the
+/// language.
+enum class RecordArgPassingKind {
+ /// The argument of this type can be passed directly in registers.
+ CanPassInRegs,
+
+ /// The argument of this type cannot be passed directly in registers.
+ /// Records containing this type as a subobject are not forced to be passed
+ /// indirectly. This value is used only in C++. This value is required by
+ /// C++ because, in uncommon situations, it is possible for a class to have
+ /// only trivial copy/move constructors even when one of its subobjects has
+ /// a non-trivial copy/move constructor (if e.g. the corresponding copy/move
+ /// constructor in the derived class is deleted).
+ CannotPassInRegs,
+
+ /// The argument of this type cannot be passed directly in registers.
+ /// Records containing this type as a subobject are forced to be passed
+ /// indirectly.
+ CanNeverPassInRegs
+};
+
/// Represents a struct/union/class. For example:
/// struct X; // Forward declaration, no "body".
/// union Y { int A, B; }; // Has body with members A and B (FieldDecls).
@@ -3848,28 +4127,7 @@ class RecordDecl : public TagDecl {
// to save some space. Use the provided accessors to access it.
public:
friend class DeclContext;
- /// Enum that represents the different ways arguments are passed to and
- /// returned from function calls. This takes into account the target-specific
- /// and version-specific rules along with the rules determined by the
- /// language.
- enum ArgPassingKind : unsigned {
- /// The argument of this type can be passed directly in registers.
- APK_CanPassInRegs,
-
- /// The argument of this type cannot be passed directly in registers.
- /// Records containing this type as a subobject are not forced to be passed
- /// indirectly. This value is used only in C++. This value is required by
- /// C++ because, in uncommon situations, it is possible for a class to have
- /// only trivial copy/move constructors even when one of its subobjects has
- /// a non-trivial copy/move constructor (if e.g. the corresponding copy/move
- /// constructor in the derived class is deleted).
- APK_CannotPassInRegs,
-
- /// The argument of this type cannot be passed directly in registers.
- /// Records containing this type as a subobject are forced to be passed
- /// indirectly.
- APK_CanNeverPassInRegs
- };
+ friend class ASTDeclReader;
protected:
RecordDecl(Kind DK, TagKind TK, const ASTContext &C, DeclContext *DC,
@@ -3994,15 +4252,16 @@ public:
/// it must have at least one trivial, non-deleted copy or move constructor.
/// FIXME: This should be set as part of completeDefinition.
bool canPassInRegisters() const {
- return getArgPassingRestrictions() == APK_CanPassInRegs;
+ return getArgPassingRestrictions() == RecordArgPassingKind::CanPassInRegs;
}
- ArgPassingKind getArgPassingRestrictions() const {
- return static_cast<ArgPassingKind>(RecordDeclBits.ArgPassingRestrictions);
+ RecordArgPassingKind getArgPassingRestrictions() const {
+ return static_cast<RecordArgPassingKind>(
+ RecordDeclBits.ArgPassingRestrictions);
}
- void setArgPassingRestrictions(ArgPassingKind Kind) {
- RecordDeclBits.ArgPassingRestrictions = Kind;
+ void setArgPassingRestrictions(RecordArgPassingKind Kind) {
+ RecordDeclBits.ArgPassingRestrictions = llvm::to_underlying(Kind);
}
bool isParamDestroyedInCallee() const {
@@ -4013,6 +4272,12 @@ public:
RecordDeclBits.ParamDestroyedInCallee = V;
}
+ bool isRandomized() const { return RecordDeclBits.IsRandomized; }
+
+ void setIsRandomized(bool V) { RecordDeclBits.IsRandomized = V; }
+
+ void reorderDecls(const SmallVectorImpl<Decl *> &Decls);
+
/// Determines whether this declaration represents the
/// injected class name.
///
@@ -4097,9 +4362,16 @@ public:
/// nullptr is returned if no named data member exists.
const FieldDecl *findFirstNamedDataMember() const;
+ /// Get precomputed ODRHash or add a new one.
+ unsigned getODRHash();
+
private:
/// Deserialize just the fields.
void LoadFieldsFromExternalStorage() const;
+
+ /// True if a valid hash is stored in ODRHash.
+ bool hasODRHash() const { return RecordDeclBits.ODRHash; }
+ void setODRHash(unsigned Hash) { RecordDeclBits.ODRHash = Hash; }
};
class FileScopeAsmDecl : public Decl {
@@ -4134,6 +4406,41 @@ public:
static bool classofKind(Kind K) { return K == FileScopeAsm; }
};
+/// A declaration that models statements at global scope. This declaration
+/// supports incremental and interactive C/C++.
+///
+/// \note This is used in libInterpreter, clang -cc1 -fincremental-extensions
+/// and in tools such as clang-repl.
+class TopLevelStmtDecl : public Decl {
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+
+ Stmt *Statement = nullptr;
+ bool IsSemiMissing = false;
+
+ TopLevelStmtDecl(DeclContext *DC, SourceLocation L, Stmt *S)
+ : Decl(TopLevelStmt, DC, L), Statement(S) {}
+
+ virtual void anchor();
+
+public:
+ static TopLevelStmtDecl *Create(ASTContext &C, Stmt *Statement);
+ static TopLevelStmtDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ SourceRange getSourceRange() const override LLVM_READONLY;
+ Stmt *getStmt() { return Statement; }
+ const Stmt *getStmt() const { return Statement; }
+ void setStmt(Stmt *S) {
+ assert(IsSemiMissing && "Operation supported for printing values only!");
+ Statement = S;
+ }
+ bool isSemiMissing() const { return IsSemiMissing; }
+ void setSemiMissing(bool Missing = true) { IsSemiMissing = Missing; }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classofKind(Kind K) { return K == TopLevelStmt; }
+};
+
/// Represents a block literal declaration, which is like an
/// unnamed FunctionDecl. For example:
/// ^{ statement-body } or ^(int arg1, float arg2){ statement-body }
@@ -4429,6 +4736,16 @@ public:
/// @import std.vector;
/// \endcode
///
+/// A C++20 module import declaration imports the named module or partition.
+/// Periods are permitted in C++20 module names, but have no semantic meaning.
+/// For example:
+/// \code
+/// import NamedModule;
+/// import :SomePartition; // Must be a partition of the current module.
+/// import Names.Like.this; // Allowed.
+/// import :and.Also.Partition.names;
+/// \endcode
+///
/// Import declarations can also be implicitly generated from
/// \#include/\#import directives.
class ImportDecl final : public Decl,
@@ -4505,7 +4822,7 @@ public:
static bool classofKind(Kind K) { return K == Import; }
};
-/// Represents a C++ Modules TS module export declaration.
+/// Represents a standard C++ module export declaration.
///
/// For example:
/// \code
@@ -4572,11 +4889,56 @@ public:
static bool classofKind(Kind K) { return K == Empty; }
};
+/// HLSLBufferDecl - Represent a cbuffer or tbuffer declaration.
+class HLSLBufferDecl final : public NamedDecl, public DeclContext {
+ /// LBraceLoc - The ending location of the source range.
+ SourceLocation LBraceLoc;
+ /// RBraceLoc - The ending location of the source range.
+ SourceLocation RBraceLoc;
+ /// KwLoc - The location of the cbuffer or tbuffer keyword.
+ SourceLocation KwLoc;
+ /// IsCBuffer - Whether the buffer is a cbuffer (and not a tbuffer).
+ bool IsCBuffer;
+
+ HLSLBufferDecl(DeclContext *DC, bool CBuffer, SourceLocation KwLoc,
+ IdentifierInfo *ID, SourceLocation IDLoc,
+ SourceLocation LBrace);
+
+public:
+ static HLSLBufferDecl *Create(ASTContext &C, DeclContext *LexicalParent,
+ bool CBuffer, SourceLocation KwLoc,
+ IdentifierInfo *ID, SourceLocation IDLoc,
+ SourceLocation LBrace);
+ static HLSLBufferDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ SourceRange getSourceRange() const override LLVM_READONLY {
+ return SourceRange(getLocStart(), RBraceLoc);
+ }
+ SourceLocation getLocStart() const LLVM_READONLY { return KwLoc; }
+ SourceLocation getLBraceLoc() const { return LBraceLoc; }
+ SourceLocation getRBraceLoc() const { return RBraceLoc; }
+ void setRBraceLoc(SourceLocation L) { RBraceLoc = L; }
+ bool isCBuffer() const { return IsCBuffer; }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classofKind(Kind K) { return K == HLSLBuffer; }
+ static DeclContext *castToDeclContext(const HLSLBufferDecl *D) {
+ return static_cast<DeclContext *>(const_cast<HLSLBufferDecl *>(D));
+ }
+ static HLSLBufferDecl *castFromDeclContext(const DeclContext *DC) {
+ return static_cast<HLSLBufferDecl *>(const_cast<DeclContext *>(DC));
+ }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
+
/// Insertion operator for diagnostics. This allows sending NamedDecl's
/// into a diagnostic with <<.
inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &PD,
const NamedDecl *ND) {
- PD.AddTaggedVal(reinterpret_cast<intptr_t>(ND),
+ PD.AddTaggedVal(reinterpret_cast<uint64_t>(ND),
DiagnosticsEngine::ak_nameddecl);
return PD;
}
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclBase.h b/contrib/llvm-project/clang/include/clang/AST/DeclBase.h
index 482d2889a25a..eb7a1a320600 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclBase.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclBase.h
@@ -16,8 +16,10 @@
#include "clang/AST/ASTDumperUtils.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/DeclarationName.h"
+#include "clang/AST/SelectorLocationsKind.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/ArrayRef.h"
@@ -48,18 +50,12 @@ class ExternalSourceSymbolAttr;
class FunctionDecl;
class FunctionType;
class IdentifierInfo;
-enum Linkage : unsigned char;
+enum class Linkage : unsigned char;
class LinkageSpecDecl;
class Module;
class NamedDecl;
-class ObjCCategoryDecl;
-class ObjCCategoryImplDecl;
class ObjCContainerDecl;
-class ObjCImplDecl;
-class ObjCImplementationDecl;
-class ObjCInterfaceDecl;
class ObjCMethodDecl;
-class ObjCProtocolDecl;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
@@ -216,7 +212,7 @@ public:
/// The kind of ownership a declaration has, for visibility purposes.
/// This enumeration is designed such that higher values represent higher
/// levels of name hiding.
- enum class ModuleOwnershipKind : unsigned {
+ enum class ModuleOwnershipKind : unsigned char {
/// This declaration is not owned by a module.
Unowned,
@@ -231,8 +227,15 @@ public:
/// module is imported.
VisibleWhenImported,
+ /// This declaration has an owning module, and is visible to lookups
+ /// that occurs within that module. And it is reachable in other module
+ /// when the owning module is transitively imported.
+ ReachableWhenImported,
+
/// This declaration has an owning module, but is only visible to
/// lookups that occur within that module.
+ /// The discarded declarations in global module fragment belongs
+ /// to this group too.
ModulePrivate
};
@@ -241,8 +244,8 @@ protected:
/// DeclContext. These pointers form the linked list that is
/// traversed via DeclContext's decls_begin()/decls_end().
///
- /// The extra two bits are used for the ModuleOwnershipKind.
- llvm::PointerIntPair<Decl *, 2, ModuleOwnershipKind> NextInContextAndBits;
+ /// The extra three bits are used for the ModuleOwnershipKind.
+ llvm::PointerIntPair<Decl *, 3, ModuleOwnershipKind> NextInContextAndBits;
private:
friend class DeclContext;
@@ -282,31 +285,38 @@ private:
SourceLocation Loc;
/// DeclKind - This indicates which class this is.
+ LLVM_PREFERRED_TYPE(Kind)
unsigned DeclKind : 7;
/// InvalidDecl - This indicates a semantic error occurred.
+ LLVM_PREFERRED_TYPE(bool)
unsigned InvalidDecl : 1;
/// HasAttrs - This indicates whether the decl has attributes or not.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasAttrs : 1;
/// Implicit - Whether this declaration was implicitly generated by
/// the implementation rather than explicitly written by the user.
+ LLVM_PREFERRED_TYPE(bool)
unsigned Implicit : 1;
/// Whether this declaration was "used", meaning that a definition is
/// required.
+ LLVM_PREFERRED_TYPE(bool)
unsigned Used : 1;
/// Whether this declaration was "referenced".
/// The difference with 'Used' is whether the reference appears in a
/// evaluated context or not, e.g. functions used in uninstantiated templates
/// are regarded as "referenced" but not "used".
+ LLVM_PREFERRED_TYPE(bool)
unsigned Referenced : 1;
/// Whether this declaration is a top-level declaration (function,
/// global variable, etc.) that is lexically inside an objc container
/// definition.
+ LLVM_PREFERRED_TYPE(bool)
unsigned TopLevelDeclInObjCContainer : 1;
/// Whether statistic collection is enabled.
@@ -319,20 +329,24 @@ protected:
friend class ASTReader;
friend class CXXClassMemberWrapper;
friend class LinkageComputer;
+ friend class RecordDecl;
template<typename decl_type> friend class Redeclarable;
/// Access - Used by C++ decls for the access specifier.
// NOTE: VC++ treats enums as signed, avoid using the AccessSpecifier enum
+ LLVM_PREFERRED_TYPE(AccessSpecifier)
unsigned Access : 2;
/// Whether this declaration was loaded from an AST file.
+ LLVM_PREFERRED_TYPE(bool)
unsigned FromASTFile : 1;
/// IdentifierNamespace - This specifies what IDNS_* namespace this lives in.
+ LLVM_PREFERRED_TYPE(IdentifierNamespace)
unsigned IdentifierNamespace : 14;
/// If 0, we have not computed the linkage of this declaration.
- /// Otherwise, it is the linkage + 1.
+ LLVM_PREFERRED_TYPE(Linkage)
mutable unsigned CacheValidAndLinkage : 3;
/// Allocate memory for a deserialized declaration.
@@ -352,7 +366,7 @@ protected:
DeclContext *Parent, std::size_t Extra = 0);
private:
- bool AccessDeclContextSanity() const;
+ bool AccessDeclContextCheck() const;
/// Get the module ownership kind to use for a local lexical child of \p DC,
/// which may be either a local or (rarely) an imported declaration.
@@ -383,7 +397,7 @@ protected:
Implicit(false), Used(false), Referenced(false),
TopLevelDeclInObjCContainer(false), Access(AS_none), FromASTFile(0),
IdentifierNamespace(getIdentifierNamespaceForKind(DK)),
- CacheValidAndLinkage(0) {
+ CacheValidAndLinkage(llvm::to_underlying(Linkage::Invalid)) {
if (StatisticsEnabled) add(DK);
}
@@ -392,7 +406,7 @@ protected:
Used(false), Referenced(false), TopLevelDeclInObjCContainer(false),
Access(AS_none), FromASTFile(0),
IdentifierNamespace(getIdentifierNamespaceForKind(DK)),
- CacheValidAndLinkage(0) {
+ CacheValidAndLinkage(llvm::to_underlying(Linkage::Invalid)) {
if (StatisticsEnabled) add(DK);
}
@@ -402,11 +416,11 @@ protected:
void updateOutOfDate(IdentifierInfo &II) const;
Linkage getCachedLinkage() const {
- return Linkage(CacheValidAndLinkage - 1);
+ return static_cast<Linkage>(CacheValidAndLinkage);
}
void setCachedLinkage(Linkage L) const {
- CacheValidAndLinkage = L + 1;
+ CacheValidAndLinkage = llvm::to_underlying(L);
}
bool hasCachedLinkage() const {
@@ -445,6 +459,14 @@ public:
return const_cast<Decl*>(this)->getDeclContext();
}
+ /// Return the non transparent context.
+ /// See the comment of `DeclContext::isTransparentContext()` for the
+ /// definition of transparent context.
+ DeclContext *getNonTransparentDeclContext();
+ const DeclContext *getNonTransparentDeclContext() const {
+ return const_cast<Decl *>(this)->getNonTransparentDeclContext();
+ }
+
/// Find the innermost non-closure ancestor of this declaration,
/// walking up through blocks, lambdas, etc. If that ancestor is
/// not a code context (!isFunctionOrMethod()), returns null.
@@ -464,6 +486,18 @@ public:
bool isInStdNamespace() const;
+ // Return true if this is a FileContext Decl.
+ bool isFileContextDecl() const;
+
+ /// Whether it resembles a flexible array member. This is a static member
+ /// because we want to be able to call it with a nullptr. That allows us to
+ /// perform non-Decl specific checks based on the object's type and strict
+ /// flex array level.
+ static bool isFlexibleArrayMemberLike(
+ ASTContext &Context, const Decl *D, QualType Ty,
+ LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel,
+ bool IgnoreTemplateOrMacroSubstitution);
+
ASTContext &getASTContext() const LLVM_READONLY;
/// Helper to get the language options from the ASTContext.
@@ -472,11 +506,11 @@ public:
void setAccess(AccessSpecifier AS) {
Access = AS;
- assert(AccessDeclContextSanity());
+ assert(AccessDeclContextCheck());
}
AccessSpecifier getAccess() const {
- assert(AccessDeclContextSanity());
+ assert(AccessDeclContextCheck());
return AccessSpecifier(Access);
}
@@ -514,17 +548,18 @@ public:
return hasAttrs() ? getAttrs().end() : nullptr;
}
- template <typename T>
- void dropAttr() {
+ template <typename... Ts> void dropAttrs() {
if (!HasAttrs) return;
AttrVec &Vec = getAttrs();
- llvm::erase_if(Vec, [](Attr *A) { return isa<T>(A); });
+ llvm::erase_if(Vec, [](Attr *A) { return isa<Ts...>(A); });
if (Vec.empty())
HasAttrs = false;
}
+ template <typename T> void dropAttr() { dropAttrs<T>(); }
+
template <typename T>
llvm::iterator_range<specific_attr_iterator<T>> specific_attrs() const {
return llvm::make_range(specific_attr_begin<T>(), specific_attr_end<T>());
@@ -613,6 +648,41 @@ public:
return getModuleOwnershipKind() == ModuleOwnershipKind::ModulePrivate;
}
+ /// Whether this declaration was exported in a lexical context.
+ /// e.g.:
+ ///
+ /// export namespace A {
+ /// void f1(); // isInExportDeclContext() == true
+ /// }
+ /// void A::f1(); // isInExportDeclContext() == false
+ ///
+ /// namespace B {
+ /// void f2(); // isInExportDeclContext() == false
+ /// }
+ /// export void B::f2(); // isInExportDeclContext() == true
+ bool isInExportDeclContext() const;
+
+ bool isInvisibleOutsideTheOwningModule() const {
+ return getModuleOwnershipKind() > ModuleOwnershipKind::VisibleWhenImported;
+ }
+
+ /// Whether this declaration comes from another module unit.
+ bool isInAnotherModuleUnit() const;
+
+ /// FIXME: Implement discarding declarations actually in global module
+ /// fragment. See [module.global.frag]p3,4 for details.
+ bool isDiscardedInGlobalModuleFragment() const { return false; }
+
+ /// Check if we should skip checking ODRHash for declaration \param D.
+ ///
+ /// The existing ODRHash mechanism seems to be not stable enough and
+ /// the false positive ODR violation reports are annoying and we rarely see
+ /// true ODR violation reports. Also we learned that MSVC disabled ODR checks
+ /// for declarations in GMF. So we try to disable ODR checks in the GMF to
+ /// get better user experiences before we make the ODR violation checks stable
+ /// enough.
+ bool shouldSkipCheckingODR() const;
+
/// Return true if this declaration has an attribute which acts as
/// definition of the entity, such as 'alias' or 'ifunc'.
bool hasDefiningAttr() const;
@@ -775,7 +845,7 @@ public:
}
/// Get the module that owns this declaration for linkage purposes.
- /// There only ever is such a module under the C++ Modules TS.
+ /// There only ever is such a standard C++ module.
///
/// \param IgnoreLinkage Ignore the linkage of the entity; assume that
/// all declarations in a global module fragment are unowned.
@@ -790,6 +860,11 @@ public:
return (int)getModuleOwnershipKind() <= (int)ModuleOwnershipKind::Visible;
}
+ bool isReachable() const {
+ return (int)getModuleOwnershipKind() <=
+ (int)ModuleOwnershipKind::ReachableWhenImported;
+ }
+
/// Set that this declaration is globally visible, even if it came from a
/// module that is not visible.
void setVisibleDespiteOwningModule() {
@@ -891,10 +966,12 @@ public:
/// If this decl is defined inside a function/method/block it returns
/// the corresponding DeclContext, otherwise it returns null.
- const DeclContext *getParentFunctionOrMethod() const;
- DeclContext *getParentFunctionOrMethod() {
- return const_cast<DeclContext*>(
- const_cast<const Decl*>(this)->getParentFunctionOrMethod());
+ const DeclContext *
+ getParentFunctionOrMethod(bool LexicalParent = false) const;
+ DeclContext *getParentFunctionOrMethod(bool LexicalParent = false) {
+ return const_cast<DeclContext *>(
+ const_cast<const Decl *>(this)->getParentFunctionOrMethod(
+ LexicalParent));
}
/// Retrieves the "canonical" declaration of the given declaration.
@@ -1089,7 +1166,7 @@ public:
/// Determine whether this is a block-scope declaration with linkage.
/// This will either be a local variable declaration declared 'extern', or a
/// local function declaration.
- bool isLocalExternDecl() {
+ bool isLocalExternDecl() const {
return IdentifierNamespace & IDNS_LocalExtern;
}
@@ -1130,6 +1207,12 @@ public:
}
}
+ /// Clears the namespace of this declaration.
+ ///
+ /// This is useful if we want this declaration to be available for
+ /// redeclaration lookup but otherwise hidden for ordinary name lookups.
+ void clearIdentifierNamespace() { IdentifierNamespace = 0; }
+
enum FriendObjectKind {
FOK_None, ///< Not a friend object.
FOK_Declared, ///< A friend of a previously-declared entity.
@@ -1185,6 +1268,10 @@ public:
/// have a FunctionType.
const FunctionType *getFunctionType(bool BlocksToo = true) const;
+ // Looks through the Decl's underlying type to determine if it's a
+ // function pointer type.
+ bool isFunctionPointerType() const;
+
private:
void setAttrsImpl(const AttrVec& Attrs, ASTContext &Ctx);
void setDeclContextsImpl(DeclContext *SemaDC, DeclContext *LexicalDC,
@@ -1327,6 +1414,18 @@ public:
}
};
+/// Only used by CXXDeductionGuideDecl.
+enum class DeductionCandidate : unsigned char {
+ Normal,
+ Copy,
+ Aggregate,
+};
+
+enum class RecordArgPassingKind;
+enum class OMPDeclareReductionInitKind;
+enum class ObjCImplementationControl;
+enum class LinkageSpecLanguageIDs;
+
/// DeclContext - This is used only as base class of specific decl types that
/// can act as declaration contexts. These decls are (only the top classes
/// that directly derive from DeclContext are mentioned, not their subclasses):
@@ -1347,6 +1446,8 @@ public:
class DeclContext {
/// For makeDeclVisibleInContextImpl
friend class ASTDeclReader;
+ /// For checking the new bits in the Serialization part.
+ friend class ASTDeclWriter;
/// For reconcileExternalVisibleStorage, CreateStoredDeclsMap,
/// hasNeedToReconcileExternalVisibleStorage
friend class ExternalASTSource;
@@ -1365,35 +1466,42 @@ class DeclContext {
class DeclContextBitfields {
friend class DeclContext;
/// DeclKind - This indicates which class this is.
+ LLVM_PREFERRED_TYPE(Decl::Kind)
uint64_t DeclKind : 7;
/// Whether this declaration context also has some external
/// storage that contains additional declarations that are lexically
/// part of this context.
+ LLVM_PREFERRED_TYPE(bool)
mutable uint64_t ExternalLexicalStorage : 1;
/// Whether this declaration context also has some external
/// storage that contains additional declarations that are visible
/// in this context.
+ LLVM_PREFERRED_TYPE(bool)
mutable uint64_t ExternalVisibleStorage : 1;
/// Whether this declaration context has had externally visible
/// storage added since the last lookup. In this case, \c LookupPtr's
/// invariant may not hold and needs to be fixed before we perform
/// another lookup.
+ LLVM_PREFERRED_TYPE(bool)
mutable uint64_t NeedToReconcileExternalVisibleStorage : 1;
/// If \c true, this context may have local lexical declarations
/// that are missing from the lookup table.
+ LLVM_PREFERRED_TYPE(bool)
mutable uint64_t HasLazyLocalLexicalLookups : 1;
/// If \c true, the external source may have lexical declarations
/// that are missing from the lookup table.
+ LLVM_PREFERRED_TYPE(bool)
mutable uint64_t HasLazyExternalLexicalLookups : 1;
/// If \c true, lookups should only return identifier from
/// DeclContext scope (for example TranslationUnit). Used in
/// LookupQualifiedName()
+ LLVM_PREFERRED_TYPE(bool)
mutable uint64_t UseQualifiedLookup : 1;
};
@@ -1406,48 +1514,60 @@ class DeclContext {
class TagDeclBitfields {
friend class TagDecl;
/// For the bits in DeclContextBitfields
+ LLVM_PREFERRED_TYPE(DeclContextBitfields)
uint64_t : NumDeclContextBits;
/// The TagKind enum.
+ LLVM_PREFERRED_TYPE(TagTypeKind)
uint64_t TagDeclKind : 3;
/// True if this is a definition ("struct foo {};"), false if it is a
/// declaration ("struct foo;"). It is not considered a definition
/// until the definition has been fully processed.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsCompleteDefinition : 1;
/// True if this is currently being defined.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsBeingDefined : 1;
/// True if this tag declaration is "embedded" (i.e., defined or declared
/// for the very first time) in the syntax of a declarator.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsEmbeddedInDeclarator : 1;
/// True if this tag is free standing, e.g. "struct foo;".
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsFreeStanding : 1;
/// Indicates whether it is possible for declarations of this kind
/// to have an out-of-date definition.
///
/// This option is only enabled when modules are enabled.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t MayHaveOutOfDateDef : 1;
/// Has the full definition of this type been required by a use somewhere in
/// the TU.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsCompleteDefinitionRequired : 1;
+
+ /// Whether this tag is a definition which was demoted due to
+ /// a module merge.
+ LLVM_PREFERRED_TYPE(bool)
+ uint64_t IsThisDeclarationADemotedDefinition : 1;
};
- /// Number of non-inherited bits in TagDeclBitfields.
- enum { NumTagDeclBits = 9 };
+ /// Number of inherited and non-inherited bits in TagDeclBitfields.
+ enum { NumTagDeclBits = NumDeclContextBits + 10 };
/// Stores the bits used by EnumDecl.
/// If modified NumEnumDeclBit and the accessor
/// methods in EnumDecl should be updated appropriately.
class EnumDeclBitfields {
friend class EnumDecl;
- /// For the bits in DeclContextBitfields.
- uint64_t : NumDeclContextBits;
/// For the bits in TagDeclBitfields.
+ LLVM_PREFERRED_TYPE(TagDeclBitfields)
uint64_t : NumTagDeclBits;
/// Width in bits required to store all the non-negative
@@ -1460,78 +1580,102 @@ class DeclContext {
/// True if this tag declaration is a scoped enumeration. Only
/// possible in C++11 mode.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsScoped : 1;
/// If this tag declaration is a scoped enum,
/// then this is true if the scoped enum was declared using the class
/// tag, false if it was declared with the struct tag. No meaning is
/// associated if this tag declaration is not a scoped enum.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsScopedUsingClassTag : 1;
/// True if this is an enumeration with fixed underlying type. Only
/// possible in C++11, Microsoft extensions, or Objective C mode.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsFixed : 1;
/// True if a valid hash is stored in ODRHash.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t HasODRHash : 1;
};
- /// Number of non-inherited bits in EnumDeclBitfields.
- enum { NumEnumDeclBits = 20 };
+ /// Number of inherited and non-inherited bits in EnumDeclBitfields.
+ enum { NumEnumDeclBits = NumTagDeclBits + 20 };
/// Stores the bits used by RecordDecl.
/// If modified NumRecordDeclBits and the accessor
/// methods in RecordDecl should be updated appropriately.
class RecordDeclBitfields {
friend class RecordDecl;
- /// For the bits in DeclContextBitfields.
- uint64_t : NumDeclContextBits;
/// For the bits in TagDeclBitfields.
+ LLVM_PREFERRED_TYPE(TagDeclBitfields)
uint64_t : NumTagDeclBits;
/// This is true if this struct ends with a flexible
/// array member (e.g. int X[]) or if this union contains a struct that does.
/// If so, this cannot be contained in arrays or other structs as a member.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t HasFlexibleArrayMember : 1;
/// Whether this is the type of an anonymous struct or union.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t AnonymousStructOrUnion : 1;
/// This is true if this struct has at least one member
/// containing an Objective-C object pointer type.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t HasObjectMember : 1;
/// This is true if struct has at least one member of
/// 'volatile' type.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t HasVolatileMember : 1;
/// Whether the field declarations of this record have been loaded
/// from external storage. To avoid unnecessary deserialization of
/// methods/nested types we allow deserialization of just the fields
/// when needed.
+ LLVM_PREFERRED_TYPE(bool)
mutable uint64_t LoadedFieldsFromExternalStorage : 1;
/// Basic properties of non-trivial C structs.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t NonTrivialToPrimitiveDefaultInitialize : 1;
+ LLVM_PREFERRED_TYPE(bool)
uint64_t NonTrivialToPrimitiveCopy : 1;
+ LLVM_PREFERRED_TYPE(bool)
uint64_t NonTrivialToPrimitiveDestroy : 1;
/// The following bits indicate whether this is or contains a C union that
/// is non-trivial to default-initialize, destruct, or copy. These bits
/// imply the associated basic non-triviality predicates declared above.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t HasNonTrivialToPrimitiveDefaultInitializeCUnion : 1;
+ LLVM_PREFERRED_TYPE(bool)
uint64_t HasNonTrivialToPrimitiveDestructCUnion : 1;
+ LLVM_PREFERRED_TYPE(bool)
uint64_t HasNonTrivialToPrimitiveCopyCUnion : 1;
/// Indicates whether this struct is destroyed in the callee.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t ParamDestroyedInCallee : 1;
/// Represents the way this type is passed to a function.
+ LLVM_PREFERRED_TYPE(RecordArgPassingKind)
uint64_t ArgPassingRestrictions : 2;
+
+ /// Indicates whether this struct has had its field layout randomized.
+ LLVM_PREFERRED_TYPE(bool)
+ uint64_t IsRandomized : 1;
+
+ /// True if a valid hash is stored in ODRHash. This should shave off some
+ /// extra storage and prevent CXXRecordDecl to store unused bits.
+ uint64_t ODRHash : 26;
};
- /// Number of non-inherited bits in RecordDeclBitfields.
- enum { NumRecordDeclBits = 14 };
+ /// Number of inherited and non-inherited bits in RecordDeclBitfields.
+ enum { NumRecordDeclBits = NumTagDeclBits + 41 };
/// Stores the bits used by OMPDeclareReductionDecl.
/// If modified NumOMPDeclareReductionDeclBits and the accessor
@@ -1539,113 +1683,156 @@ class DeclContext {
class OMPDeclareReductionDeclBitfields {
friend class OMPDeclareReductionDecl;
/// For the bits in DeclContextBitfields
+ LLVM_PREFERRED_TYPE(DeclContextBitfields)
uint64_t : NumDeclContextBits;
/// Kind of initializer,
- /// function call or omp_priv<init_expr> initializtion.
+ /// function call or omp_priv<init_expr> initialization.
+ LLVM_PREFERRED_TYPE(OMPDeclareReductionInitKind)
uint64_t InitializerKind : 2;
};
- /// Number of non-inherited bits in OMPDeclareReductionDeclBitfields.
- enum { NumOMPDeclareReductionDeclBits = 2 };
+ /// Number of inherited and non-inherited bits in
+ /// OMPDeclareReductionDeclBitfields.
+ enum { NumOMPDeclareReductionDeclBits = NumDeclContextBits + 2 };
/// Stores the bits used by FunctionDecl.
/// If modified NumFunctionDeclBits and the accessor
/// methods in FunctionDecl and CXXDeductionGuideDecl
- /// (for IsCopyDeductionCandidate) should be updated appropriately.
+ /// (for DeductionCandidateKind) should be updated appropriately.
class FunctionDeclBitfields {
friend class FunctionDecl;
- /// For IsCopyDeductionCandidate
+ /// For DeductionCandidateKind
friend class CXXDeductionGuideDecl;
/// For the bits in DeclContextBitfields.
+ LLVM_PREFERRED_TYPE(DeclContextBitfields)
uint64_t : NumDeclContextBits;
+ LLVM_PREFERRED_TYPE(StorageClass)
uint64_t SClass : 3;
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsInline : 1;
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsInlineSpecified : 1;
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsVirtualAsWritten : 1;
- uint64_t IsPure : 1;
+ LLVM_PREFERRED_TYPE(bool)
+ uint64_t IsPureVirtual : 1;
+ LLVM_PREFERRED_TYPE(bool)
uint64_t HasInheritedPrototype : 1;
+ LLVM_PREFERRED_TYPE(bool)
uint64_t HasWrittenPrototype : 1;
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsDeleted : 1;
/// Used by CXXMethodDecl
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsTrivial : 1;
/// This flag indicates whether this function is trivial for the purpose of
/// calls. This is meaningful only when this function is a copy/move
/// constructor or a destructor.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsTrivialForCall : 1;
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsDefaulted : 1;
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsExplicitlyDefaulted : 1;
+ LLVM_PREFERRED_TYPE(bool)
uint64_t HasDefaultedFunctionInfo : 1;
+
+ /// For member functions of complete types, whether this is an ineligible
+ /// special member function or an unselected destructor. See
+ /// [class.mem.special].
+ LLVM_PREFERRED_TYPE(bool)
+ uint64_t IsIneligibleOrNotSelected : 1;
+
+ LLVM_PREFERRED_TYPE(bool)
uint64_t HasImplicitReturnZero : 1;
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsLateTemplateParsed : 1;
/// Kind of contexpr specifier as defined by ConstexprSpecKind.
+ LLVM_PREFERRED_TYPE(ConstexprSpecKind)
uint64_t ConstexprKind : 2;
+ LLVM_PREFERRED_TYPE(bool)
+ uint64_t BodyContainsImmediateEscalatingExpression : 1;
+
+ LLVM_PREFERRED_TYPE(bool)
uint64_t InstantiationIsPending : 1;
/// Indicates if the function uses __try.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t UsesSEHTry : 1;
/// Indicates if the function was a definition
/// but its body was skipped.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t HasSkippedBody : 1;
/// Indicates if the function declaration will
/// have a body, once we're done parsing it.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t WillHaveBody : 1;
/// Indicates that this function is a multiversioned
/// function using attribute 'target'.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsMultiVersion : 1;
- /// [C++17] Only used by CXXDeductionGuideDecl. Indicates that
- /// the Deduction Guide is the implicitly generated 'copy
- /// deduction candidate' (is used during overload resolution).
- uint64_t IsCopyDeductionCandidate : 1;
+ /// Only used by CXXDeductionGuideDecl. Indicates the kind
+ /// of the Deduction Guide that is implicitly generated
+ /// (used during overload resolution).
+ LLVM_PREFERRED_TYPE(DeductionCandidate)
+ uint64_t DeductionCandidateKind : 2;
/// Store the ODRHash after first calculation.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t HasODRHash : 1;
/// Indicates if the function uses Floating Point Constrained Intrinsics
+ LLVM_PREFERRED_TYPE(bool)
uint64_t UsesFPIntrin : 1;
+
+ // Indicates this function is a constrained friend, where the constraint
+ // refers to an enclosing template for hte purposes of [temp.friend]p9.
+ LLVM_PREFERRED_TYPE(bool)
+ uint64_t FriendConstraintRefersToEnclosingTemplate : 1;
};
- /// Number of non-inherited bits in FunctionDeclBitfields.
- enum { NumFunctionDeclBits = 27 };
+ /// Number of inherited and non-inherited bits in FunctionDeclBitfields.
+ enum { NumFunctionDeclBits = NumDeclContextBits + 31 };
/// Stores the bits used by CXXConstructorDecl. If modified
/// NumCXXConstructorDeclBits and the accessor
/// methods in CXXConstructorDecl should be updated appropriately.
class CXXConstructorDeclBitfields {
friend class CXXConstructorDecl;
- /// For the bits in DeclContextBitfields.
- uint64_t : NumDeclContextBits;
/// For the bits in FunctionDeclBitfields.
+ LLVM_PREFERRED_TYPE(FunctionDeclBitfields)
uint64_t : NumFunctionDeclBits;
- /// 24 bits to fit in the remaining available space.
+ /// 20 bits to fit in the remaining available space.
/// Note that this makes CXXConstructorDeclBitfields take
/// exactly 64 bits and thus the width of NumCtorInitializers
/// will need to be shrunk if some bit is added to NumDeclContextBitfields,
/// NumFunctionDeclBitfields or CXXConstructorDeclBitfields.
- uint64_t NumCtorInitializers : 21;
+ uint64_t NumCtorInitializers : 17;
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsInheritingConstructor : 1;
/// Whether this constructor has a trail-allocated explicit specifier.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t HasTrailingExplicitSpecifier : 1;
/// If this constructor does't have a trail-allocated explicit specifier.
/// Whether this constructor is explicit specified.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsSimpleExplicit : 1;
};
- /// Number of non-inherited bits in CXXConstructorDeclBitfields.
- enum {
- NumCXXConstructorDeclBits = 64 - NumDeclContextBits - NumFunctionDeclBits
- };
+ /// Number of inherited and non-inherited bits in CXXConstructorDeclBitfields.
+ enum { NumCXXConstructorDeclBits = NumFunctionDeclBits + 20 };
/// Stores the bits used by ObjCMethodDecl.
/// If modified NumObjCMethodDeclBits and the accessor
@@ -1654,43 +1841,56 @@ class DeclContext {
friend class ObjCMethodDecl;
/// For the bits in DeclContextBitfields.
+ LLVM_PREFERRED_TYPE(DeclContextBitfields)
uint64_t : NumDeclContextBits;
/// The conventional meaning of this method; an ObjCMethodFamily.
/// This is not serialized; instead, it is computed on demand and
/// cached.
+ LLVM_PREFERRED_TYPE(ObjCMethodFamily)
mutable uint64_t Family : ObjCMethodFamilyBitWidth;
/// instance (true) or class (false) method.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsInstance : 1;
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsVariadic : 1;
/// True if this method is the getter or setter for an explicit property.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsPropertyAccessor : 1;
/// True if this method is a synthesized property accessor stub.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsSynthesizedAccessorStub : 1;
/// Method has a definition.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsDefined : 1;
/// Method redeclaration in the same interface.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsRedeclaration : 1;
/// Is redeclared in the same interface.
+ LLVM_PREFERRED_TYPE(bool)
mutable uint64_t HasRedeclaration : 1;
/// \@required/\@optional
+ LLVM_PREFERRED_TYPE(ObjCImplementationControl)
uint64_t DeclImplementation : 2;
/// in, inout, etc.
+ LLVM_PREFERRED_TYPE(Decl::ObjCDeclQualifier)
uint64_t objcDeclQualifier : 7;
/// Indicates whether this method has a related result type.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t RelatedResultType : 1;
/// Whether the locations of the selector identifiers are in a
/// "standard" position, a enum SelectorLocationsKind.
+ LLVM_PREFERRED_TYPE(SelectorLocationsKind)
uint64_t SelLocsKind : 2;
/// Whether this method overrides any other in the class hierarchy.
@@ -1700,14 +1900,16 @@ class DeclContext {
/// the same selector and is of the same kind (class or instance).
/// A method in an implementation is not considered as overriding the same
/// method in the interface or its categories.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsOverriding : 1;
/// Indicates if the method was a definition but its body was skipped.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t HasSkippedBody : 1;
};
- /// Number of non-inherited bits in ObjCMethodDeclBitfields.
- enum { NumObjCMethodDeclBits = 24 };
+ /// Number of inherited and non-inherited bits in ObjCMethodDeclBitfields.
+ enum { NumObjCMethodDeclBits = NumDeclContextBits + 24 };
/// Stores the bits used by ObjCContainerDecl.
/// If modified NumObjCContainerDeclBits and the accessor
@@ -1715,6 +1917,7 @@ class DeclContext {
class ObjCContainerDeclBitfields {
friend class ObjCContainerDecl;
/// For the bits in DeclContextBitfields
+ LLVM_PREFERRED_TYPE(DeclContextBitfields)
uint32_t : NumDeclContextBits;
// Not a bitfield but this saves space.
@@ -1722,10 +1925,10 @@ class DeclContext {
SourceLocation AtStart;
};
- /// Number of non-inherited bits in ObjCContainerDeclBitfields.
+ /// Number of inherited and non-inherited bits in ObjCContainerDeclBitfields.
/// Note that here we rely on the fact that SourceLocation is 32 bits
/// wide. We check this with the static_assert in the ctor of DeclContext.
- enum { NumObjCContainerDeclBits = 64 - NumDeclContextBits };
+ enum { NumObjCContainerDeclBits = 64 };
/// Stores the bits used by LinkageSpecDecl.
/// If modified NumLinkageSpecDeclBits and the accessor
@@ -1733,21 +1936,23 @@ class DeclContext {
class LinkageSpecDeclBitfields {
friend class LinkageSpecDecl;
/// For the bits in DeclContextBitfields.
+ LLVM_PREFERRED_TYPE(DeclContextBitfields)
uint64_t : NumDeclContextBits;
- /// The language for this linkage specification with values
- /// in the enum LinkageSpecDecl::LanguageIDs.
+ /// The language for this linkage specification.
+ LLVM_PREFERRED_TYPE(LinkageSpecLanguageIDs)
uint64_t Language : 3;
/// True if this linkage spec has braces.
/// This is needed so that hasBraces() returns the correct result while the
/// linkage spec body is being parsed. Once RBraceLoc has been set this is
/// not used, so it doesn't need to be serialized.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t HasBraces : 1;
};
- /// Number of non-inherited bits in LinkageSpecDeclBitfields.
- enum { NumLinkageSpecDeclBits = 4 };
+ /// Number of inherited and non-inherited bits in LinkageSpecDeclBitfields.
+ enum { NumLinkageSpecDeclBits = NumDeclContextBits + 4 };
/// Stores the bits used by BlockDecl.
/// If modified NumBlockDeclBits and the accessor
@@ -1755,25 +1960,32 @@ class DeclContext {
class BlockDeclBitfields {
friend class BlockDecl;
/// For the bits in DeclContextBitfields.
+ LLVM_PREFERRED_TYPE(DeclContextBitfields)
uint64_t : NumDeclContextBits;
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsVariadic : 1;
+ LLVM_PREFERRED_TYPE(bool)
uint64_t CapturesCXXThis : 1;
+ LLVM_PREFERRED_TYPE(bool)
uint64_t BlockMissingReturnType : 1;
+ LLVM_PREFERRED_TYPE(bool)
uint64_t IsConversionFromLambda : 1;
/// A bit that indicates this block is passed directly to a function as a
/// non-escaping parameter.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t DoesNotEscape : 1;
/// A bit that indicates whether it's possible to avoid coying this block to
/// the heap when it initializes or is assigned to a local variable with
/// automatic storage.
+ LLVM_PREFERRED_TYPE(bool)
uint64_t CanAvoidCopyToHeap : 1;
};
- /// Number of non-inherited bits in BlockDeclBitfields.
- enum { NumBlockDeclBits = 5 };
+ /// Number of inherited and non-inherited bits in BlockDeclBitfields.
+ enum { NumBlockDeclBits = NumDeclContextBits + 5 };
/// Pointer to the data structure used to lookup declarations
/// within this context (or a DependentStoredDeclsMap if this is a
@@ -1850,6 +2062,10 @@ protected:
public:
~DeclContext();
+ // For use when debugging; hasValidDeclKind() will always return true for
+ // a correctly constructed object within its lifetime.
+ bool hasValidDeclKind() const;
+
Decl::Kind getDeclKind() const {
return static_cast<Decl::Kind>(DeclContextBits.DeclKind);
}
@@ -1965,7 +2181,7 @@ public:
/// Here, E is a transparent context, so its enumerator (Val1) will
/// appear (semantically) that it is in the same context of E.
/// Examples of transparent contexts include: enumerations (except for
- /// C++0x scoped enums), and C++ linkage specifications.
+ /// C++0x scoped enums), C++ linkage specifications and export declaration.
bool isTransparentContext() const;
/// Determines whether this context or some of its ancestors is a
@@ -1997,6 +2213,12 @@ public:
return const_cast<DeclContext*>(this)->getNonClosureAncestor();
}
+ // Retrieve the nearest context that is not a transparent context.
+ DeclContext *getNonTransparentContext();
+ const DeclContext *getNonTransparentContext() const {
+ return const_cast<DeclContext *>(this)->getNonTransparentContext();
+ }
+
/// getPrimaryContext - There may be many different
/// declarations of the same entity (including forward declarations
/// of classes, multiple definitions of namespaces, etc.), each with
@@ -2452,10 +2674,8 @@ public:
D == LastDecl);
}
- bool setUseQualifiedLookup(bool use = true) const {
- bool old_value = DeclContextBits.UseQualifiedLookup;
+ void setUseQualifiedLookup(bool use = true) const {
DeclContextBits.UseQualifiedLookup = use;
- return old_value;
}
bool shouldUseQualifiedLookup() const {
@@ -2465,6 +2685,8 @@ public:
static bool classof(const Decl *D);
static bool classof(const DeclContext *D) { return true; }
+ void dumpAsDecl() const;
+ void dumpAsDecl(const ASTContext *Ctx) const;
void dumpDeclContext() const;
void dumpLookups() const;
void dumpLookups(llvm::raw_ostream &OS, bool DumpDecls = false,
@@ -2514,14 +2736,6 @@ private:
void reconcileExternalVisibleStorage() const;
bool LoadLexicalDeclsFromExternalStorage() const;
- /// Makes a declaration visible within this context, but
- /// suppresses searches for external declarations with the same
- /// name.
- ///
- /// Analogous to makeDeclVisibleInContext, but for the exclusive
- /// use of addDeclInternal().
- void makeDeclVisibleInContextInternal(NamedDecl *D);
-
StoredDeclsMap *CreateStoredDeclsMap(ASTContext &C) const;
void loadLazyLocalLexicalLookups();
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclCXX.h b/contrib/llvm-project/clang/include/clang/AST/DeclCXX.h
index 0d5ad40fc19e..9cebaff63bb0 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclCXX.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclCXX.h
@@ -64,7 +64,6 @@ class CXXFinalOverriderMap;
class CXXIndirectPrimaryBaseSet;
class CXXMethodDecl;
class DecompositionDecl;
-class DiagnosticBuilder;
class FriendDecl;
class FunctionTemplateDecl;
class IdentifierInfo;
@@ -155,22 +154,26 @@ class CXXBaseSpecifier {
SourceLocation EllipsisLoc;
/// Whether this is a virtual base class or not.
+ LLVM_PREFERRED_TYPE(bool)
unsigned Virtual : 1;
/// Whether this is the base of a class (true) or of a struct (false).
///
/// This determines the mapping from the access specifier as written in the
/// source code to the access specifier used for semantic analysis.
+ LLVM_PREFERRED_TYPE(bool)
unsigned BaseOfClass : 1;
/// Access specifier as written in the source code (may be AS_none).
///
/// The actual type of data stored here is an AccessSpecifier, but we use
- /// "unsigned" here to work around a VC++ bug.
+ /// "unsigned" here to work around Microsoft ABI.
+ LLVM_PREFERRED_TYPE(AccessSpecifier)
unsigned Access : 2;
/// Whether the class contains a using declaration
/// to inherit the named class's constructors.
+ LLVM_PREFERRED_TYPE(bool)
unsigned InheritConstructors : 1;
/// The type of the base class.
@@ -261,8 +264,9 @@ class CXXRecordDecl : public RecordDecl {
friend class ASTWriter;
friend class DeclContext;
friend class LambdaExpr;
+ friend class ODRDiagsEmitter;
- friend void FunctionDecl::setPure(bool);
+ friend void FunctionDecl::setIsPureVirtual(bool);
friend void TagDecl::startDefinition();
/// Values used in DefinitionData fields to represent special members.
@@ -276,21 +280,33 @@ class CXXRecordDecl : public RecordDecl {
SMF_All = 0x3f
};
+public:
+ enum LambdaDependencyKind {
+ LDK_Unknown = 0,
+ LDK_AlwaysDependent,
+ LDK_NeverDependent,
+ };
+
+private:
struct DefinitionData {
#define FIELD(Name, Width, Merge) \
unsigned Name : Width;
#include "CXXRecordDeclDefinitionBits.def"
/// Whether this class describes a C++ lambda.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsLambda : 1;
/// Whether we are currently parsing base specifiers.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsParsingBaseSpecifiers : 1;
/// True when visible conversion functions are already computed
/// and are available.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ComputedVisibleConversions : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasODRHash : 1;
/// A hash of parts of the class to help in ODR checking.
@@ -349,11 +365,11 @@ class CXXRecordDecl : public RecordDecl {
}
ArrayRef<CXXBaseSpecifier> bases() const {
- return llvm::makeArrayRef(getBases(), NumBases);
+ return llvm::ArrayRef(getBases(), NumBases);
}
ArrayRef<CXXBaseSpecifier> vbases() const {
- return llvm::makeArrayRef(getVBases(), NumVBases);
+ return llvm::ArrayRef(getVBases(), NumVBases);
}
private:
@@ -375,46 +391,56 @@ class CXXRecordDecl : public RecordDecl {
/// lambda will have been created with the enclosing context as its
/// declaration context, rather than function. This is an unfortunate
/// artifact of having to parse the default arguments before.
- unsigned Dependent : 1;
+ LLVM_PREFERRED_TYPE(LambdaDependencyKind)
+ unsigned DependencyKind : 2;
/// Whether this lambda is a generic lambda.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsGenericLambda : 1;
/// The Default Capture.
+ LLVM_PREFERRED_TYPE(LambdaCaptureDefault)
unsigned CaptureDefault : 2;
/// The number of captures in this lambda is limited 2^NumCaptures.
unsigned NumCaptures : 15;
/// The number of explicit captures in this lambda.
- unsigned NumExplicitCaptures : 13;
+ unsigned NumExplicitCaptures : 12;
/// Has known `internal` linkage.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasKnownInternalLinkage : 1;
/// The number used to indicate this lambda expression for name
/// mangling in the Itanium C++ ABI.
unsigned ManglingNumber : 31;
+ /// The index of this lambda within its context declaration. This is not in
+ /// general the same as the mangling number.
+ unsigned IndexInContext;
+
/// The declaration that provides context for this lambda, if the
/// actual DeclContext does not suffice. This is used for lambdas that
/// occur within default arguments of function parameters within the class
/// or within a data member initializer.
LazyDeclPtr ContextDecl;
- /// The list of captures, both explicit and implicit, for this
- /// lambda.
- Capture *Captures = nullptr;
+ /// The lists of captures, both explicit and implicit, for this
+ /// lambda. One list is provided for each merged copy of the lambda.
+ /// The first list corresponds to the canonical definition.
+ /// The destructor is registered by AddCaptureList when necessary.
+ llvm::TinyPtrVector<Capture*> Captures;
/// The type of the call method.
TypeSourceInfo *MethodTyInfo;
- LambdaDefinitionData(CXXRecordDecl *D, TypeSourceInfo *Info, bool Dependent,
+ LambdaDefinitionData(CXXRecordDecl *D, TypeSourceInfo *Info, unsigned DK,
bool IsGeneric, LambdaCaptureDefault CaptureDefault)
- : DefinitionData(D), Dependent(Dependent), IsGenericLambda(IsGeneric),
+ : DefinitionData(D), DependencyKind(DK), IsGenericLambda(IsGeneric),
CaptureDefault(CaptureDefault), NumCaptures(0),
NumExplicitCaptures(0), HasKnownInternalLinkage(0), ManglingNumber(0),
- MethodTyInfo(Info) {
+ IndexInContext(0), MethodTyInfo(Info) {
IsLambda = true;
// C++1z [expr.prim.lambda]p4:
@@ -422,6 +448,9 @@ class CXXRecordDecl : public RecordDecl {
Aggregate = false;
PlainOldData = false;
}
+
+ // Add a list of captures.
+ void AddCaptureList(ASTContext &Ctx, Capture *CaptureList);
};
struct DefinitionData *dataPtr() const {
@@ -548,7 +577,7 @@ public:
bool DelayTypeCreation = false);
static CXXRecordDecl *CreateLambda(const ASTContext &C, DeclContext *DC,
TypeSourceInfo *Info, SourceLocation Loc,
- bool DependentLambda, bool IsGeneric,
+ unsigned DependencyKind, bool IsGeneric,
LambdaCaptureDefault CaptureDefault);
static CXXRecordDecl *CreateDeserialized(const ASTContext &C, unsigned ID);
@@ -1035,6 +1064,12 @@ public:
return static_cast<LambdaCaptureDefault>(getLambdaData().CaptureDefault);
}
+ bool isCapturelessLambda() const {
+ if (!isLambda())
+ return false;
+ return getLambdaCaptureDefault() == LCD_None && capture_size() == 0;
+ }
+
/// Set the captures for this lambda closure type.
void setCaptures(ASTContext &Context, ArrayRef<LambdaCapture> Captures);
@@ -1050,8 +1085,14 @@ public:
///
/// \note No entries will be added for init-captures, as they do not capture
/// variables.
- void getCaptureFields(llvm::DenseMap<const VarDecl *, FieldDecl *> &Captures,
- FieldDecl *&ThisCapture) const;
+ ///
+ /// \note If multiple versions of the lambda are merged together, they may
+ /// have different variable declarations corresponding to the same capture.
+ /// In that case, all of those variable declarations will be added to the
+ /// Captures list, so it may have more than one variable listed per field.
+ void
+ getCaptureFields(llvm::DenseMap<const ValueDecl *, FieldDecl *> &Captures,
+ FieldDecl *&ThisCapture) const;
using capture_const_iterator = const LambdaCapture *;
using capture_const_range = llvm::iterator_range<capture_const_iterator>;
@@ -1061,7 +1102,9 @@ public:
}
capture_const_iterator captures_begin() const {
- return isLambda() ? getLambdaData().Captures : nullptr;
+ if (!isLambda()) return nullptr;
+ LambdaDefinitionData &LambdaData = getLambdaData();
+ return LambdaData.Captures.empty() ? nullptr : LambdaData.Captures.front();
}
capture_const_iterator captures_end() const {
@@ -1071,6 +1114,11 @@ public:
unsigned capture_size() const { return getLambdaData().NumCaptures; }
+ const LambdaCapture *getCapture(unsigned I) const {
+ assert(isLambda() && I < capture_size() && "invalid index for capture");
+ return captures_begin() + I;
+ }
+
using conversion_iterator = UnresolvedSetIterator;
conversion_iterator conversion_begin() const {
@@ -1139,6 +1187,13 @@ public:
///
/// \note This does NOT include a check for union-ness.
bool isEmpty() const { return data().Empty; }
+ /// Marks this record as empty. This is used by DWARFASTParserClang
+ /// when parsing records with empty fields having [[no_unique_address]]
+ /// attribute
+ void markEmpty() { data().Empty = true; }
+
+ void setInitMethod(bool Val) { data().HasInitMethod = Val; }
+ bool hasInitMethod() const { return data().HasInitMethod; }
bool hasPrivateFields() const {
return data().HasPrivateFields;
@@ -1160,7 +1215,7 @@ public:
/// Determine whether this class has a pure virtual function.
///
- /// The class is is abstract per (C++ [class.abstract]p2) if it declares
+ /// The class is abstract per (C++ [class.abstract]p2) if it declares
/// a pure virtual function or inherits a pure virtual function that is
/// not overridden.
bool isAbstract() const { return data().Abstract; }
@@ -1370,6 +1425,9 @@ public:
/// (C++11 [class]p6).
bool isTriviallyCopyable() const;
+ /// Determine whether this class is considered trivially copyable per
+ bool isTriviallyCopyConstructible() const;
+
/// Determine whether this class is considered trivial.
///
/// C++11 [class]p6:
@@ -1381,37 +1439,39 @@ public:
/// Determine whether this class is a literal type.
///
- /// C++11 [basic.types]p10:
+ /// C++20 [basic.types]p10:
/// A class type that has all the following properties:
- /// - it has a trivial destructor
- /// - every constructor call and full-expression in the
- /// brace-or-equal-intializers for non-static data members (if any) is
- /// a constant expression.
- /// - it is an aggregate type or has at least one constexpr constructor
- /// or constructor template that is not a copy or move constructor, and
- /// - all of its non-static data members and base classes are of literal
- /// types
- ///
- /// We resolve DR1361 by ignoring the second bullet. We resolve DR1452 by
- /// treating types with trivial default constructors as literal types.
- ///
- /// Only in C++17 and beyond, are lambdas literal types.
- bool isLiteral() const {
- const LangOptions &LangOpts = getLangOpts();
- return (LangOpts.CPlusPlus20 ? hasConstexprDestructor()
- : hasTrivialDestructor()) &&
- (!isLambda() || LangOpts.CPlusPlus17) &&
- !hasNonLiteralTypeFieldsOrBases() &&
- (isAggregate() || isLambda() ||
- hasConstexprNonCopyMoveConstructor() ||
- hasTrivialDefaultConstructor());
- }
+ /// - it has a constexpr destructor
+ /// - all of its non-static non-variant data members and base classes
+ /// are of non-volatile literal types, and it:
+ /// - is a closure type
+ /// - is an aggregate union type that has either no variant members
+ /// or at least one variant member of non-volatile literal type
+ /// - is a non-union aggregate type for which each of its anonymous
+ /// union members satisfies the above requirements for an aggregate
+ /// union type, or
+ /// - has at least one constexpr constructor or constructor template
+ /// that is not a copy or move constructor.
+ bool isLiteral() const;
/// Determine whether this is a structural type.
bool isStructural() const {
return isLiteral() && data().StructuralIfLiteral;
}
+ /// Notify the class that this destructor is now selected.
+ ///
+ /// Important properties of the class depend on destructor properties. Since
+ /// C++20, it is possible to have multiple destructor declarations in a class
+ /// out of which one will be selected at the end.
+ /// This is called separately from addedMember because it has to be deferred
+ /// to the completion of the class.
+ void addedSelectedDestructor(CXXDestructorDecl *DD);
+
+ /// Notify the class that an eligible SMF has been added.
+ /// This updates triviality and destructor based properties of the class accordingly.
+ void addedEligibleSpecialMemberFunction(const CXXMethodDecl *MD, unsigned SMKind);
+
/// If this record is an instantiation of a member class,
/// retrieves the member class from which it was instantiated.
///
@@ -1726,18 +1786,31 @@ public:
/// the declaration context suffices.
Decl *getLambdaContextDecl() const;
- /// Set the mangling number and context declaration for a lambda
- /// class.
- void setLambdaMangling(unsigned ManglingNumber, Decl *ContextDecl,
- bool HasKnownInternalLinkage = false) {
+ /// Retrieve the index of this lambda within the context declaration returned
+ /// by getLambdaContextDecl().
+ unsigned getLambdaIndexInContext() const {
assert(isLambda() && "Not a lambda closure type!");
- getLambdaData().ManglingNumber = ManglingNumber;
- getLambdaData().ContextDecl = ContextDecl;
- getLambdaData().HasKnownInternalLinkage = HasKnownInternalLinkage;
+ return getLambdaData().IndexInContext;
}
- /// Set the device side mangling number.
- void setDeviceLambdaManglingNumber(unsigned Num) const;
+ /// Information about how a lambda is numbered within its context.
+ struct LambdaNumbering {
+ Decl *ContextDecl = nullptr;
+ unsigned IndexInContext = 0;
+ unsigned ManglingNumber = 0;
+ unsigned DeviceManglingNumber = 0;
+ bool HasKnownInternalLinkage = false;
+ };
+
+ /// Set the mangling numbers and context declaration for a lambda class.
+ void setLambdaNumbering(LambdaNumbering Numbering);
+
+ // Get the mangling numbers and context declaration for a lambda class.
+ LambdaNumbering getLambdaNumbering() const {
+ return {getLambdaContextDecl(), getLambdaIndexInContext(),
+ getLambdaManglingNumber(), getDeviceLambdaManglingNumber(),
+ hasKnownLambdaInternalLinkage()};
+ }
/// Retrieve the device side mangling number.
unsigned getDeviceLambdaManglingNumber() const;
@@ -1772,13 +1845,37 @@ public:
/// function declaration itself is dependent. This flag indicates when we
/// know that the lambda is dependent despite that.
bool isDependentLambda() const {
- return isLambda() && getLambdaData().Dependent;
+ return isLambda() && getLambdaData().DependencyKind == LDK_AlwaysDependent;
+ }
+
+ bool isNeverDependentLambda() const {
+ return isLambda() && getLambdaData().DependencyKind == LDK_NeverDependent;
+ }
+
+ unsigned getLambdaDependencyKind() const {
+ if (!isLambda())
+ return LDK_Unknown;
+ return getLambdaData().DependencyKind;
}
TypeSourceInfo *getLambdaTypeInfo() const {
return getLambdaData().MethodTyInfo;
}
+ void setLambdaTypeInfo(TypeSourceInfo *TS) {
+ assert(DefinitionData && DefinitionData->IsLambda &&
+ "setting lambda property of non-lambda class");
+ auto &DL = static_cast<LambdaDefinitionData &>(*DefinitionData);
+ DL.MethodTyInfo = TS;
+ }
+
+ void setLambdaIsGeneric(bool IsGeneric) {
+ assert(DefinitionData && DefinitionData->IsLambda &&
+ "setting lambda property of non-lambda class");
+ auto &DL = static_cast<LambdaDefinitionData &>(*DefinitionData);
+ DL.IsGenericLambda = IsGeneric;
+ }
+
// Determine whether this type is an Interface Like type for
// __interface inheritance purposes.
bool isInterfaceLike() const;
@@ -1855,13 +1952,13 @@ private:
ExplicitSpecifier ES,
const DeclarationNameInfo &NameInfo, QualType T,
TypeSourceInfo *TInfo, SourceLocation EndLocation,
- CXXConstructorDecl *Ctor)
+ CXXConstructorDecl *Ctor, DeductionCandidate Kind)
: FunctionDecl(CXXDeductionGuide, C, DC, StartLoc, NameInfo, T, TInfo,
- SC_None, false, ConstexprSpecKind::Unspecified),
+ SC_None, false, false, ConstexprSpecKind::Unspecified),
Ctor(Ctor), ExplicitSpec(ES) {
if (EndLocation.isValid())
setRangeEnd(EndLocation);
- setIsCopyDeductionCandidate(false);
+ setDeductionCandidateKind(Kind);
}
CXXConstructorDecl *Ctor;
@@ -1876,14 +1973,15 @@ public:
Create(ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
ExplicitSpecifier ES, const DeclarationNameInfo &NameInfo, QualType T,
TypeSourceInfo *TInfo, SourceLocation EndLocation,
- CXXConstructorDecl *Ctor = nullptr);
+ CXXConstructorDecl *Ctor = nullptr,
+ DeductionCandidate Kind = DeductionCandidate::Normal);
static CXXDeductionGuideDecl *CreateDeserialized(ASTContext &C, unsigned ID);
ExplicitSpecifier getExplicitSpecifier() { return ExplicitSpec; }
const ExplicitSpecifier getExplicitSpecifier() const { return ExplicitSpec; }
- /// Return true if the declartion is already resolved to be explicit.
+ /// Return true if the declaration is already resolved to be explicit.
bool isExplicit() const { return ExplicitSpec.isExplicit(); }
/// Get the template for which this guide performs deduction.
@@ -1893,16 +1991,15 @@ public:
/// Get the constructor from which this deduction guide was generated, if
/// this is an implicit deduction guide.
- CXXConstructorDecl *getCorrespondingConstructor() const {
- return Ctor;
- }
+ CXXConstructorDecl *getCorrespondingConstructor() const { return Ctor; }
- void setIsCopyDeductionCandidate(bool isCDC = true) {
- FunctionDeclBits.IsCopyDeductionCandidate = isCDC;
+ void setDeductionCandidateKind(DeductionCandidate K) {
+ FunctionDeclBits.DeductionCandidateKind = static_cast<unsigned char>(K);
}
- bool isCopyDeductionCandidate() const {
- return FunctionDeclBits.IsCopyDeductionCandidate;
+ DeductionCandidate getDeductionCandidateKind() const {
+ return static_cast<DeductionCandidate>(
+ FunctionDeclBits.DeductionCandidateKind);
}
// Implement isa/cast/dyncast/etc.
@@ -1939,6 +2036,14 @@ public:
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == RequiresExprBody; }
+
+ static DeclContext *castToDeclContext(const RequiresExprBodyDecl *D) {
+ return static_cast<DeclContext *>(const_cast<RequiresExprBodyDecl *>(D));
+ }
+
+ static RequiresExprBodyDecl *castFromDeclContext(const DeclContext *DC) {
+ return static_cast<RequiresExprBodyDecl *>(const_cast<DeclContext *>(DC));
+ }
};
/// Represents a static or instance method of a struct/union/class.
@@ -1952,29 +2057,39 @@ protected:
CXXMethodDecl(Kind DK, ASTContext &C, CXXRecordDecl *RD,
SourceLocation StartLoc, const DeclarationNameInfo &NameInfo,
QualType T, TypeSourceInfo *TInfo, StorageClass SC,
- bool isInline, ConstexprSpecKind ConstexprKind,
- SourceLocation EndLocation,
+ bool UsesFPIntrin, bool isInline,
+ ConstexprSpecKind ConstexprKind, SourceLocation EndLocation,
Expr *TrailingRequiresClause = nullptr)
- : FunctionDecl(DK, C, RD, StartLoc, NameInfo, T, TInfo, SC, isInline,
- ConstexprKind, TrailingRequiresClause) {
+ : FunctionDecl(DK, C, RD, StartLoc, NameInfo, T, TInfo, SC, UsesFPIntrin,
+ isInline, ConstexprKind, TrailingRequiresClause) {
if (EndLocation.isValid())
setRangeEnd(EndLocation);
}
public:
- static CXXMethodDecl *Create(ASTContext &C, CXXRecordDecl *RD,
- SourceLocation StartLoc,
- const DeclarationNameInfo &NameInfo, QualType T,
- TypeSourceInfo *TInfo, StorageClass SC,
- bool isInline, ConstexprSpecKind ConstexprKind,
- SourceLocation EndLocation,
- Expr *TrailingRequiresClause = nullptr);
+ static CXXMethodDecl *
+ Create(ASTContext &C, CXXRecordDecl *RD, SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo, QualType T, TypeSourceInfo *TInfo,
+ StorageClass SC, bool UsesFPIntrin, bool isInline,
+ ConstexprSpecKind ConstexprKind, SourceLocation EndLocation,
+ Expr *TrailingRequiresClause = nullptr);
static CXXMethodDecl *CreateDeserialized(ASTContext &C, unsigned ID);
bool isStatic() const;
bool isInstance() const { return !isStatic(); }
+ /// [C++2b][dcl.fct]/p7
+ /// An explicit object member function is a non-static
+ /// member function with an explicit object parameter. e.g.,
+ /// void func(this SomeType);
+ bool isExplicitObjectMemberFunction() const;
+
+ /// [C++2b][dcl.fct]/p7
+ /// An implicit object member function is a non-static
+ /// member function without an explicit object parameter.
+ bool isImplicitObjectMemberFunction() const;
+
/// Returns true if the given operator is implicitly static in a record
/// context.
static bool isStaticOverloadedOperator(OverloadedOperatorKind OOK) {
@@ -1995,7 +2110,7 @@ public:
// Member function is virtual if it is marked explicitly so, or if it is
// declared in __interface -- then it is automatically pure virtual.
- if (CD->isVirtualAsWritten() || CD->isPure())
+ if (CD->isVirtualAsWritten() || CD->isPureVirtual())
return true;
return CD->size_overridden_methods() != 0;
@@ -2083,14 +2198,19 @@ public:
/// Return the type of the object pointed by \c this.
///
/// See getThisType() for usage restriction.
- QualType getThisObjectType() const;
+
+ QualType getFunctionObjectParameterReferenceType() const;
+ QualType getFunctionObjectParameterType() const {
+ return getFunctionObjectParameterReferenceType().getNonReferenceType();
+ }
+
+ unsigned getNumExplicitParams() const {
+ return getNumParams() - (isExplicitObjectMemberFunction() ? 1 : 0);
+ }
static QualType getThisType(const FunctionProtoType *FPT,
const CXXRecordDecl *Decl);
- static QualType getThisObjectType(const FunctionProtoType *FPT,
- const CXXRecordDecl *Decl);
-
Qualifiers getMethodQualifiers() const {
return getType()->castAs<FunctionProtoType>()->getMethodQuals();
}
@@ -2197,14 +2317,17 @@ class CXXCtorInitializer final {
/// If the initializee is a type, whether that type makes this
/// a delegating initialization.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsDelegating : 1;
/// If the initializer is a base initializer, this keeps track
/// of whether the base is virtual or not.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsVirtual : 1;
/// Whether or not the initializer is explicitly written
/// in the sources.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsWritten : 1;
/// If IsWritten is true, then this number keeps track of the textual order
@@ -2413,7 +2536,8 @@ class CXXConstructorDecl final
CXXConstructorDecl(ASTContext &C, CXXRecordDecl *RD, SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo, QualType T,
- TypeSourceInfo *TInfo, ExplicitSpecifier ES, bool isInline,
+ TypeSourceInfo *TInfo, ExplicitSpecifier ES,
+ bool UsesFPIntrin, bool isInline,
bool isImplicitlyDeclared, ConstexprSpecKind ConstexprKind,
InheritedConstructor Inherited,
Expr *TrailingRequiresClause);
@@ -2456,8 +2580,8 @@ public:
static CXXConstructorDecl *
Create(ASTContext &C, CXXRecordDecl *RD, SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo, QualType T, TypeSourceInfo *TInfo,
- ExplicitSpecifier ES, bool isInline, bool isImplicitlyDeclared,
- ConstexprSpecKind ConstexprKind,
+ ExplicitSpecifier ES, bool UsesFPIntrin, bool isInline,
+ bool isImplicitlyDeclared, ConstexprSpecKind ConstexprKind,
InheritedConstructor Inherited = InheritedConstructor(),
Expr *TrailingRequiresClause = nullptr);
@@ -2479,7 +2603,7 @@ public:
return getCanonicalDecl()->getExplicitSpecifierInternal();
}
- /// Return true if the declartion is already resolved to be explicit.
+ /// Return true if the declaration is already resolved to be explicit.
bool isExplicit() const { return getExplicitSpecifier().isExplicit(); }
/// Iterates through the member/base initializer list.
@@ -2676,25 +2800,24 @@ class CXXDestructorDecl : public CXXMethodDecl {
CXXDestructorDecl(ASTContext &C, CXXRecordDecl *RD, SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo, QualType T,
- TypeSourceInfo *TInfo, bool isInline,
+ TypeSourceInfo *TInfo, bool UsesFPIntrin, bool isInline,
bool isImplicitlyDeclared, ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause = nullptr)
: CXXMethodDecl(CXXDestructor, C, RD, StartLoc, NameInfo, T, TInfo,
- SC_None, isInline, ConstexprKind, SourceLocation(),
- TrailingRequiresClause) {
+ SC_None, UsesFPIntrin, isInline, ConstexprKind,
+ SourceLocation(), TrailingRequiresClause) {
setImplicit(isImplicitlyDeclared);
}
void anchor() override;
public:
- static CXXDestructorDecl *Create(ASTContext &C, CXXRecordDecl *RD,
- SourceLocation StartLoc,
- const DeclarationNameInfo &NameInfo,
- QualType T, TypeSourceInfo *TInfo,
- bool isInline, bool isImplicitlyDeclared,
- ConstexprSpecKind ConstexprKind,
- Expr *TrailingRequiresClause = nullptr);
+ static CXXDestructorDecl *
+ Create(ASTContext &C, CXXRecordDecl *RD, SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo, QualType T, TypeSourceInfo *TInfo,
+ bool UsesFPIntrin, bool isInline, bool isImplicitlyDeclared,
+ ConstexprSpecKind ConstexprKind,
+ Expr *TrailingRequiresClause = nullptr);
static CXXDestructorDecl *CreateDeserialized(ASTContext & C, unsigned ID);
void setOperatorDelete(FunctionDecl *OD, Expr *ThisArg);
@@ -2732,12 +2855,13 @@ public:
class CXXConversionDecl : public CXXMethodDecl {
CXXConversionDecl(ASTContext &C, CXXRecordDecl *RD, SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo, QualType T,
- TypeSourceInfo *TInfo, bool isInline, ExplicitSpecifier ES,
- ConstexprSpecKind ConstexprKind, SourceLocation EndLocation,
+ TypeSourceInfo *TInfo, bool UsesFPIntrin, bool isInline,
+ ExplicitSpecifier ES, ConstexprSpecKind ConstexprKind,
+ SourceLocation EndLocation,
Expr *TrailingRequiresClause = nullptr)
: CXXMethodDecl(CXXConversion, C, RD, StartLoc, NameInfo, T, TInfo,
- SC_None, isInline, ConstexprKind, EndLocation,
- TrailingRequiresClause),
+ SC_None, UsesFPIntrin, isInline, ConstexprKind,
+ EndLocation, TrailingRequiresClause),
ExplicitSpec(ES) {}
void anchor() override;
@@ -2750,8 +2874,9 @@ public:
static CXXConversionDecl *
Create(ASTContext &C, CXXRecordDecl *RD, SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo, QualType T, TypeSourceInfo *TInfo,
- bool isInline, ExplicitSpecifier ES, ConstexprSpecKind ConstexprKind,
- SourceLocation EndLocation, Expr *TrailingRequiresClause = nullptr);
+ bool UsesFPIntrin, bool isInline, ExplicitSpecifier ES,
+ ConstexprSpecKind ConstexprKind, SourceLocation EndLocation,
+ Expr *TrailingRequiresClause = nullptr);
static CXXConversionDecl *CreateDeserialized(ASTContext &C, unsigned ID);
ExplicitSpecifier getExplicitSpecifier() {
@@ -2762,7 +2887,7 @@ public:
return getCanonicalDecl()->ExplicitSpec;
}
- /// Return true if the declartion is already resolved to be explicit.
+ /// Return true if the declaration is already resolved to be explicit.
bool isExplicit() const { return getExplicitSpecifier().isExplicit(); }
void setExplicitSpecifier(ExplicitSpecifier ES) { ExplicitSpec = ES; }
@@ -2787,6 +2912,12 @@ public:
static bool classofKind(Kind K) { return K == CXXConversion; }
};
+/// Represents the language in a linkage specification.
+///
+/// The values are part of the serialization ABI for
+/// ASTs and cannot be changed without altering that ABI.
+enum class LinkageSpecLanguageIDs { C = 1, CXX = 2 };
+
/// Represents a linkage specification.
///
/// For example:
@@ -2797,14 +2928,7 @@ class LinkageSpecDecl : public Decl, public DeclContext {
virtual void anchor();
// This class stores some data in DeclContext::LinkageSpecDeclBits to save
// some space. Use the provided accessors to access it.
-public:
- /// Represents the language in a linkage specification.
- ///
- /// The values are part of the serialization ABI for
- /// ASTs and cannot be changed without altering that ABI.
- enum LanguageIDs { lang_c = 1, lang_cxx = 2 };
-private:
/// The source location for the extern keyword.
SourceLocation ExternLoc;
@@ -2812,22 +2936,25 @@ private:
SourceLocation RBraceLoc;
LinkageSpecDecl(DeclContext *DC, SourceLocation ExternLoc,
- SourceLocation LangLoc, LanguageIDs lang, bool HasBraces);
+ SourceLocation LangLoc, LinkageSpecLanguageIDs lang,
+ bool HasBraces);
public:
static LinkageSpecDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation ExternLoc,
- SourceLocation LangLoc, LanguageIDs Lang,
- bool HasBraces);
+ SourceLocation LangLoc,
+ LinkageSpecLanguageIDs Lang, bool HasBraces);
static LinkageSpecDecl *CreateDeserialized(ASTContext &C, unsigned ID);
/// Return the language specified by this linkage specification.
- LanguageIDs getLanguage() const {
- return static_cast<LanguageIDs>(LinkageSpecDeclBits.Language);
+ LinkageSpecLanguageIDs getLanguage() const {
+ return static_cast<LinkageSpecLanguageIDs>(LinkageSpecDeclBits.Language);
}
/// Set the language specified by this linkage specification.
- void setLanguage(LanguageIDs L) { LinkageSpecDeclBits.Language = L; }
+ void setLanguage(LinkageSpecLanguageIDs L) {
+ LinkageSpecDeclBits.Language = llvm::to_underlying(L);
+ }
/// Determines whether this linkage specification had braces in
/// its syntactic form.
@@ -3290,7 +3417,7 @@ class BaseUsingDecl : public NamedDecl {
protected:
BaseUsingDecl(Kind DK, DeclContext *DC, SourceLocation L, DeclarationName N)
- : NamedDecl(DK, DC, L, N), FirstUsingShadow(nullptr, 0) {}
+ : NamedDecl(DK, DC, L, N), FirstUsingShadow(nullptr, false) {}
private:
void anchor() override;
@@ -3476,6 +3603,7 @@ class ConstructorUsingShadowDecl final : public UsingShadowDecl {
/// \c true if the constructor ultimately named by this using shadow
/// declaration is within a virtual base class subobject of the class that
/// contains this declaration.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsVirtual : 1;
ConstructorUsingShadowDecl(ASTContext &C, DeclContext *DC, SourceLocation Loc,
@@ -3578,17 +3706,15 @@ public:
class UsingEnumDecl : public BaseUsingDecl, public Mergeable<UsingEnumDecl> {
/// The source location of the 'using' keyword itself.
SourceLocation UsingLocation;
-
- /// Location of the 'enum' keyword.
+ /// The source location of the 'enum' keyword.
SourceLocation EnumLocation;
-
- /// The enum
- EnumDecl *Enum;
+ /// 'qual::SomeEnum' as an EnumType, possibly with Elaborated/Typedef sugar.
+ TypeSourceInfo *EnumType;
UsingEnumDecl(DeclContext *DC, DeclarationName DN, SourceLocation UL,
- SourceLocation EL, SourceLocation NL, EnumDecl *ED)
- : BaseUsingDecl(UsingEnum, DC, NL, DN), UsingLocation(UL),
- EnumLocation(EL), Enum(ED) {}
+ SourceLocation EL, SourceLocation NL, TypeSourceInfo *EnumType)
+ : BaseUsingDecl(UsingEnum, DC, NL, DN), UsingLocation(UL), EnumLocation(EL),
+ EnumType(EnumType){}
void anchor() override;
@@ -3603,13 +3729,29 @@ public:
/// The source location of the 'enum' keyword.
SourceLocation getEnumLoc() const { return EnumLocation; }
void setEnumLoc(SourceLocation L) { EnumLocation = L; }
+ NestedNameSpecifier *getQualifier() const {
+ return getQualifierLoc().getNestedNameSpecifier();
+ }
+ NestedNameSpecifierLoc getQualifierLoc() const {
+ if (auto ETL = EnumType->getTypeLoc().getAs<ElaboratedTypeLoc>())
+ return ETL.getQualifierLoc();
+ return NestedNameSpecifierLoc();
+ }
+ // Returns the "qualifier::Name" part as a TypeLoc.
+ TypeLoc getEnumTypeLoc() const {
+ return EnumType->getTypeLoc();
+ }
+ TypeSourceInfo *getEnumType() const {
+ return EnumType;
+ }
+ void setEnumType(TypeSourceInfo *TSI) { EnumType = TSI; }
public:
- EnumDecl *getEnumDecl() const { return Enum; }
+ EnumDecl *getEnumDecl() const { return cast<EnumDecl>(EnumType->getType()->getAsTagDecl()); }
static UsingEnumDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation UsingL, SourceLocation EnumL,
- SourceLocation NameL, EnumDecl *ED);
+ SourceLocation NameL, TypeSourceInfo *EnumType);
static UsingEnumDecl *CreateDeserialized(ASTContext &C, unsigned ID);
@@ -3677,7 +3819,7 @@ public:
/// Get the set of using declarations that this pack expanded into. Note that
/// some of these may still be unresolved.
ArrayRef<NamedDecl *> expansions() const {
- return llvm::makeArrayRef(getTrailingObjects<NamedDecl *>(), NumExpansions);
+ return llvm::ArrayRef(getTrailingObjects<NamedDecl *>(), NumExpansions);
}
static UsingPackDecl *Create(ASTContext &C, DeclContext *DC,
@@ -3908,12 +4050,12 @@ public:
/// Represents a C++11 static_assert declaration.
class StaticAssertDecl : public Decl {
llvm::PointerIntPair<Expr *, 1, bool> AssertExprAndFailed;
- StringLiteral *Message;
+ Expr *Message;
SourceLocation RParenLoc;
StaticAssertDecl(DeclContext *DC, SourceLocation StaticAssertLoc,
- Expr *AssertExpr, StringLiteral *Message,
- SourceLocation RParenLoc, bool Failed)
+ Expr *AssertExpr, Expr *Message, SourceLocation RParenLoc,
+ bool Failed)
: Decl(StaticAssert, DC, StaticAssertLoc),
AssertExprAndFailed(AssertExpr, Failed), Message(Message),
RParenLoc(RParenLoc) {}
@@ -3925,15 +4067,15 @@ public:
static StaticAssertDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation StaticAssertLoc,
- Expr *AssertExpr, StringLiteral *Message,
+ Expr *AssertExpr, Expr *Message,
SourceLocation RParenLoc, bool Failed);
static StaticAssertDecl *CreateDeserialized(ASTContext &C, unsigned ID);
Expr *getAssertExpr() { return AssertExprAndFailed.getPointer(); }
const Expr *getAssertExpr() const { return AssertExprAndFailed.getPointer(); }
- StringLiteral *getMessage() { return Message; }
- const StringLiteral *getMessage() const { return Message; }
+ Expr *getMessage() { return Message; }
+ const Expr *getMessage() const { return Message; }
bool isFailed() const { return AssertExprAndFailed.getInt(); }
@@ -4047,10 +4189,10 @@ public:
unsigned NumBindings);
ArrayRef<BindingDecl *> bindings() const {
- return llvm::makeArrayRef(getTrailingObjects<BindingDecl *>(), NumBindings);
+ return llvm::ArrayRef(getTrailingObjects<BindingDecl *>(), NumBindings);
}
- void printName(raw_ostream &os) const override;
+ void printName(raw_ostream &OS, const PrintingPolicy &Policy) const override;
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == Decomposition; }
@@ -4163,7 +4305,8 @@ private:
public:
/// Print this UUID in a human-readable format.
- void printName(llvm::raw_ostream &OS) const override;
+ void printName(llvm::raw_ostream &OS,
+ const PrintingPolicy &Policy) const override;
/// Get the decomposed parts of this declaration.
Parts getParts() const { return PartVal; }
@@ -4185,6 +4328,55 @@ public:
static bool classofKind(Kind K) { return K == Decl::MSGuid; }
};
+/// An artificial decl, representing a global anonymous constant value which is
+/// uniquified by value within a translation unit.
+///
+/// These is currently only used to back the LValue returned by
+/// __builtin_source_location, but could potentially be used for other similar
+/// situations in the future.
+class UnnamedGlobalConstantDecl : public ValueDecl,
+ public Mergeable<UnnamedGlobalConstantDecl>,
+ public llvm::FoldingSetNode {
+
+ // The constant value of this global.
+ APValue Value;
+
+ void anchor() override;
+
+ UnnamedGlobalConstantDecl(const ASTContext &C, DeclContext *DC, QualType T,
+ const APValue &Val);
+
+ static UnnamedGlobalConstantDecl *Create(const ASTContext &C, QualType T,
+ const APValue &APVal);
+ static UnnamedGlobalConstantDecl *CreateDeserialized(ASTContext &C,
+ unsigned ID);
+
+ // Only ASTContext::getUnnamedGlobalConstantDecl and deserialization create
+ // these.
+ friend class ASTContext;
+ friend class ASTReader;
+ friend class ASTDeclReader;
+
+public:
+ /// Print this in a human-readable format.
+ void printName(llvm::raw_ostream &OS,
+ const PrintingPolicy &Policy) const override;
+
+ const APValue &getValue() const { return Value; }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Ty,
+ const APValue &APVal) {
+ Ty.Profile(ID);
+ APVal.Profile(ID);
+ }
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getType(), getValue());
+ }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classofKind(Kind K) { return K == Decl::UnnamedGlobalConstant; }
+};
+
/// Insertion operator for diagnostics. This allows sending an AccessSpecifier
/// into a diagnostic with <<.
const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclContextInternals.h b/contrib/llvm-project/clang/include/clang/AST/DeclContextInternals.h
index 2eef2343b750..903cdb7bfcc8 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclContextInternals.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclContextInternals.h
@@ -78,8 +78,7 @@ class StoredDeclsList {
}
Data.setPointer(NewHead);
- assert(llvm::find_if(getLookupResult(), ShouldErase) ==
- getLookupResult().end() && "Still exists!");
+ assert(llvm::none_of(getLookupResult(), ShouldErase) && "Still exists!");
}
void erase(NamedDecl *ND) {
@@ -91,7 +90,7 @@ public:
StoredDeclsList(StoredDeclsList &&RHS) : Data(RHS.Data) {
RHS.Data.setPointer(nullptr);
- RHS.Data.setInt(0);
+ RHS.Data.setInt(false);
}
void MaybeDeallocList() {
@@ -115,7 +114,7 @@ public:
Data = RHS.Data;
RHS.Data.setPointer(nullptr);
- RHS.Data.setInt(0);
+ RHS.Data.setInt(false);
return *this;
}
@@ -143,7 +142,7 @@ public:
}
void setHasExternalDecls() {
- Data.setInt(1);
+ Data.setInt(true);
}
void remove(NamedDecl *D) {
@@ -156,7 +155,7 @@ public:
erase_if([](NamedDecl *ND) { return ND->isFromASTFile(); });
// Don't have any pending external decls any more.
- Data.setInt(0);
+ Data.setInt(false);
}
void replaceExternalDecls(ArrayRef<NamedDecl*> Decls) {
@@ -172,7 +171,7 @@ public:
});
// Don't have any pending external decls any more.
- Data.setInt(0);
+ Data.setInt(false);
if (Decls.empty())
return;
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclFriend.h b/contrib/llvm-project/clang/include/clang/AST/DeclFriend.h
index 6f8306c6025e..3e6ca5b32192 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclFriend.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclFriend.h
@@ -23,7 +23,6 @@
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/None.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
@@ -74,6 +73,7 @@ private:
/// True if this 'friend' declaration is unsupported. Eventually we
/// will support every possible friend declaration, but for now we
/// silently ignore some and set this flag to authorize all access.
+ LLVM_PREFERRED_TYPE(bool)
unsigned UnsupportedFriend : 1;
// The number of "outer" template parameter lists in non-templatic
@@ -108,11 +108,10 @@ public:
friend class ASTNodeImporter;
friend TrailingObjects;
- static FriendDecl *Create(ASTContext &C, DeclContext *DC,
- SourceLocation L, FriendUnion Friend_,
- SourceLocation FriendL,
- ArrayRef<TemplateParameterList*> FriendTypeTPLists
- = None);
+ static FriendDecl *
+ Create(ASTContext &C, DeclContext *DC, SourceLocation L, FriendUnion Friend_,
+ SourceLocation FriendL,
+ ArrayRef<TemplateParameterList *> FriendTypeTPLists = std::nullopt);
static FriendDecl *CreateDeserialized(ASTContext &C, unsigned ID,
unsigned FriendTypeNumTPLists);
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclObjC.h b/contrib/llvm-project/clang/include/clang/AST/DeclObjC.h
index 6bb9cdf67034..f8f894b4b10d 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclObjC.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclObjC.h
@@ -25,9 +25,8 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/None.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
@@ -116,6 +115,8 @@ public:
const SourceLocation *Locs, ASTContext &Ctx);
};
+enum class ObjCImplementationControl { None, Required, Optional };
+
/// ObjCMethodDecl - Represents an instance or class method declaration.
/// ObjC methods can be declared within 4 contexts: class interfaces,
/// categories, protocols, and class implementations. While C++ member
@@ -140,10 +141,6 @@ class ObjCMethodDecl : public NamedDecl, public DeclContext {
// This class stores some data in DeclContext::ObjCMethodDeclBits
// to save some space. Use the provided accessors to access it.
-public:
- enum ImplementationControl { None, Required, Optional };
-
-private:
/// Return type of this method.
QualType MethodDeclType;
@@ -169,14 +166,14 @@ private:
/// constructed by createImplicitParams.
ImplicitParamDecl *CmdDecl = nullptr;
- ObjCMethodDecl(SourceLocation beginLoc, SourceLocation endLoc,
- Selector SelInfo, QualType T, TypeSourceInfo *ReturnTInfo,
- DeclContext *contextDecl, bool isInstance = true,
- bool isVariadic = false, bool isPropertyAccessor = false,
- bool isSynthesizedAccessorStub = false,
- bool isImplicitlyDeclared = false, bool isDefined = false,
- ImplementationControl impControl = None,
- bool HasRelatedResultType = false);
+ ObjCMethodDecl(
+ SourceLocation beginLoc, SourceLocation endLoc, Selector SelInfo,
+ QualType T, TypeSourceInfo *ReturnTInfo, DeclContext *contextDecl,
+ bool isInstance = true, bool isVariadic = false,
+ bool isPropertyAccessor = false, bool isSynthesizedAccessorStub = false,
+ bool isImplicitlyDeclared = false, bool isDefined = false,
+ ObjCImplementationControl impControl = ObjCImplementationControl::None,
+ bool HasRelatedResultType = false);
SelectorLocationsKind getSelLocsKind() const {
return static_cast<SelectorLocationsKind>(ObjCMethodDeclBits.SelLocsKind);
@@ -236,7 +233,7 @@ public:
bool isVariadic = false, bool isPropertyAccessor = false,
bool isSynthesizedAccessorStub = false,
bool isImplicitlyDeclared = false, bool isDefined = false,
- ImplementationControl impControl = None,
+ ObjCImplementationControl impControl = ObjCImplementationControl::None,
bool HasRelatedResultType = false);
static ObjCMethodDecl *CreateDeserialized(ASTContext &C, unsigned ID);
@@ -374,8 +371,7 @@ public:
// ArrayRef access to formal parameters. This should eventually
// replace the iterator interface above.
ArrayRef<ParmVarDecl*> parameters() const {
- return llvm::makeArrayRef(const_cast<ParmVarDecl**>(getParams()),
- NumParams);
+ return llvm::ArrayRef(const_cast<ParmVarDecl **>(getParams()), NumParams);
}
ParmVarDecl *getParamDecl(unsigned Idx) {
@@ -389,9 +385,8 @@ public:
/// Sets the method's parameters and selector source locations.
/// If the method is implicit (not coming from source) \p SelLocs is
/// ignored.
- void setMethodParams(ASTContext &C,
- ArrayRef<ParmVarDecl*> Params,
- ArrayRef<SourceLocation> SelLocs = llvm::None);
+ void setMethodParams(ASTContext &C, ArrayRef<ParmVarDecl *> Params,
+ ArrayRef<SourceLocation> SelLocs = std::nullopt);
// Iterator access to parameter types.
struct GetTypeFn {
@@ -487,6 +482,9 @@ public:
/// True if the method is tagged as objc_direct
bool isDirectMethod() const;
+ /// True if the method has a parameter that's destroyed in the callee.
+ bool hasParamDestroyedInCallee() const;
+
/// Returns the property associated with this method's selector.
///
/// Note that even if this particular method is not marked as a property
@@ -495,16 +493,17 @@ public:
const ObjCPropertyDecl *findPropertyDecl(bool CheckOverrides = true) const;
// Related to protocols declared in \@protocol
- void setDeclImplementation(ImplementationControl ic) {
- ObjCMethodDeclBits.DeclImplementation = ic;
+ void setDeclImplementation(ObjCImplementationControl ic) {
+ ObjCMethodDeclBits.DeclImplementation = llvm::to_underlying(ic);
}
- ImplementationControl getImplementationControl() const {
- return ImplementationControl(ObjCMethodDeclBits.DeclImplementation);
+ ObjCImplementationControl getImplementationControl() const {
+ return static_cast<ObjCImplementationControl>(
+ ObjCMethodDeclBits.DeclImplementation);
}
bool isOptional() const {
- return getImplementationControl() == Optional;
+ return getImplementationControl() == ObjCImplementationControl::Optional;
}
/// Returns true if this specific method declaration is marked with the
@@ -581,6 +580,7 @@ class ObjCTypeParamDecl : public TypedefNameDecl {
unsigned Index : 14;
/// The variance of the type parameter.
+ LLVM_PREFERRED_TYPE(ObjCTypeParamVariance)
unsigned Variance : 2;
/// The location of the variance, if any.
@@ -742,10 +742,13 @@ private:
QualType DeclType;
TypeSourceInfo *DeclTypeSourceInfo;
+ LLVM_PREFERRED_TYPE(ObjCPropertyAttribute::Kind)
unsigned PropertyAttributes : NumObjCPropertyAttrsBits;
+ LLVM_PREFERRED_TYPE(ObjCPropertyAttribute::Kind)
unsigned PropertyAttributesAsWritten : NumObjCPropertyAttrsBits;
// \@required/\@optional
+ LLVM_PREFERRED_TYPE(PropertyControl)
unsigned PropertyImplementation : 2;
// getter name of NULL if no getter
@@ -776,17 +779,13 @@ private:
LParenLoc(LParenLocation), DeclType(T), DeclTypeSourceInfo(TSI),
PropertyAttributes(ObjCPropertyAttribute::kind_noattr),
PropertyAttributesAsWritten(ObjCPropertyAttribute::kind_noattr),
- PropertyImplementation(propControl), GetterName(Selector()),
- SetterName(Selector()) {}
+ PropertyImplementation(propControl) {}
public:
- static ObjCPropertyDecl *Create(ASTContext &C, DeclContext *DC,
- SourceLocation L,
- IdentifierInfo *Id, SourceLocation AtLocation,
- SourceLocation LParenLocation,
- QualType T,
- TypeSourceInfo *TSI,
- PropertyControl propControl = None);
+ static ObjCPropertyDecl *
+ Create(ASTContext &C, DeclContext *DC, SourceLocation L, IdentifierInfo *Id,
+ SourceLocation AtLocation, SourceLocation LParenLocation, QualType T,
+ TypeSourceInfo *TSI, PropertyControl propControl = None);
static ObjCPropertyDecl *CreateDeserialized(ASTContext &C, unsigned ID);
@@ -1072,21 +1071,23 @@ public:
bool HasUserDeclaredSetterMethod(const ObjCPropertyDecl *P) const;
ObjCIvarDecl *getIvarDecl(IdentifierInfo *Id) const;
+ ObjCPropertyDecl *getProperty(const IdentifierInfo *Id,
+ bool IsInstance) const;
+
ObjCPropertyDecl *
FindPropertyDeclaration(const IdentifierInfo *PropertyId,
ObjCPropertyQueryKind QueryKind) const;
using PropertyMap =
- llvm::DenseMap<std::pair<IdentifierInfo *, unsigned/*isClassProperty*/>,
- ObjCPropertyDecl *>;
+ llvm::MapVector<std::pair<IdentifierInfo *, unsigned /*isClassProperty*/>,
+ ObjCPropertyDecl *>;
using ProtocolPropertySet = llvm::SmallDenseSet<const ObjCProtocolDecl *, 8>;
using PropertyDeclOrder = llvm::SmallVector<ObjCPropertyDecl *, 8>;
/// This routine collects list of properties to be implemented in the class.
/// This includes, class's and its conforming protocols' properties.
/// Note, the superclass's properties are not included in the list.
- virtual void collectPropertiesToImplement(PropertyMap &PM,
- PropertyDeclOrder &PO) const {}
+ virtual void collectPropertiesToImplement(PropertyMap &PM) const {}
SourceLocation getAtStartLoc() const { return ObjCContainerDeclBits.AtStart; }
@@ -1148,6 +1149,7 @@ public:
class ObjCInterfaceDecl : public ObjCContainerDecl
, public Redeclarable<ObjCInterfaceDecl> {
friend class ASTContext;
+ friend class ODRDiagsEmitter;
/// TypeForDecl - This indicates the Type object that represents this
/// TypeDecl. It is a cache maintained by ASTContext::getObjCInterfaceType
@@ -1180,14 +1182,17 @@ class ObjCInterfaceDecl : public ObjCContainerDecl
/// Indicates that the contents of this Objective-C class will be
/// completed by the external AST source when required.
+ LLVM_PREFERRED_TYPE(bool)
mutable unsigned ExternallyCompleted : 1;
/// Indicates that the ivar cache does not yet include ivars
/// declared in the implementation.
+ LLVM_PREFERRED_TYPE(bool)
mutable unsigned IvarListMissingImplementation : 1;
/// Indicates that this interface decl contains at least one initializer
/// marked with the 'objc_designated_initializer' attribute.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasDesignatedInitializers : 1;
enum InheritedDesignatedInitializersState {
@@ -1203,8 +1208,16 @@ class ObjCInterfaceDecl : public ObjCContainerDecl
};
/// One of the \c InheritedDesignatedInitializersState enumeratos.
+ LLVM_PREFERRED_TYPE(InheritedDesignatedInitializersState)
mutable unsigned InheritedDesignatedInitializers : 2;
+ /// Tracks whether a ODR hash has been computed for this interface.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned HasODRHash : 1;
+
+ /// A hash of parts of the class to help in ODR checking.
+ unsigned ODRHash = 0;
+
/// The location of the last location in this declaration, before
/// the properties/methods. For example, this will be the '>', '}', or
/// identifier,
@@ -1213,7 +1226,7 @@ class ObjCInterfaceDecl : public ObjCContainerDecl
DefinitionData()
: ExternallyCompleted(false), IvarListMissingImplementation(true),
HasDesignatedInitializers(false),
- InheritedDesignatedInitializers(IDI_Unknown) {}
+ InheritedDesignatedInitializers(IDI_Unknown), HasODRHash(false) {}
};
/// The type parameters associated with this class, if any.
@@ -1537,6 +1550,13 @@ public:
/// a forward declaration (\@class) to a definition (\@interface).
void startDefinition();
+ /// Starts the definition without sharing it with other redeclarations.
+ /// Such definition shouldn't be used for anything but only to compare if
+ /// a duplicate is compatible with previous definition or if it is
+ /// a distinct duplicate.
+ void startDuplicateDefinitionForComparison();
+ void mergeDuplicateDefinitionWithCommon(const ObjCInterfaceDecl *Definition);
+
/// Retrieve the superclass type.
const ObjCObjectType *getSuperClassType() const {
if (TypeSourceInfo *TInfo = getSuperClassTInfo())
@@ -1778,8 +1798,7 @@ public:
*FindPropertyVisibleInPrimaryClass(IdentifierInfo *PropertyId,
ObjCPropertyQueryKind QueryKind) const;
- void collectPropertiesToImplement(PropertyMap &PM,
- PropertyDeclOrder &PO) const override;
+ void collectPropertiesToImplement(PropertyMap &PM) const override;
/// isSuperClassOf - Return true if this class is the specified class or is a
/// super class of the specified interface class.
@@ -1895,10 +1914,17 @@ public:
const Type *getTypeForDecl() const { return TypeForDecl; }
void setTypeForDecl(const Type *TD) const { TypeForDecl = TD; }
+ /// Get precomputed ODRHash or add a new one.
+ unsigned getODRHash();
+
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == ObjCInterface; }
private:
+ /// True if a valid hash is stored in ODRHash.
+ bool hasODRHash() const;
+ void setHasODRHash(bool HasHash);
+
const ObjCInterfaceDecl *findInterfaceWithDesignatedInitializers() const;
bool inheritsDesignatedInitializers() const;
};
@@ -1949,12 +1975,22 @@ public:
/// in; this is either the interface where the ivar was declared, or the
/// interface the ivar is conceptually a part of in the case of synthesized
/// ivars.
- const ObjCInterfaceDecl *getContainingInterface() const;
+ ObjCInterfaceDecl *getContainingInterface();
+ const ObjCInterfaceDecl *getContainingInterface() const {
+ return const_cast<ObjCIvarDecl *>(this)->getContainingInterface();
+ }
ObjCIvarDecl *getNextIvar() { return NextIvar; }
const ObjCIvarDecl *getNextIvar() const { return NextIvar; }
void setNextIvar(ObjCIvarDecl *ivar) { NextIvar = ivar; }
+ ObjCIvarDecl *getCanonicalDecl() override {
+ return cast<ObjCIvarDecl>(FieldDecl::getCanonicalDecl());
+ }
+ const ObjCIvarDecl *getCanonicalDecl() const {
+ return const_cast<ObjCIvarDecl *>(this)->getCanonicalDecl();
+ }
+
void setAccessControl(AccessControl ac) { DeclAccess = ac; }
AccessControl getAccessControl() const { return AccessControl(DeclAccess); }
@@ -1980,7 +2016,9 @@ private:
ObjCIvarDecl *NextIvar = nullptr;
// NOTE: VC++ treats enums as signed, avoid using the AccessControl enum
+ LLVM_PREFERRED_TYPE(AccessControl)
unsigned DeclAccess : 3;
+ LLVM_PREFERRED_TYPE(bool)
unsigned Synthesized : 1;
};
@@ -2045,6 +2083,13 @@ class ObjCProtocolDecl : public ObjCContainerDecl,
/// Referenced protocols
ObjCProtocolList ReferencedProtocols;
+
+ /// Tracks whether a ODR hash has been computed for this protocol.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned HasODRHash : 1;
+
+ /// A hash of parts of the class to help in ODR checking.
+ unsigned ODRHash = 0;
};
/// Contains a pointer to the data associated with this class,
@@ -2081,10 +2126,15 @@ class ObjCProtocolDecl : public ObjCContainerDecl,
return getMostRecentDecl();
}
+ /// True if a valid hash is stored in ODRHash.
+ bool hasODRHash() const;
+ void setHasODRHash(bool HasHash);
+
public:
friend class ASTDeclReader;
friend class ASTDeclWriter;
friend class ASTReader;
+ friend class ODRDiagsEmitter;
static ObjCProtocolDecl *Create(ASTContext &C, DeclContext *DC,
IdentifierInfo *Id,
@@ -2209,6 +2259,13 @@ public:
/// Starts the definition of this Objective-C protocol.
void startDefinition();
+ /// Starts the definition without sharing it with other redeclarations.
+ /// Such definition shouldn't be used for anything but only to compare if
+ /// a duplicate is compatible with previous definition or if it is
+ /// a distinct duplicate.
+ void startDuplicateDefinitionForComparison();
+ void mergeDuplicateDefinitionWithCommon(const ObjCProtocolDecl *Definition);
+
/// Produce a name to be used for protocol's metadata. It comes either via
/// objc_runtime_name attribute or protocol name.
StringRef getObjCRuntimeNameAsString() const;
@@ -2234,13 +2291,15 @@ public:
ObjCProtocolDecl *getCanonicalDecl() override { return getFirstDecl(); }
const ObjCProtocolDecl *getCanonicalDecl() const { return getFirstDecl(); }
- void collectPropertiesToImplement(PropertyMap &PM,
- PropertyDeclOrder &PO) const override;
+ void collectPropertiesToImplement(PropertyMap &PM) const override;
void collectInheritedProtocolProperties(const ObjCPropertyDecl *Property,
ProtocolPropertySet &PS,
PropertyDeclOrder &PO) const;
+ /// Get precomputed ODRHash or add a new one.
+ unsigned getODRHash();
+
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == ObjCProtocol; }
};
@@ -2549,9 +2608,11 @@ class ObjCImplementationDecl : public ObjCImplDecl {
/// Do the ivars of this class require initialization other than
/// zero-initialization?
+ LLVM_PREFERRED_TYPE(bool)
bool HasNonZeroConstructors : 1;
/// Do the ivars of this class require non-trivial destruction?
+ LLVM_PREFERRED_TYPE(bool)
bool HasDestructors : 1;
ObjCImplementationDecl(DeclContext *DC,
@@ -2876,15 +2937,16 @@ ObjCInterfaceDecl::filtered_category_iterator<Filter>::operator++() {
}
inline bool ObjCInterfaceDecl::isVisibleCategory(ObjCCategoryDecl *Cat) {
- return Cat->isUnconditionallyVisible();
+ return !Cat->isInvalidDecl() && Cat->isUnconditionallyVisible();
}
inline bool ObjCInterfaceDecl::isVisibleExtension(ObjCCategoryDecl *Cat) {
- return Cat->IsClassExtension() && Cat->isUnconditionallyVisible();
+ return !Cat->isInvalidDecl() && Cat->IsClassExtension() &&
+ Cat->isUnconditionallyVisible();
}
inline bool ObjCInterfaceDecl::isKnownExtension(ObjCCategoryDecl *Cat) {
- return Cat->IsClassExtension();
+ return !Cat->isInvalidDecl() && Cat->IsClassExtension();
}
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclObjCCommon.h b/contrib/llvm-project/clang/include/clang/AST/DeclObjCCommon.h
index 5f03bce6e9a8..42c97204a613 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclObjCCommon.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclObjCCommon.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_AST_DECLOBJC_COMMON_H
-#define LLVM_CLANG_AST_DECLOBJC_COMMON_H
+#ifndef LLVM_CLANG_AST_DECLOBJCCOMMON_H
+#define LLVM_CLANG_AST_DECLOBJCCOMMON_H
namespace clang {
@@ -52,4 +52,4 @@ enum {
} // namespace clang
-#endif // LLVM_CLANG_AST_DECLOBJC_COMMON_H
+#endif // LLVM_CLANG_AST_DECLOBJCCOMMON_H
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclOpenMP.h b/contrib/llvm-project/clang/include/clang/AST/DeclOpenMP.h
index 4aa5bde92e12..73725e6e8566 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclOpenMP.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclOpenMP.h
@@ -34,7 +34,7 @@ template <typename U> class OMPDeclarativeDirective : public U {
/// Get the clauses storage.
MutableArrayRef<OMPClause *> getClauses() {
if (!Data)
- return llvm::None;
+ return std::nullopt;
return Data->getClauses();
}
@@ -90,7 +90,7 @@ public:
ArrayRef<OMPClause *> clauses() const {
if (!Data)
- return llvm::None;
+ return std::nullopt;
return Data->getClauses();
}
};
@@ -118,12 +118,12 @@ class OMPThreadPrivateDecl final : public OMPDeclarativeDirective<Decl> {
ArrayRef<const Expr *> getVars() const {
auto **Storage = reinterpret_cast<Expr **>(Data->getChildren().data());
- return llvm::makeArrayRef(Storage, Data->getNumChildren());
+ return llvm::ArrayRef(Storage, Data->getNumChildren());
}
MutableArrayRef<Expr *> getVars() {
auto **Storage = reinterpret_cast<Expr **>(Data->getChildren().data());
- return llvm::makeMutableArrayRef(Storage, Data->getNumChildren());
+ return llvm::MutableArrayRef(Storage, Data->getNumChildren());
}
void setVars(ArrayRef<Expr *> VL);
@@ -158,6 +158,12 @@ public:
static bool classofKind(Kind K) { return K == OMPThreadPrivate; }
};
+enum class OMPDeclareReductionInitKind {
+ Call, // Initialized by function call.
+ Direct, // omp_priv(<expr>)
+ Copy // omp_priv = <expr>
+};
+
/// This represents '#pragma omp declare reduction ...' directive.
/// For example, in the following, declared reduction 'foo' for types 'int' and
/// 'float':
@@ -171,14 +177,7 @@ public:
class OMPDeclareReductionDecl final : public ValueDecl, public DeclContext {
// This class stores some data in DeclContext::OMPDeclareReductionDeclBits
// to save some space. Use the provided accessors to access it.
-public:
- enum InitKind {
- CallInit, // Initialized by function call.
- DirectInit, // omp_priv(<expr>)
- CopyInit // omp_priv = <expr>
- };
-private:
friend class ASTDeclReader;
/// Combiner for declare reduction construct.
Expr *Combiner = nullptr;
@@ -239,8 +238,9 @@ public:
Expr *getInitializer() { return Initializer; }
const Expr *getInitializer() const { return Initializer; }
/// Get initializer kind.
- InitKind getInitializerKind() const {
- return static_cast<InitKind>(OMPDeclareReductionDeclBits.InitializerKind);
+ OMPDeclareReductionInitKind getInitializerKind() const {
+ return static_cast<OMPDeclareReductionInitKind>(
+ OMPDeclareReductionDeclBits.InitializerKind);
}
/// Get Orig variable of the initializer.
Expr *getInitOrig() { return Orig; }
@@ -249,9 +249,9 @@ public:
Expr *getInitPriv() { return Priv; }
const Expr *getInitPriv() const { return Priv; }
/// Set initializer expression for the declare reduction construct.
- void setInitializer(Expr *E, InitKind IK) {
+ void setInitializer(Expr *E, OMPDeclareReductionInitKind IK) {
Initializer = E;
- OMPDeclareReductionDeclBits.InitializerKind = IK;
+ OMPDeclareReductionDeclBits.InitializerKind = llvm::to_underlying(IK);
}
/// Set initializer Orig and Priv vars.
void setInitializerData(Expr *OrigE, Expr *PrivE) {
@@ -481,12 +481,12 @@ class OMPAllocateDecl final : public OMPDeclarativeDirective<Decl> {
ArrayRef<const Expr *> getVars() const {
auto **Storage = reinterpret_cast<Expr **>(Data->getChildren().data());
- return llvm::makeArrayRef(Storage, Data->getNumChildren());
+ return llvm::ArrayRef(Storage, Data->getNumChildren());
}
MutableArrayRef<Expr *> getVars() {
auto **Storage = reinterpret_cast<Expr **>(Data->getChildren().data());
- return llvm::makeMutableArrayRef(Storage, Data->getNumChildren());
+ return llvm::MutableArrayRef(Storage, Data->getNumChildren());
}
void setVars(ArrayRef<Expr *> VL);
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclTemplate.h b/contrib/llvm-project/clang/include/clang/AST/DeclTemplate.h
index cbaa287f225a..832ad2de6b08 100755
--- a/contrib/llvm-project/clang/include/clang/AST/DeclTemplate.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclTemplate.h
@@ -15,6 +15,7 @@
#define LLVM_CLANG_AST_DECLTEMPLATE_H
#include "clang/AST/ASTConcept.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
@@ -38,6 +39,7 @@
#include <cstddef>
#include <cstdint>
#include <iterator>
+#include <optional>
#include <utility>
namespace clang {
@@ -81,13 +83,16 @@ class TemplateParameterList final
/// Whether this template parameter list contains an unexpanded parameter
/// pack.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ContainsUnexpandedParameterPack : 1;
/// Whether this template parameter list has a requires clause.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasRequiresClause : 1;
/// Whether any of the template parameters has constrained-parameter
/// constraint-expression.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasConstrainedParameters : 1;
protected:
@@ -115,6 +120,8 @@ public:
SourceLocation RAngleLoc,
Expr *RequiresClause);
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &C) const;
+
/// Iterates through the template parameters in this list.
using iterator = NamedDecl **;
@@ -128,11 +135,9 @@ public:
unsigned size() const { return NumParams; }
- ArrayRef<NamedDecl*> asArray() {
- return llvm::makeArrayRef(begin(), end());
- }
+ ArrayRef<NamedDecl *> asArray() { return llvm::ArrayRef(begin(), end()); }
ArrayRef<const NamedDecl*> asArray() const {
- return llvm::makeArrayRef(begin(), size());
+ return llvm::ArrayRef(begin(), size());
}
NamedDecl* getParam(unsigned Idx) {
@@ -203,7 +208,8 @@ public:
void print(raw_ostream &Out, const ASTContext &Context,
const PrintingPolicy &Policy, bool OmitTemplateKW = false) const;
- static bool shouldIncludeTypeForArgument(const TemplateParameterList *TPL,
+ static bool shouldIncludeTypeForArgument(const PrintingPolicy &Policy,
+ const TemplateParameterList *TPL,
unsigned Idx);
};
@@ -272,8 +278,7 @@ public:
///
/// This operation assumes that the input argument list outlives it.
/// This takes the list as a pointer to avoid looking like a copy
- /// constructor, since this really really isn't safe to use that
- /// way.
+ /// constructor, since this really isn't safe to use that way.
explicit TemplateArgumentList(const TemplateArgumentList *Other)
: Arguments(Other->data()), NumArguments(Other->size()) {}
@@ -288,7 +293,7 @@ public:
/// Produce this as an array ref.
ArrayRef<TemplateArgument> asArray() const {
- return llvm::makeArrayRef(data(), size());
+ return llvm::ArrayRef(data(), size());
}
/// Retrieve the number of template arguments in this
@@ -372,11 +377,20 @@ public:
/// Set that the default argument was inherited from another parameter.
void setInherited(const ASTContext &C, ParmDecl *InheritedFrom) {
- assert(!isInherited() && "default argument already inherited");
InheritedFrom = getParmOwningDefaultArg(InheritedFrom);
if (!isSet())
ValueOrInherited = InheritedFrom;
- else
+ else if ([[maybe_unused]] auto *D =
+ ValueOrInherited.template dyn_cast<ParmDecl *>()) {
+ assert(C.isSameDefaultTemplateArgument(D, InheritedFrom));
+ ValueOrInherited =
+ new (allocateDefaultArgStorageChain(C)) Chain{InheritedFrom, get()};
+ } else if (auto *Inherited =
+ ValueOrInherited.template dyn_cast<Chain *>()) {
+ assert(C.isSameDefaultTemplateArgument(Inherited->PrevDeclWithDefaultArg,
+ InheritedFrom));
+ Inherited->PrevDeclWithDefaultArg = InheritedFrom;
+ } else
ValueOrInherited = new (allocateDefaultArgStorageChain(C))
Chain{InheritedFrom, ValueOrInherited.template get<ArgType>()};
}
@@ -430,6 +444,9 @@ public:
/// Get the underlying, templated declaration.
NamedDecl *getTemplatedDecl() const { return TemplatedDecl; }
+ // Should a specialization behave like an alias for another type.
+ bool isTypeAlias() const;
+
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
@@ -446,18 +463,17 @@ protected:
NamedDecl *TemplatedDecl;
TemplateParameterList *TemplateParams;
+public:
void setTemplateParameters(TemplateParameterList *TParams) {
TemplateParams = TParams;
}
-public:
- /// Initialize the underlying templated declaration and
- /// template parameters.
- void init(NamedDecl *templatedDecl, TemplateParameterList* templateParams) {
- assert(!TemplatedDecl && "TemplatedDecl already set!");
- assert(!TemplateParams && "TemplateParams already set!");
- TemplatedDecl = templatedDecl;
- TemplateParams = templateParams;
+ /// Initialize the underlying templated declaration.
+ void init(NamedDecl *NewTemplatedDecl) {
+ if (TemplatedDecl)
+ assert(TemplatedDecl == NewTemplatedDecl && "Inconsistent TemplatedDecl");
+ else
+ TemplatedDecl = NewTemplatedDecl;
}
};
@@ -497,7 +513,7 @@ private:
TemplateSpecializationKind TSK, const TemplateArgumentList *TemplateArgs,
const ASTTemplateArgumentListInfo *TemplateArgsAsWritten,
SourceLocation POI, MemberSpecializationInfo *MSInfo)
- : Function(FD, MSInfo ? 1 : 0), Template(Template, TSK - 1),
+ : Function(FD, MSInfo ? true : false), Template(Template, TSK - 1),
TemplateArguments(TemplateArgs),
TemplateArgumentsAsWritten(TemplateArgsAsWritten),
PointOfInstantiation(POI) {
@@ -570,7 +586,7 @@ public:
/// \code
/// template<typename> struct A {
/// template<typename> void f();
- /// template<> void f<int>(); // ClassScopeFunctionSpecializationDecl
+ /// template<> void f<int>();
/// };
/// \endcode
///
@@ -606,7 +622,7 @@ public:
static void
Profile(llvm::FoldingSetNodeID &ID, ArrayRef<TemplateArgument> TemplateArgs,
- ASTContext &Context) {
+ const ASTContext &Context) {
ID.AddInteger(TemplateArgs.size());
for (const TemplateArgument &TemplateArg : TemplateArgs)
TemplateArg.Profile(ID, Context);
@@ -669,78 +685,48 @@ public:
/// Provides information about a dependent function-template
/// specialization declaration.
///
-/// Since explicit function template specialization and instantiation
-/// declarations can only appear in namespace scope, and you can only
-/// specialize a member of a fully-specialized class, the only way to
-/// get one of these is in a friend declaration like the following:
+/// This is used for function templates explicit specializations declared
+/// within class templates:
+///
+/// \code
+/// template<typename> struct A {
+/// template<typename> void f();
+/// template<> void f<int>(); // DependentFunctionTemplateSpecializationInfo
+/// };
+/// \endcode
+///
+/// As well as dependent friend declarations naming function template
+/// specializations declared within class templates:
///
/// \code
/// template \<class T> void foo(T);
/// template \<class T> class A {
-/// friend void foo<>(T);
+/// friend void foo<>(T); // DependentFunctionTemplateSpecializationInfo
/// };
/// \endcode
class DependentFunctionTemplateSpecializationInfo final
: private llvm::TrailingObjects<DependentFunctionTemplateSpecializationInfo,
- TemplateArgumentLoc,
FunctionTemplateDecl *> {
- /// The number of potential template candidates.
- unsigned NumTemplates;
-
- /// The number of template arguments.
- unsigned NumArgs;
-
- /// The locations of the left and right angle brackets.
- SourceRange AngleLocs;
+ friend TrailingObjects;
- size_t numTrailingObjects(OverloadToken<TemplateArgumentLoc>) const {
- return NumArgs;
- }
- size_t numTrailingObjects(OverloadToken<FunctionTemplateDecl *>) const {
- return NumTemplates;
- }
+ /// The number of candidates for the primary template.
+ unsigned NumCandidates;
DependentFunctionTemplateSpecializationInfo(
- const UnresolvedSetImpl &Templates,
- const TemplateArgumentListInfo &TemplateArgs);
+ const UnresolvedSetImpl &Candidates,
+ const ASTTemplateArgumentListInfo *TemplateArgsWritten);
public:
- friend TrailingObjects;
+ /// The template arguments as written in the sources, if provided.
+ const ASTTemplateArgumentListInfo *TemplateArgumentsAsWritten;
static DependentFunctionTemplateSpecializationInfo *
- Create(ASTContext &Context, const UnresolvedSetImpl &Templates,
- const TemplateArgumentListInfo &TemplateArgs);
+ Create(ASTContext &Context, const UnresolvedSetImpl &Candidates,
+ const TemplateArgumentListInfo *TemplateArgs);
- /// Returns the number of function templates that this might
- /// be a specialization of.
- unsigned getNumTemplates() const { return NumTemplates; }
-
- /// Returns the i'th template candidate.
- FunctionTemplateDecl *getTemplate(unsigned I) const {
- assert(I < getNumTemplates() && "template index out of range");
- return getTrailingObjects<FunctionTemplateDecl *>()[I];
- }
-
- /// Returns the explicit template arguments that were given.
- const TemplateArgumentLoc *getTemplateArgs() const {
- return getTrailingObjects<TemplateArgumentLoc>();
- }
-
- /// Returns the number of explicit template arguments that were given.
- unsigned getNumTemplateArgs() const { return NumArgs; }
-
- /// Returns the nth template argument.
- const TemplateArgumentLoc &getTemplateArg(unsigned I) const {
- assert(I < getNumTemplateArgs() && "template arg index out of range");
- return getTemplateArgs()[I];
- }
-
- SourceLocation getLAngleLoc() const {
- return AngleLocs.getBegin();
- }
-
- SourceLocation getRAngleLoc() const {
- return AngleLocs.getEnd();
+ /// Returns the candidates for the primary function template.
+ ArrayRef<FunctionTemplateDecl *> getCandidates() const {
+ return {getTrailingObjects<FunctionTemplateDecl *>(), NumCandidates};
}
};
@@ -831,6 +817,15 @@ protected:
/// The first value in the array is the number of specializations/partial
/// specializations that follow.
uint32_t *LazySpecializations = nullptr;
+
+ /// The set of "injected" template arguments used within this
+ /// template.
+ ///
+ /// This pointer refers to the template arguments (there are as
+ /// many template arguments as template parameters) for the
+ /// template, and is allocated lazily, since most templates do not
+ /// require the use of this information.
+ TemplateArgument *InjectedArgs = nullptr;
};
/// Pointer to the common data shared by all declarations of this
@@ -938,6 +933,14 @@ public:
getCommonPtr()->InstantiatedFromMember.setPointer(TD);
}
+ /// Retrieve the "injected" template arguments that correspond to the
+ /// template parameters of this template.
+ ///
+ /// Although the C++ standard has no notion of the "injected" template
+ /// arguments for a template, the notion is convenient when
+ /// we need to perform substitutions inside the definition of a template.
+ ArrayRef<TemplateArgument> getInjectedTemplateArgs();
+
using redecl_range = redeclarable_base::redecl_range;
using redecl_iterator = redeclarable_base::redecl_iterator;
@@ -982,15 +985,6 @@ protected:
/// template, including explicit specializations and instantiations.
llvm::FoldingSetVector<FunctionTemplateSpecializationInfo> Specializations;
- /// The set of "injected" template arguments used within this
- /// function template.
- ///
- /// This pointer refers to the template arguments (there are as
- /// many template arguments as template parameaters) for the function
- /// template, and is allocated lazily, since most function templates do not
- /// require the use of this information.
- TemplateArgument *InjectedArgs = nullptr;
-
Common() = default;
};
@@ -1090,21 +1084,12 @@ public:
return makeSpecIterator(getSpecializations(), true);
}
- /// Retrieve the "injected" template arguments that correspond to the
- /// template parameters of this function template.
- ///
- /// Although the C++ standard has no notion of the "injected" template
- /// arguments for a function template, the notion is convenient when
- /// we need to perform substitutions inside the definition of a function
- /// template.
- ArrayRef<TemplateArgument> getInjectedTemplateArgs();
-
/// Return whether this function template is an abbreviated function template,
/// e.g. `void foo(auto x)` or `template<typename T> void foo(auto x)`
bool isAbbreviated() const {
// Since the invented template parameters generated from 'auto' parameters
// are either appended to the end of the explicit template parameter list or
- // form a new template paramter list, we can simply observe the last
+ // form a new template parameter list, we can simply observe the last
// parameter to determine if such a thing happened.
const TemplateParameterList *TPL = getTemplateParameters();
return TPL->getParam(TPL->size() - 1)->isImplicit();
@@ -1143,23 +1128,40 @@ public:
/// parameters and is not part of the Decl hierarchy. Just a facility.
class TemplateParmPosition {
protected:
- // FIXME: These probably don't need to be ints. int:5 for depth, int:8 for
- // position? Maybe?
- unsigned Depth;
- unsigned Position;
+ enum { DepthWidth = 20, PositionWidth = 12 };
+ unsigned Depth : DepthWidth;
+ unsigned Position : PositionWidth;
- TemplateParmPosition(unsigned D, unsigned P) : Depth(D), Position(P) {}
+ static constexpr unsigned MaxDepth = (1U << DepthWidth) - 1;
+ static constexpr unsigned MaxPosition = (1U << PositionWidth) - 1;
+
+ TemplateParmPosition(unsigned D, unsigned P) : Depth(D), Position(P) {
+ // The input may fill maximum values to show that it is invalid.
+ // Add one here to convert it to zero.
+ assert((D + 1) <= MaxDepth &&
+ "The depth of template parmeter position is more than 2^20!");
+ assert((P + 1) <= MaxPosition &&
+ "The position of template parmeter position is more than 2^12!");
+ }
public:
TemplateParmPosition() = delete;
/// Get the nesting depth of the template parameter.
unsigned getDepth() const { return Depth; }
- void setDepth(unsigned D) { Depth = D; }
+ void setDepth(unsigned D) {
+ assert((D + 1) <= MaxDepth &&
+ "The depth of template parmeter position is more than 2^20!");
+ Depth = D;
+ }
/// Get the position of the template parameter within its parameter list.
unsigned getPosition() const { return Position; }
- void setPosition(unsigned P) { Position = P; }
+ void setPosition(unsigned P) {
+ assert((P + 1) <= MaxPosition &&
+ "The position of template parmeter position is more than 2^12!");
+ Position = P;
+ }
/// Get the index of the template parameter within its parameter list.
unsigned getIndex() const { return Position; }
@@ -1189,10 +1191,10 @@ class TemplateTypeParmDecl final : public TypeDecl,
/// Whether the type constraint has been initialized. This can be false if the
/// constraint was not initialized yet or if there was an error forming the
- /// type constriant.
+ /// type constraint.
bool TypeConstraintInitialized : 1;
- /// Whether this non-type template parameter is an "expanded"
+ /// Whether this type template parameter is an "expanded"
/// parameter pack, meaning that its type is a pack expansion and we
/// already know the set of types that expansion expands to.
bool ExpandedParameterPack : 1;
@@ -1206,23 +1208,20 @@ class TemplateTypeParmDecl final : public TypeDecl,
DefArgStorage DefaultArgument;
TemplateTypeParmDecl(DeclContext *DC, SourceLocation KeyLoc,
- SourceLocation IdLoc, IdentifierInfo *Id,
- bool Typename, bool HasTypeConstraint,
- Optional<unsigned> NumExpanded)
+ SourceLocation IdLoc, IdentifierInfo *Id, bool Typename,
+ bool HasTypeConstraint,
+ std::optional<unsigned> NumExpanded)
: TypeDecl(TemplateTypeParm, DC, IdLoc, Id, KeyLoc), Typename(Typename),
- HasTypeConstraint(HasTypeConstraint), TypeConstraintInitialized(false),
- ExpandedParameterPack(NumExpanded),
- NumExpanded(NumExpanded ? *NumExpanded : 0) {}
+ HasTypeConstraint(HasTypeConstraint), TypeConstraintInitialized(false),
+ ExpandedParameterPack(NumExpanded),
+ NumExpanded(NumExpanded.value_or(0)) {}
public:
- static TemplateTypeParmDecl *Create(const ASTContext &C, DeclContext *DC,
- SourceLocation KeyLoc,
- SourceLocation NameLoc,
- unsigned D, unsigned P,
- IdentifierInfo *Id, bool Typename,
- bool ParameterPack,
- bool HasTypeConstraint = false,
- Optional<unsigned> NumExpanded = None);
+ static TemplateTypeParmDecl *
+ Create(const ASTContext &C, DeclContext *DC, SourceLocation KeyLoc,
+ SourceLocation NameLoc, unsigned D, unsigned P, IdentifierInfo *Id,
+ bool Typename, bool ParameterPack, bool HasTypeConstraint = false,
+ std::optional<unsigned> NumExpanded = std::nullopt);
static TemplateTypeParmDecl *CreateDeserialized(const ASTContext &C,
unsigned ID);
static TemplateTypeParmDecl *CreateDeserialized(const ASTContext &C,
@@ -1344,10 +1343,7 @@ public:
nullptr;
}
- void setTypeConstraint(NestedNameSpecifierLoc NNS,
- DeclarationNameInfo NameInfo, NamedDecl *FoundDecl,
- ConceptDecl *CD,
- const ASTTemplateArgumentListInfo *ArgsAsWritten,
+ void setTypeConstraint(ConceptReference *CR,
Expr *ImmediatelyDeclaredConstraint);
/// Determine whether this template parameter has a type-constraint.
@@ -1358,7 +1354,7 @@ public:
/// \brief Get the associated-constraints of this template parameter.
/// This will either be the immediately-introduced constraint or empty.
///
- /// Use this instead of getConstraintExpression for concepts APIs that
+ /// Use this instead of getTypeConstraint for concepts APIs that
/// accept an ArrayRef of constraint expressions.
void getAssociatedConstraints(llvm::SmallVectorImpl<const Expr *> &AC) const {
if (HasTypeConstraint)
@@ -1840,7 +1836,7 @@ class ClassTemplateSpecializationDecl
SourceLocation PointOfInstantiation;
/// The kind of specialization this declaration refers to.
- /// Really a value of type TemplateSpecializationKind.
+ LLVM_PREFERRED_TYPE(TemplateSpecializationKind)
unsigned SpecializationKind : 3;
protected:
@@ -2054,7 +2050,7 @@ public:
static void
Profile(llvm::FoldingSetNodeID &ID, ArrayRef<TemplateArgument> TemplateArgs,
- ASTContext &Context) {
+ const ASTContext &Context) {
ID.AddInteger(TemplateArgs.size());
for (const TemplateArgument &TemplateArg : TemplateArgs)
TemplateArg.Profile(ID, Context);
@@ -2230,7 +2226,7 @@ public:
static void
Profile(llvm::FoldingSetNodeID &ID, ArrayRef<TemplateArgument> TemplateArgs,
- TemplateParameterList *TPL, ASTContext &Context);
+ TemplateParameterList *TPL, const ASTContext &Context);
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
@@ -2280,9 +2276,15 @@ protected:
return static_cast<Common *>(RedeclarableTemplateDecl::getCommonPtr());
}
+ void setCommonPtr(Common *C) {
+ RedeclarableTemplateDecl::Common = C;
+ }
+
public:
+
friend class ASTDeclReader;
friend class ASTDeclWriter;
+ friend class TemplateDeclInstantiator;
/// Load any lazily-loaded specializations from the external source.
void LoadLazySpecializations() const;
@@ -2457,10 +2459,10 @@ private:
SourceLocation FriendLoc;
FriendTemplateDecl(DeclContext *DC, SourceLocation Loc,
- MutableArrayRef<TemplateParameterList *> Params,
+ TemplateParameterList **Params, unsigned NumParams,
FriendUnion Friend, SourceLocation FriendLoc)
- : Decl(Decl::FriendTemplate, DC, Loc), NumParams(Params.size()),
- Params(Params.data()), Friend(Friend), FriendLoc(FriendLoc) {}
+ : Decl(Decl::FriendTemplate, DC, Loc), NumParams(NumParams),
+ Params(Params), Friend(Friend), FriendLoc(FriendLoc) {}
FriendTemplateDecl(EmptyShell Empty) : Decl(Decl::FriendTemplate, Empty) {}
@@ -2580,70 +2582,6 @@ public:
static bool classofKind(Kind K) { return K == TypeAliasTemplate; }
};
-/// Declaration of a function specialization at template class scope.
-///
-/// For example:
-/// \code
-/// template <class T>
-/// class A {
-/// template <class U> void foo(U a) { }
-/// template<> void foo(int a) { }
-/// }
-/// \endcode
-///
-/// "template<> foo(int a)" will be saved in Specialization as a normal
-/// CXXMethodDecl. Then during an instantiation of class A, it will be
-/// transformed into an actual function specialization.
-///
-/// FIXME: This is redundant; we could store the same information directly on
-/// the CXXMethodDecl as a DependentFunctionTemplateSpecializationInfo.
-class ClassScopeFunctionSpecializationDecl : public Decl {
- CXXMethodDecl *Specialization;
- const ASTTemplateArgumentListInfo *TemplateArgs;
-
- ClassScopeFunctionSpecializationDecl(
- DeclContext *DC, SourceLocation Loc, CXXMethodDecl *FD,
- const ASTTemplateArgumentListInfo *TemplArgs)
- : Decl(Decl::ClassScopeFunctionSpecialization, DC, Loc),
- Specialization(FD), TemplateArgs(TemplArgs) {}
-
- ClassScopeFunctionSpecializationDecl(EmptyShell Empty)
- : Decl(Decl::ClassScopeFunctionSpecialization, Empty) {}
-
- virtual void anchor();
-
-public:
- friend class ASTDeclReader;
- friend class ASTDeclWriter;
-
- CXXMethodDecl *getSpecialization() const { return Specialization; }
- bool hasExplicitTemplateArgs() const { return TemplateArgs; }
- const ASTTemplateArgumentListInfo *getTemplateArgsAsWritten() const {
- return TemplateArgs;
- }
-
- static ClassScopeFunctionSpecializationDecl *
- Create(ASTContext &C, DeclContext *DC, SourceLocation Loc, CXXMethodDecl *FD,
- bool HasExplicitTemplateArgs,
- const TemplateArgumentListInfo &TemplateArgs) {
- return new (C, DC) ClassScopeFunctionSpecializationDecl(
- DC, Loc, FD,
- HasExplicitTemplateArgs
- ? ASTTemplateArgumentListInfo::Create(C, TemplateArgs)
- : nullptr);
- }
-
- static ClassScopeFunctionSpecializationDecl *
- CreateDeserialized(ASTContext &Context, unsigned ID);
-
- // Implement isa/cast/dyncast/etc.
- static bool classof(const Decl *D) { return classofKind(D->getKind()); }
-
- static bool classofKind(Kind K) {
- return K == Decl::ClassScopeFunctionSpecialization;
- }
-};
-
/// Represents a variable template specialization, which refers to
/// a variable template with a given set of template arguments.
///
@@ -2697,19 +2635,20 @@ class VarTemplateSpecializationDecl : public VarDecl,
/// The template arguments used to describe this specialization.
const TemplateArgumentList *TemplateArgs;
- TemplateArgumentListInfo TemplateArgsInfo;
+ const ASTTemplateArgumentListInfo *TemplateArgsInfo = nullptr;
/// The point where this template was instantiated (if any).
SourceLocation PointOfInstantiation;
/// The kind of specialization this declaration refers to.
- /// Really a value of type TemplateSpecializationKind.
+ LLVM_PREFERRED_TYPE(TemplateSpecializationKind)
unsigned SpecializationKind : 3;
/// Whether this declaration is a complete definition of the
/// variable template specialization. We can't otherwise tell apart
/// an instantiated declaration from an instantiated definition with
/// no initializer.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsCompleteDefinition : 1;
protected:
@@ -2752,8 +2691,9 @@ public:
// TODO: Always set this when creating the new specialization?
void setTemplateArgsInfo(const TemplateArgumentListInfo &ArgsInfo);
+ void setTemplateArgsInfo(const ASTTemplateArgumentListInfo *ArgsInfo);
- const TemplateArgumentListInfo &getTemplateArgsInfo() const {
+ const ASTTemplateArgumentListInfo *getTemplateArgsInfo() const {
return TemplateArgsInfo;
}
@@ -2898,13 +2838,15 @@ public:
return ExplicitInfo ? ExplicitInfo->TemplateKeywordLoc : SourceLocation();
}
+ SourceRange getSourceRange() const override LLVM_READONLY;
+
void Profile(llvm::FoldingSetNodeID &ID) const {
Profile(ID, TemplateArgs->asArray(), getASTContext());
}
static void Profile(llvm::FoldingSetNodeID &ID,
ArrayRef<TemplateArgument> TemplateArgs,
- ASTContext &Context) {
+ const ASTContext &Context) {
ID.AddInteger(TemplateArgs.size());
for (const TemplateArgument &TemplateArg : TemplateArgs)
TemplateArg.Profile(ID, Context);
@@ -3055,6 +2997,8 @@ public:
return First->InstantiatedFromMember.setInt(true);
}
+ SourceRange getSourceRange() const override LLVM_READONLY;
+
void Profile(llvm::FoldingSetNodeID &ID) const {
Profile(ID, getTemplateArgs().asArray(), getTemplateParameters(),
getASTContext());
@@ -3062,7 +3006,7 @@ public:
static void
Profile(llvm::FoldingSetNodeID &ID, ArrayRef<TemplateArgument> TemplateArgs,
- TemplateParameterList *TPL, ASTContext &Context);
+ TemplateParameterList *TPL, const ASTContext &Context);
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
@@ -3227,7 +3171,7 @@ public:
static bool classofKind(Kind K) { return K == VarTemplate; }
};
-/// Declaration of a C++2a concept.
+/// Declaration of a C++20 concept.
class ConceptDecl : public TemplateDecl, public Mergeable<ConceptDecl> {
protected:
Expr *ConstraintExpr;
@@ -3256,8 +3200,12 @@ public:
return isa<TemplateTypeParmDecl>(getTemplateParameters()->getParam(0));
}
- ConceptDecl *getCanonicalDecl() override { return getFirstDecl(); }
- const ConceptDecl *getCanonicalDecl() const { return getFirstDecl(); }
+ ConceptDecl *getCanonicalDecl() override {
+ return cast<ConceptDecl>(getPrimaryMergedDecl(this));
+ }
+ const ConceptDecl *getCanonicalDecl() const {
+ return const_cast<ConceptDecl *>(this)->getCanonicalDecl();
+ }
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
@@ -3268,6 +3216,40 @@ public:
friend class ASTDeclWriter;
};
+// An implementation detail of ConceptSpecialicationExpr that holds the template
+// arguments, so we can later use this to reconstitute the template arguments
+// during constraint checking.
+class ImplicitConceptSpecializationDecl final
+ : public Decl,
+ private llvm::TrailingObjects<ImplicitConceptSpecializationDecl,
+ TemplateArgument> {
+ unsigned NumTemplateArgs;
+
+ ImplicitConceptSpecializationDecl(DeclContext *DC, SourceLocation SL,
+ ArrayRef<TemplateArgument> ConvertedArgs);
+ ImplicitConceptSpecializationDecl(EmptyShell Empty, unsigned NumTemplateArgs);
+
+public:
+ static ImplicitConceptSpecializationDecl *
+ Create(const ASTContext &C, DeclContext *DC, SourceLocation SL,
+ ArrayRef<TemplateArgument> ConvertedArgs);
+ static ImplicitConceptSpecializationDecl *
+ CreateDeserialized(const ASTContext &C, unsigned ID,
+ unsigned NumTemplateArgs);
+
+ ArrayRef<TemplateArgument> getTemplateArguments() const {
+ return ArrayRef<TemplateArgument>(getTrailingObjects<TemplateArgument>(),
+ NumTemplateArgs);
+ }
+ void setTemplateArguments(ArrayRef<TemplateArgument> Converted);
+
+ static bool classofKind(Kind K) { return K == ImplicitConceptSpecialization; }
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+
+ friend TrailingObjects;
+ friend class ASTDeclReader;
+};
+
/// A template parameter object.
///
/// Template parameter objects represent values of class type used as template
@@ -3305,14 +3287,17 @@ private:
public:
/// Print this template parameter object in a human-readable format.
- void printName(llvm::raw_ostream &OS) const override;
+ void printName(llvm::raw_ostream &OS,
+ const PrintingPolicy &Policy) const override;
/// Print this object as an equivalent expression.
void printAsExpr(llvm::raw_ostream &OS) const;
+ void printAsExpr(llvm::raw_ostream &OS, const PrintingPolicy &Policy) const;
/// Print this object as an initializer suitable for a variable of the
/// object's type.
void printAsInit(llvm::raw_ostream &OS) const;
+ void printAsInit(llvm::raw_ostream &OS, const PrintingPolicy &Policy) const;
const APValue &getValue() const { return Value; }
@@ -3365,7 +3350,7 @@ inline TemplateDecl *getAsTypeTemplateDecl(Decl *D) {
///
/// In \c A<int,int>::B, \c NTs and \c TTs have expanded pack size 2, and \c Us
/// is not a pack expansion, so returns an empty Optional.
-inline Optional<unsigned> getExpandedPackSize(const NamedDecl *Param) {
+inline std::optional<unsigned> getExpandedPackSize(const NamedDecl *Param) {
if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
if (TTP->isExpandedParameterPack())
return TTP->getNumExpansionParameters();
@@ -3381,9 +3366,13 @@ inline Optional<unsigned> getExpandedPackSize(const NamedDecl *Param) {
return TTP->getNumExpansionTemplateParameters();
}
- return None;
+ return std::nullopt;
}
+/// Internal helper used by Subst* nodes to retrieve the parameter list
+/// for their AssociatedDecl.
+TemplateParameterList *getReplacedTemplateParameterList(Decl *D);
+
} // namespace clang
#endif // LLVM_CLANG_AST_DECLTEMPLATE_H
diff --git a/contrib/llvm-project/clang/include/clang/AST/DeclarationName.h b/contrib/llvm-project/clang/include/clang/AST/DeclarationName.h
index 38da6fc727fb..c9b01dc53964 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DeclarationName.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DeclarationName.h
@@ -21,6 +21,7 @@
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/type_traits.h"
#include <cassert>
@@ -34,11 +35,9 @@ class ASTContext;
template <typename> class CanQual;
class DeclarationName;
class DeclarationNameTable;
-class MultiKeywordSelector;
struct PrintingPolicy;
class TemplateDecl;
class TypeSourceInfo;
-class UsingDirectiveDecl;
using CanQualType = CanQual<Type>;
@@ -119,14 +118,14 @@ class alignas(IdentifierInfoAlignment) CXXLiteralOperatorIdName
friend class clang::DeclarationName;
friend class clang::DeclarationNameTable;
- IdentifierInfo *ID;
+ const IdentifierInfo *ID;
/// Extra information associated with this operator name that
/// can be used by the front end. All bits are really needed
/// so it is not possible to stash something in the low order bits.
void *FETokenInfo;
- CXXLiteralOperatorIdName(IdentifierInfo *II)
+ CXXLiteralOperatorIdName(const IdentifierInfo *II)
: DeclarationNameExtra(CXXLiteralOperatorName), ID(II),
FETokenInfo(nullptr) {}
@@ -194,6 +193,13 @@ class DeclarationName {
"The various classes that DeclarationName::Ptr can point to"
" must be at least aligned to 8 bytes!");
+ static_assert(
+ std::is_same<std::underlying_type_t<StoredNameKind>,
+ std::underlying_type_t<
+ detail::DeclarationNameExtra::ExtraKind>>::value,
+ "The various enums used to compute values for NameKind should "
+ "all have the same underlying type");
+
public:
/// The kind of the name stored in this DeclarationName.
/// The first 7 enumeration values are stored inline and correspond
@@ -207,15 +213,18 @@ public:
CXXDestructorName = StoredCXXDestructorName,
CXXConversionFunctionName = StoredCXXConversionFunctionName,
CXXOperatorName = StoredCXXOperatorName,
- CXXDeductionGuideName = UncommonNameKindOffset +
- detail::DeclarationNameExtra::CXXDeductionGuideName,
- CXXLiteralOperatorName =
- UncommonNameKindOffset +
- detail::DeclarationNameExtra::CXXLiteralOperatorName,
- CXXUsingDirective = UncommonNameKindOffset +
- detail::DeclarationNameExtra::CXXUsingDirective,
- ObjCMultiArgSelector = UncommonNameKindOffset +
- detail::DeclarationNameExtra::ObjCMultiArgSelector
+ CXXDeductionGuideName = llvm::addEnumValues(
+ UncommonNameKindOffset,
+ detail::DeclarationNameExtra::CXXDeductionGuideName),
+ CXXLiteralOperatorName = llvm::addEnumValues(
+ UncommonNameKindOffset,
+ detail::DeclarationNameExtra::CXXLiteralOperatorName),
+ CXXUsingDirective =
+ llvm::addEnumValues(UncommonNameKindOffset,
+ detail::DeclarationNameExtra::CXXUsingDirective),
+ ObjCMultiArgSelector =
+ llvm::addEnumValues(UncommonNameKindOffset,
+ detail::DeclarationNameExtra::ObjCMultiArgSelector),
};
private:
@@ -353,7 +362,8 @@ public:
}
/// Construct a declaration name from an Objective-C selector.
- DeclarationName(Selector Sel) : Ptr(Sel.InfoPtr) {}
+ DeclarationName(Selector Sel)
+ : Ptr(reinterpret_cast<uintptr_t>(Sel.InfoPtr.getOpaqueValue())) {}
/// Returns the name for all C++ using-directives.
static DeclarationName getUsingDirectiveName() {
@@ -469,7 +479,7 @@ public:
/// If this name is the name of a literal operator,
/// retrieve the identifier associated with it.
- IdentifierInfo *getCXXLiteralIdentifier() const {
+ const IdentifierInfo *getCXXLiteralIdentifier() const {
if (getNameKind() == CXXLiteralOperatorName) {
assert(getPtr() && "getCXXLiteralIdentifier on a null DeclarationName!");
return castAsCXXLiteralOperatorIdName()->ID;
@@ -641,7 +651,7 @@ public:
}
/// Get the name of the literal operator function with II as the identifier.
- DeclarationName getCXXLiteralOperatorName(IdentifierInfo *II);
+ DeclarationName getCXXLiteralOperatorName(const IdentifierInfo *II);
};
/// DeclarationNameLoc - Additional source/type location info
@@ -754,7 +764,7 @@ public:
};
/// DeclarationNameInfo - A collector data type for bundling together
-/// a DeclarationName and the correspnding source/type location info.
+/// a DeclarationName and the corresponding source/type location info.
struct DeclarationNameInfo {
private:
/// Name - The declaration name, also encoding name kind.
@@ -932,7 +942,7 @@ class AssumedTemplateStorage : public UncommonTemplateNameStorage {
friend class ASTContext;
AssumedTemplateStorage(DeclarationName Name)
- : UncommonTemplateNameStorage(Assumed, 0), Name(Name) {}
+ : UncommonTemplateNameStorage(Assumed, 0, 0), Name(Name) {}
DeclarationName Name;
public:
diff --git a/contrib/llvm-project/clang/include/clang/AST/DependenceFlags.h b/contrib/llvm-project/clang/include/clang/AST/DependenceFlags.h
index 62efdb4ce6e4..3b3c1afb096a 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DependenceFlags.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DependenceFlags.h
@@ -130,6 +130,14 @@ public:
// Dependence that is propagated syntactically, regardless of semantics.
Syntactic = UnexpandedPack | Instantiation | Error,
+ // Dependence that is propagated semantically, even in cases where the
+ // type doesn't syntactically appear. This currently excludes only
+ // UnexpandedPack. Even though Instantiation dependence is also notionally
+ // syntactic, we also want to propagate it semantically because anything
+ // that semantically depends on an instantiation-dependent entity should
+ // always be instantiated when that instantiation-dependent entity is.
+ Semantic =
+ Instantiation | Type | Value | Dependent | Error | VariablyModified,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/VariablyModified)
};
@@ -175,6 +183,14 @@ public:
return Result;
}
+ /// Extract the semantic portions of this type's dependence that apply even
+ /// to uses where the type does not appear syntactically.
+ Dependence semantic() {
+ Dependence Result = *this;
+ Result.V &= Semantic;
+ return Result;
+ }
+
TypeDependence type() const {
return translate(V, UnexpandedPack, TypeDependence::UnexpandedPack) |
translate(V, Instantiation, TypeDependence::Instantiation) |
@@ -231,7 +247,10 @@ private:
inline ExprDependence toExprDependence(TemplateArgumentDependence TA) {
return Dependence(TA).expr();
}
-inline ExprDependence toExprDependence(TypeDependence D) {
+inline ExprDependence toExprDependenceForImpliedType(TypeDependence D) {
+ return Dependence(D).semantic().expr();
+}
+inline ExprDependence toExprDependenceAsWritten(TypeDependence D) {
return Dependence(D).expr();
}
// Note: it's often necessary to strip `Dependent` from qualifiers.
@@ -269,6 +288,9 @@ inline TypeDependence toTypeDependence(TemplateArgumentDependence D) {
inline TypeDependence toSyntacticDependence(TypeDependence D) {
return Dependence(D).syntactic().type();
}
+inline TypeDependence toSemanticDependence(TypeDependence D) {
+ return Dependence(D).semantic().type();
+}
inline NestedNameSpecifierDependence
toNestedNameSpecifierDependendence(TypeDependence D) {
diff --git a/contrib/llvm-project/clang/include/clang/AST/DependentDiagnostic.h b/contrib/llvm-project/clang/include/clang/AST/DependentDiagnostic.h
index 18276d54d540..cadf97062004 100644
--- a/contrib/llvm-project/clang/include/clang/AST/DependentDiagnostic.h
+++ b/contrib/llvm-project/clang/include/clang/AST/DependentDiagnostic.h
@@ -113,7 +113,9 @@ private:
struct {
SourceLocation Loc;
+ LLVM_PREFERRED_TYPE(AccessSpecifier)
unsigned Access : 2;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsMember : 1;
NamedDecl *TargetDecl;
CXXRecordDecl *NamingClass;
diff --git a/contrib/llvm-project/clang/include/clang/AST/Expr.h b/contrib/llvm-project/clang/include/clang/AST/Expr.h
index 8efa8fdbe2bb..9820bd11da86 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Expr.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Expr.h
@@ -13,6 +13,7 @@
#ifndef LLVM_CLANG_AST_EXPR_H
#define LLVM_CLANG_AST_EXPR_H
+#include "clang/AST/APNumericStorage.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTVector.h"
#include "clang/AST/ComputeDependence.h"
@@ -36,6 +37,7 @@
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TrailingObjects.h"
+#include <optional>
namespace clang {
class APValue;
@@ -134,8 +136,8 @@ protected:
void setDependence(ExprDependence Deps) {
ExprBits.Dependent = static_cast<unsigned>(Deps);
}
- friend class ASTImporter; // Sets dependence dircetly.
- friend class ASTStmtReader; // Sets dependence dircetly.
+ friend class ASTImporter; // Sets dependence directly.
+ friend class ASTStmtReader; // Sets dependence directly.
public:
QualType getType() const { return TR; }
@@ -170,7 +172,7 @@ public:
}
/// Determines whether the type of this expression depends on
- /// - a template paramter (C++ [temp.dep.expr], which means that its type
+ /// - a template parameter (C++ [temp.dep.expr], which means that its type
/// could change from one template instantiation to the next)
/// - or an error
///
@@ -523,15 +525,25 @@ public:
/// semantically correspond to a bool.
bool isKnownToHaveBooleanValue(bool Semantic = true) const;
+ /// Check whether this array fits the idiom of a flexible array member,
+ /// depending on the value of -fstrict-flex-array.
+ /// When IgnoreTemplateOrMacroSubstitution is set, it doesn't consider sizes
+ /// resulting from the substitution of a macro or a template as special sizes.
+ bool isFlexibleArrayMemberLike(
+ ASTContext &Context,
+ LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel,
+ bool IgnoreTemplateOrMacroSubstitution = false) const;
+
/// isIntegerConstantExpr - Return the value if this expression is a valid
- /// integer constant expression. If not a valid i-c-e, return None and fill
- /// in Loc (if specified) with the location of the invalid expression.
+ /// integer constant expression. If not a valid i-c-e, return std::nullopt
+ /// and fill in Loc (if specified) with the location of the invalid
+ /// expression.
///
/// Note: This does not perform the implicit conversions required by C++11
/// [expr.const]p5.
- Optional<llvm::APSInt> getIntegerConstantExpr(const ASTContext &Ctx,
- SourceLocation *Loc = nullptr,
- bool isEvaluated = true) const;
+ std::optional<llvm::APSInt>
+ getIntegerConstantExpr(const ASTContext &Ctx,
+ SourceLocation *Loc = nullptr) const;
bool isIntegerConstantExpr(const ASTContext &Ctx,
SourceLocation *Loc = nullptr) const;
@@ -555,7 +567,7 @@ public:
SmallVectorImpl<
PartialDiagnosticAt> &Diags);
- /// isPotentialConstantExprUnevaluted - Return true if this expression might
+ /// isPotentialConstantExprUnevaluated - Return true if this expression might
/// be usable in a constant expression in C++11 in an unevaluated context, if
/// it were in function FD marked constexpr. Return false if the function can
/// never produce a constant expression, along with diagnostics describing
@@ -572,16 +584,22 @@ public:
bool isConstantInitializer(ASTContext &Ctx, bool ForRef,
const Expr **Culprit = nullptr) const;
+ /// If this expression is an unambiguous reference to a single declaration,
+ /// in the style of __builtin_function_start, return that declaration. Note
+ /// that this may return a non-static member function or field in C++ if this
+ /// expression is a member pointer constant.
+ const ValueDecl *getAsBuiltinConstantDeclRef(const ASTContext &Context) const;
+
/// EvalStatus is a struct with detailed info about an evaluation in progress.
struct EvalStatus {
/// Whether the evaluated expression has side effects.
/// For example, (f() && 0) can be folded, but it still has side effects.
- bool HasSideEffects;
+ bool HasSideEffects = false;
/// Whether the evaluation hit undefined behavior.
/// For example, 1.0 / 0.0 can be folded to Inf, but has undefined behavior.
/// Likewise, INT_MAX + 1 can be folded to INT_MIN, but has UB.
- bool HasUndefinedBehavior;
+ bool HasUndefinedBehavior = false;
/// Diag - If this is non-null, it will be filled in with a stack of notes
/// indicating why evaluation failed (or why it failed to produce a constant
@@ -590,10 +608,16 @@ public:
/// foldable. If the expression is foldable, but not a constant expression,
/// the notes will describes why it isn't a constant expression. If the
/// expression *is* a constant expression, no notes will be produced.
- SmallVectorImpl<PartialDiagnosticAt> *Diag;
+ ///
+ /// FIXME: this causes significant performance concerns and should be
+ /// refactored at some point. Not all evaluations of the constant
+ /// expression interpreter will display the given diagnostics, this means
+ /// those kinds of uses are paying the expense of generating a diagnostic
+ /// (which may include expensive operations like converting APValue objects
+ /// to a string representation).
+ SmallVectorImpl<PartialDiagnosticAt> *Diag = nullptr;
- EvalStatus()
- : HasSideEffects(false), HasUndefinedBehavior(false), Diag(nullptr) {}
+ EvalStatus() = default;
// hasSideEffects - Return true if the evaluated expression has
// side effects.
@@ -648,8 +672,8 @@ public:
SideEffectsKind AllowSideEffects = SE_NoSideEffects,
bool InConstantContext = false) const;
- /// EvaluateAsFloat - Return true if this is a constant which we can fold and
- /// convert to a fixed point value.
+ /// EvaluateAsFixedPoint - Return true if this is a constant which we can fold
+ /// and convert to a fixed point value.
bool EvaluateAsFixedPoint(EvalResult &Result, const ASTContext &Ctx,
SideEffectsKind AllowSideEffects = SE_NoSideEffects,
bool InConstantContext = false) const;
@@ -697,7 +721,8 @@ public:
/// notes will be produced if the expression is not a constant expression.
bool EvaluateAsInitializer(APValue &Result, const ASTContext &Ctx,
const VarDecl *VD,
- SmallVectorImpl<PartialDiagnosticAt> &Notes) const;
+ SmallVectorImpl<PartialDiagnosticAt> &Notes,
+ bool IsConstantInitializer) const;
/// EvaluateWithSubstitution - Evaluate an expression as if from the context
/// of a call to the given function with the given arguments, inside an
@@ -739,6 +764,17 @@ public:
bool tryEvaluateObjectSize(uint64_t &Result, ASTContext &Ctx,
unsigned Type) const;
+ /// If the current Expr is a pointer, this will try to statically
+ /// determine the strlen of the string pointed to.
+ /// Returns true if all of the above holds and we were able to figure out the
+ /// strlen, false otherwise.
+ bool tryEvaluateStrLen(uint64_t &Result, ASTContext &Ctx) const;
+
+ bool EvaluateCharRangeAsString(std::string &Result,
+ const Expr *SizeExpression,
+ const Expr *PtrExpression, ASTContext &Ctx,
+ EvalResult &Status) const;
+
/// Enumeration used to describe the kind of Null pointer constant
/// returned from \c isNullPointerConstant().
enum NullPointerConstantKind {
@@ -796,7 +832,7 @@ public:
/// member expression.
static QualType findBoundMemberType(const Expr *expr);
- /// Skip past any invisble AST nodes which might surround this
+ /// Skip past any invisible AST nodes which might surround this
/// statement, such as ExprWithCleanups or ImplicitCastExpr nodes,
/// but also injected CXXMemberExpr and CXXConstructExpr which represent
/// implicit conversions.
@@ -900,7 +936,7 @@ public:
return const_cast<Expr *>(this)->IgnoreParenLValueCasts();
}
- /// Skip past any parenthese and casts which do not change the value
+ /// Skip past any parentheses and casts which do not change the value
/// (including ptr->int casts of the same size) until reaching a fixed point.
/// Skips:
/// * What IgnoreParens() skips
@@ -1014,6 +1050,9 @@ public:
}
};
+/// Describes the kind of result that can be tail-allocated.
+enum class ConstantResultStorageKind { None, Int64, APValue };
+
/// ConstantExpr - An expression that occurs in a constant context and
/// optionally the result of evaluating the expression.
class ConstantExpr final
@@ -1026,20 +1065,15 @@ class ConstantExpr final
friend class ASTStmtReader;
friend class ASTStmtWriter;
-public:
- /// Describes the kind of result that can be tail-allocated.
- enum ResultStorageKind { RSK_None, RSK_Int64, RSK_APValue };
-
-private:
size_t numTrailingObjects(OverloadToken<APValue>) const {
- return ConstantExprBits.ResultKind == ConstantExpr::RSK_APValue;
+ return getResultStorageKind() == ConstantResultStorageKind::APValue;
}
size_t numTrailingObjects(OverloadToken<uint64_t>) const {
- return ConstantExprBits.ResultKind == ConstantExpr::RSK_Int64;
+ return getResultStorageKind() == ConstantResultStorageKind::Int64;
}
uint64_t &Int64Result() {
- assert(ConstantExprBits.ResultKind == ConstantExpr::RSK_Int64 &&
+ assert(getResultStorageKind() == ConstantResultStorageKind::Int64 &&
"invalid accessor");
return *getTrailingObjects<uint64_t>();
}
@@ -1047,7 +1081,7 @@ private:
return const_cast<ConstantExpr *>(this)->Int64Result();
}
APValue &APValueResult() {
- assert(ConstantExprBits.ResultKind == ConstantExpr::RSK_APValue &&
+ assert(getResultStorageKind() == ConstantResultStorageKind::APValue &&
"invalid accessor");
return *getTrailingObjects<APValue>();
}
@@ -1055,22 +1089,23 @@ private:
return const_cast<ConstantExpr *>(this)->APValueResult();
}
- ConstantExpr(Expr *SubExpr, ResultStorageKind StorageKind,
+ ConstantExpr(Expr *SubExpr, ConstantResultStorageKind StorageKind,
bool IsImmediateInvocation);
- ConstantExpr(EmptyShell Empty, ResultStorageKind StorageKind);
+ ConstantExpr(EmptyShell Empty, ConstantResultStorageKind StorageKind);
public:
static ConstantExpr *Create(const ASTContext &Context, Expr *E,
const APValue &Result);
- static ConstantExpr *Create(const ASTContext &Context, Expr *E,
- ResultStorageKind Storage = RSK_None,
- bool IsImmediateInvocation = false);
+ static ConstantExpr *
+ Create(const ASTContext &Context, Expr *E,
+ ConstantResultStorageKind Storage = ConstantResultStorageKind::None,
+ bool IsImmediateInvocation = false);
static ConstantExpr *CreateEmpty(const ASTContext &Context,
- ResultStorageKind StorageKind);
+ ConstantResultStorageKind StorageKind);
- static ResultStorageKind getStorageKind(const APValue &Value);
- static ResultStorageKind getStorageKind(const Type *T,
- const ASTContext &Context);
+ static ConstantResultStorageKind getStorageKind(const APValue &Value);
+ static ConstantResultStorageKind getStorageKind(const Type *T,
+ const ASTContext &Context);
SourceLocation getBeginLoc() const LLVM_READONLY {
return SubExpr->getBeginLoc();
@@ -1091,8 +1126,8 @@ public:
APValue::ValueKind getResultAPValueKind() const {
return static_cast<APValue::ValueKind>(ConstantExprBits.APValueKind);
}
- ResultStorageKind getResultStorageKind() const {
- return static_cast<ResultStorageKind>(ConstantExprBits.ResultKind);
+ ConstantResultStorageKind getResultStorageKind() const {
+ return static_cast<ConstantResultStorageKind>(ConstantExprBits.ResultKind);
}
bool isImmediateInvocation() const {
return ConstantExprBits.IsImmediateInvocation;
@@ -1101,7 +1136,6 @@ public:
return ConstantExprBits.APValueKind != APValue::None;
}
APValue getAPValueResult() const;
- APValue &getResultAsAPValue() const { return APValueResult(); }
llvm::APSInt getResultAsAPSInt() const;
// Iterators
child_range children() { return child_range(&SubExpr, &SubExpr+1); }
@@ -1413,68 +1447,35 @@ public:
return DeclRefExprBits.RefersToEnclosingVariableOrCapture;
}
- static bool classof(const Stmt *T) {
- return T->getStmtClass() == DeclRefExprClass;
+ bool isImmediateEscalating() const {
+ return DeclRefExprBits.IsImmediateEscalating;
}
- // Iterators
- child_range children() {
- return child_range(child_iterator(), child_iterator());
+ void setIsImmediateEscalating(bool Set) {
+ DeclRefExprBits.IsImmediateEscalating = Set;
}
- const_child_range children() const {
- return const_child_range(const_child_iterator(), const_child_iterator());
+ bool isCapturedByCopyInLambdaWithExplicitObjectParameter() const {
+ return DeclRefExprBits.CapturedByCopyInLambdaWithExplicitObjectParameter;
}
-};
-/// Used by IntegerLiteral/FloatingLiteral to store the numeric without
-/// leaking memory.
-///
-/// For large floats/integers, APFloat/APInt will allocate memory from the heap
-/// to represent these numbers. Unfortunately, when we use a BumpPtrAllocator
-/// to allocate IntegerLiteral/FloatingLiteral nodes the memory associated with
-/// the APFloat/APInt values will never get freed. APNumericStorage uses
-/// ASTContext's allocator for memory allocation.
-class APNumericStorage {
- union {
- uint64_t VAL; ///< Used to store the <= 64 bits integer value.
- uint64_t *pVal; ///< Used to store the >64 bits integer value.
- };
- unsigned BitWidth;
-
- bool hasAllocation() const { return llvm::APInt::getNumWords(BitWidth) > 1; }
-
- APNumericStorage(const APNumericStorage &) = delete;
- void operator=(const APNumericStorage &) = delete;
-
-protected:
- APNumericStorage() : VAL(0), BitWidth(0) { }
-
- llvm::APInt getIntValue() const {
- unsigned NumWords = llvm::APInt::getNumWords(BitWidth);
- if (NumWords > 1)
- return llvm::APInt(BitWidth, NumWords, pVal);
- else
- return llvm::APInt(BitWidth, VAL);
+ void setCapturedByCopyInLambdaWithExplicitObjectParameter(
+ bool Set, const ASTContext &Context) {
+ DeclRefExprBits.CapturedByCopyInLambdaWithExplicitObjectParameter = Set;
+ setDependence(computeDependence(this, Context));
}
- void setIntValue(const ASTContext &C, const llvm::APInt &Val);
-};
-class APIntStorage : private APNumericStorage {
-public:
- llvm::APInt getValue() const { return getIntValue(); }
- void setValue(const ASTContext &C, const llvm::APInt &Val) {
- setIntValue(C, Val);
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == DeclRefExprClass;
}
-};
-class APFloatStorage : private APNumericStorage {
-public:
- llvm::APFloat getValue(const llvm::fltSemantics &Semantics) const {
- return llvm::APFloat(Semantics, getIntValue());
+ // Iterators
+ child_range children() {
+ return child_range(child_iterator(), child_iterator());
}
- void setValue(const ASTContext &C, const llvm::APFloat &Val) {
- setIntValue(C, Val.bitcastToAPInt());
+
+ const_child_range children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
}
};
@@ -1568,26 +1569,18 @@ class FixedPointLiteral : public Expr, public APIntStorage {
}
};
-class CharacterLiteral : public Expr {
-public:
- enum CharacterKind {
- Ascii,
- Wide,
- UTF8,
- UTF16,
- UTF32
- };
+enum class CharacterLiteralKind { Ascii, Wide, UTF8, UTF16, UTF32 };
-private:
+class CharacterLiteral : public Expr {
unsigned Value;
SourceLocation Loc;
public:
// type should be IntTy
- CharacterLiteral(unsigned value, CharacterKind kind, QualType type,
+ CharacterLiteral(unsigned value, CharacterLiteralKind kind, QualType type,
SourceLocation l)
: Expr(CharacterLiteralClass, type, VK_PRValue, OK_Ordinary),
Value(value), Loc(l) {
- CharacterLiteralBits.Kind = kind;
+ CharacterLiteralBits.Kind = llvm::to_underlying(kind);
setDependence(ExprDependence::None);
}
@@ -1595,8 +1588,8 @@ public:
CharacterLiteral(EmptyShell Empty) : Expr(CharacterLiteralClass, Empty) { }
SourceLocation getLocation() const { return Loc; }
- CharacterKind getKind() const {
- return static_cast<CharacterKind>(CharacterLiteralBits.Kind);
+ CharacterLiteralKind getKind() const {
+ return static_cast<CharacterLiteralKind>(CharacterLiteralBits.Kind);
}
SourceLocation getBeginLoc() const LLVM_READONLY { return Loc; }
@@ -1605,14 +1598,16 @@ public:
unsigned getValue() const { return Value; }
void setLocation(SourceLocation Location) { Loc = Location; }
- void setKind(CharacterKind kind) { CharacterLiteralBits.Kind = kind; }
+ void setKind(CharacterLiteralKind kind) {
+ CharacterLiteralBits.Kind = llvm::to_underlying(kind);
+ }
void setValue(unsigned Val) { Value = Val; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CharacterLiteralClass;
}
- static void print(unsigned val, CharacterKind Kind, raw_ostream &OS);
+ static void print(unsigned val, CharacterLiteralKind Kind, raw_ostream &OS);
// Iterators
child_range children() {
@@ -1734,6 +1729,15 @@ public:
}
};
+enum class StringLiteralKind {
+ Ordinary,
+ Wide,
+ UTF8,
+ UTF16,
+ UTF32,
+ Unevaluated
+};
+
/// StringLiteral - This represents a string literal expression, e.g. "foo"
/// or L"bar" (wide strings). The actual string data can be obtained with
/// getBytes() and is NOT null-terminated. The length of the string data is
@@ -1772,10 +1776,6 @@ class StringLiteral final
///
/// * An array of getByteLength() char used to store the string data.
-public:
- enum StringKind { Ascii, Wide, UTF8, UTF16, UTF32 };
-
-private:
unsigned numTrailingObjects(OverloadToken<unsigned>) const { return 1; }
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return getNumConcatenated();
@@ -1797,7 +1797,7 @@ private:
}
/// Build a string literal.
- StringLiteral(const ASTContext &Ctx, StringRef Str, StringKind Kind,
+ StringLiteral(const ASTContext &Ctx, StringRef Str, StringLiteralKind Kind,
bool Pascal, QualType Ty, const SourceLocation *Loc,
unsigned NumConcatenated);
@@ -1806,7 +1806,8 @@ private:
unsigned CharByteWidth);
/// Map a target and string kind to the appropriate character width.
- static unsigned mapCharByteWidth(TargetInfo const &Target, StringKind SK);
+ static unsigned mapCharByteWidth(TargetInfo const &Target,
+ StringLiteralKind SK);
/// Set one of the string literal token.
void setStrTokenLoc(unsigned TokNum, SourceLocation L) {
@@ -1818,13 +1819,13 @@ public:
/// This is the "fully general" constructor that allows representation of
/// strings formed from multiple concatenated tokens.
static StringLiteral *Create(const ASTContext &Ctx, StringRef Str,
- StringKind Kind, bool Pascal, QualType Ty,
+ StringLiteralKind Kind, bool Pascal, QualType Ty,
const SourceLocation *Loc,
unsigned NumConcatenated);
/// Simple constructor for string literals made from one token.
static StringLiteral *Create(const ASTContext &Ctx, StringRef Str,
- StringKind Kind, bool Pascal, QualType Ty,
+ StringLiteralKind Kind, bool Pascal, QualType Ty,
SourceLocation Loc) {
return Create(Ctx, Str, Kind, Pascal, Ty, &Loc, 1);
}
@@ -1835,7 +1836,7 @@ public:
unsigned CharByteWidth);
StringRef getString() const {
- assert(getCharByteWidth() == 1 &&
+ assert((isUnevaluated() || getCharByteWidth() == 1) &&
"This function is used in places that assume strings use char");
return StringRef(getStrDataAsChar(), getByteLength());
}
@@ -1866,15 +1867,16 @@ public:
unsigned getLength() const { return *getTrailingObjects<unsigned>(); }
unsigned getCharByteWidth() const { return StringLiteralBits.CharByteWidth; }
- StringKind getKind() const {
- return static_cast<StringKind>(StringLiteralBits.Kind);
+ StringLiteralKind getKind() const {
+ return static_cast<StringLiteralKind>(StringLiteralBits.Kind);
}
- bool isAscii() const { return getKind() == Ascii; }
- bool isWide() const { return getKind() == Wide; }
- bool isUTF8() const { return getKind() == UTF8; }
- bool isUTF16() const { return getKind() == UTF16; }
- bool isUTF32() const { return getKind() == UTF32; }
+ bool isOrdinary() const { return getKind() == StringLiteralKind::Ordinary; }
+ bool isWide() const { return getKind() == StringLiteralKind::Wide; }
+ bool isUTF8() const { return getKind() == StringLiteralKind::UTF8; }
+ bool isUTF16() const { return getKind() == StringLiteralKind::UTF16; }
+ bool isUTF32() const { return getKind() == StringLiteralKind::UTF32; }
+ bool isUnevaluated() const { return getKind() == StringLiteralKind::Unevaluated; }
bool isPascal() const { return StringLiteralBits.IsPascal; }
bool containsNonAscii() const {
@@ -1942,6 +1944,19 @@ public:
}
};
+enum class PredefinedIdentKind {
+ Func,
+ Function,
+ LFunction, // Same as Function, but as wide string.
+ FuncDName,
+ FuncSig,
+ LFuncSig, // Same as FuncSig, but as wide string
+ PrettyFunction,
+ /// The same as PrettyFunction, except that the
+ /// 'virtual' keyword is omitted for virtual member functions.
+ PrettyFunctionNoVirtual
+};
+
/// [C99 6.4.2.2] - A predefined identifier such as __func__.
class PredefinedExpr final
: public Expr,
@@ -1953,23 +1968,8 @@ class PredefinedExpr final
// "Stmt *" for the predefined identifier. It is present if and only if
// hasFunctionName() is true and is always a "StringLiteral *".
-public:
- enum IdentKind {
- Func,
- Function,
- LFunction, // Same as Function, but as wide string.
- FuncDName,
- FuncSig,
- LFuncSig, // Same as FuncSig, but as as wide string
- PrettyFunction,
- /// The same as PrettyFunction, except that the
- /// 'virtual' keyword is omitted for virtual member functions.
- PrettyFunctionNoVirtual
- };
-
-private:
- PredefinedExpr(SourceLocation L, QualType FNTy, IdentKind IK,
- StringLiteral *SL);
+ PredefinedExpr(SourceLocation L, QualType FNTy, PredefinedIdentKind IK,
+ bool IsTransparent, StringLiteral *SL);
explicit PredefinedExpr(EmptyShell Empty, bool HasFunctionName);
@@ -1984,17 +1984,23 @@ private:
public:
/// Create a PredefinedExpr.
+ ///
+ /// If IsTransparent, the PredefinedExpr is transparently handled as a
+ /// StringLiteral.
static PredefinedExpr *Create(const ASTContext &Ctx, SourceLocation L,
- QualType FNTy, IdentKind IK, StringLiteral *SL);
+ QualType FNTy, PredefinedIdentKind IK,
+ bool IsTransparent, StringLiteral *SL);
/// Create an empty PredefinedExpr.
static PredefinedExpr *CreateEmpty(const ASTContext &Ctx,
bool HasFunctionName);
- IdentKind getIdentKind() const {
- return static_cast<IdentKind>(PredefinedExprBits.Kind);
+ PredefinedIdentKind getIdentKind() const {
+ return static_cast<PredefinedIdentKind>(PredefinedExprBits.Kind);
}
+ bool isTransparent() const { return PredefinedExprBits.IsTransparent; }
+
SourceLocation getLocation() const { return PredefinedExprBits.Loc; }
void setLocation(SourceLocation L) { PredefinedExprBits.Loc = L; }
@@ -2010,12 +2016,13 @@ public:
: nullptr;
}
- static StringRef getIdentKindName(IdentKind IK);
+ static StringRef getIdentKindName(PredefinedIdentKind IK);
StringRef getIdentKindName() const {
return getIdentKindName(getIdentKind());
}
- static std::string ComputeName(IdentKind IK, const Decl *CurrentDecl);
+ static std::string ComputeName(PredefinedIdentKind IK,
+ const Decl *CurrentDecl);
SourceLocation getBeginLoc() const { return getLocation(); }
SourceLocation getEndLoc() const { return getLocation(); }
@@ -2210,14 +2217,14 @@ public:
bool canOverflow() const { return UnaryOperatorBits.CanOverflow; }
void setCanOverflow(bool C) { UnaryOperatorBits.CanOverflow = C; }
- // Get the FP contractability status of this operator. Only meaningful for
- // operations on floating point types.
+ /// Get the FP contractability status of this operator. Only meaningful for
+ /// operations on floating point types.
bool isFPContractableWithinStatement(const LangOptions &LO) const {
return getFPFeaturesInEffect(LO).allowFPContractWithinStatement();
}
- // Get the FENV_ACCESS status of this operator. Only meaningful for
- // operations on floating point types.
+ /// Get the FENV_ACCESS status of this operator. Only meaningful for
+ /// operations on floating point types.
bool isFEnvAccessOn(const LangOptions &LO) const {
return getFPFeaturesInEffect(LO).getAllowFEnvAccess();
}
@@ -2298,12 +2305,12 @@ public:
}
protected:
- /// Set FPFeatures in trailing storage, used only by Serialization
+ /// Set FPFeatures in trailing storage, used by Serialization & ASTImporter.
void setStoredFPFeatures(FPOptionsOverride F) { getTrailingFPFeatures() = F; }
public:
- // Get the FP features status of this operator. Only meaningful for
- // operations on floating point types.
+ /// Get the FP features status of this operator. Only meaningful for
+ /// operations on floating point types.
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const {
if (UnaryOperatorBits.HasFPFeatures)
return getStoredFPFeatures().applyOverrides(LO);
@@ -2316,6 +2323,7 @@ public:
}
friend TrailingObjects;
+ friend class ASTNodeImporter;
friend class ASTReader;
friend class ASTStmtReader;
friend class ASTStmtWriter;
@@ -2375,7 +2383,7 @@ public:
/// Create an offsetof node that refers into a C++ base class.
explicit OffsetOfNode(const CXXBaseSpecifier *Base)
- : Range(), Data(reinterpret_cast<uintptr_t>(Base) | OffsetOfNode::Base) {}
+ : Data(reinterpret_cast<uintptr_t>(Base) | OffsetOfNode::Base) {}
/// Determine what kind of offsetof node this is.
Kind getKind() const { return static_cast<Kind>(Data & Mask); }
@@ -2791,7 +2799,7 @@ class CallExpr : public Expr {
/// The number of arguments in the call expression.
unsigned NumArgs;
- /// The location of the right parenthese. This has a different meaning for
+ /// The location of the right parentheses. This has a different meaning for
/// the derived classes of CallExpr.
SourceLocation RParenLoc;
@@ -2997,7 +3005,7 @@ public:
/// Compute and set dependence bits.
void computeDependence() {
setDependence(clang::computeDependence(
- this, llvm::makeArrayRef(
+ this, llvm::ArrayRef(
reinterpret_cast<Expr **>(getTrailingStmts() + PREARGS_START),
getNumPreArgs())));
}
@@ -3044,14 +3052,10 @@ public:
/// interface. This provides efficient reverse iteration of the
/// subexpressions. This is currently used for CFG construction.
ArrayRef<Stmt *> getRawSubExprs() {
- return llvm::makeArrayRef(getTrailingStmts(),
- PREARGS_START + getNumPreArgs() + getNumArgs());
+ return llvm::ArrayRef(getTrailingStmts(),
+ PREARGS_START + getNumPreArgs() + getNumArgs());
}
- /// getNumCommas - Return the number of commas that must have been present in
- /// this function call.
- unsigned getNumCommas() const { return getNumArgs() ? getNumArgs() - 1 : 0; }
-
/// Get FPOptionsOverride from trailing storage.
FPOptionsOverride getStoredFPFeatures() const {
assert(hasStoredFPFeatures());
@@ -3063,8 +3067,8 @@ public:
*getTrailingFPFeatures() = F;
}
- // Get the FP features status of this operator. Only meaningful for
- // operations on floating point types.
+ /// Get the FP features status of this operator. Only meaningful for
+ /// operations on floating point types.
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const {
if (hasStoredFPFeatures())
return getStoredFPFeatures().applyOverrides(LO);
@@ -3115,11 +3119,7 @@ public:
setDependence(getDependence() | ExprDependence::TypeValueInstantiation);
}
- bool isCallToStdMove() const {
- const FunctionDecl *FD = getDirectCallee();
- return getNumArgs() == 1 && FD && FD->isInStdNamespace() &&
- FD->getIdentifier() && FD->getIdentifier()->isStr("move");
- }
+ bool isCallToStdMove() const;
static bool classof(const Stmt *T) {
return T->getStmtClass() >= firstCallExprConstant &&
@@ -3484,7 +3484,6 @@ protected:
CastExprBits.BasePathSize = BasePathSize;
assert((CastExprBits.BasePathSize == BasePathSize) &&
"BasePathSize overflow!");
- setDependence(computeDependence(this));
assert(CastConsistency());
CastExprBits.HasFPFeatures = HasFPFeatures;
}
@@ -3559,8 +3558,8 @@ public:
return *getTrailingFPFeatures();
}
- // Get the FP features status of this operation. Only meaningful for
- // operations on floating point types.
+ /// Get the FP features status of this operation. Only meaningful for
+ /// operations on floating point types.
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const {
if (hasStoredFPFeatures())
return getStoredFPFeatures().applyOverrides(LO);
@@ -3573,6 +3572,19 @@ public:
return FPOptionsOverride();
}
+ /// Return
+ // True : if this conversion changes the volatile-ness of a gl-value.
+ // Qualification conversions on gl-values currently use CK_NoOp, but
+ // it's important to recognize volatile-changing conversions in
+ // clients code generation that normally eagerly peephole loads. Note
+ // that the query is answering for this specific node; Sema may
+ // produce multiple cast nodes for any particular conversion sequence.
+ // False : Otherwise.
+ bool changesVolatileQualification() const {
+ return (isGLValue() && (getType().isVolatileQualified() !=
+ getSubExpr()->getType().isVolatileQualified()));
+ }
+
static const FieldDecl *getTargetFieldForToUnionCast(QualType unionType,
QualType opType);
static const FieldDecl *getTargetFieldForToUnionCast(const RecordDecl *RD,
@@ -3618,6 +3630,7 @@ class ImplicitCastExpr final
ExprValueKind VK)
: CastExpr(ImplicitCastExprClass, ty, VK, kind, op, BasePathLength,
FPO.requiresTrailingStorage()) {
+ setDependence(computeDependence(this));
if (hasStoredFPFeatures())
*getTrailingFPFeatures() = FPO;
}
@@ -3695,7 +3708,9 @@ protected:
CastKind kind, Expr *op, unsigned PathSize,
bool HasFPFeatures, TypeSourceInfo *writtenTy)
: CastExpr(SC, exprTy, VK, kind, op, PathSize, HasFPFeatures),
- TInfo(writtenTy) {}
+ TInfo(writtenTy) {
+ setDependence(computeDependence(this));
+ }
/// Construct an empty explicit cast.
ExplicitCastExpr(StmtClass SC, EmptyShell Shell, unsigned PathSize,
@@ -3954,11 +3969,12 @@ public:
return isShiftAssignOp(getOpcode());
}
- // Return true if a binary operator using the specified opcode and operands
- // would match the 'p = (i8*)nullptr + n' idiom for casting a pointer-sized
- // integer to a pointer.
+ /// Return true if a binary operator using the specified opcode and operands
+ /// would match the 'p = (i8*)nullptr + n' idiom for casting a pointer-sized
+ /// integer to a pointer.
static bool isNullPointerArithmeticExtension(ASTContext &Ctx, Opcode Opc,
- Expr *LHS, Expr *RHS);
+ const Expr *LHS,
+ const Expr *RHS);
static bool classof(const Stmt *S) {
return S->getStmtClass() >= firstBinaryOperatorConstant &&
@@ -3989,8 +4005,8 @@ public:
*getTrailingFPFeatures() = F;
}
- // Get the FP features status of this operator. Only meaningful for
- // operations on floating point types.
+ /// Get the FP features status of this operator. Only meaningful for
+ /// operations on floating point types.
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const {
if (BinaryOperatorBits.HasFPFeatures)
return getStoredFPFeatures().applyOverrides(LO);
@@ -3998,20 +4014,20 @@ public:
}
// This is used in ASTImporter
- FPOptionsOverride getFPFeatures(const LangOptions &LO) const {
+ FPOptionsOverride getFPFeatures() const {
if (BinaryOperatorBits.HasFPFeatures)
return getStoredFPFeatures();
return FPOptionsOverride();
}
- // Get the FP contractability status of this operator. Only meaningful for
- // operations on floating point types.
+ /// Get the FP contractability status of this operator. Only meaningful for
+ /// operations on floating point types.
bool isFPContractableWithinStatement(const LangOptions &LO) const {
return getFPFeaturesInEffect(LO).allowFPContractWithinStatement();
}
- // Get the FENV_ACCESS status of this operator. Only meaningful for
- // operations on floating point types.
+ /// Get the FENV_ACCESS status of this operator. Only meaningful for
+ /// operations on floating point types.
bool isFEnvAccessOn(const LangOptions &LO) const {
return getFPFeaturesInEffect(LO).getAllowFEnvAccess();
}
@@ -4107,17 +4123,17 @@ protected:
: Expr(SC, Empty) { }
public:
- // getCond - Return the expression representing the condition for
- // the ?: operator.
+ /// getCond - Return the expression representing the condition for
+ /// the ?: operator.
Expr *getCond() const;
- // getTrueExpr - Return the subexpression representing the value of
- // the expression if the condition evaluates to true.
+ /// getTrueExpr - Return the subexpression representing the value of
+ /// the expression if the condition evaluates to true.
Expr *getTrueExpr() const;
- // getFalseExpr - Return the subexpression representing the value of
- // the expression if the condition evaluates to false. This is
- // the same as getRHS.
+ /// getFalseExpr - Return the subexpression representing the value of
+ /// the expression if the condition evaluates to false. This is
+ /// the same as getRHS.
Expr *getFalseExpr() const;
SourceLocation getQuestionLoc() const { return QuestionLoc; }
@@ -4152,17 +4168,17 @@ public:
explicit ConditionalOperator(EmptyShell Empty)
: AbstractConditionalOperator(ConditionalOperatorClass, Empty) { }
- // getCond - Return the expression representing the condition for
- // the ?: operator.
+ /// getCond - Return the expression representing the condition for
+ /// the ?: operator.
Expr *getCond() const { return cast<Expr>(SubExprs[COND]); }
- // getTrueExpr - Return the subexpression representing the value of
- // the expression if the condition evaluates to true.
+ /// getTrueExpr - Return the subexpression representing the value of
+ /// the expression if the condition evaluates to true.
Expr *getTrueExpr() const { return cast<Expr>(SubExprs[LHS]); }
- // getFalseExpr - Return the subexpression representing the value of
- // the expression if the condition evaluates to false. This is
- // the same as getRHS.
+ /// getFalseExpr - Return the subexpression representing the value of
+ /// the expression if the condition evaluates to false. This is
+ /// the same as getRHS.
Expr *getFalseExpr() const { return cast<Expr>(SubExprs[RHS]); }
Expr *getLHS() const { return cast<Expr>(SubExprs[LHS]); }
@@ -4666,16 +4682,26 @@ public:
}
};
+enum class SourceLocIdentKind {
+ Function,
+ FuncSig,
+ File,
+ FileName,
+ Line,
+ Column,
+ SourceLocStruct
+};
+
/// Represents a function call to one of __builtin_LINE(), __builtin_COLUMN(),
-/// __builtin_FUNCTION(), or __builtin_FILE().
+/// __builtin_FUNCTION(), __builtin_FUNCSIG(), __builtin_FILE(),
+/// __builtin_FILE_NAME() or __builtin_source_location().
class SourceLocExpr final : public Expr {
SourceLocation BuiltinLoc, RParenLoc;
DeclContext *ParentContext;
public:
- enum IdentKind { Function, File, Line, Column };
-
- SourceLocExpr(const ASTContext &Ctx, IdentKind Type, SourceLocation BLoc,
+ SourceLocExpr(const ASTContext &Ctx, SourceLocIdentKind Type,
+ QualType ResultTy, SourceLocation BLoc,
SourceLocation RParenLoc, DeclContext *Context);
/// Build an empty call expression.
@@ -4689,22 +4715,24 @@ public:
/// Return a string representing the name of the specific builtin function.
StringRef getBuiltinStr() const;
- IdentKind getIdentKind() const {
- return static_cast<IdentKind>(SourceLocExprBits.Kind);
+ SourceLocIdentKind getIdentKind() const {
+ return static_cast<SourceLocIdentKind>(SourceLocExprBits.Kind);
}
- bool isStringType() const {
+ bool isIntType() const {
switch (getIdentKind()) {
- case File:
- case Function:
- return true;
- case Line:
- case Column:
+ case SourceLocIdentKind::File:
+ case SourceLocIdentKind::FileName:
+ case SourceLocIdentKind::Function:
+ case SourceLocIdentKind::FuncSig:
+ case SourceLocIdentKind::SourceLocStruct:
return false;
+ case SourceLocIdentKind::Line:
+ case SourceLocIdentKind::Column:
+ return true;
}
llvm_unreachable("unknown source location expression kind");
}
- bool isIntType() const LLVM_READONLY { return !isStringType(); }
/// If the SourceLocExpr has been resolved return the subexpression
/// representing the resolved value. Otherwise return null.
@@ -4727,6 +4755,17 @@ public:
return T->getStmtClass() == SourceLocExprClass;
}
+ static bool MayBeDependent(SourceLocIdentKind Kind) {
+ switch (Kind) {
+ case SourceLocIdentKind::Function:
+ case SourceLocIdentKind::FuncSig:
+ case SourceLocIdentKind::SourceLocStruct:
+ return true;
+ default:
+ return false;
+ }
+ }
+
private:
friend class ASTStmtReader;
};
@@ -4816,12 +4855,10 @@ public:
return reinterpret_cast<Expr * const *>(InitExprs.data());
}
- ArrayRef<Expr *> inits() {
- return llvm::makeArrayRef(getInits(), getNumInits());
- }
+ ArrayRef<Expr *> inits() { return llvm::ArrayRef(getInits(), getNumInits()); }
ArrayRef<Expr *> inits() const {
- return llvm::makeArrayRef(getInits(), getNumInits());
+ return llvm::ArrayRef(getInits(), getNumInits());
}
const Expr *getInit(unsigned Init) const {
@@ -4884,6 +4921,13 @@ public:
/// has been set.
bool hasArrayFiller() const { return getArrayFiller(); }
+ /// Determine whether this initializer list contains a designated initializer.
+ bool hasDesignatedInit() const {
+ return std::any_of(begin(), end(), [](const Stmt *S) {
+ return isa<DesignatedInitExpr>(S);
+ });
+ }
+
/// If this initializes a union, specifies which field in the
/// union to initialize.
///
@@ -4912,8 +4956,8 @@ public:
return LBraceLoc.isValid() && RBraceLoc.isValid();
}
- // Is this an initializer for an array of characters, initialized by a string
- // literal or an @encode?
+ /// Is this an initializer for an array of characters, initialized by a string
+ /// literal or an @encode?
bool isStringLiteralInit() const;
/// Is this a transparent initializer list (that is, an InitListExpr that is
@@ -5028,6 +5072,7 @@ private:
/// Whether this designated initializer used the GNU deprecated
/// syntax rather than the C99 '=' syntax.
+ LLVM_PREFERRED_TYPE(bool)
unsigned GNUSyntax : 1;
/// The number of designators in this initializer expression.
@@ -5052,37 +5097,6 @@ private:
NumDesignators(0), NumSubExprs(NumSubExprs), Designators(nullptr) { }
public:
- /// A field designator, e.g., ".x".
- struct FieldDesignator {
- /// Refers to the field that is being initialized. The low bit
- /// of this field determines whether this is actually a pointer
- /// to an IdentifierInfo (if 1) or a FieldDecl (if 0). When
- /// initially constructed, a field designator will store an
- /// IdentifierInfo*. After semantic analysis has resolved that
- /// name, the field designator will instead store a FieldDecl*.
- uintptr_t NameOrField;
-
- /// The location of the '.' in the designated initializer.
- SourceLocation DotLoc;
-
- /// The location of the field name in the designated initializer.
- SourceLocation FieldLoc;
- };
-
- /// An array or GNU array-range designator, e.g., "[9]" or "[10..15]".
- struct ArrayOrRangeDesignator {
- /// Location of the first index expression within the designated
- /// initializer expression's list of subexpressions.
- unsigned Index;
- /// The location of the '[' starting the array range designator.
- SourceLocation LBracketLoc;
- /// The location of the ellipsis separating the start and end
- /// indices. Only valid for GNU array-range designators.
- SourceLocation EllipsisLoc;
- /// The location of the ']' terminating the array range designator.
- SourceLocation RBracketLoc;
- };
-
/// Represents a single C99 designator.
///
/// @todo This class is infuriatingly similar to clang::Designator,
@@ -5090,118 +5104,177 @@ public:
/// keep us from reusing it. Try harder, later, to rectify these
/// differences.
class Designator {
+ /// A field designator, e.g., ".x".
+ struct FieldDesignatorInfo {
+ /// Refers to the field that is being initialized. The low bit
+ /// of this field determines whether this is actually a pointer
+ /// to an IdentifierInfo (if 1) or a FieldDecl (if 0). When
+ /// initially constructed, a field designator will store an
+ /// IdentifierInfo*. After semantic analysis has resolved that
+ /// name, the field designator will instead store a FieldDecl*.
+ uintptr_t NameOrField;
+
+ /// The location of the '.' in the designated initializer.
+ SourceLocation DotLoc;
+
+ /// The location of the field name in the designated initializer.
+ SourceLocation FieldLoc;
+
+ FieldDesignatorInfo(const IdentifierInfo *II, SourceLocation DotLoc,
+ SourceLocation FieldLoc)
+ : NameOrField(reinterpret_cast<uintptr_t>(II) | 0x1), DotLoc(DotLoc),
+ FieldLoc(FieldLoc) {}
+ };
+
+ /// An array or GNU array-range designator, e.g., "[9]" or "[10...15]".
+ struct ArrayOrRangeDesignatorInfo {
+ /// Location of the first index expression within the designated
+ /// initializer expression's list of subexpressions.
+ unsigned Index;
+
+ /// The location of the '[' starting the array range designator.
+ SourceLocation LBracketLoc;
+
+ /// The location of the ellipsis separating the start and end
+ /// indices. Only valid for GNU array-range designators.
+ SourceLocation EllipsisLoc;
+
+ /// The location of the ']' terminating the array range designator.
+ SourceLocation RBracketLoc;
+
+ ArrayOrRangeDesignatorInfo(unsigned Index, SourceLocation LBracketLoc,
+ SourceLocation RBracketLoc)
+ : Index(Index), LBracketLoc(LBracketLoc), RBracketLoc(RBracketLoc) {}
+
+ ArrayOrRangeDesignatorInfo(unsigned Index,
+ SourceLocation LBracketLoc,
+ SourceLocation EllipsisLoc,
+ SourceLocation RBracketLoc)
+ : Index(Index), LBracketLoc(LBracketLoc), EllipsisLoc(EllipsisLoc),
+ RBracketLoc(RBracketLoc) {}
+ };
+
/// The kind of designator this describes.
- enum {
+ enum DesignatorKind {
FieldDesignator,
ArrayDesignator,
ArrayRangeDesignator
- } Kind;
+ };
+
+ DesignatorKind Kind;
union {
/// A field designator, e.g., ".x".
- struct FieldDesignator Field;
+ struct FieldDesignatorInfo FieldInfo;
+
/// An array or GNU array-range designator, e.g., "[9]" or "[10..15]".
- struct ArrayOrRangeDesignator ArrayOrRange;
+ struct ArrayOrRangeDesignatorInfo ArrayOrRangeInfo;
};
- friend class DesignatedInitExpr;
+
+ Designator(DesignatorKind Kind) : Kind(Kind) {}
public:
Designator() {}
- /// Initializes a field designator.
- Designator(const IdentifierInfo *FieldName, SourceLocation DotLoc,
- SourceLocation FieldLoc)
- : Kind(FieldDesignator) {
- new (&Field) DesignatedInitExpr::FieldDesignator;
- Field.NameOrField = reinterpret_cast<uintptr_t>(FieldName) | 0x01;
- Field.DotLoc = DotLoc;
- Field.FieldLoc = FieldLoc;
- }
-
- /// Initializes an array designator.
- Designator(unsigned Index, SourceLocation LBracketLoc,
- SourceLocation RBracketLoc)
- : Kind(ArrayDesignator) {
- new (&ArrayOrRange) DesignatedInitExpr::ArrayOrRangeDesignator;
- ArrayOrRange.Index = Index;
- ArrayOrRange.LBracketLoc = LBracketLoc;
- ArrayOrRange.EllipsisLoc = SourceLocation();
- ArrayOrRange.RBracketLoc = RBracketLoc;
- }
-
- /// Initializes a GNU array-range designator.
- Designator(unsigned Index, SourceLocation LBracketLoc,
- SourceLocation EllipsisLoc, SourceLocation RBracketLoc)
- : Kind(ArrayRangeDesignator) {
- new (&ArrayOrRange) DesignatedInitExpr::ArrayOrRangeDesignator;
- ArrayOrRange.Index = Index;
- ArrayOrRange.LBracketLoc = LBracketLoc;
- ArrayOrRange.EllipsisLoc = EllipsisLoc;
- ArrayOrRange.RBracketLoc = RBracketLoc;
- }
-
bool isFieldDesignator() const { return Kind == FieldDesignator; }
bool isArrayDesignator() const { return Kind == ArrayDesignator; }
bool isArrayRangeDesignator() const { return Kind == ArrayRangeDesignator; }
- IdentifierInfo *getFieldName() const;
+ //===------------------------------------------------------------------===//
+ // FieldDesignatorInfo
+
+ /// Creates a field designator.
+ static Designator CreateFieldDesignator(const IdentifierInfo *FieldName,
+ SourceLocation DotLoc,
+ SourceLocation FieldLoc) {
+ Designator D(FieldDesignator);
+ new (&D.FieldInfo) FieldDesignatorInfo(FieldName, DotLoc, FieldLoc);
+ return D;
+ }
- FieldDecl *getField() const {
- assert(Kind == FieldDesignator && "Only valid on a field designator");
- if (Field.NameOrField & 0x01)
+ const IdentifierInfo *getFieldName() const;
+
+ FieldDecl *getFieldDecl() const {
+ assert(isFieldDesignator() && "Only valid on a field designator");
+ if (FieldInfo.NameOrField & 0x01)
return nullptr;
- else
- return reinterpret_cast<FieldDecl *>(Field.NameOrField);
+ return reinterpret_cast<FieldDecl *>(FieldInfo.NameOrField);
}
- void setField(FieldDecl *FD) {
- assert(Kind == FieldDesignator && "Only valid on a field designator");
- Field.NameOrField = reinterpret_cast<uintptr_t>(FD);
+ void setFieldDecl(FieldDecl *FD) {
+ assert(isFieldDesignator() && "Only valid on a field designator");
+ FieldInfo.NameOrField = reinterpret_cast<uintptr_t>(FD);
}
SourceLocation getDotLoc() const {
- assert(Kind == FieldDesignator && "Only valid on a field designator");
- return Field.DotLoc;
+ assert(isFieldDesignator() && "Only valid on a field designator");
+ return FieldInfo.DotLoc;
}
SourceLocation getFieldLoc() const {
- assert(Kind == FieldDesignator && "Only valid on a field designator");
- return Field.FieldLoc;
+ assert(isFieldDesignator() && "Only valid on a field designator");
+ return FieldInfo.FieldLoc;
}
- SourceLocation getLBracketLoc() const {
- assert((Kind == ArrayDesignator || Kind == ArrayRangeDesignator) &&
+ //===------------------------------------------------------------------===//
+ // ArrayOrRangeDesignator
+
+ /// Creates an array designator.
+ static Designator CreateArrayDesignator(unsigned Index,
+ SourceLocation LBracketLoc,
+ SourceLocation RBracketLoc) {
+ Designator D(ArrayDesignator);
+ new (&D.ArrayOrRangeInfo) ArrayOrRangeDesignatorInfo(Index, LBracketLoc,
+ RBracketLoc);
+ return D;
+ }
+
+ /// Creates a GNU array-range designator.
+ static Designator CreateArrayRangeDesignator(unsigned Index,
+ SourceLocation LBracketLoc,
+ SourceLocation EllipsisLoc,
+ SourceLocation RBracketLoc) {
+ Designator D(ArrayRangeDesignator);
+ new (&D.ArrayOrRangeInfo) ArrayOrRangeDesignatorInfo(Index, LBracketLoc,
+ EllipsisLoc,
+ RBracketLoc);
+ return D;
+ }
+
+ unsigned getArrayIndex() const {
+ assert((isArrayDesignator() || isArrayRangeDesignator()) &&
"Only valid on an array or array-range designator");
- return ArrayOrRange.LBracketLoc;
+ return ArrayOrRangeInfo.Index;
}
- SourceLocation getRBracketLoc() const {
- assert((Kind == ArrayDesignator || Kind == ArrayRangeDesignator) &&
+ SourceLocation getLBracketLoc() const {
+ assert((isArrayDesignator() || isArrayRangeDesignator()) &&
"Only valid on an array or array-range designator");
- return ArrayOrRange.RBracketLoc;
+ return ArrayOrRangeInfo.LBracketLoc;
}
SourceLocation getEllipsisLoc() const {
- assert(Kind == ArrayRangeDesignator &&
+ assert(isArrayRangeDesignator() &&
"Only valid on an array-range designator");
- return ArrayOrRange.EllipsisLoc;
+ return ArrayOrRangeInfo.EllipsisLoc;
}
- unsigned getFirstExprIndex() const {
- assert((Kind == ArrayDesignator || Kind == ArrayRangeDesignator) &&
+ SourceLocation getRBracketLoc() const {
+ assert((isArrayDesignator() || isArrayRangeDesignator()) &&
"Only valid on an array or array-range designator");
- return ArrayOrRange.Index;
+ return ArrayOrRangeInfo.RBracketLoc;
}
SourceLocation getBeginLoc() const LLVM_READONLY {
- if (Kind == FieldDesignator)
- return getDotLoc().isInvalid()? getFieldLoc() : getDotLoc();
- else
- return getLBracketLoc();
+ if (isFieldDesignator())
+ return getDotLoc().isInvalid() ? getFieldLoc() : getDotLoc();
+ return getLBracketLoc();
}
+
SourceLocation getEndLoc() const LLVM_READONLY {
- return Kind == FieldDesignator ? getFieldLoc() : getRBracketLoc();
+ return isFieldDesignator() ? getFieldLoc() : getRBracketLoc();
}
+
SourceRange getSourceRange() const LLVM_READONLY {
return SourceRange(getBeginLoc(), getEndLoc());
}
@@ -5563,9 +5636,7 @@ public:
return reinterpret_cast<Expr **>(getTrailingObjects<Stmt *>());
}
- ArrayRef<Expr *> exprs() {
- return llvm::makeArrayRef(getExprs(), getNumExprs());
- }
+ ArrayRef<Expr *> exprs() { return llvm::ArrayRef(getExprs(), getNumExprs()); }
SourceLocation getLParenLoc() const { return LParenLoc; }
SourceLocation getRParenLoc() const { return RParenLoc; }
@@ -5613,6 +5684,12 @@ public:
/// which names a dependent type in its association list is result-dependent,
/// which means that the choice of result expression is dependent.
/// Result-dependent generic associations are both type- and value-dependent.
+///
+/// We also allow an extended form in both C and C++ where the controlling
+/// predicate for the selection expression is a type rather than an expression.
+/// This type argument form does not perform any conversions for the
+/// controlling type, which makes it suitable for use with qualified type
+/// associations, which is not possible with the expression form.
class GenericSelectionExpr final
: public Expr,
private llvm::TrailingObjects<GenericSelectionExpr, Stmt *,
@@ -5625,31 +5702,68 @@ class GenericSelectionExpr final
/// expression in the case where the generic selection expression is not
/// result-dependent. The result index is equal to ResultDependentIndex
/// if and only if the generic selection expression is result-dependent.
- unsigned NumAssocs, ResultIndex;
+ unsigned NumAssocs : 15;
+ unsigned ResultIndex : 15; // NB: ResultDependentIndex is tied to this width.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsExprPredicate : 1;
enum : unsigned {
- ResultDependentIndex = std::numeric_limits<unsigned>::max(),
- ControllingIndex = 0,
- AssocExprStartIndex = 1
+ ResultDependentIndex = 0x7FFF
};
+ unsigned getIndexOfControllingExpression() const {
+ // If controlled by an expression, the first offset into the Stmt *
+ // trailing array is the controlling expression, the associated expressions
+ // follow this.
+ assert(isExprPredicate() && "Asking for the controlling expression of a "
+ "selection expr predicated by a type");
+ return 0;
+ }
+
+ unsigned getIndexOfControllingType() const {
+ // If controlled by a type, the first offset into the TypeSourceInfo *
+ // trailing array is the controlling type, the associated types follow this.
+ assert(isTypePredicate() && "Asking for the controlling type of a "
+ "selection expr predicated by an expression");
+ return 0;
+ }
+
+ unsigned getIndexOfStartOfAssociatedExprs() const {
+ // If the predicate is a type, then the associated expressions are the only
+ // Stmt * in the trailing array, otherwise we need to offset past the
+ // predicate expression.
+ return (int)isExprPredicate();
+ }
+
+ unsigned getIndexOfStartOfAssociatedTypes() const {
+ // If the predicate is a type, then the associated types follow it in the
+ // trailing array. Otherwise, the associated types are the only
+ // TypeSourceInfo * in the trailing array.
+ return (int)isTypePredicate();
+ }
+
+
/// The location of the "default" and of the right parenthesis.
SourceLocation DefaultLoc, RParenLoc;
// GenericSelectionExpr is followed by several trailing objects.
// They are (in order):
//
- // * A single Stmt * for the controlling expression.
+ // * A single Stmt * for the controlling expression or a TypeSourceInfo * for
+ // the controlling type, depending on the result of isTypePredicate() or
+ // isExprPredicate().
// * An array of getNumAssocs() Stmt * for the association expressions.
// * An array of getNumAssocs() TypeSourceInfo *, one for each of the
// association expressions.
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
// Add one to account for the controlling expression; the remainder
// are the associated expressions.
- return 1 + getNumAssocs();
+ return getNumAssocs() + (int)isExprPredicate();
}
unsigned numTrailingObjects(OverloadToken<TypeSourceInfo *>) const {
- return getNumAssocs();
+ // Add one to account for the controlling type predicate, the remainder
+ // are the associated types.
+ return getNumAssocs() + (int)isTypePredicate();
}
template <bool Const> class AssociationIteratorTy;
@@ -5730,7 +5844,8 @@ class GenericSelectionExpr final
bool operator==(AssociationIteratorTy Other) const { return E == Other.E; }
}; // class AssociationIterator
- /// Build a non-result-dependent generic selection expression.
+ /// Build a non-result-dependent generic selection expression accepting an
+ /// expression predicate.
GenericSelectionExpr(const ASTContext &Context, SourceLocation GenericLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> AssocTypes,
@@ -5739,7 +5854,8 @@ class GenericSelectionExpr final
bool ContainsUnexpandedParameterPack,
unsigned ResultIndex);
- /// Build a result-dependent generic selection expression.
+ /// Build a result-dependent generic selection expression accepting an
+ /// expression predicate.
GenericSelectionExpr(const ASTContext &Context, SourceLocation GenericLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> AssocTypes,
@@ -5747,11 +5863,31 @@ class GenericSelectionExpr final
SourceLocation RParenLoc,
bool ContainsUnexpandedParameterPack);
+ /// Build a non-result-dependent generic selection expression accepting a
+ /// type predicate.
+ GenericSelectionExpr(const ASTContext &Context, SourceLocation GenericLoc,
+ TypeSourceInfo *ControllingType,
+ ArrayRef<TypeSourceInfo *> AssocTypes,
+ ArrayRef<Expr *> AssocExprs, SourceLocation DefaultLoc,
+ SourceLocation RParenLoc,
+ bool ContainsUnexpandedParameterPack,
+ unsigned ResultIndex);
+
+ /// Build a result-dependent generic selection expression accepting a type
+ /// predicate.
+ GenericSelectionExpr(const ASTContext &Context, SourceLocation GenericLoc,
+ TypeSourceInfo *ControllingType,
+ ArrayRef<TypeSourceInfo *> AssocTypes,
+ ArrayRef<Expr *> AssocExprs, SourceLocation DefaultLoc,
+ SourceLocation RParenLoc,
+ bool ContainsUnexpandedParameterPack);
+
/// Build an empty generic selection expression for deserialization.
explicit GenericSelectionExpr(EmptyShell Empty, unsigned NumAssocs);
public:
- /// Create a non-result-dependent generic selection expression.
+ /// Create a non-result-dependent generic selection expression accepting an
+ /// expression predicate.
static GenericSelectionExpr *
Create(const ASTContext &Context, SourceLocation GenericLoc,
Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> AssocTypes,
@@ -5759,13 +5895,31 @@ public:
SourceLocation RParenLoc, bool ContainsUnexpandedParameterPack,
unsigned ResultIndex);
- /// Create a result-dependent generic selection expression.
+ /// Create a result-dependent generic selection expression accepting an
+ /// expression predicate.
static GenericSelectionExpr *
Create(const ASTContext &Context, SourceLocation GenericLoc,
Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> AssocTypes,
ArrayRef<Expr *> AssocExprs, SourceLocation DefaultLoc,
SourceLocation RParenLoc, bool ContainsUnexpandedParameterPack);
+ /// Create a non-result-dependent generic selection expression accepting a
+ /// type predicate.
+ static GenericSelectionExpr *
+ Create(const ASTContext &Context, SourceLocation GenericLoc,
+ TypeSourceInfo *ControllingType, ArrayRef<TypeSourceInfo *> AssocTypes,
+ ArrayRef<Expr *> AssocExprs, SourceLocation DefaultLoc,
+ SourceLocation RParenLoc, bool ContainsUnexpandedParameterPack,
+ unsigned ResultIndex);
+
+ /// Create a result-dependent generic selection expression accepting a type
+ /// predicate
+ static GenericSelectionExpr *
+ Create(const ASTContext &Context, SourceLocation GenericLoc,
+ TypeSourceInfo *ControllingType, ArrayRef<TypeSourceInfo *> AssocTypes,
+ ArrayRef<Expr *> AssocExprs, SourceLocation DefaultLoc,
+ SourceLocation RParenLoc, bool ContainsUnexpandedParameterPack);
+
/// Create an empty generic selection expression for deserialization.
static GenericSelectionExpr *CreateEmpty(const ASTContext &Context,
unsigned NumAssocs);
@@ -5793,32 +5947,56 @@ public:
/// Whether this generic selection is result-dependent.
bool isResultDependent() const { return ResultIndex == ResultDependentIndex; }
+ /// Whether this generic selection uses an expression as its controlling
+ /// argument.
+ bool isExprPredicate() const { return IsExprPredicate; }
+ /// Whether this generic selection uses a type as its controlling argument.
+ bool isTypePredicate() const { return !IsExprPredicate; }
+
/// Return the controlling expression of this generic selection expression.
+ /// Only valid to call if the selection expression used an expression as its
+ /// controlling argument.
Expr *getControllingExpr() {
- return cast<Expr>(getTrailingObjects<Stmt *>()[ControllingIndex]);
+ return cast<Expr>(
+ getTrailingObjects<Stmt *>()[getIndexOfControllingExpression()]);
}
const Expr *getControllingExpr() const {
- return cast<Expr>(getTrailingObjects<Stmt *>()[ControllingIndex]);
+ return cast<Expr>(
+ getTrailingObjects<Stmt *>()[getIndexOfControllingExpression()]);
+ }
+
+ /// Return the controlling type of this generic selection expression. Only
+ /// valid to call if the selection expression used a type as its controlling
+ /// argument.
+ TypeSourceInfo *getControllingType() {
+ return getTrailingObjects<TypeSourceInfo *>()[getIndexOfControllingType()];
+ }
+ const TypeSourceInfo* getControllingType() const {
+ return getTrailingObjects<TypeSourceInfo *>()[getIndexOfControllingType()];
}
/// Return the result expression of this controlling expression. Defined if
/// and only if the generic selection expression is not result-dependent.
Expr *getResultExpr() {
return cast<Expr>(
- getTrailingObjects<Stmt *>()[AssocExprStartIndex + getResultIndex()]);
+ getTrailingObjects<Stmt *>()[getIndexOfStartOfAssociatedExprs() +
+ getResultIndex()]);
}
const Expr *getResultExpr() const {
return cast<Expr>(
- getTrailingObjects<Stmt *>()[AssocExprStartIndex + getResultIndex()]);
+ getTrailingObjects<Stmt *>()[getIndexOfStartOfAssociatedExprs() +
+ getResultIndex()]);
}
ArrayRef<Expr *> getAssocExprs() const {
return {reinterpret_cast<Expr *const *>(getTrailingObjects<Stmt *>() +
- AssocExprStartIndex),
+ getIndexOfStartOfAssociatedExprs()),
NumAssocs};
}
ArrayRef<TypeSourceInfo *> getAssocTypeSourceInfos() const {
- return {getTrailingObjects<TypeSourceInfo *>(), NumAssocs};
+ return {getTrailingObjects<TypeSourceInfo *>() +
+ getIndexOfStartOfAssociatedTypes(),
+ NumAssocs};
}
/// Return the Ith association expression with its TypeSourceInfo,
@@ -5827,23 +6005,30 @@ public:
assert(I < getNumAssocs() &&
"Out-of-range index in GenericSelectionExpr::getAssociation!");
return Association(
- cast<Expr>(getTrailingObjects<Stmt *>()[AssocExprStartIndex + I]),
- getTrailingObjects<TypeSourceInfo *>()[I],
+ cast<Expr>(
+ getTrailingObjects<Stmt *>()[getIndexOfStartOfAssociatedExprs() +
+ I]),
+ getTrailingObjects<
+ TypeSourceInfo *>()[getIndexOfStartOfAssociatedTypes() + I],
!isResultDependent() && (getResultIndex() == I));
}
ConstAssociation getAssociation(unsigned I) const {
assert(I < getNumAssocs() &&
"Out-of-range index in GenericSelectionExpr::getAssociation!");
return ConstAssociation(
- cast<Expr>(getTrailingObjects<Stmt *>()[AssocExprStartIndex + I]),
- getTrailingObjects<TypeSourceInfo *>()[I],
+ cast<Expr>(
+ getTrailingObjects<Stmt *>()[getIndexOfStartOfAssociatedExprs() +
+ I]),
+ getTrailingObjects<
+ TypeSourceInfo *>()[getIndexOfStartOfAssociatedTypes() + I],
!isResultDependent() && (getResultIndex() == I));
}
association_range associations() {
AssociationIterator Begin(getTrailingObjects<Stmt *>() +
- AssocExprStartIndex,
- getTrailingObjects<TypeSourceInfo *>(),
+ getIndexOfStartOfAssociatedExprs(),
+ getTrailingObjects<TypeSourceInfo *>() +
+ getIndexOfStartOfAssociatedTypes(),
/*Offset=*/0, ResultIndex);
AssociationIterator End(Begin.E + NumAssocs, Begin.TSI + NumAssocs,
/*Offset=*/NumAssocs, ResultIndex);
@@ -5852,8 +6037,9 @@ public:
const_association_range associations() const {
ConstAssociationIterator Begin(getTrailingObjects<Stmt *>() +
- AssocExprStartIndex,
- getTrailingObjects<TypeSourceInfo *>(),
+ getIndexOfStartOfAssociatedExprs(),
+ getTrailingObjects<TypeSourceInfo *>() +
+ getIndexOfStartOfAssociatedTypes(),
/*Offset=*/0, ResultIndex);
ConstAssociationIterator End(Begin.E + NumAssocs, Begin.TSI + NumAssocs,
/*Offset=*/NumAssocs, ResultIndex);
@@ -6166,11 +6352,11 @@ public:
return getSubExprsBuffer() + getNumSubExprs();
}
- llvm::iterator_range<semantics_iterator> semantics() {
- return llvm::make_range(semantics_begin(), semantics_end());
+ ArrayRef<Expr*> semantics() {
+ return ArrayRef(semantics_begin(), semantics_end());
}
- llvm::iterator_range<const_semantics_iterator> semantics() const {
- return llvm::make_range(semantics_begin(), semantics_end());
+ ArrayRef<const Expr*> semantics() const {
+ return ArrayRef(semantics_begin(), semantics_end());
}
Expr *getSemanticExpr(unsigned index) {
@@ -6272,7 +6458,7 @@ public:
return cast<Expr>(SubExprs[ORDER_FAIL]);
}
Expr *getVal2() const {
- if (Op == AO__atomic_exchange)
+ if (Op == AO__atomic_exchange || Op == AO__scoped_atomic_exchange)
return cast<Expr>(SubExprs[ORDER_FAIL]);
assert(NumSubExprs > VAL2);
return cast<Expr>(SubExprs[VAL2]);
@@ -6284,6 +6470,16 @@ public:
QualType getValueType() const;
AtomicOp getOp() const { return Op; }
+ StringRef getOpAsString() const {
+ switch (Op) {
+#define BUILTIN(ID, TYPE, ATTRS)
+#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
+ case AO##ID: \
+ return #ID;
+#include "clang/Basic/Builtins.def"
+ }
+ llvm_unreachable("not an atomic operator?");
+ }
unsigned getNumSubExprs() const { return NumSubExprs; }
Expr **getSubExprs() { return reinterpret_cast<Expr **>(SubExprs); }
@@ -6298,10 +6494,14 @@ public:
bool isCmpXChg() const {
return getOp() == AO__c11_atomic_compare_exchange_strong ||
getOp() == AO__c11_atomic_compare_exchange_weak ||
+ getOp() == AO__hip_atomic_compare_exchange_strong ||
getOp() == AO__opencl_atomic_compare_exchange_strong ||
getOp() == AO__opencl_atomic_compare_exchange_weak ||
+ getOp() == AO__hip_atomic_compare_exchange_weak ||
getOp() == AO__atomic_compare_exchange ||
- getOp() == AO__atomic_compare_exchange_n;
+ getOp() == AO__atomic_compare_exchange_n ||
+ getOp() == AO__scoped_atomic_compare_exchange ||
+ getOp() == AO__scoped_atomic_compare_exchange_n;
}
bool isOpenCL() const {
@@ -6331,11 +6531,13 @@ public:
/// \return empty atomic scope model if the atomic op code does not have
/// scope operand.
static std::unique_ptr<AtomicScopeModel> getScopeModel(AtomicOp Op) {
- auto Kind =
- (Op >= AO__opencl_atomic_load && Op <= AO__opencl_atomic_fetch_max)
- ? AtomicScopeModelKind::OpenCL
- : AtomicScopeModelKind::None;
- return AtomicScopeModel::create(Kind);
+ if (Op >= AO__opencl_atomic_load && Op <= AO__opencl_atomic_fetch_max)
+ return AtomicScopeModel::create(AtomicScopeModelKind::OpenCL);
+ else if (Op >= AO__hip_atomic_load && Op <= AO__hip_atomic_fetch_max)
+ return AtomicScopeModel::create(AtomicScopeModelKind::HIP);
+ else if (Op >= AO__scoped_atomic_load && Op <= AO__scoped_atomic_fetch_max)
+ return AtomicScopeModel::create(AtomicScopeModelKind::Generic);
+ return AtomicScopeModel::create(AtomicScopeModelKind::None);
}
/// Get atomic scope model.
@@ -6412,7 +6614,7 @@ public:
ArrayRef<Expr *> subExpressions() {
auto *B = getTrailingObjects<Expr *>();
- return llvm::makeArrayRef(B, B + NumExprs);
+ return llvm::ArrayRef(B, B + NumExprs);
}
ArrayRef<const Expr *> subExpressions() const {
diff --git a/contrib/llvm-project/clang/include/clang/AST/ExprCXX.h b/contrib/llvm-project/clang/include/clang/AST/ExprCXX.h
index 161287adce4c..9a7c632c36c5 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ExprCXX.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ExprCXX.h
@@ -40,8 +40,6 @@
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
@@ -52,6 +50,7 @@
#include <cstddef>
#include <cstdint>
#include <memory>
+#include <optional>
namespace clang {
@@ -730,6 +729,11 @@ public:
explicit CXXBoolLiteralExpr(EmptyShell Empty)
: Expr(CXXBoolLiteralExprClass, Empty) {}
+ static CXXBoolLiteralExpr *Create(const ASTContext &C, bool Val, QualType Ty,
+ SourceLocation Loc) {
+ return new (C) CXXBoolLiteralExpr(Val, Ty, Loc);
+ }
+
bool getValue() const { return CXXBoolLiteralExprBits.Value; }
void setValue(bool V) { CXXBoolLiteralExprBits.Value = V; }
@@ -756,6 +760,8 @@ public:
/// The null pointer literal (C++11 [lex.nullptr])
///
/// Introduced in C++11, the only literal of type \c nullptr_t is \c nullptr.
+/// This also implements the null pointer literal in C23 (C23 6.4.1) which is
+/// intended to have the same semantics as the feature in C++.
class CXXNullPtrLiteralExpr : public Expr {
public:
CXXNullPtrLiteralExpr(QualType Ty, SourceLocation Loc)
@@ -1140,9 +1146,8 @@ public:
/// };
/// \endcode
class CXXThisExpr : public Expr {
-public:
- CXXThisExpr(SourceLocation L, QualType Ty, bool IsImplicit)
- : Expr(CXXThisExprClass, Ty, VK_PRValue, OK_Ordinary) {
+ CXXThisExpr(SourceLocation L, QualType Ty, bool IsImplicit, ExprValueKind VK)
+ : Expr(CXXThisExprClass, Ty, VK, OK_Ordinary) {
CXXThisExprBits.IsImplicit = IsImplicit;
CXXThisExprBits.Loc = L;
setDependence(computeDependence(this));
@@ -1150,6 +1155,12 @@ public:
CXXThisExpr(EmptyShell Empty) : Expr(CXXThisExprClass, Empty) {}
+public:
+ static CXXThisExpr *Create(const ASTContext &Ctx, SourceLocation L,
+ QualType Ty, bool IsImplicit);
+
+ static CXXThisExpr *CreateEmpty(const ASTContext &Ctx);
+
SourceLocation getLocation() const { return CXXThisExprBits.Loc; }
void setLocation(SourceLocation L) { CXXThisExprBits.Loc = L; }
@@ -1238,8 +1249,12 @@ public:
/// This wraps up a function call argument that was created from the
/// corresponding parameter's default argument, when the call did not
/// explicitly supply arguments for all of the parameters.
-class CXXDefaultArgExpr final : public Expr {
+class CXXDefaultArgExpr final
+ : public Expr,
+ private llvm::TrailingObjects<CXXDefaultArgExpr, Expr *> {
friend class ASTStmtReader;
+ friend class ASTReader;
+ friend TrailingObjects;
/// The parameter whose default is being used.
ParmVarDecl *Param;
@@ -1248,7 +1263,7 @@ class CXXDefaultArgExpr final : public Expr {
DeclContext *UsedContext;
CXXDefaultArgExpr(StmtClass SC, SourceLocation Loc, ParmVarDecl *Param,
- DeclContext *UsedContext)
+ Expr *RewrittenExpr, DeclContext *UsedContext)
: Expr(SC,
Param->hasUnparsedDefaultArg()
? Param->getType().getNonReferenceType()
@@ -1257,28 +1272,54 @@ class CXXDefaultArgExpr final : public Expr {
Param->getDefaultArg()->getObjectKind()),
Param(Param), UsedContext(UsedContext) {
CXXDefaultArgExprBits.Loc = Loc;
+ CXXDefaultArgExprBits.HasRewrittenInit = RewrittenExpr != nullptr;
+ if (RewrittenExpr)
+ *getTrailingObjects<Expr *>() = RewrittenExpr;
setDependence(computeDependence(this));
}
+ CXXDefaultArgExpr(EmptyShell Empty, bool HasRewrittenInit)
+ : Expr(CXXDefaultArgExprClass, Empty) {
+ CXXDefaultArgExprBits.HasRewrittenInit = HasRewrittenInit;
+ }
+
public:
- CXXDefaultArgExpr(EmptyShell Empty) : Expr(CXXDefaultArgExprClass, Empty) {}
+ static CXXDefaultArgExpr *CreateEmpty(const ASTContext &C,
+ bool HasRewrittenInit);
// \p Param is the parameter whose default argument is used by this
// expression.
static CXXDefaultArgExpr *Create(const ASTContext &C, SourceLocation Loc,
- ParmVarDecl *Param,
- DeclContext *UsedContext) {
- return new (C)
- CXXDefaultArgExpr(CXXDefaultArgExprClass, Loc, Param, UsedContext);
- }
-
+ ParmVarDecl *Param, Expr *RewrittenExpr,
+ DeclContext *UsedContext);
// Retrieve the parameter that the argument was created from.
const ParmVarDecl *getParam() const { return Param; }
ParmVarDecl *getParam() { return Param; }
- // Retrieve the actual argument to the function call.
- const Expr *getExpr() const { return getParam()->getDefaultArg(); }
- Expr *getExpr() { return getParam()->getDefaultArg(); }
+ bool hasRewrittenInit() const {
+ return CXXDefaultArgExprBits.HasRewrittenInit;
+ }
+
+ // Retrieve the argument to the function call.
+ Expr *getExpr();
+ const Expr *getExpr() const {
+ return const_cast<CXXDefaultArgExpr *>(this)->getExpr();
+ }
+
+ Expr *getRewrittenExpr() {
+ return hasRewrittenInit() ? *getTrailingObjects<Expr *>() : nullptr;
+ }
+
+ const Expr *getRewrittenExpr() const {
+ return const_cast<CXXDefaultArgExpr *>(this)->getRewrittenExpr();
+ }
+
+ // Retrieve the rewritten init expression (for an init expression containing
+ // immediate calls) with the top level FullExpr and ConstantExpr stripped off.
+ Expr *getAdjustedRewrittenExpr();
+ const Expr *getAdjustedRewrittenExpr() const {
+ return const_cast<CXXDefaultArgExpr *>(this)->getAdjustedRewrittenExpr();
+ }
const DeclContext *getUsedContext() const { return UsedContext; }
DeclContext *getUsedContext() { return UsedContext; }
@@ -1315,10 +1356,13 @@ public:
/// is implicitly used in a mem-initializer-list in a constructor
/// (C++11 [class.base.init]p8) or in aggregate initialization
/// (C++1y [dcl.init.aggr]p7).
-class CXXDefaultInitExpr : public Expr {
- friend class ASTReader;
- friend class ASTStmtReader;
+class CXXDefaultInitExpr final
+ : public Expr,
+ private llvm::TrailingObjects<CXXDefaultInitExpr, Expr *> {
+ friend class ASTStmtReader;
+ friend class ASTReader;
+ friend TrailingObjects;
/// The field whose default is being used.
FieldDecl *Field;
@@ -1326,16 +1370,25 @@ class CXXDefaultInitExpr : public Expr {
DeclContext *UsedContext;
CXXDefaultInitExpr(const ASTContext &Ctx, SourceLocation Loc,
- FieldDecl *Field, QualType Ty, DeclContext *UsedContext);
+ FieldDecl *Field, QualType Ty, DeclContext *UsedContext,
+ Expr *RewrittenInitExpr);
- CXXDefaultInitExpr(EmptyShell Empty) : Expr(CXXDefaultInitExprClass, Empty) {}
+ CXXDefaultInitExpr(EmptyShell Empty, bool HasRewrittenInit)
+ : Expr(CXXDefaultInitExprClass, Empty) {
+ CXXDefaultInitExprBits.HasRewrittenInit = HasRewrittenInit;
+ }
public:
+ static CXXDefaultInitExpr *CreateEmpty(const ASTContext &C,
+ bool HasRewrittenInit);
/// \p Field is the non-static data member whose default initializer is used
/// by this expression.
static CXXDefaultInitExpr *Create(const ASTContext &Ctx, SourceLocation Loc,
- FieldDecl *Field, DeclContext *UsedContext) {
- return new (Ctx) CXXDefaultInitExpr(Ctx, Loc, Field, Field->getType(), UsedContext);
+ FieldDecl *Field, DeclContext *UsedContext,
+ Expr *RewrittenInitExpr);
+
+ bool hasRewrittenInit() const {
+ return CXXDefaultInitExprBits.HasRewrittenInit;
}
/// Get the field whose initializer will be used.
@@ -1343,13 +1396,23 @@ public:
const FieldDecl *getField() const { return Field; }
/// Get the initialization expression that will be used.
+ Expr *getExpr();
const Expr *getExpr() const {
- assert(Field->getInClassInitializer() && "initializer hasn't been parsed");
- return Field->getInClassInitializer();
+ return const_cast<CXXDefaultInitExpr *>(this)->getExpr();
}
- Expr *getExpr() {
- assert(Field->getInClassInitializer() && "initializer hasn't been parsed");
- return Field->getInClassInitializer();
+
+ /// Retrieve the initializing expression with evaluated immediate calls, if
+ /// any.
+ const Expr *getRewrittenExpr() const {
+ assert(hasRewrittenInit() && "expected a rewritten init expression");
+ return *getTrailingObjects<Expr *>();
+ }
+
+ /// Retrieve the initializing expression with evaluated immediate calls, if
+ /// any.
+ Expr *getRewrittenExpr() {
+ assert(hasRewrittenInit() && "expected a rewritten init expression");
+ return *getTrailingObjects<Expr *>();
}
const DeclContext *getUsedContext() const { return UsedContext; }
@@ -1456,19 +1519,17 @@ public:
}
};
+enum class CXXConstructionKind {
+ Complete,
+ NonVirtualBase,
+ VirtualBase,
+ Delegating
+};
+
/// Represents a call to a C++ constructor.
class CXXConstructExpr : public Expr {
friend class ASTStmtReader;
-public:
- enum ConstructionKind {
- CK_Complete,
- CK_NonVirtualBase,
- CK_VirtualBase,
- CK_Delegating
- };
-
-private:
/// A pointer to the constructor which will be ultimately called.
CXXConstructorDecl *Constructor;
@@ -1504,7 +1565,7 @@ protected:
CXXConstructorDecl *Ctor, bool Elidable,
ArrayRef<Expr *> Args, bool HadMultipleCandidates,
bool ListInitialization, bool StdInitListInitialization,
- bool ZeroInitialization, ConstructionKind ConstructKind,
+ bool ZeroInitialization, CXXConstructionKind ConstructKind,
SourceRange ParenOrBraceRange);
/// Build an empty C++ construction expression.
@@ -1523,7 +1584,7 @@ public:
CXXConstructorDecl *Ctor, bool Elidable, ArrayRef<Expr *> Args,
bool HadMultipleCandidates, bool ListInitialization,
bool StdInitListInitialization, bool ZeroInitialization,
- ConstructionKind ConstructKind, SourceRange ParenOrBraceRange);
+ CXXConstructionKind ConstructKind, SourceRange ParenOrBraceRange);
/// Create an empty C++ construction expression.
static CXXConstructExpr *CreateEmpty(const ASTContext &Ctx, unsigned NumArgs);
@@ -1577,11 +1638,12 @@ public:
/// Determine whether this constructor is actually constructing
/// a base class (rather than a complete object).
- ConstructionKind getConstructionKind() const {
- return static_cast<ConstructionKind>(CXXConstructExprBits.ConstructionKind);
+ CXXConstructionKind getConstructionKind() const {
+ return static_cast<CXXConstructionKind>(
+ CXXConstructExprBits.ConstructionKind);
}
- void setConstructionKind(ConstructionKind CK) {
- CXXConstructExprBits.ConstructionKind = CK;
+ void setConstructionKind(CXXConstructionKind CK) {
+ CXXConstructExprBits.ConstructionKind = llvm::to_underlying(CK);
}
using arg_iterator = ExprIterator;
@@ -1623,6 +1685,14 @@ public:
getArgs()[Arg] = ArgExpr;
}
+ bool isImmediateEscalating() const {
+ return CXXConstructExprBits.IsImmediateEscalating;
+ }
+
+ void setIsImmediateEscalating(bool Set) {
+ CXXConstructExprBits.IsImmediateEscalating = Set;
+ }
+
SourceLocation getBeginLoc() const LLVM_READONLY;
SourceLocation getEndLoc() const LLVM_READONLY;
SourceRange getParenOrBraceRange() const { return ParenOrBraceRange; }
@@ -1656,10 +1726,12 @@ private:
SourceLocation Loc;
/// Whether this is the construction of a virtual base.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ConstructsVirtualBase : 1;
/// Whether the constructor is inherited from a virtual base class of the
/// class that we construct.
+ LLVM_PREFERRED_TYPE(bool)
unsigned InheritedFromVirtualBase : 1;
public:
@@ -1688,9 +1760,9 @@ public:
/// Determine whether this constructor is actually constructing
/// a base class (rather than a complete object).
bool constructsVBase() const { return ConstructsVirtualBase; }
- CXXConstructExpr::ConstructionKind getConstructionKind() const {
- return ConstructsVirtualBase ? CXXConstructExpr::CK_VirtualBase
- : CXXConstructExpr::CK_NonVirtualBase;
+ CXXConstructionKind getConstructionKind() const {
+ return ConstructsVirtualBase ? CXXConstructionKind::VirtualBase
+ : CXXConstructionKind::NonVirtualBase;
}
/// Determine whether the inherited constructor is inherited from a
@@ -2134,6 +2206,17 @@ public:
}
};
+enum class CXXNewInitializationStyle {
+ /// New-expression has no initializer as written.
+ None,
+
+ /// New-expression has a C++98 paren-delimited initializer.
+ Parens,
+
+ /// New-expression has a C++11 list-initializer.
+ Braces
+};
+
/// Represents a new-expression for memory allocation and constructor
/// calls, e.g: "new CXXNewExpr(foo)".
class CXXNewExpr final
@@ -2187,25 +2270,12 @@ class CXXNewExpr final
return isParenTypeId();
}
-public:
- enum InitializationStyle {
- /// New-expression has no initializer as written.
- NoInit,
-
- /// New-expression has a C++98 paren-delimited initializer.
- CallInit,
-
- /// New-expression has a C++11 list-initializer.
- ListInit
- };
-
-private:
/// Build a c++ new expression.
CXXNewExpr(bool IsGlobalNew, FunctionDecl *OperatorNew,
FunctionDecl *OperatorDelete, bool ShouldPassAlignment,
bool UsualArrayDeleteWantsSize, ArrayRef<Expr *> PlacementArgs,
- SourceRange TypeIdParens, Optional<Expr *> ArraySize,
- InitializationStyle InitializationStyle, Expr *Initializer,
+ SourceRange TypeIdParens, std::optional<Expr *> ArraySize,
+ CXXNewInitializationStyle InitializationStyle, Expr *Initializer,
QualType Ty, TypeSourceInfo *AllocatedTypeInfo, SourceRange Range,
SourceRange DirectInitRange);
@@ -2219,8 +2289,8 @@ public:
Create(const ASTContext &Ctx, bool IsGlobalNew, FunctionDecl *OperatorNew,
FunctionDecl *OperatorDelete, bool ShouldPassAlignment,
bool UsualArrayDeleteWantsSize, ArrayRef<Expr *> PlacementArgs,
- SourceRange TypeIdParens, Optional<Expr *> ArraySize,
- InitializationStyle InitializationStyle, Expr *Initializer,
+ SourceRange TypeIdParens, std::optional<Expr *> ArraySize,
+ CXXNewInitializationStyle InitializationStyle, Expr *Initializer,
QualType Ty, TypeSourceInfo *AllocatedTypeInfo, SourceRange Range,
SourceRange DirectInitRange);
@@ -2261,15 +2331,32 @@ public:
bool isArray() const { return CXXNewExprBits.IsArray; }
- Optional<Expr *> getArraySize() {
+ /// This might return std::nullopt even if isArray() returns true,
+ /// since there might not be an array size expression.
+ /// If the result is not std::nullopt, it will never wrap a nullptr.
+ std::optional<Expr *> getArraySize() {
if (!isArray())
- return None;
- return cast_or_null<Expr>(getTrailingObjects<Stmt *>()[arraySizeOffset()]);
+ return std::nullopt;
+
+ if (auto *Result =
+ cast_or_null<Expr>(getTrailingObjects<Stmt *>()[arraySizeOffset()]))
+ return Result;
+
+ return std::nullopt;
}
- Optional<const Expr *> getArraySize() const {
+
+ /// This might return std::nullopt even if isArray() returns true,
+ /// since there might not be an array size expression.
+ /// If the result is not std::nullopt, it will never wrap a nullptr.
+ std::optional<const Expr *> getArraySize() const {
if (!isArray())
- return None;
- return cast_or_null<Expr>(getTrailingObjects<Stmt *>()[arraySizeOffset()]);
+ return std::nullopt;
+
+ if (auto *Result =
+ cast_or_null<Expr>(getTrailingObjects<Stmt *>()[arraySizeOffset()]))
+ return Result;
+
+ return std::nullopt;
}
unsigned getNumPlacementArgs() const {
@@ -2298,16 +2385,12 @@ public:
bool isGlobalNew() const { return CXXNewExprBits.IsGlobalNew; }
/// Whether this new-expression has any initializer at all.
- bool hasInitializer() const {
- return CXXNewExprBits.StoredInitializationStyle > 0;
- }
+ bool hasInitializer() const { return CXXNewExprBits.HasInitializer; }
/// The kind of initializer this new-expression has.
- InitializationStyle getInitializationStyle() const {
- if (CXXNewExprBits.StoredInitializationStyle == 0)
- return NoInit;
- return static_cast<InitializationStyle>(
- CXXNewExprBits.StoredInitializationStyle - 1);
+ CXXNewInitializationStyle getInitializationStyle() const {
+ return static_cast<CXXNewInitializationStyle>(
+ CXXNewExprBits.StoredInitializationStyle);
}
/// The initializer of this new-expression.
@@ -2522,6 +2605,7 @@ class CXXPseudoDestructorExpr : public Expr {
/// Whether the operator was an arrow ('->'); otherwise, it was a
/// period ('.').
+ LLVM_PREFERRED_TYPE(bool)
bool IsArrow : 1;
/// The location of the '.' or '->' operator.
@@ -2721,8 +2805,7 @@ public:
/// Retrieve the argument types.
ArrayRef<TypeSourceInfo *> getArgs() const {
- return llvm::makeArrayRef(getTrailingObjects<TypeSourceInfo *>(),
- getNumArgs());
+ return llvm::ArrayRef(getTrailingObjects<TypeSourceInfo *>(), getNumArgs());
}
SourceLocation getBeginLoc() const LLVM_READONLY { return Loc; }
@@ -2752,6 +2835,7 @@ public:
/// \endcode
class ArrayTypeTraitExpr : public Expr {
/// The trait. An ArrayTypeTrait enum in MSVC compat unsigned.
+ LLVM_PREFERRED_TYPE(ArrayTypeTrait)
unsigned ATT : 2;
/// The value of the type trait. Unspecified if dependent.
@@ -2822,9 +2906,11 @@ public:
/// \endcode
class ExpressionTraitExpr : public Expr {
/// The trait. A ExpressionTrait enum in MSVC compatible unsigned.
+ LLVM_PREFERRED_TYPE(ExpressionTrait)
unsigned ET : 31;
/// The value of the type trait. Unspecified if dependent.
+ LLVM_PREFERRED_TYPE(bool)
unsigned Value : 1;
/// The location of the type trait keyword.
@@ -3104,7 +3190,8 @@ class UnresolvedLookupExpr final
const DeclarationNameInfo &NameInfo, bool RequiresADL,
bool Overloaded,
const TemplateArgumentListInfo *TemplateArgs,
- UnresolvedSetIterator Begin, UnresolvedSetIterator End);
+ UnresolvedSetIterator Begin, UnresolvedSetIterator End,
+ bool KnownDependent);
UnresolvedLookupExpr(EmptyShell Empty, unsigned NumResults,
bool HasTemplateKWAndArgsInfo);
@@ -3124,12 +3211,15 @@ public:
const DeclarationNameInfo &NameInfo, bool RequiresADL, bool Overloaded,
UnresolvedSetIterator Begin, UnresolvedSetIterator End);
+ // After canonicalization, there may be dependent template arguments in
+ // CanonicalConverted But none of Args is dependent. When any of
+ // CanonicalConverted dependent, KnownDependent is true.
static UnresolvedLookupExpr *
Create(const ASTContext &Context, CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo, bool RequiresADL,
const TemplateArgumentListInfo *Args, UnresolvedSetIterator Begin,
- UnresolvedSetIterator End);
+ UnresolvedSetIterator End, bool KnownDependent);
static UnresolvedLookupExpr *CreateEmpty(const ASTContext &Context,
unsigned NumResults,
@@ -3368,8 +3458,7 @@ public:
ArrayRef<CleanupObject> objects);
ArrayRef<CleanupObject> getObjects() const {
- return llvm::makeArrayRef(getTrailingObjects<CleanupObject>(),
- getNumObjects());
+ return llvm::ArrayRef(getTrailingObjects<CleanupObject>(), getNumObjects());
}
unsigned getNumObjects() const { return ExprWithCleanupsBits.NumObjects; }
@@ -3431,8 +3520,9 @@ class CXXUnresolvedConstructExpr final
friend class ASTStmtReader;
friend TrailingObjects;
- /// The type being constructed.
- TypeSourceInfo *TSI;
+ /// The type being constructed, and whether the construct expression models
+ /// list initialization or not.
+ llvm::PointerIntPair<TypeSourceInfo *, 1> TypeAndInitForm;
/// The location of the left parentheses ('(').
SourceLocation LParenLoc;
@@ -3442,30 +3532,31 @@ class CXXUnresolvedConstructExpr final
CXXUnresolvedConstructExpr(QualType T, TypeSourceInfo *TSI,
SourceLocation LParenLoc, ArrayRef<Expr *> Args,
- SourceLocation RParenLoc);
+ SourceLocation RParenLoc, bool IsListInit);
CXXUnresolvedConstructExpr(EmptyShell Empty, unsigned NumArgs)
- : Expr(CXXUnresolvedConstructExprClass, Empty), TSI(nullptr) {
+ : Expr(CXXUnresolvedConstructExprClass, Empty) {
CXXUnresolvedConstructExprBits.NumArgs = NumArgs;
}
public:
- static CXXUnresolvedConstructExpr *Create(const ASTContext &Context,
- QualType T, TypeSourceInfo *TSI,
- SourceLocation LParenLoc,
- ArrayRef<Expr *> Args,
- SourceLocation RParenLoc);
+ static CXXUnresolvedConstructExpr *
+ Create(const ASTContext &Context, QualType T, TypeSourceInfo *TSI,
+ SourceLocation LParenLoc, ArrayRef<Expr *> Args,
+ SourceLocation RParenLoc, bool IsListInit);
static CXXUnresolvedConstructExpr *CreateEmpty(const ASTContext &Context,
unsigned NumArgs);
/// Retrieve the type that is being constructed, as specified
/// in the source code.
- QualType getTypeAsWritten() const { return TSI->getType(); }
+ QualType getTypeAsWritten() const { return getTypeSourceInfo()->getType(); }
/// Retrieve the type source information for the type being
/// constructed.
- TypeSourceInfo *getTypeSourceInfo() const { return TSI; }
+ TypeSourceInfo *getTypeSourceInfo() const {
+ return TypeAndInitForm.getPointer();
+ }
/// Retrieve the location of the left parentheses ('(') that
/// precedes the argument list.
@@ -3480,7 +3571,7 @@ public:
/// Determine whether this expression models list-initialization.
/// If so, there will be exactly one subexpression, which will be
/// an InitListExpr.
- bool isListInitialization() const { return LParenLoc.isInvalid(); }
+ bool isListInitialization() const { return TypeAndInitForm.getInt(); }
/// Retrieve the number of arguments.
unsigned getNumArgs() const { return CXXUnresolvedConstructExprBits.NumArgs; }
@@ -4065,7 +4156,7 @@ class PackExpansionExpr : public Expr {
public:
PackExpansionExpr(QualType T, Expr *Pattern, SourceLocation EllipsisLoc,
- Optional<unsigned> NumExpansions)
+ std::optional<unsigned> NumExpansions)
: Expr(PackExpansionExprClass, T, Pattern->getValueKind(),
Pattern->getObjectKind()),
EllipsisLoc(EllipsisLoc),
@@ -4088,11 +4179,11 @@ public:
/// Determine the number of expansions that will be produced when
/// this pack expansion is instantiated, if already known.
- Optional<unsigned> getNumExpansions() const {
+ std::optional<unsigned> getNumExpansions() const {
if (NumExpansions)
return NumExpansions - 1;
- return None;
+ return std::nullopt;
}
SourceLocation getBeginLoc() const LLVM_READONLY {
@@ -4159,7 +4250,7 @@ class SizeOfPackExpr final
/// the given parameter pack.
SizeOfPackExpr(QualType SizeType, SourceLocation OperatorLoc, NamedDecl *Pack,
SourceLocation PackLoc, SourceLocation RParenLoc,
- Optional<unsigned> Length,
+ std::optional<unsigned> Length,
ArrayRef<TemplateArgument> PartialArgs)
: Expr(SizeOfPackExprClass, SizeType, VK_PRValue, OK_Ordinary),
OperatorLoc(OperatorLoc), PackLoc(PackLoc), RParenLoc(RParenLoc),
@@ -4177,11 +4268,11 @@ class SizeOfPackExpr final
: Expr(SizeOfPackExprClass, Empty), Length(NumPartialArgs) {}
public:
- static SizeOfPackExpr *Create(ASTContext &Context, SourceLocation OperatorLoc,
- NamedDecl *Pack, SourceLocation PackLoc,
- SourceLocation RParenLoc,
- Optional<unsigned> Length = None,
- ArrayRef<TemplateArgument> PartialArgs = None);
+ static SizeOfPackExpr *
+ Create(ASTContext &Context, SourceLocation OperatorLoc, NamedDecl *Pack,
+ SourceLocation PackLoc, SourceLocation RParenLoc,
+ std::optional<unsigned> Length = std::nullopt,
+ ArrayRef<TemplateArgument> PartialArgs = std::nullopt);
static SizeOfPackExpr *CreateDeserialized(ASTContext &Context,
unsigned NumPartialArgs);
@@ -4220,7 +4311,7 @@ public:
ArrayRef<TemplateArgument> getPartialArguments() const {
assert(isPartiallySubstituted());
const auto *Args = getTrailingObjects<TemplateArgument>();
- return llvm::makeArrayRef(Args, Args + Length);
+ return llvm::ArrayRef(Args, Args + Length);
}
SourceLocation getBeginLoc() const LLVM_READONLY { return OperatorLoc; }
@@ -4246,24 +4337,30 @@ class SubstNonTypeTemplateParmExpr : public Expr {
friend class ASTReader;
friend class ASTStmtReader;
- /// The replaced parameter and a flag indicating if it was a reference
+ /// The replacement expression.
+ Stmt *Replacement;
+
+ /// The associated declaration and a flag indicating if it was a reference
/// parameter. For class NTTPs, we can't determine that based on the value
/// category alone.
- llvm::PointerIntPair<NonTypeTemplateParmDecl*, 1, bool> ParamAndRef;
+ llvm::PointerIntPair<Decl *, 1, bool> AssociatedDeclAndRef;
- /// The replacement expression.
- Stmt *Replacement;
+ unsigned Index : 15;
+ unsigned PackIndex : 16;
explicit SubstNonTypeTemplateParmExpr(EmptyShell Empty)
: Expr(SubstNonTypeTemplateParmExprClass, Empty) {}
public:
SubstNonTypeTemplateParmExpr(QualType Ty, ExprValueKind ValueKind,
- SourceLocation Loc,
- NonTypeTemplateParmDecl *Param, bool RefParam,
- Expr *Replacement)
+ SourceLocation Loc, Expr *Replacement,
+ Decl *AssociatedDecl, unsigned Index,
+ std::optional<unsigned> PackIndex, bool RefParam)
: Expr(SubstNonTypeTemplateParmExprClass, Ty, ValueKind, OK_Ordinary),
- ParamAndRef(Param, RefParam), Replacement(Replacement) {
+ Replacement(Replacement),
+ AssociatedDeclAndRef(AssociatedDecl, RefParam), Index(Index),
+ PackIndex(PackIndex ? *PackIndex + 1 : 0) {
+ assert(AssociatedDecl != nullptr);
SubstNonTypeTemplateParmExprBits.NameLoc = Loc;
setDependence(computeDependence(this));
}
@@ -4276,11 +4373,23 @@ public:
Expr *getReplacement() const { return cast<Expr>(Replacement); }
- NonTypeTemplateParmDecl *getParameter() const {
- return ParamAndRef.getPointer();
+ /// A template-like entity which owns the whole pattern being substituted.
+ /// This will own a set of template parameters.
+ Decl *getAssociatedDecl() const { return AssociatedDeclAndRef.getPointer(); }
+
+ /// Returns the index of the replaced parameter in the associated declaration.
+ /// This should match the result of `getParameter()->getIndex()`.
+ unsigned getIndex() const { return Index; }
+
+ std::optional<unsigned> getPackIndex() const {
+ if (PackIndex == 0)
+ return std::nullopt;
+ return PackIndex - 1;
}
- bool isReferenceParameter() const { return ParamAndRef.getInt(); }
+ NonTypeTemplateParmDecl *getParameter() const;
+
+ bool isReferenceParameter() const { return AssociatedDeclAndRef.getInt(); }
/// Determine the substituted type of the template parameter.
QualType getParameterType(const ASTContext &Ctx) const;
@@ -4314,14 +4423,16 @@ class SubstNonTypeTemplateParmPackExpr : public Expr {
friend class ASTStmtReader;
/// The non-type template parameter pack itself.
- NonTypeTemplateParmDecl *Param;
+ Decl *AssociatedDecl;
/// A pointer to the set of template arguments that this
/// parameter pack is instantiated with.
const TemplateArgument *Arguments;
/// The number of template arguments in \c Arguments.
- unsigned NumArguments;
+ unsigned NumArguments : 16;
+
+ unsigned Index : 16;
/// The location of the non-type template parameter pack reference.
SourceLocation NameLoc;
@@ -4330,14 +4441,21 @@ class SubstNonTypeTemplateParmPackExpr : public Expr {
: Expr(SubstNonTypeTemplateParmPackExprClass, Empty) {}
public:
- SubstNonTypeTemplateParmPackExpr(QualType T,
- ExprValueKind ValueKind,
- NonTypeTemplateParmDecl *Param,
+ SubstNonTypeTemplateParmPackExpr(QualType T, ExprValueKind ValueKind,
SourceLocation NameLoc,
- const TemplateArgument &ArgPack);
+ const TemplateArgument &ArgPack,
+ Decl *AssociatedDecl, unsigned Index);
+
+ /// A template-like entity which owns the whole pattern being substituted.
+ /// This will own a set of template parameters.
+ Decl *getAssociatedDecl() const { return AssociatedDecl; }
+
+ /// Returns the index of the replaced parameter in the associated declaration.
+ /// This should match the result of `getParameterPack()->getIndex()`.
+ unsigned getIndex() const { return Index; }
/// Retrieve the non-type template parameter pack being substituted.
- NonTypeTemplateParmDecl *getParameterPack() const { return Param; }
+ NonTypeTemplateParmDecl *getParameterPack() const;
/// Retrieve the location of the parameter pack name.
SourceLocation getParameterPackLocation() const { return NameLoc; }
@@ -4590,7 +4708,7 @@ public:
CXXFoldExpr(QualType T, UnresolvedLookupExpr *Callee,
SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Opcode,
SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc,
- Optional<unsigned> NumExpansions)
+ std::optional<unsigned> NumExpansions)
: Expr(CXXFoldExprClass, T, VK_PRValue, OK_Ordinary),
LParenLoc(LParenLoc), EllipsisLoc(EllipsisLoc), RParenLoc(RParenLoc),
NumExpansions(NumExpansions ? *NumExpansions + 1 : 0), Opcode(Opcode) {
@@ -4627,10 +4745,10 @@ public:
SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
BinaryOperatorKind getOperator() const { return Opcode; }
- Optional<unsigned> getNumExpansions() const {
+ std::optional<unsigned> getNumExpansions() const {
if (NumExpansions)
return NumExpansions - 1;
- return None;
+ return std::nullopt;
}
SourceLocation getBeginLoc() const LLVM_READONLY {
@@ -4663,6 +4781,140 @@ public:
}
};
+/// Represents a list-initialization with parenthesis.
+///
+/// As per P0960R3, this is a C++20 feature that allows aggregate to
+/// be initialized with a parenthesized list of values:
+/// ```
+/// struct A {
+/// int a;
+/// double b;
+/// };
+///
+/// void foo() {
+/// A a1(0); // Well-formed in C++20
+/// A a2(1.5, 1.0); // Well-formed in C++20
+/// }
+/// ```
+/// It has some sort of similiarity to braced
+/// list-initialization, with some differences such as
+/// it allows narrowing conversion whilst braced
+/// list-initialization doesn't.
+/// ```
+/// struct A {
+/// char a;
+/// };
+/// void foo() {
+/// A a(1.5); // Well-formed in C++20
+/// A b{1.5}; // Ill-formed !
+/// }
+/// ```
+class CXXParenListInitExpr final
+ : public Expr,
+ private llvm::TrailingObjects<CXXParenListInitExpr, Expr *> {
+ friend class TrailingObjects;
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+
+ unsigned NumExprs;
+ unsigned NumUserSpecifiedExprs;
+ SourceLocation InitLoc, LParenLoc, RParenLoc;
+ llvm::PointerUnion<Expr *, FieldDecl *> ArrayFillerOrUnionFieldInit;
+
+ CXXParenListInitExpr(ArrayRef<Expr *> Args, QualType T,
+ unsigned NumUserSpecifiedExprs, SourceLocation InitLoc,
+ SourceLocation LParenLoc, SourceLocation RParenLoc)
+ : Expr(CXXParenListInitExprClass, T, getValueKindForType(T), OK_Ordinary),
+ NumExprs(Args.size()), NumUserSpecifiedExprs(NumUserSpecifiedExprs),
+ InitLoc(InitLoc), LParenLoc(LParenLoc), RParenLoc(RParenLoc) {
+ std::copy(Args.begin(), Args.end(), getTrailingObjects<Expr *>());
+ assert(NumExprs >= NumUserSpecifiedExprs &&
+ "number of user specified inits is greater than the number of "
+ "passed inits");
+ setDependence(computeDependence(this));
+ }
+
+ size_t numTrailingObjects(OverloadToken<Expr *>) const { return NumExprs; }
+
+public:
+ static CXXParenListInitExpr *
+ Create(ASTContext &C, ArrayRef<Expr *> Args, QualType T,
+ unsigned NumUserSpecifiedExprs, SourceLocation InitLoc,
+ SourceLocation LParenLoc, SourceLocation RParenLoc);
+
+ static CXXParenListInitExpr *CreateEmpty(ASTContext &C, unsigned numExprs,
+ EmptyShell Empty);
+
+ explicit CXXParenListInitExpr(EmptyShell Empty, unsigned NumExprs)
+ : Expr(CXXParenListInitExprClass, Empty), NumExprs(NumExprs),
+ NumUserSpecifiedExprs(0) {}
+
+ void updateDependence() { setDependence(computeDependence(this)); }
+
+ ArrayRef<Expr *> getInitExprs() {
+ return ArrayRef(getTrailingObjects<Expr *>(), NumExprs);
+ }
+
+ const ArrayRef<Expr *> getInitExprs() const {
+ return ArrayRef(getTrailingObjects<Expr *>(), NumExprs);
+ }
+
+ ArrayRef<Expr *> getUserSpecifiedInitExprs() {
+ return ArrayRef(getTrailingObjects<Expr *>(), NumUserSpecifiedExprs);
+ }
+
+ const ArrayRef<Expr *> getUserSpecifiedInitExprs() const {
+ return ArrayRef(getTrailingObjects<Expr *>(), NumUserSpecifiedExprs);
+ }
+
+ SourceLocation getBeginLoc() const LLVM_READONLY { return LParenLoc; }
+
+ SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }
+
+ SourceLocation getInitLoc() const LLVM_READONLY { return InitLoc; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(getBeginLoc(), getEndLoc());
+ }
+
+ void setArrayFiller(Expr *E) { ArrayFillerOrUnionFieldInit = E; }
+
+ Expr *getArrayFiller() {
+ return ArrayFillerOrUnionFieldInit.dyn_cast<Expr *>();
+ }
+
+ const Expr *getArrayFiller() const {
+ return ArrayFillerOrUnionFieldInit.dyn_cast<Expr *>();
+ }
+
+ void setInitializedFieldInUnion(FieldDecl *FD) {
+ ArrayFillerOrUnionFieldInit = FD;
+ }
+
+ FieldDecl *getInitializedFieldInUnion() {
+ return ArrayFillerOrUnionFieldInit.dyn_cast<FieldDecl *>();
+ }
+
+ const FieldDecl *getInitializedFieldInUnion() const {
+ return ArrayFillerOrUnionFieldInit.dyn_cast<FieldDecl *>();
+ }
+
+ child_range children() {
+ Stmt **Begin = reinterpret_cast<Stmt **>(getTrailingObjects<Expr *>());
+ return child_range(Begin, Begin + NumExprs);
+ }
+
+ const_child_range children() const {
+ Stmt *const *Begin =
+ reinterpret_cast<Stmt *const *>(getTrailingObjects<Expr *>());
+ return const_child_range(Begin, Begin + NumExprs);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXParenListInitExprClass;
+ }
+};
+
/// Represents an expression that might suspend coroutine execution;
/// either a co_await or co_yield expression.
///
@@ -4681,18 +4933,19 @@ class CoroutineSuspendExpr : public Expr {
SourceLocation KeywordLoc;
- enum SubExpr { Common, Ready, Suspend, Resume, Count };
+ enum SubExpr { Operand, Common, Ready, Suspend, Resume, Count };
Stmt *SubExprs[SubExpr::Count];
OpaqueValueExpr *OpaqueValue = nullptr;
public:
- CoroutineSuspendExpr(StmtClass SC, SourceLocation KeywordLoc, Expr *Common,
- Expr *Ready, Expr *Suspend, Expr *Resume,
+ CoroutineSuspendExpr(StmtClass SC, SourceLocation KeywordLoc, Expr *Operand,
+ Expr *Common, Expr *Ready, Expr *Suspend, Expr *Resume,
OpaqueValueExpr *OpaqueValue)
: Expr(SC, Resume->getType(), Resume->getValueKind(),
Resume->getObjectKind()),
KeywordLoc(KeywordLoc), OpaqueValue(OpaqueValue) {
+ SubExprs[SubExpr::Operand] = Operand;
SubExprs[SubExpr::Common] = Common;
SubExprs[SubExpr::Ready] = Ready;
SubExprs[SubExpr::Suspend] = Suspend;
@@ -4701,10 +4954,11 @@ public:
}
CoroutineSuspendExpr(StmtClass SC, SourceLocation KeywordLoc, QualType Ty,
- Expr *Common)
+ Expr *Operand, Expr *Common)
: Expr(SC, Ty, VK_PRValue, OK_Ordinary), KeywordLoc(KeywordLoc) {
assert(Common->isTypeDependent() && Ty->isDependentType() &&
"wrong constructor for non-dependent co_await/co_yield expression");
+ SubExprs[SubExpr::Operand] = Operand;
SubExprs[SubExpr::Common] = Common;
SubExprs[SubExpr::Ready] = nullptr;
SubExprs[SubExpr::Suspend] = nullptr;
@@ -4713,14 +4967,13 @@ public:
}
CoroutineSuspendExpr(StmtClass SC, EmptyShell Empty) : Expr(SC, Empty) {
+ SubExprs[SubExpr::Operand] = nullptr;
SubExprs[SubExpr::Common] = nullptr;
SubExprs[SubExpr::Ready] = nullptr;
SubExprs[SubExpr::Suspend] = nullptr;
SubExprs[SubExpr::Resume] = nullptr;
}
- SourceLocation getKeywordLoc() const { return KeywordLoc; }
-
Expr *getCommonExpr() const {
return static_cast<Expr*>(SubExprs[SubExpr::Common]);
}
@@ -4740,10 +4993,17 @@ public:
return static_cast<Expr*>(SubExprs[SubExpr::Resume]);
}
+ // The syntactic operand written in the code
+ Expr *getOperand() const {
+ return static_cast<Expr *>(SubExprs[SubExpr::Operand]);
+ }
+
+ SourceLocation getKeywordLoc() const { return KeywordLoc; }
+
SourceLocation getBeginLoc() const LLVM_READONLY { return KeywordLoc; }
SourceLocation getEndLoc() const LLVM_READONLY {
- return getCommonExpr()->getEndLoc();
+ return getOperand()->getEndLoc();
}
child_range children() {
@@ -4765,28 +5025,24 @@ class CoawaitExpr : public CoroutineSuspendExpr {
friend class ASTStmtReader;
public:
- CoawaitExpr(SourceLocation CoawaitLoc, Expr *Operand, Expr *Ready,
- Expr *Suspend, Expr *Resume, OpaqueValueExpr *OpaqueValue,
- bool IsImplicit = false)
- : CoroutineSuspendExpr(CoawaitExprClass, CoawaitLoc, Operand, Ready,
- Suspend, Resume, OpaqueValue) {
+ CoawaitExpr(SourceLocation CoawaitLoc, Expr *Operand, Expr *Common,
+ Expr *Ready, Expr *Suspend, Expr *Resume,
+ OpaqueValueExpr *OpaqueValue, bool IsImplicit = false)
+ : CoroutineSuspendExpr(CoawaitExprClass, CoawaitLoc, Operand, Common,
+ Ready, Suspend, Resume, OpaqueValue) {
CoawaitBits.IsImplicit = IsImplicit;
}
CoawaitExpr(SourceLocation CoawaitLoc, QualType Ty, Expr *Operand,
- bool IsImplicit = false)
- : CoroutineSuspendExpr(CoawaitExprClass, CoawaitLoc, Ty, Operand) {
+ Expr *Common, bool IsImplicit = false)
+ : CoroutineSuspendExpr(CoawaitExprClass, CoawaitLoc, Ty, Operand,
+ Common) {
CoawaitBits.IsImplicit = IsImplicit;
}
CoawaitExpr(EmptyShell Empty)
: CoroutineSuspendExpr(CoawaitExprClass, Empty) {}
- Expr *getOperand() const {
- // FIXME: Dig out the actual operand or store it.
- return getCommonExpr();
- }
-
bool isImplicit() const { return CoawaitBits.IsImplicit; }
void setIsImplicit(bool value = true) { CoawaitBits.IsImplicit = value; }
@@ -4850,20 +5106,18 @@ class CoyieldExpr : public CoroutineSuspendExpr {
friend class ASTStmtReader;
public:
- CoyieldExpr(SourceLocation CoyieldLoc, Expr *Operand, Expr *Ready,
- Expr *Suspend, Expr *Resume, OpaqueValueExpr *OpaqueValue)
- : CoroutineSuspendExpr(CoyieldExprClass, CoyieldLoc, Operand, Ready,
- Suspend, Resume, OpaqueValue) {}
- CoyieldExpr(SourceLocation CoyieldLoc, QualType Ty, Expr *Operand)
- : CoroutineSuspendExpr(CoyieldExprClass, CoyieldLoc, Ty, Operand) {}
+ CoyieldExpr(SourceLocation CoyieldLoc, Expr *Operand, Expr *Common,
+ Expr *Ready, Expr *Suspend, Expr *Resume,
+ OpaqueValueExpr *OpaqueValue)
+ : CoroutineSuspendExpr(CoyieldExprClass, CoyieldLoc, Operand, Common,
+ Ready, Suspend, Resume, OpaqueValue) {}
+ CoyieldExpr(SourceLocation CoyieldLoc, QualType Ty, Expr *Operand,
+ Expr *Common)
+ : CoroutineSuspendExpr(CoyieldExprClass, CoyieldLoc, Ty, Operand,
+ Common) {}
CoyieldExpr(EmptyShell Empty)
: CoroutineSuspendExpr(CoyieldExprClass, Empty) {}
- Expr *getOperand() const {
- // FIXME: Dig out the actual operand or store it.
- return getCommonExpr();
- }
-
static bool classof(const Stmt *T) {
return T->getStmtClass() == CoyieldExprClass;
}
diff --git a/contrib/llvm-project/clang/include/clang/AST/ExprConcepts.h b/contrib/llvm-project/clang/include/clang/AST/ExprConcepts.h
index 1544c498ef66..29913fd84c58 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ExprConcepts.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ExprConcepts.h
@@ -14,19 +14,21 @@
#ifndef LLVM_CLANG_AST_EXPRCONCEPTS_H
#define LLVM_CLANG_AST_EXPRCONCEPTS_H
-#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTConcept.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
-#include "clang/AST/DeclarationName.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TrailingObjects.h"
-#include <utility>
#include <string>
+#include <utility>
namespace clang {
class ASTStmtReader;
@@ -37,74 +39,91 @@ class ASTStmtWriter;
///
/// According to C++2a [expr.prim.id]p3 an id-expression that denotes the
/// specialization of a concept results in a prvalue of type bool.
-class ConceptSpecializationExpr final : public Expr, public ConceptReference,
- private llvm::TrailingObjects<ConceptSpecializationExpr,
- TemplateArgument> {
+class ConceptSpecializationExpr final : public Expr {
+ friend class ASTReader;
friend class ASTStmtReader;
- friend TrailingObjects;
-public:
- using SubstitutionDiagnostic = std::pair<SourceLocation, std::string>;
-protected:
- /// \brief The number of template arguments in the tail-allocated list of
- /// converted template arguments.
- unsigned NumTemplateArgs;
+private:
+ ConceptReference *ConceptRef;
+
+ /// \brief The Implicit Concept Specialization Decl, which holds the template
+ /// arguments for this specialization.
+ ImplicitConceptSpecializationDecl *SpecDecl;
/// \brief Information about the satisfaction of the named concept with the
/// given arguments. If this expression is value dependent, this is to be
/// ignored.
ASTConstraintSatisfaction *Satisfaction;
- ConceptSpecializationExpr(const ASTContext &C, NestedNameSpecifierLoc NNS,
- SourceLocation TemplateKWLoc,
- DeclarationNameInfo ConceptNameInfo,
- NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
- const ASTTemplateArgumentListInfo *ArgsAsWritten,
- ArrayRef<TemplateArgument> ConvertedArgs,
+ ConceptSpecializationExpr(const ASTContext &C, ConceptReference *ConceptRef,
+ ImplicitConceptSpecializationDecl *SpecDecl,
const ConstraintSatisfaction *Satisfaction);
- ConceptSpecializationExpr(const ASTContext &C, ConceptDecl *NamedConcept,
- ArrayRef<TemplateArgument> ConvertedArgs,
+ ConceptSpecializationExpr(const ASTContext &C, ConceptReference *ConceptRef,
+ ImplicitConceptSpecializationDecl *SpecDecl,
const ConstraintSatisfaction *Satisfaction,
bool Dependent,
bool ContainsUnexpandedParameterPack);
-
- ConceptSpecializationExpr(EmptyShell Empty, unsigned NumTemplateArgs);
+ ConceptSpecializationExpr(EmptyShell Empty);
public:
-
static ConceptSpecializationExpr *
- Create(const ASTContext &C, NestedNameSpecifierLoc NNS,
- SourceLocation TemplateKWLoc, DeclarationNameInfo ConceptNameInfo,
- NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
- const ASTTemplateArgumentListInfo *ArgsAsWritten,
- ArrayRef<TemplateArgument> ConvertedArgs,
+ Create(const ASTContext &C, ConceptReference *ConceptRef,
+ ImplicitConceptSpecializationDecl *SpecDecl,
const ConstraintSatisfaction *Satisfaction);
static ConceptSpecializationExpr *
- Create(const ASTContext &C, ConceptDecl *NamedConcept,
- ArrayRef<TemplateArgument> ConvertedArgs,
- const ConstraintSatisfaction *Satisfaction,
- bool Dependent,
+ Create(const ASTContext &C, ConceptReference *ConceptRef,
+ ImplicitConceptSpecializationDecl *SpecDecl,
+ const ConstraintSatisfaction *Satisfaction, bool Dependent,
bool ContainsUnexpandedParameterPack);
- static ConceptSpecializationExpr *
- Create(ASTContext &C, EmptyShell Empty, unsigned NumTemplateArgs);
-
ArrayRef<TemplateArgument> getTemplateArguments() const {
- return ArrayRef<TemplateArgument>(getTrailingObjects<TemplateArgument>(),
- NumTemplateArgs);
+ return SpecDecl->getTemplateArguments();
+ }
+
+ ConceptReference *getConceptReference() const { return ConceptRef; }
+
+ ConceptDecl *getNamedConcept() const { return ConceptRef->getNamedConcept(); }
+
+ // FIXME: Several of the following functions can be removed. Instead the
+ // caller can directly work with the ConceptReference.
+ bool hasExplicitTemplateArgs() const {
+ return ConceptRef->hasExplicitTemplateArgs();
+ }
+
+ SourceLocation getConceptNameLoc() const {
+ return ConceptRef->getConceptNameLoc();
+ }
+ const ASTTemplateArgumentListInfo *getTemplateArgsAsWritten() const {
+ return ConceptRef->getTemplateArgsAsWritten();
+ }
+
+ const NestedNameSpecifierLoc &getNestedNameSpecifierLoc() const {
+ return ConceptRef->getNestedNameSpecifierLoc();
+ }
+
+ SourceLocation getTemplateKWLoc() const {
+ return ConceptRef->getTemplateKWLoc();
}
- /// \brief Set new template arguments for this concept specialization.
- void setTemplateArguments(ArrayRef<TemplateArgument> Converted);
+ NamedDecl *getFoundDecl() const { return ConceptRef->getFoundDecl(); }
+
+ const DeclarationNameInfo &getConceptNameInfo() const {
+ return ConceptRef->getConceptNameInfo();
+ }
+
+ const ImplicitConceptSpecializationDecl *getSpecializationDecl() const {
+ assert(SpecDecl && "Template Argument Decl not initialized");
+ return SpecDecl;
+ }
/// \brief Whether or not the concept with the given arguments was satisfied
/// when the expression was created.
/// The expression must not be dependent.
bool isSatisfied() const {
- assert(!isValueDependent()
- && "isSatisfied called on a dependent ConceptSpecializationExpr");
+ assert(!isValueDependent() &&
+ "isSatisfied called on a dependent ConceptSpecializationExpr");
return Satisfaction->IsSatisfied;
}
@@ -112,8 +131,8 @@ public:
/// satisfaction of the named concept.
/// The expression must not be dependent.
const ASTConstraintSatisfaction &getSatisfaction() const {
- assert(!isValueDependent()
- && "getSatisfaction called on dependent ConceptSpecializationExpr");
+ assert(!isValueDependent() &&
+ "getSatisfaction called on dependent ConceptSpecializationExpr");
return *Satisfaction;
}
@@ -122,15 +141,15 @@ public:
}
SourceLocation getBeginLoc() const LLVM_READONLY {
- return ConceptName.getBeginLoc();
+ return ConceptRef->getBeginLoc();
}
SourceLocation getEndLoc() const LLVM_READONLY {
- // If the ConceptSpecializationExpr is the ImmediatelyDeclaredConstraint
- // of a TypeConstraint written syntactically as a constrained-parameter,
- // there may not be a template argument list.
- return ArgsAsWritten->RAngleLoc.isValid() ? ArgsAsWritten->RAngleLoc
- : ConceptName.getEndLoc();
+ return ConceptRef->getEndLoc();
+ }
+
+ SourceLocation getExprLoc() const LLVM_READONLY {
+ return ConceptRef->getLocation();
}
// Iterators
@@ -154,8 +173,11 @@ public:
private:
const RequirementKind Kind;
// FIXME: use RequirementDependence to model dependence?
+ LLVM_PREFERRED_TYPE(bool)
bool Dependent : 1;
+ LLVM_PREFERRED_TYPE(bool)
bool ContainsUnexpandedParameterPack : 1;
+ LLVM_PREFERRED_TYPE(bool)
bool Satisfied : 1;
public:
struct SubstitutionDiagnostic {
@@ -275,12 +297,12 @@ public:
friend ASTStmtWriter;
/// \brief No return type requirement was specified.
- ReturnTypeRequirement() : TypeConstraintInfo(nullptr, 0) {}
+ ReturnTypeRequirement() : TypeConstraintInfo(nullptr, false) {}
/// \brief A return type requirement was specified but it was a
/// substitution failure.
ReturnTypeRequirement(SubstitutionDiagnostic *SubstDiag) :
- TypeConstraintInfo(SubstDiag, 0) {}
+ TypeConstraintInfo(SubstDiag, false) {}
/// \brief A 'type constraint' style return type requirement.
/// \param TPL an invented template parameter list containing a single
@@ -405,57 +427,61 @@ public:
/// \brief A requires-expression requirement which is satisfied when a general
/// constraint expression is satisfied ('nested' requirements).
class NestedRequirement : public Requirement {
- llvm::PointerUnion<Expr *, SubstitutionDiagnostic *> Value;
+ Expr *Constraint = nullptr;
const ASTConstraintSatisfaction *Satisfaction = nullptr;
+ bool HasInvalidConstraint = false;
+ StringRef InvalidConstraintEntity;
public:
friend ASTStmtReader;
friend ASTStmtWriter;
- NestedRequirement(SubstitutionDiagnostic *SubstDiag) :
- Requirement(RK_Nested, /*Dependent=*/false,
- /*ContainsUnexpandedParameterPack*/false,
- /*Satisfied=*/false), Value(SubstDiag) {}
-
- NestedRequirement(Expr *Constraint) :
- Requirement(RK_Nested, /*Dependent=*/true,
- Constraint->containsUnexpandedParameterPack()),
- Value(Constraint) {
+ NestedRequirement(Expr *Constraint)
+ : Requirement(RK_Nested, /*IsDependent=*/true,
+ Constraint->containsUnexpandedParameterPack()),
+ Constraint(Constraint) {
assert(Constraint->isInstantiationDependent() &&
"Nested requirement with non-dependent constraint must be "
"constructed with a ConstraintSatisfaction object");
}
NestedRequirement(ASTContext &C, Expr *Constraint,
- const ConstraintSatisfaction &Satisfaction) :
- Requirement(RK_Nested, Constraint->isInstantiationDependent(),
- Constraint->containsUnexpandedParameterPack(),
- Satisfaction.IsSatisfied),
- Value(Constraint),
- Satisfaction(ASTConstraintSatisfaction::Create(C, Satisfaction)) {}
-
- bool isSubstitutionFailure() const {
- return Value.is<SubstitutionDiagnostic *>();
- }
-
- SubstitutionDiagnostic *getSubstitutionDiagnostic() const {
- assert(isSubstitutionFailure() &&
- "getSubstitutionDiagnostic() may not be called when there was no "
- "substitution failure.");
- return Value.get<SubstitutionDiagnostic *>();
+ const ConstraintSatisfaction &Satisfaction)
+ : Requirement(RK_Nested, Constraint->isInstantiationDependent(),
+ Constraint->containsUnexpandedParameterPack(),
+ Satisfaction.IsSatisfied),
+ Constraint(Constraint),
+ Satisfaction(ASTConstraintSatisfaction::Create(C, Satisfaction)) {}
+
+ NestedRequirement(StringRef InvalidConstraintEntity,
+ const ASTConstraintSatisfaction *Satisfaction)
+ : Requirement(RK_Nested,
+ /*IsDependent=*/false,
+ /*ContainsUnexpandedParameterPack*/ false,
+ Satisfaction->IsSatisfied),
+ Satisfaction(Satisfaction), HasInvalidConstraint(true),
+ InvalidConstraintEntity(InvalidConstraintEntity) {}
+
+ NestedRequirement(ASTContext &C, StringRef InvalidConstraintEntity,
+ const ConstraintSatisfaction &Satisfaction)
+ : NestedRequirement(InvalidConstraintEntity,
+ ASTConstraintSatisfaction::Create(C, Satisfaction)) {}
+
+ bool hasInvalidConstraint() const { return HasInvalidConstraint; }
+
+ StringRef getInvalidConstraintEntity() {
+ assert(hasInvalidConstraint());
+ return InvalidConstraintEntity;
}
Expr *getConstraintExpr() const {
- assert(!isSubstitutionFailure() && "getConstraintExpr() may not be called "
- "on nested requirements with "
- "substitution failures.");
- return Value.get<Expr *>();
+ assert(!hasInvalidConstraint() &&
+ "getConstraintExpr() may not be called "
+ "on nested requirements with invalid constraint.");
+ return Constraint;
}
const ASTConstraintSatisfaction &getConstraintSatisfaction() const {
- assert(!isSubstitutionFailure() && "getConstraintSatisfaction() may not be "
- "called on nested requirements with "
- "substitution failures.");
return *Satisfaction;
}
@@ -464,6 +490,13 @@ public:
}
};
+using EntityPrinter = llvm::function_ref<void(llvm::raw_ostream &)>;
+
+/// \brief create a Requirement::SubstitutionDiagnostic with only a
+/// SubstitutedEntity and DiagLoc using Sema's allocator.
+Requirement::SubstitutionDiagnostic *
+createSubstDiagAt(Sema &S, SourceLocation Location, EntityPrinter Printer);
+
} // namespace concepts
/// C++2a [expr.prim.req]:
@@ -481,6 +514,8 @@ class RequiresExpr final : public Expr,
unsigned NumLocalParameters;
unsigned NumRequirements;
RequiresExprBodyDecl *Body;
+ SourceLocation LParenLoc;
+ SourceLocation RParenLoc;
SourceLocation RBraceLoc;
unsigned numTrailingObjects(OverloadToken<ParmVarDecl *>) const {
@@ -492,19 +527,22 @@ class RequiresExpr final : public Expr,
}
RequiresExpr(ASTContext &C, SourceLocation RequiresKWLoc,
- RequiresExprBodyDecl *Body,
+ RequiresExprBodyDecl *Body, SourceLocation LParenLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
+ SourceLocation RParenLoc,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation RBraceLoc);
RequiresExpr(ASTContext &C, EmptyShell Empty, unsigned NumLocalParameters,
unsigned NumRequirements);
public:
- static RequiresExpr *
- Create(ASTContext &C, SourceLocation RequiresKWLoc,
- RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> LocalParameters,
- ArrayRef<concepts::Requirement *> Requirements,
- SourceLocation RBraceLoc);
+ static RequiresExpr *Create(ASTContext &C, SourceLocation RequiresKWLoc,
+ RequiresExprBodyDecl *Body,
+ SourceLocation LParenLoc,
+ ArrayRef<ParmVarDecl *> LocalParameters,
+ SourceLocation RParenLoc,
+ ArrayRef<concepts::Requirement *> Requirements,
+ SourceLocation RBraceLoc);
static RequiresExpr *
Create(ASTContext &C, EmptyShell Empty, unsigned NumLocalParameters,
unsigned NumRequirements);
@@ -527,10 +565,18 @@ public:
return RequiresExprBits.IsSatisfied;
}
+ void setSatisfied(bool IsSatisfied) {
+ assert(!isValueDependent() &&
+ "setSatisfied called on a dependent RequiresExpr");
+ RequiresExprBits.IsSatisfied = IsSatisfied;
+ }
+
SourceLocation getRequiresKWLoc() const {
return RequiresExprBits.RequiresKWLoc;
}
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+ SourceLocation getRParenLoc() const { return RParenLoc; }
SourceLocation getRBraceLoc() const { return RBraceLoc; }
static bool classof(const Stmt *T) {
diff --git a/contrib/llvm-project/clang/include/clang/AST/ExprObjC.h b/contrib/llvm-project/clang/include/clang/AST/ExprObjC.h
index b0f057dbaa02..f833916c91aa 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ExprObjC.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ExprObjC.h
@@ -27,8 +27,6 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/StringRef.h"
@@ -41,6 +39,7 @@
#include <cassert>
#include <cstddef>
#include <cstdint>
+#include <optional>
namespace clang {
@@ -272,7 +271,7 @@ struct ObjCDictionaryElement {
/// The number of elements this pack expansion will expand to, if
/// this is a pack expansion and is known.
- Optional<unsigned> NumExpansions;
+ std::optional<unsigned> NumExpansions;
/// Determines whether this dictionary element is a pack expansion.
bool isPackExpansion() const { return EllipsisLoc.isValid(); }
@@ -318,6 +317,7 @@ class ObjCDictionaryLiteral final
/// key/value pairs, which provide the locations of the ellipses (if
/// any) and number of elements in the expansion (if known). If
/// there are no pack expansions, we optimize away this storage.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasPackExpansions : 1;
SourceRange Range;
@@ -362,7 +362,8 @@ public:
ObjCDictionaryElement getKeyValueElement(unsigned Index) const {
assert((Index < NumElements) && "Arg access out of range!");
const KeyValuePair &KV = getTrailingObjects<KeyValuePair>()[Index];
- ObjCDictionaryElement Result = { KV.Key, KV.Value, SourceLocation(), None };
+ ObjCDictionaryElement Result = {KV.Key, KV.Value, SourceLocation(),
+ std::nullopt};
if (HasPackExpansions) {
const ExpansionData &Expansion =
getTrailingObjects<ExpansionData>()[Index];
@@ -554,9 +555,11 @@ class ObjCIvarRefExpr : public Expr {
SourceLocation OpLoc;
// True if this is "X->F", false if this is "X.F".
+ LLVM_PREFERRED_TYPE(bool)
bool IsArrow : 1;
// True if ivar reference has no base (self assumed).
+ LLVM_PREFERRED_TYPE(bool)
bool IsFreeIvar : 1;
public:
@@ -940,6 +943,23 @@ private:
class ObjCMessageExpr final
: public Expr,
private llvm::TrailingObjects<ObjCMessageExpr, void *, SourceLocation> {
+public:
+ /// The kind of receiver this message is sending to.
+ enum ReceiverKind {
+ /// The receiver is a class.
+ Class = 0,
+
+ /// The receiver is an object instance.
+ Instance,
+
+ /// The receiver is a superclass.
+ SuperClass,
+
+ /// The receiver is the instance of the superclass object.
+ SuperInstance
+ };
+
+private:
/// Stores either the selector that this message is sending
/// to (when \c HasMethod is zero) or an \c ObjCMethodDecl pointer
/// referring to the method that we type-checked against.
@@ -955,6 +975,7 @@ class ObjCMessageExpr final
/// ReceiverKind values.
///
/// We pad this out to a byte to avoid excessive masking and shifting.
+ LLVM_PREFERRED_TYPE(ReceiverKind)
unsigned Kind : 8;
/// Whether we have an actual method prototype in \c
@@ -962,18 +983,22 @@ class ObjCMessageExpr final
///
/// When non-zero, we have a method declaration; otherwise, we just
/// have a selector.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasMethod : 1;
/// Whether this message send is a "delegate init call",
/// i.e. a call of an init method on self from within an init method.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsDelegateInitCall : 1;
/// Whether this message send was implicitly generated by
/// the implementation rather than explicitly written by the user.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsImplicit : 1;
/// Whether the locations of the selector identifiers are in a
/// "standard" position, a enum SelectorLocationsKind.
+ LLVM_PREFERRED_TYPE(SelectorLocationsKind)
unsigned SelLocsKind : 2;
/// When the message expression is a send to 'super', this is
@@ -1082,21 +1107,6 @@ public:
friend class ASTStmtWriter;
friend TrailingObjects;
- /// The kind of receiver this message is sending to.
- enum ReceiverKind {
- /// The receiver is a class.
- Class = 0,
-
- /// The receiver is an object instance.
- Instance,
-
- /// The receiver is a superclass.
- SuperClass,
-
- /// The receiver is the instance of the superclass object.
- SuperInstance
- };
-
/// Create a message send to super.
///
/// \param Context The ASTContext in which this expression will be created.
@@ -1415,11 +1425,10 @@ public:
SourceLocation getSelectorLoc(unsigned Index) const {
assert(Index < getNumSelectorLocs() && "Index out of range!");
if (hasStandardSelLocs())
- return getStandardSelectorLoc(Index, getSelector(),
- getSelLocsKind() == SelLoc_StandardWithSpace,
- llvm::makeArrayRef(const_cast<Expr**>(getArgs()),
- getNumArgs()),
- RBracLoc);
+ return getStandardSelectorLoc(
+ Index, getSelector(), getSelLocsKind() == SelLoc_StandardWithSpace,
+ llvm::ArrayRef(const_cast<Expr **>(getArgs()), getNumArgs()),
+ RBracLoc);
return getStoredSelLocs()[Index];
}
@@ -1632,6 +1641,7 @@ class ObjCBridgedCastExpr final
SourceLocation LParenLoc;
SourceLocation BridgeKeywordLoc;
+ LLVM_PREFERRED_TYPE(ObjCBridgeCastKind)
unsigned Kind : 2;
public:
@@ -1706,7 +1716,7 @@ public:
/// This may be '*', in which case this should fold to true.
bool hasVersion() const { return !VersionToCheck.empty(); }
- VersionTuple getVersion() { return VersionToCheck; }
+ VersionTuple getVersion() const { return VersionToCheck; }
child_range children() {
return child_range(child_iterator(), child_iterator());
diff --git a/contrib/llvm-project/clang/include/clang/AST/ExprOpenMP.h b/contrib/llvm-project/clang/include/clang/AST/ExprOpenMP.h
index be5dda992334..be5b1f3fdd11 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ExprOpenMP.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ExprOpenMP.h
@@ -202,12 +202,12 @@ public:
/// Fetches the dimensions for array shaping expression.
ArrayRef<Expr *> getDimensions() const {
- return llvm::makeArrayRef(getTrailingObjects<Expr *>(), NumDims);
+ return llvm::ArrayRef(getTrailingObjects<Expr *>(), NumDims);
}
/// Fetches source ranges for the brackets os the array shaping expression.
ArrayRef<SourceRange> getBracketsRanges() const {
- return llvm::makeArrayRef(getTrailingObjects<SourceRange>(), NumDims);
+ return llvm::ArrayRef(getTrailingObjects<SourceRange>(), NumDims);
}
/// Fetches base expression of array shaping expression.
diff --git a/contrib/llvm-project/clang/include/clang/AST/ExternalASTMerger.h b/contrib/llvm-project/clang/include/clang/AST/ExternalASTMerger.h
index 0230495a5ef3..ec4cfbe2175c 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ExternalASTMerger.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ExternalASTMerger.h
@@ -118,7 +118,7 @@ public:
/// Asks all connected ASTImporters if any of them imported the given
/// declaration. If any ASTImporter did import the given declaration,
/// then this function returns the declaration that D was imported from.
- /// Returns nullptr if no ASTImporter did import import D.
+ /// Returns nullptr if no ASTImporter did import D.
Decl *FindOriginalDecl(Decl *D);
/// Add a set of ASTContexts as possible origins.
diff --git a/contrib/llvm-project/clang/include/clang/AST/ExternalASTSource.h b/contrib/llvm-project/clang/include/clang/AST/ExternalASTSource.h
index b1851afcda37..8e573965b0a3 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ExternalASTSource.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ExternalASTSource.h
@@ -20,7 +20,6 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
@@ -30,6 +29,7 @@
#include <cstddef>
#include <cstdint>
#include <iterator>
+#include <optional>
#include <utility>
namespace clang {
@@ -160,7 +160,7 @@ public:
virtual Module *getModule(unsigned ID) { return nullptr; }
/// Return a descriptor for the corresponding module, if one exists.
- virtual llvm::Optional<ASTSourceDescriptor> getSourceDescriptor(unsigned ID);
+ virtual std::optional<ASTSourceDescriptor> getSourceDescriptor(unsigned ID);
enum ExtKind { EK_Always, EK_Never, EK_ReplyHazy };
@@ -371,7 +371,7 @@ public:
/// \param Source the external AST source.
///
/// \returns a pointer to the AST node.
- T* get(ExternalASTSource *Source) const {
+ T *get(ExternalASTSource *Source) const {
if (isOffset()) {
assert(Source &&
"Cannot deserialize a lazy pointer without an AST source");
@@ -379,6 +379,14 @@ public:
}
return reinterpret_cast<T*>(Ptr);
}
+
+ /// Retrieve the address of the AST node pointer. Deserializes the pointee if
+ /// necessary.
+ T **getAddressOfPointer(ExternalASTSource *Source) const {
+ // Ensure the integer is in pointer form.
+ (void)get(Source);
+ return reinterpret_cast<T**>(&Ptr);
+ }
};
/// A lazy value (of type T) that is within an AST node of type Owner,
diff --git a/contrib/llvm-project/clang/include/clang/AST/FormatString.h b/contrib/llvm-project/clang/include/clang/AST/FormatString.h
index 8c944451f796..5c4ad9baaef6 100644
--- a/contrib/llvm-project/clang/include/clang/AST/FormatString.h
+++ b/contrib/llvm-project/clang/include/clang/AST/FormatString.h
@@ -15,10 +15,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_ANALYSES_FORMATSTRING_H
-#define LLVM_CLANG_ANALYSIS_ANALYSES_FORMATSTRING_H
+#ifndef LLVM_CLANG_AST_FORMATSTRING_H
+#define LLVM_CLANG_AST_FORMATSTRING_H
#include "clang/AST/CanonicalType.h"
+#include <optional>
namespace clang {
@@ -127,8 +128,12 @@ public:
dArg,
DArg, // Apple extension
iArg,
+ // C23 conversion specifiers.
+ bArg,
+ BArg,
+
IntArgBeg = dArg,
- IntArgEnd = iArg,
+ IntArgEnd = BArg,
oArg,
OArg, // Apple extension
@@ -237,7 +242,7 @@ public:
bool isPrintfKind() const { return IsPrintf; }
- Optional<ConversionSpecifier> getStandardSpecifier() const;
+ std::optional<ConversionSpecifier> getStandardSpecifier() const;
protected:
bool IsPrintf;
@@ -257,8 +262,14 @@ public:
/// instance, "%d" and float.
NoMatch = 0,
/// The conversion specifier and the argument type are compatible. For
- /// instance, "%d" and _Bool.
+ /// instance, "%d" and int.
Match = 1,
+ /// The conversion specifier and the argument type are compatible because of
+ /// default argument promotions. For instance, "%hhd" and int.
+ MatchPromotion,
+ /// The conversion specifier and the argument type are compatible but still
+ /// seems likely to be an error. For instanace, "%hhd" and short.
+ NoMatchPromotionTypeConfusion,
/// The conversion specifier and the argument type are disallowed by the C
/// standard, but are in practice harmless. For instance, "%p" and int*.
NoMatchPedantic,
@@ -332,11 +343,11 @@ public:
unsigned amountLength,
bool usesPositionalArg)
: start(amountStart), length(amountLength), hs(howSpecified), amt(amount),
- UsesPositionalArg(usesPositionalArg), UsesDotPrefix(0) {}
+ UsesPositionalArg(usesPositionalArg), UsesDotPrefix(false) {}
OptionalAmount(bool valid = true)
: start(nullptr),length(0), hs(valid ? NotSpecified : Invalid), amt(0),
- UsesPositionalArg(0), UsesDotPrefix(0) {}
+ UsesPositionalArg(false), UsesDotPrefix(false) {}
explicit OptionalAmount(unsigned Amount)
: start(nullptr), length(0), hs(Constant), amt(Amount),
@@ -456,7 +467,7 @@ public:
bool hasStandardLengthModifier() const;
- Optional<LengthModifier> getCorrectedLengthModifier() const;
+ std::optional<LengthModifier> getCorrectedLengthModifier() const;
bool hasStandardConversionSpecifier(const LangOptions &LangOpt) const;
@@ -726,7 +737,8 @@ public:
virtual bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
const char *startSpecifier,
- unsigned specifierLen) {
+ unsigned specifierLen,
+ const TargetInfo &Target) {
return true;
}
diff --git a/contrib/llvm-project/clang/include/clang/AST/GlobalDecl.h b/contrib/llvm-project/clang/include/clang/AST/GlobalDecl.h
index 8cb56fb4ae90..88abba28c991 100644
--- a/contrib/llvm-project/clang/include/clang/AST/GlobalDecl.h
+++ b/contrib/llvm-project/clang/include/clang/AST/GlobalDecl.h
@@ -18,6 +18,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMapInfo.h"
@@ -129,8 +130,12 @@ public:
}
KernelReferenceKind getKernelReferenceKind() const {
- assert(isa<FunctionDecl>(getDecl()) &&
- cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() &&
+ assert(((isa<FunctionDecl>(getDecl()) &&
+ cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>()) ||
+ (isa<FunctionTemplateDecl>(getDecl()) &&
+ cast<FunctionTemplateDecl>(getDecl())
+ ->getTemplatedDecl()
+ ->hasAttr<CUDAGlobalAttr>())) &&
"Decl is not a GPU kernel!");
return static_cast<KernelReferenceKind>(Value.getInt());
}
diff --git a/contrib/llvm-project/clang/include/clang/AST/IgnoreExpr.h b/contrib/llvm-project/clang/include/clang/AST/IgnoreExpr.h
index a7e9b07bef6c..917bada61fa6 100644
--- a/contrib/llvm-project/clang/include/clang/AST/IgnoreExpr.h
+++ b/contrib/llvm-project/clang/include/clang/AST/IgnoreExpr.h
@@ -23,7 +23,8 @@ namespace detail {
inline Expr *IgnoreExprNodesImpl(Expr *E) { return E; }
template <typename FnTy, typename... FnTys>
Expr *IgnoreExprNodesImpl(Expr *E, FnTy &&Fn, FnTys &&... Fns) {
- return IgnoreExprNodesImpl(Fn(E), std::forward<FnTys>(Fns)...);
+ return IgnoreExprNodesImpl(std::forward<FnTy>(Fn)(E),
+ std::forward<FnTys>(Fns)...);
}
} // namespace detail
@@ -165,6 +166,11 @@ inline Expr *IgnoreParensSingleStep(Expr *E) {
return CE->getChosenSubExpr();
}
+ else if (auto *PE = dyn_cast<PredefinedExpr>(E)) {
+ if (PE->isTransparent() && PE->getFunctionName())
+ return PE->getFunctionName();
+ }
+
return E;
}
diff --git a/contrib/llvm-project/clang/include/clang/AST/JSONNodeDumper.h b/contrib/llvm-project/clang/include/clang/AST/JSONNodeDumper.h
index a96e21993e20..4def5389137f 100644
--- a/contrib/llvm-project/clang/include/clang/AST/JSONNodeDumper.h
+++ b/contrib/llvm-project/clang/include/clang/AST/JSONNodeDumper.h
@@ -1,9 +1,8 @@
//===--- JSONNodeDumper.h - Printing of AST nodes to JSON -----------------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -161,6 +160,7 @@ class JSONNodeDumper
std::string createPointerRepresentation(const void *Ptr);
llvm::json::Object createQualType(QualType QT, bool Desugar = true);
llvm::json::Object createBareDeclRef(const Decl *D);
+ llvm::json::Object createFPOptions(FPOptionsOverride FPO);
void writeBareDeclRef(const Decl *D);
llvm::json::Object createCXXRecordDefinitionData(const CXXRecordDecl *RD);
llvm::json::Object createCXXBaseSpecifier(const CXXBaseSpecifier &BS);
@@ -208,7 +208,16 @@ public:
void Visit(const concepts::Requirement *R);
void Visit(const APValue &Value, QualType Ty);
+ void VisitAliasAttr(const AliasAttr *AA);
+ void VisitCleanupAttr(const CleanupAttr *CA);
+ void VisitDeprecatedAttr(const DeprecatedAttr *DA);
+ void VisitUnavailableAttr(const UnavailableAttr *UA);
+ void VisitSectionAttr(const SectionAttr *SA);
+ void VisitVisibilityAttr(const VisibilityAttr *VA);
+ void VisitTLSModelAttr(const TLSModelAttr *TA);
+
void VisitTypedefType(const TypedefType *TT);
+ void VisitUsingType(const UsingType *TT);
void VisitFunctionType(const FunctionType *T);
void VisitFunctionProtoType(const FunctionProtoType *T);
void VisitRValueReferenceType(const ReferenceType *RT);
@@ -220,6 +229,9 @@ public:
void VisitUnaryTransformType(const UnaryTransformType *UTT);
void VisitTagType(const TagType *TT);
void VisitTemplateTypeParmType(const TemplateTypeParmType *TTPT);
+ void VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *STTPT);
+ void
+ VisitSubstTemplateTypeParmPackType(const SubstTemplateTypeParmPackType *T);
void VisitAutoType(const AutoType *AT);
void VisitTemplateSpecializationType(const TemplateSpecializationType *TST);
void VisitInjectedClassNameType(const InjectedClassNameType *ICNT);
@@ -245,6 +257,7 @@ public:
void VisitEnumConstantDecl(const EnumConstantDecl *ECD);
void VisitRecordDecl(const RecordDecl *RD);
void VisitCXXRecordDecl(const CXXRecordDecl *RD);
+ void VisitHLSLBufferDecl(const HLSLBufferDecl *D);
void VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D);
void VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *D);
void VisitTemplateTemplateParmDecl(const TemplateTemplateParmDecl *D);
@@ -272,6 +285,7 @@ public:
void VisitBinaryOperator(const BinaryOperator *BO);
void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO);
void VisitMemberExpr(const MemberExpr *ME);
+ void VisitAtomicExpr(const AtomicExpr *AE);
void VisitCXXNewExpr(const CXXNewExpr *NE);
void VisitCXXDeleteExpr(const CXXDeleteExpr *DE);
void VisitCXXThisExpr(const CXXThisExpr *TE);
@@ -318,6 +332,7 @@ public:
void VisitGotoStmt(const GotoStmt *GS);
void VisitWhileStmt(const WhileStmt *WS);
void VisitObjCAtCatchStmt(const ObjCAtCatchStmt *OACS);
+ void VisitCompoundStmt(const CompoundStmt *IS);
void VisitNullTemplateArgument(const TemplateArgument &TA);
void VisitTypeTemplateArgument(const TemplateArgument &TA);
@@ -379,7 +394,7 @@ class JSONDumper : public ASTNodeTraverser<JSONDumper, JSONNodeDumper> {
case TSK_ExplicitInstantiationDefinition:
if (!DumpExplicitInst)
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case TSK_Undeclared:
case TSK_ImplicitInstantiation:
if (DumpRefOnly)
diff --git a/contrib/llvm-project/clang/include/clang/AST/LambdaCapture.h b/contrib/llvm-project/clang/include/clang/AST/LambdaCapture.h
index 8e2806545dd6..62e7716ed369 100644
--- a/contrib/llvm-project/clang/include/clang/AST/LambdaCapture.h
+++ b/contrib/llvm-project/clang/include/clang/AST/LambdaCapture.h
@@ -71,7 +71,7 @@ public:
/// capture that is a pack expansion, or an invalid source
/// location to indicate that this is not a pack expansion.
LambdaCapture(SourceLocation Loc, bool Implicit, LambdaCaptureKind Kind,
- VarDecl *Var = nullptr,
+ ValueDecl *Var = nullptr,
SourceLocation EllipsisLoc = SourceLocation());
/// Determine the kind of capture.
@@ -86,7 +86,7 @@ public:
/// Determine whether this capture handles a variable.
bool capturesVariable() const {
- return dyn_cast_or_null<VarDecl>(DeclAndBits.getPointer());
+ return isa_and_nonnull<ValueDecl>(DeclAndBits.getPointer());
}
/// Determine whether this captures a variable length array bound
@@ -101,9 +101,9 @@ public:
///
/// This operation is only valid if this capture is a variable capture
/// (other than a capture of \c this).
- VarDecl *getCapturedVar() const {
+ ValueDecl *getCapturedVar() const {
assert(capturesVariable() && "No variable available for capture");
- return static_cast<VarDecl *>(DeclAndBits.getPointer());
+ return static_cast<ValueDecl *>(DeclAndBits.getPointer());
}
/// Determine whether this was an implicit capture (not
diff --git a/contrib/llvm-project/clang/include/clang/AST/LexicallyOrderedRecursiveASTVisitor.h b/contrib/llvm-project/clang/include/clang/AST/LexicallyOrderedRecursiveASTVisitor.h
index e42f0449f6db..054220b8a32c 100644
--- a/contrib/llvm-project/clang/include/clang/AST/LexicallyOrderedRecursiveASTVisitor.h
+++ b/contrib/llvm-project/clang/include/clang/AST/LexicallyOrderedRecursiveASTVisitor.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_AST_LEXICALLY_ORDERED_RECURSIVEASTVISITOR_H
-#define LLVM_CLANG_AST_LEXICALLY_ORDERED_RECURSIVEASTVISITOR_H
+#ifndef LLVM_CLANG_AST_LEXICALLYORDEREDRECURSIVEASTVISITOR_H
+#define LLVM_CLANG_AST_LEXICALLYORDEREDRECURSIVEASTVISITOR_H
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/LLVM.h"
@@ -160,4 +160,4 @@ private:
} // end namespace clang
-#endif // LLVM_CLANG_AST_LEXICALLY_ORDERED_RECURSIVEASTVISITOR_H
+#endif // LLVM_CLANG_AST_LEXICALLYORDEREDRECURSIVEASTVISITOR_H
diff --git a/contrib/llvm-project/clang/include/clang/AST/LocInfoType.h b/contrib/llvm-project/clang/include/clang/AST/LocInfoType.h
index 7e845ad03587..876c7deeceb9 100644
--- a/contrib/llvm-project/clang/include/clang/AST/LocInfoType.h
+++ b/contrib/llvm-project/clang/include/clang/AST/LocInfoType.h
@@ -10,8 +10,8 @@
// source-location information.
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_SEMA_LOCINFOTYPE_H
-#define LLVM_CLANG_SEMA_LOCINFOTYPE_H
+#ifndef LLVM_CLANG_AST_LOCINFOTYPE_H
+#define LLVM_CLANG_AST_LOCINFOTYPE_H
#include "clang/AST/Type.h"
@@ -54,4 +54,4 @@ public:
} // end namespace clang
-#endif // LLVM_CLANG_SEMA_LOCINFOTYPE_H
+#endif // LLVM_CLANG_AST_LOCINFOTYPE_H
diff --git a/contrib/llvm-project/clang/include/clang/AST/Mangle.h b/contrib/llvm-project/clang/include/clang/AST/Mangle.h
index 7d02f08e0120..e586b0cec43d 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Mangle.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Mangle.h
@@ -19,6 +19,7 @@
#include "clang/Basic/ABI.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/Casting.h"
+#include <optional>
namespace llvm {
class raw_ostream;
@@ -54,18 +55,23 @@ private:
ASTContext &Context;
DiagnosticsEngine &Diags;
const ManglerKind Kind;
+ /// For aux target. If true, uses mangling number for aux target from
+ /// ASTContext.
+ bool IsAux = false;
llvm::DenseMap<const BlockDecl*, unsigned> GlobalBlockIds;
llvm::DenseMap<const BlockDecl*, unsigned> LocalBlockIds;
llvm::DenseMap<const NamedDecl*, uint64_t> AnonStructIds;
+ llvm::DenseMap<const FunctionDecl*, unsigned> FuncAnonStructSize;
public:
ManglerKind getKind() const { return Kind; }
- explicit MangleContext(ASTContext &Context,
- DiagnosticsEngine &Diags,
- ManglerKind Kind)
- : Context(Context), Diags(Diags), Kind(Kind) {}
+ bool isAux() const { return IsAux; }
+
+ explicit MangleContext(ASTContext &Context, DiagnosticsEngine &Diags,
+ ManglerKind Kind, bool IsAux = false)
+ : Context(Context), Diags(Diags), Kind(Kind), IsAux(IsAux) {}
virtual ~MangleContext() { }
@@ -83,9 +89,17 @@ public:
return Result.first->second;
}
- uint64_t getAnonymousStructId(const NamedDecl *D) {
+ uint64_t getAnonymousStructId(const NamedDecl *D,
+ const FunctionDecl *FD = nullptr) {
+ auto FindResult = AnonStructIds.find(D);
+ if (FindResult != AnonStructIds.end())
+ return FindResult->second;
+
+ // If FunctionDecl is passed in, the anonymous structID will be per-function
+ // based.
+ unsigned Id = FD ? FuncAnonStructSize[FD]++ : AnonStructIds.size();
std::pair<llvm::DenseMap<const NamedDecl *, uint64_t>::iterator, bool>
- Result = AnonStructIds.insert(std::make_pair(D, AnonStructIds.size()));
+ Result = AnonStructIds.insert(std::make_pair(D, Id));
return Result.first->second;
}
@@ -126,7 +140,8 @@ public:
unsigned ManglingNumber,
raw_ostream &) = 0;
virtual void mangleCXXRTTI(QualType T, raw_ostream &) = 0;
- virtual void mangleCXXRTTIName(QualType T, raw_ostream &) = 0;
+ virtual void mangleCXXRTTIName(QualType T, raw_ostream &,
+ bool NormalizeIntegers = false) = 0;
virtual void mangleStringLiteral(const StringLiteral *SL, raw_ostream &) = 0;
virtual void mangleMSGuidDecl(const MSGuidDecl *GD, raw_ostream&);
@@ -153,17 +168,18 @@ public:
virtual void mangleDynamicAtExitDestructor(const VarDecl *D,
raw_ostream &) = 0;
- virtual void mangleSEHFilterExpression(const NamedDecl *EnclosingDecl,
+ virtual void mangleSEHFilterExpression(GlobalDecl EnclosingDecl,
raw_ostream &Out) = 0;
- virtual void mangleSEHFinallyBlock(const NamedDecl *EnclosingDecl,
+ virtual void mangleSEHFinallyBlock(GlobalDecl EnclosingDecl,
raw_ostream &Out) = 0;
/// Generates a unique string for an externally visible type for use with TBAA
/// or type uniquing.
/// TODO: Extend this to internal types by generating names that are unique
/// across translation units so it can be used with LTO.
- virtual void mangleTypeName(QualType T, raw_ostream &) = 0;
+ virtual void mangleCanonicalTypeName(QualType T, raw_ostream &,
+ bool NormalizeIntegers = false) = 0;
/// @}
};
@@ -171,9 +187,10 @@ public:
class ItaniumMangleContext : public MangleContext {
public:
using DiscriminatorOverrideTy =
- llvm::Optional<unsigned> (*)(ASTContext &, const NamedDecl *);
- explicit ItaniumMangleContext(ASTContext &C, DiagnosticsEngine &D)
- : MangleContext(C, D, MK_Itanium) {}
+ std::optional<unsigned> (*)(ASTContext &, const NamedDecl *);
+ explicit ItaniumMangleContext(ASTContext &C, DiagnosticsEngine &D,
+ bool IsAux = false)
+ : MangleContext(C, D, MK_Itanium, IsAux) {}
virtual void mangleCXXVTable(const CXXRecordDecl *RD, raw_ostream &) = 0;
virtual void mangleCXXVTT(const CXXRecordDecl *RD, raw_ostream &) = 0;
@@ -194,6 +211,8 @@ public:
virtual void mangleDynamicStermFinalizer(const VarDecl *D, raw_ostream &) = 0;
+ virtual void mangleModuleInitializer(const Module *Module, raw_ostream &) = 0;
+
// This has to live here, otherwise the CXXNameMangler won't have access to
// it.
virtual DiscriminatorOverrideTy getDiscriminatorOverride() const = 0;
@@ -201,17 +220,19 @@ public:
return C->getKind() == MK_Itanium;
}
- static ItaniumMangleContext *create(ASTContext &Context,
- DiagnosticsEngine &Diags);
+ static ItaniumMangleContext *
+ create(ASTContext &Context, DiagnosticsEngine &Diags, bool IsAux = false);
static ItaniumMangleContext *create(ASTContext &Context,
DiagnosticsEngine &Diags,
- DiscriminatorOverrideTy Discriminator);
+ DiscriminatorOverrideTy Discriminator,
+ bool IsAux = false);
};
class MicrosoftMangleContext : public MangleContext {
public:
- explicit MicrosoftMangleContext(ASTContext &C, DiagnosticsEngine &D)
- : MangleContext(C, D, MK_Microsoft) {}
+ explicit MicrosoftMangleContext(ASTContext &C, DiagnosticsEngine &D,
+ bool IsAux = false)
+ : MangleContext(C, D, MK_Microsoft, IsAux) {}
/// Mangle vftable symbols. Only a subset of the bases along the path
/// to the vftable are included in the name. It's up to the caller to pick
@@ -270,8 +291,8 @@ public:
return C->getKind() == MK_Microsoft;
}
- static MicrosoftMangleContext *create(ASTContext &Context,
- DiagnosticsEngine &Diags);
+ static MicrosoftMangleContext *
+ create(ASTContext &Context, DiagnosticsEngine &Diags, bool IsAux = false);
};
class ASTNameGenerator {
diff --git a/contrib/llvm-project/clang/include/clang/AST/MangleNumberingContext.h b/contrib/llvm-project/clang/include/clang/AST/MangleNumberingContext.h
index eb33759682d6..1313c94eb122 100644
--- a/contrib/llvm-project/clang/include/clang/AST/MangleNumberingContext.h
+++ b/contrib/llvm-project/clang/include/clang/AST/MangleNumberingContext.h
@@ -21,14 +21,15 @@ namespace clang {
class BlockDecl;
class CXXMethodDecl;
-class IdentifierInfo;
class TagDecl;
-class Type;
class VarDecl;
/// Keeps track of the mangled names of lambda expressions and block
/// literals within a particular context.
class MangleNumberingContext {
+ // The index of the next lambda we encounter in this context.
+ unsigned LambdaIndex = 0;
+
public:
virtual ~MangleNumberingContext() {}
@@ -57,6 +58,11 @@ public:
/// given call operator within the device context. No device number is
/// assigned if there's no device numbering context is associated.
virtual unsigned getDeviceManglingNumber(const CXXMethodDecl *) { return 0; }
+
+ // Retrieve the index of the next lambda appearing in this context, which is
+ // used for deduplicating lambdas across modules. Note that this is a simple
+ // sequence number and is not ABI-dependent.
+ unsigned getNextLambdaIndex() { return LambdaIndex++; }
};
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/AST/NSAPI.h b/contrib/llvm-project/clang/include/clang/AST/NSAPI.h
index a8bd2d0f17e6..d411c34191ed 100644
--- a/contrib/llvm-project/clang/include/clang/AST/NSAPI.h
+++ b/contrib/llvm-project/clang/include/clang/AST/NSAPI.h
@@ -11,7 +11,7 @@
#include "clang/Basic/IdentifierTable.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/Optional.h"
+#include <optional>
namespace clang {
class ASTContext;
@@ -89,7 +89,7 @@ public:
Selector getNSArraySelector(NSArrayMethodKind MK) const;
/// Return NSArrayMethodKind if \p Sel is such a selector.
- Optional<NSArrayMethodKind> getNSArrayMethodKind(Selector Sel);
+ std::optional<NSArrayMethodKind> getNSArrayMethodKind(Selector Sel);
/// Enumerates the NSDictionary/NSMutableDictionary methods used
/// to generate literals and to apply some checks.
@@ -114,7 +114,7 @@ public:
Selector getNSDictionarySelector(NSDictionaryMethodKind MK) const;
/// Return NSDictionaryMethodKind if \p Sel is such a selector.
- Optional<NSDictionaryMethodKind> getNSDictionaryMethodKind(Selector Sel);
+ std::optional<NSDictionaryMethodKind> getNSDictionaryMethodKind(Selector Sel);
/// Enumerates the NSMutableSet/NSOrderedSet methods used
/// to apply some checks.
@@ -131,7 +131,7 @@ public:
Selector getNSSetSelector(NSSetMethodKind MK) const;
/// Return NSSetMethodKind if \p Sel is such a selector.
- Optional<NSSetMethodKind> getNSSetMethodKind(Selector Sel);
+ std::optional<NSSetMethodKind> getNSSetMethodKind(Selector Sel);
/// Returns selector for "objectForKeyedSubscript:".
Selector getObjectForKeyedSubscriptSelector() const {
@@ -203,13 +203,13 @@ public:
}
/// Return NSNumberLiteralMethodKind if \p Sel is such a selector.
- Optional<NSNumberLiteralMethodKind>
- getNSNumberLiteralMethodKind(Selector Sel) const;
+ std::optional<NSNumberLiteralMethodKind>
+ getNSNumberLiteralMethodKind(Selector Sel) const;
/// Determine the appropriate NSNumber factory method kind for a
/// literal of the given type.
- Optional<NSNumberLiteralMethodKind>
- getNSNumberFactoryMethodKind(QualType T) const;
+ std::optional<NSNumberLiteralMethodKind>
+ getNSNumberFactoryMethodKind(QualType T) const;
/// Returns true if \param T is a typedef of "BOOL" in objective-c.
bool isObjCBOOLType(QualType T) const;
diff --git a/contrib/llvm-project/clang/include/clang/AST/NestedNameSpecifier.h b/contrib/llvm-project/clang/include/clang/AST/NestedNameSpecifier.h
index 8bc3e25c0f4b..3b6cf9721185 100644
--- a/contrib/llvm-project/clang/include/clang/AST/NestedNameSpecifier.h
+++ b/contrib/llvm-project/clang/include/clang/AST/NestedNameSpecifier.h
@@ -162,7 +162,7 @@ public:
/// Return the prefix of this nested name specifier.
///
/// The prefix contains all of the parts of the nested name
- /// specifier that preced this current specifier. For example, for a
+ /// specifier that precede this current specifier. For example, for a
/// nested name specifier that represents "foo::bar::", the current
/// specifier will contain "bar::" and the prefix will contain
/// "foo::".
@@ -521,7 +521,7 @@ public:
/// NestedNameSpecifiers into a diagnostic with <<.
inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
NestedNameSpecifier *NNS) {
- DB.AddTaggedVal(reinterpret_cast<intptr_t>(NNS),
+ DB.AddTaggedVal(reinterpret_cast<uint64_t>(NNS),
DiagnosticsEngine::ak_nestednamespec);
return DB;
}
diff --git a/contrib/llvm-project/clang/include/clang/AST/NonTrivialTypeVisitor.h b/contrib/llvm-project/clang/include/clang/AST/NonTrivialTypeVisitor.h
index c95516538ad1..cf320c8a478a 100644
--- a/contrib/llvm-project/clang/include/clang/AST/NonTrivialTypeVisitor.h
+++ b/contrib/llvm-project/clang/include/clang/AST/NonTrivialTypeVisitor.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_NON_TRIVIAL_TYPE_VISITOR_H
-#define LLVM_CLANG_NON_TRIVIAL_TYPE_VISITOR_H
+#ifndef LLVM_CLANG_AST_NONTRIVIALTYPEVISITOR_H
+#define LLVM_CLANG_AST_NONTRIVIALTYPEVISITOR_H
#include "clang/AST/Type.h"
diff --git a/contrib/llvm-project/clang/include/clang/AST/ODRDiagsEmitter.h b/contrib/llvm-project/clang/include/clang/AST/ODRDiagsEmitter.h
new file mode 100644
index 000000000000..1f7faaa06e54
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/AST/ODRDiagsEmitter.h
@@ -0,0 +1,203 @@
+//===- ODRDiagsEmitter.h - Emits diagnostic for ODR mismatches --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_ODRDIAGSEMITTER_H
+#define LLVM_CLANG_AST_ODRDIAGSEMITTER_H
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/LangOptions.h"
+
+namespace clang {
+
+class ODRDiagsEmitter {
+public:
+ ODRDiagsEmitter(DiagnosticsEngine &Diags, const ASTContext &Context,
+ const LangOptions &LangOpts)
+ : Diags(Diags), Context(Context), LangOpts(LangOpts) {}
+
+ /// Diagnose ODR mismatch between 2 FunctionDecl.
+ ///
+ /// Returns true if found a mismatch and diagnosed it.
+ bool diagnoseMismatch(const FunctionDecl *FirstFunction,
+ const FunctionDecl *SecondFunction) const;
+
+ /// Diagnose ODR mismatch between 2 EnumDecl.
+ ///
+ /// Returns true if found a mismatch and diagnosed it.
+ bool diagnoseMismatch(const EnumDecl *FirstEnum,
+ const EnumDecl *SecondEnum) const;
+
+ /// Diagnose ODR mismatch between 2 CXXRecordDecl.
+ ///
+ /// Returns true if found a mismatch and diagnosed it.
+ /// To compare 2 declarations with merged and identical definition data
+ /// you need to provide pre-merge definition data in \p SecondDD.
+ bool
+ diagnoseMismatch(const CXXRecordDecl *FirstRecord,
+ const CXXRecordDecl *SecondRecord,
+ const struct CXXRecordDecl::DefinitionData *SecondDD) const;
+
+ /// Diagnose ODR mismatch between 2 RecordDecl that are not CXXRecordDecl.
+ ///
+ /// Returns true if found a mismatch and diagnosed it.
+ bool diagnoseMismatch(const RecordDecl *FirstRecord,
+ const RecordDecl *SecondRecord) const;
+
+ /// Diagnose ODR mismatch between 2 ObjCInterfaceDecl.
+ ///
+ /// Returns true if found a mismatch and diagnosed it.
+ bool diagnoseMismatch(
+ const ObjCInterfaceDecl *FirstID, const ObjCInterfaceDecl *SecondID,
+ const struct ObjCInterfaceDecl::DefinitionData *SecondDD) const;
+
+ /// Diagnose ODR mismatch between ObjCInterfaceDecl with different
+ /// definitions.
+ bool diagnoseMismatch(const ObjCInterfaceDecl *FirstID,
+ const ObjCInterfaceDecl *SecondID) const {
+ assert(FirstID->data().Definition != SecondID->data().Definition &&
+ "Don't diagnose differences when definitions are merged already");
+ return diagnoseMismatch(FirstID, SecondID, &SecondID->data());
+ }
+
+ /// Diagnose ODR mismatch between 2 ObjCProtocolDecl.
+ ///
+ /// Returns true if found a mismatch and diagnosed it.
+ /// To compare 2 declarations with merged and identical definition data
+ /// you need to provide pre-merge definition data in \p SecondDD.
+ bool diagnoseMismatch(
+ const ObjCProtocolDecl *FirstProtocol,
+ const ObjCProtocolDecl *SecondProtocol,
+ const struct ObjCProtocolDecl::DefinitionData *SecondDD) const;
+
+ /// Diagnose ODR mismatch between ObjCProtocolDecl with different definitions.
+ bool diagnoseMismatch(const ObjCProtocolDecl *FirstProtocol,
+ const ObjCProtocolDecl *SecondProtocol) const {
+ assert(FirstProtocol->data().Definition !=
+ SecondProtocol->data().Definition &&
+ "Don't diagnose differences when definitions are merged already");
+ return diagnoseMismatch(FirstProtocol, SecondProtocol,
+ &SecondProtocol->data());
+ }
+
+ /// Get the best name we know for the module that owns the given
+ /// declaration, or an empty string if the declaration is not from a module.
+ static std::string getOwningModuleNameForDiagnostic(const Decl *D);
+
+private:
+ using DeclHashes = llvm::SmallVector<std::pair<const Decl *, unsigned>, 4>;
+
+ // Used with err_module_odr_violation_mismatch_decl,
+ // note_module_odr_violation_mismatch_decl,
+ // err_module_odr_violation_mismatch_decl_unknown,
+ // and note_module_odr_violation_mismatch_decl_unknown
+ // This list should be the same Decl's as in ODRHash::isSubDeclToBeProcessed
+ enum ODRMismatchDecl {
+ EndOfClass,
+ PublicSpecifer,
+ PrivateSpecifer,
+ ProtectedSpecifer,
+ StaticAssert,
+ Field,
+ CXXMethod,
+ TypeAlias,
+ TypeDef,
+ Var,
+ Friend,
+ FunctionTemplate,
+ ObjCMethod,
+ ObjCIvar,
+ ObjCProperty,
+ Other
+ };
+
+ struct DiffResult {
+ const Decl *FirstDecl = nullptr, *SecondDecl = nullptr;
+ ODRMismatchDecl FirstDiffType = Other, SecondDiffType = Other;
+ };
+
+ // If there is a diagnoseable difference, FirstDiffType and
+ // SecondDiffType will not be Other and FirstDecl and SecondDecl will be
+ // filled in if not EndOfClass.
+ static DiffResult FindTypeDiffs(DeclHashes &FirstHashes,
+ DeclHashes &SecondHashes);
+
+ DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) const {
+ return Diags.Report(Loc, DiagID);
+ }
+
+ // Use this to diagnose that an unexpected Decl was encountered
+ // or no difference was detected. This causes a generic error
+ // message to be emitted.
+ void diagnoseSubMismatchUnexpected(DiffResult &DR,
+ const NamedDecl *FirstRecord,
+ StringRef FirstModule,
+ const NamedDecl *SecondRecord,
+ StringRef SecondModule) const;
+
+ void diagnoseSubMismatchDifferentDeclKinds(DiffResult &DR,
+ const NamedDecl *FirstRecord,
+ StringRef FirstModule,
+ const NamedDecl *SecondRecord,
+ StringRef SecondModule) const;
+
+ bool diagnoseSubMismatchField(const NamedDecl *FirstRecord,
+ StringRef FirstModule, StringRef SecondModule,
+ const FieldDecl *FirstField,
+ const FieldDecl *SecondField) const;
+
+ bool diagnoseSubMismatchTypedef(const NamedDecl *FirstRecord,
+ StringRef FirstModule, StringRef SecondModule,
+ const TypedefNameDecl *FirstTD,
+ const TypedefNameDecl *SecondTD,
+ bool IsTypeAlias) const;
+
+ bool diagnoseSubMismatchVar(const NamedDecl *FirstRecord,
+ StringRef FirstModule, StringRef SecondModule,
+ const VarDecl *FirstVD,
+ const VarDecl *SecondVD) const;
+
+ /// Check if protocol lists are the same and diagnose if they are different.
+ ///
+ /// Returns true if found a mismatch and diagnosed it.
+ bool diagnoseSubMismatchProtocols(const ObjCProtocolList &FirstProtocols,
+ const ObjCContainerDecl *FirstContainer,
+ StringRef FirstModule,
+ const ObjCProtocolList &SecondProtocols,
+ const ObjCContainerDecl *SecondContainer,
+ StringRef SecondModule) const;
+
+ /// Check if Objective-C methods are the same and diagnose if different.
+ ///
+ /// Returns true if found a mismatch and diagnosed it.
+ bool diagnoseSubMismatchObjCMethod(const NamedDecl *FirstObjCContainer,
+ StringRef FirstModule,
+ StringRef SecondModule,
+ const ObjCMethodDecl *FirstMethod,
+ const ObjCMethodDecl *SecondMethod) const;
+
+ /// Check if Objective-C properties are the same and diagnose if different.
+ ///
+ /// Returns true if found a mismatch and diagnosed it.
+ bool
+ diagnoseSubMismatchObjCProperty(const NamedDecl *FirstObjCContainer,
+ StringRef FirstModule, StringRef SecondModule,
+ const ObjCPropertyDecl *FirstProp,
+ const ObjCPropertyDecl *SecondProp) const;
+
+private:
+ DiagnosticsEngine &Diags;
+ const ASTContext &Context;
+ const LangOptions &LangOpts;
+};
+
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/include/clang/AST/ODRHash.h b/contrib/llvm-project/clang/include/clang/AST/ODRHash.h
index 2e8593e0b835..a1caa6d39a87 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ODRHash.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ODRHash.h
@@ -25,6 +25,7 @@
namespace clang {
+class APValue;
class Decl;
class IdentifierInfo;
class NestedNameSpecifier;
@@ -55,6 +56,14 @@ public:
// more information than the AddDecl class.
void AddCXXRecordDecl(const CXXRecordDecl *Record);
+ // Use this for ODR checking records in C/Objective-C between modules. This
+ // method compares more information than the AddDecl class.
+ void AddRecordDecl(const RecordDecl *Record);
+
+ // Use this for ODR checking ObjC interfaces. This
+ // method compares more information than the AddDecl class.
+ void AddObjCInterfaceDecl(const ObjCInterfaceDecl *Record);
+
// Use this for ODR checking functions between modules. This method compares
// more information than the AddDecl class. SkipBody will process the
// hash as if the function has no body.
@@ -64,6 +73,10 @@ public:
// more information than the AddDecl class.
void AddEnumDecl(const EnumDecl *Enum);
+ // Use this for ODR checking ObjC protocols. This
+ // method compares more information than the AddDecl class.
+ void AddObjCProtocolDecl(const ObjCProtocolDecl *P);
+
// Process SubDecls of the main Decl. This method calls the DeclVisitor
// while AddDecl does not.
void AddSubDecl(const Decl *D);
@@ -89,7 +102,9 @@ public:
// Save booleans until the end to lower the size of data to process.
void AddBoolean(bool value);
- static bool isDeclToBeProcessed(const Decl* D, const DeclContext *Parent);
+ void AddStructuralValue(const APValue &);
+
+ static bool isSubDeclToBeProcessed(const Decl *D, const DeclContext *Parent);
private:
void AddDeclarationNameImpl(DeclarationName Name);
diff --git a/contrib/llvm-project/clang/include/clang/AST/OSLog.h b/contrib/llvm-project/clang/include/clang/AST/OSLog.h
index c24e79ce6da0..3772597e2616 100644
--- a/contrib/llvm-project/clang/include/clang/AST/OSLog.h
+++ b/contrib/llvm-project/clang/include/clang/AST/OSLog.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_ANALYSES_OSLOG_H
-#define LLVM_CLANG_ANALYSIS_ANALYSES_OSLOG_H
+#ifndef LLVM_CLANG_AST_OSLOG_H
+#define LLVM_CLANG_AST_OSLOG_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
diff --git a/contrib/llvm-project/clang/include/clang/AST/OpenMPClause.h b/contrib/llvm-project/clang/include/clang/AST/OpenMPClause.h
index aaddcfa307da..924ca189381b 100644
--- a/contrib/llvm-project/clang/include/clang/AST/OpenMPClause.h
+++ b/contrib/llvm-project/clang/include/clang/AST/OpenMPClause.h
@@ -32,6 +32,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
+#include "llvm/Frontend/OpenMP/OMPAssume.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
#include "llvm/Support/Casting.h"
@@ -106,6 +107,89 @@ public:
static bool classof(const OMPClause *) { return true; }
};
+template <OpenMPClauseKind ClauseKind>
+struct OMPNoChildClause : public OMPClause {
+ /// Build '\p ClauseKind' clause.
+ ///
+ /// \param StartLoc Starting location of the clause.
+ /// \param EndLoc Ending location of the clause.
+ OMPNoChildClause(SourceLocation StartLoc, SourceLocation EndLoc)
+ : OMPClause(ClauseKind, StartLoc, EndLoc) {}
+
+ /// Build an empty clause.
+ OMPNoChildClause()
+ : OMPClause(ClauseKind, SourceLocation(), SourceLocation()) {}
+
+ child_range children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+
+ const_child_range children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == ClauseKind;
+ }
+};
+
+template <OpenMPClauseKind ClauseKind, class Base>
+class OMPOneStmtClause : public Base {
+
+ /// Location of '('.
+ SourceLocation LParenLoc;
+
+ /// Sub-expression.
+ Stmt *S = nullptr;
+
+protected:
+ void setStmt(Stmt *S) { this->S = S; }
+
+public:
+ OMPOneStmtClause(Stmt *S, SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc)
+ : Base(ClauseKind, StartLoc, EndLoc), LParenLoc(LParenLoc), S(S) {}
+
+ OMPOneStmtClause() : Base(ClauseKind, SourceLocation(), SourceLocation()) {}
+
+ /// Return the associated statement, potentially casted to \p T.
+ template <typename T> T *getStmtAs() const { return cast_or_null<T>(S); }
+
+ /// Sets the location of '('.
+ void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+
+ /// Returns the location of '('.
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+
+ using child_iterator = StmtIterator;
+ using const_child_iterator = ConstStmtIterator;
+ using child_range = llvm::iterator_range<child_iterator>;
+ using const_child_range = llvm::iterator_range<const_child_iterator>;
+
+ child_range children() { return child_range(&S, &S + 1); }
+
+ const_child_range children() const { return const_child_range(&S, &S + 1); }
+
+ // TODO: Consider making the getAddrOfExprAsWritten version the default.
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == ClauseKind;
+ }
+};
+
/// Class that handles pre-initialization statement for some clauses, like
/// 'shedule', 'firstprivate' etc.
class OMPClauseWithPreInit {
@@ -252,7 +336,7 @@ public:
/// Fetches list of all variables in the clause.
ArrayRef<const Expr *> getVarRefs() const {
- return llvm::makeArrayRef(
+ return llvm::ArrayRef(
static_cast<const T *>(this)->template getTrailingObjects<Expr *>(),
NumVars);
}
@@ -266,17 +350,12 @@ public:
/// \endcode
/// In this example directive '#pragma omp allocate' has simple 'allocator'
/// clause with the allocator 'omp_default_mem_alloc'.
-class OMPAllocatorClause : public OMPClause {
+class OMPAllocatorClause final
+ : public OMPOneStmtClause<llvm::omp::OMPC_allocator, OMPClause> {
friend class OMPClauseReader;
- /// Location of '('.
- SourceLocation LParenLoc;
-
- /// Expression with the allocator.
- Stmt *Allocator = nullptr;
-
/// Set allocator.
- void setAllocator(Expr *A) { Allocator = A; }
+ void setAllocator(Expr *A) { setStmt(A); }
public:
/// Build 'allocator' clause with the given allocator.
@@ -287,39 +366,58 @@ public:
/// \param EndLoc Ending location of the clause.
OMPAllocatorClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
- : OMPClause(llvm::omp::OMPC_allocator, StartLoc, EndLoc),
- LParenLoc(LParenLoc), Allocator(A) {}
+ : OMPOneStmtClause(A, StartLoc, LParenLoc, EndLoc) {}
/// Build an empty clause.
- OMPAllocatorClause()
- : OMPClause(llvm::omp::OMPC_allocator, SourceLocation(),
- SourceLocation()) {}
+ OMPAllocatorClause() : OMPOneStmtClause() {}
- /// Sets the location of '('.
- void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+ /// Returns allocator.
+ Expr *getAllocator() const { return getStmtAs<Expr>(); }
+};
- /// Returns the location of '('.
- SourceLocation getLParenLoc() const { return LParenLoc; }
+/// This represents the 'align' clause in the '#pragma omp allocate'
+/// directive.
+///
+/// \code
+/// #pragma omp allocate(a) allocator(omp_default_mem_alloc) align(8)
+/// \endcode
+/// In this example directive '#pragma omp allocate' has simple 'allocator'
+/// clause with the allocator 'omp_default_mem_alloc' and align clause with
+/// value of 8.
+class OMPAlignClause final
+ : public OMPOneStmtClause<llvm::omp::OMPC_align, OMPClause> {
+ friend class OMPClauseReader;
- /// Returns allocator.
- Expr *getAllocator() const { return cast_or_null<Expr>(Allocator); }
+ /// Set alignment value.
+ void setAlignment(Expr *A) { setStmt(A); }
- child_range children() { return child_range(&Allocator, &Allocator + 1); }
+ /// Build 'align' clause with the given alignment
+ ///
+ /// \param A Alignment value.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ OMPAlignClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc)
+ : OMPOneStmtClause(A, StartLoc, LParenLoc, EndLoc) {}
- const_child_range children() const {
- return const_child_range(&Allocator, &Allocator + 1);
- }
+ /// Build an empty clause.
+ OMPAlignClause() : OMPOneStmtClause() {}
- child_range used_children() {
- return child_range(child_iterator(), child_iterator());
- }
- const_child_range used_children() const {
- return const_child_range(const_child_iterator(), const_child_iterator());
- }
+public:
+ /// Build 'align' clause with the given alignment
+ ///
+ /// \param A Alignment value.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ static OMPAlignClause *Create(const ASTContext &C, Expr *A,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
- static bool classof(const OMPClause *T) {
- return T->getClauseKind() == llvm::omp::OMPC_allocator;
- }
+ /// Returns alignment
+ Expr *getAlignment() const { return getStmtAs<Expr>(); }
};
/// This represents clause 'allocate' in the '#pragma omp ...' directives.
@@ -527,17 +625,13 @@ public:
/// \endcode
/// In this example directive '#pragma omp task' has simple 'final'
/// clause with condition 'a > 5'.
-class OMPFinalClause : public OMPClause, public OMPClauseWithPreInit {
+class OMPFinalClause final
+ : public OMPOneStmtClause<llvm::omp::OMPC_final, OMPClause>,
+ public OMPClauseWithPreInit {
friend class OMPClauseReader;
- /// Location of '('.
- SourceLocation LParenLoc;
-
- /// Condition of the 'if' clause.
- Stmt *Condition = nullptr;
-
/// Set condition.
- void setCondition(Expr *Cond) { Condition = Cond; }
+ void setCondition(Expr *Cond) { setStmt(Cond); }
public:
/// Build 'final' clause with condition \a Cond.
@@ -552,42 +646,23 @@ public:
OMPFinalClause(Expr *Cond, Stmt *HelperCond,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
- : OMPClause(llvm::omp::OMPC_final, StartLoc, EndLoc),
- OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) {
+ : OMPOneStmtClause(Cond, StartLoc, LParenLoc, EndLoc),
+ OMPClauseWithPreInit(this) {
setPreInitStmt(HelperCond, CaptureRegion);
}
/// Build an empty clause.
- OMPFinalClause()
- : OMPClause(llvm::omp::OMPC_final, SourceLocation(), SourceLocation()),
- OMPClauseWithPreInit(this) {}
-
- /// Sets the location of '('.
- void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
-
- /// Returns the location of '('.
- SourceLocation getLParenLoc() const { return LParenLoc; }
+ OMPFinalClause() : OMPOneStmtClause(), OMPClauseWithPreInit(this) {}
/// Returns condition.
- Expr *getCondition() const { return cast_or_null<Expr>(Condition); }
-
- child_range children() { return child_range(&Condition, &Condition + 1); }
-
- const_child_range children() const {
- return const_child_range(&Condition, &Condition + 1);
- }
+ Expr *getCondition() const { return getStmtAs<Expr>(); }
child_range used_children();
const_child_range used_children() const {
auto Children = const_cast<OMPFinalClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
-
- static bool classof(const OMPClause *T) {
- return T->getClauseKind() == llvm::omp::OMPC_final;
- }
};
-
/// This represents 'num_threads' clause in the '#pragma omp ...'
/// directive.
///
@@ -596,17 +671,13 @@ public:
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'num_threads'
/// clause with number of threads '6'.
-class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit {
+class OMPNumThreadsClause final
+ : public OMPOneStmtClause<llvm::omp::OMPC_num_threads, OMPClause>,
+ public OMPClauseWithPreInit {
friend class OMPClauseReader;
- /// Location of '('.
- SourceLocation LParenLoc;
-
- /// Condition of the 'num_threads' clause.
- Stmt *NumThreads = nullptr;
-
/// Set condition.
- void setNumThreads(Expr *NThreads) { NumThreads = NThreads; }
+ void setNumThreads(Expr *NThreads) { setStmt(NThreads); }
public:
/// Build 'num_threads' clause with condition \a NumThreads.
@@ -622,43 +693,16 @@ public:
OpenMPDirectiveKind CaptureRegion,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
- : OMPClause(llvm::omp::OMPC_num_threads, StartLoc, EndLoc),
- OMPClauseWithPreInit(this), LParenLoc(LParenLoc),
- NumThreads(NumThreads) {
+ : OMPOneStmtClause(NumThreads, StartLoc, LParenLoc, EndLoc),
+ OMPClauseWithPreInit(this) {
setPreInitStmt(HelperNumThreads, CaptureRegion);
}
/// Build an empty clause.
- OMPNumThreadsClause()
- : OMPClause(llvm::omp::OMPC_num_threads, SourceLocation(),
- SourceLocation()),
- OMPClauseWithPreInit(this) {}
-
- /// Sets the location of '('.
- void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
-
- /// Returns the location of '('.
- SourceLocation getLParenLoc() const { return LParenLoc; }
+ OMPNumThreadsClause() : OMPOneStmtClause(), OMPClauseWithPreInit(this) {}
/// Returns number of threads.
- Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); }
-
- child_range children() { return child_range(&NumThreads, &NumThreads + 1); }
-
- const_child_range children() const {
- return const_child_range(&NumThreads, &NumThreads + 1);
- }
-
- child_range used_children() {
- return child_range(child_iterator(), child_iterator());
- }
- const_child_range used_children() const {
- return const_child_range(const_child_iterator(), const_child_iterator());
- }
-
- static bool classof(const OMPClause *T) {
- return T->getClauseKind() == llvm::omp::OMPC_num_threads;
- }
+ Expr *getNumThreads() const { return getStmtAs<Expr>(); }
};
/// This represents 'safelen' clause in the '#pragma omp ...'
@@ -673,17 +717,12 @@ public:
/// concurrently with SIMD instructions can have a greater distance
/// in the logical iteration space than its value. The parameter of
/// the safelen clause must be a constant positive integer expression.
-class OMPSafelenClause : public OMPClause {
+class OMPSafelenClause final
+ : public OMPOneStmtClause<llvm::omp::OMPC_safelen, OMPClause> {
friend class OMPClauseReader;
- /// Location of '('.
- SourceLocation LParenLoc;
-
- /// Safe iteration space distance.
- Stmt *Safelen = nullptr;
-
/// Set safelen.
- void setSafelen(Expr *Len) { Safelen = Len; }
+ void setSafelen(Expr *Len) { setStmt(Len); }
public:
/// Build 'safelen' clause.
@@ -693,39 +732,13 @@ public:
/// \param EndLoc Ending location of the clause.
OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
- : OMPClause(llvm::omp::OMPC_safelen, StartLoc, EndLoc),
- LParenLoc(LParenLoc), Safelen(Len) {}
+ : OMPOneStmtClause(Len, StartLoc, LParenLoc, EndLoc) {}
/// Build an empty clause.
- explicit OMPSafelenClause()
- : OMPClause(llvm::omp::OMPC_safelen, SourceLocation(), SourceLocation()) {
- }
-
- /// Sets the location of '('.
- void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
-
- /// Returns the location of '('.
- SourceLocation getLParenLoc() const { return LParenLoc; }
+ explicit OMPSafelenClause() : OMPOneStmtClause() {}
/// Return safe iteration space distance.
- Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); }
-
- child_range children() { return child_range(&Safelen, &Safelen + 1); }
-
- const_child_range children() const {
- return const_child_range(&Safelen, &Safelen + 1);
- }
-
- child_range used_children() {
- return child_range(child_iterator(), child_iterator());
- }
- const_child_range used_children() const {
- return const_child_range(const_child_iterator(), const_child_iterator());
- }
-
- static bool classof(const OMPClause *T) {
- return T->getClauseKind() == llvm::omp::OMPC_safelen;
- }
+ Expr *getSafelen() const { return getStmtAs<Expr>(); }
};
/// This represents 'simdlen' clause in the '#pragma omp ...'
@@ -739,17 +752,12 @@ public:
/// If the 'simdlen' clause is used then it specifies the preferred number of
/// iterations to be executed concurrently. The parameter of the 'simdlen'
/// clause must be a constant positive integer expression.
-class OMPSimdlenClause : public OMPClause {
+class OMPSimdlenClause final
+ : public OMPOneStmtClause<llvm::omp::OMPC_simdlen, OMPClause> {
friend class OMPClauseReader;
- /// Location of '('.
- SourceLocation LParenLoc;
-
- /// Safe iteration space distance.
- Stmt *Simdlen = nullptr;
-
/// Set simdlen.
- void setSimdlen(Expr *Len) { Simdlen = Len; }
+ void setSimdlen(Expr *Len) { setStmt(Len); }
public:
/// Build 'simdlen' clause.
@@ -759,39 +767,13 @@ public:
/// \param EndLoc Ending location of the clause.
OMPSimdlenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
- : OMPClause(llvm::omp::OMPC_simdlen, StartLoc, EndLoc),
- LParenLoc(LParenLoc), Simdlen(Len) {}
+ : OMPOneStmtClause(Len, StartLoc, LParenLoc, EndLoc) {}
/// Build an empty clause.
- explicit OMPSimdlenClause()
- : OMPClause(llvm::omp::OMPC_simdlen, SourceLocation(), SourceLocation()) {
- }
-
- /// Sets the location of '('.
- void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
-
- /// Returns the location of '('.
- SourceLocation getLParenLoc() const { return LParenLoc; }
+ explicit OMPSimdlenClause() : OMPOneStmtClause() {}
/// Return safe iteration space distance.
- Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); }
-
- child_range children() { return child_range(&Simdlen, &Simdlen + 1); }
-
- const_child_range children() const {
- return const_child_range(&Simdlen, &Simdlen + 1);
- }
-
- child_range used_children() {
- return child_range(child_iterator(), child_iterator());
- }
- const_child_range used_children() const {
- return const_child_range(const_child_iterator(), const_child_iterator());
- }
-
- static bool classof(const OMPClause *T) {
- return T->getClauseKind() == llvm::omp::OMPC_simdlen;
- }
+ Expr *getSimdlen() const { return getStmtAs<Expr>(); }
};
/// This represents the 'sizes' clause in the '#pragma omp tile' directive.
@@ -894,11 +876,11 @@ public:
/// #pragma omp unroll full
/// for (int i = 0; i < 64; ++i)
/// \endcode
-class OMPFullClause final : public OMPClause {
+class OMPFullClause final : public OMPNoChildClause<llvm::omp::OMPC_full> {
friend class OMPClauseReader;
/// Build an empty clause.
- explicit OMPFullClause() : OMPClause(llvm::omp::OMPC_full, {}, {}) {}
+ explicit OMPFullClause() : OMPNoChildClause() {}
public:
/// Build an AST node for a 'full' clause.
@@ -913,22 +895,6 @@ public:
///
/// \param C Context of the AST.
static OMPFullClause *CreateEmpty(const ASTContext &C);
-
- child_range children() { return {child_iterator(), child_iterator()}; }
- const_child_range children() const {
- return {const_child_iterator(), const_child_iterator()};
- }
-
- child_range used_children() {
- return child_range(child_iterator(), child_iterator());
- }
- const_child_range used_children() const {
- return const_child_range(const_child_iterator(), const_child_iterator());
- }
-
- static bool classof(const OMPClause *T) {
- return T->getClauseKind() == llvm::omp::OMPC_full;
- }
};
/// Representation of the 'partial' clause of the '#pragma omp unroll'
@@ -1007,17 +973,12 @@ public:
/// The parameter must be a constant positive integer expression, it specifies
/// the number of nested loops that should be collapsed into a single iteration
/// space.
-class OMPCollapseClause : public OMPClause {
+class OMPCollapseClause final
+ : public OMPOneStmtClause<llvm::omp::OMPC_collapse, OMPClause> {
friend class OMPClauseReader;
- /// Location of '('.
- SourceLocation LParenLoc;
-
- /// Number of for-loops.
- Stmt *NumForLoops = nullptr;
-
/// Set the number of associated for-loops.
- void setNumForLoops(Expr *Num) { NumForLoops = Num; }
+ void setNumForLoops(Expr *Num) { setStmt(Num); }
public:
/// Build 'collapse' clause.
@@ -1028,39 +989,13 @@ public:
/// \param EndLoc Ending location of the clause.
OMPCollapseClause(Expr *Num, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
- : OMPClause(llvm::omp::OMPC_collapse, StartLoc, EndLoc),
- LParenLoc(LParenLoc), NumForLoops(Num) {}
+ : OMPOneStmtClause(Num, StartLoc, LParenLoc, EndLoc) {}
/// Build an empty clause.
- explicit OMPCollapseClause()
- : OMPClause(llvm::omp::OMPC_collapse, SourceLocation(),
- SourceLocation()) {}
-
- /// Sets the location of '('.
- void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
-
- /// Returns the location of '('.
- SourceLocation getLParenLoc() const { return LParenLoc; }
+ explicit OMPCollapseClause() : OMPOneStmtClause() {}
/// Return the number of associated for-loops.
- Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }
-
- child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }
-
- const_child_range children() const {
- return const_child_range(&NumForLoops, &NumForLoops + 1);
- }
-
- child_range used_children() {
- return child_range(child_iterator(), child_iterator());
- }
- const_child_range used_children() const {
- return const_child_range(const_child_iterator(), const_child_iterator());
- }
-
- static bool classof(const OMPClause *T) {
- return T->getClauseKind() == llvm::omp::OMPC_collapse;
- }
+ Expr *getNumForLoops() const { return getStmtAs<Expr>(); }
};
/// This represents 'default' clause in the '#pragma omp ...' directive.
@@ -1233,7 +1168,8 @@ public:
/// \endcode
/// In this example directive '#pragma omp requires' has 'unified_address'
/// clause.
-class OMPUnifiedAddressClause final : public OMPClause {
+class OMPUnifiedAddressClause final
+ : public OMPNoChildClause<llvm::omp::OMPC_unified_address> {
public:
friend class OMPClauseReader;
/// Build 'unified_address' clause.
@@ -1241,31 +1177,10 @@ public:
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc)
- : OMPClause(llvm::omp::OMPC_unified_address, StartLoc, EndLoc) {}
+ : OMPNoChildClause(StartLoc, EndLoc) {}
/// Build an empty clause.
- OMPUnifiedAddressClause()
- : OMPClause(llvm::omp::OMPC_unified_address, SourceLocation(),
- SourceLocation()) {}
-
- child_range children() {
- return child_range(child_iterator(), child_iterator());
- }
-
- const_child_range children() const {
- return const_child_range(const_child_iterator(), const_child_iterator());
- }
-
- child_range used_children() {
- return child_range(child_iterator(), child_iterator());
- }
- const_child_range used_children() const {
- return const_child_range(const_child_iterator(), const_child_iterator());
- }
-
- static bool classof(const OMPClause *T) {
- return T->getClauseKind() == llvm::omp::OMPC_unified_address;
- }
+ OMPUnifiedAddressClause() : OMPNoChildClause() {}
};
/// This represents 'unified_shared_memory' clause in the '#pragma omp requires'
@@ -1487,6 +1402,231 @@ public:
}
};
+/// This represents 'at' clause in the '#pragma omp error' directive
+///
+/// \code
+/// #pragma omp error at(compilation)
+/// \endcode
+/// In this example directive '#pragma omp error' has simple
+/// 'at' clause with kind 'complilation'.
+class OMPAtClause final : public OMPClause {
+ friend class OMPClauseReader;
+
+ /// Location of '('
+ SourceLocation LParenLoc;
+
+ /// A kind of the 'at' clause.
+ OpenMPAtClauseKind Kind = OMPC_AT_unknown;
+
+ /// Start location of the kind in source code.
+ SourceLocation KindKwLoc;
+
+ /// Set kind of the clause.
+ ///
+ /// \param K Kind of clause.
+ void setAtKind(OpenMPAtClauseKind K) { Kind = K; }
+
+ /// Set clause kind location.
+ ///
+ /// \param KLoc Kind location.
+ void setAtKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }
+
+ /// Sets the location of '('.
+ void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+
+public:
+ /// Build 'at' clause with argument \a A ('compilation' or 'execution').
+ ///
+ /// \param A Argument of the clause ('compilation' or 'execution').
+ /// \param ALoc Starting location of the argument.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ OMPAtClause(OpenMPAtClauseKind A, SourceLocation ALoc,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc)
+ : OMPClause(llvm::omp::OMPC_at, StartLoc, EndLoc), LParenLoc(LParenLoc),
+ Kind(A), KindKwLoc(ALoc) {}
+
+ /// Build an empty clause.
+ OMPAtClause()
+ : OMPClause(llvm::omp::OMPC_at, SourceLocation(), SourceLocation()) {}
+
+ /// Returns the locaiton of '('.
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+
+ /// Returns kind of the clause.
+ OpenMPAtClauseKind getAtKind() const { return Kind; }
+
+ /// Returns location of clause kind.
+ SourceLocation getAtKindKwLoc() const { return KindKwLoc; }
+
+ child_range children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+
+ const_child_range children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_at;
+ }
+};
+
+/// This represents 'severity' clause in the '#pragma omp error' directive
+///
+/// \code
+/// #pragma omp error severity(fatal)
+/// \endcode
+/// In this example directive '#pragma omp error' has simple
+/// 'severity' clause with kind 'fatal'.
+class OMPSeverityClause final : public OMPClause {
+ friend class OMPClauseReader;
+
+ /// Location of '('
+ SourceLocation LParenLoc;
+
+ /// A kind of the 'severity' clause.
+ OpenMPSeverityClauseKind Kind = OMPC_SEVERITY_unknown;
+
+ /// Start location of the kind in source code.
+ SourceLocation KindKwLoc;
+
+ /// Set kind of the clause.
+ ///
+ /// \param K Kind of clause.
+ void setSeverityKind(OpenMPSeverityClauseKind K) { Kind = K; }
+
+ /// Set clause kind location.
+ ///
+ /// \param KLoc Kind location.
+ void setSeverityKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }
+
+ /// Sets the location of '('.
+ void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+
+public:
+ /// Build 'severity' clause with argument \a A ('fatal' or 'warning').
+ ///
+ /// \param A Argument of the clause ('fatal' or 'warning').
+ /// \param ALoc Starting location of the argument.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ OMPSeverityClause(OpenMPSeverityClauseKind A, SourceLocation ALoc,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc)
+ : OMPClause(llvm::omp::OMPC_severity, StartLoc, EndLoc),
+ LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}
+
+ /// Build an empty clause.
+ OMPSeverityClause()
+ : OMPClause(llvm::omp::OMPC_severity, SourceLocation(),
+ SourceLocation()) {}
+
+ /// Returns the locaiton of '('.
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+
+ /// Returns kind of the clause.
+ OpenMPSeverityClauseKind getSeverityKind() const { return Kind; }
+
+ /// Returns location of clause kind.
+ SourceLocation getSeverityKindKwLoc() const { return KindKwLoc; }
+
+ child_range children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+
+ const_child_range children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_severity;
+ }
+};
+
+/// This represents 'message' clause in the '#pragma omp error' directive
+///
+/// \code
+/// #pragma omp error message("GNU compiler required.")
+/// \endcode
+/// In this example directive '#pragma omp error' has simple
+/// 'message' clause with user error message of "GNU compiler required.".
+class OMPMessageClause final : public OMPClause {
+ friend class OMPClauseReader;
+
+ /// Location of '('
+ SourceLocation LParenLoc;
+
+ // Expression of the 'message' clause.
+ Stmt *MessageString = nullptr;
+
+ /// Set message string of the clause.
+ void setMessageString(Expr *MS) { MessageString = MS; }
+
+ /// Sets the location of '('.
+ void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+
+public:
+ /// Build 'message' clause with message string argument
+ ///
+ /// \param MS Argument of the clause (message string).
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ OMPMessageClause(Expr *MS, SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc)
+ : OMPClause(llvm::omp::OMPC_message, StartLoc, EndLoc),
+ LParenLoc(LParenLoc), MessageString(MS) {}
+
+ /// Build an empty clause.
+ OMPMessageClause()
+ : OMPClause(llvm::omp::OMPC_message, SourceLocation(), SourceLocation()) {
+ }
+
+ /// Returns the locaiton of '('.
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+
+ /// Returns message string of the clause.
+ Expr *getMessageString() const { return cast_or_null<Expr>(MessageString); }
+
+ child_range children() {
+ return child_range(&MessageString, &MessageString + 1);
+ }
+
+ const_child_range children() const {
+ return const_child_range(&MessageString, &MessageString + 1);
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_message;
+ }
+};
+
/// This represents 'schedule' clause in the '#pragma omp ...' directive.
///
/// \code
@@ -1782,37 +1922,15 @@ public:
/// #pragma omp for nowait
/// \endcode
/// In this example directive '#pragma omp for' has 'nowait' clause.
-class OMPNowaitClause : public OMPClause {
+class OMPNowaitClause final : public OMPNoChildClause<llvm::omp::OMPC_nowait> {
public:
/// Build 'nowait' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
- OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc)
- : OMPClause(llvm::omp::OMPC_nowait, StartLoc, EndLoc) {}
-
- /// Build an empty clause.
- OMPNowaitClause()
- : OMPClause(llvm::omp::OMPC_nowait, SourceLocation(), SourceLocation()) {}
-
- child_range children() {
- return child_range(child_iterator(), child_iterator());
- }
-
- const_child_range children() const {
- return const_child_range(const_child_iterator(), const_child_iterator());
- }
-
- child_range used_children() {
- return child_range(child_iterator(), child_iterator());
- }
- const_child_range used_children() const {
- return const_child_range(const_child_iterator(), const_child_iterator());
- }
-
- static bool classof(const OMPClause *T) {
- return T->getClauseKind() == llvm::omp::OMPC_nowait;
- }
+ OMPNowaitClause(SourceLocation StartLoc = SourceLocation(),
+ SourceLocation EndLoc = SourceLocation())
+ : OMPNoChildClause(StartLoc, EndLoc) {}
};
/// This represents 'untied' clause in the '#pragma omp ...' directive.
@@ -2005,13 +2123,13 @@ class OMPUpdateClause final
return IsExtended ? 2 : 0;
}
- /// Sets the the location of '(' in clause for 'depobj' directive.
+ /// Sets the location of '(' in clause for 'depobj' directive.
void setLParenLoc(SourceLocation Loc) {
assert(IsExtended && "Expected extended clause.");
*getTrailingObjects<SourceLocation>() = Loc;
}
- /// Sets the the location of '(' in clause for 'depobj' directive.
+ /// Sets the location of '(' in clause for 'depobj' directive.
void setArgumentLoc(SourceLocation Loc) {
assert(IsExtended && "Expected extended clause.");
*std::next(getTrailingObjects<SourceLocation>(), 1) = Loc;
@@ -2085,13 +2203,13 @@ public:
return const_child_range(const_child_iterator(), const_child_iterator());
}
- /// Gets the the location of '(' in clause for 'depobj' directive.
+ /// Gets the location of '(' in clause for 'depobj' directive.
SourceLocation getLParenLoc() const {
assert(IsExtended && "Expected extended clause.");
return *getTrailingObjects<SourceLocation>();
}
- /// Gets the the location of argument in clause for 'depobj' directive.
+ /// Gets the location of argument in clause for 'depobj' directive.
SourceLocation getArgumentLoc() const {
assert(IsExtended && "Expected extended clause.");
return *std::next(getTrailingObjects<SourceLocation>(), 1);
@@ -2149,6 +2267,47 @@ public:
}
};
+/// This represents 'compare' clause in the '#pragma omp atomic'
+/// directive.
+///
+/// \code
+/// #pragma omp atomic compare
+/// \endcode
+/// In this example directive '#pragma omp atomic' has 'compare' clause.
+class OMPCompareClause final : public OMPClause {
+public:
+ /// Build 'compare' clause.
+ ///
+ /// \param StartLoc Starting location of the clause.
+ /// \param EndLoc Ending location of the clause.
+ OMPCompareClause(SourceLocation StartLoc, SourceLocation EndLoc)
+ : OMPClause(llvm::omp::OMPC_compare, StartLoc, EndLoc) {}
+
+ /// Build an empty clause.
+ OMPCompareClause()
+ : OMPClause(llvm::omp::OMPC_compare, SourceLocation(), SourceLocation()) {
+ }
+
+ child_range children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+
+ const_child_range children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_compare;
+ }
+};
+
/// This represents 'seq_cst' clause in the '#pragma omp atomic'
/// directive.
///
@@ -2354,6 +2513,89 @@ public:
}
};
+/// This represents 'fail' clause in the '#pragma omp atomic'
+/// directive.
+///
+/// \code
+/// #pragma omp atomic compare fail
+/// \endcode
+/// In this example directive '#pragma omp atomic compare' has 'fail' clause.
+class OMPFailClause final : public OMPClause {
+
+ // FailParameter is a memory-order-clause. Storing the ClauseKind is
+ // sufficient for our purpose.
+ OpenMPClauseKind FailParameter = llvm::omp::Clause::OMPC_unknown;
+ SourceLocation FailParameterLoc;
+ SourceLocation LParenLoc;
+
+ friend class OMPClauseReader;
+
+ /// Sets the location of '(' in fail clause.
+ void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+
+ /// Sets the location of memoryOrder clause argument in fail clause.
+ void setFailParameterLoc(SourceLocation Loc) { FailParameterLoc = Loc; }
+
+ /// Sets the mem_order clause for 'atomic compare fail' directive.
+ void setFailParameter(OpenMPClauseKind FailParameter) {
+ this->FailParameter = FailParameter;
+ assert(checkFailClauseParameter(FailParameter) &&
+ "Invalid fail clause parameter");
+ }
+
+public:
+ /// Build 'fail' clause.
+ ///
+ /// \param StartLoc Starting location of the clause.
+ /// \param EndLoc Ending location of the clause.
+ OMPFailClause(SourceLocation StartLoc, SourceLocation EndLoc)
+ : OMPClause(llvm::omp::OMPC_fail, StartLoc, EndLoc) {}
+
+ OMPFailClause(OpenMPClauseKind FailParameter, SourceLocation FailParameterLoc,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc)
+ : OMPClause(llvm::omp::OMPC_fail, StartLoc, EndLoc),
+ FailParameterLoc(FailParameterLoc), LParenLoc(LParenLoc) {
+
+ setFailParameter(FailParameter);
+ }
+
+ /// Build an empty clause.
+ OMPFailClause()
+ : OMPClause(llvm::omp::OMPC_fail, SourceLocation(), SourceLocation()) {}
+
+ child_range children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+
+ const_child_range children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_fail;
+ }
+
+ /// Gets the location of '(' (for the parameter) in fail clause.
+ SourceLocation getLParenLoc() const {
+ return LParenLoc;
+ }
+
+ /// Gets the location of Fail Parameter (type memory-order-clause) in
+ /// fail clause.
+ SourceLocation getFailParameterLoc() const { return FailParameterLoc; }
+
+ /// Gets the parameter (type memory-order-clause) in Fail clause.
+ OpenMPClauseKind getFailParameter() const { return FailParameter; }
+};
+
/// This represents clause 'private' in the '#pragma omp ...' directives.
///
/// \code
@@ -2398,7 +2640,7 @@ class OMPPrivateClause final
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
- return llvm::makeArrayRef(varlist_end(), varlist_size());
+ return llvm::ArrayRef(varlist_end(), varlist_size());
}
public:
@@ -2507,7 +2749,7 @@ class OMPFirstprivateClause final
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
- return llvm::makeArrayRef(varlist_end(), varlist_size());
+ return llvm::ArrayRef(varlist_end(), varlist_size());
}
/// Sets the list of references to initializer variables for new
@@ -2521,7 +2763,7 @@ class OMPFirstprivateClause final
return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
- return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
+ return llvm::ArrayRef(getPrivateCopies().end(), varlist_size());
}
public:
@@ -2669,7 +2911,7 @@ class OMPLastprivateClause final
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
- return llvm::makeArrayRef(varlist_end(), varlist_size());
+ return llvm::ArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
@@ -2683,7 +2925,7 @@ class OMPLastprivateClause final
return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
- return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
+ return llvm::ArrayRef(getPrivateCopies().end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
@@ -2697,7 +2939,7 @@ class OMPLastprivateClause final
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
- return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
+ return llvm::ArrayRef(getSourceExprs().end(), varlist_size());
}
/// Set list of helper assignment expressions, required for proper
@@ -2710,7 +2952,7 @@ class OMPLastprivateClause final
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
- return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
+ return llvm::ArrayRef(getDestinationExprs().end(), varlist_size());
}
/// Sets lastprivate kind.
@@ -2998,7 +3240,7 @@ class OMPReductionClause final
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
- return llvm::makeArrayRef(varlist_end(), varlist_size());
+ return llvm::ArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
@@ -3011,7 +3253,7 @@ class OMPReductionClause final
return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
ArrayRef<const Expr *> getLHSExprs() const {
- return llvm::makeArrayRef(getPrivates().end(), varlist_size());
+ return llvm::ArrayRef(getPrivates().end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
@@ -3026,7 +3268,7 @@ class OMPReductionClause final
return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getRHSExprs() const {
- return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
+ return llvm::ArrayRef(getLHSExprs().end(), varlist_size());
}
/// Set list of helper reduction expressions, required for proper
@@ -3040,7 +3282,7 @@ class OMPReductionClause final
return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getReductionOps() const {
- return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
+ return llvm::ArrayRef(getRHSExprs().end(), varlist_size());
}
/// Set list of helper copy operations for inscan reductions.
@@ -3052,7 +3294,7 @@ class OMPReductionClause final
return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
}
ArrayRef<const Expr *> getInscanCopyOps() const {
- return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
+ return llvm::ArrayRef(getReductionOps().end(), varlist_size());
}
/// Set list of helper temp vars for inscan copy array operations.
@@ -3063,7 +3305,7 @@ class OMPReductionClause final
return MutableArrayRef<Expr *>(getInscanCopyOps().end(), varlist_size());
}
ArrayRef<const Expr *> getInscanCopyArrayTemps() const {
- return llvm::makeArrayRef(getInscanCopyOps().end(), varlist_size());
+ return llvm::ArrayRef(getInscanCopyOps().end(), varlist_size());
}
/// Set list of helper temp elements vars for inscan copy array operations.
@@ -3075,7 +3317,7 @@ class OMPReductionClause final
varlist_size());
}
ArrayRef<const Expr *> getInscanCopyArrayElems() const {
- return llvm::makeArrayRef(getInscanCopyArrayTemps().end(), varlist_size());
+ return llvm::ArrayRef(getInscanCopyArrayTemps().end(), varlist_size());
}
public:
@@ -3317,7 +3559,7 @@ class OMPTaskReductionClause final
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
- return llvm::makeArrayRef(varlist_end(), varlist_size());
+ return llvm::ArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the clause.
@@ -3330,7 +3572,7 @@ class OMPTaskReductionClause final
return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
ArrayRef<const Expr *> getLHSExprs() const {
- return llvm::makeArrayRef(getPrivates().end(), varlist_size());
+ return llvm::ArrayRef(getPrivates().end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the clause.
@@ -3344,7 +3586,7 @@ class OMPTaskReductionClause final
return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getRHSExprs() const {
- return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
+ return llvm::ArrayRef(getLHSExprs().end(), varlist_size());
}
/// Set list of helper reduction expressions, required for proper
@@ -3358,7 +3600,7 @@ class OMPTaskReductionClause final
return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getReductionOps() const {
- return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
+ return llvm::ArrayRef(getRHSExprs().end(), varlist_size());
}
public:
@@ -3548,7 +3790,7 @@ class OMPInReductionClause final
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
- return llvm::makeArrayRef(varlist_end(), varlist_size());
+ return llvm::ArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the clause.
@@ -3561,7 +3803,7 @@ class OMPInReductionClause final
return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
ArrayRef<const Expr *> getLHSExprs() const {
- return llvm::makeArrayRef(getPrivates().end(), varlist_size());
+ return llvm::ArrayRef(getPrivates().end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the clause.
@@ -3575,7 +3817,7 @@ class OMPInReductionClause final
return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getRHSExprs() const {
- return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
+ return llvm::ArrayRef(getLHSExprs().end(), varlist_size());
}
/// Set list of helper reduction expressions, required for proper
@@ -3589,7 +3831,7 @@ class OMPInReductionClause final
return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getReductionOps() const {
- return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
+ return llvm::ArrayRef(getRHSExprs().end(), varlist_size());
}
/// Set list of helper reduction taskgroup descriptors.
@@ -3600,7 +3842,7 @@ class OMPInReductionClause final
return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
}
ArrayRef<const Expr *> getTaskgroupDescriptors() const {
- return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
+ return llvm::ArrayRef(getReductionOps().end(), varlist_size());
}
public:
@@ -3759,6 +4001,9 @@ class OMPLinearClause final
/// Location of ':'.
SourceLocation ColonLoc;
+ /// Location of 'step' modifier.
+ SourceLocation StepModifierLoc;
+
/// Sets the linear step for clause.
void setStep(Expr *Step) { *(getFinals().end()) = Step; }
@@ -3770,16 +4015,18 @@ class OMPLinearClause final
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
+ /// \param StepModifierLoc Location of 'step' modifier.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of variables.
OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
- SourceLocation ColonLoc, SourceLocation EndLoc,
- unsigned NumVars)
+ SourceLocation ColonLoc, SourceLocation StepModifierLoc,
+ SourceLocation EndLoc, unsigned NumVars)
: OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear, StartLoc,
LParenLoc, EndLoc, NumVars),
OMPClauseWithPostUpdate(this), Modifier(Modifier),
- ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {}
+ ModifierLoc(ModifierLoc), ColonLoc(ColonLoc),
+ StepModifierLoc(StepModifierLoc) {}
/// Build an empty clause.
///
@@ -3806,14 +4053,14 @@ class OMPLinearClause final
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
- return llvm::makeArrayRef(varlist_end(), varlist_size());
+ return llvm::ArrayRef(varlist_end(), varlist_size());
}
MutableArrayRef<Expr *> getInits() {
return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
- return llvm::makeArrayRef(getPrivates().end(), varlist_size());
+ return llvm::ArrayRef(getPrivates().end(), varlist_size());
}
/// Sets the list of update expressions for linear variables.
@@ -3821,7 +4068,7 @@ class OMPLinearClause final
return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
}
ArrayRef<const Expr *> getUpdates() const {
- return llvm::makeArrayRef(getInits().end(), varlist_size());
+ return llvm::ArrayRef(getInits().end(), varlist_size());
}
/// Sets the list of final update expressions for linear variables.
@@ -3829,7 +4076,7 @@ class OMPLinearClause final
return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
}
ArrayRef<const Expr *> getFinals() const {
- return llvm::makeArrayRef(getUpdates().end(), varlist_size());
+ return llvm::ArrayRef(getUpdates().end(), varlist_size());
}
/// Gets the list of used expressions for linear variables.
@@ -3837,7 +4084,7 @@ class OMPLinearClause final
return MutableArrayRef<Expr *>(getFinals().end() + 2, varlist_size() + 1);
}
ArrayRef<const Expr *> getUsedExprs() const {
- return llvm::makeArrayRef(getFinals().end() + 2, varlist_size() + 1);
+ return llvm::ArrayRef(getFinals().end() + 2, varlist_size() + 1);
}
/// Sets the list of the copies of original linear variables.
@@ -3858,6 +4105,7 @@ public:
/// \param Modifier Modifier of 'linear' clause.
/// \param ModifierLoc Modifier location.
/// \param ColonLoc Location of ':'.
+ /// \param StepModifierLoc Location of 'step' modifier.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param PL List of private copies of original variables.
@@ -3871,9 +4119,10 @@ public:
static OMPLinearClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
- SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
- ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep,
- Stmt *PreInit, Expr *PostUpdate);
+ SourceLocation ColonLoc, SourceLocation StepModifierLoc,
+ SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PL,
+ ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep, Stmt *PreInit,
+ Expr *PostUpdate);
/// Creates an empty clause with the place for \a NumVars variables.
///
@@ -3896,9 +4145,15 @@ public:
/// Sets the location of ':'.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
+ /// Sets the location of 'step' modifier.
+ void setStepModifierLoc(SourceLocation Loc) { StepModifierLoc = Loc; }
+
/// Returns the location of ':'.
SourceLocation getColonLoc() const { return ColonLoc; }
+ /// Returns the location of 'step' modifier.
+ SourceLocation getStepModifierLoc() const { return StepModifierLoc; }
+
/// Returns linear step.
Expr *getStep() { return *(getFinals().end()); }
@@ -4166,7 +4421,7 @@ class OMPCopyinClause final
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
- return llvm::makeArrayRef(varlist_end(), varlist_size());
+ return llvm::ArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
@@ -4179,7 +4434,7 @@ class OMPCopyinClause final
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
- return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
+ return llvm::ArrayRef(getSourceExprs().end(), varlist_size());
}
/// Set list of helper assignment expressions, required for proper
@@ -4193,7 +4448,7 @@ class OMPCopyinClause final
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
- return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
+ return llvm::ArrayRef(getDestinationExprs().end(), varlist_size());
}
public:
@@ -4331,7 +4586,7 @@ class OMPCopyprivateClause final
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
- return llvm::makeArrayRef(varlist_end(), varlist_size());
+ return llvm::ArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
@@ -4344,7 +4599,7 @@ class OMPCopyprivateClause final
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
- return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
+ return llvm::ArrayRef(getSourceExprs().end(), varlist_size());
}
/// Set list of helper assignment expressions, required for proper
@@ -4358,7 +4613,7 @@ class OMPCopyprivateClause final
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
- return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
+ return llvm::ArrayRef(getDestinationExprs().end(), varlist_size());
}
public:
@@ -4629,14 +4884,24 @@ class OMPDependClause final
friend OMPVarListClause;
friend TrailingObjects;
- /// Dependency type (one of in, out, inout).
- OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
+public:
+ struct DependDataTy final {
+ /// Dependency type (one of in, out, inout).
+ OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
- /// Dependency type location.
- SourceLocation DepLoc;
+ /// Dependency type location.
+ SourceLocation DepLoc;
- /// Colon location.
- SourceLocation ColonLoc;
+ /// Colon location.
+ SourceLocation ColonLoc;
+
+ /// Location of 'omp_all_memory'.
+ SourceLocation OmpAllMemoryLoc;
+ };
+
+private:
+ /// Dependency type and source locations.
+ DependDataTy Data;
/// Number of loops, associated with the depend clause.
unsigned NumLoops = 0;
@@ -4667,13 +4932,16 @@ class OMPDependClause final
NumLoops(NumLoops) {}
/// Set dependency kind.
- void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }
+ void setDependencyKind(OpenMPDependClauseKind K) { Data.DepKind = K; }
/// Set dependency kind and its location.
- void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }
+ void setDependencyLoc(SourceLocation Loc) { Data.DepLoc = Loc; }
/// Set colon location.
- void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
+ void setColonLoc(SourceLocation Loc) { Data.ColonLoc = Loc; }
+
+ /// Set the 'omp_all_memory' location.
+ void setOmpAllMemoryLoc(SourceLocation Loc) { Data.OmpAllMemoryLoc = Loc; }
/// Sets optional dependency modifier.
void setModifier(Expr *DepModifier);
@@ -4685,18 +4953,15 @@ public:
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
- /// \param DepKind Dependency type.
- /// \param DepLoc Location of the dependency type.
- /// \param ColonLoc Colon location.
+ /// \param Data Dependency type and source locations.
/// \param VL List of references to the variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
static OMPDependClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
- SourceLocation EndLoc, Expr *DepModifier,
- OpenMPDependClauseKind DepKind,
- SourceLocation DepLoc, SourceLocation ColonLoc,
- ArrayRef<Expr *> VL, unsigned NumLoops);
+ SourceLocation EndLoc, DependDataTy Data,
+ Expr *DepModifier, ArrayRef<Expr *> VL,
+ unsigned NumLoops);
/// Creates an empty clause with \a N variables.
///
@@ -4708,7 +4973,16 @@ public:
unsigned NumLoops);
/// Get dependency type.
- OpenMPDependClauseKind getDependencyKind() const { return DepKind; }
+ OpenMPDependClauseKind getDependencyKind() const { return Data.DepKind; }
+
+ /// Get dependency type location.
+ SourceLocation getDependencyLoc() const { return Data.DepLoc; }
+
+ /// Get colon location.
+ SourceLocation getColonLoc() const { return Data.ColonLoc; }
+
+ /// Get 'omp_all_memory' location.
+ SourceLocation getOmpAllMemoryLoc() const { return Data.OmpAllMemoryLoc; }
/// Return optional depend modifier.
Expr *getModifier();
@@ -4716,12 +4990,6 @@ public:
return const_cast<OMPDependClause *>(this)->getModifier();
}
- /// Get dependency type location.
- SourceLocation getDependencyLoc() const { return DepLoc; }
-
- /// Get colon location.
- SourceLocation getColonLoc() const { return ColonLoc; }
-
/// Get number of loops associated with the clause.
unsigned getNumLoops() const { return NumLoops; }
@@ -4857,38 +5125,18 @@ public:
/// #pragma omp ordered threads
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'threads' clause.
-class OMPThreadsClause : public OMPClause {
+class OMPThreadsClause final
+ : public OMPNoChildClause<llvm::omp::OMPC_threads> {
public:
/// Build 'threads' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
- : OMPClause(llvm::omp::OMPC_threads, StartLoc, EndLoc) {}
+ : OMPNoChildClause(StartLoc, EndLoc) {}
/// Build an empty clause.
- OMPThreadsClause()
- : OMPClause(llvm::omp::OMPC_threads, SourceLocation(), SourceLocation()) {
- }
-
- child_range children() {
- return child_range(child_iterator(), child_iterator());
- }
-
- const_child_range children() const {
- return const_child_range(const_child_iterator(), const_child_iterator());
- }
-
- child_range used_children() {
- return child_range(child_iterator(), child_iterator());
- }
- const_child_range used_children() const {
- return const_child_range(const_child_iterator(), const_child_iterator());
- }
-
- static bool classof(const OMPClause *T) {
- return T->getClauseKind() == llvm::omp::OMPC_threads;
- }
+ OMPThreadsClause() : OMPNoChildClause() {}
};
/// This represents 'simd' clause in the '#pragma omp ...' directive.
@@ -5273,7 +5521,7 @@ protected:
MutableArrayRef<Expr *> getUDMapperRefs() {
assert(SupportsMapper &&
"Must be a clause that is possible to have user-defined mappers");
- return llvm::makeMutableArrayRef<Expr *>(
+ return llvm::MutableArrayRef<Expr *>(
static_cast<T *>(this)->template getTrailingObjects<Expr *>() +
OMPVarListClause<T>::varlist_size(),
OMPVarListClause<T>::varlist_size());
@@ -5284,7 +5532,7 @@ protected:
ArrayRef<Expr *> getUDMapperRefs() const {
assert(SupportsMapper &&
"Must be a clause that is possible to have user-defined mappers");
- return llvm::makeArrayRef<Expr *>(
+ return llvm::ArrayRef<Expr *>(
static_cast<const T *>(this)->template getTrailingObjects<Expr *>() +
OMPVarListClause<T>::varlist_size(),
OMPVarListClause<T>::varlist_size());
@@ -5483,14 +5731,14 @@ public:
return const_component_lists_iterator(
getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
getComponentsRef(), SupportsMapper,
- SupportsMapper ? getUDMapperRefs() : llvm::None);
+ SupportsMapper ? getUDMapperRefs() : std::nullopt);
}
const_component_lists_iterator component_lists_end() const {
return const_component_lists_iterator(
ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
MappableExprComponentListRef(getComponentsRef().end(),
getComponentsRef().end()),
- SupportsMapper, llvm::None);
+ SupportsMapper, std::nullopt);
}
const_component_lists_range component_lists() const {
return {component_lists_begin(), component_lists_end()};
@@ -5503,7 +5751,7 @@ public:
return const_component_lists_iterator(
VD, getUniqueDeclsRef(), getDeclNumListsRef(),
getComponentListSizesRef(), getComponentsRef(), SupportsMapper,
- SupportsMapper ? getUDMapperRefs() : llvm::None);
+ SupportsMapper ? getUDMapperRefs() : std::nullopt);
}
const_component_lists_iterator decl_component_lists_end() const {
return component_lists_end();
@@ -5593,7 +5841,7 @@ class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
size_t numTrailingObjects(OverloadToken<Expr *>) const {
// There are varlist_size() of expressions, and varlist_size() of
// user-defined mappers.
- return 2 * varlist_size();
+ return 2 * varlist_size() + 1;
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
@@ -5606,6 +5854,7 @@ private:
/// Map-type-modifiers for the 'map' clause.
OpenMPMapModifierKind MapTypeModifiers[NumberOfOMPMapClauseModifiers] = {
OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown,
+ OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown,
OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown};
/// Location of map-type-modifiers for the 'map' clause.
@@ -5654,12 +5903,11 @@ private:
/*SupportsMapper=*/true, &MapperQualifierLoc,
&MapperIdInfo),
MapType(MapType), MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) {
- assert(llvm::array_lengthof(MapTypeModifiers) == MapModifiers.size() &&
+ assert(std::size(MapTypeModifiers) == MapModifiers.size() &&
"Unexpected number of map type modifiers.");
llvm::copy(MapModifiers, std::begin(MapTypeModifiers));
- assert(llvm::array_lengthof(MapTypeModifiersLoc) ==
- MapModifiersLoc.size() &&
+ assert(std::size(MapTypeModifiersLoc) == MapModifiersLoc.size() &&
"Unexpected number of map type modifier locations.");
llvm::copy(MapModifiersLoc, std::begin(MapTypeModifiersLoc));
}
@@ -5708,6 +5956,11 @@ private:
/// Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
+ /// Set iterator modifier.
+ void setIteratorModifier(Expr *IteratorModifier) {
+ getTrailingObjects<Expr *>()[2 * varlist_size()] = IteratorModifier;
+ }
+
public:
/// Creates clause with a list of variables \a VL.
///
@@ -5720,6 +5973,7 @@ public:
/// \param ComponentLists Component lists used in the clause.
/// \param UDMapperRefs References to user-defined mappers associated with
/// expressions used in the clause.
+ /// \param IteratorModifier Iterator modifier.
/// \param MapModifiers Map-type-modifiers.
/// \param MapModifiersLoc Location of map-type-modifiers.
/// \param UDMQualifierLoc C++ nested name specifier for the associated
@@ -5732,7 +5986,7 @@ public:
Create(const ASTContext &C, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists,
- ArrayRef<Expr *> UDMapperRefs,
+ ArrayRef<Expr *> UDMapperRefs, Expr *IteratorModifier,
ArrayRef<OpenMPMapModifierKind> MapModifiers,
ArrayRef<SourceLocation> MapModifiersLoc,
NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId,
@@ -5751,6 +6005,11 @@ public:
static OMPMapClause *CreateEmpty(const ASTContext &C,
const OMPMappableExprListSizeTy &Sizes);
+ /// Fetches Expr * of iterator modifier.
+ Expr *getIteratorModifier() {
+ return getTrailingObjects<Expr *>()[2 * varlist_size()];
+ }
+
/// Fetches mapping kind for the clause.
OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; }
@@ -5782,12 +6041,12 @@ public:
/// Fetches ArrayRef of map-type-modifiers.
ArrayRef<OpenMPMapModifierKind> getMapTypeModifiers() const LLVM_READONLY {
- return llvm::makeArrayRef(MapTypeModifiers);
+ return llvm::ArrayRef(MapTypeModifiers);
}
/// Fetches ArrayRef of location of map-type-modifiers.
ArrayRef<SourceLocation> getMapTypeModifiersLoc() const LLVM_READONLY {
- return llvm::makeArrayRef(MapTypeModifiersLoc);
+ return llvm::ArrayRef(MapTypeModifiersLoc);
}
/// Fetches location of clause mapping kind.
@@ -6065,26 +6324,43 @@ class OMPGrainsizeClause : public OMPClause, public OMPClauseWithPreInit {
/// Location of '('.
SourceLocation LParenLoc;
+ /// Modifiers for 'grainsize' clause.
+ OpenMPGrainsizeClauseModifier Modifier = OMPC_GRAINSIZE_unknown;
+
+ /// Location of the modifier.
+ SourceLocation ModifierLoc;
+
/// Safe iteration space distance.
Stmt *Grainsize = nullptr;
/// Set safelen.
void setGrainsize(Expr *Size) { Grainsize = Size; }
+ /// Sets modifier.
+ void setModifier(OpenMPGrainsizeClauseModifier M) { Modifier = M; }
+
+ /// Sets modifier location.
+ void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }
+
public:
/// Build 'grainsize' clause.
///
+ /// \param Modifier Clause modifier.
/// \param Size Expression associated with this clause.
/// \param HelperSize Helper grainsize for the construct.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
+ /// \param ModifierLoc Modifier location.
+ /// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
- OMPGrainsizeClause(Expr *Size, Stmt *HelperSize,
- OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
- SourceLocation LParenLoc, SourceLocation EndLoc)
+ OMPGrainsizeClause(OpenMPGrainsizeClauseModifier Modifier, Expr *Size,
+ Stmt *HelperSize, OpenMPDirectiveKind CaptureRegion,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation ModifierLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_grainsize, StartLoc, EndLoc),
- OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Grainsize(Size) {
+ OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Modifier(Modifier),
+ ModifierLoc(ModifierLoc), Grainsize(Size) {
setPreInitStmt(HelperSize, CaptureRegion);
}
@@ -6103,6 +6379,12 @@ public:
/// Return safe iteration space distance.
Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); }
+ /// Gets modifier.
+ OpenMPGrainsizeClauseModifier getModifier() const { return Modifier; }
+
+ /// Gets modifier location.
+ SourceLocation getModifierLoc() const { return ModifierLoc; }
+
child_range children() { return child_range(&Grainsize, &Grainsize + 1); }
const_child_range children() const {
@@ -6174,26 +6456,43 @@ class OMPNumTasksClause : public OMPClause, public OMPClauseWithPreInit {
/// Location of '('.
SourceLocation LParenLoc;
+ /// Modifiers for 'num_tasks' clause.
+ OpenMPNumTasksClauseModifier Modifier = OMPC_NUMTASKS_unknown;
+
+ /// Location of the modifier.
+ SourceLocation ModifierLoc;
+
/// Safe iteration space distance.
Stmt *NumTasks = nullptr;
/// Set safelen.
void setNumTasks(Expr *Size) { NumTasks = Size; }
+ /// Sets modifier.
+ void setModifier(OpenMPNumTasksClauseModifier M) { Modifier = M; }
+
+ /// Sets modifier location.
+ void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }
+
public:
/// Build 'num_tasks' clause.
///
+ /// \param Modifier Clause modifier.
/// \param Size Expression associated with this clause.
/// \param HelperSize Helper grainsize for the construct.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
- OMPNumTasksClause(Expr *Size, Stmt *HelperSize,
- OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
- SourceLocation LParenLoc, SourceLocation EndLoc)
+ /// \param ModifierLoc Modifier location.
+ /// \param LParenLoc Location of '('.
+ OMPNumTasksClause(OpenMPNumTasksClauseModifier Modifier, Expr *Size,
+ Stmt *HelperSize, OpenMPDirectiveKind CaptureRegion,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation ModifierLoc, SourceLocation EndLoc)
: OMPClause(llvm::omp::OMPC_num_tasks, StartLoc, EndLoc),
- OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTasks(Size) {
+ OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Modifier(Modifier),
+ ModifierLoc(ModifierLoc), NumTasks(Size) {
setPreInitStmt(HelperSize, CaptureRegion);
}
@@ -6212,6 +6511,12 @@ public:
/// Return safe iteration space distance.
Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }
+ /// Gets modifier.
+ OpenMPNumTasksClauseModifier getModifier() const { return Modifier; }
+
+ /// Gets modifier location.
+ SourceLocation getModifierLoc() const { return ModifierLoc; }
+
child_range children() { return child_range(&NumTasks, &NumTasks + 1); }
const_child_range children() const {
@@ -6576,12 +6881,11 @@ class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
: OMPMappableExprListClause(llvm::omp::OMPC_to, Locs, Sizes,
/*SupportsMapper=*/true, &MapperQualifierLoc,
&MapperIdInfo) {
- assert(llvm::array_lengthof(MotionModifiers) == TheMotionModifiers.size() &&
+ assert(std::size(MotionModifiers) == TheMotionModifiers.size() &&
"Unexpected number of motion modifiers.");
llvm::copy(TheMotionModifiers, std::begin(MotionModifiers));
- assert(llvm::array_lengthof(MotionModifiersLoc) ==
- TheMotionModifiersLoc.size() &&
+ assert(std::size(MotionModifiersLoc) == TheMotionModifiersLoc.size() &&
"Unexpected number of motion modifier locations.");
llvm::copy(TheMotionModifiersLoc, std::begin(MotionModifiersLoc));
}
@@ -6693,12 +6997,12 @@ public:
/// Fetches ArrayRef of motion-modifiers.
ArrayRef<OpenMPMotionModifierKind> getMotionModifiers() const LLVM_READONLY {
- return llvm::makeArrayRef(MotionModifiers);
+ return llvm::ArrayRef(MotionModifiers);
}
/// Fetches ArrayRef of location of motion-modifiers.
ArrayRef<SourceLocation> getMotionModifiersLoc() const LLVM_READONLY {
- return llvm::makeArrayRef(MotionModifiersLoc);
+ return llvm::ArrayRef(MotionModifiersLoc);
}
/// Get colon location.
@@ -6778,12 +7082,11 @@ class OMPFromClause final
: OMPMappableExprListClause(llvm::omp::OMPC_from, Locs, Sizes,
/*SupportsMapper=*/true, &MapperQualifierLoc,
&MapperIdInfo) {
- assert(llvm::array_lengthof(MotionModifiers) == TheMotionModifiers.size() &&
+ assert(std::size(MotionModifiers) == TheMotionModifiers.size() &&
"Unexpected number of motion modifiers.");
llvm::copy(TheMotionModifiers, std::begin(MotionModifiers));
- assert(llvm::array_lengthof(MotionModifiersLoc) ==
- TheMotionModifiersLoc.size() &&
+ assert(std::size(MotionModifiersLoc) == TheMotionModifiersLoc.size() &&
"Unexpected number of motion modifier locations.");
llvm::copy(TheMotionModifiersLoc, std::begin(MotionModifiersLoc));
}
@@ -6894,12 +7197,12 @@ public:
/// Fetches ArrayRef of motion-modifiers.
ArrayRef<OpenMPMotionModifierKind> getMotionModifiers() const LLVM_READONLY {
- return llvm::makeArrayRef(MotionModifiers);
+ return llvm::ArrayRef(MotionModifiers);
}
/// Fetches ArrayRef of location of motion-modifiers.
ArrayRef<SourceLocation> getMotionModifiersLoc() const LLVM_READONLY {
- return llvm::makeArrayRef(MotionModifiersLoc);
+ return llvm::ArrayRef(MotionModifiersLoc);
}
/// Get colon location.
@@ -6994,7 +7297,7 @@ class OMPUseDevicePtrClause final
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
- return llvm::makeArrayRef(varlist_end(), varlist_size());
+ return llvm::ArrayRef(varlist_end(), varlist_size());
}
/// Sets the list of references to initializer variables for new private
@@ -7008,7 +7311,7 @@ class OMPUseDevicePtrClause final
return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
- return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
+ return llvm::ArrayRef(getPrivateCopies().end(), varlist_size());
}
public:
@@ -7298,6 +7601,110 @@ public:
}
};
+/// This represents clause 'has_device_ptr' in the '#pragma omp ...'
+/// directives.
+///
+/// \code
+/// #pragma omp target has_device_addr(a,b)
+/// \endcode
+/// In this example directive '#pragma omp target' has clause
+/// 'has_device_ptr' with the variables 'a' and 'b'.
+class OMPHasDeviceAddrClause final
+ : public OMPMappableExprListClause<OMPHasDeviceAddrClause>,
+ private llvm::TrailingObjects<
+ OMPHasDeviceAddrClause, Expr *, ValueDecl *, unsigned,
+ OMPClauseMappableExprCommon::MappableComponent> {
+ friend class OMPClauseReader;
+ friend OMPMappableExprListClause;
+ friend OMPVarListClause;
+ friend TrailingObjects;
+
+ /// Build clause with number of variables \a NumVars.
+ ///
+ /// \param Locs Locations needed to build a mappable clause. It includes 1)
+ /// StartLoc: starting location of the clause (the clause keyword); 2)
+ /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
+ /// \param Sizes All required sizes to build a mappable clause. It includes 1)
+ /// NumVars: number of expressions listed in this clause; 2)
+ /// NumUniqueDeclarations: number of unique base declarations in this clause;
+ /// 3) NumComponentLists: number of component lists in this clause; and 4)
+ /// NumComponents: total number of expression components in the clause.
+ explicit OMPHasDeviceAddrClause(const OMPVarListLocTy &Locs,
+ const OMPMappableExprListSizeTy &Sizes)
+ : OMPMappableExprListClause(llvm::omp::OMPC_has_device_addr, Locs,
+ Sizes) {}
+
+ /// Build an empty clause.
+ ///
+ /// \param Sizes All required sizes to build a mappable clause. It includes 1)
+ /// NumVars: number of expressions listed in this clause; 2)
+ /// NumUniqueDeclarations: number of unique base declarations in this clause;
+ /// 3) NumComponentLists: number of component lists in this clause; and 4)
+ /// NumComponents: total number of expression components in the clause.
+ explicit OMPHasDeviceAddrClause(const OMPMappableExprListSizeTy &Sizes)
+ : OMPMappableExprListClause(llvm::omp::OMPC_has_device_addr,
+ OMPVarListLocTy(), Sizes) {}
+
+ /// Define the sizes of each trailing object array except the last one. This
+ /// is required for TrailingObjects to work properly.
+ size_t numTrailingObjects(OverloadToken<Expr *>) const {
+ return varlist_size();
+ }
+ size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
+ return getUniqueDeclarationsNum();
+ }
+ size_t numTrailingObjects(OverloadToken<unsigned>) const {
+ return getUniqueDeclarationsNum() + getTotalComponentListNum();
+ }
+
+public:
+ /// Creates clause with a list of variables \a Vars.
+ ///
+ /// \param C AST context.
+ /// \param Locs Locations needed to build a mappable clause. It includes 1)
+ /// StartLoc: starting location of the clause (the clause keyword); 2)
+ /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
+ /// \param Vars The original expression used in the clause.
+ /// \param Declarations Declarations used in the clause.
+ /// \param ComponentLists Component lists used in the clause.
+ static OMPHasDeviceAddrClause *
+ Create(const ASTContext &C, const OMPVarListLocTy &Locs,
+ ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
+ MappableExprComponentListsRef ComponentLists);
+
+ /// Creates an empty clause with the place for \a NumVars variables.
+ ///
+ /// \param C AST context.
+ /// \param Sizes All required sizes to build a mappable clause. It includes 1)
+ /// NumVars: number of expressions listed in this clause; 2)
+ /// NumUniqueDeclarations: number of unique base declarations in this clause;
+ /// 3) NumComponentLists: number of component lists in this clause; and 4)
+ /// NumComponents: total number of expression components in the clause.
+ static OMPHasDeviceAddrClause *
+ CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);
+
+ child_range children() {
+ return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
+ reinterpret_cast<Stmt **>(varlist_end()));
+ }
+
+ const_child_range children() const {
+ auto Children = const_cast<OMPHasDeviceAddrClause *>(this)->children();
+ return const_child_range(Children.begin(), Children.end());
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_has_device_addr;
+ }
+};
+
/// This represents clause 'nontemporal' in the '#pragma omp ...' directives.
///
/// \code
@@ -7338,7 +7745,7 @@ class OMPNontemporalClause final
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateRefs() const {
- return llvm::makeArrayRef(varlist_end(), varlist_size());
+ return llvm::ArrayRef(varlist_end(), varlist_size());
}
public:
@@ -7408,12 +7815,18 @@ class OMPOrderClause final : public OMPClause {
/// Location of '('.
SourceLocation LParenLoc;
- /// A kind of the 'default' clause.
+ /// A kind of the 'order' clause.
OpenMPOrderClauseKind Kind = OMPC_ORDER_unknown;
/// Start location of the kind in source code.
SourceLocation KindKwLoc;
+ /// A modifier for order clause
+ OpenMPOrderClauseModifier Modifier = OMPC_ORDER_MODIFIER_unknown;
+
+ /// Start location of the modifier in source code.
+ SourceLocation ModifierKwLoc;
+
/// Set kind of the clause.
///
/// \param K Argument of clause.
@@ -7424,6 +7837,16 @@ class OMPOrderClause final : public OMPClause {
/// \param KLoc Argument location.
void setKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }
+ /// Set modifier of the clause.
+ ///
+ /// \param M Argument of clause.
+ void setModifier(OpenMPOrderClauseModifier M) { Modifier = M; }
+
+ /// Set modifier location.
+ ///
+ /// \param MLoc Modifier keyword location.
+ void setModifierKwLoc(SourceLocation MLoc) { ModifierKwLoc = MLoc; }
+
public:
/// Build 'order' clause with argument \p A ('concurrent').
///
@@ -7432,11 +7855,15 @@ public:
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
+ /// \param Modifier The modifier applied to 'order' clause.
+ /// \param MLoc Location of the modifier
OMPOrderClause(OpenMPOrderClauseKind A, SourceLocation ALoc,
SourceLocation StartLoc, SourceLocation LParenLoc,
- SourceLocation EndLoc)
+ SourceLocation EndLoc, OpenMPOrderClauseModifier Modifier,
+ SourceLocation MLoc)
: OMPClause(llvm::omp::OMPC_order, StartLoc, EndLoc),
- LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}
+ LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc), Modifier(Modifier),
+ ModifierKwLoc(MLoc) {}
/// Build an empty clause.
OMPOrderClause()
@@ -7454,6 +7881,12 @@ public:
/// Returns location of clause kind.
SourceLocation getKindKwLoc() const { return KindKwLoc; }
+ /// Returns Modifier of the clause.
+ OpenMPOrderClauseModifier getModifier() const { return Modifier; }
+
+ /// Returns location of clause modifier.
+ SourceLocation getModifierKwLoc() const { return ModifierKwLoc; }
+
child_range children() {
return child_range(child_iterator(), child_iterator());
}
@@ -7528,16 +7961,14 @@ public:
///
/// \param C AST context.
/// \param InteropVar The interop variable.
- /// \param PrefExprs The list of preference expressions.
- /// \param IsTarget Uses the 'target' interop-type.
- /// \param IsTargetSync Uses the 'targetsync' interop-type.
+ /// \param InteropInfo The interop-type and prefer_type list.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param VarLoc Location of the interop variable.
/// \param EndLoc Ending location of the clause.
static OMPInitClause *Create(const ASTContext &C, Expr *InteropVar,
- ArrayRef<Expr *> PrefExprs, bool IsTarget,
- bool IsTargetSync, SourceLocation StartLoc,
+ OMPInteropInfo &InteropInfo,
+ SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation VarLoc,
SourceLocation EndLoc);
@@ -7764,21 +8195,13 @@ public:
/// \endcode
/// In this example directive '#pragma omp dispatch' has simple 'novariants'
/// clause with condition 'a > 5'.
-class OMPNovariantsClause final : public OMPClause,
- public OMPClauseWithPreInit {
+class OMPNovariantsClause final
+ : public OMPOneStmtClause<llvm::omp::OMPC_novariants, OMPClause>,
+ public OMPClauseWithPreInit {
friend class OMPClauseReader;
- /// Location of '('.
- SourceLocation LParenLoc;
-
- /// Condition of the 'if' clause.
- Stmt *Condition = nullptr;
-
/// Set condition.
- void setCondition(Expr *Cond) { Condition = Cond; }
-
- /// Sets the location of '('.
- void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+ void setCondition(Expr *Cond) { setStmt(Cond); }
public:
/// Build 'novariants' clause with condition \a Cond.
@@ -7794,38 +8217,22 @@ public:
OpenMPDirectiveKind CaptureRegion,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
- : OMPClause(llvm::omp::OMPC_novariants, StartLoc, EndLoc),
- OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) {
+ : OMPOneStmtClause(Cond, StartLoc, LParenLoc, EndLoc),
+ OMPClauseWithPreInit(this) {
setPreInitStmt(HelperCond, CaptureRegion);
}
/// Build an empty clause.
- OMPNovariantsClause()
- : OMPClause(llvm::omp::OMPC_novariants, SourceLocation(),
- SourceLocation()),
- OMPClauseWithPreInit(this) {}
-
- /// Returns the location of '('.
- SourceLocation getLParenLoc() const { return LParenLoc; }
+ OMPNovariantsClause() : OMPOneStmtClause(), OMPClauseWithPreInit(this) {}
/// Returns condition.
- Expr *getCondition() const { return cast_or_null<Expr>(Condition); }
-
- child_range children() { return child_range(&Condition, &Condition + 1); }
-
- const_child_range children() const {
- return const_child_range(&Condition, &Condition + 1);
- }
+ Expr *getCondition() const { return getStmtAs<Expr>(); }
child_range used_children();
const_child_range used_children() const {
auto Children = const_cast<OMPNovariantsClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
-
- static bool classof(const OMPClause *T) {
- return T->getClauseKind() == llvm::omp::OMPC_novariants;
- }
};
/// This represents 'nocontext' clause in the '#pragma omp ...' directive.
@@ -7835,20 +8242,13 @@ public:
/// \endcode
/// In this example directive '#pragma omp dispatch' has simple 'nocontext'
/// clause with condition 'a > 5'.
-class OMPNocontextClause final : public OMPClause, public OMPClauseWithPreInit {
+class OMPNocontextClause final
+ : public OMPOneStmtClause<llvm::omp::OMPC_nocontext, OMPClause>,
+ public OMPClauseWithPreInit {
friend class OMPClauseReader;
- /// Location of '('.
- SourceLocation LParenLoc;
-
- /// Condition of the 'if' clause.
- Stmt *Condition = nullptr;
-
/// Set condition.
- void setCondition(Expr *Cond) { Condition = Cond; }
-
- /// Sets the location of '('.
- void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+ void setCondition(Expr *Cond) { setStmt(Cond); }
public:
/// Build 'nocontext' clause with condition \a Cond.
@@ -7863,38 +8263,22 @@ public:
OMPNocontextClause(Expr *Cond, Stmt *HelperCond,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
- : OMPClause(llvm::omp::OMPC_nocontext, StartLoc, EndLoc),
- OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) {
+ : OMPOneStmtClause(Cond, StartLoc, LParenLoc, EndLoc),
+ OMPClauseWithPreInit(this) {
setPreInitStmt(HelperCond, CaptureRegion);
}
/// Build an empty clause.
- OMPNocontextClause()
- : OMPClause(llvm::omp::OMPC_nocontext, SourceLocation(),
- SourceLocation()),
- OMPClauseWithPreInit(this) {}
-
- /// Returns the location of '('.
- SourceLocation getLParenLoc() const { return LParenLoc; }
+ OMPNocontextClause() : OMPOneStmtClause(), OMPClauseWithPreInit(this) {}
/// Returns condition.
- Expr *getCondition() const { return cast_or_null<Expr>(Condition); }
-
- child_range children() { return child_range(&Condition, &Condition + 1); }
-
- const_child_range children() const {
- return const_child_range(&Condition, &Condition + 1);
- }
+ Expr *getCondition() const { return getStmtAs<Expr>(); }
child_range used_children();
const_child_range used_children() const {
auto Children = const_cast<OMPNocontextClause *>(this)->used_children();
return const_child_range(Children.begin(), Children.end());
}
-
- static bool classof(const OMPClause *T) {
- return T->getClauseKind() == llvm::omp::OMPC_nocontext;
- }
};
/// This represents 'detach' clause in the '#pragma omp task' directive.
@@ -7904,20 +8288,12 @@ public:
/// \endcode
/// In this example directive '#pragma omp detach' has simple 'detach' clause
/// with the variable 'evt'.
-class OMPDetachClause final : public OMPClause {
+class OMPDetachClause final
+ : public OMPOneStmtClause<llvm::omp::OMPC_detach, OMPClause> {
friend class OMPClauseReader;
- /// Location of '('.
- SourceLocation LParenLoc;
-
- /// Expression of the 'detach' clause.
- Stmt *Evt = nullptr;
-
/// Set condition.
- void setEventHandler(Expr *E) { Evt = E; }
-
- /// Sets the location of '('.
- void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+ void setEventHandler(Expr *E) { setStmt(E); }
public:
/// Build 'detach' clause with event-handler \a Evt.
@@ -7928,35 +8304,13 @@ public:
/// \param EndLoc Ending location of the clause.
OMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
- : OMPClause(llvm::omp::OMPC_detach, StartLoc, EndLoc),
- LParenLoc(LParenLoc), Evt(Evt) {}
+ : OMPOneStmtClause(Evt, StartLoc, LParenLoc, EndLoc) {}
/// Build an empty clause.
- OMPDetachClause()
- : OMPClause(llvm::omp::OMPC_detach, SourceLocation(), SourceLocation()) {}
-
- /// Returns the location of '('.
- SourceLocation getLParenLoc() const { return LParenLoc; }
+ OMPDetachClause() : OMPOneStmtClause() {}
/// Returns event-handler expression.
- Expr *getEventHandler() const { return cast_or_null<Expr>(Evt); }
-
- child_range children() { return child_range(&Evt, &Evt + 1); }
-
- const_child_range children() const {
- return const_child_range(&Evt, &Evt + 1);
- }
-
- child_range used_children() {
- return child_range(child_iterator(), child_iterator());
- }
- const_child_range used_children() const {
- return const_child_range(const_child_iterator(), const_child_iterator());
- }
-
- static bool classof(const OMPClause *T) {
- return T->getClauseKind() == llvm::omp::OMPC_detach;
- }
+ Expr *getEventHandler() const { return getStmtAs<Expr>(); }
};
/// This represents clause 'inclusive' in the '#pragma omp scan' directive.
@@ -8156,14 +8510,14 @@ private:
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
- /// \param N Number of allocators asssociated with the clause.
+ /// \param N Number of allocators associated with the clause.
OMPUsesAllocatorsClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPClause(llvm::omp::OMPC_uses_allocators, StartLoc, EndLoc),
LParenLoc(LParenLoc), NumOfAllocators(N) {}
/// Build an empty clause.
- /// \param N Number of allocators asssociated with the clause.
+ /// \param N Number of allocators associated with the clause.
///
explicit OMPUsesAllocatorsClause(unsigned N)
: OMPClause(llvm::omp::OMPC_uses_allocators, SourceLocation(),
@@ -8257,14 +8611,14 @@ class OMPAffinityClause final
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
- /// \param N Number of locators asssociated with the clause.
+ /// \param N Number of locators associated with the clause.
OMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity, StartLoc,
LParenLoc, EndLoc, N) {}
/// Build an empty clause.
- /// \param N Number of locators asssociated with the clause.
+ /// \param N Number of locators associated with the clause.
///
explicit OMPAffinityClause(unsigned N)
: OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity,
@@ -8340,20 +8694,13 @@ public:
/// \endcode
/// In this example directive '#pragma omp masked' has 'filter' clause with
/// thread id.
-class OMPFilterClause final : public OMPClause, public OMPClauseWithPreInit {
+class OMPFilterClause final
+ : public OMPOneStmtClause<llvm::omp::OMPC_filter, OMPClause>,
+ public OMPClauseWithPreInit {
friend class OMPClauseReader;
- /// Location of '('.
- SourceLocation LParenLoc;
-
- /// Express of the 'filter' clause.
- Stmt *ThreadID = nullptr;
-
/// Sets the thread identifier.
- void setThreadID(Expr *TID) { ThreadID = TID; }
-
- /// Sets the location of '('.
- void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+ void setThreadID(Expr *TID) { setStmt(TID); }
public:
/// Build 'filter' clause with thread-id \a ThreadID.
@@ -8368,40 +8715,89 @@ public:
OMPFilterClause(Expr *ThreadID, Stmt *HelperE,
OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
- : OMPClause(llvm::omp::OMPC_filter, StartLoc, EndLoc),
- OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadID(ThreadID) {
+ : OMPOneStmtClause(ThreadID, StartLoc, LParenLoc, EndLoc),
+ OMPClauseWithPreInit(this) {
setPreInitStmt(HelperE, CaptureRegion);
}
/// Build an empty clause.
- OMPFilterClause()
- : OMPClause(llvm::omp::OMPC_filter, SourceLocation(), SourceLocation()),
- OMPClauseWithPreInit(this) {}
- /// Returns the location of '('.
- SourceLocation getLParenLoc() const { return LParenLoc; }
+ OMPFilterClause() : OMPOneStmtClause(), OMPClauseWithPreInit(this) {}
/// Return thread identifier.
- Expr *getThreadID() { return cast<Expr>(ThreadID); }
+ Expr *getThreadID() const { return getStmtAs<Expr>(); }
/// Return thread identifier.
- Expr *getThreadID() const { return cast<Expr>(ThreadID); }
+ Expr *getThreadID() { return getStmtAs<Expr>(); }
+};
+
+/// This represents 'bind' clause in the '#pragma omp ...' directives.
+///
+/// \code
+/// #pragma omp loop bind(parallel)
+/// \endcode
+class OMPBindClause final : public OMPNoChildClause<llvm::omp::OMPC_bind> {
+ friend class OMPClauseReader;
- child_range children() { return child_range(&ThreadID, &ThreadID + 1); }
+ /// Location of '('.
+ SourceLocation LParenLoc;
- const_child_range children() const {
- return const_child_range(&ThreadID, &ThreadID + 1);
- }
+ /// The binding kind of 'bind' clause.
+ OpenMPBindClauseKind Kind = OMPC_BIND_unknown;
- child_range used_children() {
- return child_range(child_iterator(), child_iterator());
- }
- const_child_range used_children() const {
- return const_child_range(const_child_iterator(), const_child_iterator());
- }
+ /// Start location of the kind in source code.
+ SourceLocation KindLoc;
- static bool classof(const OMPClause *T) {
- return T->getClauseKind() == llvm::omp::OMPC_filter;
- }
+ /// Sets the location of '('.
+ void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+
+ /// Set the binding kind.
+ void setBindKind(OpenMPBindClauseKind K) { Kind = K; }
+
+ /// Set the binding kind location.
+ void setBindKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }
+
+ /// Build 'bind' clause with kind \a K ('teams', 'parallel', or 'thread').
+ ///
+ /// \param K Binding kind of the clause ('teams', 'parallel' or 'thread').
+ /// \param KLoc Starting location of the binding kind.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ OMPBindClause(OpenMPBindClauseKind K, SourceLocation KLoc,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc)
+ : OMPNoChildClause(StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(K),
+ KindLoc(KLoc) {}
+
+ /// Build an empty clause.
+ OMPBindClause() : OMPNoChildClause() {}
+
+public:
+ /// Build 'bind' clause with kind \a K ('teams', 'parallel', or 'thread').
+ ///
+ /// \param C AST context
+ /// \param K Binding kind of the clause ('teams', 'parallel' or 'thread').
+ /// \param KLoc Starting location of the binding kind.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ static OMPBindClause *Create(const ASTContext &C, OpenMPBindClauseKind K,
+ SourceLocation KLoc, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc);
+
+ /// Build an empty 'bind' clause.
+ ///
+ /// \param C AST context
+ static OMPBindClause *CreateEmpty(const ASTContext &C);
+
+ /// Returns the location of '('.
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+
+ /// Returns kind of the clause.
+ OpenMPBindClauseKind getBindKind() const { return Kind; }
+
+ /// Returns location of clause kind.
+ SourceLocation getBindKindLoc() const { return KindLoc; }
};
/// This class implements a simple visitor for OMPClause
@@ -8546,10 +8942,11 @@ llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo *TI);
/// Clang specific specialization of the OMPContext to lookup target features.
struct TargetOMPContext final : public llvm::omp::OMPContext {
-
TargetOMPContext(ASTContext &ASTCtx,
std::function<void(StringRef)> &&DiagUnknownTrait,
- const FunctionDecl *CurrentFunctionDecl);
+ const FunctionDecl *CurrentFunctionDecl,
+ ArrayRef<llvm::omp::TraitProperty> ConstructTraits);
+
virtual ~TargetOMPContext() = default;
/// See llvm::omp::OMPContext::matchesISATrait
@@ -8630,7 +9027,7 @@ public:
/// Get the clauses storage.
MutableArrayRef<OMPClause *> getClauses() {
- return llvm::makeMutableArrayRef(getTrailingObjects<OMPClause *>(),
+ return llvm::MutableArrayRef(getTrailingObjects<OMPClause *>(),
NumClauses);
}
ArrayRef<OMPClause *> getClauses() const {
@@ -8644,9 +9041,7 @@ public:
const CapturedStmt *
getCapturedStmt(OpenMPDirectiveKind RegionKind,
ArrayRef<OpenMPDirectiveKind> CaptureRegions) const {
- assert(llvm::any_of(
- CaptureRegions,
- [=](const OpenMPDirectiveKind K) { return K == RegionKind; }) &&
+ assert(llvm::is_contained(CaptureRegions, RegionKind) &&
"RegionKind not found in OpenMP CaptureRegions.");
auto *CS = cast<CapturedStmt>(getAssociatedStmt());
for (auto ThisCaptureRegion : CaptureRegions) {
@@ -8705,6 +9100,243 @@ public:
}
};
+/// This represents 'ompx_dyn_cgroup_mem' clause in the '#pragma omp target ...'
+/// directive.
+///
+/// \code
+/// #pragma omp target [...] ompx_dyn_cgroup_mem(N)
+/// \endcode
+class OMPXDynCGroupMemClause
+ : public OMPOneStmtClause<llvm::omp::OMPC_ompx_dyn_cgroup_mem, OMPClause>,
+ public OMPClauseWithPreInit {
+ friend class OMPClauseReader;
+
+ /// Set size.
+ void setSize(Expr *E) { setStmt(E); }
+
+public:
+ /// Build 'ompx_dyn_cgroup_mem' clause.
+ ///
+ /// \param Size Size expression.
+ /// \param HelperSize Helper Size expression
+ /// \param CaptureRegion Innermost OpenMP region where expressions in this
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ OMPXDynCGroupMemClause(Expr *Size, Stmt *HelperSize,
+ OpenMPDirectiveKind CaptureRegion,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc)
+ : OMPOneStmtClause(Size, StartLoc, LParenLoc, EndLoc),
+ OMPClauseWithPreInit(this) {
+ setPreInitStmt(HelperSize, CaptureRegion);
+ }
+
+ /// Build an empty clause.
+ OMPXDynCGroupMemClause() : OMPOneStmtClause(), OMPClauseWithPreInit(this) {}
+
+ /// Return the size expression.
+ Expr *getSize() { return getStmtAs<Expr>(); }
+
+ /// Return the size expression.
+ Expr *getSize() const { return getStmtAs<Expr>(); }
+};
+
+/// This represents the 'doacross' clause for the '#pragma omp ordered'
+/// directive.
+///
+/// \code
+/// #pragma omp ordered doacross(sink: i-1, j-1)
+/// \endcode
+/// In this example directive '#pragma omp ordered' with clause 'doacross' with
+/// a dependence-type 'sink' and loop-iteration vector expressions i-1 and j-1.
+class OMPDoacrossClause final
+ : public OMPVarListClause<OMPDoacrossClause>,
+ private llvm::TrailingObjects<OMPDoacrossClause, Expr *> {
+ friend class OMPClauseReader;
+ friend OMPVarListClause;
+ friend TrailingObjects;
+
+ /// Dependence type (sink or source).
+ OpenMPDoacrossClauseModifier DepType = OMPC_DOACROSS_unknown;
+
+ /// Dependence type location.
+ SourceLocation DepLoc;
+
+ /// Colon location.
+ SourceLocation ColonLoc;
+
+ /// Number of loops, associated with the doacross clause.
+ unsigned NumLoops = 0;
+
+ /// Build clause with number of expressions \a N.
+ ///
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ /// \param N Number of expressions in the clause.
+ /// \param NumLoops Number of loops associated with the clause.
+ OMPDoacrossClause(SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc, unsigned N, unsigned NumLoops)
+ : OMPVarListClause<OMPDoacrossClause>(llvm::omp::OMPC_doacross, StartLoc,
+ LParenLoc, EndLoc, N),
+ NumLoops(NumLoops) {}
+
+ /// Build an empty clause.
+ ///
+ /// \param N Number of expressions in the clause.
+ /// \param NumLoops Number of loops associated with the clause.
+ explicit OMPDoacrossClause(unsigned N, unsigned NumLoops)
+ : OMPVarListClause<OMPDoacrossClause>(llvm::omp::OMPC_doacross,
+ SourceLocation(), SourceLocation(),
+ SourceLocation(), N),
+ NumLoops(NumLoops) {}
+
+ /// Set dependence type.
+ void setDependenceType(OpenMPDoacrossClauseModifier M) { DepType = M; }
+
+ /// Set dependence type location.
+ void setDependenceLoc(SourceLocation Loc) { DepLoc = Loc; }
+
+ /// Set colon location.
+ void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
+
+public:
+ /// Creates clause with a list of expressions \a VL.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ /// \param DepType The dependence type.
+ /// \param DepLoc Location of the dependence type.
+ /// \param ColonLoc Location of ':'.
+ /// \param VL List of references to the expressions.
+ /// \param NumLoops Number of loops that associated with the clause.
+ static OMPDoacrossClause *
+ Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc, OpenMPDoacrossClauseModifier DepType,
+ SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL,
+ unsigned NumLoops);
+
+ /// Creates an empty clause with \a N expressions.
+ ///
+ /// \param C AST context.
+ /// \param N The number of expressions.
+ /// \param NumLoops Number of loops that is associated with this clause.
+ static OMPDoacrossClause *CreateEmpty(const ASTContext &C, unsigned N,
+ unsigned NumLoops);
+
+ /// Get dependence type.
+ OpenMPDoacrossClauseModifier getDependenceType() const { return DepType; }
+
+ /// Get dependence type location.
+ SourceLocation getDependenceLoc() const { return DepLoc; }
+
+ /// Get colon location.
+ SourceLocation getColonLoc() const { return ColonLoc; }
+
+ /// Get number of loops associated with the clause.
+ unsigned getNumLoops() const { return NumLoops; }
+
+ /// Set the loop data.
+ void setLoopData(unsigned NumLoop, Expr *Cnt);
+
+ /// Get the loop data.
+ Expr *getLoopData(unsigned NumLoop);
+ const Expr *getLoopData(unsigned NumLoop) const;
+
+ child_range children() {
+ return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
+ reinterpret_cast<Stmt **>(varlist_end()));
+ }
+
+ const_child_range children() const {
+ auto Children = const_cast<OMPDoacrossClause *>(this)->children();
+ return const_child_range(Children.begin(), Children.end());
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_doacross;
+ }
+};
+
+/// This represents 'ompx_attribute' clause in a directive that might generate
+/// an outlined function. An example is given below.
+///
+/// \code
+/// #pragma omp target [...] ompx_attribute(flatten)
+/// \endcode
+class OMPXAttributeClause
+ : public OMPNoChildClause<llvm::omp::OMPC_ompx_attribute> {
+ friend class OMPClauseReader;
+
+ /// Location of '('.
+ SourceLocation LParenLoc;
+
+ /// The parsed attributes (clause arguments)
+ SmallVector<const Attr *> Attrs;
+
+public:
+ /// Build 'ompx_attribute' clause.
+ ///
+ /// \param Attrs The parsed attributes (clause arguments)
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ OMPXAttributeClause(ArrayRef<const Attr *> Attrs, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc)
+ : OMPNoChildClause(StartLoc, EndLoc), LParenLoc(LParenLoc), Attrs(Attrs) {
+ }
+
+ /// Build an empty clause.
+ OMPXAttributeClause() : OMPNoChildClause() {}
+
+ /// Sets the location of '('.
+ void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+
+ /// Returns the location of '('.
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+
+ /// Returned the attributes parsed from this clause.
+ ArrayRef<const Attr *> getAttrs() const { return Attrs; }
+
+private:
+ /// Replace the attributes with \p NewAttrs.
+ void setAttrs(ArrayRef<Attr *> NewAttrs) {
+ Attrs.clear();
+ Attrs.append(NewAttrs.begin(), NewAttrs.end());
+ }
+};
+
+/// This represents 'ompx_bare' clause in the '#pragma omp target teams ...'
+/// directive.
+///
+/// \code
+/// #pragma omp target teams ompx_bare
+/// \endcode
+/// In this example directive '#pragma omp target teams' has a 'ompx_bare'
+/// clause.
+class OMPXBareClause : public OMPNoChildClause<llvm::omp::OMPC_ompx_bare> {
+public:
+ /// Build 'ompx_bare' clause.
+ ///
+ /// \param StartLoc Starting location of the clause.
+ /// \param EndLoc Ending location of the clause.
+ OMPXBareClause(SourceLocation StartLoc, SourceLocation EndLoc)
+ : OMPNoChildClause(StartLoc, EndLoc) {}
+
+ /// Build an empty clause.
+ OMPXBareClause() = default;
+};
+
} // namespace clang
#endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
diff --git a/contrib/llvm-project/clang/include/clang/AST/OperationKinds.def b/contrib/llvm-project/clang/include/clang/AST/OperationKinds.def
index b05b9d81569e..8dd98730dff7 100644
--- a/contrib/llvm-project/clang/include/clang/AST/OperationKinds.def
+++ b/contrib/llvm-project/clang/include/clang/AST/OperationKinds.def
@@ -80,6 +80,7 @@ CAST_OPERATION(LValueToRValue)
/// (possibly) adding qualifiers or removing noexcept.
/// int -> int
/// char** -> const char * const *
+/// int[1] -> int[]
/// void () noexcept -> void ()
CAST_OPERATION(NoOp)
@@ -362,8 +363,8 @@ CAST_OPERATION(IntToOCLSampler)
//===- Binary Operations -------------------------------------------------===//
// Operators listed in order of precedence.
-// Note that additions to this should also update the StmtVisitor class and
-// BinaryOperator::getOverloadedOperator.
+// Note that additions to this should also update the StmtVisitor class,
+// BinaryOperator::getOverloadedOperator and CXBinaryOperatorKind enum.
// [C++ 5.5] Pointer-to-member operators.
BINARY_OPERATION(PtrMemD, ".*")
@@ -415,8 +416,8 @@ BINARY_OPERATION(Comma, ",")
//===- Unary Operations ---------------------------------------------------===//
-// Note that additions to this should also update the StmtVisitor class and
-// UnaryOperator::getOverloadedOperator.
+// Note that additions to this should also update the StmtVisitor class,
+// UnaryOperator::getOverloadedOperator and CXUnaryOperatorKind enum.
// [C99 6.5.2.4] Postfix increment and decrement
UNARY_OPERATION(PostInc, "++")
diff --git a/contrib/llvm-project/clang/include/clang/AST/ParentMapContext.h b/contrib/llvm-project/clang/include/clang/AST/ParentMapContext.h
index 2edbc987850d..d3b2e3986a99 100644
--- a/contrib/llvm-project/clang/include/clang/AST/ParentMapContext.h
+++ b/contrib/llvm-project/clang/include/clang/AST/ParentMapContext.h
@@ -77,7 +77,7 @@ class TraversalKindScope {
TraversalKind TK = TK_AsIs;
public:
- TraversalKindScope(ASTContext &ASTCtx, llvm::Optional<TraversalKind> ScopeTK)
+ TraversalKindScope(ASTContext &ASTCtx, std::optional<TraversalKind> ScopeTK)
: Ctx(ASTCtx.getParentMapContext()) {
TK = Ctx.getTraversalKind();
if (ScopeTK)
@@ -90,29 +90,27 @@ public:
/// Container for either a single DynTypedNode or for an ArrayRef to
/// DynTypedNode. For use with ParentMap.
class DynTypedNodeList {
- llvm::AlignedCharArrayUnion<DynTypedNode, ArrayRef<DynTypedNode>> Storage;
+ union {
+ DynTypedNode SingleNode;
+ ArrayRef<DynTypedNode> Nodes;
+ };
bool IsSingleNode;
public:
DynTypedNodeList(const DynTypedNode &N) : IsSingleNode(true) {
- new (&Storage) DynTypedNode(N);
+ new (&SingleNode) DynTypedNode(N);
}
DynTypedNodeList(ArrayRef<DynTypedNode> A) : IsSingleNode(false) {
- new (&Storage) ArrayRef<DynTypedNode>(A);
+ new (&Nodes) ArrayRef<DynTypedNode>(A);
}
const DynTypedNode *begin() const {
- if (!IsSingleNode)
- return reinterpret_cast<const ArrayRef<DynTypedNode> *>(&Storage)
- ->begin();
- return reinterpret_cast<const DynTypedNode *>(&Storage);
+ return !IsSingleNode ? Nodes.begin() : &SingleNode;
}
const DynTypedNode *end() const {
- if (!IsSingleNode)
- return reinterpret_cast<const ArrayRef<DynTypedNode> *>(&Storage)->end();
- return reinterpret_cast<const DynTypedNode *>(&Storage) + 1;
+ return !IsSingleNode ? Nodes.end() : &SingleNode + 1;
}
size_t size() const { return end() - begin(); }
diff --git a/contrib/llvm-project/clang/include/clang/AST/PrettyDeclStackTrace.h b/contrib/llvm-project/clang/include/clang/AST/PrettyDeclStackTrace.h
index 899bbcb3be45..82df031d4126 100644
--- a/contrib/llvm-project/clang/include/clang/AST/PrettyDeclStackTrace.h
+++ b/contrib/llvm-project/clang/include/clang/AST/PrettyDeclStackTrace.h
@@ -22,7 +22,6 @@ namespace clang {
class ASTContext;
class Decl;
-class SourceManager;
/// PrettyDeclStackTraceEntry - If a crash occurs in the parser while
/// parsing something related to a declaration, include that
diff --git a/contrib/llvm-project/clang/include/clang/AST/PrettyPrinter.h b/contrib/llvm-project/clang/include/clang/AST/PrettyPrinter.h
index 3baf2b2ba94d..da276e26049b 100644
--- a/contrib/llvm-project/clang/include/clang/AST/PrettyPrinter.h
+++ b/contrib/llvm-project/clang/include/clang/AST/PrettyPrinter.h
@@ -20,9 +20,7 @@ namespace clang {
class DeclContext;
class LangOptions;
-class SourceManager;
class Stmt;
-class TagDecl;
class PrinterHelper {
public:
@@ -62,19 +60,24 @@ struct PrintingPolicy {
: Indentation(2), SuppressSpecifiers(false),
SuppressTagKeyword(LO.CPlusPlus), IncludeTagDefinition(false),
SuppressScope(false), SuppressUnwrittenScope(false),
- SuppressInlineNamespace(true), SuppressInitializers(false),
- ConstantArraySizeAsWritten(false), AnonymousTagLocations(true),
- SuppressStrongLifetime(false), SuppressLifetimeQualifiers(false),
+ SuppressInlineNamespace(true), SuppressElaboration(false),
+ SuppressInitializers(false), ConstantArraySizeAsWritten(false),
+ AnonymousTagLocations(true), SuppressStrongLifetime(false),
+ SuppressLifetimeQualifiers(false),
SuppressTemplateArgsInCXXConstructors(false),
SuppressDefaultTemplateArgs(true), Bool(LO.Bool),
- Nullptr(LO.CPlusPlus11), Restrict(LO.C99), Alignof(LO.CPlusPlus11),
- UnderscoreAlignof(LO.C11), UseVoidForZeroParams(!LO.CPlusPlus),
+ Nullptr(LO.CPlusPlus11 || LO.C23), NullptrTypeInNamespace(LO.CPlusPlus),
+ Restrict(LO.C99), Alignof(LO.CPlusPlus11), UnderscoreAlignof(LO.C11),
+ UseVoidForZeroParams(!LO.CPlusPlus),
SplitTemplateClosers(!LO.CPlusPlus11), TerseOutput(false),
PolishForDeclaration(false), Half(LO.Half),
MSWChar(LO.MicrosoftExt && !LO.WChar), IncludeNewlines(true),
MSVCFormatting(false), ConstantsAsWritten(false),
SuppressImplicitBase(false), FullyQualifiedName(false),
- PrintCanonicalTypes(false), PrintInjectedClassNameWithArguments(true) {}
+ PrintCanonicalTypes(false), PrintInjectedClassNameWithArguments(true),
+ UsePreferredNames(true), AlwaysIncludeTypeForTemplateArgument(false),
+ CleanUglifiedParameters(false), EntireContentsOfLargeArray(true),
+ UseEnumerators(true) {}
/// Adjust this printing policy for cases where it's known that we're
/// printing C++ code (for instance, if AST dumping reaches a C++-only
@@ -103,6 +106,7 @@ struct PrintingPolicy {
/// declaration for "x", so that we will print "int *x"; it will be
/// \c true when we print "y", so that we suppress printing the
/// "const int" type specifier and instead only print the "*y".
+ LLVM_PREFERRED_TYPE(bool)
unsigned SuppressSpecifiers : 1;
/// Whether type printing should skip printing the tag keyword.
@@ -113,6 +117,7 @@ struct PrintingPolicy {
/// \code
/// struct Geometry::Point;
/// \endcode
+ LLVM_PREFERRED_TYPE(bool)
unsigned SuppressTagKeyword : 1;
/// When true, include the body of a tag definition.
@@ -123,20 +128,29 @@ struct PrintingPolicy {
/// \code
/// typedef struct { int x, y; } Point;
/// \endcode
+ LLVM_PREFERRED_TYPE(bool)
unsigned IncludeTagDefinition : 1;
/// Suppresses printing of scope specifiers.
+ LLVM_PREFERRED_TYPE(bool)
unsigned SuppressScope : 1;
/// Suppress printing parts of scope specifiers that are never
/// written, e.g., for anonymous namespaces.
+ LLVM_PREFERRED_TYPE(bool)
unsigned SuppressUnwrittenScope : 1;
/// Suppress printing parts of scope specifiers that correspond
/// to inline namespaces, where the name is unambiguous with the specifier
/// removed.
+ LLVM_PREFERRED_TYPE(bool)
unsigned SuppressInlineNamespace : 1;
+ /// Ignore qualifiers and tag keywords as specified by elaborated type sugar,
+ /// instead letting the underlying type print as normal.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned SuppressElaboration : 1;
+
/// Suppress printing of variable initializers.
///
/// This flag is used when printing the loop variable in a for-range
@@ -148,6 +162,7 @@ struct PrintingPolicy {
///
/// SuppressInitializers will be true when printing "auto x", so that the
/// internal initializer constructed for x will not be printed.
+ LLVM_PREFERRED_TYPE(bool)
unsigned SuppressInitializers : 1;
/// Whether we should print the sizes of constant array expressions as written
@@ -166,50 +181,67 @@ struct PrintingPolicy {
/// int a[104];
/// char a[9] = "A string";
/// \endcode
+ LLVM_PREFERRED_TYPE(bool)
unsigned ConstantArraySizeAsWritten : 1;
/// When printing an anonymous tag name, also print the location of that
/// entity (e.g., "enum <anonymous at t.h:10:5>"). Otherwise, just prints
/// "(anonymous)" for the name.
+ LLVM_PREFERRED_TYPE(bool)
unsigned AnonymousTagLocations : 1;
/// When true, suppress printing of the __strong lifetime qualifier in ARC.
+ LLVM_PREFERRED_TYPE(bool)
unsigned SuppressStrongLifetime : 1;
/// When true, suppress printing of lifetime qualifier in ARC.
+ LLVM_PREFERRED_TYPE(bool)
unsigned SuppressLifetimeQualifiers : 1;
/// When true, suppresses printing template arguments in names of C++
/// constructors.
+ LLVM_PREFERRED_TYPE(bool)
unsigned SuppressTemplateArgsInCXXConstructors : 1;
/// When true, attempt to suppress template arguments that match the default
/// argument for the parameter.
+ LLVM_PREFERRED_TYPE(bool)
unsigned SuppressDefaultTemplateArgs : 1;
/// Whether we can use 'bool' rather than '_Bool' (even if the language
/// doesn't actually have 'bool', because, e.g., it is defined as a macro).
+ LLVM_PREFERRED_TYPE(bool)
unsigned Bool : 1;
/// Whether we should use 'nullptr' rather than '0' as a null pointer
/// constant.
+ LLVM_PREFERRED_TYPE(bool)
unsigned Nullptr : 1;
+ /// Whether 'nullptr_t' is in namespace 'std' or not.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned NullptrTypeInNamespace : 1;
+
/// Whether we can use 'restrict' rather than '__restrict'.
+ LLVM_PREFERRED_TYPE(bool)
unsigned Restrict : 1;
/// Whether we can use 'alignof' rather than '__alignof'.
+ LLVM_PREFERRED_TYPE(bool)
unsigned Alignof : 1;
/// Whether we can use '_Alignof' rather than '__alignof'.
+ LLVM_PREFERRED_TYPE(bool)
unsigned UnderscoreAlignof : 1;
/// Whether we should use '(void)' rather than '()' for a function prototype
/// with zero parameters.
+ LLVM_PREFERRED_TYPE(bool)
unsigned UseVoidForZeroParams : 1;
/// Whether nested templates must be closed like 'a\<b\<c\> \>' rather than
/// 'a\<b\<c\>\>'.
+ LLVM_PREFERRED_TYPE(bool)
unsigned SplitTemplateClosers : 1;
/// Provide a 'terse' output.
@@ -217,27 +249,33 @@ struct PrintingPolicy {
/// For example, in this mode we don't print function bodies, class members,
/// declarations inside namespaces etc. Effectively, this should print
/// only the requested declaration.
+ LLVM_PREFERRED_TYPE(bool)
unsigned TerseOutput : 1;
/// When true, do certain refinement needed for producing proper declaration
/// tag; such as, do not print attributes attached to the declaration.
///
+ LLVM_PREFERRED_TYPE(bool)
unsigned PolishForDeclaration : 1;
/// When true, print the half-precision floating-point type as 'half'
/// instead of '__fp16'
+ LLVM_PREFERRED_TYPE(bool)
unsigned Half : 1;
/// When true, print the built-in wchar_t type as __wchar_t. For use in
/// Microsoft mode when wchar_t is not available.
+ LLVM_PREFERRED_TYPE(bool)
unsigned MSWChar : 1;
/// When true, include newlines after statements like "break", etc.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IncludeNewlines : 1;
/// Use whitespace and punctuation like MSVC does. In particular, this prints
/// anonymous namespaces as `anonymous namespace' and does not insert spaces
/// after template arguments.
+ LLVM_PREFERRED_TYPE(bool)
unsigned MSVCFormatting : 1;
/// Whether we should print the constant expressions as written in the
@@ -256,23 +294,54 @@ struct PrintingPolicy {
/// 0x10
/// 2.5e3
/// \endcode
+ LLVM_PREFERRED_TYPE(bool)
unsigned ConstantsAsWritten : 1;
/// When true, don't print the implicit 'self' or 'this' expressions.
+ LLVM_PREFERRED_TYPE(bool)
unsigned SuppressImplicitBase : 1;
/// When true, print the fully qualified name of function declarations.
/// This is the opposite of SuppressScope and thus overrules it.
+ LLVM_PREFERRED_TYPE(bool)
unsigned FullyQualifiedName : 1;
/// Whether to print types as written or canonically.
+ LLVM_PREFERRED_TYPE(bool)
unsigned PrintCanonicalTypes : 1;
/// Whether to print an InjectedClassNameType with template arguments or as
/// written. When a template argument is unnamed, printing it results in
/// invalid C++ code.
+ LLVM_PREFERRED_TYPE(bool)
unsigned PrintInjectedClassNameWithArguments : 1;
+ /// Whether to use C++ template preferred_name attributes when printing
+ /// templates.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned UsePreferredNames : 1;
+
+ /// Whether to use type suffixes (eg: 1U) on integral non-type template
+ /// parameters.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned AlwaysIncludeTypeForTemplateArgument : 1;
+
+ /// Whether to strip underscores when printing reserved parameter names.
+ /// e.g. std::vector<class _Tp> becomes std::vector<class Tp>.
+ /// This only affects parameter names, and so describes a compatible API.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned CleanUglifiedParameters : 1;
+
+ /// Whether to print the entire array initializers, especially on non-type
+ /// template parameters, no matter how many elements there are.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned EntireContentsOfLargeArray : 1;
+
+ /// Whether to print enumerator non-type template parameters with a matching
+ /// enumerator name or via cast of an integer.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned UseEnumerators : 1;
+
/// Callbacks to use to allow the behavior of printing to be customized.
const PrintingCallbacks *Callbacks = nullptr;
};
diff --git a/contrib/llvm-project/clang/include/clang/AST/PropertiesBase.td b/contrib/llvm-project/clang/include/clang/AST/PropertiesBase.td
index a087cb406b29..0270c086d06b 100644
--- a/contrib/llvm-project/clang/include/clang/AST/PropertiesBase.td
+++ b/contrib/llvm-project/clang/include/clang/AST/PropertiesBase.td
@@ -41,7 +41,7 @@ class RefPropertyType<string className> : PropertyType<className # "*"> {
let PackOptional =
"value ? *value : nullptr";
let UnpackOptional =
- "value ? llvm::Optional<" # CXXName # ">(value) : llvm::None";
+ "value ? std::optional<" # CXXName # ">(value) : std::nullopt";
}
/// Property types that correspond to a specific subclass of another type.
@@ -58,7 +58,7 @@ class DefaultValuePropertyType<string typeName = ""> : PropertyType<typeName> {
let PackOptional =
"value ? *value : " # CXXName # "()";
let UnpackOptional =
- "value.isNull() ? llvm::None : llvm::Optional<" # CXXName # ">(value)";
+ "value.isNull() ? std::nullopt : std::optional<" # CXXName # ">(value)";
}
/// Property types that correspond to integer types and support optional
@@ -67,18 +67,19 @@ class CountPropertyType<string typeName = ""> : PropertyType<typeName> {
let PackOptional =
"value ? *value + 1 : 0";
let UnpackOptional =
- "value ? llvm::Optional<" # CXXName # ">(value - 1) : llvm::None";
+ "value ? std::optional<" # CXXName # ">(value - 1) : std::nullopt";
}
def APInt : PropertyType<"llvm::APInt"> { let PassByReference = 1; }
def APSInt : PropertyType<"llvm::APSInt"> { let PassByReference = 1; }
def APValue : PropertyType { let PassByReference = 1; }
def APValueKind : EnumPropertyType<"APValue::ValueKind">;
-def ArraySizeModifier : EnumPropertyType<"ArrayType::ArraySizeModifier">;
+def ArraySizeModifier : EnumPropertyType<"ArraySizeModifier">;
def AttrKind : EnumPropertyType<"attr::Kind">;
def AutoTypeKeyword : EnumPropertyType;
def Bool : PropertyType<"bool">;
def BuiltinTypeKind : EnumPropertyType<"BuiltinType::Kind">;
+def BTFTypeTagAttr : PropertyType<"const BTFTypeTagAttr *">;
def CallingConv : EnumPropertyType;
def DeclarationName : PropertyType;
def DeclarationNameKind : EnumPropertyType<"DeclarationName::NameKind">;
@@ -107,6 +108,8 @@ def DeclRef : RefPropertyType<"Decl"> { let ConstWhenWriting = 1; }
SubclassPropertyType<"TemplateTypeParmDecl", DeclRef>;
def TemplateTemplateParmDeclRef :
SubclassPropertyType<"TemplateTemplateParmDecl", DeclRef>;
+ def UsingShadowDeclRef :
+ SubclassPropertyType<"UsingShadowDecl", DeclRef>;
def ValueDeclRef :
SubclassPropertyType<"ValueDecl", DeclRef>;
def ElaboratedTypeKeyword : EnumPropertyType;
@@ -135,10 +138,11 @@ def TemplateArgument : PropertyType;
def TemplateArgumentKind : EnumPropertyType<"TemplateArgument::ArgKind">;
def TemplateName : DefaultValuePropertyType;
def TemplateNameKind : EnumPropertyType<"TemplateName::NameKind">;
+def TypeOfKind : EnumPropertyType<"TypeOfKind">;
def UInt32 : CountPropertyType<"uint32_t">;
def UInt64 : CountPropertyType<"uint64_t">;
def UnaryTypeTransformKind : EnumPropertyType<"UnaryTransformType::UTTKind">;
-def VectorKind : EnumPropertyType<"VectorType::VectorKind">;
+def VectorKind : EnumPropertyType<"VectorKind">;
def ExceptionSpecInfo : PropertyType<"FunctionProtoType::ExceptionSpecInfo"> {
let BufferElementTypes = [ QualType ];
@@ -151,7 +155,7 @@ class Array<PropertyType element> : PropertyType {
let BufferElementTypes = [ element ];
}
-/// llvm::Optional<T>. The corresponding C++ type is generally just the
+/// std::optional<T>. The corresponding C++ type is generally just the
/// corresponding C++ type of the element.
///
/// Optional<Unsigned> may restrict the range of the operand for some
@@ -446,10 +450,13 @@ let Class = PropertyTypeCase<APValue, "LValue"> in {
lvalueBase ? lvalueBase.dyn_cast<const Expr *>() : nullptr;
bool lvalueBaseIsExpr = (bool) expr;
bool lvalueBaseIsTypeInfo = lvalueBase.is<TypeInfoLValue>();
+ bool lvalueBaseIsDynamicAlloc = lvalueBase.is<DynamicAllocLValue>();
QualType elemTy;
if (lvalueBase) {
if (lvalueBaseIsTypeInfo) {
elemTy = lvalueBase.getTypeInfoType();
+ } else if (lvalueBaseIsDynamicAlloc) {
+ elemTy = lvalueBase.getDynamicAllocType();
} else if (lvalueBaseIsExpr) {
elemTy = expr->getType();
} else {
@@ -469,6 +476,9 @@ let Class = PropertyTypeCase<APValue, "LValue"> in {
def : Property<"isTypeInfo", Bool> {
let Read = [{ lvalueBaseIsTypeInfo }];
}
+ def : Property<"isDynamicAlloc", Bool> {
+ let Read = [{ lvalueBaseIsDynamicAlloc }];
+ }
def : Property<"hasBase", Bool> {
let Read = [{ static_cast<bool>(lvalueBase) }];
}
@@ -481,9 +491,17 @@ let Class = PropertyTypeCase<APValue, "LValue"> in {
QualType(node.getLValueBase().get<TypeInfoLValue>().getType(), 0)
}];
}
+ def : Property<"dynamicAlloc", UInt32> {
+ let Conditional = [{ hasBase && isDynamicAlloc }];
+ let Read = [{ node.getLValueBase().get<DynamicAllocLValue>().getIndex() }];
+ }
def : Property<"type", QualType> {
- let Conditional = [{ hasBase && isTypeInfo }];
- let Read = [{ node.getLValueBase().getTypeInfoType() }];
+ let Conditional = [{ hasBase && (isTypeInfo || isDynamicAlloc) }];
+ let Read = [{
+ isTypeInfo
+ ? node.getLValueBase().getTypeInfoType()
+ : node.getLValueBase().getDynamicAllocType()
+ }];
}
def : Property<"callIndex", UInt32> {
let Conditional = [{ hasBase && !isTypeInfo }];
@@ -498,7 +516,7 @@ let Class = PropertyTypeCase<APValue, "LValue"> in {
let Read = [{ const_cast<Expr *>(expr) }];
}
def : Property<"decl", DeclRef> {
- let Conditional = [{ hasBase && !isTypeInfo && !isExpr }];
+ let Conditional = [{ hasBase && !isTypeInfo && !isDynamicAlloc && !isExpr }];
let Read = [{ lvalueBase.get<const ValueDecl *>() }];
}
def : Property<"offsetQuantity", UInt32> {
@@ -513,20 +531,19 @@ let Class = PropertyTypeCase<APValue, "LValue"> in {
def : Creator<[{
(void)ctx;
APValue::LValueBase base;
- QualType elemTy;
if (hasBase) {
if (isTypeInfo) {
base = APValue::LValueBase::getTypeInfo(
- TypeInfoLValue(typeInfo.getValue().getTypePtr()), type.getValue());
- elemTy = base.getTypeInfoType();
+ TypeInfoLValue(typeInfo->getTypePtr()), *type);
+ } else if (isDynamicAlloc) {
+ base = APValue::LValueBase::getDynamicAlloc(
+ DynamicAllocLValue(*dynamicAlloc), *type);
} else if (isExpr) {
- base = APValue::LValueBase(cast<Expr>(stmt.getValue()),
- callIndex.getValue(), version.getValue());
- elemTy = base.get<const Expr *>()->getType();
+ base = APValue::LValueBase(cast<Expr>(*stmt),
+ *callIndex, *version);
} else {
- base = APValue::LValueBase(cast<ValueDecl>(decl.getValue()),
- callIndex.getValue(), version.getValue());
- elemTy = base.get<const ValueDecl *>()->getType();
+ base = APValue::LValueBase(cast<ValueDecl>(*decl),
+ *callIndex, *version);
}
}
CharUnits offset = CharUnits::fromQuantity(offsetQuantity);
@@ -539,7 +556,6 @@ let Class = PropertyTypeCase<APValue, "LValue"> in {
auto pathLength = lvaluePath->Path.size();
APValue::LValuePathEntry *path = result.setLValueUninit(
base, offset, pathLength, isLValueOnePastTheEnd, isNullPtr).data();
- assert(lvaluePath->getType() == elemTy && "Unexpected type reference!");
llvm::copy(lvaluePath->Path, path);
return result;
}]>;
@@ -617,6 +633,16 @@ let Class = PropertyTypeCase<TemplateName, "Template"> in {
return TemplateName(declaration);
}]>;
}
+
+let Class = PropertyTypeCase<TemplateName, "UsingTemplate"> in {
+ def : Property<"foundDecl", UsingShadowDeclRef> {
+ let Read = [{ node.getAsUsingShadowDecl() }];
+ }
+ def : Creator<[{
+ return TemplateName(foundDecl);
+ }]>;
+}
+
let Class = PropertyTypeCase<TemplateName, "OverloadedTemplate"> in {
def : Property<"overloads", Array<NamedDeclRef>> {
let Read = [{ node.getAsOverloadedTemplate()->decls() }];
@@ -650,12 +676,12 @@ let Class = PropertyTypeCase<TemplateName, "QualifiedTemplate"> in {
def : Property<"hasTemplateKeyword", Bool> {
let Read = [{ qtn->hasTemplateKeyword() }];
}
- def : Property<"declaration", TemplateDeclRef> {
- let Read = [{ qtn->getTemplateDecl() }];
+ def : Property<"underlyingTemplateName", TemplateName> {
+ let Read = [{ qtn->getUnderlyingTemplate() }];
}
def : Creator<[{
return ctx.getQualifiedTemplateName(qualifier, hasTemplateKeyword,
- declaration);
+ underlyingTemplateName);
}]>;
}
let Class = PropertyTypeCase<TemplateName, "DependentTemplate"> in {
@@ -687,28 +713,40 @@ let Class = PropertyTypeCase<TemplateName, "SubstTemplateTemplateParm"> in {
def : ReadHelper<[{
auto parm = node.getAsSubstTemplateTemplateParm();
}]>;
- def : Property<"parameter", TemplateTemplateParmDeclRef> {
- let Read = [{ parm->getParameter() }];
- }
def : Property<"replacement", TemplateName> {
let Read = [{ parm->getReplacement() }];
}
+ def : Property<"associatedDecl", DeclRef> {
+ let Read = [{ parm->getAssociatedDecl() }];
+ }
+ def : Property<"index", UInt32> {
+ let Read = [{ parm->getIndex() }];
+ }
+ def : Property<"packIndex", Optional<UInt32>> {
+ let Read = [{ parm->getPackIndex() }];
+ }
def : Creator<[{
- return ctx.getSubstTemplateTemplateParm(parameter, replacement);
+ return ctx.getSubstTemplateTemplateParm(replacement, associatedDecl, index, packIndex);
}]>;
}
let Class = PropertyTypeCase<TemplateName, "SubstTemplateTemplateParmPack"> in {
def : ReadHelper<[{
auto parm = node.getAsSubstTemplateTemplateParmPack();
}]>;
- def : Property<"parameterPack", TemplateTemplateParmDeclRef> {
- let Read = [{ parm->getParameterPack() }];
- }
def : Property<"argumentPack", TemplateArgument> {
let Read = [{ parm->getArgumentPack() }];
}
+ def : Property<"associatedDecl", DeclRef> {
+ let Read = [{ parm->getAssociatedDecl() }];
+ }
+ def : Property<"index", UInt32> {
+ let Read = [{ parm->getIndex() }];
+ }
+ def : Property<"final", Bool> {
+ let Read = [{ parm->getFinal() }];
+ }
def : Creator<[{
- return ctx.getSubstTemplateTemplateParmPack(parameterPack, argumentPack);
+ return ctx.getSubstTemplateTemplateParmPack(argumentPack, associatedDecl, index, final);
}]>;
}
@@ -724,8 +762,11 @@ let Class = PropertyTypeCase<TemplateArgument, "Type"> in {
def : Property<"type", QualType> {
let Read = [{ node.getAsType() }];
}
+ def : Property<"isDefaulted", Bool> {
+ let Read = [{ node.getIsDefaulted() }];
+ }
def : Creator<[{
- return TemplateArgument(type);
+ return TemplateArgument(type, /* isNullPtr */ false, isDefaulted);
}]>;
}
let Class = PropertyTypeCase<TemplateArgument, "Declaration"> in {
@@ -735,16 +776,22 @@ let Class = PropertyTypeCase<TemplateArgument, "Declaration"> in {
def : Property<"parameterType", QualType> {
let Read = [{ node.getParamTypeForDecl() }];
}
+ def : Property<"isDefaulted", Bool> {
+ let Read = [{ node.getIsDefaulted() }];
+ }
def : Creator<[{
- return TemplateArgument(declaration, parameterType);
+ return TemplateArgument(declaration, parameterType, isDefaulted);
}]>;
}
let Class = PropertyTypeCase<TemplateArgument, "NullPtr"> in {
def : Property<"type", QualType> {
let Read = [{ node.getNullPtrType() }];
}
+ def : Property<"isDefaulted", Bool> {
+ let Read = [{ node.getIsDefaulted() }];
+ }
def : Creator<[{
- return TemplateArgument(type, /*nullptr*/ true);
+ return TemplateArgument(type, /*nullptr*/ true, isDefaulted);
}]>;
}
let Class = PropertyTypeCase<TemplateArgument, "Integral"> in {
@@ -754,16 +801,36 @@ let Class = PropertyTypeCase<TemplateArgument, "Integral"> in {
def : Property<"type", QualType> {
let Read = [{ node.getIntegralType() }];
}
+ def : Property<"isDefaulted", Bool> {
+ let Read = [{ node.getIsDefaulted() }];
+ }
+ def : Creator<[{
+ return TemplateArgument(ctx, value, type, isDefaulted);
+ }]>;
+}
+let Class = PropertyTypeCase<TemplateArgument, "StructuralValue"> in {
+ def : Property<"value", APValue> {
+ let Read = [{ node.getAsStructuralValue() }];
+ }
+ def : Property<"type", QualType> {
+ let Read = [{ node.getStructuralValueType() }];
+ }
+ def : Property<"isDefaulted", Bool> {
+ let Read = [{ node.getIsDefaulted() }];
+ }
def : Creator<[{
- return TemplateArgument(ctx, value, type);
+ return TemplateArgument(ctx, type, value, isDefaulted);
}]>;
}
let Class = PropertyTypeCase<TemplateArgument, "Template"> in {
def : Property<"name", TemplateName> {
let Read = [{ node.getAsTemplateOrTemplatePattern() }];
}
+ def : Property<"isDefaulted", Bool> {
+ let Read = [{ node.getIsDefaulted() }];
+ }
def : Creator<[{
- return TemplateArgument(name);
+ return TemplateArgument(name, isDefaulted);
}]>;
}
let Class = PropertyTypeCase<TemplateArgument, "TemplateExpansion"> in {
@@ -773,22 +840,29 @@ let Class = PropertyTypeCase<TemplateArgument, "TemplateExpansion"> in {
def : Property<"numExpansions", Optional<UInt32>> {
let Read = [{
// Translate unsigned -> uint32_t just in case.
- node.getNumTemplateExpansions().map(
- [](unsigned i) { return uint32_t(i); })
+ llvm::transformOptional(node.getNumTemplateExpansions(),
+ [](unsigned i) { return uint32_t(i); })
}];
}
+ def : Property<"isDefaulted", Bool> {
+ let Read = [{ node.getIsDefaulted() }];
+ }
def : Creator<[{
- auto numExpansionsUnsigned =
- numExpansions.map([](uint32_t i) { return unsigned(i); });
- return TemplateArgument(name, numExpansionsUnsigned);
+ auto numExpansionsUnsigned = llvm::transformOptional(
+ numExpansions, [](uint32_t i) { return unsigned(i); });
+
+ return TemplateArgument(name, numExpansionsUnsigned, isDefaulted);
}]>;
}
let Class = PropertyTypeCase<TemplateArgument, "Expression"> in {
def : Property<"expression", ExprRef> {
let Read = [{ node.getAsExpr() }];
}
+ def : Property<"isDefaulted", Bool> {
+ let Read = [{ node.getIsDefaulted() }];
+ }
def : Creator<[{
- return TemplateArgument(expression);
+ return TemplateArgument(expression, isDefaulted);
}]>;
}
let Class = PropertyTypeCase<TemplateArgument, "Pack"> in {
@@ -800,6 +874,6 @@ let Class = PropertyTypeCase<TemplateArgument, "Pack"> in {
TemplateArgument *ctxElements = new (ctx) TemplateArgument[elements.size()];
for (size_t i = 0, e = elements.size(); i != e; ++i)
ctxElements[i] = elements[i];
- return TemplateArgument(llvm::makeArrayRef(ctxElements, elements.size()));
+ return TemplateArgument(llvm::ArrayRef(ctxElements, elements.size()));
}]>;
}
diff --git a/contrib/llvm-project/clang/include/clang/AST/QualTypeNames.h b/contrib/llvm-project/clang/include/clang/AST/QualTypeNames.h
index 8313e0441be5..daa86cda2d99 100644
--- a/contrib/llvm-project/clang/include/clang/AST/QualTypeNames.h
+++ b/contrib/llvm-project/clang/include/clang/AST/QualTypeNames.h
@@ -89,4 +89,4 @@ QualType getFullyQualifiedType(QualType QT, const ASTContext &Ctx,
bool WithGlobalNsPrefix = false);
} // end namespace TypeName
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_CORE_QUALTYPENAMES_H
+#endif // LLVM_CLANG_AST_QUALTYPENAMES_H
diff --git a/contrib/llvm-project/clang/include/clang/AST/Randstruct.h b/contrib/llvm-project/clang/include/clang/AST/Randstruct.h
new file mode 100644
index 000000000000..d5eaf30919e3
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/AST/Randstruct.h
@@ -0,0 +1,35 @@
+//===- Randstruct.h - Interfact for structure randomization -------*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the interface for Clang's structure field layout
+// randomization.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_RANDSTRUCT_H
+#define LLVM_CLANG_AST_RANDSTRUCT_H
+
+namespace llvm {
+template <typename T> class SmallVectorImpl;
+} // end namespace llvm
+
+namespace clang {
+
+class ASTContext;
+class Decl;
+class RecordDecl;
+
+namespace randstruct {
+
+bool randomizeStructureLayout(const ASTContext &Context, RecordDecl *RD,
+ llvm::SmallVectorImpl<Decl *> &FinalOrdering);
+
+} // namespace randstruct
+} // namespace clang
+
+#endif // LLVM_CLANG_AST_RANDSTRUCT_H
diff --git a/contrib/llvm-project/clang/include/clang/AST/RawCommentList.h b/contrib/llvm-project/clang/include/clang/AST/RawCommentList.h
index a18432c2b768..53aae24fa7bb 100644
--- a/contrib/llvm-project/clang/include/clang/AST/RawCommentList.h
+++ b/contrib/llvm-project/clang/include/clang/AST/RawCommentList.h
@@ -115,6 +115,17 @@ public:
return extractBriefText(Context);
}
+ bool hasUnsupportedSplice(const SourceManager &SourceMgr) const {
+ if (!isInvalid())
+ return false;
+ StringRef Text = getRawText(SourceMgr);
+ if (Text.size() < 6 || Text[0] != '/')
+ return false;
+ if (Text[1] == '*')
+ return Text[Text.size() - 1] != '/' || Text[Text.size() - 2] != '*';
+ return Text[1] != '/';
+ }
+
/// Returns sanitized comment text, suitable for presentation in editor UIs.
/// E.g. will transform:
/// // This is a long multiline comment.
@@ -139,6 +150,21 @@ public:
std::string getFormattedText(const SourceManager &SourceMgr,
DiagnosticsEngine &Diags) const;
+ struct CommentLine {
+ std::string Text;
+ PresumedLoc Begin;
+ PresumedLoc End;
+
+ CommentLine(StringRef Text, PresumedLoc Begin, PresumedLoc End)
+ : Text(Text), Begin(Begin), End(End) {}
+ };
+
+ /// Returns sanitized comment text as separated lines with locations in
+ /// source, suitable for further processing and rendering requiring source
+ /// locations.
+ std::vector<CommentLine> getFormattedLines(const SourceManager &SourceMgr,
+ DiagnosticsEngine &Diags) const;
+
/// Parse the comment, assuming it is attached to decl \c D.
comments::FullComment *parse(const ASTContext &Context,
const Preprocessor *PP, const Decl *D) const;
@@ -147,11 +173,12 @@ private:
SourceRange Range;
mutable StringRef RawText;
- mutable const char *BriefText;
+ mutable const char *BriefText = nullptr;
mutable bool RawTextValid : 1; ///< True if RawText is valid
mutable bool BriefTextValid : 1; ///< True if BriefText is valid
+ LLVM_PREFERRED_TYPE(CommentKind)
unsigned Kind : 3;
/// True if comment is attached to a declaration in ASTContext.
diff --git a/contrib/llvm-project/clang/include/clang/AST/RecursiveASTVisitor.h b/contrib/llvm-project/clang/include/clang/AST/RecursiveASTVisitor.h
index 9bfa5b9c2326..2aee6a947141 100644
--- a/contrib/llvm-project/clang/include/clang/AST/RecursiveASTVisitor.h
+++ b/contrib/llvm-project/clang/include/clang/AST/RecursiveASTVisitor.h
@@ -13,18 +13,19 @@
#ifndef LLVM_CLANG_AST_RECURSIVEASTVISITOR_H
#define LLVM_CLANG_AST_RECURSIVEASTVISITOR_H
+#include "clang/AST/ASTConcept.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
-#include "clang/AST/DeclarationName.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
-#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/LambdaCapture.h"
@@ -68,30 +69,17 @@ template <typename T, typename U, typename R, typename... P>
struct has_same_member_pointer_type<R (T::*)(P...), R (U::*)(P...)>
: std::true_type {};
-template <bool has_same_type> struct is_same_method_impl {
- template <typename FirstMethodPtrTy, typename SecondMethodPtrTy>
- static bool isSameMethod(FirstMethodPtrTy FirstMethodPtr,
- SecondMethodPtrTy SecondMethodPtr) {
- return false;
- }
-};
-
-template <> struct is_same_method_impl<true> {
- template <typename FirstMethodPtrTy, typename SecondMethodPtrTy>
- static bool isSameMethod(FirstMethodPtrTy FirstMethodPtr,
- SecondMethodPtrTy SecondMethodPtr) {
- return FirstMethodPtr == SecondMethodPtr;
- }
-};
-
/// Returns true if and only if \p FirstMethodPtr and \p SecondMethodPtr
/// are pointers to the same non-static member function.
template <typename FirstMethodPtrTy, typename SecondMethodPtrTy>
-bool isSameMethod(FirstMethodPtrTy FirstMethodPtr,
- SecondMethodPtrTy SecondMethodPtr) {
- return is_same_method_impl<has_same_member_pointer_type<
- FirstMethodPtrTy,
- SecondMethodPtrTy>::value>::isSameMethod(FirstMethodPtr, SecondMethodPtr);
+LLVM_ATTRIBUTE_ALWAYS_INLINE LLVM_ATTRIBUTE_NODEBUG auto
+isSameMethod([[maybe_unused]] FirstMethodPtrTy FirstMethodPtr,
+ [[maybe_unused]] SecondMethodPtrTy SecondMethodPtr)
+ -> bool {
+ if constexpr (has_same_member_pointer_type<FirstMethodPtrTy,
+ SecondMethodPtrTy>::value)
+ return FirstMethodPtr == SecondMethodPtr;
+ return false;
}
} // end namespace detail
@@ -288,8 +276,7 @@ public:
///
/// \returns false if the visitation was terminated early, true otherwise.
// FIXME: take a TemplateArgumentLoc* (or TemplateArgumentListInfo) instead.
- bool TraverseTemplateArguments(const TemplateArgument *Args,
- unsigned NumArgs);
+ bool TraverseTemplateArguments(ArrayRef<TemplateArgument> Args);
/// Recursively visit a base specifier. This can be overridden by a
/// subclass.
@@ -319,11 +306,19 @@ public:
bool TraverseSynOrSemInitListExpr(InitListExpr *S,
DataRecursionQueue *Queue = nullptr);
- /// Recursively visit a reference to a concept with potential arguments.
+ /// Recursively visit an Objective-C protocol reference with location
+ /// information.
+ ///
+ /// \returns false if the visitation was terminated early, true otherwise.
+ bool TraverseObjCProtocolLoc(ObjCProtocolLoc ProtocolLoc);
+
+ /// Recursively visit concept reference with location information.
///
/// \returns false if the visitation was terminated early, true otherwise.
- bool TraverseConceptReference(const ConceptReference &C);
+ bool TraverseConceptReference(ConceptReference *CR);
+ // Visit concept reference.
+ bool VisitConceptReference(ConceptReference *CR) { return true; }
// ---- Methods on Attrs ----
// Visit an attribute.
@@ -469,6 +464,13 @@ public:
DEF_TRAVERSE_TMPL_INST(Function)
#undef DEF_TRAVERSE_TMPL_INST
+ bool TraverseTypeConstraint(const TypeConstraint *C);
+
+ bool TraverseConceptRequirement(concepts::Requirement *R);
+ bool TraverseConceptTypeRequirement(concepts::TypeRequirement *R);
+ bool TraverseConceptExprRequirement(concepts::ExprRequirement *R);
+ bool TraverseConceptNestedRequirement(concepts::NestedRequirement *R);
+
bool dataTraverseNode(Stmt *S, DataRecursionQueue *Queue);
private:
@@ -506,6 +508,43 @@ private:
};
template <typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseTypeConstraint(
+ const TypeConstraint *C) {
+ if (!getDerived().shouldVisitImplicitCode()) {
+ TRY_TO(TraverseConceptReference(C->getConceptReference()));
+ return true;
+ }
+ if (Expr *IDC = C->getImmediatelyDeclaredConstraint()) {
+ TRY_TO(TraverseStmt(IDC));
+ } else {
+ // Avoid traversing the ConceptReference in the TypeConstraint
+ // if we have an immediately-declared-constraint, otherwise
+ // we'll end up visiting the concept and the arguments in
+ // the TC twice.
+ TRY_TO(TraverseConceptReference(C->getConceptReference()));
+ }
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseConceptRequirement(
+ concepts::Requirement *R) {
+ switch (R->getKind()) {
+ case concepts::Requirement::RK_Type:
+ return getDerived().TraverseConceptTypeRequirement(
+ cast<concepts::TypeRequirement>(R));
+ case concepts::Requirement::RK_Simple:
+ case concepts::Requirement::RK_Compound:
+ return getDerived().TraverseConceptExprRequirement(
+ cast<concepts::ExprRequirement>(R));
+ case concepts::Requirement::RK_Nested:
+ return getDerived().TraverseConceptNestedRequirement(
+ cast<concepts::NestedRequirement>(R));
+ }
+ llvm_unreachable("unexpected case");
+}
+
+template <typename Derived>
bool RecursiveASTVisitor<Derived>::dataTraverseNode(Stmt *S,
DataRecursionQueue *Queue) {
// Top switch stmt: dispatch to TraverseFooStmt for each concrete FooStmt.
@@ -525,6 +564,40 @@ bool RecursiveASTVisitor<Derived>::dataTraverseNode(Stmt *S,
#undef DISPATCH_STMT
template <typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseConceptTypeRequirement(
+ concepts::TypeRequirement *R) {
+ if (R->isSubstitutionFailure())
+ return true;
+ return getDerived().TraverseTypeLoc(R->getType()->getTypeLoc());
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseConceptExprRequirement(
+ concepts::ExprRequirement *R) {
+ if (!R->isExprSubstitutionFailure())
+ TRY_TO(TraverseStmt(R->getExpr()));
+ auto &RetReq = R->getReturnTypeRequirement();
+ if (RetReq.isTypeConstraint()) {
+ if (getDerived().shouldVisitImplicitCode()) {
+ TRY_TO(TraverseTemplateParameterListHelper(
+ RetReq.getTypeConstraintTemplateParameterList()));
+ } else {
+ // Template parameter list is implicit, visit constraint directly.
+ TRY_TO(TraverseTypeConstraint(RetReq.getTypeConstraint()));
+ }
+ }
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseConceptNestedRequirement(
+ concepts::NestedRequirement *R) {
+ if (!R->hasInvalidConstraint())
+ return getDerived().TraverseStmt(R->getConstraintExpr());
+ return true;
+}
+
+template <typename Derived>
bool RecursiveASTVisitor<Derived>::PostVisitStmt(Stmt *S) {
// In pre-order traversal mode, each Traverse##STMT method is responsible for
// calling WalkUpFrom. Therefore, if the user overrides Traverse##STMT and
@@ -777,6 +850,7 @@ bool RecursiveASTVisitor<Derived>::TraverseTemplateArgument(
case TemplateArgument::Declaration:
case TemplateArgument::Integral:
case TemplateArgument::NullPtr:
+ case TemplateArgument::StructuralValue:
return true;
case TemplateArgument::Type:
@@ -791,8 +865,7 @@ bool RecursiveASTVisitor<Derived>::TraverseTemplateArgument(
return getDerived().TraverseStmt(Arg.getAsExpr());
case TemplateArgument::Pack:
- return getDerived().TraverseTemplateArguments(Arg.pack_begin(),
- Arg.pack_size());
+ return getDerived().TraverseTemplateArguments(Arg.pack_elements());
}
return true;
@@ -810,6 +883,7 @@ bool RecursiveASTVisitor<Derived>::TraverseTemplateArgumentLoc(
case TemplateArgument::Declaration:
case TemplateArgument::Integral:
case TemplateArgument::NullPtr:
+ case TemplateArgument::StructuralValue:
return true;
case TemplateArgument::Type: {
@@ -832,8 +906,7 @@ bool RecursiveASTVisitor<Derived>::TraverseTemplateArgumentLoc(
return getDerived().TraverseStmt(ArgLoc.getSourceExpression());
case TemplateArgument::Pack:
- return getDerived().TraverseTemplateArguments(Arg.pack_begin(),
- Arg.pack_size());
+ return getDerived().TraverseTemplateArguments(Arg.pack_elements());
}
return true;
@@ -841,10 +914,9 @@ bool RecursiveASTVisitor<Derived>::TraverseTemplateArgumentLoc(
template <typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseTemplateArguments(
- const TemplateArgument *Args, unsigned NumArgs) {
- for (unsigned I = 0; I != NumArgs; ++I) {
- TRY_TO(TraverseTemplateArgument(Args[I]));
- }
+ ArrayRef<TemplateArgument> Args) {
+ for (const TemplateArgument &Arg : Args)
+ TRY_TO(TraverseTemplateArgument(Arg));
return true;
}
@@ -981,13 +1053,14 @@ DEF_TRAVERSE_TYPE(FunctionProtoType, {
TRY_TO(TraverseStmt(NE));
})
+DEF_TRAVERSE_TYPE(UsingType, {})
DEF_TRAVERSE_TYPE(UnresolvedUsingType, {})
DEF_TRAVERSE_TYPE(TypedefType, {})
DEF_TRAVERSE_TYPE(TypeOfExprType,
{ TRY_TO(TraverseStmt(T->getUnderlyingExpr())); })
-DEF_TRAVERSE_TYPE(TypeOfType, { TRY_TO(TraverseType(T->getUnderlyingType())); })
+DEF_TRAVERSE_TYPE(TypeOfType, { TRY_TO(TraverseType(T->getUnmodifiedType())); })
DEF_TRAVERSE_TYPE(DecltypeType,
{ TRY_TO(TraverseStmt(T->getUnderlyingExpr())); })
@@ -1000,8 +1073,7 @@ DEF_TRAVERSE_TYPE(UnaryTransformType, {
DEF_TRAVERSE_TYPE(AutoType, {
TRY_TO(TraverseType(T->getDeducedType()));
if (T->isConstrained()) {
- TRY_TO(TraverseDecl(T->getTypeConstraintConcept()));
- TRY_TO(TraverseTemplateArguments(T->getArgs(), T->getNumArgs()));
+ TRY_TO(TraverseTemplateArguments(T->getTypeConstraintArguments()));
}
})
DEF_TRAVERSE_TYPE(DeducedTemplateSpecializationType, {
@@ -1021,7 +1093,7 @@ DEF_TRAVERSE_TYPE(SubstTemplateTypeParmPackType, {
DEF_TRAVERSE_TYPE(TemplateSpecializationType, {
TRY_TO(TraverseTemplateName(T->getTemplateName()));
- TRY_TO(TraverseTemplateArguments(T->getArgs(), T->getNumArgs()));
+ TRY_TO(TraverseTemplateArguments(T->template_arguments()));
})
DEF_TRAVERSE_TYPE(InjectedClassNameType, {})
@@ -1029,6 +1101,9 @@ DEF_TRAVERSE_TYPE(InjectedClassNameType, {})
DEF_TRAVERSE_TYPE(AttributedType,
{ TRY_TO(TraverseType(T->getModifiedType())); })
+DEF_TRAVERSE_TYPE(BTFTagAttributedType,
+ { TRY_TO(TraverseType(T->getWrappedType())); })
+
DEF_TRAVERSE_TYPE(ParenType, { TRY_TO(TraverseType(T->getInnerType())); })
DEF_TRAVERSE_TYPE(MacroQualifiedType,
@@ -1046,7 +1121,7 @@ DEF_TRAVERSE_TYPE(DependentNameType,
DEF_TRAVERSE_TYPE(DependentTemplateSpecializationType, {
TRY_TO(TraverseNestedNameSpecifier(T->getQualifier()));
- TRY_TO(TraverseTemplateArguments(T->getArgs(), T->getNumArgs()));
+ TRY_TO(TraverseTemplateArguments(T->template_arguments()));
})
DEF_TRAVERSE_TYPE(PackExpansionType, { TRY_TO(TraverseType(T->getPattern())); })
@@ -1072,8 +1147,8 @@ DEF_TRAVERSE_TYPE(AtomicType, { TRY_TO(TraverseType(T->getValueType())); })
DEF_TRAVERSE_TYPE(PipeType, { TRY_TO(TraverseType(T->getElementType())); })
-DEF_TRAVERSE_TYPE(ExtIntType, {})
-DEF_TRAVERSE_TYPE(DependentExtIntType,
+DEF_TRAVERSE_TYPE(BitIntType, {})
+DEF_TRAVERSE_TYPE(DependentBitIntType,
{ TRY_TO(TraverseStmt(T->getNumBitsExpr())); })
#undef DEF_TRAVERSE_TYPE
@@ -1252,6 +1327,7 @@ DEF_TRAVERSE_TYPELOC(FunctionProtoType, {
TRY_TO(TraverseStmt(NE));
})
+DEF_TRAVERSE_TYPELOC(UsingType, {})
DEF_TRAVERSE_TYPELOC(UnresolvedUsingType, {})
DEF_TRAVERSE_TYPELOC(TypedefType, {})
@@ -1259,7 +1335,7 @@ DEF_TRAVERSE_TYPELOC(TypeOfExprType,
{ TRY_TO(TraverseStmt(TL.getUnderlyingExpr())); })
DEF_TRAVERSE_TYPELOC(TypeOfType, {
- TRY_TO(TraverseTypeLoc(TL.getUnderlyingTInfo()->getTypeLoc()));
+ TRY_TO(TraverseTypeLoc(TL.getUnmodifiedTInfo()->getTypeLoc()));
})
// FIXME: location of underlying expr
@@ -1274,10 +1350,7 @@ DEF_TRAVERSE_TYPELOC(UnaryTransformType, {
DEF_TRAVERSE_TYPELOC(AutoType, {
TRY_TO(TraverseType(TL.getTypePtr()->getDeducedType()));
if (TL.isConstrained()) {
- TRY_TO(TraverseNestedNameSpecifierLoc(TL.getNestedNameSpecifierLoc()));
- TRY_TO(TraverseDeclarationNameInfo(TL.getConceptNameInfo()));
- for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I)
- TRY_TO(TraverseTemplateArgumentLoc(TL.getArgLoc(I)));
+ TRY_TO(TraverseConceptReference(TL.getConceptReference()));
}
})
@@ -1314,6 +1387,9 @@ DEF_TRAVERSE_TYPELOC(MacroQualifiedType,
DEF_TRAVERSE_TYPELOC(AttributedType,
{ TRY_TO(TraverseTypeLoc(TL.getModifiedLoc())); })
+DEF_TRAVERSE_TYPELOC(BTFTagAttributedType,
+ { TRY_TO(TraverseTypeLoc(TL.getWrappedLoc())); })
+
DEF_TRAVERSE_TYPELOC(ElaboratedType, {
if (TL.getQualifierLoc()) {
TRY_TO(TraverseNestedNameSpecifierLoc(TL.getQualifierLoc()));
@@ -1338,7 +1414,12 @@ DEF_TRAVERSE_TYPELOC(DependentTemplateSpecializationType, {
DEF_TRAVERSE_TYPELOC(PackExpansionType,
{ TRY_TO(TraverseTypeLoc(TL.getPatternLoc())); })
-DEF_TRAVERSE_TYPELOC(ObjCTypeParamType, {})
+DEF_TRAVERSE_TYPELOC(ObjCTypeParamType, {
+ for (unsigned I = 0, N = TL.getNumProtocols(); I != N; ++I) {
+ ObjCProtocolLoc ProtocolLoc(TL.getProtocol(I), TL.getProtocolLoc(I));
+ TRY_TO(TraverseObjCProtocolLoc(ProtocolLoc));
+ }
+})
DEF_TRAVERSE_TYPELOC(ObjCInterfaceType, {})
@@ -1349,6 +1430,10 @@ DEF_TRAVERSE_TYPELOC(ObjCObjectType, {
TRY_TO(TraverseTypeLoc(TL.getBaseLoc()));
for (unsigned i = 0, n = TL.getNumTypeArgs(); i != n; ++i)
TRY_TO(TraverseTypeLoc(TL.getTypeArgTInfo(i)->getTypeLoc()));
+ for (unsigned I = 0, N = TL.getNumProtocols(); I != N; ++I) {
+ ObjCProtocolLoc ProtocolLoc(TL.getProtocol(I), TL.getProtocolLoc(I));
+ TRY_TO(TraverseObjCProtocolLoc(ProtocolLoc));
+ }
})
DEF_TRAVERSE_TYPELOC(ObjCObjectPointerType,
@@ -1358,8 +1443,8 @@ DEF_TRAVERSE_TYPELOC(AtomicType, { TRY_TO(TraverseTypeLoc(TL.getValueLoc())); })
DEF_TRAVERSE_TYPELOC(PipeType, { TRY_TO(TraverseTypeLoc(TL.getValueLoc())); })
-DEF_TRAVERSE_TYPELOC(ExtIntType, {})
-DEF_TRAVERSE_TYPELOC(DependentExtIntType, {
+DEF_TRAVERSE_TYPELOC(BitIntType, {})
+DEF_TRAVERSE_TYPELOC(DependentBitIntType, {
TRY_TO(TraverseStmt(TL.getTypePtr()->getNumBitsExpr()));
})
@@ -1440,6 +1525,8 @@ DEF_TRAVERSE_DECL(CapturedDecl, {
DEF_TRAVERSE_DECL(EmptyDecl, {})
+DEF_TRAVERSE_DECL(HLSLBufferDecl, {})
+
DEF_TRAVERSE_DECL(LifetimeExtendedTemporaryDecl, {
TRY_TO(TraverseStmt(D->getTemporaryExpr()));
})
@@ -1447,14 +1534,21 @@ DEF_TRAVERSE_DECL(LifetimeExtendedTemporaryDecl, {
DEF_TRAVERSE_DECL(FileScopeAsmDecl,
{ TRY_TO(TraverseStmt(D->getAsmString())); })
+DEF_TRAVERSE_DECL(TopLevelStmtDecl, { TRY_TO(TraverseStmt(D->getStmt())); })
+
DEF_TRAVERSE_DECL(ImportDecl, {})
DEF_TRAVERSE_DECL(FriendDecl, {
// Friend is either decl or a type.
- if (D->getFriendType())
+ if (D->getFriendType()) {
TRY_TO(TraverseTypeLoc(D->getFriendType()->getTypeLoc()));
- else
+ // Traverse any CXXRecordDecl owned by this type, since
+ // it will not be in the parent context:
+ if (auto *ET = D->getFriendType()->getType()->getAs<ElaboratedType>())
+ TRY_TO(TraverseDecl(ET->getOwnedTagDecl()));
+ } else {
TRY_TO(TraverseDecl(D->getFriendDecl()));
+ }
})
DEF_TRAVERSE_DECL(FriendTemplateDecl, {
@@ -1471,16 +1565,6 @@ DEF_TRAVERSE_DECL(FriendTemplateDecl, {
}
})
-DEF_TRAVERSE_DECL(ClassScopeFunctionSpecializationDecl, {
- TRY_TO(TraverseDecl(D->getSpecialization()));
-
- if (D->hasExplicitTemplateArgs()) {
- TRY_TO(TraverseTemplateArgumentLocsHelper(
- D->getTemplateArgsAsWritten()->getTemplateArgs(),
- D->getTemplateArgsAsWritten()->NumTemplateArgs));
- }
-})
-
DEF_TRAVERSE_DECL(LinkageSpecDecl, {})
DEF_TRAVERSE_DECL(ExportDecl, {})
@@ -1539,12 +1623,16 @@ DEF_TRAVERSE_DECL(
DEF_TRAVERSE_DECL(ObjCCompatibleAliasDecl, {// FIXME: implement
})
-DEF_TRAVERSE_DECL(ObjCCategoryDecl, {// FIXME: implement
+DEF_TRAVERSE_DECL(ObjCCategoryDecl, {
if (ObjCTypeParamList *typeParamList = D->getTypeParamList()) {
for (auto typeParam : *typeParamList) {
TRY_TO(TraverseObjCTypeParamDecl(typeParam));
}
}
+ for (auto It : llvm::zip(D->protocols(), D->protocol_locs())) {
+ ObjCProtocolLoc ProtocolLoc(std::get<0>(It), std::get<1>(It));
+ TRY_TO(TraverseObjCProtocolLoc(ProtocolLoc));
+ }
})
DEF_TRAVERSE_DECL(ObjCCategoryImplDecl, {// FIXME: implement
@@ -1553,7 +1641,7 @@ DEF_TRAVERSE_DECL(ObjCCategoryImplDecl, {// FIXME: implement
DEF_TRAVERSE_DECL(ObjCImplementationDecl, {// FIXME: implement
})
-DEF_TRAVERSE_DECL(ObjCInterfaceDecl, {// FIXME: implement
+DEF_TRAVERSE_DECL(ObjCInterfaceDecl, {
if (ObjCTypeParamList *typeParamList = D->getTypeParamListAsWritten()) {
for (auto typeParam : *typeParamList) {
TRY_TO(TraverseObjCTypeParamDecl(typeParam));
@@ -1563,10 +1651,22 @@ DEF_TRAVERSE_DECL(ObjCInterfaceDecl, {// FIXME: implement
if (TypeSourceInfo *superTInfo = D->getSuperClassTInfo()) {
TRY_TO(TraverseTypeLoc(superTInfo->getTypeLoc()));
}
+ if (D->isThisDeclarationADefinition()) {
+ for (auto It : llvm::zip(D->protocols(), D->protocol_locs())) {
+ ObjCProtocolLoc ProtocolLoc(std::get<0>(It), std::get<1>(It));
+ TRY_TO(TraverseObjCProtocolLoc(ProtocolLoc));
+ }
+ }
})
-DEF_TRAVERSE_DECL(ObjCProtocolDecl, {// FIXME: implement
- })
+DEF_TRAVERSE_DECL(ObjCProtocolDecl, {
+ if (D->isThisDeclarationADefinition()) {
+ for (auto It : llvm::zip(D->protocols(), D->protocol_locs())) {
+ ObjCProtocolLoc ProtocolLoc(std::get<0>(It), std::get<1>(It));
+ TRY_TO(TraverseObjCProtocolLoc(ProtocolLoc));
+ }
+ }
+})
DEF_TRAVERSE_DECL(ObjCMethodDecl, {
if (D->getReturnTypeSourceInfo()) {
@@ -1603,7 +1703,8 @@ DEF_TRAVERSE_DECL(UsingDecl, {
TRY_TO(TraverseDeclarationNameInfo(D->getNameInfo()));
})
-DEF_TRAVERSE_DECL(UsingEnumDecl, {})
+DEF_TRAVERSE_DECL(UsingEnumDecl,
+ { TRY_TO(TraverseTypeLoc(D->getEnumTypeLoc())); })
DEF_TRAVERSE_DECL(UsingPackDecl, {})
@@ -1681,10 +1782,7 @@ bool RecursiveASTVisitor<Derived>::TraverseTemplateInstantiations(
ClassTemplateDecl *D) {
for (auto *SD : D->specializations()) {
for (auto *RD : SD->redecls()) {
- // We don't want to visit injected-class-names in this traversal.
- if (cast<CXXRecordDecl>(RD)->isInjectedClassName())
- continue;
-
+ assert(!cast<CXXRecordDecl>(RD)->isInjectedClassName());
switch (
cast<ClassTemplateSpecializationDecl>(RD)->getSpecializationKind()) {
// Visit the implicit instantiations with the requested pattern.
@@ -1802,17 +1900,8 @@ DEF_TRAVERSE_DECL(BuiltinTemplateDecl, {
template <typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseTemplateTypeParamDeclConstraints(
const TemplateTypeParmDecl *D) {
- if (const auto *TC = D->getTypeConstraint()) {
- if (Expr *IDC = TC->getImmediatelyDeclaredConstraint()) {
- TRY_TO(TraverseStmt(IDC));
- } else {
- // Avoid traversing the ConceptReference in the TypeCosntraint
- // if we have an immediately-declared-constraint, otherwise
- // we'll end up visiting the concept and the arguments in
- // the TC twice.
- TRY_TO(TraverseConceptReference(*TC));
- }
- }
+ if (const auto *TC = D->getTypeConstraint())
+ TRY_TO(TraverseTypeConstraint(TC));
return true;
}
@@ -1863,10 +1952,9 @@ DEF_TRAVERSE_DECL(UnresolvedUsingIfExistsDecl, {})
DEF_TRAVERSE_DECL(EnumDecl, {
TRY_TO(TraverseDeclTemplateParameterLists(D));
- if (D->getTypeForDecl())
- TRY_TO(TraverseType(QualType(D->getTypeForDecl(), 0)));
-
TRY_TO(TraverseNestedNameSpecifierLoc(D->getQualifierLoc()));
+ if (auto *TSI = D->getIntegerTypeSourceInfo())
+ TRY_TO(TraverseTypeLoc(TSI->getTypeLoc()));
// The enumerators are already traversed by
// decls_begin()/decls_end().
})
@@ -1907,7 +1995,7 @@ DEF_TRAVERSE_DECL(RecordDecl, { TRY_TO(TraverseRecordHelper(D)); })
DEF_TRAVERSE_DECL(CXXRecordDecl, { TRY_TO(TraverseCXXRecordHelper(D)); })
-#define DEF_TRAVERSE_TMPL_SPEC_DECL(TMPLDECLKIND) \
+#define DEF_TRAVERSE_TMPL_SPEC_DECL(TMPLDECLKIND, DECLKIND) \
DEF_TRAVERSE_DECL(TMPLDECLKIND##TemplateSpecializationDecl, { \
/* For implicit instantiations ("set<int> x;"), we don't want to \
recurse at all, since the instatiated template isn't written in \
@@ -1920,18 +2008,23 @@ DEF_TRAVERSE_DECL(CXXRecordDecl, { TRY_TO(TraverseCXXRecordHelper(D)); })
if (TypeSourceInfo *TSI = D->getTypeAsWritten()) \
TRY_TO(TraverseTypeLoc(TSI->getTypeLoc())); \
\
- TRY_TO(TraverseNestedNameSpecifierLoc(D->getQualifierLoc())); \
- if (!getDerived().shouldVisitTemplateInstantiations() && \
- D->getTemplateSpecializationKind() != TSK_ExplicitSpecialization) \
+ if (getDerived().shouldVisitTemplateInstantiations() || \
+ D->getTemplateSpecializationKind() == TSK_ExplicitSpecialization) { \
+ /* Traverse base definition for explicit specializations */ \
+ TRY_TO(Traverse##DECLKIND##Helper(D)); \
+ } else { \
+ TRY_TO(TraverseNestedNameSpecifierLoc(D->getQualifierLoc())); \
+ \
/* Returning from here skips traversing the \
declaration context of the *TemplateSpecializationDecl \
(embedded in the DEF_TRAVERSE_DECL() macro) \
which contains the instantiated members of the template. */ \
return true; \
+ } \
})
-DEF_TRAVERSE_TMPL_SPEC_DECL(Class)
-DEF_TRAVERSE_TMPL_SPEC_DECL(Var)
+DEF_TRAVERSE_TMPL_SPEC_DECL(Class, CXXRecord)
+DEF_TRAVERSE_TMPL_SPEC_DECL(Var, Var)
template <typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseTemplateArgumentLocsHelper(
@@ -1945,12 +2038,7 @@ bool RecursiveASTVisitor<Derived>::TraverseTemplateArgumentLocsHelper(
#define DEF_TRAVERSE_TMPL_PART_SPEC_DECL(TMPLDECLKIND, DECLKIND) \
DEF_TRAVERSE_DECL(TMPLDECLKIND##TemplatePartialSpecializationDecl, { \
/* The partial specialization. */ \
- if (TemplateParameterList *TPL = D->getTemplateParameters()) { \
- for (TemplateParameterList::iterator I = TPL->begin(), E = TPL->end(); \
- I != E; ++I) { \
- TRY_TO(TraverseDecl(*I)); \
- } \
- } \
+ TRY_TO(TraverseTemplateParameterListHelper(D->getTemplateParameters())); \
/* The args that remains unspecialized. */ \
TRY_TO(TraverseTemplateArgumentLocsHelper( \
D->getTemplateArgsAsWritten()->getTemplateArgs(), \
@@ -2004,6 +2092,7 @@ DEF_TRAVERSE_DECL(BindingDecl, {
DEF_TRAVERSE_DECL(MSPropertyDecl, { TRY_TO(TraverseDeclaratorHelper(D)); })
DEF_TRAVERSE_DECL(MSGuidDecl, {})
+DEF_TRAVERSE_DECL(UnnamedGlobalConstantDecl, {})
DEF_TRAVERSE_DECL(TemplateParamObjectDecl, {})
@@ -2011,7 +2100,7 @@ DEF_TRAVERSE_DECL(FieldDecl, {
TRY_TO(TraverseDeclaratorHelper(D));
if (D->isBitField())
TRY_TO(TraverseStmt(D->getBitWidth()));
- else if (D->hasInClassInitializer())
+ if (D->hasInClassInitializer())
TRY_TO(TraverseStmt(D->getInClassInitializer()));
})
@@ -2052,6 +2141,13 @@ bool RecursiveASTVisitor<Derived>::TraverseFunctionHelper(FunctionDecl *D) {
TALI->NumTemplateArgs));
}
}
+ } else if (const DependentFunctionTemplateSpecializationInfo *DFSI =
+ D->getDependentSpecializationInfo()) {
+ if (const ASTTemplateArgumentListInfo *TALI =
+ DFSI->TemplateArgumentsAsWritten) {
+ TRY_TO(TraverseTemplateArgumentLocsHelper(TALI->getTemplateArgs(),
+ TALI->NumTemplateArgs));
+ }
}
// Visit the function type itself, which can be either
@@ -2099,7 +2195,13 @@ bool RecursiveASTVisitor<Derived>::TraverseFunctionHelper(FunctionDecl *D) {
}
if (VisitBody) {
- TRY_TO(TraverseStmt(D->getBody())); // Function body.
+ TRY_TO(TraverseStmt(D->getBody()));
+ // Body may contain using declarations whose shadows are parented to the
+ // FunctionDecl itself.
+ for (auto *Child : D->decls()) {
+ if (isa<UsingShadowDecl>(Child))
+ TRY_TO(TraverseDecl(Child));
+ }
}
return true;
}
@@ -2183,6 +2285,10 @@ DEF_TRAVERSE_DECL(ParmVarDecl, {
DEF_TRAVERSE_DECL(RequiresExprBodyDecl, {})
+DEF_TRAVERSE_DECL(ImplicitConceptSpecializationDecl, {
+ TRY_TO(TraverseTemplateArguments(D->getTemplateArguments()));
+})
+
#undef DEF_TRAVERSE_DECL
// ----------------- Stmt traversal -----------------
@@ -2393,15 +2499,25 @@ bool RecursiveASTVisitor<Derived>::TraverseSynOrSemInitListExpr(
return true;
}
-template<typename Derived>
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseObjCProtocolLoc(
+ ObjCProtocolLoc ProtocolLoc) {
+ return true;
+}
+
+template <typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseConceptReference(
- const ConceptReference &C) {
- TRY_TO(TraverseNestedNameSpecifierLoc(C.getNestedNameSpecifierLoc()));
- TRY_TO(TraverseDeclarationNameInfo(C.getConceptNameInfo()));
- if (C.hasExplicitTemplateArgs())
+ ConceptReference *CR) {
+ if (!getDerived().shouldTraversePostOrder())
+ TRY_TO(VisitConceptReference(CR));
+ TRY_TO(TraverseNestedNameSpecifierLoc(CR->getNestedNameSpecifierLoc()));
+ TRY_TO(TraverseDeclarationNameInfo(CR->getConceptNameInfo()));
+ if (CR->hasExplicitTemplateArgs())
TRY_TO(TraverseTemplateArgumentLocsHelper(
- C.getTemplateArgsAsWritten()->getTemplateArgs(),
- C.getTemplateArgsAsWritten()->NumTemplateArgs));
+ CR->getTemplateArgsAsWritten()->getTemplateArgs(),
+ CR->getTemplateArgsAsWritten()->NumTemplateArgs));
+ if (getDerived().shouldTraversePostOrder())
+ TRY_TO(VisitConceptReference(CR));
return true;
}
@@ -2436,7 +2552,11 @@ bool RecursiveASTVisitor<Derived>::TraverseInitListExpr(
// are interleaved. We also need to watch out for null types (default
// generic associations).
DEF_TRAVERSE_STMT(GenericSelectionExpr, {
- TRY_TO(TraverseStmt(S->getControllingExpr()));
+ if (S->isExprPredicate())
+ TRY_TO(TraverseStmt(S->getControllingExpr()));
+ else
+ TRY_TO(TraverseTypeLoc(S->getControllingType()->getTypeLoc()));
+
for (const GenericSelectionExpr::Association Assoc : S->associations()) {
if (TypeSourceInfo *TSI = Assoc.getTypeSourceInfo())
TRY_TO(TraverseTypeLoc(TSI->getTypeLoc()));
@@ -2606,7 +2726,11 @@ DEF_TRAVERSE_STMT(CXXDefaultArgExpr, {
TRY_TO(TraverseStmt(S->getExpr()));
})
-DEF_TRAVERSE_STMT(CXXDefaultInitExpr, {})
+DEF_TRAVERSE_STMT(CXXDefaultInitExpr, {
+ if (getDerived().shouldVisitImplicitCode())
+ TRY_TO(TraverseStmt(S->getExpr()));
+})
+
DEF_TRAVERSE_STMT(CXXDeleteExpr, {})
DEF_TRAVERSE_STMT(ExprWithCleanups, {})
DEF_TRAVERSE_STMT(CXXInheritedCtorInitExpr, {})
@@ -2735,6 +2859,7 @@ DEF_TRAVERSE_STMT(SubstNonTypeTemplateParmExpr, {})
DEF_TRAVERSE_STMT(FunctionParmPackExpr, {})
DEF_TRAVERSE_STMT(CXXFoldExpr, {})
DEF_TRAVERSE_STMT(AtomicExpr, {})
+DEF_TRAVERSE_STMT(CXXParenListInitExpr, {})
DEF_TRAVERSE_STMT(MaterializeTemporaryExpr, {
if (S->getLifetimeExtendedTemporaryDecl()) {
@@ -2778,7 +2903,7 @@ DEF_TRAVERSE_STMT(CoyieldExpr, {
})
DEF_TRAVERSE_STMT(ConceptSpecializationExpr, {
- TRY_TO(TraverseConceptReference(*S));
+ TRY_TO(TraverseConceptReference(S->getConceptReference()));
})
DEF_TRAVERSE_STMT(RequiresExpr, {
@@ -2786,21 +2911,7 @@ DEF_TRAVERSE_STMT(RequiresExpr, {
for (ParmVarDecl *Parm : S->getLocalParameters())
TRY_TO(TraverseDecl(Parm));
for (concepts::Requirement *Req : S->getRequirements())
- if (auto *TypeReq = dyn_cast<concepts::TypeRequirement>(Req)) {
- if (!TypeReq->isSubstitutionFailure())
- TRY_TO(TraverseTypeLoc(TypeReq->getType()->getTypeLoc()));
- } else if (auto *ExprReq = dyn_cast<concepts::ExprRequirement>(Req)) {
- if (!ExprReq->isExprSubstitutionFailure())
- TRY_TO(TraverseStmt(ExprReq->getExpr()));
- auto &RetReq = ExprReq->getReturnTypeRequirement();
- if (RetReq.isTypeConstraint())
- TRY_TO(TraverseTemplateParameterListHelper(
- RetReq.getTypeConstraintTemplateParameterList()));
- } else {
- auto *NestedReq = cast<concepts::NestedRequirement>(Req);
- if (!NestedReq->isSubstitutionFailure())
- TRY_TO(TraverseStmt(NestedReq->getConstraintExpr()));
- }
+ TRY_TO(TraverseConceptRequirement(Req));
})
// These literals (all of them) do not need any action.
@@ -2842,6 +2953,9 @@ RecursiveASTVisitor<Derived>::TraverseOMPLoopDirective(OMPLoopDirective *S) {
return TraverseOMPExecutableDirective(S);
}
+DEF_TRAVERSE_STMT(OMPMetaDirective,
+ { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
DEF_TRAVERSE_STMT(OMPParallelDirective,
{ TRY_TO(TraverseOMPExecutableDirective(S)); })
@@ -2866,6 +2980,9 @@ DEF_TRAVERSE_STMT(OMPSectionsDirective,
DEF_TRAVERSE_STMT(OMPSectionDirective,
{ TRY_TO(TraverseOMPExecutableDirective(S)); })
+DEF_TRAVERSE_STMT(OMPScopeDirective,
+ { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
DEF_TRAVERSE_STMT(OMPSingleDirective,
{ TRY_TO(TraverseOMPExecutableDirective(S)); })
@@ -2886,6 +3003,9 @@ DEF_TRAVERSE_STMT(OMPParallelForSimdDirective,
DEF_TRAVERSE_STMT(OMPParallelMasterDirective,
{ TRY_TO(TraverseOMPExecutableDirective(S)); })
+DEF_TRAVERSE_STMT(OMPParallelMaskedDirective,
+ { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
DEF_TRAVERSE_STMT(OMPParallelSectionsDirective,
{ TRY_TO(TraverseOMPExecutableDirective(S)); })
@@ -2967,6 +3087,18 @@ DEF_TRAVERSE_STMT(OMPParallelMasterTaskLoopDirective,
DEF_TRAVERSE_STMT(OMPParallelMasterTaskLoopSimdDirective,
{ TRY_TO(TraverseOMPExecutableDirective(S)); })
+DEF_TRAVERSE_STMT(OMPMaskedTaskLoopDirective,
+ { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
+DEF_TRAVERSE_STMT(OMPMaskedTaskLoopSimdDirective,
+ { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
+DEF_TRAVERSE_STMT(OMPParallelMaskedTaskLoopDirective,
+ { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
+DEF_TRAVERSE_STMT(OMPParallelMaskedTaskLoopSimdDirective,
+ { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
DEF_TRAVERSE_STMT(OMPDistributeDirective,
{ TRY_TO(TraverseOMPExecutableDirective(S)); })
@@ -3021,6 +3153,24 @@ DEF_TRAVERSE_STMT(OMPDispatchDirective,
DEF_TRAVERSE_STMT(OMPMaskedDirective,
{ TRY_TO(TraverseOMPExecutableDirective(S)); })
+DEF_TRAVERSE_STMT(OMPGenericLoopDirective,
+ { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
+DEF_TRAVERSE_STMT(OMPTeamsGenericLoopDirective,
+ { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
+DEF_TRAVERSE_STMT(OMPTargetTeamsGenericLoopDirective,
+ { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
+DEF_TRAVERSE_STMT(OMPParallelGenericLoopDirective,
+ { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
+DEF_TRAVERSE_STMT(OMPTargetParallelGenericLoopDirective,
+ { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
+DEF_TRAVERSE_STMT(OMPErrorDirective,
+ { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
// OpenMP clauses.
template <typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseOMPClause(OMPClause *C) {
@@ -3092,6 +3242,12 @@ RecursiveASTVisitor<Derived>::VisitOMPNumThreadsClause(OMPNumThreadsClause *C) {
}
template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPAlignClause(OMPAlignClause *C) {
+ TRY_TO(TraverseStmt(C->getAlignment()));
+ return true;
+}
+
+template <typename Derived>
bool RecursiveASTVisitor<Derived>::VisitOMPSafelenClause(OMPSafelenClause *C) {
TRY_TO(TraverseStmt(C->getSafelen()));
return true;
@@ -3169,6 +3325,22 @@ bool RecursiveASTVisitor<Derived>::VisitOMPAtomicDefaultMemOrderClause(
}
template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPAtClause(OMPAtClause *) {
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPSeverityClause(OMPSeverityClause *) {
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPMessageClause(OMPMessageClause *C) {
+ TRY_TO(TraverseStmt(C->getMessageString()));
+ return true;
+}
+
+template <typename Derived>
bool
RecursiveASTVisitor<Derived>::VisitOMPScheduleClause(OMPScheduleClause *C) {
TRY_TO(VisitOMPClauseWithPreInit(C));
@@ -3219,6 +3391,16 @@ bool RecursiveASTVisitor<Derived>::VisitOMPCaptureClause(OMPCaptureClause *) {
}
template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPCompareClause(OMPCompareClause *) {
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPFailClause(OMPFailClause *) {
+ return true;
+}
+
+template <typename Derived>
bool RecursiveASTVisitor<Derived>::VisitOMPSeqCstClause(OMPSeqCstClause *) {
return true;
}
@@ -3627,6 +3809,13 @@ bool RecursiveASTVisitor<Derived>::VisitOMPIsDevicePtrClause(
}
template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPHasDeviceAddrClause(
+ OMPHasDeviceAddrClause *C) {
+ TRY_TO(VisitOMPClauseList(C));
+ return true;
+}
+
+template <typename Derived>
bool RecursiveASTVisitor<Derived>::VisitOMPNontemporalClause(
OMPNontemporalClause *C) {
TRY_TO(VisitOMPClauseList(C));
@@ -3674,6 +3863,37 @@ bool RecursiveASTVisitor<Derived>::VisitOMPFilterClause(OMPFilterClause *C) {
return true;
}
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPBindClause(OMPBindClause *C) {
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPXDynCGroupMemClause(
+ OMPXDynCGroupMemClause *C) {
+ TRY_TO(VisitOMPClauseWithPreInit(C));
+ TRY_TO(TraverseStmt(C->getSize()));
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPDoacrossClause(
+ OMPDoacrossClause *C) {
+ TRY_TO(VisitOMPClauseList(C));
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPXAttributeClause(
+ OMPXAttributeClause *C) {
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPXBareClause(OMPXBareClause *C) {
+ return true;
+}
+
// FIXME: look at the following tricky-seeming exprs to see if we
// need to recurse on anything. These are ones that have methods
// returning decls or qualtypes or nestednamespecifier -- though I'm
diff --git a/contrib/llvm-project/clang/include/clang/AST/Redeclarable.h b/contrib/llvm-project/clang/include/clang/AST/Redeclarable.h
index 77b827c52bfb..091bb886f2d4 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Redeclarable.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Redeclarable.h
@@ -240,7 +240,7 @@ public:
class redecl_iterator {
/// Current - The current declaration.
decl_type *Current = nullptr;
- decl_type *Starter;
+ decl_type *Starter = nullptr;
bool PassedFirst = false;
public:
@@ -258,7 +258,8 @@ public:
redecl_iterator& operator++() {
assert(Current && "Advancing while iterator has reached end");
- // Sanity check to avoid infinite loop on invalid redecl chain.
+ // Make sure we don't infinitely loop on an invalid redecl chain. This
+ // should never happen.
if (Current->isFirstDecl()) {
if (PassedFirst) {
assert(0 && "Passed first decl twice, invalid redecl chain!");
diff --git a/contrib/llvm-project/clang/include/clang/AST/Stmt.h b/contrib/llvm-project/clang/include/clang/AST/Stmt.h
index 8e1d7df97096..55eca4007d17 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Stmt.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Stmt.h
@@ -13,13 +13,21 @@
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
+#include "clang/AST/APValue.h"
#include "clang/AST/DeclGroup.h"
#include "clang/AST/DependenceFlags.h"
+#include "clang/AST/OperationKinds.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
+#include "clang/Basic/Lambda.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/TypeTraits.h"
+#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/PointerIntPair.h"
@@ -33,6 +41,7 @@
#include <cassert>
#include <cstddef>
#include <iterator>
+#include <optional>
#include <string>
namespace llvm {
@@ -58,6 +67,13 @@ class SourceManager;
class StringLiteral;
class Token;
class VarDecl;
+enum class CharacterLiteralKind;
+enum class ConstantResultStorageKind;
+enum class CXXConstructionKind;
+enum class CXXNewInitializationStyle;
+enum class PredefinedIdentKind;
+enum class SourceLocIdentKind;
+enum class StringLiteralKind;
//===----------------------------------------------------------------------===//
// AST classes for statements.
@@ -99,6 +115,7 @@ protected:
friend class Stmt;
/// The statement class.
+ LLVM_PREFERRED_TYPE(StmtClass)
unsigned sClass : 8;
};
enum { NumStmtBits = 8 };
@@ -108,6 +125,7 @@ protected:
friend class ASTStmtWriter;
friend class NullStmt;
+ LLVM_PREFERRED_TYPE(StmtBitfields)
unsigned : NumStmtBits;
/// True if the null statement was preceded by an empty macro, e.g:
@@ -115,6 +133,7 @@ protected:
/// #define CALL(x)
/// CALL(0);
/// @endcode
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasLeadingEmptyMacro : 1;
/// The location of the semi-colon.
@@ -125,17 +144,21 @@ protected:
friend class ASTStmtReader;
friend class CompoundStmt;
+ LLVM_PREFERRED_TYPE(StmtBitfields)
unsigned : NumStmtBits;
- unsigned NumStmts : 32 - NumStmtBits;
+ /// True if the compound statement has one or more pragmas that set some
+ /// floating-point features.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned HasFPFeatures : 1;
- /// The location of the opening "{".
- SourceLocation LBraceLoc;
+ unsigned NumStmts;
};
class LabelStmtBitfields {
friend class LabelStmt;
+ LLVM_PREFERRED_TYPE(StmtBitfields)
unsigned : NumStmtBits;
SourceLocation IdentLoc;
@@ -145,6 +168,7 @@ protected:
friend class ASTStmtReader;
friend class AttributedStmt;
+ LLVM_PREFERRED_TYPE(StmtBitfields)
unsigned : NumStmtBits;
/// Number of attributes.
@@ -158,18 +182,23 @@ protected:
friend class ASTStmtReader;
friend class IfStmt;
+ LLVM_PREFERRED_TYPE(StmtBitfields)
unsigned : NumStmtBits;
- /// True if this if statement is a constexpr if.
- unsigned IsConstexpr : 1;
+ /// Whether this is a constexpr if, or a consteval if, or neither.
+ LLVM_PREFERRED_TYPE(IfStatementKind)
+ unsigned Kind : 3;
/// True if this if statement has storage for an else statement.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasElse : 1;
/// True if this if statement has storage for a variable declaration.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasVar : 1;
/// True if this if statement has storage for an init statement.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasInit : 1;
/// The location of the "if".
@@ -179,17 +208,21 @@ protected:
class SwitchStmtBitfields {
friend class SwitchStmt;
+ LLVM_PREFERRED_TYPE(StmtBitfields)
unsigned : NumStmtBits;
/// True if the SwitchStmt has storage for an init statement.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasInit : 1;
/// True if the SwitchStmt has storage for a condition variable.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasVar : 1;
/// If the SwitchStmt is a switch on an enum value, records whether all
/// the enum values were covered by CaseStmts. The coverage information
/// value is meant to be a hint for possible clients.
+ LLVM_PREFERRED_TYPE(bool)
unsigned AllEnumCasesCovered : 1;
/// The location of the "switch".
@@ -200,9 +233,11 @@ protected:
friend class ASTStmtReader;
friend class WhileStmt;
+ LLVM_PREFERRED_TYPE(StmtBitfields)
unsigned : NumStmtBits;
/// True if the WhileStmt has storage for a condition variable.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasVar : 1;
/// The location of the "while".
@@ -212,6 +247,7 @@ protected:
class DoStmtBitfields {
friend class DoStmt;
+ LLVM_PREFERRED_TYPE(StmtBitfields)
unsigned : NumStmtBits;
/// The location of the "do".
@@ -221,6 +257,7 @@ protected:
class ForStmtBitfields {
friend class ForStmt;
+ LLVM_PREFERRED_TYPE(StmtBitfields)
unsigned : NumStmtBits;
/// The location of the "for".
@@ -231,6 +268,7 @@ protected:
friend class GotoStmt;
friend class IndirectGotoStmt;
+ LLVM_PREFERRED_TYPE(StmtBitfields)
unsigned : NumStmtBits;
/// The location of the "goto".
@@ -240,6 +278,7 @@ protected:
class ContinueStmtBitfields {
friend class ContinueStmt;
+ LLVM_PREFERRED_TYPE(StmtBitfields)
unsigned : NumStmtBits;
/// The location of the "continue".
@@ -249,6 +288,7 @@ protected:
class BreakStmtBitfields {
friend class BreakStmt;
+ LLVM_PREFERRED_TYPE(StmtBitfields)
unsigned : NumStmtBits;
/// The location of the "break".
@@ -258,9 +298,11 @@ protected:
class ReturnStmtBitfields {
friend class ReturnStmt;
+ LLVM_PREFERRED_TYPE(StmtBitfields)
unsigned : NumStmtBits;
/// True if this ReturnStmt has storage for an NRVO candidate.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasNRVOCandidate : 1;
/// The location of the "return".
@@ -271,10 +313,12 @@ protected:
friend class SwitchCase;
friend class CaseStmt;
+ LLVM_PREFERRED_TYPE(StmtBitfields)
unsigned : NumStmtBits;
/// Used by CaseStmt to store whether it is a case statement
/// of the form case LHS ... RHS (a GNU extension).
+ LLVM_PREFERRED_TYPE(bool)
unsigned CaseStmtIsGNURange : 1;
/// The location of the "case" or "default" keyword.
@@ -307,11 +351,15 @@ protected:
friend class PseudoObjectExpr; // ctor
friend class ShuffleVectorExpr; // ctor
+ LLVM_PREFERRED_TYPE(StmtBitfields)
unsigned : NumStmtBits;
+ LLVM_PREFERRED_TYPE(ExprValueKind)
unsigned ValueKind : 2;
+ LLVM_PREFERRED_TYPE(ExprObjectKind)
unsigned ObjectKind : 3;
- unsigned /*ExprDependence*/ Dependent : llvm::BitWidth<ExprDependence>;
+ LLVM_PREFERRED_TYPE(ExprDependence)
+ unsigned Dependent : llvm::BitWidth<ExprDependence>;
};
enum { NumExprBits = NumStmtBits + 5 + llvm::BitWidth<ExprDependence> };
@@ -320,28 +368,35 @@ protected:
friend class ASTStmtWriter;
friend class ConstantExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
/// The kind of result that is tail-allocated.
+ LLVM_PREFERRED_TYPE(ConstantResultStorageKind)
unsigned ResultKind : 2;
- /// The kind of Result as defined by APValue::Kind.
+ /// The kind of Result as defined by APValue::ValueKind.
+ LLVM_PREFERRED_TYPE(APValue::ValueKind)
unsigned APValueKind : 4;
- /// When ResultKind == RSK_Int64, true if the tail-allocated integer is
- /// unsigned.
+ /// When ResultKind == ConstantResultStorageKind::Int64, true if the
+ /// tail-allocated integer is unsigned.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsUnsigned : 1;
- /// When ResultKind == RSK_Int64. the BitWidth of the tail-allocated
- /// integer. 7 bits because it is the minimal number of bits to represent a
- /// value from 0 to 64 (the size of the tail-allocated integer).
+ /// When ResultKind == ConstantResultStorageKind::Int64. the BitWidth of the
+ /// tail-allocated integer. 7 bits because it is the minimal number of bits
+ /// to represent a value from 0 to 64 (the size of the tail-allocated
+ /// integer).
unsigned BitWidth : 7;
- /// When ResultKind == RSK_APValue, true if the ASTContext will cleanup the
- /// tail-allocated APValue.
+ /// When ResultKind == ConstantResultStorageKind::APValue, true if the
+ /// ASTContext will cleanup the tail-allocated APValue.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasCleanup : 1;
/// True if this ConstantExpr was created for immediate invocation.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsImmediateInvocation : 1;
};
@@ -349,16 +404,22 @@ protected:
friend class ASTStmtReader;
friend class PredefinedExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
- /// The kind of this PredefinedExpr. One of the enumeration values
- /// in PredefinedExpr::IdentKind.
+ LLVM_PREFERRED_TYPE(PredefinedIdentKind)
unsigned Kind : 4;
/// True if this PredefinedExpr has a trailing "StringLiteral *"
/// for the predefined identifier.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasFunctionName : 1;
+ /// True if this PredefinedExpr should be treated as a StringLiteral (for
+ /// MSVC compatibility).
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsTransparent : 1;
+
/// The location of this PredefinedExpr.
SourceLocation Loc;
};
@@ -367,14 +428,25 @@ protected:
friend class ASTStmtReader; // deserialization
friend class DeclRefExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasQualifier : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasTemplateKWAndArgsInfo : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasFoundDecl : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned HadMultipleCandidates : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned RefersToEnclosingVariableOrCapture : 1;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned CapturedByCopyInLambdaWithExplicitObjectParameter : 1;
+ LLVM_PREFERRED_TYPE(NonOdrUseReason)
unsigned NonOdrUseReason : 2;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsImmediateEscalating : 1;
/// The location of the declaration name itself.
SourceLocation Loc;
@@ -384,9 +456,15 @@ protected:
class FloatingLiteralBitfields {
friend class FloatingLiteral;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
- unsigned Semantics : 3; // Provides semantics for APFloat construction
+ static_assert(
+ llvm::APFloat::S_MaxSemantics < 16,
+ "Too many Semantics enum values to fit in bitfield of size 4");
+ LLVM_PREFERRED_TYPE(llvm::APFloat::Semantics)
+ unsigned Semantics : 4; // Provides semantics for APFloat construction
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsExact : 1;
};
@@ -394,10 +472,12 @@ protected:
friend class ASTStmtReader;
friend class StringLiteral;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
/// The kind of this string literal.
/// One of the enumeration values of StringLiteral::StringKind.
+ LLVM_PREFERRED_TYPE(StringLiteralKind)
unsigned Kind : 3;
/// The width of a single character in bytes. Only values of 1, 2,
@@ -405,6 +485,7 @@ protected:
/// the target + string kind to the appropriate CharByteWidth.
unsigned CharByteWidth : 3;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsPascal : 1;
/// The number of concatenated token this string is made of.
@@ -415,22 +496,28 @@ protected:
class CharacterLiteralBitfields {
friend class CharacterLiteral;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
+ LLVM_PREFERRED_TYPE(CharacterLiteralKind)
unsigned Kind : 3;
};
class UnaryOperatorBitfields {
friend class UnaryOperator;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
+ LLVM_PREFERRED_TYPE(UnaryOperatorKind)
unsigned Opc : 5;
+ LLVM_PREFERRED_TYPE(bool)
unsigned CanOverflow : 1;
//
/// This is only meaningful for operations on floating point
/// types when additional values need to be in trailing storage.
/// It is 0 otherwise.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasFPFeatures : 1;
SourceLocation Loc;
@@ -439,9 +526,12 @@ protected:
class UnaryExprOrTypeTraitExprBitfields {
friend class UnaryExprOrTypeTraitExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
+ LLVM_PREFERRED_TYPE(UnaryExprOrTypeTrait)
unsigned Kind : 3;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsType : 1; // true if operand is a type, false if an expression.
};
@@ -449,6 +539,7 @@ protected:
friend class ArraySubscriptExpr;
friend class MatrixSubscriptExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
SourceLocation RBracketLoc;
@@ -457,14 +548,17 @@ protected:
class CallExprBitfields {
friend class CallExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
unsigned NumPreArgs : 1;
/// True if the callee of the call expression was found using ADL.
+ LLVM_PREFERRED_TYPE(bool)
unsigned UsesADL : 1;
/// True if the call expression has some floating-point features.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasFPFeatures : 1;
/// Padding used to align OffsetToTrailingObjects to a byte multiple.
@@ -481,15 +575,18 @@ protected:
friend class ASTStmtReader;
friend class MemberExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
/// IsArrow - True if this is "X->F", false if this is "X.F".
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsArrow : 1;
/// True if this member expression used a nested-name-specifier to
/// refer to the member, e.g., "x->Base::f", or found its member via
/// a using declaration. When true, a MemberExprNameQualifier
/// structure is allocated immediately after the MemberExpr.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasQualifierOrFoundDecl : 1;
/// True if this member expression specified a template keyword
@@ -497,15 +594,18 @@ protected:
/// x->template f, x->template f<int>.
/// When true, an ASTTemplateKWAndArgsInfo structure and its
/// TemplateArguments (if any) are present.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasTemplateKWAndArgsInfo : 1;
/// True if this member expression refers to a method that
/// was resolved from an overloaded set having size greater than 1.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HadMultipleCandidates : 1;
/// Value of type NonOdrUseReason indicating why this MemberExpr does
/// not constitute an odr-use of the named declaration. Meaningful only
/// when naming a static member.
+ LLVM_PREFERRED_TYPE(NonOdrUseReason)
unsigned NonOdrUseReason : 2;
/// This is the location of the -> or . in the expression.
@@ -516,12 +616,16 @@ protected:
friend class CastExpr;
friend class ImplicitCastExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
+ LLVM_PREFERRED_TYPE(CastKind)
unsigned Kind : 7;
+ LLVM_PREFERRED_TYPE(bool)
unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.
/// True if the call expression has some floating-point features.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasFPFeatures : 1;
/// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
@@ -532,13 +636,16 @@ protected:
class BinaryOperatorBitfields {
friend class BinaryOperator;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
+ LLVM_PREFERRED_TYPE(BinaryOperatorKind)
unsigned Opc : 6;
/// This is only meaningful for operations on floating point
/// types when additional values need to be in trailing storage.
/// It is 0 otherwise.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasFPFeatures : 1;
SourceLocation OpLoc;
@@ -547,10 +654,12 @@ protected:
class InitListExprBitfields {
friend class InitListExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
/// Whether this initializer list originally had a GNU array-range
/// designator in it. This is a temporary marker used by CodeGen.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HadArrayRangeDesignator : 1;
};
@@ -558,6 +667,7 @@ protected:
friend class ASTStmtReader;
friend class ParenListExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
/// The number of expressions in the paren list.
@@ -568,6 +678,7 @@ protected:
friend class ASTStmtReader;
friend class GenericSelectionExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
/// The location of the "_Generic".
@@ -578,29 +689,31 @@ protected:
friend class ASTStmtReader; // deserialization
friend class PseudoObjectExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
- // These don't need to be particularly wide, because they're
- // strictly limited by the forms of expressions we permit.
- unsigned NumSubExprs : 8;
- unsigned ResultIndex : 32 - 8 - NumExprBits;
+ unsigned NumSubExprs : 16;
+ unsigned ResultIndex : 16;
};
class SourceLocExprBitfields {
friend class ASTStmtReader;
friend class SourceLocExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
/// The kind of source location builtin represented by the SourceLocExpr.
- /// Ex. __builtin_LINE, __builtin_FUNCTION, ect.
- unsigned Kind : 2;
+ /// Ex. __builtin_LINE, __builtin_FUNCTION, etc.
+ LLVM_PREFERRED_TYPE(SourceLocIdentKind)
+ unsigned Kind : 3;
};
class StmtExprBitfields {
friend class ASTStmtReader;
friend class StmtExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
/// The number of levels of template parameters enclosing this statement
@@ -615,10 +728,12 @@ protected:
friend class ASTStmtReader;
friend class CXXOperatorCallExpr;
+ LLVM_PREFERRED_TYPE(CallExprBitfields)
unsigned : NumCallExprBits;
/// The kind of this overloaded operator. One of the enumerator
/// value of OverloadedOperatorKind.
+ LLVM_PREFERRED_TYPE(OverloadedOperatorKind)
unsigned OperatorKind : 6;
};
@@ -626,17 +741,21 @@ protected:
friend class ASTStmtReader;
friend class CXXRewrittenBinaryOperator;
+ LLVM_PREFERRED_TYPE(CallExprBitfields)
unsigned : NumCallExprBits;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsReversed : 1;
};
class CXXBoolLiteralExprBitfields {
friend class CXXBoolLiteralExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
/// The value of the boolean literal.
+ LLVM_PREFERRED_TYPE(bool)
unsigned Value : 1;
/// The location of the boolean literal.
@@ -646,6 +765,7 @@ protected:
class CXXNullPtrLiteralExprBitfields {
friend class CXXNullPtrLiteralExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
/// The location of the null pointer literal.
@@ -655,9 +775,11 @@ protected:
class CXXThisExprBitfields {
friend class CXXThisExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
/// Whether this is an implicit "this".
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsImplicit : 1;
/// The location of the "this".
@@ -668,9 +790,11 @@ protected:
friend class ASTStmtReader;
friend class CXXThrowExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
/// Whether the thrown variable (if any) is in scope.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsThrownVariableInScope : 1;
/// The location of the "throw".
@@ -681,8 +805,13 @@ protected:
friend class ASTStmtReader;
friend class CXXDefaultArgExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
+ /// Whether this CXXDefaultArgExpr rewrote its argument and stores a copy.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned HasRewrittenInit : 1;
+
/// The location where the default argument expression was used.
SourceLocation Loc;
};
@@ -691,8 +820,14 @@ protected:
friend class ASTStmtReader;
friend class CXXDefaultInitExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
+ /// Whether this CXXDefaultInitExprBitfields rewrote its argument and stores
+ /// a copy.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned HasRewrittenInit : 1;
+
/// The location where the default initializer expression was used.
SourceLocation Loc;
};
@@ -701,6 +836,7 @@ protected:
friend class ASTStmtReader;
friend class CXXScalarValueInitExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
SourceLocation RParenLoc;
@@ -711,28 +847,37 @@ protected:
friend class ASTStmtWriter;
friend class CXXNewExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
/// Was the usage ::new, i.e. is the global new to be used?
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsGlobalNew : 1;
/// Do we allocate an array? If so, the first trailing "Stmt *" is the
/// size expression.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsArray : 1;
/// Should the alignment be passed to the allocation function?
+ LLVM_PREFERRED_TYPE(bool)
unsigned ShouldPassAlignment : 1;
/// If this is an array allocation, does the usual deallocation
/// function for the allocated type want to know the allocated size?
+ LLVM_PREFERRED_TYPE(bool)
unsigned UsualArrayDeleteWantsSize : 1;
- /// What kind of initializer do we have? Could be none, parens, or braces.
- /// In storage, we distinguish between "none, and no initializer expr", and
- /// "none, but an implicit initializer expr".
+ // Is initializer expr present?
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned HasInitializer : 1;
+
+ /// What kind of initializer syntax used? Could be none, parens, or braces.
+ LLVM_PREFERRED_TYPE(CXXNewInitializationStyle)
unsigned StoredInitializationStyle : 2;
/// True if the allocated type was expressed as a parenthesized type-id.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsParenTypeId : 1;
/// The number of placement new arguments.
@@ -743,21 +888,26 @@ protected:
friend class ASTStmtReader;
friend class CXXDeleteExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
/// Is this a forced global delete, i.e. "::delete"?
+ LLVM_PREFERRED_TYPE(bool)
unsigned GlobalDelete : 1;
/// Is this the array form of delete, i.e. "delete[]"?
+ LLVM_PREFERRED_TYPE(bool)
unsigned ArrayForm : 1;
/// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
/// applied to pointer-to-array type (ArrayFormAsWritten will be false
/// while ArrayForm will be true).
+ LLVM_PREFERRED_TYPE(bool)
unsigned ArrayFormAsWritten : 1;
/// Does the usual deallocation function for the element type require
/// a size_t argument?
+ LLVM_PREFERRED_TYPE(bool)
unsigned UsualArrayDeleteWantsSize : 1;
/// Location of the expression.
@@ -769,13 +919,16 @@ protected:
friend class ASTStmtWriter;
friend class TypeTraitExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
/// The kind of type trait, which is a value of a TypeTrait enumerator.
+ LLVM_PREFERRED_TYPE(TypeTrait)
unsigned Kind : 8;
/// If this expression is not value-dependent, this indicates whether
/// the trait evaluated true or false.
+ LLVM_PREFERRED_TYPE(bool)
unsigned Value : 1;
/// The number of arguments to this type trait. According to [implimits]
@@ -789,10 +942,12 @@ protected:
friend class ASTStmtWriter;
friend class DependentScopeDeclRefExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasTemplateKWAndArgsInfo : 1;
};
@@ -800,14 +955,23 @@ protected:
friend class ASTStmtReader;
friend class CXXConstructExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
+ LLVM_PREFERRED_TYPE(bool)
unsigned Elidable : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned HadMultipleCandidates : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned ListInitialization : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned StdInitListInitialization : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned ZeroInitialization : 1;
+ LLVM_PREFERRED_TYPE(CXXConstructionKind)
unsigned ConstructionKind : 3;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsImmediateEscalating : 1;
SourceLocation Loc;
};
@@ -816,9 +980,11 @@ protected:
friend class ASTStmtReader; // deserialization
friend class ExprWithCleanups;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
// When false, it must not have side effects.
+ LLVM_PREFERRED_TYPE(bool)
unsigned CleanupsHaveSideEffects : 1;
unsigned NumObjects : 32 - 1 - NumExprBits;
@@ -828,6 +994,7 @@ protected:
friend class ASTStmtReader;
friend class CXXUnresolvedConstructExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
/// The number of arguments used to construct the type.
@@ -838,18 +1005,22 @@ protected:
friend class ASTStmtReader;
friend class CXXDependentScopeMemberExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsArrow : 1;
/// Whether this member expression has info for explicit template
/// keyword and arguments.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasTemplateKWAndArgsInfo : 1;
/// See getFirstQualifierFoundInScope() and the comment listing
/// the trailing objects.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasFirstQualifierFoundInScope : 1;
/// The location of the '->' or '.' operator.
@@ -860,10 +1031,12 @@ protected:
friend class ASTStmtReader;
friend class OverloadExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasTemplateKWAndArgsInfo : 1;
/// Padding used by the derived classes to store various bits. If you
@@ -880,14 +1053,17 @@ protected:
friend class ASTStmtReader;
friend class UnresolvedLookupExpr;
+ LLVM_PREFERRED_TYPE(OverloadExprBitfields)
unsigned : NumOverloadExprBits;
/// True if these lookup results should be extended by
/// argument-dependent lookup if this is the operand of a function call.
+ LLVM_PREFERRED_TYPE(bool)
unsigned RequiresADL : 1;
/// True if these lookup results are overloaded. This is pretty trivially
/// rederivable if we urgently need to kill this field.
+ LLVM_PREFERRED_TYPE(bool)
unsigned Overloaded : 1;
};
static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
@@ -898,13 +1074,16 @@ protected:
friend class ASTStmtReader;
friend class UnresolvedMemberExpr;
+ LLVM_PREFERRED_TYPE(OverloadExprBitfields)
unsigned : NumOverloadExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsArrow : 1;
/// Whether the lookup results contain an unresolved using declaration.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasUnresolvedUsing : 1;
};
static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
@@ -915,8 +1094,10 @@ protected:
friend class ASTStmtReader;
friend class CXXNoexceptExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
+ LLVM_PREFERRED_TYPE(bool)
unsigned Value : 1;
};
@@ -924,6 +1105,7 @@ protected:
friend class ASTStmtReader;
friend class SubstNonTypeTemplateParmExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
/// The location of the non-type template parameter reference.
@@ -935,17 +1117,21 @@ protected:
friend class ASTStmtWriter;
friend class LambdaExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
/// The default capture kind, which is a value of type
/// LambdaCaptureDefault.
+ LLVM_PREFERRED_TYPE(LambdaCaptureDefault)
unsigned CaptureDefault : 2;
/// Whether this lambda had an explicit parameter list vs. an
/// implicit (and empty) parameter list.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ExplicitParams : 1;
/// Whether this lambda had the result type explicitly specified.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ExplicitResultType : 1;
/// The number of captures.
@@ -957,19 +1143,23 @@ protected:
friend class ASTStmtWriter;
friend class RequiresExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsSatisfied : 1;
SourceLocation RequiresKWLoc;
};
- //===--- C++ Coroutines TS bitfields classes ---===//
+ //===--- C++ Coroutines bitfields classes ---===//
class CoawaitExprBitfields {
friend class CoawaitExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsImplicit : 1;
};
@@ -978,8 +1168,10 @@ protected:
class ObjCIndirectCopyRestoreExprBitfields {
friend class ObjCIndirectCopyRestoreExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
+ LLVM_PREFERRED_TYPE(bool)
unsigned ShouldCopy : 1;
};
@@ -989,10 +1181,12 @@ protected:
friend class ASTStmtReader;
friend class OpaqueValueExpr;
+ LLVM_PREFERRED_TYPE(ExprBitfields)
unsigned : NumExprBits;
/// The OVE is a unique semantic reference to its source expression if this
/// bit is set to true.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsUnique : 1;
SourceLocation Loc;
@@ -1067,7 +1261,7 @@ protected:
LambdaExprBitfields LambdaExprBits;
RequiresExprBitfields RequiresExprBits;
- // C++ Coroutines TS expressions
+ // C++ Coroutines expressions
CoawaitExprBitfields CoawaitBits;
// Obj-C Expressions
@@ -1215,6 +1409,11 @@ public:
const PrintingPolicy &Policy, unsigned Indentation = 0,
StringRef NewlineSymbol = "\n",
const ASTContext *Context = nullptr) const;
+ void printPrettyControlled(raw_ostream &OS, PrinterHelper *Helper,
+ const PrintingPolicy &Policy,
+ unsigned Indentation = 0,
+ StringRef NewlineSymbol = "\n",
+ const ASTContext *Context = nullptr) const;
/// Pretty-prints in JSON format.
void printJson(raw_ostream &Out, PrinterHelper *Helper,
@@ -1238,7 +1437,7 @@ public:
}
/// Child Iterators: All subclasses must implement 'children'
- /// to permit easy iteration over the substatements/subexpessions of an
+ /// to permit easy iteration over the substatements/subexpressions of an
/// AST node. This permits easy iteration over all nodes in the AST.
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;
@@ -1271,8 +1470,13 @@ public:
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
+ /// \param ProfileLambdaExpr whether or not to profile lambda expressions.
+ /// When false, the lambda expressions are never considered to be equal to
+ /// other lambda expressions. When true, the lambda expressions with the same
+ /// implementation will be considered to be the same. ProfileLambdaExpr should
+ /// only be true when we try to merge two declarations within modules.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
- bool Canonical) const;
+ bool Canonical, bool ProfileLambdaExpr = false) const;
/// Calculate a unique representation for a statement that is
/// stable across compiler invocations.
@@ -1395,36 +1599,63 @@ public:
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
-class CompoundStmt final : public Stmt,
- private llvm::TrailingObjects<CompoundStmt, Stmt *> {
+class CompoundStmt final
+ : public Stmt,
+ private llvm::TrailingObjects<CompoundStmt, Stmt *, FPOptionsOverride> {
friend class ASTStmtReader;
friend TrailingObjects;
- /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
+ /// The location of the opening "{".
+ SourceLocation LBraceLoc;
+
+ /// The location of the closing "}".
SourceLocation RBraceLoc;
- CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
+ CompoundStmt(ArrayRef<Stmt *> Stmts, FPOptionsOverride FPFeatures,
+ SourceLocation LB, SourceLocation RB);
explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}
void setStmts(ArrayRef<Stmt *> Stmts);
+ /// Set FPOptionsOverride in trailing storage. Used only by Serialization.
+ void setStoredFPFeatures(FPOptionsOverride F) {
+ assert(hasStoredFPFeatures());
+ *getTrailingObjects<FPOptionsOverride>() = F;
+ }
+
+ size_t numTrailingObjects(OverloadToken<Stmt *>) const {
+ return CompoundStmtBits.NumStmts;
+ }
+
public:
static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
- SourceLocation LB, SourceLocation RB);
+ FPOptionsOverride FPFeatures, SourceLocation LB,
+ SourceLocation RB);
// Build an empty compound statement with a location.
- explicit CompoundStmt(SourceLocation Loc)
- : Stmt(CompoundStmtClass), RBraceLoc(Loc) {
+ explicit CompoundStmt(SourceLocation Loc) : CompoundStmt(Loc, Loc) {}
+
+ CompoundStmt(SourceLocation Loc, SourceLocation EndLoc)
+ : Stmt(CompoundStmtClass), LBraceLoc(Loc), RBraceLoc(EndLoc) {
CompoundStmtBits.NumStmts = 0;
- CompoundStmtBits.LBraceLoc = Loc;
+ CompoundStmtBits.HasFPFeatures = 0;
}
// Build an empty compound statement.
- static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);
+ static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts,
+ bool HasFPFeatures);
bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
unsigned size() const { return CompoundStmtBits.NumStmts; }
+ bool hasStoredFPFeatures() const { return CompoundStmtBits.HasFPFeatures; }
+
+ /// Get FPOptionsOverride from trailing storage.
+ FPOptionsOverride getStoredFPFeatures() const {
+ assert(hasStoredFPFeatures());
+ return *getTrailingObjects<FPOptionsOverride>();
+ }
+
using body_iterator = Stmt **;
using body_range = llvm::iterator_range<body_iterator>;
@@ -1499,10 +1730,10 @@ public:
return const_cast<CompoundStmt *>(this)->getStmtExprResult();
}
- SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
+ SourceLocation getBeginLoc() const { return LBraceLoc; }
SourceLocation getEndLoc() const { return RBraceLoc; }
- SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
+ SourceLocation getLBracLoc() const { return LBraceLoc; }
SourceLocation getRBracLoc() const { return RBraceLoc; }
static bool classof(const Stmt *T) {
@@ -1879,7 +2110,7 @@ public:
SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
ArrayRef<const Attr *> getAttrs() const {
- return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
+ return llvm::ArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
}
Stmt *getSubStmt() { return SubStmt; }
@@ -1950,8 +2181,8 @@ class IfStmt final
unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }
/// Build an if/then/else statement.
- IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
- VarDecl *Var, Expr *Cond, SourceLocation LParenLoc,
+ IfStmt(const ASTContext &Ctx, SourceLocation IL, IfStatementKind Kind,
+ Stmt *Init, VarDecl *Var, Expr *Cond, SourceLocation LParenLoc,
SourceLocation RParenLoc, Stmt *Then, SourceLocation EL, Stmt *Else);
/// Build an empty if/then/else statement.
@@ -1960,9 +2191,9 @@ class IfStmt final
public:
/// Create an IfStmt.
static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
- bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
- SourceLocation LPL, SourceLocation RPL, Stmt *Then,
- SourceLocation EL = SourceLocation(),
+ IfStatementKind Kind, Stmt *Init, VarDecl *Var,
+ Expr *Cond, SourceLocation LPL, SourceLocation RPL,
+ Stmt *Then, SourceLocation EL = SourceLocation(),
Stmt *Else = nullptr);
/// Create an empty IfStmt optionally with storage for an else statement,
@@ -2047,6 +2278,11 @@ public:
: nullptr;
}
+ void setConditionVariableDeclStmt(DeclStmt *CondVar) {
+ assert(hasVarStorage());
+ getTrailingObjects<Stmt *>()[varOffset()] = CondVar;
+ }
+
Stmt *getInit() {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
@@ -2077,13 +2313,35 @@ public:
*getTrailingObjects<SourceLocation>() = ElseLoc;
}
- bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
- void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }
+ bool isConsteval() const {
+ return getStatementKind() == IfStatementKind::ConstevalNonNegated ||
+ getStatementKind() == IfStatementKind::ConstevalNegated;
+ }
+
+ bool isNonNegatedConsteval() const {
+ return getStatementKind() == IfStatementKind::ConstevalNonNegated;
+ }
+
+ bool isNegatedConsteval() const {
+ return getStatementKind() == IfStatementKind::ConstevalNegated;
+ }
+
+ bool isConstexpr() const {
+ return getStatementKind() == IfStatementKind::Constexpr;
+ }
+
+ void setStatementKind(IfStatementKind Kind) {
+ IfStmtBits.Kind = static_cast<unsigned>(Kind);
+ }
+
+ IfStatementKind getStatementKind() const {
+ return static_cast<IfStatementKind>(IfStmtBits.Kind);
+ }
/// If this is an 'if constexpr', determine which substatement will be taken.
- /// Otherwise, or if the condition is value-dependent, returns None.
- Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;
- Optional<Stmt *> getNondiscardedCase(const ASTContext &Ctx);
+ /// Otherwise, or if the condition is value-dependent, returns std::nullopt.
+ std::optional<const Stmt *> getNondiscardedCase(const ASTContext &Ctx) const;
+ std::optional<Stmt *> getNondiscardedCase(const ASTContext &Ctx);
bool isObjCAvailabilityCheck() const;
@@ -2101,13 +2359,19 @@ public:
// Iterators over subexpressions. The iterators will include iterating
// over the initialization expression referenced by the condition variable.
child_range children() {
- return child_range(getTrailingObjects<Stmt *>(),
+ // We always store a condition, but there is none for consteval if
+ // statements, so skip it.
+ return child_range(getTrailingObjects<Stmt *>() +
+ (isConsteval() ? thenOffset() : 0),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
- return const_child_range(getTrailingObjects<Stmt *>(),
+ // We always store a condition, but there is none for consteval if
+ // statements, so skip it.
+ return const_child_range(getTrailingObjects<Stmt *>() +
+ (isConsteval() ? thenOffset() : 0),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
@@ -2251,6 +2515,11 @@ public:
: nullptr;
}
+ void setConditionVariableDeclStmt(DeclStmt *CondVar) {
+ assert(hasVarStorage());
+ getTrailingObjects<Stmt *>()[varOffset()] = CondVar;
+ }
+
SwitchCase *getSwitchCaseList() { return FirstCase; }
const SwitchCase *getSwitchCaseList() const { return FirstCase; }
void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }
@@ -2414,6 +2683,11 @@ public:
: nullptr;
}
+ void setConditionVariableDeclStmt(DeclStmt *CondVar) {
+ assert(hasVarStorage());
+ getTrailingObjects<Stmt *>()[varOffset()] = CondVar;
+ }
+
SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }
@@ -2503,6 +2777,8 @@ public:
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
+ friend class ASTStmtReader;
+
enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
SourceLocation LParenLoc, RParenLoc;
@@ -2530,10 +2806,18 @@ public:
/// If this ForStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
+ DeclStmt *getConditionVariableDeclStmt() {
+ return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
+ }
+
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
}
+ void setConditionVariableDeclStmt(DeclStmt *CondVar) {
+ SubExprs[CONDVAR] = CondVar;
+ }
+
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); }
Stmt *getBody() { return SubExprs[BODY]; }
@@ -3262,16 +3546,16 @@ public:
//===--- Other ---===//
ArrayRef<StringRef> getAllConstraints() const {
- return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
+ return llvm::ArrayRef(Constraints, NumInputs + NumOutputs);
}
ArrayRef<StringRef> getClobbers() const {
- return llvm::makeArrayRef(Clobbers, NumClobbers);
+ return llvm::ArrayRef(Clobbers, NumClobbers);
}
ArrayRef<Expr*> getAllExprs() const {
- return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
- NumInputs + NumOutputs);
+ return llvm::ArrayRef(reinterpret_cast<Expr **>(Exprs),
+ NumInputs + NumOutputs);
}
StringRef getClobber(unsigned i) const { return getClobbers()[i]; }
@@ -3485,8 +3769,11 @@ public:
llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
SourceLocation Loc;
+ Capture() = default;
+
public:
friend class ASTStmtReader;
+ friend class CapturedStmt;
/// Create a new capture.
///
diff --git a/contrib/llvm-project/clang/include/clang/AST/StmtCXX.h b/contrib/llvm-project/clang/include/clang/AST/StmtCXX.h
index 4d1f3e8ef255..8b4ef24ed376 100644
--- a/contrib/llvm-project/clang/include/clang/AST/StmtCXX.h
+++ b/contrib/llvm-project/clang/include/clang/AST/StmtCXX.h
@@ -75,7 +75,8 @@ class CXXTryStmt final : public Stmt,
unsigned NumHandlers;
size_t numTrailingObjects(OverloadToken<Stmt *>) const { return NumHandlers; }
- CXXTryStmt(SourceLocation tryLoc, Stmt *tryBlock, ArrayRef<Stmt*> handlers);
+ CXXTryStmt(SourceLocation tryLoc, CompoundStmt *tryBlock,
+ ArrayRef<Stmt *> handlers);
CXXTryStmt(EmptyShell Empty, unsigned numHandlers)
: Stmt(CXXTryStmtClass), NumHandlers(numHandlers) { }
@@ -84,7 +85,7 @@ class CXXTryStmt final : public Stmt,
public:
static CXXTryStmt *Create(const ASTContext &C, SourceLocation tryLoc,
- Stmt *tryBlock, ArrayRef<Stmt*> handlers);
+ CompoundStmt *tryBlock, ArrayRef<Stmt *> handlers);
static CXXTryStmt *Create(const ASTContext &C, EmptyShell Empty,
unsigned numHandlers);
@@ -326,8 +327,8 @@ class CoroutineBodyStmt final
OnFallthrough, ///< Handler for control flow falling off the body.
Allocate, ///< Coroutine frame memory allocation.
Deallocate, ///< Coroutine frame memory deallocation.
- ReturnValue, ///< Return value for thunk function: p.get_return_object().
ResultDecl, ///< Declaration holding the result of get_return_object.
+ ReturnValue, ///< Return value for thunk function: p.get_return_object().
ReturnStmt, ///< Return statement for the thunk function.
ReturnStmtOnAllocFailure, ///< Return statement if allocation failed.
FirstParamMove ///< First offset for move construction of parameter copies.
@@ -353,8 +354,8 @@ public:
Stmt *OnFallthrough = nullptr;
Expr *Allocate = nullptr;
Expr *Deallocate = nullptr;
- Expr *ReturnValue = nullptr;
Stmt *ResultDecl = nullptr;
+ Expr *ReturnValue = nullptr;
Stmt *ReturnStmt = nullptr;
Stmt *ReturnStmtOnAllocFailure = nullptr;
ArrayRef<Stmt *> ParamMoves;
@@ -374,9 +375,10 @@ public:
}
/// Retrieve the body of the coroutine as written. This will be either
- /// a CompoundStmt or a TryStmt.
- Stmt *getBody() const {
- return getStoredStmts()[SubStmt::Body];
+ /// a CompoundStmt. If the coroutine is in function-try-block, we will
+ /// wrap the CXXTryStmt into a CompoundStmt to keep consistency.
+ CompoundStmt *getBody() const {
+ return cast<CompoundStmt>(getStoredStmts()[SubStmt::Body]);
}
Stmt *getPromiseDeclStmt() const {
@@ -406,10 +408,14 @@ public:
Expr *getDeallocate() const {
return cast_or_null<Expr>(getStoredStmts()[SubStmt::Deallocate]);
}
+ Stmt *getResultDecl() const { return getStoredStmts()[SubStmt::ResultDecl]; }
Expr *getReturnValueInit() const {
return cast<Expr>(getStoredStmts()[SubStmt::ReturnValue]);
}
- Stmt *getResultDecl() const { return getStoredStmts()[SubStmt::ResultDecl]; }
+ Expr *getReturnValue() const {
+ auto *RS = dyn_cast_or_null<clang::ReturnStmt>(getReturnStmt());
+ return RS ? RS->getRetValue() : nullptr;
+ }
Stmt *getReturnStmt() const { return getStoredStmts()[SubStmt::ReturnStmt]; }
Stmt *getReturnStmtOnAllocFailure() const {
return getStoredStmts()[SubStmt::ReturnStmtOnAllocFailure];
@@ -437,6 +443,17 @@ public:
NumParams);
}
+ child_range childrenExclBody() {
+ return child_range(getStoredStmts() + SubStmt::Body + 1,
+ getStoredStmts() + SubStmt::FirstParamMove + NumParams);
+ }
+
+ const_child_range childrenExclBody() const {
+ return const_child_range(getStoredStmts() + SubStmt::Body + 1,
+ getStoredStmts() + SubStmt::FirstParamMove +
+ NumParams);
+ }
+
static bool classof(const Stmt *T) {
return T->getStmtClass() == CoroutineBodyStmtClass;
}
@@ -495,16 +512,10 @@ public:
}
child_range children() {
- if (!getOperand())
- return child_range(SubStmts + SubStmt::PromiseCall,
- SubStmts + SubStmt::Count);
return child_range(SubStmts, SubStmts + SubStmt::Count);
}
const_child_range children() const {
- if (!getOperand())
- return const_child_range(SubStmts + SubStmt::PromiseCall,
- SubStmts + SubStmt::Count);
return const_child_range(SubStmts, SubStmts + SubStmt::Count);
}
diff --git a/contrib/llvm-project/clang/include/clang/AST/StmtObjC.h b/contrib/llvm-project/clang/include/clang/AST/StmtObjC.h
index 948ef2421cb9..c46ff4634c82 100644
--- a/contrib/llvm-project/clang/include/clang/AST/StmtObjC.h
+++ b/contrib/llvm-project/clang/include/clang/AST/StmtObjC.h
@@ -162,8 +162,14 @@ public:
};
/// Represents Objective-C's \@try ... \@catch ... \@finally statement.
-class ObjCAtTryStmt : public Stmt {
-private:
+class ObjCAtTryStmt final
+ : public Stmt,
+ private llvm::TrailingObjects<ObjCAtTryStmt, Stmt *> {
+ friend TrailingObjects;
+ size_t numTrailingObjects(OverloadToken<Stmt *>) const {
+ return 1 + NumCatchStmts + HasFinally;
+ }
+
// The location of the @ in the \@try.
SourceLocation AtTryLoc;
@@ -178,10 +184,8 @@ private:
/// The order of the statements in memory follows the order in the source,
/// with the \@try body first, followed by the \@catch statements (if any)
/// and, finally, the \@finally (if it exists).
- Stmt **getStmts() { return reinterpret_cast<Stmt **> (this + 1); }
- const Stmt* const *getStmts() const {
- return reinterpret_cast<const Stmt * const*> (this + 1);
- }
+ Stmt **getStmts() { return getTrailingObjects<Stmt *>(); }
+ Stmt *const *getStmts() const { return getTrailingObjects<Stmt *>(); }
ObjCAtTryStmt(SourceLocation atTryLoc, Stmt *atTryStmt,
Stmt **CatchStmts, unsigned NumCatchStmts,
@@ -257,13 +261,34 @@ public:
}
child_range children() {
- return child_range(getStmts(),
- getStmts() + 1 + NumCatchStmts + HasFinally);
+ return child_range(
+ getStmts(), getStmts() + numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(const_cast<ObjCAtTryStmt *>(this)->children());
}
+
+ using catch_stmt_iterator = CastIterator<ObjCAtCatchStmt>;
+ using const_catch_stmt_iterator = ConstCastIterator<ObjCAtCatchStmt>;
+ using catch_range = llvm::iterator_range<catch_stmt_iterator>;
+ using catch_const_range = llvm::iterator_range<const_catch_stmt_iterator>;
+
+ catch_stmt_iterator catch_stmts_begin() { return getStmts() + 1; }
+ catch_stmt_iterator catch_stmts_end() {
+ return catch_stmts_begin() + NumCatchStmts;
+ }
+ catch_range catch_stmts() {
+ return catch_range(catch_stmts_begin(), catch_stmts_end());
+ }
+
+ const_catch_stmt_iterator catch_stmts_begin() const { return getStmts() + 1; }
+ const_catch_stmt_iterator catch_stmts_end() const {
+ return catch_stmts_begin() + NumCatchStmts;
+ }
+ catch_const_range catch_stmts() const {
+ return catch_const_range(catch_stmts_begin(), catch_stmts_end());
+ }
};
/// Represents Objective-C's \@synchronized statement.
diff --git a/contrib/llvm-project/clang/include/clang/AST/StmtOpenMP.h b/contrib/llvm-project/clang/include/clang/AST/StmtOpenMP.h
index 9c85df741f48..621643391535 100644
--- a/contrib/llvm-project/clang/include/clang/AST/StmtOpenMP.h
+++ b/contrib/llvm-project/clang/include/clang/AST/StmtOpenMP.h
@@ -277,10 +277,19 @@ class OMPExecutableDirective : public Stmt {
/// Get the clauses storage.
MutableArrayRef<OMPClause *> getClauses() {
if (!Data)
- return llvm::None;
+ return std::nullopt;
return Data->getClauses();
}
+ /// Was this directive mapped from an another directive?
+ /// e.g. 1) omp loop bind(parallel) is mapped to OMPD_for
+ /// 2) omp loop bind(teams) is mapped to OMPD_distribute
+ /// 3) omp loop bind(thread) is mapped to OMPD_simd
+ /// It was necessary to note it down in the Directive because of
+ /// clang::TreeTransform::TransformOMPExecutableDirective() pass in
+ /// the frontend.
+ OpenMPDirectiveKind PrevMappedDirective = llvm::omp::OMPD_unknown;
+
protected:
/// Data, associated with the directive.
OMPChildren *Data = nullptr;
@@ -345,6 +354,10 @@ protected:
return Inst;
}
+ void setMappedDirective(OpenMPDirectiveKind MappedDirective) {
+ PrevMappedDirective = MappedDirective;
+ }
+
public:
/// Iterates over expressions/statements used in the construct.
class used_clauses_child_iterator
@@ -399,8 +412,9 @@ public:
static llvm::iterator_range<used_clauses_child_iterator>
used_clauses_children(ArrayRef<OMPClause *> Clauses) {
- return {used_clauses_child_iterator(Clauses),
- used_clauses_child_iterator(llvm::makeArrayRef(Clauses.end(), 0))};
+ return {
+ used_clauses_child_iterator(Clauses),
+ used_clauses_child_iterator(llvm::ArrayRef(Clauses.end(), (size_t)0))};
}
/// Iterates over a filtered subrange of clauses applied to a
@@ -445,7 +459,7 @@ public:
getClausesOfKind(ArrayRef<OMPClause *> Clauses) {
return {specific_clause_iterator<SpecificClause>(Clauses),
specific_clause_iterator<SpecificClause>(
- llvm::makeArrayRef(Clauses.end(), 0))};
+ llvm::ArrayRef(Clauses.end(), (size_t)0))};
}
template <typename SpecificClause>
@@ -571,7 +585,7 @@ public:
ArrayRef<OMPClause *> clauses() const {
if (!Data)
- return llvm::None;
+ return std::nullopt;
return Data->getClauses();
}
@@ -597,6 +611,8 @@ public:
"Expected directive with the associated statement.");
return Data->getRawStmt();
}
+
+ OpenMPDirectiveKind getMappedDirective() const { return PrevMappedDirective; }
};
/// This represents '#pragma omp parallel' directive.
@@ -889,22 +905,23 @@ public:
/// Calls the specified callback function for all the loops in \p CurStmt,
/// from the outermost to the innermost.
- static bool doForAllLoops(Stmt *CurStmt, bool TryImperfectlyNestedLoops,
- unsigned NumLoops,
- llvm::function_ref<bool(unsigned, Stmt *)> Callback,
- llvm::function_ref<void(OMPLoopBasedDirective *)>
- OnTransformationCallback);
+ static bool
+ doForAllLoops(Stmt *CurStmt, bool TryImperfectlyNestedLoops,
+ unsigned NumLoops,
+ llvm::function_ref<bool(unsigned, Stmt *)> Callback,
+ llvm::function_ref<void(OMPLoopTransformationDirective *)>
+ OnTransformationCallback);
static bool
doForAllLoops(const Stmt *CurStmt, bool TryImperfectlyNestedLoops,
unsigned NumLoops,
llvm::function_ref<bool(unsigned, const Stmt *)> Callback,
- llvm::function_ref<void(const OMPLoopBasedDirective *)>
+ llvm::function_ref<void(const OMPLoopTransformationDirective *)>
OnTransformationCallback) {
auto &&NewCallback = [Callback](unsigned Cnt, Stmt *CurStmt) {
return Callback(Cnt, CurStmt);
};
auto &&NewTransformCb =
- [OnTransformationCallback](OMPLoopBasedDirective *A) {
+ [OnTransformationCallback](OMPLoopTransformationDirective *A) {
OnTransformationCallback(A);
};
return doForAllLoops(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
@@ -917,7 +934,7 @@ public:
doForAllLoops(Stmt *CurStmt, bool TryImperfectlyNestedLoops,
unsigned NumLoops,
llvm::function_ref<bool(unsigned, Stmt *)> Callback) {
- auto &&TransformCb = [](OMPLoopBasedDirective *) {};
+ auto &&TransformCb = [](OMPLoopTransformationDirective *) {};
return doForAllLoops(CurStmt, TryImperfectlyNestedLoops, NumLoops, Callback,
TransformCb);
}
@@ -954,6 +971,47 @@ public:
}
};
+/// The base class for all loop transformation directives.
+class OMPLoopTransformationDirective : public OMPLoopBasedDirective {
+ friend class ASTStmtReader;
+
+ /// Number of loops generated by this loop transformation.
+ unsigned NumGeneratedLoops = 0;
+
+protected:
+ explicit OMPLoopTransformationDirective(StmtClass SC,
+ OpenMPDirectiveKind Kind,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ unsigned NumAssociatedLoops)
+ : OMPLoopBasedDirective(SC, Kind, StartLoc, EndLoc, NumAssociatedLoops) {}
+
+ /// Set the number of loops generated by this loop transformation.
+ void setNumGeneratedLoops(unsigned Num) { NumGeneratedLoops = Num; }
+
+public:
+ /// Return the number of associated (consumed) loops.
+ unsigned getNumAssociatedLoops() const { return getLoopsNumber(); }
+
+ /// Return the number of loops generated by this loop transformation.
+ unsigned getNumGeneratedLoops() const { return NumGeneratedLoops; }
+
+ /// Get the de-sugared statements after the loop transformation.
+ ///
+ /// Might be nullptr if either the directive generates no loops and is handled
+ /// directly in CodeGen, or resolving a template-dependence context is
+ /// required.
+ Stmt *getTransformedStmt() const;
+
+ /// Return preinits statement.
+ Stmt *getPreInits() const;
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPTileDirectiveClass ||
+ T->getStmtClass() == OMPUnrollDirectiveClass;
+ }
+};
+
/// This is a common base class for loop directives ('omp simd', 'omp
/// for', 'omp for simd' etc.). It is responsible for the loop code generation.
///
@@ -1024,7 +1082,7 @@ class OMPLoopDirective : public OMPLoopBasedDirective {
MutableArrayRef<Expr *> getCounters() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind())]);
- return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
+ return llvm::MutableArrayRef(Storage, getLoopsNumber());
}
/// Get the private counters storage.
@@ -1032,7 +1090,7 @@ class OMPLoopDirective : public OMPLoopBasedDirective {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
getLoopsNumber()]);
- return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
+ return llvm::MutableArrayRef(Storage, getLoopsNumber());
}
/// Get the updates storage.
@@ -1040,7 +1098,7 @@ class OMPLoopDirective : public OMPLoopBasedDirective {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
2 * getLoopsNumber()]);
- return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
+ return llvm::MutableArrayRef(Storage, getLoopsNumber());
}
/// Get the updates storage.
@@ -1048,7 +1106,7 @@ class OMPLoopDirective : public OMPLoopBasedDirective {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
3 * getLoopsNumber()]);
- return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
+ return llvm::MutableArrayRef(Storage, getLoopsNumber());
}
/// Get the final counter updates storage.
@@ -1056,7 +1114,7 @@ class OMPLoopDirective : public OMPLoopBasedDirective {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
4 * getLoopsNumber()]);
- return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
+ return llvm::MutableArrayRef(Storage, getLoopsNumber());
}
/// Get the dependent counters storage.
@@ -1064,7 +1122,7 @@ class OMPLoopDirective : public OMPLoopBasedDirective {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
5 * getLoopsNumber()]);
- return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
+ return llvm::MutableArrayRef(Storage, getLoopsNumber());
}
/// Get the dependent inits storage.
@@ -1072,7 +1130,7 @@ class OMPLoopDirective : public OMPLoopBasedDirective {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
6 * getLoopsNumber()]);
- return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
+ return llvm::MutableArrayRef(Storage, getLoopsNumber());
}
/// Get the finals conditions storage.
@@ -1080,7 +1138,7 @@ class OMPLoopDirective : public OMPLoopBasedDirective {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
7 * getLoopsNumber()]);
- return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
+ return llvm::MutableArrayRef(Storage, getLoopsNumber());
}
protected:
@@ -1102,7 +1160,7 @@ protected:
if (isOpenMPLoopBoundSharingDirective(Kind))
return CombinedDistributeEnd;
if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) ||
- isOpenMPDistributeDirective(Kind))
+ isOpenMPGenericLoopDirective(Kind) || isOpenMPDistributeDirective(Kind))
return WorksharingEnd;
return DefaultEnd;
}
@@ -1134,6 +1192,7 @@ protected:
}
void setIsLastIterVariable(Expr *IL) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
+ isOpenMPGenericLoopDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
@@ -1141,6 +1200,7 @@ protected:
}
void setLowerBoundVariable(Expr *LB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
+ isOpenMPGenericLoopDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
@@ -1148,6 +1208,7 @@ protected:
}
void setUpperBoundVariable(Expr *UB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
+ isOpenMPGenericLoopDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
@@ -1155,6 +1216,7 @@ protected:
}
void setStrideVariable(Expr *ST) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
+ isOpenMPGenericLoopDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
@@ -1162,6 +1224,7 @@ protected:
}
void setEnsureUpperBound(Expr *EUB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
+ isOpenMPGenericLoopDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
@@ -1169,6 +1232,7 @@ protected:
}
void setNextLowerBound(Expr *NLB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
+ isOpenMPGenericLoopDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
@@ -1176,6 +1240,7 @@ protected:
}
void setNextUpperBound(Expr *NUB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
+ isOpenMPGenericLoopDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
@@ -1183,6 +1248,7 @@ protected:
}
void setNumIterations(Expr *NI) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
+ isOpenMPGenericLoopDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
@@ -1285,6 +1351,7 @@ public:
Stmt *getPreInits() { return Data->getChildren()[PreInitsOffset]; }
Expr *getIsLastIterVariable() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
+ isOpenMPGenericLoopDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
@@ -1292,6 +1359,7 @@ public:
}
Expr *getLowerBoundVariable() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
+ isOpenMPGenericLoopDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
@@ -1299,6 +1367,7 @@ public:
}
Expr *getUpperBoundVariable() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
+ isOpenMPGenericLoopDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
@@ -1306,6 +1375,7 @@ public:
}
Expr *getStrideVariable() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
+ isOpenMPGenericLoopDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
@@ -1313,6 +1383,7 @@ public:
}
Expr *getEnsureUpperBound() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
+ isOpenMPGenericLoopDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
@@ -1320,6 +1391,7 @@ public:
}
Expr *getNextLowerBound() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
+ isOpenMPGenericLoopDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
@@ -1327,6 +1399,7 @@ public:
}
Expr *getNextUpperBound() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
+ isOpenMPGenericLoopDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
@@ -1334,6 +1407,7 @@ public:
}
Expr *getNumIterations() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
+ isOpenMPGenericLoopDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
@@ -1465,8 +1539,17 @@ public:
T->getStmtClass() == OMPParallelForSimdDirectiveClass ||
T->getStmtClass() == OMPTaskLoopDirectiveClass ||
T->getStmtClass() == OMPTaskLoopSimdDirectiveClass ||
+ T->getStmtClass() == OMPMaskedTaskLoopDirectiveClass ||
+ T->getStmtClass() == OMPMaskedTaskLoopSimdDirectiveClass ||
T->getStmtClass() == OMPMasterTaskLoopDirectiveClass ||
T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass ||
+ T->getStmtClass() == OMPGenericLoopDirectiveClass ||
+ T->getStmtClass() == OMPTeamsGenericLoopDirectiveClass ||
+ T->getStmtClass() == OMPTargetTeamsGenericLoopDirectiveClass ||
+ T->getStmtClass() == OMPParallelGenericLoopDirectiveClass ||
+ T->getStmtClass() == OMPTargetParallelGenericLoopDirectiveClass ||
+ T->getStmtClass() == OMPParallelMaskedTaskLoopDirectiveClass ||
+ T->getStmtClass() == OMPParallelMaskedTaskLoopSimdDirectiveClass ||
T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass ||
T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass ||
T->getStmtClass() == OMPDistributeDirectiveClass ||
@@ -1536,7 +1619,8 @@ public:
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt,
- const HelperExprs &Exprs);
+ const HelperExprs &Exprs,
+ OpenMPDirectiveKind ParamPrevMappedDirective);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
@@ -1614,7 +1698,8 @@ public:
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs,
- Expr *TaskRedRef, bool HasCancel);
+ Expr *TaskRedRef, bool HasCancel,
+ OpenMPDirectiveKind ParamPrevMappedDirective);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
@@ -1846,6 +1931,57 @@ public:
}
};
+/// This represents '#pragma omp scope' directive.
+/// \code
+/// #pragma omp scope private(a,b) nowait
+/// \endcode
+/// In this example directive '#pragma omp scope' has clauses 'private' with
+/// the variables 'a' and 'b' and nowait.
+///
+class OMPScopeDirective final : public OMPExecutableDirective {
+ friend class ASTStmtReader;
+ friend class OMPExecutableDirective;
+
+ /// Build directive with the given start and end location.
+ ///
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending location of the directive.
+ ///
+ OMPScopeDirective(SourceLocation StartLoc, SourceLocation EndLoc)
+ : OMPExecutableDirective(OMPScopeDirectiveClass, llvm::omp::OMPD_scope,
+ StartLoc, EndLoc) {}
+
+ /// Build an empty directive.
+ ///
+ explicit OMPScopeDirective()
+ : OMPExecutableDirective(OMPScopeDirectiveClass, llvm::omp::OMPD_scope,
+ SourceLocation(), SourceLocation()) {}
+
+public:
+ /// Creates directive.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending Location of the directive.
+ /// \param AssociatedStmt Statement, associated with the directive.
+ ///
+ static OMPScopeDirective *Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt);
+
+ /// Creates an empty directive.
+ ///
+ /// \param C AST context.
+ ///
+ static OMPScopeDirective *CreateEmpty(const ASTContext &C,
+ unsigned NumClauses, EmptyShell);
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPScopeDirectiveClass;
+ }
+};
+
/// This represents '#pragma omp single' directive.
///
/// \code
@@ -2241,6 +2377,69 @@ public:
}
};
+/// This represents '#pragma omp parallel masked' directive.
+///
+/// \code
+/// #pragma omp parallel masked filter(tid)
+/// \endcode
+/// In this example directive '#pragma omp parallel masked' has a clause
+/// 'filter' with the variable tid
+///
+class OMPParallelMaskedDirective final : public OMPExecutableDirective {
+ friend class ASTStmtReader;
+ friend class OMPExecutableDirective;
+
+ OMPParallelMaskedDirective(SourceLocation StartLoc, SourceLocation EndLoc)
+ : OMPExecutableDirective(OMPParallelMaskedDirectiveClass,
+ llvm::omp::OMPD_parallel_masked, StartLoc,
+ EndLoc) {}
+
+ explicit OMPParallelMaskedDirective()
+ : OMPExecutableDirective(OMPParallelMaskedDirectiveClass,
+ llvm::omp::OMPD_parallel_masked,
+ SourceLocation(), SourceLocation()) {}
+
+ /// Sets special task reduction descriptor.
+ void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }
+
+public:
+ /// Creates directive with a list of \a Clauses.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending Location of the directive.
+ /// \param Clauses List of clauses.
+ /// \param AssociatedStmt Statement, associated with the directive.
+ /// \param TaskRedRef Task reduction special reference expression to handle
+ /// taskgroup descriptor.
+ ///
+ static OMPParallelMaskedDirective *
+ Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef);
+
+ /// Creates an empty directive with the place for \a NumClauses
+ /// clauses.
+ ///
+ /// \param C AST context.
+ /// \param NumClauses Number of clauses.
+ ///
+ static OMPParallelMaskedDirective *
+ CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
+
+ /// Returns special task reduction reference expression.
+ Expr *getTaskReductionRefExpr() {
+ return cast_or_null<Expr>(Data->getChildren()[0]);
+ }
+ const Expr *getTaskReductionRefExpr() const {
+ return const_cast<OMPParallelMaskedDirective *>(this)
+ ->getTaskReductionRefExpr();
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPParallelMaskedDirectiveClass;
+ }
+};
+
/// This represents '#pragma omp parallel sections' directive.
///
/// \code
@@ -2510,15 +2709,20 @@ public:
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
+ /// \param Clauses List of clauses.
///
- static OMPTaskwaitDirective *
- Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
+ static OMPTaskwaitDirective *Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses);
/// Creates an empty directive.
///
/// \param C AST context.
+ /// \param NumClauses Number of clauses.
///
- static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell);
+ static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C,
+ unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskwaitDirectiveClass;
@@ -2738,7 +2942,7 @@ public:
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
- /// \param IsStandalone true, if the the standalone directive is created.
+ /// \param IsStandalone true, if the standalone directive is created.
///
static OMPOrderedDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
@@ -2759,25 +2963,31 @@ public:
class OMPAtomicDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
- /// Used for 'atomic update' or 'atomic capture' constructs. They may
- /// have atomic expressions of forms
- /// \code
- /// x = x binop expr;
- /// x = expr binop x;
- /// \endcode
- /// This field is true for the first form of the expression and false for the
- /// second. Required for correct codegen of non-associative operations (like
- /// << or >>).
- bool IsXLHSInRHSPart = false;
- /// Used for 'atomic update' or 'atomic capture' constructs. They may
- /// have atomic expressions of forms
- /// \code
- /// v = x; <update x>;
- /// <update x>; v = x;
- /// \endcode
- /// This field is true for the first(postfix) form of the expression and false
- /// otherwise.
- bool IsPostfixUpdate = false;
+
+ struct FlagTy {
+ /// Used for 'atomic update' or 'atomic capture' constructs. They may
+ /// have atomic expressions of forms:
+ /// \code
+ /// x = x binop expr;
+ /// x = expr binop x;
+ /// \endcode
+ /// This field is 1 for the first form of the expression and 0 for the
+ /// second. Required for correct codegen of non-associative operations (like
+ /// << or >>).
+ uint8_t IsXLHSInRHSPart : 1;
+ /// Used for 'atomic update' or 'atomic capture' constructs. They may
+ /// have atomic expressions of forms:
+ /// \code
+ /// v = x; <update x>;
+ /// <update x>; v = x;
+ /// \endcode
+ /// This field is 1 for the first(postfix) form of the expression and 0
+ /// otherwise.
+ uint8_t IsPostfixUpdate : 1;
+ /// 1 if 'v' is updated only when the condition is false (compare capture
+ /// only).
+ uint8_t IsFailOnly : 1;
+ } Flags;
/// Build directive with the given start and end location.
///
@@ -2794,18 +3004,62 @@ class OMPAtomicDirective : public OMPExecutableDirective {
: OMPExecutableDirective(OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic,
SourceLocation(), SourceLocation()) {}
+ enum DataPositionTy : size_t {
+ POS_X = 0,
+ POS_V,
+ POS_E,
+ POS_UpdateExpr,
+ POS_D,
+ POS_Cond,
+ POS_R,
+ };
+
/// Set 'x' part of the associated expression/statement.
- void setX(Expr *X) { Data->getChildren()[0] = X; }
+ void setX(Expr *X) { Data->getChildren()[DataPositionTy::POS_X] = X; }
/// Set helper expression of the form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
- void setUpdateExpr(Expr *UE) { Data->getChildren()[1] = UE; }
+ void setUpdateExpr(Expr *UE) {
+ Data->getChildren()[DataPositionTy::POS_UpdateExpr] = UE;
+ }
/// Set 'v' part of the associated expression/statement.
- void setV(Expr *V) { Data->getChildren()[2] = V; }
+ void setV(Expr *V) { Data->getChildren()[DataPositionTy::POS_V] = V; }
+ /// Set 'r' part of the associated expression/statement.
+ void setR(Expr *R) { Data->getChildren()[DataPositionTy::POS_R] = R; }
/// Set 'expr' part of the associated expression/statement.
- void setExpr(Expr *E) { Data->getChildren()[3] = E; }
+ void setExpr(Expr *E) { Data->getChildren()[DataPositionTy::POS_E] = E; }
+ /// Set 'd' part of the associated expression/statement.
+ void setD(Expr *D) { Data->getChildren()[DataPositionTy::POS_D] = D; }
+ /// Set conditional expression in `atomic compare`.
+ void setCond(Expr *C) { Data->getChildren()[DataPositionTy::POS_Cond] = C; }
public:
+ struct Expressions {
+ /// 'x' part of the associated expression/statement.
+ Expr *X = nullptr;
+ /// 'v' part of the associated expression/statement.
+ Expr *V = nullptr;
+ // 'r' part of the associated expression/statement.
+ Expr *R = nullptr;
+ /// 'expr' part of the associated expression/statement.
+ Expr *E = nullptr;
+ /// UE Helper expression of the form:
+ /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
+ /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
+ Expr *UE = nullptr;
+ /// 'd' part of the associated expression/statement.
+ Expr *D = nullptr;
+ /// Conditional expression in `atomic compare` construct.
+ Expr *Cond = nullptr;
+ /// True if UE has the first form and false if the second.
+ bool IsXLHSInRHSPart;
+ /// True if original value of 'x' must be stored in 'v', not an updated one.
+ bool IsPostfixUpdate;
+ /// True if 'v' is updated only when the condition is false (compare capture
+ /// only).
+ bool IsFailOnly;
+ };
+
/// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr'
/// parts of the atomic construct (see Section 2.12.6, atomic Construct, for
/// detailed description of 'x', 'v' and 'expr').
@@ -2815,20 +3069,12 @@ public:
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
- /// \param X 'x' part of the associated expression/statement.
- /// \param V 'v' part of the associated expression/statement.
- /// \param E 'expr' part of the associated expression/statement.
- /// \param UE Helper expression of the form
- /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
- /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
- /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the
- /// second.
- /// \param IsPostfixUpdate true if original value of 'x' must be stored in
- /// 'v', not an updated one.
- static OMPAtomicDirective *
- Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
- Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate);
+ /// \param Exprs Associated expressions or statements.
+ static OMPAtomicDirective *Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt, Expressions Exprs);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
@@ -2840,33 +3086,67 @@ public:
unsigned NumClauses, EmptyShell);
/// Get 'x' part of the associated expression/statement.
- Expr *getX() { return cast_or_null<Expr>(Data->getChildren()[0]); }
+ Expr *getX() {
+ return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_X]);
+ }
const Expr *getX() const {
- return cast_or_null<Expr>(Data->getChildren()[0]);
+ return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_X]);
}
/// Get helper expression of the form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
- Expr *getUpdateExpr() { return cast_or_null<Expr>(Data->getChildren()[1]); }
+ Expr *getUpdateExpr() {
+ return cast_or_null<Expr>(
+ Data->getChildren()[DataPositionTy::POS_UpdateExpr]);
+ }
const Expr *getUpdateExpr() const {
- return cast_or_null<Expr>(Data->getChildren()[1]);
+ return cast_or_null<Expr>(
+ Data->getChildren()[DataPositionTy::POS_UpdateExpr]);
}
/// Return true if helper update expression has form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
- bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
+ bool isXLHSInRHSPart() const { return Flags.IsXLHSInRHSPart; }
/// Return true if 'v' expression must be updated to original value of
/// 'x', false if 'v' must be updated to the new value of 'x'.
- bool isPostfixUpdate() const { return IsPostfixUpdate; }
+ bool isPostfixUpdate() const { return Flags.IsPostfixUpdate; }
+ /// Return true if 'v' is updated only when the condition is evaluated false
+ /// (compare capture only).
+ bool isFailOnly() const { return Flags.IsFailOnly; }
/// Get 'v' part of the associated expression/statement.
- Expr *getV() { return cast_or_null<Expr>(Data->getChildren()[2]); }
+ Expr *getV() {
+ return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_V]);
+ }
const Expr *getV() const {
- return cast_or_null<Expr>(Data->getChildren()[2]);
+ return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_V]);
+ }
+ /// Get 'r' part of the associated expression/statement.
+ Expr *getR() {
+ return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_R]);
+ }
+ const Expr *getR() const {
+ return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_R]);
}
/// Get 'expr' part of the associated expression/statement.
- Expr *getExpr() { return cast_or_null<Expr>(Data->getChildren()[3]); }
+ Expr *getExpr() {
+ return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_E]);
+ }
const Expr *getExpr() const {
- return cast_or_null<Expr>(Data->getChildren()[3]);
+ return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_E]);
+ }
+ /// Get 'd' part of the associated expression/statement.
+ Expr *getD() {
+ return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_D]);
+ }
+ Expr *getD() const {
+ return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_D]);
+ }
+ /// Get the 'cond' part of the source atomic expression.
+ Expr *getCondExpr() {
+ return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_Cond]);
+ }
+ Expr *getCondExpr() const {
+ return cast_or_null<Expr>(Data->getChildren()[DataPositionTy::POS_Cond]);
}
static bool classof(const Stmt *T) {
@@ -3651,6 +3931,82 @@ public:
}
};
+/// This represents '#pragma omp masked taskloop' directive.
+///
+/// \code
+/// #pragma omp masked taskloop private(a,b) grainsize(val) num_tasks(num)
+/// \endcode
+/// In this example directive '#pragma omp masked taskloop' has clauses
+/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
+/// and 'num_tasks' with expression 'num'.
+///
+class OMPMaskedTaskLoopDirective final : public OMPLoopDirective {
+ friend class ASTStmtReader;
+ friend class OMPExecutableDirective;
+ /// true if the construct has inner cancel directive.
+ bool HasCancel = false;
+
+ /// Build directive with the given start and end location.
+ ///
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending location of the directive.
+ /// \param CollapsedNum Number of collapsed nested loops.
+ ///
+ OMPMaskedTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum)
+ : OMPLoopDirective(OMPMaskedTaskLoopDirectiveClass,
+ llvm::omp::OMPD_masked_taskloop, StartLoc, EndLoc,
+ CollapsedNum) {}
+
+ /// Build an empty directive.
+ ///
+ /// \param CollapsedNum Number of collapsed nested loops.
+ ///
+ explicit OMPMaskedTaskLoopDirective(unsigned CollapsedNum)
+ : OMPLoopDirective(OMPMaskedTaskLoopDirectiveClass,
+ llvm::omp::OMPD_masked_taskloop, SourceLocation(),
+ SourceLocation(), CollapsedNum) {}
+
+ /// Set cancel state.
+ void setHasCancel(bool Has) { HasCancel = Has; }
+
+public:
+ /// Creates directive with a list of \a Clauses.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending Location of the directive.
+ /// \param CollapsedNum Number of collapsed loops.
+ /// \param Clauses List of clauses.
+ /// \param AssociatedStmt Statement, associated with the directive.
+ /// \param Exprs Helper expressions for CodeGen.
+ /// \param HasCancel true if this directive has inner cancel directive.
+ ///
+ static OMPMaskedTaskLoopDirective *
+ Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
+
+ /// Creates an empty directive with the place
+ /// for \a NumClauses clauses.
+ ///
+ /// \param C AST context.
+ /// \param CollapsedNum Number of collapsed nested loops.
+ /// \param NumClauses Number of clauses.
+ ///
+ static OMPMaskedTaskLoopDirective *CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell);
+
+ /// Return true if current directive has inner cancel directive.
+ bool hasCancel() const { return HasCancel; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPMaskedTaskLoopDirectiveClass;
+ }
+};
+
/// This represents '#pragma omp master taskloop simd' directive.
///
/// \code
@@ -3716,6 +4072,71 @@ public:
}
};
+/// This represents '#pragma omp masked taskloop simd' directive.
+///
+/// \code
+/// #pragma omp masked taskloop simd private(a,b) grainsize(val) num_tasks(num)
+/// \endcode
+/// In this example directive '#pragma omp masked taskloop simd' has clauses
+/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
+/// and 'num_tasks' with expression 'num'.
+///
+class OMPMaskedTaskLoopSimdDirective final : public OMPLoopDirective {
+ friend class ASTStmtReader;
+ friend class OMPExecutableDirective;
+ /// Build directive with the given start and end location.
+ ///
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending location of the directive.
+ /// \param CollapsedNum Number of collapsed nested loops.
+ ///
+ OMPMaskedTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum)
+ : OMPLoopDirective(OMPMaskedTaskLoopSimdDirectiveClass,
+ llvm::omp::OMPD_masked_taskloop_simd, StartLoc, EndLoc,
+ CollapsedNum) {}
+
+ /// Build an empty directive.
+ ///
+ /// \param CollapsedNum Number of collapsed nested loops.
+ ///
+ explicit OMPMaskedTaskLoopSimdDirective(unsigned CollapsedNum)
+ : OMPLoopDirective(OMPMaskedTaskLoopSimdDirectiveClass,
+ llvm::omp::OMPD_masked_taskloop_simd, SourceLocation(),
+ SourceLocation(), CollapsedNum) {}
+
+public:
+ /// Creates directive with a list of \p Clauses.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending Location of the directive.
+ /// \param CollapsedNum Number of collapsed loops.
+ /// \param Clauses List of clauses.
+ /// \param AssociatedStmt Statement, associated with the directive.
+ /// \param Exprs Helper expressions for CodeGen.
+ ///
+ static OMPMaskedTaskLoopSimdDirective *
+ Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt, const HelperExprs &Exprs);
+
+ /// Creates an empty directive with the place for \p NumClauses clauses.
+ ///
+ /// \param C AST context.
+ /// \param CollapsedNum Number of collapsed nested loops.
+ /// \param NumClauses Number of clauses.
+ ///
+ static OMPMaskedTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell);
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPMaskedTaskLoopSimdDirectiveClass;
+ }
+};
+
/// This represents '#pragma omp parallel master taskloop' directive.
///
/// \code
@@ -3794,6 +4215,84 @@ public:
}
};
+/// This represents '#pragma omp parallel masked taskloop' directive.
+///
+/// \code
+/// #pragma omp parallel masked taskloop private(a,b) grainsize(val)
+/// num_tasks(num)
+/// \endcode
+/// In this example directive '#pragma omp parallel masked taskloop' has clauses
+/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
+/// and 'num_tasks' with expression 'num'.
+///
+class OMPParallelMaskedTaskLoopDirective final : public OMPLoopDirective {
+ friend class ASTStmtReader;
+ friend class OMPExecutableDirective;
+ /// true if the construct has inner cancel directive.
+ bool HasCancel = false;
+
+ /// Build directive with the given start and end location.
+ ///
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending location of the directive.
+ /// \param CollapsedNum Number of collapsed nested loops.
+ ///
+ OMPParallelMaskedTaskLoopDirective(SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ unsigned CollapsedNum)
+ : OMPLoopDirective(OMPParallelMaskedTaskLoopDirectiveClass,
+ llvm::omp::OMPD_parallel_masked_taskloop, StartLoc,
+ EndLoc, CollapsedNum) {}
+
+ /// Build an empty directive.
+ ///
+ /// \param CollapsedNum Number of collapsed nested loops.
+ ///
+ explicit OMPParallelMaskedTaskLoopDirective(unsigned CollapsedNum)
+ : OMPLoopDirective(OMPParallelMaskedTaskLoopDirectiveClass,
+ llvm::omp::OMPD_parallel_masked_taskloop,
+ SourceLocation(), SourceLocation(), CollapsedNum) {}
+
+ /// Set cancel state.
+ void setHasCancel(bool Has) { HasCancel = Has; }
+
+public:
+ /// Creates directive with a list of \a Clauses.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending Location of the directive.
+ /// \param CollapsedNum Number of collapsed loops.
+ /// \param Clauses List of clauses.
+ /// \param AssociatedStmt Statement, associated with the directive.
+ /// \param Exprs Helper expressions for CodeGen.
+ /// \param HasCancel true if this directive has inner cancel directive.
+ ///
+ static OMPParallelMaskedTaskLoopDirective *
+ Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
+
+ /// Creates an empty directive with the place
+ /// for \a NumClauses clauses.
+ ///
+ /// \param C AST context.
+ /// \param CollapsedNum Number of collapsed nested loops.
+ /// \param NumClauses Number of clauses.
+ ///
+ static OMPParallelMaskedTaskLoopDirective *CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell);
+
+ /// Return true if current directive has inner cancel directive.
+ bool hasCancel() const { return HasCancel; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPParallelMaskedTaskLoopDirectiveClass;
+ }
+};
+
/// This represents '#pragma omp parallel master taskloop simd' directive.
///
/// \code
@@ -3861,6 +4360,73 @@ public:
}
};
+/// This represents '#pragma omp parallel masked taskloop simd' directive.
+///
+/// \code
+/// #pragma omp parallel masked taskloop simd private(a,b) grainsize(val)
+/// num_tasks(num)
+/// \endcode
+/// In this example directive '#pragma omp parallel masked taskloop simd' has
+/// clauses 'private' with the variables 'a' and 'b', 'grainsize' with
+/// expression 'val' and 'num_tasks' with expression 'num'.
+///
+class OMPParallelMaskedTaskLoopSimdDirective final : public OMPLoopDirective {
+ friend class ASTStmtReader;
+ friend class OMPExecutableDirective;
+ /// Build directive with the given start and end location.
+ ///
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending location of the directive.
+ /// \param CollapsedNum Number of collapsed nested loops.
+ ///
+ OMPParallelMaskedTaskLoopSimdDirective(SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ unsigned CollapsedNum)
+ : OMPLoopDirective(OMPParallelMaskedTaskLoopSimdDirectiveClass,
+ llvm::omp::OMPD_parallel_masked_taskloop_simd,
+ StartLoc, EndLoc, CollapsedNum) {}
+
+ /// Build an empty directive.
+ ///
+ /// \param CollapsedNum Number of collapsed nested loops.
+ ///
+ explicit OMPParallelMaskedTaskLoopSimdDirective(unsigned CollapsedNum)
+ : OMPLoopDirective(OMPParallelMaskedTaskLoopSimdDirectiveClass,
+ llvm::omp::OMPD_parallel_masked_taskloop_simd,
+ SourceLocation(), SourceLocation(), CollapsedNum) {}
+
+public:
+ /// Creates directive with a list of \p Clauses.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending Location of the directive.
+ /// \param CollapsedNum Number of collapsed loops.
+ /// \param Clauses List of clauses.
+ /// \param AssociatedStmt Statement, associated with the directive.
+ /// \param Exprs Helper expressions for CodeGen.
+ ///
+ static OMPParallelMaskedTaskLoopSimdDirective *
+ Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt, const HelperExprs &Exprs);
+
+ /// Creates an empty directive with the place
+ /// for \a NumClauses clauses.
+ ///
+ /// \param C AST context.
+ /// \param CollapsedNum Number of collapsed nested loops.
+ /// \param NumClauses Number of clauses.
+ ///
+ static OMPParallelMaskedTaskLoopSimdDirective *
+ CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
+ EmptyShell);
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPParallelMaskedTaskLoopSimdDirectiveClass;
+ }
+};
+
/// This represents '#pragma omp distribute' directive.
///
/// \code
@@ -3908,7 +4474,8 @@ public:
static OMPDistributeDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
- Stmt *AssociatedStmt, const HelperExprs &Exprs);
+ Stmt *AssociatedStmt, const HelperExprs &Exprs,
+ OpenMPDirectiveKind ParamPrevMappedDirective);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
@@ -4992,7 +5559,7 @@ public:
};
/// This represents the '#pragma omp tile' loop transformation directive.
-class OMPTileDirective final : public OMPLoopBasedDirective {
+class OMPTileDirective final : public OMPLoopTransformationDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
@@ -5004,8 +5571,11 @@ class OMPTileDirective final : public OMPLoopBasedDirective {
explicit OMPTileDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumLoops)
- : OMPLoopBasedDirective(OMPTileDirectiveClass, llvm::omp::OMPD_tile,
- StartLoc, EndLoc, NumLoops) {}
+ : OMPLoopTransformationDirective(OMPTileDirectiveClass,
+ llvm::omp::OMPD_tile, StartLoc, EndLoc,
+ NumLoops) {
+ setNumGeneratedLoops(3 * NumLoops);
+ }
void setPreInits(Stmt *PreInits) {
Data->getChildren()[PreInitsOffset] = PreInits;
@@ -5042,8 +5612,6 @@ public:
static OMPTileDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned NumLoops);
- unsigned getNumAssociatedLoops() const { return getLoopsNumber(); }
-
/// Gets/sets the associated loops after tiling.
///
/// This is in de-sugared format stored as a CompoundStmt.
@@ -5073,7 +5641,7 @@ public:
/// #pragma omp unroll
/// for (int i = 0; i < 64; ++i)
/// \endcode
-class OMPUnrollDirective final : public OMPLoopBasedDirective {
+class OMPUnrollDirective final : public OMPLoopTransformationDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
@@ -5084,8 +5652,9 @@ class OMPUnrollDirective final : public OMPLoopBasedDirective {
};
explicit OMPUnrollDirective(SourceLocation StartLoc, SourceLocation EndLoc)
- : OMPLoopBasedDirective(OMPUnrollDirectiveClass, llvm::omp::OMPD_unroll,
- StartLoc, EndLoc, 1) {}
+ : OMPLoopTransformationDirective(OMPUnrollDirectiveClass,
+ llvm::omp::OMPD_unroll, StartLoc, EndLoc,
+ 1) {}
/// Set the pre-init statements.
void setPreInits(Stmt *PreInits) {
@@ -5111,7 +5680,7 @@ public:
static OMPUnrollDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- Stmt *TransformedStmt, Stmt *PreInits);
+ unsigned NumGeneratedLoops, Stmt *TransformedStmt, Stmt *PreInits);
/// Build an empty '#pragma omp unroll' AST node for deserialization.
///
@@ -5360,6 +5929,412 @@ public:
}
};
+/// This represents '#pragma omp metadirective' directive.
+///
+/// \code
+/// #pragma omp metadirective when(user={condition(N>10)}: parallel for)
+/// \endcode
+/// In this example directive '#pragma omp metadirective' has clauses 'when'
+/// with a dynamic user condition to check if a variable 'N > 10'
+///
+class OMPMetaDirective final : public OMPExecutableDirective {
+ friend class ASTStmtReader;
+ friend class OMPExecutableDirective;
+ Stmt *IfStmt;
+
+ OMPMetaDirective(SourceLocation StartLoc, SourceLocation EndLoc)
+ : OMPExecutableDirective(OMPMetaDirectiveClass,
+ llvm::omp::OMPD_metadirective, StartLoc,
+ EndLoc) {}
+ explicit OMPMetaDirective()
+ : OMPExecutableDirective(OMPMetaDirectiveClass,
+ llvm::omp::OMPD_metadirective, SourceLocation(),
+ SourceLocation()) {}
+
+ void setIfStmt(Stmt *S) { IfStmt = S; }
+
+public:
+ static OMPMetaDirective *Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt, Stmt *IfStmt);
+ static OMPMetaDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
+ EmptyShell);
+ Stmt *getIfStmt() const { return IfStmt; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPMetaDirectiveClass;
+ }
+};
+
+/// This represents '#pragma omp loop' directive.
+///
+/// \code
+/// #pragma omp loop private(a,b) binding(parallel) order(concurrent)
+/// \endcode
+/// In this example directive '#pragma omp loop' has
+/// clauses 'private' with the variables 'a' and 'b', 'binding' with
+/// modifier 'parallel' and 'order(concurrent).
+///
+class OMPGenericLoopDirective final : public OMPLoopDirective {
+ friend class ASTStmtReader;
+ friend class OMPExecutableDirective;
+ /// Build directive with the given start and end location.
+ ///
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending location of the directive.
+ /// \param CollapsedNum Number of collapsed nested loops.
+ ///
+ OMPGenericLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum)
+ : OMPLoopDirective(OMPGenericLoopDirectiveClass, llvm::omp::OMPD_loop,
+ StartLoc, EndLoc, CollapsedNum) {}
+
+ /// Build an empty directive.
+ ///
+ /// \param CollapsedNum Number of collapsed nested loops.
+ ///
+ explicit OMPGenericLoopDirective(unsigned CollapsedNum)
+ : OMPLoopDirective(OMPGenericLoopDirectiveClass, llvm::omp::OMPD_loop,
+ SourceLocation(), SourceLocation(), CollapsedNum) {}
+
+public:
+ /// Creates directive with a list of \p Clauses.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending Location of the directive.
+ /// \param CollapsedNum Number of collapsed loops.
+ /// \param Clauses List of clauses.
+ /// \param AssociatedStmt Statement, associated with the directive.
+ /// \param Exprs Helper expressions for CodeGen.
+ ///
+ static OMPGenericLoopDirective *
+ Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt, const HelperExprs &Exprs);
+
+ /// Creates an empty directive with a place for \a NumClauses clauses.
+ ///
+ /// \param C AST context.
+ /// \param NumClauses Number of clauses.
+ /// \param CollapsedNum Number of collapsed nested loops.
+ ///
+ static OMPGenericLoopDirective *CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell);
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPGenericLoopDirectiveClass;
+ }
+};
+
+/// This represents '#pragma omp teams loop' directive.
+///
+/// \code
+/// #pragma omp teams loop private(a,b) order(concurrent)
+/// \endcode
+/// In this example directive '#pragma omp teams loop' has
+/// clauses 'private' with the variables 'a' and 'b', and order(concurrent).
+///
+class OMPTeamsGenericLoopDirective final : public OMPLoopDirective {
+ friend class ASTStmtReader;
+ friend class OMPExecutableDirective;
+ /// Build directive with the given start and end location.
+ ///
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending location of the directive.
+ /// \param CollapsedNum Number of collapsed nested loops.
+ ///
+ OMPTeamsGenericLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum)
+ : OMPLoopDirective(OMPTeamsGenericLoopDirectiveClass,
+ llvm::omp::OMPD_teams_loop, StartLoc, EndLoc,
+ CollapsedNum) {}
+
+ /// Build an empty directive.
+ ///
+ /// \param CollapsedNum Number of collapsed nested loops.
+ ///
+ explicit OMPTeamsGenericLoopDirective(unsigned CollapsedNum)
+ : OMPLoopDirective(OMPTeamsGenericLoopDirectiveClass,
+ llvm::omp::OMPD_teams_loop, SourceLocation(),
+ SourceLocation(), CollapsedNum) {}
+
+public:
+ /// Creates directive with a list of \p Clauses.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending Location of the directive.
+ /// \param CollapsedNum Number of collapsed loops.
+ /// \param Clauses List of clauses.
+ /// \param AssociatedStmt Statement, associated with the directive.
+ /// \param Exprs Helper expressions for CodeGen.
+ ///
+ static OMPTeamsGenericLoopDirective *
+ Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt, const HelperExprs &Exprs);
+
+ /// Creates an empty directive with the place
+ /// for \a NumClauses clauses.
+ ///
+ /// \param C AST context.
+ /// \param CollapsedNum Number of collapsed nested loops.
+ /// \param NumClauses Number of clauses.
+ ///
+ static OMPTeamsGenericLoopDirective *CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell);
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPTeamsGenericLoopDirectiveClass;
+ }
+};
+
+/// This represents '#pragma omp target teams loop' directive.
+///
+/// \code
+/// #pragma omp target teams loop private(a,b) order(concurrent)
+/// \endcode
+/// In this example directive '#pragma omp target teams loop' has
+/// clauses 'private' with the variables 'a' and 'b', and order(concurrent).
+///
+class OMPTargetTeamsGenericLoopDirective final : public OMPLoopDirective {
+ friend class ASTStmtReader;
+ friend class OMPExecutableDirective;
+ /// Build directive with the given start and end location.
+ ///
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending location of the directive.
+ /// \param CollapsedNum Number of collapsed nested loops.
+ ///
+ OMPTargetTeamsGenericLoopDirective(SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ unsigned CollapsedNum)
+ : OMPLoopDirective(OMPTargetTeamsGenericLoopDirectiveClass,
+ llvm::omp::OMPD_target_teams_loop, StartLoc, EndLoc,
+ CollapsedNum) {}
+
+ /// Build an empty directive.
+ ///
+ /// \param CollapsedNum Number of collapsed nested loops.
+ ///
+ explicit OMPTargetTeamsGenericLoopDirective(unsigned CollapsedNum)
+ : OMPLoopDirective(OMPTargetTeamsGenericLoopDirectiveClass,
+ llvm::omp::OMPD_target_teams_loop, SourceLocation(),
+ SourceLocation(), CollapsedNum) {}
+
+public:
+ /// Creates directive with a list of \p Clauses.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending Location of the directive.
+ /// \param CollapsedNum Number of collapsed loops.
+ /// \param Clauses List of clauses.
+ /// \param AssociatedStmt Statement, associated with the directive.
+ /// \param Exprs Helper expressions for CodeGen.
+ ///
+ static OMPTargetTeamsGenericLoopDirective *
+ Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt, const HelperExprs &Exprs);
+
+ /// Creates an empty directive with the place
+ /// for \a NumClauses clauses.
+ ///
+ /// \param C AST context.
+ /// \param CollapsedNum Number of collapsed nested loops.
+ /// \param NumClauses Number of clauses.
+ ///
+ static OMPTargetTeamsGenericLoopDirective *CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell);
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPTargetTeamsGenericLoopDirectiveClass;
+ }
+};
+
+/// This represents '#pragma omp parallel loop' directive.
+///
+/// \code
+/// #pragma omp parallel loop private(a,b) order(concurrent)
+/// \endcode
+/// In this example directive '#pragma omp parallel loop' has
+/// clauses 'private' with the variables 'a' and 'b', and order(concurrent).
+///
+class OMPParallelGenericLoopDirective final : public OMPLoopDirective {
+ friend class ASTStmtReader;
+ friend class OMPExecutableDirective;
+ /// Build directive with the given start and end location.
+ ///
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending location of the directive.
+ /// \param CollapsedNum Number of collapsed nested loops.
+ ///
+ OMPParallelGenericLoopDirective(SourceLocation StartLoc,
+ SourceLocation EndLoc, unsigned CollapsedNum)
+ : OMPLoopDirective(OMPParallelGenericLoopDirectiveClass,
+ llvm::omp::OMPD_parallel_loop, StartLoc, EndLoc,
+ CollapsedNum) {}
+
+ /// Build an empty directive.
+ ///
+ /// \param CollapsedNum Number of collapsed nested loops.
+ ///
+ explicit OMPParallelGenericLoopDirective(unsigned CollapsedNum)
+ : OMPLoopDirective(OMPParallelGenericLoopDirectiveClass,
+ llvm::omp::OMPD_parallel_loop, SourceLocation(),
+ SourceLocation(), CollapsedNum) {}
+
+public:
+ /// Creates directive with a list of \p Clauses.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending Location of the directive.
+ /// \param CollapsedNum Number of collapsed loops.
+ /// \param Clauses List of clauses.
+ /// \param AssociatedStmt Statement, associated with the directive.
+ /// \param Exprs Helper expressions for CodeGen.
+ ///
+ static OMPParallelGenericLoopDirective *
+ Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt, const HelperExprs &Exprs);
+
+ /// Creates an empty directive with the place
+ /// for \a NumClauses clauses.
+ ///
+ /// \param C AST context.
+ /// \param CollapsedNum Number of collapsed nested loops.
+ /// \param NumClauses Number of clauses.
+ ///
+ static OMPParallelGenericLoopDirective *CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell);
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPParallelGenericLoopDirectiveClass;
+ }
+};
+
+/// This represents '#pragma omp target parallel loop' directive.
+///
+/// \code
+/// #pragma omp target parallel loop private(a,b) order(concurrent)
+/// \endcode
+/// In this example directive '#pragma omp target parallel loop' has
+/// clauses 'private' with the variables 'a' and 'b', and order(concurrent).
+///
+class OMPTargetParallelGenericLoopDirective final : public OMPLoopDirective {
+ friend class ASTStmtReader;
+ friend class OMPExecutableDirective;
+ /// Build directive with the given start and end location.
+ ///
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending location of the directive.
+ /// \param CollapsedNum Number of collapsed nested loops.
+ ///
+ OMPTargetParallelGenericLoopDirective(SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ unsigned CollapsedNum)
+ : OMPLoopDirective(OMPTargetParallelGenericLoopDirectiveClass,
+ llvm::omp::OMPD_target_parallel_loop, StartLoc, EndLoc,
+ CollapsedNum) {}
+
+ /// Build an empty directive.
+ ///
+ /// \param CollapsedNum Number of collapsed nested loops.
+ ///
+ explicit OMPTargetParallelGenericLoopDirective(unsigned CollapsedNum)
+ : OMPLoopDirective(OMPTargetParallelGenericLoopDirectiveClass,
+ llvm::omp::OMPD_target_parallel_loop, SourceLocation(),
+ SourceLocation(), CollapsedNum) {}
+
+public:
+ /// Creates directive with a list of \p Clauses.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending Location of the directive.
+ /// \param CollapsedNum Number of collapsed loops.
+ /// \param Clauses List of clauses.
+ /// \param AssociatedStmt Statement, associated with the directive.
+ /// \param Exprs Helper expressions for CodeGen.
+ ///
+ static OMPTargetParallelGenericLoopDirective *
+ Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt, const HelperExprs &Exprs);
+
+ /// Creates an empty directive with the place
+ /// for \a NumClauses clauses.
+ ///
+ /// \param C AST context.
+ /// \param CollapsedNum Number of collapsed nested loops.
+ /// \param NumClauses Number of clauses.
+ ///
+ static OMPTargetParallelGenericLoopDirective *
+ CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
+ EmptyShell);
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPTargetParallelGenericLoopDirectiveClass;
+ }
+};
+
+/// This represents '#pragma omp error' directive.
+///
+/// \code
+/// #pragma omp error
+/// \endcode
+class OMPErrorDirective final : public OMPExecutableDirective {
+ friend class ASTStmtReader;
+ friend class OMPExecutableDirective;
+ /// Build directive with the given start and end location.
+ ///
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending location of the directive.
+ ///
+ OMPErrorDirective(SourceLocation StartLoc, SourceLocation EndLoc)
+ : OMPExecutableDirective(OMPErrorDirectiveClass, llvm::omp::OMPD_error,
+ StartLoc, EndLoc) {}
+ /// Build an empty directive.
+ ///
+ explicit OMPErrorDirective()
+ : OMPExecutableDirective(OMPErrorDirectiveClass, llvm::omp::OMPD_error,
+ SourceLocation(), SourceLocation()) {}
+
+public:
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending Location of the directive.
+ /// \param Clauses List of clauses.
+ ///
+ static OMPErrorDirective *Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses);
+
+ /// Creates an empty directive.
+ ///
+ /// \param C AST context.
+ ///
+ static OMPErrorDirective *CreateEmpty(const ASTContext &C,
+ unsigned NumClauses, EmptyShell);
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPErrorDirectiveClass;
+ }
+};
} // end namespace clang
#endif
diff --git a/contrib/llvm-project/clang/include/clang/AST/TemplateArgumentVisitor.h b/contrib/llvm-project/clang/include/clang/AST/TemplateArgumentVisitor.h
index 190aa97adf45..cf0d32201580 100644
--- a/contrib/llvm-project/clang/include/clang/AST/TemplateArgumentVisitor.h
+++ b/contrib/llvm-project/clang/include/clang/AST/TemplateArgumentVisitor.h
@@ -37,6 +37,7 @@ public:
DISPATCH(Declaration);
DISPATCH(NullPtr);
DISPATCH(Integral);
+ DISPATCH(StructuralValue);
DISPATCH(Template);
DISPATCH(TemplateExpansion);
DISPATCH(Expression);
@@ -59,6 +60,7 @@ public:
VISIT_METHOD(Declaration);
VISIT_METHOD(NullPtr);
VISIT_METHOD(Integral);
+ VISIT_METHOD(StructuralValue);
VISIT_METHOD(Template);
VISIT_METHOD(TemplateExpansion);
VISIT_METHOD(Expression);
diff --git a/contrib/llvm-project/clang/include/clang/AST/TemplateBase.h b/contrib/llvm-project/clang/include/clang/AST/TemplateBase.h
index fa27a12cfbb9..fea2c8ccfee6 100644
--- a/contrib/llvm-project/clang/include/clang/AST/TemplateBase.h
+++ b/contrib/llvm-project/clang/include/clang/AST/TemplateBase.h
@@ -23,14 +23,13 @@
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TrailingObjects.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
+#include <optional>
namespace llvm {
@@ -51,8 +50,8 @@ template <> struct PointerLikeTypeTraits<clang::Expr *> {
namespace clang {
+class APValue;
class ASTContext;
-class DiagnosticBuilder;
class Expr;
struct PrintingPolicy;
class TypeSourceInfo;
@@ -82,6 +81,13 @@ public:
/// that was provided for an integral non-type template parameter.
Integral,
+ /// The template argument is a non-type template argument that can't be
+ /// represented by the special-case Declaration, NullPtr, or Integral
+ /// forms. These values are only ever produced by constant evaluation,
+ /// so cannot be dependent.
+ /// TODO: merge Declaration, NullPtr and Integral into this?
+ StructuralValue,
+
/// The template argument is a template name that was provided for a
/// template template parameter.
Template,
@@ -105,16 +111,23 @@ private:
/// The kind of template argument we're storing.
struct DA {
- unsigned Kind;
+ LLVM_PREFERRED_TYPE(ArgKind)
+ unsigned Kind : 31;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsDefaulted : 1;
void *QT;
ValueDecl *D;
};
struct I {
- unsigned Kind;
+ LLVM_PREFERRED_TYPE(ArgKind)
+ unsigned Kind : 31;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsDefaulted : 1;
// We store a decomposed APSInt with the data allocated by ASTContext if
// BitWidth > 64. The memory may be shared between multiple
// TemplateArgument instances.
unsigned BitWidth : 31;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsUnsigned : 1;
union {
/// Used to store the <= 64 bits integer value.
@@ -125,51 +138,77 @@ private:
};
void *Type;
};
+ struct V {
+ LLVM_PREFERRED_TYPE(ArgKind)
+ unsigned Kind : 31;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsDefaulted : 1;
+ APValue *Value;
+ void *Type;
+ };
struct A {
- unsigned Kind;
+ LLVM_PREFERRED_TYPE(ArgKind)
+ unsigned Kind : 31;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsDefaulted : 1;
unsigned NumArgs;
const TemplateArgument *Args;
};
struct TA {
- unsigned Kind;
+ LLVM_PREFERRED_TYPE(ArgKind)
+ unsigned Kind : 31;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsDefaulted : 1;
unsigned NumExpansions;
void *Name;
};
struct TV {
- unsigned Kind;
+ LLVM_PREFERRED_TYPE(ArgKind)
+ unsigned Kind : 31;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsDefaulted : 1;
uintptr_t V;
};
union {
struct DA DeclArg;
struct I Integer;
+ struct V Value;
struct A Args;
struct TA TemplateArg;
struct TV TypeOrValue;
};
+ void initFromType(QualType T, bool IsNullPtr, bool IsDefaulted);
+ void initFromDeclaration(ValueDecl *D, QualType QT, bool IsDefaulted);
+ void initFromIntegral(const ASTContext &Ctx, const llvm::APSInt &Value,
+ QualType Type, bool IsDefaulted);
+ void initFromStructural(const ASTContext &Ctx, QualType Type,
+ const APValue &V, bool IsDefaulted);
+
public:
/// Construct an empty, invalid template argument.
- constexpr TemplateArgument() : TypeOrValue({Null, 0}) {}
+ constexpr TemplateArgument() : TypeOrValue({Null, 0, /* IsDefaulted */ 0}) {}
/// Construct a template type argument.
- TemplateArgument(QualType T, bool isNullPtr = false) {
- TypeOrValue.Kind = isNullPtr ? NullPtr : Type;
- TypeOrValue.V = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
+ TemplateArgument(QualType T, bool isNullPtr = false,
+ bool IsDefaulted = false) {
+ initFromType(T, isNullPtr, IsDefaulted);
}
- /// Construct a template argument that refers to a
- /// declaration, which is either an external declaration or a
- /// template declaration.
- TemplateArgument(ValueDecl *D, QualType QT) {
- assert(D && "Expected decl");
- DeclArg.Kind = Declaration;
- DeclArg.QT = QT.getAsOpaquePtr();
- DeclArg.D = D;
+ /// Construct a template argument that refers to a (non-dependent)
+ /// declaration.
+ TemplateArgument(ValueDecl *D, QualType QT, bool IsDefaulted = false) {
+ initFromDeclaration(D, QT, IsDefaulted);
}
/// Construct an integral constant template argument. The memory to
/// store the value is allocated with Ctx.
- TemplateArgument(ASTContext &Ctx, const llvm::APSInt &Value, QualType Type);
+ TemplateArgument(const ASTContext &Ctx, const llvm::APSInt &Value,
+ QualType Type, bool IsDefaulted = false);
+
+ /// Construct a template argument from an arbitrary constant value.
+ TemplateArgument(const ASTContext &Ctx, QualType Type, const APValue &Value,
+ bool IsDefaulted = false);
/// Construct an integral constant template argument with the same
/// value as Other but a different type.
@@ -186,8 +225,12 @@ public:
/// is taken.
///
/// \param Name The template name.
- TemplateArgument(TemplateName Name) {
+ ///
+ /// \param IsDefaulted If 'true', implies that this TemplateArgument
+ /// corresponds to a default template parameter
+ TemplateArgument(TemplateName Name, bool IsDefaulted = false) {
TemplateArg.Kind = Template;
+ TemplateArg.IsDefaulted = IsDefaulted;
TemplateArg.Name = Name.getAsVoidPointer();
TemplateArg.NumExpansions = 0;
}
@@ -203,8 +246,13 @@ public:
///
/// \param NumExpansions The number of expansions that will be generated by
/// instantiating
- TemplateArgument(TemplateName Name, Optional<unsigned> NumExpansions) {
+ ///
+ /// \param IsDefaulted If 'true', implies that this TemplateArgument
+ /// corresponds to a default template parameter
+ TemplateArgument(TemplateName Name, std::optional<unsigned> NumExpansions,
+ bool IsDefaulted = false) {
TemplateArg.Kind = TemplateExpansion;
+ TemplateArg.IsDefaulted = IsDefaulted;
TemplateArg.Name = Name.getAsVoidPointer();
if (NumExpansions)
TemplateArg.NumExpansions = *NumExpansions + 1;
@@ -217,8 +265,9 @@ public:
/// This form of template argument only occurs in template argument
/// lists used for dependent types and for expression; it will not
/// occur in a non-dependent, canonical template argument list.
- TemplateArgument(Expr *E) {
+ TemplateArgument(Expr *E, bool IsDefaulted = false) {
TypeOrValue.Kind = Expression;
+ TypeOrValue.IsDefaulted = IsDefaulted;
TypeOrValue.V = reinterpret_cast<uintptr_t>(E);
}
@@ -228,13 +277,14 @@ public:
/// outlives the TemplateArgument itself.
explicit TemplateArgument(ArrayRef<TemplateArgument> Args) {
this->Args.Kind = Pack;
+ this->Args.IsDefaulted = false;
this->Args.Args = Args.data();
this->Args.NumArgs = Args.size();
}
- TemplateArgument(TemplateName, bool) = delete;
-
- static TemplateArgument getEmptyPack() { return TemplateArgument(None); }
+ static TemplateArgument getEmptyPack() {
+ return TemplateArgument(std::nullopt);
+ }
/// Create a new template argument pack by copying the given set of
/// template arguments.
@@ -268,7 +318,7 @@ public:
/// Retrieve the type for a type template argument.
QualType getAsType() const {
assert(getKind() == Type && "Unexpected kind");
- return QualType::getFromOpaquePtr(reinterpret_cast<void*>(TypeOrValue.V));
+ return QualType::getFromOpaquePtr(reinterpret_cast<void *>(TypeOrValue.V));
}
/// Retrieve the declaration for a declaration non-type
@@ -286,7 +336,7 @@ public:
/// Retrieve the type for null non-type template argument.
QualType getNullPtrType() const {
assert(getKind() == NullPtr && "Unexpected kind");
- return QualType::getFromOpaquePtr(reinterpret_cast<void*>(TypeOrValue.V));
+ return QualType::getFromOpaquePtr(reinterpret_cast<void *>(TypeOrValue.V));
}
/// Retrieve the template name for a template name argument.
@@ -306,7 +356,7 @@ public:
/// Retrieve the number of expansions that a template template argument
/// expansion will produce, if known.
- Optional<unsigned> getNumTemplateExpansions() const;
+ std::optional<unsigned> getNumTemplateExpansions() const;
/// Retrieve the template argument as an integral value.
// FIXME: Provide a way to read the integral data without copying the value.
@@ -319,7 +369,7 @@ public:
return APSInt(APInt(Integer.BitWidth, Integer.VAL), Integer.IsUnsigned);
unsigned NumWords = APInt::getNumWords(Integer.BitWidth);
- return APSInt(APInt(Integer.BitWidth, makeArrayRef(Integer.pVal, NumWords)),
+ return APSInt(APInt(Integer.BitWidth, ArrayRef(Integer.pVal, NumWords)),
Integer.IsUnsigned);
}
@@ -334,6 +384,22 @@ public:
Integer.Type = T.getAsOpaquePtr();
}
+ /// Set to 'true' if this TemplateArgument corresponds to a
+ /// default template parameter.
+ void setIsDefaulted(bool v) { TypeOrValue.IsDefaulted = v; }
+
+ /// If returns 'true', this TemplateArgument corresponds to a
+ /// default template parameter.
+ bool getIsDefaulted() const { return (bool)TypeOrValue.IsDefaulted; }
+
+ /// Get the value of a StructuralValue.
+ const APValue &getAsStructuralValue() const { return *Value.Value; }
+
+ /// Get the type of a StructuralValue.
+ QualType getStructuralValueType() const {
+ return QualType::getFromOpaquePtr(Value.Type);
+ }
+
/// If this is a non-type template argument, get its type. Otherwise,
/// returns a null QualType.
QualType getNonTypeTemplateArgumentType() const;
@@ -364,7 +430,7 @@ public:
/// Iterator range referencing all of the elements of a template
/// argument pack.
ArrayRef<TemplateArgument> pack_elements() const {
- return llvm::makeArrayRef(pack_begin(), pack_end());
+ return llvm::ArrayRef(pack_begin(), pack_end());
}
/// The number of template arguments in the given template argument
@@ -377,7 +443,7 @@ public:
/// Return the array of arguments in this template argument pack.
ArrayRef<TemplateArgument> getPackAsArray() const {
assert(getKind() == Pack);
- return llvm::makeArrayRef(Args.Args, Args.NumArgs);
+ return llvm::ArrayRef(Args.Args, Args.NumArgs);
}
/// Determines whether two template arguments are superficially the
@@ -479,6 +545,7 @@ public:
assert(Argument.getKind() == TemplateArgument::NullPtr ||
Argument.getKind() == TemplateArgument::Integral ||
Argument.getKind() == TemplateArgument::Declaration ||
+ Argument.getKind() == TemplateArgument::StructuralValue ||
Argument.getKind() == TemplateArgument::Expression);
}
@@ -504,13 +571,9 @@ public:
/// - Fetches the full source range of the argument.
SourceRange getSourceRange() const LLVM_READONLY;
- const TemplateArgument &getArgument() const {
- return Argument;
- }
+ const TemplateArgument &getArgument() const { return Argument; }
- TemplateArgumentLocInfo getLocInfo() const {
- return LocInfo;
- }
+ TemplateArgumentLocInfo getLocInfo() const { return LocInfo; }
TypeSourceInfo *getTypeSourceInfo() const {
if (Argument.getKind() != TemplateArgument::Type)
@@ -538,6 +601,11 @@ public:
return LocInfo.getAsExpr();
}
+ Expr *getSourceStructuralValueExpression() const {
+ assert(Argument.getKind() == TemplateArgument::StructuralValue);
+ return LocInfo.getAsExpr();
+ }
+
NestedNameSpecifierLoc getTemplateQualifierLoc() const {
if (Argument.getKind() != TemplateArgument::Template &&
Argument.getKind() != TemplateArgument::TemplateExpansion)
@@ -569,8 +637,7 @@ class TemplateArgumentListInfo {
public:
TemplateArgumentListInfo() = default;
- TemplateArgumentListInfo(SourceLocation LAngleLoc,
- SourceLocation RAngleLoc)
+ TemplateArgumentListInfo(SourceLocation LAngleLoc, SourceLocation RAngleLoc)
: LAngleLoc(LAngleLoc), RAngleLoc(RAngleLoc) {}
// This can leak if used in an AST node, use ASTTemplateArgumentListInfo
@@ -589,21 +656,15 @@ public:
return Arguments.data();
}
- llvm::ArrayRef<TemplateArgumentLoc> arguments() const {
- return Arguments;
- }
+ llvm::ArrayRef<TemplateArgumentLoc> arguments() const { return Arguments; }
const TemplateArgumentLoc &operator[](unsigned I) const {
return Arguments[I];
}
- TemplateArgumentLoc &operator[](unsigned I) {
- return Arguments[I];
- }
+ TemplateArgumentLoc &operator[](unsigned I) { return Arguments[I]; }
- void addArgument(const TemplateArgumentLoc &Loc) {
- Arguments.push_back(Loc);
- }
+ void addArgument(const TemplateArgumentLoc &Loc) { Arguments.push_back(Loc); }
};
/// Represents an explicit template argument list in C++, e.g.,
@@ -619,6 +680,9 @@ private:
ASTTemplateArgumentListInfo(const TemplateArgumentListInfo &List);
+ // FIXME: Is it ever necessary to copy to another context?
+ ASTTemplateArgumentListInfo(const ASTTemplateArgumentListInfo *List);
+
public:
/// The source location of the left angle bracket ('<').
SourceLocation LAngleLoc;
@@ -639,7 +703,7 @@ public:
unsigned getNumTemplateArgs() const { return NumTemplateArgs; }
llvm::ArrayRef<TemplateArgumentLoc> arguments() const {
- return llvm::makeArrayRef(getTemplateArgs(), getNumTemplateArgs());
+ return llvm::ArrayRef(getTemplateArgs(), getNumTemplateArgs());
}
const TemplateArgumentLoc &operator[](unsigned I) const {
@@ -648,6 +712,10 @@ public:
static const ASTTemplateArgumentListInfo *
Create(const ASTContext &C, const TemplateArgumentListInfo &List);
+
+ // FIXME: Is it ever necessary to copy to another context?
+ static const ASTTemplateArgumentListInfo *
+ Create(const ASTContext &C, const ASTTemplateArgumentListInfo *List);
};
/// Represents an explicit template argument list in C++, e.g.,
@@ -692,33 +760,6 @@ struct alignas(void *) ASTTemplateKWAndArgsInfo {
const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
const TemplateArgument &Arg);
-inline TemplateSpecializationType::iterator
- TemplateSpecializationType::end() const {
- return getArgs() + getNumArgs();
-}
-
-inline DependentTemplateSpecializationType::iterator
- DependentTemplateSpecializationType::end() const {
- return getArgs() + getNumArgs();
-}
-
-inline const TemplateArgument &
- TemplateSpecializationType::getArg(unsigned Idx) const {
- assert(Idx < getNumArgs() && "Template argument out of range");
- return getArgs()[Idx];
-}
-
-inline const TemplateArgument &
- DependentTemplateSpecializationType::getArg(unsigned Idx) const {
- assert(Idx < getNumArgs() && "Template argument out of range");
- return getArgs()[Idx];
-}
-
-inline const TemplateArgument &AutoType::getArg(unsigned Idx) const {
- assert(Idx < getNumArgs() && "Template argument out of range");
- return getArgs()[Idx];
-}
-
} // namespace clang
#endif // LLVM_CLANG_AST_TEMPLATEBASE_H
diff --git a/contrib/llvm-project/clang/include/clang/AST/TemplateName.h b/contrib/llvm-project/clang/include/clang/AST/TemplateName.h
index 010b813dc525..b7732e54ba10 100644
--- a/contrib/llvm-project/clang/include/clang/AST/TemplateName.h
+++ b/contrib/llvm-project/clang/include/clang/AST/TemplateName.h
@@ -21,19 +21,19 @@
#include "llvm/ADT/PointerUnion.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include <cassert>
+#include <optional>
namespace clang {
class ASTContext;
+class Decl;
class DependentTemplateName;
-class DiagnosticBuilder;
class IdentifierInfo;
class NamedDecl;
class NestedNameSpecifier;
enum OverloadedOperatorKind : int;
class OverloadedTemplateStorage;
class AssumedTemplateStorage;
-class PartialDiagnostic;
struct PrintingPolicy;
class QualifiedTemplateName;
class SubstTemplateTemplateParmPackStorage;
@@ -41,6 +41,7 @@ class SubstTemplateTemplateParmStorage;
class TemplateArgument;
class TemplateDecl;
class TemplateTemplateParmDecl;
+class UsingShadowDecl;
/// Implementation class used to describe either a set of overloaded
/// template names or an already-substituted template template parameter pack.
@@ -54,12 +55,15 @@ protected:
};
struct BitsTag {
- /// A Kind.
+ LLVM_PREFERRED_TYPE(Kind)
unsigned Kind : 2;
- /// The number of stored templates or template arguments,
- /// depending on which subclass we have.
- unsigned Size : 30;
+ // The template parameter index.
+ unsigned Index : 15;
+
+ /// The pack index, or the number of stored templates
+ /// or template arguments, depending on which subclass we have.
+ unsigned Data : 15;
};
union {
@@ -67,14 +71,13 @@ protected:
void *PointerAlignment;
};
- UncommonTemplateNameStorage(Kind kind, unsigned size) {
- Bits.Kind = kind;
- Bits.Size = size;
+ UncommonTemplateNameStorage(Kind Kind, unsigned Index, unsigned Data) {
+ Bits.Kind = Kind;
+ Bits.Index = Index;
+ Bits.Data = Data;
}
public:
- unsigned size() const { return Bits.Size; }
-
OverloadedTemplateStorage *getAsOverloadedStorage() {
return Bits.Kind == Overloaded
? reinterpret_cast<OverloadedTemplateStorage *>(this)
@@ -106,7 +109,7 @@ class OverloadedTemplateStorage : public UncommonTemplateNameStorage {
friend class ASTContext;
OverloadedTemplateStorage(unsigned size)
- : UncommonTemplateNameStorage(Overloaded, size) {}
+ : UncommonTemplateNameStorage(Overloaded, 0, size) {}
NamedDecl **getStorage() {
return reinterpret_cast<NamedDecl **>(this + 1);
@@ -116,13 +119,15 @@ class OverloadedTemplateStorage : public UncommonTemplateNameStorage {
}
public:
+ unsigned size() const { return Bits.Data; }
+
using iterator = NamedDecl *const *;
iterator begin() const { return getStorage(); }
- iterator end() const { return getStorage() + size(); }
+ iterator end() const { return getStorage() + Bits.Data; }
llvm::ArrayRef<NamedDecl*> decls() const {
- return llvm::makeArrayRef(begin(), end());
+ return llvm::ArrayRef(begin(), end());
}
};
@@ -132,23 +137,29 @@ public:
/// This kind of template names occurs when the parameter pack has been
/// provided with a template template argument pack in a context where its
/// enclosing pack expansion could not be fully expanded.
-class SubstTemplateTemplateParmPackStorage
- : public UncommonTemplateNameStorage, public llvm::FoldingSetNode
-{
- TemplateTemplateParmDecl *Parameter;
+class SubstTemplateTemplateParmPackStorage : public UncommonTemplateNameStorage,
+ public llvm::FoldingSetNode {
const TemplateArgument *Arguments;
+ llvm::PointerIntPair<Decl *, 1, bool> AssociatedDeclAndFinal;
public:
- SubstTemplateTemplateParmPackStorage(TemplateTemplateParmDecl *Parameter,
- unsigned Size,
- const TemplateArgument *Arguments)
- : UncommonTemplateNameStorage(SubstTemplateTemplateParmPack, Size),
- Parameter(Parameter), Arguments(Arguments) {}
+ SubstTemplateTemplateParmPackStorage(ArrayRef<TemplateArgument> ArgPack,
+ Decl *AssociatedDecl, unsigned Index,
+ bool Final);
+
+ /// A template-like entity which owns the whole pattern being substituted.
+ /// This will own a set of template parameters.
+ Decl *getAssociatedDecl() const;
+
+ /// Returns the index of the replaced parameter in the associated declaration.
+ /// This should match the result of `getParameterPack()->getIndex()`.
+ unsigned getIndex() const { return Bits.Index; }
+
+ // When true the substitution will be 'Final' (subst node won't be placed).
+ bool getFinal() const;
/// Retrieve the template template parameter pack being substituted.
- TemplateTemplateParmDecl *getParameterPack() const {
- return Parameter;
- }
+ TemplateTemplateParmDecl *getParameterPack() const;
/// Retrieve the template template argument pack with which this
/// parameter was substituted.
@@ -156,10 +167,9 @@ public:
void Profile(llvm::FoldingSetNodeID &ID, ASTContext &Context);
- static void Profile(llvm::FoldingSetNodeID &ID,
- ASTContext &Context,
- TemplateTemplateParmDecl *Parameter,
- const TemplateArgument &ArgPack);
+ static void Profile(llvm::FoldingSetNodeID &ID, ASTContext &Context,
+ const TemplateArgument &ArgPack, Decl *AssociatedDecl,
+ unsigned Index, bool Final);
};
/// Represents a C++ template name within the type system.
@@ -190,8 +200,12 @@ public:
/// specifier in the typedef. "apply" is a nested template, and can
/// only be understood in the context of
class TemplateName {
+ // NameDecl is either a TemplateDecl or a UsingShadowDecl depending on the
+ // NameKind.
+ // !! There is no free low bits in 32-bit builds to discriminate more than 4
+ // pointer types in PointerUnion.
using StorageType =
- llvm::PointerUnion<TemplateDecl *, UncommonTemplateNameStorage *,
+ llvm::PointerUnion<Decl *, UncommonTemplateNameStorage *,
QualifiedTemplateName *, DependentTemplateName *>;
StorageType Storage;
@@ -226,7 +240,11 @@ public:
/// A template template parameter pack that has been substituted for
/// a template template argument pack, but has not yet been expanded into
/// individual arguments.
- SubstTemplateTemplateParmPack
+ SubstTemplateTemplateParmPack,
+
+ /// A template name that refers to a template declaration found through a
+ /// specific using shadow declaration.
+ UsingTemplate,
};
TemplateName() = default;
@@ -237,6 +255,7 @@ public:
explicit TemplateName(SubstTemplateTemplateParmPackStorage *Storage);
explicit TemplateName(QualifiedTemplateName *Qual);
explicit TemplateName(DependentTemplateName *Dep);
+ explicit TemplateName(UsingShadowDecl *Using);
/// Determine whether this template name is NULL.
bool isNull() const;
@@ -289,6 +308,10 @@ public:
/// structure, if any.
DependentTemplateName *getAsDependentTemplateName() const;
+ /// Retrieve the using shadow declaration through which the underlying
+ /// template declaration is introduced, if any.
+ UsingShadowDecl *getAsUsingShadowDecl() const;
+
TemplateName getUnderlying() const;
/// Get the template name to substitute when this template name is used as a
@@ -309,16 +332,17 @@ public:
/// unexpanded parameter pack (for C++0x variadic templates).
bool containsUnexpandedParameterPack() const;
+ enum class Qualified { None, AsWritten, Fully };
/// Print the template name.
///
/// \param OS the output stream to which the template name will be
/// printed.
///
- /// \param SuppressNNS if true, don't print the
- /// nested-name-specifier that precedes the template name (if it has
- /// one).
+ /// \param Qual print the (Qualified::None) simple name,
+ /// (Qualified::AsWritten) any written (possibly partial) qualifier, or
+ /// (Qualified::Fully) the fully qualified name.
void print(raw_ostream &OS, const PrintingPolicy &Policy,
- bool SuppressNNS = false) const;
+ Qualified Qual = Qualified::AsWritten) const;
/// Debugging aid that dumps the template name.
void dump(raw_ostream &OS) const;
@@ -327,9 +351,7 @@ public:
/// error.
void dump() const;
- void Profile(llvm::FoldingSetNodeID &ID) {
- ID.AddPointer(Storage.getOpaqueValue());
- }
+ void Profile(llvm::FoldingSetNodeID &ID);
/// Retrieve the template name as a void pointer.
void *getAsVoidPointer() const { return Storage.getOpaqueValue(); }
@@ -351,23 +373,41 @@ class SubstTemplateTemplateParmStorage
: public UncommonTemplateNameStorage, public llvm::FoldingSetNode {
friend class ASTContext;
- TemplateTemplateParmDecl *Parameter;
TemplateName Replacement;
-
- SubstTemplateTemplateParmStorage(TemplateTemplateParmDecl *parameter,
- TemplateName replacement)
- : UncommonTemplateNameStorage(SubstTemplateTemplateParm, 0),
- Parameter(parameter), Replacement(replacement) {}
+ Decl *AssociatedDecl;
+
+ SubstTemplateTemplateParmStorage(TemplateName Replacement,
+ Decl *AssociatedDecl, unsigned Index,
+ std::optional<unsigned> PackIndex)
+ : UncommonTemplateNameStorage(SubstTemplateTemplateParm, Index,
+ PackIndex ? *PackIndex + 1 : 0),
+ Replacement(Replacement), AssociatedDecl(AssociatedDecl) {
+ assert(AssociatedDecl != nullptr);
+ }
public:
- TemplateTemplateParmDecl *getParameter() const { return Parameter; }
+ /// A template-like entity which owns the whole pattern being substituted.
+ /// This will own a set of template parameters.
+ Decl *getAssociatedDecl() const { return AssociatedDecl; }
+
+ /// Returns the index of the replaced parameter in the associated declaration.
+ /// This should match the result of `getParameter()->getIndex()`.
+ unsigned getIndex() const { return Bits.Index; }
+
+ std::optional<unsigned> getPackIndex() const {
+ if (Bits.Data == 0)
+ return std::nullopt;
+ return Bits.Data - 1;
+ }
+
+ TemplateTemplateParmDecl *getParameter() const;
TemplateName getReplacement() const { return Replacement; }
void Profile(llvm::FoldingSetNodeID &ID);
- static void Profile(llvm::FoldingSetNodeID &ID,
- TemplateTemplateParmDecl *parameter,
- TemplateName replacement);
+ static void Profile(llvm::FoldingSetNodeID &ID, TemplateName Replacement,
+ Decl *AssociatedDecl, unsigned Index,
+ std::optional<unsigned> PackIndex);
};
inline TemplateName TemplateName::getUnderlying() const {
@@ -400,13 +440,19 @@ class QualifiedTemplateName : public llvm::FoldingSetNode {
/// this name with DependentTemplateName).
llvm::PointerIntPair<NestedNameSpecifier *, 1> Qualifier;
- /// The template declaration or set of overloaded function templates
- /// that this qualified name refers to.
- TemplateDecl *Template;
+ /// The underlying template name, it is either
+ /// 1) a Template -- a template declaration that this qualified name refers
+ /// to.
+ /// 2) or a UsingTemplate -- a template declaration introduced by a
+ /// using-shadow declaration.
+ TemplateName UnderlyingTemplate;
QualifiedTemplateName(NestedNameSpecifier *NNS, bool TemplateKeyword,
- TemplateDecl *Template)
- : Qualifier(NNS, TemplateKeyword? 1 : 0), Template(Template) {}
+ TemplateName Template)
+ : Qualifier(NNS, TemplateKeyword ? 1 : 0), UnderlyingTemplate(Template) {
+ assert(UnderlyingTemplate.getKind() == TemplateName::Template ||
+ UnderlyingTemplate.getKind() == TemplateName::UsingTemplate);
+ }
public:
/// Return the nested name specifier that qualifies this name.
@@ -416,23 +462,18 @@ public:
/// keyword.
bool hasTemplateKeyword() const { return Qualifier.getInt(); }
- /// The template declaration that this qualified name refers
- /// to.
- TemplateDecl *getDecl() const { return Template; }
-
- /// The template declaration to which this qualified name
- /// refers.
- TemplateDecl *getTemplateDecl() const { return Template; }
+ /// Return the underlying template name.
+ TemplateName getUnderlyingTemplate() const { return UnderlyingTemplate; }
void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getQualifier(), hasTemplateKeyword(), getTemplateDecl());
+ Profile(ID, getQualifier(), hasTemplateKeyword(), UnderlyingTemplate);
}
static void Profile(llvm::FoldingSetNodeID &ID, NestedNameSpecifier *NNS,
- bool TemplateKeyword, TemplateDecl *Template) {
+ bool TemplateKeyword, TemplateName TN) {
ID.AddPointer(NNS);
ID.AddBoolean(TemplateKeyword);
- ID.AddPointer(Template);
+ ID.AddPointer(TN.getAsVoidPointer());
}
};
diff --git a/contrib/llvm-project/clang/include/clang/AST/TextNodeDumper.h b/contrib/llvm-project/clang/include/clang/AST/TextNodeDumper.h
index 0eb0031de11f..732749ad305e 100644
--- a/contrib/llvm-project/clang/include/clang/AST/TextNodeDumper.h
+++ b/contrib/llvm-project/clang/include/clang/AST/TextNodeDumper.h
@@ -189,6 +189,8 @@ public:
void Visit(const GenericSelectionExpr::ConstAssociation &A);
+ void Visit(const ConceptReference *);
+
void Visit(const concepts::Requirement *R);
void Visit(const APValue &Value, QualType Ty);
@@ -202,6 +204,9 @@ public:
void dumpName(const NamedDecl *ND);
void dumpAccessSpecifier(AccessSpecifier AS);
void dumpCleanupObject(const ExprWithCleanups::CleanupObject &C);
+ void dumpTemplateSpecializationKind(TemplateSpecializationKind TSK);
+ void dumpNestedNameSpecifier(const NestedNameSpecifier *NNS);
+ void dumpConceptReference(const ConceptReference *R);
void dumpDeclRef(const Decl *D, StringRef Label = {});
@@ -246,12 +251,17 @@ public:
void VisitLabelStmt(const LabelStmt *Node);
void VisitGotoStmt(const GotoStmt *Node);
void VisitCaseStmt(const CaseStmt *Node);
+ void VisitReturnStmt(const ReturnStmt *Node);
+ void VisitCoawaitExpr(const CoawaitExpr *Node);
+ void VisitCoreturnStmt(const CoreturnStmt *Node);
+ void VisitCompoundStmt(const CompoundStmt *Node);
void VisitConstantExpr(const ConstantExpr *Node);
void VisitCallExpr(const CallExpr *Node);
void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *Node);
void VisitCastExpr(const CastExpr *Node);
void VisitImplicitCastExpr(const ImplicitCastExpr *Node);
void VisitDeclRefExpr(const DeclRefExpr *Node);
+ void VisitDependentScopeDeclRefExpr(const DependentScopeDeclRefExpr *Node);
void VisitSYCLUniqueStableNameExpr(const SYCLUniqueStableNameExpr *Node);
void VisitPredefinedExpr(const PredefinedExpr *Node);
void VisitCharacterLiteral(const CharacterLiteral *Node);
@@ -311,11 +321,17 @@ public:
void VisitFunctionType(const FunctionType *T);
void VisitFunctionProtoType(const FunctionProtoType *T);
void VisitUnresolvedUsingType(const UnresolvedUsingType *T);
+ void VisitUsingType(const UsingType *T);
void VisitTypedefType(const TypedefType *T);
void VisitUnaryTransformType(const UnaryTransformType *T);
void VisitTagType(const TagType *T);
void VisitTemplateTypeParmType(const TemplateTypeParmType *T);
+ void VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T);
+ void
+ VisitSubstTemplateTypeParmPackType(const SubstTemplateTypeParmPackType *T);
void VisitAutoType(const AutoType *T);
+ void VisitDeducedTemplateSpecializationType(
+ const DeducedTemplateSpecializationType *T);
void VisitTemplateSpecializationType(const TemplateSpecializationType *T);
void VisitInjectedClassNameType(const InjectedClassNameType *T);
void VisitObjCInterfaceType(const ObjCInterfaceType *T);
@@ -376,6 +392,7 @@ public:
void VisitConceptDecl(const ConceptDecl *D);
void
VisitLifetimeExtendedTemporaryDecl(const LifetimeExtendedTemporaryDecl *D);
+ void VisitHLSLBufferDecl(const HLSLBufferDecl *D);
};
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/AST/Type.h b/contrib/llvm-project/clang/include/clang/AST/Type.h
index 9f46d5337897..6384cf9420b8 100644
--- a/contrib/llvm-project/clang/include/clang/AST/Type.h
+++ b/contrib/llvm-project/clang/include/clang/AST/Type.h
@@ -34,10 +34,9 @@
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
@@ -51,12 +50,14 @@
#include <cstddef>
#include <cstdint>
#include <cstring>
+#include <optional>
#include <string>
#include <type_traits>
#include <utility>
namespace clang {
+class BTFTypeTagAttr;
class ExtQuals;
class QualType;
class ConceptDecl;
@@ -129,6 +130,7 @@ class TemplateArgumentLoc;
class TemplateTypeParmDecl;
class TypedefNameDecl;
class UnresolvedUsingTypenameDecl;
+class UsingShadowDecl;
using CanQualType = CanQual<Type>;
@@ -263,16 +265,31 @@ public:
bool hasOnlyConst() const { return Mask == Const; }
void removeConst() { Mask &= ~Const; }
void addConst() { Mask |= Const; }
+ Qualifiers withConst() const {
+ Qualifiers Qs = *this;
+ Qs.addConst();
+ return Qs;
+ }
bool hasVolatile() const { return Mask & Volatile; }
bool hasOnlyVolatile() const { return Mask == Volatile; }
void removeVolatile() { Mask &= ~Volatile; }
void addVolatile() { Mask |= Volatile; }
+ Qualifiers withVolatile() const {
+ Qualifiers Qs = *this;
+ Qs.addVolatile();
+ return Qs;
+ }
bool hasRestrict() const { return Mask & Restrict; }
bool hasOnlyRestrict() const { return Mask == Restrict; }
void removeRestrict() { Mask &= ~Restrict; }
void addRestrict() { Mask |= Restrict; }
+ Qualifiers withRestrict() const {
+ Qualifiers Qs = *this;
+ Qs.addRestrict();
+ return Qs;
+ }
bool hasCVRQualifiers() const { return getCVRQualifiers(); }
unsigned getCVRQualifiers() const { return Mask & CVRMask; }
@@ -495,7 +512,12 @@ public:
(A == LangAS::Default &&
(B == LangAS::sycl_private || B == LangAS::sycl_local ||
B == LangAS::sycl_global || B == LangAS::sycl_global_device ||
- B == LangAS::sycl_global_host));
+ B == LangAS::sycl_global_host)) ||
+ // In HIP device compilation, any cuda address space is allowed
+ // to implicitly cast into the default address space.
+ (A == LangAS::Default &&
+ (B == LangAS::cuda_constant || B == LangAS::cuda_device ||
+ B == LangAS::cuda_shared));
}
/// Returns true if the address space in these qualifiers is equal to or
@@ -602,6 +624,47 @@ private:
static const uint32_t AddressSpaceShift = 9;
};
+class QualifiersAndAtomic {
+ Qualifiers Quals;
+ bool HasAtomic;
+
+public:
+ QualifiersAndAtomic() : HasAtomic(false) {}
+ QualifiersAndAtomic(Qualifiers Quals, bool HasAtomic)
+ : Quals(Quals), HasAtomic(HasAtomic) {}
+
+ operator Qualifiers() const { return Quals; }
+
+ bool hasVolatile() const { return Quals.hasVolatile(); }
+ bool hasConst() const { return Quals.hasConst(); }
+ bool hasRestrict() const { return Quals.hasRestrict(); }
+ bool hasAtomic() const { return HasAtomic; }
+
+ void addVolatile() { Quals.addVolatile(); }
+ void addConst() { Quals.addConst(); }
+ void addRestrict() { Quals.addRestrict(); }
+ void addAtomic() { HasAtomic = true; }
+
+ void removeVolatile() { Quals.removeVolatile(); }
+ void removeConst() { Quals.removeConst(); }
+ void removeRestrict() { Quals.removeRestrict(); }
+ void removeAtomic() { HasAtomic = false; }
+
+ QualifiersAndAtomic withVolatile() {
+ return {Quals.withVolatile(), HasAtomic};
+ }
+ QualifiersAndAtomic withConst() { return {Quals.withConst(), HasAtomic}; }
+ QualifiersAndAtomic withRestrict() {
+ return {Quals.withRestrict(), HasAtomic};
+ }
+ QualifiersAndAtomic withAtomic() { return {Quals, true}; }
+
+ QualifiersAndAtomic &operator+=(Qualifiers RHS) {
+ Quals += RHS;
+ return *this;
+ }
+};
+
/// A std::pair-like structure for storing a qualified type split
/// into its local qualifiers and its locally-unqualified type.
struct SplitQualType {
@@ -651,6 +714,12 @@ enum class ObjCSubstitutionContext {
Superclass,
};
+/// The kind of 'typeof' expression we're after.
+enum class TypeOfKind : uint8_t {
+ Qualified,
+ Unqualified,
+};
+
/// A (possibly-)qualified type.
///
/// For efficiency, we don't store CV-qualified types as nodes on their
@@ -695,6 +764,8 @@ public:
unsigned getLocalFastQualifiers() const { return Value.getInt(); }
void setLocalFastQualifiers(unsigned Quals) { Value.setInt(Quals); }
+ bool UseExcessPrecision(const ASTContext &Ctx);
+
/// Retrieves a pointer to the underlying (unqualified) type.
///
/// This function requires that the type not be NULL. If the type might be
@@ -734,6 +805,9 @@ public:
return Value.getPointer().isNull();
}
+ // Determines if a type can form `T&`.
+ bool isReferenceable() const;
+
/// Determine whether this particular QualType instance has the
/// "const" qualifier set, without looking through typedefs that may have
/// added "const" at a different level.
@@ -744,6 +818,26 @@ public:
/// Determine whether this type is const-qualified.
bool isConstQualified() const;
+ enum class NonConstantStorageReason {
+ MutableField,
+ NonConstNonReferenceType,
+ NonTrivialCtor,
+ NonTrivialDtor,
+ };
+ /// Determine whether instances of this type can be placed in immutable
+ /// storage.
+ /// If ExcludeCtor is true, the duration when the object's constructor runs
+ /// will not be considered. The caller will need to verify that the object is
+ /// not written to during its construction. ExcludeDtor works similarly.
+ std::optional<NonConstantStorageReason>
+ isNonConstantStorage(const ASTContext &Ctx, bool ExcludeCtor,
+ bool ExcludeDtor);
+
+ bool isConstantStorage(const ASTContext &Ctx, bool ExcludeCtor,
+ bool ExcludeDtor) {
+ return !isNonConstantStorage(Ctx, ExcludeCtor, ExcludeDtor);
+ }
+
/// Determine whether this particular QualType instance has the
/// "restrict" qualifier set, without looking through typedefs that may have
/// added "restrict" at a different level.
@@ -823,6 +917,14 @@ public:
/// Return true if this is a trivially copyable type (C++0x [basic.types]p9)
bool isTriviallyCopyableType(const ASTContext &Context) const;
+ /// Return true if this is a trivially copyable type
+ bool isTriviallyCopyConstructibleType(const ASTContext &Context) const;
+
+ /// Return true if this is a trivially relocatable type.
+ bool isTriviallyRelocatableType(const ASTContext &Context) const;
+
+ /// Return true if this is a trivially equality comparable type.
+ bool isTriviallyEqualityComparableType(const ASTContext &Context) const;
/// Returns true if it is a class and it might be dynamic.
bool mayBeDynamicClass() const;
@@ -830,6 +932,15 @@ public:
/// Returns true if it is not a class or if the class might not be dynamic.
bool mayBeNotDynamicClass() const;
+ /// Returns true if it is a WebAssembly Reference Type.
+ bool isWebAssemblyReferenceType() const;
+
+ /// Returns true if it is a WebAssembly Externref Type.
+ bool isWebAssemblyExternrefType() const;
+
+ /// Returns true if it is a WebAssembly Funcref Type.
+ bool isWebAssemblyFuncrefType() const;
+
// Don't promise in the API that anything besides 'const' can be
// easily added.
@@ -870,7 +981,6 @@ public:
void removeLocalConst();
void removeLocalVolatile();
void removeLocalRestrict();
- void removeLocalCVRQualifiers(unsigned Mask);
void removeLocalFastQualifiers() { Value.setInt(0); }
void removeLocalFastQualifiers(unsigned Mask) {
@@ -924,6 +1034,10 @@ public:
/// The resulting type might still be qualified if it's sugar for an array
/// type. To strip qualifiers even from within a sugared array type, use
/// ASTContext::getUnqualifiedArrayType.
+ ///
+ /// Note: In C, the _Atomic qualifier is special (see C23 6.2.5p32 for
+ /// details), and it is not stripped by this function. Use
+ /// getAtomicUnqualifiedType() to strip qualifiers including _Atomic.
inline QualType getUnqualifiedType() const;
/// Retrieve the unqualified variant of the given type, removing as little
@@ -1305,6 +1419,8 @@ private:
static bool hasNonTrivialToPrimitiveCopyCUnion(const RecordDecl *RD);
};
+raw_ostream &operator<<(raw_ostream &OS, QualType QT);
+
} // namespace clang
namespace llvm {
@@ -1370,7 +1486,8 @@ class ExtQualsTypeCommonBase {
/// in three low bits on the QualType pointer; a fourth bit records whether
/// the pointer is an ExtQuals node. The extended qualifiers (address spaces,
/// Objective-C GC attributes) are much more rare.
-class ExtQuals : public ExtQualsTypeCommonBase, public llvm::FoldingSetNode {
+class alignas(TypeAlignment) ExtQuals : public ExtQualsTypeCommonBase,
+ public llvm::FoldingSetNode {
// NOTE: changing the fast qualifiers should be straightforward as
// long as you don't make 'const' non-fast.
// 1. Qualifiers:
@@ -1456,6 +1573,10 @@ enum class AutoTypeKeyword {
GNUAutoType
};
+enum class ArraySizeModifier;
+enum class ElaboratedTypeKeyword;
+enum class VectorKind;
+
/// The base class of the type hierarchy.
///
/// A central concept with types is that each type always has a canonical
@@ -1482,7 +1603,7 @@ enum class AutoTypeKeyword {
///
/// Types, once created, are immutable.
///
-class alignas(8) Type : public ExtQualsTypeCommonBase {
+class alignas(TypeAlignment) Type : public ExtQualsTypeCommonBase {
public:
enum TypeClass {
#define TYPE(Class, Base) Class,
@@ -1498,22 +1619,28 @@ private:
template <class T> friend class TypePropertyCache;
/// TypeClass bitfield - Enum that specifies what subclass this belongs to.
+ LLVM_PREFERRED_TYPE(TypeClass)
unsigned TC : 8;
/// Store information on the type dependency.
+ LLVM_PREFERRED_TYPE(TypeDependence)
unsigned Dependence : llvm::BitWidth<TypeDependence>;
/// True if the cache (i.e. the bitfields here starting with
/// 'Cache') is valid.
+ LLVM_PREFERRED_TYPE(bool)
mutable unsigned CacheValid : 1;
/// Linkage of this type.
+ LLVM_PREFERRED_TYPE(Linkage)
mutable unsigned CachedLinkage : 3;
/// Whether this type involves and local or unnamed types.
+ LLVM_PREFERRED_TYPE(bool)
mutable unsigned CachedLocalOrUnnamed : 1;
/// Whether this type comes from an AST file.
+ LLVM_PREFERRED_TYPE(bool)
mutable unsigned FromAST : 1;
bool isCacheValid() const {
@@ -1539,34 +1666,41 @@ protected:
class ArrayTypeBitfields {
friend class ArrayType;
+ LLVM_PREFERRED_TYPE(TypeBitfields)
unsigned : NumTypeBits;
/// CVR qualifiers from declarations like
/// 'int X[static restrict 4]'. For function parameters only.
+ LLVM_PREFERRED_TYPE(Qualifiers)
unsigned IndexTypeQuals : 3;
/// Storage class qualifiers from declarations like
/// 'int X[static restrict 4]'. For function parameters only.
- /// Actually an ArrayType::ArraySizeModifier.
+ LLVM_PREFERRED_TYPE(ArraySizeModifier)
unsigned SizeModifier : 3;
};
+ enum { NumArrayTypeBits = NumTypeBits + 6 };
class ConstantArrayTypeBitfields {
friend class ConstantArrayType;
- unsigned : NumTypeBits + 3 + 3;
+ LLVM_PREFERRED_TYPE(ArrayTypeBitfields)
+ unsigned : NumArrayTypeBits;
/// Whether we have a stored size expression.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasStoredSizeExpr : 1;
};
class BuiltinTypeBitfields {
friend class BuiltinType;
+ LLVM_PREFERRED_TYPE(TypeBitfields)
unsigned : NumTypeBits;
/// The kind (BuiltinType::Kind) of builtin type this is.
- unsigned Kind : 8;
+ static constexpr unsigned NumOfBuiltinTypeBits = 9;
+ unsigned Kind : NumOfBuiltinTypeBits;
};
/// FunctionTypeBitfields store various bits belonging to FunctionProtoType.
@@ -1576,15 +1710,18 @@ protected:
friend class FunctionProtoType;
friend class FunctionType;
+ LLVM_PREFERRED_TYPE(TypeBitfields)
unsigned : NumTypeBits;
/// Extra information which affects how the function is called, like
/// regparm and the calling convention.
+ LLVM_PREFERRED_TYPE(CallingConv)
unsigned ExtInfo : 13;
/// The ref-qualifier associated with a \c FunctionProtoType.
///
/// This is a value of type \c RefQualifierKind.
+ LLVM_PREFERRED_TYPE(RefQualifierKind)
unsigned RefQualifier : 2;
/// Used only by FunctionProtoType, put here to pack with the
@@ -1593,8 +1730,10 @@ protected:
///
/// C++ 8.3.5p4: The return type, the parameter type list and the
/// cv-qualifier-seq, [...], are part of the function type.
+ LLVM_PREFERRED_TYPE(Qualifiers)
unsigned FastTypeQuals : Qualifiers::FastWidth;
/// Whether this function has extended Qualifiers.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasExtQuals : 1;
/// The number of parameters this function has, not counting '...'.
@@ -1604,21 +1743,30 @@ protected:
unsigned NumParams : 16;
/// The type of exception specification this function has.
+ LLVM_PREFERRED_TYPE(ExceptionSpecificationType)
unsigned ExceptionSpecType : 4;
/// Whether this function has extended parameter information.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasExtParameterInfos : 1;
+ /// Whether this function has extra bitfields for the prototype.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned HasExtraBitfields : 1;
+
/// Whether the function is variadic.
+ LLVM_PREFERRED_TYPE(bool)
unsigned Variadic : 1;
/// Whether this function has a trailing return type.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasTrailingReturn : 1;
};
class ObjCObjectTypeBitfields {
friend class ObjCObjectType;
+ LLVM_PREFERRED_TYPE(TypeBitfields)
unsigned : NumTypeBits;
/// The number of type arguments stored directly on this object type.
@@ -1628,12 +1776,14 @@ protected:
unsigned NumProtocols : 6;
/// Whether this is a "kindof" type.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsKindOf : 1;
};
class ReferenceTypeBitfields {
friend class ReferenceType;
+ LLVM_PREFERRED_TYPE(TypeBitfields)
unsigned : NumTypeBits;
/// True if the type was originally spelled with an lvalue sigil.
@@ -1647,31 +1797,36 @@ protected:
/// ref &&a; // lvalue, inner ref
/// rvref &a; // lvalue, inner ref, spelled lvalue
/// rvref &&a; // rvalue, inner ref
+ LLVM_PREFERRED_TYPE(bool)
unsigned SpelledAsLValue : 1;
/// True if the inner type is a reference type. This only happens
/// in non-canonical forms.
+ LLVM_PREFERRED_TYPE(bool)
unsigned InnerRef : 1;
};
class TypeWithKeywordBitfields {
friend class TypeWithKeyword;
+ LLVM_PREFERRED_TYPE(TypeBitfields)
unsigned : NumTypeBits;
/// An ElaboratedTypeKeyword. 8 bits for efficient access.
+ LLVM_PREFERRED_TYPE(ElaboratedTypeKeyword)
unsigned Keyword : 8;
};
- enum { NumTypeWithKeywordBits = 8 };
+ enum { NumTypeWithKeywordBits = NumTypeBits + 8 };
class ElaboratedTypeBitfields {
friend class ElaboratedType;
- unsigned : NumTypeBits;
+ LLVM_PREFERRED_TYPE(TypeWithKeywordBitfields)
unsigned : NumTypeWithKeywordBits;
/// Whether the ElaboratedType has a trailing OwnedTagDecl.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasOwnedTagDecl : 1;
};
@@ -1679,11 +1834,13 @@ protected:
friend class VectorType;
friend class DependentVectorType;
+ LLVM_PREFERRED_TYPE(TypeBitfields)
unsigned : NumTypeBits;
/// The kind of vector, either a generic vector type or some
/// target-specific vector type such as for AltiVec or Neon.
- unsigned VecKind : 3;
+ LLVM_PREFERRED_TYPE(VectorKind)
+ unsigned VecKind : 4;
/// The number of elements in the vector.
uint32_t NumElements;
};
@@ -1691,19 +1848,22 @@ protected:
class AttributedTypeBitfields {
friend class AttributedType;
+ LLVM_PREFERRED_TYPE(TypeBitfields)
unsigned : NumTypeBits;
- /// An AttributedType::Kind
+ LLVM_PREFERRED_TYPE(attr::Kind)
unsigned AttrKind : 32 - NumTypeBits;
};
class AutoTypeBitfields {
friend class AutoType;
+ LLVM_PREFERRED_TYPE(TypeBitfields)
unsigned : NumTypeBits;
/// Was this placeholder type spelled as 'auto', 'decltype(auto)',
/// or '__auto_type'? AutoTypeKeyword value.
+ LLVM_PREFERRED_TYPE(AutoTypeKeyword)
unsigned Keyword : 2;
/// The number of template arguments in the type-constraints, which is
@@ -1716,27 +1876,82 @@ protected:
unsigned NumArgs;
};
+ class TypeOfBitfields {
+ friend class TypeOfType;
+ friend class TypeOfExprType;
+
+ LLVM_PREFERRED_TYPE(TypeBitfields)
+ unsigned : NumTypeBits;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsUnqual : 1; // If true: typeof_unqual, else: typeof
+ };
+
+ class UsingBitfields {
+ friend class UsingType;
+
+ LLVM_PREFERRED_TYPE(TypeBitfields)
+ unsigned : NumTypeBits;
+
+ /// True if the underlying type is different from the declared one.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned hasTypeDifferentFromDecl : 1;
+ };
+
+ class TypedefBitfields {
+ friend class TypedefType;
+
+ LLVM_PREFERRED_TYPE(TypeBitfields)
+ unsigned : NumTypeBits;
+
+ /// True if the underlying type is different from the declared one.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned hasTypeDifferentFromDecl : 1;
+ };
+
+ class SubstTemplateTypeParmTypeBitfields {
+ friend class SubstTemplateTypeParmType;
+
+ LLVM_PREFERRED_TYPE(TypeBitfields)
+ unsigned : NumTypeBits;
+
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned HasNonCanonicalUnderlyingType : 1;
+
+ // The index of the template parameter this substitution represents.
+ unsigned Index : 15;
+
+ /// Represents the index within a pack if this represents a substitution
+ /// from a pack expansion. This index starts at the end of the pack and
+ /// increments towards the beginning.
+ /// Positive non-zero number represents the index + 1.
+ /// Zero means this is not substituted from an expansion.
+ unsigned PackIndex : 16;
+ };
+
class SubstTemplateTypeParmPackTypeBitfields {
friend class SubstTemplateTypeParmPackType;
+ LLVM_PREFERRED_TYPE(TypeBitfields)
unsigned : NumTypeBits;
+ // The index of the template parameter this substitution represents.
+ unsigned Index : 16;
+
/// The number of template arguments in \c Arguments, which is
/// expected to be able to hold at least 1024 according to [implimits].
/// However as this limit is somewhat easy to hit with template
/// metaprogramming we'd prefer to keep it as large as possible.
- /// At the moment it has been left as a non-bitfield since this type
- /// safely fits in 64 bits as an unsigned, so there is no reason to
- /// introduce the performance impact of a bitfield.
- unsigned NumArgs;
+ unsigned NumArgs : 16;
};
class TemplateSpecializationTypeBitfields {
friend class TemplateSpecializationType;
+ LLVM_PREFERRED_TYPE(TypeBitfields)
unsigned : NumTypeBits;
/// Whether this template specialization type is a substituted type alias.
+ LLVM_PREFERRED_TYPE(bool)
unsigned TypeAlias : 1;
/// The number of template arguments named in this class template
@@ -1752,7 +1967,7 @@ protected:
class DependentTemplateSpecializationTypeBitfields {
friend class DependentTemplateSpecializationType;
- unsigned : NumTypeBits;
+ LLVM_PREFERRED_TYPE(TypeWithKeywordBitfields)
unsigned : NumTypeWithKeywordBits;
/// The number of template arguments named in this class template
@@ -1768,6 +1983,7 @@ protected:
class PackExpansionTypeBitfields {
friend class PackExpansionType;
+ LLVM_PREFERRED_TYPE(TypeBitfields)
unsigned : NumTypeBits;
/// The number of expansions that this pack expansion will
@@ -1790,6 +2006,9 @@ protected:
ConstantArrayTypeBitfields ConstantArrayTypeBits;
AttributedTypeBitfields AttributedTypeBits;
AutoTypeBitfields AutoTypeBits;
+ TypeOfBitfields TypeOfBits;
+ TypedefBitfields TypedefBits;
+ UsingBitfields UsingBits;
BuiltinTypeBitfields BuiltinTypeBits;
FunctionTypeBitfields FunctionTypeBits;
ObjCObjectTypeBitfields ObjCObjectTypeBits;
@@ -1797,6 +2016,7 @@ protected:
TypeWithKeywordBitfields TypeWithKeywordBits;
ElaboratedTypeBitfields ElaboratedTypeBits;
VectorTypeBitfields VectorTypeBits;
+ SubstTemplateTypeParmTypeBitfields SubstTemplateTypeParmTypeBits;
SubstTemplateTypeParmPackTypeBitfields SubstTemplateTypeParmPackTypeBits;
TemplateSpecializationTypeBitfields TemplateSpecializationTypeBits;
DependentTemplateSpecializationTypeBitfields
@@ -1818,15 +2038,16 @@ protected:
Type(TypeClass tc, QualType canon, TypeDependence Dependence)
: ExtQualsTypeCommonBase(this,
canon.isNull() ? QualType(this_(), 0) : canon) {
- static_assert(sizeof(*this) <= 8 + sizeof(ExtQualsTypeCommonBase),
+ static_assert(sizeof(*this) <=
+ alignof(decltype(*this)) + sizeof(ExtQualsTypeCommonBase),
"changing bitfields changed sizeof(Type)!");
- static_assert(alignof(decltype(*this)) % sizeof(void *) == 0,
+ static_assert(alignof(decltype(*this)) % TypeAlignment == 0,
"Insufficient alignment!");
TypeBits.TC = tc;
TypeBits.Dependence = static_cast<unsigned>(Dependence);
TypeBits.CacheValid = false;
TypeBits.CachedLocalOrUnnamed = false;
- TypeBits.CachedLinkage = NoLinkage;
+ TypeBits.CachedLinkage = llvm::to_underlying(Linkage::Invalid);
TypeBits.FromAST = false;
}
@@ -1894,16 +2115,43 @@ public:
bool isSizelessType() const;
bool isSizelessBuiltinType() const;
+ /// Returns true for all scalable vector types.
+ bool isSizelessVectorType() const;
+
+ /// Returns true for SVE scalable vector types.
+ bool isSVESizelessBuiltinType() const;
+
+ /// Returns true for RVV scalable vector types.
+ bool isRVVSizelessBuiltinType() const;
+
+ /// Check if this is a WebAssembly Externref Type.
+ bool isWebAssemblyExternrefType() const;
+
+ /// Returns true if this is a WebAssembly table type: either an array of
+ /// reference types, or a pointer to a reference type (which can only be
+ /// created by array to pointer decay).
+ bool isWebAssemblyTableType() const;
+
/// Determines if this is a sizeless type supported by the
/// 'arm_sve_vector_bits' type attribute, which can be applied to a single
/// SVE vector or predicate, excluding tuple types such as svint32x4_t.
- bool isVLSTBuiltinType() const;
+ bool isSveVLSBuiltinType() const;
/// Returns the representative type for the element of an SVE builtin type.
/// This is used to represent fixed-length SVE vectors created with the
/// 'arm_sve_vector_bits' type attribute as VectorType.
QualType getSveEltType(const ASTContext &Ctx) const;
+ /// Determines if this is a sizeless type supported by the
+ /// 'riscv_rvv_vector_bits' type attribute, which can be applied to a single
+ /// RVV vector or mask.
+ bool isRVVVLSBuiltinType() const;
+
+ /// Returns the representative type for the element of an RVV builtin type.
+ /// This is used to represent fixed-length RVV vectors created with the
+ /// 'riscv_rvv_vector_bits' type attribute as VectorType.
+ QualType getRVVEltType(const ASTContext &Ctx) const;
+
/// Types are partitioned into 3 broad categories (C99 6.2.5p1):
/// object types, function types, and incomplete types.
@@ -1998,6 +2246,7 @@ public:
bool isFloat16Type() const; // C11 extension ISO/IEC TS 18661
bool isBFloat16Type() const;
bool isFloat128Type() const;
+ bool isIbm128Type() const;
bool isRealType() const; // C99 6.2.5p17 (real floating + integer)
bool isArithmeticType() const; // C99 6.2.5p18 (integer + floating)
bool isVoidType() const; // C99 6.2.5p19
@@ -2039,6 +2288,7 @@ public:
bool isComplexIntegerType() const; // GCC _Complex integer type.
bool isVectorType() const; // GCC vector type.
bool isExtVectorType() const; // Extended vector type.
+ bool isExtVectorBoolType() const; // Extended vector type with bool element.
bool isMatrixType() const; // Matrix type.
bool isConstantMatrixType() const; // Constant matrix type.
bool isDependentAddressSpaceType() const; // value-dependent address space qualifier
@@ -2093,7 +2343,8 @@ public:
bool isObjCARCBridgableType() const;
bool isCARCBridgableType() const;
bool isTemplateTypeParmType() const; // C++ template type parameter
- bool isNullPtrType() const; // C++11 std::nullptr_t
+ bool isNullPtrType() const; // C++11 std::nullptr_t or
+ // C23 nullptr_t
bool isNothrowT() const; // C++ std::nothrow_t
bool isAlignValT() const; // C++17 std::align_val_t
bool isStdByteType() const; // C++17 std::byte
@@ -2122,7 +2373,7 @@ public:
bool isOCLExtOpaqueType() const; // Any OpenCL extension type
bool isPipeType() const; // OpenCL pipe type
- bool isExtIntType() const; // Extended Int Type
+ bool isBitIntType() const; // Bit-precise integer type
bool isOpenCLSpecificType() const; // Any OpenCL specific type
/// Determines if this type, which must satisfy
@@ -2337,9 +2588,6 @@ public:
/// removing any typedefs, typeofs, etc., as well as any qualifiers.
const Type *getUnqualifiedDesugaredType() const;
- /// More type predicates useful for type checking/promotion
- bool isPromotableIntegerType() const; // C99 6.3.1.1p2
-
/// Return true if this is an integer type that is
/// signed, according to C99 6.2.5p4 [char, signed char, short, int, long..],
/// or an enum decl which has a signed representation.
@@ -2415,7 +2663,7 @@ public:
/// Note that nullability is only captured as sugar within the type
/// system, not as part of the canonical type, so nullability will
/// be lost by canonicalization and desugaring.
- Optional<NullabilityKind> getNullability(const ASTContext &context) const;
+ std::optional<NullabilityKind> getNullability() const;
/// Determine whether the given type can have a nullability
/// specifier applied to it, i.e., if it is any kind of pointer type.
@@ -2439,7 +2687,7 @@ public:
/// the type parameters of the given declaration context in any type described
/// within that context, or an empty optional to indicate that no
/// substitution is required.
- Optional<ArrayRef<QualType>>
+ std::optional<ArrayRef<QualType>>
getObjCSubstitutions(const DeclContext *dc) const;
/// Determines if this is an ObjC interface type that may accept type
@@ -2460,6 +2708,7 @@ public:
/// This will check for a TypedefType by removing any existing sugar
/// until it reaches a TypedefType or a non-sugared type.
template <> const TypedefType *Type::getAs() const;
+template <> const UsingType *Type::getAs() const;
/// This will check for a TemplateSpecializationType by removing any
/// existing sugar until it reaches a TemplateSpecializationType or a
@@ -2502,6 +2751,9 @@ public:
// RVV Types
#define RVV_TYPE(Name, Id, SingletonId) Id,
#include "clang/Basic/RISCVVTypes.def"
+// WebAssembly reference types
+#define WASM_TYPE(Name, Id, SingletonId) Id,
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
// All other builtin types
#define BUILTIN_TYPE(Id, SingletonId) Id,
#define LAST_BUILTIN_TYPE(Id) LastKind = Id
@@ -2515,6 +2767,10 @@ private:
: Type(Builtin, QualType(),
K == Dependent ? TypeDependence::DependentInstantiation
: TypeDependence::None) {
+ static_assert(Kind::LastKind <
+ (1 << BuiltinTypeBitfields::NumOfBuiltinTypeBits) &&
+ "Defined builtin type exceeds the allocated space for serial "
+ "numbering");
BuiltinTypeBits.Kind = K;
}
@@ -2545,9 +2801,13 @@ public:
}
bool isFloatingPoint() const {
- return getKind() >= Half && getKind() <= Float128;
+ return getKind() >= Half && getKind() <= Ibm128;
}
+ bool isSVEBool() const { return getKind() == Kind::SveBool; }
+
+ bool isSVECount() const { return getKind() == Kind::SveCount; }
+
/// Determines whether the given kind corresponds to a placeholder type.
static bool isPlaceholderTypeKind(Kind K) {
return K >= Overload;
@@ -2873,17 +3133,14 @@ public:
}
};
+/// Capture whether this is a normal array (e.g. int X[4])
+/// an array with a static size (e.g. int X[static 4]), or an array
+/// with a star size (e.g. int X[*]).
+/// 'static' is only allowed on function parameters.
+enum class ArraySizeModifier { Normal, Static, Star };
+
/// Represents an array type, per C99 6.7.5.2 - Array Declarators.
class ArrayType : public Type, public llvm::FoldingSetNode {
-public:
- /// Capture whether this is a normal array (e.g. int X[4])
- /// an array with a static size (e.g. int X[static 4]), or an array
- /// with a star size (e.g. int X[*]).
- /// 'static' is only allowed on function parameters.
- enum ArraySizeModifier {
- Normal, Static, Star
- };
-
private:
/// The element type of the array.
QualType ElementType;
@@ -2958,6 +3215,8 @@ public:
QualType ElementType,
const llvm::APInt &NumElements);
+ unsigned getNumAddressingBits(const ASTContext &Context) const;
+
/// Determine the maximum number of active bits that an array's size
/// can require, which limits the maximum size of the array.
static unsigned getMaxSizeBits(const ASTContext &Context);
@@ -3005,7 +3264,7 @@ public:
static void Profile(llvm::FoldingSetNodeID &ID, QualType ET,
ArraySizeModifier SizeMod, unsigned TypeQuals) {
ID.AddPointer(ET.getAsOpaquePtr());
- ID.AddInteger(SizeMod);
+ ID.AddInteger(llvm::to_underlying(SizeMod));
ID.AddInteger(TypeQuals);
}
};
@@ -3081,8 +3340,6 @@ public:
class DependentSizedArrayType : public ArrayType {
friend class ASTContext; // ASTContext creates these.
- const ASTContext &Context;
-
/// An assignment expression that will instantiate to the
/// size of the array.
///
@@ -3093,8 +3350,8 @@ class DependentSizedArrayType : public ArrayType {
/// The range spanned by the left and right array brackets.
SourceRange Brackets;
- DependentSizedArrayType(const ASTContext &Context, QualType et, QualType can,
- Expr *e, ArraySizeModifier sm, unsigned tq,
+ DependentSizedArrayType(QualType et, QualType can, Expr *e,
+ ArraySizeModifier sm, unsigned tq,
SourceRange brackets);
public:
@@ -3117,7 +3374,7 @@ public:
return T->getTypeClass() == DependentSizedArray;
}
- void Profile(llvm::FoldingSetNodeID &ID) {
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
Profile(ID, Context, getElementType(),
getSizeModifier(), getIndexTypeCVRQualifiers(), getSizeExpr());
}
@@ -3141,14 +3398,12 @@ public:
class DependentAddressSpaceType : public Type, public llvm::FoldingSetNode {
friend class ASTContext;
- const ASTContext &Context;
Expr *AddrSpaceExpr;
QualType PointeeType;
SourceLocation loc;
- DependentAddressSpaceType(const ASTContext &Context, QualType PointeeType,
- QualType can, Expr *AddrSpaceExpr,
- SourceLocation loc);
+ DependentAddressSpaceType(QualType PointeeType, QualType can,
+ Expr *AddrSpaceExpr, SourceLocation loc);
public:
Expr *getAddrSpaceExpr() const { return AddrSpaceExpr; }
@@ -3162,7 +3417,7 @@ public:
return T->getTypeClass() == DependentAddressSpace;
}
- void Profile(llvm::FoldingSetNodeID &ID) {
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
Profile(ID, Context, getPointeeType(), getAddrSpaceExpr());
}
@@ -3183,7 +3438,6 @@ public:
class DependentSizedExtVectorType : public Type, public llvm::FoldingSetNode {
friend class ASTContext;
- const ASTContext &Context;
Expr *SizeExpr;
/// The element type of the array.
@@ -3191,8 +3445,8 @@ class DependentSizedExtVectorType : public Type, public llvm::FoldingSetNode {
SourceLocation loc;
- DependentSizedExtVectorType(const ASTContext &Context, QualType ElementType,
- QualType can, Expr *SizeExpr, SourceLocation loc);
+ DependentSizedExtVectorType(QualType ElementType, QualType can,
+ Expr *SizeExpr, SourceLocation loc);
public:
Expr *getSizeExpr() const { return SizeExpr; }
@@ -3206,7 +3460,7 @@ public:
return T->getTypeClass() == DependentSizedExtVector;
}
- void Profile(llvm::FoldingSetNodeID &ID) {
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
Profile(ID, Context, getElementType(), getSizeExpr());
}
@@ -3214,40 +3468,44 @@ public:
QualType ElementType, Expr *SizeExpr);
};
+enum class VectorKind {
+ /// not a target-specific vector type
+ Generic,
-/// Represents a GCC generic vector type. This type is created using
-/// __attribute__((vector_size(n)), where "n" specifies the vector size in
-/// bytes; or from an Altivec __vector or vector declaration.
-/// Since the constructor takes the number of vector elements, the
-/// client is responsible for converting the size into the number of elements.
-class VectorType : public Type, public llvm::FoldingSetNode {
-public:
- enum VectorKind {
- /// not a target-specific vector type
- GenericVector,
+ /// is AltiVec vector
+ AltiVecVector,
- /// is AltiVec vector
- AltiVecVector,
+ /// is AltiVec 'vector Pixel'
+ AltiVecPixel,
- /// is AltiVec 'vector Pixel'
- AltiVecPixel,
+ /// is AltiVec 'vector bool ...'
+ AltiVecBool,
- /// is AltiVec 'vector bool ...'
- AltiVecBool,
+ /// is ARM Neon vector
+ Neon,
- /// is ARM Neon vector
- NeonVector,
+ /// is ARM Neon polynomial vector
+ NeonPoly,
- /// is ARM Neon polynomial vector
- NeonPolyVector,
+ /// is AArch64 SVE fixed-length data vector
+ SveFixedLengthData,
- /// is AArch64 SVE fixed-length data vector
- SveFixedLengthDataVector,
+ /// is AArch64 SVE fixed-length predicate vector
+ SveFixedLengthPredicate,
- /// is AArch64 SVE fixed-length predicate vector
- SveFixedLengthPredicateVector
- };
+ /// is RISC-V RVV fixed-length data vector
+ RVVFixedLengthData,
+
+ /// is RISC-V RVV fixed-length mask vector
+ RVVFixedLengthMask,
+};
+/// Represents a GCC generic vector type. This type is created using
+/// __attribute__((vector_size(n)), where "n" specifies the vector size in
+/// bytes; or from an Altivec __vector or vector declaration.
+/// Since the constructor takes the number of vector elements, the
+/// client is responsible for converting the size into the number of elements.
+class VectorType : public Type, public llvm::FoldingSetNode {
protected:
friend class ASTContext; // ASTContext creates these.
@@ -3282,7 +3540,7 @@ public:
ID.AddPointer(ElementType.getAsOpaquePtr());
ID.AddInteger(NumElements);
ID.AddInteger(TypeClass);
- ID.AddInteger(VecKind);
+ ID.AddInteger(llvm::to_underlying(VecKind));
}
static bool classof(const Type *T) {
@@ -3302,21 +3560,19 @@ public:
class DependentVectorType : public Type, public llvm::FoldingSetNode {
friend class ASTContext;
- const ASTContext &Context;
QualType ElementType;
Expr *SizeExpr;
SourceLocation Loc;
- DependentVectorType(const ASTContext &Context, QualType ElementType,
- QualType CanonType, Expr *SizeExpr,
- SourceLocation Loc, VectorType::VectorKind vecKind);
+ DependentVectorType(QualType ElementType, QualType CanonType, Expr *SizeExpr,
+ SourceLocation Loc, VectorKind vecKind);
public:
Expr *getSizeExpr() const { return SizeExpr; }
QualType getElementType() const { return ElementType; }
SourceLocation getAttributeLoc() const { return Loc; }
- VectorType::VectorKind getVectorKind() const {
- return VectorType::VectorKind(VectorTypeBits.VecKind);
+ VectorKind getVectorKind() const {
+ return VectorKind(VectorTypeBits.VecKind);
}
bool isSugared() const { return false; }
@@ -3326,13 +3582,13 @@ public:
return T->getTypeClass() == DependentVector;
}
- void Profile(llvm::FoldingSetNodeID &ID) {
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
Profile(ID, Context, getElementType(), getSizeExpr(), getVectorKind());
}
static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
QualType ElementType, const Expr *SizeExpr,
- VectorType::VectorKind VecKind);
+ VectorKind VecKind);
};
/// ExtVectorType - Extended vector type. This type is created using
@@ -3345,7 +3601,8 @@ class ExtVectorType : public VectorType {
friend class ASTContext; // ASTContext creates these.
ExtVectorType(QualType vecType, unsigned nElements, QualType canonType)
- : VectorType(ExtVector, vecType, nElements, canonType, GenericVector) {}
+ : VectorType(ExtVector, vecType, nElements, canonType,
+ VectorKind::Generic) {}
public:
static int getPointAccessorIdx(char c) {
@@ -3427,7 +3684,7 @@ public:
QualType getElementType() const { return ElementType; }
/// Valid elements types are the following:
- /// * an integer type (as in C2x 6.2.5p19), but excluding enumerated types
+ /// * an integer type (as in C23 6.2.5p22), but excluding enumerated types
/// and _Bool
/// * the standard floating types float or double
/// * a half-precision floating point type, if one is supported on the target
@@ -3450,10 +3707,6 @@ class ConstantMatrixType final : public MatrixType {
protected:
friend class ASTContext;
- /// The element type of the matrix.
- // FIXME: Appears to be unused? There is also MatrixType::ElementType...
- QualType ElementType;
-
/// Number of rows and columns.
unsigned NumRows;
unsigned NumColumns;
@@ -3512,30 +3765,24 @@ public:
class DependentSizedMatrixType final : public MatrixType {
friend class ASTContext;
- const ASTContext &Context;
Expr *RowExpr;
Expr *ColumnExpr;
SourceLocation loc;
- DependentSizedMatrixType(const ASTContext &Context, QualType ElementType,
- QualType CanonicalType, Expr *RowExpr,
- Expr *ColumnExpr, SourceLocation loc);
+ DependentSizedMatrixType(QualType ElementType, QualType CanonicalType,
+ Expr *RowExpr, Expr *ColumnExpr, SourceLocation loc);
public:
- QualType getElementType() const { return ElementType; }
Expr *getRowExpr() const { return RowExpr; }
Expr *getColumnExpr() const { return ColumnExpr; }
SourceLocation getAttributeLoc() const { return loc; }
- bool isSugared() const { return false; }
- QualType desugar() const { return QualType(this, 0); }
-
static bool classof(const Type *T) {
return T->getTypeClass() == DependentSizedMatrix;
}
- void Profile(llvm::FoldingSetNodeID &ID) {
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
Profile(ID, Context, getElementType(), getRowExpr(), getColumnExpr());
}
@@ -3787,13 +4034,64 @@ public:
/// A simple holder for various uncommon bits which do not fit in
/// FunctionTypeBitfields. Aligned to alignof(void *) to maintain the
- /// alignment of subsequent objects in TrailingObjects. You must update
- /// hasExtraBitfields in FunctionProtoType after adding extra data here.
+ /// alignment of subsequent objects in TrailingObjects.
struct alignas(void *) FunctionTypeExtraBitfields {
/// The number of types in the exception specification.
/// A whole unsigned is not needed here and according to
/// [implimits] 8 bits would be enough here.
- unsigned NumExceptionType;
+ unsigned NumExceptionType : 10;
+
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned HasArmTypeAttributes : 1;
+
+ FunctionTypeExtraBitfields()
+ : NumExceptionType(0), HasArmTypeAttributes(false) {}
+ };
+
+ /// The AArch64 SME ACLE (Arm C/C++ Language Extensions) define a number
+ /// of function type attributes that can be set on function types, including
+ /// function pointers.
+ enum AArch64SMETypeAttributes : unsigned {
+ SME_NormalFunction = 0,
+ SME_PStateSMEnabledMask = 1 << 0,
+ SME_PStateSMCompatibleMask = 1 << 1,
+
+ // Describes the value of the state using ArmStateValue.
+ SME_ZAShift = 2,
+ SME_ZAMask = 0b111 << SME_ZAShift,
+ SME_ZT0Shift = 5,
+ SME_ZT0Mask = 0b111 << SME_ZT0Shift,
+
+ SME_AttributeMask =
+ 0b111'111'11 // We can't support more than 8 bits because of
+ // the bitmask in FunctionTypeExtraBitfields.
+ };
+
+ enum ArmStateValue : unsigned {
+ ARM_None = 0,
+ ARM_Preserves = 1,
+ ARM_In = 2,
+ ARM_Out = 3,
+ ARM_InOut = 4,
+ };
+
+ static ArmStateValue getArmZAState(unsigned AttrBits) {
+ return (ArmStateValue)((AttrBits & SME_ZAMask) >> SME_ZAShift);
+ }
+
+ static ArmStateValue getArmZT0State(unsigned AttrBits) {
+ return (ArmStateValue)((AttrBits & SME_ZT0Mask) >> SME_ZT0Shift);
+ }
+
+ /// A holder for Arm type attributes as described in the Arm C/C++
+ /// Language extensions which are not particularly common to all
+ /// types and therefore accounted separately from FunctionTypeBitfields.
+ struct alignas(void *) FunctionTypeArmAttributes {
+ /// Any AArch64 SME ACLE type attributes that need to be propagated
+ /// on declarations and function pointers.
+ unsigned AArch64SMEAttributes : 8;
+
+ FunctionTypeArmAttributes() : AArch64SMEAttributes(SME_NormalFunction) {}
};
protected:
@@ -3804,7 +4102,10 @@ protected:
}
Qualifiers getFastTypeQuals() const {
- return Qualifiers::fromFastMask(FunctionTypeBits.FastTypeQuals);
+ if (isFunctionProtoType())
+ return Qualifiers::fromFastMask(FunctionTypeBits.FastTypeQuals);
+
+ return Qualifiers();
}
public:
@@ -3889,7 +4190,8 @@ class FunctionProtoType final
public llvm::FoldingSetNode,
private llvm::TrailingObjects<
FunctionProtoType, QualType, SourceLocation,
- FunctionType::FunctionTypeExtraBitfields, FunctionType::ExceptionType,
+ FunctionType::FunctionTypeExtraBitfields,
+ FunctionType::FunctionTypeArmAttributes, FunctionType::ExceptionType,
Expr *, FunctionDecl *, FunctionType::ExtParameterInfo, Qualifiers> {
friend class ASTContext; // ASTContext creates these.
friend TrailingObjects;
@@ -3962,6 +4264,8 @@ public:
ExceptionSpecInfo() = default;
ExceptionSpecInfo(ExceptionSpecificationType EST) : Type(EST) {}
+
+ void instantiate();
};
/// Extra information about a function prototype. ExtProtoInfo is not
@@ -3969,24 +4273,44 @@ public:
/// the various bits of extra information about a function prototype.
struct ExtProtoInfo {
FunctionType::ExtInfo ExtInfo;
- bool Variadic : 1;
- bool HasTrailingReturn : 1;
+ unsigned Variadic : 1;
+ unsigned HasTrailingReturn : 1;
+ unsigned AArch64SMEAttributes : 8;
Qualifiers TypeQuals;
RefQualifierKind RefQualifier = RQ_None;
ExceptionSpecInfo ExceptionSpec;
const ExtParameterInfo *ExtParameterInfos = nullptr;
SourceLocation EllipsisLoc;
- ExtProtoInfo() : Variadic(false), HasTrailingReturn(false) {}
+ ExtProtoInfo()
+ : Variadic(false), HasTrailingReturn(false),
+ AArch64SMEAttributes(SME_NormalFunction) {}
ExtProtoInfo(CallingConv CC)
- : ExtInfo(CC), Variadic(false), HasTrailingReturn(false) {}
+ : ExtInfo(CC), Variadic(false), HasTrailingReturn(false),
+ AArch64SMEAttributes(SME_NormalFunction) {}
ExtProtoInfo withExceptionSpec(const ExceptionSpecInfo &ESI) {
ExtProtoInfo Result(*this);
Result.ExceptionSpec = ESI;
return Result;
}
+
+ bool requiresFunctionProtoTypeExtraBitfields() const {
+ return ExceptionSpec.Type == EST_Dynamic ||
+ requiresFunctionProtoTypeArmAttributes();
+ }
+
+ bool requiresFunctionProtoTypeArmAttributes() const {
+ return AArch64SMEAttributes != SME_NormalFunction;
+ }
+
+ void setArmSMEAttribute(AArch64SMETypeAttributes Kind, bool Enable = true) {
+ if (Enable)
+ AArch64SMEAttributes |= Kind;
+ else
+ AArch64SMEAttributes &= ~Kind;
+ }
};
private:
@@ -3998,6 +4322,10 @@ private:
return isVariadic();
}
+ unsigned numTrailingObjects(OverloadToken<FunctionTypeArmAttributes>) const {
+ return hasArmTypeAttributes();
+ }
+
unsigned numTrailingObjects(OverloadToken<FunctionTypeExtraBitfields>) const {
return hasExtraBitfields();
}
@@ -4078,15 +4406,18 @@ private:
}
/// Whether the trailing FunctionTypeExtraBitfields is present.
- static bool hasExtraBitfields(ExceptionSpecificationType EST) {
- // If the exception spec type is EST_Dynamic then we have > 0 exception
- // types and the exact number is stored in FunctionTypeExtraBitfields.
- return EST == EST_Dynamic;
+ bool hasExtraBitfields() const {
+ assert((getExceptionSpecType() != EST_Dynamic ||
+ FunctionTypeBits.HasExtraBitfields) &&
+ "ExtraBitfields are required for given ExceptionSpecType");
+ return FunctionTypeBits.HasExtraBitfields;
+
}
- /// Whether the trailing FunctionTypeExtraBitfields is present.
- bool hasExtraBitfields() const {
- return hasExtraBitfields(getExceptionSpecType());
+ bool hasArmTypeAttributes() const {
+ return FunctionTypeBits.HasExtraBitfields &&
+ getTrailingObjects<FunctionTypeExtraBitfields>()
+ ->HasArmTypeAttributes;
}
bool hasExtQualifiers() const {
@@ -4102,7 +4433,7 @@ public:
}
ArrayRef<QualType> getParamTypes() const {
- return llvm::makeArrayRef(param_type_begin(), param_type_end());
+ return llvm::ArrayRef(param_type_begin(), param_type_end());
}
ExtProtoInfo getExtProtoInfo() const {
@@ -4115,6 +4446,7 @@ public:
EPI.TypeQuals = getMethodQuals();
EPI.RefQualifier = getRefQualifier();
EPI.ExtParameterInfos = getExtParameterInfosOrNull();
+ EPI.AArch64SMEAttributes = getAArch64SMEAttributes();
return EPI;
}
@@ -4247,10 +4579,9 @@ public:
}
using param_type_iterator = const QualType *;
- using param_type_range = llvm::iterator_range<param_type_iterator>;
- param_type_range param_types() const {
- return param_type_range(param_type_begin(), param_type_end());
+ ArrayRef<QualType> param_types() const {
+ return llvm::ArrayRef(param_type_begin(), param_type_end());
}
param_type_iterator param_type_begin() const {
@@ -4264,7 +4595,7 @@ public:
using exception_iterator = const QualType *;
ArrayRef<QualType> exceptions() const {
- return llvm::makeArrayRef(exception_begin(), exception_end());
+ return llvm::ArrayRef(exception_begin(), exception_end());
}
exception_iterator exception_begin() const {
@@ -4297,6 +4628,15 @@ public:
return getTrailingObjects<ExtParameterInfo>();
}
+ /// Return a bitmask describing the SME attributes on the function type, see
+ /// AArch64SMETypeAttributes for their values.
+ unsigned getAArch64SMEAttributes() const {
+ if (!hasArmTypeAttributes())
+ return SME_NormalFunction;
+ return getTrailingObjects<FunctionTypeArmAttributes>()
+ ->AArch64SMEAttributes;
+ }
+
ExtParameterInfo getExtParameterInfo(unsigned I) const {
assert(I < getNumParams() && "parameter index out of range");
if (hasExtParameterInfos())
@@ -4370,11 +4710,45 @@ public:
}
};
-class TypedefType : public Type {
- TypedefNameDecl *Decl;
+class UsingType final : public Type,
+ public llvm::FoldingSetNode,
+ private llvm::TrailingObjects<UsingType, QualType> {
+ UsingShadowDecl *Found;
+ friend class ASTContext; // ASTContext creates these.
+ friend TrailingObjects;
-private:
+ UsingType(const UsingShadowDecl *Found, QualType Underlying, QualType Canon);
+
+public:
+ UsingShadowDecl *getFoundDecl() const { return Found; }
+ QualType getUnderlyingType() const;
+
+ bool isSugared() const { return true; }
+
+ // This always has the 'same' type as declared, but not necessarily identical.
+ QualType desugar() const { return getUnderlyingType(); }
+
+ // Internal helper, for debugging purposes.
+ bool typeMatchesDecl() const { return !UsingBits.hasTypeDifferentFromDecl; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, Found, typeMatchesDecl() ? QualType() : getUnderlyingType());
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID, const UsingShadowDecl *Found,
+ QualType Underlying) {
+ ID.AddPointer(Found);
+ if (!Underlying.isNull())
+ Underlying.Profile(ID);
+ }
+ static bool classof(const Type *T) { return T->getTypeClass() == Using; }
+};
+
+class TypedefType final : public Type,
+ public llvm::FoldingSetNode,
+ private llvm::TrailingObjects<TypedefType, QualType> {
+ TypedefNameDecl *Decl;
friend class ASTContext; // ASTContext creates these.
+ friend TrailingObjects;
TypedefType(TypeClass tc, const TypedefNameDecl *D, QualType underlying,
QualType can);
@@ -4383,8 +4757,23 @@ public:
TypedefNameDecl *getDecl() const { return Decl; }
bool isSugared() const { return true; }
+
+ // This always has the 'same' type as declared, but not necessarily identical.
QualType desugar() const;
+ // Internal helper, for debugging purposes.
+ bool typeMatchesDecl() const { return !TypedefBits.hasTypeDifferentFromDecl; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, Decl, typeMatchesDecl() ? QualType() : desugar());
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID, const TypedefNameDecl *Decl,
+ QualType Underlying) {
+ ID.AddPointer(Decl);
+ if (!Underlying.isNull())
+ Underlying.Profile(ID);
+ }
+
static bool classof(const Type *T) { return T->getTypeClass() == Typedef; }
};
@@ -4420,18 +4809,25 @@ public:
}
};
-/// Represents a `typeof` (or __typeof__) expression (a GCC extension).
+/// Represents a `typeof` (or __typeof__) expression (a C23 feature and GCC
+/// extension) or a `typeof_unqual` expression (a C23 feature).
class TypeOfExprType : public Type {
Expr *TOExpr;
protected:
friend class ASTContext; // ASTContext creates these.
- TypeOfExprType(Expr *E, QualType can = QualType());
+ TypeOfExprType(Expr *E, TypeOfKind Kind, QualType Can = QualType());
public:
Expr *getUnderlyingExpr() const { return TOExpr; }
+ /// Returns the kind of 'typeof' type this is.
+ TypeOfKind getKind() const {
+ return TypeOfBits.IsUnqual ? TypeOfKind::Unqualified
+ : TypeOfKind::Qualified;
+ }
+
/// Remove a single level of sugar.
QualType desugar() const;
@@ -4447,42 +4843,54 @@ public:
/// This class is used internally by the ASTContext to manage
/// canonical, dependent types, only. Clients will only see instances
/// of this class via TypeOfExprType nodes.
-class DependentTypeOfExprType
- : public TypeOfExprType, public llvm::FoldingSetNode {
- const ASTContext &Context;
-
+class DependentTypeOfExprType : public TypeOfExprType,
+ public llvm::FoldingSetNode {
public:
- DependentTypeOfExprType(const ASTContext &Context, Expr *E)
- : TypeOfExprType(E), Context(Context) {}
+ DependentTypeOfExprType(Expr *E, TypeOfKind Kind) : TypeOfExprType(E, Kind) {}
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, Context, getUnderlyingExpr());
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
+ Profile(ID, Context, getUnderlyingExpr(),
+ getKind() == TypeOfKind::Unqualified);
}
static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
- Expr *E);
+ Expr *E, bool IsUnqual);
};
-/// Represents `typeof(type)`, a GCC extension.
+/// Represents `typeof(type)`, a C23 feature and GCC extension, or
+/// `typeof_unqual(type), a C23 feature.
class TypeOfType : public Type {
friend class ASTContext; // ASTContext creates these.
QualType TOType;
- TypeOfType(QualType T, QualType can)
- : Type(TypeOf, can, T->getDependence()), TOType(T) {
- assert(!isa<TypedefType>(can) && "Invalid canonical type");
+ TypeOfType(QualType T, QualType Can, TypeOfKind Kind)
+ : Type(TypeOf,
+ Kind == TypeOfKind::Unqualified ? Can.getAtomicUnqualifiedType()
+ : Can,
+ T->getDependence()),
+ TOType(T) {
+ TypeOfBits.IsUnqual = Kind == TypeOfKind::Unqualified;
}
public:
- QualType getUnderlyingType() const { return TOType; }
+ QualType getUnmodifiedType() const { return TOType; }
/// Remove a single level of sugar.
- QualType desugar() const { return getUnderlyingType(); }
+ QualType desugar() const {
+ QualType QT = getUnmodifiedType();
+ return TypeOfBits.IsUnqual ? QT.getAtomicUnqualifiedType() : QT;
+ }
/// Returns whether this type directly provides sugar.
bool isSugared() const { return true; }
+ /// Returns the kind of 'typeof' type this is.
+ TypeOfKind getKind() const {
+ return TypeOfBits.IsUnqual ? TypeOfKind::Unqualified
+ : TypeOfKind::Qualified;
+ }
+
static bool classof(const Type *T) { return T->getTypeClass() == TypeOf; }
};
@@ -4516,12 +4924,10 @@ public:
/// canonical, dependent types, only. Clients will only see instances
/// of this class via DecltypeType nodes.
class DependentDecltypeType : public DecltypeType, public llvm::FoldingSetNode {
- const ASTContext &Context;
-
public:
- DependentDecltypeType(const ASTContext &Context, Expr *E);
+ DependentDecltypeType(Expr *E, QualType UnderlyingTpe);
- void Profile(llvm::FoldingSetNodeID &ID) {
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
Profile(ID, Context, getUnderlyingExpr());
}
@@ -4533,7 +4939,8 @@ public:
class UnaryTransformType : public Type {
public:
enum UTTKind {
- EnumUnderlyingType
+#define TRANSFORM_TYPE_TRAIT_DEF(Enum, _) Enum,
+#include "clang/Basic/TransformTypeTraits.def"
};
private:
@@ -4714,9 +5121,11 @@ public:
bool isMSTypeSpec() const;
+ bool isWebAssemblyFuncrefSpec() const;
+
bool isCallingConv() const;
- llvm::Optional<NullabilityKind> getImmediateNullability() const;
+ std::optional<NullabilityKind> getImmediateNullability() const;
/// Retrieve the attribute kind corresponding to the given
/// nullability kind.
@@ -4746,7 +5155,7 @@ public:
/// to the underlying modified type.
///
/// \returns the top-level nullability, if present.
- static Optional<NullabilityKind> stripOuterNullability(QualType &T);
+ static std::optional<NullabilityKind> stripOuterNullability(QualType &T);
void Profile(llvm::FoldingSetNodeID &ID) {
Profile(ID, getAttrKind(), ModifiedType, EquivalentType);
@@ -4764,6 +5173,40 @@ public:
}
};
+class BTFTagAttributedType : public Type, public llvm::FoldingSetNode {
+private:
+ friend class ASTContext; // ASTContext creates these
+
+ QualType WrappedType;
+ const BTFTypeTagAttr *BTFAttr;
+
+ BTFTagAttributedType(QualType Canon, QualType Wrapped,
+ const BTFTypeTagAttr *BTFAttr)
+ : Type(BTFTagAttributed, Canon, Wrapped->getDependence()),
+ WrappedType(Wrapped), BTFAttr(BTFAttr) {}
+
+public:
+ QualType getWrappedType() const { return WrappedType; }
+ const BTFTypeTagAttr *getAttr() const { return BTFAttr; }
+
+ bool isSugared() const { return true; }
+ QualType desugar() const { return getWrappedType(); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, WrappedType, BTFAttr);
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Wrapped,
+ const BTFTypeTagAttr *BTFAttr) {
+ ID.AddPointer(Wrapped.getAsOpaquePtr());
+ ID.AddPointer(BTFAttr);
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == BTFTagAttributed;
+ }
+};
+
class TemplateTypeParmType : public Type, public llvm::FoldingSetNode {
friend class ASTContext; // ASTContext creates these
@@ -4843,40 +5286,60 @@ public:
/// been replaced with these. They are used solely to record that a
/// type was originally written as a template type parameter;
/// therefore they are never canonical.
-class SubstTemplateTypeParmType : public Type, public llvm::FoldingSetNode {
+class SubstTemplateTypeParmType final
+ : public Type,
+ public llvm::FoldingSetNode,
+ private llvm::TrailingObjects<SubstTemplateTypeParmType, QualType> {
friend class ASTContext;
+ friend class llvm::TrailingObjects<SubstTemplateTypeParmType, QualType>;
- // The original type parameter.
- const TemplateTypeParmType *Replaced;
+ Decl *AssociatedDecl;
- SubstTemplateTypeParmType(const TemplateTypeParmType *Param, QualType Canon)
- : Type(SubstTemplateTypeParm, Canon, Canon->getDependence()),
- Replaced(Param) {}
+ SubstTemplateTypeParmType(QualType Replacement, Decl *AssociatedDecl,
+ unsigned Index, std::optional<unsigned> PackIndex);
public:
- /// Gets the template parameter that was substituted for.
- const TemplateTypeParmType *getReplacedParameter() const {
- return Replaced;
- }
-
/// Gets the type that was substituted for the template
/// parameter.
QualType getReplacementType() const {
- return getCanonicalTypeInternal();
+ return SubstTemplateTypeParmTypeBits.HasNonCanonicalUnderlyingType
+ ? *getTrailingObjects<QualType>()
+ : getCanonicalTypeInternal();
+ }
+
+ /// A template-like entity which owns the whole pattern being substituted.
+ /// This will usually own a set of template parameters, or in some
+ /// cases might even be a template parameter itself.
+ Decl *getAssociatedDecl() const { return AssociatedDecl; }
+
+ /// Gets the template parameter declaration that was substituted for.
+ const TemplateTypeParmDecl *getReplacedParameter() const;
+
+ /// Returns the index of the replaced parameter in the associated declaration.
+ /// This should match the result of `getReplacedParameter()->getIndex()`.
+ unsigned getIndex() const { return SubstTemplateTypeParmTypeBits.Index; }
+
+ std::optional<unsigned> getPackIndex() const {
+ if (SubstTemplateTypeParmTypeBits.PackIndex == 0)
+ return std::nullopt;
+ return SubstTemplateTypeParmTypeBits.PackIndex - 1;
}
bool isSugared() const { return true; }
QualType desugar() const { return getReplacementType(); }
void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getReplacedParameter(), getReplacementType());
+ Profile(ID, getReplacementType(), getAssociatedDecl(), getIndex(),
+ getPackIndex());
}
- static void Profile(llvm::FoldingSetNodeID &ID,
- const TemplateTypeParmType *Replaced,
- QualType Replacement) {
- ID.AddPointer(Replaced);
- ID.AddPointer(Replacement.getAsOpaquePtr());
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Replacement,
+ const Decl *AssociatedDecl, unsigned Index,
+ std::optional<unsigned> PackIndex) {
+ Replacement.Profile(ID);
+ ID.AddPointer(AssociatedDecl);
+ ID.AddInteger(Index);
+ ID.AddInteger(PackIndex ? *PackIndex - 1 : 0);
}
static bool classof(const Type *T) {
@@ -4899,24 +5362,33 @@ public:
class SubstTemplateTypeParmPackType : public Type, public llvm::FoldingSetNode {
friend class ASTContext;
- /// The original type parameter.
- const TemplateTypeParmType *Replaced;
-
/// A pointer to the set of template arguments that this
/// parameter pack is instantiated with.
const TemplateArgument *Arguments;
- SubstTemplateTypeParmPackType(const TemplateTypeParmType *Param,
- QualType Canon,
+ llvm::PointerIntPair<Decl *, 1, bool> AssociatedDeclAndFinal;
+
+ SubstTemplateTypeParmPackType(QualType Canon, Decl *AssociatedDecl,
+ unsigned Index, bool Final,
const TemplateArgument &ArgPack);
public:
- IdentifierInfo *getIdentifier() const { return Replaced->getIdentifier(); }
+ IdentifierInfo *getIdentifier() const;
- /// Gets the template parameter that was substituted for.
- const TemplateTypeParmType *getReplacedParameter() const {
- return Replaced;
- }
+ /// A template-like entity which owns the whole pattern being substituted.
+ /// This will usually own a set of template parameters, or in some
+ /// cases might even be a template parameter itself.
+ Decl *getAssociatedDecl() const;
+
+ /// Gets the template parameter declaration that was substituted for.
+ const TemplateTypeParmDecl *getReplacedParameter() const;
+
+ /// Returns the index of the replaced parameter in the associated declaration.
+ /// This should match the result of `getReplacedParameter()->getIndex()`.
+ unsigned getIndex() const { return SubstTemplateTypeParmPackTypeBits.Index; }
+
+ // When true the substitution will be 'Final' (subst node won't be placed).
+ bool getFinal() const;
unsigned getNumArgs() const {
return SubstTemplateTypeParmPackTypeBits.NumArgs;
@@ -4928,8 +5400,8 @@ public:
TemplateArgument getArgumentPack() const;
void Profile(llvm::FoldingSetNodeID &ID);
- static void Profile(llvm::FoldingSetNodeID &ID,
- const TemplateTypeParmType *Replaced,
+ static void Profile(llvm::FoldingSetNodeID &ID, const Decl *AssociatedDecl,
+ unsigned Index, bool Final,
const TemplateArgument &ArgPack);
static bool classof(const Type *T) {
@@ -4946,29 +5418,29 @@ public:
/// type-dependent, there is no deduced type and the type is canonical. In
/// the latter case, it is also a dependent type.
class DeducedType : public Type {
+ QualType DeducedAsType;
+
protected:
DeducedType(TypeClass TC, QualType DeducedAsType,
- TypeDependence ExtraDependence)
- : Type(TC,
- // FIXME: Retain the sugared deduced type?
- DeducedAsType.isNull() ? QualType(this, 0)
- : DeducedAsType.getCanonicalType(),
+ TypeDependence ExtraDependence, QualType Canon)
+ : Type(TC, Canon,
ExtraDependence | (DeducedAsType.isNull()
? TypeDependence::None
: DeducedAsType->getDependence() &
- ~TypeDependence::VariablyModified)) {}
+ ~TypeDependence::VariablyModified)),
+ DeducedAsType(DeducedAsType) {}
public:
- bool isSugared() const { return !isCanonicalUnqualified(); }
- QualType desugar() const { return getCanonicalTypeInternal(); }
-
- /// Get the type deduced for this placeholder type, or null if it's
- /// either not been deduced or was deduced to a dependent type.
- QualType getDeducedType() const {
- return !isCanonicalUnqualified() ? getCanonicalTypeInternal() : QualType();
+ bool isSugared() const { return !DeducedAsType.isNull(); }
+ QualType desugar() const {
+ return isSugared() ? DeducedAsType : QualType(this, 0);
}
+
+ /// Get the type deduced for this placeholder type, or null if it
+ /// has not been deduced.
+ QualType getDeducedType() const { return DeducedAsType; }
bool isDeduced() const {
- return !isCanonicalUnqualified() || isDependentType();
+ return !DeducedAsType.isNull() || isDependentType();
}
static bool classof(const Type *T) {
@@ -4979,38 +5451,19 @@ public:
/// Represents a C++11 auto or C++14 decltype(auto) type, possibly constrained
/// by a type-constraint.
-class alignas(8) AutoType : public DeducedType, public llvm::FoldingSetNode {
+class AutoType : public DeducedType, public llvm::FoldingSetNode {
friend class ASTContext; // ASTContext creates these
ConceptDecl *TypeConstraintConcept;
AutoType(QualType DeducedAsType, AutoTypeKeyword Keyword,
- TypeDependence ExtraDependence, ConceptDecl *CD,
+ TypeDependence ExtraDependence, QualType Canon, ConceptDecl *CD,
ArrayRef<TemplateArgument> TypeConstraintArgs);
- const TemplateArgument *getArgBuffer() const {
- return reinterpret_cast<const TemplateArgument*>(this+1);
- }
-
- TemplateArgument *getArgBuffer() {
- return reinterpret_cast<TemplateArgument*>(this+1);
- }
-
public:
- /// Retrieve the template arguments.
- const TemplateArgument *getArgs() const {
- return getArgBuffer();
- }
-
- /// Retrieve the number of template arguments.
- unsigned getNumArgs() const {
- return AutoTypeBits.NumArgs;
- }
-
- const TemplateArgument &getArg(unsigned Idx) const; // in TemplateBase.h
-
ArrayRef<TemplateArgument> getTypeConstraintArguments() const {
- return {getArgs(), getNumArgs()};
+ return {reinterpret_cast<const TemplateArgument *>(this + 1),
+ AutoTypeBits.NumArgs};
}
ConceptDecl *getTypeConstraintConcept() const {
@@ -5025,15 +5478,15 @@ public:
return getKeyword() == AutoTypeKeyword::DecltypeAuto;
}
- AutoTypeKeyword getKeyword() const {
- return (AutoTypeKeyword)AutoTypeBits.Keyword;
+ bool isGNUAutoType() const {
+ return getKeyword() == AutoTypeKeyword::GNUAutoType;
}
- void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
- Profile(ID, Context, getDeducedType(), getKeyword(), isDependentType(),
- getTypeConstraintConcept(), getTypeConstraintArguments());
+ AutoTypeKeyword getKeyword() const {
+ return (AutoTypeKeyword)AutoTypeBits.Keyword;
}
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context);
static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
QualType Deduced, AutoTypeKeyword Keyword,
bool IsDependent, ConceptDecl *CD,
@@ -5059,7 +5512,9 @@ class DeducedTemplateSpecializationType : public DeducedType,
toTypeDependence(Template.getDependence()) |
(IsDeducedAsDependent
? TypeDependence::DependentInstantiation
- : TypeDependence::None)),
+ : TypeDependence::None),
+ DeducedAsType.isNull() ? QualType(this, 0)
+ : DeducedAsType.getCanonicalType()),
Template(Template) {}
public:
@@ -5073,8 +5528,10 @@ public:
static void Profile(llvm::FoldingSetNodeID &ID, TemplateName Template,
QualType Deduced, bool IsDependent) {
Template.Profile(ID);
- ID.AddPointer(Deduced.getAsOpaquePtr());
- ID.AddBoolean(IsDependent);
+ QualType CanonicalType =
+ Deduced.isNull() ? Deduced : Deduced.getCanonicalType();
+ ID.AddPointer(CanonicalType.getAsOpaquePtr());
+ ID.AddBoolean(IsDependent || Template.isDependent());
}
static bool classof(const Type *T) {
@@ -5102,9 +5559,7 @@ public:
/// TemplateArguments, followed by a QualType representing the
/// non-canonical aliased type when the template is a type alias
/// template.
-class alignas(8) TemplateSpecializationType
- : public Type,
- public llvm::FoldingSetNode {
+class TemplateSpecializationType : public Type, public llvm::FoldingSetNode {
friend class ASTContext; // ASTContext creates these
/// The name of the template being specialized. This is
@@ -5167,35 +5622,14 @@ public:
/// Get the aliased type, if this is a specialization of a type alias
/// template.
- QualType getAliasedType() const {
- assert(isTypeAlias() && "not a type alias template specialization");
- return *reinterpret_cast<const QualType*>(end());
- }
-
- using iterator = const TemplateArgument *;
-
- iterator begin() const { return getArgs(); }
- iterator end() const; // defined inline in TemplateBase.h
+ QualType getAliasedType() const;
/// Retrieve the name of the template that we are specializing.
TemplateName getTemplateName() const { return Template; }
- /// Retrieve the template arguments.
- const TemplateArgument *getArgs() const {
- return reinterpret_cast<const TemplateArgument *>(this + 1);
- }
-
- /// Retrieve the number of template arguments.
- unsigned getNumArgs() const {
- return TemplateSpecializationTypeBits.NumArgs;
- }
-
- /// Retrieve a specific template argument as a type.
- /// \pre \c isArgType(Arg)
- const TemplateArgument &getArg(unsigned Idx) const; // in TemplateBase.h
-
ArrayRef<TemplateArgument> template_arguments() const {
- return {getArgs(), getNumArgs()};
+ return {reinterpret_cast<const TemplateArgument *>(this + 1),
+ TemplateSpecializationTypeBits.NumArgs};
}
bool isSugared() const {
@@ -5206,12 +5640,7 @@ public:
return isTypeAlias() ? getAliasedType() : getCanonicalTypeInternal();
}
- void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx) {
- Profile(ID, Template, template_arguments(), Ctx);
- if (isTypeAlias())
- getAliasedType().Profile(ID);
- }
-
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx);
static void Profile(llvm::FoldingSetNodeID &ID, TemplateName T,
ArrayRef<TemplateArgument> Args,
const ASTContext &Context);
@@ -5238,6 +5667,13 @@ void printTemplateArgumentList(raw_ostream &OS,
const PrintingPolicy &Policy,
const TemplateParameterList *TPL = nullptr);
+/// Make a best-effort determination of whether the type T can be produced by
+/// substituting Args into the default argument of Param.
+bool isSubstitutedDefaultArgument(ASTContext &Ctx, TemplateArgument Arg,
+ const NamedDecl *Param,
+ ArrayRef<TemplateArgument> Args,
+ unsigned Depth);
+
/// The injected class name of a C++ class template or class
/// template partial specialization. Used to record that a type was
/// spelled with a bare identifier rather than as a template-id; the
@@ -5306,48 +5742,48 @@ public:
}
};
-/// The kind of a tag type.
-enum TagTypeKind {
- /// The "struct" keyword.
- TTK_Struct,
-
- /// The "__interface" keyword.
- TTK_Interface,
-
- /// The "union" keyword.
- TTK_Union,
-
- /// The "class" keyword.
- TTK_Class,
-
- /// The "enum" keyword.
- TTK_Enum
-};
-
/// The elaboration keyword that precedes a qualified type name or
/// introduces an elaborated-type-specifier.
-enum ElaboratedTypeKeyword {
+enum class ElaboratedTypeKeyword {
/// The "struct" keyword introduces the elaborated-type-specifier.
- ETK_Struct,
+ Struct,
/// The "__interface" keyword introduces the elaborated-type-specifier.
- ETK_Interface,
+ Interface,
/// The "union" keyword introduces the elaborated-type-specifier.
- ETK_Union,
+ Union,
/// The "class" keyword introduces the elaborated-type-specifier.
- ETK_Class,
+ Class,
/// The "enum" keyword introduces the elaborated-type-specifier.
- ETK_Enum,
+ Enum,
/// The "typename" keyword precedes the qualified type name, e.g.,
/// \c typename T::type.
- ETK_Typename,
+ Typename,
/// No keyword precedes the qualified type name.
- ETK_None
+ None
+};
+
+/// The kind of a tag type.
+enum class TagTypeKind {
+ /// The "struct" keyword.
+ Struct,
+
+ /// The "__interface" keyword.
+ Interface,
+
+ /// The "union" keyword.
+ Union,
+
+ /// The "class" keyword.
+ Class,
+
+ /// The "enum" keyword.
+ Enum
};
/// A helper class for Type nodes having an ElaboratedTypeKeyword.
@@ -5359,7 +5795,7 @@ protected:
TypeWithKeyword(ElaboratedTypeKeyword Keyword, TypeClass tc,
QualType Canonical, TypeDependence Dependence)
: Type(tc, Canonical, Dependence) {
- TypeWithKeywordBits.Keyword = Keyword;
+ TypeWithKeywordBits.Keyword = llvm::to_underlying(Keyword);
}
public:
@@ -5436,9 +5872,6 @@ class ElaboratedType final
ElaboratedTypeBits.HasOwnedTagDecl = true;
*getTrailingObjects<TagDecl *>() = OwnedTagDecl;
}
- assert(!(Keyword == ETK_None && NNS == nullptr) &&
- "ElaboratedType cannot have elaborated type keyword "
- "and name qualifier both null.");
}
public:
@@ -5468,7 +5901,7 @@ public:
static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
NestedNameSpecifier *NNS, QualType NamedType,
TagDecl *OwnedTagDecl) {
- ID.AddInteger(Keyword);
+ ID.AddInteger(llvm::to_underlying(Keyword));
ID.AddPointer(NNS);
NamedType.Profile(ID);
ID.AddPointer(OwnedTagDecl);
@@ -5527,7 +5960,7 @@ public:
static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
NestedNameSpecifier *NNS, const IdentifierInfo *Name) {
- ID.AddInteger(Keyword);
+ ID.AddInteger(llvm::to_underlying(Keyword));
ID.AddPointer(NNS);
ID.AddPointer(Name);
}
@@ -5540,9 +5973,8 @@ public:
/// Represents a template specialization type whose template cannot be
/// resolved, e.g.
/// A<T>::template B<T>
-class alignas(8) DependentTemplateSpecializationType
- : public TypeWithKeyword,
- public llvm::FoldingSetNode {
+class DependentTemplateSpecializationType : public TypeWithKeyword,
+ public llvm::FoldingSetNode {
friend class ASTContext; // ASTContext creates these
/// The nested name specifier containing the qualifier.
@@ -5557,44 +5989,20 @@ class alignas(8) DependentTemplateSpecializationType
ArrayRef<TemplateArgument> Args,
QualType Canon);
- const TemplateArgument *getArgBuffer() const {
- return reinterpret_cast<const TemplateArgument*>(this+1);
- }
-
- TemplateArgument *getArgBuffer() {
- return reinterpret_cast<TemplateArgument*>(this+1);
- }
-
public:
NestedNameSpecifier *getQualifier() const { return NNS; }
const IdentifierInfo *getIdentifier() const { return Name; }
- /// Retrieve the template arguments.
- const TemplateArgument *getArgs() const {
- return getArgBuffer();
- }
-
- /// Retrieve the number of template arguments.
- unsigned getNumArgs() const {
- return DependentTemplateSpecializationTypeBits.NumArgs;
- }
-
- const TemplateArgument &getArg(unsigned Idx) const; // in TemplateBase.h
-
ArrayRef<TemplateArgument> template_arguments() const {
- return {getArgs(), getNumArgs()};
+ return {reinterpret_cast<const TemplateArgument *>(this + 1),
+ DependentTemplateSpecializationTypeBits.NumArgs};
}
- using iterator = const TemplateArgument *;
-
- iterator begin() const { return getArgs(); }
- iterator end() const; // inline in TemplateBase.h
-
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
- Profile(ID, Context, getKeyword(), NNS, Name, {getArgs(), getNumArgs()});
+ Profile(ID, Context, getKeyword(), NNS, Name, template_arguments());
}
static void Profile(llvm::FoldingSetNodeID &ID,
@@ -5638,7 +6046,7 @@ class PackExpansionType : public Type, public llvm::FoldingSetNode {
QualType Pattern;
PackExpansionType(QualType Pattern, QualType Canon,
- Optional<unsigned> NumExpansions)
+ std::optional<unsigned> NumExpansions)
: Type(PackExpansion, Canon,
(Pattern->getDependence() | TypeDependence::Dependent |
TypeDependence::Instantiation) &
@@ -5656,10 +6064,10 @@ public:
/// Retrieve the number of expansions that this pack expansion will
/// generate, if known.
- Optional<unsigned> getNumExpansions() const {
+ std::optional<unsigned> getNumExpansions() const {
if (PackExpansionTypeBits.NumExpansions)
return PackExpansionTypeBits.NumExpansions - 1;
- return None;
+ return std::nullopt;
}
bool isSugared() const { return false; }
@@ -5670,9 +6078,9 @@ public:
}
static void Profile(llvm::FoldingSetNodeID &ID, QualType Pattern,
- Optional<unsigned> NumExpansions) {
+ std::optional<unsigned> NumExpansions) {
ID.AddPointer(Pattern.getAsOpaquePtr());
- ID.AddBoolean(NumExpansions.hasValue());
+ ID.AddBoolean(NumExpansions.has_value());
if (NumExpansions)
ID.AddInteger(*NumExpansions);
}
@@ -5927,8 +6335,7 @@ public:
/// Retrieve the type arguments of this object type as they were
/// written.
ArrayRef<QualType> getTypeArgsAsWritten() const {
- return llvm::makeArrayRef(getTypeArgStorage(),
- ObjCObjectTypeBits.NumTypeArgs);
+ return llvm::ArrayRef(getTypeArgStorage(), ObjCObjectTypeBits.NumTypeArgs);
}
/// Whether this is a "__kindof" type as written.
@@ -6018,10 +6425,9 @@ inline ObjCProtocolDecl **ObjCTypeParamType::getProtocolStorageImpl() {
class ObjCInterfaceType : public ObjCObjectType {
friend class ASTContext; // ASTContext creates these.
friend class ASTReader;
- friend class ObjCInterfaceDecl;
template <class T> friend class serialization::AbstractTypeReader;
- mutable ObjCInterfaceDecl *Decl;
+ ObjCInterfaceDecl *Decl;
ObjCInterfaceType(const ObjCInterfaceDecl *D)
: ObjCObjectType(Nonce_ObjCInterface),
@@ -6029,7 +6435,7 @@ class ObjCInterfaceType : public ObjCObjectType {
public:
/// Get the declaration of this interface.
- ObjCInterfaceDecl *getDecl() const { return Decl; }
+ ObjCInterfaceDecl *getDecl() const;
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
@@ -6306,13 +6712,14 @@ public:
};
/// A fixed int type of a specified bitwidth.
-class ExtIntType final : public Type, public llvm::FoldingSetNode {
+class BitIntType final : public Type, public llvm::FoldingSetNode {
friend class ASTContext;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsUnsigned : 1;
unsigned NumBits : 24;
protected:
- ExtIntType(bool isUnsigned, unsigned NumBits);
+ BitIntType(bool isUnsigned, unsigned NumBits);
public:
bool isUnsigned() const { return IsUnsigned; }
@@ -6322,7 +6729,7 @@ public:
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
- void Profile(llvm::FoldingSetNodeID &ID) {
+ void Profile(llvm::FoldingSetNodeID &ID) const {
Profile(ID, isUnsigned(), getNumBits());
}
@@ -6332,17 +6739,15 @@ public:
ID.AddInteger(NumBits);
}
- static bool classof(const Type *T) { return T->getTypeClass() == ExtInt; }
+ static bool classof(const Type *T) { return T->getTypeClass() == BitInt; }
};
-class DependentExtIntType final : public Type, public llvm::FoldingSetNode {
+class DependentBitIntType final : public Type, public llvm::FoldingSetNode {
friend class ASTContext;
- const ASTContext &Context;
llvm::PointerIntPair<Expr*, 1, bool> ExprAndUnsigned;
protected:
- DependentExtIntType(const ASTContext &Context, bool IsUnsigned,
- Expr *NumBits);
+ DependentBitIntType(bool IsUnsigned, Expr *NumBits);
public:
bool isUnsigned() const;
@@ -6352,14 +6757,14 @@ public:
bool isSugared() const { return false; }
QualType desugar() const { return QualType(this, 0); }
- void Profile(llvm::FoldingSetNodeID &ID) {
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
Profile(ID, Context, isUnsigned(), getNumBitsExpr());
}
static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool IsUnsigned, Expr *NumBitsExpr);
static bool classof(const Type *T) {
- return T->getTypeClass() == DependentExtInt;
+ return T->getTypeClass() == DependentBitInt;
}
};
@@ -6402,7 +6807,7 @@ class alignas(8) TypeSourceInfo {
QualType Ty;
- TypeSourceInfo(QualType ty) : Ty(ty) {}
+ TypeSourceInfo(QualType ty, size_t DataSize); // implemented in TypeLoc.h
public:
/// Return the type wrapped by this type source info.
@@ -6432,6 +6837,19 @@ inline const Type *QualType::getTypePtrOrNull() const {
return (isNull() ? nullptr : getCommonPtr()->BaseType);
}
+inline bool QualType::isReferenceable() const {
+ // C++ [defns.referenceable]
+ // type that is either an object type, a function type that does not have
+ // cv-qualifiers or a ref-qualifier, or a reference type.
+ const Type &Self = **this;
+ if (Self.isObjectType() || Self.isReferenceType())
+ return true;
+ if (const auto *F = Self.getAs<FunctionProtoType>())
+ return F->getMethodQuals().empty() && F->getRefQualifier() == RQ_None;
+
+ return false;
+}
+
inline SplitQualType QualType::split() const {
if (!hasLocalNonFastQualifiers())
return SplitQualType(getTypePtrUnsafe(),
@@ -6530,15 +6948,6 @@ inline void QualType::removeLocalVolatile() {
removeLocalFastQualifiers(Qualifiers::Volatile);
}
-inline void QualType::removeLocalCVRQualifiers(unsigned Mask) {
- assert(!(Mask & ~Qualifiers::CVRMask) && "mask has non-CVR bits");
- static_assert((int)Qualifiers::CVRMask == (int)Qualifiers::FastMask,
- "Fast bits differ from CVR bits!");
-
- // Fast path: we don't need to touch the slow qualifiers.
- removeLocalFastQualifiers(Mask);
-}
-
/// Check if this type has any address space qualifier.
inline bool QualType::hasAddressSpace() const {
return getQualifiers().hasAddressSpace();
@@ -6782,6 +7191,12 @@ inline bool Type::isExtVectorType() const {
return isa<ExtVectorType>(CanonicalType);
}
+inline bool Type::isExtVectorBoolType() const {
+ if (!isExtVectorType())
+ return false;
+ return cast<ExtVectorType>(CanonicalType)->getElementType()->isBooleanType();
+}
+
inline bool Type::isMatrixType() const {
return isa<MatrixType>(CanonicalType);
}
@@ -6890,8 +7305,8 @@ inline bool Type::isPipeType() const {
return isa<PipeType>(CanonicalType);
}
-inline bool Type::isExtIntType() const {
- return isa<ExtIntType>(CanonicalType);
+inline bool Type::isBitIntType() const {
+ return isa<BitIntType>(CanonicalType);
}
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
@@ -6976,6 +7391,10 @@ inline bool Type::isFloat128Type() const {
return isSpecificBuiltinType(BuiltinType::Float128);
}
+inline bool Type::isIbm128Type() const {
+ return isSpecificBuiltinType(BuiltinType::Ibm128);
+}
+
inline bool Type::isNullPtrType() const {
return isSpecificBuiltinType(BuiltinType::NullPtr);
}
@@ -6993,7 +7412,7 @@ inline bool Type::isIntegerType() const {
return IsEnumDeclComplete(ET->getDecl()) &&
!IsEnumDeclScoped(ET->getDecl());
}
- return isExtIntType();
+ return isBitIntType();
}
inline bool Type::isFixedPointType() const {
@@ -7051,7 +7470,7 @@ inline bool Type::isScalarType() const {
isa<MemberPointerType>(CanonicalType) ||
isa<ComplexType>(CanonicalType) ||
isa<ObjCObjectPointerType>(CanonicalType) ||
- isExtIntType();
+ isBitIntType();
}
inline bool Type::isIntegralOrEnumerationType() const {
@@ -7064,7 +7483,7 @@ inline bool Type::isIntegralOrEnumerationType() const {
if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
return IsEnumDeclComplete(ET->getDecl());
- return isExtIntType();
+ return isBitIntType();
}
inline bool Type::isBooleanType() const {
@@ -7126,7 +7545,7 @@ inline const Type *Type::getPointeeOrArrayElementType() const {
/// spaces into a diagnostic with <<.
inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &PD,
LangAS AS) {
- PD.AddTaggedVal(static_cast<std::underlying_type_t<LangAS>>(AS),
+ PD.AddTaggedVal(llvm::to_underlying(AS),
DiagnosticsEngine::ArgumentKind::ak_addrspace);
return PD;
}
@@ -7144,7 +7563,7 @@ inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &PD,
/// into a diagnostic with <<.
inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &PD,
QualType T) {
- PD.AddTaggedVal(reinterpret_cast<intptr_t>(T.getAsOpaquePtr()),
+ PD.AddTaggedVal(reinterpret_cast<uint64_t>(T.getAsOpaquePtr()),
DiagnosticsEngine::ak_qualtype);
return PD;
}
@@ -7191,6 +7610,8 @@ template <typename T> const T *Type::getAsAdjusted() const {
while (Ty) {
if (const auto *A = dyn_cast<AttributedType>(Ty))
Ty = A->getModifiedType().getTypePtr();
+ else if (const auto *A = dyn_cast<BTFTagAttributedType>(Ty))
+ Ty = A->getWrappedType().getTypePtr();
else if (const auto *E = dyn_cast<ElaboratedType>(Ty))
Ty = E->desugar().getTypePtr();
else if (const auto *P = dyn_cast<ParenType>(Ty))
diff --git a/contrib/llvm-project/clang/include/clang/AST/TypeLoc.h b/contrib/llvm-project/clang/include/clang/AST/TypeLoc.h
index 65e95d52c303..471deb14aba5 100644
--- a/contrib/llvm-project/clang/include/clang/AST/TypeLoc.h
+++ b/contrib/llvm-project/clang/include/clang/AST/TypeLoc.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_AST_TYPELOC_H
#define LLVM_CLANG_AST_TYPELOC_H
+#include "clang/AST/ASTConcept.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/TemplateBase.h"
@@ -240,6 +241,11 @@ private:
static SourceRange getLocalSourceRangeImpl(TypeLoc TL);
};
+inline TypeSourceInfo::TypeSourceInfo(QualType ty, size_t DataSize) : Ty(ty) {
+ // Init data attached to the object. See getTypeLoc.
+ memset(static_cast<void *>(this + 1), 0, DataSize);
+}
+
/// Return the TypeLoc for a type source info.
inline TypeLoc TypeSourceInfo::getTypeLoc() const {
// TODO: is this alignment already sufficient?
@@ -430,7 +436,7 @@ protected:
unsigned size = sizeof(LocalData);
unsigned extraAlign = asDerived()->getExtraLocalDataAlignment();
size = llvm::alignTo(size, extraAlign);
- return reinterpret_cast<char*>(Base::Data) + size;
+ return reinterpret_cast<char *>(Base::Data) + size;
}
void *getNonLocalData() const {
@@ -581,10 +587,9 @@ public:
bool needsExtraLocalData() const {
BuiltinType::Kind bk = getTypePtr()->getKind();
- return (bk >= BuiltinType::UShort && bk <= BuiltinType::UInt128)
- || (bk >= BuiltinType::Short && bk <= BuiltinType::Float128)
- || bk == BuiltinType::UChar
- || bk == BuiltinType::SChar;
+ return (bk >= BuiltinType::UShort && bk <= BuiltinType::UInt128) ||
+ (bk >= BuiltinType::Short && bk <= BuiltinType::Ibm128) ||
+ bk == BuiltinType::UChar || bk == BuiltinType::SChar;
}
unsigned getExtraLocalDataSize() const {
@@ -666,6 +671,16 @@ public:
}
};
+/// Wrapper for source info for types used via transparent aliases.
+class UsingTypeLoc : public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
+ UsingTypeLoc, UsingType> {
+public:
+ QualType getUnderlyingType() const {
+ return getTypePtr()->getUnderlyingType();
+ }
+ UsingShadowDecl *getFoundDecl() const { return getTypePtr()->getFoundDecl(); }
+};
+
/// Wrapper for source info for typedefs.
class TypedefTypeLoc : public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
TypedefTypeLoc,
@@ -805,7 +820,7 @@ public:
}
ArrayRef<SourceLocation> getProtocolLocs() const {
- return llvm::makeArrayRef(getProtocolLocArray(), getNumProtocols());
+ return llvm::ArrayRef(getProtocolLocArray(), getNumProtocols());
}
void initializeLocal(ASTContext &Context, SourceLocation Loc);
@@ -892,6 +907,29 @@ public:
}
};
+struct BTFTagAttributedLocInfo {}; // Nothing.
+
+/// Type source information for an btf_tag attributed type.
+class BTFTagAttributedTypeLoc
+ : public ConcreteTypeLoc<UnqualTypeLoc, BTFTagAttributedTypeLoc,
+ BTFTagAttributedType, BTFTagAttributedLocInfo> {
+public:
+ TypeLoc getWrappedLoc() const { return getInnerTypeLoc(); }
+
+ /// The btf_type_tag attribute.
+ const BTFTypeTagAttr *getAttr() const { return getTypePtr()->getAttr(); }
+
+ template <typename T> T *getAttrAs() {
+ return dyn_cast_or_null<T>(getAttr());
+ }
+
+ SourceRange getLocalSourceRange() const;
+
+ void initializeLocal(ASTContext &Context, SourceLocation loc) {}
+
+ QualType getInnerType() const { return getTypePtr()->getWrappedType(); }
+};
+
struct ObjCObjectTypeLocInfo {
SourceLocation TypeArgsLAngleLoc;
SourceLocation TypeArgsRAngleLoc;
@@ -988,7 +1026,7 @@ public:
ArrayRef<SourceLocation> getProtocolLocs() const {
- return llvm::makeArrayRef(getProtocolLocArray(), getNumProtocols());
+ return llvm::ArrayRef(getProtocolLocArray(), getNumProtocols());
}
bool hasBaseTypeAsWritten() const {
@@ -1415,7 +1453,7 @@ public:
}
ArrayRef<ParmVarDecl *> getParams() const {
- return llvm::makeArrayRef(getParmArray(), getNumParams());
+ return llvm::ArrayRef(getParmArray(), getNumParams());
}
// ParmVarDecls* are stored after Info, one for each parameter.
@@ -1602,7 +1640,7 @@ public:
}
unsigned getNumArgs() const {
- return getTypePtr()->getNumArgs();
+ return getTypePtr()->template_arguments().size();
}
void setArgLocInfo(unsigned i, TemplateArgumentLocInfo AI) {
@@ -1614,7 +1652,8 @@ public:
}
TemplateArgumentLoc getArgLoc(unsigned i) const {
- return TemplateArgumentLoc(getTypePtr()->getArg(i), getArgLocInfo(i));
+ return TemplateArgumentLoc(getTypePtr()->template_arguments()[i],
+ getArgLocInfo(i));
}
SourceLocation getTemplateNameLoc() const {
@@ -1649,12 +1688,12 @@ public:
setTemplateNameLoc(Loc);
setLAngleLoc(Loc);
setRAngleLoc(Loc);
- initializeArgLocs(Context, getNumArgs(), getTypePtr()->getArgs(),
+ initializeArgLocs(Context, getTypePtr()->template_arguments(),
getArgInfos(), Loc);
}
- static void initializeArgLocs(ASTContext &Context, unsigned NumArgs,
- const TemplateArgument *Args,
+ static void initializeArgLocs(ASTContext &Context,
+ ArrayRef<TemplateArgument> Args,
TemplateArgumentLocInfo *ArgInfos,
SourceLocation Loc);
@@ -1902,7 +1941,7 @@ struct TypeOfExprTypeLocInfo : public TypeofLocInfo {
};
struct TypeOfTypeLocInfo : public TypeofLocInfo {
- TypeSourceInfo* UnderlyingTInfo;
+ TypeSourceInfo *UnmodifiedTInfo;
};
template <class Derived, class TypeClass, class LocalData = TypeofLocInfo>
@@ -1970,27 +2009,50 @@ public:
class TypeOfTypeLoc
: public TypeofLikeTypeLoc<TypeOfTypeLoc, TypeOfType, TypeOfTypeLocInfo> {
public:
- QualType getUnderlyingType() const {
- return this->getTypePtr()->getUnderlyingType();
+ QualType getUnmodifiedType() const {
+ return this->getTypePtr()->getUnmodifiedType();
}
- TypeSourceInfo* getUnderlyingTInfo() const {
- return this->getLocalData()->UnderlyingTInfo;
+ TypeSourceInfo *getUnmodifiedTInfo() const {
+ return this->getLocalData()->UnmodifiedTInfo;
}
- void setUnderlyingTInfo(TypeSourceInfo* TI) const {
- this->getLocalData()->UnderlyingTInfo = TI;
+ void setUnmodifiedTInfo(TypeSourceInfo *TI) const {
+ this->getLocalData()->UnmodifiedTInfo = TI;
}
void initializeLocal(ASTContext &Context, SourceLocation Loc);
};
-// FIXME: location of the 'decltype' and parens.
-class DecltypeTypeLoc : public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
- DecltypeTypeLoc,
- DecltypeType> {
+// decltype(expression) abc;
+// ~~~~~~~~ DecltypeLoc
+// ~ RParenLoc
+// FIXME: add LParenLoc, it is tricky to support due to the limitation of
+// annotated-decltype token.
+struct DecltypeTypeLocInfo {
+ SourceLocation DecltypeLoc;
+ SourceLocation RParenLoc;
+};
+class DecltypeTypeLoc
+ : public ConcreteTypeLoc<UnqualTypeLoc, DecltypeTypeLoc, DecltypeType,
+ DecltypeTypeLocInfo> {
public:
Expr *getUnderlyingExpr() const { return getTypePtr()->getUnderlyingExpr(); }
+
+ SourceLocation getDecltypeLoc() const { return getLocalData()->DecltypeLoc; }
+ void setDecltypeLoc(SourceLocation Loc) { getLocalData()->DecltypeLoc = Loc; }
+
+ SourceLocation getRParenLoc() const { return getLocalData()->RParenLoc; }
+ void setRParenLoc(SourceLocation Loc) { getLocalData()->RParenLoc = Loc; }
+
+ SourceRange getLocalSourceRange() const {
+ return SourceRange(getDecltypeLoc(), getRParenLoc());
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ setDecltypeLoc(Loc);
+ setRParenLoc(Loc);
+ }
};
struct UnaryTransformTypeLocInfo {
@@ -2043,12 +2105,10 @@ class DeducedTypeLoc
DeducedType> {};
struct AutoTypeLocInfo : TypeSpecLocInfo {
- NestedNameSpecifierLoc NestedNameSpec;
- SourceLocation TemplateKWLoc;
- SourceLocation ConceptNameLoc;
- NamedDecl *FoundDecl;
- SourceLocation LAngleLoc;
- SourceLocation RAngleLoc;
+ // For decltype(auto).
+ SourceLocation RParenLoc;
+
+ ConceptReference *CR = nullptr;
};
class AutoTypeLoc
@@ -2061,96 +2121,95 @@ public:
return getTypePtr()->getKeyword();
}
+ bool isDecltypeAuto() const { return getTypePtr()->isDecltypeAuto(); }
+ SourceLocation getRParenLoc() const { return getLocalData()->RParenLoc; }
+ void setRParenLoc(SourceLocation Loc) { getLocalData()->RParenLoc = Loc; }
+
bool isConstrained() const {
return getTypePtr()->isConstrained();
}
- const NestedNameSpecifierLoc &getNestedNameSpecifierLoc() const {
- return getLocalData()->NestedNameSpec;
- }
+ void setConceptReference(ConceptReference *CR) { getLocalData()->CR = CR; }
- void setNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS) {
- getLocalData()->NestedNameSpec = NNS;
- }
+ ConceptReference *getConceptReference() const { return getLocalData()->CR; }
- SourceLocation getTemplateKWLoc() const {
- return getLocalData()->TemplateKWLoc;
+ // FIXME: Several of the following functions can be removed. Instead the
+ // caller can directly work with the ConceptReference.
+ const NestedNameSpecifierLoc getNestedNameSpecifierLoc() const {
+ if (const auto *CR = getConceptReference())
+ return CR->getNestedNameSpecifierLoc();
+ return NestedNameSpecifierLoc();
}
- void setTemplateKWLoc(SourceLocation Loc) {
- getLocalData()->TemplateKWLoc = Loc;
+ SourceLocation getTemplateKWLoc() const {
+ if (const auto *CR = getConceptReference())
+ return CR->getTemplateKWLoc();
+ return SourceLocation();
}
SourceLocation getConceptNameLoc() const {
- return getLocalData()->ConceptNameLoc;
- }
-
- void setConceptNameLoc(SourceLocation Loc) {
- getLocalData()->ConceptNameLoc = Loc;
+ if (const auto *CR = getConceptReference())
+ return CR->getConceptNameLoc();
+ return SourceLocation();
}
NamedDecl *getFoundDecl() const {
- return getLocalData()->FoundDecl;
- }
-
- void setFoundDecl(NamedDecl *D) {
- getLocalData()->FoundDecl = D;
+ if (const auto *CR = getConceptReference())
+ return CR->getFoundDecl();
+ return nullptr;
}
ConceptDecl *getNamedConcept() const {
- return getTypePtr()->getTypeConstraintConcept();
+ if (const auto *CR = getConceptReference())
+ return CR->getNamedConcept();
+ return nullptr;
}
- DeclarationNameInfo getConceptNameInfo() const;
+ DeclarationNameInfo getConceptNameInfo() const {
+ return getConceptReference()->getConceptNameInfo();
+ }
bool hasExplicitTemplateArgs() const {
- return getLocalData()->LAngleLoc.isValid();
+ return (getConceptReference() &&
+ getConceptReference()->getTemplateArgsAsWritten() &&
+ getConceptReference()
+ ->getTemplateArgsAsWritten()
+ ->getLAngleLoc()
+ .isValid());
}
SourceLocation getLAngleLoc() const {
- return this->getLocalData()->LAngleLoc;
- }
-
- void setLAngleLoc(SourceLocation Loc) {
- this->getLocalData()->LAngleLoc = Loc;
+ if (const auto *CR = getConceptReference())
+ if (const auto *TAAW = CR->getTemplateArgsAsWritten())
+ return TAAW->getLAngleLoc();
+ return SourceLocation();
}
SourceLocation getRAngleLoc() const {
- return this->getLocalData()->RAngleLoc;
- }
-
- void setRAngleLoc(SourceLocation Loc) {
- this->getLocalData()->RAngleLoc = Loc;
+ if (const auto *CR = getConceptReference())
+ if (const auto *TAAW = CR->getTemplateArgsAsWritten())
+ return TAAW->getRAngleLoc();
+ return SourceLocation();
}
unsigned getNumArgs() const {
- return getTypePtr()->getNumArgs();
- }
-
- void setArgLocInfo(unsigned i, TemplateArgumentLocInfo AI) {
- getArgInfos()[i] = AI;
- }
-
- TemplateArgumentLocInfo getArgLocInfo(unsigned i) const {
- return getArgInfos()[i];
+ return getTypePtr()->getTypeConstraintArguments().size();
}
TemplateArgumentLoc getArgLoc(unsigned i) const {
- return TemplateArgumentLoc(getTypePtr()->getTypeConstraintArguments()[i],
- getArgLocInfo(i));
+ const auto *CR = getConceptReference();
+ assert(CR && "No ConceptReference");
+ return CR->getTemplateArgsAsWritten()->getTemplateArgs()[i];
}
SourceRange getLocalSourceRange() const {
- return{
- isConstrained()
- ? (getNestedNameSpecifierLoc()
- ? getNestedNameSpecifierLoc().getBeginLoc()
- : (getTemplateKWLoc().isValid()
- ? getTemplateKWLoc()
- : getConceptNameLoc()))
- : getNameLoc(),
- getNameLoc()
- };
+ return {isConstrained()
+ ? (getNestedNameSpecifierLoc()
+ ? getNestedNameSpecifierLoc().getBeginLoc()
+ : (getTemplateKWLoc().isValid() ? getTemplateKWLoc()
+ : getConceptNameLoc()))
+ : getNameLoc(),
+ isDecltypeAuto() ? getRParenLoc() : getNameLoc()};
}
void copy(AutoTypeLoc Loc) {
@@ -2160,19 +2219,6 @@ public:
}
void initializeLocal(ASTContext &Context, SourceLocation Loc);
-
- unsigned getExtraLocalDataSize() const {
- return getNumArgs() * sizeof(TemplateArgumentLocInfo);
- }
-
- unsigned getExtraLocalDataAlignment() const {
- return alignof(TemplateArgumentLocInfo);
- }
-
-private:
- TemplateArgumentLocInfo *getArgInfos() const {
- return static_cast<TemplateArgumentLocInfo*>(getExtraLocalData());
- }
};
class DeducedTemplateSpecializationTypeLoc
@@ -2202,22 +2248,31 @@ class ElaboratedTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc,
ElaboratedLocInfo> {
public:
SourceLocation getElaboratedKeywordLoc() const {
- return this->getLocalData()->ElaboratedKWLoc;
+ return !isEmpty() ? getLocalData()->ElaboratedKWLoc : SourceLocation();
}
void setElaboratedKeywordLoc(SourceLocation Loc) {
- this->getLocalData()->ElaboratedKWLoc = Loc;
+ if (isEmpty()) {
+ assert(Loc.isInvalid());
+ return;
+ }
+ getLocalData()->ElaboratedKWLoc = Loc;
}
NestedNameSpecifierLoc getQualifierLoc() const {
- return NestedNameSpecifierLoc(getTypePtr()->getQualifier(),
- getLocalData()->QualifierData);
+ return !isEmpty() ? NestedNameSpecifierLoc(getTypePtr()->getQualifier(),
+ getLocalData()->QualifierData)
+ : NestedNameSpecifierLoc();
}
void setQualifierLoc(NestedNameSpecifierLoc QualifierLoc) {
- assert(QualifierLoc.getNestedNameSpecifier()
- == getTypePtr()->getQualifier() &&
+ assert(QualifierLoc.getNestedNameSpecifier() ==
+ getTypePtr()->getQualifier() &&
"Inconsistent nested-name-specifier pointer");
+ if (isEmpty()) {
+ assert(!QualifierLoc.hasQualifier());
+ return;
+ }
getLocalData()->QualifierData = QualifierLoc.getOpaqueData();
}
@@ -2234,12 +2289,24 @@ public:
void initializeLocal(ASTContext &Context, SourceLocation Loc);
- TypeLoc getNamedTypeLoc() const {
- return getInnerTypeLoc();
+ TypeLoc getNamedTypeLoc() const { return getInnerTypeLoc(); }
+
+ QualType getInnerType() const { return getTypePtr()->getNamedType(); }
+
+ bool isEmpty() const {
+ return getTypePtr()->getKeyword() == ElaboratedTypeKeyword::None &&
+ !getTypePtr()->getQualifier();
}
- QualType getInnerType() const {
- return getTypePtr()->getNamedType();
+ unsigned getLocalDataAlignment() const {
+ // FIXME: We want to return 1 here in the empty case, but
+ // there are bugs in how alignment is handled in TypeLocs
+ // that prevent this from working.
+ return ConcreteTypeLoc::getLocalDataAlignment();
+ }
+
+ unsigned getLocalDataSize() const {
+ return !isEmpty() ? ConcreteTypeLoc::getLocalDataSize() : 0;
}
void copy(ElaboratedTypeLoc Loc) {
@@ -2382,7 +2449,7 @@ public:
}
unsigned getNumArgs() const {
- return getTypePtr()->getNumArgs();
+ return getTypePtr()->template_arguments().size();
}
void setArgLocInfo(unsigned i, TemplateArgumentLocInfo AI) {
@@ -2394,7 +2461,8 @@ public:
}
TemplateArgumentLoc getArgLoc(unsigned i) const {
- return TemplateArgumentLoc(getTypePtr()->getArg(i), getArgLocInfo(i));
+ return TemplateArgumentLoc(getTypePtr()->template_arguments()[i],
+ getArgLocInfo(i));
}
SourceRange getLocalSourceRange() const {
@@ -2551,6 +2619,8 @@ inline T TypeLoc::getAsAdjusted() const {
Cur = PTL.getInnerLoc();
else if (auto ATL = Cur.getAs<AttributedTypeLoc>())
Cur = ATL.getModifiedLoc();
+ else if (auto ATL = Cur.getAs<BTFTagAttributedTypeLoc>())
+ Cur = ATL.getWrappedLoc();
else if (auto ETL = Cur.getAs<ElaboratedTypeLoc>())
Cur = ETL.getNamedTypeLoc();
else if (auto ATL = Cur.getAs<AdjustedTypeLoc>())
@@ -2562,12 +2632,28 @@ inline T TypeLoc::getAsAdjusted() const {
}
return Cur.getAs<T>();
}
-class ExtIntTypeLoc final
- : public InheritingConcreteTypeLoc<TypeSpecTypeLoc, ExtIntTypeLoc,
- ExtIntType> {};
-class DependentExtIntTypeLoc final
- : public InheritingConcreteTypeLoc<TypeSpecTypeLoc, DependentExtIntTypeLoc,
- DependentExtIntType> {};
+class BitIntTypeLoc final
+ : public InheritingConcreteTypeLoc<TypeSpecTypeLoc, BitIntTypeLoc,
+ BitIntType> {};
+class DependentBitIntTypeLoc final
+ : public InheritingConcreteTypeLoc<TypeSpecTypeLoc, DependentBitIntTypeLoc,
+ DependentBitIntType> {};
+
+class ObjCProtocolLoc {
+ ObjCProtocolDecl *Protocol = nullptr;
+ SourceLocation Loc = SourceLocation();
+
+public:
+ ObjCProtocolLoc(ObjCProtocolDecl *protocol, SourceLocation loc)
+ : Protocol(protocol), Loc(loc) {}
+ ObjCProtocolDecl *getProtocol() const { return Protocol; }
+ SourceLocation getLocation() const { return Loc; }
+
+ /// The source range is just the protocol name.
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(Loc, Loc);
+ }
+};
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/AST/TypeOrdering.h b/contrib/llvm-project/clang/include/clang/AST/TypeOrdering.h
index 6630105136f5..8037f98cc965 100644
--- a/contrib/llvm-project/clang/include/clang/AST/TypeOrdering.h
+++ b/contrib/llvm-project/clang/include/clang/AST/TypeOrdering.h
@@ -34,7 +34,6 @@ struct QualTypeOrdering {
}
namespace llvm {
- template<class> struct DenseMapInfo;
template<> struct DenseMapInfo<clang::QualType> {
static inline clang::QualType getEmptyKey() { return clang::QualType(); }
diff --git a/contrib/llvm-project/clang/include/clang/AST/TypeProperties.td b/contrib/llvm-project/clang/include/clang/AST/TypeProperties.td
index 438d5af5a2e2..682c869b0c58 100644
--- a/contrib/llvm-project/clang/include/clang/AST/TypeProperties.td
+++ b/contrib/llvm-project/clang/include/clang/AST/TypeProperties.td
@@ -11,7 +11,7 @@ include "clang/Basic/TypeNodes.td"
let Class = ComplexType in {
def : Property<"elementType", QualType> {
- let Read = [{ node->getElementType() }];
+ let Read = [{ node->getElementType() }];
}
def : Creator<[{ return ctx.getComplexType(elementType); }]>;
@@ -323,6 +323,9 @@ let Class = FunctionProtoType in {
? node->getExtParameterInfos()
: llvm::ArrayRef<FunctionProtoType::ExtParameterInfo>() }];
}
+ def : Property<"AArch64SMEAttributes", UInt32> {
+ let Read = [{ node->getAArch64SMEAttributes() }];
+ }
def : Creator<[{
auto extInfo = FunctionType::ExtInfo(noReturn, hasRegParm, regParm,
@@ -338,6 +341,7 @@ let Class = FunctionProtoType in {
epi.ExceptionSpec = exceptionSpecifier;
epi.ExtParameterInfos =
extParameterInfo.empty() ? nullptr : extParameterInfo.data();
+ epi.AArch64SMEAttributes = AArch64SMEAttributes;
return ctx.getFunctionType(returnType, parameters, epi);
}]>;
}
@@ -358,7 +362,20 @@ let Class = UnresolvedUsingType in {
}
def : Creator<[{
- return ctx.getTypeDeclType(cast<UnresolvedUsingTypenameDecl>(declaration));
+ return ctx.getUnresolvedUsingType(cast<UnresolvedUsingTypenameDecl>(declaration));
+ }]>;
+}
+
+let Class = UsingType in {
+ def : Property<"foundDeclaration", UsingShadowDeclRef> {
+ let Read = [{ node->getFoundDecl() }];
+ }
+ def : Property<"underlyingType", QualType> {
+ let Read = [{ node->getUnderlyingType() }];
+ }
+
+ def : Creator<[{
+ return ctx.getUsingType(foundDeclaration, underlyingType);
}]>;
}
@@ -366,16 +383,12 @@ let Class = TypedefType in {
def : Property<"declaration", DeclRef> {
let Read = [{ node->getDecl() }];
}
- def : Property<"canonicalType", Optional<QualType>> {
- let Read = [{ makeOptionalFromNullable(node->getCanonicalTypeInternal()) }];
+ def : Property<"underlyingType", QualType> {
+ let Read = [{ node->desugar() }];
}
def : Creator<[{
- QualType finalCanonicalType =
- canonicalType ? ctx.getCanonicalType(*canonicalType)
- : QualType();
- return ctx.getTypedefType(cast<TypedefNameDecl>(declaration),
- finalCanonicalType);
+ return ctx.getTypedefType(cast<TypedefNameDecl>(declaration), underlyingType);
}]>;
}
@@ -384,18 +397,26 @@ let Class = TypeOfExprType in {
let Read = [{ node->getUnderlyingExpr() }];
}
+ def : Property<"kind", TypeOfKind> {
+ let Read = [{ node->getKind() }];
+ }
+
def : Creator<[{
- return ctx.getTypeOfExprType(expression);
+ return ctx.getTypeOfExprType(expression, kind);
}]>;
}
let Class = TypeOfType in {
- def : Property<"underlyingType", QualType> {
- let Read = [{ node->getUnderlyingType() }];
+ def : Property<"unmodifiedType", QualType> {
+ let Read = [{ node->getUnmodifiedType() }];
+ }
+
+ def : Property<"kind", TypeOfKind> {
+ let Read = [{ node->getKind() }];
}
def : Creator<[{
- return ctx.getTypeOfType(underlyingType);
+ return ctx.getTypeOfType(unmodifiedType, kind);
}]>;
}
@@ -574,7 +595,7 @@ let Class = ParenType in {
def : Creator<[{
return ctx.getParenType(innerType);
- }]>;
+ }]>;
}
let Class = MacroQualifiedType in {
@@ -606,6 +627,19 @@ let Class = AttributedType in {
}]>;
}
+let Class = BTFTagAttributedType in {
+ def : Property<"attr", BTFTypeTagAttr> {
+ let Read = [{ node->getAttr() }];
+ }
+ def : Property<"wrappedType", QualType> {
+ let Read = [{ node->getWrappedType() }];
+ }
+
+ def : Creator<[{
+ return ctx.getBTFTagAttributedType(attr, wrappedType);
+ }]>;
+}
+
let Class = DependentAddressSpaceType in {
def : Property<"pointeeType", QualType> {
let Read = [{ node->getPointeeType() }];
@@ -636,16 +670,16 @@ let Class = TemplateSpecializationType in {
def : Property<"underlyingType", Optional<QualType>> {
let Read = [{
node->isTypeAlias()
- ? llvm::Optional<QualType>(node->getAliasedType())
+ ? std::optional<QualType>(node->getAliasedType())
: node->isCanonicalUnqualified()
- ? llvm::None
- : llvm::Optional<QualType>(node->getCanonicalTypeInternal())
+ ? std::nullopt
+ : std::optional<QualType>(node->getCanonicalTypeInternal())
}];
}
def : Creator<[{
QualType result;
- if (!underlyingType.hasValue()) {
+ if (!underlyingType) {
result = ctx.getCanonicalTemplateSpecializationType(templateName,
templateArguments);
} else {
@@ -702,18 +736,23 @@ let Class = TemplateTypeParmType in {
}
let Class = SubstTemplateTypeParmType in {
- def : Property<"replacedParameter", QualType> {
- let Read = [{ QualType(node->getReplacedParameter(), 0) }];
- }
def : Property<"replacementType", QualType> {
let Read = [{ node->getReplacementType() }];
}
+ def : Property<"associatedDecl", DeclRef> {
+ let Read = [{ node->getAssociatedDecl() }];
+ }
+ def : Property<"Index", UInt32> {
+ let Read = [{ node->getIndex() }];
+ }
+ def : Property<"PackIndex", Optional<UInt32>> {
+ let Read = [{ node->getPackIndex() }];
+ }
+ // The call to getCanonicalType here existed in ASTReader.cpp, too.
def : Creator<[{
- // The call to getCanonicalType here existed in ASTReader.cpp, too.
return ctx.getSubstTemplateTypeParmType(
- cast<TemplateTypeParmType>(replacedParameter),
- ctx.getCanonicalType(replacementType));
+ replacementType, associatedDecl, Index, PackIndex);
}]>;
}
@@ -732,8 +771,14 @@ let Class = PackExpansionType in {
}
let Class = SubstTemplateTypeParmPackType in {
- def : Property<"replacedParameter", QualType> {
- let Read = [{ QualType(node->getReplacedParameter(), 0) }];
+ def : Property<"associatedDecl", DeclRef> {
+ let Read = [{ node->getAssociatedDecl() }];
+ }
+ def : Property<"Index", UInt32> {
+ let Read = [{ node->getIndex() }];
+ }
+ def : Property<"Final", Bool> {
+ let Read = [{ node->getFinal() }];
}
def : Property<"replacementPack", TemplateArgument> {
let Read = [{ node->getArgumentPack() }];
@@ -741,8 +786,7 @@ let Class = SubstTemplateTypeParmPackType in {
def : Creator<[{
return ctx.getSubstTemplateTypeParmPackType(
- cast<TemplateTypeParmType>(replacedParameter),
- replacementPack);
+ associatedDecl, Index, Final, replacementPack);
}]>;
}
@@ -773,6 +817,10 @@ let Class = BuiltinType in {
case BuiltinType::ID: return ctx.SINGLETON_ID;
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(NAME, ID, SINGLETON_ID) \
+ case BuiltinType::ID: return ctx.SINGLETON_ID;
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
+
#define BUILTIN_TYPE(ID, SINGLETON_ID) \
case BuiltinType::ID: return ctx.SINGLETON_ID;
#include "clang/AST/BuiltinTypes.def"
@@ -794,8 +842,8 @@ let Class = DependentNameType in {
def : Property<"underlyingType", Optional<QualType>> {
let Read = [{
node->isCanonicalUnqualified()
- ? llvm::None
- : llvm::Optional<QualType>(node->getCanonicalTypeInternal())
+ ? std::nullopt
+ : std::optional<QualType>(node->getCanonicalTypeInternal())
}];
}
@@ -849,7 +897,7 @@ let Class = ObjCInterfaceType in {
let Class = ObjCTypeParamType in {
def : Property<"declaration", ObjCTypeParamDeclRef> {
let Read = [{ node->getDecl() }];
- }
+ }
def : Property<"qualifiers", Array<ObjCProtocolDeclRef>> {
let Read = [{ node->getProtocols() }];
}
@@ -882,7 +930,7 @@ let Class = PipeType in {
}]>;
}
-let Class = ExtIntType in {
+let Class = BitIntType in {
def : Property<"isUnsigned", Bool> {
let Read = [{ node->isUnsigned() }];
}
@@ -891,11 +939,11 @@ let Class = ExtIntType in {
}
def : Creator<[{
- return ctx.getExtIntType(isUnsigned, numBits);
+ return ctx.getBitIntType(isUnsigned, numBits);
}]>;
}
-let Class = DependentExtIntType in {
+let Class = DependentBitIntType in {
def : Property<"isUnsigned", Bool> {
let Read = [{ node->isUnsigned() }];
}
@@ -903,6 +951,6 @@ let Class = DependentExtIntType in {
let Read = [{ node->getNumBitsExpr() }];
}
def : Creator<[{
- return ctx.getDependentExtIntType(isUnsigned, numBitsExpr);
+ return ctx.getDependentBitIntType(isUnsigned, numBitsExpr);
}]>;
}
diff --git a/contrib/llvm-project/clang/include/clang/AST/UnresolvedSet.h b/contrib/llvm-project/clang/include/clang/AST/UnresolvedSet.h
index c75aa0785a63..ee31be969b6e 100644
--- a/contrib/llvm-project/clang/include/clang/AST/UnresolvedSet.h
+++ b/contrib/llvm-project/clang/include/clang/AST/UnresolvedSet.h
@@ -114,14 +114,22 @@ public:
I.I->set(New, AS);
}
- void erase(unsigned I) { decls()[I] = decls().pop_back_val(); }
+ void erase(unsigned I) {
+ auto val = decls().pop_back_val();
+ if (I < size())
+ decls()[I] = val;
+ }
- void erase(iterator I) { *I.I = decls().pop_back_val(); }
+ void erase(iterator I) {
+ auto val = decls().pop_back_val();
+ if (I != end())
+ *I.I = val;
+ }
void setAccess(iterator I, AccessSpecifier AS) { I.I->setAccess(AS); }
void clear() { decls().clear(); }
- void set_size(unsigned N) { decls().set_size(N); }
+ void truncate(unsigned N) { decls().truncate(N); }
bool empty() const { return decls().empty(); }
unsigned size() const { return decls().size(); }
diff --git a/contrib/llvm-project/clang/include/clang/AST/VTableBuilder.h b/contrib/llvm-project/clang/include/clang/AST/VTableBuilder.h
index e451f3f861b7..fbf6c041a1ec 100644
--- a/contrib/llvm-project/clang/include/clang/AST/VTableBuilder.h
+++ b/contrib/llvm-project/clang/include/clang/AST/VTableBuilder.h
@@ -279,7 +279,7 @@ public:
AddressPointLocation getAddressPoint(BaseSubobject Base) const {
assert(AddressPoints.count(Base) && "Did not find address point!");
- return AddressPoints.find(Base)->second;
+ return AddressPoints.lookup(Base);
}
const AddressPointsMapTy &getAddressPoints() const {
@@ -563,8 +563,6 @@ private:
llvm::DenseMap<const CXXRecordDecl *, std::unique_ptr<VirtualBaseInfo>>
VBaseInfo;
- void enumerateVFPtrs(const CXXRecordDecl *ForClass, VPtrInfoVector &Result);
-
void computeVTableRelatedInformation(const CXXRecordDecl *RD) override;
void dumpMethodLocations(const CXXRecordDecl *RD,
diff --git a/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchFinder.h b/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchFinder.h
index 91024f9425e0..a387d9037b7d 100644
--- a/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchFinder.h
+++ b/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchFinder.h
@@ -44,6 +44,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Timer.h"
+#include <optional>
namespace clang {
@@ -115,7 +116,7 @@ public:
/// the result nodes. This API is temporary to facilitate
/// third parties porting existing code to the default
/// behavior of clang-tidy.
- virtual llvm::Optional<TraversalKind> getCheckTraversalKind() const;
+ virtual std::optional<TraversalKind> getCheckTraversalKind() const;
};
/// Called when parsing is finished. Intended for testing only.
@@ -137,7 +138,7 @@ public:
/// Enables per-check timers.
///
/// It prints a report after match.
- llvm::Optional<Profiling> CheckProfiling;
+ std::optional<Profiling> CheckProfiling;
};
MatchFinder(MatchFinderOptions Options = MatchFinderOptions());
@@ -167,6 +168,7 @@ public:
MatchCallback *Action);
void addMatcher(const TemplateArgumentLocMatcher &NodeMatch,
MatchCallback *Action);
+ void addMatcher(const AttrMatcher &NodeMatch, MatchCallback *Action);
/// @}
/// Adds a matcher to execute when running over the AST.
@@ -219,6 +221,7 @@ public:
std::vector<std::pair<CXXCtorInitializerMatcher, MatchCallback *>> CtorInit;
std::vector<std::pair<TemplateArgumentLocMatcher, MatchCallback *>>
TemplateArgumentLoc;
+ std::vector<std::pair<AttrMatcher, MatchCallback *>> Attr;
/// All the callbacks in one container to simplify iteration.
llvm::SmallPtrSet<MatchCallback *, 16> AllCallbacks;
};
@@ -287,8 +290,8 @@ public:
Nodes.push_back(Result.Nodes);
}
- llvm::Optional<TraversalKind> getCheckTraversalKind() const override {
- return llvm::None;
+ std::optional<TraversalKind> getCheckTraversalKind() const override {
+ return std::nullopt;
}
SmallVector<BoundNodes, 1> Nodes;
diff --git a/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchers.h b/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchers.h
index 8e3ee6cb9e7e..dc1f49525a00 100644
--- a/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchers.h
+++ b/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchers.h
@@ -81,6 +81,7 @@
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
@@ -90,6 +91,7 @@
#include <cstddef>
#include <iterator>
#include <limits>
+#include <optional>
#include <string>
#include <utility>
#include <vector>
@@ -148,6 +150,8 @@ using CXXBaseSpecifierMatcher = internal::Matcher<CXXBaseSpecifier>;
using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>;
using TemplateArgumentMatcher = internal::Matcher<TemplateArgument>;
using TemplateArgumentLocMatcher = internal::Matcher<TemplateArgumentLoc>;
+using LambdaCaptureMatcher = internal::Matcher<LambdaCapture>;
+using AttrMatcher = internal::Matcher<Attr>;
/// @}
/// Matches any node.
@@ -296,7 +300,7 @@ AST_POLYMORPHIC_MATCHER_REGEX(isExpansionInFileMatching,
return false;
}
auto FileEntry =
- SourceManager.getFileEntryForID(SourceManager.getFileID(ExpansionLoc));
+ SourceManager.getFileEntryRefForID(SourceManager.getFileID(ExpansionLoc));
if (!FileEntry) {
return false;
}
@@ -307,7 +311,7 @@ AST_POLYMORPHIC_MATCHER_REGEX(isExpansionInFileMatching,
/// Matches statements that are (transitively) expanded from the named macro.
/// Does not match if only part of the statement is expanded from that macro or
-/// if different parts of the the statement are expanded from different
+/// if different parts of the statement are expanded from different
/// appearances of the macro.
AST_POLYMORPHIC_MATCHER_P(isExpandedFromMacro,
AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc),
@@ -315,10 +319,10 @@ AST_POLYMORPHIC_MATCHER_P(isExpandedFromMacro,
// Verifies that the statement' beginning and ending are both expanded from
// the same instance of the given macro.
auto& Context = Finder->getASTContext();
- llvm::Optional<SourceLocation> B =
+ std::optional<SourceLocation> B =
internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Context);
if (!B) return false;
- llvm::Optional<SourceLocation> E =
+ std::optional<SourceLocation> E =
internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Context);
if (!E) return false;
return *B == *E;
@@ -752,9 +756,11 @@ AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate,
InnerMatcher.matches(*Decl, Finder, Builder));
}
-/// Matches a declaration that has been implicitly added
-/// by the compiler (eg. implicit default/copy constructors).
-AST_MATCHER(Decl, isImplicit) {
+/// Matches an entity that has been implicitly added by the compiler (e.g.
+/// implicit default/copy constructors).
+AST_POLYMORPHIC_MATCHER(isImplicit,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Attr,
+ LambdaCapture)) {
return Node.isImplicit();
}
@@ -1097,9 +1103,9 @@ AST_POLYMORPHIC_MATCHER_P(
/// template<typename T> struct A {};
/// A<X> a;
/// \endcode
-/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
-/// refersToType(class(hasName("X")))))
-/// matches the specialization \c A<X>
+/// classTemplateSpecializationDecl(hasAnyTemplateArgument(refersToType(
+/// recordType(hasDeclaration(recordDecl(hasName("X")))))))
+/// matches the specialization of \c struct A generated by \c A<X>.
AST_MATCHER_P(TemplateArgument, refersToType,
internal::Matcher<QualType>, InnerMatcher) {
if (Node.getKind() != TemplateArgument::Type)
@@ -1328,6 +1334,16 @@ extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl>
cxxDeductionGuideDecl;
+/// Matches concept declarations.
+///
+/// Example matches integral
+/// \code
+/// template<typename T>
+/// concept integral = std::is_integral_v<T>;
+/// \endcode
+extern const internal::VariadicDynCastAllOfMatcher<Decl, ConceptDecl>
+ conceptDecl;
+
/// Matches variable declarations.
///
/// Note: this does not match declarations of member variables, which are
@@ -1511,6 +1527,15 @@ extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr>
objcMessageExpr;
+/// Matches ObjectiveC String literal expressions.
+///
+/// Example matches @"abcd"
+/// \code
+/// NSString *s = @"abcd";
+/// \endcode
+extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCStringLiteral>
+ objcStringLiteral;
+
/// Matches Objective-C interface declarations.
///
/// Example matches Foo
@@ -1956,6 +1981,45 @@ extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
cxxNoexceptExpr;
+/// Matches a loop initializing the elements of an array in a number of contexts:
+/// * in the implicit copy/move constructor for a class with an array member
+/// * when a lambda-expression captures an array by value
+/// * when a decomposition declaration decomposes an array
+///
+/// Given
+/// \code
+/// void testLambdaCapture() {
+/// int a[10];
+/// auto Lam1 = [a]() {
+/// return;
+/// };
+/// }
+/// \endcode
+/// arrayInitLoopExpr() matches the implicit loop that initializes each element of
+/// the implicit array field inside the lambda object, that represents the array `a`
+/// captured by value.
+extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArrayInitLoopExpr>
+ arrayInitLoopExpr;
+
+/// The arrayInitIndexExpr consists of two subexpressions: a common expression
+/// (the source array) that is evaluated once up-front, and a per-element initializer
+/// that runs once for each array element. Within the per-element initializer,
+/// the current index may be obtained via an ArrayInitIndexExpr.
+///
+/// Given
+/// \code
+/// void testStructBinding() {
+/// int a[2] = {1, 2};
+/// auto [x, y] = a;
+/// }
+/// \endcode
+/// arrayInitIndexExpr() matches the array index that implicitly iterates
+/// over the array `a` to copy each element to the anonymous array
+/// that backs the structured binding `[x, y]` elements of which are
+/// referred to by their aliases `x` and `y`.
+extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArrayInitIndexExpr>
+ arrayInitIndexExpr;
+
/// Matches array subscript expressions.
///
/// Given
@@ -1998,6 +2062,18 @@ extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
cxxOperatorCallExpr;
+/// Matches C++17 fold expressions.
+///
+/// Example matches `(0 + ... + args)`:
+/// \code
+/// template <typename... Args>
+/// auto sum(Args... args) {
+/// return (0 + ... + args);
+/// }
+/// \endcode
+extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFoldExpr>
+ cxxFoldExpr;
+
/// Matches rewritten binary operators
///
/// Example matches use of "<":
@@ -2436,6 +2512,17 @@ extern const internal::VariadicDynCastAllOfMatcher<Stmt, DependentCoawaitExpr>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoyieldExpr>
coyieldExpr;
+/// Matches coroutine body statements.
+///
+/// coroutineBodyStmt() matches the coroutine below
+/// \code
+/// generator<int> gen() {
+/// co_return;
+/// }
+/// \endcode
+extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoroutineBodyStmt>
+ coroutineBodyStmt;
+
/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
@@ -2444,6 +2531,10 @@ extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr>
chooseExpr;
+/// Matches builtin function __builtin_convertvector.
+extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConvertVectorExpr>
+ convertVectorExpr;
+
/// Matches GNU __null expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr>
gnuNullExpr;
@@ -2519,7 +2610,7 @@ extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr>
/// Matches a C++ static_assert declaration.
///
/// Example:
-/// staticAssertExpr()
+/// staticAssertDecl()
/// matches
/// static_assert(sizeof(S) == sizeof(int))
/// in
@@ -3489,8 +3580,8 @@ internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) {
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasParentMatcher,
- internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
- internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
+ internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc, Attr>,
+ internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc, Attr>>
hasParent;
/// Matches AST nodes that have an ancestor that matches the provided
@@ -3506,8 +3597,8 @@ extern const internal::ArgumentAdaptingMatcherFunc<
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasAncestorMatcher,
- internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
- internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
+ internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc, Attr>,
+ internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc, Attr>>
hasAncestor;
/// Matches if the provided matcher does not match.
@@ -3721,10 +3812,9 @@ AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>,
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) {
Selector Sel = Node.getSelector();
- return BaseName.compare(Sel.getAsString()) == 0;
+ return BaseName == Sel.getAsString();
}
-
/// Matches when at least one of the supplied string equals to the
/// Selector.getAsString()
///
@@ -3803,7 +3893,7 @@ AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
return Node.getSelector().getNumArgs() == N;
}
-/// Matches if the call expression's callee expression matches.
+/// Matches if the call or fold expression's callee expression matches.
///
/// Given
/// \code
@@ -3815,19 +3905,39 @@ AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
/// with callee(...)
/// matching this->x, x, y.x, f respectively
///
+/// Given
+/// \code
+/// template <typename... Args>
+/// auto sum(Args... args) {
+/// return (0 + ... + args);
+/// }
+///
+/// template <typename... Args>
+/// auto multiply(Args... args) {
+/// return (args * ... * 1);
+/// }
+/// \endcode
+/// cxxFoldExpr(callee(expr()))
+/// matches (args * ... * 1)
+/// with callee(...)
+/// matching *
+///
/// Note: Callee cannot take the more general internal::Matcher<Expr>
/// because this introduces ambiguous overloads with calls to Callee taking a
/// internal::Matcher<Decl>, as the matcher hierarchy is purely
/// implemented in terms of implicit casts.
-AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
- InnerMatcher) {
- const Expr *ExprNode = Node.getCallee();
+AST_POLYMORPHIC_MATCHER_P_OVERLOAD(callee,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
+ CXXFoldExpr),
+ internal::Matcher<Stmt>, InnerMatcher, 0) {
+ const auto *ExprNode = Node.getCallee();
return (ExprNode != nullptr &&
InnerMatcher.matches(*ExprNode, Finder, Builder));
}
-/// Matches if the call expression's callee's declaration matches the
-/// given matcher.
+/// Matches 1) if the call expression's callee's declaration matches the
+/// given matcher; or 2) if the Obj-C message expression's callee's method
+/// declaration matches the given matcher.
///
/// Example matches y.x() (matcher = callExpr(callee(
/// cxxMethodDecl(hasName("x")))))
@@ -3835,9 +3945,31 @@ AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
/// class Y { public: void x(); };
/// void z() { Y y; y.x(); }
/// \endcode
-AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
- 1) {
- return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder);
+///
+/// Example 2. Matches [I foo] with
+/// objcMessageExpr(callee(objcMethodDecl(hasName("foo"))))
+///
+/// \code
+/// @interface I: NSObject
+/// +(void)foo;
+/// @end
+/// ...
+/// [I foo]
+/// \endcode
+AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
+ callee, AST_POLYMORPHIC_SUPPORTED_TYPES(ObjCMessageExpr, CallExpr),
+ internal::Matcher<Decl>, InnerMatcher, 1) {
+ if (isa<CallExpr>(&Node))
+ return callExpr(hasDeclaration(InnerMatcher))
+ .matches(Node, Finder, Builder);
+ else {
+ // The dynamic cast below is guaranteed to succeed as there are only 2
+ // supported return types.
+ const auto *MsgNode = cast<ObjCMessageExpr>(&Node);
+ const Decl *DeclNode = MsgNode->getMethodDecl();
+ return (DeclNode != nullptr &&
+ InnerMatcher.matches(*DeclNode, Finder, Builder));
+ }
}
/// Matches if the expression's or declaration's type matches a type
@@ -3919,14 +4051,14 @@ AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
///
/// \code
/// auto x = int(3);
-/// \code
+/// \endcode
/// cxxTemporaryObjectExpr(hasTypeLoc(loc(asString("int"))))
/// matches int(3)
///
/// \code
/// struct Foo { Foo(int, int); };
/// auto x = Foo(1, 2);
-/// \code
+/// \endcode
/// cxxFunctionalCastExpr(hasTypeLoc(loc(asString("struct Foo"))))
/// matches Foo(1, 2)
///
@@ -4124,25 +4256,34 @@ AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>,
InnerMatcher.matches(*DeclNode, Finder, Builder));
}
-/// Matches a \c DeclRefExpr that refers to a declaration through a
-/// specific using shadow declaration.
+/// Matches if a node refers to a declaration through a specific
+/// using shadow declaration.
///
-/// Given
+/// Examples:
/// \code
-/// namespace a { void f() {} }
+/// namespace a { int f(); }
/// using a::f;
-/// void g() {
-/// f(); // Matches this ..
-/// a::f(); // .. but not this.
-/// }
+/// int x = f();
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
-/// matches \c f()
-AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
- internal::Matcher<UsingShadowDecl>, InnerMatcher) {
+/// matches \c f
+///
+/// \code
+/// namespace a { class X{}; }
+/// using a::X;
+/// X x;
+/// \endcode
+/// typeLoc(loc(usingType(throughUsingDecl(anything()))))
+/// matches \c X
+///
+/// Usable as: Matcher<DeclRefExpr>, Matcher<UsingType>
+AST_POLYMORPHIC_MATCHER_P(throughUsingDecl,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(DeclRefExpr,
+ UsingType),
+ internal::Matcher<UsingShadowDecl>, Inner) {
const NamedDecl *FoundDecl = Node.getFoundDecl();
if (const UsingShadowDecl *UsingDecl = dyn_cast<UsingShadowDecl>(FoundDecl))
- return InnerMatcher.matches(*UsingDecl, Finder, Builder);
+ return Inner.matches(*UsingDecl, Finder, Builder);
return false;
}
@@ -4201,6 +4342,45 @@ AST_MATCHER_P(
InnerMatcher.matches(*Initializer, Finder, Builder));
}
+/// Matches a variable serving as the implicit variable for a lambda init-
+/// capture.
+///
+/// Example matches x (matcher = varDecl(isInitCapture()))
+/// \code
+/// auto f = [x=3]() { return x; };
+/// \endcode
+AST_MATCHER(VarDecl, isInitCapture) { return Node.isInitCapture(); }
+
+/// Matches each lambda capture in a lambda expression.
+///
+/// Given
+/// \code
+/// int main() {
+/// int x, y;
+/// float z;
+/// auto f = [=]() { return x + y + z; };
+/// }
+/// \endcode
+/// lambdaExpr(forEachLambdaCapture(
+/// lambdaCapture(capturesVar(varDecl(hasType(isInteger()))))))
+/// will trigger two matches, binding for 'x' and 'y' respectively.
+AST_MATCHER_P(LambdaExpr, forEachLambdaCapture,
+ internal::Matcher<LambdaCapture>, InnerMatcher) {
+ BoundNodesTreeBuilder Result;
+ bool Matched = false;
+ for (const auto &Capture : Node.captures()) {
+ if (Finder->isTraversalIgnoringImplicitNodes() && Capture.isImplicit())
+ continue;
+ BoundNodesTreeBuilder CaptureBuilder(*Builder);
+ if (InnerMatcher.matches(Capture, Finder, &CaptureBuilder)) {
+ Matched = true;
+ Result.addMatch(CaptureBuilder);
+ }
+ }
+ *Builder = std::move(Result);
+ return Matched;
+}
+
/// \brief Matches a static variable with local scope.
///
/// Example matches y (matcher = varDecl(isStaticLocal()))
@@ -4335,6 +4515,33 @@ AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
return NumArgs == N;
}
+/// Checks that a call expression or a constructor call expression has at least
+/// the specified number of arguments (including absent default arguments).
+///
+/// Example matches f(0, 0) and g(0, 0, 0)
+/// (matcher = callExpr(argumentCountAtLeast(2)))
+/// \code
+/// void f(int x, int y);
+/// void g(int x, int y, int z);
+/// f(0, 0);
+/// g(0, 0, 0);
+/// \endcode
+AST_POLYMORPHIC_MATCHER_P(argumentCountAtLeast,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(
+ CallExpr, CXXConstructExpr,
+ CXXUnresolvedConstructExpr, ObjCMessageExpr),
+ unsigned, N) {
+ unsigned NumArgs = Node.getNumArgs();
+ if (!Finder->isTraversalIgnoringImplicitNodes())
+ return NumArgs >= N;
+ while (NumArgs) {
+ if (!isa<CXXDefaultArgExpr>(Node.getArg(NumArgs - 1)))
+ break;
+ --NumArgs;
+ }
+ return NumArgs >= N;
+}
+
/// Matches the n'th argument of a call expression or a constructor
/// call expression.
///
@@ -4356,6 +4563,121 @@ AST_POLYMORPHIC_MATCHER_P2(hasArgument,
return InnerMatcher.matches(*Arg->IgnoreParenImpCasts(), Finder, Builder);
}
+/// Matches the operand that does not contain the parameter pack.
+///
+/// Example matches `(0 + ... + args)` and `(args * ... * 1)`
+/// (matcher = cxxFoldExpr(hasFoldInit(expr())))
+/// with hasFoldInit(...)
+/// matching `0` and `1` respectively
+/// \code
+/// template <typename... Args>
+/// auto sum(Args... args) {
+/// return (0 + ... + args);
+/// }
+///
+/// template <typename... Args>
+/// auto multiply(Args... args) {
+/// return (args * ... * 1);
+/// }
+/// \endcode
+AST_MATCHER_P(CXXFoldExpr, hasFoldInit, ast_matchers::internal::Matcher<Expr>,
+ InnerMacher) {
+ const auto *const Init = Node.getInit();
+ return Init && InnerMacher.matches(*Init, Finder, Builder);
+}
+
+/// Matches the operand that contains the parameter pack.
+///
+/// Example matches `(0 + ... + args)`
+/// (matcher = cxxFoldExpr(hasPattern(expr())))
+/// with hasPattern(...)
+/// matching `args`
+/// \code
+/// template <typename... Args>
+/// auto sum(Args... args) {
+/// return (0 + ... + args);
+/// }
+///
+/// template <typename... Args>
+/// auto multiply(Args... args) {
+/// return (args * ... * 1);
+/// }
+/// \endcode
+AST_MATCHER_P(CXXFoldExpr, hasPattern, ast_matchers::internal::Matcher<Expr>,
+ InnerMacher) {
+ const Expr *const Pattern = Node.getPattern();
+ return Pattern && InnerMacher.matches(*Pattern, Finder, Builder);
+}
+
+/// Matches right-folding fold expressions.
+///
+/// Example matches `(args * ... * 1)`
+/// (matcher = cxxFoldExpr(isRightFold()))
+/// \code
+/// template <typename... Args>
+/// auto sum(Args... args) {
+/// return (0 + ... + args);
+/// }
+///
+/// template <typename... Args>
+/// auto multiply(Args... args) {
+/// return (args * ... * 1);
+/// }
+/// \endcode
+AST_MATCHER(CXXFoldExpr, isRightFold) { return Node.isRightFold(); }
+
+/// Matches left-folding fold expressions.
+///
+/// Example matches `(0 + ... + args)`
+/// (matcher = cxxFoldExpr(isLeftFold()))
+/// \code
+/// template <typename... Args>
+/// auto sum(Args... args) {
+/// return (0 + ... + args);
+/// }
+///
+/// template <typename... Args>
+/// auto multiply(Args... args) {
+/// return (args * ... * 1);
+/// }
+/// \endcode
+AST_MATCHER(CXXFoldExpr, isLeftFold) { return Node.isLeftFold(); }
+
+/// Matches unary fold expressions, i.e. fold expressions without an
+/// initializer.
+///
+/// Example matches `(args * ...)`
+/// (matcher = cxxFoldExpr(isUnaryFold()))
+/// \code
+/// template <typename... Args>
+/// auto sum(Args... args) {
+/// return (0 + ... + args);
+/// }
+///
+/// template <typename... Args>
+/// auto multiply(Args... args) {
+/// return (args * ...);
+/// }
+/// \endcode
+AST_MATCHER(CXXFoldExpr, isUnaryFold) { return Node.getInit() == nullptr; }
+
+/// Matches binary fold expressions, i.e. fold expressions with an initializer.
+///
+/// Example matches `(0 + ... + args)`
+/// (matcher = cxxFoldExpr(isBinaryFold()))
+/// \code
+/// template <typename... Args>
+/// auto sum(Args... args) {
+/// return (0 + ... + args);
+/// }
+///
+/// template <typename... Args>
+/// auto multiply(Args... args) {
+/// return (args * ...);
+/// }
+/// \endcode
+AST_MATCHER(CXXFoldExpr, isBinaryFold) { return Node.getInit() != nullptr; }
+
/// Matches the n'th item of an initializer list expression.
///
/// Example matches y.
@@ -4586,50 +4908,81 @@ AST_POLYMORPHIC_MATCHER_P(hasAnyArgument,
return false;
}
-/// Matches any capture of a lambda expression.
+/// Matches lambda captures.
///
/// Given
/// \code
-/// void foo() {
+/// int main() {
/// int x;
/// auto f = [x](){};
+/// auto g = [x = 1](){};
/// }
/// \endcode
-/// lambdaExpr(hasAnyCapture(anything()))
-/// matches [x](){};
-AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>,
- InnerMatcher, 0) {
+/// In the matcher `lambdaExpr(hasAnyCapture(lambdaCapture()))`,
+/// `lambdaCapture()` matches `x` and `x=1`.
+extern const internal::VariadicAllOfMatcher<LambdaCapture> lambdaCapture;
+
+/// Matches any capture in a lambda expression.
+///
+/// Given
+/// \code
+/// void foo() {
+/// int t = 5;
+/// auto f = [=](){ return t; };
+/// }
+/// \endcode
+/// lambdaExpr(hasAnyCapture(lambdaCapture())) and
+/// lambdaExpr(hasAnyCapture(lambdaCapture(refersToVarDecl(hasName("t")))))
+/// both match `[=](){ return t; }`.
+AST_MATCHER_P(LambdaExpr, hasAnyCapture, internal::Matcher<LambdaCapture>,
+ InnerMatcher) {
for (const LambdaCapture &Capture : Node.captures()) {
- if (Capture.capturesVariable()) {
- BoundNodesTreeBuilder Result(*Builder);
- if (InnerMatcher.matches(*Capture.getCapturedVar(), Finder, &Result)) {
- *Builder = std::move(Result);
- return true;
- }
+ clang::ast_matchers::internal::BoundNodesTreeBuilder Result(*Builder);
+ if (InnerMatcher.matches(Capture, Finder, &Result)) {
+ *Builder = std::move(Result);
+ return true;
}
}
return false;
}
-/// Matches any capture of 'this' in a lambda expression.
+/// Matches a `LambdaCapture` that refers to the specified `VarDecl`. The
+/// `VarDecl` can be a separate variable that is captured by value or
+/// reference, or a synthesized variable if the capture has an initializer.
///
/// Given
/// \code
-/// struct foo {
-/// void bar() {
-/// auto f = [this](){};
-/// }
+/// void foo() {
+/// int x;
+/// auto f = [x](){};
+/// auto g = [x = 1](){};
/// }
/// \endcode
-/// lambdaExpr(hasAnyCapture(cxxThisExpr()))
-/// matches [this](){};
-AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture,
- internal::Matcher<CXXThisExpr>, InnerMatcher, 1) {
- return llvm::any_of(Node.captures(), [](const LambdaCapture &LC) {
- return LC.capturesThis();
- });
+/// In the matcher
+/// lambdaExpr(hasAnyCapture(lambdaCapture(capturesVar(hasName("x")))),
+/// capturesVar(hasName("x")) matches `x` and `x = 1`.
+AST_MATCHER_P(LambdaCapture, capturesVar, internal::Matcher<ValueDecl>,
+ InnerMatcher) {
+ auto *capturedVar = Node.getCapturedVar();
+ return capturedVar && InnerMatcher.matches(*capturedVar, Finder, Builder);
}
+/// Matches a `LambdaCapture` that refers to 'this'.
+///
+/// Given
+/// \code
+/// class C {
+/// int cc;
+/// int f() {
+/// auto l = [this]() { return cc; };
+/// return l();
+/// }
+/// };
+/// \endcode
+/// lambdaExpr(hasAnyCapture(lambdaCapture(capturesThis())))
+/// matches `[this]() { return cc; }`.
+AST_MATCHER(LambdaCapture, capturesThis) { return Node.capturesThis(); }
+
/// Matches a constructor call expression which uses list initialization.
AST_MATCHER(CXXConstructExpr, isListInitialization) {
return Node.isListInitialization();
@@ -4798,7 +5151,7 @@ AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParamType,
}
}
- int ParamIndex = 0;
+ unsigned ParamIndex = 0;
bool Matched = false;
unsigned NumArgs = Node.getNumArgs();
if (FProto && FProto->isVariadic())
@@ -4812,7 +5165,7 @@ AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParamType,
// This test is cheaper compared to the big matcher in the next if.
// Therefore, please keep this order.
- if (FProto) {
+ if (FProto && FProto->getNumParams() > ParamIndex) {
QualType ParamType = FProto->getParamType(ParamIndex);
if (ParamMatcher.matches(ParamType, Finder, &ParamMatches)) {
Result.addMatch(ParamMatches);
@@ -4929,6 +5282,49 @@ AST_POLYMORPHIC_MATCHER_P(parameterCountIs,
return Node.getNumParams() == N;
}
+/// Matches classTemplateSpecialization, templateSpecializationType and
+/// functionDecl nodes where the template argument matches the inner matcher.
+/// This matcher may produce multiple matches.
+///
+/// Given
+/// \code
+/// template <typename T, unsigned N, unsigned M>
+/// struct Matrix {};
+///
+/// constexpr unsigned R = 2;
+/// Matrix<int, R * 2, R * 4> M;
+///
+/// template <typename T, typename U>
+/// void f(T&& t, U&& u) {}
+///
+/// bool B = false;
+/// f(R, B);
+/// \endcode
+/// templateSpecializationType(forEachTemplateArgument(isExpr(expr())))
+/// matches twice, with expr() matching 'R * 2' and 'R * 4'
+/// functionDecl(forEachTemplateArgument(refersToType(builtinType())))
+/// matches the specialization f<unsigned, bool> twice, for 'unsigned'
+/// and 'bool'
+AST_POLYMORPHIC_MATCHER_P(
+ forEachTemplateArgument,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
+ TemplateSpecializationType, FunctionDecl),
+ clang::ast_matchers::internal::Matcher<TemplateArgument>, InnerMatcher) {
+ ArrayRef<TemplateArgument> TemplateArgs =
+ clang::ast_matchers::internal::getTemplateSpecializationArgs(Node);
+ clang::ast_matchers::internal::BoundNodesTreeBuilder Result;
+ bool Matched = false;
+ for (const auto &Arg : TemplateArgs) {
+ clang::ast_matchers::internal::BoundNodesTreeBuilder ArgBuilder(*Builder);
+ if (InnerMatcher.matches(Arg, Finder, &ArgBuilder)) {
+ Matched = true;
+ Result.addMatch(ArgBuilder);
+ }
+ }
+ *Builder = std::move(Result);
+ return Matched;
+}
+
/// Matches \c FunctionDecls that have a noreturn attribute.
///
/// Given
@@ -5088,6 +5484,25 @@ AST_POLYMORPHIC_MATCHER(isNoThrow,
return FnTy->isNothrow();
}
+/// Matches consteval function declarations and if consteval/if ! consteval
+/// statements.
+///
+/// Given:
+/// \code
+/// consteval int a();
+/// void b() { if consteval {} }
+/// void c() { if ! consteval {} }
+/// void d() { if ! consteval {} else {} }
+/// \endcode
+/// functionDecl(isConsteval())
+/// matches the declaration of "int a()".
+/// ifStmt(isConsteval())
+/// matches the if statement in "void b()", "void c()", "void d()".
+AST_POLYMORPHIC_MATCHER(isConsteval,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, IfStmt)) {
+ return Node.isConsteval();
+}
+
/// Matches constexpr variable and function declarations,
/// and if constexpr.
///
@@ -5110,6 +5525,23 @@ AST_POLYMORPHIC_MATCHER(isConstexpr,
return Node.isConstexpr();
}
+/// Matches constinit variable declarations.
+///
+/// Given:
+/// \code
+/// constinit int foo = 42;
+/// constinit const char* bar = "bar";
+/// int baz = 42;
+/// [[clang::require_constant_initialization]] int xyz = 42;
+/// \endcode
+/// varDecl(isConstinit())
+/// matches the declaration of `foo` and `bar`, but not `baz` and `xyz`.
+AST_MATCHER(VarDecl, isConstinit) {
+ if (const auto *CIA = Node.getAttr<ConstInitAttr>())
+ return CIA->isConstinit();
+ return false;
+}
+
/// Matches selection statements with initializer.
///
/// Given:
@@ -5266,16 +5698,16 @@ AST_MATCHER_P(ArraySubscriptExpr, hasBase,
return false;
}
-/// Matches a 'for', 'while', 'do while' statement or a function
-/// definition that has a given body. Note that in case of functions
-/// this matcher only matches the definition itself and not the other
-/// declarations of the same function.
+/// Matches a 'for', 'while', 'while' statement or a function or coroutine
+/// definition that has a given body. Note that in case of functions or
+/// coroutines this matcher only matches the definition itself and not the
+/// other declarations of the same function or coroutine.
///
/// Given
/// \code
/// for (;;) {}
/// \endcode
-/// hasBody(compoundStmt())
+/// forStmt(hasBody(compoundStmt()))
/// matches 'for (;;) {}'
/// with compoundStmt()
/// matching '{}'
@@ -5285,18 +5717,16 @@ AST_MATCHER_P(ArraySubscriptExpr, hasBase,
/// void f();
/// void f() {}
/// \endcode
-/// hasBody(functionDecl())
+/// functionDecl(hasBody(compoundStmt()))
/// matches 'void f() {}'
/// with compoundStmt()
/// matching '{}'
/// but does not match 'void f();'
-
-AST_POLYMORPHIC_MATCHER_P(hasBody,
- AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
- WhileStmt,
- CXXForRangeStmt,
- FunctionDecl),
- internal::Matcher<Stmt>, InnerMatcher) {
+AST_POLYMORPHIC_MATCHER_P(
+ hasBody,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt, WhileStmt, CXXForRangeStmt,
+ FunctionDecl, CoroutineBodyStmt),
+ internal::Matcher<Stmt>, InnerMatcher) {
if (Finder->isTraversalIgnoringImplicitNodes() && isDefaultedHelper(&Node))
return false;
const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node);
@@ -5425,19 +5855,29 @@ AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
.matchesNode(Node);
}
-/// Matches the operator Name of operator expressions (binary or
-/// unary).
+/// Matches the operator Name of operator expressions and fold expressions
+/// (binary or unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
/// !(a || b)
/// \endcode
+///
+/// Example matches `(0 + ... + args)`
+/// (matcher = cxxFoldExpr(hasOperatorName("+")))
+/// \code
+/// template <typename... Args>
+/// auto sum(Args... args) {
+/// return (0 + ... + args);
+/// }
+/// \endcode
AST_POLYMORPHIC_MATCHER_P(
hasOperatorName,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
- CXXRewrittenBinaryOperator, UnaryOperator),
+ CXXRewrittenBinaryOperator, CXXFoldExpr,
+ UnaryOperator),
std::string, Name) {
- if (Optional<StringRef> OpName = internal::getOpName(Node))
+ if (std::optional<StringRef> OpName = internal::getOpName(Node))
return *OpName == Name;
return false;
}
@@ -5505,11 +5945,12 @@ AST_POLYMORPHIC_MATCHER(
/// \code
/// a || b
/// \endcode
-AST_POLYMORPHIC_MATCHER_P(hasLHS,
- AST_POLYMORPHIC_SUPPORTED_TYPES(
- BinaryOperator, CXXOperatorCallExpr,
- CXXRewrittenBinaryOperator, ArraySubscriptExpr),
- internal::Matcher<Expr>, InnerMatcher) {
+AST_POLYMORPHIC_MATCHER_P(
+ hasLHS,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
+ CXXRewrittenBinaryOperator,
+ ArraySubscriptExpr, CXXFoldExpr),
+ internal::Matcher<Expr>, InnerMatcher) {
const Expr *LeftHandSide = internal::getLHS(Node);
return (LeftHandSide != nullptr &&
InnerMatcher.matches(*LeftHandSide, Finder, Builder));
@@ -5521,29 +5962,31 @@ AST_POLYMORPHIC_MATCHER_P(hasLHS,
/// \code
/// a || b
/// \endcode
-AST_POLYMORPHIC_MATCHER_P(hasRHS,
- AST_POLYMORPHIC_SUPPORTED_TYPES(
- BinaryOperator, CXXOperatorCallExpr,
- CXXRewrittenBinaryOperator, ArraySubscriptExpr),
- internal::Matcher<Expr>, InnerMatcher) {
+AST_POLYMORPHIC_MATCHER_P(
+ hasRHS,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
+ CXXRewrittenBinaryOperator,
+ ArraySubscriptExpr, CXXFoldExpr),
+ internal::Matcher<Expr>, InnerMatcher) {
const Expr *RightHandSide = internal::getRHS(Node);
return (RightHandSide != nullptr &&
InnerMatcher.matches(*RightHandSide, Finder, Builder));
}
/// Matches if either the left hand side or the right hand side of a
-/// binary operator matches.
+/// binary operator or fold expression matches.
AST_POLYMORPHIC_MATCHER_P(
hasEitherOperand,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
- CXXRewrittenBinaryOperator),
+ CXXFoldExpr, CXXRewrittenBinaryOperator),
internal::Matcher<Expr>, InnerMatcher) {
return internal::VariadicDynCastAllOfMatcher<Stmt, NodeType>()(
anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher)))
.matches(Node, Finder, Builder);
}
-/// Matches if both matchers match with opposite sides of the binary operator.
+/// Matches if both matchers match with opposite sides of the binary operator
+/// or fold expression.
///
/// Example matcher = binaryOperator(hasOperands(integerLiteral(equals(1),
/// integerLiteral(equals(2)))
@@ -5556,7 +5999,7 @@ AST_POLYMORPHIC_MATCHER_P(
AST_POLYMORPHIC_MATCHER_P2(
hasOperands,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
- CXXRewrittenBinaryOperator),
+ CXXFoldExpr, CXXRewrittenBinaryOperator),
internal::Matcher<Expr>, Matcher1, internal::Matcher<Expr>, Matcher2) {
return internal::VariadicDynCastAllOfMatcher<Stmt, NodeType>()(
anyOf(allOf(hasLHS(Matcher1), hasRHS(Matcher2)),
@@ -5631,8 +6074,6 @@ AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
/// Matches implicit casts whose destination type matches a given
/// matcher.
-///
-/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
internal::Matcher<QualType>, InnerMatcher) {
return InnerMatcher.matches(Node.getType(), Finder, Builder);
@@ -5875,6 +6316,10 @@ AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) {
return Node.isVirtualAsWritten();
}
+AST_MATCHER(CXXConstructorDecl, isInheritingConstructor) {
+ return Node.isInheritingConstructor();
+}
+
/// Matches if the given method or class declaration is final.
///
/// Given:
@@ -5906,9 +6351,7 @@ AST_POLYMORPHIC_MATCHER(isFinal,
/// };
/// \endcode
/// matches A::x
-AST_MATCHER(CXXMethodDecl, isPure) {
- return Node.isPure();
-}
+AST_MATCHER(CXXMethodDecl, isPure) { return Node.isPureVirtual(); }
/// Matches if the given method declaration is const.
///
@@ -6333,6 +6776,187 @@ AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc,
new internal::TypeLocTypeMatcher(InnerMatcher));
}
+/// Matches `QualifiedTypeLoc`s in the clang AST.
+///
+/// Given
+/// \code
+/// const int x = 0;
+/// \endcode
+/// qualifiedTypeLoc()
+/// matches `const int`.
+extern const internal::VariadicDynCastAllOfMatcher<TypeLoc, QualifiedTypeLoc>
+ qualifiedTypeLoc;
+
+/// Matches `QualifiedTypeLoc`s that have an unqualified `TypeLoc` matching
+/// `InnerMatcher`.
+///
+/// Given
+/// \code
+/// int* const x;
+/// const int y;
+/// \endcode
+/// qualifiedTypeLoc(hasUnqualifiedLoc(pointerTypeLoc()))
+/// matches the `TypeLoc` of the variable declaration of `x`, but not `y`.
+AST_MATCHER_P(QualifiedTypeLoc, hasUnqualifiedLoc, internal::Matcher<TypeLoc>,
+ InnerMatcher) {
+ return InnerMatcher.matches(Node.getUnqualifiedLoc(), Finder, Builder);
+}
+
+/// Matches a function declared with the specified return `TypeLoc`.
+///
+/// Given
+/// \code
+/// int f() { return 5; }
+/// void g() {}
+/// \endcode
+/// functionDecl(hasReturnTypeLoc(loc(asString("int"))))
+/// matches the declaration of `f`, but not `g`.
+AST_MATCHER_P(FunctionDecl, hasReturnTypeLoc, internal::Matcher<TypeLoc>,
+ ReturnMatcher) {
+ auto Loc = Node.getFunctionTypeLoc();
+ return Loc && ReturnMatcher.matches(Loc.getReturnLoc(), Finder, Builder);
+}
+
+/// Matches pointer `TypeLoc`s.
+///
+/// Given
+/// \code
+/// int* x;
+/// \endcode
+/// pointerTypeLoc()
+/// matches `int*`.
+extern const internal::VariadicDynCastAllOfMatcher<TypeLoc, PointerTypeLoc>
+ pointerTypeLoc;
+
+/// Matches pointer `TypeLoc`s that have a pointee `TypeLoc` matching
+/// `PointeeMatcher`.
+///
+/// Given
+/// \code
+/// int* x;
+/// \endcode
+/// pointerTypeLoc(hasPointeeLoc(loc(asString("int"))))
+/// matches `int*`.
+AST_MATCHER_P(PointerTypeLoc, hasPointeeLoc, internal::Matcher<TypeLoc>,
+ PointeeMatcher) {
+ return PointeeMatcher.matches(Node.getPointeeLoc(), Finder, Builder);
+}
+
+/// Matches reference `TypeLoc`s.
+///
+/// Given
+/// \code
+/// int x = 3;
+/// int& l = x;
+/// int&& r = 3;
+/// \endcode
+/// referenceTypeLoc()
+/// matches `int&` and `int&&`.
+extern const internal::VariadicDynCastAllOfMatcher<TypeLoc, ReferenceTypeLoc>
+ referenceTypeLoc;
+
+/// Matches reference `TypeLoc`s that have a referent `TypeLoc` matching
+/// `ReferentMatcher`.
+///
+/// Given
+/// \code
+/// int x = 3;
+/// int& xx = x;
+/// \endcode
+/// referenceTypeLoc(hasReferentLoc(loc(asString("int"))))
+/// matches `int&`.
+AST_MATCHER_P(ReferenceTypeLoc, hasReferentLoc, internal::Matcher<TypeLoc>,
+ ReferentMatcher) {
+ return ReferentMatcher.matches(Node.getPointeeLoc(), Finder, Builder);
+}
+
+/// Matches template specialization `TypeLoc`s.
+///
+/// Given
+/// \code
+/// template <typename T> class C {};
+/// C<char> var;
+/// \endcode
+/// varDecl(hasTypeLoc(templateSpecializationTypeLoc(typeLoc())))
+/// matches `C<char> var`.
+extern const internal::VariadicDynCastAllOfMatcher<
+ TypeLoc, TemplateSpecializationTypeLoc>
+ templateSpecializationTypeLoc;
+
+/// Matches template specialization `TypeLoc`s that have at least one
+/// `TemplateArgumentLoc` matching the given `InnerMatcher`.
+///
+/// Given
+/// \code
+/// template<typename T> class A {};
+/// A<int> a;
+/// \endcode
+/// varDecl(hasTypeLoc(templateSpecializationTypeLoc(hasAnyTemplateArgumentLoc(
+/// hasTypeLoc(loc(asString("int")))))))
+/// matches `A<int> a`.
+AST_MATCHER_P(TemplateSpecializationTypeLoc, hasAnyTemplateArgumentLoc,
+ internal::Matcher<TemplateArgumentLoc>, InnerMatcher) {
+ for (unsigned Index = 0, N = Node.getNumArgs(); Index < N; ++Index) {
+ clang::ast_matchers::internal::BoundNodesTreeBuilder Result(*Builder);
+ if (InnerMatcher.matches(Node.getArgLoc(Index), Finder, &Result)) {
+ *Builder = std::move(Result);
+ return true;
+ }
+ }
+ return false;
+}
+
+/// Matches template specialization `TypeLoc`s where the n'th
+/// `TemplateArgumentLoc` matches the given `InnerMatcher`.
+///
+/// Given
+/// \code
+/// template<typename T, typename U> class A {};
+/// A<double, int> b;
+/// A<int, double> c;
+/// \endcode
+/// varDecl(hasTypeLoc(templateSpecializationTypeLoc(hasTemplateArgumentLoc(0,
+/// hasTypeLoc(loc(asString("double")))))))
+/// matches `A<double, int> b`, but not `A<int, double> c`.
+AST_POLYMORPHIC_MATCHER_P2(
+ hasTemplateArgumentLoc,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(DeclRefExpr, TemplateSpecializationTypeLoc),
+ unsigned, Index, internal::Matcher<TemplateArgumentLoc>, InnerMatcher) {
+ return internal::MatchTemplateArgLocAt(Node, Index, InnerMatcher, Finder,
+ Builder);
+}
+
+/// Matches C or C++ elaborated `TypeLoc`s.
+///
+/// Given
+/// \code
+/// struct s {};
+/// struct s ss;
+/// \endcode
+/// elaboratedTypeLoc()
+/// matches the `TypeLoc` of the variable declaration of `ss`.
+extern const internal::VariadicDynCastAllOfMatcher<TypeLoc, ElaboratedTypeLoc>
+ elaboratedTypeLoc;
+
+/// Matches elaborated `TypeLoc`s that have a named `TypeLoc` matching
+/// `InnerMatcher`.
+///
+/// Given
+/// \code
+/// template <typename T>
+/// class C {};
+/// class C<int> c;
+///
+/// class D {};
+/// class D d;
+/// \endcode
+/// elaboratedTypeLoc(hasNamedTypeLoc(templateSpecializationTypeLoc()));
+/// matches the `TypeLoc` of the variable declaration of `c`, but not `d`.
+AST_MATCHER_P(ElaboratedTypeLoc, hasNamedTypeLoc, internal::Matcher<TypeLoc>,
+ InnerMatcher) {
+ return InnerMatcher.matches(Node.getNamedTypeLoc(), Finder, Builder);
+}
+
/// Matches type \c bool.
///
/// Given
@@ -6471,10 +7095,25 @@ AST_POLYMORPHIC_MATCHER_P(hasSize,
/// T data[Size];
/// };
/// \endcode
-/// dependentSizedArrayType
+/// dependentSizedArrayType()
/// matches "T data[Size]"
extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType;
+/// Matches C++ extended vector type where either the type or size is
+/// dependent.
+///
+/// Given
+/// \code
+/// template<typename T, int Size>
+/// class vector {
+/// typedef T __attribute__((ext_vector_type(Size))) type;
+/// };
+/// \endcode
+/// dependentSizedExtVectorType()
+/// matches "T __attribute__((ext_vector_type(Size)))"
+extern const AstTypeMatcher<DependentSizedExtVectorType>
+ dependentSizedExtVectorType;
+
/// Matches C arrays with unspecified size.
///
/// Given
@@ -6584,7 +7223,7 @@ extern const AstTypeMatcher<DecltypeType> decltypeType;
AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType));
-/// Matches \c DecltypeType nodes to find out the underlying type.
+/// Matches \c DecltypeType or \c UsingType nodes to find the underlying type.
///
/// Given
/// \code
@@ -6594,9 +7233,10 @@ AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
/// decltypeType(hasUnderlyingType(isInteger()))
/// matches the type of "a"
///
-/// Usable as: Matcher<DecltypeType>
+/// Usable as: Matcher<DecltypeType>, Matcher<UsingType>
AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType,
- AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType));
+ AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType,
+ UsingType));
/// Matches \c FunctionType nodes.
///
@@ -6775,6 +7415,18 @@ AST_TYPELOC_TRAVERSE_MATCHER_DECL(
/// matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;
+/// Matches qualified types when the qualifier is applied via a macro.
+///
+/// Given
+/// \code
+/// #define CDECL __attribute__((cdecl))
+/// typedef void (CDECL *X)();
+/// typedef void (__attribute__((cdecl)) *Y)();
+/// \endcode
+/// macroQualifiedType()
+/// matches the type of the typedef declaration of \c X but not \c Y.
+extern const AstTypeMatcher<MacroQualifiedType> macroQualifiedType;
+
/// Matches enum types.
///
/// Given
@@ -6924,6 +7576,18 @@ AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
return InnerMatcher.matches(Node.getNamedType(), Finder, Builder);
}
+/// Matches types specified through a using declaration.
+///
+/// Given
+/// \code
+/// namespace a { struct S {}; }
+/// using a::S;
+/// S s;
+/// \endcode
+///
+/// \c usingType() matches the type of the variable declaration of \c s.
+extern const AstTypeMatcher<UsingType> usingType;
+
/// Matches types that represent the result of substituting a type for a
/// template type parameter.
///
@@ -7133,6 +7797,24 @@ AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace,
return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder);
}
+/// Matches attributes.
+/// Attributes may be attached with a variety of different syntaxes (including
+/// keywords, C++11 attributes, GNU ``__attribute``` and MSVC `__declspec``,
+/// and ``#pragma``s). They may also be implicit.
+///
+/// Given
+/// \code
+/// struct [[nodiscard]] Foo{};
+/// void bar(int * __attribute__((nonnull)) );
+/// __declspec(noinline) void baz();
+///
+/// #pragma omp declare simd
+/// int min();
+/// \endcode
+/// attr()
+/// matches "nodiscard", "nonnull", "noinline", and the whole "#pragma" line.
+extern const internal::VariadicAllOfMatcher<Attr> attr;
+
/// Overloads for the \c equalsNode matcher.
/// FIXME: Implement for other node types.
/// @{
@@ -7339,7 +8021,7 @@ AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>,
return InnerMatcher.matches(*ES.getExpr(), Finder, Builder);
}
-/// Matches function and namespace declarations that are marked with
+/// Matches functions, variables and namespace declarations that are marked with
/// the inline keyword.
///
/// Given
@@ -7349,18 +8031,22 @@ AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>,
/// namespace n {
/// inline namespace m {}
/// }
+/// inline int Foo = 5;
/// \endcode
/// functionDecl(isInline()) will match ::f().
/// namespaceDecl(isInline()) will match n::m.
-AST_POLYMORPHIC_MATCHER(isInline,
- AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl,
- FunctionDecl)) {
+/// varDecl(isInline()) will match Foo;
+AST_POLYMORPHIC_MATCHER(isInline, AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl,
+ FunctionDecl,
+ VarDecl)) {
// This is required because the spelling of the function used to determine
// whether inline is specified or not differs between the polymorphic types.
if (const auto *FD = dyn_cast<FunctionDecl>(&Node))
return FD->isInlineSpecified();
- else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node))
+ if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node))
return NSD->isInline();
+ if (const auto *VD = dyn_cast<VarDecl>(&Node))
+ return VD->isInline();
llvm_unreachable("Not a valid polymorphic type");
}
@@ -7400,6 +8086,30 @@ AST_MATCHER(NamespaceDecl, isAnonymous) {
/// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1.
AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); }
+/// Matches declarations in an anonymous namespace.
+///
+/// Given
+/// \code
+/// class vector {};
+/// namespace foo {
+/// class vector {};
+/// namespace {
+/// class vector {}; // #1
+/// }
+/// }
+/// namespace {
+/// class vector {}; // #2
+/// namespace foo {
+/// class vector{}; // #3
+/// }
+/// }
+/// \endcode
+/// cxxRecordDecl(hasName("vector"), isInAnonymousNamespace()) will match
+/// #1, #2 and #3.
+AST_MATCHER(Decl, isInAnonymousNamespace) {
+ return Node.isInAnonymousNamespace();
+}
+
/// If the given case statement does not use the GNU case range
/// extension, matches the constant given in the statement.
///
@@ -7590,8 +8300,7 @@ AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
return true;
}
} else {
- for (const auto &Parent : Finder->getASTContext().getParents(CurNode))
- Stack.push_back(Parent);
+ llvm::append_range(Stack, Finder->getASTContext().getParents(CurNode));
}
}
return false;
@@ -7649,8 +8358,7 @@ AST_MATCHER_P(Stmt, forCallable, internal::Matcher<Decl>, InnerMatcher) {
return true;
}
} else {
- for (const auto &Parent : Finder->getASTContext().getParents(CurNode))
- Stack.push_back(Parent);
+ llvm::append_range(Stack, Finder->getASTContext().getParents(CurNode));
}
}
return false;
@@ -7924,12 +8632,13 @@ AST_MATCHER_P(OMPExecutableDirective, hasAnyClause,
/// \code
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
+/// #pragma omp parallel default(private)
/// #pragma omp parallel default(firstprivate)
/// #pragma omp parallel
/// \endcode
///
-/// ``ompDefaultClause()`` matches ``default(none)``, ``default(shared)``, and
-/// ``default(firstprivate)``
+/// ``ompDefaultClause()`` matches ``default(none)``, ``default(shared)``,
+/// `` default(private)`` and ``default(firstprivate)``
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
ompDefaultClause;
@@ -7941,6 +8650,7 @@ extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
+/// #pragma omp parallel default(private)
/// #pragma omp parallel default(firstprivate)
/// \endcode
///
@@ -7957,6 +8667,7 @@ AST_MATCHER(OMPDefaultClause, isNoneKind) {
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
+/// #pragma omp parallel default(private)
/// #pragma omp parallel default(firstprivate)
/// \endcode
///
@@ -7965,6 +8676,25 @@ AST_MATCHER(OMPDefaultClause, isSharedKind) {
return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_shared;
}
+/// Matches if the OpenMP ``default`` clause has ``private`` kind
+/// specified.
+///
+/// Given
+///
+/// \code
+/// #pragma omp parallel
+/// #pragma omp parallel default(none)
+/// #pragma omp parallel default(shared)
+/// #pragma omp parallel default(private)
+/// #pragma omp parallel default(firstprivate)
+/// \endcode
+///
+/// ``ompDefaultClause(isPrivateKind())`` matches only
+/// ``default(private)``.
+AST_MATCHER(OMPDefaultClause, isPrivateKind) {
+ return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_private;
+}
+
/// Matches if the OpenMP ``default`` clause has ``firstprivate`` kind
/// specified.
///
@@ -7974,6 +8704,7 @@ AST_MATCHER(OMPDefaultClause, isSharedKind) {
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
+/// #pragma omp parallel default(private)
/// #pragma omp parallel default(firstprivate)
/// \endcode
///
diff --git a/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchersInternal.h b/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchersInternal.h
index 71f4f2d17ae3..47d912c73dd7 100644
--- a/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchersInternal.h
+++ b/contrib/llvm-project/clang/include/clang/ASTMatchers/ASTMatchersInternal.h
@@ -52,8 +52,6 @@
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
@@ -67,6 +65,7 @@
#include <cstdint>
#include <map>
#include <memory>
+#include <optional>
#include <string>
#include <tuple>
#include <type_traits>
@@ -122,7 +121,7 @@ template <typename T> struct TypeListContainsSuperOf<EmptyTypeList, T> {
template <typename ResultT, typename ArgT,
ResultT (*Func)(ArrayRef<const ArgT *>)>
struct VariadicFunction {
- ResultT operator()() const { return Func(None); }
+ ResultT operator()() const { return Func(std::nullopt); }
template <typename... ArgsT>
ResultT operator()(const ArgT &Arg1, const ArgsT &... Args) const {
@@ -132,10 +131,7 @@ struct VariadicFunction {
// We also allow calls with an already created array, in case the caller
// already had it.
ResultT operator()(ArrayRef<ArgT> Args) const {
- SmallVector<const ArgT*, 8> InnerArgs;
- for (const ArgT &Arg : Args)
- InnerArgs.push_back(&Arg);
- return Func(InnerArgs);
+ return Func(llvm::to_vector<8>(llvm::make_pointer_range(Args)));
}
private:
@@ -312,8 +308,7 @@ public:
template <typename ExcludePredicate>
bool removeBindings(const ExcludePredicate &Predicate) {
- Bindings.erase(std::remove_if(Bindings.begin(), Bindings.end(), Predicate),
- Bindings.end());
+ llvm::erase_if(Bindings, Predicate);
return !Bindings.empty();
}
@@ -355,8 +350,8 @@ public:
virtual bool dynMatches(const DynTypedNode &DynNode, ASTMatchFinder *Finder,
BoundNodesTreeBuilder *Builder) const = 0;
- virtual llvm::Optional<clang::TraversalKind> TraversalKind() const {
- return llvm::None;
+ virtual std::optional<clang::TraversalKind> TraversalKind() const {
+ return std::nullopt;
}
};
@@ -468,7 +463,7 @@ public:
/// restricts the node types for \p Kind.
DynTypedMatcher dynCastTo(const ASTNodeKind Kind) const;
- /// Return a matcher that that points to the same implementation, but sets the
+ /// Return a matcher that points to the same implementation, but sets the
/// traversal kind.
///
/// If the traversal kind is already set, then \c TK overrides it.
@@ -487,8 +482,8 @@ public:
/// Bind the specified \p ID to the matcher.
/// \return A new matcher with the \p ID bound to it if this matcher supports
- /// binding. Otherwise, returns an empty \c Optional<>.
- llvm::Optional<DynTypedMatcher> tryBind(StringRef ID) const;
+ /// binding. Otherwise, returns an empty \c std::optional<>.
+ std::optional<DynTypedMatcher> tryBind(StringRef ID) const;
/// Returns a unique \p ID for the matcher.
///
@@ -540,8 +535,8 @@ public:
/// Returns the \c TraversalKind respected by calls to `match()`, if any.
///
/// Most matchers will not have a traversal kind set, instead relying on the
- /// surrounding context. For those, \c llvm::None is returned.
- llvm::Optional<clang::TraversalKind> getTraversalKind() const {
+ /// surrounding context. For those, \c std::nullopt is returned.
+ std::optional<clang::TraversalKind> getTraversalKind() const {
return Implementation->TraversalKind();
}
@@ -601,17 +596,15 @@ public:
/// Convert \c this into a \c Matcher<T> by applying dyn_cast<> to the
/// argument.
/// \c To must be a base class of \c T.
- template <typename To> Matcher<To> dynCastTo() const LLVM_LVALUE_FUNCTION {
+ template <typename To> Matcher<To> dynCastTo() const & {
static_assert(std::is_base_of<To, T>::value, "Invalid dynCast call.");
return Matcher<To>(Implementation);
}
-#if LLVM_HAS_RVALUE_REFERENCE_THIS
template <typename To> Matcher<To> dynCastTo() && {
static_assert(std::is_base_of<To, T>::value, "Invalid dynCast call.");
return Matcher<To>(std::move(Implementation));
}
-#endif
/// Forwards the call to the underlying MatcherInterface<T> pointer.
bool matches(const T &Node,
@@ -629,13 +622,9 @@ public:
///
/// The returned matcher keeps the same restrictions as \c this and remembers
/// that it is meant to support nodes of type \c T.
- operator DynTypedMatcher() const LLVM_LVALUE_FUNCTION {
- return Implementation;
- }
+ operator DynTypedMatcher() const & { return Implementation; }
-#if LLVM_HAS_RVALUE_REFERENCE_THIS
operator DynTypedMatcher() && { return std::move(Implementation); }
-#endif
/// Allows the conversion of a \c Matcher<Type> to a \c
/// Matcher<QualType>.
@@ -659,7 +648,7 @@ public:
Builder);
}
- llvm::Optional<clang::TraversalKind> TraversalKind() const override {
+ std::optional<clang::TraversalKind> TraversalKind() const override {
return this->InnerMatcher.getTraversalKind();
}
};
@@ -757,7 +746,8 @@ public:
std::is_base_of<NestedNameSpecifier, T>::value ||
std::is_base_of<NestedNameSpecifierLoc, T>::value ||
std::is_base_of<TypeLoc, T>::value ||
- std::is_base_of<QualType, T>::value,
+ std::is_base_of<QualType, T>::value ||
+ std::is_base_of<Attr, T>::value,
"unsupported type for recursive matching");
return matchesChildOf(DynTypedNode::create(Node), getASTContext(), Matcher,
Builder, Bind);
@@ -771,7 +761,8 @@ public:
std::is_base_of<NestedNameSpecifier, T>::value ||
std::is_base_of<NestedNameSpecifierLoc, T>::value ||
std::is_base_of<TypeLoc, T>::value ||
- std::is_base_of<QualType, T>::value,
+ std::is_base_of<QualType, T>::value ||
+ std::is_base_of<Attr, T>::value,
"unsupported type for recursive matching");
return matchesDescendantOf(DynTypedNode::create(Node), getASTContext(),
Matcher, Builder, Bind);
@@ -785,7 +776,8 @@ public:
static_assert(std::is_base_of<Decl, T>::value ||
std::is_base_of<NestedNameSpecifierLoc, T>::value ||
std::is_base_of<Stmt, T>::value ||
- std::is_base_of<TypeLoc, T>::value,
+ std::is_base_of<TypeLoc, T>::value ||
+ std::is_base_of<Attr, T>::value,
"type not allowed for recursive matching");
return matchesAncestorOf(DynTypedNode::create(Node), getASTContext(),
Matcher, Builder, MatchMode);
@@ -954,7 +946,7 @@ class HasNameMatcher : public SingleNodeMatcherInterface<NamedDecl> {
bool matchesNode(const NamedDecl &Node) const override;
- private:
+private:
/// Unqualified match routine.
///
/// It is much faster than the full match, but it only works for unqualified
@@ -1025,31 +1017,29 @@ private:
BoundNodesTreeBuilder *Builder) const {
// DeducedType does not have declarations of its own, so
// match the deduced type instead.
- const Type *EffectiveType = &Node;
if (const auto *S = dyn_cast<DeducedType>(&Node)) {
- EffectiveType = S->getDeducedType().getTypePtrOrNull();
- if (!EffectiveType)
- return false;
+ QualType DT = S->getDeducedType();
+ return !DT.isNull() ? matchesSpecialized(*DT, Finder, Builder) : false;
}
// First, for any types that have a declaration, extract the declaration and
// match on it.
- if (const auto *S = dyn_cast<TagType>(EffectiveType)) {
+ if (const auto *S = dyn_cast<TagType>(&Node)) {
return matchesDecl(S->getDecl(), Finder, Builder);
}
- if (const auto *S = dyn_cast<InjectedClassNameType>(EffectiveType)) {
+ if (const auto *S = dyn_cast<InjectedClassNameType>(&Node)) {
return matchesDecl(S->getDecl(), Finder, Builder);
}
- if (const auto *S = dyn_cast<TemplateTypeParmType>(EffectiveType)) {
+ if (const auto *S = dyn_cast<TemplateTypeParmType>(&Node)) {
return matchesDecl(S->getDecl(), Finder, Builder);
}
- if (const auto *S = dyn_cast<TypedefType>(EffectiveType)) {
+ if (const auto *S = dyn_cast<TypedefType>(&Node)) {
return matchesDecl(S->getDecl(), Finder, Builder);
}
- if (const auto *S = dyn_cast<UnresolvedUsingType>(EffectiveType)) {
+ if (const auto *S = dyn_cast<UnresolvedUsingType>(&Node)) {
return matchesDecl(S->getDecl(), Finder, Builder);
}
- if (const auto *S = dyn_cast<ObjCObjectType>(EffectiveType)) {
+ if (const auto *S = dyn_cast<ObjCObjectType>(&Node)) {
return matchesDecl(S->getInterface(), Finder, Builder);
}
@@ -1061,14 +1051,14 @@ private:
// template<typename T> struct X { T t; } class A {}; X<A> a;
// The following matcher will match, which otherwise would not:
// fieldDecl(hasType(pointerType())).
- if (const auto *S = dyn_cast<SubstTemplateTypeParmType>(EffectiveType)) {
+ if (const auto *S = dyn_cast<SubstTemplateTypeParmType>(&Node)) {
return matchesSpecialized(S->getReplacementType(), Finder, Builder);
}
// For template specialization types, we want to match the template
// declaration, as long as the type is still dependent, and otherwise the
// declaration of the instantiated tag type.
- if (const auto *S = dyn_cast<TemplateSpecializationType>(EffectiveType)) {
+ if (const auto *S = dyn_cast<TemplateSpecializationType>(&Node)) {
if (!S->isTypeAlias() && S->isSugared()) {
// If the template is non-dependent, we want to match the instantiated
// tag type.
@@ -1087,7 +1077,13 @@ private:
// FIXME: We desugar elaborated types. This makes the assumption that users
// do never want to match on whether a type is elaborated - there are
// arguments for both sides; for now, continue desugaring.
- if (const auto *S = dyn_cast<ElaboratedType>(EffectiveType)) {
+ if (const auto *S = dyn_cast<ElaboratedType>(&Node)) {
+ return matchesSpecialized(S->desugar(), Finder, Builder);
+ }
+ // Similarly types found via using declarations.
+ // These are *usually* meaningless sugar, and this matches the historical
+ // behavior prior to the introduction of UsingType.
+ if (const auto *S = dyn_cast<UsingType>(&Node)) {
return matchesSpecialized(S->desugar(), Finder, Builder);
}
return false;
@@ -1175,7 +1171,8 @@ struct IsBaseType {
std::is_same<T, NestedNameSpecifier>::value ||
std::is_same<T, NestedNameSpecifierLoc>::value ||
std::is_same<T, CXXCtorInitializer>::value ||
- std::is_same<T, TemplateArgumentLoc>::value;
+ std::is_same<T, TemplateArgumentLoc>::value ||
+ std::is_same<T, Attr>::value;
};
template <typename T>
const bool IsBaseType<T>::value;
@@ -1185,7 +1182,7 @@ const bool IsBaseType<T>::value;
/// Useful for matchers like \c anything and \c unless.
using AllNodeBaseTypes =
TypeList<Decl, Stmt, NestedNameSpecifier, NestedNameSpecifierLoc, QualType,
- Type, TypeLoc, CXXCtorInitializer>;
+ Type, TypeLoc, CXXCtorInitializer, Attr>;
/// Helper meta-function to extract the argument out of a function of
/// type void(Arg).
@@ -1212,7 +1209,7 @@ template <class T, class Tuple> constexpr T *new_from_tuple(Tuple &&t) {
using AdaptativeDefaultFromTypes = AllNodeBaseTypes;
using AdaptativeDefaultToTypes =
TypeList<Decl, Stmt, NestedNameSpecifier, NestedNameSpecifierLoc, TypeLoc,
- QualType>;
+ QualType, Attr>;
/// All types that are supported by HasDeclarationMatcher above.
using HasDeclarationSupportedTypes =
@@ -1354,35 +1351,31 @@ public:
VariadicOperatorMatcher(DynTypedMatcher::VariadicOperator Op, Ps &&... Params)
: Op(Op), Params(std::forward<Ps>(Params)...) {}
- template <typename T> operator Matcher<T>() const LLVM_LVALUE_FUNCTION {
+ template <typename T> operator Matcher<T>() const & {
return DynTypedMatcher::constructVariadic(
Op, ASTNodeKind::getFromNodeKind<T>(),
getMatchers<T>(std::index_sequence_for<Ps...>()))
.template unconditionalConvertTo<T>();
}
-#if LLVM_HAS_RVALUE_REFERENCE_THIS
template <typename T> operator Matcher<T>() && {
return DynTypedMatcher::constructVariadic(
Op, ASTNodeKind::getFromNodeKind<T>(),
getMatchers<T>(std::index_sequence_for<Ps...>()))
.template unconditionalConvertTo<T>();
}
-#endif
+
private:
// Helper method to unpack the tuple into a vector.
template <typename T, std::size_t... Is>
- std::vector<DynTypedMatcher>
- getMatchers(std::index_sequence<Is...>) const LLVM_LVALUE_FUNCTION {
+ std::vector<DynTypedMatcher> getMatchers(std::index_sequence<Is...>) const & {
return {Matcher<T>(std::get<Is>(Params))...};
}
-#if LLVM_HAS_RVALUE_REFERENCE_THIS
template <typename T, std::size_t... Is>
std::vector<DynTypedMatcher> getMatchers(std::index_sequence<Is...>) && {
return {Matcher<T>(std::get<Is>(std::move(Params)))...};
}
-#endif
const DynTypedMatcher::VariadicOperator Op;
std::tuple<Ps...> Params;
@@ -1402,20 +1395,6 @@ struct VariadicOperatorMatcherFunc {
}
};
-template <typename F, typename Tuple, std::size_t... I>
-constexpr auto applyMatcherImpl(F &&f, Tuple &&args,
- std::index_sequence<I...>) {
- return std::forward<F>(f)(std::get<I>(std::forward<Tuple>(args))...);
-}
-
-template <typename F, typename Tuple>
-constexpr auto applyMatcher(F &&f, Tuple &&args) {
- return applyMatcherImpl(
- std::forward<F>(f), std::forward<Tuple>(args),
- std::make_index_sequence<
- std::tuple_size<typename std::decay<Tuple>::type>::value>());
-}
-
template <typename T, bool IsBaseOf, typename Head, typename Tail>
struct GetCladeImpl {
using Type = Head;
@@ -1434,12 +1413,11 @@ struct MapAnyOfMatcherImpl {
template <typename... InnerMatchers>
BindableMatcher<CladeType>
operator()(InnerMatchers &&... InnerMatcher) const {
- // TODO: Use std::apply from c++17
- return VariadicAllOfMatcher<CladeType>()(applyMatcher(
+ return VariadicAllOfMatcher<CladeType>()(std::apply(
internal::VariadicOperatorMatcherFunc<
0, std::numeric_limits<unsigned>::max()>{
internal::DynTypedMatcher::VO_AnyOf},
- applyMatcher(
+ std::apply(
[&](auto... Matcher) {
return std::make_tuple(Matcher(InnerMatcher...)...);
},
@@ -1472,15 +1450,13 @@ public:
using ReturnTypes = ToTypes;
- template <typename To> operator Matcher<To>() const LLVM_LVALUE_FUNCTION {
+ template <typename To> operator Matcher<To>() const & {
return Matcher<To>(new ArgumentAdapterT<To, T>(InnerMatcher));
}
-#if LLVM_HAS_RVALUE_REFERENCE_THIS
template <typename To> operator Matcher<To>() && {
return Matcher<To>(new ArgumentAdapterT<To, T>(std::move(InnerMatcher)));
}
-#endif
private:
Matcher<T> InnerMatcher;
@@ -1539,7 +1515,7 @@ public:
Builder);
}
- llvm::Optional<clang::TraversalKind> TraversalKind() const override {
+ std::optional<clang::TraversalKind> TraversalKind() const override {
if (auto NestedKind = this->InnerMatcher.getTraversalKind())
return NestedKind;
return Traversal;
@@ -1551,21 +1527,19 @@ public:
TraversalWrapper(TraversalKind TK, const MatcherType &InnerMatcher)
: TK(TK), InnerMatcher(InnerMatcher) {}
- template <typename T> operator Matcher<T>() const LLVM_LVALUE_FUNCTION {
+ template <typename T> operator Matcher<T>() const & {
return internal::DynTypedMatcher::constructRestrictedWrapper(
new internal::TraversalMatcher<T>(TK, InnerMatcher),
ASTNodeKind::getFromNodeKind<T>())
.template unconditionalConvertTo<T>();
}
-#if LLVM_HAS_RVALUE_REFERENCE_THIS
template <typename T> operator Matcher<T>() && {
return internal::DynTypedMatcher::constructRestrictedWrapper(
new internal::TraversalMatcher<T>(TK, std::move(InnerMatcher)),
ASTNodeKind::getFromNodeKind<T>())
.template unconditionalConvertTo<T>();
}
-#endif
private:
TraversalKind TK;
@@ -1592,20 +1566,18 @@ public:
using ReturnTypes = typename ExtractFunctionArgMeta<ReturnTypesF>::type;
- template <typename T> operator Matcher<T>() const LLVM_LVALUE_FUNCTION {
+ template <typename T> operator Matcher<T>() const & {
static_assert(TypeListContainsSuperOf<ReturnTypes, T>::value,
"right polymorphic conversion");
return Matcher<T>(new_from_tuple<MatcherT<T, ParamTypes...>>(Params));
}
-#if LLVM_HAS_RVALUE_REFERENCE_THIS
template <typename T> operator Matcher<T>() && {
static_assert(TypeListContainsSuperOf<ReturnTypes, T>::value,
"right polymorphic conversion");
return Matcher<T>(
new_from_tuple<MatcherT<T, ParamTypes...>>(std::move(Params)));
}
-#endif
private:
std::tuple<ParamTypes...> Params;
@@ -1969,7 +1941,7 @@ getTemplateSpecializationArgs(const ClassTemplateSpecializationDecl &D) {
inline ArrayRef<TemplateArgument>
getTemplateSpecializationArgs(const TemplateSpecializationType &T) {
- return llvm::makeArrayRef(T.getArgs(), T.getNumArgs());
+ return T.template_arguments();
}
inline ArrayRef<TemplateArgument>
@@ -1993,27 +1965,27 @@ template <typename Ty, typename Enable = void> struct GetBodyMatcher {
};
template <typename Ty>
-struct GetBodyMatcher<Ty, typename std::enable_if<
- std::is_base_of<FunctionDecl, Ty>::value>::type> {
+struct GetBodyMatcher<
+ Ty, std::enable_if_t<std::is_base_of<FunctionDecl, Ty>::value>> {
static const Stmt *get(const Ty &Node) {
return Node.doesThisDeclarationHaveABody() ? Node.getBody() : nullptr;
}
};
template <typename NodeType>
-inline Optional<BinaryOperatorKind>
+inline std::optional<BinaryOperatorKind>
equivalentBinaryOperator(const NodeType &Node) {
return Node.getOpcode();
}
template <>
-inline Optional<BinaryOperatorKind>
+inline std::optional<BinaryOperatorKind>
equivalentBinaryOperator<CXXOperatorCallExpr>(const CXXOperatorCallExpr &Node) {
if (Node.getNumArgs() != 2)
- return None;
+ return std::nullopt;
switch (Node.getOperator()) {
default:
- return None;
+ return std::nullopt;
case OO_ArrowStar:
return BO_PtrMemI;
case OO_Star:
@@ -2082,20 +2054,20 @@ equivalentBinaryOperator<CXXOperatorCallExpr>(const CXXOperatorCallExpr &Node) {
}
template <typename NodeType>
-inline Optional<UnaryOperatorKind>
+inline std::optional<UnaryOperatorKind>
equivalentUnaryOperator(const NodeType &Node) {
return Node.getOpcode();
}
template <>
-inline Optional<UnaryOperatorKind>
+inline std::optional<UnaryOperatorKind>
equivalentUnaryOperator<CXXOperatorCallExpr>(const CXXOperatorCallExpr &Node) {
if (Node.getNumArgs() != 1 && Node.getOperator() != OO_PlusPlus &&
Node.getOperator() != OO_MinusMinus)
- return None;
+ return std::nullopt;
switch (Node.getOperator()) {
default:
- return None;
+ return std::nullopt;
case OO_Plus:
return UO_Plus;
case OO_Minus:
@@ -2111,13 +2083,13 @@ equivalentUnaryOperator<CXXOperatorCallExpr>(const CXXOperatorCallExpr &Node) {
case OO_PlusPlus: {
const auto *FD = Node.getDirectCallee();
if (!FD)
- return None;
+ return std::nullopt;
return FD->getNumParams() > 0 ? UO_PostInc : UO_PreInc;
}
case OO_MinusMinus: {
const auto *FD = Node.getDirectCallee();
if (!FD)
- return None;
+ return std::nullopt;
return FD->getNumParams() > 0 ? UO_PostDec : UO_PreDec;
}
case OO_Coawait:
@@ -2200,29 +2172,32 @@ CompoundStmtMatcher<StmtExpr>::get(const StmtExpr &Node) {
/// location (in the chain of expansions) at which \p MacroName was
/// expanded. Since the macro may have been expanded inside a series of
/// expansions, that location may itself be a MacroID.
-llvm::Optional<SourceLocation>
-getExpansionLocOfMacro(StringRef MacroName, SourceLocation Loc,
- const ASTContext &Context);
+std::optional<SourceLocation> getExpansionLocOfMacro(StringRef MacroName,
+ SourceLocation Loc,
+ const ASTContext &Context);
-inline Optional<StringRef> getOpName(const UnaryOperator &Node) {
+inline std::optional<StringRef> getOpName(const UnaryOperator &Node) {
return Node.getOpcodeStr(Node.getOpcode());
}
-inline Optional<StringRef> getOpName(const BinaryOperator &Node) {
+inline std::optional<StringRef> getOpName(const BinaryOperator &Node) {
return Node.getOpcodeStr();
}
inline StringRef getOpName(const CXXRewrittenBinaryOperator &Node) {
return Node.getOpcodeStr();
}
-inline Optional<StringRef> getOpName(const CXXOperatorCallExpr &Node) {
+inline std::optional<StringRef> getOpName(const CXXOperatorCallExpr &Node) {
auto optBinaryOpcode = equivalentBinaryOperator(Node);
if (!optBinaryOpcode) {
auto optUnaryOpcode = equivalentUnaryOperator(Node);
if (!optUnaryOpcode)
- return None;
+ return std::nullopt;
return UnaryOperator::getOpcodeStr(*optUnaryOpcode);
}
return BinaryOperator::getOpcodeStr(*optBinaryOpcode);
}
+inline StringRef getOpName(const CXXFoldExpr &Node) {
+ return BinaryOperator::getOpcodeStr(Node.getOperator());
+}
/// Matches overloaded operators with a specific name.
///
@@ -2244,30 +2219,26 @@ public:
: SingleNodeMatcherInterface<T>(), Names(std::move(Names)) {}
bool matchesNode(const T &Node) const override {
- Optional<StringRef> OptOpName = getOpName(Node);
- if (!OptOpName)
- return false;
- return llvm::any_of(Names, [OpName = *OptOpName](const std::string &Name) {
- return Name == OpName;
- });
+ std::optional<StringRef> OptOpName = getOpName(Node);
+ return OptOpName && llvm::is_contained(Names, *OptOpName);
}
private:
- static Optional<StringRef> getOpName(const UnaryOperator &Node) {
+ static std::optional<StringRef> getOpName(const UnaryOperator &Node) {
return Node.getOpcodeStr(Node.getOpcode());
}
- static Optional<StringRef> getOpName(const BinaryOperator &Node) {
+ static std::optional<StringRef> getOpName(const BinaryOperator &Node) {
return Node.getOpcodeStr();
}
static StringRef getOpName(const CXXRewrittenBinaryOperator &Node) {
return Node.getOpcodeStr();
}
- static Optional<StringRef> getOpName(const CXXOperatorCallExpr &Node) {
+ static std::optional<StringRef> getOpName(const CXXOperatorCallExpr &Node) {
auto optBinaryOpcode = equivalentBinaryOperator(Node);
if (!optBinaryOpcode) {
auto optUnaryOpcode = equivalentUnaryOperator(Node);
if (!optUnaryOpcode)
- return None;
+ return std::nullopt;
return UnaryOperator::getOpcodeStr(*optUnaryOpcode);
}
return BinaryOperator::getOpcodeStr(*optBinaryOpcode);
@@ -2304,6 +2275,26 @@ std::shared_ptr<llvm::Regex> createAndVerifyRegex(StringRef Regex,
llvm::Regex::RegexFlags Flags,
StringRef MatcherID);
+inline bool
+MatchTemplateArgLocAt(const DeclRefExpr &Node, unsigned int Index,
+ internal::Matcher<TemplateArgumentLoc> InnerMatcher,
+ internal::ASTMatchFinder *Finder,
+ internal::BoundNodesTreeBuilder *Builder) {
+ llvm::ArrayRef<TemplateArgumentLoc> ArgLocs = Node.template_arguments();
+ return Index < ArgLocs.size() &&
+ InnerMatcher.matches(ArgLocs[Index], Finder, Builder);
+}
+
+inline bool
+MatchTemplateArgLocAt(const TemplateSpecializationTypeLoc &Node,
+ unsigned int Index,
+ internal::Matcher<TemplateArgumentLoc> InnerMatcher,
+ internal::ASTMatchFinder *Finder,
+ internal::BoundNodesTreeBuilder *Builder) {
+ return !Node.isNull() && Index < Node.getNumArgs() &&
+ InnerMatcher.matches(Node.getArgLoc(Index), Finder, Builder);
+}
+
} // namespace internal
} // namespace ast_matchers
diff --git a/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h b/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h
index 10625311c1a5..960d59a747fc 100644
--- a/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h
+++ b/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h
@@ -28,9 +28,9 @@ namespace ast_matchers {
namespace dynamic {
struct SourceLocation {
- SourceLocation() : Line(), Column() {}
- unsigned Line;
- unsigned Column;
+ SourceLocation() = default;
+ unsigned Line = 0;
+ unsigned Column = 0;
};
struct SourceRange {
@@ -40,7 +40,7 @@ struct SourceRange {
/// A VariantValue instance annotated with its parser context.
struct ParserValue {
- ParserValue() : Text(), Range(), Value() {}
+ ParserValue() {}
StringRef Text;
SourceRange Range;
VariantValue Value;
@@ -186,4 +186,4 @@ private:
} // namespace ast_matchers
} // namespace clang
-#endif // LLVM_CLANG_AST_MATCHERS_DYNAMIC_DIAGNOSTICS_H
+#endif // LLVM_CLANG_ASTMATCHERS_DYNAMIC_DIAGNOSTICS_H
diff --git a/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Parser.h b/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Parser.h
index af370d83782a..7adaef5054b5 100644
--- a/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Parser.h
+++ b/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Parser.h
@@ -39,9 +39,9 @@
#include "clang/ASTMatchers/Dynamic/Registry.h"
#include "clang/ASTMatchers/Dynamic/VariantValue.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
+#include <optional>
#include <utility>
#include <vector>
@@ -95,9 +95,9 @@ public:
///
/// \param MatcherName The matcher name found by the parser.
///
- /// \return The matcher constructor, or Optional<MatcherCtor>() if not
+ /// \return The matcher constructor, or std::optional<MatcherCtor>() if not
/// found.
- virtual llvm::Optional<MatcherCtor>
+ virtual std::optional<MatcherCtor>
lookupMatcherCtor(StringRef MatcherName) = 0;
virtual bool isBuilderMatcher(MatcherCtor) const = 0;
@@ -138,7 +138,7 @@ public:
public:
~RegistrySema() override;
- llvm::Optional<MatcherCtor>
+ std::optional<MatcherCtor>
lookupMatcherCtor(StringRef MatcherName) override;
VariantMatcher actOnMatcherExpression(MatcherCtor Ctor,
@@ -180,14 +180,14 @@ public:
/// Optional if an error occurred. In that case, \c Error will contain a
/// description of the error.
/// The caller takes ownership of the DynTypedMatcher object returned.
- static llvm::Optional<DynTypedMatcher>
+ static std::optional<DynTypedMatcher>
parseMatcherExpression(StringRef &MatcherCode, Sema *S,
const NamedValueMap *NamedValues, Diagnostics *Error);
- static llvm::Optional<DynTypedMatcher>
+ static std::optional<DynTypedMatcher>
parseMatcherExpression(StringRef &MatcherCode, Sema *S, Diagnostics *Error) {
return parseMatcherExpression(MatcherCode, S, nullptr, Error);
}
- static llvm::Optional<DynTypedMatcher>
+ static std::optional<DynTypedMatcher>
parseMatcherExpression(StringRef &MatcherCode, Diagnostics *Error) {
return parseMatcherExpression(MatcherCode, nullptr, Error);
}
@@ -254,7 +254,7 @@ private:
const TokenInfo &OpenToken, VariantValue *Value);
bool parseMatcherExpressionImpl(const TokenInfo &NameToken,
const TokenInfo &OpenToken,
- llvm::Optional<MatcherCtor> Ctor,
+ std::optional<MatcherCtor> Ctor,
VariantValue *Value);
bool parseIdentifierPrefixImpl(VariantValue *Value);
@@ -280,4 +280,4 @@ private:
} // namespace ast_matchers
} // namespace clang
-#endif // LLVM_CLANG_AST_MATCHERS_DYNAMIC_PARSER_H
+#endif // LLVM_CLANG_ASTMATCHERS_DYNAMIC_PARSER_H
diff --git a/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Registry.h b/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Registry.h
index f91f5fe01c4e..50711addc6e3 100644
--- a/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Registry.h
+++ b/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/Registry.h
@@ -19,8 +19,8 @@
#include "clang/ASTMatchers/Dynamic/Diagnostics.h"
#include "clang/ASTMatchers/Dynamic/VariantValue.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
+#include <optional>
#include <string>
#include <utility>
#include <vector>
@@ -94,8 +94,8 @@ public:
/// Look up a matcher in the registry by name,
///
/// \return An opaque value which may be used to refer to the matcher
- /// constructor, or Optional<MatcherCtor>() if not found.
- static llvm::Optional<MatcherCtor> lookupMatcherCtor(StringRef MatcherName);
+ /// constructor, or std::optional<MatcherCtor>() if not found.
+ static std::optional<MatcherCtor> lookupMatcherCtor(StringRef MatcherName);
/// Compute the list of completion types for \p Context.
///
@@ -157,4 +157,4 @@ public:
} // namespace ast_matchers
} // namespace clang
-#endif // LLVM_CLANG_AST_MATCHERS_DYNAMIC_REGISTRY_H
+#endif // LLVM_CLANG_ASTMATCHERS_DYNAMIC_REGISTRY_H
diff --git a/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/VariantValue.h b/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/VariantValue.h
index 5b3f8a7ca5eb..c99d32f5f784 100644
--- a/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/VariantValue.h
+++ b/contrib/llvm-project/clang/include/clang/ASTMatchers/Dynamic/VariantValue.h
@@ -20,8 +20,8 @@
#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/ASTMatchers/ASTMatchersInternal.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
-#include "llvm/ADT/Optional.h"
#include <memory>
+#include <optional>
#include <vector>
namespace clang {
@@ -117,8 +117,8 @@ class VariantMatcher {
/// Constructs a variadic typed matcher from \p InnerMatchers.
/// Will try to convert each inner matcher to the destination type and
- /// return llvm::None if it fails to do so.
- llvm::Optional<DynTypedMatcher>
+ /// return std::nullopt if it fails to do so.
+ std::optional<DynTypedMatcher>
constructVariadicOperator(DynTypedMatcher::VariadicOperator Op,
ArrayRef<VariantMatcher> InnerMatchers) const;
@@ -132,9 +132,9 @@ class VariantMatcher {
class Payload {
public:
virtual ~Payload();
- virtual llvm::Optional<DynTypedMatcher> getSingleMatcher() const = 0;
+ virtual std::optional<DynTypedMatcher> getSingleMatcher() const = 0;
virtual std::string getTypeAsString() const = 0;
- virtual llvm::Optional<DynTypedMatcher>
+ virtual std::optional<DynTypedMatcher>
getTypedMatcher(const MatcherOps &Ops) const = 0;
virtual bool isConvertibleTo(ASTNodeKind Kind,
unsigned *Specificity) const = 0;
@@ -171,7 +171,7 @@ public:
/// \returns the matcher, if there is only one matcher. An empty Optional, if
/// the underlying matcher is a polymorphic matcher with more than one
/// representation.
- llvm::Optional<DynTypedMatcher> getSingleMatcher() const;
+ std::optional<DynTypedMatcher> getSingleMatcher() const;
/// Determines if the contained matcher can be converted to
/// \c Matcher<T>.
@@ -188,7 +188,7 @@ public:
bool hasTypedMatcher(ASTNodeKind NK) const {
if (!Value) return false;
- return Value->getTypedMatcher(MatcherOps(NK)).hasValue();
+ return Value->getTypedMatcher(MatcherOps(NK)).has_value();
}
/// Determines if the contained matcher can be converted to \p Kind.
@@ -356,4 +356,4 @@ private:
} // end namespace ast_matchers
} // end namespace clang
-#endif // LLVM_CLANG_AST_MATCHERS_DYNAMIC_VARIANT_VALUE_H
+#endif // LLVM_CLANG_ASTMATCHERS_DYNAMIC_VARIANTVALUE_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/CalledOnceCheck.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/CalledOnceCheck.h
index a0c767bf92d2..6a1528a2da24 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/CalledOnceCheck.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/CalledOnceCheck.h
@@ -20,7 +20,6 @@ class AnalysisDeclContext;
class BlockDecl;
class CFG;
class Decl;
-class DeclContext;
class Expr;
class ParmVarDecl;
class Stmt;
@@ -29,7 +28,7 @@ class Stmt;
/// \enum IfThen -- then branch of the if statement has no call.
/// \enum IfElse -- else branch of the if statement has no call.
/// \enum Switch -- one of the switch cases doesn't have a call.
-/// \enum SwitchSkipped -- there is no call if none of the cases appies.
+/// \enum SwitchSkipped -- there is no call if none of the cases applies.
/// \enum LoopEntered -- no call when the loop is entered.
/// \enum LoopSkipped -- no call when the loop is not entered.
/// \enum FallbackReason -- fallback case when we were not able to figure out
@@ -80,7 +79,7 @@ public:
/// the path containing the call and not containing the call. This helps us
/// to pinpoint a bad path for the user.
/// \param Parameter -- parameter that should be called once.
- /// \param Function -- function declaration where the problem occured.
+ /// \param Function -- function declaration where the problem occurred.
/// \param Where -- the least common ancestor statement.
/// \param Reason -- a reason describing the path without a call.
/// \param IsCalledDirectly -- true, if parameter actually gets called on
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/Consumed.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/Consumed.h
index dec1ae3b2b4b..3e2788cac3c9 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/Consumed.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/Consumed.h
@@ -153,8 +153,11 @@ namespace consumed {
public:
ConsumedStateMap() = default;
ConsumedStateMap(const ConsumedStateMap &Other)
- : Reachable(Other.Reachable), From(Other.From), VarMap(Other.VarMap),
- TmpMap() {}
+ : Reachable(Other.Reachable), From(Other.From), VarMap(Other.VarMap) {}
+
+ // The copy assignment operator is defined as deleted pending further
+ // motivation.
+ ConsumedStateMap &operator=(const ConsumedStateMap &) = delete;
/// Warn if any of the parameters being tracked are not in the state
/// they were declared to be in upon return from a function.
@@ -241,7 +244,7 @@ namespace consumed {
ConsumedBlockInfo BlockInfo;
std::unique_ptr<ConsumedStateMap> CurrStates;
- ConsumedState ExpectedReturnState;
+ ConsumedState ExpectedReturnState = CS_None;
void determineExpectedReturnState(AnalysisDeclContext &AC,
const FunctionDecl *D);
@@ -259,7 +262,7 @@ namespace consumed {
/// Check a function's CFG for consumed violations.
///
/// We traverse the blocks in the CFG, keeping track of the state of each
- /// value who's type has uniquness annotations. If methods are invoked in
+ /// value who's type has uniqueness annotations. If methods are invoked in
/// the wrong state a warning is issued. Each block in the CFG is traversed
/// exactly once.
void run(AnalysisDeclContext &AC);
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/Dominators.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/Dominators.h
index 25a5ba9d83fe..7dd54c5ce262 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/Dominators.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/Dominators.h
@@ -193,7 +193,7 @@ namespace IDFCalculatorDetail {
/// Specialize ChildrenGetterTy to skip nullpointer successors.
template <bool IsPostDom>
struct ChildrenGetterTy<clang::CFGBlock, IsPostDom> {
- using NodeRef = typename GraphTraits<clang::CFGBlock>::NodeRef;
+ using NodeRef = typename GraphTraits<clang::CFGBlock *>::NodeRef;
using ChildrenTy = SmallVector<NodeRef, 8>;
ChildrenTy get(const NodeRef &N) {
@@ -202,7 +202,7 @@ struct ChildrenGetterTy<clang::CFGBlock, IsPostDom> {
auto Children = children<OrderedNodeTy>(N);
ChildrenTy Ret{Children.begin(), Children.end()};
- Ret.erase(std::remove(Ret.begin(), Ret.end(), nullptr), Ret.end());
+ llvm::erase(Ret, nullptr);
return Ret;
}
};
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ExprMutationAnalyzer.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ExprMutationAnalyzer.h
index 9397c5df78ab..1ceef944fbc3 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ExprMutationAnalyzer.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ExprMutationAnalyzer.h
@@ -38,6 +38,8 @@ public:
}
const Stmt *findPointeeMutation(const Expr *Exp);
const Stmt *findPointeeMutation(const Decl *Dec);
+ static bool isUnevaluated(const Stmt *Smt, const Stmt &Stm,
+ ASTContext &Context);
private:
using MutationFinder = const Stmt *(ExprMutationAnalyzer::*)(const Expr *);
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/IntervalPartition.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/IntervalPartition.h
new file mode 100644
index 000000000000..28a7afad41a7
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/IntervalPartition.h
@@ -0,0 +1,123 @@
+//===- IntervalPartition.h - CFG Partitioning into Intervals -----*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functionality for partitioning a CFG into intervals and
+// building a weak topological order (WTO) of the nodes, based on the
+// partitioning. The concepts and implementations for the graph partitioning
+// are based on the presentation in "Compilers" by Aho, Sethi and Ullman (the
+// "dragon book"), pages 664-666. The concepts around WTOs is taken from the
+// paper "Efficient chaotic iteration strategies with widenings," by
+// F. Bourdoncle ([Bourdoncle1993]).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_ANALYSES_INTERVALPARTITION_H
+#define LLVM_CLANG_ANALYSIS_ANALYSES_INTERVALPARTITION_H
+
+#include "clang/Analysis/CFG.h"
+#include "llvm/ADT/DenseSet.h"
+#include <deque>
+#include <memory>
+#include <vector>
+
+namespace clang {
+/// A _weak topological ordering_ (WTO) of CFG nodes provides a total order over
+/// the CFG (defined in `WTOCompare`, below), which can guide the order in which
+/// to visit nodes in fixpoint computations over the CFG.
+///
+/// Roughly, a WTO a) groups the blocks so that loop heads are grouped with
+/// their bodies and any nodes they dominate after the loop and b) orders the
+/// groups topologically. As a result, the blocks in a series of loops are
+/// ordered such that all nodes in loop `i` are earlier in the order than nodes
+/// in loop `j`. This ordering, when combined with widening, bounds the number
+/// of times a node must be visited for a dataflow algorithm to reach a
+/// fixpoint. For the precise definition of a WTO and its properties, see
+/// [Bourdoncle1993].
+///
+/// Here, we provide a simplified WTO which drops its nesting structure,
+/// maintaining only the ordering itself. The ordering is built from the limit
+/// flow graph of `Cfg` (derived from iteratively partitioning it into
+/// intervals) if and only if it is reducible (its limit flow graph has one
+/// node). Returns `nullopt` when `Cfg` is not reducible.
+///
+/// This WTO construction is described in Section 4.2 of [Bourdoncle1993].
+using WeakTopologicalOrdering = std::vector<const CFGBlock *>;
+std::optional<WeakTopologicalOrdering> getIntervalWTO(const CFG &Cfg);
+
+struct WTOCompare {
+ WTOCompare(const WeakTopologicalOrdering &WTO);
+
+ bool operator()(const CFGBlock *B1, const CFGBlock *B2) const {
+ auto ID1 = B1->getBlockID();
+ auto ID2 = B2->getBlockID();
+
+ unsigned V1 = ID1 >= BlockOrder.size() ? 0 : BlockOrder[ID1];
+ unsigned V2 = ID2 >= BlockOrder.size() ? 0 : BlockOrder[ID2];
+ return V1 > V2;
+ }
+
+ std::vector<unsigned> BlockOrder;
+};
+
+namespace internal {
+// An interval is a strongly-connected component of the CFG along with a
+// trailing acyclic structure. An interval can be constructed directly from CFG
+// blocks or from a graph of other intervals. Each interval has one _header_
+// block, from which the interval is built. The _header_ of the interval is
+// either the graph's entry block or has at least one predecessor outside of the
+// interval. All other blocks in the interval have only predecessors also in the
+// interval.
+struct CFGIntervalNode {
+ CFGIntervalNode() = default;
+ CFGIntervalNode(unsigned ID) : ID(ID) {}
+
+ CFGIntervalNode(unsigned ID, std::vector<const CFGBlock *> Nodes)
+ : ID(ID), Nodes(std::move(Nodes)) {}
+
+ const llvm::SmallDenseSet<const CFGIntervalNode *> &preds() const {
+ return Predecessors;
+ }
+ const llvm::SmallDenseSet<const CFGIntervalNode *> &succs() const {
+ return Successors;
+ }
+
+ // Unique identifier of this interval relative to other intervals in the same
+ // graph.
+ unsigned ID;
+
+ std::vector<const CFGBlock *> Nodes;
+
+ // Predessor intervals of this interval: those intervals for which there
+ // exists an edge from a node in that other interval to the head of this
+ // interval.
+ llvm::SmallDenseSet<const CFGIntervalNode *> Predecessors;
+
+ // Successor intervals of this interval: those intervals for which there
+ // exists an edge from a node in this interval to the head of that other
+ // interval.
+ llvm::SmallDenseSet<const CFGIntervalNode *> Successors;
+};
+
+// Since graphs are built from pointers to nodes, we use a deque to ensure
+// pointer stability.
+using CFGIntervalGraph = std::deque<CFGIntervalNode>;
+
+std::vector<const CFGBlock *> buildInterval(const CFGBlock *Header);
+
+// Partitions `Cfg` into intervals and constructs the graph of the intervals
+// based on the edges between nodes in these intervals.
+CFGIntervalGraph partitionIntoIntervals(const CFG &Cfg);
+
+// (Further) partitions `Graph` into intervals and constructs the graph of the
+// intervals based on the edges between nodes (themselves intervals) in these
+// intervals.
+CFGIntervalGraph partitionIntoIntervals(const CFGIntervalGraph &Graph);
+} // namespace internal
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_ANALYSES_INTERVALPARTITION_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/PostOrderCFGView.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/PostOrderCFGView.h
index 100029894560..4356834adf76 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/PostOrderCFGView.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/PostOrderCFGView.h
@@ -18,7 +18,6 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/None.h"
#include "llvm/ADT/PostOrderIterator.h"
#include <utility>
#include <vector>
@@ -48,17 +47,18 @@ public:
/// Set the bit associated with a particular CFGBlock.
/// This is the important method for the SetType template parameter.
- std::pair<llvm::NoneType, bool> insert(const CFGBlock *Block) {
+ std::pair<std::nullopt_t, bool> insert(const CFGBlock *Block) {
// Note that insert() is called by po_iterator, which doesn't check to
// make sure that Block is non-null. Moreover, the CFGBlock iterator will
// occasionally hand out null pointers for pruned edges, so we catch those
// here.
if (!Block)
- return std::make_pair(None, false); // if an edge is trivially false.
+ return std::make_pair(std::nullopt,
+ false); // if an edge is trivially false.
if (VisitedBlockIDs.test(Block->getBlockID()))
- return std::make_pair(None, false);
+ return std::make_pair(std::nullopt, false);
VisitedBlockIDs.set(Block->getBlockID());
- return std::make_pair(None, true);
+ return std::make_pair(std::nullopt, true);
}
/// Check if the bit for a CFGBlock has been already set.
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ReachableCode.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ReachableCode.h
index 514b9458d331..f1b63f74b6c8 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ReachableCode.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ReachableCode.h
@@ -48,11 +48,9 @@ class Callback {
virtual void anchor();
public:
virtual ~Callback() {}
- virtual void HandleUnreachable(UnreachableKind UK,
- SourceLocation L,
- SourceRange ConditionVal,
- SourceRange R1,
- SourceRange R2) = 0;
+ virtual void HandleUnreachable(UnreachableKind UK, SourceLocation L,
+ SourceRange ConditionVal, SourceRange R1,
+ SourceRange R2, bool HasFallThroughAttr) = 0;
};
/// ScanReachableFromBlock - Mark all blocks reachable from Start.
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafety.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafety.h
index bfa9870a1e1f..0866b09bab29 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafety.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafety.h
@@ -47,7 +47,13 @@ enum ProtectedOperationKind {
POK_PassByRef,
/// Passing a pt-guarded variable by reference.
- POK_PtPassByRef
+ POK_PtPassByRef,
+
+ /// Returning a guarded variable by reference.
+ POK_ReturnByRef,
+
+ /// Returning a pt-guarded variable by reference.
+ POK_PtReturnByRef,
};
/// This enum distinguishes between different kinds of lock actions. For
@@ -98,9 +104,8 @@ public:
virtual ~ThreadSafetyHandler();
/// Warn about lock expressions which fail to resolve to lockable objects.
- /// \param Kind -- the capability's name parameter (role, mutex, etc).
/// \param Loc -- the SourceLocation of the unresolved expression.
- virtual void handleInvalidLockExp(StringRef Kind, SourceLocation Loc) {}
+ virtual void handleInvalidLockExp(SourceLocation Loc) {}
/// Warn about unlock function calls that do not have a prior matching lock
/// expression.
@@ -169,14 +174,12 @@ public:
SourceLocation Loc2) {}
/// Warn when a protected operation occurs while no locks are held.
- /// \param Kind -- the capability's name parameter (role, mutex, etc).
/// \param D -- The decl for the protected variable or function
/// \param POK -- The kind of protected operation (e.g. variable access)
/// \param AK -- The kind of access (i.e. read or write) that occurred
/// \param Loc -- The location of the protected operation.
- virtual void handleNoMutexHeld(StringRef Kind, const NamedDecl *D,
- ProtectedOperationKind POK, AccessKind AK,
- SourceLocation Loc) {}
+ virtual void handleNoMutexHeld(const NamedDecl *D, ProtectedOperationKind POK,
+ AccessKind AK, SourceLocation Loc) {}
/// Warn when a protected operation occurs while the specific mutex protecting
/// the operation is not locked.
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
index 4a58fe870944..13e37ac2b56b 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyCommon.h
@@ -30,6 +30,8 @@
#include "clang/Analysis/CFG.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include <sstream>
@@ -155,7 +157,7 @@ public:
return false;
// Ignore anonymous functions.
- if (!dyn_cast_or_null<NamedDecl>(AC.getDecl()))
+ if (!isa_and_nonnull<NamedDecl>(AC.getDecl()))
return false;
SortedGraph = AC.getAnalysis<PostOrderCFGView>();
@@ -269,28 +271,36 @@ private:
// translateAttrExpr needs it, but that should be moved too.
class CapabilityExpr {
private:
- /// The capability expression.
- const til::SExpr* CapExpr;
+ /// The capability expression and whether it's negated.
+ llvm::PointerIntPair<const til::SExpr *, 1, bool> CapExpr;
- /// True if this is a negative capability.
- bool Negated;
+ /// The kind of capability as specified by @ref CapabilityAttr::getName.
+ StringRef CapKind;
public:
- CapabilityExpr(const til::SExpr *E, bool Neg) : CapExpr(E), Negated(Neg) {}
+ CapabilityExpr() : CapExpr(nullptr, false) {}
+ CapabilityExpr(const til::SExpr *E, StringRef Kind, bool Neg)
+ : CapExpr(E, Neg), CapKind(Kind) {}
- const til::SExpr* sexpr() const { return CapExpr; }
- bool negative() const { return Negated; }
+ // Don't allow implicitly-constructed StringRefs since we'll capture them.
+ template <typename T> CapabilityExpr(const til::SExpr *, T, bool) = delete;
+
+ const til::SExpr *sexpr() const { return CapExpr.getPointer(); }
+ StringRef getKind() const { return CapKind; }
+ bool negative() const { return CapExpr.getInt(); }
CapabilityExpr operator!() const {
- return CapabilityExpr(CapExpr, !Negated);
+ return CapabilityExpr(CapExpr.getPointer(), CapKind, !CapExpr.getInt());
}
bool equals(const CapabilityExpr &other) const {
- return (Negated == other.Negated) && sx::equals(CapExpr, other.CapExpr);
+ return (negative() == other.negative()) &&
+ sx::equals(sexpr(), other.sexpr());
}
bool matches(const CapabilityExpr &other) const {
- return (Negated == other.Negated) && sx::matches(CapExpr, other.CapExpr);
+ return (negative() == other.negative()) &&
+ sx::matches(sexpr(), other.sexpr());
}
bool matchesUniv(const CapabilityExpr &CapE) const {
@@ -298,27 +308,27 @@ public:
}
bool partiallyMatches(const CapabilityExpr &other) const {
- return (Negated == other.Negated) &&
- sx::partiallyMatches(CapExpr, other.CapExpr);
+ return (negative() == other.negative()) &&
+ sx::partiallyMatches(sexpr(), other.sexpr());
}
const ValueDecl* valueDecl() const {
- if (Negated || CapExpr == nullptr)
+ if (negative() || sexpr() == nullptr)
return nullptr;
- if (const auto *P = dyn_cast<til::Project>(CapExpr))
+ if (const auto *P = dyn_cast<til::Project>(sexpr()))
return P->clangDecl();
- if (const auto *P = dyn_cast<til::LiteralPtr>(CapExpr))
+ if (const auto *P = dyn_cast<til::LiteralPtr>(sexpr()))
return P->clangDecl();
return nullptr;
}
std::string toString() const {
- if (Negated)
- return "!" + sx::toString(CapExpr);
- return sx::toString(CapExpr);
+ if (negative())
+ return "!" + sx::toString(sexpr());
+ return sx::toString(sexpr());
}
- bool shouldIgnore() const { return CapExpr == nullptr; }
+ bool shouldIgnore() const { return sexpr() == nullptr; }
bool isInvalid() const { return sexpr() && isa<til::Undefined>(sexpr()); }
@@ -345,13 +355,13 @@ public:
const NamedDecl *AttrDecl;
// Implicit object argument -- e.g. 'this'
- const Expr *SelfArg = nullptr;
+ llvm::PointerUnion<const Expr *, til::SExpr *> SelfArg = nullptr;
// Number of funArgs
unsigned NumArgs = 0;
// Function arguments
- const Expr *const *FunArgs = nullptr;
+ llvm::PointerUnion<const Expr *const *, til::SExpr *> FunArgs = nullptr;
// is Self referred to with -> or .?
bool SelfArrow = false;
@@ -369,10 +379,18 @@ public:
// Translate a clang expression in an attribute to a til::SExpr.
// Constructs the context from D, DeclExp, and SelfDecl.
CapabilityExpr translateAttrExpr(const Expr *AttrExp, const NamedDecl *D,
- const Expr *DeclExp, VarDecl *SelfD=nullptr);
+ const Expr *DeclExp,
+ til::SExpr *Self = nullptr);
CapabilityExpr translateAttrExpr(const Expr *AttrExp, CallingContext *Ctx);
+ // Translate a variable reference.
+ til::LiteralPtr *createVariable(const VarDecl *VD);
+
+ // Create placeholder for this: we don't know the VarDecl on construction yet.
+ std::pair<til::LiteralPtr *, StringRef>
+ createThisPlaceholder(const Expr *Exp);
+
// Translate a clang statement or expression to a TIL expression.
// Also performs substitution of variables; Ctx provides the context.
// Dispatches on the type of S.
@@ -466,8 +484,6 @@ private:
SMap.insert(std::make_pair(S, E));
}
- til::SExpr *getCurrentLVarDefinition(const ValueDecl *VD);
-
til::SExpr *addStatement(til::SExpr *E, const Stmt *S,
const ValueDecl *VD = nullptr);
til::SExpr *lookupVarDecl(const ValueDecl *VD);
@@ -517,4 +533,4 @@ void printSCFG(CFGWalker &Walker);
} // namespace threadSafety
} // namespace clang
-#endif // LLVM_CLANG_THREAD_SAFETY_COMMON_H
+#endif // LLVM_CLANG_ANALYSIS_ANALYSES_THREADSAFETYCOMMON_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyTIL.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyTIL.h
index 77a800c28754..65dd66ee093f 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyTIL.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyTIL.h
@@ -50,8 +50,6 @@
#include "clang/Analysis/Analyses/ThreadSafetyUtil.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
@@ -60,6 +58,7 @@
#include <cstddef>
#include <cstdint>
#include <iterator>
+#include <optional>
#include <string>
#include <utility>
@@ -75,7 +74,7 @@ namespace til {
class BasicBlock;
/// Enum for the different distinct classes of SExpr
-enum TIL_Opcode {
+enum TIL_Opcode : unsigned char {
#define TIL_OPCODE_DEF(X) COP_##X,
#include "ThreadSafetyOps.def"
#undef TIL_OPCODE_DEF
@@ -278,7 +277,7 @@ class SExpr {
public:
SExpr() = delete;
- TIL_Opcode opcode() const { return static_cast<TIL_Opcode>(Opcode); }
+ TIL_Opcode opcode() const { return Opcode; }
// Subclasses of SExpr must define the following:
//
@@ -320,8 +319,9 @@ public:
protected:
SExpr(TIL_Opcode Op) : Opcode(Op) {}
SExpr(const SExpr &E) : Opcode(E.Opcode), Flags(E.Flags) {}
+ SExpr &operator=(const SExpr &) = delete;
- const unsigned char Opcode;
+ const TIL_Opcode Opcode;
unsigned char Reserved = 0;
unsigned short Flags = 0;
unsigned SExprID = 0;
@@ -332,7 +332,7 @@ protected:
namespace ThreadSafetyTIL {
inline bool isTrivial(const SExpr *E) {
- unsigned Op = E->opcode();
+ TIL_Opcode Op = E->opcode();
return Op == COP_Variable || Op == COP_Literal || Op == COP_LiteralPtr;
}
@@ -489,6 +489,10 @@ public:
Undefined(const Stmt *S = nullptr) : SExpr(COP_Undefined), Cstmt(S) {}
Undefined(const Undefined &U) : SExpr(U), Cstmt(U.Cstmt) {}
+ // The copy assignment operator is defined as deleted pending further
+ // motivation.
+ Undefined &operator=(const Undefined &) = delete;
+
static bool classof(const SExpr *E) { return E->opcode() == COP_Undefined; }
template <class V>
@@ -567,6 +571,10 @@ public:
LiteralT(T Dat) : Literal(ValueType::getValueType<T>()), Val(Dat) {}
LiteralT(const LiteralT<T> &L) : Literal(L), Val(L.Val) {}
+ // The copy assignment operator is defined as deleted pending further
+ // motivation.
+ LiteralT &operator=(const LiteralT<T> &) = delete;
+
T value() const { return Val;}
T& value() { return Val; }
@@ -634,15 +642,14 @@ typename V::R_SExpr Literal::traverse(V &Vs, typename V::R_Ctx Ctx) {
/// At compile time, pointer literals are represented by symbolic names.
class LiteralPtr : public SExpr {
public:
- LiteralPtr(const ValueDecl *D) : SExpr(COP_LiteralPtr), Cvdecl(D) {
- assert(D && "ValueDecl must not be null");
- }
+ LiteralPtr(const ValueDecl *D) : SExpr(COP_LiteralPtr), Cvdecl(D) {}
LiteralPtr(const LiteralPtr &) = default;
static bool classof(const SExpr *E) { return E->opcode() == COP_LiteralPtr; }
// The clang declaration for the value that this pointer points to.
const ValueDecl *clangDecl() const { return Cvdecl; }
+ void setClangDecl(const ValueDecl *VD) { Cvdecl = VD; }
template <class V>
typename V::R_SExpr traverse(V &Vs, typename V::R_Ctx Ctx) {
@@ -651,6 +658,8 @@ public:
template <class C>
typename C::CType compare(const LiteralPtr* E, C& Cmp) const {
+ if (!Cvdecl || !E->Cvdecl)
+ return Cmp.comparePointers(this, E);
return Cmp.comparePointers(Cvdecl, E->Cvdecl);
}
@@ -957,7 +966,7 @@ public:
private:
SExpr* Rec;
- mutable llvm::Optional<std::string> SlotName;
+ mutable std::optional<std::string> SlotName;
const ValueDecl *Cvdecl;
};
@@ -1430,9 +1439,7 @@ public:
BasicBlock *elseBlock() { return Branches[1]; }
/// Return the list of basic blocks that this terminator can branch to.
- ArrayRef<BasicBlock*> successors() {
- return llvm::makeArrayRef(Branches);
- }
+ ArrayRef<BasicBlock *> successors() { return llvm::ArrayRef(Branches); }
template <class V>
typename V::R_SExpr traverse(V &Vs, typename V::R_Ctx Ctx) {
@@ -1463,7 +1470,7 @@ public:
static bool classof(const SExpr *E) { return E->opcode() == COP_Return; }
/// Return an empty list.
- ArrayRef<BasicBlock *> successors() { return None; }
+ ArrayRef<BasicBlock *> successors() { return std::nullopt; }
SExpr *returnValue() { return Retval; }
const SExpr *returnValue() const { return Retval; }
@@ -1489,7 +1496,7 @@ inline ArrayRef<BasicBlock*> Terminator::successors() {
case COP_Branch: return cast<Branch>(this)->successors();
case COP_Return: return cast<Return>(this)->successors();
default:
- return None;
+ return std::nullopt;
}
}
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyTraverse.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyTraverse.h
index e81c00d3dddb..6fc55130655a 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyTraverse.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyTraverse.h
@@ -623,7 +623,10 @@ protected:
}
void printLiteralPtr(const LiteralPtr *E, StreamType &SS) {
- SS << E->clangDecl()->getNameAsString();
+ if (const NamedDecl *D = E->clangDecl())
+ SS << D->getNameAsString();
+ else
+ SS << "<temporary>";
}
void printVariable(const Variable *V, StreamType &SS, bool IsVarDecl=false) {
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyUtil.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyUtil.h
index e3b6e61d3026..ac7b24cdb4a6 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyUtil.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/ThreadSafetyUtil.h
@@ -204,11 +204,11 @@ public:
}
llvm::iterator_range<reverse_iterator> reverse() {
- return llvm::make_range(rbegin(), rend());
+ return llvm::reverse(*this);
}
llvm::iterator_range<const_reverse_iterator> reverse() const {
- return llvm::make_range(rbegin(), rend());
+ return llvm::reverse(*this);
}
private:
@@ -240,6 +240,10 @@ class CopyOnWriteVector {
VectorData() = default;
VectorData(const VectorData &VD) : Vect(VD.Vect) {}
+
+ // The copy assignment operator is defined as deleted pending further
+ // motivation.
+ VectorData &operator=(const VectorData &) = delete;
};
public:
@@ -354,4 +358,4 @@ inline std::ostream& operator<<(std::ostream& ss, const StringRef str) {
} // namespace threadSafety
} // namespace clang
-#endif // LLVM_CLANG_THREAD_SAFETY_UTIL_H
+#endif // LLVM_CLANG_ANALYSIS_ANALYSES_THREADSAFETYUTIL_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/UnsafeBufferUsage.h b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/UnsafeBufferUsage.h
new file mode 100644
index 000000000000..b28f2c6b99c5
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/UnsafeBufferUsage.h
@@ -0,0 +1,122 @@
+//===- UnsafeBufferUsage.h - Replace pointers with modern C++ ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an analysis that aids replacing buffer accesses through
+// raw pointers with safer C++ abstractions such as containers and views/spans.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_ANALYSES_UNSAFEBUFFERUSAGE_H
+#define LLVM_CLANG_ANALYSIS_ANALYSES_UNSAFEBUFFERUSAGE_H
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/Stmt.h"
+#include "llvm/Support/Debug.h"
+
+namespace clang {
+
+using VarGrpTy = std::vector<const VarDecl *>;
+using VarGrpRef = ArrayRef<const VarDecl *>;
+
+class VariableGroupsManager {
+public:
+ VariableGroupsManager() = default;
+ virtual ~VariableGroupsManager() = default;
+ /// Returns the set of variables (including `Var`) that need to be fixed
+ /// together in one step.
+ ///
+ /// `Var` must be a variable that needs fix (so it must be in a group).
+ /// `HasParm` is an optional argument that will be set to true if the set of
+ /// variables, where `Var` is in, contains parameters.
+ virtual VarGrpRef getGroupOfVar(const VarDecl *Var,
+ bool *HasParm = nullptr) const =0;
+
+ /// Returns the non-empty group of variables that include parameters of the
+ /// analyzing function, if such a group exists. An empty group, otherwise.
+ virtual VarGrpRef getGroupOfParms() const =0;
+};
+
+/// The interface that lets the caller handle unsafe buffer usage analysis
+/// results by overriding this class's handle... methods.
+class UnsafeBufferUsageHandler {
+#ifndef NDEBUG
+public:
+ // A self-debugging facility that you can use to notify the user when
+ // suggestions or fixits are incomplete.
+ // Uses std::function to avoid computing the message when it won't
+ // actually be displayed.
+ using DebugNote = std::pair<SourceLocation, std::string>;
+ using DebugNoteList = std::vector<DebugNote>;
+ using DebugNoteByVar = std::map<const VarDecl *, DebugNoteList>;
+ DebugNoteByVar DebugNotesByVar;
+#endif
+
+public:
+ UnsafeBufferUsageHandler() = default;
+ virtual ~UnsafeBufferUsageHandler() = default;
+
+ /// This analyses produces large fixits that are organized into lists
+ /// of primitive fixits (individual insertions/removals/replacements).
+ using FixItList = llvm::SmallVectorImpl<FixItHint>;
+
+ /// Invoked when an unsafe operation over raw pointers is found.
+ virtual void handleUnsafeOperation(const Stmt *Operation,
+ bool IsRelatedToDecl, ASTContext &Ctx) = 0;
+
+ /// Invoked when a fix is suggested against a variable. This function groups
+ /// all variables that must be fixed together (i.e their types must be changed
+ /// to the same target type to prevent type mismatches) into a single fixit.
+ ///
+ /// `D` is the declaration of the callable under analysis that owns `Variable`
+ /// and all of its group mates.
+ virtual void handleUnsafeVariableGroup(const VarDecl *Variable,
+ const VariableGroupsManager &VarGrpMgr,
+ FixItList &&Fixes, const Decl *D) = 0;
+
+#ifndef NDEBUG
+public:
+ bool areDebugNotesRequested() {
+ DEBUG_WITH_TYPE("SafeBuffers", return true);
+ return false;
+ }
+
+ void addDebugNoteForVar(const VarDecl *VD, SourceLocation Loc,
+ std::string Text) {
+ if (areDebugNotesRequested())
+ DebugNotesByVar[VD].push_back(std::make_pair(Loc, Text));
+ }
+
+ void clearDebugNotes() {
+ if (areDebugNotesRequested())
+ DebugNotesByVar.clear();
+ }
+#endif
+
+public:
+ /// Returns a reference to the `Preprocessor`:
+ virtual bool isSafeBufferOptOut(const SourceLocation &Loc) const = 0;
+
+ virtual std::string
+ getUnsafeBufferUsageAttributeTextAt(SourceLocation Loc,
+ StringRef WSSuffix = "") const = 0;
+};
+
+// This function invokes the analysis and allows the caller to react to it
+// through the handler class.
+void checkUnsafeBufferUsage(const Decl *D, UnsafeBufferUsageHandler &Handler,
+ bool EmitSuggestions);
+
+namespace internal {
+// Tests if any two `FixItHint`s in `FixIts` conflict. Two `FixItHint`s
+// conflict if they have overlapping source ranges.
+bool anyConflict(const llvm::SmallVectorImpl<FixItHint> &FixIts,
+ const SourceManager &SM);
+} // namespace internal
+} // end namespace clang
+
+#endif /* LLVM_CLANG_ANALYSIS_ANALYSES_UNSAFEBUFFERUSAGE_H */
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def
new file mode 100644
index 000000000000..c97661688365
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def
@@ -0,0 +1,46 @@
+//=- UnsafeBufferUsageGadgets.def - List of ways to use a buffer --*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+/// A gadget is an individual operation in the code that may be of interest to
+/// the UnsafeBufferUsage analysis.
+#ifndef GADGET
+#define GADGET(name)
+#endif
+
+/// Unsafe gadgets correspond to unsafe code patterns that warrant
+/// an immediate warning.
+#ifndef WARNING_GADGET
+#define WARNING_GADGET(name) GADGET(name)
+#endif
+
+/// Safe gadgets correspond to code patterns that aren't unsafe but need to be
+/// properly recognized in order to emit correct warnings and fixes over unsafe
+/// gadgets.
+#ifndef FIXABLE_GADGET
+#define FIXABLE_GADGET(name) GADGET(name)
+#endif
+
+WARNING_GADGET(Increment)
+WARNING_GADGET(Decrement)
+WARNING_GADGET(ArraySubscript)
+WARNING_GADGET(PointerArithmetic)
+WARNING_GADGET(UnsafeBufferUsageAttr)
+WARNING_GADGET(DataInvocation)
+FIXABLE_GADGET(ULCArraySubscript) // `DRE[any]` in an Unspecified Lvalue Context
+FIXABLE_GADGET(DerefSimplePtrArithFixable)
+FIXABLE_GADGET(PointerDereference)
+FIXABLE_GADGET(UPCAddressofArraySubscript) // '&DRE[any]' in an Unspecified Pointer Context
+FIXABLE_GADGET(UPCStandalonePointer)
+FIXABLE_GADGET(UPCPreIncrement) // '++Ptr' in an Unspecified Pointer Context
+FIXABLE_GADGET(UUCAddAssign) // 'Ptr += n' in an Unspecified Untyped Context
+FIXABLE_GADGET(PointerAssignment)
+FIXABLE_GADGET(PointerInit)
+
+#undef FIXABLE_GADGET
+#undef WARNING_GADGET
+#undef GADGET
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/AnalysisDeclContext.h b/contrib/llvm-project/clang/include/clang/Analysis/AnalysisDeclContext.h
index 102970a1d55e..a517a4e757c9 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/AnalysisDeclContext.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/AnalysisDeclContext.h
@@ -229,7 +229,9 @@ private:
protected:
LocationContext(ContextKind k, AnalysisDeclContext *ctx,
const LocationContext *parent, int64_t ID)
- : Kind(k), Ctx(ctx), Parent(parent), ID(ID) {}
+ : Kind(k), Ctx(ctx), Parent(parent), ID(ID) {
+ assert(ctx);
+ }
public:
virtual ~LocationContext();
@@ -238,8 +240,10 @@ public:
int64_t getID() const { return ID; }
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
AnalysisDeclContext *getAnalysisDeclContext() const { return Ctx; }
+ /// It might return null.
const LocationContext *getParent() const { return Parent; }
bool isParentOf(const LocationContext *LC) const;
@@ -327,7 +331,7 @@ public:
unsigned getIndex() const { return Index; }
CFGElement getCallSiteCFGElement() const { return (*Block)[Index]; }
-
+
void Profile(llvm::FoldingSetNodeID &ID) override;
static void Profile(llvm::FoldingSetNodeID &ID, AnalysisDeclContext *ADC,
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/AnyCall.h b/contrib/llvm-project/clang/include/clang/Analysis/AnyCall.h
index 846ff7719ce1..48abce062d13 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/AnyCall.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/AnyCall.h
@@ -10,12 +10,13 @@
//
//===----------------------------------------------------------------------===//
//
-#ifndef LLVM_CLANG_ANALYSIS_ANY_CALL_H
-#define LLVM_CLANG_ANALYSIS_ANY_CALL_H
+#ifndef LLVM_CLANG_ANALYSIS_ANYCALL_H
+#define LLVM_CLANG_ANALYSIS_ANYCALL_H
#include "clang/AST/Decl.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
+#include <optional>
namespace clang {
@@ -108,8 +109,8 @@ public:
}
/// If @c E is a generic call (to ObjC method /function/block/etc),
- /// return a constructed @c AnyCall object. Return None otherwise.
- static Optional<AnyCall> forExpr(const Expr *E) {
+ /// return a constructed @c AnyCall object. Return std::nullopt otherwise.
+ static std::optional<AnyCall> forExpr(const Expr *E) {
if (const auto *ME = dyn_cast<ObjCMessageExpr>(E)) {
return AnyCall(ME);
} else if (const auto *CE = dyn_cast<CallExpr>(E)) {
@@ -123,26 +124,26 @@ public:
} else if (const auto *CXCIE = dyn_cast<CXXInheritedCtorInitExpr>(E)) {
return AnyCall(CXCIE);
} else {
- return None;
+ return std::nullopt;
}
}
/// If @c D is a callable (Objective-C method or a function), return
- /// a constructed @c AnyCall object. Return None otherwise.
+ /// a constructed @c AnyCall object. Return std::nullopt otherwise.
// FIXME: block support.
- static Optional<AnyCall> forDecl(const Decl *D) {
+ static std::optional<AnyCall> forDecl(const Decl *D) {
if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
return AnyCall(FD);
} else if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
return AnyCall(MD);
}
- return None;
+ return std::nullopt;
}
/// \returns formal parameters for direct calls (including virtual calls)
ArrayRef<ParmVarDecl *> parameters() const {
if (!D)
- return None;
+ return std::nullopt;
if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
return FD->parameters();
@@ -151,7 +152,7 @@ public:
} else if (const auto *BD = dyn_cast<BlockDecl>(D)) {
return BD->parameters();
} else {
- return None;
+ return std::nullopt;
}
}
@@ -215,4 +216,4 @@ public:
}
-#endif // LLVM_CLANG_ANALYSIS_ANY_CALL_H
+#endif // LLVM_CLANG_ANALYSIS_ANYCALL_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/BodyFarm.h b/contrib/llvm-project/clang/include/clang/Analysis/BodyFarm.h
index 72607f8839f5..52be29cb7885 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/BodyFarm.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/BodyFarm.h
@@ -11,20 +11,19 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_LIB_ANALYSIS_BODYFARM_H
-#define LLVM_CLANG_LIB_ANALYSIS_BODYFARM_H
+#ifndef LLVM_CLANG_ANALYSIS_BODYFARM_H
+#define LLVM_CLANG_ANALYSIS_BODYFARM_H
#include "clang/AST/DeclBase.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
+#include <optional>
namespace clang {
class ASTContext;
class FunctionDecl;
class ObjCMethodDecl;
-class ObjCPropertyDecl;
class Stmt;
class CodeInjector;
@@ -41,8 +40,11 @@ public:
/// Remove copy constructor to avoid accidental copying.
BodyFarm(const BodyFarm &other) = delete;
+ /// Delete copy assignment operator.
+ BodyFarm &operator=(const BodyFarm &other) = delete;
+
private:
- typedef llvm::DenseMap<const Decl *, Optional<Stmt *>> BodyMap;
+ typedef llvm::DenseMap<const Decl *, std::optional<Stmt *>> BodyMap;
ASTContext &C;
BodyMap Bodies;
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/CFG.h b/contrib/llvm-project/clang/include/clang/Analysis/CFG.h
index 9e32eb8e066a..9f776ca6cc26 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/CFG.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/CFG.h
@@ -14,15 +14,14 @@
#ifndef LLVM_CLANG_ANALYSIS_CFG_H
#define LLVM_CLANG_ANALYSIS_CFG_H
-#include "clang/Analysis/Support/BumpVector.h"
-#include "clang/Analysis/ConstructionContext.h"
+#include "clang/AST/Attr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/Analysis/ConstructionContext.h"
+#include "clang/Analysis/Support/BumpVector.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/GraphTraits.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Allocator.h"
@@ -32,6 +31,7 @@
#include <cstddef>
#include <iterator>
#include <memory>
+#include <optional>
#include <vector>
namespace clang {
@@ -75,7 +75,8 @@ public:
MemberDtor,
TemporaryDtor,
DTOR_BEGIN = AutomaticObjectDtor,
- DTOR_END = TemporaryDtor
+ DTOR_END = TemporaryDtor,
+ CleanupFunction,
};
protected:
@@ -103,12 +104,11 @@ public:
return t;
}
- /// Convert to the specified CFGElement type, returning None if this
+ /// Convert to the specified CFGElement type, returning std::nullopt if this
/// CFGElement is not of the desired type.
- template<typename T>
- Optional<T> getAs() const {
+ template <typename T> std::optional<T> getAs() const {
if (!T::isKind(*this))
- return None;
+ return std::nullopt;
T t;
CFGElement& e = t;
e = *this;
@@ -131,7 +131,7 @@ public:
class CFGStmt : public CFGElement {
public:
- explicit CFGStmt(Stmt *S, Kind K = Statement) : CFGElement(K, S) {
+ explicit CFGStmt(const Stmt *S, Kind K = Statement) : CFGElement(K, S) {
assert(isKind(*this));
}
@@ -155,7 +155,8 @@ protected:
/// this is only used by the analyzer's CFG.
class CFGConstructor : public CFGStmt {
public:
- explicit CFGConstructor(CXXConstructExpr *CE, const ConstructionContext *C)
+ explicit CFGConstructor(const CXXConstructExpr *CE,
+ const ConstructionContext *C)
: CFGStmt(CE, Constructor) {
assert(C);
Data2.setPointer(const_cast<ConstructionContext *>(C));
@@ -185,7 +186,7 @@ class CFGCXXRecordTypedCall : public CFGStmt {
public:
/// Returns true when call expression \p CE needs to be represented
/// by CFGCXXRecordTypedCall, as opposed to a regular CFGStmt.
- static bool isCXXRecordTypedCall(Expr *E) {
+ static bool isCXXRecordTypedCall(const Expr *E) {
assert(isa<CallExpr>(E) || isa<ObjCMessageExpr>(E));
// There is no such thing as reference-type expression. If the function
// returns a reference, it'll return the respective lvalue or xvalue
@@ -194,7 +195,7 @@ public:
E->getType().getCanonicalType()->getAsCXXRecordDecl();
}
- explicit CFGCXXRecordTypedCall(Expr *E, const ConstructionContext *C)
+ explicit CFGCXXRecordTypedCall(const Expr *E, const ConstructionContext *C)
: CFGStmt(E, CXXRecordTypedCall) {
assert(isCXXRecordTypedCall(E));
assert(C && (isa<TemporaryObjectConstructionContext>(C) ||
@@ -202,7 +203,8 @@ public:
isa<ReturnedValueConstructionContext>(C) ||
isa<VariableConstructionContext>(C) ||
isa<ConstructorInitializerConstructionContext>(C) ||
- isa<ArgumentConstructionContext>(C)));
+ isa<ArgumentConstructionContext>(C) ||
+ isa<LambdaCaptureConstructionContext>(C)));
Data2.setPointer(const_cast<ConstructionContext *>(C));
}
@@ -224,7 +226,7 @@ private:
/// list.
class CFGInitializer : public CFGElement {
public:
- explicit CFGInitializer(CXXCtorInitializer *initializer)
+ explicit CFGInitializer(const CXXCtorInitializer *initializer)
: CFGElement(Initializer, initializer) {}
CXXCtorInitializer* getInitializer() const {
@@ -263,7 +265,7 @@ private:
};
/// Represents the point where a loop ends.
-/// This element is is only produced when building the CFG for the static
+/// This element is only produced when building the CFG for the static
/// analyzer and hidden behind the 'cfg-loopexit' analyzer config flag.
///
/// Note: a loop exit element can be reached even when the loop body was never
@@ -383,6 +385,32 @@ private:
}
};
+class CFGCleanupFunction final : public CFGElement {
+public:
+ CFGCleanupFunction() = default;
+ CFGCleanupFunction(const VarDecl *VD)
+ : CFGElement(Kind::CleanupFunction, VD) {
+ assert(VD->hasAttr<CleanupAttr>());
+ }
+
+ const VarDecl *getVarDecl() const {
+ return static_cast<VarDecl *>(Data1.getPointer());
+ }
+
+ /// Returns the function to be called when cleaning up the var decl.
+ const FunctionDecl *getFunctionDecl() const {
+ const CleanupAttr *A = getVarDecl()->getAttr<CleanupAttr>();
+ return A->getFunctionDecl();
+ }
+
+private:
+ friend class CFGElement;
+
+ static bool isKind(const CFGElement E) {
+ return E.getKind() == Kind::CleanupFunction;
+ }
+};
+
/// Represents C++ object destructor implicitly generated for automatic object
/// or temporary bound to const reference at the point of leaving its local
/// scope.
@@ -481,7 +509,7 @@ private:
/// expression for temporary object.
class CFGTemporaryDtor : public CFGImplicitDtor {
public:
- CFGTemporaryDtor(CXXBindTemporaryExpr *expr)
+ CFGTemporaryDtor(const CXXBindTemporaryExpr *expr)
: CFGImplicitDtor(TemporaryDtor, expr, nullptr) {}
const CXXBindTemporaryExpr *getBindTemporaryExpr() const {
@@ -515,7 +543,7 @@ public:
/// of the most derived class while we're in the base class.
VirtualBaseBranch,
- /// Number of different kinds, for sanity checks. We subtract 1 so that
+ /// Number of different kinds, for assertions. We subtract 1 so that
/// to keep receiving compiler warnings when we don't cover all enum values
/// in a switch.
NumKindsMinusOne = VirtualBaseBranch
@@ -707,7 +735,7 @@ class CFGBlock {
template <bool IsOtherConst>
ElementRefIterator(ElementRefIterator<true, IsOtherConst> E)
- : ElementRefIterator(E.Parent, llvm::make_reverse_iterator(E.Pos)) {}
+ : ElementRefIterator(E.Parent, std::make_reverse_iterator(E.Pos)) {}
bool operator<(ElementRefIterator Other) const {
assert(Parent == Other.Parent);
@@ -1122,19 +1150,10 @@ public:
Elements.push_back(CFGScopeBegin(VD, S), C);
}
- void prependScopeBegin(const VarDecl *VD, const Stmt *S,
- BumpVectorContext &C) {
- Elements.insert(Elements.rbegin(), 1, CFGScopeBegin(VD, S), C);
- }
-
void appendScopeEnd(const VarDecl *VD, const Stmt *S, BumpVectorContext &C) {
Elements.push_back(CFGScopeEnd(VD, S), C);
}
- void prependScopeEnd(const VarDecl *VD, const Stmt *S, BumpVectorContext &C) {
- Elements.insert(Elements.rbegin(), 1, CFGScopeEnd(VD, S), C);
- }
-
void appendBaseDtor(const CXXBaseSpecifier *BS, BumpVectorContext &C) {
Elements.push_back(CFGBaseDtor(BS), C);
}
@@ -1151,6 +1170,10 @@ public:
Elements.push_back(CFGAutomaticObjDtor(VD, S), C);
}
+ void appendCleanupFunction(const VarDecl *VD, BumpVectorContext &C) {
+ Elements.push_back(CFGCleanupFunction(VD), C);
+ }
+
void appendLifetimeEnds(VarDecl *VD, Stmt *S, BumpVectorContext &C) {
Elements.push_back(CFGLifetimeEnds(VD, S), C);
}
@@ -1162,44 +1185,6 @@ public:
void appendDeleteDtor(CXXRecordDecl *RD, CXXDeleteExpr *DE, BumpVectorContext &C) {
Elements.push_back(CFGDeleteDtor(RD, DE), C);
}
-
- // Destructors must be inserted in reversed order. So insertion is in two
- // steps. First we prepare space for some number of elements, then we insert
- // the elements beginning at the last position in prepared space.
- iterator beginAutomaticObjDtorsInsert(iterator I, size_t Cnt,
- BumpVectorContext &C) {
- return iterator(Elements.insert(I.base(), Cnt,
- CFGAutomaticObjDtor(nullptr, nullptr), C));
- }
- iterator insertAutomaticObjDtor(iterator I, VarDecl *VD, Stmt *S) {
- *I = CFGAutomaticObjDtor(VD, S);
- return ++I;
- }
-
- // Scope leaving must be performed in reversed order. So insertion is in two
- // steps. First we prepare space for some number of elements, then we insert
- // the elements beginning at the last position in prepared space.
- iterator beginLifetimeEndsInsert(iterator I, size_t Cnt,
- BumpVectorContext &C) {
- return iterator(
- Elements.insert(I.base(), Cnt, CFGLifetimeEnds(nullptr, nullptr), C));
- }
- iterator insertLifetimeEnds(iterator I, VarDecl *VD, Stmt *S) {
- *I = CFGLifetimeEnds(VD, S);
- return ++I;
- }
-
- // Scope leaving must be performed in reversed order. So insertion is in two
- // steps. First we prepare space for some number of elements, then we insert
- // the elements beginning at the last position in prepared space.
- iterator beginScopeEndInsert(iterator I, size_t Cnt, BumpVectorContext &C) {
- return iterator(
- Elements.insert(I.base(), Cnt, CFGScopeEnd(nullptr, nullptr), C));
- }
- iterator insertScopeEnd(iterator I, VarDecl *VD, Stmt *S) {
- *I = CFGScopeEnd(VD, S);
- return ++I;
- }
};
/// CFGCallback defines methods that should be called when a logical
@@ -1209,6 +1194,7 @@ public:
CFGCallback() = default;
virtual ~CFGCallback() = default;
+ virtual void logicAlwaysTrue(const BinaryOperator *B, bool isAlwaysTrue) {}
virtual void compareAlwaysTrue(const BinaryOperator *B, bool isAlwaysTrue) {}
virtual void compareBitwiseEquality(const BinaryOperator *B,
bool isAlwaysTrue) {}
@@ -1229,7 +1215,9 @@ public:
//===--------------------------------------------------------------------===//
class BuildOptions {
- std::bitset<Stmt::lastStmtConstant> alwaysAddMask;
+ // Stmt::lastStmtConstant has the same value as the last Stmt kind,
+ // so make sure we add one to account for this!
+ std::bitset<Stmt::lastStmtConstant + 1> alwaysAddMask;
public:
using ForcedBlkExprs = llvm::DenseMap<const Stmt *, const CFGBlock *>;
@@ -1337,6 +1325,7 @@ public:
const CFGBlock * getIndirectGotoBlock() const { return IndirectGotoBlock; }
using try_block_iterator = std::vector<const CFGBlock *>::const_iterator;
+ using try_block_range = llvm::iterator_range<try_block_iterator>;
try_block_iterator try_blocks_begin() const {
return TryDispatchBlocks.begin();
@@ -1346,6 +1335,10 @@ public:
return TryDispatchBlocks.end();
}
+ try_block_range try_blocks() const {
+ return try_block_range(try_blocks_begin(), try_blocks_end());
+ }
+
void addTryDispatchBlock(const CFGBlock *block) {
TryDispatchBlocks.push_back(block);
}
@@ -1393,7 +1386,7 @@ public:
for (const_iterator I = begin(), E = end(); I != E; ++I)
for (CFGBlock::const_iterator BI = (*I)->begin(), BE = (*I)->end();
BI != BE; ++BI) {
- if (Optional<CFGStmt> stmt = BI->getAs<CFGStmt>())
+ if (std::optional<CFGStmt> stmt = BI->getAs<CFGStmt>())
O(const_cast<Stmt *>(stmt->getStmt()));
}
}
@@ -1460,6 +1453,8 @@ private:
llvm::DenseMap<const DeclStmt *, const DeclStmt *> SyntheticDeclStmts;
};
+Expr *extractElementInitializerFromNestedAILE(const ArrayInitLoopExpr *AILE);
+
} // namespace clang
//===----------------------------------------------------------------------===//
@@ -1489,9 +1484,6 @@ template <> struct GraphTraits< ::clang::CFGBlock *> {
static ChildIteratorType child_end(NodeRef N) { return N->succ_end(); }
};
-template <> struct GraphTraits<clang::CFGBlock>
- : GraphTraits<clang::CFGBlock *> {};
-
template <> struct GraphTraits< const ::clang::CFGBlock *> {
using NodeRef = const ::clang::CFGBlock *;
using ChildIteratorType = ::clang::CFGBlock::const_succ_iterator;
@@ -1501,9 +1493,6 @@ template <> struct GraphTraits< const ::clang::CFGBlock *> {
static ChildIteratorType child_end(NodeRef N) { return N->succ_end(); }
};
-template <> struct GraphTraits<const clang::CFGBlock>
- : GraphTraits<clang::CFGBlock *> {};
-
template <> struct GraphTraits<Inverse< ::clang::CFGBlock *>> {
using NodeRef = ::clang::CFGBlock *;
using ChildIteratorType = ::clang::CFGBlock::const_pred_iterator;
@@ -1516,9 +1505,6 @@ template <> struct GraphTraits<Inverse< ::clang::CFGBlock *>> {
static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); }
};
-template <> struct GraphTraits<Inverse<clang::CFGBlock>>
- : GraphTraits<clang::CFGBlock *> {};
-
template <> struct GraphTraits<Inverse<const ::clang::CFGBlock *>> {
using NodeRef = const ::clang::CFGBlock *;
using ChildIteratorType = ::clang::CFGBlock::const_pred_iterator;
@@ -1531,9 +1517,6 @@ template <> struct GraphTraits<Inverse<const ::clang::CFGBlock *>> {
static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); }
};
-template <> struct GraphTraits<const Inverse<clang::CFGBlock>>
- : GraphTraits<clang::CFGBlock *> {};
-
// Traits for: CFG
template <> struct GraphTraits< ::clang::CFG* >
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/CFGStmtMap.h b/contrib/llvm-project/clang/include/clang/Analysis/CFGStmtMap.h
index 8cf02372ff0f..93cd9cfc5bdf 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/CFGStmtMap.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/CFGStmtMap.h
@@ -26,6 +26,8 @@ class CFGStmtMap {
void *M;
CFGStmtMap(ParentMap *pm, void *m) : PM(pm), M(m) {}
+ CFGStmtMap(const CFGStmtMap &) = delete;
+ CFGStmtMap &operator=(const CFGStmtMap &) = delete;
public:
~CFGStmtMap();
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/CallGraph.h b/contrib/llvm-project/clang/include/clang/Analysis/CallGraph.h
index 999ac5da8acb..78f8d1155501 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/CallGraph.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/CallGraph.h
@@ -66,7 +66,7 @@ public:
/// Determine if a declaration should be included in the graph.
static bool includeInGraph(const Decl *D);
- /// Determine if a declaration should be included in the graph for the
+ /// Determine if a declaration should be included in the graph for the
/// purposes of being a callee. This is similar to includeInGraph except
/// it permits declarations, not just definitions.
static bool includeCalleeInGraph(const Decl *D);
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/CloneDetection.h b/contrib/llvm-project/clang/include/clang/Analysis/CloneDetection.h
index db827c3a6d6f..3385579584b5 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/CloneDetection.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/CloneDetection.h
@@ -11,8 +11,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_AST_CLONEDETECTION_H
-#define LLVM_CLANG_AST_CLONEDETECTION_H
+#ifndef LLVM_CLANG_ANALYSIS_CLONEDETECTION_H
+#define LLVM_CLANG_ANALYSIS_CLONEDETECTION_H
#include "clang/AST/StmtVisitor.h"
#include "llvm/Support/Regex.h"
@@ -208,13 +208,7 @@ public:
// The initial assumption is that there is only one clone group and every
// statement is a clone of the others. This clone group will then be
// split up with the help of the constraints.
- CloneGroup AllClones;
- AllClones.reserve(Sequences.size());
- for (const auto &C : Sequences) {
- AllClones.push_back(C);
- }
-
- Result.push_back(AllClones);
+ Result.push_back(Sequences);
constrainClones(Result, ConstraintList...);
}
@@ -235,9 +229,7 @@ public:
static void filterGroups(
std::vector<CloneDetector::CloneGroup> &CloneGroups,
llvm::function_ref<bool(const CloneDetector::CloneGroup &)> Filter) {
- CloneGroups.erase(
- std::remove_if(CloneGroups.begin(), CloneGroups.end(), Filter),
- CloneGroups.end());
+ llvm::erase_if(CloneGroups, Filter);
}
/// Splits the given CloneGroups until the given Compare function returns true
@@ -268,7 +260,7 @@ public:
///
/// Clones that aren't type II clones are moved into separate clone groups.
/// In contrast to the RecursiveCloneTypeIIHashConstraint, all clones in a clone
-/// group are guaranteed to be be type II clones of each other, but it is too
+/// group are guaranteed to be type II clones of each other, but it is too
/// slow to efficiently handle large amounts of clones.
class RecursiveCloneTypeIIVerifyConstraint {
public:
@@ -443,4 +435,4 @@ struct MatchingVariablePatternConstraint {
} // end namespace clang
-#endif // LLVM_CLANG_AST_CLONEDETECTION_H
+#endif // LLVM_CLANG_ANALYSIS_CLONEDETECTION_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/ConstructionContext.h b/contrib/llvm-project/clang/include/clang/Analysis/ConstructionContext.h
index 4fa5c8b454a0..e19a20500095 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/ConstructionContext.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/ConstructionContext.h
@@ -36,13 +36,14 @@ public:
ElidedDestructorKind,
ElidableConstructorKind,
ArgumentKind,
- STATEMENT_WITH_INDEX_KIND_BEGIN=ArgumentKind,
- STATEMENT_WITH_INDEX_KIND_END=ArgumentKind,
+ LambdaCaptureKind,
+ STATEMENT_WITH_INDEX_KIND_BEGIN = ArgumentKind,
+ STATEMENT_WITH_INDEX_KIND_END = LambdaCaptureKind,
STATEMENT_KIND_BEGIN = VariableKind,
- STATEMENT_KIND_END = ArgumentKind,
+ STATEMENT_KIND_END = LambdaCaptureKind,
InitializerKind,
- INITIALIZER_KIND_BEGIN=InitializerKind,
- INITIALIZER_KIND_END=InitializerKind
+ INITIALIZER_KIND_BEGIN = InitializerKind,
+ INITIALIZER_KIND_END = InitializerKind
};
LLVM_DUMP_METHOD static StringRef getKindAsString(ItemKind K) {
@@ -55,6 +56,8 @@ public:
case ElidedDestructorKind: return "elide destructor";
case ElidableConstructorKind: return "elide constructor";
case ArgumentKind: return "construct into argument";
+ case LambdaCaptureKind:
+ return "construct into lambda captured variable";
case InitializerKind: return "construct into member variable";
};
llvm_unreachable("Unknown ItemKind");
@@ -72,7 +75,7 @@ private:
bool hasIndex() const {
return Kind >= STATEMENT_WITH_INDEX_KIND_BEGIN &&
- Kind >= STATEMENT_WITH_INDEX_KIND_END;
+ Kind <= STATEMENT_WITH_INDEX_KIND_END;
}
bool hasInitializer() const {
@@ -120,12 +123,16 @@ public:
ConstructionContextItem(const Expr *E, unsigned Index)
: Data(E), Kind(ArgumentKind), Index(Index) {
assert(isa<CallExpr>(E) || isa<CXXConstructExpr>(E) ||
- isa<CXXInheritedCtorInitExpr>(E) || isa<ObjCMessageExpr>(E));
+ isa<CXXDeleteExpr>(E) || isa<CXXInheritedCtorInitExpr>(E) ||
+ isa<ObjCMessageExpr>(E));
}
ConstructionContextItem(const CXXCtorInitializer *Init)
: Data(Init), Kind(InitializerKind), Index(0) {}
+ ConstructionContextItem(const LambdaExpr *LE, unsigned Index)
+ : Data(LE), Kind(LambdaCaptureKind), Index(Index) {}
+
ItemKind getKind() const { return Kind; }
LLVM_DUMP_METHOD StringRef getKindAsString() const {
@@ -253,7 +260,8 @@ public:
CXX17ElidedCopyReturnedValueKind,
RETURNED_VALUE_BEGIN = SimpleReturnedValueKind,
RETURNED_VALUE_END = CXX17ElidedCopyReturnedValueKind,
- ArgumentKind
+ ArgumentKind,
+ LambdaCaptureKind
};
protected:
@@ -297,6 +305,11 @@ public:
const ConstructionContextLayer *TopLayer);
Kind getKind() const { return K; }
+
+ virtual const ArrayInitLoopExpr *getArrayInitLoop() const { return nullptr; }
+
+ // Only declared to silence -Wnon-virtual-dtor warnings.
+ virtual ~ConstructionContext() = default;
};
/// An abstract base class for local variable constructors.
@@ -313,6 +326,12 @@ protected:
public:
const DeclStmt *getDeclStmt() const { return DS; }
+ const ArrayInitLoopExpr *getArrayInitLoop() const override {
+ const auto *Var = cast<VarDecl>(DS->getSingleDecl());
+
+ return dyn_cast<ArrayInitLoopExpr>(Var->getInit());
+ }
+
static bool classof(const ConstructionContext *CC) {
return CC->getKind() >= VARIABLE_BEGIN &&
CC->getKind() <= VARIABLE_END;
@@ -380,6 +399,10 @@ protected:
public:
const CXXCtorInitializer *getCXXCtorInitializer() const { return I; }
+ const ArrayInitLoopExpr *getArrayInitLoop() const override {
+ return dyn_cast<ArrayInitLoopExpr>(I->getInit());
+ }
+
static bool classof(const ConstructionContext *CC) {
return CC->getKind() >= INITIALIZER_BEGIN &&
CC->getKind() <= INITIALIZER_END;
@@ -519,7 +542,7 @@ public:
/// of being immediately copied by an elidable copy/move constructor.
/// For example, T t = T(123); includes a temporary T(123) that is immediately
/// copied to variable t. In such cases the elidable copy can (but not
-/// necessarily should) be omitted ("elided") accodring to the rules of the
+/// necessarily should) be omitted ("elided") according to the rules of the
/// language; the constructor would then construct variable t directly.
/// This construction context contains information of the elidable constructor
/// and its respective construction context.
@@ -658,6 +681,42 @@ public:
}
};
+class LambdaCaptureConstructionContext : public ConstructionContext {
+ // The lambda of which the initializer we capture.
+ const LambdaExpr *LE;
+
+ // Index of the captured element in the captured list.
+ unsigned Index;
+
+ friend class ConstructionContext; // Allows to create<>() itself.
+
+ explicit LambdaCaptureConstructionContext(const LambdaExpr *LE,
+ unsigned Index)
+ : ConstructionContext(LambdaCaptureKind), LE(LE), Index(Index) {}
+
+public:
+ const LambdaExpr *getLambdaExpr() const { return LE; }
+ unsigned getIndex() const { return Index; }
+
+ const Expr *getInitializer() const {
+ return *(LE->capture_init_begin() + Index);
+ }
+
+ const FieldDecl *getFieldDecl() const {
+ auto It = LE->getLambdaClass()->field_begin();
+ std::advance(It, Index);
+ return *It;
+ }
+
+ const ArrayInitLoopExpr *getArrayInitLoop() const override {
+ return dyn_cast_or_null<ArrayInitLoopExpr>(getInitializer());
+ }
+
+ static bool classof(const ConstructionContext *CC) {
+ return CC->getKind() == LambdaCaptureKind;
+ }
+};
+
} // end namespace clang
#endif // LLVM_CLANG_ANALYSIS_CONSTRUCTIONCONTEXT_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Arena.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Arena.h
new file mode 100644
index 000000000000..394ce054e65f
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Arena.h
@@ -0,0 +1,152 @@
+//===-- Arena.h -------------------------------*- C++ -------------------*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE__ARENA_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE__ARENA_H
+
+#include "clang/Analysis/FlowSensitive/Formula.h"
+#include "clang/Analysis/FlowSensitive/StorageLocation.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "llvm/ADT/StringRef.h"
+#include <vector>
+
+namespace clang::dataflow {
+
+/// The Arena owns the objects that model data within an analysis.
+/// For example, `Value`, `StorageLocation`, `Atom`, and `Formula`.
+class Arena {
+public:
+ Arena()
+ : True(Formula::create(Alloc, Formula::Literal, {}, 1)),
+ False(Formula::create(Alloc, Formula::Literal, {}, 0)) {}
+ Arena(const Arena &) = delete;
+ Arena &operator=(const Arena &) = delete;
+
+ /// Creates a `T` (some subclass of `StorageLocation`), forwarding `args` to
+ /// the constructor, and returns a reference to it.
+ ///
+ /// The `Arena` takes ownership of the created object. The object will be
+ /// destroyed when the `Arena` is destroyed.
+ template <typename T, typename... Args>
+ std::enable_if_t<std::is_base_of<StorageLocation, T>::value, T &>
+ create(Args &&...args) {
+ // Note: If allocation of individual `StorageLocation`s turns out to be
+ // costly, consider creating specializations of `create<T>` for commonly
+ // used `StorageLocation` subclasses and make them use a `BumpPtrAllocator`.
+ return *cast<T>(
+ Locs.emplace_back(std::make_unique<T>(std::forward<Args>(args)...))
+ .get());
+ }
+
+ /// Creates a `T` (some subclass of `Value`), forwarding `args` to the
+ /// constructor, and returns a reference to it.
+ ///
+ /// The `Arena` takes ownership of the created object. The object will be
+ /// destroyed when the `Arena` is destroyed.
+ template <typename T, typename... Args>
+ std::enable_if_t<std::is_base_of<Value, T>::value, T &>
+ create(Args &&...args) {
+ // Note: If allocation of individual `Value`s turns out to be costly,
+ // consider creating specializations of `create<T>` for commonly used
+ // `Value` subclasses and make them use a `BumpPtrAllocator`.
+ return *cast<T>(
+ Vals.emplace_back(std::make_unique<T>(std::forward<Args>(args)...))
+ .get());
+ }
+
+ /// Creates a BoolValue wrapping a particular formula.
+ ///
+ /// Passing in the same formula will result in the same BoolValue.
+ /// FIXME: Interning BoolValues but not other Values is inconsistent.
+ /// Decide whether we want Value interning or not.
+ BoolValue &makeBoolValue(const Formula &);
+
+ /// Creates a fresh atom and wraps in in an AtomicBoolValue.
+ /// FIXME: For now, identical-address AtomicBoolValue <=> identical atom.
+ /// Stop relying on pointer identity and remove this guarantee.
+ AtomicBoolValue &makeAtomValue() {
+ return cast<AtomicBoolValue>(makeBoolValue(makeAtomRef(makeAtom())));
+ }
+
+ /// Creates a fresh Top boolean value.
+ TopBoolValue &makeTopValue() {
+ // No need for deduplicating: there's no way to create aliasing Tops.
+ return create<TopBoolValue>(makeAtomRef(makeAtom()));
+ }
+
+ /// Returns a symbolic integer value that models an integer literal equal to
+ /// `Value`. These literals are the same every time.
+ /// Integer literals are not typed; the type is determined by the `Expr` that
+ /// an integer literal is associated with.
+ IntegerValue &makeIntLiteral(llvm::APInt Value);
+
+ // Factories for boolean formulas.
+ // Formulas are interned: passing the same arguments return the same result.
+ // For commutative operations like And/Or, interning ignores order.
+ // Simplifications are applied: makeOr(X, X) => X, etc.
+
+ /// Returns a formula for the conjunction of `LHS` and `RHS`.
+ const Formula &makeAnd(const Formula &LHS, const Formula &RHS);
+
+ /// Returns a formula for the disjunction of `LHS` and `RHS`.
+ const Formula &makeOr(const Formula &LHS, const Formula &RHS);
+
+ /// Returns a formula for the negation of `Val`.
+ const Formula &makeNot(const Formula &Val);
+
+ /// Returns a formula for `LHS => RHS`.
+ const Formula &makeImplies(const Formula &LHS, const Formula &RHS);
+
+ /// Returns a formula for `LHS <=> RHS`.
+ const Formula &makeEquals(const Formula &LHS, const Formula &RHS);
+
+ /// Returns a formula for the variable A.
+ const Formula &makeAtomRef(Atom A);
+
+ /// Returns a formula for a literal true/false.
+ const Formula &makeLiteral(bool Value) { return Value ? True : False; }
+
+ // Parses a formula from its textual representation.
+ // This may refer to atoms that were not produced by makeAtom() yet!
+ llvm::Expected<const Formula &> parseFormula(llvm::StringRef);
+
+ /// Returns a new atomic boolean variable, distinct from any other.
+ Atom makeAtom() { return static_cast<Atom>(NextAtom++); };
+
+ /// Creates a fresh flow condition and returns a token that identifies it. The
+ /// token can be used to perform various operations on the flow condition such
+ /// as adding constraints to it, forking it, joining it with another flow
+ /// condition, or checking implications.
+ Atom makeFlowConditionToken() { return makeAtom(); }
+
+private:
+ llvm::BumpPtrAllocator Alloc;
+
+ // Storage for the state of a program.
+ std::vector<std::unique_ptr<StorageLocation>> Locs;
+ std::vector<std::unique_ptr<Value>> Vals;
+
+ // Indices that are used to avoid recreating the same integer literals and
+ // composite boolean values.
+ llvm::DenseMap<llvm::APInt, IntegerValue *> IntegerLiterals;
+ using FormulaPair = std::pair<const Formula *, const Formula *>;
+ llvm::DenseMap<FormulaPair, const Formula *> Ands;
+ llvm::DenseMap<FormulaPair, const Formula *> Ors;
+ llvm::DenseMap<const Formula *, const Formula *> Nots;
+ llvm::DenseMap<FormulaPair, const Formula *> Implies;
+ llvm::DenseMap<FormulaPair, const Formula *> Equals;
+ llvm::DenseMap<Atom, const Formula *> AtomRefs;
+
+ llvm::DenseMap<const Formula *, BoolValue *> FormulaValues;
+ unsigned NextAtom = 0;
+
+ const Formula &True, &False;
+};
+
+} // namespace clang::dataflow
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE__ARENA_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/CFGMatchSwitch.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/CFGMatchSwitch.h
new file mode 100644
index 000000000000..ecd8558970f9
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/CFGMatchSwitch.h
@@ -0,0 +1,98 @@
+//===---- CFGMatchSwitch.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the `CFGMatchSwitch` abstraction for building a "switch"
+// statement for control flow graph elements. Each case of the switch is
+// defined by an ASTMatcher which is applied on the AST node contained in the
+// input `CFGElement`.
+//
+// Currently, the `CFGMatchSwitch` only handles `CFGElement`s of
+// `Kind::Statement` and `Kind::Initializer`.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_CFGMATCHSWITCH_H_
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_CFGMATCHSWITCH_H_
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Stmt.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/FlowSensitive/MatchSwitch.h"
+#include <functional>
+#include <utility>
+
+namespace clang {
+namespace dataflow {
+
+template <typename State, typename Result = void>
+using CFGMatchSwitch =
+ std::function<Result(const CFGElement &, ASTContext &, State &)>;
+
+/// Collects cases of a "match switch": a collection of matchers paired with
+/// callbacks, which together define a switch that can be applied to an AST node
+/// contained in a CFG element.
+template <typename State, typename Result = void> class CFGMatchSwitchBuilder {
+public:
+ /// Registers an action `A` for `CFGStmt`s that will be triggered by the match
+ /// of the pattern `M` against the `Stmt` contained in the input `CFGStmt`.
+ ///
+ /// Requirements:
+ ///
+ /// `NodeT` should be derived from `Stmt`.
+ template <typename NodeT>
+ CFGMatchSwitchBuilder &&
+ CaseOfCFGStmt(MatchSwitchMatcher<Stmt> M,
+ MatchSwitchAction<NodeT, State, Result> A) && {
+ std::move(StmtBuilder).template CaseOf<NodeT>(M, A);
+ return std::move(*this);
+ }
+
+ /// Registers an action `A` for `CFGInitializer`s that will be triggered by
+ /// the match of the pattern `M` against the `CXXCtorInitializer` contained in
+ /// the input `CFGInitializer`.
+ ///
+ /// Requirements:
+ ///
+ /// `NodeT` should be derived from `CXXCtorInitializer`.
+ template <typename NodeT>
+ CFGMatchSwitchBuilder &&
+ CaseOfCFGInit(MatchSwitchMatcher<CXXCtorInitializer> M,
+ MatchSwitchAction<NodeT, State, Result> A) && {
+ std::move(InitBuilder).template CaseOf<NodeT>(M, A);
+ return std::move(*this);
+ }
+
+ CFGMatchSwitch<State, Result> Build() && {
+ return [StmtMS = std::move(StmtBuilder).Build(),
+ InitMS = std::move(InitBuilder).Build()](const CFGElement &Element,
+ ASTContext &Context,
+ State &S) -> Result {
+ switch (Element.getKind()) {
+ case CFGElement::Initializer:
+ return InitMS(*Element.castAs<CFGInitializer>().getInitializer(),
+ Context, S);
+ case CFGElement::Statement:
+ case CFGElement::Constructor:
+ case CFGElement::CXXRecordTypedCall:
+ return StmtMS(*Element.castAs<CFGStmt>().getStmt(), Context, S);
+ default:
+ // FIXME: Handle other kinds of CFGElement.
+ return Result();
+ }
+ };
+ }
+
+private:
+ ASTMatchSwitchBuilder<Stmt, State, Result> StmtBuilder;
+ ASTMatchSwitchBuilder<CXXCtorInitializer, State, Result> InitBuilder;
+};
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_CFGMATCHSWITCH_H_
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/ControlFlowContext.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/ControlFlowContext.h
new file mode 100644
index 000000000000..405e93287a05
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/ControlFlowContext.h
@@ -0,0 +1,79 @@
+//===-- ControlFlowContext.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a ControlFlowContext class that is used by dataflow
+// analyses that run over Control-Flow Graphs (CFGs).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_CONTROLFLOWCONTEXT_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_CONTROLFLOWCONTEXT_H
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Stmt.h"
+#include "clang/Analysis/CFG.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/Error.h"
+#include <memory>
+#include <utility>
+
+namespace clang {
+namespace dataflow {
+
+/// Holds CFG and other derived context that is needed to perform dataflow
+/// analysis.
+class ControlFlowContext {
+public:
+ /// Builds a ControlFlowContext from a `FunctionDecl`.
+ /// `Func.doesThisDeclarationHaveABody()` must be true, and
+ /// `Func.isTemplated()` must be false.
+ static llvm::Expected<ControlFlowContext> build(const FunctionDecl &Func);
+
+ /// Builds a ControlFlowContext from an AST node. `D` is the function in which
+ /// `S` resides. `D.isTemplated()` must be false.
+ static llvm::Expected<ControlFlowContext> build(const Decl &D, Stmt &S,
+ ASTContext &C);
+
+ /// Returns the `Decl` containing the statement used to construct the CFG, if
+ /// available.
+ const Decl &getDecl() const { return ContainingDecl; }
+
+ /// Returns the CFG that is stored in this context.
+ const CFG &getCFG() const { return *Cfg; }
+
+ /// Returns a mapping from statements to basic blocks that contain them.
+ const llvm::DenseMap<const Stmt *, const CFGBlock *> &getStmtToBlock() const {
+ return StmtToBlock;
+ }
+
+ /// Returns whether `B` is reachable from the entry block.
+ bool isBlockReachable(const CFGBlock &B) const {
+ return BlockReachable[B.getBlockID()];
+ }
+
+private:
+ ControlFlowContext(const Decl &D, std::unique_ptr<CFG> Cfg,
+ llvm::DenseMap<const Stmt *, const CFGBlock *> StmtToBlock,
+ llvm::BitVector BlockReachable)
+ : ContainingDecl(D), Cfg(std::move(Cfg)),
+ StmtToBlock(std::move(StmtToBlock)),
+ BlockReachable(std::move(BlockReachable)) {}
+
+ /// The `Decl` containing the statement used to construct the CFG.
+ const Decl &ContainingDecl;
+ std::unique_ptr<CFG> Cfg;
+ llvm::DenseMap<const Stmt *, const CFGBlock *> StmtToBlock;
+ llvm::BitVector BlockReachable;
+};
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_CONTROLFLOWCONTEXT_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysis.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysis.h
new file mode 100644
index 000000000000..b95095d2184c
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysis.h
@@ -0,0 +1,332 @@
+//===- DataflowAnalysis.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines base types and functions for building dataflow analyses
+// that run over Control-Flow Graphs (CFGs).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWANALYSIS_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWANALYSIS_H
+
+#include <iterator>
+#include <optional>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "clang/AST/ASTContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
+#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/Analysis/FlowSensitive/DataflowLattice.h"
+#include "clang/Analysis/FlowSensitive/MatchSwitch.h"
+#include "clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h"
+#include "clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/Error.h"
+
+namespace clang {
+namespace dataflow {
+
+/// Base class template for dataflow analyses built on a single lattice type.
+///
+/// Requirements:
+///
+/// `Derived` must be derived from a specialization of this class template and
+/// must provide the following public members:
+/// * `LatticeT initialElement()` - returns a lattice element that models the
+/// initial state of a basic block;
+/// * `void transfer(const CFGElement &, LatticeT &, Environment &)` - applies
+/// the analysis transfer function for a given CFG element and lattice
+/// element.
+///
+/// `Derived` can optionally provide the following members:
+/// * `void transferBranch(bool Branch, const Stmt *Stmt, TypeErasedLattice &E,
+/// Environment &Env)` - applies the analysis transfer
+/// function for a given edge from a CFG block of a conditional statement.
+///
+/// `Derived` can optionally override the following members:
+/// * `bool merge(QualType, const Value &, const Value &, Value &,
+/// Environment &)` - joins distinct values. This could be a strict
+/// lattice join or a more general widening operation.
+///
+/// `LatticeT` is a bounded join-semilattice that is used by `Derived` and must
+/// provide the following public members:
+/// * `LatticeJoinEffect join(const LatticeT &)` - joins the object and the
+/// argument by computing their least upper bound, modifies the object if
+/// necessary, and returns an effect indicating whether any changes were
+/// made to it;
+/// FIXME: make it `static LatticeT join(const LatticeT&, const LatticeT&)`
+/// * `bool operator==(const LatticeT &) const` - returns true if and only if
+/// the object is equal to the argument.
+///
+/// `LatticeT` can optionally provide the following members:
+/// * `LatticeJoinEffect widen(const LatticeT &Previous)` - replaces the
+/// lattice element with an approximation that can reach a fixed point more
+/// quickly than iterated application of the transfer function alone. The
+/// previous value is provided to inform the choice of widened value. The
+/// function must also serve as a comparison operation, by indicating whether
+/// the widened value is equivalent to the previous value with the returned
+/// `LatticeJoinEffect`.
+template <typename Derived, typename LatticeT>
+class DataflowAnalysis : public TypeErasedDataflowAnalysis {
+public:
+ /// Bounded join-semilattice that is used in the analysis.
+ using Lattice = LatticeT;
+
+ explicit DataflowAnalysis(ASTContext &Context) : Context(Context) {}
+
+ explicit DataflowAnalysis(ASTContext &Context,
+ DataflowAnalysisOptions Options)
+ : TypeErasedDataflowAnalysis(Options), Context(Context) {}
+
+ ASTContext &getASTContext() final { return Context; }
+
+ TypeErasedLattice typeErasedInitialElement() final {
+ return {static_cast<Derived *>(this)->initialElement()};
+ }
+
+ TypeErasedLattice joinTypeErased(const TypeErasedLattice &E1,
+ const TypeErasedLattice &E2) final {
+ // FIXME: change the signature of join() to avoid copying here.
+ Lattice L1 = llvm::any_cast<const Lattice &>(E1.Value);
+ const Lattice &L2 = llvm::any_cast<const Lattice &>(E2.Value);
+ L1.join(L2);
+ return {std::move(L1)};
+ }
+
+ LatticeJoinEffect widenTypeErased(TypeErasedLattice &Current,
+ const TypeErasedLattice &Previous) final {
+ Lattice &C = llvm::any_cast<Lattice &>(Current.Value);
+ const Lattice &P = llvm::any_cast<const Lattice &>(Previous.Value);
+ return widenInternal(Rank0{}, C, P);
+ }
+
+ bool isEqualTypeErased(const TypeErasedLattice &E1,
+ const TypeErasedLattice &E2) final {
+ const Lattice &L1 = llvm::any_cast<const Lattice &>(E1.Value);
+ const Lattice &L2 = llvm::any_cast<const Lattice &>(E2.Value);
+ return L1 == L2;
+ }
+
+ void transferTypeErased(const CFGElement &Element, TypeErasedLattice &E,
+ Environment &Env) final {
+ Lattice &L = llvm::any_cast<Lattice &>(E.Value);
+ static_cast<Derived *>(this)->transfer(Element, L, Env);
+ }
+
+ void transferBranchTypeErased(bool Branch, const Stmt *Stmt,
+ TypeErasedLattice &E, Environment &Env) final {
+ transferBranchInternal(Rank0{}, *static_cast<Derived *>(this), Branch, Stmt,
+ E, Env);
+ }
+
+private:
+ // These `Rank` structs are used for template metaprogramming to choose
+ // between overloads.
+ struct Rank1 {};
+ struct Rank0 : Rank1 {};
+
+ // The first-choice implementation: use `widen` when it is available.
+ template <typename T>
+ static auto widenInternal(Rank0, T &Current, const T &Prev)
+ -> decltype(Current.widen(Prev)) {
+ return Current.widen(Prev);
+ }
+
+ // The second-choice implementation: `widen` is unavailable. Widening is
+ // merged with equality checking, so when widening is unimplemented, we
+ // default to equality checking.
+ static LatticeJoinEffect widenInternal(Rank1, const Lattice &Current,
+ const Lattice &Prev) {
+ return Prev == Current ? LatticeJoinEffect::Unchanged
+ : LatticeJoinEffect::Changed;
+ }
+
+ // The first-choice implementation: `transferBranch` is implemented.
+ template <typename Analysis>
+ static auto transferBranchInternal(Rank0, Analysis &A, bool Branch,
+ const Stmt *Stmt, TypeErasedLattice &L,
+ Environment &Env)
+ -> std::void_t<decltype(A.transferBranch(
+ Branch, Stmt, std::declval<LatticeT &>(), Env))> {
+ A.transferBranch(Branch, Stmt, llvm::any_cast<Lattice &>(L.Value), Env);
+ }
+
+ // The second-choice implementation: `transferBranch` is unimplemented. No-op.
+ template <typename Analysis>
+ static void transferBranchInternal(Rank1, Analysis &A, bool, const Stmt *,
+ TypeErasedLattice &, Environment &) {}
+
+ ASTContext &Context;
+};
+
+// Model of the program at a given program point.
+template <typename LatticeT> struct DataflowAnalysisState {
+ // Model of a program property.
+ LatticeT Lattice;
+
+ // Model of the state of the program (store and heap).
+ Environment Env;
+};
+
+/// Performs dataflow analysis and returns a mapping from basic block IDs to
+/// dataflow analysis states that model the respective basic blocks. The
+/// returned vector, if any, will have the same size as the number of CFG
+/// blocks, with indices corresponding to basic block IDs. Returns an error if
+/// the dataflow analysis cannot be performed successfully. Otherwise, calls
+/// `PostVisitCFG` on each CFG element with the final analysis results at that
+/// program point.
+///
+/// `MaxBlockVisits` caps the number of block visits during analysis. See
+/// `runTypeErasedDataflowAnalysis` for a full description. The default value is
+/// essentially arbitrary -- large enough to accommodate what seems like any
+/// reasonable CFG, but still small enough to limit the cost of hitting the
+/// limit.
+template <typename AnalysisT>
+llvm::Expected<std::vector<
+ std::optional<DataflowAnalysisState<typename AnalysisT::Lattice>>>>
+runDataflowAnalysis(
+ const ControlFlowContext &CFCtx, AnalysisT &Analysis,
+ const Environment &InitEnv,
+ std::function<void(const CFGElement &, const DataflowAnalysisState<
+ typename AnalysisT::Lattice> &)>
+ PostVisitCFG = nullptr,
+ std::int32_t MaxBlockVisits = 20'000) {
+ std::function<void(const CFGElement &,
+ const TypeErasedDataflowAnalysisState &)>
+ PostVisitCFGClosure = nullptr;
+ if (PostVisitCFG) {
+ PostVisitCFGClosure = [&PostVisitCFG](
+ const CFGElement &Element,
+ const TypeErasedDataflowAnalysisState &State) {
+ auto *Lattice =
+ llvm::any_cast<typename AnalysisT::Lattice>(&State.Lattice.Value);
+ // FIXME: we should not be copying the environment here!
+ // Ultimately the PostVisitCFG only gets a const reference anyway.
+ PostVisitCFG(Element, DataflowAnalysisState<typename AnalysisT::Lattice>{
+ *Lattice, State.Env.fork()});
+ };
+ }
+
+ auto TypeErasedBlockStates = runTypeErasedDataflowAnalysis(
+ CFCtx, Analysis, InitEnv, PostVisitCFGClosure, MaxBlockVisits);
+ if (!TypeErasedBlockStates)
+ return TypeErasedBlockStates.takeError();
+
+ std::vector<std::optional<DataflowAnalysisState<typename AnalysisT::Lattice>>>
+ BlockStates;
+ BlockStates.reserve(TypeErasedBlockStates->size());
+
+ llvm::transform(
+ std::move(*TypeErasedBlockStates), std::back_inserter(BlockStates),
+ [](auto &OptState) {
+ return llvm::transformOptional(
+ std::move(OptState), [](TypeErasedDataflowAnalysisState &&State) {
+ return DataflowAnalysisState<typename AnalysisT::Lattice>{
+ llvm::any_cast<typename AnalysisT::Lattice>(
+ std::move(State.Lattice.Value)),
+ std::move(State.Env)};
+ });
+ });
+ return std::move(BlockStates);
+}
+
+// Create an analysis class that is derived from `DataflowAnalysis`. This is an
+// SFINAE adapter that allows us to call two different variants of constructor
+// (either with or without the optional `Environment` parameter).
+// FIXME: Make all classes derived from `DataflowAnalysis` take an `Environment`
+// parameter in their constructor so that we can get rid of this abomination.
+template <typename AnalysisT>
+auto createAnalysis(ASTContext &ASTCtx, Environment &Env)
+ -> decltype(AnalysisT(ASTCtx, Env)) {
+ return AnalysisT(ASTCtx, Env);
+}
+template <typename AnalysisT>
+auto createAnalysis(ASTContext &ASTCtx, Environment &Env)
+ -> decltype(AnalysisT(ASTCtx)) {
+ return AnalysisT(ASTCtx);
+}
+
+/// Runs a dataflow analysis over the given function and then runs `Diagnoser`
+/// over the results. Returns a list of diagnostics for `FuncDecl` or an
+/// error. Currently, errors can occur (at least) because the analysis requires
+/// too many iterations over the CFG or the SAT solver times out.
+///
+/// The default value of `MaxSATIterations` was chosen based on the following
+/// observations:
+/// - Non-pathological calls to the solver typically require only a few hundred
+/// iterations.
+/// - This limit is still low enough to keep runtimes acceptable (on typical
+/// machines) in cases where we hit the limit.
+///
+/// `MaxBlockVisits` caps the number of block visits during analysis. See
+/// `runDataflowAnalysis` for a full description and explanation of the default
+/// value.
+template <typename AnalysisT, typename Diagnostic>
+llvm::Expected<llvm::SmallVector<Diagnostic>> diagnoseFunction(
+ const FunctionDecl &FuncDecl, ASTContext &ASTCtx,
+ llvm::function_ref<llvm::SmallVector<Diagnostic>(
+ const CFGElement &, ASTContext &,
+ const TransferStateForDiagnostics<typename AnalysisT::Lattice> &)>
+ Diagnoser,
+ std::int64_t MaxSATIterations = 1'000'000'000,
+ std::int32_t MaxBlockVisits = 20'000) {
+ llvm::Expected<ControlFlowContext> Context =
+ ControlFlowContext::build(FuncDecl);
+ if (!Context)
+ return Context.takeError();
+
+ auto OwnedSolver = std::make_unique<WatchedLiteralsSolver>(MaxSATIterations);
+ const WatchedLiteralsSolver *Solver = OwnedSolver.get();
+ DataflowAnalysisContext AnalysisContext(std::move(OwnedSolver));
+ Environment Env(AnalysisContext, FuncDecl);
+ AnalysisT Analysis = createAnalysis<AnalysisT>(ASTCtx, Env);
+ llvm::SmallVector<Diagnostic> Diagnostics;
+ if (llvm::Error Err =
+ runTypeErasedDataflowAnalysis(
+ *Context, Analysis, Env,
+ [&ASTCtx, &Diagnoser, &Diagnostics](
+ const CFGElement &Elt,
+ const TypeErasedDataflowAnalysisState &State) mutable {
+ auto EltDiagnostics = Diagnoser(
+ Elt, ASTCtx,
+ TransferStateForDiagnostics<typename AnalysisT::Lattice>(
+ llvm::any_cast<const typename AnalysisT::Lattice &>(
+ State.Lattice.Value),
+ State.Env));
+ llvm::move(EltDiagnostics, std::back_inserter(Diagnostics));
+ },
+ MaxBlockVisits)
+ .takeError())
+ return std::move(Err);
+
+ if (Solver->reachedLimit())
+ return llvm::createStringError(llvm::errc::interrupted,
+ "SAT solver timed out");
+
+ return Diagnostics;
+}
+
+/// Abstract base class for dataflow "models": reusable analysis components that
+/// model a particular aspect of program semantics in the `Environment`. For
+/// example, a model may capture a type and its related functions.
+class DataflowModel : public Environment::ValueModel {
+public:
+ /// Return value indicates whether the model processed the `Element`.
+ virtual bool transfer(const CFGElement &Element, Environment &Env) = 0;
+};
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWANALYSIS_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h
new file mode 100644
index 000000000000..20e45cc27b01
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h
@@ -0,0 +1,304 @@
+//===-- DataflowAnalysisContext.h -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a DataflowAnalysisContext class that owns objects that
+// encompass the state of a program and stores context that is used during
+// dataflow analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWANALYSISCONTEXT_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWANALYSISCONTEXT_H
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/TypeOrdering.h"
+#include "clang/Analysis/FlowSensitive/Arena.h"
+#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
+#include "clang/Analysis/FlowSensitive/Solver.h"
+#include "clang/Analysis/FlowSensitive/StorageLocation.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Support/Compiler.h"
+#include <cassert>
+#include <memory>
+#include <optional>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+namespace clang {
+namespace dataflow {
+class Logger;
+
+/// Skip past nodes that the CFG does not emit. These nodes are invisible to
+/// flow-sensitive analysis, and should be ignored as they will effectively not
+/// exist.
+///
+/// * `ParenExpr` - The CFG takes the operator precedence into account, but
+/// otherwise omits the node afterwards.
+///
+/// * `ExprWithCleanups` - The CFG will generate the appropriate calls to
+/// destructors and then omit the node.
+///
+const Expr &ignoreCFGOmittedNodes(const Expr &E);
+const Stmt &ignoreCFGOmittedNodes(const Stmt &S);
+
+/// A set of `FieldDecl *`. Use `SmallSetVector` to guarantee deterministic
+/// iteration order.
+using FieldSet = llvm::SmallSetVector<const FieldDecl *, 4>;
+
+/// Returns the set of all fields in the type.
+FieldSet getObjectFields(QualType Type);
+
+/// Returns whether `Fields` and `FieldLocs` contain the same fields.
+bool containsSameFields(const FieldSet &Fields,
+ const RecordStorageLocation::FieldToLoc &FieldLocs);
+
+struct ContextSensitiveOptions {
+ /// The maximum depth to analyze. A value of zero is equivalent to disabling
+ /// context-sensitive analysis entirely.
+ unsigned Depth = 2;
+};
+
+/// Owns objects that encompass the state of a program and stores context that
+/// is used during dataflow analysis.
+class DataflowAnalysisContext {
+public:
+ struct Options {
+ /// Options for analyzing function bodies when present in the translation
+ /// unit, or empty to disable context-sensitive analysis. Note that this is
+ /// fundamentally limited: some constructs, such as recursion, are
+ /// explicitly unsupported.
+ std::optional<ContextSensitiveOptions> ContextSensitiveOpts;
+
+ /// If provided, analysis details will be recorded here.
+ /// (This is always non-null within an AnalysisContext, the framework
+ /// provides a fallback no-op logger).
+ Logger *Log = nullptr;
+ };
+
+ /// Constructs a dataflow analysis context.
+ ///
+ /// Requirements:
+ ///
+ /// `S` must not be null.
+ DataflowAnalysisContext(std::unique_ptr<Solver> S,
+ Options Opts = Options{
+ /*ContextSensitiveOpts=*/std::nullopt,
+ /*Logger=*/nullptr});
+ ~DataflowAnalysisContext();
+
+ /// Sets a callback that returns the names and types of the synthetic fields
+ /// to add to a `RecordStorageLocation` of a given type.
+ /// Typically, this is called from the constructor of a `DataflowAnalysis`
+ ///
+ /// To maintain the invariant that all `RecordStorageLocation`s of a given
+ /// type have the same fields:
+ /// * The callback must always return the same result for a given type
+ /// * `setSyntheticFieldCallback()` must be called before any
+ // `RecordStorageLocation`s are created.
+ void setSyntheticFieldCallback(
+ std::function<llvm::StringMap<QualType>(QualType)> CB) {
+ assert(!RecordStorageLocationCreated);
+ SyntheticFieldCallback = CB;
+ }
+
+ /// Returns a new storage location appropriate for `Type`.
+ ///
+ /// A null `Type` is interpreted as the pointee type of `std::nullptr_t`.
+ StorageLocation &createStorageLocation(QualType Type);
+
+ /// Creates a `RecordStorageLocation` for the given type and with the given
+ /// fields.
+ ///
+ /// Requirements:
+ ///
+ /// `FieldLocs` must contain exactly the fields returned by
+ /// `getModeledFields(Type)`.
+ /// `SyntheticFields` must contain exactly the fields returned by
+ /// `getSyntheticFields(Type)`.
+ RecordStorageLocation &createRecordStorageLocation(
+ QualType Type, RecordStorageLocation::FieldToLoc FieldLocs,
+ RecordStorageLocation::SyntheticFieldMap SyntheticFields);
+
+ /// Returns a stable storage location for `D`.
+ StorageLocation &getStableStorageLocation(const ValueDecl &D);
+
+ /// Returns a stable storage location for `E`.
+ StorageLocation &getStableStorageLocation(const Expr &E);
+
+ /// Returns a pointer value that represents a null pointer. Calls with
+ /// `PointeeType` that are canonically equivalent will return the same result.
+ /// A null `PointeeType` can be used for the pointee of `std::nullptr_t`.
+ PointerValue &getOrCreateNullPointerValue(QualType PointeeType);
+
+ /// Adds `Constraint` to current and future flow conditions in this context.
+ ///
+ /// Invariants must contain only flow-insensitive information, i.e. facts that
+ /// are true on all paths through the program.
+ /// Information can be added eagerly (when analysis begins), or lazily (e.g.
+ /// when values are first used). The analysis must be careful that the same
+ /// information is added regardless of which order blocks are analyzed in.
+ void addInvariant(const Formula &Constraint);
+
+ /// Adds `Constraint` to the flow condition identified by `Token`.
+ void addFlowConditionConstraint(Atom Token, const Formula &Constraint);
+
+ /// Creates a new flow condition with the same constraints as the flow
+ /// condition identified by `Token` and returns its token.
+ Atom forkFlowCondition(Atom Token);
+
+ /// Creates a new flow condition that represents the disjunction of the flow
+ /// conditions identified by `FirstToken` and `SecondToken`, and returns its
+ /// token.
+ Atom joinFlowConditions(Atom FirstToken, Atom SecondToken);
+
+ /// Returns true if the constraints of the flow condition identified by
+ /// `Token` imply that `F` is true.
+ /// Returns false if the flow condition does not imply `F` or if the solver
+ /// times out.
+ bool flowConditionImplies(Atom Token, const Formula &F);
+
+ /// Returns true if the constraints of the flow condition identified by
+ /// `Token` still allow `F` to be true.
+ /// Returns false if the flow condition implies that `F` is false or if the
+ /// solver times out.
+ bool flowConditionAllows(Atom Token, const Formula &F);
+
+ /// Returns true if `Val1` is equivalent to `Val2`.
+ /// Note: This function doesn't take into account constraints on `Val1` and
+ /// `Val2` imposed by the flow condition.
+ bool equivalentFormulas(const Formula &Val1, const Formula &Val2);
+
+ LLVM_DUMP_METHOD void dumpFlowCondition(Atom Token,
+ llvm::raw_ostream &OS = llvm::dbgs());
+
+ /// Returns the `ControlFlowContext` registered for `F`, if any. Otherwise,
+ /// returns null.
+ const ControlFlowContext *getControlFlowContext(const FunctionDecl *F);
+
+ const Options &getOptions() { return Opts; }
+
+ Arena &arena() { return *A; }
+
+ /// Returns the outcome of satisfiability checking on `Constraints`.
+ ///
+ /// Flow conditions are not incorporated, so they may need to be manually
+ /// included in `Constraints` to provide contextually-accurate results, e.g.
+ /// if any definitions or relationships of the values in `Constraints` have
+ /// been stored in flow conditions.
+ Solver::Result querySolver(llvm::SetVector<const Formula *> Constraints);
+
+ /// Returns the fields of `Type`, limited to the set of fields modeled by this
+ /// context.
+ FieldSet getModeledFields(QualType Type);
+
+ /// Returns the names and types of the synthetic fields for the given record
+ /// type.
+ llvm::StringMap<QualType> getSyntheticFields(QualType Type) {
+ assert(Type->isRecordType());
+ if (SyntheticFieldCallback)
+ return SyntheticFieldCallback(Type);
+ return {};
+ }
+
+private:
+ friend class Environment;
+
+ struct NullableQualTypeDenseMapInfo : private llvm::DenseMapInfo<QualType> {
+ static QualType getEmptyKey() {
+ // Allow a NULL `QualType` by using a different value as the empty key.
+ return QualType::getFromOpaquePtr(reinterpret_cast<Type *>(1));
+ }
+
+ using DenseMapInfo::getHashValue;
+ using DenseMapInfo::getTombstoneKey;
+ using DenseMapInfo::isEqual;
+ };
+
+ // Extends the set of modeled field declarations.
+ void addModeledFields(const FieldSet &Fields);
+
+ /// Adds all constraints of the flow condition identified by `Token` and all
+ /// of its transitive dependencies to `Constraints`.
+ void
+ addTransitiveFlowConditionConstraints(Atom Token,
+ llvm::SetVector<const Formula *> &Out);
+
+ /// Returns true if the solver is able to prove that there is a satisfying
+ /// assignment for `Constraints`.
+ bool isSatisfiable(llvm::SetVector<const Formula *> Constraints) {
+ return querySolver(std::move(Constraints)).getStatus() ==
+ Solver::Result::Status::Satisfiable;
+ }
+
+ /// Returns true if the solver is able to prove that there is no satisfying
+ /// assignment for `Constraints`
+ bool isUnsatisfiable(llvm::SetVector<const Formula *> Constraints) {
+ return querySolver(std::move(Constraints)).getStatus() ==
+ Solver::Result::Status::Unsatisfiable;
+ }
+
+ std::unique_ptr<Solver> S;
+ std::unique_ptr<Arena> A;
+
+ // Maps from program declarations and statements to storage locations that are
+ // assigned to them. These assignments are global (aggregated across all basic
+ // blocks) and are used to produce stable storage locations when the same
+ // basic blocks are evaluated multiple times. The storage locations that are
+ // in scope for a particular basic block are stored in `Environment`.
+ llvm::DenseMap<const ValueDecl *, StorageLocation *> DeclToLoc;
+ llvm::DenseMap<const Expr *, StorageLocation *> ExprToLoc;
+
+ // Null pointer values, keyed by the canonical pointee type.
+ //
+ // FIXME: The pointer values are indexed by the pointee types which are
+ // required to initialize the `PointeeLoc` field in `PointerValue`. Consider
+ // creating a type-independent `NullPointerValue` without a `PointeeLoc`
+ // field.
+ llvm::DenseMap<QualType, PointerValue *, NullableQualTypeDenseMapInfo>
+ NullPointerVals;
+
+ Options Opts;
+
+ // Flow conditions are tracked symbolically: each unique flow condition is
+ // associated with a fresh symbolic variable (token), bound to the clause that
+ // defines the flow condition. Conceptually, each binding corresponds to an
+ // "iff" of the form `FC <=> (C1 ^ C2 ^ ...)` where `FC` is a flow condition
+ // token (an atomic boolean) and `Ci`s are the set of constraints in the flow
+ // flow condition clause. The set of constraints (C1 ^ C2 ^ ...) are stored in
+ // the `FlowConditionConstraints` map, keyed by the token of the flow
+ // condition.
+ //
+ // Flow conditions depend on other flow conditions if they are created using
+ // `forkFlowCondition` or `joinFlowConditions`. The graph of flow condition
+ // dependencies is stored in the `FlowConditionDeps` map.
+ llvm::DenseMap<Atom, llvm::DenseSet<Atom>> FlowConditionDeps;
+ llvm::DenseMap<Atom, const Formula *> FlowConditionConstraints;
+ const Formula *Invariant = nullptr;
+
+ llvm::DenseMap<const FunctionDecl *, ControlFlowContext> FunctionContexts;
+
+ // Fields modeled by environments covered by this context.
+ FieldSet ModeledFields;
+
+ std::unique_ptr<Logger> LogOwner; // If created via flags.
+
+ std::function<llvm::StringMap<QualType>(QualType)> SyntheticFieldCallback;
+
+ /// Has any `RecordStorageLocation` been created yet?
+ bool RecordStorageLocationCreated = false;
+};
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWANALYSISCONTEXT_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
new file mode 100644
index 000000000000..1543f900e401
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
@@ -0,0 +1,739 @@
+//===-- DataflowEnvironment.h -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an Environment class that is used by dataflow analyses
+// that run over Control-Flow Graphs (CFGs) to keep track of the state of the
+// program at given program points.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWENVIRONMENT_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWENVIRONMENT_H
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
+#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
+#include "clang/Analysis/FlowSensitive/DataflowAnalysisContext.h"
+#include "clang/Analysis/FlowSensitive/DataflowLattice.h"
+#include "clang/Analysis/FlowSensitive/Formula.h"
+#include "clang/Analysis/FlowSensitive/Logger.h"
+#include "clang/Analysis/FlowSensitive/StorageLocation.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+namespace clang {
+namespace dataflow {
+
+/// Indicates the result of a tentative comparison.
+enum class ComparisonResult {
+ Same,
+ Different,
+ Unknown,
+};
+
+/// Holds the state of the program (store and heap) at a given program point.
+///
+/// WARNING: Symbolic values that are created by the environment for static
+/// local and global variables are not currently invalidated on function calls.
+/// This is unsound and should be taken into account when designing dataflow
+/// analyses.
+class Environment {
+public:
+ /// Supplements `Environment` with non-standard comparison and join
+ /// operations.
+ class ValueModel {
+ public:
+ virtual ~ValueModel() = default;
+
+ /// Returns:
+ /// `Same`: `Val1` is equivalent to `Val2`, according to the model.
+ /// `Different`: `Val1` is distinct from `Val2`, according to the model.
+ /// `Unknown`: The model can't determine a relationship between `Val1` and
+ /// `Val2`.
+ ///
+ /// Requirements:
+ ///
+ /// `Val1` and `Val2` must be distinct.
+ ///
+ /// `Val1` and `Val2` must model values of type `Type`.
+ ///
+ /// `Val1` and `Val2` must be assigned to the same storage location in
+ /// `Env1` and `Env2` respectively.
+ virtual ComparisonResult compare(QualType Type, const Value &Val1,
+ const Environment &Env1, const Value &Val2,
+ const Environment &Env2) {
+ // FIXME: Consider adding QualType to RecordValue and removing the Type
+ // argument here.
+ return ComparisonResult::Unknown;
+ }
+
+ /// Modifies `MergedVal` to approximate both `Val1` and `Val2`. This could
+ /// be a strict lattice join or a more general widening operation.
+ ///
+ /// If this function returns true, `MergedVal` will be assigned to a storage
+ /// location of type `Type` in `MergedEnv`.
+ ///
+ /// `Env1` and `Env2` can be used to query child values and path condition
+ /// implications of `Val1` and `Val2` respectively.
+ ///
+ /// Requirements:
+ ///
+ /// `Val1` and `Val2` must be distinct.
+ ///
+ /// `Val1`, `Val2`, and `MergedVal` must model values of type `Type`.
+ ///
+ /// `Val1` and `Val2` must be assigned to the same storage location in
+ /// `Env1` and `Env2` respectively.
+ virtual bool merge(QualType Type, const Value &Val1,
+ const Environment &Env1, const Value &Val2,
+ const Environment &Env2, Value &MergedVal,
+ Environment &MergedEnv) {
+ return true;
+ }
+
+ /// This function may widen the current value -- replace it with an
+ /// approximation that can reach a fixed point more quickly than iterated
+ /// application of the transfer function alone. The previous value is
+ /// provided to inform the choice of widened value. The function must also
+ /// serve as a comparison operation, by indicating whether the widened value
+ /// is equivalent to the previous value.
+ ///
+ /// Returns either:
+ ///
+ /// `nullptr`, if this value is not of interest to the model, or
+ ///
+ /// `&Prev`, if the widened value is equivalent to `Prev`, or
+ ///
+ /// A non-null value that approximates `Current`. `Prev` is available to
+ /// inform the chosen approximation.
+ ///
+ /// `PrevEnv` and `CurrentEnv` can be used to query child values and path
+ /// condition implications of `Prev` and `Current`, respectively.
+ ///
+ /// Requirements:
+ ///
+ /// `Prev` and `Current` must model values of type `Type`.
+ ///
+ /// `Prev` and `Current` must be assigned to the same storage location in
+ /// `PrevEnv` and `CurrentEnv`, respectively.
+ virtual Value *widen(QualType Type, Value &Prev, const Environment &PrevEnv,
+ Value &Current, Environment &CurrentEnv) {
+ // The default implementation reduces to just comparison, since comparison
+ // is required by the API, even if no widening is performed.
+ switch (compare(Type, Prev, PrevEnv, Current, CurrentEnv)) {
+ case ComparisonResult::Same:
+ return &Prev;
+ case ComparisonResult::Different:
+ return &Current;
+ case ComparisonResult::Unknown:
+ return nullptr;
+ }
+ llvm_unreachable("all cases in switch covered");
+ }
+ };
+
+ /// Creates an environment that uses `DACtx` to store objects that encompass
+ /// the state of a program.
+ explicit Environment(DataflowAnalysisContext &DACtx);
+
+ // Copy-constructor is private, Environments should not be copied. See fork().
+ Environment &operator=(const Environment &Other) = delete;
+
+ Environment(Environment &&Other) = default;
+ Environment &operator=(Environment &&Other) = default;
+
+ /// Creates an environment that uses `DACtx` to store objects that encompass
+ /// the state of a program.
+ ///
+ /// If `DeclCtx` is a function, initializes the environment with symbolic
+ /// representations of the function parameters.
+ ///
+ /// If `DeclCtx` is a non-static member function, initializes the environment
+ /// with a symbolic representation of the `this` pointee.
+ Environment(DataflowAnalysisContext &DACtx, const DeclContext &DeclCtx);
+
+ /// Assigns storage locations and values to all parameters, captures, global
+ /// variables, fields and functions referenced in the function currently being
+ /// analyzed.
+ ///
+ /// Requirements:
+ ///
+ /// The function must have a body, i.e.
+ /// `FunctionDecl::doesThisDecalarationHaveABody()` must be true.
+ void initialize();
+
+ /// Returns a new environment that is a copy of this one.
+ ///
+ /// The state of the program is initially the same, but can be mutated without
+ /// affecting the original.
+ ///
+ /// However the original should not be further mutated, as this may interfere
+ /// with the fork. (In practice, values are stored independently, but the
+ /// forked flow condition references the original).
+ Environment fork() const;
+
+ /// Creates and returns an environment to use for an inline analysis of the
+ /// callee. Uses the storage location from each argument in the `Call` as the
+ /// storage location for the corresponding parameter in the callee.
+ ///
+ /// Requirements:
+ ///
+ /// The callee of `Call` must be a `FunctionDecl`.
+ ///
+ /// The body of the callee must not reference globals.
+ ///
+ /// The arguments of `Call` must map 1:1 to the callee's parameters.
+ Environment pushCall(const CallExpr *Call) const;
+ Environment pushCall(const CXXConstructExpr *Call) const;
+
+ /// Moves gathered information back into `this` from a `CalleeEnv` created via
+ /// `pushCall`.
+ void popCall(const CallExpr *Call, const Environment &CalleeEnv);
+ void popCall(const CXXConstructExpr *Call, const Environment &CalleeEnv);
+
+ /// Returns true if and only if the environment is equivalent to `Other`, i.e
+ /// the two environments:
+ /// - have the same mappings from declarations to storage locations,
+ /// - have the same mappings from expressions to storage locations,
+ /// - have the same or equivalent (according to `Model`) values assigned to
+ /// the same storage locations.
+ ///
+ /// Requirements:
+ ///
+ /// `Other` and `this` must use the same `DataflowAnalysisContext`.
+ bool equivalentTo(const Environment &Other,
+ Environment::ValueModel &Model) const;
+
+ /// Joins two environments by taking the intersection of storage locations and
+ /// values that are stored in them. Distinct values that are assigned to the
+ /// same storage locations in `EnvA` and `EnvB` are merged using `Model`.
+ ///
+ /// Requirements:
+ ///
+ /// `EnvA` and `EnvB` must use the same `DataflowAnalysisContext`.
+ static Environment join(const Environment &EnvA, const Environment &EnvB,
+ Environment::ValueModel &Model);
+
+ /// Widens the environment point-wise, using `PrevEnv` as needed to inform the
+ /// approximation.
+ ///
+ /// Requirements:
+ ///
+ /// `PrevEnv` must be the immediate previous version of the environment.
+ /// `PrevEnv` and `this` must use the same `DataflowAnalysisContext`.
+ LatticeJoinEffect widen(const Environment &PrevEnv,
+ Environment::ValueModel &Model);
+
+ // FIXME: Rename `createOrGetStorageLocation` to `getOrCreateStorageLocation`,
+ // `getStableStorageLocation`, or something more appropriate.
+
+ /// Creates a storage location appropriate for `Type`. Does not assign a value
+ /// to the returned storage location in the environment.
+ ///
+ /// Requirements:
+ ///
+ /// `Type` must not be null.
+ StorageLocation &createStorageLocation(QualType Type);
+
+ /// Creates a storage location for `D`. Does not assign the returned storage
+ /// location to `D` in the environment. Does not assign a value to the
+ /// returned storage location in the environment.
+ StorageLocation &createStorageLocation(const ValueDecl &D);
+
+ /// Creates a storage location for `E`. Does not assign the returned storage
+ /// location to `E` in the environment. Does not assign a value to the
+ /// returned storage location in the environment.
+ StorageLocation &createStorageLocation(const Expr &E);
+
+ /// Assigns `Loc` as the storage location of `D` in the environment.
+ ///
+ /// Requirements:
+ ///
+ /// `D` must not already have a storage location in the environment.
+ void setStorageLocation(const ValueDecl &D, StorageLocation &Loc);
+
+ /// Returns the storage location assigned to `D` in the environment, or null
+ /// if `D` isn't assigned a storage location in the environment.
+ StorageLocation *getStorageLocation(const ValueDecl &D) const;
+
+ /// Removes the location assigned to `D` in the environment (if any).
+ void removeDecl(const ValueDecl &D);
+
+ /// Assigns `Loc` as the storage location of the glvalue `E` in the
+ /// environment.
+ ///
+ /// Requirements:
+ ///
+ /// `E` must not be assigned a storage location in the environment.
+ /// `E` must be a glvalue or a `BuiltinType::BuiltinFn`
+ void setStorageLocation(const Expr &E, StorageLocation &Loc);
+
+ /// Returns the storage location assigned to the glvalue `E` in the
+ /// environment, or null if `E` isn't assigned a storage location in the
+ /// environment.
+ ///
+ /// Requirements:
+ /// `E` must be a glvalue or a `BuiltinType::BuiltinFn`
+ StorageLocation *getStorageLocation(const Expr &E) const;
+
+ /// Returns the result of casting `getStorageLocation(...)` to a subclass of
+ /// `StorageLocation` (using `cast_or_null<T>`).
+ /// This assert-fails if the result of `getStorageLocation(...)` is not of
+ /// type `T *`; if the storage location is not guaranteed to have type `T *`,
+ /// consider using `dyn_cast_or_null<T>(getStorageLocation(...))` instead.
+ template <typename T>
+ std::enable_if_t<std::is_base_of_v<StorageLocation, T>, T *>
+ get(const ValueDecl &D) const {
+ return cast_or_null<T>(getStorageLocation(D));
+ }
+ template <typename T>
+ std::enable_if_t<std::is_base_of_v<StorageLocation, T>, T *>
+ get(const Expr &E) const {
+ return cast_or_null<T>(getStorageLocation(E));
+ }
+
+ /// Returns the storage location assigned to the `this` pointee in the
+ /// environment or null if the `this` pointee has no assigned storage location
+ /// in the environment.
+ RecordStorageLocation *getThisPointeeStorageLocation() const {
+ return ThisPointeeLoc;
+ }
+
+ /// Sets the storage location assigned to the `this` pointee in the
+ /// environment.
+ void setThisPointeeStorageLocation(RecordStorageLocation &Loc) {
+ ThisPointeeLoc = &Loc;
+ }
+
+ /// Returns the location of the result object for a record-type prvalue.
+ ///
+ /// In C++, prvalues of record type serve only a limited purpose: They can
+ /// only be used to initialize a result object (e.g. a variable or a
+ /// temporary). This function returns the location of that result object.
+ ///
+ /// When creating a prvalue of record type, we already need the storage
+ /// location of the result object to pass in `this`, even though prvalues are
+ /// otherwise not associated with storage locations.
+ ///
+ /// FIXME: Currently, this simply returns a stable storage location for `E`,
+ /// but this doesn't do the right thing in scenarios like the following:
+ /// ```
+ /// MyClass c = some_condition()? MyClass(foo) : MyClass(bar);
+ /// ```
+ /// Here, `MyClass(foo)` and `MyClass(bar)` will have two different storage
+ /// locations, when in fact their storage locations should be the same.
+ /// Eventually, we want to propagate storage locations from result objects
+ /// down to the prvalues that initialize them, similar to the way that this is
+ /// done in Clang's CodeGen.
+ ///
+ /// Requirements:
+ /// `E` must be a prvalue of record type.
+ RecordStorageLocation &
+ getResultObjectLocation(const Expr &RecordPRValue) const;
+
+ /// Returns the return value of the current function. This can be null if:
+ /// - The function has a void return type
+ /// - No return value could be determined for the function, for example
+ /// because it calls a function without a body.
+ ///
+ /// Requirements:
+ /// The current function must have a non-reference return type.
+ Value *getReturnValue() const {
+ assert(getCurrentFunc() != nullptr &&
+ !getCurrentFunc()->getReturnType()->isReferenceType());
+ return ReturnVal;
+ }
+
+ /// Returns the storage location for the reference returned by the current
+ /// function. This can be null if function doesn't return a single consistent
+ /// reference.
+ ///
+ /// Requirements:
+ /// The current function must have a reference return type.
+ StorageLocation *getReturnStorageLocation() const {
+ assert(getCurrentFunc() != nullptr &&
+ getCurrentFunc()->getReturnType()->isReferenceType());
+ return ReturnLoc;
+ }
+
+ /// Sets the return value of the current function.
+ ///
+ /// Requirements:
+ /// The current function must have a non-reference return type.
+ void setReturnValue(Value *Val) {
+ assert(getCurrentFunc() != nullptr &&
+ !getCurrentFunc()->getReturnType()->isReferenceType());
+ ReturnVal = Val;
+ }
+
+ /// Sets the storage location for the reference returned by the current
+ /// function.
+ ///
+ /// Requirements:
+ /// The current function must have a reference return type.
+ void setReturnStorageLocation(StorageLocation *Loc) {
+ assert(getCurrentFunc() != nullptr &&
+ getCurrentFunc()->getReturnType()->isReferenceType());
+ ReturnLoc = Loc;
+ }
+
+ /// Returns a pointer value that represents a null pointer. Calls with
+ /// `PointeeType` that are canonically equivalent will return the same result.
+ PointerValue &getOrCreateNullPointerValue(QualType PointeeType);
+
+ /// Creates a value appropriate for `Type`, if `Type` is supported, otherwise
+ /// returns null.
+ ///
+ /// If `Type` is a pointer or reference type, creates all the necessary
+ /// storage locations and values for indirections until it finds a
+ /// non-pointer/non-reference type.
+ ///
+ /// If `Type` is a class, struct, or union type, creates values for all
+ /// modeled fields (including synthetic fields) and calls `setValue()` to
+ /// associate the `RecordValue` with its storage location
+ /// (`RecordValue::getLoc()`).
+ ///
+ /// If `Type` is one of the following types, this function will always return
+ /// a non-null pointer:
+ /// - `bool`
+ /// - Any integer type
+ /// - Any class, struct, or union type
+ ///
+ /// Requirements:
+ ///
+ /// `Type` must not be null.
+ Value *createValue(QualType Type);
+
+ /// Creates an object (i.e. a storage location with an associated value) of
+ /// type `Ty`. If `InitExpr` is non-null and has a value associated with it,
+ /// initializes the object with this value. Otherwise, initializes the object
+ /// with a value created using `createValue()`.
+ StorageLocation &createObject(QualType Ty, const Expr *InitExpr = nullptr) {
+ return createObjectInternal(nullptr, Ty, InitExpr);
+ }
+
+ /// Creates an object for the variable declaration `D`. If `D` has an
+ /// initializer and this initializer is associated with a value, initializes
+ /// the object with this value. Otherwise, initializes the object with a
+ /// value created using `createValue()`. Uses the storage location returned by
+ /// `DataflowAnalysisContext::getStableStorageLocation(D)`.
+ StorageLocation &createObject(const VarDecl &D) {
+ return createObjectInternal(&D, D.getType(), D.getInit());
+ }
+
+ /// Creates an object for the variable declaration `D`. If `InitExpr` is
+ /// non-null and has a value associated with it, initializes the object with
+ /// this value. Otherwise, initializes the object with a value created using
+ /// `createValue()`. Uses the storage location returned by
+ /// `DataflowAnalysisContext::getStableStorageLocation(D)`.
+ StorageLocation &createObject(const ValueDecl &D, const Expr *InitExpr) {
+ return createObjectInternal(&D, D.getType(), InitExpr);
+ }
+
+ /// Assigns `Val` as the value of `Loc` in the environment.
+ void setValue(const StorageLocation &Loc, Value &Val);
+
+ /// Clears any association between `Loc` and a value in the environment.
+ void clearValue(const StorageLocation &Loc) { LocToVal.erase(&Loc); }
+
+ /// Assigns `Val` as the value of the prvalue `E` in the environment.
+ ///
+ /// Requirements:
+ ///
+ /// - `E` must be a prvalue
+ /// - If `Val` is a `RecordValue`, its `RecordStorageLocation` must be
+ /// `getResultObjectLocation(E)`. An exception to this is if `E` is an
+ /// expression that originally creates a `RecordValue` (such as a
+ /// `CXXConstructExpr` or `CallExpr`), as these establish the location of
+ /// the result object in the first place.
+ void setValue(const Expr &E, Value &Val);
+
+ /// Returns the value assigned to `Loc` in the environment or null if `Loc`
+ /// isn't assigned a value in the environment.
+ Value *getValue(const StorageLocation &Loc) const;
+
+ /// Equivalent to `getValue(getStorageLocation(D))` if `D` is assigned a
+ /// storage location in the environment, otherwise returns null.
+ Value *getValue(const ValueDecl &D) const;
+
+ /// Equivalent to `getValue(getStorageLocation(E, SP))` if `E` is assigned a
+ /// storage location in the environment, otherwise returns null.
+ Value *getValue(const Expr &E) const;
+
+ /// Returns the result of casting `getValue(...)` to a subclass of `Value`
+ /// (using `cast_or_null<T>`).
+ /// This assert-fails if the result of `getValue(...)` is not of type `T *`;
+ /// if the value is not guaranteed to have type `T *`, consider using
+ /// `dyn_cast_or_null<T>(getValue(...))` instead.
+ template <typename T>
+ std::enable_if_t<std::is_base_of_v<Value, T>, T *>
+ get(const StorageLocation &Loc) const {
+ return cast_or_null<T>(getValue(Loc));
+ }
+ template <typename T>
+ std::enable_if_t<std::is_base_of_v<Value, T>, T *>
+ get(const ValueDecl &D) const {
+ return cast_or_null<T>(getValue(D));
+ }
+ template <typename T>
+ std::enable_if_t<std::is_base_of_v<Value, T>, T *> get(const Expr &E) const {
+ return cast_or_null<T>(getValue(E));
+ }
+
+ // FIXME: should we deprecate the following & call arena().create() directly?
+
+ /// Creates a `T` (some subclass of `Value`), forwarding `args` to the
+ /// constructor, and returns a reference to it.
+ ///
+ /// The analysis context takes ownership of the created object. The object
+ /// will be destroyed when the analysis context is destroyed.
+ template <typename T, typename... Args>
+ std::enable_if_t<std::is_base_of<Value, T>::value, T &>
+ create(Args &&...args) {
+ return arena().create<T>(std::forward<Args>(args)...);
+ }
+
+ /// Returns a symbolic integer value that models an integer literal equal to
+ /// `Value`
+ IntegerValue &getIntLiteralValue(llvm::APInt Value) const {
+ return arena().makeIntLiteral(Value);
+ }
+
+ /// Returns a symbolic boolean value that models a boolean literal equal to
+ /// `Value`
+ BoolValue &getBoolLiteralValue(bool Value) const {
+ return arena().makeBoolValue(arena().makeLiteral(Value));
+ }
+
+ /// Returns an atomic boolean value.
+ BoolValue &makeAtomicBoolValue() const {
+ return arena().makeAtomValue();
+ }
+
+ /// Returns a unique instance of boolean Top.
+ BoolValue &makeTopBoolValue() const {
+ return arena().makeTopValue();
+ }
+
+ /// Returns a boolean value that represents the conjunction of `LHS` and
+ /// `RHS`. Subsequent calls with the same arguments, regardless of their
+ /// order, will return the same result. If the given boolean values represent
+ /// the same value, the result will be the value itself.
+ BoolValue &makeAnd(BoolValue &LHS, BoolValue &RHS) const {
+ return arena().makeBoolValue(
+ arena().makeAnd(LHS.formula(), RHS.formula()));
+ }
+
+ /// Returns a boolean value that represents the disjunction of `LHS` and
+ /// `RHS`. Subsequent calls with the same arguments, regardless of their
+ /// order, will return the same result. If the given boolean values represent
+ /// the same value, the result will be the value itself.
+ BoolValue &makeOr(BoolValue &LHS, BoolValue &RHS) const {
+ return arena().makeBoolValue(
+ arena().makeOr(LHS.formula(), RHS.formula()));
+ }
+
+ /// Returns a boolean value that represents the negation of `Val`. Subsequent
+ /// calls with the same argument will return the same result.
+ BoolValue &makeNot(BoolValue &Val) const {
+ return arena().makeBoolValue(arena().makeNot(Val.formula()));
+ }
+
+ /// Returns a boolean value represents `LHS` => `RHS`. Subsequent calls with
+ /// the same arguments, will return the same result. If the given boolean
+ /// values represent the same value, the result will be a value that
+ /// represents the true boolean literal.
+ BoolValue &makeImplication(BoolValue &LHS, BoolValue &RHS) const {
+ return arena().makeBoolValue(
+ arena().makeImplies(LHS.formula(), RHS.formula()));
+ }
+
+ /// Returns a boolean value represents `LHS` <=> `RHS`. Subsequent calls with
+ /// the same arguments, regardless of their order, will return the same
+ /// result. If the given boolean values represent the same value, the result
+ /// will be a value that represents the true boolean literal.
+ BoolValue &makeIff(BoolValue &LHS, BoolValue &RHS) const {
+ return arena().makeBoolValue(
+ arena().makeEquals(LHS.formula(), RHS.formula()));
+ }
+
+ /// Returns a boolean variable that identifies the flow condition (FC).
+ ///
+ /// The flow condition is a set of facts that are necessarily true when the
+ /// program reaches the current point, expressed as boolean formulas.
+ /// The flow condition token is equivalent to the AND of these facts.
+ ///
+ /// These may e.g. constrain the value of certain variables. A pointer
+ /// variable may have a consistent modeled PointerValue throughout, but at a
+ /// given point the Environment may tell us that the value must be non-null.
+ ///
+ /// The FC is necessary but not sufficient for this point to be reachable.
+ /// In particular, where the FC token appears in flow conditions of successor
+ /// environments, it means "point X may have been reached", not
+ /// "point X was reached".
+ Atom getFlowConditionToken() const { return FlowConditionToken; }
+
+ /// Record a fact that must be true if this point in the program is reached.
+ void assume(const Formula &);
+
+ /// Returns true if the formula is always true when this point is reached.
+ /// Returns false if the formula may be false (or the flow condition isn't
+ /// sufficiently precise to prove that it is true) or if the solver times out.
+ ///
+ /// Note that there is an asymmetry between this function and `allows()` in
+ /// that they both return false if the solver times out. The assumption is
+ /// that if `proves()` or `allows()` returns true, this will result in a
+ /// diagnostic, and we want to bias towards false negatives in the case where
+ /// the solver times out.
+ bool proves(const Formula &) const;
+
+ /// Returns true if the formula may be true when this point is reached.
+ /// Returns false if the formula is always false when this point is reached
+ /// (or the flow condition is overly constraining) or if the solver times out.
+ bool allows(const Formula &) const;
+
+ /// Returns the `DeclContext` of the block being analysed, if any. Otherwise,
+ /// returns null.
+ const DeclContext *getDeclCtx() const { return CallStack.back(); }
+
+ /// Returns the function currently being analyzed, or null if the code being
+ /// analyzed isn't part of a function.
+ const FunctionDecl *getCurrentFunc() const {
+ return dyn_cast<FunctionDecl>(getDeclCtx());
+ }
+
+ /// Returns the size of the call stack.
+ size_t callStackSize() const { return CallStack.size(); }
+
+ /// Returns whether this `Environment` can be extended to analyze the given
+ /// `Callee` (i.e. if `pushCall` can be used), with recursion disallowed and a
+ /// given `MaxDepth`.
+ bool canDescend(unsigned MaxDepth, const DeclContext *Callee) const;
+
+ /// Returns the `DataflowAnalysisContext` used by the environment.
+ DataflowAnalysisContext &getDataflowAnalysisContext() const { return *DACtx; }
+
+ Arena &arena() const { return DACtx->arena(); }
+
+ LLVM_DUMP_METHOD void dump() const;
+ LLVM_DUMP_METHOD void dump(raw_ostream &OS) const;
+
+private:
+ // The copy-constructor is for use in fork() only.
+ Environment(const Environment &) = default;
+
+ /// Creates a value appropriate for `Type`, if `Type` is supported, otherwise
+ /// return null.
+ ///
+ /// Recursively initializes storage locations and values until it sees a
+ /// self-referential pointer or reference type. `Visited` is used to track
+ /// which types appeared in the reference/pointer chain in order to avoid
+ /// creating a cyclic dependency with self-referential pointers/references.
+ ///
+ /// Requirements:
+ ///
+ /// `Type` must not be null.
+ Value *createValueUnlessSelfReferential(QualType Type,
+ llvm::DenseSet<QualType> &Visited,
+ int Depth, int &CreatedValuesCount);
+
+ /// Creates a storage location for `Ty`. Also creates and associates a value
+ /// with the storage location, unless values of this type are not supported or
+ /// we hit one of the limits at which we stop producing values (controlled by
+ /// `Visited`, `Depth`, and `CreatedValuesCount`).
+ StorageLocation &createLocAndMaybeValue(QualType Ty,
+ llvm::DenseSet<QualType> &Visited,
+ int Depth, int &CreatedValuesCount);
+
+ /// Shared implementation of `createObject()` overloads.
+ /// `D` and `InitExpr` may be null.
+ StorageLocation &createObjectInternal(const ValueDecl *D, QualType Ty,
+ const Expr *InitExpr);
+
+ /// Shared implementation of `pushCall` overloads. Note that unlike
+ /// `pushCall`, this member is invoked on the environment of the callee, not
+ /// of the caller.
+ void pushCallInternal(const FunctionDecl *FuncDecl,
+ ArrayRef<const Expr *> Args);
+
+ /// Assigns storage locations and values to all global variables, fields
+ /// and functions referenced in `FuncDecl`. `FuncDecl` must have a body.
+ void initFieldsGlobalsAndFuncs(const FunctionDecl *FuncDecl);
+
+ // `DACtx` is not null and not owned by this object.
+ DataflowAnalysisContext *DACtx;
+
+ // FIXME: move the fields `CallStack`, `ReturnVal`, `ReturnLoc` and
+ // `ThisPointeeLoc` into a separate call-context object, shared between
+ // environments in the same call.
+ // https://github.com/llvm/llvm-project/issues/59005
+
+ // `DeclContext` of the block being analysed if provided.
+ std::vector<const DeclContext *> CallStack;
+
+ // Value returned by the function (if it has non-reference return type).
+ Value *ReturnVal = nullptr;
+ // Storage location of the reference returned by the function (if it has
+ // reference return type).
+ StorageLocation *ReturnLoc = nullptr;
+ // The storage location of the `this` pointee. Should only be null if the
+ // function being analyzed is only a function and not a method.
+ RecordStorageLocation *ThisPointeeLoc = nullptr;
+
+ // Maps from declarations and glvalue expression to storage locations that are
+ // assigned to them. Unlike the maps in `DataflowAnalysisContext`, these
+ // include only storage locations that are in scope for a particular basic
+ // block.
+ llvm::DenseMap<const ValueDecl *, StorageLocation *> DeclToLoc;
+ llvm::DenseMap<const Expr *, StorageLocation *> ExprToLoc;
+ // Maps from prvalue expressions and storage locations to the values that
+ // are assigned to them.
+ // We preserve insertion order so that join/widen process values in
+ // deterministic sequence. This in turn produces deterministic SAT formulas.
+ llvm::MapVector<const Expr *, Value *> ExprToVal;
+ llvm::MapVector<const StorageLocation *, Value *> LocToVal;
+
+ Atom FlowConditionToken;
+};
+
+/// Returns the storage location for the implicit object of a
+/// `CXXMemberCallExpr`, or null if none is defined in the environment.
+/// Dereferences the pointer if the member call expression was written using
+/// `->`.
+RecordStorageLocation *getImplicitObjectLocation(const CXXMemberCallExpr &MCE,
+ const Environment &Env);
+
+/// Returns the storage location for the base object of a `MemberExpr`, or null
+/// if none is defined in the environment. Dereferences the pointer if the
+/// member expression was written using `->`.
+RecordStorageLocation *getBaseObjectLocation(const MemberExpr &ME,
+ const Environment &Env);
+
+/// Returns the fields of `RD` that are initialized by an `InitListExpr`, in the
+/// order in which they appear in `InitListExpr::inits()`.
+std::vector<FieldDecl *> getFieldsForInitListExpr(const RecordDecl *RD);
+
+/// Associates a new `RecordValue` with `Loc` and returns the new value.
+RecordValue &refreshRecordValue(RecordStorageLocation &Loc, Environment &Env);
+
+/// Associates a new `RecordValue` with `Expr` and returns the new value.
+RecordValue &refreshRecordValue(const Expr &Expr, Environment &Env);
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWENVIRONMENT_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowLattice.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowLattice.h
new file mode 100644
index 000000000000..0c81e2f078c2
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowLattice.h
@@ -0,0 +1,31 @@
+//===- DataflowLattice.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines base types for building lattices to be used in dataflow
+// analyses that run over Control-Flow Graphs (CFGs).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWLATTICE_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWLATTICE_H
+
+namespace clang {
+namespace dataflow {
+
+/// Effect indicating whether a lattice join operation resulted in a new value.
+// FIXME: Rename to `LatticeEffect` since `widen` uses it as well, and we are
+// likely removing it from `join`.
+enum class LatticeJoinEffect {
+ Unchanged,
+ Changed,
+};
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWLATTICE_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowValues.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowValues.h
index ab96cd5169a2..2248bcdf3a51 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowValues.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowValues.h
@@ -134,7 +134,7 @@ public:
/// getBlockDataMap - Retrieves the internal map between CFGBlocks and
/// dataflow values. If the dataflow analysis operates in the forward
/// direction, the values correspond to the dataflow values at the start
- /// of the block. Otherwise, for a backward analysis, the values correpsond
+ /// of the block. Otherwise, for a backward analysis, the values correspond
/// to the dataflow values at the end of the block.
BlockDataMapTy& getBlockDataMap() { return BlockDataMap; }
const BlockDataMapTy& getBlockDataMap() const { return BlockDataMap; }
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowWorklist.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowWorklist.h
index 90095735ad3d..f1d05743bf7f 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowWorklist.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DataflowWorklist.h
@@ -12,6 +12,7 @@
#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWWORKLIST_H
#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWWORKLIST_H
+#include "clang/Analysis/Analyses/IntervalPartition.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/CFG.h"
#include "llvm/ADT/PriorityQueue.h"
@@ -21,16 +22,13 @@ namespace clang {
/// on the order defined by 'Comp'.
template <typename Comp, unsigned QueueSize> class DataflowWorklistBase {
llvm::BitVector EnqueuedBlocks;
- PostOrderCFGView *POV;
llvm::PriorityQueue<const CFGBlock *,
SmallVector<const CFGBlock *, QueueSize>, Comp>
WorkList;
public:
- DataflowWorklistBase(const CFG &Cfg, PostOrderCFGView *POV, Comp C)
- : EnqueuedBlocks(Cfg.getNumBlockIDs()), POV(POV), WorkList(C) {}
-
- const PostOrderCFGView *getCFGView() const { return POV; }
+ DataflowWorklistBase(const CFG &Cfg, Comp C)
+ : EnqueuedBlocks(Cfg.getNumBlockIDs()), WorkList(C) {}
void enqueueBlock(const CFGBlock *Block) {
if (Block && !EnqueuedBlocks[Block->getBlockID()]) {
@@ -61,11 +59,25 @@ struct ReversePostOrderCompare {
/// the same block multiple times at once.
struct ForwardDataflowWorklist
: DataflowWorklistBase<ReversePostOrderCompare, 20> {
+ ForwardDataflowWorklist(const CFG &Cfg, PostOrderCFGView *POV)
+ : DataflowWorklistBase(Cfg,
+ ReversePostOrderCompare{POV->getComparator()}) {}
+
ForwardDataflowWorklist(const CFG &Cfg, AnalysisDeclContext &Ctx)
- : DataflowWorklistBase(
- Cfg, Ctx.getAnalysis<PostOrderCFGView>(),
- ReversePostOrderCompare{
- Ctx.getAnalysis<PostOrderCFGView>()->getComparator()}) {}
+ : ForwardDataflowWorklist(Cfg, Ctx.getAnalysis<PostOrderCFGView>()) {}
+
+ void enqueueSuccessors(const CFGBlock *Block) {
+ for (auto B : Block->succs())
+ enqueueBlock(B);
+ }
+};
+
+/// A worklist implementation for forward dataflow analysis based on a weak
+/// topological ordering of the nodes. The worklist cannot contain the same
+/// block multiple times at once.
+struct WTODataflowWorklist : DataflowWorklistBase<WTOCompare, 20> {
+ WTODataflowWorklist(const CFG &Cfg, const WTOCompare &Cmp)
+ : DataflowWorklistBase(Cfg, Cmp) {}
void enqueueSuccessors(const CFGBlock *Block) {
for (auto B : Block->succs())
@@ -80,8 +92,7 @@ struct BackwardDataflowWorklist
: DataflowWorklistBase<PostOrderCFGView::BlockOrderCompare, 20> {
BackwardDataflowWorklist(const CFG &Cfg, AnalysisDeclContext &Ctx)
: DataflowWorklistBase(
- Cfg, Ctx.getAnalysis<PostOrderCFGView>(),
- Ctx.getAnalysis<PostOrderCFGView>()->getComparator()) {}
+ Cfg, Ctx.getAnalysis<PostOrderCFGView>()->getComparator()) {}
void enqueuePredecessors(const CFGBlock *Block) {
for (auto B : Block->preds())
@@ -91,4 +102,4 @@ struct BackwardDataflowWorklist
} // namespace clang
-#endif // LLVM_CLANG_ANALYSIS_ANALYSES_CONSUMED_H
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWWORKLIST_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DebugSupport.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DebugSupport.h
new file mode 100644
index 000000000000..6b9f3681490a
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/DebugSupport.h
@@ -0,0 +1,36 @@
+//===-- DebugSupport.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functions which generate more readable forms of data
+// structures used in the dataflow analyses, for debugging purposes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DEBUGSUPPORT_H_
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DEBUGSUPPORT_H_
+
+#include <string>
+#include <vector>
+
+#include "clang/Analysis/FlowSensitive/Solver.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace clang {
+namespace dataflow {
+
+/// Returns a string representation of a value kind.
+llvm::StringRef debugString(Value::Kind Kind);
+
+/// Returns a string representation of the result status of a SAT check.
+llvm::StringRef debugString(Solver::Result::Status Status);
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DEBUGSUPPORT_H_
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Formula.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Formula.h
new file mode 100644
index 000000000000..0e6352403a83
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Formula.h
@@ -0,0 +1,147 @@
+//===- Formula.h - Boolean formulas -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_FORMULA_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_FORMULA_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <string>
+
+namespace clang::dataflow {
+
+/// Identifies an atomic boolean variable such as "V1".
+///
+/// This often represents an assertion that is interesting to the analysis but
+/// cannot immediately be proven true or false. For example:
+/// - V1 may mean "the program reaches this point",
+/// - V2 may mean "the parameter was null"
+///
+/// We can use these variables in formulas to describe relationships we know
+/// to be true: "if the parameter was null, the program reaches this point".
+/// We also express hypotheses as formulas, and use a SAT solver to check
+/// whether they are consistent with the known facts.
+enum class Atom : unsigned {};
+
+/// A boolean expression such as "true" or "V1 & !V2".
+/// Expressions may refer to boolean atomic variables. These should take a
+/// consistent true/false value across the set of formulas being considered.
+///
+/// (Formulas are always expressions in terms of boolean variables rather than
+/// e.g. integers because our underlying model is SAT rather than e.g. SMT).
+///
+/// Simple formulas such as "true" and "V1" are self-contained.
+/// Compound formulas connect other formulas, e.g. "(V1 & V2) || V3" is an 'or'
+/// formula, with pointers to its operands "(V1 & V2)" and "V3" stored as
+/// trailing objects.
+/// For this reason, Formulas are Arena-allocated and over-aligned.
+class Formula;
+class alignas(const Formula *) Formula {
+public:
+ enum Kind : unsigned {
+ /// A reference to an atomic boolean variable.
+ /// We name these e.g. "V3", where 3 == atom identity == Value.
+ AtomRef,
+ /// Constant true or false.
+ Literal,
+
+ Not, /// True if its only operand is false
+
+ // These kinds connect two operands LHS and RHS
+ And, /// True if LHS and RHS are both true
+ Or, /// True if either LHS or RHS is true
+ Implies, /// True if LHS is false or RHS is true
+ Equal, /// True if LHS and RHS have the same truth value
+ };
+ Kind kind() const { return FormulaKind; }
+
+ Atom getAtom() const {
+ assert(kind() == AtomRef);
+ return static_cast<Atom>(Value);
+ }
+
+ bool literal() const {
+ assert(kind() == Literal);
+ return static_cast<bool>(Value);
+ }
+
+ bool isLiteral(bool b) const {
+ return kind() == Literal && static_cast<bool>(Value) == b;
+ }
+
+ ArrayRef<const Formula *> operands() const {
+ return ArrayRef(reinterpret_cast<Formula *const *>(this + 1),
+ numOperands(kind()));
+ }
+
+ using AtomNames = llvm::DenseMap<Atom, std::string>;
+ // Produce a stable human-readable representation of this formula.
+ // For example: (V3 | !(V1 & V2))
+ // If AtomNames is provided, these override the default V0, V1... names.
+ void print(llvm::raw_ostream &OS, const AtomNames * = nullptr) const;
+
+ // Allocate Formulas using Arena rather than calling this function directly.
+ static const Formula &create(llvm::BumpPtrAllocator &Alloc, Kind K,
+ ArrayRef<const Formula *> Operands,
+ unsigned Value = 0);
+
+private:
+ Formula() = default;
+ Formula(const Formula &) = delete;
+ Formula &operator=(const Formula &) = delete;
+
+ static unsigned numOperands(Kind K) {
+ switch (K) {
+ case AtomRef:
+ case Literal:
+ return 0;
+ case Not:
+ return 1;
+ case And:
+ case Or:
+ case Implies:
+ case Equal:
+ return 2;
+ }
+ llvm_unreachable("Unhandled Formula::Kind enum");
+ }
+
+ Kind FormulaKind;
+ // Some kinds of formula have scalar values, e.g. AtomRef's atom number.
+ unsigned Value;
+};
+
+// The default names of atoms are V0, V1 etc in order of creation.
+inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, Atom A) {
+ return OS << 'V' << static_cast<unsigned>(A);
+}
+inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Formula &F) {
+ F.print(OS);
+ return OS;
+}
+
+} // namespace clang::dataflow
+namespace llvm {
+template <> struct DenseMapInfo<clang::dataflow::Atom> {
+ using Atom = clang::dataflow::Atom;
+ using Underlying = std::underlying_type_t<Atom>;
+
+ static inline Atom getEmptyKey() { return Atom(Underlying(-1)); }
+ static inline Atom getTombstoneKey() { return Atom(Underlying(-2)); }
+ static unsigned getHashValue(const Atom &Val) {
+ return DenseMapInfo<Underlying>::getHashValue(Underlying(Val));
+ }
+ static bool isEqual(const Atom &LHS, const Atom &RHS) { return LHS == RHS; }
+};
+} // namespace llvm
+#endif
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Logger.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Logger.h
new file mode 100644
index 000000000000..f4bd39f6ed49
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Logger.h
@@ -0,0 +1,91 @@
+//===-- Logger.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_LOGGER_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_LOGGER_H
+
+#include "clang/Analysis/CFG.h"
+#include "llvm/Support/raw_ostream.h"
+#include <memory>
+
+namespace clang::dataflow {
+// Forward declarations so we can use Logger anywhere in the framework.
+class ControlFlowContext;
+class TypeErasedDataflowAnalysis;
+struct TypeErasedDataflowAnalysisState;
+
+/// A logger is notified as the analysis progresses.
+/// It can produce a report of the analysis's findings and how it came to them.
+///
+/// The framework reports key structural events (e.g. traversal of blocks).
+/// The specific analysis can add extra details to be presented in context.
+class Logger {
+public:
+ /// Returns a dummy logger that does nothing.
+ static Logger &null();
+ /// A logger that simply writes messages to the specified ostream in real
+ /// time.
+ static std::unique_ptr<Logger> textual(llvm::raw_ostream &);
+ /// A logger that builds an HTML UI to inspect the analysis results.
+ /// Each function's analysis is written to a stream obtained from the factory.
+ static std::unique_ptr<Logger>
+ html(std::function<std::unique_ptr<llvm::raw_ostream>()>);
+
+ virtual ~Logger() = default;
+
+ /// Called by the framework as we start analyzing a new function or statement.
+ /// Forms a pair with endAnalysis().
+ virtual void beginAnalysis(const ControlFlowContext &,
+ TypeErasedDataflowAnalysis &) {}
+ virtual void endAnalysis() {}
+
+ // At any time during the analysis, we're computing the state for some target
+ // program point.
+
+ /// Called when we start (re-)processing a block in the CFG.
+ /// The target program point is the entry to the specified block.
+ /// Calls to log() describe transferBranch(), join() etc.
+ /// `PostVisit` specifies whether we're processing the block for the
+ /// post-visit callback.
+ virtual void enterBlock(const CFGBlock &, bool PostVisit) {}
+ /// Called when we start processing an element in the current CFG block.
+ /// The target program point is after the specified element.
+ /// Calls to log() describe the transfer() function.
+ virtual void enterElement(const CFGElement &) {}
+
+ /// Records the analysis state computed for the current program point.
+ virtual void recordState(TypeErasedDataflowAnalysisState &) {}
+ /// Records that the analysis state for the current block is now final.
+ virtual void blockConverged() {}
+
+ /// Called by the framework or user code to report some event.
+ /// The event is associated with the current context (program point).
+ /// The Emit function produces the log message. It may or may not be called,
+ /// depending on if the logger is interested; it should have no side effects.
+ void log(llvm::function_ref<void(llvm::raw_ostream &)> Emit) {
+ if (!ShouldLogText)
+ return;
+ std::string S;
+ llvm::raw_string_ostream OS(S);
+ Emit(OS);
+ logText(S);
+ }
+
+protected:
+ /// ShouldLogText should be false for trivial loggers that ignore logText().
+ /// This allows log() to skip evaluating its Emit function.
+ Logger(bool ShouldLogText = true) : ShouldLogText(ShouldLogText) {}
+
+private:
+ bool ShouldLogText;
+ virtual void logText(llvm::StringRef) {}
+};
+
+} // namespace clang::dataflow
+
+#endif
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/MapLattice.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/MapLattice.h
new file mode 100644
index 000000000000..16b0c978779a
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/MapLattice.h
@@ -0,0 +1,143 @@
+//===------------------------ MapLattice.h ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a parameterized lattice that maps keys to individual
+// lattice elements (of the parameter lattice type). A typical usage is lifting
+// a particular lattice to all variables in a lexical scope.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE__MAPLATTICE_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE__MAPLATTICE_H
+
+#include <ostream>
+#include <string>
+#include <utility>
+
+#include "DataflowAnalysis.h"
+#include "clang/AST/Decl.h"
+#include "clang/Analysis/FlowSensitive/DataflowLattice.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace clang {
+namespace dataflow {
+
+/// A lattice that maps keys to individual lattice elements. When instantiated
+/// with an `ElementLattice` that is a bounded semi-lattice, `MapLattice` is
+/// itself a bounded semi-lattice, so long as the user limits themselves to a
+/// finite number of keys. In that case, `top` is (implicitly), the map
+/// containing all valid keys mapped to `top` of `ElementLattice`.
+///
+/// Requirements on `ElementLattice`:
+/// * Provides standard declarations of a bounded semi-lattice.
+template <typename Key, typename ElementLattice> class MapLattice {
+ using Container = llvm::DenseMap<Key, ElementLattice>;
+ Container C;
+
+public:
+ using key_type = Key;
+ using mapped_type = ElementLattice;
+ using value_type = typename Container::value_type;
+ using iterator = typename Container::iterator;
+ using const_iterator = typename Container::const_iterator;
+
+ MapLattice() = default;
+
+ explicit MapLattice(Container C) { C = std::move(C); }
+
+ // The `bottom` element is the empty map.
+ static MapLattice bottom() { return MapLattice(); }
+
+ std::pair<iterator, bool>
+ insert(const std::pair<const key_type, mapped_type> &P) {
+ return C.insert(P);
+ }
+
+ std::pair<iterator, bool> insert(std::pair<const key_type, mapped_type> &&P) {
+ return C.insert(std::move(P));
+ }
+
+ unsigned size() const { return C.size(); }
+ bool empty() const { return C.empty(); }
+
+ iterator begin() { return C.begin(); }
+ iterator end() { return C.end(); }
+ const_iterator begin() const { return C.begin(); }
+ const_iterator end() const { return C.end(); }
+
+ // Equality is direct equality of underlying map entries. One implication of
+ // this definition is that a map with (only) keys that map to bottom is not
+ // equal to the empty map.
+ friend bool operator==(const MapLattice &LHS, const MapLattice &RHS) {
+ return LHS.C == RHS.C;
+ }
+
+ friend bool operator!=(const MapLattice &LHS, const MapLattice &RHS) {
+ return !(LHS == RHS);
+ }
+
+ bool contains(const key_type &K) const { return C.find(K) != C.end(); }
+
+ iterator find(const key_type &K) { return C.find(K); }
+ const_iterator find(const key_type &K) const { return C.find(K); }
+
+ mapped_type &operator[](const key_type &K) { return C[K]; }
+
+ /// If an entry exists in one map but not the other, the missing entry is
+ /// treated as implicitly mapping to `bottom`. So, the joined map contains the
+ /// entry as it was in the source map.
+ LatticeJoinEffect join(const MapLattice &Other) {
+ LatticeJoinEffect Effect = LatticeJoinEffect::Unchanged;
+ for (const auto &O : Other.C) {
+ auto It = C.find(O.first);
+ if (It == C.end()) {
+ C.insert(O);
+ Effect = LatticeJoinEffect::Changed;
+ } else if (It->second.join(O.second) == LatticeJoinEffect::Changed)
+ Effect = LatticeJoinEffect::Changed;
+ }
+ return Effect;
+ }
+};
+
+/// Convenience alias that captures the common use of map lattices to model
+/// in-scope variables.
+template <typename ElementLattice>
+using VarMapLattice = MapLattice<const clang::VarDecl *, ElementLattice>;
+
+template <typename Key, typename ElementLattice>
+std::ostream &
+operator<<(std::ostream &Os,
+ const clang::dataflow::MapLattice<Key, ElementLattice> &M) {
+ std::string Separator;
+ Os << "{";
+ for (const auto &E : M) {
+ Os << std::exchange(Separator, ", ") << E.first << " => " << E.second;
+ }
+ Os << "}";
+ return Os;
+}
+
+template <typename ElementLattice>
+std::ostream &
+operator<<(std::ostream &Os,
+ const clang::dataflow::VarMapLattice<ElementLattice> &M) {
+ std::string Separator;
+ Os << "{";
+ for (const auto &E : M) {
+ Os << std::exchange(Separator, ", ") << E.first->getName().str() << " => "
+ << E.second;
+ }
+ Os << "}";
+ return Os;
+}
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE__MAPLATTICE_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/MatchSwitch.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/MatchSwitch.h
new file mode 100644
index 000000000000..085308f7db54
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/MatchSwitch.h
@@ -0,0 +1,174 @@
+//===---- MatchSwitch.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the `ASTMatchSwitch` abstraction for building a "switch"
+// statement, where each case of the switch is defined by an AST matcher. The
+// cases are considered in order, like pattern matching in functional
+// languages.
+//
+// Currently, the design is catered towards simplifying the implementation of
+// `DataflowAnalysis` transfer functions. Based on experience here, this
+// library may be generalized and moved to ASTMatchers.
+//
+//===----------------------------------------------------------------------===//
+//
+// FIXME: Rename to ASTMatchSwitch.h
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_MATCHSWITCH_H_
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_MATCHSWITCH_H_
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Stmt.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "llvm/ADT/StringRef.h"
+#include <functional>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+namespace clang {
+namespace dataflow {
+
+/// A common form of state shared between the cases of a transfer function.
+template <typename LatticeT> struct TransferState {
+ TransferState(LatticeT &Lattice, Environment &Env)
+ : Lattice(Lattice), Env(Env) {}
+
+ /// Current lattice element.
+ LatticeT &Lattice;
+ Environment &Env;
+};
+
+/// A read-only version of TransferState.
+///
+/// FIXME: this type is being used as a general (typed) view type for untyped
+/// dataflow analysis state, rather than strictly for transfer-function
+/// purposes. Move it (and rename it) to DataflowAnalysis.h.
+template <typename LatticeT> struct TransferStateForDiagnostics {
+ TransferStateForDiagnostics(const LatticeT &Lattice, const Environment &Env)
+ : Lattice(Lattice), Env(Env) {}
+
+ /// Current lattice element.
+ const LatticeT &Lattice;
+ const Environment &Env;
+};
+
+template <typename T>
+using MatchSwitchMatcher = ast_matchers::internal::Matcher<T>;
+
+template <typename T, typename State, typename Result = void>
+using MatchSwitchAction = std::function<Result(
+ const T *, const ast_matchers::MatchFinder::MatchResult &, State &)>;
+
+template <typename BaseT, typename State, typename Result = void>
+using ASTMatchSwitch =
+ std::function<Result(const BaseT &, ASTContext &, State &)>;
+
+/// Collects cases of a "match switch": a collection of matchers paired with
+/// callbacks, which together define a switch that can be applied to a node
+/// whose type derives from `BaseT`. This structure can simplify the definition
+/// of `transfer` functions that rely on pattern-matching.
+///
+/// For example, consider an analysis that handles particular function calls. It
+/// can define the `ASTMatchSwitch` once, in the constructor of the analysis,
+/// and then reuse it each time that `transfer` is called, with a fresh state
+/// value.
+///
+/// \code
+/// ASTMatchSwitch<Stmt, TransferState<MyLattice> BuildSwitch() {
+/// return ASTMatchSwitchBuilder<TransferState<MyLattice>>()
+/// .CaseOf(callExpr(callee(functionDecl(hasName("foo")))), TransferFooCall)
+/// .CaseOf(callExpr(argumentCountIs(2),
+/// callee(functionDecl(hasName("bar")))),
+/// TransferBarCall)
+/// .Build();
+/// }
+/// \endcode
+template <typename BaseT, typename State, typename Result = void>
+class ASTMatchSwitchBuilder {
+public:
+ /// Registers an action that will be triggered by the match of a pattern
+ /// against the input statement.
+ ///
+ /// Requirements:
+ ///
+ /// `NodeT` should be derived from `BaseT`.
+ template <typename NodeT>
+ ASTMatchSwitchBuilder &&CaseOf(MatchSwitchMatcher<BaseT> M,
+ MatchSwitchAction<NodeT, State, Result> A) && {
+ static_assert(std::is_base_of<BaseT, NodeT>::value,
+ "NodeT must be derived from BaseT.");
+ Matchers.push_back(std::move(M));
+ Actions.push_back(
+ [A = std::move(A)](const BaseT *Node,
+ const ast_matchers::MatchFinder::MatchResult &R,
+ State &S) { return A(cast<NodeT>(Node), R, S); });
+ return std::move(*this);
+ }
+
+ ASTMatchSwitch<BaseT, State, Result> Build() && {
+ return [Matcher = BuildMatcher(), Actions = std::move(Actions)](
+ const BaseT &Node, ASTContext &Context, State &S) -> Result {
+ auto Results = ast_matchers::matchDynamic(Matcher, Node, Context);
+ if (Results.empty()) {
+ return Result();
+ }
+ // Look through the map for the first binding of the form "TagN..." use
+ // that to select the action.
+ for (const auto &Element : Results[0].getMap()) {
+ llvm::StringRef ID(Element.first);
+ size_t Index = 0;
+ if (ID.consume_front("Tag") && !ID.getAsInteger(10, Index) &&
+ Index < Actions.size()) {
+ return Actions[Index](
+ &Node,
+ ast_matchers::MatchFinder::MatchResult(Results[0], &Context), S);
+ }
+ }
+ return Result();
+ };
+ }
+
+private:
+ ast_matchers::internal::DynTypedMatcher BuildMatcher() {
+ using ast_matchers::anything;
+ using ast_matchers::stmt;
+ using ast_matchers::unless;
+ using ast_matchers::internal::DynTypedMatcher;
+ if (Matchers.empty())
+ return stmt(unless(anything()));
+ for (int I = 0, N = Matchers.size(); I < N; ++I) {
+ std::string Tag = ("Tag" + llvm::Twine(I)).str();
+ // Many matchers are not bindable, so ensure that tryBind will work.
+ Matchers[I].setAllowBind(true);
+ auto M = *Matchers[I].tryBind(Tag);
+ // Each anyOf explicitly controls the traversal kind. The anyOf itself is
+ // set to `TK_AsIs` to ensure no nodes are skipped, thereby deferring to
+ // the kind of the branches. Then, each branch is either left as is, if
+ // the kind is already set, or explicitly set to `TK_AsIs`. We choose this
+ // setting because it is the default interpretation of matchers.
+ Matchers[I] =
+ !M.getTraversalKind() ? M.withTraversalKind(TK_AsIs) : std::move(M);
+ }
+ // The matcher type on the cases ensures that `Expr` kind is compatible with
+ // all of the matchers.
+ return DynTypedMatcher::constructVariadic(
+ DynTypedMatcher::VO_AnyOf, ASTNodeKind::getFromNodeKind<BaseT>(),
+ std::move(Matchers));
+ }
+
+ std::vector<ast_matchers::internal::DynTypedMatcher> Matchers;
+ std::vector<MatchSwitchAction<BaseT, State, Result>> Actions;
+};
+
+} // namespace dataflow
+} // namespace clang
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_MATCHSWITCH_H_
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Models/ChromiumCheckModel.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Models/ChromiumCheckModel.h
new file mode 100644
index 000000000000..b4315e41d79f
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Models/ChromiumCheckModel.h
@@ -0,0 +1,38 @@
+//===-- ChromiumCheckModel.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a dataflow model for Chromium's family of CHECK functions.
+//
+//===----------------------------------------------------------------------===//
+#ifndef CLANG_ANALYSIS_FLOWSENSITIVE_MODELS_CHROMIUMCHECKMODEL_H
+#define CLANG_ANALYSIS_FLOWSENSITIVE_MODELS_CHROMIUMCHECKMODEL_H
+
+#include "clang/AST/DeclCXX.h"
+#include "clang/Analysis/FlowSensitive/DataflowAnalysis.h"
+#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "llvm/ADT/DenseSet.h"
+
+namespace clang {
+namespace dataflow {
+
+/// Models the behavior of Chromium's CHECK, DCHECK, etc. macros, so that code
+/// after a call to `*CHECK` can rely on the condition being true.
+class ChromiumCheckModel : public DataflowModel {
+public:
+ ChromiumCheckModel() = default;
+ bool transfer(const CFGElement &Element, Environment &Env) override;
+
+private:
+ /// Declarations for `::logging::CheckError::.*Check`, lazily initialized.
+ llvm::SmallDenseSet<const CXXMethodDecl *> CheckDecls;
+};
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // CLANG_ANALYSIS_FLOWSENSITIVE_MODELS_CHROMIUMCHECKMODEL_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.h
new file mode 100644
index 000000000000..09eb8b938226
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.h
@@ -0,0 +1,80 @@
+//===-- UncheckedOptionalAccessModel.h --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a dataflow analysis that detects unsafe uses of optional
+// values.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_ANALYSIS_FLOWSENSITIVE_MODELS_UNCHECKEDOPTIONALACCESSMODEL_H
+#define CLANG_ANALYSIS_FLOWSENSITIVE_MODELS_UNCHECKEDOPTIONALACCESSMODEL_H
+
+#include "clang/AST/ASTContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/FlowSensitive/CFGMatchSwitch.h"
+#include "clang/Analysis/FlowSensitive/DataflowAnalysis.h"
+#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/Analysis/FlowSensitive/NoopLattice.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang {
+namespace dataflow {
+
+// FIXME: Explore using an allowlist-approach, where constructs supported by the
+// analysis are always enabled and additional constructs are enabled through the
+// `Options`.
+struct UncheckedOptionalAccessModelOptions {
+ /// In generating diagnostics, ignore optionals reachable through overloaded
+ /// `operator*` or `operator->` (other than those of the optional type
+ /// itself). The analysis does not equate the results of such calls, so it
+ /// can't identify when their results are used safely (across calls),
+ /// resulting in false positives in all such cases. Note: this option does not
+ /// cover access through `operator[]`.
+ bool IgnoreSmartPointerDereference = false;
+};
+
+/// Dataflow analysis that models whether optionals hold values or not.
+///
+/// Models the `std::optional`, `absl::optional`, and `base::Optional` types.
+class UncheckedOptionalAccessModel
+ : public DataflowAnalysis<UncheckedOptionalAccessModel, NoopLattice> {
+public:
+ UncheckedOptionalAccessModel(ASTContext &Ctx, dataflow::Environment &Env);
+
+ /// Returns a matcher for the optional classes covered by this model.
+ static ast_matchers::DeclarationMatcher optionalClassDecl();
+
+ static NoopLattice initialElement() { return {}; }
+
+ void transfer(const CFGElement &Elt, NoopLattice &L, Environment &Env);
+
+private:
+ CFGMatchSwitch<TransferState<NoopLattice>> TransferMatchSwitch;
+};
+
+class UncheckedOptionalAccessDiagnoser {
+public:
+ UncheckedOptionalAccessDiagnoser(
+ UncheckedOptionalAccessModelOptions Options = {});
+
+ llvm::SmallVector<SourceLocation>
+ operator()(const CFGElement &Elt, ASTContext &Ctx,
+ const TransferStateForDiagnostics<NoopLattice> &State) {
+ return DiagnoseMatchSwitch(Elt, Ctx, State.Env);
+ }
+
+private:
+ CFGMatchSwitch<const Environment, llvm::SmallVector<SourceLocation>>
+ DiagnoseMatchSwitch;
+};
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // CLANG_ANALYSIS_FLOWSENSITIVE_MODELS_UNCHECKEDOPTIONALACCESSMODEL_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/NoopAnalysis.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/NoopAnalysis.h
new file mode 100644
index 000000000000..393f68300cb8
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/NoopAnalysis.h
@@ -0,0 +1,41 @@
+//===-- NoopAnalysis.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a NoopAnalysis class that just uses the builtin transfer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_NOOPANALYSIS_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_NOOPANALYSIS_H
+
+#include "clang/AST/ASTContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/FlowSensitive/DataflowAnalysis.h"
+#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/Analysis/FlowSensitive/NoopLattice.h"
+
+namespace clang {
+namespace dataflow {
+
+class NoopAnalysis : public DataflowAnalysis<NoopAnalysis, NoopLattice> {
+public:
+ NoopAnalysis(ASTContext &Context)
+ : DataflowAnalysis<NoopAnalysis, NoopLattice>(Context) {}
+
+ NoopAnalysis(ASTContext &Context, DataflowAnalysisOptions Options)
+ : DataflowAnalysis<NoopAnalysis, NoopLattice>(Context, Options) {}
+
+ static NoopLattice initialElement() { return {}; }
+
+ void transfer(const CFGElement &E, NoopLattice &L, Environment &Env) {}
+};
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_NOOPANALYSIS_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/NoopLattice.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/NoopLattice.h
new file mode 100644
index 000000000000..019219328111
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/NoopLattice.h
@@ -0,0 +1,41 @@
+//===-- NoopLattice.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the lattice with exactly one element.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_NOOP_LATTICE_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_NOOP_LATTICE_H
+
+#include "clang/Analysis/FlowSensitive/DataflowLattice.h"
+#include <ostream>
+
+namespace clang {
+namespace dataflow {
+
+/// Trivial lattice for dataflow analysis with exactly one element.
+///
+/// Useful for analyses that only need the Environment and nothing more.
+class NoopLattice {
+public:
+ bool operator==(const NoopLattice &Other) const { return true; }
+
+ LatticeJoinEffect join(const NoopLattice &Other) {
+ return LatticeJoinEffect::Unchanged;
+ }
+};
+
+inline std::ostream &operator<<(std::ostream &OS, const NoopLattice &) {
+ return OS << "noop";
+}
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_NOOP_LATTICE_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/RecordOps.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/RecordOps.h
new file mode 100644
index 000000000000..783e53e980aa
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/RecordOps.h
@@ -0,0 +1,68 @@
+//===-- RecordOps.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Operations on records (structs, classes, and unions).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_RECORDOPS_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_RECORDOPS_H
+
+#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/Analysis/FlowSensitive/StorageLocation.h"
+
+namespace clang {
+namespace dataflow {
+
+/// Copies a record (struct, class, or union) from `Src` to `Dst`.
+///
+/// This performs a deep copy, i.e. it copies every field (including synthetic
+/// fields) and recurses on fields of record type.
+///
+/// If there is a `RecordValue` associated with `Dst` in the environment, this
+/// function creates a new `RecordValue` and associates it with `Dst`; clients
+/// need to be aware of this and must not assume that the `RecordValue`
+/// associated with `Dst` remains the same after the call.
+///
+/// Requirements:
+///
+/// `Src` and `Dst` must have the same canonical unqualified type.
+void copyRecord(RecordStorageLocation &Src, RecordStorageLocation &Dst,
+ Environment &Env);
+
+/// Returns whether the records `Loc1` and `Loc2` are equal.
+///
+/// Values for `Loc1` are retrieved from `Env1`, and values for `Loc2` are
+/// retrieved from `Env2`. A convenience overload retrieves values for `Loc1`
+/// and `Loc2` from the same environment.
+///
+/// This performs a deep comparison, i.e. it compares every field (including
+/// synthetic fields) and recurses on fields of record type. Fields of reference
+/// type compare equal if they refer to the same storage location.
+///
+/// Note on how to interpret the result:
+/// - If this returns true, the records are guaranteed to be equal at runtime.
+/// - If this returns false, the records may still be equal at runtime; our
+/// analysis merely cannot guarantee that they will be equal.
+///
+/// Requirements:
+///
+/// `Src` and `Dst` must have the same canonical unqualified type.
+bool recordsEqual(const RecordStorageLocation &Loc1, const Environment &Env1,
+ const RecordStorageLocation &Loc2, const Environment &Env2);
+
+inline bool recordsEqual(const RecordStorageLocation &Loc1,
+ const RecordStorageLocation &Loc2,
+ const Environment &Env) {
+ return recordsEqual(Loc1, Env, Loc2, Env);
+}
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_RECORDOPS_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/SimplifyConstraints.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/SimplifyConstraints.h
new file mode 100644
index 000000000000..fadb3caf0a4c
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/SimplifyConstraints.h
@@ -0,0 +1,49 @@
+//===-- SimplifyConstraints.h -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_SIMPLIFYCONSTRAINTS_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_SIMPLIFYCONSTRAINTS_H
+
+#include "clang/Analysis/FlowSensitive/Arena.h"
+#include "clang/Analysis/FlowSensitive/Formula.h"
+#include "llvm/ADT/SetVector.h"
+
+namespace clang {
+namespace dataflow {
+
+/// Information on the way a set of constraints was simplified.
+struct SimplifyConstraintsInfo {
+ /// List of equivalence classes of atoms. For each equivalence class, the
+ /// original constraints imply that all atoms in it must be equivalent.
+ /// Simplification replaces all occurrences of atoms in an equivalence class
+ /// with a single representative atom from the class.
+ /// Does not contain equivalence classes with just one member or atoms
+ /// contained in `TrueAtoms` or `FalseAtoms`.
+ llvm::SmallVector<llvm::SmallVector<Atom>> EquivalentAtoms;
+ /// Atoms that the original constraints imply must be true.
+ /// Simplification replaces all occurrences of these atoms by a true literal
+ /// (which may enable additional simplifications).
+ llvm::SmallVector<Atom> TrueAtoms;
+ /// Atoms that the original constraints imply must be false.
+ /// Simplification replaces all occurrences of these atoms by a false literal
+ /// (which may enable additional simplifications).
+ llvm::SmallVector<Atom> FalseAtoms;
+};
+
+/// Simplifies a set of constraints (implicitly connected by "and") in a way
+/// that does not change satisfiability of the constraints. This does _not_ mean
+/// that the set of solutions is the same before and after simplification.
+/// `Info`, if non-null, will be populated with information about the
+/// simplifications that were made to the formula (e.g. to display to the user).
+void simplifyConstraints(llvm::SetVector<const Formula *> &Constraints,
+ Arena &arena, SimplifyConstraintsInfo *Info = nullptr);
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_SIMPLIFYCONSTRAINTS_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Solver.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Solver.h
new file mode 100644
index 000000000000..079f6802f241
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Solver.h
@@ -0,0 +1,98 @@
+//===- Solver.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an interface for a SAT solver that can be used by
+// dataflow analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_SOLVER_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_SOLVER_H
+
+#include "clang/Analysis/FlowSensitive/Formula.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include <optional>
+#include <vector>
+
+namespace clang {
+namespace dataflow {
+
+/// An interface for a SAT solver that can be used by dataflow analyses.
+class Solver {
+public:
+ struct Result {
+ enum class Status {
+ /// Indicates that there exists a satisfying assignment for a boolean
+ /// formula.
+ Satisfiable,
+
+ /// Indicates that there is no satisfying assignment for a boolean
+ /// formula.
+ Unsatisfiable,
+
+ /// Indicates that the solver gave up trying to find a satisfying
+ /// assignment for a boolean formula.
+ TimedOut,
+ };
+
+ /// A boolean value is set to true or false in a truth assignment.
+ enum class Assignment : uint8_t { AssignedFalse = 0, AssignedTrue = 1 };
+
+ /// Constructs a result indicating that the queried boolean formula is
+ /// satisfiable. The result will hold a solution found by the solver.
+ static Result Satisfiable(llvm::DenseMap<Atom, Assignment> Solution) {
+ return Result(Status::Satisfiable, std::move(Solution));
+ }
+
+ /// Constructs a result indicating that the queried boolean formula is
+ /// unsatisfiable.
+ static Result Unsatisfiable() { return Result(Status::Unsatisfiable, {}); }
+
+ /// Constructs a result indicating that satisfiability checking on the
+ /// queried boolean formula was not completed.
+ static Result TimedOut() { return Result(Status::TimedOut, {}); }
+
+ /// Returns the status of satisfiability checking on the queried boolean
+ /// formula.
+ Status getStatus() const { return SATCheckStatus; }
+
+ /// Returns a truth assignment to boolean values that satisfies the queried
+ /// boolean formula if available. Otherwise, an empty optional is returned.
+ std::optional<llvm::DenseMap<Atom, Assignment>> getSolution() const {
+ return Solution;
+ }
+
+ private:
+ Result(Status SATCheckStatus,
+ std::optional<llvm::DenseMap<Atom, Assignment>> Solution)
+ : SATCheckStatus(SATCheckStatus), Solution(std::move(Solution)) {}
+
+ Status SATCheckStatus;
+ std::optional<llvm::DenseMap<Atom, Assignment>> Solution;
+ };
+
+ virtual ~Solver() = default;
+
+ /// Checks if the conjunction of `Vals` is satisfiable and returns the
+ /// corresponding result.
+ ///
+ /// Requirements:
+ ///
+ /// All elements in `Vals` must not be null.
+ virtual Result solve(llvm::ArrayRef<const Formula *> Vals) = 0;
+};
+
+llvm::raw_ostream &operator<<(llvm::raw_ostream &, const Solver::Result &);
+llvm::raw_ostream &operator<<(llvm::raw_ostream &, Solver::Result::Assignment);
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_SOLVER_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/StorageLocation.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/StorageLocation.h
new file mode 100644
index 000000000000..8fcc6a44027a
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/StorageLocation.h
@@ -0,0 +1,181 @@
+//===-- StorageLocation.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes that represent elements of the local variable store
+// and of the heap during dataflow analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_STORAGELOCATION_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_STORAGELOCATION_H
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/Type.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/Debug.h"
+#include <cassert>
+
+#define DEBUG_TYPE "dataflow"
+
+namespace clang {
+namespace dataflow {
+
+/// Base class for elements of the local variable store and of the heap.
+///
+/// Each storage location holds a value. The mapping from storage locations to
+/// values is stored in the environment.
+class StorageLocation {
+public:
+ enum class Kind {
+ Scalar,
+ Record,
+ };
+
+ StorageLocation(Kind LocKind, QualType Type) : LocKind(LocKind), Type(Type) {
+ assert(Type.isNull() || !Type->isReferenceType());
+ }
+
+ // Non-copyable because addresses of storage locations are used as their
+ // identities throughout framework and user code. The framework is responsible
+ // for construction and destruction of storage locations.
+ StorageLocation(const StorageLocation &) = delete;
+ StorageLocation &operator=(const StorageLocation &) = delete;
+
+ virtual ~StorageLocation() = default;
+
+ Kind getKind() const { return LocKind; }
+
+ QualType getType() const { return Type; }
+
+private:
+ Kind LocKind;
+ QualType Type;
+};
+
+/// A storage location that is not subdivided further for the purposes of
+/// abstract interpretation. For example: `int`, `int*`, `int&`.
+class ScalarStorageLocation final : public StorageLocation {
+public:
+ explicit ScalarStorageLocation(QualType Type)
+ : StorageLocation(Kind::Scalar, Type) {}
+
+ static bool classof(const StorageLocation *Loc) {
+ return Loc->getKind() == Kind::Scalar;
+ }
+};
+
+/// A storage location for a record (struct, class, or union).
+///
+/// Contains storage locations for all modeled fields of the record (also
+/// referred to as "children"). The child map is flat, so accessible members of
+/// the base class are directly accessible as children of this location.
+///
+/// Record storage locations may also contain so-called synthetic fields. These
+/// are typically used to model the internal state of a class (e.g. the value
+/// stored in a `std::optional`) without having to depend on that class's
+/// implementation details. All `RecordStorageLocation`s of a given type should
+/// have the same synthetic fields.
+///
+/// The storage location for a field of reference type may be null. This
+/// typically occurs in one of two situations:
+/// - The record has not been fully initialized.
+/// - The maximum depth for modelling a self-referential data structure has been
+/// reached.
+/// Storage locations for fields of all other types must be non-null.
+///
+/// FIXME: Currently, the storage location of unions is modelled the same way as
+/// that of structs or classes. Eventually, we need to change this modelling so
+/// that all of the members of a given union have the same storage location.
+class RecordStorageLocation final : public StorageLocation {
+public:
+ using FieldToLoc = llvm::DenseMap<const ValueDecl *, StorageLocation *>;
+ using SyntheticFieldMap = llvm::StringMap<StorageLocation *>;
+
+ RecordStorageLocation(QualType Type, FieldToLoc TheChildren,
+ SyntheticFieldMap TheSyntheticFields)
+ : StorageLocation(Kind::Record, Type), Children(std::move(TheChildren)),
+ SyntheticFields(std::move(TheSyntheticFields)) {
+ assert(!Type.isNull());
+ assert(Type->isRecordType());
+ assert([this] {
+ for (auto [Field, Loc] : Children) {
+ if (!Field->getType()->isReferenceType() && Loc == nullptr)
+ return false;
+ }
+ return true;
+ }());
+ }
+
+ static bool classof(const StorageLocation *Loc) {
+ return Loc->getKind() == Kind::Record;
+ }
+
+ /// Returns the child storage location for `D`.
+ ///
+ /// May return null if `D` has reference type; guaranteed to return non-null
+ /// in all other cases.
+ ///
+ /// Note that it is an error to call this with a field that does not exist.
+ /// The function does not return null in this case.
+ StorageLocation *getChild(const ValueDecl &D) const {
+ auto It = Children.find(&D);
+ LLVM_DEBUG({
+ if (It == Children.end()) {
+ llvm::dbgs() << "Couldn't find child " << D.getNameAsString()
+ << " on StorageLocation " << this << " of type "
+ << getType() << "\n";
+ llvm::dbgs() << "Existing children:\n";
+ for ([[maybe_unused]] auto [Field, Loc] : Children) {
+ llvm::dbgs() << Field->getNameAsString() << "\n";
+ }
+ }
+ });
+ assert(It != Children.end());
+ return It->second;
+ }
+
+ /// Returns the storage location for the synthetic field `Name`.
+ /// The synthetic field must exist.
+ StorageLocation &getSyntheticField(llvm::StringRef Name) const {
+ StorageLocation *Loc = SyntheticFields.lookup(Name);
+ assert(Loc != nullptr);
+ return *Loc;
+ }
+
+ llvm::iterator_range<SyntheticFieldMap::const_iterator>
+ synthetic_fields() const {
+ return {SyntheticFields.begin(), SyntheticFields.end()};
+ }
+
+ /// Changes the child storage location for a field `D` of reference type.
+ /// All other fields cannot change their storage location and always retain
+ /// the storage location passed to the `RecordStorageLocation` constructor.
+ ///
+ /// Requirements:
+ ///
+ /// `D` must have reference type.
+ void setChild(const ValueDecl &D, StorageLocation *Loc) {
+ assert(D.getType()->isReferenceType());
+ Children[&D] = Loc;
+ }
+
+ llvm::iterator_range<FieldToLoc::const_iterator> children() const {
+ return {Children.begin(), Children.end()};
+ }
+
+private:
+ FieldToLoc Children;
+ SyntheticFieldMap SyntheticFields;
+};
+
+} // namespace dataflow
+} // namespace clang
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_STORAGELOCATION_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Transfer.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Transfer.h
new file mode 100644
index 000000000000..7713df747cb7
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Transfer.h
@@ -0,0 +1,61 @@
+//===-- Transfer.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a transfer function that evaluates a program statement and
+// updates an environment accordingly.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_TRANSFER_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_TRANSFER_H
+
+#include "clang/AST/Stmt.h"
+#include "clang/Analysis/FlowSensitive/DataflowAnalysisContext.h"
+#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h"
+
+namespace clang {
+namespace dataflow {
+
+/// Maps statements to the environments of basic blocks that contain them.
+class StmtToEnvMap {
+public:
+ // `CurBlockID` is the ID of the block currently being processed, and
+ // `CurState` is the pending state currently associated with this block. These
+ // are supplied separately as the pending state for the current block may not
+ // yet be represented in `BlockToState`.
+ StmtToEnvMap(const ControlFlowContext &CFCtx,
+ llvm::ArrayRef<std::optional<TypeErasedDataflowAnalysisState>>
+ BlockToState,
+ unsigned CurBlockID,
+ const TypeErasedDataflowAnalysisState &CurState)
+ : CFCtx(CFCtx), BlockToState(BlockToState), CurBlockID(CurBlockID),
+ CurState(CurState) {}
+
+ /// Returns the environment of the basic block that contains `S`.
+ /// The result is guaranteed never to be null.
+ const Environment *getEnvironment(const Stmt &S) const;
+
+private:
+ const ControlFlowContext &CFCtx;
+ llvm::ArrayRef<std::optional<TypeErasedDataflowAnalysisState>> BlockToState;
+ unsigned CurBlockID;
+ const TypeErasedDataflowAnalysisState &CurState;
+};
+
+/// Evaluates `S` and updates `Env` accordingly.
+///
+/// Requirements:
+///
+/// `S` must not be `ParenExpr` or `ExprWithCleanups`.
+void transfer(const StmtToEnvMap &StmtToEnv, const Stmt &S, Environment &Env);
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_TRANSFER_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h
new file mode 100644
index 000000000000..a0ca7440230b
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h
@@ -0,0 +1,159 @@
+//===- TypeErasedDataflowAnalysis.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines type-erased base types and functions for building dataflow
+// analyses that run over Control-Flow Graphs (CFGs).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_TYPEERASEDDATAFLOWANALYSIS_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_TYPEERASEDDATAFLOWANALYSIS_H
+
+#include <optional>
+#include <utility>
+#include <vector>
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Stmt.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
+#include "clang/Analysis/FlowSensitive/DataflowAnalysisContext.h"
+#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/Analysis/FlowSensitive/DataflowLattice.h"
+#include "llvm/ADT/Any.h"
+#include "llvm/Support/Error.h"
+
+namespace clang {
+namespace dataflow {
+
+struct DataflowAnalysisOptions {
+ /// Options for the built-in model, or empty to not apply them.
+ // FIXME: Remove this option once the framework supports composing analyses
+ // (at which point the built-in transfer functions can be simply a standalone
+ // analysis).
+ std::optional<DataflowAnalysisContext::Options> BuiltinOpts =
+ DataflowAnalysisContext::Options{};
+};
+
+/// Type-erased lattice element container.
+///
+/// Requirements:
+///
+/// The type of the object stored in the container must be a bounded
+/// join-semilattice.
+struct TypeErasedLattice {
+ llvm::Any Value;
+};
+
+/// Type-erased base class for dataflow analyses built on a single lattice type.
+class TypeErasedDataflowAnalysis : public Environment::ValueModel {
+ DataflowAnalysisOptions Options;
+
+public:
+ TypeErasedDataflowAnalysis() : Options({}) {}
+
+ TypeErasedDataflowAnalysis(DataflowAnalysisOptions Options)
+ : Options(Options) {}
+
+ virtual ~TypeErasedDataflowAnalysis() {}
+
+ /// Returns the `ASTContext` that is used by the analysis.
+ virtual ASTContext &getASTContext() = 0;
+
+ /// Returns a type-erased lattice element that models the initial state of a
+ /// basic block.
+ virtual TypeErasedLattice typeErasedInitialElement() = 0;
+
+ /// Joins two type-erased lattice elements by computing their least upper
+ /// bound. Places the join result in the left element and returns an effect
+ /// indicating whether any changes were made to it.
+ virtual TypeErasedLattice joinTypeErased(const TypeErasedLattice &,
+ const TypeErasedLattice &) = 0;
+
+ /// Chooses a lattice element that approximates the current element at a
+ /// program point, given the previous element at that point. Places the
+ /// widened result in the current element (`Current`). Widening is optional --
+ /// it is only needed to either accelerate convergence (for lattices with
+ /// non-trivial height) or guarantee convergence (for lattices with infinite
+ /// height).
+ ///
+ /// Returns an indication of whether any changes were made to `Current` in
+ /// order to widen. This saves a separate call to `isEqualTypeErased` after
+ /// the widening.
+ virtual LatticeJoinEffect
+ widenTypeErased(TypeErasedLattice &Current,
+ const TypeErasedLattice &Previous) = 0;
+
+ /// Returns true if and only if the two given type-erased lattice elements are
+ /// equal.
+ virtual bool isEqualTypeErased(const TypeErasedLattice &,
+ const TypeErasedLattice &) = 0;
+
+ /// Applies the analysis transfer function for a given control flow graph
+ /// element and type-erased lattice element.
+ virtual void transferTypeErased(const CFGElement &, TypeErasedLattice &,
+ Environment &) = 0;
+
+ /// Applies the analysis transfer function for a given edge from a CFG block
+ /// of a conditional statement.
+ /// @param Stmt The condition which is responsible for the split in the CFG.
+ /// @param Branch True if the edge goes to the basic block where the
+ /// condition is true.
+ // FIXME: Change `Stmt` argument to a reference.
+ virtual void transferBranchTypeErased(bool Branch, const Stmt *,
+ TypeErasedLattice &, Environment &) = 0;
+
+ /// If the built-in model is enabled, returns the options to be passed to
+ /// them. Otherwise returns empty.
+ const std::optional<DataflowAnalysisContext::Options> &
+ builtinOptions() const {
+ return Options.BuiltinOpts;
+ }
+};
+
+/// Type-erased model of the program at a given program point.
+struct TypeErasedDataflowAnalysisState {
+ /// Type-erased model of a program property.
+ TypeErasedLattice Lattice;
+
+ /// Model of the state of the program (store and heap).
+ Environment Env;
+
+ TypeErasedDataflowAnalysisState(TypeErasedLattice Lattice, Environment Env)
+ : Lattice(std::move(Lattice)), Env(std::move(Env)) {}
+
+ TypeErasedDataflowAnalysisState fork() const {
+ return TypeErasedDataflowAnalysisState(Lattice, Env.fork());
+ }
+};
+
+/// Performs dataflow analysis and returns a mapping from basic block IDs to
+/// dataflow analysis states that model the respective basic blocks. Indices of
+/// the returned vector correspond to basic block IDs. Returns an error if the
+/// dataflow analysis cannot be performed successfully. Otherwise, calls
+/// `PostVisitCFG` on each CFG element with the final analysis results at that
+/// program point.
+///
+/// `MaxBlockVisits` caps the number of block visits during analysis. It doesn't
+/// distinguish between repeat visits to the same block and visits to distinct
+/// blocks. This parameter is a backstop to prevent infinite loops, in the case
+/// of bugs in the lattice and/or transfer functions that prevent the analysis
+/// from converging.
+llvm::Expected<std::vector<std::optional<TypeErasedDataflowAnalysisState>>>
+runTypeErasedDataflowAnalysis(
+ const ControlFlowContext &CFCtx, TypeErasedDataflowAnalysis &Analysis,
+ const Environment &InitEnv,
+ std::function<void(const CFGElement &,
+ const TypeErasedDataflowAnalysisState &)>
+ PostVisitCFG,
+ std::int32_t MaxBlockVisits);
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_TYPEERASEDDATAFLOWANALYSIS_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Value.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Value.h
new file mode 100644
index 000000000000..be1bf9324c87
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/Value.h
@@ -0,0 +1,231 @@
+//===-- Value.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes for values computed by abstract interpretation
+// during dataflow analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_VALUE_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_VALUE_H
+
+#include "clang/AST/Decl.h"
+#include "clang/Analysis/FlowSensitive/Formula.h"
+#include "clang/Analysis/FlowSensitive/StorageLocation.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include <cassert>
+#include <utility>
+
+namespace clang {
+namespace dataflow {
+
+/// Base class for all values computed by abstract interpretation.
+///
+/// Don't use `Value` instances by value. All `Value` instances are allocated
+/// and owned by `DataflowAnalysisContext`.
+class Value {
+public:
+ enum class Kind {
+ Integer,
+ Pointer,
+ Record,
+
+ // TODO: Top values should not be need to be type-specific.
+ TopBool,
+ AtomicBool,
+ FormulaBool,
+ };
+
+ explicit Value(Kind ValKind) : ValKind(ValKind) {}
+
+ // Non-copyable because addresses of values are used as their identities
+ // throughout framework and user code. The framework is responsible for
+ // construction and destruction of values.
+ Value(const Value &) = delete;
+ Value &operator=(const Value &) = delete;
+
+ virtual ~Value() = default;
+
+ Kind getKind() const { return ValKind; }
+
+ /// Returns the value of the synthetic property with the given `Name` or null
+ /// if the property isn't assigned a value.
+ Value *getProperty(llvm::StringRef Name) const {
+ return Properties.lookup(Name);
+ }
+
+ /// Assigns `Val` as the value of the synthetic property with the given
+ /// `Name`.
+ ///
+ /// Properties may not be set on `RecordValue`s; use synthetic fields instead
+ /// (for details, see documentation for `RecordStorageLocation`).
+ void setProperty(llvm::StringRef Name, Value &Val) {
+ assert(getKind() != Kind::Record);
+ Properties.insert_or_assign(Name, &Val);
+ }
+
+ llvm::iterator_range<llvm::StringMap<Value *>::const_iterator>
+ properties() const {
+ return {Properties.begin(), Properties.end()};
+ }
+
+private:
+ Kind ValKind;
+ llvm::StringMap<Value *> Properties;
+};
+
+/// An equivalence relation for values. It obeys reflexivity, symmetry and
+/// transitivity. It does *not* include comparison of `Properties`.
+///
+/// Computes equivalence for these subclasses:
+/// * PointerValue -- pointee locations are equal. Does not compute deep
+/// equality of `Value` at said location.
+/// * TopBoolValue -- both are `TopBoolValue`s.
+///
+/// Otherwise, falls back to pointer equality.
+bool areEquivalentValues(const Value &Val1, const Value &Val2);
+
+/// Models a boolean.
+class BoolValue : public Value {
+ const Formula *F;
+
+public:
+ explicit BoolValue(Kind ValueKind, const Formula &F)
+ : Value(ValueKind), F(&F) {}
+
+ static bool classof(const Value *Val) {
+ return Val->getKind() == Kind::TopBool ||
+ Val->getKind() == Kind::AtomicBool ||
+ Val->getKind() == Kind::FormulaBool;
+ }
+
+ const Formula &formula() const { return *F; }
+};
+
+/// A TopBoolValue represents a boolean that is explicitly unconstrained.
+///
+/// This is equivalent to an AtomicBoolValue that does not appear anywhere
+/// else in a system of formula.
+/// Knowing the value is unconstrained is useful when e.g. reasoning about
+/// convergence.
+class TopBoolValue final : public BoolValue {
+public:
+ TopBoolValue(const Formula &F) : BoolValue(Kind::TopBool, F) {
+ assert(F.kind() == Formula::AtomRef);
+ }
+
+ static bool classof(const Value *Val) {
+ return Val->getKind() == Kind::TopBool;
+ }
+
+ Atom getAtom() const { return formula().getAtom(); }
+};
+
+/// Models an atomic boolean.
+///
+/// FIXME: Merge this class into FormulaBoolValue.
+/// When we want to specify atom identity, use Atom.
+class AtomicBoolValue final : public BoolValue {
+public:
+ explicit AtomicBoolValue(const Formula &F) : BoolValue(Kind::AtomicBool, F) {
+ assert(F.kind() == Formula::AtomRef);
+ }
+
+ static bool classof(const Value *Val) {
+ return Val->getKind() == Kind::AtomicBool;
+ }
+
+ Atom getAtom() const { return formula().getAtom(); }
+};
+
+/// Models a compound boolean formula.
+class FormulaBoolValue final : public BoolValue {
+public:
+ explicit FormulaBoolValue(const Formula &F)
+ : BoolValue(Kind::FormulaBool, F) {
+ assert(F.kind() != Formula::AtomRef && "For now, use AtomicBoolValue");
+ }
+
+ static bool classof(const Value *Val) {
+ return Val->getKind() == Kind::FormulaBool;
+ }
+};
+
+/// Models an integer.
+class IntegerValue : public Value {
+public:
+ explicit IntegerValue() : Value(Kind::Integer) {}
+
+ static bool classof(const Value *Val) {
+ return Val->getKind() == Kind::Integer;
+ }
+};
+
+/// Models a symbolic pointer. Specifically, any value of type `T*`.
+class PointerValue final : public Value {
+public:
+ explicit PointerValue(StorageLocation &PointeeLoc)
+ : Value(Kind::Pointer), PointeeLoc(PointeeLoc) {}
+
+ static bool classof(const Value *Val) {
+ return Val->getKind() == Kind::Pointer;
+ }
+
+ StorageLocation &getPointeeLoc() const { return PointeeLoc; }
+
+private:
+ StorageLocation &PointeeLoc;
+};
+
+/// Models a value of `struct` or `class` type.
+/// In C++, prvalues of class type serve only a limited purpose: They can only
+/// be used to initialize a result object. It is not possible to access member
+/// variables or call member functions on a prvalue of class type.
+/// Correspondingly, `RecordValue` also serves only a limited purpose: It
+/// conveys a prvalue of class type from the place where the object is
+/// constructed to the result object that it initializes.
+///
+/// When creating a prvalue of class type, we already need a storage location
+/// for `this`, even though prvalues are otherwise not associated with storage
+/// locations. `RecordValue` is therefore essentially a wrapper for a storage
+/// location, which is then used to set the storage location for the result
+/// object when we process the AST node for that result object.
+///
+/// For example:
+/// MyStruct S = MyStruct(3);
+///
+/// In this example, `MyStruct(3) is a prvalue, which is modeled as a
+/// `RecordValue` that wraps a `RecordStorageLocation`. This
+/// `RecordStorageLocation` is then used as the storage location for `S`.
+///
+/// Over time, we may eliminate `RecordValue` entirely. See also the discussion
+/// here: https://reviews.llvm.org/D155204#inline-1503204
+class RecordValue final : public Value {
+public:
+ explicit RecordValue(RecordStorageLocation &Loc)
+ : Value(Kind::Record), Loc(Loc) {}
+
+ static bool classof(const Value *Val) {
+ return Val->getKind() == Kind::Record;
+ }
+
+ /// Returns the storage location that this `RecordValue` is associated with.
+ RecordStorageLocation &getLoc() const { return Loc; }
+
+private:
+ RecordStorageLocation &Loc;
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const Value &Val);
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_VALUE_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h
new file mode 100644
index 000000000000..5448eecf6d41
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h
@@ -0,0 +1,58 @@
+//===- WatchedLiteralsSolver.h ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a SAT solver implementation that can be used by dataflow
+// analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_WATCHEDLITERALSSOLVER_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_WATCHEDLITERALSSOLVER_H
+
+#include "clang/Analysis/FlowSensitive/Formula.h"
+#include "clang/Analysis/FlowSensitive/Solver.h"
+#include "llvm/ADT/ArrayRef.h"
+#include <limits>
+
+namespace clang {
+namespace dataflow {
+
+/// A SAT solver that is an implementation of Algorithm D from Knuth's The Art
+/// of Computer Programming Volume 4: Satisfiability, Fascicle 6. It is based on
+/// the Davis-Putnam-Logemann-Loveland (DPLL) algorithm, keeps references to a
+/// single "watched" literal per clause, and uses a set of "active" variables
+/// for unit propagation.
+class WatchedLiteralsSolver : public Solver {
+ // Count of the iterations of the main loop of the solver. This spans *all*
+ // calls to the underlying solver across the life of this object. It is
+ // reduced with every (non-trivial) call to the solver.
+ //
+ // We give control over the abstract count of iterations instead of concrete
+ // measurements like CPU cycles or time to ensure deterministic results.
+ std::int64_t MaxIterations = std::numeric_limits<std::int64_t>::max();
+
+public:
+ WatchedLiteralsSolver() = default;
+
+ // `Work` specifies a computational limit on the solver. Units of "work"
+ // roughly correspond to attempts to assign a value to a single
+ // variable. Since the algorithm is exponential in the number of variables,
+ // this is the most direct (abstract) unit to target.
+ explicit WatchedLiteralsSolver(std::int64_t WorkLimit)
+ : MaxIterations(WorkLimit) {}
+
+ Result solve(llvm::ArrayRef<const Formula *> Vals) override;
+
+ // The solver reached its maximum number of iterations.
+ bool reachedLimit() const { return MaxIterations == 0; }
+};
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_WATCHEDLITERALSSOLVER_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/IssueHash.h b/contrib/llvm-project/clang/include/clang/Analysis/IssueHash.h
index 9c02b79f58f9..78bebbdb6ec7 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/IssueHash.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/IssueHash.h
@@ -5,8 +5,8 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_STATICANALYZER_CORE_ISSUE_HASH_H
-#define LLVM_CLANG_STATICANALYZER_CORE_ISSUE_HASH_H
+#ifndef LLVM_CLANG_ANALYSIS_ISSUEHASH_H
+#define LLVM_CLANG_ANALYSIS_ISSUEHASH_H
#include "llvm/ADT/SmallString.h"
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/MacroExpansionContext.h b/contrib/llvm-project/clang/include/clang/Analysis/MacroExpansionContext.h
index 57934bfc09d9..2a27aba76656 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/MacroExpansionContext.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/MacroExpansionContext.h
@@ -13,9 +13,9 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
+#include <optional>
namespace clang {
@@ -85,14 +85,16 @@ public:
/// \param MacroExpansionLoc Must be the expansion location of a macro.
/// \return The textual representation of the token sequence which was
/// substituted in place of the macro after the preprocessing.
- /// If no macro was expanded at that location, returns llvm::None.
- Optional<StringRef> getExpandedText(SourceLocation MacroExpansionLoc) const;
+ /// If no macro was expanded at that location, returns std::nullopt.
+ std::optional<StringRef>
+ getExpandedText(SourceLocation MacroExpansionLoc) const;
/// \param MacroExpansionLoc Must be the expansion location of a macro.
/// \return The text from the original source code which were substituted by
/// the macro expansion chain from the given location.
- /// If no macro was expanded at that location, returns llvm::None.
- Optional<StringRef> getOriginalText(SourceLocation MacroExpansionLoc) const;
+ /// If no macro was expanded at that location, returns std::nullopt.
+ std::optional<StringRef>
+ getOriginalText(SourceLocation MacroExpansionLoc) const;
LLVM_DUMP_METHOD void dumpExpansionRangesToStream(raw_ostream &OS) const;
LLVM_DUMP_METHOD void dumpExpandedTextsToStream(raw_ostream &OS) const;
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/PathDiagnostic.h b/contrib/llvm-project/clang/include/clang/Analysis/PathDiagnostic.h
index 539aa20b8168..90559e7efb06 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/PathDiagnostic.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/PathDiagnostic.h
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_STATICANALYZER_CORE_BUGREPORTER_PATHDIAGNOSTIC_H
-#define LLVM_CLANG_STATICANALYZER_CORE_BUGREPORTER_PATHDIAGNOSTIC_H
+#ifndef LLVM_CLANG_ANALYSIS_PATHDIAGNOSTIC_H
+#define LLVM_CLANG_ANALYSIS_PATHDIAGNOSTIC_H
#include "clang/AST/Stmt.h"
#include "clang/Analysis/AnalysisDeclContext.h"
@@ -19,7 +19,6 @@
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
@@ -30,6 +29,7 @@
#include <list>
#include <map>
#include <memory>
+#include <optional>
#include <set>
#include <string>
#include <utility>
@@ -41,10 +41,8 @@ class AnalysisDeclContext;
class BinaryOperator;
class CallEnter;
class CallExitEnd;
-class CallExpr;
class ConditionalOperator;
class Decl;
-class Expr;
class LocationContext;
class MemberExpr;
class ProgramPoint;
@@ -75,14 +73,8 @@ struct PathDiagnosticConsumerOptions {
bool ShouldSerializeStats = false;
/// If the consumer intends to produce multiple output files, should it
- /// use randomly generated file names for these files (with the tiny risk of
- /// having random collisions) or deterministic human-readable file names
- /// (with a larger risk of deterministic collisions or invalid characters
- /// in the file name). We should not really give this choice to the users
- /// because deterministic mode is always superior when done right, but
- /// for some consumers this mode is experimental and needs to be
- /// off by default.
- bool ShouldWriteStableReportFilename = false;
+ /// use a pseudo-random file name or a human-readable file name.
+ bool ShouldWriteVerboseReportFilename = false;
/// Whether the consumer should treat consumed diagnostics as hard errors.
/// Useful for breaking your build when issues are found.
@@ -151,11 +143,14 @@ public:
/// Only runs visitors, no output generated.
None,
- /// Used for HTML, SARIF, and text output.
+ /// Used for SARIF and text output.
Minimal,
/// Used for plist output, used for "arrows" generation.
Extensive,
+
+ /// Used for HTML, shows both "arrows" and control notes.
+ Everything
};
virtual PathGenerationScheme getGenerationScheme() const { return Minimal; }
@@ -164,7 +159,11 @@ public:
return getGenerationScheme() != None;
}
- bool shouldAddPathEdges() const { return getGenerationScheme() == Extensive; }
+ bool shouldAddPathEdges() const { return getGenerationScheme() >= Extensive; }
+ bool shouldAddControlNotes() const {
+ return getGenerationScheme() == Minimal ||
+ getGenerationScheme() == Everything;
+ }
virtual bool supportsLogicalOpControlFlow() const { return false; }
@@ -533,7 +532,7 @@ public:
};
class PathDiagnosticEventPiece : public PathDiagnosticSpotPiece {
- Optional<bool> IsPrunable;
+ std::optional<bool> IsPrunable;
public:
PathDiagnosticEventPiece(const PathDiagnosticLocation &pos,
@@ -545,15 +544,13 @@ public:
/// flag may have been previously set, at which point it will not
/// be reset unless one specifies to do so.
void setPrunable(bool isPrunable, bool override = false) {
- if (IsPrunable.hasValue() && !override)
- return;
+ if (IsPrunable && !override)
+ return;
IsPrunable = isPrunable;
}
/// Return true if the diagnostic piece is prunable.
- bool isPrunable() const {
- return IsPrunable.hasValue() ? IsPrunable.getValue() : false;
- }
+ bool isPrunable() const { return IsPrunable.value_or(false); }
void dump() const override;
@@ -904,4 +901,4 @@ public:
} // namespace ento
} // namespace clang
-#endif // LLVM_CLANG_STATICANALYZER_CORE_BUGREPORTER_PATHDIAGNOSTIC_H
+#endif // LLVM_CLANG_ANALYSIS_PATHDIAGNOSTIC_H
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/ProgramPoint.h b/contrib/llvm-project/clang/include/clang/Analysis/ProgramPoint.h
index 546224bfd58d..b9339570e1ae 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/ProgramPoint.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/ProgramPoint.h
@@ -18,19 +18,18 @@
#include "clang/Analysis/CFG.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
+#include <optional>
#include <string>
#include <utility>
namespace clang {
class AnalysisDeclContext;
-class FunctionDecl;
class LocationContext;
/// ProgramPoints can be "tagged" as representing points specific to a given
@@ -96,35 +95,33 @@ private:
llvm::PointerIntPair<const ProgramPointTag *, 2, unsigned> Tag;
+ CFGBlock::ConstCFGElementRef ElemRef = {nullptr, 0};
+
protected:
ProgramPoint() = default;
- ProgramPoint(const void *P,
- Kind k,
- const LocationContext *l,
- const ProgramPointTag *tag = nullptr)
- : Data1(P),
- Data2(nullptr, (((unsigned) k) >> 0) & 0x3),
- L(l, (((unsigned) k) >> 2) & 0x3),
- Tag(tag, (((unsigned) k) >> 4) & 0x3) {
- assert(getKind() == k);
- assert(getLocationContext() == l);
- assert(getData1() == P);
- }
-
- ProgramPoint(const void *P1,
- const void *P2,
- Kind k,
- const LocationContext *l,
- const ProgramPointTag *tag = nullptr)
- : Data1(P1),
- Data2(P2, (((unsigned) k) >> 0) & 0x3),
- L(l, (((unsigned) k) >> 2) & 0x3),
- Tag(tag, (((unsigned) k) >> 4) & 0x3) {}
+ ProgramPoint(const void *P, Kind k, const LocationContext *l,
+ const ProgramPointTag *tag = nullptr,
+ CFGBlock::ConstCFGElementRef ElemRef = {nullptr, 0})
+ : Data1(P), Data2(nullptr, (((unsigned)k) >> 0) & 0x3),
+ L(l, (((unsigned)k) >> 2) & 0x3), Tag(tag, (((unsigned)k) >> 4) & 0x3),
+ ElemRef(ElemRef) {
+ assert(getKind() == k);
+ assert(getLocationContext() == l);
+ assert(getData1() == P);
+ }
+
+ ProgramPoint(const void *P1, const void *P2, Kind k, const LocationContext *l,
+ const ProgramPointTag *tag = nullptr,
+ CFGBlock::ConstCFGElementRef ElemRef = {nullptr, 0})
+ : Data1(P1), Data2(P2, (((unsigned)k) >> 0) & 0x3),
+ L(l, (((unsigned)k) >> 2) & 0x3), Tag(tag, (((unsigned)k) >> 4) & 0x3),
+ ElemRef(ElemRef) {}
protected:
const void *getData1() const { return Data1; }
const void *getData2() const { return Data2.getPointer(); }
void setData2(const void *d) { Data2.setPointer(d); }
+ CFGBlock::ConstCFGElementRef getElementRef() const { return ElemRef; }
public:
/// Create a new ProgramPoint object that is the same as the original
@@ -145,12 +142,11 @@ public:
return t;
}
- /// Convert to the specified ProgramPoint type, returning None if this
+ /// Convert to the specified ProgramPoint type, returning std::nullopt if this
/// ProgramPoint is not of the desired type.
- template<typename T>
- Optional<T> getAs() const {
+ template <typename T> std::optional<T> getAs() const {
if (!T::isKind(*this))
- return None;
+ return std::nullopt;
T t;
ProgramPoint& PP = t;
PP = *this;
@@ -192,17 +188,13 @@ public:
}
bool operator==(const ProgramPoint & RHS) const {
- return Data1 == RHS.Data1 &&
- Data2 == RHS.Data2 &&
- L == RHS.L &&
- Tag == RHS.Tag;
+ return Data1 == RHS.Data1 && Data2 == RHS.Data2 && L == RHS.L &&
+ Tag == RHS.Tag && ElemRef == RHS.ElemRef;
}
bool operator!=(const ProgramPoint &RHS) const {
- return Data1 != RHS.Data1 ||
- Data2 != RHS.Data2 ||
- L != RHS.L ||
- Tag != RHS.Tag;
+ return Data1 != RHS.Data1 || Data2 != RHS.Data2 || L != RHS.L ||
+ Tag != RHS.Tag || ElemRef != RHS.ElemRef;
}
void Profile(llvm::FoldingSetNodeID& ID) const {
@@ -211,6 +203,8 @@ public:
ID.AddPointer(getData2());
ID.AddPointer(getLocationContext());
ID.AddPointer(getTag());
+ ID.AddPointer(ElemRef.getParent());
+ ID.AddInteger(ElemRef.getIndexInBlock());
}
void printJson(llvm::raw_ostream &Out, const char *NL = "\n") const;
@@ -234,9 +228,9 @@ public:
return reinterpret_cast<const CFGBlock*>(getData1());
}
- Optional<CFGElement> getFirstElement() const {
+ std::optional<CFGElement> getFirstElement() const {
const CFGBlock *B = getBlock();
- return B->empty() ? Optional<CFGElement>() : B->front();
+ return B->empty() ? std::optional<CFGElement>() : B->front();
}
private:
@@ -268,6 +262,7 @@ private:
}
};
+// FIXME: Eventually we want to take a CFGElementRef as parameter here too.
class StmtPoint : public ProgramPoint {
public:
StmtPoint(const Stmt *S, const void *p2, Kind k, const LocationContext *L,
@@ -559,8 +554,9 @@ private:
class ImplicitCallPoint : public ProgramPoint {
public:
ImplicitCallPoint(const Decl *D, SourceLocation Loc, Kind K,
- const LocationContext *L, const ProgramPointTag *Tag)
- : ProgramPoint(Loc.getPtrEncoding(), D, K, L, Tag) {}
+ const LocationContext *L, const ProgramPointTag *Tag,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : ProgramPoint(Loc.getPtrEncoding(), D, K, L, Tag, ElemRef) {}
const Decl *getDecl() const { return static_cast<const Decl *>(getData2()); }
SourceLocation getLocation() const {
@@ -583,8 +579,9 @@ private:
class PreImplicitCall : public ImplicitCallPoint {
public:
PreImplicitCall(const Decl *D, SourceLocation Loc, const LocationContext *L,
+ CFGBlock::ConstCFGElementRef ElemRef,
const ProgramPointTag *Tag = nullptr)
- : ImplicitCallPoint(D, Loc, PreImplicitCallKind, L, Tag) {}
+ : ImplicitCallPoint(D, Loc, PreImplicitCallKind, L, Tag, ElemRef) {}
private:
friend class ProgramPoint;
@@ -600,8 +597,9 @@ private:
class PostImplicitCall : public ImplicitCallPoint {
public:
PostImplicitCall(const Decl *D, SourceLocation Loc, const LocationContext *L,
+ CFGBlock::ConstCFGElementRef ElemRef,
const ProgramPointTag *Tag = nullptr)
- : ImplicitCallPoint(D, Loc, PostImplicitCallKind, L, Tag) {}
+ : ImplicitCallPoint(D, Loc, PostImplicitCallKind, L, Tag, ElemRef) {}
private:
friend class ProgramPoint;
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/RetainSummaryManager.h b/contrib/llvm-project/clang/include/clang/Analysis/RetainSummaryManager.h
index b7ccb0317830..86865b9da421 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/RetainSummaryManager.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/RetainSummaryManager.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_RETAINSUMMARY_MANAGER_H
-#define LLVM_CLANG_ANALYSIS_RETAINSUMMARY_MANAGER_H
+#ifndef LLVM_CLANG_ANALYSIS_RETAINSUMMARYMANAGER_H
+#define LLVM_CLANG_ANALYSIS_RETAINSUMMARYMANAGER_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
@@ -25,6 +25,7 @@
#include "clang/Analysis/AnyCall.h"
#include "clang/Analysis/SelectorExtras.h"
#include "llvm/ADT/STLExtras.h"
+#include <optional>
using namespace clang;
@@ -648,8 +649,9 @@ public:
IdentityOrZero
};
- Optional<BehaviorSummary> canEval(const CallExpr *CE, const FunctionDecl *FD,
- bool &hasTrustedImplementationAnnotation);
+ std::optional<BehaviorSummary>
+ canEval(const CallExpr *CE, const FunctionDecl *FD,
+ bool &hasTrustedImplementationAnnotation);
/// \return Whether the type corresponds to a known smart pointer
/// implementation (that is, everything about it is inlineable).
@@ -686,8 +688,8 @@ private:
Selector S, QualType RetTy);
/// Determine if there is a special return effect for this function or method.
- Optional<RetEffect> getRetEffectFromAnnotations(QualType RetTy,
- const Decl *D);
+ std::optional<RetEffect> getRetEffectFromAnnotations(QualType RetTy,
+ const Decl *D);
void updateSummaryFromAnnotations(const RetainSummary *&Summ,
const ObjCMethodDecl *MD);
@@ -719,14 +721,14 @@ private:
/// type for functions/methods) @c QT has any of the given attributes,
/// provided they pass necessary validation checks AND tracking the given
/// attribute is enabled.
- /// Returns the object kind corresponding to the present attribute, or None,
- /// if none of the specified attributes are present.
+ /// Returns the object kind corresponding to the present attribute, or
+ /// std::nullopt, if none of the specified attributes are present.
/// Crashes if passed an attribute which is not explicitly handled.
template <class T>
- Optional<ObjKind> hasAnyEnabledAttrOf(const Decl *D, QualType QT);
+ std::optional<ObjKind> hasAnyEnabledAttrOf(const Decl *D, QualType QT);
template <class T1, class T2, class... Others>
- Optional<ObjKind> hasAnyEnabledAttrOf(const Decl *D, QualType QT);
+ std::optional<ObjKind> hasAnyEnabledAttrOf(const Decl *D, QualType QT);
friend class RetainSummaryTemplate;
};
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/SelectorExtras.h b/contrib/llvm-project/clang/include/clang/Analysis/SelectorExtras.h
index d26e9159a937..1e1daf5706bb 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/SelectorExtras.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/SelectorExtras.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_LIB_ANALYSIS_SELECTOREXTRAS_H
-#define LLVM_CLANG_LIB_ANALYSIS_SELECTOREXTRAS_H
+#ifndef LLVM_CLANG_ANALYSIS_SELECTOREXTRAS_H
+#define LLVM_CLANG_ANALYSIS_SELECTOREXTRAS_H
#include "clang/AST/ASTContext.h"
@@ -16,7 +16,7 @@ namespace clang {
template <typename... IdentifierInfos>
static inline Selector getKeywordSelector(ASTContext &Ctx,
IdentifierInfos *... IIs) {
- static_assert(sizeof...(IdentifierInfos),
+ static_assert(sizeof...(IdentifierInfos) > 0,
"keyword selectors must have at least one argument");
SmallVector<IdentifierInfo *, 10> II({&Ctx.Idents.get(IIs)...});
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/Support/BumpVector.h b/contrib/llvm-project/clang/include/clang/Analysis/Support/BumpVector.h
index 74092dabbfda..6c3f11e99306 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/Support/BumpVector.h
+++ b/contrib/llvm-project/clang/include/clang/Analysis/Support/BumpVector.h
@@ -42,6 +42,15 @@ public:
Other.Alloc.setPointer(nullptr);
}
+ // The move assignment operator is defined as deleted pending further
+ // motivation.
+ BumpVectorContext &operator=(BumpVectorContext &&) = delete;
+
+ // The copy constrcutor and copy assignment operator is defined as deleted
+ // pending further motivation.
+ BumpVectorContext(const BumpVectorContext &) = delete;
+ BumpVectorContext &operator=(const BumpVectorContext &) = delete;
+
/// Construct a new BumpVectorContext that reuses an existing
/// BumpPtrAllocator. This BumpPtrAllocator is not destroyed when the
/// BumpVectorContext object is destroyed.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/AArch64SVEACLETypes.def b/contrib/llvm-project/clang/include/clang/Basic/AArch64SVEACLETypes.def
index b98a07436e94..fa9c1ac0491c 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/AArch64SVEACLETypes.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/AArch64SVEACLETypes.def
@@ -49,6 +49,11 @@
SVE_TYPE(Name, Id, SingletonId)
#endif
+#ifndef SVE_OPAQUE_TYPE
+#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId) \
+ SVE_TYPE(Name, Id, SingletonId)
+#endif
+
//===- Vector point types -----------------------------------------------===//
@@ -66,7 +71,7 @@ SVE_VECTOR_TYPE("__SVFloat16_t", "__SVFloat16_t", SveFloat16, SveFloat16Ty, 8, 1
SVE_VECTOR_TYPE("__SVFloat32_t", "__SVFloat32_t", SveFloat32, SveFloat32Ty, 4, 32, true, true, false)
SVE_VECTOR_TYPE("__SVFloat64_t", "__SVFloat64_t", SveFloat64, SveFloat64Ty, 2, 64, true, true, false)
-SVE_VECTOR_TYPE("__SVBFloat16_t", "__SVBFloat16_t", SveBFloat16, SveBFloat16Ty, 8, 16, true, false, true)
+SVE_VECTOR_TYPE("__SVBfloat16_t", "__SVBfloat16_t", SveBFloat16, SveBFloat16Ty, 8, 16, true, false, true)
//
// x2
@@ -124,7 +129,12 @@ SVE_VECTOR_TYPE("__clang_svfloat64x4_t", "svfloat64x4_t", SveFloat64x4, SveFloat
SVE_VECTOR_TYPE("__clang_svbfloat16x4_t", "svbfloat16x4_t", SveBFloat16x4, SveBFloat16x4Ty, 32, 16, true, false, true)
SVE_PREDICATE_TYPE("__SVBool_t", "__SVBool_t", SveBool, SveBoolTy, 16)
+SVE_PREDICATE_TYPE("__clang_svboolx2_t", "svboolx2_t", SveBoolx2, SveBoolx2Ty, 32)
+SVE_PREDICATE_TYPE("__clang_svboolx4_t", "svboolx4_t", SveBoolx4, SveBoolx4Ty, 64)
+
+SVE_OPAQUE_TYPE("__SVCount_t", "__SVCount_t", SveCount, SveCountTy)
#undef SVE_VECTOR_TYPE
#undef SVE_PREDICATE_TYPE
+#undef SVE_OPAQUE_TYPE
#undef SVE_TYPE
diff --git a/contrib/llvm-project/clang/include/clang/Basic/AddressSpaces.h b/contrib/llvm-project/clang/include/clang/Basic/AddressSpaces.h
index 99bb67fd26d1..7b723d508fff 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/AddressSpaces.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/AddressSpaces.h
@@ -56,6 +56,12 @@ enum class LangAS : unsigned {
ptr32_uptr,
ptr64,
+ // HLSL specific address spaces.
+ hlsl_groupshared,
+
+ // Wasm specific address spaces.
+ wasm_funcref,
+
// This denotes the count of language-specific address spaces and also
// the offset added to the target-specific address spaces, which are usually
// specified by address space attributes __attribute__(address_space(n))).
diff --git a/contrib/llvm-project/clang/include/clang/Basic/AlignedAllocation.h b/contrib/llvm-project/clang/include/clang/Basic/AlignedAllocation.h
index ab9f19da5d59..ac26eb4a276d 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/AlignedAllocation.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/AlignedAllocation.h
@@ -12,12 +12,12 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_BASIC_ALIGNED_ALLOCATION_H
-#define LLVM_CLANG_BASIC_ALIGNED_ALLOCATION_H
+#ifndef LLVM_CLANG_BASIC_ALIGNEDALLOCATION_H
+#define LLVM_CLANG_BASIC_ALIGNEDALLOCATION_H
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/VersionTuple.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
@@ -26,8 +26,8 @@ inline llvm::VersionTuple alignedAllocMinVersion(llvm::Triple::OSType OS) {
default:
break;
case llvm::Triple::Darwin:
- case llvm::Triple::MacOSX: // Earliest supporting version is 10.14.
- return llvm::VersionTuple(10U, 14U);
+ case llvm::Triple::MacOSX: // Earliest supporting version is 10.13.
+ return llvm::VersionTuple(10U, 13U);
case llvm::Triple::IOS:
case llvm::Triple::TvOS: // Earliest supporting version is 11.0.0.
return llvm::VersionTuple(11U);
@@ -42,4 +42,4 @@ inline llvm::VersionTuple alignedAllocMinVersion(llvm::Triple::OSType OS) {
} // end namespace clang
-#endif // LLVM_CLANG_BASIC_ALIGNED_ALLOCATION_H
+#endif // LLVM_CLANG_BASIC_ALIGNEDALLOCATION_H
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Attr.td b/contrib/llvm-project/clang/include/clang/Basic/Attr.td
index 12d09181a2ea..dbf2dd2120fb 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Attr.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/Attr.td
@@ -19,10 +19,19 @@ def DocCatType : DocumentationCategory<"Type Attributes">;
def DocCatStmt : DocumentationCategory<"Statement Attributes">;
def DocCatDecl : DocumentationCategory<"Declaration Attributes">;
-// Attributes listed under the Undocumented category do not generate any public
-// documentation. Ideally, this category should be used for internal-only
-// attributes which contain no spellings.
-def DocCatUndocumented : DocumentationCategory<"Undocumented">;
+// This category is for attributes which have not yet been properly documented,
+// but should be.
+def DocCatUndocumented : DocumentationCategory<"Undocumented"> {
+ let Content = [{
+This section lists attributes which are recognized by Clang, but which are
+currently missing documentation.
+}];
+}
+
+// Attributes listed under the InternalOnly category do not generate any entry
+// in the documentation. This category should be used only when we _want_
+// to not document the attribute, e.g. if the attribute has no spellings.
+def DocCatInternalOnly : DocumentationCategory<"InternalOnly">;
class DocDeprecated<string replacement = ""> {
// If the Replacement field is empty, no replacement will be listed with the
@@ -48,11 +57,17 @@ class Documentation {
DocDeprecated Deprecated;
}
-// Specifies that the attribute is explicitly undocumented. This can be a
-// helpful placeholder for the attribute while working on the implementation,
-// but should not be used once feature work has been completed.
+// Specifies that the attribute is explicitly omitted from the documentation,
+// because it is not intended to be user-facing.
+def InternalOnly : Documentation {
+ let Category = DocCatInternalOnly;
+}
+
+// Specifies that the attribute is undocumented, but that it _should_ have
+// documentation.
def Undocumented : Documentation {
let Category = DocCatUndocumented;
+ let Content = "No documentation.";
}
include "clang/Basic/AttrDocs.td"
@@ -92,6 +107,10 @@ def NonBitField : SubsetSubject<Field,
[{!S->isBitField()}],
"non-bit-field non-static data members">;
+def BitField : SubsetSubject<Field,
+ [{S->isBitField()}],
+ "bit-field data members">;
+
def NonStaticCXXMethod : SubsetSubject<CXXMethod,
[{!S->isStatic()}],
"non-static member functions">;
@@ -118,6 +137,17 @@ def SharedVar : SubsetSubject<Var,
def GlobalVar : SubsetSubject<Var,
[{S->hasGlobalStorage()}], "global variables">;
+def ExternalGlobalVar : SubsetSubject<Var,
+ [{S->hasGlobalStorage() &&
+ S->getStorageClass()!=StorageClass::SC_Static &&
+ !S->isLocalExternDecl()}],
+ "external global variables">;
+
+def NonTLSGlobalVar : SubsetSubject<Var,
+ [{S->hasGlobalStorage() &&
+ S->getTLSKind() == 0}],
+ "non-TLS global variables">;
+
def InlineFunction : SubsetSubject<Function,
[{S->isInlineSpecified()}], "inline functions">;
@@ -126,6 +156,14 @@ def FunctionTmpl
FunctionDecl::TK_FunctionTemplate}],
"function templates">;
+def HLSLEntry
+ : SubsetSubject<Function,
+ [{S->isExternallyVisible() && !isa<CXXMethodDecl>(S)}],
+ "global functions">;
+def HLSLBufferObj : SubsetSubject<HLSLBuffer,
+ [{isa<HLSLBufferDecl>(S)}],
+ "cbuffer/tbuffer">;
+
def ClassTmpl : SubsetSubject<CXXRecord, [{S->getDescribedClassTemplate()}],
"class templates">;
@@ -139,6 +177,12 @@ def FunctionLike : SubsetSubject<DeclBase,
[{S->getFunctionType(false) != nullptr}],
"functions, function pointers">;
+// Function Pointer is a stricter version of FunctionLike that only allows function
+// pointers.
+def FunctionPointer : SubsetSubject<DeclBase,
+ [{S->isFunctionPointerType()}],
+ "functions pointers">;
+
def OpenCLKernelFunction
: SubsetSubject<Function, [{S->hasAttr<OpenCLKernelAttr>()}],
"kernel functions">;
@@ -201,6 +245,7 @@ class DeclArgument<DeclNode kind, string name, bit opt = 0, bit fake = 0>
// OMPTraitProperty := {Kind}
//
class OMPTraitInfoArgument<string name> : Argument<name, 0>;
+class VariadicOMPInteropInfoArgument<string name> : Argument<name, 0>;
class TypeArgument<string name, bit opt = 0> : Argument<name, opt>;
class UnsignedArgument<string name, bit opt = 0> : Argument<name, opt>;
@@ -237,64 +282,91 @@ class DefaultIntArgument<string name, int default> : IntArgument<name, 1> {
int Default = default;
}
-// This argument is more complex, it includes the enumerator type name,
-// a list of strings to accept, and a list of enumerators to map them to.
+// This argument is more complex, it includes the enumerator type
+// name, whether the enum type is externally defined, a list of
+// strings to accept, and a list of enumerators to map them to.
class EnumArgument<string name, string type, list<string> values,
- list<string> enums, bit opt = 0, bit fake = 0>
+ list<string> enums, bit opt = 0, bit fake = 0,
+ bit isExternalType = 0>
: Argument<name, opt, fake> {
string Type = type;
list<string> Values = values;
list<string> Enums = enums;
+ bit IsExternalType = isExternalType;
}
// FIXME: There should be a VariadicArgument type that takes any other type
// of argument and generates the appropriate type.
class VariadicEnumArgument<string name, string type, list<string> values,
- list<string> enums> : Argument<name, 1> {
+ list<string> enums, bit isExternalType = 0>
+ : Argument<name, 1> {
string Type = type;
list<string> Values = values;
list<string> Enums = enums;
+ bit IsExternalType = isExternalType;
}
+// Represents an attribute wrapped by another attribute.
+class WrappedAttr<string name, bit opt = 0> : Argument<name, opt>;
+
// This handles one spelling of an attribute.
-class Spelling<string name, string variety> {
+class Spelling<string name, string variety, int version = 1> {
string Name = name;
string Variety = variety;
+ int Version = version;
}
class GNU<string name> : Spelling<name, "GNU">;
-class Declspec<string name> : Spelling<name, "Declspec">;
+class Declspec<string name> : Spelling<name, "Declspec"> {
+ bit PrintOnLeft = 1;
+}
class Microsoft<string name> : Spelling<name, "Microsoft">;
class CXX11<string namespace, string name, int version = 1>
- : Spelling<name, "CXX11"> {
+ : Spelling<name, "CXX11", version> {
+ bit CanPrintOnLeft = 0;
string Namespace = namespace;
- int Version = version;
}
-class C2x<string namespace, string name, int version = 1>
- : Spelling<name, "C2x"> {
+class C23<string namespace, string name, int version = 1>
+ : Spelling<name, "C23", version> {
string Namespace = namespace;
- int Version = version;
}
-class Keyword<string name> : Spelling<name, "Keyword">;
+class Keyword<string name, bit hasOwnParseRules>
+ : Spelling<name, "Keyword"> {
+ bit HasOwnParseRules = hasOwnParseRules;
+}
+
+// A keyword that can appear wherever a standard attribute can appear,
+// and that appertains to whatever a standard attribute would appertain to.
+// This is useful for things that affect semantics but that should otherwise
+// be treated like standard attributes.
+class RegularKeyword<string name> : Keyword<name, 0> {}
+
+// A keyword that has its own individual parsing rules.
+class CustomKeyword<string name> : Keyword<name, 1> {}
+
class Pragma<string namespace, string name> : Spelling<name, "Pragma"> {
string Namespace = namespace;
}
// The GCC spelling implies GNU<name>, CXX11<"gnu", name>, and optionally,
-// C2x<"gnu", name>. This spelling should be used for any GCC-compatible
+// C23<"gnu", name>. This spelling should be used for any GCC-compatible
// attributes.
class GCC<string name, bit allowInC = 1> : Spelling<name, "GCC"> {
bit AllowInC = allowInC;
}
// The Clang spelling implies GNU<name>, CXX11<"clang", name>, and optionally,
-// C2x<"clang", name>. This spelling should be used for any Clang-specific
+// C23<"clang", name>. This spelling should be used for any Clang-specific
// attributes.
-class Clang<string name, bit allowInC = 1> : Spelling<name, "Clang"> {
+class Clang<string name, bit allowInC = 1, int version = 1>
+ : Spelling<name, "Clang", version> {
bit AllowInC = allowInC;
}
+// HLSL Semantic spellings
+class HLSLSemantic<string name> : Spelling<name, "HLSLSemantic">;
+
class Accessor<string name, list<Spelling> spellings> {
string Name = name;
list<Spelling> Spellings = spellings;
@@ -336,6 +408,8 @@ def ObjCAutoRefCount : LangOpt<"ObjCAutoRefCount">;
def ObjCNonFragileRuntime
: LangOpt<"", "LangOpts.ObjCRuntime.allowsClassStubs()">;
+def HLSL : LangOpt<"HLSL">;
+
// Language option for CMSE extensions
def Cmse : LangOpt<"Cmse">;
@@ -361,10 +435,11 @@ class TargetArch<list<string> arches> : TargetSpec {
let Arches = arches;
}
def TargetARM : TargetArch<["arm", "thumb", "armeb", "thumbeb"]>;
-def TargetAArch64 : TargetArch<["aarch64"]>;
+def TargetAArch64 : TargetArch<["aarch64", "aarch64_be", "aarch64_32"]>;
def TargetAnyArm : TargetArch<!listconcat(TargetARM.Arches, TargetAArch64.Arches)>;
def TargetAVR : TargetArch<["avr"]>;
def TargetBPF : TargetArch<["bpfel", "bpfeb"]>;
+def TargetLoongArch : TargetArch<["loongarch32", "loongarch64"]>;
def TargetMips32 : TargetArch<["mips", "mipsel"]>;
def TargetAnyMips : TargetArch<["mips", "mipsel", "mips64", "mips64el"]>;
def TargetMSP430 : TargetArch<["msp430"]>;
@@ -373,6 +448,10 @@ def TargetRISCV : TargetArch<["riscv32", "riscv64"]>;
def TargetX86 : TargetArch<["x86"]>;
def TargetAnyX86 : TargetArch<["x86", "x86_64"]>;
def TargetWebAssembly : TargetArch<["wasm32", "wasm64"]>;
+def TargetNVPTX : TargetArch<["nvptx", "nvptx64"]>;
+def TargetWindows : TargetSpec {
+ let OSes = ["Win32"];
+}
def TargetHasDLLImportExport : TargetSpec {
let CustomCode = [{ Target.getTriple().hasDLLImportExport() }];
}
@@ -385,10 +464,19 @@ def TargetMicrosoftCXXABI : TargetArch<["x86", "x86_64", "arm", "thumb", "aarch6
def TargetELF : TargetSpec {
let ObjectFormats = ["ELF"];
}
+def TargetELFOrMachO : TargetSpec {
+ let ObjectFormats = ["ELF", "MachO"];
+}
def TargetSupportsInitPriority : TargetSpec {
let CustomCode = [{ !Target.getTriple().isOSzOS() }];
}
+
+class TargetSpecificSpelling<TargetSpec target, list<Spelling> spellings> {
+ TargetSpec Target = target;
+ list<Spelling> Spellings = spellings;
+}
+
// Attribute subject match rules that are used for #pragma clang attribute.
//
// A instance of AttrSubjectMatcherRule represents an individual match rule.
@@ -501,6 +589,12 @@ class AttrSubjectMatcherAggregateRule<AttrSubject subject> {
def SubjectMatcherForNamed : AttrSubjectMatcherAggregateRule<Named>;
class Attr {
+ // Specifies that when printed, this attribute is meaningful on the
+ // 'left side' of the declaration.
+ bit CanPrintOnLeft = 1;
+ // Specifies that when printed, this attribute is required to be printed on
+ // the 'left side' of the declaration.
+ bit PrintOnLeft = 0;
// The various ways in which an attribute can be spelled in source
list<Spelling> Spellings;
// The things to which an attribute can appertain
@@ -509,6 +603,8 @@ class Attr {
list<Argument> Args = [];
// Accessors which should be generated for the attribute.
list<Accessor> Accessors = [];
+ // Specify targets for spellings.
+ list<TargetSpecificSpelling> TargetSpecificSpellings = [];
// Set to true for attributes with arguments which require delayed parsing.
bit LateParsed = 0;
// Set to false to prevent an attribute from being propagated from a template
@@ -541,6 +637,8 @@ class Attr {
// match rules.
// - It has GNU/CXX11 spelling and doesn't require delayed parsing.
bit PragmaAttributeSupport;
+ // Set to true if this attribute accepts parameter pack expansion expressions.
+ bit AcceptsExprPack = 0;
// Lists language options, one of which is required to be true for the
// attribute to be applicable. If empty, no language options are required.
list<LangOpt> LangOpts = [];
@@ -580,6 +678,9 @@ class DeclOrTypeAttr : InheritableAttr;
/// A attribute is either a declaration attribute or a statement attribute.
class DeclOrStmtAttr : InheritableAttr;
+/// An attribute class for HLSL Annotations.
+class HLSLAnnotationAttr : InheritableAttr;
+
/// A target-specific attribute. This class is meant to be used as a mixin
/// with InheritableAttr or Attr depending on the attribute's needs.
class TargetSpecificAttr<TargetSpec target> {
@@ -614,7 +715,7 @@ class IgnoredAttr : Attr {
let Ignored = 1;
let ASTNode = 0;
let SemaHandler = 0;
- let Documentation = [Undocumented];
+ let Documentation = [InternalOnly];
}
//
@@ -644,7 +745,7 @@ def Alias : Attr {
def BuiltinAlias : Attr {
let Spellings = [CXX11<"clang", "builtin_alias">,
- C2x<"clang", "builtin_alias">,
+ C23<"clang", "builtin_alias">,
GNU<"clang_builtin_alias">];
let Args = [IdentifierArgument<"BuiltinName">];
let Subjects = SubjectList<[Function], ErrorDiag>;
@@ -659,13 +760,13 @@ def ArmBuiltinAlias : InheritableAttr, TargetSpecificAttr<TargetAnyArm> {
}
def Aligned : InheritableAttr {
- let Spellings = [GCC<"aligned">, Declspec<"align">, Keyword<"alignas">,
- Keyword<"_Alignas">];
+ let Spellings = [GCC<"aligned">, Declspec<"align">, CustomKeyword<"alignas">,
+ CustomKeyword<"_Alignas">];
let Args = [AlignedArgument<"Alignment", 1>];
let Accessors = [Accessor<"isGNU", [GCC<"aligned">]>,
- Accessor<"isC11", [Keyword<"_Alignas">]>,
- Accessor<"isAlignas", [Keyword<"alignas">,
- Keyword<"_Alignas">]>,
+ Accessor<"isC11", [CustomKeyword<"_Alignas">]>,
+ Accessor<"isAlignas", [CustomKeyword<"alignas">,
+ CustomKeyword<"_Alignas">]>,
Accessor<"isDeclspec",[Declspec<"align">]>];
let Documentation = [Undocumented];
}
@@ -694,19 +795,23 @@ def AlignMac68k : InheritableAttr {
// This attribute has no spellings as it is only ever created implicitly.
let Spellings = [];
let SemaHandler = 0;
- let Documentation = [Undocumented];
+ let Documentation = [InternalOnly];
}
def AlignNatural : InheritableAttr {
// This attribute has no spellings as it is only ever created implicitly.
let Spellings = [];
let SemaHandler = 0;
- let Documentation = [Undocumented];
+ let Documentation = [InternalOnly];
}
-def AlwaysInline : InheritableAttr {
- let Spellings = [GCC<"always_inline">, Keyword<"__forceinline">];
- let Subjects = SubjectList<[Function]>;
+def AlwaysInline : DeclOrStmtAttr {
+ let Spellings = [GCC<"always_inline">, CXX11<"clang", "always_inline">,
+ C23<"clang", "always_inline">, CustomKeyword<"__forceinline">];
+ let Accessors = [Accessor<"isClangAlwaysInline", [CXX11<"clang", "always_inline">,
+ C23<"clang", "always_inline">]>];
+ let Subjects = SubjectList<[Function, Stmt], WarnDiag,
+ "functions and statements">;
let Documentation = [AlwaysInlineDocs];
}
@@ -745,7 +850,8 @@ def XRayLogArgs : InheritableAttr {
def PatchableFunctionEntry
: InheritableAttr,
TargetSpecificAttr<TargetArch<
- ["aarch64", "aarch64_be", "riscv32", "riscv64", "x86", "x86_64"]>> {
+ ["aarch64", "aarch64_be", "loongarch32", "loongarch64", "riscv32",
+ "riscv64", "x86", "x86_64"]>> {
let Spellings = [GCC<"patchable_function_entry">];
let Subjects = SubjectList<[Function, ObjCMethod]>;
let Args = [UnsignedArgument<"Count">, DefaultIntArgument<"Offset", 0>];
@@ -779,14 +885,23 @@ def Annotate : InheritableParamAttr {
return AnnotateAttr::Create(Ctx, Annotation, nullptr, 0, CommonInfo);
}
static AnnotateAttr *CreateImplicit(ASTContext &Ctx, llvm::StringRef Annotation, \
- const AttributeCommonInfo &CommonInfo = {SourceRange{}}) {
+ const AttributeCommonInfo &CommonInfo) {
return AnnotateAttr::CreateImplicit(Ctx, Annotation, nullptr, 0, CommonInfo);
}
}];
let PragmaAttributeSupport = 1;
+ let AcceptsExprPack = 1;
let Documentation = [Undocumented];
}
+def AnnotateType : TypeAttr {
+ let Spellings = [CXX11<"clang", "annotate_type">, C23<"clang", "annotate_type">];
+ let Args = [StringArgument<"Annotation">, VariadicExprArgument<"Args">];
+ let HasCustomParsing = 1;
+ let AcceptsExprPack = 1;
+ let Documentation = [AnnotateTypeDocs];
+}
+
def ARMInterrupt : InheritableAttr, TargetSpecificAttr<TargetARM> {
// NOTE: If you add any additional spellings, M68kInterrupt's,
// MSP430Interrupt's, MipsInterrupt's and AnyX86Interrupt's spellings
@@ -815,7 +930,8 @@ def AVRSignal : InheritableAttr, TargetSpecificAttr<TargetAVR> {
}
def AsmLabel : InheritableAttr {
- let Spellings = [Keyword<"asm">, Keyword<"__asm__">];
+ let CanPrintOnLeft = 0;
+ let Spellings = [CustomKeyword<"asm">, CustomKeyword<"__asm__">];
let Args = [
// Label specifies the mangled name for the decl.
StringArgument<"Label">,
@@ -848,10 +964,12 @@ def Availability : InheritableAttr {
[{static llvm::StringRef getPrettyPlatformName(llvm::StringRef Platform) {
return llvm::StringSwitch<llvm::StringRef>(Platform)
.Case("android", "Android")
+ .Case("fuchsia", "Fuchsia")
.Case("ios", "iOS")
.Case("macos", "macOS")
.Case("tvos", "tvOS")
.Case("watchos", "watchOS")
+ .Case("driverkit", "DriverKit")
.Case("ios_app_extension", "iOS (App Extension)")
.Case("macos_app_extension", "macOS (App Extension)")
.Case("tvos_app_extension", "tvOS (App Extension)")
@@ -859,6 +977,8 @@ def Availability : InheritableAttr {
.Case("maccatalyst", "macCatalyst")
.Case("maccatalyst_app_extension", "macCatalyst (App Extension)")
.Case("swift", "Swift")
+ .Case("shadermodel", "HLSL ShaderModel")
+ .Case("ohos", "OpenHarmony OS")
.Default(llvm::StringRef());
}
static llvm::StringRef getPlatformNameSourceSpelling(llvm::StringRef Platform) {
@@ -874,6 +994,7 @@ static llvm::StringRef getPlatformNameSourceSpelling(llvm::StringRef Platform) {
.Case("maccatalyst", "macCatalyst")
.Case("maccatalyst_app_extension", "macCatalystApplicationExtension")
.Case("zos", "z/OS")
+ .Case("shadermodel", "ShaderModel")
.Default(Platform);
}
static llvm::StringRef canonicalizePlatformName(llvm::StringRef Platform) {
@@ -888,6 +1009,7 @@ static llvm::StringRef canonicalizePlatformName(llvm::StringRef Platform) {
.Case("watchOSApplicationExtension", "watchos_app_extension")
.Case("macCatalyst", "maccatalyst")
.Case("macCatalystApplicationExtension", "maccatalyst_app_extension")
+ .Case("ShaderModel", "shadermodel")
.Default(Platform);
} }];
let HasCustomParsing = 1;
@@ -897,10 +1019,12 @@ static llvm::StringRef canonicalizePlatformName(llvm::StringRef Platform) {
}
def ExternalSourceSymbol : InheritableAttr {
- let Spellings = [Clang<"external_source_symbol">];
+ let Spellings = [Clang<"external_source_symbol", /*allowInC=*/1,
+ /*version=*/20230206>];
let Args = [StringArgument<"language", 1>,
StringArgument<"definedIn", 1>,
- BoolArgument<"generatedDeclaration", 1>];
+ BoolArgument<"generatedDeclaration", 1>,
+ StringArgument<"USR", 1>];
let HasCustomParsing = 1;
let Subjects = SubjectList<[Named]>;
let Documentation = [ExternalSourceSymbolDocs];
@@ -925,7 +1049,7 @@ def CarriesDependency : InheritableParamAttr {
}
def CDecl : DeclOrTypeAttr {
- let Spellings = [GCC<"cdecl">, Keyword<"__cdecl">, Keyword<"_cdecl">];
+ let Spellings = [GCC<"cdecl">, CustomKeyword<"__cdecl">, CustomKeyword<"_cdecl">];
// let Subjects = [Function, ObjCMethod];
let Documentation = [Undocumented];
}
@@ -970,6 +1094,50 @@ def CFConsumed : InheritableParamAttr {
let Documentation = [RetainBehaviorDocs];
}
+
+// coro_only_destroy_when_complete indicates the coroutines whose return type
+// is marked by coro_only_destroy_when_complete can only be destroyed when the
+// coroutine completes. Then the space for the destroy functions can be saved.
+def CoroOnlyDestroyWhenComplete : InheritableAttr {
+ let Spellings = [Clang<"coro_only_destroy_when_complete">];
+ let Subjects = SubjectList<[CXXRecord]>;
+ let LangOpts = [CPlusPlus];
+ let Documentation = [CoroOnlyDestroyWhenCompleteDocs];
+ let SimpleHandler = 1;
+}
+
+def CoroReturnType : InheritableAttr {
+ let Spellings = [Clang<"coro_return_type">];
+ let Subjects = SubjectList<[CXXRecord]>;
+ let LangOpts = [CPlusPlus];
+ let Documentation = [CoroReturnTypeAndWrapperDoc];
+ let SimpleHandler = 1;
+}
+
+def CoroWrapper : InheritableAttr {
+ let Spellings = [Clang<"coro_wrapper">];
+ let Subjects = SubjectList<[Function]>;
+ let LangOpts = [CPlusPlus];
+ let Documentation = [CoroReturnTypeAndWrapperDoc];
+ let SimpleHandler = 1;
+}
+
+def CoroLifetimeBound : InheritableAttr {
+ let Spellings = [Clang<"coro_lifetimebound">];
+ let Subjects = SubjectList<[CXXRecord]>;
+ let LangOpts = [CPlusPlus];
+ let Documentation = [CoroLifetimeBoundDoc];
+ let SimpleHandler = 1;
+}
+
+def CoroDisableLifetimeBound : InheritableAttr {
+ let Spellings = [Clang<"coro_disable_lifetimebound">];
+ let Subjects = SubjectList<[Function]>;
+ let LangOpts = [CPlusPlus];
+ let Documentation = [CoroLifetimeBoundDoc];
+ let SimpleHandler = 1;
+}
+
// OSObject-based attributes.
def OSConsumed : InheritableParamAttr {
let Spellings = [Clang<"os_consumed">];
@@ -1012,7 +1180,7 @@ def Cleanup : InheritableAttr {
let Spellings = [GCC<"cleanup">];
let Args = [DeclArgument<Function, "FunctionDecl">];
let Subjects = SubjectList<[LocalVar]>;
- let Documentation = [Undocumented];
+ let Documentation = [CleanupDocs];
}
def CmseNSEntry : InheritableAttr, TargetSpecificAttr<TargetARM> {
@@ -1031,7 +1199,7 @@ def CmseNSCall : TypeAttr, TargetSpecificAttr<TargetARM> {
def Cold : InheritableAttr {
let Spellings = [GCC<"cold">];
let Subjects = SubjectList<[Function]>;
- let Documentation = [Undocumented];
+ let Documentation = [ColdFunctionEntryDocs];
let SimpleHandler = 1;
}
@@ -1050,10 +1218,10 @@ def Const : InheritableAttr {
def ConstInit : InheritableAttr {
// This attribute does not have a C [[]] spelling because it requires the
// CPlusPlus language option.
- let Spellings = [Keyword<"constinit">,
+ let Spellings = [CustomKeyword<"constinit">,
Clang<"require_constant_initialization", 0>];
let Subjects = SubjectList<[GlobalVar], ErrorDiag>;
- let Accessors = [Accessor<"isConstinit", [Keyword<"constinit">]>];
+ let Accessors = [Accessor<"isConstinit", [CustomKeyword<"constinit">]>];
let Documentation = [ConstInitDocs];
let LangOpts = [CPlusPlus];
let SimpleHandler = 1;
@@ -1063,7 +1231,7 @@ def Constructor : InheritableAttr {
let Spellings = [GCC<"constructor">];
let Args = [DefaultIntArgument<"Priority", 65535>];
let Subjects = SubjectList<[Function]>;
- let Documentation = [Undocumented];
+ let Documentation = [CtorDtorDocs];
}
def CPUSpecific : InheritableAttr {
@@ -1150,6 +1318,12 @@ def CUDAHost : InheritableAttr {
}
def : MutualExclusions<[CUDAGlobal, CUDAHost]>;
+def NVPTXKernel : InheritableAttr, TargetSpecificAttr<TargetNVPTX> {
+ let Spellings = [Clang<"nvptx_kernel">];
+ let Subjects = SubjectList<[Function]>;
+ let Documentation = [Undocumented];
+}
+
def HIPManaged : InheritableAttr {
let Spellings = [GNU<"managed">, Declspec<"__managed__">];
let Subjects = SubjectList<[Var]>;
@@ -1161,12 +1335,13 @@ def CUDAInvalidTarget : InheritableAttr {
let Spellings = [];
let Subjects = SubjectList<[Function]>;
let LangOpts = [CUDA];
- let Documentation = [Undocumented];
+ let Documentation = [InternalOnly];
}
def CUDALaunchBounds : InheritableAttr {
let Spellings = [GNU<"launch_bounds">, Declspec<"__launch_bounds__">];
- let Args = [ExprArgument<"MaxThreads">, ExprArgument<"MinBlocks", 1>];
+ let Args = [ExprArgument<"MaxThreads">, ExprArgument<"MinBlocks", 1>,
+ ExprArgument<"MaxBlocks", 1>];
let LangOpts = [CUDA];
let Subjects = SubjectList<[ObjCMethod, FunctionLike]>;
// An AST node is created for this attribute, but is not used by other parts
@@ -1190,24 +1365,31 @@ def SYCLKernel : InheritableAttr {
let Documentation = [SYCLKernelDocs];
}
+def SYCLSpecialClass: InheritableAttr {
+ let Spellings = [Clang<"sycl_special_class">];
+ let Subjects = SubjectList<[CXXRecord]>;
+ let LangOpts = [SYCL];
+ let Documentation = [SYCLSpecialClassDocs];
+}
+
def C11NoReturn : InheritableAttr {
- let Spellings = [Keyword<"_Noreturn">];
+ let Spellings = [CustomKeyword<"_Noreturn">];
let Subjects = SubjectList<[Function], ErrorDiag>;
let SemaHandler = 0;
let Documentation = [C11NoReturnDocs];
}
def CXX11NoReturn : InheritableAttr {
- let Spellings = [CXX11<"", "noreturn", 200809>];
+ let Spellings = [CXX11<"", "noreturn", 200809>,
+ C23<"", "noreturn", 202202>, C23<"", "_Noreturn", 202202>];
let Subjects = SubjectList<[Function], ErrorDiag>;
let Documentation = [CXX11NoReturnDocs];
- let SimpleHandler = 1;
}
// Similar to CUDA, OpenCL attributes do not receive a [[]] spelling because
// the specification does not expose them with one currently.
def OpenCLKernel : InheritableAttr {
- let Spellings = [Keyword<"__kernel">, Keyword<"kernel">];
+ let Spellings = [CustomKeyword<"__kernel">, CustomKeyword<"kernel">];
let Subjects = SubjectList<[Function], ErrorDiag>;
let Documentation = [Undocumented];
let SimpleHandler = 1;
@@ -1231,26 +1413,28 @@ def OpenCLIntelReqdSubGroupSize: InheritableAttr {
// This attribute is both a type attribute, and a declaration attribute (for
// parameter variables).
def OpenCLAccess : Attr {
- let Spellings = [Keyword<"__read_only">, Keyword<"read_only">,
- Keyword<"__write_only">, Keyword<"write_only">,
- Keyword<"__read_write">, Keyword<"read_write">];
+ let Spellings = [CustomKeyword<"__read_only">, CustomKeyword<"read_only">,
+ CustomKeyword<"__write_only">, CustomKeyword<"write_only">,
+ CustomKeyword<"__read_write">, CustomKeyword<"read_write">];
let Subjects = SubjectList<[ParmVar, TypedefName], ErrorDiag>;
- let Accessors = [Accessor<"isReadOnly", [Keyword<"__read_only">,
- Keyword<"read_only">]>,
- Accessor<"isReadWrite", [Keyword<"__read_write">,
- Keyword<"read_write">]>,
- Accessor<"isWriteOnly", [Keyword<"__write_only">,
- Keyword<"write_only">]>];
+ let Accessors = [Accessor<"isReadOnly", [CustomKeyword<"__read_only">,
+ CustomKeyword<"read_only">]>,
+ Accessor<"isReadWrite", [CustomKeyword<"__read_write">,
+ CustomKeyword<"read_write">]>,
+ Accessor<"isWriteOnly", [CustomKeyword<"__write_only">,
+ CustomKeyword<"write_only">]>];
let Documentation = [OpenCLAccessDocs];
}
def OpenCLPrivateAddressSpace : TypeAttr {
- let Spellings = [Keyword<"__private">, Keyword<"private">, Clang<"opencl_private">];
+ let Spellings = [CustomKeyword<"__private">, CustomKeyword<"private">,
+ Clang<"opencl_private">];
let Documentation = [OpenCLAddressSpacePrivateDocs];
}
def OpenCLGlobalAddressSpace : TypeAttr {
- let Spellings = [Keyword<"__global">, Keyword<"global">, Clang<"opencl_global">];
+ let Spellings = [CustomKeyword<"__global">, CustomKeyword<"global">,
+ Clang<"opencl_global">];
let Documentation = [OpenCLAddressSpaceGlobalDocs];
}
@@ -1265,17 +1449,20 @@ def OpenCLGlobalHostAddressSpace : TypeAttr {
}
def OpenCLLocalAddressSpace : TypeAttr {
- let Spellings = [Keyword<"__local">, Keyword<"local">, Clang<"opencl_local">];
+ let Spellings = [CustomKeyword<"__local">, CustomKeyword<"local">,
+ Clang<"opencl_local">];
let Documentation = [OpenCLAddressSpaceLocalDocs];
}
def OpenCLConstantAddressSpace : TypeAttr {
- let Spellings = [Keyword<"__constant">, Keyword<"constant">, Clang<"opencl_constant">];
+ let Spellings = [CustomKeyword<"__constant">, CustomKeyword<"constant">,
+ Clang<"opencl_constant">];
let Documentation = [OpenCLAddressSpaceConstantDocs];
}
def OpenCLGenericAddressSpace : TypeAttr {
- let Spellings = [Keyword<"__generic">, Keyword<"generic">, Clang<"opencl_generic">];
+ let Spellings = [CustomKeyword<"__generic">, CustomKeyword<"generic">,
+ Clang<"opencl_generic">];
let Documentation = [OpenCLAddressSpaceGenericDocs];
}
@@ -1298,7 +1485,7 @@ def RenderScriptKernel : Attr {
def Deprecated : InheritableAttr {
let Spellings = [GCC<"deprecated">, Declspec<"deprecated">,
CXX11<"","deprecated", 201309>,
- C2x<"", "deprecated", 201904>];
+ C23<"", "deprecated", 201904>];
let Args = [StringArgument<"Message", 1>,
// An optional string argument that enables us to provide a
// Fix-It.
@@ -1311,7 +1498,7 @@ def Destructor : InheritableAttr {
let Spellings = [GCC<"destructor">];
let Args = [DefaultIntArgument<"Priority", 65535>];
let Subjects = SubjectList<[Function]>;
- let Documentation = [Undocumented];
+ let Documentation = [CtorDtorDocs];
}
def EmptyBases : InheritableAttr, TargetSpecificAttr<TargetMicrosoftCXXABI> {
@@ -1331,6 +1518,7 @@ def AllocSize : InheritableAttr {
}
def EnableIf : InheritableAttr {
+ let CanPrintOnLeft = 0;
// Does not have a [[]] spelling because this attribute requires the ability
// to parse function arguments but the attribute is not written in the type
// position.
@@ -1356,7 +1544,7 @@ def ExtVectorType : Attr {
def FallThrough : StmtAttr {
let Spellings = [CXX11<"", "fallthrough", 201603>,
- C2x<"", "fallthrough", 201904>,
+ C23<"", "fallthrough", 201910>,
CXX11<"clang", "fallthrough">, GCC<"fallthrough">];
// The attribute only applies to a NullStmt, but we have special fix-it
// behavior if applied to a case label.
@@ -1366,12 +1554,12 @@ def FallThrough : StmtAttr {
}
def Likely : StmtAttr {
- let Spellings = [CXX11<"", "likely", 201803>, C2x<"clang", "likely">];
+ let Spellings = [CXX11<"", "likely", 201803>, C23<"clang", "likely">];
let Documentation = [LikelihoodDocs];
}
def Unlikely : StmtAttr {
- let Spellings = [CXX11<"", "unlikely", 201803>, C2x<"clang", "unlikely">];
+ let Spellings = [CXX11<"", "unlikely", 201803>, C23<"clang", "unlikely">];
let Documentation = [LikelihoodDocs];
}
def : MutualExclusions<[Likely, Unlikely]>;
@@ -1379,10 +1567,8 @@ def : MutualExclusions<[Likely, Unlikely]>;
def NoMerge : DeclOrStmtAttr {
let Spellings = [Clang<"nomerge">];
let Documentation = [NoMergeDocs];
- let InheritEvenIfAlreadyPresent = 1;
- let Subjects = SubjectList<[Function, Stmt], ErrorDiag,
- "functions and statements">;
- let SimpleHandler = 1;
+ let Subjects = SubjectList<[Function, Stmt, Var], ErrorDiag,
+ "functions, statements and variables">;
}
def MustTail : StmtAttr {
@@ -1392,28 +1578,31 @@ def MustTail : StmtAttr {
}
def FastCall : DeclOrTypeAttr {
- let Spellings = [GCC<"fastcall">, Keyword<"__fastcall">,
- Keyword<"_fastcall">];
+ let Spellings = [GCC<"fastcall">, CustomKeyword<"__fastcall">,
+ CustomKeyword<"_fastcall">];
// let Subjects = [Function, ObjCMethod];
let Documentation = [FastCallDocs];
}
def RegCall : DeclOrTypeAttr {
- let Spellings = [GCC<"regcall">, Keyword<"__regcall">];
+ let Spellings = [GCC<"regcall">, CustomKeyword<"__regcall">];
let Documentation = [RegCallDocs];
}
def Final : InheritableAttr {
- let Spellings = [Keyword<"final">, Keyword<"sealed">];
- let Accessors = [Accessor<"isSpelledAsSealed", [Keyword<"sealed">]>];
+ let CanPrintOnLeft = 0;
+ let Spellings = [CustomKeyword<"final">, CustomKeyword<"sealed">];
+ let Accessors = [Accessor<"isSpelledAsSealed", [CustomKeyword<"sealed">]>];
let SemaHandler = 0;
- let Documentation = [Undocumented];
+ // Omitted from docs, since this is language syntax, not an attribute, as far
+ // as users are concerned.
+ let Documentation = [InternalOnly];
}
def MinSize : InheritableAttr {
let Spellings = [Clang<"minsize">];
let Subjects = SubjectList<[Function, ObjCMethod], ErrorDiag>;
- let Documentation = [Undocumented];
+ let Documentation = [MinSizeDocs];
}
def FlagEnum : InheritableAttr {
@@ -1469,9 +1658,7 @@ def GNUInline : InheritableAttr {
def Hot : InheritableAttr {
let Spellings = [GCC<"hot">];
let Subjects = SubjectList<[Function]>;
- // An AST node is created for this attribute, but not actually used beyond
- // semantic checking for mutual exclusion with the Cold attribute.
- let Documentation = [Undocumented];
+ let Documentation = [HotFunctionEntryDocs];
let SimpleHandler = 1;
}
def : MutualExclusions<[Hot, Cold]>;
@@ -1499,7 +1686,7 @@ def IBOutletCollection : InheritableAttr {
let Documentation = [Undocumented];
}
-def IFunc : Attr, TargetSpecificAttr<TargetELF> {
+def IFunc : Attr, TargetSpecificAttr<TargetELFOrMachO> {
let Spellings = [GCC<"ifunc">];
let Args = [StringArgument<"Resolver">];
let Subjects = SubjectList<[Function]>;
@@ -1549,7 +1736,7 @@ def MaxFieldAlignment : InheritableAttr {
let Spellings = [];
let Args = [UnsignedArgument<"Alignment">];
let SemaHandler = 0;
- let Documentation = [Undocumented];
+ let Documentation = [InternalOnly];
}
def MayAlias : InheritableAttr {
@@ -1686,11 +1873,14 @@ def ArmMveStrictPolymorphism : TypeAttr, TargetSpecificAttr<TargetARM> {
let Documentation = [ArmMveStrictPolymorphismDocs];
}
-def NoUniqueAddress : InheritableAttr, TargetSpecificAttr<TargetItaniumCXXABI> {
- let Spellings = [CXX11<"", "no_unique_address", 201803>];
+def NoUniqueAddress : InheritableAttr {
let Subjects = SubjectList<[NonBitField], ErrorDiag>;
+ let Spellings = [CXX11<"", "no_unique_address", 201803>, CXX11<"msvc", "no_unique_address", 201803>];
+ let TargetSpecificSpellings = [
+ TargetSpecificSpelling<TargetItaniumCXXABI, [CXX11<"", "no_unique_address", 201803>]>,
+ TargetSpecificSpelling<TargetMicrosoftCXXABI, [CXX11<"msvc", "no_unique_address", 201803>]>,
+ ];
let Documentation = [NoUniqueAddressDocs];
- let SimpleHandler = 1;
}
def ReturnsTwice : InheritableAttr {
@@ -1750,10 +1940,15 @@ def Convergent : InheritableAttr {
let SimpleHandler = 1;
}
-def NoInline : InheritableAttr {
- let Spellings = [GCC<"noinline">, Declspec<"noinline">];
- let Subjects = SubjectList<[Function]>;
- let Documentation = [Undocumented];
+def NoInline : DeclOrStmtAttr {
+ let Spellings = [CustomKeyword<"__noinline__">, GCC<"noinline">,
+ CXX11<"clang", "noinline">, C23<"clang", "noinline">,
+ Declspec<"noinline">];
+ let Accessors = [Accessor<"isClangNoInline", [CXX11<"clang", "noinline">,
+ C23<"clang", "noinline">]>];
+ let Documentation = [NoInlineDocs];
+ let Subjects = SubjectList<[Function, Stmt], WarnDiag,
+ "functions and statements">;
let SimpleHandler = 1;
}
@@ -1775,13 +1970,23 @@ def RISCVInterrupt : InheritableAttr, TargetSpecificAttr<TargetRISCV> {
let Spellings = [GCC<"interrupt">];
let Subjects = SubjectList<[Function]>;
let Args = [EnumArgument<"Interrupt", "InterruptType",
- ["user", "supervisor", "machine"],
- ["user", "supervisor", "machine"],
+ ["supervisor", "machine"],
+ ["supervisor", "machine"],
1>];
let ParseKind = "Interrupt";
let Documentation = [RISCVInterruptDocs];
}
+def RISCVRVVVectorBits : TypeAttr {
+ let Spellings = [GNU<"riscv_rvv_vector_bits">];
+ let Subjects = SubjectList<[TypedefName], ErrorDiag>;
+ let Args = [UnsignedArgument<"NumBits">];
+ let Documentation = [RISCVRVVVectorBitsDocs];
+ let PragmaAttributeSupport = 0;
+ // Represented as VectorType instead.
+ let ASTNode = 0;
+}
+
// This is not a TargetSpecificAttr so that is silently accepted and
// ignored on other targets as encouraged by the OpenCL spec.
//
@@ -1827,6 +2032,11 @@ def AMDGPUNumVGPR : InheritableAttr {
let Subjects = SubjectList<[Function], ErrorDiag, "kernel functions">;
}
+def AMDGPUKernelCall : DeclOrTypeAttr {
+ let Spellings = [Clang<"amdgpu_kernel">];
+ let Documentation = [Undocumented];
+}
+
def BPFPreserveAccessIndex : InheritableAttr,
TargetSpecificAttr<TargetBPF> {
let Spellings = [Clang<"preserve_access_index">];
@@ -1835,6 +2045,30 @@ def BPFPreserveAccessIndex : InheritableAttr,
let LangOpts = [COnly];
}
+def BPFPreserveStaticOffset : InheritableAttr,
+ TargetSpecificAttr<TargetBPF> {
+ let Spellings = [Clang<"preserve_static_offset">];
+ let Subjects = SubjectList<[Record], ErrorDiag>;
+ let Documentation = [BPFPreserveStaticOffsetDocs];
+ let LangOpts = [COnly];
+}
+
+def BTFDeclTag : InheritableAttr {
+ let Spellings = [Clang<"btf_decl_tag">];
+ let Args = [StringArgument<"BTFDeclTag">];
+ let Subjects = SubjectList<[Var, Function, Record, Field, TypedefName],
+ ErrorDiag>;
+ let Documentation = [BTFDeclTagDocs];
+ let LangOpts = [COnly];
+}
+
+def BTFTypeTag : TypeAttr {
+ let Spellings = [Clang<"btf_type_tag">];
+ let Args = [StringArgument<"BTFTypeTag">];
+ let Documentation = [BTFTypeTagDocs];
+ let LangOpts = [COnly];
+}
+
def WebAssemblyExportName : InheritableAttr,
TargetSpecificAttr<TargetWebAssembly> {
let Spellings = [Clang<"export_name">];
@@ -1875,9 +2109,9 @@ def NonNull : InheritableParamAttr {
bool isNonNull(unsigned IdxAST) const {
if (!args_size())
return true;
- return args_end() != std::find_if(
- args_begin(), args_end(),
- [=](const ParamIdx &Idx) { return Idx.getASTIndex() == IdxAST; });
+ return llvm::any_of(args(), [=](const ParamIdx &Idx) {
+ return Idx.getASTIndex() == IdxAST;
+ });
}
}];
// FIXME: We should merge duplicates into a single nonnull attribute.
@@ -1911,22 +2145,22 @@ def PassObjectSize : InheritableParamAttr {
// Nullability type attributes.
def TypeNonNull : TypeAttr {
- let Spellings = [Keyword<"_Nonnull">];
+ let Spellings = [CustomKeyword<"_Nonnull">];
let Documentation = [TypeNonNullDocs];
}
def TypeNullable : TypeAttr {
- let Spellings = [Keyword<"_Nullable">];
+ let Spellings = [CustomKeyword<"_Nullable">];
let Documentation = [TypeNullableDocs];
}
def TypeNullableResult : TypeAttr {
- let Spellings = [Keyword<"_Nullable_result">];
+ let Spellings = [CustomKeyword<"_Nullable_result">];
let Documentation = [TypeNullableResultDocs];
}
def TypeNullUnspecified : TypeAttr {
- let Spellings = [Keyword<"_Null_unspecified">];
+ let Spellings = [CustomKeyword<"_Null_unspecified">];
let Documentation = [TypeNullUnspecifiedDocs];
}
@@ -1934,12 +2168,12 @@ def TypeNullUnspecified : TypeAttr {
// ignored because ARC is not enabled. The usual representation for this
// qualifier is as an ObjCOwnership attribute with Kind == "none".
def ObjCInertUnsafeUnretained : TypeAttr {
- let Spellings = [Keyword<"__unsafe_unretained">];
- let Documentation = [Undocumented];
+ let Spellings = [CustomKeyword<"__unsafe_unretained">];
+ let Documentation = [InternalOnly];
}
def ObjCKindOf : TypeAttr {
- let Spellings = [Keyword<"__kindof">];
+ let Spellings = [CustomKeyword<"__kindof">];
let Documentation = [Undocumented];
}
@@ -1949,6 +2183,13 @@ def NoEscape : Attr {
let Documentation = [NoEscapeDocs];
}
+def MaybeUndef : InheritableAttr {
+ let Spellings = [Clang<"maybe_undef">];
+ let Subjects = SubjectList<[ParmVar]>;
+ let Documentation = [MaybeUndefDocs];
+ let SimpleHandler = 1;
+}
+
def AssumeAligned : InheritableAttr {
let Spellings = [GCC<"assume_aligned">];
let Subjects = SubjectList<[ObjCMethod, Function]>;
@@ -1971,7 +2212,7 @@ def NoReturn : InheritableAttr {
def NoInstrumentFunction : InheritableAttr {
let Spellings = [GCC<"no_instrument_function">];
- let Subjects = SubjectList<[Function]>;
+ let Subjects = SubjectList<[Function, ObjCMethod]>;
let Documentation = [Undocumented];
let SimpleHandler = 1;
}
@@ -1992,18 +2233,33 @@ def NotTailCalled : InheritableAttr {
def : MutualExclusions<[AlwaysInline, NotTailCalled]>;
def NoStackProtector : InheritableAttr {
- let Spellings = [Clang<"no_stack_protector">];
+ let Spellings = [Clang<"no_stack_protector">, CXX11<"gnu", "no_stack_protector">,
+ C23<"gnu", "no_stack_protector">, Declspec<"safebuffers">];
let Subjects = SubjectList<[Function]>;
let Documentation = [NoStackProtectorDocs];
let SimpleHandler = 1;
}
+def StrictGuardStackCheck : InheritableAttr {
+ let Spellings = [Declspec<"strict_gs_check">];
+ let Subjects = SubjectList<[Function]>;
+ let Documentation = [StrictGuardStackCheckDocs];
+ let SimpleHandler = 1;
+}
+
def NoThrow : InheritableAttr {
let Spellings = [GCC<"nothrow">, Declspec<"nothrow">];
let Subjects = SubjectList<[FunctionLike]>;
let Documentation = [NoThrowDocs];
}
+def NoUwtable : InheritableAttr {
+ let Spellings = [Clang<"nouwtable">];
+ let Subjects = SubjectList<[FunctionLike]>;
+ let Documentation = [NoUwtableDocs];
+ let SimpleHandler = 1;
+}
+
def NvWeak : IgnoredAttr {
// No Declspec spelling of this attribute; the CUDA headers use
// __attribute__((nv_weak)) unconditionally. Does not receive an [[]]
@@ -2039,7 +2295,7 @@ def ObjCBridgeRelated : InheritableAttr {
def NSErrorDomain : InheritableAttr {
let Spellings = [GNU<"ns_error_domain">];
let Subjects = SubjectList<[Enum], ErrorDiag>;
- let Args = [DeclArgument<Var, "ErrorDomain">];
+ let Args = [IdentifierArgument<"ErrorDomain">];
let Documentation = [NSErrorDomainDocs];
}
@@ -2217,9 +2473,12 @@ def Overloadable : Attr {
}
def Override : InheritableAttr {
- let Spellings = [Keyword<"override">];
+ let CanPrintOnLeft = 0;
+ let Spellings = [CustomKeyword<"override">];
let SemaHandler = 0;
- let Documentation = [Undocumented];
+ // Omitted from docs, since this is language syntax, not an attribute, as far
+ // as users are concerned.
+ let Documentation = [InternalOnly];
}
def Ownership : InheritableAttr {
@@ -2268,6 +2527,74 @@ def AArch64VectorPcs: DeclOrTypeAttr {
let Documentation = [AArch64VectorPcsDocs];
}
+def AArch64SVEPcs: DeclOrTypeAttr {
+ let Spellings = [Clang<"aarch64_sve_pcs">];
+ let Documentation = [AArch64SVEPcsDocs];
+}
+
+def ArmStreaming : TypeAttr, TargetSpecificAttr<TargetAArch64> {
+ let Spellings = [RegularKeyword<"__arm_streaming">];
+ let Subjects = SubjectList<[HasFunctionProto], ErrorDiag>;
+ let Documentation = [ArmSmeStreamingDocs];
+}
+
+def ArmStreamingCompatible : TypeAttr, TargetSpecificAttr<TargetAArch64> {
+ let Spellings = [RegularKeyword<"__arm_streaming_compatible">];
+ let Subjects = SubjectList<[HasFunctionProto], ErrorDiag>;
+ let Documentation = [ArmSmeStreamingCompatibleDocs];
+}
+
+def ArmNew : InheritableAttr, TargetSpecificAttr<TargetAArch64> {
+ let Spellings = [RegularKeyword<"__arm_new">];
+ let Args = [VariadicStringArgument<"NewArgs">];
+ let Subjects = SubjectList<[Function], ErrorDiag>;
+ let Documentation = [ArmNewDocs];
+
+ let AdditionalMembers = [{
+ bool isNewZA() const {
+ return llvm::is_contained(newArgs(), "za");
+ }
+ bool isNewZT0() const {
+ return llvm::is_contained(newArgs(), "zt0");
+ }
+ }];
+}
+
+def ArmIn : TypeAttr, TargetSpecificAttr<TargetAArch64> {
+ let Spellings = [RegularKeyword<"__arm_in">];
+ let Args = [VariadicStringArgument<"InArgs">];
+ let Subjects = SubjectList<[HasFunctionProto], ErrorDiag>;
+ let Documentation = [ArmInDocs];
+}
+
+def ArmOut : TypeAttr, TargetSpecificAttr<TargetAArch64> {
+ let Spellings = [RegularKeyword<"__arm_out">];
+ let Args = [VariadicStringArgument<"OutArgs">];
+ let Subjects = SubjectList<[HasFunctionProto], ErrorDiag>;
+ let Documentation = [ArmOutDocs];
+}
+
+def ArmInOut : TypeAttr, TargetSpecificAttr<TargetAArch64> {
+ let Spellings = [RegularKeyword<"__arm_inout">];
+ let Args = [VariadicStringArgument<"InOutArgs">];
+ let Subjects = SubjectList<[HasFunctionProto], ErrorDiag>;
+ let Documentation = [ArmInOutDocs];
+}
+
+def ArmPreserves : TypeAttr, TargetSpecificAttr<TargetAArch64> {
+ let Spellings = [RegularKeyword<"__arm_preserves">];
+ let Args = [VariadicStringArgument<"PreserveArgs">];
+ let Subjects = SubjectList<[HasFunctionProto], ErrorDiag>;
+ let Documentation = [ArmPreservesDocs];
+}
+
+def ArmLocallyStreaming : InheritableAttr, TargetSpecificAttr<TargetAArch64> {
+ let Spellings = [RegularKeyword<"__arm_locally_streaming">];
+ let Subjects = SubjectList<[Function], ErrorDiag>;
+ let Documentation = [ArmSmeLocallyStreamingDocs];
+}
+
+
def Pure : InheritableAttr {
let Spellings = [GCC<"pure">];
let Documentation = [Undocumented];
@@ -2293,6 +2620,7 @@ def SwiftAttr : InheritableAttr {
let Spellings = [GNU<"swift_attr">];
let Args = [StringArgument<"Attribute">];
let Documentation = [SwiftAttrDocs];
+ let PragmaAttributeSupport = 1;
}
def SwiftBridge : InheritableAttr {
@@ -2328,6 +2656,22 @@ def SwiftError : InheritableAttr {
let Documentation = [SwiftErrorDocs];
}
+def SwiftImportAsNonGeneric : InheritableAttr {
+ // This attribute has no spellings as it is only ever created implicitly
+ // from API notes.
+ let Spellings = [];
+ let SemaHandler = 0;
+ let Documentation = [InternalOnly];
+}
+
+def SwiftImportPropertyAsAccessors : InheritableAttr {
+ // This attribute has no spellings as it is only ever created implicitly
+ // from API notes.
+ let Spellings = [];
+ let SemaHandler = 0;
+ let Documentation = [InternalOnly];
+}
+
def SwiftName : InheritableAttr {
let Spellings = [GNU<"swift_name">];
let Args = [StringArgument<"Name">];
@@ -2349,6 +2693,31 @@ def SwiftPrivate : InheritableAttr {
let SimpleHandler = 1;
}
+def SwiftVersionedAddition : Attr {
+ // This attribute has no spellings as it is only ever created implicitly
+ // from API notes.
+ let Spellings = [];
+ let Args = [VersionArgument<"Version">, WrappedAttr<"AdditionalAttr">,
+ BoolArgument<"IsReplacedByActive">];
+ let SemaHandler = 0;
+ let Documentation = [InternalOnly];
+}
+
+def SwiftVersionedRemoval : Attr {
+ // This attribute has no spellings as it is only ever created implicitly
+ // from API notes.
+ let Spellings = [];
+ let Args = [VersionArgument<"Version">, UnsignedArgument<"RawKind">,
+ BoolArgument<"IsReplacedByActive">];
+ let SemaHandler = 0;
+ let Documentation = [InternalOnly];
+ let AdditionalMembers = [{
+ attr::Kind getAttrKindToRemove() const {
+ return static_cast<attr::Kind>(getRawKind());
+ }
+ }];
+}
+
def NoDeref : TypeAttr {
let Spellings = [Clang<"noderef">];
let Documentation = [NoDerefDocs];
@@ -2402,7 +2771,7 @@ def PragmaClangBSSSection : InheritableAttr {
let Spellings = [];
let Args = [StringArgument<"Name">];
let Subjects = SubjectList<[GlobalVar], ErrorDiag>;
- let Documentation = [Undocumented];
+ let Documentation = [InternalOnly];
}
def PragmaClangDataSection : InheritableAttr {
@@ -2410,7 +2779,7 @@ def PragmaClangDataSection : InheritableAttr {
let Spellings = [];
let Args = [StringArgument<"Name">];
let Subjects = SubjectList<[GlobalVar], ErrorDiag>;
- let Documentation = [Undocumented];
+ let Documentation = [InternalOnly];
}
def PragmaClangRodataSection : InheritableAttr {
@@ -2418,7 +2787,7 @@ def PragmaClangRodataSection : InheritableAttr {
let Spellings = [];
let Args = [StringArgument<"Name">];
let Subjects = SubjectList<[GlobalVar], ErrorDiag>;
- let Documentation = [Undocumented];
+ let Documentation = [InternalOnly];
}
def PragmaClangRelroSection : InheritableAttr {
@@ -2426,7 +2795,7 @@ def PragmaClangRelroSection : InheritableAttr {
let Spellings = [];
let Args = [StringArgument<"Name">];
let Subjects = SubjectList<[GlobalVar], ErrorDiag>;
- let Documentation = [Undocumented];
+ let Documentation = [InternalOnly];
}
def StrictFP : InheritableAttr {
@@ -2434,7 +2803,7 @@ def StrictFP : InheritableAttr {
// Function uses strict floating point operations.
let Spellings = [];
let Subjects = SubjectList<[Function]>;
- let Documentation = [Undocumented];
+ let Documentation = [InternalOnly];
}
def PragmaClangTextSection : InheritableAttr {
@@ -2442,7 +2811,16 @@ def PragmaClangTextSection : InheritableAttr {
let Spellings = [];
let Args = [StringArgument<"Name">];
let Subjects = SubjectList<[Function], ErrorDiag>;
- let Documentation = [Undocumented];
+ let Documentation = [InternalOnly];
+}
+
+def CodeModel : InheritableAttr, TargetSpecificAttr<TargetLoongArch> {
+ let Spellings = [GCC<"model">];
+ let Args = [EnumArgument<"Model", "llvm::CodeModel::Model",
+ ["normal", "medium", "extreme"], ["Small", "Medium", "Large"],
+ /*opt=*/0, /*fake=*/0, /*isExternalType=*/1>];
+ let Subjects = SubjectList<[NonTLSGlobalVar], ErrorDiag>;
+ let Documentation = [CodeModelDocs];
}
def Sentinel : InheritableAttr {
@@ -2454,7 +2832,8 @@ def Sentinel : InheritableAttr {
}
def StdCall : DeclOrTypeAttr {
- let Spellings = [GCC<"stdcall">, Keyword<"__stdcall">, Keyword<"_stdcall">];
+ let Spellings = [GCC<"stdcall">, CustomKeyword<"__stdcall">,
+ CustomKeyword<"_stdcall">];
// let Subjects = [Function, ObjCMethod];
let Documentation = [StdCallDocs];
}
@@ -2510,9 +2889,10 @@ def SwiftAsyncError : InheritableAttr {
let Documentation = [SwiftAsyncErrorDocs];
}
-def Suppress : StmtAttr {
- let Spellings = [CXX11<"gsl", "suppress">];
+def Suppress : DeclOrStmtAttr {
+ let Spellings = [CXX11<"gsl", "suppress">, Clang<"suppress">];
let Args = [VariadicStringArgument<"DiagnosticIdentifiers">];
+ let Accessors = [Accessor<"isGSL", [CXX11<"gsl", "suppress">]>];
let Documentation = [SuppressDocs];
}
@@ -2523,21 +2903,35 @@ def SysVABI : DeclOrTypeAttr {
}
def ThisCall : DeclOrTypeAttr {
- let Spellings = [GCC<"thiscall">, Keyword<"__thiscall">,
- Keyword<"_thiscall">];
+ let Spellings = [GCC<"thiscall">, CustomKeyword<"__thiscall">,
+ CustomKeyword<"_thiscall">];
// let Subjects = [Function, ObjCMethod];
let Documentation = [ThisCallDocs];
}
def VectorCall : DeclOrTypeAttr {
- let Spellings = [Clang<"vectorcall">, Keyword<"__vectorcall">,
- Keyword<"_vectorcall">];
+ let Spellings = [Clang<"vectorcall">, CustomKeyword<"__vectorcall">,
+ CustomKeyword<"_vectorcall">];
// let Subjects = [Function, ObjCMethod];
let Documentation = [VectorCallDocs];
}
+def ZeroCallUsedRegs : InheritableAttr {
+ let Spellings = [GCC<"zero_call_used_regs">];
+ let Subjects = SubjectList<[Function], ErrorDiag>;
+ let Args = [
+ EnumArgument<"ZeroCallUsedRegs", "ZeroCallUsedRegsKind",
+ ["skip", "used-gpr-arg", "used-gpr", "used-arg", "used",
+ "all-gpr-arg", "all-gpr", "all-arg", "all"],
+ ["Skip", "UsedGPRArg", "UsedGPR", "UsedArg", "Used",
+ "AllGPRArg", "AllGPR", "AllArg", "All"]>
+ ];
+ let Documentation = [ZeroCallUsedRegsDocs];
+}
+
def Pascal : DeclOrTypeAttr {
- let Spellings = [Clang<"pascal">, Keyword<"__pascal">, Keyword<"_pascal">];
+ let Spellings = [Clang<"pascal">, CustomKeyword<"__pascal">,
+ CustomKeyword<"_pascal">];
// let Subjects = [Function, ObjCMethod];
let Documentation = [Undocumented];
}
@@ -2562,16 +2956,17 @@ def PreserveAll : DeclOrTypeAttr {
let Documentation = [PreserveAllDocs];
}
+def M68kRTD: DeclOrTypeAttr {
+ let Spellings = [Clang<"m68k_rtd">];
+ let Documentation = [M68kRTDDocs];
+}
+
def Target : InheritableAttr {
let Spellings = [GCC<"target">];
let Args = [StringArgument<"featuresStr">];
let Subjects = SubjectList<[Function], ErrorDiag>;
let Documentation = [TargetDocs];
let AdditionalMembers = [{
- ParsedTargetAttr parse() const {
- return parse(getFeaturesStr());
- }
-
StringRef getArchitecture() const {
StringRef Features = getFeaturesStr();
if (Features == "default") return {};
@@ -2581,7 +2976,7 @@ def Target : InheritableAttr {
for (auto &Feature : AttrFeatures) {
Feature = Feature.trim();
- if (Feature.startswith("arch="))
+ if (Feature.starts_with("arch="))
return Feature.drop_front(sizeof("arch=") - 1);
}
return "";
@@ -2599,66 +2994,81 @@ def Target : InheritableAttr {
for (auto &Feature : AttrFeatures) {
Feature = Feature.trim();
- if (!Feature.startswith("no-") && !Feature.startswith("arch=") &&
- !Feature.startswith("fpmath=") && !Feature.startswith("tune="))
+ if (!Feature.starts_with("no-") && !Feature.starts_with("arch=") &&
+ !Feature.starts_with("fpmath=") && !Feature.starts_with("tune="))
Out.push_back(Feature);
}
}
- template<class Compare>
- ParsedTargetAttr parse(Compare cmp) const {
- ParsedTargetAttr Attrs = parse();
- llvm::sort(std::begin(Attrs.Features), std::end(Attrs.Features), cmp);
- return Attrs;
- }
-
bool isDefaultVersion() const { return getFeaturesStr() == "default"; }
+ }];
+}
- static ParsedTargetAttr parse(StringRef Features) {
- ParsedTargetAttr Ret;
- if (Features == "default") return Ret;
- SmallVector<StringRef, 1> AttrFeatures;
- Features.split(AttrFeatures, ",");
+def TargetVersion : InheritableAttr {
+ let Spellings = [GCC<"target_version">];
+ let Args = [StringArgument<"NamesStr">];
+ let Subjects = SubjectList<[Function], ErrorDiag>;
+ let Documentation = [TargetVersionDocs];
+ let AdditionalMembers = [{
+ StringRef getName() const { return getNamesStr().trim(); }
+ bool isDefaultVersion() const {
+ return getName() == "default";
+ }
+ void getFeatures(llvm::SmallVectorImpl<StringRef> &Out) const {
+ if (isDefaultVersion()) return;
+ StringRef Features = getName();
+
+ SmallVector<StringRef, 8> AttrFeatures;
+ Features.split(AttrFeatures, "+");
- // Grab the various features and prepend a "+" to turn on the feature to
- // the backend and add them to our existing set of features.
for (auto &Feature : AttrFeatures) {
- // Go ahead and trim whitespace rather than either erroring or
- // accepting it weirdly.
Feature = Feature.trim();
-
- // TODO: Support the fpmath option. It will require checking
- // overall feature validity for the function with the rest of the
- // attributes on the function.
- if (Feature.startswith("fpmath="))
- continue;
-
- if (Feature.startswith("branch-protection=")) {
- Ret.BranchProtection = Feature.split('=').second.trim();
- continue;
- }
-
- // While we're here iterating check for a different target cpu.
- if (Feature.startswith("arch=")) {
- if (!Ret.Architecture.empty())
- Ret.DuplicateArchitecture = true;
- else
- Ret.Architecture = Feature.split("=").second.trim();
- } else if (Feature.startswith("tune=")) {
- if (!Ret.Tune.empty())
- Ret.DuplicateTune = true;
- else
- Ret.Tune = Feature.split("=").second.trim();
- } else if (Feature.startswith("no-"))
- Ret.Features.push_back("-" + Feature.split("-").second.str());
- else
- Ret.Features.push_back("+" + Feature.str());
+ Out.push_back(Feature);
}
- return Ret;
}
}];
}
+def TargetClones : InheritableAttr {
+ let Spellings = [GCC<"target_clones">];
+ let Args = [VariadicStringArgument<"featuresStrs">];
+ let Documentation = [TargetClonesDocs];
+ let Subjects = SubjectList<[Function], ErrorDiag>;
+ let AdditionalMembers = [{
+ StringRef getFeatureStr(unsigned Index) const {
+ return *(featuresStrs_begin() + Index);
+ }
+ // Given an index into the 'featuresStrs' sequence, compute a unique
+ // ID to be used with function name mangling for the associated variant.
+ // This mapping is necessary due to a requirement that the mangling ID
+ // used for the "default" variant be the largest mangling ID in the
+ // variant set. Duplicate variants present in 'featuresStrs' are also
+ // assigned their own unique ID (the mapping is bijective).
+ unsigned getMangledIndex(unsigned Index) const {
+ if (getFeatureStr(Index) == "default")
+ return std::count_if(featuresStrs_begin(), featuresStrs_end(),
+ [](StringRef S) { return S != "default"; });
+
+ return std::count_if(featuresStrs_begin(), featuresStrs_begin() + Index,
+ [](StringRef S) { return S != "default"; });
+ }
+
+ // Given an index into the 'featuresStrs' sequence, determine if the
+ // index corresponds to the first instance of the named variant. This
+ // is used to skip over duplicate variant instances when iterating over
+ // 'featuresStrs'.
+ bool isFirstOfVersion(unsigned Index) const {
+ StringRef FeatureStr(getFeatureStr(Index));
+ return 0 == std::count_if(
+ featuresStrs_begin(), featuresStrs_begin() + Index,
+ [FeatureStr](StringRef S) { return S == FeatureStr; });
+
+ }
+ }];
+}
+
+def : MutualExclusions<[TargetClones, TargetVersion, Target, CPUDispatch, CPUSpecific]>;
+
def MinVectorWidth : InheritableAttr {
let Spellings = [Clang<"min_vector_width">];
let Args = [UnsignedArgument<"VectorWidth">];
@@ -2685,9 +3095,11 @@ def Unavailable : InheritableAttr {
"IR_ARCInitReturnsUnrelated",
"IR_ARCFieldWithOwnership"], 1, /*fake*/ 1>];
let Documentation = [Undocumented];
+ let MeaningfulToClassTemplateDefinition = 1;
}
def DiagnoseIf : InheritableAttr {
+ let CanPrintOnLeft = 0;
// Does not have a [[]] spelling because this attribute requires the ability
// to parse function arguments but the attribute is not written in the type
// position.
@@ -2738,7 +3150,7 @@ def ObjCRequiresPropertyDefs : InheritableAttr {
def Unused : InheritableAttr {
let Spellings = [CXX11<"", "maybe_unused", 201603>, GCC<"unused">,
- C2x<"", "maybe_unused", 201904>];
+ C23<"", "maybe_unused", 202106>];
let Subjects = SubjectList<[Var, ObjCIvar, Type, Enum, EnumConstant, Label,
Field, ObjCMethod, FunctionLike]>;
let Documentation = [WarnMaybeUnusedDocs];
@@ -2832,10 +3244,10 @@ def WarnUnused : InheritableAttr {
def WarnUnusedResult : InheritableAttr {
let Spellings = [CXX11<"", "nodiscard", 201907>,
- C2x<"", "nodiscard", 201904>,
+ C23<"", "nodiscard", 202003>,
CXX11<"clang", "warn_unused_result">,
GCC<"warn_unused_result">];
- let Subjects = SubjectList<[ObjCMethod, Enum, Record, FunctionLike]>;
+ let Subjects = SubjectList<[ObjCMethod, Enum, Record, FunctionLike, TypedefName]>;
let Args = [StringArgument<"Message", 1>];
let Documentation = [WarnUnusedResultsDocs];
let AdditionalMembers = [{
@@ -2850,7 +3262,7 @@ def WarnUnusedResult : InheritableAttr {
def Weak : InheritableAttr {
let Spellings = [GCC<"weak">];
let Subjects = SubjectList<[Var, Function, CXXRecord]>;
- let Documentation = [Undocumented];
+ let Documentation = [WeakDocs];
let SimpleHandler = 1;
}
@@ -2881,7 +3293,7 @@ def AnyX86Interrupt : InheritableAttr, TargetSpecificAttr<TargetAnyX86> {
let Subjects = SubjectList<[HasFunctionProto]>;
let ParseKind = "Interrupt";
let HasCustomParsing = 1;
- let Documentation = [Undocumented];
+ let Documentation = [AnyX86InterruptDocs];
}
def AnyX86NoCallerSavedRegisters : InheritableAttr,
@@ -2940,6 +3352,13 @@ def NoSanitizeSpecific : InheritableAttr {
let ASTNode = 0;
}
+def DisableSanitizerInstrumentation : InheritableAttr {
+ let Spellings = [Clang<"disable_sanitizer_instrumentation">];
+ let Subjects = SubjectList<[Function, ObjCMethod, GlobalVar]>;
+ let Documentation = [DisableSanitizerInstrumentationDocs];
+ let SimpleHandler = 1;
+}
+
def CFICanonicalJumpTable : InheritableAttr {
let Spellings = [Clang<"cfi_canonical_jump_table">];
let Subjects = SubjectList<[Function], ErrorDiag>;
@@ -3325,6 +3744,14 @@ def : MutualExclusions<[Owner, Pointer]>;
// Microsoft-related attributes
+def MSConstexpr : InheritableAttr {
+ let LangOpts = [MicrosoftExt];
+ let Spellings = [CXX11<"msvc", "constexpr">];
+ let Subjects = SubjectList<[Function, ReturnStmt], ErrorDiag,
+ "functions and return statements">;
+ let Documentation = [MSConstexprDocs];
+}
+
def MSNoVTable : InheritableAttr, TargetSpecificAttr<TargetMicrosoftCXXABI> {
let Spellings = [Declspec<"novtable">];
let Subjects = SubjectList<[CXXRecord]>;
@@ -3342,10 +3769,10 @@ def MSAllocator : InheritableAttr {
let Documentation = [MSAllocatorDocs];
}
-def CFGuard : InheritableAttr {
+def CFGuard : InheritableAttr, TargetSpecificAttr<TargetWindows> {
// Currently only the __declspec(guard(nocf)) modifier is supported. In future
// we might also want to support __declspec(guard(suppress)).
- let Spellings = [Declspec<"guard">];
+ let Spellings = [Declspec<"guard">, Clang<"guard">];
let Subjects = SubjectList<[Function]>;
let Args = [EnumArgument<"Guard", "GuardArg", ["nocf"], ["nocf"]>];
let Documentation = [CFGuardDocs];
@@ -3372,7 +3799,7 @@ def DLLExportStaticLocal : InheritableAttr, TargetSpecificAttr<TargetHasDLLImpor
// the function has local static variables, the function is dllexported too.
let Spellings = [];
let Subjects = SubjectList<[Function]>;
- let Documentation = [Undocumented];
+ let Documentation = [InternalOnly];
}
def DLLImport : InheritableAttr, TargetSpecificAttr<TargetHasDLLImportExport> {
@@ -3398,7 +3825,7 @@ def DLLImportStaticLocal : InheritableAttr, TargetSpecificAttr<TargetHasDLLImpor
// attribute is used to determine whether the variables are imported or not.
let Spellings = [];
let Subjects = SubjectList<[Function]>;
- let Documentation = [Undocumented];
+ let Documentation = [InternalOnly];
}
def SelectAny : InheritableAttr {
@@ -3415,37 +3842,37 @@ def Thread : Attr {
}
def Win64 : IgnoredAttr {
- let Spellings = [Keyword<"__w64">];
+ let Spellings = [CustomKeyword<"__w64">];
let LangOpts = [MicrosoftExt];
}
def Ptr32 : TypeAttr {
- let Spellings = [Keyword<"__ptr32">];
+ let Spellings = [CustomKeyword<"__ptr32">];
let Documentation = [Ptr32Docs];
}
def Ptr64 : TypeAttr {
- let Spellings = [Keyword<"__ptr64">];
+ let Spellings = [CustomKeyword<"__ptr64">];
let Documentation = [Ptr64Docs];
}
def SPtr : TypeAttr {
- let Spellings = [Keyword<"__sptr">];
+ let Spellings = [CustomKeyword<"__sptr">];
let Documentation = [SPtrDocs];
}
def UPtr : TypeAttr {
- let Spellings = [Keyword<"__uptr">];
+ let Spellings = [CustomKeyword<"__uptr">];
let Documentation = [UPtrDocs];
}
def MSInheritance : InheritableAttr {
let LangOpts = [MicrosoftExt];
let Args = [DefaultBoolArgument<"BestCase", /*default*/1, /*fake*/1>];
- let Spellings = [Keyword<"__single_inheritance">,
- Keyword<"__multiple_inheritance">,
- Keyword<"__virtual_inheritance">,
- Keyword<"__unspecified_inheritance">];
+ let Spellings = [CustomKeyword<"__single_inheritance">,
+ CustomKeyword<"__multiple_inheritance">,
+ CustomKeyword<"__virtual_inheritance">,
+ CustomKeyword<"__unspecified_inheritance">];
let AdditionalMembers = [{
MSInheritanceModel getInheritanceModel() const {
// The spelling enum should agree with MSInheritanceModel.
@@ -3464,7 +3891,7 @@ def MSVtorDisp : InheritableAttr {
let AdditionalMembers = [{
MSVtorDispMode getVtorDispMode() const { return MSVtorDispMode(vdm); }
}];
- let Documentation = [Undocumented];
+ let Documentation = [InternalOnly];
}
def InitSeg : Attr {
@@ -3556,21 +3983,21 @@ def CapturedRecord : InheritableAttr {
// This attribute has no spellings as it is only ever created implicitly.
let Spellings = [];
let SemaHandler = 0;
- let Documentation = [Undocumented];
+ let Documentation = [InternalOnly];
}
def OMPThreadPrivateDecl : InheritableAttr {
// This attribute has no spellings as it is only ever created implicitly.
let Spellings = [];
let SemaHandler = 0;
- let Documentation = [Undocumented];
+ let Documentation = [InternalOnly];
}
def OMPCaptureNoInit : InheritableAttr {
// This attribute has no spellings as it is only ever created implicitly.
let Spellings = [];
let SemaHandler = 0;
- let Documentation = [Undocumented];
+ let Documentation = [InternalOnly];
}
def OMPCaptureKind : Attr {
@@ -3578,7 +4005,7 @@ def OMPCaptureKind : Attr {
let Spellings = [];
let SemaHandler = 0;
let Args = [UnsignedArgument<"CaptureKindVal">];
- let Documentation = [Undocumented];
+ let Documentation = [InternalOnly];
let AdditionalMembers = [{
llvm::omp::Clause getCaptureKind() const {
return static_cast<llvm::omp::Clause>(getCaptureKindVal());
@@ -3591,7 +4018,7 @@ def OMPReferencedVar : Attr {
let Spellings = [];
let SemaHandler = 0;
let Args = [ExprArgument<"Ref">];
- let Documentation = [Undocumented];
+ let Documentation = [InternalOnly];
}
def OMPDeclareSimdDecl : Attr {
@@ -3622,20 +4049,22 @@ def OMPDeclareTargetDecl : InheritableAttr {
let Documentation = [OMPDeclareTargetDocs];
let Args = [
EnumArgument<"MapType", "MapTypeTy",
- [ "to", "link" ],
- [ "MT_To", "MT_Link" ]>,
+ [ "to", "enter", "link" ],
+ [ "MT_To", "MT_Enter", "MT_Link" ]>,
EnumArgument<"DevType", "DevTypeTy",
[ "host", "nohost", "any" ],
[ "DT_Host", "DT_NoHost", "DT_Any" ]>,
+ ExprArgument<"IndirectExpr">,
+ BoolArgument<"Indirect">,
UnsignedArgument<"Level">
];
let AdditionalMembers = [{
void printPrettyPragma(raw_ostream &OS, const PrintingPolicy &Policy) const;
- static llvm::Optional<MapTypeTy>
+ static std::optional<MapTypeTy>
isDeclareTargetDeclaration(const ValueDecl *VD);
- static llvm::Optional<OMPDeclareTargetDeclAttr*> getActiveAttr(const ValueDecl *VD);
- static llvm::Optional<DevTypeTy> getDeviceType(const ValueDecl *VD);
- static llvm::Optional<SourceLocation> getLocation(const ValueDecl *VD);
+ static std::optional<OMPDeclareTargetDeclAttr*> getActiveAttr(const ValueDecl *VD);
+ static std::optional<DevTypeTy> getDeviceType(const ValueDecl *VD);
+ static std::optional<SourceLocation> getLocation(const ValueDecl *VD);
}];
}
@@ -3659,9 +4088,10 @@ def OMPAllocateDecl : InheritableAttr {
"OMPCGroupMemAlloc", "OMPPTeamMemAlloc", "OMPThreadMemAlloc",
"OMPUserDefinedMemAlloc"
]>,
- ExprArgument<"Allocator">
+ ExprArgument<"Allocator">,
+ ExprArgument<"Alignment">
];
- let Documentation = [Undocumented];
+ let Documentation = [InternalOnly];
}
def OMPDeclareVariant : InheritableAttr {
@@ -3674,11 +4104,21 @@ def OMPDeclareVariant : InheritableAttr {
let Args = [
ExprArgument<"VariantFuncRef">,
OMPTraitInfoArgument<"TraitInfos">,
+ VariadicExprArgument<"AdjustArgsNothing">,
+ VariadicExprArgument<"AdjustArgsNeedDevicePtr">,
+ VariadicOMPInteropInfoArgument<"AppendArgs">,
];
let AdditionalMembers = [{
OMPTraitInfo &getTraitInfo() { return *traitInfos; }
void printPrettyPragma(raw_ostream & OS, const PrintingPolicy &Policy)
const;
+ static StringRef getInteropTypeString(const OMPInteropInfo *I) {
+ if (I->IsTarget && I->IsTargetSync)
+ return "target,targetsync";
+ if (I->IsTarget)
+ return "target";
+ return "targetsync";
+ }
}];
}
@@ -3800,17 +4240,31 @@ def ReleaseHandle : InheritableParamAttr {
let Documentation = [ReleaseHandleDocs];
}
+def UnsafeBufferUsage : InheritableAttr {
+ let Spellings = [Clang<"unsafe_buffer_usage">];
+ let Subjects = SubjectList<[Function]>;
+ let Documentation = [UnsafeBufferUsageDocs];
+}
+
+def DiagnoseAsBuiltin : InheritableAttr {
+ let Spellings = [Clang<"diagnose_as_builtin">];
+ let Args = [DeclArgument<Function, "Function">,
+ VariadicUnsignedArgument<"ArgIndices">];
+ let Subjects = SubjectList<[Function]>;
+ let Documentation = [DiagnoseAsBuiltinDocs];
+}
+
def Builtin : InheritableAttr {
let Spellings = [];
let Args = [UnsignedArgument<"ID">];
let Subjects = SubjectList<[Function]>;
let SemaHandler = 0;
- let Documentation = [Undocumented];
+ let Documentation = [InternalOnly];
}
def EnforceTCB : InheritableAttr {
let Spellings = [Clang<"enforce_tcb">];
- let Subjects = SubjectList<[Function]>;
+ let Subjects = SubjectList<[Function, ObjCMethod]>;
let Args = [StringArgument<"TCBName">];
let Documentation = [EnforceTCBDocs];
bit InheritEvenIfAlreadyPresent = 1;
@@ -3818,8 +4272,191 @@ def EnforceTCB : InheritableAttr {
def EnforceTCBLeaf : InheritableAttr {
let Spellings = [Clang<"enforce_tcb_leaf">];
- let Subjects = SubjectList<[Function]>;
+ let Subjects = SubjectList<[Function, ObjCMethod]>;
let Args = [StringArgument<"TCBName">];
let Documentation = [EnforceTCBLeafDocs];
bit InheritEvenIfAlreadyPresent = 1;
}
+
+def Error : InheritableAttr {
+ let Spellings = [GCC<"error">, GCC<"warning">];
+ let Accessors = [Accessor<"isError", [GCC<"error">]>,
+ Accessor<"isWarning", [GCC<"warning">]>];
+ let Args = [StringArgument<"UserDiagnostic">];
+ let Subjects = SubjectList<[Function], ErrorDiag>;
+ let Documentation = [ErrorAttrDocs];
+}
+
+def HLSLNumThreads: InheritableAttr {
+ let Spellings = [Microsoft<"numthreads">];
+ let Args = [IntArgument<"X">, IntArgument<"Y">, IntArgument<"Z">];
+ let Subjects = SubjectList<[HLSLEntry]>;
+ let LangOpts = [HLSL];
+ let Documentation = [NumThreadsDocs];
+}
+
+def HLSLSV_GroupIndex: HLSLAnnotationAttr {
+ let Spellings = [HLSLSemantic<"SV_GroupIndex">];
+ let Subjects = SubjectList<[ParmVar, GlobalVar]>;
+ let LangOpts = [HLSL];
+ let Documentation = [HLSLSV_GroupIndexDocs];
+}
+
+def HLSLResourceBinding: InheritableAttr {
+ let Spellings = [HLSLSemantic<"register">];
+ let Subjects = SubjectList<[HLSLBufferObj, ExternalGlobalVar]>;
+ let LangOpts = [HLSL];
+ let Args = [StringArgument<"Slot">, StringArgument<"Space", 1>];
+ let Documentation = [HLSLResourceBindingDocs];
+}
+
+def HLSLSV_DispatchThreadID: HLSLAnnotationAttr {
+ let Spellings = [HLSLSemantic<"SV_DispatchThreadID">];
+ let Subjects = SubjectList<[ParmVar, Field]>;
+ let LangOpts = [HLSL];
+ let Documentation = [HLSLSV_DispatchThreadIDDocs];
+}
+
+def HLSLShader : InheritableAttr {
+ let Spellings = [Microsoft<"shader">];
+ let Subjects = SubjectList<[HLSLEntry]>;
+ let LangOpts = [HLSL];
+ let Args = [
+ EnumArgument<"Type", "ShaderType",
+ ["pixel", "vertex", "geometry", "hull", "domain", "compute",
+ "raygeneration", "intersection", "anyhit", "closesthit",
+ "miss", "callable", "mesh", "amplification"],
+ ["Pixel", "Vertex", "Geometry", "Hull", "Domain", "Compute",
+ "RayGeneration", "Intersection", "AnyHit", "ClosestHit",
+ "Miss", "Callable", "Mesh", "Amplification"]>
+ ];
+ let Documentation = [HLSLSV_ShaderTypeAttrDocs];
+}
+
+def HLSLResource : InheritableAttr {
+ let Spellings = [];
+ let Subjects = SubjectList<[Struct]>;
+ let LangOpts = [HLSL];
+ let Args = [EnumArgument<"ResourceClass", "llvm::hlsl::ResourceClass",
+ ["SRV", "UAV", "CBuffer", "Sampler"],
+ ["SRV", "UAV", "CBuffer", "Sampler"],
+ /*opt=*/0, /*fake=*/0, /*isExternalType=*/1>,
+ EnumArgument<"ResourceKind", "llvm::hlsl::ResourceKind",
+ ["Texture1D", "Texture2D", "Texture2DMS",
+ "Texture3D", "TextureCube", "Texture1DArray",
+ "Texture2DArray", "Texture2DMSArray",
+ "TextureCubeArray", "TypedBuffer", "RawBuffer",
+ "StructuredBuffer", "CBuffer", "Sampler",
+ "TBuffer", "RTAccelerationStructure",
+ "FeedbackTexture2D", "FeedbackTexture2DArray"],
+ ["Texture1D", "Texture2D", "Texture2DMS",
+ "Texture3D", "TextureCube", "Texture1DArray",
+ "Texture2DArray", "Texture2DMSArray",
+ "TextureCubeArray", "TypedBuffer", "RawBuffer",
+ "StructuredBuffer", "CBuffer", "Sampler",
+ "TBuffer", "RTAccelerationStructure",
+ "FeedbackTexture2D", "FeedbackTexture2DArray"],
+ /*opt=*/0, /*fake=*/0, /*isExternalType=*/1>,
+ DefaultBoolArgument<"isROV", /*default=*/0>
+ ];
+ let Documentation = [InternalOnly];
+}
+
+def HLSLGroupSharedAddressSpace : TypeAttr {
+ let Spellings = [CustomKeyword<"groupshared">];
+ let Subjects = SubjectList<[Var]>;
+ let Documentation = [HLSLGroupSharedAddressSpaceDocs];
+}
+
+def HLSLParamModifier : TypeAttr {
+ let Spellings = [CustomKeyword<"in">, CustomKeyword<"inout">, CustomKeyword<"out">];
+ let Accessors = [Accessor<"isIn", [CustomKeyword<"in">]>,
+ Accessor<"isInOut", [CustomKeyword<"inout">]>,
+ Accessor<"isOut", [CustomKeyword<"out">]>,
+ Accessor<"isAnyOut", [CustomKeyword<"out">, CustomKeyword<"inout">]>,
+ Accessor<"isAnyIn", [CustomKeyword<"in">, CustomKeyword<"inout">]>];
+ let Subjects = SubjectList<[ParmVar]>;
+ let Documentation = [HLSLParamQualifierDocs];
+ let Args = [DefaultBoolArgument<"MergedSpelling", /*default*/0, /*fake*/1>];
+}
+
+def RandomizeLayout : InheritableAttr {
+ let Spellings = [GCC<"randomize_layout">];
+ let Subjects = SubjectList<[Record]>;
+ let Documentation = [ClangRandomizeLayoutDocs];
+ let LangOpts = [COnly];
+}
+
+def NoRandomizeLayout : InheritableAttr {
+ let Spellings = [GCC<"no_randomize_layout">];
+ let Subjects = SubjectList<[Record]>;
+ let Documentation = [ClangRandomizeLayoutDocs];
+ let LangOpts = [COnly];
+}
+def : MutualExclusions<[RandomizeLayout, NoRandomizeLayout]>;
+
+def FunctionReturnThunks : InheritableAttr,
+ TargetSpecificAttr<TargetAnyX86> {
+ let Spellings = [GCC<"function_return">];
+ let Args = [EnumArgument<"ThunkType", "Kind",
+ ["keep", "thunk-extern"],
+ ["Keep", "Extern"]
+ >];
+ let Subjects = SubjectList<[Function]>;
+ let Documentation = [FunctionReturnThunksDocs];
+}
+
+def WebAssemblyFuncref : TypeAttr, TargetSpecificAttr<TargetWebAssembly> {
+ let Spellings = [CustomKeyword<"__funcref">];
+ let Documentation = [WebAssemblyExportNameDocs];
+ let Subjects = SubjectList<[FunctionPointer], ErrorDiag>;
+}
+
+def ReadOnlyPlacement : InheritableAttr {
+ let Spellings = [Clang<"enforce_read_only_placement">];
+ let Subjects = SubjectList<[Record]>;
+ let Documentation = [ReadOnlyPlacementDocs];
+}
+
+def AvailableOnlyInDefaultEvalMethod : InheritableAttr {
+ let Spellings = [Clang<"available_only_in_default_eval_method">];
+ let Subjects = SubjectList<[TypedefName], ErrorDiag>;
+ let Documentation = [Undocumented];
+}
+
+def PreferredType: InheritableAttr {
+ let Spellings = [Clang<"preferred_type">];
+ let Subjects = SubjectList<[BitField], ErrorDiag>;
+ let Args = [TypeArgument<"Type", 1>];
+ let Documentation = [PreferredTypeDocumentation];
+}
+
+def CodeAlign: StmtAttr {
+ let Spellings = [Clang<"code_align">];
+ let Subjects = SubjectList<[ForStmt, CXXForRangeStmt, WhileStmt, DoStmt],
+ ErrorDiag, "'for', 'while', and 'do' statements">;
+ let Args = [ExprArgument<"Alignment">];
+ let Documentation = [CodeAlignAttrDocs];
+ let AdditionalMembers = [{
+ static constexpr int MinimumAlignment = 1;
+ static constexpr int MaximumAlignment = 4096;
+ }];
+}
+
+def CountedBy : InheritableAttr {
+ let Spellings = [Clang<"counted_by">];
+ let Subjects = SubjectList<[Field]>;
+ let Args = [IdentifierArgument<"CountedByField">];
+ let Documentation = [CountedByDocs];
+ let LangOpts = [COnly];
+ // FIXME: This is ugly. Let using a DeclArgument would be nice, but a Decl
+ // isn't yet available due to the fact that we're still parsing the
+ // structure. Maybe that code could be changed sometime in the future.
+ code AdditionalMembers = [{
+ private:
+ SourceRange CountedByFieldLoc;
+ public:
+ SourceRange getCountedByFieldLoc() const { return CountedByFieldLoc; }
+ void setCountedByFieldLoc(SourceRange Loc) { CountedByFieldLoc = Loc; }
+ }];
+}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td b/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td
index c265a877e3b1..e02a1201e2ad 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/AttrDocs.td
@@ -22,7 +22,7 @@
// Windows (from within the clang\docs directory):
// make.bat html
// Non-Windows (from within the clang\docs directory):
-// make -f Makefile.sphinx html
+// sphinx-build -b html _build/html
def GlobalDocumentation {
code Intro =[{..
@@ -57,6 +57,15 @@ global variable or function should be in after translation.
let Heading = "section, __declspec(allocate)";
}
+def CodeModelDocs : Documentation {
+ let Category = DocCatVariable;
+ let Content = [{
+The ``model`` attribute allows overriding the translation unit's
+code model (specified by ``-mcmodel``) for a specific global variable.
+ }];
+ let Heading = "model";
+}
+
def UsedDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
@@ -127,6 +136,10 @@ relative ordering of values is important. For example:
``Obj2`` will be initialized *before* ``Obj1`` despite the usual order of
initialization being the opposite.
+On Windows, ``init_seg(compiler)`` is represented with a priority of 200 and
+``init_seg(library)`` is represented with a priority of 400. ``init_seg(user)``
+uses the default 65535 priority.
+
This attribute is only supported for C++ and Objective-C++ and is ignored in
other language modes. Currently, this attribute is not implemented on z/OS.
}];
@@ -187,6 +200,10 @@ primary use is for COFF object files which explicitly specify what interfaces
are imported from external modules. See the dllimport_ documentation on MSDN
for more information.
+Note that a dllimport function may still be inlined, if its definition is
+available and it doesn't reference any non-dllimport functions or global
+variables.
+
.. _dllimport: https://msdn.microsoft.com/en-us/library/3y1sfaz2.aspx
}];
}
@@ -253,6 +270,28 @@ applies to copies of the block. For example:
}];
}
+def MaybeUndefDocs : Documentation {
+ let Category = DocCatVariable;
+ let Content = [{
+The ``maybe_undef`` attribute can be placed on a function parameter. It indicates
+that the parameter is allowed to use undef values. It informs the compiler
+to insert a freeze LLVM IR instruction on the function parameter.
+Please note that this is an attribute that is used as an internal
+implementation detail and not intended to be used by external users.
+
+In languages HIP, CUDA etc., some functions have multi-threaded semantics and
+it is enough for only one or some threads to provide defined arguments.
+Depending on semantics, undef arguments in some threads don't produce
+undefined results in the function call. Since, these functions accept undefined
+arguments, ``maybe_undef`` attribute can be placed.
+
+Sample usage:
+.. code-block:: c
+
+ void maybeundeffunc(int __attribute__((maybe_undef))param);
+ }];
+}
+
def CarriesDependencyDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
@@ -281,6 +320,17 @@ features that are required for the function to be called. The result of this is
that future processors execute the most restrictive version of the function the
new processor can execute.
+In addition, unlike the ICC implementation of this feature, the selection of the
+version does not consider the manufacturer or microarchitecture of the processor.
+It tests solely the list of features that are both supported by the specified
+processor and present in the compiler-rt library. This can be surprising at times,
+as the runtime processor may be from a completely different manufacturer, as long
+as it supports the same feature set.
+
+This can additionally be surprising, as some processors are indistringuishable from
+others based on the list of testable features. When this happens, the variant
+is selected in an unspecified manner.
+
Function versions are defined with ``cpu_specific``, which takes one or more CPU
names as a parameter. For example:
@@ -405,6 +455,71 @@ The SYCL kernel in the previous code sample meets these expectations.
}];
}
+def SYCLSpecialClassDocs : Documentation {
+ let Category = DocCatStmt;
+ let Content = [{
+SYCL defines some special classes (accessor, sampler, and stream) which require
+specific handling during the generation of the SPIR entry point.
+The ``__attribute__((sycl_special_class))`` attribute is used in SYCL
+headers to indicate that a class or a struct needs a specific handling when
+it is passed from host to device.
+Special classes will have a mandatory ``__init`` method and an optional
+``__finalize`` method (the ``__finalize`` method is used only with the
+``stream`` type). Kernel parameters types are extract from the ``__init`` method
+parameters. The kernel function arguments list is derived from the
+arguments of the ``__init`` method. The arguments of the ``__init`` method are
+copied into the kernel function argument list and the ``__init`` and
+``__finalize`` methods are called at the beginning and the end of the kernel,
+respectively.
+The ``__init`` and ``__finalize`` methods must be defined inside the
+special class.
+Please note that this is an attribute that is used as an internal
+implementation detail and not intended to be used by external users.
+
+The syntax of the attribute is as follows:
+
+.. code-block:: text
+
+ class __attribute__((sycl_special_class)) accessor {};
+ class [[clang::sycl_special_class]] accessor {};
+
+This is a code example that illustrates the use of the attribute:
+
+.. code-block:: c++
+
+ class __attribute__((sycl_special_class)) SpecialType {
+ int F1;
+ int F2;
+ void __init(int f1) {
+ F1 = f1;
+ F2 = f1;
+ }
+ void __finalize() {}
+ public:
+ SpecialType() = default;
+ int getF2() const { return F2; }
+ };
+
+ int main () {
+ SpecialType T;
+ cgh.single_task([=] {
+ T.getF2();
+ });
+ }
+
+This would trigger the following kernel entry point in the AST:
+
+.. code-block:: c++
+
+ void __sycl_kernel(int f1) {
+ SpecialType T;
+ T.__init(f1);
+ ...
+ T.__finalize()
+ }
+ }];
+}
+
def C11NoReturnDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
@@ -418,10 +533,14 @@ pointer type.
def CXX11NoReturnDocs : Documentation {
let Category = DocCatFunction;
+ let Heading = "noreturn, _Noreturn";
let Content = [{
A function declared as ``[[noreturn]]`` shall not return to its caller. The
compiler will generate a diagnostic for a function declared as ``[[noreturn]]``
that appears to be capable of returning to its caller.
+
+The ``[[_Noreturn]]`` spelling is deprecated and only exists to ease code
+migration for code using ``[[noreturn]]`` after including ``<stdnoreturn.h>``.
}];
}
@@ -429,17 +548,68 @@ def NoMergeDocs : Documentation {
let Category = DocCatStmt;
let Content = [{
If a statement is marked ``nomerge`` and contains call expressions, those call
-expressions inside the statement will not be merged during optimization. This
+expressions inside the statement will not be merged during optimization. This
attribute can be used to prevent the optimizer from obscuring the source
location of certain calls. For example, it will prevent tail merging otherwise
identical code sequences that raise an exception or terminate the program. Tail
merging normally reduces the precision of source location information, making
stack traces less useful for debugging. This attribute gives the user control
-over the tradeoff between code size and debug information precision.
+over the tradeoff between code size and debug information precision.
+
+``nomerge`` attribute can also be used as function attribute to prevent all
+calls to the specified function from merging. It has no effect on indirect
+calls to such functions. For example:
+
+.. code-block:: c++
+
+ [[clang::nomerge]] void foo(int) {}
+
+ void bar(int x) {
+ auto *ptr = foo;
+ if (x) foo(1); else foo(2); // will not be merged
+ if (x) ptr(1); else ptr(2); // indirect call, can be merged
+ }
+
+``nomerge`` attribute can also be used for pointers to functions to
+prevent calls through such pointer from merging. In such case the
+effect applies only to a specific function pointer. For example:
+
+.. code-block:: c++
+
+ [[clang::nomerge]] void (*foo)(int);
+
+ void bar(int x) {
+ auto *ptr = foo;
+ if (x) foo(1); else foo(2); // will not be merged
+ if (x) ptr(1); else ptr(2); // 'ptr' has no 'nomerge' attribute, can be merged
+ }
+ }];
+}
+
+def NoInlineDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+This function attribute suppresses the inlining of a function at the call sites
+of the function.
+
+``[[clang::noinline]]`` spelling can be used as a statement attribute; other
+spellings of the attribute are not supported on statements. If a statement is
+marked ``[[clang::noinline]]`` and contains calls, those calls inside the
+statement will not be inlined by the compiler.
+
+``__noinline__`` can be used as a keyword in CUDA/HIP languages. This is to
+avoid diagnostics due to usage of ``__attribute__((__noinline__))``
+with ``__noinline__`` defined as a macro as ``__attribute__((noinline))``.
+
+.. code-block:: c
+
+ int example(void) {
+ int r;
+ [[clang::noinline]] foo();
+ [[clang::noinline]] r = bar();
+ return r;
+ }
-``nomerge`` attribute can also be used as function attribute to prevent all
-calls to the specified function from merging. It has no effect on indirect
-calls.
}];
}
@@ -466,6 +636,9 @@ Any variables in scope, including all arguments to the function and the
return value must be trivially destructible. The calling convention of the
caller and callee must match, and they must not be variadic functions or have
old style K&R C function declarations.
+
+``clang::musttail`` provides assurances that the tail call can be optimized on
+all targets, not just one.
}];
}
@@ -999,7 +1172,7 @@ caveats to this use of name mangling:
* The ``overloadable`` attribute has almost no meaning when used in C++,
because names will already be mangled and functions are already overloadable.
However, when an ``overloadable`` function occurs within an ``extern "C"``
- linkage specification, it's name *will* be mangled in the same way as it
+ linkage specification, its name *will* be mangled in the same way as it
would in C.
For the purpose of backwards compatibility, at most one function with the same
@@ -1241,6 +1414,10 @@ Example usage:
``[[no_unique_address]]`` is a standard C++20 attribute. Clang supports its use
in C++11 onwards.
+
+On MSVC targets, ``[[no_unique_address]]`` is ignored; use
+``[[msvc::no_unique_address]]`` instead. Currently there is no guarantee of ABI
+compatibility or stability with MSVC.
}];
}
@@ -1445,6 +1622,10 @@ attributes are ignored. Supported platforms are:
Apple's watchOS operating system. The minimum deployment target is specified by
the ``-mwatchos-version-min=*version*`` command-line argument.
+``driverkit``
+ Apple's DriverKit userspace kernel extensions. The minimum deployment target
+ is specified as part of the triple.
+
A declaration can typically be used even when deploying back to a platform
version prior to when the declaration was introduced. When this happens, the
declaration is `weakly linked
@@ -1609,6 +1790,19 @@ defined_in=\ *string-literal*
source containers are modules, so ``defined_in`` should specify the Swift
module name.
+USR=\ *string-literal*
+ String that specifies a unified symbol resolution (USR) value for this
+ declaration. USR string uniquely identifies this particular declaration, and
+ is typically used when constructing an index of a codebase.
+ The USR value in this attribute is expected to be generated by an external
+ compiler that compiled the native declaration using its original source
+ language. The exact format of the USR string and its other attributes
+ are determined by the specification of this declaration's source language.
+ When not specified, Clang's indexer will use the Clang USR for this symbol.
+ User can query to see if Clang supports the use of the ``USR`` clause in
+ the ``external_source_symbol`` attribute with
+ ``__has_attribute(external_source_symbol) >= 20230206``.
+
generated_declaration
This declaration was automatically generated by some tool.
@@ -1677,7 +1871,8 @@ The attribute may be applied to the declaration of a class, a typedef, a
variable, a function or method, a function parameter, an enumeration, an
enumerator, a non-static data member, or a label.
-.. code-block: c++
+.. code-block:: c++
+
#include <cassert>
[[maybe_unused]] void f([[maybe_unused]] bool thing1,
@@ -1706,7 +1901,8 @@ literal contents) are allowed. If there are redeclarations of the entity with
differing string literals, it is unspecified which one will be used by Clang
in any resulting diagnostics.
-.. code-block: c++
+.. code-block:: c++
+
struct [[nodiscard]] error_info { /*...*/ };
error_info enable_missile_safety_mode();
@@ -1723,7 +1919,8 @@ marked with ``[[nodiscard]]`` or a constructor of a type marked
``[[nodiscard]]`` will also diagnose. This also applies to type conversions that
use the annotated ``[[nodiscard]]`` constructor or result in an annotated type.
-.. code-block: c++
+.. code-block:: c++
+
struct [[nodiscard]] marked_type {/*..*/ };
struct marked_ctor {
[[nodiscard]] marked_ctor();
@@ -2012,6 +2209,71 @@ struct or union, similar to clang ``__builtin_preserve_access_index()``.
}];
}
+def BPFPreserveStaticOffsetDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+Clang supports the ``__attribute__((preserve_static_offset))``
+attribute for the BPF target. This attribute may be attached to a
+struct or union declaration. Reading or writing fields of types having
+such annotation is guaranteed to generate LDX/ST/STX instruction with
+offset corresponding to the field.
+
+For example:
+
+.. code-block:: c
+
+ struct foo {
+ int a;
+ int b;
+ };
+
+ struct bar {
+ int a;
+ struct foo b;
+ } __attribute__((preserve_static_offset));
+
+ void buz(struct bar *g) {
+ g->b.a = 42;
+ }
+
+The assignment to ``g``'s field would produce an ST instruction with
+offset 8: ``*(u32)(r1 + 8) = 42;``.
+
+Without this attribute generated instructions might be different,
+depending on optimizations behavior. E.g. the example above could be
+rewritten as ``r1 += 8; *(u32)(r1 + 0) = 42;``.
+ }];
+}
+
+def BTFDeclTagDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+Clang supports the ``__attribute__((btf_decl_tag("ARGUMENT")))`` attribute for
+all targets. This attribute may be attached to a struct/union, struct/union
+field, function, function parameter, variable or typedef declaration. If -g is
+specified, the ``ARGUMENT`` info will be preserved in IR and be emitted to
+dwarf. For BPF targets, the ``ARGUMENT`` info will be emitted to .BTF ELF
+section too.
+ }];
+}
+
+def BTFTypeTagDocs : Documentation {
+ let Category = DocCatType;
+ let Content = [{
+Clang supports the ``__attribute__((btf_type_tag("ARGUMENT")))`` attribute for
+all targets. It only has effect when ``-g`` is specified on the command line and
+is currently silently ignored when not applied to a pointer type (note: this
+scenario may be diagnosed in the future).
+
+The ``ARGUMENT`` string will be preserved in IR and emitted to DWARF for the
+types used in variable declarations, function declarations, or typedef
+declarations.
+
+For BPF targets, the ``ARGUMENT`` string will also be emitted to .BTF ELF
+section.
+ }];
+}
+
def MipsInterruptDocs : Documentation {
let Category = DocCatFunction;
let Heading = "interrupt (MIPS)";
@@ -2111,7 +2373,7 @@ as ``-mlong-calls`` and ``-mno-long-calls``.
def RISCVInterruptDocs : Documentation {
let Category = DocCatFunction;
- let Heading = "interrupt (RISCV)";
+ let Heading = "interrupt (RISC-V)";
let Content = [{
Clang supports the GNU style ``__attribute__((interrupt))`` attribute on RISCV
targets. This attribute may be attached to a function definition and instructs
@@ -2132,6 +2394,43 @@ Version 1.10.
}];
}
+def RISCVRVVVectorBitsDocs : Documentation {
+ let Category = DocCatType;
+ let Content = [{
+On RISC-V targets, the ``riscv_rvv_vector_bits(N)`` attribute is used to define
+fixed-length variants of sizeless types.
+
+For example:
+
+.. code-block:: c
+
+ #include <riscv_vector.h>
+
+ #if defined(__riscv_v_fixed_vlen)
+ typedef vint8m1_t fixed_vint8m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
+ #endif
+
+Creates a type ``fixed_vint8m1_t_t`` that is a fixed-length variant of
+``vint8m1_t`` that contains exactly 512 bits. Unlike ``vint8m1_t``, this type
+can be used in globals, structs, unions, and arrays, all of which are
+unsupported for sizeless types.
+
+The attribute can be attached to a single RVV vector (such as ``vint8m1_t``).
+The attribute will be rejected unless
+``N==(__riscv_v_fixed_vlen*LMUL)``, the implementation defined feature macro that
+is enabled under the ``-mrvv-vector-bits`` flag. ``__riscv_v_fixed_vlen`` can
+only be a power of 2 between 64 and 65536.
+
+For types where LMUL!=1, ``__riscv_v_fixed_vlen`` needs to be scaled by the LMUL
+of the type before passing to the attribute.
+
+For ``vbool*_t`` types, ``__riscv_v_fixed_vlen`` needs to be divided by the
+number from the type name. For example, ``vbool8_t`` needs to use
+``__riscv_v_fixed_vlen`` / 8. If the resulting value is not a multiple of 8,
+the type is not supported for that value of ``__riscv_v_fixed_vlen``.
+}];
+}
+
def AVRInterruptDocs : Documentation {
let Category = DocCatFunction;
let Heading = "interrupt (AVR)";
@@ -2176,9 +2475,12 @@ for the function.
For X86, the attribute also allows ``tune="CPU"`` to optimize the generated
code for the given CPU without changing the available instructions.
-For AArch64, the attribute also allows the "branch-protection=<args>" option,
-where the permissible arguments and their effect on code generation are the same
-as for the command-line option ``-mbranch-protection``.
+For AArch64, ``arch="Arch"`` will set the architecture, similar to the -march
+command line options. ``cpu="CPU"`` can be used to select a specific cpu,
+as per the ``-mcpu`` option, similarly for ``tune=``. The attribute also allows the
+"branch-protection=<args>" option, where the permissible arguments and their
+effect on code generation are the same as for the command-line option
+``-mbranch-protection``.
Example "subtarget features" from the x86 backend include: "mmx", "sse", "sse4.2",
"avx", "xop" and largely correspond to the machine specific options handled by
@@ -2205,6 +2507,73 @@ Additionally, a function may not become multiversioned after its first use.
}];
}
+def TargetVersionDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+For AArch64 target clang supports function multiversioning by
+``__attribute__((target_version("OPTIONS")))`` attribute. When applied to a
+function it instructs compiler to emit multiple function versions based on
+``target_version`` attribute strings, which resolved at runtime depend on their
+priority and target features availability. One of the versions is always
+( implicitly or explicitly ) the ``default`` (fallback). Attribute strings can
+contain dependent features names joined by the "+" sign.
+}];
+}
+
+def TargetClonesDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+Clang supports the ``target_clones("OPTIONS")`` attribute. This attribute may be
+attached to a function declaration and causes function multiversioning, where
+multiple versions of the function will be emitted with different code
+generation options. Additionally, these versions will be resolved at runtime
+based on the priority of their attribute options. All ``target_clone`` functions
+are considered multiversioned functions.
+
+For AArch64 target:
+The attribute contains comma-separated strings of target features joined by "+"
+sign. For example:
+
+ .. code-block:: c++
+
+ __attribute__((target_clones("sha2+memtag2", "fcma+sve2-pmull128")))
+ void foo() {}
+
+For every multiversioned function a ``default`` (fallback) implementation
+always generated if not specified directly.
+
+For x86/x86-64 targets:
+All multiversioned functions must contain a ``default`` (fallback)
+implementation, otherwise usages of the function are considered invalid.
+Additionally, a function may not become multiversioned after its first use.
+
+The options to ``target_clones`` can either be a target-specific architecture
+(specified as ``arch=CPU``), or one of a list of subtarget features.
+
+Example "subtarget features" from the x86 backend include: "mmx", "sse", "sse4.2",
+"avx", "xop" and largely correspond to the machine specific options handled by
+the front end.
+
+The versions can either be listed as a comma-separated sequence of string
+literals or as a single string literal containing a comma-separated list of
+versions. For compatibility with GCC, the two formats can be mixed. For
+example, the following will emit 4 versions of the function:
+
+ .. code-block:: c++
+
+ __attribute__((target_clones("arch=atom,avx2","arch=ivybridge","default")))
+ void foo() {}
+
+For targets that support the GNU indirect function (IFUNC) feature, dispatch
+is performed by emitting an indirect function that is resolved to the appropriate
+target clone at load time. The indirect function is given the name the
+multiversioned function would have if it had been declared without the attribute.
+For backward compatibility with earlier Clang releases, a function alias with an
+``.ifunc`` suffix is also emitted. The ``.ifunc`` suffixed symbol is a deprecated
+feature and support for it may be removed in the future.
+}];
+}
+
def MinVectorWidthDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
@@ -2302,8 +2671,9 @@ An error will be given if:
- Specified values violate subtarget specifications;
- Specified values are not compatible with values provided through other
attributes;
- - The AMDGPU target backend is unable to create machine code that can meet the
- request.
+
+The AMDGPU target backend will emit a warning whenever it is unable to
+create machine code that meets the request.
}];
}
@@ -2378,6 +2748,32 @@ the Arm Developer website.
}];
}
+def AArch64SVEPcsDocs : Documentation {
+ let Category = DocCatCallingConvs;
+ let Content = [{
+On AArch64 targets, this attribute changes the calling convention of a
+function to preserve additional Scalable Vector registers and Scalable
+Predicate registers relative to the default calling convention used for
+AArch64.
+
+This means it is more efficient to call such functions from code that performs
+extensive scalable vector and scalable predicate calculations, because fewer
+live SVE registers need to be saved. This property makes it well-suited for SVE
+math library functions, which are typically leaf functions that require a small
+number of registers.
+
+However, using this attribute also means that it is more expensive to call
+a function that adheres to the default calling convention from within such
+a function. Therefore, it is recommended that this attribute is only used
+for leaf functions.
+
+For more information, see the documentation for `aarch64_sve_pcs` in the
+ARM C Language Extension (ACLE) documentation.
+
+.. _`aarch64_sve_pcs`: https://github.com/ARM-software/acle/blob/main/main/acle.md#scalable-vector-extension-procedure-call-standard-attribute
+ }];
+}
+
def RegparmDocs : Documentation {
let Category = DocCatCallingConvs;
let Content = [{
@@ -2442,7 +2838,7 @@ On x86 targets, this attribute changes the calling convention to
as possible in registers. It also tries to utilize registers for the
return value whenever it is possible.
-.. _`__regcall`: https://software.intel.com/en-us/node/693069
+.. _`__regcall`: https://www.intel.com/content/www/us/en/docs/dpcpp-cpp-compiler/developer-guide-reference/2023-2/c-c-sycl-calling-conventions.html
}];
}
@@ -2486,6 +2882,18 @@ See the documentation for `__vectorcall`_ on MSDN for more details.
}];
}
+def M68kRTDDocs : Documentation {
+ let Category = DocCatCallingConvs;
+ let Content = [{
+On M68k targets, this attribute changes the calling convention of a function
+to clear parameters off the stack on return. In other words, callee is
+responsible for cleaning out the stack space allocated for incoming paramters.
+This convention does not support variadic calls or unprototyped functions in C.
+When targeting M68010 or newer CPUs, this calling convention is implemented
+using the `rtd` instruction.
+ }];
+}
+
def DocCatConsumed : DocumentationCategory<"Consumed Annotation Checking"> {
let Content = [{
Clang supports additional attributes for checking basic resource management
@@ -2592,6 +3000,18 @@ full list of supported sanitizer flags.
}];
}
+def DisableSanitizerInstrumentationDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+Use the ``disable_sanitizer_instrumentation`` attribute on a function,
+Objective-C method, or global variable, to specify that no sanitizer
+instrumentation should be applied.
+
+This is not the same as ``__attribute__((no_sanitize(...)))``, which depending
+on the tool may still insert instrumentation to prevent false positive reports.
+ }];
+}
+
def NoSanitizeAddressDocs : Documentation {
let Category = DocCatFunction;
// This function has multiple distinct spellings, and so it requires a custom
@@ -2873,8 +3293,8 @@ def FormatDocs : Documentation {
let Content = [{
Clang supports the ``format`` attribute, which indicates that the function
-accepts a ``printf`` or ``scanf``-like format string and corresponding
-arguments or a ``va_list`` that contains these arguments.
+accepts (among other possibilities) a ``printf`` or ``scanf``-like format string
+and corresponding arguments or a ``va_list`` that contains these arguments.
Please see `GCC documentation about format attribute
<http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html>`_ to find details
@@ -2928,6 +3348,39 @@ Clang implements two kinds of checks with this attribute.
In this case Clang does not warn because the format string ``s`` and
the corresponding arguments are annotated. If the arguments are
incorrect, the caller of ``foo`` will receive a warning.
+
+As an extension to GCC's behavior, Clang accepts the ``format`` attribute on
+non-variadic functions. Clang checks non-variadic format functions for the same
+classes of issues that can be found on variadic functions, as controlled by the
+same warning flags, except that the types of formatted arguments is forced by
+the function signature. For example:
+
+.. code-block:: c
+
+ __attribute__((__format__(__printf__, 1, 2)))
+ void fmt(const char *s, const char *a, int b);
+
+ void bar(void) {
+ fmt("%s %i", "hello", 123); // OK
+ fmt("%i %g", "hello", 123); // warning: arguments don't match format
+ extern const char *fmt;
+ fmt(fmt, "hello", 123); // warning: format string is not a string literal
+ }
+
+When using the format attribute on a variadic function, the first data parameter
+_must_ be the index of the ellipsis in the parameter list. Clang will generate
+a diagnostic otherwise, as it wouldn't be possible to forward that argument list
+to `printf`-family functions. For instance, this is an error:
+
+.. code-block:: c
+
+ __attribute__((__format__(__printf__, 1, 2)))
+ void fmt(const char *s, int b, ...);
+ // ^ error: format attribute parameter 3 is out of bounds
+ // (must be __printf__, 1, 3)
+
+Using the ``format`` attribute on a non-variadic function emits a GCC
+compatibility diagnostic.
}];
}
@@ -3152,6 +3605,9 @@ If a type is trivial for the purposes of calls, has a non-trivial destructor,
and is passed as an argument by value, the convention is that the callee will
destroy the object before returning.
+If a type is trivial for the purpose of calls, it is assumed to be trivially
+relocatable for the purpose of ``__is_trivially_relocatable``.
+
Attribute ``trivial_abi`` has no effect in the following cases:
- The class directly declares a virtual base or virtual methods.
@@ -3168,7 +3624,7 @@ Attribute ``trivial_abi`` has no effect in the following cases:
def MSInheritanceDocs : Documentation {
let Category = DocCatDecl;
- let Heading = "__single_inhertiance, __multiple_inheritance, __virtual_inheritance";
+ let Heading = "__single_inheritance, __multiple_inheritance, __virtual_inheritance";
let Content = [{
This collection of keywords is enabled under ``-fms-extensions`` and controls
the pointer-to-member representation used on ``*-*-win32`` targets.
@@ -3213,6 +3669,21 @@ an error:
}];
}
+def MSConstexprDocs : Documentation {
+ let Category = DocCatStmt;
+ let Content = [{
+The ``[[msvc::constexpr]]`` attribute can be applied only to a function
+definition or a ``return`` statement. It does not impact function declarations.
+A ``[[msvc::constexpr]]`` function cannot be ``constexpr`` or ``consteval``.
+A ``[[msvc::constexpr]]`` function is treated as if it were a ``constexpr`` function
+when it is evaluated in a constant context of ``[[msvc::constexpr]] return`` statement.
+Otherwise, it is treated as a regular function.
+
+Semantics of this attribute are enabled only under MSVC compatibility
+(``-fms-compatibility-version``) 19.33 and later.
+ }];
+}
+
def MSNoVTableDocs : Documentation {
let Category = DocCatDecl;
let Content = [{
@@ -3301,7 +3772,7 @@ Specifying ``#pragma nounroll`` indicates that the loop should not be unrolled:
}
``#pragma unroll`` and ``#pragma unroll _value_`` have identical semantics to
-``#pragma clang loop unroll(full)`` and
+``#pragma clang loop unroll(enable)`` and
``#pragma clang loop unroll_count(_value_)`` respectively. ``#pragma nounroll``
is equivalent to ``#pragma clang loop unroll(disable)``. See
`language extensions
@@ -3577,7 +4048,7 @@ Whether a particular pointer may be "null" is an important concern when working
with pointers in the C family of languages. The various nullability attributes
indicate whether a particular pointer can be null or not, which makes APIs more
expressive and can help static analysis tools identify bugs involving null
-pointers. Clang supports several kinds of nullability attributes: the
+pointers. Clang supports several kinds of nullability attributes: the
``nonnull`` and ``returns_nonnull`` attributes indicate which function or
method parameters and result types can never be null, while nullability type
qualifiers indicate which pointer types can be null (``_Nullable``) or cannot
@@ -3670,10 +4141,10 @@ completion handler in a Swift async method. For instance, here:
This method asynchronously calls ``completionHandler`` when the data is
available, or calls it with an error. ``_Nullable_result`` indicates to the
Swift importer that this is the uncommon case where ``result`` can get ``nil``
-even if no error has occured, and will therefore import it as a Swift optional
+even if no error has occurred, and will therefore import it as a Swift optional
type. Otherwise, if ``result`` was annotated with ``_Nullable``, the Swift
importer will assume that ``result`` will always be non-nil unless an error
-occured.
+occurred.
}];
}
@@ -3753,7 +4224,7 @@ memory is not available rather than returning a null pointer:
The ``returns_nonnull`` attribute implies that returning a null pointer is
undefined behavior, which the optimizer may take advantage of. The ``_Nonnull``
type qualifier indicates that a pointer cannot be null in a more general manner
-(because it is part of the type system) and does not imply undefined behavior,
+(because it is part of the type system) and does not imply undefined behavior,
making it more widely applicable
}];
}
@@ -4112,6 +4583,7 @@ Clang provides the following context selector extensions, used via
match_none
disable_implicit_base
allow_templates
+ bind_to_declaration
The match extensions change when the *entire* context selector is considered a
match for an OpenMP context. The default is ``all``, with ``none`` no trait in the
@@ -4127,8 +4599,9 @@ The allow extensions change when the ``begin declare variant`` effect is
applied to a definition. If ``allow_templates`` is given, template function
definitions are considered as specializations of existing or assumed template
declarations with the same name. The template parameters for the base functions
-are used to instantiate the specialization.
-
+are used to instantiate the specialization. If ``bind_to_declaration`` is given,
+apply the same variant rules to function declarations. This allows the user to
+override declarations with only a function declaration.
}];
}
@@ -4185,8 +4658,10 @@ spelled "XYZ" in the `OpenMP 5.1 Standard`_).
def NoStackProtectorDocs : Documentation {
let Category = DocCatFunction;
+ let Heading = "no_stack_protector, safebuffers";
let Content = [{
-Clang supports the ``__attribute__((no_stack_protector))`` attribute which disables
+Clang supports the GNU style ``__attribute__((no_stack_protector))`` and Microsoft
+style ``__declspec(safebuffers)`` attribute which disables
the stack protector on the specified function. This attribute is useful for
selectively disabling the stack protector on some functions when building with
``-fstack-protector`` compiler option.
@@ -4205,6 +4680,27 @@ option.
}];
}
+def StrictGuardStackCheckDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+Clang supports the Microsoft style ``__declspec((strict_gs_check))`` attribute
+which upgrades the stack protector check from ``-fstack-protector`` to
+``-fstack-protector-strong``.
+
+For example, it upgrades the stack protector for the function ``foo`` to
+``-fstack-protector-strong`` but function ``bar`` will still be built with the
+stack protector with the ``-fstack-protector`` option.
+
+.. code-block:: c
+
+ __declspec((strict_gs_check))
+ int foo(int x); // stack protection will be upgraded for foo.
+
+ int bar(int y); // bar can be built with the standard stack protector checks.
+
+ }];
+}
+
def NotTailCalledDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
@@ -4270,6 +4766,16 @@ guaranteed to not throw an exception.
}];
}
+def NoUwtableDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+Clang supports the ``nouwtable`` attribute which skips emitting
+the unwind table entry for the specified function. This attribute is useful for
+selectively emitting the unwind table entry on some functions when building with
+``-funwind-tables`` compiler option.
+ }];
+}
+
def InternalLinkageDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
@@ -4383,6 +4889,54 @@ Marking virtual functions as ``disable_tail_calls`` is legal.
}];
}
+def AnyX86InterruptDocs : Documentation {
+ let Category = DocCatFunction;
+ let Heading = "interrupt (X86)";
+ let Content = [{
+Clang supports the GNU style ``__attribute__((interrupt))`` attribute on X86
+targets. This attribute may be attached to a function definition and instructs
+the backend to generate appropriate function entry/exit code so that it can be
+used directly as an interrupt service routine.
+
+Interrupt handlers have access to the stack frame pushed onto the stack by the processor,
+and return using the ``IRET`` instruction. All registers in an interrupt handler are callee-saved.
+Exception handlers also have access to the error code pushed onto the stack by the processor,
+when applicable.
+
+An interrupt handler must take the following arguments:
+
+ .. code-block:: c
+
+ __attribute__ ((interrupt))
+ void f (struct stack_frame *frame) {
+ ...
+ }
+
+ Where ``struct stack_frame`` is a suitable struct matching the stack frame pushed by the
+ processor.
+
+An exception handler must take the following arguments:
+
+ .. code-block:: c
+
+ __attribute__ ((interrupt))
+ void g (struct stack_frame *frame, unsigned long code) {
+ ...
+ }
+
+ On 32-bit targets, the ``code`` argument should be of type ``unsigned int``.
+
+Exception handlers should only be used when an error code is pushed by the processor.
+Using the incorrect handler type will crash the system.
+
+Interrupt and exception handlers cannot be called by other functions and must have return type ``void``.
+
+Interrupt and exception handlers should only call functions with the 'no_caller_saved_registers'
+attribute, or should be compiled with the '-mgeneral-regs-only' flag to avoid saving unused
+non-GPR registers.
+ }];
+}
+
def AnyX86NoCallerSavedRegistersDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
@@ -4397,6 +4951,10 @@ The user can call functions specified with the 'no_caller_saved_registers'
attribute from an interrupt handler without saving and restoring all
call-clobbered registers.
+Functions specified with the 'no_caller_saved_registers' attribute should only
+call other functions with the 'no_caller_saved_registers' attribute, or should be
+compiled with the '-mgeneral-regs-only' flag to avoid saving unused non-GPR registers.
+
Note that 'no_caller_saved_registers' attribute is not a calling convention.
In fact, it only overrides the decision of which registers should be saved by
the caller, but not how the parameters are passed from the caller to the callee.
@@ -4721,7 +5279,74 @@ the ``int`` parameter is the one that represents the error.
def SuppressDocs : Documentation {
let Category = DocCatStmt;
let Content = [{
-The ``[[gsl::suppress]]`` attribute suppresses specific
+The ``suppress`` attribute suppresses unwanted warnings coming from static
+analysis tools such as the Clang Static Analyzer. The tool will not report
+any issues in source code annotated with the attribute.
+
+The attribute cannot be used to suppress traditional Clang warnings, because
+many such warnings are emitted before the attribute is fully parsed.
+Consider using ``#pragma clang diagnostic`` to control such diagnostics,
+as described in `Controlling Diagnostics via Pragmas
+<https://clang.llvm.org/docs/UsersManual.html#controlling-diagnostics-via-pragmas>`_.
+
+The ``suppress`` attribute can be placed on an individual statement in order to
+suppress warnings about undesirable behavior occurring at that statement:
+
+.. code-block:: c++
+
+ int foo() {
+ int *x = nullptr;
+ ...
+ [[clang::suppress]]
+ return *x; // null pointer dereference warning suppressed here
+ }
+
+Putting the attribute on a compound statement suppresses all warnings in scope:
+
+.. code-block:: c++
+
+ int foo() {
+ [[clang::suppress]] {
+ int *x = nullptr;
+ ...
+ return *x; // warnings suppressed in the entire scope
+ }
+ }
+
+Some static analysis warnings are accompanied by one or more notes, and the
+line of code against which the warning is emitted isn't necessarily the best
+for suppression purposes. In such cases the tools are allowed to implement
+additional ways to suppress specific warnings based on the attribute attached
+to a note location.
+
+For example, the Clang Static Analyzer suppresses memory leak warnings when
+the suppression attribute is placed at the allocation site (highlited by
+a "note: memory is allocated"), which may be different from the line of code
+at which the program "loses track" of the pointer (where the warning
+is ultimately emitted):
+
+.. code-block:: c
+
+ int bar1(bool coin_flip) {
+ __attribute__((suppress))
+ int *result = (int *)malloc(sizeof(int));
+ if (coin_flip)
+ return 1; // warning about this leak path is suppressed
+
+ return *result; // warning about this leak path is also suppressed
+ }
+
+ int bar2(bool coin_flip) {
+ int *result = (int *)malloc(sizeof(int));
+ if (coin_flip)
+ return 1; // leak warning on this path NOT suppressed
+
+ __attribute__((suppress))
+ return *result; // leak warning is suppressed only on this path
+ }
+
+
+When written as ``[[gsl::suppress]]``, this attribute suppresses specific
clang-tidy diagnostics for rules of the `C++ Core Guidelines`_ in a portable
way. The attribute can be attached to declarations, statements, and at
namespace scope.
@@ -4799,6 +5424,12 @@ general this requires the template to be declared at least twice. For example:
clang::preferred_name(wstring)]] basic_string {
// ...
};
+
+
+Note that the ``preferred_name`` attribute will be ignored when the compiler
+writes a C++20 Module interface now. This is due to a compiler issue
+(https://github.com/llvm/llvm-project/issues/56490) that blocks users to modularize
+declarations with `preferred_name`. This is intended to be fixed in the future.
}];
}
@@ -4819,6 +5450,9 @@ apply for values returned in callee-saved registers.
R11. R11 can be used as a scratch register. Floating-point registers
(XMMs/YMMs) are not preserved and need to be saved by the caller.
+- On AArch64 the callee preserve all general purpose registers, except X0-X8 and
+ X16-X18.
+
The idea behind this convention is to support calls to runtime functions
that have a hot path and a cold path. The hot path is usually a small piece
of code that doesn't use many registers. The cold path might need to call out to
@@ -4859,6 +5493,10 @@ returned in callee-saved registers.
R11. R11 can be used as a scratch register. Furthermore it also preserves
all floating-point registers (XMMs/YMMs).
+- On AArch64 the callee preserve all general purpose registers, except X0-X8 and
+ X16-X18. Furthermore it also preserves lower 128 bits of V8-V31 SIMD - floating
+ point registers.
+
The idea behind this convention is to support calls to runtime functions
that don't need to call out to any other functions.
@@ -4909,7 +5547,9 @@ considered inline.
Not all targets support this attribute. ELF target support depends on both the
linker and runtime linker, and is available in at least lld 4.0 and later,
binutils 2.20.1 and later, glibc v2.11.1 and later, and FreeBSD 9.1 and later.
-Non-ELF targets currently do not support this attribute.
+Mach-O targets support it, but with slightly different semantics: the resolver
+is run at first call, instead of at load time by the runtime linker. Targets
+other than ELF and Mach-O currently do not support this attribute.
}];
}
@@ -4968,10 +5608,25 @@ takes precedence over the command line option ``-fpatchable-function-entry=N,M``
``M`` defaults to 0 if omitted.
This attribute is only supported on
-aarch64/aarch64-be/riscv32/riscv64/i386/x86-64 targets.
+aarch64/aarch64-be/loongarch32/loongarch64/riscv32/riscv64/i386/x86-64 targets.
}];
}
+def HotFunctionEntryDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+``__attribute__((hot))`` marks a function as hot, as a manual alternative to PGO hotness data.
+If PGO data is available, the annotation ``__attribute__((hot))`` overrides the profile count based hotness (unlike ``__attribute__((cold))``).
+}];
+}
+
+def ColdFunctionEntryDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+``__attribute__((cold))`` marks a function as cold, as a manual alternative to PGO hotness data.
+If PGO data is available, the profile count based hotness overrides the ``__attribute__((cold))`` annotation (unlike ``__attribute__((hot))``).
+}];
+}
def TransparentUnionDocs : Documentation {
let Category = DocCatDecl;
let Content = [{
@@ -5242,12 +5897,12 @@ accessed. The following are examples of valid expressions where may not be diagn
``noderef`` is currently only supported for pointers and arrays and not usable
for references or Objective-C object pointers.
-.. code-block: c++
+.. code-block:: c++
int x = 2;
int __attribute__((noderef)) &y = x; // warning: 'noderef' can only be used on an array or pointer type
-.. code-block: objc
+.. code-block:: objc
id __attribute__((noderef)) obj = [NSObject new]; // warning: 'noderef' can only be used on an array or pointer type
}];
@@ -5444,7 +6099,7 @@ by showing the control-flow statement where the path diverges.
if (somePredicate()) {
...
callback();
- } esle {
+ } else {
callback(); // OK: callback is called on every path
}
}
@@ -5615,6 +6270,15 @@ attribute can also be written using C++11 syntax: ``[[mig::server_routine]]``.
}];
}
+def MinSizeDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+This function attribute indicates that optimization passes and code generator passes
+make choices that keep the function code size as small as possible. Optimizations may
+also sacrifice runtime performance in order to minimize the size of the generated code.
+ }];
+}
+
def MSAllocatorDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
@@ -5637,15 +6301,15 @@ def CFGuardDocs : Documentation {
let Content = [{
Code can indicate CFG checks are not wanted with the ``__declspec(guard(nocf))``
attribute. This directs the compiler to not insert any CFG checks for the entire
-function. This approach is typically used only sparingly in specific situations
-where the programmer has manually inserted "CFG-equivalent" protection. The
-programmer knows that they are calling through some read-only function table
-whose address is obtained through read-only memory references and for which the
-index is masked to the function table limit. This approach may also be applied
-to small wrapper functions that are not inlined and that do nothing more than
-make a call through a function pointer. Since incorrect usage of this directive
-can compromise the security of CFG, the programmer must be very careful using
-the directive. Typically, this usage is limited to very small functions that
+function. This approach is typically used only sparingly in specific situations
+where the programmer has manually inserted "CFG-equivalent" protection. The
+programmer knows that they are calling through some read-only function table
+whose address is obtained through read-only memory references and for which the
+index is masked to the function table limit. This approach may also be applied
+to small wrapper functions that are not inlined and that do nothing more than
+make a call through a function pointer. Since incorrect usage of this directive
+can compromise the security of CFG, the programmer must be very careful using
+the directive. Typically, this usage is limited to very small functions that
only call one function.
`Control Flow Guard documentation <https://docs.microsoft.com/en-us/windows/win32/secbp/pe-metadata>`
@@ -5782,9 +6446,6 @@ attribute `clang_builtin_alias`.
def NoBuiltinDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
-.. Note:: This attribute is not yet fully implemented, it is validated but has
- no effect on the generated code.
-
The ``__attribute__((no_builtin))`` is similar to the ``-fno-builtin`` flag
except it is specific to the body of a function. The attribute may also be
applied to a virtual function but has no effect on the behavior of overriding
@@ -5831,7 +6492,7 @@ deferring any errors to the point of use. For instance:
does_not_exist x; // error: use of unresolved 'using_if_exists'
-The C++ spelling of the attribte (`[[clang::using_if_exists]]`) is also
+The C++ spelling of the attribute (`[[clang::using_if_exists]]`) is also
supported as a clang extension, since ISO C++ doesn't support attributes in this
position. If the entity referred to by the using-declaration is found by name
lookup, the attribute has no effect. This attribute is useful for libraries
@@ -5859,19 +6520,21 @@ def AcquireHandleDocs : Documentation {
If this annotation is on a function or a function type it is assumed to return
a new handle. In case this annotation is on an output parameter,
the function is assumed to fill the corresponding argument with a new
-handle.
+handle. The attribute requires a string literal argument which used to
+identify the handle with later uses of ``use_handle`` or
+``release_handle``.
.. code-block:: c++
// Output arguments from Zircon.
zx_status_t zx_socket_create(uint32_t options,
- zx_handle_t __attribute__((acquire_handle)) * out0,
- zx_handle_t* out1 [[clang::acquire_handle]]);
+ zx_handle_t __attribute__((acquire_handle("zircon"))) * out0,
+ zx_handle_t* out1 [[clang::acquire_handle("zircon")]]);
// Returned handle.
- [[clang::acquire_handle]] int open(const char *path, int oflag, ... );
- int open(const char *path, int oflag, ... ) __attribute__((acquire_handle));
+ [[clang::acquire_handle("tag")]] int open(const char *path, int oflag, ... );
+ int open(const char *path, int oflag, ... ) __attribute__((acquire_handle("tag")));
}];
}
@@ -5879,12 +6542,13 @@ def UseHandleDocs : Documentation {
let Category = HandleDocs;
let Content = [{
A function taking a handle by value might close the handle. If a function
-parameter is annotated with ``use_handle`` it is assumed to not to change
+parameter is annotated with ``use_handle(tag)`` it is assumed to not to change
the state of the handle. It is also assumed to require an open handle to work with.
+The attribute requires a string literal argument to identify the handle being used.
.. code-block:: c++
- zx_status_t zx_port_wait(zx_handle_t handle [[clang::use_handle]],
+ zx_status_t zx_port_wait(zx_handle_t handle [[clang::use_handle("zircon")]],
zx_time_t deadline,
zx_port_packet_t* packet);
}];
@@ -5893,15 +6557,139 @@ the state of the handle. It is also assumed to require an open handle to work wi
def ReleaseHandleDocs : Documentation {
let Category = HandleDocs;
let Content = [{
-If a function parameter is annotated with ``release_handle`` it is assumed to
-close the handle. It is also assumed to require an open handle to work with.
+If a function parameter is annotated with ``release_handle(tag)`` it is assumed to
+close the handle. It is also assumed to require an open handle to work with. The
+attribute requires a string literal argument to identify the handle being released.
.. code-block:: c++
- zx_status_t zx_handle_close(zx_handle_t handle [[clang::release_handle]]);
+ zx_status_t zx_handle_close(zx_handle_t handle [[clang::release_handle("tag")]]);
}];
}
+def UnsafeBufferUsageDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+The attribute ``[[clang::unsafe_buffer_usage]]`` should be placed on functions
+that need to be avoided as they are prone to buffer overflows. It is designed to
+work together with the off-by-default compiler warning ``-Wunsafe-buffer-usage``
+to help codebases transition away from raw pointer based buffer management,
+in favor of safer abstractions such as C++20 ``std::span``. The attribute causes
+``-Wunsafe-buffer-usage`` to warn on every use of the function, and it may
+enable ``-Wunsafe-buffer-usage`` to emit automatic fix-it hints
+which would help the user replace such unsafe functions with safe
+alternatives, though the attribute can be used even when the fix can't be automated.
+
+The attribute does not suppress ``-Wunsafe-buffer-usage`` inside the function
+to which it is attached. These warnings still need to be addressed.
+
+The attribute is warranted even if the only way a function can overflow
+the buffer is by violating the function's preconditions. For example, it
+would make sense to put the attribute on function ``foo()`` below because
+passing an incorrect size parameter would cause a buffer overflow:
+
+.. code-block:: c++
+
+ [[clang::unsafe_buffer_usage]]
+ void foo(int *buf, size_t size) {
+ for (size_t i = 0; i < size; ++i) {
+ buf[i] = i;
+ }
+ }
+
+The attribute is NOT warranted when the function uses safe abstractions,
+assuming that these abstractions weren't misused outside the function.
+For example, function ``bar()`` below doesn't need the attribute,
+because assuming that the container ``buf`` is well-formed (has size that
+fits the original buffer it refers to), overflow cannot occur:
+
+.. code-block:: c++
+
+ void bar(std::span<int> buf) {
+ for (size_t i = 0; i < buf.size(); ++i) {
+ buf[i] = i;
+ }
+ }
+
+In this case function ``bar()`` enables the user to keep the buffer
+"containerized" in a span for as long as possible. On the other hand,
+Function ``foo()`` in the previous example may have internal
+consistency, but by accepting a raw buffer it requires the user to unwrap
+their span, which is undesirable according to the programming model
+behind ``-Wunsafe-buffer-usage``.
+
+The attribute is warranted when a function accepts a raw buffer only to
+immediately put it into a span:
+
+.. code-block:: c++
+
+ [[clang::unsafe_buffer_usage]]
+ void baz(int *buf, size_t size) {
+ std::span<int> sp{ buf, size };
+ for (size_t i = 0; i < sp.size(); ++i) {
+ sp[i] = i;
+ }
+ }
+
+In this case ``baz()`` does not contain any unsafe operations, but the awkward
+parameter type causes the caller to unwrap the span unnecessarily.
+Note that regardless of the attribute, code inside ``baz()`` isn't flagged
+by ``-Wunsafe-buffer-usage`` as unsafe. It is definitely undesirable,
+but if ``baz()`` is on an API surface, there is no way to improve it
+to make it as safe as ``bar()`` without breaking the source and binary
+compatibility with existing users of the function. In such cases
+the proper solution would be to create a different function (possibly
+an overload of ``baz()``) that accepts a safe container like ``bar()``,
+and then use the attribute on the original ``baz()`` to help the users
+update their code to use the new function.
+ }];
+}
+
+def DiagnoseAsBuiltinDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+The ``diagnose_as_builtin`` attribute indicates that Fortify diagnostics are to
+be applied to the declared function as if it were the function specified by the
+attribute. The builtin function whose diagnostics are to be mimicked should be
+given. In addition, the order in which arguments should be applied must also
+be given.
+
+For example, the attribute can be used as follows.
+
+.. code-block:: c
+
+ __attribute__((diagnose_as_builtin(__builtin_memset, 3, 2, 1)))
+ void *mymemset(int n, int c, void *s) {
+ // ...
+ }
+
+This indicates that calls to ``mymemset`` should be diagnosed as if they were
+calls to ``__builtin_memset``. The arguments ``3, 2, 1`` indicate by index the
+order in which arguments of ``mymemset`` should be applied to
+``__builtin_memset``. The third argument should be applied first, then the
+second, and then the first. Thus (when Fortify warnings are enabled) the call
+``mymemset(n, c, s)`` will diagnose overflows as if it were the call
+``__builtin_memset(s, c, n)``.
+
+For variadic functions, the variadic arguments must come in the same order as
+they would to the builtin function, after all normal arguments. For instance,
+to diagnose a new function as if it were `sscanf`, we can use the attribute as
+follows.
+
+.. code-block:: c
+
+ __attribute__((diagnose_as_builtin(sscanf, 1, 2)))
+ int mysscanf(const char *str, const char *format, ...) {
+ // ...
+ }
+
+Then the call `mysscanf("abc def", "%4s %4s", buf1, buf2)` will be diagnosed as
+if it were the call `sscanf("abc def", "%4s %4s", buf1, buf2)`.
+
+This attribute cannot be applied to non-static member functions.
+}];
+}
+
def ArmSveVectorBitsDocs : Documentation {
let Category = DocCatType;
let Content = [{
@@ -6002,13 +6790,234 @@ Requirements on Development Tools - Engineering Specification Documentation
}];
}
+def DocCatArmSmeAttributes : DocumentationCategory<"AArch64 SME Attributes"> {
+ let Content = [{
+Clang supports a number of AArch64-specific attributes to manage state
+added by the Scalable Matrix Extension (SME). This state includes the
+runtime mode that the processor is in (e.g. non-streaming or streaming)
+as well as the state of the ``ZA`` Matrix Storage.
+
+The attributes come in the form of type- and declaration attributes:
+
+* The SME declaration attributes can appear anywhere that a standard
+ ``[[...]]`` declaration attribute can appear.
+
+* The SME type attributes apply only to prototyped functions and can appear
+ anywhere that a standard ``[[...]]`` type attribute can appear. The SME
+ type attributes do not apply to functions having a K&R-style
+ unprototyped function type.
+
+See `Arm C Language Extensions <https://github.com/ARM-software/acle>`_
+for more details about the features related to the SME extension.
+
+See `Procedure Call Standard for the Arm® 64-bit Architecture (AArch64)
+<https://github.com/ARM-software/abi-aa>`_ for more details about
+streaming-interface functions and shared/private-ZA interface functions.
+ }];
+}
+
+def ArmSmeStreamingDocs : Documentation {
+ let Category = DocCatArmSmeAttributes;
+ let Content = [{
+The ``__arm_streaming`` keyword applies to prototyped function types and specifies
+that the function has a "streaming interface". This means that:
+
+* the function requires that the processor implements the Scalable Matrix
+ Extension (SME).
+
+* the function must be entered in streaming mode (that is, with PSTATE.SM
+ set to 1)
+
+* the function must return in streaming mode
+
+Clang manages PSTATE.SM automatically; it is not the source code's
+responsibility to do this. For example, if a non-streaming
+function calls an ``__arm_streaming`` function, Clang generates code
+that switches into streaming mode before calling the function and
+switches back to non-streaming mode on return.
+ }];
+}
+
+def ArmSmeStreamingCompatibleDocs : Documentation {
+ let Category = DocCatArmSmeAttributes;
+ let Content = [{
+The ``__arm_streaming_compatible`` keyword applies to prototyped function types and
+specifies that the function has a "streaming compatible interface". This
+means that:
+
+* the function may be entered in either non-streaming mode (PSTATE.SM=0) or
+ in streaming mode (PSTATE.SM=1).
+
+* the function must return in the same mode as it was entered.
+
+* the code executed in the function is compatible with either mode.
+
+Clang manages PSTATE.SM automatically; it is not the source code's
+responsibility to do this. Clang will ensure that the generated code in
+streaming-compatible functions is valid in either mode (PSTATE.SM=0 or
+PSTATE.SM=1). For example, if an ``__arm_streaming_compatible`` function calls a
+non-streaming function, Clang generates code to temporarily switch out of streaming
+mode before calling the function and switch back to streaming-mode on return if
+``PSTATE.SM`` is ``1`` on entry of the caller. If ``PSTATE.SM`` is ``0`` on
+entry to the ``__arm_streaming_compatible`` function, the call will be executed
+without changing modes.
+ }];
+}
+
+def ArmInDocs : Documentation {
+ let Category = DocCatArmSmeAttributes;
+ let Content = [{
+The ``__arm_in`` keyword applies to prototyped function types and specifies
+that the function shares a given state S with its caller. For ``__arm_in``, the
+function takes the state S as input and returns with the state S unchanged.
+
+The attribute takes string arguments to instruct the compiler which state
+is shared. The supported states for S are:
+
+* ``"za"`` for Matrix Storage (requires SME)
+
+The attributes ``__arm_in(S)``, ``__arm_out(S)``, ``__arm_inout(S)`` and
+``__arm_preserves(S)`` are all mutually exclusive for the same state S.
+ }];
+}
+
+def ArmOutDocs : Documentation {
+ let Category = DocCatArmSmeAttributes;
+ let Content = [{
+The ``__arm_out`` keyword applies to prototyped function types and specifies
+that the function shares a given state S with its caller. For ``__arm_out``,
+the function ignores the incoming state for S and returns new state for S.
+
+The attribute takes string arguments to instruct the compiler which state
+is shared. The supported states for S are:
+
+* ``"za"`` for Matrix Storage (requires SME)
+
+The attributes ``__arm_in(S)``, ``__arm_out(S)``, ``__arm_inout(S)`` and
+``__arm_preserves(S)`` are all mutually exclusive for the same state S.
+ }];
+}
+
+def ArmInOutDocs : Documentation {
+ let Category = DocCatArmSmeAttributes;
+ let Content = [{
+The ``__arm_inout`` keyword applies to prototyped function types and specifies
+that the function shares a given state S with its caller. For ``__arm_inout``,
+the function takes the state S as input and returns new state for S.
+
+The attribute takes string arguments to instruct the compiler which state
+is shared. The supported states for S are:
+
+* ``"za"`` for Matrix Storage (requires SME)
+
+The attributes ``__arm_in(S)``, ``__arm_out(S)``, ``__arm_inout(S)`` and
+``__arm_preserves(S)`` are all mutually exclusive for the same state S.
+ }];
+}
+
+def ArmPreservesDocs : Documentation {
+ let Category = DocCatArmSmeAttributes;
+ let Content = [{
+The ``__arm_preserves`` keyword applies to prototyped function types and
+specifies that the function does not read a given state S and returns
+with state S unchanged.
+
+The attribute takes string arguments to instruct the compiler which state
+is shared. The supported states for S are:
+
+* ``"za"`` for Matrix Storage (requires SME)
+
+The attributes ``__arm_in(S)``, ``__arm_out(S)``, ``__arm_inout(S)`` and
+``__arm_preserves(S)`` are all mutually exclusive for the same state S.
+ }];
+}
+
+def ArmSmeLocallyStreamingDocs : Documentation {
+ let Category = DocCatArmSmeAttributes;
+ let Content = [{
+The ``__arm_locally_streaming`` keyword applies to function declarations
+and specifies that all the statements in the function are executed in
+streaming mode. This means that:
+
+* the function requires that the target processor implements the Scalable Matrix
+ Extension (SME).
+
+* the program automatically puts the machine into streaming mode before
+ executing the statements and automatically restores the previous mode
+ afterwards.
+
+Clang manages PSTATE.SM automatically; it is not the source code's
+responsibility to do this. For example, Clang will emit code to enable
+streaming mode at the start of the function, and disable streaming mode
+at the end of the function.
+ }];
+}
+
+def ArmNewDocs : Documentation {
+ let Category = DocCatArmSmeAttributes;
+ let Content = [{
+The ``__arm_new`` keyword applies to function declarations and specifies
+that the function will create a new scope for state S.
+
+The attribute takes string arguments to instruct the compiler for which state
+to create new scope. The supported states for S are:
+
+* ``"za"`` for Matrix Storage (requires SME)
+
+For state ``"za"``, this means that:
+
+* the function requires that the target processor implements the Scalable Matrix
+ Extension (SME).
+
+* the function will commit any lazily saved ZA data.
+
+* the function will create a new ZA context and enable PSTATE.ZA.
+
+* the function will disable PSTATE.ZA (by setting it to 0) before returning.
+
+For ``__arm_new("za")`` functions Clang will set up the ZA context automatically
+on entry to the function and disable it before returning. For example, if ZA is
+in a dormant state Clang will generate the code to commit a lazy-save and set up
+a new ZA state before executing user code.
+ }];
+}
+
def AlwaysInlineDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
Inlining heuristics are disabled and inlining is always attempted regardless of
optimization level.
-Does not guarantee that inline substitution actually occurs.
+``[[clang::always_inline]]`` spelling can be used as a statement attribute; other
+spellings of the attribute are not supported on statements. If a statement is
+marked ``[[clang::always_inline]]`` and contains calls, the compiler attempts
+to inline those calls.
+
+.. code-block:: c
+
+ int example(void) {
+ int i;
+ [[clang::always_inline]] foo(); // attempts to inline foo
+ [[clang::always_inline]] i = bar(); // attempts to inline bar
+ [[clang::always_inline]] return f(42, baz(bar())); // attempts to inline everything
+ }
+
+A declaration statement, which is a statement, is not a statement that can have an
+attribute associated with it (the attribute applies to the declaration, not the
+statement in that case). So this use case will not work:
+
+.. code-block:: c
+
+ int example(void) {
+ [[clang::always_inline]] int i = bar();
+ return i;
+ }
+
+This attribute does not guarantee that inline substitution actually occurs.
+
+<ins>Note: applying this attribute to a coroutine at the `-O0` optimization level
+has no effect; other optimization levels may only partially inline and result in a
+diagnostic.</ins>
See also `the Microsoft Docs on Inline Functions`_, `the GCC Common Function
Attribute docs`_, and `the GCC Inline docs`_.
@@ -6045,3 +7054,827 @@ def EnforceTCBLeafDocs : Documentation {
- ``enforce_tcb_leaf(Name)`` indicates that this function is a part of the TCB named ``Name``
}];
}
+
+def ErrorAttrDocs : Documentation {
+ let Category = DocCatFunction;
+ let Heading = "error, warning";
+ let Content = [{
+The ``error`` and ``warning`` function attributes can be used to specify a
+custom diagnostic to be emitted when a call to such a function is not
+eliminated via optimizations. This can be used to create compile time
+assertions that depend on optimizations, while providing diagnostics
+pointing to precise locations of the call site in the source.
+
+.. code-block:: c++
+
+ __attribute__((warning("oh no"))) void dontcall();
+ void foo() {
+ if (someCompileTimeAssertionThatsTrue)
+ dontcall(); // Warning
+
+ dontcall(); // Warning
+
+ if (someCompileTimeAssertionThatsFalse)
+ dontcall(); // No Warning
+ sizeof(dontcall()); // No Warning
+ }
+ }];
+}
+
+def ZeroCallUsedRegsDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+This attribute, when attached to a function, causes the compiler to zero a
+subset of all call-used registers before the function returns. It's used to
+increase program security by either mitigating `Return-Oriented Programming`_
+(ROP) attacks or preventing information leakage through registers.
+
+The term "call-used" means registers which are not guaranteed to be preserved
+unchanged for the caller by the current calling convention. This could also be
+described as "caller-saved" or "not callee-saved".
+
+The `choice` parameters gives the programmer flexibility to choose the subset
+of the call-used registers to be zeroed:
+
+- ``skip`` doesn't zero any call-used registers. This choice overrides any
+ command-line arguments.
+- ``used`` only zeros call-used registers used in the function. By ``used``, we
+ mean a register whose contents have been set or referenced in the function.
+- ``used-gpr`` only zeros call-used GPR registers used in the function.
+- ``used-arg`` only zeros call-used registers used to pass arguments to the
+ function.
+- ``used-gpr-arg`` only zeros call-used GPR registers used to pass arguments to
+ the function.
+- ``all`` zeros all call-used registers.
+- ``all-gpr`` zeros all call-used GPR registers.
+- ``all-arg`` zeros all call-used registers used to pass arguments to the
+ function.
+- ``all-gpr-arg`` zeros all call-used GPR registers used to pass arguments to
+ the function.
+
+The default for the attribute is controlled by the ``-fzero-call-used-regs``
+flag.
+
+.. _Return-Oriented Programming: https://en.wikipedia.org/wiki/Return-oriented_programming
+ }];
+}
+
+def NumThreadsDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+The ``numthreads`` attribute applies to HLSL shaders where explcit thread counts
+are required. The ``X``, ``Y``, and ``Z`` values provided to the attribute
+dictate the thread id. Total number of threads executed is ``X * Y * Z``.
+
+The full documentation is available here: https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/sm5-attributes-numthreads
+ }];
+}
+
+def HLSLSV_ShaderTypeAttrDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+The ``shader`` type attribute applies to HLSL shader entry functions to
+identify the shader type for the entry function.
+The syntax is:
+
+.. code-block:: text
+
+ ``[shader(string-literal)]``
+
+where the string literal is one of: "pixel", "vertex", "geometry", "hull",
+"domain", "compute", "raygeneration", "intersection", "anyhit", "closesthit",
+"miss", "callable", "mesh", "amplification". Normally the shader type is set
+by shader target with the ``-T`` option like ``-Tps_6_1``. When compiling to a
+library target like ``lib_6_3``, the shader type attribute can help the
+compiler to identify the shader type. It is mostly used by Raytracing shaders
+where shaders must be compiled into a library and linked at runtime.
+ }];
+}
+
+def ClangRandomizeLayoutDocs : Documentation {
+ let Category = DocCatDecl;
+ let Heading = "randomize_layout, no_randomize_layout";
+ let Content = [{
+The attribute ``randomize_layout``, when attached to a C structure, selects it
+for structure layout field randomization; a compile-time hardening technique. A
+"seed" value, is specified via the ``-frandomize-layout-seed=`` command line flag.
+For example:
+
+.. code-block:: bash
+
+ SEED=`od -A n -t x8 -N 32 /dev/urandom | tr -d ' \n'`
+ make ... CFLAGS="-frandomize-layout-seed=$SEED" ...
+
+You can also supply the seed in a file with ``-frandomize-layout-seed-file=``.
+For example:
+
+.. code-block:: bash
+
+ od -A n -t x8 -N 32 /dev/urandom | tr -d ' \n' > /tmp/seed_file.txt
+ make ... CFLAGS="-frandomize-layout-seed-file=/tmp/seed_file.txt" ...
+
+The randomization is deterministic based for a given seed, so the entire
+program should be compiled with the same seed, but keep the seed safe
+otherwise.
+
+The attribute ``no_randomize_layout``, when attached to a C structure,
+instructs the compiler that this structure should not have its field layout
+randomized.
+ }];
+}
+
+def HLSLSV_GroupIndexDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+The ``SV_GroupIndex`` semantic, when applied to an input parameter, specifies a
+data binding to map the group index to the specified parameter. This attribute
+is only supported in compute shaders.
+
+The full documentation is available here: https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/sv-groupindex
+ }];
+}
+
+def HLSLResourceBindingDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+The resource binding attribute sets the virtual register and logical register space for a resource.
+Attribute spelling in HLSL is: ``register(slot [, space])``.
+``slot`` takes the format ``[type][number]``,
+where ``type`` is a single character specifying the resource type and ``number`` is the virtual register number.
+
+Register types are:
+t for shader resource views (SRV),
+s for samplers,
+u for unordered access views (UAV),
+b for constant buffer views (CBV).
+
+Register space is specified in the format ``space[number]`` and defaults to ``space0`` if omitted.
+Here're resource binding examples with and without space:
+.. code-block:: c++
+
+ RWBuffer<float> Uav : register(u3, space1);
+ Buffer<float> Buf : register(t1);
+
+The full documentation is available here: https://docs.microsoft.com/en-us/windows/win32/direct3d12/resource-binding-in-hlsl
+ }];
+}
+
+def HLSLSV_DispatchThreadIDDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+The ``SV_DispatchThreadID`` semantic, when applied to an input parameter,
+specifies a data binding to map the global thread offset within the Dispatch
+call (per dimension of the group) to the specified parameter.
+When applied to a field of a struct, the data binding is specified to the field
+when the struct is used as a parameter type.
+The semantic on the field is ignored when not used as a parameter.
+This attribute is only supported in compute shaders.
+
+The full documentation is available here: https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/sv-dispatchthreadid
+ }];
+}
+
+def HLSLGroupSharedAddressSpaceDocs : Documentation {
+ let Category = DocCatVariable;
+ let Content = [{
+HLSL enables threads of a compute shader to exchange values via shared memory.
+HLSL provides barrier primitives such as GroupMemoryBarrierWithGroupSync,
+and so on to ensure the correct ordering of reads and writes to shared memory
+in the shader and to avoid data races.
+Here's an example to declare a groupshared variable.
+.. code-block:: c++
+
+ groupshared GSData data[5*5*1];
+
+The full documentation is available here: https://learn.microsoft.com/en-us/windows/win32/direct3dhlsl/dx-graphics-hlsl-variable-syntax#group-shared
+ }];
+}
+
+def HLSLParamQualifierDocs : Documentation {
+ let Category = DocCatVariable;
+ let Heading = "HLSL Parameter Modifiers";
+ let Content = [{
+HLSL function parameters are passed by value. Parameter declarations support
+three qualifiers to denote parameter passing behavior. The three qualifiers are
+`in`, `out` and `inout`.
+
+Parameters annotated with `in` or with no annotation are passed by value from
+the caller to the callee.
+
+Parameters annotated with `out` are written to the argument after the callee
+returns (Note: arguments values passed into `out` parameters *are not* copied
+into the callee).
+
+Parameters annotated with `inout` are copied into the callee via a temporary,
+and copied back to the argument after the callee returns.
+ }];
+}
+
+def AnnotateTypeDocs : Documentation {
+ let Category = DocCatType;
+ let Heading = "annotate_type";
+ let Content = [{
+This attribute is used to add annotations to types, typically for use by static
+analysis tools that are not integrated into the core Clang compiler (e.g.,
+Clang-Tidy checks or out-of-tree Clang-based tools). It is a counterpart to the
+`annotate` attribute, which serves the same purpose, but for declarations.
+
+The attribute takes a mandatory string literal argument specifying the
+annotation category and an arbitrary number of optional arguments that provide
+additional information specific to the annotation category. The optional
+arguments must be constant expressions of arbitrary type.
+
+For example:
+
+.. code-block:: c++
+
+ int* [[clang::annotate_type("category1", "foo", 1)]] f(int[[clang::annotate_type("category2")]] *);
+
+The attribute does not have any effect on the semantics of the type system,
+neither type checking rules, nor runtime semantics. In particular:
+
+- ``std::is_same<T, T [[clang::annotate_type("foo")]]>`` is true for all types
+ ``T``.
+
+- It is not permissible for overloaded functions or template specializations
+ to differ merely by an ``annotate_type`` attribute.
+
+- The presence of an ``annotate_type`` attribute will not affect name
+ mangling.
+ }];
+}
+
+def WeakDocs : Documentation {
+ let Category = DocCatDecl;
+ let Content = [{
+
+In supported output formats the ``weak`` attribute can be used to
+specify that a variable or function should be emitted as a symbol with
+``weak`` (if a definition) or ``extern_weak`` (if a declaration of an
+external symbol) `linkage
+<https://llvm.org/docs/LangRef.html#linkage-types>`_.
+
+If there is a non-weak definition of the symbol the linker will select
+that over the weak. They must have same type and alignment (variables
+must also have the same size), but may have a different value.
+
+If there are multiple weak definitions of same symbol, but no non-weak
+definition, they should have same type, size, alignment and value, the
+linker will select one of them (see also selectany_ attribute).
+
+If the ``weak`` attribute is applied to a ``const`` qualified variable
+definition that variable is no longer consider a compiletime constant
+as its value can change during linking (or dynamic linking). This
+means that it can e.g no longer be part of an initializer expression.
+
+.. code-block:: c
+
+ const int ANSWER __attribute__ ((weak)) = 42;
+
+ /* This function may be replaced link-time */
+ __attribute__ ((weak)) void debug_log(const char *msg)
+ {
+ fprintf(stderr, "DEBUG: %s\n", msg);
+ }
+
+ int main(int argc, const char **argv)
+ {
+ debug_log ("Starting up...");
+
+ /* This may print something else than "6 * 7 = 42",
+ if there is a non-weak definition of "ANSWER" in
+ an object linked in */
+ printf("6 * 7 = %d\n", ANSWER);
+
+ return 0;
+ }
+
+If an external declaration is marked weak and that symbol does not
+exist during linking (possibly dynamic) the address of the symbol will
+evaluate to NULL.
+
+.. code-block:: c
+
+ void may_not_exist(void) __attribute__ ((weak));
+
+ int main(int argc, const char **argv)
+ {
+ if (may_not_exist) {
+ may_not_exist();
+ } else {
+ printf("Function did not exist\n");
+ }
+ return 0;
+ }
+ }];
+}
+
+def FunctionReturnThunksDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+The attribute ``function_return`` can replace return instructions with jumps to
+target-specific symbols. This attribute supports 2 possible values,
+corresponding to the values supported by the ``-mfunction-return=`` command
+line flag:
+
+* ``__attribute__((function_return("keep")))`` to disable related transforms.
+ This is useful for undoing global setting from ``-mfunction-return=`` locally
+ for individual functions.
+* ``__attribute__((function_return("thunk-extern")))`` to replace returns with
+ jumps, while NOT emitting the thunk.
+
+The values ``thunk`` and ``thunk-inline`` from GCC are not supported.
+
+The symbol used for ``thunk-extern`` is target specific:
+* X86: ``__x86_return_thunk``
+
+As such, this function attribute is currently only supported on X86 targets.
+ }];
+}
+
+def ReadOnlyPlacementDocs : Documentation {
+ let Category = DocCatType;
+ let Content = [{This attribute is attached to a structure, class or union declaration.
+ When attached to a record declaration/definition, it checks if all instances
+ of this type can be placed in the read-only data segment of the program. If it
+ finds an instance that can not be placed in a read-only segment, the compiler
+ emits a warning at the source location where the type was used.
+
+ Examples:
+ * ``struct __attribute__((enforce_read_only_placement)) Foo;``
+ * ``struct __attribute__((enforce_read_only_placement)) Bar { ... };``
+
+ Both ``Foo`` and ``Bar`` types have the ``enforce_read_only_placement`` attribute.
+
+ The goal of introducing this attribute is to assist developers with writing secure
+ code. A ``const``-qualified global is generally placed in the read-only section
+ of the memory that has additional run time protection from malicious writes. By
+ attaching this attribute to a declaration, the developer can express the intent
+ to place all instances of the annotated type in the read-only program memory.
+
+ Note 1: The attribute doesn't guarantee that the object will be placed in the
+ read-only data segment as it does not instruct the compiler to ensure such
+ a placement. It emits a warning if something in the code can be proven to prevent
+ an instance from being placed in the read-only data segment.
+
+ Note 2: Currently, clang only checks if all global declarations of a given type 'T'
+ are ``const``-qualified. The following conditions would also prevent the data to be
+ put into read only segment, but the corresponding warnings are not yet implemented.
+
+ 1. An instance of type ``T`` is allocated on the heap/stack.
+ 2. Type ``T`` defines/inherits a mutable field.
+ 3. Type ``T`` defines/inherits non-constexpr constructor(s) for initialization.
+ 4. A field of type ``T`` is defined by type ``Q``, which does not bear the
+ ``enforce_read_only_placement`` attribute.
+ 5. A type ``Q`` inherits from type ``T`` and it does not have the
+ ``enforce_read_only_placement`` attribute.
+ }];
+}
+
+def WebAssemblyFuncrefDocs : Documentation {
+ let Category = DocCatType;
+ let Content = [{
+Clang supports the ``__funcref`` attribute for the WebAssembly target.
+This attribute may be attached to a function pointer type, where it modifies
+its underlying representation to be a WebAssembly ``funcref``.
+ }];
+}
+
+def PreferredTypeDocumentation : Documentation {
+ let Category = DocCatField;
+ let Content = [{
+This attribute allows adjusting the type of a bit-field in debug information.
+This can be helpful when a bit-field is intended to store an enumeration value,
+but has to be specified as having the enumeration's underlying type in order to
+facilitate compiler optimizations or bit-field packing behavior. Normally, the
+underlying type is what is emitted in debug information, which can make it hard
+for debuggers to know to map a bit-field's value back to a particular enumeration.
+
+.. code-block:: c++
+
+ enum Colors { Red, Green, Blue };
+
+ struct S {
+ [[clang::preferred_type(Colors)]] unsigned ColorVal : 2;
+ [[clang::preferred_type(bool)]] unsigned UseAlternateColorSpace : 1;
+ } s = { Green, false };
+
+Without the attribute, a debugger is likely to display the value ``1`` for ``ColorVal``
+and ``0`` for ``UseAlternateColorSpace``. With the attribute, the debugger may now
+display ``Green`` and ``false`` instead.
+
+This can be used to map a bit-field to an arbitrary type that isn't integral
+or an enumeration type. For example:
+
+.. code-block:: c++
+
+ struct A {
+ short a1;
+ short a2;
+ };
+
+ struct B {
+ [[clang::preferred_type(A)]] unsigned b1 : 32 = 0x000F'000C;
+ };
+
+will associate the type ``A`` with the ``b1`` bit-field and is intended to display
+something like this in the debugger:
+
+.. code-block:: text
+
+ Process 2755547 stopped
+ * thread #1, name = 'test-preferred-', stop reason = step in
+ frame #0: 0x0000555555555148 test-preferred-type`main at test.cxx:13:14
+ 10 int main()
+ 11 {
+ 12 B b;
+ -> 13 return b.b1;
+ 14 }
+ (lldb) v -T
+ (B) b = {
+ (A:32) b1 = {
+ (short) a1 = 12
+ (short) a2 = 15
+ }
+ }
+
+Note that debuggers may not be able to handle more complex mappings, and so
+this usage is debugger-dependent.
+ }];
+}
+
+def CleanupDocs : Documentation {
+ let Category = DocCatVariable;
+ let Content = [{
+This attribute allows a function to be run when a local variable goes out of
+scope. The attribute takes the identifier of a function with a parameter type
+that is a pointer to the type with the attribute.
+
+.. code-block:: c
+
+ static void foo (int *) { ... }
+ static void bar (int *) { ... }
+ void baz (void) {
+ int x __attribute__((cleanup(foo)));
+ {
+ int y __attribute__((cleanup(bar)));
+ }
+ }
+
+The above example will result in a call to ``bar`` being passed the address of
+`y`` when ``y`` goes out of scope, then a call to ``foo`` being passed the
+address of ``x`` when ``x`` goes out of scope. If two or more variables share
+the same scope, their ``cleanup`` callbacks are invoked in the reverse order
+the variables were declared in. It is not possible to check the return value
+(if any) of these ``cleanup`` callback functions.
+}];
+}
+
+def CtorDtorDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+The ``constructor`` attribute causes the function to be called before entering
+``main()``, and the ``destructor`` attribute causes the function to be called
+after returning from ``main()`` or when the ``exit()`` function has been
+called. Note, ``quick_exit()``, ``_Exit()``, and ``abort()`` prevent a function
+marked ``destructor`` from being called.
+
+The constructor or destructor function should not accept any arguments and its
+return type should be ``void``.
+
+The attributes accept an optional argument used to specify the priority order
+in which to execute constructor and destructor functions. The priority is
+given as an integer constant expression between 101 and 65535 (inclusive).
+Priorities outside of that range are reserved for use by the implementation. A
+lower value indicates a higher priority of initialization. Note that only the
+relative ordering of values is important. For example:
+
+.. code-block:: c++
+
+ __attribute__((constructor(200))) void foo(void);
+ __attribute__((constructor(101))) void bar(void);
+
+``bar()`` will be called before ``foo()``, and both will be called before
+``main()``. If no argument is given to the ``constructor`` or ``destructor``
+attribute, they default to the value ``65535``.
+}];
+}
+
+def CoroOnlyDestroyWhenCompleteDocs : Documentation {
+ let Category = DocCatDecl;
+ let Content = [{
+The `coro_only_destroy_when_complete` attribute should be marked on a C++ class. The coroutines
+whose return type is marked with the attribute are assumed to be destroyed only after the coroutine has
+reached the final suspend point.
+
+This is helpful for the optimizers to reduce the size of the destroy function for the coroutines.
+
+For example,
+
+.. code-block:: c++
+
+ A foo() {
+ dtor d;
+ co_await something();
+ dtor d1;
+ co_await something();
+ dtor d2;
+ co_return 43;
+ }
+
+The compiler may generate the following pseudocode:
+
+.. code-block:: c++
+
+ void foo.destroy(foo.Frame *frame) {
+ switch(frame->suspend_index()) {
+ case 1:
+ frame->d.~dtor();
+ break;
+ case 2:
+ frame->d.~dtor();
+ frame->d1.~dtor();
+ break;
+ case 3:
+ frame->d.~dtor();
+ frame->d1.~dtor();
+ frame->d2.~dtor();
+ break;
+ default: // coroutine completed or haven't started
+ break;
+ }
+
+ frame->promise.~promise_type();
+ delete frame;
+ }
+
+The `foo.destroy()` function's purpose is to release all of the resources
+initialized for the coroutine when it is destroyed in a suspended state.
+However, if the coroutine is only ever destroyed at the final suspend state,
+the rest of the conditions are superfluous.
+
+The user can use the `coro_only_destroy_when_complete` attributo suppress
+generation of the other destruction cases, optimizing the above `foo.destroy` to:
+
+.. code-block:: c++
+
+ void foo.destroy(foo.Frame *frame) {
+ frame->promise.~promise_type();
+ delete frame;
+ }
+
+ }];
+}
+
+def CoroReturnTypeAndWrapperDoc : Documentation {
+ let Category = DocCatDecl;
+ let Content = [{
+The ``[[clang::coro_return_type]]`` attribute is used to help static analyzers to recognize
+coroutines from the function signatures.
+
+The ``coro_return_type`` attribute should be marked on a C++ class to mark it as
+a **coroutine return type (CRT)**.
+
+A function ``R func(P1, .., PN)`` has a coroutine return type (CRT) ``R`` if ``R``
+is marked by ``[[clang::coro_return_type]]`` and ``R`` has a promise type associated to it
+(i.e., std::coroutine_traits<R, P1, .., PN>::promise_type is a valid promise type).
+
+If the return type of a function is a ``CRT`` then the function must be a coroutine.
+Otherwise the program is invalid. It is allowed for a non-coroutine to return a ``CRT``
+if the function is marked with ``[[clang::coro_wrapper]]``.
+
+The ``[[clang::coro_wrapper]]`` attribute should be marked on a C++ function to mark it as
+a **coroutine wrapper**. A coroutine wrapper is a function which returns a ``CRT``,
+is not a coroutine itself and is marked with ``[[clang::coro_wrapper]]``.
+
+Clang will enforce that all functions that return a ``CRT`` are either coroutines or marked
+with ``[[clang::coro_wrapper]]``. Clang will enforce this with an error.
+
+From a language perspective, it is not possible to differentiate between a coroutine and a
+function returning a CRT by merely looking at the function signature.
+
+Coroutine wrappers, in particular, are susceptible to capturing
+references to temporaries and other lifetime issues. This allows to avoid such lifetime
+issues with coroutine wrappers.
+
+For example,
+
+.. code-block:: c++
+
+ // This is a CRT.
+ template <typename T> struct [[clang::coro_return_type]] Task {
+ using promise_type = some_promise_type;
+ };
+
+ Task<int> increment(int a) { co_return a + 1; } // Fine. This is a coroutine.
+ Task<int> foo() { return increment(1); } // Error. foo is not a coroutine.
+
+ // Fine for a coroutine wrapper to return a CRT.
+ [[clang::coro_wrapper]] Task<int> foo() { return increment(1); }
+
+ void bar() {
+ // Invalid. This intantiates a function which returns a CRT but is not marked as
+ // a coroutine wrapper.
+ std::function<Task<int>(int)> f = increment;
+ }
+
+Note: ``a_promise_type::get_return_object`` is exempted from this analysis as it is a necessary
+implementation detail of any coroutine library.
+}];
+}
+
+def CodeAlignAttrDocs : Documentation {
+ let Category = DocCatVariable;
+ let Heading = "clang::code_align";
+ let Content = [{
+The ``clang::code_align(N)`` attribute applies to a loop and specifies the byte
+alignment for a loop. The attribute accepts a positive integer constant
+initialization expression indicating the number of bytes for the minimum
+alignment boundary. Its value must be a power of 2, between 1 and 4096
+(inclusive).
+
+.. code-block:: c++
+
+ void foo() {
+ int var = 0;
+ [[clang::code_align(16)]] for (int i = 0; i < 10; ++i) var++;
+ }
+
+ void Array(int *array, size_t n) {
+ [[clang::code_align(64)]] for (int i = 0; i < n; ++i) array[i] = 0;
+ }
+
+ void count () {
+ int a1[10], int i = 0;
+ [[clang::code_align(32)]] while (i < 10) { a1[i] += 3; }
+ }
+
+ void check() {
+ int a = 10;
+ [[clang::code_align(8)]] do {
+ a = a + 1;
+ } while (a < 20);
+ }
+
+ template<int A>
+ void func() {
+ [[clang::code_align(A)]] for(;;) { }
+ }
+
+ }];
+}
+
+def CoroLifetimeBoundDoc : Documentation {
+ let Category = DocCatDecl;
+ let Content = [{
+The ``[[clang::coro_lifetimebound]]`` is a class attribute which can be applied
+to a coroutine return type (`CRT`_) (i.e.
+it should also be annotated with ``[[clang::coro_return_type]]``).
+
+All parameters of a function are considered to be lifetime bound if the function returns a
+coroutine return type (CRT) annotated with ``[[clang::coro_lifetimebound]]``.
+This lifetime bound analysis can be disabled for a coroutine wrapper or a coroutine by annotating the function
+with ``[[clang::coro_disable_lifetimebound]]`` function attribute .
+See `documentation`_ of ``[[clang::lifetimebound]]`` for details about lifetime bound analysis.
+
+
+Reference parameters of a coroutine are susceptible to capturing references to temporaries or local variables.
+
+For example,
+
+.. code-block:: c++
+
+ task<int> coro(const int& a) { co_return a + 1; }
+ task<int> dangling_refs(int a) {
+ // `coro` captures reference to a temporary. `foo` would now contain a dangling reference to `a`.
+ auto foo = coro(1);
+ // `coro` captures reference to local variable `a` which is destroyed after the return.
+ return coro(a);
+ }
+
+Lifetime bound static analysis can be used to detect such instances when coroutines capture references
+which may die earlier than the coroutine frame itself. In the above example, if the CRT `task` is annotated with
+``[[clang::coro_lifetimebound]]``, then lifetime bound analysis would detect capturing reference to
+temporaries or return address of a local variable.
+
+Both coroutines and coroutine wrappers are part of this analysis.
+
+.. code-block:: c++
+
+ template <typename T> struct [[clang::coro_return_type, clang::coro_lifetimebound]] Task {
+ using promise_type = some_promise_type;
+ };
+
+ Task<int> coro(const int& a) { co_return a + 1; }
+ [[clang::coro_wrapper]] Task<int> coro_wrapper(const int& a, const int& b) {
+ return a > b ? coro(a) : coro(b);
+ }
+ Task<int> temporary_reference() {
+ auto foo = coro(1); // warning: capturing reference to a temporary which would die after the expression.
+
+ int a = 1;
+ auto bar = coro_wrapper(a, 0); // warning: `b` captures reference to a temporary.
+
+ co_return co_await coro(1); // fine.
+ }
+ [[clang::coro_wrapper]] Task<int> stack_reference(int a) {
+ return coro(a); // warning: returning address of stack variable `a`.
+ }
+
+This analysis can be disabled for all calls to a particular function by annotating the function
+with function attribute ``[[clang::coro_disable_lifetimebound]]``.
+For example, this could be useful for coroutine wrappers which accept reference parameters
+but do not pass them to the underlying coroutine or pass them by value.
+
+.. code-block:: c++
+
+ Task<int> coro(int a) { co_return a + 1; }
+ [[clang::coro_wrapper, clang::coro_disable_lifetimebound]] Task<int> coro_wrapper(const int& a) {
+ return coro(a + 1);
+ }
+ void use() {
+ auto task = coro_wrapper(1); // use of temporary is fine as the argument is not lifetime bound.
+ }
+
+.. _`documentation`: https://clang.llvm.org/docs/AttributeReference.html#lifetimebound
+.. _`CRT`: https://clang.llvm.org/docs/AttributeReference.html#coro-return-type
+}];
+}
+
+def CountedByDocs : Documentation {
+ let Category = DocCatField;
+ let Content = [{
+Clang supports the ``counted_by`` attribute on the flexible array member of a
+structure in C. The argument for the attribute is the name of a field member
+holding the count of elements in the flexible array. This information can be
+used to improve the results of the array bound sanitizer and the
+``__builtin_dynamic_object_size`` builtin. The ``count`` field member must be
+within the same non-anonymous, enclosing struct as the flexible array member.
+
+This example specifies that the flexible array member ``array`` has the number
+of elements allocated for it in ``count``:
+
+.. code-block:: c
+
+ struct bar;
+
+ struct foo {
+ size_t count;
+ char other;
+ struct bar *array[] __attribute__((counted_by(count)));
+ };
+
+This establishes a relationship between ``array`` and ``count``. Specifically,
+``array`` must have at least ``count`` number of elements available. It's the
+user's responsibility to ensure that this relationship is maintained through
+changes to the structure.
+
+In the following example, the allocated array erroneously has fewer elements
+than what's specified by ``p->count``. This would result in an out-of-bounds
+access not being detected.
+
+.. code-block:: c
+
+ #define SIZE_INCR 42
+
+ struct foo *p;
+
+ void foo_alloc(size_t count) {
+ p = malloc(MAX(sizeof(struct foo),
+ offsetof(struct foo, array[0]) + count * sizeof(struct bar *)));
+ p->count = count + SIZE_INCR;
+ }
+
+The next example updates ``p->count``, but breaks the relationship requirement
+that ``p->array`` must have at least ``p->count`` number of elements available:
+
+.. code-block:: c
+
+ #define SIZE_INCR 42
+
+ struct foo *p;
+
+ void foo_alloc(size_t count) {
+ p = malloc(MAX(sizeof(struct foo),
+ offsetof(struct foo, array[0]) + count * sizeof(struct bar *)));
+ p->count = count;
+ }
+
+ void use_foo(int index, int val) {
+ p->count += SIZE_INCR + 1; /* 'count' is now larger than the number of elements of 'array'. */
+ p->array[index] = val; /* The sanitizer can't properly check this access. */
+ }
+
+In this example, an update to ``p->count`` maintains the relationship
+requirement:
+
+.. code-block:: c
+
+ void use_foo(int index, int val) {
+ if (p->count == 0)
+ return;
+ --p->count;
+ p->array[index] = val;
+ }
+ }];
+}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/AttrSubjectMatchRules.h b/contrib/llvm-project/clang/include/clang/Basic/AttrSubjectMatchRules.h
index 010cefcaf340..bec8122ea930 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/AttrSubjectMatchRules.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/AttrSubjectMatchRules.h
@@ -6,19 +6,24 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_BASIC_ATTR_SUBJECT_MATCH_RULES_H
-#define LLVM_CLANG_BASIC_ATTR_SUBJECT_MATCH_RULES_H
+#ifndef LLVM_CLANG_BASIC_ATTRSUBJECTMATCHRULES_H
+#define LLVM_CLANG_BASIC_ATTRSUBJECTMATCHRULES_H
-#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
namespace clang {
+
+class SourceRange;
+
namespace attr {
/// A list of all the recognized kinds of attributes.
enum SubjectMatchRule {
#define ATTR_MATCH_RULE(X, Spelling, IsAbstract) X,
#include "clang/Basic/AttrSubMatchRulesList.inc"
+ SubjectMatchRule_Last = -1
+#define ATTR_MATCH_RULE(X, Spelling, IsAbstract) +1
+#include "clang/Basic/AttrSubMatchRulesList.inc"
};
const char *getSubjectMatchRuleSpelling(SubjectMatchRule Rule);
diff --git a/contrib/llvm-project/clang/include/clang/Basic/AttributeCommonInfo.h b/contrib/llvm-project/clang/include/clang/Basic/AttributeCommonInfo.h
index 4be598e109fd..ef2ddf525c98 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/AttributeCommonInfo.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/AttributeCommonInfo.h
@@ -13,24 +13,27 @@
#ifndef LLVM_CLANG_BASIC_ATTRIBUTECOMMONINFO_H
#define LLVM_CLANG_BASIC_ATTRIBUTECOMMONINFO_H
+
#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/TokenKinds.h"
namespace clang {
-class IdentifierInfo;
+
class ASTRecordWriter;
+class IdentifierInfo;
class AttributeCommonInfo {
public:
/// The style used to specify an attribute.
enum Syntax {
/// __attribute__((...))
- AS_GNU,
+ AS_GNU = 1,
/// [[...]]
AS_CXX11,
/// [[...]]
- AS_C2x,
+ AS_C23,
/// __declspec(...)
AS_Declspec,
@@ -48,6 +51,13 @@ public:
// without adding related code to TableGen/ClangAttrEmitter.cpp.
/// Context-sensitive version of a keyword attribute.
AS_ContextSensitiveKeyword,
+
+ /// <vardecl> : <semantic>
+ AS_HLSLSemantic,
+
+ /// The attibute has no source code manifestation and is only created
+ /// implicitly.
+ AS_Implicit
};
enum Kind {
#define PARSED_ATTR(NAME) AT_##NAME,
@@ -64,68 +74,110 @@ private:
SourceRange AttrRange;
const SourceLocation ScopeLoc;
// Corresponds to the Kind enum.
+ LLVM_PREFERRED_TYPE(Kind)
unsigned AttrKind : 16;
/// Corresponds to the Syntax enum.
- unsigned SyntaxUsed : 3;
+ LLVM_PREFERRED_TYPE(Syntax)
+ unsigned SyntaxUsed : 4;
+ LLVM_PREFERRED_TYPE(bool)
unsigned SpellingIndex : 4;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsAlignas : 1;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsRegularKeywordAttribute : 1;
protected:
static constexpr unsigned SpellingNotCalculated = 0xf;
public:
- AttributeCommonInfo(SourceRange AttrRange)
- : AttrRange(AttrRange), ScopeLoc(), AttrKind(0), SyntaxUsed(0),
- SpellingIndex(SpellingNotCalculated) {}
-
- AttributeCommonInfo(SourceLocation AttrLoc)
- : AttrRange(AttrLoc), ScopeLoc(), AttrKind(0), SyntaxUsed(0),
- SpellingIndex(SpellingNotCalculated) {}
-
- AttributeCommonInfo(const IdentifierInfo *AttrName,
- const IdentifierInfo *ScopeName, SourceRange AttrRange,
- SourceLocation ScopeLoc, Syntax SyntaxUsed)
- : AttrName(AttrName), ScopeName(ScopeName), AttrRange(AttrRange),
- ScopeLoc(ScopeLoc),
- AttrKind(getParsedKind(AttrName, ScopeName, SyntaxUsed)),
- SyntaxUsed(SyntaxUsed), SpellingIndex(SpellingNotCalculated) {}
+ /// Combines information about the source-code form of an attribute,
+ /// including its syntax and spelling.
+ class Form {
+ public:
+ constexpr Form(Syntax SyntaxUsed, unsigned SpellingIndex, bool IsAlignas,
+ bool IsRegularKeywordAttribute)
+ : SyntaxUsed(SyntaxUsed), SpellingIndex(SpellingIndex),
+ IsAlignas(IsAlignas),
+ IsRegularKeywordAttribute(IsRegularKeywordAttribute) {}
+ constexpr Form(tok::TokenKind Tok)
+ : SyntaxUsed(AS_Keyword), SpellingIndex(SpellingNotCalculated),
+ IsAlignas(Tok == tok::kw_alignas),
+ IsRegularKeywordAttribute(tok::isRegularKeywordAttribute(Tok)) {}
+
+ Syntax getSyntax() const { return Syntax(SyntaxUsed); }
+ unsigned getSpellingIndex() const { return SpellingIndex; }
+ bool isAlignas() const { return IsAlignas; }
+ bool isRegularKeywordAttribute() const { return IsRegularKeywordAttribute; }
+
+ static Form GNU() { return AS_GNU; }
+ static Form CXX11() { return AS_CXX11; }
+ static Form C23() { return AS_C23; }
+ static Form Declspec() { return AS_Declspec; }
+ static Form Microsoft() { return AS_Microsoft; }
+ static Form Keyword(bool IsAlignas, bool IsRegularKeywordAttribute) {
+ return Form(AS_Keyword, SpellingNotCalculated, IsAlignas,
+ IsRegularKeywordAttribute);
+ }
+ static Form Pragma() { return AS_Pragma; }
+ static Form ContextSensitiveKeyword() { return AS_ContextSensitiveKeyword; }
+ static Form HLSLSemantic() { return AS_HLSLSemantic; }
+ static Form Implicit() { return AS_Implicit; }
+
+ private:
+ constexpr Form(Syntax SyntaxUsed)
+ : SyntaxUsed(SyntaxUsed), SpellingIndex(SpellingNotCalculated),
+ IsAlignas(0), IsRegularKeywordAttribute(0) {}
+
+ LLVM_PREFERRED_TYPE(Syntax)
+ unsigned SyntaxUsed : 4;
+ unsigned SpellingIndex : 4;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsAlignas : 1;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsRegularKeywordAttribute : 1;
+ };
AttributeCommonInfo(const IdentifierInfo *AttrName,
const IdentifierInfo *ScopeName, SourceRange AttrRange,
- SourceLocation ScopeLoc, Kind AttrKind, Syntax SyntaxUsed)
+ SourceLocation ScopeLoc, Kind AttrKind, Form FormUsed)
: AttrName(AttrName), ScopeName(ScopeName), AttrRange(AttrRange),
- ScopeLoc(ScopeLoc), AttrKind(AttrKind), SyntaxUsed(SyntaxUsed),
- SpellingIndex(SpellingNotCalculated) {}
+ ScopeLoc(ScopeLoc), AttrKind(AttrKind),
+ SyntaxUsed(FormUsed.getSyntax()),
+ SpellingIndex(FormUsed.getSpellingIndex()),
+ IsAlignas(FormUsed.isAlignas()),
+ IsRegularKeywordAttribute(FormUsed.isRegularKeywordAttribute()) {
+ assert(SyntaxUsed >= AS_GNU && SyntaxUsed <= AS_Implicit &&
+ "Invalid syntax!");
+ }
AttributeCommonInfo(const IdentifierInfo *AttrName,
const IdentifierInfo *ScopeName, SourceRange AttrRange,
- SourceLocation ScopeLoc, Kind AttrKind, Syntax SyntaxUsed,
- unsigned Spelling)
- : AttrName(AttrName), ScopeName(ScopeName), AttrRange(AttrRange),
- ScopeLoc(ScopeLoc), AttrKind(AttrKind), SyntaxUsed(SyntaxUsed),
- SpellingIndex(Spelling) {}
+ SourceLocation ScopeLoc, Form FormUsed)
+ : AttributeCommonInfo(
+ AttrName, ScopeName, AttrRange, ScopeLoc,
+ getParsedKind(AttrName, ScopeName, FormUsed.getSyntax()),
+ FormUsed) {}
AttributeCommonInfo(const IdentifierInfo *AttrName, SourceRange AttrRange,
- Syntax SyntaxUsed)
- : AttrName(AttrName), ScopeName(nullptr), AttrRange(AttrRange),
- ScopeLoc(), AttrKind(getParsedKind(AttrName, ScopeName, SyntaxUsed)),
- SyntaxUsed(SyntaxUsed), SpellingIndex(SpellingNotCalculated) {}
-
- AttributeCommonInfo(SourceRange AttrRange, Kind K, Syntax SyntaxUsed)
- : AttrName(nullptr), ScopeName(nullptr), AttrRange(AttrRange), ScopeLoc(),
- AttrKind(K), SyntaxUsed(SyntaxUsed),
- SpellingIndex(SpellingNotCalculated) {}
+ Form FormUsed)
+ : AttributeCommonInfo(AttrName, nullptr, AttrRange, SourceLocation(),
+ FormUsed) {}
- AttributeCommonInfo(SourceRange AttrRange, Kind K, Syntax SyntaxUsed,
- unsigned Spelling)
- : AttrName(nullptr), ScopeName(nullptr), AttrRange(AttrRange), ScopeLoc(),
- AttrKind(K), SyntaxUsed(SyntaxUsed), SpellingIndex(Spelling) {}
+ AttributeCommonInfo(SourceRange AttrRange, Kind K, Form FormUsed)
+ : AttributeCommonInfo(nullptr, nullptr, AttrRange, SourceLocation(), K,
+ FormUsed) {}
AttributeCommonInfo(AttributeCommonInfo &&) = default;
AttributeCommonInfo(const AttributeCommonInfo &) = default;
Kind getParsedKind() const { return Kind(AttrKind); }
Syntax getSyntax() const { return Syntax(SyntaxUsed); }
+ Form getForm() const {
+ return Form(getSyntax(), SpellingIndex, IsAlignas,
+ IsRegularKeywordAttribute);
+ }
const IdentifierInfo *getAttrName() const { return AttrName; }
+ void setAttrName(const IdentifierInfo *AttrNameII) { AttrName = AttrNameII; }
SourceLocation getLoc() const { return AttrRange.getBegin(); }
SourceRange getRange() const { return AttrRange; }
void setRange(SourceRange R) { AttrRange = R; }
@@ -143,28 +195,35 @@ public:
bool isMicrosoftAttribute() const { return SyntaxUsed == AS_Microsoft; }
bool isGNUScope() const;
+ bool isClangScope() const;
- bool isAlignasAttribute() const {
- // FIXME: Use a better mechanism to determine this.
- return getParsedKind() == AT_Aligned && isKeywordAttribute();
- }
+ bool isCXX11Attribute() const { return SyntaxUsed == AS_CXX11 || IsAlignas; }
- bool isCXX11Attribute() const {
- return SyntaxUsed == AS_CXX11 || isAlignasAttribute();
- }
+ bool isC23Attribute() const { return SyntaxUsed == AS_C23; }
- bool isC2xAttribute() const { return SyntaxUsed == AS_C2x; }
+ bool isAlignas() const {
+ // FIXME: In the current state, the IsAlignas member variable is only true
+ // with the C++ `alignas` keyword but not `_Alignas`. The following
+ // expression works around the otherwise lost information so it will return
+ // true for `alignas` or `_Alignas` while still returning false for things
+ // like `__attribute__((aligned))`.
+ return (getParsedKind() == AT_Aligned && isKeywordAttribute());
+ }
/// The attribute is spelled [[]] in either C or C++ mode, including standard
/// attributes spelled with a keyword, like alignas.
bool isStandardAttributeSyntax() const {
- return isCXX11Attribute() || isC2xAttribute();
+ return isCXX11Attribute() || isC23Attribute();
}
+ bool isGNUAttribute() const { return SyntaxUsed == AS_GNU; }
+
bool isKeywordAttribute() const {
return SyntaxUsed == AS_Keyword || SyntaxUsed == AS_ContextSensitiveKeyword;
}
+ bool isRegularKeywordAttribute() const { return IsRegularKeywordAttribute; }
+
bool isContextSensitiveKeywordAttribute() const {
return SyntaxUsed == AS_ContextSensitiveKeyword;
}
@@ -196,6 +255,19 @@ protected:
return SpellingIndex != SpellingNotCalculated;
}
};
+
+inline bool doesKeywordAttributeTakeArgs(tok::TokenKind Kind) {
+ switch (Kind) {
+ default:
+ return false;
+#define KEYWORD_ATTRIBUTE(NAME, HASARG, ...) \
+ case tok::kw_##NAME: \
+ return HASARG;
+#include "clang/Basic/RegularKeywordAttrInfo.inc"
+#undef KEYWORD_ATTRIBUTE
+ }
+}
+
} // namespace clang
#endif // LLVM_CLANG_BASIC_ATTRIBUTECOMMONINFO_H
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Attributes.h b/contrib/llvm-project/clang/include/clang/Basic/Attributes.h
index c69633decd57..61666a6f4d9a 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Attributes.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Attributes.h
@@ -9,33 +9,19 @@
#ifndef LLVM_CLANG_BASIC_ATTRIBUTES_H
#define LLVM_CLANG_BASIC_ATTRIBUTES_H
-#include "clang/Basic/LangOptions.h"
-#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/AttributeCommonInfo.h"
namespace clang {
class IdentifierInfo;
-
-enum class AttrSyntax {
- /// Is the identifier known as a GNU-style attribute?
- GNU,
- /// Is the identifier known as a __declspec-style attribute?
- Declspec,
- /// Is the identifier known as a [] Microsoft-style attribute?
- Microsoft,
- // Is the identifier known as a C++-style attribute?
- CXX,
- // Is the identifier known as a C-style attribute?
- C,
- // Is the identifier known as a pragma attribute?
- Pragma
-};
+class LangOptions;
+class TargetInfo;
/// Return the version number associated with the attribute if we
/// recognize and implement the attribute specified by the given information.
-int hasAttribute(AttrSyntax Syntax, const IdentifierInfo *Scope,
- const IdentifierInfo *Attr, const TargetInfo &Target,
- const LangOptions &LangOpts);
+int hasAttribute(AttributeCommonInfo::Syntax Syntax,
+ const IdentifierInfo *Scope, const IdentifierInfo *Attr,
+ const TargetInfo &Target, const LangOptions &LangOpts);
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinHeaders.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinHeaders.def
new file mode 100644
index 000000000000..8e4a2f9bee9a
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinHeaders.def
@@ -0,0 +1,43 @@
+//===--- BuiltinHeaders.def - Builtin header info database ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the standard builtin function header locations. Users of
+// this file must define the HEADER macro to make use of this information.
+//
+//===----------------------------------------------------------------------===//
+
+HEADER(NO_HEADER, nullptr)
+HEADER(BLOCKS_H, "Blocks.h")
+HEADER(COMPLEX_H, "complex.h")
+HEADER(CTYPE_H, "ctype.h")
+HEADER(EMMINTRIN_H, "emmintrin.h")
+HEADER(FOUNDATION_NSOBJCRUNTIME_H, "Foundation/NSObjCRuntime.h")
+HEADER(IMMINTRIN_H, "immintrin.h")
+HEADER(INTRIN_H, "intrin.h")
+HEADER(MALLOC_H, "malloc.h")
+HEADER(MATH_H, "math.h")
+HEADER(MEMORY, "memory")
+HEADER(OBJC_MESSAGE_H, "objc/message.h")
+HEADER(OBJC_OBJC_AUTO_H, "objc/objc-auto.h")
+HEADER(OBJC_OBJC_EXCEPTION_H, "objc/objc-exception.h")
+HEADER(OBJC_OBJC_SYNC_H, "objc/objc-sync.h")
+HEADER(OBJC_RUNTIME_H, "objc/runtime.h")
+HEADER(PTHREAD_H, "pthread.h")
+HEADER(SETJMPEX_H, "setjmpex.h")
+HEADER(SETJMP_H, "setjmp.h")
+HEADER(STDARG_H, "stdarg.h")
+HEADER(STDIO_H, "stdio.h")
+HEADER(STDLIB_H, "stdlib.h")
+HEADER(STRINGS_H, "strings.h")
+HEADER(STRING_H, "string.h")
+HEADER(UNISTD_H, "unistd.h")
+HEADER(UTILITY, "utility")
+HEADER(WCHAR_H, "wchar.h")
+HEADER(XMMINTRIN_H, "xmmintrin.h")
+
+#undef HEADER
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Builtins.def b/contrib/llvm-project/clang/include/clang/Basic/Builtins.def
index 0e3898537bcf..4dcbaf8a7bea 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Builtins.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/Builtins.def
@@ -26,6 +26,7 @@
// i -> int
// h -> half (__fp16, OpenCL)
// x -> half (_Float16)
+// y -> half (__bf16)
// f -> float
// d -> double
// z -> size_t
@@ -38,6 +39,8 @@
// A -> "reference" to __builtin_va_list
// V -> Vector, followed by the number of elements and the base type.
// q -> Scalable vector, followed by the number of elements and the base type.
+// Q -> target builtin type, followed by a character to distinguish the builtin type
+// Qa -> AArch64 svcount_t builtin type.
// E -> ext_vector, followed by the number of elements and the base type.
// X -> _Complex, followed by the base type.
// Y -> ptrdiff_t
@@ -80,9 +83,9 @@
// builtin even if type doesn't match signature, and don't warn if we
// can't be sure the type is right
// F -> this is a libc/libm function with a '__builtin_' prefix added.
-// f -> this is a libc/libm function without the '__builtin_' prefix. It can
-// be followed by ':headername:' to state which header this function
-// comes from.
+// f -> this is a libc/libm function without a '__builtin_' prefix, or with
+// 'z', a C++ standard library function in namespace std::. This builtin
+// is disableable by '-fno-builtin-foo' / '-fno-builtin-std-foo'.
// h -> this function requires a specific header or an explicit declaration.
// i -> this is a runtime library implemented function without the
// '__builtin_' prefix. It will be implemented in compiler-rt or libgcc.
@@ -96,12 +99,15 @@
// S:N: -> similar to the s:N: attribute, but the function is like vscanf
// in that it accepts its arguments as a va_list rather than
// through an ellipsis
-// e -> const, but only when -fno-math-errno
+// e -> const, but only when -fno-math-errno and FP exceptions are ignored
+// g -> const when FP exceptions are ignored
// j -> returns_twice (like setjmp)
// u -> arguments are not evaluated for their side-effects
// V:N: -> requires vectors of at least N bits to be legal
// C<N,M_0,...,M_k> -> callback behavior: argument N is called with argument
// M_0, ..., M_k as payload
+// z -> this is a function in (possibly-versioned) namespace std
+// E -> this function can be constant evaluated by Clang frontend
// FIXME: gcc has nonnull
#if defined(BUILTIN) && !defined(LIBBUILTIN)
@@ -118,16 +124,16 @@ BUILTIN(__builtin_atan2f, "fff" , "Fne")
BUILTIN(__builtin_atan2l, "LdLdLd", "Fne")
BUILTIN(__builtin_atan2f128, "LLdLLdLLd", "Fne")
BUILTIN(__builtin_abs , "ii" , "ncF")
-BUILTIN(__builtin_copysign, "ddd", "ncF")
-BUILTIN(__builtin_copysignf, "fff", "ncF")
+BUILTIN(__builtin_copysign, "ddd", "ncFE")
+BUILTIN(__builtin_copysignf, "fff", "ncFE")
BUILTIN(__builtin_copysignf16, "hhh", "ncF")
-BUILTIN(__builtin_copysignl, "LdLdLd", "ncF")
-BUILTIN(__builtin_copysignf128, "LLdLLdLLd", "ncF")
-BUILTIN(__builtin_fabs , "dd" , "ncF")
-BUILTIN(__builtin_fabsf, "ff" , "ncF")
-BUILTIN(__builtin_fabsl, "LdLd", "ncF")
+BUILTIN(__builtin_copysignl, "LdLdLd", "ncFE")
+BUILTIN(__builtin_copysignf128, "LLdLLdLLd", "ncFE")
+BUILTIN(__builtin_fabs , "dd" , "ncFE")
+BUILTIN(__builtin_fabsf, "ff" , "ncFE")
+BUILTIN(__builtin_fabsl, "LdLd", "ncFE")
BUILTIN(__builtin_fabsf16, "hh" , "ncF")
-BUILTIN(__builtin_fabsf128, "LLdLLd", "ncF")
+BUILTIN(__builtin_fabsf128, "LLdLLd", "ncFE")
BUILTIN(__builtin_fmod , "ddd" , "Fne")
BUILTIN(__builtin_fmodf, "fff" , "Fne")
BUILTIN(__builtin_fmodf16, "hhh" , "Fne")
@@ -137,32 +143,38 @@ BUILTIN(__builtin_frexp , "ddi*" , "Fn")
BUILTIN(__builtin_frexpf, "ffi*" , "Fn")
BUILTIN(__builtin_frexpl, "LdLdi*", "Fn")
BUILTIN(__builtin_frexpf128, "LLdLLdi*", "Fn")
-BUILTIN(__builtin_huge_val, "d", "nc")
-BUILTIN(__builtin_huge_valf, "f", "nc")
-BUILTIN(__builtin_huge_vall, "Ld", "nc")
-BUILTIN(__builtin_huge_valf128, "LLd", "nc")
-BUILTIN(__builtin_inf , "d" , "nc")
-BUILTIN(__builtin_inff , "f" , "nc")
-BUILTIN(__builtin_infl , "Ld" , "nc")
-BUILTIN(__builtin_inff128 , "LLd" , "nc")
+BUILTIN(__builtin_frexpf16, "hhi*" , "Fn")
+BUILTIN(__builtin_huge_val, "d", "ncE")
+BUILTIN(__builtin_huge_valf, "f", "ncE")
+BUILTIN(__builtin_huge_vall, "Ld", "ncE")
+BUILTIN(__builtin_huge_valf16, "x", "ncE")
+BUILTIN(__builtin_huge_valf128, "LLd", "ncE")
+BUILTIN(__builtin_inf , "d" , "ncE")
+BUILTIN(__builtin_inff , "f" , "ncE")
+BUILTIN(__builtin_infl , "Ld" , "ncE")
+BUILTIN(__builtin_inff16 , "x" , "ncE")
+BUILTIN(__builtin_inff128 , "LLd" , "ncE")
BUILTIN(__builtin_labs , "LiLi" , "Fnc")
BUILTIN(__builtin_llabs, "LLiLLi", "Fnc")
BUILTIN(__builtin_ldexp , "ddi" , "Fne")
BUILTIN(__builtin_ldexpf, "ffi" , "Fne")
BUILTIN(__builtin_ldexpl, "LdLdi", "Fne")
BUILTIN(__builtin_ldexpf128, "LLdLLdi", "Fne")
+BUILTIN(__builtin_ldexpf16, "hhi", "Fne")
BUILTIN(__builtin_modf , "ddd*" , "Fn")
BUILTIN(__builtin_modff, "fff*" , "Fn")
BUILTIN(__builtin_modfl, "LdLdLd*", "Fn")
BUILTIN(__builtin_modff128, "LLdLLdLLd*", "Fn")
-BUILTIN(__builtin_nan, "dcC*" , "FnU")
-BUILTIN(__builtin_nanf, "fcC*" , "FnU")
-BUILTIN(__builtin_nanl, "LdcC*", "FnU")
-BUILTIN(__builtin_nanf128, "LLdcC*", "FnU")
-BUILTIN(__builtin_nans, "dcC*" , "FnU")
-BUILTIN(__builtin_nansf, "fcC*" , "FnU")
-BUILTIN(__builtin_nansl, "LdcC*", "FnU")
-BUILTIN(__builtin_nansf128, "LLdcC*", "FnU")
+BUILTIN(__builtin_nan, "dcC*" , "FnUE")
+BUILTIN(__builtin_nanf, "fcC*" , "FnUE")
+BUILTIN(__builtin_nanl, "LdcC*", "FnUE")
+BUILTIN(__builtin_nanf16, "xcC*", "FnUE")
+BUILTIN(__builtin_nanf128, "LLdcC*", "FnUE")
+BUILTIN(__builtin_nans, "dcC*" , "FnUE")
+BUILTIN(__builtin_nansf, "fcC*" , "FnUE")
+BUILTIN(__builtin_nansl, "LdcC*", "FnUE")
+BUILTIN(__builtin_nansf16, "xcC*", "FnUE")
+BUILTIN(__builtin_nansf128, "LLdcC*", "FnUE")
BUILTIN(__builtin_powi , "ddi" , "Fnc")
BUILTIN(__builtin_powif, "ffi" , "Fnc")
BUILTIN(__builtin_powil, "LdLdi", "Fnc")
@@ -233,6 +245,11 @@ BUILTIN(__builtin_exp2f, "ff" , "Fne")
BUILTIN(__builtin_exp2f16, "hh" , "Fne")
BUILTIN(__builtin_exp2l, "LdLd", "Fne")
BUILTIN(__builtin_exp2f128, "LLdLLd" , "Fne")
+BUILTIN(__builtin_exp10 , "dd" , "Fne")
+BUILTIN(__builtin_exp10f, "ff" , "Fne")
+BUILTIN(__builtin_exp10f16, "hh" , "Fne")
+BUILTIN(__builtin_exp10l, "LdLd", "Fne")
+BUILTIN(__builtin_exp10f128, "LLdLLd" , "Fne")
BUILTIN(__builtin_expm1 , "dd", "Fne")
BUILTIN(__builtin_expm1f, "ff", "Fne")
BUILTIN(__builtin_expm1l, "LdLd", "Fne")
@@ -251,16 +268,16 @@ BUILTIN(__builtin_fmaf, "ffff", "Fne")
BUILTIN(__builtin_fmaf16, "hhhh", "Fne")
BUILTIN(__builtin_fmal, "LdLdLdLd", "Fne")
BUILTIN(__builtin_fmaf128, "LLdLLdLLdLLd", "Fne")
-BUILTIN(__builtin_fmax, "ddd", "Fnc")
-BUILTIN(__builtin_fmaxf, "fff", "Fnc")
-BUILTIN(__builtin_fmaxf16, "hhh", "Fnc")
-BUILTIN(__builtin_fmaxl, "LdLdLd", "Fnc")
-BUILTIN(__builtin_fmaxf128, "LLdLLdLLd", "Fnc")
-BUILTIN(__builtin_fmin, "ddd", "Fnc")
-BUILTIN(__builtin_fminf, "fff", "Fnc")
-BUILTIN(__builtin_fminf16, "hhh", "Fnc")
-BUILTIN(__builtin_fminl, "LdLdLd", "Fnc")
-BUILTIN(__builtin_fminf128, "LLdLLdLLd", "Fnc")
+BUILTIN(__builtin_fmax, "ddd", "FncE")
+BUILTIN(__builtin_fmaxf, "fff", "FncE")
+BUILTIN(__builtin_fmaxf16, "hhh", "FncE")
+BUILTIN(__builtin_fmaxl, "LdLdLd", "FncE")
+BUILTIN(__builtin_fmaxf128, "LLdLLdLLd", "FncE")
+BUILTIN(__builtin_fmin, "ddd", "FncE")
+BUILTIN(__builtin_fminf, "fff", "FncE")
+BUILTIN(__builtin_fminf16, "hhh", "FncE")
+BUILTIN(__builtin_fminl, "LdLdLd", "FncE")
+BUILTIN(__builtin_fminf128, "LLdLLdLLd", "FncE")
BUILTIN(__builtin_hypot , "ddd" , "Fne")
BUILTIN(__builtin_hypotf, "fff" , "Fne")
BUILTIN(__builtin_hypotl, "LdLdLd", "Fne")
@@ -342,6 +359,11 @@ BUILTIN(__builtin_roundf, "ff" , "Fnc")
BUILTIN(__builtin_roundf16, "hh" , "Fnc")
BUILTIN(__builtin_roundl, "LdLd" , "Fnc")
BUILTIN(__builtin_roundf128, "LLdLLd" , "Fnc")
+BUILTIN(__builtin_roundeven, "dd" , "Fnc")
+BUILTIN(__builtin_roundevenf, "ff" , "Fnc")
+BUILTIN(__builtin_roundevenf16, "hh" , "Fnc")
+BUILTIN(__builtin_roundevenl, "LdLd" , "Fnc")
+BUILTIN(__builtin_roundevenf128, "LLdLLd" , "Fnc")
BUILTIN(__builtin_scalbln , "ddLi", "Fne")
BUILTIN(__builtin_scalblnf, "ffLi", "Fne")
BUILTIN(__builtin_scalblnl, "LdLdLi", "Fne")
@@ -384,6 +406,7 @@ BUILTIN(__builtin_truncf16, "hh", "Fnc")
// Access to floating point environment
BUILTIN(__builtin_flt_rounds, "i", "n")
+BUILTIN(__builtin_set_flt_rounds, "vi", "n")
// C99 complex builtins
BUILTIN(__builtin_cabs, "dXd", "Fne")
@@ -454,7 +477,7 @@ BUILTIN(__builtin_ctanhf, "XfXf", "Fne")
BUILTIN(__builtin_ctanhl, "XLdXLd", "Fne")
// GCC-compatible C99 CMPLX implementation.
-BUILTIN(__builtin_complex, "v.", "nct")
+BUILTIN(__builtin_complex, "v.", "nctE")
// FP Comparisons.
BUILTIN(__builtin_isgreater , "i.", "Fnct")
@@ -465,12 +488,16 @@ BUILTIN(__builtin_islessgreater , "i.", "Fnct")
BUILTIN(__builtin_isunordered , "i.", "Fnct")
// Unary FP classification
-BUILTIN(__builtin_fpclassify, "iiiiii.", "Fnct")
-BUILTIN(__builtin_isfinite, "i.", "Fnct")
-BUILTIN(__builtin_isinf, "i.", "Fnct")
-BUILTIN(__builtin_isinf_sign, "i.", "Fnct")
-BUILTIN(__builtin_isnan, "i.", "Fnct")
-BUILTIN(__builtin_isnormal, "i.", "Fnct")
+BUILTIN(__builtin_fpclassify, "iiiiii.", "FnctE")
+BUILTIN(__builtin_isfinite, "i.", "FnctE")
+BUILTIN(__builtin_isinf, "i.", "FnctE")
+BUILTIN(__builtin_isinf_sign, "i.", "FnctE")
+BUILTIN(__builtin_isnan, "i.", "FnctE")
+BUILTIN(__builtin_isnormal, "i.", "FnctE")
+BUILTIN(__builtin_issubnormal,"i.", "FnctE")
+BUILTIN(__builtin_iszero, "i.", "FnctE")
+BUILTIN(__builtin_issignaling,"i.", "FnctE")
+BUILTIN(__builtin_isfpclass, "i.", "nctE")
// FP signbit builtins
BUILTIN(__builtin_signbit, "i.", "Fnct")
@@ -484,103 +511,102 @@ BUILTIN(__builtin_canonicalizef16, "hh", "nc")
BUILTIN(__builtin_canonicalizel, "LdLd", "nc")
// Builtins for arithmetic.
-BUILTIN(__builtin_clzs , "iUs" , "nc")
-BUILTIN(__builtin_clz , "iUi" , "nc")
-BUILTIN(__builtin_clzl , "iULi" , "nc")
-BUILTIN(__builtin_clzll, "iULLi", "nc")
+BUILTIN(__builtin_clzs , "iUs" , "ncE")
+BUILTIN(__builtin_clz , "iUi" , "ncE")
+BUILTIN(__builtin_clzl , "iULi" , "ncE")
+BUILTIN(__builtin_clzll, "iULLi", "ncE")
// TODO: int clzimax(uintmax_t)
-BUILTIN(__builtin_ctzs , "iUs" , "nc")
-BUILTIN(__builtin_ctz , "iUi" , "nc")
-BUILTIN(__builtin_ctzl , "iULi" , "nc")
-BUILTIN(__builtin_ctzll, "iULLi", "nc")
+BUILTIN(__builtin_ctzs , "iUs" , "ncE")
+BUILTIN(__builtin_ctz , "iUi" , "ncE")
+BUILTIN(__builtin_ctzl , "iULi" , "ncE")
+BUILTIN(__builtin_ctzll, "iULLi", "ncE")
// TODO: int ctzimax(uintmax_t)
-BUILTIN(__builtin_ffs , "ii" , "Fnc")
-BUILTIN(__builtin_ffsl , "iLi" , "Fnc")
-BUILTIN(__builtin_ffsll, "iLLi", "Fnc")
-BUILTIN(__builtin_parity , "iUi" , "nc")
-BUILTIN(__builtin_parityl , "iULi" , "nc")
-BUILTIN(__builtin_parityll, "iULLi", "nc")
-BUILTIN(__builtin_popcount , "iUi" , "nc")
-BUILTIN(__builtin_popcountl , "iULi" , "nc")
-BUILTIN(__builtin_popcountll, "iULLi", "nc")
-BUILTIN(__builtin_clrsb , "ii" , "nc")
-BUILTIN(__builtin_clrsbl , "iLi" , "nc")
-BUILTIN(__builtin_clrsbll, "iLLi", "nc")
+BUILTIN(__builtin_ffs , "ii" , "FncE")
+BUILTIN(__builtin_ffsl , "iLi" , "FncE")
+BUILTIN(__builtin_ffsll, "iLLi", "FncE")
+BUILTIN(__builtin_parity , "iUi" , "ncE")
+BUILTIN(__builtin_parityl , "iULi" , "ncE")
+BUILTIN(__builtin_parityll, "iULLi", "ncE")
+BUILTIN(__builtin_popcount , "iUi" , "ncE")
+BUILTIN(__builtin_popcountl , "iULi" , "ncE")
+BUILTIN(__builtin_popcountll, "iULLi", "ncE")
+BUILTIN(__builtin_clrsb , "ii" , "ncE")
+BUILTIN(__builtin_clrsbl , "iLi" , "ncE")
+BUILTIN(__builtin_clrsbll, "iLLi", "ncE")
// The following builtins rely on that char == 8 bits, short == 16 bits and that
// there exists native types on the target that are 32- and 64-bits wide, unless
// these conditions are fulfilled these builtins will operate on a not intended
// bitwidth.
-BUILTIN(__builtin_bswap16, "UsUs", "nc")
-BUILTIN(__builtin_bswap32, "UZiUZi", "nc")
-BUILTIN(__builtin_bswap64, "UWiUWi", "nc")
-
-BUILTIN(__builtin_bitreverse8, "UcUc", "nc")
-BUILTIN(__builtin_bitreverse16, "UsUs", "nc")
-BUILTIN(__builtin_bitreverse32, "UZiUZi", "nc")
-BUILTIN(__builtin_bitreverse64, "UWiUWi", "nc")
-
-BUILTIN(__builtin_rotateleft8, "UcUcUc", "nc")
-BUILTIN(__builtin_rotateleft16, "UsUsUs", "nc")
-BUILTIN(__builtin_rotateleft32, "UZiUZiUZi", "nc")
-BUILTIN(__builtin_rotateleft64, "UWiUWiUWi", "nc")
-BUILTIN(__builtin_rotateright8, "UcUcUc", "nc")
-BUILTIN(__builtin_rotateright16, "UsUsUs", "nc")
-BUILTIN(__builtin_rotateright32, "UZiUZiUZi", "nc")
-BUILTIN(__builtin_rotateright64, "UWiUWiUWi", "nc")
+BUILTIN(__builtin_bswap16, "UsUs", "ncE")
+BUILTIN(__builtin_bswap32, "UZiUZi", "ncE")
+BUILTIN(__builtin_bswap64, "UWiUWi", "ncE")
+
+BUILTIN(__builtin_bitreverse8, "UcUc", "ncE")
+BUILTIN(__builtin_bitreverse16, "UsUs", "ncE")
+BUILTIN(__builtin_bitreverse32, "UZiUZi", "ncE")
+BUILTIN(__builtin_bitreverse64, "UWiUWi", "ncE")
+
+BUILTIN(__builtin_rotateleft8, "UcUcUc", "ncE")
+BUILTIN(__builtin_rotateleft16, "UsUsUs", "ncE")
+BUILTIN(__builtin_rotateleft32, "UZiUZiUZi", "ncE")
+BUILTIN(__builtin_rotateleft64, "UWiUWiUWi", "ncE")
+BUILTIN(__builtin_rotateright8, "UcUcUc", "ncE")
+BUILTIN(__builtin_rotateright16, "UsUsUs", "ncE")
+BUILTIN(__builtin_rotateright32, "UZiUZiUZi", "ncE")
+BUILTIN(__builtin_rotateright64, "UWiUWiUWi", "ncE")
// Random GCC builtins
BUILTIN(__builtin_calloc, "v*zz", "nF")
-BUILTIN(__builtin_constant_p, "i.", "nctu")
-BUILTIN(__builtin_classify_type, "i.", "nctu")
-BUILTIN(__builtin___CFStringMakeConstantString, "FC*cC*", "nc")
-BUILTIN(__builtin___NSStringMakeConstantString, "FC*cC*", "nc")
+BUILTIN(__builtin_constant_p, "i.", "nctuE")
+BUILTIN(__builtin_classify_type, "i.", "nctuE")
+BUILTIN(__builtin___CFStringMakeConstantString, "FC*cC*", "ncE")
+BUILTIN(__builtin___NSStringMakeConstantString, "FC*cC*", "ncE")
BUILTIN(__builtin_va_start, "vA.", "nt")
BUILTIN(__builtin_va_end, "vA", "n")
BUILTIN(__builtin_va_copy, "vAA", "n")
BUILTIN(__builtin_stdarg_start, "vA.", "nt")
-BUILTIN(__builtin_assume_aligned, "v*vC*z.", "nc")
-BUILTIN(__builtin_bcmp, "ivC*vC*z", "Fn")
-BUILTIN(__builtin_bcopy, "vv*v*z", "n")
+BUILTIN(__builtin_assume_aligned, "v*vC*z.", "nctE")
+BUILTIN(__builtin_bcmp, "ivC*vC*z", "FnE")
+BUILTIN(__builtin_bcopy, "vvC*v*z", "nF")
BUILTIN(__builtin_bzero, "vv*z", "nF")
-BUILTIN(__builtin_fprintf, "iP*cC*.", "Fp:1:")
BUILTIN(__builtin_free, "vv*", "nF")
BUILTIN(__builtin_malloc, "v*z", "nF")
-BUILTIN(__builtin_memchr, "v*vC*iz", "nF")
-BUILTIN(__builtin_memcmp, "ivC*vC*z", "nF")
-BUILTIN(__builtin_memcpy, "v*v*vC*z", "nF")
-BUILTIN(__builtin_memcpy_inline, "vv*vC*Iz", "nt")
-BUILTIN(__builtin_memmove, "v*v*vC*z", "nF")
+BUILTIN(__builtin_memchr, "v*vC*iz", "nFE")
+BUILTIN(__builtin_memcmp, "ivC*vC*z", "nFE")
+BUILTIN(__builtin_memcpy, "v*v*vC*z", "nFE")
+BUILTIN(__builtin_memcpy_inline, "vv*vC*Iz", "n")
+BUILTIN(__builtin_memmove, "v*v*vC*z", "nFE")
BUILTIN(__builtin_mempcpy, "v*v*vC*z", "nF")
BUILTIN(__builtin_memset, "v*v*iz", "nF")
-BUILTIN(__builtin_printf, "icC*.", "Fp:0:")
+BUILTIN(__builtin_memset_inline, "vv*iIz", "n")
BUILTIN(__builtin_stpcpy, "c*c*cC*", "nF")
BUILTIN(__builtin_stpncpy, "c*c*cC*z", "nF")
BUILTIN(__builtin_strcasecmp, "icC*cC*", "nF")
BUILTIN(__builtin_strcat, "c*c*cC*", "nF")
-BUILTIN(__builtin_strchr, "c*cC*i", "nF")
-BUILTIN(__builtin_strcmp, "icC*cC*", "nF")
+BUILTIN(__builtin_strchr, "c*cC*i", "nFE")
+BUILTIN(__builtin_strcmp, "icC*cC*", "nFE")
BUILTIN(__builtin_strcpy, "c*c*cC*", "nF")
BUILTIN(__builtin_strcspn, "zcC*cC*", "nF")
BUILTIN(__builtin_strdup, "c*cC*", "nF")
-BUILTIN(__builtin_strlen, "zcC*", "nF")
+BUILTIN(__builtin_strlen, "zcC*", "nFE")
BUILTIN(__builtin_strncasecmp, "icC*cC*z", "nF")
BUILTIN(__builtin_strncat, "c*c*cC*z", "nF")
-BUILTIN(__builtin_strncmp, "icC*cC*z", "nF")
+BUILTIN(__builtin_strncmp, "icC*cC*z", "nFE")
BUILTIN(__builtin_strncpy, "c*c*cC*z", "nF")
BUILTIN(__builtin_strndup, "c*cC*z", "nF")
BUILTIN(__builtin_strpbrk, "c*cC*cC*", "nF")
BUILTIN(__builtin_strrchr, "c*cC*i", "nF")
BUILTIN(__builtin_strspn, "zcC*cC*", "nF")
BUILTIN(__builtin_strstr, "c*cC*cC*", "nF")
-BUILTIN(__builtin_wcschr, "w*wC*w", "nF")
-BUILTIN(__builtin_wcscmp, "iwC*wC*", "nF")
-BUILTIN(__builtin_wcslen, "zwC*", "nF")
-BUILTIN(__builtin_wcsncmp, "iwC*wC*z", "nF")
-BUILTIN(__builtin_wmemchr, "w*wC*wz", "nF")
-BUILTIN(__builtin_wmemcmp, "iwC*wC*z", "nF")
-BUILTIN(__builtin_wmemcpy, "w*w*wC*z", "nF")
-BUILTIN(__builtin_wmemmove, "w*w*wC*z", "nF")
+BUILTIN(__builtin_wcschr, "w*wC*w", "nFE")
+BUILTIN(__builtin_wcscmp, "iwC*wC*", "nFE")
+BUILTIN(__builtin_wcslen, "zwC*", "nFE")
+BUILTIN(__builtin_wcsncmp, "iwC*wC*z", "nFE")
+BUILTIN(__builtin_wmemchr, "w*wC*wz", "nFE")
+BUILTIN(__builtin_wmemcmp, "iwC*wC*z", "nFE")
+BUILTIN(__builtin_wmemcpy, "w*w*wC*z", "nFE")
+BUILTIN(__builtin_wmemmove, "w*w*wC*z", "nFE")
BUILTIN(__builtin_realloc, "v*v*z", "nF")
BUILTIN(__builtin_return_address, "v*IUi", "n")
BUILTIN(__builtin_extract_return_addr, "v*v*", "n")
@@ -589,14 +615,24 @@ BUILTIN(__builtin___clear_cache, "vc*c*", "n")
BUILTIN(__builtin_setjmp, "iv**", "j")
BUILTIN(__builtin_longjmp, "vv**i", "r")
BUILTIN(__builtin_unwind_init, "v", "")
-BUILTIN(__builtin_eh_return_data_regno, "iIi", "nc")
-BUILTIN(__builtin_snprintf, "ic*zcC*.", "nFp:2:")
-BUILTIN(__builtin_sprintf, "ic*cC*.", "nFP:1:")
-BUILTIN(__builtin_vsnprintf, "ic*zcC*a", "nFP:2:")
-BUILTIN(__builtin_vsprintf, "ic*cC*a", "nFP:1:")
+BUILTIN(__builtin_eh_return_data_regno, "iIi", "ncE")
+BUILTIN(__builtin_fprintf, "iP*RcC*R.", "nFp:1:")
+BUILTIN(__builtin_printf, "icC*R.", "nFp:0:")
+BUILTIN(__builtin_sprintf, "ic*RcC*R.", "nFp:1:")
+BUILTIN(__builtin_snprintf, "ic*RzcC*R.", "nFp:2:")
+BUILTIN(__builtin_vprintf, "icC*Ra", "nFP:0:")
+BUILTIN(__builtin_vfprintf, "iP*RcC*Ra", "nFP:1:")
+BUILTIN(__builtin_vsprintf, "ic*RcC*Ra", "nFP:1:")
+BUILTIN(__builtin_vsnprintf, "ic*RzcC*Ra", "nFP:2:")
+BUILTIN(__builtin_fscanf, "iP*RcC*R.", "Fs:1:")
+BUILTIN(__builtin_scanf, "icC*R.", "Fs:0:")
+BUILTIN(__builtin_sscanf, "icC*RcC*R.", "Fs:1:")
+BUILTIN(__builtin_vfscanf, "iP*RcC*Ra", "FS:1:")
+BUILTIN(__builtin_vscanf, "icC*Ra", "FS:0:")
+BUILTIN(__builtin_vsscanf, "icC*RcC*Ra", "FS:1:")
BUILTIN(__builtin_thread_pointer, "v*", "nc")
-BUILTIN(__builtin_launder, "v*v*", "nt")
-LANGBUILTIN(__builtin_is_constant_evaluated, "b", "n", CXX_LANG)
+BUILTIN(__builtin_launder, "v*v*", "ntE")
+LANGBUILTIN(__builtin_is_constant_evaluated, "b", "nE", CXX_LANG)
// GCC exception builtins
BUILTIN(__builtin_eh_return, "vzv*", "r") // FIXME: Takes intptr_t, not size_t!
@@ -607,8 +643,8 @@ BUILTIN(__builtin_dwarf_sp_column, "Ui", "n")
BUILTIN(__builtin_extend_pointer, "ULLiv*", "n") // _Unwind_Word == uint64_t
// GCC Object size checking builtins
-BUILTIN(__builtin_object_size, "zvC*i", "nu")
-BUILTIN(__builtin_dynamic_object_size, "zvC*i", "nu") // Clang only.
+BUILTIN(__builtin_object_size, "zvC*i", "nuE")
+BUILTIN(__builtin_dynamic_object_size, "zvC*i", "nuE") // Clang only.
BUILTIN(__builtin___memcpy_chk, "v*v*vC*zz", "nF")
BUILTIN(__builtin___memccpy_chk, "v*v*vC*izz", "nF")
BUILTIN(__builtin___memmove_chk, "v*v*vC*zz", "nF")
@@ -622,18 +658,18 @@ BUILTIN(__builtin___strlcpy_chk, "zc*cC*zz", "nF")
BUILTIN(__builtin___strncat_chk, "c*c*cC*zz", "nF")
BUILTIN(__builtin___strncpy_chk, "c*c*cC*zz", "nF")
BUILTIN(__builtin___stpncpy_chk, "c*c*cC*zz", "nF")
-BUILTIN(__builtin___snprintf_chk, "ic*zizcC*.", "Fp:4:")
-BUILTIN(__builtin___sprintf_chk, "ic*izcC*.", "Fp:3:")
-BUILTIN(__builtin___vsnprintf_chk, "ic*zizcC*a", "FP:4:")
-BUILTIN(__builtin___vsprintf_chk, "ic*izcC*a", "FP:3:")
-BUILTIN(__builtin___fprintf_chk, "iP*icC*.", "Fp:2:")
-BUILTIN(__builtin___printf_chk, "iicC*.", "Fp:1:")
-BUILTIN(__builtin___vfprintf_chk, "iP*icC*a", "FP:2:")
-BUILTIN(__builtin___vprintf_chk, "iicC*a", "FP:1:")
+BUILTIN(__builtin___snprintf_chk, "ic*RzizcC*R.", "Fp:4:")
+BUILTIN(__builtin___sprintf_chk, "ic*RizcC*R.", "Fp:3:")
+BUILTIN(__builtin___vsnprintf_chk, "ic*RzizcC*Ra", "FP:4:")
+BUILTIN(__builtin___vsprintf_chk, "ic*RizcC*Ra", "FP:3:")
+BUILTIN(__builtin___fprintf_chk, "iP*RicC*R.", "Fp:2:")
+BUILTIN(__builtin___printf_chk, "iicC*R.", "Fp:1:")
+BUILTIN(__builtin___vfprintf_chk, "iP*RicC*Ra", "FP:2:")
+BUILTIN(__builtin___vprintf_chk, "iicC*Ra", "FP:1:")
BUILTIN(__builtin_unpredictable, "LiLi" , "nc")
-BUILTIN(__builtin_expect, "LiLiLi" , "nc")
-BUILTIN(__builtin_expect_with_probability, "LiLiLid", "nc")
+BUILTIN(__builtin_expect, "LiLiLi" , "ncE")
+BUILTIN(__builtin_expect_with_probability, "LiLiLid", "ncE")
BUILTIN(__builtin_prefetch, "vvC*.", "nc")
BUILTIN(__builtin_readcyclecounter, "ULLi", "n")
BUILTIN(__builtin_trap, "v", "nr")
@@ -641,9 +677,46 @@ BUILTIN(__builtin_debugtrap, "v", "n")
BUILTIN(__builtin_unreachable, "v", "nr")
BUILTIN(__builtin_shufflevector, "v." , "nct")
BUILTIN(__builtin_convertvector, "v." , "nct")
+BUILTIN(__builtin_vectorelements, "v." , "nct")
BUILTIN(__builtin_alloca, "v*z" , "Fn")
+BUILTIN(__builtin_alloca_uninitialized, "v*z", "Fn")
BUILTIN(__builtin_alloca_with_align, "v*zIz", "Fn")
+BUILTIN(__builtin_alloca_with_align_uninitialized, "v*zIz", "Fn")
BUILTIN(__builtin_call_with_static_chain, "v.", "nt")
+BUILTIN(__builtin_nondeterministic_value, "v.", "nt")
+
+BUILTIN(__builtin_elementwise_abs, "v.", "nct")
+BUILTIN(__builtin_elementwise_bitreverse, "v.", "nct")
+BUILTIN(__builtin_elementwise_max, "v.", "nct")
+BUILTIN(__builtin_elementwise_min, "v.", "nct")
+BUILTIN(__builtin_elementwise_ceil, "v.", "nct")
+BUILTIN(__builtin_elementwise_cos, "v.", "nct")
+BUILTIN(__builtin_elementwise_exp, "v.", "nct")
+BUILTIN(__builtin_elementwise_exp2, "v.", "nct")
+BUILTIN(__builtin_elementwise_floor, "v.", "nct")
+BUILTIN(__builtin_elementwise_log, "v.", "nct")
+BUILTIN(__builtin_elementwise_log2, "v.", "nct")
+BUILTIN(__builtin_elementwise_log10, "v.", "nct")
+BUILTIN(__builtin_elementwise_pow, "v.", "nct")
+BUILTIN(__builtin_elementwise_roundeven, "v.", "nct")
+BUILTIN(__builtin_elementwise_round, "v.", "nct")
+BUILTIN(__builtin_elementwise_rint, "v.", "nct")
+BUILTIN(__builtin_elementwise_nearbyint, "v.", "nct")
+BUILTIN(__builtin_elementwise_sin, "v.", "nct")
+BUILTIN(__builtin_elementwise_sqrt, "v.", "nct")
+BUILTIN(__builtin_elementwise_trunc, "v.", "nct")
+BUILTIN(__builtin_elementwise_canonicalize, "v.", "nct")
+BUILTIN(__builtin_elementwise_copysign, "v.", "nct")
+BUILTIN(__builtin_elementwise_fma, "v.", "nct")
+BUILTIN(__builtin_elementwise_add_sat, "v.", "nct")
+BUILTIN(__builtin_elementwise_sub_sat, "v.", "nct")
+BUILTIN(__builtin_reduce_max, "v.", "nct")
+BUILTIN(__builtin_reduce_min, "v.", "nct")
+BUILTIN(__builtin_reduce_xor, "v.", "nct")
+BUILTIN(__builtin_reduce_or, "v.", "nct")
+BUILTIN(__builtin_reduce_and, "v.", "nct")
+BUILTIN(__builtin_reduce_add, "v.", "nct")
+BUILTIN(__builtin_reduce_mul, "v.", "nct")
BUILTIN(__builtin_matrix_transpose, "v.", "nFt")
BUILTIN(__builtin_matrix_column_major_load, "v.", "nFt")
@@ -794,11 +867,12 @@ ATOMIC_BUILTIN(__c11_atomic_fetch_sub, "v.", "t")
ATOMIC_BUILTIN(__c11_atomic_fetch_and, "v.", "t")
ATOMIC_BUILTIN(__c11_atomic_fetch_or, "v.", "t")
ATOMIC_BUILTIN(__c11_atomic_fetch_xor, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_nand, "v.", "t")
ATOMIC_BUILTIN(__c11_atomic_fetch_max, "v.", "t")
ATOMIC_BUILTIN(__c11_atomic_fetch_min, "v.", "t")
BUILTIN(__c11_atomic_thread_fence, "vi", "n")
BUILTIN(__c11_atomic_signal_fence, "vi", "n")
-BUILTIN(__c11_atomic_is_lock_free, "bz", "n")
+BUILTIN(__c11_atomic_is_lock_free, "bz", "nE")
// GNU atomic builtins.
ATOMIC_BUILTIN(__atomic_load, "v.", "t")
@@ -827,8 +901,34 @@ BUILTIN(__atomic_test_and_set, "bvD*i", "n")
BUILTIN(__atomic_clear, "vvD*i", "n")
BUILTIN(__atomic_thread_fence, "vi", "n")
BUILTIN(__atomic_signal_fence, "vi", "n")
-BUILTIN(__atomic_always_lock_free, "bzvCD*", "n")
-BUILTIN(__atomic_is_lock_free, "bzvCD*", "n")
+BUILTIN(__atomic_always_lock_free, "bzvCD*", "nE")
+BUILTIN(__atomic_is_lock_free, "bzvCD*", "nE")
+
+// GNU atomic builtins with atomic scopes.
+ATOMIC_BUILTIN(__scoped_atomic_load, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_load_n, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_store, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_store_n, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_exchange, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_exchange_n, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_compare_exchange, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_compare_exchange_n, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_fetch_add, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_fetch_sub, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_fetch_and, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_fetch_or, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_fetch_xor, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_fetch_nand, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_add_fetch, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_sub_fetch, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_and_fetch, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_or_fetch, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_xor_fetch, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_max_fetch, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_min_fetch, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_nand_fetch, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_fetch_min, "v.", "t")
+ATOMIC_BUILTIN(__scoped_atomic_fetch_max, "v.", "t")
// OpenCL 2.0 atomic builtins.
ATOMIC_BUILTIN(__opencl_atomic_init, "v.", "t")
@@ -849,6 +949,20 @@ ATOMIC_BUILTIN(__opencl_atomic_fetch_max, "v.", "t")
ATOMIC_BUILTIN(__atomic_fetch_min, "v.", "t")
ATOMIC_BUILTIN(__atomic_fetch_max, "v.", "t")
+// HIP atomic builtins.
+ATOMIC_BUILTIN(__hip_atomic_load, "v.", "t")
+ATOMIC_BUILTIN(__hip_atomic_store, "v.", "t")
+ATOMIC_BUILTIN(__hip_atomic_compare_exchange_weak, "v.", "t")
+ATOMIC_BUILTIN(__hip_atomic_compare_exchange_strong, "v.", "t")
+ATOMIC_BUILTIN(__hip_atomic_exchange, "v.", "t")
+ATOMIC_BUILTIN(__hip_atomic_fetch_add, "v.", "t")
+ATOMIC_BUILTIN(__hip_atomic_fetch_sub, "v.", "t")
+ATOMIC_BUILTIN(__hip_atomic_fetch_and, "v.", "t")
+ATOMIC_BUILTIN(__hip_atomic_fetch_or, "v.", "t")
+ATOMIC_BUILTIN(__hip_atomic_fetch_xor, "v.", "t")
+ATOMIC_BUILTIN(__hip_atomic_fetch_min, "v.", "t")
+ATOMIC_BUILTIN(__hip_atomic_fetch_max, "v.", "t")
+
#undef ATOMIC_BUILTIN
// Non-overloaded atomic builtins.
@@ -870,7 +984,7 @@ BUILTIN(__warn_memset_zero_len, "v", "nU")
// Microsoft builtins. These are only active with -fms-extensions.
LANGBUILTIN(_alloca, "v*z", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(__annotation, "wC*.","n", ALL_MS_LANGUAGES)
-LANGBUILTIN(__assume, "vb", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(__assume, "vb", "nE", ALL_MS_LANGUAGES)
LANGBUILTIN(_bittest, "UcNiC*Ni", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_bittestandcomplement, "UcNi*Ni", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_bittestandreset, "UcNi*Ni", "n", ALL_MS_LANGUAGES)
@@ -879,9 +993,9 @@ LANGBUILTIN(_bittest64, "UcWiC*Wi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_bittestandcomplement64, "UcWi*Wi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_bittestandreset64, "UcWi*Wi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_bittestandset64, "UcWi*Wi", "n", ALL_MS_LANGUAGES)
-LIBBUILTIN(_byteswap_ushort, "UsUs", "fnc", "stdlib.h", ALL_MS_LANGUAGES)
-LIBBUILTIN(_byteswap_ulong, "UNiUNi", "fnc", "stdlib.h", ALL_MS_LANGUAGES)
-LIBBUILTIN(_byteswap_uint64, "ULLiULLi", "fnc", "stdlib.h", ALL_MS_LANGUAGES)
+LIBBUILTIN(_byteswap_ushort, "UsUs", "fnc", STDLIB_H, ALL_MS_LANGUAGES)
+LIBBUILTIN(_byteswap_ulong, "UNiUNi", "fnc", STDLIB_H, ALL_MS_LANGUAGES)
+LIBBUILTIN(_byteswap_uint64, "ULLiULLi", "fnc", STDLIB_H, ALL_MS_LANGUAGES)
LANGBUILTIN(__debugbreak, "v", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(__exception_code, "UNi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_exception_code, "UNi", "n", ALL_MS_LANGUAGES)
@@ -889,7 +1003,7 @@ LANGBUILTIN(__exception_info, "v*", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_exception_info, "v*", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(__abnormal_termination, "i", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_abnormal_termination, "i", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(__GetExceptionInfo, "v*.", "ntu", ALL_MS_LANGUAGES)
+LANGBUILTIN(__GetExceptionInfo, "v*.", "zntu", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedAnd8, "ccD*c", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedAnd16, "ssD*s", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedAnd, "NiNiD*Ni", "n", ALL_MS_LANGUAGES)
@@ -938,586 +1052,602 @@ LANGBUILTIN(__iso_volatile_store16, "vsD*s", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(__iso_volatile_store32, "viD*i", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(__iso_volatile_store64, "vLLiD*LLi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(__noop, "i.", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(__lzcnt16, "UsUs", "nc", ALL_MS_LANGUAGES)
-LANGBUILTIN(__lzcnt, "UiUi", "nc", ALL_MS_LANGUAGES)
-LANGBUILTIN(__lzcnt64, "UWiUWi", "nc", ALL_MS_LANGUAGES)
-LANGBUILTIN(__popcnt16, "UsUs", "nc", ALL_MS_LANGUAGES)
-LANGBUILTIN(__popcnt, "UiUi", "nc", ALL_MS_LANGUAGES)
-LANGBUILTIN(__popcnt64, "UWiUWi", "nc", ALL_MS_LANGUAGES)
+LANGBUILTIN(__lzcnt16, "UsUs", "ncE", ALL_MS_LANGUAGES)
+LANGBUILTIN(__lzcnt, "UiUi", "ncE", ALL_MS_LANGUAGES)
+LANGBUILTIN(__lzcnt64, "UWiUWi", "ncE", ALL_MS_LANGUAGES)
+LANGBUILTIN(__popcnt16, "UsUs", "ncE", ALL_MS_LANGUAGES)
+LANGBUILTIN(__popcnt, "UiUi", "ncE", ALL_MS_LANGUAGES)
+LANGBUILTIN(__popcnt64, "UWiUWi", "ncE", ALL_MS_LANGUAGES)
LANGBUILTIN(_ReturnAddress, "v*", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_rotl8, "UcUcUc", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_rotl16, "UsUsUc", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_rotl, "UiUii", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_lrotl, "ULiULii", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_rotl64, "UWiUWii", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_rotr8, "UcUcUc", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_rotr16, "UsUsUc", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_rotr, "UiUii", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_lrotr, "ULiULii", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_rotr64, "UWiUWii", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_rotl8, "UcUcUc", "nE", ALL_MS_LANGUAGES)
+LANGBUILTIN(_rotl16, "UsUsUc", "nE", ALL_MS_LANGUAGES)
+LANGBUILTIN(_rotl, "UiUii", "nE", ALL_MS_LANGUAGES)
+LANGBUILTIN(_lrotl, "ULiULii", "nE", ALL_MS_LANGUAGES)
+LANGBUILTIN(_rotl64, "UWiUWii", "nE", ALL_MS_LANGUAGES)
+LANGBUILTIN(_rotr8, "UcUcUc", "nE", ALL_MS_LANGUAGES)
+LANGBUILTIN(_rotr16, "UsUsUc", "nE", ALL_MS_LANGUAGES)
+LANGBUILTIN(_rotr, "UiUii", "nE", ALL_MS_LANGUAGES)
+LANGBUILTIN(_lrotr, "ULiULii", "nE", ALL_MS_LANGUAGES)
+LANGBUILTIN(_rotr64, "UWiUWii", "nE", ALL_MS_LANGUAGES)
LANGBUILTIN(__va_start, "vc**.", "nt", ALL_MS_LANGUAGES)
LANGBUILTIN(__fastfail, "vUi", "nr", ALL_MS_LANGUAGES)
// Microsoft library builtins.
-LIBBUILTIN(_setjmpex, "iJ", "fjT", "setjmpex.h", ALL_MS_LANGUAGES)
+LIBBUILTIN(_setjmpex, "iJ", "fjT", SETJMPEX_H, ALL_MS_LANGUAGES)
// C99 library functions
// C99 stdarg.h
-LIBBUILTIN(va_start, "vA.", "fn", "stdarg.h", ALL_LANGUAGES)
-LIBBUILTIN(va_end, "vA", "fn", "stdarg.h", ALL_LANGUAGES)
-LIBBUILTIN(va_copy, "vAA", "fn", "stdarg.h", ALL_LANGUAGES)
+LIBBUILTIN(va_start, "vA.", "fn", STDARG_H, ALL_LANGUAGES)
+LIBBUILTIN(va_end, "vA", "fn", STDARG_H, ALL_LANGUAGES)
+LIBBUILTIN(va_copy, "vAA", "fn", STDARG_H, ALL_LANGUAGES)
// C99 stdlib.h
-LIBBUILTIN(abort, "v", "fr", "stdlib.h", ALL_LANGUAGES)
-LIBBUILTIN(calloc, "v*zz", "f", "stdlib.h", ALL_LANGUAGES)
-LIBBUILTIN(exit, "vi", "fr", "stdlib.h", ALL_LANGUAGES)
-LIBBUILTIN(_Exit, "vi", "fr", "stdlib.h", ALL_LANGUAGES)
-LIBBUILTIN(malloc, "v*z", "f", "stdlib.h", ALL_LANGUAGES)
-LIBBUILTIN(realloc, "v*v*z", "f", "stdlib.h", ALL_LANGUAGES)
-LIBBUILTIN(free, "vv*", "f", "stdlib.h", ALL_LANGUAGES)
-LIBBUILTIN(strtod, "dcC*c**", "f", "stdlib.h", ALL_LANGUAGES)
-LIBBUILTIN(strtof, "fcC*c**", "f", "stdlib.h", ALL_LANGUAGES)
-LIBBUILTIN(strtold, "LdcC*c**", "f", "stdlib.h", ALL_LANGUAGES)
-LIBBUILTIN(strtol, "LicC*c**i", "f", "stdlib.h", ALL_LANGUAGES)
-LIBBUILTIN(strtoll, "LLicC*c**i", "f", "stdlib.h", ALL_LANGUAGES)
-LIBBUILTIN(strtoul, "ULicC*c**i", "f", "stdlib.h", ALL_LANGUAGES)
-LIBBUILTIN(strtoull, "ULLicC*c**i", "f", "stdlib.h", ALL_LANGUAGES)
+LIBBUILTIN(abort, "v", "fr", STDLIB_H, ALL_LANGUAGES)
+LIBBUILTIN(calloc, "v*zz", "f", STDLIB_H, ALL_LANGUAGES)
+LIBBUILTIN(exit, "vi", "fr", STDLIB_H, ALL_LANGUAGES)
+LIBBUILTIN(_Exit, "vi", "fr", STDLIB_H, ALL_LANGUAGES)
+LIBBUILTIN(malloc, "v*z", "f", STDLIB_H, ALL_LANGUAGES)
+LIBBUILTIN(realloc, "v*v*z", "f", STDLIB_H, ALL_LANGUAGES)
+LIBBUILTIN(free, "vv*", "f", STDLIB_H, ALL_LANGUAGES)
+LIBBUILTIN(strtod, "dcC*c**", "f", STDLIB_H, ALL_LANGUAGES)
+LIBBUILTIN(strtof, "fcC*c**", "f", STDLIB_H, ALL_LANGUAGES)
+LIBBUILTIN(strtold, "LdcC*c**", "f", STDLIB_H, ALL_LANGUAGES)
+LIBBUILTIN(strtol, "LicC*c**i", "f", STDLIB_H, ALL_LANGUAGES)
+LIBBUILTIN(strtoll, "LLicC*c**i", "f", STDLIB_H, ALL_LANGUAGES)
+LIBBUILTIN(strtoul, "ULicC*c**i", "f", STDLIB_H, ALL_LANGUAGES)
+LIBBUILTIN(strtoull, "ULLicC*c**i", "f", STDLIB_H, ALL_LANGUAGES)
// C11 stdlib.h
-LIBBUILTIN(aligned_alloc, "v*zz", "f", "stdlib.h", ALL_LANGUAGES)
+LIBBUILTIN(aligned_alloc, "v*zz", "f", STDLIB_H, ALL_LANGUAGES)
// C99 string.h
-LIBBUILTIN(memcpy, "v*v*vC*z", "f", "string.h", ALL_LANGUAGES)
-LIBBUILTIN(memcmp, "ivC*vC*z", "f", "string.h", ALL_LANGUAGES)
-LIBBUILTIN(memmove, "v*v*vC*z", "f", "string.h", ALL_LANGUAGES)
-LIBBUILTIN(strcpy, "c*c*cC*", "f", "string.h", ALL_LANGUAGES)
-LIBBUILTIN(strncpy, "c*c*cC*z", "f", "string.h", ALL_LANGUAGES)
-LIBBUILTIN(strcmp, "icC*cC*", "f", "string.h", ALL_LANGUAGES)
-LIBBUILTIN(strncmp, "icC*cC*z", "f", "string.h", ALL_LANGUAGES)
-LIBBUILTIN(strcat, "c*c*cC*", "f", "string.h", ALL_LANGUAGES)
-LIBBUILTIN(strncat, "c*c*cC*z", "f", "string.h", ALL_LANGUAGES)
-LIBBUILTIN(strxfrm, "zc*cC*z", "f", "string.h", ALL_LANGUAGES)
-LIBBUILTIN(memchr, "v*vC*iz", "f", "string.h", ALL_LANGUAGES)
-LIBBUILTIN(strchr, "c*cC*i", "f", "string.h", ALL_LANGUAGES)
-LIBBUILTIN(strcspn, "zcC*cC*", "f", "string.h", ALL_LANGUAGES)
-LIBBUILTIN(strpbrk, "c*cC*cC*", "f", "string.h", ALL_LANGUAGES)
-LIBBUILTIN(strrchr, "c*cC*i", "f", "string.h", ALL_LANGUAGES)
-LIBBUILTIN(strspn, "zcC*cC*", "f", "string.h", ALL_LANGUAGES)
-LIBBUILTIN(strstr, "c*cC*cC*", "f", "string.h", ALL_LANGUAGES)
-LIBBUILTIN(strtok, "c*c*cC*", "f", "string.h", ALL_LANGUAGES)
-LIBBUILTIN(memset, "v*v*iz", "f", "string.h", ALL_LANGUAGES)
-LIBBUILTIN(strerror, "c*i", "f", "string.h", ALL_LANGUAGES)
-LIBBUILTIN(strlen, "zcC*", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(memcpy, "v*v*vC*z", "fE", STRING_H, ALL_LANGUAGES)
+LIBBUILTIN(memcmp, "ivC*vC*z", "fE", STRING_H, ALL_LANGUAGES)
+LIBBUILTIN(memmove, "v*v*vC*z", "fE", STRING_H, ALL_LANGUAGES)
+LIBBUILTIN(strcpy, "c*c*cC*", "f", STRING_H, ALL_LANGUAGES)
+LIBBUILTIN(strncpy, "c*c*cC*z", "f", STRING_H, ALL_LANGUAGES)
+LIBBUILTIN(strcmp, "icC*cC*", "fE", STRING_H, ALL_LANGUAGES)
+LIBBUILTIN(strncmp, "icC*cC*z", "fE", STRING_H, ALL_LANGUAGES)
+LIBBUILTIN(strcat, "c*c*cC*", "f", STRING_H, ALL_LANGUAGES)
+LIBBUILTIN(strncat, "c*c*cC*z", "f", STRING_H, ALL_LANGUAGES)
+LIBBUILTIN(strxfrm, "zc*cC*z", "f", STRING_H, ALL_LANGUAGES)
+LIBBUILTIN(memchr, "v*vC*iz", "fE", STRING_H, ALL_LANGUAGES)
+LIBBUILTIN(strchr, "c*cC*i", "fE", STRING_H, ALL_LANGUAGES)
+LIBBUILTIN(strcspn, "zcC*cC*", "f", STRING_H, ALL_LANGUAGES)
+LIBBUILTIN(strpbrk, "c*cC*cC*", "f", STRING_H, ALL_LANGUAGES)
+LIBBUILTIN(strrchr, "c*cC*i", "f", STRING_H, ALL_LANGUAGES)
+LIBBUILTIN(strspn, "zcC*cC*", "f", STRING_H, ALL_LANGUAGES)
+LIBBUILTIN(strstr, "c*cC*cC*", "f", STRING_H, ALL_LANGUAGES)
+LIBBUILTIN(strtok, "c*c*cC*", "f", STRING_H, ALL_LANGUAGES)
+LIBBUILTIN(memset, "v*v*iz", "f", STRING_H, ALL_LANGUAGES)
+LIBBUILTIN(strerror, "c*i", "f", STRING_H, ALL_LANGUAGES)
+LIBBUILTIN(strlen, "zcC*", "fE", STRING_H, ALL_LANGUAGES)
// C99 stdio.h
// FIXME: This list is incomplete.
-LIBBUILTIN(printf, "icC*.", "fp:0:", "stdio.h", ALL_LANGUAGES)
-LIBBUILTIN(fprintf, "iP*cC*.", "fp:1:", "stdio.h", ALL_LANGUAGES)
-LIBBUILTIN(snprintf, "ic*zcC*.", "fp:2:", "stdio.h", ALL_LANGUAGES)
-LIBBUILTIN(sprintf, "ic*cC*.", "fp:1:", "stdio.h", ALL_LANGUAGES)
-LIBBUILTIN(vprintf, "icC*a", "fP:0:", "stdio.h", ALL_LANGUAGES)
-LIBBUILTIN(vfprintf, "iP*cC*a", "fP:1:", "stdio.h", ALL_LANGUAGES)
-LIBBUILTIN(vsnprintf, "ic*zcC*a", "fP:2:", "stdio.h", ALL_LANGUAGES)
-LIBBUILTIN(vsprintf, "ic*cC*a", "fP:1:", "stdio.h", ALL_LANGUAGES)
-LIBBUILTIN(scanf, "icC*R.", "fs:0:", "stdio.h", ALL_LANGUAGES)
-LIBBUILTIN(fscanf, "iP*RcC*R.", "fs:1:", "stdio.h", ALL_LANGUAGES)
-LIBBUILTIN(sscanf, "icC*RcC*R.", "fs:1:", "stdio.h", ALL_LANGUAGES)
-LIBBUILTIN(vscanf, "icC*Ra", "fS:0:", "stdio.h", ALL_LANGUAGES)
-LIBBUILTIN(vfscanf, "iP*RcC*Ra", "fS:1:", "stdio.h", ALL_LANGUAGES)
-LIBBUILTIN(vsscanf, "icC*RcC*Ra", "fS:1:", "stdio.h", ALL_LANGUAGES)
-LIBBUILTIN(fopen, "P*cC*cC*", "f", "stdio.h", ALL_LANGUAGES)
-LIBBUILTIN(fread, "zv*zzP*", "f", "stdio.h", ALL_LANGUAGES)
-LIBBUILTIN(fwrite, "zvC*zzP*", "f", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(printf, "icC*.", "fp:0:", STDIO_H, ALL_LANGUAGES)
+LIBBUILTIN(fprintf, "iP*cC*.", "fp:1:", STDIO_H, ALL_LANGUAGES)
+LIBBUILTIN(snprintf, "ic*zcC*.", "fp:2:", STDIO_H, ALL_LANGUAGES)
+LIBBUILTIN(sprintf, "ic*cC*.", "fp:1:", STDIO_H, ALL_LANGUAGES)
+LIBBUILTIN(vprintf, "icC*a", "fP:0:", STDIO_H, ALL_LANGUAGES)
+LIBBUILTIN(vfprintf, "iP*cC*a", "fP:1:", STDIO_H, ALL_LANGUAGES)
+LIBBUILTIN(vsnprintf, "ic*zcC*a", "fP:2:", STDIO_H, ALL_LANGUAGES)
+LIBBUILTIN(vsprintf, "ic*cC*a", "fP:1:", STDIO_H, ALL_LANGUAGES)
+LIBBUILTIN(scanf, "icC*R.", "fs:0:", STDIO_H, ALL_LANGUAGES)
+LIBBUILTIN(fscanf, "iP*RcC*R.", "fs:1:", STDIO_H, ALL_LANGUAGES)
+LIBBUILTIN(sscanf, "icC*RcC*R.", "fs:1:", STDIO_H, ALL_LANGUAGES)
+LIBBUILTIN(vscanf, "icC*Ra", "fS:0:", STDIO_H, ALL_LANGUAGES)
+LIBBUILTIN(vfscanf, "iP*RcC*Ra", "fS:1:", STDIO_H, ALL_LANGUAGES)
+LIBBUILTIN(vsscanf, "icC*RcC*Ra", "fS:1:", STDIO_H, ALL_LANGUAGES)
+LIBBUILTIN(fopen, "P*cC*cC*", "f", STDIO_H, ALL_LANGUAGES)
+LIBBUILTIN(fread, "zv*zzP*", "f", STDIO_H, ALL_LANGUAGES)
+LIBBUILTIN(fwrite, "zvC*zzP*", "f", STDIO_H, ALL_LANGUAGES)
// C99 ctype.h
-LIBBUILTIN(isalnum, "ii", "fnU", "ctype.h", ALL_LANGUAGES)
-LIBBUILTIN(isalpha, "ii", "fnU", "ctype.h", ALL_LANGUAGES)
-LIBBUILTIN(isblank, "ii", "fnU", "ctype.h", ALL_LANGUAGES)
-LIBBUILTIN(iscntrl, "ii", "fnU", "ctype.h", ALL_LANGUAGES)
-LIBBUILTIN(isdigit, "ii", "fnU", "ctype.h", ALL_LANGUAGES)
-LIBBUILTIN(isgraph, "ii", "fnU", "ctype.h", ALL_LANGUAGES)
-LIBBUILTIN(islower, "ii", "fnU", "ctype.h", ALL_LANGUAGES)
-LIBBUILTIN(isprint, "ii", "fnU", "ctype.h", ALL_LANGUAGES)
-LIBBUILTIN(ispunct, "ii", "fnU", "ctype.h", ALL_LANGUAGES)
-LIBBUILTIN(isspace, "ii", "fnU", "ctype.h", ALL_LANGUAGES)
-LIBBUILTIN(isupper, "ii", "fnU", "ctype.h", ALL_LANGUAGES)
-LIBBUILTIN(isxdigit, "ii", "fnU", "ctype.h", ALL_LANGUAGES)
-LIBBUILTIN(tolower, "ii", "fnU", "ctype.h", ALL_LANGUAGES)
-LIBBUILTIN(toupper, "ii", "fnU", "ctype.h", ALL_LANGUAGES)
+LIBBUILTIN(isalnum, "ii", "fnU", CTYPE_H, ALL_LANGUAGES)
+LIBBUILTIN(isalpha, "ii", "fnU", CTYPE_H, ALL_LANGUAGES)
+LIBBUILTIN(isblank, "ii", "fnU", CTYPE_H, ALL_LANGUAGES)
+LIBBUILTIN(iscntrl, "ii", "fnU", CTYPE_H, ALL_LANGUAGES)
+LIBBUILTIN(isdigit, "ii", "fnU", CTYPE_H, ALL_LANGUAGES)
+LIBBUILTIN(isgraph, "ii", "fnU", CTYPE_H, ALL_LANGUAGES)
+LIBBUILTIN(islower, "ii", "fnU", CTYPE_H, ALL_LANGUAGES)
+LIBBUILTIN(isprint, "ii", "fnU", CTYPE_H, ALL_LANGUAGES)
+LIBBUILTIN(ispunct, "ii", "fnU", CTYPE_H, ALL_LANGUAGES)
+LIBBUILTIN(isspace, "ii", "fnU", CTYPE_H, ALL_LANGUAGES)
+LIBBUILTIN(isupper, "ii", "fnU", CTYPE_H, ALL_LANGUAGES)
+LIBBUILTIN(isxdigit, "ii", "fnU", CTYPE_H, ALL_LANGUAGES)
+LIBBUILTIN(tolower, "ii", "fnU", CTYPE_H, ALL_LANGUAGES)
+LIBBUILTIN(toupper, "ii", "fnU", CTYPE_H, ALL_LANGUAGES)
// C99 wchar.h
// FIXME: This list is incomplete. We should cover at least the functions that
// take format strings.
-LIBBUILTIN(wcschr, "w*wC*w", "f", "wchar.h", ALL_LANGUAGES)
-LIBBUILTIN(wcscmp, "iwC*wC*", "f", "wchar.h", ALL_LANGUAGES)
-LIBBUILTIN(wcslen, "zwC*", "f", "wchar.h", ALL_LANGUAGES)
-LIBBUILTIN(wcsncmp, "iwC*wC*z", "f", "wchar.h", ALL_LANGUAGES)
-LIBBUILTIN(wmemchr, "w*wC*wz", "f", "wchar.h", ALL_LANGUAGES)
-LIBBUILTIN(wmemcmp, "iwC*wC*z", "f", "wchar.h", ALL_LANGUAGES)
-LIBBUILTIN(wmemcpy, "w*w*wC*z", "f", "wchar.h", ALL_LANGUAGES)
-LIBBUILTIN(wmemmove,"w*w*wC*z", "f", "wchar.h", ALL_LANGUAGES)
+LIBBUILTIN(wcschr, "w*wC*w", "fE", WCHAR_H, ALL_LANGUAGES)
+LIBBUILTIN(wcscmp, "iwC*wC*", "fE", WCHAR_H, ALL_LANGUAGES)
+LIBBUILTIN(wcslen, "zwC*", "fE", WCHAR_H, ALL_LANGUAGES)
+LIBBUILTIN(wcsncmp, "iwC*wC*z", "fE", WCHAR_H, ALL_LANGUAGES)
+LIBBUILTIN(wmemchr, "w*wC*wz", "fE", WCHAR_H, ALL_LANGUAGES)
+LIBBUILTIN(wmemcmp, "iwC*wC*z", "fE", WCHAR_H, ALL_LANGUAGES)
+LIBBUILTIN(wmemcpy, "w*w*wC*z", "fE", WCHAR_H, ALL_LANGUAGES)
+LIBBUILTIN(wmemmove,"w*w*wC*z", "fE", WCHAR_H, ALL_LANGUAGES)
// C99
// In some systems setjmp is a macro that expands to _setjmp. We undefine
// it here to avoid having two identical LIBBUILTIN entries.
#undef setjmp
-LIBBUILTIN(setjmp, "iJ", "fjT", "setjmp.h", ALL_LANGUAGES)
-LIBBUILTIN(longjmp, "vJi", "frT", "setjmp.h", ALL_LANGUAGES)
+LIBBUILTIN(setjmp, "iJ", "fjT", SETJMP_H, ALL_LANGUAGES)
+LIBBUILTIN(longjmp, "vJi", "frT", SETJMP_H, ALL_LANGUAGES)
// Non-C library functions, active in GNU mode only.
// Functions with (returns_twice) attribute (marked as "j") are still active in
// all languages, because losing this attribute would result in miscompilation
// when these functions are used in non-GNU mode. PR16138.
-LIBBUILTIN(alloca, "v*z", "f", "stdlib.h", ALL_GNU_LANGUAGES)
+LIBBUILTIN(alloca, "v*z", "f", STDLIB_H, ALL_GNU_LANGUAGES)
// POSIX malloc.h
-LIBBUILTIN(memalign, "v*zz", "f", "malloc.h", ALL_GNU_LANGUAGES)
+LIBBUILTIN(memalign, "v*zz", "f", MALLOC_H, ALL_GNU_LANGUAGES)
// POSIX string.h
-LIBBUILTIN(memccpy, "v*v*vC*iz", "f", "string.h", ALL_GNU_LANGUAGES)
-LIBBUILTIN(mempcpy, "v*v*vC*z", "f", "string.h", ALL_GNU_LANGUAGES)
-LIBBUILTIN(stpcpy, "c*c*cC*", "f", "string.h", ALL_GNU_LANGUAGES)
-LIBBUILTIN(stpncpy, "c*c*cC*z", "f", "string.h", ALL_GNU_LANGUAGES)
-LIBBUILTIN(strdup, "c*cC*", "f", "string.h", ALL_GNU_LANGUAGES)
-LIBBUILTIN(strndup, "c*cC*z", "f", "string.h", ALL_GNU_LANGUAGES)
+LIBBUILTIN(memccpy, "v*v*vC*iz", "f", STRING_H, ALL_GNU_LANGUAGES)
+LIBBUILTIN(mempcpy, "v*v*vC*z", "f", STRING_H, ALL_GNU_LANGUAGES)
+LIBBUILTIN(stpcpy, "c*c*cC*", "f", STRING_H, ALL_GNU_LANGUAGES)
+LIBBUILTIN(stpncpy, "c*c*cC*z", "f", STRING_H, ALL_GNU_LANGUAGES)
+LIBBUILTIN(strdup, "c*cC*", "f", STRING_H, ALL_GNU_LANGUAGES)
+LIBBUILTIN(strndup, "c*cC*z", "f", STRING_H, ALL_GNU_LANGUAGES)
// POSIX strings.h
-LIBBUILTIN(index, "c*cC*i", "f", "strings.h", ALL_GNU_LANGUAGES)
-LIBBUILTIN(rindex, "c*cC*i", "f", "strings.h", ALL_GNU_LANGUAGES)
-LIBBUILTIN(bzero, "vv*z", "f", "strings.h", ALL_GNU_LANGUAGES)
-LIBBUILTIN(bcmp, "ivC*vC*z", "f", "strings.h", ALL_GNU_LANGUAGES)
+LIBBUILTIN(index, "c*cC*i", "f", STRINGS_H, ALL_GNU_LANGUAGES)
+LIBBUILTIN(rindex, "c*cC*i", "f", STRINGS_H, ALL_GNU_LANGUAGES)
+LIBBUILTIN(bzero, "vv*z", "f", STRINGS_H, ALL_GNU_LANGUAGES)
+LIBBUILTIN(bcopy, "vvC*v*z", "f", STRINGS_H, ALL_GNU_LANGUAGES)
+LIBBUILTIN(bcmp, "ivC*vC*z", "fE", STRINGS_H, ALL_GNU_LANGUAGES)
// In some systems str[n]casejmp is a macro that expands to _str[n]icmp.
// We undefine then here to avoid wrong name.
#undef strcasecmp
#undef strncasecmp
-LIBBUILTIN(strcasecmp, "icC*cC*", "f", "strings.h", ALL_GNU_LANGUAGES)
-LIBBUILTIN(strncasecmp, "icC*cC*z", "f", "strings.h", ALL_GNU_LANGUAGES)
+LIBBUILTIN(strcasecmp, "icC*cC*", "f", STRINGS_H, ALL_GNU_LANGUAGES)
+LIBBUILTIN(strncasecmp, "icC*cC*z", "f", STRINGS_H, ALL_GNU_LANGUAGES)
// POSIX unistd.h
-LIBBUILTIN(_exit, "vi", "fr", "unistd.h", ALL_GNU_LANGUAGES)
-LIBBUILTIN(vfork, "p", "fjT", "unistd.h", ALL_LANGUAGES)
+LIBBUILTIN(_exit, "vi", "fr", UNISTD_H, ALL_GNU_LANGUAGES)
+LIBBUILTIN(vfork, "p", "fjT", UNISTD_H, ALL_LANGUAGES)
// POSIX pthread.h
// FIXME: Should specify argument types.
-LIBBUILTIN(pthread_create, "", "fC<2,3>", "pthread.h", ALL_GNU_LANGUAGES)
+LIBBUILTIN(pthread_create, "", "fC<2,3>", PTHREAD_H, ALL_GNU_LANGUAGES)
// POSIX setjmp.h
// FIXME: MinGW _setjmp has an additional void* parameter.
-LIBBUILTIN(_setjmp, "iJ", "fjT", "setjmp.h", ALL_LANGUAGES)
-LIBBUILTIN(__sigsetjmp, "iSJi", "fjT", "setjmp.h", ALL_LANGUAGES)
-LIBBUILTIN(sigsetjmp, "iSJi", "fjT", "setjmp.h", ALL_LANGUAGES)
-LIBBUILTIN(savectx, "iJ", "fjT", "setjmp.h", ALL_LANGUAGES)
-LIBBUILTIN(getcontext, "iK*", "fjT", "setjmp.h", ALL_LANGUAGES)
-
-LIBBUILTIN(_longjmp, "vJi", "frT", "setjmp.h", ALL_GNU_LANGUAGES)
-LIBBUILTIN(siglongjmp, "vSJi", "frT", "setjmp.h", ALL_GNU_LANGUAGES)
+LIBBUILTIN(_setjmp, "iJ", "fjT", SETJMP_H, ALL_LANGUAGES)
+LIBBUILTIN(__sigsetjmp, "iSJi", "fjT", SETJMP_H, ALL_LANGUAGES)
+LIBBUILTIN(sigsetjmp, "iSJi", "fjT", SETJMP_H, ALL_LANGUAGES)
+LIBBUILTIN(savectx, "iJ", "fjT", SETJMP_H, ALL_LANGUAGES)
+LIBBUILTIN(getcontext, "iK*", "fjT", SETJMP_H, ALL_LANGUAGES)
+
+LIBBUILTIN(_longjmp, "vJi", "frT", SETJMP_H, ALL_GNU_LANGUAGES)
+LIBBUILTIN(siglongjmp, "vSJi", "frT", SETJMP_H, ALL_GNU_LANGUAGES)
// non-standard but very common
-LIBBUILTIN(strlcpy, "zc*cC*z", "f", "string.h", ALL_GNU_LANGUAGES)
-LIBBUILTIN(strlcat, "zc*cC*z", "f", "string.h", ALL_GNU_LANGUAGES)
+LIBBUILTIN(strlcpy, "zc*cC*z", "f", STRING_H, ALL_GNU_LANGUAGES)
+LIBBUILTIN(strlcat, "zc*cC*z", "f", STRING_H, ALL_GNU_LANGUAGES)
// id objc_msgSend(id, SEL, ...)
-LIBBUILTIN(objc_msgSend, "GGH.", "f", "objc/message.h", OBJC_LANG)
+LIBBUILTIN(objc_msgSend, "GGH.", "f", OBJC_MESSAGE_H, OBJC_LANG)
// long double objc_msgSend_fpret(id self, SEL op, ...)
-LIBBUILTIN(objc_msgSend_fpret, "LdGH.", "f", "objc/message.h", OBJC_LANG)
+LIBBUILTIN(objc_msgSend_fpret, "LdGH.", "f", OBJC_MESSAGE_H, OBJC_LANG)
// _Complex long double objc_msgSend_fp2ret(id self, SEL op, ...)
-LIBBUILTIN(objc_msgSend_fp2ret, "XLdGH.", "f", "objc/message.h", OBJC_LANG)
+LIBBUILTIN(objc_msgSend_fp2ret, "XLdGH.", "f", OBJC_MESSAGE_H, OBJC_LANG)
// void objc_msgSend_stret (id, SEL, ...)
-LIBBUILTIN(objc_msgSend_stret, "vGH.", "f", "objc/message.h", OBJC_LANG)
+LIBBUILTIN(objc_msgSend_stret, "vGH.", "f", OBJC_MESSAGE_H, OBJC_LANG)
// id objc_msgSendSuper(struct objc_super *super, SEL op, ...)
-LIBBUILTIN(objc_msgSendSuper, "GM*H.", "f", "objc/message.h", OBJC_LANG)
+LIBBUILTIN(objc_msgSendSuper, "GM*H.", "f", OBJC_MESSAGE_H, OBJC_LANG)
// void objc_msgSendSuper_stret(struct objc_super *super, SEL op, ...)
-LIBBUILTIN(objc_msgSendSuper_stret, "vM*H.", "f", "objc/message.h", OBJC_LANG)
+LIBBUILTIN(objc_msgSendSuper_stret, "vM*H.", "f", OBJC_MESSAGE_H, OBJC_LANG)
// id objc_getClass(const char *name)
-LIBBUILTIN(objc_getClass, "GcC*", "f", "objc/runtime.h", OBJC_LANG)
+LIBBUILTIN(objc_getClass, "GcC*", "f", OBJC_RUNTIME_H, OBJC_LANG)
// id objc_getMetaClass(const char *name)
-LIBBUILTIN(objc_getMetaClass, "GcC*", "f", "objc/runtime.h", OBJC_LANG)
+LIBBUILTIN(objc_getMetaClass, "GcC*", "f", OBJC_RUNTIME_H, OBJC_LANG)
// void objc_enumerationMutation(id)
-LIBBUILTIN(objc_enumerationMutation, "vG", "f", "objc/runtime.h", OBJC_LANG)
+LIBBUILTIN(objc_enumerationMutation, "vG", "f", OBJC_RUNTIME_H, OBJC_LANG)
// id objc_read_weak(id *location)
-LIBBUILTIN(objc_read_weak, "GG*", "f", "objc/objc-auto.h", OBJC_LANG)
+LIBBUILTIN(objc_read_weak, "GG*", "f", OBJC_OBJC_AUTO_H, OBJC_LANG)
// id objc_assign_weak(id value, id *location)
-LIBBUILTIN(objc_assign_weak, "GGG*", "f", "objc/objc-auto.h", OBJC_LANG)
+LIBBUILTIN(objc_assign_weak, "GGG*", "f", OBJC_OBJC_AUTO_H, OBJC_LANG)
// id objc_assign_ivar(id value, id dest, ptrdiff_t offset)
-LIBBUILTIN(objc_assign_ivar, "GGGY", "f", "objc/objc-auto.h", OBJC_LANG)
+LIBBUILTIN(objc_assign_ivar, "GGGY", "f", OBJC_OBJC_AUTO_H, OBJC_LANG)
// id objc_assign_global(id val, id *dest)
-LIBBUILTIN(objc_assign_global, "GGG*", "f", "objc/objc-auto.h", OBJC_LANG)
+LIBBUILTIN(objc_assign_global, "GGG*", "f", OBJC_OBJC_AUTO_H, OBJC_LANG)
// id objc_assign_strongCast(id val, id *dest
-LIBBUILTIN(objc_assign_strongCast, "GGG*", "f", "objc/objc-auto.h", OBJC_LANG)
+LIBBUILTIN(objc_assign_strongCast, "GGG*", "f", OBJC_OBJC_AUTO_H, OBJC_LANG)
// id objc_exception_extract(void *localExceptionData)
-LIBBUILTIN(objc_exception_extract, "Gv*", "f", "objc/objc-exception.h", OBJC_LANG)
+LIBBUILTIN(objc_exception_extract, "Gv*", "f", OBJC_OBJC_EXCEPTION_H, OBJC_LANG)
// void objc_exception_try_enter(void *localExceptionData)
-LIBBUILTIN(objc_exception_try_enter, "vv*", "f", "objc/objc-exception.h", OBJC_LANG)
+LIBBUILTIN(objc_exception_try_enter, "vv*", "f", OBJC_OBJC_EXCEPTION_H, OBJC_LANG)
// void objc_exception_try_exit(void *localExceptionData)
-LIBBUILTIN(objc_exception_try_exit, "vv*", "f", "objc/objc-exception.h", OBJC_LANG)
+LIBBUILTIN(objc_exception_try_exit, "vv*", "f", OBJC_OBJC_EXCEPTION_H, OBJC_LANG)
// int objc_exception_match(Class exceptionClass, id exception)
-LIBBUILTIN(objc_exception_match, "iGG", "f", "objc/objc-exception.h", OBJC_LANG)
+LIBBUILTIN(objc_exception_match, "iGG", "f", OBJC_OBJC_EXCEPTION_H, OBJC_LANG)
// void objc_exception_throw(id exception)
-LIBBUILTIN(objc_exception_throw, "vG", "f", "objc/objc-exception.h", OBJC_LANG)
+LIBBUILTIN(objc_exception_throw, "vG", "f", OBJC_OBJC_EXCEPTION_H, OBJC_LANG)
// int objc_sync_enter(id obj)
-LIBBUILTIN(objc_sync_enter, "iG", "f", "objc/objc-sync.h", OBJC_LANG)
+LIBBUILTIN(objc_sync_enter, "iG", "f", OBJC_OBJC_SYNC_H, OBJC_LANG)
// int objc_sync_exit(id obj)
-LIBBUILTIN(objc_sync_exit, "iG", "f", "objc/objc-sync.h", OBJC_LANG)
+LIBBUILTIN(objc_sync_exit, "iG", "f", OBJC_OBJC_SYNC_H, OBJC_LANG)
BUILTIN(__builtin_objc_memmove_collectable, "v*v*vC*z", "nF")
// void NSLog(NSString *fmt, ...)
-LIBBUILTIN(NSLog, "vG.", "fp:0:", "Foundation/NSObjCRuntime.h", OBJC_LANG)
+LIBBUILTIN(NSLog, "vG.", "fp:0:", FOUNDATION_NSOBJCRUNTIME_H, OBJC_LANG)
// void NSLogv(NSString *fmt, va_list args)
-LIBBUILTIN(NSLogv, "vGa", "fP:0:", "Foundation/NSObjCRuntime.h", OBJC_LANG)
+LIBBUILTIN(NSLogv, "vGa", "fP:0:", FOUNDATION_NSOBJCRUNTIME_H, OBJC_LANG)
// Builtin math library functions
-LIBBUILTIN(atan2, "ddd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(atan2f, "fff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(atan2l, "LdLdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(atan2, "ddd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(atan2f, "fff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(atan2l, "LdLdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(abs, "ii", "fnc", "stdlib.h", ALL_LANGUAGES)
-LIBBUILTIN(labs, "LiLi", "fnc", "stdlib.h", ALL_LANGUAGES)
-LIBBUILTIN(llabs, "LLiLLi", "fnc", "stdlib.h", ALL_LANGUAGES)
+LIBBUILTIN(abs, "ii", "fnc", STDLIB_H, ALL_LANGUAGES)
+LIBBUILTIN(labs, "LiLi", "fnc", STDLIB_H, ALL_LANGUAGES)
+LIBBUILTIN(llabs, "LLiLLi", "fnc", STDLIB_H, ALL_LANGUAGES)
-LIBBUILTIN(copysign, "ddd", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(copysignf, "fff", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(copysignl, "LdLdLd", "fnc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(copysign, "ddd", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(copysignf, "fff", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(copysignl, "LdLdLd", "fnc", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(fabs, "dd", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(fabsf, "ff", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(fabsl, "LdLd", "fnc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fabs, "dd", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(fabsf, "ff", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(fabsl, "LdLd", "fnc", MATH_H, ALL_LANGUAGES)
// Some systems define finitef as alias of _finitef.
#if defined (finitef)
#undef finitef
#endif
-LIBBUILTIN(finite, "id", "fnc", "math.h", GNU_LANG)
-LIBBUILTIN(finitef, "if", "fnc", "math.h", GNU_LANG)
-LIBBUILTIN(finitel, "iLd", "fnc", "math.h", GNU_LANG)
+LIBBUILTIN(finite, "id", "fnc", MATH_H, GNU_LANG)
+LIBBUILTIN(finitef, "if", "fnc", MATH_H, GNU_LANG)
+LIBBUILTIN(finitel, "iLd", "fnc", MATH_H, GNU_LANG)
// glibc's math.h generates calls to __finite
-LIBBUILTIN(__finite, "id", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(__finitef, "if", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(__finitel, "iLd", "fnc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(__finite, "id", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(__finitef, "if", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(__finitel, "iLd", "fnc", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(fmod, "ddd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(fmodf, "fff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(fmodl, "LdLdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fmod, "ddd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(fmodf, "fff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(fmodl, "LdLdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(frexp, "ddi*", "fn", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(frexpf, "ffi*", "fn", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(frexpl, "LdLdi*", "fn", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(frexp, "ddi*", "fn", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(frexpf, "ffi*", "fn", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(frexpl, "LdLdi*", "fn", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(ldexp, "ddi", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(ldexpf, "ffi", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(ldexpl, "LdLdi", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(ldexp, "ddi", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(ldexpf, "ffi", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(ldexpl, "LdLdi", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(modf, "ddd*", "fn", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(modff, "fff*", "fn", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(modfl, "LdLdLd*", "fn", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(modf, "ddd*", "fn", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(modff, "fff*", "fn", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(modfl, "LdLdLd*", "fn", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(nan, "dcC*", "fUn", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(nanf, "fcC*", "fUn", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(nanl, "LdcC*", "fUn", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(nan, "dcC*", "fUn", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(nanf, "fcC*", "fUn", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(nanl, "LdcC*", "fUn", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(pow, "ddd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(powf, "fff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(powl, "LdLdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(pow, "ddd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(powf, "fff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(powl, "LdLdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(acos, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(acosf, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(acosl, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(acos, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(acosf, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(acosl, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(acosh, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(acoshf, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(acoshl, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(acosh, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(acoshf, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(acoshl, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(asin, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(asinf, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(asinl, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(asin, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(asinf, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(asinl, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(asinh, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(asinhf, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(asinhl, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(asinh, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(asinhf, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(asinhl, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(atan, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(atanf, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(atanl, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(atan, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(atanf, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(atanl, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(atanh, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(atanhf, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(atanhl, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(atanh, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(atanhf, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(atanhl, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(cbrt, "dd", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(cbrtf, "ff", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(cbrtl, "LdLd", "fnc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(cbrt, "dd", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(cbrtf, "ff", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(cbrtl, "LdLd", "fnc", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(ceil, "dd", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(ceilf, "ff", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(ceill, "LdLd", "fnc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(ceil, "dd", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(ceilf, "ff", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(ceill, "LdLd", "fnc", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(cos, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(cosf, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(cosl, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(cos, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(cosf, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(cosl, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(cosh, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(coshf, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(coshl, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(cosh, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(coshf, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(coshl, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(erf, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(erff, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(erfl, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(erf, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(erff, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(erfl, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(erfc, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(erfcf, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(erfcl, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(erfc, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(erfcf, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(erfcl, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(exp, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(expf, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(expl, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(exp, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(expf, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(expl, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(exp2, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(exp2f, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(exp2l, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(exp2, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(exp2f, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(exp2l, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(expm1, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(expm1f, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(expm1l, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(expm1, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(expm1f, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(expm1l, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(fdim, "ddd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(fdimf, "fff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(fdiml, "LdLdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fdim, "ddd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(fdimf, "fff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(fdiml, "LdLdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(floor, "dd", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(floorf, "ff", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(floorl, "LdLd", "fnc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(floor, "dd", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(floorf, "ff", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(floorl, "LdLd", "fnc", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(fma, "dddd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(fmaf, "ffff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(fmal, "LdLdLdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fma, "dddd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(fmaf, "ffff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(fmal, "LdLdLdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(fmax, "ddd", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(fmaxf, "fff", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(fmaxl, "LdLdLd", "fnc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fmax, "ddd", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(fmaxf, "fff", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(fmaxl, "LdLdLd", "fnc", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(fmin, "ddd", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(fminf, "fff", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(fminl, "LdLdLd", "fnc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fmin, "ddd", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(fminf, "fff", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(fminl, "LdLdLd", "fnc", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(hypot, "ddd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(hypotf, "fff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(hypotl, "LdLdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(hypot, "ddd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(hypotf, "fff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(hypotl, "LdLdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(ilogb, "id", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(ilogbf, "if", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(ilogbl, "iLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(ilogb, "id", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(ilogbf, "if", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(ilogbl, "iLd", "fne", MATH_H, ALL_LANGUAGES)
// POSIX math.h declares a global, signgam, that lgamma writes to, so these
-// shouldn't have "e" or "c" attributes
-LIBBUILTIN(lgamma, "dd", "fn", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(lgammaf, "ff", "fn", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(lgammal, "LdLd", "fn", "math.h", ALL_LANGUAGES)
+// shouldn't have "e", "c" or "g" attributes
+LIBBUILTIN(lgamma, "dd", "fn", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(lgammaf, "ff", "fn", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(lgammal, "LdLd", "fn", MATH_H, ALL_LANGUAGES)
+
+LIBBUILTIN(llrint, "LLid", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(llrintf, "LLif", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(llrintl, "LLiLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(llrint, "LLid", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(llrintf, "LLif", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(llrintl, "LLiLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(llround, "LLid", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(llroundf, "LLif", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(llroundl, "LLiLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(llround, "LLid", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(llroundf, "LLif", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(llroundl, "LLiLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(log, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(logf, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(logl, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(log, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(logf, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(logl, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(log10, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(log10f, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(log10l, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(log10, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(log10f, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(log10l, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(log1p, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(log1pf, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(log1pl, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(log1p, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(log1pf, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(log1pl, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(log2, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(log2f, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(log2l, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(log2, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(log2f, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(log2l, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(logb, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(logbf, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(logbl, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(logb, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(logbf, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(logbl, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(lrint, "Lid", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(lrintf, "Lif", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(lrintl, "LiLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(lrint, "Lid", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(lrintf, "Lif", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(lrintl, "LiLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(lround, "Lid", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(lroundf, "Lif", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(lroundl, "LiLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(lround, "Lid", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(lroundf, "Lif", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(lroundl, "LiLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(nearbyint, "dd", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(nearbyintf, "ff", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(nearbyintl, "LdLd", "fnc", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(nearbyint, "dd", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(nearbyintf, "ff", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(nearbyintl, "LdLd", "fnc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(nextafter, "ddd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(nextafterf, "fff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(nextafterl, "LdLdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(nextafter, "ddd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(nextafterf, "fff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(nextafterl, "LdLdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(nexttoward, "ddLd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(nexttowardf, "ffLd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(nexttowardl, "LdLdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(nexttoward, "ddLd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(nexttowardf, "ffLd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(nexttowardl, "LdLdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(remainder, "ddd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(remainderf, "fff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(remainderl, "LdLdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(remainder, "ddd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(remainderf, "fff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(remainderl, "LdLdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(remquo, "dddi*", "fn", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(remquof, "fffi*", "fn", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(remquol, "LdLdLdi*", "fn", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(remquo, "dddi*", "fn", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(remquof, "fffi*", "fn", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(remquol, "LdLdLdi*", "fn", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(rint, "dd", "fng", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(rintf, "ff", "fng", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(rintl, "LdLd", "fng", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(rint, "dd", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(rintf, "ff", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(rintl, "LdLd", "fnc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(round, "dd", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(roundf, "ff", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(roundl, "LdLd", "fnc", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(round, "dd", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(roundf, "ff", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(roundl, "LdLd", "fnc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(roundeven, "dd", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(roundevenf, "ff", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(roundevenl, "LdLd", "fnc", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(scalbln, "ddLi", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(scalblnf, "ffLi", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(scalblnl, "LdLdLi", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(scalbln, "ddLi", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(scalblnf, "ffLi", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(scalblnl, "LdLdLi", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(scalbn, "ddi", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(scalbnf, "ffi", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(scalbnl, "LdLdi", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(scalbn, "ddi", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(scalbnf, "ffi", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(scalbnl, "LdLdi", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(sin, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(sinf, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(sinl, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(sin, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(sinf, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(sinl, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(sinh, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(sinhf, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(sinhl, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(sinh, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(sinhf, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(sinhl, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(sqrt, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(sqrtf, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(sqrtl, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(sqrt, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(sqrtf, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(sqrtl, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(tan, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(tanf, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(tanl, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(tan, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(tanf, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(tanl, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(tanh, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(tanhf, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(tanhl, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(tanh, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(tanhf, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(tanhl, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(tgamma, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(tgammaf, "ff", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(tgammal, "LdLd", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(tgamma, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(tgammaf, "ff", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(tgammal, "LdLd", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(trunc, "dd", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(truncf, "ff", "fnc", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(truncl, "LdLd", "fnc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(trunc, "dd", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(truncf, "ff", "fnc", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(truncl, "LdLd", "fnc", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(cabs, "dXd", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(cabsf, "fXf", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(cabsl, "LdXLd", "fne", "complex.h", ALL_LANGUAGES)
+LIBBUILTIN(cabs, "dXd", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(cabsf, "fXf", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(cabsl, "LdXLd", "fne", COMPLEX_H, ALL_LANGUAGES)
-LIBBUILTIN(cacos, "XdXd", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(cacosf, "XfXf", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(cacosl, "XLdXLd", "fne", "complex.h", ALL_LANGUAGES)
+LIBBUILTIN(cacos, "XdXd", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(cacosf, "XfXf", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(cacosl, "XLdXLd", "fne", COMPLEX_H, ALL_LANGUAGES)
-LIBBUILTIN(cacosh, "XdXd", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(cacoshf, "XfXf", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(cacoshl, "XLdXLd", "fne", "complex.h", ALL_LANGUAGES)
+LIBBUILTIN(cacosh, "XdXd", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(cacoshf, "XfXf", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(cacoshl, "XLdXLd", "fne", COMPLEX_H, ALL_LANGUAGES)
-LIBBUILTIN(carg, "dXd", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(cargf, "fXf", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(cargl, "LdXLd", "fne", "complex.h", ALL_LANGUAGES)
+LIBBUILTIN(carg, "dXd", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(cargf, "fXf", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(cargl, "LdXLd", "fne", COMPLEX_H, ALL_LANGUAGES)
-LIBBUILTIN(casin, "XdXd", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(casinf, "XfXf", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(casinl, "XLdXLd", "fne", "complex.h", ALL_LANGUAGES)
+LIBBUILTIN(casin, "XdXd", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(casinf, "XfXf", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(casinl, "XLdXLd", "fne", COMPLEX_H, ALL_LANGUAGES)
-LIBBUILTIN(casinh, "XdXd", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(casinhf, "XfXf", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(casinhl, "XLdXLd", "fne", "complex.h", ALL_LANGUAGES)
+LIBBUILTIN(casinh, "XdXd", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(casinhf, "XfXf", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(casinhl, "XLdXLd", "fne", COMPLEX_H, ALL_LANGUAGES)
-LIBBUILTIN(catan, "XdXd", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(catanf, "XfXf", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(catanl, "XLdXLd", "fne", "complex.h", ALL_LANGUAGES)
+LIBBUILTIN(catan, "XdXd", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(catanf, "XfXf", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(catanl, "XLdXLd", "fne", COMPLEX_H, ALL_LANGUAGES)
-LIBBUILTIN(catanh, "XdXd", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(catanhf, "XfXf", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(catanhl, "XLdXLd", "fne", "complex.h", ALL_LANGUAGES)
+LIBBUILTIN(catanh, "XdXd", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(catanhf, "XfXf", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(catanhl, "XLdXLd", "fne", COMPLEX_H, ALL_LANGUAGES)
-LIBBUILTIN(ccos, "XdXd", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(ccosf, "XfXf", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(ccosl, "XLdXLd", "fne", "complex.h", ALL_LANGUAGES)
+LIBBUILTIN(ccos, "XdXd", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(ccosf, "XfXf", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(ccosl, "XLdXLd", "fne", COMPLEX_H, ALL_LANGUAGES)
-LIBBUILTIN(ccosh, "XdXd", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(ccoshf, "XfXf", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(ccoshl, "XLdXLd", "fne", "complex.h", ALL_LANGUAGES)
+LIBBUILTIN(ccosh, "XdXd", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(ccoshf, "XfXf", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(ccoshl, "XLdXLd", "fne", COMPLEX_H, ALL_LANGUAGES)
-LIBBUILTIN(cexp, "XdXd", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(cexpf, "XfXf", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(cexpl, "XLdXLd", "fne", "complex.h", ALL_LANGUAGES)
+LIBBUILTIN(cexp, "XdXd", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(cexpf, "XfXf", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(cexpl, "XLdXLd", "fne", COMPLEX_H, ALL_LANGUAGES)
-LIBBUILTIN(cimag, "dXd", "fnc", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(cimagf, "fXf", "fnc", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(cimagl, "LdXLd", "fnc", "complex.h", ALL_LANGUAGES)
+LIBBUILTIN(cimag, "dXd", "fnc", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(cimagf, "fXf", "fnc", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(cimagl, "LdXLd", "fnc", COMPLEX_H, ALL_LANGUAGES)
-LIBBUILTIN(conj, "XdXd", "fnc", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(conjf, "XfXf", "fnc", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(conjl, "XLdXLd", "fnc", "complex.h", ALL_LANGUAGES)
+LIBBUILTIN(conj, "XdXd", "fnc", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(conjf, "XfXf", "fnc", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(conjl, "XLdXLd", "fnc", COMPLEX_H, ALL_LANGUAGES)
-LIBBUILTIN(clog, "XdXd", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(clogf, "XfXf", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(clogl, "XLdXLd", "fne", "complex.h", ALL_LANGUAGES)
+LIBBUILTIN(clog, "XdXd", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(clogf, "XfXf", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(clogl, "XLdXLd", "fne", COMPLEX_H, ALL_LANGUAGES)
-LIBBUILTIN(cproj, "XdXd", "fnc", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(cprojf, "XfXf", "fnc", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(cprojl, "XLdXLd", "fnc", "complex.h", ALL_LANGUAGES)
+LIBBUILTIN(cproj, "XdXd", "fnc", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(cprojf, "XfXf", "fnc", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(cprojl, "XLdXLd", "fnc", COMPLEX_H, ALL_LANGUAGES)
-LIBBUILTIN(cpow, "XdXdXd", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(cpowf, "XfXfXf", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(cpowl, "XLdXLdXLd", "fne", "complex.h", ALL_LANGUAGES)
+LIBBUILTIN(cpow, "XdXdXd", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(cpowf, "XfXfXf", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(cpowl, "XLdXLdXLd", "fne", COMPLEX_H, ALL_LANGUAGES)
-LIBBUILTIN(creal, "dXd", "fnc", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(crealf, "fXf", "fnc", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(creall, "LdXLd", "fnc", "complex.h", ALL_LANGUAGES)
+LIBBUILTIN(creal, "dXd", "fnc", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(crealf, "fXf", "fnc", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(creall, "LdXLd", "fnc", COMPLEX_H, ALL_LANGUAGES)
-LIBBUILTIN(csin, "XdXd", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(csinf, "XfXf", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(csinl, "XLdXLd", "fne", "complex.h", ALL_LANGUAGES)
+LIBBUILTIN(csin, "XdXd", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(csinf, "XfXf", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(csinl, "XLdXLd", "fne", COMPLEX_H, ALL_LANGUAGES)
-LIBBUILTIN(csinh, "XdXd", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(csinhf, "XfXf", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(csinhl, "XLdXLd", "fne", "complex.h", ALL_LANGUAGES)
+LIBBUILTIN(csinh, "XdXd", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(csinhf, "XfXf", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(csinhl, "XLdXLd", "fne", COMPLEX_H, ALL_LANGUAGES)
-LIBBUILTIN(csqrt, "XdXd", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(csqrtf, "XfXf", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(csqrtl, "XLdXLd", "fne", "complex.h", ALL_LANGUAGES)
+LIBBUILTIN(csqrt, "XdXd", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(csqrtf, "XfXf", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(csqrtl, "XLdXLd", "fne", COMPLEX_H, ALL_LANGUAGES)
-LIBBUILTIN(ctan, "XdXd", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(ctanf, "XfXf", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(ctanl, "XLdXLd", "fne", "complex.h", ALL_LANGUAGES)
+LIBBUILTIN(ctan, "XdXd", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(ctanf, "XfXf", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(ctanl, "XLdXLd", "fne", COMPLEX_H, ALL_LANGUAGES)
-LIBBUILTIN(ctanh, "XdXd", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(ctanhf, "XfXf", "fne", "complex.h", ALL_LANGUAGES)
-LIBBUILTIN(ctanhl, "XLdXLd", "fne", "complex.h", ALL_LANGUAGES)
+LIBBUILTIN(ctanh, "XdXd", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(ctanhf, "XfXf", "fne", COMPLEX_H, ALL_LANGUAGES)
+LIBBUILTIN(ctanhl, "XLdXLd", "fne", COMPLEX_H, ALL_LANGUAGES)
// __sinpi and friends are OS X specific library functions, but otherwise much
// like the standard (non-complex) sin (etc).
-LIBBUILTIN(__sinpi, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(__sinpif, "ff", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(__sinpi, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(__sinpif, "ff", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(__cospi, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(__cospif, "ff", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(__cospi, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(__cospif, "ff", "fne", MATH_H, ALL_LANGUAGES)
-LIBBUILTIN(__tanpi, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(__tanpif, "ff", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(__tanpi, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(__tanpif, "ff", "fne", MATH_H, ALL_LANGUAGES)
// Similarly, __exp10 is OS X only
-LIBBUILTIN(__exp10, "dd", "fne", "math.h", ALL_LANGUAGES)
-LIBBUILTIN(__exp10f, "ff", "fne", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(__exp10, "dd", "fne", MATH_H, ALL_LANGUAGES)
+LIBBUILTIN(__exp10f, "ff", "fne", MATH_H, ALL_LANGUAGES)
// Blocks runtime Builtin math library functions
-LIBBUILTIN(_Block_object_assign, "vv*vC*iC", "f", "Blocks.h", ALL_LANGUAGES)
-LIBBUILTIN(_Block_object_dispose, "vvC*iC", "f", "Blocks.h", ALL_LANGUAGES)
+LIBBUILTIN(_Block_object_assign, "vv*vC*iC", "f", BLOCKS_H, ALL_LANGUAGES)
+LIBBUILTIN(_Block_object_dispose, "vvC*iC", "f", BLOCKS_H, ALL_LANGUAGES)
// FIXME: Also declare NSConcreteGlobalBlock and NSConcreteStackBlock.
+// C++ standard library builtins in namespace 'std'.
+LIBBUILTIN(addressof, "v*v&", "zfncThE", MEMORY, CXX_LANG)
+// Synonym for addressof used internally by libstdc++.
+LANGBUILTIN(__addressof, "v*v&", "zfncTE", CXX_LANG)
+LIBBUILTIN(as_const, "v&v&", "zfncThE", UTILITY, CXX_LANG)
+LIBBUILTIN(forward, "v&v&", "zfncThE", UTILITY, CXX_LANG)
+LIBBUILTIN(forward_like, "v&v&", "zfncThE", UTILITY, CXX_LANG)
+LIBBUILTIN(move, "v&v&", "zfncThE", UTILITY, CXX_LANG)
+LIBBUILTIN(move_if_noexcept, "v&v&", "zfncThE", UTILITY, CXX_LANG)
+
// Annotation function
BUILTIN(__builtin_annotation, "v.", "tn")
// Invariants
-BUILTIN(__builtin_assume, "vb", "n")
+BUILTIN(__builtin_assume, "vb", "nE")
+BUILTIN(__builtin_assume_separate_storage, "vvCD*vCD*", "nE")
// Multiprecision Arithmetic Builtins.
BUILTIN(__builtin_addcb, "UcUcCUcCUcCUc*", "n")
@@ -1532,40 +1662,41 @@ BUILTIN(__builtin_subcl, "ULiULiCULiCULiCULi*", "n")
BUILTIN(__builtin_subcll, "ULLiULLiCULLiCULLiCULLi*", "n")
// Checked Arithmetic Builtins for Security.
-BUILTIN(__builtin_add_overflow, "b.", "nt")
-BUILTIN(__builtin_sub_overflow, "b.", "nt")
-BUILTIN(__builtin_mul_overflow, "b.", "nt")
-BUILTIN(__builtin_uadd_overflow, "bUiCUiCUi*", "n")
-BUILTIN(__builtin_uaddl_overflow, "bULiCULiCULi*", "n")
-BUILTIN(__builtin_uaddll_overflow, "bULLiCULLiCULLi*", "n")
-BUILTIN(__builtin_usub_overflow, "bUiCUiCUi*", "n")
-BUILTIN(__builtin_usubl_overflow, "bULiCULiCULi*", "n")
-BUILTIN(__builtin_usubll_overflow, "bULLiCULLiCULLi*", "n")
-BUILTIN(__builtin_umul_overflow, "bUiCUiCUi*", "n")
-BUILTIN(__builtin_umull_overflow, "bULiCULiCULi*", "n")
-BUILTIN(__builtin_umulll_overflow, "bULLiCULLiCULLi*", "n")
-BUILTIN(__builtin_sadd_overflow, "bSiCSiCSi*", "n")
-BUILTIN(__builtin_saddl_overflow, "bSLiCSLiCSLi*", "n")
-BUILTIN(__builtin_saddll_overflow, "bSLLiCSLLiCSLLi*", "n")
-BUILTIN(__builtin_ssub_overflow, "bSiCSiCSi*", "n")
-BUILTIN(__builtin_ssubl_overflow, "bSLiCSLiCSLi*", "n")
-BUILTIN(__builtin_ssubll_overflow, "bSLLiCSLLiCSLLi*", "n")
-BUILTIN(__builtin_smul_overflow, "bSiCSiCSi*", "n")
-BUILTIN(__builtin_smull_overflow, "bSLiCSLiCSLi*", "n")
-BUILTIN(__builtin_smulll_overflow, "bSLLiCSLLiCSLLi*", "n")
+BUILTIN(__builtin_add_overflow, "b.", "ntE")
+BUILTIN(__builtin_sub_overflow, "b.", "ntE")
+BUILTIN(__builtin_mul_overflow, "b.", "ntE")
+BUILTIN(__builtin_uadd_overflow, "bUiCUiCUi*", "nE")
+BUILTIN(__builtin_uaddl_overflow, "bULiCULiCULi*", "nE")
+BUILTIN(__builtin_uaddll_overflow, "bULLiCULLiCULLi*", "nE")
+BUILTIN(__builtin_usub_overflow, "bUiCUiCUi*", "nE")
+BUILTIN(__builtin_usubl_overflow, "bULiCULiCULi*", "nE")
+BUILTIN(__builtin_usubll_overflow, "bULLiCULLiCULLi*", "nE")
+BUILTIN(__builtin_umul_overflow, "bUiCUiCUi*", "nE")
+BUILTIN(__builtin_umull_overflow, "bULiCULiCULi*", "nE")
+BUILTIN(__builtin_umulll_overflow, "bULLiCULLiCULLi*", "nE")
+BUILTIN(__builtin_sadd_overflow, "bSiCSiCSi*", "nE")
+BUILTIN(__builtin_saddl_overflow, "bSLiCSLiCSLi*", "nE")
+BUILTIN(__builtin_saddll_overflow, "bSLLiCSLLiCSLLi*", "nE")
+BUILTIN(__builtin_ssub_overflow, "bSiCSiCSi*", "nE")
+BUILTIN(__builtin_ssubl_overflow, "bSLiCSLiCSLi*", "nE")
+BUILTIN(__builtin_ssubll_overflow, "bSLLiCSLLiCSLLi*", "nE")
+BUILTIN(__builtin_smul_overflow, "bSiCSiCSi*", "nE")
+BUILTIN(__builtin_smull_overflow, "bSLiCSLiCSLi*", "nE")
+BUILTIN(__builtin_smulll_overflow, "bSLLiCSLLiCSLLi*", "nE")
// Clang builtins (not available in GCC).
-BUILTIN(__builtin_addressof, "v*v&", "nct")
-BUILTIN(__builtin_operator_new, "v*z", "tc")
-BUILTIN(__builtin_operator_delete, "vv*", "tn")
-BUILTIN(__builtin_char_memchr, "c*cC*iz", "n")
-BUILTIN(__builtin_dump_struct, "ivC*v*", "tn")
+BUILTIN(__builtin_addressof, "v*v&", "nctE")
+BUILTIN(__builtin_function_start, "v*v&", "nctE")
+BUILTIN(__builtin_operator_new, "v*z", "tcE")
+BUILTIN(__builtin_operator_delete, "vv*", "tnE")
+BUILTIN(__builtin_char_memchr, "c*cC*iz", "nE")
+BUILTIN(__builtin_dump_struct, "v.", "t")
BUILTIN(__builtin_preserve_access_index, "v.", "t")
// Alignment builtins (uses custom parsing to support pointers and integers)
-BUILTIN(__builtin_is_aligned, "bvC*z", "nct")
-BUILTIN(__builtin_align_up, "v*vC*z", "nct")
-BUILTIN(__builtin_align_down, "v*vC*z", "nct")
+BUILTIN(__builtin_is_aligned, "bvC*z", "nctE")
+BUILTIN(__builtin_align_up, "v*vC*z", "nctE")
+BUILTIN(__builtin_align_down, "v*vC*z", "nctE")
// Safestack builtins
BUILTIN(__builtin___get_unsafe_stack_start, "v*", "Fn")
@@ -1584,6 +1715,7 @@ LANGBUILTIN(__builtin_coro_done, "bv*", "n", COR_LANG)
LANGBUILTIN(__builtin_coro_promise, "v*v*IiIb", "n", COR_LANG)
LANGBUILTIN(__builtin_coro_size, "z", "n", COR_LANG)
+LANGBUILTIN(__builtin_coro_align, "z", "n", COR_LANG)
LANGBUILTIN(__builtin_coro_frame, "v*", "n", COR_LANG)
LANGBUILTIN(__builtin_coro_noop, "v*", "n", COR_LANG)
LANGBUILTIN(__builtin_coro_free, "v*v*", "n", COR_LANG)
@@ -1593,62 +1725,65 @@ LANGBUILTIN(__builtin_coro_alloc, "b", "n", COR_LANG)
LANGBUILTIN(__builtin_coro_begin, "v*v*", "n", COR_LANG)
LANGBUILTIN(__builtin_coro_end, "bv*Ib", "n", COR_LANG)
LANGBUILTIN(__builtin_coro_suspend, "cIb", "n", COR_LANG)
-LANGBUILTIN(__builtin_coro_param, "bv*v*", "n", COR_LANG)
// OpenCL v2.0 s6.13.16, s9.17.3.5 - Pipe functions.
// We need the generic prototype, since the packet type could be anything.
-LANGBUILTIN(read_pipe, "i.", "tn", OCLC20_LANG)
-LANGBUILTIN(write_pipe, "i.", "tn", OCLC20_LANG)
+LANGBUILTIN(read_pipe, "i.", "tn", OCL_PIPE)
+LANGBUILTIN(write_pipe, "i.", "tn", OCL_PIPE)
-LANGBUILTIN(reserve_read_pipe, "i.", "tn", OCLC20_LANG)
-LANGBUILTIN(reserve_write_pipe, "i.", "tn", OCLC20_LANG)
+LANGBUILTIN(reserve_read_pipe, "i.", "tn", OCL_PIPE)
+LANGBUILTIN(reserve_write_pipe, "i.", "tn", OCL_PIPE)
-LANGBUILTIN(commit_write_pipe, "v.", "tn", OCLC20_LANG)
-LANGBUILTIN(commit_read_pipe, "v.", "tn", OCLC20_LANG)
+LANGBUILTIN(commit_write_pipe, "v.", "tn", OCL_PIPE)
+LANGBUILTIN(commit_read_pipe, "v.", "tn", OCL_PIPE)
-LANGBUILTIN(sub_group_reserve_read_pipe, "i.", "tn", OCLC20_LANG)
-LANGBUILTIN(sub_group_reserve_write_pipe, "i.", "tn", OCLC20_LANG)
+LANGBUILTIN(sub_group_reserve_read_pipe, "i.", "tn", OCL_PIPE)
+LANGBUILTIN(sub_group_reserve_write_pipe, "i.", "tn", OCL_PIPE)
-LANGBUILTIN(sub_group_commit_read_pipe, "v.", "tn", OCLC20_LANG)
-LANGBUILTIN(sub_group_commit_write_pipe, "v.", "tn", OCLC20_LANG)
+LANGBUILTIN(sub_group_commit_read_pipe, "v.", "tn", OCL_PIPE)
+LANGBUILTIN(sub_group_commit_write_pipe, "v.", "tn", OCL_PIPE)
-LANGBUILTIN(work_group_reserve_read_pipe, "i.", "tn", OCLC20_LANG)
-LANGBUILTIN(work_group_reserve_write_pipe, "i.", "tn", OCLC20_LANG)
+LANGBUILTIN(work_group_reserve_read_pipe, "i.", "tn", OCL_PIPE)
+LANGBUILTIN(work_group_reserve_write_pipe, "i.", "tn", OCL_PIPE)
-LANGBUILTIN(work_group_commit_read_pipe, "v.", "tn", OCLC20_LANG)
-LANGBUILTIN(work_group_commit_write_pipe, "v.", "tn", OCLC20_LANG)
+LANGBUILTIN(work_group_commit_read_pipe, "v.", "tn", OCL_PIPE)
+LANGBUILTIN(work_group_commit_write_pipe, "v.", "tn", OCL_PIPE)
-LANGBUILTIN(get_pipe_num_packets, "Ui.", "tn", OCLC20_LANG)
-LANGBUILTIN(get_pipe_max_packets, "Ui.", "tn", OCLC20_LANG)
+LANGBUILTIN(get_pipe_num_packets, "Ui.", "tn", OCL_PIPE)
+LANGBUILTIN(get_pipe_max_packets, "Ui.", "tn", OCL_PIPE)
// OpenCL v2.0 s6.13.17 - Enqueue kernel functions.
// Custom builtin check allows to perform special check of passed block arguments.
-LANGBUILTIN(enqueue_kernel, "i.", "tn", OCLC20_LANG)
-LANGBUILTIN(get_kernel_work_group_size, "Ui.", "tn", OCLC20_LANG)
-LANGBUILTIN(get_kernel_preferred_work_group_size_multiple, "Ui.", "tn", OCLC20_LANG)
-LANGBUILTIN(get_kernel_max_sub_group_size_for_ndrange, "Ui.", "tn", OCLC20_LANG)
-LANGBUILTIN(get_kernel_sub_group_count_for_ndrange, "Ui.", "tn", OCLC20_LANG)
+LANGBUILTIN(enqueue_kernel, "i.", "tn", OCL_DSE)
+LANGBUILTIN(get_kernel_work_group_size, "Ui.", "tn", OCL_DSE)
+LANGBUILTIN(get_kernel_preferred_work_group_size_multiple, "Ui.", "tn", OCL_DSE)
+LANGBUILTIN(get_kernel_max_sub_group_size_for_ndrange, "Ui.", "tn", OCL_DSE)
+LANGBUILTIN(get_kernel_sub_group_count_for_ndrange, "Ui.", "tn", OCL_DSE)
// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// FIXME: Pointer parameters of OpenCL builtins should have their address space
// requirement defined.
-LANGBUILTIN(to_global, "v*v*", "tn", OCLC20_LANG)
-LANGBUILTIN(to_local, "v*v*", "tn", OCLC20_LANG)
-LANGBUILTIN(to_private, "v*v*", "tn", OCLC20_LANG)
+LANGBUILTIN(to_global, "v*v*", "tn", OCL_GAS)
+LANGBUILTIN(to_local, "v*v*", "tn", OCL_GAS)
+LANGBUILTIN(to_private, "v*v*", "tn", OCL_GAS)
// OpenCL half load/store builtin
-LANGBUILTIN(__builtin_store_half, "vdh*", "n", ALL_OCLC_LANGUAGES)
-LANGBUILTIN(__builtin_store_halff, "vfh*", "n", ALL_OCLC_LANGUAGES)
-LANGBUILTIN(__builtin_load_half, "dhC*", "nc", ALL_OCLC_LANGUAGES)
-LANGBUILTIN(__builtin_load_halff, "fhC*", "nc", ALL_OCLC_LANGUAGES)
+LANGBUILTIN(__builtin_store_half, "vdh*", "n", ALL_OCL_LANGUAGES)
+LANGBUILTIN(__builtin_store_halff, "vfh*", "n", ALL_OCL_LANGUAGES)
+LANGBUILTIN(__builtin_load_half, "dhC*", "nc", ALL_OCL_LANGUAGES)
+LANGBUILTIN(__builtin_load_halff, "fhC*", "nc", ALL_OCL_LANGUAGES)
// Builtins for os_log/os_trace
-BUILTIN(__builtin_os_log_format_buffer_size, "zcC*.", "p:0:nut")
+BUILTIN(__builtin_os_log_format_buffer_size, "zcC*.", "p:0:nutE")
BUILTIN(__builtin_os_log_format, "v*v*cC*.", "p:0:nt")
// CUDA/HIP
LANGBUILTIN(__builtin_get_device_side_mangled_name, "cC*.", "ncT", CUDA_LANG)
+// HLSL
+LANGBUILTIN(__builtin_hlsl_wave_active_count_bits, "Uib", "nc", HLSL_LANG)
+LANGBUILTIN(__builtin_hlsl_create_handle, "v*Uc", "nc", HLSL_LANG)
+
// Builtins for XRay
BUILTIN(__xray_customevent, "vcC*z", "")
BUILTIN(__xray_typedevent, "vzcC*z", "")
@@ -1659,7 +1794,7 @@ BUILTIN(__builtin_ms_va_end, "vc*&", "n")
BUILTIN(__builtin_ms_va_copy, "vc*&c*&", "n")
// Arithmetic Fence: to prevent FP reordering and reassociation optimizations
-LANGBUILTIN(__arithmetic_fence, "v.", "t", ALL_LANGUAGES)
+LANGBUILTIN(__arithmetic_fence, "v.", "tE", ALL_LANGUAGES)
#undef BUILTIN
#undef LIBBUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Builtins.h b/contrib/llvm-project/clang/include/clang/Basic/Builtins.h
index cdaaee48c32d..3fd5b02b5aa5 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Builtins.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Builtins.h
@@ -16,6 +16,8 @@
#define LLVM_CLANG_BASIC_BUILTINS_H
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
#include <cstring>
// VC++ defines 'alloca' as an object-like macro, which interferes with our
@@ -27,21 +29,35 @@ class TargetInfo;
class IdentifierTable;
class LangOptions;
-enum LanguageID {
- GNU_LANG = 0x1, // builtin requires GNU mode.
- C_LANG = 0x2, // builtin for c only.
- CXX_LANG = 0x4, // builtin for cplusplus only.
- OBJC_LANG = 0x8, // builtin for objective-c and objective-c++
- MS_LANG = 0x10, // builtin requires MS mode.
- OCLC20_LANG = 0x20, // builtin for OpenCL C 2.0 only.
- OCLC1X_LANG = 0x40, // builtin for OpenCL C 1.x only.
- OMP_LANG = 0x80, // builtin requires OpenMP.
- CUDA_LANG = 0x100, // builtin requires CUDA.
- COR_LANG = 0x200, // builtin requires use of 'fcoroutine-ts' option.
+enum LanguageID : uint16_t {
+ GNU_LANG = 0x1, // builtin requires GNU mode.
+ C_LANG = 0x2, // builtin for c only.
+ CXX_LANG = 0x4, // builtin for cplusplus only.
+ OBJC_LANG = 0x8, // builtin for objective-c and objective-c++
+ MS_LANG = 0x10, // builtin requires MS mode.
+ OMP_LANG = 0x20, // builtin requires OpenMP.
+ CUDA_LANG = 0x40, // builtin requires CUDA.
+ COR_LANG = 0x80, // builtin requires use of 'fcoroutine-ts' option.
+ OCL_GAS = 0x100, // builtin requires OpenCL generic address space.
+ OCL_PIPE = 0x200, // builtin requires OpenCL pipe.
+ OCL_DSE = 0x400, // builtin requires OpenCL device side enqueue.
+ ALL_OCL_LANGUAGES = 0x800, // builtin for OCL languages.
+ HLSL_LANG = 0x1000, // builtin requires HLSL.
ALL_LANGUAGES = C_LANG | CXX_LANG | OBJC_LANG, // builtin for all languages.
ALL_GNU_LANGUAGES = ALL_LANGUAGES | GNU_LANG, // builtin requires GNU mode.
- ALL_MS_LANGUAGES = ALL_LANGUAGES | MS_LANG, // builtin requires MS mode.
- ALL_OCLC_LANGUAGES = OCLC1X_LANG | OCLC20_LANG // builtin for OCLC languages.
+ ALL_MS_LANGUAGES = ALL_LANGUAGES | MS_LANG // builtin requires MS mode.
+};
+
+struct HeaderDesc {
+ enum HeaderID : uint16_t {
+#define HEADER(ID, NAME) ID,
+#include "clang/Basic/BuiltinHeaders.def"
+#undef HEADER
+ } ID;
+
+ constexpr HeaderDesc(HeaderID ID) : ID(ID) {}
+
+ const char *getName() const;
};
namespace Builtin {
@@ -53,9 +69,11 @@ enum ID {
};
struct Info {
- const char *Name, *Type, *Attributes, *HeaderName;
- LanguageID Langs;
+ llvm::StringLiteral Name;
+ const char *Type, *Attributes;
const char *Features;
+ HeaderDesc Header;
+ LanguageID Langs;
};
/// Holds information about both target-independent and
@@ -69,7 +87,7 @@ class Context {
llvm::ArrayRef<Info> AuxTSRecords;
public:
- Context() {}
+ Context() = default;
/// Perform target-specific initialization
/// \param AuxTarget Target info to incorporate builtins from. May be nullptr.
@@ -82,9 +100,7 @@ public:
/// Return the identifier name for the specified builtin,
/// e.g. "__builtin_abs".
- const char *getName(unsigned ID) const {
- return getRecord(ID).Name;
- }
+ llvm::StringRef getName(unsigned ID) const { return getRecord(ID).Name; }
/// Get the type descriptor string for the specified builtin.
const char *getTypeString(unsigned ID) const {
@@ -137,6 +153,10 @@ public:
/// Determines whether this builtin is a predefined libc/libm
/// function, such as "malloc", where we know the signature a
/// priori.
+ /// In C, such functions behave as if they are predeclared,
+ /// possibly with a warning on first use. In Objective-C and C++,
+ /// they do not, but they are recognized as builtins once we see
+ /// a declaration.
bool isPredefinedLibFunction(unsigned ID) const {
return strchr(getRecord(ID).Attributes, 'f') != nullptr;
}
@@ -155,6 +175,23 @@ public:
return strchr(getRecord(ID).Attributes, 'i') != nullptr;
}
+ /// Determines whether this builtin is a C++ standard library function
+ /// that lives in (possibly-versioned) namespace std, possibly a template
+ /// specialization, where the signature is determined by the standard library
+ /// declaration.
+ bool isInStdNamespace(unsigned ID) const {
+ return strchr(getRecord(ID).Attributes, 'z') != nullptr;
+ }
+
+ /// Determines whether this builtin can have its address taken with no
+ /// special action required.
+ bool isDirectlyAddressable(unsigned ID) const {
+ // Most standard library functions can have their addresses taken. C++
+ // standard library functions formally cannot in C++20 onwards, and when
+ // we allow it, we need to ensure we instantiate a definition.
+ return isPredefinedLibFunction(ID) && !isInStdNamespace(ID);
+ }
+
/// Determines whether this builtin has custom typechecking.
bool hasCustomTypechecking(unsigned ID) const {
return strchr(getRecord(ID).Attributes, 't') != nullptr;
@@ -183,7 +220,7 @@ public:
/// If this is a library function that comes from a specific
/// header, retrieve that header name.
const char *getHeaderName(unsigned ID) const {
- return getRecord(ID).HeaderName;
+ return getRecord(ID).Header.getName();
}
/// Determine whether this builtin is like printf in its
@@ -203,13 +240,18 @@ public:
llvm::SmallVectorImpl<int> &Encoding) const;
/// Return true if this function has no side effects and doesn't
- /// read memory, except for possibly errno.
+ /// read memory, except for possibly errno or raising FP exceptions.
///
- /// Such functions can be const when the MathErrno lang option is disabled.
- bool isConstWithoutErrno(unsigned ID) const {
+ /// Such functions can be const when the MathErrno lang option and FP
+ /// exceptions are disabled.
+ bool isConstWithoutErrnoAndExceptions(unsigned ID) const {
return strchr(getRecord(ID).Attributes, 'e') != nullptr;
}
+ bool isConstWithoutExceptions(unsigned ID) const {
+ return strchr(getRecord(ID).Attributes, 'g') != nullptr;
+ }
+
const char *getRequiredFeatures(unsigned ID) const {
return getRecord(ID).Features;
}
@@ -233,19 +275,28 @@ public:
/// for non-builtins.
bool canBeRedeclared(unsigned ID) const;
+ /// Return true if this function can be constant evaluated by Clang frontend.
+ bool isConstantEvaluated(unsigned ID) const {
+ return strchr(getRecord(ID).Attributes, 'E') != nullptr;
+ }
+
private:
const Info &getRecord(unsigned ID) const;
- /// Is this builtin supported according to the given language options?
- bool builtinIsSupported(const Builtin::Info &BuiltinInfo,
- const LangOptions &LangOpts);
-
/// Helper function for isPrintfLike and isScanfLike.
bool isLike(unsigned ID, unsigned &FormatIdx, bool &HasVAListArg,
const char *Fmt) const;
};
-}
+/// Returns true if the required target features of a builtin function are
+/// enabled.
+/// \p TargetFeatureMap maps a target feature to true if it is enabled and
+/// false if it is disabled.
+bool evaluateRequiredTargetFeatures(
+ llvm::StringRef RequiredFatures,
+ const llvm::StringMap<bool> &TargetFetureMap);
+
+} // namespace Builtin
/// Kinds of BuiltinTemplateDecl.
enum BuiltinTemplateKind : int {
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAArch64.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAArch64.def
index 634bcaed20a6..31ec84143f65 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAArch64.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAArch64.def
@@ -17,6 +17,10 @@
# define LANGBUILTIN(ID, TYPE, ATTRS, BUILTIN_LANG) BUILTIN(ID, TYPE, ATTRS)
#endif
+#if defined(BUILTIN) && !defined(TARGET_BUILTIN)
+# define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BUILTIN(ID, TYPE, ATTRS)
+#endif
+
#if defined(BUILTIN) && !defined(TARGET_HEADER_BUILTIN)
# define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANG, FEATURE) BUILTIN(ID, TYPE, ATTRS)
#endif
@@ -35,6 +39,8 @@ BUILTIN(__builtin_arm_rbit, "UiUi", "nc")
BUILTIN(__builtin_arm_rbit64, "WUiWUi", "nc")
BUILTIN(__builtin_arm_cls, "UiZUi", "nc")
BUILTIN(__builtin_arm_cls64, "UiWUi", "nc")
+BUILTIN(__builtin_arm_clz, "UiZUi", "nc")
+BUILTIN(__builtin_arm_clz64, "UiWUi", "nc")
// HINT
BUILTIN(__builtin_arm_nop, "v", "")
@@ -45,29 +51,35 @@ BUILTIN(__builtin_arm_sev, "v", "")
BUILTIN(__builtin_arm_sevl, "v", "")
// CRC32
-BUILTIN(__builtin_arm_crc32b, "UiUiUc", "nc")
-BUILTIN(__builtin_arm_crc32cb, "UiUiUc", "nc")
-BUILTIN(__builtin_arm_crc32h, "UiUiUs", "nc")
-BUILTIN(__builtin_arm_crc32ch, "UiUiUs", "nc")
-BUILTIN(__builtin_arm_crc32w, "UiUiUi", "nc")
-BUILTIN(__builtin_arm_crc32cw, "UiUiUi", "nc")
-BUILTIN(__builtin_arm_crc32d, "UiUiWUi", "nc")
-BUILTIN(__builtin_arm_crc32cd, "UiUiWUi", "nc")
+TARGET_BUILTIN(__builtin_arm_crc32b, "UiUiUc", "nc", "crc")
+TARGET_BUILTIN(__builtin_arm_crc32cb, "UiUiUc", "nc", "crc")
+TARGET_BUILTIN(__builtin_arm_crc32h, "UiUiUs", "nc", "crc")
+TARGET_BUILTIN(__builtin_arm_crc32ch, "UiUiUs", "nc", "crc")
+TARGET_BUILTIN(__builtin_arm_crc32w, "UiUiUi", "nc", "crc")
+TARGET_BUILTIN(__builtin_arm_crc32cw, "UiUiUi", "nc", "crc")
+TARGET_BUILTIN(__builtin_arm_crc32d, "UiUiWUi", "nc", "crc")
+TARGET_BUILTIN(__builtin_arm_crc32cd, "UiUiWUi", "nc", "crc")
// Memory Tagging Extensions (MTE)
-BUILTIN(__builtin_arm_irg, "v*v*Ui", "t")
-BUILTIN(__builtin_arm_addg, "v*v*Ui", "t")
-BUILTIN(__builtin_arm_gmi, "Uiv*Ui", "t")
-BUILTIN(__builtin_arm_ldg, "v*v*", "t")
-BUILTIN(__builtin_arm_stg, "vv*", "t")
-BUILTIN(__builtin_arm_subp, "Uiv*v*", "t")
+TARGET_BUILTIN(__builtin_arm_irg, "v*v*Ui", "t", "mte")
+TARGET_BUILTIN(__builtin_arm_addg, "v*v*Ui", "t", "mte")
+TARGET_BUILTIN(__builtin_arm_gmi, "Uiv*Ui", "t", "mte")
+TARGET_BUILTIN(__builtin_arm_ldg, "v*v*", "t", "mte")
+TARGET_BUILTIN(__builtin_arm_stg, "vv*", "t", "mte")
+TARGET_BUILTIN(__builtin_arm_subp, "Uiv*v*", "t", "mte")
+
+// SME state function
+BUILTIN(__builtin_arm_get_sme_state, "vULi*ULi*", "n")
+
+// Memory Operations
+TARGET_BUILTIN(__builtin_arm_mops_memset_tag, "v*v*iz", "", "mte,mops")
// Memory barrier
BUILTIN(__builtin_arm_dmb, "vUi", "nc")
BUILTIN(__builtin_arm_dsb, "vUi", "nc")
BUILTIN(__builtin_arm_isb, "vUi", "nc")
-BUILTIN(__builtin_arm_jcvt, "Zid", "nc")
+TARGET_BUILTIN(__builtin_arm_jcvt, "Zid", "nc", "v8.3a")
// Prefetch
BUILTIN(__builtin_arm_prefetch, "vvC*UiUiUiUi", "nc")
@@ -75,9 +87,11 @@ BUILTIN(__builtin_arm_prefetch, "vvC*UiUiUiUi", "nc")
// System Registers
BUILTIN(__builtin_arm_rsr, "UicC*", "nc")
BUILTIN(__builtin_arm_rsr64, "WUicC*", "nc")
+TARGET_BUILTIN(__builtin_arm_rsr128, "LLLUicC*", "nc", "d128")
BUILTIN(__builtin_arm_rsrp, "v*cC*", "nc")
BUILTIN(__builtin_arm_wsr, "vcC*Ui", "nc")
BUILTIN(__builtin_arm_wsr64, "vcC*WUi", "nc")
+TARGET_BUILTIN(__builtin_arm_wsr128, "vcC*LLLUi", "nc", "d128")
BUILTIN(__builtin_arm_wsrp, "vcC*vC*", "nc")
// MSVC
@@ -100,152 +114,181 @@ BUILTIN(__builtin_arm_tcancel, "vWUIi", "n")
BUILTIN(__builtin_arm_ttest, "WUi", "nc")
// Armv8.5-A FP rounding intrinsics
-BUILTIN(__builtin_arm_frint32zf, "ff", "")
-BUILTIN(__builtin_arm_frint32z, "dd", "")
-BUILTIN(__builtin_arm_frint64zf, "ff", "")
-BUILTIN(__builtin_arm_frint64z, "dd", "")
-BUILTIN(__builtin_arm_frint32xf, "ff", "")
-BUILTIN(__builtin_arm_frint32x, "dd", "")
-BUILTIN(__builtin_arm_frint64xf, "ff", "")
-BUILTIN(__builtin_arm_frint64x, "dd", "")
+TARGET_BUILTIN(__builtin_arm_rint32zf, "ff", "", "v8.5a")
+TARGET_BUILTIN(__builtin_arm_rint32z, "dd", "", "v8.5a")
+TARGET_BUILTIN(__builtin_arm_rint64zf, "ff", "", "v8.5a")
+TARGET_BUILTIN(__builtin_arm_rint64z, "dd", "", "v8.5a")
+TARGET_BUILTIN(__builtin_arm_rint32xf, "ff", "", "v8.5a")
+TARGET_BUILTIN(__builtin_arm_rint32x, "dd", "", "v8.5a")
+TARGET_BUILTIN(__builtin_arm_rint64xf, "ff", "", "v8.5a")
+TARGET_BUILTIN(__builtin_arm_rint64x, "dd", "", "v8.5a")
// Armv8.5-A Random number generation intrinsics
-BUILTIN(__builtin_arm_rndr, "iWUi*", "n")
-BUILTIN(__builtin_arm_rndrrs, "iWUi*", "n")
+TARGET_BUILTIN(__builtin_arm_rndr, "iWUi*", "n", "rand")
+TARGET_BUILTIN(__builtin_arm_rndrrs, "iWUi*", "n", "rand")
// Armv8.7-A load/store 64-byte intrinsics
-BUILTIN(__builtin_arm_ld64b, "vvC*WUi*", "n")
-BUILTIN(__builtin_arm_st64b, "vv*WUiC*", "n")
-BUILTIN(__builtin_arm_st64bv, "WUiv*WUiC*", "n")
-BUILTIN(__builtin_arm_st64bv0, "WUiv*WUiC*", "n")
-
-TARGET_HEADER_BUILTIN(_BitScanForward, "UcUNi*UNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_BitScanReverse, "UcUNi*UNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_BitScanForward64, "UcUNi*ULLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_BitScanReverse64, "UcUNi*ULLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_InterlockedAdd, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedDecrement64, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeSub64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedIncrement64, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd_acq, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd_rel, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd_nf, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd8_acq, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd8_rel, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd8_nf, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd16_acq, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd16_rel, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd16_nf, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64_acq, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64_rel, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64_nf, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_InterlockedExchange8_acq, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange8_nf, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange8_rel, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange16_acq, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange16_nf, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange16_rel, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange_acq, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange_nf, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange_rel, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange64_acq, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange64_nf, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange64_rel, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange8_acq, "ccD*cc", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange8_nf, "ccD*cc", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange8_rel, "ccD*cc", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange16_acq, "ssD*ss", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange16_nf, "ssD*ss", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange16_rel, "ssD*ss", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange_acq, "NiNiD*NiNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange_nf, "NiNiD*NiNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange_rel, "NiNiD*NiNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_acq, "LLiLLiD*LLiLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_nf, "LLiLLiD*LLiLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_rel, "LLiLLiD*LLiLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange128, "UcLLiD*LLiLLiLLi*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange128_acq,"UcLLiD*LLiLLiLLi*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange128_nf ,"UcLLiD*LLiLLiLLi*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange128_rel,"UcLLiD*LLiLLiLLi*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_InterlockedOr8_acq, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr8_nf, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr8_rel, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr16_acq, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr16_nf, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr16_rel, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr_acq, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr_nf, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr_rel, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr64_acq, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr64_nf, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr64_rel, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_InterlockedXor8_acq, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor8_nf, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor8_rel, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor16_acq, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor16_nf, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor16_rel, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor_acq, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor_nf, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor_rel, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor64_acq, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor64_nf, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor64_rel, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_InterlockedAnd8_acq, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd8_nf, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd8_rel, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd16_acq, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd16_nf, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd16_rel, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd_acq, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd_nf, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd_rel, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd64_acq, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd64_nf, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd64_rel, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_InterlockedIncrement16_acq, "ssD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedIncrement16_nf, "ssD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedIncrement16_rel, "ssD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedIncrement_acq, "NiNiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedIncrement_nf, "NiNiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedIncrement_rel, "NiNiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedIncrement64_acq, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedIncrement64_nf, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedIncrement64_rel, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_InterlockedDecrement16_acq, "ssD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedDecrement16_nf, "ssD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedDecrement16_rel, "ssD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedDecrement_acq, "NiNiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedDecrement_nf, "NiNiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedDecrement_rel, "NiNiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedDecrement64_acq, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedDecrement64_nf, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedDecrement64_rel, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_ReadWriteBarrier, "v", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__getReg, "ULLii", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_ReadStatusReg, "LLii", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_WriteStatusReg, "viLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_AddressOfReturnAddress, "v*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(__mulh, "SLLiSLLiSLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__umulh, "ULLiULLiULLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_BUILTIN(__builtin_arm_ld64b, "vvC*WUi*", "n", "ls64")
+TARGET_BUILTIN(__builtin_arm_st64b, "vv*WUiC*", "n", "ls64")
+TARGET_BUILTIN(__builtin_arm_st64bv, "WUiv*WUiC*", "n", "ls64")
+TARGET_BUILTIN(__builtin_arm_st64bv0, "WUiv*WUiC*", "n", "ls64")
+
+TARGET_HEADER_BUILTIN(_BitScanForward, "UcUNi*UNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_BitScanReverse, "UcUNi*UNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_BitScanForward64, "UcUNi*ULLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_BitScanReverse64, "UcUNi*ULLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_InterlockedAdd, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd64, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement64, "LLiLLiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange64, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeSub64, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement64, "LLiLLiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr64, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor64, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd_acq, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd_rel, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd_nf, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd8_acq, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd8_rel, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd8_nf, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd16_acq, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd16_rel, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd16_nf, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64_acq, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64_rel, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64_nf, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_InterlockedExchange8_acq, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange8_nf, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange8_rel, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange16_acq, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange16_nf, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange16_rel, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange_acq, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange_nf, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange_rel, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange64_acq, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange64_nf, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange64_rel, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange8_acq, "ccD*cc", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange8_nf, "ccD*cc", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange8_rel, "ccD*cc", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange16_acq, "ssD*ss", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange16_nf, "ssD*ss", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange16_rel, "ssD*ss", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange_acq, "NiNiD*NiNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange_nf, "NiNiD*NiNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange_rel, "NiNiD*NiNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_acq, "LLiLLiD*LLiLLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_nf, "LLiLLiD*LLiLLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_rel, "LLiLLiD*LLiLLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange128, "UcLLiD*LLiLLiLLi*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange128_acq,"UcLLiD*LLiLLiLLi*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange128_nf ,"UcLLiD*LLiLLiLLi*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange128_rel,"UcLLiD*LLiLLiLLi*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_InterlockedOr8_acq, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr8_nf, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr8_rel, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr16_acq, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr16_nf, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr16_rel, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr_acq, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr_nf, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr_rel, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr64_acq, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr64_nf, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr64_rel, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_InterlockedXor8_acq, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor8_nf, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor8_rel, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor16_acq, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor16_nf, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor16_rel, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor_acq, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor_nf, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor_rel, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor64_acq, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor64_nf, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor64_rel, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_InterlockedAnd8_acq, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd8_nf, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd8_rel, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd16_acq, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd16_nf, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd16_rel, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd_acq, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd_nf, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd_rel, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd64_acq, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd64_nf, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd64_rel, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_InterlockedIncrement16_acq, "ssD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement16_nf, "ssD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement16_rel, "ssD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement_acq, "NiNiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement_nf, "NiNiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement_rel, "NiNiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement64_acq, "LLiLLiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement64_nf, "LLiLLiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement64_rel, "LLiLLiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_InterlockedDecrement16_acq, "ssD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement16_nf, "ssD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement16_rel, "ssD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement_acq, "NiNiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement_nf, "NiNiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement_rel, "NiNiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement64_acq, "LLiLLiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement64_nf, "LLiLLiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement64_rel, "LLiLLiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_ReadWriteBarrier, "v", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__getReg, "ULLii", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_ReadStatusReg, "LLii", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_WriteStatusReg, "viLLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_AddressOfReturnAddress, "v*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(__mulh, "SLLiSLLiSLLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__umulh, "ULLiULLiULLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(__break, "vi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(__writex18byte, "vUNiUc", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__writex18word, "vUNiUs", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__writex18dword, "vUNiUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__writex18qword, "vUNiULLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(__readx18byte, "UcUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__readx18word, "UsUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__readx18dword, "UNiUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__readx18qword, "ULLiUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_CopyDoubleFromInt64, "dSLLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_CopyFloatFromInt32, "fSi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_CopyInt32FromFloat, "Sif", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_CopyInt64FromDouble, "SLLid", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_CountLeadingOnes, "UiUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_CountLeadingOnes64, "UiULLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_CountLeadingSigns, "UiSNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_CountLeadingSigns64, "UiSLLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_CountLeadingZeros, "UiUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_CountLeadingZeros64, "UiULLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_CountOneBits, "UiUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_CountOneBits64, "UiULLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(__prefetch, "vv*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
#undef BUILTIN
#undef LANGBUILTIN
+#undef TARGET_BUILTIN
#undef TARGET_HEADER_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAArch64NeonSVEBridge.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAArch64NeonSVEBridge.def
new file mode 100644
index 000000000000..b8bb054d4cce
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAArch64NeonSVEBridge.def
@@ -0,0 +1,39 @@
+#ifdef GET_SVE_BUILTINS
+TARGET_BUILTIN(__builtin_sve_svget_neonq_s8, "V16Scq16Sc", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svget_neonq_s16, "V8sq8s", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svget_neonq_s32, "V4iq4i", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svget_neonq_s64, "V2Wiq2Wi", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svget_neonq_u8, "V16Ucq16Uc", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svget_neonq_u16, "V16Usq16Us", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svget_neonq_u32, "V4Uiq4Ui", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svget_neonq_u64, "V2UWiq2UWi", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svget_neonq_f16, "V8hq8h", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svget_neonq_f32, "V4fq4f", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svget_neonq_f64, "V2dq2d", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svget_neonq_bf16, "V8yq8y", "n", "sve,bf16")
+TARGET_BUILTIN(__builtin_sve_svset_neonq_s8, "q16Scq16ScV16Sc", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svset_neonq_s16, "q8sq8sV8s", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svset_neonq_s32, "q4iq4iV4i", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svset_neonq_s64, "q2Wiq2WiV2Wi", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svset_neonq_u8, "q16Ucq16UcV16Uc", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svset_neonq_u16, "q8Usq8UsV8s", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svset_neonq_u32, "q4Uiq4UiV4Ui", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svset_neonq_u64, "q2UWiq2UWiV2UWi", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svset_neonq_f16, "q8hq8hV8h", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svset_neonq_f32, "q4fq4fV4f", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svset_neonq_f64, "q2dq2dV2d", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svset_neonq_bf16, "q8yq8yV8y", "n", "sve,bf16")
+TARGET_BUILTIN(__builtin_sve_svdup_neonq_s8, "q16ScV16Sc", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svdup_neonq_s16, "q8sV8s", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svdup_neonq_s32, "q4iV4i", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svdup_neonq_s64, "q4iV4i", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svdup_neonq_u8, "q16UcV16Uc", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svdup_neonq_u16, "q8UsV8Us", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svdup_neonq_u32, "q4UiV4Ui", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svdup_neonq_u64, "q2UWiV2UWi", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svdup_neonq_f16, "q8hV8h", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svdup_neonq_f32, "q4fV4f", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svdup_neonq_f64, "q2dV2d", "n", "sve")
+TARGET_BUILTIN(__builtin_sve_svdup_neonq_bf16, "q8yV8y", "n", "sve,bf16")
+#endif
+
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAArch64NeonSVEBridge_cg.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAArch64NeonSVEBridge_cg.def
new file mode 100644
index 000000000000..7717ba67b427
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAArch64NeonSVEBridge_cg.def
@@ -0,0 +1,39 @@
+#ifdef GET_SVE_LLVM_INTRINSIC_MAP
+SVEMAP2(svget_neonq_s8, SVETypeFlags::EltTyInt8),
+SVEMAP2(svget_neonq_s16, SVETypeFlags::EltTyInt16),
+SVEMAP2(svget_neonq_s32, SVETypeFlags::EltTyInt32),
+SVEMAP2(svget_neonq_s64, SVETypeFlags::EltTyInt64),
+SVEMAP2(svget_neonq_u8, SVETypeFlags::EltTyInt8),
+SVEMAP2(svget_neonq_u16, SVETypeFlags::EltTyInt16),
+SVEMAP2(svget_neonq_u32, SVETypeFlags::EltTyInt32),
+SVEMAP2(svget_neonq_u64, SVETypeFlags::EltTyInt64),
+SVEMAP2(svget_neonq_f16, SVETypeFlags::EltTyFloat16),
+SVEMAP2(svget_neonq_f32, SVETypeFlags::EltTyFloat32),
+SVEMAP2(svget_neonq_f64, SVETypeFlags::EltTyFloat64),
+SVEMAP2(svget_neonq_bf16, SVETypeFlags::EltTyBFloat16),
+SVEMAP2(svset_neonq_s8, SVETypeFlags::EltTyInt8),
+SVEMAP2(svset_neonq_s16, SVETypeFlags::EltTyInt16),
+SVEMAP2(svset_neonq_s32, SVETypeFlags::EltTyInt32),
+SVEMAP2(svset_neonq_s64, SVETypeFlags::EltTyInt64),
+SVEMAP2(svset_neonq_u8, SVETypeFlags::EltTyInt8),
+SVEMAP2(svset_neonq_u16, SVETypeFlags::EltTyInt16),
+SVEMAP2(svset_neonq_u32, SVETypeFlags::EltTyInt32),
+SVEMAP2(svset_neonq_u64, SVETypeFlags::EltTyInt64),
+SVEMAP2(svset_neonq_f16, SVETypeFlags::EltTyFloat16),
+SVEMAP2(svset_neonq_f32, SVETypeFlags::EltTyFloat32),
+SVEMAP2(svset_neonq_f64, SVETypeFlags::EltTyFloat64),
+SVEMAP2(svset_neonq_bf16, SVETypeFlags::EltTyBFloat16),
+SVEMAP2(svdup_neonq_s8, SVETypeFlags::EltTyInt8),
+SVEMAP2(svdup_neonq_s16, SVETypeFlags::EltTyInt16),
+SVEMAP2(svdup_neonq_s32, SVETypeFlags::EltTyInt32),
+SVEMAP2(svdup_neonq_s64, SVETypeFlags::EltTyInt64),
+SVEMAP2(svdup_neonq_u8, SVETypeFlags::EltTyInt8),
+SVEMAP2(svdup_neonq_u16, SVETypeFlags::EltTyInt16),
+SVEMAP2(svdup_neonq_u32, SVETypeFlags::EltTyInt32),
+SVEMAP2(svdup_neonq_u64, SVETypeFlags::EltTyInt64),
+SVEMAP2(svdup_neonq_f16, SVETypeFlags::EltTyFloat16),
+SVEMAP2(svdup_neonq_f32, SVETypeFlags::EltTyFloat32),
+SVEMAP2(svdup_neonq_f64, SVETypeFlags::EltTyFloat64),
+SVEMAP2(svdup_neonq_bf16, SVETypeFlags::EltTyBFloat16),
+#endif
+
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAMDGPU.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAMDGPU.def
index 3570431d952c..74dfd1d214e8 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAMDGPU.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsAMDGPU.def
@@ -62,13 +62,11 @@ BUILTIN(__builtin_amdgcn_s_sendmsg, "vIiUi", "n")
BUILTIN(__builtin_amdgcn_s_sendmsghalt, "vIiUi", "n")
BUILTIN(__builtin_amdgcn_s_barrier, "v", "n")
BUILTIN(__builtin_amdgcn_wave_barrier, "v", "n")
+BUILTIN(__builtin_amdgcn_sched_barrier, "vIi", "n")
+BUILTIN(__builtin_amdgcn_sched_group_barrier, "vIiIiIi", "n")
+BUILTIN(__builtin_amdgcn_iglp_opt, "vIi", "n")
BUILTIN(__builtin_amdgcn_s_dcache_inv, "v", "n")
BUILTIN(__builtin_amdgcn_buffer_wbinvl1, "v", "n")
-BUILTIN(__builtin_amdgcn_ds_gws_init, "vUiUi", "n")
-BUILTIN(__builtin_amdgcn_ds_gws_barrier, "vUiUi", "n")
-BUILTIN(__builtin_amdgcn_ds_gws_sema_v, "vUi", "n")
-BUILTIN(__builtin_amdgcn_ds_gws_sema_br, "vUiUi", "n")
-BUILTIN(__builtin_amdgcn_ds_gws_sema_p, "vUi", "n")
BUILTIN(__builtin_amdgcn_fence, "vUicC*", "n")
BUILTIN(__builtin_amdgcn_groupstaticsize, "Ui", "n")
@@ -97,6 +95,8 @@ BUILTIN(__builtin_amdgcn_rsq_clamp, "dd", "nc")
BUILTIN(__builtin_amdgcn_rsq_clampf, "ff", "nc")
BUILTIN(__builtin_amdgcn_sinf, "ff", "nc")
BUILTIN(__builtin_amdgcn_cosf, "ff", "nc")
+BUILTIN(__builtin_amdgcn_logf, "ff", "nc")
+BUILTIN(__builtin_amdgcn_exp2f, "ff", "nc")
BUILTIN(__builtin_amdgcn_log_clampf, "ff", "nc")
BUILTIN(__builtin_amdgcn_ldexp, "ddi", "nc")
BUILTIN(__builtin_amdgcn_ldexpf, "ffi", "nc")
@@ -116,12 +116,7 @@ BUILTIN(__builtin_amdgcn_cubema, "ffff", "nc")
BUILTIN(__builtin_amdgcn_s_sleep, "vIi", "n")
BUILTIN(__builtin_amdgcn_s_incperflevel, "vIi", "n")
BUILTIN(__builtin_amdgcn_s_decperflevel, "vIi", "n")
-BUILTIN(__builtin_amdgcn_uicmp, "WUiUiUiIi", "nc")
-BUILTIN(__builtin_amdgcn_uicmpl, "WUiWUiWUiIi", "nc")
-BUILTIN(__builtin_amdgcn_sicmp, "WUiiiIi", "nc")
-BUILTIN(__builtin_amdgcn_sicmpl, "WUiWiWiIi", "nc")
-BUILTIN(__builtin_amdgcn_fcmp, "WUiddIi", "nc")
-BUILTIN(__builtin_amdgcn_fcmpf, "WUiffIi", "nc")
+BUILTIN(__builtin_amdgcn_s_setprio, "vIs", "n")
BUILTIN(__builtin_amdgcn_ds_swizzle, "iiIi", "nc")
BUILTIN(__builtin_amdgcn_ds_permute, "iii", "nc")
BUILTIN(__builtin_amdgcn_ds_bpermute, "iii", "nc")
@@ -152,13 +147,41 @@ BUILTIN(__builtin_amdgcn_mqsad_pk_u16_u8, "WUiWUiUiWUi", "nc")
BUILTIN(__builtin_amdgcn_mqsad_u32_u8, "V4UiWUiUiV4Ui", "nc")
//===----------------------------------------------------------------------===//
+// Ballot builtins.
+//===----------------------------------------------------------------------===//
+
+TARGET_BUILTIN(__builtin_amdgcn_ballot_w32, "ZUib", "nc", "wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_ballot_w64, "WUib", "nc", "wavefrontsize64")
+
+// Deprecated intrinsics in favor of __builtin_amdgn_ballot_{w32|w64}
+BUILTIN(__builtin_amdgcn_uicmp, "WUiUiUiIi", "nc")
+BUILTIN(__builtin_amdgcn_uicmpl, "WUiWUiWUiIi", "nc")
+BUILTIN(__builtin_amdgcn_sicmp, "WUiiiIi", "nc")
+BUILTIN(__builtin_amdgcn_sicmpl, "WUiWiWiIi", "nc")
+BUILTIN(__builtin_amdgcn_fcmp, "WUiddIi", "nc")
+BUILTIN(__builtin_amdgcn_fcmpf, "WUiffIi", "nc")
+
+//===----------------------------------------------------------------------===//
+// Flat addressing builtins.
+//===----------------------------------------------------------------------===//
+BUILTIN(__builtin_amdgcn_is_shared, "bvC*0", "nc")
+BUILTIN(__builtin_amdgcn_is_private, "bvC*0", "nc")
+
+//===----------------------------------------------------------------------===//
+// GWS builtins.
+//===----------------------------------------------------------------------===//
+TARGET_BUILTIN(__builtin_amdgcn_ds_gws_init, "vUiUi", "n", "gws")
+TARGET_BUILTIN(__builtin_amdgcn_ds_gws_barrier, "vUiUi", "n", "gws")
+TARGET_BUILTIN(__builtin_amdgcn_ds_gws_sema_v, "vUi", "n", "gws")
+TARGET_BUILTIN(__builtin_amdgcn_ds_gws_sema_br, "vUiUi", "n", "gws")
+TARGET_BUILTIN(__builtin_amdgcn_ds_gws_sema_p, "vUi", "n", "gws")
+
+//===----------------------------------------------------------------------===//
// CI+ only builtins.
//===----------------------------------------------------------------------===//
TARGET_BUILTIN(__builtin_amdgcn_s_dcache_inv_vol, "v", "n", "ci-insts")
TARGET_BUILTIN(__builtin_amdgcn_buffer_wbinvl1_vol, "v", "n", "ci-insts")
TARGET_BUILTIN(__builtin_amdgcn_ds_gws_sema_release_all, "vUi", "n", "ci-insts")
-TARGET_BUILTIN(__builtin_amdgcn_is_shared, "bvC*0", "nc", "flat-address-space")
-TARGET_BUILTIN(__builtin_amdgcn_is_private, "bvC*0", "nc", "flat-address-space")
//===----------------------------------------------------------------------===//
// Interpolation builtins.
@@ -196,17 +219,46 @@ TARGET_BUILTIN(__builtin_amdgcn_perm, "UiUiUiUi", "nc", "gfx8-insts")
TARGET_BUILTIN(__builtin_amdgcn_fmed3h, "hhhh", "nc", "gfx9-insts")
+TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fadd_f64, "dd*1d", "t", "gfx90a-insts")
+TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fadd_f32, "ff*1f", "t", "atomic-fadd-rtn-insts")
+TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fadd_v2f16, "V2hV2h*1V2h", "t", "atomic-buffer-global-pk-add-f16-insts")
+TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fmin_f64, "dd*1d", "t", "gfx90a-insts")
+TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fmax_f64, "dd*1d", "t", "gfx90a-insts")
+
+TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fadd_f64, "dd*0d", "t", "gfx90a-insts")
+TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fmin_f64, "dd*0d", "t", "gfx90a-insts")
+TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fmax_f64, "dd*0d", "t", "gfx90a-insts")
+
+TARGET_BUILTIN(__builtin_amdgcn_ds_atomic_fadd_f64, "dd*3d", "t", "gfx90a-insts")
+TARGET_BUILTIN(__builtin_amdgcn_ds_atomic_fadd_f32, "ff*3f", "t", "gfx8-insts")
+
+TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fadd_f32, "ff*0f", "t", "gfx940-insts")
+TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fadd_v2f16, "V2hV2h*0V2h", "t", "atomic-flat-pk-add-16-insts")
+TARGET_BUILTIN(__builtin_amdgcn_flat_atomic_fadd_v2bf16, "V2sV2s*0V2s", "t", "atomic-flat-pk-add-16-insts")
+TARGET_BUILTIN(__builtin_amdgcn_global_atomic_fadd_v2bf16, "V2sV2s*1V2s", "t", "atomic-global-pk-add-bf16-inst")
+TARGET_BUILTIN(__builtin_amdgcn_ds_atomic_fadd_v2bf16, "V2sV2s*3V2s", "t", "atomic-ds-pk-add-16-insts")
+TARGET_BUILTIN(__builtin_amdgcn_ds_atomic_fadd_v2f16, "V2hV2h*3V2h", "t", "atomic-ds-pk-add-16-insts")
+
//===----------------------------------------------------------------------===//
// Deep learning builtins.
//===----------------------------------------------------------------------===//
-TARGET_BUILTIN(__builtin_amdgcn_fdot2, "fV2hV2hfIb", "nc", "dot7-insts")
+TARGET_BUILTIN(__builtin_amdgcn_fdot2, "fV2hV2hfIb", "nc", "dot10-insts")
+TARGET_BUILTIN(__builtin_amdgcn_fdot2_f16_f16, "hV2hV2hh", "nc", "dot9-insts")
+TARGET_BUILTIN(__builtin_amdgcn_fdot2_bf16_bf16, "sV2sV2ss", "nc", "dot9-insts")
+TARGET_BUILTIN(__builtin_amdgcn_fdot2_f32_bf16, "fV2sV2sfIb", "nc", "dot9-insts")
TARGET_BUILTIN(__builtin_amdgcn_sdot2, "SiV2SsV2SsSiIb", "nc", "dot2-insts")
TARGET_BUILTIN(__builtin_amdgcn_udot2, "UiV2UsV2UsUiIb", "nc", "dot2-insts")
TARGET_BUILTIN(__builtin_amdgcn_sdot4, "SiSiSiSiIb", "nc", "dot1-insts")
TARGET_BUILTIN(__builtin_amdgcn_udot4, "UiUiUiUiIb", "nc", "dot7-insts")
+TARGET_BUILTIN(__builtin_amdgcn_sudot4, "iIbiIbiiIb", "nc", "dot8-insts")
TARGET_BUILTIN(__builtin_amdgcn_sdot8, "SiSiSiSiIb", "nc", "dot1-insts")
TARGET_BUILTIN(__builtin_amdgcn_udot8, "UiUiUiUiIb", "nc", "dot7-insts")
+TARGET_BUILTIN(__builtin_amdgcn_sudot8, "iIbiIbiiIb", "nc", "dot8-insts")
+TARGET_BUILTIN(__builtin_amdgcn_dot4_f32_fp8_bf8, "fUiUif", "nc", "gfx12-insts")
+TARGET_BUILTIN(__builtin_amdgcn_dot4_f32_bf8_fp8, "fUiUif", "nc", "gfx12-insts")
+TARGET_BUILTIN(__builtin_amdgcn_dot4_f32_fp8_fp8, "fUiUif", "nc", "gfx12-insts")
+TARGET_BUILTIN(__builtin_amdgcn_dot4_f32_bf8_bf8, "fUiUif", "nc", "gfx12-insts")
//===----------------------------------------------------------------------===//
// GFX10+ only builtins.
@@ -226,6 +278,43 @@ TARGET_BUILTIN(__builtin_amdgcn_image_bvh_intersect_ray_h, "V4UiUifV4fV4hV4hV4Ui
TARGET_BUILTIN(__builtin_amdgcn_image_bvh_intersect_ray_l, "V4UiWUifV4fV4fV4fV4Ui", "nc", "gfx10-insts")
TARGET_BUILTIN(__builtin_amdgcn_image_bvh_intersect_ray_lh, "V4UiWUifV4fV4hV4hV4Ui", "nc", "gfx10-insts")
+
+//===----------------------------------------------------------------------===//
+// GFX11+ only builtins.
+//===----------------------------------------------------------------------===//
+
+// TODO: This is a no-op in wave32. Should the builtin require wavefrontsize64?
+TARGET_BUILTIN(__builtin_amdgcn_permlane64, "UiUi", "nc", "gfx11-insts")
+TARGET_BUILTIN(__builtin_amdgcn_s_wait_event_export_ready, "v", "n", "gfx11-insts")
+
+//===----------------------------------------------------------------------===//
+// WMMA builtins.
+// Postfix w32 indicates the builtin requires wavefront size of 32.
+// Postfix w64 indicates the builtin requires wavefront size of 64.
+//===----------------------------------------------------------------------===//
+TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_f16_w32, "V8fV16hV16hV8f", "nc", "gfx11-insts")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32, "V8fV16sV16sV8f", "nc", "gfx11-insts")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_f16_16x16x16_f16_w32, "V16hV16hV16hV16hIb", "nc", "gfx11-insts")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32, "V16sV16sV16sV16sIb", "nc", "gfx11-insts")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w32, "V16hV16hV16hV16hIb", "nc", "gfx11-insts")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w32, "V16sV16sV16sV16sIb", "nc", "gfx11-insts")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32, "V8iIbV4iIbV4iV8iIb", "nc", "gfx11-insts")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32, "V8iIbV2iIbV2iV8iIb", "nc", "gfx11-insts")
+
+TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_f16_w64, "V4fV16hV16hV4f", "nc", "gfx11-insts")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64, "V4fV16sV16sV4f", "nc", "gfx11-insts")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_f16_16x16x16_f16_w64, "V8hV16hV16hV8hIb", "nc", "gfx11-insts")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64, "V8sV16sV16sV8sIb", "nc", "gfx11-insts")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w64, "V8hV16hV16hV8hIb", "nc", "gfx11-insts")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w64, "V8sV16sV16sV8sIb", "nc", "gfx11-insts")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64, "V4iIbV4iIbV4iV4iIb", "nc", "gfx11-insts")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64, "V4iIbV2iIbV2iV4iIb", "nc", "gfx11-insts")
+
+TARGET_BUILTIN(__builtin_amdgcn_s_sendmsg_rtn, "UiUIi", "n", "gfx11-insts")
+TARGET_BUILTIN(__builtin_amdgcn_s_sendmsg_rtnl, "UWiUIi", "n", "gfx11-insts")
+
+TARGET_BUILTIN(__builtin_amdgcn_ds_bvh_stack_rtn, "V2UiUiUiV4UiIi", "n", "gfx11-insts")
+
//===----------------------------------------------------------------------===//
// Special builtins.
//===----------------------------------------------------------------------===//
@@ -285,5 +374,129 @@ TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_16x16x16bf16_1k, "V4fV4sV4sV4fIiIiIi",
TARGET_BUILTIN(__builtin_amdgcn_mfma_f64_16x16x4f64, "V4dddV4dIiIiIi", "nc", "mai-insts")
TARGET_BUILTIN(__builtin_amdgcn_mfma_f64_4x4x4f64, "ddddIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_i32_16x16x32_i8, "V4iWiWiV4iIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_i32_32x32x16_i8, "V16iWiWiV16iIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_16x16x8_xf32, "V4fV2fV2fV4fIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_32x32x4_xf32, "V16fV2fV2fV16fIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_16x16x32_bf8_bf8, "V4fWiWiV4fIiIiIi", "nc", "fp8-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_16x16x32_bf8_fp8, "V4fWiWiV4fIiIiIi", "nc", "fp8-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_16x16x32_fp8_bf8, "V4fWiWiV4fIiIiIi", "nc", "fp8-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_16x16x32_fp8_fp8, "V4fWiWiV4fIiIiIi", "nc", "fp8-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_32x32x16_bf8_bf8, "V16fWiWiV16fIiIiIi", "nc", "fp8-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_32x32x16_bf8_fp8, "V16fWiWiV16fIiIiIi", "nc", "fp8-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_32x32x16_fp8_bf8, "V16fWiWiV16fIiIiIi", "nc", "fp8-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_32x32x16_fp8_fp8, "V16fWiWiV16fIiIiIi", "nc", "fp8-insts")
+TARGET_BUILTIN(__builtin_amdgcn_smfmac_f32_16x16x32_f16, "V4fV4hV8hV4fiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_smfmac_f32_32x32x16_f16, "V16fV4hV8hV16fiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_smfmac_f32_16x16x32_bf16, "V4fV4sV8sV4fiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_smfmac_f32_32x32x16_bf16, "V16fV4sV8sV16fiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_smfmac_i32_16x16x64_i8, "V4iV2iV4iV4iiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_smfmac_i32_32x32x32_i8, "V16iV2iV4iV16iiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_smfmac_f32_16x16x64_bf8_bf8, "V4fV2iV4iV4fiIiIi", "nc", "fp8-insts")
+TARGET_BUILTIN(__builtin_amdgcn_smfmac_f32_16x16x64_bf8_fp8, "V4fV2iV4iV4fiIiIi", "nc", "fp8-insts")
+TARGET_BUILTIN(__builtin_amdgcn_smfmac_f32_16x16x64_fp8_bf8, "V4fV2iV4iV4fiIiIi", "nc", "fp8-insts")
+TARGET_BUILTIN(__builtin_amdgcn_smfmac_f32_16x16x64_fp8_fp8, "V4fV2iV4iV4fiIiIi", "nc", "fp8-insts")
+TARGET_BUILTIN(__builtin_amdgcn_smfmac_f32_32x32x32_bf8_bf8, "V16fV2iV4iV16fiIiIi", "nc", "fp8-insts")
+TARGET_BUILTIN(__builtin_amdgcn_smfmac_f32_32x32x32_bf8_fp8, "V16fV2iV4iV16fiIiIi", "nc", "fp8-insts")
+TARGET_BUILTIN(__builtin_amdgcn_smfmac_f32_32x32x32_fp8_bf8, "V16fV2iV4iV16fiIiIi", "nc", "fp8-insts")
+TARGET_BUILTIN(__builtin_amdgcn_smfmac_f32_32x32x32_fp8_fp8, "V16fV2iV4iV16fiIiIi", "nc", "fp8-insts")
+
+TARGET_BUILTIN(__builtin_amdgcn_cvt_f32_bf8, "fiIi", "nc", "fp8-conversion-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_f32_fp8, "fiIi", "nc", "fp8-conversion-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_pk_f32_bf8, "V2fiIb", "nc", "fp8-conversion-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_pk_f32_fp8, "V2fiIb", "nc", "fp8-conversion-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_pk_bf8_f32, "iffiIb", "nc", "fp8-conversion-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_pk_fp8_f32, "iffiIb", "nc", "fp8-conversion-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_sr_bf8_f32, "ifiiIi", "nc", "fp8-conversion-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_sr_fp8_f32, "ifiiIi", "nc", "fp8-conversion-insts")
+
+//===----------------------------------------------------------------------===//
+// GFX12+ only builtins.
+//===----------------------------------------------------------------------===//
+
+TARGET_BUILTIN(__builtin_amdgcn_s_sleep_var, "vUi", "n", "gfx12-insts")
+TARGET_BUILTIN(__builtin_amdgcn_permlane16_var, "UiUiUiUiIbIb", "nc", "gfx12-insts")
+TARGET_BUILTIN(__builtin_amdgcn_permlanex16_var, "UiUiUiUiIbIb", "nc", "gfx12-insts")
+TARGET_BUILTIN(__builtin_amdgcn_s_barrier_signal, "vIi", "n", "gfx12-insts")
+TARGET_BUILTIN(__builtin_amdgcn_s_barrier_signal_var, "vi", "n", "gfx12-insts")
+TARGET_BUILTIN(__builtin_amdgcn_s_barrier_wait, "vIs", "n", "gfx12-insts")
+TARGET_BUILTIN(__builtin_amdgcn_s_barrier_signal_isfirst, "bIi", "n", "gfx12-insts")
+TARGET_BUILTIN(__builtin_amdgcn_s_barrier_signal_isfirst_var, "bi", "n", "gfx12-insts")
+TARGET_BUILTIN(__builtin_amdgcn_s_barrier_init, "vii", "n", "gfx12-insts")
+TARGET_BUILTIN(__builtin_amdgcn_s_barrier_join, "vi", "n", "gfx12-insts")
+TARGET_BUILTIN(__builtin_amdgcn_s_wakeup_barrier, "vi", "n", "gfx12-insts")
+TARGET_BUILTIN(__builtin_amdgcn_s_barrier_leave, "b", "n", "gfx12-insts")
+TARGET_BUILTIN(__builtin_amdgcn_s_get_barrier_state, "Uii", "n", "gfx12-insts")
+
+TARGET_BUILTIN(__builtin_amdgcn_global_load_tr_v2i32, "V2iV2i*1", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_global_load_tr_v8i16, "V8sV8s*1", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_global_load_tr_v8f16, "V8hV8h*1", "nc", "gfx12-insts,wavefrontsize32")
+
+TARGET_BUILTIN(__builtin_amdgcn_global_load_tr_i32, "ii*1", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_global_load_tr_v4i16, "V4sV4s*1", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_global_load_tr_v4f16, "V4hV4h*1", "nc", "gfx12-insts,wavefrontsize64")
+
+//===----------------------------------------------------------------------===//
+// WMMA builtins.
+// Postfix w32 indicates the builtin requires wavefront size of 32.
+// Postfix w64 indicates the builtin requires wavefront size of 64.
+//
+// Some of these are very similar to their GFX11 counterparts, but they don't
+// require replication of the A,B matrices, so they use fewer vector elements.
+// Therefore, we add an "_gfx12" suffix to distinguish them from the existing
+// builtins.
+//===----------------------------------------------------------------------===//
+TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_f16_w32_gfx12, "V8fV8hV8hV8f", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32_gfx12, "V8fV8sV8sV8f", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_f16_16x16x16_f16_w32_gfx12, "V8hV8hV8hV8h", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32_gfx12, "V8sV8sV8sV8s", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32_gfx12, "V8iIbV2iIbV2iV8iIb", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32_gfx12, "V8iIbiIbiV8iIb", "nc", "gfx12-insts,wavefrontsize32")
+// These are gfx12-only, but for consistency with the other WMMA variants we're
+// keeping the "_gfx12" suffix.
+TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_fp8_fp8_w32_gfx12, "V8fV2iV2iV8f", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_fp8_bf8_w32_gfx12, "V8fV2iV2iV8f", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_bf8_fp8_w32_gfx12, "V8fV2iV2iV8f", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_bf8_bf8_w32_gfx12, "V8fV2iV2iV8f", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_i32_16x16x32_iu4_w32_gfx12, "V8iIbV2iIbV2iV8iIb", "nc", "gfx12-insts,wavefrontsize32")
+
+TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_f16_w64_gfx12, "V4fV4hV4hV4f", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64_gfx12, "V4fV4sV4sV4f", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_f16_16x16x16_f16_w64_gfx12, "V4hV4hV4hV4h", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64_gfx12, "V4sV4sV4sV4s", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64_gfx12, "V4iIbiIbiV4iIb", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64_gfx12, "V4iIbiIbiV4iIb", "nc", "gfx12-insts,wavefrontsize64")
+// These are gfx12-only, but for consistency with the other WMMA variants we're
+// keeping the "_gfx12" suffix.
+TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_fp8_fp8_w64_gfx12, "V4fiiV4f", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_fp8_bf8_w64_gfx12, "V4fiiV4f", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_bf8_fp8_w64_gfx12, "V4fiiV4f", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_f32_16x16x16_bf8_bf8_w64_gfx12, "V4fiiV4f", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_wmma_i32_16x16x32_iu4_w64_gfx12, "V4iIbiIbiV4iIb", "nc", "gfx12-insts,wavefrontsize64")
+
+TARGET_BUILTIN(__builtin_amdgcn_swmmac_f32_16x16x32_f16_w32, "V8fV8hV16hV8fs", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_swmmac_f32_16x16x32_bf16_w32, "V8fV8sV16sV8fs", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_swmmac_f16_16x16x32_f16_w32, "V8hV8hV16hV8hs", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_swmmac_bf16_16x16x32_bf16_w32, "V8sV8sV16sV8ss", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_swmmac_i32_16x16x32_iu8_w32, "V8iIbV2iIbV4iV8isIb", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_swmmac_i32_16x16x32_iu4_w32, "V8iIbiIbV2iV8isIb", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_swmmac_i32_16x16x64_iu4_w32, "V8iIbV2iIbV4iV8isIb", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_swmmac_f32_16x16x32_fp8_fp8_w32, "V8fV2iV4iV8fs", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_swmmac_f32_16x16x32_fp8_bf8_w32, "V8fV2iV4iV8fs", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_swmmac_f32_16x16x32_bf8_fp8_w32, "V8fV2iV4iV8fs", "nc", "gfx12-insts,wavefrontsize32")
+TARGET_BUILTIN(__builtin_amdgcn_swmmac_f32_16x16x32_bf8_bf8_w32, "V8fV2iV4iV8fs", "nc", "gfx12-insts,wavefrontsize32")
+
+TARGET_BUILTIN(__builtin_amdgcn_swmmac_f32_16x16x32_f16_w64, "V4fV4hV8hV4fs", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_swmmac_f32_16x16x32_bf16_w64, "V4fV4sV8sV4fs", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_swmmac_f16_16x16x32_f16_w64, "V4hV4hV8hV4hs", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_swmmac_bf16_16x16x32_bf16_w64, "V4sV4sV8sV4ss", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_swmmac_i32_16x16x32_iu8_w64, "V4iIbiIbV2iV4isIb", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_swmmac_i32_16x16x32_iu4_w64, "V4iIbiIbiV4isIb", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_swmmac_i32_16x16x64_iu4_w64, "V4iIbiIbV2iV4isIb", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_swmmac_f32_16x16x32_fp8_fp8_w64, "V4fiV2iV4fs", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_swmmac_f32_16x16x32_fp8_bf8_w64, "V4fiV2iV4fs", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_swmmac_f32_16x16x32_bf8_fp8_w64, "V4fiV2iV4fs", "nc", "gfx12-insts,wavefrontsize64")
+TARGET_BUILTIN(__builtin_amdgcn_swmmac_f32_16x16x32_bf8_bf8_w64, "V4fiV2iV4fs", "nc", "gfx12-insts,wavefrontsize64")
+
#undef BUILTIN
#undef TARGET_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsARM.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsARM.def
index be20c24aa28a..9ee918cb2147 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsARM.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsARM.def
@@ -17,6 +17,10 @@
# define LANGBUILTIN(ID, TYPE, ATTRS, BUILTIN_LANG) BUILTIN(ID, TYPE, ATTRS)
#endif
+#if defined(BUILTIN) && !defined(TARGET_BUILTIN)
+# define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BUILTIN(ID, TYPE, ATTRS)
+#endif
+
#if defined(BUILTIN) && !defined(TARGET_HEADER_BUILTIN)
# define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANG, FEATURE) BUILTIN(ID, TYPE, ATTRS)
#endif
@@ -115,6 +119,8 @@ BUILTIN(__builtin_arm_smusdx, "iii", "nc")
// Bit manipulation
BUILTIN(__builtin_arm_rbit, "UiUi", "nc")
+BUILTIN(__builtin_arm_clz, "UiZUi", "nc")
+BUILTIN(__builtin_arm_clz64, "UiWUi", "nc")
BUILTIN(__builtin_arm_cls, "UiZUi", "nc")
BUILTIN(__builtin_arm_cls64, "UiWUi", "nc")
@@ -157,14 +163,14 @@ BUILTIN(__builtin_arm_mrrc, "LLUiUIiUIiUIi", "")
BUILTIN(__builtin_arm_mrrc2, "LLUiUIiUIiUIi", "")
// CRC32
-BUILTIN(__builtin_arm_crc32b, "UiUiUc", "nc")
-BUILTIN(__builtin_arm_crc32cb, "UiUiUc", "nc")
-BUILTIN(__builtin_arm_crc32h, "UiUiUs", "nc")
-BUILTIN(__builtin_arm_crc32ch, "UiUiUs", "nc")
-BUILTIN(__builtin_arm_crc32w, "UiUiUi", "nc")
-BUILTIN(__builtin_arm_crc32cw, "UiUiUi", "nc")
-BUILTIN(__builtin_arm_crc32d, "UiUiLLUi", "nc")
-BUILTIN(__builtin_arm_crc32cd, "UiUiLLUi", "nc")
+TARGET_BUILTIN(__builtin_arm_crc32b, "UiUiUc", "nc", "crc")
+TARGET_BUILTIN(__builtin_arm_crc32cb, "UiUiUc", "nc", "crc")
+TARGET_BUILTIN(__builtin_arm_crc32h, "UiUiUs", "nc", "crc")
+TARGET_BUILTIN(__builtin_arm_crc32ch, "UiUiUs", "nc", "crc")
+TARGET_BUILTIN(__builtin_arm_crc32w, "UiUiUi", "nc", "crc")
+TARGET_BUILTIN(__builtin_arm_crc32cw, "UiUiUi", "nc", "crc")
+TARGET_BUILTIN(__builtin_arm_crc32d, "UiUiLLUi", "nc", "crc")
+TARGET_BUILTIN(__builtin_arm_crc32cd, "UiUiLLUi", "nc", "crc")
// ARMv8-M Security Extensions a.k.a CMSE
BUILTIN(__builtin_arm_cmse_TT, "Uiv*", "n")
@@ -197,6 +203,9 @@ BUILTIN(__builtin_arm_wsr, "vcC*Ui", "nc")
BUILTIN(__builtin_arm_wsr64, "vcC*LLUi", "nc")
BUILTIN(__builtin_arm_wsrp, "vcC*vC*", "nc")
+// Misc
+BUILTIN(__builtin_sponentry, "v*", "c")
+
// Builtins for implementing ACLE MVE intrinsics. (Unlike NEON, these
// don't need to live in a separate BuiltinsMVE.def, because they
// aren't included from both here and BuiltinsAArch64.def.)
@@ -222,118 +231,119 @@ LANGBUILTIN(_MoveFromCoprocessor2, "UiIUiIUiIUiIUiIUi", "", ALL_MS_LANGUAGES)
LANGBUILTIN(_MoveToCoprocessor, "vUiIUiIUiIUiIUiIUi", "", ALL_MS_LANGUAGES)
LANGBUILTIN(_MoveToCoprocessor2, "vUiIUiIUiIUiIUiIUi", "", ALL_MS_LANGUAGES)
-TARGET_HEADER_BUILTIN(_BitScanForward, "UcUNi*UNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_BitScanReverse, "UcUNi*UNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_BitScanForward64, "UcUNi*ULLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_BitScanReverse64, "UcUNi*ULLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_InterlockedAnd64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedDecrement64, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeSub64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedIncrement64, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor64, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd_acq, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd_rel, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd_nf, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd8_acq, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd8_rel, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd8_nf, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd16_acq, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd16_rel, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd16_nf, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64_acq, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64_rel, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64_nf, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_InterlockedExchange8_acq, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange8_nf, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange8_rel, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange16_acq, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange16_nf, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange16_rel, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange_acq, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange_nf, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange_rel, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange64_acq, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange64_nf, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange64_rel, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange8_acq, "ccD*cc", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange8_nf, "ccD*cc", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange8_rel, "ccD*cc", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange16_acq, "ssD*ss", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange16_nf, "ssD*ss", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange16_rel, "ssD*ss", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange_acq, "NiNiD*NiNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange_nf, "NiNiD*NiNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange_rel, "NiNiD*NiNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_acq, "LLiLLiD*LLiLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_nf, "LLiLLiD*LLiLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_rel, "LLiLLiD*LLiLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_InterlockedOr8_acq, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr8_nf, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr8_rel, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr16_acq, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr16_nf, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr16_rel, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr_acq, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr_nf, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr_rel, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr64_acq, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr64_nf, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr64_rel, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_InterlockedXor8_acq, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor8_nf, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor8_rel, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor16_acq, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor16_nf, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor16_rel, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor_acq, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor_nf, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor_rel, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor64_acq, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor64_nf, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor64_rel, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_InterlockedAnd8_acq, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd8_nf, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd8_rel, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd16_acq, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd16_nf, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd16_rel, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd_acq, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd_nf, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd_rel, "NiNiD*Ni", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd64_acq, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd64_nf, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedAnd64_rel, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_InterlockedIncrement16_acq, "ssD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedIncrement16_nf, "ssD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedIncrement16_rel, "ssD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedIncrement_acq, "NiNiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedIncrement_nf, "NiNiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedIncrement_rel, "NiNiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedIncrement64_acq, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedIncrement64_nf, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedIncrement64_rel, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_InterlockedDecrement16_acq, "ssD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedDecrement16_nf, "ssD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedDecrement16_rel, "ssD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedDecrement_acq, "NiNiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedDecrement_nf, "NiNiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedDecrement_rel, "NiNiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedDecrement64_acq, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedDecrement64_nf, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedDecrement64_rel, "LLiLLiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_BitScanForward, "UcUNi*UNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_BitScanReverse, "UcUNi*UNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_BitScanForward64, "UcUNi*ULLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_BitScanReverse64, "UcUNi*ULLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_InterlockedAnd64, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement64, "LLiLLiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange64, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeSub64, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement64, "LLiLLiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr64, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor64, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd_acq, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd_rel, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd_nf, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd8_acq, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd8_rel, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd8_nf, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd16_acq, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd16_rel, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd16_nf, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64_acq, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64_rel, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64_nf, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_InterlockedExchange8_acq, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange8_nf, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange8_rel, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange16_acq, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange16_nf, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange16_rel, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange_acq, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange_nf, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange_rel, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange64_acq, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange64_nf, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange64_rel, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange8_acq, "ccD*cc", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange8_nf, "ccD*cc", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange8_rel, "ccD*cc", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange16_acq, "ssD*ss", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange16_nf, "ssD*ss", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange16_rel, "ssD*ss", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange_acq, "NiNiD*NiNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange_nf, "NiNiD*NiNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange_rel, "NiNiD*NiNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_acq, "LLiLLiD*LLiLLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_nf, "LLiLLiD*LLiLLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_rel, "LLiLLiD*LLiLLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_InterlockedOr8_acq, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr8_nf, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr8_rel, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr16_acq, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr16_nf, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr16_rel, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr_acq, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr_nf, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr_rel, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr64_acq, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr64_nf, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr64_rel, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_InterlockedXor8_acq, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor8_nf, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor8_rel, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor16_acq, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor16_nf, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor16_rel, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor_acq, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor_nf, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor_rel, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor64_acq, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor64_nf, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor64_rel, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_InterlockedAnd8_acq, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd8_nf, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd8_rel, "ccD*c", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd16_acq, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd16_nf, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd16_rel, "ssD*s", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd_acq, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd_nf, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd_rel, "NiNiD*Ni", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd64_acq, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd64_nf, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedAnd64_rel, "LLiLLiD*LLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_InterlockedIncrement16_acq, "ssD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement16_nf, "ssD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement16_rel, "ssD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement_acq, "NiNiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement_nf, "NiNiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement_rel, "NiNiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement64_acq, "LLiLLiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement64_nf, "LLiLLiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement64_rel, "LLiLLiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_InterlockedDecrement16_acq, "ssD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement16_nf, "ssD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement16_rel, "ssD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement_acq, "NiNiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement_nf, "NiNiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement_rel, "NiNiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement64_acq, "LLiLLiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement64_nf, "LLiLLiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement64_rel, "LLiLLiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
#undef BUILTIN
#undef LANGBUILTIN
+#undef TARGET_BUILTIN
#undef TARGET_HEADER_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagon.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagon.def
index 0001bd556117..0dc0f4567dd4 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagon.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagon.def
@@ -17,8 +17,14 @@
# define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BUILTIN(ID, TYPE, ATTRS)
#endif
+#pragma push_macro("V73")
+#define V73 "v73"
+#pragma push_macro("V71")
+#define V71 "v71|" V73
+#pragma push_macro("V69")
+#define V69 "v69|" V71
#pragma push_macro("V68")
-#define V68 "v68"
+#define V68 "v68|" V69
#pragma push_macro("V67")
#define V67 "v67|" V68
#pragma push_macro("V66")
@@ -34,8 +40,14 @@
#pragma push_macro("V5")
#define V5 "v5|" V55
+#pragma push_macro("HVXV73")
+#define HVXV73 "hvxv73"
+#pragma push_macro("HVXV71")
+#define HVXV71 "hvxv71|" HVXV73
+#pragma push_macro("HVXV69")
+#define HVXV69 "hvxv69|" HVXV71
#pragma push_macro("HVXV68")
-#define HVXV68 "hvxv68"
+#define HVXV68 "hvxv68|" HVXV69
#pragma push_macro("HVXV67")
#define HVXV67 "hvxv67|" HVXV68
#pragma push_macro("HVXV66")
@@ -128,6 +140,9 @@ TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B,"V64iV64iV32iLLi","", "
#pragma pop_macro("HVXV66")
#pragma pop_macro("HVXV67")
#pragma pop_macro("HVXV68")
+#pragma pop_macro("HVXV69")
+#pragma pop_macro("HVXV71")
+#pragma pop_macro("HVXV73")
#pragma pop_macro("V5")
#pragma pop_macro("V55")
@@ -137,6 +152,9 @@ TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B,"V64iV64iV32iLLi","", "
#pragma pop_macro("V66")
#pragma pop_macro("V67")
#pragma pop_macro("V68")
+#pragma pop_macro("V69")
+#pragma pop_macro("V71")
+#pragma pop_macro("V73")
#undef BUILTIN
#undef TARGET_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagonDep.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagonDep.def
index 152c9c4dd8ad..6f1ae69037e3 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagonDep.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagonDep.def
@@ -1720,6 +1720,8 @@ TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermwq_128B, "vV128biiV32iV32i", "", HV
// V66 HVX Instructions.
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddcarryo, "V16iV16iV16iv*", "", HVXV66)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddcarryo_128B, "V32iV32iV32iv*", "", HVXV66)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddcarrysat, "V16iV16iV16iV64b", "", HVXV66)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddcarrysat_128B, "V32iV32iV32iV128b", "", HVXV66)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vasr_into, "V32iV32iV16iV16i", "", HVXV66)
@@ -1728,6 +1730,8 @@ TARGET_BUILTIN(__builtin_HEXAGON_V6_vrotr, "V16iV16iV16i", "", HVXV66)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vrotr_128B, "V32iV32iV32i", "", HVXV66)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vsatdw, "V16iV16iV16i", "", HVXV66)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vsatdw_128B, "V32iV32iV32i", "", HVXV66)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubcarryo, "V16iV16iV16iv*", "", HVXV66)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubcarryo_128B, "V32iV32iV32iv*", "", HVXV66)
// V68 HVX Instructions.
@@ -1739,3 +1743,183 @@ TARGET_BUILTIN(__builtin_HEXAGON_V6_v6mpyvubs10, "V32iV32iV32iUIi", "", HVXV68)
TARGET_BUILTIN(__builtin_HEXAGON_V6_v6mpyvubs10_128B, "V64iV64iV64iUIi", "", HVXV68)
TARGET_BUILTIN(__builtin_HEXAGON_V6_v6mpyvubs10_vxx, "V32iV32iV32iV32iUIi", "", HVXV68)
TARGET_BUILTIN(__builtin_HEXAGON_V6_v6mpyvubs10_vxx_128B, "V64iV64iV64iV64iUIi", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabs_hf, "V16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabs_hf_128B, "V32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabs_sf, "V16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabs_sf_128B, "V32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadd_hf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadd_hf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadd_hf_hf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadd_hf_hf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadd_qf16, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadd_qf16_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadd_qf16_mix, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadd_qf16_mix_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadd_qf32, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadd_qf32_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadd_qf32_mix, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadd_qf32_mix_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadd_sf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadd_sf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadd_sf_hf, "V32iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadd_sf_hf_128B, "V64iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadd_sf_sf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadd_sf_sf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vassign_fp, "V16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vassign_fp_128B, "V32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vconv_hf_qf16, "V16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vconv_hf_qf16_128B, "V32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vconv_hf_qf32, "V16iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vconv_hf_qf32_128B, "V32iV64i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vconv_sf_qf32, "V16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vconv_sf_qf32_128B, "V32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcvt_b_hf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcvt_b_hf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcvt_h_hf, "V16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcvt_h_hf_128B, "V32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcvt_hf_b, "V32iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcvt_hf_b_128B, "V64iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcvt_hf_h, "V16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcvt_hf_h_128B, "V32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcvt_hf_sf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcvt_hf_sf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcvt_hf_ub, "V32iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcvt_hf_ub_128B, "V64iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcvt_hf_uh, "V16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcvt_hf_uh_128B, "V32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcvt_sf_hf, "V32iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcvt_sf_hf_128B, "V64iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcvt_ub_hf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcvt_ub_hf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcvt_uh_hf, "V16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcvt_uh_hf_128B, "V32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpy_sf_hf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpy_sf_hf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpy_sf_hf_acc, "V16iV16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpy_sf_hf_acc_128B, "V32iV32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vfmax_hf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vfmax_hf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vfmax_sf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vfmax_sf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vfmin_hf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vfmin_hf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vfmin_sf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vfmin_sf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vfneg_hf, "V16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vfneg_hf_128B, "V32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vfneg_sf, "V16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vfneg_sf_128B, "V32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgthf, "V64bV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgthf_128B, "V128bV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgthf_and, "V64bV64bV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgthf_and_128B, "V128bV128bV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgthf_or, "V64bV64bV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgthf_or_128B, "V128bV128bV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgthf_xor, "V64bV64bV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgthf_xor_128B, "V128bV128bV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtsf, "V64bV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtsf_128B, "V128bV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtsf_and, "V64bV64bV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtsf_and_128B, "V128bV128bV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtsf_or, "V64bV64bV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtsf_or_128B, "V128bV128bV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtsf_xor, "V64bV64bV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtsf_xor_128B, "V128bV128bV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmax_hf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmax_hf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmax_sf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmax_sf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmin_hf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmin_hf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmin_sf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmin_sf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_hf_hf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_hf_hf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_hf_hf_acc, "V16iV16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_hf_hf_acc_128B, "V32iV32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_qf16, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_qf16_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_qf16_hf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_qf16_hf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_qf16_mix_hf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_qf16_mix_hf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_qf32, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_qf32_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_qf32_hf, "V32iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_qf32_hf_128B, "V64iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_qf32_mix_hf, "V32iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_qf32_mix_hf_128B, "V64iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_qf32_qf16, "V32iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_qf32_qf16_128B, "V64iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_qf32_sf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_qf32_sf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_sf_hf, "V32iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_sf_hf_128B, "V64iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_sf_hf_acc, "V32iV32iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_sf_hf_acc_128B, "V64iV64iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_sf_sf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_sf_sf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsub_hf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsub_hf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsub_hf_hf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsub_hf_hf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsub_qf16, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsub_qf16_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsub_qf16_mix, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsub_qf16_mix_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsub_qf32, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsub_qf32_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsub_qf32_mix, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsub_qf32_mix_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsub_sf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsub_sf_128B, "V32iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsub_sf_hf, "V32iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsub_sf_hf_128B, "V64iV32iV32i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsub_sf_sf, "V16iV16iV16i", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsub_sf_sf_128B, "V32iV32iV32i", "", HVXV68)
+
+// V69 HVX Instructions.
+
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrvuhubrndsat, "V16iV32iV16i", "", HVXV69)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrvuhubrndsat_128B, "V32iV64iV32i", "", HVXV69)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrvuhubsat, "V16iV32iV16i", "", HVXV69)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrvuhubsat_128B, "V32iV64iV32i", "", HVXV69)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrvwuhrndsat, "V16iV32iV16i", "", HVXV69)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrvwuhrndsat_128B, "V32iV64iV32i", "", HVXV69)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrvwuhsat, "V16iV32iV16i", "", HVXV69)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrvwuhsat_128B, "V32iV64iV32i", "", HVXV69)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuhvs, "V16iV16iV16i", "", HVXV69)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuhvs_128B, "V32iV32iV32i", "", HVXV69)
+
+// V73 HVX Instructions.
+
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadd_sf_bf, "V32iV16iV16i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadd_sf_bf_128B, "V64iV32iV32i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vconv_h_hf, "V16iV16i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vconv_h_hf_128B, "V32iV32i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vconv_hf_h, "V16iV16i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vconv_hf_h_128B, "V32iV32i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vconv_sf_w, "V16iV16i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vconv_sf_w_128B, "V32iV32i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vconv_w_sf, "V16iV16i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vconv_w_sf_128B, "V32iV32i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcvt_bf_sf, "V16iV16iV16i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcvt_bf_sf_128B, "V32iV32iV32i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtbf, "V64bV16iV16i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtbf_128B, "V128bV32iV32i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtbf_and, "V64bV64bV16iV16i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtbf_and_128B, "V128bV128bV32iV32i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtbf_or, "V64bV64bV16iV16i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtbf_or_128B, "V128bV128bV32iV32i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtbf_xor, "V64bV64bV16iV16i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtbf_xor_128B, "V128bV128bV32iV32i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmax_bf, "V16iV16iV16i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmax_bf_128B, "V32iV32iV32i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmin_bf, "V16iV16iV16i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmin_bf_128B, "V32iV32iV32i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_sf_bf, "V32iV16iV16i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_sf_bf_128B, "V64iV32iV32i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_sf_bf_acc, "V32iV32iV16iV16i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpy_sf_bf_acc_128B, "V64iV64iV32iV32i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsub_sf_bf, "V32iV16iV16i", "", HVXV73)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsub_sf_bf_128B, "V64iV32iV32i", "", HVXV73)
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagonMapCustomDep.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagonMapCustomDep.def
index 93f560fc5adc..9390d54e0847 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagonMapCustomDep.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsHexagonMapCustomDep.def
@@ -8,199 +8,11 @@
// Automatically generated file, do not edit!
//===----------------------------------------------------------------------===//
-CUSTOM_BUILTIN_MAPPING(A2_add, 0)
-CUSTOM_BUILTIN_MAPPING(A2_addi, 0)
-CUSTOM_BUILTIN_MAPPING(A2_addp, 0)
-CUSTOM_BUILTIN_MAPPING(A2_and, 0)
-CUSTOM_BUILTIN_MAPPING(A2_andir, 0)
-CUSTOM_BUILTIN_MAPPING(A2_neg, 0)
-CUSTOM_BUILTIN_MAPPING(A2_not, 0)
-CUSTOM_BUILTIN_MAPPING(A2_or, 0)
-CUSTOM_BUILTIN_MAPPING(A2_orir, 0)
-CUSTOM_BUILTIN_MAPPING(A2_sub, 0)
-CUSTOM_BUILTIN_MAPPING(A2_subp, 0)
-CUSTOM_BUILTIN_MAPPING(A2_subri, 0)
-CUSTOM_BUILTIN_MAPPING(A2_sxtb, 0)
-CUSTOM_BUILTIN_MAPPING(A2_sxth, 0)
-CUSTOM_BUILTIN_MAPPING(A2_xor, 0)
-CUSTOM_BUILTIN_MAPPING(A2_zxtb, 0)
-CUSTOM_BUILTIN_MAPPING(A2_zxth, 0)
-CUSTOM_BUILTIN_MAPPING(M2_dpmpyss_s0, 0)
-CUSTOM_BUILTIN_MAPPING(M2_dpmpyuu_s0, 0)
-CUSTOM_BUILTIN_MAPPING(M2_mpyi, 0)
-CUSTOM_BUILTIN_MAPPING(M2_mpysmi, 0)
-CUSTOM_BUILTIN_MAPPING(M2_mpyui, 0)
-CUSTOM_BUILTIN_MAPPING(S2_asl_i_p, 0)
-CUSTOM_BUILTIN_MAPPING(S2_asl_i_r, 0)
-CUSTOM_BUILTIN_MAPPING(S2_asr_i_p, 0)
-CUSTOM_BUILTIN_MAPPING(S2_asr_i_r, 0)
-CUSTOM_BUILTIN_MAPPING(S2_lsr_i_p, 0)
-CUSTOM_BUILTIN_MAPPING(S2_lsr_i_r, 0)
-CUSTOM_BUILTIN_MAPPING(V6_pred_and, 64)
-CUSTOM_BUILTIN_MAPPING(V6_pred_and_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_pred_and_n, 64)
-CUSTOM_BUILTIN_MAPPING(V6_pred_and_n_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_pred_not, 64)
-CUSTOM_BUILTIN_MAPPING(V6_pred_not_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_pred_or, 64)
-CUSTOM_BUILTIN_MAPPING(V6_pred_or_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_pred_or_n, 64)
-CUSTOM_BUILTIN_MAPPING(V6_pred_or_n_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_pred_scalar2, 64)
-CUSTOM_BUILTIN_MAPPING(V6_pred_scalar2_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_pred_xor, 64)
-CUSTOM_BUILTIN_MAPPING(V6_pred_xor_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vS32b_nqpred_ai, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vS32b_nqpred_ai_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vS32b_nt_nqpred_ai, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vS32b_nt_nqpred_ai_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vS32b_nt_qpred_ai, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vS32b_nt_qpred_ai_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vS32b_qpred_ai, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vS32b_qpred_ai_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vaddbnq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vaddbnq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vaddbq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vaddbq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vaddhnq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vaddhnq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vaddhq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vaddhq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vaddwnq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vaddwnq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vaddwq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vaddwq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vandqrt, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vandqrt_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vandqrt_acc, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vandqrt_acc_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vandvrt, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vandvrt_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vandvrt_acc, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vandvrt_acc_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_veqb, 64)
-CUSTOM_BUILTIN_MAPPING(V6_veqb_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_veqb_and, 64)
-CUSTOM_BUILTIN_MAPPING(V6_veqb_and_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_veqb_or, 64)
-CUSTOM_BUILTIN_MAPPING(V6_veqb_or_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_veqb_xor, 64)
-CUSTOM_BUILTIN_MAPPING(V6_veqb_xor_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_veqh, 64)
-CUSTOM_BUILTIN_MAPPING(V6_veqh_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_veqh_and, 64)
-CUSTOM_BUILTIN_MAPPING(V6_veqh_and_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_veqh_or, 64)
-CUSTOM_BUILTIN_MAPPING(V6_veqh_or_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_veqh_xor, 64)
-CUSTOM_BUILTIN_MAPPING(V6_veqh_xor_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_veqw, 64)
-CUSTOM_BUILTIN_MAPPING(V6_veqw_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_veqw_and, 64)
-CUSTOM_BUILTIN_MAPPING(V6_veqw_and_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_veqw_or, 64)
-CUSTOM_BUILTIN_MAPPING(V6_veqw_or_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_veqw_xor, 64)
-CUSTOM_BUILTIN_MAPPING(V6_veqw_xor_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtb, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtb_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtb_and, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtb_and_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtb_or, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtb_or_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtb_xor, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtb_xor_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgth, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgth_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgth_and, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgth_and_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgth_or, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgth_or_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgth_xor, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgth_xor_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtub, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtub_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtub_and, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtub_and_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtub_or, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtub_or_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtub_xor, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtub_xor_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuh, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuh_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuh_and, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuh_and_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuh_or, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuh_or_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuh_xor, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuh_xor_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuw, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuw_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuw_and, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuw_and_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuw_or, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuw_or_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuw_xor, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuw_xor_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtw, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtw_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtw_and, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtw_and_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtw_or, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtw_or_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtw_xor, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtw_xor_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vmux, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vmux_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vsubbnq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vsubbnq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vsubbq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vsubbq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vsubhnq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vsubhnq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vsubhq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vsubhq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vsubwnq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vsubwnq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vsubwq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vsubwq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vswap, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vswap_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_pred_scalar2v2, 64)
-CUSTOM_BUILTIN_MAPPING(V6_pred_scalar2v2_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_shuffeqh, 64)
-CUSTOM_BUILTIN_MAPPING(V6_shuffeqh_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_shuffeqw, 64)
-CUSTOM_BUILTIN_MAPPING(V6_shuffeqw_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vaddcarry, 64)
CUSTOM_BUILTIN_MAPPING(V6_vaddcarry_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vandnqrt, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vandnqrt_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vandnqrt_acc, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vandnqrt_acc_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vandvnqv, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vandvnqv_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vandvqv, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vandvqv_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vsubcarry, 64)
CUSTOM_BUILTIN_MAPPING(V6_vsubcarry_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgathermhq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgathermhq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgathermhwq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgathermhwq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgathermwq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgathermwq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vprefixqb, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vprefixqb_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vprefixqh, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vprefixqh_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vprefixqw, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vprefixqw_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vscattermhq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vscattermhq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vscattermhwq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vscattermhwq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vscattermwq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vscattermwq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vaddcarrysat, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vaddcarrysat_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vaddcarryo, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vaddcarryo_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vsubcarryo, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vsubcarryo_128B, 128)
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsLoongArch.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsLoongArch.def
new file mode 100644
index 000000000000..95359a3fdc71
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsLoongArch.def
@@ -0,0 +1,28 @@
+//==- BuiltinsLoongArch.def - LoongArch Builtin function database -- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LoongArch-specific builtin function database. Users of
+// this file must define the BUILTIN macro to make use of this information.
+//
+//===----------------------------------------------------------------------===//
+
+#if defined(BUILTIN) && !defined(TARGET_BUILTIN)
+# define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BUILTIN(ID, TYPE, ATTRS)
+#endif
+
+// Definition of LoongArch basic builtins.
+#include "clang/Basic/BuiltinsLoongArchBase.def"
+
+// Definition of LSX builtins.
+#include "clang/Basic/BuiltinsLoongArchLSX.def"
+
+// Definition of LASX builtins.
+#include "clang/Basic/BuiltinsLoongArchLASX.def"
+
+#undef BUILTIN
+#undef TARGET_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsLoongArchBase.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsLoongArchBase.def
new file mode 100644
index 000000000000..a5a07c167908
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsLoongArchBase.def
@@ -0,0 +1,58 @@
+//============------------ BuiltinsLoongArchBase.def -------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LoongArch-specific basic builtin function database.
+// Users of this file must define the BUILTIN macro to make use of this
+// information.
+//
+//===----------------------------------------------------------------------===//
+
+TARGET_BUILTIN(__builtin_loongarch_cacop_d, "vWiUWiWi", "nc", "64bit")
+TARGET_BUILTIN(__builtin_loongarch_cacop_w, "viUii", "nc", "32bit")
+TARGET_BUILTIN(__builtin_loongarch_dbar, "vIUi", "nc", "")
+TARGET_BUILTIN(__builtin_loongarch_ibar, "vIUi", "nc", "")
+TARGET_BUILTIN(__builtin_loongarch_movfcsr2gr, "UiIUi", "nc", "f")
+TARGET_BUILTIN(__builtin_loongarch_movgr2fcsr, "vIUiUi", "nc", "f")
+TARGET_BUILTIN(__builtin_loongarch_break, "vIUi", "nc", "")
+TARGET_BUILTIN(__builtin_loongarch_syscall, "vIUi", "nc", "")
+TARGET_BUILTIN(__builtin_loongarch_cpucfg, "UiUi", "nc", "")
+TARGET_BUILTIN(__builtin_loongarch_asrtle_d, "vWiWi", "nc", "64bit")
+TARGET_BUILTIN(__builtin_loongarch_asrtgt_d, "vWiWi", "nc", "64bit")
+
+TARGET_BUILTIN(__builtin_loongarch_crc_w_b_w, "iii", "nc", "64bit")
+TARGET_BUILTIN(__builtin_loongarch_crc_w_h_w, "iii", "nc", "64bit")
+TARGET_BUILTIN(__builtin_loongarch_crc_w_w_w, "iii", "nc", "64bit")
+TARGET_BUILTIN(__builtin_loongarch_crc_w_d_w, "iWii", "nc", "64bit")
+TARGET_BUILTIN(__builtin_loongarch_crcc_w_b_w, "iii", "nc", "64bit")
+TARGET_BUILTIN(__builtin_loongarch_crcc_w_h_w, "iii", "nc", "64bit")
+TARGET_BUILTIN(__builtin_loongarch_crcc_w_w_w, "iii", "nc", "64bit")
+TARGET_BUILTIN(__builtin_loongarch_crcc_w_d_w, "iWii", "nc", "64bit")
+
+TARGET_BUILTIN(__builtin_loongarch_csrrd_w, "UiIUi", "nc", "")
+TARGET_BUILTIN(__builtin_loongarch_csrrd_d, "UWiIUi", "nc", "64bit")
+TARGET_BUILTIN(__builtin_loongarch_csrwr_w, "UiUiIUi", "nc", "")
+TARGET_BUILTIN(__builtin_loongarch_csrwr_d, "UWiUWiIUi", "nc", "64bit")
+TARGET_BUILTIN(__builtin_loongarch_csrxchg_w, "UiUiUiIUi", "nc", "")
+TARGET_BUILTIN(__builtin_loongarch_csrxchg_d, "UWiUWiUWiIUi", "nc", "64bit")
+
+TARGET_BUILTIN(__builtin_loongarch_iocsrrd_b, "UiUi", "nc", "")
+TARGET_BUILTIN(__builtin_loongarch_iocsrrd_h, "UiUi", "nc", "")
+TARGET_BUILTIN(__builtin_loongarch_iocsrrd_w, "UiUi", "nc", "")
+TARGET_BUILTIN(__builtin_loongarch_iocsrrd_d, "UWiUi", "nc", "64bit")
+TARGET_BUILTIN(__builtin_loongarch_iocsrwr_b, "vUiUi", "nc", "")
+TARGET_BUILTIN(__builtin_loongarch_iocsrwr_h, "vUiUi", "nc", "")
+TARGET_BUILTIN(__builtin_loongarch_iocsrwr_w, "vUiUi", "nc", "")
+TARGET_BUILTIN(__builtin_loongarch_iocsrwr_d, "vUWiUi", "nc", "64bit")
+
+TARGET_BUILTIN(__builtin_loongarch_lddir_d, "WiWiIUWi", "nc", "64bit")
+TARGET_BUILTIN(__builtin_loongarch_ldpte_d, "vWiIUWi", "nc", "64bit")
+
+TARGET_BUILTIN(__builtin_loongarch_frecipe_s, "ff", "nc", "f,frecipe")
+TARGET_BUILTIN(__builtin_loongarch_frecipe_d, "dd", "nc", "d,frecipe")
+TARGET_BUILTIN(__builtin_loongarch_frsqrte_s, "ff", "nc", "f,frecipe")
+TARGET_BUILTIN(__builtin_loongarch_frsqrte_d, "dd", "nc", "d,frecipe")
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsLoongArchLASX.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsLoongArchLASX.def
new file mode 100644
index 000000000000..4cf51cc000f6
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsLoongArchLASX.def
@@ -0,0 +1,988 @@
+//=BuiltinsLoongArchLASX.def - LoongArch Builtin function database -- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LoongArch-specific LASX builtin function database.
+// Users of this file must define the BUILTIN macro to make use of this
+// information.
+//
+//===----------------------------------------------------------------------===//
+
+TARGET_BUILTIN(__builtin_lasx_xvadd_b, "V32cV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvadd_h, "V16sV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvadd_w, "V8iV8iV8i", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvadd_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvadd_q, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsub_b, "V32cV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsub_h, "V16sV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsub_w, "V8iV8iV8i", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsub_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsub_q, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvaddi_bu, "V32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvaddi_hu, "V16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvaddi_wu, "V8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvaddi_du, "V4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsubi_bu, "V32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsubi_hu, "V16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsubi_wu, "V8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsubi_du, "V4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvneg_b, "V32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvneg_h, "V16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvneg_w, "V8iV8i", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvneg_d, "V4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsadd_b, "V32ScV32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsadd_h, "V16SsV16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsadd_w, "V8SiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsadd_d, "V4SLLiV4SLLiV4SLLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsadd_bu, "V32UcV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsadd_hu, "V16UsV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsadd_wu, "V8UiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsadd_du, "V4ULLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvssub_b, "V32ScV32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssub_h, "V16SsV16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssub_w, "V8SiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssub_d, "V4SLLiV4SLLiV4SLLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvssub_bu, "V32UcV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssub_hu, "V16UsV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssub_wu, "V8UiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssub_du, "V4ULLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvhaddw_h_b, "V16SsV32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvhaddw_w_h, "V8SiV16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvhaddw_d_w, "V4SLLiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvhaddw_q_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvhaddw_hu_bu, "V16UsV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvhaddw_wu_hu, "V8UiV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvhaddw_du_wu, "V4ULLiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvhaddw_qu_du, "V4ULLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvhsubw_h_b, "V16SsV32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvhsubw_w_h, "V8SiV16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvhsubw_d_w, "V4SLLiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvhsubw_q_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvhsubw_hu_bu, "V16UsV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvhsubw_wu_hu, "V8UiV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvhsubw_du_wu, "V4ULLiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvhsubw_qu_du, "V4ULLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvaddwev_h_b, "V16sV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvaddwev_w_h, "V8SiV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvaddwev_d_w, "V4LLiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvaddwev_q_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvaddwod_h_b, "V16sV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvaddwod_w_h, "V8SiV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvaddwod_d_w, "V4LLiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvaddwod_q_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsubwev_h_b, "V16sV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsubwev_w_h, "V8SiV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsubwev_d_w, "V4LLiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsubwev_q_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsubwod_h_b, "V16sV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsubwod_w_h, "V8SiV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsubwod_d_w, "V4LLiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsubwod_q_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvaddwev_h_bu, "V16sV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvaddwev_w_hu, "V8SiV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvaddwev_d_wu, "V4LLiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvaddwev_q_du, "V4LLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvaddwod_h_bu, "V16sV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvaddwod_w_hu, "V8SiV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvaddwod_d_wu, "V4LLiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvaddwod_q_du, "V4LLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsubwev_h_bu, "V16sV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsubwev_w_hu, "V8SiV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsubwev_d_wu, "V4LLiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsubwev_q_du, "V4LLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsubwod_h_bu, "V16sV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsubwod_w_hu, "V8SiV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsubwod_d_wu, "V4LLiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsubwod_q_du, "V4LLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvaddwev_h_bu_b, "V16sV32UcV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvaddwev_w_hu_h, "V8SiV16UsV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvaddwev_d_wu_w, "V4LLiV8UiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvaddwev_q_du_d, "V4LLiV4ULLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvaddwod_h_bu_b, "V16sV32UcV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvaddwod_w_hu_h, "V8SiV16UsV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvaddwod_d_wu_w, "V4LLiV8UiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvaddwod_q_du_d, "V4LLiV4ULLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvavg_b, "V32ScV32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvavg_h, "V16SsV16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvavg_w, "V8SiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvavg_d, "V4SLLiV4SLLiV4SLLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvavg_bu, "V32UcV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvavg_hu, "V16UsV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvavg_wu, "V8UiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvavg_du, "V4ULLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvavgr_b, "V32ScV32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvavgr_h, "V16SsV16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvavgr_w, "V8SiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvavgr_d, "V4SLLiV4SLLiV4SLLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvavgr_bu, "V32UcV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvavgr_hu, "V16UsV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvavgr_wu, "V8UiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvavgr_du, "V4ULLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvabsd_b, "V32ScV32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvabsd_h, "V16SsV16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvabsd_w, "V8SiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvabsd_d, "V4SLLiV4SLLiV4SLLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvabsd_bu, "V32UcV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvabsd_hu, "V16UsV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvabsd_wu, "V8UiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvabsd_du, "V4ULLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvadda_b, "V32ScV32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvadda_h, "V16SsV16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvadda_w, "V8SiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvadda_d, "V4SLLiV4SLLiV4SLLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmax_b, "V32ScV32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmax_h, "V16SsV16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmax_w, "V8SiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmax_d, "V4SLLiV4SLLiV4SLLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmaxi_b, "V32ScV32ScIi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaxi_h, "V16SsV16SsIi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaxi_w, "V8SiV8SiIi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaxi_d, "V4SLLiV4SLLiIi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmax_bu, "V32UcV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmax_hu, "V16UsV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmax_wu, "V8UiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmax_du, "V4ULLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmaxi_bu, "V32UcV32UcIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaxi_hu, "V16UsV16UsIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaxi_wu, "V8UiV8UiIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaxi_du, "V4ULLiV4ULLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmin_b, "V32ScV32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmin_h, "V16SsV16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmin_w, "V8SiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmin_d, "V4SLLiV4SLLiV4SLLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmini_b, "V32ScV32ScIi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmini_h, "V16SsV16SsIi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmini_w, "V8SiV8SiIi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmini_d, "V4SLLiV4SLLiIi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmin_bu, "V32UcV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmin_hu, "V16UsV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmin_wu, "V8UiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmin_du, "V4ULLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmini_bu, "V32UcV32UcIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmini_hu, "V16UsV16UsIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmini_wu, "V8UiV8UiIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmini_du, "V4ULLiV4ULLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmul_b, "V32ScV32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmul_h, "V16SsV16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmul_w, "V8SiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmul_d, "V4SLLiV4SLLiV4SLLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmuh_b, "V32cV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmuh_h, "V16sV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmuh_w, "V8iV8iV8i", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmuh_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmuh_bu, "V32UcV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmuh_hu, "V16UsV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmuh_wu, "V8UiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmuh_du, "V4ULLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmulwev_h_b, "V16sV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmulwev_w_h, "V8SiV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmulwev_d_w, "V4LLiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmulwev_q_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmulwod_h_b, "V16sV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmulwod_w_h, "V8SiV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmulwod_d_w, "V4LLiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmulwod_q_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmulwev_h_bu, "V16sV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmulwev_w_hu, "V8SiV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmulwev_d_wu, "V4LLiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmulwev_q_du, "V4LLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmulwod_h_bu, "V16sV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmulwod_w_hu, "V8SiV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmulwod_d_wu, "V4LLiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmulwod_q_du, "V4LLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmulwev_h_bu_b, "V16sV32UcV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmulwev_w_hu_h, "V8SiV16UsV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmulwev_d_wu_w, "V4LLiV8UiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmulwev_q_du_d, "V4LLiV4ULLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmulwod_h_bu_b, "V16sV32UcV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmulwod_w_hu_h, "V8SiV16UsV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmulwod_d_wu_w, "V4LLiV8UiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmulwod_q_du_d, "V4LLiV4ULLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmadd_b, "V32ScV32ScV32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmadd_h, "V16SsV16SsV16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmadd_w, "V8SiV8SiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmadd_d, "V4SLLiV4SLLiV4SLLiV4SLLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmsub_b, "V32ScV32ScV32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmsub_h, "V16SsV16SsV16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmsub_w, "V8SiV8SiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmsub_d, "V4SLLiV4SLLiV4SLLiV4SLLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmaddwev_h_b, "V16sV16sV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaddwev_w_h, "V8SiV8SiV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaddwev_d_w, "V4LLiV4LLiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaddwev_q_d, "V4LLiV4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmaddwod_h_b, "V16sV16sV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaddwod_w_h, "V8SiV8SiV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaddwod_d_w, "V4LLiV4LLiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaddwod_q_d, "V4LLiV4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmaddwev_h_bu, "V16UsV16UsV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaddwev_w_hu, "V8UiV8UiV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaddwev_d_wu, "V4ULLiV4ULLiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaddwev_q_du, "V4ULLiV4ULLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmaddwod_h_bu, "V16UsV16UsV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaddwod_w_hu, "V8UiV8UiV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaddwod_d_wu, "V4ULLiV4ULLiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaddwod_q_du, "V4ULLiV4ULLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmaddwev_h_bu_b, "V16sV16sV32UcV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaddwev_w_hu_h, "V8SiV8SiV16UsV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaddwev_d_wu_w, "V4LLiV4LLiV8UiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaddwev_q_du_d, "V4LLiV4LLiV4ULLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmaddwod_h_bu_b, "V16sV16sV32UcV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaddwod_w_hu_h, "V8SiV8SiV16UsV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaddwod_d_wu_w, "V4LLiV4LLiV8UiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmaddwod_q_du_d, "V4LLiV4LLiV4ULLiV4LLi", "nc", "lasx")
+
+
+TARGET_BUILTIN(__builtin_lasx_xvdiv_b, "V32ScV32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvdiv_h, "V16SsV16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvdiv_w, "V8SiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvdiv_d, "V4SLLiV4SLLiV4SLLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvdiv_bu, "V32UcV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvdiv_hu, "V16UsV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvdiv_wu, "V8UiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvdiv_du, "V4ULLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmod_b, "V32ScV32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmod_h, "V16SsV16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmod_w, "V8SiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmod_d, "V4SLLiV4SLLiV4SLLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmod_bu, "V32UcV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmod_hu, "V16UsV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmod_wu, "V8UiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmod_du, "V4ULLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsat_b, "V32ScV32ScIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsat_h, "V16SsV16SsIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsat_w, "V8SiV8SiIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsat_d, "V4SLLiV4SLLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsat_bu, "V32UcV32UcIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsat_hu, "V16UsV16UsIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsat_wu, "V8UiV8UiIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsat_du, "V4ULLiV4ULLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvexth_h_b, "V16sV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvexth_w_h, "V8SiV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvexth_d_w, "V4LLiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvexth_q_d, "V4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvexth_hu_bu, "V16UsV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvexth_wu_hu, "V8UiV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvexth_du_wu, "V4ULLiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvexth_qu_du, "V4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_vext2xv_h_b, "V16sV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_vext2xv_w_b, "V8SiV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_vext2xv_d_b, "V4LLiV32c", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_vext2xv_w_h, "V8SiV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_vext2xv_d_h, "V4LLiV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_vext2xv_d_w, "V4LLiV8Si", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_vext2xv_hu_bu, "V16sV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_vext2xv_wu_bu, "V8SiV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_vext2xv_du_bu, "V4LLiV32c", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_vext2xv_wu_hu, "V8SiV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_vext2xv_du_hu, "V4LLiV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_vext2xv_du_wu, "V4LLiV8Si", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsigncov_b, "V32ScV32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsigncov_h, "V16SsV16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsigncov_w, "V8SiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsigncov_d, "V4SLLiV4SLLiV4SLLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmskltz_b, "V32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmskltz_h, "V16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmskltz_w, "V8iV8i", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmskltz_d, "V4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvmskgez_b, "V32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvmsknz_b, "V16sV16s", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvldi, "V4LLiIi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvrepli_b, "V32cIi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvrepli_h, "V16sIi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvrepli_w, "V8iIi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvrepli_d, "V4LLiIi", "nc", "lasx")
+
+
+TARGET_BUILTIN(__builtin_lasx_xvand_v, "V32UcV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvor_v, "V32UcV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvxor_v, "V32cV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvnor_v, "V32UcV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvandn_v, "V32UcV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvorn_v, "V32ScV32ScV32Sc", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvandi_b, "V32UcV32UcIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvori_b, "V32UcV32UcIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvxori_b, "V32UcV32UcIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvnori_b, "V32UcV32UcIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsll_b, "V32cV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsll_h, "V16sV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsll_w, "V8iV8iV8i", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsll_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvslli_b, "V32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvslli_h, "V16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvslli_w, "V8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvslli_d, "V4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsrl_b, "V32cV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrl_h, "V16sV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrl_w, "V8iV8iV8i", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrl_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsrli_b, "V32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrli_h, "V16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrli_w, "V8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrli_d, "V4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsra_b, "V32cV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsra_h, "V16sV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsra_w, "V8iV8iV8i", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsra_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsrai_b, "V32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrai_h, "V16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrai_w, "V8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrai_d, "V4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvrotr_b, "V32cV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvrotr_h, "V16sV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvrotr_w, "V8iV8iV8i", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvrotr_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvrotri_b, "V32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvrotri_h, "V16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvrotri_w, "V8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvrotri_d, "V4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsllwil_h_b, "V16sV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsllwil_w_h, "V8SiV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsllwil_d_w, "V4LLiV8SiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvextl_q_d, "V4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsllwil_hu_bu, "V16UsV32UcIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsllwil_wu_hu, "V8UiV16UsIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsllwil_du_wu, "V4ULLiV8UiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvextl_qu_du, "V4LLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsrlr_b, "V32cV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrlr_h, "V16sV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrlr_w, "V8iV8iV8i", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrlr_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsrlri_b, "V32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrlri_h, "V16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrlri_w, "V8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrlri_d, "V4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsrar_b, "V32cV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrar_h, "V16sV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrar_w, "V8iV8iV8i", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrar_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsrari_b, "V32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrari_h, "V16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrari_w, "V8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrari_d, "V4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsrln_b_h, "V32ScV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrln_h_w, "V16sV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrln_w_d, "V8SiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsran_b_h, "V32ScV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsran_h_w, "V16sV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsran_w_d, "V8SiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsrlni_b_h, "V32cV32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrlni_h_w, "V16sV16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrlni_w_d, "V8iV8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrlni_d_q, "V4LLiV4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsrani_b_h, "V32cV32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrani_h_w, "V16sV16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrani_w_d, "V8iV8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrani_d_q, "V4LLiV4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsrlrn_b_h, "V32ScV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrlrn_h_w, "V16sV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrlrn_w_d, "V8SiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsrarn_b_h, "V32ScV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrarn_h_w, "V16sV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrarn_w_d, "V8SiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsrlrni_b_h, "V32cV32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrlrni_h_w, "V16sV16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrlrni_w_d, "V8iV8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrlrni_d_q, "V4LLiV4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsrarni_b_h, "V32cV32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrarni_h_w, "V16sV16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrarni_w_d, "V8iV8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsrarni_d_q, "V4LLiV4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvssrln_b_h, "V32ScV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrln_h_w, "V16sV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrln_w_d, "V8SiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvssran_b_h, "V32ScV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssran_h_w, "V16sV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssran_w_d, "V8SiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvssrln_bu_h, "V32UcV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrln_hu_w, "V16UsV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrln_wu_d, "V8UiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvssran_bu_h, "V32UcV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssran_hu_w, "V16UsV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssran_wu_d, "V8UiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvssrlni_b_h, "V32cV32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrlni_h_w, "V16sV16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrlni_w_d, "V8iV8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrlni_d_q, "V4LLiV4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvssrani_b_h, "V32cV32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrani_h_w, "V16sV16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrani_w_d, "V8iV8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrani_d_q, "V4LLiV4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvssrlrni_bu_h, "V32cV32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrlrni_hu_w, "V16sV16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrlrni_wu_d, "V8iV8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrlrni_du_q, "V4LLiV4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvssrani_bu_h, "V32cV32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrani_hu_w, "V16sV16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrani_wu_d, "V8iV8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrani_du_q, "V4LLiV4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvssrlrn_b_h, "V32ScV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrlrn_h_w, "V16sV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrlrn_w_d, "V8SiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvssrarn_b_h, "V32ScV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrarn_h_w, "V16sV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrarn_w_d, "V8SiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvssrlrn_bu_h, "V32UcV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrlrn_hu_w, "V16UsV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrlrn_wu_d, "V8UiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvssrarn_bu_h, "V32UcV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrarn_hu_w, "V16UsV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrarn_wu_d, "V8UiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvssrlrni_b_h, "V32cV32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrlrni_h_w, "V16sV16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrlrni_w_d, "V8iV8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrlrni_d_q, "V4LLiV4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvssrarni_b_h, "V32cV32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrarni_h_w, "V16sV16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrarni_w_d, "V8iV8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrarni_d_q, "V4LLiV4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvssrlni_bu_h, "V32cV32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrlni_hu_w, "V16sV16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrlni_wu_d, "V8iV8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrlni_du_q, "V4LLiV4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvssrarni_bu_h, "V32cV32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrarni_hu_w, "V16sV16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrarni_wu_d, "V8iV8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvssrarni_du_q, "V4LLiV4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvclo_b, "V32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvclo_h, "V16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvclo_w, "V8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvclo_d, "V4SLLiV4SLLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvclz_b, "V32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvclz_h, "V16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvclz_w, "V8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvclz_d, "V4SLLiV4SLLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvpcnt_b, "V32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvpcnt_h, "V16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvpcnt_w, "V8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvpcnt_d, "V4SLLiV4SLLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvbitclr_b, "V32UcV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvbitclr_h, "V16UsV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvbitclr_w, "V8UiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvbitclr_d, "V4ULLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvbitclri_b, "V32UcV32UcIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvbitclri_h, "V16UsV16UsIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvbitclri_w, "V8UiV8UiIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvbitclri_d, "V4ULLiV4ULLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvbitset_b, "V32UcV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvbitset_h, "V16UsV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvbitset_w, "V8UiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvbitset_d, "V4ULLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvbitseti_b, "V32UcV32UcIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvbitseti_h, "V16UsV16UsIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvbitseti_w, "V8UiV8UiIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvbitseti_d, "V4ULLiV4ULLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvbitrev_b, "V32UcV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvbitrev_h, "V16UsV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvbitrev_w, "V8UiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvbitrev_d, "V4ULLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvbitrevi_b, "V32UcV32UcIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvbitrevi_h, "V16UsV16UsIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvbitrevi_w, "V8UiV8UiIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvbitrevi_d, "V4ULLiV4ULLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfrstp_b, "V32ScV32ScV32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfrstp_h, "V16SsV16SsV16SsV16Ss", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfrstpi_b, "V32cV32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfrstpi_h, "V16sV16sV16sIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfadd_s, "V8fV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfadd_d, "V4dV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfsub_s, "V8fV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfsub_d, "V4dV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfmul_s, "V8fV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfmul_d, "V4dV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfdiv_s, "V8fV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfdiv_d, "V4dV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfmadd_s, "V8fV8fV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfmadd_d, "V4dV4dV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfmsub_s, "V8fV8fV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfmsub_d, "V4dV4dV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfnmadd_s, "V8fV8fV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfnmadd_d, "V4dV4dV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfnmsub_s, "V8fV8fV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfnmsub_d, "V4dV4dV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfmax_s, "V8fV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfmax_d, "V4dV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfmin_s, "V8fV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfmin_d, "V4dV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfmaxa_s, "V8fV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfmaxa_d, "V4dV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfmina_s, "V8fV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfmina_d, "V4dV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvflogb_s, "V8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvflogb_d, "V4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfclass_s, "V8iV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfclass_d, "V4LLiV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfsqrt_s, "V8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfsqrt_d, "V4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfrecip_s, "V8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfrecip_d, "V4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfrecipe_s, "V8fV8f", "nc", "lasx,frecipe")
+TARGET_BUILTIN(__builtin_lasx_xvfrecipe_d, "V4dV4d", "nc", "lasx,frecipe")
+
+TARGET_BUILTIN(__builtin_lasx_xvfrsqrt_s, "V8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfrsqrt_d, "V4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfrsqrte_s, "V8fV8f", "nc", "lasx,frecipe")
+TARGET_BUILTIN(__builtin_lasx_xvfrsqrte_d, "V4dV4d", "nc", "lasx,frecipe")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcvtl_s_h, "V8fV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcvth_s_h, "V8fV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcvtl_d_s, "V4dV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcvth_d_s, "V4dV8f", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcvt_h_s, "V16sV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcvt_s_d, "V8fV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfrintrne_s, "V8SiV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfrintrne_d, "V4LLiV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfrintrz_s, "V8SiV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfrintrz_d, "V4LLiV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfrintrp_s, "V8SiV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfrintrp_d, "V4LLiV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfrintrm_s, "V8SiV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfrintrm_d, "V4LLiV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfrint_s, "V8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfrint_d, "V4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvftintrne_w_s, "V8SiV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvftintrne_l_d, "V4LLiV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvftintrz_w_s, "V8SiV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvftintrz_l_d, "V4LLiV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvftintrp_w_s, "V8SiV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvftintrp_l_d, "V4LLiV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvftintrm_w_s, "V8SiV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvftintrm_l_d, "V4LLiV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvftint_w_s, "V8SiV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvftint_l_d, "V4SLLiV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvftintrz_wu_s, "V8UiV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvftintrz_lu_d, "V4ULLiV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvftint_wu_s, "V8UiV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvftint_lu_d, "V4ULLiV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvftintrne_w_d, "V8SiV4dV4d", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvftintrz_w_d, "V8SiV4dV4d", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvftintrp_w_d, "V8SiV4dV4d", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvftintrm_w_d, "V8SiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvftint_w_d, "V8SiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvftintrnel_l_s, "V4LLiV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvftintrneh_l_s, "V4LLiV8f", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvftintrzl_l_s, "V4LLiV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvftintrzh_l_s, "V4LLiV8f", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvftintrpl_l_s, "V4LLiV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvftintrph_l_s, "V4LLiV8f", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvftintrml_l_s, "V4LLiV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvftintrmh_l_s, "V4LLiV8f", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvftintl_l_s, "V4LLiV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvftinth_l_s, "V4LLiV8f", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvffint_s_w, "V8fV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvffint_d_l, "V4dV4SLLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvffint_s_wu, "V8fV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvffint_d_lu, "V4dV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvffintl_d_w, "V4dV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvffinth_d_w, "V4dV8Si", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvffint_s_l, "V8fV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvseq_b, "V32ScV32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvseq_h, "V16SsV16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvseq_w, "V8SiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvseq_d, "V4SLLiV4SLLiV4SLLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvseqi_b, "V32ScV32ScISi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvseqi_h, "V16SsV16SsISi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvseqi_w, "V8SiV8SiISi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvseqi_d, "V4SLLiV4SLLiISi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsle_b, "V32ScV32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsle_h, "V16SsV16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsle_w, "V8SiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsle_d, "V4SLLiV4SLLiV4SLLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvslei_b, "V32ScV32ScISi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvslei_h, "V16SsV16SsISi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvslei_w, "V8SiV8SiISi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvslei_d, "V4SLLiV4SLLiISi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvsle_bu, "V32ScV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsle_hu, "V16SsV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsle_wu, "V8SiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvsle_du, "V4SLLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvslei_bu, "V32ScV32UcIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvslei_hu, "V16SsV16UsIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvslei_wu, "V8SiV8UiIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvslei_du, "V4SLLiV4ULLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvslt_b, "V32ScV32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvslt_h, "V16SsV16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvslt_w, "V8SiV8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvslt_d, "V4SLLiV4SLLiV4SLLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvslti_b, "V32ScV32ScISi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvslti_h, "V16SsV16SsISi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvslti_w, "V8SiV8SiISi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvslti_d, "V4SLLiV4SLLiISi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvslt_bu, "V32ScV32UcV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvslt_hu, "V16SsV16UsV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvslt_wu, "V8SiV8UiV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvslt_du, "V4SLLiV4ULLiV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvslti_bu, "V32ScV32UcIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvslti_hu, "V16SsV16UsIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvslti_wu, "V8SiV8UiIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvslti_du, "V4SLLiV4ULLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_caf_s, "V8SiV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_caf_d, "V4SLLiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_cun_s, "V8SiV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_cun_d, "V4SLLiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_ceq_s, "V8SiV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_ceq_d, "V4SLLiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_cueq_s, "V8SiV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_cueq_d, "V4SLLiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_clt_s, "V8SiV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_clt_d, "V4SLLiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_cult_s, "V8SiV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_cult_d, "V4SLLiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_cle_s, "V8SiV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_cle_d, "V4SLLiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_cule_s, "V8SiV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_cule_d, "V4SLLiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_cne_s, "V8SiV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_cne_d, "V4SLLiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_cor_s, "V8SiV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_cor_d, "V4SLLiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_cune_s, "V8SiV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_cune_d, "V4SLLiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_saf_s, "V8SiV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_saf_d, "V4SLLiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_sun_s, "V8SiV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_sun_d, "V4SLLiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_seq_s, "V8SiV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_seq_d, "V4SLLiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_sueq_s, "V8SiV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_sueq_d, "V4SLLiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_slt_s, "V8SiV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_slt_d, "V4SLLiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_sult_s, "V8SiV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_sult_d, "V4SLLiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_sle_s, "V8SiV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_sle_d, "V4SLLiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_sule_s, "V8SiV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_sule_d, "V4SLLiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_sne_s, "V8SiV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_sne_d, "V4SLLiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_sor_s, "V8SiV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_sor_d, "V4SLLiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_sune_s, "V8SiV8fV8f", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvfcmp_sune_d, "V4SLLiV4dV4d", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvbitsel_v, "V32UcV32UcV32UcV32Uc", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvbitseli_b, "V32UcV32UcV32UcIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvreplgr2vr_b, "V32Sci", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvreplgr2vr_h, "V16Ssi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvreplgr2vr_w, "V8Sii", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvreplgr2vr_d, "V4SLLiLLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvinsgr2vr_w, "V8SiV8SiiIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvinsgr2vr_d, "V4SLLiV4SLLiLLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvpickve2gr_w, "iV8SiIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvpickve2gr_d, "LLiV4SLLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvpickve2gr_wu, "iV8UiIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvpickve2gr_du, "LLiV4ULLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvreplve_b, "V32cV32cUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvreplve_h, "V16sV16sUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvreplve_w, "V8iV8iUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvreplve_d, "V4LLiV4LLiUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvrepl128vei_b, "V32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvrepl128vei_h, "V16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvrepl128vei_w, "V8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvrepl128vei_d, "V4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvreplve0_b, "V32ScV32Sc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvreplve0_h, "V16SsV16Ss", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvreplve0_w, "V8SiV8Si", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvreplve0_d, "V4SLLiV4SLLi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvreplve0_q, "V32ScV32Sc", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvinsve0_w, "V8iV8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvinsve0_d, "V4LLiV4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvpickve_w, "V8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvpickve_d, "V4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvpickve_w_f, "V8fV8fIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvpickve_d_f, "V4dV4dIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvbsll_v, "V32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvbsrl_v, "V32cV32cIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvpackev_b, "V32cV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvpackev_h, "V16sV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvpackev_w, "V8iV8iV8i", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvpackev_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvpackod_b, "V32cV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvpackod_h, "V16sV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvpackod_w, "V8iV8iV8i", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvpackod_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvpickev_b, "V32cV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvpickev_h, "V16sV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvpickev_w, "V8iV8iV8i", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvpickev_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvpickod_b, "V32cV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvpickod_h, "V16sV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvpickod_w, "V8iV8iV8i", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvpickod_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvilvl_b, "V32cV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvilvl_h, "V16sV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvilvl_w, "V8iV8iV8i", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvilvl_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvilvh_b, "V32cV32cV32c", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvilvh_h, "V16sV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvilvh_w, "V8iV8iV8i", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvilvh_d, "V4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvshuf_b, "V32UcV32UcV32UcV32Uc", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvshuf_h, "V16sV16sV16sV16s", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvshuf_w, "V8iV8iV8iV8i", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvshuf_d, "V4LLiV4LLiV4LLiV4LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvperm_w, "V8iV8iV8i", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvshuf4i_b, "V32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvshuf4i_h, "V16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvshuf4i_w, "V8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvshuf4i_d, "V4LLiV4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvpermi_w, "V8iV8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvpermi_d, "V4LLiV4LLiIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvpermi_q, "V32cV32cV32cIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvextrins_b, "V32cV32cV32cIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvextrins_h, "V16sV16sV16sIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvextrins_w, "V8iV8iV8iIUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvextrins_d, "V4LLiV4LLiV4LLiIUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvld, "V32ScvC*Ii", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvst, "vV32Scv*Ii", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvldx, "V32ScvC*LLi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvstx, "vV32Scv*LLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvldrepl_b, "V32cvC*Ii", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvldrepl_h, "V16svC*Ii", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvldrepl_w, "V8ivC*Ii", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvldrepl_d, "V4LLivC*Ii", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xvstelm_b, "vV32Scv*IiUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvstelm_h, "vV16Ssv*IiUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvstelm_w, "vV8Siv*IiUi", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xvstelm_d, "vV4SLLiv*IiUi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xbz_v, "iV32Uc", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xbnz_v, "iV32Uc", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xbz_b, "iV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xbz_h, "iV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xbz_w, "iV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xbz_d, "iV4ULLi", "nc", "lasx")
+
+TARGET_BUILTIN(__builtin_lasx_xbnz_b, "iV32Uc", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xbnz_h, "iV16Us", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xbnz_w, "iV8Ui", "nc", "lasx")
+TARGET_BUILTIN(__builtin_lasx_xbnz_d, "iV4ULLi", "nc", "lasx")
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsLoongArchLSX.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsLoongArchLSX.def
new file mode 100644
index 000000000000..c90f4dc5458f
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsLoongArchLSX.def
@@ -0,0 +1,959 @@
+//=============------------- BuiltinsLoongArchLSX.def --------------- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LoongArch-specific LSX builtin function database.
+// Users of this file must define the BUILTIN macro to make use of this
+// information.
+//
+//===----------------------------------------------------------------------===//
+
+TARGET_BUILTIN(__builtin_lsx_vadd_b, "V16cV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vadd_h, "V8sV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vadd_w, "V4iV4iV4i", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vadd_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vadd_q, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsub_b, "V16cV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsub_h, "V8sV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsub_w, "V4iV4iV4i", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsub_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsub_q, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vaddi_bu, "V16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vaddi_hu, "V8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vaddi_wu, "V4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vaddi_du, "V2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsubi_bu, "V16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsubi_hu, "V8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsubi_wu, "V4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsubi_du, "V2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vneg_b, "V16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vneg_h, "V8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vneg_w, "V4iV4i", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vneg_d, "V2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsadd_b, "V16ScV16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsadd_h, "V8SsV8SsV8Ss", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsadd_w, "V4SiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsadd_d, "V2SLLiV2SLLiV2SLLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsadd_bu, "V16UcV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsadd_hu, "V8UsV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsadd_wu, "V4UiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsadd_du, "V2ULLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vssub_b, "V16ScV16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssub_h, "V8SsV8SsV8Ss", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssub_w, "V4SiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssub_d, "V2SLLiV2SLLiV2SLLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vssub_bu, "V16UcV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssub_hu, "V8UsV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssub_wu, "V4UiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssub_du, "V2ULLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vhaddw_h_b, "V8SsV16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vhaddw_w_h, "V4SiV8SsV8Ss", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vhaddw_d_w, "V2SLLiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vhaddw_q_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vhaddw_hu_bu, "V8UsV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vhaddw_wu_hu, "V4UiV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vhaddw_du_wu, "V2ULLiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vhaddw_qu_du, "V2ULLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vhsubw_h_b, "V8SsV16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vhsubw_w_h, "V4SiV8SsV8Ss", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vhsubw_d_w, "V2SLLiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vhsubw_q_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vhsubw_hu_bu, "V8UsV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vhsubw_wu_hu, "V4UiV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vhsubw_du_wu, "V2ULLiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vhsubw_qu_du, "V2ULLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vaddwev_h_b, "V8sV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vaddwev_w_h, "V4SiV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vaddwev_d_w, "V2LLiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vaddwev_q_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vaddwod_h_b, "V8sV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vaddwod_w_h, "V4SiV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vaddwod_d_w, "V2LLiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vaddwod_q_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsubwev_h_b, "V8sV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsubwev_w_h, "V4SiV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsubwev_d_w, "V2LLiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsubwev_q_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsubwod_h_b, "V8sV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsubwod_w_h, "V4SiV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsubwod_d_w, "V2LLiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsubwod_q_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vaddwev_h_bu, "V8sV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vaddwev_w_hu, "V4SiV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vaddwev_d_wu, "V2LLiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vaddwev_q_du, "V2LLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vaddwod_h_bu, "V8sV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vaddwod_w_hu, "V4SiV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vaddwod_d_wu, "V2LLiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vaddwod_q_du, "V2LLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsubwev_h_bu, "V8sV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsubwev_w_hu, "V4SiV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsubwev_d_wu, "V2LLiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsubwev_q_du, "V2LLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsubwod_h_bu, "V8sV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsubwod_w_hu, "V4SiV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsubwod_d_wu, "V2LLiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsubwod_q_du, "V2LLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vaddwev_h_bu_b, "V8sV16UcV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vaddwev_w_hu_h, "V4SiV8UsV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vaddwev_d_wu_w, "V2LLiV4UiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vaddwev_q_du_d, "V2LLiV2ULLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vaddwod_h_bu_b, "V8sV16UcV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vaddwod_w_hu_h, "V4SiV8UsV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vaddwod_d_wu_w, "V2LLiV4UiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vaddwod_q_du_d, "V2LLiV2ULLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vavg_b, "V16ScV16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vavg_h, "V8SsV8SsV8Ss", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vavg_w, "V4SiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vavg_d, "V2SLLiV2SLLiV2SLLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vavg_bu, "V16UcV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vavg_hu, "V8UsV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vavg_wu, "V4UiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vavg_du, "V2ULLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vavgr_b, "V16ScV16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vavgr_h, "V8SsV8SsV8Ss", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vavgr_w, "V4SiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vavgr_d, "V2SLLiV2SLLiV2SLLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vavgr_bu, "V16UcV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vavgr_hu, "V8UsV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vavgr_wu, "V4UiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vavgr_du, "V2ULLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vabsd_b, "V16ScV16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vabsd_h, "V8SsV8SsV8Ss", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vabsd_w, "V4SiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vabsd_d, "V2SLLiV2SLLiV2SLLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vabsd_bu, "V16UcV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vabsd_hu, "V8UsV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vabsd_wu, "V4UiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vabsd_du, "V2ULLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vadda_b, "V16ScV16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vadda_h, "V8SsV8SsV8Ss", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vadda_w, "V4SiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vadda_d, "V2SLLiV2SLLiV2SLLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmax_b, "V16ScV16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmax_h, "V8SsV8SsV8Ss", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmax_w, "V4SiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmax_d, "V2SLLiV2SLLiV2SLLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmaxi_b, "V16ScV16ScIi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaxi_h, "V8SsV8SsIi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaxi_w, "V4SiV4SiIi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaxi_d, "V2SLLiV2SLLiIi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmax_bu, "V16UcV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmax_hu, "V8UsV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmax_wu, "V4UiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmax_du, "V2ULLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmaxi_bu, "V16UcV16UcIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaxi_hu, "V8UsV8UsIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaxi_wu, "V4UiV4UiIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaxi_du, "V2ULLiV2ULLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmin_b, "V16ScV16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmin_h, "V8SsV8SsV8Ss", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmin_w, "V4SiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmin_d, "V2SLLiV2SLLiV2SLLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmini_b, "V16ScV16ScIi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmini_h, "V8SsV8SsIi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmini_w, "V4SiV4SiIi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmini_d, "V2SLLiV2SLLiIi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmin_bu, "V16UcV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmin_hu, "V8UsV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmin_wu, "V4UiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmin_du, "V2ULLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmini_bu, "V16UcV16UcIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmini_hu, "V8UsV8UsIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmini_wu, "V4UiV4UiIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmini_du, "V2ULLiV2ULLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmul_b, "V16ScV16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmul_h, "V8SsV8SsV8Ss", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmul_w, "V4SiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmul_d, "V2SLLiV2SLLiV2SLLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmuh_b, "V16cV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmuh_h, "V8sV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmuh_w, "V4iV4iV4i", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmuh_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmuh_bu, "V16UcV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmuh_hu, "V8UsV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmuh_wu, "V4UiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmuh_du, "V2ULLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmulwev_h_b, "V8sV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmulwev_w_h, "V4SiV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmulwev_d_w, "V2LLiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmulwev_q_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmulwod_h_b, "V8sV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmulwod_w_h, "V4SiV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmulwod_d_w, "V2LLiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmulwod_q_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmulwev_h_bu, "V8sV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmulwev_w_hu, "V4SiV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmulwev_d_wu, "V2LLiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmulwev_q_du, "V2LLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmulwod_h_bu, "V8sV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmulwod_w_hu, "V4SiV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmulwod_d_wu, "V2LLiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmulwod_q_du, "V2LLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmulwev_h_bu_b, "V8sV16UcV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmulwev_w_hu_h, "V4SiV8UsV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmulwev_d_wu_w, "V2LLiV4UiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmulwev_q_du_d, "V2LLiV2ULLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmulwod_h_bu_b, "V8sV16UcV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmulwod_w_hu_h, "V4SiV8UsV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmulwod_d_wu_w, "V2LLiV4UiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmulwod_q_du_d, "V2LLiV2ULLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmadd_b, "V16ScV16ScV16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmadd_h, "V8SsV8SsV8SsV8Ss", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmadd_w, "V4SiV4SiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmadd_d, "V2SLLiV2SLLiV2SLLiV2SLLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmsub_b, "V16ScV16ScV16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmsub_h, "V8SsV8SsV8SsV8Ss", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmsub_w, "V4SiV4SiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmsub_d, "V2SLLiV2SLLiV2SLLiV2SLLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmaddwev_h_b, "V8sV8sV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaddwev_w_h, "V4SiV4SiV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaddwev_d_w, "V2LLiV2LLiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaddwev_q_d, "V2LLiV2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmaddwod_h_b, "V8sV8sV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaddwod_w_h, "V4SiV4SiV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaddwod_d_w, "V2LLiV2LLiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaddwod_q_d, "V2LLiV2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmaddwev_h_bu, "V8UsV8UsV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaddwev_w_hu, "V4UiV4UiV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaddwev_d_wu, "V2ULLiV2ULLiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaddwev_q_du, "V2ULLiV2ULLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmaddwod_h_bu, "V8UsV8UsV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaddwod_w_hu, "V4UiV4UiV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaddwod_d_wu, "V2ULLiV2ULLiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaddwod_q_du, "V2ULLiV2ULLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmaddwev_h_bu_b, "V8sV8sV16UcV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaddwev_w_hu_h, "V4SiV4SiV8UsV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaddwev_d_wu_w, "V2LLiV2LLiV4UiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaddwev_q_du_d, "V2LLiV2LLiV2ULLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmaddwod_h_bu_b, "V8sV8sV16UcV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaddwod_w_hu_h, "V4SiV4SiV8UsV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaddwod_d_wu_w, "V2LLiV2LLiV4UiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmaddwod_q_du_d, "V2LLiV2LLiV2ULLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vdiv_b, "V16ScV16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vdiv_h, "V8SsV8SsV8Ss", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vdiv_w, "V4SiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vdiv_d, "V2SLLiV2SLLiV2SLLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vdiv_bu, "V16UcV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vdiv_hu, "V8UsV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vdiv_wu, "V4UiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vdiv_du, "V2ULLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmod_b, "V16ScV16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmod_h, "V8SsV8SsV8Ss", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmod_w, "V4SiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmod_d, "V2SLLiV2SLLiV2SLLi", "nc", "lsx")
+
+
+TARGET_BUILTIN(__builtin_lsx_vmod_bu, "V16UcV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmod_hu, "V8UsV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmod_wu, "V4UiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmod_du, "V2ULLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsat_b, "V16ScV16ScIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsat_h, "V8SsV8SsIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsat_w, "V4SiV4SiIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsat_d, "V2SLLiV2SLLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsat_bu, "V16UcV16UcIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsat_hu, "V8UsV8UsIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsat_wu, "V4UiV4UiIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsat_du, "V2ULLiV2ULLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vexth_h_b, "V8sV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vexth_w_h, "V4SiV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vexth_d_w, "V2LLiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vexth_q_d, "V2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vexth_hu_bu, "V8UsV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vexth_wu_hu, "V4UiV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vexth_du_wu, "V2ULLiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vexth_qu_du, "V2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsigncov_b, "V16ScV16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsigncov_h, "V8SsV8SsV8Ss", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsigncov_w, "V4SiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsigncov_d, "V2SLLiV2SLLiV2SLLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmskltz_b, "V16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmskltz_h, "V8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmskltz_w, "V4iV4i", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmskltz_d, "V2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vmskgez_b, "V16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vmsknz_b, "V8sV8s", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vldi, "V2LLiIi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vrepli_b, "V16cIi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vrepli_h, "V8sIi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vrepli_w, "V4iIi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vrepli_d, "V2LLiIi", "nc", "lsx")
+
+
+TARGET_BUILTIN(__builtin_lsx_vand_v, "V16UcV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vor_v, "V16UcV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vxor_v, "V16cV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vnor_v, "V16UcV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vandn_v, "V16UcV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vorn_v, "V16ScV16ScV16Sc", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vandi_b, "V16UcV16UcIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vori_b, "V16UcV16UcIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vxori_b, "V16UcV16UcIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vnori_b, "V16UcV16UcIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsll_b, "V16cV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsll_h, "V8sV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsll_w, "V4iV4iV4i", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsll_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vslli_b, "V16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vslli_h, "V8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vslli_w, "V4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vslli_d, "V2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsrl_b, "V16cV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrl_h, "V8sV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrl_w, "V4iV4iV4i", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrl_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsrli_b, "V16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrli_h, "V8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrli_w, "V4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrli_d, "V2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsra_b, "V16cV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsra_h, "V8sV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsra_w, "V4iV4iV4i", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsra_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsrai_b, "V16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrai_h, "V8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrai_w, "V4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrai_d, "V2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vrotr_b, "V16cV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vrotr_h, "V8sV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vrotr_w, "V4iV4iV4i", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vrotr_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vrotri_b, "V16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vrotri_h, "V8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vrotri_w, "V4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vrotri_d, "V2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsllwil_h_b, "V8sV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsllwil_w_h, "V4SiV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsllwil_d_w, "V2LLiV4SiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vextl_q_d, "V2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsllwil_hu_bu, "V8UsV16UcIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsllwil_wu_hu, "V4UiV8UsIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsllwil_du_wu, "V2ULLiV4UiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vextl_qu_du, "V2LLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsrlr_b, "V16cV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrlr_h, "V8sV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrlr_w, "V4iV4iV4i", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrlr_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsrlri_b, "V16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrlri_h, "V8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrlri_w, "V4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrlri_d, "V2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsrar_b, "V16cV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrar_h, "V8sV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrar_w, "V4iV4iV4i", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrar_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsrari_b, "V16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrari_h, "V8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrari_w, "V4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrari_d, "V2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsrln_b_h, "V16ScV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrln_h_w, "V8sV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrln_w_d, "V4SiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsran_b_h, "V16ScV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsran_h_w, "V8sV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsran_w_d, "V4SiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsrlni_b_h, "V16cV16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrlni_h_w, "V8sV8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrlni_w_d, "V4iV4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrlni_d_q, "V2LLiV2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsrani_b_h, "V16cV16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrani_h_w, "V8sV8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrani_w_d, "V4iV4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrani_d_q, "V2LLiV2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsrlrn_b_h, "V16ScV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrlrn_h_w, "V8sV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrlrn_w_d, "V4SiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsrarn_b_h, "V16ScV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrarn_h_w, "V8sV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrarn_w_d, "V4SiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsrlrni_b_h, "V16cV16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrlrni_h_w, "V8sV8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrlrni_w_d, "V4iV4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrlrni_d_q, "V2LLiV2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsrarni_b_h, "V16cV16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrarni_h_w, "V8sV8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrarni_w_d, "V4iV4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsrarni_d_q, "V2LLiV2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vssrln_b_h, "V16ScV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrln_h_w, "V8sV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrln_w_d, "V4SiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vssran_b_h, "V16ScV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssran_h_w, "V8sV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssran_w_d, "V4SiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vssrln_bu_h, "V16UcV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrln_hu_w, "V8UsV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrln_wu_d, "V4UiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vssran_bu_h, "V16UcV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssran_hu_w, "V8UsV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssran_wu_d, "V4UiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vssrlni_b_h, "V16cV16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrlni_h_w, "V8sV8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrlni_w_d, "V4iV4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrlni_d_q, "V2LLiV2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vssrani_b_h, "V16cV16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrani_h_w, "V8sV8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrani_w_d, "V4iV4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrani_d_q, "V2LLiV2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vssrlrni_bu_h, "V16cV16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrlrni_hu_w, "V8sV8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrlrni_wu_d, "V4iV4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrlrni_du_q, "V2LLiV2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vssrani_bu_h, "V16cV16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrani_hu_w, "V8sV8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrani_wu_d, "V4iV4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrani_du_q, "V2LLiV2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vssrlrn_b_h, "V16ScV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrlrn_h_w, "V8sV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrlrn_w_d, "V4SiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vssrarn_b_h, "V16ScV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrarn_h_w, "V8sV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrarn_w_d, "V4SiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vssrlrn_bu_h, "V16UcV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrlrn_hu_w, "V8UsV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrlrn_wu_d, "V4UiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vssrarn_bu_h, "V16UcV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrarn_hu_w, "V8UsV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrarn_wu_d, "V4UiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vssrlrni_b_h, "V16cV16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrlrni_h_w, "V8sV8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrlrni_w_d, "V4iV4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrlrni_d_q, "V2LLiV2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vssrarni_b_h, "V16cV16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrarni_h_w, "V8sV8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrarni_w_d, "V4iV4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrarni_d_q, "V2LLiV2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vssrlni_bu_h, "V16cV16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrlni_hu_w, "V8sV8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrlni_wu_d, "V4iV4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrlni_du_q, "V2LLiV2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vssrarni_bu_h, "V16cV16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrarni_hu_w, "V8sV8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrarni_wu_d, "V4iV4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vssrarni_du_q, "V2LLiV2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vclo_b, "V16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vclo_h, "V8SsV8Ss", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vclo_w, "V4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vclo_d, "V2SLLiV2SLLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vclz_b, "V16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vclz_h, "V8SsV8Ss", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vclz_w, "V4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vclz_d, "V2SLLiV2SLLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vpcnt_b, "V16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vpcnt_h, "V8SsV8Ss", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vpcnt_w, "V4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vpcnt_d, "V2SLLiV2SLLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vbitclr_b, "V16UcV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vbitclr_h, "V8UsV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vbitclr_w, "V4UiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vbitclr_d, "V2ULLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vbitclri_b, "V16UcV16UcIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vbitclri_h, "V8UsV8UsIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vbitclri_w, "V4UiV4UiIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vbitclri_d, "V2ULLiV2ULLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vbitset_b, "V16UcV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vbitset_h, "V8UsV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vbitset_w, "V4UiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vbitset_d, "V2ULLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vbitseti_b, "V16UcV16UcIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vbitseti_h, "V8UsV8UsIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vbitseti_w, "V4UiV4UiIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vbitseti_d, "V2ULLiV2ULLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vbitrev_b, "V16UcV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vbitrev_h, "V8UsV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vbitrev_w, "V4UiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vbitrev_d, "V2ULLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vbitrevi_b, "V16UcV16UcIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vbitrevi_h, "V8UsV8UsIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vbitrevi_w, "V4UiV4UiIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vbitrevi_d, "V2ULLiV2ULLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfrstp_b, "V16ScV16ScV16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfrstp_h, "V8SsV8SsV8SsV8Ss", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfrstpi_b, "V16cV16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfrstpi_h, "V8sV8sV8sIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfadd_s, "V4fV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfadd_d, "V2dV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfsub_s, "V4fV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfsub_d, "V2dV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfmul_s, "V4fV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfmul_d, "V2dV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfdiv_s, "V4fV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfdiv_d, "V2dV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfmadd_s, "V4fV4fV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfmadd_d, "V2dV2dV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfmsub_s, "V4fV4fV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfmsub_d, "V2dV2dV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfnmadd_s, "V4fV4fV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfnmadd_d, "V2dV2dV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfnmsub_s, "V4fV4fV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfnmsub_d, "V2dV2dV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfmax_s, "V4fV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfmax_d, "V2dV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfmin_s, "V4fV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfmin_d, "V2dV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfmaxa_s, "V4fV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfmaxa_d, "V2dV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfmina_s, "V4fV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfmina_d, "V2dV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vflogb_s, "V4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vflogb_d, "V2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfclass_s, "V4iV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfclass_d, "V2LLiV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfsqrt_s, "V4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfsqrt_d, "V2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfrecip_s, "V4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfrecip_d, "V2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfrecipe_s, "V4fV4f", "nc", "lsx,frecipe")
+TARGET_BUILTIN(__builtin_lsx_vfrecipe_d, "V2dV2d", "nc", "lsx,frecipe")
+
+TARGET_BUILTIN(__builtin_lsx_vfrsqrt_s, "V4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfrsqrt_d, "V2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfrsqrte_s, "V4fV4f", "nc", "lsx,frecipe")
+TARGET_BUILTIN(__builtin_lsx_vfrsqrte_d, "V2dV2d", "nc", "lsx,frecipe")
+
+TARGET_BUILTIN(__builtin_lsx_vfcvtl_s_h, "V4fV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcvtl_d_s, "V2dV4f", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcvth_s_h, "V4fV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcvth_d_s, "V2dV4f", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcvt_h_s, "V8sV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcvt_s_d, "V4fV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfrintrne_s, "V4SiV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfrintrne_d, "V2LLiV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfrintrz_s, "V4SiV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfrintrz_d, "V2LLiV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfrintrp_s, "V4SiV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfrintrp_d, "V2LLiV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfrintrm_s, "V4SiV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfrintrm_d, "V2LLiV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfrint_s, "V4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfrint_d, "V2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vftintrne_w_s, "V4SiV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vftintrne_l_d, "V2LLiV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vftintrz_w_s, "V4SiV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vftintrz_l_d, "V2LLiV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vftintrp_w_s, "V4SiV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vftintrp_l_d, "V2LLiV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vftintrm_w_s, "V4SiV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vftintrm_l_d, "V2LLiV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vftint_w_s, "V4SiV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vftint_l_d, "V2SLLiV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vftintrz_wu_s, "V4UiV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vftintrz_lu_d, "V2ULLiV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vftint_wu_s, "V4UiV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vftint_lu_d, "V2ULLiV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vftintrne_w_d, "V4SiV2dV2d", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vftintrz_w_d, "V4SiV2dV2d", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vftintrp_w_d, "V4SiV2dV2d", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vftintrm_w_d, "V4SiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vftint_w_d, "V4SiV2dV2d", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vftintrnel_l_s, "V2LLiV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vftintrneh_l_s, "V2LLiV4f", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vftintrzl_l_s, "V2LLiV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vftintrzh_l_s, "V2LLiV4f", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vftintrpl_l_s, "V2LLiV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vftintrph_l_s, "V2LLiV4f", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vftintrml_l_s, "V2LLiV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vftintrmh_l_s, "V2LLiV4f", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vftintl_l_s, "V2LLiV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vftinth_l_s, "V2LLiV4f", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vffint_s_w, "V4fV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vffint_d_l, "V2dV2SLLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vffint_s_wu, "V4fV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vffint_d_lu, "V2dV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vffintl_d_w, "V2dV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vffinth_d_w, "V2dV4Si", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vffint_s_l, "V4fV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vseq_b, "V16ScV16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vseq_h, "V8SsV8SsV8Ss", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vseq_w, "V4SiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vseq_d, "V2SLLiV2SLLiV2SLLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vseqi_b, "V16ScV16ScISi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vseqi_h, "V8SsV8SsISi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vseqi_w, "V4SiV4SiISi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vseqi_d, "V2SLLiV2SLLiISi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsle_b, "V16ScV16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsle_h, "V8SsV8SsV8Ss", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsle_w, "V4SiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsle_d, "V2SLLiV2SLLiV2SLLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vslei_b, "V16ScV16ScISi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vslei_h, "V8SsV8SsISi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vslei_w, "V4SiV4SiISi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vslei_d, "V2SLLiV2SLLiISi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vsle_bu, "V16ScV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsle_hu, "V8SsV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsle_wu, "V4SiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vsle_du, "V2SLLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vslei_bu, "V16ScV16UcIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vslei_hu, "V8SsV8UsIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vslei_wu, "V4SiV4UiIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vslei_du, "V2SLLiV2ULLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vslt_b, "V16ScV16ScV16Sc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vslt_h, "V8SsV8SsV8Ss", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vslt_w, "V4SiV4SiV4Si", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vslt_d, "V2SLLiV2SLLiV2SLLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vslti_b, "V16ScV16ScISi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vslti_h, "V8SsV8SsISi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vslti_w, "V4SiV4SiISi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vslti_d, "V2SLLiV2SLLiISi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vslt_bu, "V16ScV16UcV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vslt_hu, "V8SsV8UsV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vslt_wu, "V4SiV4UiV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vslt_du, "V2SLLiV2ULLiV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vslti_bu, "V16ScV16UcIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vslti_hu, "V8SsV8UsIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vslti_wu, "V4SiV4UiIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vslti_du, "V2SLLiV2ULLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcmp_caf_s, "V4SiV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcmp_caf_d, "V2SLLiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcmp_cun_s, "V4SiV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcmp_cun_d, "V2SLLiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcmp_ceq_s, "V4SiV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcmp_ceq_d, "V2SLLiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcmp_cueq_s, "V4SiV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcmp_cueq_d, "V2SLLiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcmp_clt_s, "V4SiV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcmp_clt_d, "V2SLLiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcmp_cult_s, "V4SiV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcmp_cult_d, "V2SLLiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcmp_cle_s, "V4SiV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcmp_cle_d, "V2SLLiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcmp_cule_s, "V4SiV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcmp_cule_d, "V2SLLiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcmp_cne_s, "V4SiV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcmp_cne_d, "V2SLLiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcmp_cor_s, "V4SiV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcmp_cor_d, "V2SLLiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcmp_cune_s, "V4SiV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcmp_cune_d, "V2SLLiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcmp_saf_s, "V4SiV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcmp_saf_d, "V2SLLiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcmp_sun_s, "V4SiV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcmp_sun_d, "V2SLLiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcmp_seq_s, "V4SiV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcmp_seq_d, "V2SLLiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcmp_sueq_s, "V4SiV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcmp_sueq_d, "V2SLLiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcmp_slt_s, "V4SiV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcmp_slt_d, "V2SLLiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcmp_sult_s, "V4SiV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcmp_sult_d, "V2SLLiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcmp_sle_s, "V4SiV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcmp_sle_d, "V2SLLiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcmp_sule_s, "V4SiV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcmp_sule_d, "V2SLLiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcmp_sne_s, "V4SiV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcmp_sne_d, "V2SLLiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcmp_sor_s, "V4SiV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcmp_sor_d, "V2SLLiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vfcmp_sune_s, "V4SiV4fV4f", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vfcmp_sune_d, "V2SLLiV2dV2d", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vbitsel_v, "V16UcV16UcV16UcV16Uc", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vbitseli_b, "V16UcV16UcV16UcIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vreplgr2vr_b, "V16Sci", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vreplgr2vr_h, "V8Ssi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vreplgr2vr_w, "V4Sii", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vreplgr2vr_d, "V2SLLiLLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vinsgr2vr_b, "V16ScV16SciIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vinsgr2vr_h, "V8SsV8SsiIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vinsgr2vr_w, "V4SiV4SiiIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vinsgr2vr_d, "V2SLLiV2SLLiLLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vpickve2gr_b, "iV16ScIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vpickve2gr_h, "iV8SsIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vpickve2gr_w, "iV4SiIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vpickve2gr_d, "LLiV2SLLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vpickve2gr_bu, "iV16UcIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vpickve2gr_hu, "iV8UsIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vpickve2gr_wu, "iV4UiIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vpickve2gr_du, "LLiV2ULLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vreplve_b, "V16cV16cUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vreplve_h, "V8sV8sUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vreplve_w, "V4iV4iUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vreplve_d, "V2LLiV2LLiUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vreplvei_b, "V16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vreplvei_h, "V8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vreplvei_w, "V4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vreplvei_d, "V2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vbsll_v, "V16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vbsrl_v, "V16cV16cIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vpackev_b, "V16cV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vpackev_h, "V8sV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vpackev_w, "V4iV4iV4i", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vpackev_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vpackod_b, "V16cV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vpackod_h, "V8sV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vpackod_w, "V4iV4iV4i", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vpackod_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vpickev_b, "V16cV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vpickev_h, "V8sV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vpickev_w, "V4iV4iV4i", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vpickev_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vpickod_b, "V16cV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vpickod_h, "V8sV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vpickod_w, "V4iV4iV4i", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vpickod_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vilvl_b, "V16cV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vilvl_h, "V8sV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vilvl_w, "V4iV4iV4i", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vilvl_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vilvh_b, "V16cV16cV16c", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vilvh_h, "V8sV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vilvh_w, "V4iV4iV4i", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vilvh_d, "V2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vshuf_b, "V16UcV16UcV16UcV16Uc", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vshuf_h, "V8sV8sV8sV8s", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vshuf_w, "V4iV4iV4iV4i", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vshuf_d, "V2LLiV2LLiV2LLiV2LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vshuf4i_b, "V16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vshuf4i_h, "V8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vshuf4i_w, "V4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vshuf4i_d, "V2LLiV2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vpermi_w, "V4iV4iV4iIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vextrins_b, "V16cV16cV16cIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vextrins_h, "V8sV8sV8sIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vextrins_w, "V4iV4iV4iIUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vextrins_d, "V2LLiV2LLiV2LLiIUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vld, "V16ScvC*Ii", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vst, "vV16Scv*Ii", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vldx, "V16ScvC*LLi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vstx, "vV16Scv*LLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vldrepl_b, "V16cvC*Ii", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vldrepl_h, "V8svC*Ii", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vldrepl_w, "V4ivC*Ii", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vldrepl_d, "V2LLivC*Ii", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_vstelm_b, "vV16Scv*IiUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vstelm_h, "vV8Ssv*IiUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vstelm_w, "vV4Siv*IiUi", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_vstelm_d, "vV2SLLiv*IiUi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_bz_v, "iV16Uc", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_bnz_v, "iV16Uc", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_bz_b, "iV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_bz_h, "iV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_bz_w, "iV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_bz_d, "iV2ULLi", "nc", "lsx")
+
+TARGET_BUILTIN(__builtin_lsx_bnz_b, "iV16Uc", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_bnz_h, "iV8Us", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_bnz_w, "iV4Ui", "nc", "lsx")
+TARGET_BUILTIN(__builtin_lsx_bnz_d, "iV2ULLi", "nc", "lsx")
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNEON.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNEON.def
index b8eb5a7b6173..9627005ba982 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNEON.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNEON.def
@@ -19,3 +19,4 @@
#undef GET_NEON_BUILTINS
#undef BUILTIN
+#undef TARGET_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNVPTX.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNVPTX.def
index 3c96900136a4..0f2e8260143b 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNVPTX.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsNVPTX.def
@@ -17,12 +17,21 @@
# define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BUILTIN(ID, TYPE, ATTRS)
#endif
+#pragma push_macro("SM_53")
#pragma push_macro("SM_70")
#pragma push_macro("SM_72")
#pragma push_macro("SM_75")
#pragma push_macro("SM_80")
#pragma push_macro("SM_86")
-#define SM_86 "sm_86"
+#pragma push_macro("SM_87")
+#pragma push_macro("SM_89")
+#pragma push_macro("SM_90")
+#pragma push_macro("SM_90a")
+#define SM_90a "sm_90a"
+#define SM_90 "sm_90|" SM_90a
+#define SM_89 "sm_89|" SM_90
+#define SM_87 "sm_87|" SM_89
+#define SM_86 "sm_86|" SM_87
#define SM_80 "sm_80|" SM_86
#define SM_75 "sm_75|" SM_80
#define SM_72 "sm_72|" SM_75
@@ -30,7 +39,9 @@
#pragma push_macro("SM_60")
#define SM_60 "sm_60|sm_61|sm_62|" SM_70
+#define SM_53 "sm_53|" SM_60
+#pragma push_macro("PTX42")
#pragma push_macro("PTX60")
#pragma push_macro("PTX61")
#pragma push_macro("PTX63")
@@ -39,7 +50,27 @@
#pragma push_macro("PTX70")
#pragma push_macro("PTX71")
#pragma push_macro("PTX72")
-#define PTX72 "ptx72"
+#pragma push_macro("PTX73")
+#pragma push_macro("PTX74")
+#pragma push_macro("PTX75")
+#pragma push_macro("PTX76")
+#pragma push_macro("PTX77")
+#pragma push_macro("PTX78")
+#pragma push_macro("PTX80")
+#pragma push_macro("PTX81")
+#pragma push_macro("PTX82")
+#pragma push_macro("PTX83")
+#define PTX83 "ptx83"
+#define PTX82 "ptx82|" PTX83
+#define PTX81 "ptx81|" PTX82
+#define PTX80 "ptx80|" PTX81
+#define PTX78 "ptx78|" PTX80
+#define PTX77 "ptx77|" PTX78
+#define PTX76 "ptx76|" PTX77
+#define PTX75 "ptx75|" PTX76
+#define PTX74 "ptx74|" PTX75
+#define PTX73 "ptx73|" PTX74
+#define PTX72 "ptx72|" PTX73
#define PTX71 "ptx71|" PTX72
#define PTX70 "ptx70|" PTX71
#define PTX65 "ptx65|" PTX70
@@ -47,6 +78,7 @@
#define PTX63 "ptx63|" PTX64
#define PTX61 "ptx61|" PTX63
#define PTX60 "ptx60|" PTX61
+#define PTX42 "ptx42|" PTX60
#pragma push_macro("AND")
#define AND(a, b) "(" a "),(" b ")"
@@ -73,6 +105,31 @@ BUILTIN(__nvvm_read_ptx_sreg_nctaid_y, "i", "nc")
BUILTIN(__nvvm_read_ptx_sreg_nctaid_z, "i", "nc")
BUILTIN(__nvvm_read_ptx_sreg_nctaid_w, "i", "nc")
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_clusterid_x, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_clusterid_y, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_clusterid_z, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_clusterid_w, "i", "nc", AND(SM_90, PTX78))
+
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_nclusterid_x, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_nclusterid_y, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_nclusterid_z, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_nclusterid_w, "i", "nc", AND(SM_90, PTX78))
+
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_cluster_ctaid_x, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_cluster_ctaid_y, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_cluster_ctaid_z, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_cluster_ctaid_w, "i", "nc", AND(SM_90, PTX78))
+
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_cluster_nctaid_x, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_cluster_nctaid_y, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_cluster_nctaid_z, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_cluster_nctaid_w, "i", "nc", AND(SM_90, PTX78))
+
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_cluster_ctarank, "i", "nc", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_read_ptx_sreg_cluster_nctarank, "i", "nc", AND(SM_90, PTX78))
+
+TARGET_BUILTIN(__nvvm_is_explicit_cluster, "b", "nc", AND(SM_90, PTX78))
+
BUILTIN(__nvvm_read_ptx_sreg_laneid, "i", "nc")
BUILTIN(__nvvm_read_ptx_sreg_warpid, "i", "nc")
BUILTIN(__nvvm_read_ptx_sreg_nwarpid, "i", "nc")
@@ -101,13 +158,97 @@ BUILTIN(__nvvm_prmt, "UiUiUiUi", "")
// Min Max
-BUILTIN(__nvvm_fmax_ftz_f, "fff", "")
-BUILTIN(__nvvm_fmax_f, "fff", "")
-BUILTIN(__nvvm_fmin_ftz_f, "fff", "")
-BUILTIN(__nvvm_fmin_f, "fff", "")
+TARGET_BUILTIN(__nvvm_fmin_f16, "hhh", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_ftz_f16, "hhh", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_nan_f16, "hhh", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_ftz_nan_f16, "hhh", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_xorsign_abs_f16, "hhh", "", AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmin_ftz_xorsign_abs_f16, "hhh", "", AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmin_nan_xorsign_abs_f16, "hhh", "", AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmin_ftz_nan_xorsign_abs_f16, "hhh", "",
+ AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmin_f16x2, "V2hV2hV2h", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_ftz_f16x2, "V2hV2hV2h", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_nan_f16x2, "V2hV2hV2h", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_ftz_nan_f16x2, "V2hV2hV2h", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_xorsign_abs_f16x2, "V2hV2hV2h", "",
+ AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmin_ftz_xorsign_abs_f16x2, "V2hV2hV2h", "",
+ AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmin_nan_xorsign_abs_f16x2, "V2hV2hV2h", "",
+ AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmin_ftz_nan_xorsign_abs_f16x2, "V2hV2hV2h", "",
+ AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmin_bf16, "yyy", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_ftz_bf16, "yyy", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_nan_bf16, "yyy", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_ftz_nan_bf16, "yyy", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_xorsign_abs_bf16, "yyy", "", AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmin_nan_xorsign_abs_bf16, "yyy", "",
+ AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmin_bf16x2, "V2yV2yV2y", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_ftz_bf16x2, "V2yV2yV2y", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_nan_bf16x2, "V2yV2yV2y", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_ftz_nan_bf16x2, "V2yV2yV2y", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_xorsign_abs_bf16x2, "V2yV2yV2y", "",
+ AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmin_nan_xorsign_abs_bf16x2, "V2yV2yV2y", "",
+ AND(SM_86, PTX72))
+BUILTIN(__nvvm_fmin_f, "fff", "")
+BUILTIN(__nvvm_fmin_ftz_f, "fff", "")
+TARGET_BUILTIN(__nvvm_fmin_nan_f, "fff", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_ftz_nan_f, "fff", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmin_xorsign_abs_f, "fff", "", AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmin_ftz_xorsign_abs_f, "fff", "", AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmin_nan_xorsign_abs_f, "fff", "", AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmin_ftz_nan_xorsign_abs_f, "fff", "", AND(SM_86, PTX72))
+BUILTIN(__nvvm_fmin_d, "ddd", "")
+TARGET_BUILTIN(__nvvm_fmax_f16, "hhh", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_ftz_f16, "hhh", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_nan_f16, "hhh", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_ftz_nan_f16, "hhh", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_xorsign_abs_f16, "hhh", "", AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmax_ftz_xorsign_abs_f16, "hhh", "", AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmax_nan_xorsign_abs_f16, "hhh", "", AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmax_ftz_nan_xorsign_abs_f16, "hhh", "",
+ AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmax_f16x2, "V2hV2hV2h", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_ftz_f16x2, "V2hV2hV2h", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_nan_f16x2, "V2hV2hV2h", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_ftz_nan_f16x2, "V2hV2hV2h", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_xorsign_abs_f16x2, "V2hV2hV2h", "",
+ AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmax_ftz_xorsign_abs_f16x2, "V2hV2hV2h", "",
+ AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmax_nan_xorsign_abs_f16x2, "V2hV2hV2h", "",
+ AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmax_ftz_nan_xorsign_abs_f16x2, "V2hV2hV2h", "",
+ AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmax_bf16, "yyy", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_ftz_bf16, "yyy", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_nan_bf16, "yyy", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_ftz_nan_bf16, "yyy", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_xorsign_abs_bf16, "yyy", "", AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmax_nan_xorsign_abs_bf16, "yyy", "",
+ AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmax_bf16x2, "V2yV2yV2y", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_ftz_bf16x2, "V2yV2yV2y", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_nan_bf16x2, "V2yV2yV2y", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_ftz_nan_bf16x2, "V2yV2yV2y", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_xorsign_abs_bf16x2, "V2yV2yV2y", "",
+ AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmax_nan_xorsign_abs_bf16x2, "V2yV2yV2y", "",
+ AND(SM_86, PTX72))
+BUILTIN(__nvvm_fmax_f, "fff", "")
+BUILTIN(__nvvm_fmax_ftz_f, "fff", "")
+TARGET_BUILTIN(__nvvm_fmax_nan_f, "fff", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_ftz_nan_f, "fff", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fmax_xorsign_abs_f, "fff", "", AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmax_ftz_xorsign_abs_f, "fff", "", AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmax_nan_xorsign_abs_f, "fff", "", AND(SM_86, PTX72))
+TARGET_BUILTIN(__nvvm_fmax_ftz_nan_xorsign_abs_f, "fff", "", AND(SM_86, PTX72))
BUILTIN(__nvvm_fmax_d, "ddd", "")
-BUILTIN(__nvvm_fmin_d, "ddd", "")
// Multiplication
@@ -196,6 +337,8 @@ BUILTIN(__nvvm_saturate_d, "dd", "")
BUILTIN(__nvvm_ex2_approx_ftz_f, "ff", "")
BUILTIN(__nvvm_ex2_approx_f, "ff", "")
BUILTIN(__nvvm_ex2_approx_d, "dd", "")
+TARGET_BUILTIN(__nvvm_ex2_approx_f16, "hh", "", AND(SM_75, PTX70))
+TARGET_BUILTIN(__nvvm_ex2_approx_f16x2, "V2hV2h", "", AND(SM_75, PTX70))
BUILTIN(__nvvm_lg2_approx_ftz_f, "ff", "")
BUILTIN(__nvvm_lg2_approx_f, "ff", "")
@@ -211,6 +354,22 @@ BUILTIN(__nvvm_cos_approx_f, "ff", "")
// Fma
+TARGET_BUILTIN(__nvvm_fma_rn_f16, "hhhh", "", AND(SM_53, PTX42))
+TARGET_BUILTIN(__nvvm_fma_rn_ftz_f16, "hhhh", "", AND(SM_53, PTX42))
+TARGET_BUILTIN(__nvvm_fma_rn_sat_f16, "hhhh", "", AND(SM_53, PTX42))
+TARGET_BUILTIN(__nvvm_fma_rn_ftz_sat_f16, "hhhh", "", AND(SM_53, PTX42))
+TARGET_BUILTIN(__nvvm_fma_rn_relu_f16, "hhhh", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fma_rn_ftz_relu_f16, "hhhh", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fma_rn_f16x2, "V2hV2hV2hV2h", "", AND(SM_53, PTX42))
+TARGET_BUILTIN(__nvvm_fma_rn_ftz_f16x2, "V2hV2hV2hV2h", "", AND(SM_53, PTX42))
+TARGET_BUILTIN(__nvvm_fma_rn_sat_f16x2, "V2hV2hV2hV2h", "", AND(SM_53, PTX42))
+TARGET_BUILTIN(__nvvm_fma_rn_ftz_sat_f16x2, "V2hV2hV2hV2h", "", AND(SM_53, PTX42))
+TARGET_BUILTIN(__nvvm_fma_rn_relu_f16x2, "V2hV2hV2hV2h", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fma_rn_ftz_relu_f16x2, "V2hV2hV2hV2h", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fma_rn_bf16, "yyyy", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fma_rn_relu_bf16, "yyyy", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fma_rn_bf16x2, "V2yV2yV2yV2y", "", AND(SM_80, PTX70))
+TARGET_BUILTIN(__nvvm_fma_rn_relu_bf16x2, "V2yV2yV2yV2y", "", AND(SM_80, PTX70))
BUILTIN(__nvvm_fma_rn_ftz_f, "ffff", "")
BUILTIN(__nvvm_fma_rn_f, "ffff", "")
BUILTIN(__nvvm_fma_rz_ftz_f, "ffff", "")
@@ -239,6 +398,8 @@ BUILTIN(__nvvm_rcp_rn_d, "dd", "")
BUILTIN(__nvvm_rcp_rz_d, "dd", "")
BUILTIN(__nvvm_rcp_rm_d, "dd", "")
BUILTIN(__nvvm_rcp_rp_d, "dd", "")
+
+BUILTIN(__nvvm_rcp_approx_ftz_f, "ff", "")
BUILTIN(__nvvm_rcp_approx_ftz_d, "dd", "")
// Sqrt
@@ -396,6 +557,23 @@ BUILTIN(__nvvm_ull2d_rp, "dULLi", "")
BUILTIN(__nvvm_f2h_rn_ftz, "Usf", "")
BUILTIN(__nvvm_f2h_rn, "Usf", "")
+TARGET_BUILTIN(__nvvm_ff2bf16x2_rn, "V2yff", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_ff2bf16x2_rn_relu, "V2yff", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_ff2bf16x2_rz, "V2yff", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_ff2bf16x2_rz_relu, "V2yff", "", AND(SM_80,PTX70))
+
+TARGET_BUILTIN(__nvvm_ff2f16x2_rn, "V2hff", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_ff2f16x2_rn_relu, "V2hff", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_ff2f16x2_rz, "V2hff", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_ff2f16x2_rz_relu, "V2hff", "", AND(SM_80,PTX70))
+
+TARGET_BUILTIN(__nvvm_f2bf16_rn, "yf", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_f2bf16_rn_relu, "yf", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_f2bf16_rz, "yf", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_f2bf16_rz_relu, "yf", "", AND(SM_80,PTX70))
+
+TARGET_BUILTIN(__nvvm_f2tf32_rna, "ZUif", "", AND(SM_80,PTX70))
+
// Bitcast
BUILTIN(__nvvm_bitcast_f2i, "if", "")
@@ -418,6 +596,11 @@ TARGET_BUILTIN(__nvvm_bar_warp_sync, "vUi", "n", PTX60)
TARGET_BUILTIN(__nvvm_barrier_sync, "vUi", "n", PTX60)
TARGET_BUILTIN(__nvvm_barrier_sync_cnt, "vUiUi", "n", PTX60)
+TARGET_BUILTIN(__nvvm_barrier_cluster_arrive, "v", "n", AND(SM_90,PTX78))
+TARGET_BUILTIN(__nvvm_barrier_cluster_arrive_relaxed, "v", "n", AND(SM_90,PTX80))
+TARGET_BUILTIN(__nvvm_barrier_cluster_wait, "v", "n", AND(SM_90,PTX78))
+TARGET_BUILTIN(__nvvm_fence_sc_cluster, "v", "n", AND(SM_90,PTX78))
+
// Shuffle
BUILTIN(__nvvm_shfl_down_i32, "iiii", "")
@@ -450,11 +633,11 @@ TARGET_BUILTIN(__nvvm_vote_uni_sync, "bUib", "", PTX60)
TARGET_BUILTIN(__nvvm_vote_ballot_sync, "UiUib", "", PTX60)
// Match
-TARGET_BUILTIN(__nvvm_match_any_sync_i32, "UiUiUi", "", PTX60)
-TARGET_BUILTIN(__nvvm_match_any_sync_i64, "WiUiWi", "", PTX60)
+TARGET_BUILTIN(__nvvm_match_any_sync_i32, "UiUiUi", "", AND(SM_70,PTX60))
+TARGET_BUILTIN(__nvvm_match_any_sync_i64, "UiUiWi", "", AND(SM_70,PTX60))
// These return a pair {value, predicate}, which requires custom lowering.
-TARGET_BUILTIN(__nvvm_match_all_sync_i32p, "UiUiUii*", "", PTX60)
-TARGET_BUILTIN(__nvvm_match_all_sync_i64p, "WiUiWii*", "", PTX60)
+TARGET_BUILTIN(__nvvm_match_all_sync_i32p, "UiUiUii*", "", AND(SM_70,PTX60))
+TARGET_BUILTIN(__nvvm_match_all_sync_i64p, "UiUiWii*", "", AND(SM_70,PTX60))
// Redux
TARGET_BUILTIN(__nvvm_redux_sync_add, "iii", "", AND(SM_80,PTX70))
@@ -647,8 +830,50 @@ TARGET_BUILTIN(__nvvm_atom_sys_cas_gen_ll, "LLiLLiD*LLiLLi", "n", SM_60)
BUILTIN(__nvvm_compiler_error, "vcC*4", "n")
BUILTIN(__nvvm_compiler_warn, "vcC*4", "n")
-// __ldg. This is not implemented as a builtin by nvcc.
+BUILTIN(__nvvm_ldu_c, "ccC*", "")
+BUILTIN(__nvvm_ldu_sc, "ScScC*", "")
+BUILTIN(__nvvm_ldu_s, "ssC*", "")
+BUILTIN(__nvvm_ldu_i, "iiC*", "")
+BUILTIN(__nvvm_ldu_l, "LiLiC*", "")
+BUILTIN(__nvvm_ldu_ll, "LLiLLiC*", "")
+
+BUILTIN(__nvvm_ldu_uc, "UcUcC*", "")
+BUILTIN(__nvvm_ldu_us, "UsUsC*", "")
+BUILTIN(__nvvm_ldu_ui, "UiUiC*", "")
+BUILTIN(__nvvm_ldu_ul, "ULiULiC*", "")
+BUILTIN(__nvvm_ldu_ull, "ULLiULLiC*", "")
+
+BUILTIN(__nvvm_ldu_h, "hhC*", "")
+BUILTIN(__nvvm_ldu_f, "ffC*", "")
+BUILTIN(__nvvm_ldu_d, "ddC*", "")
+
+BUILTIN(__nvvm_ldu_c2, "E2cE2cC*", "")
+BUILTIN(__nvvm_ldu_sc2, "E2ScE2ScC*", "")
+BUILTIN(__nvvm_ldu_c4, "E4cE4cC*", "")
+BUILTIN(__nvvm_ldu_sc4, "E4ScE4ScC*", "")
+BUILTIN(__nvvm_ldu_s2, "E2sE2sC*", "")
+BUILTIN(__nvvm_ldu_s4, "E4sE4sC*", "")
+BUILTIN(__nvvm_ldu_i2, "E2iE2iC*", "")
+BUILTIN(__nvvm_ldu_i4, "E4iE4iC*", "")
+BUILTIN(__nvvm_ldu_l2, "E2LiE2LiC*", "")
+BUILTIN(__nvvm_ldu_ll2, "E2LLiE2LLiC*", "")
+
+BUILTIN(__nvvm_ldu_uc2, "E2UcE2UcC*", "")
+BUILTIN(__nvvm_ldu_uc4, "E4UcE4UcC*", "")
+BUILTIN(__nvvm_ldu_us2, "E2UsE2UsC*", "")
+BUILTIN(__nvvm_ldu_us4, "E4UsE4UsC*", "")
+BUILTIN(__nvvm_ldu_ui2, "E2UiE2UiC*", "")
+BUILTIN(__nvvm_ldu_ui4, "E4UiE4UiC*", "")
+BUILTIN(__nvvm_ldu_ul2, "E2ULiE2ULiC*", "")
+BUILTIN(__nvvm_ldu_ull2, "E2ULLiE2ULLiC*", "")
+
+BUILTIN(__nvvm_ldu_h2, "E2hE2hC*", "")
+BUILTIN(__nvvm_ldu_f2, "E2fE2fC*", "")
+BUILTIN(__nvvm_ldu_f4, "E4fE4fC*", "")
+BUILTIN(__nvvm_ldu_d2, "E2dE2dC*", "")
+
BUILTIN(__nvvm_ldg_c, "ccC*", "")
+BUILTIN(__nvvm_ldg_sc, "ScScC*", "")
BUILTIN(__nvvm_ldg_s, "ssC*", "")
BUILTIN(__nvvm_ldg_i, "iiC*", "")
BUILTIN(__nvvm_ldg_l, "LiLiC*", "")
@@ -660,15 +885,19 @@ BUILTIN(__nvvm_ldg_ui, "UiUiC*", "")
BUILTIN(__nvvm_ldg_ul, "ULiULiC*", "")
BUILTIN(__nvvm_ldg_ull, "ULLiULLiC*", "")
+BUILTIN(__nvvm_ldg_h, "hhC*", "")
BUILTIN(__nvvm_ldg_f, "ffC*", "")
BUILTIN(__nvvm_ldg_d, "ddC*", "")
BUILTIN(__nvvm_ldg_c2, "E2cE2cC*", "")
+BUILTIN(__nvvm_ldg_sc2, "E2ScE2ScC*", "")
BUILTIN(__nvvm_ldg_c4, "E4cE4cC*", "")
+BUILTIN(__nvvm_ldg_sc4, "E4ScE4ScC*", "")
BUILTIN(__nvvm_ldg_s2, "E2sE2sC*", "")
BUILTIN(__nvvm_ldg_s4, "E4sE4sC*", "")
BUILTIN(__nvvm_ldg_i2, "E2iE2iC*", "")
BUILTIN(__nvvm_ldg_i4, "E4iE4iC*", "")
+BUILTIN(__nvvm_ldg_l2, "E2LiE2LiC*", "")
BUILTIN(__nvvm_ldg_ll2, "E2LLiE2LLiC*", "")
BUILTIN(__nvvm_ldg_uc2, "E2UcE2UcC*", "")
@@ -677,33 +906,42 @@ BUILTIN(__nvvm_ldg_us2, "E2UsE2UsC*", "")
BUILTIN(__nvvm_ldg_us4, "E4UsE4UsC*", "")
BUILTIN(__nvvm_ldg_ui2, "E2UiE2UiC*", "")
BUILTIN(__nvvm_ldg_ui4, "E4UiE4UiC*", "")
+BUILTIN(__nvvm_ldg_ul2, "E2ULiE2ULiC*", "")
BUILTIN(__nvvm_ldg_ull2, "E2ULLiE2ULLiC*", "")
+BUILTIN(__nvvm_ldg_h2, "E2hE2hC*", "")
BUILTIN(__nvvm_ldg_f2, "E2fE2fC*", "")
BUILTIN(__nvvm_ldg_f4, "E4fE4fC*", "")
BUILTIN(__nvvm_ldg_d2, "E2dE2dC*", "")
+// Address space predicates.
+BUILTIN(__nvvm_isspacep_const, "bvC*", "nc")
+BUILTIN(__nvvm_isspacep_global, "bvC*", "nc")
+BUILTIN(__nvvm_isspacep_local, "bvC*", "nc")
+BUILTIN(__nvvm_isspacep_shared, "bvC*", "nc")
+TARGET_BUILTIN(__nvvm_isspacep_shared_cluster,"bvC*", "nc", AND(SM_90,PTX78))
+
// Builtins to support WMMA instructions on sm_70
TARGET_BUILTIN(__hmma_m16n16k16_ld_a, "vi*iC*UiIi", "", AND(SM_70,PTX60))
TARGET_BUILTIN(__hmma_m16n16k16_ld_b, "vi*iC*UiIi", "", AND(SM_70,PTX60))
TARGET_BUILTIN(__hmma_m16n16k16_ld_c_f16, "vi*iC*UiIi", "", AND(SM_70,PTX60))
TARGET_BUILTIN(__hmma_m16n16k16_ld_c_f32, "vf*fC*UiIi", "", AND(SM_70,PTX60))
-TARGET_BUILTIN(__hmma_m16n16k16_st_c_f16, "vi*i*UiIi", "", AND(SM_70,PTX60))
-TARGET_BUILTIN(__hmma_m16n16k16_st_c_f32, "vf*f*UiIi", "", AND(SM_70,PTX60))
+TARGET_BUILTIN(__hmma_m16n16k16_st_c_f16, "vi*iC*UiIi", "", AND(SM_70,PTX60))
+TARGET_BUILTIN(__hmma_m16n16k16_st_c_f32, "vf*fC*UiIi", "", AND(SM_70,PTX60))
TARGET_BUILTIN(__hmma_m32n8k16_ld_a, "vi*iC*UiIi", "", AND(SM_70,PTX61))
TARGET_BUILTIN(__hmma_m32n8k16_ld_b, "vi*iC*UiIi", "", AND(SM_70,PTX61))
TARGET_BUILTIN(__hmma_m32n8k16_ld_c_f16, "vi*iC*UiIi", "", AND(SM_70,PTX61))
TARGET_BUILTIN(__hmma_m32n8k16_ld_c_f32, "vf*fC*UiIi", "", AND(SM_70,PTX61))
-TARGET_BUILTIN(__hmma_m32n8k16_st_c_f16, "vi*i*UiIi", "", AND(SM_70,PTX61))
-TARGET_BUILTIN(__hmma_m32n8k16_st_c_f32, "vf*f*UiIi", "", AND(SM_70,PTX61))
+TARGET_BUILTIN(__hmma_m32n8k16_st_c_f16, "vi*iC*UiIi", "", AND(SM_70,PTX61))
+TARGET_BUILTIN(__hmma_m32n8k16_st_c_f32, "vf*fC*UiIi", "", AND(SM_70,PTX61))
TARGET_BUILTIN(__hmma_m8n32k16_ld_a, "vi*iC*UiIi", "", AND(SM_70,PTX61))
TARGET_BUILTIN(__hmma_m8n32k16_ld_b, "vi*iC*UiIi", "", AND(SM_70,PTX61))
TARGET_BUILTIN(__hmma_m8n32k16_ld_c_f16, "vi*iC*UiIi", "", AND(SM_70,PTX61))
TARGET_BUILTIN(__hmma_m8n32k16_ld_c_f32, "vf*fC*UiIi", "", AND(SM_70,PTX61))
-TARGET_BUILTIN(__hmma_m8n32k16_st_c_f16, "vi*i*UiIi", "", AND(SM_70,PTX61))
-TARGET_BUILTIN(__hmma_m8n32k16_st_c_f32, "vf*f*UiIi", "", AND(SM_70,PTX61))
+TARGET_BUILTIN(__hmma_m8n32k16_st_c_f16, "vi*iC*UiIi", "", AND(SM_70,PTX61))
+TARGET_BUILTIN(__hmma_m8n32k16_st_c_f32, "vf*fC*UiIi", "", AND(SM_70,PTX61))
TARGET_BUILTIN(__hmma_m16n16k16_mma_f16f16, "vi*iC*iC*iC*IiIi", "", AND(SM_70,PTX60))
TARGET_BUILTIN(__hmma_m16n16k16_mma_f32f16, "vf*iC*iC*iC*IiIi", "", AND(SM_70,PTX60))
@@ -724,7 +962,7 @@ TARGET_BUILTIN(__hmma_m8n32k16_mma_f16f32, "vi*iC*iC*fC*IiIi", "", AND(SM_70,PTX
TARGET_BUILTIN(__bmma_m8n8k128_ld_a_b1, "vi*iC*UiIi", "", AND(SM_75,PTX63))
TARGET_BUILTIN(__bmma_m8n8k128_ld_b_b1, "vi*iC*UiIi", "", AND(SM_75,PTX63))
TARGET_BUILTIN(__bmma_m8n8k128_ld_c, "vi*iC*UiIi", "", AND(SM_75,PTX63))
-TARGET_BUILTIN(__bmma_m8n8k128_mma_and_popc_b1, "vi*iC*iC*iC*Ii", "", AND(SM_75,PTX71))
+TARGET_BUILTIN(__bmma_m8n8k128_mma_and_popc_b1, "vi*iC*iC*iC*Ii", "", AND(SM_80,PTX71))
TARGET_BUILTIN(__bmma_m8n8k128_mma_xor_popc_b1, "vi*iC*iC*iC*Ii", "", AND(SM_75,PTX63))
TARGET_BUILTIN(__bmma_m8n8k128_st_c_i32, "vi*iC*UiIi", "", AND(SM_75,PTX63))
TARGET_BUILTIN(__imma_m16n16k16_ld_a_s8, "vi*iC*UiIi", "", AND(SM_72,PTX63))
@@ -789,24 +1027,42 @@ TARGET_BUILTIN(__nvvm_cp_async_mbarrier_arrive_shared, "vWi*3", "", AND(SM_80,PT
TARGET_BUILTIN(__nvvm_cp_async_mbarrier_arrive_noinc, "vWi*", "", AND(SM_80,PTX70))
TARGET_BUILTIN(__nvvm_cp_async_mbarrier_arrive_noinc_shared, "vWi*3", "", AND(SM_80,PTX70))
-TARGET_BUILTIN(__nvvm_cp_async_ca_shared_global_4, "vv*3vC*1", "", AND(SM_80,PTX70))
-TARGET_BUILTIN(__nvvm_cp_async_ca_shared_global_8, "vv*3vC*1", "", AND(SM_80,PTX70))
-TARGET_BUILTIN(__nvvm_cp_async_ca_shared_global_16, "vv*3vC*1", "", AND(SM_80,PTX70))
-TARGET_BUILTIN(__nvvm_cp_async_cg_shared_global_16, "vv*3vC*1", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_cp_async_ca_shared_global_4, "vv*3vC*1.", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_cp_async_ca_shared_global_8, "vv*3vC*1.", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_cp_async_ca_shared_global_16, "vv*3vC*1.", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_cp_async_cg_shared_global_16, "vv*3vC*1.", "", AND(SM_80,PTX70))
TARGET_BUILTIN(__nvvm_cp_async_commit_group, "v", "", AND(SM_80,PTX70))
TARGET_BUILTIN(__nvvm_cp_async_wait_group, "vIi", "", AND(SM_80,PTX70))
TARGET_BUILTIN(__nvvm_cp_async_wait_all, "v", "", AND(SM_80,PTX70))
+
+// bf16, bf16x2 abs, neg
+TARGET_BUILTIN(__nvvm_abs_bf16, "yy", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_abs_bf16x2, "V2yV2y", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_neg_bf16, "yy", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_neg_bf16x2, "V2yV2y", "", AND(SM_80,PTX70))
+
+TARGET_BUILTIN(__nvvm_mapa, "v*v*i", "", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_mapa_shared_cluster, "v*3v*3i", "", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_getctarank, "iv*", "", AND(SM_90, PTX78))
+TARGET_BUILTIN(__nvvm_getctarank_shared_cluster, "iv*3", "", AND(SM_90,PTX78))
+
#undef BUILTIN
#undef TARGET_BUILTIN
#pragma pop_macro("AND")
+#pragma pop_macro("SM_53")
#pragma pop_macro("SM_60")
#pragma pop_macro("SM_70")
#pragma pop_macro("SM_72")
#pragma pop_macro("SM_75")
#pragma pop_macro("SM_80")
#pragma pop_macro("SM_86")
+#pragma pop_macro("SM_87")
+#pragma pop_macro("SM_89")
+#pragma pop_macro("SM_90")
+#pragma pop_macro("SM_90a")
+#pragma pop_macro("PTX42")
#pragma pop_macro("PTX60")
#pragma pop_macro("PTX61")
#pragma pop_macro("PTX63")
@@ -815,3 +1071,13 @@ TARGET_BUILTIN(__nvvm_cp_async_wait_all, "v", "", AND(SM_80,PTX70))
#pragma pop_macro("PTX70")
#pragma pop_macro("PTX71")
#pragma pop_macro("PTX72")
+#pragma pop_macro("PTX73")
+#pragma pop_macro("PTX74")
+#pragma pop_macro("PTX75")
+#pragma pop_macro("PTX76")
+#pragma pop_macro("PTX77")
+#pragma pop_macro("PTX78")
+#pragma pop_macro("PTX80")
+#pragma pop_macro("PTX81")
+#pragma pop_macro("PTX82")
+#pragma pop_macro("PTX83")
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsPPC.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsPPC.def
index dfe97af300f4..88ae0ce94085 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsPPC.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsPPC.def
@@ -19,15 +19,33 @@
// The format of this database matches clang/Basic/Builtins.def except for the
// MMA builtins that are using their own format documented below.
-#if defined(BUILTIN) && !defined(CUSTOM_BUILTIN)
-# define CUSTOM_BUILTIN(ID, INTR, TYPES, ACCUMULATE) \
- BUILTIN(__builtin_##ID, "i.", "t")
-#elif defined(CUSTOM_BUILTIN) && !defined(BUILTIN)
-# define BUILTIN(ID, TYPES, ATTRS)
+#ifndef BUILTIN
+#define BUILTIN(ID, TYPE, ATTRS)
#endif
-#define UNALIASED_CUSTOM_BUILTIN(ID, TYPES, ACCUMULATE) \
- CUSTOM_BUILTIN(ID, ID, TYPES, ACCUMULATE)
+#if defined(BUILTIN) && !defined(TARGET_BUILTIN)
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BUILTIN(ID, TYPE, ATTRS)
+#endif
+
+#ifndef CUSTOM_BUILTIN
+#define CUSTOM_BUILTIN(ID, INTR, TYPES, ACCUMULATE, FEATURE) \
+ TARGET_BUILTIN(__builtin_##ID, "i.", "t", FEATURE)
+#endif
+
+#define UNALIASED_CUSTOM_BUILTIN(ID, TYPES, ACCUMULATE, FEATURE) \
+ CUSTOM_BUILTIN(ID, ID, TYPES, ACCUMULATE, FEATURE)
+
+// GCC predefined macros to rename builtins, undef them to keep original names.
+#if defined(__GNUC__) && !defined(__clang__)
+#undef __builtin_vsx_xvnmaddadp
+#undef __builtin_vsx_xvnmaddasp
+#undef __builtin_vsx_xvmsubasp
+#undef __builtin_vsx_xvmsubadp
+#undef __builtin_vsx_xvmaddadp
+#undef __builtin_vsx_xvnmsubasp
+#undef __builtin_vsx_xvnmsubadp
+#undef __builtin_vsx_xvmaddasp
+#endif
// XL Compatibility built-ins
BUILTIN(__builtin_ppc_popcntb, "ULiULi", "")
@@ -46,7 +64,7 @@ BUILTIN(__builtin_ppc_dcbst, "vvC*", "")
BUILTIN(__builtin_ppc_dcbt, "vv*", "")
BUILTIN(__builtin_ppc_dcbtst, "vv*", "")
BUILTIN(__builtin_ppc_dcbz, "vv*", "")
-BUILTIN(__builtin_ppc_icbt, "vv*", "")
+TARGET_BUILTIN(__builtin_ppc_icbt, "vv*", "", "isa-v207-instructions")
BUILTIN(__builtin_ppc_fric, "dd", "")
BUILTIN(__builtin_ppc_frim, "dd", "")
BUILTIN(__builtin_ppc_frims, "ff", "")
@@ -74,12 +92,12 @@ BUILTIN(__builtin_ppc_fetch_and_swap, "UiUiD*Ui", "")
BUILTIN(__builtin_ppc_fetch_and_swaplp, "ULiULiD*ULi", "")
BUILTIN(__builtin_ppc_ldarx, "LiLiD*", "")
BUILTIN(__builtin_ppc_lwarx, "iiD*", "")
-BUILTIN(__builtin_ppc_lharx, "isD*", "")
-BUILTIN(__builtin_ppc_lbarx, "UiUcD*", "")
+TARGET_BUILTIN(__builtin_ppc_lharx, "ssD*", "", "isa-v207-instructions")
+TARGET_BUILTIN(__builtin_ppc_lbarx, "ccD*", "", "isa-v207-instructions")
BUILTIN(__builtin_ppc_stdcx, "iLiD*Li", "")
BUILTIN(__builtin_ppc_stwcx, "iiD*i", "")
-BUILTIN(__builtin_ppc_sthcx, "isD*s", "")
-BUILTIN(__builtin_ppc_stbcx, "icD*i", "")
+TARGET_BUILTIN(__builtin_ppc_sthcx, "isD*s", "", "isa-v207-instructions")
+TARGET_BUILTIN(__builtin_ppc_stbcx, "icD*i", "", "isa-v207-instructions")
BUILTIN(__builtin_ppc_tdw, "vLLiLLiIUi", "")
BUILTIN(__builtin_ppc_tw, "viiIUi", "")
BUILTIN(__builtin_ppc_trap, "vi", "")
@@ -92,42 +110,57 @@ BUILTIN(__builtin_ppc_fctiw, "dd", "")
BUILTIN(__builtin_ppc_fctiwz, "dd", "")
BUILTIN(__builtin_ppc_fctudz, "dd", "")
BUILTIN(__builtin_ppc_fctuwz, "dd", "")
+
+// fence builtin prevents all instructions moved across it
+BUILTIN(__builtin_ppc_fence, "v", "")
+
BUILTIN(__builtin_ppc_swdiv_nochk, "ddd", "")
BUILTIN(__builtin_ppc_swdivs_nochk, "fff", "")
BUILTIN(__builtin_ppc_alignx, "vIivC*", "nc")
BUILTIN(__builtin_ppc_rdlam, "UWiUWiUWiUWIi", "nc")
+TARGET_BUILTIN(__builtin_ppc_compare_exp_uo, "idd", "", "isa-v30-instructions,vsx")
+TARGET_BUILTIN(__builtin_ppc_compare_exp_lt, "idd", "", "isa-v30-instructions,vsx")
+TARGET_BUILTIN(__builtin_ppc_compare_exp_gt, "idd", "", "isa-v30-instructions,vsx")
+TARGET_BUILTIN(__builtin_ppc_compare_exp_eq, "idd", "", "isa-v30-instructions,vsx")
+TARGET_BUILTIN(__builtin_ppc_test_data_class, "idIi", "t", "isa-v30-instructions,vsx")
+BUILTIN(__builtin_ppc_swdiv, "ddd", "")
+BUILTIN(__builtin_ppc_swdivs, "fff", "")
// Compare
-BUILTIN(__builtin_ppc_cmpeqb, "LLiLLiLLi", "")
-BUILTIN(__builtin_ppc_cmprb, "iCIiii", "")
-BUILTIN(__builtin_ppc_setb, "LLiLLiLLi", "")
+TARGET_BUILTIN(__builtin_ppc_cmpeqb, "LLiLLiLLi", "", "isa-v30-instructions")
+TARGET_BUILTIN(__builtin_ppc_cmprb, "iCIiii", "", "isa-v30-instructions")
+TARGET_BUILTIN(__builtin_ppc_setb, "LLiLLiLLi", "", "isa-v30-instructions")
BUILTIN(__builtin_ppc_cmpb, "LLiLLiLLi", "")
// Multiply
BUILTIN(__builtin_ppc_mulhd, "LLiLiLi", "")
BUILTIN(__builtin_ppc_mulhdu, "ULLiULiULi", "")
BUILTIN(__builtin_ppc_mulhw, "iii", "")
BUILTIN(__builtin_ppc_mulhwu, "UiUiUi", "")
-BUILTIN(__builtin_ppc_maddhd, "LLiLLiLLiLLi", "")
-BUILTIN(__builtin_ppc_maddhdu, "ULLiULLiULLiULLi", "")
-BUILTIN(__builtin_ppc_maddld, "LLiLLiLLiLLi", "")
+TARGET_BUILTIN(__builtin_ppc_maddhd, "LLiLLiLLiLLi", "", "isa-v30-instructions")
+TARGET_BUILTIN(__builtin_ppc_maddhdu, "ULLiULLiULLiULLi", "",
+ "isa-v30-instructions")
+TARGET_BUILTIN(__builtin_ppc_maddld, "LLiLLiLLiLLi", "", "isa-v30-instructions")
// Rotate
-BUILTIN(__builtin_ppc_rlwnm, "UiUiIUiIUi", "")
+BUILTIN(__builtin_ppc_rlwnm, "UiUiUiIUi", "")
BUILTIN(__builtin_ppc_rlwimi, "UiUiUiIUiIUi", "")
BUILTIN(__builtin_ppc_rldimi, "ULLiULLiULLiIUiIULLi", "")
// load
-BUILTIN(__builtin_ppc_load2r, "UiUs*", "")
+BUILTIN(__builtin_ppc_load2r, "UsUs*", "")
BUILTIN(__builtin_ppc_load4r, "UiUi*", "")
-BUILTIN(__builtin_ppc_load8r, "ULLiULLi*", "")
+TARGET_BUILTIN(__builtin_ppc_load8r, "ULLiULLi*", "", "isa-v206-instructions")
// store
BUILTIN(__builtin_ppc_store2r, "vUiUs*", "")
BUILTIN(__builtin_ppc_store4r, "vUiUi*", "")
-BUILTIN(__builtin_ppc_store8r, "vULLiULLi*", "")
-BUILTIN(__builtin_ppc_extract_exp, "Uid", "")
-BUILTIN(__builtin_ppc_extract_sig, "ULLid", "")
+TARGET_BUILTIN(__builtin_ppc_store8r, "vULLiULLi*", "", "isa-v206-instructions")
+TARGET_BUILTIN(__builtin_ppc_extract_exp, "Uid", "", "power9-vector")
+TARGET_BUILTIN(__builtin_ppc_extract_sig, "ULLid", "", "power9-vector")
BUILTIN(__builtin_ppc_mtfsb0, "vUIi", "")
BUILTIN(__builtin_ppc_mtfsb1, "vUIi", "")
+BUILTIN(__builtin_ppc_mffs, "d", "")
+TARGET_BUILTIN(__builtin_ppc_mffsl, "d", "", "isa-v30-instructions")
BUILTIN(__builtin_ppc_mtfsf, "vUIiUi", "")
BUILTIN(__builtin_ppc_mtfsfi, "vUIiUIi", "")
-BUILTIN(__builtin_ppc_insert_exp, "ddULLi", "")
+BUILTIN(__builtin_ppc_set_fpscr_rn, "di", "")
+TARGET_BUILTIN(__builtin_ppc_insert_exp, "ddULLi", "", "power9-vector")
BUILTIN(__builtin_ppc_fmsub, "dddd", "")
BUILTIN(__builtin_ppc_fmsubs, "ffff", "")
BUILTIN(__builtin_ppc_fnmadd, "dddd", "")
@@ -138,589 +171,717 @@ BUILTIN(__builtin_ppc_fre, "dd", "")
BUILTIN(__builtin_ppc_fres, "ff", "")
BUILTIN(__builtin_ppc_dcbtstt, "vv*", "")
BUILTIN(__builtin_ppc_dcbtt, "vv*", "")
-BUILTIN(__builtin_ppc_mftbu, "Ui","")
+BUILTIN(__builtin_ppc_mftbu, "Ui", "")
BUILTIN(__builtin_ppc_mfmsr, "Ui", "")
BUILTIN(__builtin_ppc_mfspr, "ULiIi", "")
BUILTIN(__builtin_ppc_mtmsr, "vUi", "")
BUILTIN(__builtin_ppc_mtspr, "vIiULi", "")
BUILTIN(__builtin_ppc_stfiw, "viC*d", "")
+TARGET_BUILTIN(__builtin_ppc_addex, "LLiLLiLLiCIi", "", "isa-v30-instructions")
+// select
+BUILTIN(__builtin_ppc_maxfe, "LdLdLdLd.", "t")
+BUILTIN(__builtin_ppc_maxfl, "dddd.", "t")
+BUILTIN(__builtin_ppc_maxfs, "ffff.", "t")
+BUILTIN(__builtin_ppc_minfe, "LdLdLdLd.", "t")
+BUILTIN(__builtin_ppc_minfl, "dddd.", "t")
+BUILTIN(__builtin_ppc_minfs, "ffff.", "t")
+// Floating Negative Absolute Value
+BUILTIN(__builtin_ppc_fnabs, "dd", "")
+BUILTIN(__builtin_ppc_fnabss, "ff", "")
BUILTIN(__builtin_ppc_get_timebase, "ULLi", "n")
// This is just a placeholder, the types and attributes are wrong.
-BUILTIN(__builtin_altivec_vaddcuw, "V4UiV4UiV4Ui", "")
-
-BUILTIN(__builtin_altivec_vaddsbs, "V16ScV16ScV16Sc", "")
-BUILTIN(__builtin_altivec_vaddubs, "V16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vaddshs, "V8SsV8SsV8Ss", "")
-BUILTIN(__builtin_altivec_vadduhs, "V8UsV8UsV8Us", "")
-BUILTIN(__builtin_altivec_vaddsws, "V4SiV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vadduws, "V4UiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vaddeuqm, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi","")
-BUILTIN(__builtin_altivec_vaddcuq, "V1ULLLiV1ULLLiV1ULLLi","")
-BUILTIN(__builtin_altivec_vaddecuq, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi","")
-BUILTIN(__builtin_altivec_vadduqm, "V1ULLLiV16UcV16Uc","")
-
-BUILTIN(__builtin_altivec_vsubsbs, "V16ScV16ScV16Sc", "")
-BUILTIN(__builtin_altivec_vsububs, "V16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vsubshs, "V8SsV8SsV8Ss", "")
-BUILTIN(__builtin_altivec_vsubuhs, "V8UsV8UsV8Us", "")
-BUILTIN(__builtin_altivec_vsubsws, "V4SiV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vsubuws, "V4UiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vsubeuqm, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi","")
-BUILTIN(__builtin_altivec_vsubcuq, "V1ULLLiV1ULLLiV1ULLLi","")
-BUILTIN(__builtin_altivec_vsubecuq, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi","")
-BUILTIN(__builtin_altivec_vsubuqm, "V1ULLLiV16UcV16Uc","")
-
-BUILTIN(__builtin_altivec_vavgsb, "V16ScV16ScV16Sc", "")
-BUILTIN(__builtin_altivec_vavgub, "V16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vavgsh, "V8SsV8SsV8Ss", "")
-BUILTIN(__builtin_altivec_vavguh, "V8UsV8UsV8Us", "")
-BUILTIN(__builtin_altivec_vavgsw, "V4SiV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vavguw, "V4UiV4UiV4Ui", "")
-
-BUILTIN(__builtin_altivec_vrfip, "V4fV4f", "")
-
-BUILTIN(__builtin_altivec_vcfsx, "V4fV4SiIi", "")
-BUILTIN(__builtin_altivec_vcfux, "V4fV4UiIi", "")
-BUILTIN(__builtin_altivec_vctsxs, "V4SiV4fIi", "")
-BUILTIN(__builtin_altivec_vctuxs, "V4UiV4fIi", "")
-
-BUILTIN(__builtin_altivec_dss, "vUIi", "")
-BUILTIN(__builtin_altivec_dssall, "v", "")
-BUILTIN(__builtin_altivec_dst, "vvC*iUIi", "")
-BUILTIN(__builtin_altivec_dstt, "vvC*iUIi", "")
-BUILTIN(__builtin_altivec_dstst, "vvC*iUIi", "")
-BUILTIN(__builtin_altivec_dststt, "vvC*iUIi", "")
-
-BUILTIN(__builtin_altivec_vexptefp, "V4fV4f", "")
-
-BUILTIN(__builtin_altivec_vrfim, "V4fV4f", "")
-
-BUILTIN(__builtin_altivec_lvx, "V4iLivC*", "")
-BUILTIN(__builtin_altivec_lvxl, "V4iLivC*", "")
-BUILTIN(__builtin_altivec_lvebx, "V16cLivC*", "")
-BUILTIN(__builtin_altivec_lvehx, "V8sLivC*", "")
-BUILTIN(__builtin_altivec_lvewx, "V4iLivC*", "")
-
-BUILTIN(__builtin_altivec_vlogefp, "V4fV4f", "")
-
-BUILTIN(__builtin_altivec_lvsl, "V16cUcvC*", "")
-BUILTIN(__builtin_altivec_lvsr, "V16cUcvC*", "")
-
-BUILTIN(__builtin_altivec_vmaddfp, "V4fV4fV4fV4f", "")
-BUILTIN(__builtin_altivec_vmhaddshs, "V8sV8sV8sV8s", "")
-BUILTIN(__builtin_altivec_vmhraddshs, "V8sV8sV8sV8s", "")
-
-BUILTIN(__builtin_altivec_vmsumubm, "V4UiV16UcV16UcV4Ui", "")
-BUILTIN(__builtin_altivec_vmsummbm, "V4SiV16ScV16UcV4Si", "")
-BUILTIN(__builtin_altivec_vmsumuhm, "V4UiV8UsV8UsV4Ui", "")
-BUILTIN(__builtin_altivec_vmsumshm, "V4SiV8SsV8SsV4Si", "")
-BUILTIN(__builtin_altivec_vmsumuhs, "V4UiV8UsV8UsV4Ui", "")
-BUILTIN(__builtin_altivec_vmsumshs, "V4SiV8SsV8SsV4Si", "")
-
-BUILTIN(__builtin_altivec_vmuleub, "V8UsV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vmulesb, "V8SsV16ScV16Sc", "")
-BUILTIN(__builtin_altivec_vmuleuh, "V4UiV8UsV8Us", "")
-BUILTIN(__builtin_altivec_vmulesh, "V4SiV8SsV8Ss", "")
-BUILTIN(__builtin_altivec_vmuleuw, "V2ULLiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vmulesw, "V2SLLiV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vmuloub, "V8UsV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vmulosb, "V8SsV16ScV16Sc", "")
-BUILTIN(__builtin_altivec_vmulouh, "V4UiV8UsV8Us", "")
-BUILTIN(__builtin_altivec_vmulosh, "V4SiV8SsV8Ss", "")
-BUILTIN(__builtin_altivec_vmulouw, "V2ULLiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vmulosw, "V2SLLiV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vmuleud, "V1ULLLiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vmulesd, "V1SLLLiV2SLLiV2SLLi", "")
-BUILTIN(__builtin_altivec_vmuloud, "V1ULLLiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vmulosd, "V1SLLLiV2SLLiV2SLLi", "")
-BUILTIN(__builtin_altivec_vmsumcud, "V1ULLLiV2ULLiV2ULLiV1ULLLi", "")
-
-BUILTIN(__builtin_altivec_vnmsubfp, "V4fV4fV4fV4f", "")
-
-BUILTIN(__builtin_altivec_vpkpx, "V8sV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vpkuhus, "V16UcV8UsV8Us", "")
-BUILTIN(__builtin_altivec_vpkshss, "V16ScV8SsV8Ss", "")
-BUILTIN(__builtin_altivec_vpkuwus, "V8UsV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vpkswss, "V8SsV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vpkshus, "V16UcV8SsV8Ss", "")
-BUILTIN(__builtin_altivec_vpkswus, "V8UsV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vpksdss, "V4SiV2SLLiV2SLLi", "")
-BUILTIN(__builtin_altivec_vpksdus, "V4UiV2SLLiV2SLLi", "")
-BUILTIN(__builtin_altivec_vpkudus, "V4UiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vpkudum, "V4UiV2ULLiV2ULLi", "")
-
-BUILTIN(__builtin_altivec_vperm_4si, "V4iV4iV4iV16Uc", "")
-
-BUILTIN(__builtin_altivec_stvx, "vV4iLiv*", "")
-BUILTIN(__builtin_altivec_stvxl, "vV4iLiv*", "")
-BUILTIN(__builtin_altivec_stvebx, "vV16cLiv*", "")
-BUILTIN(__builtin_altivec_stvehx, "vV8sLiv*", "")
-BUILTIN(__builtin_altivec_stvewx, "vV4iLiv*", "")
-
-BUILTIN(__builtin_altivec_vcmpbfp, "V4iV4fV4f", "")
-
-BUILTIN(__builtin_altivec_vcmpgefp, "V4iV4fV4f", "")
-
-BUILTIN(__builtin_altivec_vcmpequb, "V16cV16cV16c", "")
-BUILTIN(__builtin_altivec_vcmpequh, "V8sV8sV8s", "")
-BUILTIN(__builtin_altivec_vcmpequw, "V4iV4iV4i", "")
-BUILTIN(__builtin_altivec_vcmpequd, "V2LLiV2LLiV2LLi", "")
-BUILTIN(__builtin_altivec_vcmpeqfp, "V4iV4fV4f", "")
-
-BUILTIN(__builtin_altivec_vcmpneb, "V16cV16cV16c", "")
-BUILTIN(__builtin_altivec_vcmpneh, "V8sV8sV8s", "")
-BUILTIN(__builtin_altivec_vcmpnew, "V4iV4iV4i", "")
-
-BUILTIN(__builtin_altivec_vcmpnezb, "V16cV16cV16c", "")
-BUILTIN(__builtin_altivec_vcmpnezh, "V8sV8sV8s", "")
-BUILTIN(__builtin_altivec_vcmpnezw, "V4iV4iV4i", "")
-
-BUILTIN(__builtin_altivec_vcmpgtsb, "V16cV16ScV16Sc", "")
-BUILTIN(__builtin_altivec_vcmpgtub, "V16cV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vcmpgtsh, "V8sV8SsV8Ss", "")
-BUILTIN(__builtin_altivec_vcmpgtuh, "V8sV8UsV8Us", "")
-BUILTIN(__builtin_altivec_vcmpgtsw, "V4iV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vcmpgtuw, "V4iV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vcmpgtsd, "V2LLiV2LLiV2LLi", "")
-BUILTIN(__builtin_altivec_vcmpgtud, "V2LLiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vcmpgtfp, "V4iV4fV4f", "")
+TARGET_BUILTIN(__builtin_altivec_vaddcuw, "V4UiV4UiV4Ui", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vaddsbs, "V16ScV16ScV16Sc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vaddubs, "V16UcV16UcV16Uc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vaddshs, "V8SsV8SsV8Ss", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vadduhs, "V8UsV8UsV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vaddsws, "V4SiV4SiV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vadduws, "V4UiV4UiV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vaddeuqm, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vaddcuq, "V1ULLLiV1ULLLiV1ULLLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vaddecuq, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vadduqm, "V1ULLLiV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vaddeuqm_c, "V16UcV16UcV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vaddcuq_c, "V16UcV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vaddecuq_c, "V16UcV16UcV16UcV16Uc", "",
+ "power8-vector")
+
+TARGET_BUILTIN(__builtin_altivec_vsubsbs, "V16ScV16ScV16Sc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vsububs, "V16UcV16UcV16Uc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vsubshs, "V8SsV8SsV8Ss", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vsubuhs, "V8UsV8UsV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vsubsws, "V4SiV4SiV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vsubuws, "V4UiV4UiV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vsubeuqm, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vsubcuq, "V1ULLLiV1ULLLiV1ULLLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vsubecuq, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vsubuqm, "V1ULLLiV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vsubeuqm_c, "V16UcV16UcV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vsubcuq_c, "V16UcV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vsubecuq_c, "V16UcV16UcV16UcV16Uc", "",
+ "power8-vector")
+
+TARGET_BUILTIN(__builtin_altivec_vavgsb, "V16ScV16ScV16Sc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vavgub, "V16UcV16UcV16Uc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vavgsh, "V8SsV8SsV8Ss", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vavguh, "V8UsV8UsV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vavgsw, "V4SiV4SiV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vavguw, "V4UiV4UiV4Ui", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vrfip, "V4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vcfsx, "V4fV4SiIi", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcfux, "V4fV4UiIi", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vctsxs, "V4SiV4fIi", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vctuxs, "V4UiV4fIi", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_dss, "vUIi", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_dssall, "v", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_dst, "vvC*iUIi", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_dstt, "vvC*iUIi", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_dstst, "vvC*iUIi", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_dststt, "vvC*iUIi", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vexptefp, "V4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vrfim, "V4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_lvx, "V4iLivC*", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_lvxl, "V4iLivC*", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_lvebx, "V16cLivC*", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_lvehx, "V8sLivC*", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_lvewx, "V4iLivC*", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vlogefp, "V4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_lvsl, "V16cUcvC*", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_lvsr, "V16cUcvC*", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vmaddfp, "V4fV4fV4fV4f", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmhaddshs, "V8sV8sV8sV8s", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmhraddshs, "V8sV8sV8sV8s", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vmsumubm, "V4UiV16UcV16UcV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmsummbm, "V4SiV16ScV16UcV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmsumuhm, "V4UiV8UsV8UsV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmsumshm, "V4SiV8SsV8SsV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmsumuhs, "V4UiV8UsV8UsV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmsumshs, "V4SiV8SsV8SsV4Si", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vmuleub, "V8UsV16UcV16Uc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmulesb, "V8SsV16ScV16Sc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmuleuh, "V4UiV8UsV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmulesh, "V4SiV8SsV8Ss", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmuleuw, "V2ULLiV4UiV4Ui", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vmulesw, "V2SLLiV4SiV4Si", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vmuloub, "V8UsV16UcV16Uc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmulosb, "V8SsV16ScV16Sc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmulouh, "V4UiV8UsV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmulosh, "V4SiV8SsV8Ss", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmulouw, "V2ULLiV4UiV4Ui", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vmulosw, "V2SLLiV4SiV4Si", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vmuleud, "V1ULLLiV2ULLiV2ULLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vmulesd, "V1SLLLiV2SLLiV2SLLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vmuloud, "V1ULLLiV2ULLiV2ULLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vmulosd, "V1SLLLiV2SLLiV2SLLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vmsumcud, "V1ULLLiV2ULLiV2ULLiV1ULLLi", "",
+ "power10-vector")
+
+TARGET_BUILTIN(__builtin_altivec_vnmsubfp, "V4fV4fV4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vpkpx, "V8sV4UiV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vpkuhus, "V16UcV8UsV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vpkshss, "V16ScV8SsV8Ss", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vpkuwus, "V8UsV4UiV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vpkswss, "V8SsV4SiV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vpkshus, "V16UcV8SsV8Ss", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vpkswus, "V8UsV4SiV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vpksdss, "V4SiV2SLLiV2SLLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vpksdus, "V4UiV2SLLiV2SLLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vpkudus, "V4UiV2ULLiV2ULLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vpkudum, "V4UiV2ULLiV2ULLi", "",
+ "power8-vector")
+
+TARGET_BUILTIN(__builtin_altivec_vperm_4si, "V4iV4iV4iV16Uc", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_stvx, "vV4iLiv*", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_stvxl, "vV4iLiv*", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_stvebx, "vV16cLiv*", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_stvehx, "vV8sLiv*", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_stvewx, "vV4iLiv*", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vcmpbfp, "V4iV4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vcmpgefp, "V4iV4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vcmpequb, "V16cV16cV16c", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpequh, "V8sV8sV8s", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpequw, "V4iV4iV4i", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpequd, "V2LLiV2LLiV2LLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpeqfp, "V4iV4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vcmpneb, "V16cV16cV16c", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpneh, "V8sV8sV8s", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpnew, "V4iV4iV4i", "", "power9-vector")
+
+TARGET_BUILTIN(__builtin_altivec_vcmpnezb, "V16cV16cV16c", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpnezh, "V8sV8sV8s", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpnezw, "V4iV4iV4i", "", "power9-vector")
+
+TARGET_BUILTIN(__builtin_altivec_vcmpgtsb, "V16cV16ScV16Sc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtub, "V16cV16UcV16Uc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtsh, "V8sV8SsV8Ss", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtuh, "V8sV8UsV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtsw, "V4iV4SiV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtuw, "V4iV4UiV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtsd, "V2LLiV2LLiV2LLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtud, "V2LLiV2ULLiV2ULLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtfp, "V4iV4fV4f", "", "altivec")
// P10 Vector compare builtins.
-BUILTIN(__builtin_altivec_vcmpequq, "V1LLLiV1ULLLiV1ULLLi", "")
-BUILTIN(__builtin_altivec_vcmpgtsq, "V1LLLiV1SLLLiV1SLLLi", "")
-BUILTIN(__builtin_altivec_vcmpgtuq, "V1LLLiV1ULLLiV1ULLLi", "")
-BUILTIN(__builtin_altivec_vcmpequq_p, "iiV1ULLLiV1LLLi", "")
-BUILTIN(__builtin_altivec_vcmpgtsq_p, "iiV1SLLLiV1SLLLi", "")
-BUILTIN(__builtin_altivec_vcmpgtuq_p, "iiV1ULLLiV1ULLLi", "")
-
-BUILTIN(__builtin_altivec_vmaxsb, "V16ScV16ScV16Sc", "")
-BUILTIN(__builtin_altivec_vmaxub, "V16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vmaxsh, "V8SsV8SsV8Ss", "")
-BUILTIN(__builtin_altivec_vmaxuh, "V8UsV8UsV8Us", "")
-BUILTIN(__builtin_altivec_vmaxsw, "V4SiV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vmaxuw, "V4UiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vmaxsd, "V2LLiV2LLiV2LLi", "")
-BUILTIN(__builtin_altivec_vmaxud, "V2ULLiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vmaxfp, "V4fV4fV4f", "")
-
-BUILTIN(__builtin_altivec_mfvscr, "V8Us", "")
-
-BUILTIN(__builtin_altivec_vminsb, "V16ScV16ScV16Sc", "")
-BUILTIN(__builtin_altivec_vminub, "V16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vminsh, "V8SsV8SsV8Ss", "")
-BUILTIN(__builtin_altivec_vminuh, "V8UsV8UsV8Us", "")
-BUILTIN(__builtin_altivec_vminsw, "V4SiV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vminuw, "V4UiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vminsd, "V2LLiV2LLiV2LLi", "")
-BUILTIN(__builtin_altivec_vminud, "V2ULLiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vminfp, "V4fV4fV4f", "")
-
-BUILTIN(__builtin_altivec_mtvscr, "vV4i", "")
-
-BUILTIN(__builtin_altivec_vrefp, "V4fV4f", "")
-
-BUILTIN(__builtin_altivec_vrlb, "V16cV16cV16Uc", "")
-BUILTIN(__builtin_altivec_vrlh, "V8sV8sV8Us", "")
-BUILTIN(__builtin_altivec_vrlw, "V4iV4iV4Ui", "")
-BUILTIN(__builtin_altivec_vrld, "V2LLiV2LLiV2ULLi", "")
-
-BUILTIN(__builtin_altivec_vsel_4si, "V4iV4iV4iV4Ui", "")
-
-BUILTIN(__builtin_altivec_vsl, "V4iV4iV4i", "")
-BUILTIN(__builtin_altivec_vslo, "V4iV4iV4i", "")
-
-BUILTIN(__builtin_altivec_vsrab, "V16cV16cV16Uc", "")
-BUILTIN(__builtin_altivec_vsrah, "V8sV8sV8Us", "")
-BUILTIN(__builtin_altivec_vsraw, "V4iV4iV4Ui", "")
-
-BUILTIN(__builtin_altivec_vsr, "V4iV4iV4i", "")
-BUILTIN(__builtin_altivec_vsro, "V4iV4iV4i", "")
-
-BUILTIN(__builtin_altivec_vrfin, "V4fV4f", "")
-
-BUILTIN(__builtin_altivec_vrsqrtefp, "V4fV4f", "")
-
-BUILTIN(__builtin_altivec_vsubcuw, "V4UiV4UiV4Ui", "")
-
-BUILTIN(__builtin_altivec_vsum4sbs, "V4SiV16ScV4Si", "")
-BUILTIN(__builtin_altivec_vsum4ubs, "V4UiV16UcV4Ui", "")
-BUILTIN(__builtin_altivec_vsum4shs, "V4SiV8SsV4Si", "")
-
-BUILTIN(__builtin_altivec_vsum2sws, "V4SiV4SiV4Si", "")
-
-BUILTIN(__builtin_altivec_vsumsws, "V4SiV4SiV4Si", "")
-
-BUILTIN(__builtin_altivec_vrfiz, "V4fV4f", "")
-
-BUILTIN(__builtin_altivec_vupkhsb, "V8sV16c", "")
-BUILTIN(__builtin_altivec_vupkhpx, "V4UiV8s", "")
-BUILTIN(__builtin_altivec_vupkhsh, "V4iV8s", "")
-BUILTIN(__builtin_altivec_vupkhsw, "V2LLiV4i", "")
-
-BUILTIN(__builtin_altivec_vupklsb, "V8sV16c", "")
-BUILTIN(__builtin_altivec_vupklpx, "V4UiV8s", "")
-BUILTIN(__builtin_altivec_vupklsh, "V4iV8s", "")
-BUILTIN(__builtin_altivec_vupklsw, "V2LLiV4i", "")
-
-BUILTIN(__builtin_altivec_vcmpbfp_p, "iiV4fV4f", "")
-
-BUILTIN(__builtin_altivec_vcmpgefp_p, "iiV4fV4f", "")
-
-BUILTIN(__builtin_altivec_vcmpequb_p, "iiV16cV16c", "")
-BUILTIN(__builtin_altivec_vcmpequh_p, "iiV8sV8s", "")
-BUILTIN(__builtin_altivec_vcmpequw_p, "iiV4iV4i", "")
-BUILTIN(__builtin_altivec_vcmpequd_p, "iiV2LLiV2LLi", "")
-BUILTIN(__builtin_altivec_vcmpeqfp_p, "iiV4fV4f", "")
-
-BUILTIN(__builtin_altivec_vcmpneb_p, "iiV16cV16c", "")
-BUILTIN(__builtin_altivec_vcmpneh_p, "iiV8sV8s", "")
-BUILTIN(__builtin_altivec_vcmpnew_p, "iiV4iV4i", "")
-BUILTIN(__builtin_altivec_vcmpned_p, "iiV2LLiV2LLi", "")
-
-BUILTIN(__builtin_altivec_vcmpgtsb_p, "iiV16ScV16Sc", "")
-BUILTIN(__builtin_altivec_vcmpgtub_p, "iiV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vcmpgtsh_p, "iiV8SsV8Ss", "")
-BUILTIN(__builtin_altivec_vcmpgtuh_p, "iiV8UsV8Us", "")
-BUILTIN(__builtin_altivec_vcmpgtsw_p, "iiV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vcmpgtuw_p, "iiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vcmpgtsd_p, "iiV2LLiV2LLi", "")
-BUILTIN(__builtin_altivec_vcmpgtud_p, "iiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vcmpgtfp_p, "iiV4fV4f", "")
+TARGET_BUILTIN(__builtin_altivec_vcmpequq, "V1LLLiV1ULLLiV1ULLLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtsq, "V1LLLiV1SLLLiV1SLLLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtuq, "V1LLLiV1ULLLiV1ULLLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpequq_p, "iiV1ULLLiV1LLLi", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtsq_p, "iiV1SLLLiV1SLLLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtuq_p, "iiV1ULLLiV1ULLLi", "",
+ "power10-vector")
+
+TARGET_BUILTIN(__builtin_altivec_vmaxsb, "V16ScV16ScV16Sc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmaxub, "V16UcV16UcV16Uc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmaxsh, "V8SsV8SsV8Ss", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmaxuh, "V8UsV8UsV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmaxsw, "V4SiV4SiV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmaxuw, "V4UiV4UiV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vmaxsd, "V2LLiV2LLiV2LLi", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vmaxud, "V2ULLiV2ULLiV2ULLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vmaxfp, "V4fV4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_mfvscr, "V8Us", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vminsb, "V16ScV16ScV16Sc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vminub, "V16UcV16UcV16Uc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vminsh, "V8SsV8SsV8Ss", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vminuh, "V8UsV8UsV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vminsw, "V4SiV4SiV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vminuw, "V4UiV4UiV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vminsd, "V2LLiV2LLiV2LLi", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vminud, "V2ULLiV2ULLiV2ULLi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vminfp, "V4fV4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_mtvscr, "vV4i", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vrefp, "V4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vrlb, "V16cV16cV16Uc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vrlh, "V8sV8sV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vrlw, "V4iV4iV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vrld, "V2LLiV2LLiV2ULLi", "", "power8-vector")
+
+TARGET_BUILTIN(__builtin_altivec_vsel_4si, "V4iV4iV4iV4Ui", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vsl, "V4iV4iV4i", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vslo, "V4iV4iV4i", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vsrab, "V16cV16cV16Uc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vsrah, "V8sV8sV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vsraw, "V4iV4iV4Ui", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vsr, "V4iV4iV4i", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vsro, "V4iV4iV4i", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vrfin, "V4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vrsqrtefp, "V4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vsubcuw, "V4UiV4UiV4Ui", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vsum4sbs, "V4SiV16ScV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vsum4ubs, "V4UiV16UcV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vsum4shs, "V4SiV8SsV4Si", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vsum2sws, "V4SiV4SiV4Si", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vsumsws, "V4SiV4SiV4Si", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vrfiz, "V4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vupkhsb, "V8sV16c", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vupkhpx, "V4UiV8s", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vupkhsh, "V4iV8s", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vupkhsw, "V2LLiV4i", "", "power8-vector")
+
+TARGET_BUILTIN(__builtin_altivec_vupklsb, "V8sV16c", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vupklpx, "V4UiV8s", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vupklsh, "V4iV8s", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vupklsw, "V2LLiV4i", "", "power8-vector")
+
+TARGET_BUILTIN(__builtin_altivec_vcmpbfp_p, "iiV4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vcmpgefp_p, "iiV4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vcmpequb_p, "iiV16cV16c", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpequh_p, "iiV8sV8s", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpequw_p, "iiV4iV4i", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpequd_p, "iiV2LLiV2LLi", "", "vsx")
+TARGET_BUILTIN(__builtin_altivec_vcmpeqfp_p, "iiV4fV4f", "", "altivec")
+
+TARGET_BUILTIN(__builtin_altivec_vcmpneb_p, "iiV16cV16c", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpneh_p, "iiV8sV8s", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpnew_p, "iiV4iV4i", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vcmpned_p, "iiV2LLiV2LLi", "", "vsx")
+
+TARGET_BUILTIN(__builtin_altivec_vcmpgtsb_p, "iiV16ScV16Sc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtub_p, "iiV16UcV16Uc", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtsh_p, "iiV8SsV8Ss", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtuh_p, "iiV8UsV8Us", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtsw_p, "iiV4SiV4Si", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtuw_p, "iiV4UiV4Ui", "", "altivec")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtsd_p, "iiV2LLiV2LLi", "", "vsx")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtud_p, "iiV2ULLiV2ULLi", "", "vsx")
+TARGET_BUILTIN(__builtin_altivec_vcmpgtfp_p, "iiV4fV4f", "", "altivec")
-BUILTIN(__builtin_altivec_vgbbd, "V16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vbpermq, "V2ULLiV16UcV16Uc", "")
+TARGET_BUILTIN(__builtin_altivec_vgbbd, "V16UcV16Uc", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vbpermq, "V2ULLiV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vbpermd, "V2ULLiV2ULLiV16Uc", "",
+ "power9-vector")
// P8 Crypto built-ins.
-BUILTIN(__builtin_altivec_crypto_vsbox, "V2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_crypto_vpermxor, "V16UcV16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_crypto_vshasigmaw, "V4UiV4UiIiIi", "")
-BUILTIN(__builtin_altivec_crypto_vshasigmad, "V2ULLiV2ULLiIiIi", "")
-BUILTIN(__builtin_altivec_crypto_vcipher, "V2ULLiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_crypto_vcipherlast, "V2ULLiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_crypto_vncipher, "V2ULLiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_crypto_vncipherlast, "V2ULLiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_crypto_vpmsumb, "V16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_crypto_vpmsumh, "V8UsV8UsV8Us", "")
-BUILTIN(__builtin_altivec_crypto_vpmsumw, "V4UiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_crypto_vpmsumd, "V2ULLiV2ULLiV2ULLi", "")
-
-BUILTIN(__builtin_altivec_vclzb, "V16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vclzh, "V8UsV8Us", "")
-BUILTIN(__builtin_altivec_vclzw, "V4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vclzd, "V2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vctzb, "V16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vctzh, "V8UsV8Us", "")
-BUILTIN(__builtin_altivec_vctzw, "V4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vctzd, "V2ULLiV2ULLi", "")
-
-BUILTIN(__builtin_altivec_vclzlsbb, "SiV16Uc", "")
-BUILTIN(__builtin_altivec_vctzlsbb, "SiV16Uc", "")
-BUILTIN(__builtin_altivec_vprtybw, "V4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vprtybd, "V2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vprtybq, "V1ULLLiV1ULLLi", "")
+TARGET_BUILTIN(__builtin_altivec_crypto_vsbox, "V16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vpermxor, "V16UcV16UcV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vpermxor_be, "V16UcV16UcV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vshasigmaw, "V4UiV4UiIiIi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vshasigmad, "V2ULLiV2ULLiIiIi", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vcipher, "V16UcV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vcipherlast, "V16UcV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vncipher, "V16UcV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vncipherlast, "V16UcV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vpmsumb, "V16UcV16UcV16Uc", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vpmsumh, "V8UsV8UsV8Us", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vpmsumw, "V4UiV4UiV4Ui", "",
+ "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_crypto_vpmsumd, "V2ULLiV2ULLiV2ULLi", "",
+ "power8-vector")
+
+TARGET_BUILTIN(__builtin_altivec_vclzb, "V16UcV16Uc", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vclzh, "V8UsV8Us", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vclzw, "V4UiV4Ui", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vclzd, "V2ULLiV2ULLi", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vctzb, "V16UcV16Uc", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vctzh, "V8UsV8Us", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vctzw, "V4UiV4Ui", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vctzd, "V2ULLiV2ULLi", "", "power9-vector")
+
+// P8 BCD builtins.
+TARGET_BUILTIN(__builtin_ppc_bcdadd, "V16UcV16UcV16UcIi", "",
+ "isa-v207-instructions")
+TARGET_BUILTIN(__builtin_ppc_bcdsub, "V16UcV16UcV16UcIi", "",
+ "isa-v207-instructions")
+TARGET_BUILTIN(__builtin_ppc_bcdadd_p, "iiV16UcV16Uc", "",
+ "isa-v207-instructions")
+TARGET_BUILTIN(__builtin_ppc_bcdsub_p, "iiV16UcV16Uc", "",
+ "isa-v207-instructions")
+
+TARGET_BUILTIN(__builtin_altivec_vclzlsbb, "SiV16Uc", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vctzlsbb, "SiV16Uc", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vprtybw, "V4UiV4Ui", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vprtybd, "V2ULLiV2ULLi", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vprtybq, "V1ULLLiV1ULLLi", "", "power9-vector")
// Vector population count built-ins
-BUILTIN(__builtin_altivec_vpopcntb, "V16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vpopcnth, "V8UsV8Us", "")
-BUILTIN(__builtin_altivec_vpopcntw, "V4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vpopcntd, "V2ULLiV2ULLi", "")
+TARGET_BUILTIN(__builtin_altivec_vpopcntb, "V16UcV16Uc", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vpopcnth, "V8UsV8Us", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vpopcntw, "V4UiV4Ui", "", "power8-vector")
+TARGET_BUILTIN(__builtin_altivec_vpopcntd, "V2ULLiV2ULLi", "", "power8-vector")
// Absolute difference built-ins
-BUILTIN(__builtin_altivec_vabsdub, "V16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vabsduh, "V8UsV8UsV8Us", "")
-BUILTIN(__builtin_altivec_vabsduw, "V4UiV4UiV4Ui", "")
+TARGET_BUILTIN(__builtin_altivec_vabsdub, "V16UcV16UcV16Uc", "",
+ "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vabsduh, "V8UsV8UsV8Us", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vabsduw, "V4UiV4UiV4Ui", "", "power9-vector")
// P9 Shift built-ins.
-BUILTIN(__builtin_altivec_vslv, "V16UcV16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vsrv, "V16UcV16UcV16Uc", "")
+TARGET_BUILTIN(__builtin_altivec_vslv, "V16UcV16UcV16Uc", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vsrv, "V16UcV16UcV16Uc", "", "power9-vector")
// P9 Vector rotate built-ins
-BUILTIN(__builtin_altivec_vrlwmi, "V4UiV4UiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vrldmi, "V2ULLiV2ULLiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vrlwnm, "V4UiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vrldnm, "V2ULLiV2ULLiV2ULLi", "")
+TARGET_BUILTIN(__builtin_altivec_vrlwmi, "V4UiV4UiV4UiV4Ui", "",
+ "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vrldmi, "V2ULLiV2ULLiV2ULLiV2ULLi", "",
+ "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vrlwnm, "V4UiV4UiV4Ui", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vrldnm, "V2ULLiV2ULLiV2ULLi", "",
+ "power9-vector")
// P9 Vector extend sign builtins.
-BUILTIN(__builtin_altivec_vextsb2w, "V4SiV16Sc", "")
-BUILTIN(__builtin_altivec_vextsb2d, "V2SLLiV16Sc", "")
-BUILTIN(__builtin_altivec_vextsh2w, "V4SiV8Ss", "")
-BUILTIN(__builtin_altivec_vextsh2d, "V2SLLiV8Ss", "")
-BUILTIN(__builtin_altivec_vextsw2d, "V2SLLiV4Si", "")
+TARGET_BUILTIN(__builtin_altivec_vextsb2w, "V4SiV16Sc", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vextsb2d, "V2SLLiV16Sc", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vextsh2w, "V4SiV8Ss", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vextsh2d, "V2SLLiV8Ss", "", "power9-vector")
+TARGET_BUILTIN(__builtin_altivec_vextsw2d, "V2SLLiV4Si", "", "power9-vector")
// P10 Vector extend sign builtins.
-BUILTIN(__builtin_altivec_vextsd2q, "V1SLLLiV2SLLi", "")
+TARGET_BUILTIN(__builtin_altivec_vextsd2q, "V1SLLLiV2SLLi", "",
+ "power10-vector")
// P10 Vector Extract with Mask built-ins.
-BUILTIN(__builtin_altivec_vextractbm, "UiV16Uc", "")
-BUILTIN(__builtin_altivec_vextracthm, "UiV8Us", "")
-BUILTIN(__builtin_altivec_vextractwm, "UiV4Ui", "")
-BUILTIN(__builtin_altivec_vextractdm, "UiV2ULLi", "")
-BUILTIN(__builtin_altivec_vextractqm, "UiV1ULLLi", "")
+TARGET_BUILTIN(__builtin_altivec_vextractbm, "UiV16Uc", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vextracthm, "UiV8Us", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vextractwm, "UiV4Ui", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vextractdm, "UiV2ULLi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vextractqm, "UiV1ULLLi", "", "power10-vector")
// P10 Vector Divide Extended built-ins.
-BUILTIN(__builtin_altivec_vdivesw, "V4SiV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vdiveuw, "V4UiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vdivesd, "V2LLiV2LLiV2LLi", "")
-BUILTIN(__builtin_altivec_vdiveud, "V2ULLiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vdivesq, "V1SLLLiV1SLLLiV1SLLLi", "")
-BUILTIN(__builtin_altivec_vdiveuq, "V1ULLLiV1ULLLiV1ULLLi", "")
+TARGET_BUILTIN(__builtin_altivec_vdivesw, "V4SiV4SiV4Si", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vdiveuw, "V4UiV4UiV4Ui", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vdivesd, "V2LLiV2LLiV2LLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vdiveud, "V2ULLiV2ULLiV2ULLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vdivesq, "V1SLLLiV1SLLLiV1SLLLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vdiveuq, "V1ULLLiV1ULLLiV1ULLLi", "",
+ "power10-vector")
// P10 Vector Multiply High built-ins.
-BUILTIN(__builtin_altivec_vmulhsw, "V4SiV4SiV4Si", "")
-BUILTIN(__builtin_altivec_vmulhuw, "V4UiV4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vmulhsd, "V2LLiV2LLiV2LLi", "")
-BUILTIN(__builtin_altivec_vmulhud, "V2ULLiV2ULLiV2ULLi", "")
+TARGET_BUILTIN(__builtin_altivec_vmulhsw, "V4SiV4SiV4Si", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vmulhuw, "V4UiV4UiV4Ui", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vmulhsd, "V2LLiV2LLiV2LLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vmulhud, "V2ULLiV2ULLiV2ULLi", "",
+ "power10-vector")
// P10 Vector Expand with Mask built-ins.
-BUILTIN(__builtin_altivec_vexpandbm, "V16UcV16Uc", "")
-BUILTIN(__builtin_altivec_vexpandhm, "V8UsV8Us", "")
-BUILTIN(__builtin_altivec_vexpandwm, "V4UiV4Ui", "")
-BUILTIN(__builtin_altivec_vexpanddm, "V2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vexpandqm, "V1ULLLiV1ULLLi", "")
+TARGET_BUILTIN(__builtin_altivec_vexpandbm, "V16UcV16Uc", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vexpandhm, "V8UsV8Us", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vexpandwm, "V4UiV4Ui", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vexpanddm, "V2ULLiV2ULLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vexpandqm, "V1ULLLiV1ULLLi", "",
+ "power10-vector")
// P10 Vector Count with Mask built-ins.
-BUILTIN(__builtin_altivec_vcntmbb, "ULLiV16UcUi", "")
-BUILTIN(__builtin_altivec_vcntmbh, "ULLiV8UsUi", "")
-BUILTIN(__builtin_altivec_vcntmbw, "ULLiV4UiUi", "")
-BUILTIN(__builtin_altivec_vcntmbd, "ULLiV2ULLiUi", "")
+TARGET_BUILTIN(__builtin_altivec_vcntmbb, "ULLiV16UcUi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vcntmbh, "ULLiV8UsUi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vcntmbw, "ULLiV4UiUi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vcntmbd, "ULLiV2ULLiUi", "", "power10-vector")
// P10 Move to VSR with Mask built-ins.
-BUILTIN(__builtin_altivec_mtvsrbm, "V16UcULLi", "")
-BUILTIN(__builtin_altivec_mtvsrhm, "V8UsULLi", "")
-BUILTIN(__builtin_altivec_mtvsrwm, "V4UiULLi", "")
-BUILTIN(__builtin_altivec_mtvsrdm, "V2ULLiULLi", "")
-BUILTIN(__builtin_altivec_mtvsrqm, "V1ULLLiULLi", "")
+TARGET_BUILTIN(__builtin_altivec_mtvsrbm, "V16UcULLi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_mtvsrhm, "V8UsULLi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_mtvsrwm, "V4UiULLi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_mtvsrdm, "V2ULLiULLi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_mtvsrqm, "V1ULLLiULLi", "", "power10-vector")
// P10 Vector Parallel Bits built-ins.
-BUILTIN(__builtin_altivec_vpdepd, "V2ULLiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vpextd, "V2ULLiV2ULLiV2ULLi", "")
+TARGET_BUILTIN(__builtin_altivec_vpdepd, "V2ULLiV2ULLiV2ULLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vpextd, "V2ULLiV2ULLiV2ULLi", "",
+ "power10-vector")
// P10 Vector String Isolate Built-ins.
-BUILTIN(__builtin_altivec_vstribr, "V16cV16c", "")
-BUILTIN(__builtin_altivec_vstribl, "V16cV16c", "")
-BUILTIN(__builtin_altivec_vstrihr, "V8sV8s", "")
-BUILTIN(__builtin_altivec_vstrihl, "V8sV8s", "")
-BUILTIN(__builtin_altivec_vstribr_p, "iiV16c", "")
-BUILTIN(__builtin_altivec_vstribl_p, "iiV16c", "")
-BUILTIN(__builtin_altivec_vstrihr_p, "iiV8s", "")
-BUILTIN(__builtin_altivec_vstrihl_p, "iiV8s", "")
+TARGET_BUILTIN(__builtin_altivec_vstribr, "V16UcV16Uc", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vstribl, "V16UcV16Uc", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vstrihr, "V8sV8s", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vstrihl, "V8sV8s", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vstribr_p, "iiV16Uc", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vstribl_p, "iiV16Uc", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vstrihr_p, "iiV8s", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vstrihl_p, "iiV8s", "", "power10-vector")
// P10 Vector Centrifuge built-in.
-BUILTIN(__builtin_altivec_vcfuged, "V2ULLiV2ULLiV2ULLi", "")
+TARGET_BUILTIN(__builtin_altivec_vcfuged, "V2ULLiV2ULLiV2ULLi", "",
+ "power10-vector")
// P10 Vector Gather Every N-th Bit built-in.
-BUILTIN(__builtin_altivec_vgnb, "ULLiV1ULLLiIi", "")
+TARGET_BUILTIN(__builtin_altivec_vgnb, "ULLiV1ULLLiIi", "", "power10-vector")
// P10 Vector Clear Bytes built-ins.
-BUILTIN(__builtin_altivec_vclrlb, "V16cV16cUi", "")
-BUILTIN(__builtin_altivec_vclrrb, "V16cV16cUi", "")
+TARGET_BUILTIN(__builtin_altivec_vclrlb, "V16UcV16UcUi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vclrrb, "V16UcV16UcUi", "", "power10-vector")
// P10 Vector Count Leading / Trailing Zeroes under bit Mask built-ins.
-BUILTIN(__builtin_altivec_vclzdm, "V2ULLiV2ULLiV2ULLi", "")
-BUILTIN(__builtin_altivec_vctzdm, "V2ULLiV2ULLiV2ULLi", "")
+TARGET_BUILTIN(__builtin_altivec_vclzdm, "V2ULLiV2ULLiV2ULLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vctzdm, "V2ULLiV2ULLiV2ULLi", "",
+ "power10-vector")
// P10 Vector Shift built-ins.
-BUILTIN(__builtin_altivec_vsldbi, "V16UcV16UcV16UcIi", "")
-BUILTIN(__builtin_altivec_vsrdbi, "V16UcV16UcV16UcIi", "")
+TARGET_BUILTIN(__builtin_altivec_vsldbi, "V16UcV16UcV16UcIi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vsrdbi, "V16UcV16UcV16UcIi", "",
+ "power10-vector")
// P10 Vector Insert built-ins.
-BUILTIN(__builtin_altivec_vinsblx, "V16UcV16UcUiUi", "")
-BUILTIN(__builtin_altivec_vinsbrx, "V16UcV16UcUiUi", "")
-BUILTIN(__builtin_altivec_vinshlx, "V8UsV8UsUiUi", "")
-BUILTIN(__builtin_altivec_vinshrx, "V8UsV8UsUiUi", "")
-BUILTIN(__builtin_altivec_vinswlx, "V4UiV4UiUiUi", "")
-BUILTIN(__builtin_altivec_vinswrx, "V4UiV4UiUiUi", "")
-BUILTIN(__builtin_altivec_vinsdlx, "V2ULLiV2ULLiULLiULLi", "")
-BUILTIN(__builtin_altivec_vinsdrx, "V2ULLiV2ULLiULLiULLi", "")
-BUILTIN(__builtin_altivec_vinsbvlx, "V16UcV16UcUiV16Uc", "")
-BUILTIN(__builtin_altivec_vinsbvrx, "V16UcV16UcUiV16Uc", "")
-BUILTIN(__builtin_altivec_vinshvlx, "V8UsV8UsUiV8Us", "")
-BUILTIN(__builtin_altivec_vinshvrx, "V8UsV8UsUiV8Us", "")
-BUILTIN(__builtin_altivec_vinswvlx, "V4UiV4UiUiV4Ui", "")
-BUILTIN(__builtin_altivec_vinswvrx, "V4UiV4UiUiV4Ui", "")
-BUILTIN(__builtin_altivec_vec_replace_elt, "V4UiV4UiUiIi", "t")
-BUILTIN(__builtin_altivec_vec_replace_unaligned, "V4UiV4UiUiIi", "t")
+TARGET_BUILTIN(__builtin_altivec_vinsblx, "V16UcV16UcUiUi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinsbrx, "V16UcV16UcUiUi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinshlx, "V8UsV8UsUiUi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinshrx, "V8UsV8UsUiUi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinswlx, "V4UiV4UiUiUi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinswrx, "V4UiV4UiUiUi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinsdlx, "V2ULLiV2ULLiULLiULLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinsdrx, "V2ULLiV2ULLiULLiULLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinsbvlx, "V16UcV16UcUiV16Uc", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinsbvrx, "V16UcV16UcUiV16Uc", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinshvlx, "V8UsV8UsUiV8Us", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinshvrx, "V8UsV8UsUiV8Us", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinswvlx, "V4UiV4UiUiV4Ui", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinswvrx, "V4UiV4UiUiV4Ui", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinsw, "V16UcV16UcUiIi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinsd, "V16UcV16UcULLiIi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinsw_elt, "V16UcV16UcUiiC", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vinsd_elt, "V16UcV16UcULLiiC", "",
+ "power10-vector")
// P10 Vector Extract built-ins.
-BUILTIN(__builtin_altivec_vextdubvlx, "V2ULLiV16UcV16UcUi", "")
-BUILTIN(__builtin_altivec_vextdubvrx, "V2ULLiV16UcV16UcUi", "")
-BUILTIN(__builtin_altivec_vextduhvlx, "V2ULLiV8UsV8UsUi", "")
-BUILTIN(__builtin_altivec_vextduhvrx, "V2ULLiV8UsV8UsUi", "")
-BUILTIN(__builtin_altivec_vextduwvlx, "V2ULLiV4UiV4UiUi", "")
-BUILTIN(__builtin_altivec_vextduwvrx, "V2ULLiV4UiV4UiUi", "")
-BUILTIN(__builtin_altivec_vextddvlx, "V2ULLiV2ULLiV2ULLiUi", "")
-BUILTIN(__builtin_altivec_vextddvrx, "V2ULLiV2ULLiV2ULLiUi", "")
+TARGET_BUILTIN(__builtin_altivec_vextdubvlx, "V2ULLiV16UcV16UcUi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vextdubvrx, "V2ULLiV16UcV16UcUi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vextduhvlx, "V2ULLiV8UsV8UsUi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vextduhvrx, "V2ULLiV8UsV8UsUi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vextduwvlx, "V2ULLiV4UiV4UiUi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vextduwvrx, "V2ULLiV4UiV4UiUi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vextddvlx, "V2ULLiV2ULLiV2ULLiUi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vextddvrx, "V2ULLiV2ULLiV2ULLiUi", "",
+ "power10-vector")
// P10 Vector rotate built-ins.
-BUILTIN(__builtin_altivec_vrlqmi, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi", "")
-BUILTIN(__builtin_altivec_vrlqnm, "V1ULLLiV1ULLLiV1ULLLi", "")
+TARGET_BUILTIN(__builtin_altivec_vrlqmi, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_altivec_vrlqnm, "V1ULLLiV1ULLLiV1ULLLi", "",
+ "power10-vector")
// VSX built-ins.
-BUILTIN(__builtin_vsx_lxvd2x, "V2dLivC*", "")
-BUILTIN(__builtin_vsx_lxvw4x, "V4iLivC*", "")
-BUILTIN(__builtin_vsx_lxvd2x_be, "V2dSLLivC*", "")
-BUILTIN(__builtin_vsx_lxvw4x_be, "V4iSLLivC*", "")
+TARGET_BUILTIN(__builtin_vsx_lxvd2x, "V2dLivC*", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_lxvw4x, "V4iLivC*", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_lxvd2x_be, "V2dSLLivC*", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_lxvw4x_be, "V4iSLLivC*", "", "vsx")
-BUILTIN(__builtin_vsx_stxvd2x, "vV2dLiv*", "")
-BUILTIN(__builtin_vsx_stxvw4x, "vV4iLiv*", "")
-BUILTIN(__builtin_vsx_stxvd2x_be, "vV2dSLLivC*", "")
-BUILTIN(__builtin_vsx_stxvw4x_be, "vV4iSLLivC*", "")
+TARGET_BUILTIN(__builtin_vsx_stxvd2x, "vV2dLiv*", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_stxvw4x, "vV4iLiv*", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_stxvd2x_be, "vV2dSLLivC*", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_stxvw4x_be, "vV4iSLLivC*", "", "vsx")
-BUILTIN(__builtin_vsx_lxvl, "V4ivC*ULLi", "")
-BUILTIN(__builtin_vsx_lxvll, "V4ivC*ULLi", "")
-BUILTIN(__builtin_vsx_stxvl, "vV4iv*ULLi", "")
-BUILTIN(__builtin_vsx_stxvll, "vV4iv*ULLi", "")
-BUILTIN(__builtin_vsx_ldrmb, "V16UcCc*Ii", "")
-BUILTIN(__builtin_vsx_strmb, "vCc*IiV16Uc", "")
+TARGET_BUILTIN(__builtin_vsx_lxvl, "V4ivC*ULLi", "", "power9-vector")
+TARGET_BUILTIN(__builtin_vsx_lxvll, "V4ivC*ULLi", "", "power9-vector")
+TARGET_BUILTIN(__builtin_vsx_stxvl, "vV4iv*ULLi", "", "power9-vector")
+TARGET_BUILTIN(__builtin_vsx_stxvll, "vV4iv*ULLi", "", "power9-vector")
+TARGET_BUILTIN(__builtin_vsx_ldrmb, "V16UcCc*Ii", "", "isa-v207-instructions")
+TARGET_BUILTIN(__builtin_vsx_strmb, "vCc*IiV16Uc", "", "isa-v207-instructions")
-BUILTIN(__builtin_vsx_xvmaxdp, "V2dV2dV2d", "")
-BUILTIN(__builtin_vsx_xvmaxsp, "V4fV4fV4f", "")
-BUILTIN(__builtin_vsx_xsmaxdp, "ddd", "")
+TARGET_BUILTIN(__builtin_vsx_xvmaxdp, "V2dV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvmaxsp, "V4fV4fV4f", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xsmaxdp, "ddd", "", "vsx")
-BUILTIN(__builtin_vsx_xvmindp, "V2dV2dV2d", "")
-BUILTIN(__builtin_vsx_xvminsp, "V4fV4fV4f", "")
-BUILTIN(__builtin_vsx_xsmindp, "ddd", "")
+TARGET_BUILTIN(__builtin_vsx_xvmindp, "V2dV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvminsp, "V4fV4fV4f", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xsmindp, "ddd", "", "vsx")
-BUILTIN(__builtin_vsx_xvdivdp, "V2dV2dV2d", "")
-BUILTIN(__builtin_vsx_xvdivsp, "V4fV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvdivdp, "V2dV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvdivsp, "V4fV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvrdpip, "V2dV2d", "")
-BUILTIN(__builtin_vsx_xvrspip, "V4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvrdpip, "V2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvrspip, "V4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvcmpeqdp, "V2ULLiV2dV2d", "")
-BUILTIN(__builtin_vsx_xvcmpeqsp, "V4UiV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvcmpeqdp, "V2ULLiV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcmpeqsp, "V4UiV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvcmpeqdp_p, "iiV2dV2d", "")
-BUILTIN(__builtin_vsx_xvcmpeqsp_p, "iiV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvcmpeqdp_p, "iiV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcmpeqsp_p, "iiV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvcmpgedp, "V2ULLiV2dV2d", "")
-BUILTIN(__builtin_vsx_xvcmpgesp, "V4UiV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvcmpgedp, "V2ULLiV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcmpgesp, "V4UiV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvcmpgedp_p, "iiV2dV2d", "")
-BUILTIN(__builtin_vsx_xvcmpgesp_p, "iiV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvcmpgedp_p, "iiV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcmpgesp_p, "iiV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvcmpgtdp, "V2ULLiV2dV2d", "")
-BUILTIN(__builtin_vsx_xvcmpgtsp, "V4UiV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvcmpgtdp, "V2ULLiV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcmpgtsp, "V4UiV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvcmpgtdp_p, "iiV2dV2d", "")
-BUILTIN(__builtin_vsx_xvcmpgtsp_p, "iiV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvcmpgtdp_p, "iiV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcmpgtsp_p, "iiV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvrdpim, "V2dV2d", "")
-BUILTIN(__builtin_vsx_xvrspim, "V4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvrdpim, "V2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvrspim, "V4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvrdpi, "V2dV2d", "")
-BUILTIN(__builtin_vsx_xvrspi, "V4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvrdpi, "V2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvrspi, "V4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvrdpic, "V2dV2d", "")
-BUILTIN(__builtin_vsx_xvrspic, "V4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvrdpic, "V2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvrspic, "V4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvrdpiz, "V2dV2d", "")
-BUILTIN(__builtin_vsx_xvrspiz, "V4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvrdpiz, "V2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvrspiz, "V4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvmaddadp, "V2dV2dV2dV2d", "")
-BUILTIN(__builtin_vsx_xvmaddasp, "V4fV4fV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvmaddadp, "V2dV2dV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvmaddasp, "V4fV4fV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvmsubadp, "V2dV2dV2dV2d", "")
-BUILTIN(__builtin_vsx_xvmsubasp, "V4fV4fV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvmsubadp, "V2dV2dV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvmsubasp, "V4fV4fV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvmuldp, "V2dV2dV2d", "")
-BUILTIN(__builtin_vsx_xvmulsp, "V4fV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvmuldp, "V2dV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvmulsp, "V4fV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvnmaddadp, "V2dV2dV2dV2d", "")
-BUILTIN(__builtin_vsx_xvnmaddasp, "V4fV4fV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvnmaddadp, "V2dV2dV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvnmaddasp, "V4fV4fV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvnmsubadp, "V2dV2dV2dV2d", "")
-BUILTIN(__builtin_vsx_xvnmsubasp, "V4fV4fV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvnmsubadp, "V2dV2dV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvnmsubasp, "V4fV4fV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvredp, "V2dV2d", "")
-BUILTIN(__builtin_vsx_xvresp, "V4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvredp, "V2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvresp, "V4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvrsqrtedp, "V2dV2d", "")
-BUILTIN(__builtin_vsx_xvrsqrtesp, "V4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvrsqrtedp, "V2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvrsqrtesp, "V4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvsqrtdp, "V2dV2d", "")
-BUILTIN(__builtin_vsx_xvsqrtsp, "V4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvsqrtdp, "V2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvsqrtsp, "V4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xxleqv, "V4UiV4UiV4Ui", "")
+TARGET_BUILTIN(__builtin_vsx_xxleqv, "V4UiV4UiV4Ui", "", "power8-vector")
-BUILTIN(__builtin_vsx_xvcpsgndp, "V2dV2dV2d", "")
-BUILTIN(__builtin_vsx_xvcpsgnsp, "V4fV4fV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvcpsgndp, "V2dV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcpsgnsp, "V4fV4fV4f", "", "vsx")
-BUILTIN(__builtin_vsx_xvabssp, "V4fV4f", "")
-BUILTIN(__builtin_vsx_xvabsdp, "V2dV2d", "")
+TARGET_BUILTIN(__builtin_vsx_xvabssp, "V4fV4f", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvabsdp, "V2dV2d", "", "vsx")
-BUILTIN(__builtin_vsx_xxgenpcvbm, "V16UcV16Uci", "")
-BUILTIN(__builtin_vsx_xxgenpcvhm, "V8UsV8Usi", "")
-BUILTIN(__builtin_vsx_xxgenpcvwm, "V4UiV4Uii", "")
-BUILTIN(__builtin_vsx_xxgenpcvdm, "V2ULLiV2ULLii", "")
+TARGET_BUILTIN(__builtin_vsx_xxgenpcvbm, "V16UcV16Uci", "", "power10-vector")
+TARGET_BUILTIN(__builtin_vsx_xxgenpcvhm, "V8UsV8Usi", "", "power10-vector")
+TARGET_BUILTIN(__builtin_vsx_xxgenpcvwm, "V4UiV4Uii", "", "power10-vector")
+TARGET_BUILTIN(__builtin_vsx_xxgenpcvdm, "V2ULLiV2ULLii", "", "power10-vector")
// vector Insert/Extract exponent/significand builtins
-BUILTIN(__builtin_vsx_xviexpdp, "V2dV2ULLiV2ULLi", "")
-BUILTIN(__builtin_vsx_xviexpsp, "V4fV4UiV4Ui", "")
-BUILTIN(__builtin_vsx_xvxexpdp, "V2ULLiV2d", "")
-BUILTIN(__builtin_vsx_xvxexpsp, "V4UiV4f", "")
-BUILTIN(__builtin_vsx_xvxsigdp, "V2ULLiV2d", "")
-BUILTIN(__builtin_vsx_xvxsigsp, "V4UiV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xviexpdp, "V2dV2ULLiV2ULLi", "", "power9-vector")
+TARGET_BUILTIN(__builtin_vsx_xviexpsp, "V4fV4UiV4Ui", "", "power9-vector")
+TARGET_BUILTIN(__builtin_vsx_xvxexpdp, "V2ULLiV2d", "", "power9-vector")
+TARGET_BUILTIN(__builtin_vsx_xvxexpsp, "V4UiV4f", "", "power9-vector")
+TARGET_BUILTIN(__builtin_vsx_xvxsigdp, "V2ULLiV2d", "", "power9-vector")
+TARGET_BUILTIN(__builtin_vsx_xvxsigsp, "V4UiV4f", "", "power9-vector")
// Conversion builtins
-BUILTIN(__builtin_vsx_xvcvdpsxws, "V4SiV2d", "")
-BUILTIN(__builtin_vsx_xvcvdpuxws, "V4UiV2d", "")
-BUILTIN(__builtin_vsx_xvcvspsxds, "V2SLLiV4f", "")
-BUILTIN(__builtin_vsx_xvcvspuxds, "V2ULLiV4f", "")
-BUILTIN(__builtin_vsx_xvcvsxwdp, "V2dV4Si", "")
-BUILTIN(__builtin_vsx_xvcvuxwdp, "V2dV4Ui", "")
-BUILTIN(__builtin_vsx_xvcvspdp, "V2dV4f", "")
-BUILTIN(__builtin_vsx_xvcvsxdsp, "V4fV2SLLi", "")
-BUILTIN(__builtin_vsx_xvcvuxdsp, "V4fV2ULLi", "")
-BUILTIN(__builtin_vsx_xvcvdpsp, "V4fV2d", "")
-
-BUILTIN(__builtin_vsx_xvcvsphp, "V4fV4f", "")
-BUILTIN(__builtin_vsx_xvcvhpsp, "V4fV8Us", "")
-
-BUILTIN(__builtin_vsx_xvcvspbf16, "V16UcV16Uc", "")
-BUILTIN(__builtin_vsx_xvcvbf16spn, "V16UcV16Uc", "")
+TARGET_BUILTIN(__builtin_vsx_xvcvdpsxws, "V4SiV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcvdpuxws, "V4UiV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcvspsxds, "V2SLLiV4f", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcvspuxds, "V2ULLiV4f", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcvsxwdp, "V2dV4Si", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcvuxwdp, "V2dV4Ui", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcvspdp, "V2dV4f", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcvsxdsp, "V4fV2SLLi", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcvuxdsp, "V4fV2ULLi", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvcvdpsp, "V4fV2d", "", "vsx")
+
+TARGET_BUILTIN(__builtin_vsx_xvcvsphp, "V4fV4f", "", "power9-vector")
+TARGET_BUILTIN(__builtin_vsx_xvcvhpsp, "V4fV8Us", "", "power9-vector")
+
+TARGET_BUILTIN(__builtin_vsx_xvcvspbf16, "V16UcV16Uc", "", "power10-vector")
+TARGET_BUILTIN(__builtin_vsx_xvcvbf16spn, "V16UcV16Uc", "", "power10-vector")
// Vector Test Data Class builtins
-BUILTIN(__builtin_vsx_xvtstdcdp, "V2ULLiV2dIi", "")
-BUILTIN(__builtin_vsx_xvtstdcsp, "V4UiV4fIi", "")
+TARGET_BUILTIN(__builtin_vsx_xvtstdcdp, "V2ULLiV2dIi", "", "power9-vector")
+TARGET_BUILTIN(__builtin_vsx_xvtstdcsp, "V4UiV4fIi", "", "power9-vector")
-BUILTIN(__builtin_vsx_insertword, "V16UcV4UiV16UcIi", "")
-BUILTIN(__builtin_vsx_extractuword, "V2ULLiV16UcIi", "")
+TARGET_BUILTIN(__builtin_vsx_insertword, "V16UcV4UiV16UcIi", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_extractuword, "V2ULLiV16UcIi", "", "vsx")
-BUILTIN(__builtin_vsx_xxpermdi, "v.", "t")
-BUILTIN(__builtin_vsx_xxsldwi, "v.", "t")
+TARGET_BUILTIN(__builtin_vsx_xxpermdi, "v.", "t", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xxsldwi, "v.", "t", "vsx")
-BUILTIN(__builtin_vsx_xxeval, "V2ULLiV2ULLiV2ULLiV2ULLiIi", "")
+TARGET_BUILTIN(__builtin_vsx_xxeval, "V2ULLiV2ULLiV2ULLiV2ULLiIi", "",
+ "power10-vector")
-BUILTIN(__builtin_vsx_xvtlsbb, "iV16UcUi", "")
+TARGET_BUILTIN(__builtin_vsx_xvtlsbb, "iV16UcUi", "", "power10-vector")
-BUILTIN(__builtin_vsx_xvtdivdp, "iV2dV2d", "")
-BUILTIN(__builtin_vsx_xvtdivsp, "iV4fV4f", "")
-BUILTIN(__builtin_vsx_xvtsqrtdp, "iV2d", "")
-BUILTIN(__builtin_vsx_xvtsqrtsp, "iV4f", "")
+TARGET_BUILTIN(__builtin_vsx_xvtdivdp, "iV2dV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvtdivsp, "iV4fV4f", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvtsqrtdp, "iV2d", "", "vsx")
+TARGET_BUILTIN(__builtin_vsx_xvtsqrtsp, "iV4f", "", "vsx")
// P10 Vector Permute Extended built-in.
-BUILTIN(__builtin_vsx_xxpermx, "V16UcV16UcV16UcV16UcIi", "")
+TARGET_BUILTIN(__builtin_vsx_xxpermx, "V16UcV16UcV16UcV16UcIi", "",
+ "power10-vector")
// P10 Vector Blend built-ins.
-BUILTIN(__builtin_vsx_xxblendvb, "V16UcV16UcV16UcV16Uc", "")
-BUILTIN(__builtin_vsx_xxblendvh, "V8UsV8UsV8UsV8Us", "")
-BUILTIN(__builtin_vsx_xxblendvw, "V4UiV4UiV4UiV4Ui", "")
-BUILTIN(__builtin_vsx_xxblendvd, "V2ULLiV2ULLiV2ULLiV2ULLi", "")
+TARGET_BUILTIN(__builtin_vsx_xxblendvb, "V16UcV16UcV16UcV16Uc", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_vsx_xxblendvh, "V8UsV8UsV8UsV8Us", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_vsx_xxblendvw, "V4UiV4UiV4UiV4Ui", "",
+ "power10-vector")
+TARGET_BUILTIN(__builtin_vsx_xxblendvd, "V2ULLiV2ULLiV2ULLiV2ULLi", "",
+ "power10-vector")
// Float 128 built-ins
-BUILTIN(__builtin_sqrtf128_round_to_odd, "LLdLLd", "")
-BUILTIN(__builtin_addf128_round_to_odd, "LLdLLdLLd", "")
-BUILTIN(__builtin_subf128_round_to_odd, "LLdLLdLLd", "")
-BUILTIN(__builtin_mulf128_round_to_odd, "LLdLLdLLd", "")
-BUILTIN(__builtin_divf128_round_to_odd, "LLdLLdLLd", "")
-BUILTIN(__builtin_fmaf128_round_to_odd, "LLdLLdLLdLLd", "")
-BUILTIN(__builtin_truncf128_round_to_odd, "dLLd", "")
-BUILTIN(__builtin_vsx_scalar_extract_expq, "ULLiLLd", "")
-BUILTIN(__builtin_vsx_scalar_insert_exp_qp, "LLdLLdULLi", "")
+TARGET_BUILTIN(__builtin_sqrtf128_round_to_odd, "LLdLLd", "", "float128")
+TARGET_BUILTIN(__builtin_addf128_round_to_odd, "LLdLLdLLd", "", "float128")
+TARGET_BUILTIN(__builtin_subf128_round_to_odd, "LLdLLdLLd", "", "float128")
+TARGET_BUILTIN(__builtin_mulf128_round_to_odd, "LLdLLdLLd", "", "float128")
+TARGET_BUILTIN(__builtin_divf128_round_to_odd, "LLdLLdLLd", "", "float128")
+TARGET_BUILTIN(__builtin_fmaf128_round_to_odd, "LLdLLdLLdLLd", "", "float128")
+TARGET_BUILTIN(__builtin_truncf128_round_to_odd, "dLLd", "", "float128")
+TARGET_BUILTIN(__builtin_vsx_scalar_extract_expq, "ULLiLLd", "", "float128")
+TARGET_BUILTIN(__builtin_vsx_scalar_insert_exp_qp, "LLdLLdULLi", "", "float128")
// Fastmath by default builtins
BUILTIN(__builtin_ppc_rsqrtf, "V4fV4f", "")
@@ -729,56 +890,60 @@ BUILTIN(__builtin_ppc_recipdivf, "V4fV4fV4f", "")
BUILTIN(__builtin_ppc_recipdivd, "V2dV2dV2d", "")
// HTM builtins
-BUILTIN(__builtin_tbegin, "UiUIi", "")
-BUILTIN(__builtin_tend, "UiUIi", "")
+TARGET_BUILTIN(__builtin_tbegin, "UiUIi", "", "htm")
+TARGET_BUILTIN(__builtin_tend, "UiUIi", "", "htm")
-BUILTIN(__builtin_tabort, "UiUi", "")
-BUILTIN(__builtin_tabortdc, "UiUiUiUi", "")
-BUILTIN(__builtin_tabortdci, "UiUiUii", "")
-BUILTIN(__builtin_tabortwc, "UiUiUiUi", "")
-BUILTIN(__builtin_tabortwci, "UiUiUii", "")
+TARGET_BUILTIN(__builtin_tabort, "UiUi", "", "htm")
+TARGET_BUILTIN(__builtin_tabortdc, "UiUiUiUi", "", "htm")
+TARGET_BUILTIN(__builtin_tabortdci, "UiUiUii", "", "htm")
+TARGET_BUILTIN(__builtin_tabortwc, "UiUiUiUi", "", "htm")
+TARGET_BUILTIN(__builtin_tabortwci, "UiUiUii", "", "htm")
-BUILTIN(__builtin_tcheck, "Ui", "")
-BUILTIN(__builtin_treclaim, "UiUi", "")
-BUILTIN(__builtin_trechkpt, "Ui", "")
-BUILTIN(__builtin_tsr, "UiUi", "")
+TARGET_BUILTIN(__builtin_tcheck, "Ui", "", "htm")
+TARGET_BUILTIN(__builtin_treclaim, "UiUi", "", "htm")
+TARGET_BUILTIN(__builtin_trechkpt, "Ui", "", "htm")
+TARGET_BUILTIN(__builtin_tsr, "UiUi", "", "htm")
-BUILTIN(__builtin_tendall, "Ui", "")
-BUILTIN(__builtin_tresume, "Ui", "")
-BUILTIN(__builtin_tsuspend, "Ui", "")
+TARGET_BUILTIN(__builtin_tendall, "Ui", "", "htm")
+TARGET_BUILTIN(__builtin_tresume, "Ui", "", "htm")
+TARGET_BUILTIN(__builtin_tsuspend, "Ui", "", "htm")
-BUILTIN(__builtin_get_texasr, "LUi", "c")
-BUILTIN(__builtin_get_texasru, "LUi", "c")
-BUILTIN(__builtin_get_tfhar, "LUi", "c")
-BUILTIN(__builtin_get_tfiar, "LUi", "c")
+TARGET_BUILTIN(__builtin_get_texasr, "LUi", "c", "htm")
+TARGET_BUILTIN(__builtin_get_texasru, "LUi", "c", "htm")
+TARGET_BUILTIN(__builtin_get_tfhar, "LUi", "c", "htm")
+TARGET_BUILTIN(__builtin_get_tfiar, "LUi", "c", "htm")
-BUILTIN(__builtin_set_texasr, "vLUi", "c")
-BUILTIN(__builtin_set_texasru, "vLUi", "c")
-BUILTIN(__builtin_set_tfhar, "vLUi", "c")
-BUILTIN(__builtin_set_tfiar, "vLUi", "c")
+TARGET_BUILTIN(__builtin_set_texasr, "vLUi", "c", "htm")
+TARGET_BUILTIN(__builtin_set_texasru, "vLUi", "c", "htm")
+TARGET_BUILTIN(__builtin_set_tfhar, "vLUi", "c", "htm")
+TARGET_BUILTIN(__builtin_set_tfiar, "vLUi", "c", "htm")
-BUILTIN(__builtin_ttest, "LUi", "")
+TARGET_BUILTIN(__builtin_ttest, "LUi", "", "htm")
// Scalar built-ins
-BUILTIN(__builtin_divwe, "SiSiSi", "")
-BUILTIN(__builtin_divweu, "UiUiUi", "")
-BUILTIN(__builtin_divde, "SLLiSLLiSLLi", "")
-BUILTIN(__builtin_divdeu, "ULLiULLiULLi", "")
-BUILTIN(__builtin_bpermd, "SLLiSLLiSLLi", "")
-BUILTIN(__builtin_pdepd, "ULLiULLiULLi", "")
-BUILTIN(__builtin_pextd, "ULLiULLiULLi", "")
-BUILTIN(__builtin_cfuged, "ULLiULLiULLi", "")
-BUILTIN(__builtin_cntlzdm, "ULLiULLiULLi", "")
-BUILTIN(__builtin_cnttzdm, "ULLiULLiULLi", "")
+TARGET_BUILTIN(__builtin_divwe, "SiSiSi", "", "extdiv")
+TARGET_BUILTIN(__builtin_divweu, "UiUiUi", "", "extdiv")
+TARGET_BUILTIN(__builtin_divde, "SLLiSLLiSLLi", "", "extdiv")
+TARGET_BUILTIN(__builtin_divdeu, "ULLiULLiULLi", "", "extdiv")
+TARGET_BUILTIN(__builtin_bpermd, "SLLiSLLiSLLi", "", "bpermd")
+TARGET_BUILTIN(__builtin_pdepd, "ULLiULLiULLi", "", "isa-v31-instructions")
+TARGET_BUILTIN(__builtin_pextd, "ULLiULLiULLi", "", "isa-v31-instructions")
+TARGET_BUILTIN(__builtin_cfuged, "ULLiULLiULLi", "", "isa-v31-instructions")
+TARGET_BUILTIN(__builtin_cntlzdm, "ULLiULLiULLi", "", "isa-v31-instructions")
+TARGET_BUILTIN(__builtin_cnttzdm, "ULLiULLiULLi", "", "isa-v31-instructions")
+
+// Double-double (un)pack
+BUILTIN(__builtin_unpack_longdouble, "dLdIi", "")
+BUILTIN(__builtin_pack_longdouble, "Lddd", "")
// Generate random number
-BUILTIN(__builtin_darn, "LLi", "")
-BUILTIN(__builtin_darn_raw, "LLi", "")
-BUILTIN(__builtin_darn_32, "i", "")
+TARGET_BUILTIN(__builtin_darn, "LLi", "", "isa-v30-instructions")
+TARGET_BUILTIN(__builtin_darn_raw, "LLi", "", "isa-v30-instructions")
+TARGET_BUILTIN(__builtin_darn_32, "i", "", "isa-v30-instructions")
// Vector int128 (un)pack
-BUILTIN(__builtin_unpack_vector_int128, "ULLiV1LLLii", "")
-BUILTIN(__builtin_pack_vector_int128, "V1LLLiULLiULLi", "")
+TARGET_BUILTIN(__builtin_unpack_vector_int128, "ULLiV1LLLii", "", "vsx")
+TARGET_BUILTIN(__builtin_pack_vector_int128, "V1LLLiULLiULLi", "", "vsx")
// Set the floating point rounding mode
BUILTIN(__builtin_setrnd, "di", "")
@@ -812,84 +977,159 @@ BUILTIN(__builtin_dcbf, "vvC*", "")
// its given accumulator.
// Provided builtins with _mma_ prefix for compatibility.
-CUSTOM_BUILTIN(mma_lxvp, vsx_lxvp, "W256SLLiW256C*", false)
-CUSTOM_BUILTIN(mma_stxvp, vsx_stxvp, "vW256SLLiW256C*", false)
-CUSTOM_BUILTIN(mma_assemble_pair, vsx_assemble_pair, "vW256*VV", false)
-CUSTOM_BUILTIN(mma_disassemble_pair, vsx_disassemble_pair, "vv*W256*", false)
+CUSTOM_BUILTIN(mma_lxvp, vsx_lxvp, "W256SLiW256C*", false,
+ "paired-vector-memops")
+CUSTOM_BUILTIN(mma_stxvp, vsx_stxvp, "vW256SLiW256*", false,
+ "paired-vector-memops")
+CUSTOM_BUILTIN(mma_assemble_pair, vsx_assemble_pair, "vW256*VV", false,
+ "paired-vector-memops")
+CUSTOM_BUILTIN(mma_disassemble_pair, vsx_disassemble_pair, "vv*W256*", false,
+ "paired-vector-memops")
+CUSTOM_BUILTIN(vsx_build_pair, vsx_assemble_pair, "vW256*VV", false,
+ "paired-vector-memops")
+CUSTOM_BUILTIN(mma_build_acc, mma_assemble_acc, "vW512*VVVV", false, "mma")
// UNALIASED_CUSTOM_BUILTIN macro is used for built-ins that have
// the same name as that of the intrinsic they generate, i.e. the
// ID and INTR are the same.
// This avoids repeating the ID and INTR in the macro expression.
-UNALIASED_CUSTOM_BUILTIN(vsx_lxvp, "W256SLLiW256C*", false)
-UNALIASED_CUSTOM_BUILTIN(vsx_stxvp, "vW256SLLiW256C*", false)
-UNALIASED_CUSTOM_BUILTIN(vsx_assemble_pair, "vW256*VV", false)
-UNALIASED_CUSTOM_BUILTIN(vsx_disassemble_pair, "vv*W256*", false)
-
-UNALIASED_CUSTOM_BUILTIN(mma_assemble_acc, "vW512*VVVV", false)
-UNALIASED_CUSTOM_BUILTIN(mma_disassemble_acc, "vv*W512*", false)
-UNALIASED_CUSTOM_BUILTIN(mma_xxmtacc, "vW512*", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xxmfacc, "vW512*", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xxsetaccz, "vW512*", false)
-UNALIASED_CUSTOM_BUILTIN(mma_xvi4ger8, "vW512*VV", false)
-UNALIASED_CUSTOM_BUILTIN(mma_xvi8ger4, "vW512*VV", false)
-UNALIASED_CUSTOM_BUILTIN(mma_xvi16ger2, "vW512*VV", false)
-UNALIASED_CUSTOM_BUILTIN(mma_xvi16ger2s, "vW512*VV", false)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2, "vW512*VV", false)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf32ger, "vW512*VV", false)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf64ger, "vW512*W256V", false)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvi4ger8, "vW512*VVi15i15i255", false)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvi8ger4, "vW512*VVi15i15i15", false)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvi16ger2, "vW512*VVi15i15i3", false)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvi16ger2s, "vW512*VVi15i15i3", false)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2, "vW512*VVi15i15i3", false)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32ger, "vW512*VVi15i15", false)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64ger, "vW512*W256Vi15i3", false)
-UNALIASED_CUSTOM_BUILTIN(mma_xvi4ger8pp, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvi8ger4pp, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvi8ger4spp, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvi16ger2pp, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvi16ger2spp, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvi4ger8pp, "vW512*VVi15i15i255", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvi8ger4pp, "vW512*VVi15i15i15", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvi8ger4spp, "vW512*VVi15i15i15", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvi16ger2pp, "vW512*VVi15i15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvi16ger2spp, "vW512*VVi15i15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2pp, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2pn, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2np, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2nn, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2pp, "vW512*VVi15i15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2pn, "vW512*VVi15i15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2np, "vW512*VVi15i15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2nn, "vW512*VVi15i15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf32gerpp, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf32gerpn, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf32gernp, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf32gernn, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32gerpp, "vW512*VVi15i15", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32gerpn, "vW512*VVi15i15", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32gernp, "vW512*VVi15i15", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32gernn, "vW512*VVi15i15", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf64gerpp, "vW512*W256V", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf64gerpn, "vW512*W256V", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf64gernp, "vW512*W256V", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvf64gernn, "vW512*W256V", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64gerpp, "vW512*W256Vi15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64gerpn, "vW512*W256Vi15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64gernp, "vW512*W256Vi15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64gernn, "vW512*W256Vi15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2, "vW512*VV", false)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2, "vW512*VVi15i15i3", false)
-UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2pp, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2pn, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2np, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2nn, "vW512*VV", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2pp, "vW512*VVi15i15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2pn, "vW512*VVi15i15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2np, "vW512*VVi15i15i3", true)
-UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2nn, "vW512*VVi15i15i3", true)
+UNALIASED_CUSTOM_BUILTIN(vsx_lxvp, "W256SLiW256C*", false,
+ "paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(vsx_stxvp, "vW256SLiW256*", false,
+ "paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(vsx_assemble_pair, "vW256*VV", false,
+ "paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(vsx_disassemble_pair, "vv*W256*", false,
+ "paired-vector-memops")
+
+// TODO: Require only mma after backend supports these without paired memops
+UNALIASED_CUSTOM_BUILTIN(mma_assemble_acc, "vW512*VVVV", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_disassemble_acc, "vv*W512*", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xxmtacc, "vW512*", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xxmfacc, "vW512*", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xxsetaccz, "vW512*", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvi4ger8, "vW512*VV", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvi8ger4, "vW512*VV", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvi16ger2, "vW512*VV", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvi16ger2s, "vW512*VV", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2, "vW512*VV", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf32ger, "vW512*VV", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf64ger, "vW512*W256V", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi4ger8, "vW512*VVi15i15i255", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi8ger4, "vW512*VVi15i15i15", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi16ger2, "vW512*VVi15i15i3", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi16ger2s, "vW512*VVi15i15i3", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2, "vW512*VVi15i15i3", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32ger, "vW512*VVi15i15", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64ger, "vW512*W256Vi15i3", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvi4ger8pp, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvi8ger4pp, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvi8ger4spp, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvi16ger2pp, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvi16ger2spp, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi4ger8pp, "vW512*VVi15i15i255", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi8ger4pp, "vW512*VVi15i15i15", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi8ger4spp, "vW512*VVi15i15i15", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi16ger2pp, "vW512*VVi15i15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi16ger2spp, "vW512*VVi15i15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2pp, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2pn, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2np, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2nn, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2pp, "vW512*VVi15i15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2pn, "vW512*VVi15i15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2np, "vW512*VVi15i15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2nn, "vW512*VVi15i15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf32gerpp, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf32gerpn, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf32gernp, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf32gernn, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32gerpp, "vW512*VVi15i15", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32gerpn, "vW512*VVi15i15", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32gernp, "vW512*VVi15i15", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32gernn, "vW512*VVi15i15", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf64gerpp, "vW512*W256V", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf64gerpn, "vW512*W256V", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf64gernp, "vW512*W256V", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvf64gernn, "vW512*W256V", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64gerpp, "vW512*W256Vi15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64gerpn, "vW512*W256Vi15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64gernp, "vW512*W256Vi15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64gernn, "vW512*W256Vi15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2, "vW512*VV", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2, "vW512*VVi15i15i3", false,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2pp, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2pn, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2np, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2nn, "vW512*VV", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2pp, "vW512*VVi15i15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2pn, "vW512*VVi15i15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2np, "vW512*VVi15i15i3", true,
+ "mma,paired-vector-memops")
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2nn, "vW512*VVi15i15i3", true,
+ "mma,paired-vector-memops")
// FIXME: Obviously incomplete.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCV.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCV.def
index b2b4950f92bd..1528b18c82ea 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCV.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCV.def
@@ -15,49 +15,79 @@
# define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BUILTIN(ID, TYPE, ATTRS)
#endif
-#include "clang/Basic/riscv_vector_builtins.inc"
-
// Zbb extension
-TARGET_BUILTIN(__builtin_riscv_orc_b_32, "ZiZi", "nc", "experimental-zbb")
-TARGET_BUILTIN(__builtin_riscv_orc_b_64, "WiWi", "nc", "experimental-zbb,64bit")
-
-// Zbc extension
-TARGET_BUILTIN(__builtin_riscv_clmul, "LiLiLi", "nc", "experimental-zbc")
-TARGET_BUILTIN(__builtin_riscv_clmulh, "LiLiLi", "nc", "experimental-zbc")
-TARGET_BUILTIN(__builtin_riscv_clmulr, "LiLiLi", "nc", "experimental-zbc")
-
-// Zbe extension
-TARGET_BUILTIN(__builtin_riscv_bcompress_32, "ZiZiZi", "nc", "experimental-zbe")
-TARGET_BUILTIN(__builtin_riscv_bcompress_64, "WiWiWi", "nc",
- "experimental-zbe,64bit")
-TARGET_BUILTIN(__builtin_riscv_bdecompress_32, "ZiZiZi", "nc",
- "experimental-zbe")
-TARGET_BUILTIN(__builtin_riscv_bdecompress_64, "WiWiWi", "nc",
- "experimental-zbe,64bit")
-
-// Zbp extension
-TARGET_BUILTIN(__builtin_riscv_grev_32, "ZiZiZi", "nc", "experimental-zbp")
-TARGET_BUILTIN(__builtin_riscv_grev_64, "WiWiWi", "nc", "experimental-zbp,64bit")
-TARGET_BUILTIN(__builtin_riscv_gorc_32, "ZiZiZi", "nc", "experimental-zbp")
-TARGET_BUILTIN(__builtin_riscv_gorc_64, "WiWiWi", "nc", "experimental-zbp,64bit")
-TARGET_BUILTIN(__builtin_riscv_shfl_32, "ZiZiZi", "nc", "experimental-zbp")
-TARGET_BUILTIN(__builtin_riscv_shfl_64, "WiWiWi", "nc", "experimental-zbp,64bit")
-TARGET_BUILTIN(__builtin_riscv_unshfl_32, "ZiZiZi", "nc", "experimental-zbp")
-TARGET_BUILTIN(__builtin_riscv_unshfl_64, "WiWiWi", "nc", "experimental-zbp,64bit")
-TARGET_BUILTIN(__builtin_riscv_xperm_n, "LiLiLi", "nc", "experimental-zbp")
-TARGET_BUILTIN(__builtin_riscv_xperm_b, "LiLiLi", "nc", "experimental-zbp")
-TARGET_BUILTIN(__builtin_riscv_xperm_h, "LiLiLi", "nc", "experimental-zbp")
-TARGET_BUILTIN(__builtin_riscv_xperm_w, "WiWiWi", "nc", "experimental-zbp,64bit")
-
-// Zbr extension
-TARGET_BUILTIN(__builtin_riscv_crc32_b, "LiLi", "nc", "experimental-zbr")
-TARGET_BUILTIN(__builtin_riscv_crc32_h, "LiLi", "nc", "experimental-zbr")
-TARGET_BUILTIN(__builtin_riscv_crc32_w, "LiLi", "nc", "experimental-zbr")
-TARGET_BUILTIN(__builtin_riscv_crc32c_b, "LiLi", "nc", "experimental-zbr")
-TARGET_BUILTIN(__builtin_riscv_crc32c_h, "LiLi", "nc", "experimental-zbr")
-TARGET_BUILTIN(__builtin_riscv_crc32c_w, "LiLi", "nc", "experimental-zbr")
-TARGET_BUILTIN(__builtin_riscv_crc32_d, "LiLi", "nc", "experimental-zbr")
-TARGET_BUILTIN(__builtin_riscv_crc32c_d, "LiLi", "nc", "experimental-zbr")
+TARGET_BUILTIN(__builtin_riscv_orc_b_32, "UiUi", "nc", "zbb")
+TARGET_BUILTIN(__builtin_riscv_orc_b_64, "UWiUWi", "nc", "zbb,64bit")
+TARGET_BUILTIN(__builtin_riscv_clz_32, "UiUi", "nc", "zbb|xtheadbb")
+TARGET_BUILTIN(__builtin_riscv_clz_64, "UiUWi", "nc", "zbb|xtheadbb,64bit")
+TARGET_BUILTIN(__builtin_riscv_ctz_32, "UiUi", "nc", "zbb")
+TARGET_BUILTIN(__builtin_riscv_ctz_64, "UiUWi", "nc", "zbb,64bit")
+
+// Zbc or Zbkc extension
+TARGET_BUILTIN(__builtin_riscv_clmul_32, "UiUiUi", "nc", "zbc|zbkc")
+TARGET_BUILTIN(__builtin_riscv_clmul_64, "UWiUWiUWi", "nc", "zbc|zbkc,64bit")
+TARGET_BUILTIN(__builtin_riscv_clmulh_32, "UiUiUi", "nc", "zbc|zbkc,32bit")
+TARGET_BUILTIN(__builtin_riscv_clmulh_64, "UWiUWiUWi", "nc", "zbc|zbkc,64bit")
+TARGET_BUILTIN(__builtin_riscv_clmulr_32, "UiUiUi", "nc", "zbc,32bit")
+TARGET_BUILTIN(__builtin_riscv_clmulr_64, "UWiUWiUWi", "nc", "zbc,64bit")
+
+// Zbkx
+TARGET_BUILTIN(__builtin_riscv_xperm4_32, "UiUiUi", "nc", "zbkx,32bit")
+TARGET_BUILTIN(__builtin_riscv_xperm4_64, "UWiUWiUWi", "nc", "zbkx,64bit")
+TARGET_BUILTIN(__builtin_riscv_xperm8_32, "UiUiUi", "nc", "zbkx,32bit")
+TARGET_BUILTIN(__builtin_riscv_xperm8_64, "UWiUWiUWi", "nc", "zbkx,64bit")
+
+// Zbkb extension
+TARGET_BUILTIN(__builtin_riscv_brev8_32, "UiUi", "nc", "zbkb")
+TARGET_BUILTIN(__builtin_riscv_brev8_64, "UWiUWi", "nc", "zbkb,64bit")
+TARGET_BUILTIN(__builtin_riscv_zip_32, "UiUi", "nc", "zbkb,32bit")
+TARGET_BUILTIN(__builtin_riscv_unzip_32, "UiUi", "nc", "zbkb,32bit")
+
+// Zknd extension
+TARGET_BUILTIN(__builtin_riscv_aes32dsi, "UiUiUiIUi", "nc", "zknd,32bit")
+TARGET_BUILTIN(__builtin_riscv_aes32dsmi, "UiUiUiIUi", "nc", "zknd,32bit")
+TARGET_BUILTIN(__builtin_riscv_aes64ds, "UWiUWiUWi", "nc", "zknd,64bit")
+TARGET_BUILTIN(__builtin_riscv_aes64dsm, "UWiUWiUWi", "nc", "zknd,64bit")
+TARGET_BUILTIN(__builtin_riscv_aes64im, "UWiUWi", "nc", "zknd,64bit")
+
+// Zknd & Zkne
+TARGET_BUILTIN(__builtin_riscv_aes64ks1i, "UWiUWiIUi", "nc", "zknd|zkne,64bit")
+TARGET_BUILTIN(__builtin_riscv_aes64ks2, "UWiUWiUWi", "nc", "zknd|zkne,64bit")
+
+// Zkne extension
+TARGET_BUILTIN(__builtin_riscv_aes32esi, "UiUiUiIUi", "nc", "zkne,32bit")
+TARGET_BUILTIN(__builtin_riscv_aes32esmi, "UiUiUiIUi", "nc", "zkne,32bit")
+TARGET_BUILTIN(__builtin_riscv_aes64es, "UWiUWiUWi", "nc", "zkne,64bit")
+TARGET_BUILTIN(__builtin_riscv_aes64esm, "UWiUWiUWi", "nc", "zkne,64bit")
+
+// Zknh extension
+TARGET_BUILTIN(__builtin_riscv_sha256sig0, "UiUi", "nc", "zknh")
+TARGET_BUILTIN(__builtin_riscv_sha256sig1, "UiUi", "nc", "zknh")
+TARGET_BUILTIN(__builtin_riscv_sha256sum0, "UiUi", "nc", "zknh")
+TARGET_BUILTIN(__builtin_riscv_sha256sum1, "UiUi", "nc", "zknh")
+
+TARGET_BUILTIN(__builtin_riscv_sha512sig0h, "UiUiUi", "nc", "zknh,32bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sig0l, "UiUiUi", "nc", "zknh,32bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sig1h, "UiUiUi", "nc", "zknh,32bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sig1l, "UiUiUi", "nc", "zknh,32bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sum0r, "UiUiUi", "nc", "zknh,32bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sum1r, "UiUiUi", "nc", "zknh,32bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sig0, "UWiUWi", "nc", "zknh,64bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sig1, "UWiUWi", "nc", "zknh,64bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sum0, "UWiUWi", "nc", "zknh,64bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sum1, "UWiUWi", "nc", "zknh,64bit")
+
+// Zksed extension
+TARGET_BUILTIN(__builtin_riscv_sm4ed, "UiUiUiIUi", "nc", "zksed")
+TARGET_BUILTIN(__builtin_riscv_sm4ks, "UiUiUiIUi", "nc", "zksed")
+
+// Zksh extension
+TARGET_BUILTIN(__builtin_riscv_sm3p0, "UiUi", "nc", "zksh")
+TARGET_BUILTIN(__builtin_riscv_sm3p1, "UiUi", "nc", "zksh")
+
+// Zihintntl extension
+TARGET_BUILTIN(__builtin_riscv_ntl_load, "v.", "t", "zihintntl")
+TARGET_BUILTIN(__builtin_riscv_ntl_store, "v.", "t", "zihintntl")
#undef BUILTIN
#undef TARGET_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCVVector.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCVVector.def
new file mode 100644
index 000000000000..6dfa87a1a1d3
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsRISCVVector.def
@@ -0,0 +1,22 @@
+//==- BuiltinsRISCVVector.def - RISC-V Vector Builtin Database ---*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RISC-V-specific builtin function database. Users of
+// this file must define the BUILTIN macro to make use of this information.
+//
+//===----------------------------------------------------------------------===//
+
+#if defined(BUILTIN) && !defined(TARGET_BUILTIN)
+# define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BUILTIN(ID, TYPE, ATTRS)
+#endif
+
+#include "clang/Basic/riscv_vector_builtins.inc"
+#include "clang/Basic/riscv_sifive_vector_builtins.inc"
+
+#undef BUILTIN
+#undef TARGET_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsSME.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsSME.def
new file mode 100644
index 000000000000..180ee20295cc
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsSME.def
@@ -0,0 +1,21 @@
+//===--- BuiltinsSME.def - SME Builtin function database --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SME-specific builtin function database. Users of
+// this file must define the BUILTIN macro to make use of this information.
+//
+//===----------------------------------------------------------------------===//
+
+// The format of this database matches clang/Basic/Builtins.def.
+
+#define GET_SME_BUILTINS
+#include "clang/Basic/arm_sme_builtins.inc"
+#undef GET_SME_BUILTINS
+
+#undef BUILTIN
+#undef TARGET_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsSVE.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsSVE.def
index 2839ca992d98..a83f1c8f82dd 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsSVE.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsSVE.def
@@ -15,6 +15,8 @@
#define GET_SVE_BUILTINS
#include "clang/Basic/arm_sve_builtins.inc"
+#include "clang/Basic/BuiltinsAArch64NeonSVEBridge.def"
#undef GET_SVE_BUILTINS
#undef BUILTIN
+#undef TARGET_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsSystemZ.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsSystemZ.def
index 079e41136488..f0c0ebfa622a 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsSystemZ.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsSystemZ.def
@@ -64,14 +64,14 @@ TARGET_BUILTIN(__builtin_s390_vupllh, "V4UiV8Us", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vupllf, "V2ULLiV4Ui", "nc", "vector")
// Vector integer instructions (chapter 22 of the PoP)
-TARGET_BUILTIN(__builtin_s390_vaq, "V16UcV16UcV16Uc", "nc", "vector")
-TARGET_BUILTIN(__builtin_s390_vacq, "V16UcV16UcV16UcV16Uc", "nc", "vector")
+TARGET_BUILTIN(__builtin_s390_vaq, "SLLLiSLLLiSLLLi", "nc", "vector")
+TARGET_BUILTIN(__builtin_s390_vacq, "ULLLiULLLiULLLiULLLi", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vaccb, "V16UcV16UcV16Uc", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vacch, "V8UsV8UsV8Us", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vaccf, "V4UiV4UiV4Ui", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vaccg, "V2ULLiV2ULLiV2ULLi", "nc", "vector")
-TARGET_BUILTIN(__builtin_s390_vaccq, "V16UcV16UcV16Uc", "nc", "vector")
-TARGET_BUILTIN(__builtin_s390_vacccq, "V16UcV16UcV16UcV16Uc", "nc", "vector")
+TARGET_BUILTIN(__builtin_s390_vaccq, "ULLLiULLLiULLLi", "nc", "vector")
+TARGET_BUILTIN(__builtin_s390_vacccq, "ULLLiULLLiULLLiULLLi", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vavgb, "V16ScV16ScV16Sc", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vavgh, "V8SsV8SsV8Ss", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vavgf, "V4SiV4SiV4Si", "nc", "vector")
@@ -80,10 +80,10 @@ TARGET_BUILTIN(__builtin_s390_vavglb, "V16UcV16UcV16Uc", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vavglh, "V8UsV8UsV8Us", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vavglf, "V4UiV4UiV4Ui", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vavglg, "V2ULLiV2ULLiV2ULLi", "nc", "vector")
-TARGET_BUILTIN(__builtin_s390_vceqbs, "V16ScV16ScV16Sci*", "nc", "vector")
-TARGET_BUILTIN(__builtin_s390_vceqhs, "V8SsV8SsV8Ssi*", "nc", "vector")
-TARGET_BUILTIN(__builtin_s390_vceqfs, "V4SiV4SiV4Sii*", "nc", "vector")
-TARGET_BUILTIN(__builtin_s390_vceqgs, "V2SLLiV2SLLiV2SLLii*", "nc", "vector")
+TARGET_BUILTIN(__builtin_s390_vceqbs, "V16ScV16UcV16Uci*", "nc", "vector")
+TARGET_BUILTIN(__builtin_s390_vceqhs, "V8SsV8UsV8Usi*", "nc", "vector")
+TARGET_BUILTIN(__builtin_s390_vceqfs, "V4SiV4UiV4Uii*", "nc", "vector")
+TARGET_BUILTIN(__builtin_s390_vceqgs, "V2SLLiV2ULLiV2ULLii*", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vchbs, "V16ScV16ScV16Sci*", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vchhs, "V8SsV8SsV8Ssi*", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vchfs, "V4SiV4SiV4Sii*", "nc", "vector")
@@ -105,10 +105,10 @@ TARGET_BUILTIN(__builtin_s390_verimb, "V16UcV16UcV16UcV16UcIi", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_verimh, "V8UsV8UsV8UsV8UsIi", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_verimf, "V4UiV4UiV4UiV4UiIi", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_verimg, "V2ULLiV2ULLiV2ULLiV2ULLiIi", "nc", "vector")
-TARGET_BUILTIN(__builtin_s390_verllb, "V16UcV16UcUi", "nc", "vector")
-TARGET_BUILTIN(__builtin_s390_verllh, "V8UsV8UsUi", "nc", "vector")
-TARGET_BUILTIN(__builtin_s390_verllf, "V4UiV4UiUi", "nc", "vector")
-TARGET_BUILTIN(__builtin_s390_verllg, "V2ULLiV2ULLiUi", "nc", "vector")
+TARGET_BUILTIN(__builtin_s390_verllb, "V16UcV16UcUc", "nc", "vector")
+TARGET_BUILTIN(__builtin_s390_verllh, "V8UsV8UsUc", "nc", "vector")
+TARGET_BUILTIN(__builtin_s390_verllf, "V4UiV4UiUc", "nc", "vector")
+TARGET_BUILTIN(__builtin_s390_verllg, "V2ULLiV2ULLiUc", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_verllvb, "V16UcV16UcV16Uc", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_verllvh, "V8UsV8UsV8Us", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_verllvf, "V4UiV4UiV4Ui", "nc", "vector")
@@ -116,11 +116,11 @@ TARGET_BUILTIN(__builtin_s390_verllvg, "V2ULLiV2ULLiV2ULLi", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vgfmb, "V8UsV16UcV16Uc", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vgfmh, "V4UiV8UsV8Us", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vgfmf, "V2ULLiV4UiV4Ui", "nc", "vector")
-TARGET_BUILTIN(__builtin_s390_vgfmg, "V16UcV2ULLiV2ULLi", "nc", "vector")
+TARGET_BUILTIN(__builtin_s390_vgfmg, "ULLLiV2ULLiV2ULLi", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vgfmab, "V8UsV16UcV16UcV8Us", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vgfmah, "V4UiV8UsV8UsV4Ui", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vgfmaf, "V2ULLiV4UiV4UiV2ULLi", "nc", "vector")
-TARGET_BUILTIN(__builtin_s390_vgfmag, "V16UcV2ULLiV2ULLiV16Uc", "nc", "vector")
+TARGET_BUILTIN(__builtin_s390_vgfmag, "ULLLiV2ULLiV2ULLiULLLi", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vmahb, "V16ScV16ScV16ScV16Sc", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vmahh, "V8SsV8SsV8SsV8Ss", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vmahf, "V4SiV4SiV4SiV4Si", "nc", "vector")
@@ -161,14 +161,14 @@ TARGET_BUILTIN(__builtin_s390_vpopctb, "V16UcV16Uc", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vpopcth, "V8UsV8Us", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vpopctf, "V4UiV4Ui", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vpopctg, "V2ULLiV2ULLi", "nc", "vector")
-TARGET_BUILTIN(__builtin_s390_vsq, "V16UcV16UcV16Uc", "nc", "vector")
-TARGET_BUILTIN(__builtin_s390_vsbcbiq, "V16UcV16UcV16UcV16Uc", "nc", "vector")
-TARGET_BUILTIN(__builtin_s390_vsbiq, "V16UcV16UcV16UcV16Uc", "nc", "vector")
+TARGET_BUILTIN(__builtin_s390_vsq, "SLLLiSLLLiSLLLi", "nc", "vector")
+TARGET_BUILTIN(__builtin_s390_vsbcbiq, "ULLLiULLLiULLLiULLLi", "nc", "vector")
+TARGET_BUILTIN(__builtin_s390_vsbiq, "ULLLiULLLiULLLiULLLi", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vscbib, "V16UcV16UcV16Uc", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vscbih, "V8UsV8UsV8Us", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vscbif, "V4UiV4UiV4Ui", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vscbig, "V2ULLiV2ULLiV2ULLi", "nc", "vector")
-TARGET_BUILTIN(__builtin_s390_vscbiq, "V16UcV16UcV16Uc", "nc", "vector")
+TARGET_BUILTIN(__builtin_s390_vscbiq, "ULLLiULLLiULLLi", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vsl, "V16UcV16UcV16Uc", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vslb, "V16UcV16UcV16Uc", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vsldb, "V16UcV16UcV16UcIi", "nc", "vector")
@@ -180,8 +180,8 @@ TARGET_BUILTIN(__builtin_s390_vsumb, "V4UiV16UcV16Uc", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vsumh, "V4UiV8UsV8Us", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vsumgh, "V2ULLiV8UsV8Us", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vsumgf, "V2ULLiV4UiV4Ui", "nc", "vector")
-TARGET_BUILTIN(__builtin_s390_vsumqf, "V16UcV4UiV4Ui", "nc", "vector")
-TARGET_BUILTIN(__builtin_s390_vsumqg, "V16UcV2ULLiV2ULLi", "nc", "vector")
+TARGET_BUILTIN(__builtin_s390_vsumqf, "ULLLiV4UiV4Ui", "nc", "vector")
+TARGET_BUILTIN(__builtin_s390_vsumqg, "ULLLiV2ULLiV2ULLi", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vtm, "iV16UcV16Uc", "nc", "vector")
// Vector string instructions (chapter 23 of the PoP)
@@ -253,10 +253,10 @@ TARGET_BUILTIN(__builtin_s390_vfsqdb, "V2dV2d", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vftcidb, "V2SLLiV2dIii*", "nc", "vector")
// Vector-enhancements facility 1 intrinsics.
-TARGET_BUILTIN(__builtin_s390_vlrl, "V16ScUivC*", "", "vector-enhancements-1")
-TARGET_BUILTIN(__builtin_s390_vstrl, "vV16ScUiv*", "", "vector-enhancements-1")
+TARGET_BUILTIN(__builtin_s390_vlrlr, "V16ScUivC*", "", "vector-enhancements-1")
+TARGET_BUILTIN(__builtin_s390_vstrlr, "vV16ScUiv*", "", "vector-enhancements-1")
TARGET_BUILTIN(__builtin_s390_vbperm, "V2ULLiV16UcV16Uc", "nc", "vector-enhancements-1")
-TARGET_BUILTIN(__builtin_s390_vmslg, "V16UcV2ULLiV2ULLiV16UcIi", "nc", "vector-enhancements-1")
+TARGET_BUILTIN(__builtin_s390_vmslg, "ULLLiV2ULLiV2ULLiULLLiIi", "nc", "vector-enhancements-1")
TARGET_BUILTIN(__builtin_s390_vfmaxdb, "V2dV2dV2dIi", "nc", "vector-enhancements-1")
TARGET_BUILTIN(__builtin_s390_vfmindb, "V2dV2dV2dIi", "nc", "vector-enhancements-1")
TARGET_BUILTIN(__builtin_s390_vfnmadb, "V2dV2dV2dV2d", "nc", "vector-enhancements-1")
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsVE.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsVE.def
new file mode 100644
index 000000000000..23bfb0e03aa7
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsVE.def
@@ -0,0 +1,32 @@
+//===--- BuiltinsVE.def - VE Builtin function database ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the VE-specific builtin function database. Users of
+// this file must define the BUILTIN macro to make use of this information.
+//
+//===----------------------------------------------------------------------===//
+
+#if defined(BUILTIN) && !defined(TARGET_BUILTIN)
+# define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BUILTIN(ID, TYPE, ATTRS)
+#endif
+
+// The format of this database is described in clang/Basic/Builtins.def.
+
+BUILTIN(__builtin_ve_vl_pack_f32p, "ULifC*fC*", "n")
+BUILTIN(__builtin_ve_vl_pack_f32a, "ULifC*", "n")
+
+BUILTIN(__builtin_ve_vl_extract_vm512u, "V256bV512b", "n")
+BUILTIN(__builtin_ve_vl_extract_vm512l, "V256bV512b", "n")
+BUILTIN(__builtin_ve_vl_insert_vm512u, "V512bV512bV256b", "n")
+BUILTIN(__builtin_ve_vl_insert_vm512l, "V512bV512bV256b", "n")
+
+// Use generated BUILTIN definitions
+#include "clang/Basic/BuiltinsVEVL.gen.def"
+
+#undef BUILTIN
+#undef TARGET_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsVEVL.gen.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsVEVL.gen.def
new file mode 100644
index 000000000000..7b06e5c30e93
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsVEVL.gen.def
@@ -0,0 +1,1257 @@
+BUILTIN(__builtin_ve_vl_vld_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vld_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldnc_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldnc_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldu_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldu_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldunc_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldunc_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldlsx_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldlsx_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldlsxnc_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldlsxnc_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldlzx_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldlzx_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldlzxnc_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldlzxnc_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vld2d_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vld2d_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vld2dnc_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vld2dnc_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldu2d_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldu2d_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldu2dnc_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldu2dnc_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldl2dsx_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldl2dsx_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldl2dsxnc_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldl2dsxnc_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldl2dzx_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldl2dzx_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldl2dzxnc_vssl, "V256dLUivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_vldl2dzxnc_vssvl, "V256dLUivC*V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vst_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vst_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vstnc_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstnc_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vstot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstot_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vstncot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstncot_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vstu_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstu_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vstunc_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstunc_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vstuot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstuot_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vstuncot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstuncot_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vstl_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstl_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vstlnc_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstlnc_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vstlot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstlot_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vstlncot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstlncot_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vst2d_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vst2d_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vst2dnc_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vst2dnc_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vst2dot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vst2dot_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vst2dncot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vst2dncot_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vstu2d_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstu2d_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vstu2dnc_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstu2dnc_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vstu2dot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstu2dot_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vstu2dncot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstu2dncot_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vstl2d_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstl2d_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vstl2dnc_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstl2dnc_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vstl2dot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstl2dot_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vstl2dncot_vssl, "vV256dLUiv*Ui", "n")
+BUILTIN(__builtin_ve_vl_vstl2dncot_vssml, "vV256dLUiv*V256bUi", "n")
+BUILTIN(__builtin_ve_vl_pfchv_ssl, "vLivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_pfchvnc_ssl, "vLivC*Ui", "n")
+BUILTIN(__builtin_ve_vl_lsv_vvss, "V256dV256dUiLUi", "n")
+BUILTIN(__builtin_ve_vl_lvsl_svs, "LUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_lvsd_svs, "dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_lvss_svs, "fV256dUi", "n")
+BUILTIN(__builtin_ve_vl_lvm_mmss, "V256bV256bLUiLUi", "n")
+BUILTIN(__builtin_ve_vl_lvm_MMss, "V512bV512bLUiLUi", "n")
+BUILTIN(__builtin_ve_vl_svm_sms, "LUiV256bLUi", "n")
+BUILTIN(__builtin_ve_vl_svm_sMs, "LUiV512bLUi", "n")
+BUILTIN(__builtin_ve_vl_vbrdd_vsl, "V256ddUi", "n")
+BUILTIN(__builtin_ve_vl_vbrdd_vsvl, "V256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vbrdd_vsmvl, "V256ddV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vbrdl_vsl, "V256dLiUi", "n")
+BUILTIN(__builtin_ve_vl_vbrdl_vsvl, "V256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vbrdl_vsmvl, "V256dLiV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vbrds_vsl, "V256dfUi", "n")
+BUILTIN(__builtin_ve_vl_vbrds_vsvl, "V256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vbrds_vsmvl, "V256dfV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vbrdw_vsl, "V256diUi", "n")
+BUILTIN(__builtin_ve_vl_vbrdw_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vbrdw_vsmvl, "V256diV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrd_vsl, "V256dLUiUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrd_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrd_vsMvl, "V256dLUiV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmv_vsvl, "V256dUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmv_vsvvl, "V256dUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmv_vsvmvl, "V256dUiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddul_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddul_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddul_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddul_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddul_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddul_vsvmvl, "V256dLUiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vadduw_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vadduw_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vadduw_vsvl, "V256dUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vadduw_vsvvl, "V256dUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vadduw_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vadduw_vsvmvl, "V256dUiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvaddu_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvaddu_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvaddu_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvaddu_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvaddu_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvaddu_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswsx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswsx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswsx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswsx_vsvmvl, "V256diV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswzx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswzx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswzx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddswzx_vsvmvl, "V256diV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvadds_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvadds_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvadds_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvadds_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvadds_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvadds_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddsl_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddsl_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddsl_vsvl, "V256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddsl_vsvvl, "V256dLiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddsl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vaddsl_vsvmvl, "V256dLiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubul_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubul_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubul_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubul_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubul_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubul_vsvmvl, "V256dLUiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubuw_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubuw_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubuw_vsvl, "V256dUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubuw_vsvvl, "V256dUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubuw_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubuw_vsvmvl, "V256dUiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubu_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubu_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubu_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubu_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubu_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubu_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswsx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswsx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswsx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswsx_vsvmvl, "V256diV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswzx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswzx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswzx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubswzx_vsvmvl, "V256diV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubs_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubs_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubs_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubs_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubs_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsubs_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubsl_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubsl_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubsl_vsvl, "V256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubsl_vsvvl, "V256dLiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubsl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsubsl_vsvmvl, "V256dLiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulul_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulul_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulul_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulul_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulul_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulul_vsvmvl, "V256dLUiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmuluw_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmuluw_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmuluw_vsvl, "V256dUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmuluw_vsvvl, "V256dUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmuluw_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmuluw_vsvmvl, "V256dUiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswsx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswsx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswsx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswsx_vsvmvl, "V256diV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswzx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswzx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswzx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulswzx_vsvmvl, "V256diV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulsl_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulsl_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulsl_vsvl, "V256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulsl_vsvvl, "V256dLiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulsl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulsl_vsvmvl, "V256dLiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulslw_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulslw_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulslw_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmulslw_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivul_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivul_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivul_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivul_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivul_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivul_vsvmvl, "V256dLUiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivuw_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivuw_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivuw_vsvl, "V256dUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivuw_vsvvl, "V256dUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivuw_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivuw_vsvmvl, "V256dUiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivul_vvsl, "V256dV256dLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vdivul_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivul_vvsmvl, "V256dV256dLUiV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivuw_vvsl, "V256dV256dUiUi", "n")
+BUILTIN(__builtin_ve_vl_vdivuw_vvsvl, "V256dV256dUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivuw_vvsmvl, "V256dV256dUiV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswsx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswsx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswsx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswsx_vsvmvl, "V256diV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswzx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswzx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswzx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswzx_vsvmvl, "V256diV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswsx_vvsl, "V256dV256diUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswsx_vvsvl, "V256dV256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswsx_vvsmvl, "V256dV256diV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswzx_vvsl, "V256dV256diUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswzx_vvsvl, "V256dV256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivswzx_vvsmvl, "V256dV256diV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivsl_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivsl_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivsl_vsvl, "V256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivsl_vsvvl, "V256dLiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivsl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivsl_vsvmvl, "V256dLiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivsl_vvsl, "V256dV256dLiUi", "n")
+BUILTIN(__builtin_ve_vl_vdivsl_vvsvl, "V256dV256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vdivsl_vvsmvl, "V256dV256dLiV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpul_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpul_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpul_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpul_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpul_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpul_vsvmvl, "V256dLUiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpuw_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpuw_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpuw_vsvl, "V256dUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpuw_vsvvl, "V256dUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpuw_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpuw_vsvmvl, "V256dUiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmpu_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmpu_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmpu_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmpu_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmpu_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmpu_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswsx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswsx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswsx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswsx_vsvmvl, "V256diV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswzx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswzx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswzx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpswzx_vsvmvl, "V256diV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmps_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmps_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmps_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmps_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmps_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcmps_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpsl_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpsl_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpsl_vsvl, "V256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpsl_vsvvl, "V256dLiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpsl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcmpsl_vsvmvl, "V256dLiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswsx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswsx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswsx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswsx_vsvmvl, "V256diV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswzx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswzx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswzx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxswzx_vsvmvl, "V256diV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmaxs_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmaxs_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmaxs_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmaxs_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmaxs_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmaxs_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswsx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswsx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswsx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswsx_vsvmvl, "V256diV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswzx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswzx_vsvl, "V256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswzx_vsvvl, "V256diV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminswzx_vsvmvl, "V256diV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmins_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmins_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmins_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmins_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmins_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvmins_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxsl_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxsl_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxsl_vsvl, "V256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxsl_vsvvl, "V256dLiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxsl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmaxsl_vsvmvl, "V256dLiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminsl_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminsl_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminsl_vsvl, "V256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminsl_vsvvl, "V256dLiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminsl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vminsl_vsvmvl, "V256dLiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vand_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vand_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vand_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vand_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vand_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vand_vsvmvl, "V256dLUiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvand_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvand_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvand_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvand_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvand_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvand_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vor_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vor_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vor_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vor_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vor_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vor_vsvmvl, "V256dLUiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvor_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvor_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvor_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvor_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvor_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvor_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vxor_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vxor_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vxor_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vxor_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vxor_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vxor_vsvmvl, "V256dLUiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvxor_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvxor_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvxor_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvxor_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvxor_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvxor_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_veqv_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_veqv_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_veqv_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_veqv_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_veqv_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_veqv_vsvmvl, "V256dLUiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pveqv_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pveqv_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pveqv_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pveqv_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pveqv_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pveqv_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldz_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vldz_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvldzlo_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvldzlo_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvldzlo_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvldzup_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvldzup_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvldzup_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvldz_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvldz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvldz_vvMvl, "V256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vpcnt_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vpcnt_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vpcnt_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvpcntlo_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvpcntlo_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvpcntlo_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvpcntup_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvpcntup_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvpcntup_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvpcnt_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvpcnt_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvpcnt_vvMvl, "V256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vbrv_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vbrv_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vbrv_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrvlo_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrvlo_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrvlo_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrvup_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrvup_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrvup_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrv_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrv_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvbrv_vvMvl, "V256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vseq_vl, "V256dUi", "n")
+BUILTIN(__builtin_ve_vl_vseq_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvseqlo_vl, "V256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvseqlo_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsequp_vl, "V256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsequp_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvseq_vl, "V256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvseq_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsll_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsll_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsll_vvsl, "V256dV256dLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vsll_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsll_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsll_vvsmvl, "V256dV256dLUiV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsll_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsll_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsll_vvsl, "V256dV256dLUiUi", "n")
+BUILTIN(__builtin_ve_vl_pvsll_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsll_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsll_vvsMvl, "V256dV256dLUiV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrl_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrl_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrl_vvsl, "V256dV256dLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vsrl_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrl_vvsmvl, "V256dV256dLUiV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsrl_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsrl_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsrl_vvsl, "V256dV256dLUiUi", "n")
+BUILTIN(__builtin_ve_vl_pvsrl_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsrl_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsrl_vvsMvl, "V256dV256dLUiV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslawsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslawsx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslawsx_vvsl, "V256dV256diUi", "n")
+BUILTIN(__builtin_ve_vl_vslawsx_vvsvl, "V256dV256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslawsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslawsx_vvsmvl, "V256dV256diV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslawzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslawzx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslawzx_vvsl, "V256dV256diUi", "n")
+BUILTIN(__builtin_ve_vl_vslawzx_vvsvl, "V256dV256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslawzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslawzx_vvsmvl, "V256dV256diV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsla_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsla_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsla_vvsl, "V256dV256dLUiUi", "n")
+BUILTIN(__builtin_ve_vl_pvsla_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsla_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsla_vvsMvl, "V256dV256dLUiV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslal_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslal_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslal_vvsl, "V256dV256dLiUi", "n")
+BUILTIN(__builtin_ve_vl_vslal_vvsvl, "V256dV256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslal_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vslal_vvsmvl, "V256dV256dLiV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawsx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawsx_vvsl, "V256dV256diUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawsx_vvsvl, "V256dV256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawsx_vvsmvl, "V256dV256diV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawzx_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawzx_vvsl, "V256dV256diUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawzx_vvsvl, "V256dV256diV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsrawzx_vvsmvl, "V256dV256diV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsra_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsra_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsra_vvsl, "V256dV256dLUiUi", "n")
+BUILTIN(__builtin_ve_vl_pvsra_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsra_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvsra_vvsMvl, "V256dV256dLUiV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsral_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsral_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsral_vvsl, "V256dV256dLiUi", "n")
+BUILTIN(__builtin_ve_vl_vsral_vvsvl, "V256dV256dLiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsral_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsral_vvsmvl, "V256dV256dLiV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsfa_vvssl, "V256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vsfa_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsfa_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfaddd_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfaddd_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfaddd_vsvl, "V256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfaddd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfaddd_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfaddd_vsvmvl, "V256ddV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfadds_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfadds_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfadds_vsvl, "V256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfadds_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfadds_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfadds_vsvmvl, "V256dfV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfadd_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfadd_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfadd_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfadd_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfadd_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfadd_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubd_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubd_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubd_vsvl, "V256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubd_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubd_vsvmvl, "V256ddV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubs_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubs_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubs_vsvl, "V256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubs_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubs_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsubs_vsvmvl, "V256dfV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfsub_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfsub_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfsub_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfsub_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfsub_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfsub_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuld_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuld_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuld_vsvl, "V256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuld_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuld_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuld_vsvmvl, "V256ddV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuls_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuls_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuls_vsvl, "V256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuls_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuls_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmuls_vsvmvl, "V256dfV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmul_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmul_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmul_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmul_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmul_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmul_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivd_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivd_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivd_vsvl, "V256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivd_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivd_vsvmvl, "V256ddV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivs_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivs_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivs_vsvl, "V256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivs_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivs_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfdivs_vsvmvl, "V256dfV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsqrtd_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsqrtd_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsqrts_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsqrts_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmpd_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmpd_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmpd_vsvl, "V256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmpd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmpd_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmpd_vsvmvl, "V256ddV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmps_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmps_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmps_vsvl, "V256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmps_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmps_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfcmps_vsvmvl, "V256dfV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfcmp_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfcmp_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfcmp_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfcmp_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfcmp_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfcmp_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxd_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxd_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxd_vsvl, "V256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxd_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxd_vsvmvl, "V256ddV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxs_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxs_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxs_vsvl, "V256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxs_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxs_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmaxs_vsvmvl, "V256dfV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmax_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmax_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmax_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmax_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmax_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmax_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmind_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmind_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmind_vsvl, "V256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmind_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmind_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmind_vsvmvl, "V256ddV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmins_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmins_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmins_vsvl, "V256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmins_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmins_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmins_vsvmvl, "V256dfV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmin_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmin_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmin_vsvl, "V256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmin_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmin_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmin_vsvMvl, "V256dLUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmadd_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmadd_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmadd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmadd_vsvvvl, "V256ddV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmadd_vvsvl, "V256dV256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmadd_vvsvvl, "V256dV256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmadd_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmadd_vsvvmvl, "V256ddV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmadd_vvsvmvl, "V256dV256ddV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmads_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmads_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmads_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmads_vsvvvl, "V256dfV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmads_vvsvl, "V256dV256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmads_vvsvvl, "V256dV256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmads_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmads_vsvvmvl, "V256dfV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmads_vvsvmvl, "V256dV256dfV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmad_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmad_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmad_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmad_vsvvvl, "V256dLUiV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmad_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmad_vvsvvl, "V256dV256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmad_vvvvMvl, "V256dV256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmad_vsvvMvl, "V256dLUiV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmad_vvsvMvl, "V256dV256dLUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbd_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbd_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbd_vsvvvl, "V256ddV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbd_vvsvl, "V256dV256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbd_vvsvvl, "V256dV256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbd_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbd_vsvvmvl, "V256ddV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbd_vvsvmvl, "V256dV256ddV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbs_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbs_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbs_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbs_vsvvvl, "V256dfV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbs_vvsvl, "V256dV256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbs_vvsvvl, "V256dV256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbs_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbs_vsvvmvl, "V256dfV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmsbs_vvsvmvl, "V256dV256dfV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmsb_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmsb_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmsb_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmsb_vsvvvl, "V256dLUiV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmsb_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmsb_vvsvvl, "V256dV256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmsb_vvvvMvl, "V256dV256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmsb_vsvvMvl, "V256dLUiV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmsb_vvsvMvl, "V256dV256dLUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmadd_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmadd_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmadd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmadd_vsvvvl, "V256ddV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmadd_vvsvl, "V256dV256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmadd_vvsvvl, "V256dV256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmadd_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmadd_vsvvmvl, "V256ddV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmadd_vvsvmvl, "V256dV256ddV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmads_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmads_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmads_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmads_vsvvvl, "V256dfV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmads_vvsvl, "V256dV256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmads_vvsvvl, "V256dV256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmads_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmads_vsvvmvl, "V256dfV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmads_vvsvmvl, "V256dV256dfV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmad_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmad_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmad_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmad_vsvvvl, "V256dLUiV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmad_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmad_vvsvvl, "V256dV256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmad_vvvvMvl, "V256dV256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmad_vsvvMvl, "V256dLUiV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmad_vvsvMvl, "V256dV256dLUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbd_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbd_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbd_vsvvl, "V256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbd_vsvvvl, "V256ddV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbd_vvsvl, "V256dV256ddV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbd_vvsvvl, "V256dV256ddV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbd_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbd_vsvvmvl, "V256ddV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbd_vvsvmvl, "V256dV256ddV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbs_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbs_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbs_vsvvl, "V256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbs_vsvvvl, "V256dfV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbs_vvsvl, "V256dV256dfV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbs_vvsvvl, "V256dV256dfV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbs_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbs_vsvvmvl, "V256dfV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfnmsbs_vvsvmvl, "V256dV256dfV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmsb_vvvvl, "V256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmsb_vvvvvl, "V256dV256dV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmsb_vsvvl, "V256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmsb_vsvvvl, "V256dLUiV256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmsb_vvsvl, "V256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmsb_vvsvvl, "V256dV256dLUiV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmsb_vvvvMvl, "V256dV256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmsb_vsvvMvl, "V256dLUiV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfnmsb_vvsvMvl, "V256dV256dLUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrcpd_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrcpd_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrcps_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrcps_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvrcp_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvrcp_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrsqrtd_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrsqrtd_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrsqrts_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrsqrts_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvrsqrt_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvrsqrt_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrsqrtdnex_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrsqrtdnex_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrsqrtsnex_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrsqrtsnex_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvrsqrtnex_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvrsqrtnex_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdsx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdsx_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdsxrz_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdsxrz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdsxrz_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdzx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdzx_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdzxrz_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdzxrz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwdzxrz_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwssx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwssx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwssx_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwssxrz_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwssxrz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwssxrz_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwszx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwszx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwszx_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwszxrz_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwszxrz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtwszxrz_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcvtws_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcvtws_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcvtws_vvMvl, "V256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcvtwsrz_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcvtwsrz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcvtwsrz_vvMvl, "V256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtld_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtld_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtld_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtldrz_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtldrz_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtldrz_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtdw_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtdw_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtsw_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtsw_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcvtsw_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvcvtsw_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtdl_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtdl_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtds_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtds_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtsd_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcvtsd_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmrg_vvvml, "V256dV256dV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vmrg_vvvmvl, "V256dV256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmrg_vsvml, "V256dLUiV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vmrg_vsvmvl, "V256dLUiV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmrgw_vvvMl, "V256dV256dV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_vmrgw_vvvMvl, "V256dV256dV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vmrgw_vsvMl, "V256dUiV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_vmrgw_vsvMvl, "V256dUiV256dV512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vshf_vvvsl, "V256dV256dV256dLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vshf_vvvsvl, "V256dV256dV256dLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vcp_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vex_vvmvl, "V256dV256dV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklat_ml, "V256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklaf_ml, "V256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkat_Ml, "V512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkaf_Ml, "V512bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklgt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklgt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkllt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkllt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklne_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklne_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkleq_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkleq_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklge_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklge_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklle_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklle_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklnum_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklnum_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklgtnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklgtnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklltnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklltnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklnenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklnenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkleqnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkleqnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklgenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmklgenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkllenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkllenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwgt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwgt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwlt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwlt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwne_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwne_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkweq_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkweq_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwge_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwge_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwle_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwle_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwnum_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwnum_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwgtnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwgtnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwltnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwltnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwnenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwnenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkweqnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkweqnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwgenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwgenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwlenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkwlenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlogt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupgt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlogt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupgt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlolt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwuplt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlolt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwuplt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlone_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupne_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlone_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupne_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwloeq_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupeq_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwloeq_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupeq_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwloge_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupge_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwloge_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupge_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlole_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwuple_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlole_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwuple_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlonum_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupnum_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlonum_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupnum_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlonan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlonan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlogtnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupgtnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlogtnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupgtnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwloltnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupltnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwloltnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupltnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlonenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupnenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlonenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupnenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwloeqnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupeqnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwloeqnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupeqnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlogenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupgenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlogenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwupgenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlolenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwuplenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlolenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwuplenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwgt_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwgt_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlt_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlt_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwne_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwne_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkweq_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkweq_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwge_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwge_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwle_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwle_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwnum_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwnum_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwnan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwnan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwgtnan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwgtnan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwltnan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwltnan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwnenan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwnenan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkweqnan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkweqnan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwgenan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwgenan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlenan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkwlenan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdgt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdgt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdlt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdlt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdne_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdne_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdeq_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdeq_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdge_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdge_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdle_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdle_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdnum_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdnum_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdgtnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdgtnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdltnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdltnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdnenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdnenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdeqnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdeqnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdgenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdgenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdlenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkdlenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksgt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksgt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkslt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkslt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksne_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksne_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkseq_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkseq_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksge_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksge_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksle_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksle_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksnum_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksnum_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksgtnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksgtnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksltnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksltnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksnenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksnenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkseqnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkseqnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksgenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmksgenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkslenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfmkslenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslogt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupgt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslogt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupgt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslolt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksuplt_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslolt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksuplt_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslone_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupne_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslone_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupne_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksloeq_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupeq_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksloeq_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupeq_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksloge_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupge_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksloge_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupge_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslole_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksuple_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslole_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksuple_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslonum_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupnum_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslonum_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupnum_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslonan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslonan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslogtnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupgtnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslogtnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupgtnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksloltnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupltnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksloltnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupltnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslonenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupnenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslonenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupnenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksloeqnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupeqnan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksloeqnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupeqnan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslogenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupgenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslogenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksupgenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslolenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksuplenan_mvl, "V256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslolenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksuplenan_mvml, "V256bV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksgt_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksgt_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslt_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslt_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksne_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksne_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkseq_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkseq_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksge_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksge_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksle_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksle_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksnum_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksnum_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksnan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksnan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksgtnan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksgtnan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksltnan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksltnan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksnenan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksnenan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkseqnan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkseqnan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksgenan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmksgenan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslenan_Mvl, "V512bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_pvfmkslenan_MvMl, "V512bV256dV512bUi", "n")
+BUILTIN(__builtin_ve_vl_vsumwsx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsumwsx_vvml, "V256dV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vsumwzx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsumwzx_vvml, "V256dV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vsuml_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsuml_vvml, "V256dV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfsumd_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsumd_vvml, "V256dV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vfsums_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfsums_vvml, "V256dV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxswfstsx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxswfstsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxswlstsx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxswlstsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxswfstzx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxswfstzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxswlstzx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxswlstzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminswfstsx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminswfstsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminswlstsx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminswlstsx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminswfstzx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminswfstzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminswlstzx_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminswlstzx_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxslfst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxslfst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxsllst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrmaxsllst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminslfst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminslfst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminsllst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrminsllst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmaxdfst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmaxdfst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmaxdlst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmaxdlst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmaxsfst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmaxsfst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmaxslst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmaxslst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmindfst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmindfst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmindlst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrmindlst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrminsfst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrminsfst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrminslst_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vfrminslst_vvvl, "V256dV256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrand_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrand_vvml, "V256dV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vror_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vror_vvml, "V256dV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vrxor_vvl, "V256dV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vrxor_vvml, "V256dV256dV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vgt_vvssl, "V256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vgt_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgt_vvssml, "V256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vgt_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtnc_vvssl, "V256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vgtnc_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtnc_vvssml, "V256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vgtnc_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtu_vvssl, "V256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vgtu_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtu_vvssml, "V256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vgtu_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtunc_vvssl, "V256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vgtunc_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtunc_vvssml, "V256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vgtunc_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlsx_vvssl, "V256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlsx_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlsx_vvssml, "V256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlsx_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlsxnc_vvssl, "V256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlsxnc_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlsxnc_vvssml, "V256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlsxnc_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlzx_vvssl, "V256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlzx_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlzx_vvssml, "V256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlzx_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlzxnc_vvssl, "V256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlzxnc_vvssvl, "V256dV256dLUiLUiV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlzxnc_vvssml, "V256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vgtlzxnc_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n")
+BUILTIN(__builtin_ve_vl_vsc_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vsc_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vscnc_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscnc_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vscot_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscot_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vscncot_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscncot_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vscu_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscu_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vscunc_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscunc_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vscuot_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscuot_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vscuncot_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscuncot_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vscl_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vscl_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vsclnc_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vsclnc_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vsclot_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vsclot_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_vsclncot_vvssl, "vV256dV256dLUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_vsclncot_vvssml, "vV256dV256dLUiLUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_andm_mmm, "V256bV256bV256b", "n")
+BUILTIN(__builtin_ve_vl_andm_MMM, "V512bV512bV512b", "n")
+BUILTIN(__builtin_ve_vl_orm_mmm, "V256bV256bV256b", "n")
+BUILTIN(__builtin_ve_vl_orm_MMM, "V512bV512bV512b", "n")
+BUILTIN(__builtin_ve_vl_xorm_mmm, "V256bV256bV256b", "n")
+BUILTIN(__builtin_ve_vl_xorm_MMM, "V512bV512bV512b", "n")
+BUILTIN(__builtin_ve_vl_eqvm_mmm, "V256bV256bV256b", "n")
+BUILTIN(__builtin_ve_vl_eqvm_MMM, "V512bV512bV512b", "n")
+BUILTIN(__builtin_ve_vl_nndm_mmm, "V256bV256bV256b", "n")
+BUILTIN(__builtin_ve_vl_nndm_MMM, "V512bV512bV512b", "n")
+BUILTIN(__builtin_ve_vl_negm_mm, "V256bV256b", "n")
+BUILTIN(__builtin_ve_vl_negm_MM, "V512bV512b", "n")
+BUILTIN(__builtin_ve_vl_pcvm_sml, "LUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_lzvm_sml, "LUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_tovm_sml, "LUiV256bUi", "n")
+BUILTIN(__builtin_ve_vl_lcr_sss, "LUiLUiLUi", "n")
+BUILTIN(__builtin_ve_vl_scr_sss, "vLUiLUiLUi", "n")
+BUILTIN(__builtin_ve_vl_tscr_ssss, "LUiLUiLUiLUi", "n")
+BUILTIN(__builtin_ve_vl_fidcr_sss, "LUiLUiUi", "n")
+BUILTIN(__builtin_ve_vl_fencei, "v", "n")
+BUILTIN(__builtin_ve_vl_fencem_s, "vUi", "n")
+BUILTIN(__builtin_ve_vl_fencec_s, "vUi", "n")
+BUILTIN(__builtin_ve_vl_svob, "v", "n")
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsWebAssembly.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsWebAssembly.def
index 04ec45aa3b74..7e950914ad94 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsWebAssembly.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsWebAssembly.def
@@ -119,18 +119,22 @@ TARGET_BUILTIN(__builtin_wasm_all_true_i16x8, "iV8s", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_all_true_i32x4, "iV4i", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_all_true_i64x2, "iV2LLi", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_bitmask_i8x16, "iV16Sc", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_bitmask_i16x8, "iV8s", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_bitmask_i32x4, "iV4i", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_bitmask_i64x2, "iV2LLi", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_bitmask_i8x16, "UiV16Sc", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_bitmask_i16x8, "UiV8s", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_bitmask_i32x4, "UiV4i", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_bitmask_i64x2, "UiV2LLi", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_abs_f32x4, "V4fV4f", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_abs_f64x2, "V2dV2d", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_min_f32x4, "V4fV4fV4f", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_max_f32x4, "V4fV4fV4f", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_pmin_f32x4, "V4fV4fV4f", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_pmax_f32x4, "V4fV4fV4f", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_min_f64x2, "V2dV2dV2d", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_max_f64x2, "V2dV2dV2d", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_pmin_f64x2, "V2dV2dV2d", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_pmax_f64x2, "V2dV2dV2d", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_ceil_f32x4, "V4fV4f", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_floor_f32x4, "V4fV4f", "nc", "simd128")
@@ -154,8 +158,56 @@ TARGET_BUILTIN(__builtin_wasm_narrow_u_i8x16_i16x8, "V16UcV8sV8s", "nc", "simd12
TARGET_BUILTIN(__builtin_wasm_narrow_s_i16x8_i32x4, "V8sV4iV4i", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_narrow_u_i16x8_i32x4, "V8UsV4iV4i", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4, "V4iV2d", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4, "V4UiV2d", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_trunc_sat_s_zero_f64x2_i32x4, "V4iV2d", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_trunc_sat_u_zero_f64x2_i32x4, "V4UiV2d", "nc", "simd128")
+
+// Relaxed SIMD builtins
+TARGET_BUILTIN(__builtin_wasm_relaxed_madd_f32x4, "V4fV4fV4fV4f", "nc", "relaxed-simd")
+TARGET_BUILTIN(__builtin_wasm_relaxed_nmadd_f32x4, "V4fV4fV4fV4f", "nc", "relaxed-simd")
+TARGET_BUILTIN(__builtin_wasm_relaxed_madd_f64x2, "V2dV2dV2dV2d", "nc", "relaxed-simd")
+TARGET_BUILTIN(__builtin_wasm_relaxed_nmadd_f64x2, "V2dV2dV2dV2d", "nc", "relaxed-simd")
+
+TARGET_BUILTIN(__builtin_wasm_relaxed_laneselect_i8x16, "V16ScV16ScV16ScV16Sc", "nc", "relaxed-simd")
+TARGET_BUILTIN(__builtin_wasm_relaxed_laneselect_i16x8, "V8sV8sV8sV8s", "nc", "relaxed-simd")
+TARGET_BUILTIN(__builtin_wasm_relaxed_laneselect_i32x4, "V4iV4iV4iV4i", "nc", "relaxed-simd")
+TARGET_BUILTIN(__builtin_wasm_relaxed_laneselect_i64x2, "V2LLiV2LLiV2LLiV2LLi", "nc", "relaxed-simd")
+
+TARGET_BUILTIN(__builtin_wasm_relaxed_swizzle_i8x16, "V16ScV16ScV16Sc", "nc", "relaxed-simd")
+
+TARGET_BUILTIN(__builtin_wasm_relaxed_min_f32x4, "V4fV4fV4f", "nc", "relaxed-simd")
+TARGET_BUILTIN(__builtin_wasm_relaxed_max_f32x4, "V4fV4fV4f", "nc", "relaxed-simd")
+TARGET_BUILTIN(__builtin_wasm_relaxed_min_f64x2, "V2dV2dV2d", "nc", "relaxed-simd")
+TARGET_BUILTIN(__builtin_wasm_relaxed_max_f64x2, "V2dV2dV2d", "nc", "relaxed-simd")
+
+TARGET_BUILTIN(__builtin_wasm_relaxed_trunc_s_i32x4_f32x4, "V4iV4f", "nc", "relaxed-simd")
+TARGET_BUILTIN(__builtin_wasm_relaxed_trunc_u_i32x4_f32x4, "V4UiV4f", "nc", "relaxed-simd")
+TARGET_BUILTIN(__builtin_wasm_relaxed_trunc_s_zero_i32x4_f64x2, "V4iV2d", "nc", "relaxed-simd")
+TARGET_BUILTIN(__builtin_wasm_relaxed_trunc_u_zero_i32x4_f64x2, "V4UiV2d", "nc", "relaxed-simd")
+
+TARGET_BUILTIN(__builtin_wasm_relaxed_q15mulr_s_i16x8, "V8sV8sV8s", "nc", "relaxed-simd")
+
+TARGET_BUILTIN(__builtin_wasm_relaxed_dot_i8x16_i7x16_s_i16x8, "V8sV16ScV16Sc", "nc", "relaxed-simd")
+TARGET_BUILTIN(__builtin_wasm_relaxed_dot_i8x16_i7x16_add_s_i32x4, "V4iV16ScV16ScV4i", "nc", "relaxed-simd")
+TARGET_BUILTIN(__builtin_wasm_relaxed_dot_bf16x8_add_f32_f32x4, "V4fV8UsV8UsV4f", "nc", "relaxed-simd")
+
+// Reference Types builtins
+// Some builtins are custom type-checked - see 't' as part of the third argument,
+// in which case the argument spec (second argument) is unused.
+
+TARGET_BUILTIN(__builtin_wasm_ref_null_extern, "i", "nct", "reference-types")
+
+// A funcref represented as a function pointer with the funcref attribute
+// attached to the type, therefore SemaChecking will check for the right
+// return type.
+TARGET_BUILTIN(__builtin_wasm_ref_null_func, "i", "nct", "reference-types")
+
+// Table builtins
+TARGET_BUILTIN(__builtin_wasm_table_set, "viii", "t", "reference-types")
+TARGET_BUILTIN(__builtin_wasm_table_get, "iii", "t", "reference-types")
+TARGET_BUILTIN(__builtin_wasm_table_size, "zi", "nt", "reference-types")
+TARGET_BUILTIN(__builtin_wasm_table_grow, "iiii", "nt", "reference-types")
+TARGET_BUILTIN(__builtin_wasm_table_fill, "viiii", "t", "reference-types")
+TARGET_BUILTIN(__builtin_wasm_table_copy, "viiiii", "t", "reference-types")
#undef BUILTIN
#undef TARGET_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86.def
index 18e541fe9cb5..60b752ad4854 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86.def
@@ -254,25 +254,14 @@ TARGET_BUILTIN(__builtin_ia32_minpd, "V2dV2dV2d", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_maxpd, "V2dV2dV2d", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_minsd, "V2dV2dV2d", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_maxsd, "V2dV2dV2d", "ncV:128:", "sse2")
-TARGET_BUILTIN(__builtin_ia32_paddsb128, "V16cV16cV16c", "ncV:128:", "sse2")
-TARGET_BUILTIN(__builtin_ia32_paddsw128, "V8sV8sV8s", "ncV:128:", "sse2")
-TARGET_BUILTIN(__builtin_ia32_psubsb128, "V16cV16cV16c", "ncV:128:", "sse2")
-TARGET_BUILTIN(__builtin_ia32_psubsw128, "V8sV8sV8s", "ncV:128:", "sse2")
-TARGET_BUILTIN(__builtin_ia32_paddusb128, "V16cV16cV16c", "ncV:128:", "sse2")
-TARGET_BUILTIN(__builtin_ia32_paddusw128, "V8sV8sV8s", "ncV:128:", "sse2")
-TARGET_BUILTIN(__builtin_ia32_psubusb128, "V16cV16cV16c", "ncV:128:", "sse2")
-TARGET_BUILTIN(__builtin_ia32_psubusw128, "V8sV8sV8s", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_pmulhw128, "V8sV8sV8s", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_pavgb128, "V16cV16cV16c", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_pavgw128, "V8sV8sV8s", "ncV:128:", "sse2")
-TARGET_BUILTIN(__builtin_ia32_pmaxub128, "V16cV16cV16c", "ncV:128:", "sse2")
-TARGET_BUILTIN(__builtin_ia32_pmaxsw128, "V8sV8sV8s", "ncV:128:", "sse2")
-TARGET_BUILTIN(__builtin_ia32_pminub128, "V16cV16cV16c", "ncV:128:", "sse2")
-TARGET_BUILTIN(__builtin_ia32_pminsw128, "V8sV8sV8s", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_packsswb128, "V16cV8sV8s", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_packssdw128, "V8sV4iV4i", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_packuswb128, "V16cV8sV8s", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_pmulhuw128, "V8sV8sV8s", "ncV:128:", "sse2")
+TARGET_BUILTIN(__builtin_ia32_vec_ext_v2di, "OiV2OiIi", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_vec_ext_v4si, "iV4iIi", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_vec_ext_v4sf, "fV4fIi", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_vec_ext_v8hi, "sV8sIi", "ncV:128:", "sse2")
@@ -296,19 +285,16 @@ TARGET_BUILTIN(__builtin_ia32_pshufb128, "V16cV16cV16c", "ncV:128:", "ssse3")
TARGET_BUILTIN(__builtin_ia32_psignb128, "V16cV16cV16c", "ncV:128:", "ssse3")
TARGET_BUILTIN(__builtin_ia32_psignw128, "V8sV8sV8s", "ncV:128:", "ssse3")
TARGET_BUILTIN(__builtin_ia32_psignd128, "V4iV4iV4i", "ncV:128:", "ssse3")
-TARGET_BUILTIN(__builtin_ia32_pabsb128, "V16cV16c", "ncV:128:", "ssse3")
-TARGET_BUILTIN(__builtin_ia32_pabsw128, "V8sV8s", "ncV:128:", "ssse3")
-TARGET_BUILTIN(__builtin_ia32_pabsd128, "V4iV4i", "ncV:128:", "ssse3")
TARGET_BUILTIN(__builtin_ia32_ldmxcsr, "vUi", "n", "sse")
-TARGET_HEADER_BUILTIN(_mm_setcsr, "vUi", "nh","xmmintrin.h", ALL_LANGUAGES, "sse")
+TARGET_HEADER_BUILTIN(_mm_setcsr, "vUi", "nh",XMMINTRIN_H, ALL_LANGUAGES, "sse")
TARGET_BUILTIN(__builtin_ia32_stmxcsr, "Ui", "n", "sse")
-TARGET_HEADER_BUILTIN(_mm_getcsr, "Ui", "nh", "xmmintrin.h", ALL_LANGUAGES, "sse")
+TARGET_HEADER_BUILTIN(_mm_getcsr, "Ui", "nh", XMMINTRIN_H, ALL_LANGUAGES, "sse")
TARGET_BUILTIN(__builtin_ia32_cvtss2si, "iV4f", "ncV:128:", "sse")
TARGET_BUILTIN(__builtin_ia32_cvttss2si, "iV4f", "ncV:128:", "sse")
TARGET_BUILTIN(__builtin_ia32_movmskps, "iV4f", "nV:128:", "sse")
TARGET_BUILTIN(__builtin_ia32_sfence, "v", "n", "sse")
-TARGET_HEADER_BUILTIN(_mm_sfence, "v", "nh", "xmmintrin.h", ALL_LANGUAGES, "sse")
+TARGET_HEADER_BUILTIN(_mm_sfence, "v", "nh", XMMINTRIN_H, ALL_LANGUAGES, "sse")
TARGET_BUILTIN(__builtin_ia32_rcpps, "V4fV4f", "ncV:128:", "sse")
TARGET_BUILTIN(__builtin_ia32_rcpss, "V4fV4f", "ncV:128:", "sse")
TARGET_BUILTIN(__builtin_ia32_rsqrtps, "V4fV4f", "ncV:128:", "sse")
@@ -337,13 +323,13 @@ TARGET_BUILTIN(__builtin_ia32_cvtsd2ss, "V4fV4fV2d", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_cvtps2dq, "V4iV4f", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_cvttps2dq, "V4iV4f", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_clflush, "vvC*", "n", "sse2")
-TARGET_HEADER_BUILTIN(_mm_clflush, "vvC*", "nh", "emmintrin.h", ALL_LANGUAGES, "sse2")
+TARGET_HEADER_BUILTIN(_mm_clflush, "vvC*", "nh", EMMINTRIN_H, ALL_LANGUAGES, "sse2")
TARGET_BUILTIN(__builtin_ia32_lfence, "v", "n", "sse2")
-TARGET_HEADER_BUILTIN(_mm_lfence, "v", "nh", "emmintrin.h", ALL_LANGUAGES, "sse2")
+TARGET_HEADER_BUILTIN(_mm_lfence, "v", "nh", EMMINTRIN_H, ALL_LANGUAGES, "sse2")
TARGET_BUILTIN(__builtin_ia32_mfence, "v", "n", "sse2")
-TARGET_HEADER_BUILTIN(_mm_mfence, "v", "nh", "emmintrin.h", ALL_LANGUAGES, "sse2")
+TARGET_HEADER_BUILTIN(_mm_mfence, "v", "nh", EMMINTRIN_H, ALL_LANGUAGES, "sse2")
TARGET_BUILTIN(__builtin_ia32_pause, "v", "n", "")
-TARGET_HEADER_BUILTIN(_mm_pause, "v", "nh", "emmintrin.h", ALL_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_mm_pause, "v", "nh", EMMINTRIN_H, ALL_LANGUAGES, "")
TARGET_BUILTIN(__builtin_ia32_pmuludq128, "V2OiV4iV4i", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_psraw128, "V8sV8sV8s", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_psrad128, "V4iV4iV4i", "ncV:128:", "sse2")
@@ -380,14 +366,6 @@ TARGET_BUILTIN(__builtin_ia32_blendvpd, "V2dV2dV2dV2d", "ncV:128:", "sse4.1")
TARGET_BUILTIN(__builtin_ia32_blendvps, "V4fV4fV4fV4f", "ncV:128:", "sse4.1")
TARGET_BUILTIN(__builtin_ia32_packusdw128, "V8sV4iV4i", "ncV:128:", "sse4.1")
-TARGET_BUILTIN(__builtin_ia32_pmaxsb128, "V16cV16cV16c", "ncV:128:", "sse4.1")
-TARGET_BUILTIN(__builtin_ia32_pmaxsd128, "V4iV4iV4i", "ncV:128:", "sse4.1")
-TARGET_BUILTIN(__builtin_ia32_pmaxud128, "V4iV4iV4i", "ncV:128:", "sse4.1")
-TARGET_BUILTIN(__builtin_ia32_pmaxuw128, "V8sV8sV8s", "ncV:128:", "sse4.1")
-TARGET_BUILTIN(__builtin_ia32_pminsb128, "V16cV16cV16c", "ncV:128:", "sse4.1")
-TARGET_BUILTIN(__builtin_ia32_pminsd128, "V4iV4iV4i", "ncV:128:", "sse4.1")
-TARGET_BUILTIN(__builtin_ia32_pminud128, "V4iV4iV4i", "ncV:128:", "sse4.1")
-TARGET_BUILTIN(__builtin_ia32_pminuw128, "V8sV8sV8s", "ncV:128:", "sse4.1")
TARGET_BUILTIN(__builtin_ia32_pmuldq128, "V2OiV4iV4i", "ncV:128:", "sse4.1")
TARGET_BUILTIN(__builtin_ia32_roundps, "V4fV4fIi", "ncV:128:", "sse4.1")
TARGET_BUILTIN(__builtin_ia32_roundss, "V4fV4fV4fIi", "ncV:128:", "sse4.1")
@@ -421,9 +399,9 @@ TARGET_BUILTIN(__builtin_ia32_pcmpestrio128, "iV16ciV16ciIc","ncV:128:", "sse4.2
TARGET_BUILTIN(__builtin_ia32_pcmpestris128, "iV16ciV16ciIc","ncV:128:", "sse4.2")
TARGET_BUILTIN(__builtin_ia32_pcmpestriz128, "iV16ciV16ciIc","ncV:128:", "sse4.2")
-TARGET_BUILTIN(__builtin_ia32_crc32qi, "UiUiUc", "nc", "sse4.2")
-TARGET_BUILTIN(__builtin_ia32_crc32hi, "UiUiUs", "nc", "sse4.2")
-TARGET_BUILTIN(__builtin_ia32_crc32si, "UiUiUi", "nc", "sse4.2")
+TARGET_BUILTIN(__builtin_ia32_crc32qi, "UiUiUc", "nc", "crc32")
+TARGET_BUILTIN(__builtin_ia32_crc32hi, "UiUiUs", "nc", "crc32")
+TARGET_BUILTIN(__builtin_ia32_crc32si, "UiUiUi", "nc", "crc32")
// SSE4a
TARGET_BUILTIN(__builtin_ia32_extrqi, "V2OiV2OiIcIc", "ncV:128:", "sse4a")
@@ -443,31 +421,31 @@ TARGET_BUILTIN(__builtin_ia32_aeskeygenassist128, "V2OiV2OiIc", "ncV:128:", "aes
// VAES
TARGET_BUILTIN(__builtin_ia32_aesenc256, "V4OiV4OiV4Oi", "ncV:256:", "vaes")
-TARGET_BUILTIN(__builtin_ia32_aesenc512, "V8OiV8OiV8Oi", "ncV:512:", "avx512f,vaes")
+TARGET_BUILTIN(__builtin_ia32_aesenc512, "V8OiV8OiV8Oi", "ncV:512:", "avx512f,evex512,vaes")
TARGET_BUILTIN(__builtin_ia32_aesenclast256, "V4OiV4OiV4Oi", "ncV:256:", "vaes")
-TARGET_BUILTIN(__builtin_ia32_aesenclast512, "V8OiV8OiV8Oi", "ncV:512:", "avx512f,vaes")
+TARGET_BUILTIN(__builtin_ia32_aesenclast512, "V8OiV8OiV8Oi", "ncV:512:", "avx512f,evex512,vaes")
TARGET_BUILTIN(__builtin_ia32_aesdec256, "V4OiV4OiV4Oi", "ncV:256:", "vaes")
-TARGET_BUILTIN(__builtin_ia32_aesdec512, "V8OiV8OiV8Oi", "ncV:512:", "avx512f,vaes")
+TARGET_BUILTIN(__builtin_ia32_aesdec512, "V8OiV8OiV8Oi", "ncV:512:", "avx512f,evex512,vaes")
TARGET_BUILTIN(__builtin_ia32_aesdeclast256, "V4OiV4OiV4Oi", "ncV:256:", "vaes")
-TARGET_BUILTIN(__builtin_ia32_aesdeclast512, "V8OiV8OiV8Oi", "ncV:512:", "avx512f,vaes")
+TARGET_BUILTIN(__builtin_ia32_aesdeclast512, "V8OiV8OiV8Oi", "ncV:512:", "avx512f,evex512,vaes")
// GFNI
TARGET_BUILTIN(__builtin_ia32_vgf2p8affineinvqb_v16qi, "V16cV16cV16cIc", "ncV:128:", "gfni")
TARGET_BUILTIN(__builtin_ia32_vgf2p8affineinvqb_v32qi, "V32cV32cV32cIc", "ncV:256:", "avx,gfni")
-TARGET_BUILTIN(__builtin_ia32_vgf2p8affineinvqb_v64qi, "V64cV64cV64cIc", "ncV:512:", "avx512bw,gfni")
+TARGET_BUILTIN(__builtin_ia32_vgf2p8affineinvqb_v64qi, "V64cV64cV64cIc", "ncV:512:", "avx512f,evex512,gfni")
TARGET_BUILTIN(__builtin_ia32_vgf2p8affineqb_v16qi, "V16cV16cV16cIc", "ncV:128:", "gfni")
TARGET_BUILTIN(__builtin_ia32_vgf2p8affineqb_v32qi, "V32cV32cV32cIc", "ncV:256:", "avx,gfni")
-TARGET_BUILTIN(__builtin_ia32_vgf2p8affineqb_v64qi, "V64cV64cV64cIc", "ncV:512:", "avx512bw,gfni")
+TARGET_BUILTIN(__builtin_ia32_vgf2p8affineqb_v64qi, "V64cV64cV64cIc", "ncV:512:", "avx512f,evex512,gfni")
TARGET_BUILTIN(__builtin_ia32_vgf2p8mulb_v16qi, "V16cV16cV16c", "ncV:128:", "gfni")
TARGET_BUILTIN(__builtin_ia32_vgf2p8mulb_v32qi, "V32cV32cV32c", "ncV:256:", "avx,gfni")
-TARGET_BUILTIN(__builtin_ia32_vgf2p8mulb_v64qi, "V64cV64cV64c", "ncV:512:", "avx512bw,gfni")
+TARGET_BUILTIN(__builtin_ia32_vgf2p8mulb_v64qi, "V64cV64cV64c", "ncV:512:", "avx512f,evex512,gfni")
// CLMUL
TARGET_BUILTIN(__builtin_ia32_pclmulqdq128, "V2OiV2OiV2OiIc", "ncV:128:", "pclmul")
// VPCLMULQDQ
TARGET_BUILTIN(__builtin_ia32_pclmulqdq256, "V4OiV4OiV4OiIc", "ncV:256:", "vpclmulqdq")
-TARGET_BUILTIN(__builtin_ia32_pclmulqdq512, "V8OiV8OiV8OiIc", "ncV:512:", "avx512f,vpclmulqdq")
+TARGET_BUILTIN(__builtin_ia32_pclmulqdq512, "V8OiV8OiV8OiIc", "ncV:512:", "avx512f,evex512,vpclmulqdq")
// AVX
TARGET_BUILTIN(__builtin_ia32_addsubpd256, "V4dV4dV4d", "ncV:256:", "avx")
@@ -558,21 +536,10 @@ TARGET_BUILTIN(__builtin_ia32_vec_set_v8si, "V8iV8iiIi", "ncV:256:", "avx")
// AVX2
TARGET_BUILTIN(__builtin_ia32_mpsadbw256, "V32cV32cV32cIc", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pabsb256, "V32cV32c", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pabsw256, "V16sV16s", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pabsd256, "V8iV8i", "ncV:256:", "avx2")
TARGET_BUILTIN(__builtin_ia32_packsswb256, "V32cV16sV16s", "ncV:256:", "avx2")
TARGET_BUILTIN(__builtin_ia32_packssdw256, "V16sV8iV8i", "ncV:256:", "avx2")
TARGET_BUILTIN(__builtin_ia32_packuswb256, "V32cV16sV16s", "ncV:256:", "avx2")
TARGET_BUILTIN(__builtin_ia32_packusdw256, "V16sV8iV8i", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_paddsb256, "V32cV32cV32c", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_paddsw256, "V16sV16sV16s", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_psubsb256, "V32cV32cV32c", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_psubsw256, "V16sV16sV16s", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_paddusb256, "V32cV32cV32c", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_paddusw256, "V16sV16sV16s", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_psubusb256, "V32cV32cV32c", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_psubusw256, "V16sV16sV16s", "ncV:256:", "avx2")
TARGET_BUILTIN(__builtin_ia32_palignr256, "V32cV32cV32cIi", "ncV:256:", "avx2")
TARGET_BUILTIN(__builtin_ia32_pavgb256, "V32cV32cV32c", "ncV:256:", "avx2")
TARGET_BUILTIN(__builtin_ia32_pavgw256, "V16sV16sV16s", "ncV:256:", "avx2")
@@ -586,18 +553,6 @@ TARGET_BUILTIN(__builtin_ia32_phsubd256, "V8iV8iV8i", "ncV:256:", "avx2")
TARGET_BUILTIN(__builtin_ia32_phsubsw256, "V16sV16sV16s", "ncV:256:", "avx2")
TARGET_BUILTIN(__builtin_ia32_pmaddubsw256, "V16sV32cV32c", "ncV:256:", "avx2")
TARGET_BUILTIN(__builtin_ia32_pmaddwd256, "V8iV16sV16s", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pmaxub256, "V32cV32cV32c", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pmaxuw256, "V16sV16sV16s", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pmaxud256, "V8iV8iV8i", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pmaxsb256, "V32cV32cV32c", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pmaxsw256, "V16sV16sV16s", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pmaxsd256, "V8iV8iV8i", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pminub256, "V32cV32cV32c", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pminuw256, "V16sV16sV16s", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pminud256, "V8iV8iV8i", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pminsb256, "V32cV32cV32c", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pminsw256, "V16sV16sV16s", "ncV:256:", "avx2")
-TARGET_BUILTIN(__builtin_ia32_pminsd256, "V8iV8iV8i", "ncV:256:", "avx2")
TARGET_BUILTIN(__builtin_ia32_pmovmskb256, "iV32c", "ncV:256:", "avx2")
TARGET_BUILTIN(__builtin_ia32_pmuldq256, "V4OiV8iV8i", "ncV:256:", "avx2")
TARGET_BUILTIN(__builtin_ia32_pmulhrsw256, "V16sV16sV16s", "ncV:256:", "avx2")
@@ -695,9 +650,9 @@ TARGET_BUILTIN(__builtin_ia32_fxsave, "vv*", "n", "fxsr")
TARGET_BUILTIN(__builtin_ia32_xsave, "vv*UOi", "n", "xsave")
TARGET_BUILTIN(__builtin_ia32_xrstor, "vv*UOi", "n", "xsave")
TARGET_BUILTIN(__builtin_ia32_xgetbv, "UOiUi", "n", "xsave")
-TARGET_HEADER_BUILTIN(_xgetbv, "UWiUi", "nh", "immintrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_xgetbv, "UWiUi", "nh", IMMINTRIN_H, ALL_MS_LANGUAGES, "")
TARGET_BUILTIN(__builtin_ia32_xsetbv, "vUiUOi", "n", "xsave")
-TARGET_HEADER_BUILTIN(_xsetbv, "vUiUWi", "nh", "immintrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_xsetbv, "vUiUWi", "nh", IMMINTRIN_H, ALL_MS_LANGUAGES, "")
TARGET_BUILTIN(__builtin_ia32_xsaveopt, "vv*UOi", "n", "xsaveopt")
TARGET_BUILTIN(__builtin_ia32_xrstors, "vv*UOi", "n", "xsaves")
TARGET_BUILTIN(__builtin_ia32_xsavec, "vv*UOi", "n", "xsavec")
@@ -777,22 +732,22 @@ TARGET_BUILTIN(__builtin_ia32_vfmaddpd256, "V4dV4dV4dV4d", "ncV:256:", "fma|fma4
TARGET_BUILTIN(__builtin_ia32_vfmaddsubps256, "V8fV8fV8fV8f", "ncV:256:", "fma|fma4")
TARGET_BUILTIN(__builtin_ia32_vfmaddsubpd256, "V4dV4dV4dV4d", "ncV:256:", "fma|fma4")
-TARGET_BUILTIN(__builtin_ia32_vfmaddpd512_mask, "V8dV8dV8dV8dUcIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_vfmaddpd512_maskz, "V8dV8dV8dV8dUcIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_vfmaddpd512_mask3, "V8dV8dV8dV8dUcIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_vfmsubpd512_mask3, "V8dV8dV8dV8dUcIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_vfmaddps512_mask, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_vfmaddps512_maskz, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_vfmaddps512_mask3, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_vfmsubps512_mask3, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_vfmaddsubpd512_mask, "V8dV8dV8dV8dUcIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_vfmaddsubpd512_maskz, "V8dV8dV8dV8dUcIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_vfmaddsubpd512_mask3, "V8dV8dV8dV8dUcIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_vfmsubaddpd512_mask3, "V8dV8dV8dV8dUcIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_vfmaddsubps512_mask, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_vfmaddsubps512_maskz, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_vfmaddsubps512_mask3, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_vfmsubaddps512_mask3, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_vfmaddpd512_mask, "V8dV8dV8dV8dUcIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmaddpd512_maskz, "V8dV8dV8dV8dUcIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmaddpd512_mask3, "V8dV8dV8dV8dUcIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmsubpd512_mask3, "V8dV8dV8dV8dUcIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmaddps512_mask, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmaddps512_maskz, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmaddps512_mask3, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmsubps512_mask3, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmaddsubpd512_mask, "V8dV8dV8dV8dUcIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmaddsubpd512_maskz, "V8dV8dV8dV8dUcIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmaddsubpd512_mask3, "V8dV8dV8dV8dUcIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmsubaddpd512_mask3, "V8dV8dV8dV8dUcIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmaddsubps512_mask, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmaddsubps512_maskz, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmaddsubps512_mask3, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmsubaddps512_mask3, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512f,evex512")
// XOP
TARGET_BUILTIN(__builtin_ia32_vpmacssww, "V8sV8sV8sV8s", "ncV:128:", "xop")
@@ -870,108 +825,114 @@ BUILTIN(__rdtsc, "UOi", "")
BUILTIN(__builtin_ia32_rdtscp, "UOiUi*", "")
TARGET_BUILTIN(__builtin_ia32_rdpid, "Ui", "n", "rdpid")
+TARGET_BUILTIN(__builtin_ia32_rdpru, "ULLii", "n", "rdpru")
// PKU
TARGET_BUILTIN(__builtin_ia32_rdpkru, "Ui", "n", "pku")
TARGET_BUILTIN(__builtin_ia32_wrpkru, "vUi", "n", "pku")
// AVX-512
-TARGET_BUILTIN(__builtin_ia32_sqrtpd512, "V8dV8dIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_sqrtps512, "V16fV16fIi", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_sqrtpd512, "V8dV8dIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_sqrtps512, "V16fV16fIi", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_rsqrt14sd_mask, "V2dV2dV2dV2dUc", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_rsqrt14ss_mask, "V4fV4fV4fV4fUc", "ncV:128:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_rsqrt14pd512_mask, "V8dV8dV8dUc", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_rsqrt14ps512_mask, "V16fV16fV16fUs", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_rsqrt14pd512_mask, "V8dV8dV8dUc", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_rsqrt14ps512_mask, "V16fV16fV16fUs", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_rsqrt28sd_round_mask, "V2dV2dV2dV2dUcIi", "ncV:128:", "avx512er")
TARGET_BUILTIN(__builtin_ia32_rsqrt28ss_round_mask, "V4fV4fV4fV4fUcIi", "ncV:128:", "avx512er")
-TARGET_BUILTIN(__builtin_ia32_rsqrt28pd_mask, "V8dV8dV8dUcIi", "ncV:512:", "avx512er")
-TARGET_BUILTIN(__builtin_ia32_rsqrt28ps_mask, "V16fV16fV16fUsIi", "ncV:512:", "avx512er")
+TARGET_BUILTIN(__builtin_ia32_rsqrt28pd_mask, "V8dV8dV8dUcIi", "ncV:512:", "avx512er,evex512")
+TARGET_BUILTIN(__builtin_ia32_rsqrt28ps_mask, "V16fV16fV16fUsIi", "ncV:512:", "avx512er,evex512")
TARGET_BUILTIN(__builtin_ia32_rcp14sd_mask, "V2dV2dV2dV2dUc", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_rcp14ss_mask, "V4fV4fV4fV4fUc", "ncV:128:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_rcp14pd512_mask, "V8dV8dV8dUc", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_rcp14ps512_mask, "V16fV16fV16fUs", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_rcp14pd512_mask, "V8dV8dV8dUc", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_rcp14ps512_mask, "V16fV16fV16fUs", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_rcp28sd_round_mask, "V2dV2dV2dV2dUcIi", "ncV:128:", "avx512er")
TARGET_BUILTIN(__builtin_ia32_rcp28ss_round_mask, "V4fV4fV4fV4fUcIi", "ncV:128:", "avx512er")
-TARGET_BUILTIN(__builtin_ia32_rcp28pd_mask, "V8dV8dV8dUcIi", "ncV:512:", "avx512er")
-TARGET_BUILTIN(__builtin_ia32_rcp28ps_mask, "V16fV16fV16fUsIi", "ncV:512:", "avx512er")
-TARGET_BUILTIN(__builtin_ia32_exp2pd_mask, "V8dV8dV8dUcIi", "ncV:512:", "avx512er")
-TARGET_BUILTIN(__builtin_ia32_exp2ps_mask, "V16fV16fV16fUsIi", "ncV:512:", "avx512er")
+TARGET_BUILTIN(__builtin_ia32_rcp28pd_mask, "V8dV8dV8dUcIi", "ncV:512:", "avx512er,evex512")
+TARGET_BUILTIN(__builtin_ia32_rcp28ps_mask, "V16fV16fV16fUsIi", "ncV:512:", "avx512er,evex512")
+TARGET_BUILTIN(__builtin_ia32_exp2pd_mask, "V8dV8dV8dUcIi", "ncV:512:", "avx512er,evex512")
+TARGET_BUILTIN(__builtin_ia32_exp2ps_mask, "V16fV16fV16fUsIi", "ncV:512:", "avx512er,evex512")
-TARGET_BUILTIN(__builtin_ia32_cvttps2dq512_mask, "V16iV16fV16iUsIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_cvttps2udq512_mask, "V16iV16fV16iUsIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_cvttpd2dq512_mask, "V8iV8dV8iUcIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_cvttpd2udq512_mask, "V8iV8dV8iUcIi", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_cvttps2dq512_mask, "V16iV16fV16iUsIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvttps2udq512_mask, "V16iV16fV16iUsIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvttpd2dq512_mask, "V8iV8dV8iUcIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvttpd2udq512_mask, "V8iV8dV8iUcIi", "ncV:512:", "avx512f,evex512")
-TARGET_BUILTIN(__builtin_ia32_cmpps512_mask, "UsV16fV16fIiUsIi", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_cmpps512_mask, "UsV16fV16fIiUsIi", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_cmpps256_mask, "UcV8fV8fIiUc", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_cmpps128_mask, "UcV4fV4fIiUc", "ncV:128:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_cmppd512_mask, "UcV8dV8dIiUcIi", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_cmppd512_mask, "UcV8dV8dIiUcIi", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_cmppd256_mask, "UcV4dV4dIiUc", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_cmppd128_mask, "UcV2dV2dIiUc", "ncV:128:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_rndscaleps_mask, "V16fV16fIiV16fUsIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_rndscalepd_mask, "V8dV8dIiV8dUcIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_cvtps2dq512_mask, "V16iV16fV16iUsIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_cvtpd2dq512_mask, "V8iV8dV8iUcIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_cvtps2udq512_mask, "V16iV16fV16iUsIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_cvtpd2udq512_mask, "V8iV8dV8iUcIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_minps512, "V16fV16fV16fIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_minpd512, "V8dV8dV8dIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_maxps512, "V16fV16fV16fIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_maxpd512, "V8dV8dV8dIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_cvtdq2ps512_mask, "V16fV16iV16fUsIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_cvtudq2ps512_mask, "V16fV16iV16fUsIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_cvtpd2ps512_mask, "V8fV8dV8fUcIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_vcvtps2ph512_mask, "V16sV16fIiV16sUs", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_vcvtph2ps512_mask, "V16fV16sV16fUsIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pabsd512, "V16iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pabsq512, "V8OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmaxsd512, "V16iV16iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmaxsq512, "V8OiV8OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmaxud512, "V16iV16iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmaxuq512, "V8OiV8OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pminsd512, "V16iV16iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pminsq512, "V8OiV8OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pminud512, "V16iV16iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pminuq512, "V8OiV8OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmuldq512, "V8OiV16iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmuludq512, "V8OiV16iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_loaddqusi512_mask, "V16iiC*V16iUs", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_loaddqudi512_mask, "V8OiOiC*V8OiUc", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_loadups512_mask, "V16ffC*V16fUs", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_loadaps512_mask, "V16fV16fC*V16fUs", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_loadupd512_mask, "V8ddC*V8dUc", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_loadapd512_mask, "V8dV8dC*V8dUc", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_storedqudi512_mask, "vOi*V8OiUc", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_storedqusi512_mask, "vi*V16iUs", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_storeupd512_mask, "vd*V8dUc", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_storeapd512_mask, "vV8d*V8dUc", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_storeups512_mask, "vf*V16fUs", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_storeaps512_mask, "vV16f*V16fUs", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_alignq512, "V8OiV8OiV8OiIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_alignd512, "V16iV16iV16iIi", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_rndscaleps_mask, "V16fV16fIiV16fUsIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_rndscalepd_mask, "V8dV8dIiV8dUcIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtps2dq512_mask, "V16iV16fV16iUsIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtpd2dq512_mask, "V8iV8dV8iUcIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtps2udq512_mask, "V16iV16fV16iUsIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtpd2udq512_mask, "V8iV8dV8iUcIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_minps512, "V16fV16fV16fIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_minpd512, "V8dV8dV8dIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_maxps512, "V16fV16fV16fIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_maxpd512, "V8dV8dV8dIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtdq2ps512_mask, "V16fV16iV16fUsIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtudq2ps512_mask, "V16fV16iV16fUsIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtpd2ps512_mask, "V8fV8dV8fUcIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvtps2ph512_mask, "V16sV16fIiV16sUs", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2ps512_mask, "V16fV16sV16fUsIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmuldq512, "V8OiV16iV16i", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmuludq512, "V8OiV16iV16i", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_loaddqusi512_mask, "V16iiC*V16iUs", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_loaddqudi512_mask, "V8OiOiC*V8OiUc", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_loadups512_mask, "V16ffC*V16fUs", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_loadaps512_mask, "V16fV16fC*V16fUs", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_loadupd512_mask, "V8ddC*V8dUc", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_loadapd512_mask, "V8dV8dC*V8dUc", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_storedqudi512_mask, "vOi*V8OiUc", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_storedqusi512_mask, "vi*V16iUs", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_storeupd512_mask, "vd*V8dUc", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_storeapd512_mask, "vV8d*V8dUc", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_storeups512_mask, "vf*V16fUs", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_storeaps512_mask, "vV16f*V16fUs", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_alignq512, "V8OiV8OiV8OiIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_alignd512, "V16iV16iV16iIi", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_alignd128, "V4iV4iV4iIi", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_alignd256, "V8iV8iV8iIi", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_alignq128, "V2OiV2OiV2OiIi", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_alignq256, "V4OiV4OiV4OiIi", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_extractf64x4_mask, "V4dV8dIiV4dUc", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_extractf32x4_mask, "V4fV16fIiV4fUc", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_extractf64x4_mask, "V4dV8dIiV4dUc", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_extractf32x4_mask, "V4fV16fIiV4fUc", "ncV:512:", "avx512f,evex512")
+// AVX-VNNI and AVX512-VNNI
TARGET_BUILTIN(__builtin_ia32_vpdpbusd128, "V4iV4iV4iV4i", "ncV:128:", "avx512vl,avx512vnni|avxvnni")
TARGET_BUILTIN(__builtin_ia32_vpdpbusd256, "V8iV8iV8iV8i", "ncV:256:", "avx512vl,avx512vnni|avxvnni")
-TARGET_BUILTIN(__builtin_ia32_vpdpbusd512, "V16iV16iV16iV16i", "ncV:512:", "avx512vnni")
+TARGET_BUILTIN(__builtin_ia32_vpdpbusd512, "V16iV16iV16iV16i", "ncV:512:", "avx512vnni,evex512")
TARGET_BUILTIN(__builtin_ia32_vpdpbusds128, "V4iV4iV4iV4i", "ncV:128:", "avx512vl,avx512vnni|avxvnni")
TARGET_BUILTIN(__builtin_ia32_vpdpbusds256, "V8iV8iV8iV8i", "ncV:256:", "avx512vl,avx512vnni|avxvnni")
-TARGET_BUILTIN(__builtin_ia32_vpdpbusds512, "V16iV16iV16iV16i", "ncV:512:", "avx512vnni")
+TARGET_BUILTIN(__builtin_ia32_vpdpbusds512, "V16iV16iV16iV16i", "ncV:512:", "avx512vnni,evex512")
TARGET_BUILTIN(__builtin_ia32_vpdpwssd128, "V4iV4iV4iV4i", "ncV:128:", "avx512vl,avx512vnni|avxvnni")
TARGET_BUILTIN(__builtin_ia32_vpdpwssd256, "V8iV8iV8iV8i", "ncV:256:", "avx512vl,avx512vnni|avxvnni")
-TARGET_BUILTIN(__builtin_ia32_vpdpwssd512, "V16iV16iV16iV16i", "ncV:512:", "avx512vnni")
+TARGET_BUILTIN(__builtin_ia32_vpdpwssd512, "V16iV16iV16iV16i", "ncV:512:", "avx512vnni,evex512")
TARGET_BUILTIN(__builtin_ia32_vpdpwssds128, "V4iV4iV4iV4i", "ncV:128:", "avx512vl,avx512vnni|avxvnni")
TARGET_BUILTIN(__builtin_ia32_vpdpwssds256, "V8iV8iV8iV8i", "ncV:256:", "avx512vl,avx512vnni|avxvnni")
-TARGET_BUILTIN(__builtin_ia32_vpdpwssds512, "V16iV16iV16iV16i", "ncV:512:", "avx512vnni")
+TARGET_BUILTIN(__builtin_ia32_vpdpwssds512, "V16iV16iV16iV16i", "ncV:512:", "avx512vnni,evex512")
+
+// AVX-VNNI-INT8
+TARGET_BUILTIN(__builtin_ia32_vpdpbssd128, "V4iV4iV4iV4i", "ncV:128:", "avxvnniint8")
+TARGET_BUILTIN(__builtin_ia32_vpdpbssd256, "V8iV8iV8iV8i", "ncV:256:", "avxvnniint8")
+TARGET_BUILTIN(__builtin_ia32_vpdpbssds128, "V4iV4iV4iV4i", "ncV:128:", "avxvnniint8")
+TARGET_BUILTIN(__builtin_ia32_vpdpbssds256, "V8iV8iV8iV8i", "ncV:256:", "avxvnniint8")
+TARGET_BUILTIN(__builtin_ia32_vpdpbsud128, "V4iV4iV4iV4i", "ncV:128:", "avxvnniint8")
+TARGET_BUILTIN(__builtin_ia32_vpdpbsud256, "V8iV8iV8iV8i", "ncV:256:", "avxvnniint8")
+TARGET_BUILTIN(__builtin_ia32_vpdpbsuds128, "V4iV4iV4iV4i", "ncV:128:", "avxvnniint8")
+TARGET_BUILTIN(__builtin_ia32_vpdpbsuds256, "V8iV8iV8iV8i", "ncV:256:", "avxvnniint8")
+TARGET_BUILTIN(__builtin_ia32_vpdpbuud128, "V4iV4iV4iV4i", "ncV:128:", "avxvnniint8")
+TARGET_BUILTIN(__builtin_ia32_vpdpbuud256, "V8iV8iV8iV8i", "ncV:256:", "avxvnniint8")
+TARGET_BUILTIN(__builtin_ia32_vpdpbuuds128, "V4iV4iV4iV4i", "ncV:128:", "avxvnniint8")
+TARGET_BUILTIN(__builtin_ia32_vpdpbuuds256, "V8iV8iV8iV8i", "ncV:256:", "avxvnniint8")
TARGET_BUILTIN(__builtin_ia32_gather3div2df, "V2dV2dvC*V2OiUcIi", "nV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_gather3div2di, "V2OiV2OivC*V2OiUcIi", "nV:128:", "avx512vl")
@@ -989,31 +950,31 @@ TARGET_BUILTIN(__builtin_ia32_gather3siv4sf, "V4fV4fvC*V4iUcIi", "nV:128:", "avx
TARGET_BUILTIN(__builtin_ia32_gather3siv4si, "V4iV4ivC*V4iUcIi", "nV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_gather3siv8sf, "V8fV8fvC*V8iUcIi", "nV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_gather3siv8si, "V8iV8ivC*V8iUcIi", "nV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_gathersiv8df, "V8dV8dvC*V8iUcIi", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_gathersiv16sf, "V16fV16fvC*V16iUsIi", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_gatherdiv8df, "V8dV8dvC*V8OiUcIi", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_gatherdiv16sf, "V8fV8fvC*V8OiUcIi", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_gathersiv8di, "V8OiV8OivC*V8iUcIi", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_gathersiv16si, "V16iV16ivC*V16iUsIi", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_gatherdiv8di, "V8OiV8OivC*V8OiUcIi", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_gatherdiv16si, "V8iV8ivC*V8OiUcIi", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_scattersiv8df, "vv*UcV8iV8dIi", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_scattersiv16sf, "vv*UsV16iV16fIi", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_scatterdiv8df, "vv*UcV8OiV8dIi", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_scatterdiv16sf, "vv*UcV8OiV8fIi", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_scattersiv8di, "vv*UcV8iV8OiIi", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_scattersiv16si, "vv*UsV16iV16iIi", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_scatterdiv8di, "vv*UcV8OiV8OiIi", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_scatterdiv16si, "vv*UcV8OiV8iIi", "nV:512:", "avx512f")
-
-TARGET_BUILTIN(__builtin_ia32_gatherpfdpd, "vUcV8ivC*IiIi", "nV:512:", "avx512pf")
-TARGET_BUILTIN(__builtin_ia32_gatherpfdps, "vUsV16ivC*IiIi", "nV:512:", "avx512pf")
-TARGET_BUILTIN(__builtin_ia32_gatherpfqpd, "vUcV8OivC*IiIi", "nV:512:", "avx512pf")
-TARGET_BUILTIN(__builtin_ia32_gatherpfqps, "vUcV8OivC*IiIi", "nV:512:", "avx512pf")
-TARGET_BUILTIN(__builtin_ia32_scatterpfdpd, "vUcV8iv*IiIi", "nV:512:", "avx512pf")
-TARGET_BUILTIN(__builtin_ia32_scatterpfdps, "vUsV16iv*IiIi", "nV:512:", "avx512pf")
-TARGET_BUILTIN(__builtin_ia32_scatterpfqpd, "vUcV8Oiv*IiIi", "nV:512:", "avx512pf")
-TARGET_BUILTIN(__builtin_ia32_scatterpfqps, "vUcV8Oiv*IiIi", "nV:512:", "avx512pf")
+TARGET_BUILTIN(__builtin_ia32_gathersiv8df, "V8dV8dvC*V8iUcIi", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_gathersiv16sf, "V16fV16fvC*V16iUsIi", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_gatherdiv8df, "V8dV8dvC*V8OiUcIi", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_gatherdiv16sf, "V8fV8fvC*V8OiUcIi", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_gathersiv8di, "V8OiV8OivC*V8iUcIi", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_gathersiv16si, "V16iV16ivC*V16iUsIi", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_gatherdiv8di, "V8OiV8OivC*V8OiUcIi", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_gatherdiv16si, "V8iV8ivC*V8OiUcIi", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_scattersiv8df, "vv*UcV8iV8dIi", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_scattersiv16sf, "vv*UsV16iV16fIi", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_scatterdiv8df, "vv*UcV8OiV8dIi", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_scatterdiv16sf, "vv*UcV8OiV8fIi", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_scattersiv8di, "vv*UcV8iV8OiIi", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_scattersiv16si, "vv*UsV16iV16iIi", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_scatterdiv8di, "vv*UcV8OiV8OiIi", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_scatterdiv16si, "vv*UcV8OiV8iIi", "nV:512:", "avx512f,evex512")
+
+TARGET_BUILTIN(__builtin_ia32_gatherpfdpd, "vUcV8ivC*IiIi", "nV:512:", "avx512pf,evex512")
+TARGET_BUILTIN(__builtin_ia32_gatherpfdps, "vUsV16ivC*IiIi", "nV:512:", "avx512pf,evex512")
+TARGET_BUILTIN(__builtin_ia32_gatherpfqpd, "vUcV8OivC*IiIi", "nV:512:", "avx512pf,evex512")
+TARGET_BUILTIN(__builtin_ia32_gatherpfqps, "vUcV8OivC*IiIi", "nV:512:", "avx512pf,evex512")
+TARGET_BUILTIN(__builtin_ia32_scatterpfdpd, "vUcV8iv*IiIi", "nV:512:", "avx512pf,evex512")
+TARGET_BUILTIN(__builtin_ia32_scatterpfdps, "vUsV16iv*IiIi", "nV:512:", "avx512pf,evex512")
+TARGET_BUILTIN(__builtin_ia32_scatterpfqpd, "vUcV8Oiv*IiIi", "nV:512:", "avx512pf,evex512")
+TARGET_BUILTIN(__builtin_ia32_scatterpfqps, "vUcV8Oiv*IiIi", "nV:512:", "avx512pf,evex512")
TARGET_BUILTIN(__builtin_ia32_knotqi, "UcUc", "nc", "avx512dq")
TARGET_BUILTIN(__builtin_ia32_knothi, "UsUs", "nc", "avx512f")
@@ -1028,10 +989,10 @@ TARGET_BUILTIN(__builtin_ia32_cmpb256_mask, "UiV32cV32cIiUi", "ncV:256:", "avx51
TARGET_BUILTIN(__builtin_ia32_cmpd256_mask, "UcV8iV8iIiUc", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_cmpq256_mask, "UcV4OiV4OiIiUc", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_cmpw256_mask, "UsV16sV16sIiUs", "ncV:256:", "avx512vl,avx512bw")
-TARGET_BUILTIN(__builtin_ia32_cmpb512_mask, "UOiV64cV64cIiUOi", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_cmpd512_mask, "UsV16iV16iIiUs", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_cmpq512_mask, "UcV8OiV8OiIiUc", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_cmpw512_mask, "UiV32sV32sIiUi", "ncV:512:", "avx512bw")
+TARGET_BUILTIN(__builtin_ia32_cmpb512_mask, "UOiV64cV64cIiUOi", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_cmpd512_mask, "UsV16iV16iIiUs", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_cmpq512_mask, "UcV8OiV8OiIiUc", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_cmpw512_mask, "UiV32sV32sIiUi", "ncV:512:", "avx512bw,evex512")
TARGET_BUILTIN(__builtin_ia32_ucmpb128_mask, "UsV16cV16cIiUs", "ncV:128:", "avx512vl,avx512bw")
TARGET_BUILTIN(__builtin_ia32_ucmpd128_mask, "UcV4iV4iIiUc", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_ucmpq128_mask, "UcV2OiV2OiIiUc", "ncV:128:", "avx512vl")
@@ -1040,79 +1001,61 @@ TARGET_BUILTIN(__builtin_ia32_ucmpb256_mask, "UiV32cV32cIiUi", "ncV:256:", "avx5
TARGET_BUILTIN(__builtin_ia32_ucmpd256_mask, "UcV8iV8iIiUc", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_ucmpq256_mask, "UcV4OiV4OiIiUc", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_ucmpw256_mask, "UsV16sV16sIiUs", "ncV:256:", "avx512vl,avx512bw")
-TARGET_BUILTIN(__builtin_ia32_ucmpb512_mask, "UOiV64cV64cIiUOi", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_ucmpd512_mask, "UsV16iV16iIiUs", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_ucmpq512_mask, "UcV8OiV8OiIiUc", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_ucmpw512_mask, "UiV32sV32sIiUi", "ncV:512:", "avx512bw")
-
-TARGET_BUILTIN(__builtin_ia32_pabsb512, "V64cV64c", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pabsw512, "V32sV32s", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_packssdw512, "V32sV16iV16i", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_packsswb512, "V64cV32sV32s", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_packusdw512, "V32sV16iV16i", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_packuswb512, "V64cV32sV32s", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_paddsb512, "V64cV64cV64c", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_paddsw512, "V32sV32sV32s", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_paddusb512, "V64cV64cV64c", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_paddusw512, "V32sV32sV32s", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pavgb512, "V64cV64cV64c", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pavgw512, "V32sV32sV32s", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pmaxsb512, "V64cV64cV64c", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pmaxsw512, "V32sV32sV32s", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pmaxub512, "V64cV64cV64c", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pmaxuw512, "V32sV32sV32s", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pminsb512, "V64cV64cV64c", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pminsw512, "V32sV32sV32s", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pminub512, "V64cV64cV64c", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pminuw512, "V32sV32sV32s", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pshufb512, "V64cV64cV64c", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_psubsb512, "V64cV64cV64c", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_psubsw512, "V32sV32sV32s", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_psubusb512, "V64cV64cV64c", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_psubusw512, "V32sV32sV32s", "ncV:512:", "avx512bw")
+TARGET_BUILTIN(__builtin_ia32_ucmpb512_mask, "UOiV64cV64cIiUOi", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_ucmpd512_mask, "UsV16iV16iIiUs", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_ucmpq512_mask, "UcV8OiV8OiIiUc", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_ucmpw512_mask, "UiV32sV32sIiUi", "ncV:512:", "avx512bw,evex512")
+
+TARGET_BUILTIN(__builtin_ia32_packssdw512, "V32sV16iV16i", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_packsswb512, "V64cV32sV32s", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_packusdw512, "V32sV16iV16i", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_packuswb512, "V64cV32sV32s", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_pavgb512, "V64cV64cV64c", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_pavgw512, "V32sV32sV32s", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_pshufb512, "V64cV64cV64c", "ncV:512:", "avx512bw,evex512")
TARGET_BUILTIN(__builtin_ia32_vpconflictdi_128, "V2OiV2Oi", "ncV:128:", "avx512cd,avx512vl")
TARGET_BUILTIN(__builtin_ia32_vpconflictdi_256, "V4OiV4Oi", "ncV:256:", "avx512cd,avx512vl")
TARGET_BUILTIN(__builtin_ia32_vpconflictsi_128, "V4iV4i", "ncV:128:", "avx512cd,avx512vl")
TARGET_BUILTIN(__builtin_ia32_vpconflictsi_256, "V8iV8i", "ncV:256:", "avx512cd,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_vpconflictdi_512, "V8OiV8Oi", "ncV:512:", "avx512cd")
-TARGET_BUILTIN(__builtin_ia32_vpconflictsi_512, "V16iV16i", "ncV:512:", "avx512cd")
-TARGET_BUILTIN(__builtin_ia32_vplzcntd_512, "V16iV16i", "ncV:512:", "avx512cd")
-TARGET_BUILTIN(__builtin_ia32_vplzcntq_512, "V8OiV8Oi", "ncV:512:", "avx512cd")
+TARGET_BUILTIN(__builtin_ia32_vpconflictdi_512, "V8OiV8Oi", "ncV:512:", "avx512cd,evex512")
+TARGET_BUILTIN(__builtin_ia32_vpconflictsi_512, "V16iV16i", "ncV:512:", "avx512cd,evex512")
+TARGET_BUILTIN(__builtin_ia32_vplzcntd_512, "V16iV16i", "ncV:512:", "avx512cd,evex512")
+TARGET_BUILTIN(__builtin_ia32_vplzcntq_512, "V8OiV8Oi", "ncV:512:", "avx512cd,evex512")
TARGET_BUILTIN(__builtin_ia32_vpopcntd_128, "V4iV4i", "ncV:128:", "avx512vpopcntdq,avx512vl")
TARGET_BUILTIN(__builtin_ia32_vpopcntq_128, "V2OiV2Oi", "ncV:128:", "avx512vpopcntdq,avx512vl")
TARGET_BUILTIN(__builtin_ia32_vpopcntd_256, "V8iV8i", "ncV:256:", "avx512vpopcntdq,avx512vl")
TARGET_BUILTIN(__builtin_ia32_vpopcntq_256, "V4OiV4Oi", "ncV:256:", "avx512vpopcntdq,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_vpopcntd_512, "V16iV16i", "ncV:512:", "avx512vpopcntdq")
-TARGET_BUILTIN(__builtin_ia32_vpopcntq_512, "V8OiV8Oi", "ncV:512:", "avx512vpopcntdq")
+TARGET_BUILTIN(__builtin_ia32_vpopcntd_512, "V16iV16i", "ncV:512:", "avx512vpopcntdq,evex512")
+TARGET_BUILTIN(__builtin_ia32_vpopcntq_512, "V8OiV8Oi", "ncV:512:", "avx512vpopcntdq,evex512")
TARGET_BUILTIN(__builtin_ia32_vpopcntb_128, "V16cV16c", "ncV:128:", "avx512vl,avx512bitalg")
TARGET_BUILTIN(__builtin_ia32_vpopcntw_128, "V8sV8s", "ncV:128:", "avx512vl,avx512bitalg")
TARGET_BUILTIN(__builtin_ia32_vpopcntb_256, "V32cV32c", "ncV:256:", "avx512vl,avx512bitalg")
TARGET_BUILTIN(__builtin_ia32_vpopcntw_256, "V16sV16s", "ncV:256:", "avx512vl,avx512bitalg")
-TARGET_BUILTIN(__builtin_ia32_vpopcntb_512, "V64cV64c", "ncV:512:", "avx512bitalg")
-TARGET_BUILTIN(__builtin_ia32_vpopcntw_512, "V32sV32s", "ncV:512:", "avx512bitalg")
+TARGET_BUILTIN(__builtin_ia32_vpopcntb_512, "V64cV64c", "ncV:512:", "avx512bitalg,evex512")
+TARGET_BUILTIN(__builtin_ia32_vpopcntw_512, "V32sV32s", "ncV:512:", "avx512bitalg,evex512")
TARGET_BUILTIN(__builtin_ia32_vpshufbitqmb128_mask, "UsV16cV16cUs", "ncV:128:", "avx512vl,avx512bitalg")
TARGET_BUILTIN(__builtin_ia32_vpshufbitqmb256_mask, "UiV32cV32cUi", "ncV:256:", "avx512vl,avx512bitalg")
-TARGET_BUILTIN(__builtin_ia32_vpshufbitqmb512_mask, "UOiV64cV64cUOi", "ncV:512:", "avx512bitalg")
+TARGET_BUILTIN(__builtin_ia32_vpshufbitqmb512_mask, "UOiV64cV64cUOi", "ncV:512:", "avx512bitalg,evex512")
-TARGET_BUILTIN(__builtin_ia32_pmulhrsw512, "V32sV32sV32s", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pmulhuw512, "V32sV32sV32s", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pmulhw512, "V32sV32sV32s", "ncV:512:", "avx512bw")
+TARGET_BUILTIN(__builtin_ia32_pmulhrsw512, "V32sV32sV32s", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmulhuw512, "V32sV32sV32s", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmulhw512, "V32sV32sV32s", "ncV:512:", "avx512bw,evex512")
-TARGET_BUILTIN(__builtin_ia32_addpd512, "V8dV8dV8dIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_addps512, "V16fV16fV16fIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_divpd512, "V8dV8dV8dIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_divps512, "V16fV16fV16fIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_mulpd512, "V8dV8dV8dIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_mulps512, "V16fV16fV16fIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_subpd512, "V8dV8dV8dIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_subps512, "V16fV16fV16fIi", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_addpd512, "V8dV8dV8dIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_addps512, "V16fV16fV16fIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_divpd512, "V8dV8dV8dIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_divps512, "V16fV16fV16fIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_mulpd512, "V8dV8dV8dIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_mulps512, "V16fV16fV16fIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_subpd512, "V8dV8dV8dIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_subps512, "V16fV16fV16fIi", "ncV:512:", "avx512f,evex512")
-TARGET_BUILTIN(__builtin_ia32_pmaddubsw512, "V32sV64cV64c", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pmaddwd512, "V16iV32sV32s", "ncV:512:", "avx512bw")
+TARGET_BUILTIN(__builtin_ia32_pmaddubsw512, "V32sV64cV64c", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmaddwd512, "V16iV32sV32s", "ncV:512:", "avx512bw,evex512")
TARGET_BUILTIN(__builtin_ia32_addss_round_mask, "V4fV4fV4fV4fUcIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_divss_round_mask, "V4fV4fV4fV4fUcIi", "ncV:128:", "avx512f")
@@ -1198,16 +1141,6 @@ TARGET_BUILTIN(__builtin_ia32_getexppd128_mask, "V2dV2dV2dUc", "ncV:128:", "avx5
TARGET_BUILTIN(__builtin_ia32_getexppd256_mask, "V4dV4dV4dUc", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_getexpps128_mask, "V4fV4fV4fUc", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_getexpps256_mask, "V8fV8fV8fUc", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pabsq128, "V2OiV2Oi", "ncV:128:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pabsq256, "V4OiV4Oi", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pmaxsq128, "V2OiV2OiV2Oi", "ncV:128:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pmaxsq256, "V4OiV4OiV4Oi", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pmaxuq128, "V2OiV2OiV2Oi", "ncV:128:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pmaxuq256, "V4OiV4OiV4Oi", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pminsq128, "V2OiV2OiV2Oi", "ncV:128:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pminsq256, "V4OiV4OiV4Oi", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pminuq128, "V2OiV2OiV2Oi", "ncV:128:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pminuq256, "V4OiV4OiV4Oi", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_rndscalepd_128_mask, "V2dV2dIiV2dUc", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_rndscalepd_256_mask, "V4dV4dIiV4dUc", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_rndscaleps_128_mask, "V4fV4fIiV4fUc", "ncV:128:", "avx512vl")
@@ -1236,66 +1169,66 @@ TARGET_BUILTIN(__builtin_ia32_scattersiv8si, "vv*UcV8iV8iIi", "nV:256:", "avx512
TARGET_BUILTIN(__builtin_ia32_vpermi2vard128, "V4iV4iV4iV4i", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_vpermi2vard256, "V8iV8iV8iV8i", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_vpermi2vard512, "V16iV16iV16iV16i", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_vpermi2vard512, "V16iV16iV16iV16i", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_vpermi2varpd128, "V2dV2dV2OiV2d", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_vpermi2varpd256, "V4dV4dV4OiV4d", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_vpermi2varpd512, "V8dV8dV8OiV8d", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_vpermi2varpd512, "V8dV8dV8OiV8d", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_vpermi2varps128, "V4fV4fV4iV4f", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_vpermi2varps256, "V8fV8fV8iV8f", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_vpermi2varps512, "V16fV16fV16iV16f", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_vpermi2varps512, "V16fV16fV16iV16f", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_vpermi2varq128, "V2OiV2OiV2OiV2Oi", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_vpermi2varq256, "V4OiV4OiV4OiV4Oi", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_vpermi2varq512, "V8OiV8OiV8OiV8Oi", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_vpermi2varq512, "V8OiV8OiV8OiV8Oi", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_vpermi2varqi128, "V16cV16cV16cV16c", "ncV:128:", "avx512vbmi,avx512vl")
TARGET_BUILTIN(__builtin_ia32_vpermi2varqi256, "V32cV32cV32cV32c", "ncV:256:", "avx512vbmi,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_vpermi2varqi512, "V64cV64cV64cV64c", "ncV:512:", "avx512vbmi")
+TARGET_BUILTIN(__builtin_ia32_vpermi2varqi512, "V64cV64cV64cV64c", "ncV:512:", "avx512vbmi,evex512")
TARGET_BUILTIN(__builtin_ia32_vpermi2varhi128, "V8sV8sV8sV8s", "ncV:128:", "avx512vl,avx512bw")
TARGET_BUILTIN(__builtin_ia32_vpermi2varhi256, "V16sV16sV16sV16s", "ncV:256:", "avx512vl,avx512bw")
-TARGET_BUILTIN(__builtin_ia32_vpermi2varhi512, "V32sV32sV32sV32s", "ncV:512:", "avx512bw")
+TARGET_BUILTIN(__builtin_ia32_vpermi2varhi512, "V32sV32sV32sV32s", "ncV:512:", "avx512bw,evex512")
TARGET_BUILTIN(__builtin_ia32_vpshldd128, "V4iV4iV4iIi", "ncV:128:", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshldd256, "V8iV8iV8iIi", "ncV:256:", "avx512vl,avx512vbmi2")
-TARGET_BUILTIN(__builtin_ia32_vpshldd512, "V16iV16iV16iIi", "ncV:512:", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldd512, "V16iV16iV16iIi", "ncV:512:", "avx512vbmi2,evex512")
TARGET_BUILTIN(__builtin_ia32_vpshldq128, "V2OiV2OiV2OiIi", "ncV:128:", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshldq256, "V4OiV4OiV4OiIi", "ncV:256:", "avx512vl,avx512vbmi2")
-TARGET_BUILTIN(__builtin_ia32_vpshldq512, "V8OiV8OiV8OiIi", "ncV:512:", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldq512, "V8OiV8OiV8OiIi", "ncV:512:", "avx512vbmi2,evex512")
TARGET_BUILTIN(__builtin_ia32_vpshldw128, "V8sV8sV8sIi", "ncV:128:", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshldw256, "V16sV16sV16sIi", "ncV:256:", "avx512vl,avx512vbmi2")
-TARGET_BUILTIN(__builtin_ia32_vpshldw512, "V32sV32sV32sIi", "ncV:512:", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldw512, "V32sV32sV32sIi", "ncV:512:", "avx512vbmi2,evex512")
TARGET_BUILTIN(__builtin_ia32_vpshldvd128, "V4iV4iV4iV4i", "ncV:128:", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshldvd256, "V8iV8iV8iV8i", "ncV:256:", "avx512vl,avx512vbmi2")
-TARGET_BUILTIN(__builtin_ia32_vpshldvd512, "V16iV16iV16iV16i", "ncV:512:", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldvd512, "V16iV16iV16iV16i", "ncV:512:", "avx512vbmi2,evex512")
TARGET_BUILTIN(__builtin_ia32_vpshldvq128, "V2OiV2OiV2OiV2Oi", "ncV:128:", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshldvq256, "V4OiV4OiV4OiV4Oi", "ncV:256:", "avx512vl,avx512vbmi2")
-TARGET_BUILTIN(__builtin_ia32_vpshldvq512, "V8OiV8OiV8OiV8Oi", "ncV:512:", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldvq512, "V8OiV8OiV8OiV8Oi", "ncV:512:", "avx512vbmi2,evex512")
TARGET_BUILTIN(__builtin_ia32_vpshldvw128, "V8sV8sV8sV8s", "ncV:128:", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshldvw256, "V16sV16sV16sV16s", "ncV:256:", "avx512vl,avx512vbmi2")
-TARGET_BUILTIN(__builtin_ia32_vpshldvw512, "V32sV32sV32sV32s", "ncV:512:", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshldvw512, "V32sV32sV32sV32s", "ncV:512:", "avx512vbmi2,evex512")
TARGET_BUILTIN(__builtin_ia32_vpshrdvd128, "V4iV4iV4iV4i", "ncV:128:", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdvd256, "V8iV8iV8iV8i", "ncV:256:", "avx512vl,avx512vbmi2")
-TARGET_BUILTIN(__builtin_ia32_vpshrdvd512, "V16iV16iV16iV16i", "ncV:512:", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdvd512, "V16iV16iV16iV16i", "ncV:512:", "avx512vbmi2,evex512")
TARGET_BUILTIN(__builtin_ia32_vpshrdvq128, "V2OiV2OiV2OiV2Oi", "ncV:128:", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdvq256, "V4OiV4OiV4OiV4Oi", "ncV:256:", "avx512vl,avx512vbmi2")
-TARGET_BUILTIN(__builtin_ia32_vpshrdvq512, "V8OiV8OiV8OiV8Oi", "ncV:512:", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdvq512, "V8OiV8OiV8OiV8Oi", "ncV:512:", "avx512vbmi2,evex512")
TARGET_BUILTIN(__builtin_ia32_vpshrdvw128, "V8sV8sV8sV8s", "ncV:128:", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdvw256, "V16sV16sV16sV16s", "ncV:256:", "avx512vl,avx512vbmi2")
-TARGET_BUILTIN(__builtin_ia32_vpshrdvw512, "V32sV32sV32sV32s", "ncV:512:", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdvw512, "V32sV32sV32sV32s", "ncV:512:", "avx512vbmi2,evex512")
TARGET_BUILTIN(__builtin_ia32_vpshrdd128, "V4iV4iV4iIi", "ncV:128:", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdd256, "V8iV8iV8iIi", "ncV:256:", "avx512vl,avx512vbmi2")
-TARGET_BUILTIN(__builtin_ia32_vpshrdd512, "V16iV16iV16iIi", "ncV:512:", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdd512, "V16iV16iV16iIi", "ncV:512:", "avx512vbmi2,evex512")
TARGET_BUILTIN(__builtin_ia32_vpshrdq128, "V2OiV2OiV2OiIi", "ncV:128:", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdq256, "V4OiV4OiV4OiIi", "ncV:256:", "avx512vl,avx512vbmi2")
-TARGET_BUILTIN(__builtin_ia32_vpshrdq512, "V8OiV8OiV8OiIi", "ncV:512:", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdq512, "V8OiV8OiV8OiIi", "ncV:512:", "avx512vbmi2,evex512")
TARGET_BUILTIN(__builtin_ia32_vpshrdw128, "V8sV8sV8sIi", "ncV:128:", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdw256, "V16sV16sV16sIi", "ncV:256:", "avx512vl,avx512vbmi2")
-TARGET_BUILTIN(__builtin_ia32_vpshrdw512, "V32sV32sV32sIi", "ncV:512:", "avx512vbmi2")
+TARGET_BUILTIN(__builtin_ia32_vpshrdw512, "V32sV32sV32sIi", "ncV:512:", "avx512vbmi2,evex512")
-TARGET_BUILTIN(__builtin_ia32_pmovswb512_mask, "V32cV32sV32cUi", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pmovuswb512_mask, "V32cV32sV32cUi", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pmovwb512_mask, "V32cV32sV32cUi", "ncV:512:", "avx512bw")
+TARGET_BUILTIN(__builtin_ia32_pmovswb512_mask, "V32cV32sV32cUi", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovuswb512_mask, "V32cV32sV32cUi", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovwb512_mask, "V32cV32sV32cUi", "ncV:512:", "avx512bw,evex512")
TARGET_BUILTIN(__builtin_ia32_cvtpd2qq128_mask, "V2OiV2dV2OiUc", "ncV:128:", "avx512vl,avx512dq")
TARGET_BUILTIN(__builtin_ia32_cvtpd2qq256_mask, "V4OiV4dV4OiUc", "ncV:256:", "avx512vl,avx512dq")
TARGET_BUILTIN(__builtin_ia32_cvtpd2uqq128_mask, "V2OiV2dV2OiUc", "ncV:128:", "avx512vl,avx512dq")
@@ -1331,32 +1264,32 @@ TARGET_BUILTIN(__builtin_ia32_pmovswb256_mask, "V16cV16sV16cUs", "ncV:256:", "av
TARGET_BUILTIN(__builtin_ia32_pmovuswb128_mask, "V16cV8sV16cUc", "ncV:128:", "avx512vl,avx512bw")
TARGET_BUILTIN(__builtin_ia32_pmovuswb256_mask, "V16cV16sV16cUs", "ncV:256:", "avx512vl,avx512bw")
TARGET_BUILTIN(__builtin_ia32_pmovwb128_mask, "V16cV8sV16cUc", "ncV:128:", "avx512vl,avx512bw")
-TARGET_BUILTIN(__builtin_ia32_cvtpd2qq512_mask, "V8OiV8dV8OiUcIi", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_cvtpd2uqq512_mask, "V8OiV8dV8OiUcIi", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_cvtps2qq512_mask, "V8OiV8fV8OiUcIi", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_cvtps2uqq512_mask, "V8OiV8fV8OiUcIi", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_cvtqq2pd512_mask, "V8dV8OiV8dUcIi", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_cvtqq2ps512_mask, "V8fV8OiV8fUcIi", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_cvttpd2qq512_mask, "V8OiV8dV8OiUcIi", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_cvttpd2uqq512_mask, "V8OiV8dV8OiUcIi", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_cvttps2qq512_mask, "V8OiV8fV8OiUcIi", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_cvttps2uqq512_mask, "V8OiV8fV8OiUcIi", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_cvtuqq2pd512_mask, "V8dV8OiV8dUcIi", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_cvtuqq2ps512_mask, "V8fV8OiV8fUcIi", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_rangepd512_mask, "V8dV8dV8dIiV8dUcIi", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_rangeps512_mask, "V16fV16fV16fIiV16fUsIi", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_reducepd512_mask, "V8dV8dIiV8dUcIi", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_reduceps512_mask, "V16fV16fIiV16fUsIi", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_prold512, "V16iV16iIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_prolq512, "V8OiV8OiIi", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_cvtpd2qq512_mask, "V8OiV8dV8OiUcIi", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtpd2uqq512_mask, "V8OiV8dV8OiUcIi", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtps2qq512_mask, "V8OiV8fV8OiUcIi", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtps2uqq512_mask, "V8OiV8fV8OiUcIi", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtqq2pd512_mask, "V8dV8OiV8dUcIi", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtqq2ps512_mask, "V8fV8OiV8fUcIi", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvttpd2qq512_mask, "V8OiV8dV8OiUcIi", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvttpd2uqq512_mask, "V8OiV8dV8OiUcIi", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvttps2qq512_mask, "V8OiV8fV8OiUcIi", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvttps2uqq512_mask, "V8OiV8fV8OiUcIi", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtuqq2pd512_mask, "V8dV8OiV8dUcIi", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtuqq2ps512_mask, "V8fV8OiV8fUcIi", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_rangepd512_mask, "V8dV8dV8dIiV8dUcIi", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_rangeps512_mask, "V16fV16fV16fIiV16fUsIi", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_reducepd512_mask, "V8dV8dIiV8dUcIi", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_reduceps512_mask, "V16fV16fIiV16fUsIi", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_prold512, "V16iV16iIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_prolq512, "V8OiV8OiIi", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_prold128, "V4iV4iIi", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_prold256, "V8iV8iIi", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_prolq128, "V2OiV2OiIi", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_prolq256, "V4OiV4OiIi", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_prolvd512, "V16iV16iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_prolvq512, "V8OiV8OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_prord512, "V16iV16iIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_prorq512, "V8OiV8OiIi", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_prolvd512, "V16iV16iV16i", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_prolvq512, "V8OiV8OiV8Oi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_prord512, "V16iV16iIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_prorq512, "V8OiV8OiIi", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_prolvd128, "V4iV4iV4i", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_prolvd256, "V8iV8iV8i", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_prolvq128, "V2OiV2OiV2Oi", "ncV:128:", "avx512vl")
@@ -1365,65 +1298,65 @@ TARGET_BUILTIN(__builtin_ia32_prord128, "V4iV4iIi", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_prord256, "V8iV8iIi", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_prorq128, "V2OiV2OiIi", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_prorq256, "V4OiV4OiIi", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_prorvd512, "V16iV16iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_prorvq512, "V8OiV8OiV8Oi", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_prorvd512, "V16iV16iV16i", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_prorvq512, "V8OiV8OiV8Oi", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_prorvd128, "V4iV4iV4i", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_prorvd256, "V8iV8iV8i", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_prorvq128, "V2OiV2OiV2Oi", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_prorvq256, "V4OiV4OiV4Oi", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pshufhw512, "V32sV32sIi", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pshuflw512, "V32sV32sIi", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_psllv32hi, "V32sV32sV32s", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_psllw512, "V32sV32sV8s", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_psllwi512, "V32sV32si", "ncV:512:", "avx512bw")
+TARGET_BUILTIN(__builtin_ia32_pshufhw512, "V32sV32sIi", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_pshuflw512, "V32sV32sIi", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_psllv32hi, "V32sV32sV32s", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_psllw512, "V32sV32sV8s", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_psllwi512, "V32sV32si", "ncV:512:", "avx512bw,evex512")
TARGET_BUILTIN(__builtin_ia32_psllv16hi, "V16sV16sV16s", "ncV:256:", "avx512bw,avx512vl")
TARGET_BUILTIN(__builtin_ia32_psllv8hi, "V8sV8sV8s", "ncV:128:", "avx512bw,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pslldi512, "V16iV16ii", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_psllqi512, "V8OiV8Oii", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_psrlv32hi, "V32sV32sV32s", "ncV:512:", "avx512bw")
+TARGET_BUILTIN(__builtin_ia32_pslldi512, "V16iV16ii", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_psllqi512, "V8OiV8Oii", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_psrlv32hi, "V32sV32sV32s", "ncV:512:", "avx512bw,evex512")
TARGET_BUILTIN(__builtin_ia32_psrlv16hi, "V16sV16sV16s", "ncV:256:", "avx512bw,avx512vl")
TARGET_BUILTIN(__builtin_ia32_psrlv8hi, "V8sV8sV8s", "ncV:128:", "avx512bw,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_psrldi512, "V16iV16ii", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_psrlqi512, "V8OiV8Oii", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_psrav32hi, "V32sV32sV32s", "ncV:512:", "avx512bw")
+TARGET_BUILTIN(__builtin_ia32_psrldi512, "V16iV16ii", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_psrlqi512, "V8OiV8Oii", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_psrav32hi, "V32sV32sV32s", "ncV:512:", "avx512bw,evex512")
TARGET_BUILTIN(__builtin_ia32_psrav16hi, "V16sV16sV16s", "ncV:256:", "avx512bw,avx512vl")
TARGET_BUILTIN(__builtin_ia32_psrav8hi, "V8sV8sV8s", "ncV:128:", "avx512bw,avx512vl")
TARGET_BUILTIN(__builtin_ia32_psravq128, "V2OiV2OiV2Oi", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_psravq256, "V4OiV4OiV4Oi", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_psraw512, "V32sV32sV8s", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_psrawi512, "V32sV32si", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_psrlw512, "V32sV32sV8s", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_psrlwi512, "V32sV32si", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pslldqi512_byteshift, "V8OiV8OiIi", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_psrldqi512_byteshift, "V8OiV8OiIi", "ncV:512:", "avx512bw")
+TARGET_BUILTIN(__builtin_ia32_psraw512, "V32sV32sV8s", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_psrawi512, "V32sV32si", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_psrlw512, "V32sV32sV8s", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_psrlwi512, "V32sV32si", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_pslldqi512_byteshift, "V8OiV8OiIi", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_psrldqi512_byteshift, "V8OiV8OiIi", "ncV:512:", "avx512bw,evex512")
TARGET_BUILTIN(__builtin_ia32_movdqa32load128_mask, "V4iV4iC*V4iUc", "nV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_movdqa32load256_mask, "V8iV8iC*V8iUc", "nV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_movdqa32load512_mask, "V16iV16iC*V16iUs", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_movdqa32store512_mask, "vV16i*V16iUs", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_movdqa64load512_mask, "V8OiV8OiC*V8OiUc", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_movdqa64store512_mask, "vV8Oi*V8OiUc", "nV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_movdqa32load512_mask, "V16iV16iC*V16iUs", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_movdqa32store512_mask, "vV16i*V16iUs", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_movdqa64load512_mask, "V8OiV8OiC*V8OiUc", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_movdqa64store512_mask, "vV8Oi*V8OiUc", "nV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_movdqa32store128_mask, "vV4i*V4iUc", "nV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_movdqa32store256_mask, "vV8i*V8iUc", "nV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_movdqa64load128_mask, "V2OiV2OiC*V2OiUc", "nV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_movdqa64load256_mask, "V4OiV4OiC*V4OiUc", "nV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_movdqa64store128_mask, "vV2Oi*V2OiUc", "nV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_movdqa64store256_mask, "vV4Oi*V4OiUc", "nV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_vpmadd52huq512, "V8OiV8OiV8OiV8Oi", "ncV:512:", "avx512ifma")
-TARGET_BUILTIN(__builtin_ia32_vpmadd52luq512, "V8OiV8OiV8OiV8Oi", "ncV:512:", "avx512ifma")
-TARGET_BUILTIN(__builtin_ia32_vpmadd52huq128, "V2OiV2OiV2OiV2Oi", "ncV:128:", "avx512ifma,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_vpmadd52huq256, "V4OiV4OiV4OiV4Oi", "ncV:256:", "avx512ifma,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_vpmadd52luq128, "V2OiV2OiV2OiV2Oi", "ncV:128:", "avx512ifma,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_vpmadd52luq256, "V4OiV4OiV4OiV4Oi", "ncV:256:", "avx512ifma,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vpmadd52huq512, "V8OiV8OiV8OiV8Oi", "ncV:512:", "avx512ifma,evex512")
+TARGET_BUILTIN(__builtin_ia32_vpmadd52luq512, "V8OiV8OiV8OiV8Oi", "ncV:512:", "avx512ifma,evex512")
+TARGET_BUILTIN(__builtin_ia32_vpmadd52huq128, "V2OiV2OiV2OiV2Oi", "ncV:128:", "avx512ifma,avx512vl|avxifma")
+TARGET_BUILTIN(__builtin_ia32_vpmadd52huq256, "V4OiV4OiV4OiV4Oi", "ncV:256:", "avx512ifma,avx512vl|avxifma")
+TARGET_BUILTIN(__builtin_ia32_vpmadd52luq128, "V2OiV2OiV2OiV2Oi", "ncV:128:", "avx512ifma,avx512vl|avxifma")
+TARGET_BUILTIN(__builtin_ia32_vpmadd52luq256, "V4OiV4OiV4OiV4Oi", "ncV:256:", "avx512ifma,avx512vl|avxifma")
TARGET_BUILTIN(__builtin_ia32_vcomisd, "iV2dV2dIiIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_vcomiss, "iV4fV4fIiIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_kunpckdi, "UOiUOiUOi", "nc", "avx512bw")
TARGET_BUILTIN(__builtin_ia32_kunpcksi, "UiUiUi", "nc", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_loaddquhi512_mask, "V32sV32sC*V32sUi", "nV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_loaddquqi512_mask, "V64cV64cC*V64cUOi", "nV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_fixupimmpd512_mask, "V8dV8dV8dV8OiIiUcIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_fixupimmpd512_maskz, "V8dV8dV8dV8OiIiUcIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_fixupimmps512_mask, "V16fV16fV16fV16iIiUsIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_fixupimmps512_maskz, "V16fV16fV16fV16iIiUsIi", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_loaddquhi512_mask, "V32sV32sC*V32sUi", "nV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_loaddquqi512_mask, "V64cV64cC*V64cUOi", "nV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_fixupimmpd512_mask, "V8dV8dV8dV8OiIiUcIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_fixupimmpd512_maskz, "V8dV8dV8dV8OiIiUcIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_fixupimmps512_mask, "V16fV16fV16fV16iIiUsIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_fixupimmps512_maskz, "V16fV16fV16fV16iIiUsIi", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_fixupimmsd_mask, "V2dV2dV2dV2OiIiUcIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_fixupimmsd_maskz, "V2dV2dV2dV2OiIiUcIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_fixupimmss_mask, "V4fV4fV4fV4iIiUcIi", "ncV:128:", "avx512f")
@@ -1458,8 +1391,8 @@ TARGET_BUILTIN(__builtin_ia32_loadupd128_mask, "V2dV2dC*V2dUc", "nV:128:", "avx5
TARGET_BUILTIN(__builtin_ia32_loadupd256_mask, "V4dV4dC*V4dUc", "nV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_loadups128_mask, "V4fV4fC*V4fUc", "nV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_loadups256_mask, "V8fV8fC*V8fUc", "nV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_storedquhi512_mask, "vV32s*V32sUi", "nV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_storedquqi512_mask, "vV64c*V64cUOi", "nV:512:", "avx512bw")
+TARGET_BUILTIN(__builtin_ia32_storedquhi512_mask, "vV32s*V32sUi", "nV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_storedquqi512_mask, "vV64c*V64cUOi", "nV:512:", "avx512bw,evex512")
TARGET_BUILTIN(__builtin_ia32_storedquhi128_mask, "vV8s*V8sUc", "nV:128:", "avx512vl,avx512bw")
TARGET_BUILTIN(__builtin_ia32_storedquhi256_mask, "vV16s*V16sUs", "nV:256:", "avx512vl,avx512bw")
TARGET_BUILTIN(__builtin_ia32_storedquqi128_mask, "vV16c*V16cUs", "nV:128:", "avx512vl,avx512bw")
@@ -1494,38 +1427,38 @@ TARGET_BUILTIN(__builtin_ia32_vcvttsd2si32, "iV2dIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_vcvttsd2usi32, "UiV2dIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_vcvttss2si32, "iV4fIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_vcvttss2usi32, "UiV4fIi", "ncV:128:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_vpermilpd512, "V8dV8dIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_vpermilps512, "V16fV16fIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_vpermilvarpd512, "V8dV8dV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_vpermilvarps512, "V16fV16fV16i", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_vpermilpd512, "V8dV8dIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_vpermilps512, "V16fV16fIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_vpermilvarpd512, "V8dV8dV8Oi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_vpermilvarps512, "V16fV16fV16i", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_rndscalesd_round_mask, "V2dV2dV2dV2dUcIiIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_rndscaless_round_mask, "V4fV4fV4fV4fUcIiIi", "ncV:128:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_scalefpd512_mask, "V8dV8dV8dV8dUcIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_scalefps512_mask, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_scalefpd512_mask, "V8dV8dV8dV8dUcIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_scalefps512_mask, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_scalefsd_round_mask, "V2dV2dV2dV2dUcIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_scalefss_round_mask, "V4fV4fV4fV4fUcIi", "ncV:128:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_psradi512, "V16iV16ii", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_psraqi512, "V8OiV8Oii", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_psradi512, "V16iV16ii", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_psraqi512, "V8OiV8Oii", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_psraq128, "V2OiV2OiV2Oi", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_psraq256, "V4OiV4OiV2Oi", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_psraqi128, "V2OiV2Oii", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_psraqi256, "V4OiV4Oii", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pslld512, "V16iV16iV4i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_psllq512, "V8OiV8OiV2Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_psllv16si, "V16iV16iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_psllv8di, "V8OiV8OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_psrad512, "V16iV16iV4i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_psraq512, "V8OiV8OiV2Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_psrav16si, "V16iV16iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_psrav8di, "V8OiV8OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_psrld512, "V16iV16iV4i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_psrlq512, "V8OiV8OiV2Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_psrlv16si, "V16iV16iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_psrlv8di, "V8OiV8OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pternlogd512_mask, "V16iV16iV16iV16iIiUs", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pternlogd512_maskz, "V16iV16iV16iV16iIiUs", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pternlogq512_mask, "V8OiV8OiV8OiV8OiIiUc", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pternlogq512_maskz, "V8OiV8OiV8OiV8OiIiUc", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_pslld512, "V16iV16iV4i", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_psllq512, "V8OiV8OiV2Oi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_psllv16si, "V16iV16iV16i", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_psllv8di, "V8OiV8OiV8Oi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_psrad512, "V16iV16iV4i", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_psraq512, "V8OiV8OiV2Oi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_psrav16si, "V16iV16iV16i", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_psrav8di, "V8OiV8OiV8Oi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_psrld512, "V16iV16iV4i", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_psrlq512, "V8OiV8OiV2Oi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_psrlv16si, "V16iV16iV16i", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_psrlv8di, "V8OiV8OiV8Oi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pternlogd512_mask, "V16iV16iV16iV16iIiUs", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pternlogd512_maskz, "V16iV16iV16iV16iIiUs", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pternlogq512_mask, "V8OiV8OiV8OiV8OiIiUc", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pternlogq512_maskz, "V8OiV8OiV8OiV8OiIiUc", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_pternlogd128_mask, "V4iV4iV4iV4iIiUc", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pternlogd128_maskz, "V4iV4iV4iV4iIiUc", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pternlogd256_mask, "V8iV8iV8iV8iIiUc", "ncV:256:", "avx512vl")
@@ -1534,12 +1467,12 @@ TARGET_BUILTIN(__builtin_ia32_pternlogq128_mask, "V2OiV2OiV2OiV2OiIiUc", "ncV:12
TARGET_BUILTIN(__builtin_ia32_pternlogq128_maskz, "V2OiV2OiV2OiV2OiIiUc", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pternlogq256_mask, "V4OiV4OiV4OiV4OiIiUc", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pternlogq256_maskz, "V4OiV4OiV4OiV4OiIiUc", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_shuf_f32x4, "V16fV16fV16fIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_shuf_f64x2, "V8dV8dV8dIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_shuf_i32x4, "V16iV16iV16iIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_shuf_i64x2, "V8OiV8OiV8OiIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_shufpd512, "V8dV8dV8dIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_shufps512, "V16fV16fV16fIi", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_shuf_f32x4, "V16fV16fV16fIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_shuf_f64x2, "V8dV8dV8dIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_shuf_i32x4, "V16iV16iV16iIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_shuf_i64x2, "V8OiV8OiV8OiIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_shufpd512, "V8dV8dV8dIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_shufps512, "V16fV16fV16fIi", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_shuf_f32x4_256, "V8fV8fV8fIi", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_shuf_f64x2_256, "V4dV4dV4dIi", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_shuf_i32x4_256, "V8iV8iV8iIi", "ncV:256:", "avx512vl")
@@ -1550,13 +1483,13 @@ TARGET_BUILTIN(__builtin_ia32_rsqrt14pd128_mask, "V2dV2dV2dUc", "ncV:128:", "avx
TARGET_BUILTIN(__builtin_ia32_rsqrt14pd256_mask, "V4dV4dV4dUc", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_rsqrt14ps128_mask, "V4fV4fV4fUc", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_rsqrt14ps256_mask, "V8fV8fV8fUc", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_cvtb2mask512, "UOiV64c", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_cvtmask2b512, "V64cUOi", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_cvtmask2w512, "V32sUi", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_cvtd2mask512, "UsV16i", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_cvtmask2d512, "V16iUs", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_cvtmask2q512, "V8OiUc", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_cvtq2mask512, "UcV8Oi", "ncV:512:", "avx512dq")
+TARGET_BUILTIN(__builtin_ia32_cvtb2mask512, "UOiV64c", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtmask2b512, "V64cUOi", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtmask2w512, "V32sUi", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtd2mask512, "UsV16i", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtmask2d512, "V16iUs", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtmask2q512, "V8OiUc", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtq2mask512, "UcV8Oi", "ncV:512:", "avx512dq,evex512")
TARGET_BUILTIN(__builtin_ia32_cvtb2mask128, "UsV16c", "ncV:128:", "avx512bw,avx512vl")
TARGET_BUILTIN(__builtin_ia32_cvtb2mask256, "UiV32c", "ncV:256:", "avx512bw,avx512vl")
TARGET_BUILTIN(__builtin_ia32_cvtmask2b128, "V16cUs", "ncV:128:", "avx512bw,avx512vl")
@@ -1571,17 +1504,17 @@ TARGET_BUILTIN(__builtin_ia32_cvtmask2q128, "V2OiUc", "ncV:128:", "avx512dq,avx5
TARGET_BUILTIN(__builtin_ia32_cvtmask2q256, "V4OiUc", "ncV:256:", "avx512dq,avx512vl")
TARGET_BUILTIN(__builtin_ia32_cvtq2mask128, "UcV2Oi", "ncV:128:", "avx512dq,avx512vl")
TARGET_BUILTIN(__builtin_ia32_cvtq2mask256, "UcV4Oi", "ncV:256:", "avx512dq,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pmovsdb512_mask, "V16cV16iV16cUs", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovsdb512mem_mask, "vV16c*V16iUs", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovswb512mem_mask, "vV32c*V32sUi", "nV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pmovsdw512_mask, "V16sV16iV16sUs", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovsdw512mem_mask, "vV16s*V16iUs", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovsqb512_mask, "V16cV8OiV16cUc", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovsqb512mem_mask, "vV16c*V8OiUc", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovsqd512_mask, "V8iV8OiV8iUc", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovsqd512mem_mask, "vV8i*V8OiUc", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovsqw512_mask, "V8sV8OiV8sUc", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovsqw512mem_mask, "vV8s*V8OiUc", "nV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_pmovsdb512_mask, "V16cV16iV16cUs", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovsdb512mem_mask, "vV16c*V16iUs", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovswb512mem_mask, "vV32c*V32sUi", "nV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovsdw512_mask, "V16sV16iV16sUs", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovsdw512mem_mask, "vV16s*V16iUs", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovsqb512_mask, "V16cV8OiV16cUc", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovsqb512mem_mask, "vV16c*V8OiUc", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovsqd512_mask, "V8iV8OiV8iUc", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovsqd512mem_mask, "vV8i*V8OiUc", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovsqw512_mask, "V8sV8OiV8sUc", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovsqw512mem_mask, "vV8s*V8OiUc", "nV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_pmovsdb128_mask, "V16cV4iV16cUc", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovsdb128mem_mask, "vV16c*V4iUc", "nV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovswb128mem_mask, "vV16c*V8sUc", "nV:128:", "avx512vl,avx512bw")
@@ -1604,17 +1537,17 @@ TARGET_BUILTIN(__builtin_ia32_pmovsqw128_mask, "V8sV2OiV8sUc", "ncV:128:", "avx5
TARGET_BUILTIN(__builtin_ia32_pmovsqw128mem_mask, "vV8s*V2OiUc", "nV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovsqw256_mask, "V8sV4OiV8sUc", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovsqw256mem_mask, "vV8s*V4OiUc", "nV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pmovusdb512_mask, "V16cV16iV16cUs", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovusdb512mem_mask, "vV16c*V16iUs", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovuswb512mem_mask, "vV32c*V32sUi", "nV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pmovusdw512_mask, "V16sV16iV16sUs", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovusdw512mem_mask, "vV16s*V16iUs", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovusqb512_mask, "V16cV8OiV16cUc", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovusqb512mem_mask, "vV16c*V8OiUc", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovusqd512_mask, "V8iV8OiV8iUc", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovusqd512mem_mask, "vV8i*V8OiUc", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovusqw512_mask, "V8sV8OiV8sUc", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovusqw512mem_mask, "vV8s*V8OiUc", "nV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_pmovusdb512_mask, "V16cV16iV16cUs", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovusdb512mem_mask, "vV16c*V16iUs", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovuswb512mem_mask, "vV32c*V32sUi", "nV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovusdw512_mask, "V16sV16iV16sUs", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovusdw512mem_mask, "vV16s*V16iUs", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovusqb512_mask, "V16cV8OiV16cUc", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovusqb512mem_mask, "vV16c*V8OiUc", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovusqd512_mask, "V8iV8OiV8iUc", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovusqd512mem_mask, "vV8i*V8OiUc", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovusqw512_mask, "V8sV8OiV8sUc", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovusqw512mem_mask, "vV8s*V8OiUc", "nV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_pmovusdb128_mask, "V16cV4iV16cUc", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovusdb128mem_mask, "vV16c*V4iUc", "nV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovuswb128mem_mask, "vV16c*V8sUc", "nV:128:", "avx512vl,avx512bw")
@@ -1637,17 +1570,17 @@ TARGET_BUILTIN(__builtin_ia32_pmovusqw128_mask, "V8sV2OiV8sUc", "ncV:128:", "avx
TARGET_BUILTIN(__builtin_ia32_pmovusqw128mem_mask, "vV8s*V2OiUc", "nV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovusqw256_mask, "V8sV4OiV8sUc", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovusqw256mem_mask, "vV8s*V4OiUc", "nV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_pmovdb512_mask, "V16cV16iV16cUs", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovdb512mem_mask, "vV16c*V16iUs", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovwb512mem_mask, "vV32c*V32sUi", "nV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_pmovdw512_mask, "V16sV16iV16sUs", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovdw512mem_mask, "vV16s*V16iUs", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovqb512_mask, "V16cV8OiV16cUc", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovqb512mem_mask, "vV16c*V8OiUc", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovqd512_mask, "V8iV8OiV8iUc", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovqd512mem_mask, "vV8i*V8OiUc", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovqw512_mask, "V8sV8OiV8sUc", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pmovqw512mem_mask, "vV8s*V8OiUc", "nV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_pmovdb512_mask, "V16cV16iV16cUs", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovdb512mem_mask, "vV16c*V16iUs", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovwb512mem_mask, "vV32c*V32sUi", "nV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovdw512_mask, "V16sV16iV16sUs", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovdw512mem_mask, "vV16s*V16iUs", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovqb512_mask, "V16cV8OiV16cUc", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovqb512mem_mask, "vV16c*V8OiUc", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovqd512_mask, "V8iV8OiV8iUc", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovqd512mem_mask, "vV8i*V8OiUc", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovqw512_mask, "V8sV8OiV8sUc", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_pmovqw512mem_mask, "vV8s*V8OiUc", "nV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_pmovdb128_mask, "V16cV4iV16cUc", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovwb128mem_mask, "vV16c*V8sUc", "nV:128:", "avx512vl,avx512bw")
TARGET_BUILTIN(__builtin_ia32_pmovdb128mem_mask, "vV16c*V4iUc", "nV:128:", "avx512vl")
@@ -1669,36 +1602,36 @@ TARGET_BUILTIN(__builtin_ia32_pmovqw128_mask, "V8sV2OiV8sUc", "ncV:128:", "avx51
TARGET_BUILTIN(__builtin_ia32_pmovqw128mem_mask, "vV8s*V2OiUc", "nV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovqw256_mask, "V8sV4OiV8sUc", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pmovqw256mem_mask, "vV8s*V4OiUc", "nV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_extractf32x8_mask, "V8fV16fIiV8fUc", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_extractf64x2_512_mask, "V2dV8dIiV2dUc", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_extracti32x8_mask, "V8iV16iIiV8iUc", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_extracti64x2_512_mask, "V2OiV8OiIiV2OiUc", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_extracti32x4_mask, "V4iV16iIiV4iUc", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_extracti64x4_mask, "V4OiV8OiIiV4OiUc", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_extractf32x8_mask, "V8fV16fIiV8fUc", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_extractf64x2_512_mask, "V2dV8dIiV2dUc", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_extracti32x8_mask, "V8iV16iIiV8iUc", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_extracti64x2_512_mask, "V2OiV8OiIiV2OiUc", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_extracti32x4_mask, "V4iV16iIiV4iUc", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_extracti64x4_mask, "V4OiV8OiIiV4OiUc", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_extractf64x2_256_mask, "V2dV4dIiV2dUc", "ncV:256:", "avx512dq,avx512vl")
TARGET_BUILTIN(__builtin_ia32_extracti64x2_256_mask, "V2OiV4OiIiV2OiUc", "ncV:256:", "avx512dq,avx512vl")
TARGET_BUILTIN(__builtin_ia32_extractf32x4_256_mask, "V4fV8fIiV4fUc", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_extracti32x4_256_mask, "V4iV8iIiV4iUc", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_insertf32x8, "V16fV16fV8fIi", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_insertf64x2_512, "V8dV8dV2dIi", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_inserti32x8, "V16iV16iV8iIi", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_inserti64x2_512, "V8OiV8OiV2OiIi", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_insertf64x4, "V8dV8dV4dIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_inserti64x4, "V8OiV8OiV4OiIi", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_insertf32x8, "V16fV16fV8fIi", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_insertf64x2_512, "V8dV8dV2dIi", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_inserti32x8, "V16iV16iV8iIi", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_inserti64x2_512, "V8OiV8OiV2OiIi", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_insertf64x4, "V8dV8dV4dIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_inserti64x4, "V8OiV8OiV4OiIi", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_insertf64x2_256, "V4dV4dV2dIi", "ncV:256:", "avx512dq,avx512vl")
TARGET_BUILTIN(__builtin_ia32_inserti64x2_256, "V4OiV4OiV2OiIi", "ncV:256:", "avx512dq,avx512vl")
TARGET_BUILTIN(__builtin_ia32_insertf32x4_256, "V8fV8fV4fIi", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_inserti32x4_256, "V8iV8iV4iIi", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_insertf32x4, "V16fV16fV4fIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_inserti32x4, "V16iV16iV4iIi", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_insertf32x4, "V16fV16fV4fIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_inserti32x4, "V16iV16iV4iIi", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_getmantpd128_mask, "V2dV2dIiV2dUc", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_getmantpd256_mask, "V4dV4dIiV4dUc", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_getmantps128_mask, "V4fV4fIiV4fUc", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_getmantps256_mask, "V8fV8fIiV8fUc", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_getmantpd512_mask, "V8dV8dIiV8dUcIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_getmantps512_mask, "V16fV16fIiV16fUsIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_getexppd512_mask, "V8dV8dV8dUcIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_getexpps512_mask, "V16fV16fV16fUsIi", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_getmantpd512_mask, "V8dV8dIiV8dUcIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_getmantps512_mask, "V16fV16fIiV16fUsIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_getexppd512_mask, "V8dV8dV8dUcIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_getexpps512_mask, "V16fV16fV16fUsIi", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_vfmaddss3_mask, "V4fV4fV4fV4fUcIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_vfmaddss3_maskz, "V4fV4fV4fV4fUcIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_vfmaddss3_mask3, "V4fV4fV4fV4fUcIi", "ncV:128:", "avx512f")
@@ -1707,14 +1640,14 @@ TARGET_BUILTIN(__builtin_ia32_vfmaddsd3_maskz, "V2dV2dV2dV2dUcIi", "ncV:128:", "
TARGET_BUILTIN(__builtin_ia32_vfmaddsd3_mask3, "V2dV2dV2dV2dUcIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_vfmsubsd3_mask3, "V2dV2dV2dV2dUcIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_vfmsubss3_mask3, "V4fV4fV4fV4fUcIi", "ncV:128:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_permdf512, "V8dV8dIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_permdi512, "V8OiV8OiIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_permvarhi512, "V32sV32sV32s", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_permvardf512, "V8dV8dV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_permvardi512, "V8OiV8OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_permvarsf512, "V16fV16fV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_permvarsi512, "V16iV16iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_permvarqi512, "V64cV64cV64c", "ncV:512:", "avx512vbmi")
+TARGET_BUILTIN(__builtin_ia32_permdf512, "V8dV8dIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_permdi512, "V8OiV8OiIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_permvarhi512, "V32sV32sV32s", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_permvardf512, "V8dV8dV8Oi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_permvardi512, "V8OiV8OiV8Oi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_permvarsf512, "V16fV16fV16i", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_permvarsi512, "V16iV16iV16i", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_permvarqi512, "V64cV64cV64c", "ncV:512:", "avx512vbmi,evex512")
TARGET_BUILTIN(__builtin_ia32_permvarqi128, "V16cV16cV16c", "ncV:128:", "avx512vbmi,avx512vl")
TARGET_BUILTIN(__builtin_ia32_permvarqi256, "V32cV32cV32c", "ncV:256:", "avx512vbmi,avx512vl")
TARGET_BUILTIN(__builtin_ia32_permvarhi128, "V8sV8sV8s", "ncV:128:", "avx512bw,avx512vl")
@@ -1725,8 +1658,8 @@ TARGET_BUILTIN(__builtin_ia32_fpclasspd128_mask, "UcV2dIiUc", "ncV:128:", "avx51
TARGET_BUILTIN(__builtin_ia32_fpclasspd256_mask, "UcV4dIiUc", "ncV:256:", "avx512dq,avx512vl")
TARGET_BUILTIN(__builtin_ia32_fpclassps128_mask, "UcV4fIiUc", "ncV:128:", "avx512dq,avx512vl")
TARGET_BUILTIN(__builtin_ia32_fpclassps256_mask, "UcV8fIiUc", "ncV:256:", "avx512dq,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_fpclassps512_mask, "UsV16fIiUs", "ncV:512:", "avx512dq")
-TARGET_BUILTIN(__builtin_ia32_fpclasspd512_mask, "UcV8dIiUc", "ncV:512:", "avx512dq")
+TARGET_BUILTIN(__builtin_ia32_fpclassps512_mask, "UsV16fIiUs", "ncV:512:", "avx512dq,evex512")
+TARGET_BUILTIN(__builtin_ia32_fpclasspd512_mask, "UcV8dIiUc", "ncV:512:", "avx512dq,evex512")
TARGET_BUILTIN(__builtin_ia32_fpclasssd_mask, "UcV2dIiUc", "ncV:128:", "avx512dq")
TARGET_BUILTIN(__builtin_ia32_fpclassss_mask, "UcV4fIiUc", "ncV:128:", "avx512dq")
TARGET_BUILTIN(__builtin_ia32_kaddqi, "UcUcUc", "nc", "avx512dq")
@@ -1782,120 +1715,321 @@ TARGET_BUILTIN(__builtin_ia32_kmovb, "UcUc", "nc", "avx512dq")
TARGET_BUILTIN(__builtin_ia32_kmovw, "UsUs", "nc", "avx512f")
TARGET_BUILTIN(__builtin_ia32_kmovd, "UiUi", "nc", "avx512bw")
TARGET_BUILTIN(__builtin_ia32_kmovq, "UOiUOi", "nc", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_palignr512, "V64cV64cV64cIi", "ncV:512:", "avx512bw")
+TARGET_BUILTIN(__builtin_ia32_palignr512, "V64cV64cV64cIi", "ncV:512:", "avx512bw,evex512")
TARGET_BUILTIN(__builtin_ia32_dbpsadbw128, "V8sV16cV16cIi", "ncV:128:", "avx512bw,avx512vl")
TARGET_BUILTIN(__builtin_ia32_dbpsadbw256, "V16sV32cV32cIi", "ncV:256:", "avx512bw,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_dbpsadbw512, "V32sV64cV64cIi", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_psadbw512, "V8OiV64cV64c", "ncV:512:", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_compressdf512_mask, "V8dV8dV8dUc", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_compressdi512_mask, "V8OiV8OiV8OiUc", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_compresshi512_mask, "V32sV32sV32sUi", "ncV:512:", "avx512vbmi2")
-TARGET_BUILTIN(__builtin_ia32_compressqi512_mask, "V64cV64cV64cUOi", "ncV:512:", "avx512vbmi2")
-TARGET_BUILTIN(__builtin_ia32_compresssf512_mask, "V16fV16fV16fUs", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_compresssi512_mask, "V16iV16iV16iUs", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_dbpsadbw512, "V32sV64cV64cIi", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_psadbw512, "V8OiV64cV64c", "ncV:512:", "avx512bw,evex512")
+TARGET_BUILTIN(__builtin_ia32_compressdf512_mask, "V8dV8dV8dUc", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_compressdi512_mask, "V8OiV8OiV8OiUc", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_compresshi512_mask, "V32sV32sV32sUi", "ncV:512:", "avx512vbmi2,evex512")
+TARGET_BUILTIN(__builtin_ia32_compressqi512_mask, "V64cV64cV64cUOi", "ncV:512:", "avx512vbmi2,evex512")
+TARGET_BUILTIN(__builtin_ia32_compresssf512_mask, "V16fV16fV16fUs", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_compresssi512_mask, "V16iV16iV16iUs", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_cmpsd_mask, "UcV2dV2dIiUcIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_cmpss_mask, "UcV4fV4fIiUcIi", "ncV:128:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_pshufd512, "V16iV16iIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_expanddf512_mask, "V8dV8dV8dUc", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_expanddi512_mask, "V8OiV8OiV8OiUc", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_expandhi512_mask, "V32sV32sV32sUi", "ncV:512:", "avx512vbmi2")
-TARGET_BUILTIN(__builtin_ia32_expandqi512_mask, "V64cV64cV64cUOi", "ncV:512:", "avx512vbmi2")
-TARGET_BUILTIN(__builtin_ia32_expandloaddf512_mask, "V8dV8dC*V8dUc", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_expandloaddi512_mask, "V8OiV8OiC*V8OiUc", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_expandloadhi512_mask, "V32sV32sC*V32sUi", "nV:512:", "avx512vbmi2")
-TARGET_BUILTIN(__builtin_ia32_expandloadqi512_mask, "V64cV64cC*V64cUOi", "nV:512:", "avx512vbmi2")
-TARGET_BUILTIN(__builtin_ia32_expandloadsf512_mask, "V16fV16fC*V16fUs", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_expandloadsi512_mask, "V16iV16iC*V16iUs", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_expandsf512_mask, "V16fV16fV16fUs", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_expandsi512_mask, "V16iV16iV16iUs", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_cvtps2pd512_mask, "V8dV8fV8dUcIi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_compressstoredf512_mask, "vV8d*V8dUc", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_compressstoredi512_mask, "vV8Oi*V8OiUc", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_compressstorehi512_mask, "vV32s*V32sUi", "nV:512:", "avx512vbmi2")
-TARGET_BUILTIN(__builtin_ia32_compressstoreqi512_mask, "vV64c*V64cUOi", "nV:512:", "avx512vbmi2")
-TARGET_BUILTIN(__builtin_ia32_compressstoresf512_mask, "vV16f*V16fUs", "nV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_compressstoresi512_mask, "vV16i*V16iUs", "nV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_pshufd512, "V16iV16iIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_expanddf512_mask, "V8dV8dV8dUc", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_expanddi512_mask, "V8OiV8OiV8OiUc", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_expandhi512_mask, "V32sV32sV32sUi", "ncV:512:", "avx512vbmi2,evex512")
+TARGET_BUILTIN(__builtin_ia32_expandqi512_mask, "V64cV64cV64cUOi", "ncV:512:", "avx512vbmi2,evex512")
+TARGET_BUILTIN(__builtin_ia32_expandloaddf512_mask, "V8dV8dC*V8dUc", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_expandloaddi512_mask, "V8OiV8OiC*V8OiUc", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_expandloadhi512_mask, "V32sV32sC*V32sUi", "nV:512:", "avx512vbmi2,evex512")
+TARGET_BUILTIN(__builtin_ia32_expandloadqi512_mask, "V64cV64cC*V64cUOi", "nV:512:", "avx512vbmi2,evex512")
+TARGET_BUILTIN(__builtin_ia32_expandloadsf512_mask, "V16fV16fC*V16fUs", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_expandloadsi512_mask, "V16iV16iC*V16iUs", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_expandsf512_mask, "V16fV16fV16fUs", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_expandsi512_mask, "V16iV16iV16iUs", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtps2pd512_mask, "V8dV8fV8dUcIi", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_compressstoredf512_mask, "vV8d*V8dUc", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_compressstoredi512_mask, "vV8Oi*V8OiUc", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_compressstorehi512_mask, "vV32s*V32sUi", "nV:512:", "avx512vbmi2,evex512")
+TARGET_BUILTIN(__builtin_ia32_compressstoreqi512_mask, "vV64c*V64cUOi", "nV:512:", "avx512vbmi2,evex512")
+TARGET_BUILTIN(__builtin_ia32_compressstoresf512_mask, "vV16f*V16fUs", "nV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_compressstoresi512_mask, "vV16i*V16iUs", "nV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_vcvtph2ps_mask, "V4fV8sV4fUc", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_vcvtph2ps256_mask, "V8fV8sV8fUc", "ncV:256:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_vcvtps2ph_mask, "V8sV4fIiV8sUc", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_vcvtps2ph256_mask, "V8sV8fIiV8sUc", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_cvtw2mask512, "UiV32s", "ncV:512:", "avx512bw")
+TARGET_BUILTIN(__builtin_ia32_cvtw2mask512, "UiV32s", "ncV:512:", "avx512bw,evex512")
TARGET_BUILTIN(__builtin_ia32_cvtw2mask128, "UcV8s", "ncV:128:", "avx512bw,avx512vl")
TARGET_BUILTIN(__builtin_ia32_cvtw2mask256, "UsV16s", "ncV:256:", "avx512bw,avx512vl")
TARGET_BUILTIN(__builtin_ia32_cvtsd2ss_round_mask, "V4fV4fV2dV4fUcIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_cvtsi2ss32, "V4fV4fiIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_cvtss2sd_round_mask, "V2dV2dV4fV2dUcIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_cvtusi2ss32, "V4fV4fUiIi", "ncV:128:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_vpmultishiftqb512, "V64cV64cV64c", "ncV:512:", "avx512vbmi")
+TARGET_BUILTIN(__builtin_ia32_vpmultishiftqb512, "V64cV64cV64c", "ncV:512:", "avx512vbmi,evex512")
TARGET_BUILTIN(__builtin_ia32_vpmultishiftqb128, "V16cV16cV16c", "ncV:128:", "avx512vbmi,avx512vl")
TARGET_BUILTIN(__builtin_ia32_vpmultishiftqb256, "V32cV32cV32c", "ncV:256:", "avx512vbmi,avx512vl")
// bf16 intrinsics
-TARGET_BUILTIN(__builtin_ia32_cvtne2ps2bf16_128, "V8sV4fV4f", "ncV:128:", "avx512bf16,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_cvtne2ps2bf16_256, "V16sV8fV8f", "ncV:256:", "avx512bf16,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_cvtne2ps2bf16_512, "V32sV16fV16f", "ncV:512:", "avx512bf16")
-TARGET_BUILTIN(__builtin_ia32_cvtneps2bf16_128_mask, "V8sV4fV8sUc", "ncV:128:", "avx512bf16,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_cvtneps2bf16_256_mask, "V8sV8fV8sUc", "ncV:256:", "avx512bf16,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_cvtneps2bf16_512_mask, "V16sV16fV16sUs", "ncV:512:", "avx512bf16")
-TARGET_BUILTIN(__builtin_ia32_dpbf16ps_128, "V4fV4fV4iV4i", "ncV:128:", "avx512bf16,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_dpbf16ps_256, "V8fV8fV8iV8i", "ncV:256:", "avx512bf16,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_dpbf16ps_512, "V16fV16fV16iV16i", "ncV:512:", "avx512bf16")
-TARGET_BUILTIN(__builtin_ia32_cvtsbf162ss_32, "fUs", "nc", "avx512bf16")
-
-TARGET_BUILTIN(__builtin_ia32_vp2intersect_q_512, "vV8OiV8OiUc*Uc*", "nV:512:", "avx512vp2intersect")
+TARGET_BUILTIN(__builtin_ia32_cvtne2ps2bf16_128, "V8yV4fV4f", "ncV:128:", "avx512bf16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_cvtne2ps2bf16_256, "V16yV8fV8f", "ncV:256:", "avx512bf16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_cvtne2ps2bf16_512, "V32yV16fV16f", "ncV:512:", "avx512bf16,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtneps2bf16_128_mask, "V8yV4fV8yUc", "ncV:128:", "avx512bf16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_cvtneps2bf16_256_mask, "V8yV8fV8yUc", "ncV:256:", "avx512bf16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_cvtneps2bf16_512_mask, "V16yV16fV16yUs", "ncV:512:", "avx512bf16,evex512")
+TARGET_BUILTIN(__builtin_ia32_dpbf16ps_128, "V4fV4fV8yV8y", "ncV:128:", "avx512bf16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_dpbf16ps_256, "V8fV8fV16yV16y", "ncV:256:", "avx512bf16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_dpbf16ps_512, "V16fV16fV32yV32y", "ncV:512:", "avx512bf16,evex512")
+TARGET_BUILTIN(__builtin_ia32_cvtsbf162ss_32, "fy", "nc", "avx512bf16")
+
+TARGET_BUILTIN(__builtin_ia32_vp2intersect_q_512, "vV8OiV8OiUc*Uc*", "nV:512:", "avx512vp2intersect,evex512")
TARGET_BUILTIN(__builtin_ia32_vp2intersect_q_256, "vV4OiV4OiUc*Uc*", "nV:256:", "avx512vp2intersect,avx512vl")
TARGET_BUILTIN(__builtin_ia32_vp2intersect_q_128, "vV2OiV2OiUc*Uc*", "nV:128:", "avx512vp2intersect,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_vp2intersect_d_512, "vV16iV16iUs*Us*", "nV:512:", "avx512vp2intersect")
+TARGET_BUILTIN(__builtin_ia32_vp2intersect_d_512, "vV16iV16iUs*Us*", "nV:512:", "avx512vp2intersect,evex512")
TARGET_BUILTIN(__builtin_ia32_vp2intersect_d_256, "vV8iV8iUc*Uc*", "nV:256:", "avx512vp2intersect,avx512vl")
TARGET_BUILTIN(__builtin_ia32_vp2intersect_d_128, "vV4iV4iUc*Uc*", "nV:128:", "avx512vp2intersect,avx512vl")
+// AVX512 fp16 intrinsics
+TARGET_BUILTIN(__builtin_ia32_vcomish, "iV8xV8xIiIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_addph512, "V32xV32xV32xIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_subph512, "V32xV32xV32xIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_mulph512, "V32xV32xV32xIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_divph512, "V32xV32xV32xIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_maxph512, "V32xV32xV32xIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_minph512, "V32xV32xV32xIi", "ncV:512:", "avx512fp16,evex512")
+
+TARGET_BUILTIN(__builtin_ia32_minph256, "V16xV16xV16x", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_minph128, "V8xV8xV8x", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_maxph256, "V16xV16xV16x", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_maxph128, "V8xV8xV8x", "ncV:128:", "avx512fp16,avx512vl")
+
+TARGET_BUILTIN(__builtin_ia32_addsh_round_mask, "V8xV8xV8xV8xUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_divsh_round_mask, "V8xV8xV8xV8xUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_mulsh_round_mask, "V8xV8xV8xV8xUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_subsh_round_mask, "V8xV8xV8xV8xUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_maxsh_round_mask, "V8xV8xV8xV8xUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_minsh_round_mask, "V8xV8xV8xV8xUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_cmpph512_mask, "UiV32xV32xIiUiIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_cmpph256_mask, "UsV16xV16xIiUs", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_cmpph128_mask, "UcV8xV8xIiUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_cmpsh_mask, "UcV8xV8xIiUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_loadsh128_mask, "V8xV8xC*V8xUc", "nV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_storesh128_mask, "vV8x*V8xUc", "nV:128:", "avx512fp16")
+
+TARGET_BUILTIN(__builtin_ia32_rcpph128_mask, "V8xV8xV8xUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_rcpph256_mask, "V16xV16xV16xUs", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_rcpph512_mask, "V32xV32xV32xUi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_rsqrtph128_mask, "V8xV8xV8xUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_rsqrtph256_mask, "V16xV16xV16xUs", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_rsqrtph512_mask, "V32xV32xV32xUi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_getmantph128_mask, "V8xV8xIiV8xUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_getmantph256_mask, "V16xV16xIiV16xUs", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_getmantph512_mask, "V32xV32xIiV32xUiIi", "ncV:512:", "avx512fp16,evex512")
+
+TARGET_BUILTIN(__builtin_ia32_getexpph128_mask, "V8xV8xV8xUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_getexpph256_mask, "V16xV16xV16xUs", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_getexpph512_mask, "V32xV32xV32xUiIi", "ncV:512:", "avx512fp16,evex512")
+
+TARGET_BUILTIN(__builtin_ia32_scalefph128_mask, "V8xV8xV8xV8xUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_scalefph256_mask, "V16xV16xV16xV16xUs", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_scalefph512_mask, "V32xV32xV32xV32xUiIi", "ncV:512:", "avx512fp16,evex512")
+
+TARGET_BUILTIN(__builtin_ia32_rndscaleph_128_mask, "V8xV8xIiV8xUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_rndscaleph_256_mask, "V16xV16xIiV16xUs", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_rndscaleph_mask, "V32xV32xIiV32xUiIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_reduceph128_mask, "V8xV8xIiV8xUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_reduceph256_mask, "V16xV16xIiV16xUs", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_reduceph512_mask, "V32xV32xIiV32xUiIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_rcpsh_mask, "V8xV8xV8xV8xUc", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_rsqrtsh_mask, "V8xV8xV8xV8xUc", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_getmantsh_round_mask, "V8xV8xV8xIiV8xUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_getexpsh128_round_mask, "V8xV8xV8xV8xUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_scalefsh_round_mask, "V8xV8xV8xV8xUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_rndscalesh_round_mask, "V8xV8xV8xV8xUcIiIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_reducesh_mask, "V8xV8xV8xV8xUcIiIi", "ncV:128:", "avx512fp16")
+
+TARGET_BUILTIN(__builtin_ia32_sqrtph, "V8xV8x", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_sqrtph256, "V16xV16x", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_sqrtph512, "V32xV32xIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_sqrtsh_round_mask, "V8xV8xV8xV8xUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_fpclassph128_mask, "UcV8xIiUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_fpclassph256_mask, "UsV16xIiUs", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_fpclassph512_mask, "UiV32xIiUi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_fpclasssh_mask, "UcV8xIiUc", "ncV:128:", "avx512fp16")
+
+TARGET_BUILTIN(__builtin_ia32_vcvtpd2ph128_mask, "V8xV2dV8xUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtpd2ph256_mask, "V8xV4dV8xUc", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtpd2ph512_mask, "V8xV8dV8xUcIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2pd128_mask, "V2dV8xV2dUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2pd256_mask, "V4dV8xV4dUc", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2pd512_mask, "V8dV8xV8dUcIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvtsh2ss_round_mask, "V4fV4fV8xV4fUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vcvtss2sh_round_mask, "V8xV8xV4fV8xUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vcvtsd2sh_round_mask, "V8xV8xV2dV8xUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vcvtsh2sd_round_mask, "V2dV2dV8xV2dUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2w128_mask, "V8sV8xV8sUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2w256_mask, "V16sV16xV16sUs", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2w512_mask, "V32sV32xV32sUiIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2w128_mask, "V8sV8xV8sUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2w256_mask, "V16sV16xV16sUs", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2w512_mask, "V32sV32xV32sUiIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvtw2ph128_mask, "V8xV8sV8xUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtw2ph256_mask, "V16xV16sV16xUs", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtw2ph512_mask, "V32xV32sV32xUiIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2uw128_mask, "V8UsV8xV8UsUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2uw256_mask, "V16UsV16xV16UsUs", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2uw512_mask, "V32UsV32xV32UsUiIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2uw128_mask, "V8UsV8xV8UsUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2uw256_mask, "V16UsV16xV16UsUs", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2uw512_mask, "V32UsV32xV32UsUiIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvtuw2ph128_mask, "V8xV8UsV8xUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtuw2ph256_mask, "V16xV16UsV16xUs", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtuw2ph512_mask, "V32xV32UsV32xUiIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2dq128_mask, "V4iV8xV4iUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2dq256_mask, "V8iV8xV8iUc", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2dq512_mask, "V16iV16xV16iUsIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2udq128_mask, "V4UiV8xV4UiUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2udq256_mask, "V8UiV8xV8UiUc", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2udq512_mask, "V16UiV16xV16UiUsIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvtdq2ph128_mask, "V8xV4iV8xUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtdq2ph256_mask, "V8xV8iV8xUc", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtdq2ph512_mask, "V16xV16iV16xUsIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvtudq2ph128_mask, "V8xV4UiV8xUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtudq2ph256_mask, "V8xV8UiV8xUc", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtudq2ph512_mask, "V16xV16UiV16xUsIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2dq128_mask, "V4iV8xV4iUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2dq256_mask, "V8iV8xV8iUc", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2dq512_mask, "V16iV16xV16iUsIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2udq128_mask, "V4UiV8xV4UiUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2udq256_mask, "V8UiV8xV8UiUc", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2udq512_mask, "V16UiV16xV16UiUsIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvtqq2ph128_mask, "V8xV2OiV8xUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtqq2ph256_mask, "V8xV4OiV8xUc", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtqq2ph512_mask, "V8xV8OiV8xUcIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2qq128_mask, "V2OiV8xV2OiUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2qq256_mask, "V4OiV8xV4OiUc", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2qq512_mask, "V8OiV8xV8OiUcIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvtuqq2ph128_mask, "V8xV2UOiV8xUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtuqq2ph256_mask, "V8xV4UOiV8xUc", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtuqq2ph512_mask, "V8xV8UOiV8xUcIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2uqq128_mask, "V2UOiV8xV2UOiUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2uqq256_mask, "V4UOiV8xV4UOiUc", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2uqq512_mask, "V8UOiV8xV8UOiUcIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2qq128_mask, "V2OiV8xV2OiUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2qq256_mask, "V4OiV8xV4OiUc", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2qq512_mask, "V8OiV8xV8OiUcIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2uqq128_mask, "V2UOiV8xV2UOiUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2uqq256_mask, "V4UOiV8xV4UOiUc", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvttph2uqq512_mask, "V8UOiV8xV8UOiUcIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvtsh2si32, "iV8xIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vcvtsh2usi32, "UiV8xIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vcvtusi2sh, "V8xV8xUiIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vcvtsi2sh, "V8xV8xiIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vcvttsh2si32, "iV8xIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vcvttsh2usi32, "UiV8xIi", "ncV:128:", "avx512fp16")
+
+TARGET_BUILTIN(__builtin_ia32_vcvtph2psx128_mask, "V4fV8xV4fUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2psx256_mask, "V8fV8xV8fUc", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtph2psx512_mask, "V16fV16xV16fUsIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vcvtps2phx128_mask, "V8xV4fV8xUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtps2phx256_mask, "V8xV8fV8xUc", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vcvtps2phx512_mask, "V16xV16fV16xUsIi", "ncV:512:", "avx512fp16,evex512")
+
+TARGET_BUILTIN(__builtin_ia32_vfmaddph, "V8xV8xV8xV8x", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vfmaddph256, "V16xV16xV16xV16x", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vfmaddph512_mask, "V32xV32xV32xV32xUiIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmaddph512_mask3, "V32xV32xV32xV32xUiIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmaddph512_maskz, "V32xV32xV32xV32xUiIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmaddsubph, "V8xV8xV8xV8x", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vfmaddsubph256, "V16xV16xV16xV16x", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vfmaddsubph512_mask, "V32xV32xV32xV32xUiIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmaddsubph512_maskz, "V32xV32xV32xV32xUiIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmaddsubph512_mask3, "V32xV32xV32xV32xUiIi", "ncV:512:", "avx512fp16,evex512")
+
+TARGET_BUILTIN(__builtin_ia32_vfmsubaddph512_mask3, "V32xV32xV32xV32xUiIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmsubph512_mask3, "V32xV32xV32xV32xUiIi", "ncV:512:", "avx512fp16,evex512")
+
+TARGET_BUILTIN(__builtin_ia32_vfmaddsh3_mask, "V8xV8xV8xV8xUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vfmaddsh3_maskz, "V8xV8xV8xV8xUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vfmaddsh3_mask3, "V8xV8xV8xV8xUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vfmsubsh3_mask3, "V8xV8xV8xV8xUcIi", "ncV:128:", "avx512fp16")
+
+TARGET_BUILTIN(__builtin_ia32_vfmaddcph128_mask, "V4fV4fV4fV4fUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vfmaddcph128_maskz, "V4fV4fV4fV4fUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vfmaddcph256_mask, "V8fV8fV8fV8fUc", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vfmaddcph256_maskz, "V8fV8fV8fV8fUc", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vfmaddcph512_mask, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmaddcph512_maskz, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmaddcph512_mask3, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfcmaddcph128_mask, "V4fV4fV4fV4fUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vfcmaddcph128_maskz, "V4fV4fV4fV4fUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vfcmaddcph256_mask, "V8fV8fV8fV8fUc", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vfcmaddcph256_maskz, "V8fV8fV8fV8fUc", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vfcmaddcph512_mask, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfcmaddcph512_maskz, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfcmaddcph512_mask3, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfmaddcsh_mask, "V4fV4fV4fV4fUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vfmaddcsh_maskz, "V4fV4fV4fV4fUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vfcmaddcsh_mask, "V4fV4fV4fV4fUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vfcmaddcsh_maskz, "V4fV4fV4fV4fUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vfmaddcsh_round_mask, "V4fV4fV4fV4fUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vfmaddcsh_round_mask3, "V4fV4fV4fV4fUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vfcmaddcsh_round_mask, "V4fV4fV4fV4fUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vfcmaddcsh_round_mask3, "V4fV4fV4fV4fUcIi", "ncV:128:", "avx512fp16")
+
+TARGET_BUILTIN(__builtin_ia32_vfmulcsh_mask, "V4fV4fV4fV4fUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vfcmulcsh_mask, "V4fV4fV4fV4fUcIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vfmulcph128_mask, "V4fV4fV4fV4fUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vfmulcph256_mask, "V8fV8fV8fV8fUc", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vfmulcph512_mask, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_vfcmulcph128_mask, "V4fV4fV4fV4fUc", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vfcmulcph256_mask, "V8fV8fV8fV8fUc", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_vfcmulcph512_mask, "V16fV16fV16fV16fUsIi", "ncV:512:", "avx512fp16,evex512")
+
// generic select intrinsics
TARGET_BUILTIN(__builtin_ia32_selectb_128, "V16cUsV16cV16c", "ncV:128:", "avx512bw,avx512vl")
TARGET_BUILTIN(__builtin_ia32_selectb_256, "V32cUiV32cV32c", "ncV:256:", "avx512bw,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_selectb_512, "V64cUOiV64cV64c", "ncV:512:", "avx512bw")
+TARGET_BUILTIN(__builtin_ia32_selectb_512, "V64cUOiV64cV64c", "ncV:512:", "avx512bw,evex512")
TARGET_BUILTIN(__builtin_ia32_selectw_128, "V8sUcV8sV8s", "ncV:128:", "avx512bw,avx512vl")
TARGET_BUILTIN(__builtin_ia32_selectw_256, "V16sUsV16sV16s", "ncV:256:", "avx512bw,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_selectw_512, "V32sUiV32sV32s", "ncV:512:", "avx512bw")
+TARGET_BUILTIN(__builtin_ia32_selectw_512, "V32sUiV32sV32s", "ncV:512:", "avx512bw,evex512")
TARGET_BUILTIN(__builtin_ia32_selectd_128, "V4iUcV4iV4i", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_selectd_256, "V8iUcV8iV8i", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_selectd_512, "V16iUsV16iV16i", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_selectd_512, "V16iUsV16iV16i", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_selectph_128, "V8xUcV8xV8x", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_selectph_256, "V16xUsV16xV16x", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_selectph_512, "V32xUiV32xV32x", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_selectpbf_128, "V8yUcV8yV8y", "ncV:128:", "avx512bf16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_selectpbf_256, "V16yUsV16yV16y", "ncV:256:", "avx512bf16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_selectpbf_512, "V32yUiV32yV32y", "ncV:512:", "avx512bf16,evex512")
TARGET_BUILTIN(__builtin_ia32_selectq_128, "V2OiUcV2OiV2Oi", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_selectq_256, "V4OiUcV4OiV4Oi", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_selectq_512, "V8OiUcV8OiV8Oi", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_selectq_512, "V8OiUcV8OiV8Oi", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_selectps_128, "V4fUcV4fV4f", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_selectps_256, "V8fUcV8fV8f", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_selectps_512, "V16fUsV16fV16f", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_selectps_512, "V16fUsV16fV16f", "ncV:512:", "avx512f,evex512")
TARGET_BUILTIN(__builtin_ia32_selectpd_128, "V2dUcV2dV2d", "ncV:128:", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_selectpd_256, "V4dUcV4dV4d", "ncV:256:", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_selectpd_512, "V8dUcV8dV8d", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_selectpd_512, "V8dUcV8dV8d", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_selectsh_128, "V8xUcV8xV8x", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_selectsbf_128, "V8yUcV8yV8y", "ncV:128:", "avx512bf16")
TARGET_BUILTIN(__builtin_ia32_selectss_128, "V4fUcV4fV4f", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_selectsd_128, "V2dUcV2dV2d", "ncV:128:", "avx512f")
// generic reduction intrinsics
-TARGET_BUILTIN(__builtin_ia32_reduce_add_d512, "iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_add_q512, "OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_and_d512, "iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_and_q512, "OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_fadd_pd512, "ddV8d", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_fadd_ps512, "ffV16f", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_fmax_pd512, "dV8d", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_fmax_ps512, "fV16f", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_fmin_pd512, "dV8d", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_fmin_ps512, "fV16f", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_fmul_pd512, "ddV8d", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_fmul_ps512, "ffV16f", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_mul_d512, "iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_mul_q512, "OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_or_d512, "iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_or_q512, "OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_smax_d512, "iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_smax_q512, "OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_smin_d512, "iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_smin_q512, "OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_umax_d512, "iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_umax_q512, "OiV8Oi", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_umin_d512, "iV16i", "ncV:512:", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_reduce_umin_q512, "OiV8Oi", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_reduce_fadd_pd512, "ddV8d", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_reduce_fadd_ps512, "ffV16f", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_reduce_fadd_ph512, "xxV32x", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_reduce_fadd_ph256, "xxV16x", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_reduce_fadd_ph128, "xxV8x", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_reduce_fmax_pd512, "dV8d", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_reduce_fmax_ps512, "fV16f", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_reduce_fmax_ph512, "xV32x", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_reduce_fmax_ph256, "xV16x", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_reduce_fmax_ph128, "xV8x", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_reduce_fmin_pd512, "dV8d", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_reduce_fmin_ps512, "fV16f", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_reduce_fmin_ph512, "xV32x", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_reduce_fmin_ph256, "xV16x", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_reduce_fmin_ph128, "xV8x", "ncV:128:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_reduce_fmul_pd512, "ddV8d", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_reduce_fmul_ps512, "ffV16f", "ncV:512:", "avx512f,evex512")
+TARGET_BUILTIN(__builtin_ia32_reduce_fmul_ph512, "xxV32x", "ncV:512:", "avx512fp16,evex512")
+TARGET_BUILTIN(__builtin_ia32_reduce_fmul_ph256, "xxV16x", "ncV:256:", "avx512fp16,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_reduce_fmul_ph128, "xxV8x", "ncV:128:", "avx512fp16,avx512vl")
// MONITORX/MWAITX
TARGET_BUILTIN(__builtin_ia32_monitorx, "vvC*UiUi", "n", "mwaitx")
@@ -1946,41 +2080,96 @@ TARGET_BUILTIN(__builtin_ia32_serialize, "v", "n", "serialize")
TARGET_BUILTIN(__builtin_ia32_xsusldtrk, "v", "n", "tsxldtrk")
TARGET_BUILTIN(__builtin_ia32_xresldtrk, "v", "n", "tsxldtrk")
+// RAO-INT
+TARGET_BUILTIN(__builtin_ia32_aadd32, "vv*Si", "n", "raoint")
+TARGET_BUILTIN(__builtin_ia32_aand32, "vv*Si", "n", "raoint")
+TARGET_BUILTIN(__builtin_ia32_aor32, "vv*Si", "n", "raoint")
+TARGET_BUILTIN(__builtin_ia32_axor32, "vv*Si", "n", "raoint")
+
// MSVC
-TARGET_HEADER_BUILTIN(_BitScanForward, "UcUNi*UNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_BitScanReverse, "UcUNi*UNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_ReadWriteBarrier, "v", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_ReadBarrier, "v", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_WriteBarrier, "v", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(__emul, "LLiii", "nch", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__emulu, "ULLiUiUi", "nch", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_AddressOfReturnAddress, "v*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(__stosb, "vUc*Ucz", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__int2c, "v", "nhr", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__ud2, "v", "nhr", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(__readfsbyte, "UcUNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__readfsword, "UsUNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__readfsdword, "UNiUNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__readfsqword, "ULLiUNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(__readgsbyte, "UcUNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__readgsword, "UsUNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__readgsdword, "UNiUNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__readgsqword, "ULLiUNi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
-TARGET_HEADER_BUILTIN(_InterlockedAnd64, "WiWiD*Wi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedDecrement64, "WiWiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchange64, "WiWiD*Wi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64, "WiWiD*Wi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedExchangeSub64, "WiWiD*Wi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedIncrement64, "WiWiD*", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedOr64, "WiWiD*Wi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor64, "WiWiD*Wi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_BitScanForward, "UcUNi*UNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_BitScanReverse, "UcUNi*UNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_ReadWriteBarrier, "v", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_ReadBarrier, "v", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_WriteBarrier, "v", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(__cpuid, "vi*i", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__cpuidex, "vi*ii", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(__emul, "LLiii", "nch", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__emulu, "ULLiUiUi", "nch", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(_AddressOfReturnAddress, "v*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(__stosb, "vUc*Ucz", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__int2c, "v", "nhr", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__ud2, "v", "nhr", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(__readfsbyte, "UcUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__readfsword, "UsUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__readfsdword, "UNiUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__readfsqword, "ULLiUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+TARGET_HEADER_BUILTIN(__readgsbyte, "UcUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__readgsword, "UsUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__readgsdword, "UNiUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__readgsqword, "ULLiUNi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+// AVX-VNNI-INT16
+TARGET_BUILTIN(__builtin_ia32_vpdpwsud128, "V4iV4iV4iV4i", "nV:128:", "avxvnniint16")
+TARGET_BUILTIN(__builtin_ia32_vpdpwsud256, "V8iV8iV8iV8i", "nV:256:", "avxvnniint16")
+TARGET_BUILTIN(__builtin_ia32_vpdpwsuds128, "V4iV4iV4iV4i", "nV:128:", "avxvnniint16")
+TARGET_BUILTIN(__builtin_ia32_vpdpwsuds256, "V8iV8iV8iV8i", "nV:256:", "avxvnniint16")
+TARGET_BUILTIN(__builtin_ia32_vpdpwusd128, "V4iV4iV4iV4i", "nV:128:", "avxvnniint16")
+TARGET_BUILTIN(__builtin_ia32_vpdpwusd256, "V8iV8iV8iV8i", "nV:256:", "avxvnniint16")
+TARGET_BUILTIN(__builtin_ia32_vpdpwusds128, "V4iV4iV4iV4i", "nV:128:", "avxvnniint16")
+TARGET_BUILTIN(__builtin_ia32_vpdpwusds256, "V8iV8iV8iV8i", "nV:256:", "avxvnniint16")
+TARGET_BUILTIN(__builtin_ia32_vpdpwuud128, "V4iV4iV4iV4i", "nV:128:", "avxvnniint16")
+TARGET_BUILTIN(__builtin_ia32_vpdpwuud256, "V8iV8iV8iV8i", "nV:256:", "avxvnniint16")
+TARGET_BUILTIN(__builtin_ia32_vpdpwuuds128, "V4iV4iV4iV4i", "nV:128:", "avxvnniint16")
+TARGET_BUILTIN(__builtin_ia32_vpdpwuuds256, "V8iV8iV8iV8i", "nV:256:", "avxvnniint16")
+
+// AVX-NE-CONVERT
+TARGET_BUILTIN(__builtin_ia32_vbcstnebf162ps128, "V4fyC*", "nV:128:", "avxneconvert")
+TARGET_BUILTIN(__builtin_ia32_vbcstnebf162ps256, "V8fyC*", "nV:256:", "avxneconvert")
+TARGET_BUILTIN(__builtin_ia32_vbcstnesh2ps128, "V4fxC*", "nV:128:", "avxneconvert")
+TARGET_BUILTIN(__builtin_ia32_vbcstnesh2ps256, "V8fxC*", "nV:256:", "avxneconvert")
+TARGET_BUILTIN(__builtin_ia32_vcvtneebf162ps128, "V4fV8yC*", "nV:128:", "avxneconvert")
+TARGET_BUILTIN(__builtin_ia32_vcvtneebf162ps256, "V8fV16yC*", "nV:256:", "avxneconvert")
+TARGET_BUILTIN(__builtin_ia32_vcvtneeph2ps128, "V4fV8xC*", "nV:128:", "avxneconvert")
+TARGET_BUILTIN(__builtin_ia32_vcvtneeph2ps256, "V8fV16xC*", "nV:256:", "avxneconvert")
+TARGET_BUILTIN(__builtin_ia32_vcvtneobf162ps128, "V4fV8yC*", "nV:128:", "avxneconvert")
+TARGET_BUILTIN(__builtin_ia32_vcvtneobf162ps256, "V8fV16yC*", "nV:256:", "avxneconvert")
+TARGET_BUILTIN(__builtin_ia32_vcvtneoph2ps128, "V4fV8xC*", "nV:128:", "avxneconvert")
+TARGET_BUILTIN(__builtin_ia32_vcvtneoph2ps256, "V8fV16xC*", "nV:256:", "avxneconvert")
+TARGET_BUILTIN(__builtin_ia32_vcvtneps2bf16128, "V8yV4f", "nV:128:", "avx512bf16,avx512vl|avxneconvert")
+TARGET_BUILTIN(__builtin_ia32_vcvtneps2bf16256, "V8yV8f", "nV:256:", "avx512bf16,avx512vl|avxneconvert")
+
+// SHA512
+TARGET_BUILTIN(__builtin_ia32_vsha512msg1, "V4ULLiV4ULLiV2ULLi", "nV:256:", "sha512")
+TARGET_BUILTIN(__builtin_ia32_vsha512msg2, "V4ULLiV4ULLiV4ULLi", "nV:256:", "sha512")
+TARGET_BUILTIN(__builtin_ia32_vsha512rnds2, "V4ULLiV4ULLiV4ULLiV2ULLi", "nV:256:", "sha512")
+
+TARGET_HEADER_BUILTIN(_InterlockedAnd64, "WiWiD*Wi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedDecrement64, "WiWiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchange64, "WiWiD*Wi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeAdd64, "WiWiD*Wi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedExchangeSub64, "WiWiD*Wi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedIncrement64, "WiWiD*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedOr64, "WiWiD*Wi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedXor64, "WiWiD*Wi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+
+// SM3
+TARGET_BUILTIN(__builtin_ia32_vsm3msg1, "V4UiV4UiV4UiV4Ui", "nV:128:", "sm3")
+TARGET_BUILTIN(__builtin_ia32_vsm3msg2, "V4UiV4UiV4UiV4Ui", "nV:128:", "sm3")
+TARGET_BUILTIN(__builtin_ia32_vsm3rnds2, "V4UiV4UiV4UiV4UiIUi", "nV:128:", "sm3")
+
+// SM4
+TARGET_BUILTIN(__builtin_ia32_vsm4key4128, "V4UiV4UiV4Ui", "nV:128:", "sm4")
+TARGET_BUILTIN(__builtin_ia32_vsm4key4256, "V8UiV8UiV8Ui", "nV:256:", "sm4")
+TARGET_BUILTIN(__builtin_ia32_vsm4rnds4128, "V4UiV4UiV4Ui", "nV:128:", "sm4")
+TARGET_BUILTIN(__builtin_ia32_vsm4rnds4256, "V8UiV8UiV8Ui", "nV:256:", "sm4")
#undef BUILTIN
#undef TARGET_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86_64.def b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86_64.def
index ce2b1decdf6c..5e00916d4b25 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86_64.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/BuiltinsX86_64.def
@@ -21,19 +21,19 @@
# define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANG, FEATURE) BUILTIN(ID, TYPE, ATTRS)
#endif
-TARGET_HEADER_BUILTIN(_BitScanForward64, "UcUNi*ULLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_BitScanReverse64, "UcUNi*ULLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_BitScanForward64, "UcUNi*ULLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_BitScanReverse64, "UcUNi*ULLi", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__mulh, "LLiLLiLLi", "nch", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__umulh, "ULLiULLiULLi", "nch", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_mul128, "LLiLLiLLiLLi*", "nch", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_umul128, "ULLiULLiULLiULLi*", "nch", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__mulh, "LLiLLiLLi", "nch", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__umulh, "ULLiULLiULLi", "nch", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_mul128, "LLiLLiLLiLLi*", "nch", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_umul128, "ULLiULLiULLiULLi*", "nch", INTRIN_H, ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__faststorefence, "v", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__shiftleft128, "ULLiULLiULLiUc", "nch", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(__shiftright128, "ULLiULLiULLiUc", "nch", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__faststorefence, "v", "nh", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__shiftleft128, "ULLiULLiULLiUc", "nch", INTRIN_H, ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__shiftright128, "ULLiULLiULLiUc", "nch", INTRIN_H, ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedCompareExchange128, "UcLLiD*LLiLLiLLi*", "nh", "intrin.h", ALL_MS_LANGUAGES, "cx16")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange128, "UcLLiD*LLiLLiLLi*", "nh", INTRIN_H, ALL_MS_LANGUAGES, "cx16")
TARGET_BUILTIN(__builtin_ia32_readeflags_u64, "UOi", "n", "")
TARGET_BUILTIN(__builtin_ia32_writeeflags_u64, "vUOi", "n", "")
@@ -42,9 +42,8 @@ TARGET_BUILTIN(__builtin_ia32_cvttss2si64, "OiV4f", "ncV:128:", "sse")
TARGET_BUILTIN(__builtin_ia32_cvtsd2si64, "OiV2d", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_cvttsd2si64, "OiV2d", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_movnti64, "vOi*Oi", "n", "sse2")
-TARGET_BUILTIN(__builtin_ia32_vec_ext_v2di, "OiV2OiIi", "ncV:128:", "sse2")
TARGET_BUILTIN(__builtin_ia32_vec_set_v2di, "V2OiV2OiOiIi", "ncV:128:", "sse4.1")
-TARGET_BUILTIN(__builtin_ia32_crc32di, "UOiUOiUOi", "nc", "sse4.2")
+TARGET_BUILTIN(__builtin_ia32_crc32di, "UOiUOiUOi", "nc", "crc32")
TARGET_BUILTIN(__builtin_ia32_vec_ext_v4di, "OiV4OiIi", "ncV:256:", "avx")
TARGET_BUILTIN(__builtin_ia32_vec_set_v4di, "V4OiV4OiOiIi", "ncV:256:", "avx")
TARGET_BUILTIN(__builtin_ia32_rdfsbase32, "Ui", "n", "fsgsbase")
@@ -92,6 +91,12 @@ TARGET_BUILTIN(__builtin_ia32_cvtsi2sd64, "V2dV2dOiIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_cvtsi2ss64, "V4fV4fOiIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_cvtusi2sd64, "V2dV2dUOiIi", "ncV:128:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_cvtusi2ss64, "V4fV4fUOiIi", "ncV:128:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_vcvtsh2si64, "OiV8xIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vcvtsh2usi64, "UOiV8xIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vcvtusi642sh, "V8xV8xUOiIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vcvtsi642sh, "V8xV8xOiIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vcvttsh2si64, "OiV8xIi", "ncV:128:", "avx512fp16")
+TARGET_BUILTIN(__builtin_ia32_vcvttsh2usi64, "UOiV8xIi", "ncV:128:", "avx512fp16")
TARGET_BUILTIN(__builtin_ia32_directstore_u64, "vULi*ULi", "n", "movdiri")
// UINTR
@@ -99,6 +104,9 @@ TARGET_BUILTIN(__builtin_ia32_clui, "v", "n", "uintr")
TARGET_BUILTIN(__builtin_ia32_stui, "v", "n", "uintr")
TARGET_BUILTIN(__builtin_ia32_testui, "Uc", "n", "uintr")
TARGET_BUILTIN(__builtin_ia32_senduipi, "vUWi", "n", "uintr")
+// USERMSR
+TARGET_BUILTIN(__builtin_ia32_urdmsr, "ULLiULLi", "n", "usermsr")
+TARGET_BUILTIN(__builtin_ia32_uwrmsr, "vULLiULLi", "n", "usermsr")
// AMX internal builtin
TARGET_BUILTIN(__builtin_ia32_tile_loadconfig_internal, "vvC*", "n", "amx-tile")
@@ -111,6 +119,9 @@ TARGET_BUILTIN(__builtin_ia32_tdpbuud_internal, "V256iUsUsUsV256iV256iV256i", "n
TARGET_BUILTIN(__builtin_ia32_tilestored64_internal, "vUsUsv*zV256i", "n", "amx-tile")
TARGET_BUILTIN(__builtin_ia32_tilezero_internal, "V256iUsUs", "n", "amx-tile")
TARGET_BUILTIN(__builtin_ia32_tdpbf16ps_internal, "V256iUsUsUsV256iV256iV256i", "n", "amx-bf16")
+TARGET_BUILTIN(__builtin_ia32_tdpfp16ps_internal, "V256iUsUsUsV256iV256iV256i", "n", "amx-fp16")
+TARGET_BUILTIN(__builtin_ia32_tcmmimfp16ps_internal, "V256iUsUsUsV256iV256iV256i", "n", "amx-complex")
+TARGET_BUILTIN(__builtin_ia32_tcmmrlfp16ps_internal, "V256iUsUsUsV256iV256iV256i", "n", "amx-complex")
// AMX
TARGET_BUILTIN(__builtin_ia32_tile_loadconfig, "vvC*", "n", "amx-tile")
TARGET_BUILTIN(__builtin_ia32_tile_storeconfig, "vvC*", "n", "amx-tile")
@@ -128,6 +139,22 @@ TARGET_BUILTIN(__builtin_ia32_tdpbuud, "vIUcIUcIUc", "n", "amx-int8")
TARGET_BUILTIN(__builtin_ia32_tdpbf16ps, "vIUcIUcIUc", "n", "amx-bf16")
TARGET_BUILTIN(__builtin_ia32_ptwrite64, "vUOi", "n", "ptwrite")
+TARGET_BUILTIN(__builtin_ia32_tcmmimfp16ps, "vIUcIUcIUc", "n", "amx-complex")
+TARGET_BUILTIN(__builtin_ia32_tcmmrlfp16ps, "vIUcIUcIUc", "n", "amx-complex")
+
+TARGET_BUILTIN(__builtin_ia32_prefetchi, "vvC*Ui", "nc", "prefetchi")
+TARGET_BUILTIN(__builtin_ia32_cmpccxadd32, "Siv*SiSiIi", "n", "cmpccxadd")
+TARGET_BUILTIN(__builtin_ia32_cmpccxadd64, "SLLiv*SLLiSLLiIi", "n", "cmpccxadd")
+
+// AMX_FP16 FP16
+TARGET_BUILTIN(__builtin_ia32_tdpfp16ps, "vIUcIUcIUc", "n", "amx-fp16")
+
+// RAO-INT
+TARGET_BUILTIN(__builtin_ia32_aadd64, "vv*SOi", "n", "raoint")
+TARGET_BUILTIN(__builtin_ia32_aand64, "vv*SOi", "n", "raoint")
+TARGET_BUILTIN(__builtin_ia32_aor64, "vv*SOi", "n", "raoint")
+TARGET_BUILTIN(__builtin_ia32_axor64, "vv*SOi", "n", "raoint")
+
#undef BUILTIN
#undef TARGET_BUILTIN
#undef TARGET_HEADER_BUILTIN
diff --git a/contrib/llvm-project/clang/include/clang/Basic/CLWarnings.h b/contrib/llvm-project/clang/include/clang/Basic/CLWarnings.h
new file mode 100644
index 000000000000..9b8be93bad3a
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/CLWarnings.h
@@ -0,0 +1,26 @@
+//===--- CLWarnings.h - Maps some cl.exe warning ids -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_CLWARNINGS_H
+#define LLVM_CLANG_BASIC_CLWARNINGS_H
+
+#include <optional>
+
+namespace clang {
+
+namespace diag {
+enum class Group;
+}
+
+/// For cl.exe warning IDs that cleany map to clang diagnostic groups,
+/// returns the corresponding group. Else, returns an empty Optional.
+std::optional<diag::Group> diagGroupFromCLWarningID(unsigned);
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_BASIC_CLWARNINGS_H
diff --git a/contrib/llvm-project/clang/include/clang/Basic/CharInfo.h b/contrib/llvm-project/clang/include/clang/Basic/CharInfo.h
index 8577475fab06..7d4119383508 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/CharInfo.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/CharInfo.h
@@ -38,15 +38,21 @@ namespace charinfo {
};
} // end namespace charinfo
-/// Returns true if this is an ASCII character.
+/// Returns true if a byte is an ASCII character.
LLVM_READNONE inline bool isASCII(char c) {
return static_cast<unsigned char>(c) <= 127;
}
+LLVM_READNONE inline bool isASCII(unsigned char c) { return c <= 127; }
+
+/// Returns true if a codepoint is an ASCII character.
+LLVM_READNONE inline bool isASCII(uint32_t c) { return c <= 127; }
+LLVM_READNONE inline bool isASCII(int64_t c) { return 0 <= c && c <= 127; }
+
/// Returns true if this is a valid first character of a C identifier,
/// which is [a-zA-Z_].
-LLVM_READONLY inline bool isIdentifierHead(unsigned char c,
- bool AllowDollar = false) {
+LLVM_READONLY inline bool isAsciiIdentifierStart(unsigned char c,
+ bool AllowDollar = false) {
using namespace charinfo;
if (InfoTable[c] & (CHAR_UPPER|CHAR_LOWER|CHAR_UNDER))
return true;
@@ -55,8 +61,8 @@ LLVM_READONLY inline bool isIdentifierHead(unsigned char c,
/// Returns true if this is a body character of a C identifier,
/// which is [a-zA-Z0-9_].
-LLVM_READONLY inline bool isIdentifierBody(unsigned char c,
- bool AllowDollar = false) {
+LLVM_READONLY inline bool isAsciiIdentifierContinue(unsigned char c,
+ bool AllowDollar = false) {
using namespace charinfo;
if (InfoTable[c] & (CHAR_UPPER|CHAR_LOWER|CHAR_DIGIT|CHAR_UNDER))
return true;
@@ -157,6 +163,44 @@ LLVM_READONLY inline bool isRawStringDelimBody(unsigned char c) {
CHAR_DIGIT|CHAR_UNDER|CHAR_RAWDEL)) != 0;
}
+enum class EscapeChar {
+ Single = 1,
+ Double = 2,
+ SingleAndDouble = static_cast<int>(Single) | static_cast<int>(Double),
+};
+
+/// Return C-style escaped string for special characters, or an empty string if
+/// there is no such mapping.
+template <EscapeChar Opt, class CharT>
+LLVM_READONLY inline auto escapeCStyle(CharT Ch) -> StringRef {
+ switch (Ch) {
+ case '\\':
+ return "\\\\";
+ case '\'':
+ if ((static_cast<int>(Opt) & static_cast<int>(EscapeChar::Single)) == 0)
+ break;
+ return "\\'";
+ case '"':
+ if ((static_cast<int>(Opt) & static_cast<int>(EscapeChar::Double)) == 0)
+ break;
+ return "\\\"";
+ case '\a':
+ return "\\a";
+ case '\b':
+ return "\\b";
+ case '\f':
+ return "\\f";
+ case '\n':
+ return "\\n";
+ case '\r':
+ return "\\r";
+ case '\t':
+ return "\\t";
+ case '\v':
+ return "\\v";
+ }
+ return {};
+}
/// Converts the given ASCII character to its lowercase equivalent.
///
@@ -181,13 +225,13 @@ LLVM_READONLY inline char toUppercase(char c) {
///
/// Note that this is a very simple check; it does not accept UCNs as valid
/// identifier characters.
-LLVM_READONLY inline bool isValidIdentifier(StringRef S,
- bool AllowDollar = false) {
- if (S.empty() || !isIdentifierHead(S[0], AllowDollar))
+LLVM_READONLY inline bool isValidAsciiIdentifier(StringRef S,
+ bool AllowDollar = false) {
+ if (S.empty() || !isAsciiIdentifierStart(S[0], AllowDollar))
return false;
for (StringRef::iterator I = S.begin(), E = S.end(); I != E; ++I)
- if (!isIdentifierBody(*I, AllowDollar))
+ if (!isAsciiIdentifierContinue(*I, AllowDollar))
return false;
return true;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.def b/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.def
index e3202cf88756..7c0bfe328496 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.def
@@ -12,6 +12,9 @@
// that have enumeration type and VALUE_CODEGENOPT is a code
// generation option that describes a value rather than a flag.
//
+// AFFECTING_VALUE_CODEGENOPT is used for code generation options that can
+// affect the AST.
+//
//===----------------------------------------------------------------------===//
#ifndef CODEGENOPT
# error Define the CODEGENOPT macro to handle language options
@@ -27,16 +30,19 @@ CODEGENOPT(Name, Bits, Default)
CODEGENOPT(Name, Bits, Default)
#endif
+#ifndef AFFECTING_VALUE_CODEGENOPT
+# define AFFECTING_VALUE_CODEGENOPT(Name, Bits, Default) \
+VALUE_CODEGENOPT(Name, Bits, Default)
+#endif
+
CODEGENOPT(DisableIntegratedAS, 1, 0) ///< -no-integrated-as
-ENUM_CODEGENOPT(CompressDebugSections, llvm::DebugCompressionType, 2,
- llvm::DebugCompressionType::None)
-CODEGENOPT(RelaxELFRelocations, 1, 0) ///< -Wa,--mrelax-relocations
+CODEGENOPT(RelaxELFRelocations, 1, 1) ///< -Wa,-mrelax-relocations={yes,no}
CODEGENOPT(AsmVerbose , 1, 0) ///< -dA, -fverbose-asm.
-CODEGENOPT(Dwarf64 , 1, 0) ///< -gdwarf64.
-CODEGENOPT(Dwarf32 , 1, 1) ///< -gdwarf32.
CODEGENOPT(PreserveAsmComments, 1, 1) ///< -dA, -fno-preserve-as-comments.
CODEGENOPT(AssumeSaneOperatorNew , 1, 1) ///< implicit __attribute__((malloc)) operator new
+CODEGENOPT(AssumeUniqueVTables , 1, 1) ///< Assume a class has only one vtable.
CODEGENOPT(Autolink , 1, 1) ///< -fno-autolink
+CODEGENOPT(AutoImport , 1, 1) ///< -fno-auto-import
CODEGENOPT(ObjCAutoRefCountExceptions , 1, 0) ///< Whether ARC should be EH-safe.
CODEGENOPT(Backchain , 1, 0) ///< -mbackchain
CODEGENOPT(ControlFlowGuardNoChecks , 1, 0) ///< -cfguard-no-checks
@@ -52,8 +58,10 @@ CODEGENOPT(UniqueBasicBlockSectionNames, 1, 1) ///< Set for -funique-basic-block
///< Produce unique section names with
///< basic block sections.
CODEGENOPT(EnableAIXExtendedAltivecABI, 1, 0) ///< Set for -mabi=vec-extabi. Enables the extended Altivec ABI on AIX.
+CODEGENOPT(XCOFFReadOnlyPointers, 1, 0) ///< Set for -mxcoff-roptr.
ENUM_CODEGENOPT(FramePointer, FramePointerKind, 2, FramePointerKind::None) /// frame-pointer: all,non-leaf,none
+CODEGENOPT(ClearASTBeforeBackend , 1, 0) ///< Free the AST before running backend code generation. Only works with -disable-free.
CODEGENOPT(DisableFree , 1, 0) ///< Don't free memory.
CODEGENOPT(DiscardValueNames , 1, 0) ///< Discard Value Names from the IR (LLVMContext flag)
CODEGENOPT(DisableLLVMPasses , 1, 0) ///< Don't run any LLVM IR passes to get
@@ -64,16 +72,11 @@ CODEGENOPT(DisableO0ImplyOptNone , 1, 0) ///< Don't annonate function with optno
CODEGENOPT(ExperimentalStrictFloatingPoint, 1, 0) ///< Enables the new, experimental
///< strict floating point.
CODEGENOPT(EnableNoundefAttrs, 1, 0) ///< Enable emitting `noundef` attributes on IR call arguments and return values
-CODEGENOPT(LegacyPassManager, 1, 0) ///< Use the legacy pass manager.
CODEGENOPT(DebugPassManager, 1, 0) ///< Prints debug information for the new
///< pass manager.
CODEGENOPT(DisableRedZone , 1, 0) ///< Set when -mno-red-zone is enabled.
CODEGENOPT(EmitCallSiteInfo, 1, 0) ///< Emit call site info only in the case of
///< '-g' + 'O>0' level.
-CODEGENOPT(EnableDIPreservationVerify, 1, 0) ///< Enable di preservation verify
- ///< each (it means check
- ///< the original debug info
- ///< metadata preservation).
CODEGENOPT(IndirectTlsSegRefs, 1, 0) ///< Set when -mno-tls-direct-seg-refs
///< is specified.
CODEGENOPT(DisableTailCalls , 1, 0) ///< Do not emit tail calls.
@@ -84,13 +87,12 @@ CODEGENOPT(EmitDeclMetadata , 1, 0) ///< Emit special metadata indicating what
///< Only useful when running CodeGen as a
///< subroutine.
CODEGENOPT(EmitVersionIdentMetadata , 1, 1) ///< Emit compiler version metadata.
-CODEGENOPT(EmitGcovArcs , 1, 0) ///< Emit coverage data files, aka. GCDA.
-CODEGENOPT(EmitGcovNotes , 1, 0) ///< Emit coverage "notes" files, aka GCNO.
CODEGENOPT(EmitOpenCLArgMetadata , 1, 0) ///< Emit OpenCL kernel arg metadata.
CODEGENOPT(EmulatedTLS , 1, 0) ///< Set by default or -f[no-]emulated-tls.
-CODEGENOPT(ExplicitEmulatedTLS , 1, 0) ///< Set if -f[no-]emulated-tls is used.
/// Embed Bitcode mode (off/all/bitcode/marker).
ENUM_CODEGENOPT(EmbedBitcode, EmbedBitcodeKind, 2, Embed_Off)
+/// Inline asm dialect, -masm=(att|intel)
+ENUM_CODEGENOPT(InlineAsmDialect, InlineAsmDialectKind, 1, IAD_ATT)
CODEGENOPT(ForbidGuardVariables , 1, 0) ///< Issue errors if C++ guard variables
///< are required.
CODEGENOPT(FunctionSections , 1, 0) ///< Set when -ffunction-sections is enabled.
@@ -104,11 +106,16 @@ CODEGENOPT(CFProtectionReturn , 1, 0) ///< if -fcf-protection is
///< set to full or return.
CODEGENOPT(CFProtectionBranch , 1, 0) ///< if -fcf-protection is
///< set to full or branch.
+CODEGENOPT(FunctionReturnThunks, 1, 0) ///< -mfunction-return={keep|thunk-extern}
+CODEGENOPT(IndirectBranchCSPrefix, 1, 0) ///< if -mindirect-branch-cs-prefix
+ ///< is set.
+
CODEGENOPT(XRayInstrumentFunctions , 1, 0) ///< Set when -fxray-instrument is
///< enabled.
CODEGENOPT(StackSizeSection , 1, 0) ///< Set when -fstack-size-section is enabled.
-CODEGENOPT(ForceDwarfFrameSection , 1, 0) ///< Set when -fforce-dwarf-frame is
- ///< enabled.
+
+///< Set when -femit-compact-unwind-non-canonical is enabled.
+CODEGENOPT(EmitCompactUnwindNonCanonical, 1, 0)
///< Set when -fxray-always-emit-customevents is enabled.
CODEGENOPT(XRayAlwaysEmitCustomEvents , 1, 0)
@@ -119,8 +126,8 @@ CODEGENOPT(XRayAlwaysEmitTypedEvents , 1, 0)
///< Set when -fxray-ignore-loops is enabled.
CODEGENOPT(XRayIgnoreLoops , 1, 0)
-///< Set with -fno-xray-function-index to omit the index section.
-CODEGENOPT(XRayOmitFunctionIndex , 1, 0)
+///< Emit the XRay function index section.
+CODEGENOPT(XRayFunctionIndex , 1, 1)
///< Set the minimum number of instructions in a function to determine selective
@@ -136,6 +143,10 @@ VALUE_CODEGENOPT(XRaySelectedFunctionGroup, 32, 0)
VALUE_CODEGENOPT(PatchableFunctionEntryCount , 32, 0) ///< Number of NOPs at function entry
VALUE_CODEGENOPT(PatchableFunctionEntryOffset , 32, 0)
+CODEGENOPT(HotPatch, 1, 0) ///< Supports the Microsoft /HOTPATCH flag and
+ ///< generates a 'patchable-function' attribute.
+
+CODEGENOPT(JMCInstrument, 1, 0) ///< Set when -fjmc is enabled.
CODEGENOPT(InstrumentForProfiling , 1, 0) ///< Set when -pg is enabled.
CODEGENOPT(CallFEntry , 1, 0) ///< Set when -mfentry is enabled.
CODEGENOPT(MNopMCount , 1, 0) ///< Set when -mnop-mcount is enabled.
@@ -149,58 +160,66 @@ CODEGENOPT(PrepareForThinLTO , 1, 0) ///< Set when -flto=thin is enabled on the
///< compile step.
CODEGENOPT(LTOUnit, 1, 0) ///< Emit IR to support LTO unit features (CFI, whole
///< program vtable opt).
+CODEGENOPT(FatLTO, 1, 0) ///< Set when -ffat-lto-objects is enabled.
CODEGENOPT(EnableSplitLTOUnit, 1, 0) ///< Enable LTO unit splitting to support
/// CFI and traditional whole program
/// devirtualization that require whole
/// program IR support.
+CODEGENOPT(UnifiedLTO, 1, 0) ///< Use the unified LTO pipeline.
CODEGENOPT(IncrementalLinkerCompatible, 1, 0) ///< Emit an object file which can
///< be used with an incremental
///< linker.
CODEGENOPT(MergeAllConstants , 1, 1) ///< Merge identical constants.
CODEGENOPT(MergeFunctions , 1, 0) ///< Set when -fmerge-functions is enabled.
-CODEGENOPT(MSVolatile , 1, 0) ///< Set when /volatile:ms is enabled.
CODEGENOPT(NoCommon , 1, 0) ///< Set when -fno-common or C++ is enabled.
-CODEGENOPT(NoDwarfDirectoryAsm , 1, 0) ///< Set when -fno-dwarf-directory-asm is
- ///< enabled.
CODEGENOPT(NoExecStack , 1, 0) ///< Set when -Wa,--noexecstack is enabled.
CODEGENOPT(FatalWarnings , 1, 0) ///< Set when -Wa,--fatal-warnings is
///< enabled.
CODEGENOPT(NoWarn , 1, 0) ///< Set when -Wa,--no-warn is enabled.
+CODEGENOPT(NoTypeCheck , 1, 0) ///< Set when -Wa,--no-type-check is enabled.
+CODEGENOPT(MisExpect , 1, 0) ///< Set when -Wmisexpect is enabled
CODEGENOPT(EnableSegmentedStacks , 1, 0) ///< Set when -fsplit-stack is enabled.
-CODEGENOPT(NoInlineLineTables, 1, 0) ///< Whether debug info should contain
- ///< inline line tables.
CODEGENOPT(StackClashProtector, 1, 0) ///< Set when -fstack-clash-protection is enabled.
CODEGENOPT(NoImplicitFloat , 1, 0) ///< Set when -mno-implicit-float is enabled.
CODEGENOPT(NullPointerIsValid , 1, 0) ///< Assume Null pointer deference is defined.
CODEGENOPT(OpenCLCorrectlyRoundedDivSqrt, 1, 0) ///< -cl-fp32-correctly-rounded-divide-sqrt
CODEGENOPT(HIPCorrectlyRoundedDivSqrt, 1, 1) ///< -fno-hip-fp32-correctly-rounded-divide-sqrt
+CODEGENOPT(HIPSaveKernelArgName, 1, 0) ///< Set when -fhip-kernel-arg-name is enabled.
CODEGENOPT(UniqueInternalLinkageNames, 1, 0) ///< Internal Linkage symbols get unique names.
CODEGENOPT(SplitMachineFunctions, 1, 0) ///< Split machine functions using profile information.
+CODEGENOPT(PPCUseFullRegisterNames, 1, 0) ///< Print full register names in assembly
/// When false, this attempts to generate code as if the result of an
/// overflowing conversion matches the overflowing behavior of a target's native
/// float-to-int conversion instructions.
CODEGENOPT(StrictFloatCastOverflow, 1, 1)
-CODEGENOPT(UniformWGSize , 1, 0) ///< -cl-uniform-work-group-size
CODEGENOPT(NoZeroInitializedInBSS , 1, 0) ///< -fno-zero-initialized-in-bss.
/// Method of Objective-C dispatch to use.
ENUM_CODEGENOPT(ObjCDispatchMethod, ObjCDispatchMethodKind, 2, Legacy)
/// Replace certain message sends with calls to ObjC runtime entrypoints
CODEGENOPT(ObjCConvertMessagesToRuntimeCalls , 1, 1)
+CODEGENOPT(ObjCAvoidHeapifyLocalBlocks, 1, 0)
-VALUE_CODEGENOPT(OptimizationLevel, 2, 0) ///< The -O[0-3] option specified.
-VALUE_CODEGENOPT(OptimizeSize, 2, 0) ///< If -Os (==1) or -Oz (==2) is specified.
+
+// The optimization options affect frontend options, whicn in turn do affect the AST.
+AFFECTING_VALUE_CODEGENOPT(OptimizationLevel, 2, 0) ///< The -O[0-3] option specified.
+AFFECTING_VALUE_CODEGENOPT(OptimizeSize, 2, 0) ///< If -Os (==1) or -Oz (==2) is specified.
CODEGENOPT(AtomicProfileUpdate , 1, 0) ///< Set -fprofile-update=atomic
/// Choose profile instrumenation kind or no instrumentation.
ENUM_CODEGENOPT(ProfileInstr, ProfileInstrKind, 2, ProfileNone)
/// Choose profile kind for PGO use compilation.
ENUM_CODEGENOPT(ProfileUse, ProfileInstrKind, 2, ProfileNone)
+/// Partition functions into N groups and select only functions in group i to be
+/// instrumented. Selected group numbers can be 0 to N-1 inclusive.
+VALUE_CODEGENOPT(ProfileTotalFunctionGroups, 32, 1)
+VALUE_CODEGENOPT(ProfileSelectedFunctionGroup, 32, 0)
CODEGENOPT(CoverageMapping , 1, 0) ///< Generate coverage mapping regions to
///< enable code coverage analysis.
CODEGENOPT(DumpCoverageMapping , 1, 0) ///< Dump the generated coverage mapping
///< regions.
+CODEGENOPT(MCDCCoverage , 1, 0) ///< Enable MC/DC code coverage criteria.
/// If -fpcc-struct-return or -freg-struct-return is specified.
ENUM_CODEGENOPT(StructReturnConvention, StructReturnConventionKind, 2, SRCK_Default)
@@ -227,6 +246,9 @@ CODEGENOPT(SanitizeMemoryTrackOrigins, 2, 0) ///< Enable tracking origins in
ENUM_CODEGENOPT(SanitizeAddressDtor, llvm::AsanDtorKind, 2,
llvm::AsanDtorKind::Global) ///< Set how ASan global
///< destructors are emitted.
+CODEGENOPT(SanitizeMemoryParamRetval, 1, 0) ///< Enable detection of uninitialized
+ ///< parameters and return values
+ ///< in MemorySanitizer
CODEGENOPT(SanitizeMemoryUseAfterDtor, 1, 0) ///< Enable use-after-delete detection
///< in MemorySanitizer
CODEGENOPT(SanitizeCfiCrossDso, 1, 0) ///< Enable cross-dso support in CFI.
@@ -234,6 +256,8 @@ CODEGENOPT(SanitizeMinimalRuntime, 1, 0) ///< Use "_minimal" sanitizer runtime f
///< diagnostics.
CODEGENOPT(SanitizeCfiICallGeneralizePointers, 1, 0) ///< Generalize pointer types in
///< CFI icall function signatures
+CODEGENOPT(SanitizeCfiICallNormalizeIntegers, 1, 0) ///< Normalize integer types in
+ ///< CFI icall function signatures
CODEGENOPT(SanitizeCfiCanonicalJumpTables, 1, 0) ///< Make jump table symbols canonical
///< instead of creating a local jump table.
CODEGENOPT(SanitizeCoverageType, 2, 0) ///< Type of sanitizer coverage
@@ -257,8 +281,15 @@ CODEGENOPT(SanitizeCoverageTracePCGuard, 1, 0) ///< Enable PC tracing with guard
CODEGENOPT(SanitizeCoverageInline8bitCounters, 1, 0) ///< Use inline 8bit counters.
CODEGENOPT(SanitizeCoverageInlineBoolFlag, 1, 0) ///< Use inline bool flag.
CODEGENOPT(SanitizeCoveragePCTable, 1, 0) ///< Create a PC Table.
+CODEGENOPT(SanitizeCoverageControlFlow, 1, 0) ///< Collect control flow
CODEGENOPT(SanitizeCoverageNoPrune, 1, 0) ///< Disable coverage pruning.
CODEGENOPT(SanitizeCoverageStackDepth, 1, 0) ///< Enable max stack depth tracing
+CODEGENOPT(SanitizeCoverageTraceLoads, 1, 0) ///< Enable tracing of loads.
+CODEGENOPT(SanitizeCoverageTraceStores, 1, 0) ///< Enable tracing of stores.
+CODEGENOPT(SanitizeBinaryMetadataCovered, 1, 0) ///< Emit PCs for covered functions.
+CODEGENOPT(SanitizeBinaryMetadataAtomics, 1, 0) ///< Emit PCs for atomic operations.
+CODEGENOPT(SanitizeBinaryMetadataUAR, 1, 0) ///< Emit PCs for start of functions
+ ///< that are subject for use-after-return checking.
CODEGENOPT(SanitizeStats , 1, 0) ///< Collect statistics for sanitizers.
CODEGENOPT(SimplifyLibCalls , 1, 1) ///< Set when -fbuiltin is enabled.
CODEGENOPT(SoftFloat , 1, 0) ///< -soft-float.
@@ -274,7 +305,7 @@ VALUE_CODEGENOPT(TimeTraceGranularity, 32, 500) ///< Minimum time granularity (i
CODEGENOPT(UnrollLoops , 1, 0) ///< Control whether loops are unrolled.
CODEGENOPT(RerollLoops , 1, 0) ///< Control whether loops are rerolled.
CODEGENOPT(NoUseJumpTables , 1, 0) ///< Set when -fno-jump-tables is enabled.
-CODEGENOPT(UnwindTables , 1, 0) ///< Emit unwind tables.
+VALUE_CODEGENOPT(UnwindTables, 2, 0) ///< Unwind tables (1) or asynchronous unwind tables (2)
CODEGENOPT(VectorizeLoop , 1, 0) ///< Run loop vectorizer.
CODEGENOPT(VectorizeSLP , 1, 0) ///< Run SLP vectorizer.
CODEGENOPT(ProfileSampleAccurate, 1, 0) ///< Sample profile is accurate.
@@ -288,35 +319,21 @@ CODEGENOPT(UseRegisterSizedBitfieldAccess , 1, 0)
CODEGENOPT(VerifyModule , 1, 1) ///< Control whether the module should be run
///< through the LLVM Verifier.
+CODEGENOPT(VerifyEach , 1, 1) ///< Control whether the LLVM verifier
+ ///< should run after every pass.
CODEGENOPT(StackRealignment , 1, 0) ///< Control whether to force stack
///< realignment.
CODEGENOPT(UseInitArray , 1, 0) ///< Control whether to use .init_array or
///< .ctors.
+VALUE_CODEGENOPT(LoopAlignment , 32, 0) ///< Overrides default loop
+ ///< alignment, if not 0.
VALUE_CODEGENOPT(StackAlignment , 32, 0) ///< Overrides default stack
///< alignment, if not 0.
VALUE_CODEGENOPT(StackProbeSize , 32, 4096) ///< Overrides default stack
///< probe size, even if 0.
VALUE_CODEGENOPT(WarnStackSize , 32, UINT_MAX) ///< Set via -fwarn-stack-size.
CODEGENOPT(NoStackArgProbe, 1, 0) ///< Set when -mno-stack-arg-probe is used
-CODEGENOPT(DebugStrictDwarf, 1, 1) ///< Whether or not to use strict DWARF info.
-CODEGENOPT(DebugColumnInfo, 1, 0) ///< Whether or not to use column information
- ///< in debug info.
-
-CODEGENOPT(DebugTypeExtRefs, 1, 0) ///< Whether or not debug info should contain
- ///< external references to a PCH or module.
-
-CODEGENOPT(DebugExplicitImport, 1, 0) ///< Whether or not debug info should
- ///< contain explicit imports for
- ///< anonymous namespaces
-
-CODEGENOPT(SplitDwarfInlining, 1, 1) ///< Whether to include inlining info in the
- ///< skeleton CU to allow for symbolication
- ///< of inline stack frames without .dwo files.
-CODEGENOPT(DebugFwdTemplateParams, 1, 0) ///< Whether to emit complete
- ///< template parameter descriptions in
- ///< forward declarations (versus just
- ///< including them in the name).
CODEGENOPT(EmitLLVMUseLists, 1, 0) ///< Control whether to serialize use-lists.
CODEGENOPT(WholeProgramVTables, 1, 0) ///< Whether to apply whole-program
@@ -340,39 +357,21 @@ VALUE_CODEGENOPT(SmallDataLimit, 32, 0)
/// The lower bound for a buffer to be considered for stack protection.
VALUE_CODEGENOPT(SSPBufferSize, 32, 0)
-/// The kind of generated debug info.
-ENUM_CODEGENOPT(DebugInfo, codegenoptions::DebugInfoKind, 4, codegenoptions::NoDebugInfo)
-
-/// Whether to generate macro debug info.
-CODEGENOPT(MacroDebugInfo, 1, 0)
-
-/// Tune the debug info for this debugger.
-ENUM_CODEGENOPT(DebuggerTuning, llvm::DebuggerKind, 3,
- llvm::DebuggerKind::Default)
-
-/// Dwarf version. Version zero indicates to LLVM that no DWARF should be
-/// emitted.
-VALUE_CODEGENOPT(DwarfVersion, 3, 0)
-
-/// Whether to use experimental new variable location tracking.
-CODEGENOPT(ValueTrackingVariableLocations, 1, 0)
-
-/// Whether we should emit CodeView debug information. It's possible to emit
-/// CodeView and DWARF into the same object.
-CODEGENOPT(EmitCodeView, 1, 0)
-
-/// Whether to emit the .debug$H section containing hashes of CodeView types.
-CODEGENOPT(CodeViewGHash, 1, 0)
-
/// The kind of inlining to perform.
ENUM_CODEGENOPT(Inlining, InliningMethod, 2, NormalInlining)
+/// The maximum stack size a function can have to be considered for inlining.
+VALUE_CODEGENOPT(InlineMaxStackSize, 32, UINT_MAX)
+
// Vector functions library to use.
-ENUM_CODEGENOPT(VecLib, VectorLibrary, 3, NoLibrary)
+ENUM_CODEGENOPT(VecLib, llvm::driver::VectorLibrary, 3, llvm::driver::VectorLibrary::NoLibrary)
/// The default TLS model to use.
ENUM_CODEGENOPT(DefaultTLSModel, TLSModel, 2, GeneralDynamicTLSModel)
+/// Whether to enable TLSDESC. AArch64 enables TLSDESC regardless of this value.
+CODEGENOPT(EnableTLSDESC, 1, 0)
+
/// Bit size of immediate TLS offsets (0 == use the default).
VALUE_CODEGENOPT(TLSSize, 8, 0)
@@ -393,26 +392,14 @@ CODEGENOPT(DirectAccessExternalData, 1, 0)
/// paths that reach the end of a function without executing a required return.
CODEGENOPT(StrictReturn, 1, 1)
-/// Whether emit extra debug info for sample pgo profile collection.
-CODEGENOPT(DebugInfoForProfiling, 1, 0)
-
/// Whether emit pseudo probes for sample pgo profile collection.
CODEGENOPT(PseudoProbeForProfiling, 1, 0)
/// Whether 3-component vector type is preserved.
CODEGENOPT(PreserveVec3Type, 1, 0)
-/// Whether to emit .debug_gnu_pubnames section instead of .debug_pubnames.
-CODEGENOPT(DebugNameTable, 2, 0)
-
-/// Whether to use DWARF base address specifiers in .debug_ranges.
-CODEGENOPT(DebugRangesBaseAddress, 1, 0)
-
CODEGENOPT(NoPLT, 1, 0)
-/// Whether to embed source in DWARF debug line section.
-CODEGENOPT(EmbedSource, 1, 0)
-
/// Whether to emit all vtables
CODEGENOPT(ForceEmitVTables, 1, 0)
@@ -422,6 +409,10 @@ CODEGENOPT(Addrsig, 1, 0)
/// Whether to emit unused static constants.
CODEGENOPT(KeepStaticConsts, 1, 0)
+/// Whether to emit all variables that have a persistent storage duration,
+/// including global, static and thread local variables.
+CODEGENOPT(KeepPersistentStorageVariables, 1, 0)
+
/// Whether to follow the AAPCS enforcing at least one read before storing to a volatile bitfield
CODEGENOPT(ForceAAPCSBitfieldLoad, 1, 0)
@@ -437,6 +428,26 @@ CODEGENOPT(AAPCSBitfieldWidth, 1, 1)
/// propagate signaling NaN inputs per IEEE 754-2008 (AMDGPU Only)
CODEGENOPT(EmitIEEENaNCompliantInsts, 1, 1)
+// Whether to emit Swift Async function extended frame information: auto,
+// never, always.
+ENUM_CODEGENOPT(SwiftAsyncFramePointer, SwiftAsyncFramePointerKind, 2,
+ SwiftAsyncFramePointerKind::Always)
+
+/// Whether to skip RAX setup when passing variable arguments (x86 only).
+CODEGENOPT(SkipRaxSetup, 1, 0)
+
+/// Whether to zero out caller-used registers before returning.
+ENUM_CODEGENOPT(ZeroCallUsedRegs, llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind,
+ 5, llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip)
+
+/// Modify C++ ABI to returning `this` pointer from constructors and
+/// non-deleting destructors. (No effect on Microsoft ABI.)
+CODEGENOPT(CtorDtorReturnThis, 1, 0)
+
+/// FIXME: Make DebugOptions its own top-level .def file.
+#include "DebugOptions.def"
+
#undef CODEGENOPT
#undef ENUM_CODEGENOPT
#undef VALUE_CODEGENOPT
+#undef AFFECTING_VALUE_CODEGENOPT
diff --git a/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.h b/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.h
index 617c255641ef..3f8fe385fef3 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/CodeGenOptions.h
@@ -13,10 +13,11 @@
#ifndef LLVM_CLANG_BASIC_CODEGENOPTIONS_H
#define LLVM_CLANG_BASIC_CODEGENOPTIONS_H
-#include "clang/Basic/DebugInfoOptions.h"
#include "clang/Basic/Sanitizers.h"
#include "clang/Basic/XRayInstr.h"
#include "llvm/ADT/FloatingPointMode.h"
+#include "llvm/Frontend/Debug/Options.h"
+#include "llvm/Frontend/Driver/CodeGenOptions.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Regex.h"
#include "llvm/Target/TargetOptions.h"
@@ -26,12 +27,16 @@
#include <string>
#include <vector>
+namespace llvm {
+class PassBuilder;
+}
namespace clang {
/// Bitfields of CodeGenOptions, split out from CodeGenOptions to ensure
/// that this large collection of bitfields is a trivial class type.
class CodeGenOptionsBase {
friend class CompilerInvocation;
+ friend class CompilerInvocationBase;
public:
#define CODEGENOPT(Name, Bits, Default) unsigned Name : Bits;
@@ -54,15 +59,6 @@ public:
OnlyAlwaysInlining // Only run the always inlining pass.
};
- enum VectorLibrary {
- NoLibrary, // Don't use any vector library.
- Accelerate, // Use the Accelerate framework.
- LIBMVEC, // GLIBC vector math library.
- MASSV, // IBM MASS vector library.
- SVML, // Intel short vector math library.
- Darwin_libsystem_m // Use Darwin's libsytem_m vector functions.
- };
-
enum ObjCDispatchMethodKind {
Legacy = 0,
NonLegacy = 1,
@@ -97,6 +93,17 @@ public:
Embed_Marker // Embed a marker as a placeholder for bitcode.
};
+ enum InlineAsmDialectKind {
+ IAD_ATT,
+ IAD_Intel,
+ };
+
+ enum DebugSrcHashKind {
+ DSH_MD5,
+ DSH_SHA1,
+ DSH_SHA256,
+ };
+
// This field stores one of the allowed values for the option
// -fbasic-block-sections=. The allowed values with this option are:
// {"labels", "all", "list=<file>", "none"}.
@@ -125,15 +132,45 @@ public:
All, // Keep all frame pointers.
};
+ static StringRef getFramePointerKindName(FramePointerKind Kind) {
+ switch (Kind) {
+ case FramePointerKind::None:
+ return "none";
+ case FramePointerKind::NonLeaf:
+ return "non-leaf";
+ case FramePointerKind::All:
+ return "all";
+ }
+
+ llvm_unreachable("invalid FramePointerKind");
+ }
+
+ enum class SwiftAsyncFramePointerKind {
+ Auto, // Choose Swift async extended frame info based on deployment target.
+ Always, // Unconditionally emit Swift async extended frame info.
+ Never, // Don't emit Swift async extended frame info.
+ Default = Always,
+ };
+
enum FiniteLoopsKind {
Language, // Not specified, use language standard.
Always, // All loops are assumed to be finite.
Never, // No loop is assumed to be finite.
};
+ enum AssignmentTrackingOpts {
+ Disabled,
+ Enabled,
+ Forced,
+ };
+
/// The code model to use (-mcmodel).
std::string CodeModel;
+ /// The code model-specific large data threshold to use
+ /// (-mlarge-data-threshold).
+ uint64_t LargeDataThreshold;
+
/// The filename with path we use for coverage data files. The runtime
/// allows further manipulation with the GCOV_PREFIX and GCOV_PREFIX_STRIP
/// environment variables.
@@ -168,8 +205,11 @@ public:
/// if non-empty.
std::string RecordCommandLine;
- std::map<std::string, std::string> DebugPrefixMap;
- std::map<std::string, std::string> CoveragePrefixMap;
+ llvm::SmallVector<std::pair<std::string, std::string>, 0> DebugPrefixMap;
+
+ /// Prefix replacement map for source-based code coverage to remap source
+ /// file paths in coverage mapping.
+ llvm::SmallVector<std::pair<std::string, std::string>, 0> CoveragePrefixMap;
/// The ABI to use for passing floating point arguments.
std::string FloatABI;
@@ -215,6 +255,9 @@ public:
/// Output filename for the split debug info, not used in the skeleton CU.
std::string SplitDwarfOutput;
+ /// Output filename used in the COFF debug information.
+ std::string ObjectFilenameForDebug;
+
/// The name of the relocation model to use.
llvm::Reloc::Model RelocationModel;
@@ -238,6 +281,9 @@ public:
/// Name of the profile file to use as output for with -fmemory-profile.
std::string MemoryProfileOutput;
+ /// Name of the profile file to use as input for -fmemory-profile-use.
+ std::string MemoryProfileUsePath;
+
/// Name of the profile file to use as input for -fprofile-instr-use
std::string ProfileInstrumentUsePath;
@@ -261,6 +307,10 @@ public:
/// CUDA runtime back-end for incorporating them into host-side object file.
std::string CudaGpuBinaryFileName;
+ /// List of filenames passed in using the -fembed-offload-object option. These
+ /// are offloading binaries containing device images and metadata.
+ std::vector<std::string> OffloadObjects;
+
/// The name of the file to which the backend should save YAML optimization
/// records.
std::string OptRecordFile;
@@ -287,12 +337,12 @@ public:
/// Optimization remark with an optional regular expression pattern.
struct OptRemark {
- RemarkKind Kind;
+ RemarkKind Kind = RK_Missing;
std::string Pattern;
std::shared_ptr<llvm::Regex> Regex;
/// By default, optimization remark is missing.
- OptRemark() : Kind(RK_Missing), Pattern(""), Regex(nullptr) {}
+ OptRemark() = default;
/// Returns true iff the optimization remark holds a valid regular
/// expression.
@@ -323,9 +373,6 @@ public:
/// transformation.
OptRemark OptimizationRemarkAnalysis;
- /// Set of files defining the rules for the symbol rewriting.
- std::vector<std::string> RewriteMapFiles;
-
/// Set of sanitizer checks that are non-fatal (i.e. execution should be
/// continued when possible).
SanitizerSet SanitizeRecover;
@@ -354,6 +401,9 @@ public:
/// List of dynamic shared object files to be loaded as pass plugins.
std::vector<std::string> PassPlugins;
+ /// List of pass builder callbacks.
+ std::vector<std::function<void(llvm::PassBuilder &)>> PassBuilderCallbacks;
+
/// Path to allowlist file specifying which objects
/// (files, functions) should exclusively be instrumented
/// by sanitizer coverage pass.
@@ -370,11 +420,19 @@ public:
/// On AArch64 this can only be "sp_el0".
std::string StackProtectorGuardReg;
+ /// Specify a symbol to be the guard value.
+ std::string StackProtectorGuardSymbol;
+
/// Path to ignorelist file specifying which objects
/// (files, functions) listed for instrumentation by sanitizer
/// coverage pass should actually not be instrumented.
std::vector<std::string> SanitizeCoverageIgnorelistFiles;
+ /// Path to ignorelist file specifying which objects
+ /// (files, functions) listed for instrumentation by sanitizer
+ /// binary metadata pass should not be instrumented.
+ std::vector<std::string> SanitizeMetadataIgnorelistFiles;
+
/// Name of the stack usage file (i.e., .su file) if user passes
/// -fstack-usage. If empty, it can be implied that -fstack-usage is not
/// passed on the command line.
@@ -383,7 +441,7 @@ public:
/// Executable and command-line used to create a given CompilerInvocation.
/// Most of the time this will be the full -cc1 command.
const char *Argv0 = nullptr;
- ArrayRef<const char *> CommandLineArgs;
+ std::vector<std::string> CommandLineArgs;
/// The minimum hotness value a diagnostic needs in order to be included in
/// optimization diagnostics.
@@ -398,7 +456,14 @@ public:
/// compilation.
///
/// If threshold option is not specified, it is disabled by default.
- Optional<uint64_t> DiagnosticsHotnessThreshold = 0;
+ std::optional<uint64_t> DiagnosticsHotnessThreshold = 0;
+
+ /// The maximum percentage profiling weights can deviate from the expected
+ /// values in order to be included in misexpect diagnostics.
+ std::optional<uint32_t> DiagnosticsMisExpectTolerance = 0;
+
+ /// The name of a file to use with \c .secure_log_unique directives.
+ std::string AsSecureLogFile;
public:
// Define accessors/mutators for code generation options of enumeration type.
@@ -429,6 +494,9 @@ public:
return getProfileInstr() == ProfileCSIRInstr;
}
+ /// Check if any form of instrumentation is on.
+ bool hasProfileInstr() const { return getProfileInstr() != ProfileNone; }
+
/// Check if Clang profile use is on.
bool hasProfileClangUse() const {
return getProfileUse() == ProfileClangInstr;
@@ -445,19 +513,30 @@ public:
/// Check if type and variable info should be emitted.
bool hasReducedDebugInfo() const {
- return getDebugInfo() >= codegenoptions::DebugInfoConstructor;
+ return getDebugInfo() >= llvm::codegenoptions::DebugInfoConstructor;
}
/// Check if maybe unused type info should be emitted.
bool hasMaybeUnusedDebugInfo() const {
- return getDebugInfo() >= codegenoptions::UnusedTypeInfo;
+ return getDebugInfo() >= llvm::codegenoptions::UnusedTypeInfo;
}
// Check if any one of SanitizeCoverage* is enabled.
bool hasSanitizeCoverage() const {
return SanitizeCoverageType || SanitizeCoverageIndirectCalls ||
- SanitizeCoverageTraceCmp;
+ SanitizeCoverageTraceCmp || SanitizeCoverageTraceLoads ||
+ SanitizeCoverageTraceStores || SanitizeCoverageControlFlow;
}
+
+ // Check if any one of SanitizeBinaryMetadata* is enabled.
+ bool hasSanitizeBinaryMetadata() const {
+ return SanitizeBinaryMetadataCovered || SanitizeBinaryMetadataAtomics ||
+ SanitizeBinaryMetadataUAR;
+ }
+
+ /// Reset all of the options that are not considered when building a
+ /// module.
+ void resetNonModularOptions(StringRef ModuleFormat);
};
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Cuda.h b/contrib/llvm-project/clang/include/clang/Basic/Cuda.h
index aa12724cbf0c..916cb4b7ef34 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Cuda.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Cuda.h
@@ -31,8 +31,20 @@ enum class CudaVersion {
CUDA_110,
CUDA_111,
CUDA_112,
- LATEST = CUDA_112,
- LATEST_SUPPORTED = CUDA_101,
+ CUDA_113,
+ CUDA_114,
+ CUDA_115,
+ CUDA_116,
+ CUDA_117,
+ CUDA_118,
+ CUDA_120,
+ CUDA_121,
+ CUDA_122,
+ CUDA_123,
+ FULLY_SUPPORTED = CUDA_123,
+ PARTIALLY_SUPPORTED =
+ CUDA_123, // Partially supported. Proceed with a warning.
+ NEW = 10000, // Too new. Issue a warning, but allow using it.
};
const char *CudaVersionToString(CudaVersion V);
// Input is "Major.Minor"
@@ -58,6 +70,10 @@ enum class CudaArch {
SM_75,
SM_80,
SM_86,
+ SM_87,
+ SM_89,
+ SM_90,
+ SM_90a,
GFX600,
GFX601,
GFX602,
@@ -80,6 +96,9 @@ enum class CudaArch {
GFX909,
GFX90a,
GFX90c,
+ GFX940,
+ GFX941,
+ GFX942,
GFX1010,
GFX1011,
GFX1012,
@@ -90,7 +109,21 @@ enum class CudaArch {
GFX1033,
GFX1034,
GFX1035,
+ GFX1036,
+ GFX1100,
+ GFX1101,
+ GFX1102,
+ GFX1103,
+ GFX1150,
+ GFX1151,
+ GFX1200,
+ GFX1201,
+ Generic, // A processor model named 'generic' if the target backend defines a
+ // public one.
LAST,
+
+ CudaDefault = CudaArch::SM_52,
+ HIPDefault = CudaArch::GFX803,
};
static inline bool IsNVIDIAGpuArch(CudaArch A) {
@@ -98,7 +131,8 @@ static inline bool IsNVIDIAGpuArch(CudaArch A) {
}
static inline bool IsAMDGpuArch(CudaArch A) {
- return A >= CudaArch::GFX600 && A < CudaArch::LAST;
+ // Generic processor model is for testing only.
+ return A >= CudaArch::GFX600 && A < CudaArch::Generic;
}
const char *CudaArchToString(CudaArch A);
diff --git a/contrib/llvm-project/clang/include/clang/Basic/CustomizableOptional.h b/contrib/llvm-project/clang/include/clang/Basic/CustomizableOptional.h
new file mode 100644
index 000000000000..84d40025ee41
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/CustomizableOptional.h
@@ -0,0 +1,280 @@
+//===- CustomizableOptional.h - Optional with custom storage ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_BASIC_CUSTOMIZABLEOPTIONAL_H
+#define CLANG_BASIC_CUSTOMIZABLEOPTIONAL_H
+
+#include "llvm/ADT/Hashing.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/type_traits.h"
+#include <cassert>
+#include <new>
+#include <optional>
+#include <utility>
+
+namespace clang {
+
+namespace optional_detail {
+template <typename> class OptionalStorage;
+} // namespace optional_detail
+
+// Optional type which internal storage can be specialized by providing
+// OptionalStorage. The interface follows std::optional.
+template <typename T> class CustomizableOptional {
+ optional_detail::OptionalStorage<T> Storage;
+
+public:
+ using value_type = T;
+
+ constexpr CustomizableOptional() = default;
+ constexpr CustomizableOptional(std::nullopt_t) {}
+
+ constexpr CustomizableOptional(const T &y) : Storage(std::in_place, y) {}
+ constexpr CustomizableOptional(const CustomizableOptional &O) = default;
+
+ constexpr CustomizableOptional(T &&y)
+ : Storage(std::in_place, std::move(y)) {}
+ constexpr CustomizableOptional(CustomizableOptional &&O) = default;
+
+ template <typename... ArgTypes>
+ constexpr CustomizableOptional(std::in_place_t, ArgTypes &&...Args)
+ : Storage(std::in_place, std::forward<ArgTypes>(Args)...) {}
+
+ // Allow conversion from std::optional<T>.
+ constexpr CustomizableOptional(const std::optional<T> &y)
+ : CustomizableOptional(y ? *y : CustomizableOptional()) {}
+ constexpr CustomizableOptional(std::optional<T> &&y)
+ : CustomizableOptional(y ? std::move(*y) : CustomizableOptional()) {}
+
+ CustomizableOptional &operator=(T &&y) {
+ Storage = std::move(y);
+ return *this;
+ }
+ CustomizableOptional &operator=(CustomizableOptional &&O) = default;
+
+ /// Create a new object by constructing it in place with the given arguments.
+ template <typename... ArgTypes> void emplace(ArgTypes &&...Args) {
+ Storage.emplace(std::forward<ArgTypes>(Args)...);
+ }
+
+ CustomizableOptional &operator=(const T &y) {
+ Storage = y;
+ return *this;
+ }
+ CustomizableOptional &operator=(const CustomizableOptional &O) = default;
+
+ void reset() { Storage.reset(); }
+
+ LLVM_DEPRECATED("Use &*X instead.", "&*X")
+ constexpr const T *getPointer() const { return &Storage.value(); }
+ LLVM_DEPRECATED("Use &*X instead.", "&*X")
+ T *getPointer() { return &Storage.value(); }
+ LLVM_DEPRECATED("std::optional::value is throwing. Use *X instead", "*X")
+ constexpr const T &value() const & { return Storage.value(); }
+ LLVM_DEPRECATED("std::optional::value is throwing. Use *X instead", "*X")
+ T &value() & { return Storage.value(); }
+
+ constexpr explicit operator bool() const { return has_value(); }
+ constexpr bool has_value() const { return Storage.has_value(); }
+ constexpr const T *operator->() const { return &Storage.value(); }
+ T *operator->() { return &Storage.value(); }
+ constexpr const T &operator*() const & { return Storage.value(); }
+ T &operator*() & { return Storage.value(); }
+
+ template <typename U> constexpr T value_or(U &&alt) const & {
+ return has_value() ? operator*() : std::forward<U>(alt);
+ }
+
+ LLVM_DEPRECATED("std::optional::value is throwing. Use *X instead", "*X")
+ T &&value() && { return std::move(Storage.value()); }
+ T &&operator*() && { return std::move(Storage.value()); }
+
+ template <typename U> T value_or(U &&alt) && {
+ return has_value() ? std::move(operator*()) : std::forward<U>(alt);
+ }
+
+ // Allow conversion to std::optional<T>.
+ explicit operator std::optional<T> &() const & {
+ return *this ? **this : std::optional<T>();
+ }
+ explicit operator std::optional<T> &&() const && {
+ return *this ? std::move(**this) : std::optional<T>();
+ }
+};
+
+template <typename T>
+CustomizableOptional(const T &) -> CustomizableOptional<T>;
+
+template <class T>
+llvm::hash_code hash_value(const CustomizableOptional<T> &O) {
+ return O ? llvm::hash_combine(true, *O) : llvm::hash_value(false);
+}
+
+template <typename T, typename U>
+constexpr bool operator==(const CustomizableOptional<T> &X,
+ const CustomizableOptional<U> &Y) {
+ if (X && Y)
+ return *X == *Y;
+ return X.has_value() == Y.has_value();
+}
+
+template <typename T, typename U>
+constexpr bool operator!=(const CustomizableOptional<T> &X,
+ const CustomizableOptional<U> &Y) {
+ return !(X == Y);
+}
+
+template <typename T, typename U>
+constexpr bool operator<(const CustomizableOptional<T> &X,
+ const CustomizableOptional<U> &Y) {
+ if (X && Y)
+ return *X < *Y;
+ return X.has_value() < Y.has_value();
+}
+
+template <typename T, typename U>
+constexpr bool operator<=(const CustomizableOptional<T> &X,
+ const CustomizableOptional<U> &Y) {
+ return !(Y < X);
+}
+
+template <typename T, typename U>
+constexpr bool operator>(const CustomizableOptional<T> &X,
+ const CustomizableOptional<U> &Y) {
+ return Y < X;
+}
+
+template <typename T, typename U>
+constexpr bool operator>=(const CustomizableOptional<T> &X,
+ const CustomizableOptional<U> &Y) {
+ return !(X < Y);
+}
+
+template <typename T>
+constexpr bool operator==(const CustomizableOptional<T> &X, std::nullopt_t) {
+ return !X;
+}
+
+template <typename T>
+constexpr bool operator==(std::nullopt_t, const CustomizableOptional<T> &X) {
+ return X == std::nullopt;
+}
+
+template <typename T>
+constexpr bool operator!=(const CustomizableOptional<T> &X, std::nullopt_t) {
+ return !(X == std::nullopt);
+}
+
+template <typename T>
+constexpr bool operator!=(std::nullopt_t, const CustomizableOptional<T> &X) {
+ return X != std::nullopt;
+}
+
+template <typename T>
+constexpr bool operator<(const CustomizableOptional<T> &, std::nullopt_t) {
+ return false;
+}
+
+template <typename T>
+constexpr bool operator<(std::nullopt_t, const CustomizableOptional<T> &X) {
+ return X.has_value();
+}
+
+template <typename T>
+constexpr bool operator<=(const CustomizableOptional<T> &X, std::nullopt_t) {
+ return !(std::nullopt < X);
+}
+
+template <typename T>
+constexpr bool operator<=(std::nullopt_t, const CustomizableOptional<T> &X) {
+ return !(X < std::nullopt);
+}
+
+template <typename T>
+constexpr bool operator>(const CustomizableOptional<T> &X, std::nullopt_t) {
+ return std::nullopt < X;
+}
+
+template <typename T>
+constexpr bool operator>(std::nullopt_t, const CustomizableOptional<T> &X) {
+ return X < std::nullopt;
+}
+
+template <typename T>
+constexpr bool operator>=(const CustomizableOptional<T> &X, std::nullopt_t) {
+ return std::nullopt <= X;
+}
+
+template <typename T>
+constexpr bool operator>=(std::nullopt_t, const CustomizableOptional<T> &X) {
+ return X <= std::nullopt;
+}
+
+template <typename T>
+constexpr bool operator==(const CustomizableOptional<T> &X, const T &Y) {
+ return X && *X == Y;
+}
+
+template <typename T>
+constexpr bool operator==(const T &X, const CustomizableOptional<T> &Y) {
+ return Y && X == *Y;
+}
+
+template <typename T>
+constexpr bool operator!=(const CustomizableOptional<T> &X, const T &Y) {
+ return !(X == Y);
+}
+
+template <typename T>
+constexpr bool operator!=(const T &X, const CustomizableOptional<T> &Y) {
+ return !(X == Y);
+}
+
+template <typename T>
+constexpr bool operator<(const CustomizableOptional<T> &X, const T &Y) {
+ return !X || *X < Y;
+}
+
+template <typename T>
+constexpr bool operator<(const T &X, const CustomizableOptional<T> &Y) {
+ return Y && X < *Y;
+}
+
+template <typename T>
+constexpr bool operator<=(const CustomizableOptional<T> &X, const T &Y) {
+ return !(Y < X);
+}
+
+template <typename T>
+constexpr bool operator<=(const T &X, const CustomizableOptional<T> &Y) {
+ return !(Y < X);
+}
+
+template <typename T>
+constexpr bool operator>(const CustomizableOptional<T> &X, const T &Y) {
+ return Y < X;
+}
+
+template <typename T>
+constexpr bool operator>(const T &X, const CustomizableOptional<T> &Y) {
+ return Y < X;
+}
+
+template <typename T>
+constexpr bool operator>=(const CustomizableOptional<T> &X, const T &Y) {
+ return !(X < Y);
+}
+
+template <typename T>
+constexpr bool operator>=(const T &X, const CustomizableOptional<T> &Y) {
+ return !(X < Y);
+}
+
+} // namespace clang
+
+#endif // CLANG_BASIC_CUSTOMIZABLEOPTIONAL_H
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DarwinSDKInfo.h b/contrib/llvm-project/clang/include/clang/Basic/DarwinSDKInfo.h
index 918dc7c8becc..dedfbd934a7b 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DarwinSDKInfo.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DarwinSDKInfo.h
@@ -6,15 +6,16 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_BASIC_DARWIN_SDK_INFO_H
-#define LLVM_CLANG_BASIC_DARWIN_SDK_INFO_H
+#ifndef LLVM_CLANG_BASIC_DARWINSDKINFO_H
+#define LLVM_CLANG_BASIC_DARWINSDKINFO_H
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/VersionTuple.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/TargetParser/Triple.h"
+#include <optional>
namespace llvm {
namespace json {
@@ -57,6 +58,20 @@ public:
llvm::Triple::MacOSX, llvm::Triple::UnknownEnvironment);
}
+ /// Returns the os-environment mapping pair that's used to represent the
+ /// iOS -> watchOS version mapping.
+ static inline constexpr OSEnvPair iOStoWatchOSPair() {
+ return OSEnvPair(llvm::Triple::IOS, llvm::Triple::UnknownEnvironment,
+ llvm::Triple::WatchOS, llvm::Triple::UnknownEnvironment);
+ }
+
+ /// Returns the os-environment mapping pair that's used to represent the
+ /// iOS -> tvOS version mapping.
+ static inline constexpr OSEnvPair iOStoTvOSPair() {
+ return OSEnvPair(llvm::Triple::IOS, llvm::Triple::UnknownEnvironment,
+ llvm::Triple::TvOS, llvm::Triple::UnknownEnvironment);
+ }
+
private:
StorageType Value;
@@ -85,12 +100,12 @@ public:
/// Returns the mapped key, or the appropriate Minimum / MaximumValue if
/// they key is outside of the mapping bounds. If they key isn't mapped, but
- /// within the minimum and maximum bounds, None is returned.
- Optional<VersionTuple> map(const VersionTuple &Key,
- const VersionTuple &MinimumValue,
- Optional<VersionTuple> MaximumValue) const;
+ /// within the minimum and maximum bounds, std::nullopt is returned.
+ std::optional<VersionTuple>
+ map(const VersionTuple &Key, const VersionTuple &MinimumValue,
+ std::optional<VersionTuple> MaximumValue) const;
- static Optional<RelatedTargetVersionMapping>
+ static std::optional<RelatedTargetVersionMapping>
parseJSON(const llvm::json::Object &Obj,
VersionTuple MaximumDeploymentTarget);
@@ -102,12 +117,13 @@ public:
llvm::DenseMap<VersionTuple, VersionTuple> Mapping;
};
- DarwinSDKInfo(VersionTuple Version, VersionTuple MaximumDeploymentTarget,
- llvm::DenseMap<OSEnvPair::StorageType,
- Optional<RelatedTargetVersionMapping>>
- VersionMappings =
- llvm::DenseMap<OSEnvPair::StorageType,
- Optional<RelatedTargetVersionMapping>>())
+ DarwinSDKInfo(
+ VersionTuple Version, VersionTuple MaximumDeploymentTarget,
+ llvm::DenseMap<OSEnvPair::StorageType,
+ std::optional<RelatedTargetVersionMapping>>
+ VersionMappings =
+ llvm::DenseMap<OSEnvPair::StorageType,
+ std::optional<RelatedTargetVersionMapping>>())
: Version(Version), MaximumDeploymentTarget(MaximumDeploymentTarget),
VersionMappings(std::move(VersionMappings)) {}
@@ -128,11 +144,10 @@ public:
auto Mapping = VersionMappings.find(Kind.Value);
if (Mapping == VersionMappings.end())
return nullptr;
- return Mapping->getSecond().hasValue() ? Mapping->getSecond().getPointer()
- : nullptr;
+ return Mapping->getSecond() ? &*Mapping->getSecond() : nullptr;
}
- static Optional<DarwinSDKInfo>
+ static std::optional<DarwinSDKInfo>
parseDarwinSDKSettingsJSON(const llvm::json::Object *Obj);
private:
@@ -141,17 +156,18 @@ private:
// Need to wrap the value in an optional here as the value has to be default
// constructible, and std::unique_ptr doesn't like DarwinSDKInfo being
// Optional as Optional is trying to copy it in emplace.
- llvm::DenseMap<OSEnvPair::StorageType, Optional<RelatedTargetVersionMapping>>
+ llvm::DenseMap<OSEnvPair::StorageType,
+ std::optional<RelatedTargetVersionMapping>>
VersionMappings;
};
/// Parse the SDK information from the SDKSettings.json file.
///
-/// \returns an error if the SDKSettings.json file is invalid, None if the
-/// SDK has no SDKSettings.json, or a valid \c DarwinSDKInfo otherwise.
-Expected<Optional<DarwinSDKInfo>> parseDarwinSDKInfo(llvm::vfs::FileSystem &VFS,
- StringRef SDKRootPath);
+/// \returns an error if the SDKSettings.json file is invalid, std::nullopt if
+/// the SDK has no SDKSettings.json, or a valid \c DarwinSDKInfo otherwise.
+Expected<std::optional<DarwinSDKInfo>>
+parseDarwinSDKInfo(llvm::vfs::FileSystem &VFS, StringRef SDKRootPath);
} // end namespace clang
-#endif // LLVM_CLANG_BASIC_DARWIN_SDK_INFO_H
+#endif // LLVM_CLANG_BASIC_DARWINSDKINFO_H
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DebugInfoOptions.h b/contrib/llvm-project/clang/include/clang/Basic/DebugInfoOptions.h
deleted file mode 100644
index c1259d7797db..000000000000
--- a/contrib/llvm-project/clang/include/clang/Basic/DebugInfoOptions.h
+++ /dev/null
@@ -1,60 +0,0 @@
-//===--- DebugInfoOptions.h - Debug Info Emission Types ---------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_BASIC_DEBUGINFOOPTIONS_H
-#define LLVM_CLANG_BASIC_DEBUGINFOOPTIONS_H
-
-namespace clang {
-namespace codegenoptions {
-
-enum DebugInfoFormat {
- DIF_DWARF,
- DIF_CodeView,
-};
-
-enum DebugInfoKind {
- /// Don't generate debug info.
- NoDebugInfo,
-
- /// Emit location information but do not generate debug info in the output.
- /// This is useful in cases where the backend wants to track source
- /// locations for instructions without actually emitting debug info for them
- /// (e.g., when -Rpass is used).
- LocTrackingOnly,
-
- /// Emit only debug directives with the line numbers data
- DebugDirectivesOnly,
-
- /// Emit only debug info necessary for generating line number tables
- /// (-gline-tables-only).
- DebugLineTablesOnly,
-
- /// Limit generated debug info for classes to reduce size. This emits class
- /// type info only where the constructor is emitted, if it is a class that
- /// has a constructor.
- /// FIXME: Consider combining this with LimitedDebugInfo.
- DebugInfoConstructor,
-
- /// Limit generated debug info to reduce size (-fno-standalone-debug). This
- /// emits forward decls for types that could be replaced with forward decls in
- /// the source code. For dynamic C++ classes type info is only emitted into
- /// the module that contains the classe's vtable.
- LimitedDebugInfo,
-
- /// Generate complete debug info.
- FullDebugInfo,
-
- /// Generate debug info for types that may be unused in the source
- /// (-fno-eliminate-unused-debug-types).
- UnusedTypeInfo,
-};
-
-} // end namespace codegenoptions
-} // end namespace clang
-
-#endif
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DebugOptions.def b/contrib/llvm-project/clang/include/clang/Basic/DebugOptions.def
new file mode 100644
index 000000000000..7cd3edf08a17
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/DebugOptions.def
@@ -0,0 +1,146 @@
+//===--- DebugOptions.def - Debug option database ----------------- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines debug-specific codegen options. Users of this file
+// must define the CODEGENOPT macro to make use of this information.
+// Optionally, the user may also define DEBUGOPT (for flags), ENUM_DEBUGOPT (for
+// options that have enumeration type), and VALUE_DEBUGOPT (is a debug option
+// that describes a value rather than a flag).
+//
+// BENIGN_ variants of the macros are used to describe options that do not
+// affect the generated PCM.
+//
+//===----------------------------------------------------------------------===//
+#ifndef DEBUGOPT
+#define DEBUGOPT(Name, Bits, Default) \
+CODEGENOPT(Name, Bits, Default)
+#endif
+
+#ifndef VALUE_DEBUGOPT
+# define VALUE_DEBUGOPT(Name, Bits, Default) \
+VALUE_CODEGENOPT(Name, Bits, Default)
+#endif
+
+#ifndef ENUM_DEBUGOPT
+# define ENUM_DEBUGOPT(Name, Type, Bits, Default) \
+ENUM_CODEGENOPT(Name, Type, Bits, Default)
+#endif
+
+#ifndef BENIGN_DEBUGOPT
+#define BENIGN_DEBUGOPT(Name, Bits, Default) \
+DEBUGOPT(Name, Bits, Default)
+#endif
+
+#ifndef BENIGN_VALUE_DEBUGOPT
+# define BENIGN_VALUE_DEBUGOPT(Name, Bits, Default) \
+VALUE_DEBUGOPT(Name, Bits, Default)
+#endif
+
+#ifndef BENIGN_ENUM_DEBUGOPT
+# define BENIGN_ENUM_DEBUGOPT(Name, Type, Bits, Default) \
+ENUM_DEBUGOPT(Name, Type, Bits, Default)
+#endif
+
+BENIGN_ENUM_DEBUGOPT(CompressDebugSections, llvm::DebugCompressionType, 2,
+ llvm::DebugCompressionType::None)
+DEBUGOPT(Dwarf64, 1, 0) ///< -gdwarf64.
+BENIGN_DEBUGOPT(EnableDIPreservationVerify, 1, 0) ///< Enable di preservation
+ ///< verify each (it means
+ ///< check the original debug
+ ///< info metadata
+ ///< preservation).
+BENIGN_DEBUGOPT(ForceDwarfFrameSection , 1, 0) ///< Set when -fforce-dwarf-frame
+ ///< is enabled.
+
+///< Set when -femit-dwarf-unwind is passed.
+BENIGN_ENUM_DEBUGOPT(EmitDwarfUnwind, llvm::EmitDwarfUnwindType, 2,
+ llvm::EmitDwarfUnwindType::Default)
+
+BENIGN_DEBUGOPT(NoDwarfDirectoryAsm , 1, 0) ///< Set when -fno-dwarf-directory-asm
+ ///< is enabled.
+
+BENIGN_DEBUGOPT(NoInlineLineTables, 1, 0) ///< Whether debug info should contain
+ ///< inline line tables.
+
+DEBUGOPT(DebugStrictDwarf, 1, 1) ///< Whether or not to use strict DWARF info.
+
+/// Control the Assignment Tracking debug info feature.
+BENIGN_ENUM_DEBUGOPT(AssignmentTrackingMode, AssignmentTrackingOpts, 2,
+ AssignmentTrackingOpts::Disabled)
+
+DEBUGOPT(DebugColumnInfo, 1, 0) ///< Whether or not to use column information
+ ///< in debug info.
+
+DEBUGOPT(DebugTypeExtRefs, 1, 0) ///< Whether or not debug info should contain
+ ///< external references to a PCH or module.
+
+DEBUGOPT(DebugExplicitImport, 1, 0) ///< Whether or not debug info should
+ ///< contain explicit imports for
+ ///< anonymous namespaces
+
+/// Set debug info source file hashing algorithm.
+ENUM_DEBUGOPT(DebugSrcHash, DebugSrcHashKind, 2, DSH_MD5)
+
+DEBUGOPT(SplitDwarfInlining, 1, 1) ///< Whether to include inlining info in the
+ ///< skeleton CU to allow for symbolication
+ ///< of inline stack frames without .dwo files.
+DEBUGOPT(DebugFwdTemplateParams, 1, 0) ///< Whether to emit complete
+ ///< template parameter descriptions in
+ ///< forward declarations (versus just
+ ///< including them in the name).
+ENUM_DEBUGOPT(DebugSimpleTemplateNames,
+ llvm::codegenoptions::DebugTemplateNamesKind, 2,
+ llvm::codegenoptions::DebugTemplateNamesKind::Full)
+ ///< Whether to emit template parameters in the textual names of
+ ///< template specializations.
+ ///< Implies DebugFwdTemplateNames to allow decorated names to be
+ ///< reconstructed when needed.
+
+/// The kind of generated debug info.
+ENUM_DEBUGOPT(DebugInfo, llvm::codegenoptions::DebugInfoKind, 4,
+ llvm::codegenoptions::NoDebugInfo)
+
+/// Whether to generate macro debug info.
+DEBUGOPT(MacroDebugInfo, 1, 0)
+
+/// Tune the debug info for this debugger.
+ENUM_DEBUGOPT(DebuggerTuning, llvm::DebuggerKind, 3,
+ llvm::DebuggerKind::Default)
+
+/// Dwarf version. Version zero indicates to LLVM that no DWARF should be
+/// emitted.
+VALUE_DEBUGOPT(DwarfVersion, 3, 0)
+
+/// Whether we should emit CodeView debug information. It's possible to emit
+/// CodeView and DWARF into the same object.
+DEBUGOPT(EmitCodeView, 1, 0)
+
+/// Whether to emit the .debug$H section containing hashes of CodeView types.
+DEBUGOPT(CodeViewGHash, 1, 0)
+
+/// Whether to emit the compiler path and command line into the CodeView debug information.
+DEBUGOPT(CodeViewCommandLine, 1, 0)
+
+/// Whether emit extra debug info for sample pgo profile collection.
+DEBUGOPT(DebugInfoForProfiling, 1, 0)
+
+/// Whether to emit .debug_gnu_pubnames section instead of .debug_pubnames.
+DEBUGOPT(DebugNameTable, 2, 0)
+
+/// Whether to use DWARF base address specifiers in .debug_ranges.
+DEBUGOPT(DebugRangesBaseAddress, 1, 0)
+
+/// Whether to embed source in DWARF debug line section.
+DEBUGOPT(EmbedSource, 1, 0)
+
+#undef DEBUGOPT
+#undef ENUM_DEBUGOPT
+#undef VALUE_DEBUGOPT
+#undef BENIGN_DEBUGOPT
+#undef BENIGN_ENUM_DEBUGOPT
+#undef BENIGN_VALUE_DEBUGOPT
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DeclNodes.td b/contrib/llvm-project/clang/include/clang/Basic/DeclNodes.td
index f8ad6cf5b262..8b1f415dd5fe 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DeclNodes.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DeclNodes.td
@@ -41,6 +41,7 @@ def Named : DeclNode<Decl, "named declarations", 1>;
def OMPDeclareReduction : DeclNode<Value>, DeclContext;
def OMPDeclareMapper : DeclNode<Value>, DeclContext;
def MSGuid : DeclNode<Value>;
+ def UnnamedGlobalConstant : DeclNode<Value>;
def TemplateParamObject : DeclNode<Value>;
def Declarator : DeclNode<Value, "declarators", 1>;
def Field : DeclNode<Declarator, "non-static data members">;
@@ -89,17 +90,18 @@ def Named : DeclNode<Decl, "named declarations", 1>;
def ObjCImplementation : DeclNode<ObjCImpl>;
def ObjCProperty : DeclNode<Named, "Objective-C properties">;
def ObjCCompatibleAlias : DeclNode<Named>;
+def ImplicitConceptSpecialization : DeclNode<Decl>;
def LinkageSpec : DeclNode<Decl>, DeclContext;
def Export : DeclNode<Decl>, DeclContext;
def ObjCPropertyImpl : DeclNode<Decl>;
def FileScopeAsm : DeclNode<Decl>;
+def TopLevelStmt : DeclNode<Decl>;
def AccessSpec : DeclNode<Decl>;
def Friend : DeclNode<Decl>;
def FriendTemplate : DeclNode<Decl>;
def StaticAssert : DeclNode<Decl>;
def Block : DeclNode<Decl, "blocks">, DeclContext;
def Captured : DeclNode<Decl>, DeclContext;
-def ClassScopeFunctionSpecialization : DeclNode<Decl>;
def Import : DeclNode<Decl>;
def OMPThreadPrivate : DeclNode<Decl>;
def OMPAllocate : DeclNode<Decl>;
@@ -107,4 +109,4 @@ def OMPRequires : DeclNode<Decl>;
def Empty : DeclNode<Decl>;
def RequiresExprBody : DeclNode<Decl>, DeclContext;
def LifetimeExtendedTemporary : DeclNode<Decl>;
-
+def HLSLBuffer : DeclNode<Named, "HLSLBuffer">, DeclContext;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.h b/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.h
index 3b915fb15a89..0c7836c2ea56 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.h
@@ -21,7 +21,6 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
@@ -32,6 +31,7 @@
#include <list>
#include <map>
#include <memory>
+#include <optional>
#include <string>
#include <type_traits>
#include <utility>
@@ -39,7 +39,8 @@
namespace llvm {
class Error;
-}
+class raw_ostream;
+} // namespace llvm
namespace clang {
@@ -164,9 +165,9 @@ struct DiagnosticStorage {
/// The values for the various substitution positions.
///
/// This is used when the argument is not an std::string. The specific value
- /// is mangled into an intptr_t and the interpretation depends on exactly
+ /// is mangled into an uint64_t and the interpretation depends on exactly
/// what sort of argument kind it is.
- intptr_t DiagArgumentsVal[MaxArguments];
+ uint64_t DiagArgumentsVal[MaxArguments];
/// The values for the various substitution positions that have
/// string arguments.
@@ -313,18 +314,23 @@ private:
// "Global" configuration state that can actually vary between modules.
// Ignore all warnings: -w
+ LLVM_PREFERRED_TYPE(bool)
unsigned IgnoreAllWarnings : 1;
// Enable all warnings.
+ LLVM_PREFERRED_TYPE(bool)
unsigned EnableAllWarnings : 1;
// Treat warnings like errors.
+ LLVM_PREFERRED_TYPE(bool)
unsigned WarningsAsErrors : 1;
// Treat errors like fatal errors.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ErrorsAsFatal : 1;
// Suppress warnings in system headers.
+ LLVM_PREFERRED_TYPE(bool)
unsigned SuppressSystemWarnings : 1;
// Map extensions to warnings or errors?
@@ -544,6 +550,7 @@ public:
DiagnosticsEngine &operator=(const DiagnosticsEngine &) = delete;
~DiagnosticsEngine();
+ friend void DiagnosticsTestHelper(DiagnosticsEngine &);
LLVM_DUMP_METHOD void dump() const;
LLVM_DUMP_METHOD void dump(StringRef DiagName) const;
@@ -807,6 +814,9 @@ public:
bool setSeverityForGroup(diag::Flavor Flavor, StringRef Group,
diag::Severity Map,
SourceLocation Loc = SourceLocation());
+ bool setSeverityForGroup(diag::Flavor Flavor, diag::Group Group,
+ diag::Severity Map,
+ SourceLocation Loc = SourceLocation());
/// Set the warning-as-error flag for the given diagnostic group.
///
@@ -887,9 +897,9 @@ public:
LastDiagLevel = Other.LastDiagLevel;
}
- /// Reset the state of the diagnostic object to its initial
- /// configuration.
- void Reset();
+ /// Reset the state of the diagnostic object to its initial configuration.
+ /// \param[in] soft - if true, doesn't reset the diagnostic mappings and state
+ void Reset(bool soft = false);
//===--------------------------------------------------------------------===//
// DiagnosticsEngine classification and reporting interfaces.
@@ -1176,7 +1186,7 @@ public:
DiagStorage = nullptr;
}
- void AddTaggedVal(intptr_t V, DiagnosticsEngine::ArgumentKind Kind) const {
+ void AddTaggedVal(uint64_t V, DiagnosticsEngine::ArgumentKind Kind) const {
if (!DiagStorage)
DiagStorage = getStorage();
@@ -1341,8 +1351,8 @@ public:
// It is necessary to limit this to rvalue reference to avoid calling this
// function with a bitfield lvalue argument since non-const reference to
// bitfield is not allowed.
- template <typename T, typename = typename std::enable_if<
- !std::is_lvalue_reference<T>::value>::type>
+ template <typename T,
+ typename = std::enable_if_t<!std::is_lvalue_reference<T>::value>>
const DiagnosticBuilder &operator<<(T &&V) const {
assert(isActive() && "Clients must not add to cleared diagnostic!");
const StreamingDiagnostic &DB = *this;
@@ -1399,6 +1409,18 @@ inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
return DB;
}
+inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
+ long I) {
+ DB.AddTaggedVal(I, DiagnosticsEngine::ak_sint);
+ return DB;
+}
+
+inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
+ long long I) {
+ DB.AddTaggedVal(I, DiagnosticsEngine::ak_sint);
+ return DB;
+}
+
// We use enable_if here to prevent that this overload is selected for
// pointers or other arguments that are implicitly convertible to bool.
template <typename T>
@@ -1416,6 +1438,18 @@ inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
}
inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
+ unsigned long I) {
+ DB.AddTaggedVal(I, DiagnosticsEngine::ak_uint);
+ return DB;
+}
+
+inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
+ unsigned long long I) {
+ DB.AddTaggedVal(I, DiagnosticsEngine::ak_uint);
+ return DB;
+}
+
+inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
tok::TokenKind I) {
DB.AddTaggedVal(static_cast<unsigned>(I), DiagnosticsEngine::ak_tokenkind);
return DB;
@@ -1443,6 +1477,12 @@ operator<<(const StreamingDiagnostic &DB, T *DC) {
}
inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
+ SourceLocation L) {
+ DB.AddSourceRange(CharSourceRange::getTokenRange(L));
+ return DB;
+}
+
+inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
SourceRange R) {
DB.AddSourceRange(CharSourceRange::getTokenRange(R));
return DB;
@@ -1476,7 +1516,7 @@ inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
inline const StreamingDiagnostic &
operator<<(const StreamingDiagnostic &DB,
- const llvm::Optional<SourceRange> &Opt) {
+ const std::optional<SourceRange> &Opt) {
if (Opt)
DB << *Opt;
return DB;
@@ -1484,15 +1524,14 @@ operator<<(const StreamingDiagnostic &DB,
inline const StreamingDiagnostic &
operator<<(const StreamingDiagnostic &DB,
- const llvm::Optional<CharSourceRange> &Opt) {
+ const std::optional<CharSourceRange> &Opt) {
if (Opt)
DB << *Opt;
return DB;
}
inline const StreamingDiagnostic &
-operator<<(const StreamingDiagnostic &DB,
- const llvm::Optional<FixItHint> &Opt) {
+operator<<(const StreamingDiagnostic &DB, const std::optional<FixItHint> &Opt) {
if (Opt)
DB << *Opt;
return DB;
@@ -1531,7 +1570,7 @@ inline DiagnosticBuilder DiagnosticsEngine::Report(unsigned DiagID) {
/// currently in-flight diagnostic.
class Diagnostic {
const DiagnosticsEngine *DiagObj;
- StringRef StoredDiagMessage;
+ std::optional<StringRef> StoredDiagMessage;
public:
explicit Diagnostic(const DiagnosticsEngine *DO) : DiagObj(DO) {}
@@ -1577,18 +1616,18 @@ public:
/// Return the specified signed integer argument.
/// \pre getArgKind(Idx) == DiagnosticsEngine::ak_sint
- int getArgSInt(unsigned Idx) const {
+ int64_t getArgSInt(unsigned Idx) const {
assert(getArgKind(Idx) == DiagnosticsEngine::ak_sint &&
"invalid argument accessor!");
- return (int)DiagObj->DiagStorage.DiagArgumentsVal[Idx];
+ return (int64_t)DiagObj->DiagStorage.DiagArgumentsVal[Idx];
}
/// Return the specified unsigned integer argument.
/// \pre getArgKind(Idx) == DiagnosticsEngine::ak_uint
- unsigned getArgUInt(unsigned Idx) const {
+ uint64_t getArgUInt(unsigned Idx) const {
assert(getArgKind(Idx) == DiagnosticsEngine::ak_uint &&
"invalid argument accessor!");
- return (unsigned)DiagObj->DiagStorage.DiagArgumentsVal[Idx];
+ return DiagObj->DiagStorage.DiagArgumentsVal[Idx];
}
/// Return the specified IdentifierInfo argument.
@@ -1602,7 +1641,7 @@ public:
/// Return the specified non-string argument in an opaque form.
/// \pre getArgKind(Idx) != DiagnosticsEngine::ak_std_string
- intptr_t getRawArg(unsigned Idx) const {
+ uint64_t getRawArg(unsigned Idx) const {
assert(getArgKind(Idx) != DiagnosticsEngine::ak_std_string &&
"invalid argument accessor!");
return DiagObj->DiagStorage.DiagArgumentsVal[Idx];
@@ -1687,9 +1726,7 @@ public:
range_iterator range_end() const { return Ranges.end(); }
unsigned range_size() const { return Ranges.size(); }
- ArrayRef<CharSourceRange> getRanges() const {
- return llvm::makeArrayRef(Ranges);
- }
+ ArrayRef<CharSourceRange> getRanges() const { return llvm::ArrayRef(Ranges); }
using fixit_iterator = std::vector<FixItHint>::const_iterator;
@@ -1697,11 +1734,12 @@ public:
fixit_iterator fixit_end() const { return FixIts.end(); }
unsigned fixit_size() const { return FixIts.size(); }
- ArrayRef<FixItHint> getFixIts() const {
- return llvm::makeArrayRef(FixIts);
- }
+ ArrayRef<FixItHint> getFixIts() const { return llvm::ArrayRef(FixIts); }
};
+// Simple debug printing of StoredDiagnostic.
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const StoredDiagnostic &);
+
/// Abstract interface, implemented by clients of the front-end, which
/// formats and prints fully processed diagnostics.
class DiagnosticConsumer {
@@ -1789,12 +1827,17 @@ public:
struct TemplateDiffTypes {
intptr_t FromType;
intptr_t ToType;
+ LLVM_PREFERRED_TYPE(bool)
unsigned PrintTree : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned PrintFromType : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned ElideType : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned ShowColors : 1;
// The printer sets this variable to true if the template diff was used.
+ LLVM_PREFERRED_TYPE(bool)
unsigned TemplateDiffUsed : 1;
};
@@ -1807,7 +1850,7 @@ const char ToggleHighlight = 127;
void ProcessWarningOptions(DiagnosticsEngine &Diags,
const DiagnosticOptions &Opts,
bool ReportDiags = true);
-
+void EscapeStringForDiagnostic(StringRef Str, SmallVectorImpl<char> &OutStr);
} // namespace clang
#endif // LLVM_CLANG_BASIC_DIAGNOSTIC_H
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.td b/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.td
index ab2c738a2ace..8d66e265fbae 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/Diagnostic.td
@@ -55,11 +55,11 @@ class DiagCategory<string Name> {
}
// Diagnostic Groups.
-class DiagGroup<string Name, list<DiagGroup> subgroups = []> {
+class DiagGroup<string Name, list<DiagGroup> subgroups = [], code docs = [{}]> {
string GroupName = Name;
list<DiagGroup> SubGroups = subgroups;
string CategoryName = "";
- code Documentation = [{}];
+ code Documentation = docs;
}
class InGroup<DiagGroup G> { DiagGroup Group = G; }
//class IsGroup<string Name> { DiagGroup Group = DiagGroup<Name>; }
@@ -75,15 +75,16 @@ include "DiagnosticGroups.td"
// All diagnostics emitted by the compiler are an indirect subclass of this.
-class Diagnostic<string text, DiagClass DC, Severity defaultmapping> {
+class Diagnostic<string summary, DiagClass DC, Severity defaultmapping> {
/// Component is specified by the file with a big let directive.
string Component = ?;
- string Text = text;
+ string Summary = summary;
DiagClass Class = DC;
SFINAEResponse SFINAE = SFINAE_Suppress;
bit AccessControl = 0;
bit WarningNoWerror = 0;
bit ShowInSystemHeader = 0;
+ bit ShowInSystemMacro = 1;
bit Deferrable = 0;
Severity DefaultSeverity = defaultmapping;
DiagGroup Group;
@@ -108,6 +109,14 @@ class SuppressInSystemHeader {
bit ShowInSystemHeader = 0;
}
+class ShowInSystemMacro {
+ bit ShowInSystemMacro = 1;
+}
+
+class SuppressInSystemMacro {
+ bit ShowInSystemMacro = 0;
+}
+
class Deferrable {
bit Deferrable = 1;
}
@@ -148,7 +157,6 @@ class DefaultRemark { Severity DefaultSeverity = SEV_Remark; }
// Definitions for Diagnostics.
include "DiagnosticASTKinds.td"
-include "DiagnosticAnalysisKinds.td"
include "DiagnosticCommentKinds.td"
include "DiagnosticCommonKinds.td"
include "DiagnosticCrossTUKinds.td"
@@ -159,4 +167,3 @@ include "DiagnosticParseKinds.td"
include "DiagnosticRefactoringKinds.td"
include "DiagnosticSemaKinds.td"
include "DiagnosticSerializationKinds.td"
-
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticAST.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticAST.h
index 76c31ad9508e..24ef2689eac0 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticAST.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticAST.h
@@ -15,7 +15,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
ENUM,
#define ASTSTART
#include "clang/Basic/DiagnosticASTKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticASTKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticASTKinds.td
index 496d86ee2fe7..c81d17ed6410 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticASTKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticASTKinds.td
@@ -11,9 +11,14 @@ let Component = "AST" in {
// Constant expression diagnostics. These (and their users) belong in Sema.
def note_expr_divide_by_zero : Note<"division by zero">;
def note_constexpr_invalid_cast : Note<
- "%select{reinterpret_cast|dynamic_cast|cast that performs the conversions of"
- " a reinterpret_cast|cast from %1}0 is not allowed in a constant expression"
+ "%select{reinterpret_cast|dynamic_cast|%select{this conversion|cast that"
+ " performs the conversions of a reinterpret_cast}1|cast from %1}0"
+ " is not allowed in a constant expression"
"%select{| in C++ standards before C++20||}0">;
+def note_constexpr_invalid_void_star_cast : Note<
+ "cast from %0 is not allowed in a constant expression "
+ "%select{in C++ standards before C++2c|because the pointed object "
+ "type %2 is not similar to the target type %3}1">;
def note_constexpr_invalid_downcast : Note<
"cannot cast object of dynamic type %0 to type %1">;
def note_constexpr_overflow : Note<
@@ -64,7 +69,11 @@ def note_consteval_address_accessible : Note<
"%select{pointer|reference}0 to a consteval declaration "
"is not a constant expression">;
def note_constexpr_uninitialized : Note<
- "%select{|sub}0object of type %1 is not initialized">;
+ "subobject %select{of type |}0%1 is not initialized">;
+def note_constexpr_uninitialized_base : Note<
+ "constructor of base class %0 is not called">;
+def note_constexpr_static_local : Note<
+ "control flows through the definition of a %select{static|thread_local}0 variable">;
def note_constexpr_subobject_declared_here : Note<
"subobject declared here">;
def note_constexpr_array_index : Note<"cannot refer to element %0 of "
@@ -81,7 +90,23 @@ def note_constexpr_pointer_subtraction_not_same_array : Note<
def note_constexpr_pointer_subtraction_zero_size : Note<
"subtraction of pointers to type %0 of zero size">;
def note_constexpr_pointer_comparison_unspecified : Note<
- "comparison has unspecified value">;
+ "comparison between '%0' and '%1' has unspecified value">;
+def note_constexpr_pointer_constant_comparison : Note<
+ "comparison of numeric address '%0' with pointer '%1' can only be performed "
+ "at runtime">;
+def note_constexpr_literal_comparison : Note<
+ "comparison of addresses of literals has unspecified value">;
+def note_constexpr_pointer_weak_comparison : Note<
+ "comparison against address of weak declaration '%0' can only be performed "
+ "at runtime">;
+def note_constexpr_mem_pointer_weak_comparison : Note<
+ "comparison against pointer to weak member %q0 can only be performed "
+ "at runtime">;
+def note_constexpr_pointer_comparison_past_end : Note<
+ "comparison against pointer '%0' that points past the end of a "
+ "complete object has unspecified value">;
+def note_constexpr_pointer_comparison_zero_sized : Note<
+ "comparison of pointers '%0' and '%1' to unrelated zero-sized objects">;
def note_constexpr_pointer_comparison_base_classes : Note<
"comparison of addresses of subobjects of different base classes "
"has unspecified value">;
@@ -108,6 +133,8 @@ def note_constexpr_null_subobject : Note<
"access array element of|perform pointer arithmetic on|"
"access real component of|"
"access imaginary component of}0 null pointer">;
+def note_constexpr_null_callee : Note<
+ "'%0' evaluates to a null function pointer">;
def note_constexpr_function_param_value_unknown : Note<
"function parameter %0 with unknown value cannot be used in a constant "
"expression">;
@@ -290,7 +317,7 @@ def note_constexpr_memcpy_unsupported : Note<
"source is not a contiguous array of at least %4 elements of type %3|"
"destination is not a contiguous array of at least %4 elements of type %3}2">;
def note_constexpr_bit_cast_unsupported_type : Note<
- "constexpr bit_cast involving type %0 is not yet supported">;
+ "constexpr bit cast involving type %0 is not yet supported">;
def note_constexpr_bit_cast_unsupported_bitfield : Note<
"constexpr bit_cast involving bit-field is not yet supported">;
def note_constexpr_bit_cast_invalid_type : Note<
@@ -299,6 +326,9 @@ def note_constexpr_bit_cast_invalid_type : Note<
"%select{type|member}1 is not allowed in a constant expression">;
def note_constexpr_bit_cast_invalid_subtype : Note<
"invalid type %0 is a %select{member|base}1 of %2">;
+def note_constexpr_bit_cast_invalid_vector : Note<
+ "bit_cast involving type %0 is not allowed in a constant expression; "
+ "element size %1 * element count %2 is not a multiple of the byte size %3">;
def note_constexpr_bit_cast_indet_dest : Note<
"indeterminate value can only initialize an object of type 'unsigned char'"
"%select{, 'char',|}1 or 'std::byte'; %0 is invalid">;
@@ -326,6 +356,9 @@ def note_constexpr_new_negative : Note<
"cannot allocate array; evaluated array bound %0 is negative">;
def note_constexpr_new_too_large : Note<
"cannot allocate array; evaluated array bound %0 is too large">;
+def note_constexpr_new_exceeds_limits : Note<
+ "cannot allocate array; evaluated array bound %0 exceeds the limit (%1); "
+ "use '-fconstexpr-steps' to increase this limit">;
def note_constexpr_new_too_small : Note<
"cannot allocate array; evaluated array bound %0 is too small to hold "
"%1 explicitly initialized elements">;
@@ -362,6 +395,10 @@ def note_constexpr_memory_leak : Note<
"%plural{0:|: (along with %0 other memory leak%s0)}0">;
def note_constexpr_unsupported_layout : Note<
"type %0 has unexpected layout">;
+def note_constexpr_unsupported_flexible_array : Note<
+ "flexible array initialization is not yet supported">;
+def note_constexpr_non_const_vectorelements : Note<
+ "cannot determine number of elements for sizeless vectors in a constant expression">;
def err_experimental_clang_interp_failed : Error<
"the experimental clang interpreter failed to evaluate an expression">;
@@ -371,6 +408,10 @@ def warn_integer_constant_overflow : Warning<
def warn_fixedpoint_constant_overflow : Warning<
"overflow in expression; result is %0 with type %1">,
InGroup<DiagGroup<"fixed-point-overflow">>;
+def warn_constexpr_unscoped_enum_out_of_range : Warning<
+ "integer value %0 is outside the valid range of values [%1, %2] for the "
+ "enumeration type %3">, DefaultError, ShowInSystemHeader, ShowInSystemMacro,
+ InGroup<DiagGroup<"enum-constexpr-conversion">>;
// This is a temporary diagnostic, and shall be removed once our
// implementation is complete, and like the preceding constexpr notes belongs
@@ -439,8 +480,6 @@ def note_odr_tag_kind_here: Note<
def note_odr_field : Note<"field %0 has type %1 here">;
def note_odr_field_name : Note<"field has name %0 here">;
def note_odr_missing_field : Note<"no corresponding field here">;
-def note_odr_bit_field : Note<"bit-field %0 with type %1 and length %2 here">;
-def note_odr_not_bit_field : Note<"field %0 is not a bit-field">;
def note_odr_base : Note<"class has base type %0">;
def note_odr_virtual_base : Note<
"%select{non-virtual|virtual}0 derivation here">;
@@ -561,14 +600,397 @@ def warn_odr_non_type_parameter_type_inconsistent : Warning<
InGroup<ODR>;
def err_unsupported_ast_node: Error<"cannot import unsupported AST node %0">;
+// Compare ODR hashes
+def err_module_odr_violation_different_definitions : Error<
+ "%q0 has different definitions in different modules; "
+ "%select{definition in module '%2' is here|defined here}1">;
+def note_first_module_difference : Note<
+ "in first definition, possible difference is here">;
+def note_module_odr_violation_different_definitions : Note<
+ "definition in module '%0' is here">;
+def note_second_module_difference : Note<
+ "in second definition, possible difference is here">;
+
+def err_module_odr_violation_definition_data : Error <
+ "%q0 has different definitions in different modules; first difference is "
+ "%select{definition in module '%2'|defined here}1 found "
+ "%select{"
+ "%4 base %plural{1:class|:classes}4|"
+ "%4 virtual base %plural{1:class|:classes}4|"
+ "%ordinal4 base class with type %5|"
+ "%ordinal4 %select{non-virtual|virtual}5 base class %6|"
+ "%ordinal4 base class %5 with "
+ "%select{public|protected|private|no}6 access specifier}3">;
+
+def note_module_odr_violation_definition_data : Note <
+ "but in '%0' found "
+ "%select{"
+ "%2 base %plural{1:class|:classes}2|"
+ "%2 virtual base %plural{1:class|:classes}2|"
+ "%ordinal2 base class with different type %3|"
+ "%ordinal2 %select{non-virtual|virtual}3 base class %4|"
+ "%ordinal2 base class %3 with "
+ "%select{public|protected|private|no}4 access specifier}1">;
+
+def err_module_odr_violation_objc_interface : Error <
+ "%0 has different definitions in different modules; first difference is "
+ "%select{definition in module '%2'|defined here}1 found "
+ "%select{"
+ "%select{no super class|super class with type %5}4|"
+ "instance variable '%4' access control is "
+ "%select{|@private|@protected|@public|@package}5"
+ "}3">;
+def note_module_odr_violation_objc_interface : Note <
+ "but in %select{'%1'|definition here}0 found "
+ "%select{"
+ "%select{no super class|super class with type %4}3|"
+ "instance variable '%3' access control is "
+ "%select{|@private|@protected|@public|@package}4"
+ "}2">;
+
+def err_module_odr_violation_template_parameter : Error <
+ "%q0 has different definitions in different modules; first difference is "
+ "%select{definition in module '%2'|defined here}1 found "
+ "%select{"
+ "unnamed template parameter|"
+ "template parameter %5|"
+ "template parameter with %select{no |}4default argument|"
+ "template parameter with default argument}3">;
+
+def note_module_odr_violation_template_parameter : Note <
+ "but in '%0' found "
+ "%select{"
+ "unnamed template parameter %2|"
+ "template parameter %3|"
+ "template parameter with %select{no |}2default argument|"
+ "template parameter with different default argument}1">;
+
+def err_module_odr_violation_mismatch_decl : Error<
+ "%q0 has different definitions in different modules; first difference is "
+ "%select{definition in module '%2'|defined here}1 found "
+ "%select{end of class|public access specifier|private access specifier|"
+ "protected access specifier|static assert|field|method|type alias|typedef|"
+ "data member|friend declaration|function template|method|instance variable|"
+ "property}3">;
+def note_module_odr_violation_mismatch_decl : Note<
+ "but in %select{'%1'|definition here}0 found "
+ "%select{end of class|public access specifier|private access specifier|"
+ "protected access specifier|static assert|field|method|type alias|typedef|"
+ "data member|friend declaration|function template|method|instance variable|"
+ "property}2">;
+
+def err_module_odr_violation_record : Error<
+ "%q0 has different definitions in different modules; first difference is "
+ "%select{definition in module '%2'|defined here}1 found "
+ "%select{"
+ "static assert with condition|"
+ "static assert with message|"
+ "static assert with %select{|no }4message|"
+ "%select{method %5|constructor|destructor}4|"
+ "%select{method %5|constructor|destructor}4 "
+ "is %select{not deleted|deleted}6|"
+ "%select{method %5|constructor|destructor}4 "
+ "is %select{not defaulted|defaulted}6|"
+ "%select{method %5|constructor|destructor}4 "
+ "is %select{|pure }6%select{not virtual|virtual}7|"
+ "%select{method %5|constructor|destructor}4 "
+ "is %select{not static|static}6|"
+ "%select{method %5|constructor|destructor}4 "
+ "is %select{not volatile|volatile}6|"
+ "%select{method %5|constructor|destructor}4 "
+ "is %select{not const|const}6|"
+ "%select{method %5|constructor|destructor}4 "
+ "is %select{not inline|inline}6|"
+ "%select{method %5|constructor|destructor}4 "
+ "with %ordinal6 parameter with%select{out|}7 a default argument|"
+ "%select{method %5|constructor|destructor}4 "
+ "with %ordinal6 parameter with a default argument|"
+ "%select{method %5|constructor|destructor}4 "
+ "with %select{no |}6template arguments|"
+ "%select{method %5|constructor|destructor}4 "
+ "with %6 template argument%s6|"
+ "%select{method %5|constructor|destructor}4 "
+ "with %6 for %ordinal7 template argument|"
+ "%select{method %5|constructor|destructor}4 "
+ "with %select{no body|body}6|"
+ "%select{method %5|constructor|destructor}4 "
+ "with body|"
+ "friend %select{class|function}4|"
+ "friend %4|"
+ "friend function %4|"
+ "function template %4 with %5 template parameter%s5|"
+ "function template %4 with %ordinal5 template parameter being a "
+ "%select{type|non-type|template}6 template parameter|"
+ "function template %4 with %ordinal5 template parameter "
+ "%select{with no name|named %7}6|"
+ "function template %4 with %ordinal5 template parameter with "
+ "%select{no |}6default argument|"
+ "function template %4 with %ordinal5 template parameter with "
+ "default argument %6|"
+ "function template %4 with %ordinal5 template parameter with one type|"
+ "function template %4 with %ordinal5 template parameter %select{not |}6"
+ "being a template parameter pack|"
+ "}3">;
+
+def note_module_odr_violation_record : Note<"but in '%0' found "
+ "%select{"
+ "static assert with different condition|"
+ "static assert with different message|"
+ "static assert with %select{|no }2message|"
+ "%select{method %3|constructor|destructor}2|"
+ "%select{method %3|constructor|destructor}2 "
+ "is %select{not deleted|deleted}4|"
+ "%select{method %3|constructor|destructor}2 "
+ "is %select{not defaulted|defaulted}4|"
+ "%select{method %3|constructor|destructor}2 "
+ "is %select{|pure }4%select{not virtual|virtual}5|"
+ "%select{method %3|constructor|destructor}2 "
+ "is %select{not static|static}4|"
+ "%select{method %3|constructor|destructor}2 "
+ "is %select{not volatile|volatile}4|"
+ "%select{method %3|constructor|destructor}2 "
+ "is %select{not const|const}4|"
+ "%select{method %3|constructor|destructor}2 "
+ "is %select{not inline|inline}4|"
+ "%select{method %3|constructor|destructor}2 "
+ "with %ordinal4 parameter with%select{out|}5 a default argument|"
+ "%select{method %3|constructor|destructor}2 "
+ "with %ordinal4 parameter with a different default argument|"
+ "%select{method %3|constructor|destructor}2 "
+ "with %select{no |}4template arguments|"
+ "%select{method %3|constructor|destructor}2 "
+ "with %4 template argument%s4|"
+ "%select{method %3|constructor|destructor}2 "
+ "with %4 for %ordinal5 template argument|"
+ "%select{method %3|constructor|destructor}2 "
+ "with %select{no body|body}4|"
+ "%select{method %3|constructor|destructor}2 "
+ "with different body|"
+ "friend %select{class|function}2|"
+ "friend %2|"
+ "friend function %2|"
+ "function template %2 with %3 template parameter%s3|"
+ "function template %2 with %ordinal3 template paramter being a "
+ "%select{type|non-type|template}4 template parameter|"
+ "function template %2 with %ordinal3 template parameter "
+ "%select{with no name|named %5}4|"
+ "function template %2 with %ordinal3 template parameter with "
+ "%select{no |}4default argument|"
+ "function template %2 with %ordinal3 template parameter with "
+ "default argument %4|"
+ "function template %2 with %ordinal3 template parameter with different type|"
+ "function template %2 with %ordinal3 template parameter %select{not |}4"
+ "being a template parameter pack|"
+ "}1">;
+
+def err_module_odr_violation_field : Error<
+ "%q0 has different definitions in different modules; first difference is "
+ "%select{definition in module '%2'|defined here}1 found "
+ "%select{"
+ "field %4|"
+ "field %4 with type %5|"
+ "%select{non-|}5bitfield %4|"
+ "bitfield %4 with one width expression|"
+ "%select{non-|}5mutable field %4|"
+ "field %4 with %select{no|an}5 initializer|"
+ "field %4 with an initializer"
+ "}3">;
+def note_module_odr_violation_field : Note<
+ "but in %select{'%1'|definition here}0 found "
+ "%select{"
+ "field %3|"
+ "field %3 with type %4|"
+ "%select{non-|}4bitfield %3|"
+ "bitfield %3 with different width expression|"
+ "%select{non-|}4mutable field %3|"
+ "field %3 with %select{no|an}4 initializer|"
+ "field %3 with a different initializer"
+ "}2">;
+
+def err_module_odr_violation_typedef : Error<
+ "%q0 has different definitions in different modules; first difference is "
+ "%select{definition in module '%2'|defined here}1 found "
+ "%select{"
+ "%select{typedef|type alias}4 name %5|"
+ "%select{typedef|type alias}4 %5 with underlying type %6"
+ "}3">;
+def note_module_odr_violation_typedef : Note<"but in '%0' found "
+ "%select{"
+ "%select{typedef|type alias}2 name %3|"
+ "%select{typedef|type alias}2 %3 with different underlying type %4"
+ "}1">;
+
+def err_module_odr_violation_variable : Error<
+ "%q0 has different definitions in different modules; first difference is "
+ "%select{definition in module '%2'|defined here}1 found "
+ "%select{"
+ "data member with name %4|"
+ "data member %4 with type %5|"
+ "data member %4 with%select{out|}5 an initializer|"
+ "data member %4 with an initializer|"
+ "data member %4 %select{is constexpr|is not constexpr}5"
+ "}3">;
+def note_module_odr_violation_variable : Note<"but in '%0' found "
+ "%select{"
+ "data member with name %2|"
+ "data member %2 with different type %3|"
+ "data member %2 with%select{out|}3 an initializer|"
+ "data member %2 with a different initializer|"
+ "data member %2 %select{is constexpr|is not constexpr}3"
+ "}1">;
+
+def err_module_odr_violation_function : Error<
+ "%q0 has different definitions in different modules; "
+ "%select{definition in module '%2'|defined here}1 "
+ "first difference is "
+ "%select{"
+ "return type is %4|"
+ "%ordinal4 parameter with name %5|"
+ "%ordinal4 parameter with type %5%select{| decayed from %7}6|"
+ "%ordinal4 parameter with%select{out|}5 a default argument|"
+ "%ordinal4 parameter with a default argument|"
+ "function body"
+ "}3">;
+
+def note_module_odr_violation_function : Note<"but in '%0' found "
+ "%select{"
+ "different return type %2|"
+ "%ordinal2 parameter with name %3|"
+ "%ordinal2 parameter with type %3%select{| decayed from %5}4|"
+ "%ordinal2 parameter with%select{out|}3 a default argument|"
+ "%ordinal2 parameter with a different default argument|"
+ "a different body"
+ "}1">;
+
+def err_module_odr_violation_enum : Error<
+ "%q0 has different definitions in different modules; "
+ "%select{definition in module '%2'|defined here}1 "
+ "first difference is "
+ "%select{"
+ "enum that is %select{not scoped|scoped}4|"
+ "enum scoped with keyword %select{struct|class}4|"
+ "enum %select{without|with}4 specified type|"
+ "enum with specified type %4|"
+ "enum with %4 element%s4|"
+ "%ordinal4 element has name %5|"
+ "%ordinal4 element %5 %select{has|does not have}6 an initializer|"
+ "%ordinal4 element %5 has an initializer|"
+ "}3">;
+
+def note_module_odr_violation_enum : Note<"but in '%0' found "
+ "%select{"
+ "enum that is %select{not scoped|scoped}2|"
+ "enum scoped with keyword %select{struct|class}2|"
+ "enum %select{without|with}2 specified type|"
+ "enum with specified type %2|"
+ "enum with %2 element%s2|"
+ "%ordinal2 element has name %3|"
+ "%ordinal2 element %3 %select{has|does not have}4 an initializer|"
+ "%ordinal2 element %3 has different initializer|"
+ "}1">;
+
+def err_module_odr_violation_referenced_protocols : Error <
+ "%q0 has different definitions in different modules; first difference is "
+ "%select{definition in module '%2'|defined here}1 found "
+ "%select{"
+ "%4 referenced %plural{1:protocol|:protocols}4|"
+ "%ordinal4 referenced protocol with name %5"
+ "}3">;
+def note_module_odr_violation_referenced_protocols : Note <
+ "but in %select{'%1'|definition here}0 found "
+ "%select{"
+ "%3 referenced %plural{1:protocol|:protocols}3|"
+ "%ordinal3 referenced protocol with different name %4"
+ "}2">;
+
+def err_module_odr_violation_objc_method : Error<
+ "%q0 has different definitions in different modules; first difference is "
+ "%select{definition in module '%2'|defined here}1 found "
+ "%select{"
+ "method %4 with return type %5|"
+ "%select{class|instance}5 method %4|"
+ "%select{no|'required'|'optional'}4 method control|"
+ "method %4 with %select{no designated initializer|designated initializer}5|"
+ "%select{regular|direct}5 method %4|"
+ "method %4"
+ "}3">;
+def note_module_odr_violation_objc_method : Note<
+ "but in %select{'%1'|definition here}0 found "
+ "%select{"
+ "method %3 with different return type %4|"
+ "method %3 as %select{class|instance}4 method|"
+ "%select{no|'required'|'optional'}3 method control|"
+ "method %3 with %select{no designated initializer|designated initializer}4|"
+ "%select{regular|direct}4 method %3|"
+ "different method %3"
+ "}2">;
+
+def err_module_odr_violation_method_params : Error<
+ "%q0 has different definitions in different modules; first difference is "
+ "%select{definition in module '%2'|defined here}1 found "
+ "%select{"
+ "%select{method %5|constructor|destructor}4 "
+ "that has %6 parameter%s6|"
+ "%select{method %5|constructor|destructor}4 "
+ "with %ordinal6 parameter of type %7%select{| decayed from %9}8|"
+ "%select{method %5|constructor|destructor}4 "
+ "with %ordinal6 parameter named %7"
+ "}3">;
+def note_module_odr_violation_method_params : Note<
+ "but in %select{'%1'|definition here}0 found "
+ "%select{"
+ "%select{method %4|constructor|destructor}3 "
+ "that has %5 parameter%s5|"
+ "%select{method %4|constructor|destructor}3 "
+ "with %ordinal5 parameter of type %6%select{| decayed from %8}7|"
+ "%select{method %4|constructor|destructor}3 "
+ "with %ordinal5 parameter named %6"
+ "}2">;
+
+def err_module_odr_violation_objc_property : Error<
+ "%q0 has different definitions in different modules; first difference is "
+ "%select{definition in module '%2'|defined here}1 found "
+ "%select{"
+ "property %4|"
+ "property %4 with type %5|"
+ "%select{no|'required'|'optional'}4 property control|"
+ "property %4 with %select{default |}6'%select{none|readonly|getter|assign|"
+ "readwrite|retain|copy|nonatomic|setter|atomic|weak|strong|"
+ "unsafe_unretained|nullability|null_resettable|class|direct}5' attribute"
+ "}3">;
+def note_module_odr_violation_objc_property : Note<
+ "but in %select{'%1'|definition here}0 found "
+ "%select{"
+ "property %3|"
+ "property %3 with type %4|"
+ "%select{no|'required'|'optional'}3 property control|"
+ "property %3 with different '%select{none|readonly|getter|assign|"
+ "readwrite|retain|copy|nonatomic|setter|atomic|weak|strong|"
+ "unsafe_unretained|nullability|null_resettable|class|direct}4' attribute"
+ "}2">;
+
+def err_module_odr_violation_mismatch_decl_unknown : Error<
+ "%q0 %select{with definition in module '%2'|defined here}1 has different "
+ "definitions in different modules; first difference is this "
+ "%select{||||static assert|field|method|type alias|typedef|data member|"
+ "friend declaration|function template|method|instance variable|"
+ "property|unexpected decl}3">;
+def note_module_odr_violation_mismatch_decl_unknown : Note<
+ "but in %select{'%1'|definition here}0 found "
+ "%select{||||different static assert|different field|different method|"
+ "different type alias|different typedef|different data member|"
+ "different friend declaration|different function template|different method|"
+ "different instance variable|different property|another unexpected decl}2">;
+
+
def remark_sanitize_address_insert_extra_padding_accepted : Remark<
"-fsanitize-address-field-padding applied to %0">, ShowInSystemHeader,
InGroup<SanitizeAddressRemarks>;
def remark_sanitize_address_insert_extra_padding_rejected : Remark<
"-fsanitize-address-field-padding ignored for %0 because it "
"%select{is not C++|is packed|is a union|is trivially copyable|"
- "has trivial destructor|is standard layout|is in a blacklisted file|"
- "is blacklisted}1">, ShowInSystemHeader,
+ "has trivial destructor|is standard layout|is in a ignorelisted file|"
+ "is ignorelisted}1">, ShowInSystemHeader,
InGroup<SanitizeAddressRemarks>;
def warn_npot_ms_struct : Warning<
@@ -576,6 +998,16 @@ def warn_npot_ms_struct : Warning<
"data types with sizes that aren't a power of two">,
DefaultError, InGroup<IncompatibleMSStruct>;
+// -Wpadded-bitfield
+def warn_padded_struct_bitfield : Warning<
+ "padding %select{struct|interface|class}0 %1 with %2 "
+ "%select{byte|bit}3%s2 to align %4">,
+ InGroup<PaddedBitField>, DefaultIgnore;
+def warn_padded_struct_anon_bitfield : Warning<
+ "padding %select{struct|interface|class}0 %1 with %2 "
+ "%select{byte|bit}3%s2 to align anonymous bit-field">,
+ InGroup<PaddedBitField>, DefaultIgnore;
+
// -Wpadded, -Wpacked
def warn_padded_struct_field : Warning<
"padding %select{struct|interface|class}0 %1 with %2 "
@@ -583,11 +1015,21 @@ def warn_padded_struct_field : Warning<
InGroup<Padded>, DefaultIgnore;
def warn_padded_struct_anon_field : Warning<
"padding %select{struct|interface|class}0 %1 with %2 "
- "%select{byte|bit}3%s2 to align anonymous bit-field">,
+ "%select{byte|bit}3%s2 to align anonymous field">,
InGroup<Padded>, DefaultIgnore;
def warn_padded_struct_size : Warning<
"padding size of %0 with %1 %select{byte|bit}2%s1 to alignment boundary">,
InGroup<Padded>, DefaultIgnore;
def warn_unnecessary_packed : Warning<
"packed attribute is unnecessary for %0">, InGroup<Packed>, DefaultIgnore;
+def warn_unpacked_field
+ : Warning<
+ "not packing field %0 as it is non-POD for the purposes of layout">,
+ InGroup<PackedNonPod>,
+ DefaultIgnore;
+
+// -Wunaligned-access
+def warn_unaligned_access : Warning<
+ "field %1 within %0 is less aligned than %2 and is usually due to %0 being "
+ "packed, which can lead to unaligned accesses">, InGroup<UnalignedAccess>, DefaultIgnore;
}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticAnalysis.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticAnalysis.h
index f9037cc8d75a..676b58f7d6ef 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticAnalysis.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticAnalysis.h
@@ -15,7 +15,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
ENUM,
#define ANALYSISSTART
#include "clang/Basic/DiagnosticAnalysisKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCategories.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCategories.h
index 0decf15080a0..14be326f7515 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCategories.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCategories.h
@@ -19,6 +19,14 @@ namespace clang {
#undef GET_CATEGORY_TABLE
DiagCat_NUM_CATEGORIES
};
+
+ enum class Group {
+#define DIAG_ENTRY(GroupName, FlagNameOffset, Members, SubGroups, Docs) \
+ GroupName,
+#include "clang/Basic/DiagnosticGroups.inc"
+#undef CATEGORY
+#undef DIAG_ENTRY
+ };
} // end namespace diag
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticComment.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticComment.h
index 6e011bfcebab..17c0053e9a33 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticComment.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticComment.h
@@ -15,7 +15,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
ENUM,
#define COMMENTSTART
#include "clang/Basic/DiagnosticCommentKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCommentKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCommentKinds.td
index ae63bb623ed3..1122ace3027d 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCommentKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCommentKinds.td
@@ -155,8 +155,8 @@ def note_add_deprecation_attr : Note<
// inline contents commands
-def warn_doc_inline_contents_no_argument : Warning<
- "'%select{\\|@}0%1' command does not have a valid word argument">,
+def warn_doc_inline_command_not_enough_arguments : Warning<
+ "'%select{\\|@}0%1' command has %plural{0:no|:%2}2 word argument%s2, expected %3">,
InGroup<Documentation>, DefaultIgnore;
// verbatim block commands
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCommonKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCommonKinds.td
index 4dff3379ed35..08bb1d81ba29 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCommonKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCommonKinds.td
@@ -55,15 +55,25 @@ def err_expected_colon_after_setter_name : Error<
def err_expected_string_literal : Error<"expected string literal "
"%select{in %1|for diagnostic message in static_assert|"
"for optional message in 'availability' attribute|"
- "for %select{language|source container}1 name in "
- "'external_source_symbol' attribute}0">;
+ "for %select{language name|source container name|USR}1 in "
+ "'external_source_symbol' attribute|"
+ "as argument of '%1' attribute}0">;
+
def err_invalid_string_udl : Error<
"string literal with user-defined suffix cannot be used here">;
def err_invalid_character_udl : Error<
"character literal with user-defined suffix cannot be used here">;
def err_invalid_numeric_udl : Error<
"numeric literal with user-defined suffix cannot be used here">;
-
+def warn_pragma_debug_missing_argument : Warning<
+ "missing argument to debug command '%0'">, InGroup<IgnoredPragmas>;
+def warn_pragma_debug_unexpected_argument : Warning<
+ "unexpected argument to debug command">, InGroup<IgnoredPragmas>;
+
+def warn_fp_nan_inf_when_disabled : Warning<
+ "use of %select{infinity|NaN}0%select{| via a macro}1 is undefined behavior "
+ "due to the currently enabled floating-point options">,
+ InGroup<DiagGroup<"nan-infinity-disabled", [], NanInfDisabledDocs>>;
}
// Parse && Sema
@@ -117,13 +127,21 @@ def note_pragma_entered_here : Note<"#pragma entered here">;
def note_decl_hiding_tag_type : Note<
"%1 %0 is hidden by a non-type declaration of %0 here">;
def err_attribute_not_type_attr : Error<
- "%0 attribute cannot be applied to types">;
+ "%0%select{ attribute|}1 cannot be applied to types">;
def err_enum_template : Error<"enumeration cannot be a template">;
def warn_cxx20_compat_consteval : Warning<
"'consteval' specifier is incompatible with C++ standards before C++20">,
InGroup<CXX20Compat>, DefaultIgnore;
-
+def warn_missing_type_specifier : Warning<
+ "type specifier missing, defaults to 'int'">,
+ InGroup<ImplicitInt>, DefaultIgnore;
+
+def ext_c_empty_initializer : Extension<
+ "use of an empty initializer is a C23 extension">, InGroup<C23>;
+def warn_c23_compat_empty_initializer : Warning<
+ "use of an empty initializer is incompatible with C standards before C23">,
+ InGroup<CPre23Compat>, DefaultIgnore;
}
let CategoryName = "Nullability Issue" in {
@@ -145,15 +163,26 @@ def warn_conflicting_nullability_attr_overriding_param_types : Warning<
def err_nullability_conflicting : Error<
"nullability specifier %0 conflicts with existing specifier %1">;
+def warn_incompatible_branch_protection_option: Warning <
+ "'-mbranch-protection=' option is incompatible with the '%0' architecture">,
+ InGroup<BranchProtection>;
+
+def warn_target_unsupported_branch_protection_attribute: Warning <
+ "ignoring the 'branch-protection' attribute because the '%0' architecture does not support it">,
+ InGroup<BranchProtection>;
}
// OpenCL Section 6.8.g
def err_opencl_unknown_type_specifier : Error<
- "%select{OpenCL C|C++ for OpenCL}0 version %1 does not support the "
- "'%2' %select{type qualifier|storage class specifier}3">;
+ "%0 does not support the '%1' "
+ "%select{type qualifier|storage class specifier}2">;
def warn_unknown_attribute_ignored : Warning<
"unknown attribute %0 ignored">, InGroup<UnknownAttributes>;
+def warn_attribute_ignored : Warning<"%0 attribute ignored">,
+ InGroup<IgnoredAttributes>;
+def err_keyword_not_supported_on_target : Error<
+ "%0 is not supported on this target">;
def err_use_of_tag_name_without_tag : Error<
"must use '%1' tag to refer to type %0%select{| in this scope}2">;
@@ -189,17 +218,23 @@ def ext_cxx11_longlong : Extension<
def warn_cxx98_compat_longlong : Warning<
"'long long' is incompatible with C++98">,
InGroup<CXX98CompatPedantic>, DefaultIgnore;
-def ext_cxx2b_size_t_suffix : ExtWarn<
- "'size_t' suffix for literals is a C++2b extension">,
- InGroup<CXX2b>;
+def ext_cxx23_size_t_suffix : ExtWarn<
+ "'size_t' suffix for literals is a C++23 extension">,
+ InGroup<CXX23>;
def warn_cxx20_compat_size_t_suffix : Warning<
"'size_t' suffix for literals is incompatible with C++ standards before "
- "C++2b">, InGroup<CXXPre2bCompat>, DefaultIgnore;
-def err_cxx2b_size_t_suffix: Error<
- "'size_t' suffix for literals is a C++2b feature">;
+ "C++23">, InGroup<CXXPre23Compat>, DefaultIgnore;
+def err_cxx23_size_t_suffix: Error<
+ "'size_t' suffix for literals is a C++23 feature">;
def err_size_t_literal_too_large: Error<
"%select{signed |}0'size_t' literal is out of range of possible "
"%select{signed |}0'size_t' values">;
+def ext_c23_bitint_suffix : ExtWarn<
+ "'_BitInt' suffix for literals is a C23 extension">,
+ InGroup<C23>;
+def warn_c23_compat_bitint_suffix : Warning<
+ "'_BitInt' suffix for literals is incompatible with C standards before C23">,
+ InGroup<CPre23Compat>, DefaultIgnore;
def err_integer_literal_too_large : Error<
"integer literal is too large to be represented in any %select{signed |}0"
"integer type">;
@@ -229,8 +264,6 @@ def ext_clang_diagnose_if : Extension<"'diagnose_if' is a clang extension">,
InGroup<GccCompat>;
def err_too_large_for_fixed_point : Error<
"this value is too large for this fixed point type">;
-def err_fixed_point_not_enabled : Error<"compile with "
- "'-ffixed-point' to enable fixed point types">;
def err_unimplemented_conversion_with_fixed_point_type : Error<
"conversion between fixed point and %0 is not yet supported">;
@@ -252,34 +285,37 @@ def note_constexpr_invalid_template_arg : Note<
"%select{type_info object|string literal|temporary object|"
"predefined '%3' variable}2 is not allowed in a template argument">;
def err_constexpr_invalid_template_arg : Error<
- note_constexpr_invalid_template_arg.Text>;
+ note_constexpr_invalid_template_arg.Summary>;
// Sema && Frontend
let CategoryName = "Inline Assembly Issue" in {
- def err_asm_invalid_type_in_input : Error<
- "invalid type %0 in asm input for constraint '%1'">;
+def err_asm_invalid_type_in_input : Error<
+ "invalid type %0 in asm input for constraint '%1'">;
- def err_asm_invalid_type : Error<
- "invalid type %0 in asm %select{input|output}1">;
+def err_asm_invalid_type : Error<
+ "invalid type %0 in asm %select{input|output}1">;
- def warn_stack_clash_protection_inline_asm : Warning<
- "Unable to protect inline asm that clobbers stack pointer against stack clash">,
- InGroup<DiagGroup<"stack-protector">>;
+def err_ms_asm_bitfield_unsupported : Error<
+ "an inline asm block cannot have an operand which is a bit-field">;
- def warn_slh_does_not_support_asm_goto
- : Warning<"Speculative load hardening does not protect functions with "
- "asm goto">,
- InGroup<DiagGroup<"slh-asm-goto">>;
+def warn_stack_clash_protection_inline_asm : Warning<
+ "unable to protect inline asm that clobbers stack pointer against stack "
+ "clash">, InGroup<DiagGroup<"stack-protector">>;
+
+def warn_slh_does_not_support_asm_goto : Warning<
+ "speculative load hardening does not protect functions with asm goto">,
+ InGroup<DiagGroup<"slh-asm-goto">>;
}
// Sema && Serialization
def warn_dup_category_def : Warning<
- "duplicate definition of category %1 on interface %0">;
+ "duplicate definition of category %1 on interface %0">,
+ InGroup<DiagGroup<"objc-duplicate-category-definition">>;
// Targets
def err_target_unknown_triple : Error<
- "unknown target triple '%0', please use -triple or -arch">;
+ "unknown target triple '%0'">;
def err_target_unknown_cpu : Error<"unknown target CPU '%0'">;
def note_valid_options : Note<"valid target CPU values are: %0">;
def err_target_unsupported_cpu_for_micromips : Error<
@@ -298,30 +334,54 @@ def err_target_unsupported_unaligned : Error<
"the %0 sub-architecture does not support unaligned accesses">;
def err_target_unsupported_execute_only : Error<
"execute only is not supported for the %0 sub-architecture">;
+def err_target_unsupported_tp_hard : Error<
+ "hardware TLS register is not supported for the %0 sub-architecture">;
def err_target_unsupported_mcmse : Error<
"-mcmse is not supported for %0">;
def err_opt_not_valid_with_opt : Error<
"option '%0' cannot be specified with '%1'">;
+def err_opt_not_valid_with_opt_on_target : Error<
+ "option '%0' cannot be specified with '%1' for the %2 sub-architecture">;
def err_opt_not_valid_without_opt : Error<
"option '%0' cannot be specified without '%1'">;
def err_opt_not_valid_on_target : Error<
"option '%0' cannot be specified on this target">;
+def err_invalid_feature_combination : Error<
+ "invalid feature combination: %0">;
+def warn_invalid_feature_combination : Warning<
+ "invalid feature combination: %0">, InGroup<DiagGroup<"invalid-feature-combination">>;
+def warn_target_unrecognized_env : Warning<
+ "mismatch between architecture and environment in target triple '%0'; did you mean '%1'?">,
+ InGroup<InvalidCommandLineArgument>;
+def warn_knl_knm_isa_support_removed : Warning<
+ "KNL, KNM related Intel Xeon Phi CPU's specific ISA's supports will be removed in LLVM 19.">,
+ InGroup<DiagGroup<"knl-knm-isa-support-removed">>;
// Source manager
def err_cannot_open_file : Error<"cannot open file '%0': %1">, DefaultFatal;
def err_file_modified : Error<
"file '%0' modified since it was first processed">, DefaultFatal;
def err_file_too_large : Error<
- "sorry, unsupported: file '%0' is too large for Clang to process">;
-def err_include_too_large : Error<
- "sorry, this include generates a translation unit too large for"
- " Clang to process.">, DefaultFatal;
+ "file '%0' is too large for Clang to process">;
+def err_sloc_space_too_large : Error<
+ "translation unit is too large for Clang to process: ran out of source locations">, DefaultFatal;
def err_unsupported_bom : Error<"%0 byte order mark detected in '%1', but "
"encoding is not supported">, DefaultFatal;
def err_unable_to_rename_temp : Error<
"unable to rename temporary '%0' to output file '%1': '%2'">;
def err_unable_to_make_temp : Error<
"unable to make temporary file: %0">;
+def remark_sloc_usage : Remark<
+ "source manager location address space usage:">,
+ InGroup<DiagGroup<"sloc-usage">>, DefaultRemark, ShowInSystemHeader;
+def note_total_sloc_usage : Note<
+ "%0B in local locations, %1B in locations loaded from AST files, for a total "
+ "of %2B (%3%% of available space)">;
+def note_file_sloc_usage : Note<
+ "file entered %0 time%s0 using %1B of space"
+ "%plural{0:|: plus %2B for macro expansions}2">;
+def note_file_misc_sloc_usage : Note<
+ "%0 additional files entered using a total of %1B of space">;
// Modules
def err_module_format_unhandled : Error<
@@ -337,6 +397,19 @@ def note_mt_message : Note<"[rewriter] %0">;
def warn_arcmt_nsalloc_realloc : Warning<"[rewriter] call returns pointer to GC managed memory; it will become unmanaged in ARC">;
def err_arcmt_nsinvocation_ownership : Error<"NSInvocation's %0 is not safe to be used with an object with ownership other than __unsafe_unretained">;
+// API notes
+def err_apinotes_message : Error<"%0">;
+def warn_apinotes_message : Warning<"%0">, InGroup<DiagGroup<"apinotes">>;
+def note_apinotes_message : Note<"%0">;
+
+class NonportablePrivateAPINotesPath : Warning<
+ "private API notes file for module '%0' should be named "
+ "'%0_private.apinotes', not '%1'">;
+def warn_apinotes_private_case : NonportablePrivateAPINotesPath,
+ InGroup<DiagGroup<"nonportable-private-apinotes-path">>;
+def warn_apinotes_private_case_system : NonportablePrivateAPINotesPath,
+ DefaultIgnore, InGroup<DiagGroup<"nonportable-private-system-apinotes-path">>;
+
// C++ for OpenCL.
def err_openclcxx_not_supported : Error<
"'%0' is not supported in C++ for OpenCL">;
@@ -371,4 +444,13 @@ def err_opencl_extension_and_feature_differs : Error<
"options %0 and %1 are set to different values">;
def err_opencl_feature_requires : Error<
"feature %0 requires support of %1 feature">;
+
+def warn_throw_not_valid_on_target : Warning<
+ "target '%0' does not support exception handling;"
+ " 'throw' is assumed to be never reached">,
+ InGroup<OpenMPTargetException>;
+def warn_try_not_valid_on_target : Warning<
+ "target '%0' does not support exception handling;"
+ " 'catch' block is ignored">,
+ InGroup<OpenMPTargetException>;
}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCrossTU.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCrossTU.h
index ded85ec3f840..4341bf327b69 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCrossTU.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCrossTU.h
@@ -15,7 +15,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
ENUM,
#define CROSSTUSTART
#include "clang/Basic/DiagnosticCrossTUKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCrossTUKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCrossTUKinds.td
index 4277a3173203..e6ea1956f98a 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCrossTUKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticCrossTUKinds.td
@@ -12,8 +12,8 @@ def err_ctu_error_opening : Error<
"error opening '%0': required by the CrossTU functionality">;
def err_extdefmap_parsing : Error<
- "error parsing index file: '%0' line: %1 'UniqueID filename' format "
- "expected">;
+ "error parsing index file: '%0' line: %1 '<USR-Length>:<USR> <File-Path>' "
+ "format expected">;
def err_multiple_def_index : Error<
"multiple definitions are found for the same key in index ">;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDocs.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDocs.td
index bf88d5d04567..8c024b5cad74 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDocs.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDocs.td
@@ -81,3 +81,18 @@ Diagnostic flags
}];
}
+defvar GCCWriteStringsDocs = [{
+**Note:** enabling this warning in C will change the semantic behavior of the
+program by treating all string literals as having type ``const char *``
+instead of ``char *``. This can cause unexpected behaviors with type-sensitive
+constructs like ``_Generic``.
+}];
+
+defvar NanInfDisabledDocs = [{
+This warning is enabled when source code using the macros ``INFINITY`` or ``NAN``
+is compiled with floating-point options preventing these two values. This can
+lead to undefined behavior. Check the order of command line arguments that modify
+this behavior, such as ``-ffast-math``, ``-fhonor-infinities``, and
+``-fhonor-nans`` (etc), as well as ``#pragma`` directives if this diagnostic is
+generated unexpectedly.
+}];
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriver.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriver.h
index cecd8fd6b4d5..6931bd46542e 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriver.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriver.h
@@ -15,7 +15,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
ENUM,
#define DRIVERSTART
#include "clang/Basic/DiagnosticDriverKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td
index fc3704303a95..094fe1950941 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticDriverKinds.td
@@ -16,8 +16,12 @@ def err_drv_unsupported_opt_with_suggestion : Error<
"unsupported option '%0'; did you mean '%1'?">;
def err_drv_unsupported_opt_for_target : Error<
"unsupported option '%0' for target '%1'">;
+def err_drv_unsupported_opt_for_language_mode : Error<
+ "unsupported option '%0' for language mode '%1'">;
def err_drv_unsupported_option_argument : Error<
"unsupported argument '%1' to option '%0'">;
+def err_drv_unsupported_option_argument_for_target : Error<
+ "unsupported argument '%1' to option '%0' for target '%2'">;
def err_drv_unknown_stdin_type : Error<
"-E or -x required when input is from standard input">;
def err_drv_unknown_stdin_type_clang_cl : Error<
@@ -27,16 +31,15 @@ def err_drv_invalid_arch_name : Error<
"invalid arch name '%0'">;
def err_drv_invalid_riscv_arch_name : Error<
"invalid arch name '%0', %1">;
-def err_drv_invalid_riscv_ext_arch_name : Error<
- "invalid arch name '%0', %1 '%2'">;
+def err_drv_invalid_riscv_cpu_name_for_target : Error<
+ "cpu '%0' does not support rv%select{32|64}1">;
+def warn_drv_invalid_arch_name_with_suggestion : Warning<
+ "ignoring invalid /arch: argument '%0'; for %select{64|32}1-bit expected one of %2">,
+ InGroup<UnusedCommandLineArgument>;
def warn_drv_avr_mcu_not_specified : Warning<
"no target microcontroller specified on command line, cannot "
"link standard libraries, please pass -mmcu=<mcu name>">,
InGroup<AVRRtlibLinkingQuirks>;
-def warn_drv_avr_gcc_not_found: Warning<
- "no avr-gcc installation can be found on the system, "
- "cannot link standard libraries">,
- InGroup<AVRRtlibLinkingQuirks>;
def warn_drv_avr_libc_not_found: Warning<
"no avr-libc installation can be found on the system, "
"cannot link standard libraries">,
@@ -52,38 +55,88 @@ def warn_drv_avr_stdlib_not_linked: Warning<
"standard library not linked and so no interrupt vector table or "
"compiler runtime routines will be linked">,
InGroup<AVRRtlibLinkingQuirks>;
-def err_drv_cuda_bad_gpu_arch : Error<"Unsupported CUDA gpu architecture: %0">;
+def err_drv_cuda_bad_gpu_arch : Error<"unsupported CUDA gpu architecture: %0">;
+def err_drv_offload_bad_gpu_arch : Error<"unsupported %0 gpu architecture: %1">;
def err_drv_no_cuda_installation : Error<
- "cannot find CUDA installation. Provide its path via --cuda-path, or pass "
- "-nocudainc to build without CUDA includes.">;
+ "cannot find CUDA installation; provide its path via '--cuda-path', or pass "
+ "'-nocudainc' to build without CUDA includes">;
def err_drv_no_cuda_libdevice : Error<
- "cannot find libdevice for %0. Provide path to different CUDA installation "
- "via --cuda-path, or pass -nocudalib to build without linking with libdevice.">;
+ "cannot find libdevice for %0; provide path to different CUDA installation "
+ "via '--cuda-path', or pass '-nocudalib' to build without linking with "
+ "libdevice">;
def err_drv_no_rocm_device_lib : Error<
- "cannot find ROCm device library%select{| for %1}0. Provide its path via --rocm-path or "
- "--rocm-device-lib-path, or pass -nogpulib to build without ROCm device library.">;
+ "cannot find ROCm device library%select{| for %1|for ABI version %1}0; provide its path via "
+ "'--rocm-path' or '--rocm-device-lib-path', or pass '-nogpulib' to build "
+ "without ROCm device library">;
def err_drv_no_hip_runtime : Error<
- "cannot find HIP runtime. Provide its path via --rocm-path, or pass "
- "-nogpuinc to build without HIP runtime.">;
-
-def err_drv_undetermined_amdgpu_arch : Error<
- "Cannot determine AMDGPU architecture: %0. Consider passing it via --march.">;
+ "cannot find HIP runtime; provide its path via '--rocm-path', or pass "
+ "'-nogpuinc' to build without HIP runtime">;
+def err_drv_no_hipstdpar_lib : Error<
+ "cannot find HIP Standard Parallelism Acceleration library; provide it via "
+ "'--hipstdpar-path'">;
+def err_drv_no_hipstdpar_thrust_lib : Error<
+ "cannot find rocThrust, which is required by the HIP Standard Parallelism "
+ "Acceleration library; provide it via "
+ "'--hipstdpar-thrust-path'">;
+def err_drv_no_hipstdpar_prim_lib : Error<
+ "cannot find rocPrim, which is required by the HIP Standard Parallelism "
+ "Acceleration library; provide it via '--hipstdpar-prim-path'">;
+
+def err_drv_no_hipspv_device_lib : Error<
+ "cannot find HIP device library%select{| for %1}0; provide its path via "
+ "'--hip-path' or '--hip-device-lib-path', or pass '-nogpulib' to build "
+ "without HIP device library">;
+def err_drv_hipspv_no_hip_path : Error<
+ "'--hip-path' must be specified when offloading to "
+ "SPIR-V%select{| unless %1 is given}0.">;
+
+def err_drv_undetermined_gpu_arch : Error<
+ "cannot determine %0 architecture: %1; consider passing it via "
+ "'%2'">;
+def warn_drv_multi_gpu_arch : Warning<
+ "multiple %0 architectures are detected: %1; only the first one is used for "
+ "'%2'">, InGroup<MultiGPU>;
def err_drv_cuda_version_unsupported : Error<
"GPU arch %0 is supported by CUDA versions between %1 and %2 (inclusive), "
- "but installation at %3 is %4. Use --cuda-path to specify a different CUDA "
- "install, pass a different GPU arch with --cuda-gpu-arch, or pass "
- "--no-cuda-version-check.">;
-def warn_drv_unknown_cuda_version: Warning<
- "Unknown CUDA version. %0 Assuming the latest supported version %1">,
+ "but installation at %3 is %4; use '--cuda-path' to specify a different CUDA "
+ "install, pass a different GPU arch with '--cuda-gpu-arch', or pass "
+ "'--no-cuda-version-check'">;
+def warn_drv_new_cuda_version: Warning<
+ "CUDA version%0 is newer than the latest%select{| partially}1 supported version %2">,
+ InGroup<CudaUnknownVersion>;
+def warn_drv_partially_supported_cuda_version: Warning<
+ "CUDA version %0 is only partially supported">,
InGroup<CudaUnknownVersion>;
-def err_drv_cuda_host_arch : Error<"unsupported architecture '%0' for host compilation.">;
-def err_drv_mix_cuda_hip : Error<"Mixed Cuda and HIP compilation is not supported.">;
-def err_drv_bad_target_id : Error<"Invalid target ID: %0 (A target ID is a processor name "
- "followed by an optional list of predefined features post-fixed by a plus or minus sign deliminated "
- "by colon, e.g. 'gfx908:sramecc+:xnack-')">;
-def err_drv_bad_offload_arch_combo : Error<"Invalid offload arch combinations: %0 and %1 (For a specific "
- "processor, a feature should either exist in all offload archs, or not exist in any offload archs)">;
+def err_drv_cuda_host_arch : Error<
+ "unsupported architecture '%0' for host compilation">;
+def err_drv_mix_cuda_hip : Error<
+ "mixed CUDA and HIP compilation is not supported">;
+def err_drv_bad_target_id : Error<
+ "invalid target ID '%0'; format is a processor name followed by an optional "
+ "colon-delimited list of features followed by an enable/disable sign (e.g., "
+ "'gfx908:sramecc+:xnack-')">;
+def err_drv_bad_offload_arch_combo : Error<
+ "invalid offload arch combinations: '%0' and '%1' (for a specific processor, "
+ "a feature should either exist in all offload archs, or not exist in any "
+ "offload archs)">;
+def warn_drv_unsupported_option_for_offload_arch_req_feature : Warning<
+ "ignoring '%0' option for offload arch '%1' as it is not currently supported "
+ "there. Use it with an offload arch containing '%2' instead">,
+ InGroup<OptionIgnored>;
+def warn_drv_unsupported_option_for_target : Warning<
+ "ignoring '%0' option as it is not currently supported for target '%1'">,
+ InGroup<OptionIgnored>;
+def warn_drv_unsupported_option_for_flang : Warning<
+ "the argument '%0' is not supported for option '%1'. Mapping to '%1%2'">,
+ InGroup<OptionIgnored>;
+def warn_drv_unsupported_diag_option_for_flang : Warning<
+ "The warning option '-%0' is not supported">,
+ InGroup<OptionIgnored>;
+def warn_drv_unsupported_option_for_processor : Warning<
+ "ignoring '%0' option as it is not currently supported for processor '%1'">,
+ InGroup<OptionIgnored>;
+
def err_drv_invalid_thread_model_for_target : Error<
"invalid thread model '%0' in '%1' for this target">;
def err_drv_invalid_linker_name : Error<
@@ -96,13 +149,13 @@ def err_drv_invalid_unwindlib_name : Error<
"invalid unwind library name in argument '%0'">;
def err_drv_incompatible_unwindlib : Error<
"--rtlib=libgcc requires --unwindlib=libgcc">;
+def err_drv_incompatible_options : Error<
+ "the combination of '%0' and '%1' is incompatible">;
def err_drv_invalid_stdlib_name : Error<
"invalid library name in argument '%0'">;
def err_drv_invalid_output_with_multiple_archs : Error<
"cannot use '%0' output with multiple -arch options">;
def err_drv_no_input_files : Error<"no input files">;
-def err_drv_use_of_Z_option : Error<
- "unsupported use of internal gcc -Z option '%0'">;
def err_drv_output_argument_with_multiple_files : Error<
"cannot specify -o when generating multiple output files">;
def err_drv_out_file_argument_with_multiple_sources : Error<
@@ -119,6 +172,8 @@ def err_drv_invalid_darwin_version : Error<
"invalid Darwin version number: %0">;
def err_drv_invalid_diagnotics_hotness_threshold : Error<
"invalid argument in '%0', only integer or 'auto' is supported">;
+def err_drv_invalid_diagnotics_misexpect_tolerance : Error<
+ "invalid argument in '%0', only integers are supported">;
def err_drv_missing_argument : Error<
"argument to '%0' is missing (expected %1 value%s1)">;
def err_drv_invalid_Xarch_argument_with_args : Error<
@@ -129,10 +184,14 @@ def err_drv_invalid_Xopenmp_target_with_args : Error<
"invalid -Xopenmp-target argument: '%0', options requiring arguments are unsupported">;
def err_drv_argument_only_allowed_with : Error<
"invalid argument '%0' only allowed with '%1'">;
+def err_drv_opt_unsupported_input_type : Error<
+ "'%0' invalid for input of type %1">;
def err_drv_amdgpu_ieee_without_no_honor_nans : Error<
"invalid argument '-mno-amdgpu-ieee' only allowed with relaxed NaN handling">;
def err_drv_argument_not_allowed_with : Error<
"invalid argument '%0' not allowed with '%1'">;
+def err_drv_cannot_open_randomize_layout_seed_file : Error<
+ "cannot read randomize layout seed file '%0'">;
def err_drv_invalid_version_number : Error<
"invalid version number in '%0'">;
def err_drv_no_linker_llvm_support : Error<
@@ -163,28 +222,36 @@ def err_drv_invalid_mtp : Error<
"invalid thread pointer reading mode '%0'">;
def err_drv_missing_arg_mtp : Error<
"missing argument to '%0'">;
-def err_drv_invalid_libcxx_deployment : Error<
- "invalid deployment target for -stdlib=libc++ (requires %0 or later)">;
+def warn_drv_missing_plugin_name : Warning<
+ "missing plugin name in %0">,
+ InGroup<InvalidCommandLineArgument>;
+def warn_drv_missing_plugin_arg : Warning<
+ "missing plugin argument for plugin %0 in %1">,
+ InGroup<InvalidCommandLineArgument>;
def err_drv_invalid_argument_to_option : Error<
"invalid argument '%0' to -%1">;
+def err_drv_missing_sanitizer_ignorelist : Error<
+ "missing sanitizer ignorelist: '%0'">;
def err_drv_malformed_sanitizer_ignorelist : Error<
"malformed sanitizer ignorelist: '%0'">;
-def err_drv_malformed_sanitizer_coverage_whitelist : Error<
- "malformed sanitizer coverage whitelist: '%0'">;
+def err_drv_malformed_sanitizer_coverage_allowlist : Error<
+ "malformed sanitizer coverage allowlist: '%0'">;
def err_drv_malformed_sanitizer_coverage_ignorelist : Error<
"malformed sanitizer coverage ignorelist: '%0'">;
+def err_drv_malformed_sanitizer_metadata_ignorelist : Error<
+ "malformed sanitizer metadata ignorelist: '%0'">;
+def err_drv_unsupported_static_sanitizer_darwin : Error<
+ "static %0 runtime is not supported on darwin">;
def err_drv_duplicate_config : Error<
"no more than one option '--config' is allowed">;
-def err_drv_config_file_not_exist : Error<
- "configuration file '%0' does not exist">;
+def err_drv_cannot_open_config_file : Error<
+ "configuration file '%0' cannot be opened: %1">;
def err_drv_config_file_not_found : Error<
"configuration file '%0' cannot be found">;
def note_drv_config_file_searched_in : Note<
"was searched for in the directory: %0">;
def err_drv_cannot_read_config_file : Error<
- "cannot read configuration file '%0'">;
-def err_drv_nested_config_file: Error<
- "option '--config' is not allowed inside configuration file">;
+ "cannot read configuration file '%0': %1">;
def err_drv_arg_requires_bitcode_input: Error<
"option '%0' requires input to be LLVM bitcode">;
@@ -193,7 +260,7 @@ def err_target_unsupported_arch
def err_cpu_unsupported_isa
: Error<"CPU '%0' does not support '%1' execution mode">;
def err_arch_unsupported_isa
- : Error<"Architecture '%0' does not support '%1' execution mode">;
+ : Error<"architecture '%0' does not support '%1' execution mode">;
def err_drv_I_dash_not_supported : Error<
"'%0' not supported, please use -iquote instead">;
@@ -206,6 +273,7 @@ def warn_drv_unknown_argument_clang_cl : Warning<
def warn_drv_unknown_argument_clang_cl_with_suggestion : Warning<
"unknown argument ignored in clang-cl '%0'; did you mean '%1'?">,
InGroup<UnknownArgument>;
+def err_drv_unknown_target_triple : Error<"unknown target triple '%0'">;
def warn_drv_ycyu_different_arg_clang_cl : Warning<
"support for '/Yc' and '/Yu' with different filenames not implemented yet; flags ignored">,
@@ -214,12 +282,17 @@ def warn_drv_yc_multiple_inputs_clang_cl : Warning<
"support for '/Yc' with more than one source file not implemented yet; flag ignored">,
InGroup<ClangClPch>;
+def warn_drv_potentially_misspelled_joined_argument : Warning<
+ "joined argument treated as '%0'; did you mean '%1'?">, InGroup<UnknownArgument>;
+
def err_drv_invalid_value : Error<"invalid value '%1' in '%0'">;
def err_drv_invalid_int_value : Error<"invalid integral value '%1' in '%0'">;
def err_drv_invalid_value_with_suggestion : Error<
"invalid value '%1' in '%0', expected one of: %2">;
+def err_drv_alignment_not_power_of_two : Error<"alignment is not a power of 2 in '%0'">;
def err_drv_invalid_remap_file : Error<
"invalid option '%0' not of the form <from-file>;<to-file>">;
+def err_drv_invalid_gcc_install_dir : Error<"'%0' does not contain a GCC installation">;
def err_drv_invalid_gcc_output_type : Error<
"invalid output type '%0' for use with gcc tool">;
def err_drv_cc_print_options_failure : Error<
@@ -227,12 +300,15 @@ def err_drv_cc_print_options_failure : Error<
def err_drv_lto_without_lld : Error<"LTO requires -fuse-ld=lld">;
def err_drv_preamble_format : Error<
"incorrect format for -preamble-bytes=N,END">;
+def err_drv_header_unit_extra_inputs : Error<
+ "multiple inputs are not valid for header units (first extra '%0')">;
def warn_invalid_ios_deployment_target : Warning<
"invalid iOS deployment version '%0', iOS 10 is the maximum deployment "
"target for 32-bit targets">, InGroup<InvalidIOSDeploymentTarget>,
DefaultError;
def err_invalid_macos_32bit_deployment_target : Error<
"32-bit targets are not supported when building for Mac Catalyst">;
+def err_drv_invalid_os_in_arg : Error<"invalid OS value '%0' in '%1'">;
def err_drv_conflicting_deployment_targets : Error<
"conflicting deployment targets, both '%0' and '%1' are present in environment">;
def err_arc_unsupported_on_runtime : Error<
@@ -262,30 +338,45 @@ def err_drv_no_neon_modifier : Error<"[no]neon is not accepted as modifier, plea
def err_drv_invalid_omp_target : Error<"OpenMP target is invalid: '%0'">;
def err_drv_incompatible_omp_arch : Error<"OpenMP target architecture '%0' pointer size is incompatible with host '%1'">;
def err_drv_omp_host_ir_file_not_found : Error<
- "The provided host compiler IR file '%0' is required to generate code for OpenMP target regions but cannot be found.">;
+ "provided host compiler IR file '%0' is required to generate code for OpenMP "
+ "target regions but cannot be found">;
def err_drv_omp_host_target_not_supported : Error<
- "The target '%0' is not a supported OpenMP host target.">;
+ "target '%0' is not a supported OpenMP host target">;
def err_drv_expecting_fopenmp_with_fopenmp_targets : Error<
- "The option -fopenmp-targets must be used in conjunction with a -fopenmp option compatible with offloading, please use -fopenmp=libomp or -fopenmp=libiomp5.">;
+ "'-fopenmp-targets' must be used in conjunction with a '-fopenmp' option "
+ "compatible with offloading; e.g., '-fopenmp=libomp' or '-fopenmp=libiomp5'">;
+def err_drv_failed_to_deduce_target_from_arch : Error<
+ "failed to deduce triple for target architecture '%0'; specify the triple "
+ "using '-fopenmp-targets' and '-Xopenmp-target' instead.">;
def err_drv_omp_offload_target_missingbcruntime : Error<
- "No library '%0' found in the default clang lib directory or in LIBRARY_PATH. Please use --libomptarget-%1-bc-path to specify %1 bitcode library.">;
-def err_drv_omp_offload_target_bcruntime_not_found : Error<"Bitcode library '%0' does not exist.">;
-def err_drv_omp_offload_target_cuda_version_not_support : Error<"NVPTX target requires CUDA 9.2 or above. CUDA %0 is detected.">;
+ "no library '%0' found in the default clang lib directory or in LIBRARY_PATH"
+ "; use '--libomptarget-%1-bc-path' to specify %1 bitcode library">;
+def err_drv_omp_offload_target_bcruntime_not_found : Error<
+ "bitcode library '%0' does not exist">;
+def err_drv_omp_offload_target_cuda_version_not_support : Error<
+ "NVPTX target requires CUDA 9.2 or above; CUDA %0 detected">;
def warn_drv_omp_offload_target_duplicate : Warning<
- "The OpenMP offloading target '%0' is similar to target '%1' already specified - will be ignored.">,
- InGroup<OpenMPTarget>;
+ "OpenMP offloading target '%0' is similar to target '%1' already specified; "
+ "will be ignored">, InGroup<OpenMPTarget>;
def err_drv_unsupported_embed_bitcode
: Error<"%0 is not supported with -fembed-bitcode">;
def err_drv_bitcode_unsupported_on_toolchain : Error<
"-fembed-bitcode is not supported on versions of iOS prior to 6.0">;
def err_drv_negative_columns : Error<
- "invalid value '%1' in '%0', value must be 'none' or a positive integer">;
+ "invalid value '%1' in '%0', value must be 'none' or a positive integer">;
def err_drv_small_columns : Error<
- "invalid value '%1' in '%0', value must be '%2' or greater">;
+ "invalid value '%1' in '%0', value must be '%2' or greater">;
def err_drv_invalid_malign_branch_EQ : Error<
"invalid argument '%0' to -malign-branch=; each element must be one of: %1">;
+def err_drv_print_header_env_var : Error<
+ "environment variable CC_PRINT_HEADERS_%select{FORMAT|FILTERING}0 has invalid value %1">;
+def err_drv_print_header_env_var_combination : Error<
+ "unsupported combination: CC_PRINT_HEADERS_FORMAT=%0 and CC_PRINT_HEADERS_FILTERING=%1">;
+def err_drv_print_header_env_var_combination_cc1 : Error<
+ "unsupported combination: -header-include-format=%0 and -header-include-filtering=%1">;
+
def warn_O4_is_O3 : Warning<"-O4 is equivalent to -O3">, InGroup<Deprecated>;
def warn_drv_optimization_value : Warning<"optimization level '%0' is not supported; using '%1%2' instead">,
InGroup<InvalidCommandLineArgument>;
@@ -300,7 +391,8 @@ def warn_drv_unsupported_debug_info_opt_for_target : Warning<
"debug information option '%0' is not supported for target '%1'">,
InGroup<UnsupportedTargetOpt>;
def warn_drv_dwarf_version_limited_by_target : Warning<
- "debug information option '%0' is not supported. It needs DWARF-%2 but target '%1' only provides DWARF-%3.">,
+ "debug information option '%0' is not supported; requires DWARF-%2 but "
+ "target '%1' only provides DWARF-%3">,
InGroup<UnsupportedTargetOpt>;
def warn_c_kext : Warning<
"ignoring -fapple-kext which is valid for C++ and Objective-C++ only">;
@@ -319,33 +411,53 @@ def warn_drv_preprocessed_input_file_unused : Warning<
def warn_drv_unused_argument : Warning<
"argument unused during compilation: '%0'">,
InGroup<UnusedCommandLineArgument>;
+def warn_drv_unused_x : Warning<
+ "'-x %0' after last input file has no effect">,
+ InGroup<UnusedCommandLineArgument>;
def warn_drv_empty_joined_argument : Warning<
"joined argument expects additional value: '%0'">,
InGroup<UnusedCommandLineArgument>;
def warn_drv_diagnostics_hotness_requires_pgo : Warning<
"argument '%0' requires profile-guided optimization information">,
InGroup<UnusedCommandLineArgument>;
+def warn_drv_diagnostics_misexpect_requires_pgo : Warning<
+ "argument '%0' requires profile-guided optimization information">,
+ InGroup<UnusedCommandLineArgument>;
def warn_drv_clang_unsupported : Warning<
"the clang compiler does not support '%0'">;
def warn_drv_deprecated_arg : Warning<
"argument '%0' is deprecated, use '%1' instead">, InGroup<Deprecated>;
+def warn_drv_deprecated_custom : Warning<
+ "argument '%0' is deprecated, %1">, InGroup<Deprecated>;
def warn_drv_assuming_mfloat_abi_is : Warning<
"unknown platform, assuming -mfloat-abi=%0">;
+def warn_drv_unsupported_float_abi_by_lib : Warning<
+ "float ABI '%0' is not supported by current library">,
+ InGroup<UnsupportedABI>;
+def warn_drv_no_floating_point_registers: Warning<
+ "'%0': selected processor lacks floating point registers">,
+ InGroup<UnsupportedABI>;
def warn_ignoring_ftabstop_value : Warning<
"ignoring invalid -ftabstop value '%0', using default value %1">;
-def warn_drv_overriding_flag_option : Warning<
+def warn_drv_overriding_option : Warning<
"overriding '%0' option with '%1'">,
- InGroup<DiagGroup<"overriding-t-option">>;
+ InGroup<DiagGroup<"overriding-option">>;
def warn_drv_treating_input_as_cxx : Warning<
"treating '%0' input as '%1' when in C++ mode, this behavior is deprecated">,
InGroup<Deprecated>;
def warn_drv_pch_not_first_include : Warning<
"precompiled header '%0' was ignored because '%1' is not first '-include'">;
+def warn_drv_pch_ignoring_gch_file : Warning<
+ "precompiled header '%0' was ignored because it is not a clang PCH file">,
+ InGroup<IgnoredGCH>;
+def warn_drv_pch_ignoring_gch_dir : Warning<
+ "precompiled header directory '%0' was ignored because it contains no clang PCH files">,
+ InGroup<IgnoredGCH>;
def warn_missing_sysroot : Warning<"no such sysroot directory: '%0'">,
InGroup<DiagGroup<"missing-sysroot">>;
def warn_incompatible_sysroot : Warning<"using sysroot for '%0' but targeting '%1'">,
InGroup<DiagGroup<"incompatible-sysroot">>;
-def warn_debug_compression_unavailable : Warning<"cannot compress debug sections (zlib not installed)">,
+def warn_debug_compression_unavailable : Warning<"cannot compress debug sections (%0 not enabled)">,
InGroup<DiagGroup<"debug-compression-unavailable">>;
def warn_drv_disabling_vptr_no_rtti_default : Warning<
"implicitly disabling vptr sanitizer because rtti wasn't enabled">,
@@ -357,17 +469,18 @@ def warn_ignoring_verify_debuginfo_preserve_export : Warning<
"ignoring -fverify-debuginfo-preserve-export=%0 because "
"-fverify-debuginfo-preserve wasn't enabled">,
InGroup<UnusedCommandLineArgument>;
-def err_invalid_branch_protection: Error <
- "invalid branch protection option '%0' in '%1'">;
-def err_invalid_sls_hardening : Error<
- "invalid sls hardening option '%0' in '%1'">;
+def warn_unsupported_branch_protection: Warning <
+ "invalid branch protection option '%0' in '%1'">, InGroup<BranchProtection>;
def err_sls_hardening_arm_not_supported : Error<
"-mharden-sls is only supported on armv7-a or later">;
+def warn_drv_large_data_threshold_invalid_code_model: Warning<
+ "'%0' only applies to medium and large code models">,
+ InGroup<UnusedCommandLineArgument>;
def note_drv_command_failed_diag_msg : Note<
"diagnostic msg: %0">;
def note_drv_t_option_is_global : Note<
- "The last /TC or /TP option takes precedence over earlier instances">;
+ "the last '/TC' or '/TP' option takes precedence over earlier instances">;
def note_drv_address_sanitizer_debug_runtime : Note<
"AddressSanitizer doesn't support linking with debug runtime libraries yet">;
def note_drv_use_standard : Note<"use '%0'"
@@ -387,12 +500,25 @@ def err_analyzer_checker_option_invalid_input : Error<
"invalid input for checker option '%0', that expects %1">;
def err_analyzer_checker_incompatible_analyzer_option : Error<
"checker cannot be enabled with analyzer option '%0' == %1">;
-
-def err_drv_invalid_hvx_length : Error<
- "-mhvx-length is not supported without a -mhvx/-mhvx= flag">;
-def warn_drv_vectorize_needs_hvx : Warning<
- "auto-vectorization requires HVX, use -mhvx to enable it">,
+def err_analyzer_not_built_with_z3 : Error<
+ "analyzer constraint manager 'z3' is only available if LLVM was built with "
+ "-DLLVM_ENABLE_Z3_SOLVER=ON">;
+def warn_analyzer_deprecated_option : Warning<
+ "analyzer option '%0' is deprecated. This flag will be removed in %1, and "
+ "passing this option will be an error.">,
+ InGroup<DeprecatedStaticAnalyzerFlag>;
+def warn_analyzer_deprecated_option_with_alternative : Warning<
+ "analyzer option '%0' is deprecated. This flag will be removed in %1, and "
+ "passing this option will be an error. Use '%2' instead.">,
+ InGroup<DeprecatedStaticAnalyzerFlag>;
+
+def warn_drv_needs_hvx : Warning<
+ "%0 requires HVX, use -mhvx/-mhvx= to enable it">,
InGroup<OptionIgnored>;
+def err_drv_needs_hvx : Error<
+ "%0 requires HVX, use -mhvx/-mhvx= to enable it">;
+def err_drv_needs_hvx_version : Error<
+ "%0 is not supported on HVX %1">;
def err_drv_module_header_wrong_kind : Error<
"header file '%0' input type '%1' does not match type of prior input "
@@ -405,9 +531,21 @@ def err_test_module_file_extension_format : Error<
"-ftest-module-file-extension argument '%0' is not of the required form "
"'blockname:major:minor:hashed:user info'">;
+def err_drv_module_output_with_multiple_arch : Error<
+ "option '-fmodule-output' can't be used with multiple arch options">;
+
+def warn_drv_delayed_template_parsing_after_cxx20 : Warning<
+ "-fdelayed-template-parsing is deprecated after C++20">,
+ InGroup<DiagGroup<"delayed-template-parsing-in-cxx20">>;
+
+def err_drv_extract_api_wrong_kind : Error<
+ "header file '%0' input '%1' does not match the type of prior input "
+ "in api extraction; use '-x %2' to override">;
+
def warn_slash_u_filename : Warning<"'/U%0' treated as the '/U' option">,
InGroup<DiagGroup<"slash-u-filename">>;
-def note_use_dashdash : Note<"Use '--' to treat subsequent arguments as filenames">;
+def note_use_dashdash : Note<
+ "use '--' to treat subsequent arguments as filenames">;
def err_drv_ropi_rwpi_incompatible_with_pic : Error<
"embedded and GOT-based position independence are incompatible">;
@@ -415,7 +553,8 @@ def err_drv_ropi_incompatible_with_cxx : Error<
"ROPI is not compatible with c++">;
def err_stack_tagging_requires_hardware_feature : Error<
- "'-fsanitize=memtag' requires hardware support (+memtag)">;
+ "'-fsanitize=memtag-stack' requires hardware support (+memtag). For Armv8 or "
+ "Armv9, try compiling with -march=armv8a+memtag or -march=armv9a+memtag">;
def err_cmse_pi_are_incompatible : Error<
"cmse is not compatible with %select{RWPI|ROPI}0">;
@@ -463,27 +602,22 @@ def err_drv_unsupported_fpatchable_function_entry_argument : Error<
"the second argument of '-fpatchable-function-entry' must be smaller than the first argument">;
def warn_drv_unable_to_find_directory_expected : Warning<
- "unable to find %0 directory, expected to be in '%1'">,
+ "unable to find %0 directory, expected to be in '%1' found via %2">,
InGroup<InvalidOrNonExistentDirectory>, DefaultIgnore;
-def warn_drv_ps4_force_pic : Warning<
- "option '%0' was ignored by the PS4 toolchain, using '-fPIC'">,
+def warn_drv_ps_force_pic : Warning<
+ "option '%0' was ignored by the %1 toolchain, using '-fPIC'">,
InGroup<OptionIgnored>;
-def warn_drv_ps4_sdk_dir : Warning<
- "environment variable SCE_ORBIS_SDK_DIR is set, but points to invalid or nonexistent directory '%0'">,
- InGroup<InvalidOrNonExistentDirectory>;
-
-def err_drv_unsupported_linker : Error<"unsupported value '%0' for -linker option">;
def err_drv_defsym_invalid_format : Error<"defsym must be of the form: sym=value: %0">;
-def err_drv_defsym_invalid_symval : Error<"Value is not an integer: %0">;
+def err_drv_defsym_invalid_symval : Error<"value is not an integer: %0">;
def warn_drv_msvc_not_found : Warning<
"unable to find a Visual Studio installation; "
"try running Clang from a developer command prompt">,
InGroup<DiagGroup<"msvc-not-found">>;
def warn_drv_fuse_ld_path : Warning<
- "'-fuse-ld=' taking a path is deprecated. Use '--ld-path=' instead">,
+ "'-fuse-ld=' taking a path is deprecated; use '--ld-path=' instead">,
InGroup<FUseLdPath>, DefaultIgnore;
def warn_drv_fine_grained_bitfield_accesses_ignored : Warning<
@@ -503,37 +637,45 @@ def warn_drv_global_isel_incomplete_opt : Warning<
InGroup<GlobalISel>;
def warn_drv_moutline_unsupported_opt : Warning<
- "The '%0' architecture does not support -moutline; flag ignored">,
+ "'%0' does not support '-moutline'; flag ignored">,
InGroup<OptionIgnored>;
def warn_drv_moutline_atomics_unsupported_opt : Warning<
- "The '%0' architecture does not support -moutline-atomics; flag ignored">,
+ "'%0' does not support '-%1'; flag ignored">,
InGroup<OptionIgnored>;
def warn_drv_darwin_sdk_invalid_settings : Warning<
"SDK settings were ignored as 'SDKSettings.json' could not be parsed">,
InGroup<DiagGroup<"darwin-sdk-settings">>;
-def err_drv_trivial_auto_var_init_zero_disabled : Error<
- "-ftrivial-auto-var-init=zero hasn't been enabled. Enable it at your own peril for benchmarking purpose only with "
- "-enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang">;
+def err_drv_darwin_sdk_missing_arclite : Error<
+ "SDK does not contain 'libarclite' at the path '%0'; try increasing the minimum deployment target">;
def err_drv_trivial_auto_var_init_stop_after_missing_dependency : Error<
- "-ftrivial-auto-var-init-stop-after=* is used without -ftrivial-auto-var-init=zero or -ftrivial-auto-var-init=pattern.">;
+ "'-ftrivial-auto-var-init-stop-after=*' is used without "
+ "'-ftrivial-auto-var-init=zero' or '-ftrivial-auto-var-init=pattern'">;
def err_drv_trivial_auto_var_init_stop_after_invalid_value : Error<
- "-ftrivial-auto-var-init-stop-after=* only accepts positive integers.">;
+ "'-ftrivial-auto-var-init-stop-after=*' only accepts positive integers">;
-def warn_drv_msp430_hwmult_unsupported : Warning<"the given MCU does not "
- "support hardware multiply, but -mhwmult is set to %0.">,
- InGroup<InvalidCommandLineArgument>;
-def warn_drv_msp430_hwmult_mismatch : Warning<"the given MCU supports %0 "
- "hardware multiply, but -mhwmult is set to %1.">,
+def err_drv_trivial_auto_var_init_max_size_missing_dependency : Error<
+ "'-ftrivial-auto-var-init-max-size=*' is used without "
+ "'-ftrivial-auto-var-init=zero' or '-ftrivial-auto-var-init=pattern'">;
+
+def err_drv_trivial_auto_var_init_max_size_invalid_value : Error<
+ "'-ftrivial-auto-var-init-max-size=*' only accepts positive integers (in bytes)">;
+
+def warn_drv_msp430_hwmult_unsupported : Warning<
+ "the given MCU does not support hardware multiply, but '-mhwmult' is set to "
+ "%0">, InGroup<InvalidCommandLineArgument>;
+def warn_drv_msp430_hwmult_mismatch : Warning<
+ "the given MCU supports %0 hardware multiply, but '-mhwmult' is set to %1">,
InGroup<InvalidCommandLineArgument>;
-def warn_drv_msp430_hwmult_no_device : Warning<"no MCU device specified, but "
- "'-mhwmult' is set to 'auto', assuming no hardware multiply. Use -mmcu to "
- "specify a MSP430 device, or -mhwmult to set hardware multiply type "
- "explicitly.">, InGroup<InvalidCommandLineArgument>;
+def warn_drv_msp430_hwmult_no_device : Warning<
+ "no MCU device specified, but '-mhwmult' is set to 'auto', assuming no "
+ "hardware multiply; use '-mmcu' to specify an MSP430 device, or '-mhwmult' "
+ "to set the hardware multiply type explicitly">,
+ InGroup<InvalidCommandLineArgument>;
def warn_drv_libstdcxx_not_found : Warning<
"include path for libstdc++ headers not found; pass '-stdlib=libc++' on the "
@@ -542,17 +684,118 @@ def warn_drv_libstdcxx_not_found : Warning<
def err_drv_cannot_mix_options : Error<"cannot specify '%1' along with '%0'">;
-def err_drv_invalid_object_mode : Error<"OBJECT_MODE setting %0 is not recognized and is not a valid setting.">;
+def err_drv_invalid_object_mode : Error<
+ "OBJECT_MODE setting %0 is not recognized and is not a valid setting">;
def err_aix_unsupported_tls_model : Error<"TLS model '%0' is not yet supported on AIX">;
+def err_roptr_requires_data_sections: Error<"-mxcoff-roptr is supported only with -fdata-sections">;
+def err_roptr_cannot_build_shared: Error<"-mxcoff-roptr is not supported with -shared">;
-def err_invalid_cxx_abi : Error<"Invalid C++ ABI name '%0'">;
+def err_invalid_cxx_abi : Error<"invalid C++ ABI name '%0'">;
def err_unsupported_cxx_abi : Error<"C++ ABI '%0' is not supported on target triple '%1'">;
-def note_cc1_round_trip_original : Note<"Original arguments in round-trip: %0">;
-def note_cc1_round_trip_generated : Note<"Generated arguments #%0 in round-trip: %1">;
-def remark_cc1_round_trip_generated : Remark<"Generated arguments #%0 in round-trip: %1">, InGroup<RoundTripCC1Args>;
-def err_cc1_round_trip_fail_then_ok : Error<"Original arguments parse failed, then succeeded in round-trip">;
-def err_cc1_round_trip_ok_then_fail : Error<"Generated arguments parse failed in round-trip">;
-def err_cc1_round_trip_mismatch : Error<"Generated arguments do not match in round-trip">;
+def note_cc1_round_trip_original : Note<"original arguments in round-trip: %0">;
+def note_cc1_round_trip_generated : Note<
+ "generated arguments #%0 in round-trip: %1">;
+def remark_cc1_round_trip_generated : Remark<
+ "generated arguments #%0 in round-trip: %1">, InGroup<RoundTripCC1Args>;
+def err_cc1_round_trip_fail_then_ok : Error<
+ "original arguments parse failed, then succeeded in round-trip">;
+def err_cc1_round_trip_ok_then_fail : Error<
+ "generated arguments parse failed in round-trip">;
+def err_cc1_round_trip_mismatch : Error<
+ "generated arguments do not match in round-trip">;
+def err_cc1_unbounded_vscale_min : Error<
+ "minimum vscale must be an unsigned integer greater than 0">;
+
+def err_drv_using_omit_rtti_component_without_no_rtti : Error<
+ "-fexperimental-omit-vtable-rtti call only be used with -fno-rtti">;
+
+def err_drv_ssp_missing_offset_argument : Error<
+ "'%0' is used without '-mstack-protector-guard-offset', and there is no default">;
+
+def err_drv_only_one_offload_target_supported : Error<
+ "only one offload target is supported">;
+def err_drv_invalid_or_unsupported_offload_target : Error<
+ "invalid or unsupported offload target: '%0'">;
+def err_drv_cuda_offload_only_emit_bc : Error<
+ "CUDA offload target is supported only along with --emit-llvm">;
+
+def warn_drv_jmc_requires_debuginfo : Warning<
+ "%0 requires debug info. Use %1 or debug options that enable debugger's "
+ "stepping function; option ignored">,
+ InGroup<OptionIgnored>;
+
+def warn_drv_fjmc_for_elf_only : Warning<
+ "-fjmc works only for ELF; option ignored">,
+ InGroup<OptionIgnored>;
+
+def warn_target_override_arm64ec : Warning<
+ "/arm64EC has been overridden by specified target: %0; option ignored">,
+ InGroup<OptionIgnored>;
+
+def err_drv_target_variant_invalid : Error<
+ "unsupported '%0' value '%1'; use 'ios-macabi' instead">;
+
+def err_drv_invalid_directx_shader_module : Error<
+ "invalid profile : %0">;
+def err_drv_dxc_missing_target_profile : Error<
+ "target profile option (-T) is missing">;
+def err_drv_hlsl_unsupported_target : Error<
+ "HLSL code generation is unsupported for target '%0'">;
+def err_drv_hlsl_bad_shader_required_in_target : Error<
+ "%select{shader model|Vulkan environment|shader stage}0 is required as %select{OS|environment}1 in target '%2' for HLSL code generation">;
+
+def err_drv_hlsl_bad_shader_unsupported : Error<
+ "%select{shader model|Vulkan environment|shader stage}0 '%1' in target '%2' is invalid for HLSL code generation">;
+def warn_drv_dxc_missing_dxv : Warning<"dxv not found. "
+ "Resulting DXIL will not be validated or signed for use in release environments.">,
+ InGroup<DXILValidation>;
+
+def err_drv_invalid_range_dxil_validator_version : Error<
+ "invalid validator version : %0\n"
+ "Validator version must be less than or equal to current internal version.">;
+def err_drv_invalid_format_dxil_validator_version : Error<
+ "invalid validator version : %0\n"
+ "Format of validator version is \"<major>.<minor>\" (ex:\"1.4\").">;
+def err_drv_invalid_empty_dxil_validator_version : Error<
+ "invalid validator version : %0\n"
+ "If validator major version is 0, minor version must also be 0.">;
+
+def warn_drv_sarif_format_unstable : Warning<
+ "diagnostic formatting in SARIF mode is currently unstable">,
+ InGroup<DiagGroup<"sarif-format-unstable">>;
+
+def err_drv_riscv_unsupported_with_linker_relaxation : Error<
+ "%0 is unsupported with RISC-V linker relaxation (-mrelax)">;
+
+def warn_drv_loongarch_conflicting_implied_val : Warning<
+ "ignoring '%0' as it conflicts with that implied by '%1' (%2)">,
+ InGroup<OptionIgnored>;
+def err_drv_loongarch_invalid_mfpu_EQ : Error<
+ "invalid argument '%0' to -mfpu=; must be one of: 64, 32, none, 0 (alias for none)">;
+def err_drv_loongarch_wrong_fpu_width_for_lsx : Error<
+ "wrong fpu width; LSX depends on 64-bit FPU.">;
+def err_drv_loongarch_wrong_fpu_width_for_lasx : Error<
+ "wrong fpu width; LASX depends on 64-bit FPU.">;
+def err_drv_loongarch_invalid_simd_option_combination : Error<
+ "invalid option combination; LASX depends on LSX.">;
+
+def err_drv_expand_response_file : Error<
+ "failed to expand response file: %0">;
+
+def warn_drv_missing_multilib : Warning<
+ "no multilib found matching flags: %0">,
+ InGroup<DiagGroup<"missing-multilib">>;
+def note_drv_available_multilibs : Note<
+ "available multilibs are:%0">;
+
+def warn_android_unversioned_fallback : Warning<
+ "Using unversioned Android target directory %0 for target %1. Unversioned"
+ " directories will not be used in Clang 19. Provide a versioned directory"
+ " for the target version or lower instead.">,
+ InGroup<DiagGroup<"android-unversioned-fallback">>;
+
+def err_drv_triple_version_invalid : Error<
+ "version '%0' in target triple '%1' is invalid">;
}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticError.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticError.h
index 430da6f724ed..744f7fe19db7 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticError.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticError.h
@@ -6,11 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_BASIC_DIAGNOSTIC_ERROR_H
-#define LLVM_CLANG_BASIC_DIAGNOSTIC_ERROR_H
+#ifndef LLVM_CLANG_BASIC_DIAGNOSTICERROR_H
+#define LLVM_CLANG_BASIC_DIAGNOSTICERROR_H
#include "clang/Basic/PartialDiagnostic.h"
#include "llvm/Support/Error.h"
+#include <optional>
namespace clang {
@@ -34,10 +35,10 @@ public:
}
/// Extracts and returns the diagnostic payload from the given \c Error if
- /// the error is a \c DiagnosticError. Returns none if the given error is not
- /// a \c DiagnosticError.
- static Optional<PartialDiagnosticAt> take(llvm::Error &Err) {
- Optional<PartialDiagnosticAt> Result;
+ /// the error is a \c DiagnosticError. Returns std::nullopt if the given error
+ /// is not a \c DiagnosticError.
+ static std::optional<PartialDiagnosticAt> take(llvm::Error &Err) {
+ std::optional<PartialDiagnosticAt> Result;
Err = llvm::handleErrors(std::move(Err), [&](DiagnosticError &E) {
Result = std::move(E.getDiagnostic());
});
@@ -57,4 +58,4 @@ private:
} // end namespace clang
-#endif // LLVM_CLANG_BASIC_DIAGNOSTIC_ERROR_H
+#endif // LLVM_CLANG_BASIC_DIAGNOSTICERROR_H
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontend.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontend.h
index f57c587fb469..ab4e855f2de0 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontend.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontend.h
@@ -15,7 +15,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
ENUM,
#define FRONTENDSTART
#include "clang/Basic/DiagnosticFrontendKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontendKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontendKinds.td
index 0f4ccec38550..85ecfdf9de62 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontendKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticFrontendKinds.td
@@ -11,7 +11,7 @@ class BackendInfo : CatBackend, ShowInSystemHeader;
let Component = "Frontend" in {
def err_fe_error_opening : Error<"error opening '%0': %1">;
-def err_fe_error_reading : Error<"error reading '%0'">;
+def err_fe_error_reading : Error<"error reading '%0': %1">;
def err_fe_error_reading_stdin : Error<"error reading stdin: %0">;
def err_fe_error_backend : Error<"error in backend: %0">, DefaultFatal;
@@ -22,10 +22,11 @@ def note_fe_inline_asm_here : Note<"instantiated into assembly here">;
def err_fe_source_mgr : Error<"%0">, CatSourceMgr;
def warn_fe_source_mgr : Warning<"%0">, CatSourceMgr, InGroup<BackendSourceMgr>;
def note_fe_source_mgr : Note<"%0">, CatSourceMgr;
-def err_fe_cannot_link_module : Error<"cannot link module '%0': %1">,
- DefaultFatal;
+def err_fe_linking_module : Error<"cannot link module '%0': %1">, DefaultFatal;
+def warn_fe_linking_module : Warning<"linking module '%0': %1">, InGroup<LinkerWarnings>;
+def note_fe_linking_module : Note<"linking module '%0': %1">;
-def warn_fe_frame_larger_than : Warning<"stack frame size (%0) exceeds limit (%1) in %q2">,
+def warn_fe_frame_larger_than : Warning<"stack frame size (%0) exceeds limit (%1) in '%2'">,
BackendInfo, InGroup<BackendFrameLargerThan>;
def warn_fe_backend_frame_larger_than: Warning<"%0">,
BackendInfo, InGroup<BackendFrameLargerThan>;
@@ -34,6 +35,11 @@ def note_fe_backend_frame_larger_than: Note<"%0">, BackendInfo;
def warn_fe_backend_plugin: Warning<"%0">, BackendInfo, InGroup<BackendPlugin>;
def err_fe_backend_plugin: Error<"%0">, BackendInfo;
+
+def warn_fe_backend_resource_limit: Warning<"%0 (%1) exceeds limit (%2) in '%3'">, BackendInfo, InGroup<BackendPlugin>;
+def err_fe_backend_resource_limit: Error<"%0 (%1) exceeds limit (%2) in '%3'">, BackendInfo;
+def note_fe_backend_resource_limit: Note<"%0 (%1) exceeds limit (%2) in '%3'">, BackendInfo;
+
def remark_fe_backend_plugin: Remark<"%0">, BackendInfo, InGroup<RemarkBackendPlugin>;
def note_fe_backend_plugin: Note<"%0">, BackendInfo;
@@ -46,6 +52,16 @@ def warn_fe_backend_unsupported_fp_rounding : Warning<
def warn_fe_backend_unsupported_fp_exceptions : Warning<
"overriding currently unsupported use of floating point exceptions "
"on this target">, InGroup<UnsupportedFPOpt>;
+def warn_fe_backend_invalid_feature_flag : Warning<
+ "feature flag '%0' must start with either '+' to enable the feature or '-'"
+ " to disable it; flag ignored">, InGroup<InvalidCommandLineArgument>;
+def warn_fe_backend_readonly_feature_flag : Warning<
+ "feature flag '%0' is ignored since the feature is read only">,
+ InGroup<InvalidCommandLineArgument>;
+
+def err_incompatible_fp_eval_method_options : Error<
+ "option 'ffp-eval-method' cannot be used with option "
+ "%select{'fapprox-func'|'mreassociate'|'freciprocal'}0">;
def remark_fe_backend_optimization_remark : Remark<"%0">, BackendInfo,
InGroup<BackendOptimizationRemark>;
@@ -64,6 +80,7 @@ def remark_fe_backend_optimization_remark_analysis_aliasing : Remark<"%0; "
"the '__restrict__' qualifier with the independent array arguments. "
"Erroneous results will occur if these options are incorrectly applied!">,
BackendInfo, InGroup<BackendOptimizationRemarkAnalysis>;
+
def warn_fe_backend_optimization_failure : Warning<"%0">, BackendInfo,
InGroup<BackendOptimizationFailure>, DefaultWarn;
def note_fe_backend_invalid_loc : Note<"could "
@@ -72,6 +89,12 @@ def note_fe_backend_invalid_loc : Note<"could "
def err_fe_backend_unsupported : Error<"%0">, BackendInfo;
def warn_fe_backend_unsupported : Warning<"%0">, BackendInfo;
+def err_fe_backend_error_attr :
+ Error<"call to '%0' declared with 'error' attribute: %1">, BackendInfo;
+def warn_fe_backend_warning_attr :
+ Warning<"call to '%0' declared with 'warning' attribute: %1">, BackendInfo,
+ InGroup<BackendWarningAttributes>;
+
def err_fe_invalid_code_complete_file : Error<
"cannot locate code-completion file %0">, DefaultFatal;
def err_fe_dependency_file_requires_MT : Error<
@@ -113,9 +136,8 @@ def err_fe_invalid_alignment : Error<
"invalid value '%1' in '%0'; alignment must be a power of 2">;
def err_fe_invalid_exception_model
: Error<"invalid exception model '%select{none|sjlj|seh|dwarf|wasm}0' for target '%1'">;
-def warn_fe_concepts_ts_flag : Warning<
- "-fconcepts-ts is deprecated - use '-std=c++20' for Concepts support">,
- InGroup<Deprecated>;
+def err_fe_invalid_source_date_epoch : Error<
+ "environment variable 'SOURCE_DATE_EPOCH' ('%0') must be a non-negative decimal integer <= %1">;
def err_fe_unable_to_load_basic_block_sections_file : Error<
"unable to load basic block sections function list: '%0'">;
@@ -145,7 +167,7 @@ def err_verify_no_such_marker : Error<
def err_verify_missing_start : Error<
"cannot find start ('{{') of expected %0">;
def err_verify_missing_end : Error<
- "cannot find end ('}}') of expected %0">;
+ "cannot find end ('%1') of expected %0">;
def err_verify_invalid_content : Error<
"invalid expected %0: %1">;
def err_verify_missing_regex : Error<
@@ -191,10 +213,7 @@ def note_incompatible_analyzer_plugin_api : Note<
def err_module_build_requires_fmodules : Error<
"module compilation requires '-fmodules'">;
def err_module_interface_requires_cpp_modules : Error<
- "module interface compilation requires '-std=c++20' or '-fmodules-ts'">;
-def err_header_module_requires_modules : Error<
- "header module compilation requires '-fmodules', '-std=c++20', or "
- "'-fmodules-ts'">;
+ "module interface compilation requires '-std=c++20'">;
def warn_module_config_mismatch : Warning<
"module file %0 cannot be loaded due to a configuration mismatch with the current "
"compilation">, InGroup<DiagGroup<"module-file-config-mismatch">>, DefaultError;
@@ -226,9 +245,13 @@ def warn_module_config_macro_undef : Warning<
def note_module_def_undef_here : Note<
"macro was %select{defined|#undef'd}0 here">;
def remark_module_build : Remark<"building module '%0' as '%1'">,
+ ShowInSystemHeader,
InGroup<ModuleBuild>;
def remark_module_build_done : Remark<"finished building module '%0'">,
+ ShowInSystemHeader,
InGroup<ModuleBuild>;
+def remark_module_lock : Remark<"locking '%0' to build module '%1'">,
+ InGroup<ModuleLock>;
def err_modules_embed_file_not_found :
Error<"file '%0' specified by '-fmodules-embed-file=' not found">,
DefaultFatal;
@@ -239,13 +262,18 @@ def err_test_module_file_extension_version : Error<
"test module file extension '%0' has different version (%1.%2) than expected "
"(%3.%4)">;
+def warn_eagerly_load_for_standard_cplusplus_modules : Warning<
+ "the form '-fmodule-file=<BMI-path>' is deprecated for standard C++ named modules;"
+ "consider to use '-fmodule-file=<module-name>=<BMI-path>' instead">,
+ InGroup<DiagGroup<"eager-load-cxx-named-modules">>;
+
def err_missing_vfs_overlay_file : Error<
"virtual filesystem overlay file '%0' not found">, DefaultFatal;
def err_invalid_vfs_overlay : Error<
"invalid virtual filesystem overlay file '%0'">, DefaultFatal;
def warn_option_invalid_ocl_version : Warning<
- "OpenCL version %0 does not support the option '%1'">, InGroup<Deprecated>;
+ "%0 does not support the option '%1'">, InGroup<Deprecated>;
def err_builtin_needs_feature : Error<"%0 needs target feature %1">;
def err_function_needs_feature : Error<
@@ -256,11 +284,17 @@ def warn_avx_calling_convention
: Warning<"AVX vector %select{return|argument}0 of type %1 without '%2' "
"enabled changes the ABI">,
InGroup<DiagGroup<"psabi">>;
-def err_avx_calling_convention : Error<warn_avx_calling_convention.Text>;
+def err_avx_calling_convention : Error<warn_avx_calling_convention.Summary>;
def err_alias_to_undefined : Error<
"%select{alias|ifunc}0 must point to a defined "
"%select{variable or |}1function">;
+def err_alias_to_common : Error<
+ "alias to a variable in a common section is not allowed">;
+def note_alias_requires_mangled_name : Note<
+ "the %select{function or variable|function}0 specified in an %select{alias|ifunc}1 must refer to its mangled name">;
+def note_alias_mangled_name_alternative: Note<
+ "function by that name is mangled as \"%0\"">;
def warn_alias_to_weak_alias : Warning<
"%select{alias|ifunc}2 will always resolve to %0 even if weak definition of "
"%1 is overridden">,
@@ -269,6 +303,10 @@ def err_duplicate_mangled_name : Error<
"definition with same mangled name '%0' as another definition">;
def err_cyclic_alias : Error<
"%select{alias|ifunc}0 definition is part of a cycle">;
+def err_hidden_visibility_dllexport : Error<
+ "hidden visibility cannot be applied to 'dllexport' declaration">;
+def err_non_default_visibility_dllimport : Error<
+ "non-default visibility cannot be applied to 'dllimport' declaration">;
def err_ifunc_resolver_return : Error<
"ifunc resolver function must return a pointer">;
@@ -284,6 +322,10 @@ def warn_atomic_op_oversized : Warning<
"; the access size (%0 bytes) exceeds the max lock-free size (%1 bytes)">,
InGroup<AtomicAlignment>;
+def warn_sync_op_misaligned : Warning<
+ "__sync builtin operation MUST have natural alignment (consider using __atomic).">,
+ InGroup<SyncAlignment>;
+
def warn_alias_with_section : Warning<
"%select{alias|ifunc}1 will not be in section '%0' but in the same section "
"as the %select{aliasee|resolver}2">,
@@ -302,6 +344,14 @@ def warn_profile_data_missing : Warning<
def warn_profile_data_unprofiled : Warning<
"no profile data available for file \"%0\"">,
InGroup<ProfileInstrUnprofiled>;
+def warn_profile_data_misexpect : Warning<
+ "Potential performance regression from use of __builtin_expect(): "
+ "Annotation was correct on %0 of profiled executions.">,
+ BackendInfo,
+ InGroup<MisExpect>;
} // end of instrumentation issue category
+def err_extract_api_ignores_file_not_found :
+ Error<"file '%0' specified by '--extract-api-ignores=' not found">, DefaultFatal;
+
}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticGroups.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticGroups.td
index 4b4928a7a00e..6765721ae700 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticGroups.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticGroups.td
@@ -15,9 +15,12 @@ def Implicit : DiagGroup<"implicit", [
ImplicitInt
]>;
+def DeprecatedStaticAnalyzerFlag : DiagGroup<"deprecated-static-analyzer-flag">;
+
// Empty DiagGroups are recognized by clang but ignored.
def ODR : DiagGroup<"odr">;
def : DiagGroup<"abi">;
+def : DiagGroup<"gnu-empty-initializer">; // Now a C extension, not GNU.
def AbsoluteValue : DiagGroup<"absolute-value">;
def MisspelledAssumption : DiagGroup<"misspelled-assumption">;
def UnknownAssumption : DiagGroup<"unknown-assumption">;
@@ -29,13 +32,15 @@ def GNUAnonymousStruct : DiagGroup<"gnu-anonymous-struct">;
def GNUAutoType : DiagGroup<"gnu-auto-type">;
def ArrayBounds : DiagGroup<"array-bounds">;
def ArrayBoundsPointerArithmetic : DiagGroup<"array-bounds-pointer-arithmetic">;
+def ArrayParameter : DiagGroup<"array-parameter">;
def AutoDisableVptrSanitizer : DiagGroup<"auto-disable-vptr-sanitizer">;
def Availability : DiagGroup<"availability">;
def Section : DiagGroup<"section">;
-def AutoImport : DiagGroup<"auto-import">;
+def : DiagGroup<"auto-import">;
def FrameworkHdrQuotedInclude : DiagGroup<"quoted-include-in-framework-header">;
def FrameworkIncludePrivateFromPublic :
DiagGroup<"framework-include-private-from-public">;
+def DeprecatedModuleDotMap : DiagGroup<"deprecated-module-dot-map">;
def FrameworkHdrAtImport : DiagGroup<"atimport-in-framework-header">;
def CXX14BinaryLiteral : DiagGroup<"c++14-binary-literal">;
def CXXPre14CompatBinaryLiteral : DiagGroup<"c++98-c++11-compat-binary-literal">;
@@ -44,7 +49,10 @@ def BinaryLiteral : DiagGroup<"binary-literal", [CXX14BinaryLiteral,
CXXPre14CompatBinaryLiteral,
GNUBinaryLiteral]>;
def GNUCompoundLiteralInitializer : DiagGroup<"gnu-compound-literal-initializer">;
-def BitFieldConstantConversion : DiagGroup<"bitfield-constant-conversion">;
+def SingleBitBitFieldConstantConversion :
+ DiagGroup<"single-bit-bitfield-constant-conversion">;
+def BitFieldConstantConversion : DiagGroup<"bitfield-constant-conversion",
+ [SingleBitBitFieldConstantConversion]>;
def BitFieldEnumConversion : DiagGroup<"bitfield-enum-conversion">;
def BitFieldWidth : DiagGroup<"bitfield-width">;
def CompoundTokenSplitByMacro : DiagGroup<"compound-token-split-by-macro">;
@@ -54,7 +62,14 @@ def CompoundTokenSplit : DiagGroup<"compound-token-split",
CompoundTokenSplitBySpace]>;
def CoroutineMissingUnhandledException :
DiagGroup<"coroutine-missing-unhandled-exception">;
-def Coroutine : DiagGroup<"coroutine", [CoroutineMissingUnhandledException]>;
+def DeprecatedCoroutine :
+ DiagGroup<"deprecated-coroutine">;
+def AlwaysInlineCoroutine :
+ DiagGroup<"always-inline-coroutine">;
+def CoroNonAlignedAllocationFunction :
+ DiagGroup<"coro-non-aligned-allocation-function">;
+def Coroutine : DiagGroup<"coroutine", [CoroutineMissingUnhandledException, DeprecatedCoroutine,
+ AlwaysInlineCoroutine, CoroNonAlignedAllocationFunction]>;
def ObjCBoolConstantConversion : DiagGroup<"objc-bool-constant-conversion">;
def ConstantConversion : DiagGroup<"constant-conversion",
[BitFieldConstantConversion,
@@ -64,6 +79,8 @@ def StringConversion : DiagGroup<"string-conversion">;
def SignConversion : DiagGroup<"sign-conversion">;
def PointerBoolConversion : DiagGroup<"pointer-bool-conversion">;
def UndefinedBoolConversion : DiagGroup<"undefined-bool-conversion">;
+def BitwiseInsteadOfLogical : DiagGroup<"bitwise-instead-of-logical">;
+def BoolOperation : DiagGroup<"bool-operation", [BitwiseInsteadOfLogical]>;
def BoolConversion : DiagGroup<"bool-conversion", [PointerBoolConversion,
UndefinedBoolConversion]>;
def IntConversion : DiagGroup<"int-conversion">;
@@ -128,6 +145,9 @@ def MacroRedefined : DiagGroup<"macro-redefined">;
def BuiltinMacroRedefined : DiagGroup<"builtin-macro-redefined">;
def BuiltinRequiresHeader : DiagGroup<"builtin-requires-header">;
def C99Compat : DiagGroup<"c99-compat">;
+def C23Compat : DiagGroup<"c23-compat">;
+def : DiagGroup<"c2x-compat", [C23Compat]>;
+
def CXXCompat: DiagGroup<"c++-compat">;
def ExternCCompat : DiagGroup<"extern-c-compat">;
def KeywordCompat : DiagGroup<"keyword-compat">;
@@ -155,6 +175,7 @@ def DeleteNonVirtualDtor : DiagGroup<"delete-non-virtual-dtor",
DeleteAbstractNonVirtualDtor]>;
def AbstractFinalClass : DiagGroup<"abstract-final-class">;
def FinalDtorNonFinalClass : DiagGroup<"final-dtor-non-final-class">;
+def GNUOffsetofExtensions : DiagGroup<"gnu-offsetof-extensions">;
def CXX11CompatDeprecatedWritableStr :
DiagGroup<"c++11-compat-deprecated-writable-strings">;
@@ -166,9 +187,11 @@ def DeprecatedCopyWithUserProvidedCopy : DiagGroup<"deprecated-copy-with-user-pr
def DeprecatedCopyWithUserProvidedDtor : DiagGroup<"deprecated-copy-with-user-provided-dtor">;
def DeprecatedCopy : DiagGroup<"deprecated-copy", [DeprecatedCopyWithUserProvidedCopy]>;
def DeprecatedCopyWithDtor : DiagGroup<"deprecated-copy-with-dtor", [DeprecatedCopyWithUserProvidedDtor]>;
+def DeprecatedLiteralOperator : DiagGroup<"deprecated-literal-operator">;
// For compatibility with GCC.
def : DiagGroup<"deprecated-copy-dtor", [DeprecatedCopyWithDtor]>;
def DeprecatedDeclarations : DiagGroup<"deprecated-declarations">;
+def DeprecatedRedundantConstexprStaticDef : DiagGroup<"deprecated-redundant-constexpr-static-def">;
def UnavailableDeclarations : DiagGroup<"unavailable-declarations">;
def UnguardedAvailabilityNew : DiagGroup<"unguarded-availability-new">;
def UnguardedAvailability : DiagGroup<"unguarded-availability",
@@ -177,6 +200,7 @@ def UnguardedAvailability : DiagGroup<"unguarded-availability",
def : DiagGroup<"partial-availability", [UnguardedAvailability]>;
def DeprecatedDynamicExceptionSpec
: DiagGroup<"deprecated-dynamic-exception-spec">;
+def DeprecatedBuiltins : DiagGroup<"deprecated-builtins">;
def DeprecatedImplementations :DiagGroup<"deprecated-implementations">;
def DeprecatedIncrementBool : DiagGroup<"deprecated-increment-bool">;
def DeprecatedRegister : DiagGroup<"deprecated-register">;
@@ -184,6 +208,8 @@ def DeprecatedThisCapture : DiagGroup<"deprecated-this-capture">;
def DeprecatedVolatile : DiagGroup<"deprecated-volatile">;
def DeprecatedWritableStr : DiagGroup<"deprecated-writable-strings",
[CXX11CompatDeprecatedWritableStr]>;
+def DeprecatedPragma : DiagGroup<"deprecated-pragma">;
+def DeprecatedType : DiagGroup<"deprecated-type">;
// FIXME: Why is DeprecatedImplementations not in this group?
def Deprecated : DiagGroup<"deprecated", [DeprecatedAnonEnumEnumConversion,
DeprecatedArrayCompare,
@@ -197,11 +223,17 @@ def Deprecated : DiagGroup<"deprecated", [DeprecatedAnonEnumEnumConversion,
DeprecatedEnumCompareConditional,
DeprecatedEnumEnumConversion,
DeprecatedEnumFloatConversion,
+ DeprecatedBuiltins,
DeprecatedIncrementBool,
+ DeprecatedLiteralOperator,
+ DeprecatedPragma,
DeprecatedRegister,
DeprecatedThisCapture,
+ DeprecatedType,
DeprecatedVolatile,
- DeprecatedWritableStr]>,
+ DeprecatedWritableStr,
+ DeprecatedRedundantConstexprStaticDef
+ ]>,
DiagCategory<"Deprecations">;
def CXX20Designator : DiagGroup<"c++20-designator">;
@@ -231,8 +263,8 @@ def Documentation : DiagGroup<"documentation",
def EmptyBody : DiagGroup<"empty-body">;
def Exceptions : DiagGroup<"exceptions">;
+def DeclarationAfterStatement : DiagGroup<"declaration-after-statement">;
-def GNUEmptyInitializer : DiagGroup<"gnu-empty-initializer">;
def GNUEmptyStruct : DiagGroup<"gnu-empty-struct">;
def ExtraTokens : DiagGroup<"extra-tokens">;
def CXX98CompatExtraSemi : DiagGroup<"c++98-compat-extra-semi">;
@@ -258,9 +290,11 @@ def : DiagGroup<"c++1z-compat-mangling", [CXX17CompatMangling]>;
def NoexceptType : DiagGroup<"noexcept-type", [CXX17CompatMangling]>;
// Warnings for C code which is not compatible with previous C standards.
-def CPre2xCompat : DiagGroup<"pre-c2x-compat">;
-def CPre2xCompatPedantic : DiagGroup<"pre-c2x-compat-pedantic",
- [CPre2xCompat]>;
+def CPre23Compat : DiagGroup<"pre-c23-compat">;
+def CPre23CompatPedantic : DiagGroup<"pre-c23-compat-pedantic",
+ [CPre23Compat]>;
+def : DiagGroup<"pre-c2x-compat", [CPre23Compat]>;
+def : DiagGroup<"pre-c2x-compat-pedantic", [CPre23CompatPedantic]>;
// Warnings for C++ code which is not compatible with previous C++ standards.
def CXXPre14Compat : DiagGroup<"pre-c++14-compat">;
@@ -281,9 +315,14 @@ def CXXPre20CompatPedantic : DiagGroup<"pre-c++20-compat-pedantic",
[CXXPre20Compat]>;
def : DiagGroup<"c++98-c++11-c++14-c++17-compat-pedantic",
[CXXPre20CompatPedantic]>;
-def CXXPre2bCompat : DiagGroup<"pre-c++2b-compat">;
-def CXXPre2bCompatPedantic :
- DiagGroup<"pre-c++2b-compat-pedantic", [CXXPre2bCompat]>;
+def CXXPre23Compat : DiagGroup<"pre-c++23-compat">;
+def CXXPre23CompatPedantic :
+ DiagGroup<"pre-c++23-compat-pedantic", [CXXPre23Compat]>;
+def CXXPre26Compat : DiagGroup<"pre-c++26-compat">;
+def CXXPre26CompatPedantic :
+ DiagGroup<"pre-c++26-compat-pedantic", [CXXPre26Compat]>;
+def : DiagGroup<"pre-c++2c-compat", [CXXPre26Compat]>;
+def : DiagGroup<"pre-c++2c-compat-pedantic", [CXXPre26CompatPedantic]>;
def CXX98CompatBindToTemporaryCopy :
DiagGroup<"c++98-compat-bind-to-temporary-copy">;
@@ -298,7 +337,7 @@ def CXX98Compat : DiagGroup<"c++98-compat",
CXXPre14Compat,
CXXPre17Compat,
CXXPre20Compat,
- CXXPre2bCompat]>;
+ CXXPre23Compat]>;
// Warnings for C++11 features which are Extensions in C++98 mode.
def CXX98CompatPedantic : DiagGroup<"c++98-compat-pedantic",
[CXX98Compat,
@@ -307,9 +346,10 @@ def CXX98CompatPedantic : DiagGroup<"c++98-compat-pedantic",
CXXPre14CompatPedantic,
CXXPre17CompatPedantic,
CXXPre20CompatPedantic,
- CXXPre2bCompatPedantic]>;
+ CXXPre23CompatPedantic]>;
-def CXX11Narrowing : DiagGroup<"c++11-narrowing">;
+def CXX11NarrowingConstReference : DiagGroup<"c++11-narrowing-const-reference">;
+def CXX11Narrowing : DiagGroup<"c++11-narrowing", [CXX11NarrowingConstReference]>;
def CXX11WarnInconsistentOverrideDestructor :
DiagGroup<"inconsistent-missing-destructor-override">;
@@ -337,46 +377,50 @@ def CXX11Compat : DiagGroup<"c++11-compat",
CXXPre14Compat,
CXXPre17Compat,
CXXPre20Compat,
- CXXPre2bCompat]>;
+ CXXPre23Compat]>;
def : DiagGroup<"c++0x-compat", [CXX11Compat]>;
def CXX11CompatPedantic : DiagGroup<"c++11-compat-pedantic",
[CXX11Compat,
CXXPre14CompatPedantic,
CXXPre17CompatPedantic,
CXXPre20CompatPedantic,
- CXXPre2bCompatPedantic]>;
+ CXXPre23CompatPedantic]>;
def CXX14Compat : DiagGroup<"c++14-compat", [CXXPre17Compat,
CXXPre20Compat,
- CXXPre2bCompat]>;
+ CXXPre23Compat]>;
def CXX14CompatPedantic : DiagGroup<"c++14-compat-pedantic",
[CXX14Compat,
CXXPre17CompatPedantic,
CXXPre20CompatPedantic,
- CXXPre2bCompatPedantic]>;
+ CXXPre23CompatPedantic]>;
def CXX17Compat : DiagGroup<"c++17-compat", [DeprecatedRegister,
DeprecatedIncrementBool,
CXX17CompatMangling,
CXXPre20Compat,
- CXXPre2bCompat]>;
+ CXXPre23Compat]>;
def CXX17CompatPedantic : DiagGroup<"c++17-compat-pedantic",
[CXX17Compat,
CXXPre20CompatPedantic,
- CXXPre2bCompatPedantic]>;
+ CXXPre23CompatPedantic]>;
def : DiagGroup<"c++1z-compat", [CXX17Compat]>;
-def CXX20Compat : DiagGroup<"c++20-compat", [CXXPre2bCompat]>;
+def CXX20Compat : DiagGroup<"c++20-compat", [CXXPre23Compat]>;
def CXX20CompatPedantic : DiagGroup<"c++20-compat-pedantic",
[CXX20Compat,
- CXXPre2bCompatPedantic]>;
+ CXXPre23CompatPedantic]>;
def : DiagGroup<"c++2a-compat", [CXX20Compat]>;
def : DiagGroup<"c++2a-compat-pedantic", [CXX20CompatPedantic]>;
def ExitTimeDestructors : DiagGroup<"exit-time-destructors">;
def FlexibleArrayExtensions : DiagGroup<"flexible-array-extensions">;
def FourByteMultiChar : DiagGroup<"four-char-constants">;
-def GlobalConstructors : DiagGroup<"global-constructors">;
+def GlobalConstructors : DiagGroup<"global-constructors"> {
+ code Documentation = [{
+Emit a warning for each variable declaration that generates code run at startup.
+ }];
+}
def BitwiseConditionalParentheses: DiagGroup<"bitwise-conditional-parentheses">;
def BitwiseOpParentheses: DiagGroup<"bitwise-op-parentheses">;
def LogicalOpParentheses: DiagGroup<"logical-op-parentheses">;
@@ -388,11 +432,14 @@ def DanglingField : DiagGroup<"dangling-field">;
def DanglingInitializerList : DiagGroup<"dangling-initializer-list">;
def DanglingGsl : DiagGroup<"dangling-gsl">;
def ReturnStackAddress : DiagGroup<"return-stack-address">;
+// Name of this warning in GCC
+def : DiagGroup<"return-local-addr", [ReturnStackAddress]>;
def Dangling : DiagGroup<"dangling", [DanglingField,
DanglingInitializerList,
DanglingGsl,
ReturnStackAddress]>;
def DistributedObjectModifiers : DiagGroup<"distributed-object-modifiers">;
+def DllexportExplicitInstantiationDecl : DiagGroup<"dllexport-explicit-instantiation-decl">;
def ExcessInitializers : DiagGroup<"excess-initializers">;
def ExpansionToDefined : DiagGroup<"expansion-to-defined">;
def FlagEnum : DiagGroup<"flag-enum">;
@@ -400,10 +447,13 @@ def IncrementBool : DiagGroup<"increment-bool", [DeprecatedIncrementBool]>;
def InfiniteRecursion : DiagGroup<"infinite-recursion">;
def PureVirtualCallFromCtorDtor: DiagGroup<"call-to-pure-virtual-from-ctor-dtor">;
def GNUImaginaryConstant : DiagGroup<"gnu-imaginary-constant">;
-def IgnoredQualifiers : DiagGroup<"ignored-qualifiers">;
+def IgnoredGCH : DiagGroup<"ignored-gch">;
+def IgnoredReferenceQualifiers : DiagGroup<"ignored-reference-qualifiers">;
+def IgnoredQualifiers : DiagGroup<"ignored-qualifiers", [IgnoredReferenceQualifiers]>;
def : DiagGroup<"import">;
def GNUIncludeNext : DiagGroup<"gnu-include-next">;
def IncompatibleMSStruct : DiagGroup<"incompatible-ms-struct">;
+def IncompatibleMSPragmaSection : DiagGroup<"incompatible-ms-pragma-section">;
def IncompatiblePointerTypesDiscardsQualifiers
: DiagGroup<"incompatible-pointer-types-discards-qualifiers">;
def IncompatibleFunctionPointerTypes
@@ -429,10 +479,13 @@ def InlineNamespaceReopenedNoninline
def InvalidNoreturn : DiagGroup<"invalid-noreturn">;
def InvalidSourceEncoding : DiagGroup<"invalid-source-encoding">;
def KNRPromotedParameter : DiagGroup<"knr-promoted-parameter">;
+def DeprecatedNonPrototype : DiagGroup<"deprecated-non-prototype">;
+def StrictPrototypes : DiagGroup<"strict-prototypes", [DeprecatedNonPrototype]>;
def : DiagGroup<"init-self">;
def : DiagGroup<"inline">;
def : DiagGroup<"invalid-pch">;
def GNULabelsAsValue : DiagGroup<"gnu-label-as-value">;
+def GNULineMarker : DiagGroup<"gnu-line-marker">;
def LiteralRange : DiagGroup<"literal-range">;
def LocalTypeTemplateArgs : DiagGroup<"local-type-template-args",
[CXX98CompatLocalTypeTemplateArgs]>;
@@ -462,10 +515,12 @@ def MismatchedParameterTypes : DiagGroup<"mismatched-parameter-types">;
def MismatchedReturnTypes : DiagGroup<"mismatched-return-types">;
def MismatchedTags : DiagGroup<"mismatched-tags">;
def MissingFieldInitializers : DiagGroup<"missing-field-initializers">;
+def ModuleLock : DiagGroup<"module-lock">;
def ModuleBuild : DiagGroup<"module-build">;
def ModuleImport : DiagGroup<"module-import">;
def ModuleConflict : DiagGroup<"module-conflict">;
def ModuleFileExtension : DiagGroup<"module-file-extension">;
+def ModuleIncludeDirectiveTranslation : DiagGroup<"module-include-translation">;
def RoundTripCC1Args : DiagGroup<"round-trip-cc1-args">;
def NewlineEOF : DiagGroup<"newline-eof">;
def Nullability : DiagGroup<"nullability">;
@@ -486,7 +541,9 @@ def NonPODVarargs : DiagGroup<"non-pod-varargs">;
def ClassVarargs : DiagGroup<"class-varargs", [NonPODVarargs]>;
def : DiagGroup<"nonportable-cfstrings">;
def NonVirtualDtor : DiagGroup<"non-virtual-dtor">;
-def NullPointerArithmetic : DiagGroup<"null-pointer-arithmetic">;
+def GNUNullPointerArithmetic : DiagGroup<"gnu-null-pointer-arithmetic">;
+def NullPointerArithmetic
+ : DiagGroup<"null-pointer-arithmetic", [GNUNullPointerArithmetic]>;
def NullPointerSubtraction : DiagGroup<"null-pointer-subtraction">;
def : DiagGroup<"effc++", [NonVirtualDtor]>;
def OveralignedType : DiagGroup<"over-aligned">;
@@ -504,7 +561,8 @@ def PrivateExtern : DiagGroup<"private-extern">;
def SelTypeCast : DiagGroup<"cast-of-sel-type">;
def FunctionDefInObjCContainer : DiagGroup<"function-def-in-objc-container">;
def BadFunctionCast : DiagGroup<"bad-function-cast">;
-def CastFunctionType : DiagGroup<"cast-function-type">;
+def CastFunctionTypeStrict : DiagGroup<"cast-function-type-strict">;
+def CastFunctionType : DiagGroup<"cast-function-type", [CastFunctionTypeStrict]>;
def ObjCPropertyImpl : DiagGroup<"objc-property-implementation">;
def ObjCPropertyNoAttribute : DiagGroup<"objc-property-no-attribute">;
def ObjCPropertyAssignOnObjectType : DiagGroup<"objc-property-assign-on-object-type">;
@@ -527,13 +585,18 @@ def UnderalignedExceptionObject : DiagGroup<"underaligned-exception-object">;
def DeprecatedObjCIsaUsage : DiagGroup<"deprecated-objc-isa-usage">;
def ExplicitInitializeCall : DiagGroup<"explicit-initialize-call">;
def OrderedCompareFunctionPointers : DiagGroup<"ordered-compare-function-pointers">;
-def Packed : DiagGroup<"packed">;
-def Padded : DiagGroup<"padded">;
+def PackedNonPod : DiagGroup<"packed-non-pod">;
+def Packed : DiagGroup<"packed", [PackedNonPod]>;
+def PaddedBitField : DiagGroup<"padded-bitfield">;
+def Padded : DiagGroup<"padded", [PaddedBitField]>;
+def UnalignedAccess : DiagGroup<"unaligned-access">;
def PessimizingMove : DiagGroup<"pessimizing-move">;
def ReturnStdMove : DiagGroup<"return-std-move">;
-def PointerArith : DiagGroup<"pointer-arith">;
+def GNUPointerArith : DiagGroup<"gnu-pointer-arith">;
+def PointerArith : DiagGroup<"pointer-arith", [GNUPointerArith]>;
+
def PoundWarning : DiagGroup<"#warnings">;
def PoundPragmaMessage : DiagGroup<"#pragma-messages">,
DiagCategory<"#pragma message Directive">;
@@ -571,7 +634,7 @@ def ShadowAll : DiagGroup<"shadow-all", [Shadow, ShadowFieldInConstructor,
def Shorten64To32 : DiagGroup<"shorten-64-to-32">;
def : DiagGroup<"sign-promo">;
def SignCompare : DiagGroup<"sign-compare">;
-def : DiagGroup<"switch-default">;
+def SwitchDefault : DiagGroup<"switch-default">;
def : DiagGroup<"synth">;
def SizeofArrayArgument : DiagGroup<"sizeof-array-argument">;
def SizeofArrayDecay : DiagGroup<"sizeof-array-decay">;
@@ -587,7 +650,12 @@ def StaticInInline : DiagGroup<"static-in-inline">;
def StaticLocalInInline : DiagGroup<"static-local-in-inline">;
def GNUStaticFloatInit : DiagGroup<"gnu-static-float-init">;
def StaticFloatInit : DiagGroup<"static-float-init", [GNUStaticFloatInit]>;
-def GNUStatementExpression : DiagGroup<"gnu-statement-expression">;
+// Allow differentiation between GNU statement expressions in a macro versus
+// written directly in source.
+def GNUStatementExpressionFromMacroExpansion :
+ DiagGroup<"gnu-statement-expression-from-macro-expansion">;
+def GNUStatementExpression : DiagGroup<"gnu-statement-expression",
+ [GNUStatementExpressionFromMacroExpansion]>;
def StringConcatation : DiagGroup<"string-concatenation">;
def StringCompare : DiagGroup<"string-compare">;
def StringPlusInt : DiagGroup<"string-plus-int">;
@@ -618,13 +686,15 @@ def TautologicalOverlapCompare : DiagGroup<"tautological-overlap-compare">;
def TautologicalBitwiseCompare : DiagGroup<"tautological-bitwise-compare">;
def TautologicalUndefinedCompare : DiagGroup<"tautological-undefined-compare">;
def TautologicalObjCBoolCompare : DiagGroup<"tautological-objc-bool-compare">;
+def TautologicalNegationCompare : DiagGroup<"tautological-negation-compare">;
def TautologicalCompare : DiagGroup<"tautological-compare",
[TautologicalConstantCompare,
TautologicalPointerCompare,
TautologicalOverlapCompare,
TautologicalBitwiseCompare,
TautologicalUndefinedCompare,
- TautologicalObjCBoolCompare]>;
+ TautologicalObjCBoolCompare,
+ TautologicalNegationCompare]>;
def HeaderHygiene : DiagGroup<"header-hygiene">;
def DuplicateDeclSpecifier : DiagGroup<"duplicate-decl-specifier">;
def CompareDistinctPointerType : DiagGroup<"compare-distinct-pointer-types">;
@@ -642,6 +712,8 @@ def AmbiguousMacro : DiagGroup<"ambiguous-macro">;
def KeywordAsMacro : DiagGroup<"keyword-macro">;
def ReservedIdAsMacro : DiagGroup<"reserved-macro-identifier">;
def ReservedIdAsMacroAlias : DiagGroup<"reserved-id-macro", [ReservedIdAsMacro]>;
+def RestrictExpansionMacro : DiagGroup<"restrict-expansion">;
+def FinalMacro : DiagGroup<"final-macro">;
// Just silence warnings about -Wstrict-aliasing for now.
def : DiagGroup<"strict-aliasing=0">;
@@ -742,6 +814,7 @@ def UnusedLocalTypedef : DiagGroup<"unused-local-typedef">;
def UnusedPropertyIvar : DiagGroup<"unused-property-ivar">;
def UnusedGetterReturnValue : DiagGroup<"unused-getter-return-value">;
def UsedButMarkedUnused : DiagGroup<"used-but-marked-unused">;
+def UsedSearchPath : DiagGroup<"search-path-usage">;
def UserDefinedLiterals : DiagGroup<"user-defined-literals">;
def UserDefinedWarnings : DiagGroup<"user-defined-warnings">;
def ReorderCtor : DiagGroup<"reorder-ctor">;
@@ -753,6 +826,7 @@ def AtomicAlignment : DiagGroup<"atomic-alignment">;
def CustomAtomic : DiagGroup<"custom-atomic-properties">;
def AtomicProperties : DiagGroup<"atomic-properties",
[ImplicitAtomic, CustomAtomic]>;
+def SyncAlignment : DiagGroup<"sync-alignment">;
def ARCUnsafeRetainedAssign : DiagGroup<"arc-unsafe-retained-assign">;
def ARCRetainCycles : DiagGroup<"arc-retain-cycles">;
def ARCNonPodMemAccess : DiagGroup<"arc-non-pod-memaccess">;
@@ -779,7 +853,9 @@ def OverridingMethodMismatch : DiagGroup<"overriding-method-mismatch">;
def VariadicMacros : DiagGroup<"variadic-macros">;
def VectorConversion : DiagGroup<"vector-conversion">; // clang specific
def VexingParse : DiagGroup<"vexing-parse">;
-def VLAExtension : DiagGroup<"vla-extension">;
+def VLAUseStaticAssert : DiagGroup<"vla-extension-static-assert">;
+def VLACxxExtension : DiagGroup<"vla-cxx-extension", [VLAUseStaticAssert]>;
+def VLAExtension : DiagGroup<"vla-extension", [VLACxxExtension]>;
def VLA : DiagGroup<"vla", [VLAExtension]>;
def VolatileRegisterVar : DiagGroup<"volatile-register-var">;
def Visibility : DiagGroup<"visibility">;
@@ -799,15 +875,17 @@ def WritableStrings : DiagGroup<"writable-strings", [DeprecatedWritableStr]>;
//
// FIXME: Should this affect C++11 (where this is an error,
// not just deprecated) or not?
-def GCCWriteStrings : DiagGroup<"write-strings" , [WritableStrings]>;
+def GCCWriteStrings : DiagGroup<"write-strings" , [WritableStrings],
+ GCCWriteStringsDocs>;
def CharSubscript : DiagGroup<"char-subscripts">;
def LargeByValueCopy : DiagGroup<"large-by-value-copy">;
def DuplicateArgDecl : DiagGroup<"duplicate-method-arg">;
def SignedEnumBitfield : DiagGroup<"signed-enum-bitfield">;
+def ReservedModuleIdentifier : DiagGroup<"reserved-module-identifier">;
def ReservedIdentifier : DiagGroup<"reserved-identifier",
- [ReservedIdAsMacro]>;
+ [ReservedIdAsMacro, ReservedModuleIdentifier, UserDefinedLiterals]>;
// Unreachable code warning groups.
//
@@ -816,8 +894,12 @@ def ReservedIdentifier : DiagGroup<"reserved-identifier",
// under separate flags.
//
def UnreachableCodeLoopIncrement : DiagGroup<"unreachable-code-loop-increment">;
+def UnreachableCodeFallthrough : DiagGroup<"unreachable-code-fallthrough">;
+def UnreachableCodeGenericAssoc : DiagGroup<"unreachable-code-generic-assoc">;
def UnreachableCode : DiagGroup<"unreachable-code",
- [UnreachableCodeLoopIncrement]>;
+ [UnreachableCodeLoopIncrement,
+ UnreachableCodeFallthrough,
+ UnreachableCodeGenericAssoc]>;
def UnreachableCodeBreak : DiagGroup<"unreachable-code-break">;
def UnreachableCodeReturn : DiagGroup<"unreachable-code-return">;
def UnreachableCodeAggressive : DiagGroup<"unreachable-code-aggressive",
@@ -889,10 +971,16 @@ def FormatNonStandard : DiagGroup<"format-non-iso">;
def FormatY2K : DiagGroup<"format-y2k">;
def FormatPedantic : DiagGroup<"format-pedantic">;
def FormatTypeConfusion : DiagGroup<"format-type-confusion">;
+
+def FormatOverflowNonKprintf: DiagGroup<"format-overflow-non-kprintf">;
+def FormatOverflow: DiagGroup<"format-overflow", [FormatOverflowNonKprintf]>;
+def FormatTruncationNonKprintf: DiagGroup<"format-truncation-non-kprintf">;
+def FormatTruncation: DiagGroup<"format-truncation", [FormatTruncationNonKprintf]>;
+
def Format : DiagGroup<"format",
[FormatExtraArgs, FormatZeroLength, NonNull,
FormatSecurity, FormatY2K, FormatInvalidSpecifier,
- FormatInsufficientArgs]>,
+ FormatInsufficientArgs, FormatOverflow, FormatTruncation]>,
DiagCategory<"Format String Issue">;
def FormatNonLiteral : DiagGroup<"format-nonliteral">;
def Format2 : DiagGroup<"format=2",
@@ -912,6 +1000,7 @@ def PointerToEnumCast : DiagGroup<"pointer-to-enum-cast",
[VoidPointerToEnumCast]>;
def PointerToIntCast : DiagGroup<"pointer-to-int-cast",
[PointerToEnumCast, VoidPointerToIntCast]>;
+def VoidPointerDeref : DiagGroup<"void-ptr-dereference">;
def FUseLdPath : DiagGroup<"fuse-ld-path">;
@@ -940,6 +1029,8 @@ def Extra : DiagGroup<"extra", [
]>;
def Most : DiagGroup<"most", [
+ ArrayParameter,
+ BoolOperation,
CharSubscript,
Comment,
DeleteNonVirtualDtor,
@@ -981,7 +1072,9 @@ def Most : DiagGroup<"most", [
def ThreadSafetyAttributes : DiagGroup<"thread-safety-attributes">;
def ThreadSafetyAnalysis : DiagGroup<"thread-safety-analysis">;
def ThreadSafetyPrecise : DiagGroup<"thread-safety-precise">;
-def ThreadSafetyReference : DiagGroup<"thread-safety-reference">;
+def ThreadSafetyReferenceReturn : DiagGroup<"thread-safety-reference-return">;
+def ThreadSafetyReference : DiagGroup<"thread-safety-reference",
+ [ThreadSafetyReferenceReturn]>;
def ThreadSafetyNegative : DiagGroup<"thread-safety-negative">;
def ThreadSafety : DiagGroup<"thread-safety",
[ThreadSafetyAttributes,
@@ -998,7 +1091,8 @@ def Consumed : DiagGroup<"consumed">;
// warning should be active _only_ when -Wall is passed in, mark it as
// DefaultIgnore in addition to putting it here.
def All : DiagGroup<"all", [Most, Parentheses, Switch, SwitchBool,
- MisleadingIndentation]>;
+ MisleadingIndentation, PackedNonPod,
+ VLACxxExtension]>;
// Warnings that should be in clang-cl /w4.
def : DiagGroup<"CL4", [All, Extra]>;
@@ -1027,6 +1121,15 @@ def : DiagGroup<"unused-local-typedefs", [UnusedLocalTypedef]>;
def NonGCC : DiagGroup<"non-gcc",
[SignCompare, Conversion, LiteralRange]>;
+def CXX14Attrs : DiagGroup<"c++14-attribute-extensions">;
+def CXX17Attrs : DiagGroup<"c++17-attribute-extensions">;
+def CXX20Attrs : DiagGroup<"c++20-attribute-extensions">;
+def FutureAttrs : DiagGroup<"future-attribute-extensions", [CXX14Attrs,
+ CXX17Attrs,
+ CXX20Attrs]>;
+
+def CXX23AttrsOnLambda : DiagGroup<"c++23-lambda-attributes">;
+
// A warning group for warnings about using C++11 features as extensions in
// earlier C++ versions.
def CXX11 : DiagGroup<"c++11-extensions", [CXX11ExtraSemi, CXX11InlineNamespace,
@@ -1034,24 +1137,30 @@ def CXX11 : DiagGroup<"c++11-extensions", [CXX11ExtraSemi, CXX11InlineNamespace,
// A warning group for warnings about using C++14 features as extensions in
// earlier C++ versions.
-def CXX14 : DiagGroup<"c++14-extensions", [CXX14BinaryLiteral]>;
+def CXX14 : DiagGroup<"c++14-extensions", [CXX14BinaryLiteral, CXX14Attrs]>;
// A warning group for warnings about using C++17 features as extensions in
// earlier C++ versions.
-def CXX17 : DiagGroup<"c++17-extensions">;
+def CXX17 : DiagGroup<"c++17-extensions", [CXX17Attrs]>;
// A warning group for warnings about using C++20 features as extensions in
// earlier C++ versions.
-def CXX20 : DiagGroup<"c++20-extensions", [CXX20Designator]>;
+def CXX20 : DiagGroup<"c++20-extensions", [CXX20Designator, CXX20Attrs]>;
-// A warning group for warnings about using C++2b features as extensions in
+// A warning group for warnings about using C++23 features as extensions in
// earlier C++ versions.
-def CXX2b : DiagGroup<"c++2b-extensions">;
+def CXX23 : DiagGroup<"c++23-extensions", [CXX23AttrsOnLambda]>;
+
+// A warning group for warnings about using C++26 features as extensions in
+// earlier C++ versions.
+def CXX26 : DiagGroup<"c++26-extensions">;
def : DiagGroup<"c++0x-extensions", [CXX11]>;
def : DiagGroup<"c++1y-extensions", [CXX14]>;
def : DiagGroup<"c++1z-extensions", [CXX17]>;
def : DiagGroup<"c++2a-extensions", [CXX20]>;
+def : DiagGroup<"c++2b-extensions", [CXX23]>;
+def : DiagGroup<"c++2c-extensions", [CXX26]>;
def DelegatingCtorCycles :
DiagGroup<"delegating-ctor-cycles">;
@@ -1062,8 +1171,10 @@ def C11 : DiagGroup<"c11-extensions">;
// A warning group for warnings about using C99 features as extensions.
def C99 : DiagGroup<"c99-extensions", [C99Designator]>;
-// A warning group for warnings about using C2x features as extensions.
-def C2x : DiagGroup<"c2x-extensions">;
+// A warning group for warnings about using C23 features as extensions.
+def C23 : DiagGroup<"c23-extensions">;
+
+def : DiagGroup<"c2x-extensions", [C23]>;
// A warning group for warnings about GCC extensions.
def GNU : DiagGroup<"gnu", [GNUAlignofExpression, GNUAnonymousStruct,
@@ -1071,16 +1182,17 @@ def GNU : DiagGroup<"gnu", [GNUAlignofExpression, GNUAnonymousStruct,
GNUBinaryLiteral, GNUCaseRange,
GNUComplexInteger, GNUCompoundLiteralInitializer,
GNUConditionalOmittedOperand, GNUDesignator,
- GNUEmptyInitializer, GNUEmptyStruct,
+ GNUEmptyStruct,
VLAExtension, GNUFlexibleArrayInitializer,
GNUFlexibleArrayUnionMember, GNUFoldingConstant,
GNUImaginaryConstant, GNUIncludeNext,
- GNULabelsAsValue,
+ GNULabelsAsValue, GNULineMarker, GNUNullPointerArithmetic,
+ GNUOffsetofExtensions, GNUPointerArith,
RedeclaredClassMember, GNURedeclaredEnum,
GNUStatementExpression, GNUStaticFloatInit,
- GNUStringLiteralOperatorTemplate,
- GNUUnionCast, GNUVariableSizedTypeNotAtEnd,
- ZeroLengthArray, GNUZeroLineDirective,
+ GNUStringLiteralOperatorTemplate, GNUUnionCast,
+ GNUVariableSizedTypeNotAtEnd, ZeroLengthArray,
+ GNUZeroLineDirective,
GNUZeroVariadicMacroArguments]>;
// A warning group for warnings about code that clang accepts but gcc doesn't.
def GccCompat : DiagGroup<"gcc-compat">;
@@ -1125,6 +1237,9 @@ def MicrosoftCommentPaste : DiagGroup<"microsoft-comment-paste">;
def MicrosoftEndOfFile : DiagGroup<"microsoft-end-of-file">;
def MicrosoftInaccessibleBase : DiagGroup<"microsoft-inaccessible-base">;
def MicrosoftStaticAssert : DiagGroup<"microsoft-static-assert">;
+def MicrosoftInitFromPredefined : DiagGroup<"microsoft-init-from-predefined">;
+def MicrosoftStringLiteralFromPredefined : DiagGroup<
+ "microsoft-string-literal-from-predefined">;
// Aliases.
def : DiagGroup<"msvc-include", [MicrosoftInclude]>;
@@ -1142,6 +1257,7 @@ def Microsoft : DiagGroup<"microsoft",
MicrosoftFlexibleArray, MicrosoftExtraQualification, MicrosoftCast,
MicrosoftConstInit, MicrosoftVoidPseudoDtor, MicrosoftAnonTag,
MicrosoftCommentPaste, MicrosoftEndOfFile, MicrosoftStaticAssert,
+ MicrosoftInitFromPredefined, MicrosoftStringLiteralFromPredefined,
MicrosoftInconsistentDllImport]>;
def ClangClPch : DiagGroup<"clang-cl-pch">;
@@ -1185,6 +1301,9 @@ def ASM : DiagGroup<"asm", [
ASMOperandWidths
]>;
+// Linker warnings.
+def LinkerWarnings : DiagGroup<"linker-warnings">;
+
// OpenMP warnings.
def SourceUsesOpenMP : DiagGroup<"source-uses-openmp">;
def OpenMPClauses : DiagGroup<"openmp-clauses">;
@@ -1193,15 +1312,41 @@ def OpenMPMapping : DiagGroup<"openmp-mapping">;
def OpenMPTarget : DiagGroup<"openmp-target", [OpenMPMapping]>;
def OpenMPPre51Compat : DiagGroup<"pre-openmp-51-compat">;
def OpenMP51Ext : DiagGroup<"openmp-51-extensions">;
+def OpenMPExtensions : DiagGroup<"openmp-extensions">;
+def OpenMPTargetException : DiagGroup<"openmp-target-exception">;
def OpenMP : DiagGroup<"openmp", [
SourceUsesOpenMP, OpenMPClauses, OpenMPLoopForm, OpenMPTarget,
- OpenMPMapping, OpenMP51Ext
+ OpenMPMapping, OpenMP51Ext, OpenMPExtensions, OpenMPTargetException
]>;
+// OpenACC warnings.
+def SourceUsesOpenACC : DiagGroup<"source-uses-openacc">;
+def OpenACC : DiagGroup<"openacc", [SourceUsesOpenACC]>;
+
// Backend warnings.
def BackendInlineAsm : DiagGroup<"inline-asm">;
def BackendSourceMgr : DiagGroup<"source-mgr">;
-def BackendFrameLargerThan : DiagGroup<"frame-larger-than">;
+def BackendFrameLargerThan : DiagGroup<"frame-larger-than">{
+ code Documentation = [{
+More fine grained information about the stack layout is available by adding the
+`-Rpass-analysis=stack-frame-layout` command-line flag to the compiler
+invocation.
+
+The diagnostic information can be saved to a file in a machine readable format,
+like YAML by adding the `-foptimization-record-file=<file>` command-line flag.
+
+Results can be filtered by function name by passing
+`-mllvm -filter-print-funcs=foo`, where `foo` is the target function's name.
+
+ .. code-block:: console
+
+ clang -c a.cpp -Rpass-analysis=stack-frame-layout -mllvm -filter-print-funcs=foo
+
+ .. code-block:: console
+
+ clang -c a.cpp -Rpass-analysis=stack-frame-layout -foptimization-record-file=<file>
+}];
+}
// Compatibility flag name from old versions of Clang.
def : DiagGroup<"frame-larger-than=", [BackendFrameLargerThan]>;
def BackendPlugin : DiagGroup<"backend-plugin">;
@@ -1210,11 +1355,13 @@ def BackendOptimizationRemark : DiagGroup<"pass">;
def BackendOptimizationRemarkMissed : DiagGroup<"pass-missed">;
def BackendOptimizationRemarkAnalysis : DiagGroup<"pass-analysis">;
def BackendOptimizationFailure : DiagGroup<"pass-failed">;
+def BackendWarningAttributes : DiagGroup<"attribute-warning">;
// Instrumentation based profiling warnings.
def ProfileInstrMissing : DiagGroup<"profile-instr-missing">;
def ProfileInstrOutOfDate : DiagGroup<"profile-instr-out-of-date">;
def ProfileInstrUnprofiled : DiagGroup<"profile-instr-unprofiled">;
+def MisExpect : DiagGroup<"misexpect">;
// AddressSanitizer frontend instrumentation remarks.
def SanitizeAddressRemarks : DiagGroup<"sanitize-address">;
@@ -1233,6 +1380,12 @@ def CudaUnknownVersion: DiagGroup<"unknown-cuda-version">;
// ignored by CUDA.
def HIPOnly : DiagGroup<"hip-only">;
+// Warning about mixed HIP and OpenMP compilation / target offloading.
+def HIPOpenMPOffloading: DiagGroup<"hip-omp-target-directives">;
+
+// Warning about multiple GPUs are detected.
+def MultiGPU: DiagGroup<"multi-gpu">;
+
// Warnings which cause linking of the runtime libraries like
// libc and the CRT to be skipped.
def AVRRtlibLinkingQuirks : DiagGroup<"avr-rtlib-linking-quirks">;
@@ -1246,16 +1399,24 @@ def OptionIgnored : DiagGroup<"option-ignored">;
def UnknownArgument : DiagGroup<"unknown-argument">;
+def UnsupportedABI : DiagGroup<"unsupported-abi">;
+
// A warning group for warnings about code that clang accepts when
-// compiling OpenCL C/C++ but which is not compatible with the SPIR spec.
+// compiling OpenCL C/C++ but which is not compatible with the SPIR(-V) spec.
def SpirCompat : DiagGroup<"spir-compat">;
+def : DiagGroup<"spirv-compat", [SpirCompat]>; // Alias.
// Warning for the GlobalISel options.
def GlobalISel : DiagGroup<"global-isel">;
+// A warning group for the GNU extension to allow mixed specifier types for
+// target-clones multiversioning.
+def TargetClonesMixedSpecifiers : DiagGroup<"target-clones-mixed-specifiers">;
+
// A warning group specifically for warnings related to function
// multiversioning.
-def FunctionMultiVersioning : DiagGroup<"function-multiversion">;
+def FunctionMultiVersioning
+ : DiagGroup<"function-multiversion", [TargetClonesMixedSpecifiers]>;
def NoDeref : DiagGroup<"noderef">;
@@ -1264,7 +1425,7 @@ def CrossTU : DiagGroup<"ctu">;
def CTADMaybeUnsupported : DiagGroup<"ctad-maybe-unsupported">;
-def FortifySource : DiagGroup<"fortify-source">;
+def FortifySource : DiagGroup<"fortify-source", [FormatOverflow, FormatTruncation]>;
def MaxTokens : DiagGroup<"max-tokens"> {
code Documentation = [{
@@ -1274,19 +1435,22 @@ the token limit, which can be set in three ways:
1. As a limit at a specific point in a file, using the ``clang max_tokens_here``
pragma:
- .. code-block: c++
+ .. code-block:: c++
+
#pragma clang max_tokens_here 1234
2. As a per-translation unit limit, using the ``-fmax-tokens=`` command-line
flag:
- .. code-block: console
+ .. code-block:: console
+
clang -c a.cpp -fmax-tokens=1234
3. As a per-translation unit limit using the ``clang max_tokens_total`` pragma,
which works like and overrides the ``-fmax-tokens=`` flag:
- .. code-block: c++
+ .. code-block:: c++
+
#pragma clang max_tokens_total 1234
These limits can be helpful in limiting code growth through included files.
@@ -1303,3 +1467,27 @@ def WebAssemblyExceptionSpec : DiagGroup<"wasm-exception-spec">;
def RTTI : DiagGroup<"rtti">;
def OpenCLCoreFeaturesDiagGroup : DiagGroup<"pedantic-core-features">;
+
+// Warnings and extensions to make preprocessor macro usage pedantic.
+def PedanticMacros : DiagGroup<"pedantic-macros",
+ [DeprecatedPragma,
+ MacroRedefined,
+ BuiltinMacroRedefined,
+ RestrictExpansionMacro,
+ FinalMacro]>;
+
+def BranchProtection : DiagGroup<"branch-protection">;
+
+// HLSL diagnostic groups
+// Warnings for HLSL Clang extensions
+def HLSLExtension : DiagGroup<"hlsl-extensions">;
+
+// Warnings for DXIL validation
+def DXILValidation : DiagGroup<"dxil-validation">;
+
+// Warnings and notes related to const_var_decl_type attribute checks
+def ReadOnlyPlacementChecks : DiagGroup<"read-only-types">;
+
+// Warnings and fixes to support the "safe buffers" programming model.
+def UnsafeBufferUsageInContainer : DiagGroup<"unsafe-buffer-usage-in-container">;
+def UnsafeBufferUsage : DiagGroup<"unsafe-buffer-usage", [UnsafeBufferUsageInContainer]>;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h
index 288504def5eb..0cdda42793f6 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticIDs.h
@@ -17,6 +17,7 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/StringRef.h"
+#include <optional>
#include <vector>
namespace clang {
@@ -25,15 +26,17 @@ namespace clang {
// Import the diagnostic enums themselves.
namespace diag {
+ enum class Group;
+
// Size of each of the diagnostic categories.
enum {
DIAG_SIZE_COMMON = 300,
- DIAG_SIZE_DRIVER = 250,
+ DIAG_SIZE_DRIVER = 400,
DIAG_SIZE_FRONTEND = 150,
DIAG_SIZE_SERIALIZATION = 120,
DIAG_SIZE_LEX = 400,
- DIAG_SIZE_PARSE = 600,
- DIAG_SIZE_AST = 250,
+ DIAG_SIZE_PARSE = 700,
+ DIAG_SIZE_AST = 300,
DIAG_SIZE_COMMENT = 100,
DIAG_SIZE_CROSSTU = 100,
DIAG_SIZE_SEMA = 4500,
@@ -43,18 +46,18 @@ namespace clang {
// Start position for diagnostics.
enum {
DIAG_START_COMMON = 0,
- DIAG_START_DRIVER = DIAG_START_COMMON + DIAG_SIZE_COMMON,
- DIAG_START_FRONTEND = DIAG_START_DRIVER + DIAG_SIZE_DRIVER,
- DIAG_START_SERIALIZATION = DIAG_START_FRONTEND + DIAG_SIZE_FRONTEND,
- DIAG_START_LEX = DIAG_START_SERIALIZATION + DIAG_SIZE_SERIALIZATION,
- DIAG_START_PARSE = DIAG_START_LEX + DIAG_SIZE_LEX,
- DIAG_START_AST = DIAG_START_PARSE + DIAG_SIZE_PARSE,
- DIAG_START_COMMENT = DIAG_START_AST + DIAG_SIZE_AST,
- DIAG_START_CROSSTU = DIAG_START_COMMENT + DIAG_SIZE_COMMENT,
- DIAG_START_SEMA = DIAG_START_CROSSTU + DIAG_SIZE_CROSSTU,
- DIAG_START_ANALYSIS = DIAG_START_SEMA + DIAG_SIZE_SEMA,
- DIAG_START_REFACTORING = DIAG_START_ANALYSIS + DIAG_SIZE_ANALYSIS,
- DIAG_UPPER_LIMIT = DIAG_START_REFACTORING + DIAG_SIZE_REFACTORING
+ DIAG_START_DRIVER = DIAG_START_COMMON + static_cast<int>(DIAG_SIZE_COMMON),
+ DIAG_START_FRONTEND = DIAG_START_DRIVER + static_cast<int>(DIAG_SIZE_DRIVER),
+ DIAG_START_SERIALIZATION = DIAG_START_FRONTEND + static_cast<int>(DIAG_SIZE_FRONTEND),
+ DIAG_START_LEX = DIAG_START_SERIALIZATION + static_cast<int>(DIAG_SIZE_SERIALIZATION),
+ DIAG_START_PARSE = DIAG_START_LEX + static_cast<int>(DIAG_SIZE_LEX),
+ DIAG_START_AST = DIAG_START_PARSE + static_cast<int>(DIAG_SIZE_PARSE),
+ DIAG_START_COMMENT = DIAG_START_AST + static_cast<int>(DIAG_SIZE_AST),
+ DIAG_START_CROSSTU = DIAG_START_COMMENT + static_cast<int>(DIAG_SIZE_COMMENT),
+ DIAG_START_SEMA = DIAG_START_CROSSTU + static_cast<int>(DIAG_SIZE_CROSSTU),
+ DIAG_START_ANALYSIS = DIAG_START_SEMA + static_cast<int>(DIAG_SIZE_SEMA),
+ DIAG_START_REFACTORING = DIAG_START_ANALYSIS + static_cast<int>(DIAG_SIZE_ANALYSIS),
+ DIAG_UPPER_LIMIT = DIAG_START_REFACTORING + static_cast<int>(DIAG_SIZE_REFACTORING)
};
class CustomDiagInfo;
@@ -65,7 +68,7 @@ namespace clang {
// Get typedefs for common diagnostics.
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, CATEGORY, \
- NOWERROR, SHOWINSYSHEADER, DEFFERABLE) \
+ NOWERROR, SHOWINSYSHEADER, SHOWINSYSMACRO, DEFFERABLE) \
ENUM,
#define COMMONSTART
#include "clang/Basic/DiagnosticCommonKinds.inc"
@@ -97,11 +100,17 @@ namespace clang {
}
class DiagnosticMapping {
+ LLVM_PREFERRED_TYPE(diag::Severity)
unsigned Severity : 3;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsUser : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsPragma : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasNoWarningAsError : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasNoErrorAsFatal : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned WasUpgradedFromWarning : 1;
public:
@@ -156,6 +165,10 @@ public:
Result.Severity = Bits & 0x7;
return Result;
}
+
+ bool operator==(DiagnosticMapping Other) const {
+ return serialize() == Other.serialize();
+ }
};
/// Used for handling and querying diagnostic IDs.
@@ -205,6 +218,9 @@ public:
/// default.
static bool isDefaultMappingAsError(unsigned DiagID);
+ /// Get the default mapping for this diagnostic.
+ static DiagnosticMapping getDefaultMapping(unsigned DiagID);
+
/// Determine whether the given built-in diagnostic ID is a Note.
static bool isBuiltinNote(unsigned DiagID);
@@ -224,6 +240,21 @@ public:
///
static bool isBuiltinExtensionDiag(unsigned DiagID, bool &EnabledByDefault);
+ /// Given a group ID, returns the flag that toggles the group.
+ /// For example, for Group::DeprecatedDeclarations, returns
+ /// "deprecated-declarations".
+ static StringRef getWarningOptionForGroup(diag::Group);
+
+ /// Given a diagnostic group ID, return its documentation.
+ static StringRef getWarningOptionDocumentation(diag::Group GroupID);
+
+ /// Given a group ID, returns the flag that toggles the group.
+ /// For example, for "deprecated-declarations", returns
+ /// Group::DeprecatedDeclarations.
+ static std::optional<diag::Group> getGroupForWarningOption(StringRef);
+
+ /// Return the lowest-level group that contains the specified diagnostic.
+ static std::optional<diag::Group> getGroupForDiag(unsigned DiagID);
/// Return the lowest-level warning option that enables the specified
/// diagnostic.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticLex.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticLex.h
index 7a3128de3b82..5f237085ae03 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticLex.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticLex.h
@@ -15,7 +15,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
ENUM,
#define LEXSTART
#include "clang/Basic/DiagnosticLexKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticLexKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticLexKinds.td
index bdf5d263fa92..75ca2fa16d34 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticLexKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticLexKinds.td
@@ -80,6 +80,10 @@ def warn_cxx11_keyword : Warning<"'%0' is a keyword in C++11">,
InGroup<CXX11Compat>, DefaultIgnore;
def warn_cxx20_keyword : Warning<"'%0' is a keyword in C++20">,
InGroup<CXX20Compat>, DefaultIgnore;
+def warn_c99_keyword : Warning<"'%0' is a keyword in C99">,
+ InGroup<C99Compat>, DefaultIgnore;
+def warn_c23_keyword : Warning<"'%0' is a keyword in C23">,
+ InGroup<C23Compat>, DefaultIgnore;
def ext_unterminated_char_or_string : ExtWarn<
"missing terminating %select{'|'\"'}0 character">, InGroup<InvalidPPToken>;
@@ -113,25 +117,61 @@ def warn_four_char_character_literal : Warning<
// Unicode and UCNs
def err_invalid_utf8 : Error<
"source file is not valid UTF-8">;
-def err_non_ascii : Error<
- "non-ASCII characters are not allowed outside of literals and identifiers">;
+def warn_invalid_utf8_in_comment : Extension<
+ "invalid UTF-8 in comment">, InGroup<DiagGroup<"invalid-utf8">>;
+def err_character_not_allowed : Error<
+ "unexpected character <U+%0>">;
+def err_character_not_allowed_identifier : Error<
+ "character <U+%0> not allowed %select{in|at the start of}1 an identifier">;
def ext_unicode_whitespace : ExtWarn<
"treating Unicode character as whitespace">,
InGroup<DiagGroup<"unicode-whitespace">>;
def warn_utf8_symbol_homoglyph : Warning<
- "treating Unicode character <U+%0> as identifier character rather than "
+ "treating Unicode character <U+%0> as an identifier character rather than "
"as '%1' symbol">, InGroup<DiagGroup<"unicode-homoglyph">>;
def warn_utf8_symbol_zero_width : Warning<
"identifier contains Unicode character <U+%0> that is invisible in "
"some environments">, InGroup<DiagGroup<"unicode-zero-width">>;
-
+def ext_mathematical_notation : ExtWarn<
+ "mathematical notation character <U+%0> in an identifier is a Clang extension">,
+ InGroup<DiagGroup<"mathematical-notation-identifier-extension">>;
+
+def ext_delimited_escape_sequence : Extension<
+ "%select{delimited|named}0 escape sequences are a "
+ "%select{Clang|C++23}1 extension">,
+ InGroup<DiagGroup<"delimited-escape-sequence-extension">>;
+
+def warn_cxx23_delimited_escape_sequence : Warning<
+ "%select{delimited|named}0 escape sequences are "
+ "incompatible with C++ standards before C++23">,
+ InGroup<CXXPre23Compat>, DefaultIgnore;
+
+def err_delimited_escape_empty : Error<
+ "delimited escape sequence cannot be empty">;
+def err_delimited_escape_missing_brace: Error<
+ "expected '{' after '\\%0' escape sequence">;
+def err_delimited_escape_invalid : Error<
+ "invalid digit '%0' in escape sequence">;
def err_hex_escape_no_digits : Error<
"\\%0 used with no following hex digits">;
+def err_invalid_ucn_name : Error<
+ "'%0' is not a valid Unicode character name">;
+def note_invalid_ucn_name_loose_matching : Note<
+ "characters names in Unicode escape sequences are sensitive to case and whitespaces">;
+def note_invalid_ucn_name_candidate : Note<
+ "did you mean %0 ('%2' U+%1)?">;
+
def warn_ucn_escape_no_digits : Warning<
"\\%0 used with no following hex digits; "
"treating as '\\' followed by identifier">, InGroup<Unicode>;
def err_ucn_escape_incomplete : Error<
"incomplete universal character name">;
+def warn_delimited_ucn_incomplete : Warning<
+ "incomplete delimited universal character name; "
+ "treating as '\\' '%0' '{' identifier">, InGroup<Unicode>;
+def warn_delimited_ucn_empty : Warning<
+ "empty delimited universal character name; "
+ "treating as '\\' '%0' '{' '}'">, InGroup<Unicode>;
def warn_ucn_escape_incomplete : Warning<
"incomplete universal character name; "
"treating as '\\' followed by identifier">, InGroup<Unicode>;
@@ -150,9 +190,6 @@ def warn_c99_compat_unicode_id : Warning<
"%select{using this character in an identifier|starting an identifier with "
"this character}0 is incompatible with C99">,
InGroup<C99Compat>, DefaultIgnore;
-def warn_cxx98_compat_unicode_id : Warning<
- "using this character in an identifier is incompatible with C++98">,
- InGroup<CXX98Compat>, DefaultIgnore;
def warn_cxx98_compat_literal_ucn_escape_basic_scs : Warning<
"specifying character '%0' with a universal character name "
@@ -160,6 +197,14 @@ def warn_cxx98_compat_literal_ucn_escape_basic_scs : Warning<
def warn_cxx98_compat_literal_ucn_control_character : Warning<
"universal character name referring to a control character "
"is incompatible with C++98">, InGroup<CXX98Compat>, DefaultIgnore;
+def warn_c23_compat_literal_ucn_escape_basic_scs : Warning<
+ "specifying character '%0' with a universal character name is "
+ "incompatible with C standards before C23">,
+ InGroup<CPre23Compat>, DefaultIgnore;
+def warn_c23_compat_literal_ucn_control_character : Warning<
+ "universal character name referring to a control character "
+ "is incompatible with C standards before C23">,
+ InGroup<CPre23Compat>, DefaultIgnore;
def warn_ucn_not_valid_in_c89 : Warning<
"universal character names are only valid in C99 or C++; "
"treating as '\\' followed by identifier">, InGroup<Unicode>;
@@ -179,17 +224,15 @@ def err_invalid_suffix_constant : Error<
def warn_cxx11_compat_digit_separator : Warning<
"digit separators are incompatible with C++ standards before C++14">,
InGroup<CXXPre14Compat>, DefaultIgnore;
-def warn_c2x_compat_digit_separator : Warning<
- "digit separators are incompatible with C standards before C2x">,
- InGroup<CPre2xCompat>, DefaultIgnore;
+def warn_c23_compat_digit_separator : Warning<
+ "digit separators are incompatible with C standards before C23">,
+ InGroup<CPre23Compat>, DefaultIgnore;
def err_digit_separator_not_between_digits : Error<
"digit separator cannot appear at %select{start|end}0 of digit sequence">;
-def warn_extraneous_char_constant : Warning<
- "extraneous characters in character constant ignored">;
def warn_char_constant_too_large : Warning<
"character constant too long for its type">;
-def err_multichar_utf_character_literal : Error<
- "Unicode character literals may not contain multiple characters">;
+def err_multichar_character_literal : Error<
+ "%select{wide|Unicode}0 character literals may not contain multiple characters">;
def err_exponent_has_no_digits : Error<"exponent has no digits">;
def err_hex_constant_requires : Error<
"hexadecimal floating %select{constant|literal}0 requires "
@@ -241,6 +284,17 @@ def ext_ms_reserved_user_defined_literal : ExtWarn<
"identifier">, InGroup<ReservedUserDefinedLiteral>;
def err_unsupported_string_concat : Error<
"unsupported non-standard concatenation of string literals">;
+
+def warn_unevaluated_string_prefix : Warning<
+ "encoding prefix '%0' on an unevaluated string literal has no effect"
+ "%select{| and is incompatible with c++2c}1">,
+ InGroup<DiagGroup<"invalid-unevaluated-string">>;
+def err_unevaluated_string_prefix : Error<
+ "an unevaluated string literal cannot have an encoding prefix">;
+def err_unevaluated_string_udl : Error<
+ "an unevaluated string literal cannot be a user-defined literal">;
+def err_unevaluated_string_invalid_escape_sequence : Error<
+ "invalid escape sequence '%0' in an unevaluated string literal">;
def err_string_concat_mixed_suffix : Error<
"differing user-defined suffixes ('%0' and '%1') in string literal "
"concatenation">;
@@ -257,7 +311,9 @@ def err_bad_character_encoding : Error<
def warn_bad_character_encoding : ExtWarn<
"illegal character encoding in character literal">,
InGroup<InvalidSourceEncoding>;
-def err_lexing_string : Error<"failure when lexing a string">;
+def err_lexing_string : Error<"failure when lexing a string literal">;
+def err_lexing_char : Error<"failure when lexing a character literal">;
+def err_lexing_numeric : Error<"failure when lexing a numeric literal">;
def err_placeholder_in_source : Error<"editor placeholder in source file">;
//===----------------------------------------------------------------------===//
@@ -301,11 +357,9 @@ def pp_pragma_sysheader_in_main_file : Warning<
"#pragma system_header ignored in main file">,
InGroup<DiagGroup<"pragma-system-header-outside-header">>;
-def err_pragma_include_instead_not_sysheader : Error<
- "'#pragma clang include_instead' cannot be used outside of system headers">;
-def err_pragma_include_instead_system_reserved : Error<
- "header '%0' is an implementation detail; #include %select{'%2'|either '%2' "
- "or '%3'|one of %2}1 instead">;
+def err_illegal_use_of_flt_eval_macro : Error<
+ "'__FLT_EVAL_METHOD__' cannot be expanded inside a scope containing "
+ "'#pragma clang fp eval_method'">;
def pp_poisoning_existing_macro : Warning<"poisoning existing macro">;
def pp_out_of_date_dependency : Warning<
@@ -359,7 +413,15 @@ def ext_pp_include_search_ms : ExtWarn<
def ext_pp_ident_directive : Extension<"#ident is a language extension">;
def ext_pp_include_next_directive : Extension<
"#include_next is a language extension">, InGroup<GNUIncludeNext>;
-def ext_pp_warning_directive : Extension<"#warning is a language extension">;
+
+def ext_pp_warning_directive : Extension<
+ "#warning is a %select{C23|C++23}0 extension">;
+def warn_cxx23_compat_warning_directive : Warning<
+ "#warning is incompatible with C++ standards before C++23">,
+ InGroup<CXXPre23Compat>, DefaultIgnore;
+def warn_c23_compat_warning_directive : Warning<
+ "#warning is incompatible with C standards before C23">,
+ InGroup<CPre23Compat>, DefaultIgnore;
def ext_pp_extra_tokens_at_eol : ExtWarn<
"extra tokens at end of #%0 directive">, InGroup<ExtraTokens>;
@@ -400,6 +462,10 @@ def ext_embedded_directive : Extension<
def ext_missing_varargs_arg : Extension<
"must specify at least one argument for '...' parameter of variadic macro">,
InGroup<GNUZeroVariadicMacroArguments>;
+def warn_cxx17_compat_missing_varargs_arg : Warning<
+ "passing no argument for the '...' parameter of a variadic macro is "
+ "incompatible with C++ standards before C++20">,
+ InGroup<CXXPre20Compat>, DefaultIgnore;
def ext_empty_fnmacro_arg : Extension<
"empty macro arguments are a C99 feature">, InGroup<C99>;
def warn_cxx98_compat_empty_fnmacro_arg : Warning<
@@ -411,7 +477,13 @@ def note_macro_expansion_here : Note<"expansion of macro %0 requested here">;
def ext_pp_opencl_variadic_macros : Extension<
"variadic macros are a Clang extension in OpenCL">;
-def err_pp_invalid_directive : Error<"invalid preprocessing directive">;
+def ext_pp_gnu_line_directive : Extension<
+ "this style of line directive is a GNU extension">,
+ InGroup<GNULineMarker>;
+def err_pp_invalid_directive : Error<
+ "invalid preprocessing directive%select{|, did you mean '#%1'?}0">;
+def warn_pp_invalid_directive : Warning<
+ err_pp_invalid_directive.Summary>, InGroup<DiagGroup<"unknown-directives">>;
def err_pp_directive_required : Error<
"%0 must be used within a preprocessing directive">;
def err_pp_file_not_found : Error<"'%0' file not found">, DefaultFatal;
@@ -430,6 +502,9 @@ def warn_pp_hdrstop_filename_ignored : Warning<
"#pragma hdrstop filename not supported, "
"/Fp can be used to specify precompiled header filename">,
InGroup<ClangClPch>;
+def remark_pp_search_path_usage : Remark<
+ "search path used: '%0'">,
+ InGroup<UsedSearchPath>;
def err_pp_file_not_found_angled_include_not_fatal : Error<
"'%0' file not found with <angled> %select{include|import}1; "
"use \"quotes\" instead">;
@@ -526,6 +601,27 @@ def warn_pragma_warning_expected_number :
ExtWarn<"#pragma warning expected a warning number">,
InGroup<UnknownPragmas>;
+// - #pragma deprecated(...)
+def warn_pragma_deprecated_macro_use :
+ ExtWarn<"macro %0 has been marked as deprecated%select{|: %2}1">,
+ InGroup<DeprecatedPragma>;
+
+// - #pragma clang restrict_expansion(...)
+def warn_pragma_restrict_expansion_macro_use :
+ ExtWarn<"macro %0 has been marked as unsafe for use in headers"
+ "%select{|: %2}1">,
+ InGroup<RestrictExpansionMacro>;
+
+// - Note for macro annotations.
+def note_pp_macro_annotation :
+ Note<"macro marked '%select{deprecated|restrict_expansion|final}0' here">;
+
+// - #pragma clang final(...)
+def warn_pragma_final_macro :
+ ExtWarn<"macro %0 has been marked as final and should not be "
+ "%select{undefined|redefined}1">,
+ InGroup<FinalMacro>, ShowInSystemHeader;
+
// - #pragma execution_character_set(...)
def warn_pragma_exec_charset_expected :
ExtWarn<"#pragma execution_character_set expected '%0'">,
@@ -574,10 +670,10 @@ def warn_pragma_diagnostic_unknown_warning :
ExtWarn<"unknown warning group '%0', ignored">,
InGroup<UnknownWarningOption>;
// - #pragma __debug
+def warn_pragma_debug_missing_command : Warning<
+ "missing debug command">, InGroup<IgnoredPragmas>;
def warn_pragma_debug_unexpected_command : Warning<
"unexpected debug command '%0'">, InGroup<IgnoredPragmas>;
-def warn_pragma_debug_missing_argument : Warning<
- "missing argument to debug command '%0'">, InGroup<IgnoredPragmas>;
def warn_pragma_debug_unknown_module : Warning<
"unknown module '%0'">, InGroup<IgnoredPragmas>;
// #pragma module
@@ -626,7 +722,7 @@ def ext_pp_bad_paste_ms : ExtWarn<
def err_pp_operator_used_as_macro_name : Error<
"C++ operator %0 (aka %1) used as a macro name">;
def ext_pp_operator_used_as_macro_name : Extension<
- err_pp_operator_used_as_macro_name.Text>, InGroup<MicrosoftCppMacro>;
+ err_pp_operator_used_as_macro_name.Summary>, InGroup<MicrosoftCppMacro>;
def err_pp_illegal_floating_literal : Error<
"floating point literal in preprocessor expression">;
def err_pp_line_requires_integer : Error<
@@ -654,6 +750,23 @@ def warn_cxx98_compat_pp_line_too_big : Warning<
"#line number greater than 32767 is incompatible with C++98">,
InGroup<CXX98CompatPedantic>, DefaultIgnore;
+def warn_c23_compat_pp_directive : Warning<
+ "use of a '#%select{<BUG IF SEEN>|elifdef|elifndef}0' directive "
+ "is incompatible with C standards before C23">,
+ InGroup<CPre23Compat>, DefaultIgnore;
+def ext_c23_pp_directive : ExtWarn<
+ "use of a '#%select{<BUG IF SEEN>|elifdef|elifndef}0' directive "
+ "is a C23 extension">,
+ InGroup<C23>;
+def warn_cxx23_compat_pp_directive : Warning<
+ "use of a '#%select{<BUG IF SEEN>|elifdef|elifndef}0' directive "
+ "is incompatible with C++ standards before C++23">,
+ InGroup<CXXPre23Compat>, DefaultIgnore;
+def ext_cxx23_pp_directive : ExtWarn<
+ "use of a '#%select{<BUG IF SEEN>|elifdef|elifndef}0' directive "
+ "is a C++23 extension">,
+ InGroup<CXX23>;
+
def err_pp_visibility_non_macro : Error<"no macro named %0">;
def err_pp_arc_cf_code_audited_syntax : Error<"expected 'begin' or 'end'">;
@@ -771,10 +884,13 @@ def warn_quoted_include_in_framework_header : Warning<
def warn_framework_include_private_from_public : Warning<
"public framework header includes private framework header '%0'"
>, InGroup<FrameworkIncludePrivateFromPublic>;
+def warn_deprecated_module_dot_map : Warning<
+ "'%0' as a module map name is deprecated, rename it to %select{module.modulemap|module.private.modulemap}1%select{| in the 'Modules' directory of the framework}2">,
+ InGroup<DeprecatedModuleDotMap>;
-def warn_auto_module_import : Warning<
+def remark_pp_include_directive_modular_translation : Remark<
"treating #%select{include|import|include_next|__include_macros}0 as an "
- "import of module '%1'">, InGroup<AutoImport>, DefaultIgnore;
+ "import of module '%1'">, InGroup<ModuleIncludeDirectiveTranslation>;
def note_implicit_top_level_module_import_here : Note<
"submodule of top-level module '%0' implicitly imported here">;
def warn_uncovered_module_header : Warning<
@@ -790,6 +906,8 @@ def warn_use_of_private_header_outside_module : Warning<
InGroup<DiagGroup<"private-header">>, DefaultError;
def err_undeclared_use_of_module : Error<
"module %0 does not depend on a module exporting '%1'">;
+def err_undeclared_use_of_module_indirect : Error<
+ "module %0 does not directly depend on a module exporting '%1', which is part of indirectly-used module %2">;
def warn_non_modular_include_in_framework_module : Warning<
"include of non-modular header inside framework module '%0': '%1'">,
InGroup<NonModularIncludeInFrameworkModule>, DefaultIgnore;
@@ -807,6 +925,11 @@ def err_header_import_semi_in_macro : Error<
def err_header_import_not_header_unit : Error<
"header file %0 (aka '%1') cannot be imported because "
"it is not known to be a header unit">;
+def warn_pp_include_angled_in_module_purview : Warning<
+ "'#include <filename>' attaches the declarations to the named module '%0'"
+ ", which is not usually intended; consider moving that directive before "
+ "the module declaration">,
+ InGroup<DiagGroup<"include-angled-in-module-purview">>;
def warn_header_guard : Warning<
"%0 is used as a header guard here, followed by #define of a different macro">,
@@ -836,13 +959,24 @@ def err_pp_eof_in_assume_nonnull : Error<
}
-let CategoryName = "Dependency Directive Source Minimization Issue" in {
+let CategoryName = "Dependency Directive Source Scanner Issue" in {
-def err_dep_source_minimizer_missing_sema_after_at_import : Error<
+def err_dep_source_scanner_missing_semi_after_at_import : Error<
"could not find ';' after @import">;
-def err_dep_source_minimizer_unexpected_tokens_at_import : Error<
+def err_dep_source_scanner_unexpected_tokens_at_import : Error<
"unexpected extra tokens at end of @import declaration">;
}
+def err_pp_double_begin_pragma_unsafe_buffer_usage :
+Error<"already inside '#pragma unsafe_buffer_usage'">;
+
+def err_pp_unmatched_end_begin_pragma_unsafe_buffer_usage :
+Error<"not currently inside '#pragma unsafe_buffer_usage'">;
+
+def err_pp_unclosed_pragma_unsafe_buffer_usage :
+Error<"'#pragma unsafe_buffer_usage' was not ended">;
+
+def err_pp_pragma_unsafe_buffer_usage_syntax :
+Error<"Expected 'begin' or 'end'">;
}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.def b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.def
index 7be81f6b6a95..6d0c1b14acc1 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.def
@@ -90,11 +90,15 @@ VALUE_DIAGOPT(ConstexprBacktraceLimit, 32, DefaultConstexprBacktraceLimit)
VALUE_DIAGOPT(SpellCheckingLimit, 32, DefaultSpellCheckingLimit)
/// Limit number of lines shown in a snippet.
VALUE_DIAGOPT(SnippetLineLimit, 32, DefaultSnippetLineLimit)
+/// Show line number column on the left of snippets.
+VALUE_DIAGOPT(ShowLineNumbers, 1, DefaultShowLineNumbers)
VALUE_DIAGOPT(TabStop, 32, DefaultTabStop) /// The distance between tab stops.
/// Column limit for formatting message diagnostics, or 0 if unused.
VALUE_DIAGOPT(MessageLength, 32, 0)
+DIAGOPT(ShowSafeBufferUsageSuggestions, 1, 0)
+
#undef DIAGOPT
#undef ENUM_DIAGOPT
#undef VALUE_DIAGOPT
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.h
index 17533b38ff5f..099982c3bdd5 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticOptions.h
@@ -46,20 +46,20 @@ enum class DiagnosticLevelMask : unsigned {
};
inline DiagnosticLevelMask operator~(DiagnosticLevelMask M) {
- using UT = std::underlying_type<DiagnosticLevelMask>::type;
+ using UT = std::underlying_type_t<DiagnosticLevelMask>;
return static_cast<DiagnosticLevelMask>(~static_cast<UT>(M));
}
inline DiagnosticLevelMask operator|(DiagnosticLevelMask LHS,
DiagnosticLevelMask RHS) {
- using UT = std::underlying_type<DiagnosticLevelMask>::type;
+ using UT = std::underlying_type_t<DiagnosticLevelMask>;
return static_cast<DiagnosticLevelMask>(
static_cast<UT>(LHS) | static_cast<UT>(RHS));
}
inline DiagnosticLevelMask operator&(DiagnosticLevelMask LHS,
DiagnosticLevelMask RHS) {
- using UT = std::underlying_type<DiagnosticLevelMask>::type;
+ using UT = std::underlying_type_t<DiagnosticLevelMask>;
return static_cast<DiagnosticLevelMask>(
static_cast<UT>(LHS) & static_cast<UT>(RHS));
}
@@ -72,9 +72,10 @@ class DiagnosticOptions : public RefCountedBase<DiagnosticOptions>{
clang::DiagnosticsEngine *, bool);
friend class CompilerInvocation;
+ friend class CompilerInvocationBase;
public:
- enum TextDiagnosticFormat { Clang, MSVC, Vi };
+ enum TextDiagnosticFormat { Clang, MSVC, Vi, SARIF };
// Default values.
enum {
@@ -84,7 +85,8 @@ public:
DefaultTemplateBacktraceLimit = 10,
DefaultConstexprBacktraceLimit = 10,
DefaultSpellCheckingLimit = 50,
- DefaultSnippetLineLimit = 1,
+ DefaultSnippetLineLimit = 16,
+ DefaultShowLineNumbers = 1,
};
// Define simple diagnostic options (with no accessors).
@@ -122,6 +124,10 @@ public:
/// default).
std::vector<std::string> VerifyPrefixes;
+ /// The list of -Wsystem-header-in-module=... options used to override
+ /// whether -Wsystem-headers is enabled on a per-module basis.
+ std::vector<std::string> SystemHeaderWarningsModules;
+
public:
// Define accessors/mutators for diagnostic options of enumeration type.
#define DIAGOPT(Name, Bits, Default)
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParse.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParse.h
index d066d3f71a25..81a8185d25fb 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParse.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParse.h
@@ -15,7 +15,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
ENUM,
#define PARSESTART
#include "clang/Basic/DiagnosticParseKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td
index 7e4b0841e06b..a30ab27566ec 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticParseKinds.td
@@ -157,11 +157,17 @@ def err_duplicate_default_assoc : Error<
"duplicate default generic association">;
def note_previous_default_assoc : Note<
"previous default generic association is here">;
+def ext_generic_with_type_arg : Extension<
+ "passing a type argument as the first operand to '_Generic' is a Clang "
+ "extension">, InGroup<DiagGroup<"generic-type-extension">>;
def ext_c99_feature : Extension<
"'%0' is a C99 extension">, InGroup<C99>;
def ext_c11_feature : Extension<
"'%0' is a C11 extension">, InGroup<C11>;
+def warn_c23_compat_keyword : Warning<
+ "'%0' is incompatible with C standards before C23">,
+ InGroup<CPre23Compat>, DefaultIgnore;
def err_c11_noreturn_misplaced : Error<
"'_Noreturn' keyword must precede function declarator">;
@@ -174,10 +180,11 @@ def err_stmtexpr_file_scope : Error<
"statement expression not allowed at file scope">;
def ext_gnu_statement_expr : Extension<
"use of GNU statement expression extension">, InGroup<GNUStatementExpression>;
+def ext_gnu_statement_expr_macro : Extension<
+ "use of GNU statement expression extension from macro expansion">,
+ InGroup<GNUStatementExpressionFromMacroExpansion>;
def ext_gnu_conditional_expr : Extension<
"use of GNU ?: conditional expression extension, omitting middle operand">, InGroup<GNUConditionalOmittedOperand>;
-def ext_gnu_empty_initializer : Extension<
- "use of GNU empty initializer extension">, InGroup<GNUEmptyInitializer>;
def ext_gnu_array_range : Extension<"use of GNU array range extension">,
InGroup<GNUDesignator>;
def ext_gnu_missing_equal_designator : ExtWarn<
@@ -279,7 +286,7 @@ def err_inline_nested_namespace_definition : Error<
def err_expected_semi_after_attribute_list : Error<
"expected ';' after attribute list">;
def err_expected_semi_after_static_assert : Error<
- "expected ';' after static_assert">;
+ "expected ';' after '%0'">;
def err_expected_semi_for : Error<"expected ';' in 'for' statement specifier">;
def err_single_decl_assign_in_for_range : Error<
"range-based 'for' statement uses ':', not '='">;
@@ -292,8 +299,24 @@ def note_missing_selector_name : Note<
def note_force_empty_selector_name : Note<
"or insert whitespace before ':' to use %0 as parameter name "
"and have an empty entry in the selector">;
-def err_label_end_of_compound_statement : Error<
- "label at end of compound statement: expected statement">;
+def ext_c_label_followed_by_declaration : ExtWarn<
+ "label followed by a declaration is a C23 extension">,
+ InGroup<C23>;
+def warn_c23_compat_label_followed_by_declaration : Warning<
+ "label followed by a declaration is incompatible with C standards before "
+ "C23">, InGroup<CPre23Compat>, DefaultIgnore;
+def ext_c_label_end_of_compound_statement : ExtWarn<
+ "label at end of compound statement is a C23 extension">,
+ InGroup<C23>;
+def ext_cxx_label_end_of_compound_statement : ExtWarn<
+ "label at end of compound statement is a C++23 extension">,
+ InGroup<CXX23>;
+def warn_c23_compat_label_end_of_compound_statement : Warning<
+ "label at end of compound statement is incompatible with C standards before C23">,
+ InGroup<CPre23Compat>, DefaultIgnore;
+def warn_cxx20_compat_label_end_of_compound_statement : Warning<
+ "label at end of compound statement is incompatible with C++ standards before C++23">,
+ InGroup<CXXPre23Compat>, DefaultIgnore;
def err_address_of_label_outside_fn : Error<
"use of address-of-label extension outside of a function body">;
def err_asm_operand_wide_string_literal : Error<
@@ -422,7 +445,7 @@ def err_unexpected_token_in_nested_name_spec : Error<
def err_bool_redeclaration : Error<
"redeclaration of C++ built-in type 'bool'">;
def warn_cxx98_compat_static_assert : Warning<
- "static_assert declarations are incompatible with C++98">,
+ "'static_assert' declarations are incompatible with C++98">,
InGroup<CXX98Compat>, DefaultIgnore;
def ext_ms_static_assert : ExtWarn<
"use of 'static_assert' without inclusion of <assert.h> is a Microsoft "
@@ -430,15 +453,15 @@ def ext_ms_static_assert : ExtWarn<
def ext_cxx_static_assert_no_message : ExtWarn<
"'static_assert' with no message is a C++17 extension">, InGroup<CXX17>;
def ext_c_static_assert_no_message : ExtWarn<
- "'_Static_assert' with no message is a C2x extension">, InGroup<C2x>;
+ "'_Static_assert' with no message is a C23 extension">, InGroup<C23>;
def warn_cxx14_compat_static_assert_no_message : Warning<
"'static_assert' with no message is incompatible with C++ standards before "
"C++17">,
DefaultIgnore, InGroup<CXXPre17Compat>;
def warn_c17_compat_static_assert_no_message : Warning<
"'_Static_assert' with no message is incompatible with C standards before "
- "C2x">,
- DefaultIgnore, InGroup<CPre2xCompat>;
+ "C23">,
+ DefaultIgnore, InGroup<CPre23Compat>;
def err_function_definition_not_allowed : Error<
"function definition is not allowed here">;
def err_expected_end_of_enumerator : Error<
@@ -535,6 +558,8 @@ def err_invalid_operator_on_type : Error<
"cannot use %select{dot|arrow}0 operator on a type">;
def err_expected_unqualified_id : Error<
"expected %select{identifier|unqualified-id}0">;
+def err_while_loop_outside_of_a_function : Error<
+ "while loop outside of a function">;
def err_brackets_go_after_unqualified_id : Error<
"brackets are not allowed here; to declare an array, "
"place the brackets after the %select{identifier|name}0">;
@@ -549,6 +574,12 @@ def err_expected_init_in_condition_lparen : Error<
"variable declaration in condition cannot have a parenthesized initializer">;
def err_extraneous_rparen_in_condition : Error<
"extraneous ')' after condition, expected a statement">;
+def ext_alias_in_init_statement : ExtWarn<
+ "alias declaration in this context is a C++23 extension">,
+ InGroup<CXX23>;
+def warn_cxx20_alias_in_init_statement : Warning<
+ "alias declaration in this context is incompatible with C++ standards before C++23">,
+ DefaultIgnore, InGroup<CXXPre23Compat>;
def warn_dangling_else : Warning<
"add explicit braces to avoid dangling else">,
InGroup<DanglingElse>;
@@ -586,6 +617,9 @@ def warn_cxx17_compat_using_enum_declaration : Warning<
def ext_using_enum_declaration : ExtWarn<
"using enum declaration is a C++20 extension">,
InGroup<CXX20>;
+def err_using_enum_expect_identifier : Error<
+ "using enum %select{requires an enum or typedef name|"
+ "does not permit an elaborated enum specifier}0">;
def err_constructor_bad_name : Error<
"missing return type for function %0; did you mean the constructor name %1?">;
def err_destructor_tilde_identifier : Error<
@@ -626,6 +660,13 @@ def ext_constexpr_if : ExtWarn<
def warn_cxx14_compat_constexpr_if : Warning<
"constexpr if is incompatible with C++ standards before C++17">,
DefaultIgnore, InGroup<CXXPre17Compat>;
+def ext_consteval_if : ExtWarn<
+ "consteval if is a C++23 extension">,
+ InGroup<CXX23>;
+def warn_cxx20_compat_consteval_if : Warning<
+ "consteval if is incompatible with C++ standards before C++23">,
+ InGroup<CXXPre23Compat>, DefaultIgnore;
+
def ext_init_statement : ExtWarn<
"'%select{if|switch}0' initialization statements are a C++17 extension">,
InGroup<CXX17>;
@@ -668,6 +709,8 @@ def warn_cxx98_compat_noexcept_expr : Warning<
InGroup<CXX98Compat>, DefaultIgnore;
def warn_cxx98_compat_nullptr : Warning<
"'nullptr' is incompatible with C++98">, InGroup<CXX98Compat>, DefaultIgnore;
+def ext_c_nullptr : Extension<
+ "'nullptr' is a C23 extension">, InGroup<C23>;
def warn_wrong_clang_attr_namespace : Warning<
"'__clang__' is a predefined macro name, not an attribute scope specifier; "
@@ -682,8 +725,17 @@ def warn_cxx14_compat_ns_enum_attribute : Warning<
def warn_cxx98_compat_alignas : Warning<"'alignas' is incompatible with C++98">,
InGroup<CXX98Compat>, DefaultIgnore;
def warn_cxx98_compat_attribute : Warning<
- "C++11 attribute syntax is incompatible with C++98">,
+ "[[]] attributes are incompatible with C++ standards before C++11">,
InGroup<CXX98Compat>, DefaultIgnore;
+def warn_ext_cxx11_attributes : Extension<
+ "[[]] attributes are a C++11 extension">,
+ InGroup<CXX11>;
+def warn_pre_c23_compat_attributes : Warning<
+ "[[]] attributes are incompatible with C standards before C23">,
+ DefaultIgnore, InGroup<CPre23Compat>;
+def warn_ext_c23_attributes : Extension<
+ "[[]] attributes are a C23 extension">,
+ InGroup<C23>;
def err_cxx11_attribute_forbids_arguments : Error<
"attribute %0 cannot have an argument list">;
def err_attribute_requires_arguments : Error<
@@ -699,13 +751,17 @@ def ext_using_attribute_ns : ExtWarn<
def err_using_attribute_ns_conflict : Error<
"attribute with scope specifier cannot follow default scope specifier">;
def err_attributes_not_allowed : Error<"an attribute list cannot appear here">;
+def err_keyword_not_allowed : Error<"%0 cannot appear here">;
def ext_cxx11_attr_placement : ExtWarn<
- "ISO C++ does not allow an attribute list to appear here">,
+ "ISO C++ does not allow %select{an attribute list|%0}1 to appear here">,
InGroup<DiagGroup<"cxx-attribute-extension">>;
def err_attributes_misplaced : Error<"misplaced attributes; expected attributes here">;
+def err_keyword_misplaced : Error<"misplaced %0; expected %0 here">;
def err_l_square_l_square_not_attribute : Error<
"C++11 only allows consecutive left square brackets when "
"introducing an attribute">;
+def err_attribute_argument_parm_pack_not_supported : Error<
+ "attribute %0 does not support argument pack expansion">;
def err_ms_declspec_type : Error<
"__declspec attributes must be an identifier or string literal">;
def err_ms_property_no_getter_or_putter : Error<
@@ -742,8 +798,9 @@ def err_unknown_template_name : Error<
"unknown template name %0">;
def err_expected_comma_greater : Error<
"expected ',' or '>' in template-parameter-list">;
-def err_class_on_template_template_param : Error<
- "template template parameter requires 'class' after the parameter list">;
+def err_class_on_template_template_param
+ : Error<"template template parameter requires 'class'%select{| or "
+ "'typename'}0 after the parameter list">;
def ext_template_template_param_typename : ExtWarn<
"template template parameter using 'typename' is a C++17 extension">,
InGroup<CXX17>;
@@ -754,7 +811,8 @@ def warn_cxx14_compat_template_template_param_typename : Warning<
def err_template_spec_syntax_non_template : Error<
"identifier followed by '<' indicates a class template specialization but "
"%0 %select{does not refer to a template|refers to a function template|"
- "<unused>|refers to a variable template|<unused>|refers to a concept}1">;
+ "<unused>|refers to a variable template|<unused>|<unused>|"
+ "refers to a concept}1">;
def err_id_after_template_in_nested_name_spec : Error<
"expected template name after 'template' keyword in nested name specifier">;
def err_unexpected_template_in_unqualified_id : Error<
@@ -806,10 +864,10 @@ def err_requires_expr_expected_type_constraint : Error<
def err_requires_expr_simple_requirement_noexcept : Error<
"'noexcept' can only be used in a compound requirement (with '{' '}' around "
"the expression)">;
-def warn_requires_expr_in_simple_requirement : Warning<
- "this requires expression will only be checked for syntactic validity; did "
+def err_requires_expr_in_simple_requirement : Error<
+ "requires expression in requirement body; did "
"you intend to place it in a nested requirement? (add another 'requires' "
- "before the expression)">, InGroup<DiagGroup<"requires-expression">>;
+ "before the expression)">;
def err_missing_dependent_template_keyword : Error<
"use 'template' keyword to treat '%0' as a dependent template name">;
@@ -944,6 +1002,9 @@ def err_duplicate_class_virt_specifier : Error<
def err_duplicate_virt_specifier : Error<
"class member already marked '%0'">;
+def err_virt_specifier_outside_class : Error<
+ "'%0' specifier is not allowed outside a class definition">;
+
def err_expected_parameter_pack : Error<
"expected the name of a parameter pack">;
def err_paren_sizeof_parameter_pack : Error<
@@ -963,7 +1024,8 @@ def warn_cxx98_compat_lambda : Warning<
"lambda expressions are incompatible with C++98">,
InGroup<CXX98Compat>, DefaultIgnore;
def err_lambda_decl_specifier_repeated : Error<
- "%select{'mutable'|'constexpr'|'consteval'}0 cannot appear multiple times in a lambda declarator">;
+ "%select{'mutable'|'static'|'constexpr'|'consteval'}0 cannot "
+ "appear multiple times in a lambda declarator">;
def err_lambda_capture_misplaced_ellipsis : Error<
"ellipsis in pack %select{|init-}0capture must appear %select{after|before}0 "
"the name of the capture">;
@@ -972,14 +1034,15 @@ def err_lambda_capture_multiple_ellipses : Error<
def err_capture_default_first : Error<
"capture default must be first">;
def ext_decl_attrs_on_lambda : ExtWarn<
- "an attribute specifier sequence in this position is a C++2b extension">,
- InGroup<CXX2b>;
+ "%select{an attribute specifier sequence|%0}1 in this position "
+ "is a C++23 extension">, InGroup<CXX23AttrsOnLambda>;
def ext_lambda_missing_parens : ExtWarn<
- "lambda without a parameter clause is a C++2b extension">,
- InGroup<CXX2b>;
+ "lambda without a parameter clause is a C++23 extension">,
+ InGroup<CXX23>;
def warn_cxx20_compat_decl_attrs_on_lambda : Warning<
- "an attribute specifier sequence in this position is incompatible with C++ "
- "standards before C++2b">, InGroup<CXXPre2bCompat>, DefaultIgnore;
+ "%select{an attribute specifier sequence|%1}0 in this position "
+ "is incompatible with C++ standards before C++23">,
+ InGroup<CXXPre23Compat>, DefaultIgnore;
// C++17 lambda expressions
def err_expected_star_this_capture : Error<
@@ -1002,6 +1065,17 @@ def warn_cxx17_compat_lambda_template_parameter_list: Warning<
def err_lambda_template_parameter_list_empty : Error<
"lambda template parameter list cannot be empty">;
+// C++23 static lambdas
+def err_static_lambda: ExtWarn<
+ "static lambdas are a C++23 extension">, InGroup<CXX23>;
+def warn_cxx20_compat_static_lambda : Warning<
+ "static lambdas are incompatible with C++ standards before C++23">,
+ InGroup<CXXPre23Compat>, DefaultIgnore;
+def err_static_mutable_lambda : Error<
+ "lambda cannot be both mutable and static">;
+def err_static_lambda_captures : Error<
+ "a static lambda cannot have any captures">;
+
// Availability attribute
def err_expected_version : Error<
"expected a version of the form 'major[.minor[.subminor]]'">;
@@ -1046,7 +1120,7 @@ def err_availability_query_repeated_star : Error<
// External source symbol attribute
def err_external_source_symbol_expected_keyword : Error<
- "expected 'language', 'defined_in', or 'generated_declaration'">;
+ "expected 'language', 'defined_in', 'generated_declaration', or 'USR'">;
def err_external_source_symbol_duplicate_clause : Error<
"duplicate %0 clause in an 'external_source_symbol' attribute">;
@@ -1102,6 +1176,9 @@ def warn_pragma_expected_integer : Warning<
def warn_pragma_ms_struct : Warning<
"incorrect use of '#pragma ms_struct on|off' - ignored">,
InGroup<IgnoredPragmas>;
+def warn_pragma_ms_fenv_access : Warning<
+ "incorrect use of '#pragma fenv_access (on|off)' - ignored">,
+ InGroup<IgnoredPragmas>;
def warn_pragma_extra_tokens_at_eol : Warning<
"extra tokens at end of '#pragma %0' - ignored">,
InGroup<IgnoredPragmas>;
@@ -1144,10 +1221,6 @@ def warn_pragma_pack_malformed : Warning<
def warn_pragma_intrinsic_builtin : Warning<
"%0 is not a recognized builtin%select{|; consider including <intrin.h> to access non-builtin intrinsics}1">,
InGroup<IgnoredPragmaIntrinsic>;
-// - #pragma optimize
-def warn_pragma_optimize : Warning<
- "'#pragma optimize' is not supported">,
- InGroup<IgnoredPragmaOptimize>;
// - #pragma unused
def warn_pragma_unused_expected_var : Warning<
"expected '#pragma unused' argument to be a variable name">,
@@ -1167,9 +1240,6 @@ def ext_stdc_pragma_ignored : ExtWarn<"unknown pragma in STDC namespace">,
// The C standard 7.6.1p2 says "The [FENV_ACCESS] pragma shall occur either
// outside external declarations or preceding all explicit declarations and
// statements inside a compound statement.
-def err_pragma_stdc_fenv_access_scope : Error<
- "'#pragma STDC FENV_ACCESS' can only appear at file scope or at the start of"
- " a compound statement">;
def warn_stdc_fenv_round_not_supported :
Warning<"pragma STDC FENV_ROUND is not supported">,
InGroup<UnknownPragmas>;
@@ -1218,8 +1288,6 @@ def err_pragma_attribute_extra_tokens_after_attribute : Error<
"extra tokens after attribute in a '#pragma clang attribute push'">;
def err_pragma_attribute_unsupported_attribute : Error<
"attribute %0 is not supported by '#pragma clang attribute'">;
-def err_pragma_attribute_multiple_attributes : Error<
- "more than one attribute specified in '#pragma clang attribute push'">;
def err_pragma_attribute_expected_attribute_syntax : Error<
"expected an attribute that is specified using the GNU, C++11 or '__declspec'"
" syntax">;
@@ -1248,6 +1316,13 @@ def err_pragma_attribute_namespace_on_attribute : Error<
def note_pragma_attribute_namespace_on_attribute : Note<
"omit the namespace to add attributes to the most-recently"
" pushed attribute group">;
+def warn_no_support_for_eval_method_source_on_m32 : Warning<
+ "Setting the floating point evaluation method to `source` on a target"
+ " without SSE is not supported.">, InGroup<Pragmas>;
+// - #pragma __debug
+def warn_pragma_debug_dependent_argument : Warning<
+ "%select{value|type}0-dependent expression passed as an argument to debug "
+ "command">, InGroup<IgnoredPragmas>;
// OpenCL EXTENSION pragma (OpenCL 1.1 [9.1])
def warn_pragma_expected_colon : Warning<
@@ -1255,12 +1330,13 @@ def warn_pragma_expected_colon : Warning<
def warn_pragma_expected_predicate : Warning<
"expected %select{'enable', 'disable', 'begin' or 'end'|'disable'}0 - ignoring">, InGroup<IgnoredPragmas>;
def warn_pragma_unknown_extension : Warning<
- "unknown OpenCL extension %0 - ignoring">, InGroup<IgnoredPragmas>;
+ "OpenCL extension %0 unknown or does not require pragma - ignoring">, InGroup<IgnoredPragmas>;
def warn_pragma_unsupported_extension : Warning<
"unsupported OpenCL extension %0 - ignoring">, InGroup<IgnoredPragmas>;
def warn_pragma_extension_is_core : Warning<
"OpenCL extension %0 is core feature or supported optional core feature - ignoring">,
InGroup<OpenCLCoreFeaturesDiagGroup>, DefaultIgnore;
+def err_modifier_expected_colon : Error<"missing ':' after %0 modifier">;
// OpenCL errors.
def err_opencl_taking_function_address_parser : Error<
@@ -1272,12 +1348,44 @@ def err_opencl_logical_exclusive_or : Error<
def err_openclcxx_virtual_function : Error<
"virtual functions are not supported in C++ for OpenCL">;
+// OpenACC Support.
+def warn_pragma_acc_ignored
+ : Warning<"unexpected '#pragma acc ...' in program">,
+ InGroup<SourceUsesOpenACC>,
+ DefaultIgnore;
+def err_acc_unexpected_directive
+ : Error<"unexpected OpenACC directive %select{|'#pragma acc %1'}0">;
+def warn_pragma_acc_unimplemented
+ : Warning<"OpenACC directives not yet implemented, pragma ignored">,
+ InGroup<SourceUsesOpenACC>;
+def err_acc_invalid_directive
+ : Error<"invalid OpenACC directive %select{%1|'%1 %2'}0">;
+def err_acc_invalid_clause : Error<"invalid OpenACC clause %0">;
+def err_acc_missing_directive : Error<"expected OpenACC directive">;
+def err_acc_invalid_open_paren
+ : Error<"expected clause-list or newline in OpenACC directive">;
+def err_acc_invalid_default_clause_kind
+ : Error<"invalid value for 'default' clause; expected 'present' or 'none'">;
+def err_acc_invalid_tag_kind
+ : Error<"invalid tag %0 on '%1' %select{directive|clause}2">;
+def err_acc_expected_reduction_operator
+ : Error<"missing reduction operator, expected '+', '*', 'max', 'min', '&', "
+ "'|', '^', '&&', or '||', follwed by a ':'">;
+def err_acc_invalid_reduction_operator
+ : Error<"invalid reduction operator, expected '+', '*', 'max', 'min', "
+ "'&', '|', '^', '&&', or '||'">;
+def err_acc_incorrect_bind_arg : Error<"expected identifier or string literal">;
+
// OpenMP support.
def warn_pragma_omp_ignored : Warning<
"unexpected '#pragma omp ...' in program">, InGroup<SourceUsesOpenMP>, DefaultIgnore;
def warn_omp_extra_tokens_at_eol : Warning<
"extra tokens at the end of '#pragma omp %0' are ignored">,
InGroup<ExtraTokens>;
+def err_omp_multiple_step_or_linear_modifier : Error<
+ "multiple %select{'step size'|'linear modifier'}0 found in linear clause">;
+def err_omp_deprecate_old_syntax: Error<
+ "old syntax '%0' on '%1' clause was deprecated, use new syntax '%2'">;
def warn_pragma_expected_colon_r_paren : Warning<
"missing ':' or ')' after %0 - ignoring">, InGroup<IgnoredPragmas>;
def err_omp_unknown_directive : Error<
@@ -1286,8 +1394,12 @@ def err_omp_unexpected_directive : Error<
"unexpected OpenMP directive %select{|'#pragma omp %1'}0">;
def err_omp_expected_punc : Error<
"expected ',' or ')' in '%0' %select{clause|directive}1">;
+def warn_clause_expected_string : Warning<
+ "expected string literal in 'clause %0' - ignoring">, InGroup<IgnoredPragmas>;
def err_omp_unexpected_clause : Error<
"unexpected OpenMP clause '%0' in directive '#pragma omp %1'">;
+def err_omp_unexpected_clause_extension_only : Error<
+ "OpenMP clause '%0' is only available as extension, use '-fopenmp-extensions'">;
def err_omp_immediate_directive : Error<
"'#pragma omp %0' %select{|with '%2' clause }1cannot be an immediate substatement">;
def err_omp_expected_identifier_for_critical : Error<
@@ -1300,17 +1412,21 @@ def err_omp_expected_punc_after_iterator : Error<
"expected ',' or ')' after iterator specifier">;
def err_omp_decl_in_declare_simd_variant : Error<
"function declaration is expected after 'declare %select{simd|variant}0' directive">;
+def err_omp_sink_and_source_iteration_not_allowd: Error<" '%0 %select{sink:|source:}1' must be with '%select{omp_cur_iteration - 1|omp_cur_iteration}1'">;
def err_omp_unknown_map_type : Error<
"incorrect map type, expected one of 'to', 'from', 'tofrom', 'alloc', 'release', or 'delete'">;
def err_omp_unknown_map_type_modifier : Error<
- "incorrect map type modifier, expected 'always', 'close', "
- "%select{or 'mapper'|'mapper', or 'present'}0">;
+ "incorrect map type modifier, expected one of: 'always', 'close', 'mapper'"
+ "%select{|, 'present'|, 'present', 'iterator'}0%select{|, 'ompx_hold'}1">;
def err_omp_map_type_missing : Error<
"missing map type">;
def err_omp_map_type_modifier_missing : Error<
"missing map type modifier">;
def err_omp_declare_simd_inbranch_notinbranch : Error<
"unexpected '%0' clause, '%1' is specified already">;
+def err_omp_expected_clause_argument
+ : Error<"expected '%0' clause with an argument on '#pragma omp %1' "
+ "construct">;
def err_expected_end_declare_target_or_variant : Error<
"expected '#pragma omp end declare %select{target|variant}0'">;
def err_expected_begin_declare_variant
@@ -1327,23 +1443,40 @@ def warn_omp_unknown_assumption_clause_without_args
def note_omp_assumption_clause_continue_here
: Note<"the ignored tokens spans until here">;
def err_omp_declare_target_unexpected_clause: Error<
- "unexpected '%0' clause, only %select{'device_type'|'to' or 'link'|'to', 'link' or 'device_type'}1 clauses expected">;
+ "unexpected '%0' clause, only %select{'device_type'|'to' or 'link'|'to', 'link' or 'device_type'|'device_type', 'indirect'|'to', 'link', 'device_type' or 'indirect'}1 clauses expected">;
+def err_omp_declare_target_unexpected_clause_52: Error<
+ "unexpected '%0' clause, only %select{'device_type'|'enter' or 'link'|'enter', 'link' or 'device_type'|'device_type', 'indirect'|'enter', 'link', 'device_type' or 'indirect'}1 clauses expected">;
def err_omp_begin_declare_target_unexpected_implicit_to_clause: Error<
"unexpected '(', only 'to', 'link' or 'device_type' clauses expected for 'begin declare target' directive">;
-def err_omp_declare_target_unexpected_clause_after_implicit_to: Error<
+def err_omp_declare_target_wrong_clause_after_implicit_to: Error<
"unexpected clause after an implicit 'to' clause">;
+def err_omp_declare_target_wrong_clause_after_implicit_enter: Error<
+ "unexpected clause after an implicit 'enter' clause">;
def err_omp_declare_target_missing_to_or_link_clause: Error<
- "expected at least one 'to' or 'link' clause">;
+ "expected at least one %select{'to' or 'link'|'to', 'link' or 'indirect'}0 clause">;
+def err_omp_declare_target_missing_enter_or_link_clause: Error<
+ "expected at least one %select{'enter' or 'link'|'enter', 'link' or 'indirect'}0 clause">;
+def err_omp_declare_target_unexpected_to_clause: Error<
+ "unexpected 'to' clause, use 'enter' instead">;
+def err_omp_declare_target_unexpected_enter_clause: Error<
+ "unexpected 'enter' clause, use 'to' instead">;
def err_omp_declare_target_multiple : Error<
"%0 appears multiple times in clauses on the same declare target directive">;
+def err_omp_declare_target_indirect_device_type: Error<
+ "only 'device_type(any)' clause is allowed with indirect clause">;
def err_omp_expected_clause: Error<
"expected at least one clause on '#pragma omp %0' directive">;
def err_omp_mapper_illegal_identifier : Error<
"illegal OpenMP user-defined mapper identifier">;
def err_omp_mapper_expected_declarator : Error<
"expected declarator on 'omp declare mapper' directive">;
+def err_omp_unexpected_append_op : Error<
+ "unexpected operation specified in 'append_args' clause, expected 'interop'">;
+def err_omp_unexpected_execution_modifier : Error<
+ "unexpected 'execution' modifier in non-executable context">;
def err_omp_declare_variant_wrong_clause : Error<
- "expected '%0' clause on 'omp declare variant' directive">;
+ "expected %select{'match'|'match', 'adjust_args', or 'append_args'}0 clause "
+ "on 'omp declare variant' directive">;
def err_omp_declare_variant_duplicate_nested_trait : Error<
"nested OpenMP context selector contains duplicated trait '%0'"
" in selector '%1' and set '%2' with different score">;
@@ -1354,11 +1487,13 @@ def warn_omp_declare_variant_string_literal_or_identifier
"%select{set|selector|property}0; "
"%select{set|selector|property}0 skipped">,
InGroup<OpenMPClauses>;
-def warn_unknown_begin_declare_variant_isa_trait
+def warn_unknown_declare_variant_isa_trait
: Warning<"isa trait '%0' is not known to the current target; verify the "
"spelling or consider restricting the context selector with the "
"'arch' selector further">,
InGroup<SourceUsesOpenMP>;
+def note_ompx_bare_clause : Note<
+ "OpenMP extension clause '%0' only allowed with '#pragma omp %1'">;
def note_omp_declare_variant_ctx_options
: Note<"context %select{set|selector|property}0 options are: %1">;
def warn_omp_declare_variant_expected
@@ -1436,6 +1571,21 @@ def warn_omp51_compat_attributes : Warning<
"specifying OpenMP directives with [[]] is incompatible with OpenMP "
"standards before OpenMP 5.1">,
InGroup<OpenMPPre51Compat>, DefaultIgnore;
+def err_omp_expected_colon : Error<"missing ':' in %0">;
+def err_omp_missing_comma : Error< "missing ',' after %0">;
+def err_omp_expected_context_selector
+ : Error<"expected valid context selector in %0">;
+def err_omp_requires_out_inout_depend_type : Error<
+ "reserved locator 'omp_all_memory' requires 'out' or 'inout' "
+ "dependency types">;
+def warn_omp_more_one_omp_all_memory : Warning<
+ "reserved locator 'omp_all_memory' cannot be specified more than once">,
+ InGroup<OpenMPClauses>;
+def warn_omp_depend_in_ordered_deprecated : Warning<"'depend' clause for"
+ " 'ordered' is deprecated; use 'doacross' instead">, InGroup<Deprecated>;
+def warn_omp_invalid_attribute_for_ompx_attributes : Warning<"'ompx_attribute' clause only allows "
+ "'amdgpu_flat_work_group_size', 'amdgpu_waves_per_eu', and 'launch_bounds'; "
+ "%0 is ignored">, InGroup<OpenMPExtensions>;
// Pragma loop support.
def err_pragma_loop_missing_argument : Error<
@@ -1453,26 +1603,31 @@ def note_pragma_loop_invalid_vectorize_option : Note<
"vectorize_width(X, scalable) where X is an integer, or vectorize_width('fixed' or 'scalable')">;
def err_pragma_fp_invalid_option : Error<
- "%select{invalid|missing}0 option%select{ %1|}0; expected 'contract', 'reassociate' or 'exceptions'">;
+ "%select{invalid|missing}0 option%select{ %1|}0; expected 'contract', 'reassociate', 'reciprocal', or 'exceptions'">;
def err_pragma_fp_invalid_argument : Error<
"unexpected argument '%0' to '#pragma clang fp %1'; expected "
"%select{"
"'fast' or 'on' or 'off'|"
"'on' or 'off'|"
- "'ignore', 'maytrap' or 'strict'}2">;
+ "'on' or 'off'|"
+ "'ignore', 'maytrap' or 'strict'|"
+ "'source', 'double' or 'extended'}2">;
def err_pragma_invalid_keyword : Error<
"invalid argument; expected 'enable'%select{|, 'full'}0%select{|, 'assume_safety'}1 or 'disable'">;
def err_pragma_pipeline_invalid_keyword : Error<
"invalid argument; expected 'disable'">;
+// API notes.
+def err_type_unparsed : Error<"unparsed tokens following type">;
+
// Pragma unroll support.
def warn_pragma_unroll_cuda_value_in_parens : Warning<
"argument to '#pragma unroll' should not be in parentheses in CUDA C/C++">,
InGroup<CudaCompat>;
def warn_cuda_attr_lambda_position : Warning<
- "nvcc does not allow '__%0__' to appear after '()' in lambdas">,
+ "nvcc does not allow '__%0__' to appear after the parameter list in lambdas">,
InGroup<CudaCompat>;
def warn_pragma_force_cuda_host_device_bad_arg : Warning<
"incorrect use of #pragma clang force_cuda_host_device begin|end">,
@@ -1480,6 +1635,12 @@ def warn_pragma_force_cuda_host_device_bad_arg : Warning<
def err_pragma_cannot_end_force_cuda_host_device : Error<
"force_cuda_host_device end pragma without matching "
"force_cuda_host_device begin">;
+
+def warn_ext_int_deprecated : Warning<
+ "'_ExtInt' is deprecated; use '_BitInt' instead">, InGroup<DeprecatedType>;
+def ext_bit_int : Extension<
+ "'_BitInt' in %select{C17 and earlier|C++}0 is a Clang extension">,
+ InGroup<DiagGroup<"bit-int-extension">>;
} // end of Parse Issue category.
let CategoryName = "Modules Issue" in {
@@ -1489,8 +1650,12 @@ def err_module_expected_ident : Error<
"expected a module name after '%select{module|import}0'">;
def err_attribute_not_module_attr : Error<
"%0 attribute cannot be applied to a module">;
+def err_keyword_not_module_attr : Error<
+ "%0 cannot be applied to a module">;
def err_attribute_not_import_attr : Error<
"%0 attribute cannot be applied to a module import">;
+def err_keyword_not_import_attr : Error<
+ "%0 cannot be applied to a module import">;
def err_module_expected_semi : Error<
"expected ';' after module name">;
def err_global_module_introducer_not_at_start : Error<
@@ -1502,7 +1667,13 @@ def err_private_module_fragment_expected_semi : Error<
"expected ';' after private module fragment declaration">;
def err_missing_before_module_end : Error<"expected %0 at end of module">;
def err_unsupported_module_partition : Error<
- "sorry, module partitions are not yet supported">;
+ "module partitions are only supported for C++20 onwards">;
+def err_import_not_allowed_here : Error<
+ "imports must immediately follow the module declaration">;
+def err_partition_import_outside_module : Error<
+ "module partition imports must be within a module purview">;
+def err_import_in_wrong_fragment : Error<
+ "module%select{| partition}0 imports cannot be in the %select{global|private}1 module fragment">;
def err_export_empty : Error<"export declaration cannot be empty">;
}
@@ -1525,14 +1696,16 @@ def note_meant_to_use_typename : Note<
let CategoryName = "Coroutines Issue" in {
def err_for_co_await_not_range_for : Error<
"'co_await' modifier can only be applied to range-based for loop">;
+def warn_deprecated_for_co_await : Warning<
+ "'for co_await' belongs to CoroutineTS instead of C++20, which is deprecated">,
+ InGroup<DeprecatedCoroutine>;
}
let CategoryName = "Concepts Issue" in {
def err_concept_definition_not_identifier : Error<
"name defined in concept definition must be an identifier">;
-def ext_concept_legacy_bool_keyword : ExtWarn<
- "ISO C++20 does not permit the 'bool' keyword after 'concept'">,
- InGroup<DiagGroup<"concepts-ts-compat">>;
+def err_concept_legacy_bool_keyword : Error<
+ "ISO C++ does not permit the 'bool' keyword after 'concept'">;
def err_placeholder_expected_auto_or_decltype_auto : Error<
"expected 'auto' or 'decltype(auto)' after concept name">;
}
@@ -1547,4 +1720,16 @@ def warn_max_tokens_total : Warning<
def note_max_tokens_total_override : Note<"total token limit set here">;
+// HLSL Parser Diagnostics
+
+def err_expected_semantic_identifier : Error<
+ "expected HLSL Semantic identifier">;
+def err_invalid_declaration_in_hlsl_buffer : Error<
+ "invalid declaration inside %select{tbuffer|cbuffer}0">;
+def err_unknown_hlsl_semantic : Error<"unknown HLSL semantic %0">;
+def err_hlsl_separate_attr_arg_and_number : Error<"wrong argument format for hlsl attribute, use %0 instead">;
+def ext_hlsl_access_specifiers : ExtWarn<
+ "access specifiers are a clang HLSL extension">,
+ InGroup<HLSLExtension>;
+
} // end of Parser diagnostics
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticRefactoring.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticRefactoring.h
index fc7564047a24..9b628dbeb7c2 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticRefactoring.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticRefactoring.h
@@ -15,7 +15,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
ENUM,
#define REFACTORINGSTART
#include "clang/Basic/DiagnosticRefactoringKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSema.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSema.h
index 7323167aeee8..45014fe21271 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSema.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSema.h
@@ -15,7 +15,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
ENUM,
#define SEMASTART
#include "clang/Basic/DiagnosticSemaKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td
index c57b8eca7deb..07ba4ecf7e12 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -66,6 +66,7 @@ def warn_infinite_recursive_function : Warning<
def warn_comma_operator : Warning<"possible misuse of comma operator here">,
InGroup<DiagGroup<"comma">>, DefaultIgnore;
def note_cast_to_void : Note<"cast expression to void to silence warning">;
+def note_cast_operand_to_int : Note<"cast one or both operands to int to silence this warning">;
// Constant expressions
def err_expr_not_ice : Error<
@@ -82,11 +83,11 @@ def err_typecheck_converted_constant_expression_indirect : Error<
"bind reference to a temporary">;
def err_expr_not_cce : Error<
"%select{case value|enumerator value|non-type template argument|"
- "array size|explicit specifier argument}0 "
- "is not a constant expression">;
+ "array size|explicit specifier argument|noexcept specifier argument|"
+ "call to 'size()'|call to 'data()'}0 is not a constant expression">;
def ext_cce_narrowing : ExtWarn<
"%select{case value|enumerator value|non-type template argument|"
- "array size|explicit specifier argument}0 "
+ "array size|explicit specifier argument|noexcept specifier argument}0 "
"%select{cannot be narrowed from type %2 to %3|"
"evaluates to %2, which cannot be narrowed to type %3}1">,
InGroup<CXX11Narrowing>, DefaultError, SFINAEFailure;
@@ -111,12 +112,22 @@ def err_expr_not_string_literal : Error<"expression is not a string literal">;
def ext_predef_outside_function : Warning<
"predefined identifier is only valid inside function">,
InGroup<DiagGroup<"predefined-identifier-outside-function">>;
+def ext_init_from_predefined : ExtWarn<
+ "initializing an array from a '%0' predefined identifier is a Microsoft extension">,
+ InGroup<MicrosoftInitFromPredefined>;
+def ext_string_literal_from_predefined : ExtWarn<
+ "expansion of predefined identifier '%0' to a string literal is a Microsoft extension">,
+ InGroup<MicrosoftStringLiteralFromPredefined>;
def warn_float_overflow : Warning<
"magnitude of floating-point constant too large for type %0; maximum is %1">,
InGroup<LiteralRange>;
def warn_float_underflow : Warning<
"magnitude of floating-point constant too small for type %0; minimum is %1">,
InGroup<LiteralRange>;
+def warn_float_compare_literal : Warning<
+ "floating-point comparison is always %select{true|false}0; "
+ "constant cannot be represented exactly in type %1">,
+ InGroup<LiteralRange>;
def warn_double_const_requires_fp64 : Warning<
"double precision constant requires %select{cl_khr_fp64|cl_khr_fp64 and __opencl_c_fp64}0, "
"casting to single precision">;
@@ -126,6 +137,18 @@ def err_half_const_requires_fp16 : Error<
// C99 variable-length arrays
def ext_vla : Extension<"variable length arrays are a C99 feature">,
InGroup<VLAExtension>;
+// In C++ language modes, we warn by default as an extension, while in GNU++
+// language modes, we warn as an extension but add the warning group to -Wall.
+def ext_vla_cxx : ExtWarn<
+ "variable length arrays in C++ are a Clang extension">,
+ InGroup<VLACxxExtension>;
+def ext_vla_cxx_in_gnu_mode : Extension<ext_vla_cxx.Summary>,
+ InGroup<VLACxxExtension>;
+def ext_vla_cxx_static_assert : ExtWarn<
+ "variable length arrays in C++ are a Clang extension; did you mean to use "
+ "'static_assert'?">, InGroup<VLAUseStaticAssert>;
+def ext_vla_cxx_in_gnu_mode_static_assert : Extension<
+ ext_vla_cxx_static_assert.Summary>, InGroup<VLAUseStaticAssert>;
def warn_vla_used : Warning<"variable length array used">,
InGroup<VLA>, DefaultIgnore;
def err_vla_in_sfinae : Error<
@@ -142,7 +165,9 @@ def ext_vla_folded_to_constant : ExtWarn<
"variable length array folded to constant array as an extension">,
InGroup<GNUFoldingConstant>;
def err_vla_unsupported : Error<
- "variable length arrays are not supported for the current target">;
+ "variable length arrays are not supported for %select{the current target|'%1'}0">;
+def err_vla_in_coroutine_unsupported : Error<
+ "variable length arrays in a coroutine are not supported">;
def note_vla_unsupported : Note<
"variable length arrays are not supported for the current target">;
@@ -179,8 +204,10 @@ def err_designator_for_scalar_or_sizeless_init : Error<
def warn_initializer_overrides : Warning<
"initializer %select{partially |}0overrides prior initialization of "
"this subobject">, InGroup<InitializerOverrides>;
-def ext_initializer_overrides : ExtWarn<warn_initializer_overrides.Text>,
+def ext_initializer_overrides : ExtWarn<warn_initializer_overrides.Summary>,
InGroup<InitializerOverrides>, SFINAEFailure;
+def ext_initializer_union_overrides : ExtWarn<warn_initializer_overrides.Summary>,
+ InGroup<InitializerOverrides>, DefaultError, SFINAEFailure;
def err_initializer_overrides_destructed : Error<
"initializer would partially override prior initialization of object of "
"type %1 with non-trivial destruction">;
@@ -196,7 +223,8 @@ def ext_flexible_array_init : Extension<
// C++20 designated initializers
def ext_cxx_designated_init : Extension<
- "designated initializers are a C++20 extension">, InGroup<CXX20Designator>;
+ "designated initializers are a C++20 extension">, InGroup<CXX20Designator>,
+ SuppressInSystemMacro;
def warn_cxx17_compat_designated_init : Warning<
"designated initializers are incompatible with C++ standards before C++20">,
InGroup<CXXPre20CompatPedantic>, DefaultIgnore;
@@ -226,6 +254,10 @@ def ext_imaginary_constant : Extension<
"imaginary constants are a GNU extension">, InGroup<GNUImaginaryConstant>;
def ext_integer_complex : Extension<
"complex integer types are a GNU extension">, InGroup<GNUComplexInteger>;
+def ext_c23_auto_non_plain_identifier : Extension<
+ "type inference of a declaration other than a plain identifier with optional "
+ "trailing attributes is a Clang extension">,
+ InGroup<DiagGroup<"auto-decl-extensions">>;
def err_invalid_saturation_spec : Error<"'_Sat' specifier is only valid on "
"'_Fract' or '_Accum', not '%0'">;
@@ -267,10 +299,12 @@ def err_invalid_vector_double_decl_spec : Error <
def err_invalid_vector_bool_int128_decl_spec : Error <
"use of '__int128' with '__vector bool' requires VSX support enabled (on "
"POWER10 or later)">;
+def err_invalid_vector_int128_decl_spec : Error<
+ "use of '__int128' with '__vector' requires extended Altivec support"
+ " (available on POWER8 or later)">;
def err_invalid_vector_long_long_decl_spec : Error <
- "use of 'long long' with '__vector bool' requires VSX support (available on "
- "POWER7 or later) or extended Altivec support (available on POWER8 or later) "
- "to be enabled">;
+ "use of 'long long' with '__vector' requires VSX support (available on "
+ "POWER7 or later) to be enabled">;
def err_invalid_vector_long_double_decl_spec : Error<
"cannot use 'long double' with '__vector'">;
def warn_vector_long_decl_spec_combination : Warning<
@@ -284,9 +318,9 @@ def err_bad_parameter_name : Error<
"%0 cannot be the name of a parameter">;
def err_bad_parameter_name_template_id : Error<
"parameter name cannot have template arguments">;
-def ext_parameter_name_omitted_c2x : ExtWarn<
- "omitting the parameter name in a function definition is a C2x extension">,
- InGroup<C2x>;
+def ext_parameter_name_omitted_c23 : ExtWarn<
+ "omitting the parameter name in a function definition is a C23 extension">,
+ InGroup<C23>;
def err_anyx86_interrupt_attribute : Error<
"%select{x86|x86-64}0 'interrupt' attribute only applies to functions that "
"have %select{a 'void' return type|"
@@ -294,10 +328,12 @@ def err_anyx86_interrupt_attribute : Error<
"a pointer as the first parameter|a %2 type as the second parameter}1">;
def err_anyx86_interrupt_called : Error<
"interrupt service routine cannot be called directly">;
-def warn_anyx86_interrupt_regsave : Warning<
- "interrupt service routine should only call a function"
- " with attribute 'no_caller_saved_registers'">,
- InGroup<DiagGroup<"interrupt-service-routine">>;
+def warn_anyx86_excessive_regsave : Warning<
+ "%select{interrupt service routine|function with attribute"
+ " 'no_caller_saved_registers'}0 should only call a function"
+ " with attribute 'no_caller_saved_registers'"
+ " or be compiled with '-mgeneral-regs-only'">,
+ InGroup<DiagGroup<"excessive-regsave">>;
def warn_arm_interrupt_calling_convention : Warning<
"call to function without interrupt attribute could clobber interruptee's VFP registers">,
InGroup<Extra>;
@@ -388,10 +424,16 @@ def warn_reserved_extern_symbol: Warning<
"identifier %0 is reserved because %select{"
"<ERROR>|" // ReservedIdentifierStatus::NotReserved
"it starts with '_' at global scope|"
+ "it starts with '_' and has C language linkage|"
"it starts with '__'|"
"it starts with '_' followed by a capital letter|"
"it contains '__'}1">,
InGroup<ReservedIdentifier>, DefaultIgnore;
+def warn_deprecated_literal_operator_id: Warning<
+ "identifier %0 preceded by whitespace in a literal operator declaration "
+ "is deprecated">, InGroup<DeprecatedLiteralOperator>, DefaultIgnore;
+def warn_reserved_module_name : Warning<
+ "%0 is a reserved name for a module">, InGroup<ReservedModuleIdentifier>;
def warn_parameter_size: Warning<
"%0 is a large (%1 bytes) pass-by-value argument; "
@@ -408,18 +450,15 @@ def warn_return_value_udt_incomplete: Warning<
def warn_implicit_function_decl : Warning<
"implicit declaration of function %0">,
InGroup<ImplicitFunctionDeclare>, DefaultIgnore;
-def ext_implicit_function_decl : ExtWarn<
- "implicit declaration of function %0 is invalid in C99">,
- InGroup<ImplicitFunctionDeclare>;
+def ext_implicit_function_decl_c99 : ExtWarn<
+ "call to undeclared function %0; ISO C99 and later do not support implicit "
+ "function declarations">, InGroup<ImplicitFunctionDeclare>;
def note_function_suggestion : Note<"did you mean %0?">;
def err_ellipsis_first_param : Error<
"ISO C requires a named parameter before '...'">;
def err_declarator_need_ident : Error<"declarator requires an identifier">;
def err_language_linkage_spec_unknown : Error<"unknown linkage language">;
-def err_language_linkage_spec_not_ascii : Error<
- "string literal in language linkage specifier cannot have an "
- "encoding-prefix">;
def ext_use_out_of_scope_declaration : ExtWarn<
"use of out-of-scope declaration of %0%select{| whose type is not "
"compatible with that of an implicit declaration}1">,
@@ -434,7 +473,7 @@ def warn_qual_return_type : Warning<
def warn_deprecated_redundant_constexpr_static_def : Warning<
"out-of-line definition of constexpr static data member is redundant "
"in C++17 and is deprecated">,
- InGroup<Deprecated>, DefaultIgnore;
+ InGroup<DeprecatedRedundantConstexprStaticDef>, DefaultIgnore;
def warn_decl_shadow :
Warning<"declaration shadows a %select{"
@@ -445,9 +484,9 @@ def warn_decl_shadow :
"typedef in %2|"
"type alias in %2|"
"structured binding}1">,
- InGroup<Shadow>, DefaultIgnore;
+ InGroup<Shadow>, DefaultIgnore, SuppressInSystemMacro;
def warn_decl_shadow_uncaptured_local :
- Warning<warn_decl_shadow.Text>,
+ Warning<warn_decl_shadow.Summary>,
InGroup<ShadowUncapturedLocal>, DefaultIgnore;
def warn_ctor_parm_shadows_field:
Warning<"constructor parameter %0 shadows the field %1 of %2">,
@@ -483,6 +522,8 @@ def warn_cxx17_compat_decomp_decl_spec : Warning<
def err_decomp_decl_type : Error<
"decomposition declaration cannot be declared with type %0; "
"declared type must be 'auto' or reference to 'auto'">;
+def err_decomp_decl_constraint : Error<
+ "decomposition declaration cannot be declared with constrained 'auto'">;
def err_decomp_decl_parens : Error<
"decomposition declaration cannot be declared with parentheses">;
def err_decomp_decl_template : Error<
@@ -551,11 +592,10 @@ def err_using_decl_can_not_refer_to_class_member : Error<
def warn_cxx17_compat_using_decl_class_member_enumerator : Warning<
"member using declaration naming a non-member enumerator is incompatible "
"with C++ standards before C++20">, InGroup<CXXPre20Compat>, DefaultIgnore;
-def ext_using_decl_class_member_enumerator : ExtWarn<
- "member using declaration naming a non-member enumerator is "
- "a C++20 extension">, InGroup<CXX20>;
def err_using_enum_is_dependent : Error<
"using-enum cannot name a dependent type">;
+def err_using_enum_not_enum : Error<
+ "%0 is not an enumerated type">;
def err_ambiguous_inherited_constructor : Error<
"constructor of %0 inherited from multiple base class subobjects">;
def note_ambiguous_inherited_constructor_using : Note<
@@ -652,6 +692,13 @@ def warn_maybe_falloff_nonvoid_function : Warning<
def warn_falloff_nonvoid_function : Warning<
"non-void function does not return a value">,
InGroup<ReturnType>;
+def warn_const_attr_with_pure_attr : Warning<
+ "'const' attribute imposes more restrictions; 'pure' attribute ignored">,
+ InGroup<IgnoredAttributes>;
+def warn_pure_function_returns_void : Warning<
+ "'%select{pure|const}0' attribute on function returning 'void'; attribute ignored">,
+ InGroup<IgnoredAttributes>;
+
def err_maybe_falloff_nonvoid_block : Error<
"non-void block does not return a value in all control paths">;
def err_falloff_nonvoid_block : Error<
@@ -682,13 +729,24 @@ def warn_unreachable_return : Warning<
def warn_unreachable_loop_increment : Warning<
"loop will run at most once (loop increment never executed)">,
InGroup<UnreachableCodeLoopIncrement>, DefaultIgnore;
+def warn_unreachable_fallthrough_attr : Warning<
+ "fallthrough annotation in unreachable code">,
+ InGroup<UnreachableCodeFallthrough>, DefaultIgnore;
def note_unreachable_silence : Note<
"silence by adding parentheses to mark code as explicitly dead">;
+def warn_unreachable_association : Warning<
+ "due to lvalue conversion of the controlling expression, association of type "
+ "%0 will never be selected because it is %select{of array type|qualified}1">,
+ InGroup<UnreachableCodeGenericAssoc>;
/// Built-in functions.
def ext_implicit_lib_function_decl : ExtWarn<
"implicitly declaring library function '%0' with type %1">,
InGroup<ImplicitFunctionDeclare>;
+def ext_implicit_lib_function_decl_c99 : ExtWarn<
+ "call to undeclared library function '%0' with type %1; ISO C99 and later "
+ "do not support implicit function declarations">,
+ InGroup<ImplicitFunctionDeclare>;
def note_include_header_or_declare : Note<
"include the header <%0> or explicitly provide a declaration for '%1'">;
def note_previous_builtin_declaration : Note<"%0 is a builtin with type %1">;
@@ -811,16 +869,47 @@ def warn_builtin_chk_overflow : Warning<
InGroup<DiagGroup<"builtin-memcpy-chk-size">>;
def warn_fortify_source_overflow
- : Warning<warn_builtin_chk_overflow.Text>, InGroup<FortifySource>;
+ : Warning<warn_builtin_chk_overflow.Summary>, InGroup<FortifySource>;
def warn_fortify_source_size_mismatch : Warning<
"'%0' size argument is too large; destination buffer has size %1,"
" but size argument is %2">, InGroup<FortifySource>;
-def warn_fortify_source_format_overflow : Warning<
+def warn_fortify_strlen_overflow: Warning<
+ "'%0' will always overflow; destination buffer has size %1,"
+ " but the source string has length %2 (including NUL byte)">,
+ InGroup<FortifySource>;
+
+def subst_format_overflow : TextSubstitution<
"'%0' will always overflow; destination buffer has size %1,"
- " but format string expands to at least %2">,
+ " but format string expands to at least %2">;
+
+def warn_format_overflow : Warning<
+ "%sub{subst_format_overflow}0,1,2">,
+ InGroup<FormatOverflow>;
+
+def warn_format_overflow_non_kprintf : Warning<
+ "%sub{subst_format_overflow}0,1,2">,
+ InGroup<FormatOverflowNonKprintf>;
+
+def subst_format_truncation: TextSubstitution<
+ "'%0' will always be truncated; specified size is %1,"
+ " but format string expands to at least %2">;
+
+def warn_format_truncation: Warning<
+ "%sub{subst_format_truncation}0,1,2">,
+ InGroup<FormatTruncation>;
+
+def warn_format_truncation_non_kprintf: Warning<
+ "%sub{subst_format_truncation}0,1,2">,
+ InGroup<FormatTruncationNonKprintf>;
+
+def warn_fortify_scanf_overflow : Warning<
+ "'%0' may overflow; destination buffer in argument %1 has size "
+ "%2, but the corresponding specifier may require size %3">,
InGroup<FortifySource>;
+def err_function_start_invalid_type: Error<
+ "argument must be a function">;
/// main()
// static main() is not an error in C, just in C++.
@@ -908,11 +997,14 @@ def warn_pragma_options_align_reset_failed : Warning<
InGroup<IgnoredPragmas>;
def err_pragma_options_align_mac68k_target_unsupported : Error<
"mac68k alignment pragma is not supported on this target">;
+def warn_pragma_align_not_xl_compatible : Warning<
+ "#pragma align(packed) may not be compatible with objects generated with AIX XL C/C++">,
+ InGroup<AIXCompat>;
def warn_pragma_pack_invalid_alignment : Warning<
"expected #pragma pack parameter to be '1', '2', '4', '8', or '16'">,
InGroup<IgnoredPragmas>;
def err_pragma_pack_invalid_alignment : Error<
- warn_pragma_pack_invalid_alignment.Text>;
+ warn_pragma_pack_invalid_alignment.Summary>;
def warn_pragma_pack_non_default_at_include : Warning<
"non-default #pragma pack value changes the alignment of struct or union "
"members in the included file">, InGroup<PragmaPackSuspiciousInclude>,
@@ -933,7 +1025,8 @@ def warn_pragma_pack_pop_identifier_and_alignment : Warning<
def warn_pragma_pop_failed : Warning<"#pragma %0(pop, ...) failed: %1">,
InGroup<IgnoredPragmas>;
def err_pragma_fc_pp_scope : Error<
- "'#pragma float_control push/pop' can only appear at file scope or namespace scope">;
+ "'#pragma float_control push/pop' can only appear at file or namespace scope "
+ "or within a language linkage specification">;
def err_pragma_fc_noprecise_requires_nofenv : Error<
"'#pragma float_control(precise, off)' is illegal when fenv_access is enabled">;
def err_pragma_fc_except_requires_precise : Error<
@@ -949,12 +1042,22 @@ def warn_cxx_ms_struct :
def err_pragma_pack_identifer_not_supported : Error<
"specifying an identifier within `#pragma pack` is not supported on this target">;
def err_section_conflict : Error<"%0 causes a section type conflict with %1">;
+def warn_section_msvc_compat : Warning<"`#pragma const_seg` for section %1 will"
+ " not apply to %0 due to the presence of a %select{mutable field||non-trivial constructor|non-trivial destructor}2">,
+ InGroup<IncompatibleMSPragmaSection>;
def err_no_base_classes : Error<"invalid use of '__super', %0 has no base classes">;
def err_invalid_super_scope : Error<"invalid use of '__super', "
"this keyword can only be used inside class or member function scope">;
def err_super_in_lambda_unsupported : Error<
"use of '__super' inside a lambda is unsupported">;
+def err_pragma_expected_file_scope : Error<
+ "'#pragma %0' can only appear at file scope">;
+def err_pragma_alloc_text_c_linkage: Error<
+ "'#pragma alloc_text' is applicable only to functions with C linkage">;
+def err_pragma_alloc_text_not_function: Error<
+ "'#pragma alloc_text' is applicable only to functions">;
+
def warn_pragma_unused_undeclared_var : Warning<
"undeclared variable %0 used as an argument for '#pragma unused'">,
InGroup<IgnoredPragmas>;
@@ -1041,7 +1144,7 @@ def warn_protocol_property_mismatch : Warning<
"property %select{of type %1|with attribute '%1'|without attribute '%1'|with "
"getter %1|with setter %1}0 was selected for synthesis">,
InGroup<DiagGroup<"protocol-property-synthesis-ambiguity">>;
-def err_protocol_property_mismatch: Error<warn_protocol_property_mismatch.Text>;
+def err_protocol_property_mismatch: Error<warn_protocol_property_mismatch.Summary>;
def err_undef_interface : Error<"cannot find interface declaration for %0">;
def err_category_forward_interface : Error<
"cannot define %select{category|class extension}0 for undefined class %1">;
@@ -1258,7 +1361,7 @@ def warn_objc_pointer_masking : Warning<
"bitmasking for introspection of Objective-C object pointers is strongly "
"discouraged">,
InGroup<ObjCPointerIntrospect>;
-def warn_objc_pointer_masking_performSelector : Warning<warn_objc_pointer_masking.Text>,
+def warn_objc_pointer_masking_performSelector : Warning<warn_objc_pointer_masking.Summary>,
InGroup<ObjCPointerIntrospectPerformSelector>;
def warn_objc_property_default_assign_on_object : Warning<
"default property attribute 'assign' not appropriate for object">,
@@ -1284,7 +1387,7 @@ def warn_atomic_property_rule : Warning<
"with a user defined %select{getter|setter}2">,
InGroup<DiagGroup<"atomic-property-with-user-defined-accessor">>;
def note_atomic_property_fixup_suggest : Note<"setter and getter must both be "
- "synthesized, or both be user defined,or the property must be nonatomic">;
+ "synthesized, or both be user defined, or the property must be nonatomic">;
def err_atomic_property_nontrivial_assign_op : Error<
"atomic property of reference type %0 cannot have non-trivial assignment"
" operator">;
@@ -1462,7 +1565,7 @@ def warn_potentially_direct_selector_expression : Warning<
"@selector expression formed with potentially direct selector %0">,
InGroup<ObjCPotentiallyDirectSelector>;
def warn_strict_potentially_direct_selector_expression : Warning<
- warn_potentially_direct_selector_expression.Text>,
+ warn_potentially_direct_selector_expression.Summary>,
InGroup<ObjCStrictPotentiallyDirectSelector>, DefaultIgnore;
def err_objc_kindof_nonobject : Error<
@@ -1486,12 +1589,36 @@ def err_messaging_class_with_direct_method : Error<
// C++ declarations
def err_static_assert_expression_is_not_constant : Error<
- "static_assert expression is not an integral constant expression">;
+ "static assertion expression is not an integral constant expression">;
def err_constexpr_if_condition_expression_is_not_constant : Error<
"constexpr if condition is not a constant expression">;
-def err_static_assert_failed : Error<"static_assert failed%select{ %1|}0">;
+def err_static_assert_failed : Error<"static assertion failed%select{: %1|}0">;
def err_static_assert_requirement_failed : Error<
- "static_assert failed due to requirement '%0'%select{ %2|}1">;
+ "static assertion failed due to requirement '%0'%select{: %2|}1">;
+def note_expr_evaluates_to : Note<
+ "expression evaluates to '%0 %1 %2'">;
+def err_static_assert_invalid_message : Error<
+ "the message in a static assertion must be a string literal or an "
+ "object with 'data()' and 'size()' member functions">;
+def err_static_assert_missing_member_function : Error<
+ "the message object in this static assertion is missing %select{"
+ "a 'size()' member function|"
+ "a 'data()' member function|"
+ "'data()' and 'size()' member functions}0">;
+def err_static_assert_invalid_mem_fn_ret_ty : Error<
+ "the message in a static assertion must have a '%select{size|data}0()' member "
+ "function returning an object convertible to '%select{std::size_t|const char *}0'">;
+def warn_static_assert_message_constexpr : Warning<
+ "the message in this static assertion is not a "
+ "constant expression">,
+ DefaultError, InGroup<DiagGroup<"invalid-static-assert-message">>;
+def err_static_assert_message_constexpr : Error<
+ "the message in a static assertion must be produced by a "
+ "constant expression">;
+
+def warn_consteval_if_always_true : Warning<
+ "consteval if is always true in an %select{unevaluated|immediate}0 context">,
+ InGroup<DiagGroup<"redundant-consteval-if">>;
def ext_inline_variable : ExtWarn<
"inline variables are a C++17 extension">, InGroup<CXX17>;
@@ -1504,6 +1631,8 @@ def warn_inline_namespace_reopened_noninline : Warning<
InGroup<InlineNamespaceReopenedNoninline>;
def err_inline_namespace_mismatch : Error<
"non-inline namespace cannot be reopened as inline">;
+def err_inline_namespace_std : Error<
+ "cannot declare the namespace 'std' to be inline">;
def err_unexpected_friend : Error<
"friends can only be classes or functions">;
@@ -1547,6 +1676,8 @@ def err_qualified_friend_def : Error<
"friend function definition cannot be qualified with '%0'">;
def err_friend_def_in_local_class : Error<
"friend function cannot be defined in a local class">;
+def err_friend_specialization_def : Error<
+ "friend function specialization cannot be defined">;
def err_friend_not_first_in_declaration : Error<
"'friend' must appear first in a non-function declaration">;
def err_using_decl_friend : Error<
@@ -1600,6 +1731,9 @@ def err_type_defined_in_condition : Error<
"%0 cannot be defined in a condition">;
def err_type_defined_in_enum : Error<
"%0 cannot be defined in an enumeration">;
+def ext_type_defined_in_offsetof : Extension<
+ "defining a type within '%select{__builtin_offsetof|offsetof}0' is a Clang "
+ "extension">, InGroup<GNUOffsetofExtensions>;
def note_pure_virtual_function : Note<
"unimplemented pure virtual method %0 in %1">;
@@ -1624,8 +1758,7 @@ def warn_weak_vtable : Warning<
"emitted in every translation unit">,
InGroup<DiagGroup<"weak-vtables">>, DefaultIgnore;
def warn_weak_template_vtable : Warning<
- "explicit template instantiation %0 will emit a vtable in every "
- "translation unit">,
+ "this warning is no longer in use and will be removed in the next release">,
InGroup<DiagGroup<"weak-template-vtables">>, DefaultIgnore;
def ext_using_undefined_std : ExtWarn<
@@ -1643,37 +1776,32 @@ def err_incomplete_in_exception_spec : Error<
def err_sizeless_in_exception_spec : Error<
"%select{|reference to }0sizeless type %1 is not allowed "
"in exception specification">;
-def ext_incomplete_in_exception_spec : ExtWarn<err_incomplete_in_exception_spec.Text>,
+def ext_incomplete_in_exception_spec : ExtWarn<err_incomplete_in_exception_spec.Summary>,
InGroup<MicrosoftExceptionSpec>;
def err_rref_in_exception_spec : Error<
"rvalue reference type %0 is not allowed in exception specification">;
def err_mismatched_exception_spec : Error<
"exception specification in declaration does not match previous declaration">;
-def ext_mismatched_exception_spec : ExtWarn<err_mismatched_exception_spec.Text>,
+def ext_mismatched_exception_spec : ExtWarn<err_mismatched_exception_spec.Summary>,
InGroup<MicrosoftExceptionSpec>;
def err_override_exception_spec : Error<
"exception specification of overriding function is more lax than "
"base version">;
-def ext_override_exception_spec : ExtWarn<err_override_exception_spec.Text>,
+def ext_override_exception_spec : ExtWarn<err_override_exception_spec.Summary>,
InGroup<MicrosoftExceptionSpec>;
def err_incompatible_exception_specs : Error<
"target exception specification is not superset of source">;
def warn_incompatible_exception_specs : Warning<
- err_incompatible_exception_specs.Text>, InGroup<IncompatibleExceptionSpec>;
+ err_incompatible_exception_specs.Summary>, InGroup<IncompatibleExceptionSpec>;
def err_deep_exception_specs_differ : Error<
"exception specifications of %select{return|argument}0 types differ">;
def warn_deep_exception_specs_differ : Warning<
- err_deep_exception_specs_differ.Text>, InGroup<IncompatibleExceptionSpec>;
+ err_deep_exception_specs_differ.Summary>, InGroup<IncompatibleExceptionSpec>;
def err_missing_exception_specification : Error<
"%0 is missing exception specification '%1'">;
def ext_missing_exception_specification : ExtWarn<
- err_missing_exception_specification.Text>,
+ err_missing_exception_specification.Summary>,
InGroup<DiagGroup<"missing-exception-spec">>;
-def ext_ms_missing_exception_specification : ExtWarn<
- err_missing_exception_specification.Text>,
- InGroup<MicrosoftExceptionSpec>;
-def err_noexcept_needs_constant_expression : Error<
- "argument to noexcept specifier must be a constant expression">;
def err_exception_spec_not_parsed : Error<
"exception specification is not available until end of class definition">;
def err_exception_spec_cycle : Error<
@@ -1833,7 +1961,7 @@ def err_static_not_bitfield : Error<"static member %0 cannot be a bit-field">;
def err_static_out_of_line : Error<
"'static' can only be specified inside the class definition">;
def ext_static_out_of_line : ExtWarn<
- err_static_out_of_line.Text>,
+ err_static_out_of_line.Summary>,
InGroup<MicrosoftTemplate>;
def err_storage_class_for_static_member : Error<
"static data member definition cannot specify a storage class">;
@@ -1961,6 +2089,10 @@ def err_different_return_type_for_overriding_virtual_function : Error<
"than the function it overrides}1,2">;
def note_overridden_virtual_function : Note<
"overridden virtual function is here">;
+def err_conflicting_overriding_attributes : Error<
+ "virtual function %0 has different attributes "
+ "%diff{($) than the function it overrides (which has $)|"
+ "than the function it overrides}1,2">;
def err_conflicting_overriding_cc_attributes : Error<
"virtual function %0 has different calling convention attributes "
"%diff{($) than the function it overrides (which has calling convention $)|"
@@ -2077,7 +2209,8 @@ def err_init_conversion_failed : Error<
"exception object|a member subobject|an array element|a new value|a value|a "
"base class|a constructor delegation|a vector element|a block element|a "
"block element|a complex element|a lambda capture|a compound literal "
- "initializer|a related result|a parameter of CF audited function}0 "
+ "initializer|a related result|a parameter of CF audited function|a "
+ "structured binding|a member subobject}0 "
"%diff{of type $ with an %select{rvalue|lvalue}2 of type $|"
"with an %select{rvalue|lvalue}2 of incompatible type}1,3"
"%select{|: different classes%diff{ ($ vs $)|}5,6"
@@ -2120,10 +2253,15 @@ def err_init_list_bad_dest_type : Error<
def warn_cxx20_compat_aggregate_init_with_ctors : Warning<
"aggregate initialization of type %0 with user-declared constructors "
"is incompatible with C++20">, DefaultIgnore, InGroup<CXX20Compat>;
+def warn_cxx17_compat_aggregate_init_paren_list : Warning<
+ "aggregate initialization of type %0 from a parenthesized list of values "
+ "is a C++20 extension">, DefaultIgnore, InGroup<CXX20>;
def err_reference_bind_to_bitfield : Error<
"%select{non-const|volatile}0 reference cannot bind to "
"bit-field%select{| %1}2">;
+def err_reference_bind_to_bitfield_in_cce : Error<
+ "reference cannot bind to bit-field in converted constant expression">;
def err_reference_bind_to_vector_element : Error<
"%select{non-const|volatile}0 reference cannot bind to vector element">;
def err_reference_bind_to_matrix_element : Error<
@@ -2138,6 +2276,8 @@ def err_reference_has_multiple_inits : Error<
"reference cannot be initialized with multiple values">;
def err_init_non_aggr_init_list : Error<
"initialization of non-aggregate type %0 with an initializer list">;
+def err_designated_init_for_non_aggregate : Error<
+ "initialization of non-aggregate type %0 with a designated initializer list">;
def err_init_reference_member_uninitialized : Error<
"reference member of type %0 uninitialized">;
def note_uninit_reference_member : Note<
@@ -2260,8 +2400,6 @@ def err_auto_variable_cannot_appear_in_own_initializer : Error<
def err_binding_cannot_appear_in_own_initializer : Error<
"binding %0 cannot appear in the initializer of its own "
"decomposition declaration">;
-def err_illegal_decl_array_of_auto : Error<
- "'%0' declared as array of %1">;
def err_new_array_of_auto : Error<
"cannot allocate array of 'auto'">;
def err_auto_not_allowed : Error<
@@ -2279,7 +2417,9 @@ def err_auto_not_allowed : Error<
"|in conversion function type|here|in lambda parameter"
"|in type allocated by 'new'|in K&R-style function parameter"
"|in template parameter|in friend declaration|in function prototype that is "
- "not a function declaration|in requires expression parameter}1">;
+ "not a function declaration|in requires expression parameter"
+ "|in array declaration"
+ "|in declaration of conversion function template}1">;
def err_dependent_deduced_tst : Error<
"typename specifier refers to "
"%select{class template|function template|variable template|alias template|"
@@ -2301,13 +2441,23 @@ def ext_auto_new_list_init : Extension<
"type %0 to use list-initialization">, InGroup<CXX17>;
def err_auto_var_init_no_expression : Error<
"initializer for variable %0 with type %1 is empty">;
+def err_auto_expr_init_no_expression : Error<
+ "initializer for functional-style cast to %0 is empty">;
def err_auto_var_init_multiple_expressions : Error<
"initializer for variable %0 with type %1 contains multiple expressions">;
+def err_auto_expr_init_multiple_expressions : Error<
+ "initializer for functional-style cast to %0 contains multiple expressions">;
def err_auto_var_init_paren_braces : Error<
"cannot deduce type for variable %1 with type %2 from "
"%select{parenthesized|nested}0 initializer list">;
def err_auto_new_ctor_multiple_expressions : Error<
"new expression for type %0 contains multiple constructor arguments">;
+def err_auto_expr_init_paren_braces : Error<
+ "cannot deduce actual type for %1 from "
+ "%select{parenthesized|nested}0 initializer list">;
+def warn_cxx20_compat_auto_expr : Warning<
+ "'auto' as a functional-style cast is incompatible with C++ standards "
+ "before C++23">, InGroup<CXXPre23Compat>, DefaultIgnore;
def err_auto_missing_trailing_return : Error<
"'auto' return without trailing return type; deduced return types are a "
"C++14 extension">;
@@ -2321,6 +2471,8 @@ def err_auto_var_deduction_failure : Error<
"variable %0 with type %1 has incompatible initializer of type %2">;
def err_auto_var_deduction_failure_from_init_list : Error<
"cannot deduce actual type for variable %0 with type %1 from initializer list">;
+def err_auto_expr_deduction_failure : Error<
+ "functional-style cast to %0 has incompatible initializer of type %1">;
def err_auto_new_deduction_failure : Error<
"new expression for type %0 has incompatible constructor argument of type %1">;
def err_auto_inconsistent_deduction : Error<
@@ -2340,7 +2492,8 @@ def err_implied_std_initializer_list_not_found : Error<
def err_malformed_std_initializer_list : Error<
"std::initializer_list must be a class template with a single type parameter">;
def err_auto_init_list_from_c : Error<
- "cannot use __auto_type with initializer list in C">;
+ "cannot use %select{'auto'|<ERROR>|'__auto_type'}0 with "
+ "%select{initializer list|array}1 in C">;
def err_auto_bitfield : Error<
"cannot pass bit-field as __auto_type initializer in C">;
@@ -2486,7 +2639,7 @@ def err_final_function_overridden : Error<
// C++11 scoped enumerations
def err_enum_invalid_underlying : Error<
- "non-integral type %0 is an invalid underlying type">;
+ "%select{non-integral type %0|%0}1 is an invalid underlying type">;
def err_enumerator_too_large : Error<
"enumerator value is not representable in the underlying type %0">;
def ext_enumerator_too_large : Extension<
@@ -2586,10 +2739,28 @@ def warn_cxx14_compat_constexpr_not_const : Warning<
"in C++14; add 'const' to avoid a change in behavior">,
InGroup<DiagGroup<"constexpr-not-const">>;
def err_invalid_consteval_take_address : Error<
- "cannot take address of consteval function %0 outside"
+ "cannot take address of %select{immediate|consteval}2 "
+ "%select{function|call operator of}1 %0 outside"
" of an immediate invocation">;
def err_invalid_consteval_call : Error<
- "call to consteval function %q0 is not a constant expression">;
+ "call to %select{immediate|consteval}1 function "
+ "%q0 is not a constant expression">;
+
+def err_immediate_function_used_before_definition : Error<
+ "immediate function %0 used before it is defined">;
+
+def note_immediate_function_reason : Note<
+ "%0 is an immediate %select{function|constructor}5 because "
+ "%select{its body|the%select{| default}7 initializer of %8}6 "
+ "%select{evaluates the address of %select{an immediate|a consteval}2 "
+ "function %1|contains a call to %select{an immediate|a consteval}2 "
+ "%select{function|constructor}4 %1 and that call is not a constant "
+ "expression}3">;
+
+def note_invalid_consteval_initializer : Note<
+ "in the default initializer of %0">;
+def note_invalid_consteval_initializer_here : Note<
+ "initialized here %0">;
def err_invalid_consteval_decl_kind : Error<
"%0 cannot be declared consteval">;
def err_invalid_constexpr : Error<
@@ -2598,7 +2769,7 @@ def err_invalid_constexpr : Error<
def err_invalid_constexpr_member : Error<"non-static data member cannot be "
"constexpr%select{; did you intend to make it %select{const|static}0?|}1">;
def err_constexpr_tag : Error<
- "%select{class|struct|interface|union|enum}0 "
+ "%select{class|struct|interface|union|enum|enum class|enum struct}0 "
"cannot be marked %sub{select_constexpr_spec_kind}1">;
def err_constexpr_dtor : Error<
"destructor cannot be declared %sub{select_constexpr_spec_kind}0">;
@@ -2658,6 +2829,13 @@ def warn_cxx17_compat_constexpr_body_invalid_stmt : Warning<
"use of this statement in a constexpr %select{function|constructor}0 "
"is incompatible with C++ standards before C++20">,
InGroup<CXXPre20Compat>, DefaultIgnore;
+def ext_constexpr_body_invalid_stmt_cxx23 : ExtWarn<
+ "use of this statement in a constexpr %select{function|constructor}0 "
+ "is a C++23 extension">, InGroup<CXX23>;
+def warn_cxx20_compat_constexpr_body_invalid_stmt : Warning<
+ "use of this statement in a constexpr %select{function|constructor}0 "
+ "is incompatible with C++ standards before C++23">,
+ InGroup<CXXPre23Compat>, DefaultIgnore;
def ext_constexpr_type_definition : ExtWarn<
"type definition in a constexpr %select{function|constructor}0 "
"is a C++14 extension">, InGroup<CXX14>;
@@ -2675,12 +2853,18 @@ def warn_cxx11_compat_constexpr_local_var : Warning<
"variable declaration in a constexpr %select{function|constructor}0 "
"is incompatible with C++ standards before C++14">,
InGroup<CXXPre14Compat>, DefaultIgnore;
-def err_constexpr_local_var_static : Error<
- "%select{static|thread_local}1 variable not permitted in a constexpr "
- "%select{function|constructor}0">;
+def ext_constexpr_static_var : ExtWarn<
+ "definition of a %select{static|thread_local}1 variable "
+ "in a constexpr %select{function|constructor}0 "
+ "is a C++23 extension">, InGroup<CXX23>;
+def warn_cxx20_compat_constexpr_var : Warning<
+ "definition of a %select{static variable|thread_local variable|variable "
+ "of non-literal type}1 in a constexpr %select{function|constructor}0 "
+ "is incompatible with C++ standards before C++23">,
+ InGroup<CXXPre23Compat>, DefaultIgnore;
def err_constexpr_local_var_non_literal_type : Error<
"variable of non-literal type %1 cannot be defined in a constexpr "
- "%select{function|constructor}0">;
+ "%select{function|constructor}0 before C++23">;
def ext_constexpr_local_var_no_init : ExtWarn<
"uninitialized variable in a constexpr %select{function|constructor}0 "
"is a C++20 extension">, InGroup<CXX20>;
@@ -2712,6 +2896,8 @@ def warn_cxx11_compat_constexpr_body_multiple_return : Warning<
InGroup<CXXPre14Compat>, DefaultIgnore;
def note_constexpr_body_previous_return : Note<
"previous return statement is here">;
+def err_ms_constexpr_cannot_be_applied : Error<
+ "attribute 'msvc::constexpr' cannot be applied to the %select{constexpr|consteval|virtual}0 function %1">;
// C++20 function try blocks in constexpr
def ext_constexpr_function_try_block_cxx20 : ExtWarn<
@@ -2780,6 +2966,8 @@ def err_template_arg_list_constraints_not_satisfied : Error<
"template template parameter|template}0 %1%2">;
def note_substituted_constraint_expr_is_ill_formed : Note<
"because substituted constraint expression is ill-formed%0">;
+def note_constraint_references_error
+ : Note<"constraint depends on a previously diagnosed expression">;
def note_atomic_constraint_evaluated_to_false : Note<
"%select{and|because}0 '%1' evaluated to false">;
def note_concept_specialization_constraint_evaluated_to_false : Note<
@@ -2792,6 +2980,8 @@ def err_constrained_virtual_method : Error<
"virtual function cannot have a requires clause">;
def err_trailing_requires_clause_on_deduction_guide : Error<
"deduction guide cannot have a requires clause">;
+def err_constrained_non_templated_function
+ : Error<"non-templated function cannot have a requires clause">;
def err_reference_to_function_with_unsatisfied_constraints : Error<
"invalid reference to function %0: constraints not satisfied">;
def err_requires_expr_local_parameter_default_argument : Error<
@@ -2817,7 +3007,7 @@ def note_type_requirement_substitution_error : Note<
def note_type_requirement_unknown_substitution_error : Note<
"%select{and|because}0 '%1' would be invalid">;
def note_nested_requirement_substitution_error : Note<
- "%select{and|because}0 '%1' would be invalid: %2">;
+ "%select{and|because}0 '%1' would be invalid%2">;
def note_nested_requirement_unknown_substitution_error : Note<
"%select{and|because}0 '%1' would be invalid">;
def note_ambiguous_atomic_constraints : Note<
@@ -2872,11 +3062,20 @@ def warn_auto_var_is_id : Warning<
InGroup<DiagGroup<"auto-var-id">>;
// Attributes
-def warn_nomerge_attribute_ignored_in_stmt: Warning<
+def warn_attribute_ignored_no_calls_in_stmt: Warning<
"%0 attribute is ignored because there exists no call expression inside the "
"statement">,
InGroup<IgnoredAttributes>;
+def warn_attribute_ignored_non_function_pointer: Warning<
+ "%0 attribute is ignored because %1 is not a function pointer">,
+ InGroup<IgnoredAttributes>;
+
+def warn_function_attribute_ignored_in_stmt : Warning<
+ "attribute is ignored on this statement as it only applies to functions; "
+ "use '%0' on statements">,
+ InGroup<IgnoredAttributes>;
+
def err_musttail_needs_trivial_args : Error<
"tail call requires that the return value, all parameters, and any "
"temporaries created by the expression are trivially destructible">;
@@ -2922,22 +3121,38 @@ def err_musttail_scope : Error<
"cannot perform a tail call from this return statement">;
def err_musttail_no_variadic : Error<
"%0 attribute may not be used with variadic functions">;
+def err_musttail_no_return : Error<
+ "%0 attribute may not be used with no-return-attribute functions">;
def err_nsobject_attribute : Error<
"'NSObject' attribute is for pointer types only">;
def err_attributes_are_not_compatible : Error<
- "%0 and %1 attributes are not compatible">;
+ "%0 and %1%select{ attributes|}2 are not compatible">;
def err_attribute_invalid_argument : Error<
"%select{a reference type|an array type|a non-vector or "
"non-vectorizable scalar type}0 is an invalid argument to attribute %1">;
def err_attribute_wrong_number_arguments : Error<
"%0 attribute %plural{0:takes no arguments|1:takes one argument|"
":requires exactly %1 arguments}1">;
+def err_attribute_wrong_number_arguments_for : Error <
+ "%0 attribute references function %1, which %plural{0:takes no arguments|1:takes one argument|"
+ ":takes exactly %2 arguments}2">;
+def err_attribute_bounds_for_function : Error<
+ "%0 attribute references parameter %1, but the function %2 has only %3 parameters">;
+def err_attribute_no_member_function : Error<
+ "%0 attribute cannot be applied to non-static member functions">;
+def err_attribute_parameter_types : Error<
+ "%0 attribute parameter types do not match: parameter %1 of function %2 has type %3, "
+ "but parameter %4 of function %5 has type %6">;
+
def err_attribute_too_many_arguments : Error<
"%0 attribute takes no more than %1 argument%s1">;
def err_attribute_too_few_arguments : Error<
"%0 attribute takes at least %1 argument%s1">;
def err_attribute_invalid_vector_type : Error<"invalid vector element type %0">;
+def err_attribute_invalid_bitint_vector_type : Error<
+ "'_BitInt' vector element width must be %select{a power of 2|"
+ "at least as wide as 'CHAR_BIT'}0">;
def err_attribute_invalid_matrix_type : Error<"invalid matrix element type %0">;
def err_attribute_bad_neon_vector_size : Error<
"Neon vector size must be 64 or 128 bits">;
@@ -2949,21 +3164,48 @@ def err_attribute_bad_sve_vector_size : Error<
def err_attribute_arm_feature_sve_bits_unsupported : Error<
"%0 is only supported when '-msve-vector-bits=<bits>' is specified with a "
"value of 128, 256, 512, 1024 or 2048.">;
+def warn_attribute_arm_sm_incompat_builtin : Warning<
+ "builtin call has undefined behaviour when called from a %0 function">,
+ InGroup<DiagGroup<"undefined-arm-streaming">>;
+def warn_attribute_arm_za_builtin_no_za_state : Warning<
+ "builtin call is not valid when calling from a function without active ZA state">,
+ InGroup<DiagGroup<"undefined-arm-za">>;
+def warn_attribute_arm_zt0_builtin_no_zt0_state : Warning<
+ "builtin call is not valid when calling from a function without active ZT0 state">,
+ InGroup<DiagGroup<"undefined-arm-zt0">>;
+def err_sve_vector_in_non_sve_target : Error<
+ "SVE vector type %0 cannot be used in a target without sve">;
+def err_attribute_riscv_rvv_bits_unsupported : Error<
+ "%0 is only supported when '-mrvv-vector-bits=<bits>' is specified with a "
+ "value of \"zvl\" or a power 2 in the range [64,65536]">;
+def err_attribute_bad_rvv_vector_size : Error<
+ "invalid RVV vector size '%0', expected size is '%1' based on LMUL of type "
+ "and '-mrvv-vector-bits'">;
+def err_attribute_invalid_rvv_type : Error<
+ "%0 attribute applied to non-RVV type %1">;
def err_attribute_requires_positive_integer : Error<
"%0 attribute requires a %select{positive|non-negative}1 "
"integral compile time constant expression">;
def err_attribute_requires_opencl_version : Error<
- "%0 attribute requires OpenCL version %1%select{| or above}2">;
+ "attribute %0 is supported in the OpenCL version %1%select{| onwards}2">;
def err_invalid_branch_protection_spec : Error<
"invalid or misplaced branch protection specification '%0'">;
+def warn_unsupported_branch_protection_spec : Warning<
+ "unsupported branch protection specification '%0'">, InGroup<BranchProtection>;
+
def warn_unsupported_target_attribute
- : Warning<"%select{unsupported|duplicate|unknown}0%select{| architecture|"
- " tune CPU}1 '%2' in the 'target' attribute string; 'target' "
+ : Warning<"%select{unsupported|duplicate|unknown}0%select{| CPU|"
+ " tune CPU}1 '%2' in the '%select{target|target_clones|target_version}3' "
+ "attribute string; '%select{target|target_clones|target_version}3' "
"attribute ignored">,
InGroup<IgnoredAttributes>;
def err_attribute_unsupported
: Error<"%0 attribute is not supported on targets missing %1;"
" specify an appropriate -march= or -mcpu=">;
+def err_duplicate_target_attribute
+ : Error<"%select{unsupported|duplicate|unknown}0%select{| CPU|"
+ " tune CPU}1 '%2' in the '%select{target|target_clones|target_version}3' "
+ "attribute string; ">;
// The err_*_attribute_argument_not_int are separate because they're used by
// VerifyIntegerConstantExpression.
def err_aligned_attribute_argument_not_int : Error<
@@ -2971,8 +3213,9 @@ def err_aligned_attribute_argument_not_int : Error<
def err_align_value_attribute_argument_not_int : Error<
"'align_value' attribute requires integer constant">;
def err_alignas_attribute_wrong_decl_type : Error<
- "%0 attribute cannot be applied to a %select{function parameter|"
- "variable with 'register' storage class|'catch' variable|bit-field}1">;
+ "%0 attribute cannot be applied to %select{a function parameter|"
+ "a variable with 'register' storage class|a 'catch' variable|a bit-field|"
+ "an enumeration}1">;
def err_alignas_missing_on_definition : Error<
"%0 must be specified on definition if it is specified on any declaration">;
def note_alignas_on_declaration : Note<"declared with %0 attribute here">;
@@ -2980,11 +3223,13 @@ def err_alignas_mismatch : Error<
"redeclaration has different alignment requirement (%1 vs %0)">;
def err_alignas_underaligned : Error<
"requested alignment is less than minimum alignment of %1 for type %0">;
+def warn_aligned_attr_underaligned : Warning<err_alignas_underaligned.Summary>,
+ InGroup<IgnoredAttributes>;
def err_attribute_sizeless_type : Error<
"%0 attribute cannot be applied to sizeless type %1">;
def err_attribute_argument_n_type : Error<
"%0 attribute requires parameter %1 to be %select{int or bool|an integer "
- "constant|a string|an identifier|a constant expression}2">;
+ "constant|a string|an identifier|a constant expression|a builtin function}2">;
def err_attribute_argument_type : Error<
"%0 attribute requires %select{int or bool|an integer "
"constant|a string|an identifier}1">;
@@ -3002,7 +3247,7 @@ def note_previous_uuid : Note<"previous uuid specified here">;
def warn_attribute_pointers_only : Warning<
"%0 attribute only applies to%select{| constant}1 pointer arguments">,
InGroup<IgnoredAttributes>;
-def err_attribute_pointers_only : Error<warn_attribute_pointers_only.Text>;
+def err_attribute_pointers_only : Error<warn_attribute_pointers_only.Summary>;
def err_attribute_integers_only : Error<
"%0 attribute argument may only refer to a function parameter of integer "
"type">;
@@ -3027,9 +3272,7 @@ def note_ownership_returns_index_mismatch : Note<
"declared with index %0 here">;
def err_format_strftime_third_parameter : Error<
"strftime format attribute requires 3rd parameter to be 0">;
-def err_format_attribute_requires_variadic : Error<
- "format attribute requires variadic function">;
-def err_format_attribute_not : Error<"format argument not %0">;
+def err_format_attribute_not : Error<"format argument not a string type">;
def err_format_attribute_result_not : Error<"function does not return %0">;
def err_format_attribute_implicit_this_format_string : Error<
"format attribute cannot specify the implicit this argument as the format "
@@ -3054,10 +3297,11 @@ def err_attribute_invalid_size : Error<
"vector size not an integral multiple of component size">;
def err_attribute_zero_size : Error<"zero %0 size">;
def err_attribute_size_too_large : Error<"%0 size too large">;
-def err_typecheck_sve_ambiguous : Error<
- "cannot combine fixed-length and sizeless SVE vectors in expression, result is ambiguous (%0 and %1)">;
-def err_typecheck_sve_gnu_ambiguous : Error<
- "cannot combine GNU and SVE vectors in expression, result is ambiguous (%0 and %1)">;
+def err_typecheck_sve_rvv_ambiguous : Error<
+ "cannot combine fixed-length and sizeless %select{SVE|RVV}0 vectors "
+ "in expression, result is ambiguous (%1 and %2)">;
+def err_typecheck_sve_rvv_gnu_ambiguous : Error<
+ "cannot combine GNU and %select{SVE|RVV}0 vectors in expression, result is ambiguous (%1 and %2)">;
def err_typecheck_vector_not_convertable_implict_truncation : Error<
"cannot convert between %select{scalar|vector}0 type %1 and vector type"
" %2 as implicit conversion would cause truncation">;
@@ -3186,6 +3430,8 @@ def warn_objc_redundant_literal_use : Warning<
def err_attr_tlsmodel_arg : Error<"tls_model must be \"global-dynamic\", "
"\"local-dynamic\", \"initial-exec\" or \"local-exec\"">;
+def err_attr_codemodel_arg : Error<"code model '%0' is not supported on this target">;
+
def err_aix_attr_unsupported_tls_model : Error<"TLS model '%0' is not yet supported on AIX">;
def err_tls_var_aligned_over_maximum : Error<
@@ -3240,7 +3486,7 @@ def err_alignment_too_big : Error<
def err_alignment_not_power_of_two : Error<
"requested alignment is not a power of 2">;
def warn_alignment_not_power_of_two : Warning<
- err_alignment_not_power_of_two.Text>,
+ err_alignment_not_power_of_two.Summary>,
InGroup<DiagGroup<"non-power-of-two-alignment">>;
def err_alignment_dependent_typedef_name : Error<
"requested alignment is dependent but declaration is not dependent">;
@@ -3255,9 +3501,11 @@ def warn_assume_aligned_too_great
"alignment assumed">,
InGroup<DiagGroup<"builtin-assume-aligned-alignment">>;
def warn_not_xl_compatible
- : Warning<"requesting an alignment of 16 bytes or greater for struct"
- " members is not binary compatible with AIX XL 16.1 and older">,
+ : Warning<"alignment of 16 bytes for a struct member is not binary "
+ "compatible with IBM XL C/C++ for AIX 16.1.0 or older">,
InGroup<AIXCompat>;
+def note_misaligned_member_used_here : Note<
+ "passing byval argument %0 with potentially incompatible alignment here">;
def warn_redeclaration_without_attribute_prev_attribute_ignored : Warning<
"%q0 redeclared without %1 attribute: previous %1 ignored">,
InGroup<MicrosoftInconsistentDllImport>;
@@ -3267,8 +3515,6 @@ def warn_redeclaration_without_import_attribute : Warning<
def warn_dllimport_dropped_from_inline_function : Warning<
"%q0 redeclared inline; %1 attribute ignored">,
InGroup<IgnoredAttributes>;
-def warn_attribute_ignored : Warning<"%0 attribute ignored">,
- InGroup<IgnoredAttributes>;
def warn_nothrow_attribute_ignored : Warning<"'nothrow' attribute conflicts with"
" exception specification; attribute ignored">,
InGroup<IgnoredAttributes>;
@@ -3298,19 +3544,26 @@ def warn_attribute_has_no_effect_on_infinite_loop : Warning<
InGroup<IgnoredAttributes>;
def note_attribute_has_no_effect_on_infinite_loop_here : Note<
"annotating the infinite loop here">;
-def warn_attribute_has_no_effect_on_if_constexpr : Warning<
- "attribute %0 has no effect when annotating an 'if constexpr' statement">,
+def warn_attribute_has_no_effect_on_compile_time_if : Warning<
+ "attribute %0 has no effect when annotating an 'if %select{constexpr|consteval}1' statement">,
InGroup<IgnoredAttributes>;
-def note_attribute_has_no_effect_on_if_constexpr_here : Note<
- "annotating the 'if constexpr' statement here">;
+def note_attribute_has_no_effect_on_compile_time_if_here : Note<
+ "annotating the 'if %select{constexpr|consteval}0' statement here">;
def err_decl_attribute_invalid_on_stmt : Error<
- "%0 attribute cannot be applied to a statement">;
-def err_stmt_attribute_invalid_on_decl : Error<
- "%0 attribute cannot be applied to a declaration">;
+ "%0%select{ attribute|}1 cannot be applied to a statement">;
+def err_attribute_invalid_on_decl : Error<
+ "%0%select{ attribute|}1 cannot be applied to a declaration">;
+def warn_type_attribute_deprecated_on_decl : Warning<
+ "applying attribute %0 to a declaration is deprecated; apply it to the type instead">,
+ InGroup<DeprecatedAttributes>;
def warn_declspec_attribute_ignored : Warning<
"attribute %0 is ignored, place it after "
- "\"%select{class|struct|interface|union|enum}1\" to apply attribute to "
+ "\"%select{class|struct|interface|union|enum|enum class|enum struct}1\" to apply attribute to "
"type declaration">, InGroup<IgnoredAttributes>;
+def err_declspec_keyword_has_no_effect : Error<
+ "%0 cannot appear here, place it after "
+ "\"%select{class|struct|interface|union|enum}1\" to apply it to the "
+ "type declaration">;
def warn_attribute_precede_definition : Warning<
"attribute declaration must precede definition">,
InGroup<IgnoredAttributes>;
@@ -3371,6 +3624,8 @@ def warn_attribute_dll_redeclaration : Warning<
InGroup<DiagGroup<"dll-attribute-on-redeclaration">>;
def err_attribute_dllimport_function_definition : Error<
"dllimport cannot be applied to non-inline function definition">;
+def err_attribute_dllimport_function_specialization_definition : Error<
+ "cannot define non-inline dllimport template specialization">;
def err_attribute_dll_deleted : Error<
"attribute %q0 cannot be applied to a deleted function">;
def err_attribute_dllimport_data_definition : Error<
@@ -3382,7 +3637,7 @@ def warn_attribute_dllimport_static_field_definition : Warning<
InGroup<DiagGroup<"dllimport-static-field-def">>;
def warn_attribute_dllexport_explicit_instantiation_decl : Warning<
"explicit instantiation declaration should not be 'dllexport'">,
- InGroup<DiagGroup<"dllexport-explicit-instantiation-decl">>;
+ InGroup<DllexportExplicitInstantiationDecl>;
def warn_attribute_dllexport_explicit_instantiation_def : Warning<
"'dllexport' attribute ignored on explicit instantiation definition">,
InGroup<IgnoredAttributes>;
@@ -3407,11 +3662,11 @@ def err_attribute_weakref_without_alias : Error<
def err_alias_not_supported_on_darwin : Error <
"aliases are not supported on darwin">;
def warn_attribute_wrong_decl_type_str : Warning<
- "%0 attribute only applies to %1">, InGroup<IgnoredAttributes>;
+ "%0%select{ attribute|}1 only applies to %2">, InGroup<IgnoredAttributes>;
def err_attribute_wrong_decl_type_str : Error<
- warn_attribute_wrong_decl_type_str.Text>;
+ warn_attribute_wrong_decl_type_str.Summary>;
def warn_attribute_wrong_decl_type : Warning<
- "%0 attribute only applies to %select{"
+ "%0%select{ attribute|}1 only applies to %select{"
"functions"
"|unions"
"|variables and functions"
@@ -3424,13 +3679,15 @@ def warn_attribute_wrong_decl_type : Warning<
"|types and namespaces"
"|variables, functions and classes"
"|kernel functions"
- "|non-K&R-style functions}1">,
+ "|non-K&R-style functions}2">,
InGroup<IgnoredAttributes>;
-def err_attribute_wrong_decl_type : Error<warn_attribute_wrong_decl_type.Text>;
+def err_attribute_wrong_decl_type : Error<warn_attribute_wrong_decl_type.Summary>;
def warn_type_attribute_wrong_type : Warning<
"'%0' only applies to %select{function|pointer|"
"Objective-C object or block pointer}1 types; type here is %2">,
InGroup<IgnoredAttributes>;
+def err_type_attribute_wrong_type : Error<
+ warn_type_attribute_wrong_type.Summary>;
def warn_incomplete_encoded_type : Warning<
"encoding of %0 type is incomplete because %1 component has unknown encoding">,
InGroup<DiagGroup<"encode-type">>;
@@ -3446,6 +3703,32 @@ def err_attribute_vecreturn_only_vector_member : Error<
"the vecreturn attribute can only be used on a class or structure with one member, which must be a vector">;
def err_attribute_vecreturn_only_pod_record : Error<
"the vecreturn attribute can only be used on a POD (plain old data) class or structure (i.e. no virtual functions)">;
+def err_sme_attr_mismatch : Error<
+ "function declared %0 was previously declared %1, which has different SME function attributes">;
+def err_sme_call_in_non_sme_target : Error<
+ "call to a streaming function requires 'sme'">;
+def err_sme_za_call_no_za_state : Error<
+ "call to a shared ZA function requires the caller to have ZA state">;
+def err_sme_zt0_call_no_zt0_state : Error<
+ "call to a shared ZT0 function requires the caller to have ZT0 state">;
+def err_sme_unimplemented_za_save_restore : Error<
+ "call to a function that shares state other than 'za' from a "
+ "function that has live 'za' state requires a spill/fill of ZA, which is not yet "
+ "implemented">;
+def note_sme_use_preserves_za : Note<
+ "add '__arm_preserves(\"za\")' to the callee if it preserves ZA">;
+def err_sme_definition_using_sm_in_non_sme_target : Error<
+ "function executed in streaming-SVE mode requires 'sme'">;
+def err_sme_definition_using_za_in_non_sme_target : Error<
+ "function using ZA state requires 'sme'">;
+def err_sme_definition_using_zt0_in_non_sme2_target : Error<
+ "function using ZT0 state requires 'sme2'">;
+def err_conflicting_attributes_arm_state : Error<
+ "conflicting attributes for state '%0'">;
+def err_unknown_arm_state : Error<
+ "unknown state '%0'">;
+def err_missing_arm_state : Error<
+ "missing state for %0">;
def err_cconv_change : Error<
"function declared '%0' here was previously declared "
"%select{'%2'|without calling convention}1">;
@@ -3458,11 +3741,11 @@ def warn_cconv_unsupported : Warning<
"|on builtin function"
"}1">,
InGroup<IgnoredAttributes>;
-def error_cconv_unsupported : Error<warn_cconv_unsupported.Text>;
+def error_cconv_unsupported : Error<warn_cconv_unsupported.Summary>;
def err_cconv_knr : Error<
"function with no prototype cannot use the %0 calling convention">;
def warn_cconv_knr : Warning<
- err_cconv_knr.Text>,
+ err_cconv_knr.Summary>,
InGroup<DiagGroup<"missing-prototype-for-cc">>;
def err_cconv_varargs : Error<
"variadic function cannot use %0 calling convention">;
@@ -3481,9 +3764,7 @@ def err_invalid_pcs : Error<"invalid PCS type">;
def warn_attribute_not_on_decl : Warning<
"%0 attribute ignored when parsing type">, InGroup<IgnoredAttributes>;
def err_base_specifier_attribute : Error<
- "%0 attribute cannot be applied to a base specifier">;
-def err_invalid_attribute_on_virtual_function : Error<
- "%0 attribute cannot be applied to virtual functions">;
+ "%0%select{ attribute|}1 cannot be applied to a base specifier">;
def warn_declspec_allocator_nonpointer : Warning<
"ignoring __declspec(allocator) because the function return type %0 is not "
"a pointer or reference type">, InGroup<IgnoredAttributes>;
@@ -3532,12 +3813,15 @@ def warn_availability_swift_unavailable_deprecated_only : Warning<
InGroup<Availability>;
def note_protocol_method : Note<
"protocol method is here">;
+def warn_availability_fuchsia_unavailable_minor : Warning<
+ "Fuchsia API Level prohibits specifying a minor or sub-minor version">,
+ InGroup<Availability>;
def warn_unguarded_availability :
Warning<"%0 is only available on %1 %2 or newer">,
InGroup<UnguardedAvailability>, DefaultIgnore;
def warn_unguarded_availability_new :
- Warning<warn_unguarded_availability.Text>,
+ Warning<warn_unguarded_availability.Summary>,
InGroup<UnguardedAvailabilityNew>;
def note_decl_unguarded_availability_silence : Note<
"annotate %select{%1|anonymous %1}0 with an availability attribute to silence this warning">;
@@ -3641,7 +3925,7 @@ def warn_fun_requires_negative_cap : Warning<
"calling function %0 requires negative capability '%1'">,
InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
-// Thread safety warnings on pass by reference
+// Thread safety warnings on pass/return by reference
def warn_guarded_pass_by_reference : Warning<
"passing variable %1 by reference requires holding %0 "
"%select{'%2'|'%2' exclusively}3">,
@@ -3650,6 +3934,14 @@ def warn_pt_guarded_pass_by_reference : Warning<
"passing the value that %1 points to by reference requires holding %0 "
"%select{'%2'|'%2' exclusively}3">,
InGroup<ThreadSafetyReference>, DefaultIgnore;
+def warn_guarded_return_by_reference : Warning<
+ "returning variable %1 by reference requires holding %0 "
+ "%select{'%2'|'%2' exclusively}3">,
+ InGroup<ThreadSafetyReferenceReturn>, DefaultIgnore;
+def warn_pt_guarded_return_by_reference : Warning<
+ "returning the value that %1 points to by reference requires holding %0 "
+ "%select{'%2'|'%2' exclusively}3">,
+ InGroup<ThreadSafetyReferenceReturn>, DefaultIgnore;
// Imprecise thread safety warnings
def warn_variable_requires_lock : Warning<
@@ -3666,13 +3958,13 @@ def warn_fun_requires_lock : Warning<
// Precise thread safety warnings
def warn_variable_requires_lock_precise :
- Warning<warn_variable_requires_lock.Text>,
+ Warning<warn_variable_requires_lock.Summary>,
InGroup<ThreadSafetyPrecise>, DefaultIgnore;
def warn_var_deref_requires_lock_precise :
- Warning<warn_var_deref_requires_lock.Text>,
+ Warning<warn_var_deref_requires_lock.Summary>,
InGroup<ThreadSafetyPrecise>, DefaultIgnore;
def warn_fun_requires_lock_precise :
- Warning<warn_fun_requires_lock.Text>,
+ Warning<warn_fun_requires_lock.Summary>,
InGroup<ThreadSafetyPrecise>, DefaultIgnore;
def note_found_mutex_near_match : Note<"found near match '%0'">;
@@ -3754,6 +4046,9 @@ def warn_impcast_integer_64_32 : Warning<
def warn_impcast_integer_precision_constant : Warning<
"implicit conversion from %2 to %3 changes value from %0 to %1">,
InGroup<ConstantConversion>;
+def warn_impcast_single_bit_bitield_precision_constant : Warning<
+ "implicit truncation from %2 to a one-bit wide bit-field changes value from "
+ "%0 to %1">, InGroup<SingleBitBitFieldConstantConversion>;
def warn_impcast_bitfield_precision_constant : Warning<
"implicit truncation from %2 to bit-field changes value from %0 to %1">,
InGroup<BitFieldConstantConversion>;
@@ -3897,7 +4192,8 @@ def warn_cast_align : Warning<
"cast from %0 to %1 increases required alignment from %2 to %3">,
InGroup<CastAlign>, DefaultIgnore;
def warn_old_style_cast : Warning<
- "use of old-style cast">, InGroup<OldStyleCast>, DefaultIgnore;
+ "use of old-style cast">, InGroup<OldStyleCast>, DefaultIgnore,
+ SuppressInSystemMacro;
// Separate between casts to void* and non-void* pointers.
// Some APIs use (abuse) void* for something like a user context,
@@ -3914,13 +4210,13 @@ def warn_pointer_to_int_cast : Warning<
"cast to smaller integer type %1 from %0">,
InGroup<PointerToIntCast>;
def warn_pointer_to_enum_cast : Warning<
- warn_pointer_to_int_cast.Text>,
+ warn_pointer_to_int_cast.Summary>,
InGroup<PointerToEnumCast>;
def warn_void_pointer_to_int_cast : Warning<
"cast to smaller integer type %1 from %0">,
InGroup<VoidPointerToIntCast>;
def warn_void_pointer_to_enum_cast : Warning<
- warn_void_pointer_to_int_cast.Text>,
+ warn_void_pointer_to_int_cast.Summary>,
InGroup<VoidPointerToEnumCast>;
def warn_attribute_ignored_for_field_of_type : Warning<
@@ -3956,6 +4252,9 @@ def warn_transparent_union_attribute_zero_fields : Warning<
def warn_attribute_type_not_supported : Warning<
"%0 attribute argument not supported: %1">,
InGroup<IgnoredAttributes>;
+def warn_attribute_type_not_supported_global : Warning<
+ "%0 attribute argument '%1' not supported on a global variable">,
+ InGroup<IgnoredAttributes>;
def warn_attribute_unknown_visibility : Warning<"unknown visibility %0">,
InGroup<IgnoredAttributes>;
def warn_attribute_protected_visibility :
@@ -3981,6 +4280,9 @@ def warn_vector_mode_deprecated : Warning<
"specifying vector types with the 'mode' attribute is deprecated; "
"use the 'vector_size' attribute instead">,
InGroup<DeprecatedAttributes>;
+def warn_deprecated_noreturn_spelling : Warning<
+ "the '[[_Noreturn]]' attribute spelling is deprecated in C23; use "
+ "'[[noreturn]]' instead">, InGroup<DeprecatedAttributes>;
def err_complex_mode_vector_type : Error<
"type of machine mode does not support base vector types">;
def err_enum_mode_vector_type : Error<
@@ -3991,6 +4293,10 @@ def warn_attribute_nonnull_no_pointers : Warning<
def warn_attribute_nonnull_parm_no_args : Warning<
"'nonnull' attribute when used on parameters takes no arguments">,
InGroup<IgnoredAttributes>;
+def warn_function_stmt_attribute_precedence : Warning<
+ "statement attribute %0 has higher precedence than function attribute "
+ "'%select{always_inline|flatten|noinline}1'">,
+ InGroup<IgnoredAttributes>;
def note_declared_nonnull : Note<
"declared %select{'returns_nonnull'|'nonnull'}0 here">;
def warn_attribute_sentinel_named_arguments : Warning<
@@ -4028,6 +4334,9 @@ def err_attribute_not_supported_on_arch
def warn_gcc_ignores_type_attr : Warning<
"GCC does not allow the %0 attribute to be written on a type">,
InGroup<GccCompat>;
+def warn_gcc_requires_variadic_function : Warning<
+ "GCC requires a function with the %0 attribute to be variadic">,
+ InGroup<GccCompat>;
// Clang-Specific Attributes
def warn_attribute_iboutlet : Warning<
@@ -4084,7 +4393,7 @@ def err_attribute_preferred_name_arg_invalid : Error<
"argument %0 to 'preferred_name' attribute is not a typedef for "
"a specialization of %1">;
def err_attribute_builtin_alias : Error<
- "%0 attribute can only be applied to a ARM or RISC-V builtin">;
+ "%0 attribute can only be applied to a ARM, HLSL or RISC-V builtin">;
// called-once attribute diagnostics.
def err_called_once_attribute_wrong_type : Error<
@@ -4256,14 +4565,15 @@ def err_void_param_qualified : Error<
"'void' as parameter must not have type qualifiers">;
def err_ident_list_in_fn_declaration : Error<
"a parameter list without types is only allowed in a function definition">;
-def ext_param_not_declared : Extension<
- "parameter %0 was not declared, defaulting to type 'int'">;
+def ext_param_not_declared : ExtWarn<
+ "parameter %0 was not declared, defaults to 'int'; ISO C99 and later do not "
+ "support implicit int">, InGroup<ImplicitInt>;
def err_param_default_argument : Error<
"C does not support default arguments">;
def err_param_default_argument_redefinition : Error<
"redefinition of default argument">;
def ext_param_default_argument_redefinition : ExtWarn<
- err_param_default_argument_redefinition.Text>,
+ err_param_default_argument_redefinition.Summary>,
InGroup<MicrosoftDefaultArgRedefinition>;
def err_param_default_argument_missing : Error<
"missing default argument on parameter">;
@@ -4299,6 +4609,9 @@ def err_uninitialized_member_in_ctor : Error<
def err_default_arg_makes_ctor_special : Error<
"addition of default argument on redeclaration makes this constructor a "
"%select{default|copy|move}0 constructor">;
+def err_stmt_expr_in_default_arg : Error<
+ "default %select{argument|non-type template argument}0 may not use a GNU "
+ "statement expression">;
def err_use_of_default_argument_to_function_declared_later : Error<
"use of default argument to function %0 that is declared later in class %1">;
@@ -4443,12 +4756,14 @@ def note_ovl_candidate_non_deduced_mismatch_qualified : Note<
// Note that we don't treat templates differently for this diagnostic.
def note_ovl_candidate_arity : Note<"candidate "
"%sub{select_ovl_candidate_kind}0,1,2 not viable: "
- "requires%select{ at least| at most|}3 %4 argument%s4, but %5 "
+ "requires%select{ at least| at most|}3 %4 "
+ "%select{|non-object }6argument%s4, but %5 "
"%plural{1:was|:were}5 provided">;
def note_ovl_candidate_arity_one : Note<"candidate "
"%sub{select_ovl_candidate_kind}0,1,2 not viable: "
"%select{requires at least|allows at most single|requires single}3 "
+ "%select{|non-object }6"
"argument %4, but %plural{0:no|:%5}5 arguments were provided">;
def note_ovl_candidate_deleted : Note<
@@ -4472,7 +4787,8 @@ def note_ovl_candidate_bad_conv_incomplete : Note<
"; remove &}7">;
def note_ovl_candidate_bad_list_argument : Note<
"candidate %sub{select_ovl_candidate_kind}0,1,2 not viable: "
- "cannot convert initializer list argument to %4">;
+ "%select{cannot convert initializer list|too few initializers in list"
+ "|too many initializers in list}7 argument to %4">;
def note_ovl_candidate_bad_overload : Note<
"candidate %sub{select_ovl_candidate_kind}0,1,2 not viable: "
"no overload of %4 matching %3 for %ordinal5 argument">;
@@ -4523,9 +4839,6 @@ def note_ovl_candidate_bad_cvr : Note<
"%select{const|restrict|const and restrict|volatile|const and volatile|"
"volatile and restrict|const, volatile, and restrict}4 qualifier"
"%select{||s||s|s|s}4">;
-def note_ovl_candidate_bad_unaligned : Note<
- "candidate %sub{select_ovl_candidate_kind}0,1,2 not viable: "
- "%ordinal5 argument (%3) would lose __unaligned qualifier">;
def note_ovl_candidate_bad_base_to_derived_conv : Note<
"candidate %sub{select_ovl_candidate_kind}0,1,2 not viable: "
"cannot %select{convert from|convert from|bind}3 "
@@ -4540,6 +4853,8 @@ def note_ovl_candidate_bad_target : Note<
def note_ovl_candidate_constraints_not_satisfied : Note<
"candidate %sub{select_ovl_candidate_kind}0,1,2 not viable: constraints "
"not satisfied">;
+def note_ovl_surrogate_constraints_not_satisfied : Note<
+ "conversion candidate %0 not viable: constraints not satisfied">;
def note_implicit_member_target_infer_collision : Note<
"implicit %sub{select_special_member_kind}0 inferred target collision: call to both "
"%select{__device__|__global__|__host__|__host__ __device__}1 and "
@@ -4582,6 +4897,8 @@ def ext_ovl_ambiguous_oper_binary_reversed : ExtWarn<
def note_ovl_ambiguous_oper_binary_reversed_self : Note<
"ambiguity is between a regular call to this operator and a call with the "
"argument order reversed">;
+def note_ovl_ambiguous_eqeq_reversed_self_non_const : Note<
+ "mark 'operator==' as const or add a matching 'operator!=' to resolve the ambiguity">;
def note_ovl_ambiguous_oper_binary_selected_candidate : Note<
"candidate function with non-reversed arguments">;
def note_ovl_ambiguous_oper_binary_reversed_candidate : Note<
@@ -4614,16 +4931,22 @@ def err_bound_member_function : Error<
"reference to non-static member function must be called"
"%select{|; did you mean to call it with no arguments?}0">;
def note_possible_target_of_call : Note<"possible target for call">;
+def err_no_viable_destructor : Error<
+ "no viable destructor found for class %0">;
+def err_ambiguous_destructor : Error<
+ "destructor of class %0 is ambiguous">;
def err_ovl_no_viable_object_call : Error<
"no matching function for call to object of type %0">;
def err_ovl_ambiguous_object_call : Error<
"call to object of type %0 is ambiguous">;
+def err_ovl_ambiguous_subscript_call : Error<
+ "call to subscript operator of type %0 is ambiguous">;
def err_ovl_deleted_object_call : Error<
"call to deleted function call operator in type %0">;
def note_ovl_surrogate_cand : Note<"conversion candidate of type %0">;
def err_member_call_without_object : Error<
- "call to non-static member function without an object argument">;
+ "call to %select{non-static|explicit}0 member function without an object argument">;
// C++ Address of Overloaded Function
def err_addr_ovl_no_viable : Error<
@@ -4648,8 +4971,10 @@ def err_ovl_no_viable_literal_operator : Error<
def err_template_param_shadow : Error<
"declaration of %0 shadows template parameter">;
def ext_template_param_shadow : ExtWarn<
- err_template_param_shadow.Text>, InGroup<MicrosoftTemplateShadow>;
+ err_template_param_shadow.Summary>, InGroup<MicrosoftTemplateShadow>;
def note_template_param_here : Note<"template parameter is declared here">;
+def note_template_param_external : Note<
+ "template parameter from hidden source: %0">;
def warn_template_export_unsupported : Warning<
"exported templates are unsupported">;
def err_template_outside_namespace_or_class_scope : Error<
@@ -4719,8 +5044,12 @@ def warn_cxx14_compat_template_nontype_parm_auto_type : Warning<
DefaultIgnore, InGroup<CXXPre17Compat>;
def err_template_param_default_arg_redefinition : Error<
"template parameter redefines default argument">;
+def err_template_param_default_arg_inconsistent_redefinition : Error<
+ "template parameter default argument is inconsistent with previous definition">;
def note_template_param_prev_default_arg : Note<
"previous default template argument defined here">;
+def note_template_param_prev_default_arg_in_other_module : Note<
+ "previous default template argument defined in module %0">;
def err_template_param_default_arg_missing : Error<
"template parameter missing a default argument">;
def ext_template_parameter_default_in_function_template : ExtWarn<
@@ -4745,6 +5074,7 @@ def warn_cxx11_compat_variable_template : Warning<
def err_template_variable_noparams : Error<
"extraneous 'template<>' in declaration of variable %0">;
def err_template_member : Error<"member %0 declared as a template">;
+def err_member_with_template_arguments : Error<"member %0 cannot have template arguments">;
def err_template_member_noparams : Error<
"extraneous 'template<>' in declaration of member %0">;
def err_template_tag_noparams : Error<
@@ -4759,6 +5089,9 @@ def ext_adl_only_template_id : ExtWarn<
"use of function template name with no prior declaration in function call "
"with explicit template arguments is a C++20 extension">, InGroup<CXX20>;
+def warn_unqualified_call_to_std_cast_function : Warning<
+ "unqualified call to '%0'">, InGroup<DiagGroup<"unqualified-std-cast-call">>;
+
// C++ Template Argument Lists
def err_template_missing_args : Error<
"use of "
@@ -4770,6 +5103,8 @@ def err_template_arg_list_different_arity : Error<
"%select{class template|function template|variable template|alias template|"
"template template parameter|concept|template}1 %2">;
def note_template_decl_here : Note<"template is declared here">;
+def note_template_decl_external : Note<
+ "template declaration from hidden source: %0">;
def err_template_arg_must_be_type : Error<
"template argument for template type parameter must be a type">;
def err_template_arg_must_be_type_suggest : Error<
@@ -4832,8 +5167,6 @@ def err_non_type_template_arg_subobject : Error<
"non-type template argument refers to subobject '%0'">;
def err_non_type_template_arg_addr_label_diff : Error<
"template argument / label address difference / what did you expect?">;
-def err_non_type_template_arg_unsupported : Error<
- "sorry, non-type template argument of type %0 is not yet supported">;
def err_template_arg_not_convertible : Error<
"non-type template argument of type %0 cannot be converted to a value "
"of type %1">;
@@ -4885,9 +5218,8 @@ def err_template_arg_not_object_or_func : Error<
"non-type template argument does not refer to an object or function">;
def err_template_arg_not_pointer_to_member_form : Error<
"non-type template argument is not a pointer to member constant">;
-def err_template_arg_member_ptr_base_derived_not_supported : Error<
- "sorry, non-type template argument of pointer-to-member type %1 that refers "
- "to member %q0 of a different class is not supported yet">;
+def err_template_arg_invalid : Error<
+ "non-type template argument '%0' is invalid">;
def ext_template_arg_extra_parens : ExtWarn<
"address non-type template argument cannot be surrounded by parentheses">;
def warn_cxx98_compat_template_arg_extra_parens : Warning<
@@ -4924,8 +5256,6 @@ def err_template_spec_unknown_kind : Error<
"class template">;
def note_specialized_entity : Note<
"explicitly specialized declaration is here">;
-def note_explicit_specialization_declared_here : Note<
- "explicit specialization declared here">;
def err_template_spec_decl_function_scope : Error<
"explicit specialization of %0 in function scope">;
def err_template_spec_decl_friend : Error<
@@ -4970,11 +5300,11 @@ def err_explicit_specialization_inconsistent_storage_class : Error<
"'%select{none|extern|static|__private_extern__|auto|register}0'">;
def err_dependent_function_template_spec_no_match : Error<
"no candidate function template was found for dependent"
- " friend function template specialization">;
+ " %select{member|friend}0 function template specialization">;
def note_dependent_function_template_spec_discard_reason : Note<
- "candidate ignored: %select{not a function template"
- "|not a member of the enclosing namespace;"
- " did you mean to explicitly qualify the specialization?}0">;
+ "candidate ignored: %select{not a function template|"
+ "not a member of the enclosing %select{class template|"
+ "namespace; did you mean to explicitly qualify the specialization?}1}0">;
// C++ class template specializations and out-of-line definitions
def err_template_spec_needs_header : Error<
@@ -5034,8 +5364,6 @@ def err_partial_spec_ordering_ambiguous : Error<
def note_partial_spec_match : Note<"partial specialization matches %0">;
def err_partial_spec_redeclared : Error<
"class template partial specialization %0 cannot be redeclared">;
-def note_partial_specialization_declared_here : Note<
- "explicit specialization declared here">;
def note_prev_partial_spec_here : Note<
"previous declaration of class template partial specialization %0 is here">;
def err_partial_spec_fully_specialized : Error<
@@ -5068,6 +5396,8 @@ def err_function_template_partial_spec : Error<
def err_template_recursion_depth_exceeded : Error<
"recursive template instantiation exceeded maximum depth of %0">,
DefaultFatal, NoSFINAE;
+def err_constraint_depends_on_self : Error<
+ "satisfaction of constraint '%0' depends on itself">, NoSFINAE;
def note_template_recursion_depth : Note<
"use -ftemplate-depth=N to increase recursive template instantiation depth">;
@@ -5104,6 +5434,8 @@ def note_template_exception_spec_instantiation_here : Note<
"in instantiation of exception specification for %0 requested here">;
def note_template_requirement_instantiation_here : Note<
"in instantiation of requirement here">;
+def note_template_requirement_params_instantiation_here : Note<
+ "in instantiation of requirement parameters here">;
def warn_var_template_missing : Warning<"instantiation of variable %q0 "
"required here, but no definition is available">,
InGroup<UndefinedVarTemplate>;
@@ -5158,6 +5490,10 @@ def note_constraint_normalization_here : Note<
def note_parameter_mapping_substitution_here : Note<
"while substituting into concept arguments here; substitution failures not "
"allowed in concept arguments">;
+def note_building_deduction_guide_here : Note<
+ "while building implicit deduction guide first needed here">;
+def note_lambda_substitution_here : Note<
+ "while substituting into a lambda expression here">;
def note_instantiation_contexts_suppressed : Note<
"(skipping %0 context%s0 in backtrace; use -ftemplate-backtrace-limit=0 to "
"see all)">;
@@ -5263,7 +5599,7 @@ def err_mismatched_exception_spec_explicit_instantiation : Error<
"exception specification in explicit instantiation does not match "
"instantiated one">;
def ext_mismatched_exception_spec_explicit_instantiation : ExtWarn<
- err_mismatched_exception_spec_explicit_instantiation.Text>,
+ err_mismatched_exception_spec_explicit_instantiation.Summary>,
InGroup<MicrosoftExceptionSpec>;
def err_explicit_instantiation_dependent : Error<
"explicit instantiation has dependent template arguments">;
@@ -5301,6 +5637,12 @@ def err_typename_refers_to_using_value_decl : Error<
"%0 in %1">;
def note_using_value_decl_missing_typename : Note<
"add 'typename' to treat this using declaration as a type">;
+def warn_cxx17_compat_implicit_typename : Warning<"use of implicit 'typename' is "
+ "incompatible with C++ standards before C++20">, InGroup<CXX20Compat>,
+ DefaultIgnore;
+def ext_implicit_typename : ExtWarn<"missing 'typename' prior to dependent "
+ "type name %0%1; implicit 'typename' is a C++20 extension">,
+ InGroup<CXX20>;
def err_template_kw_refers_to_non_template : Error<
"%0%select{| following the 'template' keyword}1 "
@@ -5310,10 +5652,10 @@ def note_template_kw_refers_to_non_template : Note<
def err_template_kw_refers_to_dependent_non_template : Error<
"%0%select{| following the 'template' keyword}1 "
"cannot refer to a dependent template">;
-def err_template_kw_refers_to_class_template : Error<
- "'%0%1' instantiated to a class template, not a function template">;
-def note_referenced_class_template : Note<
- "class template declared here">;
+def err_template_kw_refers_to_type_template : Error<
+ "'%0%1' is expected to be a non-type template, but instantiated to a %select{class|type alias}2 template">;
+def note_referenced_type_template : Note<
+ "%select{class|type alias}0 template declared here">;
def err_template_kw_missing : Error<
"missing 'template' keyword prior to dependent template name '%0%1'">;
def ext_template_outside_of_template : ExtWarn<
@@ -5359,9 +5701,9 @@ def err_unexpanded_parameter_pack : Error<
"%select{expression|base type|declaration type|data member type|bit-field "
"size|static assertion|fixed underlying type|enumerator value|"
"using declaration|friend declaration|qualifier|initializer|default argument|"
- "non-type template parameter type|exception type|partial specialization|"
- "__if_exists name|__if_not_exists name|lambda|block|type constraint|"
- "requirement|requires clause}0 "
+ "non-type template parameter type|exception type|explicit specialization|"
+ "partial specialization|__if_exists name|__if_not_exists name|lambda|block|"
+ "type constraint|requirement|requires clause}0 "
"contains%plural{0: an|:}1 unexpanded parameter pack"
"%plural{0:|1: %2|2:s %2 and %3|:s %2, %3, ...}1">;
@@ -5417,6 +5759,9 @@ def err_found_later_in_class : Error<"member %0 used before its declaration">;
def ext_found_later_in_class : ExtWarn<
"use of member %0 before its declaration is a Microsoft extension">,
InGroup<MicrosoftTemplate>;
+def ext_unqualified_base_class : ExtWarn<
+ "unqualified base initializer of class templates is a Microsoft extension">,
+ InGroup<MicrosoftTemplate>;
def note_dependent_member_use : Note<
"must qualify identifier to find this declaration in dependent base class">;
def err_not_found_by_two_phase_lookup : Error<"call to function %0 that is neither "
@@ -5444,6 +5789,9 @@ def warn_deprecated_def : Warning<
def warn_unavailable_def : Warning<
"implementing unavailable method">,
InGroup<DeprecatedImplementations>, DefaultIgnore;
+def warn_deprecated_builtin : Warning<
+ "builtin %0 is deprecated; use %1 instead">,
+ InGroup<DeprecatedBuiltins>;
def err_unavailable : Error<"%0 is unavailable">;
def err_property_method_unavailable :
Error<"property access is using %0 method which is unavailable">;
@@ -5467,16 +5815,28 @@ def warn_missing_sentinel : Warning <
InGroup<Sentinel>;
def note_sentinel_here : Note<
"%select{function|method|block}0 has been explicitly marked sentinel here">;
+def warn_strict_uses_without_prototype : Warning<
+ "passing arguments to %select{a function|%1}0 without a prototype is "
+ "deprecated in all versions of C and is not supported in C23">,
+ InGroup<DeprecatedNonPrototype>;
def warn_missing_prototype : Warning<
"no previous prototype for function %0">,
InGroup<DiagGroup<"missing-prototypes">>, DefaultIgnore;
def note_declaration_not_a_prototype : Note<
"this declaration is not a prototype; add %select{'void'|parameter declarations}0 "
"to make it %select{a prototype for a zero-parameter function|one}0">;
-def warn_strict_prototypes : Warning<
- "this %select{function declaration is not|block declaration is not|"
- "old-style function definition is not preceded by}0 a prototype">,
- InGroup<DiagGroup<"strict-prototypes">>, DefaultIgnore;
+// This is not actually an extension, but we only want it to be enabled in
+// -pedantic mode and this is the most direct way of accomplishing that.
+def warn_strict_prototypes : Extension<
+ "a %select{function|block}0 declaration without a prototype is deprecated "
+ "%select{in all versions of C|}0">, InGroup<StrictPrototypes>;
+def warn_non_prototype_changes_behavior : Warning<
+ "a function %select{declaration|definition}0 without a prototype is "
+ "deprecated in all versions of C %select{and is not supported in C23|and is "
+ "treated as a zero-parameter prototype in C23, conflicting with a "
+ "%select{previous|subsequent}2 %select{declaration|definition}3}1">,
+ InGroup<DeprecatedNonPrototype>;
+def note_conflicting_prototype : Note<"conflicting prototype is here">;
def warn_missing_variable_declarations : Warning<
"no previous extern declaration for non-static variable %0">,
InGroup<DiagGroup<"missing-variable-declarations">>, DefaultIgnore;
@@ -5511,6 +5871,12 @@ def err_new_abi_tag_on_redeclaration : Error<
def note_use_ifdef_guards : Note<
"unguarded header; consider using #ifdef guards or #pragma once">;
+def warn_var_decl_not_read_only : Warning<
+ "object of type %0 cannot be placed in read-only memory">,
+ InGroup<ReadOnlyPlacementChecks>;
+def note_enforce_read_only_placement : Note<"type was declared read-only here">;
+
+
def note_deleted_dtor_no_operator_delete : Note<
"virtual destructor requires an unambiguous, accessible 'operator delete'">;
def note_deleted_special_member_class_subobject : Note<
@@ -5563,8 +5929,8 @@ def warn_undefined_inline : Warning<"inline function %q0 is not defined">,
def err_undefined_inline_var : Error<"inline variable %q0 is not defined">;
def note_used_here : Note<"used here">;
-def err_internal_linkage_redeclaration : Error<
- "'internal_linkage' attribute does not appear on the first declaration of %0">;
+def err_attribute_missing_on_first_decl : Error<
+ "%0 attribute does not appear on the first declaration">;
def warn_internal_linkage_local_storage : Warning<
"'internal_linkage' attribute on a non-static local variable is ignored">,
InGroup<IgnoredAttributes>;
@@ -5640,6 +6006,8 @@ def warn_forward_class_redefinition : Warning<
def err_redefinition_different_typedef : Error<
"%select{typedef|type alias|type alias template}0 "
"redefinition with different types%diff{ ($ vs $)|}1,2">;
+def err_redefinition_different_concept : Error<
+ "redefinition of concept %0 with different template parameters or requirements">;
def err_tag_reference_non_tag : Error<
"%select{non-struct type|non-class type|non-union type|non-enum "
"type|typedef|type alias|template|type alias template|template "
@@ -5727,7 +6095,7 @@ def warn_typecheck_function_qualifiers_unspecified : Warning<
"'%0' qualifier on function type %1 has unspecified behavior">;
def warn_typecheck_reference_qualifiers : Warning<
"'%0' qualifier on reference type %1 has no effect">,
- InGroup<IgnoredQualifiers>;
+ InGroup<IgnoredReferenceQualifiers>;
def err_typecheck_invalid_restrict_not_pointer : Error<
"restrict requires a pointer or reference (%0 is invalid)">;
def err_typecheck_invalid_restrict_not_pointer_noarg : Error<
@@ -5737,7 +6105,7 @@ def err_typecheck_invalid_restrict_invalid_pointee : Error<
def ext_typecheck_zero_array_size : Extension<
"zero size arrays are an extension">, InGroup<ZeroLengthArray>;
def err_typecheck_zero_array_size : Error<
- "zero-length arrays are not permitted in C++">;
+ "zero-length arrays are not permitted in %select{C++|SYCL device code}0">;
def err_array_size_non_int : Error<"size of array has non-integer type %0">;
def err_init_element_not_constant : Error<
"initializer element is not a compile-time constant">;
@@ -5757,7 +6125,7 @@ def err_loader_uninitialized_extern_decl
: Error<"variable %0 cannot be declared both 'extern' and with the "
"'loader_uninitialized' attribute">;
def err_block_extern_cant_init : Error<
- "'extern' variable cannot have an initializer">;
+ "declaration of block scope identifier with linkage cannot have an initializer">;
def warn_extern_init : Warning<"'extern' variable has an initializer">,
InGroup<DiagGroup<"extern-initializer">>;
def err_variable_object_no_init : Error<
@@ -5778,7 +6146,7 @@ def ext_excess_initializers_in_char_array_initializer : ExtWarn<
"excess elements in char array initializer">,
InGroup<ExcessInitializers>;
def err_initializer_string_for_char_array_too_long : Error<
- "initializer-string for char array is too long">;
+ "initializer-string for char array is too long, array size is %0 but initializer has size %1 (including the null terminating character)">;
def ext_initializer_string_for_char_array_too_long : ExtWarn<
"initializer-string for char array is too long">,
InGroup<ExcessInitializers>;
@@ -5818,12 +6186,20 @@ def err_illegal_initializer_type : Error<"illegal initializer type %0">;
def ext_init_list_type_narrowing : ExtWarn<
"type %0 cannot be narrowed to %1 in initializer list">,
InGroup<CXX11Narrowing>, DefaultError, SFINAEFailure;
+def ext_init_list_type_narrowing_const_reference : ExtWarn<
+ ext_init_list_type_narrowing.Summary>,
+ InGroup<CXX11NarrowingConstReference>, DefaultError, SFINAEFailure;
def ext_init_list_variable_narrowing : ExtWarn<
"non-constant-expression cannot be narrowed from type %0 to %1 in "
"initializer list">, InGroup<CXX11Narrowing>, DefaultError, SFINAEFailure;
+def ext_init_list_variable_narrowing_const_reference : ExtWarn<
+ ext_init_list_variable_narrowing.Summary>, InGroup<CXX11NarrowingConstReference>, DefaultError, SFINAEFailure;
def ext_init_list_constant_narrowing : ExtWarn<
"constant expression evaluates to %0 which cannot be narrowed to type %1">,
InGroup<CXX11Narrowing>, DefaultError, SFINAEFailure;
+def ext_init_list_constant_narrowing_const_reference : ExtWarn<
+ ext_init_list_constant_narrowing.Summary>,
+ InGroup<CXX11NarrowingConstReference>, DefaultError, SFINAEFailure;
def warn_init_list_type_narrowing : Warning<
"type %0 cannot be narrowed to %1 in initializer list in C++11">,
InGroup<CXX11Narrowing>, DefaultIgnore;
@@ -5926,6 +6302,8 @@ def note_protected_by_vla_type_alias : Note<
"jump bypasses initialization of VLA type alias">;
def note_protected_by_constexpr_if : Note<
"jump enters controlled statement of constexpr if">;
+def note_protected_by_consteval_if : Note<
+ "jump enters controlled statement of consteval if">;
def note_protected_by_if_available : Note<
"jump enters controlled statement of if available">;
def note_protected_by_vla : Note<
@@ -5971,6 +6349,8 @@ def note_enters_block_captures_non_trivial_c_struct : Note<
"to destroy">;
def note_enters_compound_literal_scope : Note<
"jump enters lifetime of a compound literal that is non-trivial to destruct">;
+def note_enters_statement_expression : Note<
+ "jump enters a statement expression">;
def note_exits_cleanup : Note<
"jump exits scope of variable with __attribute__((cleanup))">;
@@ -6085,6 +6465,19 @@ def warn_superclass_variable_sized_type_not_at_end : Warning<
"field %0 can overwrite instance variable %1 with variable sized type %2"
" in superclass %3">, InGroup<ObjCFlexibleArray>;
+def err_flexible_array_count_not_in_same_struct : Error<
+ "'counted_by' field %0 isn't within the same struct as the flexible array">;
+def err_counted_by_attr_not_on_flexible_array_member : Error<
+ "'counted_by' only applies to C99 flexible array members">;
+def err_counted_by_attr_refers_to_flexible_array : Error<
+ "'counted_by' cannot refer to the flexible array %0">;
+def err_counted_by_must_be_in_structure : Error<
+ "field %0 in 'counted_by' not inside structure">;
+def err_flexible_array_counted_by_attr_field_not_integer : Error<
+ "field %0 in 'counted_by' must be a non-boolean integer type">;
+def note_flexible_array_counted_by_attr_field : Note<
+ "field %0 declared here">;
+
let CategoryName = "ARC Semantic Issue" in {
// ARC-mode diagnostics.
@@ -6351,10 +6744,23 @@ def err_func_def_incomplete_result : Error<
def err_atomic_specifier_bad_type
: Error<"_Atomic cannot be applied to "
"%select{incomplete |array |function |reference |atomic |qualified "
- "|sizeless ||integer }0type "
- "%1 %select{|||||||which is not trivially copyable|}0">;
+ "|sizeless ||integer |}0type "
+ "%1 %select{|||||||which is not trivially copyable||in C23}0">;
+def warn_atomic_member_access : Warning<
+ "accessing a member of an atomic structure or union is undefined behavior">,
+ InGroup<DiagGroup<"atomic-access">>, DefaultError;
// Expressions.
+def err_using_placeholder_variable : Error<
+ "ambiguous reference to placeholder '_', which is defined multiple times">;
+def note_reference_placeholder : Note<
+ "placeholder declared here">;
+def ext_placeholder_var_definition : ExtWarn<
+ "placeholder variables are a C++2c extension">, InGroup<CXX26>;
+def warn_cxx23_placeholder_var_definition : Warning<
+ "placeholder variables are incompatible with C++ standards before C++2c">,
+ DefaultIgnore, InGroup<CXXPre26Compat>;
+
def ext_sizeof_alignof_function_type : Extension<
"invalid application of '%0' to a function type">, InGroup<PointerArith>;
def ext_sizeof_alignof_void_type : Extension<
@@ -6368,7 +6774,8 @@ def err_sizeof_alignof_function_type : Error<
def err_openmp_default_simd_align_expr : Error<
"invalid application of '__builtin_omp_required_simd_align' to an expression, only type is allowed">;
def err_sizeof_alignof_typeof_bitfield : Error<
- "invalid application of '%select{sizeof|alignof|typeof}0' to bit-field">;
+ "invalid application of '%select{sizeof|alignof|typeof|typeof_unqual}0' to "
+ "bit-field">;
def err_alignof_member_of_incomplete_type : Error<
"invalid application of 'alignof' to a field of a class still being defined">;
def err_vecstep_non_scalar_vector_type : Error<
@@ -6391,22 +6798,21 @@ def warn_sub_ptr_zero_size_types : Warning<
def warn_pointer_arith_null_ptr : Warning<
"performing pointer arithmetic on a null pointer has undefined behavior%select{| if the offset is nonzero}0">,
InGroup<NullPointerArithmetic>, DefaultIgnore;
-def warn_gnu_null_ptr_arith : Warning<
+def warn_gnu_null_ptr_arith : Extension<
"arithmetic on a null pointer treated as a cast from integer to pointer is a GNU extension">,
- InGroup<NullPointerArithmetic>, DefaultIgnore;
+ InGroup<GNUNullPointerArithmetic>;
def warn_pointer_sub_null_ptr : Warning<
"performing pointer subtraction with a null pointer %select{has|may have}0 undefined behavior">,
InGroup<NullPointerSubtraction>, DefaultIgnore;
-def err_kernel_invalidates_sycl_unique_stable_name
- : Error<"kernel instantiation changes the result of an evaluated "
- "'__builtin_sycl_unique_stable_name'">;
-def note_sycl_unique_stable_name_evaluated_here
- : Note<"'__builtin_sycl_unique_stable_name' evaluated here">;
def warn_floatingpoint_eq : Warning<
"comparing floating point with == or != is unsafe">,
InGroup<DiagGroup<"float-equal">>, DefaultIgnore;
+def err_setting_eval_method_used_in_unsafe_context : Error <
+ "%select{'#pragma clang fp eval_method'|option 'ffp-eval-method'}0 cannot be used with "
+ "%select{option 'fapprox-func'|option 'mreassociate'|option 'freciprocal'|option 'ffp-eval-method'|'#pragma clang fp reassociate'|'#pragma clang fp reciprocal'}1">;
+
def warn_remainder_division_by_zero : Warning<
"%select{remainder|division}0 by zero is undefined">,
InGroup<DivZero>;
@@ -6476,15 +6882,27 @@ def warn_addition_in_bitshift : Warning<
"'%1' will be evaluated first">, InGroup<ShiftOpParentheses>;
def warn_self_assignment_builtin : Warning<
- "explicitly assigning value of variable of type %0 to itself">,
+ "explicitly assigning value of variable of type %0 to itself%select{|; did "
+ "you mean to assign to member %2?}1">,
InGroup<SelfAssignment>, DefaultIgnore;
def warn_self_assignment_overloaded : Warning<
- "explicitly assigning value of variable of type %0 to itself">,
+ "explicitly assigning value of variable of type %0 to itself%select{|; did "
+ "you mean to assign to member %2?}1">,
InGroup<SelfAssignmentOverloaded>, DefaultIgnore;
def warn_self_move : Warning<
- "explicitly moving variable of type %0 to itself">,
+ "explicitly moving variable of type %0 to itself%select{|; did you mean to "
+ "move to member %2?}1">,
InGroup<SelfMove>, DefaultIgnore;
+def err_builtin_move_forward_unsupported : Error<
+ "unsupported signature for %q0">;
+def err_use_of_unaddressable_function : Error<
+ "taking address of non-addressable standard library function">;
+// FIXME: This should also be in -Wc++23-compat once we have it.
+def warn_cxx20_compat_use_of_unaddressable_function : Warning<
+ "taking address of non-addressable standard library function "
+ "is incompatible with C++20">, InGroup<CXX20Compat>;
+
def warn_redundant_move_on_return : Warning<
"redundant move in return statement">,
InGroup<RedundantMove>, DefaultIgnore;
@@ -6527,7 +6945,8 @@ def err_arithmetic_nonfragile_interface : Error<
"this architecture and platform">;
def warn_deprecated_comma_subscript : Warning<
- "top-level comma expression in array subscript is deprecated">,
+ "top-level comma expression in array subscript is deprecated "
+ "in C++20 and unsupported in C++23">,
InGroup<DeprecatedCommaSubscript>;
def ext_subscript_non_lvalue : Extension<
@@ -6540,10 +6959,13 @@ def err_subscript_function_type : Error<
"subscript of pointer to function type %0">;
def err_subscript_incomplete_or_sizeless_type : Error<
"subscript of pointer to %select{incomplete|sizeless}0 type %1">;
+def err_subscript_svbool_t : Error<
+ "subscript of svbool_t is not allowed">;
def err_dereference_incomplete_type : Error<
"dereference of pointer to incomplete type %0">;
def ext_gnu_subscript_void_type : Extension<
- "subscript of a pointer to void is a GNU extension">, InGroup<PointerArith>;
+ "subscript of a pointer to void is a GNU extension">,
+ InGroup<GNUPointerArith>;
def err_typecheck_member_reference_struct_union : Error<
"member reference base type %0 is not a structure or union">;
def err_typecheck_member_reference_ivar : Error<
@@ -6602,6 +7024,11 @@ def err_member_decl_does_not_match : Error<
"does not match any declaration in %1">;
def err_friend_decl_with_def_arg_must_be_def : Error<
"friend declaration specifying a default argument must be a definition">;
+def err_friend_decl_with_enclosing_temp_constraint_must_be_def : Error<
+ "friend declaration with a constraint that depends on an enclosing "
+ "template parameter must be a definition">;
+def err_non_temp_friend_decl_with_requires_clause_must_be_def : Error<
+ "non-template friend declaration with a requires clause must be a definition">;
def err_friend_decl_with_def_arg_redeclared : Error<
"friend declaration specifying a default argument must be the only declaration">;
def err_friend_decl_does_not_match : Error<
@@ -6624,7 +7051,7 @@ def ext_out_of_line_declaration : ExtWarn<
def err_member_extra_qualification : Error<
"extra qualification on member %0">;
def warn_member_extra_qualification : Warning<
- err_member_extra_qualification.Text>, InGroup<MicrosoftExtraQualification>;
+ err_member_extra_qualification.Summary>, InGroup<MicrosoftExtraQualification>;
def warn_namespace_member_extra_qualification : Warning<
"extra qualification on member %0">,
InGroup<DiagGroup<"extra-qualification">>;
@@ -6682,8 +7109,8 @@ def err_array_init_plain_string_into_char8_t : Error<
def note_array_init_plain_string_into_char8_t : Note<
"add 'u8' prefix to form a 'char8_t' string literal">;
def err_array_init_utf8_string_into_char : Error<
- "%select{|ISO C++20 does not permit }0initialization of char array with "
- "UTF-8 string literal%select{ is not permitted by '-fchar8_t'|}0">;
+ "initialization of %select{|signed }0char array with "
+ "UTF-8 string literal is not permitted by %select{'-fchar8_t'|C++20}1">;
def warn_cxx20_compat_utf8_string : Warning<
"type of UTF-8 string literal will change from array of const char to "
"array of const char8_t in C++20">, InGroup<CXX20Compat>, DefaultIgnore;
@@ -6718,7 +7145,7 @@ def warn_standalone_specifier : Warning<"'%0' ignored on this declaration">,
def ext_standalone_specifier : ExtWarn<"'%0' is not permitted on a declaration "
"of a type">, InGroup<MissingDeclarations>;
def err_standalone_class_nested_name_specifier : Error<
- "forward declaration of %select{class|struct|interface|union|enum}0 cannot "
+ "forward declaration of %select{class|struct|interface|union|enum|enum class|enum struct}0 cannot "
"have a nested name specifier">;
def err_typecheck_sclass_func : Error<"illegal storage class on function">;
def err_static_block_func : Error<
@@ -6752,8 +7179,10 @@ def err_typecheck_unary_expr : Error<
def err_typecheck_indirection_requires_pointer : Error<
"indirection requires pointer operand (%0 invalid)">;
def ext_typecheck_indirection_through_void_pointer : ExtWarn<
- "ISO C++ does not allow indirection on operand of type %0">,
- InGroup<DiagGroup<"void-ptr-dereference">>;
+ "ISO C does not allow indirection on operand of type %0">,
+ InGroup<VoidPointerDeref>;
+def err_typecheck_indirection_through_void_pointer_cpp
+ : Error<"indirection not permitted on operand of type %0">;
def warn_indirection_through_null : Warning<
"indirection of non-volatile null pointer will be deleted, not trap">,
InGroup<NullDereference>;
@@ -6770,7 +7199,7 @@ def warn_taking_address_of_packed_member : Warning<
"taking address of packed member %0 of class or structure %q1 may result in an unaligned pointer value">,
InGroup<DiagGroup<"address-of-packed-member">>;
def warn_param_mismatched_alignment : Warning<
- "passing %0-byte aligned argument to %1-byte aligned parameter %2 of %3 may result in an unaligned pointer access">,
+ "passing %0-byte aligned argument to %1-byte aligned parameter %2%select{| of %4}3 may result in an unaligned pointer access">,
InGroup<DiagGroup<"align-mismatch">>;
def err_objc_object_assignment : Error<
@@ -6848,6 +7277,11 @@ def warn_arith_conv_enum_float_cxx20 : Warning<
"%plural{2:with|4:from|:and}0 "
"%select{enumeration|floating-point}1 type %3 is deprecated">,
InGroup<DeprecatedEnumFloatConversion>;
+def err_arith_conv_enum_float_cxx26 : Error<
+ "invalid %sub{select_arith_conv_kind}0 "
+ "%select{floating-point|enumeration}1 type %2 "
+ "%plural{2:with|4:from|:and}0 "
+ "%select{enumeration|floating-point}1 type %3">;
def warn_arith_conv_mixed_enum_types : Warning<
"%sub{select_arith_conv_kind}0 "
"different enumeration types%diff{ ($ and $)|}1,2">,
@@ -6856,23 +7290,27 @@ def warn_arith_conv_mixed_enum_types_cxx20 : Warning<
"%sub{select_arith_conv_kind}0 "
"different enumeration types%diff{ ($ and $)|}1,2 is deprecated">,
InGroup<DeprecatedEnumEnumConversion>;
+def err_conv_mixed_enum_types_cxx26 : Error<
+ "invalid %sub{select_arith_conv_kind}0 "
+ "different enumeration types%diff{ ($ and $)|}1,2">;
+
def warn_arith_conv_mixed_anon_enum_types : Warning<
- warn_arith_conv_mixed_enum_types.Text>,
+ warn_arith_conv_mixed_enum_types.Summary>,
InGroup<AnonEnumEnumConversion>, DefaultIgnore;
def warn_arith_conv_mixed_anon_enum_types_cxx20 : Warning<
- warn_arith_conv_mixed_enum_types_cxx20.Text>,
+ warn_arith_conv_mixed_enum_types_cxx20.Summary>,
InGroup<DeprecatedAnonEnumEnumConversion>;
def warn_conditional_mixed_enum_types : Warning<
- warn_arith_conv_mixed_enum_types.Text>,
+ warn_arith_conv_mixed_enum_types.Summary>,
InGroup<EnumCompareConditional>, DefaultIgnore;
def warn_conditional_mixed_enum_types_cxx20 : Warning<
- warn_arith_conv_mixed_enum_types_cxx20.Text>,
+ warn_arith_conv_mixed_enum_types_cxx20.Summary>,
InGroup<DeprecatedEnumCompareConditional>;
def warn_comparison_mixed_enum_types : Warning<
- warn_arith_conv_mixed_enum_types.Text>,
+ warn_arith_conv_mixed_enum_types.Summary>,
InGroup<EnumCompare>;
def warn_comparison_mixed_enum_types_cxx20 : Warning<
- warn_arith_conv_mixed_enum_types_cxx20.Text>,
+ warn_arith_conv_mixed_enum_types_cxx20.Summary>,
InGroup<DeprecatedEnumCompare>;
def warn_comparison_of_mixed_enum_types_switch : Warning<
"comparison of different enumeration types in switch statement"
@@ -6932,7 +7370,7 @@ def warn_out_of_range_compare : Warning<
"result of comparison of %select{constant %0|true|false}1 with "
"%select{expression of type %2|boolean expression}3 is always %4">,
InGroup<TautologicalOutOfRangeCompare>;
-def warn_tautological_bool_compare : Warning<warn_out_of_range_compare.Text>,
+def warn_tautological_bool_compare : Warning<warn_out_of_range_compare.Summary>,
InGroup<TautologicalConstantCompare>;
def warn_integer_constants_in_conditional_always_true : Warning<
"converting the result of '?:' with integer constants to a boolean always "
@@ -6964,14 +7402,16 @@ def note_logical_not_silence_with_parens : Note<
"add parentheses around left hand side expression to silence this warning">;
def err_invalid_this_use : Error<
- "invalid use of 'this' outside of a non-static member function">;
+ "invalid use of 'this' %select{outside of a non-static member function"
+ "|in a function with an explicit object parameter}0">;
def err_this_static_member_func : Error<
"'this' cannot be%select{| implicitly}0 used in a static member function "
"declaration">;
-def err_invalid_member_use_in_static_method : Error<
- "invalid use of member %0 in static member function">;
+def err_invalid_member_use_in_method : Error<
+ "invalid use of member %0 in %select{static|explicit object}1 member function">;
+
def err_invalid_qualified_function_type : Error<
- "%select{non-member function|static member function|deduction guide}0 "
+ "%select{non-member function|static member function|explicit object member function|deduction guide}0 "
"%select{of type %2 |}1cannot have '%3' qualifier">;
def err_compound_qualified_function_type : Error<
"%select{block pointer|pointer|reference}0 to function type %select{%2 |}1"
@@ -6979,6 +7419,26 @@ def err_compound_qualified_function_type : Error<
def err_qualified_function_typeid : Error<
"type operand %0 of 'typeid' cannot have '%1' qualifier">;
+def err_cxx20_deducing_this : Error<
+ "explicit object parameters are incompatible with C++ standards before C++2b">;
+def err_explicit_object_default_arg: Error<
+ "the explicit object parameter cannot have a default argument">;
+def err_explicit_object_parameter_pack: Error<
+ "the explicit object parameter cannot be a function parameter pack">;
+def err_explicit_object_parameter_must_be_first: Error<
+ "an explicit object parameter can only appear as the first parameter "
+ "of the %select{function|lambda}0">;
+def err_explicit_object_parameter_nonmember: Error<
+ "an explicit object parameter cannot appear in a "
+ "%select{static|virtual|non-member}0 %select{function|lambda}1">;
+def err_explicit_object_parameter_constructor: Error<
+ "an explicit object parameter cannot appear in a %select{constructor|destructor}0">;
+def err_explicit_object_parameter_mutable: Error<
+ "a lambda with an explicit object parameter cannot be mutable">;
+def err_invalid_explicit_object_type_in_lambda: Error<
+ "invalid explicit object parameter type %0 in lambda with capture; "
+ "the type must be the same as, or derived from, the lambda">;
+
def err_ref_qualifier_overload : Error<
"cannot overload a member function %select{without a ref-qualifier|with "
"ref-qualifier '&'|with ref-qualifier '&&'}0 with a member function %select{"
@@ -7064,11 +7524,11 @@ def err_duplicate_property : Error<
"property has a previous declaration">;
def ext_gnu_void_ptr : Extension<
"arithmetic on%select{ a|}0 pointer%select{|s}0 to void is a GNU extension">,
- InGroup<PointerArith>;
+ InGroup<GNUPointerArith>;
def ext_gnu_ptr_func_arith : Extension<
"arithmetic on%select{ a|}0 pointer%select{|s}0 to%select{ the|}2 function "
"type%select{|s}2 %1%select{| and %3}2 is a GNU extension">,
- InGroup<PointerArith>;
+ InGroup<GNUPointerArith>;
def err_readonly_message_assignment : Error<
"assigning to 'readonly' return result of an Objective-C message not allowed">;
def ext_integer_increment_complex : Extension<
@@ -7174,6 +7634,8 @@ def err_attribute_arm_builtin_alias : Error<
"'__clang_arm_builtin_alias' attribute can only be applied to an ARM builtin">;
def err_attribute_arm_mve_polymorphism : Error<
"'__clang_arm_mve_strict_polymorphism' attribute can only be applied to an MVE/NEON vector type">;
+def err_attribute_webassembly_funcref : Error<
+ "'__funcref' attribute can only be applied to a function pointer type">;
def warn_setter_getter_impl_required : Warning<
"property %0 requires method %1 to be defined - "
@@ -7314,8 +7776,6 @@ def err_bad_dynamic_cast_not_polymorphic : Error<"%0 is not polymorphic">;
// Other C++ expressions
def err_need_header_before_typeid : Error<
"you need to include <typeinfo> before using the 'typeid' operator">;
-def err_need_header_before_ms_uuidof : Error<
- "you need to include <guiddef.h> before using the '__uuidof' operator">;
def err_need_header_before_placement_new : Error<
"no matching %0 function for non-allocating placement new expression; "
"include <new>">;
@@ -7413,17 +7873,20 @@ def note_member_declared_here : Note<
"member %0 declared here">;
def note_member_first_declared_here : Note<
"member %0 first declared here">;
+def warn_bitwise_instead_of_logical : Warning<
+ "use of bitwise '%0' with boolean operands">,
+ InGroup<BitwiseInsteadOfLogical>, DefaultIgnore;
def warn_bitwise_negation_bool : Warning<
"bitwise negation of a boolean expression%select{;| always evaluates to 'true';}0 "
"did you mean logical negation?">,
- InGroup<DiagGroup<"bool-operation">>;
+ InGroup<BoolOperation>, DefaultIgnore;
def err_decrement_bool : Error<"cannot decrement expression of type bool">;
def warn_increment_bool : Warning<
"incrementing expression of type bool is deprecated and "
"incompatible with C++17">, InGroup<DeprecatedIncrementBool>;
def ext_increment_bool : ExtWarn<
"ISO C++17 does not allow incrementing expression of type bool">,
- DefaultError, InGroup<IncrementBool>;
+ DefaultError, SFINAEFailure, InGroup<IncrementBool>;
def err_increment_decrement_enum : Error<
"cannot %select{decrement|increment}0 expression of enum type %1">;
@@ -7433,9 +7896,6 @@ def warn_deprecated_increment_decrement_volatile : Warning<
def warn_deprecated_simple_assign_volatile : Warning<
"use of result of assignment to object of volatile-qualified type %0 "
"is deprecated">, InGroup<DeprecatedVolatile>;
-def warn_deprecated_compound_assign_volatile : Warning<
- "compound assignment to object of volatile-qualified type %0 is deprecated">,
- InGroup<DeprecatedVolatile>;
def warn_deprecated_volatile_return : Warning<
"volatile-qualified return type %0 is deprecated">,
InGroup<DeprecatedVolatile>;
@@ -7452,6 +7912,12 @@ def warn_deprecated_altivec_src_compat : Warning<
"'-altivec-compat=xl' option">,
InGroup<DiagGroup<"deprecated-altivec-src-compat">>;
+def warn_deprecated_lax_vec_conv_all : Warning<
+ "Implicit conversion between vector types ('%0' and '%1') is deprecated. "
+ "In the future, the behavior implied by '-fno-lax-vector-conversions' "
+ "will be the default.">,
+ InGroup<DiagGroup<"deprecate-lax-vec-conv-all">>;
+
def err_catch_incomplete_ptr : Error<
"cannot catch pointer to incomplete type %0">;
def err_catch_incomplete_ref : Error<
@@ -7490,7 +7956,8 @@ def note_throw_in_function : Note<"function declared non-throwing here">;
def err_seh_try_outside_functions : Error<
"cannot use SEH '__try' in blocks, captured regions, or Obj-C method decls">;
def err_mixing_cxx_try_seh_try : Error<
- "cannot use C++ 'try' in the same function as SEH '__try'">;
+ "cannot use %select{C++ 'try'|Objective-C '@try'}0 "
+ "in the same function as SEH '__try'">;
def err_seh_try_unsupported : Error<
"SEH '__try' is not supported on this target">;
def note_conflicting_try_here : Note<
@@ -7528,6 +7995,8 @@ def warn_overaligned_type : Warning<
"type %0 requires %1 bytes of alignment and the default allocator only "
"guarantees %2 bytes">,
InGroup<OveralignedType>, DefaultIgnore;
+def err_array_element_alignment : Error<
+ "size of array element of type %0 (%1 bytes) isn't a multiple of its alignment (%2 bytes)">;
def err_aligned_allocation_unavailable : Error<
"aligned %select{allocation|deallocation}0 function of type '%1' is "
"%select{only|not}4 available on %2%select{ %3 or newer|}4">;
@@ -7578,6 +8047,8 @@ def err_return_in_constructor_handler : Error<
def warn_cdtor_function_try_handler_mem_expr : Warning<
"cannot refer to a non-static member from the handler of a "
"%select{constructor|destructor}0 function try block">, InGroup<Exceptions>;
+def err_throw_object_throwing_dtor : Error<
+ "cannot throw object of type %0 with a potentially-throwing destructor">;
let CategoryName = "Lambda Issue" in {
def err_capture_more_than_once : Error<
@@ -7688,7 +8159,7 @@ let CategoryName = "Lambda Issue" in {
"is a C++20 extension">, InGroup<CXX20>;
def warn_deprecated_this_capture : Warning<
"implicit capture of 'this' with a capture default of '=' is deprecated">,
- InGroup<DeprecatedThisCapture>, DefaultIgnore;
+ InGroup<DeprecatedThisCapture>;
def note_deprecated_this_capture : Note<
"add an explicit capture of 'this' to capture '*this' by reference">;
@@ -7755,6 +8226,11 @@ def err_expected_class_or_namespace : Error<"%0 is not a class"
"%select{ or namespace|, namespace, or enumeration}1">;
def err_invalid_declarator_scope : Error<"cannot define or redeclare %0 here "
"because namespace %1 does not enclose namespace %2">;
+def err_export_non_namespace_scope_name : Error<
+ "cannot export %0 as it is not at namespace scope">;
+def err_redeclaration_non_exported : Error <
+ "cannot export redeclaration %0 here since the previous declaration "
+ "%select{is not exported|has internal linkage|has module linkage}1">;
def err_invalid_declarator_global_scope : Error<
"definition or redeclaration of %0 cannot name the global scope">;
def err_invalid_declarator_in_function : Error<
@@ -7870,24 +8346,6 @@ def err_incompatible_qualified_id : Error<
"sending type to parameter of incompatible type}0,1"
"|%diff{casting $ to incompatible type $|"
"casting type to incompatible type}0,1}2">;
-def ext_typecheck_convert_pointer_int : ExtWarn<
- "incompatible pointer to integer conversion "
- "%select{%diff{assigning to $ from $|assigning to different types}0,1"
- "|%diff{passing $ to parameter of type $|"
- "passing to parameter of different type}0,1"
- "|%diff{returning $ from a function with result type $|"
- "returning from function with different return type}0,1"
- "|%diff{converting $ to type $|converting between types}0,1"
- "|%diff{initializing $ with an expression of type $|"
- "initializing with expression of different type}0,1"
- "|%diff{sending $ to parameter of type $|"
- "sending to parameter of different type}0,1"
- "|%diff{casting $ to type $|casting between types}0,1}2"
- "%select{|; dereference with *|"
- "; take the address with &|"
- "; remove *|"
- "; remove &}3">,
- InGroup<IntConversion>;
def err_typecheck_convert_pointer_int : Error<
"incompatible pointer to integer conversion "
"%select{%diff{assigning to $ from $|assigning to different types}0,1"
@@ -7905,24 +8363,9 @@ def err_typecheck_convert_pointer_int : Error<
"; take the address with &|"
"; remove *|"
"; remove &}3">;
-def ext_typecheck_convert_int_pointer : ExtWarn<
- "incompatible integer to pointer conversion "
- "%select{%diff{assigning to $ from $|assigning to different types}0,1"
- "|%diff{passing $ to parameter of type $|"
- "passing to parameter of different type}0,1"
- "|%diff{returning $ from a function with result type $|"
- "returning from function with different return type}0,1"
- "|%diff{converting $ to type $|converting between types}0,1"
- "|%diff{initializing $ with an expression of type $|"
- "initializing with expression of different type}0,1"
- "|%diff{sending $ to parameter of type $|"
- "sending to parameter of different type}0,1"
- "|%diff{casting $ to type $|casting between types}0,1}2"
- "%select{|; dereference with *|"
- "; take the address with &|"
- "; remove *|"
- "; remove &}3">,
- InGroup<IntConversion>, SFINAEFailure;
+def ext_typecheck_convert_pointer_int : ExtWarn<
+ err_typecheck_convert_pointer_int.Summary>,
+ InGroup<IntConversion>, DefaultError;
def err_typecheck_convert_int_pointer : Error<
"incompatible integer to pointer conversion "
"%select{%diff{assigning to $ from $|assigning to different types}0,1"
@@ -7940,6 +8383,9 @@ def err_typecheck_convert_int_pointer : Error<
"; take the address with &|"
"; remove *|"
"; remove &}3">;
+def ext_typecheck_convert_int_pointer : ExtWarn<
+ err_typecheck_convert_int_pointer.Summary>,
+ InGroup<IntConversion>, DefaultError;
def ext_typecheck_convert_pointer_void_func : Extension<
"%select{%diff{assigning to $ from $|assigning to different types}0,1"
"|%diff{passing $ to parameter of type $|"
@@ -7982,7 +8428,7 @@ def ext_typecheck_convert_incompatible_pointer_sign : ExtWarn<
"where one is of the unique plain 'char' type and the other is not}3">,
InGroup<DiagGroup<"pointer-sign">>;
def err_typecheck_convert_incompatible_pointer_sign :
- Error<ext_typecheck_convert_incompatible_pointer_sign.Text>;
+ Error<ext_typecheck_convert_incompatible_pointer_sign.Summary>;
def ext_typecheck_convert_incompatible_pointer : ExtWarn<
"incompatible pointer types "
"%select{%diff{assigning to $ from $|assigning to different types}0,1"
@@ -8018,24 +8464,6 @@ def err_typecheck_convert_incompatible_pointer : Error<
"; take the address with &|"
"; remove *|"
"; remove &}3">;
-def ext_typecheck_convert_incompatible_function_pointer : ExtWarn<
- "incompatible function pointer types "
- "%select{%diff{assigning to $ from $|assigning to different types}0,1"
- "|%diff{passing $ to parameter of type $|"
- "passing to parameter of different type}0,1"
- "|%diff{returning $ from a function with result type $|"
- "returning from function with different return type}0,1"
- "|%diff{converting $ to type $|converting between types}0,1"
- "|%diff{initializing $ with an expression of type $|"
- "initializing with expression of different type}0,1"
- "|%diff{sending $ to parameter of type $|"
- "sending to parameter of different type}0,1"
- "|%diff{casting $ to type $|casting between types}0,1}2"
- "%select{|; dereference with *|"
- "; take the address with &|"
- "; remove *|"
- "; remove &}3">,
- InGroup<IncompatibleFunctionPointerTypes>;
def err_typecheck_convert_incompatible_function_pointer : Error<
"incompatible function pointer types "
"%select{%diff{assigning to $ from $|assigning to different types}0,1"
@@ -8053,6 +8481,12 @@ def err_typecheck_convert_incompatible_function_pointer : Error<
"; take the address with &|"
"; remove *|"
"; remove &}3">;
+def ext_typecheck_convert_incompatible_function_pointer : ExtWarn<
+ err_typecheck_convert_incompatible_function_pointer.Summary>,
+ InGroup<IncompatibleFunctionPointerTypes>, DefaultError;
+def warn_typecheck_convert_incompatible_function_pointer_strict : Warning<
+ err_typecheck_convert_incompatible_function_pointer.Summary>,
+ InGroup<DiagGroup<"incompatible-function-pointer-types-strict">>, DefaultIgnore;
def ext_typecheck_convert_discards_qualifiers : ExtWarn<
"%select{%diff{assigning to $ from $|assigning to different types}0,1"
"|%diff{passing $ to parameter of type $|"
@@ -8228,53 +8662,65 @@ def err_call_function_incomplete_return : Error<
def err_call_incomplete_argument : Error<
"argument type %0 is incomplete">;
def err_typecheck_call_too_few_args : Error<
- "too few %select{|||execution configuration }0arguments to "
+ "too few %select{|||execution configuration }0"
+ "%select{|non-object }3arguments to "
"%select{function|block|method|kernel function}0 call, "
"expected %1, have %2">;
def err_typecheck_call_too_few_args_one : Error<
- "too few %select{|||execution configuration }0arguments to "
+ "too few %select{|||execution configuration }0"
+ "%select{|non-object }2arguments to "
"%select{function|block|method|kernel function}0 call, "
"single argument %1 was not specified">;
def err_typecheck_call_too_few_args_at_least : Error<
- "too few %select{|||execution configuration }0arguments to "
+ "too few %select{|||execution configuration }0"
+ "%select{|non-object }3arguments to "
"%select{function|block|method|kernel function}0 call, "
"expected at least %1, have %2">;
def err_typecheck_call_too_few_args_at_least_one : Error<
- "too few %select{|||execution configuration }0arguments to "
+ "too few %select{|||execution configuration }0"
+ "%select{|non-object }2arguments to "
"%select{function|block|method|kernel function}0 call, "
"at least argument %1 must be specified">;
def err_typecheck_call_too_few_args_suggest : Error<
- "too few %select{|||execution configuration }0arguments to "
+ "too few %select{|||execution configuration }0"
+ "%select{|non-object }3arguments to "
"%select{function|block|method|kernel function}0 call, "
- "expected %1, have %2; did you mean %3?">;
+ "expected %1, have %2; did you mean %4?">;
def err_typecheck_call_too_few_args_at_least_suggest : Error<
- "too few %select{|||execution configuration }0arguments to "
+ "too few %select{|||execution configuration }0"
+ "%select{|non-object }3arguments to "
"%select{function|block|method|kernel function}0 call, "
- "expected at least %1, have %2; did you mean %3?">;
+ "expected at least %1, have %2; did you mean %4?">;
def err_typecheck_call_too_many_args : Error<
- "too many %select{|||execution configuration }0arguments to "
+ "too many %select{|||execution configuration }0"
+ "%select{|non-object }3arguments to "
"%select{function|block|method|kernel function}0 call, "
"expected %1, have %2">;
def err_typecheck_call_too_many_args_one : Error<
- "too many %select{|||execution configuration }0arguments to "
+ "too many %select{|||execution configuration }0"
+ "%select{|non-object }3arguments to "
"%select{function|block|method|kernel function}0 call, "
"expected single argument %1, have %2 arguments">;
def err_typecheck_call_too_many_args_at_most : Error<
- "too many %select{|||execution configuration }0arguments to "
+ "too many %select{|||execution configuration }0"
+ "%select{|non-object }3arguments to "
"%select{function|block|method|kernel function}0 call, "
"expected at most %1, have %2">;
def err_typecheck_call_too_many_args_at_most_one : Error<
"too many %select{|||execution configuration }0arguments to "
"%select{function|block|method|kernel function}0 call, "
- "expected at most single argument %1, have %2 arguments">;
+ "expected at most single %select{|non-object }3argument %1, "
+ "have %2%select{|non-object}3 arguments">;
def err_typecheck_call_too_many_args_suggest : Error<
- "too many %select{|||execution configuration }0arguments to "
+ "too many %select{|||execution configuration }0"
+ "%select{|non-object }3arguments to "
"%select{function|block|method|kernel function}0 call, "
- "expected %1, have %2; did you mean %3?">;
+ "expected %1, have %2; did you mean %4?">;
def err_typecheck_call_too_many_args_at_most_suggest : Error<
- "too many %select{|||execution configuration }0arguments to "
+ "too many %select{|||execution configuration }0"
+ "%select{|non-object }3arguments to "
"%select{function|block|method|kernel function}0 call, "
- "expected at most %1, have %2; did you mean %3?">;
+ "expected at most %1, have %2; did you mean %4?">;
def err_arc_typecheck_convert_incompatible_pointer : Error<
"incompatible pointer types passing retainable parameter of type %0"
@@ -8302,8 +8748,8 @@ def err_atomic_exclusive_builtin_pointer_size : Error<
" 1,2,4 or 8 byte type (%0 invalid)">;
def err_atomic_builtin_ext_int_size : Error<
"Atomic memory operand must have a power-of-two size">;
-def err_atomic_builtin_ext_int_prohibit : Error<
- "argument to atomic builtin of type '_ExtInt' is not supported">;
+def err_atomic_builtin_bit_int_prohibit : Error<
+ "argument to atomic builtin of type '_BitInt' is not supported">;
def err_atomic_op_needs_atomic : Error<
"address argument to atomic operation must be a pointer to _Atomic "
"type (%0 invalid)">;
@@ -8322,11 +8768,14 @@ def err_atomic_op_needs_atomic_int_ptr_or_fp : Error<
def err_atomic_op_needs_atomic_int_or_ptr : Error<
"address argument to atomic operation must be a pointer to %select{|atomic }0"
"integer or pointer (%1 invalid)">;
+def err_atomic_op_needs_atomic_int_or_fp : Error<
+ "address argument to atomic operation must be a pointer to %select{|atomic }0"
+ "integer or supported floating point type (%1 invalid)">;
def err_atomic_op_needs_atomic_int : Error<
"address argument to atomic operation must be a pointer to "
"%select{|atomic }0integer (%1 invalid)">;
def warn_atomic_op_has_invalid_memory_order : Warning<
- "memory order argument to atomic operation is invalid">,
+ "%select{|success |failure }0memory order argument to atomic operation is invalid">,
InGroup<DiagGroup<"atomic-memory-ordering">>;
def err_atomic_op_has_invalid_synch_scope : Error<
"synchronization scope argument to atomic operation is invalid">;
@@ -8335,13 +8784,22 @@ def warn_atomic_implicit_seq_cst : Warning<
InGroup<DiagGroup<"atomic-implicit-seq-cst">>, DefaultIgnore;
def err_overflow_builtin_must_be_int : Error<
- "operand argument to overflow builtin must be an integer (%0 invalid)">;
+ "operand argument to %select{overflow builtin|checked integer operation}0 "
+ "must be an integer type %select{|other than plain 'char', 'bool', bit-precise, "
+ "or an enumeration }0(%1 invalid)">;
def err_overflow_builtin_must_be_ptr_int : Error<
- "result argument to overflow builtin must be a pointer "
- "to a non-const integer (%0 invalid)">;
-def err_overflow_builtin_ext_int_max_size : Error<
- "__builtin_mul_overflow does not support signed _ExtInt operands of more "
+ "result argument to %select{overflow builtin|checked integer operation}0 "
+ "must be a pointer to a non-const integer type %select{|other than plain 'char', "
+ "'bool', bit-precise, or an enumeration }0(%1 invalid)">;
+def err_overflow_builtin_bit_int_max_size : Error<
+ "__builtin_mul_overflow does not support 'signed _BitInt' operands of more "
"than %0 bits">;
+def err_expected_struct_pointer_argument : Error<
+ "expected pointer to struct as %ordinal0 argument to %1, found %2">;
+def err_expected_callable_argument : Error<
+ "expected a callable expression as %ordinal0 argument to %1, found %2">;
+def note_building_builtin_dump_struct_call : Note<
+ "in call to printing function with arguments '(%0)' while dumping struct">;
def err_atomic_load_store_uses_lib : Error<
"atomic %select{load|store}0 requires runtime support that is not "
@@ -8380,8 +8838,10 @@ def err_ref_bad_target_global_initializer : Error<
"function %1 in global initializer">;
def err_capture_bad_target : Error<
"capture host variable %0 by reference in device or host device lambda function">;
-def err_capture_bad_target_this_ptr : Error<
- "capture host side class data member by this pointer in device or host device lambda function">;
+def warn_maybe_capture_bad_target_this_ptr : Warning<
+ "capture host side class data member by this pointer in device or host device lambda function "
+ "may result in invalid memory access if this pointer is not accessible on device side">,
+ InGroup<DiagGroup<"gpu-maybe-wrong-side">>;
def warn_kern_is_method : Extension<
"kernel function %0 is a member function; this may not be accepted by nvcc">,
InGroup<CudaCompat>;
@@ -8391,8 +8851,8 @@ def warn_kern_is_inline : Warning<
def err_variadic_device_fn : Error<
"CUDA device code does not support variadic functions">;
def err_va_arg_in_device : Error<
- "CUDA device code does not support va_arg">;
-def err_alias_not_supported_on_nvptx : Error<"CUDA does not support aliases">;
+"CUDA device code does not support va_arg">;
+def err_alias_not_supported_on_nvptx : Error<"CUDA older than 10.0 does not support .alias">;
def err_cuda_unattributed_constexpr_cannot_overload_device : Error<
"constexpr function %0 without __host__ or __device__ attributes cannot "
"overload __device__ function with same signature. Add a __host__ "
@@ -8441,6 +8901,10 @@ def note_cuda_device_builtin_surftex_should_be_template_class : Note<
def err_hip_invalid_args_builtin_mangled_name : Error<
"invalid argument: symbol must be a device-side function or global variable">;
+def warn_hip_omp_target_directives : Warning<
+ "HIP does not support OpenMP target directives; directive has been ignored">,
+ InGroup<HIPOpenMPOffloading>, DefaultError;
+
def warn_non_pod_vararg_with_format_string : Warning<
"cannot pass %select{non-POD|non-trivial}0 object of type %1 to variadic "
"%select{function|block|method|constructor}2; expected type from format "
@@ -8527,10 +8991,13 @@ def warn_bad_function_cast : Warning<
def warn_cast_function_type : Warning<
"cast %diff{from $ to $ |}0,1converts to incompatible function type">,
InGroup<CastFunctionType>, DefaultIgnore;
+def warn_cast_function_type_strict : Warning<warn_cast_function_type.Summary>,
+ InGroup<CastFunctionTypeStrict>, DefaultIgnore;
def err_cast_pointer_to_non_pointer_int : Error<
"pointer cannot be cast to type %0">;
-def err_cast_to_bfloat16 : Error<"cannot type-cast to __bf16">;
-def err_cast_from_bfloat16 : Error<"cannot type-cast from __bf16">;
+def err_nullptr_cast : Error<
+ "cannot cast an object of type %select{'nullptr_t' to %1|%1 to 'nullptr_t'}0"
+>;
def err_typecheck_expect_scalar_operand : Error<
"operand of type %0 where arithmetic or pointer type is required">;
def err_typecheck_cond_incompatible_operands : Error<
@@ -8540,6 +9007,10 @@ def err_typecheck_expect_flt_or_vector : Error<
"a vector of such types is required">;
def err_cast_selector_expr : Error<
"cannot type cast @selector expression">;
+def err_make_signed_integral_only : Error<
+ "'%select{make_unsigned|make_signed}0' is only compatible with "
+ "non-%select{bool|_BitInt(1)}1 integers and enum types, but was given "
+ "%2%select{| whose underlying type is %4}3">;
def ext_typecheck_cond_incompatible_pointers : ExtWarn<
"pointer type mismatch%diff{ ($ and $)|}0,1">,
InGroup<DiagGroup<"pointer-type-mismatch">>;
@@ -8551,6 +9022,9 @@ def err_typecheck_choose_expr_requires_constant : Error<
"'__builtin_choose_expr' requires a constant expression">;
def warn_unused_expr : Warning<"expression result unused">,
InGroup<UnusedValue>;
+def warn_unused_comma_left_operand : Warning<
+ "left operand of comma operator has no effect">,
+ InGroup<UnusedValue>;
def warn_unused_voidptr : Warning<
"expression result unused; should this cast be to 'void'?">,
InGroup<UnusedValue>;
@@ -8581,16 +9055,20 @@ def warn_unused_result : Warning<
def warn_unused_result_msg : Warning<
"ignoring return value of function declared with %0 attribute: %1">,
InGroup<UnusedResult>;
+def warn_unused_result_typedef_unsupported_spelling : Warning<
+ "'[[%select{nodiscard|gnu::warn_unused_result}0]]' attribute ignored when "
+ "applied to a typedef; consider using '__attribute__((warn_unused_result))' "
+ "or '[[clang::warn_unused_result]]' instead">, InGroup<IgnoredAttributes>;
def warn_unused_volatile : Warning<
"expression result unused; assign into a variable to force a volatile load">,
InGroup<DiagGroup<"unused-volatile-lvalue">>;
def ext_cxx14_attr : Extension<
- "use of the %0 attribute is a C++14 extension">, InGroup<CXX14>;
+ "use of the %0 attribute is a C++14 extension">, InGroup<CXX14Attrs>;
def ext_cxx17_attr : Extension<
- "use of the %0 attribute is a C++17 extension">, InGroup<CXX17>;
+ "use of the %0 attribute is a C++17 extension">, InGroup<CXX17Attrs>;
def ext_cxx20_attr : Extension<
- "use of the %0 attribute is a C++20 extension">, InGroup<CXX20>;
+ "use of the %0 attribute is a C++20 extension">, InGroup<CXX20Attrs>;
def warn_unused_comparison : Warning<
"%select{equality|inequality|relational|three-way}0 comparison result unused">,
@@ -8654,6 +9132,9 @@ def warn_redefine_extname_not_applied : Warning<
// inline asm.
let CategoryName = "Inline Assembly Issue" in {
+ def err_asm_pmf_through_constraint_not_permitted
+ : Error<"cannot pass a pointer-to-member through register-constrained "
+ "inline assembly parameter">;
def err_asm_invalid_lvalue_in_output : Error<"invalid lvalue in asm output">;
def err_asm_invalid_output_constraint : Error<
"invalid output constraint '%0' in asm">;
@@ -8688,6 +9169,8 @@ let CategoryName = "Inline Assembly Issue" in {
" in asm %select{input|output}1 with a memory constraint '%2'">;
def err_asm_input_duplicate_match : Error<
"more than one input constraint matches the same output '%0'">;
+ def err_store_value_to_reg : Error<
+ "impossible constraint in asm: can't store value into a register">;
def warn_asm_label_on_auto_decl : Warning<
"ignored asm label '%0' on automatic variable">;
@@ -8857,6 +9340,14 @@ def ext_ms_anonymous_record : ExtWarn<
def err_reference_to_local_in_enclosing_context : Error<
"reference to local %select{variable|binding}1 %0 declared in enclosing "
"%select{%3|block literal|lambda expression|context}2">;
+def err_capture_binding_openmp : Error<
+ "capturing a structured binding is not yet supported in OpenMP">;
+def ext_capture_binding : ExtWarn<
+ "captured structured bindings are a C++20 extension">, InGroup<CXX20>;
+def warn_cxx17_compat_capture_binding : Warning<
+ "captured structured bindings are incompatible with "
+ "C++ standards before C++20">,
+ InGroup<CXXPre20Compat>, DefaultIgnore;
def err_static_data_member_not_allowed_in_local_class : Error<
"static data member %0 not allowed in local %sub{select_tag_type_kind}2 %1">;
@@ -8910,10 +9401,22 @@ def err_operator_overload_needs_class_or_enum : Error<
"or enumeration type">;
def err_operator_overload_variadic : Error<"overloaded %0 cannot be variadic">;
+def warn_cxx20_compat_operator_overload_static : Warning<
+ "declaring overloaded %0 as 'static' is incompatible with C++ standards "
+ "before C++23">, InGroup<CXXPre23Compat>, DefaultIgnore;
+def ext_operator_overload_static : ExtWarn<
+ "declaring overloaded %0 as 'static' is a C++23 extension">, InGroup<CXX23>;
def err_operator_overload_static : Error<
"overloaded %0 cannot be a static member function">;
def err_operator_overload_default_arg : Error<
"parameter of overloaded %0 cannot have a default argument">;
+
+def ext_subscript_overload : Warning<
+ "overloaded %0 with %select{no|a defaulted|more than one}1 parameter is a "
+ "C++23 extension">, InGroup<CXXPre23Compat>, DefaultIgnore;
+def error_subscript_overload : Error<
+ "overloaded %0 cannot have %select{no|a defaulted|more than one}1 parameter before C++23">;
+
def err_operator_overload_must_be : Error<
"overloaded %0 must be a %select{unary|binary|unary or binary}2 operator "
"(has %1 parameter%s1)">;
@@ -8989,8 +9492,8 @@ def ext_string_literal_operator_template : ExtWarn<
"string literal operator templates are a GNU extension">,
InGroup<GNUStringLiteralOperatorTemplate>;
def warn_user_literal_reserved : Warning<
- "user-defined literal suffixes not starting with '_' are reserved"
- "%select{; no literal will invoke this operator|}0">,
+ "user-defined literal suffixes %select{<ERROR>|not starting with '_'|containing '__'}0 are reserved"
+ "%select{; no literal will invoke this operator|}1">,
InGroup<UserDefinedLiterals>;
// C++ conversion functions
@@ -9038,10 +9541,10 @@ def warn_cxx98_compat_explicit_conversion_functions : Warning<
// C++11 defaulted functions
def err_defaulted_special_member_params : Error<
- "an explicitly-defaulted %select{|copy |move }0constructor cannot "
+ "an explicitly-defaulted %sub{select_special_member_kind}0 cannot "
"have default arguments">;
def err_defaulted_special_member_variadic : Error<
- "an explicitly-defaulted %select{|copy |move }0constructor cannot "
+ "an explicitly-defaulted %sub{select_special_member_kind}0 cannot "
"be variadic">;
def err_defaulted_special_member_return_type : Error<
"explicitly-defaulted %select{copy|move}0 assignment operator must "
@@ -9049,6 +9552,9 @@ def err_defaulted_special_member_return_type : Error<
def err_defaulted_special_member_quals : Error<
"an explicitly-defaulted %select{copy|move}0 assignment operator may not "
"have 'const'%select{, 'constexpr'|}1 or 'volatile' qualifiers">;
+def err_defaulted_special_member_explicit_object_mismatch : Error<
+ "the type of the explicit object parameter of an explicitly-defaulted "
+ "%select{copy|move}0 assignment operator should match the type of the class %1">;
def err_defaulted_special_member_volatile_param : Error<
"the parameter for an explicitly-defaulted %sub{select_special_member_kind}0 "
"may not be volatile">;
@@ -9065,12 +9571,16 @@ def err_defaulted_copy_assign_not_ref : Error<
def err_incorrect_defaulted_constexpr : Error<
"defaulted definition of %sub{select_special_member_kind}0 "
"is not constexpr">;
+def err_incorrect_defaulted_constexpr_with_vb: Error<
+ "%sub{select_special_member_kind}0 cannot be 'constexpr' in a class with virtual base class">;
def err_incorrect_defaulted_consteval : Error<
"defaulted declaration of %sub{select_special_member_kind}0 "
"cannot be consteval because implicit definition is not constexpr">;
def warn_defaulted_method_deleted : Warning<
"explicitly defaulted %sub{select_special_member_kind}0 is implicitly "
"deleted">, InGroup<DefaultedFunctionDeleted>;
+def note_replace_equals_default_to_delete : Note<
+ "replace 'default' with 'delete'">;
def err_out_of_line_default_deletes : Error<
"defaulting this %sub{select_special_member_kind}0 "
"would delete it after its first declaration">;
@@ -9100,15 +9610,22 @@ def warn_cxx17_compat_defaulted_comparison : Warning<
"before C++20">, InGroup<CXXPre20Compat>, DefaultIgnore;
def err_defaulted_comparison_template : Error<
"comparison operator template cannot be defaulted">;
-def err_defaulted_comparison_out_of_class : Error<
- "%sub{select_defaulted_comparison_kind}0 can only be defaulted in a class "
- "definition">;
+def err_defaulted_comparison_num_args : Error<
+ "%select{non-member|member}0 %sub{select_defaulted_comparison_kind}1"
+ " must have %select{2|1}0 parameters">;
def err_defaulted_comparison_param : Error<
"invalid parameter type for defaulted %sub{select_defaulted_comparison_kind}0"
"; found %1, expected %2%select{| or %4}3">;
+def err_defaulted_comparison_param_unknown : Error<
+ "invalid parameter type for non-member defaulted"
+ " %sub{select_defaulted_comparison_kind}0"
+ "; found %1, expected class or reference to a constant class">;
def err_defaulted_comparison_param_mismatch : Error<
"parameters for defaulted %sub{select_defaulted_comparison_kind}0 "
"must have the same type%diff{ (found $ vs $)|}1,2">;
+def err_defaulted_comparison_not_friend : Error<
+ "%sub{select_defaulted_comparison_kind}0 is not a friend of"
+ " %select{|incomplete class }1%2">;
def err_defaulted_comparison_non_const : Error<
"defaulted member %sub{select_defaulted_comparison_kind}0 must be "
"const-qualified">;
@@ -9125,6 +9642,9 @@ def err_non_first_default_compare_deletes : Error<
"defaulting %select{this %sub{select_defaulted_comparison_kind}1|"
"the corresponding implicit 'operator==' for this defaulted 'operator<=>'}0 "
"would delete it after its first declaration">;
+def err_non_first_default_compare_in_class : Error<
+ "defaulting this %sub{select_defaulted_comparison_kind}0 "
+ "is not allowed because it was already declared outside the class">;
def note_defaulted_comparison_union : Note<
"defaulted %0 is implicitly deleted because "
"%2 is a %select{union-like class|union}1 with variant members">;
@@ -9142,14 +9662,18 @@ def note_defaulted_comparison_calls_deleted : Note<
"defaulted %0 is implicitly deleted because it would invoke a deleted "
"comparison function%select{| for member %2| for base class %2}1">;
def note_defaulted_comparison_no_viable_function : Note<
- "defaulted %0 is implicitly deleted because there is no viable three-way "
- "comparison function for%select{| member| base class}1 %2">;
+ "defaulted %0 is implicitly deleted because there is no viable "
+ "%select{three-way comparison function|'operator=='}1 for "
+ "%select{|member |base class }2%3">;
def note_defaulted_comparison_no_viable_function_synthesized : Note<
"three-way comparison cannot be synthesized because there is no viable "
"function for %select{'=='|'<'}0 comparison">;
def note_defaulted_comparison_not_rewritten_callee : Note<
"defaulted %0 is implicitly deleted because this non-rewritten comparison "
"function would be the best match for the comparison">;
+def note_defaulted_comparison_not_rewritten_conversion : Note<
+ "defaulted %0 is implicitly deleted because a builtin comparison function "
+ "using this conversion would be the best match for the comparison">;
def note_defaulted_comparison_cannot_deduce : Note<
"return type of defaulted 'operator<=>' cannot be deduced because "
"return type %2 of three-way comparison for %select{|member|base class}0 %1 "
@@ -9162,12 +9686,21 @@ def note_defaulted_comparison_cannot_deduce_undeduced_auto : Note<
"%select{|member|base class}0 %1 declared here">;
def note_defaulted_comparison_cannot_deduce_callee : Note<
"selected 'operator<=>' for %select{|member|base class}0 %1 declared here">;
-def err_incorrect_defaulted_comparison_constexpr : Error<
+def ext_defaulted_comparison_constexpr_mismatch : Extension<
"defaulted definition of %select{%sub{select_defaulted_comparison_kind}1|"
- "three-way comparison operator}0 "
- "cannot be declared %select{constexpr|consteval}2 because "
- "%select{it|the corresponding implicit 'operator=='}0 "
- "invokes a non-constexpr comparison function">;
+ "three-way comparison operator}0 that is "
+ "declared %select{constexpr|consteval}2 but"
+ "%select{|for which the corresponding implicit 'operator==' }0 "
+ "invokes a non-constexpr comparison function is a C++23 extension">,
+ InGroup<DiagGroup<"c++23-default-comp-relaxed-constexpr">>;
+def warn_cxx23_compat_defaulted_comparison_constexpr_mismatch : Warning<
+ "defaulted definition of %select{%sub{select_defaulted_comparison_kind}1|"
+ "three-way comparison operator}0 that is "
+ "declared %select{constexpr|consteval}2 but"
+ "%select{|for which the corresponding implicit 'operator==' }0 "
+ "invokes a non-constexpr comparison function is incompatible with C++ "
+ "standards before C++23">,
+ InGroup<CXXPre23Compat>, DefaultIgnore;
def note_defaulted_comparison_not_constexpr : Note<
"non-constexpr comparison function would be used to compare "
"%select{|member %1|base class %1}0">;
@@ -9176,6 +9709,10 @@ def note_defaulted_comparison_not_constexpr_here : Note<
def note_in_declaration_of_implicit_equality_comparison : Note<
"while declaring the corresponding implicit 'operator==' "
"for this defaulted 'operator<=>'">;
+def err_volatile_comparison_operator : Error<
+ "defaulted comparison function must not be volatile">;
+def err_ref_qualifier_comparison_operator : Error<
+ "ref-qualifier '&&' is not allowed on a defaulted comparison operator">;
def ext_implicit_exception_spec_mismatch : ExtWarn<
"function previously declared with an %select{explicit|implicit}0 exception "
@@ -9186,15 +9723,14 @@ def warn_ptr_arith_precedes_bounds : Warning<
"the pointer decremented by %0 refers before the beginning of the array">,
InGroup<ArrayBoundsPointerArithmetic>, DefaultIgnore;
def warn_ptr_arith_exceeds_bounds : Warning<
- "the pointer incremented by %0 refers past the end of the array (that "
- "contains %1 element%s2)">,
+ "the pointer incremented by %0 refers past the end of the array (that has type %1)">,
InGroup<ArrayBoundsPointerArithmetic>, DefaultIgnore;
def warn_array_index_precedes_bounds : Warning<
"array index %0 is before the beginning of the array">,
InGroup<ArrayBounds>;
def warn_array_index_exceeds_bounds : Warning<
- "array index %0 is past the end of the array (which contains %1 "
- "element%s2)">, InGroup<ArrayBounds>;
+ "array index %0 is past the end of the array (that has type %1%select{|, cast to %3}2)">,
+ InGroup<ArrayBounds>;
def warn_ptr_arith_exceeds_max_addressable_bounds : Warning<
"the pointer incremented by %0 refers past the last possible element for an array in %1-bit "
"address space containing %2-bit (%3-byte) elements (max possible %4 element%s5)">,
@@ -9206,6 +9742,12 @@ def warn_array_index_exceeds_max_addressable_bounds : Warning<
def note_array_declared_here : Note<
"array %0 declared here">;
+def warn_inconsistent_array_form : Warning<
+ "argument %0 of type %1 with mismatched bound">,
+ InGroup<ArrayParameter>, DefaultIgnore;
+def note_previous_declaration_as : Note<
+ "previously declared as %0 here">;
+
def warn_printf_insufficient_data_args : Warning<
"more '%%' conversions than data arguments">, InGroup<FormatInsufficientArgs>;
def warn_printf_data_arg_not_used : Warning<
@@ -9224,17 +9766,17 @@ def warn_format_conversion_argument_type_mismatch : Warning<
"%select{type|underlying type}2 %1">,
InGroup<Format>;
def warn_format_conversion_argument_type_mismatch_pedantic : Extension<
- warn_format_conversion_argument_type_mismatch.Text>,
+ warn_format_conversion_argument_type_mismatch.Summary>,
InGroup<FormatPedantic>;
def warn_format_conversion_argument_type_mismatch_confusion : Warning<
- warn_format_conversion_argument_type_mismatch.Text>,
+ warn_format_conversion_argument_type_mismatch.Summary>,
InGroup<FormatTypeConfusion>, DefaultIgnore;
def warn_format_argument_needs_cast : Warning<
"%select{values of type|enum values with underlying type}2 '%0' should not "
"be used as format arguments; add an explicit cast to %1 instead">,
InGroup<Format>;
def warn_format_argument_needs_cast_pedantic : Warning<
- warn_format_argument_needs_cast.Text>,
+ warn_format_argument_needs_cast.Summary>,
InGroup<FormatPedantic>, DefaultIgnore;
def warn_printf_positional_arg_exceeds_data_args : Warning <
"data argument position '%0' exceeds the number of data arguments (%1)">,
@@ -9305,6 +9847,9 @@ def warn_printf_ObjCflags_without_ObjCConversion: Warning<
def warn_printf_invalid_objc_flag: Warning<
"'%0' is not a valid object format flag">,
InGroup<Format>;
+def warn_printf_narg_not_supported : Warning<
+ "'%%n' specifier not supported on this platform">,
+ InGroup<Format>;
def warn_scanf_scanlist_incomplete : Warning<
"no closing ']' for '%%[' in scanf format string">,
InGroup<Format>;
@@ -9351,7 +9896,7 @@ def note_lambda_capture_initializer : Note<
"%select{implicitly |}2captured%select{| by reference}3"
"%select{%select{ due to use|}2 here|"
" via initialization of lambda capture %0}1">;
-def note_init_with_default_member_initalizer : Note<
+def note_init_with_default_member_initializer : Note<
"initializing field %0 with default member initializer">;
// Check for initializing a member variable with the address or a reference to
@@ -9410,10 +9955,10 @@ def warn_new_dangling_initializer_list : Warning<
"will be destroyed at the end of the full-expression">,
InGroup<DanglingInitializerList>;
def warn_unsupported_lifetime_extension : Warning<
- "sorry, lifetime extension of "
+ "lifetime extension of "
"%select{temporary|backing array of initializer list}0 created "
- "by aggregate initialization using default member initializer "
- "is not supported; lifetime of %select{temporary|backing array}0 "
+ "by aggregate initialization using a default member initializer "
+ "is not yet supported; lifetime of %select{temporary|backing array}0 "
"will end at the end of the full-expression">, InGroup<Dangling>;
// For non-floating point, expressions of the form x == x or x != x
@@ -9429,6 +9974,12 @@ def warn_comparison_bitwise_always : Warning<
def warn_comparison_bitwise_or : Warning<
"bitwise or with non-zero value always evaluates to true">,
InGroup<TautologicalBitwiseCompare>, DefaultIgnore;
+def warn_tautological_negation_and_compare: Warning<
+ "'&&' of a value and its negation always evaluates to false">,
+ InGroup<TautologicalNegationCompare>, DefaultIgnore;
+def warn_tautological_negation_or_compare: Warning<
+ "'||' of a value and its negation always evaluates to true">,
+ InGroup<TautologicalNegationCompare>, DefaultIgnore;
def warn_tautological_overlap_comparison : Warning<
"overlapping comparisons always evaluate to %select{false|true}0">,
InGroup<TautologicalOverlapCompare>, DefaultIgnore;
@@ -9484,7 +10035,8 @@ def err_generic_sel_multi_match : Error<
// Blocks
def err_blocks_disable : Error<"blocks support disabled - compile with -fblocks"
- " or %select{pick a deployment target that supports them|for OpenCL 2.0}0">;
+ " or %select{pick a deployment target that supports them|for OpenCL C 2.0"
+ " or OpenCL C 3.0 with __opencl_c_device_enqueue feature}0">;
def err_block_returning_array_function : Error<
"block cannot return %select{array|function}0 type %1">;
@@ -9521,6 +10073,11 @@ def err_break_not_in_loop_or_switch : Error<
def warn_loop_ctrl_binds_to_inner : Warning<
"'%0' is bound to current loop, GCC binds it to the enclosing loop">,
InGroup<GccCompat>;
+def err_omp_bind_required_on_loop : Error<
+ "expected 'bind' clause for 'loop' construct without an enclosing OpenMP "
+ "construct">;
+def err_omp_loop_reduction_clause : Error<
+ "'reduction' clause not allowed with '#pragma omp loop bind(teams)'">;
def warn_break_binds_to_switch : Warning<
"'break' is bound to loop, GCC binds it to switch">,
InGroup<GccCompat>;
@@ -9538,6 +10095,11 @@ def err_duplicate_case_differing_expr : Error<
def warn_case_empty_range : Warning<"empty case range specified">;
def warn_missing_case_for_condition :
Warning<"no case matching constant switch condition '%0'">;
+def err_loop_attr_conflict : Error<
+ "conflicting loop attribute %0">;
+def err_attribute_power_of_two_in_range : Error<
+ "%0 attribute requires an integer argument which is a constant power of two "
+ "between %1 and %2 inclusive; provided argument was %3">;
def warn_def_missing_case : Warning<"%plural{"
"1:enumeration value %1 not explicitly handled in switch|"
@@ -9552,6 +10114,8 @@ def warn_missing_case : Warning<"%plural{"
"3:enumeration values %1, %2, and %3 not handled in switch|"
":%0 enumeration values not handled in switch: %1, %2, %3...}0">,
InGroup<Switch>;
+def warn_switch_default : Warning<"'switch' missing 'default' label">,
+ InGroup<SwitchDefault>, DefaultIgnore;
def warn_unannotated_fallthrough : Warning<
"unannotated fall-through between switch labels">,
@@ -9570,9 +10134,6 @@ def err_fallthrough_attr_outside_switch : Error<
"fallthrough annotation is outside switch statement">;
def err_fallthrough_attr_invalid_placement : Error<
"fallthrough annotation does not directly precede switch label">;
-def warn_fallthrough_attr_unreachable : Warning<
- "fallthrough annotation in unreachable code">,
- InGroup<ImplicitFallthrough>, DefaultIgnore;
def warn_unreachable_default : Warning<
"default label in switch which covers all enumeration values">,
@@ -9623,6 +10184,9 @@ def err_ms_va_start_used_in_sysv_function : Error<
def warn_second_arg_of_va_start_not_last_named_param : Warning<
"second argument to 'va_start' is not the last named parameter">,
InGroup<Varargs>;
+def warn_c17_compat_ellipsis_only_parameter : Warning<
+ "'...' as the only parameter of a function is incompatible with C standards "
+ "before C23">, DefaultIgnore, InGroup<CPre23Compat>;
def warn_va_start_type_is_undefined : Warning<
"passing %select{an object that undergoes default argument promotion|"
"an object of reference type|a parameter declared with the 'register' "
@@ -9668,10 +10232,6 @@ def warn_falloff_noreturn_function : Warning<
InGroup<InvalidNoreturn>;
def err_noreturn_block_has_return_expr : Error<
"block declared 'noreturn' should not return">;
-def err_noreturn_missing_on_first_decl : Error<
- "function declared '[[noreturn]]' after its first declaration">;
-def note_noreturn_missing_first_decl : Note<
- "declaration missing '[[noreturn]]' attribute is here">;
def err_carries_dependency_missing_on_first_decl : Error<
"%select{function|parameter}0 declared '[[carries_dependency]]' "
"after its first declaration">;
@@ -9702,8 +10262,8 @@ def err_shufflevector_argument_too_large : Error<
def err_convertvector_non_vector : Error<
"first argument to __builtin_convertvector must be a vector">;
-def err_convertvector_non_vector_type : Error<
- "second argument to __builtin_convertvector must be a vector type">;
+def err_builtin_non_vector_type : Error<
+ "%0 argument to %1 must be of vector type">;
def err_convertvector_incompatible_vector : Error<
"first two arguments to __builtin_convertvector must have the same number of elements">;
@@ -9729,6 +10289,9 @@ def err_argument_invalid_range : Error<
def warn_argument_invalid_range : Warning<
"argument value %0 is outside the valid range [%1, %2]">, DefaultError,
InGroup<DiagGroup<"argument-outside-range">>;
+def warn_argument_undefined_behaviour : Warning<
+ "argument value %0 will result in undefined behaviour">,
+ InGroup<DiagGroup<"argument-undefined-behaviour">>;
def err_argument_not_multiple : Error<
"argument should be a multiple of %0">;
def err_argument_not_power_of_2 : Error<
@@ -9739,10 +10302,10 @@ def err_argument_not_shifted_byte_or_xxff : Error<
"argument should be an 8-bit value shifted by a multiple of 8 bits, or in the form 0x??FF">;
def err_argument_not_contiguous_bit_field : Error<
"argument %0 value should represent a contiguous bit field">;
-def err_rotation_argument_to_cadd
- : Error<"argument should be the value 90 or 270">;
-def err_rotation_argument_to_cmla
- : Error<"argument should be the value 0, 90, 180 or 270">;
+def err_rotation_argument_to_cadd : Error<
+ "argument should be the value 90 or 270">;
+def err_rotation_argument_to_cmla : Error<
+ "argument should be the value 0, 90, 180 or 270">;
def warn_neon_vector_initializer_non_portable : Warning<
"vector initializers are not compatible with NEON intrinsics in big endian "
"mode">, InGroup<DiagGroup<"nonportable-vector-initialization">>;
@@ -9767,10 +10330,12 @@ def err_mips_builtin_requires_dspr2 : Error<
"this builtin requires 'dsp r2' ASE, please use -mdspr2">;
def err_mips_builtin_requires_msa : Error<
"this builtin requires 'msa' ASE, please use -mmsa">;
-def err_ppc_builtin_only_on_arch : Error<
- "this builtin is only valid on POWER%0 or later CPUs">;
+def err_ppc_builtin_requires_abi : Error<
+ "this builtin requires ABI -mabi=%0">;
def err_ppc_invalid_use_mma_type : Error<
"invalid use of PPC MMA type">;
+def err_ppc_invalid_test_data_class_type : Error<
+ "expected a 'float', 'double' or '__float128' for the first argument">;
def err_x86_builtin_invalid_rounding : Error<
"invalid rounding argument">;
def err_x86_builtin_invalid_scale : Error<
@@ -9793,8 +10358,11 @@ def err_constant_integer_arg_type : Error<
"argument to %0 must be a constant integer">;
def ext_mixed_decls_code : Extension<
- "ISO C90 forbids mixing declarations and code">,
- InGroup<DiagGroup<"declaration-after-statement">>;
+ "mixing declarations and code is a C99 extension">,
+ InGroup<DeclarationAfterStatement>;
+def warn_mixed_decls_code : Warning<
+ "mixing declarations and code is incompatible with standards before C99">,
+ InGroup<DeclarationAfterStatement>, DefaultIgnore;
def err_non_local_variable_decl_in_for : Error<
"declaration of non-local variable in 'for' loop">;
@@ -9822,6 +10390,8 @@ def warn_duplicate_attribute_exact : Warning<
def warn_duplicate_attribute : Warning<
"attribute %0 is already applied with different arguments">,
InGroup<IgnoredAttributes>;
+def err_disallowed_duplicate_attribute : Error<
+ "attribute %0 cannot appear more than once on a declaration">;
def warn_sync_fetch_and_nand_semantics_change : Warning<
"the semantics of this intrinsic changed with GCC "
@@ -9835,15 +10405,13 @@ def warn_receiver_forward_class : Warning<
"receiver %0 is a forward class and corresponding @interface may not exist">,
InGroup<ForwardClassReceiver>;
def note_method_sent_forward_class : Note<"method %0 is used for the forward class">;
-def ext_missing_declspec : ExtWarn<
- "declaration specifier missing, defaulting to 'int'">;
def ext_missing_type_specifier : ExtWarn<
- "type specifier missing, defaults to 'int'">,
- InGroup<ImplicitInt>;
+ "type specifier missing, defaults to 'int'; ISO C99 and later do not support "
+ "implicit int">, InGroup<ImplicitInt>;
+def err_missing_type_specifier : Error<
+ "a type specifier is required for all declarations">;
def err_decimal_unsupported : Error<
"GNU decimal type extension not supported">;
-def err_missing_type_specifier : Error<
- "C++ requires a type specifier for all declarations">;
def err_objc_array_of_interfaces : Error<
"array of interface %0 is invalid (probably should be an array of pointers)">;
def ext_c99_array_usage : Extension<
@@ -9866,9 +10434,9 @@ def err_nserrordomain_wrong_type : Error<
"domain argument %0 does not point to an NSString or CFString constant">;
def warn_nsconsumed_attribute_mismatch : Warning<
- err_nsconsumed_attribute_mismatch.Text>, InGroup<NSConsumedMismatch>;
+ err_nsconsumed_attribute_mismatch.Summary>, InGroup<NSConsumedMismatch>;
def warn_nsreturns_retained_attribute_mismatch : Warning<
- err_nsreturns_retained_attribute_mismatch.Text>, InGroup<NSReturnsMismatch>;
+ err_nsreturns_retained_attribute_mismatch.Summary>, InGroup<NSReturnsMismatch>;
def note_getter_unavailable : Note<
"or because setter is declared here, but no getter method %0 is found">;
@@ -10036,8 +10604,6 @@ def err_opencl_scalar_type_rank_greater_than_vector_type : Error<
"element. (%0 and %1)">;
def err_bad_kernel_param_type : Error<
"%0 cannot be used as the type of a kernel parameter">;
-def err_opencl_implicit_function_decl : Error<
- "implicit declaration of function %0 is invalid in OpenCL">;
def err_record_with_pointers_kernel_param : Error<
"%select{struct|union}0 kernel parameters may not contain pointers">;
def note_within_field_of_type : Note<
@@ -10071,8 +10637,8 @@ def err_reference_pipe_type : Error <
def err_opencl_no_main : Error<"%select{function|kernel}0 cannot be called 'main'">;
def err_opencl_kernel_attr :
Error<"attribute %0 can only be applied to an OpenCL kernel function">;
-def err_opencl_return_value_with_address_space : Error<
- "return value cannot be qualified with address space">;
+def err_return_value_with_address_space : Error<
+ "return type cannot be qualified with address space">;
def err_opencl_constant_no_init : Error<
"variable in constant address space must be initialized">;
def err_opencl_atomic_init: Error<
@@ -10091,8 +10657,7 @@ def err_opencl_type_can_only_be_used_as_function_parameter : Error <
def err_opencl_type_not_found : Error<
"%0 type %1 not found; include the base header with -finclude-default-header">;
def warn_opencl_attr_deprecated_ignored : Warning <
- "%0 attribute is deprecated and ignored in OpenCL version %1">,
- InGroup<IgnoredAttributes>;
+ "%0 attribute is deprecated and ignored in %1">, InGroup<IgnoredAttributes>;
def err_opencl_variadic_function : Error<
"invalid prototype, variadic arguments are not allowed in OpenCL">;
def err_opencl_requires_extension : Error<
@@ -10157,7 +10722,7 @@ def err_opencl_builtin_expected_type : Error<
// OpenCL v3.0 s6.3.7 - Vector Components
def ext_opencl_ext_vector_type_rgba_selector: ExtWarn<
- "vector component name '%0' is an OpenCL C version 3.0 feature">,
+ "vector component name '%0' is a feature from OpenCL version 3.0 onwards">,
InGroup<OpenCLUnsupportedRGBA>;
def err_openclcxx_placement_new : Error<
@@ -10167,6 +10732,11 @@ def err_openclcxx_placement_new : Error<
def warn_mig_server_routine_does_not_return_kern_return_t : Warning<
"'mig_server_routine' attribute only applies to routines that return a kern_return_t">,
InGroup<IgnoredAttributes>;
+
+def warn_imp_cast_drops_unaligned : Warning<
+ "implicit cast from type %0 to type %1 drops __unaligned qualifier">,
+ InGroup<DiagGroup<"unaligned-qualifier-implicit-cast">>;
+
} // end of sema category
let CategoryName = "OpenMP Issue" in {
@@ -10196,10 +10766,20 @@ def err_omp_lastprivate_incomplete_type : Error<
"a lastprivate variable with incomplete type %0">;
def err_omp_reduction_incomplete_type : Error<
"a reduction list item with incomplete type %0">;
+def warn_omp_minus_in_reduction_deprecated : Warning<
+ "minus(-) operator for reductions is deprecated; use + or user defined reduction instead">,
+ InGroup<Deprecated>;
def err_omp_unexpected_clause_value : Error<
"expected %0 in OpenMP clause '%1'">;
+def err_omp_unexpected_call_to_omp_runtime_api
+ : Error<"calls to OpenMP runtime API are not allowed within a region that "
+ "corresponds to a construct with an order clause that specifies "
+ "concurrent">;
def err_omp_expected_var_name_member_expr : Error<
"expected variable name%select{| or data member of current class}0">;
+def err_omp_expected_var_name_member_expr_with_type : Error<
+ "expected variable%select{| or static data member|, static data member, "
+ "or non-static data member of current class}0 of type '%1'">;
def err_omp_expected_var_name_member_expr_or_array_item : Error<
"expected variable name%select{|, data member of current class}0, array element or array section">;
def err_omp_expected_addressable_lvalue_or_array_item : Error<
@@ -10353,9 +10933,12 @@ def err_omp_simd_region_cannot_use_stmt : Error<
def warn_omp_loop_64_bit_var : Warning<
"OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed">,
InGroup<OpenMPLoopForm>;
-def err_omp_unknown_reduction_identifier : Error<
+def err_omp_unknown_reduction_identifier_prior_omp_6_0 : Error<
"incorrect reduction identifier, expected one of '+', '-', '*', '&', '|', '^', "
"'&&', '||', 'min' or 'max' or declare reduction for type %0">;
+def err_omp_unknown_reduction_identifier_since_omp_6_0 : Error<
+ "incorrect reduction identifier, expected one of '+', '*', '&', '|', '^', "
+ "'&&', '||', 'min' or 'max' or declare reduction for type %0">;
def err_omp_not_resolved_reduction_identifier : Error<
"unable to resolve declare reduction construct for type %0">;
def err_omp_reduction_ref_type_arg : Error<
@@ -10389,6 +10972,9 @@ def err_omp_prohibited_region_simd : Error<
"OpenMP constructs may not be nested inside a simd region%select{| except for ordered simd, simd, scan, or atomic directive}0">;
def err_omp_prohibited_region_atomic : Error<
"OpenMP constructs may not be nested inside an atomic region">;
+def err_omp_prohibited_region_order
+ : Error<"construct '%0' not allowed in a region associated with a "
+ "directive with 'order' clause">;
def err_omp_prohibited_region_critical_same_name : Error<
"cannot nest 'critical' regions having the same name %0">;
def note_omp_previous_critical_region : Note<
@@ -10438,8 +11024,25 @@ def err_omp_atomic_capture_not_compound_statement : Error<
" where x is an lvalue expression with scalar type">;
def note_omp_atomic_capture: Note<
"%select{expected assignment expression|expected compound statement|expected exactly two expression statements|expected in right hand side of the first expression}0">;
+def err_omp_atomic_compare : Error<
+ "the statement for 'atomic compare' must be a compound statement of form '{x = expr ordop x ? expr : x;}', '{x = x ordop expr? expr : x;}',"
+ " '{x = x == e ? d : x;}', '{x = e == x ? d : x;}', or 'if(expr ordop x) {x = expr;}', 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}',"
+ " 'if(e == x) {x = d;}' where 'x' is an lvalue expression with scalar type, 'expr', 'e', and 'd' are expressions with scalar type,"
+ " and 'ordop' is one of '<' or '>'.">;
+def err_omp_atomic_compare_capture : Error<
+ "the statement for 'atomic compare capture' must be a compound statement of form '{v = x; cond-up-stmt}', ''{cond-up-stmt v = x;}', '{if(x == e) {x = d;} else {v = x;}}',"
+ " '{r = x == e; if(r) {x = d;}}', or '{r = x == e; if(r) {x = d;} else {v = x;}}', where 'cond-update-stmt' can have one of the following forms: 'if(expr ordop x) {x = expr;}',"
+ " 'if(x ordop expr) {x = expr;}', 'if(x == e) {x = d;}', or 'if(e == x) {x = d;}' where 'x', 'r', and 'v' are lvalue expressions with scalar type, 'expr', 'e', and 'd' are expressions with scalar type,"
+ " and 'ordop' is one of '<' or '>'.">;
+def note_omp_atomic_compare: Note<
+ "%select{expected compound statement|expected exactly one expression statement|expected assignment statement|expected conditional operator|expect result value to be at false expression|"
+ "expect binary operator in conditional expression|expect '<', '>' or '==' as order operator|expect comparison in a form of 'x == e', 'e == x', 'x ordop expr', or 'expr ordop x'|"
+ "expect lvalue for result value|expect scalar value|expect integer value|unexpected 'else' statement|expect '==' operator|expect an assignment statement 'v = x'|"
+ "expect a 'if' statement|expect no more than two statements|expect a compound statement|expect 'else' statement|expect a form 'r = x == e; if (r) ...'}0">;
+def err_omp_atomic_fail_wrong_or_no_clauses : Error<"expected a memory order clause">;
+def err_omp_atomic_fail_no_compare : Error<"expected 'compare' clause with the 'fail' modifier">;
def err_omp_atomic_several_clauses : Error<
- "directive '#pragma omp atomic' cannot contain more than one 'read', 'write', 'update' or 'capture' clause">;
+ "directive '#pragma omp atomic' cannot contain more than one 'read', 'write', 'update', 'capture', or 'compare' clause">;
def err_omp_several_mem_order_clauses : Error<
"directive '#pragma omp %0' cannot contain more than one %select{'seq_cst', 'relaxed', |}1'acq_rel', 'acquire' or 'release' clause">;
def err_omp_atomic_incompatible_mem_order_clause : Error<
@@ -10454,6 +11057,8 @@ def note_omp_nested_statement_here : Note<
"%select{statement|directive}0 outside teams construct here">;
def err_omp_single_copyprivate_with_nowait : Error<
"the 'copyprivate' clause must not be used with the 'nowait' clause">;
+def err_omp_nowait_clause_without_depend: Error<
+ "directive '#pragma omp taskwait' cannot use 'nowait' clause without 'depend' clause">;
def note_omp_nowait_clause_here : Note<
"'nowait' clause is here">;
def err_omp_single_decl_in_declare_simd_variant : Error<
@@ -10506,6 +11111,8 @@ def err_omp_wrong_linear_modifier : Error<
"expected %select{'val' modifier|one of 'ref', val' or 'uval' modifiers}0">;
def err_omp_wrong_linear_modifier_non_reference : Error<
"variable of non-reference type %0 can be used only with 'val' modifier, but used with '%1'">;
+def err_omp_step_simple_modifier_exclusive : Error<
+ "step simple modifier is exclusive and can't be use with 'val', 'uval' or 'ref' modifier">;
def err_omp_wrong_simdlen_safelen_values : Error<
"the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter">;
def err_omp_wrong_if_directive_name_modifier : Error<
@@ -10519,7 +11126,7 @@ def note_omp_previous_named_if_clause : Note<
def err_omp_ordered_directive_with_param : Error<
"'ordered' directive %select{without any clauses|with 'threads' clause}0 cannot be closely nested inside ordered region with specified parameter">;
def err_omp_ordered_directive_without_param : Error<
- "'ordered' directive with 'depend' clause cannot be closely nested inside ordered region without specified parameter">;
+ "'ordered' directive with '%0' clause cannot be closely nested inside ordered region without specified parameter">;
def note_omp_ordered_param : Note<
"'ordered' clause%select{| with specified parameter}0">;
def err_omp_expected_base_var_name : Error<
@@ -10528,6 +11135,8 @@ def err_omp_map_shared_storage : Error<
"variable already marked as mapped in current construct">;
def err_omp_invalid_map_type_for_directive : Error<
"%select{map type '%1' is not allowed|map type must be specified}0 for '#pragma omp %2'">;
+def err_omp_invalid_map_type_modifier_for_directive : Error<
+ "map type modifier '%0' is not allowed for '#pragma omp %1'">;
def err_omp_no_clause_for_directive : Error<
"expected at least one %0 clause for '#pragma omp %1'">;
def err_omp_threadprivate_in_clause : Error<
@@ -10549,21 +11158,25 @@ def note_omp_critical_hint_here : Note<
def note_omp_critical_no_hint : Note<
"%select{|previous }0directive with no 'hint' clause specified">;
def err_omp_depend_clause_thread_simd : Error<
- "'depend' clauses cannot be mixed with '%0' clause">;
+ "'%0' clauses cannot be mixed with '%1' clause">;
def err_omp_depend_sink_expected_loop_iteration : Error<
"expected%select{| %1}0 loop iteration variable">;
def err_omp_depend_sink_unexpected_expr : Error<
"unexpected expression: number of expressions is larger than the number of associated loops">;
def err_omp_depend_sink_expected_plus_minus : Error<
"expected '+' or '-' operation">;
-def err_omp_depend_sink_source_not_allowed : Error<
- "'depend(%select{source|sink:vec}0)' clause%select{|s}0 cannot be mixed with 'depend(%select{sink:vec|source}0)' clause%select{s|}0">;
+def err_omp_taskwait_depend_mutexinoutset_not_allowed : Error<
+ "'mutexinoutset' modifier not allowed in 'depend' clause on 'taskwait' directive">;
+def err_omp_sink_and_source_not_allowed : Error<
+ "'%0(%select{source|sink:vec}1)' clause%select{|s}1 cannot be mixed with '%0(%select{sink:vec|source}1)' clause%select{s|}1">;
def err_omp_depend_zero_length_array_section_not_allowed : Error<
"zero-length array section is not allowed in 'depend' clause">;
def err_omp_depend_sink_source_with_modifier : Error<
"depend modifier cannot be used with 'sink' or 'source' depend type">;
def err_omp_depend_modifier_not_iterator : Error<
"expected iterator specification as depend modifier">;
+def err_omp_map_modifier_not_iterator : Error<
+ "expected iterator specification as map modifier">;
def err_omp_linear_ordered : Error<
"'linear' clause cannot be specified along with 'ordered' clause with a parameter">;
def err_omp_unexpected_schedule_modifier : Error<
@@ -10584,6 +11197,8 @@ def err_omp_expected_int_param : Error<
"expected a reference to an integer-typed parameter">;
def err_omp_at_least_one_motion_clause_required : Error<
"expected at least one 'to' clause or 'from' clause specified to '#pragma omp target update'">;
+def err_omp_cannot_update_with_internal_linkage : Error<
+ "the host cannot update a declare target variable that is not externally visible.">;
def err_omp_usedeviceptr_not_a_pointer : Error<
"expected pointer or reference to pointer in 'use_device_ptr' clause">;
def err_omp_argument_type_isdeviceptr : Error <
@@ -10614,6 +11229,8 @@ def err_omp_directive_before_requires : Error <
"'%0' region encountered before requires directive with '%1' clause">;
def note_omp_requires_encountered_directive : Note <
"'%0' previously encountered here">;
+def err_omp_device_ancestor_without_requires_reverse_offload : Error <
+ "Device clause with ancestor device-modifier used without specifying 'requires reverse_offload'">;
def err_omp_invalid_scope : Error <
"'#pragma omp %0' directive must appear only in file scope">;
def note_omp_invalid_length_on_this_ptr_mapping : Note <
@@ -10677,9 +11294,9 @@ def err_omp_invariant_or_linear_dependency : Error<
"expected loop invariant expression or '<invariant1> * %0 + <invariant2>' kind of expression">;
def err_omp_wrong_dependency_iterator_type : Error<
"expected an integer or a pointer type of the outer loop counter '%0' for non-rectangular nests">;
-def err_device_unsupported_type
- : Error<"%0 requires %select{|%2 bit size}1 %3 type support, but device "
- "'%4' does not support it">;
+def err_target_unsupported_type
+ : Error<"%0 requires %select{|%2 bit size}1 %3 %select{|return }4type support,"
+ " but target '%5' does not support it">;
def err_omp_lambda_capture_in_declare_target_not_to : Error<
"variable captured in declare target region must appear in a to clause">;
def err_omp_device_type_mismatch : Error<
@@ -10714,9 +11331,16 @@ def err_omp_declare_variant_diff : Error<
"function with '#pragma omp declare variant' has a different %select{calling convention"
"|return type|constexpr specification|inline specification|storage class|"
"linkage}0">;
+def err_omp_declare_variant_prototype_required : Error<
+ "function with '#pragma omp declare variant' must have a prototype when "
+ "'append_args' is used">;
+def err_omp_interop_type_not_found : Error<
+ "'omp_interop_t' must be defined when 'append_args' clause is used; include <omp.h>">;
def err_omp_declare_variant_incompat_types : Error<
- "variant in '#pragma omp declare variant' with type %0 is incompatible with type %1"
- >;
+ "variant in '#pragma omp declare variant' with type %0 is incompatible with"
+ " type %1%select{| with appended arguments}2">;
+def err_omp_declare_variant_same_base_function : Error<
+ "variant in '#pragma omp declare variant' is the same as the base function">;
def warn_omp_declare_variant_marked_as_declare_variant : Warning<
"variant function in '#pragma omp declare variant' is itself marked as '#pragma omp declare variant'"
>, InGroup<SourceUsesOpenMP>;
@@ -10734,11 +11358,6 @@ def err_omp_non_lvalue_in_map_or_motion_clauses: Error<
"expected addressable lvalue in '%0' clause">;
def err_omp_var_expected : Error<
"expected variable of the '%0' type%select{|, not %2}1">;
-def warn_unknown_declare_variant_isa_trait
- : Warning<"isa trait '%0' is not known to the current target; verify the "
- "spelling or consider restricting the context selector with the "
- "'arch' selector further">,
- InGroup<SourceUsesOpenMP>;
def err_omp_non_pointer_type_array_shaping_base : Error<
"expected expression with a pointer to a complete type as a base of an array "
"shaping operation">;
@@ -10762,6 +11381,9 @@ def note_omp_protected_structured_block
: Note<"jump bypasses OpenMP structured block">;
def note_omp_exits_structured_block
: Note<"jump exits scope of OpenMP structured block">;
+def err_omp_lastprivate_loop_var_non_loop_iteration : Error<
+ "only loop iteration variables are allowed in 'lastprivate' clause in "
+ "'omp %0' directives">;
def err_omp_interop_variable_expected : Error<
"expected%select{| non-const}0 variable of type 'omp_interop_t'">;
def err_omp_interop_variable_wrong_type : Error<
@@ -10779,6 +11401,21 @@ def err_omp_dispatch_statement_call
def err_omp_unroll_full_variable_trip_count : Error<
"loop to be fully unrolled must have a constant trip count">;
def note_omp_directive_here : Note<"'%0' directive found here">;
+def err_omp_instantiation_not_supported
+ : Error<"instantiation of '%0' not supported yet">;
+def err_omp_adjust_arg_multiple_clauses : Error<
+ "'adjust_arg' argument %0 used in multiple clauses">;
+def err_omp_clause_requires_dispatch_construct : Error<
+ "'%0' clause requires 'dispatch' context selector">;
+def err_omp_append_args_with_varargs : Error<
+ "'append_args' is not allowed with varargs functions">;
+def err_openmp_vla_in_task_untied : Error<
+ "variable length arrays are not supported in OpenMP tasking regions with 'untied' clause">;
+def warn_omp_unterminated_declare_target : Warning<
+ "expected '#pragma omp end declare target' at end of file to match '#pragma omp %0'">,
+ InGroup<SourceUsesOpenMP>;
+def err_ompx_bare_no_grid : Error<
+ "'ompx_bare' clauses requires explicit grid size via 'num_teams' and 'thread_limit' clauses">;
} // end of OpenMP category
let CategoryName = "Related Result Type Issue" in {
@@ -10811,7 +11448,7 @@ def err_invalid_type_for_program_scope_var : Error<
let CategoryName = "Modules Issue" in {
def err_module_decl_in_module_map_module : Error<
"'module' declaration found while building module from module map">;
-def err_module_decl_in_header_module : Error<
+def err_module_decl_in_header_unit : Error<
"'module' declaration found while building header unit">;
def err_module_interface_implementation_mismatch : Error<
"missing 'export' specifier in module declaration while "
@@ -10872,10 +11509,14 @@ def ext_module_import_not_at_top_level_noop : ExtWarn<
def note_module_import_not_at_top_level : Note<"%0 begins here">;
def err_module_self_import : Error<
"import of module '%0' appears within same top-level module '%1'">;
+def err_module_self_import_cxx20 : Error<
+ "import of module '%0' appears within its own %select{interface|implementation}1">;
def err_module_import_in_implementation : Error<
"@import of module '%0' in implementation of '%1'; use #import">;
// C++ Modules
+def err_module_import_non_interface_nor_parition : Error<
+ "import of module '%0' imported non C++20 importable modules">;
def err_module_decl_not_at_start : Error<
"module declaration must occur at the start of the translation unit">;
def note_global_module_introducer_missing : Note<
@@ -10884,27 +11525,22 @@ def note_global_module_introducer_missing : Note<
def err_export_within_anonymous_namespace : Error<
"export declaration appears within anonymous namespace">;
def note_anonymous_namespace : Note<"anonymous namespace begins here">;
-def ext_export_no_name_block : ExtWarn<
- "ISO C++20 does not permit %select{an empty|a static_assert}0 declaration "
- "to appear in an export block">, InGroup<ExportUnnamed>;
-def ext_export_no_names : ExtWarn<
- "ISO C++20 does not permit a declaration that does not introduce any names "
- "to be exported">, InGroup<ExportUnnamed>;
def note_export : Note<"export block begins here">;
-def err_export_no_name : Error<
- "%select{empty|static_assert|asm}0 declaration cannot be exported">;
-def ext_export_using_directive : ExtWarn<
- "ISO C++20 does not permit using directive to be exported">,
- InGroup<DiagGroup<"export-using-directive">>;
def err_export_within_export : Error<
"export declaration appears within another export declaration">;
+def err_export_anon_ns_internal : Error<
+ "anonymous namespaces cannot be exported">;
def err_export_internal : Error<
"declaration of %0 with internal linkage cannot be exported">;
def err_export_using_internal : Error<
- "using declaration referring to %0 with internal linkage cannot be exported">;
+ "using declaration referring to %1 with %select{internal|module|unknown}0 "
+ "linkage cannot be exported">;
def err_export_not_in_module_interface : Error<
- "export declaration can only be used within a module interface unit"
- "%select{ after the module declaration|}0">;
+ "export declaration can only be used within a module purview">;
+def err_export_inline_not_defined : Error<
+ "inline function not defined%select{| before the private module fragment}0">;
+def err_export_partition_impl : Error<
+ "module partition implementations cannot be exported">;
def err_export_in_private_module_fragment : Error<
"export declaration cannot be used in a private module fragment">;
def note_private_module_fragment : Note<
@@ -10917,6 +11553,13 @@ def err_private_module_fragment_not_module_interface : Error<
"private module fragment in module implementation unit">;
def note_not_module_interface_add_export : Note<
"add 'export' here if this is intended to be a module interface unit">;
+def err_invalid_module_name : Error<"%0 is an invalid name for a module">;
+def err_extern_def_in_header_unit : Error<
+ "non-inline external definitions are not permitted in C++ header units">;
+
+def warn_experimental_header_unit : Warning<
+ "the implementation of header units is in an experimental phase">,
+ InGroup<DiagGroup<"experimental-header-units">>;
def ext_equivalent_internal_linkage_decl_in_modules : ExtWarn<
"ambiguous use of internal linkage declaration %0 defined in multiple modules">,
@@ -10949,7 +11592,7 @@ def err_coroutine_invalid_func_context : Error<
"|a function with a deduced return type|a varargs function"
"|a consteval function}0">;
def err_implied_coroutine_type_not_found : Error<
- "%0 type was not found; include <experimental/coroutine> before defining "
+ "%0 type was not found; include <coroutine> before defining "
"a coroutine">;
def err_implicit_coroutine_std_nothrow_type_not_found : Error<
"std::nothrow was not found; include <new> before defining a coroutine which "
@@ -10957,11 +11600,11 @@ def err_implicit_coroutine_std_nothrow_type_not_found : Error<
def err_malformed_std_nothrow : Error<
"std::nothrow must be a valid variable declaration">;
def err_malformed_std_coroutine_handle : Error<
- "std::experimental::coroutine_handle must be a class template">;
+ "std::coroutine_handle isn't a class template">;
def err_coroutine_handle_missing_member : Error<
- "std::experimental::coroutine_handle missing a member named '%0'">;
+ "std::coroutine_handle must have a member named '%0'">;
def err_malformed_std_coroutine_traits : Error<
- "'std::experimental::coroutine_traits' must be a class template">;
+ "std::coroutine_traits isn't a class template">;
def err_implied_std_coroutine_traits_promise_type_not_found : Error<
"this function cannot be a coroutine: %q0 has no member named 'promise_type'">;
def err_implied_std_coroutine_traits_promise_type_not_class : Error<
@@ -10973,8 +11616,6 @@ def err_coroutine_type_missing_specialization : Error<
"specialization %0">;
def err_coroutine_promise_incompatible_return_functions : Error<
"the coroutine promise type %0 declares both 'return_value' and 'return_void'">;
-def err_coroutine_promise_requires_return_function : Error<
- "the coroutine promise type %0 must declare either 'return_value' or 'return_void'">;
def note_coroutine_promise_implicit_await_transform_required_here : Note<
"call to 'await_transform' implicitly required by 'co_await' here">;
def note_coroutine_promise_suspend_implicitly_required : Note<
@@ -11009,11 +11650,39 @@ def err_coroutine_promise_final_suspend_requires_nothrow : Error<
def note_coroutine_function_declare_noexcept : Note<
"must be declared with 'noexcept'"
>;
+def warn_always_inline_coroutine : Warning<
+ "this coroutine may be split into pieces; not every piece is guaranteed to be inlined"
+ >,
+ InGroup<AlwaysInlineCoroutine>;
+def err_coroutine_unusable_new : Error<
+ "'operator new' provided by %0 is not usable with the function signature of %1"
+>;
+def err_coroutine_unfound_nothrow_new : Error <
+ "unable to find %select{'::operator new(size_t, nothrow_t)'|"
+ "'::operator new(size_t, align_val_t, nothrow_t)'}1 for %0"
+>;
+def warn_non_aligned_allocation_function : Warning <
+ "under -fcoro-aligned-allocation, the non-aligned allocation function "
+ "for the promise type %0 has higher precedence than the global aligned "
+ "allocation function">,
+ InGroup<CoroNonAlignedAllocationFunction>;
+def err_conflicting_aligned_options : Error <
+ "conflicting option '-fcoro-aligned-allocation' and '-fno-aligned-allocation'"
+>;
+def err_coro_invalid_addr_of_label : Error<
+ "the GNU address of label extension is not allowed in coroutines."
+>;
+def err_coroutine_return_type : Error<
+ "function returns a type %0 marked with [[clang::coro_return_type]] but is neither a coroutine nor a coroutine wrapper; "
+ "non-coroutines should be marked with [[clang::coro_wrapper]] to allow returning coroutine return type"
+>;
} // end of coroutines issue category
let CategoryName = "Documentation Issue" in {
def warn_not_a_doxygen_trailing_member_comment : Warning<
"not a Doxygen trailing comment">, InGroup<Documentation>, DefaultIgnore;
+def warn_splice_in_doxygen_comment : Warning<
+ "line splicing in Doxygen comments are not supported">, InGroup<Documentation>, DefaultIgnore;
} // end of documentation issue category
let CategoryName = "Nullability Issue" in {
@@ -11145,6 +11814,10 @@ def err_objc_type_args_wrong_arity : Error<
"too %select{many|few}0 type arguments for class %1 (have %2, expected %3)">;
}
+def err_type_available_only_in_default_eval_method : Error<
+ "cannot use type '%0' within '#pragma clang fp eval_method'; type is set "
+ "according to the default eval method for the translation unit">;
+
def err_objc_type_arg_not_id_compatible : Error<
"type argument %0 is neither an Objective-C object nor a block type">;
@@ -11170,7 +11843,7 @@ def note_shadow_field : Note<"declared here">;
def err_multiversion_required_in_redecl : Error<
"function declaration is missing %select{'target'|'cpu_specific' or "
- "'cpu_dispatch'}0 attribute in a multiversioned function">;
+ "'cpu_dispatch'|'target_version'}0 attribute in a multiversioned function">;
def note_multiversioning_caused_here : Note<
"function multiversioning caused by this declaration">;
def err_multiversion_after_used : Error<
@@ -11183,21 +11856,23 @@ def err_multiversion_duplicate : Error<
"multiversioned function redeclarations require identical target attributes">;
def err_multiversion_noproto : Error<
"multiversioned function must have a prototype">;
-def err_multiversion_disallowed_other_attr : Error<
- "attribute '%select{target|cpu_specific|cpu_dispatch}0' multiversioning cannot be combined"
- " with attribute %1">;
-def err_multiversion_mismatched_attrs
- : Error<"attributes on multiversioned functions must all match, attribute "
- "%0 %select{is missing|has different arguments}1">;
+def err_multiversion_disallowed_other_attr
+ : Error<"attribute "
+ "'%select{|target|cpu_specific|cpu_dispatch|target_clones|target_version}0' "
+ "multiversioning cannot be combined"
+ " with attribute %1">;
def err_multiversion_diff : Error<
"multiversioned function declaration has a different %select{calling convention"
- "|return type|constexpr specification|inline specification|storage class|"
- "linkage}0">;
-def err_multiversion_doesnt_support : Error<
- "attribute '%select{target|cpu_specific|cpu_dispatch}0' multiversioned functions do not "
- "yet support %select{function templates|virtual functions|"
- "deduced return types|constructors|destructors|deleted functions|"
- "defaulted functions|constexpr functions|consteval function}1">;
+ "|return type|constexpr specification|inline specification|linkage|"
+ "language linkage}0">;
+def err_multiversion_doesnt_support
+ : Error<"attribute "
+ "'%select{|target|cpu_specific|cpu_dispatch|target_clones|target_version}0' "
+ "multiversioned functions do not "
+ "yet support %select{function templates|virtual functions|"
+ "deduced return types|constructors|destructors|deleted functions|"
+ "defaulted functions|constexpr functions|consteval "
+ "function|lambdas}1">;
def err_multiversion_not_allowed_on_main : Error<
"'main' cannot be a multiversioned function">;
def err_multiversion_not_supported : Error<
@@ -11214,6 +11889,22 @@ def warn_multiversion_duplicate_entries : Warning<
def warn_dispatch_body_ignored : Warning<
"body of cpu_dispatch function will be ignored">,
InGroup<FunctionMultiVersioning>;
+def err_target_clone_must_have_default
+ : Error<"'target_clones' multiversioning requires a default target">;
+def err_target_clone_doesnt_match
+ : Error<"'target_clones' attribute does not match previous declaration">;
+def warn_target_clone_mixed_values
+ : ExtWarn<
+ "mixing 'target_clones' specifier mechanisms is permitted for GCC "
+ "compatibility; use a comma separated sequence of string literals, "
+ "or a string literal containing a comma-separated list of versions">,
+ InGroup<TargetClonesMixedSpecifiers>;
+def warn_target_clone_duplicate_options
+ : Warning<"version list contains duplicate entries">,
+ InGroup<FunctionMultiVersioning>;
+def warn_target_clone_no_impact_options
+ : Warning<"version list contains entries that don't impact code generation">,
+ InGroup<FunctionMultiVersioning>;
// three-way comparison operator diagnostics
def err_implied_comparison_category_type_not_found : Error<
@@ -11257,6 +11948,14 @@ def err_builtin_launder_invalid_arg : Error<
"%select{non-pointer|function pointer|void pointer}0 argument to "
"'__builtin_launder' is not allowed">;
+def err_builtin_invalid_arg_type: Error <
+ "%ordinal0 argument must be a "
+ "%select{vector, integer or floating point type|matrix|"
+ "pointer to a valid matrix element type|"
+ "signed integer or floating point type|vector type|"
+ "floating point type|"
+ "vector of integers}1 (was %2)">;
+
def err_builtin_matrix_disabled: Error<
"matrix types extension is disabled. Pass -fenable-matrix to enable it">;
def err_matrix_index_not_integer: Error<
@@ -11269,11 +11968,8 @@ def err_matrix_separate_incomplete_index: Error<
"matrix row and column subscripts cannot be separated by any expression">;
def err_matrix_subscript_comma: Error<
"comma expressions are not allowed as indices in matrix subscript expressions">;
-def err_builtin_matrix_arg: Error<"1st argument must be a matrix">;
def err_builtin_matrix_scalar_unsigned_arg: Error<
"%0 argument must be a constant unsigned integer expression">;
-def err_builtin_matrix_pointer_arg: Error<
- "%ordinal0 argument must be a pointer to a valid matrix element type">;
def err_builtin_matrix_pointer_arg_mismatch: Error<
"the pointee of the 2nd argument must match the element type of the 1st argument (%0 != %1)">;
def err_builtin_matrix_store_to_const: Error<
@@ -11324,10 +12020,17 @@ def warn_sycl_kernel_num_of_function_params : Warning<
def warn_sycl_kernel_return_type : Warning<
"function template with 'sycl_kernel' attribute must have a 'void' return type">,
InGroup<IgnoredAttributes>;
+def err_sycl_special_type_num_init_method : Error<
+ "types with 'sycl_special_class' attribute must have one and only one '__init' "
+ "method defined">;
+
+def warn_cuda_maxclusterrank_sm_90 : Warning<
+ "maxclusterrank requires sm_90 or higher, CUDA arch provided: %0, ignoring "
+ "%1 attribute">, InGroup<IgnoredAttributes>;
-def err_ext_int_bad_size : Error<"%select{signed|unsigned}0 _ExtInt must "
+def err_bit_int_bad_size : Error<"%select{signed|unsigned}0 _BitInt must "
"have a bit size of at least %select{2|1}0">;
-def err_ext_int_max_size : Error<"%select{signed|unsigned}0 _ExtInt of bit "
+def err_bit_int_max_size : Error<"%select{signed|unsigned}0 _BitInt of bit "
"sizes greater than %1 not supported">;
// errors of expect.with.probability
@@ -11347,7 +12050,118 @@ def warn_tcb_enforcement_violation : Warning<
// RISC-V builtin required extension warning
def err_riscv_builtin_requires_extension : Error<
- "builtin requires '%0' extension support to be enabled">;
+ "builtin requires%select{| at least one of the following extensions}0: %1">;
def err_riscv_builtin_invalid_lmul : Error<
"LMUL argument must be in the range [0,3] or [5,7]">;
+def err_riscv_type_requires_extension : Error<
+ "RISC-V type %0 requires the '%1' extension"
+>;
+
+def err_std_source_location_impl_not_found : Error<
+ "'std::source_location::__impl' was not found; it must be defined before '__builtin_source_location' is called">;
+def err_std_source_location_impl_malformed : Error<
+ "'std::source_location::__impl' must be standard-layout and have only two 'const char *' fields '_M_file_name' and '_M_function_name', and two integral fields '_M_line' and '_M_column'">;
+
+// HLSL Diagnostics
+def err_hlsl_attr_unsupported_in_stage : Error<"attribute %0 is unsupported in '%1' shaders, requires %select{|one of the following: }2%3">;
+def err_hlsl_attr_invalid_type : Error<
+ "attribute %0 only applies to a field or parameter of type '%1'">;
+def err_hlsl_attr_invalid_ast_node : Error<
+ "attribute %0 only applies to %1">;
+def err_hlsl_entry_shader_attr_mismatch : Error<
+ "%0 attribute on entry function does not match the target profile">;
+def err_hlsl_numthreads_argument_oor : Error<"argument '%select{X|Y|Z}0' to numthreads attribute cannot exceed %1">;
+def err_hlsl_numthreads_invalid : Error<"total number of threads cannot exceed %0">;
+def err_hlsl_missing_numthreads : Error<"missing numthreads attribute for %0 shader entry">;
+def err_hlsl_attribute_param_mismatch : Error<"%0 attribute parameters do not match the previous declaration">;
+def err_hlsl_duplicate_parameter_modifier : Error<"duplicate parameter modifier %0">;
+def err_hlsl_missing_semantic_annotation : Error<
+ "semantic annotations must be present for all parameters of an entry "
+ "function or patch constant function">;
+def err_hlsl_init_priority_unsupported : Error<
+ "initializer priorities are not supported in HLSL">;
+
+def err_hlsl_unsupported_register_type : Error<"invalid resource class specifier '%0' used; expected 'b', 's', 't', or 'u'">;
+def err_hlsl_unsupported_register_number : Error<"register number should be an integer">;
+def err_hlsl_expected_space : Error<"invalid space specifier '%0' used; expected 'space' followed by an integer, like space1">;
+def err_hlsl_pointers_unsupported : Error<
+ "%select{pointers|references}0 are unsupported in HLSL">;
+
+def err_hlsl_operator_unsupported : Error<
+ "the '%select{&|*|->}0' operator is unsupported in HLSL">;
+
+def err_hlsl_param_qualifier_mismatch :
+ Error<"conflicting parameter qualifier %0 on parameter %1">;
+
+// Layout randomization diagnostics.
+def err_non_designated_init_used : Error<
+ "a randomized struct can only be initialized with a designated initializer">;
+def err_cast_from_randomized_struct : Error<
+ "casting from randomized structure pointer type %0 to %1">;
+
+// Unsafe buffer usage diagnostics.
+def warn_unsafe_buffer_variable : Warning<
+ "%0 is an %select{unsafe pointer used for buffer access|unsafe buffer that "
+ "does not perform bounds checks}1">,
+ InGroup<UnsafeBufferUsage>, DefaultIgnore;
+def warn_unsafe_buffer_operation : Warning<
+ "%select{unsafe pointer operation|unsafe pointer arithmetic|"
+ "unsafe buffer access|function introduces unsafe buffer manipulation|unsafe invocation of span::data}0">,
+ InGroup<UnsafeBufferUsage>, DefaultIgnore;
+def note_unsafe_buffer_operation : Note<
+ "used%select{| in pointer arithmetic| in buffer access}0 here">;
+def note_unsafe_buffer_variable_fixit_group : Note<
+ "change type of %0 to '%select{std::span|std::array|std::span::iterator}1' to preserve bounds information%select{|, and change %2 to '%select{std::span|std::array|std::span::iterator}1' to propagate bounds information between them}3">;
+def note_unsafe_buffer_variable_fixit_together : Note<
+ "change type of %0 to '%select{std::span|std::array|std::span::iterator}1' to preserve bounds information"
+ "%select{|, and change %2 to safe types to make function %4 bounds-safe}3">;
+def note_safe_buffer_usage_suggestions_disabled : Note<
+ "pass -fsafe-buffer-usage-suggestions to receive code hardening suggestions">;
+#ifndef NDEBUG
+// Not a user-facing diagnostic. Useful for debugging false negatives in
+// -fsafe-buffer-usage-suggestions (i.e. lack of -Wunsafe-buffer-usage fixits).
+def note_safe_buffer_debug_mode : Note<"safe buffers debug: %0">;
+#endif
+
+def err_builtin_pass_in_regs_non_class : Error<
+ "argument %0 is not an unqualified class type">;
+
+
+// WebAssembly reference type and table diagnostics.
+def err_wasm_reference_pr : Error<
+ "%select{pointer|reference}0 to WebAssembly reference type is not allowed">;
+def err_wasm_ca_reference : Error<
+ "cannot %select{capture|take address of}0 WebAssembly reference">;
+def err_wasm_funcref_not_wasm : Error<
+ "invalid use of '__funcref' keyword outside the WebAssembly triple">;
+def err_wasm_table_pr : Error<
+ "cannot form a %select{pointer|reference}0 to a WebAssembly table">;
+def err_typecheck_wasm_table_must_have_zero_length : Error<
+ "only zero-length WebAssembly tables are currently supported">;
+def err_wasm_table_in_function : Error<
+ "WebAssembly table cannot be declared within a function">;
+def err_wasm_table_as_function_parameter : Error<
+ "cannot use WebAssembly table as a function parameter">;
+def err_wasm_table_invalid_uett_operand : Error<
+ "invalid application of '%0' to WebAssembly table">;
+def err_wasm_cast_table : Error<
+ "cannot cast %select{to|from}0 a WebAssembly table">;
+def err_wasm_table_conditional_expression : Error<
+ "cannot use a WebAssembly table within a branch of a conditional expression">;
+def err_wasm_table_art : Error<
+ "cannot %select{assign|return|throw|subscript}0 a WebAssembly table">;
+def err_wasm_reftype_tc : Error<
+ "cannot %select{throw|catch}0 a WebAssembly reference type">;
+def err_wasm_reftype_exception_spec : Error<
+ "WebAssembly reference type not allowed in exception specification">;
+def err_wasm_table_must_be_static : Error<
+ "WebAssembly table must be static">;
+def err_wasm_reftype_multidimensional_array : Error<
+ "multi-dimensional arrays of WebAssembly references are not allowed">;
+def err_wasm_builtin_arg_must_be_table_type : Error <
+ "%ordinal0 argument must be a WebAssembly table">;
+def err_wasm_builtin_arg_must_match_table_element_type : Error <
+ "%ordinal0 argument must match the element type of the WebAssembly table in the %ordinal1 argument">;
+def err_wasm_builtin_arg_must_be_integer_type : Error <
+ "%ordinal0 argument must be an integer">;
} // end of sema component.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSerialization.h b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSerialization.h
index b3d99fb3feaa..0c622a565773 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSerialization.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSerialization.h
@@ -15,7 +15,7 @@ namespace clang {
namespace diag {
enum {
#define DIAG(ENUM, FLAGS, DEFAULT_MAPPING, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
ENUM,
#define SERIALIZATIONSTART
#include "clang/Basic/DiagnosticSerializationKinds.inc"
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSerializationKinds.td b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSerializationKinds.td
index bf3221be004d..11c706ebf84b 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSerializationKinds.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/DiagnosticSerializationKinds.td
@@ -20,7 +20,7 @@ def err_fe_pch_malformed_block : Error<
def err_fe_ast_file_modified : Error<
"file '%0' has been modified since the "
"%select{precompiled header|module file|AST file}1 '%2' was built"
- ": %select{size|mtime|content}3 changed">,
+ ": %select{size|mtime|content}3 changed%select{| (was %5, now %6)}4">,
DefaultFatal;
def err_fe_pch_file_overridden : Error<
"file '%0' from the precompiled header has been overridden">;
@@ -62,7 +62,7 @@ def err_ast_file_out_of_date : Error<
"%select{PCH|module|AST}0 file '%1' is out of date and "
"needs to be rebuilt%select{|: %3}2">, DefaultFatal;
def err_ast_file_invalid : Error<
- "file '%1' is not a valid precompiled %select{PCH|module|AST}0 file">, DefaultFatal;
+ "file '%1' is not a valid precompiled %select{PCH|module|AST}0 file: %2">, DefaultFatal;
def note_module_file_imported_by : Note<
"imported by %select{|module '%2' in }1'%0'">;
def err_module_file_not_module : Error<
@@ -75,6 +75,7 @@ def note_module_file_conflict : Note<
def remark_module_import : Remark<
"importing module '%0'%select{| into '%3'}2 from '%1'">,
+ ShowInSystemHeader,
InGroup<ModuleImport>;
def err_imported_module_not_found : Error<
@@ -115,274 +116,10 @@ def note_module_odr_violation_no_possible_decls : Note<
"definition has no member %0">;
def note_module_odr_violation_possible_decl : Note<
"declaration of %0 does not match">;
-def err_module_odr_violation_different_definitions : Error<
- "%q0 has different definitions in different modules; "
- "%select{definition in module '%2' is here|defined here}1">;
-def note_first_module_difference : Note<
- "in first definition, possible difference is here">;
-def note_module_odr_violation_different_definitions : Note<
- "definition in module '%0' is here">;
-def note_second_module_difference : Note<
- "in second definition, possible difference is here">;
def err_module_odr_violation_different_instantiations : Error<
"instantiation of %q0 is different in different modules">;
-def err_module_odr_violation_definition_data : Error <
- "%q0 has different definitions in different modules; first difference is "
- "%select{definition in module '%2'|defined here}1 found "
- "%select{"
- "%4 base %plural{1:class|:classes}4|"
- "%4 virtual base %plural{1:class|:classes}4|"
- "%ordinal4 base class with type %5|"
- "%ordinal4 %select{non-virtual|virtual}5 base class %6|"
- "%ordinal4 base class %5 with "
- "%select{public|protected|private|no}6 access specifier}3">;
-
-def note_module_odr_violation_definition_data : Note <
- "but in '%0' found "
- "%select{"
- "%2 base %plural{1:class|:classes}2|"
- "%2 virtual base %plural{1:class|:classes}2|"
- "%ordinal2 base class with different type %3|"
- "%ordinal2 %select{non-virtual|virtual}3 base class %4|"
- "%ordinal2 base class %3 with "
- "%select{public|protected|private|no}4 access specifier}1">;
-
-def err_module_odr_violation_template_parameter : Error <
- "%q0 has different definitions in different modules; first difference is "
- "%select{definition in module '%2'|defined here}1 found "
- "%select{"
- "unnamed template parameter|"
- "template parameter %4|"
- "template parameter with %select{no |}4default argument|"
- "template parameter with default argument}3">;
-
-
-def note_module_odr_violation_template_parameter : Note <
- "but in '%0' found "
- "%select{"
- "unnamed template parameter %2|"
- "template parameter %2|"
- "template parameter with %select{no |}2default argument|"
- "template parameter with different default argument}1">;
-
-def err_module_odr_violation_mismatch_decl : Error<
- "%q0 has different definitions in different modules; first difference is "
- "%select{definition in module '%2'|defined here}1 found "
- "%select{end of class|public access specifier|private access specifier|"
- "protected access specifier|static assert|field|method|type alias|typedef|"
- "data member|friend declaration|function template}3">;
-def note_module_odr_violation_mismatch_decl : Note<"but in '%0' found "
- "%select{end of class|public access specifier|private access specifier|"
- "protected access specifier|static assert|field|method|type alias|typedef|"
- "data member|friend declaration|function template}1">;
-
-def err_module_odr_violation_mismatch_decl_diff : Error<
- "%q0 has different definitions in different modules; first difference is "
- "%select{definition in module '%2'|defined here}1 found "
- "%select{"
- "static assert with condition|"
- "static assert with message|"
- "static assert with %select{|no }4message|"
- "field %4|"
- "field %4 with type %5|"
- "%select{non-|}5bitfield %4|"
- "bitfield %4 with one width expression|"
- "%select{non-|}5mutable field %4|"
- "field %4 with %select{no|an}5 initalizer|"
- "field %4 with an initializer|"
- "%select{method %5|constructor|destructor}4|"
- "%select{method %5|constructor|destructor}4 "
- "is %select{not deleted|deleted}6|"
- "%select{method %5|constructor|destructor}4 "
- "is %select{not defaulted|defaulted}6|"
- "%select{method %5|constructor|destructor}4 "
- "is %select{|pure }6%select{not virtual|virtual}7|"
- "%select{method %5|constructor|destructor}4 "
- "is %select{not static|static}6|"
- "%select{method %5|constructor|destructor}4 "
- "is %select{not volatile|volatile}6|"
- "%select{method %5|constructor|destructor}4 "
- "is %select{not const|const}6|"
- "%select{method %5|constructor|destructor}4 "
- "is %select{not inline|inline}6|"
- "%select{method %5|constructor|destructor}4 "
- "that has %6 parameter%s6|"
- "%select{method %5|constructor|destructor}4 "
- "with %ordinal6 parameter of type %7%select{| decayed from %9}8|"
- "%select{method %5|constructor|destructor}4 "
- "with %ordinal6 parameter named %7|"
- "%select{method %5|constructor|destructor}4 "
- "with %ordinal6 parameter with%select{out|}7 a default argument|"
- "%select{method %5|constructor|destructor}4 "
- "with %ordinal6 parameter with a default argument|"
- "%select{method %5|constructor|destructor}4 "
- "with %select{no |}6template arguments|"
- "%select{method %5|constructor|destructor}4 "
- "with %6 template argument%s6|"
- "%select{method %5|constructor|destructor}4 "
- "with %6 for %ordinal7 template argument|"
- "%select{method %5|constructor|destructor}4 "
- "with %select{no body|body}6|"
- "%select{method %5|constructor|destructor}4 "
- "with body|"
- "%select{typedef|type alias}4 name %5|"
- "%select{typedef|type alias}4 %5 with underlying type %6|"
- "data member with name %4|"
- "data member %4 with type %5|"
- "data member %4 with%select{out|}5 an initializer|"
- "data member %4 with an initializer|"
- "data member %4 %select{is constexpr|is not constexpr}5|"
- "friend %select{class|function}4|"
- "friend %4|"
- "friend function %4|"
- "function template %4 with %5 template parameter%s5|"
- "function template %4 with %ordinal5 template parameter being a "
- "%select{type|non-type|template}6 template parameter|"
- "function template %4 with %ordinal5 template parameter "
- "%select{with no name|named %7}6|"
- "function template %4 with %ordinal5 template parameter with "
- "%select{no |}6default argument|"
- "function template %4 with %ordinal5 template parameter with "
- "default argument %6|"
- "function template %4 with %ordinal5 template parameter with one type|"
- "function template %4 with %ordinal5 template parameter %select{not |}6"
- "being a template parameter pack|"
- "}3">;
-
-def note_module_odr_violation_mismatch_decl_diff : Note<"but in '%0' found "
- "%select{"
- "static assert with different condition|"
- "static assert with different message|"
- "static assert with %select{|no }2message|"
- "field %2|"
- "field %2 with type %3|"
- "%select{non-|}3bitfield %2|"
- "bitfield %2 with different width expression|"
- "%select{non-|}3mutable field %2|"
- "field %2 with %select{no|an}3 initializer|"
- "field %2 with a different initializer|"
- "%select{method %3|constructor|destructor}2|"
- "%select{method %3|constructor|destructor}2 "
- "is %select{not deleted|deleted}4|"
- "%select{method %3|constructor|destructor}2 "
- "is %select{not defaulted|defaulted}4|"
- "%select{method %3|constructor|destructor}2 "
- "is %select{|pure }4%select{not virtual|virtual}5|"
- "%select{method %3|constructor|destructor}2 "
- "is %select{not static|static}4|"
- "%select{method %3|constructor|destructor}2 "
- "is %select{not volatile|volatile}4|"
- "%select{method %3|constructor|destructor}2 "
- "is %select{not const|const}4|"
- "%select{method %3|constructor|destructor}2 "
- "is %select{not inline|inline}4|"
- "%select{method %3|constructor|destructor}2 "
- "that has %4 parameter%s4|"
- "%select{method %3|constructor|destructor}2 "
- "with %ordinal4 parameter of type %5%select{| decayed from %7}6|"
- "%select{method %3|constructor|destructor}2 "
- "with %ordinal4 parameter named %5|"
- "%select{method %3|constructor|destructor}2 "
- "with %ordinal4 parameter with%select{out|}5 a default argument|"
- "%select{method %3|constructor|destructor}2 "
- "with %ordinal4 parameter with a different default argument|"
- "%select{method %3|constructor|destructor}2 "
- "with %select{no |}4template arguments|"
- "%select{method %3|constructor|destructor}2 "
- "with %4 template argument%s4|"
- "%select{method %3|constructor|destructor}2 "
- "with %4 for %ordinal5 template argument|"
- "%select{method %3|constructor|destructor}2 "
- "with %select{no body|body}4|"
- "%select{method %3|constructor|destructor}2 "
- "with different body|"
- "%select{typedef|type alias}2 name %3|"
- "%select{typedef|type alias}2 %3 with different underlying type %4|"
- "data member with name %2|"
- "data member %2 with different type %3|"
- "data member %2 with%select{out|}3 an initializer|"
- "data member %2 with a different initializer|"
- "data member %2 %select{is constexpr|is not constexpr}3|"
- "friend %select{class|function}2|"
- "friend %2|"
- "friend function %2|"
- "function template %2 with %3 template parameter%s3|"
- "function template %2 with %ordinal3 template paramter being a "
- "%select{type|non-type|template}4 template parameter|"
- "function template %2 with %ordinal3 template parameter "
- "%select{with no name|named %5}4|"
- "function template %2 with %ordinal3 template parameter with "
- "%select{no |}4default argument|"
- "function template %2 with %ordinal3 template parameter with "
- "default argument %4|"
- "function template %2 with %ordinal3 template parameter with different type|"
- "function template %2 with %ordinal3 template parameter %select{not |}4"
- "being a template parameter pack|"
- "}1">;
-
-def err_module_odr_violation_function : Error<
- "%q0 has different definitions in different modules; "
- "%select{definition in module '%2'|defined here}1 "
- "first difference is "
- "%select{"
- "return type is %4|"
- "%ordinal4 parameter with name %5|"
- "%ordinal4 parameter with type %5%select{| decayed from %7}6|"
- "%ordinal4 parameter with%select{out|}5 a default argument|"
- "%ordinal4 parameter with a default argument|"
- "function body"
- "}3">;
-
-def note_module_odr_violation_function : Note<"but in '%0' found "
- "%select{"
- "different return type %2|"
- "%ordinal2 parameter with name %3|"
- "%ordinal2 parameter with type %3%select{| decayed from %5}4|"
- "%ordinal2 parameter with%select{out|}3 a default argument|"
- "%ordinal2 parameter with a different default argument|"
- "a different body"
- "}1">;
-
-def err_module_odr_violation_enum : Error<
- "%q0 has different definitions in different modules; "
- "%select{definition in module '%2'|defined here}1 "
- "first difference is "
- "%select{"
- "enum that is %select{not scoped|scoped}4|"
- "enum scoped with keyword %select{struct|class}4|"
- "enum %select{without|with}4 specified type|"
- "enum with specified type %4|"
- "enum with %4 element%s4|"
- "%ordinal4 element has name %5|"
- "%ordinal4 element %5 %select{has|does not have}6 an initilizer|"
- "%ordinal4 element %5 has an initializer|"
- "}3">;
-
-def note_module_odr_violation_enum : Note<"but in '%0' found "
- "%select{"
- "enum that is %select{not scoped|scoped}2|"
- "enum scoped with keyword %select{struct|class}2|"
- "enum %select{without|with}2 specified type|"
- "enum with specified type %2|"
- "enum with %2 element%s2|"
- "%ordinal2 element has name %3|"
- "%ordinal2 element %3 %select{has|does not have}4 an initializer|"
- "%ordinal2 element %3 has different initializer|"
- "}1">;
-
-def err_module_odr_violation_mismatch_decl_unknown : Error<
- "%q0 %select{with definition in module '%2'|defined here}1 has different "
- "definitions in different modules; first difference is this "
- "%select{||||static assert|field|method|type alias|typedef|data member|"
- "friend declaration|unexpected decl}3">;
-def note_module_odr_violation_mismatch_decl_unknown : Note<
- "but in '%0' found "
- "%select{||||different static assert|different field|different method|"
- "different type alias|different typedef|different data member|"
- "different friend declaration|another unexpected decl}1">;
-
def warn_duplicate_module_file_extension : Warning<
"duplicate module file extension block name '%0'">,
InGroup<ModuleFileExtension>;
@@ -391,6 +128,9 @@ def warn_module_system_bit_conflict : Warning<
"module file '%0' was validated as a system module and is now being imported "
"as a non-system module; any difference in diagnostic options will be ignored">,
InGroup<ModuleConflict>;
+
+def err_failed_to_find_module_file : Error<
+ "failed to find module file for module '%0'">;
} // let CategoryName
let CategoryName = "AST Serialization Issue" in {
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DirectoryEntry.h b/contrib/llvm-project/clang/include/clang/Basic/DirectoryEntry.h
index edb8031a20b8..906c2e9af23b 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DirectoryEntry.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/DirectoryEntry.h
@@ -14,13 +14,18 @@
#ifndef LLVM_CLANG_BASIC_DIRECTORYENTRY_H
#define LLVM_CLANG_BASIC_DIRECTORYENTRY_H
+#include "clang/Basic/CustomizableOptional.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ErrorOr.h"
+#include <optional>
+#include <utility>
+
namespace clang {
namespace FileMgr {
@@ -31,12 +36,17 @@ template <class RefTy> class MapEntryOptionalStorage;
/// Cached information about one directory (either on disk or in
/// the virtual file system).
class DirectoryEntry {
+ DirectoryEntry() = default;
+ DirectoryEntry(const DirectoryEntry &) = delete;
+ DirectoryEntry &operator=(const DirectoryEntry &) = delete;
friend class FileManager;
+ friend class FileEntryTestHelper;
// FIXME: We should not be storing a directory entry name here.
StringRef Name; // Name of the directory.
public:
+ LLVM_DEPRECATED("Use DirectoryEntryRef::getName() instead.", "")
StringRef getName() const { return Name; }
};
@@ -62,7 +72,7 @@ public:
bool isSameRef(DirectoryEntryRef RHS) const { return ME == RHS.ME; }
DirectoryEntryRef() = delete;
- DirectoryEntryRef(const MapEntry &ME) : ME(&ME) {}
+ explicit DirectoryEntryRef(const MapEntry &ME) : ME(&ME) {}
/// Allow DirectoryEntryRef to degrade into 'const DirectoryEntry*' to
/// facilitate incremental adoption.
@@ -108,6 +118,8 @@ private:
const MapEntry *ME;
};
+using OptionalDirectoryEntryRef = CustomizableOptional<DirectoryEntryRef>;
+
namespace FileMgr {
/// Customized storage for refs derived from map entires in FileManager, using
@@ -120,27 +132,25 @@ public:
MapEntryOptionalStorage() : MaybeRef(optional_none_tag()) {}
template <class... ArgTypes>
- explicit MapEntryOptionalStorage(llvm::in_place_t, ArgTypes &&...Args)
+ explicit MapEntryOptionalStorage(std::in_place_t, ArgTypes &&...Args)
: MaybeRef(std::forward<ArgTypes>(Args)...) {}
void reset() { MaybeRef = optional_none_tag(); }
- bool hasValue() const { return MaybeRef.hasOptionalValue(); }
+ bool has_value() const { return MaybeRef.hasOptionalValue(); }
- RefTy &getValue() LLVM_LVALUE_FUNCTION {
- assert(hasValue());
+ RefTy &value() & {
+ assert(has_value());
return MaybeRef;
}
- RefTy const &getValue() const LLVM_LVALUE_FUNCTION {
- assert(hasValue());
+ RefTy const &value() const & {
+ assert(has_value());
return MaybeRef;
}
-#if LLVM_HAS_RVALUE_REFERENCE_THIS
- RefTy &&getValue() && {
- assert(hasValue());
+ RefTy &&value() && {
+ assert(has_value());
return std::move(MaybeRef);
}
-#endif
template <class... Args> void emplace(Args &&...args) {
MaybeRef = RefTy(std::forward<Args>(args)...);
@@ -153,9 +163,7 @@ public:
};
} // end namespace FileMgr
-} // end namespace clang
-namespace llvm {
namespace optional_detail {
/// Customize OptionalStorage<DirectoryEntryRef> to use DirectoryEntryRef and
@@ -170,8 +178,8 @@ public:
OptionalStorage() = default;
template <class... ArgTypes>
- explicit OptionalStorage(in_place_t, ArgTypes &&...Args)
- : StorageImpl(in_place_t{}, std::forward<ArgTypes>(Args)...) {}
+ explicit OptionalStorage(std::in_place_t, ArgTypes &&...Args)
+ : StorageImpl(std::in_place_t{}, std::forward<ArgTypes>(Args)...) {}
OptionalStorage &operator=(clang::DirectoryEntryRef Ref) {
StorageImpl::operator=(Ref);
@@ -179,15 +187,30 @@ public:
}
};
-static_assert(sizeof(Optional<clang::DirectoryEntryRef>) ==
- sizeof(clang::DirectoryEntryRef),
- "Optional<DirectoryEntryRef> must avoid size overhead");
+static_assert(sizeof(OptionalDirectoryEntryRef) == sizeof(DirectoryEntryRef),
+ "OptionalDirectoryEntryRef must avoid size overhead");
-static_assert(
- std::is_trivially_copyable<Optional<clang::DirectoryEntryRef>>::value,
- "Optional<DirectoryEntryRef> should be trivially copyable");
+static_assert(std::is_trivially_copyable<OptionalDirectoryEntryRef>::value,
+ "OptionalDirectoryEntryRef should be trivially copyable");
} // end namespace optional_detail
+} // namespace clang
+
+namespace llvm {
+
+template <> struct PointerLikeTypeTraits<clang::DirectoryEntryRef> {
+ static inline void *getAsVoidPointer(clang::DirectoryEntryRef Dir) {
+ return const_cast<clang::DirectoryEntryRef::MapEntry *>(&Dir.getMapEntry());
+ }
+
+ static inline clang::DirectoryEntryRef getFromVoidPointer(void *Ptr) {
+ return clang::DirectoryEntryRef(
+ *reinterpret_cast<const clang::DirectoryEntryRef::MapEntry *>(Ptr));
+ }
+
+ static constexpr int NumLowBitsAvailable = PointerLikeTypeTraits<
+ const clang::DirectoryEntryRef::MapEntry *>::NumLowBitsAvailable;
+};
/// Specialisation of DenseMapInfo for DirectoryEntryRef.
template <> struct DenseMapInfo<clang::DirectoryEntryRef> {
@@ -222,76 +245,4 @@ template <> struct DenseMapInfo<clang::DirectoryEntryRef> {
} // end namespace llvm
-namespace clang {
-
-/// Wrapper around Optional<DirectoryEntryRef> that degrades to 'const
-/// DirectoryEntry*', facilitating incremental patches to propagate
-/// DirectoryEntryRef.
-///
-/// This class can be used as return value or field where it's convenient for
-/// an Optional<DirectoryEntryRef> to degrade to a 'const DirectoryEntry*'. The
-/// purpose is to avoid code churn due to dances like the following:
-/// \code
-/// // Old code.
-/// lvalue = rvalue;
-///
-/// // Temporary code from an incremental patch.
-/// Optional<DirectoryEntryRef> MaybeF = rvalue;
-/// lvalue = MaybeF ? &MaybeF.getDirectoryEntry() : nullptr;
-///
-/// // Final code.
-/// lvalue = rvalue;
-/// \endcode
-///
-/// FIXME: Once DirectoryEntryRef is "everywhere" and DirectoryEntry::LastRef
-/// and DirectoryEntry::getName have been deleted, delete this class and
-/// replace instances with Optional<DirectoryEntryRef>.
-class OptionalDirectoryEntryRefDegradesToDirectoryEntryPtr
- : public Optional<DirectoryEntryRef> {
-public:
- OptionalDirectoryEntryRefDegradesToDirectoryEntryPtr() = default;
- OptionalDirectoryEntryRefDegradesToDirectoryEntryPtr(
- OptionalDirectoryEntryRefDegradesToDirectoryEntryPtr &&) = default;
- OptionalDirectoryEntryRefDegradesToDirectoryEntryPtr(
- const OptionalDirectoryEntryRefDegradesToDirectoryEntryPtr &) = default;
- OptionalDirectoryEntryRefDegradesToDirectoryEntryPtr &
- operator=(OptionalDirectoryEntryRefDegradesToDirectoryEntryPtr &&) = default;
- OptionalDirectoryEntryRefDegradesToDirectoryEntryPtr &
- operator=(const OptionalDirectoryEntryRefDegradesToDirectoryEntryPtr &) = default;
-
- OptionalDirectoryEntryRefDegradesToDirectoryEntryPtr(llvm::NoneType) {}
- OptionalDirectoryEntryRefDegradesToDirectoryEntryPtr(DirectoryEntryRef Ref)
- : Optional<DirectoryEntryRef>(Ref) {}
- OptionalDirectoryEntryRefDegradesToDirectoryEntryPtr(Optional<DirectoryEntryRef> MaybeRef)
- : Optional<DirectoryEntryRef>(MaybeRef) {}
-
- OptionalDirectoryEntryRefDegradesToDirectoryEntryPtr &operator=(llvm::NoneType) {
- Optional<DirectoryEntryRef>::operator=(None);
- return *this;
- }
- OptionalDirectoryEntryRefDegradesToDirectoryEntryPtr &operator=(DirectoryEntryRef Ref) {
- Optional<DirectoryEntryRef>::operator=(Ref);
- return *this;
- }
- OptionalDirectoryEntryRefDegradesToDirectoryEntryPtr &
- operator=(Optional<DirectoryEntryRef> MaybeRef) {
- Optional<DirectoryEntryRef>::operator=(MaybeRef);
- return *this;
- }
-
- /// Degrade to 'const DirectoryEntry *' to allow DirectoryEntry::LastRef and
- /// DirectoryEntry::getName have been deleted, delete this class and replace
- /// instances with Optional<DirectoryEntryRef>
- operator const DirectoryEntry *() const {
- return hasValue() ? &getValue().getDirEntry() : nullptr;
- }
-};
-
-static_assert(std::is_trivially_copyable<
- OptionalDirectoryEntryRefDegradesToDirectoryEntryPtr>::value,
- "OptionalDirectoryEntryRefDegradesToDirectoryEntryPtr should be "
- "trivially copyable");
-
-} // end namespace clang
-
#endif // LLVM_CLANG_BASIC_DIRECTORYENTRY_H
diff --git a/contrib/llvm-project/clang/include/clang/Basic/ExceptionSpecificationType.h b/contrib/llvm-project/clang/include/clang/Basic/ExceptionSpecificationType.h
index 5616860555c8..d3c9e9cd063b 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/ExceptionSpecificationType.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/ExceptionSpecificationType.h
@@ -50,6 +50,11 @@ inline bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType) {
return ESpecType == EST_Unevaluated || ESpecType == EST_Uninstantiated;
}
+inline bool isExplicitThrowExceptionSpec(ExceptionSpecificationType ESpecType) {
+ return ESpecType == EST_Dynamic || ESpecType == EST_MSAny ||
+ ESpecType == EST_NoexceptFalse;
+}
+
/// Possible results from evaluation of a noexcept expression.
enum CanThrowResult {
CT_Cannot,
diff --git a/contrib/llvm-project/clang/include/clang/Basic/FPOptions.def b/contrib/llvm-project/clang/include/clang/Basic/FPOptions.def
index a93fa475cd5f..79f04c89c9fe 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/FPOptions.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/FPOptions.def
@@ -14,13 +14,19 @@
// OPTION(name, type, width, previousName)
OPTION(FPContractMode, LangOptions::FPModeKind, 2, First)
-OPTION(RoundingMode, LangOptions::RoundingMode, 3, FPContractMode)
-OPTION(FPExceptionMode, LangOptions::FPExceptionModeKind, 2, RoundingMode)
-OPTION(AllowFEnvAccess, bool, 1, FPExceptionMode)
+OPTION(RoundingMath, bool, 1, FPContractMode)
+OPTION(ConstRoundingMode, LangOptions::RoundingMode, 3, RoundingMath)
+OPTION(SpecifiedExceptionMode, LangOptions::FPExceptionModeKind, 2, ConstRoundingMode)
+OPTION(AllowFEnvAccess, bool, 1, SpecifiedExceptionMode)
OPTION(AllowFPReassociate, bool, 1, AllowFEnvAccess)
OPTION(NoHonorNaNs, bool, 1, AllowFPReassociate)
OPTION(NoHonorInfs, bool, 1, NoHonorNaNs)
OPTION(NoSignedZero, bool, 1, NoHonorInfs)
OPTION(AllowReciprocal, bool, 1, NoSignedZero)
OPTION(AllowApproxFunc, bool, 1, AllowReciprocal)
+OPTION(FPEvalMethod, LangOptions::FPEvalMethodKind, 2, AllowApproxFunc)
+OPTION(Float16ExcessPrecision, LangOptions::ExcessPrecisionKind, 2, FPEvalMethod)
+OPTION(BFloat16ExcessPrecision, LangOptions::ExcessPrecisionKind, 2, Float16ExcessPrecision)
+OPTION(MathErrno, bool, 1, BFloat16ExcessPrecision)
+OPTION(ComplexRange, LangOptions::ComplexRangeKind, 2, MathErrno)
#undef OPTION
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Features.def b/contrib/llvm-project/clang/include/clang/Basic/Features.def
index 6ca0e646b865..5fad5fc3623c 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Features.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/Features.def
@@ -45,7 +45,12 @@ FEATURE(leak_sanitizer,
FEATURE(hwaddress_sanitizer,
LangOpts.Sanitize.hasOneOf(SanitizerKind::HWAddress |
SanitizerKind::KernelHWAddress))
-FEATURE(memtag_sanitizer, LangOpts.Sanitize.has(SanitizerKind::MemTag))
+FEATURE(memtag_stack,
+ LangOpts.Sanitize.has(SanitizerKind::MemtagStack))
+FEATURE(memtag_heap,
+ LangOpts.Sanitize.has(SanitizerKind::MemtagHeap))
+FEATURE(memtag_globals,
+ LangOpts.Sanitize.has(SanitizerKind::MemtagGlobals))
FEATURE(xray_instrument, LangOpts.XRayInstrument)
FEATURE(undefined_behavior_sanitizer,
LangOpts.Sanitize.hasOneOf(SanitizerKind::Undefined))
@@ -58,6 +63,7 @@ FEATURE(attribute_availability_app_extension, true)
FEATURE(attribute_availability_with_version_underscores, true)
FEATURE(attribute_availability_tvos, true)
FEATURE(attribute_availability_watchos, true)
+FEATURE(attribute_availability_driverkit, true)
FEATURE(attribute_availability_with_strict, true)
FEATURE(attribute_availability_with_replacement, true)
FEATURE(attribute_availability_in_templates, true)
@@ -83,6 +89,8 @@ FEATURE(blocks, LangOpts.Blocks)
FEATURE(c_thread_safety_attributes, true)
FEATURE(cxx_exceptions, LangOpts.CXXExceptions)
FEATURE(cxx_rtti, LangOpts.RTTI &&LangOpts.RTTIData)
+EXTENSION(define_target_os_macros,
+ PP.getPreprocessorOpts().DefineTargetOSMacros)
FEATURE(enumerator_attributes, true)
FEATURE(nullability, true)
FEATURE(nullability_on_arrays, true)
@@ -96,6 +104,7 @@ FEATURE(scudo, LangOpts.Sanitize.hasOneOf(SanitizerKind::Scudo))
FEATURE(swiftasynccc,
PP.getTargetInfo().checkCallingConvention(CC_SwiftAsync) ==
clang::TargetInfo::CCCR_OK)
+FEATURE(pragma_stdc_cx_limited_range, true)
// Objective-C features
FEATURE(objc_arr, LangOpts.ObjCAutoRefCount) // FIXME: REMOVE?
FEATURE(objc_arc, LangOpts.ObjCAutoRefCount)
@@ -222,22 +231,29 @@ FEATURE(is_trivially_assignable, LangOpts.CPlusPlus)
FEATURE(is_trivially_constructible, LangOpts.CPlusPlus)
FEATURE(is_trivially_copyable, LangOpts.CPlusPlus)
FEATURE(is_union, LangOpts.CPlusPlus)
+FEATURE(kcfi, LangOpts.Sanitize.has(SanitizerKind::KCFI))
FEATURE(modules, LangOpts.Modules)
FEATURE(safe_stack, LangOpts.Sanitize.has(SanitizerKind::SafeStack))
FEATURE(shadow_call_stack,
LangOpts.Sanitize.has(SanitizerKind::ShadowCallStack))
FEATURE(tls, PP.getTargetInfo().isTLSSupported())
FEATURE(underlying_type, LangOpts.CPlusPlus)
+FEATURE(experimental_library, LangOpts.ExperimentalLibrary)
// C11 features supported by other languages as extensions.
EXTENSION(c_alignas, true)
EXTENSION(c_alignof, true)
EXTENSION(c_atomic, true)
EXTENSION(c_generic_selections, true)
+EXTENSION(c_generic_selection_with_controlling_type, true)
EXTENSION(c_static_assert, true)
EXTENSION(c_thread_local, PP.getTargetInfo().isTLSSupported())
+// C23 features supported by other languages as extensions
+EXTENSION(c_attributes, true)
// C++11 features supported by other languages as extensions.
EXTENSION(cxx_atomic, LangOpts.CPlusPlus)
+EXTENSION(cxx_default_function_template_args, LangOpts.CPlusPlus)
+EXTENSION(cxx_defaulted_functions, LangOpts.CPlusPlus)
EXTENSION(cxx_deleted_functions, LangOpts.CPlusPlus)
EXTENSION(cxx_explicit_conversions, LangOpts.CPlusPlus)
EXTENSION(cxx_inline_namespaces, LangOpts.CPlusPlus)
@@ -253,6 +269,10 @@ EXTENSION(cxx_fixed_enum, true)
EXTENSION(cxx_binary_literals, true)
EXTENSION(cxx_init_captures, LangOpts.CPlusPlus11)
EXTENSION(cxx_variable_templates, LangOpts.CPlusPlus)
+//C++20
+EXTENSION(cxx_generalized_nttp, LangOpts.CPlusPlus20)
+//C++23
+EXTENSION(cxx_explicit_this_parameter, LangOpts.CPlusPlus23)
// Miscellaneous language extensions
EXTENSION(overloadable_unmarked, true)
EXTENSION(pragma_clang_attribute_namespaces, true)
@@ -260,11 +280,17 @@ EXTENSION(pragma_clang_attribute_external_declaration, true)
EXTENSION(statement_attributes_with_gnu_syntax, true)
EXTENSION(gnu_asm, LangOpts.GNUAsm)
EXTENSION(gnu_asm_goto_with_outputs, LangOpts.GNUAsm)
+EXTENSION(gnu_asm_goto_with_outputs_full, LangOpts.GNUAsm)
EXTENSION(matrix_types, LangOpts.MatrixTypes)
EXTENSION(matrix_types_scalar_division, true)
EXTENSION(cxx_attributes_on_using_declarations, LangOpts.CPlusPlus11)
+EXTENSION(datasizeof, LangOpts.CPlusPlus)
FEATURE(cxx_abi_relative_vtable, LangOpts.CPlusPlus && LangOpts.RelativeCXXABIVTables)
+// CUDA/HIP Features
+FEATURE(cuda_noinline_keyword, LangOpts.CUDA)
+EXTENSION(cuda_implicit_host_device_templates, LangOpts.CUDA && LangOpts.OffloadImplicitHostDeviceTemplates)
+
#undef EXTENSION
#undef FEATURE
diff --git a/contrib/llvm-project/clang/include/clang/Basic/FileEntry.h b/contrib/llvm-project/clang/include/clang/Basic/FileEntry.h
index 6e91b42e18b7..35efa147950f 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/FileEntry.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/FileEntry.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_BASIC_FILEENTRY_H
#define LLVM_CLANG_BASIC_FILEENTRY_H
+#include "clang/Basic/CustomizableOptional.h"
#include "clang/Basic/DirectoryEntry.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMapInfo.h"
@@ -24,6 +25,9 @@
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/FileSystem/UniqueID.h"
+#include <optional>
+#include <utility>
+
namespace llvm {
class MemoryBuffer;
@@ -39,19 +43,12 @@ namespace clang {
class FileEntryRef;
-} // namespace clang
-
-namespace llvm {
namespace optional_detail {
/// Forward declare a template specialization for OptionalStorage.
-template <>
-class OptionalStorage<clang::FileEntryRef, /*is_trivially_copyable*/ true>;
+template <> class OptionalStorage<clang::FileEntryRef>;
} // namespace optional_detail
-} // namespace llvm
-
-namespace clang {
class FileEntry;
@@ -59,13 +56,22 @@ class FileEntry;
/// accessed by the FileManager's client.
class FileEntryRef {
public:
- StringRef getName() const { return ME->first(); }
+ /// The name of this FileEntry. If a VFS uses 'use-external-name', this is
+ /// the redirected name. See getRequestedName().
+ StringRef getName() const { return getBaseMapEntry().first(); }
+
+ /// The name of this FileEntry, as originally requested without applying any
+ /// remappings for VFS 'use-external-name'.
+ ///
+ /// FIXME: this should be the semantics of getName(). See comment in
+ /// FileManager::getFileRef().
+ StringRef getNameAsRequested() const { return ME->first(); }
+
const FileEntry &getFileEntry() const {
- return *ME->second->V.get<FileEntry *>();
+ return *getBaseMapEntry().second->V.get<FileEntry *>();
}
- DirectoryEntryRef getDir() const { return *ME->second->Dir; }
+ DirectoryEntryRef getDir() const { return ME->second->Dir; }
- inline bool isValid() const;
inline off_t getSize() const;
inline unsigned getUID() const;
inline const llvm::sys::fs::UniqueID &getUniqueID() const;
@@ -112,17 +118,14 @@ public:
/// VFSs that use external names. In that case, the \c FileEntryRef
/// returned by the \c FileManager will have the external name, and not the
/// name that was used to lookup the file.
- ///
- /// The second type is really a `const MapEntry *`, but that confuses
- /// gcc5.3. Once that's no longer supported, change this back.
- llvm::PointerUnion<FileEntry *, const void *> V;
+ llvm::PointerUnion<FileEntry *, const MapEntry *> V;
- /// Directory the file was found in. Set if and only if V is a FileEntry.
- Optional<DirectoryEntryRef> Dir;
+ /// Directory the file was found in.
+ DirectoryEntryRef Dir;
MapValue() = delete;
MapValue(FileEntry &FE, DirectoryEntryRef Dir) : V(&FE), Dir(Dir) {}
- MapValue(MapEntry &ME) : V(&ME) {}
+ MapValue(MapEntry &ME, DirectoryEntryRef Dir) : V(&ME), Dir(Dir) {}
};
/// Check if RHS referenced the file in exactly the same way.
@@ -151,13 +154,20 @@ public:
explicit FileEntryRef(const MapEntry &ME) : ME(&ME) {
assert(ME.second && "Expected payload");
assert(ME.second->V && "Expected non-null");
- assert(ME.second->V.is<FileEntry *>() && "Expected FileEntry");
}
/// Expose the underlying MapEntry to simplify packing in a PointerIntPair or
/// PointerUnion and allow construction in Optional.
const clang::FileEntryRef::MapEntry &getMapEntry() const { return *ME; }
+ /// Retrieve the base MapEntry after redirects.
+ const MapEntry &getBaseMapEntry() const {
+ const MapEntry *Base = ME;
+ while (const auto *Next = Base->second->V.dyn_cast<const MapEntry *>())
+ Base = Next;
+ return *Base;
+ }
+
private:
friend class FileMgr::MapEntryOptionalStorage<FileEntryRef>;
struct optional_none_tag {};
@@ -189,9 +199,8 @@ static_assert(sizeof(FileEntryRef) == sizeof(const FileEntry *),
static_assert(std::is_trivially_copyable<FileEntryRef>::value,
"FileEntryRef must be trivially copyable");
-} // end namespace clang
+using OptionalFileEntryRef = CustomizableOptional<FileEntryRef>;
-namespace llvm {
namespace optional_detail {
/// Customize OptionalStorage<FileEntryRef> to use FileEntryRef and its
@@ -206,8 +215,8 @@ public:
OptionalStorage() = default;
template <class... ArgTypes>
- explicit OptionalStorage(in_place_t, ArgTypes &&...Args)
- : StorageImpl(in_place_t{}, std::forward<ArgTypes>(Args)...) {}
+ explicit OptionalStorage(std::in_place_t, ArgTypes &&...Args)
+ : StorageImpl(std::in_place_t{}, std::forward<ArgTypes>(Args)...) {}
OptionalStorage &operator=(clang::FileEntryRef Ref) {
StorageImpl::operator=(Ref);
@@ -215,14 +224,16 @@ public:
}
};
-static_assert(sizeof(Optional<clang::FileEntryRef>) ==
- sizeof(clang::FileEntryRef),
- "Optional<FileEntryRef> must avoid size overhead");
+static_assert(sizeof(OptionalFileEntryRef) == sizeof(FileEntryRef),
+ "OptionalFileEntryRef must avoid size overhead");
-static_assert(std::is_trivially_copyable<Optional<clang::FileEntryRef>>::value,
- "Optional<FileEntryRef> should be trivially copyable");
+static_assert(std::is_trivially_copyable<OptionalFileEntryRef>::value,
+ "OptionalFileEntryRef should be trivially copyable");
} // end namespace optional_detail
+} // namespace clang
+
+namespace llvm {
/// Specialisation of DenseMapInfo for FileEntryRef.
template <> struct DenseMapInfo<clang::FileEntryRef> {
@@ -250,78 +261,36 @@ template <> struct DenseMapInfo<clang::FileEntryRef> {
// It's safe to use operator==.
return LHS == RHS;
}
+
+ /// Support for finding `const FileEntry *` in a `DenseMap<FileEntryRef, T>`.
+ /// @{
+ static unsigned getHashValue(const clang::FileEntry *Val) {
+ return llvm::hash_value(Val);
+ }
+ static bool isEqual(const clang::FileEntry *LHS, clang::FileEntryRef RHS) {
+ if (RHS.isSpecialDenseMapKey())
+ return false;
+ return LHS == RHS;
+ }
+ /// @}
};
} // end namespace llvm
namespace clang {
-/// Wrapper around Optional<FileEntryRef> that degrades to 'const FileEntry*',
-/// facilitating incremental patches to propagate FileEntryRef.
-///
-/// This class can be used as return value or field where it's convenient for
-/// an Optional<FileEntryRef> to degrade to a 'const FileEntry*'. The purpose
-/// is to avoid code churn due to dances like the following:
-/// \code
-/// // Old code.
-/// lvalue = rvalue;
-///
-/// // Temporary code from an incremental patch.
-/// Optional<FileEntryRef> MaybeF = rvalue;
-/// lvalue = MaybeF ? &MaybeF.getFileEntry() : nullptr;
-///
-/// // Final code.
-/// lvalue = rvalue;
-/// \endcode
-///
-/// FIXME: Once FileEntryRef is "everywhere" and FileEntry::LastRef and
-/// FileEntry::getName have been deleted, delete this class and replace
-/// instances with Optional<FileEntryRef>.
-class OptionalFileEntryRefDegradesToFileEntryPtr
- : public Optional<FileEntryRef> {
-public:
- OptionalFileEntryRefDegradesToFileEntryPtr() = default;
- OptionalFileEntryRefDegradesToFileEntryPtr(
- OptionalFileEntryRefDegradesToFileEntryPtr &&) = default;
- OptionalFileEntryRefDegradesToFileEntryPtr(
- const OptionalFileEntryRefDegradesToFileEntryPtr &) = default;
- OptionalFileEntryRefDegradesToFileEntryPtr &
- operator=(OptionalFileEntryRefDegradesToFileEntryPtr &&) = default;
- OptionalFileEntryRefDegradesToFileEntryPtr &
- operator=(const OptionalFileEntryRefDegradesToFileEntryPtr &) = default;
-
- OptionalFileEntryRefDegradesToFileEntryPtr(llvm::NoneType) {}
- OptionalFileEntryRefDegradesToFileEntryPtr(FileEntryRef Ref)
- : Optional<FileEntryRef>(Ref) {}
- OptionalFileEntryRefDegradesToFileEntryPtr(Optional<FileEntryRef> MaybeRef)
- : Optional<FileEntryRef>(MaybeRef) {}
-
- OptionalFileEntryRefDegradesToFileEntryPtr &operator=(llvm::NoneType) {
- Optional<FileEntryRef>::operator=(None);
- return *this;
- }
- OptionalFileEntryRefDegradesToFileEntryPtr &operator=(FileEntryRef Ref) {
- Optional<FileEntryRef>::operator=(Ref);
- return *this;
- }
- OptionalFileEntryRefDegradesToFileEntryPtr &
- operator=(Optional<FileEntryRef> MaybeRef) {
- Optional<FileEntryRef>::operator=(MaybeRef);
- return *this;
- }
-
- /// Degrade to 'const FileEntry *' to allow FileEntry::LastRef and
- /// FileEntry::getName have been deleted, delete this class and replace
- /// instances with Optional<FileEntryRef>
- operator const FileEntry *() const {
- return hasValue() ? &getValue().getFileEntry() : nullptr;
- }
-};
-
-static_assert(
- std::is_trivially_copyable<
- OptionalFileEntryRefDegradesToFileEntryPtr>::value,
- "OptionalFileEntryRefDegradesToFileEntryPtr should be trivially copyable");
+inline bool operator==(const FileEntry *LHS, const OptionalFileEntryRef &RHS) {
+ return LHS == (RHS ? &RHS->getFileEntry() : nullptr);
+}
+inline bool operator==(const OptionalFileEntryRef &LHS, const FileEntry *RHS) {
+ return (LHS ? &LHS->getFileEntry() : nullptr) == RHS;
+}
+inline bool operator!=(const FileEntry *LHS, const OptionalFileEntryRef &RHS) {
+ return !(LHS == RHS);
+}
+inline bool operator!=(const OptionalFileEntryRef &LHS, const FileEntry *RHS) {
+ return !(LHS == RHS);
+}
/// Cached information about one file (either on disk
/// or in the virtual file system).
@@ -330,6 +299,10 @@ static_assert(
/// descriptor for the file.
class FileEntry {
friend class FileManager;
+ friend class FileEntryTestHelper;
+ FileEntry();
+ FileEntry(const FileEntry &) = delete;
+ FileEntry &operator=(const FileEntry &) = delete;
std::string RealPathName; // Real path to the file; could be empty.
off_t Size = 0; // File size in bytes.
@@ -338,7 +311,6 @@ class FileEntry {
llvm::sys::fs::UniqueID UniqueID;
unsigned UID = 0; // A unique (small) ID for the file.
bool IsNamedPipe = false;
- bool IsValid = false; // Is this \c FileEntry initialized and valid?
/// The open file, if it is owned by the \p FileEntry.
mutable std::unique_ptr<llvm::vfs::File> File;
@@ -352,20 +324,14 @@ class FileEntry {
// default constructor). It should always have a value in practice.
//
// TODO: remove this once everyone that needs a name uses FileEntryRef.
- Optional<FileEntryRef> LastRef;
+ OptionalFileEntryRef LastRef;
public:
- FileEntry();
~FileEntry();
-
- FileEntry(const FileEntry &) = delete;
- FileEntry &operator=(const FileEntry &) = delete;
-
+ LLVM_DEPRECATED("Use FileEntryRef::getName() instead.", "")
StringRef getName() const { return LastRef->getName(); }
- FileEntryRef getLastRef() const { return *LastRef; }
StringRef tryGetRealPathName() const { return RealPathName; }
- bool isValid() const { return IsValid; }
off_t getSize() const { return Size; }
unsigned getUID() const { return UID; }
const llvm::sys::fs::UniqueID &getUniqueID() const { return UniqueID; }
@@ -374,8 +340,6 @@ public:
/// Return the directory the file lives in.
const DirectoryEntry *getDir() const { return Dir; }
- bool operator<(const FileEntry &RHS) const { return UniqueID < RHS.UniqueID; }
-
/// Check whether the file is a named pipe (and thus can't be opened by
/// the native FileManager methods).
bool isNamedPipe() const { return IsNamedPipe; }
@@ -383,8 +347,6 @@ public:
void closeFile() const;
};
-bool FileEntryRef::isValid() const { return getFileEntry().isValid(); }
-
off_t FileEntryRef::getSize() const { return getFileEntry().getSize(); }
unsigned FileEntryRef::getUID() const { return getFileEntry().getUID(); }
diff --git a/contrib/llvm-project/clang/include/clang/Basic/FileManager.h b/contrib/llvm-project/clang/include/clang/Basic/FileManager.h
index 974771a8f8f3..56cb093dd8c3 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/FileManager.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/FileManager.h
@@ -53,24 +53,26 @@ class FileSystemStatCache;
class FileManager : public RefCountedBase<FileManager> {
IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS;
FileSystemOptions FileSystemOpts;
+ llvm::SpecificBumpPtrAllocator<FileEntry> FilesAlloc;
+ llvm::SpecificBumpPtrAllocator<DirectoryEntry> DirsAlloc;
/// Cache for existing real directories.
- std::map<llvm::sys::fs::UniqueID, DirectoryEntry> UniqueRealDirs;
+ llvm::DenseMap<llvm::sys::fs::UniqueID, DirectoryEntry *> UniqueRealDirs;
/// Cache for existing real files.
- std::map<llvm::sys::fs::UniqueID, FileEntry> UniqueRealFiles;
+ llvm::DenseMap<llvm::sys::fs::UniqueID, FileEntry *> UniqueRealFiles;
/// The virtual directories that we have allocated.
///
/// For each virtual file (e.g. foo/bar/baz.cpp), we add all of its parent
/// directories (foo/ and foo/bar/) here.
- SmallVector<std::unique_ptr<DirectoryEntry>, 4> VirtualDirectoryEntries;
+ SmallVector<DirectoryEntry *, 4> VirtualDirectoryEntries;
/// The virtual files that we have allocated.
- SmallVector<std::unique_ptr<FileEntry>, 4> VirtualFileEntries;
+ SmallVector<FileEntry *, 4> VirtualFileEntries;
/// A set of files that bypass the maps and uniquing. They can have
/// conflicting filenames.
- SmallVector<std::unique_ptr<FileEntry>, 0> BypassFileEntries;
+ SmallVector<FileEntry *, 0> BypassFileEntries;
/// A cache that maps paths to directory entries (either real or
/// virtual) we have looked up, or an error that occurred when we looked up
@@ -100,7 +102,7 @@ class FileManager : public RefCountedBase<FileManager> {
SeenBypassFileEntries;
/// The file entry for stdin, if it has been accessed through the FileManager.
- Optional<FileEntryRef> STDIN;
+ OptionalFileEntryRef STDIN;
/// The canonical names of files and directories .
llvm::DenseMap<const void *, llvm::StringRef> CanonicalNames;
@@ -164,8 +166,8 @@ public:
bool CacheFailure = true);
/// Get a \c DirectoryEntryRef if it exists, without doing anything on error.
- llvm::Optional<DirectoryEntryRef>
- getOptionalDirectoryRef(StringRef DirName, bool CacheFailure = true) {
+ OptionalDirectoryEntryRef getOptionalDirectoryRef(StringRef DirName,
+ bool CacheFailure = true) {
return llvm::expectedToOptional(getDirectoryRef(DirName, CacheFailure));
}
@@ -229,9 +231,9 @@ public:
llvm::Expected<FileEntryRef> getSTDIN();
/// Get a FileEntryRef if it exists, without doing anything on error.
- llvm::Optional<FileEntryRef> getOptionalFileRef(StringRef Filename,
- bool OpenFile = false,
- bool CacheFailure = true) {
+ OptionalFileEntryRef getOptionalFileRef(StringRef Filename,
+ bool OpenFile = false,
+ bool CacheFailure = true) {
return llvm::expectedToOptional(
getFileRef(Filename, OpenFile, CacheFailure));
}
@@ -241,6 +243,10 @@ public:
const FileSystemOptions &getFileSystemOpts() const { return FileSystemOpts; }
llvm::vfs::FileSystem &getVirtualFileSystem() const { return *FS; }
+ llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem>
+ getVirtualFileSystemPtr() const {
+ return FS;
+ }
void setVirtualFileSystem(IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS) {
this->FS = std::move(FS);
@@ -264,12 +270,12 @@ public:
/// bypasses all mapping and uniquing, blindly creating a new FileEntry.
/// There is no attempt to deduplicate these; if you bypass the same file
/// twice, you get two new file entries.
- llvm::Optional<FileEntryRef> getBypassFile(FileEntryRef VFE);
+ OptionalFileEntryRef getBypassFile(FileEntryRef VFE);
/// Open the specified file as a MemoryBuffer, returning a new
/// MemoryBuffer if successful, otherwise returning null.
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
- getBufferForFile(const FileEntry *Entry, bool isVolatile = false,
+ getBufferForFile(FileEntryRef Entry, bool isVolatile = false,
bool RequiresNullTerminator = true);
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
getBufferForFile(StringRef Filename, bool isVolatile = false,
@@ -305,24 +311,31 @@ public:
bool makeAbsolutePath(SmallVectorImpl<char> &Path) const;
/// Produce an array mapping from the unique IDs assigned to each
- /// file to the corresponding FileEntry pointer.
- void GetUniqueIDMapping(
- SmallVectorImpl<const FileEntry *> &UIDToFiles) const;
+ /// file to the corresponding FileEntryRef.
+ void
+ GetUniqueIDMapping(SmallVectorImpl<OptionalFileEntryRef> &UIDToFiles) const;
/// Retrieve the canonical name for a given directory.
///
/// This is a very expensive operation, despite its results being cached,
/// and should only be used when the physical layout of the file system is
/// required, which is (almost) never.
- StringRef getCanonicalName(const DirectoryEntry *Dir);
+ StringRef getCanonicalName(DirectoryEntryRef Dir);
/// Retrieve the canonical name for a given file.
///
/// This is a very expensive operation, despite its results being cached,
/// and should only be used when the physical layout of the file system is
/// required, which is (almost) never.
- StringRef getCanonicalName(const FileEntry *File);
+ StringRef getCanonicalName(FileEntryRef File);
+
+private:
+ /// Retrieve the canonical name for a given file or directory.
+ ///
+ /// The first param is a key in the CanonicalNames array.
+ StringRef getCanonicalName(const void *Entry, StringRef Name);
+public:
void PrintStats() const;
};
diff --git a/contrib/llvm-project/clang/include/clang/Basic/FileSystemStatCache.h b/contrib/llvm-project/clang/include/clang/Basic/FileSystemStatCache.h
index d37f2d507f83..5a003a748178 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/FileSystemStatCache.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/FileSystemStatCache.h
@@ -23,6 +23,7 @@
#include <cstdint>
#include <ctime>
#include <memory>
+#include <optional>
#include <string>
#include <utility>
@@ -54,7 +55,7 @@ public:
protected:
// FIXME: The pointer here is a non-owning/optional reference to the
- // unique_ptr. Optional<unique_ptr<vfs::File>&> might be nicer, but
+ // unique_ptr. std::optional<unique_ptr<vfs::File>&> might be nicer, but
// Optional needs some work to support references so this isn't possible yet.
virtual std::error_code getStat(StringRef Path, llvm::vfs::Status &Status,
bool isFile,
diff --git a/contrib/llvm-project/clang/include/clang/Basic/HLSLRuntime.h b/contrib/llvm-project/clang/include/clang/Basic/HLSLRuntime.h
new file mode 100644
index 000000000000..03166805daa6
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/HLSLRuntime.h
@@ -0,0 +1,66 @@
+//===- HLSLRuntime.h - HLSL Runtime -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// Defines helper utilities for supporting the HLSL runtime environment.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_BASIC_HLSLRUNTIME_H
+#define CLANG_BASIC_HLSLRUNTIME_H
+
+#include "clang/Basic/LangOptions.h"
+#include <cstdint>
+
+namespace clang {
+namespace hlsl {
+
+constexpr ShaderStage
+getStageFromEnvironment(const llvm::Triple::EnvironmentType &E) {
+ uint32_t Pipeline =
+ static_cast<uint32_t>(E) - static_cast<uint32_t>(llvm::Triple::Pixel);
+
+ if (Pipeline > (uint32_t)ShaderStage::Invalid)
+ return ShaderStage::Invalid;
+ return static_cast<ShaderStage>(Pipeline);
+}
+
+#define ENUM_COMPARE_ASSERT(Value) \
+ static_assert( \
+ getStageFromEnvironment(llvm::Triple::Value) == ShaderStage::Value, \
+ "Mismatch between llvm::Triple and clang::ShaderStage for " #Value);
+
+ENUM_COMPARE_ASSERT(Pixel)
+ENUM_COMPARE_ASSERT(Vertex)
+ENUM_COMPARE_ASSERT(Geometry)
+ENUM_COMPARE_ASSERT(Hull)
+ENUM_COMPARE_ASSERT(Domain)
+ENUM_COMPARE_ASSERT(Compute)
+ENUM_COMPARE_ASSERT(Library)
+ENUM_COMPARE_ASSERT(RayGeneration)
+ENUM_COMPARE_ASSERT(Intersection)
+ENUM_COMPARE_ASSERT(AnyHit)
+ENUM_COMPARE_ASSERT(ClosestHit)
+ENUM_COMPARE_ASSERT(Miss)
+ENUM_COMPARE_ASSERT(Callable)
+ENUM_COMPARE_ASSERT(Mesh)
+ENUM_COMPARE_ASSERT(Amplification)
+
+static_assert(getStageFromEnvironment(llvm::Triple::UnknownEnvironment) ==
+ ShaderStage::Invalid,
+ "Mismatch between llvm::Triple and "
+ "clang::ShaderStage for Invalid");
+static_assert(getStageFromEnvironment(llvm::Triple::MSVC) ==
+ ShaderStage::Invalid,
+ "Mismatch between llvm::Triple and "
+ "clang::ShaderStage for Invalid");
+
+} // namespace hlsl
+} // namespace clang
+
+#endif // CLANG_BASIC_HLSLRUNTIME_H
diff --git a/contrib/llvm-project/clang/include/clang/Basic/HeaderInclude.h b/contrib/llvm-project/clang/include/clang/Basic/HeaderInclude.h
new file mode 100644
index 000000000000..83c26543bbd3
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/HeaderInclude.h
@@ -0,0 +1,73 @@
+//===--- HeaderInclude.h - Header Include -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Defines enums used when emitting included header information.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_HEADERINCLUDEFORMATKIND_H
+#define LLVM_CLANG_BASIC_HEADERINCLUDEFORMATKIND_H
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <utility>
+
+namespace clang {
+/// The format in which header information is emitted.
+enum HeaderIncludeFormatKind { HIFMT_None, HIFMT_Textual, HIFMT_JSON };
+
+/// Whether header information is filtered or not. If HIFIL_Only_Direct_System
+/// is used, only information on system headers directly included from
+/// non-system headers is emitted.
+enum HeaderIncludeFilteringKind { HIFIL_None, HIFIL_Only_Direct_System };
+
+inline HeaderIncludeFormatKind
+stringToHeaderIncludeFormatKind(const char *Str) {
+ return llvm::StringSwitch<HeaderIncludeFormatKind>(Str)
+ .Case("textual", HIFMT_Textual)
+ .Case("json", HIFMT_JSON)
+ .Default(HIFMT_None);
+}
+
+inline bool stringToHeaderIncludeFiltering(const char *Str,
+ HeaderIncludeFilteringKind &Kind) {
+ std::pair<bool, HeaderIncludeFilteringKind> P =
+ llvm::StringSwitch<std::pair<bool, HeaderIncludeFilteringKind>>(Str)
+ .Case("none", {true, HIFIL_None})
+ .Case("only-direct-system", {true, HIFIL_Only_Direct_System})
+ .Default({false, HIFIL_None});
+ Kind = P.second;
+ return P.first;
+}
+
+inline const char *headerIncludeFormatKindToString(HeaderIncludeFormatKind K) {
+ switch (K) {
+ case HIFMT_None:
+ llvm_unreachable("unexpected format kind");
+ case HIFMT_Textual:
+ return "textual";
+ case HIFMT_JSON:
+ return "json";
+ }
+ llvm_unreachable("Unknown HeaderIncludeFormatKind enum");
+}
+
+inline const char *
+headerIncludeFilteringKindToString(HeaderIncludeFilteringKind K) {
+ switch (K) {
+ case HIFIL_None:
+ return "none";
+ case HIFIL_Only_Direct_System:
+ return "only-direct-system";
+ }
+ llvm_unreachable("Unknown HeaderIncludeFilteringKind enum");
+}
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_BASIC_HEADERINCLUDEFORMATKIND_H
diff --git a/contrib/llvm-project/clang/include/clang/Basic/IdentifierTable.h b/contrib/llvm-project/clang/include/clang/Basic/IdentifierTable.h
index f2379c7ddfbd..1ac182d4fce2 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/IdentifierTable.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/IdentifierTable.h
@@ -15,9 +15,13 @@
#ifndef LLVM_CLANG_BASIC_IDENTIFIERTABLE_H
#define LLVM_CLANG_BASIC_IDENTIFIERTABLE_H
+#include "clang/Basic/DiagnosticIDs.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/TokenKinds.h"
#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
@@ -43,11 +47,34 @@ class SourceLocation;
enum class ReservedIdentifierStatus {
NotReserved = 0,
StartsWithUnderscoreAtGlobalScope,
+ StartsWithUnderscoreAndIsExternC,
StartsWithDoubleUnderscore,
StartsWithUnderscoreFollowedByCapitalLetter,
ContainsDoubleUnderscore,
};
+enum class ReservedLiteralSuffixIdStatus {
+ NotReserved = 0,
+ NotStartsWithUnderscore,
+ ContainsDoubleUnderscore,
+};
+
+/// Determine whether an identifier is reserved for use as a name at global
+/// scope. Such identifiers might be implementation-specific global functions
+/// or variables.
+inline bool isReservedAtGlobalScope(ReservedIdentifierStatus Status) {
+ return Status != ReservedIdentifierStatus::NotReserved;
+}
+
+/// Determine whether an identifier is reserved in all contexts. Such
+/// identifiers might be implementation-specific keywords or macros, for
+/// example.
+inline bool isReservedInAllContexts(ReservedIdentifierStatus Status) {
+ return Status != ReservedIdentifierStatus::NotReserved &&
+ Status != ReservedIdentifierStatus::StartsWithUnderscoreAtGlobalScope &&
+ Status != ReservedIdentifierStatus::StartsWithUnderscoreAndIsExternC;
+}
+
/// A simple pair of identifier info and location.
using IdentifierLocPair = std::pair<IdentifierInfo *, SourceLocation>;
@@ -58,6 +85,21 @@ enum { IdentifierInfoAlignment = 8 };
static constexpr int ObjCOrBuiltinIDBits = 16;
+/// The "layout" of ObjCOrBuiltinID is:
+/// - The first value (0) represents "not a special identifier".
+/// - The next (NUM_OBJC_KEYWORDS - 1) values represent ObjCKeywordKinds (not
+/// including objc_not_keyword).
+/// - The next (NUM_INTERESTING_IDENTIFIERS - 1) values represent
+/// InterestingIdentifierKinds (not including not_interesting).
+/// - The rest of the values represent builtin IDs (not including NotBuiltin).
+static constexpr int FirstObjCKeywordID = 1;
+static constexpr int LastObjCKeywordID =
+ FirstObjCKeywordID + tok::NUM_OBJC_KEYWORDS - 2;
+static constexpr int FirstInterestingIdentifierID = LastObjCKeywordID + 1;
+static constexpr int LastInterestingIdentifierID =
+ FirstInterestingIdentifierID + tok::NUM_INTERESTING_IDENTIFIERS - 2;
+static constexpr int FirstBuiltinID = LastInterestingIdentifierID + 1;
+
/// One of these records is kept for each identifier that
/// is lexed. This contains information about whether the token was \#define'd,
/// is a language keyword, or if it is a front-end token of some sort (e.g. a
@@ -68,6 +110,7 @@ class alignas(IdentifierInfoAlignment) IdentifierInfo {
friend class IdentifierTable;
// Front-end token ID or tok::identifier.
+ LLVM_PREFERRED_TYPE(tok::TokenKind)
unsigned TokenID : 9;
// ObjC keyword ('protocol' in '@protocol') or builtin (__builtin_inf).
@@ -76,52 +119,78 @@ class alignas(IdentifierInfoAlignment) IdentifierInfo {
unsigned ObjCOrBuiltinID : ObjCOrBuiltinIDBits;
// True if there is a #define for this.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasMacro : 1;
// True if there was a #define for this.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HadMacro : 1;
// True if the identifier is a language extension.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsExtension : 1;
// True if the identifier is a keyword in a newer or proposed Standard.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsFutureCompatKeyword : 1;
// True if the identifier is poisoned.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsPoisoned : 1;
// True if the identifier is a C++ operator keyword.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsCPPOperatorKeyword : 1;
// Internal bit set by the member function RecomputeNeedsHandleIdentifier.
// See comment about RecomputeNeedsHandleIdentifier for more info.
+ LLVM_PREFERRED_TYPE(bool)
unsigned NeedsHandleIdentifier : 1;
// True if the identifier was loaded (at least partially) from an AST file.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsFromAST : 1;
// True if the identifier has changed from the definition
// loaded from an AST file.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ChangedAfterLoad : 1;
// True if the identifier's frontend information has changed from the
// definition loaded from an AST file.
+ LLVM_PREFERRED_TYPE(bool)
unsigned FEChangedAfterLoad : 1;
// True if revertTokenIDToIdentifier was called.
+ LLVM_PREFERRED_TYPE(bool)
unsigned RevertedTokenID : 1;
// True if there may be additional information about
// this identifier stored externally.
+ LLVM_PREFERRED_TYPE(bool)
unsigned OutOfDate : 1;
// True if this is the 'import' contextual keyword.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsModulesImport : 1;
// True if this is a mangled OpenMP variant name.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsMangledOpenMPVariantName : 1;
- // 28 bits left in a 64-bit word.
+ // True if this is a deprecated macro.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsDeprecatedMacro : 1;
+
+ // True if this macro is unsafe in headers.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsRestrictExpansion : 1;
+
+ // True if this macro is final.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsFinal : 1;
+
+ // 22 bits left in a 64-bit word.
// Managed by the language front-end.
void *FETokenInfo = nullptr;
@@ -134,7 +203,8 @@ class alignas(IdentifierInfoAlignment) IdentifierInfo {
IsPoisoned(false), IsCPPOperatorKeyword(false),
NeedsHandleIdentifier(false), IsFromAST(false), ChangedAfterLoad(false),
FEChangedAfterLoad(false), RevertedTokenID(false), OutOfDate(false),
- IsModulesImport(false), IsMangledOpenMPVariantName(false) {}
+ IsModulesImport(false), IsMangledOpenMPVariantName(false),
+ IsDeprecatedMacro(false), IsRestrictExpansion(false), IsFinal(false) {}
public:
IdentifierInfo(const IdentifierInfo &) = delete;
@@ -182,6 +252,14 @@ public:
NeedsHandleIdentifier = true;
HadMacro = true;
} else {
+ // If this is a final macro, make the deprecation and header unsafe bits
+ // stick around after the undefinition so they apply to any redefinitions.
+ if (!IsFinal) {
+ // Because calling the setters of these calls recomputes, just set them
+ // manually to avoid recomputing a bunch of times.
+ IsDeprecatedMacro = false;
+ IsRestrictExpansion = false;
+ }
RecomputeNeedsHandleIdentifier();
}
}
@@ -192,6 +270,34 @@ public:
return HadMacro;
}
+ bool isDeprecatedMacro() const { return IsDeprecatedMacro; }
+
+ void setIsDeprecatedMacro(bool Val) {
+ if (IsDeprecatedMacro == Val)
+ return;
+ IsDeprecatedMacro = Val;
+ if (Val)
+ NeedsHandleIdentifier = true;
+ else
+ RecomputeNeedsHandleIdentifier();
+ }
+
+ bool isRestrictExpansion() const { return IsRestrictExpansion; }
+
+ void setIsRestrictExpansion(bool Val) {
+ if (IsRestrictExpansion == Val)
+ return;
+ IsRestrictExpansion = Val;
+ if (Val)
+ NeedsHandleIdentifier = true;
+ else
+ RecomputeNeedsHandleIdentifier();
+ }
+
+ bool isFinal() const { return IsFinal; }
+
+ void setIsFinal(bool Val) { IsFinal = Val; }
+
/// If this is a source-language token (e.g. 'for'), this API
/// can be used to cause the lexer to map identifiers to source-language
/// tokens.
@@ -226,7 +332,9 @@ public:
///
/// For example, 'class' will return tok::objc_class if ObjC is enabled.
tok::ObjCKeywordKind getObjCKeywordID() const {
- if (ObjCOrBuiltinID < tok::NUM_OBJC_KEYWORDS)
+ static_assert(FirstObjCKeywordID == 1,
+ "hard-coding this assumption to simplify code");
+ if (ObjCOrBuiltinID <= LastObjCKeywordID)
return tok::ObjCKeywordKind(ObjCOrBuiltinID);
else
return tok::objc_not_keyword;
@@ -237,15 +345,30 @@ public:
///
/// 0 is not-built-in. 1+ are specific builtin functions.
unsigned getBuiltinID() const {
- if (ObjCOrBuiltinID >= tok::NUM_OBJC_KEYWORDS)
- return ObjCOrBuiltinID - tok::NUM_OBJC_KEYWORDS;
+ if (ObjCOrBuiltinID >= FirstBuiltinID)
+ return 1 + (ObjCOrBuiltinID - FirstBuiltinID);
else
return 0;
}
void setBuiltinID(unsigned ID) {
- ObjCOrBuiltinID = ID + tok::NUM_OBJC_KEYWORDS;
- assert(ObjCOrBuiltinID - unsigned(tok::NUM_OBJC_KEYWORDS) == ID
- && "ID too large for field!");
+ assert(ID != 0);
+ ObjCOrBuiltinID = FirstBuiltinID + (ID - 1);
+ assert(getBuiltinID() == ID && "ID too large for field!");
+ }
+ void clearBuiltinID() { ObjCOrBuiltinID = 0; }
+
+ tok::InterestingIdentifierKind getInterestingIdentifierID() const {
+ if (ObjCOrBuiltinID >= FirstInterestingIdentifierID &&
+ ObjCOrBuiltinID <= LastInterestingIdentifierID)
+ return tok::InterestingIdentifierKind(
+ 1 + (ObjCOrBuiltinID - FirstInterestingIdentifierID));
+ else
+ return tok::not_interesting;
+ }
+ void setInterestingIdentifierID(unsigned ID) {
+ assert(ID != tok::not_interesting);
+ ObjCOrBuiltinID = FirstInterestingIdentifierID + (ID - 1);
+ assert(getInterestingIdentifierID() == ID && "ID too large for field!");
}
unsigned getObjCOrBuiltinID() const { return ObjCOrBuiltinID; }
@@ -388,13 +511,24 @@ public:
/// function(<#int x#>);
/// \endcode
bool isEditorPlaceholder() const {
- return getName().startswith("<#") && getName().endswith("#>");
+ return getName().starts_with("<#") && getName().ends_with("#>");
}
/// Determine whether \p this is a name reserved for the implementation (C99
/// 7.1.3, C++ [lib.global.names]).
ReservedIdentifierStatus isReserved(const LangOptions &LangOpts) const;
+ /// Determine whether \p this is a name reserved for future standardization or
+ /// the implementation (C++ [usrlit.suffix]).
+ ReservedLiteralSuffixIdStatus isReservedLiteralSuffixId() const;
+
+ /// If the identifier is an "uglified" reserved name, return a cleaned form.
+ /// e.g. _Foo => Foo. Otherwise, just returns the name.
+ StringRef deuglifiedName() const;
+ bool isPlaceholder() const {
+ return getLength() == 1 && getNameStart()[0] == '_';
+ }
+
/// Provide less than operator for lexicographical sorting.
bool operator<(const IdentifierInfo &RHS) const {
return getName() < RHS.getName();
@@ -527,7 +661,7 @@ public:
/// Return the identifier token info for the specified named
/// identifier.
IdentifierInfo &get(StringRef Name) {
- auto &Entry = *HashTable.insert(std::make_pair(Name, nullptr)).first;
+ auto &Entry = *HashTable.try_emplace(Name, nullptr).first;
IdentifierInfo *&II = Entry.second;
if (II) return *II;
@@ -601,6 +735,12 @@ public:
/// Populate the identifier table with info about the language keywords
/// for the language specified by \p LangOpts.
void AddKeywords(const LangOptions &LangOpts);
+
+ /// Returns the correct diagnostic to issue for a future-compat diagnostic
+ /// warning. Note, this function assumes the identifier passed has already
+ /// been determined to be a future compatible keyword.
+ diag::kind getFutureCompatDiagKind(const IdentifierInfo &II,
+ const LangOptions &LangOpts);
};
/// A family of Objective-C methods.
@@ -675,6 +815,121 @@ enum ObjCStringFormatFamily {
SFF_CFString
};
+namespace detail {
+
+/// DeclarationNameExtra is used as a base of various uncommon special names.
+/// This class is needed since DeclarationName has not enough space to store
+/// the kind of every possible names. Therefore the kind of common names is
+/// stored directly in DeclarationName, and the kind of uncommon names is
+/// stored in DeclarationNameExtra. It is aligned to 8 bytes because
+/// DeclarationName needs the lower 3 bits to store the kind of common names.
+/// DeclarationNameExtra is tightly coupled to DeclarationName and any change
+/// here is very likely to require changes in DeclarationName(Table).
+class alignas(IdentifierInfoAlignment) DeclarationNameExtra {
+ friend class clang::DeclarationName;
+ friend class clang::DeclarationNameTable;
+
+protected:
+ /// The kind of "extra" information stored in the DeclarationName. See
+ /// @c ExtraKindOrNumArgs for an explanation of how these enumerator values
+ /// are used. Note that DeclarationName depends on the numerical values
+ /// of the enumerators in this enum. See DeclarationName::StoredNameKind
+ /// for more info.
+ enum ExtraKind {
+ CXXDeductionGuideName,
+ CXXLiteralOperatorName,
+ CXXUsingDirective,
+ ObjCMultiArgSelector
+ };
+
+ /// ExtraKindOrNumArgs has one of the following meaning:
+ /// * The kind of an uncommon C++ special name. This DeclarationNameExtra
+ /// is in this case in fact either a CXXDeductionGuideNameExtra or
+ /// a CXXLiteralOperatorIdName.
+ ///
+ /// * It may be also name common to C++ using-directives (CXXUsingDirective),
+ ///
+ /// * Otherwise it is ObjCMultiArgSelector+NumArgs, where NumArgs is
+ /// the number of arguments in the Objective-C selector, in which
+ /// case the DeclarationNameExtra is also a MultiKeywordSelector.
+ unsigned ExtraKindOrNumArgs;
+
+ DeclarationNameExtra(ExtraKind Kind) : ExtraKindOrNumArgs(Kind) {}
+ DeclarationNameExtra(unsigned NumArgs)
+ : ExtraKindOrNumArgs(ObjCMultiArgSelector + NumArgs) {}
+
+ /// Return the corresponding ExtraKind.
+ ExtraKind getKind() const {
+ return static_cast<ExtraKind>(ExtraKindOrNumArgs >
+ (unsigned)ObjCMultiArgSelector
+ ? (unsigned)ObjCMultiArgSelector
+ : ExtraKindOrNumArgs);
+ }
+
+ /// Return the number of arguments in an ObjC selector. Only valid when this
+ /// is indeed an ObjCMultiArgSelector.
+ unsigned getNumArgs() const {
+ assert(ExtraKindOrNumArgs >= (unsigned)ObjCMultiArgSelector &&
+ "getNumArgs called but this is not an ObjC selector!");
+ return ExtraKindOrNumArgs - (unsigned)ObjCMultiArgSelector;
+ }
+};
+
+} // namespace detail
+
+/// One of these variable length records is kept for each
+/// selector containing more than one keyword. We use a folding set
+/// to unique aggregate names (keyword selectors in ObjC parlance). Access to
+/// this class is provided strictly through Selector.
+class alignas(IdentifierInfoAlignment) MultiKeywordSelector
+ : public detail::DeclarationNameExtra,
+ public llvm::FoldingSetNode {
+ MultiKeywordSelector(unsigned nKeys) : DeclarationNameExtra(nKeys) {}
+
+public:
+ // Constructor for keyword selectors.
+ MultiKeywordSelector(unsigned nKeys, IdentifierInfo **IIV)
+ : DeclarationNameExtra(nKeys) {
+ assert((nKeys > 1) && "not a multi-keyword selector");
+
+ // Fill in the trailing keyword array.
+ IdentifierInfo **KeyInfo = reinterpret_cast<IdentifierInfo **>(this + 1);
+ for (unsigned i = 0; i != nKeys; ++i)
+ KeyInfo[i] = IIV[i];
+ }
+
+ // getName - Derive the full selector name and return it.
+ std::string getName() const;
+
+ using DeclarationNameExtra::getNumArgs;
+
+ using keyword_iterator = IdentifierInfo *const *;
+
+ keyword_iterator keyword_begin() const {
+ return reinterpret_cast<keyword_iterator>(this + 1);
+ }
+
+ keyword_iterator keyword_end() const {
+ return keyword_begin() + getNumArgs();
+ }
+
+ IdentifierInfo *getIdentifierInfoForSlot(unsigned i) const {
+ assert(i < getNumArgs() && "getIdentifierInfoForSlot(): illegal index");
+ return keyword_begin()[i];
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, keyword_iterator ArgTys,
+ unsigned NumArgs) {
+ ID.AddInteger(NumArgs);
+ for (unsigned i = 0; i != NumArgs; ++i)
+ ID.AddPointer(ArgTys[i]);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, keyword_begin(), getNumArgs());
+ }
+};
+
/// Smart pointer class that efficiently represents Objective-C method
/// names.
///
@@ -690,43 +945,58 @@ class Selector {
enum IdentifierInfoFlag {
// Empty selector = 0. Note that these enumeration values must
// correspond to the enumeration values of DeclarationName::StoredNameKind
- ZeroArg = 0x01,
- OneArg = 0x02,
+ ZeroArg = 0x01,
+ OneArg = 0x02,
+ // IMPORTANT NOTE: see comments in InfoPtr (below) about this enumerator
+ // value.
MultiArg = 0x07,
- ArgFlags = 0x07
};
- /// A pointer to the MultiKeywordSelector or IdentifierInfo. We use the low
- /// three bits of InfoPtr to store an IdentifierInfoFlag. Note that in any
- /// case IdentifierInfo and MultiKeywordSelector are already aligned to
- /// 8 bytes even on 32 bits archs because of DeclarationName.
- uintptr_t InfoPtr = 0;
+ /// IMPORTANT NOTE: the order of the types in this PointerUnion are
+ /// important! The DeclarationName class has bidirectional conversion
+ /// to/from Selector through an opaque pointer (void *) which corresponds
+ /// to this PointerIntPair. The discriminator bit from the PointerUnion
+ /// corresponds to the high bit in the MultiArg enumerator. So while this
+ /// PointerIntPair only has two bits for the integer (and we mask off the
+ /// high bit in `MultiArg` when it is used), that discrimator bit is
+ /// still necessary for the opaque conversion. The discriminator bit
+ /// from the PointerUnion and the two integer bits from the
+ /// PointerIntPair are also exposed via the DeclarationName::StoredNameKind
+ /// enumeration; see the comments in DeclarationName.h for more details.
+ /// Do not reorder or add any arguments to this template
+ /// without thoroughly understanding how tightly coupled these classes are.
+ llvm::PointerIntPair<
+ llvm::PointerUnion<IdentifierInfo *, MultiKeywordSelector *>, 2>
+ InfoPtr;
Selector(IdentifierInfo *II, unsigned nArgs) {
- InfoPtr = reinterpret_cast<uintptr_t>(II);
- assert((InfoPtr & ArgFlags) == 0 &&"Insufficiently aligned IdentifierInfo");
assert(nArgs < 2 && "nArgs not equal to 0/1");
- InfoPtr |= nArgs+1;
+ InfoPtr.setPointerAndInt(II, nArgs + 1);
}
Selector(MultiKeywordSelector *SI) {
- InfoPtr = reinterpret_cast<uintptr_t>(SI);
- assert((InfoPtr & ArgFlags) == 0 &&"Insufficiently aligned IdentifierInfo");
- InfoPtr |= MultiArg;
+ // IMPORTANT NOTE: we mask off the upper bit of this value because we only
+ // reserve two bits for the integer in the PointerIntPair. See the comments
+ // in `InfoPtr` for more details.
+ InfoPtr.setPointerAndInt(SI, MultiArg & 0b11);
}
IdentifierInfo *getAsIdentifierInfo() const {
- if (getIdentifierInfoFlag() < MultiArg)
- return reinterpret_cast<IdentifierInfo *>(InfoPtr & ~ArgFlags);
- return nullptr;
+ return InfoPtr.getPointer().dyn_cast<IdentifierInfo *>();
}
MultiKeywordSelector *getMultiKeywordSelector() const {
- return reinterpret_cast<MultiKeywordSelector *>(InfoPtr & ~ArgFlags);
+ return InfoPtr.getPointer().get<MultiKeywordSelector *>();
}
unsigned getIdentifierInfoFlag() const {
- return InfoPtr & ArgFlags;
+ unsigned new_flags = InfoPtr.getInt();
+ // IMPORTANT NOTE: We have to reconstitute this data rather than use the
+ // value directly from the PointerIntPair. See the comments in `InfoPtr`
+ // for more details.
+ if (InfoPtr.getPointer().is<MultiKeywordSelector *>())
+ new_flags |= MultiArg;
+ return new_flags;
}
static ObjCMethodFamily getMethodFamilyImpl(Selector sel);
@@ -737,31 +1007,27 @@ public:
/// The default ctor should only be used when creating data structures that
/// will contain selectors.
Selector() = default;
- explicit Selector(uintptr_t V) : InfoPtr(V) {}
+ explicit Selector(uintptr_t V) {
+ InfoPtr.setFromOpaqueValue(reinterpret_cast<void *>(V));
+ }
/// operator==/!= - Indicate whether the specified selectors are identical.
bool operator==(Selector RHS) const {
- return InfoPtr == RHS.InfoPtr;
+ return InfoPtr.getOpaqueValue() == RHS.InfoPtr.getOpaqueValue();
}
bool operator!=(Selector RHS) const {
- return InfoPtr != RHS.InfoPtr;
+ return InfoPtr.getOpaqueValue() != RHS.InfoPtr.getOpaqueValue();
}
- void *getAsOpaquePtr() const {
- return reinterpret_cast<void*>(InfoPtr);
- }
+ void *getAsOpaquePtr() const { return InfoPtr.getOpaqueValue(); }
/// Determine whether this is the empty selector.
- bool isNull() const { return InfoPtr == 0; }
+ bool isNull() const { return InfoPtr.getOpaqueValue() == nullptr; }
// Predicates to identify the selector type.
- bool isKeywordSelector() const {
- return getIdentifierInfoFlag() != ZeroArg;
- }
+ bool isKeywordSelector() const { return InfoPtr.getInt() != ZeroArg; }
- bool isUnarySelector() const {
- return getIdentifierInfoFlag() == ZeroArg;
- }
+ bool isUnarySelector() const { return InfoPtr.getInt() == ZeroArg; }
/// If this selector is the specific keyword selector described by Names.
bool isKeywordSelector(ArrayRef<StringRef> Names) const;
@@ -872,68 +1138,6 @@ public:
static std::string getPropertyNameFromSetterSelector(Selector Sel);
};
-namespace detail {
-
-/// DeclarationNameExtra is used as a base of various uncommon special names.
-/// This class is needed since DeclarationName has not enough space to store
-/// the kind of every possible names. Therefore the kind of common names is
-/// stored directly in DeclarationName, and the kind of uncommon names is
-/// stored in DeclarationNameExtra. It is aligned to 8 bytes because
-/// DeclarationName needs the lower 3 bits to store the kind of common names.
-/// DeclarationNameExtra is tightly coupled to DeclarationName and any change
-/// here is very likely to require changes in DeclarationName(Table).
-class alignas(IdentifierInfoAlignment) DeclarationNameExtra {
- friend class clang::DeclarationName;
- friend class clang::DeclarationNameTable;
-
-protected:
- /// The kind of "extra" information stored in the DeclarationName. See
- /// @c ExtraKindOrNumArgs for an explanation of how these enumerator values
- /// are used. Note that DeclarationName depends on the numerical values
- /// of the enumerators in this enum. See DeclarationName::StoredNameKind
- /// for more info.
- enum ExtraKind {
- CXXDeductionGuideName,
- CXXLiteralOperatorName,
- CXXUsingDirective,
- ObjCMultiArgSelector
- };
-
- /// ExtraKindOrNumArgs has one of the following meaning:
- /// * The kind of an uncommon C++ special name. This DeclarationNameExtra
- /// is in this case in fact either a CXXDeductionGuideNameExtra or
- /// a CXXLiteralOperatorIdName.
- ///
- /// * It may be also name common to C++ using-directives (CXXUsingDirective),
- ///
- /// * Otherwise it is ObjCMultiArgSelector+NumArgs, where NumArgs is
- /// the number of arguments in the Objective-C selector, in which
- /// case the DeclarationNameExtra is also a MultiKeywordSelector.
- unsigned ExtraKindOrNumArgs;
-
- DeclarationNameExtra(ExtraKind Kind) : ExtraKindOrNumArgs(Kind) {}
- DeclarationNameExtra(unsigned NumArgs)
- : ExtraKindOrNumArgs(ObjCMultiArgSelector + NumArgs) {}
-
- /// Return the corresponding ExtraKind.
- ExtraKind getKind() const {
- return static_cast<ExtraKind>(ExtraKindOrNumArgs >
- (unsigned)ObjCMultiArgSelector
- ? (unsigned)ObjCMultiArgSelector
- : ExtraKindOrNumArgs);
- }
-
- /// Return the number of arguments in an ObjC selector. Only valid when this
- /// is indeed an ObjCMultiArgSelector.
- unsigned getNumArgs() const {
- assert(ExtraKindOrNumArgs >= (unsigned)ObjCMultiArgSelector &&
- "getNumArgs called but this is not an ObjC selector!");
- return ExtraKindOrNumArgs - (unsigned)ObjCMultiArgSelector;
- }
-};
-
-} // namespace detail
-
} // namespace clang
namespace llvm {
@@ -970,34 +1174,6 @@ struct PointerLikeTypeTraits<clang::Selector> {
static constexpr int NumLowBitsAvailable = 0;
};
-// Provide PointerLikeTypeTraits for IdentifierInfo pointers, which
-// are not guaranteed to be 8-byte aligned.
-template<>
-struct PointerLikeTypeTraits<clang::IdentifierInfo*> {
- static void *getAsVoidPointer(clang::IdentifierInfo* P) {
- return P;
- }
-
- static clang::IdentifierInfo *getFromVoidPointer(void *P) {
- return static_cast<clang::IdentifierInfo*>(P);
- }
-
- static constexpr int NumLowBitsAvailable = 1;
-};
-
-template<>
-struct PointerLikeTypeTraits<const clang::IdentifierInfo*> {
- static const void *getAsVoidPointer(const clang::IdentifierInfo* P) {
- return P;
- }
-
- static const clang::IdentifierInfo *getFromVoidPointer(const void *P) {
- return static_cast<const clang::IdentifierInfo*>(P);
- }
-
- static constexpr int NumLowBitsAvailable = 1;
-};
-
} // namespace llvm
#endif // LLVM_CLANG_BASIC_IDENTIFIERTABLE_H
diff --git a/contrib/llvm-project/clang/include/clang/Basic/JsonSupport.h b/contrib/llvm-project/clang/include/clang/Basic/JsonSupport.h
index 8b02e440df44..bcaa3d364444 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/JsonSupport.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/JsonSupport.h
@@ -12,6 +12,7 @@
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include <iterator>
@@ -70,7 +71,7 @@ inline std::string JsonFormat(StringRef RawSR, bool AddQuotes) {
}
// Remove new-lines.
- Str.erase(std::remove(Str.begin(), Str.end(), '\n'), Str.end());
+ llvm::erase(Str, '\n');
if (!AddQuotes)
return Str;
@@ -98,18 +99,15 @@ inline void printSourceLocationAsJson(raw_ostream &Out, SourceLocation Loc,
if (AddBraces)
Out << "{ ";
std::string filename(PLoc.getFilename());
-#ifdef _WIN32
- // Remove forbidden Windows path characters
- auto RemoveIt =
- std::remove_if(filename.begin(), filename.end(), [](auto Char) {
- static const char ForbiddenChars[] = "<>*?\"|";
- return std::find(std::begin(ForbiddenChars), std::end(ForbiddenChars),
- Char) != std::end(ForbiddenChars);
- });
- filename.erase(RemoveIt, filename.end());
- // Handle windows-specific path delimiters.
- std::replace(filename.begin(), filename.end(), '\\', '/');
-#endif
+ if (is_style_windows(llvm::sys::path::Style::native)) {
+ // Remove forbidden Windows path characters
+ llvm::erase_if(filename, [](auto Char) {
+ static const char ForbiddenChars[] = "<>*?\"|";
+ return llvm::is_contained(ForbiddenChars, Char);
+ });
+ // Handle windows-specific path delimiters.
+ std::replace(filename.begin(), filename.end(), '\\', '/');
+ }
Out << "\"line\": " << PLoc.getLine()
<< ", \"column\": " << PLoc.getColumn()
<< ", \"file\": \"" << filename << "\"";
diff --git a/contrib/llvm-project/clang/include/clang/Basic/LLVM.h b/contrib/llvm-project/clang/include/clang/Basic/LLVM.h
index 4ac2d744af3c..f4956cd16cbc 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/LLVM.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/LLVM.h
@@ -19,9 +19,6 @@
// dependencies.
// Casting.h has complex templates that cannot be easily forward declared.
#include "llvm/Support/Casting.h"
-// None.h includes an enumerator that is desired & cannot be forward declared
-// without a definition of NoneType.
-#include "llvm/ADT/None.h"
// Add this header as a workaround to prevent `too few template arguments for
// class template 'SmallVector'` building error with build compilers like XL.
#include "llvm/ADT/SmallVector.h"
@@ -37,7 +34,6 @@ namespace llvm {
template<unsigned InternalLen> class SmallString;
template<typename T, unsigned N> class SmallVector;
template<typename T> class SmallVectorImpl;
- template<typename T> class Optional;
template <class T> class Expected;
template<typename T>
@@ -58,16 +54,17 @@ namespace clang {
// Casting operators.
using llvm::isa;
using llvm::isa_and_nonnull;
+ using llvm::isa_and_present;
using llvm::cast;
using llvm::dyn_cast;
using llvm::dyn_cast_or_null;
+ using llvm::dyn_cast_if_present;
using llvm::cast_or_null;
+ using llvm::cast_if_present;
// ADT's.
using llvm::ArrayRef;
using llvm::MutableArrayRef;
- using llvm::None;
- using llvm::Optional;
using llvm::OwningArrayRef;
using llvm::SaveAndRestore;
using llvm::SmallString;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Lambda.h b/contrib/llvm-project/clang/include/clang/Basic/Lambda.h
index 853821a33c2a..de01d6f33c01 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Lambda.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Lambda.h
@@ -32,7 +32,7 @@ enum LambdaCaptureDefault {
/// is an expression.
enum LambdaCaptureKind {
LCK_This, ///< Capturing the \c *this object by reference
- LCK_StarThis, /// < Capturing the \c *this object by copy
+ LCK_StarThis, ///< Capturing the \c *this object by copy
LCK_ByCopy, ///< Capturing by copy (a.k.a., by value)
LCK_ByRef, ///< Capturing by reference
LCK_VLAType ///< Capturing variable-length array type
diff --git a/contrib/llvm-project/clang/include/clang/Basic/LangOptions.def b/contrib/llvm-project/clang/include/clang/Basic/LangOptions.def
index 74deba6ef7fb..4942dcaa086e 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/LangOptions.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/LangOptions.def
@@ -7,7 +7,11 @@
//===----------------------------------------------------------------------===//
//
// This file defines the language options. Users of this file must
-// define the LANGOPT macro to make use of this information.
+// define the LANGOPT macro to make use of this information. The arguments to
+// the macro are:
+// LANGOPT(Name, Bits, DefaultValue, Description)
+// Note that the DefaultValue must be a constant value (literal or enumeration);
+// it cannot depend on the value of another language option.
//
// Optionally, the user may also define:
//
@@ -82,8 +86,9 @@
LANGOPT(C99 , 1, 0, "C99")
LANGOPT(C11 , 1, 0, "C11")
LANGOPT(C17 , 1, 0, "C17")
-LANGOPT(C2x , 1, 0, "C2x")
+LANGOPT(C23 , 1, 0, "C23")
LANGOPT(MSVCCompat , 1, 0, "Microsoft Visual C++ full compatibility mode")
+LANGOPT(Kernel , 1, 0, "Kernel mode")
LANGOPT(MicrosoftExt , 1, 0, "Microsoft C++ extensions")
LANGOPT(AsmBlocks , 1, 0, "Microsoft inline asm blocks")
LANGOPT(Borland , 1, 0, "Borland extensions")
@@ -92,7 +97,8 @@ LANGOPT(CPlusPlus11 , 1, 0, "C++11")
LANGOPT(CPlusPlus14 , 1, 0, "C++14")
LANGOPT(CPlusPlus17 , 1, 0, "C++17")
LANGOPT(CPlusPlus20 , 1, 0, "C++20")
-LANGOPT(CPlusPlus2b , 1, 0, "C++2b")
+LANGOPT(CPlusPlus23 , 1, 0, "C++23")
+LANGOPT(CPlusPlus26 , 1, 0, "C++26")
LANGOPT(ObjC , 1, 0, "Objective-C")
BENIGN_LANGOPT(ObjCDefaultSynthProperties , 1, 0,
"Objective-C auto-synthesized properties")
@@ -107,7 +113,7 @@ LANGOPT(Trigraphs , 1, 0,"trigraphs")
LANGOPT(LineComment , 1, 0, "'//' comments")
LANGOPT(Bool , 1, 0, "bool, true, and false keywords")
LANGOPT(Half , 1, 0, "half keyword")
-LANGOPT(WChar , 1, CPlusPlus, "wchar_t keyword")
+LANGOPT(WChar , 1, 0, "wchar_t keyword")
LANGOPT(Char8 , 1, 0, "char8_t keyword")
LANGOPT(IEEE128 , 1, 0, "__ieee128 keyword")
LANGOPT(DeclSpecKeyword , 1, 0, "__declspec keyword")
@@ -116,9 +122,9 @@ BENIGN_LANGOPT(AsmPreprocessor, 1, 0, "preprocessor in asm mode")
LANGOPT(GNUMode , 1, 1, "GNU extensions")
LANGOPT(GNUKeywords , 1, 1, "GNU keywords")
VALUE_LANGOPT(GNUCVersion , 32, 0, "GNU C compatibility version")
-BENIGN_LANGOPT(ImplicitInt, 1, !C99 && !CPlusPlus, "C89 implicit 'int'")
+LANGOPT(DisableKNRFunctions, 1, 0, "require function types to have a prototype")
LANGOPT(Digraphs , 1, 0, "digraphs")
-BENIGN_LANGOPT(HexFloats , 1, C99, "C99 hexadecimal float constants")
+BENIGN_LANGOPT(HexFloats , 1, 0, "C99 hexadecimal float constants")
LANGOPT(CXXOperatorNames , 1, 0, "C++ operator name keywords")
LANGOPT(AppleKext , 1, 0, "Apple kext support")
BENIGN_LANGOPT(PascalStrings, 1, 0, "Pascal string support")
@@ -139,17 +145,21 @@ ENUM_LANGOPT(ExceptionHandling, ExceptionHandlingKind, 3,
ExceptionHandlingKind::None, "exception handling")
LANGOPT(IgnoreExceptions , 1, 0, "ignore exceptions")
LANGOPT(ExternCNoUnwind , 1, 0, "Assume extern C functions don't unwind")
+LANGOPT(AssumeNothrowExceptionDtor , 1, 0, "Assume exception object's destructor is nothrow")
LANGOPT(TraditionalCPP , 1, 0, "traditional CPP emulation")
LANGOPT(RTTI , 1, 1, "run-time type information")
LANGOPT(RTTIData , 1, 1, "emit run-time type information data")
LANGOPT(MSBitfields , 1, 0, "Microsoft-compatible structure layout")
+LANGOPT(MSVolatile , 1, 0, "Microsoft-compatible volatile loads and stores")
LANGOPT(Freestanding, 1, 0, "freestanding implementation")
LANGOPT(NoBuiltin , 1, 0, "disable builtin functions")
LANGOPT(NoMathBuiltin , 1, 0, "disable math builtin functions")
LANGOPT(GNUAsm , 1, 1, "GNU-style inline assembly")
LANGOPT(Coroutines , 1, 0, "C++20 coroutines")
+LANGOPT(CoroAlignedAllocation, 1, 0, "prefer Aligned Allocation according to P2014 Option 2")
LANGOPT(DllExportInlines , 1, 1, "dllexported classes dllexport inline methods")
LANGOPT(RelaxedTemplateTemplateArgs, 1, 0, "C++17 relaxed matching of template template arguments")
+LANGOPT(ExperimentalLibrary, 1, 0, "enable unstable and experimental library features")
LANGOPT(DoubleSquareBracketAttributes, 1, 0, "'[[]]' attributes extension for all language standard modes")
@@ -163,9 +173,10 @@ BENIGN_LANGOPT(EmitAllDecls , 1, 0, "emitting all declarations")
LANGOPT(MathErrno , 1, 1, "errno in math functions")
BENIGN_LANGOPT(HeinousExtensions , 1, 0, "extensions that we really don't like and may be ripped out at any time")
LANGOPT(Modules , 1, 0, "modules semantics")
-COMPATIBLE_LANGOPT(ModulesTS , 1, 0, "C++ Modules TS syntax")
COMPATIBLE_LANGOPT(CPlusPlusModules, 1, 0, "C++ modules syntax")
-BENIGN_ENUM_LANGOPT(CompilingModule, CompilingModuleKind, 2, CMK_None,
+LANGOPT(SkipODRCheckInGMF, 1, 0, "Skip ODR checks for decls in the global module fragment")
+LANGOPT(BuiltinHeadersInSystemModules, 1, 0, "builtin headers belong to system modules, and _Builtin_ modules are ignored for cstdlib headers")
+BENIGN_ENUM_LANGOPT(CompilingModule, CompilingModuleKind, 3, CMK_None,
"compiling a module interface")
BENIGN_LANGOPT(CompilingPCH, 1, 0, "building a pch")
BENIGN_LANGOPT(BuildingPCHWithObjectFile, 1, 0, "building a pch which has a corresponding object file")
@@ -174,6 +185,7 @@ BENIGN_LANGOPT(PCHInstantiateTemplates, 1, 0, "instantiate templates while build
COMPATIBLE_LANGOPT(ModulesDeclUse , 1, 0, "require declaration of module uses")
BENIGN_LANGOPT(ModulesSearchAll , 1, 1, "searching even non-imported modules to find unresolved references")
COMPATIBLE_LANGOPT(ModulesStrictDeclUse, 1, 0, "requiring declaration of module uses and all headers to be in modules")
+COMPATIBLE_LANGOPT(ModulesValidateTextualHeaderIncludes, 1, 1, "validation of textual header includes")
BENIGN_LANGOPT(ModulesErrorRecovery, 1, 1, "automatically importing modules as needed when performing error recovery")
BENIGN_LANGOPT(ImplicitModules, 1, 1, "building modules that are not specified via -fmodule-file")
COMPATIBLE_LANGOPT(ModulesLocalVisibility, 1, 0, "local submodule visibility")
@@ -189,6 +201,7 @@ VALUE_LANGOPT(DoubleSize , 32, 0, "width of double")
VALUE_LANGOPT(LongDoubleSize , 32, 0, "width of long double")
LANGOPT(PPCIEEELongDouble , 1, 0, "use IEEE 754 quadruple-precision for long double")
LANGOPT(EnableAIXExtendedAltivecABI , 1, 0, "__EXTABI__ predefined macro")
+LANGOPT(EnableAIXQuadwordAtomicsABI , 1, 0, "Use 16-byte atomic lock free semantics")
COMPATIBLE_VALUE_LANGOPT(PICLevel , 2, 0, "__PIC__ level")
COMPATIBLE_VALUE_LANGOPT(PIE , 1, 0, "is pie")
LANGOPT(ROPI , 1, 0, "Read-only position independence")
@@ -208,6 +221,8 @@ BENIGN_LANGOPT(NoSignedZero , 1, 0, "Permit Floating Point optimization wit
BENIGN_LANGOPT(AllowRecip , 1, 0, "Permit Floating Point reciprocal")
BENIGN_LANGOPT(ApproxFunc , 1, 0, "Permit Floating Point approximation")
+ENUM_LANGOPT(ComplexRange, ComplexRangeKind, 2, CX_None, "Enable use of range reduction for complex arithmetics.")
+
BENIGN_LANGOPT(ObjCGCBitmapPrint , 1, 0, "printing of GC's bitmap layout for __weak/__strong ivars")
BENIGN_LANGOPT(AccessControl , 1, 1, "C++ access control")
@@ -227,44 +242,59 @@ LANGOPT(OpenCLGenericAddressSpace, 1, 0, "OpenCL generic keyword")
LANGOPT(OpenCLPipes , 1, 0, "OpenCL pipes language constructs and built-ins")
LANGOPT(NativeHalfType , 1, 0, "Native half type support")
LANGOPT(NativeHalfArgsAndReturns, 1, 0, "Native half args and returns")
-LANGOPT(HalfArgsAndReturns, 1, 0, "half args and returns")
LANGOPT(CUDA , 1, 0, "CUDA")
LANGOPT(HIP , 1, 0, "HIP")
LANGOPT(OpenMP , 32, 0, "OpenMP support and version of OpenMP (31, 40 or 45)")
+LANGOPT(OpenMPExtensions , 1, 1, "Enable all Clang extensions for OpenMP directives and clauses")
LANGOPT(OpenMPSimd , 1, 0, "Use SIMD only OpenMP support.")
LANGOPT(OpenMPUseTLS , 1, 0, "Use TLS for threadprivates or runtime calls")
-LANGOPT(OpenMPIsDevice , 1, 0, "Generate code only for OpenMP target device")
+LANGOPT(OpenMPIsTargetDevice , 1, 0, "Generate code only for OpenMP target device")
LANGOPT(OpenMPCUDAMode , 1, 0, "Generate code for OpenMP pragmas in SIMT/SPMD mode")
LANGOPT(OpenMPIRBuilder , 1, 0, "Use the experimental OpenMP-IR-Builder codegen path.")
-LANGOPT(OpenMPCUDAForceFullRuntime , 1, 0, "Force to use full runtime in all constructs when offloading to CUDA devices")
LANGOPT(OpenMPCUDANumSMs , 32, 0, "Number of SMs for CUDA devices.")
LANGOPT(OpenMPCUDABlocksPerSM , 32, 0, "Number of blocks per SM for CUDA devices.")
LANGOPT(OpenMPCUDAReductionBufNum , 32, 1024, "Number of the reduction records in the intermediate reduction buffer used for the teams reductions.")
-LANGOPT(OpenMPTargetNewRuntime , 1, 0, "Use the new bitcode library for OpenMP offloading")
+LANGOPT(OpenMPTargetDebug , 32, 0, "Enable debugging in the OpenMP offloading device RTL")
LANGOPT(OpenMPOptimisticCollapse , 1, 0, "Use at most 32 bits to represent the collapsed loop nest counter.")
+LANGOPT(OpenMPThreadSubscription , 1, 0, "Assume work-shared loops do not have more iterations than participating threads.")
+LANGOPT(OpenMPTeamSubscription , 1, 0, "Assume distributed loops do not have more iterations than participating teams.")
+LANGOPT(OpenMPNoThreadState , 1, 0, "Assume that no thread in a parallel region will modify an ICV.")
+LANGOPT(OpenMPNoNestedParallelism , 1, 0, "Assume that no thread in a parallel region will encounter a parallel region")
+LANGOPT(OpenMPOffloadMandatory , 1, 0, "Assert that offloading is mandatory and do not create a host fallback.")
+LANGOPT(OpenMPForceUSM , 1, 0, "Enable OpenMP unified shared memory mode via compiler.")
+LANGOPT(NoGPULib , 1, 0, "Indicate a build without the standard GPU libraries.")
LANGOPT(RenderScript , 1, 0, "RenderScript")
+LANGOPT(HLSL, 1, 0, "HLSL")
+ENUM_LANGOPT(HLSLVersion, HLSLLangStd, 16, HLSL_Unset, "HLSL Version")
+
LANGOPT(CUDAIsDevice , 1, 0, "compiling for CUDA device")
LANGOPT(CUDAAllowVariadicFunctions, 1, 0, "allowing variadic functions in CUDA device code")
LANGOPT(CUDAHostDeviceConstexpr, 1, 1, "treating unattributed constexpr functions as __host__ __device__")
-LANGOPT(CUDADeviceApproxTranscendentals, 1, 0, "using approximate transcendental functions")
+LANGOPT(GPUDeviceApproxTranscendentals, 1, 0, "using approximate transcendental functions")
LANGOPT(GPURelocatableDeviceCode, 1, 0, "generate relocatable device code")
+LANGOPT(OffloadImplicitHostDeviceTemplates, 1, 0, "assume template functions to be implicitly host device by default for CUDA/HIP")
LANGOPT(GPUAllowDeviceInit, 1, 0, "allowing device side global init functions for HIP")
LANGOPT(GPUMaxThreadsPerBlock, 32, 1024, "default max threads per block for kernel launch bounds for HIP")
LANGOPT(GPUDeferDiag, 1, 0, "defer host/device related diagnostic messages for CUDA/HIP")
LANGOPT(GPUExcludeWrongSideOverloads, 1, 0, "always exclude wrong side overloads in overloading resolution for CUDA/HIP")
+LANGOPT(OffloadingNewDriver, 1, 0, "use the new driver for generating offloading code.")
LANGOPT(SYCLIsDevice , 1, 0, "Generate code for SYCL device")
LANGOPT(SYCLIsHost , 1, 0, "SYCL host compilation")
ENUM_LANGOPT(SYCLVersion , SYCLMajorVersion, 2, SYCL_None, "Version of the SYCL standard used")
LANGOPT(HIPUseNewLaunchAPI, 1, 0, "Use new kernel launching API for HIP")
+LANGOPT(OffloadUniformBlock, 1, 0, "Assume that kernels are launched with uniform block sizes (default true for CUDA/HIP and false otherwise)")
+LANGOPT(HIPStdPar, 1, 0, "Enable Standard Parallel Algorithm Acceleration for HIP (experimental)")
+LANGOPT(HIPStdParInterposeAlloc, 1, 0, "Replace allocations / deallocations with HIP RT calls when Standard Parallel Algorithm Acceleration for HIP is enabled (Experimental)")
+
+LANGOPT(OpenACC , 1, 0, "OpenACC Enabled")
LANGOPT(SizedDeallocation , 1, 0, "sized deallocation")
LANGOPT(AlignedAllocation , 1, 0, "aligned allocation")
LANGOPT(AlignedAllocationUnavailable, 1, 0, "aligned allocation functions are unavailable")
LANGOPT(NewAlignOverride , 32, 0, "maximum alignment guaranteed by '::operator new(size_t)'")
-LANGOPT(ConceptSatisfactionCaching , 1, 1, "enable satisfaction caching for C++20 Concepts")
BENIGN_LANGOPT(ModulesCodegen , 1, 0, "Modules code generation")
BENIGN_LANGOPT(ModulesDebugInfo , 1, 0, "Modules debug info")
BENIGN_LANGOPT(ElideConstructors , 1, 1, "C++ copy constructor elision")
@@ -275,11 +305,14 @@ BENIGN_LANGOPT(DumpRecordLayoutsComplete , 1, 0, "dumping the AST layout of all
BENIGN_LANGOPT(DumpVTableLayouts , 1, 0, "dumping the layouts of emitted vtables")
LANGOPT(NoConstantCFStrings , 1, 0, "no constant CoreFoundation strings")
BENIGN_LANGOPT(InlineVisibilityHidden , 1, 0, "hidden visibility for inline C++ methods")
+BENIGN_ENUM_LANGOPT(DefaultVisibilityExportMapping, DefaultVisiblityExportMapping, 2, DefaultVisiblityExportMapping::None, "controls mapping of default visibility to dllexport")
BENIGN_LANGOPT(IgnoreXCOFFVisibility, 1, 0, "All the visibility attributes that are specified in the source code are ignored in aix XCOFF.")
BENIGN_LANGOPT(VisibilityInlinesHiddenStaticLocalVar, 1, 0,
"hidden visibility for static local variables in inline C++ "
"methods when -fvisibility-inlines hidden is enabled")
-LANGOPT(GlobalAllocationFunctionVisibilityHidden , 1, 0, "hidden visibility for global operator new and delete declaration")
+ENUM_LANGOPT(GlobalAllocationFunctionVisibility, VisibilityForcedKinds, 3, VisibilityForcedKinds::ForceDefault,
+ "How to apply visibility to global operator new and delete declarations")
+LANGOPT(NewInfallible , 1, 0, "Treats throwing global C++ operator new as always returning valid memory (annotates with __attribute__((returns_nonnull)) and throw()). This is detectable in source.")
BENIGN_LANGOPT(ParseUnknownAnytype, 1, 0, "__unknown_anytype")
BENIGN_LANGOPT(DebuggerSupport , 1, 0, "debugger support")
BENIGN_LANGOPT(DebuggerCastResultToId, 1, 0, "for 'po' in the debugger, cast the result to id if it is of unknown type")
@@ -294,8 +327,11 @@ COMPATIBLE_LANGOPT(CLFiniteMathOnly , 1, 0, "__FINITE_MATH_ONLY__ predefined mac
/// FP_CONTRACT mode (on/off/fast).
BENIGN_ENUM_LANGOPT(DefaultFPContractMode, FPModeKind, 2, FPM_Off, "FP contraction type")
COMPATIBLE_LANGOPT(ExpStrictFP, 1, false, "Enable experimental strict floating point")
-BENIGN_ENUM_LANGOPT(FPRoundingMode, RoundingMode, 3, RoundingMode::NearestTiesToEven, "FP Rounding Mode type")
-BENIGN_ENUM_LANGOPT(FPExceptionMode, FPExceptionModeKind, 2, FPE_Ignore, "FP Exception Behavior Mode type")
+BENIGN_LANGOPT(RoundingMath, 1, false, "Do not assume default floating-point rounding behavior")
+BENIGN_ENUM_LANGOPT(FPExceptionMode, FPExceptionModeKind, 2, FPE_Default, "FP Exception Behavior Mode type")
+BENIGN_ENUM_LANGOPT(FPEvalMethod, FPEvalMethodKind, 2, FEM_UnsetOnCommandLine, "FP type used for floating point arithmetic")
+ENUM_LANGOPT(Float16ExcessPrecision, ExcessPrecisionKind, 2, FPP_Standard, "Intermediate truncation behavior for Float16 arithmetic")
+ENUM_LANGOPT(BFloat16ExcessPrecision, ExcessPrecisionKind, 2, FPP_Standard, "Intermediate truncation behavior for BFloat16 arithmetic")
LANGOPT(NoBitFieldTypeAlign , 1, 0, "bit-field type alignment")
LANGOPT(HexagonQdsp6Compat , 1, 0, "hexagon-qdsp6 backward compatibility")
LANGOPT(ObjCAutoRefCount , 1, 0, "Objective-C automated reference counting")
@@ -320,22 +356,22 @@ LANGOPT(
"type's inheritance model would be determined under the Microsoft ABI")
ENUM_LANGOPT(GC, GCMode, 2, NonGC, "Objective-C Garbage Collection mode")
-ENUM_LANGOPT(ValueVisibilityMode, Visibility, 3, DefaultVisibility,
+BENIGN_ENUM_LANGOPT(ValueVisibilityMode, Visibility, 3, DefaultVisibility,
"default visibility for functions and variables [-fvisibility]")
-ENUM_LANGOPT(TypeVisibilityMode, Visibility, 3, DefaultVisibility,
+BENIGN_ENUM_LANGOPT(TypeVisibilityMode, Visibility, 3, DefaultVisibility,
"default visibility for types [-ftype-visibility]")
LANGOPT(SetVisibilityForExternDecls, 1, 0,
"apply global symbol visibility to external declarations without an explicit visibility")
-LANGOPT(VisibilityFromDLLStorageClass, 1, 0,
- "set the visiblity of globals from their DLL storage class [-fvisibility-from-dllstorageclass]")
-ENUM_LANGOPT(DLLExportVisibility, Visibility, 3, DefaultVisibility,
- "visibility for functions and variables with dllexport annotations [-fvisibility-from-dllstorageclass]")
-ENUM_LANGOPT(NoDLLStorageClassVisibility, Visibility, 3, HiddenVisibility,
- "visibility for functions and variables without an explicit DLL storage class [-fvisibility-from-dllstorageclass]")
-ENUM_LANGOPT(ExternDeclDLLImportVisibility, Visibility, 3, DefaultVisibility,
- "visibility for external declarations with dllimport annotations [-fvisibility-from-dllstorageclass]")
-ENUM_LANGOPT(ExternDeclNoDLLStorageClassVisibility, Visibility, 3, HiddenVisibility,
- "visibility for external declarations without an explicit DLL storage class [-fvisibility-from-dllstorageclass]")
+BENIGN_LANGOPT(VisibilityFromDLLStorageClass, 1, 0,
+ "override the visibility of globals based on their final DLL storage class [-fvisibility-from-dllstorageclass]")
+BENIGN_ENUM_LANGOPT(DLLExportVisibility, VisibilityFromDLLStorageClassKinds, 3, VisibilityFromDLLStorageClassKinds::Default,
+ "how to adjust the visibility for functions and variables with dllexport annotations [-fvisibility-dllexport]")
+BENIGN_ENUM_LANGOPT(NoDLLStorageClassVisibility, VisibilityFromDLLStorageClassKinds, 3, VisibilityFromDLLStorageClassKinds::Hidden,
+ "how to adjust the visibility for functions and variables without an explicit DLL storage class [-fvisibility-nodllstorageclass]")
+BENIGN_ENUM_LANGOPT(ExternDeclDLLImportVisibility, VisibilityFromDLLStorageClassKinds, 3, VisibilityFromDLLStorageClassKinds::Default,
+ "how to adjust the visibility for external declarations with dllimport annotations [-fvisibility-externs-dllimport]")
+BENIGN_ENUM_LANGOPT(ExternDeclNoDLLStorageClassVisibility, VisibilityFromDLLStorageClassKinds, 3, VisibilityFromDLLStorageClassKinds::Hidden,
+ "how to adjust the visibility for external declarations without an explicit DLL storage class [-fvisibility-externs-nodllstorageclass]")
BENIGN_LANGOPT(SemanticInterposition , 1, 0, "semantic interposition")
BENIGN_LANGOPT(HalfNoSemanticInterposition, 1, 0,
"Like -fno-semantic-interposition but don't use local aliases")
@@ -345,6 +381,8 @@ ENUM_LANGOPT(TrivialAutoVarInit, TrivialAutoVarInitKind, 2, TrivialAutoVarInitKi
"trivial automatic variable initialization")
VALUE_LANGOPT(TrivialAutoVarInitStopAfter, 32, 0,
"stop trivial automatic variable initialization after the specified number of instances. Must be greater than 0.")
+VALUE_LANGOPT(TrivialAutoVarInitMaxSize, 32, 0,
+ "stop trivial automatic variable initialization if var size exceeds the specified size (in bytes). Must be greater than 0.")
ENUM_LANGOPT(SignedOverflowBehavior, SignedOverflowBehaviorTy, 2, SOB_Undefined,
"signed integer overflow handling")
ENUM_LANGOPT(ThreadModel , ThreadModelKind, 2, ThreadModelKind::POSIX, "Thread Model")
@@ -373,6 +411,9 @@ LANGOPT(XLPragmaPack, 1, 0, "IBM XL #pragma pack handling")
LANGOPT(RetainCommentsFromSystemHeaders, 1, 0, "retain documentation comments from system headers in the AST")
+LANGOPT(APINotes, 1, 0, "use external API notes")
+LANGOPT(APINotesModules, 1, 0, "use module-based external API notes")
+
LANGOPT(SanitizeAddressFieldPadding, 2, 0, "controls how aggressive is ASan "
"field padding (0: none, 1:least "
"aggressive, 2: more aggressive)")
@@ -397,6 +438,7 @@ ENUM_LANGOPT(ClangABICompat, ClangABI, 4, ClangABI::Latest,
"with")
COMPATIBLE_VALUE_LANGOPT(FunctionAlignment, 5, 0, "Default alignment for functions")
+COMPATIBLE_VALUE_LANGOPT(LoopAlignment, 32, 0, "Default alignment for loops")
LANGOPT(FixedPoint, 1, 0, "fixed point types")
LANGOPT(PaddingOnUnsignedFixedPoint, 1, 0,
@@ -404,8 +446,14 @@ LANGOPT(PaddingOnUnsignedFixedPoint, 1, 0,
LANGOPT(RegisterStaticDestructors, 1, 1, "Register C++ static destructors")
+LANGOPT(RegCall4, 1, 0, "Set __regcall4 as a default calling convention to respect __regcall ABI v.4")
+
LANGOPT(MatrixTypes, 1, 0, "Enable or disable the builtin matrix type")
+ENUM_LANGOPT(StrictFlexArraysLevel, StrictFlexArraysLevelKind, 2,
+ StrictFlexArraysLevelKind::Default,
+ "Rely on strict definition of flexible arrays")
+
COMPATIBLE_VALUE_LANGOPT(MaxTokens, 32, 0, "Max number of tokens per TU or 0")
ENUM_LANGOPT(SignReturnAddressScope, SignReturnAddressScopeKind, 2, SignReturnAddressScopeKind::None,
@@ -413,18 +461,38 @@ ENUM_LANGOPT(SignReturnAddressScope, SignReturnAddressScopeKind, 2, SignReturnAd
ENUM_LANGOPT(SignReturnAddressKey, SignReturnAddressKeyKind, 1, SignReturnAddressKeyKind::AKey,
"Key used for return address signing")
LANGOPT(BranchTargetEnforcement, 1, 0, "Branch-target enforcement enabled")
+LANGOPT(BranchProtectionPAuthLR, 1, 0, "Use PC as a diversifier using PAuthLR NOP instructions.")
+LANGOPT(GuardedControlStack, 1, 0, "Guarded control stack enabled")
LANGOPT(SpeculativeLoadHardening, 1, 0, "Speculative load hardening enabled")
LANGOPT(RelativeCXXABIVTables, 1, 0,
"Use an ABI-incompatible v-table layout that uses relative references")
-LANGOPT(ArmSveVectorBits, 32, 0, "SVE vector size in bits")
+LANGOPT(OmitVTableRTTI, 1, 0,
+ "Use an ABI-incompatible v-table layout that omits the RTTI component")
-ENUM_LANGOPT(ExtendIntArgs, ExtendArgsKind, 1, ExtendArgsKind::ExtendTo32,
+LANGOPT(VScaleMin, 32, 0, "Minimum vscale value")
+LANGOPT(VScaleMax, 32, 0, "Maximum vscale value")
+
+ENUM_LANGOPT(ExtendIntArgs, ExtendArgsKind, 1, ExtendArgsKind::ExtendTo32,
"Controls how scalar integer arguments are extended in calls "
"to unprototyped and varargs functions")
+VALUE_LANGOPT(FuchsiaAPILevel, 32, 0, "Fuchsia API level")
+
+// This option will be removed in the future once the backend
+// supports all operations (like division or float-to-integer conversion)
+// on large _BitInts.
+BENIGN_VALUE_LANGOPT(MaxBitIntWidth, 32, 128, "Maximum width of a _BitInt")
+
+LANGOPT(IncrementalExtensions, 1, 0, " True if we want to process statements"
+ "on the global scope, ignore EOF token and continue later on (thus "
+ "avoid tearing the Lexer and etc. down). Controlled by "
+ "-fincremental-extensions.")
+
+BENIGN_LANGOPT(CheckNew, 1, 0, "Do not assume C++ operator new may not return NULL")
+
#undef LANGOPT
#undef COMPATIBLE_LANGOPT
#undef BENIGN_LANGOPT
diff --git a/contrib/llvm-project/clang/include/clang/Basic/LangOptions.h b/contrib/llvm-project/clang/include/clang/Basic/LangOptions.h
index b60b94a1ba08..c1cc5548ef10 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/LangOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/LangOptions.h
@@ -23,7 +23,8 @@
#include "clang/Basic/Visibility.h"
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/TargetParser/Triple.h"
+#include <optional>
#include <string>
#include <vector>
@@ -33,6 +34,7 @@ namespace clang {
/// this large collection of bitfields is a trivial class type.
class LangOptionsBase {
friend class CompilerInvocation;
+ friend class CompilerInvocationBase;
public:
// Define simple language options (with no accessors).
@@ -53,6 +55,29 @@ protected:
/// members used to implement virtual inheritance.
enum class MSVtorDispMode { Never, ForVBaseOverride, ForVFTable };
+/// Shader programs run in specific pipeline stages.
+/// The order of these values matters, and must be kept in sync with the
+/// Triple Environment enum in llvm::Triple. The ordering is enforced in
+/// static_asserts in Triple.cpp and in clang/Basic/HLSLRuntime.h.
+enum class ShaderStage {
+ Pixel = 0,
+ Vertex,
+ Geometry,
+ Hull,
+ Domain,
+ Compute,
+ Library,
+ RayGeneration,
+ Intersection,
+ AnyHit,
+ ClosestHit,
+ Miss,
+ Callable,
+ Mesh,
+ Amplification,
+ Invalid,
+};
+
/// Keeps track of the various options that can be
/// enabled, which controls the dialect of C or C++ that is accepted.
class LangOptions : public LangOptionsBase {
@@ -87,10 +112,10 @@ public:
/// Compiling a module from a module map.
CMK_ModuleMap,
- /// Compiling a module from a list of header files.
- CMK_HeaderModule,
+ /// Compiling a module header unit.
+ CMK_HeaderUnit,
- /// Compiling a C++ modules TS module interface unit.
+ /// Compiling a C++ modules interface unit.
CMK_ModuleInterface,
};
@@ -109,7 +134,8 @@ public:
DCC_FastCall,
DCC_StdCall,
DCC_VectorCall,
- DCC_RegCall
+ DCC_RegCall,
+ DCC_RtdCall
};
enum AddrSpaceMapMangling { ASMM_Target, ASMM_On, ASMM_Off };
@@ -124,7 +150,9 @@ public:
MSVC2017_5 = 1912,
MSVC2017_7 = 1914,
MSVC2019 = 1920,
+ MSVC2019_5 = 1925,
MSVC2019_8 = 1928,
+ MSVC2022_3 = 1933,
};
enum SYCLMajorVersion {
@@ -136,6 +164,16 @@ public:
SYCL_Default = SYCL_2020
};
+ enum HLSLLangStd {
+ HLSL_Unset = 0,
+ HLSL_2015 = 2015,
+ HLSL_2016 = 2016,
+ HLSL_2017 = 2017,
+ HLSL_2018 = 2018,
+ HLSL_2021 = 2021,
+ HLSL_202x = 2029,
+ };
+
/// Clang versions with different platform ABI conformance.
enum class ClangABI {
/// Attempt to be ABI-compatible with code generated by Clang 3.8.x
@@ -180,6 +218,30 @@ public:
/// global-scope inline variables incorrectly.
Ver12,
+ /// Attempt to be ABI-compatible with code generated by Clang 14.0.x.
+ /// This causes clang to:
+ /// - mangle dependent nested names incorrectly.
+ /// - make trivial only those defaulted copy constructors with a
+ /// parameter-type-list equivalent to the parameter-type-list of an
+ /// implicit declaration.
+ Ver14,
+
+ /// Attempt to be ABI-compatible with code generated by Clang 15.0.x.
+ /// This causes clang to:
+ /// - Reverse the implementation for DR692, DR1395 and DR1432.
+ /// - pack non-POD members of packed structs.
+ /// - consider classes with defaulted special member functions non-pod.
+ Ver15,
+
+ /// Attempt to be ABI-compatible with code generated by Clang 17.0.x.
+ /// This causes clang to revert some fixes to its implementation of the
+ /// Itanium name mangling scheme, with the consequence that overloaded
+ /// function templates are mangled the same if they differ only by:
+ /// - constraints
+ /// - whether a non-type template parameter has a deduced type
+ /// - the parameter list of a template template parameter
+ Ver17,
+
/// Conform to the underlying platform's C and C++ ABIs as closely
/// as we can.
Latest
@@ -216,10 +278,6 @@ public:
FPM_FastHonorPragmas
};
- /// Alias for RoundingMode::NearestTiesToEven.
- static constexpr unsigned FPR_ToNearest =
- static_cast<unsigned>(llvm::RoundingMode::NearestTiesToEven);
-
/// Possible floating point exception behavior.
enum FPExceptionModeKind {
/// Assume that floating-point exceptions are masked.
@@ -227,9 +285,31 @@ public:
/// Transformations do not cause new exceptions but may hide some.
FPE_MayTrap,
/// Strictly preserve the floating-point exception semantics.
- FPE_Strict
+ FPE_Strict,
+ /// Used internally to represent initial unspecified value.
+ FPE_Default
+ };
+
+ /// Possible float expression evaluation method choices.
+ enum FPEvalMethodKind {
+ /// The evaluation method cannot be determined or is inconsistent for this
+ /// target.
+ FEM_Indeterminable = -1,
+ /// Use the declared type for fp arithmetic.
+ FEM_Source = 0,
+ /// Use the type double for fp arithmetic.
+ FEM_Double = 1,
+ /// Use extended type for fp arithmetic.
+ FEM_Extended = 2,
+ /// Used only for FE option processing; this is only used to indicate that
+ /// the user did not specify an explicit evaluation method on the command
+ /// line and so the target should be queried for its default evaluation
+ /// method instead.
+ FEM_UnsetOnCommandLine = 3
};
+ enum ExcessPrecisionKind { FPP_Standard, FPP_Fast, FPP_None };
+
/// Possible exception handling behavior.
enum class ExceptionHandlingKind { None, SjLj, WinEH, DwarfCFI, Wasm };
@@ -286,6 +366,56 @@ public:
ExtendTo64
};
+ enum class GPUDefaultStreamKind {
+ /// Legacy default stream
+ Legacy,
+ /// Per-thread default stream
+ PerThread,
+ };
+
+ enum class DefaultVisiblityExportMapping {
+ None,
+ /// map only explicit default visibilities to exported
+ Explicit,
+ /// map all default visibilities to exported
+ All,
+ };
+
+ enum class VisibilityForcedKinds {
+ /// Force hidden visibility
+ ForceHidden,
+ /// Force protected visibility
+ ForceProtected,
+ /// Force default visibility
+ ForceDefault,
+ /// Don't alter the visibility
+ Source,
+ };
+
+ enum class VisibilityFromDLLStorageClassKinds {
+ /// Keep the IR-gen assigned visibility.
+ Keep,
+ /// Override the IR-gen assigned visibility with default visibility.
+ Default,
+ /// Override the IR-gen assigned visibility with hidden visibility.
+ Hidden,
+ /// Override the IR-gen assigned visibility with protected visibility.
+ Protected,
+ };
+
+ enum class StrictFlexArraysLevelKind {
+ /// Any trailing array member is a FAM.
+ Default = 0,
+ /// Any trailing array member of undefined, 0, or 1 size is a FAM.
+ OneZeroOrIncomplete = 1,
+ /// Any trailing array member of undefined or 0 size is a FAM.
+ ZeroOrIncomplete = 2,
+ /// Any trailing array member of undefined size is a FAM.
+ IncompleteOnly = 3,
+ };
+
+ enum ComplexRangeKind { CX_Full, CX_Limited, CX_Fortran, CX_None };
+
public:
/// The used language standard.
LangStandard::Kind LangStd;
@@ -373,14 +503,51 @@ public:
/// C++ ABI to compile with, if specified by the frontend through -fc++-abi=.
/// This overrides the default ABI used by the target.
- llvm::Optional<TargetCXXABI::Kind> CXXABI;
+ std::optional<TargetCXXABI::Kind> CXXABI;
/// Indicates whether the front-end is explicitly told that the
/// input is a header file (i.e. -x c-header).
bool IsHeaderFile = false;
+ /// The default stream kind used for HIP kernel launching.
+ GPUDefaultStreamKind GPUDefaultStream;
+
+ /// The seed used by the randomize structure layout feature.
+ std::string RandstructSeed;
+
+ /// Indicates whether to use target's platform-specific file separator when
+ /// __FILE__ macro is used and when concatenating filename with directory or
+ /// to use build environment environment's platform-specific file separator.
+ ///
+ /// The plaform-specific path separator is the backslash(\) for Windows and
+ /// forward slash (/) elsewhere.
+ bool UseTargetPathSeparator = false;
+
+ // Indicates whether we should keep all nullptr checks for pointers
+ // received as a result of a standard operator new (-fcheck-new)
+ bool CheckNew = false;
+
+ // In OpenACC mode, contains a user provided override for the _OPENACC macro.
+ // This exists so that we can override the macro value and test our incomplete
+ // implementation on real-world examples.
+ std::string OpenACCMacroOverride;
+
LangOptions();
+ /// Set language defaults for the given input language and
+ /// language standard in the given LangOptions object.
+ ///
+ /// \param Opts - The LangOptions object to set up.
+ /// \param Lang - The input language.
+ /// \param T - The target triple.
+ /// \param Includes - If the language requires extra headers to be implicitly
+ /// included, they will be appended to this list.
+ /// \param LangStd - The input language standard.
+ static void
+ setLangDefaults(LangOptions &Opts, Language Lang, const llvm::Triple &T,
+ std::vector<std::string> &Includes,
+ LangStandard::Kind LangStd = LangStandard::lang_unspecified);
+
// Define accessors/mutators for language options of enumeration type.
#define LANGOPT(Name, Bits, Default, Description)
#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
@@ -388,11 +555,21 @@ public:
void set##Name(Type Value) { Name = static_cast<unsigned>(Value); }
#include "clang/Basic/LangOptions.def"
- /// Are we compiling a module interface (.cppm or module map)?
+ /// Are we compiling a module?
bool isCompilingModule() const {
return getCompilingModule() != CMK_None;
}
+ /// Are we compiling a standard c++ module interface?
+ bool isCompilingModuleInterface() const {
+ return getCompilingModule() == CMK_ModuleInterface;
+ }
+
+ /// Are we compiling a module implementation?
+ bool isCompilingModuleImplementation() const {
+ return !isCompilingModule() && !ModuleName.empty();
+ }
+
/// Do we need to track the owning module for a local declaration?
bool trackLocalOwningModule() const {
return isCompilingModule() || ModulesLocalVisibility;
@@ -431,6 +608,34 @@ public:
/// Return the OpenCL C or C++ version as a VersionTuple.
VersionTuple getOpenCLVersionTuple() const;
+ /// Return the OpenCL version that kernel language is compatible with
+ unsigned getOpenCLCompatibleVersion() const;
+
+ /// Return the OpenCL C or C++ for OpenCL language name and version
+ /// as a string.
+ std::string getOpenCLVersionString() const;
+
+ /// Returns true if functions without prototypes or functions with an
+ /// identifier list (aka K&R C functions) are not allowed.
+ bool requiresStrictPrototypes() const {
+ return CPlusPlus || C23 || DisableKNRFunctions;
+ }
+
+ /// Returns true if implicit function declarations are allowed in the current
+ /// language mode.
+ bool implicitFunctionsAllowed() const {
+ return !requiresStrictPrototypes() && !OpenCL;
+ }
+
+ /// Returns true if the language supports calling the 'atexit' function.
+ bool hasAtExit() const { return !(OpenMP && OpenMPIsTargetDevice); }
+
+ /// Returns true if implicit int is part of the language requirements.
+ bool isImplicitIntRequired() const { return !CPlusPlus && !C99; }
+
+ /// Returns true if implicit int is supported at all.
+ bool isImplicitIntAllowed() const { return !CPlusPlus && !C23; }
+
/// Check if return address signing is enabled.
bool hasSignReturnAddress() const {
return getSignReturnAddressScope() != SignReturnAddressScopeKind::None;
@@ -464,8 +669,55 @@ public:
bool isSYCL() const { return SYCLIsDevice || SYCLIsHost; }
+ bool hasDefaultVisibilityExportMapping() const {
+ return getDefaultVisibilityExportMapping() !=
+ DefaultVisiblityExportMapping::None;
+ }
+
+ bool isExplicitDefaultVisibilityExportMapping() const {
+ return getDefaultVisibilityExportMapping() ==
+ DefaultVisiblityExportMapping::Explicit;
+ }
+
+ bool isAllDefaultVisibilityExportMapping() const {
+ return getDefaultVisibilityExportMapping() ==
+ DefaultVisiblityExportMapping::All;
+ }
+
+ bool hasGlobalAllocationFunctionVisibility() const {
+ return getGlobalAllocationFunctionVisibility() !=
+ VisibilityForcedKinds::Source;
+ }
+
+ bool hasDefaultGlobalAllocationFunctionVisibility() const {
+ return getGlobalAllocationFunctionVisibility() ==
+ VisibilityForcedKinds::ForceDefault;
+ }
+
+ bool hasProtectedGlobalAllocationFunctionVisibility() const {
+ return getGlobalAllocationFunctionVisibility() ==
+ VisibilityForcedKinds::ForceProtected;
+ }
+
+ bool hasHiddenGlobalAllocationFunctionVisibility() const {
+ return getGlobalAllocationFunctionVisibility() ==
+ VisibilityForcedKinds::ForceHidden;
+ }
+
/// Remap path prefix according to -fmacro-prefix-path option.
- void remapPathPrefix(SmallString<256> &Path) const;
+ void remapPathPrefix(SmallVectorImpl<char> &Path) const;
+
+ RoundingMode getDefaultRoundingMode() const {
+ return RoundingMath ? RoundingMode::Dynamic
+ : RoundingMode::NearestTiesToEven;
+ }
+
+ FPExceptionModeKind getDefaultExceptionMode() const {
+ FPExceptionModeKind EM = getFPExceptionMode();
+ if (EM == FPExceptionModeKind::FPE_Default)
+ return FPExceptionModeKind::FPE_Ignore;
+ return EM;
+ }
};
/// Floating point control options
@@ -473,7 +725,7 @@ class FPOptionsOverride;
class FPOptions {
public:
// We start by defining the layout.
- using storage_type = uint16_t;
+ using storage_type = uint32_t;
using RoundingMode = llvm::RoundingMode;
@@ -499,11 +751,13 @@ public:
private:
storage_type Value;
+ FPOptionsOverride getChangesSlow(const FPOptions &Base) const;
+
public:
FPOptions() : Value(0) {
setFPContractMode(LangOptions::FPM_Off);
- setRoundingMode(static_cast<RoundingMode>(LangOptions::FPR_ToNearest));
- setFPExceptionMode(LangOptions::FPE_Ignore);
+ setConstRoundingMode(RoundingMode::Dynamic);
+ setSpecifiedExceptionMode(LangOptions::FPE_Default);
}
explicit FPOptions(const LangOptions &LO) {
Value = 0;
@@ -514,8 +768,9 @@ public:
if (LangOptContractMode == LangOptions::FPM_FastHonorPragmas)
LangOptContractMode = LangOptions::FPM_Fast;
setFPContractMode(LangOptContractMode);
- setRoundingMode(LO.getFPRoundingMode());
- setFPExceptionMode(LO.getFPExceptionMode());
+ setRoundingMath(LO.RoundingMath);
+ setConstRoundingMode(LangOptions::RoundingMode::Dynamic);
+ setSpecifiedExceptionMode(LO.getFPExceptionMode());
setAllowFPReassociate(LO.AllowFPReassoc);
setNoHonorNaNs(LO.NoHonorNaNs);
setNoHonorInfs(LO.NoHonorInfs);
@@ -524,12 +779,13 @@ public:
setAllowApproxFunc(LO.ApproxFunc);
if (getFPContractMode() == LangOptions::FPM_On &&
getRoundingMode() == llvm::RoundingMode::Dynamic &&
- getFPExceptionMode() == LangOptions::FPE_Strict)
+ getExceptionMode() == LangOptions::FPE_Strict)
// If the FP settings are set to the "strict" model, then
// FENV access is set to true. (ffp-model=strict)
setAllowFEnvAccess(true);
else
setAllowFEnvAccess(LangOptions::FPM_Off);
+ setComplexRange(LO.getComplexRange());
}
bool allowFPContractWithinStatement() const {
@@ -548,10 +804,33 @@ public:
bool isFPConstrained() const {
return getRoundingMode() != llvm::RoundingMode::NearestTiesToEven ||
- getFPExceptionMode() != LangOptions::FPE_Ignore ||
+ getExceptionMode() != LangOptions::FPE_Ignore ||
getAllowFEnvAccess();
}
+ RoundingMode getRoundingMode() const {
+ RoundingMode RM = getConstRoundingMode();
+ if (RM == RoundingMode::Dynamic) {
+ // C23: 7.6.2p3 If the FE_DYNAMIC mode is specified and FENV_ACCESS is
+ // "off", the translator may assume that the default rounding mode is in
+ // effect.
+ if (!getAllowFEnvAccess() && !getRoundingMath())
+ RM = RoundingMode::NearestTiesToEven;
+ }
+ return RM;
+ }
+
+ LangOptions::FPExceptionModeKind getExceptionMode() const {
+ LangOptions::FPExceptionModeKind EM = getSpecifiedExceptionMode();
+ if (EM == LangOptions::FPExceptionModeKind::FPE_Default) {
+ if (getAllowFEnvAccess())
+ return LangOptions::FPExceptionModeKind::FPE_Strict;
+ else
+ return LangOptions::FPExceptionModeKind::FPE_Ignore;
+ }
+ return EM;
+ }
+
bool operator==(FPOptions other) const { return Value == other.Value; }
/// Return the default value of FPOptions that's used when trailing
@@ -565,6 +844,9 @@ public:
return Opts;
}
+ /// Return difference with the given option set.
+ FPOptionsOverride getChangesFrom(const FPOptions &Base) const;
+
// We can define most of the accessors automatically:
#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) \
TYPE get##NAME() const { \
@@ -599,7 +881,7 @@ public:
/// The type suitable for storing values of FPOptionsOverride. Must be twice
/// as wide as bit size of FPOption.
- using storage_type = uint32_t;
+ using storage_type = uint64_t;
static_assert(sizeof(storage_type) >= 2 * sizeof(FPOptions::storage_type),
"Too short type for FPOptionsOverride");
@@ -613,6 +895,8 @@ public:
: Options(LO), OverrideMask(OverrideMaskBits) {}
FPOptionsOverride(FPOptions FPO)
: Options(FPO), OverrideMask(OverrideMaskBits) {}
+ FPOptionsOverride(FPOptions FPO, FPOptions::storage_type Mask)
+ : Options(FPO), OverrideMask(Mask) {}
bool requiresTrailingStorage() const { return OverrideMask != 0; }
@@ -635,6 +919,7 @@ public:
setNoSignedZeroOverride(!Value);
setAllowReciprocalOverride(!Value);
setAllowApproxFuncOverride(!Value);
+ setMathErrnoOverride(Value);
if (Value)
/* Precise mode implies fp_contract=on and disables ffast-math */
setAllowFPContractWithinStatement();
@@ -693,6 +978,12 @@ public:
LLVM_DUMP_METHOD void dump();
};
+inline FPOptionsOverride FPOptions::getChangesFrom(const FPOptions &Base) const {
+ if (Value == Base.Value)
+ return FPOptionsOverride();
+ return getChangesSlow(Base);
+}
+
/// Describes the kind of translation unit being processed.
enum TranslationUnitKind {
/// The translation unit is a complete translation unit.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/LangStandard.h b/contrib/llvm-project/clang/include/clang/Basic/LangStandard.h
index b0785409628c..bc49669a82ad 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/LangStandard.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/LangStandard.h
@@ -12,6 +12,10 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/StringRef.h"
+namespace llvm {
+class Triple;
+}
+
namespace clang {
/// The language for the input, used to select and validate the language
@@ -36,26 +40,29 @@ enum class Language : uint8_t {
CUDA,
RenderScript,
HIP,
+ HLSL,
///@}
};
+StringRef languageToString(Language L);
enum LangFeatures {
LineComment = (1 << 0),
C99 = (1 << 1),
C11 = (1 << 2),
C17 = (1 << 3),
- C2x = (1 << 4),
+ C23 = (1 << 4),
CPlusPlus = (1 << 5),
CPlusPlus11 = (1 << 6),
CPlusPlus14 = (1 << 7),
CPlusPlus17 = (1 << 8),
CPlusPlus20 = (1 << 9),
- CPlusPlus2b = (1 << 10),
- Digraphs = (1 << 11),
- GNUMode = (1 << 12),
- HexFloat = (1 << 13),
- ImplicitInt = (1 << 14),
- OpenCL = (1 << 15)
+ CPlusPlus23 = (1 << 10),
+ CPlusPlus26 = (1 << 11),
+ Digraphs = (1 << 12),
+ GNUMode = (1 << 13),
+ HexFloat = (1 << 14),
+ OpenCL = (1 << 15),
+ HLSL = (1 << 16)
};
/// LangStandard - Information about the properties of a particular language
@@ -95,8 +102,8 @@ public:
/// isC17 - Language is a superset of C17.
bool isC17() const { return Flags & C17; }
- /// isC2x - Language is a superset of C2x.
- bool isC2x() const { return Flags & C2x; }
+ /// isC23 - Language is a superset of C23.
+ bool isC23() const { return Flags & C23; }
/// isCPlusPlus - Language is a C++ variant.
bool isCPlusPlus() const { return Flags & CPlusPlus; }
@@ -113,8 +120,11 @@ public:
/// isCPlusPlus20 - Language is a C++20 variant (or later).
bool isCPlusPlus20() const { return Flags & CPlusPlus20; }
- /// isCPlusPlus2b - Language is a post-C++20 variant (or later).
- bool isCPlusPlus2b() const { return Flags & CPlusPlus2b; }
+ /// isCPlusPlus23 - Language is a post-C++23 variant (or later).
+ bool isCPlusPlus23() const { return Flags & CPlusPlus23; }
+
+ /// isCPlusPlus26 - Language is a post-C++26 variant (or later).
+ bool isCPlusPlus26() const { return Flags & CPlusPlus26; }
/// hasDigraphs - Language supports digraphs.
bool hasDigraphs() const { return Flags & Digraphs; }
@@ -125,9 +135,6 @@ public:
/// hasHexFloats - Language supports hexadecimal float constants.
bool hasHexFloats() const { return Flags & HexFloat; }
- /// hasImplicitInt - Language allows variables to be typed as int implicitly.
- bool hasImplicitInt() const { return Flags & ImplicitInt; }
-
/// isOpenCL - Language is a OpenCL variant.
bool isOpenCL() const { return Flags & OpenCL; }
@@ -136,6 +143,9 @@ public:
static const LangStandard *getLangStandardForName(StringRef Name);
};
+LangStandard::Kind getDefaultLanguageStandard(clang::Language Lang,
+ const llvm::Triple &T);
+
} // end namespace clang
#endif
diff --git a/contrib/llvm-project/clang/include/clang/Basic/LangStandards.def b/contrib/llvm-project/clang/include/clang/Basic/LangStandards.def
index 2cfeb68e56d6..b6192e48efc1 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/LangStandards.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/LangStandards.def
@@ -17,7 +17,7 @@
/// \param LANG - The Language for which this is a standard.
/// \param DESC - A short description of the standard.
/// \param FEATURES - The standard features as flags, these are enums from the
-/// clang::frontend namespace, which is assumed to be be available.
+/// clang::frontend namespace, which is assumed to be available.
/// LANGSTANDARD_ALIAS(IDENT, ALIAS)
/// \param IDENT - The name of the standard as a C++ identifier.
@@ -36,18 +36,17 @@
// C89-ish modes.
LANGSTANDARD(c89, "c89",
- C, "ISO C 1990",
- ImplicitInt)
+ C, "ISO C 1990", 0)
LANGSTANDARD_ALIAS(c89, "c90")
LANGSTANDARD_ALIAS(c89, "iso9899:1990")
LANGSTANDARD(c94, "iso9899:199409",
C, "ISO C 1990 with amendment 1",
- Digraphs | ImplicitInt)
+ Digraphs)
LANGSTANDARD(gnu89, "gnu89",
C, "ISO C 1990 with GNU extensions",
- LineComment | Digraphs | GNUMode | ImplicitInt)
+ LineComment | Digraphs | GNUMode)
LANGSTANDARD_ALIAS(gnu89, "gnu90")
// C99-ish modes
@@ -88,13 +87,17 @@ LANGSTANDARD(gnu17, "gnu17",
LineComment | C99 | C11 | C17 | Digraphs | GNUMode | HexFloat)
LANGSTANDARD_ALIAS(gnu17, "gnu18")
-// C2x modes
-LANGSTANDARD(c2x, "c2x",
- C, "Working Draft for ISO C2x",
- LineComment | C99 | C11 | C17 | C2x | Digraphs | HexFloat)
-LANGSTANDARD(gnu2x, "gnu2x",
- C, "Working Draft for ISO C2x with GNU extensions",
- LineComment | C99 | C11 | C17 | C2x | Digraphs | GNUMode | HexFloat)
+// C23 modes
+LANGSTANDARD(c23, "c23",
+ C, "Working Draft for ISO C23",
+ LineComment | C99 | C11 | C17 | C23 | Digraphs | HexFloat)
+LANGSTANDARD_ALIAS_DEPR(c23, "c2x")
+LANGSTANDARD(gnu23, "gnu23",
+ C, "Working Draft for ISO C23 with GNU extensions",
+ LineComment | C99 | C11 | C17 | C23 | Digraphs | GNUMode | HexFloat)
+LANGSTANDARD_ALIAS_DEPR(gnu23, "gnu2x")
+// FIXME: Add the alias for iso9899:202* once we know the year ISO publishes
+// the document (expected to be 2024).
// C++ modes
LANGSTANDARD(cxx98, "c++98",
@@ -152,15 +155,29 @@ LANGSTANDARD(gnucxx20, "gnu++20",
CPlusPlus20 | Digraphs | HexFloat | GNUMode)
LANGSTANDARD_ALIAS_DEPR(gnucxx20, "gnu++2a")
-LANGSTANDARD(cxx2b, "c++2b",
- CXX, "Working draft for ISO C++ 2023 DIS",
+LANGSTANDARD(cxx23, "c++23",
+ CXX, "ISO C++ 2023 DIS",
LineComment | CPlusPlus | CPlusPlus11 | CPlusPlus14 | CPlusPlus17 |
- CPlusPlus20 | CPlusPlus2b | Digraphs | HexFloat)
+ CPlusPlus20 | CPlusPlus23 | Digraphs | HexFloat)
+LANGSTANDARD_ALIAS_DEPR(cxx23, "c++2b")
-LANGSTANDARD(gnucxx2b, "gnu++2b",
- CXX, "Working draft for ISO C++ 2023 DIS with GNU extensions",
+LANGSTANDARD(gnucxx23, "gnu++23",
+ CXX, "ISO C++ 2023 DIS with GNU extensions",
LineComment | CPlusPlus | CPlusPlus11 | CPlusPlus14 | CPlusPlus17 |
- CPlusPlus20 | CPlusPlus2b | Digraphs | HexFloat | GNUMode)
+ CPlusPlus20 | CPlusPlus23 | Digraphs | HexFloat | GNUMode)
+LANGSTANDARD_ALIAS_DEPR(gnucxx23, "gnu++2b")
+
+LANGSTANDARD(cxx26, "c++2c",
+ CXX, "Working draft for C++2c",
+ LineComment | CPlusPlus | CPlusPlus11 | CPlusPlus14 | CPlusPlus17 |
+ CPlusPlus20 | CPlusPlus23 | CPlusPlus26 | Digraphs | HexFloat)
+LANGSTANDARD_ALIAS(cxx26, "c++26")
+
+LANGSTANDARD(gnucxx26, "gnu++2c",
+ CXX, "Working draft for C++2c with GNU extensions",
+ LineComment | CPlusPlus | CPlusPlus11 | CPlusPlus14 | CPlusPlus17 |
+ CPlusPlus20 | CPlusPlus23 | CPlusPlus26 | Digraphs | HexFloat | GNUMode)
+LANGSTANDARD_ALIAS(gnucxx26, "gnu++26")
// OpenCL
LANGSTANDARD(opencl10, "cl1.0",
@@ -180,8 +197,15 @@ LANGSTANDARD(opencl20, "cl2.0",
LANGSTANDARD(opencl30, "cl3.0",
OpenCL, "OpenCL 3.0",
LineComment | C99 | Digraphs | HexFloat | OpenCL)
-LANGSTANDARD(openclcpp, "clc++",
- OpenCL, "C++ for OpenCL",
+
+LANGSTANDARD(openclcpp10, "clc++1.0",
+ OpenCL, "C++ for OpenCL 1.0",
+ LineComment | CPlusPlus | CPlusPlus11 | CPlusPlus14 | CPlusPlus17 |
+ Digraphs | HexFloat | OpenCL)
+LANGSTANDARD_ALIAS(openclcpp10, "clc++")
+
+LANGSTANDARD(openclcpp2021, "clc++2021",
+ OpenCL, "C++ for OpenCL 2021",
LineComment | CPlusPlus | CPlusPlus11 | CPlusPlus14 | CPlusPlus17 |
Digraphs | HexFloat | OpenCL)
@@ -190,15 +214,39 @@ LANGSTANDARD_ALIAS_DEPR(opencl11, "CL1.1")
LANGSTANDARD_ALIAS_DEPR(opencl12, "CL1.2")
LANGSTANDARD_ALIAS_DEPR(opencl20, "CL2.0")
LANGSTANDARD_ALIAS_DEPR(opencl30, "CL3.0")
-LANGSTANDARD_ALIAS_DEPR(openclcpp, "CLC++")
+LANGSTANDARD_ALIAS_DEPR(openclcpp10, "CLC++")
+LANGSTANDARD_ALIAS_DEPR(openclcpp10, "CLC++1.0")
+LANGSTANDARD_ALIAS_DEPR(openclcpp2021, "CLC++2021")
-// CUDA
-LANGSTANDARD(cuda, "cuda", CUDA, "NVIDIA CUDA(tm)",
- LineComment | CPlusPlus | CPlusPlus11 | CPlusPlus14 | Digraphs)
+// HLSL
+LANGSTANDARD(hlsl, "hlsl",
+ HLSL, "High Level Shader Language",
+ LineComment | HLSL | CPlusPlus )
+
+LANGSTANDARD(hlsl2015, "hlsl2015",
+ HLSL, "High Level Shader Language 2015",
+ LineComment | HLSL | CPlusPlus )
+
+LANGSTANDARD(hlsl2016, "hlsl2016",
+ HLSL, "High Level Shader Language 2016",
+ LineComment | HLSL | CPlusPlus )
+
+LANGSTANDARD(hlsl2017, "hlsl2017",
+ HLSL, "High Level Shader Language 2017",
+ LineComment | HLSL | CPlusPlus )
+
+LANGSTANDARD(hlsl2018, "hlsl2018",
+ HLSL, "High Level Shader Language 2018",
+ LineComment | HLSL | CPlusPlus )
+
+LANGSTANDARD(hlsl2021, "hlsl2021",
+ HLSL, "High Level Shader Language 2021",
+ LineComment | HLSL | CPlusPlus )
+
+LANGSTANDARD(hlsl202x, "hlsl202x",
+ HLSL, "High Level Shader Language 202x",
+ LineComment | HLSL | CPlusPlus | CPlusPlus11)
-// HIP
-LANGSTANDARD(hip, "hip", HIP, "HIP",
- LineComment | CPlusPlus | CPlusPlus11 | CPlusPlus14 | Digraphs)
#undef LANGSTANDARD
#undef LANGSTANDARD_ALIAS
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Linkage.h b/contrib/llvm-project/clang/include/clang/Basic/Linkage.h
index f4d442c084cf..fcf56b93b978 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Linkage.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Linkage.h
@@ -14,21 +14,25 @@
#ifndef LLVM_CLANG_BASIC_LINKAGE_H
#define LLVM_CLANG_BASIC_LINKAGE_H
+#include "llvm/Support/ErrorHandling.h"
#include <utility>
namespace clang {
/// Describes the different kinds of linkage
/// (C++ [basic.link], C99 6.2.2) that an entity may have.
-enum Linkage : unsigned char {
+enum class Linkage : unsigned char {
+ // Linkage hasn't been computed.
+ Invalid = 0,
+
/// No linkage, which means that the entity is unique and
/// can only be referred to from within its scope.
- NoLinkage = 0,
+ None,
/// Internal linkage, which indicates that the entity can
/// be referred to from within the translation unit (but not other
/// translation units).
- InternalLinkage,
+ Internal,
/// External linkage within a unique namespace.
///
@@ -37,26 +41,21 @@ enum Linkage : unsigned char {
/// their names are unique to this translation unit, which is
/// equivalent to having internal linkage from the code-generation
/// point of view.
- UniqueExternalLinkage,
+ UniqueExternal,
/// No linkage according to the standard, but is visible from other
/// translation units because of types defined in a inline function.
- VisibleNoLinkage,
-
- /// Internal linkage according to the Modules TS, but can be referred
- /// to from other translation units indirectly through inline functions and
- /// templates in the module interface.
- ModuleInternalLinkage,
+ VisibleNone,
/// Module linkage, which indicates that the entity can be referred
/// to from other translation units within the same module, and indirectly
/// from arbitrary other translation units through inline functions and
/// templates in the module interface.
- ModuleLinkage,
+ Module,
/// External linkage, which indicates that the entity can
/// be referred to from other translation units.
- ExternalLinkage
+ External
};
/// Describes the different kinds of language linkage
@@ -89,24 +88,34 @@ inline bool isUniqueGVALinkage(GVALinkage L) {
}
inline bool isExternallyVisible(Linkage L) {
- return L >= VisibleNoLinkage;
+ switch (L) {
+ case Linkage::Invalid:
+ llvm_unreachable("Linkage hasn't been computed!");
+ case Linkage::None:
+ case Linkage::Internal:
+ case Linkage::UniqueExternal:
+ return false;
+ case Linkage::VisibleNone:
+ case Linkage::Module:
+ case Linkage::External:
+ return true;
+ }
+ llvm_unreachable("Unhandled Linkage enum");
}
inline Linkage getFormalLinkage(Linkage L) {
switch (L) {
- case UniqueExternalLinkage:
- return ExternalLinkage;
- case VisibleNoLinkage:
- return NoLinkage;
- case ModuleInternalLinkage:
- return InternalLinkage;
+ case Linkage::UniqueExternal:
+ return Linkage::External;
+ case Linkage::VisibleNone:
+ return Linkage::None;
default:
return L;
}
}
inline bool isExternalFormalLinkage(Linkage L) {
- return getFormalLinkage(L) == ExternalLinkage;
+ return getFormalLinkage(L) == Linkage::External;
}
/// Compute the minimum linkage given two linkages.
@@ -118,13 +127,13 @@ inline bool isExternalFormalLinkage(Linkage L) {
/// special cases for when VisibleNoLinkage would lose the visible bit and
/// become NoLinkage.
inline Linkage minLinkage(Linkage L1, Linkage L2) {
- if (L2 == VisibleNoLinkage)
+ if (L2 == Linkage::VisibleNone)
std::swap(L1, L2);
- if (L1 == VisibleNoLinkage) {
- if (L2 == InternalLinkage)
- return NoLinkage;
- if (L2 == UniqueExternalLinkage)
- return NoLinkage;
+ if (L1 == Linkage::VisibleNone) {
+ if (L2 == Linkage::Internal)
+ return Linkage::None;
+ if (L2 == Linkage::UniqueExternal)
+ return Linkage::None;
}
return L1 < L2 ? L1 : L2;
}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/MSP430Target.def b/contrib/llvm-project/clang/include/clang/Basic/MSP430Target.def
index a1e192c19261..7a10be1d54c8 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/MSP430Target.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/MSP430Target.def
@@ -238,8 +238,7 @@ MSP430_MCU_FEAT("msp430f4793", "32bit")
MSP430_MCU_FEAT("msp430f4784", "32bit")
MSP430_MCU_FEAT("msp430f4794", "32bit")
-// Generic MSUs
-MSP430_MCU("msp430")
+// Generic MCUs
MSP430_MCU("msp430i2xxgeneric")
#undef MSP430_MCU
diff --git a/contrib/llvm-project/clang/include/clang/Basic/MakeSupport.h b/contrib/llvm-project/clang/include/clang/Basic/MakeSupport.h
new file mode 100644
index 000000000000..c663014ba7bc
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/MakeSupport.h
@@ -0,0 +1,23 @@
+//===- MakeSupport.h - Make Utilities ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_MAKESUPPORT_H
+#define LLVM_CLANG_BASIC_MAKESUPPORT_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace clang {
+
+/// Quote target names for inclusion in GNU Make dependency files.
+/// Only the characters '$', '#', ' ', '\t' are quoted.
+void quoteMakeTarget(StringRef Target, SmallVectorImpl<char> &Res);
+
+} // namespace clang
+
+#endif // LLVM_CLANG_BASIC_MAKESUPPORT_H
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Module.h b/contrib/llvm-project/clang/include/clang/Basic/Module.h
index 3476b05d2e92..62786e3ac865 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Module.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Module.h
@@ -20,7 +20,6 @@
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
@@ -33,8 +32,10 @@
#include <cstdint>
#include <ctime>
#include <iterator>
+#include <optional>
#include <string>
#include <utility>
+#include <variant>
#include <vector>
namespace llvm {
@@ -71,8 +72,8 @@ struct ASTFileSignature : std::array<uint8_t, 20> {
return Value;
}
- static ASTFileSignature create(StringRef Bytes) {
- return create(Bytes.bytes_begin(), Bytes.bytes_end());
+ static ASTFileSignature create(std::array<uint8_t, 20> Bytes) {
+ return ASTFileSignature(std::move(Bytes));
}
static ASTFileSignature createDISentinel() {
@@ -81,6 +82,12 @@ struct ASTFileSignature : std::array<uint8_t, 20> {
return Sentinel;
}
+ static ASTFileSignature createDummy() {
+ ASTFileSignature Dummy;
+ Dummy.fill(0x00);
+ return Dummy;
+ }
+
template <typename InputIt>
static ASTFileSignature create(InputIt First, InputIt Last) {
assert(std::distance(First, Last) == size &&
@@ -93,7 +100,9 @@ struct ASTFileSignature : std::array<uint8_t, 20> {
};
/// Describes a module or submodule.
-class Module {
+///
+/// Aligned to 8 bytes to allow for llvm::PointerIntPair<Module *, 3>.
+class alignas(8) Module {
public:
/// The name of this module.
std::string Name;
@@ -101,19 +110,40 @@ public:
/// The location of the module definition.
SourceLocation DefinitionLoc;
+ // FIXME: Consider if reducing the size of this enum (having Partition and
+ // Named modules only) then representing interface/implementation separately
+ // is more efficient.
enum ModuleKind {
/// This is a module that was defined by a module map and built out
/// of header files.
ModuleMapModule,
- /// This is a C++ Modules TS module interface unit.
+ /// This is a C++ 20 header unit.
+ ModuleHeaderUnit,
+
+ /// This is a C++20 module interface unit.
ModuleInterfaceUnit,
- /// This is a fragment of the global module within some C++ module.
- GlobalModuleFragment,
+ /// This is a C++20 module implementation unit.
+ ModuleImplementationUnit,
+
+ /// This is a C++ 20 module partition interface.
+ ModulePartitionInterface,
+
+ /// This is a C++ 20 module partition implementation.
+ ModulePartitionImplementation,
+
+ /// This is the explicit Global Module Fragment of a modular TU.
+ /// As per C++ [module.global.frag].
+ ExplicitGlobalModuleFragment,
/// This is the private module fragment within some C++ module.
PrivateModuleFragment,
+
+ /// This is an implicit fragment of the global module which contains
+ /// only language linkage declarations (made in the purview of the
+ /// named module).
+ ImplicitGlobalModuleFragment,
};
/// The kind of this module.
@@ -126,14 +156,14 @@ public:
/// The build directory of this module. This is the directory in
/// which the module is notionally built, and relative to which its headers
/// are found.
- const DirectoryEntry *Directory = nullptr;
+ OptionalDirectoryEntryRef Directory;
/// The presumed file name for the module map defining this module.
/// Only non-empty when building from preprocessed source.
std::string PresumedModuleMapFile;
/// The umbrella header or directory.
- llvm::PointerUnion<const FileEntry *, const DirectoryEntry *> Umbrella;
+ std::variant<std::monostate, FileEntryRef, DirectoryEntryRef> Umbrella;
/// The module signature.
ASTFileSignature Signature;
@@ -148,10 +178,38 @@ public:
/// eventually be exposed, for use in "private" modules.
std::string ExportAsModule;
- /// Does this Module scope describe part of the purview of a named C++ module?
- bool isModulePurview() const {
- return Kind == ModuleInterfaceUnit || Kind == PrivateModuleFragment;
+ /// For the debug info, the path to this module's .apinotes file, if any.
+ std::string APINotesFile;
+
+ /// Does this Module is a named module of a standard named module?
+ bool isNamedModule() const {
+ switch (Kind) {
+ case ModuleInterfaceUnit:
+ case ModuleImplementationUnit:
+ case ModulePartitionInterface:
+ case ModulePartitionImplementation:
+ case PrivateModuleFragment:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /// Does this Module scope describe a fragment of the global module within
+ /// some C++ module.
+ bool isGlobalModule() const {
+ return isExplicitGlobalModule() || isImplicitGlobalModule();
}
+ bool isExplicitGlobalModule() const {
+ return Kind == ExplicitGlobalModuleFragment;
+ }
+ bool isImplicitGlobalModule() const {
+ return Kind == ImplicitGlobalModuleFragment;
+ }
+
+ bool isPrivateModule() const { return Kind == PrivateModuleFragment; }
+
+ bool isModuleMapModule() const { return Kind == ModuleMapModule; }
private:
/// The submodules of this module, indexed by name.
@@ -163,10 +221,10 @@ private:
/// The AST file if this is a top-level module which has a
/// corresponding serialized AST file, or null otherwise.
- Optional<FileEntryRef> ASTFile;
+ OptionalFileEntryRef ASTFile;
/// The top-level headers associated with this module.
- llvm::SmallSetVector<const FileEntry *, 2> TopHeaders;
+ llvm::SmallSetVector<FileEntryRef, 2> TopHeaders;
/// top-level header filenames that aren't resolved to FileEntries yet.
std::vector<std::string> TopHeaderNames;
@@ -192,9 +250,7 @@ public:
struct Header {
std::string NameAsWritten;
std::string PathRelativeToRootModuleDirectory;
- const FileEntry *Entry;
-
- explicit operator bool() { return Entry; }
+ FileEntryRef Entry;
};
/// Information about a directory name as found in the module map
@@ -202,9 +258,7 @@ public:
struct DirectoryName {
std::string NameAsWritten;
std::string PathRelativeToRootModuleDirectory;
- const DirectoryEntry *Entry;
-
- explicit operator bool() { return Entry; }
+ DirectoryEntryRef Entry;
};
/// The headers that are part of this module.
@@ -218,8 +272,8 @@ public:
std::string FileName;
bool IsUmbrella = false;
bool HasBuiltinHeader = false;
- Optional<off_t> Size;
- Optional<time_t> ModTime;
+ std::optional<off_t> Size;
+ std::optional<time_t> ModTime;
};
/// Headers that are mentioned in the module map file but that we have not
@@ -246,50 +300,62 @@ public:
/// Whether this module has declared itself unimportable, either because
/// it's missing a requirement from \p Requirements or because it's been
/// shadowed by another module.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsUnimportable : 1;
/// Whether we tried and failed to load a module file for this module.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasIncompatibleModuleFile : 1;
/// Whether this module is available in the current translation unit.
///
/// If the module is missing headers or does not meet all requirements then
/// this bit will be 0.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsAvailable : 1;
/// Whether this module was loaded from a module file.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsFromModuleFile : 1;
/// Whether this is a framework module.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsFramework : 1;
/// Whether this is an explicit submodule.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsExplicit : 1;
/// Whether this is a "system" module (which assumes that all
/// headers in it are system headers).
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsSystem : 1;
/// Whether this is an 'extern "C"' module (which implicitly puts all
/// headers in it within an 'extern "C"' block, and allows the module to be
/// imported within such a block).
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsExternC : 1;
/// Whether this is an inferred submodule (module * { ... }).
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsInferred : 1;
/// Whether we should infer submodules for this module based on
/// the headers.
///
/// Submodules can only be inferred for modules with an umbrella header.
+ LLVM_PREFERRED_TYPE(bool)
unsigned InferSubmodules : 1;
/// Whether, when inferring submodules, the inferred submodules
/// should be explicit.
+ LLVM_PREFERRED_TYPE(bool)
unsigned InferExplicitSubmodules : 1;
/// Whether, when inferring submodules, the inferr submodules should
/// export all modules they import (e.g., the equivalent of "export *").
+ LLVM_PREFERRED_TYPE(bool)
unsigned InferExportWildcard : 1;
/// Whether the set of configuration macros is exhaustive.
@@ -297,16 +363,24 @@ public:
/// When the set of configuration macros is exhaustive, meaning
/// that no identifier not in this list should affect how the module is
/// built.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ConfigMacrosExhaustive : 1;
/// Whether files in this module can only include non-modular headers
/// and headers from used modules.
+ LLVM_PREFERRED_TYPE(bool)
unsigned NoUndeclaredIncludes : 1;
/// Whether this module came from a "private" module map, found next
/// to a regular (public) module map.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ModuleMapIsPrivate : 1;
+ /// Whether this C++20 named modules doesn't need an initializer.
+ /// This is only meaningful for C++20 modules.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned NamedModuleHasInit : 1;
+
/// Describes the visibility of the various names within a
/// particular module.
enum NameVisibilityKind {
@@ -326,6 +400,10 @@ public:
/// module depends.
llvm::SmallSetVector<Module *, 2> Imports;
+ /// The set of top-level modules that affected the compilation of this module,
+ /// but were not imported.
+ llvm::SmallSetVector<Module *, 2> AffectingClangModules;
+
/// Describes an exported module.
///
/// The pointer is the module being re-exported, while the bit will be true
@@ -359,6 +437,10 @@ public:
/// The set of use declarations that have yet to be resolved.
SmallVector<ModuleId, 2> UnresolvedDirectUses;
+ /// When \c NoUndeclaredIncludes is true, the set of modules this module tried
+ /// to import but didn't because they are not direct uses.
+ llvm::SmallSetVector<const Module *, 2> UndeclaredUses;
+
/// A library or framework to link against when an entity from this
/// module is used.
struct LinkLibrary {
@@ -438,6 +520,9 @@ public:
bool isUnimportable(const LangOptions &LangOpts, const TargetInfo &Target,
Requirement &Req, Module *&ShadowingModule) const;
+ /// Determine whether this module can be built in this compilation.
+ bool isForBuilding(const LangOptions &LangOpts) const;
+
/// Determine whether this module is available for use within the
/// current translation unit.
bool isAvailable() const { return IsAvailable; }
@@ -502,6 +587,62 @@ public:
Parent->SubModules.push_back(this);
}
+ /// Is this module have similar semantics as headers.
+ bool isHeaderLikeModule() const {
+ return isModuleMapModule() || isHeaderUnit();
+ }
+
+ /// Is this a module partition.
+ bool isModulePartition() const {
+ return Kind == ModulePartitionInterface ||
+ Kind == ModulePartitionImplementation;
+ }
+
+ /// Is this a module implementation.
+ bool isModuleImplementation() const {
+ return Kind == ModuleImplementationUnit;
+ }
+
+ /// Is this module a header unit.
+ bool isHeaderUnit() const { return Kind == ModuleHeaderUnit; }
+ // Is this a C++20 module interface or a partition.
+ bool isInterfaceOrPartition() const {
+ return Kind == ModuleInterfaceUnit || isModulePartition();
+ }
+
+ /// Is this a C++20 named module unit.
+ bool isNamedModuleUnit() const {
+ return isInterfaceOrPartition() || isModuleImplementation();
+ }
+
+ bool isModuleInterfaceUnit() const {
+ return Kind == ModuleInterfaceUnit || Kind == ModulePartitionInterface;
+ }
+
+ bool isNamedModuleInterfaceHasInit() const { return NamedModuleHasInit; }
+
+ /// Get the primary module interface name from a partition.
+ StringRef getPrimaryModuleInterfaceName() const {
+ // Technically, global module fragment belongs to global module. And global
+ // module has no name: [module.unit]p6:
+ // The global module has no name, no module interface unit, and is not
+ // introduced by any module-declaration.
+ //
+ // <global> is the default name showed in module map.
+ if (isGlobalModule())
+ return "<global>";
+
+ if (isModulePartition()) {
+ auto pos = Name.find(':');
+ return StringRef(Name.data(), pos);
+ }
+
+ if (isPrivateModule())
+ return getTopLevelModuleName();
+
+ return Name;
+ }
+
/// Retrieve the full name of this module, including the path from
/// its top-level module.
/// \param AllowStringLiterals If \c true, components that might not be
@@ -531,38 +672,39 @@ public:
}
/// The serialized AST file for this module, if one was created.
- OptionalFileEntryRefDegradesToFileEntryPtr getASTFile() const {
+ OptionalFileEntryRef getASTFile() const {
return getTopLevelModule()->ASTFile;
}
/// Set the serialized AST file for the top-level module of this module.
- void setASTFile(Optional<FileEntryRef> File) {
- assert((!File || !getASTFile() || getASTFile() == File) &&
- "file path changed");
+ void setASTFile(OptionalFileEntryRef File) {
+ assert((!getASTFile() || getASTFile() == File) && "file path changed");
getTopLevelModule()->ASTFile = File;
}
- /// Retrieve the directory for which this module serves as the
- /// umbrella.
- DirectoryName getUmbrellaDir() const;
+ /// Retrieve the umbrella directory as written.
+ std::optional<DirectoryName> getUmbrellaDirAsWritten() const {
+ if (const auto *Dir = std::get_if<DirectoryEntryRef>(&Umbrella))
+ return DirectoryName{UmbrellaAsWritten,
+ UmbrellaRelativeToRootModuleDirectory, *Dir};
+ return std::nullopt;
+ }
- /// Retrieve the header that serves as the umbrella header for this
- /// module.
- Header getUmbrellaHeader() const {
- if (auto *FE = Umbrella.dyn_cast<const FileEntry *>())
+ /// Retrieve the umbrella header as written.
+ std::optional<Header> getUmbrellaHeaderAsWritten() const {
+ if (const auto *Hdr = std::get_if<FileEntryRef>(&Umbrella))
return Header{UmbrellaAsWritten, UmbrellaRelativeToRootModuleDirectory,
- FE};
- return Header{};
+ *Hdr};
+ return std::nullopt;
}
- /// Determine whether this module has an umbrella directory that is
- /// not based on an umbrella header.
- bool hasUmbrellaDir() const {
- return Umbrella && Umbrella.is<const DirectoryEntry *>();
- }
+ /// Get the effective umbrella directory for this module: either the one
+ /// explicitly written in the module map file, or the parent of the umbrella
+ /// header.
+ OptionalDirectoryEntryRef getEffectiveUmbrellaDir() const;
/// Add a top-level header associated with this module.
- void addTopHeader(const FileEntry *File);
+ void addTopHeader(FileEntryRef File);
/// Add a top-level header filename associated with this module.
void addTopHeaderFilename(StringRef Filename) {
@@ -570,11 +712,11 @@ public:
}
/// The top-level headers associated with this module.
- ArrayRef<const FileEntry *> getTopHeaders(FileManager &FileMgr);
+ ArrayRef<FileEntryRef> getTopHeaders(FileManager &FileMgr);
/// Determine whether this module has declared its intention to
/// directly use another module.
- bool directlyUses(const Module *Requested) const;
+ bool directlyUses(const Module *Requested);
/// Add the given feature requirement to the list of features
/// required by this module.
@@ -603,6 +745,18 @@ public:
Module *findSubmodule(StringRef Name) const;
Module *findOrInferSubmodule(StringRef Name);
+ /// Get the Global Module Fragment (sub-module) for this module, it there is
+ /// one.
+ ///
+ /// \returns The GMF sub-module if found, or NULL otherwise.
+ Module *getGlobalModuleFragment() const;
+
+ /// Get the Private Module Fragment (sub-module) for this module, it there is
+ /// one.
+ ///
+ /// \returns The PMF sub-module if found, or NULL otherwise.
+ Module *getPrivateModuleFragment() const;
+
/// Determine whether the specified module would be visible to
/// a lookup at the end of this module.
///
@@ -620,16 +774,11 @@ public:
using submodule_iterator = std::vector<Module *>::iterator;
using submodule_const_iterator = std::vector<Module *>::const_iterator;
- submodule_iterator submodule_begin() { return SubModules.begin(); }
- submodule_const_iterator submodule_begin() const {return SubModules.begin();}
- submodule_iterator submodule_end() { return SubModules.end(); }
- submodule_const_iterator submodule_end() const { return SubModules.end(); }
-
llvm::iterator_range<submodule_iterator> submodules() {
- return llvm::make_range(submodule_begin(), submodule_end());
+ return llvm::make_range(SubModules.begin(), SubModules.end());
}
llvm::iterator_range<submodule_const_iterator> submodules() const {
- return llvm::make_range(submodule_begin(), submodule_end());
+ return llvm::make_range(SubModules.begin(), SubModules.end());
}
/// Appends this module's list of exported modules to \p Exported.
@@ -705,6 +854,11 @@ public:
ConflictCallback Cb = [](ArrayRef<Module *>, Module *,
StringRef) {});
+ /// Make transitive imports visible for [module.import]/7.
+ void makeTransitiveImportsVisible(
+ Module *M, SourceLocation Loc, VisibleCallback Vis = [](Module *) {},
+ ConflictCallback Cb = [](ArrayRef<Module *>, Module *, StringRef) {});
+
private:
/// Import locations for each visible module. Indexed by the module's
/// VisibilityID.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/NoSanitizeList.h b/contrib/llvm-project/clang/include/clang/Basic/NoSanitizeList.h
index 3f80e0fdedda..43415859fcd5 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/NoSanitizeList.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/NoSanitizeList.h
@@ -41,6 +41,8 @@ public:
bool containsFunction(SanitizerMask Mask, StringRef FunctionName) const;
bool containsFile(SanitizerMask Mask, StringRef FileName,
StringRef Category = StringRef()) const;
+ bool containsMainFile(SanitizerMask Mask, StringRef FileName,
+ StringRef Category = StringRef()) const;
bool containsLocation(SanitizerMask Mask, SourceLocation Loc,
StringRef Category = StringRef()) const;
};
diff --git a/contrib/llvm-project/clang/include/clang/Basic/ObjCRuntime.h b/contrib/llvm-project/clang/include/clang/Basic/ObjCRuntime.h
index 26403bfa98c9..1ccf60f0b7be 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/ObjCRuntime.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/ObjCRuntime.h
@@ -16,9 +16,10 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/HashBuilder.h"
#include "llvm/Support/VersionTuple.h"
+#include "llvm/TargetParser/Triple.h"
#include <string>
namespace clang {
@@ -99,16 +100,24 @@ public:
bool isLegacyDispatchDefaultForArch(llvm::Triple::ArchType Arch) {
// The GNUstep runtime uses a newer dispatch method by default from
// version 1.6 onwards
- if (getKind() == GNUstep && getVersion() >= VersionTuple(1, 6)) {
- if (Arch == llvm::Triple::arm ||
- Arch == llvm::Triple::x86 ||
- Arch == llvm::Triple::x86_64)
- return false;
- }
- else if ((getKind() == MacOSX) && isNonFragile() &&
- (getVersion() >= VersionTuple(10, 0)) &&
- (getVersion() < VersionTuple(10, 6)))
- return Arch != llvm::Triple::x86_64;
+ if (getKind() == GNUstep) {
+ switch (Arch) {
+ case llvm::Triple::arm:
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ return !(getVersion() >= VersionTuple(1, 6));
+ case llvm::Triple::aarch64:
+ case llvm::Triple::mips64:
+ return !(getVersion() >= VersionTuple(1, 9));
+ case llvm::Triple::riscv64:
+ return !(getVersion() >= VersionTuple(2, 2));
+ default:
+ return true;
+ }
+ } else if ((getKind() == MacOSX) && isNonFragile() &&
+ (getVersion() >= VersionTuple(10, 0)) &&
+ (getVersion() < VersionTuple(10, 6)))
+ return Arch != llvm::Triple::x86_64;
// Except for deployment target of 10.5 or less,
// Mac runtimes use legacy dispatch everywhere now.
return true;
@@ -202,7 +211,13 @@ public:
case GCC:
return false;
case GNUstep:
- return false;
+ // This could be enabled for all versions, except for the fact that the
+ // implementation of `objc_retain` and friends prior to 2.2 call [object
+ // retain] in their fall-back paths, which leads to infinite recursion if
+ // the runtime is built with this enabled. Since distributions typically
+ // build all Objective-C things with the same compiler version and flags,
+ // it's better to be conservative here.
+ return (getVersion() >= VersionTuple(2, 2));
case ObjFW:
return false;
}
@@ -239,7 +254,7 @@ public:
case GCC:
return false;
case GNUstep:
- return false;
+ return getVersion() >= VersionTuple(2, 2);
case ObjFW:
return false;
}
@@ -257,6 +272,8 @@ public:
return getVersion() >= VersionTuple(12, 2);
case WatchOS:
return getVersion() >= VersionTuple(5, 2);
+ case GNUstep:
+ return getVersion() >= VersionTuple(2, 2);
default:
return false;
}
@@ -454,7 +471,8 @@ public:
case iOS: return true;
case WatchOS: return true;
case GCC: return false;
- case GNUstep: return false;
+ case GNUstep:
+ return (getVersion() >= VersionTuple(2, 2));
case ObjFW: return false;
}
llvm_unreachable("bad kind");
@@ -480,6 +498,12 @@ public:
friend llvm::hash_code hash_value(const ObjCRuntime &OCR) {
return llvm::hash_combine(OCR.getKind(), OCR.getVersion());
}
+
+ template <typename HasherT, llvm::endianness Endianness>
+ friend void addHash(llvm::HashBuilder<HasherT, Endianness> &HBuilder,
+ const ObjCRuntime &OCR) {
+ HBuilder.add(OCR.getKind(), OCR.getVersion());
+ }
};
raw_ostream &operator<<(raw_ostream &out, const ObjCRuntime &value);
diff --git a/contrib/llvm-project/clang/include/clang/Basic/OpenACCKinds.h b/contrib/llvm-project/clang/include/clang/Basic/OpenACCKinds.h
new file mode 100644
index 000000000000..6487a95910ed
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/OpenACCKinds.h
@@ -0,0 +1,401 @@
+//===--- OpenACCKinds.h - OpenACC Enums -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Defines some OpenACC-specific enums and functions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_OPENACCKINDS_H
+#define LLVM_CLANG_BASIC_OPENACCKINDS_H
+
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace clang {
+// Represents the Construct/Directive kind of a pragma directive. Note the
+// OpenACC standard is inconsistent between calling these Construct vs
+// Directive, but we're calling it a Directive to be consistent with OpenMP.
+enum class OpenACCDirectiveKind {
+ // Compute Constructs.
+ Parallel,
+ Serial,
+ Kernels,
+
+ // Data Environment. "enter data" and "exit data" are also referred to in the
+ // Executable Directives section, but just as a back reference to the Data
+ // Environment.
+ Data,
+ EnterData,
+ ExitData,
+ HostData,
+
+ // Misc.
+ Loop,
+ Cache,
+
+ // Combined Constructs.
+ ParallelLoop,
+ SerialLoop,
+ KernelsLoop,
+
+ // Atomic Construct.
+ Atomic,
+
+ // Declare Directive.
+ Declare,
+
+ // Executable Directives. "wait" is first referred to here, but ends up being
+ // in its own section after "routine".
+ Init,
+ Shutdown,
+ Set,
+ Update,
+ Wait,
+
+ // Procedure Calls in Compute Regions.
+ Routine,
+
+ // Invalid.
+ Invalid,
+};
+
+inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &Out,
+ OpenACCDirectiveKind K) {
+ switch (K) {
+ case OpenACCDirectiveKind::Parallel:
+ return Out << "parallel";
+
+ case OpenACCDirectiveKind::Serial:
+ return Out << "serial";
+
+ case OpenACCDirectiveKind::Kernels:
+ return Out << "kernels";
+
+ case OpenACCDirectiveKind::Data:
+ return Out << "data";
+
+ case OpenACCDirectiveKind::EnterData:
+ return Out << "enter data";
+
+ case OpenACCDirectiveKind::ExitData:
+ return Out << "exit data";
+
+ case OpenACCDirectiveKind::HostData:
+ return Out << "host_data";
+
+ case OpenACCDirectiveKind::Loop:
+ return Out << "loop";
+
+ case OpenACCDirectiveKind::Cache:
+ return Out << "cache";
+
+ case OpenACCDirectiveKind::ParallelLoop:
+ return Out << "parallel loop";
+
+ case OpenACCDirectiveKind::SerialLoop:
+ return Out << "serial loop";
+
+ case OpenACCDirectiveKind::KernelsLoop:
+ return Out << "kernels loop";
+
+ case OpenACCDirectiveKind::Atomic:
+ return Out << "atomic";
+
+ case OpenACCDirectiveKind::Declare:
+ return Out << "declare";
+
+ case OpenACCDirectiveKind::Init:
+ return Out << "init";
+
+ case OpenACCDirectiveKind::Shutdown:
+ return Out << "shutdown";
+
+ case OpenACCDirectiveKind::Set:
+ return Out << "set";
+
+ case OpenACCDirectiveKind::Update:
+ return Out << "update";
+
+ case OpenACCDirectiveKind::Wait:
+ return Out << "wait";
+
+ case OpenACCDirectiveKind::Routine:
+ return Out << "routine";
+
+ case OpenACCDirectiveKind::Invalid:
+ return Out << "<invalid>";
+ }
+ llvm_unreachable("Uncovered directive kind");
+}
+
+enum class OpenACCAtomicKind {
+ Read,
+ Write,
+ Update,
+ Capture,
+ Invalid,
+};
+
+/// Represents the kind of an OpenACC clause.
+enum class OpenACCClauseKind {
+ /// 'finalize' clause, allowed on 'exit data' directive.
+ Finalize,
+ /// 'if_present' clause, allowed on 'host_data' and 'update' directives.
+ IfPresent,
+ /// 'seq' clause, allowed on 'loop' and 'routine' directives.
+ Seq,
+ /// 'independent' clause, allowed on 'loop' directives.
+ Independent,
+ /// 'auto' clause, allowed on 'loop' directives.
+ Auto,
+ /// 'worker' clause, allowed on 'loop', Combined, and 'routine' directives.
+ Worker,
+ /// 'vector' clause, allowed on 'loop', Combined, and 'routine' directives.
+ Vector,
+ /// 'nohost' clause, allowed on 'routine' directives.
+ NoHost,
+ /// 'default' clause, allowed on parallel, serial, kernel (and compound)
+ /// constructs.
+ Default,
+ /// 'if' clause, allowed on all the Compute Constructs, Data Constructs,
+ /// Executable Constructs, and Combined Constructs.
+ If,
+ /// 'self' clause, allowed on Compute and Combined Constructs, plus 'update'.
+ Self,
+ /// 'copy' clause, allowed on Compute and Combined Constructs, plus 'data' and
+ /// 'declare'.
+ Copy,
+ /// 'use_device' clause, allowed on 'host_data' construct.
+ UseDevice,
+ /// 'attach' clause, allowed on Compute and Combined constructs, plus 'data'
+ /// and 'enter data'.
+ Attach,
+ /// 'delete' clause, allowed on the 'exit data' construct.
+ Delete,
+ /// 'detach' clause, allowed on the 'exit data' construct.
+ Detach,
+ /// 'device' clause, allowed on the 'update' construct.
+ Device,
+ /// 'deviceptr' clause, allowed on Compute and Combined Constructs, plus
+ /// 'data' and 'declare'.
+ DevicePtr,
+ /// 'device_resident' clause, allowed on the 'declare' construct.
+ DeviceResident,
+ /// 'firstprivate' clause, allowed on 'parallel', 'serial', 'parallel loop',
+ /// and 'serial loop' constructs.
+ FirstPrivate,
+ /// 'host' clause, allowed on 'update' construct.
+ Host,
+ /// 'link' clause, allowed on 'declare' construct.
+ Link,
+ /// 'no_create' clause, allowed on allowed on Compute and Combined constructs,
+ /// plus 'data'.
+ NoCreate,
+ /// 'present' clause, allowed on Compute and Combined constructs, plus 'data'
+ /// and 'declare'.
+ Present,
+ /// 'private' clause, allowed on 'parallel', 'serial', 'loop', 'parallel
+ /// loop', and 'serial loop' constructs.
+ Private,
+ /// 'copyout' clause, allowed on Compute and Combined constructs, plus 'data',
+ /// 'exit data', and 'declare'.
+ CopyOut,
+ /// 'copyin' clause, allowed on Compute and Combined constructs, plus 'data',
+ /// 'enter data', and 'declare'.
+ CopyIn,
+ /// 'copyin' clause, allowed on Compute and Combined constructs, plus 'data',
+ /// 'enter data', and 'declare'.
+ Create,
+ /// 'reduction' clause, allowed on Parallel, Serial, Loop, and the combined
+ /// constructs.
+ Reduction,
+ /// 'collapse' clause, allowed on 'loop' and Combined constructs.
+ Collapse,
+ /// 'bind' clause, allowed on routine constructs.
+ Bind,
+ /// 'vector_length' clause, allowed on 'parallel', 'kernels', 'parallel loop',
+ /// and 'kernels loop' constructs.
+ VectorLength,
+ /// 'num_gangs' clause, allowed on 'parallel', 'kernels', parallel loop', and
+ /// 'kernels loop' constructs.
+ NumGangs,
+ /// 'num_workers' clause, allowed on 'parallel', 'kernels', parallel loop',
+ /// and 'kernels loop' constructs.
+ NumWorkers,
+ /// 'device_num' clause, allowed on 'init', 'shutdown', and 'set' constructs.
+ DeviceNum,
+ /// 'default_async' clause, allowed on 'set' construct.
+ DefaultAsync,
+ /// 'device_type' clause, allowed on Constructs, 'data', 'init', 'shutdown',
+ /// 'set', update', 'loop', 'routine', and Combined constructs.
+ DeviceType,
+ /// 'dtype' clause, an alias for 'device_type', stored separately for
+ /// diagnostic purposes.
+ DType,
+
+ /// Represents an invalid clause, for the purposes of parsing.
+ Invalid,
+};
+
+inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &Out,
+ OpenACCClauseKind K) {
+ switch (K) {
+ case OpenACCClauseKind::Finalize:
+ return Out << "finalize";
+
+ case OpenACCClauseKind::IfPresent:
+ return Out << "if_present";
+
+ case OpenACCClauseKind::Seq:
+ return Out << "seq";
+
+ case OpenACCClauseKind::Independent:
+ return Out << "independent";
+
+ case OpenACCClauseKind::Auto:
+ return Out << "auto";
+
+ case OpenACCClauseKind::Worker:
+ return Out << "worker";
+
+ case OpenACCClauseKind::Vector:
+ return Out << "vector";
+
+ case OpenACCClauseKind::NoHost:
+ return Out << "nohost";
+
+ case OpenACCClauseKind::Default:
+ return Out << "default";
+
+ case OpenACCClauseKind::If:
+ return Out << "if";
+
+ case OpenACCClauseKind::Self:
+ return Out << "self";
+
+ case OpenACCClauseKind::Copy:
+ return Out << "copy";
+
+ case OpenACCClauseKind::UseDevice:
+ return Out << "use_device";
+
+ case OpenACCClauseKind::Attach:
+ return Out << "attach";
+
+ case OpenACCClauseKind::Delete:
+ return Out << "delete";
+
+ case OpenACCClauseKind::Detach:
+ return Out << "detach";
+
+ case OpenACCClauseKind::Device:
+ return Out << "device";
+
+ case OpenACCClauseKind::DevicePtr:
+ return Out << "deviceptr";
+
+ case OpenACCClauseKind::DeviceResident:
+ return Out << "device_resident";
+
+ case OpenACCClauseKind::FirstPrivate:
+ return Out << "firstprivate";
+
+ case OpenACCClauseKind::Host:
+ return Out << "host";
+
+ case OpenACCClauseKind::Link:
+ return Out << "link";
+
+ case OpenACCClauseKind::NoCreate:
+ return Out << "no_create";
+
+ case OpenACCClauseKind::Present:
+ return Out << "present";
+
+ case OpenACCClauseKind::Private:
+ return Out << "private";
+
+ case OpenACCClauseKind::CopyOut:
+ return Out << "copyout";
+
+ case OpenACCClauseKind::CopyIn:
+ return Out << "copyin";
+
+ case OpenACCClauseKind::Create:
+ return Out << "create";
+
+ case OpenACCClauseKind::Reduction:
+ return Out << "reduction";
+
+ case OpenACCClauseKind::Collapse:
+ return Out << "collapse";
+
+ case OpenACCClauseKind::Bind:
+ return Out << "bind";
+
+ case OpenACCClauseKind::VectorLength:
+ return Out << "vector_length";
+
+ case OpenACCClauseKind::NumGangs:
+ return Out << "num_gangs";
+
+ case OpenACCClauseKind::NumWorkers:
+ return Out << "num_workers";
+
+ case OpenACCClauseKind::DeviceNum:
+ return Out << "device_num";
+
+ case OpenACCClauseKind::DefaultAsync:
+ return Out << "default_async";
+
+ case OpenACCClauseKind::DeviceType:
+ return Out << "device_type";
+
+ case OpenACCClauseKind::DType:
+ return Out << "dtype";
+
+ case OpenACCClauseKind::Invalid:
+ return Out << "<invalid>";
+ }
+ llvm_unreachable("Uncovered clause kind");
+}
+enum class OpenACCDefaultClauseKind {
+ /// 'none' option.
+ None,
+ /// 'present' option.
+ Present,
+ /// Not a valid option.
+ Invalid,
+};
+
+enum class OpenACCReductionOperator {
+ /// '+'.
+ Addition,
+ /// '*'.
+ Multiplication,
+ /// 'max'.
+ Max,
+ /// 'min'.
+ Min,
+ /// '&'.
+ BitwiseAnd,
+ /// '|'.
+ BitwiseOr,
+ /// '^'.
+ BitwiseXOr,
+ /// '&&'.
+ And,
+ /// '||'.
+ Or,
+ /// Invalid Reduction Clause Kind.
+ Invalid,
+};
+} // namespace clang
+
+#endif // LLVM_CLANG_BASIC_OPENACCKINDS_H
diff --git a/contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensionTypes.def b/contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensionTypes.def
index 84ffbe936b77..17c72d69a020 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensionTypes.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensionTypes.def
@@ -28,10 +28,10 @@ INTEL_SUBGROUP_AVC_TYPE(mce_result_t, MceResult)
INTEL_SUBGROUP_AVC_TYPE(ime_result_t, ImeResult)
INTEL_SUBGROUP_AVC_TYPE(ref_result_t, RefResult)
INTEL_SUBGROUP_AVC_TYPE(sic_result_t, SicResult)
-INTEL_SUBGROUP_AVC_TYPE(ime_result_single_reference_streamout_t, ImeResultSingleRefStreamout)
-INTEL_SUBGROUP_AVC_TYPE(ime_result_dual_reference_streamout_t, ImeResultDualRefStreamout)
-INTEL_SUBGROUP_AVC_TYPE(ime_single_reference_streamin_t, ImeSingleRefStreamin)
-INTEL_SUBGROUP_AVC_TYPE(ime_dual_reference_streamin_t, ImeDualRefStreamin)
+INTEL_SUBGROUP_AVC_TYPE(ime_result_single_reference_streamout_t, ImeResultSingleReferenceStreamout)
+INTEL_SUBGROUP_AVC_TYPE(ime_result_dual_reference_streamout_t, ImeResultDualReferenceStreamout)
+INTEL_SUBGROUP_AVC_TYPE(ime_single_reference_streamin_t, ImeSingleReferenceStreamin)
+INTEL_SUBGROUP_AVC_TYPE(ime_dual_reference_streamin_t, ImeDualReferenceStreamin)
#undef INTEL_SUBGROUP_AVC_TYPE
#endif // INTEL_SUBGROUP_AVC_TYPE
diff --git a/contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensions.def b/contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensions.def
index a053a0e9adb5..6f73b2613750 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensions.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/OpenCLExtensions.def
@@ -31,7 +31,7 @@
// If extensions are to be enumerated without any information,
// define OPENCLEXTNAME(ext) where ext is the name of the extension.
//
-// Difference between optional core feature and core feature is that the
+// Difference between optional core feature and core feature is that the
// later is unconditionally supported in specific OpenCL version.
//
// As per The OpenCL Extension Specification, Section 1.2, in this file, an
@@ -94,12 +94,6 @@ OPENCL_EXTENSION(__cl_clang_bitfields, true, 100)
OPENCL_EXTENSION(cl_amd_media_ops, true, 100)
OPENCL_EXTENSION(cl_amd_media_ops2, true, 100)
-// ARM OpenCL extensions
-OPENCL_EXTENSION(cl_arm_integer_dot_product_int8, true, 120)
-OPENCL_EXTENSION(cl_arm_integer_dot_product_accumulate_int8, true, 120)
-OPENCL_EXTENSION(cl_arm_integer_dot_product_accumulate_int16, true, 120)
-OPENCL_EXTENSION(cl_arm_integer_dot_product_accumulate_saturate_int8, true, 120)
-
// Intel OpenCL extensions
OPENCL_EXTENSION(cl_intel_subgroups, true, 120)
OPENCL_EXTENSION(cl_intel_subgroups_short, true, 120)
diff --git a/contrib/llvm-project/clang/include/clang/Basic/OpenCLOptions.h b/contrib/llvm-project/clang/include/clang/Basic/OpenCLOptions.h
index 1a035626fade..d6cb1a210519 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/OpenCLOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/OpenCLOptions.h
@@ -58,7 +58,7 @@ static inline OpenCLVersionID encodeOpenCLVersion(unsigned OpenCLVersion) {
// mask.
static inline bool isOpenCLVersionContainedInMask(const LangOptions &LO,
unsigned Mask) {
- auto CLVer = LO.OpenCLCPlusPlus ? 200 : LO.OpenCLVersion;
+ auto CLVer = LO.getOpenCLCompatibleVersion();
OpenCLVersionID Code = encodeOpenCLVersion(CLVer);
return Mask & Code;
}
@@ -79,8 +79,8 @@ public:
// the __opencl_c_program_scope_global_variables feature is supported
// C++ for OpenCL inherits rule from OpenCL C v2.0.
bool areProgramScopeVariablesSupported(const LangOptions &Opts) const {
- return Opts.OpenCLCPlusPlus || Opts.OpenCLVersion == 200 ||
- (Opts.OpenCLVersion == 300 &&
+ return Opts.getOpenCLCompatibleVersion() == 200 ||
+ (Opts.getOpenCLCompatibleVersion() == 300 &&
isSupported("__opencl_c_program_scope_global_variables", Opts));
}
@@ -115,8 +115,7 @@ public:
// Is option available in OpenCL version \p LO.
bool isAvailableIn(const LangOptions &LO) const {
// In C++ mode all extensions should work at least as in v2.0.
- auto CLVer = LO.OpenCLCPlusPlus ? 200 : LO.OpenCLVersion;
- return CLVer >= Avail;
+ return LO.getOpenCLCompatibleVersion() >= Avail;
}
// Is core option in OpenCL version \p LO.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.def b/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.def
index 9f9c32da4aa0..f46a92d5ecfd 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.def
@@ -41,6 +41,15 @@
#ifndef OPENMP_ATOMIC_DEFAULT_MEM_ORDER_KIND
#define OPENMP_ATOMIC_DEFAULT_MEM_ORDER_KIND(Name)
#endif
+#ifndef OPENMP_ATOMIC_FAIL_MODIFIER
+#define OPENMP_ATOMIC_FAIL_MODIFIER(Name)
+#endif
+#ifndef OPENMP_AT_KIND
+#define OPENMP_AT_KIND(Name)
+#endif
+#ifndef OPENMP_SEVERITY_KIND
+#define OPENMP_SEVERITY_KIND(Name)
+#endif
#ifndef OPENMP_DEFAULTMAP_MODIFIER
#define OPENMP_DEFAULTMAP_MODIFIER(Name)
#endif
@@ -53,12 +62,30 @@
#ifndef OPENMP_ORDER_KIND
#define OPENMP_ORDER_KIND(Name)
#endif
+#ifndef OPENMP_ORDER_MODIFIER
+#define OPENMP_ORDER_MODIFIER(Name)
+#endif
#ifndef OPENMP_DEVICE_MODIFIER
#define OPENMP_DEVICE_MODIFIER(Name)
#endif
#ifndef OPENMP_REDUCTION_MODIFIER
#define OPENMP_REDUCTION_MODIFIER(Name)
#endif
+#ifndef OPENMP_ADJUST_ARGS_KIND
+#define OPENMP_ADJUST_ARGS_KIND(Name)
+#endif
+#ifndef OPENMP_BIND_KIND
+#define OPENMP_BIND_KIND(Name)
+#endif
+#ifndef OPENMP_GRAINSIZE_MODIFIER
+#define OPENMP_GRAINSIZE_MODIFIER(Name)
+#endif
+#ifndef OPENMP_NUMTASKS_MODIFIER
+#define OPENMP_NUMTASKS_MODIFIER(Name)
+#endif
+#ifndef OPENMP_DOACROSS_MODIFIER
+#define OPENMP_DOACROSS_MODIFIER(Name)
+#endif
// Static attributes for 'schedule' clause.
OPENMP_SCHEDULE_KIND(static)
@@ -99,17 +126,34 @@ OPENMP_DEPEND_KIND(mutexinoutset)
OPENMP_DEPEND_KIND(depobj)
OPENMP_DEPEND_KIND(source)
OPENMP_DEPEND_KIND(sink)
+OPENMP_DEPEND_KIND(inoutset)
+OPENMP_DEPEND_KIND(outallmemory)
+OPENMP_DEPEND_KIND(inoutallmemory)
// Modifiers for 'linear' clause.
OPENMP_LINEAR_KIND(val)
OPENMP_LINEAR_KIND(ref)
OPENMP_LINEAR_KIND(uval)
+OPENMP_LINEAR_KIND(step)
// Modifiers for 'atomic_default_mem_order' clause.
OPENMP_ATOMIC_DEFAULT_MEM_ORDER_KIND(seq_cst)
OPENMP_ATOMIC_DEFAULT_MEM_ORDER_KIND(acq_rel)
OPENMP_ATOMIC_DEFAULT_MEM_ORDER_KIND(relaxed)
+// Modifiers for atomic 'fail' clause.
+OPENMP_ATOMIC_FAIL_MODIFIER(seq_cst)
+OPENMP_ATOMIC_FAIL_MODIFIER(acquire)
+OPENMP_ATOMIC_FAIL_MODIFIER(relaxed)
+
+// Modifiers for 'at' clause.
+OPENMP_AT_KIND(compilation)
+OPENMP_AT_KIND(execution)
+
+// Modifiers for 'severity' clause.
+OPENMP_SEVERITY_KIND(fatal)
+OPENMP_SEVERITY_KIND(warning)
+
// Map types for 'map' clause.
OPENMP_MAP_KIND(alloc)
OPENMP_MAP_KIND(to)
@@ -122,7 +166,10 @@ OPENMP_MAP_KIND(release)
OPENMP_MAP_MODIFIER_KIND(always)
OPENMP_MAP_MODIFIER_KIND(close)
OPENMP_MAP_MODIFIER_KIND(mapper)
+OPENMP_MAP_MODIFIER_KIND(iterator)
OPENMP_MAP_MODIFIER_KIND(present)
+// This is an OpenMP extension for the sake of OpenACC support.
+OPENMP_MAP_MODIFIER_KIND(ompx_hold)
// Modifiers for 'to' or 'from' clause.
OPENMP_MOTION_MODIFIER_KIND(mapper)
@@ -142,14 +189,44 @@ OPENMP_LASTPRIVATE_KIND(conditional)
// Type of the 'order' clause.
OPENMP_ORDER_KIND(concurrent)
+// Modifiers for the 'order' clause.
+OPENMP_ORDER_MODIFIER(reproducible)
+OPENMP_ORDER_MODIFIER(unconstrained)
+
// Modifiers for 'reduction' clause.
OPENMP_REDUCTION_MODIFIER(default)
OPENMP_REDUCTION_MODIFIER(inscan)
OPENMP_REDUCTION_MODIFIER(task)
+// Adjust-op kinds for the 'adjust_args' clause.
+OPENMP_ADJUST_ARGS_KIND(nothing)
+OPENMP_ADJUST_ARGS_KIND(need_device_ptr)
+
+// Binding kinds for the 'bind' clause.
+OPENMP_BIND_KIND(teams)
+OPENMP_BIND_KIND(parallel)
+OPENMP_BIND_KIND(thread)
+
+// Modifiers for the 'grainsize' clause.
+OPENMP_GRAINSIZE_MODIFIER(strict)
+
+// Modifiers for the 'num_tasks' clause.
+OPENMP_NUMTASKS_MODIFIER(strict)
+
+// Modifiers for the 'doacross' clause.
+OPENMP_DOACROSS_MODIFIER(source)
+OPENMP_DOACROSS_MODIFIER(sink)
+OPENMP_DOACROSS_MODIFIER(sink_omp_cur_iteration)
+OPENMP_DOACROSS_MODIFIER(source_omp_cur_iteration)
+
+#undef OPENMP_NUMTASKS_MODIFIER
+#undef OPENMP_GRAINSIZE_MODIFIER
+#undef OPENMP_BIND_KIND
+#undef OPENMP_ADJUST_ARGS_KIND
#undef OPENMP_REDUCTION_MODIFIER
#undef OPENMP_DEVICE_MODIFIER
#undef OPENMP_ORDER_KIND
+#undef OPENMP_ORDER_MODIFIER
#undef OPENMP_LASTPRIVATE_KIND
#undef OPENMP_DEVICE_TYPE_KIND
#undef OPENMP_LINEAR_KIND
@@ -157,10 +234,14 @@ OPENMP_REDUCTION_MODIFIER(task)
#undef OPENMP_SCHEDULE_MODIFIER
#undef OPENMP_SCHEDULE_KIND
#undef OPENMP_ATOMIC_DEFAULT_MEM_ORDER_KIND
+#undef OPENMP_ATOMIC_FAIL_MODIFIER
+#undef OPENMP_AT_KIND
+#undef OPENMP_SEVERITY_KIND
#undef OPENMP_MAP_KIND
#undef OPENMP_MAP_MODIFIER_KIND
#undef OPENMP_MOTION_MODIFIER_KIND
#undef OPENMP_DIST_SCHEDULE_KIND
#undef OPENMP_DEFAULTMAP_KIND
#undef OPENMP_DEFAULTMAP_MODIFIER
+#undef OPENMP_DOACROSS_MODIFIER
diff --git a/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.h b/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.h
index c7a2591de26c..d127498774c7 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/OpenMPKinds.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_BASIC_OPENMPKINDS_H
#define LLVM_CLANG_BASIC_OPENMPKINDS_H
+#include "clang/Basic/LangOptions.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
@@ -82,7 +83,7 @@ enum OpenMPMapModifierKind {
OMPC_MAP_MODIFIER_last
};
- /// Number of allowed map-type-modifiers.
+/// Number of allowed map-type-modifiers.
static constexpr unsigned NumberOfOMPMapClauseModifiers =
OMPC_MAP_MODIFIER_last - OMPC_MAP_MODIFIER_unknown - 1;
@@ -130,6 +131,20 @@ enum OpenMPAtomicDefaultMemOrderClauseKind {
OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown
};
+/// OpenMP attributes for 'at' clause.
+enum OpenMPAtClauseKind {
+#define OPENMP_AT_KIND(Name) OMPC_AT_##Name,
+#include "clang/Basic/OpenMPKinds.def"
+ OMPC_AT_unknown
+};
+
+/// OpenMP attributes for 'severity' clause.
+enum OpenMPSeverityClauseKind {
+#define OPENMP_SEVERITY_KIND(Name) OMPC_SEVERITY_##Name,
+#include "clang/Basic/OpenMPKinds.def"
+ OMPC_SEVERITY_unknown
+};
+
/// OpenMP device type for 'device_type' clause.
enum OpenMPDeviceType {
#define OPENMP_DEVICE_TYPE_KIND(Name) \
@@ -152,6 +167,14 @@ enum OpenMPOrderClauseKind {
OMPC_ORDER_unknown,
};
+/// OpenMP modifiers for 'order' clause.
+enum OpenMPOrderClauseModifier {
+ OMPC_ORDER_MODIFIER_unknown = OMPC_ORDER_unknown,
+#define OPENMP_ORDER_MODIFIER(Name) OMPC_ORDER_MODIFIER_##Name,
+#include "clang/Basic/OpenMPKinds.def"
+ OMPC_ORDER_MODIFIER_last
+};
+
/// Scheduling data for loop-based OpenMP directives.
struct OpenMPScheduleTy final {
OpenMPScheduleClauseKind Schedule = OMPC_SCHEDULE_unknown;
@@ -166,8 +189,51 @@ enum OpenMPReductionClauseModifier {
OMPC_REDUCTION_unknown,
};
+/// OpenMP adjust-op kinds for 'adjust_args' clause.
+enum OpenMPAdjustArgsOpKind {
+#define OPENMP_ADJUST_ARGS_KIND(Name) OMPC_ADJUST_ARGS_##Name,
+#include "clang/Basic/OpenMPKinds.def"
+ OMPC_ADJUST_ARGS_unknown,
+};
+
+/// OpenMP bindings for the 'bind' clause.
+enum OpenMPBindClauseKind {
+#define OPENMP_BIND_KIND(Name) OMPC_BIND_##Name,
+#include "clang/Basic/OpenMPKinds.def"
+ OMPC_BIND_unknown
+};
+
+enum OpenMPGrainsizeClauseModifier {
+#define OPENMP_GRAINSIZE_MODIFIER(Name) OMPC_GRAINSIZE_##Name,
+#include "clang/Basic/OpenMPKinds.def"
+ OMPC_GRAINSIZE_unknown
+};
+
+enum OpenMPNumTasksClauseModifier {
+#define OPENMP_NUMTASKS_MODIFIER(Name) OMPC_NUMTASKS_##Name,
+#include "clang/Basic/OpenMPKinds.def"
+ OMPC_NUMTASKS_unknown
+};
+
+/// OpenMP dependence types for 'doacross' clause.
+enum OpenMPDoacrossClauseModifier {
+#define OPENMP_DOACROSS_MODIFIER(Name) OMPC_DOACROSS_##Name,
+#include "clang/Basic/OpenMPKinds.def"
+ OMPC_DOACROSS_unknown
+};
+
+/// Contains 'interop' data for 'append_args' and 'init' clauses.
+class Expr;
+struct OMPInteropInfo final {
+ OMPInteropInfo(bool IsTarget = false, bool IsTargetSync = false)
+ : IsTarget(IsTarget), IsTargetSync(IsTargetSync) {}
+ bool IsTarget;
+ bool IsTargetSync;
+ llvm::SmallVector<Expr *, 4> PreferTypes;
+};
+
unsigned getOpenMPSimpleClauseType(OpenMPClauseKind Kind, llvm::StringRef Str,
- unsigned OpenMPVersion);
+ const LangOptions &LangOpts);
const char *getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind, unsigned Type);
/// Checks if the specified directive is a directive with an associated
@@ -245,6 +311,13 @@ bool isOpenMPDistributeDirective(OpenMPDirectiveKind DKind);
/// otherwise - false.
bool isOpenMPNestingDistributeDirective(OpenMPDirectiveKind DKind);
+/// Checks if the specified directive constitutes a 'loop' directive in the
+/// outermost nest. For example, 'omp teams loop' or 'omp loop'.
+/// \param DKind Specified directive.
+/// \return true - the directive has loop on the outermost nest.
+/// otherwise - false.
+bool isOpenMPGenericLoopDirective(OpenMPDirectiveKind DKind);
+
/// Checks if the specified clause is one of private clauses like
/// 'private', 'firstprivate', 'reduction' etc..
/// \param Kind Clause kind.
@@ -276,6 +349,25 @@ bool isOpenMPLoopTransformationDirective(OpenMPDirectiveKind DKind);
void getOpenMPCaptureRegions(
llvm::SmallVectorImpl<OpenMPDirectiveKind> &CaptureRegions,
OpenMPDirectiveKind DKind);
+
+/// Checks if the specified directive is a combined construct for which
+/// the first construct is a parallel construct.
+/// \param DKind Specified directive.
+/// \return true - if the above condition is met for this directive
+/// otherwise - false.
+bool isOpenMPCombinedParallelADirective(OpenMPDirectiveKind DKind);
+
+/// Checks if the specified target directive, combined or not, needs task based
+/// thread_limit
+/// \param DKind Specified directive.
+/// \return true - if the above condition is met for this directive
+/// otherwise - false.
+bool needsTaskBasedThreadLimit(OpenMPDirectiveKind DKind);
+
+/// Checks if the parameter to the fail clause in "#pragma atomic compare fail"
+/// is restricted only to memory order clauses of "OMPC_acquire",
+/// "OMPC_relaxed" and "OMPC_seq_cst".
+bool checkFailClauseParameter(OpenMPClauseKind FailClauseParameter);
}
#endif
diff --git a/contrib/llvm-project/clang/include/clang/Basic/OperatorKinds.def b/contrib/llvm-project/clang/include/clang/Basic/OperatorKinds.def
index d464db29274e..fab777349ede 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/OperatorKinds.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/OperatorKinds.def
@@ -38,8 +38,8 @@
/// "operator*") can be both unary and binary.
///
/// MemberOnly: True if this operator can only be declared as a
-/// non-static member function. False if the operator can be both a
-/// non-member function and a non-static member function.
+/// member function. False if the operator can be both a
+/// non-member function and a member function.
///
/// OVERLOADED_OPERATOR_MULTI is used to enumerate the multi-token
/// overloaded operator names, e.g., "operator delete []". The macro
diff --git a/contrib/llvm-project/clang/include/clang/Basic/OperatorPrecedence.h b/contrib/llvm-project/clang/include/clang/Basic/OperatorPrecedence.h
index 61ac7ad62f6b..9bda3eb28fdf 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/OperatorPrecedence.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/OperatorPrecedence.h
@@ -49,4 +49,4 @@ prec::Level getBinOpPrecedence(tok::TokenKind Kind, bool GreaterThanIsOperator,
} // end namespace clang
-#endif // LLVM_CLANG_OPERATOR_PRECEDENCE_H
+#endif // LLVM_CLANG_BASIC_OPERATORPRECEDENCE_H
diff --git a/contrib/llvm-project/clang/include/clang/Basic/ParsedAttrInfo.h b/contrib/llvm-project/clang/include/clang/Basic/ParsedAttrInfo.h
new file mode 100644
index 000000000000..537d8f3391d5
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/ParsedAttrInfo.h
@@ -0,0 +1,168 @@
+//===- ParsedAttrInfo.h - Info needed to parse an attribute -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ParsedAttrInfo class, which dictates how to
+// parse an attribute. This class is the one that plugins derive to
+// define a new attribute.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_PARSEDATTRINFO_H
+#define LLVM_CLANG_BASIC_PARSEDATTRINFO_H
+
+#include "clang/Basic/AttrSubjectMatchRules.h"
+#include "clang/Basic/AttributeCommonInfo.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/Registry.h"
+#include <climits>
+#include <list>
+
+namespace clang {
+
+class Decl;
+class LangOptions;
+class ParsedAttr;
+class Sema;
+class Stmt;
+class TargetInfo;
+
+struct ParsedAttrInfo {
+ /// Corresponds to the Kind enum.
+ LLVM_PREFERRED_TYPE(AttributeCommonInfo::Kind)
+ unsigned AttrKind : 16;
+ /// The number of required arguments of this attribute.
+ unsigned NumArgs : 4;
+ /// The number of optional arguments of this attributes.
+ unsigned OptArgs : 4;
+ /// The number of non-fake arguments specified in the attribute definition.
+ unsigned NumArgMembers : 4;
+ /// True if the parsing does not match the semantic content.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned HasCustomParsing : 1;
+ // True if this attribute accepts expression parameter pack expansions.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned AcceptsExprPack : 1;
+ /// True if this attribute is only available for certain targets.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsTargetSpecific : 1;
+ /// True if this attribute applies to types.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsType : 1;
+ /// True if this attribute applies to statements.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsStmt : 1;
+ /// True if this attribute has any spellings that are known to gcc.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsKnownToGCC : 1;
+ /// True if this attribute is supported by #pragma clang attribute.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsSupportedByPragmaAttribute : 1;
+ /// The syntaxes supported by this attribute and how they're spelled.
+ struct Spelling {
+ AttributeCommonInfo::Syntax Syntax;
+ const char *NormalizedFullName;
+ };
+ ArrayRef<Spelling> Spellings;
+ // The names of the known arguments of this attribute.
+ ArrayRef<const char *> ArgNames;
+
+protected:
+ constexpr ParsedAttrInfo(AttributeCommonInfo::Kind AttrKind =
+ AttributeCommonInfo::NoSemaHandlerAttribute)
+ : AttrKind(AttrKind), NumArgs(0), OptArgs(0), NumArgMembers(0),
+ HasCustomParsing(0), AcceptsExprPack(0), IsTargetSpecific(0), IsType(0),
+ IsStmt(0), IsKnownToGCC(0), IsSupportedByPragmaAttribute(0) {}
+
+ constexpr ParsedAttrInfo(AttributeCommonInfo::Kind AttrKind, unsigned NumArgs,
+ unsigned OptArgs, unsigned NumArgMembers,
+ unsigned HasCustomParsing, unsigned AcceptsExprPack,
+ unsigned IsTargetSpecific, unsigned IsType,
+ unsigned IsStmt, unsigned IsKnownToGCC,
+ unsigned IsSupportedByPragmaAttribute,
+ ArrayRef<Spelling> Spellings,
+ ArrayRef<const char *> ArgNames)
+ : AttrKind(AttrKind), NumArgs(NumArgs), OptArgs(OptArgs),
+ NumArgMembers(NumArgMembers), HasCustomParsing(HasCustomParsing),
+ AcceptsExprPack(AcceptsExprPack), IsTargetSpecific(IsTargetSpecific),
+ IsType(IsType), IsStmt(IsStmt), IsKnownToGCC(IsKnownToGCC),
+ IsSupportedByPragmaAttribute(IsSupportedByPragmaAttribute),
+ Spellings(Spellings), ArgNames(ArgNames) {}
+
+public:
+ virtual ~ParsedAttrInfo() = default;
+
+ /// Check if this attribute has specified spelling.
+ bool hasSpelling(AttributeCommonInfo::Syntax Syntax, StringRef Name) const {
+ return llvm::any_of(Spellings, [&](const Spelling &S) {
+ return (S.Syntax == Syntax && S.NormalizedFullName == Name);
+ });
+ }
+
+ /// Check if this attribute appertains to D, and issue a diagnostic if not.
+ virtual bool diagAppertainsToDecl(Sema &S, const ParsedAttr &Attr,
+ const Decl *D) const {
+ return true;
+ }
+ /// Check if this attribute appertains to St, and issue a diagnostic if not.
+ virtual bool diagAppertainsToStmt(Sema &S, const ParsedAttr &Attr,
+ const Stmt *St) const {
+ return true;
+ }
+ /// Check if the given attribute is mutually exclusive with other attributes
+ /// already applied to the given declaration.
+ virtual bool diagMutualExclusion(Sema &S, const ParsedAttr &A,
+ const Decl *D) const {
+ return true;
+ }
+ /// Check if this attribute is allowed by the language we are compiling.
+ virtual bool acceptsLangOpts(const LangOptions &LO) const { return true; }
+
+ /// Check if this attribute is allowed when compiling for the given target.
+ virtual bool existsInTarget(const TargetInfo &Target) const { return true; }
+
+ /// Check if this attribute's spelling is allowed when compiling for the given
+ /// target.
+ virtual bool spellingExistsInTarget(const TargetInfo &Target,
+ const unsigned SpellingListIndex) const {
+ return true;
+ }
+
+ /// Convert the spelling index of Attr to a semantic spelling enum value.
+ virtual unsigned
+ spellingIndexToSemanticSpelling(const ParsedAttr &Attr) const {
+ return UINT_MAX;
+ }
+ /// Returns true if the specified parameter index for this attribute in
+ /// Attr.td is an ExprArgument or VariadicExprArgument, or a subclass thereof;
+ /// returns false otherwise.
+ virtual bool isParamExpr(size_t N) const { return false; }
+ /// Populate Rules with the match rules of this attribute.
+ virtual void getPragmaAttributeMatchRules(
+ llvm::SmallVectorImpl<std::pair<attr::SubjectMatchRule, bool>> &Rules,
+ const LangOptions &LangOpts) const {}
+
+ enum AttrHandling { NotHandled, AttributeApplied, AttributeNotApplied };
+ /// If this ParsedAttrInfo knows how to handle this ParsedAttr applied to this
+ /// Decl then do so and return either AttributeApplied if it was applied or
+ /// AttributeNotApplied if it wasn't. Otherwise return NotHandled.
+ virtual AttrHandling handleDeclAttribute(Sema &S, Decl *D,
+ const ParsedAttr &Attr) const {
+ return NotHandled;
+ }
+
+ static const ParsedAttrInfo &get(const AttributeCommonInfo &A);
+ static ArrayRef<const ParsedAttrInfo *> getAllBuiltin();
+};
+
+typedef llvm::Registry<ParsedAttrInfo> ParsedAttrInfoRegistry;
+
+const std::list<std::unique_ptr<ParsedAttrInfo>> &getAttributePluginInstances();
+
+} // namespace clang
+
+#endif // LLVM_CLANG_BASIC_PARSEDATTRINFO_H
diff --git a/contrib/llvm-project/clang/include/clang/Basic/PartialDiagnostic.h b/contrib/llvm-project/clang/include/clang/Basic/PartialDiagnostic.h
index 9fb70bff7fee..507d789c54ff 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/PartialDiagnostic.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/PartialDiagnostic.h
@@ -28,9 +28,6 @@
namespace clang {
-class DeclContext;
-class IdentifierInfo;
-
class PartialDiagnostic : public StreamingDiagnostic {
private:
// NOTE: Sema assumes that PartialDiagnostic is location-invariant
@@ -67,8 +64,8 @@ public:
// It is necessary to limit this to rvalue reference to avoid calling this
// function with a bitfield lvalue argument since non-const reference to
// bitfield is not allowed.
- template <typename T, typename = typename std::enable_if<
- !std::is_lvalue_reference<T>::value>::type>
+ template <typename T,
+ typename = std::enable_if_t<!std::is_lvalue_reference<T>::value>>
const PartialDiagnostic &operator<<(T &&V) const {
const StreamingDiagnostic &DB = *this;
DB << std::move(V);
diff --git a/contrib/llvm-project/clang/include/clang/Basic/PlistSupport.h b/contrib/llvm-project/clang/include/clang/Basic/PlistSupport.h
index 557462a5b90d..d52d196019cf 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/PlistSupport.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/PlistSupport.h
@@ -77,8 +77,7 @@ inline raw_ostream &EmitInteger(raw_ostream &o, int64_t value) {
inline raw_ostream &EmitString(raw_ostream &o, StringRef s) {
o << "<string>";
- for (StringRef::const_iterator I = s.begin(), E = s.end(); I != E; ++I) {
- char c = *I;
+ for (char c : s) {
switch (c) {
default:
o << c;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/PragmaKinds.h b/contrib/llvm-project/clang/include/clang/Basic/PragmaKinds.h
index 82c0d5f0a551..42f049f7323d 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/PragmaKinds.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/PragmaKinds.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_BASIC_PRAGMA_KINDS_H
-#define LLVM_CLANG_BASIC_PRAGMA_KINDS_H
+#ifndef LLVM_CLANG_BASIC_PRAGMAKINDS_H
+#define LLVM_CLANG_BASIC_PRAGMAKINDS_H
namespace clang {
@@ -34,6 +34,14 @@ enum PragmaFloatControlKind {
PFC_Push, // #pragma float_control(push)
PFC_Pop // #pragma float_control(pop)
};
+
+enum PragmaFPKind {
+ PFK_Contract, // #pragma clang fp contract
+ PFK_Reassociate, // #pragma clang fp reassociate
+ PFK_Reciprocal, // #pragma clang fp reciprocal
+ PFK_Exceptions, // #pragma clang fp exceptions
+ PFK_EvalMethod // #pragma clang fp eval_method
+};
}
#endif
diff --git a/contrib/llvm-project/clang/include/clang/Basic/ProfileList.h b/contrib/llvm-project/clang/include/clang/Basic/ProfileList.h
index 989c36549a3d..b4217e49c18a 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/ProfileList.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/ProfileList.h
@@ -10,45 +10,54 @@
// functions.
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_BASIC_INSTRPROFLIST_H
-#define LLVM_CLANG_BASIC_INSTRPROFLIST_H
+#ifndef LLVM_CLANG_BASIC_PROFILELIST_H
+#define LLVM_CLANG_BASIC_PROFILELIST_H
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include <memory>
-
-namespace llvm {
-class SpecialCaseList;
-}
+#include <optional>
namespace clang {
class ProfileSpecialCaseList;
class ProfileList {
+public:
+ /// Represents if an how something should be excluded from profiling.
+ enum ExclusionType {
+ /// Profiling is allowed.
+ Allow,
+ /// Profiling is skipped using the \p skipprofile attribute.
+ Skip,
+ /// Profiling is forbidden using the \p noprofile attribute.
+ Forbid,
+ };
+
+private:
std::unique_ptr<ProfileSpecialCaseList> SCL;
const bool Empty;
- const bool Default;
SourceManager &SM;
+ std::optional<ExclusionType> inSection(StringRef Section, StringRef Prefix,
+ StringRef Query) const;
public:
ProfileList(ArrayRef<std::string> Paths, SourceManager &SM);
~ProfileList();
bool isEmpty() const { return Empty; }
- bool getDefault() const { return Default; }
+ ExclusionType getDefault(CodeGenOptions::ProfileInstrKind Kind) const;
- llvm::Optional<bool>
+ std::optional<ExclusionType>
isFunctionExcluded(StringRef FunctionName,
CodeGenOptions::ProfileInstrKind Kind) const;
- llvm::Optional<bool>
+ std::optional<ExclusionType>
isLocationExcluded(SourceLocation Loc,
CodeGenOptions::ProfileInstrKind Kind) const;
- llvm::Optional<bool>
+ std::optional<ExclusionType>
isFileExcluded(StringRef FileName,
CodeGenOptions::ProfileInstrKind Kind) const;
};
diff --git a/contrib/llvm-project/clang/include/clang/Basic/RISCVVTypes.def b/contrib/llvm-project/clang/include/clang/Basic/RISCVVTypes.def
index f6ef62a64636..6620de8ad50e 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/RISCVVTypes.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/RISCVVTypes.def
@@ -12,7 +12,8 @@
// A builtin type that has not been covered by any other #define
// Defining this macro covers all the builtins.
//
-// - RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, IsSigned, IsFP)
+// - RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, IsSigned, IsFP,
+// IsBF)
// A RISC-V V scalable vector.
//
// - RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)
@@ -30,8 +31,8 @@
//
// - ElBits is the size of one element in bits (SEW).
//
-// - NF is the number of fields (NFIELDS) used in the Zvlsseg instructions
-// (TODO).
+// - NF is the number of fields (NFIELDS) used in the Load/Store Segment
+// instructions (TODO).
//
// - IsSigned is true for vectors of signed integer elements and
// for vectors of floating-point elements.
@@ -40,8 +41,13 @@
//
//===----------------------------------------------------------------------===//
+#ifndef RVV_TYPE
+#define RVV_TYPE(Name, Id, SingletonId)
+#endif
+
#ifndef RVV_VECTOR_TYPE
-#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, IsFP)\
+#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \
+ IsFP, IsBF) \
RVV_TYPE(Name, Id, SingletonId)
#endif
@@ -51,13 +57,20 @@
#endif
#ifndef RVV_VECTOR_TYPE_INT
-#define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned) \
- RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, false)
+#define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, \
+ IsSigned) \
+ RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, false, \
+ false)
#endif
#ifndef RVV_VECTOR_TYPE_FLOAT
-#define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \
- RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, false, true)
+#define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \
+ RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, false, true, false)
+#endif
+
+#ifndef RVV_VECTOR_TYPE_BFLOAT
+#define RVV_VECTOR_TYPE_BFLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \
+ RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, false, false, true)
#endif
//===- Vector types -------------------------------------------------------===//
@@ -121,6 +134,19 @@ RVV_VECTOR_TYPE_FLOAT("__rvv_float16m2_t", RvvFloat16m2, RvvFloat16m2Ty, 8, 16,
RVV_VECTOR_TYPE_FLOAT("__rvv_float16m4_t", RvvFloat16m4, RvvFloat16m4Ty, 16, 16, 1)
RVV_VECTOR_TYPE_FLOAT("__rvv_float16m8_t", RvvFloat16m8, RvvFloat16m8Ty, 32, 16, 1)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16mf4_t", RvvBFloat16mf4, RvvBFloat16mf4Ty,
+ 1, 16, 1)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16mf2_t", RvvBFloat16mf2, RvvBFloat16mf2Ty,
+ 2, 16, 1)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16m1_t", RvvBFloat16m1, RvvBFloat16m1Ty, 4,
+ 16, 1)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16m2_t", RvvBFloat16m2, RvvBFloat16m2Ty, 8,
+ 16, 1)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16m4_t", RvvBFloat16m4, RvvBFloat16m4Ty, 16,
+ 16, 1)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16m8_t", RvvBFloat16m8, RvvBFloat16m8Ty, 32,
+ 16, 1)
+
RVV_VECTOR_TYPE_FLOAT("__rvv_float32mf2_t",RvvFloat32mf2,RvvFloat32mf2Ty,1, 32, 1)
RVV_VECTOR_TYPE_FLOAT("__rvv_float32m1_t", RvvFloat32m1, RvvFloat32m1Ty, 2, 32, 1)
RVV_VECTOR_TYPE_FLOAT("__rvv_float32m2_t", RvvFloat32m2, RvvFloat32m2Ty, 4, 32, 1)
@@ -140,6 +166,349 @@ RVV_PREDICATE_TYPE("__rvv_bool16_t", RvvBool16, RvvBool16Ty, 4)
RVV_PREDICATE_TYPE("__rvv_bool32_t", RvvBool32, RvvBool32Ty, 2)
RVV_PREDICATE_TYPE("__rvv_bool64_t", RvvBool64, RvvBool64Ty, 1)
+//===- Tuple vector types -------------------------------------------------===//
+//===- Int8 tuple types --------------------------------------------------===//
+RVV_VECTOR_TYPE_INT("__rvv_int8mf8x2_t", RvvInt8mf8x2, RvvInt8mf8x2Ty, 1, 8, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf8x3_t", RvvInt8mf8x3, RvvInt8mf8x3Ty, 1, 8, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf8x4_t", RvvInt8mf8x4, RvvInt8mf8x4Ty, 1, 8, 4, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf8x5_t", RvvInt8mf8x5, RvvInt8mf8x5Ty, 1, 8, 5, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf8x6_t", RvvInt8mf8x6, RvvInt8mf8x6Ty, 1, 8, 6, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf8x7_t", RvvInt8mf8x7, RvvInt8mf8x7Ty, 1, 8, 7, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf8x8_t", RvvInt8mf8x8, RvvInt8mf8x8Ty, 1, 8, 8, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int8mf4x2_t", RvvInt8mf4x2, RvvInt8mf4x2Ty, 2, 8, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf4x3_t", RvvInt8mf4x3, RvvInt8mf4x3Ty, 2, 8, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf4x4_t", RvvInt8mf4x4, RvvInt8mf4x4Ty, 2, 8, 4, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf4x5_t", RvvInt8mf4x5, RvvInt8mf4x5Ty, 2, 8, 5, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf4x6_t", RvvInt8mf4x6, RvvInt8mf4x6Ty, 2, 8, 6, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf4x7_t", RvvInt8mf4x7, RvvInt8mf4x7Ty, 2, 8, 7, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf4x8_t", RvvInt8mf4x8, RvvInt8mf4x8Ty, 2, 8, 8, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int8mf2x2_t", RvvInt8mf2x2, RvvInt8mf2x2Ty, 4, 8, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf2x3_t", RvvInt8mf2x3, RvvInt8mf2x3Ty, 4, 8, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf2x4_t", RvvInt8mf2x4, RvvInt8mf2x4Ty, 4, 8, 4, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf2x5_t", RvvInt8mf2x5, RvvInt8mf2x5Ty, 4, 8, 5, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf2x6_t", RvvInt8mf2x6, RvvInt8mf2x6Ty, 4, 8, 6, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf2x7_t", RvvInt8mf2x7, RvvInt8mf2x7Ty, 4, 8, 7, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf2x8_t", RvvInt8mf2x8, RvvInt8mf2x8Ty, 4, 8, 8, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int8m1x2_t", RvvInt8m1x2, RvvInt8m1x2Ty, 8, 8, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8m1x3_t", RvvInt8m1x3, RvvInt8m1x3Ty, 8, 8, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8m1x4_t", RvvInt8m1x4, RvvInt8m1x4Ty, 8, 8, 4, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8m1x5_t", RvvInt8m1x5, RvvInt8m1x5Ty, 8, 8, 5, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8m1x6_t", RvvInt8m1x6, RvvInt8m1x6Ty, 8, 8, 6, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8m1x7_t", RvvInt8m1x7, RvvInt8m1x7Ty, 8, 8, 7, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8m1x8_t", RvvInt8m1x8, RvvInt8m1x8Ty, 8, 8, 8, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int8m2x2_t", RvvInt8m2x2, RvvInt8m2x2Ty, 16, 8, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8m2x3_t", RvvInt8m2x3, RvvInt8m2x3Ty, 16, 8, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8m2x4_t", RvvInt8m2x4, RvvInt8m2x4Ty, 16, 8, 4, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int8m4x2_t", RvvInt8m4x2, RvvInt8m4x2Ty, 32, 8, 2, true)
+
+//===- Uint8 tuple types -------------------------------------------------===//
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf8x2_t", RvvUint8mf8x2, RvvUint8mf8x2Ty, 1, 8, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf8x3_t", RvvUint8mf8x3, RvvUint8mf8x3Ty, 1, 8, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf8x4_t", RvvUint8mf8x4, RvvUint8mf8x4Ty, 1, 8, 4, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf8x5_t", RvvUint8mf8x5, RvvUint8mf8x5Ty, 1, 8, 5, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf8x6_t", RvvUint8mf8x6, RvvUint8mf8x6Ty, 1, 8, 6, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf8x7_t", RvvUint8mf8x7, RvvUint8mf8x7Ty, 1, 8, 7, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf8x8_t", RvvUint8mf8x8, RvvUint8mf8x8Ty, 1, 8, 8, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf4x2_t", RvvUint8mf4x2, RvvUint8mf4x2Ty, 2, 8, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf4x3_t", RvvUint8mf4x3, RvvUint8mf4x3Ty, 2, 8, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf4x4_t", RvvUint8mf4x4, RvvUint8mf4x4Ty, 2, 8, 4, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf4x5_t", RvvUint8mf4x5, RvvUint8mf4x5Ty, 2, 8, 5, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf4x6_t", RvvUint8mf4x6, RvvUint8mf4x6Ty, 2, 8, 6, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf4x7_t", RvvUint8mf4x7, RvvUint8mf4x7Ty, 2, 8, 7, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf4x8_t", RvvUint8mf4x8, RvvUint8mf4x8Ty, 2, 8, 8, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf2x2_t", RvvUint8mf2x2, RvvUint8mf2x2Ty, 4, 8, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf2x3_t", RvvUint8mf2x3, RvvUint8mf2x3Ty, 4, 8, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf2x4_t", RvvUint8mf2x4, RvvUint8mf2x4Ty, 4, 8, 4, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf2x5_t", RvvUint8mf2x5, RvvUint8mf2x5Ty, 4, 8, 5, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf2x6_t", RvvUint8mf2x6, RvvUint8mf2x6Ty, 4, 8, 6, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf2x7_t", RvvUint8mf2x7, RvvUint8mf2x7Ty, 4, 8, 7, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf2x8_t", RvvUint8mf2x8, RvvUint8mf2x8Ty, 4, 8, 8, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint8m1x2_t", RvvUint8m1x2, RvvUint8m1x2Ty, 8, 8, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8m1x3_t", RvvUint8m1x3, RvvUint8m1x3Ty, 8, 8, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8m1x4_t", RvvUint8m1x4, RvvUint8m1x4Ty, 8, 8, 4, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8m1x5_t", RvvUint8m1x5, RvvUint8m1x5Ty, 8, 8, 5, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8m1x6_t", RvvUint8m1x6, RvvUint8m1x6Ty, 8, 8, 6, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8m1x7_t", RvvUint8m1x7, RvvUint8m1x7Ty, 8, 8, 7, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8m1x8_t", RvvUint8m1x8, RvvUint8m1x8Ty, 8, 8, 8, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint8m2x2_t", RvvUint8m2x2, RvvUint8m2x2Ty, 16, 8, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8m2x3_t", RvvUint8m2x3, RvvUint8m2x3Ty, 16, 8, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8m2x4_t", RvvUint8m2x4, RvvUint8m2x4Ty, 16, 8, 4, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint8m4x2_t", RvvUint8m4x2, RvvUint8m4x2Ty, 32, 8, 2, false)
+
+//===- Int16 tuple types --------------------------------------------------===//
+RVV_VECTOR_TYPE_INT("__rvv_int16mf4x2_t", RvvInt16mf4x2, RvvInt16mf4x2Ty, 1, 16, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf4x3_t", RvvInt16mf4x3, RvvInt16mf4x3Ty, 1, 16, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf4x4_t", RvvInt16mf4x4, RvvInt16mf4x4Ty, 1, 16, 4, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf4x5_t", RvvInt16mf4x5, RvvInt16mf4x5Ty, 1, 16, 5, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf4x6_t", RvvInt16mf4x6, RvvInt16mf4x6Ty, 1, 16, 6, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf4x7_t", RvvInt16mf4x7, RvvInt16mf4x7Ty, 1, 16, 7, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf4x8_t", RvvInt16mf4x8, RvvInt16mf4x8Ty, 1, 16, 8, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int16mf2x2_t", RvvInt16mf2x2, RvvInt16mf2x2Ty, 2, 16, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf2x3_t", RvvInt16mf2x3, RvvInt16mf2x3Ty, 2, 16, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf2x4_t", RvvInt16mf2x4, RvvInt16mf2x4Ty, 2, 16, 4, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf2x5_t", RvvInt16mf2x5, RvvInt16mf2x5Ty, 2, 16, 5, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf2x6_t", RvvInt16mf2x6, RvvInt16mf2x6Ty, 2, 16, 6, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf2x7_t", RvvInt16mf2x7, RvvInt16mf2x7Ty, 2, 16, 7, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf2x8_t", RvvInt16mf2x8, RvvInt16mf2x8Ty, 2, 16, 8, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int16m1x2_t", RvvInt16m1x2, RvvInt16m1x2Ty, 4, 16, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16m1x3_t", RvvInt16m1x3, RvvInt16m1x3Ty, 4, 16, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16m1x4_t", RvvInt16m1x4, RvvInt16m1x4Ty, 4, 16, 4, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16m1x5_t", RvvInt16m1x5, RvvInt16m1x5Ty, 4, 16, 5, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16m1x6_t", RvvInt16m1x6, RvvInt16m1x6Ty, 4, 16, 6, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16m1x7_t", RvvInt16m1x7, RvvInt16m1x7Ty, 4, 16, 7, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16m1x8_t", RvvInt16m1x8, RvvInt16m1x8Ty, 4, 16, 8, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int16m2x2_t", RvvInt16m2x2, RvvInt16m2x2Ty, 8, 16, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16m2x3_t", RvvInt16m2x3, RvvInt16m2x3Ty, 8, 16, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16m2x4_t", RvvInt16m2x4, RvvInt16m2x4Ty, 8, 16, 4, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int16m4x2_t", RvvInt16m4x2, RvvInt16m4x2Ty, 16, 16, 2, true)
+
+//===- Uint16 tuple types -------------------------------------------------===//
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf4x2_t", RvvUint16mf4x2, RvvUint16mf4x2Ty, 1, 16, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf4x3_t", RvvUint16mf4x3, RvvUint16mf4x3Ty, 1, 16, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf4x4_t", RvvUint16mf4x4, RvvUint16mf4x4Ty, 1, 16, 4, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf4x5_t", RvvUint16mf4x5, RvvUint16mf4x5Ty, 1, 16, 5, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf4x6_t", RvvUint16mf4x6, RvvUint16mf4x6Ty, 1, 16, 6, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf4x7_t", RvvUint16mf4x7, RvvUint16mf4x7Ty, 1, 16, 7, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf4x8_t", RvvUint16mf4x8, RvvUint16mf4x8Ty, 1, 16, 8, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf2x2_t", RvvUint16mf2x2, RvvUint16mf2x2Ty, 2, 16, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf2x3_t", RvvUint16mf2x3, RvvUint16mf2x3Ty, 2, 16, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf2x4_t", RvvUint16mf2x4, RvvUint16mf2x4Ty, 2, 16, 4, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf2x5_t", RvvUint16mf2x5, RvvUint16mf2x5Ty, 2, 16, 5, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf2x6_t", RvvUint16mf2x6, RvvUint16mf2x6Ty, 2, 16, 6, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf2x7_t", RvvUint16mf2x7, RvvUint16mf2x7Ty, 2, 16, 7, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf2x8_t", RvvUint16mf2x8, RvvUint16mf2x8Ty, 2, 16, 8, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint16m1x2_t", RvvUint16m1x2, RvvUint16m1x2Ty, 4, 16, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16m1x3_t", RvvUint16m1x3, RvvUint16m1x3Ty, 4, 16, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16m1x4_t", RvvUint16m1x4, RvvUint16m1x4Ty, 4, 16, 4, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16m1x5_t", RvvUint16m1x5, RvvUint16m1x5Ty, 4, 16, 5, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16m1x6_t", RvvUint16m1x6, RvvUint16m1x6Ty, 4, 16, 6, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16m1x7_t", RvvUint16m1x7, RvvUint16m1x7Ty, 4, 16, 7, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16m1x8_t", RvvUint16m1x8, RvvUint16m1x8Ty, 4, 16, 8, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint16m2x2_t", RvvUint16m2x2, RvvUint16m2x2Ty, 8, 16, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16m2x3_t", RvvUint16m2x3, RvvUint16m2x3Ty, 8, 16, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16m2x4_t", RvvUint16m2x4, RvvUint16m2x4Ty, 8, 16, 4, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint16m4x2_t", RvvUint16m4x2, RvvUint16m4x2Ty, 16, 16, 2, false)
+
+//===- Int32 tuple types --------------------------------------------------===//
+RVV_VECTOR_TYPE_INT("__rvv_int32mf2x2_t", RvvInt32mf2x2, RvvInt32mf2x2Ty, 1, 32, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32mf2x3_t", RvvInt32mf2x3, RvvInt32mf2x3Ty, 1, 32, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32mf2x4_t", RvvInt32mf2x4, RvvInt32mf2x4Ty, 1, 32, 4, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32mf2x5_t", RvvInt32mf2x5, RvvInt32mf2x5Ty, 1, 32, 5, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32mf2x6_t", RvvInt32mf2x6, RvvInt32mf2x6Ty, 1, 32, 6, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32mf2x7_t", RvvInt32mf2x7, RvvInt32mf2x7Ty, 1, 32, 7, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32mf2x8_t", RvvInt32mf2x8, RvvInt32mf2x8Ty, 1, 32, 8, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int32m1x2_t", RvvInt32m1x2, RvvInt32m1x2Ty, 2, 32, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32m1x3_t", RvvInt32m1x3, RvvInt32m1x3Ty, 2, 32, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32m1x4_t", RvvInt32m1x4, RvvInt32m1x4Ty, 2, 32, 4, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32m1x5_t", RvvInt32m1x5, RvvInt32m1x5Ty, 2, 32, 5, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32m1x6_t", RvvInt32m1x6, RvvInt32m1x6Ty, 2, 32, 6, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32m1x7_t", RvvInt32m1x7, RvvInt32m1x7Ty, 2, 32, 7, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32m1x8_t", RvvInt32m1x8, RvvInt32m1x8Ty, 2, 32, 8, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int32m2x2_t", RvvInt32m2x2, RvvInt32m2x2Ty, 4, 32, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32m2x3_t", RvvInt32m2x3, RvvInt32m2x3Ty, 4, 32, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32m2x4_t", RvvInt32m2x4, RvvInt32m2x4Ty, 4, 32, 4, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int32m4x2_t", RvvInt32m4x2, RvvInt32m4x2Ty, 8, 32, 2, true)
+
+//===- Uint32 tuple types -------------------------------------------------===//
+RVV_VECTOR_TYPE_INT("__rvv_uint32mf2x2_t", RvvUint32mf2x2, RvvUint32mf2x2Ty, 1, 32, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32mf2x3_t", RvvUint32mf2x3, RvvUint32mf2x3Ty, 1, 32, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32mf2x4_t", RvvUint32mf2x4, RvvUint32mf2x4Ty, 1, 32, 4, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32mf2x5_t", RvvUint32mf2x5, RvvUint32mf2x5Ty, 1, 32, 5, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32mf2x6_t", RvvUint32mf2x6, RvvUint32mf2x6Ty, 1, 32, 6, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32mf2x7_t", RvvUint32mf2x7, RvvUint32mf2x7Ty, 1, 32, 7, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32mf2x8_t", RvvUint32mf2x8, RvvUint32mf2x8Ty, 1, 32, 8, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint32m1x2_t", RvvUint32m1x2, RvvUint32m1x2Ty, 2, 32, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32m1x3_t", RvvUint32m1x3, RvvUint32m1x3Ty, 2, 32, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32m1x4_t", RvvUint32m1x4, RvvUint32m1x4Ty, 2, 32, 4, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32m1x5_t", RvvUint32m1x5, RvvUint32m1x5Ty, 2, 32, 5, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32m1x6_t", RvvUint32m1x6, RvvUint32m1x6Ty, 2, 32, 6, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32m1x7_t", RvvUint32m1x7, RvvUint32m1x7Ty, 2, 32, 7, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32m1x8_t", RvvUint32m1x8, RvvUint32m1x8Ty, 2, 32, 8, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint32m2x2_t", RvvUint32m2x2, RvvUint32m2x2Ty, 4, 32, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32m2x3_t", RvvUint32m2x3, RvvUint32m2x3Ty, 4, 32, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32m2x4_t", RvvUint32m2x4, RvvUint32m2x4Ty, 4, 32, 4, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint32m4x2_t", RvvUint32m4x2, RvvUint32m4x2Ty, 8, 32, 2, false)
+
+//===- Int64 tuple types -------------------------------------------------===//
+RVV_VECTOR_TYPE_INT("__rvv_int64m1x2_t", RvvInt64m1x2, RvvInt64m1x2Ty, 1, 64, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int64m1x3_t", RvvInt64m1x3, RvvInt64m1x3Ty, 1, 64, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int64m1x4_t", RvvInt64m1x4, RvvInt64m1x4Ty, 1, 64, 4, true)
+RVV_VECTOR_TYPE_INT("__rvv_int64m1x5_t", RvvInt64m1x5, RvvInt64m1x5Ty, 1, 64, 5, true)
+RVV_VECTOR_TYPE_INT("__rvv_int64m1x6_t", RvvInt64m1x6, RvvInt64m1x6Ty, 1, 64, 6, true)
+RVV_VECTOR_TYPE_INT("__rvv_int64m1x7_t", RvvInt64m1x7, RvvInt64m1x7Ty, 1, 64, 7, true)
+RVV_VECTOR_TYPE_INT("__rvv_int64m1x8_t", RvvInt64m1x8, RvvInt64m1x8Ty, 1, 64, 8, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int64m2x2_t", RvvInt64m2x2, RvvInt64m2x2Ty, 2, 64, 2, true)
+RVV_VECTOR_TYPE_INT("__rvv_int64m2x3_t", RvvInt64m2x3, RvvInt64m2x3Ty, 2, 64, 3, true)
+RVV_VECTOR_TYPE_INT("__rvv_int64m2x4_t", RvvInt64m2x4, RvvInt64m2x4Ty, 2, 64, 4, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_int64m4x2_t", RvvInt64m4x2, RvvInt64m4x2Ty, 4, 64, 2, true)
+
+//===- Uint64 tuple types -------------------------------------------------===//
+RVV_VECTOR_TYPE_INT("__rvv_uint64m1x2_t", RvvUint64m1x2, RvvUint64m1x2Ty, 1, 64, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint64m1x3_t", RvvUint64m1x3, RvvUint64m1x3Ty, 1, 64, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint64m1x4_t", RvvUint64m1x4, RvvUint64m1x4Ty, 1, 64, 4, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint64m1x5_t", RvvUint64m1x5, RvvUint64m1x5Ty, 1, 64, 5, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint64m1x6_t", RvvUint64m1x6, RvvUint64m1x6Ty, 1, 64, 6, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint64m1x7_t", RvvUint64m1x7, RvvUint64m1x7Ty, 1, 64, 7, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint64m1x8_t", RvvUint64m1x8, RvvUint64m1x8Ty, 1, 64, 8, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint64m2x2_t", RvvUint64m2x2, RvvUint64m2x2Ty, 2, 64, 2, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint64m2x3_t", RvvUint64m2x3, RvvUint64m2x3Ty, 2, 64, 3, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint64m2x4_t", RvvUint64m2x4, RvvUint64m2x4Ty, 2, 64, 4, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint64m4x2_t", RvvUint64m4x2, RvvUint64m4x2Ty, 4, 64, 2, false)
+
+//===- Float16 tuple types --------------------------------------------------===//
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf4x2_t", RvvFloat16mf4x2, RvvFloat16mf4x2Ty, 1, 16, 2)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf4x3_t", RvvFloat16mf4x3, RvvFloat16mf4x3Ty, 1, 16, 3)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf4x4_t", RvvFloat16mf4x4, RvvFloat16mf4x4Ty, 1, 16, 4)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf4x5_t", RvvFloat16mf4x5, RvvFloat16mf4x5Ty, 1, 16, 5)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf4x6_t", RvvFloat16mf4x6, RvvFloat16mf4x6Ty, 1, 16, 6)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf4x7_t", RvvFloat16mf4x7, RvvFloat16mf4x7Ty, 1, 16, 7)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf4x8_t", RvvFloat16mf4x8, RvvFloat16mf4x8Ty, 1, 16, 8)
+
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf2x2_t", RvvFloat16mf2x2, RvvFloat16mf2x2Ty, 2, 16, 2)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf2x3_t", RvvFloat16mf2x3, RvvFloat16mf2x3Ty, 2, 16, 3)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf2x4_t", RvvFloat16mf2x4, RvvFloat16mf2x4Ty, 2, 16, 4)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf2x5_t", RvvFloat16mf2x5, RvvFloat16mf2x5Ty, 2, 16, 5)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf2x6_t", RvvFloat16mf2x6, RvvFloat16mf2x6Ty, 2, 16, 6)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf2x7_t", RvvFloat16mf2x7, RvvFloat16mf2x7Ty, 2, 16, 7)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf2x8_t", RvvFloat16mf2x8, RvvFloat16mf2x8Ty, 2, 16, 8)
+
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m1x2_t", RvvFloat16m1x2, RvvFloat16m1x2Ty, 4, 16, 2)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m1x3_t", RvvFloat16m1x3, RvvFloat16m1x3Ty, 4, 16, 3)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m1x4_t", RvvFloat16m1x4, RvvFloat16m1x4Ty, 4, 16, 4)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m1x5_t", RvvFloat16m1x5, RvvFloat16m1x5Ty, 4, 16, 5)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m1x6_t", RvvFloat16m1x6, RvvFloat16m1x6Ty, 4, 16, 6)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m1x7_t", RvvFloat16m1x7, RvvFloat16m1x7Ty, 4, 16, 7)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m1x8_t", RvvFloat16m1x8, RvvFloat16m1x8Ty, 4, 16, 8)
+
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m2x2_t", RvvFloat16m2x2, RvvFloat16m2x2Ty, 8, 16, 2)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m2x3_t", RvvFloat16m2x3, RvvFloat16m2x3Ty, 8, 16, 3)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m2x4_t", RvvFloat16m2x4, RvvFloat16m2x4Ty, 8, 16, 4)
+
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m4x2_t", RvvFloat16m4x2, RvvFloat16m4x2Ty, 16, 16, 2)
+
+//===- Float32 tuple types --------------------------------------------------===//
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32mf2x2_t", RvvFloat32mf2x2, RvvFloat32mf2x2Ty, 1, 32, 2)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32mf2x3_t", RvvFloat32mf2x3, RvvFloat32mf2x3Ty, 1, 32, 3)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32mf2x4_t", RvvFloat32mf2x4, RvvFloat32mf2x4Ty, 1, 32, 4)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32mf2x5_t", RvvFloat32mf2x5, RvvFloat32mf2x5Ty, 1, 32, 5)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32mf2x6_t", RvvFloat32mf2x6, RvvFloat32mf2x6Ty, 1, 32, 6)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32mf2x7_t", RvvFloat32mf2x7, RvvFloat32mf2x7Ty, 1, 32, 7)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32mf2x8_t", RvvFloat32mf2x8, RvvFloat32mf2x8Ty, 1, 32, 8)
+
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m1x2_t", RvvFloat32m1x2, RvvFloat32m1x2Ty, 2, 32, 2)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m1x3_t", RvvFloat32m1x3, RvvFloat32m1x3Ty, 2, 32, 3)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m1x4_t", RvvFloat32m1x4, RvvFloat32m1x4Ty, 2, 32, 4)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m1x5_t", RvvFloat32m1x5, RvvFloat32m1x5Ty, 2, 32, 5)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m1x6_t", RvvFloat32m1x6, RvvFloat32m1x6Ty, 2, 32, 6)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m1x7_t", RvvFloat32m1x7, RvvFloat32m1x7Ty, 2, 32, 7)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m1x8_t", RvvFloat32m1x8, RvvFloat32m1x8Ty, 2, 32, 8)
+
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m2x2_t", RvvFloat32m2x2, RvvFloat32m2x2Ty, 4, 32, 2)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m2x3_t", RvvFloat32m2x3, RvvFloat32m2x3Ty, 4, 32, 3)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m2x4_t", RvvFloat32m2x4, RvvFloat32m2x4Ty, 4, 32, 4)
+
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m4x2_t", RvvFloat32m4x2, RvvFloat32m4x2Ty, 8, 32, 2)
+
+//===- Float64 tuple types -------------------------------------------------===//
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m1x2_t", RvvFloat64m1x2, RvvFloat64m1x2Ty, 1, 64, 2)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m1x3_t", RvvFloat64m1x3, RvvFloat64m1x3Ty, 1, 64, 3)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m1x4_t", RvvFloat64m1x4, RvvFloat64m1x4Ty, 1, 64, 4)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m1x5_t", RvvFloat64m1x5, RvvFloat64m1x5Ty, 1, 64, 5)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m1x6_t", RvvFloat64m1x6, RvvFloat64m1x6Ty, 1, 64, 6)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m1x7_t", RvvFloat64m1x7, RvvFloat64m1x7Ty, 1, 64, 7)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m1x8_t", RvvFloat64m1x8, RvvFloat64m1x8Ty, 1, 64, 8)
+
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m2x2_t", RvvFloat64m2x2, RvvFloat64m2x2Ty, 2, 64, 2)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m2x3_t", RvvFloat64m2x3, RvvFloat64m2x3Ty, 2, 64, 3)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m2x4_t", RvvFloat64m2x4, RvvFloat64m2x4Ty, 2, 64, 4)
+
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m4x2_t", RvvFloat64m4x2, RvvFloat64m4x2Ty, 4, 64, 2)
+
+//===- BFloat16 tuple types -------------------------------------------------===//
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16mf4x2_t", RvvBFloat16mf4x2, RvvBFloat16mf4x2Ty,
+ 1, 16, 2)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16mf4x3_t", RvvBFloat16mf4x3, RvvBFloat16mf4x3Ty,
+ 1, 16, 3)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16mf4x4_t", RvvBFloat16mf4x4, RvvBFloat16mf4x4Ty,
+ 1, 16, 4)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16mf4x5_t", RvvBFloat16mf4x5, RvvBFloat16mf4x5Ty,
+ 1, 16, 5)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16mf4x6_t", RvvBFloat16mf4x6, RvvBFloat16mf4x6Ty,
+ 1, 16, 6)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16mf4x7_t", RvvBFloat16mf4x7, RvvBFloat16mf4x7Ty,
+ 1, 16, 7)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16mf4x8_t", RvvBFloat16mf4x8, RvvBFloat16mf4x8Ty,
+ 1, 16, 8)
+
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16mf2x2_t", RvvBFloat16mf2x2, RvvBFloat16mf2x2Ty,
+ 2, 16, 2)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16mf2x3_t", RvvBFloat16mf2x3, RvvBFloat16mf2x3Ty,
+ 2, 16, 3)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16mf2x4_t", RvvBFloat16mf2x4, RvvBFloat16mf2x4Ty,
+ 2, 16, 4)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16mf2x5_t", RvvBFloat16mf2x5, RvvBFloat16mf2x5Ty,
+ 2, 16, 5)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16mf2x6_t", RvvBFloat16mf2x6, RvvBFloat16mf2x6Ty,
+ 2, 16, 6)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16mf2x7_t", RvvBFloat16mf2x7, RvvBFloat16mf2x7Ty,
+ 2, 16, 7)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16mf2x8_t", RvvBFloat16mf2x8, RvvBFloat16mf2x8Ty,
+ 2, 16, 8)
+
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16m1x2_t", RvvBFloat16m1x2, RvvBFloat16m1x2Ty,
+ 4, 16, 2)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16m1x3_t", RvvBFloat16m1x3, RvvBFloat16m1x3Ty,
+ 4, 16, 3)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16m1x4_t", RvvBFloat16m1x4, RvvBFloat16m1x4Ty,
+ 4, 16, 4)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16m1x5_t", RvvBFloat16m1x5, RvvBFloat16m1x5Ty,
+ 4, 16, 5)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16m1x6_t", RvvBFloat16m1x6, RvvBFloat16m1x6Ty,
+ 4, 16, 6)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16m1x7_t", RvvBFloat16m1x7, RvvBFloat16m1x7Ty,
+ 4, 16, 7)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16m1x8_t", RvvBFloat16m1x8, RvvBFloat16m1x8Ty,
+ 4, 16, 8)
+
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16m2x2_t", RvvBFloat16m2x2, RvvBFloat16m2x2Ty,
+ 8, 16, 2)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16m2x3_t", RvvBFloat16m2x3, RvvBFloat16m2x3Ty,
+ 8, 16, 3)
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16m2x4_t", RvvBFloat16m2x4, RvvBFloat16m2x4Ty,
+ 8, 16, 4)
+
+RVV_VECTOR_TYPE_BFLOAT("__rvv_bfloat16m4x2_t", RvvBFloat16m4x2, RvvBFloat16m4x2Ty,
+ 16, 16, 2)
+
+#undef RVV_VECTOR_TYPE_BFLOAT
#undef RVV_VECTOR_TYPE_FLOAT
#undef RVV_VECTOR_TYPE_INT
#undef RVV_VECTOR_TYPE
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Sanitizers.def b/contrib/llvm-project/clang/include/clang/Basic/Sanitizers.def
index 9b8936cc520c..c2137e3f61f6 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Sanitizers.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/Sanitizers.def
@@ -56,7 +56,10 @@ SANITIZER("hwaddress", HWAddress)
SANITIZER("kernel-hwaddress", KernelHWAddress)
// A variant of AddressSanitizer using AArch64 MTE extension.
-SANITIZER("memtag", MemTag)
+SANITIZER("memtag-stack", MemtagStack)
+SANITIZER("memtag-heap", MemtagHeap)
+SANITIZER("memtag-globals", MemtagGlobals)
+SANITIZER_GROUP("memtag", MemTag, MemtagStack | MemtagHeap | MemtagGlobals)
// MemorySanitizer
SANITIZER("memory", Memory)
@@ -124,6 +127,9 @@ SANITIZER_GROUP("cfi", CFI,
CFIDerivedCast | CFIICall | CFIMFCall | CFIUnrelatedCast |
CFINVCall | CFIVCall)
+// Kernel Control Flow Integrity
+SANITIZER("kcfi", KCFI)
+
// Safe Stack
SANITIZER("safe-stack", SafeStack)
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Sanitizers.h b/contrib/llvm-project/clang/include/clang/Basic/Sanitizers.h
index b12a3b7821d7..c890242269b3 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Sanitizers.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Sanitizers.h
@@ -16,13 +16,18 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/HashBuilder.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h"
#include <cassert>
#include <cstdint>
namespace llvm {
class hash_code;
+class Triple;
+namespace opt {
+class ArgList;
}
+} // namespace llvm
namespace clang {
@@ -72,6 +77,12 @@ public:
llvm::hash_code hash_value() const;
+ template <typename HasherT, llvm::endianness Endianness>
+ friend void addHash(llvm::HashBuilder<HasherT, Endianness> &HBuilder,
+ const SanitizerMask &SM) {
+ HBuilder.addRange(&SM.maskLoToHigh[0], &SM.maskLoToHigh[kNumElem]);
+ }
+
constexpr explicit operator bool() const {
return maskLoToHigh[0] || maskLoToHigh[1];
}
@@ -159,6 +170,8 @@ struct SanitizerSet {
Mask = Value ? (Mask | K) : (Mask & ~K);
}
+ void set(SanitizerMask K) { Mask = K; }
+
/// Disable the sanitizers specified in \p K.
void clear(SanitizerMask K = SanitizerKind::All) { Mask &= ~K; }
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Sarif.h b/contrib/llvm-project/clang/include/clang/Basic/Sarif.h
new file mode 100644
index 000000000000..e6c46224b316
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/Sarif.h
@@ -0,0 +1,513 @@
+//== clang/Basic/Sarif.h - SARIF Diagnostics Object Model -------*- C++ -*--==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// Defines clang::SarifDocumentWriter, clang::SarifRule, clang::SarifResult.
+///
+/// The document built can be accessed as a JSON Object.
+/// Several value semantic types are also introduced which represent properties
+/// of the SARIF standard, such as 'artifact', 'result', 'rule'.
+///
+/// A SARIF (Static Analysis Results Interchange Format) document is JSON
+/// document that describes in detail the results of running static analysis
+/// tools on a project. Each (non-trivial) document consists of at least one
+/// "run", which are themselves composed of details such as:
+/// * Tool: The tool that was run
+/// * Rules: The rules applied during the tool run, represented by
+/// \c reportingDescriptor objects in SARIF
+/// * Results: The matches for the rules applied against the project(s) being
+/// evaluated, represented by \c result objects in SARIF
+///
+/// Reference:
+/// 1. <a href="https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html">The SARIF standard</a>
+/// 2. <a href="https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317836">SARIF<pre>reportingDescriptor</pre></a>
+/// 3. <a href="https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317638">SARIF<pre>result</pre></a>
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_SARIF_H
+#define LLVM_CLANG_BASIC_SARIF_H
+
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/Version.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/JSON.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <initializer_list>
+#include <optional>
+#include <string>
+
+namespace clang {
+
+class SarifDocumentWriter;
+class SourceManager;
+
+namespace detail {
+
+/// \internal
+/// An artifact location is SARIF's way of describing the complete location
+/// of an artifact encountered during analysis. The \c artifactLocation object
+/// typically consists of a URI, and/or an index to reference the artifact it
+/// locates.
+///
+/// This builder makes an additional assumption: that every artifact encountered
+/// by \c clang will be a physical, top-level artifact. Which is why the static
+/// creation method \ref SarifArtifactLocation::create takes a mandatory URI
+/// parameter. The official standard states that either a \c URI or \c Index
+/// must be available in the object, \c clang picks the \c URI as a reasonable
+/// default, because it intends to deal in physical artifacts for now.
+///
+/// Reference:
+/// 1. <a href="https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317427">artifactLocation object</a>
+/// 2. \ref SarifArtifact
+class SarifArtifactLocation {
+private:
+ friend class clang::SarifDocumentWriter;
+
+ std::optional<uint32_t> Index;
+ std::string URI;
+
+ SarifArtifactLocation() = delete;
+ explicit SarifArtifactLocation(const std::string &URI) : URI(URI) {}
+
+public:
+ static SarifArtifactLocation create(llvm::StringRef URI) {
+ return SarifArtifactLocation{URI.str()};
+ }
+
+ SarifArtifactLocation setIndex(uint32_t Idx) {
+ Index = Idx;
+ return *this;
+ }
+};
+
+/// \internal
+/// An artifact in SARIF is any object (a sequence of bytes) addressable by
+/// a URI (RFC 3986). The most common type of artifact for clang's use-case
+/// would be source files. SARIF's artifact object is described in detail in
+/// section 3.24.
+//
+/// Since every clang artifact MUST have a location (there being no nested
+/// artifacts), the creation method \ref SarifArtifact::create requires a
+/// \ref SarifArtifactLocation object.
+///
+/// Reference:
+/// 1. <a href="https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317611">artifact object</a>
+class SarifArtifact {
+private:
+ friend class clang::SarifDocumentWriter;
+
+ std::optional<uint32_t> Offset;
+ std::optional<size_t> Length;
+ std::string MimeType;
+ SarifArtifactLocation Location;
+ llvm::SmallVector<std::string, 4> Roles;
+
+ SarifArtifact() = delete;
+
+ explicit SarifArtifact(const SarifArtifactLocation &Loc) : Location(Loc) {}
+
+public:
+ static SarifArtifact create(const SarifArtifactLocation &Loc) {
+ return SarifArtifact{Loc};
+ }
+
+ SarifArtifact setOffset(uint32_t ArtifactOffset) {
+ Offset = ArtifactOffset;
+ return *this;
+ }
+
+ SarifArtifact setLength(size_t NumBytes) {
+ Length = NumBytes;
+ return *this;
+ }
+
+ SarifArtifact setRoles(std::initializer_list<llvm::StringRef> ArtifactRoles) {
+ Roles.assign(ArtifactRoles.begin(), ArtifactRoles.end());
+ return *this;
+ }
+
+ SarifArtifact setMimeType(llvm::StringRef ArtifactMimeType) {
+ MimeType = ArtifactMimeType.str();
+ return *this;
+ }
+};
+
+} // namespace detail
+
+enum class ThreadFlowImportance { Important, Essential, Unimportant };
+
+/// The level of severity associated with a \ref SarifResult.
+///
+/// Of all the levels, \c None is the only one that is not associated with
+/// a failure.
+///
+/// A typical mapping for clang's DiagnosticKind to SarifResultLevel would look
+/// like:
+/// * \c None: \ref clang::DiagnosticsEngine::Level::Remark, \ref clang::DiagnosticsEngine::Level::Ignored
+/// * \c Note: \ref clang::DiagnosticsEngine::Level::Note
+/// * \c Warning: \ref clang::DiagnosticsEngine::Level::Warning
+/// * \c Error could be generated from one of:
+/// - \ref clang::DiagnosticsEngine::Level::Warning with \c -Werror
+/// - \ref clang::DiagnosticsEngine::Level::Error
+/// - \ref clang::DiagnosticsEngine::Level::Fatal when \ref clang::DiagnosticsEngine::ErrorsAsFatal is set.
+///
+/// Reference:
+/// 1. <a href="https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317648">level property</a>
+enum class SarifResultLevel { None, Note, Warning, Error };
+
+/// A thread flow is a sequence of code locations that specify a possible path
+/// through a single thread of execution.
+/// A thread flow in SARIF is related to a code flow which describes
+/// the progress of one or more programs through one or more thread flows.
+///
+/// Reference:
+/// 1. <a href="https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317744">threadFlow object</a>
+/// 2. <a href="https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317740">codeFlow object</a>
+class ThreadFlow {
+ friend class SarifDocumentWriter;
+
+ CharSourceRange Range;
+ ThreadFlowImportance Importance;
+ std::string Message;
+
+ ThreadFlow() = default;
+
+public:
+ static ThreadFlow create() { return {}; }
+
+ ThreadFlow setRange(const CharSourceRange &ItemRange) {
+ assert(ItemRange.isCharRange() &&
+ "ThreadFlows require a character granular source range!");
+ Range = ItemRange;
+ return *this;
+ }
+
+ ThreadFlow setImportance(const ThreadFlowImportance &ItemImportance) {
+ Importance = ItemImportance;
+ return *this;
+ }
+
+ ThreadFlow setMessage(llvm::StringRef ItemMessage) {
+ Message = ItemMessage.str();
+ return *this;
+ }
+};
+
+/// A SARIF Reporting Configuration (\c reportingConfiguration) object contains
+/// properties for a \ref SarifRule that can be configured at runtime before
+/// analysis begins.
+///
+/// Reference:
+/// 1. <a href="https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317852">reportingConfiguration object</a>
+class SarifReportingConfiguration {
+ friend class clang::SarifDocumentWriter;
+
+ bool Enabled = true;
+ SarifResultLevel Level = SarifResultLevel::Warning;
+ float Rank = -1.0f;
+
+ SarifReportingConfiguration() = default;
+
+public:
+ static SarifReportingConfiguration create() { return {}; };
+
+ SarifReportingConfiguration disable() {
+ Enabled = false;
+ return *this;
+ }
+
+ SarifReportingConfiguration enable() {
+ Enabled = true;
+ return *this;
+ }
+
+ SarifReportingConfiguration setLevel(SarifResultLevel TheLevel) {
+ Level = TheLevel;
+ return *this;
+ }
+
+ SarifReportingConfiguration setRank(float TheRank) {
+ assert(TheRank >= 0.0f && "Rule rank cannot be smaller than 0.0");
+ assert(TheRank <= 100.0f && "Rule rank cannot be larger than 100.0");
+ Rank = TheRank;
+ return *this;
+ }
+};
+
+/// A SARIF rule (\c reportingDescriptor object) contains information that
+/// describes a reporting item generated by a tool. A reporting item is
+/// either a result of analysis or notification of a condition encountered by
+/// the tool. Rules are arbitrary but are identifiable by a hierarchical
+/// rule-id.
+///
+/// This builder provides an interface to create SARIF \c reportingDescriptor
+/// objects via the \ref SarifRule::create static method.
+///
+/// Reference:
+/// 1. <a href="https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317836">reportingDescriptor object</a>
+class SarifRule {
+ friend class clang::SarifDocumentWriter;
+
+ std::string Name;
+ std::string Id;
+ std::string Description;
+ std::string HelpURI;
+ SarifReportingConfiguration DefaultConfiguration;
+
+ SarifRule() : DefaultConfiguration(SarifReportingConfiguration::create()) {}
+
+public:
+ static SarifRule create() { return {}; }
+
+ SarifRule setName(llvm::StringRef RuleName) {
+ Name = RuleName.str();
+ return *this;
+ }
+
+ SarifRule setRuleId(llvm::StringRef RuleId) {
+ Id = RuleId.str();
+ return *this;
+ }
+
+ SarifRule setDescription(llvm::StringRef RuleDesc) {
+ Description = RuleDesc.str();
+ return *this;
+ }
+
+ SarifRule setHelpURI(llvm::StringRef RuleHelpURI) {
+ HelpURI = RuleHelpURI.str();
+ return *this;
+ }
+
+ SarifRule
+ setDefaultConfiguration(const SarifReportingConfiguration &Configuration) {
+ DefaultConfiguration = Configuration;
+ return *this;
+ }
+};
+
+/// A SARIF result (also called a "reporting item") is a unit of output
+/// produced when one of the tool's \c reportingDescriptor encounters a match
+/// on the file being analysed by the tool.
+///
+/// This builder provides a \ref SarifResult::create static method that can be
+/// used to create an empty shell onto which attributes can be added using the
+/// \c setX(...) methods.
+///
+/// For example:
+/// \code{.cpp}
+/// SarifResult result = SarifResult::create(...)
+/// .setRuleId(...)
+/// .setDiagnosticMessage(...);
+/// \endcode
+///
+/// Reference:
+/// 1. <a href="https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317638">SARIF<pre>result</pre></a>
+class SarifResult {
+ friend class clang::SarifDocumentWriter;
+
+ // NOTE:
+ // This type cannot fit all possible indexes representable by JSON, but is
+ // chosen because it is the largest unsigned type that can be safely
+ // converted to an \c int64_t.
+ uint32_t RuleIdx;
+ std::string RuleId;
+ std::string DiagnosticMessage;
+ llvm::SmallVector<CharSourceRange, 8> Locations;
+ llvm::SmallVector<ThreadFlow, 8> ThreadFlows;
+ std::optional<SarifResultLevel> LevelOverride;
+
+ SarifResult() = delete;
+ explicit SarifResult(uint32_t RuleIdx) : RuleIdx(RuleIdx) {}
+
+public:
+ static SarifResult create(uint32_t RuleIdx) { return SarifResult{RuleIdx}; }
+
+ SarifResult setIndex(uint32_t Idx) {
+ RuleIdx = Idx;
+ return *this;
+ }
+
+ SarifResult setRuleId(llvm::StringRef Id) {
+ RuleId = Id.str();
+ return *this;
+ }
+
+ SarifResult setDiagnosticMessage(llvm::StringRef Message) {
+ DiagnosticMessage = Message.str();
+ return *this;
+ }
+
+ SarifResult setLocations(llvm::ArrayRef<CharSourceRange> DiagLocs) {
+#ifndef NDEBUG
+ for (const auto &Loc : DiagLocs) {
+ assert(Loc.isCharRange() &&
+ "SARIF Results require character granular source ranges!");
+ }
+#endif
+ Locations.assign(DiagLocs.begin(), DiagLocs.end());
+ return *this;
+ }
+ SarifResult setThreadFlows(llvm::ArrayRef<ThreadFlow> ThreadFlowResults) {
+ ThreadFlows.assign(ThreadFlowResults.begin(), ThreadFlowResults.end());
+ return *this;
+ }
+
+ SarifResult setDiagnosticLevel(const SarifResultLevel &TheLevel) {
+ LevelOverride = TheLevel;
+ return *this;
+ }
+};
+
+/// This class handles creating a valid SARIF document given various input
+/// attributes. However, it requires an ordering among certain method calls:
+///
+/// 1. Because every SARIF document must contain at least 1 \c run, callers
+/// must ensure that \ref SarifDocumentWriter::createRun is called before
+/// any other methods.
+/// 2. If SarifDocumentWriter::endRun is called, callers MUST call
+/// SarifDocumentWriter::createRun, before invoking any of the result
+/// aggregation methods such as SarifDocumentWriter::appendResult etc.
+class SarifDocumentWriter {
+private:
+ const llvm::StringRef SchemaURI{
+ "https://docs.oasis-open.org/sarif/sarif/v2.1.0/cos02/schemas/"
+ "sarif-schema-2.1.0.json"};
+ const llvm::StringRef SchemaVersion{"2.1.0"};
+
+ /// \internal
+ /// Return a pointer to the current tool. Asserts that a run exists.
+ llvm::json::Object &getCurrentTool();
+
+ /// \internal
+ /// Checks if there is a run associated with this document.
+ ///
+ /// \return true on success
+ bool hasRun() const;
+
+ /// \internal
+ /// Reset portions of the internal state so that the document is ready to
+ /// receive data for a new run.
+ void reset();
+
+ /// \internal
+ /// Return a mutable reference to the current run, after asserting it exists.
+ ///
+ /// \note It is undefined behavior to call this if a run does not exist in
+ /// the SARIF document.
+ llvm::json::Object &getCurrentRun();
+
+ /// Create a code flow object for the given threadflows.
+ /// See \ref ThreadFlow.
+ ///
+ /// \note It is undefined behavior to call this if a run does not exist in
+ /// the SARIF document.
+ llvm::json::Object
+ createCodeFlow(const llvm::ArrayRef<ThreadFlow> ThreadFlows);
+
+ /// Add the given threadflows to the ones this SARIF document knows about.
+ llvm::json::Array
+ createThreadFlows(const llvm::ArrayRef<ThreadFlow> ThreadFlows);
+
+ /// Add the given \ref CharSourceRange to the SARIF document as a physical
+ /// location, with its corresponding artifact.
+ llvm::json::Object createPhysicalLocation(const CharSourceRange &R);
+
+public:
+ SarifDocumentWriter() = delete;
+
+ /// Create a new empty SARIF document with the given source manager.
+ SarifDocumentWriter(const SourceManager &SourceMgr) : SourceMgr(SourceMgr) {}
+
+ /// Release resources held by this SARIF document.
+ ~SarifDocumentWriter() = default;
+
+ /// Create a new run with which any upcoming analysis will be associated.
+ /// Each run requires specifying the tool that is generating reporting items.
+ void createRun(const llvm::StringRef ShortToolName,
+ const llvm::StringRef LongToolName,
+ const llvm::StringRef ToolVersion = CLANG_VERSION_STRING);
+
+ /// If there is a current run, end it.
+ ///
+ /// This method collects various book-keeping required to clear and close
+ /// resources associated with the current run, but may also allocate some
+ /// for the next run.
+ ///
+ /// Calling \ref endRun before associating a run through \ref createRun leads
+ /// to undefined behaviour.
+ void endRun();
+
+ /// Associate the given rule with the current run.
+ ///
+ /// Returns an integer rule index for the created rule that is unique within
+ /// the current run, which can then be used to create a \ref SarifResult
+ /// to add to the current run. Note that a rule must exist before being
+ /// referenced by a result.
+ ///
+ /// \pre
+ /// There must be a run associated with the document, failing to do so will
+ /// cause undefined behaviour.
+ size_t createRule(const SarifRule &Rule);
+
+ /// Append a new result to the currently in-flight run.
+ ///
+ /// \pre
+ /// There must be a run associated with the document, failing to do so will
+ /// cause undefined behaviour.
+ /// \pre
+ /// \c RuleIdx used to create the result must correspond to a rule known by
+ /// the SARIF document. It must be the value returned by a previous call
+ /// to \ref createRule.
+ void appendResult(const SarifResult &SarifResult);
+
+ /// Return the SARIF document in its current state.
+ /// Calling this will trigger a copy of the internal state including all
+ /// reported diagnostics, resulting in an expensive call.
+ llvm::json::Object createDocument();
+
+private:
+ /// Source Manager to use for the current SARIF document.
+ const SourceManager &SourceMgr;
+
+ /// Flag to track the state of this document:
+ /// A closed document is one on which a new runs must be created.
+ /// This could be a document that is freshly created, or has recently
+ /// finished writing to a previous run.
+ bool Closed = true;
+
+ /// A sequence of SARIF runs.
+ /// Each run object describes a single run of an analysis tool and contains
+ /// the output of that run.
+ ///
+ /// Reference: <a href="https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317484">run object</a>
+ llvm::json::Array Runs;
+
+ /// The list of rules associated with the most recent active run. These are
+ /// defined using the diagnostics passed to the SarifDocument. Each rule
+ /// need not be unique through the result set. E.g. there may be several
+ /// 'syntax' errors throughout code under analysis, each of which has its
+ /// own specific diagnostic message (and consequently, RuleId). Rules are
+ /// also known as "reportingDescriptor" objects in SARIF.
+ ///
+ /// Reference: <a href="https://docs.oasis-open.org/sarif/sarif/v2.1.0/os/sarif-v2.1.0-os.html#_Toc34317556">rules property</a>
+ llvm::SmallVector<SarifRule, 32> CurrentRules;
+
+ /// The list of artifacts that have been encountered on the most recent active
+ /// run. An artifact is defined in SARIF as a sequence of bytes addressable
+ /// by a URI. A common example for clang's case would be files named by
+ /// filesystem paths.
+ llvm::StringMap<detail::SarifArtifact> CurrentArtifacts;
+};
+} // namespace clang
+
+#endif // LLVM_CLANG_BASIC_SARIF_H
diff --git a/contrib/llvm-project/clang/include/clang/Basic/SourceLocation.h b/contrib/llvm-project/clang/include/clang/Basic/SourceLocation.h
index 540de23b9f55..00b1e0fa855b 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/SourceLocation.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/SourceLocation.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_BASIC_SOURCELOCATION_H
#define LLVM_CLANG_BASIC_SOURCELOCATION_H
+#include "clang/Basic/FileEntry.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>
@@ -23,10 +24,8 @@
namespace llvm {
-template <typename T> struct DenseMapInfo;
-
class FoldingSetNodeID;
-template <typename T> struct FoldingSetTrait;
+template <typename T, typename Enable> struct FoldingSetTrait;
} // namespace llvm
@@ -60,6 +59,7 @@ private:
friend class ASTWriter;
friend class ASTReader;
friend class SourceManager;
+ friend class SourceManagerTestHelper;
static FileID get(int V) {
FileID F;
@@ -89,7 +89,7 @@ class SourceLocation {
friend class ASTReader;
friend class ASTWriter;
friend class SourceManager;
- friend struct llvm::FoldingSetTrait<SourceLocation>;
+ friend struct llvm::FoldingSetTrait<SourceLocation, void>;
public:
using UIntTy = uint32_t;
@@ -358,11 +358,13 @@ public:
}
};
-class FileEntry;
-
/// A SourceLocation and its associated SourceManager.
///
/// This is useful for argument passing to functions that expect both objects.
+///
+/// This class does not guarantee the presence of either the SourceManager or
+/// a valid SourceLocation. Clients should use `isValid()` and `hasManager()`
+/// before calling the member functions.
class FullSourceLoc : public SourceLocation {
const SourceManager *SrcMgr = nullptr;
@@ -373,13 +375,10 @@ public:
explicit FullSourceLoc(SourceLocation Loc, const SourceManager &SM)
: SourceLocation(Loc), SrcMgr(&SM) {}
- bool hasManager() const {
- bool hasSrcMgr = SrcMgr != nullptr;
- assert(hasSrcMgr == isValid() && "FullSourceLoc has location but no manager");
- return hasSrcMgr;
- }
+ /// Checks whether the SourceManager is present.
+ bool hasManager() const { return SrcMgr != nullptr; }
- /// \pre This FullSourceLoc has an associated SourceManager.
+ /// \pre hasManager()
const SourceManager &getManager() const {
assert(SrcMgr && "SourceManager is NULL.");
return *SrcMgr;
@@ -399,6 +398,12 @@ public:
unsigned getExpansionLineNumber(bool *Invalid = nullptr) const;
unsigned getExpansionColumnNumber(bool *Invalid = nullptr) const;
+ /// Decompose the underlying \c SourceLocation into a raw (FileID + Offset)
+ /// pair, after walking through all expansion records.
+ ///
+ /// \see SourceManager::getDecomposedExpansionLoc
+ std::pair<FileID, unsigned> getDecomposedExpansionLoc() const;
+
unsigned getSpellingLineNumber(bool *Invalid = nullptr) const;
unsigned getSpellingColumnNumber(bool *Invalid = nullptr) const;
@@ -408,6 +413,7 @@ public:
unsigned getColumnNumber(bool *Invalid = nullptr) const;
const FileEntry *getFileEntry() const;
+ OptionalFileEntryRef getFileEntryRef() const;
/// Return a StringRef to the source buffer data for the
/// specified FileID.
@@ -466,7 +472,7 @@ namespace llvm {
/// Define DenseMapInfo so that FileID's can be used as keys in DenseMap and
/// DenseSets.
template <>
- struct DenseMapInfo<clang::FileID> {
+ struct DenseMapInfo<clang::FileID, void> {
static clang::FileID getEmptyKey() {
return {};
}
@@ -487,7 +493,7 @@ namespace llvm {
/// Define DenseMapInfo so that SourceLocation's can be used as keys in
/// DenseMap and DenseSet. This trait class is eqivalent to
/// DenseMapInfo<unsigned> which uses SourceLocation::ID is used as a key.
- template <> struct DenseMapInfo<clang::SourceLocation> {
+ template <> struct DenseMapInfo<clang::SourceLocation, void> {
static clang::SourceLocation getEmptyKey() {
constexpr clang::SourceLocation::UIntTy Zero = 0;
return clang::SourceLocation::getFromRawEncoding(~Zero);
@@ -508,7 +514,7 @@ namespace llvm {
};
// Allow calling FoldingSetNodeID::Add with SourceLocation object as parameter
- template <> struct FoldingSetTrait<clang::SourceLocation> {
+ template <> struct FoldingSetTrait<clang::SourceLocation, void> {
static void Profile(const clang::SourceLocation &X, FoldingSetNodeID &ID);
};
diff --git a/contrib/llvm-project/clang/include/clang/Basic/SourceManager.h b/contrib/llvm-project/clang/include/clang/Basic/SourceManager.h
index cc29c24f5a35..d2ece14da0b1 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/SourceManager.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/SourceManager.h
@@ -36,12 +36,14 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/FileEntry.h"
+#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/PagedVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
@@ -52,6 +54,7 @@
#include <cstddef>
#include <map>
#include <memory>
+#include <optional>
#include <string>
#include <utility>
#include <vector>
@@ -138,14 +141,15 @@ public:
/// It is possible for this to be NULL if the ContentCache encapsulates
/// an imaginary text buffer.
///
- /// FIXME: Turn this into a FileEntryRef and remove Filename.
- const FileEntry *OrigEntry;
+ /// FIXME: Make non-optional using a virtual file as needed, remove \c
+ /// Filename and use \c OrigEntry.getNameAsRequested() instead.
+ OptionalFileEntryRef OrigEntry;
/// References the file which the contents were actually loaded from.
///
/// Can be different from 'Entry' if we overridden the contents of one file
/// with the contents of another file.
- const FileEntry *ContentsEntry;
+ OptionalFileEntryRef ContentsEntry;
/// The filename that is used to access OrigEntry.
///
@@ -163,22 +167,31 @@ public:
///
/// When true, the original entry may be a virtual file that does not
/// exist.
+ LLVM_PREFERRED_TYPE(bool)
unsigned BufferOverridden : 1;
/// True if this content cache was initially created for a source file
/// considered to be volatile (likely to change between stat and open).
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsFileVolatile : 1;
/// True if this file may be transient, that is, if it might not
/// exist at some later point in time when this content entry is used,
/// after serialization and deserialization.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsTransient : 1;
+ LLVM_PREFERRED_TYPE(bool)
mutable unsigned IsBufferInvalid : 1;
- ContentCache(const FileEntry *Ent = nullptr) : ContentCache(Ent, Ent) {}
+ ContentCache()
+ : OrigEntry(std::nullopt), ContentsEntry(std::nullopt),
+ BufferOverridden(false), IsFileVolatile(false), IsTransient(false),
+ IsBufferInvalid(false) {}
- ContentCache(const FileEntry *Ent, const FileEntry *contentEnt)
+ ContentCache(FileEntryRef Ent) : ContentCache(Ent, Ent) {}
+
+ ContentCache(FileEntryRef Ent, FileEntryRef contentEnt)
: OrigEntry(Ent), ContentsEntry(contentEnt), BufferOverridden(false),
IsFileVolatile(false), IsTransient(false), IsBufferInvalid(false) {}
@@ -204,7 +217,7 @@ public:
///
/// \param Loc If specified, is the location that invalid file diagnostics
/// will be emitted at.
- llvm::Optional<llvm::MemoryBufferRef>
+ std::optional<llvm::MemoryBufferRef>
getBufferOrNone(DiagnosticsEngine &Diag, FileManager &FM,
SourceLocation Loc = SourceLocation()) const;
@@ -227,18 +240,18 @@ public:
llvm::MemoryBuffer::BufferKind getMemoryBufferKind() const;
/// Return the buffer, only if it has been loaded.
- llvm::Optional<llvm::MemoryBufferRef> getBufferIfLoaded() const {
+ std::optional<llvm::MemoryBufferRef> getBufferIfLoaded() const {
if (Buffer)
return Buffer->getMemBufferRef();
- return None;
+ return std::nullopt;
}
/// Return a StringRef to the source buffer data, only if it has already
/// been loaded.
- llvm::Optional<StringRef> getBufferDataIfLoaded() const {
+ std::optional<StringRef> getBufferDataIfLoaded() const {
if (Buffer)
return Buffer->getBuffer();
- return None;
+ return std::nullopt;
}
/// Set the buffer.
@@ -250,7 +263,7 @@ public:
/// Set the buffer to one that's not owned (or to nullptr).
///
/// \pre Buffer cannot already be set.
- void setUnownedBuffer(llvm::Optional<llvm::MemoryBufferRef> B) {
+ void setUnownedBuffer(std::optional<llvm::MemoryBufferRef> B) {
assert(!Buffer && "Expected to be called right after construction");
if (B)
setBuffer(llvm::MemoryBuffer::getMemBuffer(*B));
@@ -296,6 +309,7 @@ class FileInfo {
unsigned NumCreatedFIDs : 31;
/// Whether this FileInfo has any \#line directives.
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasLineDirectives : 1;
/// The content cache and the characteristic of the file.
@@ -467,6 +481,7 @@ static_assert(sizeof(FileInfo) <= sizeof(ExpansionInfo),
class SLocEntry {
static constexpr int OffsetBits = 8 * sizeof(SourceLocation::UIntTy) - 1;
SourceLocation::UIntTy Offset : OffsetBits;
+ LLVM_PREFERRED_TYPE(bool)
SourceLocation::UIntTy IsExpansion : 1;
union {
FileInfo File;
@@ -491,6 +506,14 @@ public:
return Expansion;
}
+ /// Creates an incomplete SLocEntry that is only able to report its offset.
+ static SLocEntry getOffsetOnly(SourceLocation::UIntTy Offset) {
+ assert(!(Offset & (1ULL << OffsetBits)) && "Offset is too large");
+ SLocEntry E;
+ E.Offset = Offset;
+ return E;
+ }
+
static SLocEntry get(SourceLocation::UIntTy Offset, const FileInfo &FI) {
assert(!(Offset & (1ULL << OffsetBits)) && "Offset is too large");
SLocEntry E;
@@ -525,6 +548,12 @@ public:
/// entry from being loaded.
virtual bool ReadSLocEntry(int ID) = 0;
+ /// Get the index ID for the loaded SourceLocation offset.
+ ///
+ /// \returns Invalid index ID (0) if an error occurred that prevented the
+ /// SLocEntry from being loaded.
+ virtual int getSLocEntryID(SourceLocation::UIntTy SLocOffset) = 0;
+
/// Retrieve the module import location and name for the given ID, if
/// in fact it was loaded from a module (rather than, say, a precompiled
/// header).
@@ -541,10 +570,10 @@ class InBeforeInTUCacheEntry {
/// If these match up with a subsequent query, the result can be reused.
FileID LQueryFID, RQueryFID;
- /// True if LQueryFID was created before RQueryFID.
+ /// The relative order of FileIDs that the CommonFID *immediately* includes.
///
/// This is used to compare macro expansion locations.
- bool IsLQFIDBeforeRQFID;
+ bool LChildBeforeRChild;
/// The file found in common between the two \#include traces, i.e.,
/// the nearest common ancestor of the \#include tree.
@@ -558,12 +587,17 @@ class InBeforeInTUCacheEntry {
unsigned LCommonOffset, RCommonOffset;
public:
+ InBeforeInTUCacheEntry() = default;
+ InBeforeInTUCacheEntry(FileID L, FileID R) : LQueryFID(L), RQueryFID(R) {
+ assert(L != R);
+ }
+
/// Return true if the currently cached values match up with
/// the specified LHS/RHS query.
///
/// If not, we can't use the cache.
- bool isCacheValid(FileID LHS, FileID RHS) const {
- return LQueryFID == LHS && RQueryFID == RHS;
+ bool isCacheValid() const {
+ return CommonFID.isValid();
}
/// If the cache is valid, compute the result given the
@@ -580,29 +614,28 @@ public:
// one of the locations points at the inclusion/expansion point of the other
// in which case its FileID will come before the other.
if (LOffset == ROffset)
- return IsLQFIDBeforeRQFID;
+ return LChildBeforeRChild;
return LOffset < ROffset;
}
/// Set up a new query.
- void setQueryFIDs(FileID LHS, FileID RHS, bool isLFIDBeforeRFID) {
+ /// If it matches the old query, we can keep the cached answer.
+ void setQueryFIDs(FileID LHS, FileID RHS) {
assert(LHS != RHS);
- LQueryFID = LHS;
- RQueryFID = RHS;
- IsLQFIDBeforeRQFID = isLFIDBeforeRFID;
- }
-
- void clear() {
- LQueryFID = RQueryFID = FileID();
- IsLQFIDBeforeRQFID = false;
+ if (LQueryFID != LHS || RQueryFID != RHS) {
+ LQueryFID = LHS;
+ RQueryFID = RHS;
+ CommonFID = FileID();
+ }
}
void setCommonLoc(FileID commonFID, unsigned lCommonOffset,
- unsigned rCommonOffset) {
+ unsigned rCommonOffset, bool LParentBeforeRParent) {
CommonFID = commonFID;
LCommonOffset = lCommonOffset;
RCommonOffset = rCommonOffset;
+ LChildBeforeRChild = LParentBeforeRParent;
}
};
@@ -637,7 +670,7 @@ class SourceManager : public RefCountedBase<SourceManager> {
/// This map allows us to merge ContentCache entries based
/// on their FileEntry*. All ContentCache objects will thus have unique,
/// non-null, FileEntry pointers.
- llvm::DenseMap<const FileEntry*, SrcMgr::ContentCache*> FileInfos;
+ llvm::DenseMap<FileEntryRef, SrcMgr::ContentCache*> FileInfos;
/// True if the ContentCache for files that are overridden by other
/// files, should report the original file name. Defaults to true.
@@ -655,7 +688,7 @@ class SourceManager : public RefCountedBase<SourceManager> {
struct OverriddenFilesInfoTy {
/// Files that have been overridden with the contents from another
/// file.
- llvm::DenseMap<const FileEntry *, const FileEntry *> OverriddenFiles;
+ llvm::DenseMap<const FileEntry *, FileEntryRef> OverriddenFiles;
/// Files that were overridden with a memory buffer.
llvm::DenseSet<const FileEntry *> OverriddenFilesWithBuffer;
@@ -687,7 +720,12 @@ class SourceManager : public RefCountedBase<SourceManager> {
///
/// Negative FileIDs are indexes into this table. To get from ID to an index,
/// use (-ID - 2).
- SmallVector<SrcMgr::SLocEntry, 0> LoadedSLocEntryTable;
+ llvm::PagedVector<SrcMgr::SLocEntry> LoadedSLocEntryTable;
+
+ /// For each allocation in LoadedSLocEntryTable, we keep the first FileID.
+ /// We assume exactly one allocation per AST file, and use that to determine
+ /// whether two FileIDs come from the same AST file.
+ SmallVector<FileID, 0> LoadedSLocEntryAllocBegin;
/// The starting offset of the next local SLocEntry.
///
@@ -700,7 +738,7 @@ class SourceManager : public RefCountedBase<SourceManager> {
/// not have been loaded, so that value would be unknown.
SourceLocation::UIntTy CurrentLoadedOffset;
- /// The highest possible offset is 2^32-1 (2^63-1 for 64-bit source
+ /// The highest possible offset is 2^31-1 (2^63-1 for 64-bit source
/// locations), so CurrentLoadedOffset starts at 2^31 (2^63 resp.).
static const SourceLocation::UIntTy MaxLoadedOffset =
1ULL << (8 * sizeof(SourceLocation::UIntTy) - 1);
@@ -711,6 +749,12 @@ class SourceManager : public RefCountedBase<SourceManager> {
/// Same indexing as LoadedSLocEntryTable.
llvm::BitVector SLocEntryLoaded;
+ /// A bitmap that indicates whether the entries of LoadedSLocEntryTable
+ /// have already had their offset loaded from the external source.
+ ///
+ /// Superset of SLocEntryLoaded. Same indexing as SLocEntryLoaded.
+ llvm::BitVector SLocEntryOffsetLoaded;
+
/// An external source for source location entries.
ExternalSLocEntrySource *ExternalSLocEntries = nullptr;
@@ -864,13 +908,6 @@ public:
/// Create a new FileID that represents the specified file
/// being \#included from the specified IncludePosition.
- ///
- /// This translates NULL into standard input.
- FileID createFileID(const FileEntry *SourceFile, SourceLocation IncludePos,
- SrcMgr::CharacteristicKind FileCharacter,
- int LoadedID = 0,
- SourceLocation::UIntTy LoadedOffset = 0);
-
FileID createFileID(FileEntryRef SourceFile, SourceLocation IncludePos,
SrcMgr::CharacteristicKind FileCharacter,
int LoadedID = 0,
@@ -896,25 +933,29 @@ public:
/// Get the FileID for \p SourceFile if it exists. Otherwise, create a
/// new FileID for the \p SourceFile.
- FileID getOrCreateFileID(const FileEntry *SourceFile,
+ FileID getOrCreateFileID(FileEntryRef SourceFile,
SrcMgr::CharacteristicKind FileCharacter);
- /// Return a new SourceLocation that encodes the
- /// fact that a token from SpellingLoc should actually be referenced from
- /// ExpansionLoc, and that it represents the expansion of a macro argument
- /// into the function-like macro body.
- SourceLocation createMacroArgExpansionLoc(SourceLocation Loc,
+ /// Creates an expansion SLocEntry for the substitution of an argument into a
+ /// function-like macro's body. Returns the start of the expansion.
+ ///
+ /// The macro argument was written at \p SpellingLoc with length \p Length.
+ /// \p ExpansionLoc is the parameter name in the (expanded) macro body.
+ SourceLocation createMacroArgExpansionLoc(SourceLocation SpellingLoc,
SourceLocation ExpansionLoc,
- unsigned TokLength);
+ unsigned Length);
- /// Return a new SourceLocation that encodes the fact
- /// that a token from SpellingLoc should actually be referenced from
- /// ExpansionLoc.
- SourceLocation
- createExpansionLoc(SourceLocation Loc, SourceLocation ExpansionLocStart,
- SourceLocation ExpansionLocEnd, unsigned TokLength,
- bool ExpansionIsTokenRange = true, int LoadedID = 0,
- SourceLocation::UIntTy LoadedOffset = 0);
+ /// Creates an expansion SLocEntry for a macro use. Returns its start.
+ ///
+ /// The macro body begins at \p SpellingLoc with length \p Length.
+ /// The macro use spans [ExpansionLocStart, ExpansionLocEnd].
+ SourceLocation createExpansionLoc(SourceLocation SpellingLoc,
+ SourceLocation ExpansionLocStart,
+ SourceLocation ExpansionLocEnd,
+ unsigned Length,
+ bool ExpansionIsTokenRange = true,
+ int LoadedID = 0,
+ SourceLocation::UIntTy LoadedOffset = 0);
/// Return a new SourceLocation that encodes that the token starting
/// at \p TokenStart ends prematurely at \p TokenEnd.
@@ -924,14 +965,14 @@ public:
/// Retrieve the memory buffer associated with the given file.
///
- /// Returns None if the buffer is not valid.
- llvm::Optional<llvm::MemoryBufferRef>
- getMemoryBufferForFileOrNone(const FileEntry *File);
+ /// Returns std::nullopt if the buffer is not valid.
+ std::optional<llvm::MemoryBufferRef>
+ getMemoryBufferForFileOrNone(FileEntryRef File);
/// Retrieve the memory buffer associated with the given file.
///
/// Returns a fake buffer if there isn't a real one.
- llvm::MemoryBufferRef getMemoryBufferForFileOrFake(const FileEntry *File) {
+ llvm::MemoryBufferRef getMemoryBufferForFileOrFake(FileEntryRef File) {
if (auto B = getMemoryBufferForFileOrNone(File))
return *B;
return getFakeBufferForRecovery();
@@ -944,7 +985,7 @@ public:
///
/// \param Buffer the memory buffer whose contents will be used as the
/// data in the given source file.
- void overrideFileContents(const FileEntry *SourceFile,
+ void overrideFileContents(FileEntryRef SourceFile,
const llvm::MemoryBufferRef &Buffer) {
overrideFileContents(SourceFile, llvm::MemoryBuffer::getMemBuffer(Buffer));
}
@@ -956,12 +997,8 @@ public:
///
/// \param Buffer the memory buffer whose contents will be used as the
/// data in the given source file.
- void overrideFileContents(const FileEntry *SourceFile,
- std::unique_ptr<llvm::MemoryBuffer> Buffer);
void overrideFileContents(FileEntryRef SourceFile,
- std::unique_ptr<llvm::MemoryBuffer> Buffer) {
- overrideFileContents(&SourceFile.getFileEntry(), std::move(Buffer));
- }
+ std::unique_ptr<llvm::MemoryBuffer> Buffer);
/// Override the given source file with another one.
///
@@ -969,30 +1006,28 @@ public:
///
/// \param NewFile the file whose contents will be used as the
/// data instead of the contents of the given source file.
- void overrideFileContents(const FileEntry *SourceFile,
- const FileEntry *NewFile);
+ void overrideFileContents(const FileEntry *SourceFile, FileEntryRef NewFile);
/// Returns true if the file contents have been overridden.
bool isFileOverridden(const FileEntry *File) const {
if (OverriddenFilesInfo) {
if (OverriddenFilesInfo->OverriddenFilesWithBuffer.count(File))
return true;
- if (OverriddenFilesInfo->OverriddenFiles.find(File) !=
- OverriddenFilesInfo->OverriddenFiles.end())
+ if (OverriddenFilesInfo->OverriddenFiles.contains(File))
return true;
}
return false;
}
/// Bypass the overridden contents of a file. This creates a new FileEntry
- /// and initializes the content cache for it. Returns None if there is no
- /// such file in the filesystem.
+ /// and initializes the content cache for it. Returns std::nullopt if there
+ /// is no such file in the filesystem.
///
/// This should be called before parsing has begun.
- Optional<FileEntryRef> bypassFileContentsOverride(FileEntryRef File);
+ OptionalFileEntryRef bypassFileContentsOverride(FileEntryRef File);
/// Specify that a file is transient.
- void setFileIsTransient(const FileEntry *SourceFile);
+ void setFileIsTransient(FileEntryRef SourceFile);
/// Specify that all files that are read during this compilation are
/// transient.
@@ -1006,13 +1041,14 @@ public:
/// Return the buffer for the specified FileID.
///
- /// If there is an error opening this buffer the first time, return None.
- llvm::Optional<llvm::MemoryBufferRef>
+ /// If there is an error opening this buffer the first time, return
+ /// std::nullopt.
+ std::optional<llvm::MemoryBufferRef>
getBufferOrNone(FileID FID, SourceLocation Loc = SourceLocation()) const {
if (auto *Entry = getSLocEntryForFile(FID))
return Entry->getFile().getContentCache().getBufferOrNone(
Diag, getFileManager(), Loc);
- return None;
+ return std::nullopt;
}
/// Return the buffer for the specified FileID.
@@ -1028,28 +1064,30 @@ public:
/// Returns the FileEntry record for the provided FileID.
const FileEntry *getFileEntryForID(FileID FID) const {
- if (auto *Entry = getSLocEntryForFile(FID))
- return Entry->getFile().getContentCache().OrigEntry;
+ if (auto FE = getFileEntryRefForID(FID))
+ return *FE;
return nullptr;
}
/// Returns the FileEntryRef for the provided FileID.
- Optional<FileEntryRef> getFileEntryRefForID(FileID FID) const {
- if (auto *Entry = getFileEntryForID(FID))
- return Entry->getLastRef();
- return None;
+ OptionalFileEntryRef getFileEntryRefForID(FileID FID) const {
+ if (auto *Entry = getSLocEntryForFile(FID))
+ return Entry->getFile().getContentCache().OrigEntry;
+ return std::nullopt;
}
/// Returns the filename for the provided FileID, unless it's a built-in
/// buffer that's not represented by a filename.
///
- /// Returns None for non-files and built-in files.
- Optional<StringRef> getNonBuiltinFilenameForID(FileID FID) const;
+ /// Returns std::nullopt for non-files and built-in files.
+ std::optional<StringRef> getNonBuiltinFilenameForID(FileID FID) const;
/// Returns the FileEntry record for the provided SLocEntry.
- const FileEntry *getFileEntryForSLocEntry(const SrcMgr::SLocEntry &sloc) const
- {
- return sloc.getFile().getContentCache().OrigEntry;
+ const FileEntry *
+ getFileEntryForSLocEntry(const SrcMgr::SLocEntry &SLocEntry) const {
+ if (auto FE = SLocEntry.getFile().getContentCache().OrigEntry)
+ return *FE;
+ return nullptr;
}
/// Return a StringRef to the source buffer data for the
@@ -1060,16 +1098,16 @@ public:
StringRef getBufferData(FileID FID, bool *Invalid = nullptr) const;
/// Return a StringRef to the source buffer data for the
- /// specified FileID, returning None if invalid.
+ /// specified FileID, returning std::nullopt if invalid.
///
/// \param FID The file ID whose contents will be returned.
- llvm::Optional<StringRef> getBufferDataOrNone(FileID FID) const;
+ std::optional<StringRef> getBufferDataOrNone(FileID FID) const;
/// Return a StringRef to the source buffer data for the
- /// specified FileID, returning None if it's not yet loaded.
+ /// specified FileID, returning std::nullopt if it's not yet loaded.
///
/// \param FID The file ID whose contents will be returned.
- llvm::Optional<StringRef> getBufferDataIfLoaded(FileID FID) const;
+ std::optional<StringRef> getBufferDataIfLoaded(FileID FID) const;
/// Get the number of FileIDs (files and macros) that were created
/// during preprocessing of \p FID, including it.
@@ -1101,13 +1139,7 @@ public:
/// the entry in SLocEntryTable which contains the specified location.
///
FileID getFileID(SourceLocation SpellingLoc) const {
- SourceLocation::UIntTy SLocOffset = SpellingLoc.getOffset();
-
- // If our one-entry cache covers this offset, just return it.
- if (isOffsetInFileID(LastFileIDLookup, SLocOffset))
- return LastFileIDLookup;
-
- return getFileIDSlow(SLocOffset);
+ return getFileID(SpellingLoc.getOffset());
}
/// Return the filename of the file containing a SourceLocation.
@@ -1468,24 +1500,35 @@ public:
/// Returns whether \p Loc is located in a <built-in> file.
bool isWrittenInBuiltinFile(SourceLocation Loc) const {
- StringRef Filename(getPresumedLoc(Loc).getFilename());
+ PresumedLoc Presumed = getPresumedLoc(Loc);
+ if (Presumed.isInvalid())
+ return false;
+ StringRef Filename(Presumed.getFilename());
return Filename.equals("<built-in>");
}
/// Returns whether \p Loc is located in a <command line> file.
bool isWrittenInCommandLineFile(SourceLocation Loc) const {
- StringRef Filename(getPresumedLoc(Loc).getFilename());
+ PresumedLoc Presumed = getPresumedLoc(Loc);
+ if (Presumed.isInvalid())
+ return false;
+ StringRef Filename(Presumed.getFilename());
return Filename.equals("<command line>");
}
/// Returns whether \p Loc is located in a <scratch space> file.
bool isWrittenInScratchSpace(SourceLocation Loc) const {
- StringRef Filename(getPresumedLoc(Loc).getFilename());
+ PresumedLoc Presumed = getPresumedLoc(Loc);
+ if (Presumed.isInvalid())
+ return false;
+ StringRef Filename(Presumed.getFilename());
return Filename.equals("<scratch space>");
}
/// Returns if a SourceLocation is in a system header.
bool isInSystemHeader(SourceLocation Loc) const {
+ if (Loc.isInvalid())
+ return false;
return isSystem(getFileCharacteristic(Loc));
}
@@ -1629,6 +1672,11 @@ public:
isInTheSameTranslationUnit(std::pair<FileID, unsigned> &LOffs,
std::pair<FileID, unsigned> &ROffs) const;
+ /// Determines whether the two decomposed source location is in the same TU.
+ bool isInTheSameTranslationUnitImpl(
+ const std::pair<FileID, unsigned> &LOffs,
+ const std::pair<FileID, unsigned> &ROffs) const;
+
/// Determines the order of 2 source locations in the "source location
/// address space".
bool isBeforeInSLocAddrSpace(SourceLocation LHS, SourceLocation RHS) const {
@@ -1660,12 +1708,12 @@ public:
// Iterators over FileInfos.
using fileinfo_iterator =
- llvm::DenseMap<const FileEntry*, SrcMgr::ContentCache*>::const_iterator;
+ llvm::DenseMap<FileEntryRef, SrcMgr::ContentCache *>::const_iterator;
fileinfo_iterator fileinfo_begin() const { return FileInfos.begin(); }
fileinfo_iterator fileinfo_end() const { return FileInfos.end(); }
bool hasFileInfo(const FileEntry *File) const {
- return FileInfos.find(File) != FileInfos.end();
+ return FileInfos.find_as(File) != FileInfos.end();
}
/// Print statistics to stderr.
@@ -1673,6 +1721,10 @@ public:
void dump() const;
+ // Produce notes describing the current source location address space usage.
+ void noteSLocAddressSpaceUsage(DiagnosticsEngine &Diag,
+ std::optional<unsigned> MaxNotes = 32) const;
+
/// Get the number of local SLocEntries we have.
unsigned local_sloc_entry_size() const { return LocalSLocEntryTable.size(); }
@@ -1723,12 +1775,12 @@ public:
/// Returns true if \p Loc came from a PCH/Module.
bool isLoadedSourceLocation(SourceLocation Loc) const {
- return Loc.getOffset() >= CurrentLoadedOffset;
+ return isLoadedOffset(Loc.getOffset());
}
/// Returns true if \p Loc did not come from a PCH/Module.
bool isLocalSourceLocation(SourceLocation Loc) const {
- return Loc.getOffset() < NextLocalOffset;
+ return isLocalOffset(Loc.getOffset());
}
/// Returns true if \p FID came from a PCH/Module.
@@ -1798,11 +1850,27 @@ private:
return getLoadedSLocEntry(static_cast<unsigned>(-ID - 2), Invalid);
}
+ FileID getFileID(SourceLocation::UIntTy SLocOffset) const {
+ // If our one-entry cache covers this offset, just return it.
+ if (isOffsetInFileID(LastFileIDLookup, SLocOffset))
+ return LastFileIDLookup;
+
+ return getFileIDSlow(SLocOffset);
+ }
+
+ bool isLocalOffset(SourceLocation::UIntTy SLocOffset) const {
+ return SLocOffset < CurrentLoadedOffset;
+ }
+
+ bool isLoadedOffset(SourceLocation::UIntTy SLocOffset) const {
+ return SLocOffset >= CurrentLoadedOffset;
+ }
+
/// Implements the common elements of storing an expansion info struct into
/// the SLocEntry table and producing a source location that refers to it.
SourceLocation
createExpansionLocImpl(const SrcMgr::ExpansionInfo &Expansion,
- unsigned TokLength, int LoadedID = 0,
+ unsigned Length, int LoadedID = 0,
SourceLocation::UIntTy LoadedOffset = 0);
/// Return true if the specified FileID contains the
@@ -1902,11 +1970,11 @@ public:
}
};
-/// SourceManager and necessary depdencies (e.g. VFS, FileManager) for a single
-/// in-memorty file.
+/// SourceManager and necessary dependencies (e.g. VFS, FileManager) for a
+/// single in-memorty file.
class SourceManagerForFile {
public:
- /// Creates SourceManager and necessary depdencies (e.g. VFS, FileManager).
+ /// Creates SourceManager and necessary dependencies (e.g. VFS, FileManager).
/// The main file in the SourceManager will be \p FileName with \p Content.
SourceManagerForFile(StringRef FileName, StringRef Content);
diff --git a/contrib/llvm-project/clang/include/clang/Basic/SourceMgrAdapter.h b/contrib/llvm-project/clang/include/clang/Basic/SourceMgrAdapter.h
new file mode 100644
index 000000000000..be7f9d5051fb
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/SourceMgrAdapter.h
@@ -0,0 +1,85 @@
+//=== SourceMgrAdapter.h - SourceMgr to SourceManager Adapter ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides an adapter that maps diagnostics from llvm::SourceMgr
+// to Clang's SourceManager.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SOURCEMGRADAPTER_H
+#define LLVM_CLANG_SOURCEMGRADAPTER_H
+
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/SourceMgr.h"
+#include <string>
+#include <utility>
+
+namespace clang {
+
+class DiagnosticsEngine;
+class FileEntry;
+
+/// An adapter that can be used to translate diagnostics from one or more
+/// llvm::SourceMgr instances to a ,
+class SourceMgrAdapter {
+ /// Clang source manager.
+ SourceManager &SrcMgr;
+
+ /// Clang diagnostics engine.
+ DiagnosticsEngine &Diagnostics;
+
+ /// Diagnostic IDs for errors, warnings, and notes.
+ unsigned ErrorDiagID, WarningDiagID, NoteDiagID;
+
+ /// The default file to use when mapping buffers.
+ OptionalFileEntryRef DefaultFile;
+
+ /// A mapping from (LLVM source manager, buffer ID) pairs to the
+ /// corresponding file ID within the Clang source manager.
+ llvm::DenseMap<std::pair<const llvm::SourceMgr *, unsigned>, FileID>
+ FileIDMapping;
+
+ /// Diagnostic handler.
+ static void handleDiag(const llvm::SMDiagnostic &Diag, void *Context);
+
+public:
+ /// Create a new \c SourceMgr adaptor that maps to the given source
+ /// manager and diagnostics engine.
+ SourceMgrAdapter(SourceManager &SM, DiagnosticsEngine &Diagnostics,
+ unsigned ErrorDiagID, unsigned WarningDiagID,
+ unsigned NoteDiagID,
+ OptionalFileEntryRef DefaultFile = std::nullopt);
+
+ ~SourceMgrAdapter();
+
+ /// Map a source location in the given LLVM source manager to its
+ /// corresponding location in the Clang source manager.
+ SourceLocation mapLocation(const llvm::SourceMgr &LLVMSrcMgr,
+ llvm::SMLoc Loc);
+
+ /// Map a source range in the given LLVM source manager to its corresponding
+ /// range in the Clang source manager.
+ SourceRange mapRange(const llvm::SourceMgr &LLVMSrcMgr, llvm::SMRange Range);
+
+ /// Handle the given diagnostic from an LLVM source manager.
+ void handleDiag(const llvm::SMDiagnostic &Diag);
+
+ /// Retrieve the diagnostic handler to use with the underlying SourceMgr.
+ llvm::SourceMgr::DiagHandlerTy getDiagHandler() {
+ return &SourceMgrAdapter::handleDiag;
+ }
+
+ /// Retrieve the context to use with the diagnostic handler produced by
+ /// \c getDiagHandler().
+ void *getDiagContext() { return this; }
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Specifiers.h b/contrib/llvm-project/clang/include/clang/Basic/Specifiers.h
index 1c38b411e083..87f29c8ae10b 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Specifiers.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Specifiers.h
@@ -19,6 +19,9 @@
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
+namespace llvm {
+class raw_ostream;
+} // namespace llvm
namespace clang {
/// Define the meaning of possible values of the kind in ExplicitSpecifier.
@@ -31,6 +34,15 @@ namespace clang {
/// Define the kind of constexpr specifier.
enum class ConstexprSpecKind { Unspecified, Constexpr, Consteval, Constinit };
+ /// In an if statement, this denotes whether the statement is
+ /// a constexpr or consteval if statement.
+ enum class IfStatementKind : unsigned {
+ Ordinary,
+ Constexpr,
+ ConstevalNonNegated,
+ ConstevalNegated
+ };
+
/// Specifies the width of a type, e.g., short, long, or long long.
enum class TypeSpecifierWidth { Unspecified, Short, Long, LongLong };
@@ -44,40 +56,44 @@ namespace clang {
TST_unspecified,
TST_void,
TST_char,
- TST_wchar, // C++ wchar_t
- TST_char8, // C++20 char8_t (proposed)
- TST_char16, // C++11 char16_t
- TST_char32, // C++11 char32_t
+ TST_wchar, // C++ wchar_t
+ TST_char8, // C++20 char8_t (proposed)
+ TST_char16, // C++11 char16_t
+ TST_char32, // C++11 char32_t
TST_int,
TST_int128,
- TST_extint, // Extended Int types.
- TST_half, // OpenCL half, ARM NEON __fp16
- TST_Float16, // C11 extension ISO/IEC TS 18661-3
- TST_Accum, // ISO/IEC JTC1 SC22 WG14 N1169 Extension
+ TST_bitint, // Bit-precise integer types.
+ TST_half, // OpenCL half, ARM NEON __fp16
+ TST_Float16, // C11 extension ISO/IEC TS 18661-3
+ TST_Accum, // ISO/IEC JTC1 SC22 WG14 N1169 Extension
TST_Fract,
TST_BFloat16,
TST_float,
TST_double,
TST_float128,
- TST_bool, // _Bool
- TST_decimal32, // _Decimal32
- TST_decimal64, // _Decimal64
- TST_decimal128, // _Decimal128
+ TST_ibm128,
+ TST_bool, // _Bool
+ TST_decimal32, // _Decimal32
+ TST_decimal64, // _Decimal64
+ TST_decimal128, // _Decimal128
TST_enum,
TST_union,
TST_struct,
- TST_class, // C++ class type
- TST_interface, // C++ (Microsoft-specific) __interface type
- TST_typename, // Typedef, C++ class-name or enum name, etc.
- TST_typeofType,
- TST_typeofExpr,
- TST_decltype, // C++11 decltype
- TST_underlyingType, // __underlying_type for C++11
- TST_auto, // C++11 auto
- TST_decltype_auto, // C++1y decltype(auto)
- TST_auto_type, // __auto_type extension
- TST_unknown_anytype, // __unknown_anytype extension
- TST_atomic, // C11 _Atomic
+ TST_class, // C++ class type
+ TST_interface, // C++ (Microsoft-specific) __interface type
+ TST_typename, // Typedef, C++ class-name or enum name, etc.
+ TST_typeofType, // C23 (and GNU extension) typeof(type-name)
+ TST_typeofExpr, // C23 (and GNU extension) typeof(expression)
+ TST_typeof_unqualType, // C23 typeof_unqual(type-name)
+ TST_typeof_unqualExpr, // C23 typeof_unqual(expression)
+ TST_decltype, // C++11 decltype
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) TST_##Trait,
+#include "clang/Basic/TransformTypeTraits.def"
+ TST_auto, // C++11 auto
+ TST_decltype_auto, // C++1y decltype(auto)
+ TST_auto_type, // __auto_type extension
+ TST_unknown_anytype, // __unknown_anytype extension
+ TST_atomic, // C11 _Atomic
#define GENERIC_IMAGE_TYPE(ImgType, Id) TST_##ImgType##_t, // OpenCL image types
#include "clang/Basic/OpenCLImageTypes.def"
TST_error // erroneous type
@@ -86,10 +102,14 @@ namespace clang {
/// Structure that packs information about the type specifiers that
/// were written in a particular type specifier sequence.
struct WrittenBuiltinSpecs {
- static_assert(TST_error < 1 << 6, "Type bitfield not wide enough for TST");
- /*DeclSpec::TST*/ unsigned Type : 6;
- /*DeclSpec::TSS*/ unsigned Sign : 2;
- /*TypeSpecifierWidth*/ unsigned Width : 2;
+ static_assert(TST_error < 1 << 7, "Type bitfield not wide enough for TST");
+ LLVM_PREFERRED_TYPE(TypeSpecifierType)
+ unsigned Type : 7;
+ LLVM_PREFERRED_TYPE(TypeSpecifierSign)
+ unsigned Sign : 2;
+ LLVM_PREFERRED_TYPE(TypeSpecifierWidth)
+ unsigned Width : 2;
+ LLVM_PREFERRED_TYPE(bool)
unsigned ModeAttr : 1;
};
@@ -270,6 +290,9 @@ namespace clang {
CC_PreserveMost, // __attribute__((preserve_most))
CC_PreserveAll, // __attribute__((preserve_all))
CC_AArch64VectorCall, // __attribute__((aarch64_vector_pcs))
+ CC_AArch64SVEPCS, // __attribute__((aarch64_sve_pcs))
+ CC_AMDGPUKernelCall, // __attribute__((amdgpu_kernel))
+ CC_M68kRTD, // __attribute__((m68k_rtd))
};
/// Checks whether the given calling convention supports variadic
@@ -286,6 +309,7 @@ namespace clang {
case CC_OpenCLKernel:
case CC_Swift:
case CC_SwiftAsync:
+ case CC_M68kRTD:
return false;
default:
return true;
@@ -314,10 +338,12 @@ namespace clang {
Unspecified,
// Generally behaves like Nullable, except when used in a block parameter
// that was imported into a swift async method. There, swift will assume
- // that the parameter can get null even if no error occured. _Nullable
+ // that the parameter can get null even if no error occurred. _Nullable
// parameters are assumed to only get null on error.
NullableResult,
};
+ /// Prints human-readable debug representation.
+ llvm::raw_ostream &operator<<(llvm::raw_ostream&, NullabilityKind);
/// Return true if \p L has a weaker nullability annotation than \p R. The
/// ordering is: Unspecified < Nullable < NonNull.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Stack.h b/contrib/llvm-project/clang/include/clang/Basic/Stack.h
index 3418c3bad11b..30ebd94aedd1 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Stack.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Stack.h
@@ -39,7 +39,7 @@ namespace clang {
/// is insufficient, calls Diag to emit a diagnostic before calling Fn.
inline void runWithSufficientStackSpace(llvm::function_ref<void()> Diag,
llvm::function_ref<void()> Fn) {
-#ifdef LLVM_ENABLE_THREADS
+#if LLVM_ENABLE_THREADS
if (LLVM_UNLIKELY(isStackNearlyExhausted()))
runWithSufficientStackSpaceSlow(Diag, Fn);
else
diff --git a/contrib/llvm-project/clang/include/clang/Basic/StmtNodes.td b/contrib/llvm-project/clang/include/clang/Basic/StmtNodes.td
index 508f1fddf1b3..cec301dfca28 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/StmtNodes.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/StmtNodes.td
@@ -50,7 +50,7 @@ def CXXCatchStmt : StmtNode<Stmt>;
def CXXTryStmt : StmtNode<Stmt>;
def CXXForRangeStmt : StmtNode<Stmt>;
-// C++ Coroutines TS statements
+// C++ Coroutines statements
def CoroutineBodyStmt : StmtNode<Stmt>;
def CoreturnStmt : StmtNode<Stmt>;
@@ -160,8 +160,9 @@ def FunctionParmPackExpr : StmtNode<Expr>;
def MaterializeTemporaryExpr : StmtNode<Expr>;
def LambdaExpr : StmtNode<Expr>;
def CXXFoldExpr : StmtNode<Expr>;
+def CXXParenListInitExpr: StmtNode<Expr>;
-// C++ Coroutines TS expressions
+// C++ Coroutines expressions
def CoroutineSuspendExpr : StmtNode<Expr, 1>;
def CoawaitExpr : StmtNode<CoroutineSuspendExpr>;
def DependentCoawaitExpr : StmtNode<Expr>;
@@ -219,12 +220,14 @@ def AsTypeExpr : StmtNode<Expr>;
// OpenMP Directives.
def OMPCanonicalLoop : StmtNode<Stmt>;
def OMPExecutableDirective : StmtNode<Stmt, 1>;
+def OMPMetaDirective : StmtNode<OMPExecutableDirective>;
def OMPLoopBasedDirective : StmtNode<OMPExecutableDirective, 1>;
def OMPLoopDirective : StmtNode<OMPLoopBasedDirective, 1>;
def OMPParallelDirective : StmtNode<OMPExecutableDirective>;
def OMPSimdDirective : StmtNode<OMPLoopDirective>;
-def OMPTileDirective : StmtNode<OMPLoopBasedDirective>;
-def OMPUnrollDirective : StmtNode<OMPLoopBasedDirective>;
+def OMPLoopTransformationDirective : StmtNode<OMPLoopBasedDirective, 1>;
+def OMPTileDirective : StmtNode<OMPLoopTransformationDirective>;
+def OMPUnrollDirective : StmtNode<OMPLoopTransformationDirective>;
def OMPForDirective : StmtNode<OMPLoopDirective>;
def OMPForSimdDirective : StmtNode<OMPLoopDirective>;
def OMPSectionsDirective : StmtNode<OMPExecutableDirective>;
@@ -256,12 +259,17 @@ def OMPTargetUpdateDirective : StmtNode<OMPExecutableDirective>;
def OMPTeamsDirective : StmtNode<OMPExecutableDirective>;
def OMPCancellationPointDirective : StmtNode<OMPExecutableDirective>;
def OMPCancelDirective : StmtNode<OMPExecutableDirective>;
+def OMPScopeDirective : StmtNode<OMPExecutableDirective>;
def OMPTaskLoopDirective : StmtNode<OMPLoopDirective>;
def OMPTaskLoopSimdDirective : StmtNode<OMPLoopDirective>;
def OMPMasterTaskLoopDirective : StmtNode<OMPLoopDirective>;
def OMPMasterTaskLoopSimdDirective : StmtNode<OMPLoopDirective>;
def OMPParallelMasterTaskLoopDirective : StmtNode<OMPLoopDirective>;
def OMPParallelMasterTaskLoopSimdDirective : StmtNode<OMPLoopDirective>;
+def OMPMaskedTaskLoopDirective : StmtNode<OMPLoopDirective>;
+def OMPMaskedTaskLoopSimdDirective : StmtNode<OMPLoopDirective>;
+def OMPParallelMaskedTaskLoopDirective : StmtNode<OMPLoopDirective>;
+def OMPParallelMaskedTaskLoopSimdDirective : StmtNode<OMPLoopDirective>;
def OMPDistributeDirective : StmtNode<OMPLoopDirective>;
def OMPDistributeParallelForDirective : StmtNode<OMPLoopDirective>;
def OMPDistributeParallelForSimdDirective : StmtNode<OMPLoopDirective>;
@@ -280,3 +288,10 @@ def OMPTargetTeamsDistributeSimdDirective : StmtNode<OMPLoopDirective>;
def OMPInteropDirective : StmtNode<OMPExecutableDirective>;
def OMPDispatchDirective : StmtNode<OMPExecutableDirective>;
def OMPMaskedDirective : StmtNode<OMPExecutableDirective>;
+def OMPParallelMaskedDirective : StmtNode<OMPExecutableDirective>;
+def OMPGenericLoopDirective : StmtNode<OMPLoopDirective>;
+def OMPTeamsGenericLoopDirective : StmtNode<OMPLoopDirective>;
+def OMPTargetTeamsGenericLoopDirective : StmtNode<OMPLoopDirective>;
+def OMPParallelGenericLoopDirective : StmtNode<OMPLoopDirective>;
+def OMPTargetParallelGenericLoopDirective : StmtNode<OMPLoopDirective>;
+def OMPErrorDirective : StmtNode<OMPExecutableDirective>;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/SyncScope.h b/contrib/llvm-project/clang/include/clang/Basic/SyncScope.h
index ce8fb9cbed13..bc7ec7b5cf77 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/SyncScope.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/SyncScope.h
@@ -40,6 +40,16 @@ namespace clang {
/// Update getAsString.
///
enum class SyncScope {
+ SystemScope,
+ DeviceScope,
+ WorkgroupScope,
+ WavefrontScope,
+ SingleScope,
+ HIPSingleThread,
+ HIPWavefront,
+ HIPWorkgroup,
+ HIPAgent,
+ HIPSystem,
OpenCLWorkGroup,
OpenCLDevice,
OpenCLAllSVMDevices,
@@ -49,6 +59,26 @@ enum class SyncScope {
inline llvm::StringRef getAsString(SyncScope S) {
switch (S) {
+ case SyncScope::SystemScope:
+ return "system_scope";
+ case SyncScope::DeviceScope:
+ return "device_scope";
+ case SyncScope::WorkgroupScope:
+ return "workgroup_scope";
+ case SyncScope::WavefrontScope:
+ return "wavefront_scope";
+ case SyncScope::SingleScope:
+ return "single_scope";
+ case SyncScope::HIPSingleThread:
+ return "hip_singlethread";
+ case SyncScope::HIPWavefront:
+ return "hip_wavefront";
+ case SyncScope::HIPWorkgroup:
+ return "hip_workgroup";
+ case SyncScope::HIPAgent:
+ return "hip_agent";
+ case SyncScope::HIPSystem:
+ return "hip_system";
case SyncScope::OpenCLWorkGroup:
return "opencl_workgroup";
case SyncScope::OpenCLDevice:
@@ -62,7 +92,7 @@ inline llvm::StringRef getAsString(SyncScope S) {
}
/// Defines the kind of atomic scope models.
-enum class AtomicScopeModelKind { None, OpenCL };
+enum class AtomicScopeModelKind { None, OpenCL, HIP, Generic };
/// Defines the interface for synch scope model.
class AtomicScopeModel {
@@ -130,7 +160,7 @@ public:
static const unsigned Scopes[] = {
static_cast<unsigned>(WorkGroup), static_cast<unsigned>(Device),
static_cast<unsigned>(AllSVMDevices), static_cast<unsigned>(SubGroup)};
- return llvm::makeArrayRef(Scopes);
+ return llvm::ArrayRef(Scopes);
}
unsigned getFallBackValue() const override {
@@ -138,6 +168,108 @@ public:
}
};
+/// Defines the synch scope model for HIP.
+class AtomicScopeHIPModel : public AtomicScopeModel {
+public:
+ /// The enum values match the pre-defined macros
+ /// __HIP_MEMORY_SCOPE_*, which are used to define memory_scope_*
+ /// enums in hip-c.h.
+ enum ID {
+ SingleThread = 1,
+ Wavefront = 2,
+ Workgroup = 3,
+ Agent = 4,
+ System = 5,
+ Last = System
+ };
+
+ AtomicScopeHIPModel() {}
+
+ SyncScope map(unsigned S) const override {
+ switch (static_cast<ID>(S)) {
+ case SingleThread:
+ return SyncScope::HIPSingleThread;
+ case Wavefront:
+ return SyncScope::HIPWavefront;
+ case Workgroup:
+ return SyncScope::HIPWorkgroup;
+ case Agent:
+ return SyncScope::HIPAgent;
+ case System:
+ return SyncScope::HIPSystem;
+ }
+ llvm_unreachable("Invalid language synch scope value");
+ }
+
+ bool isValid(unsigned S) const override {
+ return S >= static_cast<unsigned>(SingleThread) &&
+ S <= static_cast<unsigned>(Last);
+ }
+
+ ArrayRef<unsigned> getRuntimeValues() const override {
+ static_assert(Last == System, "Does not include all synch scopes");
+ static const unsigned Scopes[] = {
+ static_cast<unsigned>(SingleThread), static_cast<unsigned>(Wavefront),
+ static_cast<unsigned>(Workgroup), static_cast<unsigned>(Agent),
+ static_cast<unsigned>(System)};
+ return llvm::ArrayRef(Scopes);
+ }
+
+ unsigned getFallBackValue() const override {
+ return static_cast<unsigned>(System);
+ }
+};
+
+/// Defines the generic atomic scope model.
+class AtomicScopeGenericModel : public AtomicScopeModel {
+public:
+ /// The enum values match predefined built-in macros __ATOMIC_SCOPE_*.
+ enum ID {
+ System = 0,
+ Device = 1,
+ Workgroup = 2,
+ Wavefront = 3,
+ Single = 4,
+ Last = Single
+ };
+
+ AtomicScopeGenericModel() = default;
+
+ SyncScope map(unsigned S) const override {
+ switch (static_cast<ID>(S)) {
+ case Device:
+ return SyncScope::DeviceScope;
+ case System:
+ return SyncScope::SystemScope;
+ case Workgroup:
+ return SyncScope::WorkgroupScope;
+ case Wavefront:
+ return SyncScope::WavefrontScope;
+ case Single:
+ return SyncScope::SingleScope;
+ }
+ llvm_unreachable("Invalid language sync scope value");
+ }
+
+ bool isValid(unsigned S) const override {
+ return S >= static_cast<unsigned>(System) &&
+ S <= static_cast<unsigned>(Last);
+ }
+
+ ArrayRef<unsigned> getRuntimeValues() const override {
+ static_assert(Last == Single, "Does not include all sync scopes");
+ static const unsigned Scopes[] = {
+ static_cast<unsigned>(Device), static_cast<unsigned>(System),
+ static_cast<unsigned>(Workgroup), static_cast<unsigned>(Wavefront),
+ static_cast<unsigned>(Single)};
+ return llvm::ArrayRef(Scopes);
+ }
+
+ unsigned getFallBackValue() const override {
+ return static_cast<unsigned>(System);
+ }
+};
+
inline std::unique_ptr<AtomicScopeModel>
AtomicScopeModel::create(AtomicScopeModelKind K) {
switch (K) {
@@ -145,9 +277,13 @@ AtomicScopeModel::create(AtomicScopeModelKind K) {
return std::unique_ptr<AtomicScopeModel>{};
case AtomicScopeModelKind::OpenCL:
return std::make_unique<AtomicScopeOpenCLModel>();
+ case AtomicScopeModelKind::HIP:
+ return std::make_unique<AtomicScopeHIPModel>();
+ case AtomicScopeModelKind::Generic:
+ return std::make_unique<AtomicScopeGenericModel>();
}
llvm_unreachable("Invalid atomic scope model kind");
}
-}
+} // namespace clang
#endif
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TargetBuiltins.h b/contrib/llvm-project/clang/include/clang/Basic/TargetBuiltins.h
index ed53b10f61ef..c31834fb52a9 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TargetBuiltins.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/TargetBuiltins.h
@@ -27,6 +27,7 @@ namespace clang {
enum {
LastTIBuiltin = clang::Builtin::FirstTSBuiltin - 1,
#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BI##ID,
#include "clang/Basic/BuiltinsNEON.def"
FirstTSBuiltin
};
@@ -47,11 +48,22 @@ namespace clang {
enum {
LastNEONBuiltin = NEON::FirstTSBuiltin - 1,
#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BI##ID,
#include "clang/Basic/BuiltinsSVE.def"
FirstTSBuiltin,
};
}
+ namespace SME {
+ enum {
+ LastSVEBuiltin = SVE::FirstTSBuiltin - 1,
+#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BI##ID,
+#include "clang/Basic/BuiltinsSME.def"
+ FirstTSBuiltin,
+ };
+ }
+
/// AArch64 builtins
namespace AArch64 {
enum {
@@ -59,6 +71,8 @@ namespace clang {
LastNEONBuiltin = NEON::FirstTSBuiltin - 1,
FirstSVEBuiltin = NEON::FirstTSBuiltin,
LastSVEBuiltin = SVE::FirstTSBuiltin - 1,
+ FirstSMEBuiltin = SVE::FirstTSBuiltin,
+ LastSMEBuiltin = SME::FirstTSBuiltin - 1,
#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
#include "clang/Basic/BuiltinsAArch64.def"
LastTSBuiltin
@@ -121,19 +135,45 @@ namespace clang {
/// VE builtins
namespace VE {
- enum { LastTIBuiltin = clang::Builtin::FirstTSBuiltin - 1, LastTSBuiltin };
+ enum {
+ LastTIBuiltin = clang::Builtin::FirstTSBuiltin - 1,
+#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
+#include "clang/Basic/BuiltinsVE.def"
+ LastTSBuiltin
+ };
+ }
+
+ namespace RISCVVector {
+ enum {
+ LastTIBuiltin = clang::Builtin::FirstTSBuiltin - 1,
+#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
+#include "clang/Basic/BuiltinsRISCVVector.def"
+ FirstTSBuiltin,
+ };
}
/// RISCV builtins
namespace RISCV {
enum {
LastTIBuiltin = clang::Builtin::FirstTSBuiltin - 1,
+ FirstRVVBuiltin = clang::Builtin::FirstTSBuiltin,
+ LastRVVBuiltin = RISCVVector::FirstTSBuiltin - 1,
#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
#include "clang/Basic/BuiltinsRISCV.def"
LastTSBuiltin
};
} // namespace RISCV
+ /// LoongArch builtins
+ namespace LoongArch {
+ enum {
+ LastTIBuiltin = clang::Builtin::FirstTSBuiltin - 1,
+#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
+#include "clang/Basic/BuiltinsLoongArch.def"
+ LastTSBuiltin
+ };
+ } // namespace LoongArch
+
/// Flags to identify the types for overloaded Neon builtins.
///
/// These must be kept in sync with the flags in utils/TableGen/NeonEmitter.h.
@@ -216,10 +256,10 @@ namespace clang {
};
SVETypeFlags(uint64_t F) : Flags(F) {
- EltTypeShift = llvm::countTrailingZeros(EltTypeMask);
- MemEltTypeShift = llvm::countTrailingZeros(MemEltTypeMask);
- MergeTypeShift = llvm::countTrailingZeros(MergeTypeMask);
- SplatOperandMaskShift = llvm::countTrailingZeros(SplatOperandMask);
+ EltTypeShift = llvm::countr_zero(EltTypeMask);
+ MemEltTypeShift = llvm::countr_zero(MemEltTypeMask);
+ MergeTypeShift = llvm::countr_zero(MergeTypeMask);
+ SplatOperandMaskShift = llvm::countr_zero(SplatOperandMask);
}
EltType getEltType() const {
@@ -251,7 +291,9 @@ namespace clang {
bool isZExtReturn() const { return Flags & IsZExtReturn; }
bool isByteIndexed() const { return Flags & IsByteIndexed; }
bool isOverloadNone() const { return Flags & IsOverloadNone; }
- bool isOverloadWhile() const { return Flags & IsOverloadWhile; }
+ bool isOverloadWhileOrMultiVecCvt() const {
+ return Flags & IsOverloadWhileOrMultiVecCvt;
+ }
bool isOverloadDefault() const { return !(Flags & OverloadKindMask); }
bool isOverloadWhileRW() const { return Flags & IsOverloadWhileRW; }
bool isOverloadCvt() const { return Flags & IsOverloadCvt; }
@@ -261,11 +303,15 @@ namespace clang {
bool isInsertOp1SVALL() const { return Flags & IsInsertOp1SVALL; }
bool isGatherPrefetch() const { return Flags & IsGatherPrefetch; }
bool isReverseUSDOT() const { return Flags & ReverseUSDOT; }
+ bool isReverseMergeAnyBinOp() const { return Flags & ReverseMergeAnyBinOp; }
+ bool isReverseMergeAnyAccOp() const { return Flags & ReverseMergeAnyAccOp; }
bool isUndef() const { return Flags & IsUndef; }
bool isTupleCreate() const { return Flags & IsTupleCreate; }
bool isTupleGet() const { return Flags & IsTupleGet; }
bool isTupleSet() const { return Flags & IsTupleSet; }
-
+ bool isReadZA() const { return Flags & IsReadZA; }
+ bool isWriteZA() const { return Flags & IsWriteZA; }
+ bool isReductionQV() const { return Flags & IsReductionQV; }
uint64_t getBits() const { return Flags; }
bool isFlagSet(uint64_t Flag) const { return Flags & Flag; }
};
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TargetCXXABI.h b/contrib/llvm-project/clang/include/clang/Basic/TargetCXXABI.h
index e727f85edad7..c113a6a048ad 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TargetCXXABI.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/TargetCXXABI.h
@@ -19,8 +19,8 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
@@ -60,9 +60,7 @@ public:
static const auto &getSpelling(Kind ABIKind) {
return getSpellingMap().find(ABIKind)->second;
}
- static bool isABI(StringRef Name) {
- return getABIMap().find(Name) != getABIMap().end();
- }
+ static bool isABI(StringRef Name) { return getABIMap().contains(Name); }
// Return true if this target should use the relative vtables C++ ABI by
// default.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TargetID.h b/contrib/llvm-project/clang/include/clang/Basic/TargetID.h
index 1a9785574d06..cef9cb5f0fb2 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TargetID.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/TargetID.h
@@ -6,12 +6,13 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_BASIC_TARGET_ID_H
-#define LLVM_CLANG_BASIC_TARGET_ID_H
+#ifndef LLVM_CLANG_BASIC_TARGETID_H
+#define LLVM_CLANG_BASIC_TARGETID_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/TargetParser/Triple.h"
+#include <optional>
#include <set>
namespace clang {
@@ -21,7 +22,7 @@ namespace clang {
/// postfixed by a plus or minus sign delimited by colons, e.g.
/// gfx908:xnack+:sramecc-. Each processor have a limited
/// number of predefined features when showing up in a target ID.
-const llvm::SmallVector<llvm::StringRef, 4>
+llvm::SmallVector<llvm::StringRef, 4>
getAllPossibleTargetIDFeatures(const llvm::Triple &T,
llvm::StringRef Processor);
@@ -31,15 +32,15 @@ llvm::StringRef getProcessorFromTargetID(const llvm::Triple &T,
llvm::StringRef OffloadArch);
/// Parse a target ID to get processor and feature map.
-/// Returns canonicalized processor name or None if the target ID is invalid.
-/// Returns target ID features in \p FeatureMap if it is not null pointer.
-/// This function assumes \p OffloadArch is a valid target ID.
+/// Returns canonicalized processor name or std::nullopt if the target ID is
+/// invalid. Returns target ID features in \p FeatureMap if it is not null
+/// pointer. This function assumes \p OffloadArch is a valid target ID.
/// If the target ID contains feature+, map it to true.
/// If the target ID contains feature-, map it to false.
/// If the target ID does not contain a feature (default), do not map it.
-llvm::Optional<llvm::StringRef>
-parseTargetID(const llvm::Triple &T, llvm::StringRef OffloadArch,
- llvm::StringMap<bool> *FeatureMap);
+std::optional<llvm::StringRef> parseTargetID(const llvm::Triple &T,
+ llvm::StringRef OffloadArch,
+ llvm::StringMap<bool> *FeatureMap);
/// Returns canonical target ID, assuming \p Processor is canonical and all
/// entries in \p Features are valid.
@@ -48,9 +49,13 @@ std::string getCanonicalTargetID(llvm::StringRef Processor,
/// Get the conflicted pair of target IDs for a compilation or a bundled code
/// object, assuming \p TargetIDs are canonicalized. If there is no conflicts,
-/// returns None.
-llvm::Optional<std::pair<llvm::StringRef, llvm::StringRef>>
+/// returns std::nullopt.
+std::optional<std::pair<llvm::StringRef, llvm::StringRef>>
getConflictTargetIDCombination(const std::set<llvm::StringRef> &TargetIDs);
+
+/// Check whether the provided target ID is compatible with the requested
+/// target ID.
+bool isCompatibleTargetID(llvm::StringRef Provided, llvm::StringRef Requested);
} // namespace clang
#endif
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TargetInfo.h b/contrib/llvm-project/clang/include/clang/Basic/TargetInfo.h
index 4f0cbf986b31..3eb23ebdacf0 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TargetInfo.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/TargetInfo.h
@@ -15,6 +15,7 @@
#define LLVM_CLANG_BASIC_TARGETINFO_H
#include "clang/Basic/AddressSpaces.h"
+#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
@@ -25,16 +26,18 @@
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/StringSet.h"
#include "llvm/Frontend/OpenMP/OMPGridValues.h"
+#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/VersionTuple.h"
+#include "llvm/TargetParser/Triple.h"
#include <cassert>
+#include <optional>
#include <string>
#include <vector>
@@ -47,12 +50,34 @@ class DiagnosticsEngine;
class LangOptions;
class CodeGenOptions;
class MacroBuilder;
-class QualType;
-class SourceLocation;
-class SourceManager;
+
+/// Contains information gathered from parsing the contents of TargetAttr.
+struct ParsedTargetAttr {
+ std::vector<std::string> Features;
+ StringRef CPU;
+ StringRef Tune;
+ StringRef BranchProtection;
+ StringRef Duplicate;
+ bool operator ==(const ParsedTargetAttr &Other) const {
+ return Duplicate == Other.Duplicate && CPU == Other.CPU &&
+ Tune == Other.Tune && BranchProtection == Other.BranchProtection &&
+ Features == Other.Features;
+ }
+};
namespace Builtin { struct Info; }
+enum class FloatModeKind {
+ NoFloat = 0,
+ Half = 1 << 0,
+ Float = 1 << 1,
+ Double = 1 << 2,
+ LongDouble = 1 << 3,
+ Float128 = 1 << 4,
+ Ibm128 = 1 << 5,
+ LLVM_MARK_AS_BITMASK_ENUM(Ibm128)
+};
+
/// Fields controlling how types are laid out in memory; these may need to
/// be copied for targets like AMDGPU that base their ABIs on an auxiliary
/// CPU target.
@@ -64,10 +89,11 @@ struct TransferrableTargetInfo {
unsigned char BFloat16Width, BFloat16Align;
unsigned char FloatWidth, FloatAlign;
unsigned char DoubleWidth, DoubleAlign;
- unsigned char LongDoubleWidth, LongDoubleAlign, Float128Align;
+ unsigned char LongDoubleWidth, LongDoubleAlign, Float128Align, Ibm128Align;
unsigned char LargeArrayMinWidth, LargeArrayAlign;
unsigned char LongWidth, LongAlign;
unsigned char LongLongWidth, LongLongAlign;
+ unsigned char Int128Align;
// Fixed point bit widths
unsigned char ShortAccumWidth, ShortAccumAlign;
@@ -95,16 +121,16 @@ struct TransferrableTargetInfo {
unsigned char AccumScale;
unsigned char LongAccumScale;
- unsigned char SuitableAlign;
unsigned char DefaultAlignForAttributeAligned;
unsigned char MinGlobalAlign;
+ unsigned short SuitableAlign;
unsigned short NewAlign;
unsigned MaxVectorAlign;
unsigned MaxTLSAlign;
const llvm::fltSemantics *HalfFormat, *BFloat16Format, *FloatFormat,
- *DoubleFormat, *LongDoubleFormat, *Float128Format;
+ *DoubleFormat, *LongDoubleFormat, *Float128Format, *Ibm128Format;
///===---- Target Data Type Query Methods -------------------------------===//
enum IntType {
@@ -121,13 +147,6 @@ struct TransferrableTargetInfo {
UnsignedLongLong
};
- enum RealType {
- NoFloat = 255,
- Float = 0,
- Double,
- LongDouble,
- Float128
- };
protected:
IntType SizeType, IntMaxType, PtrDiffType, IntPtrType, WCharType, WIntType,
Char16Type, Char32Type, Int64Type, Int16Type, SigAtomicType,
@@ -137,6 +156,7 @@ protected:
///
/// Otherwise, when this flag is not set, the normal built-in boolean type is
/// used.
+ LLVM_PREFERRED_TYPE(bool)
unsigned UseSignedCharForObjCBool : 1;
/// Control whether the alignment of bit-field types is respected when laying
@@ -144,6 +164,7 @@ protected:
/// used to (a) impact the alignment of the containing structure, and (b)
/// ensure that the individual bit-field will not straddle an alignment
/// boundary.
+ LLVM_PREFERRED_TYPE(bool)
unsigned UseBitFieldTypeAlignment : 1;
/// Whether zero length bitfields (e.g., int : 0;) force alignment of
@@ -152,13 +173,16 @@ protected:
/// If the alignment of the zero length bitfield is greater than the member
/// that follows it, `bar', `bar' will be aligned as the type of the
/// zero-length bitfield.
+ LLVM_PREFERRED_TYPE(bool)
unsigned UseZeroLengthBitfieldAlignment : 1;
/// Whether zero length bitfield alignment is respected if they are the
/// leading members.
+ LLVM_PREFERRED_TYPE(bool)
unsigned UseLeadingZeroLengthBitfield : 1;
/// Whether explicit bit field alignment attributes are honored.
+ LLVM_PREFERRED_TYPE(bool)
unsigned UseExplicitBitFieldAlignment : 1;
/// If non-zero, specifies a fixed alignment value for bitfields that follow
@@ -184,7 +208,7 @@ enum OpenCLTypeKind : uint8_t {
/// Exposes information about the current target.
///
-class TargetInfo : public virtual TransferrableTargetInfo,
+class TargetInfo : public TransferrableTargetInfo,
public RefCountedBase<TargetInfo> {
std::shared_ptr<TargetOptions> TargetOpts;
llvm::Triple Triple;
@@ -197,44 +221,59 @@ protected:
bool NoAsmVariants; // True if {|} are normal characters.
bool HasLegalHalfType; // True if the backend supports operations on the half
// LLVM IR type.
+ bool HalfArgsAndReturns;
bool HasFloat128;
bool HasFloat16;
bool HasBFloat16;
+ bool HasFullBFloat16; // True if the backend supports native bfloat16
+ // arithmetic. Used to determine excess precision
+ // support in the frontend.
+ bool HasIbm128;
+ bool HasLongDouble;
+ bool HasFPReturn;
bool HasStrictFP;
unsigned char MaxAtomicPromoteWidth, MaxAtomicInlineWidth;
- unsigned short SimdDefaultAlign;
std::string DataLayoutString;
const char *UserLabelPrefix;
const char *MCountName;
unsigned char RegParmMax, SSERegParmMax;
TargetCXXABI TheCXXABI;
const LangASMap *AddrSpaceMap;
- const unsigned *GridValues =
- nullptr; // Array of target-specific GPU grid values that must be
- // consistent between host RTL (plugin), device RTL, and clang.
mutable StringRef PlatformName;
mutable VersionTuple PlatformMinVersion;
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasAlignMac68kSupport : 1;
- unsigned RealTypeUsesObjCFPRet : 3;
+ LLVM_PREFERRED_TYPE(FloatModeKind)
+ unsigned RealTypeUsesObjCFPRetMask : llvm::BitWidth<FloatModeKind>;
+ LLVM_PREFERRED_TYPE(bool)
unsigned ComplexLongDoubleUsesFP2Ret : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasBuiltinMSVaList : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsRenderScriptTarget : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasAArch64SVETypes : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned HasRISCVVTypes : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned AllowAMDGPUUnsafeFPAtomics : 1;
unsigned ARMCDECoprocMask : 8;
unsigned MaxOpenCLWorkGroupSize;
+ std::optional<unsigned> MaxBitIntWidth;
+
+ std::optional<llvm::Triple> DarwinTargetVariantTriple;
+
// TargetInfo Constructor. Default initializes all fields.
TargetInfo(const llvm::Triple &T);
@@ -242,6 +281,12 @@ protected:
// as a DataLayout object.
void resetDataLayout(StringRef DL, const char *UserLabelPrefix = "");
+ // Target features that are read-only and should not be disabled/enabled
+ // by command line options. Such features are for emitting predefined
+ // macros or checking availability of builtin functions and can be omitted
+ // in function attributes in IR.
+ llvm::StringSet<> ReadOnlyFeatures;
+
public:
/// Construct a target for the given options.
///
@@ -333,10 +378,11 @@ public:
IntType getUIntMaxType() const {
return getCorrespondingUnsignedType(IntMaxType);
}
- IntType getPtrDiffType(unsigned AddrSpace) const {
- return AddrSpace == 0 ? PtrDiffType : getPtrDiffTypeV(AddrSpace);
+ IntType getPtrDiffType(LangAS AddrSpace) const {
+ return AddrSpace == LangAS::Default ? PtrDiffType
+ : getPtrDiffTypeV(AddrSpace);
}
- IntType getUnsignedPtrDiffType(unsigned AddrSpace) const {
+ IntType getUnsignedPtrDiffType(LangAS AddrSpace) const {
return getCorrespondingUnsignedType(getPtrDiffType(AddrSpace));
}
IntType getIntPtrType() const { return IntPtrType; }
@@ -401,7 +447,8 @@ public:
/// is represented as one of those two). At this time, there is no support
/// for an explicit "PPC double-double" type (i.e. __ibm128) so we only
/// need to differentiate between "long double" and IEEE quad precision.
- RealType getRealTypeByWidth(unsigned BitWidth, bool ExplicitIEEE) const;
+ FloatModeKind getRealTypeByWidth(unsigned BitWidth,
+ FloatModeKind ExplicitType) const;
/// Return the alignment (in bits) of the specified integer type enum.
///
@@ -413,11 +460,13 @@ public:
/// Return the width of pointers on this target, for the
/// specified address space.
- uint64_t getPointerWidth(unsigned AddrSpace) const {
- return AddrSpace == 0 ? PointerWidth : getPointerWidthV(AddrSpace);
+ uint64_t getPointerWidth(LangAS AddrSpace) const {
+ return AddrSpace == LangAS::Default ? PointerWidth
+ : getPointerWidthV(AddrSpace);
}
- uint64_t getPointerAlign(unsigned AddrSpace) const {
- return AddrSpace == 0 ? PointerAlign : getPointerAlignV(AddrSpace);
+ uint64_t getPointerAlign(LangAS AddrSpace) const {
+ return AddrSpace == LangAS::Default ? PointerAlign
+ : getPointerAlignV(AddrSpace);
}
/// Return the maximum width of pointers on this target.
@@ -461,6 +510,9 @@ public:
unsigned getLongLongWidth() const { return LongLongWidth; }
unsigned getLongLongAlign() const { return LongLongAlign; }
+ /// getInt128Align() - Returns the alignment of Int128.
+ unsigned getInt128Align() const { return Int128Align; }
+
/// getShortAccumWidth/Align - Return the size of 'signed short _Accum' and
/// 'unsigned short _Accum' for this target, in bits.
unsigned getShortAccumWidth() const { return ShortAccumWidth; }
@@ -576,18 +628,42 @@ public:
/// Determine whether the __int128 type is supported on this target.
virtual bool hasInt128Type() const {
- return (getPointerWidth(0) >= 64) || getTargetOpts().ForceEnableInt128;
+ return (getPointerWidth(LangAS::Default) >= 64) ||
+ getTargetOpts().ForceEnableInt128;
} // FIXME
- /// Determine whether the _ExtInt type is supported on this target. This
+ /// Determine whether the _BitInt type is supported on this target. This
/// limitation is put into place for ABI reasons.
- virtual bool hasExtIntType() const {
+ /// FIXME: _BitInt is a required type in C23, so there's not much utility in
+ /// asking whether the target supported it or not; I think this should be
+ /// removed once backends have been alerted to the type and have had the
+ /// chance to do implementation work if needed.
+ virtual bool hasBitIntType() const {
return false;
}
+ // Different targets may support a different maximum width for the _BitInt
+ // type, depending on what operations are supported.
+ virtual size_t getMaxBitIntWidth() const {
+ // Consider -fexperimental-max-bitint-width= first.
+ if (MaxBitIntWidth)
+ return std::min<size_t>(*MaxBitIntWidth, llvm::IntegerType::MAX_INT_BITS);
+
+ // FIXME: this value should be llvm::IntegerType::MAX_INT_BITS, which is
+ // maximum bit width that LLVM claims its IR can support. However, most
+ // backends currently have a bug where they only support float to int
+ // conversion (and vice versa) on types that are <= 128 bits and crash
+ // otherwise. We're setting the max supported value to 128 to be
+ // conservative.
+ return 128;
+ }
+
/// Determine whether _Float16 is supported on this target.
virtual bool hasLegalHalfType() const { return HasLegalHalfType; }
+ /// Whether half args and returns are supported.
+ virtual bool allowHalfArgsAndReturns() const { return HalfArgsAndReturns; }
+
/// Determine whether the __float128 type is supported on this target.
virtual bool hasFloat128Type() const { return HasFloat128; }
@@ -595,7 +671,23 @@ public:
virtual bool hasFloat16Type() const { return HasFloat16; }
/// Determine whether the _BFloat16 type is supported on this target.
- virtual bool hasBFloat16Type() const { return HasBFloat16; }
+ virtual bool hasBFloat16Type() const {
+ return HasBFloat16 || HasFullBFloat16;
+ }
+
+ /// Determine whether the BFloat type is fully supported on this target, i.e
+ /// arithemtic operations.
+ virtual bool hasFullBFloat16Type() const { return HasFullBFloat16; }
+
+ /// Determine whether the __ibm128 type is supported on this target.
+ virtual bool hasIbm128Type() const { return HasIbm128; }
+
+ /// Determine whether the long double type is supported on this target.
+ virtual bool hasLongDoubleType() const { return HasLongDouble; }
+
+ /// Determine whether return of a floating point value is supported
+ /// on this target.
+ virtual bool hasFPReturn() const { return HasFPReturn; }
/// Determine whether constrained floating point is supported on this target.
virtual bool hasStrictFP() const { return HasStrictFP; }
@@ -618,8 +710,8 @@ public:
}
/// Return the largest alignment for which a suitably-sized allocation with
- /// '::operator new(size_t)' or 'malloc' is guaranteed to produce a
- /// correctly-aligned pointer.
+ /// '::operator new(size_t)' is guaranteed to produce a correctly-aligned
+ /// pointer.
unsigned getNewAlign() const {
return NewAlign ? NewAlign : std::max(LongDoubleAlign, LongLongAlign);
}
@@ -675,19 +767,32 @@ public:
return *Float128Format;
}
+ /// getIbm128Width/Align/Format - Return the size/align/format of
+ /// '__ibm128'.
+ unsigned getIbm128Width() const { return 128; }
+ unsigned getIbm128Align() const { return Ibm128Align; }
+ const llvm::fltSemantics &getIbm128Format() const { return *Ibm128Format; }
+
/// Return the mangled code of long double.
virtual const char *getLongDoubleMangling() const { return "e"; }
/// Return the mangled code of __float128.
virtual const char *getFloat128Mangling() const { return "g"; }
- /// Return the mangled code of bfloat.
- virtual const char *getBFloat16Mangling() const {
- llvm_unreachable("bfloat not implemented on this target");
+ /// Return the mangled code of __ibm128.
+ virtual const char *getIbm128Mangling() const {
+ llvm_unreachable("ibm128 not implemented on this target");
}
+ /// Return the mangled code of bfloat.
+ virtual const char *getBFloat16Mangling() const { return "DF16b"; }
+
/// Return the value for the C99 FLT_EVAL_METHOD macro.
- virtual unsigned getFloatEvalMethod() const { return 0; }
+ virtual LangOptions::FPEvalMethodKind getFPEvalMethod() const {
+ return LangOptions::FPEvalMethodKind::FEM_Source;
+ }
+
+ virtual bool supportSourceEvalMethod() const { return true; }
// getLargeArrayMinWidth/Align - Return the minimum array size that is
// 'large' and its alignment.
@@ -715,10 +820,6 @@ public:
/// Return the maximum vector alignment supported for the given target.
unsigned getMaxVectorAlign() const { return MaxVectorAlign; }
- /// Return default simd alignment for the given target. Generally, this
- /// value is type-specific, but this alignment can be used for most of the
- /// types for the given target.
- unsigned getSimdDefaultAlign() const { return SimdDefaultAlign; }
unsigned getMaxOpenCLWorkGroupSize() const { return MaxOpenCLWorkGroupSize; }
@@ -743,7 +844,9 @@ public:
}
// Return the size of unwind_word for this target.
- virtual unsigned getUnwindWordWidth() const { return getPointerWidth(0); }
+ virtual unsigned getUnwindWordWidth() const {
+ return getPointerWidth(LangAS::Default);
+ }
/// Return the "preferred" register width on this target.
virtual unsigned getRegisterWidth() const {
@@ -833,8 +936,8 @@ public:
/// Check whether the given real type should use the "fpret" flavor of
/// Objective-C message passing on this target.
- bool useObjCFPRetForRealType(RealType T) const {
- return RealTypeUsesObjCFPRet & (1 << T);
+ bool useObjCFPRetForRealType(FloatModeKind T) const {
+ return (int)((FloatModeKind)RealTypeUsesObjCFPRetMask & T);
}
/// Check whether _Complex long double should use the "fp2ret" flavor
@@ -870,6 +973,11 @@ public:
/// across the current set of primary and secondary targets.
virtual ArrayRef<Builtin::Info> getTargetBuiltins() const = 0;
+ /// Returns target-specific min and max values VScale_Range.
+ virtual std::optional<std::pair<unsigned, unsigned>>
+ getVScaleRange(const LangOptions &LangOpts) const {
+ return std::nullopt;
+ }
/// The __builtin_clz* and __builtin_ctz* built-in
/// functions are specified to have undefined results for zero inputs, but
/// on targets that support these operations in a way that provides
@@ -993,8 +1101,7 @@ public:
}
bool isValidAsmImmediate(const llvm::APInt &Value) const {
if (!ImmSet.empty())
- return Value.isSignedIntN(32) &&
- ImmSet.count(Value.getZExtValue()) != 0;
+ return Value.isSignedIntN(32) && ImmSet.contains(Value.getZExtValue());
return !ImmRange.isConstrained ||
(Value.sge(ImmRange.Min) && Value.sle(ImmRange.Max));
}
@@ -1093,12 +1200,12 @@ public:
/// Replace some escaped characters with another string based on
/// target-specific rules
- virtual llvm::Optional<std::string> handleAsmEscapedChar(char C) const {
- return llvm::None;
+ virtual std::optional<std::string> handleAsmEscapedChar(char C) const {
+ return std::nullopt;
}
/// Returns a string of target-specific clobbers, in LLVM format.
- virtual const char *getClobbers() const = 0;
+ virtual std::string_view getClobbers() const = 0;
/// Returns true if NaN encoding is IEEE 754-2008.
/// Only MIPS allows a different encoding.
@@ -1112,7 +1219,9 @@ public:
}
/// Returns the target ID if supported.
- virtual llvm::Optional<std::string> getTargetID() const { return llvm::None; }
+ virtual std::optional<std::string> getTargetID() const {
+ return std::nullopt;
+ }
const char *getDataLayoutString() const {
assert(!DataLayoutString.empty() && "Uninitialized DataLayout!");
@@ -1145,12 +1254,12 @@ public:
/// Microsoft C++ code using dllimport/export attributes?
virtual bool shouldDLLImportComdatSymbols() const {
return getTriple().isWindowsMSVCEnvironment() ||
- getTriple().isWindowsItaniumEnvironment() || getTriple().isPS4CPU();
+ getTriple().isWindowsItaniumEnvironment() || getTriple().isPS();
}
// Does this target have PS4 specific dllimport/export handling?
virtual bool hasPS4DLLImportExport() const {
- return getTriple().isPS4CPU() ||
+ return getTriple().isPS() ||
// Windows Itanium support allows for testing the SCEI flavour of
// dllimport/export handling on a Windows system.
(getTriple().isWindowsItaniumEnvironment() &&
@@ -1164,10 +1273,6 @@ public:
/// the language based on the target options where applicable.
virtual void adjust(DiagnosticsEngine &Diags, LangOptions &Opts);
- /// Adjust target options based on codegen options.
- virtual void adjustTargetOptions(const CodeGenOptions &CGOpts,
- TargetOptions &TargetOpts) const {}
-
/// Initialize the map with the default set of target features for the
/// CPU this should include all legal feature strings on the target.
///
@@ -1199,18 +1304,20 @@ public:
fillValidCPUList(Values);
}
- /// brief Determine whether this TargetInfo supports the given CPU name.
+ /// Determine whether this TargetInfo supports the given CPU name.
virtual bool isValidCPUName(StringRef Name) const {
return true;
}
- /// brief Determine whether this TargetInfo supports the given CPU name for
- // tuning.
+ /// Determine whether this TargetInfo supports the given CPU name for
+ /// tuning.
virtual bool isValidTuneCPUName(StringRef Name) const {
return isValidCPUName(Name);
}
- /// brief Determine whether this TargetInfo supports tune in target attribute.
+ virtual ParsedTargetAttr parseTargetAttr(StringRef Str) const;
+
+ /// Determine whether this TargetInfo supports tune in target attribute.
virtual bool supportsTargetAttributeTune() const {
return false;
}
@@ -1248,17 +1355,36 @@ public:
return true;
}
+ /// Returns true if feature has an impact on target code
+ /// generation.
+ virtual bool doesFeatureAffectCodeGen(StringRef Feature) const {
+ return true;
+ }
+
+ /// For given feature return dependent ones.
+ virtual StringRef getFeatureDependencies(StringRef Feature) const {
+ return StringRef();
+ }
+
struct BranchProtectionInfo {
LangOptions::SignReturnAddressScopeKind SignReturnAddr =
LangOptions::SignReturnAddressScopeKind::None;
LangOptions::SignReturnAddressKeyKind SignKey =
LangOptions::SignReturnAddressKeyKind::AKey;
bool BranchTargetEnforcement = false;
+ bool BranchProtectionPAuthLR = false;
+ bool GuardedControlStack = false;
};
+ /// Determine if the Architecture in this TargetInfo supports branch
+ /// protection
+ virtual bool isBranchProtectionSupportedArch(StringRef Arch) const {
+ return false;
+ }
+
/// Determine if this TargetInfo supports the given branch protection
/// specification
- virtual bool validateBranchProtection(StringRef Spec,
+ virtual bool validateBranchProtection(StringRef Spec, StringRef Arch,
BranchProtectionInfo &BPI,
StringRef &Err) const {
Err = "";
@@ -1286,12 +1412,25 @@ public:
return false;
}
+ /// Determine whether the given target feature is read only.
+ bool isReadOnlyFeature(StringRef Feature) const {
+ return ReadOnlyFeatures.count(Feature);
+ }
+
/// Identify whether this target supports multiversioning of functions,
/// which requires support for cpu_supports and cpu_is functionality.
- bool supportsMultiVersioning() const { return getTriple().isX86(); }
+ bool supportsMultiVersioning() const {
+ return getTriple().isX86() || getTriple().isAArch64();
+ }
/// Identify whether this target supports IFuncs.
- bool supportsIFunc() const { return getTriple().isOSBinFormatELF(); }
+ bool supportsIFunc() const {
+ if (getTriple().isOSBinFormatMachO())
+ return true;
+ return getTriple().isOSBinFormatELF() &&
+ ((getTriple().isOSLinux() && !getTriple().isMusl()) ||
+ getTriple().isOSFreeBSD());
+ }
// Validate the contents of the __builtin_cpu_supports(const char*)
// argument.
@@ -1303,6 +1442,10 @@ public:
return 0;
}
+ // Return the target-specific cost for feature
+ // that taken into account in priority sorting.
+ virtual unsigned multiVersionFeatureCost() const { return 0; }
+
// Validate the contents of the __builtin_cpu_is(const char*)
// argument.
virtual bool validateCpuIs(StringRef Name) const { return false; }
@@ -1319,6 +1462,13 @@ public:
"cpu_specific Multiversioning not implemented on this target");
}
+ // Get the value for the 'tune-cpu' flag for a cpu_specific variant with the
+ // programmer-specified 'Name'.
+ virtual StringRef getCPUSpecificTuneName(StringRef Name) const {
+ llvm_unreachable(
+ "cpu_specific Multiversioning not implemented on this target");
+ }
+
// Get a list of the features that make up the CPU option for
// cpu_specific/cpu_dispatch so that it can be passed to llvm as optimization
// options.
@@ -1329,8 +1479,10 @@ public:
}
// Get the cache line size of a given cpu. This method switches over
- // the given cpu and returns "None" if the CPU is not found.
- virtual Optional<unsigned> getCPUCacheLineSize() const { return None; }
+ // the given cpu and returns "std::nullopt" if the CPU is not found.
+ virtual std::optional<unsigned> getCPUCacheLineSize() const {
+ return std::nullopt;
+ }
// Returns maximal number of args passed in registers.
unsigned getRegParmMax() const {
@@ -1383,6 +1535,11 @@ public:
}
const LangASMap &getAddressSpaceMap() const { return *AddrSpaceMap; }
+ unsigned getTargetAddressSpace(LangAS AS) const {
+ if (isTargetAddressSpace(AS))
+ return toTargetAddressSpace(AS);
+ return getAddressSpaceMap()[(unsigned)AS];
+ }
/// Map from the address space field in builtin description strings to the
/// language address space.
@@ -1399,15 +1556,15 @@ public:
/// Return an AST address space which can be used opportunistically
/// for constant global memory. It must be possible to convert pointers into
/// this address space to LangAS::Default. If no such address space exists,
- /// this may return None, and such optimizations will be disabled.
- virtual llvm::Optional<LangAS> getConstantAddressSpace() const {
+ /// this may return std::nullopt, and such optimizations will be disabled.
+ virtual std::optional<LangAS> getConstantAddressSpace() const {
return LangAS::Default;
}
- /// Return a target-specific GPU grid value based on the GVIDX enum \p gv
- unsigned getGridValue(llvm::omp::GVIDX gv) const {
- assert(GridValues != nullptr && "GridValues not initialized");
- return GridValues[gv];
+ // access target-specific GPU grid values that must be consistent between
+ // host RTL (plugin), deviceRTL and clang.
+ virtual const llvm::omp::GV &getGridValue() const {
+ llvm_unreachable("getGridValue not implemented on this target");
}
/// Retrieve the name of the platform as it is used in the
@@ -1464,6 +1621,14 @@ public:
virtual CallingConvKind getCallingConvKind(bool ClangABICompat4) const;
+ /// Controls whether explicitly defaulted (`= default`) special member
+ /// functions disqualify something from being POD-for-the-purposes-of-layout.
+ /// Historically, Clang didn't consider these acceptable for POD, but GCC
+ /// does. So in newer Clang ABIs they are acceptable for POD to be compatible
+ /// with GCC/Itanium ABI, and remains disqualifying for targets that need
+ /// Clang backwards compatibility rather than GCC/Itanium ABI compatibility.
+ virtual bool areDefaultedSMFStillPOD(const LangOptions&) const;
+
/// Controls if __builtin_longjmp / __builtin_setjmp can be lowered to
/// llvm.eh.sjlj.longjmp / llvm.eh.sjlj.setjmp.
virtual bool hasSjLjLowering() const {
@@ -1474,7 +1639,7 @@ public:
virtual bool
checkCFProtectionBranchSupported(DiagnosticsEngine &Diags) const;
- /// Check if the target supports CFProtection branch.
+ /// Check if the target supports CFProtection return.
virtual bool
checkCFProtectionReturnSupported(DiagnosticsEngine &Diags) const;
@@ -1531,10 +1696,11 @@ public:
/// space \p AddressSpace to be converted in order to be used, then return the
/// corresponding target specific DWARF address space.
///
- /// \returns Otherwise return None and no conversion will be emitted in the
- /// DWARF.
- virtual Optional<unsigned> getDWARFAddressSpace(unsigned AddressSpace) const {
- return None;
+ /// \returns Otherwise return std::nullopt and no conversion will be emitted
+ /// in the DWARF.
+ virtual std::optional<unsigned> getDWARFAddressSpace(unsigned AddressSpace)
+ const {
+ return std::nullopt;
}
/// \returns The version of the SDK which was used during the compilation if
@@ -1558,22 +1724,39 @@ public:
/// Whether target allows debuginfo types for decl only variables/functions.
virtual bool allowDebugInfoForExternalRef() const { return false; }
+ /// Returns the darwin target variant triple, the variant of the deployment
+ /// target for which the code is being compiled.
+ const llvm::Triple *getDarwinTargetVariantTriple() const {
+ return DarwinTargetVariantTriple ? &*DarwinTargetVariantTriple : nullptr;
+ }
+
+ /// Returns the version of the darwin target variant SDK which was used during
+ /// the compilation if one was specified, or an empty version otherwise.
+ const std::optional<VersionTuple> getDarwinTargetVariantSDKVersion() const {
+ return !getTargetOpts().DarwinTargetVariantSDKVersion.empty()
+ ? getTargetOpts().DarwinTargetVariantSDKVersion
+ : std::optional<VersionTuple>();
+ }
+
+ /// Whether to support HIP image/texture API's.
+ virtual bool hasHIPImageSupport() const { return true; }
+
protected:
/// Copy type and layout related info.
void copyAuxTarget(const TargetInfo *Aux);
- virtual uint64_t getPointerWidthV(unsigned AddrSpace) const {
+ virtual uint64_t getPointerWidthV(LangAS AddrSpace) const {
return PointerWidth;
}
- virtual uint64_t getPointerAlignV(unsigned AddrSpace) const {
+ virtual uint64_t getPointerAlignV(LangAS AddrSpace) const {
return PointerAlign;
}
- virtual enum IntType getPtrDiffTypeV(unsigned AddrSpace) const {
+ virtual enum IntType getPtrDiffTypeV(LangAS AddrSpace) const {
return PtrDiffType;
}
virtual ArrayRef<const char *> getGCCRegNames() const = 0;
virtual ArrayRef<GCCRegAlias> getGCCRegAliases() const = 0;
virtual ArrayRef<AddlRegName> getGCCAddlRegNames() const {
- return None;
+ return std::nullopt;
}
private:
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TargetOSMacros.def b/contrib/llvm-project/clang/include/clang/Basic/TargetOSMacros.def
new file mode 100644
index 000000000000..dfc2e033f6fd
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/TargetOSMacros.def
@@ -0,0 +1,55 @@
+//===--- TargetOSMacros.def - Target OS macros ------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file specifies the predefined TARGET_OS_* conditional macros.
+// A target macro `Name` should be defined if `Predicate` evaluates to true.
+// The macro expects `const llvm::Triple &Triple` and the class `llvm::Triple`
+// to be available for the predicate.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TARGET_OS
+#define TARGET_OS(Name, Predicate)
+#endif
+
+// Windows targets.
+TARGET_OS(TARGET_OS_WIN32, Triple.isOSWindows())
+TARGET_OS(TARGET_OS_WINDOWS, Triple.isOSWindows())
+
+// Linux target.
+TARGET_OS(TARGET_OS_LINUX, Triple.isOSLinux())
+
+// Unix target.
+TARGET_OS(TARGET_OS_UNIX, Triple.isOSNetBSD() ||
+ Triple.isOSFreeBSD() ||
+ Triple.isOSOpenBSD() ||
+ Triple.isOSSolaris())
+
+// Apple (Mac) targets.
+TARGET_OS(TARGET_OS_MAC, Triple.isOSDarwin())
+TARGET_OS(TARGET_OS_OSX, Triple.isMacOSX())
+TARGET_OS(TARGET_OS_IPHONE, Triple.isiOS() || Triple.isTvOS() ||
+ Triple.isWatchOS())
+// Triple::isiOS() also includes tvOS
+TARGET_OS(TARGET_OS_IOS, Triple.getOS() == llvm::Triple::IOS)
+TARGET_OS(TARGET_OS_TV, Triple.isTvOS())
+TARGET_OS(TARGET_OS_WATCH, Triple.isWatchOS())
+TARGET_OS(TARGET_OS_DRIVERKIT, Triple.isDriverKit())
+TARGET_OS(TARGET_OS_MACCATALYST, Triple.isMacCatalystEnvironment())
+TARGET_OS(TARGET_OS_SIMULATOR, Triple.isSimulatorEnvironment())
+
+// Deprecated Apple target conditionals.
+TARGET_OS(TARGET_OS_EMBEDDED, (Triple.isiOS() || Triple.isTvOS() \
+ || Triple.isWatchOS()) \
+ && !Triple.isMacCatalystEnvironment() \
+ && !Triple.isSimulatorEnvironment())
+TARGET_OS(TARGET_OS_NANO, Triple.isWatchOS())
+TARGET_OS(TARGET_IPHONE_SIMULATOR, Triple.isSimulatorEnvironment())
+TARGET_OS(TARGET_OS_UIKITFORMAC, Triple.isMacCatalystEnvironment())
+
+#undef TARGET_OS
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TargetOptions.h b/contrib/llvm-project/clang/include/clang/Basic/TargetOptions.h
index 81c15adb8248..2049f03b2889 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TargetOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/TargetOptions.h
@@ -45,7 +45,7 @@ public:
std::string ABI;
/// The EABI version to use
- llvm::EABI EABIVersion;
+ llvm::EABI EABIVersion = llvm::EABI::Default;
/// If given, the version string of the linker in use.
std::string LinkerVersion;
@@ -78,12 +78,33 @@ public:
/// \brief If enabled, allow AMDGPU unsafe floating point atomics.
bool AllowAMDGPUUnsafeFPAtomics = false;
+ /// \brief Code object version for AMDGPU.
+ llvm::CodeObjectVersionKind CodeObjectVersion =
+ llvm::CodeObjectVersionKind::COV_None;
+
+ /// \brief Enumeration values for AMDGPU printf lowering scheme
+ enum class AMDGPUPrintfKind {
+ /// printf lowering scheme involving hostcalls, currently used by HIP
+ /// programs by default
+ Hostcall = 0,
+
+ /// printf lowering scheme involving implicit printf buffers,
+ Buffered = 1,
+ };
+
+ /// \brief AMDGPU Printf lowering scheme
+ AMDGPUPrintfKind AMDGPUPrintfKindVal = AMDGPUPrintfKind::Hostcall;
+
// The code model to be used as specified by the user. Corresponds to
// CodeModel::Model enum defined in include/llvm/Support/CodeGen.h, plus
// "default" for the case when the user has not explicitly specified a
// code model.
std::string CodeModel;
+ // The large data threshold used for certain code models on certain
+ // architectures.
+ uint64_t LargeDataThreshold;
+
/// The version of the SDK which was used during the compilation.
/// The option is used for two different purposes:
/// * on darwin the version is propagated to LLVM where it's used
@@ -91,8 +112,21 @@ public:
/// * CUDA compilation uses it to control parts of CUDA compilation
/// in clang that depend on specific version of the CUDA SDK.
llvm::VersionTuple SDKVersion;
+
+ /// The name of the darwin target- ariant triple to compile for.
+ std::string DarwinTargetVariantTriple;
+
+ /// The version of the darwin target variant SDK which was used during the
+ /// compilation.
+ llvm::VersionTuple DarwinTargetVariantSDKVersion;
+
+ /// The validator version for dxil.
+ std::string DxilValidatorVersion;
+
+ /// The entry point name for HLSL shader being compiled as specified by -E.
+ std::string HLSLEntry;
};
-} // end namespace clang
+} // end namespace clang
#endif
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Thunk.h b/contrib/llvm-project/clang/include/clang/Basic/Thunk.h
index 91088be6ae73..0247e279408f 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Thunk.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Thunk.h
@@ -26,7 +26,7 @@ class CXXMethodDecl;
struct ReturnAdjustment {
/// The non-virtual adjustment from the derived object to its
/// nearest virtual base.
- int64_t NonVirtual;
+ int64_t NonVirtual = 0;
/// Holds the ABI-specific information about the virtual return
/// adjustment, if needed.
@@ -64,7 +64,7 @@ struct ReturnAdjustment {
}
} Virtual;
- ReturnAdjustment() : NonVirtual(0) {}
+ ReturnAdjustment() = default;
bool isEmpty() const { return !NonVirtual && Virtual.isEmpty(); }
@@ -91,7 +91,7 @@ struct ReturnAdjustment {
struct ThisAdjustment {
/// The non-virtual adjustment from the derived object to its
/// nearest virtual base.
- int64_t NonVirtual;
+ int64_t NonVirtual = 0;
/// Holds the ABI-specific information about the virtual this
/// adjustment, if needed.
@@ -131,7 +131,7 @@ struct ThisAdjustment {
}
} Virtual;
- ThisAdjustment() : NonVirtual(0) {}
+ ThisAdjustment() = default;
bool isEmpty() const { return !NonVirtual && Virtual.isEmpty(); }
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.def b/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.def
index 48a664e3494e..c10e2adfbe6e 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.def
+++ b/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.def
@@ -9,8 +9,7 @@
// This file defines the TokenKind database. This includes normal tokens like
// tok::ampamp (corresponding to the && token) as well as keywords for various
// languages. Users of this file must optionally #define the TOK, KEYWORD,
-// CXX11_KEYWORD, CONCEPTS_KEYWORD, ALIAS, or PPKEYWORD macros to make use of
-// this file.
+// CXX11_KEYWORD, ALIAS, or PPKEYWORD macros to make use of this file.
//
//===----------------------------------------------------------------------===//
@@ -29,8 +28,11 @@
#ifndef CXX20_KEYWORD
#define CXX20_KEYWORD(X,Y) KEYWORD(X,KEYCXX20|(Y))
#endif
-#ifndef CONCEPTS_KEYWORD
-#define CONCEPTS_KEYWORD(X) CXX20_KEYWORD(X,KEYCONCEPTS)
+#ifndef C99_KEYWORD
+#define C99_KEYWORD(X,Y) KEYWORD(X,KEYC99|(Y))
+#endif
+#ifndef C23_KEYWORD
+#define C23_KEYWORD(X,Y) KEYWORD(X,KEYC23|(Y))
#endif
#ifndef COROUTINES_KEYWORD
#define COROUTINES_KEYWORD(X) CXX20_KEYWORD(X,KEYCOROUTINES)
@@ -83,6 +85,9 @@
#ifndef PRAGMA_ANNOTATION
#define PRAGMA_ANNOTATION(X) ANNOTATION(X)
#endif
+#ifndef INTERESTING_IDENTIFIER
+#define INTERESTING_IDENTIFIER(X)
+#endif
//===----------------------------------------------------------------------===//
// Preprocessor keywords.
@@ -254,13 +259,12 @@ PUNCTUATOR(caretcaret, "^^")
// always be treated as a keyword
// KEYC99 - This is a keyword introduced to C in C99
// KEYC11 - This is a keyword introduced to C in C11
+// KEYC23 - This is a keyword introduced to C in C23
// KEYCXX - This is a C++ keyword, or a C++-specific keyword in the
// implementation namespace
// KEYNOCXX - This is a keyword in every non-C++ dialect.
// KEYCXX11 - This is a C++ keyword introduced to C++ in C++11
// KEYCXX20 - This is a C++ keyword introduced to C++ in C++20
-// KEYCONCEPTS - This is a keyword if the C++ extensions for concepts
-// are enabled.
// KEYMODULES - This is a keyword if the C++ extensions for modules
// are enabled.
// KEYGNU - This is a keyword if GNU extensions are enabled
@@ -280,6 +284,8 @@ PUNCTUATOR(caretcaret, "^^")
// HALFSUPPORT - This is a keyword if 'half' is a built-in type
// WCHARSUPPORT - This is a keyword if 'wchar_t' is a built-in type
// CHAR8SUPPORT - This is a keyword if 'char8_t' is a built-in type
+// KEYFIXEDPOINT - This is a keyword according to the N1169 fixed point
+// extension.
//
KEYWORD(auto , KEYALL)
KEYWORD(break , KEYALL)
@@ -297,16 +303,16 @@ KEYWORD(float , KEYALL)
KEYWORD(for , KEYALL)
KEYWORD(goto , KEYALL)
KEYWORD(if , KEYALL)
-KEYWORD(inline , KEYC99|KEYCXX|KEYGNU)
KEYWORD(int , KEYALL)
KEYWORD(_ExtInt , KEYALL)
+KEYWORD(_BitInt , KEYALL)
KEYWORD(long , KEYALL)
KEYWORD(register , KEYALL)
-KEYWORD(restrict , KEYC99)
KEYWORD(return , KEYALL)
KEYWORD(short , KEYALL)
KEYWORD(signed , KEYALL)
UNARY_EXPR_OR_TYPE_TRAIT(sizeof, SizeOf, KEYALL)
+UNARY_EXPR_OR_TYPE_TRAIT(__datasizeof, DataSizeOf, KEYCXX)
KEYWORD(static , KEYALL)
KEYWORD(struct , KEYALL)
KEYWORD(switch , KEYALL)
@@ -333,7 +339,7 @@ KEYWORD(__objc_no , KEYALL)
// C++ 2.11p1: Keywords.
KEYWORD(asm , KEYCXX|KEYGNU)
-KEYWORD(bool , BOOLSUPPORT)
+KEYWORD(bool , BOOLSUPPORT|KEYC23)
KEYWORD(catch , KEYCXX)
KEYWORD(class , KEYCXX)
KEYWORD(const_cast , KEYCXX)
@@ -341,7 +347,7 @@ KEYWORD(delete , KEYCXX)
KEYWORD(dynamic_cast , KEYCXX)
KEYWORD(explicit , KEYCXX)
KEYWORD(export , KEYCXX)
-KEYWORD(false , BOOLSUPPORT)
+KEYWORD(false , BOOLSUPPORT|KEYC23)
KEYWORD(friend , KEYCXX)
KEYWORD(mutable , KEYCXX)
KEYWORD(namespace , KEYCXX)
@@ -355,7 +361,7 @@ KEYWORD(static_cast , KEYCXX)
KEYWORD(template , KEYCXX)
KEYWORD(this , KEYCXX)
KEYWORD(throw , KEYCXX)
-KEYWORD(true , BOOLSUPPORT)
+KEYWORD(true , BOOLSUPPORT|KEYC23)
KEYWORD(try , KEYCXX)
KEYWORD(typename , KEYCXX)
KEYWORD(typeid , KEYCXX)
@@ -376,45 +382,53 @@ CXX_KEYWORD_OPERATOR(or_eq , pipeequal)
CXX_KEYWORD_OPERATOR(xor , caret)
CXX_KEYWORD_OPERATOR(xor_eq , caretequal)
+// C99 Keywords.
+C99_KEYWORD(restrict , 0)
+C99_KEYWORD(inline , KEYCXX|KEYGNU)
+
+
// C++11 keywords
-CXX11_KEYWORD(alignas , 0)
+CXX11_KEYWORD(alignas , KEYC23)
// alignof and _Alignof return the required ABI alignment
-CXX11_UNARY_EXPR_OR_TYPE_TRAIT(alignof, AlignOf, 0)
+CXX11_UNARY_EXPR_OR_TYPE_TRAIT(alignof, AlignOf, KEYC23)
CXX11_KEYWORD(char16_t , KEYNOMS18)
CXX11_KEYWORD(char32_t , KEYNOMS18)
CXX11_KEYWORD(constexpr , 0)
CXX11_KEYWORD(decltype , 0)
CXX11_KEYWORD(noexcept , 0)
-CXX11_KEYWORD(nullptr , 0)
-CXX11_KEYWORD(static_assert , KEYMSCOMPAT)
-CXX11_KEYWORD(thread_local , 0)
+CXX11_KEYWORD(nullptr , KEYC23)
+CXX11_KEYWORD(static_assert , KEYMSCOMPAT|KEYC23)
+CXX11_KEYWORD(thread_local , KEYC23)
-// C++20 keywords
-CONCEPTS_KEYWORD(concept)
-CONCEPTS_KEYWORD(requires)
-
-// C++20 / coroutines TS keywords
+// C++20 / coroutines keywords
COROUTINES_KEYWORD(co_await)
COROUTINES_KEYWORD(co_return)
COROUTINES_KEYWORD(co_yield)
-// C++ modules TS keywords
+// C++20 keywords
MODULES_KEYWORD(module)
MODULES_KEYWORD(import)
// C++20 keywords.
CXX20_KEYWORD(consteval , 0)
CXX20_KEYWORD(constinit , 0)
+CXX20_KEYWORD(concept , 0)
+CXX20_KEYWORD(requires , 0)
+
// Not a CXX20_KEYWORD because it is disabled by -fno-char8_t.
KEYWORD(char8_t , CHAR8SUPPORT)
// C11 Extension
KEYWORD(_Float16 , KEYALL)
+// C23 keywords
+C23_KEYWORD(typeof , KEYGNU)
+C23_KEYWORD(typeof_unqual , 0)
+
// ISO/IEC JTC1 SC22 WG14 N1169 Extension
-KEYWORD(_Accum , KEYNOCXX)
-KEYWORD(_Fract , KEYNOCXX)
-KEYWORD(_Sat , KEYNOCXX)
+KEYWORD(_Accum , KEYFIXEDPOINT)
+KEYWORD(_Fract , KEYFIXEDPOINT)
+KEYWORD(_Sat , KEYFIXEDPOINT)
// GNU Extensions (in impl-reserved namespace)
KEYWORD(_Decimal32 , KEYALL)
@@ -428,9 +442,12 @@ KEYWORD(__attribute , KEYALL)
KEYWORD(__builtin_choose_expr , KEYALL)
KEYWORD(__builtin_offsetof , KEYALL)
KEYWORD(__builtin_FILE , KEYALL)
+KEYWORD(__builtin_FILE_NAME , KEYALL)
KEYWORD(__builtin_FUNCTION , KEYALL)
+KEYWORD(__builtin_FUNCSIG , KEYMS)
KEYWORD(__builtin_LINE , KEYALL)
KEYWORD(__builtin_COLUMN , KEYALL)
+KEYWORD(__builtin_source_location , KEYCXX)
// __builtin_types_compatible_p is a GNU C extension that we handle like a C++
// type trait.
@@ -438,6 +455,7 @@ TYPE_TRAIT_2(__builtin_types_compatible_p, TypeCompatible, KEYNOCXX)
KEYWORD(__builtin_va_arg , KEYALL)
KEYWORD(__extension__ , KEYALL)
KEYWORD(__float128 , KEYALL)
+KEYWORD(__ibm128 , KEYALL)
KEYWORD(__imag , KEYALL)
KEYWORD(__int128 , KEYALL)
KEYWORD(__label__ , KEYALL)
@@ -447,9 +465,6 @@ KEYWORD(__FUNCTION__ , KEYALL)
KEYWORD(__PRETTY_FUNCTION__ , KEYALL)
KEYWORD(__auto_type , KEYALL)
-// GNU Extensions (outside impl-reserved namespace)
-KEYWORD(typeof , KEYGNU)
-
// MS Extensions
KEYWORD(__FUNCDNAME__ , KEYMS)
KEYWORD(__FUNCSIG__ , KEYMS)
@@ -459,9 +474,9 @@ TYPE_TRAIT_1(__is_interface_class, IsInterfaceClass, KEYMS)
TYPE_TRAIT_1(__is_sealed, IsSealed, KEYMS)
// MSVC12.0 / VS2013 Type Traits
-TYPE_TRAIT_1(__is_destructible, IsDestructible, KEYMS)
+TYPE_TRAIT_1(__is_destructible, IsDestructible, KEYALL)
TYPE_TRAIT_1(__is_trivially_destructible, IsTriviallyDestructible, KEYCXX)
-TYPE_TRAIT_1(__is_nothrow_destructible, IsNothrowDestructible, KEYMS)
+TYPE_TRAIT_1(__is_nothrow_destructible, IsNothrowDestructible, KEYALL)
TYPE_TRAIT_2(__is_nothrow_assignable, IsNothrowAssignable, KEYCXX)
TYPE_TRAIT_N(__is_constructible, IsConstructible, KEYCXX)
TYPE_TRAIT_N(__is_nothrow_constructible, IsNothrowConstructible, KEYCXX)
@@ -505,10 +520,21 @@ TYPE_TRAIT_1(__is_trivially_copyable, IsTriviallyCopyable, KEYCXX)
TYPE_TRAIT_1(__is_union, IsUnion, KEYCXX)
TYPE_TRAIT_1(__has_unique_object_representations,
HasUniqueObjectRepresentations, KEYCXX)
-KEYWORD(__underlying_type , KEYCXX)
+
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) KEYWORD(__##Trait, KEYCXX)
+#include "clang/Basic/TransformTypeTraits.def"
// Clang-only C++ Type Traits
+TYPE_TRAIT_1(__is_trivially_relocatable, IsTriviallyRelocatable, KEYCXX)
+TYPE_TRAIT_1(__is_trivially_equality_comparable, IsTriviallyEqualityComparable, KEYCXX)
+TYPE_TRAIT_1(__is_bounded_array, IsBoundedArray, KEYCXX)
+TYPE_TRAIT_1(__is_unbounded_array, IsUnboundedArray, KEYCXX)
+TYPE_TRAIT_1(__is_nullptr, IsNullPointer, KEYCXX)
+TYPE_TRAIT_1(__is_scoped_enum, IsScopedEnum, KEYCXX)
+TYPE_TRAIT_1(__is_referenceable, IsReferenceable, KEYCXX)
+TYPE_TRAIT_1(__can_pass_in_regs, CanPassInRegs, KEYCXX)
TYPE_TRAIT_2(__reference_binds_to_temporary, ReferenceBindsToTemporary, KEYCXX)
+TYPE_TRAIT_2(__reference_constructs_from_temporary, ReferenceConstructsFromTemporary, KEYCXX)
// Embarcadero Expression Traits
EXPRESSION_TRAIT(__is_lvalue_expr, IsLValueExpr, KEYCXX)
@@ -595,6 +621,17 @@ KEYWORD(pipe , KEYOPENCLC | KEYOPENCLCXX)
// C++ for OpenCL s2.3.1: addrspace_cast operator
KEYWORD(addrspace_cast , KEYOPENCLCXX)
+// CUDA/HIP function attributes
+KEYWORD(__noinline__ , KEYCUDA)
+
+// HLSL keywords.
+KEYWORD(cbuffer , KEYHLSL)
+KEYWORD(tbuffer , KEYHLSL)
+KEYWORD(groupshared , KEYHLSL)
+KEYWORD(in , KEYHLSL)
+KEYWORD(inout , KEYHLSL)
+KEYWORD(out , KEYHLSL)
+
// OpenMP Type Traits
UNARY_EXPR_OR_TYPE_TRAIT(__builtin_omp_required_simd_align, OpenMPRequiredSimdAlign, KEYALL)
@@ -655,6 +692,9 @@ KEYWORD(_Nullable , KEYALL)
KEYWORD(_Nullable_result , KEYALL)
KEYWORD(_Null_unspecified , KEYALL)
+// WebAssembly Type Extension
+KEYWORD(__funcref , KEYALL)
+
// Microsoft extensions which should be disabled in strict conformance mode
KEYWORD(__ptr64 , KEYMS)
KEYWORD(__ptr32 , KEYMS)
@@ -673,36 +713,59 @@ KEYWORD(__multiple_inheritance , KEYMS)
KEYWORD(__virtual_inheritance , KEYMS)
KEYWORD(__interface , KEYMS)
ALIAS("__int8" , char , KEYMS)
-ALIAS("_int8" , char , KEYMS)
ALIAS("__int16" , short , KEYMS)
-ALIAS("_int16" , short , KEYMS)
ALIAS("__int32" , int , KEYMS)
-ALIAS("_int32" , int , KEYMS)
-ALIAS("_int64" , __int64 , KEYMS)
ALIAS("__wchar_t" , wchar_t , KEYMS)
-ALIAS("_asm" , asm , KEYMS)
-ALIAS("_alignof" , __alignof , KEYMS)
ALIAS("__builtin_alignof", __alignof , KEYMS)
-ALIAS("_cdecl" , __cdecl , KEYMS | KEYBORLAND)
-ALIAS("_fastcall" , __fastcall , KEYMS | KEYBORLAND)
-ALIAS("_stdcall" , __stdcall , KEYMS | KEYBORLAND)
-ALIAS("_thiscall" , __thiscall , KEYMS)
-ALIAS("_vectorcall" , __vectorcall, KEYMS)
-ALIAS("_uuidof" , __uuidof , KEYMS | KEYBORLAND)
-ALIAS("_inline" , inline , KEYMS)
-ALIAS("_declspec" , __declspec , KEYMS)
+
+// Microsoft single-underscore prefixed aliases for double-underscore prefixed
+// keywords.
+ALIAS("_asm" , asm , KEYMS)
+ALIAS("_alignof" , __alignof , KEYMS)
+ALIAS("_cdecl" , __cdecl , KEYMS | KEYBORLAND)
+ALIAS("_declspec" , __declspec , KEYMS)
+ALIAS("_fastcall" , __fastcall , KEYMS | KEYBORLAND)
+ALIAS("_finally" , __finally , KEYMSCOMPAT)
+ALIAS("_forceinline" , __forceinline, KEYMSCOMPAT)
+ALIAS("_inline" , inline , KEYMS)
+ALIAS("_int8" , char , KEYMS)
+ALIAS("_int16" , short , KEYMS)
+ALIAS("_int32" , int , KEYMS)
+ALIAS("_int64" , __int64 , KEYMS)
+ALIAS("_leave" , __leave , KEYMSCOMPAT)
+ALIAS("_multiple_inheritance", __multiple_inheritance, KEYMSCOMPAT)
+ALIAS("_ptr32" , __ptr32 , KEYMSCOMPAT)
+ALIAS("_ptr64" , __ptr64 , KEYMSCOMPAT)
+ALIAS("_restrict" , restrict , KEYMSCOMPAT)
+ALIAS("_stdcall" , __stdcall , KEYMS | KEYBORLAND)
+ALIAS("_thiscall" , __thiscall , KEYMS)
+ALIAS("_try" , __try , KEYMSCOMPAT)
+ALIAS("_vectorcall" , __vectorcall , KEYMS)
+ALIAS("_unaligned" , __unaligned , KEYMSCOMPAT)
+ALIAS("_uptr" , __uptr , KEYMSCOMPAT)
+ALIAS("_uuidof" , __uuidof , KEYMS | KEYBORLAND)
+ALIAS("_virtual_inheritance", __virtual_inheritance, KEYMSCOMPAT)
+ALIAS("_w64" , __w64 , KEYMSCOMPAT)
// Borland Extensions which should be disabled in strict conformance mode.
ALIAS("_pascal" , __pascal , KEYBORLAND)
// Clang Extensions.
KEYWORD(__builtin_convertvector , KEYALL)
+UNARY_EXPR_OR_TYPE_TRAIT(__builtin_vectorelements, VectorElements, KEYALL)
ALIAS("__char16_t" , char16_t , KEYCXX)
ALIAS("__char32_t" , char32_t , KEYCXX)
KEYWORD(__builtin_bit_cast , KEYALL)
KEYWORD(__builtin_available , KEYALL)
KEYWORD(__builtin_sycl_unique_stable_name, KEYSYCL)
+// Keywords defined by Attr.td.
+// The "EMPTY ## X" is used to prevent early macro-expansion of the keyword.
+#ifndef KEYWORD_ATTRIBUTE
+#define KEYWORD_ATTRIBUTE(X, HASARG, EMPTY) KEYWORD(EMPTY ## X, KEYALL)
+#endif
+#include "clang/Basic/RegularKeywordAttrInfo.inc"
+
// Clang-specific keywords enabled only in testing.
TESTING_KEYWORD(__unknown_anytype , KEYALL)
@@ -743,6 +806,17 @@ OBJC_AT_KEYWORD(dynamic)
OBJC_AT_KEYWORD(import)
OBJC_AT_KEYWORD(available)
+//===----------------------------------------------------------------------===//
+// Interesting identifiers.
+//===----------------------------------------------------------------------===//
+INTERESTING_IDENTIFIER(not_interesting)
+INTERESTING_IDENTIFIER(FILE)
+INTERESTING_IDENTIFIER(jmp_buf)
+INTERESTING_IDENTIFIER(sigjmp_buf)
+INTERESTING_IDENTIFIER(ucontext_t)
+INTERESTING_IDENTIFIER(float_t)
+INTERESTING_IDENTIFIER(double_t)
+
// TODO: What to do about context-sensitive keywords like:
// bycopy/byref/in/inout/oneway/out?
@@ -827,16 +901,22 @@ PRAGMA_ANNOTATION(pragma_redefine_extname)
// handles them.
PRAGMA_ANNOTATION(pragma_fp_contract)
-// Annotation for #pragma STDC FENV_ACCESS
+// Annotations for #pragma STDC FENV_ACCESS and #pragma fenv_access (MS compat)
// The lexer produces these so that they only take effect when the parser
// handles them.
PRAGMA_ANNOTATION(pragma_fenv_access)
+PRAGMA_ANNOTATION(pragma_fenv_access_ms)
// Annotation for #pragma STDC FENV_ROUND
// The lexer produces these so that they only take effect when the parser
// handles them.
PRAGMA_ANNOTATION(pragma_fenv_round)
+// Annotation for #pragma STDC CX_LIMITED_RANGE
+// The lexer produces these so that they only take effect when the parser
+// handles them.
+PRAGMA_ANNOTATION(pragma_cx_limited_range)
+
// Annotation for #pragma float_control
// The lexer produces these so that they only take effect when the parser
// handles them.
@@ -875,6 +955,12 @@ ANNOTATION(attr_openmp)
PRAGMA_ANNOTATION(pragma_openmp)
PRAGMA_ANNOTATION(pragma_openmp_end)
+// Annotations for OpenACC pragma directives - #pragma acc.
+// Like with OpenMP, these are produced by the lexer when it parses a
+// #pragma acc directive so it can be handled during parsing of the directives.
+PRAGMA_ANNOTATION(pragma_openacc)
+PRAGMA_ANNOTATION(pragma_openacc_end)
+
// Annotations for loop pragma directives #pragma clang loop ...
// The lexer produces these so that they only take effect when the parser
// handles #pragma loop ... directives.
@@ -885,6 +971,9 @@ PRAGMA_ANNOTATION(pragma_fp)
// Annotation for the attribute pragma directives - #pragma clang attribute ...
PRAGMA_ANNOTATION(pragma_attribute)
+// Annotation for the riscv pragma directives - #pragma clang riscv intrinsic ...
+PRAGMA_ANNOTATION(pragma_riscv)
+
// Annotations for module import translated from #include etc.
ANNOTATION(module_include)
ANNOTATION(module_begin)
@@ -894,6 +983,9 @@ ANNOTATION(module_end)
// into the name of a header unit.
ANNOTATION(header_unit)
+// Annotation for end of input in clang-repl.
+ANNOTATION(repl_input_end)
+
#undef PRAGMA_ANNOTATION
#undef ANNOTATION
#undef TESTING_KEYWORD
@@ -909,9 +1001,11 @@ ANNOTATION(header_unit)
#undef TYPE_TRAIT_2
#undef TYPE_TRAIT_1
#undef TYPE_TRAIT
-#undef CONCEPTS_KEYWORD
#undef CXX20_KEYWORD
#undef CXX11_KEYWORD
#undef KEYWORD
#undef PUNCTUATOR
#undef TOK
+#undef C99_KEYWORD
+#undef C23_KEYWORD
+#undef INTERESTING_IDENTIFIER
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.h b/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.h
index 4e66aa1c8c2d..7529b922619a 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/TokenKinds.h
@@ -44,6 +44,14 @@ enum ObjCKeywordKind {
NUM_OBJC_KEYWORDS
};
+/// Provides a namespace for interesting identifers such as float_t and
+/// double_t.
+enum InterestingIdentifierKind {
+#define INTERESTING_IDENTIFIER(X) X,
+#include "clang/Basic/TokenKinds.def"
+ NUM_INTERESTING_IDENTIFIERS
+};
+
/// Defines the possible values of an on-off-switch (C99 6.10.6p2).
enum OnOffSwitch {
OOS_ON, OOS_OFF, OOS_DEFAULT
@@ -68,6 +76,9 @@ const char *getPunctuatorSpelling(TokenKind Kind) LLVM_READNONE;
/// tokens like 'int' and 'dynamic_cast'. Returns NULL for other token kinds.
const char *getKeywordSpelling(TokenKind Kind) LLVM_READNONE;
+/// Returns the spelling of preprocessor keywords, such as "else".
+const char *getPPKeywordSpelling(PPKeywordKind Kind) LLVM_READNONE;
+
/// Return true if this is a raw identifier or an identifier kind.
inline bool isAnyIdentifier(TokenKind K) {
return (K == tok::identifier) || (K == tok::raw_identifier);
@@ -96,6 +107,13 @@ bool isAnnotation(TokenKind K);
/// Return true if this is an annotation token representing a pragma.
bool isPragmaAnnotation(TokenKind K);
+inline constexpr bool isRegularKeywordAttribute(TokenKind K) {
+ return (false
+#define KEYWORD_ATTRIBUTE(X, ...) || (K == tok::kw_##X)
+#include "clang/Basic/RegularKeywordAttrInfo.inc"
+ );
+}
+
} // end namespace tok
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TransformTypeTraits.def b/contrib/llvm-project/clang/include/clang/Basic/TransformTypeTraits.def
new file mode 100644
index 000000000000..e27a2719a968
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/TransformTypeTraits.def
@@ -0,0 +1,29 @@
+//==--- TransformTypeTraits.def - type trait transformations --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines transform type traits' names.
+//
+//===----------------------------------------------------------------------===//
+
+TRANSFORM_TYPE_TRAIT_DEF(AddLvalueReference, add_lvalue_reference)
+TRANSFORM_TYPE_TRAIT_DEF(AddPointer, add_pointer)
+TRANSFORM_TYPE_TRAIT_DEF(AddRvalueReference, add_rvalue_reference)
+TRANSFORM_TYPE_TRAIT_DEF(Decay, decay)
+TRANSFORM_TYPE_TRAIT_DEF(MakeSigned, make_signed)
+TRANSFORM_TYPE_TRAIT_DEF(MakeUnsigned, make_unsigned)
+TRANSFORM_TYPE_TRAIT_DEF(RemoveAllExtents, remove_all_extents)
+TRANSFORM_TYPE_TRAIT_DEF(RemoveConst, remove_const)
+TRANSFORM_TYPE_TRAIT_DEF(RemoveCV, remove_cv)
+TRANSFORM_TYPE_TRAIT_DEF(RemoveCVRef, remove_cvref)
+TRANSFORM_TYPE_TRAIT_DEF(RemoveExtent, remove_extent)
+TRANSFORM_TYPE_TRAIT_DEF(RemovePointer, remove_pointer)
+TRANSFORM_TYPE_TRAIT_DEF(RemoveReference, remove_reference_t)
+TRANSFORM_TYPE_TRAIT_DEF(RemoveRestrict, remove_restrict)
+TRANSFORM_TYPE_TRAIT_DEF(RemoveVolatile, remove_volatile)
+TRANSFORM_TYPE_TRAIT_DEF(EnumUnderlyingType, underlying_type)
+#undef TRANSFORM_TYPE_TRAIT_DEF
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TypeNodes.td b/contrib/llvm-project/clang/include/clang/Basic/TypeNodes.td
index 011394c3ef45..649b071cebb9 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TypeNodes.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/TypeNodes.td
@@ -3,7 +3,7 @@ include "clang/Basic/ASTNode.td"
class TypeNode<TypeNode base, bit abstract = 0> : ASTNode {
TypeNode Base = base;
bit Abstract = abstract;
-}
+}
/// A type node that is only used to represent dependent types in C++. For
/// example, DependentTemplateSpecializationType is used to represent types
@@ -75,6 +75,7 @@ def DependentSizedMatrixType : TypeNode<MatrixType>, AlwaysDependent;
def FunctionType : TypeNode<Type, 1>;
def FunctionProtoType : TypeNode<FunctionType>;
def FunctionNoProtoType : TypeNode<FunctionType>;
+def UsingType : TypeNode<Type>, NeverCanonical;
def UnresolvedUsingType : TypeNode<Type>, AlwaysDependent;
def ParenType : TypeNode<Type>, NeverCanonical;
def TypedefType : TypeNode<Type>, NeverCanonical;
@@ -90,6 +91,7 @@ def RecordType : TypeNode<TagType>, LeafType;
def EnumType : TypeNode<TagType>, LeafType;
def ElaboratedType : TypeNode<Type>, NeverCanonical;
def AttributedType : TypeNode<Type>, NeverCanonical;
+def BTFTagAttributedType : TypeNode<Type>, NeverCanonical;
def TemplateTypeParmType : TypeNode<Type>, AlwaysDependent, LeafType;
def SubstTemplateTypeParmType : TypeNode<Type>, NeverCanonical;
def SubstTemplateTypeParmPackType : TypeNode<Type>, AlwaysDependent;
@@ -107,5 +109,5 @@ def ObjCInterfaceType : TypeNode<ObjCObjectType>, LeafType;
def ObjCObjectPointerType : TypeNode<Type>;
def PipeType : TypeNode<Type>;
def AtomicType : TypeNode<Type>;
-def ExtIntType : TypeNode<Type>;
-def DependentExtIntType : TypeNode<Type>, AlwaysDependent;
+def BitIntType : TypeNode<Type>;
+def DependentBitIntType : TypeNode<Type>, AlwaysDependent;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/TypeTraits.h b/contrib/llvm-project/clang/include/clang/Basic/TypeTraits.h
index a0f06bec6697..eb8b1923152d 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/TypeTraits.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/TypeTraits.h
@@ -67,6 +67,10 @@ const char *getTraitName(UnaryExprOrTypeTrait T) LLVM_READONLY;
const char *getTraitSpelling(TypeTrait T) LLVM_READONLY;
const char *getTraitSpelling(ArrayTypeTrait T) LLVM_READONLY;
const char *getTraitSpelling(UnaryExprOrTypeTrait T) LLVM_READONLY;
+
+/// Return the arity of the type trait \p T.
+unsigned getTypeTraitArity(TypeTrait T) LLVM_READONLY;
+
} // namespace clang
#endif
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Version.h b/contrib/llvm-project/clang/include/clang/Basic/Version.h
index 2881d8db954e..8e4e6928fded 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Version.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Version.h
@@ -40,6 +40,9 @@ namespace clang {
/// string as getClangRevision.
std::string getLLVMRevision();
+ /// Retrieves the Clang vendor tag.
+ std::string getClangVendor();
+
/// Retrieves the full repository version that is an amalgamation of
/// the information in getClangRepositoryPath() and getClangRevision().
std::string getClangFullRepositoryVersion();
diff --git a/contrib/llvm-project/clang/include/clang/Basic/Visibility.h b/contrib/llvm-project/clang/include/clang/Basic/Visibility.h
index 57d9754ae4a9..1e196300be42 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/Visibility.h
+++ b/contrib/llvm-project/clang/include/clang/Basic/Visibility.h
@@ -15,6 +15,7 @@
#define LLVM_CLANG_BASIC_VISIBILITY_H
#include "clang/Basic/Linkage.h"
+#include "llvm/ADT/STLForwardCompat.h"
#include <cassert>
#include <cstdint>
@@ -56,10 +57,11 @@ class LinkageInfo {
void setVisibility(Visibility V, bool E) { visibility_ = V; explicit_ = E; }
public:
- LinkageInfo() : linkage_(ExternalLinkage), visibility_(DefaultVisibility),
- explicit_(false) {}
+ LinkageInfo()
+ : linkage_(llvm::to_underlying(Linkage::External)),
+ visibility_(DefaultVisibility), explicit_(false) {}
LinkageInfo(Linkage L, Visibility V, bool E)
- : linkage_(L), visibility_(V), explicit_(E) {
+ : linkage_(llvm::to_underlying(L)), visibility_(V), explicit_(E) {
assert(getLinkage() == L && getVisibility() == V &&
isVisibilityExplicit() == E && "Enum truncated!");
}
@@ -68,23 +70,23 @@ public:
return LinkageInfo();
}
static LinkageInfo internal() {
- return LinkageInfo(InternalLinkage, DefaultVisibility, false);
+ return LinkageInfo(Linkage::Internal, DefaultVisibility, false);
}
static LinkageInfo uniqueExternal() {
- return LinkageInfo(UniqueExternalLinkage, DefaultVisibility, false);
+ return LinkageInfo(Linkage::UniqueExternal, DefaultVisibility, false);
}
static LinkageInfo none() {
- return LinkageInfo(NoLinkage, DefaultVisibility, false);
+ return LinkageInfo(Linkage::None, DefaultVisibility, false);
}
static LinkageInfo visible_none() {
- return LinkageInfo(VisibleNoLinkage, DefaultVisibility, false);
+ return LinkageInfo(Linkage::VisibleNone, DefaultVisibility, false);
}
- Linkage getLinkage() const { return (Linkage)linkage_; }
+ Linkage getLinkage() const { return static_cast<Linkage>(linkage_); }
Visibility getVisibility() const { return (Visibility)visibility_; }
bool isVisibilityExplicit() const { return explicit_; }
- void setLinkage(Linkage L) { linkage_ = L; }
+ void setLinkage(Linkage L) { linkage_ = llvm::to_underlying(L); }
void mergeLinkage(Linkage L) {
setLinkage(minLinkage(getLinkage(), L));
@@ -96,10 +98,10 @@ public:
void mergeExternalVisibility(Linkage L) {
Linkage ThisL = getLinkage();
if (!isExternallyVisible(L)) {
- if (ThisL == VisibleNoLinkage)
- ThisL = NoLinkage;
- else if (ThisL == ExternalLinkage)
- ThisL = UniqueExternalLinkage;
+ if (ThisL == Linkage::VisibleNone)
+ ThisL = Linkage::None;
+ else if (ThisL == Linkage::External)
+ ThisL = Linkage::UniqueExternal;
}
setLinkage(ThisL);
}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/WebAssemblyReferenceTypes.def b/contrib/llvm-project/clang/include/clang/Basic/WebAssemblyReferenceTypes.def
new file mode 100644
index 000000000000..7c83da15150c
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/WebAssemblyReferenceTypes.def
@@ -0,0 +1,40 @@
+//===-- WebAssemblyReferenceTypes.def - Wasm reference types ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines externref_t. The macros are:
+//
+// WASM_TYPE(Name, Id, SingletonId)
+// WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS)
+//
+// where:
+//
+// - Name is the name of the builtin type.
+//
+// - MangledNameBase is the base used for name mangling.
+//
+// - BuiltinType::Id is the enumerator defining the type.
+//
+// - Context.SingletonId is the global singleton of this type.
+//
+// - AS indicates the address space for values of this type.
+//
+// To include this file, define either WASM_REF_TYPE or WASM_TYPE, depending on
+// how much information you want. The macros will be undefined after inclusion.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef WASM_REF_TYPE
+#define WASM_REF_TYPE(Name, MangledNameBase, Id, SingletonId, AS) \
+ WASM_TYPE(Name, Id, SingletonId)
+#endif
+
+WASM_REF_TYPE("__externref_t", "externref_t", WasmExternRef, WasmExternRefTy, 10)
+
+#undef WASM_TYPE
+#undef WASM_REF_TYPE
diff --git a/contrib/llvm-project/clang/include/clang/Basic/X86Target.def b/contrib/llvm-project/clang/include/clang/Basic/X86Target.def
deleted file mode 100644
index 70f3879f33a1..000000000000
--- a/contrib/llvm-project/clang/include/clang/Basic/X86Target.def
+++ /dev/null
@@ -1,110 +0,0 @@
-//===--- X86Target.def - X86 Feature/Processor Database ---------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the X86-specific Features and Processors, as used by
-// the X86 Targets.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef FEATURE
-#define FEATURE(ENUM)
-#endif
-
-#ifndef CPU_SPECIFIC
-#define CPU_SPECIFIC(NAME, MANGLING, FEATURES)
-#endif
-
-#ifndef CPU_SPECIFIC_ALIAS
-#define CPU_SPECIFIC_ALIAS(NEW_NAME, NAME)
-#endif
-
-// List of CPU Supports features in order. These need to remain in the order
-// required by attribute 'target' checking. Note that not all are supported/
-// prioritized by GCC, so synchronization with GCC's implementation may require
-// changing some existing values.
-FEATURE(FEATURE_CMOV)
-FEATURE(FEATURE_MMX)
-FEATURE(FEATURE_SSE)
-FEATURE(FEATURE_SSE2)
-FEATURE(FEATURE_SSE3)
-FEATURE(FEATURE_SSSE3)
-FEATURE(FEATURE_SSE4_A)
-FEATURE(FEATURE_SSE4_1)
-FEATURE(FEATURE_SSE4_2)
-FEATURE(FEATURE_POPCNT)
-FEATURE(FEATURE_AES)
-FEATURE(FEATURE_PCLMUL)
-FEATURE(FEATURE_AVX)
-FEATURE(FEATURE_BMI)
-FEATURE(FEATURE_FMA4)
-FEATURE(FEATURE_XOP)
-FEATURE(FEATURE_FMA)
-FEATURE(FEATURE_BMI2)
-FEATURE(FEATURE_AVX2)
-FEATURE(FEATURE_AVX512F)
-FEATURE(FEATURE_AVX512VL)
-FEATURE(FEATURE_AVX512BW)
-FEATURE(FEATURE_AVX512DQ)
-FEATURE(FEATURE_AVX512CD)
-FEATURE(FEATURE_AVX512ER)
-FEATURE(FEATURE_AVX512PF)
-FEATURE(FEATURE_AVX512VBMI)
-FEATURE(FEATURE_AVX512IFMA)
-FEATURE(FEATURE_AVX5124VNNIW)
-FEATURE(FEATURE_AVX5124FMAPS)
-FEATURE(FEATURE_AVX512VPOPCNTDQ)
-FEATURE(FEATURE_AVX512VBMI2)
-FEATURE(FEATURE_GFNI)
-FEATURE(FEATURE_VPCLMULQDQ)
-FEATURE(FEATURE_AVX512VNNI)
-FEATURE(FEATURE_AVX512BITALG)
-FEATURE(FEATURE_AVX512BF16)
-FEATURE(FEATURE_AVX512VP2INTERSECT)
-
-
-// FIXME: When commented out features are supported in LLVM, enable them here.
-CPU_SPECIFIC("generic", 'A', "")
-CPU_SPECIFIC("pentium", 'B', "")
-CPU_SPECIFIC("pentium_pro", 'C', "+cmov")
-CPU_SPECIFIC("pentium_mmx", 'D', "+mmx")
-CPU_SPECIFIC("pentium_ii", 'E', "+cmov,+mmx")
-CPU_SPECIFIC("pentium_iii", 'H', "+cmov,+mmx,+sse")
-CPU_SPECIFIC_ALIAS("pentium_iii_no_xmm_regs", "pentium_iii")
-CPU_SPECIFIC("pentium_4", 'J', "+cmov,+mmx,+sse,+sse2")
-CPU_SPECIFIC("pentium_m", 'K', "+cmov,+mmx,+sse,+sse2")
-CPU_SPECIFIC("pentium_4_sse3", 'L', "+cmov,+mmx,+sse,+sse2,+sse3")
-CPU_SPECIFIC("core_2_duo_ssse3", 'M', "+cmov,+mmx,+sse,+sse2,+sse3,+ssse3")
-CPU_SPECIFIC("core_2_duo_sse4_1", 'N', "+cmov,+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1")
-CPU_SPECIFIC("atom", 'O', "+cmov,+mmx,+sse,+sse2,+sse3,+ssse3,+movbe")
-CPU_SPECIFIC("atom_sse4_2", 'c', "+cmov,+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+popcnt")
-CPU_SPECIFIC("core_i7_sse4_2", 'P', "+cmov,+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+popcnt")
-CPU_SPECIFIC("core_aes_pclmulqdq", 'Q', "+cmov,+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+popcnt")
-CPU_SPECIFIC("atom_sse4_2_movbe", 'd', "+cmov,+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+movbe,+popcnt")
-CPU_SPECIFIC("goldmont", 'i', "+cmov,+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+movbe,+popcnt")
-CPU_SPECIFIC("sandybridge", 'R', "+cmov,+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+popcnt,+avx")
-CPU_SPECIFIC_ALIAS("core_2nd_gen_avx", "sandybridge")
-CPU_SPECIFIC("ivybridge", 'S', "+cmov,+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+popcnt,+f16c,+avx")
-CPU_SPECIFIC_ALIAS("core_3rd_gen_avx", "ivybridge")
-CPU_SPECIFIC("haswell", 'V', "+cmov,+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+movbe,+popcnt,+f16c,+avx,+fma,+bmi,+lzcnt,+avx2")
-CPU_SPECIFIC_ALIAS("core_4th_gen_avx", "haswell")
-CPU_SPECIFIC("core_4th_gen_avx_tsx", 'W', "+cmov,+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+movbe,+popcnt,+f16c,+avx,+fma,+bmi,+lzcnt,+avx2")
-CPU_SPECIFIC("broadwell", 'X', "+cmov,+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+movbe,+popcnt,+f16c,+avx,+fma,+bmi,+lzcnt,+avx2,+adx")
-CPU_SPECIFIC_ALIAS("core_5th_gen_avx", "broadwell")
-CPU_SPECIFIC("core_5th_gen_avx_tsx", 'Y', "+cmov,+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+movbe,+popcnt,+f16c,+avx,+fma,+bmi,+lzcnt,+avx2,+adx")
-CPU_SPECIFIC("knl", 'Z', "+cmov,+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+movbe,+popcnt,+f16c,+avx,+fma,+bmi,+lzcnt,+avx2,+avx512f,+adx,+avx512er,+avx512pf,+avx512cd")
-CPU_SPECIFIC_ALIAS("mic_avx512", "knl")
-CPU_SPECIFIC("skylake", 'b', "+cmov,+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+movbe,+popcnt,+f16c,+avx,+fma,+bmi,+lzcnt,+avx2,+adx,+mpx")
-CPU_SPECIFIC( "skylake_avx512", 'a', "+cmov,+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+movbe,+popcnt,+f16c,+avx,+fma,+bmi,+lzcnt,+avx2,+avx512dq,+avx512f,+adx,+avx512cd,+avx512bw,+avx512vl,+clwb")
-CPU_SPECIFIC("cannonlake", 'e', "+cmov,+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+movbe,+popcnt,+f16c,+avx,+fma,+bmi,+lzcnt,+avx2,+avx512dq,+avx512f,+adx,+avx512ifma,+avx512cd,+avx512bw,+avx512vl,+avx512vbmi")
-CPU_SPECIFIC("knm", 'j', "+cmov,+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+movbe,+popcnt,+f16c,+avx,+fma,+bmi,+lzcnt,+avx2,+avx512f,+adx,+avx512er,+avx512pf,+avx512cd,+avx5124fmaps,+avx5124vnniw,+avx512vpopcntdq")
-
-#undef CPU_SPECIFIC_ALIAS
-#undef CPU_SPECIFIC
-#undef PROC_64_BIT
-#undef PROC_32_BIT
-#undef FEATURE
diff --git a/contrib/llvm-project/clang/include/clang/Basic/arm_bf16.td b/contrib/llvm-project/clang/include/clang/Basic/arm_bf16.td
index d837a7666d40..f70c7221f8d6 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/arm_bf16.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/arm_bf16.td
@@ -1,4 +1,4 @@
-//===--- arm_fp16.td - ARM BF16 compiler interface ------------------------===//
+//===--- arm_bf16.td - ARM BF16 compiler interface ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/clang/include/clang/Basic/arm_fp16.td b/contrib/llvm-project/clang/include/clang/Basic/arm_fp16.td
index 79cd16233c10..cb2a09303e8e 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/arm_fp16.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/arm_fp16.td
@@ -14,7 +14,7 @@
include "arm_neon_incl.td"
// ARMv8.2-A FP16 intrinsics.
-let ArchGuard = "defined(__ARM_FEATURE_FP16_SCALAR_ARITHMETIC) && defined(__aarch64__)" in {
+let ArchGuard = "defined(__aarch64__)", TargetGuard = "fullfp16" in {
// Negate
def VNEGSH : SInst<"vneg", "11", "Sh">;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/arm_neon.td b/contrib/llvm-project/clang/include/clang/Basic/arm_neon.td
index 173003d171ee..9cb7e0981384 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/arm_neon.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/arm_neon.td
@@ -80,10 +80,8 @@ def OP_QDMULH_N : Op<(call "vqdmulh", $p0, (dup $p1))>;
def OP_QDMULH_LN : Op<(call "vqdmulh", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_QRDMULH_LN : Op<(call "vqrdmulh", $p0, (call_mangled "splat_lane", $p1, $p2))>;
def OP_QRDMULH_N : Op<(call "vqrdmulh", $p0, (dup $p1))>;
-def OP_QRDMLAH : Op<(call "vqadd", $p0, (call "vqrdmulh", $p1, $p2))>;
-def OP_QRDMLSH : Op<(call "vqsub", $p0, (call "vqrdmulh", $p1, $p2))>;
-def OP_QRDMLAH_LN : Op<(call "vqadd", $p0, (call "vqrdmulh", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
-def OP_QRDMLSH_LN : Op<(call "vqsub", $p0, (call "vqrdmulh", $p1, (call_mangled "splat_lane", $p2, $p3)))>;
+def OP_QRDMLAH_LN : Op<(call "vqrdmlah", $p0, $p1, (call_mangled "splat_lane", $p2, $p3))>;
+def OP_QRDMLSH_LN : Op<(call "vqrdmlsh", $p0, $p1, (call_mangled "splat_lane", $p2, $p3))>;
def OP_FMS_LN : Op<(call "vfma_lane", $p0, (op "-", $p1), $p2, $p3)>;
def OP_FMS_LNQ : Op<(call "vfma_laneq", $p0, (op "-", $p1), $p2, $p3)>;
def OP_TRN1 : Op<(shuffle $p0, $p1, (interleave (decimate mask0, 2),
@@ -185,10 +183,10 @@ def OP_SCALAR_QDMULL_LN : ScalarMulOp<"vqdmull">;
def OP_SCALAR_QDMULH_LN : ScalarMulOp<"vqdmulh">;
def OP_SCALAR_QRDMULH_LN : ScalarMulOp<"vqrdmulh">;
-def OP_SCALAR_QRDMLAH_LN : Op<(call "vqadd", $p0, (call "vqrdmulh", $p1,
- (call "vget_lane", $p2, $p3)))>;
-def OP_SCALAR_QRDMLSH_LN : Op<(call "vqsub", $p0, (call "vqrdmulh", $p1,
- (call "vget_lane", $p2, $p3)))>;
+def OP_SCALAR_QRDMLAH_LN : Op<(call "vqrdmlah", $p0, $p1,
+ (call "vget_lane", $p2, $p3))>;
+def OP_SCALAR_QRDMLSH_LN : Op<(call "vqrdmlsh", $p0, $p1,
+ (call "vget_lane", $p2, $p3))>;
def OP_SCALAR_HALF_GET_LN : Op<(bitcast "float16_t",
(call "vget_lane",
@@ -291,7 +289,7 @@ def SPLATQ : WInst<"splat_laneq", ".(!Q)I",
"UcUsUicsilPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUlhdQhQdPlQPl"> {
let isLaneQ = 1;
}
-let ArchGuard = "defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)" in {
+let TargetGuard = "bf16" in {
def SPLAT_BF : WInst<"splat_lane", ".(!q)I", "bQb">;
def SPLATQ_BF : WInst<"splat_laneq", ".(!Q)I", "bQb"> {
let isLaneQ = 1;
@@ -325,9 +323,9 @@ def VMLSL : SOpInst<"vmlsl", "(>Q)(>Q)..", "csiUcUsUi", OP_MLSL>;
def VQDMULH : SInst<"vqdmulh", "...", "siQsQi">;
def VQRDMULH : SInst<"vqrdmulh", "...", "siQsQi">;
-let ArchGuard = "defined(__ARM_FEATURE_QRDMX)" in {
-def VQRDMLAH : SOpInst<"vqrdmlah", "....", "siQsQi", OP_QRDMLAH>;
-def VQRDMLSH : SOpInst<"vqrdmlsh", "....", "siQsQi", OP_QRDMLSH>;
+let TargetGuard = "v8.1a" in {
+def VQRDMLAH : SInst<"vqrdmlah", "....", "siQsQi">;
+def VQRDMLSH : SInst<"vqrdmlsh", "....", "siQsQi">;
}
def VQDMLAL : SInst<"vqdmlal", "(>Q)(>Q)..", "si">;
@@ -532,7 +530,7 @@ def VMOV_N : WOpInst<"vmov_n", ".1",
}
let InstName = "" in
def VDUP_LANE: WOpInst<"vdup_lane", ".qI",
- "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl",
+ "UcUsUicsiPcPshfQUcQUsQUiQcQsQiQPcQPsQhQflUlQlQUl",
OP_DUP_LN>;
////////////////////////////////////////////////////////////////////////////////
@@ -616,7 +614,7 @@ def A64_VQDMULH_LANE : SInst<"vqdmulh_lane", "..(!q)I", "siQsQi">;
def A64_VQRDMULH_LANE : SInst<"vqrdmulh_lane", "..(!q)I", "siQsQi">;
}
-let ArchGuard = "defined(__ARM_FEATURE_QRDMX)" in {
+let TargetGuard = "v8.1a" in {
def VQRDMLAH_LANE : SOpInst<"vqrdmlah_lane", "...qI", "siQsQi", OP_QRDMLAH_LN>;
def VQRDMLSH_LANE : SOpInst<"vqrdmlsh_lane", "...qI", "siQsQi", OP_QRDMLSH_LN>;
}
@@ -959,8 +957,10 @@ def VQDMLAL_HIGH : SOpInst<"vqdmlal_high", "(>Q)(>Q)QQ", "si", OP_QDMLALHi>;
def VQDMLAL_HIGH_N : SOpInst<"vqdmlal_high_n", "(>Q)(>Q)Q1", "si", OP_QDMLALHi_N>;
def VQDMLSL_HIGH : SOpInst<"vqdmlsl_high", "(>Q)(>Q)QQ", "si", OP_QDMLSLHi>;
def VQDMLSL_HIGH_N : SOpInst<"vqdmlsl_high_n", "(>Q)(>Q)Q1", "si", OP_QDMLSLHi_N>;
-def VMULL_P64 : SInst<"vmull", "(1>)11", "Pl">;
-def VMULL_HIGH_P64 : SOpInst<"vmull_high", "(1>)..", "HPl", OP_MULLHi_P64>;
+let TargetGuard = "aes" in {
+ def VMULL_P64 : SInst<"vmull", "(1>)11", "Pl">;
+ def VMULL_HIGH_P64 : SOpInst<"vmull_high", "(1>)..", "HPl", OP_MULLHi_P64>;
+}
////////////////////////////////////////////////////////////////////////////////
@@ -982,7 +982,7 @@ def COPYQ_LANEQ : IOpInst<"vcopy_laneq", "..I.I",
////////////////////////////////////////////////////////////////////////////////
// Set all lanes to same value
-def VDUP_LANE1: WOpInst<"vdup_lane", ".qI", "hdQhQdPlQPl", OP_DUP_LN>;
+def VDUP_LANE1: WOpInst<"vdup_lane", ".qI", "dQdPlQPl", OP_DUP_LN>;
def VDUP_LANE2: WOpInst<"vdup_laneq", ".QI",
"csilUcUsUiUlPcPshfdQcQsQiQlQPcQPsQUcQUsQUiQUlQhQfQdPlQPl",
OP_DUP_LN> {
@@ -1091,14 +1091,14 @@ let isLaneQ = 1 in {
def VQDMULH_LANEQ : SInst<"vqdmulh_laneq", "..QI", "siQsQi">;
def VQRDMULH_LANEQ : SInst<"vqrdmulh_laneq", "..QI", "siQsQi">;
}
-let ArchGuard = "defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)" in {
+let ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.1a" in {
def VQRDMLAH_LANEQ : SOpInst<"vqrdmlah_laneq", "...QI", "siQsQi", OP_QRDMLAH_LN> {
let isLaneQ = 1;
}
def VQRDMLSH_LANEQ : SOpInst<"vqrdmlsh_laneq", "...QI", "siQsQi", OP_QRDMLSH_LN> {
let isLaneQ = 1;
}
-}
+} // ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.1a"
// Note: d type implemented by SCALAR_VMULX_LANE
def VMULX_LANE : IOpInst<"vmulx_lane", "..qI", "fQfQd", OP_MULX_LN>;
@@ -1122,14 +1122,14 @@ def VEXT_A64 : WInst<"vext", "...I", "dQdPlQPl">;
////////////////////////////////////////////////////////////////////////////////
// Crypto
-let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_AES)" in {
+let ArchGuard = "__ARM_ARCH >= 8", TargetGuard = "aes" in {
def AESE : SInst<"vaese", "...", "QUc">;
def AESD : SInst<"vaesd", "...", "QUc">;
def AESMC : SInst<"vaesmc", "..", "QUc">;
def AESIMC : SInst<"vaesimc", "..", "QUc">;
}
-let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA2)" in {
+let ArchGuard = "__ARM_ARCH >= 8", TargetGuard = "sha2" in {
def SHA1H : SInst<"vsha1h", "11", "Ui">;
def SHA1SU1 : SInst<"vsha1su1", "...", "QUi">;
def SHA256SU0 : SInst<"vsha256su0", "...", "QUi">;
@@ -1143,7 +1143,7 @@ def SHA256H2 : SInst<"vsha256h2", "....", "QUi">;
def SHA256SU1 : SInst<"vsha256su1", "....", "QUi">;
}
-let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA3) && defined(__aarch64__)" in {
+let ArchGuard = "defined(__aarch64__)", TargetGuard = "sha3" in {
def BCAX : SInst<"vbcax", "....", "QUcQUsQUiQUlQcQsQiQl">;
def EOR3 : SInst<"veor3", "....", "QUcQUsQUiQUlQcQsQiQl">;
def RAX1 : SInst<"vrax1", "...", "QUl">;
@@ -1153,15 +1153,14 @@ def XAR : SInst<"vxar", "...I", "QUl">;
}
}
-let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA512) && defined(__aarch64__)" in {
-
+let ArchGuard = "defined(__aarch64__)", TargetGuard = "sha3" in {
def SHA512SU0 : SInst<"vsha512su0", "...", "QUl">;
def SHA512su1 : SInst<"vsha512su1", "....", "QUl">;
def SHA512H : SInst<"vsha512h", "....", "QUl">;
def SHA512H2 : SInst<"vsha512h2", "....", "QUl">;
}
-let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_SM3) && defined(__aarch64__)" in {
+let ArchGuard = "defined(__aarch64__)", TargetGuard = "sm4" in {
def SM3SS1 : SInst<"vsm3ss1", "....", "QUi">;
def SM3TT1A : SInst<"vsm3tt1a", "....I", "QUi">;
def SM3TT1B : SInst<"vsm3tt1b", "....I", "QUi">;
@@ -1171,7 +1170,7 @@ def SM3PARTW1 : SInst<"vsm3partw1", "....", "QUi">;
def SM3PARTW2 : SInst<"vsm3partw2", "....", "QUi">;
}
-let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_SM4) && defined(__aarch64__)" in {
+let ArchGuard = "defined(__aarch64__)", TargetGuard = "sm4" in {
def SM4E : SInst<"vsm4e", "...", "QUi">;
def SM4EKEY : SInst<"vsm4ekey", "...", "QUi">;
}
@@ -1194,7 +1193,7 @@ def FCVTAS_S32 : SInst<"vcvta_s32", "S.", "fQf">;
def FCVTAU_S32 : SInst<"vcvta_u32", "U.", "fQf">;
}
-let ArchGuard = "__ARM_ARCH >= 8 && defined(__aarch64__)" in {
+let ArchGuard = "defined(__aarch64__)" in {
def FCVTNS_S64 : SInst<"vcvtn_s64", "S.", "dQd">;
def FCVTNU_S64 : SInst<"vcvtn_u64", "U.", "dQd">;
def FCVTPS_S64 : SInst<"vcvtp_s64", "S.", "dQd">;
@@ -1218,7 +1217,7 @@ def FRINTZ_S32 : SInst<"vrnd", "..", "fQf">;
def FRINTI_S32 : SInst<"vrndi", "..", "fQf">;
}
-let ArchGuard = "__ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)" in {
+let ArchGuard = "defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)" in {
def FRINTN_S64 : SInst<"vrndn", "..", "dQd">;
def FRINTA_S64 : SInst<"vrnda", "..", "dQd">;
def FRINTP_S64 : SInst<"vrndp", "..", "dQd">;
@@ -1228,11 +1227,16 @@ def FRINTZ_S64 : SInst<"vrnd", "..", "dQd">;
def FRINTI_S64 : SInst<"vrndi", "..", "dQd">;
}
-let ArchGuard = "__ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_FRINT)" in {
+let ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.5a" in {
def FRINT32X_S32 : SInst<"vrnd32x", "..", "fQf">;
def FRINT32Z_S32 : SInst<"vrnd32z", "..", "fQf">;
def FRINT64X_S32 : SInst<"vrnd64x", "..", "fQf">;
def FRINT64Z_S32 : SInst<"vrnd64z", "..", "fQf">;
+
+def FRINT32X_S64 : SInst<"vrnd32x", "..", "dQd">;
+def FRINT32Z_S64 : SInst<"vrnd32z", "..", "dQd">;
+def FRINT64X_S64 : SInst<"vrnd64x", "..", "dQd">;
+def FRINT64Z_S64 : SInst<"vrnd64z", "..", "dQd">;
}
////////////////////////////////////////////////////////////////////////////////
@@ -1243,7 +1247,7 @@ def FMAXNM_S32 : SInst<"vmaxnm", "...", "fQf">;
def FMINNM_S32 : SInst<"vminnm", "...", "fQf">;
}
-let ArchGuard = "__ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)" in {
+let ArchGuard = "defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)" in {
def FMAXNM_S64 : SInst<"vmaxnm", "...", "dQd">;
def FMINNM_S64 : SInst<"vminnm", "...", "dQd">;
}
@@ -1285,7 +1289,7 @@ def VQTBX4_A64 : WInst<"vqtbx4", "..(4Q)U", "UccPcQUcQcQPc">;
// itself during generation so, unlike all other intrinsics, this one should
// include *all* types, not just additional ones.
def VVREINTERPRET : REINTERPRET_CROSS_SELF<"csilUcUsUiUlhfdPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQdQPcQPsQPlQPk"> {
- let ArchGuard = "__ARM_ARCH >= 8 && defined(__aarch64__)";
+ let ArchGuard = "defined(__aarch64__)";
let BigEndianSafe = 1;
}
@@ -1397,15 +1401,15 @@ def SCALAR_SQDMULH : SInst<"vqdmulh", "111", "SsSi">;
// Scalar Integer Saturating Rounding Doubling Multiply Half High
def SCALAR_SQRDMULH : SInst<"vqrdmulh", "111", "SsSi">;
-let ArchGuard = "defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)" in {
+let ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.1a" in {
////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Rounding Doubling Multiply Accumulate Returning High Half
-def SCALAR_SQRDMLAH : SOpInst<"vqrdmlah", "1111", "SsSi", OP_QRDMLAH>;
+def SCALAR_SQRDMLAH : SInst<"vqrdmlah", "1111", "SsSi">;
////////////////////////////////////////////////////////////////////////////////
// Signed Saturating Rounding Doubling Multiply Subtract Returning High Half
-def SCALAR_SQRDMLSH : SOpInst<"vqrdmlsh", "1111", "SsSi", OP_QRDMLSH>;
-}
+def SCALAR_SQRDMLSH : SInst<"vqrdmlsh", "1111", "SsSi">;
+} // ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.1a"
////////////////////////////////////////////////////////////////////////////////
// Scalar Floating-point Multiply Extended
@@ -1628,7 +1632,7 @@ def SCALAR_SQRDMULH_LANEQ : SOpInst<"vqrdmulh_laneq", "11QI", "SsSi", OP_SCALAR_
let isLaneQ = 1;
}
-let ArchGuard = "defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)" in {
+let TargetGuard = "v8.1a" in {
// Signed Saturating Rounding Doubling Multiply Accumulate Returning High Half
def SCALAR_SQRDMLAH_LANE : SOpInst<"vqrdmlah_lane", "111.I", "SsSi", OP_SCALAR_QRDMLAH_LN>;
def SCALAR_SQRDMLAH_LANEQ : SOpInst<"vqrdmlah_laneq", "111QI", "SsSi", OP_SCALAR_QRDMLAH_LN> {
@@ -1640,16 +1644,17 @@ def SCALAR_SQRDMLSH_LANE : SOpInst<"vqrdmlsh_lane", "111.I", "SsSi", OP_SCALAR_Q
def SCALAR_SQRDMLSH_LANEQ : SOpInst<"vqrdmlsh_laneq", "111QI", "SsSi", OP_SCALAR_QRDMLSH_LN> {
let isLaneQ = 1;
}
-}
+} // TargetGuard = "v8.1a"
def SCALAR_VDUP_LANE : IInst<"vdup_lane", "1.I", "ScSsSiSlSfSdSUcSUsSUiSUlSPcSPs">;
def SCALAR_VDUP_LANEQ : IInst<"vdup_laneq", "1QI", "ScSsSiSlSfSdSUcSUsSUiSUlSPcSPs"> {
let isLaneQ = 1;
}
-}
+
+} // ArchGuard = "defined(__aarch64__)"
// ARMv8.2-A FP16 vector intrinsics for A32/A64.
-let ArchGuard = "defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)" in {
+let TargetGuard = "fullfp16" in {
// ARMv8.2-A FP16 one-operand vector intrinsics.
@@ -1674,7 +1679,7 @@ let ArchGuard = "defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)" in {
def VCVTP_U16 : SInst<"vcvtp_u16", "U.", "hQh">;
// Vector rounding
- let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)" in {
+ let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)", TargetGuard = "fullfp16" in {
def FRINTZH : SInst<"vrnd", "..", "hQh">;
def FRINTNH : SInst<"vrndn", "..", "hQh">;
def FRINTAH : SInst<"vrnda", "..", "hQh">;
@@ -1723,7 +1728,7 @@ let ArchGuard = "defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)" in {
// Max/Min
def VMAXH : SInst<"vmax", "...", "hQh">;
def VMINH : SInst<"vmin", "...", "hQh">;
- let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)" in {
+ let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN)", TargetGuard = "fullfp16" in {
def FMAXNMH : SInst<"vmaxnm", "...", "hQh">;
def FMINNMH : SInst<"vminnm", "...", "hQh">;
}
@@ -1765,15 +1770,6 @@ let ArchGuard = "defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)" in {
def VUZPH : WInst<"vuzp", "2..", "hQh">;
def VTRNH : WInst<"vtrn", "2..", "hQh">;
-
- let ArchGuard = "!defined(__aarch64__)" in {
- // Set all lanes to same value.
- // Already implemented prior to ARMv8.2-A.
- def VMOV_NH : WOpInst<"vmov_n", ".1", "hQh", OP_DUP>;
- def VDUP_NH : WOpInst<"vdup_n", ".1", "hQh", OP_DUP>;
- def VDUP_LANE1H : WOpInst<"vdup_lane", ".qI", "hQh", OP_DUP_LN>;
- }
-
// Vector Extract
def VEXTH : WInst<"vext", "...I", "hQh">;
@@ -1782,7 +1778,7 @@ let ArchGuard = "defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)" in {
}
// ARMv8.2-A FP16 vector intrinsics for A64 only.
-let ArchGuard = "defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)" in {
+let ArchGuard = "defined(__aarch64__)", TargetGuard = "fullfp16" in {
// Vector rounding
def FRINTIH : SInst<"vrndi", "..", "hQh">;
@@ -1877,11 +1873,11 @@ let ArchGuard = "defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarc
}
// v8.2-A dot product instructions.
-let ArchGuard = "defined(__ARM_FEATURE_DOTPROD)" in {
+let TargetGuard = "dotprod" in {
def DOT : SInst<"vdot", "..(<<)(<<)", "iQiUiQUi">;
def DOT_LANE : SOpInst<"vdot_lane", "..(<<)(<<q)I", "iUiQiQUi", OP_DOT_LN>;
}
-let ArchGuard = "defined(__ARM_FEATURE_DOTPROD) && defined(__aarch64__)" in {
+let ArchGuard = "defined(__aarch64__)", TargetGuard = "dotprod" in {
// Variants indexing into a 128-bit vector are A64 only.
def UDOT_LANEQ : SOpInst<"vdot_laneq", "..(<<)(<<Q)I", "iUiQiQUi", OP_DOT_LNQ> {
let isLaneQ = 1;
@@ -1889,7 +1885,7 @@ let ArchGuard = "defined(__ARM_FEATURE_DOTPROD) && defined(__aarch64__)" in {
}
// v8.2-A FP16 fused multiply-add long instructions.
-let ArchGuard = "defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__)" in {
+let ArchGuard = "defined(__aarch64__)", TargetGuard = "fp16fml" in {
def VFMLAL_LOW : SInst<"vfmlal_low", ">>..", "hQh">;
def VFMLSL_LOW : SInst<"vfmlsl_low", ">>..", "hQh">;
def VFMLAL_HIGH : SInst<"vfmlal_high", ">>..", "hQh">;
@@ -1914,7 +1910,7 @@ let ArchGuard = "defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__)" in {
}
}
-let ArchGuard = "defined(__ARM_FEATURE_MATMUL_INT8)" in {
+let TargetGuard = "i8mm" in {
def VMMLA : SInst<"vmmla", "..(<<)(<<)", "QUiQi">;
def VUSMMLA : SInst<"vusmmla", "..(<<U)(<<)", "Qi">;
@@ -1931,7 +1927,7 @@ let ArchGuard = "defined(__ARM_FEATURE_MATMUL_INT8)" in {
}
}
-let ArchGuard = "defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)" in {
+let TargetGuard = "bf16" in {
def VDOT_BF : SInst<"vbfdot", "..BB", "fQf">;
def VDOT_LANE_BF : SOpInst<"vbfdot_lane", "..B(Bq)I", "fQf", OP_BFDOT_LN>;
def VDOT_LANEQ_BF : SOpInst<"vbfdot_laneq", "..B(BQ)I", "fQf", OP_BFDOT_LNQ> {
@@ -1975,7 +1971,7 @@ multiclass VCMLA_ROTS<string type, string lanety, string laneqty> {
}
// v8.3-A Vector complex addition intrinsics
-let ArchGuard = "defined(__ARM_FEATURE_COMPLEX) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)" in {
+let TargetGuard = "v8.3a,fullfp16" in {
def VCADD_ROT90_FP16 : SInst<"vcadd_rot90", "...", "h">;
def VCADD_ROT270_FP16 : SInst<"vcadd_rot270", "...", "h">;
def VCADDQ_ROT90_FP16 : SInst<"vcaddq_rot90", "QQQ", "h">;
@@ -1983,7 +1979,7 @@ let ArchGuard = "defined(__ARM_FEATURE_COMPLEX) && defined(__ARM_FEATURE_FP16_VE
defm VCMLA_FP16 : VCMLA_ROTS<"h", "uint32x2_t", "uint32x4_t">;
}
-let ArchGuard = "defined(__ARM_FEATURE_COMPLEX)" in {
+let TargetGuard = "v8.3a" in {
def VCADD_ROT90 : SInst<"vcadd_rot90", "...", "f">;
def VCADD_ROT270 : SInst<"vcadd_rot270", "...", "f">;
def VCADDQ_ROT90 : SInst<"vcaddq_rot90", "QQQ", "f">;
@@ -1991,7 +1987,7 @@ let ArchGuard = "defined(__ARM_FEATURE_COMPLEX)" in {
defm VCMLA_F32 : VCMLA_ROTS<"f", "uint64x1_t", "uint64x2_t">;
}
-let ArchGuard = "defined(__ARM_FEATURE_COMPLEX) && defined(__aarch64__)" in {
+let ArchGuard = "defined(__aarch64__)", TargetGuard = "v8.3a" in {
def VCADDQ_ROT90_FP64 : SInst<"vcaddq_rot90", "QQQ", "d">;
def VCADDQ_ROT270_FP64 : SInst<"vcaddq_rot270", "QQQ", "d">;
@@ -1999,7 +1995,7 @@ let ArchGuard = "defined(__ARM_FEATURE_COMPLEX) && defined(__aarch64__)" in {
}
// V8.2-A BFloat intrinsics
-let ArchGuard = "defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)" in {
+let TargetGuard = "bf16" in {
def VCREATE_BF : NoTestOpInst<"vcreate", ".(IU>)", "b", OP_CAST> {
let BigEndianSafe = 1;
}
@@ -2063,14 +2059,14 @@ let ArchGuard = "defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)" in {
def SCALAR_CVT_F32_BF16 : SOpInst<"vcvtah_f32", "(1F>)(1!)", "b", OP_CVT_F32_BF16>;
}
-let ArchGuard = "defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && !defined(__aarch64__)" in {
+let ArchGuard = "!defined(__aarch64__)", TargetGuard = "bf16" in {
def VCVT_BF16_F32_A32_INTERNAL : WInst<"__a32_vcvt_bf16", "BQ", "f">;
def VCVT_BF16_F32_A32 : SOpInst<"vcvt_bf16", "BQ", "f", OP_VCVT_BF16_F32_A32>;
def VCVT_LOW_BF16_F32_A32 : SOpInst<"vcvt_low_bf16", "BQ", "Qf", OP_VCVT_BF16_F32_LO_A32>;
def VCVT_HIGH_BF16_F32_A32 : SOpInst<"vcvt_high_bf16", "BBQ", "Qf", OP_VCVT_BF16_F32_HI_A32>;
}
-let ArchGuard = "defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && defined(__aarch64__)" in {
+let ArchGuard = "defined(__aarch64__)", TargetGuard = "bf16" in {
def VCVT_LOW_BF16_F32_A64_INTERNAL : WInst<"__a64_vcvtq_low_bf16", "BQ", "Hf">;
def VCVT_LOW_BF16_F32_A64 : SOpInst<"vcvt_low_bf16", "BQ", "Qf", OP_VCVT_BF16_F32_LO_A64>;
def VCVT_HIGH_BF16_F32_A64 : SInst<"vcvt_high_bf16", "BBQ", "Qf">;
@@ -2082,16 +2078,22 @@ let ArchGuard = "defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && defined(__aarc
def COPYQ_LANEQ_BF16 : IOpInst<"vcopy_laneq", "..I.I", "Qb", OP_COPY_LN>;
}
-let ArchGuard = "defined(__ARM_FEATURE_BF16) && !defined(__aarch64__)" in {
+let ArchGuard = "!defined(__aarch64__)", TargetGuard = "bf16" in {
let BigEndianSafe = 1 in {
defm VREINTERPRET_BF : REINTERPRET_CROSS_TYPES<
"csilUcUsUiUlhfPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQPcQPsQPl", "bQb">;
}
}
-let ArchGuard = "defined(__ARM_FEATURE_BF16) && defined(__aarch64__)" in {
+let ArchGuard = "defined(__aarch64__)", TargetGuard = "bf16" in {
let BigEndianSafe = 1 in {
defm VVREINTERPRET_BF : REINTERPRET_CROSS_TYPES<
"csilUcUsUiUlhfdPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQdQPcQPsQPlQPk", "bQb">;
}
}
+
+// v8.9a/v9.4a LRCPC3 intrinsics
+let ArchGuard = "defined(__aarch64__)", TargetGuard = "rcpc3" in {
+ def VLDAP1_LANE : WInst<"vldap1_lane", ".(c*!).I", "QUlQlUlldQdPlQPl">;
+ def VSTL1_LANE : WInst<"vstl1_lane", "v*(.!)I", "QUlQlUlldQdPlQPl">;
+}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/arm_neon_incl.td b/contrib/llvm-project/clang/include/clang/Basic/arm_neon_incl.td
index 60dbea627d58..4f969ac1c78a 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/arm_neon_incl.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/arm_neon_incl.td
@@ -265,6 +265,7 @@ class Inst <string n, string p, string t, Operation o> {
string Prototype = p;
string Types = t;
string ArchGuard = "";
+ string TargetGuard = "";
Operation Operation = o;
bit BigEndianSafe = 0;
diff --git a/contrib/llvm-project/clang/include/clang/Basic/arm_sme.td b/contrib/llvm-project/clang/include/clang/Basic/arm_sme.td
new file mode 100644
index 000000000000..2da0e8d2aba9
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/arm_sme.td
@@ -0,0 +1,676 @@
+//===--- arm_sme.td - ARM SME compiler interface ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TableGen definitions from which the ARM SME header
+// file will be generated. See:
+//
+// https://developer.arm.com/architectures/system-architectures/software-standards/acle
+//
+//===----------------------------------------------------------------------===//
+
+include "arm_sve_sme_incl.td"
+
+////////////////////////////////////////////////////////////////////////////////
+// Loads
+
+multiclass ZALoad<string n_suffix, string t, string i_prefix, list<ImmCheck> ch> {
+ let TargetGuard = "sme" in {
+ def NAME # _H : MInst<"svld1_hor_" # n_suffix, "vimPQ", t,
+ [IsLoad, IsOverloadNone, IsStreaming, IsInOutZA],
+ MemEltTyDefault, i_prefix # "_horiz", ch>;
+
+ def NAME # _H_VNUM : MInst<"svld1_hor_vnum_" # n_suffix, "vimPQl", t,
+ [IsLoad, IsOverloadNone, IsStreaming, IsInOutZA],
+ MemEltTyDefault, i_prefix # "_horiz", ch>;
+
+ def NAME # _V : MInst<"svld1_ver_" # n_suffix, "vimPQ", t,
+ [IsLoad, IsOverloadNone, IsStreaming, IsInOutZA],
+ MemEltTyDefault, i_prefix # "_vert", ch>;
+
+ def NAME # _V_VNUM : MInst<"svld1_ver_vnum_" # n_suffix, "vimPQl", t,
+ [IsLoad, IsOverloadNone, IsStreaming, IsInOutZA],
+ MemEltTyDefault, i_prefix # "_vert", ch>;
+ }
+}
+
+defm SVLD1_ZA8 : ZALoad<"za8", "c", "aarch64_sme_ld1b", [ImmCheck<0, ImmCheck0_0>]>;
+defm SVLD1_ZA16 : ZALoad<"za16", "s", "aarch64_sme_ld1h", [ImmCheck<0, ImmCheck0_1>]>;
+defm SVLD1_ZA32 : ZALoad<"za32", "i", "aarch64_sme_ld1w", [ImmCheck<0, ImmCheck0_3>]>;
+defm SVLD1_ZA64 : ZALoad<"za64", "l", "aarch64_sme_ld1d", [ImmCheck<0, ImmCheck0_7>]>;
+defm SVLD1_ZA128 : ZALoad<"za128", "q", "aarch64_sme_ld1q", [ImmCheck<0, ImmCheck0_15>]>;
+
+let TargetGuard = "sme" in {
+def SVLDR_VNUM_ZA : MInst<"svldr_vnum_za", "vmQl", "",
+ [IsOverloadNone, IsStreamingCompatible, IsInOutZA],
+ MemEltTyDefault, "aarch64_sme_ldr">;
+
+def SVLDR_ZA : MInst<"svldr_za", "vmQ", "",
+ [IsOverloadNone, IsStreamingCompatible, IsInOutZA],
+ MemEltTyDefault, "aarch64_sme_ldr", []>;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Stores
+
+multiclass ZAStore<string n_suffix, string t, string i_prefix, list<ImmCheck> ch> {
+ let TargetGuard = "sme" in {
+ def NAME # _H : MInst<"svst1_hor_" # n_suffix, "vimP%", t,
+ [IsStore, IsOverloadNone, IsStreaming, IsInZA],
+ MemEltTyDefault, i_prefix # "_horiz", ch>;
+
+ def NAME # _H_VNUM : MInst<"svst1_hor_vnum_" # n_suffix, "vimP%l", t,
+ [IsStore, IsOverloadNone, IsStreaming, IsInZA],
+ MemEltTyDefault, i_prefix # "_horiz", ch>;
+
+ def NAME # _V : MInst<"svst1_ver_" # n_suffix, "vimP%", t,
+ [IsStore, IsOverloadNone, IsStreaming, IsInZA],
+ MemEltTyDefault, i_prefix # "_vert", ch>;
+
+ def NAME # _V_VNUM : MInst<"svst1_ver_vnum_" # n_suffix, "vimP%l", t,
+ [IsStore, IsOverloadNone, IsStreaming, IsInZA],
+ MemEltTyDefault, i_prefix # "_vert", ch>;
+ }
+}
+
+defm SVST1_ZA8 : ZAStore<"za8", "c", "aarch64_sme_st1b", [ImmCheck<0, ImmCheck0_0>]>;
+defm SVST1_ZA16 : ZAStore<"za16", "s", "aarch64_sme_st1h", [ImmCheck<0, ImmCheck0_1>]>;
+defm SVST1_ZA32 : ZAStore<"za32", "i", "aarch64_sme_st1w", [ImmCheck<0, ImmCheck0_3>]>;
+defm SVST1_ZA64 : ZAStore<"za64", "l", "aarch64_sme_st1d", [ImmCheck<0, ImmCheck0_7>]>;
+defm SVST1_ZA128 : ZAStore<"za128", "q", "aarch64_sme_st1q", [ImmCheck<0, ImmCheck0_15>]>;
+
+let TargetGuard = "sme" in {
+def SVSTR_VNUM_ZA : MInst<"svstr_vnum_za", "vm%l", "",
+ [IsOverloadNone, IsStreamingCompatible, IsInZA],
+ MemEltTyDefault, "aarch64_sme_str">;
+
+def SVSTR_ZA : MInst<"svstr_za", "vm%", "",
+ [IsOverloadNone, IsStreamingCompatible, IsInZA],
+ MemEltTyDefault, "aarch64_sme_str", []>;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Read horizontal/vertical ZA slices
+
+multiclass ZARead<string n_suffix, string t, string i_prefix, list<ImmCheck> ch> {
+ let TargetGuard = "sme" in {
+ def NAME # _H : SInst<"svread_hor_" # n_suffix # "[_{d}]", "ddPim", t,
+ MergeOp1, i_prefix # "_horiz",
+ [IsReadZA, IsStreaming, IsInZA], ch>;
+
+ def NAME # _V : SInst<"svread_ver_" # n_suffix # "[_{d}]", "ddPim", t,
+ MergeOp1, i_prefix # "_vert",
+ [IsReadZA, IsStreaming, IsInZA], ch>;
+ }
+}
+
+defm SVREAD_ZA8 : ZARead<"za8", "cUc", "aarch64_sme_read", [ImmCheck<2, ImmCheck0_0>]>;
+defm SVREAD_ZA16 : ZARead<"za16", "sUshb", "aarch64_sme_read", [ImmCheck<2, ImmCheck0_1>]>;
+defm SVREAD_ZA32 : ZARead<"za32", "iUif", "aarch64_sme_read", [ImmCheck<2, ImmCheck0_3>]>;
+defm SVREAD_ZA64 : ZARead<"za64", "lUld", "aarch64_sme_read", [ImmCheck<2, ImmCheck0_7>]>;
+defm SVREAD_ZA128 : ZARead<"za128", "csilUcUsUiUlhbfd", "aarch64_sme_readq", [ImmCheck<2, ImmCheck0_15>]>;
+
+////////////////////////////////////////////////////////////////////////////////
+// Write horizontal/vertical ZA slices
+
+multiclass ZAWrite<string n_suffix, string t, string i_prefix, list<ImmCheck> ch> {
+ let TargetGuard = "sme" in {
+ def NAME # _H : SInst<"svwrite_hor_" # n_suffix # "[_{d}]", "vimPd", t,
+ MergeOp1, i_prefix # "_horiz",
+ [IsWriteZA, IsStreaming, IsInOutZA], ch>;
+
+ def NAME # _V : SInst<"svwrite_ver_" # n_suffix # "[_{d}]", "vimPd", t,
+ MergeOp1, i_prefix # "_vert",
+ [IsWriteZA, IsStreaming, IsInOutZA], ch>;
+ }
+}
+
+defm SVWRITE_ZA8 : ZAWrite<"za8", "cUc", "aarch64_sme_write", [ImmCheck<0, ImmCheck0_0>]>;
+defm SVWRITE_ZA16 : ZAWrite<"za16", "sUshb", "aarch64_sme_write", [ImmCheck<0, ImmCheck0_1>]>;
+defm SVWRITE_ZA32 : ZAWrite<"za32", "iUif", "aarch64_sme_write", [ImmCheck<0, ImmCheck0_3>]>;
+defm SVWRITE_ZA64 : ZAWrite<"za64", "lUld", "aarch64_sme_write", [ImmCheck<0, ImmCheck0_7>]>;
+defm SVWRITE_ZA128 : ZAWrite<"za128", "csilUcUsUiUlhbfd", "aarch64_sme_writeq", [ImmCheck<0, ImmCheck0_15>]>;
+
+////////////////////////////////////////////////////////////////////////////////
+// SME - Zero
+
+let TargetGuard = "sme" in {
+ def SVZERO_MASK_ZA : SInst<"svzero_mask_za", "vi", "", MergeNone, "aarch64_sme_zero",
+ [IsOverloadNone, IsStreamingCompatible, IsInOutZA],
+ [ImmCheck<0, ImmCheck0_255>]>;
+ def SVZERO_ZA : SInst<"svzero_za", "v", "", MergeNone, "aarch64_sme_zero",
+ [IsOverloadNone, IsStreamingCompatible, IsOutZA]>;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// SME - Counting elements in a streaming vector
+
+multiclass ZACount<string n_suffix> {
+ let TargetGuard = "sme" in {
+ def NAME : SInst<"sv" # n_suffix, "nv", "", MergeNone,
+ "aarch64_sme_" # n_suffix,
+ [IsOverloadNone, IsStreamingCompatible]>;
+ }
+}
+
+defm SVCNTSB : ZACount<"cntsb">;
+defm SVCNTSH : ZACount<"cntsh">;
+defm SVCNTSW : ZACount<"cntsw">;
+defm SVCNTSD : ZACount<"cntsd">;
+
+////////////////////////////////////////////////////////////////////////////////
+// SME - ADDHA/ADDVA
+
+multiclass ZAAdd<string n_suffix> {
+ let TargetGuard = "sme" in {
+ def NAME # _ZA32: SInst<"sv" # n_suffix # "_za32[_{d}]", "viPPd", "iUi", MergeOp1,
+ "aarch64_sme_" # n_suffix, [IsStreaming, IsInOutZA],
+ [ImmCheck<0, ImmCheck0_3>]>;
+ }
+
+ let TargetGuard = "sme-i16i64" in {
+ def NAME # _ZA64: SInst<"sv" # n_suffix # "_za64[_{d}]", "viPPd", "lUl", MergeOp1,
+ "aarch64_sme_" # n_suffix, [IsStreaming, IsInOutZA],
+ [ImmCheck<0, ImmCheck0_7>]>;
+ }
+}
+
+defm SVADDHA : ZAAdd<"addha">;
+defm SVADDVA : ZAAdd<"addva">;
+
+////////////////////////////////////////////////////////////////////////////////
+// SME - SMOPA, SMOPS, UMOPA, UMOPS
+
+multiclass ZAIntOuterProd<string n_suffix1, string n_suffix2> {
+ let TargetGuard = "sme" in {
+ def NAME # _ZA32_B: SInst<"sv" # n_suffix2 # "_za32[_{d}]",
+ "viPPdd", !cond(!eq(n_suffix1, "s") : "", true: "U") # "c",
+ MergeOp1, "aarch64_sme_" # n_suffix1 # n_suffix2 # "_wide",
+ [IsStreaming, IsInOutZA],
+ [ImmCheck<0, ImmCheck0_3>]>;
+ }
+
+ let TargetGuard = "sme-i16i64" in {
+ def NAME # _ZA64_H: SInst<"sv" # n_suffix2 # "_za64[_{d}]",
+ "viPPdd", !cond(!eq(n_suffix1, "s") : "", true: "U") # "s",
+ MergeOp1, "aarch64_sme_" # n_suffix1 # n_suffix2 # "_wide",
+ [IsStreaming, IsInOutZA],
+ [ImmCheck<0, ImmCheck0_7>]>;
+ }
+}
+
+defm SVSMOPA : ZAIntOuterProd<"s", "mopa">;
+defm SVSMOPS : ZAIntOuterProd<"s", "mops">;
+defm SVUMOPA : ZAIntOuterProd<"u", "mopa">;
+defm SVUMOPS : ZAIntOuterProd<"u", "mops">;
+
+////////////////////////////////////////////////////////////////////////////////
+// SME - SUMOPA, SUMOPS, USMOPA, USMOPS
+
+multiclass ZAIntOuterProdMixedSigns<string n_suffix1, string n_suffix2> {
+ let TargetGuard = "sme" in {
+ def NAME # _ZA32_B: SInst<"sv" # n_suffix1 # n_suffix2 # "_za32[_{d}]",
+ "viPPd" # !cond(!eq(n_suffix1, "su") : "u", true: "x"),
+ !cond(!eq(n_suffix1, "su") : "", true: "U") # "c",
+ MergeOp1, "aarch64_sme_" # n_suffix1 # n_suffix2 # "_wide",
+ [IsStreaming, IsInOutZA],
+ [ImmCheck<0, ImmCheck0_3>]>;
+ }
+
+ let TargetGuard = "sme-i16i64" in {
+ def NAME # _ZA64_H: SInst<"sv" # n_suffix1 # n_suffix2 # "_za64[_{d}]",
+ "viPPd" # !cond(!eq(n_suffix1, "su") : "u", true: "x"),
+ !cond(!eq(n_suffix1, "su") : "", true: "U") # "s",
+ MergeOp1, "aarch64_sme_" # n_suffix1 # n_suffix2 # "_wide",
+ [IsStreaming, IsInOutZA],
+ [ImmCheck<0, ImmCheck0_7>]>;
+ }
+}
+
+defm SVSUMOPA : ZAIntOuterProdMixedSigns<"su", "mopa">;
+defm SVSUMOPS : ZAIntOuterProdMixedSigns<"su", "mops">;
+defm SVUSMOPA : ZAIntOuterProdMixedSigns<"us", "mopa">;
+defm SVUSMOPS : ZAIntOuterProdMixedSigns<"us", "mops">;
+
+////////////////////////////////////////////////////////////////////////////////
+// SME - FMOPA, FMOPS
+
+multiclass ZAFPOuterProd<string n_suffix> {
+ let TargetGuard = "sme" in {
+ def NAME # _ZA32_B: SInst<"sv" # n_suffix # "_za32[_{d}]", "viPPdd", "h",
+ MergeOp1, "aarch64_sme_" # n_suffix # "_wide",
+ [IsStreaming, IsInOutZA],
+ [ImmCheck<0, ImmCheck0_3>]>;
+
+ def NAME # _ZA32_H: SInst<"sv" # n_suffix # "_za32[_{d}]", "viPPdd", "b",
+ MergeOp1, "aarch64_sme_" # n_suffix # "_wide",
+ [IsStreaming, IsInOutZA],
+ [ImmCheck<0, ImmCheck0_3>]>;
+
+ def NAME # _ZA32_S: SInst<"sv" # n_suffix # "_za32[_{d}]", "viPPdd", "f",
+ MergeOp1, "aarch64_sme_" # n_suffix,
+ [IsStreaming, IsInOutZA],
+ [ImmCheck<0, ImmCheck0_3>]>;
+ }
+
+ let TargetGuard = "sme-f64f64" in {
+ def NAME # _ZA64_D: SInst<"sv" # n_suffix # "_za64[_{d}]", "viPPdd", "d",
+ MergeOp1, "aarch64_sme_" # n_suffix,
+ [IsStreaming, IsInOutZA],
+ [ImmCheck<0, ImmCheck0_7>]>;
+ }
+}
+
+defm SVMOPA : ZAFPOuterProd<"mopa">;
+defm SVMOPS : ZAFPOuterProd<"mops">;
+
+////////////////////////////////////////////////////////////////////////////////
+// SME2 - ADD, SUB
+
+multiclass ZAAddSub<string n_suffix> {
+ let TargetGuard = "sme2" in {
+ def NAME # _WRITE_SINGLE_ZA32_VG1X2_I32 : Inst<"sv" # n_suffix # "_write[_single]_za32[_{d}]_vg1x2", "vm2d", "iUi", MergeNone, "aarch64_sme_" # n_suffix # "_write_single_za_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def NAME # _WRITE_SINGLE_ZA32_VG1X4_I32 : Inst<"sv" # n_suffix # "_write[_single]_za32[_{d}]_vg1x4", "vm4d", "iUi", MergeNone, "aarch64_sme_" # n_suffix # "_write_single_za_vg1x4", [IsStreaming, IsInOutZA], []>;
+
+ def NAME # _WRITE_ZA32_VG1X2_I32 : Inst<"sv" # n_suffix # "_write_za32[_{d}]_vg1x2", "vm22", "iUi", MergeNone, "aarch64_sme_" # n_suffix # "_write_za_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def NAME # _WRITE_ZA32_VG1X4_I32 : Inst<"sv" # n_suffix # "_write_za32[_{d}]_vg1x4", "vm44", "iUi", MergeNone, "aarch64_sme_" # n_suffix # "_write_za_vg1x4", [IsStreaming, IsInOutZA], []>;
+
+ def NAME # _ZA32_VG1x2_I32 : Inst<"sv" # n_suffix # "_za32[_{d}]_vg1x2", "vm2", "iUif", MergeNone, "aarch64_sme_" # n_suffix # "_za32_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def NAME # _ZA32_VG1X4_I32 : Inst<"sv" # n_suffix # "_za32[_{d}]_vg1x4", "vm4", "iUif", MergeNone, "aarch64_sme_" # n_suffix # "_za32_vg1x4", [IsStreaming, IsInOutZA], []>;
+ }
+
+ let TargetGuard = "sme2,sme-i16i64" in {
+ def NAME # _WRITE_SINGLE_ZA64_VG1X2_I64 : Inst<"sv" # n_suffix # "_write[_single]_za64[_{d}]_vg1x2", "vm2d", "lUl", MergeNone, "aarch64_sme_" # n_suffix # "_write_single_za_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def NAME # _WRITE_SINGLE_ZA64_VG1X4_I64 : Inst<"sv" # n_suffix # "_write[_single]_za64[_{d}]_vg1x4", "vm4d", "lUl", MergeNone, "aarch64_sme_" # n_suffix # "_write_single_za_vg1x4", [IsStreaming, IsInOutZA], []>;
+
+ def NAME # _WRITE_ZA64_VG1x2_I64 : Inst<"sv" # n_suffix # "_write_za64[_{d}]_vg1x2", "vm22", "lUl", MergeNone, "aarch64_sme_" # n_suffix # "_write_za_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def NAME # _WRITE_ZA64_VG1x4_I64 : Inst<"sv" # n_suffix # "_write_za64[_{d}]_vg1x4", "vm44", "lUl", MergeNone, "aarch64_sme_" # n_suffix # "_write_za_vg1x4", [IsStreaming, IsInOutZA], []>;
+
+ def NAME # _ZA64_VG1X2_I64 : Inst<"sv" # n_suffix # "_za64[_{d}]_vg1x2", "vm2", "lUl", MergeNone, "aarch64_sme_" # n_suffix # "_za64_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def NAME # _ZA64_VG1X4_I64 : Inst<"sv" # n_suffix # "_za64[_{d}]_vg1x4", "vm4", "lUl", MergeNone, "aarch64_sme_" # n_suffix # "_za64_vg1x4", [IsStreaming, IsInOutZA], []>;
+ }
+
+ let TargetGuard = "sme2,sme-f64f64" in {
+ def NAME # _ZA64_VG1X2_F64 : Inst<"sv" # n_suffix # "_za64[_{d}]_vg1x2", "vm2", "d", MergeNone, "aarch64_sme_" # n_suffix # "_za64_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def NAME # _ZA64_VG1X4_F64 : Inst<"sv" # n_suffix # "_za64[_{d}]_vg1x4", "vm4", "d", MergeNone, "aarch64_sme_" # n_suffix # "_za64_vg1x4", [IsStreaming, IsInOutZA], []>;
+ }
+}
+
+defm SVADD : ZAAddSub<"add">;
+defm SVSUB : ZAAddSub<"sub">;
+
+// SME2 - MOVA
+
+//
+// Single, 2 and 4 vector-group read/write intrinsics.
+//
+
+multiclass ZAWrite_VG<string n, string t, string i, list<ImmCheck> checks> {
+ def NAME # _VG2_H : Inst<"svwrite_hor_" # n # "[_{d}]_vg2", "vim2", t, MergeNone, i # "_hor_vg2", [IsInOutZA, IsStreaming], checks>;
+ def NAME # _VG2_V : Inst<"svwrite_ver_" # n # "[_{d}]_vg2", "vim2", t, MergeNone, i # "_ver_vg2", [IsInOutZA, IsStreaming], checks>;
+ def NAME # _VG4_H : Inst<"svwrite_hor_" # n # "[_{d}]_vg4", "vim4", t, MergeNone, i # "_hor_vg4", [IsInOutZA, IsStreaming], checks>;
+ def NAME # _VG4_V : Inst<"svwrite_ver_" # n # "[_{d}]_vg4", "vim4", t, MergeNone, i # "_ver_vg4", [IsInOutZA, IsStreaming], checks>;
+ def NAME # _VG1x2 : Inst<"svwrite_" # n # "[_{d}]_vg1x2", "vm2", t, MergeNone, i # "_vg1x2", [IsInOutZA, IsStreaming], []>;
+ def NAME # _VG1x4 : Inst<"svwrite_" # n # "[_{d}]_vg1x4", "vm4", t, MergeNone, i # "_vg1x4", [IsInOutZA, IsStreaming], []>;
+}
+
+let TargetGuard = "sme2" in {
+ defm SVWRITE_ZA8 : ZAWrite_VG<"za8", "cUc", "aarch64_sme_write", [ImmCheck<0, ImmCheck0_0>]>;
+ defm SVWRITE_ZA16 : ZAWrite_VG<"za16", "sUshb", "aarch64_sme_write", [ImmCheck<0, ImmCheck0_1>]>;
+ defm SVWRITE_ZA32 : ZAWrite_VG<"za32", "iUif", "aarch64_sme_write", [ImmCheck<0, ImmCheck0_3>]>;
+ defm SVWRITE_ZA64 : ZAWrite_VG<"za64", "lUld", "aarch64_sme_write", [ImmCheck<0, ImmCheck0_7>]>;
+}
+
+multiclass ZARead_VG<string n, string t, string i, list<ImmCheck> checks> {
+ def NAME # _VG2_H : Inst<"svread_hor_" # n # "_{d}_vg2", "2im", t, MergeNone, i # "_hor_vg2", [IsInZA, IsStreaming], checks>;
+ def NAME # _VG2_V : Inst<"svread_ver_" # n # "_{d}_vg2", "2im", t, MergeNone, i # "_ver_vg2", [IsInZA, IsStreaming], checks>;
+ def NAME # _VG4_H : Inst<"svread_hor_" # n # "_{d}_vg4", "4im", t, MergeNone, i # "_hor_vg4", [IsInZA, IsStreaming], checks>;
+ def NAME # _VG4_V : Inst<"svread_ver_" # n # "_{d}_vg4", "4im", t, MergeNone, i # "_ver_vg4", [IsInZA, IsStreaming], checks>;
+ def NAME # _VG1x2 : Inst<"svread_" # n # "_{d}_vg1x2", "2m", t, MergeNone, i # "_vg1x2", [IsInZA, IsStreaming], []>;
+ def NAME # _VG1x4 : Inst<"svread_" # n # "_{d}_vg1x4", "4m", t, MergeNone, i # "_vg1x4", [IsInZA, IsStreaming], []>;
+}
+
+let TargetGuard = "sme2" in {
+ defm SVREAD_ZA8 : ZARead_VG<"za8", "cUc", "aarch64_sme_read", [ImmCheck<0, ImmCheck0_0>]>;
+ defm SVREAD_ZA16 : ZARead_VG<"za16", "sUshb", "aarch64_sme_read", [ImmCheck<0, ImmCheck0_1>]>;
+ defm SVREAD_ZA32 : ZARead_VG<"za32", "iUif", "aarch64_sme_read", [ImmCheck<0, ImmCheck0_3>]>;
+ defm SVREAD_ZA64 : ZARead_VG<"za64", "lUld", "aarch64_sme_read", [ImmCheck<0, ImmCheck0_7>]>;
+}
+
+//
+// Outer product and accumulate/subtract
+//
+
+let TargetGuard = "sme2" in {
+ def SVSMOPA : Inst<"svmopa_za32[_{d}]_m", "viPPdd", "s", MergeNone, "aarch64_sme_smopa_za32", [IsInOutZA, IsStreaming], [ImmCheck<0, ImmCheck0_3>]>;
+ def SVUSMOPA : Inst<"svmopa_za32[_{d}]_m", "viPPdd", "Us", MergeNone, "aarch64_sme_umopa_za32", [IsInOutZA, IsStreaming], [ImmCheck<0, ImmCheck0_3>]>;
+
+ def SVSMOPS : Inst<"svmops_za32[_{d}]_m", "viPPdd", "s", MergeNone, "aarch64_sme_smops_za32", [IsInOutZA, IsStreaming], [ImmCheck<0, ImmCheck0_3>]>;
+ def SVUSMOPS : Inst<"svmops_za32[_{d}]_m", "viPPdd", "Us", MergeNone, "aarch64_sme_umops_za32", [IsInOutZA, IsStreaming], [ImmCheck<0, ImmCheck0_3>]>;
+
+ def SVBMOPA : Inst<"svbmopa_za32[_{d}]_m", "viPPdd", "iUi", MergeNone, "aarch64_sme_bmopa_za32", [IsInOutZA, IsStreaming], [ImmCheck<0, ImmCheck0_3>]>;
+
+ def SVBMOPS : Inst<"svbmops_za32[_{d}]_m", "viPPdd", "iUi", MergeNone, "aarch64_sme_bmops_za32", [IsInOutZA, IsStreaming], [ImmCheck<0, ImmCheck0_3>]>;
+
+ // VERTICAL DOT-PRODUCT
+ def SVVDOT_LANE_ZA32_VG1x2_S : Inst<"svvdot_lane_za32[_{d}]_vg1x2", "vm2di", "s", MergeNone, "aarch64_sme_svdot_lane_za32_vg1x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
+ def SVVDOT_LANE_ZA32_VG1x4_S : Inst<"svvdot_lane_za32[_{d}]_vg1x4", "vm4di", "c", MergeNone, "aarch64_sme_svdot_lane_za32_vg1x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
+ def SVVDOT_LANE_ZA32_VG1x2_U : Inst<"svvdot_lane_za32[_{d}]_vg1x2", "vm2di", "Us", MergeNone, "aarch64_sme_uvdot_lane_za32_vg1x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
+ def SVVDOT_LANE_ZA32_VG1x4_U : Inst<"svvdot_lane_za32[_{d}]_vg1x4", "vm4di", "Uc", MergeNone, "aarch64_sme_uvdot_lane_za32_vg1x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
+ def SVVDOT_LANE_ZA32_VG1x2_F : Inst<"svvdot_lane_za32[_{d}]_vg1x2", "vm2di", "hb", MergeNone, "aarch64_sme_fvdot_lane_za32_vg1x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
+ def SVSUVDOT_LANE_ZA32_VG1x4 : Inst<"svsuvdot_lane_za32[_{d}]_vg1x4", "vm4di", "c", MergeNone, "aarch64_sme_suvdot_lane_za32_vg1x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
+ def SVUSVDOT_LANE_ZA32_VG1x4 : Inst<"svusvdot_lane_za32[_{d}]_vg1x4", "vm4di", "Uc", MergeNone, "aarch64_sme_usvdot_lane_za32_vg1x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
+
+ // Multi-vector signed & unsigned integer dot-product
+ def SVDOT_MULTI_ZA32_VG1x2_S : Inst<"svdot_za32[_{d}]_vg1x2", "vm22", "cs", MergeNone, "aarch64_sme_sdot_za32_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def SVDOT_MULTI_ZA32_VG1x4_S : Inst<"svdot_za32[_{d}]_vg1x4", "vm44", "cs", MergeNone, "aarch64_sme_sdot_za32_vg1x4", [IsStreaming, IsInOutZA], []>;
+ def SVDOT_MULTI_ZA32_VG1x2_U : Inst<"svdot_za32[_{d}]_vg1x2", "vm22", "UcUs", MergeNone, "aarch64_sme_udot_za32_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def SVDOT_MULTI_ZA32_VG1x4_U : Inst<"svdot_za32[_{d}]_vg1x4", "vm44", "UcUs", MergeNone, "aarch64_sme_udot_za32_vg1x4", [IsStreaming, IsInOutZA], []>;
+ def SVDOT_SINGLE_ZA32_VG1x2_S : Inst<"svdot[_single]_za32[_{d}]_vg1x2", "vm2d", "cs", MergeNone, "aarch64_sme_sdot_single_za32_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def SVDOT_SINGLE_ZA32_VG1x4_S : Inst<"svdot[_single]_za32[_{d}]_vg1x4", "vm4d", "cs", MergeNone, "aarch64_sme_sdot_single_za32_vg1x4", [IsStreaming, IsInOutZA], []>;
+ def SVDOT_SINGLE_ZA32_VG1x2_U : Inst<"svdot[_single]_za32[_{d}]_vg1x2", "vm2d", "UcUs", MergeNone, "aarch64_sme_udot_single_za32_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def SVDOT_SINGLE_ZA32_VG1x4_U : Inst<"svdot[_single]_za32[_{d}]_vg1x4", "vm4d", "UcUs", MergeNone, "aarch64_sme_udot_single_za32_vg1x4", [IsStreaming, IsInOutZA], []>;
+ def SVDOT_LANE_ZA32_VG1x2_S : Inst<"svdot_lane_za32[_{d}]_vg1x2", "vm2di", "cs", MergeNone, "aarch64_sme_sdot_lane_za32_vg1x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
+ def SVDOT_LANE_ZA32_VG1x4_S : Inst<"svdot_lane_za32[_{d}]_vg1x4", "vm4di", "cs", MergeNone, "aarch64_sme_sdot_lane_za32_vg1x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
+ def SVDOT_LANE_ZA32_VG1x2_U : Inst<"svdot_lane_za32[_{d}]_vg1x2", "vm2di", "UcUs", MergeNone, "aarch64_sme_udot_lane_za32_vg1x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
+ def SVDOT_LANE_ZA32_VG1x4_U : Inst<"svdot_lane_za32[_{d}]_vg1x4", "vm4di", "UcUs", MergeNone, "aarch64_sme_udot_lane_za32_vg1x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
+
+ def SVUSDOT_SINGLE_ZA32_VG1x2 : Inst<"svusdot[_single]_za32[_{d}]_vg1x2", "vm2.dx", "Uc", MergeNone, "aarch64_sme_usdot_single_za32_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def SVUSDOT_SINGLE_ZA32_VG1x4 : Inst<"svusdot[_single]_za32[_{d}]_vg1x4", "vm4.dx", "Uc", MergeNone, "aarch64_sme_usdot_single_za32_vg1x4", [IsStreaming, IsInOutZA], []>;
+ def SVUSDOT_MULTI_ZA32_VG1x2 : Inst<"svusdot_za32[_{d}]_vg1x2", "vm2.d2.x", "Uc", MergeNone, "aarch64_sme_usdot_za32_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def SVUSDOT_MULTI_ZA32_VG1x4 : Inst<"svusdot_za32[_{d}]_vg1x4", "vm4.d4.x", "Uc", MergeNone, "aarch64_sme_usdot_za32_vg1x4", [IsStreaming, IsInOutZA], []>;
+ def SVUSDOT_LANE_ZA32_VG1x2 : Inst<"svusdot_lane_za32[_{d}]_vg1x2", "vm2.dxi", "Uc", MergeNone, "aarch64_sme_usdot_lane_za32_vg1x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
+ def SVUSDOT_LANE_ZA32_VG1x4 : Inst<"svusdot_lane_za32[_{d}]_vg1x4", "vm4.dxi", "Uc", MergeNone, "aarch64_sme_usdot_lane_za32_vg1x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
+
+ def SVSUDOT_SINGLE_ZA32_VG1x2 : Inst<"svsudot[_single]_za32[_{d}]_vg1x2", "vm2.du", "c", MergeNone, "aarch64_sme_sudot_single_za32_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def SVSUDOT_SINGLE_ZA32_VG1x4 : Inst<"svsudot[_single]_za32[_{d}]_vg1x4", "vm4.du", "c", MergeNone, "aarch64_sme_sudot_single_za32_vg1x4", [IsStreaming, IsInOutZA], []>;
+
+ // Multi-multi sudot builtins are mapped to usdot, with zn & zm operands swapped
+ def SVSUDOT_MULTI_ZA32_VG1x2 : Inst<"svsudot_za32[_{d}]_vg1x2", "vm2.d2.u", "c", MergeNone, "aarch64_sme_usdot_za32_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def SVSUDOT_MULTI_ZA32_VG1x4 : Inst<"svsudot_za32[_{d}]_vg1x4", "vm4.d4.u", "c", MergeNone, "aarch64_sme_usdot_za32_vg1x4", [IsStreaming, IsInOutZA], []>;
+
+ def SVSUDOT_LANE_ZA32_VG1x2 : Inst<"svsudot_lane_za32[_{d}]_vg1x2", "vm2.dui", "c", MergeNone, "aarch64_sme_sudot_lane_za32_vg1x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
+ def SVSUDOT_LANE_ZA32_VG1x4 : Inst<"svsudot_lane_za32[_{d}]_vg1x4", "vm4.dui", "c", MergeNone, "aarch64_sme_sudot_lane_za32_vg1x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
+
+ // Multi-vector half-precision/BFloat16 floating-point dot-product
+ def SVDOT_MULTI_ZA32_VG1x2_F16 : Inst<"svdot_za32[_{d}]_vg1x2", "vm22", "bh", MergeNone, "aarch64_sme_fdot_za32_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def SVDOT_MULTI_ZA32_VG1x4_F16 : Inst<"svdot_za32[_{d}]_vg1x4", "vm44", "bh", MergeNone, "aarch64_sme_fdot_za32_vg1x4", [IsStreaming, IsInOutZA], []>;
+ def SVDOT_SINGLE_ZA32_VG1x2_F16 : Inst<"svdot[_single]_za32[_{d}]_vg1x2", "vm2d", "bh", MergeNone, "aarch64_sme_fdot_single_za32_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def SVDOT_SINGLE_ZA32_VG1x4_F16 : Inst<"svdot[_single]_za32[_{d}]_vg1x4", "vm4d", "bh", MergeNone, "aarch64_sme_fdot_single_za32_vg1x4", [IsStreaming, IsInOutZA], []>;
+ def SVDOT_LANE_ZA32_VG1x2_F16 : Inst<"svdot_lane_za32[_{d}]_vg1x2", "vm2di", "bh", MergeNone, "aarch64_sme_fdot_lane_za32_vg1x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
+ def SVDOT_LANE_ZA32_VG1x4_F16 : Inst<"svdot_lane_za32[_{d}]_vg1x4", "vm4di", "bh", MergeNone, "aarch64_sme_fdot_lane_za32_vg1x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
+}
+
+let TargetGuard = "sme2,sme-i16i64" in {
+ def SVVDOT_LANE_ZA64_VG1x4_S : Inst<"svvdot_lane_za64[_{d}]_vg1x4", "vm4di", "s", MergeNone, "aarch64_sme_svdot_lane_za64_vg1x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_1>]>;
+ def SVVDOT_LANE_ZA64_VG1x4_U : Inst<"svvdot_lane_za64[_{d}]_vg1x4", "vm4di", "Us", MergeNone, "aarch64_sme_uvdot_lane_za64_vg1x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_1>]>;
+
+ def SVDOT_MULTI_ZA64_VG1x2_S16 : Inst<"svdot_za64[_{d}]_vg1x2", "vm22", "s", MergeNone, "aarch64_sme_sdot_za64_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def SVDOT_MULTI_ZA64_VG1x4_S16 : Inst<"svdot_za64[_{d}]_vg1x4", "vm44", "s", MergeNone, "aarch64_sme_sdot_za64_vg1x4", [IsStreaming, IsInOutZA], []>;
+ def SVDOT_MULTI_ZA64_VG1x2_U16 : Inst<"svdot_za64[_{d}]_vg1x2", "vm22", "Us", MergeNone, "aarch64_sme_udot_za64_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def SVDOT_MULTI_ZA64_VG1x4_U16 : Inst<"svdot_za64[_{d}]_vg1x4", "vm44", "Us", MergeNone, "aarch64_sme_udot_za64_vg1x4", [IsStreaming, IsInOutZA], []>;
+ def SVDOT_SINGLE_ZA64_VG1x2_S16 : Inst<"svdot[_single]_za64[_{d}]_vg1x2", "vm2d", "s", MergeNone, "aarch64_sme_sdot_single_za64_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def SVDOT_SINGLE_ZA64_VG1x4_S16 : Inst<"svdot[_single]_za64[_{d}]_vg1x4", "vm4d", "s", MergeNone, "aarch64_sme_sdot_single_za64_vg1x4", [IsStreaming, IsInOutZA], []>;
+ def SVDOT_SINGLE_ZA64_VG1x2_U16 : Inst<"svdot[_single]_za64[_{d}]_vg1x2", "vm2d", "Us", MergeNone, "aarch64_sme_udot_single_za64_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def SVDOT_SINGLE_ZA64_VG1x4_U16 : Inst<"svdot[_single]_za64[_{d}]_vg1x4", "vm4d", "Us", MergeNone, "aarch64_sme_udot_single_za64_vg1x4", [IsStreaming, IsInOutZA], []>;
+ def SVDOT_LANE_ZA64_VG1x2_S16 : Inst<"svdot_lane_za64[_{d}]_vg1x2", "vm2di", "s", MergeNone, "aarch64_sme_sdot_lane_za64_vg1x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_1>]>;
+ def SVDOT_LANE_ZA64_VG1x4_S16 : Inst<"svdot_lane_za64[_{d}]_vg1x4", "vm4di", "s", MergeNone, "aarch64_sme_sdot_lane_za64_vg1x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_1>]>;
+ def SVDOT_LANE_ZA64_VG1x2_U16 : Inst<"svdot_lane_za64[_{d}]_vg1x2", "vm2di", "Us", MergeNone, "aarch64_sme_udot_lane_za64_vg1x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_1>]>;
+ def SVDOT_LANE_ZA64_VG1x4_U16 : Inst<"svdot_lane_za64[_{d}]_vg1x4", "vm4di", "Us", MergeNone, "aarch64_sme_udot_lane_za64_vg1x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_1>]>;
+}
+
+// FMLA/FMLS
+let TargetGuard = "sme2" in {
+ def SVMLA_MULTI_VG1x2_F32 : Inst<"svmla_za32[_{d}]_vg1x2", "vm22", "f", MergeNone, "aarch64_sme_fmla_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLA_MULTI_VG1x4_F32 : Inst<"svmla_za32[_{d}]_vg1x4", "vm44", "f", MergeNone, "aarch64_sme_fmla_vg1x4", [IsStreaming, IsInOutZA], []>;
+ def SVMLS_MULTI_VG1x2_F32 : Inst<"svmls_za32[_{d}]_vg1x2", "vm22", "f", MergeNone, "aarch64_sme_fmls_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLS_MULTI_VG1x4_F32 : Inst<"svmls_za32[_{d}]_vg1x4", "vm44", "f", MergeNone, "aarch64_sme_fmls_vg1x4", [IsStreaming, IsInOutZA], []>;
+
+ def SVMLA_SINGLE_VG1x2_F32 : Inst<"svmla[_single]_za32[_{d}]_vg1x2", "vm2d", "f", MergeNone, "aarch64_sme_fmla_single_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLA_SINGLE_VG1x4_F32 : Inst<"svmla[_single]_za32[_{d}]_vg1x4", "vm4d", "f", MergeNone, "aarch64_sme_fmla_single_vg1x4", [IsStreaming, IsInOutZA], []>;
+ def SVMLS_SINGLE_VG1x2_F32 : Inst<"svmls[_single]_za32[_{d}]_vg1x2", "vm2d", "f", MergeNone, "aarch64_sme_fmls_single_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLS_SINGLE_VG1x4_F32 : Inst<"svmls[_single]_za32[_{d}]_vg1x4", "vm4d", "f", MergeNone, "aarch64_sme_fmls_single_vg1x4", [IsStreaming, IsInOutZA], []>;
+
+ def SVMLA_LANE_VG1x2_F32 : Inst<"svmla_lane_za32[_{d}]_vg1x2", "vm2di", "f", MergeNone, "aarch64_sme_fmla_lane_vg1x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
+ def SVMLA_LANE_VG1x4_F32 : Inst<"svmla_lane_za32[_{d}]_vg1x4", "vm4di", "f", MergeNone, "aarch64_sme_fmla_lane_vg1x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
+ def SVMLS_LANE_VG1x2_F32 : Inst<"svmls_lane_za32[_{d}]_vg1x2", "vm2di", "f", MergeNone, "aarch64_sme_fmls_lane_vg1x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
+ def SVMLS_LANE_VG1x4_F32 : Inst<"svmls_lane_za32[_{d}]_vg1x4", "vm4di", "f", MergeNone, "aarch64_sme_fmls_lane_vg1x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_3>]>;
+}
+
+let TargetGuard = "sme2,sme-f64f64" in {
+ def SVMLA_MULTI_VG1x2_F64 : Inst<"svmla_za64[_{d}]_vg1x2", "vm22", "d", MergeNone, "aarch64_sme_fmla_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLA_MULTI_VG1x4_F64 : Inst<"svmla_za64[_{d}]_vg1x4", "vm44", "d", MergeNone, "aarch64_sme_fmla_vg1x4", [IsStreaming, IsInOutZA], []>;
+ def SVMLS_MULTI_VG1x2_F64 : Inst<"svmls_za64[_{d}]_vg1x2", "vm22", "d", MergeNone, "aarch64_sme_fmls_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLS_MULTI_VG1x4_F64 : Inst<"svmls_za64[_{d}]_vg1x4", "vm44", "d", MergeNone, "aarch64_sme_fmls_vg1x4", [IsStreaming, IsInOutZA], []>;
+
+ def SVMLA_SINGLE_VG1x2_F64 : Inst<"svmla[_single]_za64[_{d}]_vg1x2", "vm2d", "d", MergeNone, "aarch64_sme_fmla_single_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLA_SINGLE_VG1x4_F64 : Inst<"svmla[_single]_za64[_{d}]_vg1x4", "vm4d", "d", MergeNone, "aarch64_sme_fmla_single_vg1x4", [IsStreaming, IsInOutZA], []>;
+ def SVMLS_SINGLE_VG1x2_F64 : Inst<"svmls[_single]_za64[_{d}]_vg1x2", "vm2d", "d", MergeNone, "aarch64_sme_fmls_single_vg1x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLS_SINGLE_VG1x4_F64 : Inst<"svmls[_single]_za64[_{d}]_vg1x4", "vm4d", "d", MergeNone, "aarch64_sme_fmls_single_vg1x4", [IsStreaming, IsInOutZA], []>;
+
+ def SVMLA_LANE_VG1x2_F64 : Inst<"svmla_lane_za64[_{d}]_vg1x2", "vm2di", "d", MergeNone, "aarch64_sme_fmla_lane_vg1x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_1>]>;
+ def SVMLA_LANE_VG1x4_F64 : Inst<"svmla_lane_za64[_{d}]_vg1x4", "vm4di", "d", MergeNone, "aarch64_sme_fmla_lane_vg1x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_1>]>;
+ def SVMLS_LANE_VG1x2_F64 : Inst<"svmls_lane_za64[_{d}]_vg1x2", "vm2di", "d", MergeNone, "aarch64_sme_fmls_lane_vg1x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_1>]>;
+ def SVMLS_LANE_VG1x4_F64 : Inst<"svmls_lane_za64[_{d}]_vg1x4", "vm4di", "d", MergeNone, "aarch64_sme_fmls_lane_vg1x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_1>]>;
+}
+
+// FMLAL/FMLSL/UMLAL/SMLAL
+// SMLALL/UMLALL/USMLALL/SUMLALL
+let TargetGuard = "sme2" in {
+ // MULTI MLAL
+ def SVMLAL_MULTI_VG2x2_F16 : Inst<"svmla_za32[_{d}]_vg2x2", "vm22", "bh", MergeNone, "aarch64_sme_fmlal_vg2x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_MULTI_VG2x4_F16 : Inst<"svmla_za32[_{d}]_vg2x4", "vm44", "bh", MergeNone, "aarch64_sme_fmlal_vg2x4", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_MULTI_VG2x2_S16 : Inst<"svmla_za32[_{d}]_vg2x2", "vm22", "s", MergeNone, "aarch64_sme_smlal_vg2x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_MULTI_VG2x4_S16 : Inst<"svmla_za32[_{d}]_vg2x4", "vm44", "s", MergeNone, "aarch64_sme_smlal_vg2x4", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_MULTI_VG2x2_U16 : Inst<"svmla_za32[_{d}]_vg2x2", "vm22", "Us", MergeNone, "aarch64_sme_umlal_vg2x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_MULTI_VG2x4_U16 : Inst<"svmla_za32[_{d}]_vg2x4", "vm44", "Us", MergeNone, "aarch64_sme_umlal_vg2x4", [IsStreaming, IsInOutZA], []>;
+
+ def SVMLAL_MULTI_VG4x2_S8 : Inst<"svmla_za32[_{d}]_vg4x2", "vm22", "c", MergeNone, "aarch64_sme_smla_za32_vg4x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_MULTI_VG4x2_U8 : Inst<"svmla_za32[_{d}]_vg4x2", "vm22", "Uc", MergeNone, "aarch64_sme_umla_za32_vg4x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_MULTI_VG4x4_S8 : Inst<"svmla_za32[_{d}]_vg4x4", "vm44", "c", MergeNone, "aarch64_sme_smla_za32_vg4x4", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_MULTI_VG4x4_U8 : Inst<"svmla_za32[_{d}]_vg4x4", "vm44", "Uc", MergeNone, "aarch64_sme_umla_za32_vg4x4", [IsStreaming, IsInOutZA], []>;
+
+ // MULTI MLSL
+ def SVMLSL_MULTI_VG2x2_F16 : Inst<"svmls_za32[_{d}]_vg2x2", "vm22", "bh", MergeNone, "aarch64_sme_fmlsl_vg2x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_MULTI_VG2x4_F16 : Inst<"svmls_za32[_{d}]_vg2x4", "vm44", "bh", MergeNone, "aarch64_sme_fmlsl_vg2x4", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_MULTI_VG2x2_S16 : Inst<"svmls_za32[_{d}]_vg2x2", "vm22", "s", MergeNone, "aarch64_sme_smlsl_vg2x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_MULTI_VG2x4_S16 : Inst<"svmls_za32[_{d}]_vg2x4", "vm44", "s", MergeNone, "aarch64_sme_smlsl_vg2x4", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_MULTI_VG2x2_U16 : Inst<"svmls_za32[_{d}]_vg2x2", "vm22", "Us", MergeNone, "aarch64_sme_umlsl_vg2x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_MULTI_VG2x4_U16 : Inst<"svmls_za32[_{d}]_vg2x4", "vm44", "Us", MergeNone, "aarch64_sme_umlsl_vg2x4", [IsStreaming, IsInOutZA], []>;
+
+ def SVMLSL_MULTI_VG4x2_S8 : Inst<"svmls_za32[_{d}]_vg4x2", "vm22", "c", MergeNone, "aarch64_sme_smls_za32_vg4x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_MULTI_VG4x2_U8 : Inst<"svmls_za32[_{d}]_vg4x2", "vm22", "Uc", MergeNone, "aarch64_sme_umls_za32_vg4x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_MULTI_VG4x4_S8 : Inst<"svmls_za32[_{d}]_vg4x4", "vm44", "c", MergeNone, "aarch64_sme_smls_za32_vg4x4", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_MULTI_VG4x4_U8 : Inst<"svmls_za32[_{d}]_vg4x4", "vm44", "Uc", MergeNone, "aarch64_sme_umls_za32_vg4x4", [IsStreaming, IsInOutZA], []>;
+
+ // SINGLE MLAL
+ def SVMLAL_SINGLE_VG2x1_F16 : Inst<"svmla_za32[_{d}]_vg2x1", "vmdd", "bh", MergeNone, "aarch64_sme_fmlal_single_vg2x1", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_SINGLE_VG2x2_F16 : Inst<"svmla[_single]_za32[_{d}]_vg2x2", "vm2d", "bh", MergeNone, "aarch64_sme_fmlal_single_vg2x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_SINGLE_VG2x4_F16 : Inst<"svmla[_single]_za32[_{d}]_vg2x4", "vm4d", "bh", MergeNone, "aarch64_sme_fmlal_single_vg2x4", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_SINGLE_VG2x1_S16 : Inst<"svmla_za32[_{d}]_vg2x1", "vmdd", "s", MergeNone, "aarch64_sme_smlal_single_vg2x1", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_SINGLE_VG2x2_S16 : Inst<"svmla[_single]_za32[_{d}]_vg2x2", "vm2d", "s", MergeNone, "aarch64_sme_smlal_single_vg2x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_SINGLE_VG2x4_S16 : Inst<"svmla[_single]_za32[_{d}]_vg2x4", "vm4d", "s", MergeNone, "aarch64_sme_smlal_single_vg2x4", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_SINGLE_VG2x1_U16 : Inst<"svmla_za32[_{d}]_vg2x1", "vmdd", "Us", MergeNone, "aarch64_sme_umlal_single_vg2x1", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_SINGLE_VG2x2_U16 : Inst<"svmla[_single]_za32[_{d}]_vg2x2", "vm2d", "Us", MergeNone, "aarch64_sme_umlal_single_vg2x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_SINGLE_VG2x4_U16 : Inst<"svmla[_single]_za32[_{d}]_vg2x4", "vm4d", "Us", MergeNone, "aarch64_sme_umlal_single_vg2x4", [IsStreaming, IsInOutZA], []>;
+
+ def SVMLAL_SINGLE_VG4x1_S8 : Inst<"svmla_za32[_{d}]_vg4x1", "vmdd", "c", MergeNone, "aarch64_sme_smla_za32_single_vg4x1", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_SINGLE_VG4x1_U8 : Inst<"svmla_za32[_{d}]_vg4x1", "vmdd", "Uc", MergeNone, "aarch64_sme_umla_za32_single_vg4x1", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_SINGLE_VG4x2_S8 : Inst<"svmla[_single]_za32[_{d}]_vg4x2", "vm2d", "c", MergeNone, "aarch64_sme_smla_za32_single_vg4x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_SINGLE_VG4x2_U8 : Inst<"svmla[_single]_za32[_{d}]_vg4x2", "vm2d", "Uc", MergeNone, "aarch64_sme_umla_za32_single_vg4x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_SINGLE_VG4x4_S8 : Inst<"svmla[_single]_za32[_{d}]_vg4x4", "vm4d", "c", MergeNone, "aarch64_sme_smla_za32_single_vg4x4", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_SINGLE_VG4x4_U8 : Inst<"svmla[_single]_za32[_{d}]_vg4x4", "vm4d", "Uc", MergeNone, "aarch64_sme_umla_za32_single_vg4x4", [IsStreaming, IsInOutZA], []>;
+
+ // SINGLE MLSL
+ def SVMLSL_SINGLE_VG2x1_F16 : Inst<"svmls_za32[_{d}]_vg2x1", "vmdd", "bh", MergeNone, "aarch64_sme_fmlsl_single_vg2x1", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_SINGLE_VG2x2_F16 : Inst<"svmls[_single]_za32[_{d}]_vg2x2", "vm2d", "bh", MergeNone, "aarch64_sme_fmlsl_single_vg2x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_SINGLE_VG2x4_F16 : Inst<"svmls[_single]_za32[_{d}]_vg2x4", "vm4d", "bh", MergeNone, "aarch64_sme_fmlsl_single_vg2x4", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_SINGLE_VG2x1_S16 : Inst<"svmls_za32[_{d}]_vg2x1", "vmdd", "s", MergeNone, "aarch64_sme_smlsl_single_vg2x1", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_SINGLE_VG2x2_S16 : Inst<"svmls[_single]_za32[_{d}]_vg2x2", "vm2d", "s", MergeNone, "aarch64_sme_smlsl_single_vg2x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_SINGLE_VG2x4_S16 : Inst<"svmls[_single]_za32[_{d}]_vg2x4", "vm4d", "s", MergeNone, "aarch64_sme_smlsl_single_vg2x4", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_SINGLE_VG2x1_U16 : Inst<"svmls_za32[_{d}]_vg2x1", "vmdd", "Us", MergeNone, "aarch64_sme_umlsl_single_vg2x1", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_SINGLE_VG2x2_U16 : Inst<"svmls[_single]_za32[_{d}]_vg2x2", "vm2d", "Us", MergeNone, "aarch64_sme_umlsl_single_vg2x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_SINGLE_VG2x4_U16 : Inst<"svmls[_single]_za32[_{d}]_vg2x4", "vm4d", "Us", MergeNone, "aarch64_sme_umlsl_single_vg2x4", [IsStreaming, IsInOutZA], []>;
+
+ def SVMLSL_SINGLE_VG4x1_S8 : Inst<"svmls_za32[_{d}]_vg4x1", "vmdd", "c", MergeNone, "aarch64_sme_smls_za32_single_vg4x1", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_SINGLE_VG4x1_U8 : Inst<"svmls_za32[_{d}]_vg4x1", "vmdd", "Uc", MergeNone, "aarch64_sme_umls_za32_single_vg4x1", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_SINGLE_VG4x2_S8 : Inst<"svmls[_single]_za32[_{d}]_vg4x2", "vm2d", "c", MergeNone, "aarch64_sme_smls_za32_single_vg4x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_SINGLE_VG4x2_U8 : Inst<"svmls[_single]_za32[_{d}]_vg4x2", "vm2d", "Uc", MergeNone, "aarch64_sme_umls_za32_single_vg4x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_SINGLE_VG4x4_S8 : Inst<"svmls[_single]_za32[_{d}]_vg4x4", "vm4d", "c", MergeNone, "aarch64_sme_smls_za32_single_vg4x4", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_SINGLE_VG4x4_U8 : Inst<"svmls[_single]_za32[_{d}]_vg4x4", "vm4d", "Uc", MergeNone, "aarch64_sme_umls_za32_single_vg4x4", [IsStreaming, IsInOutZA], []>;
+
+ // INDEXED MLAL
+ def SVMLAL_LANE_VG2x1_F16 : Inst<"svmla_lane_za32[_{d}]_vg2x1", "vmddi", "bh", MergeNone, "aarch64_sme_fmlal_lane_vg2x1", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLAL_LANE_VG2x2_F16 : Inst<"svmla_lane_za32[_{d}]_vg2x2", "vm2di", "bh", MergeNone, "aarch64_sme_fmlal_lane_vg2x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLAL_LANE_VG2x4_F16 : Inst<"svmla_lane_za32[_{d}]_vg2x4", "vm4di", "bh", MergeNone, "aarch64_sme_fmlal_lane_vg2x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLAL_LANE_VG2x1_S16 : Inst<"svmla_lane_za32[_{d}]_vg2x1", "vmddi", "s", MergeNone, "aarch64_sme_smlal_lane_vg2x1", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLAL_LANE_VG2x2_S16 : Inst<"svmla_lane_za32[_{d}]_vg2x2", "vm2di", "s", MergeNone, "aarch64_sme_smlal_lane_vg2x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLAL_LANE_VG2x4_S16 : Inst<"svmla_lane_za32[_{d}]_vg2x4", "vm4di", "s", MergeNone, "aarch64_sme_smlal_lane_vg2x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLAL_LANE_VG2x1_U16 : Inst<"svmla_lane_za32[_{d}]_vg2x1", "vmddi", "Us", MergeNone, "aarch64_sme_umlal_lane_vg2x1", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLAL_LANE_VG2x2_U16 : Inst<"svmla_lane_za32[_{d}]_vg2x2", "vm2di", "Us", MergeNone, "aarch64_sme_umlal_lane_vg2x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLAL_LANE_VG2x4_U16 : Inst<"svmla_lane_za32[_{d}]_vg2x4", "vm4di", "Us", MergeNone, "aarch64_sme_umlal_lane_vg2x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+
+ def SVMLAL_LANE_VG4x1_S8 : Inst<"svmla_lane_za32[_{d}]_vg4x1", "vmddi", "c", MergeNone, "aarch64_sme_smla_za32_lane_vg4x1", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_15>]>;
+ def SVMLAL_LANE_VG4x1_U8 : Inst<"svmla_lane_za32[_{d}]_vg4x1", "vmddi", "Uc", MergeNone, "aarch64_sme_umla_za32_lane_vg4x1", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_15>]>;
+ def SVMLAL_LANE_VG4x2_S8 : Inst<"svmla_lane_za32[_{d}]_vg4x2", "vm2di", "c", MergeNone, "aarch64_sme_smla_za32_lane_vg4x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_15>]>;
+ def SVMLAL_LANE_VG4x2_U8 : Inst<"svmla_lane_za32[_{d}]_vg4x2", "vm2di", "Uc", MergeNone, "aarch64_sme_umla_za32_lane_vg4x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_15>]>;
+ def SVMLAL_LANE_VG4x4_S8 : Inst<"svmla_lane_za32[_{d}]_vg4x4", "vm4di", "c", MergeNone, "aarch64_sme_smla_za32_lane_vg4x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_15>]>;
+ def SVMLAL_LANE_VG4x4_U8 : Inst<"svmla_lane_za32[_{d}]_vg4x4", "vm4di", "Uc", MergeNone, "aarch64_sme_umla_za32_lane_vg4x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_15>]>;
+
+ // INDEXED MLSL
+ def SVMLSL_LANE_VG2x1_F16 : Inst<"svmls_lane_za32[_{d}]_vg2x1", "vmddi", "bh", MergeNone, "aarch64_sme_fmlsl_lane_vg2x1", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLSL_LANE_VG2x2_F16 : Inst<"svmls_lane_za32[_{d}]_vg2x2", "vm2di", "bh", MergeNone, "aarch64_sme_fmlsl_lane_vg2x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLSL_LANE_VG2x4_F16 : Inst<"svmls_lane_za32[_{d}]_vg2x4", "vm4di", "bh", MergeNone, "aarch64_sme_fmlsl_lane_vg2x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLSL_LANE_VG2x1_S16 : Inst<"svmls_lane_za32[_{d}]_vg2x1", "vmddi", "s", MergeNone, "aarch64_sme_smlsl_lane_vg2x1", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLSL_LANE_VG2x2_S16 : Inst<"svmls_lane_za32[_{d}]_vg2x2", "vm2di", "s", MergeNone, "aarch64_sme_smlsl_lane_vg2x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLSL_LANE_VG2x4_S16 : Inst<"svmls_lane_za32[_{d}]_vg2x4", "vm4di", "s", MergeNone, "aarch64_sme_smlsl_lane_vg2x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLSL_LANE_VG2x1_U16 : Inst<"svmls_lane_za32[_{d}]_vg2x1", "vmddi", "Us", MergeNone, "aarch64_sme_umlsl_lane_vg2x1", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLSL_LANE_VG2x2_U16 : Inst<"svmls_lane_za32[_{d}]_vg2x2", "vm2di", "Us", MergeNone, "aarch64_sme_umlsl_lane_vg2x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLSL_LANE_VG2x4_U16 : Inst<"svmls_lane_za32[_{d}]_vg2x4", "vm4di", "Us", MergeNone, "aarch64_sme_umlsl_lane_vg2x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+
+ def SVMLSL_LANE_VG4x1_S8 : Inst<"svmls_lane_za32[_{d}]_vg4x1", "vmddi", "c", MergeNone, "aarch64_sme_smls_za32_lane_vg4x1", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_15>]>;
+ def SVMLSL_LANE_VG4x1_U8 : Inst<"svmls_lane_za32[_{d}]_vg4x1", "vmddi", "Uc", MergeNone, "aarch64_sme_umls_za32_lane_vg4x1", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_15>]>;
+ def SVMLSL_LANE_VG4x2_S8 : Inst<"svmls_lane_za32[_{d}]_vg4x2", "vm2di", "c", MergeNone, "aarch64_sme_smls_za32_lane_vg4x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_15>]>;
+ def SVMLSL_LANE_VG4x2_U8 : Inst<"svmls_lane_za32[_{d}]_vg4x2", "vm2di", "Uc", MergeNone, "aarch64_sme_umls_za32_lane_vg4x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_15>]>;
+ def SVMLSL_LANE_VG4x4_S8 : Inst<"svmls_lane_za32[_{d}]_vg4x4", "vm4di", "c", MergeNone, "aarch64_sme_smls_za32_lane_vg4x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_15>]>;
+ def SVMLSL_LANE_VG4x4_U8 : Inst<"svmls_lane_za32[_{d}]_vg4x4", "vm4di", "Uc", MergeNone, "aarch64_sme_umls_za32_lane_vg4x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_15>]>;
+
+ // SINGLE SUMLALL
+ // Single sumla maps to usmla, with zn & zm operands swapped
+ def SVSUMLALL_SINGLE_VG4x1 : Inst<"svsumla_za32[_{d}]_vg4x1", "vmdu", "c", MergeNone, "aarch64_sme_usmla_za32_single_vg4x1", [IsStreaming, IsInOutZA], []>;
+
+ def SVSUMLALL_SINGLE_VG4x2 : Inst<"svsumla[_single]_za32[_{d}]_vg4x2", "vm2.du", "c", MergeNone, "aarch64_sme_sumla_za32_single_vg4x2", [IsStreaming, IsInOutZA], []>;
+ def SVSUMLALL_SINGLE_VG4x4 : Inst<"svsumla[_single]_za32[_{d}]_vg4x4", "vm4.du", "c", MergeNone, "aarch64_sme_sumla_za32_single_vg4x4", [IsStreaming, IsInOutZA], []>;
+
+ // Multi-multi sumla builtins are mapped to usmla, with zn & zm operands swapped
+ def SVSUMLALL_MULTI_VG4x2 : Inst<"svsumla_za32[_{d}]_vg4x2", "vm2.d2.u", "c", MergeNone, "aarch64_sme_usmla_za32_vg4x2", [IsStreaming, IsInOutZA], []>;
+ def SVSUMLALL_MULTI_VG4x4 : Inst<"svsumla_za32[_{d}]_vg4x4", "vm4.d4.u", "c", MergeNone, "aarch64_sme_usmla_za32_vg4x4", [IsStreaming, IsInOutZA], []>;
+
+ // INDEXED SUMLALL
+ def SVSUMLALL_LANE_VG4x1 : Inst<"svsumla_lane_za32[_{d}]_vg4x1", "vmdui", "c", MergeNone, "aarch64_sme_sumla_za32_lane_vg4x1", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_15>]>;
+ def SVSUMLALL_LANE_VG4x2 : Inst<"svsumla_lane_za32[_{d}]_vg4x2", "vm2ui", "c", MergeNone, "aarch64_sme_sumla_za32_lane_vg4x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_15>]>;
+ def SVSUMLALL_LANE_VG4x4 : Inst<"svsumla_lane_za32[_{d}]_vg4x4", "vm4ui", "c", MergeNone, "aarch64_sme_sumla_za32_lane_vg4x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_15>]>;
+
+ // SINGLE USMLALL
+ def SVUSMLALL_SINGLE_VG4x1 : Inst<"svusmla_za32[_{d}]_vg4x1", "vmdx", "Uc", MergeNone, "aarch64_sme_usmla_za32_single_vg4x1", [IsStreaming, IsInOutZA], []>;
+ def SVUSMLALL_SINGLE_VG4x2 : Inst<"svusmla[_single]_za32[_{d}]_vg4x2", "vm2.dx", "Uc", MergeNone, "aarch64_sme_usmla_za32_single_vg4x2", [IsStreaming, IsInOutZA], []>;
+ def SVUSMLALL_SINGLE_VG4x4 : Inst<"svusmla[_single]_za32[_{d}]_vg4x4", "vm4.dx", "Uc", MergeNone, "aarch64_sme_usmla_za32_single_vg4x4", [IsStreaming, IsInOutZA], []>;
+
+ // MULTI USMLALL
+ def SVUSMLALL_MULTI_VG4x2 : Inst<"svusmla_za32[_{d}]_vg4x2", "vm2.d2.x", "Uc", MergeNone, "aarch64_sme_usmla_za32_vg4x2", [IsStreaming, IsInOutZA], []>;
+ def SVUSMLALL_MULTI_VG4x4 : Inst<"svusmla_za32[_{d}]_vg4x4", "vm4.d4.x", "Uc", MergeNone, "aarch64_sme_usmla_za32_vg4x4", [IsStreaming, IsInOutZA], []>;
+
+ // INDEXED USMLALL
+ def SVUSMLALL_LANE_VG4x1 : Inst<"svusmla_lane_za32[_{d}]_vg4x1", "vmdxi", "Uc", MergeNone, "aarch64_sme_usmla_za32_lane_vg4x1", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_15>]>;
+ def SVUSMLALL_LANE_VG4x2 : Inst<"svusmla_lane_za32[_{d}]_vg4x2", "vm2xi", "Uc", MergeNone, "aarch64_sme_usmla_za32_lane_vg4x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_15>]>;
+ def SVUSMLALL_LANE_VG4x4 : Inst<"svusmla_lane_za32[_{d}]_vg4x4", "vm4xi", "Uc", MergeNone, "aarch64_sme_usmla_za32_lane_vg4x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_15>]>;
+}
+
+let TargetGuard = "sme2,sme-i16i64" in {
+ // MULTI MLAL
+ def SVMLAL_MULTI_VG4x2_S16 : Inst<"svmla_za64[_{d}]_vg4x2", "vm22", "s", MergeNone, "aarch64_sme_smla_za64_vg4x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_MULTI_VG4x2_U16 : Inst<"svmla_za64[_{d}]_vg4x2", "vm22", "Us", MergeNone, "aarch64_sme_umla_za64_vg4x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_MULTI_VG4x4_S16 : Inst<"svmla_za64[_{d}]_vg4x4", "vm44", "s", MergeNone, "aarch64_sme_smla_za64_vg4x4", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_MULTI_VG4x4_U16 : Inst<"svmla_za64[_{d}]_vg4x4", "vm44", "Us", MergeNone, "aarch64_sme_umla_za64_vg4x4", [IsStreaming, IsInOutZA], []>;
+
+ // MULTI MLSL
+ def SVMLSL_MULTI_VG4x2_S16 : Inst<"svmls_za64[_{d}]_vg4x2", "vm22", "s", MergeNone, "aarch64_sme_smls_za64_vg4x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_MULTI_VG4x2_U16 : Inst<"svmls_za64[_{d}]_vg4x2", "vm22", "Us", MergeNone, "aarch64_sme_umls_za64_vg4x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_MULTI_VG4x4_S16 : Inst<"svmls_za64[_{d}]_vg4x4", "vm44", "s", MergeNone, "aarch64_sme_smls_za64_vg4x4", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_MULTI_VG4x4_U16 : Inst<"svmls_za64[_{d}]_vg4x4", "vm44", "Us", MergeNone, "aarch64_sme_umls_za64_vg4x4", [IsStreaming, IsInOutZA], []>;
+
+ // SINGLE MLAL
+ def SVMLAL_SINGLE_VG4x1_S16 : Inst<"svmla_za64[_{d}]_vg4x1", "vmdd", "s", MergeNone, "aarch64_sme_smla_za64_single_vg4x1", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_SINGLE_VG4x1_U16 : Inst<"svmla_za64[_{d}]_vg4x1", "vmdd", "Us", MergeNone, "aarch64_sme_umla_za64_single_vg4x1", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_SINGLE_VG4x2_S16 : Inst<"svmla[_single]_za64[_{d}]_vg4x2", "vm2d", "s", MergeNone, "aarch64_sme_smla_za64_single_vg4x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_SINGLE_VG4x2_U16 : Inst<"svmla[_single]_za64[_{d}]_vg4x2", "vm2d", "Us", MergeNone, "aarch64_sme_umla_za64_single_vg4x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_SINGLE_VG4x4_S16 : Inst<"svmla[_single]_za64[_{d}]_vg4x4", "vm4d", "s", MergeNone, "aarch64_sme_smla_za64_single_vg4x4", [IsStreaming, IsInOutZA], []>;
+ def SVMLAL_SINGLE_VG4x4_U16 : Inst<"svmla[_single]_za64[_{d}]_vg4x4", "vm4d", "Us", MergeNone, "aarch64_sme_umla_za64_single_vg4x4", [IsStreaming, IsInOutZA], []>;
+
+ // SINGLE MLSL
+ def SVMLSL_SINGLE_VG4x1_S16 : Inst<"svmls_za64[_{d}]_vg4x1", "vmdd", "s", MergeNone, "aarch64_sme_smls_za64_single_vg4x1", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_SINGLE_VG4x1_U16 : Inst<"svmls_za64[_{d}]_vg4x1", "vmdd", "Us", MergeNone, "aarch64_sme_umls_za64_single_vg4x1", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_SINGLE_VG4x2_S16 : Inst<"svmls[_single]_za64[_{d}]_vg4x2", "vm2d", "s", MergeNone, "aarch64_sme_smls_za64_single_vg4x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_SINGLE_VG4x2_U16 : Inst<"svmls[_single]_za64[_{d}]_vg4x2", "vm2d", "Us", MergeNone, "aarch64_sme_umls_za64_single_vg4x2", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_SINGLE_VG4x4_S16 : Inst<"svmls[_single]_za64[_{d}]_vg4x4", "vm4d", "s", MergeNone, "aarch64_sme_smls_za64_single_vg4x4", [IsStreaming, IsInOutZA], []>;
+ def SVMLSL_SINGLE_VG4x4_U16 : Inst<"svmls[_single]_za64[_{d}]_vg4x4", "vm4d", "Us", MergeNone, "aarch64_sme_umls_za64_single_vg4x4", [IsStreaming, IsInOutZA], []>;
+
+ // INDEXED MLAL
+ def SVMLAL_LANE_VG4x1_S16 : Inst<"svmla_lane_za64[_{d}]_vg4x1", "vmddi", "s", MergeNone, "aarch64_sme_smla_za64_lane_vg4x1", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLAL_LANE_VG4x1_U16 : Inst<"svmla_lane_za64[_{d}]_vg4x1", "vmddi", "Us", MergeNone, "aarch64_sme_umla_za64_lane_vg4x1", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLAL_LANE_VG4x2_S16 : Inst<"svmla_lane_za64[_{d}]_vg4x2", "vm2di", "s", MergeNone, "aarch64_sme_smla_za64_lane_vg4x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLAL_LANE_VG4x2_U16 : Inst<"svmla_lane_za64[_{d}]_vg4x2", "vm2di", "Us", MergeNone, "aarch64_sme_umla_za64_lane_vg4x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLAL_LANE_VG4x4_S16 : Inst<"svmla_lane_za64[_{d}]_vg4x4", "vm4di", "s", MergeNone, "aarch64_sme_smla_za64_lane_vg4x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLAL_LANE_VG4x4_U16 : Inst<"svmla_lane_za64[_{d}]_vg4x4", "vm4di", "Us", MergeNone, "aarch64_sme_umla_za64_lane_vg4x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+
+ // INDEXED MLSL
+ def SVMLSL_LANE_VG4x1_S16 : Inst<"svmls_lane_za64[_{d}]_vg4x1", "vmddi", "s", MergeNone, "aarch64_sme_smls_za64_lane_vg4x1", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLSL_LANE_VG4x1_U16 : Inst<"svmls_lane_za64[_{d}]_vg4x1", "vmddi", "Us", MergeNone, "aarch64_sme_umls_za64_lane_vg4x1", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLSL_LANE_VG4x2_S16 : Inst<"svmls_lane_za64[_{d}]_vg4x2", "vm2di", "s", MergeNone, "aarch64_sme_smls_za64_lane_vg4x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLSL_LANE_VG4x2_U16 : Inst<"svmls_lane_za64[_{d}]_vg4x2", "vm2di", "Us", MergeNone, "aarch64_sme_umls_za64_lane_vg4x2", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLSL_LANE_VG4x4_S16 : Inst<"svmls_lane_za64[_{d}]_vg4x4", "vm4di", "s", MergeNone, "aarch64_sme_smls_za64_lane_vg4x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVMLSL_LANE_VG4x4_U16 : Inst<"svmls_lane_za64[_{d}]_vg4x4", "vm4di", "Us", MergeNone, "aarch64_sme_umls_za64_lane_vg4x4", [IsStreaming, IsInOutZA], [ImmCheck<3, ImmCheck0_7>]>;
+}
+
+//
+// Spill and fill of ZT0
+//
+let TargetGuard = "sme2" in {
+ def SVLDR_ZT : Inst<"svldr_zt", "viQ", "", MergeNone, "aarch64_sme_ldr_zt", [IsOverloadNone, IsStreamingCompatible, IsInOutZT0], [ImmCheck<0, ImmCheck0_0>]>;
+ def SVSTR_ZT : Inst<"svstr_zt", "vi%", "", MergeNone, "aarch64_sme_str_zt", [IsOverloadNone, IsStreamingCompatible, IsInZT0], [ImmCheck<0, ImmCheck0_0>]>;
+}
+
+//
+// Zero ZT0
+//
+let TargetGuard = "sme2" in {
+ def SVZERO_ZT : Inst<"svzero_zt", "vi", "", MergeNone, "aarch64_sme_zero_zt", [IsOverloadNone, IsStreamingCompatible, IsOutZT0], [ImmCheck<0, ImmCheck0_0>]>;
+}
+
+//
+// lookup table expand four contiguous registers
+//
+let TargetGuard = "sme2" in {
+ def SVLUTI2_LANE_ZT_X4 : Inst<"svluti2_lane_zt_{d}_x4", "4.di[i", "cUcsUsiUibhf", MergeNone, "aarch64_sme_luti2_lane_zt_x4", [IsStreaming, IsInZT0], [ImmCheck<0, ImmCheck0_0>, ImmCheck<2, ImmCheck0_3>]>;
+ def SVLUTI4_LANE_ZT_X4 : Inst<"svluti4_lane_zt_{d}_x4", "4.di[i", "sUsiUibhf", MergeNone, "aarch64_sme_luti4_lane_zt_x4", [IsStreaming, IsInZT0], [ImmCheck<0, ImmCheck0_0>, ImmCheck<2, ImmCheck0_1>]>;
+}
+
+//
+// lookup table expand one register
+//
+let TargetGuard = "sme2" in {
+ def SVLUTI2_LANE_ZT : Inst<"svluti2_lane_zt_{d}", "di[i", "cUcsUsiUibhf", MergeNone, "aarch64_sme_luti2_lane_zt", [IsStreaming, IsInZT0], [ImmCheck<0, ImmCheck0_0>, ImmCheck<2, ImmCheck0_15>]>;
+ def SVLUTI4_LANE_ZT : Inst<"svluti4_lane_zt_{d}", "di[i", "cUcsUsiUibhf", MergeNone, "aarch64_sme_luti4_lane_zt", [IsStreaming, IsInZT0], [ImmCheck<0, ImmCheck0_0>, ImmCheck<2, ImmCheck0_7>]>;
+}
+
+//
+// lookup table expand two contiguous registers
+//
+let TargetGuard = "sme2" in {
+ def SVLUTI2_LANE_ZT_X2 : Inst<"svluti2_lane_zt_{d}_x2", "2.di[i", "cUcsUsiUibhf", MergeNone, "aarch64_sme_luti2_lane_zt_x2", [IsStreaming, IsInZT0], [ImmCheck<0, ImmCheck0_0>, ImmCheck<2, ImmCheck0_7>]>;
+ def SVLUTI4_LANE_ZT_X2 : Inst<"svluti4_lane_zt_{d}_x2", "2.di[i", "cUcsUsiUibhf", MergeNone, "aarch64_sme_luti4_lane_zt_x2", [IsStreaming, IsInZT0], [ImmCheck<0, ImmCheck0_0>, ImmCheck<2, ImmCheck0_3>]>;
+}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/arm_sve.td b/contrib/llvm-project/clang/include/clang/Basic/arm_sve.td
index 5e9d1c96558b..6da30e08e752 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/arm_sve.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/arm_sve.td
@@ -13,277 +13,33 @@
//
//===----------------------------------------------------------------------===//
-//===----------------------------------------------------------------------===//
-// Instruction definitions
-//===----------------------------------------------------------------------===//
-// Every intrinsic subclasses "Inst". An intrinsic has a name, a prototype and
-// a sequence of typespecs.
-//
-// The name is the base name of the intrinsic, for example "svld1". This is
-// then mangled by the tblgen backend to add type information ("svld1_s16").
-//
-// A typespec is a sequence of uppercase characters (modifiers) followed by one
-// lowercase character. A typespec encodes a particular "base type" of the
-// intrinsic.
-//
-// An example typespec is "Us" - unsigned short - svuint16_t. The available
-// typespec codes are given below.
-//
-// The string given to an Inst class is a sequence of typespecs. The intrinsic
-// is instantiated for every typespec in the sequence. For example "sdUsUd".
-//
-// The prototype is a string that defines the return type of the intrinsic
-// and the type of each argument. The return type and every argument gets a
-// "modifier" that can change in some way the "base type" of the intrinsic.
-//
-// The modifier 'd' means "default" and does not modify the base type in any
-// way. The available modifiers are given below.
-//
-// Typespecs
-// ---------
-// c: char
-// s: short
-// i: int
-// l: long
-// f: float
-// h: half-float
-// d: double
-// b: bfloat
-
-// Typespec modifiers
-// ------------------
-// P: boolean
-// U: unsigned
-
-// Prototype modifiers
-// -------------------
-// prototype: return (arg, arg, ...)
-//
-// 2,3,4: array of default vectors
-// v: void
-// x: vector of signed integers
-// u: vector of unsigned integers
-// d: default
-// c: const pointer type
-// P: predicate type
-// s: scalar of element type
-// a: scalar of element type (splat to vector type)
-// R: scalar of 1/2 width element type (splat to vector type)
-// r: scalar of 1/4 width element type (splat to vector type)
-// @: unsigned scalar of 1/4 width element type (splat to vector type)
-// e: 1/2 width unsigned elements, 2x element count
-// b: 1/4 width unsigned elements, 4x element count
-// h: 1/2 width elements, 2x element count
-// q: 1/4 width elements, 4x element count
-// o: 4x width elements, 1/4 element count
-//
-// w: vector of element type promoted to 64bits, vector maintains
-// signedness of its element type.
-// f: element type promoted to uint64_t (splat to vector type)
-// j: element type promoted to 64bits (splat to vector type)
-// K: element type bitcast to a signed integer (splat to vector type)
-// L: element type bitcast to an unsigned integer (splat to vector type)
-//
-// i: constant uint64_t
-// k: int32_t
-// l: int64_t
-// m: uint32_t
-// n: uint64_t
-
-// t: svint32_t
-// z: svuint32_t
-// g: svuint64_t
-// O: svfloat16_t
-// M: svfloat32_t
-// N: svfloat64_t
-
-// J: Prefetch type (sv_prfop)
-// A: pointer to int8_t
-// B: pointer to int16_t
-// C: pointer to int32_t
-// D: pointer to int64_t
-
-// E: pointer to uint8_t
-// F: pointer to uint16_t
-// G: pointer to uint32_t
-// H: pointer to uint64_t
-
-// Q: const pointer to void
-
-// S: const pointer to int8_t
-// T: const pointer to int16_t
-// U: const pointer to int32_t
-// V: const pointer to int64_t
-//
-// W: const pointer to uint8_t
-// X: const pointer to uint16_t
-// Y: const pointer to uint32_t
-// Z: const pointer to uint64_t
-
-class MergeType<int val, string suffix=""> {
- int Value = val;
- string Suffix = suffix;
-}
-def MergeNone : MergeType<0>;
-def MergeAny : MergeType<1, "_x">;
-def MergeOp1 : MergeType<2, "_m">;
-def MergeZero : MergeType<3, "_z">;
-def MergeAnyExp : MergeType<4, "_x">; // Use merged builtin with explicit
-def MergeZeroExp : MergeType<5, "_z">; // generation of its inactive argument.
-
-class EltType<int val> {
- int Value = val;
-}
-def EltTyInvalid : EltType<0>;
-def EltTyInt8 : EltType<1>;
-def EltTyInt16 : EltType<2>;
-def EltTyInt32 : EltType<3>;
-def EltTyInt64 : EltType<4>;
-def EltTyFloat16 : EltType<5>;
-def EltTyFloat32 : EltType<6>;
-def EltTyFloat64 : EltType<7>;
-def EltTyBool8 : EltType<8>;
-def EltTyBool16 : EltType<9>;
-def EltTyBool32 : EltType<10>;
-def EltTyBool64 : EltType<11>;
-def EltTyBFloat16 : EltType<12>;
-
-class MemEltType<int val> {
- int Value = val;
-}
-def MemEltTyDefault : MemEltType<0>;
-def MemEltTyInt8 : MemEltType<1>;
-def MemEltTyInt16 : MemEltType<2>;
-def MemEltTyInt32 : MemEltType<3>;
-def MemEltTyInt64 : MemEltType<4>;
-
-class FlagType<int val> {
- int Value = val;
-}
-
-// These must be kept in sync with the flags in utils/TableGen/SveEmitter.h
-// and include/clang/Basic/TargetBuiltins.h
-def NoFlags : FlagType<0x00000000>;
-def FirstEltType : FlagType<0x00000001>;
-// : :
-// : :
-def EltTypeMask : FlagType<0x0000000f>;
-def FirstMemEltType : FlagType<0x00000010>;
-// : :
-// : :
-def MemEltTypeMask : FlagType<0x00000070>;
-def FirstMergeTypeMask : FlagType<0x00000080>;
-// : :
-// : :
-def MergeTypeMask : FlagType<0x00000380>;
-def FirstSplatOperand : FlagType<0x00000400>;
-// : :
-// These flags are used to specify which scalar operand
-// needs to be duplicated/splatted into a vector.
-// : :
-def SplatOperandMask : FlagType<0x00001C00>;
-def IsLoad : FlagType<0x00002000>;
-def IsStore : FlagType<0x00004000>;
-def IsGatherLoad : FlagType<0x00008000>;
-def IsScatterStore : FlagType<0x00010000>;
-def IsStructLoad : FlagType<0x00020000>;
-def IsStructStore : FlagType<0x00040000>;
-def IsZExtReturn : FlagType<0x00080000>; // Return value is sign-extend by default
-def IsOverloadNone : FlagType<0x00100000>; // Intrinsic does not take any overloaded types.
-def IsOverloadWhile : FlagType<0x00200000>; // Use {default type, typeof(operand1)} as overloaded types.
-def IsOverloadWhileRW : FlagType<0x00400000>; // Use {pred(default type), typeof(operand0)} as overloaded types.
-def IsOverloadCvt : FlagType<0x00800000>; // Use {typeof(operand0), typeof(last operand)} as overloaded types.
-def OverloadKindMask : FlagType<0x00E00000>; // When the masked values are all '0', the default type is used as overload type.
-def IsByteIndexed : FlagType<0x01000000>;
-def IsAppendSVALL : FlagType<0x02000000>; // Appends SV_ALL as the last operand.
-def IsInsertOp1SVALL : FlagType<0x04000000>; // Inserts SV_ALL as the second operand.
-def IsPrefetch : FlagType<0x08000000>; // Contiguous prefetches.
-def IsGatherPrefetch : FlagType<0x10000000>;
-def ReverseCompare : FlagType<0x20000000>; // Compare operands must be swapped.
-def ReverseUSDOT : FlagType<0x40000000>; // Unsigned/signed operands must be swapped.
-def IsUndef : FlagType<0x80000000>; // Codegen `undef` of given type.
-def IsTupleCreate : FlagType<0x100000000>;
-def IsTupleGet : FlagType<0x200000000>;
-def IsTupleSet : FlagType<0x400000000>;
-
-// These must be kept in sync with the flags in include/clang/Basic/TargetBuiltins.h
-class ImmCheckType<int val> {
- int Value = val;
-}
-def ImmCheck0_31 : ImmCheckType<0>; // 0..31 (used for e.g. predicate patterns)
-def ImmCheck1_16 : ImmCheckType<1>; // 1..16
-def ImmCheckExtract : ImmCheckType<2>; // 0..(2048/sizeinbits(elt) - 1)
-def ImmCheckShiftRight : ImmCheckType<3>; // 1..sizeinbits(elt)
-def ImmCheckShiftRightNarrow : ImmCheckType<4>; // 1..sizeinbits(elt)/2
-def ImmCheckShiftLeft : ImmCheckType<5>; // 0..(sizeinbits(elt) - 1)
-def ImmCheck0_7 : ImmCheckType<6>; // 0..7
-def ImmCheckLaneIndex : ImmCheckType<7>; // 0..(128/(1*sizeinbits(elt)) - 1)
-def ImmCheckLaneIndexCompRotate : ImmCheckType<8>; // 0..(128/(2*sizeinbits(elt)) - 1)
-def ImmCheckLaneIndexDot : ImmCheckType<9>; // 0..(128/(4*sizeinbits(elt)) - 1)
-def ImmCheckComplexRot90_270 : ImmCheckType<10>; // [90,270]
-def ImmCheckComplexRotAll90 : ImmCheckType<11>; // [0, 90, 180,270]
-def ImmCheck0_13 : ImmCheckType<12>; // 0..13
-def ImmCheck0_1 : ImmCheckType<13>; // 0..1
-def ImmCheck0_2 : ImmCheckType<14>; // 0..2
-def ImmCheck0_3 : ImmCheckType<15>; // 0..3
-
-class ImmCheck<int arg, ImmCheckType kind, int eltSizeArg = -1> {
- int Arg = arg;
- int EltSizeArg = eltSizeArg;
- ImmCheckType Kind = kind;
-}
-
-class Inst<string n, string p, string t, MergeType mt, string i,
- list<FlagType> ft, list<ImmCheck> ch, MemEltType met> {
- string Name = n;
- string Prototype = p;
- string Types = t;
- string ArchGuard = "";
- int Merge = mt.Value;
- string MergeSuffix = mt.Suffix;
- string LLVMIntrinsic = i;
- list<FlagType> Flags = ft;
- list<ImmCheck> ImmChecks = ch;
- int MemEltType = met.Value;
-}
-
-// SInst: Instruction with signed/unsigned suffix (e.g., "s8", "u8")
-class SInst<string n, string p, string t, MergeType mt, string i = "",
- list<FlagType> ft = [], list<ImmCheck> ch = []>
- : Inst<n, p, t, mt, i, ft, ch, MemEltTyDefault> {
-}
-
-// MInst: Instructions which access memory
-class MInst<string n, string p, string t, list<FlagType> f,
- MemEltType met = MemEltTyDefault, string i = "">
- : Inst<n, p, t, MergeNone, i, f, [], met> {
-}
+include "arm_sve_sme_incl.td"
////////////////////////////////////////////////////////////////////////////////
// Loads
// Load one vector (scalar base)
-def SVLD1 : MInst<"svld1[_{2}]", "dPc", "csilUcUsUiUlhfd", [IsLoad], MemEltTyDefault, "aarch64_sve_ld1">;
-def SVLD1SB : MInst<"svld1sb_{d}", "dPS", "silUsUiUl", [IsLoad], MemEltTyInt8, "aarch64_sve_ld1">;
-def SVLD1UB : MInst<"svld1ub_{d}", "dPW", "silUsUiUl", [IsLoad, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ld1">;
-def SVLD1SH : MInst<"svld1sh_{d}", "dPT", "ilUiUl", [IsLoad], MemEltTyInt16, "aarch64_sve_ld1">;
-def SVLD1UH : MInst<"svld1uh_{d}", "dPX", "ilUiUl", [IsLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ld1">;
-def SVLD1SW : MInst<"svld1sw_{d}", "dPU", "lUl", [IsLoad], MemEltTyInt32, "aarch64_sve_ld1">;
-def SVLD1UW : MInst<"svld1uw_{d}", "dPY", "lUl", [IsLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ld1">;
+def SVLD1 : MInst<"svld1[_{2}]", "dPc", "csilUcUsUiUlhfd", [IsLoad, IsStreamingCompatible], MemEltTyDefault, "aarch64_sve_ld1">;
+def SVLD1SB : MInst<"svld1sb_{d}", "dPS", "silUsUiUl", [IsLoad, IsStreamingCompatible], MemEltTyInt8, "aarch64_sve_ld1">;
+def SVLD1UB : MInst<"svld1ub_{d}", "dPW", "silUsUiUl", [IsLoad, IsZExtReturn, IsStreamingCompatible], MemEltTyInt8, "aarch64_sve_ld1">;
+def SVLD1SH : MInst<"svld1sh_{d}", "dPT", "ilUiUl", [IsLoad, IsStreamingCompatible], MemEltTyInt16, "aarch64_sve_ld1">;
+def SVLD1UH : MInst<"svld1uh_{d}", "dPX", "ilUiUl", [IsLoad, IsZExtReturn, IsStreamingCompatible], MemEltTyInt16, "aarch64_sve_ld1">;
+def SVLD1SW : MInst<"svld1sw_{d}", "dPU", "lUl", [IsLoad, IsStreamingCompatible], MemEltTyInt32, "aarch64_sve_ld1">;
+def SVLD1UW : MInst<"svld1uw_{d}", "dPY", "lUl", [IsLoad, IsZExtReturn, IsStreamingCompatible], MemEltTyInt32, "aarch64_sve_ld1">;
-let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
- def SVLD1_BF : MInst<"svld1[_{2}]", "dPc", "b", [IsLoad], MemEltTyDefault, "aarch64_sve_ld1">;
- def SVLD1_VNUM_BF : MInst<"svld1_vnum[_{2}]", "dPcl", "b", [IsLoad], MemEltTyDefault, "aarch64_sve_ld1">;
+let TargetGuard = "sve,bf16" in {
+ def SVLD1_BF : MInst<"svld1[_{2}]", "dPc", "b", [IsLoad, IsStreamingCompatible], MemEltTyDefault, "aarch64_sve_ld1">;
+ def SVLD1_VNUM_BF : MInst<"svld1_vnum[_{2}]", "dPcl", "b", [IsLoad, IsStreamingCompatible], MemEltTyDefault, "aarch64_sve_ld1">;
}
// Load one vector (scalar base, VL displacement)
-def SVLD1_VNUM : MInst<"svld1_vnum[_{2}]", "dPcl", "csilUcUsUiUlhfd", [IsLoad], MemEltTyDefault, "aarch64_sve_ld1">;
-def SVLD1SB_VNUM : MInst<"svld1sb_vnum_{d}", "dPSl", "silUsUiUl", [IsLoad], MemEltTyInt8, "aarch64_sve_ld1">;
-def SVLD1UB_VNUM : MInst<"svld1ub_vnum_{d}", "dPWl", "silUsUiUl", [IsLoad, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ld1">;
-def SVLD1SH_VNUM : MInst<"svld1sh_vnum_{d}", "dPTl", "ilUiUl", [IsLoad], MemEltTyInt16, "aarch64_sve_ld1">;
-def SVLD1UH_VNUM : MInst<"svld1uh_vnum_{d}", "dPXl", "ilUiUl", [IsLoad, IsZExtReturn], MemEltTyInt16, "aarch64_sve_ld1">;
-def SVLD1SW_VNUM : MInst<"svld1sw_vnum_{d}", "dPUl", "lUl", [IsLoad], MemEltTyInt32, "aarch64_sve_ld1">;
-def SVLD1UW_VNUM : MInst<"svld1uw_vnum_{d}", "dPYl", "lUl", [IsLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ld1">;
+def SVLD1_VNUM : MInst<"svld1_vnum[_{2}]", "dPcl", "csilUcUsUiUlhfd", [IsLoad, IsStreamingCompatible], MemEltTyDefault, "aarch64_sve_ld1">;
+def SVLD1SB_VNUM : MInst<"svld1sb_vnum_{d}", "dPSl", "silUsUiUl", [IsLoad, IsStreamingCompatible], MemEltTyInt8, "aarch64_sve_ld1">;
+def SVLD1UB_VNUM : MInst<"svld1ub_vnum_{d}", "dPWl", "silUsUiUl", [IsLoad, IsZExtReturn, IsStreamingCompatible], MemEltTyInt8, "aarch64_sve_ld1">;
+def SVLD1SH_VNUM : MInst<"svld1sh_vnum_{d}", "dPTl", "ilUiUl", [IsLoad, IsStreamingCompatible], MemEltTyInt16, "aarch64_sve_ld1">;
+def SVLD1UH_VNUM : MInst<"svld1uh_vnum_{d}", "dPXl", "ilUiUl", [IsLoad, IsZExtReturn, IsStreamingCompatible], MemEltTyInt16, "aarch64_sve_ld1">;
+def SVLD1SW_VNUM : MInst<"svld1sw_vnum_{d}", "dPUl", "lUl", [IsLoad, IsStreamingCompatible], MemEltTyInt32, "aarch64_sve_ld1">;
+def SVLD1UW_VNUM : MInst<"svld1uw_vnum_{d}", "dPYl", "lUl", [IsLoad, IsZExtReturn, IsStreamingCompatible], MemEltTyInt32, "aarch64_sve_ld1">;
// Load one vector (vector base)
def SVLD1_GATHER_BASES_U : MInst<"svld1_gather[_{2}base]_{d}", "dPu", "ilUiUlfd", [IsGatherLoad], MemEltTyDefault, "aarch64_sve_ld1_gather_scalar_offset">;
@@ -381,7 +137,7 @@ def SVLDFF1UH_VNUM : MInst<"svldff1uh_vnum_{d}", "dPXl", "ilUiUl", [IsL
def SVLDFF1SW_VNUM : MInst<"svldff1sw_vnum_{d}", "dPUl", "lUl", [IsLoad], MemEltTyInt32, "aarch64_sve_ldff1">;
def SVLDFF1UW_VNUM : MInst<"svldff1uw_vnum_{d}", "dPYl", "lUl", [IsLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldff1">;
-let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+let TargetGuard = "sve,bf16" in {
def SVLDFF1_BF : MInst<"svldff1[_{2}]", "dPc", "b", [IsLoad], MemEltTyDefault, "aarch64_sve_ldff1">;
def SVLDFF1_VNUM_BF : MInst<"svldff1_vnum[_{2}]", "dPcl", "b", [IsLoad], MemEltTyDefault, "aarch64_sve_ldff1">;
}
@@ -481,91 +237,123 @@ def SVLDNF1UH_VNUM : MInst<"svldnf1uh_vnum_{d}", "dPXl", "ilUiUl", [IsL
def SVLDNF1SW_VNUM : MInst<"svldnf1sw_vnum_{d}", "dPUl", "lUl", [IsLoad], MemEltTyInt32, "aarch64_sve_ldnf1">;
def SVLDNF1UW_VNUM : MInst<"svldnf1uw_vnum_{d}", "dPYl", "lUl", [IsLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldnf1">;
-let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+let TargetGuard = "sve,bf16" in {
def SVLDNF1_BF : MInst<"svldnf1[_{2}]", "dPc", "b", [IsLoad], MemEltTyDefault, "aarch64_sve_ldnf1">;
def SVLDNF1_VNUM_BF : MInst<"svldnf1_vnum[_{2}]", "dPcl", "b", [IsLoad], MemEltTyDefault, "aarch64_sve_ldnf1">;
}
// Load one vector, unextended load, non-temporal (scalar base)
-def SVLDNT1 : MInst<"svldnt1[_{2}]", "dPc", "csilUcUsUiUlhfd", [IsLoad], MemEltTyDefault, "aarch64_sve_ldnt1">;
+def SVLDNT1 : MInst<"svldnt1[_{2}]", "dPc", "csilUcUsUiUlhfd", [IsLoad, IsStreamingCompatible], MemEltTyDefault, "aarch64_sve_ldnt1">;
// Load one vector, unextended load, non-temporal (scalar base, VL displacement)
-def SVLDNT1_VNUM : MInst<"svldnt1_vnum[_{2}]", "dPcl", "csilUcUsUiUlhfd", [IsLoad], MemEltTyDefault, "aarch64_sve_ldnt1">;
+def SVLDNT1_VNUM : MInst<"svldnt1_vnum[_{2}]", "dPcl", "csilUcUsUiUlhfd", [IsLoad, IsStreamingCompatible], MemEltTyDefault, "aarch64_sve_ldnt1">;
-let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
- def SVLDNT1_BF : MInst<"svldnt1[_{2}]", "dPc", "b", [IsLoad], MemEltTyDefault, "aarch64_sve_ldnt1">;
- def SVLDNT1_VNUM_BF : MInst<"svldnt1_vnum[_{2}]", "dPcl", "b", [IsLoad], MemEltTyDefault, "aarch64_sve_ldnt1">;
+let TargetGuard = "sve,bf16" in {
+ def SVLDNT1_BF : MInst<"svldnt1[_{2}]", "dPc", "b", [IsLoad, IsStreamingCompatible], MemEltTyDefault, "aarch64_sve_ldnt1">;
+ def SVLDNT1_VNUM_BF : MInst<"svldnt1_vnum[_{2}]", "dPcl", "b", [IsLoad, IsStreamingCompatible], MemEltTyDefault, "aarch64_sve_ldnt1">;
}
// Load one quadword and replicate (scalar base)
-def SVLD1RQ : SInst<"svld1rq[_{2}]", "dPc", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_ld1rq">;
+def SVLD1RQ : SInst<"svld1rq[_{2}]", "dPc", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_ld1rq", [IsStreamingCompatible]>;
-let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
- def SVLD1RQ_BF : SInst<"svld1rq[_{2}]", "dPc", "b", MergeNone, "aarch64_sve_ld1rq">;
+let TargetGuard = "sve,bf16" in {
+ def SVLD1RQ_BF : SInst<"svld1rq[_{2}]", "dPc", "b", MergeNone, "aarch64_sve_ld1rq", [IsStreamingCompatible]>;
}
multiclass StructLoad<string name, string proto, string i> {
- def : SInst<name, proto, "csilUcUsUiUlhfd", MergeNone, i, [IsStructLoad]>;
- let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
- def: SInst<name, proto, "b", MergeNone, i, [IsStructLoad]>;
+ def : SInst<name, proto, "csilUcUsUiUlhfd", MergeNone, i, [IsStructLoad, IsStreamingCompatible]>;
+ let TargetGuard = "sve,bf16" in {
+ def: SInst<name, proto, "b", MergeNone, i, [IsStructLoad, IsStreamingCompatible]>;
}
}
// Load N-element structure into N vectors (scalar base)
-defm SVLD2 : StructLoad<"svld2[_{2}]", "2Pc", "aarch64_sve_ld2">;
-defm SVLD3 : StructLoad<"svld3[_{2}]", "3Pc", "aarch64_sve_ld3">;
-defm SVLD4 : StructLoad<"svld4[_{2}]", "4Pc", "aarch64_sve_ld4">;
+defm SVLD2 : StructLoad<"svld2[_{2}]", "2Pc", "aarch64_sve_ld2_sret">;
+defm SVLD3 : StructLoad<"svld3[_{2}]", "3Pc", "aarch64_sve_ld3_sret">;
+defm SVLD4 : StructLoad<"svld4[_{2}]", "4Pc", "aarch64_sve_ld4_sret">;
// Load N-element structure into N vectors (scalar base, VL displacement)
-defm SVLD2_VNUM : StructLoad<"svld2_vnum[_{2}]", "2Pcl", "aarch64_sve_ld2">;
-defm SVLD3_VNUM : StructLoad<"svld3_vnum[_{2}]", "3Pcl", "aarch64_sve_ld3">;
-defm SVLD4_VNUM : StructLoad<"svld4_vnum[_{2}]", "4Pcl", "aarch64_sve_ld4">;
+defm SVLD2_VNUM : StructLoad<"svld2_vnum[_{2}]", "2Pcl", "aarch64_sve_ld2_sret">;
+defm SVLD3_VNUM : StructLoad<"svld3_vnum[_{2}]", "3Pcl", "aarch64_sve_ld3_sret">;
+defm SVLD4_VNUM : StructLoad<"svld4_vnum[_{2}]", "4Pcl", "aarch64_sve_ld4_sret">;
// Load one octoword and replicate (scalar base)
-let ArchGuard = "defined(__ARM_FEATURE_SVE_MATMUL_FP64)" in {
+let TargetGuard = "sve,f64mm" in {
def SVLD1RO : SInst<"svld1ro[_{2}]", "dPc", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_ld1ro">;
}
-let ArchGuard = "defined(__ARM_FEATURE_SVE_MATMUL_FP64) && defined(__ARM_FEATURE_SVE_BF16)" in {
+let TargetGuard = "sve,f64mm,bf16" in {
def SVLD1RO_BF16 : SInst<"svld1ro[_{2}]", "dPc", "b", MergeNone, "aarch64_sve_ld1ro">;
}
-let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
- def SVBFDOT : SInst<"svbfdot[_{0}]", "MMdd", "b", MergeNone, "aarch64_sve_bfdot", [IsOverloadNone]>;
- def SVBFMLALB : SInst<"svbfmlalb[_{0}]", "MMdd", "b", MergeNone, "aarch64_sve_bfmlalb", [IsOverloadNone]>;
- def SVBFMLALT : SInst<"svbfmlalt[_{0}]", "MMdd", "b", MergeNone, "aarch64_sve_bfmlalt", [IsOverloadNone]>;
- def SVBFMMLA : SInst<"svbfmmla[_{0}]", "MMdd", "b", MergeNone, "aarch64_sve_bfmmla", [IsOverloadNone]>;
- def SVBFDOT_N : SInst<"svbfdot[_n_{0}]", "MMda", "b", MergeNone, "aarch64_sve_bfdot", [IsOverloadNone]>;
- def SVBFMLAL_N : SInst<"svbfmlalb[_n_{0}]", "MMda", "b", MergeNone, "aarch64_sve_bfmlalb", [IsOverloadNone]>;
- def SVBFMLALT_N : SInst<"svbfmlalt[_n_{0}]", "MMda", "b", MergeNone, "aarch64_sve_bfmlalt", [IsOverloadNone]>;
- def SVBFDOT_LANE : SInst<"svbfdot_lane[_{0}]", "MMddn", "b", MergeNone, "aarch64_sve_bfdot_lane", [IsOverloadNone], [ImmCheck<3, ImmCheck0_3>]>;
- def SVBFMLALB_LANE : SInst<"svbfmlalb_lane[_{0}]", "MMddn", "b", MergeNone, "aarch64_sve_bfmlalb_lane", [IsOverloadNone], [ImmCheck<3, ImmCheck0_7>]>;
- def SVBFMLALT_LANE : SInst<"svbfmlalt_lane[_{0}]", "MMddn", "b", MergeNone, "aarch64_sve_bfmlalt_lane", [IsOverloadNone], [ImmCheck<3, ImmCheck0_7>]>;
+let TargetGuard = "sve,bf16" in {
+ def SVBFDOT : SInst<"svbfdot[_{0}]", "MMdd", "b", MergeNone, "aarch64_sve_bfdot", [IsOverloadNone, IsStreamingCompatible]>;
+ def SVBFMLALB : SInst<"svbfmlalb[_{0}]", "MMdd", "b", MergeNone, "aarch64_sve_bfmlalb", [IsOverloadNone, IsStreamingCompatible]>;
+ def SVBFMLALT : SInst<"svbfmlalt[_{0}]", "MMdd", "b", MergeNone, "aarch64_sve_bfmlalt", [IsOverloadNone, IsStreamingCompatible]>;
+ def SVBFMMLA : SInst<"svbfmmla[_{0}]", "MMdd", "b", MergeNone, "aarch64_sve_bfmmla", [IsOverloadNone, IsStreamingCompatible]>;
+ def SVBFDOT_N : SInst<"svbfdot[_n_{0}]", "MMda", "b", MergeNone, "aarch64_sve_bfdot", [IsOverloadNone, IsStreamingCompatible]>;
+ def SVBFMLAL_N : SInst<"svbfmlalb[_n_{0}]", "MMda", "b", MergeNone, "aarch64_sve_bfmlalb", [IsOverloadNone, IsStreamingCompatible]>;
+ def SVBFMLALT_N : SInst<"svbfmlalt[_n_{0}]", "MMda", "b", MergeNone, "aarch64_sve_bfmlalt", [IsOverloadNone, IsStreamingCompatible]>;
+ def SVBFDOT_LANE : SInst<"svbfdot_lane[_{0}]", "MMddi", "b", MergeNone, "aarch64_sve_bfdot_lane_v2", [IsOverloadNone, IsStreamingCompatible], [ImmCheck<3, ImmCheck0_3>]>;
+ def SVBFMLALB_LANE : SInst<"svbfmlalb_lane[_{0}]", "MMddi", "b", MergeNone, "aarch64_sve_bfmlalb_lane_v2", [IsOverloadNone, IsStreamingCompatible], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVBFMLALT_LANE : SInst<"svbfmlalt_lane[_{0}]", "MMddi", "b", MergeNone, "aarch64_sve_bfmlalt_lane_v2", [IsOverloadNone, IsStreamingCompatible], [ImmCheck<3, ImmCheck0_7>]>;
+}
+
+let TargetGuard = "sve2p1" in {
+ // Contiguous zero-extending load to quadword (single vector).
+ def SVLD1UWQ : MInst<"svld1uwq[_{d}]", "dPc", "iUif", [IsLoad], MemEltTyInt32, "aarch64_sve_ld1uwq">;
+ def SVLD1UWQ_VNUM : MInst<"svld1uwq_vnum[_{d}]", "dPcl", "iUif", [IsLoad], MemEltTyInt32, "aarch64_sve_ld1uwq">;
+
+ def SVLD1UDQ : MInst<"svld1udq[_{d}]", "dPc", "lUld", [IsLoad], MemEltTyInt64, "aarch64_sve_ld1udq">;
+ def SVLD1UDQ_VNUM : MInst<"svld1udq_vnum[_{d}]", "dPcl", "lUld", [IsLoad], MemEltTyInt64, "aarch64_sve_ld1udq">;
+
+ // Load one vector (vector base + scalar offset)
+ def SVLD1Q_GATHER_U64BASE_OFFSET : MInst<"svld1q_gather[_{2}base]_offset_{d}", "dPgl", "cUcsUsiUilUlfhdb", [IsGatherLoad, IsByteIndexed], MemEltTyDefault, "aarch64_sve_ld1q_gather_scalar_offset">;
+ def SVLD1Q_GATHER_U64BASE : MInst<"svld1q_gather[_{2}base]_{d}", "dPg", "cUcsUsiUilUlfhdb", [IsGatherLoad, IsByteIndexed], MemEltTyDefault, "aarch64_sve_ld1q_gather_scalar_offset">;
+
+ // Load one vector (scalar base + vector offset)
+ def SVLD1Q_GATHER_U64OFFSET : MInst<"svld1q_gather_[{3}]offset[_{d}]", "dPcg", "cUcsUsiUilUlfhdb", [IsGatherLoad, IsByteIndexed], MemEltTyDefault, "aarch64_sve_ld1q_gather_vector_offset">;
+
+ // Load N-element structure into N vectors (scalar base)
+ defm SVLD2Q : StructLoad<"svld2q[_{2}]", "2Pc", "aarch64_sve_ld2q_sret">;
+ defm SVLD3Q : StructLoad<"svld3q[_{2}]", "3Pc", "aarch64_sve_ld3q_sret">;
+ defm SVLD4Q : StructLoad<"svld4q[_{2}]", "4Pc", "aarch64_sve_ld4q_sret">;
+
+ // Load N-element structure into N vectors (scalar base, VL displacement)
+ defm SVLD2Q_VNUM : StructLoad<"svld2q_vnum[_{2}]", "2Pcl", "aarch64_sve_ld2q_sret">;
+ defm SVLD3Q_VNUM : StructLoad<"svld3q_vnum[_{2}]", "3Pcl", "aarch64_sve_ld3q_sret">;
+ defm SVLD4Q_VNUM : StructLoad<"svld4q_vnum[_{2}]", "4Pcl", "aarch64_sve_ld4q_sret">;
+
+ // Load quadwords (scalar base + vector index)
+ def SVLD1Q_GATHER_INDICES_U : MInst<"svld1q_gather_[{3}]index[_{d}]", "dPcg", "sUsiUilUlbhfd", [IsGatherLoad], MemEltTyDefault, "aarch64_sve_ld1q_gather_index">;
+
+ // Load quadwords (vector base + scalar index)
+ def SVLD1Q_GATHER_INDEX_S : MInst<"svld1q_gather[_{2}base]_index_{d}", "dPgl", "sUsiUilUlbhfd", [IsGatherLoad], MemEltTyDefault, "aarch64_sve_ld1q_gather_scalar_offset">;
}
////////////////////////////////////////////////////////////////////////////////
// Stores
// Store one vector (scalar base)
-def SVST1 : MInst<"svst1[_{d}]", "vPpd", "csilUcUsUiUlhfd", [IsStore], MemEltTyDefault, "aarch64_sve_st1">;
-def SVST1B_S : MInst<"svst1b[_{d}]", "vPAd", "sil", [IsStore], MemEltTyInt8, "aarch64_sve_st1">;
-def SVST1B_U : MInst<"svst1b[_{d}]", "vPEd", "UsUiUl", [IsStore], MemEltTyInt8, "aarch64_sve_st1">;
-def SVST1H_S : MInst<"svst1h[_{d}]", "vPBd", "il", [IsStore], MemEltTyInt16, "aarch64_sve_st1">;
-def SVST1H_U : MInst<"svst1h[_{d}]", "vPFd", "UiUl", [IsStore], MemEltTyInt16, "aarch64_sve_st1">;
-def SVST1W_S : MInst<"svst1w[_{d}]", "vPCd", "l", [IsStore], MemEltTyInt32, "aarch64_sve_st1">;
-def SVST1W_U : MInst<"svst1w[_{d}]", "vPGd", "Ul", [IsStore], MemEltTyInt32, "aarch64_sve_st1">;
+def SVST1 : MInst<"svst1[_{d}]", "vPpd", "csilUcUsUiUlhfd", [IsStore, IsStreamingCompatible], MemEltTyDefault, "aarch64_sve_st1">;
+def SVST1B_S : MInst<"svst1b[_{d}]", "vPAd", "sil", [IsStore, IsStreamingCompatible], MemEltTyInt8, "aarch64_sve_st1">;
+def SVST1B_U : MInst<"svst1b[_{d}]", "vPEd", "UsUiUl", [IsStore, IsStreamingCompatible], MemEltTyInt8, "aarch64_sve_st1">;
+def SVST1H_S : MInst<"svst1h[_{d}]", "vPBd", "il", [IsStore, IsStreamingCompatible], MemEltTyInt16, "aarch64_sve_st1">;
+def SVST1H_U : MInst<"svst1h[_{d}]", "vPFd", "UiUl", [IsStore, IsStreamingCompatible], MemEltTyInt16, "aarch64_sve_st1">;
+def SVST1W_S : MInst<"svst1w[_{d}]", "vPCd", "l", [IsStore, IsStreamingCompatible], MemEltTyInt32, "aarch64_sve_st1">;
+def SVST1W_U : MInst<"svst1w[_{d}]", "vPGd", "Ul", [IsStore, IsStreamingCompatible], MemEltTyInt32, "aarch64_sve_st1">;
// Store one vector (scalar base, VL displacement)
-def SVST1_VNUM : MInst<"svst1_vnum[_{d}]", "vPpld", "csilUcUsUiUlhfd", [IsStore], MemEltTyDefault, "aarch64_sve_st1">;
-def SVST1B_VNUM_S : MInst<"svst1b_vnum[_{d}]", "vPAld", "sil", [IsStore], MemEltTyInt8, "aarch64_sve_st1">;
-def SVST1B_VNUM_U : MInst<"svst1b_vnum[_{d}]", "vPEld", "UsUiUl", [IsStore], MemEltTyInt8, "aarch64_sve_st1">;
-def SVST1H_VNUM_S : MInst<"svst1h_vnum[_{d}]", "vPBld", "il", [IsStore], MemEltTyInt16, "aarch64_sve_st1">;
-def SVST1H_VNUM_U : MInst<"svst1h_vnum[_{d}]", "vPFld", "UiUl", [IsStore], MemEltTyInt16, "aarch64_sve_st1">;
-def SVST1W_VNUM_S : MInst<"svst1w_vnum[_{d}]", "vPCld", "l", [IsStore], MemEltTyInt32, "aarch64_sve_st1">;
-def SVST1W_VNUM_U : MInst<"svst1w_vnum[_{d}]", "vPGld", "Ul", [IsStore], MemEltTyInt32, "aarch64_sve_st1">;
+def SVST1_VNUM : MInst<"svst1_vnum[_{d}]", "vPpld", "csilUcUsUiUlhfd", [IsStore, IsStreamingCompatible], MemEltTyDefault, "aarch64_sve_st1">;
+def SVST1B_VNUM_S : MInst<"svst1b_vnum[_{d}]", "vPAld", "sil", [IsStore, IsStreamingCompatible], MemEltTyInt8, "aarch64_sve_st1">;
+def SVST1B_VNUM_U : MInst<"svst1b_vnum[_{d}]", "vPEld", "UsUiUl", [IsStore, IsStreamingCompatible], MemEltTyInt8, "aarch64_sve_st1">;
+def SVST1H_VNUM_S : MInst<"svst1h_vnum[_{d}]", "vPBld", "il", [IsStore, IsStreamingCompatible], MemEltTyInt16, "aarch64_sve_st1">;
+def SVST1H_VNUM_U : MInst<"svst1h_vnum[_{d}]", "vPFld", "UiUl", [IsStore, IsStreamingCompatible], MemEltTyInt16, "aarch64_sve_st1">;
+def SVST1W_VNUM_S : MInst<"svst1w_vnum[_{d}]", "vPCld", "l", [IsStore, IsStreamingCompatible], MemEltTyInt32, "aarch64_sve_st1">;
+def SVST1W_VNUM_U : MInst<"svst1w_vnum[_{d}]", "vPGld", "Ul", [IsStore, IsStreamingCompatible], MemEltTyInt32, "aarch64_sve_st1">;
-let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
- def SVST1_BF : MInst<"svst1[_{d}]", "vPpd", "b", [IsStore], MemEltTyDefault, "aarch64_sve_st1">;
- def SVST1_VNUM_BF : MInst<"svst1_vnum[_{d}]", "vPpld", "b", [IsStore], MemEltTyDefault, "aarch64_sve_st1">;
+let TargetGuard = "sve,bf16" in {
+ def SVST1_BF : MInst<"svst1[_{d}]", "vPpd", "b", [IsStore, IsStreamingCompatible], MemEltTyDefault, "aarch64_sve_st1">;
+ def SVST1_VNUM_BF : MInst<"svst1_vnum[_{d}]", "vPpld", "b", [IsStore, IsStreamingCompatible], MemEltTyDefault, "aarch64_sve_st1">;
}
// Store one vector (vector base)
@@ -638,9 +426,9 @@ def SVST1H_SCATTER_INDEX_S : MInst<"svst1h_scatter[_{2}base]_index[_{d}]", "v
def SVST1W_SCATTER_INDEX_S : MInst<"svst1w_scatter[_{2}base]_index[_{d}]", "vPuld", "lUl", [IsScatterStore], MemEltTyInt32, "aarch64_sve_st1_scatter_scalar_offset">;
multiclass StructStore<string name, string proto, string i> {
- def : SInst<name, proto, "csilUcUsUiUlhfd", MergeNone, i, [IsStructStore]>;
- let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
- def: SInst<name, proto, "b", MergeNone, i, [IsStructStore]>;
+ def : SInst<name, proto, "csilUcUsUiUlhfd", MergeNone, i, [IsStructStore, IsStreamingCompatible]>;
+ let TargetGuard = "sve,bf16" in {
+ def: SInst<name, proto, "b", MergeNone, i, [IsStructStore, IsStreamingCompatible]>;
}
}
// Store N vectors into N-element structure (scalar base)
@@ -654,30 +442,62 @@ defm SVST3_VNUM : StructStore<"svst3_vnum[_{d}]", "vPpl3", "aarch64_sve_st3">;
defm SVST4_VNUM : StructStore<"svst4_vnum[_{d}]", "vPpl4", "aarch64_sve_st4">;
// Store one vector, with no truncation, non-temporal (scalar base)
-def SVSTNT1 : MInst<"svstnt1[_{d}]", "vPpd", "csilUcUsUiUlhfd", [IsStore], MemEltTyDefault, "aarch64_sve_stnt1">;
+def SVSTNT1 : MInst<"svstnt1[_{d}]", "vPpd", "csilUcUsUiUlhfd", [IsStore, IsStreamingCompatible], MemEltTyDefault, "aarch64_sve_stnt1">;
// Store one vector, with no truncation, non-temporal (scalar base, VL displacement)
-def SVSTNT1_VNUM : MInst<"svstnt1_vnum[_{d}]", "vPpld", "csilUcUsUiUlhfd", [IsStore], MemEltTyDefault, "aarch64_sve_stnt1">;
+def SVSTNT1_VNUM : MInst<"svstnt1_vnum[_{d}]", "vPpld", "csilUcUsUiUlhfd", [IsStore, IsStreamingCompatible], MemEltTyDefault, "aarch64_sve_stnt1">;
-let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
- def SVSTNT1_BF : MInst<"svstnt1[_{d}]", "vPpd", "b", [IsStore], MemEltTyDefault, "aarch64_sve_stnt1">;
- def SVSTNT1_VNUM_BF : MInst<"svstnt1_vnum[_{d}]", "vPpld", "b", [IsStore], MemEltTyDefault, "aarch64_sve_stnt1">;
+let TargetGuard = "sve,bf16" in {
+ def SVSTNT1_BF : MInst<"svstnt1[_{d}]", "vPpd", "b", [IsStore, IsStreamingCompatible], MemEltTyDefault, "aarch64_sve_stnt1">;
+ def SVSTNT1_VNUM_BF : MInst<"svstnt1_vnum[_{d}]", "vPpld", "b", [IsStore, IsStreamingCompatible], MemEltTyDefault, "aarch64_sve_stnt1">;
+}
+
+let TargetGuard = "sve2p1" in {
+ // Contiguous truncating store from quadword (single vector).
+ def SVST1UWQ : MInst<"svst1wq[_{d}]", "vPcd", "iUif", [IsStore], MemEltTyInt32, "aarch64_sve_st1wq">;
+ def SVST1UWQ_VNUM : MInst<"svst1wq_vnum[_{d}]", "vPcld", "iUif", [IsStore], MemEltTyInt32, "aarch64_sve_st1wq">;
+
+ def SVST1UDQ : MInst<"svst1dq[_{d}]", "vPcd", "lUld", [IsStore], MemEltTyInt64, "aarch64_sve_st1dq">;
+ def SVST1UDQ_VNUM : MInst<"svst1dq_vnum[_{d}]", "vPcld", "lUld", [IsStore], MemEltTyInt64, "aarch64_sve_st1dq">;
+
+ // Store one vector (vector base + scalar offset)
+ def SVST1Q_SCATTER_U64BASE_OFFSET : MInst<"svst1q_scatter[_{2}base]_offset[_{d}]", "vPgld", "cUcsUsiUilUlfhdb", [IsScatterStore, IsByteIndexed], MemEltTyDefault, "aarch64_sve_st1q_scatter_scalar_offset">;
+ def SVST1Q_SCATTER_U64BASE : MInst<"svst1q_scatter[_{2}base][_{d}]", "vPgd", "cUcsUsiUilUlfhdb", [IsScatterStore, IsByteIndexed], MemEltTyDefault, "aarch64_sve_st1q_scatter_scalar_offset">;
+
+ // Store one vector (scalar base + vector offset)
+ def SVST1Q_SCATTER_U64OFFSET : MInst<"svst1q_scatter_[{3}]offset[_{d}]", "vPpgd", "cUcsUsiUilUlfhdb", [IsScatterStore, IsByteIndexed], MemEltTyDefault, "aarch64_sve_st1q_scatter_vector_offset">;
+
+ // Store N vectors into N-element structure (scalar base)
+ defm SVST2Q : StructStore<"svst2q[_{d}]", "vPc2", "aarch64_sve_st2q">;
+ defm SVST3Q : StructStore<"svst3q[_{d}]", "vPc3", "aarch64_sve_st3q">;
+ defm SVST4Q : StructStore<"svst4q[_{d}]", "vPc4", "aarch64_sve_st4q">;
+
+ // Store N vectors into N-element structure (scalar base, VL displacement)
+ defm SVST2Q_VNUM : StructStore<"svst2q_vnum[_{d}]", "vPcl2", "aarch64_sve_st2q">;
+ defm SVST3Q_VNUM : StructStore<"svst3q_vnum[_{d}]", "vPcl3", "aarch64_sve_st3q">;
+ defm SVST4Q_VNUM : StructStore<"svst4q_vnum[_{d}]", "vPcl4", "aarch64_sve_st4q">;
+
+ // Scatter store quadwords (scalar base + vector index)
+ def SVST1Q_SCATTER_INDICES_U : MInst<"svst1q_scatter_[{3}]index[_{d}]", "vPpgd", "sUsiUilUlbhfd", [IsScatterStore], MemEltTyDefault, "aarch64_sve_st1q_scatter_index">;
+
+ // Scatter store quadwords (vector base + scalar index)
+ def SVST1Q_SCATTER_INDEX_S : MInst<"svst1q_scatter[_{2}base]_index[_{d}]", "vPgld", "sUsiUilUlbhfd", [IsScatterStore], MemEltTyDefault, "aarch64_sve_st1q_scatter_scalar_offset">;
}
////////////////////////////////////////////////////////////////////////////////
// Prefetches
// Prefetch (Scalar base)
-def SVPRFB : MInst<"svprfb", "vPQJ", "c", [IsPrefetch], MemEltTyInt8, "aarch64_sve_prf">;
-def SVPRFH : MInst<"svprfh", "vPQJ", "s", [IsPrefetch], MemEltTyInt16, "aarch64_sve_prf">;
-def SVPRFW : MInst<"svprfw", "vPQJ", "i", [IsPrefetch], MemEltTyInt32, "aarch64_sve_prf">;
-def SVPRFD : MInst<"svprfd", "vPQJ", "l", [IsPrefetch], MemEltTyInt64, "aarch64_sve_prf">;
+def SVPRFB : MInst<"svprfb", "vPQJ", "c", [IsPrefetch, IsStreamingCompatible], MemEltTyInt8, "aarch64_sve_prf">;
+def SVPRFH : MInst<"svprfh", "vPQJ", "s", [IsPrefetch, IsStreamingCompatible], MemEltTyInt16, "aarch64_sve_prf">;
+def SVPRFW : MInst<"svprfw", "vPQJ", "i", [IsPrefetch, IsStreamingCompatible], MemEltTyInt32, "aarch64_sve_prf">;
+def SVPRFD : MInst<"svprfd", "vPQJ", "l", [IsPrefetch, IsStreamingCompatible], MemEltTyInt64, "aarch64_sve_prf">;
// Prefetch (Scalar base, VL displacement)
-def SVPRFB_VNUM : MInst<"svprfb_vnum", "vPQlJ", "c", [IsPrefetch], MemEltTyInt8, "aarch64_sve_prf">;
-def SVPRFH_VNUM : MInst<"svprfh_vnum", "vPQlJ", "s", [IsPrefetch], MemEltTyInt16, "aarch64_sve_prf">;
-def SVPRFW_VNUM : MInst<"svprfw_vnum", "vPQlJ", "i", [IsPrefetch], MemEltTyInt32, "aarch64_sve_prf">;
-def SVPRFD_VNUM : MInst<"svprfd_vnum", "vPQlJ", "l", [IsPrefetch], MemEltTyInt64, "aarch64_sve_prf">;
+def SVPRFB_VNUM : MInst<"svprfb_vnum", "vPQlJ", "c", [IsPrefetch, IsStreamingCompatible], MemEltTyInt8, "aarch64_sve_prf">;
+def SVPRFH_VNUM : MInst<"svprfh_vnum", "vPQlJ", "s", [IsPrefetch, IsStreamingCompatible], MemEltTyInt16, "aarch64_sve_prf">;
+def SVPRFW_VNUM : MInst<"svprfw_vnum", "vPQlJ", "i", [IsPrefetch, IsStreamingCompatible], MemEltTyInt32, "aarch64_sve_prf">;
+def SVPRFD_VNUM : MInst<"svprfd_vnum", "vPQlJ", "l", [IsPrefetch, IsStreamingCompatible], MemEltTyInt64, "aarch64_sve_prf">;
// Prefetch (Vector bases)
def SVPRFB_GATHER_BASES : MInst<"svprfb_gather[_{2}base]", "vPdJ", "UiUl", [IsGatherPrefetch], MemEltTyInt8, "aarch64_sve_prfb_gather_scalar_offset">;
@@ -723,18 +543,18 @@ def SVADRD : SInst<"svadrd[_{0}base]_[{2}]index", "uud", "ilUiUl", MergeNone, "
////////////////////////////////////////////////////////////////////////////////
// Scalar to vector
-def SVDUPQ_8 : SInst<"svdupq[_n]_{d}", "dssssssssssssssss", "cUc", MergeNone>;
-def SVDUPQ_16 : SInst<"svdupq[_n]_{d}", "dssssssss", "sUsh", MergeNone>;
-let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
- def SVDUPQ_BF16 : SInst<"svdupq[_n]_{d}", "dssssssss", "b", MergeNone>;
+def SVDUPQ_8 : SInst<"svdupq[_n]_{d}", "dssssssssssssssss", "cUc", MergeNone, "", [IsStreamingCompatible]>;
+def SVDUPQ_16 : SInst<"svdupq[_n]_{d}", "dssssssss", "sUsh", MergeNone, "", [IsStreamingCompatible]>;
+let TargetGuard = "sve,bf16" in {
+ def SVDUPQ_BF16 : SInst<"svdupq[_n]_{d}", "dssssssss", "b", MergeNone, "", [IsStreamingCompatible]>;
}
-def SVDUPQ_32 : SInst<"svdupq[_n]_{d}", "dssss", "iUif", MergeNone>;
-def SVDUPQ_64 : SInst<"svdupq[_n]_{d}", "dss", "lUld", MergeNone>;
+def SVDUPQ_32 : SInst<"svdupq[_n]_{d}", "dssss", "iUif", MergeNone, "", [IsStreamingCompatible]>;
+def SVDUPQ_64 : SInst<"svdupq[_n]_{d}", "dss", "lUld", MergeNone, "", [IsStreamingCompatible]>;
multiclass svdup_base<string n, string p, MergeType mt, string i> {
- def NAME : SInst<n, p, "csilUcUsUiUlhfd", mt, i>;
- let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
- def _BF16: SInst<n, p, "b", mt, i>;
+ def NAME : SInst<n, p, "csilUcUsUiUlhfd", mt, i, [IsStreamingCompatible]>;
+ let TargetGuard = "sve,bf16" in {
+ def _BF16: SInst<n, p, "b", mt, i, [IsStreamingCompatible]>;
}
}
@@ -743,14 +563,14 @@ defm SVDUP_M : svdup_base<"svdup[_n]_{d}", "ddPs", MergeOp1, "aarch64_sve_du
defm SVDUP_X : svdup_base<"svdup[_n]_{d}", "dPs", MergeAnyExp, "aarch64_sve_dup">;
defm SVDUP_Z : svdup_base<"svdup[_n]_{d}", "dPs", MergeZeroExp, "aarch64_sve_dup">;
-def SVINDEX : SInst<"svindex_{d}", "dss", "csilUcUsUiUl", MergeNone, "aarch64_sve_index">;
+def SVINDEX : SInst<"svindex_{d}", "dss", "csilUcUsUiUl", MergeNone, "aarch64_sve_index", [IsStreamingCompatible]>;
// Integer arithmetic
-multiclass SInstZPZ<string name, string types, string intrinsic, list<FlagType> flags=[]> {
- def _M : SInst<name # "[_{d}]", "ddPd", types, MergeOp1, intrinsic, flags>;
- def _X : SInst<name # "[_{d}]", "dPd", types, MergeAnyExp, intrinsic, flags>;
- def _Z : SInst<name # "[_{d}]", "dPd", types, MergeZeroExp, intrinsic, flags>;
+multiclass SInstZPZ<string name, string types, string intrinsic> {
+ def _M : SInst<name # "[_{d}]", "ddPd", types, MergeOp1, intrinsic, [IsStreamingCompatible]>;
+ def _X : SInst<name # "[_{d}]", "dPd", types, MergeAnyExp, intrinsic, [IsStreamingCompatible]>;
+ def _Z : SInst<name # "[_{d}]", "dPd", types, MergeZeroExp, intrinsic, [IsStreamingCompatible]>;
}
defm SVABS : SInstZPZ<"svabs", "csil", "aarch64_sve_abs">;
@@ -758,76 +578,76 @@ defm SVNEG : SInstZPZ<"svneg", "csil", "aarch64_sve_neg">;
//------------------------------------------------------------------------------
-multiclass SInstZPZZ<string name, string types, string intrinsic, list<FlagType> flags=[]> {
- def _M : SInst<name # "[_{d}]", "dPdd", types, MergeOp1, intrinsic, flags>;
- def _X : SInst<name # "[_{d}]", "dPdd", types, MergeAny, intrinsic, flags>;
- def _Z : SInst<name # "[_{d}]", "dPdd", types, MergeZero, intrinsic, flags>;
-
- def _N_M : SInst<name # "[_n_{d}]", "dPda", types, MergeOp1, intrinsic, flags>;
- def _N_X : SInst<name # "[_n_{d}]", "dPda", types, MergeAny, intrinsic, flags>;
- def _N_Z : SInst<name # "[_n_{d}]", "dPda", types, MergeZero, intrinsic, flags>;
-}
-
-defm SVABD_S : SInstZPZZ<"svabd", "csil", "aarch64_sve_sabd">;
-defm SVABD_U : SInstZPZZ<"svabd", "UcUsUiUl", "aarch64_sve_uabd">;
-defm SVADD : SInstZPZZ<"svadd", "csilUcUsUiUl", "aarch64_sve_add">;
-defm SVDIV_S : SInstZPZZ<"svdiv", "il", "aarch64_sve_sdiv">;
-defm SVDIV_U : SInstZPZZ<"svdiv", "UiUl", "aarch64_sve_udiv">;
-defm SVDIVR_S : SInstZPZZ<"svdivr", "il", "aarch64_sve_sdivr">;
-defm SVDIVR_U : SInstZPZZ<"svdivr", "UiUl", "aarch64_sve_udivr">;
-defm SVMAX_S : SInstZPZZ<"svmax", "csil", "aarch64_sve_smax">;
-defm SVMAX_U : SInstZPZZ<"svmax", "UcUsUiUl", "aarch64_sve_umax">;
-defm SVMIN_S : SInstZPZZ<"svmin", "csil", "aarch64_sve_smin">;
-defm SVMIN_U : SInstZPZZ<"svmin", "UcUsUiUl", "aarch64_sve_umin">;
-defm SVMUL : SInstZPZZ<"svmul", "csilUcUsUiUl", "aarch64_sve_mul">;
-defm SVMULH_S : SInstZPZZ<"svmulh", "csil", "aarch64_sve_smulh">;
-defm SVMULH_U : SInstZPZZ<"svmulh", "UcUsUiUl", "aarch64_sve_umulh">;
-defm SVSUB : SInstZPZZ<"svsub", "csilUcUsUiUl", "aarch64_sve_sub">;
-defm SVSUBR : SInstZPZZ<"svsubr", "csilUcUsUiUl", "aarch64_sve_subr">;
+multiclass SInstZPZZ<string name, string types, string m_intrinsic, string x_intrinsic, list<FlagType> flags=[]> {
+ def _M : SInst<name # "[_{d}]", "dPdd", types, MergeOp1, m_intrinsic, !listconcat(flags, [IsStreamingCompatible])>;
+ def _X : SInst<name # "[_{d}]", "dPdd", types, MergeAny, x_intrinsic, !listconcat(flags, [IsStreamingCompatible])>;
+ def _Z : SInst<name # "[_{d}]", "dPdd", types, MergeZero, m_intrinsic, !listconcat(flags, [IsStreamingCompatible])>;
+
+ def _N_M : SInst<name # "[_n_{d}]", "dPda", types, MergeOp1, m_intrinsic, !listconcat(flags, [IsStreamingCompatible])>;
+ def _N_X : SInst<name # "[_n_{d}]", "dPda", types, MergeAny, x_intrinsic, !listconcat(flags, [IsStreamingCompatible])>;
+ def _N_Z : SInst<name # "[_n_{d}]", "dPda", types, MergeZero, m_intrinsic, !listconcat(flags, [IsStreamingCompatible])>;
+}
+
+defm SVABD_S : SInstZPZZ<"svabd", "csil", "aarch64_sve_sabd", "aarch64_sve_sabd_u">;
+defm SVABD_U : SInstZPZZ<"svabd", "UcUsUiUl", "aarch64_sve_uabd", "aarch64_sve_uabd_u">;
+defm SVADD : SInstZPZZ<"svadd", "csilUcUsUiUl", "aarch64_sve_add", "aarch64_sve_add_u">;
+defm SVDIV_S : SInstZPZZ<"svdiv", "il", "aarch64_sve_sdiv", "aarch64_sve_sdiv_u">;
+defm SVDIV_U : SInstZPZZ<"svdiv", "UiUl", "aarch64_sve_udiv", "aarch64_sve_udiv_u">;
+defm SVDIVR_S : SInstZPZZ<"svdivr", "il", "aarch64_sve_sdivr", "aarch64_sve_sdiv_u", [ReverseMergeAnyBinOp]>;
+defm SVDIVR_U : SInstZPZZ<"svdivr", "UiUl", "aarch64_sve_udivr", "aarch64_sve_udiv_u", [ReverseMergeAnyBinOp]>;
+defm SVMAX_S : SInstZPZZ<"svmax", "csil", "aarch64_sve_smax", "aarch64_sve_smax_u">;
+defm SVMAX_U : SInstZPZZ<"svmax", "UcUsUiUl", "aarch64_sve_umax", "aarch64_sve_umax_u">;
+defm SVMIN_S : SInstZPZZ<"svmin", "csil", "aarch64_sve_smin", "aarch64_sve_smin_u">;
+defm SVMIN_U : SInstZPZZ<"svmin", "UcUsUiUl", "aarch64_sve_umin", "aarch64_sve_umin_u">;
+defm SVMUL : SInstZPZZ<"svmul", "csilUcUsUiUl", "aarch64_sve_mul", "aarch64_sve_mul_u">;
+defm SVMULH_S : SInstZPZZ<"svmulh", "csil", "aarch64_sve_smulh", "aarch64_sve_smulh_u">;
+defm SVMULH_U : SInstZPZZ<"svmulh", "UcUsUiUl", "aarch64_sve_umulh", "aarch64_sve_umulh_u">;
+defm SVSUB : SInstZPZZ<"svsub", "csilUcUsUiUl", "aarch64_sve_sub", "aarch64_sve_sub_u">;
+defm SVSUBR : SInstZPZZ<"svsubr", "csilUcUsUiUl", "aarch64_sve_subr", "aarch64_sve_sub_u", [ReverseMergeAnyBinOp]>;
//------------------------------------------------------------------------------
-multiclass SInstZPZZZ<string name, string types, string intrinsic, list<FlagType> flags=[]> {
- def _M : SInst<name # "[_{d}]", "dPddd", types, MergeOp1, intrinsic, flags>;
- def _X : SInst<name # "[_{d}]", "dPddd", types, MergeAny, intrinsic, flags>;
- def _Z : SInst<name # "[_{d}]", "dPddd", types, MergeZero, intrinsic, flags>;
+multiclass SInstZPZZZ<string name, string types, string m_intrinsic, string x_intrinsic, list<FlagType> flags=[]> {
+ def _M : SInst<name # "[_{d}]", "dPddd", types, MergeOp1, m_intrinsic, flags>;
+ def _X : SInst<name # "[_{d}]", "dPddd", types, MergeAny, x_intrinsic, flags>;
+ def _Z : SInst<name # "[_{d}]", "dPddd", types, MergeZero, m_intrinsic, flags>;
- def _N_M : SInst<name # "[_n_{d}]", "dPdda", types, MergeOp1, intrinsic, flags>;
- def _N_X : SInst<name # "[_n_{d}]", "dPdda", types, MergeAny, intrinsic, flags>;
- def _N_Z : SInst<name # "[_n_{d}]", "dPdda", types, MergeZero, intrinsic, flags>;
+ def _N_M : SInst<name # "[_n_{d}]", "dPdda", types, MergeOp1, m_intrinsic, flags>;
+ def _N_X : SInst<name # "[_n_{d}]", "dPdda", types, MergeAny, x_intrinsic, flags>;
+ def _N_Z : SInst<name # "[_n_{d}]", "dPdda", types, MergeZero, m_intrinsic, flags>;
}
-defm SVMAD : SInstZPZZZ<"svmad", "csilUcUsUiUl", "aarch64_sve_mad">;
-defm SVMLA : SInstZPZZZ<"svmla", "csilUcUsUiUl", "aarch64_sve_mla">;
-defm SVMLS : SInstZPZZZ<"svmls", "csilUcUsUiUl", "aarch64_sve_mls">;
-defm SVMSB : SInstZPZZZ<"svmsb", "csilUcUsUiUl", "aarch64_sve_msb">;
+defm SVMAD : SInstZPZZZ<"svmad", "csilUcUsUiUl", "aarch64_sve_mad", "aarch64_sve_mla_u", [ReverseMergeAnyAccOp, IsStreamingCompatible]>;
+defm SVMLA : SInstZPZZZ<"svmla", "csilUcUsUiUl", "aarch64_sve_mla", "aarch64_sve_mla_u", [IsStreamingCompatible]>;
+defm SVMLS : SInstZPZZZ<"svmls", "csilUcUsUiUl", "aarch64_sve_mls", "aarch64_sve_mls_u", [IsStreamingCompatible]>;
+defm SVMSB : SInstZPZZZ<"svmsb", "csilUcUsUiUl", "aarch64_sve_msb", "aarch64_sve_mls_u", [ReverseMergeAnyAccOp, IsStreamingCompatible]>;
//------------------------------------------------------------------------------
-def SVDOT_S : SInst<"svdot[_{0}]", "ddqq", "il", MergeNone, "aarch64_sve_sdot">;
-def SVDOT_U : SInst<"svdot[_{0}]", "ddqq", "UiUl", MergeNone, "aarch64_sve_udot">;
-def SVQADD_S : SInst<"svqadd[_{d}]", "ddd", "csil", MergeNone, "aarch64_sve_sqadd_x">;
-def SVQADD_U : SInst<"svqadd[_{d}]", "ddd", "UcUsUiUl", MergeNone, "aarch64_sve_uqadd_x">;
-def SVQSUB_S : SInst<"svqsub[_{d}]", "ddd", "csil", MergeNone, "aarch64_sve_sqsub_x">;
-def SVQSUB_U : SInst<"svqsub[_{d}]", "ddd", "UcUsUiUl", MergeNone, "aarch64_sve_uqsub_x">;
+def SVDOT_S : SInst<"svdot[_{0}]", "ddqq", "il", MergeNone, "aarch64_sve_sdot", [IsStreamingCompatible]>;
+def SVDOT_U : SInst<"svdot[_{0}]", "ddqq", "UiUl", MergeNone, "aarch64_sve_udot", [IsStreamingCompatible]>;
+def SVQADD_S : SInst<"svqadd[_{d}]", "ddd", "csil", MergeNone, "aarch64_sve_sqadd_x", [IsStreamingCompatible]>;
+def SVQADD_U : SInst<"svqadd[_{d}]", "ddd", "UcUsUiUl", MergeNone, "aarch64_sve_uqadd_x", [IsStreamingCompatible]>;
+def SVQSUB_S : SInst<"svqsub[_{d}]", "ddd", "csil", MergeNone, "aarch64_sve_sqsub_x", [IsStreamingCompatible]>;
+def SVQSUB_U : SInst<"svqsub[_{d}]", "ddd", "UcUsUiUl", MergeNone, "aarch64_sve_uqsub_x", [IsStreamingCompatible]>;
-def SVDOT_N_S : SInst<"svdot[_n_{0}]", "ddqr", "il", MergeNone, "aarch64_sve_sdot">;
-def SVDOT_N_U : SInst<"svdot[_n_{0}]", "ddqr", "UiUl", MergeNone, "aarch64_sve_udot">;
-def SVQADD_N_S : SInst<"svqadd[_n_{d}]", "dda", "csil", MergeNone, "aarch64_sve_sqadd_x">;
-def SVQADD_N_U : SInst<"svqadd[_n_{d}]", "dda", "UcUsUiUl", MergeNone, "aarch64_sve_uqadd_x">;
-def SVQSUB_N_S : SInst<"svqsub[_n_{d}]", "dda", "csil", MergeNone, "aarch64_sve_sqsub_x">;
-def SVQSUB_N_U : SInst<"svqsub[_n_{d}]", "dda", "UcUsUiUl", MergeNone, "aarch64_sve_uqsub_x">;
+def SVDOT_N_S : SInst<"svdot[_n_{0}]", "ddqr", "il", MergeNone, "aarch64_sve_sdot", [IsStreamingCompatible]>;
+def SVDOT_N_U : SInst<"svdot[_n_{0}]", "ddqr", "UiUl", MergeNone, "aarch64_sve_udot", [IsStreamingCompatible]>;
+def SVQADD_N_S : SInst<"svqadd[_n_{d}]", "dda", "csil", MergeNone, "aarch64_sve_sqadd_x", [IsStreamingCompatible]>;
+def SVQADD_N_U : SInst<"svqadd[_n_{d}]", "dda", "UcUsUiUl", MergeNone, "aarch64_sve_uqadd_x", [IsStreamingCompatible]>;
+def SVQSUB_N_S : SInst<"svqsub[_n_{d}]", "dda", "csil", MergeNone, "aarch64_sve_sqsub_x", [IsStreamingCompatible]>;
+def SVQSUB_N_U : SInst<"svqsub[_n_{d}]", "dda", "UcUsUiUl", MergeNone, "aarch64_sve_uqsub_x", [IsStreamingCompatible]>;
-def SVDOT_LANE_S : SInst<"svdot_lane[_{d}]", "ddqqi", "il", MergeNone, "aarch64_sve_sdot_lane", [], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
-def SVDOT_LANE_U : SInst<"svdot_lane[_{d}]", "ddqqi", "UiUl", MergeNone, "aarch64_sve_udot_lane", [], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
+def SVDOT_LANE_S : SInst<"svdot_lane[_{d}]", "ddqqi", "il", MergeNone, "aarch64_sve_sdot_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
+def SVDOT_LANE_U : SInst<"svdot_lane[_{d}]", "ddqqi", "UiUl", MergeNone, "aarch64_sve_udot_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
////////////////////////////////////////////////////////////////////////////////
// Logical operations
-defm SVAND : SInstZPZZ<"svand", "csilUcUsUiUl", "aarch64_sve_and">;
-defm SVBIC : SInstZPZZ<"svbic", "csilUcUsUiUl", "aarch64_sve_bic">;
-defm SVEOR : SInstZPZZ<"sveor", "csilUcUsUiUl", "aarch64_sve_eor">;
-defm SVORR : SInstZPZZ<"svorr", "csilUcUsUiUl", "aarch64_sve_orr">;
+defm SVAND : SInstZPZZ<"svand", "csilUcUsUiUl", "aarch64_sve_and", "aarch64_sve_and_u">;
+defm SVBIC : SInstZPZZ<"svbic", "csilUcUsUiUl", "aarch64_sve_bic", "aarch64_sve_bic_u">;
+defm SVEOR : SInstZPZZ<"sveor", "csilUcUsUiUl", "aarch64_sve_eor", "aarch64_sve_eor_u">;
+defm SVORR : SInstZPZZ<"svorr", "csilUcUsUiUl", "aarch64_sve_orr", "aarch64_sve_orr_u">;
defm SVCNOT : SInstZPZ<"svcnot", "csilUcUsUiUl", "aarch64_sve_cnot">;
defm SVNOT : SInstZPZ<"svnot", "csilUcUsUiUl", "aarch64_sve_not">;
@@ -836,107 +656,107 @@ defm SVNOT : SInstZPZ<"svnot", "csilUcUsUiUl", "aarch64_sve_not">;
// Shifts
multiclass SInst_SHIFT<string name, string intrinsic, string ts, string wide_ts> {
- def _M : SInst<name # "[_{d}]", "dPdu", ts, MergeOp1, intrinsic>;
- def _X : SInst<name # "[_{d}]", "dPdu", ts, MergeAny, intrinsic>;
- def _Z : SInst<name # "[_{d}]", "dPdu", ts, MergeZero, intrinsic>;
+ def _M : SInst<name # "[_{d}]", "dPdu", ts, MergeOp1, intrinsic, [IsStreamingCompatible]>;
+ def _X : SInst<name # "[_{d}]", "dPdu", ts, MergeAny, intrinsic # _u, [IsStreamingCompatible]>;
+ def _Z : SInst<name # "[_{d}]", "dPdu", ts, MergeZero, intrinsic, [IsStreamingCompatible]>;
- def _N_M : SInst<name # "[_n_{d}]", "dPdL", ts, MergeOp1, intrinsic>;
- def _N_X : SInst<name # "[_n_{d}]", "dPdL", ts, MergeAny, intrinsic>;
- def _N_Z : SInst<name # "[_n_{d}]", "dPdL", ts, MergeZero, intrinsic>;
+ def _N_M : SInst<name # "[_n_{d}]", "dPdL", ts, MergeOp1, intrinsic, [IsStreamingCompatible]>;
+ def _N_X : SInst<name # "[_n_{d}]", "dPdL", ts, MergeAny, intrinsic # _u, [IsStreamingCompatible]>;
+ def _N_Z : SInst<name # "[_n_{d}]", "dPdL", ts, MergeZero, intrinsic, [IsStreamingCompatible]>;
- def _WIDE_M : SInst<name # _wide # "[_{d}]", "dPdg", wide_ts, MergeOp1, intrinsic # _wide>;
- def _WIDE_X : SInst<name # _wide # "[_{d}]", "dPdg", wide_ts, MergeAny, intrinsic # _wide>;
- def _WIDE_Z : SInst<name # _wide # "[_{d}]", "dPdg", wide_ts, MergeZero, intrinsic # _wide>;
+ def _WIDE_M : SInst<name # _wide # "[_{d}]", "dPdg", wide_ts, MergeOp1, intrinsic # _wide, [IsStreamingCompatible]>;
+ def _WIDE_X : SInst<name # _wide # "[_{d}]", "dPdg", wide_ts, MergeAny, intrinsic # _wide, [IsStreamingCompatible]>;
+ def _WIDE_Z : SInst<name # _wide # "[_{d}]", "dPdg", wide_ts, MergeZero, intrinsic # _wide, [IsStreamingCompatible]>;
- def _WIDE_N_M : SInst<name # _wide # "[_n_{d}]", "dPdf", wide_ts, MergeOp1, intrinsic # _wide>;
- def _WIDE_N_X : SInst<name # _wide # "[_n_{d}]", "dPdf", wide_ts, MergeAny, intrinsic # _wide>;
- def _WIDE_N_Z : SInst<name # _wide # "[_n_{d}]", "dPdf", wide_ts, MergeZero, intrinsic # _wide>;
+ def _WIDE_N_M : SInst<name # _wide # "[_n_{d}]", "dPdf", wide_ts, MergeOp1, intrinsic # _wide, [IsStreamingCompatible]>;
+ def _WIDE_N_X : SInst<name # _wide # "[_n_{d}]", "dPdf", wide_ts, MergeAny, intrinsic # _wide, [IsStreamingCompatible]>;
+ def _WIDE_N_Z : SInst<name # _wide # "[_n_{d}]", "dPdf", wide_ts, MergeZero, intrinsic # _wide, [IsStreamingCompatible]>;
}
defm SVASR : SInst_SHIFT<"svasr", "aarch64_sve_asr", "csil", "csi">;
defm SVLSL : SInst_SHIFT<"svlsl", "aarch64_sve_lsl", "csilUcUsUiUl", "csiUcUsUi">;
defm SVLSR : SInst_SHIFT<"svlsr", "aarch64_sve_lsr", "UcUsUiUl", "UcUsUi">;
-def SVASRD_M : SInst<"svasrd[_n_{d}]", "dPdi", "csil", MergeOp1, "aarch64_sve_asrd", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
-def SVASRD_X : SInst<"svasrd[_n_{d}]", "dPdi", "csil", MergeAny, "aarch64_sve_asrd", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
-def SVASRD_Z : SInst<"svasrd[_n_{d}]", "dPdi", "csil", MergeZero, "aarch64_sve_asrd", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVASRD_M : SInst<"svasrd[_n_{d}]", "dPdi", "csil", MergeOp1, "aarch64_sve_asrd", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVASRD_X : SInst<"svasrd[_n_{d}]", "dPdi", "csil", MergeAny, "aarch64_sve_asrd", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVASRD_Z : SInst<"svasrd[_n_{d}]", "dPdi", "csil", MergeZero, "aarch64_sve_asrd", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
-def SVINSR : SInst<"svinsr[_n_{d}]", "dds", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_insr">;
-let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
- def SVINSR_BF16 : SInst<"svinsr[_n_{d}]", "dds", "b", MergeNone, "aarch64_sve_insr">;
+def SVINSR : SInst<"svinsr[_n_{d}]", "dds", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_insr", [IsStreamingCompatible]>;
+let TargetGuard = "sve,bf16" in {
+ def SVINSR_BF16 : SInst<"svinsr[_n_{d}]", "dds", "b", MergeNone, "aarch64_sve_insr", [IsStreamingCompatible]>;
}
////////////////////////////////////////////////////////////////////////////////
// Integer reductions
-def SVADDV_S : SInst<"svaddv[_{d}]", "lPd", "csil", MergeNone, "aarch64_sve_saddv">;
-def SVADDV_U : SInst<"svaddv[_{d}]", "nPd", "UcUsUiUl", MergeNone, "aarch64_sve_uaddv">;
-def SVANDV : SInst<"svandv[_{d}]", "sPd", "csilUcUsUiUl", MergeNone, "aarch64_sve_andv">;
-def SVEORV : SInst<"sveorv[_{d}]", "sPd", "csilUcUsUiUl", MergeNone, "aarch64_sve_eorv">;
-def SVMAXV_S : SInst<"svmaxv[_{d}]", "sPd", "csil", MergeNone, "aarch64_sve_smaxv">;
-def SVMAXV_U : SInst<"svmaxv[_{d}]", "sPd", "UcUsUiUl", MergeNone, "aarch64_sve_umaxv">;
-def SVMINV_S : SInst<"svminv[_{d}]", "sPd", "csil", MergeNone, "aarch64_sve_sminv">;
-def SVMINV_U : SInst<"svminv[_{d}]", "sPd", "UcUsUiUl", MergeNone, "aarch64_sve_uminv">;
-def SVORV : SInst<"svorv[_{d}]", "sPd", "csilUcUsUiUl", MergeNone, "aarch64_sve_orv">;
+def SVADDV_S : SInst<"svaddv[_{d}]", "lPd", "csil", MergeNone, "aarch64_sve_saddv", [IsStreamingCompatible]>;
+def SVADDV_U : SInst<"svaddv[_{d}]", "nPd", "UcUsUiUl", MergeNone, "aarch64_sve_uaddv", [IsStreamingCompatible]>;
+def SVANDV : SInst<"svandv[_{d}]", "sPd", "csilUcUsUiUl", MergeNone, "aarch64_sve_andv", [IsStreamingCompatible]>;
+def SVEORV : SInst<"sveorv[_{d}]", "sPd", "csilUcUsUiUl", MergeNone, "aarch64_sve_eorv", [IsStreamingCompatible]>;
+def SVMAXV_S : SInst<"svmaxv[_{d}]", "sPd", "csil", MergeNone, "aarch64_sve_smaxv", [IsStreamingCompatible]>;
+def SVMAXV_U : SInst<"svmaxv[_{d}]", "sPd", "UcUsUiUl", MergeNone, "aarch64_sve_umaxv", [IsStreamingCompatible]>;
+def SVMINV_S : SInst<"svminv[_{d}]", "sPd", "csil", MergeNone, "aarch64_sve_sminv", [IsStreamingCompatible]>;
+def SVMINV_U : SInst<"svminv[_{d}]", "sPd", "UcUsUiUl", MergeNone, "aarch64_sve_uminv", [IsStreamingCompatible]>;
+def SVORV : SInst<"svorv[_{d}]", "sPd", "csilUcUsUiUl", MergeNone, "aarch64_sve_orv", [IsStreamingCompatible]>;
////////////////////////////////////////////////////////////////////////////////
// Integer comparisons
-def SVCMPEQ : SInst<"svcmpeq[_{d}]", "PPdd", "csilUcUsUiUl", MergeNone, "aarch64_sve_cmpeq">;
-def SVCMPNE : SInst<"svcmpne[_{d}]", "PPdd", "csilUcUsUiUl", MergeNone, "aarch64_sve_cmpne">;
-def SVCMPGE : SInst<"svcmpge[_{d}]", "PPdd", "csil", MergeNone, "aarch64_sve_cmpge">;
-def SVCMPGT : SInst<"svcmpgt[_{d}]", "PPdd", "csil", MergeNone, "aarch64_sve_cmpgt">;
-def SVCMPLE : SInst<"svcmple[_{d}]", "PPdd", "csil", MergeNone, "aarch64_sve_cmpge", [ReverseCompare]>;
-def SVCMPLT : SInst<"svcmplt[_{d}]", "PPdd", "csil", MergeNone, "aarch64_sve_cmpgt", [ReverseCompare]>;
-def SVCMPHI : SInst<"svcmpgt[_{d}]", "PPdd", "UcUsUiUl", MergeNone, "aarch64_sve_cmphi">;
-def SVCMPHS : SInst<"svcmpge[_{d}]", "PPdd", "UcUsUiUl", MergeNone, "aarch64_sve_cmphs">;
-def SVCMPLO : SInst<"svcmplt[_{d}]", "PPdd", "UcUsUiUl", MergeNone, "aarch64_sve_cmphi", [ReverseCompare]>;
-def SVCMPLS : SInst<"svcmple[_{d}]", "PPdd", "UcUsUiUl", MergeNone, "aarch64_sve_cmphs", [ReverseCompare]>;
-
-def SVCMPEQ_N : SInst<"svcmpeq[_n_{d}]", "PPda", "csilUcUsUiUl", MergeNone, "aarch64_sve_cmpeq">;
-def SVCMPNE_N : SInst<"svcmpne[_n_{d}]", "PPda", "csilUcUsUiUl", MergeNone, "aarch64_sve_cmpne">;
-def SVCMPGE_N : SInst<"svcmpge[_n_{d}]", "PPda", "csil", MergeNone, "aarch64_sve_cmpge">;
-def SVCMPGT_N : SInst<"svcmpgt[_n_{d}]", "PPda", "csil", MergeNone, "aarch64_sve_cmpgt">;
-def SVCMPLE_N : SInst<"svcmple[_n_{d}]", "PPda", "csil", MergeNone, "aarch64_sve_cmpge", [ReverseCompare]>;
-def SVCMPLT_N : SInst<"svcmplt[_n_{d}]", "PPda", "csil", MergeNone, "aarch64_sve_cmpgt", [ReverseCompare]>;
-def SVCMPHS_N : SInst<"svcmpge[_n_{d}]", "PPda", "UcUsUiUl", MergeNone, "aarch64_sve_cmphs">;
-def SVCMPHI_N : SInst<"svcmpgt[_n_{d}]", "PPda", "UcUsUiUl", MergeNone, "aarch64_sve_cmphi">;
-def SVCMPLS_N : SInst<"svcmple[_n_{d}]", "PPda", "UcUsUiUl", MergeNone, "aarch64_sve_cmphs", [ReverseCompare]>;
-def SVCMPLO_N : SInst<"svcmplt[_n_{d}]", "PPda", "UcUsUiUl", MergeNone, "aarch64_sve_cmphi", [ReverseCompare]>;
-
-def SVCMPEQ_WIDE : SInst<"svcmpeq_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmpeq_wide">;
-def SVCMPNE_WIDE : SInst<"svcmpne_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmpne_wide">;
-def SVCMPGE_WIDE : SInst<"svcmpge_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmpge_wide">;
-def SVCMPGT_WIDE : SInst<"svcmpgt_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmpgt_wide">;
-def SVCMPLE_WIDE : SInst<"svcmple_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmple_wide">;
-def SVCMPLT_WIDE : SInst<"svcmplt_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmplt_wide">;
-def SVCMPHI_WIDE : SInst<"svcmpgt_wide[_{d}]", "PPdw", "UcUsUi", MergeNone, "aarch64_sve_cmphi_wide">;
-def SVCMPHS_WIDE : SInst<"svcmpge_wide[_{d}]", "PPdw", "UcUsUi", MergeNone, "aarch64_sve_cmphs_wide">;
-def SVCMPLO_WIDE : SInst<"svcmplt_wide[_{d}]", "PPdw", "UcUsUi", MergeNone, "aarch64_sve_cmplo_wide">;
-def SVCMPLS_WIDE : SInst<"svcmple_wide[_{d}]", "PPdw", "UcUsUi", MergeNone, "aarch64_sve_cmpls_wide">;
-
-def SVCMPEQ_WIDE_N : SInst<"svcmpeq_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmpeq_wide">;
-def SVCMPNE_WIDE_N : SInst<"svcmpne_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmpne_wide">;
-def SVCMPGE_WIDE_N : SInst<"svcmpge_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmpge_wide">;
-def SVCMPGT_WIDE_N : SInst<"svcmpgt_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmpgt_wide">;
-def SVCMPLE_WIDE_N : SInst<"svcmple_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmple_wide">;
-def SVCMPLT_WIDE_N : SInst<"svcmplt_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmplt_wide">;
-def SVCMPHS_WIDE_N : SInst<"svcmpge_wide[_n_{d}]", "PPdj", "UcUsUi", MergeNone, "aarch64_sve_cmphs_wide">;
-def SVCMPHI_WIDE_N : SInst<"svcmpgt_wide[_n_{d}]", "PPdj", "UcUsUi", MergeNone, "aarch64_sve_cmphi_wide">;
-def SVCMPLO_WIDE_N : SInst<"svcmplt_wide[_n_{d}]", "PPdj", "UcUsUi", MergeNone, "aarch64_sve_cmplo_wide">;
-def SVCMPLS_WIDE_N : SInst<"svcmple_wide[_n_{d}]", "PPdj", "UcUsUi", MergeNone, "aarch64_sve_cmpls_wide">;
+def SVCMPEQ : SInst<"svcmpeq[_{d}]", "PPdd", "csilUcUsUiUl", MergeNone, "aarch64_sve_cmpeq", [IsStreamingCompatible]>;
+def SVCMPNE : SInst<"svcmpne[_{d}]", "PPdd", "csilUcUsUiUl", MergeNone, "aarch64_sve_cmpne", [IsStreamingCompatible]>;
+def SVCMPGE : SInst<"svcmpge[_{d}]", "PPdd", "csil", MergeNone, "aarch64_sve_cmpge", [IsStreamingCompatible]>;
+def SVCMPGT : SInst<"svcmpgt[_{d}]", "PPdd", "csil", MergeNone, "aarch64_sve_cmpgt", [IsStreamingCompatible]>;
+def SVCMPLE : SInst<"svcmple[_{d}]", "PPdd", "csil", MergeNone, "aarch64_sve_cmpge", [ReverseCompare, IsStreamingCompatible]>;
+def SVCMPLT : SInst<"svcmplt[_{d}]", "PPdd", "csil", MergeNone, "aarch64_sve_cmpgt", [ReverseCompare, IsStreamingCompatible]>;
+def SVCMPHI : SInst<"svcmpgt[_{d}]", "PPdd", "UcUsUiUl", MergeNone, "aarch64_sve_cmphi", [IsStreamingCompatible]>;
+def SVCMPHS : SInst<"svcmpge[_{d}]", "PPdd", "UcUsUiUl", MergeNone, "aarch64_sve_cmphs", [IsStreamingCompatible]>;
+def SVCMPLO : SInst<"svcmplt[_{d}]", "PPdd", "UcUsUiUl", MergeNone, "aarch64_sve_cmphi", [ReverseCompare, IsStreamingCompatible]>;
+def SVCMPLS : SInst<"svcmple[_{d}]", "PPdd", "UcUsUiUl", MergeNone, "aarch64_sve_cmphs", [ReverseCompare, IsStreamingCompatible]>;
+
+def SVCMPEQ_N : SInst<"svcmpeq[_n_{d}]", "PPda", "csilUcUsUiUl", MergeNone, "aarch64_sve_cmpeq", [IsStreamingCompatible]>;
+def SVCMPNE_N : SInst<"svcmpne[_n_{d}]", "PPda", "csilUcUsUiUl", MergeNone, "aarch64_sve_cmpne", [IsStreamingCompatible]>;
+def SVCMPGE_N : SInst<"svcmpge[_n_{d}]", "PPda", "csil", MergeNone, "aarch64_sve_cmpge", [IsStreamingCompatible]>;
+def SVCMPGT_N : SInst<"svcmpgt[_n_{d}]", "PPda", "csil", MergeNone, "aarch64_sve_cmpgt", [IsStreamingCompatible]>;
+def SVCMPLE_N : SInst<"svcmple[_n_{d}]", "PPda", "csil", MergeNone, "aarch64_sve_cmpge", [ReverseCompare, IsStreamingCompatible]>;
+def SVCMPLT_N : SInst<"svcmplt[_n_{d}]", "PPda", "csil", MergeNone, "aarch64_sve_cmpgt", [ReverseCompare, IsStreamingCompatible]>;
+def SVCMPHS_N : SInst<"svcmpge[_n_{d}]", "PPda", "UcUsUiUl", MergeNone, "aarch64_sve_cmphs", [IsStreamingCompatible]>;
+def SVCMPHI_N : SInst<"svcmpgt[_n_{d}]", "PPda", "UcUsUiUl", MergeNone, "aarch64_sve_cmphi", [IsStreamingCompatible]>;
+def SVCMPLS_N : SInst<"svcmple[_n_{d}]", "PPda", "UcUsUiUl", MergeNone, "aarch64_sve_cmphs", [ReverseCompare, IsStreamingCompatible]>;
+def SVCMPLO_N : SInst<"svcmplt[_n_{d}]", "PPda", "UcUsUiUl", MergeNone, "aarch64_sve_cmphi", [ReverseCompare, IsStreamingCompatible]>;
+
+def SVCMPEQ_WIDE : SInst<"svcmpeq_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmpeq_wide", [IsStreamingCompatible]>;
+def SVCMPNE_WIDE : SInst<"svcmpne_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmpne_wide", [IsStreamingCompatible]>;
+def SVCMPGE_WIDE : SInst<"svcmpge_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmpge_wide", [IsStreamingCompatible]>;
+def SVCMPGT_WIDE : SInst<"svcmpgt_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmpgt_wide", [IsStreamingCompatible]>;
+def SVCMPLE_WIDE : SInst<"svcmple_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmple_wide", [IsStreamingCompatible]>;
+def SVCMPLT_WIDE : SInst<"svcmplt_wide[_{d}]", "PPdw", "csi", MergeNone, "aarch64_sve_cmplt_wide", [IsStreamingCompatible]>;
+def SVCMPHI_WIDE : SInst<"svcmpgt_wide[_{d}]", "PPdw", "UcUsUi", MergeNone, "aarch64_sve_cmphi_wide", [IsStreamingCompatible]>;
+def SVCMPHS_WIDE : SInst<"svcmpge_wide[_{d}]", "PPdw", "UcUsUi", MergeNone, "aarch64_sve_cmphs_wide", [IsStreamingCompatible]>;
+def SVCMPLO_WIDE : SInst<"svcmplt_wide[_{d}]", "PPdw", "UcUsUi", MergeNone, "aarch64_sve_cmplo_wide", [IsStreamingCompatible]>;
+def SVCMPLS_WIDE : SInst<"svcmple_wide[_{d}]", "PPdw", "UcUsUi", MergeNone, "aarch64_sve_cmpls_wide", [IsStreamingCompatible]>;
+
+def SVCMPEQ_WIDE_N : SInst<"svcmpeq_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmpeq_wide", [IsStreamingCompatible]>;
+def SVCMPNE_WIDE_N : SInst<"svcmpne_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmpne_wide", [IsStreamingCompatible]>;
+def SVCMPGE_WIDE_N : SInst<"svcmpge_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmpge_wide", [IsStreamingCompatible]>;
+def SVCMPGT_WIDE_N : SInst<"svcmpgt_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmpgt_wide", [IsStreamingCompatible]>;
+def SVCMPLE_WIDE_N : SInst<"svcmple_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmple_wide", [IsStreamingCompatible]>;
+def SVCMPLT_WIDE_N : SInst<"svcmplt_wide[_n_{d}]", "PPdj", "csi", MergeNone, "aarch64_sve_cmplt_wide", [IsStreamingCompatible]>;
+def SVCMPHS_WIDE_N : SInst<"svcmpge_wide[_n_{d}]", "PPdj", "UcUsUi", MergeNone, "aarch64_sve_cmphs_wide", [IsStreamingCompatible]>;
+def SVCMPHI_WIDE_N : SInst<"svcmpgt_wide[_n_{d}]", "PPdj", "UcUsUi", MergeNone, "aarch64_sve_cmphi_wide", [IsStreamingCompatible]>;
+def SVCMPLO_WIDE_N : SInst<"svcmplt_wide[_n_{d}]", "PPdj", "UcUsUi", MergeNone, "aarch64_sve_cmplo_wide", [IsStreamingCompatible]>;
+def SVCMPLS_WIDE_N : SInst<"svcmple_wide[_n_{d}]", "PPdj", "UcUsUi", MergeNone, "aarch64_sve_cmpls_wide", [IsStreamingCompatible]>;
////////////////////////////////////////////////////////////////////////////////
// While comparisons
-def SVWHILELE_S32 : SInst<"svwhilele_{d}[_{1}]", "Pkk", "PcPsPiPl", MergeNone, "aarch64_sve_whilele", [IsOverloadWhile]>;
-def SVWHILELE_S64 : SInst<"svwhilele_{d}[_{1}]", "Pll", "PcPsPiPl", MergeNone, "aarch64_sve_whilele", [IsOverloadWhile]>;
-def SVWHILELO_U32 : SInst<"svwhilelt_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilelo", [IsOverloadWhile]>;
-def SVWHILELO_U64 : SInst<"svwhilelt_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilelo", [IsOverloadWhile]>;
-def SVWHILELS_U32 : SInst<"svwhilele_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilels", [IsOverloadWhile]>;
-def SVWHILELS_U64 : SInst<"svwhilele_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilels", [IsOverloadWhile]>;
-def SVWHILELT_S32 : SInst<"svwhilelt_{d}[_{1}]", "Pkk", "PcPsPiPl", MergeNone, "aarch64_sve_whilelt", [IsOverloadWhile]>;
-def SVWHILELT_S64 : SInst<"svwhilelt_{d}[_{1}]", "Pll", "PcPsPiPl", MergeNone, "aarch64_sve_whilelt", [IsOverloadWhile]>;
+def SVWHILELE_S32 : SInst<"svwhilele_{d}[_{1}]", "Pkk", "PcPsPiPl", MergeNone, "aarch64_sve_whilele", [IsOverloadWhileOrMultiVecCvt, IsStreamingCompatible]>;
+def SVWHILELE_S64 : SInst<"svwhilele_{d}[_{1}]", "Pll", "PcPsPiPl", MergeNone, "aarch64_sve_whilele", [IsOverloadWhileOrMultiVecCvt, IsStreamingCompatible]>;
+def SVWHILELO_U32 : SInst<"svwhilelt_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilelo", [IsOverloadWhileOrMultiVecCvt, IsStreamingCompatible]>;
+def SVWHILELO_U64 : SInst<"svwhilelt_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilelo", [IsOverloadWhileOrMultiVecCvt, IsStreamingCompatible]>;
+def SVWHILELS_U32 : SInst<"svwhilele_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilels", [IsOverloadWhileOrMultiVecCvt, IsStreamingCompatible]>;
+def SVWHILELS_U64 : SInst<"svwhilele_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilels", [IsOverloadWhileOrMultiVecCvt, IsStreamingCompatible]>;
+def SVWHILELT_S32 : SInst<"svwhilelt_{d}[_{1}]", "Pkk", "PcPsPiPl", MergeNone, "aarch64_sve_whilelt", [IsOverloadWhileOrMultiVecCvt, IsStreamingCompatible]>;
+def SVWHILELT_S64 : SInst<"svwhilelt_{d}[_{1}]", "Pll", "PcPsPiPl", MergeNone, "aarch64_sve_whilelt", [IsOverloadWhileOrMultiVecCvt, IsStreamingCompatible]>;
////////////////////////////////////////////////////////////////////////////////
// Counting bit
@@ -947,12 +767,12 @@ multiclass SInstCLS<string name, string types, string intrinsic, list<FlagType>
def _Z : SInst<name # "[_{d}]", "uPd", types, MergeZeroExp, intrinsic, flags>;
}
-defm SVCLS : SInstCLS<"svcls", "csil", "aarch64_sve_cls">;
-defm SVCLZ : SInstCLS<"svclz", "csilUcUsUiUl", "aarch64_sve_clz">;
-defm SVCNT : SInstCLS<"svcnt", "csilUcUsUiUlhfd", "aarch64_sve_cnt">;
+defm SVCLS : SInstCLS<"svcls", "csil", "aarch64_sve_cls", [IsStreamingCompatible]>;
+defm SVCLZ : SInstCLS<"svclz", "csilUcUsUiUl", "aarch64_sve_clz", [IsStreamingCompatible]>;
+defm SVCNT : SInstCLS<"svcnt", "csilUcUsUiUlhfd", "aarch64_sve_cnt", [IsStreamingCompatible]>;
-let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
- defm SVCNT_BF16 : SInstCLS<"svcnt", "b", "aarch64_sve_cnt">;
+let TargetGuard = "sve,bf16" in {
+ defm SVCNT_BF16 : SInstCLS<"svcnt", "b", "aarch64_sve_cnt", [IsStreamingCompatible]>;
}
////////////////////////////////////////////////////////////////////////////////
@@ -979,18 +799,18 @@ defm SVREVW : SInstZPZ<"svrevw", "lUl", "aarch64_sve_revw">;
defm SVABS_F : SInstZPZ<"svabs", "hfd", "aarch64_sve_fabs">;
defm SVNEG_F : SInstZPZ<"svneg", "hfd", "aarch64_sve_fneg">;
-defm SVABD_F : SInstZPZZ<"svabd", "hfd", "aarch64_sve_fabd">;
-defm SVADD_F : SInstZPZZ<"svadd", "hfd", "aarch64_sve_fadd">;
-defm SVDIV_F : SInstZPZZ<"svdiv", "hfd", "aarch64_sve_fdiv">;
-defm SVDIVR_F : SInstZPZZ<"svdivr", "hfd", "aarch64_sve_fdivr">;
-defm SVMAX_F : SInstZPZZ<"svmax", "hfd", "aarch64_sve_fmax">;
-defm SVMAXNM : SInstZPZZ<"svmaxnm","hfd", "aarch64_sve_fmaxnm">;
-defm SVMIN_F : SInstZPZZ<"svmin", "hfd", "aarch64_sve_fmin">;
-defm SVMINNM : SInstZPZZ<"svminnm","hfd", "aarch64_sve_fminnm">;
-defm SVMUL_F : SInstZPZZ<"svmul", "hfd", "aarch64_sve_fmul">;
-defm SVMULX : SInstZPZZ<"svmulx", "hfd", "aarch64_sve_fmulx">;
-defm SVSUB_F : SInstZPZZ<"svsub", "hfd", "aarch64_sve_fsub">;
-defm SVSUBR_F : SInstZPZZ<"svsubr", "hfd", "aarch64_sve_fsubr">;
+defm SVABD_F : SInstZPZZ<"svabd", "hfd", "aarch64_sve_fabd", "aarch64_sve_fabd_u">;
+defm SVADD_F : SInstZPZZ<"svadd", "hfd", "aarch64_sve_fadd", "aarch64_sve_fadd_u">;
+defm SVDIV_F : SInstZPZZ<"svdiv", "hfd", "aarch64_sve_fdiv", "aarch64_sve_fdiv_u">;
+defm SVDIVR_F : SInstZPZZ<"svdivr", "hfd", "aarch64_sve_fdivr", "aarch64_sve_fdiv_u", [ReverseMergeAnyBinOp]>;
+defm SVMAX_F : SInstZPZZ<"svmax", "hfd", "aarch64_sve_fmax", "aarch64_sve_fmax_u">;
+defm SVMAXNM : SInstZPZZ<"svmaxnm","hfd", "aarch64_sve_fmaxnm", "aarch64_sve_fmaxnm_u">;
+defm SVMIN_F : SInstZPZZ<"svmin", "hfd", "aarch64_sve_fmin", "aarch64_sve_fmin_u">;
+defm SVMINNM : SInstZPZZ<"svminnm","hfd", "aarch64_sve_fminnm", "aarch64_sve_fminnm_u">;
+defm SVMUL_F : SInstZPZZ<"svmul", "hfd", "aarch64_sve_fmul", "aarch64_sve_fmul_u">;
+defm SVMULX : SInstZPZZ<"svmulx", "hfd", "aarch64_sve_fmulx", "aarch64_sve_fmulx_u">;
+defm SVSUB_F : SInstZPZZ<"svsub", "hfd", "aarch64_sve_fsub", "aarch64_sve_fsub_u">;
+defm SVSUBR_F : SInstZPZZ<"svsubr", "hfd", "aarch64_sve_fsubr", "aarch64_sve_fsub_u", [ReverseMergeAnyBinOp]>;
defm SVRECPX : SInstZPZ<"svrecpx", "hfd", "aarch64_sve_frecpx">;
defm SVRINTA : SInstZPZ<"svrinta", "hfd", "aarch64_sve_frinta">;
@@ -1007,79 +827,79 @@ def SVTMAD : SInst<"svtmad[_{d}]", "dddi", "hfd", MergeNone, "aarch64_sve_ftma
def SVTSMUL : SInst<"svtsmul[_{d}]", "ddu", "hfd", MergeNone, "aarch64_sve_ftsmul_x">;
def SVTSSEL : SInst<"svtssel[_{d}]", "ddu", "hfd", MergeNone, "aarch64_sve_ftssel_x">;
-def SVSCALE_M : SInst<"svscale[_{d}]", "dPdx", "hfd", MergeOp1, "aarch64_sve_fscale">;
-def SVSCALE_X : SInst<"svscale[_{d}]", "dPdx", "hfd", MergeAny, "aarch64_sve_fscale">;
-def SVSCALE_Z : SInst<"svscale[_{d}]", "dPdx", "hfd", MergeZero, "aarch64_sve_fscale">;
-
-def SVSCALE_N_M : SInst<"svscale[_n_{d}]", "dPdK", "hfd", MergeOp1, "aarch64_sve_fscale">;
-def SVSCALE_N_X : SInst<"svscale[_n_{d}]", "dPdK", "hfd", MergeAny, "aarch64_sve_fscale">;
-def SVSCALE_N_Z : SInst<"svscale[_n_{d}]", "dPdK", "hfd", MergeZero, "aarch64_sve_fscale">;
-
-defm SVMAD_F : SInstZPZZZ<"svmad", "hfd", "aarch64_sve_fmad">;
-defm SVMLA_F : SInstZPZZZ<"svmla", "hfd", "aarch64_sve_fmla">;
-defm SVMLS_F : SInstZPZZZ<"svmls", "hfd", "aarch64_sve_fmls">;
-defm SVMSB_F : SInstZPZZZ<"svmsb", "hfd", "aarch64_sve_fmsb">;
-defm SVNMAD_F : SInstZPZZZ<"svnmad", "hfd", "aarch64_sve_fnmad">;
-defm SVNMLA_F : SInstZPZZZ<"svnmla", "hfd", "aarch64_sve_fnmla">;
-defm SVNMLS_F : SInstZPZZZ<"svnmls", "hfd", "aarch64_sve_fnmls">;
-defm SVNMSB_F : SInstZPZZZ<"svnmsb", "hfd", "aarch64_sve_fnmsb">;
-
-def SVCADD_M : SInst<"svcadd[_{d}]", "dPddi", "hfd", MergeOp1, "aarch64_sve_fcadd", [], [ImmCheck<3, ImmCheckComplexRot90_270>]>;
-def SVCADD_X : SInst<"svcadd[_{d}]", "dPddi", "hfd", MergeAny, "aarch64_sve_fcadd", [], [ImmCheck<3, ImmCheckComplexRot90_270>]>;
-def SVCADD_Z : SInst<"svcadd[_{d}]", "dPddi", "hfd", MergeZero, "aarch64_sve_fcadd", [], [ImmCheck<3, ImmCheckComplexRot90_270>]>;
-def SVCMLA_M : SInst<"svcmla[_{d}]", "dPdddi", "hfd", MergeOp1, "aarch64_sve_fcmla", [], [ImmCheck<4, ImmCheckComplexRotAll90>]>;
-def SVCMLA_X : SInst<"svcmla[_{d}]", "dPdddi", "hfd", MergeAny, "aarch64_sve_fcmla", [], [ImmCheck<4, ImmCheckComplexRotAll90>]>;
-def SVCMLA_Z : SInst<"svcmla[_{d}]", "dPdddi", "hfd", MergeZero, "aarch64_sve_fcmla", [], [ImmCheck<4, ImmCheckComplexRotAll90>]>;
-
-def SVCMLA_LANE : SInst<"svcmla_lane[_{d}]", "ddddii", "hf", MergeNone, "aarch64_sve_fcmla_lane", [], [ImmCheck<3, ImmCheckLaneIndexCompRotate, 2>,
+def SVSCALE_M : SInst<"svscale[_{d}]", "dPdx", "hfd", MergeOp1, "aarch64_sve_fscale", [IsStreamingCompatible]>;
+def SVSCALE_X : SInst<"svscale[_{d}]", "dPdx", "hfd", MergeAny, "aarch64_sve_fscale", [IsStreamingCompatible]>;
+def SVSCALE_Z : SInst<"svscale[_{d}]", "dPdx", "hfd", MergeZero, "aarch64_sve_fscale", [IsStreamingCompatible]>;
+
+def SVSCALE_N_M : SInst<"svscale[_n_{d}]", "dPdK", "hfd", MergeOp1, "aarch64_sve_fscale", [IsStreamingCompatible]>;
+def SVSCALE_N_X : SInst<"svscale[_n_{d}]", "dPdK", "hfd", MergeAny, "aarch64_sve_fscale", [IsStreamingCompatible]>;
+def SVSCALE_N_Z : SInst<"svscale[_n_{d}]", "dPdK", "hfd", MergeZero, "aarch64_sve_fscale", [IsStreamingCompatible]>;
+
+defm SVMAD_F : SInstZPZZZ<"svmad", "hfd", "aarch64_sve_fmad", "aarch64_sve_fmla_u", [IsStreamingCompatible, ReverseMergeAnyAccOp]>;
+defm SVMLA_F : SInstZPZZZ<"svmla", "hfd", "aarch64_sve_fmla", "aarch64_sve_fmla_u", [IsStreamingCompatible]>;
+defm SVMLS_F : SInstZPZZZ<"svmls", "hfd", "aarch64_sve_fmls", "aarch64_sve_fmls_u", [IsStreamingCompatible]>;
+defm SVMSB_F : SInstZPZZZ<"svmsb", "hfd", "aarch64_sve_fmsb", "aarch64_sve_fmls_u", [IsStreamingCompatible, ReverseMergeAnyAccOp]>;
+defm SVNMAD_F : SInstZPZZZ<"svnmad", "hfd", "aarch64_sve_fnmad", "aarch64_sve_fnmla_u", [IsStreamingCompatible, ReverseMergeAnyAccOp]>;
+defm SVNMLA_F : SInstZPZZZ<"svnmla", "hfd", "aarch64_sve_fnmla", "aarch64_sve_fnmla_u", [IsStreamingCompatible]>;
+defm SVNMLS_F : SInstZPZZZ<"svnmls", "hfd", "aarch64_sve_fnmls", "aarch64_sve_fnmls_u", [IsStreamingCompatible]>;
+defm SVNMSB_F : SInstZPZZZ<"svnmsb", "hfd", "aarch64_sve_fnmsb", "aarch64_sve_fnmls_u", [IsStreamingCompatible, ReverseMergeAnyAccOp]>;
+
+def SVCADD_M : SInst<"svcadd[_{d}]", "dPddi", "hfd", MergeOp1, "aarch64_sve_fcadd", [IsStreamingCompatible], [ImmCheck<3, ImmCheckComplexRot90_270>]>;
+def SVCADD_X : SInst<"svcadd[_{d}]", "dPddi", "hfd", MergeAny, "aarch64_sve_fcadd", [IsStreamingCompatible], [ImmCheck<3, ImmCheckComplexRot90_270>]>;
+def SVCADD_Z : SInst<"svcadd[_{d}]", "dPddi", "hfd", MergeZero, "aarch64_sve_fcadd", [IsStreamingCompatible], [ImmCheck<3, ImmCheckComplexRot90_270>]>;
+def SVCMLA_M : SInst<"svcmla[_{d}]", "dPdddi", "hfd", MergeOp1, "aarch64_sve_fcmla", [IsStreamingCompatible], [ImmCheck<4, ImmCheckComplexRotAll90>]>;
+def SVCMLA_X : SInst<"svcmla[_{d}]", "dPdddi", "hfd", MergeAny, "aarch64_sve_fcmla", [IsStreamingCompatible], [ImmCheck<4, ImmCheckComplexRotAll90>]>;
+def SVCMLA_Z : SInst<"svcmla[_{d}]", "dPdddi", "hfd", MergeZero, "aarch64_sve_fcmla", [IsStreamingCompatible], [ImmCheck<4, ImmCheckComplexRotAll90>]>;
+
+def SVCMLA_LANE : SInst<"svcmla_lane[_{d}]", "ddddii", "hf", MergeNone, "aarch64_sve_fcmla_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndexCompRotate, 2>,
ImmCheck<4, ImmCheckComplexRotAll90>]>;
-def SVMLA_LANE : SInst<"svmla_lane[_{d}]", "ddddi", "hfd", MergeNone, "aarch64_sve_fmla_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
-def SVMLS_LANE : SInst<"svmls_lane[_{d}]", "ddddi", "hfd", MergeNone, "aarch64_sve_fmls_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
-def SVMUL_LANE : SInst<"svmul_lane[_{d}]", "dddi", "hfd", MergeNone, "aarch64_sve_fmul_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVMLA_LANE : SInst<"svmla_lane[_{d}]", "ddddi", "hfd", MergeNone, "aarch64_sve_fmla_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLS_LANE : SInst<"svmls_lane[_{d}]", "ddddi", "hfd", MergeNone, "aarch64_sve_fmls_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMUL_LANE : SInst<"svmul_lane[_{d}]", "dddi", "hfd", MergeNone, "aarch64_sve_fmul_lane", [IsStreamingCompatible], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
-def SVRECPE : SInst<"svrecpe[_{d}]", "dd", "hfd", MergeNone, "aarch64_sve_frecpe_x">;
-def SVRECPS : SInst<"svrecps[_{d}]", "ddd", "hfd", MergeNone, "aarch64_sve_frecps_x">;
-def SVRSQRTE : SInst<"svrsqrte[_{d}]", "dd", "hfd", MergeNone, "aarch64_sve_frsqrte_x">;
-def SVRSQRTS : SInst<"svrsqrts[_{d}]", "ddd", "hfd", MergeNone, "aarch64_sve_frsqrts_x">;
+def SVRECPE : SInst<"svrecpe[_{d}]", "dd", "hfd", MergeNone, "aarch64_sve_frecpe_x", [IsStreamingCompatible]>;
+def SVRECPS : SInst<"svrecps[_{d}]", "ddd", "hfd", MergeNone, "aarch64_sve_frecps_x", [IsStreamingCompatible]>;
+def SVRSQRTE : SInst<"svrsqrte[_{d}]", "dd", "hfd", MergeNone, "aarch64_sve_frsqrte_x", [IsStreamingCompatible]>;
+def SVRSQRTS : SInst<"svrsqrts[_{d}]", "ddd", "hfd", MergeNone, "aarch64_sve_frsqrts_x", [IsStreamingCompatible]>;
////////////////////////////////////////////////////////////////////////////////
// Floating-point reductions
-def SVFADDA : SInst<"svadda[_{d}]", "sPsd", "hfd", MergeNone, "aarch64_sve_fadda">;
-def SVFADDV : SInst<"svaddv[_{d}]", "sPd", "hfd", MergeNone, "aarch64_sve_faddv">;
-def SVFMAXV : SInst<"svmaxv[_{d}]", "sPd", "hfd", MergeNone, "aarch64_sve_fmaxv">;
-def SVFMAXNMV : SInst<"svmaxnmv[_{d}]", "sPd", "hfd", MergeNone, "aarch64_sve_fmaxnmv">;
-def SVFMINV : SInst<"svminv[_{d}]", "sPd", "hfd", MergeNone, "aarch64_sve_fminv">;
-def SVFMINNMV : SInst<"svminnmv[_{d}]", "sPd", "hfd", MergeNone, "aarch64_sve_fminnmv">;
+def SVFADDA : SInst<"svadda[_{d}]", "sPsd", "hfd", MergeNone, "aarch64_sve_fadda", [IsStreamingCompatible]>;
+def SVFADDV : SInst<"svaddv[_{d}]", "sPd", "hfd", MergeNone, "aarch64_sve_faddv", [IsStreamingCompatible]>;
+def SVFMAXV : SInst<"svmaxv[_{d}]", "sPd", "hfd", MergeNone, "aarch64_sve_fmaxv", [IsStreamingCompatible]>;
+def SVFMAXNMV : SInst<"svmaxnmv[_{d}]", "sPd", "hfd", MergeNone, "aarch64_sve_fmaxnmv", [IsStreamingCompatible]>;
+def SVFMINV : SInst<"svminv[_{d}]", "sPd", "hfd", MergeNone, "aarch64_sve_fminv", [IsStreamingCompatible]>;
+def SVFMINNMV : SInst<"svminnmv[_{d}]", "sPd", "hfd", MergeNone, "aarch64_sve_fminnmv", [IsStreamingCompatible]>;
////////////////////////////////////////////////////////////////////////////////
// Floating-point comparisons
-def SVACGE : SInst<"svacge[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_facge">;
-def SVACGT : SInst<"svacgt[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_facgt">;
-def SVACLE : SInst<"svacle[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_facge", [ReverseCompare]>;
-def SVACLT : SInst<"svaclt[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_facgt", [ReverseCompare]>;
-def SVCMPUO : SInst<"svcmpuo[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpuo">;
-
-def SVACGE_N : SInst<"svacge[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facge">;
-def SVACGT_N : SInst<"svacgt[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facgt">;
-def SVACLE_N : SInst<"svacle[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facge", [ReverseCompare]>;
-def SVACLT_N : SInst<"svaclt[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facgt", [ReverseCompare]>;
-def SVCMPUO_N : SInst<"svcmpuo[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpuo">;
-
-def SVCMPEQ_F : SInst<"svcmpeq[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpeq">;
-def SVCMPNE_F : SInst<"svcmpne[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpne">;
-def SVCMPGE_F : SInst<"svcmpge[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpge">;
-def SVCMPGT_F : SInst<"svcmpgt[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpgt">;
-def SVCMPLE_F : SInst<"svcmple[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpge", [ReverseCompare]>;
-def SVCMPLT_F : SInst<"svcmplt[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpgt", [ReverseCompare]>;
-
-def SVCMPEQ_F_N : SInst<"svcmpeq[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpeq">;
-def SVCMPNE_F_N : SInst<"svcmpne[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpne">;
-def SVCMPGE_F_N : SInst<"svcmpge[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpge">;
-def SVCMPGT_F_N : SInst<"svcmpgt[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpgt">;
-def SVCMPLE_F_N : SInst<"svcmple[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpge", [ReverseCompare]>;
-def SVCMPLT_F_N : SInst<"svcmplt[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpgt", [ReverseCompare]>;
+def SVACGE : SInst<"svacge[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_facge", [IsStreamingCompatible]>;
+def SVACGT : SInst<"svacgt[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_facgt", [IsStreamingCompatible]>;
+def SVACLE : SInst<"svacle[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_facge", [ReverseCompare, IsStreamingCompatible]>;
+def SVACLT : SInst<"svaclt[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_facgt", [ReverseCompare, IsStreamingCompatible]>;
+def SVCMPUO : SInst<"svcmpuo[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpuo", [IsStreamingCompatible]>;
+
+def SVACGE_N : SInst<"svacge[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facge", [IsStreamingCompatible]>;
+def SVACGT_N : SInst<"svacgt[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facgt", [IsStreamingCompatible]>;
+def SVACLE_N : SInst<"svacle[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facge", [ReverseCompare, IsStreamingCompatible]>;
+def SVACLT_N : SInst<"svaclt[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_facgt", [ReverseCompare, IsStreamingCompatible]>;
+def SVCMPUO_N : SInst<"svcmpuo[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpuo", [IsStreamingCompatible]>;
+
+def SVCMPEQ_F : SInst<"svcmpeq[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpeq", [IsStreamingCompatible]>;
+def SVCMPNE_F : SInst<"svcmpne[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpne", [IsStreamingCompatible]>;
+def SVCMPGE_F : SInst<"svcmpge[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpge", [IsStreamingCompatible]>;
+def SVCMPGT_F : SInst<"svcmpgt[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpgt", [IsStreamingCompatible]>;
+def SVCMPLE_F : SInst<"svcmple[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpge", [ReverseCompare, IsStreamingCompatible]>;
+def SVCMPLT_F : SInst<"svcmplt[_{d}]", "PPdd", "hfd", MergeNone, "aarch64_sve_fcmpgt", [ReverseCompare, IsStreamingCompatible]>;
+
+def SVCMPEQ_F_N : SInst<"svcmpeq[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpeq", [IsStreamingCompatible]>;
+def SVCMPNE_F_N : SInst<"svcmpne[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpne", [IsStreamingCompatible]>;
+def SVCMPGE_F_N : SInst<"svcmpge[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpge", [IsStreamingCompatible]>;
+def SVCMPGT_F_N : SInst<"svcmpgt[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpgt", [IsStreamingCompatible]>;
+def SVCMPLE_F_N : SInst<"svcmple[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpge", [ReverseCompare, IsStreamingCompatible]>;
+def SVCMPLT_F_N : SInst<"svcmplt[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sve_fcmpgt", [ReverseCompare, IsStreamingCompatible]>;
////////////////////////////////////////////////////////////////////////////////
// Floating-point conversions
@@ -1087,16 +907,16 @@ def SVCMPLT_F_N : SInst<"svcmplt[_n_{d}]", "PPda", "hfd", MergeNone, "aarch64_sv
multiclass SInstCvtMXZ<
string name, string m_types, string xz_types, string types,
string intrinsic, list<FlagType> flags = [IsOverloadNone]> {
- def _M : SInst<name, m_types, types, MergeOp1, intrinsic, flags>;
- def _X : SInst<name, xz_types, types, MergeAnyExp, intrinsic, flags>;
- def _Z : SInst<name, xz_types, types, MergeZeroExp, intrinsic, flags>;
+ def _M : SInst<name, m_types, types, MergeOp1, intrinsic, !listconcat(flags, [IsStreamingCompatible])>;
+ def _X : SInst<name, xz_types, types, MergeAnyExp, intrinsic, !listconcat(flags, [IsStreamingCompatible])>;
+ def _Z : SInst<name, xz_types, types, MergeZeroExp, intrinsic, !listconcat(flags, [IsStreamingCompatible])>;
}
multiclass SInstCvtMX<string name, string m_types, string xz_types,
string types, string intrinsic,
list<FlagType> flags = [IsOverloadNone]> {
- def _M : SInst<name, m_types, types, MergeOp1, intrinsic, flags>;
- def _X : SInst<name, xz_types, types, MergeAnyExp, intrinsic, flags>;
+ def _M : SInst<name, m_types, types, MergeOp1, intrinsic, !listconcat(flags, [IsStreamingCompatible])>;
+ def _X : SInst<name, xz_types, types, MergeAnyExp, intrinsic, !listconcat(flags, [IsStreamingCompatible])>;
}
// svcvt_s##_f16
@@ -1108,9 +928,9 @@ defm SVFCVTZS_S64_F16 : SInstCvtMXZ<"svcvt_s64[_f16]", "ddPO", "dPO", "l", "aar
defm SVFCVTZS_S32_F32 : SInstCvtMXZ<"svcvt_s32[_f32]", "ddPM", "dPM", "i", "aarch64_sve_fcvtzs", [IsOverloadCvt]>;
defm SVFCVTZS_S64_F32 : SInstCvtMXZ<"svcvt_s64[_f32]", "ddPM", "dPM", "l", "aarch64_sve_fcvtzs_i64f32">;
-let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+let TargetGuard = "sve,bf16" in {
defm SVCVT_BF16_F32 : SInstCvtMXZ<"svcvt_bf16[_f32]", "ddPM", "dPM", "b", "aarch64_sve_fcvt_bf16f32">;
- def SVCVTNT_BF16_F32 : SInst<"svcvtnt_bf16[_f32]", "ddPM", "b", MergeOp1, "aarch64_sve_fcvtnt_bf16f32", [IsOverloadNone]>;
+ def SVCVTNT_BF16_F32 : SInst<"svcvtnt_bf16[_f32]", "ddPM", "b", MergeOp1, "aarch64_sve_fcvtnt_bf16f32", [IsOverloadNone, IsStreamingCompatible]>;
}
// svcvt_s##_f64
@@ -1168,17 +988,17 @@ defm SVFCVT_F32_F64 : SInstCvtMXZ<"svcvt_f32[_f64]", "MMPd", "MPd", "d", "aarc
defm SVFCVT_F64_F16 : SInstCvtMXZ<"svcvt_f64[_f16]", "ddPO", "dPO", "d", "aarch64_sve_fcvt_f64f16">;
defm SVFCVT_F64_F32 : SInstCvtMXZ<"svcvt_f64[_f32]", "ddPM", "dPM", "d", "aarch64_sve_fcvt_f64f32">;
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+let TargetGuard = "sve2" in {
defm SVCVTLT_F32 : SInstCvtMX<"svcvtlt_f32[_f16]", "ddPh", "dPh", "f", "aarch64_sve_fcvtlt_f32f16">;
defm SVCVTLT_F64 : SInstCvtMX<"svcvtlt_f64[_f32]", "ddPh", "dPh", "d", "aarch64_sve_fcvtlt_f64f32">;
defm SVCVTX_F32 : SInstCvtMXZ<"svcvtx_f32[_f64]", "MMPd", "MPd", "d", "aarch64_sve_fcvtx_f32f64">;
-def SVCVTNT_F32 : SInst<"svcvtnt_f16[_f32]", "hhPd", "f", MergeOp1, "aarch64_sve_fcvtnt_f16f32", [IsOverloadNone]>;
-def SVCVTNT_F64 : SInst<"svcvtnt_f32[_f64]", "hhPd", "d", MergeOp1, "aarch64_sve_fcvtnt_f32f64", [IsOverloadNone]>;
+def SVCVTNT_F32 : SInst<"svcvtnt_f16[_f32]", "hhPd", "f", MergeOp1, "aarch64_sve_fcvtnt_f16f32", [IsOverloadNone, IsStreamingCompatible]>;
+def SVCVTNT_F64 : SInst<"svcvtnt_f32[_f64]", "hhPd", "d", MergeOp1, "aarch64_sve_fcvtnt_f32f64", [IsOverloadNone, IsStreamingCompatible]>;
// SVCVTNT_X : Implemented as macro by SveEmitter.cpp
-def SVCVTXNT_F32 : SInst<"svcvtxnt_f32[_f64]", "MMPd", "d", MergeOp1, "aarch64_sve_fcvtxnt_f32f64", [IsOverloadNone]>;
+def SVCVTXNT_F32 : SInst<"svcvtxnt_f32[_f64]", "MMPd", "d", MergeOp1, "aarch64_sve_fcvtxnt_f32f64", [IsOverloadNone, IsStreamingCompatible]>;
// SVCVTXNT_X_F32 : Implemented as macro by SveEmitter.cpp
}
@@ -1187,9 +1007,9 @@ def SVCVTXNT_F32 : SInst<"svcvtxnt_f32[_f64]", "MMPd", "d", MergeOp1, "aarch6
// Permutations and selection
multiclass SVEPerm<string name, string proto, string i> {
- def : SInst<name, proto, "csilUcUsUiUlhfd", MergeNone, i>;
- let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
- def: SInst<name, proto, "b", MergeNone, i>;
+ def : SInst<name, proto, "csilUcUsUiUlhfd", MergeNone, i, [IsStreamingCompatible]>;
+ let TargetGuard = "sve,bf16" in {
+ def: SInst<name, proto, "b", MergeNone, i, [IsStreamingCompatible]>;
}
}
@@ -1203,135 +1023,156 @@ def SVCOMPACT : SInst<"svcompact[_{d}]", "dPd", "ilUiUlfd", MergeNo
// splat of any possible lane. It is upto LLVM to pick a more efficient
// instruction such as DUP (indexed) if the lane index fits the range of the
// instruction's immediate.
-def SVDUP_LANE : SInst<"svdup_lane[_{d}]", "ddL", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tbl">;
-let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
+def SVDUP_LANE : SInst<"svdup_lane[_{d}]", "ddL", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tbl", [IsStreamingCompatible]>;
+let TargetGuard = "sve,bf16" in {
def SVDUP_LANE_BF16 :
- SInst<"svdup_lane[_{d}]", "ddL", "b", MergeNone, "aarch64_sve_tbl">;
+ SInst<"svdup_lane[_{d}]", "ddL", "b", MergeNone, "aarch64_sve_tbl", [IsStreamingCompatible]>;
}
-def SVDUPQ_LANE : SInst<"svdupq_lane[_{d}]", "ddn", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_dupq_lane">;
-let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
- def SVDUPQ_LANE_BF16 : SInst<"svdupq_lane[_{d}]", "ddn", "b", MergeNone, "aarch64_sve_dupq_lane">;
+def SVDUPQ_LANE : SInst<"svdupq_lane[_{d}]", "ddn", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_dupq_lane", [IsStreamingCompatible]>;
+let TargetGuard = "sve,bf16" in {
+ def SVDUPQ_LANE_BF16 : SInst<"svdupq_lane[_{d}]", "ddn", "b", MergeNone, "aarch64_sve_dupq_lane", [IsStreamingCompatible]>;
}
-def SVEXT : SInst<"svext[_{d}]", "dddi", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_ext", [], [ImmCheck<2, ImmCheckExtract, 1>]>;
+def SVEXT : SInst<"svext[_{d}]", "dddi", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_ext", [IsStreamingCompatible], [ImmCheck<2, ImmCheckExtract, 1>]>;
defm SVLASTA : SVEPerm<"svlasta[_{d}]", "sPd", "aarch64_sve_lasta">;
defm SVLASTB : SVEPerm<"svlastb[_{d}]", "sPd", "aarch64_sve_lastb">;
-def SVREV : SInst<"svrev[_{d}]", "dd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_rev">;
-def SVSEL : SInst<"svsel[_{d}]", "dPdd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_sel">;
-def SVSPLICE : SInst<"svsplice[_{d}]", "dPdd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_splice">;
-def SVTBL : SInst<"svtbl[_{d}]", "ddu", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tbl">;
-
-let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
- def SVTBL_BF16 : SInst<"svtbl[_{d}]", "ddu", "b", MergeNone, "aarch64_sve_tbl">;
-}
-
-def SVTRN1 : SInst<"svtrn1[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_trn1">;
-def SVTRN2 : SInst<"svtrn2[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_trn2">;
-def SVUNPKHI_S : SInst<"svunpkhi[_{d}]", "dh", "sil", MergeNone, "aarch64_sve_sunpkhi">;
-def SVUNPKHI_U : SInst<"svunpkhi[_{d}]", "dh", "UsUiUl", MergeNone, "aarch64_sve_uunpkhi">;
-def SVUNPKLO_S : SInst<"svunpklo[_{d}]", "dh", "sil", MergeNone, "aarch64_sve_sunpklo">;
-def SVUNPKLO_U : SInst<"svunpklo[_{d}]", "dh", "UsUiUl", MergeNone, "aarch64_sve_uunpklo">;
-def SVUZP1 : SInst<"svuzp1[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_uzp1">;
-def SVUZP2 : SInst<"svuzp2[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_uzp2">;
-def SVZIP1 : SInst<"svzip1[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_zip1">;
-def SVZIP2 : SInst<"svzip2[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_zip2">;
-
-let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
-def SVEXT_BF16 : SInst<"svext[_{d}]", "dddi", "b", MergeNone, "aarch64_sve_ext", [], [ImmCheck<2, ImmCheckExtract, 1>]>;
-def SVREV_BF16 : SInst<"svrev[_{d}]", "dd", "b", MergeNone, "aarch64_sve_rev">;
-def SVSEL_BF16 : SInst<"svsel[_{d}]", "dPdd", "b", MergeNone, "aarch64_sve_sel">;
-def SVSPLICE_BF16 : SInst<"svsplice[_{d}]", "dPdd", "b", MergeNone, "aarch64_sve_splice">;
-def SVTRN1_BF16 : SInst<"svtrn1[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_trn1">;
-def SVTRN2_BF16 : SInst<"svtrn2[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_trn2">;
-def SVUZP1_BF16 : SInst<"svuzp1[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_uzp1">;
-def SVUZP2_BF16 : SInst<"svuzp2[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_uzp2">;
-def SVZIP1_BF16 : SInst<"svzip1[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_zip1">;
-def SVZIP2_BF16 : SInst<"svzip2[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_zip2">;
-}
-
-def SVREV_B : SInst<"svrev_{d}", "PP", "PcPsPiPl", MergeNone, "aarch64_sve_rev">;
-def SVSEL_B : SInst<"svsel[_b]", "PPPP", "Pc", MergeNone, "aarch64_sve_sel">;
-def SVTRN1_B : SInst<"svtrn1_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_trn1">;
-def SVTRN2_B : SInst<"svtrn2_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_trn2">;
-def SVPUNPKHI : SInst<"svunpkhi[_b]", "PP", "Pc", MergeNone, "aarch64_sve_punpkhi">;
-def SVPUNPKLO : SInst<"svunpklo[_b]", "PP", "Pc", MergeNone, "aarch64_sve_punpklo">;
-def SVUZP1_B : SInst<"svuzp1_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_uzp1">;
-def SVUZP2_B : SInst<"svuzp2_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_uzp2">;
-def SVZIP1_B : SInst<"svzip1_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_zip1">;
-def SVZIP2_B : SInst<"svzip2_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_zip2">;
+def SVREV : SInst<"svrev[_{d}]", "dd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_rev", [IsStreamingCompatible]>;
+def SVSEL : SInst<"svsel[_{d}]", "dPdd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_sel", [IsStreamingCompatible]>;
+def SVSPLICE : SInst<"svsplice[_{d}]", "dPdd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_splice", [IsStreamingCompatible]>;
+def SVTBL : SInst<"svtbl[_{d}]", "ddu", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tbl", [IsStreamingCompatible]>;
+
+let TargetGuard = "sve,bf16" in {
+ def SVTBL_BF16 : SInst<"svtbl[_{d}]", "ddu", "b", MergeNone, "aarch64_sve_tbl", [IsStreamingCompatible]>;
+}
+
+def SVTRN1 : SInst<"svtrn1[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_trn1", [IsStreamingCompatible]>;
+def SVTRN2 : SInst<"svtrn2[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_trn2", [IsStreamingCompatible]>;
+def SVUNPKHI_S : SInst<"svunpkhi[_{d}]", "dh", "sil", MergeNone, "aarch64_sve_sunpkhi", [IsStreamingCompatible]>;
+def SVUNPKHI_U : SInst<"svunpkhi[_{d}]", "dh", "UsUiUl", MergeNone, "aarch64_sve_uunpkhi", [IsStreamingCompatible]>;
+def SVUNPKLO_S : SInst<"svunpklo[_{d}]", "dh", "sil", MergeNone, "aarch64_sve_sunpklo", [IsStreamingCompatible]>;
+def SVUNPKLO_U : SInst<"svunpklo[_{d}]", "dh", "UsUiUl", MergeNone, "aarch64_sve_uunpklo", [IsStreamingCompatible]>;
+def SVUZP1 : SInst<"svuzp1[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_uzp1", [IsStreamingCompatible]>;
+def SVUZP2 : SInst<"svuzp2[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_uzp2", [IsStreamingCompatible]>;
+def SVZIP1 : SInst<"svzip1[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_zip1", [IsStreamingCompatible]>;
+def SVZIP2 : SInst<"svzip2[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_zip2", [IsStreamingCompatible]>;
+
+let TargetGuard = "sve,bf16" in {
+def SVEXT_BF16 : SInst<"svext[_{d}]", "dddi", "b", MergeNone, "aarch64_sve_ext", [IsStreamingCompatible], [ImmCheck<2, ImmCheckExtract, 1>]>;
+def SVREV_BF16 : SInst<"svrev[_{d}]", "dd", "b", MergeNone, "aarch64_sve_rev", [IsStreamingCompatible]>;
+def SVSEL_BF16 : SInst<"svsel[_{d}]", "dPdd", "b", MergeNone, "aarch64_sve_sel", [IsStreamingCompatible]>;
+def SVSPLICE_BF16 : SInst<"svsplice[_{d}]", "dPdd", "b", MergeNone, "aarch64_sve_splice", [IsStreamingCompatible]>;
+def SVTRN1_BF16 : SInst<"svtrn1[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_trn1", [IsStreamingCompatible]>;
+def SVTRN2_BF16 : SInst<"svtrn2[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_trn2", [IsStreamingCompatible]>;
+def SVUZP1_BF16 : SInst<"svuzp1[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_uzp1", [IsStreamingCompatible]>;
+def SVUZP2_BF16 : SInst<"svuzp2[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_uzp2", [IsStreamingCompatible]>;
+def SVZIP1_BF16 : SInst<"svzip1[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_zip1", [IsStreamingCompatible]>;
+def SVZIP2_BF16 : SInst<"svzip2[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_zip2", [IsStreamingCompatible]>;
+}
+
+def SVREV_B8 : SInst<"svrev_b8", "PP", "Pc", MergeNone, "aarch64_sve_rev", [IsStreamingCompatible]>;
+def SVREV_B16 : SInst<"svrev_b16", "PP", "Pc", MergeNone, "aarch64_sve_rev_b16", [IsOverloadNone, IsStreamingCompatible]>;
+def SVREV_B32 : SInst<"svrev_b32", "PP", "Pc", MergeNone, "aarch64_sve_rev_b32", [IsOverloadNone, IsStreamingCompatible]>;
+def SVREV_B64 : SInst<"svrev_b64", "PP", "Pc", MergeNone, "aarch64_sve_rev_b64", [IsOverloadNone, IsStreamingCompatible]>;
+def SVSEL_B : SInst<"svsel[_b]", "PPPP", "Pc", MergeNone, "aarch64_sve_sel", [IsStreamingCompatible]>;
+def SVTRN1_B8 : SInst<"svtrn1_b8", "PPP", "Pc", MergeNone, "aarch64_sve_trn1", [IsStreamingCompatible]>;
+def SVTRN1_B16 : SInst<"svtrn1_b16", "PPP", "Pc", MergeNone, "aarch64_sve_trn1_b16", [IsOverloadNone, IsStreamingCompatible]>;
+def SVTRN1_B32 : SInst<"svtrn1_b32", "PPP", "Pc", MergeNone, "aarch64_sve_trn1_b32", [IsOverloadNone, IsStreamingCompatible]>;
+def SVTRN1_B64 : SInst<"svtrn1_b64", "PPP", "Pc", MergeNone, "aarch64_sve_trn1_b64", [IsOverloadNone, IsStreamingCompatible]>;
+def SVTRN2_B8 : SInst<"svtrn2_b8", "PPP", "Pc", MergeNone, "aarch64_sve_trn2", [IsStreamingCompatible]>;
+def SVTRN2_B16 : SInst<"svtrn2_b16", "PPP", "Pc", MergeNone, "aarch64_sve_trn2_b16", [IsOverloadNone, IsStreamingCompatible]>;
+def SVTRN2_B32 : SInst<"svtrn2_b32", "PPP", "Pc", MergeNone, "aarch64_sve_trn2_b32", [IsOverloadNone, IsStreamingCompatible]>;
+def SVTRN2_B64 : SInst<"svtrn2_b64", "PPP", "Pc", MergeNone, "aarch64_sve_trn2_b64", [IsOverloadNone, IsStreamingCompatible]>;
+def SVPUNPKHI : SInst<"svunpkhi[_b]", "PP", "Pc", MergeNone, "aarch64_sve_punpkhi", [IsStreamingCompatible]>;
+def SVPUNPKLO : SInst<"svunpklo[_b]", "PP", "Pc", MergeNone, "aarch64_sve_punpklo", [IsStreamingCompatible]>;
+def SVUZP1_B8 : SInst<"svuzp1_b8", "PPP", "Pc", MergeNone, "aarch64_sve_uzp1", [IsStreamingCompatible]>;
+def SVUZP1_B16 : SInst<"svuzp1_b16", "PPP", "Pc", MergeNone, "aarch64_sve_uzp1_b16", [IsOverloadNone, IsStreamingCompatible]>;
+def SVUZP1_B32 : SInst<"svuzp1_b32", "PPP", "Pc", MergeNone, "aarch64_sve_uzp1_b32", [IsOverloadNone, IsStreamingCompatible]>;
+def SVUZP1_B64 : SInst<"svuzp1_b64", "PPP", "Pc", MergeNone, "aarch64_sve_uzp1_b64", [IsOverloadNone, IsStreamingCompatible]>;
+def SVUZP2_B8 : SInst<"svuzp2_b8", "PPP", "Pc", MergeNone, "aarch64_sve_uzp2", [IsStreamingCompatible]>;
+def SVUZP2_B16 : SInst<"svuzp2_b16", "PPP", "Pc", MergeNone, "aarch64_sve_uzp2_b16", [IsOverloadNone, IsStreamingCompatible]>;
+def SVUZP2_B32 : SInst<"svuzp2_b32", "PPP", "Pc", MergeNone, "aarch64_sve_uzp2_b32", [IsOverloadNone, IsStreamingCompatible]>;
+def SVUZP2_B64 : SInst<"svuzp2_b64", "PPP", "Pc", MergeNone, "aarch64_sve_uzp2_b64", [IsOverloadNone, IsStreamingCompatible]>;
+def SVZIP1_B8 : SInst<"svzip1_b8", "PPP", "Pc", MergeNone, "aarch64_sve_zip1", [IsStreamingCompatible]>;
+def SVZIP1_B16 : SInst<"svzip1_b16", "PPP", "Pc", MergeNone, "aarch64_sve_zip1_b16", [IsOverloadNone, IsStreamingCompatible]>;
+def SVZIP1_B32 : SInst<"svzip1_b32", "PPP", "Pc", MergeNone, "aarch64_sve_zip1_b32", [IsOverloadNone, IsStreamingCompatible]>;
+def SVZIP1_B64 : SInst<"svzip1_b64", "PPP", "Pc", MergeNone, "aarch64_sve_zip1_b64", [IsOverloadNone, IsStreamingCompatible]>;
+def SVZIP2_B : SInst<"svzip2_b8", "PPP", "Pc", MergeNone, "aarch64_sve_zip2", [IsStreamingCompatible]>;
+def SVZIP2_B16 : SInst<"svzip2_b16", "PPP", "Pc", MergeNone, "aarch64_sve_zip2_b16", [IsOverloadNone, IsStreamingCompatible]>;
+def SVZIP2_B32 : SInst<"svzip2_b32", "PPP", "Pc", MergeNone, "aarch64_sve_zip2_b32", [IsOverloadNone, IsStreamingCompatible]>;
+def SVZIP2_B64 : SInst<"svzip2_b64", "PPP", "Pc", MergeNone, "aarch64_sve_zip2_b64", [IsOverloadNone, IsStreamingCompatible]>;
////////////////////////////////////////////////////////////////////////////////
// Predicate creation
-def SVPFALSE : SInst<"svpfalse[_b]", "P", "", MergeNone, "", [IsOverloadNone]>;
+def SVPFALSE : SInst<"svpfalse[_b]", "Pv", "", MergeNone, "", [IsOverloadNone, IsStreamingCompatible]>;
-def SVPTRUE_PAT : SInst<"svptrue_pat_{d}", "PI", "PcPsPiPl", MergeNone, "aarch64_sve_ptrue">;
-def SVPTRUE : SInst<"svptrue_{d}", "P", "PcPsPiPl", MergeNone, "aarch64_sve_ptrue", [IsAppendSVALL]>;
+def SVPTRUE_PAT : SInst<"svptrue_pat_{d}", "PI", "PcPsPiPl", MergeNone, "aarch64_sve_ptrue", [IsStreamingCompatible]>;
+def SVPTRUE : SInst<"svptrue_{d}", "Pv", "PcPsPiPl", MergeNone, "aarch64_sve_ptrue", [IsAppendSVALL, IsStreamingCompatible]>;
-def SVDUPQ_B8 : SInst<"svdupq[_n]_{d}", "Pssssssssssssssss", "Pc", MergeNone>;
-def SVDUPQ_B16 : SInst<"svdupq[_n]_{d}", "Pssssssss", "Ps", MergeNone>;
-def SVDUPQ_B32 : SInst<"svdupq[_n]_{d}", "Pssss", "Pi", MergeNone>;
-def SVDUPQ_B64 : SInst<"svdupq[_n]_{d}", "Pss", "Pl", MergeNone>;
-def SVDUP_N_B : SInst<"svdup[_n]_{d}", "Ps", "PcPsPiPl", MergeNone>;
+def SVDUPQ_B8 : SInst<"svdupq[_n]_{d}", "Pssssssssssssssss", "Pc", MergeNone, "", [IsStreamingCompatible]>;
+def SVDUPQ_B16 : SInst<"svdupq[_n]_{d}", "Pssssssss", "Ps", MergeNone, "", [IsStreamingCompatible]>;
+def SVDUPQ_B32 : SInst<"svdupq[_n]_{d}", "Pssss", "Pi", MergeNone, "", [IsStreamingCompatible]>;
+def SVDUPQ_B64 : SInst<"svdupq[_n]_{d}", "Pss", "Pl", MergeNone, "", [IsStreamingCompatible]>;
+def SVDUP_N_B : SInst<"svdup[_n]_{d}", "Ps", "PcPsPiPl", MergeNone, "", [IsStreamingCompatible]>;
////////////////////////////////////////////////////////////////////////////////
// Predicate operations
-def SVAND_B_Z : SInst<"svand[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_and_z">;
-def SVBIC_B_Z : SInst<"svbic[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_bic_z">;
-def SVEOR_B_Z : SInst<"sveor[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_eor_z">;
-def SVMOV_B_Z : SInst<"svmov[_b]_z", "PPP", "Pc", MergeNone>; // Uses custom expansion
-def SVNAND_B_Z : SInst<"svnand[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_nand_z">;
-def SVNOR_B_Z : SInst<"svnor[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_nor_z">;
-def SVNOT_B_Z : SInst<"svnot[_b]_z", "PPP", "Pc", MergeNone>; // Uses custom expansion
-def SVORN_B_Z : SInst<"svorn[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_orn_z">;
-def SVORR_B_Z : SInst<"svorr[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_orr_z">;
-
-def SVBRKA : SInst<"svbrka[_b]_m", "PPPP", "Pc", MergeNone, "aarch64_sve_brka">;
-def SVBRKA_Z : SInst<"svbrka[_b]_z", "PPP", "Pc", MergeNone, "aarch64_sve_brka_z">;
-def SVBRKB : SInst<"svbrkb[_b]_m", "PPPP", "Pc", MergeNone, "aarch64_sve_brkb">;
-def SVBRKB_Z : SInst<"svbrkb[_b]_z", "PPP", "Pc", MergeNone, "aarch64_sve_brkb_z">;
-def SVBRKN_Z : SInst<"svbrkn[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_brkn_z">;
-def SVBRKPA_Z : SInst<"svbrkpa[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_brkpa_z">;
-def SVBRKPB_Z : SInst<"svbrkpb[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_brkpb_z">;
-
-def SVPFIRST : SInst<"svpfirst[_b]", "PPP", "Pc", MergeNone, "aarch64_sve_pfirst">;
-def SVPNEXT : SInst<"svpnext_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_pnext">;
+def SVAND_B_Z : SInst<"svand[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_and_z", [IsStreamingCompatible]>;
+def SVBIC_B_Z : SInst<"svbic[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_bic_z", [IsStreamingCompatible]>;
+def SVEOR_B_Z : SInst<"sveor[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_eor_z", [IsStreamingCompatible]>;
+def SVMOV_B_Z : SInst<"svmov[_b]_z", "PPP", "Pc", MergeNone, "", [IsStreamingCompatible]>; // Uses custom expansion
+def SVNAND_B_Z : SInst<"svnand[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_nand_z", [IsStreamingCompatible]>;
+def SVNOR_B_Z : SInst<"svnor[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_nor_z", [IsStreamingCompatible]>;
+def SVNOT_B_Z : SInst<"svnot[_b]_z", "PPP", "Pc", MergeNone, "", [IsStreamingCompatible]>; // Uses custom expansion
+def SVORN_B_Z : SInst<"svorn[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_orn_z", [IsStreamingCompatible]>;
+def SVORR_B_Z : SInst<"svorr[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_orr_z", [IsStreamingCompatible]>;
+
+def SVBRKA : SInst<"svbrka[_b]_m", "PPPP", "Pc", MergeNone, "aarch64_sve_brka", [IsStreamingCompatible]>;
+def SVBRKA_Z : SInst<"svbrka[_b]_z", "PPP", "Pc", MergeNone, "aarch64_sve_brka_z", [IsStreamingCompatible]>;
+def SVBRKB : SInst<"svbrkb[_b]_m", "PPPP", "Pc", MergeNone, "aarch64_sve_brkb", [IsStreamingCompatible]>;
+def SVBRKB_Z : SInst<"svbrkb[_b]_z", "PPP", "Pc", MergeNone, "aarch64_sve_brkb_z", [IsStreamingCompatible]>;
+def SVBRKN_Z : SInst<"svbrkn[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_brkn_z", [IsStreamingCompatible]>;
+def SVBRKPA_Z : SInst<"svbrkpa[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_brkpa_z", [IsStreamingCompatible]>;
+def SVBRKPB_Z : SInst<"svbrkpb[_b]_z", "PPPP", "Pc", MergeNone, "aarch64_sve_brkpb_z", [IsStreamingCompatible]>;
+
+def SVPFIRST : SInst<"svpfirst[_b]", "PPP", "Pc", MergeNone, "aarch64_sve_pfirst", [IsStreamingCompatible]>;
+def SVPNEXT : SInst<"svpnext_{d}", "PPP", "PcPsPiPl", MergeNone, "aarch64_sve_pnext", [IsStreamingCompatible]>;
////////////////////////////////////////////////////////////////////////////////
// Testing predicates
-def SVPTEST_ANY : SInst<"svptest_any", "sPP", "Pc", MergeNone, "aarch64_sve_ptest_any">;
-def SVPTEST_FIRST : SInst<"svptest_first", "sPP", "Pc", MergeNone, "aarch64_sve_ptest_first">;
-def SVPTEST_LAST : SInst<"svptest_last", "sPP", "Pc", MergeNone, "aarch64_sve_ptest_last">;
+def SVPTEST_ANY : SInst<"svptest_any", "sPP", "Pc", MergeNone, "aarch64_sve_ptest_any", [IsStreamingCompatible]>;
+def SVPTEST_FIRST : SInst<"svptest_first", "sPP", "Pc", MergeNone, "aarch64_sve_ptest_first", [IsStreamingCompatible]>;
+def SVPTEST_LAST : SInst<"svptest_last", "sPP", "Pc", MergeNone, "aarch64_sve_ptest_last", [IsStreamingCompatible]>;
////////////////////////////////////////////////////////////////////////////////
// FFR manipulation
-def SVRDFFR : SInst<"svrdffr", "P", "Pc", MergeNone, "", [IsOverloadNone]>;
+def SVRDFFR : SInst<"svrdffr", "Pv", "Pc", MergeNone, "", [IsOverloadNone]>;
def SVRDFFR_Z : SInst<"svrdffr_z", "PP", "Pc", MergeNone, "", [IsOverloadNone]>;
-def SVSETFFR : SInst<"svsetffr", "v", "", MergeNone, "", [IsOverloadNone]>;
+def SVSETFFR : SInst<"svsetffr", "vv", "", MergeNone, "", [IsOverloadNone]>;
def SVWRFFR : SInst<"svwrffr", "vP", "Pc", MergeNone, "", [IsOverloadNone]>;
////////////////////////////////////////////////////////////////////////////////
// Counting elements
-def SVCNTB_PAT : SInst<"svcntb_pat", "nI", "", MergeNone, "aarch64_sve_cntb", [IsOverloadNone]>;
-def SVCNTH_PAT : SInst<"svcnth_pat", "nI", "", MergeNone, "aarch64_sve_cnth", [IsOverloadNone]>;
-def SVCNTW_PAT : SInst<"svcntw_pat", "nI", "", MergeNone, "aarch64_sve_cntw", [IsOverloadNone]>;
-def SVCNTD_PAT : SInst<"svcntd_pat", "nI", "", MergeNone, "aarch64_sve_cntd", [IsOverloadNone]>;
+def SVCNTB_PAT : SInst<"svcntb_pat", "nI", "", MergeNone, "aarch64_sve_cntb", [IsOverloadNone, IsStreamingCompatible]>;
+def SVCNTH_PAT : SInst<"svcnth_pat", "nI", "", MergeNone, "aarch64_sve_cnth", [IsOverloadNone, IsStreamingCompatible]>;
+def SVCNTW_PAT : SInst<"svcntw_pat", "nI", "", MergeNone, "aarch64_sve_cntw", [IsOverloadNone, IsStreamingCompatible]>;
+def SVCNTD_PAT : SInst<"svcntd_pat", "nI", "", MergeNone, "aarch64_sve_cntd", [IsOverloadNone, IsStreamingCompatible]>;
-def SVCNTB : SInst<"svcntb", "n", "", MergeNone, "aarch64_sve_cntb", [IsAppendSVALL, IsOverloadNone]>;
-def SVCNTH : SInst<"svcnth", "n", "", MergeNone, "aarch64_sve_cnth", [IsAppendSVALL, IsOverloadNone]>;
-def SVCNTW : SInst<"svcntw", "n", "", MergeNone, "aarch64_sve_cntw", [IsAppendSVALL, IsOverloadNone]>;
-def SVCNTD : SInst<"svcntd", "n", "", MergeNone, "aarch64_sve_cntd", [IsAppendSVALL, IsOverloadNone]>;
+def SVCNTB : SInst<"svcntb", "nv", "", MergeNone, "aarch64_sve_cntb", [IsAppendSVALL, IsOverloadNone, IsStreamingCompatible]>;
+def SVCNTH : SInst<"svcnth", "nv", "", MergeNone, "aarch64_sve_cnth", [IsAppendSVALL, IsOverloadNone, IsStreamingCompatible]>;
+def SVCNTW : SInst<"svcntw", "nv", "", MergeNone, "aarch64_sve_cntw", [IsAppendSVALL, IsOverloadNone, IsStreamingCompatible]>;
+def SVCNTD : SInst<"svcntd", "nv", "", MergeNone, "aarch64_sve_cntd", [IsAppendSVALL, IsOverloadNone, IsStreamingCompatible]>;
-def SVCNTP : SInst<"svcntp_{d}", "nPP", "PcPsPiPl", MergeNone, "aarch64_sve_cntp">;
-def SVLEN : SInst<"svlen[_{d}]", "nd", "csilUcUsUiUlhfd", MergeNone>;
+def SVCNTP : SInst<"svcntp_{d}", "nPP", "PcPsPiPl", MergeNone, "aarch64_sve_cntp", [IsStreamingCompatible]>;
+def SVLEN : SInst<"svlen[_{d}]", "nd", "csilUcUsUiUlhfd", MergeNone, "", [IsStreamingCompatible]>;
-let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
-def SVLEN_BF16 : SInst<"svlen[_{d}]", "nd", "b", MergeNone>;
+let TargetGuard = "sve,bf16" in {
+def SVLEN_BF16 : SInst<"svlen[_{d}]", "nd", "b", MergeNone, "", [IsStreamingCompatible]>;
}
////////////////////////////////////////////////////////////////////////////////
@@ -1348,20 +1189,20 @@ def UnsignedWord : sat_type<"U", "Ui">;
def UnsignedDoubleWord : sat_type<"U", "Ul">;
multiclass SInst_SAT1<string name, string intrinsic, sat_type type> {
- def _N32 : SInst<name # "_pat[_n_{d}]", "ssIi", type.U # "i", MergeNone, intrinsic # "_n32", [IsOverloadNone], [ImmCheck<2, ImmCheck1_16>]>;
- def _N64 : SInst<name # "_pat[_n_{d}]", "ssIi", type.U # "l", MergeNone, intrinsic # "_n64", [IsOverloadNone], [ImmCheck<2, ImmCheck1_16>]>;
- def _N32_ALL : SInst<name # "[_n_{d}]", "ssi", type.U # "i", MergeNone, intrinsic # "_n32", [IsOverloadNone, IsInsertOp1SVALL], [ImmCheck<1, ImmCheck1_16>]>;
- def _N64_ALL : SInst<name # "[_n_{d}]", "ssi", type.U # "l", MergeNone, intrinsic # "_n64", [IsOverloadNone, IsInsertOp1SVALL], [ImmCheck<1, ImmCheck1_16>]>;
+ def _N32 : SInst<name # "_pat[_n_{d}]", "ssIi", type.U # "i", MergeNone, intrinsic # "_n32", [IsOverloadNone, IsStreamingCompatible], [ImmCheck<2, ImmCheck1_16>]>;
+ def _N64 : SInst<name # "_pat[_n_{d}]", "ssIi", type.U # "l", MergeNone, intrinsic # "_n64", [IsOverloadNone, IsStreamingCompatible], [ImmCheck<2, ImmCheck1_16>]>;
+ def _N32_ALL : SInst<name # "[_n_{d}]", "ssi", type.U # "i", MergeNone, intrinsic # "_n32", [IsOverloadNone, IsInsertOp1SVALL, IsStreamingCompatible], [ImmCheck<1, ImmCheck1_16>]>;
+ def _N64_ALL : SInst<name # "[_n_{d}]", "ssi", type.U # "l", MergeNone, intrinsic # "_n64", [IsOverloadNone, IsInsertOp1SVALL, IsStreamingCompatible], [ImmCheck<1, ImmCheck1_16>]>;
}
multiclass SInst_SAT2<string name, string intrinsic, sat_type type> {
- def "" : SInst<name # "_pat[_{d}]", "ddIi", type.T, MergeNone, intrinsic, [], [ImmCheck<2, ImmCheck1_16>]>;
- def _ALL : SInst<name # "[_{d}]", "ddi", type.T, MergeNone, intrinsic, [IsInsertOp1SVALL], [ImmCheck<1, ImmCheck1_16>]>;
+ def "" : SInst<name # "_pat[_{d}]", "ddIi", type.T, MergeNone, intrinsic, [IsStreamingCompatible], [ImmCheck<2, ImmCheck1_16>]>;
+ def _ALL : SInst<name # "[_{d}]", "ddi", type.T, MergeNone, intrinsic, [IsInsertOp1SVALL, IsStreamingCompatible], [ImmCheck<1, ImmCheck1_16>]>;
- def _N32 : SInst<name # "_pat[_n_{d}]", "ssIi", type.U # "i", MergeNone, intrinsic # "_n32", [IsOverloadNone], [ImmCheck<2, ImmCheck1_16>]>;
- def _N64 : SInst<name # "_pat[_n_{d}]", "ssIi", type.U # "l", MergeNone, intrinsic # "_n64", [IsOverloadNone], [ImmCheck<2, ImmCheck1_16>]>;
- def _N32_ALL : SInst<name # "[_n_{d}]", "ssi", type.U # "i", MergeNone, intrinsic # "_n32", [IsOverloadNone, IsInsertOp1SVALL], [ImmCheck<1, ImmCheck1_16>]>;
- def _N64_ALL : SInst<name # "[_n_{d}]", "ssi", type.U # "l", MergeNone, intrinsic # "_n64", [IsOverloadNone, IsInsertOp1SVALL], [ImmCheck<1, ImmCheck1_16>]>;
+ def _N32 : SInst<name # "_pat[_n_{d}]", "ssIi", type.U # "i", MergeNone, intrinsic # "_n32", [IsOverloadNone, IsStreamingCompatible], [ImmCheck<2, ImmCheck1_16>]>;
+ def _N64 : SInst<name # "_pat[_n_{d}]", "ssIi", type.U # "l", MergeNone, intrinsic # "_n64", [IsOverloadNone, IsStreamingCompatible], [ImmCheck<2, ImmCheck1_16>]>;
+ def _N32_ALL : SInst<name # "[_n_{d}]", "ssi", type.U # "i", MergeNone, intrinsic # "_n32", [IsOverloadNone, IsInsertOp1SVALL, IsStreamingCompatible], [ImmCheck<1, ImmCheck1_16>]>;
+ def _N64_ALL : SInst<name # "[_n_{d}]", "ssi", type.U # "l", MergeNone, intrinsic # "_n64", [IsOverloadNone, IsInsertOp1SVALL, IsStreamingCompatible], [ImmCheck<1, ImmCheck1_16>]>;
}
defm SVQDECB_S : SInst_SAT1<"svqdecb", "aarch64_sve_sqdecb", SignedByte>;
@@ -1382,131 +1223,160 @@ defm SVQINCW_U : SInst_SAT2<"svqincw", "aarch64_sve_uqincw", UnsignedWord>;
defm SVQINCD_S : SInst_SAT2<"svqincd", "aarch64_sve_sqincd", SignedDoubleWord>;
defm SVQINCD_U : SInst_SAT2<"svqincd", "aarch64_sve_uqincd", UnsignedDoubleWord>;
-def SVQDECP_S : SInst<"svqdecp[_{d}]", "ddP", "sil", MergeNone, "aarch64_sve_sqdecp">;
-def SVQDECP_U : SInst<"svqdecp[_{d}]", "ddP", "UsUiUl", MergeNone, "aarch64_sve_uqdecp">;
-def SVQINCP_S : SInst<"svqincp[_{d}]", "ddP", "sil", MergeNone, "aarch64_sve_sqincp">;
-def SVQINCP_U : SInst<"svqincp[_{d}]", "ddP", "UsUiUl", MergeNone, "aarch64_sve_uqincp">;
-
-def SVQDECP_N_S32 : SInst<"svqdecp[_n_s32]_{d}", "kkP", "PcPsPiPl", MergeNone, "aarch64_sve_sqdecp_n32">;
-def SVQDECP_N_S64 : SInst<"svqdecp[_n_s64]_{d}", "llP", "PcPsPiPl", MergeNone, "aarch64_sve_sqdecp_n64">;
-def SVQDECP_N_U32 : SInst<"svqdecp[_n_u32]_{d}", "mmP", "PcPsPiPl", MergeNone, "aarch64_sve_uqdecp_n32">;
-def SVQDECP_N_U64 : SInst<"svqdecp[_n_u64]_{d}", "nnP", "PcPsPiPl", MergeNone, "aarch64_sve_uqdecp_n64">;
-def SVQINCP_N_S32 : SInst<"svqincp[_n_s32]_{d}", "kkP", "PcPsPiPl", MergeNone, "aarch64_sve_sqincp_n32">;
-def SVQINCP_N_S64 : SInst<"svqincp[_n_s64]_{d}", "llP", "PcPsPiPl", MergeNone, "aarch64_sve_sqincp_n64">;
-def SVQINCP_N_U32 : SInst<"svqincp[_n_u32]_{d}", "mmP", "PcPsPiPl", MergeNone, "aarch64_sve_uqincp_n32">;
-def SVQINCP_N_U64 : SInst<"svqincp[_n_u64]_{d}", "nnP", "PcPsPiPl", MergeNone, "aarch64_sve_uqincp_n64">;
-
-let ArchGuard = "defined(__ARM_FEATURE_SVE_MATMUL_INT8)" in {
+def SVQDECP_S : SInst<"svqdecp[_{d}]", "ddP", "sil", MergeNone, "aarch64_sve_sqdecp", [IsStreamingCompatible]>;
+def SVQDECP_U : SInst<"svqdecp[_{d}]", "ddP", "UsUiUl", MergeNone, "aarch64_sve_uqdecp", [IsStreamingCompatible]>;
+def SVQINCP_S : SInst<"svqincp[_{d}]", "ddP", "sil", MergeNone, "aarch64_sve_sqincp", [IsStreamingCompatible]>;
+def SVQINCP_U : SInst<"svqincp[_{d}]", "ddP", "UsUiUl", MergeNone, "aarch64_sve_uqincp", [IsStreamingCompatible]>;
+
+def SVQDECP_N_S32 : SInst<"svqdecp[_n_s32]_{d}", "kkP", "PcPsPiPl", MergeNone, "aarch64_sve_sqdecp_n32", [IsStreamingCompatible]>;
+def SVQDECP_N_S64 : SInst<"svqdecp[_n_s64]_{d}", "llP", "PcPsPiPl", MergeNone, "aarch64_sve_sqdecp_n64", [IsStreamingCompatible]>;
+def SVQDECP_N_U32 : SInst<"svqdecp[_n_u32]_{d}", "mmP", "PcPsPiPl", MergeNone, "aarch64_sve_uqdecp_n32", [IsStreamingCompatible]>;
+def SVQDECP_N_U64 : SInst<"svqdecp[_n_u64]_{d}", "nnP", "PcPsPiPl", MergeNone, "aarch64_sve_uqdecp_n64", [IsStreamingCompatible]>;
+def SVQINCP_N_S32 : SInst<"svqincp[_n_s32]_{d}", "kkP", "PcPsPiPl", MergeNone, "aarch64_sve_sqincp_n32", [IsStreamingCompatible]>;
+def SVQINCP_N_S64 : SInst<"svqincp[_n_s64]_{d}", "llP", "PcPsPiPl", MergeNone, "aarch64_sve_sqincp_n64", [IsStreamingCompatible]>;
+def SVQINCP_N_U32 : SInst<"svqincp[_n_u32]_{d}", "mmP", "PcPsPiPl", MergeNone, "aarch64_sve_uqincp_n32", [IsStreamingCompatible]>;
+def SVQINCP_N_U64 : SInst<"svqincp[_n_u64]_{d}", "nnP", "PcPsPiPl", MergeNone, "aarch64_sve_uqincp_n64", [IsStreamingCompatible]>;
+
+let TargetGuard = "sve,i8mm" in {
def SVMLLA_S32 : SInst<"svmmla[_s32]", "ddqq","i", MergeNone, "aarch64_sve_smmla">;
def SVMLLA_U32 : SInst<"svmmla[_u32]", "ddqq","Ui", MergeNone, "aarch64_sve_ummla">;
def SVUSMLLA_S32 : SInst<"svusmmla[_s32]", "ddbq","i", MergeNone, "aarch64_sve_usmmla">;
-def SVUSDOT_S : SInst<"svusdot[_s32]", "ddbq", "i", MergeNone, "aarch64_sve_usdot">;
-def SVUSDOT_N_S : SInst<"svusdot[_n_s32]", "ddbr", "i", MergeNone, "aarch64_sve_usdot">;
-def SVSUDOT_S : SInst<"svsudot[_s32]", "ddqb", "i", MergeNone, "aarch64_sve_usdot", [ReverseUSDOT]>;
-def SVSUDOT_N_S : SInst<"svsudot[_n_s32]", "ddq@", "i", MergeNone, "aarch64_sve_usdot", [ReverseUSDOT]>;
+def SVUSDOT_S : SInst<"svusdot[_s32]", "ddbq", "i", MergeNone, "aarch64_sve_usdot", [IsStreamingCompatible]>;
+def SVUSDOT_N_S : SInst<"svusdot[_n_s32]", "ddbr", "i", MergeNone, "aarch64_sve_usdot", [IsStreamingCompatible]>;
+def SVSUDOT_S : SInst<"svsudot[_s32]", "ddqb", "i", MergeNone, "aarch64_sve_usdot", [ReverseUSDOT, IsStreamingCompatible]>;
+def SVSUDOT_N_S : SInst<"svsudot[_n_s32]", "ddq@", "i", MergeNone, "aarch64_sve_usdot", [ReverseUSDOT, IsStreamingCompatible]>;
-def SVUSDOT_LANE_S : SInst<"svusdot_lane[_s32]", "ddbqi", "i", MergeNone, "aarch64_sve_usdot_lane", [], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
-def SVSUDOT_LANE_S : SInst<"svsudot_lane[_s32]", "ddqbi", "i", MergeNone, "aarch64_sve_sudot_lane", [], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
+def SVUSDOT_LANE_S : SInst<"svusdot_lane[_s32]", "ddbqi", "i", MergeNone, "aarch64_sve_usdot_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
+def SVSUDOT_LANE_S : SInst<"svsudot_lane[_s32]", "ddqbi", "i", MergeNone, "aarch64_sve_sudot_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
}
-let ArchGuard = "defined(__ARM_FEATURE_SVE_MATMUL_FP32)" in {
+let TargetGuard = "sve,f32mm" in {
def SVMLLA_F32 : SInst<"svmmla[_f32]", "dddd","f", MergeNone, "aarch64_sve_fmmla">;
}
-let ArchGuard = "defined(__ARM_FEATURE_SVE_MATMUL_FP64)" in {
+let TargetGuard = "sve,f64mm" in {
def SVMLLA_F64 : SInst<"svmmla[_f64]", "dddd","d", MergeNone, "aarch64_sve_fmmla">;
-def SVTRN1Q : SInst<"svtrn1q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_trn1q">;
-def SVTRN2Q : SInst<"svtrn2q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_trn2q">;
-def SVUZP1Q : SInst<"svuzp1q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_uzp1q">;
-def SVUZP2Q : SInst<"svuzp2q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_uzp2q">;
-def SVZIP1Q : SInst<"svzip1q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_zip1q">;
-def SVZIP2Q : SInst<"svzip2q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_zip2q">;
+def SVTRN1Q : SInst<"svtrn1q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_trn1q", [IsStreamingCompatible]>;
+def SVTRN2Q : SInst<"svtrn2q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_trn2q", [IsStreamingCompatible]>;
+def SVUZP1Q : SInst<"svuzp1q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_uzp1q", [IsStreamingCompatible]>;
+def SVUZP2Q : SInst<"svuzp2q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_uzp2q", [IsStreamingCompatible]>;
+def SVZIP1Q : SInst<"svzip1q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_zip1q", [IsStreamingCompatible]>;
+def SVZIP2Q : SInst<"svzip2q[_{d}]", "ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_zip2q", [IsStreamingCompatible]>;
}
-let ArchGuard = "defined(__ARM_FEATURE_SVE_MATMUL_FP64) && defined(__ARM_FEATURE_SVE_BF16)" in {
+let TargetGuard = "sve,bf16,f64mm" in {
def SVTRN1Q_BF16 : SInst<"svtrn1q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_trn1q">;
def SVTRN2Q_BF16 : SInst<"svtrn2q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_trn2q">;
-def SVUZP1Q_BF16 : SInst<"svuzp1q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_uzp1q">;
-def SVUZP2Q_BF16 : SInst<"svuzp2q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_uzp2q">;
-def SVZIP1Q_BF16 : SInst<"svzip1q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_zip1q">;
-def SVZIP2Q_BF16 : SInst<"svzip2q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_zip2q">;
+def SVUZP1Q_BF16 : SInst<"svuzp1q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_uzp1q", [IsStreamingCompatible]>;
+def SVUZP2Q_BF16 : SInst<"svuzp2q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_uzp2q", [IsStreamingCompatible]>;
+def SVZIP1Q_BF16 : SInst<"svzip1q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_zip1q", [IsStreamingCompatible]>;
+def SVZIP2Q_BF16 : SInst<"svzip2q[_{d}]", "ddd", "b", MergeNone, "aarch64_sve_zip2q", [IsStreamingCompatible]>;
}
////////////////////////////////////////////////////////////////////////////////
// Vector creation
-def SVUNDEF_1 : SInst<"svundef_{d}", "d", "csilUcUsUiUlhfd", MergeNone, "", [IsUndef]>;
-def SVUNDEF_2 : SInst<"svundef2_{d}", "2", "csilUcUsUiUlhfd", MergeNone, "", [IsUndef]>;
-def SVUNDEF_3 : SInst<"svundef3_{d}", "3", "csilUcUsUiUlhfd", MergeNone, "", [IsUndef]>;
-def SVUNDEF_4 : SInst<"svundef4_{d}", "4", "csilUcUsUiUlhfd", MergeNone, "", [IsUndef]>;
+def SVUNDEF_1 : SInst<"svundef_{d}", "dv", "csilUcUsUiUlhfd", MergeNone, "", [IsUndef, IsStreamingCompatible]>;
+def SVUNDEF_2 : SInst<"svundef2_{d}", "2v", "csilUcUsUiUlhfd", MergeNone, "", [IsUndef, IsStreamingCompatible]>;
+def SVUNDEF_3 : SInst<"svundef3_{d}", "3v", "csilUcUsUiUlhfd", MergeNone, "", [IsUndef, IsStreamingCompatible]>;
+def SVUNDEF_4 : SInst<"svundef4_{d}", "4v", "csilUcUsUiUlhfd", MergeNone, "", [IsUndef, IsStreamingCompatible]>;
-def SVCREATE_2 : SInst<"svcreate2[_{d}]", "2dd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_create2", [IsTupleCreate]>;
-def SVCREATE_3 : SInst<"svcreate3[_{d}]", "3ddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_create3", [IsTupleCreate]>;
-def SVCREATE_4 : SInst<"svcreate4[_{d}]", "4dddd", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_create4", [IsTupleCreate]>;
+def SVCREATE_2 : SInst<"svcreate2[_{d}]", "2dd", "csilUcUsUiUlhfd", MergeNone, "", [IsTupleCreate, IsStreamingCompatible]>;
+def SVCREATE_3 : SInst<"svcreate3[_{d}]", "3ddd", "csilUcUsUiUlhfd", MergeNone, "", [IsTupleCreate, IsStreamingCompatible]>;
+def SVCREATE_4 : SInst<"svcreate4[_{d}]", "4dddd", "csilUcUsUiUlhfd", MergeNone, "", [IsTupleCreate, IsStreamingCompatible]>;
-let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
-def SVUNDEF_1_BF16 : SInst<"svundef_{d}", "d", "b", MergeNone, "", [IsUndef]>;
-def SVUNDEF_2_BF16 : SInst<"svundef2_{d}", "2", "b", MergeNone, "", [IsUndef]>;
-def SVUNDEF_3_BF16 : SInst<"svundef3_{d}", "3", "b", MergeNone, "", [IsUndef]>;
-def SVUNDEF_4_BF16 : SInst<"svundef4_{d}", "4", "b", MergeNone, "", [IsUndef]>;
+let TargetGuard = "sve,bf16" in {
+def SVUNDEF_1_BF16 : SInst<"svundef_{d}", "dv", "b", MergeNone, "", [IsUndef, IsStreamingCompatible]>;
+def SVUNDEF_2_BF16 : SInst<"svundef2_{d}", "2v", "b", MergeNone, "", [IsUndef, IsStreamingCompatible]>;
+def SVUNDEF_3_BF16 : SInst<"svundef3_{d}", "3v", "b", MergeNone, "", [IsUndef, IsStreamingCompatible]>;
+def SVUNDEF_4_BF16 : SInst<"svundef4_{d}", "4v", "b", MergeNone, "", [IsUndef, IsStreamingCompatible]>;
+
+def SVCREATE_2_BF16 : SInst<"svcreate2[_{d}]", "2dd", "b", MergeNone, "", [IsTupleCreate, IsStreamingCompatible]>;
+def SVCREATE_3_BF16 : SInst<"svcreate3[_{d}]", "3ddd", "b", MergeNone, "", [IsTupleCreate, IsStreamingCompatible]>;
+def SVCREATE_4_BF16 : SInst<"svcreate4[_{d}]", "4dddd", "b", MergeNone, "", [IsTupleCreate, IsStreamingCompatible]>;
+}
-def SVCREATE_2_BF16 : SInst<"svcreate2[_{d}]", "2dd", "b", MergeNone, "aarch64_sve_tuple_create2", [IsTupleCreate]>;
-def SVCREATE_3_BF16 : SInst<"svcreate3[_{d}]", "3ddd", "b", MergeNone, "aarch64_sve_tuple_create3", [IsTupleCreate]>;
-def SVCREATE_4_BF16 : SInst<"svcreate4[_{d}]", "4dddd", "b", MergeNone, "aarch64_sve_tuple_create4", [IsTupleCreate]>;
+let TargetGuard = "sve2p1|sme2" in {
+ def SVCREATE_2_B : SInst<"svcreate2[_b]", "2dd", "Pc", MergeNone, "", [IsTupleCreate, IsStreamingCompatible]>;
+ def SVCREATE_4_B : SInst<"svcreate4[_b]", "4dddd", "Pc", MergeNone, "", [IsTupleCreate, IsStreamingCompatible]>;
}
////////////////////////////////////////////////////////////////////////////////
// Vector insertion and extraction
-def SVGET_2 : SInst<"svget2[_{d}]", "d2i", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_get", [IsTupleGet], [ImmCheck<1, ImmCheck0_1>]>;
-def SVGET_3 : SInst<"svget3[_{d}]", "d3i", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_get", [IsTupleGet], [ImmCheck<1, ImmCheck0_2>]>;
-def SVGET_4 : SInst<"svget4[_{d}]", "d4i", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_get", [IsTupleGet], [ImmCheck<1, ImmCheck0_3>]>;
+def SVGET_2 : SInst<"svget2[_{d}]", "d2i", "csilUcUsUiUlhfd", MergeNone, "", [IsTupleGet, IsStreamingCompatible], [ImmCheck<1, ImmCheck0_1>]>;
+def SVGET_3 : SInst<"svget3[_{d}]", "d3i", "csilUcUsUiUlhfd", MergeNone, "", [IsTupleGet, IsStreamingCompatible], [ImmCheck<1, ImmCheck0_2>]>;
+def SVGET_4 : SInst<"svget4[_{d}]", "d4i", "csilUcUsUiUlhfd", MergeNone, "", [IsTupleGet, IsStreamingCompatible], [ImmCheck<1, ImmCheck0_3>]>;
-def SVSET_2 : SInst<"svset2[_{d}]", "22id", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_set", [IsTupleSet], [ImmCheck<1, ImmCheck0_1>]>;
-def SVSET_3 : SInst<"svset3[_{d}]", "33id", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_set", [IsTupleSet], [ImmCheck<1, ImmCheck0_2>]>;
-def SVSET_4 : SInst<"svset4[_{d}]", "44id", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tuple_set", [IsTupleSet], [ImmCheck<1, ImmCheck0_3>]>;
+def SVSET_2 : SInst<"svset2[_{d}]", "22id", "csilUcUsUiUlhfd", MergeNone, "", [IsTupleSet, IsStreamingCompatible], [ImmCheck<1, ImmCheck0_1>]>;
+def SVSET_3 : SInst<"svset3[_{d}]", "33id", "csilUcUsUiUlhfd", MergeNone, "", [IsTupleSet, IsStreamingCompatible], [ImmCheck<1, ImmCheck0_2>]>;
+def SVSET_4 : SInst<"svset4[_{d}]", "44id", "csilUcUsUiUlhfd", MergeNone, "", [IsTupleSet, IsStreamingCompatible], [ImmCheck<1, ImmCheck0_3>]>;
-let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
-def SVGET_2_BF16 : SInst<"svget2[_{d}]", "d2i", "b", MergeNone, "aarch64_sve_tuple_get", [IsTupleGet], [ImmCheck<1, ImmCheck0_1>]>;
-def SVGET_3_BF16 : SInst<"svget3[_{d}]", "d3i", "b", MergeNone, "aarch64_sve_tuple_get", [IsTupleGet], [ImmCheck<1, ImmCheck0_2>]>;
-def SVGET_4_BF16 : SInst<"svget4[_{d}]", "d4i", "b", MergeNone, "aarch64_sve_tuple_get", [IsTupleGet], [ImmCheck<1, ImmCheck0_3>]>;
+let TargetGuard = "sve,bf16" in {
+def SVGET_2_BF16 : SInst<"svget2[_{d}]", "d2i", "b", MergeNone, "", [IsTupleGet, IsStreamingCompatible], [ImmCheck<1, ImmCheck0_1>]>;
+def SVGET_3_BF16 : SInst<"svget3[_{d}]", "d3i", "b", MergeNone, "", [IsTupleGet, IsStreamingCompatible], [ImmCheck<1, ImmCheck0_2>]>;
+def SVGET_4_BF16 : SInst<"svget4[_{d}]", "d4i", "b", MergeNone, "", [IsTupleGet, IsStreamingCompatible], [ImmCheck<1, ImmCheck0_3>]>;
-def SVSET_2_BF16 : SInst<"svset2[_{d}]", "22id", "b", MergeNone, "aarch64_sve_tuple_set", [IsTupleSet], [ImmCheck<1, ImmCheck0_1>]>;
-def SVSET_3_BF16 : SInst<"svset3[_{d}]", "33id", "b", MergeNone, "aarch64_sve_tuple_set", [IsTupleSet], [ImmCheck<1, ImmCheck0_2>]>;
-def SVSET_4_BF16 : SInst<"svset4[_{d}]", "44id", "b", MergeNone, "aarch64_sve_tuple_set", [IsTupleSet], [ImmCheck<1, ImmCheck0_3>]>;
+def SVSET_2_BF16 : SInst<"svset2[_{d}]", "22id", "b", MergeNone, "", [IsTupleSet, IsStreamingCompatible], [ImmCheck<1, ImmCheck0_1>]>;
+def SVSET_3_BF16 : SInst<"svset3[_{d}]", "33id", "b", MergeNone, "", [IsTupleSet, IsStreamingCompatible], [ImmCheck<1, ImmCheck0_2>]>;
+def SVSET_4_BF16 : SInst<"svset4[_{d}]", "44id", "b", MergeNone, "", [IsTupleSet, IsStreamingCompatible], [ImmCheck<1, ImmCheck0_3>]>;
}
+let TargetGuard = "sve2p1|sme2" in {
+ def SVGET_2_B : SInst<"svget2[_b]", "d2i", "Pc", MergeNone, "", [IsTupleGet, IsStreamingCompatible], [ImmCheck<1, ImmCheck0_1>]>;
+ def SVGET_4_B : SInst<"svget4[_b]", "d4i", "Pc", MergeNone, "", [IsTupleGet, IsStreamingCompatible], [ImmCheck<1, ImmCheck0_3>]>;
+
+ def SVSET_2_B : SInst<"svset2[_b]", "22id", "Pc", MergeNone, "", [IsTupleSet, IsStreamingCompatible], [ImmCheck<1, ImmCheck0_1>]>;
+ def SVSET_4_B : SInst<"svset4[_b]", "44id", "Pc", MergeNone, "", [IsTupleSet, IsStreamingCompatible], [ImmCheck<1, ImmCheck0_3>]>;
+}
+
+let TargetGuard = "sve2p1|sme2" in {
+ def SVUNDEF_2_B: Inst<"svundef2_b", "2", "Pc", MergeNone, "", [IsUndef, IsStreamingCompatible], []>;
+ def SVUNDEF_4_B: Inst<"svundef4_b", "4", "Pc", MergeNone, "", [IsUndef, IsStreamingCompatible], []>;
+}
////////////////////////////////////////////////////////////////////////////////
// SVE2 WhileGE/GT
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
-def SVWHILEGE_S32 : SInst<"svwhilege_{d}[_{1}]", "Pkk", "PcPsPiPl", MergeNone, "aarch64_sve_whilege", [IsOverloadWhile]>;
-def SVWHILEGE_S64 : SInst<"svwhilege_{d}[_{1}]", "Pll", "PcPsPiPl", MergeNone, "aarch64_sve_whilege", [IsOverloadWhile]>;
-def SVWHILEGT_S32 : SInst<"svwhilegt_{d}[_{1}]", "Pkk", "PcPsPiPl", MergeNone, "aarch64_sve_whilegt", [IsOverloadWhile]>;
-def SVWHILEGT_S64 : SInst<"svwhilegt_{d}[_{1}]", "Pll", "PcPsPiPl", MergeNone, "aarch64_sve_whilegt", [IsOverloadWhile]>;
-def SVWHILEHI_U32 : SInst<"svwhilegt_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehi", [IsOverloadWhile]>;
-def SVWHILEHI_U64 : SInst<"svwhilegt_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehi", [IsOverloadWhile]>;
-def SVWHILEHS_U32 : SInst<"svwhilege_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehs", [IsOverloadWhile]>;
-def SVWHILEHS_U64 : SInst<"svwhilege_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehs", [IsOverloadWhile]>;
+let TargetGuard = "sve2" in {
+def SVWHILEGE_S32 : SInst<"svwhilege_{d}[_{1}]", "Pkk", "PcPsPiPl", MergeNone, "aarch64_sve_whilege", [IsOverloadWhileOrMultiVecCvt, IsStreamingCompatible]>;
+def SVWHILEGE_S64 : SInst<"svwhilege_{d}[_{1}]", "Pll", "PcPsPiPl", MergeNone, "aarch64_sve_whilege", [IsOverloadWhileOrMultiVecCvt, IsStreamingCompatible]>;
+def SVWHILEGT_S32 : SInst<"svwhilegt_{d}[_{1}]", "Pkk", "PcPsPiPl", MergeNone, "aarch64_sve_whilegt", [IsOverloadWhileOrMultiVecCvt, IsStreamingCompatible]>;
+def SVWHILEGT_S64 : SInst<"svwhilegt_{d}[_{1}]", "Pll", "PcPsPiPl", MergeNone, "aarch64_sve_whilegt", [IsOverloadWhileOrMultiVecCvt, IsStreamingCompatible]>;
+def SVWHILEHI_U32 : SInst<"svwhilegt_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehi", [IsOverloadWhileOrMultiVecCvt, IsStreamingCompatible]>;
+def SVWHILEHI_U64 : SInst<"svwhilegt_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehi", [IsOverloadWhileOrMultiVecCvt, IsStreamingCompatible]>;
+def SVWHILEHS_U32 : SInst<"svwhilege_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehs", [IsOverloadWhileOrMultiVecCvt, IsStreamingCompatible]>;
+def SVWHILEHS_U64 : SInst<"svwhilege_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehs", [IsOverloadWhileOrMultiVecCvt, IsStreamingCompatible]>;
+}
+
+let TargetGuard = "sve2p1|sme2" in {
+ def SVWHILEGE_S64_X2 : SInst<"svwhilege_{d}[_{1}]_x2", "2ll", "PcPsPiPl", MergeNone, "aarch64_sve_whilege_x2", [IsStreamingOrSVE2p1]>;
+ def SVWHILEGT_S64_X2 : SInst<"svwhilegt_{d}[_{1}]_x2", "2ll", "PcPsPiPl", MergeNone, "aarch64_sve_whilegt_x2", [IsStreamingOrSVE2p1]>;
+ def SVWHILEHI_U64_X2 : SInst<"svwhilegt_{d}[_{1}]_x2", "2nn", "PcPsPiPl", MergeNone, "aarch64_sve_whilehi_x2", [IsStreamingOrSVE2p1]>;
+ def SVWHILEHS_U64_X2 : SInst<"svwhilege_{d}[_{1}]_x2", "2nn", "PcPsPiPl", MergeNone, "aarch64_sve_whilehs_x2", [IsStreamingOrSVE2p1]>;
+ def SVWHILELE_S64_X2 : SInst<"svwhilele_{d}[_{1}]_x2", "2ll", "PcPsPiPl", MergeNone, "aarch64_sve_whilele_x2", [IsStreamingOrSVE2p1]>;
+ def SVWHILELT_S64_X2 : SInst<"svwhilelt_{d}[_{1}]_x2", "2ll", "PcPsPiPl", MergeNone, "aarch64_sve_whilelt_x2", [IsStreamingOrSVE2p1]>;
+ def SVWHILELO_U64_X2 : SInst<"svwhilelt_{d}[_{1}]_x2", "2nn", "PcPsPiPl", MergeNone, "aarch64_sve_whilelo_x2", [IsStreamingOrSVE2p1]>;
+ def SVWHILELS_U64_X2 : SInst<"svwhilele_{d}[_{1}]_x2", "2nn", "PcPsPiPl", MergeNone, "aarch64_sve_whilels_x2", [IsStreamingOrSVE2p1]>;
+
}
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Uniform DSP operations
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
-defm SVQADD_S : SInstZPZZ<"svqadd", "csli", "aarch64_sve_sqadd">;
-defm SVQADD_U : SInstZPZZ<"svqadd", "UcUsUiUl", "aarch64_sve_uqadd">;
-defm SVHADD_S : SInstZPZZ<"svhadd", "csli", "aarch64_sve_shadd">;
-defm SVHADD_U : SInstZPZZ<"svhadd", "UcUsUiUl", "aarch64_sve_uhadd">;
-defm SVRHADD_S : SInstZPZZ<"svrhadd", "csli", "aarch64_sve_srhadd">;
-defm SVRHADD_U : SInstZPZZ<"svrhadd", "UcUsUiUl", "aarch64_sve_urhadd">;
-
-defm SVQSUB_S : SInstZPZZ<"svqsub", "csli", "aarch64_sve_sqsub">;
-defm SVQSUB_U : SInstZPZZ<"svqsub", "UcUsUiUl", "aarch64_sve_uqsub">;
-defm SVQSUBR_S : SInstZPZZ<"svqsubr", "csli", "aarch64_sve_sqsubr">;
-defm SVQSUBR_U : SInstZPZZ<"svqsubr", "UcUsUiUl", "aarch64_sve_uqsubr">;
-defm SVHSUB_S : SInstZPZZ<"svhsub", "csli", "aarch64_sve_shsub">;
-defm SVHSUB_U : SInstZPZZ<"svhsub", "UcUsUiUl", "aarch64_sve_uhsub">;
-defm SVHSUBR_S : SInstZPZZ<"svhsubr", "csli", "aarch64_sve_shsubr">;
-defm SVHSUBR_U : SInstZPZZ<"svhsubr", "UcUsUiUl", "aarch64_sve_uhsubr">;
+let TargetGuard = "sve2" in {
+defm SVQADD_S : SInstZPZZ<"svqadd", "csli", "aarch64_sve_sqadd", "aarch64_sve_sqadd">;
+defm SVQADD_U : SInstZPZZ<"svqadd", "UcUsUiUl", "aarch64_sve_uqadd", "aarch64_sve_uqadd">;
+defm SVHADD_S : SInstZPZZ<"svhadd", "csli", "aarch64_sve_shadd", "aarch64_sve_shadd">;
+defm SVHADD_U : SInstZPZZ<"svhadd", "UcUsUiUl", "aarch64_sve_uhadd", "aarch64_sve_uhadd">;
+defm SVRHADD_S : SInstZPZZ<"svrhadd", "csli", "aarch64_sve_srhadd", "aarch64_sve_srhadd">;
+defm SVRHADD_U : SInstZPZZ<"svrhadd", "UcUsUiUl", "aarch64_sve_urhadd", "aarch64_sve_urhadd">;
+
+defm SVQSUB_S : SInstZPZZ<"svqsub", "csli", "aarch64_sve_sqsub", "aarch64_sve_sqsub_u">;
+defm SVQSUB_U : SInstZPZZ<"svqsub", "UcUsUiUl", "aarch64_sve_uqsub", "aarch64_sve_uqsub_u">;
+defm SVQSUBR_S : SInstZPZZ<"svqsubr", "csli", "aarch64_sve_sqsubr", "aarch64_sve_sqsub_u", [ReverseMergeAnyBinOp]>;
+defm SVQSUBR_U : SInstZPZZ<"svqsubr", "UcUsUiUl", "aarch64_sve_uqsubr", "aarch64_sve_uqsub_u", [ReverseMergeAnyBinOp]>;
+defm SVHSUB_S : SInstZPZZ<"svhsub", "csli", "aarch64_sve_shsub", "aarch64_sve_shsub">;
+defm SVHSUB_U : SInstZPZZ<"svhsub", "UcUsUiUl", "aarch64_sve_uhsub", "aarch64_sve_uhsub">;
+defm SVHSUBR_S : SInstZPZZ<"svhsubr", "csli", "aarch64_sve_shsubr", "aarch64_sve_shsubr">;
+defm SVHSUBR_U : SInstZPZZ<"svhsubr", "UcUsUiUl", "aarch64_sve_uhsubr", "aarch64_sve_uhsubr">;
defm SVQABS : SInstZPZ<"svqabs", "csil", "aarch64_sve_sqabs">;
defm SVQNEG : SInstZPZ<"svqneg", "csil", "aarch64_sve_sqneg">;
@@ -1526,50 +1396,50 @@ multiclass SInstZPZxZ<string name, string types, string pat_v, string pat_n, str
def _N_Z : SInst<name # "[_n_{d}]", pat_n, types, MergeZero, intrinsic, flags>;
}
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
-defm SVQRSHL_S : SInstZPZxZ<"svqrshl", "csil", "dPdx", "dPdK", "aarch64_sve_sqrshl">;
-defm SVQRSHL_U : SInstZPZxZ<"svqrshl", "UcUsUiUl", "dPdx", "dPdK", "aarch64_sve_uqrshl">;
-defm SVQSHL_S : SInstZPZxZ<"svqshl", "csil", "dPdx", "dPdK", "aarch64_sve_sqshl">;
-defm SVQSHL_U : SInstZPZxZ<"svqshl", "UcUsUiUl", "dPdx", "dPdK", "aarch64_sve_uqshl">;
-defm SVRSHL_S : SInstZPZxZ<"svrshl", "csil", "dPdx", "dPdK", "aarch64_sve_srshl">;
-defm SVRSHL_U : SInstZPZxZ<"svrshl", "UcUsUiUl", "dPdx", "dPdK", "aarch64_sve_urshl">;
-defm SVSQADD : SInstZPZxZ<"svsqadd", "UcUsUiUl", "dPdx", "dPdK", "aarch64_sve_usqadd">;
-defm SVUQADD : SInstZPZxZ<"svuqadd", "csil", "dPdu", "dPdL", "aarch64_sve_suqadd">;
-
-def SVABA_S : SInst<"svaba[_{d}]", "dddd", "csil" , MergeNone, "aarch64_sve_saba">;
-def SVABA_U : SInst<"svaba[_{d}]", "dddd", "UcUsUiUl", MergeNone, "aarch64_sve_uaba">;
-def SVQDMULH : SInst<"svqdmulh[_{d}]", "ddd", "csil", MergeNone, "aarch64_sve_sqdmulh">;
-def SVQRDMULH : SInst<"svqrdmulh[_{d}]", "ddd", "csil", MergeNone, "aarch64_sve_sqrdmulh">;
-def SVQRDMLAH : SInst<"svqrdmlah[_{d}]", "dddd", "csil", MergeNone, "aarch64_sve_sqrdmlah">;
-def SVQRDMLSH : SInst<"svqrdmlsh[_{d}]", "dddd", "csil", MergeNone, "aarch64_sve_sqrdmlsh">;
-
-def SVABA_S_N : SInst<"svaba[_n_{d}]", "ddda", "csil", MergeNone, "aarch64_sve_saba">;
-def SVABA_U_N : SInst<"svaba[_n_{d}]", "ddda", "UcUsUiUl", MergeNone, "aarch64_sve_uaba">;
-def SVQDMULH_N : SInst<"svqdmulh[_n_{d}]", "dda", "csil", MergeNone, "aarch64_sve_sqdmulh">;
-def SVQRDMULH_N : SInst<"svqrdmulh[_n_{d}]", "dda", "csil", MergeNone, "aarch64_sve_sqrdmulh">;
-def SVQRDMLAH_N : SInst<"svqrdmlah[_n_{d}]", "ddda", "csil", MergeNone, "aarch64_sve_sqrdmlah">;
-def SVQRDMLSH_N : SInst<"svqrdmlsh[_n_{d}]", "ddda", "csil", MergeNone, "aarch64_sve_sqrdmlsh">;
-
-def SVQDMULH_LANE : SInst<"svqdmulh_lane[_{d}]", "dddi", "sil", MergeNone, "aarch64_sve_sqdmulh_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
-def SVQRDMULH_LANE : SInst<"svqrdmulh_lane[_{d}]", "dddi", "sil", MergeNone, "aarch64_sve_sqrdmulh_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
-def SVQRDMLAH_LANE : SInst<"svqrdmlah_lane[_{d}]", "ddddi", "sil", MergeNone, "aarch64_sve_sqrdmlah_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
-def SVQRDMLSH_LANE : SInst<"svqrdmlsh_lane[_{d}]", "ddddi", "sil", MergeNone, "aarch64_sve_sqrdmlsh_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
-
-def SVQSHLU_M : SInst<"svqshlu[_n_{d}]", "uPdi", "csil", MergeOp1, "aarch64_sve_sqshlu", [], [ImmCheck<2, ImmCheckShiftLeft, 1>]>;
-def SVQSHLU_X : SInst<"svqshlu[_n_{d}]", "uPdi", "csil", MergeAny, "aarch64_sve_sqshlu", [], [ImmCheck<2, ImmCheckShiftLeft, 1>]>;
-def SVQSHLU_Z : SInst<"svqshlu[_n_{d}]", "uPdi", "csil", MergeZero, "aarch64_sve_sqshlu", [], [ImmCheck<2, ImmCheckShiftLeft, 1>]>;
-def SVRSHR_M_S : SInst<"svrshr[_n_{d}]", "dPdi", "csil", MergeOp1, "aarch64_sve_srshr", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
-def SVRSHR_M_U : SInst<"svrshr[_n_{d}]", "dPdi", "UcUsUiUl", MergeOp1, "aarch64_sve_urshr", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
-def SVRSHR_X_S : SInst<"svrshr[_n_{d}]", "dPdi", "csil", MergeAny, "aarch64_sve_srshr", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
-def SVRSHR_X_U : SInst<"svrshr[_n_{d}]", "dPdi", "UcUsUiUl", MergeAny, "aarch64_sve_urshr", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
-def SVRSHR_Z_S : SInst<"svrshr[_n_{d}]", "dPdi", "csil", MergeZero, "aarch64_sve_srshr", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
-def SVRSHR_Z_U : SInst<"svrshr[_n_{d}]", "dPdi", "UcUsUiUl", MergeZero, "aarch64_sve_urshr", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
-def SVRSRA_S : SInst<"svrsra[_n_{d}]", "dddi", "csil", MergeNone, "aarch64_sve_srsra", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
-def SVRSRA_U : SInst<"svrsra[_n_{d}]", "dddi", "UcUsUiUl", MergeNone, "aarch64_sve_ursra", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
-def SVSLI : SInst<"svsli[_n_{d}]", "dddi", "csilUcUsUiUl", MergeNone, "aarch64_sve_sli", [], [ImmCheck<2, ImmCheckShiftLeft, 1>]>;
-def SVSRA_S : SInst<"svsra[_n_{d}]", "dddi", "csil", MergeNone, "aarch64_sve_ssra", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
-def SVSRA_U : SInst<"svsra[_n_{d}]", "dddi", "UcUsUiUl", MergeNone, "aarch64_sve_usra", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
-def SVSRI : SInst<"svsri[_n_{d}]", "dddi", "csilUcUsUiUl", MergeNone, "aarch64_sve_sri", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+let TargetGuard = "sve2" in {
+defm SVQRSHL_S : SInstZPZxZ<"svqrshl", "csil", "dPdx", "dPdK", "aarch64_sve_sqrshl", [IsStreamingCompatible]>;
+defm SVQRSHL_U : SInstZPZxZ<"svqrshl", "UcUsUiUl", "dPdx", "dPdK", "aarch64_sve_uqrshl", [IsStreamingCompatible]>;
+defm SVQSHL_S : SInstZPZxZ<"svqshl", "csil", "dPdx", "dPdK", "aarch64_sve_sqshl", [IsStreamingCompatible]>;
+defm SVQSHL_U : SInstZPZxZ<"svqshl", "UcUsUiUl", "dPdx", "dPdK", "aarch64_sve_uqshl", [IsStreamingCompatible]>;
+defm SVRSHL_S : SInstZPZxZ<"svrshl", "csil", "dPdx", "dPdK", "aarch64_sve_srshl", [IsStreamingCompatible]>;
+defm SVRSHL_U : SInstZPZxZ<"svrshl", "UcUsUiUl", "dPdx", "dPdK", "aarch64_sve_urshl", [IsStreamingCompatible]>;
+defm SVSQADD : SInstZPZxZ<"svsqadd", "UcUsUiUl", "dPdx", "dPdK", "aarch64_sve_usqadd", [IsStreamingCompatible]>;
+defm SVUQADD : SInstZPZxZ<"svuqadd", "csil", "dPdu", "dPdL", "aarch64_sve_suqadd", [IsStreamingCompatible]>;
+
+def SVABA_S : SInst<"svaba[_{d}]", "dddd", "csil" , MergeNone, "aarch64_sve_saba", [IsStreamingCompatible]>;
+def SVABA_U : SInst<"svaba[_{d}]", "dddd", "UcUsUiUl", MergeNone, "aarch64_sve_uaba", [IsStreamingCompatible]>;
+def SVQDMULH : SInst<"svqdmulh[_{d}]", "ddd", "csil", MergeNone, "aarch64_sve_sqdmulh", [IsStreamingCompatible]>;
+def SVQRDMULH : SInst<"svqrdmulh[_{d}]", "ddd", "csil", MergeNone, "aarch64_sve_sqrdmulh", [IsStreamingCompatible]>;
+def SVQRDMLAH : SInst<"svqrdmlah[_{d}]", "dddd", "csil", MergeNone, "aarch64_sve_sqrdmlah", [IsStreamingCompatible]>;
+def SVQRDMLSH : SInst<"svqrdmlsh[_{d}]", "dddd", "csil", MergeNone, "aarch64_sve_sqrdmlsh", [IsStreamingCompatible]>;
+
+def SVABA_S_N : SInst<"svaba[_n_{d}]", "ddda", "csil", MergeNone, "aarch64_sve_saba", [IsStreamingCompatible]>;
+def SVABA_U_N : SInst<"svaba[_n_{d}]", "ddda", "UcUsUiUl", MergeNone, "aarch64_sve_uaba", [IsStreamingCompatible]>;
+def SVQDMULH_N : SInst<"svqdmulh[_n_{d}]", "dda", "csil", MergeNone, "aarch64_sve_sqdmulh", [IsStreamingCompatible]>;
+def SVQRDMULH_N : SInst<"svqrdmulh[_n_{d}]", "dda", "csil", MergeNone, "aarch64_sve_sqrdmulh", [IsStreamingCompatible]>;
+def SVQRDMLAH_N : SInst<"svqrdmlah[_n_{d}]", "ddda", "csil", MergeNone, "aarch64_sve_sqrdmlah", [IsStreamingCompatible]>;
+def SVQRDMLSH_N : SInst<"svqrdmlsh[_n_{d}]", "ddda", "csil", MergeNone, "aarch64_sve_sqrdmlsh", [IsStreamingCompatible]>;
+
+def SVQDMULH_LANE : SInst<"svqdmulh_lane[_{d}]", "dddi", "sil", MergeNone, "aarch64_sve_sqdmulh_lane", [IsStreamingCompatible], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVQRDMULH_LANE : SInst<"svqrdmulh_lane[_{d}]", "dddi", "sil", MergeNone, "aarch64_sve_sqrdmulh_lane", [IsStreamingCompatible], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVQRDMLAH_LANE : SInst<"svqrdmlah_lane[_{d}]", "ddddi", "sil", MergeNone, "aarch64_sve_sqrdmlah_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVQRDMLSH_LANE : SInst<"svqrdmlsh_lane[_{d}]", "ddddi", "sil", MergeNone, "aarch64_sve_sqrdmlsh_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+
+def SVQSHLU_M : SInst<"svqshlu[_n_{d}]", "uPdi", "csil", MergeOp1, "aarch64_sve_sqshlu", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftLeft, 1>]>;
+def SVQSHLU_X : SInst<"svqshlu[_n_{d}]", "uPdi", "csil", MergeAny, "aarch64_sve_sqshlu", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftLeft, 1>]>;
+def SVQSHLU_Z : SInst<"svqshlu[_n_{d}]", "uPdi", "csil", MergeZero, "aarch64_sve_sqshlu", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftLeft, 1>]>;
+def SVRSHR_M_S : SInst<"svrshr[_n_{d}]", "dPdi", "csil", MergeOp1, "aarch64_sve_srshr", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVRSHR_M_U : SInst<"svrshr[_n_{d}]", "dPdi", "UcUsUiUl", MergeOp1, "aarch64_sve_urshr", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVRSHR_X_S : SInst<"svrshr[_n_{d}]", "dPdi", "csil", MergeAny, "aarch64_sve_srshr", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVRSHR_X_U : SInst<"svrshr[_n_{d}]", "dPdi", "UcUsUiUl", MergeAny, "aarch64_sve_urshr", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVRSHR_Z_S : SInst<"svrshr[_n_{d}]", "dPdi", "csil", MergeZero, "aarch64_sve_srshr", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVRSHR_Z_U : SInst<"svrshr[_n_{d}]", "dPdi", "UcUsUiUl", MergeZero, "aarch64_sve_urshr", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVRSRA_S : SInst<"svrsra[_n_{d}]", "dddi", "csil", MergeNone, "aarch64_sve_srsra", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVRSRA_U : SInst<"svrsra[_n_{d}]", "dddi", "UcUsUiUl", MergeNone, "aarch64_sve_ursra", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVSLI : SInst<"svsli[_n_{d}]", "dddi", "csilUcUsUiUl", MergeNone, "aarch64_sve_sli", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftLeft, 1>]>;
+def SVSRA_S : SInst<"svsra[_n_{d}]", "dddi", "csil", MergeNone, "aarch64_sve_ssra", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVSRA_U : SInst<"svsra[_n_{d}]", "dddi", "UcUsUiUl", MergeNone, "aarch64_sve_usra", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVSRI : SInst<"svsri[_n_{d}]", "dddi", "csilUcUsUiUl", MergeNone, "aarch64_sve_sri", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
}
////////////////////////////////////////////////////////////////////////////////
@@ -1580,87 +1450,87 @@ multiclass SInstPairwise<string name, string types, string intrinsic, list<FlagT
def _X : SInst<name # "[_{d}]", "dPdd", types, MergeAny, intrinsic, flags>;
}
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
-defm SVADDP : SInstPairwise<"svaddp", "csliUcUsUiUl", "aarch64_sve_addp">;
-defm SVADDP_F : SInstPairwise<"svaddp", "hfd", "aarch64_sve_faddp">;
-defm SVMAXNMP : SInstPairwise<"svmaxnmp", "hfd", "aarch64_sve_fmaxnmp">;
-defm SVMAXP_F : SInstPairwise<"svmaxp", "hfd", "aarch64_sve_fmaxp">;
-defm SVMAXP_S : SInstPairwise<"svmaxp", "csli", "aarch64_sve_smaxp">;
-defm SVMAXP_U : SInstPairwise<"svmaxp", "UcUsUiUl", "aarch64_sve_umaxp">;
-defm SVMINNMP : SInstPairwise<"svminnmp", "hfd", "aarch64_sve_fminnmp">;
-defm SVMINP_F : SInstPairwise<"svminp", "hfd", "aarch64_sve_fminp">;
-defm SVMINP_S : SInstPairwise<"svminp", "csli", "aarch64_sve_sminp">;
-defm SVMINP_U : SInstPairwise<"svminp", "UcUsUiUl", "aarch64_sve_uminp">;
+let TargetGuard = "sve2" in {
+defm SVADDP : SInstPairwise<"svaddp", "csliUcUsUiUl", "aarch64_sve_addp", [IsStreamingCompatible]>;
+defm SVADDP_F : SInstPairwise<"svaddp", "hfd", "aarch64_sve_faddp", [IsStreamingCompatible]>;
+defm SVMAXNMP : SInstPairwise<"svmaxnmp", "hfd", "aarch64_sve_fmaxnmp", [IsStreamingCompatible]>;
+defm SVMAXP_F : SInstPairwise<"svmaxp", "hfd", "aarch64_sve_fmaxp", [IsStreamingCompatible]>;
+defm SVMAXP_S : SInstPairwise<"svmaxp", "csli", "aarch64_sve_smaxp", [IsStreamingCompatible]>;
+defm SVMAXP_U : SInstPairwise<"svmaxp", "UcUsUiUl", "aarch64_sve_umaxp", [IsStreamingCompatible]>;
+defm SVMINNMP : SInstPairwise<"svminnmp", "hfd", "aarch64_sve_fminnmp", [IsStreamingCompatible]>;
+defm SVMINP_F : SInstPairwise<"svminp", "hfd", "aarch64_sve_fminp", [IsStreamingCompatible]>;
+defm SVMINP_S : SInstPairwise<"svminp", "csli", "aarch64_sve_sminp", [IsStreamingCompatible]>;
+defm SVMINP_U : SInstPairwise<"svminp", "UcUsUiUl", "aarch64_sve_uminp", [IsStreamingCompatible]>;
}
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Widening pairwise arithmetic
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
-def SVADALP_S_M : SInst<"svadalp[_{d}]", "dPdh", "sil", MergeOp1, "aarch64_sve_sadalp">;
-def SVADALP_S_X : SInst<"svadalp[_{d}]", "dPdh", "sil", MergeAny, "aarch64_sve_sadalp">;
-def SVADALP_S_Z : SInst<"svadalp[_{d}]", "dPdh", "sil", MergeZero, "aarch64_sve_sadalp">;
+let TargetGuard = "sve2" in {
+def SVADALP_S_M : SInst<"svadalp[_{d}]", "dPdh", "sil", MergeOp1, "aarch64_sve_sadalp", [IsStreamingCompatible]>;
+def SVADALP_S_X : SInst<"svadalp[_{d}]", "dPdh", "sil", MergeAny, "aarch64_sve_sadalp", [IsStreamingCompatible]>;
+def SVADALP_S_Z : SInst<"svadalp[_{d}]", "dPdh", "sil", MergeZero, "aarch64_sve_sadalp", [IsStreamingCompatible]>;
-def SVADALP_U_M : SInst<"svadalp[_{d}]", "dPdh", "UsUiUl", MergeOp1, "aarch64_sve_uadalp">;
-def SVADALP_U_X : SInst<"svadalp[_{d}]", "dPdh", "UsUiUl", MergeAny, "aarch64_sve_uadalp">;
-def SVADALP_U_Z : SInst<"svadalp[_{d}]", "dPdh", "UsUiUl", MergeZero, "aarch64_sve_uadalp">;
+def SVADALP_U_M : SInst<"svadalp[_{d}]", "dPdh", "UsUiUl", MergeOp1, "aarch64_sve_uadalp", [IsStreamingCompatible]>;
+def SVADALP_U_X : SInst<"svadalp[_{d}]", "dPdh", "UsUiUl", MergeAny, "aarch64_sve_uadalp", [IsStreamingCompatible]>;
+def SVADALP_U_Z : SInst<"svadalp[_{d}]", "dPdh", "UsUiUl", MergeZero, "aarch64_sve_uadalp", [IsStreamingCompatible]>;
}
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Bitwise ternary logical instructions
//
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
-def SVBCAX : SInst<"svbcax[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_bcax">;
-def SVBSL : SInst<"svbsl[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl">;
-def SVBSL1N : SInst<"svbsl1n[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl1n">;
-def SVBSL2N : SInst<"svbsl2n[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl2n">;
-def SVEOR3 : SInst<"sveor3[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_eor3">;
-def SVNBSL : SInst<"svnbsl[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_nbsl">;
+let TargetGuard = "sve2" in {
+def SVBCAX : SInst<"svbcax[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_bcax", [IsStreamingCompatible]>;
+def SVBSL : SInst<"svbsl[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl", [IsStreamingCompatible]>;
+def SVBSL1N : SInst<"svbsl1n[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl1n", [IsStreamingCompatible]>;
+def SVBSL2N : SInst<"svbsl2n[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl2n", [IsStreamingCompatible]>;
+def SVEOR3 : SInst<"sveor3[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_eor3", [IsStreamingCompatible]>;
+def SVNBSL : SInst<"svnbsl[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_nbsl", [IsStreamingCompatible]>;
-def SVBCAX_N : SInst<"svbcax[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_bcax">;
-def SVBSL_N : SInst<"svbsl[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl">;
-def SVBSL1N_N : SInst<"svbsl1n[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl1n">;
-def SVBSL2N_N : SInst<"svbsl2n[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl2n">;
-def SVEOR3_N : SInst<"sveor3[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_eor3">;
-def SVNBSL_N : SInst<"svnbsl[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_nbsl">;
-def SVXAR_N : SInst<"svxar[_n_{d}]", "dddi", "csilUcUsUiUl", MergeNone, "aarch64_sve_xar", [], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
+def SVBCAX_N : SInst<"svbcax[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_bcax", [IsStreamingCompatible]>;
+def SVBSL_N : SInst<"svbsl[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl", [IsStreamingCompatible]>;
+def SVBSL1N_N : SInst<"svbsl1n[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl1n", [IsStreamingCompatible]>;
+def SVBSL2N_N : SInst<"svbsl2n[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_bsl2n", [IsStreamingCompatible]>;
+def SVEOR3_N : SInst<"sveor3[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_eor3", [IsStreamingCompatible]>;
+def SVNBSL_N : SInst<"svnbsl[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_nbsl", [IsStreamingCompatible]>;
+def SVXAR_N : SInst<"svxar[_n_{d}]", "dddi", "csilUcUsUiUl", MergeNone, "aarch64_sve_xar", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRight, 1>]>;
}
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Large integer arithmetic
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
-def SVADCLB : SInst<"svadclb[_{d}]", "dddd", "UiUl", MergeNone, "aarch64_sve_adclb">;
-def SVADCLT : SInst<"svadclt[_{d}]", "dddd", "UiUl", MergeNone, "aarch64_sve_adclt">;
-def SVSBCLB : SInst<"svsbclb[_{d}]", "dddd", "UiUl", MergeNone, "aarch64_sve_sbclb">;
-def SVSBCLT : SInst<"svsbclt[_{d}]", "dddd", "UiUl", MergeNone, "aarch64_sve_sbclt">;
+let TargetGuard = "sve2" in {
+def SVADCLB : SInst<"svadclb[_{d}]", "dddd", "UiUl", MergeNone, "aarch64_sve_adclb", [IsStreamingCompatible]>;
+def SVADCLT : SInst<"svadclt[_{d}]", "dddd", "UiUl", MergeNone, "aarch64_sve_adclt", [IsStreamingCompatible]>;
+def SVSBCLB : SInst<"svsbclb[_{d}]", "dddd", "UiUl", MergeNone, "aarch64_sve_sbclb", [IsStreamingCompatible]>;
+def SVSBCLT : SInst<"svsbclt[_{d}]", "dddd", "UiUl", MergeNone, "aarch64_sve_sbclt", [IsStreamingCompatible]>;
-def SVADCLB_N : SInst<"svadclb[_n_{d}]", "ddda", "UiUl", MergeNone, "aarch64_sve_adclb">;
-def SVADCLT_N : SInst<"svadclt[_n_{d}]", "ddda", "UiUl", MergeNone, "aarch64_sve_adclt">;
-def SVSBCLB_N : SInst<"svsbclb[_n_{d}]", "ddda", "UiUl", MergeNone, "aarch64_sve_sbclb">;
-def SVSBCLT_N : SInst<"svsbclt[_n_{d}]", "ddda", "UiUl", MergeNone, "aarch64_sve_sbclt">;
+def SVADCLB_N : SInst<"svadclb[_n_{d}]", "ddda", "UiUl", MergeNone, "aarch64_sve_adclb", [IsStreamingCompatible]>;
+def SVADCLT_N : SInst<"svadclt[_n_{d}]", "ddda", "UiUl", MergeNone, "aarch64_sve_adclt", [IsStreamingCompatible]>;
+def SVSBCLB_N : SInst<"svsbclb[_n_{d}]", "ddda", "UiUl", MergeNone, "aarch64_sve_sbclb", [IsStreamingCompatible]>;
+def SVSBCLT_N : SInst<"svsbclt[_n_{d}]", "ddda", "UiUl", MergeNone, "aarch64_sve_sbclt", [IsStreamingCompatible]>;
}
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Multiplication by indexed elements
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
-def SVMLA_LANE_2 : SInst<"svmla_lane[_{d}]", "ddddi", "silUsUiUl", MergeNone, "aarch64_sve_mla_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
-def SVMLS_LANE_2 : SInst<"svmls_lane[_{d}]", "ddddi", "silUsUiUl", MergeNone, "aarch64_sve_mls_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
-def SVMUL_LANE_2 : SInst<"svmul_lane[_{d}]", "dddi", "silUsUiUl", MergeNone, "aarch64_sve_mul_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+let TargetGuard = "sve2" in {
+def SVMLA_LANE_2 : SInst<"svmla_lane[_{d}]", "ddddi", "silUsUiUl", MergeNone, "aarch64_sve_mla_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLS_LANE_2 : SInst<"svmls_lane[_{d}]", "ddddi", "silUsUiUl", MergeNone, "aarch64_sve_mls_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMUL_LANE_2 : SInst<"svmul_lane[_{d}]", "dddi", "silUsUiUl", MergeNone, "aarch64_sve_mul_lane", [IsStreamingCompatible], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
}
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Uniform complex integer arithmetic
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
-def SVCADD : SInst<"svcadd[_{d}]", "dddi", "csilUcUsUiUl", MergeNone, "aarch64_sve_cadd_x", [], [ImmCheck<2, ImmCheckComplexRot90_270>]>;
-def SVSQCADD : SInst<"svqcadd[_{d}]", "dddi", "csil", MergeNone, "aarch64_sve_sqcadd_x", [], [ImmCheck<2, ImmCheckComplexRot90_270>]>;
-def SVCMLA : SInst<"svcmla[_{d}]", "ddddi", "csilUcUsUiUl", MergeNone, "aarch64_sve_cmla_x", [], [ImmCheck<3, ImmCheckComplexRotAll90>]>;
-def SVCMLA_LANE_X : SInst<"svcmla_lane[_{d}]", "ddddii", "siUsUi", MergeNone, "aarch64_sve_cmla_lane_x", [], [ImmCheck<3, ImmCheckLaneIndexCompRotate, 2>,
+let TargetGuard = "sve2" in {
+def SVCADD : SInst<"svcadd[_{d}]", "dddi", "csilUcUsUiUl", MergeNone, "aarch64_sve_cadd_x", [IsStreamingCompatible], [ImmCheck<2, ImmCheckComplexRot90_270>]>;
+def SVSQCADD : SInst<"svqcadd[_{d}]", "dddi", "csil", MergeNone, "aarch64_sve_sqcadd_x", [IsStreamingCompatible], [ImmCheck<2, ImmCheckComplexRot90_270>]>;
+def SVCMLA : SInst<"svcmla[_{d}]", "ddddi", "csilUcUsUiUl", MergeNone, "aarch64_sve_cmla_x", [IsStreamingCompatible], [ImmCheck<3, ImmCheckComplexRotAll90>]>;
+def SVCMLA_LANE_X : SInst<"svcmla_lane[_{d}]", "ddddii", "siUsUi", MergeNone, "aarch64_sve_cmla_lane_x", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndexCompRotate, 2>,
ImmCheck<4, ImmCheckComplexRotAll90>]>;
-def SVSQRDCMLAH_X : SInst<"svqrdcmlah[_{d}]", "ddddi", "csil", MergeNone, "aarch64_sve_sqrdcmlah_x", [], [ImmCheck<3, ImmCheckComplexRotAll90>]>;
-def SVSQRDCMLAH_LANE_X : SInst<"svqrdcmlah_lane[_{d}]", "ddddii", "si", MergeNone, "aarch64_sve_sqrdcmlah_lane_x", [], [ImmCheck<3, ImmCheckLaneIndexCompRotate, 2>,
+def SVSQRDCMLAH_X : SInst<"svqrdcmlah[_{d}]", "ddddi", "csil", MergeNone, "aarch64_sve_sqrdcmlah_x", [IsStreamingCompatible], [ImmCheck<3, ImmCheckComplexRotAll90>]>;
+def SVSQRDCMLAH_LANE_X : SInst<"svqrdcmlah_lane[_{d}]", "ddddii", "si", MergeNone, "aarch64_sve_sqrdcmlah_lane_x", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndexCompRotate, 2>,
ImmCheck<4, ImmCheckComplexRotAll90>]>;
}
@@ -1668,21 +1538,21 @@ def SVSQRDCMLAH_LANE_X : SInst<"svqrdcmlah_lane[_{d}]", "ddddii", "si",
// SVE2 - Widening DSP operations
multiclass SInstWideDSPAcc<string name, string types, string intrinsic> {
- def : SInst<name # "[_{d}]", "ddhh", types, MergeNone, intrinsic>;
- def _N : SInst<name # "[_n_{d}]", "ddhR", types, MergeNone, intrinsic>;
+ def : SInst<name # "[_{d}]", "ddhh", types, MergeNone, intrinsic, [IsStreamingCompatible]>;
+ def _N : SInst<name # "[_n_{d}]", "ddhR", types, MergeNone, intrinsic, [IsStreamingCompatible]>;
}
multiclass SInstWideDSPLong<string name, string types, string intrinsic> {
- def : SInst<name # "[_{d}]", "dhh", types, MergeNone, intrinsic>;
- def _N : SInst<name # "[_n_{d}]", "dhR", types, MergeNone, intrinsic>;
+ def : SInst<name # "[_{d}]", "dhh", types, MergeNone, intrinsic, [IsStreamingCompatible]>;
+ def _N : SInst<name # "[_n_{d}]", "dhR", types, MergeNone, intrinsic, [IsStreamingCompatible]>;
}
multiclass SInstWideDSPWide<string name, string types, string intrinsic> {
- def : SInst<name # "[_{d}]", "ddh", types, MergeNone, intrinsic>;
- def _N : SInst<name # "[_n_{d}]", "ddR", types, MergeNone, intrinsic>;
+ def : SInst<name # "[_{d}]", "ddh", types, MergeNone, intrinsic, [IsStreamingCompatible]>;
+ def _N : SInst<name # "[_n_{d}]", "ddR", types, MergeNone, intrinsic, [IsStreamingCompatible]>;
}
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+let TargetGuard = "sve2" in {
defm SVABALB_S : SInstWideDSPAcc<"svabalb", "sil", "aarch64_sve_sabalb">;
defm SVABALB_U : SInstWideDSPAcc<"svabalb", "UsUiUl", "aarch64_sve_uabalb">;
defm SVABALT_S : SInstWideDSPAcc<"svabalt", "sil", "aarch64_sve_sabalt">;
@@ -1728,93 +1598,93 @@ defm SVSUBWB_U : SInstWideDSPWide<"svsubwb", "UsUiUl", "aarch64_sve_usubwb">;
defm SVSUBWT_S : SInstWideDSPWide<"svsubwt", "sil", "aarch64_sve_ssubwt">;
defm SVSUBWT_U : SInstWideDSPWide<"svsubwt", "UsUiUl", "aarch64_sve_usubwt">;
-def SVSHLLB_S_N : SInst<"svshllb[_n_{d}]", "dhi", "sil", MergeNone, "aarch64_sve_sshllb", [], [ImmCheck<1, ImmCheckShiftLeft, 0>]>;
-def SVSHLLB_U_N : SInst<"svshllb[_n_{d}]", "dhi", "UsUiUl", MergeNone, "aarch64_sve_ushllb", [], [ImmCheck<1, ImmCheckShiftLeft, 0>]>;
-def SVSHLLT_S_N : SInst<"svshllt[_n_{d}]", "dhi", "sil", MergeNone, "aarch64_sve_sshllt", [], [ImmCheck<1, ImmCheckShiftLeft, 0>]>;
-def SVSHLLT_U_N : SInst<"svshllt[_n_{d}]", "dhi", "UsUiUl", MergeNone, "aarch64_sve_ushllt", [], [ImmCheck<1, ImmCheckShiftLeft, 0>]>;
-
-def SVMOVLB_S_N : SInst<"svmovlb[_{d}]", "dh", "sil", MergeNone>;
-def SVMOVLB_U_N : SInst<"svmovlb[_{d}]", "dh", "UsUiUl", MergeNone>;
-def SVMOVLT_S_N : SInst<"svmovlt[_{d}]", "dh", "sil", MergeNone>;
-def SVMOVLT_U_N : SInst<"svmovlt[_{d}]", "dh", "UsUiUl", MergeNone>;
-
-def SVMLALB_S_LANE : SInst<"svmlalb_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_smlalb_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
-def SVMLALB_U_LANE : SInst<"svmlalb_lane[_{d}]", "ddhhi", "UiUl", MergeNone, "aarch64_sve_umlalb_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
-def SVMLALT_S_LANE : SInst<"svmlalt_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_smlalt_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
-def SVMLALT_U_LANE : SInst<"svmlalt_lane[_{d}]", "ddhhi", "UiUl", MergeNone, "aarch64_sve_umlalt_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
-def SVMLSLB_S_LANE : SInst<"svmlslb_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_smlslb_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
-def SVMLSLB_U_LANE : SInst<"svmlslb_lane[_{d}]", "ddhhi", "UiUl", MergeNone, "aarch64_sve_umlslb_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
-def SVMLSLT_S_LANE : SInst<"svmlslt_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_smlslt_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
-def SVMLSLT_U_LANE : SInst<"svmlslt_lane[_{d}]", "ddhhi", "UiUl", MergeNone, "aarch64_sve_umlslt_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
-def SVMULLB_S_LANE : SInst<"svmullb_lane[_{d}]", "dhhi", "il", MergeNone, "aarch64_sve_smullb_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
-def SVMULLB_U_LANE : SInst<"svmullb_lane[_{d}]", "dhhi", "UiUl", MergeNone, "aarch64_sve_umullb_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
-def SVMULLT_S_LANE : SInst<"svmullt_lane[_{d}]", "dhhi", "il", MergeNone, "aarch64_sve_smullt_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
-def SVMULLT_U_LANE : SInst<"svmullt_lane[_{d}]", "dhhi", "UiUl", MergeNone, "aarch64_sve_umullt_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
-def SVQDMLALB_LANE : SInst<"svqdmlalb_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_sqdmlalb_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
-def SVQDMLALT_LANE : SInst<"svqdmlalt_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_sqdmlalt_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
-def SVQDMLSLB_LANE : SInst<"svqdmlslb_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_sqdmlslb_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
-def SVQDMLSLT_LANE : SInst<"svqdmlslt_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_sqdmlslt_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
-def SVQDMULLB_LANE : SInst<"svqdmullb_lane[_{d}]", "dhhi", "il", MergeNone, "aarch64_sve_sqdmullb_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
-def SVQDMULLT_LANE : SInst<"svqdmullt_lane[_{d}]", "dhhi", "il", MergeNone, "aarch64_sve_sqdmullt_lane", [], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVSHLLB_S_N : SInst<"svshllb[_n_{d}]", "dhi", "sil", MergeNone, "aarch64_sve_sshllb", [IsStreamingCompatible], [ImmCheck<1, ImmCheckShiftLeft, 0>]>;
+def SVSHLLB_U_N : SInst<"svshllb[_n_{d}]", "dhi", "UsUiUl", MergeNone, "aarch64_sve_ushllb", [IsStreamingCompatible], [ImmCheck<1, ImmCheckShiftLeft, 0>]>;
+def SVSHLLT_S_N : SInst<"svshllt[_n_{d}]", "dhi", "sil", MergeNone, "aarch64_sve_sshllt", [IsStreamingCompatible], [ImmCheck<1, ImmCheckShiftLeft, 0>]>;
+def SVSHLLT_U_N : SInst<"svshllt[_n_{d}]", "dhi", "UsUiUl", MergeNone, "aarch64_sve_ushllt", [IsStreamingCompatible], [ImmCheck<1, ImmCheckShiftLeft, 0>]>;
+
+def SVMOVLB_S_N : SInst<"svmovlb[_{d}]", "dh", "sil", MergeNone, "", [IsStreamingCompatible]>;
+def SVMOVLB_U_N : SInst<"svmovlb[_{d}]", "dh", "UsUiUl", MergeNone, "", [IsStreamingCompatible]>;
+def SVMOVLT_S_N : SInst<"svmovlt[_{d}]", "dh", "sil", MergeNone, "", [IsStreamingCompatible]>;
+def SVMOVLT_U_N : SInst<"svmovlt[_{d}]", "dh", "UsUiUl", MergeNone, "", [IsStreamingCompatible]>;
+
+def SVMLALB_S_LANE : SInst<"svmlalb_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_smlalb_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLALB_U_LANE : SInst<"svmlalb_lane[_{d}]", "ddhhi", "UiUl", MergeNone, "aarch64_sve_umlalb_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLALT_S_LANE : SInst<"svmlalt_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_smlalt_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLALT_U_LANE : SInst<"svmlalt_lane[_{d}]", "ddhhi", "UiUl", MergeNone, "aarch64_sve_umlalt_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLSLB_S_LANE : SInst<"svmlslb_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_smlslb_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLSLB_U_LANE : SInst<"svmlslb_lane[_{d}]", "ddhhi", "UiUl", MergeNone, "aarch64_sve_umlslb_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLSLT_S_LANE : SInst<"svmlslt_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_smlslt_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLSLT_U_LANE : SInst<"svmlslt_lane[_{d}]", "ddhhi", "UiUl", MergeNone, "aarch64_sve_umlslt_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMULLB_S_LANE : SInst<"svmullb_lane[_{d}]", "dhhi", "il", MergeNone, "aarch64_sve_smullb_lane", [IsStreamingCompatible], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVMULLB_U_LANE : SInst<"svmullb_lane[_{d}]", "dhhi", "UiUl", MergeNone, "aarch64_sve_umullb_lane", [IsStreamingCompatible], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVMULLT_S_LANE : SInst<"svmullt_lane[_{d}]", "dhhi", "il", MergeNone, "aarch64_sve_smullt_lane", [IsStreamingCompatible], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVMULLT_U_LANE : SInst<"svmullt_lane[_{d}]", "dhhi", "UiUl", MergeNone, "aarch64_sve_umullt_lane", [IsStreamingCompatible], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVQDMLALB_LANE : SInst<"svqdmlalb_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_sqdmlalb_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVQDMLALT_LANE : SInst<"svqdmlalt_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_sqdmlalt_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVQDMLSLB_LANE : SInst<"svqdmlslb_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_sqdmlslb_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVQDMLSLT_LANE : SInst<"svqdmlslt_lane[_{d}]", "ddhhi", "il", MergeNone, "aarch64_sve_sqdmlslt_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVQDMULLB_LANE : SInst<"svqdmullb_lane[_{d}]", "dhhi", "il", MergeNone, "aarch64_sve_sqdmullb_lane", [IsStreamingCompatible], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVQDMULLT_LANE : SInst<"svqdmullt_lane[_{d}]", "dhhi", "il", MergeNone, "aarch64_sve_sqdmullt_lane", [IsStreamingCompatible], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
}
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Narrowing DSP operations
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
-def SVADDHNB : SInst<"svaddhnb[_{d}]", "hdd", "silUsUiUl", MergeNone, "aarch64_sve_addhnb">;
-def SVADDHNT : SInst<"svaddhnt[_{d}]", "hhdd", "silUsUiUl", MergeNone, "aarch64_sve_addhnt">;
-def SVRADDHNB : SInst<"svraddhnb[_{d}]", "hdd", "silUsUiUl", MergeNone, "aarch64_sve_raddhnb">;
-def SVRADDHNT : SInst<"svraddhnt[_{d}]", "hhdd", "silUsUiUl", MergeNone, "aarch64_sve_raddhnt">;
-def SVRSUBHNB : SInst<"svrsubhnb[_{d}]", "hdd", "silUsUiUl", MergeNone, "aarch64_sve_rsubhnb">;
-def SVRSUBHNT : SInst<"svrsubhnt[_{d}]", "hhdd", "silUsUiUl", MergeNone, "aarch64_sve_rsubhnt">;
-def SVSUBHNB : SInst<"svsubhnb[_{d}]", "hdd", "silUsUiUl", MergeNone, "aarch64_sve_subhnb">;
-def SVSUBHNT : SInst<"svsubhnt[_{d}]", "hhdd", "silUsUiUl", MergeNone, "aarch64_sve_subhnt">;
-
-def SVADDHNB_N : SInst<"svaddhnb[_n_{d}]", "hda", "silUsUiUl", MergeNone, "aarch64_sve_addhnb">;
-def SVADDHNT_N : SInst<"svaddhnt[_n_{d}]", "hhda", "silUsUiUl", MergeNone, "aarch64_sve_addhnt">;
-def SVRADDHNB_N : SInst<"svraddhnb[_n_{d}]", "hda", "silUsUiUl", MergeNone, "aarch64_sve_raddhnb">;
-def SVRADDHNT_N : SInst<"svraddhnt[_n_{d}]", "hhda", "silUsUiUl", MergeNone, "aarch64_sve_raddhnt">;
-def SVRSUBHNB_N : SInst<"svrsubhnb[_n_{d}]", "hda", "silUsUiUl", MergeNone, "aarch64_sve_rsubhnb">;
-def SVRSUBHNT_N : SInst<"svrsubhnt[_n_{d}]", "hhda", "silUsUiUl", MergeNone, "aarch64_sve_rsubhnt">;
-def SVSUBHNB_N : SInst<"svsubhnb[_n_{d}]", "hda", "silUsUiUl", MergeNone, "aarch64_sve_subhnb">;
-def SVSUBHNT_N : SInst<"svsubhnt[_n_{d}]", "hhda", "silUsUiUl", MergeNone, "aarch64_sve_subhnt">;
-
-def SVSHRNB : SInst<"svshrnb[_n_{d}]", "hdi", "silUsUiUl", MergeNone, "aarch64_sve_shrnb", [], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
-def SVRSHRNB : SInst<"svrshrnb[_n_{d}]", "hdi", "silUsUiUl", MergeNone, "aarch64_sve_rshrnb", [], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
-def SVQSHRUNB : SInst<"svqshrunb[_n_{d}]", "edi", "sil", MergeNone, "aarch64_sve_sqshrunb", [], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
-def SVQRSHRUNB : SInst<"svqrshrunb[_n_{d}]", "edi", "sil", MergeNone, "aarch64_sve_sqrshrunb", [], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
-def SVQSHRNB_S : SInst<"svqshrnb[_n_{d}]", "hdi", "sil", MergeNone, "aarch64_sve_sqshrnb", [], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
-def SVQSHRNB_U : SInst<"svqshrnb[_n_{d}]", "hdi", "UsUiUl", MergeNone, "aarch64_sve_uqshrnb", [], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
-def SVQRSHRNB_S : SInst<"svqrshrnb[_n_{d}]", "hdi", "sil", MergeNone, "aarch64_sve_sqrshrnb", [], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
-def SVQRSHRNB_U : SInst<"svqrshrnb[_n_{d}]", "hdi", "UsUiUl", MergeNone, "aarch64_sve_uqrshrnb", [], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
-
-def SVSHRNT : SInst<"svshrnt[_n_{d}]", "hhdi", "silUsUiUl", MergeNone, "aarch64_sve_shrnt", [], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
-def SVRSHRNT : SInst<"svrshrnt[_n_{d}]", "hhdi", "silUsUiUl", MergeNone, "aarch64_sve_rshrnt", [], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
-def SVQSHRUNT : SInst<"svqshrunt[_n_{d}]", "eedi", "sil", MergeNone, "aarch64_sve_sqshrunt", [], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
-def SVQRSHRUNT : SInst<"svqrshrunt[_n_{d}]", "eedi", "sil", MergeNone, "aarch64_sve_sqrshrunt", [], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
-def SVQSHRNT_S : SInst<"svqshrnt[_n_{d}]", "hhdi", "sil", MergeNone, "aarch64_sve_sqshrnt", [], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
-def SVQSHRNT_U : SInst<"svqshrnt[_n_{d}]", "hhdi", "UsUiUl", MergeNone, "aarch64_sve_uqshrnt", [], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
-def SVQRSHRNT_S : SInst<"svqrshrnt[_n_{d}]", "hhdi", "sil", MergeNone, "aarch64_sve_sqrshrnt", [], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
-def SVQRSHRNT_U : SInst<"svqrshrnt[_n_{d}]", "hhdi", "UsUiUl", MergeNone, "aarch64_sve_uqrshrnt", [], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
+let TargetGuard = "sve2" in {
+def SVADDHNB : SInst<"svaddhnb[_{d}]", "hdd", "silUsUiUl", MergeNone, "aarch64_sve_addhnb", [IsStreamingCompatible]>;
+def SVADDHNT : SInst<"svaddhnt[_{d}]", "hhdd", "silUsUiUl", MergeNone, "aarch64_sve_addhnt", [IsStreamingCompatible]>;
+def SVRADDHNB : SInst<"svraddhnb[_{d}]", "hdd", "silUsUiUl", MergeNone, "aarch64_sve_raddhnb", [IsStreamingCompatible]>;
+def SVRADDHNT : SInst<"svraddhnt[_{d}]", "hhdd", "silUsUiUl", MergeNone, "aarch64_sve_raddhnt", [IsStreamingCompatible]>;
+def SVRSUBHNB : SInst<"svrsubhnb[_{d}]", "hdd", "silUsUiUl", MergeNone, "aarch64_sve_rsubhnb", [IsStreamingCompatible]>;
+def SVRSUBHNT : SInst<"svrsubhnt[_{d}]", "hhdd", "silUsUiUl", MergeNone, "aarch64_sve_rsubhnt", [IsStreamingCompatible]>;
+def SVSUBHNB : SInst<"svsubhnb[_{d}]", "hdd", "silUsUiUl", MergeNone, "aarch64_sve_subhnb", [IsStreamingCompatible]>;
+def SVSUBHNT : SInst<"svsubhnt[_{d}]", "hhdd", "silUsUiUl", MergeNone, "aarch64_sve_subhnt", [IsStreamingCompatible]>;
+
+def SVADDHNB_N : SInst<"svaddhnb[_n_{d}]", "hda", "silUsUiUl", MergeNone, "aarch64_sve_addhnb", [IsStreamingCompatible]>;
+def SVADDHNT_N : SInst<"svaddhnt[_n_{d}]", "hhda", "silUsUiUl", MergeNone, "aarch64_sve_addhnt", [IsStreamingCompatible]>;
+def SVRADDHNB_N : SInst<"svraddhnb[_n_{d}]", "hda", "silUsUiUl", MergeNone, "aarch64_sve_raddhnb", [IsStreamingCompatible]>;
+def SVRADDHNT_N : SInst<"svraddhnt[_n_{d}]", "hhda", "silUsUiUl", MergeNone, "aarch64_sve_raddhnt", [IsStreamingCompatible]>;
+def SVRSUBHNB_N : SInst<"svrsubhnb[_n_{d}]", "hda", "silUsUiUl", MergeNone, "aarch64_sve_rsubhnb", [IsStreamingCompatible]>;
+def SVRSUBHNT_N : SInst<"svrsubhnt[_n_{d}]", "hhda", "silUsUiUl", MergeNone, "aarch64_sve_rsubhnt", [IsStreamingCompatible]>;
+def SVSUBHNB_N : SInst<"svsubhnb[_n_{d}]", "hda", "silUsUiUl", MergeNone, "aarch64_sve_subhnb", [IsStreamingCompatible]>;
+def SVSUBHNT_N : SInst<"svsubhnt[_n_{d}]", "hhda", "silUsUiUl", MergeNone, "aarch64_sve_subhnt", [IsStreamingCompatible]>;
+
+def SVSHRNB : SInst<"svshrnb[_n_{d}]", "hdi", "silUsUiUl", MergeNone, "aarch64_sve_shrnb", [IsStreamingCompatible], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
+def SVRSHRNB : SInst<"svrshrnb[_n_{d}]", "hdi", "silUsUiUl", MergeNone, "aarch64_sve_rshrnb", [IsStreamingCompatible], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
+def SVQSHRUNB : SInst<"svqshrunb[_n_{d}]", "edi", "sil", MergeNone, "aarch64_sve_sqshrunb", [IsStreamingCompatible], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
+def SVQRSHRUNB : SInst<"svqrshrunb[_n_{d}]", "edi", "sil", MergeNone, "aarch64_sve_sqrshrunb", [IsStreamingCompatible], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
+def SVQSHRNB_S : SInst<"svqshrnb[_n_{d}]", "hdi", "sil", MergeNone, "aarch64_sve_sqshrnb", [IsStreamingCompatible], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
+def SVQSHRNB_U : SInst<"svqshrnb[_n_{d}]", "hdi", "UsUiUl", MergeNone, "aarch64_sve_uqshrnb", [IsStreamingCompatible], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
+def SVQRSHRNB_S : SInst<"svqrshrnb[_n_{d}]", "hdi", "sil", MergeNone, "aarch64_sve_sqrshrnb", [IsStreamingCompatible], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
+def SVQRSHRNB_U : SInst<"svqrshrnb[_n_{d}]", "hdi", "UsUiUl", MergeNone, "aarch64_sve_uqrshrnb", [IsStreamingCompatible], [ImmCheck<1, ImmCheckShiftRightNarrow, 0>]>;
+
+def SVSHRNT : SInst<"svshrnt[_n_{d}]", "hhdi", "silUsUiUl", MergeNone, "aarch64_sve_shrnt", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
+def SVRSHRNT : SInst<"svrshrnt[_n_{d}]", "hhdi", "silUsUiUl", MergeNone, "aarch64_sve_rshrnt", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
+def SVQSHRUNT : SInst<"svqshrunt[_n_{d}]", "eedi", "sil", MergeNone, "aarch64_sve_sqshrunt", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
+def SVQRSHRUNT : SInst<"svqrshrunt[_n_{d}]", "eedi", "sil", MergeNone, "aarch64_sve_sqrshrunt", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
+def SVQSHRNT_S : SInst<"svqshrnt[_n_{d}]", "hhdi", "sil", MergeNone, "aarch64_sve_sqshrnt", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
+def SVQSHRNT_U : SInst<"svqshrnt[_n_{d}]", "hhdi", "UsUiUl", MergeNone, "aarch64_sve_uqshrnt", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
+def SVQRSHRNT_S : SInst<"svqrshrnt[_n_{d}]", "hhdi", "sil", MergeNone, "aarch64_sve_sqrshrnt", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
+def SVQRSHRNT_U : SInst<"svqrshrnt[_n_{d}]", "hhdi", "UsUiUl", MergeNone, "aarch64_sve_uqrshrnt", [IsStreamingCompatible], [ImmCheck<2, ImmCheckShiftRightNarrow, 1>]>;
}
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Unary narrowing operations
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
-def SVQXTNB_S : SInst<"svqxtnb[_{d}]", "hd", "sil", MergeNone, "aarch64_sve_sqxtnb">;
-def SVQXTNB_U : SInst<"svqxtnb[_{d}]", "hd", "UsUiUl", MergeNone, "aarch64_sve_uqxtnb">;
-def SVQXTUNB_S : SInst<"svqxtunb[_{d}]", "ed", "sil", MergeNone, "aarch64_sve_sqxtunb">;
+let TargetGuard = "sve2" in {
+def SVQXTNB_S : SInst<"svqxtnb[_{d}]", "hd", "sil", MergeNone, "aarch64_sve_sqxtnb", [IsStreamingCompatible]>;
+def SVQXTNB_U : SInst<"svqxtnb[_{d}]", "hd", "UsUiUl", MergeNone, "aarch64_sve_uqxtnb", [IsStreamingCompatible]>;
+def SVQXTUNB_S : SInst<"svqxtunb[_{d}]", "ed", "sil", MergeNone, "aarch64_sve_sqxtunb", [IsStreamingCompatible]>;
-def SVQXTNT_S : SInst<"svqxtnt[_{d}]", "hhd", "sil", MergeNone, "aarch64_sve_sqxtnt">;
-def SVQXTNT_U : SInst<"svqxtnt[_{d}]", "hhd", "UsUiUl", MergeNone, "aarch64_sve_uqxtnt">;
-def SVQXTUNT_S : SInst<"svqxtunt[_{d}]", "eed", "sil", MergeNone, "aarch64_sve_sqxtunt">;
+def SVQXTNT_S : SInst<"svqxtnt[_{d}]", "hhd", "sil", MergeNone, "aarch64_sve_sqxtnt", [IsStreamingCompatible]>;
+def SVQXTNT_U : SInst<"svqxtnt[_{d}]", "hhd", "UsUiUl", MergeNone, "aarch64_sve_uqxtnt", [IsStreamingCompatible]>;
+def SVQXTUNT_S : SInst<"svqxtunt[_{d}]", "eed", "sil", MergeNone, "aarch64_sve_sqxtunt", [IsStreamingCompatible]>;
}
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Widening complex integer arithmetic
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+let TargetGuard = "sve2" in {
defm SVADDLBT : SInstWideDSPLong<"svaddlbt", "sil", "aarch64_sve_saddlbt">;
defm SVSUBLBT : SInstWideDSPLong<"svsublbt", "sil", "aarch64_sve_ssublbt">;
defm SVSUBLTB : SInstWideDSPLong<"svsubltb", "sil", "aarch64_sve_ssubltb">;
@@ -1825,7 +1695,7 @@ defm SVQDMLSLBT : SInstWideDSPAcc<"svqdmlslbt", "sil", "aarch64_sve_sqdmlslbt">;
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Non-temporal gather/scatter
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+let TargetGuard = "sve2" in {
// Non-temporal gather load one vector (vector base)
def SVLDNT1_GATHER_BASES_U : MInst<"svldnt1_gather[_{2}base]_{0}", "dPu", "ilUiUlfd", [IsGatherLoad], MemEltTyDefault, "aarch64_sve_ldnt1_gather_scalar_offset">;
def SVLDNT1SB_GATHER_BASES_U : MInst<"svldnt1sb_gather[_{2}base]_{0}", "dPu", "ilUiUl", [IsGatherLoad], MemEltTyInt8, "aarch64_sve_ldnt1_gather_scalar_offset">;
@@ -1948,63 +1818,63 @@ def SVSTNT1W_SCATTER_INDEX_S : MInst<"svstnt1w_scatter[_{2}base]_index[_{d}]", "
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Polynomial arithmetic
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
-def SVEORBT : SInst<"sveorbt[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_eorbt">;
-def SVEORBT_N : SInst<"sveorbt[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_eorbt">;
-def SVEORTB : SInst<"sveortb[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_eortb">;
-def SVEORTB_N : SInst<"sveortb[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_eortb">;
-def SVPMUL : SInst<"svpmul[_{d}]", "ddd", "Uc", MergeNone, "aarch64_sve_pmul">;
-def SVPMUL_N : SInst<"svpmul[_n_{d}]", "dda", "Uc", MergeNone, "aarch64_sve_pmul">;
-def SVPMULLB : SInst<"svpmullb[_{d}]", "dhh", "UsUl", MergeNone>;
-def SVPMULLB_N : SInst<"svpmullb[_n_{d}]", "dhR", "UsUl", MergeNone>;
-def SVPMULLB_PAIR : SInst<"svpmullb_pair[_{d}]", "ddd", "UcUi", MergeNone, "aarch64_sve_pmullb_pair">;
-def SVPMULLB_PAIR_N : SInst<"svpmullb_pair[_n_{d}]", "dda", "UcUi", MergeNone, "aarch64_sve_pmullb_pair">;
-def SVPMULLT : SInst<"svpmullt[_{d}]", "dhh", "UsUl", MergeNone>;
-def SVPMULLT_N : SInst<"svpmullt[_n_{d}]", "dhR", "UsUl", MergeNone>;
-def SVPMULLT_PAIR : SInst<"svpmullt_pair[_{d}]", "ddd", "UcUi", MergeNone, "aarch64_sve_pmullt_pair">;
-def SVPMULLT_PAIR_N : SInst<"svpmullt_pair[_n_{d}]", "dda", "UcUi", MergeNone, "aarch64_sve_pmullt_pair">;
+let TargetGuard = "sve2" in {
+def SVEORBT : SInst<"sveorbt[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_eorbt", [IsStreamingCompatible]>;
+def SVEORBT_N : SInst<"sveorbt[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_eorbt", [IsStreamingCompatible]>;
+def SVEORTB : SInst<"sveortb[_{d}]", "dddd", "csilUcUsUiUl", MergeNone, "aarch64_sve_eortb", [IsStreamingCompatible]>;
+def SVEORTB_N : SInst<"sveortb[_n_{d}]", "ddda", "csilUcUsUiUl", MergeNone, "aarch64_sve_eortb", [IsStreamingCompatible]>;
+def SVPMUL : SInst<"svpmul[_{d}]", "ddd", "Uc", MergeNone, "aarch64_sve_pmul", [IsStreamingCompatible]>;
+def SVPMUL_N : SInst<"svpmul[_n_{d}]", "dda", "Uc", MergeNone, "aarch64_sve_pmul", [IsStreamingCompatible]>;
+def SVPMULLB : SInst<"svpmullb[_{d}]", "dhh", "UsUl", MergeNone, "", [IsStreamingCompatible]>;
+def SVPMULLB_N : SInst<"svpmullb[_n_{d}]", "dhR", "UsUl", MergeNone, "", [IsStreamingCompatible]>;
+def SVPMULLB_PAIR : SInst<"svpmullb_pair[_{d}]", "ddd", "UcUi", MergeNone, "aarch64_sve_pmullb_pair", [IsStreamingCompatible]>;
+def SVPMULLB_PAIR_N : SInst<"svpmullb_pair[_n_{d}]", "dda", "UcUi", MergeNone, "aarch64_sve_pmullb_pair", [IsStreamingCompatible]>;
+def SVPMULLT : SInst<"svpmullt[_{d}]", "dhh", "UsUl", MergeNone, "", [IsStreamingCompatible]>;
+def SVPMULLT_N : SInst<"svpmullt[_n_{d}]", "dhR", "UsUl", MergeNone, "", [IsStreamingCompatible]>;
+def SVPMULLT_PAIR : SInst<"svpmullt_pair[_{d}]", "ddd", "UcUi", MergeNone, "aarch64_sve_pmullt_pair", [IsStreamingCompatible]>;
+def SVPMULLT_PAIR_N : SInst<"svpmullt_pair[_n_{d}]", "dda", "UcUi", MergeNone, "aarch64_sve_pmullt_pair", [IsStreamingCompatible]>;
}
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Complex integer dot product
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
-def SVCDOT : SInst<"svcdot[_{d}]", "ddqqi", "il", MergeNone, "aarch64_sve_cdot", [], [ImmCheck<3, ImmCheckComplexRotAll90>]>;
-def SVCDOT_LANE : SInst<"svcdot_lane[_{d}]", "ddqqii", "il", MergeNone, "aarch64_sve_cdot_lane", [], [ImmCheck<4, ImmCheckComplexRotAll90>,
+let TargetGuard = "sve2" in {
+def SVCDOT : SInst<"svcdot[_{d}]", "ddqqi", "il", MergeNone, "aarch64_sve_cdot", [IsStreamingCompatible], [ImmCheck<3, ImmCheckComplexRotAll90>]>;
+def SVCDOT_LANE : SInst<"svcdot_lane[_{d}]", "ddqqii", "il", MergeNone, "aarch64_sve_cdot_lane", [IsStreamingCompatible], [ImmCheck<4, ImmCheckComplexRotAll90>,
ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
}
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Floating-point widening multiply-accumulate
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
-def SVMLALB_F : SInst<"svmlalb[_{d}]", "ddhh", "f", MergeNone, "aarch64_sve_fmlalb">;
-def SVMLALB_F_N : SInst<"svmlalb[_n_{d}]", "ddhR", "f", MergeNone, "aarch64_sve_fmlalb">;
-def SVMLALB_F_LANE : SInst<"svmlalb_lane[_{d}]", "ddhhi", "f", MergeNone, "aarch64_sve_fmlalb_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
-def SVMLALT_F : SInst<"svmlalt[_{d}]", "ddhh", "f", MergeNone, "aarch64_sve_fmlalt">;
-def SVMLALT_F_N : SInst<"svmlalt[_n_{d}]", "ddhR", "f", MergeNone, "aarch64_sve_fmlalt">;
-def SVMLALT_F_LANE : SInst<"svmlalt_lane[_{d}]", "ddhhi", "f", MergeNone, "aarch64_sve_fmlalt_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
-def SVMLSLB_F : SInst<"svmlslb[_{d}]", "ddhh", "f", MergeNone, "aarch64_sve_fmlslb">;
-def SVMLSLB_F_N : SInst<"svmlslb[_n_{d}]", "ddhR", "f", MergeNone, "aarch64_sve_fmlslb">;
-def SVMLSLB_F_LANE : SInst<"svmlslb_lane[_{d}]", "ddhhi", "f", MergeNone, "aarch64_sve_fmlslb_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
-def SVMLSLT_F : SInst<"svmlslt[_{d}]", "ddhh", "f", MergeNone, "aarch64_sve_fmlslt">;
-def SVMLSLT_F_N : SInst<"svmlslt[_n_{d}]", "ddhR", "f", MergeNone, "aarch64_sve_fmlslt">;
-def SVMLSLT_F_LANE : SInst<"svmlslt_lane[_{d}]", "ddhhi", "f", MergeNone, "aarch64_sve_fmlslt_lane", [], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+let TargetGuard = "sve2" in {
+def SVMLALB_F : SInst<"svmlalb[_{d}]", "ddhh", "f", MergeNone, "aarch64_sve_fmlalb", [IsStreamingCompatible]>;
+def SVMLALB_F_N : SInst<"svmlalb[_n_{d}]", "ddhR", "f", MergeNone, "aarch64_sve_fmlalb", [IsStreamingCompatible]>;
+def SVMLALB_F_LANE : SInst<"svmlalb_lane[_{d}]", "ddhhi", "f", MergeNone, "aarch64_sve_fmlalb_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLALT_F : SInst<"svmlalt[_{d}]", "ddhh", "f", MergeNone, "aarch64_sve_fmlalt", [IsStreamingCompatible]>;
+def SVMLALT_F_N : SInst<"svmlalt[_n_{d}]", "ddhR", "f", MergeNone, "aarch64_sve_fmlalt", [IsStreamingCompatible]>;
+def SVMLALT_F_LANE : SInst<"svmlalt_lane[_{d}]", "ddhhi", "f", MergeNone, "aarch64_sve_fmlalt_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLSLB_F : SInst<"svmlslb[_{d}]", "ddhh", "f", MergeNone, "aarch64_sve_fmlslb", [IsStreamingCompatible]>;
+def SVMLSLB_F_N : SInst<"svmlslb[_n_{d}]", "ddhR", "f", MergeNone, "aarch64_sve_fmlslb", [IsStreamingCompatible]>;
+def SVMLSLB_F_LANE : SInst<"svmlslb_lane[_{d}]", "ddhhi", "f", MergeNone, "aarch64_sve_fmlslb_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLSLT_F : SInst<"svmlslt[_{d}]", "ddhh", "f", MergeNone, "aarch64_sve_fmlslt", [IsStreamingCompatible]>;
+def SVMLSLT_F_N : SInst<"svmlslt[_n_{d}]", "ddhR", "f", MergeNone, "aarch64_sve_fmlslt", [IsStreamingCompatible]>;
+def SVMLSLT_F_LANE : SInst<"svmlslt_lane[_{d}]", "ddhhi", "f", MergeNone, "aarch64_sve_fmlslt_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
}
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Floating-point integer binary logarithm
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
-def SVLOGB_M : SInst<"svlogb[_{d}]", "xxPd", "hfd", MergeOp1, "aarch64_sve_flogb">;
-def SVLOGB_X : SInst<"svlogb[_{d}]", "xPd", "hfd", MergeAnyExp, "aarch64_sve_flogb">;
-def SVLOGB_Z : SInst<"svlogb[_{d}]", "xPd", "hfd", MergeZeroExp, "aarch64_sve_flogb">;
+let TargetGuard = "sve2" in {
+def SVLOGB_M : SInst<"svlogb[_{d}]", "xxPd", "hfd", MergeOp1, "aarch64_sve_flogb", [IsStreamingCompatible]>;
+def SVLOGB_X : SInst<"svlogb[_{d}]", "xPd", "hfd", MergeAnyExp, "aarch64_sve_flogb", [IsStreamingCompatible]>;
+def SVLOGB_Z : SInst<"svlogb[_{d}]", "xPd", "hfd", MergeZeroExp, "aarch64_sve_flogb", [IsStreamingCompatible]>;
}
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Vector Histogram count
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+let TargetGuard = "sve2" in {
def SVHISTCNT : SInst<"svhistcnt[_{d}]_z", "uPdd", "ilUiUl", MergeNone, "aarch64_sve_histcnt">;
def SVHISTSEG : SInst<"svhistseg[_{d}]", "udd", "cUc", MergeNone, "aarch64_sve_histseg">;
}
@@ -2012,46 +1882,46 @@ def SVHISTSEG : SInst<"svhistseg[_{d}]", "udd", "cUc", MergeNone, "aarch6
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Character match
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+let TargetGuard = "sve2" in {
def SVMATCH : SInst<"svmatch[_{d}]", "PPdd", "csUcUs", MergeNone, "aarch64_sve_match">;
def SVNMATCH : SInst<"svnmatch[_{d}]", "PPdd", "csUcUs", MergeNone, "aarch64_sve_nmatch">;
}
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Contiguous conflict detection
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
-def SVWHILERW_B : SInst<"svwhilerw[_{1}]", "Pcc", "cUc", MergeNone, "aarch64_sve_whilerw_b", [IsOverloadWhileRW]>;
-def SVWHILERW_H : SInst<"svwhilerw[_{1}]", "Pcc", "sUsh", MergeNone, "aarch64_sve_whilerw_h", [IsOverloadWhileRW]>;
-def SVWHILERW_S : SInst<"svwhilerw[_{1}]", "Pcc", "iUif", MergeNone, "aarch64_sve_whilerw_s", [IsOverloadWhileRW]>;
-def SVWHILERW_D : SInst<"svwhilerw[_{1}]", "Pcc", "lUld", MergeNone, "aarch64_sve_whilerw_d", [IsOverloadWhileRW]>;
+let TargetGuard = "sve2" in {
+def SVWHILERW_B : SInst<"svwhilerw[_{1}]", "Pcc", "cUc", MergeNone, "aarch64_sve_whilerw_b", [IsOverloadWhileRW, IsStreamingCompatible]>;
+def SVWHILERW_H : SInst<"svwhilerw[_{1}]", "Pcc", "sUsh", MergeNone, "aarch64_sve_whilerw_h", [IsOverloadWhileRW, IsStreamingCompatible]>;
+def SVWHILERW_S : SInst<"svwhilerw[_{1}]", "Pcc", "iUif", MergeNone, "aarch64_sve_whilerw_s", [IsOverloadWhileRW, IsStreamingCompatible]>;
+def SVWHILERW_D : SInst<"svwhilerw[_{1}]", "Pcc", "lUld", MergeNone, "aarch64_sve_whilerw_d", [IsOverloadWhileRW, IsStreamingCompatible]>;
-def SVWHILEWR_B : SInst<"svwhilewr[_{1}]", "Pcc", "cUc", MergeNone, "aarch64_sve_whilewr_b", [IsOverloadWhileRW]>;
-def SVWHILEWR_H : SInst<"svwhilewr[_{1}]", "Pcc", "sUsh", MergeNone, "aarch64_sve_whilewr_h", [IsOverloadWhileRW]>;
-def SVWHILEWR_S : SInst<"svwhilewr[_{1}]", "Pcc", "iUif", MergeNone, "aarch64_sve_whilewr_s", [IsOverloadWhileRW]>;
-def SVWHILEWR_D : SInst<"svwhilewr[_{1}]", "Pcc", "lUld", MergeNone, "aarch64_sve_whilewr_d", [IsOverloadWhileRW]>;
+def SVWHILEWR_B : SInst<"svwhilewr[_{1}]", "Pcc", "cUc", MergeNone, "aarch64_sve_whilewr_b", [IsOverloadWhileRW, IsStreamingCompatible]>;
+def SVWHILEWR_H : SInst<"svwhilewr[_{1}]", "Pcc", "sUsh", MergeNone, "aarch64_sve_whilewr_h", [IsOverloadWhileRW, IsStreamingCompatible]>;
+def SVWHILEWR_S : SInst<"svwhilewr[_{1}]", "Pcc", "iUif", MergeNone, "aarch64_sve_whilewr_s", [IsOverloadWhileRW, IsStreamingCompatible]>;
+def SVWHILEWR_D : SInst<"svwhilewr[_{1}]", "Pcc", "lUld", MergeNone, "aarch64_sve_whilewr_d", [IsOverloadWhileRW, IsStreamingCompatible]>;
}
-let ArchGuard = "defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC)" in {
-def SVWHILERW_H_BF16 : SInst<"svwhilerw[_{1}]", "Pcc", "b", MergeNone, "aarch64_sve_whilerw_h", [IsOverloadWhileRW]>;
-def SVWHILEWR_H_BF16 : SInst<"svwhilewr[_{1}]", "Pcc", "b", MergeNone, "aarch64_sve_whilewr_h", [IsOverloadWhileRW]>;
+let TargetGuard = "sve2,bf16" in {
+def SVWHILERW_H_BF16 : SInst<"svwhilerw[_{1}]", "Pcc", "b", MergeNone, "aarch64_sve_whilerw_h", [IsOverloadWhileRW, IsStreamingCompatible]>;
+def SVWHILEWR_H_BF16 : SInst<"svwhilewr[_{1}]", "Pcc", "b", MergeNone, "aarch64_sve_whilewr_h", [IsOverloadWhileRW, IsStreamingCompatible]>;
}
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Extended table lookup/permute
-let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
-def SVTBL2 : SInst<"svtbl2[_{d}]", "d2u", "csilUcUsUiUlhfd", MergeNone>;
-def SVTBX : SInst<"svtbx[_{d}]", "dddu", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tbx">;
+let TargetGuard = "sve2" in {
+def SVTBL2 : SInst<"svtbl2[_{d}]", "d2u", "csilUcUsUiUlhfd", MergeNone, "", [IsStreamingCompatible]>;
+def SVTBX : SInst<"svtbx[_{d}]", "dddu", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_tbx", [IsStreamingCompatible]>;
}
-let ArchGuard = "defined(__ARM_FEATURE_SVE2) && defined(__ARM_FEATURE_SVE_BF16)" in {
-def SVTBL2_BF16 : SInst<"svtbl2[_{d}]", "d2u", "b", MergeNone>;
-def SVTBX_BF16 : SInst<"svtbx[_{d}]", "dddu", "b", MergeNone, "aarch64_sve_tbx">;
+let TargetGuard = "sve2,bf16" in {
+def SVTBL2_BF16 : SInst<"svtbl2[_{d}]", "d2u", "b", MergeNone, "", [IsStreamingCompatible]>;
+def SVTBX_BF16 : SInst<"svtbx[_{d}]", "dddu", "b", MergeNone, "aarch64_sve_tbx", [IsStreamingCompatible]>;
}
////////////////////////////////////////////////////////////////////////////////
// SVE2 - Optional
-let ArchGuard = "defined(__ARM_FEATURE_SVE2_AES)" in {
+let TargetGuard = "sve2-aes" in {
def SVAESD : SInst<"svaesd[_{d}]", "ddd", "Uc", MergeNone, "aarch64_sve_aesd", [IsOverloadNone]>;
def SVAESIMC : SInst<"svaesimc[_{d}]", "dd", "Uc", MergeNone, "aarch64_sve_aesimc", [IsOverloadNone]>;
def SVAESE : SInst<"svaese[_{d}]", "ddd", "Uc", MergeNone, "aarch64_sve_aese", [IsOverloadNone]>;
@@ -2064,16 +1934,16 @@ def SVPMULLT_PAIR_U64 : SInst<"svpmullt_pair[_{d}]", "ddd", "Ul", MergeNone,
def SVPMULLT_PAIR_N_U64 : SInst<"svpmullt_pair[_n_{d}]", "dda", "Ul", MergeNone, "aarch64_sve_pmullt_pair">;
}
-let ArchGuard = "defined(__ARM_FEATURE_SVE2_SHA3)" in {
+let TargetGuard = "sve2-sha3" in {
def SVRAX1 : SInst<"svrax1[_{d}]", "ddd", "lUl", MergeNone, "aarch64_sve_rax1", [IsOverloadNone]>;
}
-let ArchGuard = "defined(__ARM_FEATURE_SVE2_SM4)" in {
+let TargetGuard = "sve2-sm4" in {
def SVSM4E : SInst<"svsm4e[_{d}]", "ddd", "Ui", MergeNone, "aarch64_sve_sm4e", [IsOverloadNone]>;
def SVSM4EKEY : SInst<"svsm4ekey[_{d}]", "ddd", "Ui", MergeNone, "aarch64_sve_sm4ekey", [IsOverloadNone]>;
}
-let ArchGuard = "defined (__ARM_FEATURE_SVE2_BITPERM)" in {
+let TargetGuard = "sve2-bitperm" in {
def SVBDEP : SInst<"svbdep[_{d}]", "ddd", "UcUsUiUl", MergeNone, "aarch64_sve_bdep_x">;
def SVBDEP_N : SInst<"svbdep[_n_{d}]", "dda", "UcUsUiUl", MergeNone, "aarch64_sve_bdep_x">;
def SVBEXT : SInst<"svbext[_{d}]", "ddd", "UcUsUiUl", MergeNone, "aarch64_sve_bext_x">;
@@ -2081,3 +1951,386 @@ def SVBEXT_N : SInst<"svbext[_n_{d}]", "dda", "UcUsUiUl", MergeNone, "aarch64_sv
def SVBGRP : SInst<"svbgrp[_{d}]", "ddd", "UcUsUiUl", MergeNone, "aarch64_sve_bgrp_x">;
def SVBGRP_N : SInst<"svbgrp[_n_{d}]", "dda", "UcUsUiUl", MergeNone, "aarch64_sve_bgrp_x">;
}
+
+let TargetGuard = "sve2p1|sme" in {
+def SVPSEL_B : SInst<"svpsel_lane_b8", "PPPm", "Pc", MergeNone, "", [IsStreamingOrSVE2p1], []>;
+def SVPSEL_H : SInst<"svpsel_lane_b16", "PPPm", "Ps", MergeNone, "", [IsStreamingOrSVE2p1], []>;
+def SVPSEL_S : SInst<"svpsel_lane_b32", "PPPm", "Pi", MergeNone, "", [IsStreamingOrSVE2p1], []>;
+def SVPSEL_D : SInst<"svpsel_lane_b64", "PPPm", "Pl", MergeNone, "", [IsStreamingOrSVE2p1], []>;
+}
+
+// Standalone sve2.1 builtins
+let TargetGuard = "sve2p1" in {
+def SVORQV : SInst<"svorqv[_{d}]", "{Pd", "csilUcUsUiUl", MergeNone, "aarch64_sve_orqv", [IsReductionQV]>;
+def SVEORQV : SInst<"sveorqv[_{d}]", "{Pd", "csilUcUsUiUl", MergeNone, "aarch64_sve_eorqv", [IsReductionQV]>;
+def SVADDQV : SInst<"svaddqv[_{d}]", "{Pd", "hfdcsilUcUsUiUl", MergeNone, "aarch64_sve_addqv", [IsReductionQV]>;
+def SVANDQV : SInst<"svandqv[_{d}]", "{Pd", "csilUcUsUiUl", MergeNone, "aarch64_sve_andqv", [IsReductionQV]>;
+def SVSMAXQV : SInst<"svmaxqv[_{d}]", "{Pd", "csil", MergeNone, "aarch64_sve_smaxqv", [IsReductionQV]>;
+def SVUMAXQV : SInst<"svmaxqv[_{d}]", "{Pd", "UcUsUiUl", MergeNone, "aarch64_sve_umaxqv", [IsReductionQV]>;
+def SVSMINQV : SInst<"svminqv[_{d}]", "{Pd", "csil", MergeNone, "aarch64_sve_sminqv", [IsReductionQV]>;
+def SVUMINQV : SInst<"svminqv[_{d}]", "{Pd", "UcUsUiUl", MergeNone, "aarch64_sve_uminqv", [IsReductionQV]>;
+
+def SVFMAXNMQV: SInst<"svmaxnmqv[_{d}]", "{Pd", "hfd", MergeNone, "aarch64_sve_fmaxnmqv", [IsReductionQV]>;
+def SVFMINNMQV: SInst<"svminnmqv[_{d}]", "{Pd", "hfd", MergeNone, "aarch64_sve_fminnmqv", [IsReductionQV]>;
+def SVFMAXQV: SInst<"svmaxqv[_{d}]", "{Pd", "hfd", MergeNone, "aarch64_sve_fmaxqv", [IsReductionQV]>;
+def SVFMINQV: SInst<"svminqv[_{d}]", "{Pd", "hfd", MergeNone, "aarch64_sve_fminqv", [IsReductionQV]>;
+}
+
+let TargetGuard = "sve2p1|sme2" in {
+def SVPEXT_SINGLE : SInst<"svpext_lane_{d}", "P}i", "QcQsQiQl", MergeNone, "aarch64_sve_pext", [IsStreamingOrSVE2p1], [ImmCheck<1, ImmCheck0_3>]>;
+def SVPEXT_X2 : SInst<"svpext_lane_{d}_x2", "2.P}i", "QcQsQiQl", MergeNone, "aarch64_sve_pext_x2", [IsStreamingOrSVE2p1], [ImmCheck<1, ImmCheck0_1>]>;
+
+def SVPSEL_COUNT_ALIAS_B : SInst<"svpsel_lane_c8", "}}Pm", "Pc", MergeNone, "", [IsStreamingOrSVE2p1], []>;
+def SVPSEL_COUNT_ALIAS_H : SInst<"svpsel_lane_c16", "}}Pm", "Ps", MergeNone, "", [IsStreamingOrSVE2p1], []>;
+def SVPSEL_COUNT_ALIAS_S : SInst<"svpsel_lane_c32", "}}Pm", "Pi", MergeNone, "", [IsStreamingOrSVE2p1], []>;
+def SVPSEL_COUNT_ALIAS_D : SInst<"svpsel_lane_c64", "}}Pm", "Pl", MergeNone, "", [IsStreamingOrSVE2p1], []>;
+
+def SVWHILEGE_COUNT : SInst<"svwhilege_{d}[_{1}]", "}lli", "QcQsQiQl", MergeNone, "aarch64_sve_whilege_{d}", [IsOverloadNone, IsStreamingOrSVE2p1], [ImmCheck<2, ImmCheck2_4_Mul2>]>;
+def SVWHILEGT_COUNT : SInst<"svwhilegt_{d}[_{1}]", "}lli", "QcQsQiQl", MergeNone, "aarch64_sve_whilegt_{d}", [IsOverloadNone, IsStreamingOrSVE2p1], [ImmCheck<2, ImmCheck2_4_Mul2>]>;
+def SVWHILELE_COUNT : SInst<"svwhilele_{d}[_{1}]", "}lli", "QcQsQiQl", MergeNone, "aarch64_sve_whilele_{d}", [IsOverloadNone, IsStreamingOrSVE2p1], [ImmCheck<2, ImmCheck2_4_Mul2>]>;
+def SVWHILELT_COUNT : SInst<"svwhilelt_{d}[_{1}]", "}lli", "QcQsQiQl", MergeNone, "aarch64_sve_whilelt_{d}", [IsOverloadNone, IsStreamingOrSVE2p1], [ImmCheck<2, ImmCheck2_4_Mul2>]>;
+def SVWHILELO_COUNT : SInst<"svwhilelt_{d}[_{1}]", "}nni", "QcQsQiQl", MergeNone, "aarch64_sve_whilelo_{d}", [IsOverloadNone, IsStreamingOrSVE2p1], [ImmCheck<2, ImmCheck2_4_Mul2>]>;
+def SVWHILELS_COUNT : SInst<"svwhilele_{d}[_{1}]", "}nni", "QcQsQiQl", MergeNone, "aarch64_sve_whilels_{d}", [IsOverloadNone, IsStreamingOrSVE2p1], [ImmCheck<2, ImmCheck2_4_Mul2>]>;
+def SVWHILEHI_COUNT : SInst<"svwhilegt_{d}[_{1}]", "}nni", "QcQsQiQl", MergeNone, "aarch64_sve_whilehi_{d}", [IsOverloadNone, IsStreamingOrSVE2p1], [ImmCheck<2, ImmCheck2_4_Mul2>]>;
+def SVWHILEHS_COUNT : SInst<"svwhilege_{d}[_{1}]", "}nni", "QcQsQiQl", MergeNone, "aarch64_sve_whilehs_{d}", [IsOverloadNone, IsStreamingOrSVE2p1], [ImmCheck<2, ImmCheck2_4_Mul2>]>;
+}
+
+multiclass MultiVecLoad<string i> {
+ def SV # NAME # B_X2 : MInst<"sv" # i # "[_{2}]_x2", "2}c", "cUc", [IsStructLoad, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x2">;
+ def SV # NAME # H_X2 : MInst<"sv" # i # "[_{2}]_x2", "2}c", "sUshb", [IsStructLoad, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x2">;
+ def SV # NAME # W_X2 : MInst<"sv" # i # "[_{2}]_x2", "2}c", "iUif", [IsStructLoad, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x2">;
+ def SV # NAME # D_X2 : MInst<"sv" # i # "[_{2}]_x2", "2}c", "lUld", [IsStructLoad, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x2">;
+ def SV # NAME # B_X4 : MInst<"sv" # i # "[_{2}]_x4", "4}c", "cUc", [IsStructLoad, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x4">;
+ def SV # NAME # H_X4 : MInst<"sv" # i # "[_{2}]_x4", "4}c", "sUshb", [IsStructLoad, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x4">;
+ def SV # NAME # W_X4 : MInst<"sv" # i # "[_{2}]_x4", "4}c", "iUif", [IsStructLoad, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x4">;
+ def SV # NAME # D_X4 : MInst<"sv" # i # "[_{2}]_x4", "4}c", "lUld", [IsStructLoad, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x4">;
+
+ def SV # NAME # B_VNUM_X2 : MInst<"sv" # i # "_vnum" # "[_{2}]_x2", "2}cl", "cUc", [IsStructLoad, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x2">;
+ def SV # NAME # H_VNUM_X2 : MInst<"sv" # i # "_vnum" # "[_{2}]_x2", "2}cl", "sUshb", [IsStructLoad, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x2">;
+ def SV # NAME # W_VNUM_X2 : MInst<"sv" # i # "_vnum" # "[_{2}]_x2", "2}cl", "iUif", [IsStructLoad, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x2">;
+ def SV # NAME # D_VNUM_X2 : MInst<"sv" # i # "_vnum" # "[_{2}]_x2", "2}cl", "lUld", [IsStructLoad, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x2">;
+ def SV # NAME # B_VNUM_X4 : MInst<"sv" # i # "_vnum" # "[_{2}]_x4", "4}cl", "cUc", [IsStructLoad, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x4">;
+ def SV # NAME # H_VNUM_X4 : MInst<"sv" # i # "_vnum" # "[_{2}]_x4", "4}cl", "sUshb", [IsStructLoad, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x4">;
+ def SV # NAME # W_VNUM_X4 : MInst<"sv" # i # "_vnum" # "[_{2}]_x4", "4}cl", "iUif", [IsStructLoad, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x4">;
+ def SV # NAME # D_VNUM_X4 : MInst<"sv" # i # "_vnum" # "[_{2}]_x4", "4}cl", "lUld", [IsStructLoad, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x4">;
+}
+
+let TargetGuard = "sve2p1|sme2" in {
+ defm LD1 : MultiVecLoad<"ld1">;
+ defm LDNT1 : MultiVecLoad<"ldnt1">;
+}
+
+multiclass MultiVecStore<string i> {
+ def SV # NAME # B_X2 : MInst<"sv" # i # "[_{2}_x2]", "v}p2", "cUc", [IsStructStore, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x2">;
+ def SV # NAME # H_X2 : MInst<"sv" # i # "[_{2}_x2]", "v}p2", "sUshb", [IsStructStore, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x2">;
+ def SV # NAME # W_X2 : MInst<"sv" # i # "[_{2}_x2]", "v}p2", "iUif", [IsStructStore, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x2">;
+ def SV # NAME # D_X2 : MInst<"sv" # i # "[_{2}_x2]", "v}p2", "lUld", [IsStructStore, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x2">;
+ def SV # NAME # B_X4 : MInst<"sv" # i # "[_{2}_x4]", "v}p4", "cUc", [IsStructStore, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x4">;
+ def SV # NAME # H_X4 : MInst<"sv" # i # "[_{2}_x4]", "v}p4", "sUshb", [IsStructStore, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x4">;
+ def SV # NAME # W_X4 : MInst<"sv" # i # "[_{2}_x4]", "v}p4", "iUif", [IsStructStore, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x4">;
+ def SV # NAME # D_X4 : MInst<"sv" # i # "[_{2}_x4]", "v}p4", "lUld", [IsStructStore, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x4">;
+
+ def SV # NAME # B_VNUM_X2 : MInst<"sv" # i # "_vnum" # "[_{2}_x2]", "v}pl2", "cUc", [IsStructStore, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x2">;
+ def SV # NAME # H_VNUM_X2 : MInst<"sv" # i # "_vnum" # "[_{2}_x2]", "v}pl2", "sUshb", [IsStructStore, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x2">;
+ def SV # NAME # W_VNUM_X2 : MInst<"sv" # i # "_vnum" # "[_{2}_x2]", "v}pl2", "iUif", [IsStructStore, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x2">;
+ def SV # NAME # D_VNUM_X2 : MInst<"sv" # i # "_vnum" # "[_{2}_x2]", "v}pl2", "lUld", [IsStructStore, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x2">;
+ def SV # NAME # B_VNUM_X4 : MInst<"sv" # i # "_vnum" # "[_{2}_x4]", "v}pl4", "cUc", [IsStructStore, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x4">;
+ def SV # NAME # H_VNUM_X4 : MInst<"sv" # i # "_vnum" # "[_{2}_x4]", "v}pl4", "sUshb", [IsStructStore, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x4">;
+ def SV # NAME # W_VNUM_X4 : MInst<"sv" # i # "_vnum" # "[_{2}_x4]", "v}pl4", "iUif", [IsStructStore, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x4">;
+ def SV # NAME # D_VNUM_X4 : MInst<"sv" # i # "_vnum" # "[_{2}_x4]", "v}pl4", "lUld", [IsStructStore, IsStreamingOrSVE2p1], MemEltTyDefault, "aarch64_sve_" # i # "_pn_x4">;
+}
+
+let TargetGuard = "sve2p1|sme2" in {
+ defm ST1 : MultiVecStore<"st1">;
+ defm STNT1 : MultiVecStore<"stnt1">;
+}
+
+let TargetGuard = "sve2p1|sme2" in {
+def SVDOT_X2_S : SInst<"svdot[_{d}_{2}]", "ddhh", "i", MergeNone, "aarch64_sve_sdot_x2", [IsStreamingOrSVE2p1], []>;
+def SVDOT_X2_U : SInst<"svdot[_{d}_{2}]", "ddhh", "Ui", MergeNone, "aarch64_sve_udot_x2", [IsStreamingOrSVE2p1], []>;
+def SVDOT_X2_F : SInst<"svdot[_{d}_{2}]", "ddhh", "f", MergeNone, "aarch64_sve_fdot_x2", [IsStreamingOrSVE2p1], []>;
+def SVDOT_LANE_X2_S : SInst<"svdot_lane[_{d}_{2}]", "ddhhi", "i", MergeNone, "aarch64_sve_sdot_lane_x2", [IsStreamingOrSVE2p1], [ImmCheck<3, ImmCheck0_3>]>;
+def SVDOT_LANE_X2_U : SInst<"svdot_lane[_{d}_{2}]", "ddhhi", "Ui", MergeNone, "aarch64_sve_udot_lane_x2", [IsStreamingOrSVE2p1], [ImmCheck<3, ImmCheck0_3>]>;
+def SVDOT_LANE_X2_F : SInst<"svdot_lane[_{d}_{2}]", "ddhhi", "f", MergeNone, "aarch64_sve_fdot_lane_x2", [IsStreamingOrSVE2p1], [ImmCheck<3, ImmCheck0_3>]>;
+}
+
+let TargetGuard = "sve2p1|sme2" in {
+def SVSCLAMP : SInst<"svclamp[_{d}]", "dddd", "csil", MergeNone, "aarch64_sve_sclamp", [IsStreamingOrSVE2p1], []>;
+def SVUCLAMP : SInst<"svclamp[_{d}]", "dddd", "UcUsUiUl", MergeNone, "aarch64_sve_uclamp", [IsStreamingOrSVE2p1], []>;
+
+defm SVREVD : SInstZPZ<"svrevd", "csilUcUsUiUlbhfd", "aarch64_sve_revd">;
+}
+
+let TargetGuard = "sve2p1|sme2" in {
+ def SVPTRUE_COUNT : SInst<"svptrue_{d}", "}v", "QcQsQiQl", MergeNone, "aarch64_sve_ptrue_{d}", [IsOverloadNone, IsStreamingOrSVE2p1], []>;
+
+ def SVPFALSE_COUNT_ALIAS : SInst<"svpfalse_c", "}v", "", MergeNone, "", [IsOverloadNone, IsStreamingOrSVE2p1]>;
+
+ def SVFCLAMP : SInst<"svclamp[_{d}]", "dddd", "hfd", MergeNone, "aarch64_sve_fclamp", [IsStreamingOrSVE2p1], []>;
+ def SVCNTP_COUNT : SInst<"svcntp_{d}", "n}i", "QcQsQiQl", MergeNone, "aarch64_sve_cntp_{d}", [IsOverloadNone, IsStreamingOrSVE2p1], [ImmCheck<1, ImmCheck2_4_Mul2>]>;
+}
+
+let TargetGuard = "(sve2|sme2),b16b16" in {
+defm SVMUL_BF : SInstZPZZ<"svmul", "b", "aarch64_sve_fmul", "aarch64_sve_fmul_u", [IsStreamingCompatible]>;
+defm SVADD_BF : SInstZPZZ<"svadd", "b", "aarch64_sve_fadd", "aarch64_sve_fadd_u", [IsStreamingCompatible]>;
+defm SVSUB_BF : SInstZPZZ<"svsub", "b", "aarch64_sve_fsub", "aarch64_sve_fsub_u", [IsStreamingCompatible]>;
+defm SVMAXNM_BF : SInstZPZZ<"svmaxnm","b", "aarch64_sve_fmaxnm", "aarch64_sve_fmaxnm_u", [IsStreamingCompatible]>;
+defm SVMINNM_BF : SInstZPZZ<"svminnm","b", "aarch64_sve_fminnm", "aarch64_sve_fminnm_u", [IsStreamingCompatible]>;
+defm SVMAX_BF : SInstZPZZ<"svmax", "b", "aarch64_sve_fmax", "aarch64_sve_fmax_u", [IsStreamingCompatible]>;
+defm SVMIN_BF : SInstZPZZ<"svmin", "b", "aarch64_sve_fmin", "aarch64_sve_fmin_u", [IsStreamingCompatible]>;
+defm SVMLA_BF : SInstZPZZZ<"svmla", "b", "aarch64_sve_fmla", "aarch64_sve_fmla_u", [IsStreamingCompatible]>;
+defm SVMLS_BF : SInstZPZZZ<"svmls", "b", "aarch64_sve_fmls", "aarch64_sve_fmls_u", [IsStreamingCompatible]>;
+def SVMLA_LANE_BF : SInst<"svmla_lane[_{d}]", "ddddi", "b", MergeNone, "aarch64_sve_fmla_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMLS_LANE_BF : SInst<"svmls_lane[_{d}]", "ddddi", "b", MergeNone, "aarch64_sve_fmls_lane", [IsStreamingCompatible], [ImmCheck<3, ImmCheckLaneIndex, 2>]>;
+def SVMUL_LANE_BF : SInst<"svmul_lane[_{d}]", "dddi", "b", MergeNone, "aarch64_sve_fmul_lane", [IsStreamingCompatible], [ImmCheck<2, ImmCheckLaneIndex, 1>]>;
+def SVFCLAMP_BF : SInst<"svclamp[_{d}]", "dddd", "b", MergeNone, "aarch64_sve_fclamp", [IsStreamingCompatible], []>;
+}
+
+// SME2
+
+// SME intrinsics which operate only on vectors and do not require ZA should be added here,
+// as they could possibly become SVE instructions in the future.
+
+multiclass MinMaxIntr<string i, string zm, string mul, string t> {
+ def SVS # NAME : SInst<"sv" # i # "[" # zm # "_{d}_" # mul # "]", t, "csil", MergeNone, "aarch64_sve_s" # i # zm # "_" # mul, [IsStreaming], []>;
+ def SVU # NAME : SInst<"sv" # i # "[" # zm # "_{d}_" # mul # "]", t, "UcUsUiUl", MergeNone, "aarch64_sve_u" # i # zm # "_" # mul, [IsStreaming], []>;
+ def SVF # NAME : SInst<"sv" # i # "[" # zm # "_{d}_" # mul # "]", t, "hfd", MergeNone, "aarch64_sve_f" # i # zm # "_" # mul, [IsStreaming], []>;
+}
+
+let TargetGuard = "sme2" in {
+// == SMAX / UMAX / FMAX ==
+ defm MAX_SINGLE_X2 : MinMaxIntr<"max", "_single", "x2", "22d">;
+ defm MAX_MULTI_X2 : MinMaxIntr<"max", "", "x2", "222">;
+ defm MAX_SINGLE_X4 : MinMaxIntr<"max", "_single", "x4", "44d">;
+ defm MAX_MULTI_X4 : MinMaxIntr<"max", "", "x4", "444">;
+
+// == SMIN / UMIN / FMIN ==
+ defm MIN_SINGLE_X2 : MinMaxIntr<"min", "_single", "x2", "22d">;
+ defm MIN_MULTI_X2 : MinMaxIntr<"min", "", "x2", "222">;
+ defm MIN_SINGLE_X4 : MinMaxIntr<"min", "_single", "x4", "44d">;
+ defm MIN_MULTI_X4 : MinMaxIntr<"min", "", "x4", "444">;
+}
+
+multiclass SInstMinMaxByVector<string name> {
+ def NAME # _SINGLE_X2 : SInst<"sv" # name # "nm[_single_{d}_x2]", "22d", "hfd", MergeNone, "aarch64_sve_f" # name # "nm_single_x2", [IsStreaming], []>;
+ def NAME # _SINGLE_X4 : SInst<"sv" # name # "nm[_single_{d}_x4]", "44d", "hfd", MergeNone, "aarch64_sve_f" # name # "nm_single_x4", [IsStreaming], []>;
+
+ def NAME # _X2 : SInst<"sv" # name # "nm[_{d}_x2]", "222", "hfd", MergeNone, "aarch64_sve_f" # name # "nm_x2", [IsStreaming], []>;
+ def NAME # _X4 : SInst<"sv" # name # "nm[_{d}_x4]", "444", "hfd", MergeNone, "aarch64_sve_f" # name # "nm_x4", [IsStreaming], []>;
+}
+
+let TargetGuard = "sme2" in {
+// == FMINNM / FMAXNM ==
+ defm SVMINNM : SInstMinMaxByVector<"min">;
+ defm SVMAXNM : SInstMinMaxByVector<"max">;
+}
+
+let TargetGuard = "sme2" in {
+ // FRINTA / FRINTM / FRINTN / FRINTP
+ def SVRINTA_X2 : SInst<"svrinta[_{d}_x2]", "22", "f", MergeNone, "aarch64_sve_frinta_x2", [IsStreaming], []>;
+ def SVRINTA_X4 : SInst<"svrinta[_{d}_x4]", "44", "f", MergeNone, "aarch64_sve_frinta_x4", [IsStreaming], []>;
+
+ def SVRINTM_X2 : SInst<"svrintm[_{d}_x2]", "22", "f", MergeNone, "aarch64_sve_frintm_x2", [IsStreaming], []>;
+ def SVRINTM_X4 : SInst<"svrintm[_{d}_x4]", "44", "f", MergeNone, "aarch64_sve_frintm_x4", [IsStreaming], []>;
+
+ def SVRINTN_X2 : SInst<"svrintn[_{d}_x2]", "22", "f", MergeNone, "aarch64_sve_frintn_x2", [IsStreaming], []>;
+ def SVRINTN_X4 : SInst<"svrintn[_{d}_x4]", "44", "f", MergeNone, "aarch64_sve_frintn_x4", [IsStreaming], []>;
+
+ def SVRINTP_X2 : SInst<"svrintp[_{d}_x2]", "22", "f", MergeNone, "aarch64_sve_frintp_x2", [IsStreaming], []>;
+ def SVRINTP_X4 : SInst<"svrintp[_{d}_x4]", "44", "f", MergeNone, "aarch64_sve_frintp_x4", [IsStreaming], []>;
+}
+
+let TargetGuard = "sme2" in {
+ def SVSCLAMP_X2 : SInst<"svclamp[_single_{d}_x2]", "22dd", "csil", MergeNone, "aarch64_sve_sclamp_single_x2", [IsStreaming], []>;
+ def SVUCLAMP_X2 : SInst<"svclamp[_single_{d}_x2]", "22dd", "UcUsUiUl", MergeNone, "aarch64_sve_uclamp_single_x2", [IsStreaming], []>;
+ def SVFCLAMP_X2 : SInst<"svclamp[_single_{d}_x2]", "22dd", "hfd", MergeNone, "aarch64_sve_fclamp_single_x2", [IsStreaming], []>;
+
+ def SVSCLAMP_X4 : SInst<"svclamp[_single_{d}_x4]", "44dd", "csil", MergeNone, "aarch64_sve_sclamp_single_x4", [IsStreaming], []>;
+ def SVUCLAMP_X4 : SInst<"svclamp[_single_{d}_x4]", "44dd", "UcUsUiUl", MergeNone, "aarch64_sve_uclamp_single_x4", [IsStreaming], []>;
+ def SVFCLAMP_X4 : SInst<"svclamp[_single_{d}_x4]", "44dd", "hfd", MergeNone, "aarch64_sve_fclamp_single_x4", [IsStreaming], []>;
+}
+
+let TargetGuard = "sme2" in {
+// == ADD (vectors) ==
+ def SVADD_SINGLE_X2 : SInst<"svadd[_single_{d}_x2]", "22d", "cUcsUsiUilUl", MergeNone, "aarch64_sve_add_single_x2", [IsStreaming], []>;
+ def SVADD_SINGLE_X4 : SInst<"svadd[_single_{d}_x4]", "44d", "cUcsUsiUilUl", MergeNone, "aarch64_sve_add_single_x4", [IsStreaming], []>;
+
+ // 2-way and 4-way selects
+ def SVSEL_X2 : SInst<"svsel[_{d}_x2]", "2}22", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_sel_x2", [IsStreaming], []>;
+ def SVSEL_X4 : SInst<"svsel[_{d}_x4]", "4}44", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_sel_x4", [IsStreaming], []>;
+
+ // SRSHL / URSHL
+ def SVSRSHL_SINGLE_X2 : SInst<"svrshl[_single_{d}_x2]", "22d", "csil", MergeNone, "aarch64_sve_srshl_single_x2", [IsStreaming], []>;
+ def SVURSHL_SINGLE_X2 : SInst<"svrshl[_single_{d}_x2]", "22d", "UcUsUiUl", MergeNone, "aarch64_sve_urshl_single_x2", [IsStreaming], []>;
+ def SVSRSHL_SINGLE_X4 : SInst<"svrshl[_single_{d}_x4]", "44d", "csil", MergeNone, "aarch64_sve_srshl_single_x4", [IsStreaming], []>;
+ def SVURSHL_SINGLE_X4 : SInst<"svrshl[_single_{d}_x4]", "44d", "UcUsUiUl", MergeNone, "aarch64_sve_urshl_single_x4", [IsStreaming], []>;
+
+ def SVSRSHL_X2 : SInst<"svrshl[_{d}_x2]", "222", "csil", MergeNone, "aarch64_sve_srshl_x2", [IsStreaming], []>;
+ def SVURSHL_X2 : SInst<"svrshl[_{d}_x2]", "222", "UcUsUiUl", MergeNone, "aarch64_sve_urshl_x2", [IsStreaming], []>;
+ def SVSRSHL_X4 : SInst<"svrshl[_{d}_x4]", "444", "csil", MergeNone, "aarch64_sve_srshl_x4", [IsStreaming], []>;
+ def SVURSHL_X4 : SInst<"svrshl[_{d}_x4]", "444", "UcUsUiUl", MergeNone, "aarch64_sve_urshl_x4", [IsStreaming], []>;
+
+ def SVQRSHRN_X4 : SInst<"svqrshrn[_n]_{0}[_{d}_x4]", "q4i", "il", MergeNone, "aarch64_sve_sqrshrn_x4", [IsStreaming], [ImmCheck<1, ImmCheckShiftRight, 0>]>;
+ def SVUQRSHRN_X4 : SInst<"svqrshrn[_n]_{0}[_{d}_x4]", "b4i", "UiUl", MergeNone, "aarch64_sve_uqrshrn_x4", [IsStreaming], [ImmCheck<1, ImmCheckShiftRight, 0>]>;
+
+ // SQRSHR / UQRSHR
+ def SVQRSHR_X2 : SInst<"svqrshr[_n]_{0}[_{d}_x2]", "h2i", "i", MergeNone, "aarch64_sve_sqrshr_x2", [IsStreaming], [ImmCheck<1, ImmCheck1_16>]>;
+ def SVUQRSHR_X2 : SInst<"svqrshr[_n]_{0}[_{d}_x2]", "e2i", "Ui", MergeNone, "aarch64_sve_uqrshr_x2", [IsStreaming], [ImmCheck<1, ImmCheck1_16>]>;
+ def SVQRSHR_X4 : SInst<"svqrshr[_n]_{0}[_{d}_x4]", "q4i", "il", MergeNone, "aarch64_sve_sqrshr_x4", [IsStreaming], [ImmCheck<1, ImmCheckShiftRight, 0>]>;
+ def SVUQRSHR_X4 : SInst<"svqrshr[_n]_{0}[_{d}_x4]", "b4i", "UiUl", MergeNone, "aarch64_sve_uqrshr_x4", [IsStreaming], [ImmCheck<1, ImmCheckShiftRight, 0>]>;
+
+ // SQRSHRU
+ def SVSQRSHRU_X2 : SInst<"svqrshru[_n]_{0}[_{d}_x2]", "e2i", "i", MergeNone, "aarch64_sve_sqrshru_x2", [IsStreaming], [ImmCheck<1, ImmCheck1_16>]>;
+ def SVSQRSHRU_X4 : SInst<"svqrshru[_n]_{0}[_{d}_x4]", "b4i", "il", MergeNone, "aarch64_sve_sqrshru_x4", [IsStreaming], [ImmCheck<1, ImmCheckShiftRight, 0>]>;
+
+ def SVSQRSHRUN_X4 : SInst<"svqrshrun[_n]_{0}[_{d}_x4]", "b4i", "il", MergeNone, "aarch64_sve_sqrshrun_x4", [IsStreaming], [ImmCheck<1, ImmCheckShiftRight, 0>]>;
+
+ def REINTERPRET_SVBOOL_TO_SVCOUNT : Inst<"svreinterpret[_c]", "}P", "Pc", MergeNone, "", [IsStreamingCompatible], []>;
+ def REINTERPRET_SVCOUNT_TO_SVBOOL : Inst<"svreinterpret[_b]", "P}", "Pc", MergeNone, "", [IsStreamingCompatible], []>;
+
+ // SQDMULH
+ def SVSQDMULH_SINGLE_X2 : SInst<"svqdmulh[_single_{d}_x2]", "22d", "csil", MergeNone, "aarch64_sve_sqdmulh_single_vgx2", [IsStreaming], []>;
+ def SVSQDMULH_SINGLE_X4 : SInst<"svqdmulh[_single_{d}_x4]", "44d", "csil", MergeNone, "aarch64_sve_sqdmulh_single_vgx4", [IsStreaming], []>;
+ def SVSQDMULH_X2 : SInst<"svqdmulh[_{d}_x2]", "222", "csil", MergeNone, "aarch64_sve_sqdmulh_vgx2", [IsStreaming], []>;
+ def SVSQDMULH_X4 : SInst<"svqdmulh[_{d}_x4]", "444", "csil", MergeNone, "aarch64_sve_sqdmulh_vgx4", [IsStreaming], []>;
+}
+
+let TargetGuard = "sve2p1|sme2" in {
+ // SQRSHRN / UQRSHRN
+ def SVQRSHRN_X2 : SInst<"svqrshrn[_n]_{0}[_{d}_x2]", "h2i", "i", MergeNone, "aarch64_sve_sqrshrn_x2", [IsStreamingCompatible], [ImmCheck<1, ImmCheck1_16>]>;
+ def SVUQRSHRN_X2 : SInst<"svqrshrn[_n]_{0}[_{d}_x2]", "e2i", "Ui", MergeNone, "aarch64_sve_uqrshrn_x2", [IsStreamingCompatible], [ImmCheck<1, ImmCheck1_16>]>;
+
+ // SQRSHRUN
+ def SVSQRSHRUN_X2 : SInst<"svqrshrun[_n]_{0}[_{d}_x2]", "e2i", "i", MergeNone, "aarch64_sve_sqrshrun_x2", [IsStreamingCompatible], [ImmCheck<1, ImmCheck1_16>]>;
+}
+
+let TargetGuard = "sve2p1" in {
+ // ZIPQ1, ZIPQ2, UZPQ1, UZPQ2
+ def SVZIPQ1 : SInst<"svzipq1[_{d}]", "ddd", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_zipq1", [], []>;
+ def SVZIPQ2 : SInst<"svzipq2[_{d}]", "ddd", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_zipq2", [], []>;
+ def SVUZPQ1 : SInst<"svuzpq1[_{d}]", "ddd", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_uzpq1", [], []>;
+ def SVUZPQ2 : SInst<"svuzpq2[_{d}]", "ddd", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_uzpq2", [], []>;
+ // TBLQ, TBXQ
+ def SVTBLQ : SInst<"svtblq[_{d}]", "ddu", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_tblq">;
+ def SVTBXQ : SInst<"svtbxq[_{d}]", "dddu", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_tbxq">;
+ // EXTQ
+ def EXTQ : SInst<"svextq[_{d}]", "dddk", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_extq", [], [ImmCheck<2, ImmCheck0_15>]>;
+ // PMOV
+ // Move to Pred
+ multiclass PMOV_TO_PRED<string name, string types, string intrinsic, list<FlagType> flags=[], ImmCheckType immCh > {
+ def _LANE : Inst<name # "_lane[_{d}]", "Pdi", types, MergeNone, intrinsic, flags, [ImmCheck<1, immCh>]>;
+ def _LANE_ZERO : SInst<name # "[_{d}]", "Pd", types, MergeNone, intrinsic # "_zero", flags, []>;
+ }
+ defm SVPMOV_B_TO_PRED : PMOV_TO_PRED<"svpmov", "cUc", "aarch64_sve_pmov_to_pred_lane", [], ImmCheck0_0>;
+ defm SVPMOV_H_TO_PRED : PMOV_TO_PRED<"svpmov", "sUs", "aarch64_sve_pmov_to_pred_lane", [], ImmCheck0_1>;
+ defm SVPMOV_S_TO_PRED : PMOV_TO_PRED<"svpmov", "iUi", "aarch64_sve_pmov_to_pred_lane", [], ImmCheck0_3>;
+ defm SVPMOV_D_TO_PRED : PMOV_TO_PRED<"svpmov", "lUl", "aarch64_sve_pmov_to_pred_lane", [], ImmCheck0_7>;
+
+ // Move to Vector
+ multiclass PMOV_TO_VEC<string name, string types, string intrinsic, list<FlagType> flags=[], ImmCheckType immCh > {
+ def _M : SInst<name # "_lane[_{d}]", "ddPi", types, MergeOp1, intrinsic # "_merging", flags, [ImmCheck<2, immCh>]>;
+ def _Z : SInst<name # "_{d}_z", "dP", types, MergeNone, intrinsic # "_zeroing", flags, []>;
+ }
+ def SVPMOV_TO_VEC_LANE_B : SInst<"svpmov_{d}_z", "dP", "cUc", MergeNone, "aarch64_sve_pmov_to_vector_lane_zeroing", [], []>;
+ defm SVPMOV_TO_VEC_LANE_H : PMOV_TO_VEC<"svpmov", "sUs", "aarch64_sve_pmov_to_vector_lane", [], ImmCheck1_1>;
+ defm SVPMOV_TO_VEC_LANE_S : PMOV_TO_VEC<"svpmov", "iUi", "aarch64_sve_pmov_to_vector_lane", [], ImmCheck1_3>;
+ defm SVPMOV_TO_VEC_LANE_D : PMOV_TO_VEC<"svpmov", "lUl", "aarch64_sve_pmov_to_vector_lane" ,[], ImmCheck1_7>;
+}
+
+//
+// Multi-vector convert to/from floating-point.
+//
+let TargetGuard = "sme2" in {
+ def SVCVT_F16_X2 : SInst<"svcvt_f16[_f32_x2]", "e2", "f", MergeNone, "aarch64_sve_fcvt_x2", [IsStreaming],[]>;
+ def SVCVT_BF16_X2 : SInst<"svcvt_bf16[_f32_x2]", "$2", "f", MergeNone, "aarch64_sve_bfcvt_x2", [IsOverloadNone, IsStreaming],[]>;
+
+ def SVCVT_F32_U32_X2 : SInst<"svcvt_{d}[_u32_x2]", "2.d2.u", "f", MergeNone, "aarch64_sve_ucvtf_x2", [IsStreaming, IsOverloadWhileOrMultiVecCvt], []>;
+ def SVCVT_U32_F32_X2 : SInst<"svcvt_{d}[_f32_x2]", "2.d2.M", "Ui", MergeNone, "aarch64_sve_fcvtzu_x2", [IsStreaming, IsOverloadWhileOrMultiVecCvt], []>;
+ def SVCVT_F32_S32_X2 : SInst<"svcvt_{d}[_s32_x2]", "2.d2.x", "f", MergeNone, "aarch64_sve_scvtf_x2", [IsStreaming, IsOverloadWhileOrMultiVecCvt], []>;
+ def SVCVT_S32_F32_X2 : SInst<"svcvt_{d}[_f32_x2]", "2.d2.M", "i", MergeNone, "aarch64_sve_fcvtzs_x2", [IsStreaming, IsOverloadWhileOrMultiVecCvt], []>;
+
+ def SVCVT_F32_U32_X4 : SInst<"svcvt_{d}[_u32_x4]", "4.d4.u", "f", MergeNone, "aarch64_sve_ucvtf_x4", [IsStreaming, IsOverloadWhileOrMultiVecCvt], []>;
+ def SVCVT_U32_F32_X4 : SInst<"svcvt_{d}[_f32_x4]", "4.d4.M", "Ui", MergeNone, "aarch64_sve_fcvtzu_x4", [IsStreaming, IsOverloadWhileOrMultiVecCvt], []>;
+ def SVCVT_F32_S32_X4 : SInst<"svcvt_{d}[_s32_x4]", "4.d4.x", "f", MergeNone, "aarch64_sve_scvtf_x4", [IsStreaming, IsOverloadWhileOrMultiVecCvt], []>;
+ def SVCVT_S32_F32_X4 : SInst<"svcvt_{d}[_f32_x4]", "4.d4.M", "i", MergeNone, "aarch64_sve_fcvtzs_x4", [IsStreaming, IsOverloadWhileOrMultiVecCvt], []>;
+}
+
+//
+// Multi-vector floating-point convert from single-precision to interleaved half-precision/BFloat16
+//
+let TargetGuard = "sme2" in {
+ def SVCVTN_F16_X2 : SInst<"svcvtn_f16[_f32_x2]", "e2", "f", MergeNone, "aarch64_sve_fcvtn_x2", [IsStreaming],[]>;
+ def SVCVTN_BF16_X2 : SInst<"svcvtn_bf16[_f32_x2]", "$2", "f", MergeNone, "aarch64_sve_bfcvtn_x2", [IsOverloadNone, IsStreaming],[]>;
+}
+
+//
+// Multi-vector saturating extract narrow
+//
+let TargetGuard = "sme2" in {
+ def SVQCVT_S16_S32_X2 : SInst<"svqcvt_s16[_{d}_x2]", "h2.d", "i", MergeNone, "aarch64_sve_sqcvt_x2", [IsStreaming], []>;
+ def SVQCVT_U16_U32_X2 : SInst<"svqcvt_u16[_{d}_x2]", "e2.d", "Ui", MergeNone, "aarch64_sve_uqcvt_x2", [IsStreaming], []>;
+ def SVQCVT_U16_S32_X2 : SInst<"svqcvt_u16[_{d}_x2]", "e2.d", "i", MergeNone, "aarch64_sve_sqcvtu_x2", [IsStreaming], []>;
+
+ def SVQCVT_S8_S32_X4 : SInst<"svqcvt_s8[_{d}_x4]", "q4.d", "i", MergeNone, "aarch64_sve_sqcvt_x4", [IsStreaming], []>;
+ def SVQCVT_U8_U32_X4 : SInst<"svqcvt_u8[_{d}_x4]", "b4.d", "Ui", MergeNone, "aarch64_sve_uqcvt_x4", [IsStreaming], []>;
+ def SVQCVT_U8_S32_X4 : SInst<"svqcvt_u8[_{d}_x4]", "b4.d", "i", MergeNone, "aarch64_sve_sqcvtu_x4", [IsStreaming], []>;
+
+ def SVQCVT_S16_S64_X4 : SInst<"svqcvt_s16[_{d}_x4]", "q4.d", "l", MergeNone, "aarch64_sve_sqcvt_x4", [IsStreaming], []>;
+ def SVQCVT_U16_U64_X4 : SInst<"svqcvt_u16[_{d}_x4]", "b4.d", "Ul", MergeNone, "aarch64_sve_uqcvt_x4", [IsStreaming], []>;
+ def SVQCVT_U16_S64_X4 : SInst<"svqcvt_u16[_{d}_x4]", "b4.d", "l", MergeNone, "aarch64_sve_sqcvtu_x4", [IsStreaming], []>;
+}
+
+//
+// Multi-vector saturating extract narrow and interleave
+//
+let TargetGuard = "sme2|sve2p1" in {
+ def SVQCVTN_S16_S32_X2 : SInst<"svqcvtn_s16[_{d}_x2]", "h2.d", "i", MergeNone, "aarch64_sve_sqcvtn_x2", [IsStreamingCompatible], []>;
+ def SVQCVTN_U16_U32_X2 : SInst<"svqcvtn_u16[_{d}_x2]", "e2.d", "Ui", MergeNone, "aarch64_sve_uqcvtn_x2", [IsStreamingCompatible], []>;
+ def SVQCVTN_U16_S32_X2 : SInst<"svqcvtn_u16[_{d}_x2]", "e2.d", "i", MergeNone, "aarch64_sve_sqcvtun_x2", [IsStreamingCompatible], []>;
+}
+
+let TargetGuard = "sme2" in {
+ def SVQCVTN_S8_S32_X4 : SInst<"svqcvtn_s8[_{d}_x4]", "q4.d", "i", MergeNone, "aarch64_sve_sqcvtn_x4", [IsStreaming], []>;
+ def SVQCVTN_U8_U32_X4 : SInst<"svqcvtn_u8[_{d}_x4]", "b4.d", "Ui", MergeNone, "aarch64_sve_uqcvtn_x4", [IsStreaming], []>;
+ def SVQCVTN_U8_S32_X4 : SInst<"svqcvtn_u8[_{d}_x4]", "b4.d", "i", MergeNone, "aarch64_sve_sqcvtun_x4", [IsStreaming], []>;
+
+ def SVQCVTN_S16_S64_X4 : SInst<"svqcvtn_s16[_{d}_x4]", "q4.d", "l", MergeNone, "aarch64_sve_sqcvtn_x4", [IsStreaming], []>;
+ def SVQCVTN_U16_U64_X4 : SInst<"svqcvtn_u16[_{d}_x4]", "b4.d", "Ul", MergeNone, "aarch64_sve_uqcvtn_x4", [IsStreaming], []>;
+ def SVQCVTN_U16_S64_X4 : SInst<"svqcvtn_u16[_{d}_x4]", "b4.d", "l", MergeNone, "aarch64_sve_sqcvtun_x4", [IsStreaming], []>;
+}
+
+//
+// Multi-vector zip/unzip
+//
+
+let TargetGuard = "sme2" in {
+ def SVZIP_X2 : SInst<"svzip[_{d}_x2]", "22", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_zip_x2", [IsStreaming], []>;
+ def SVZIPQ_X2 : SInst<"svzipq[_{d}_x2]", "22", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_zipq_x2", [IsStreaming], []>;
+ def SVZIP_X4 : SInst<"svzip[_{d}_x4]", "44", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_zip_x4", [IsStreaming], []>;
+ def SVZIPQ_X4 : SInst<"svzipq[_{d}_x4]", "44", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_zipq_x4", [IsStreaming], []>;
+
+ def SVUZP_X2 : SInst<"svuzp[_{d}_x2]", "22", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_uzp_x2", [IsStreaming], []>;
+ def SVUZPQ_X2 : SInst<"svuzpq[_{d}_x2]", "22", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_uzpq_x2", [IsStreaming], []>;
+ def SVUZP_X4 : SInst<"svuzp[_{d}_x4]", "44", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_uzp_x4", [IsStreaming], []>;
+ def SVUZPQ_X4 : SInst<"svuzpq[_{d}_x4]", "44", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_uzpq_x4", [IsStreaming], []>;
+}
+
+//
+// Multi-vector unpack
+//
+
+let TargetGuard = "sme2" in {
+ def SVSUNPK_X2 : SInst<"svunpk_{d}[_{1}_x2]", "2h", "sil", MergeNone, "aarch64_sve_sunpk_x2", [IsStreaming], []>;
+ def SVUUNPK_X2 : SInst<"svunpk_{d}[_{1}_x2]", "2h", "UsUiUl", MergeNone, "aarch64_sve_uunpk_x2", [IsStreaming], []>;
+ def SVSUNPK_X4 : SInst<"svunpk_{d}[_{3}_x4]", "42.h", "sil", MergeNone, "aarch64_sve_sunpk_x4", [IsStreaming], []>;
+ def SVUUNPK_X4 : SInst<"svunpk_{d}[_{3}_x4]", "42.h", "UsUiUl", MergeNone, "aarch64_sve_uunpk_x4", [IsStreaming], []>;
+}
+
+let TargetGuard = "sve2p1|sme2" in {
+// == BFloat16 multiply-subtract ==
+ def SVBFMLSLB : SInst<"svbfmlslb[_{d}]", "dd$$", "f", MergeNone, "aarch64_sve_bfmlslb", [IsOverloadNone, IsStreamingOrSVE2p1], []>;
+ def SVBFMLSLT : SInst<"svbfmlslt[_{d}]", "dd$$", "f", MergeNone, "aarch64_sve_bfmlslt", [IsOverloadNone, IsStreamingOrSVE2p1], []>;
+
+ def SVBFMLSLB_LANE : SInst<"svbfmlslb_lane[_{d}]", "dd$$i", "f", MergeNone, "aarch64_sve_bfmlslb_lane", [IsOverloadNone, IsStreamingOrSVE2p1], [ImmCheck<3, ImmCheck0_7>]>;
+ def SVBFMLSLT_LANE : SInst<"svbfmlslt_lane[_{d}]", "dd$$i", "f", MergeNone, "aarch64_sve_bfmlslt_lane", [IsOverloadNone, IsStreamingOrSVE2p1], [ImmCheck<3, ImmCheck0_7>]>;
+}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/arm_sve_sme_incl.td b/contrib/llvm-project/clang/include/clang/Basic/arm_sve_sme_incl.td
new file mode 100644
index 000000000000..9a6ea9898ef7
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/arm_sve_sme_incl.td
@@ -0,0 +1,295 @@
+//===--- arm_sve_sme_incl.td - ARM SVE/SME compiler interface -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines common properites of TableGen definitions use for both
+// SVE and SME intrinsics.
+//
+// https://developer.arm.com/architectures/system-architectures/software-standards/acle
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Instruction definitions
+//===----------------------------------------------------------------------===//
+// Every intrinsic subclasses "Inst". An intrinsic has a name, a prototype and
+// a sequence of typespecs.
+//
+// The name is the base name of the intrinsic, for example "svld1". This is
+// then mangled by the tblgen backend to add type information ("svld1_s16").
+//
+// A typespec is a sequence of uppercase characters (modifiers) followed by one
+// lowercase character. A typespec encodes a particular "base type" of the
+// intrinsic.
+//
+// An example typespec is "Us" - unsigned short - svuint16_t. The available
+// typespec codes are given below.
+//
+// The string given to an Inst class is a sequence of typespecs. The intrinsic
+// is instantiated for every typespec in the sequence. For example "sdUsUd".
+//
+// The prototype is a string that defines the return type of the intrinsic
+// and the type of each argument. The return type and every argument gets a
+// "modifier" that can change in some way the "base type" of the intrinsic.
+//
+// The modifier 'd' means "default" and does not modify the base type in any
+// way. The available modifiers are given below.
+//
+// Typespecs
+// ---------
+// c: char
+// s: short
+// i: int
+// l: long
+// q: int128_t
+// f: float
+// h: half-float
+// d: double
+// b: bfloat
+
+// Typespec modifiers
+// ------------------
+// P: boolean
+// U: unsigned
+// Q: svcount
+
+// Prototype modifiers
+// -------------------
+// prototype: return (arg, arg, ...)
+//
+// 2,3,4: array of vectors
+// .: indicator for multi-vector modifier that will follow (e.g. 2.x)
+// v: void
+// x: vector of signed integers
+// u: vector of unsigned integers
+// d: default
+// c: const pointer type
+// P: predicate type
+// s: scalar of element type
+// a: scalar of element type (splat to vector type)
+// R: scalar of 1/2 width element type (splat to vector type)
+// r: scalar of 1/4 width element type (splat to vector type)
+// @: unsigned scalar of 1/4 width element type (splat to vector type)
+// e: 1/2 width unsigned elements, 2x element count
+// b: 1/4 width unsigned elements, 4x element count
+// h: 1/2 width elements, 2x element count
+// q: 1/4 width elements, 4x element count
+// o: 4x width elements, 1/4 element count
+//
+// w: vector of element type promoted to 64bits, vector maintains
+// signedness of its element type.
+// f: element type promoted to uint64_t (splat to vector type)
+// j: element type promoted to 64bits (splat to vector type)
+// K: element type bitcast to a signed integer (splat to vector type)
+// L: element type bitcast to an unsigned integer (splat to vector type)
+//
+// i: constant uint64_t
+// k: int32_t
+// l: int64_t
+// m: uint32_t
+// n: uint64_t
+
+// [: svuint8_t
+// t: svint32_t
+// z: svuint32_t
+// g: svuint64_t
+// O: svfloat16_t
+// M: svfloat32_t
+// N: svfloat64_t
+// $: svbfloat16_t
+
+// J: Prefetch type (sv_prfop)
+
+// %: pointer to void
+
+// A: pointer to int8_t
+// B: pointer to int16_t
+// C: pointer to int32_t
+// D: pointer to int64_t
+
+// E: pointer to uint8_t
+// F: pointer to uint16_t
+// G: pointer to uint32_t
+// H: pointer to uint64_t
+
+// Q: const pointer to void
+
+// S: const pointer to int8_t
+// T: const pointer to int16_t
+// U: const pointer to int32_t
+// V: const pointer to int64_t
+//
+// W: const pointer to uint8_t
+// X: const pointer to uint16_t
+// Y: const pointer to uint32_t
+// Z: const pointer to uint64_t
+
+// Prototype modifiers added for SVE2p1
+// {: 128b vector
+// }: svcount_t
+
+class MergeType<int val, string suffix=""> {
+ int Value = val;
+ string Suffix = suffix;
+}
+def MergeNone : MergeType<0>;
+def MergeAny : MergeType<1, "_x">;
+def MergeOp1 : MergeType<2, "_m">;
+def MergeZero : MergeType<3, "_z">;
+def MergeAnyExp : MergeType<4, "_x">; // Use merged builtin with explicit
+def MergeZeroExp : MergeType<5, "_z">; // generation of its inactive argument.
+
+class EltType<int val> {
+ int Value = val;
+}
+def EltTyInvalid : EltType<0>;
+def EltTyInt8 : EltType<1>;
+def EltTyInt16 : EltType<2>;
+def EltTyInt32 : EltType<3>;
+def EltTyInt64 : EltType<4>;
+def EltTyInt128 : EltType<5>;
+def EltTyFloat16 : EltType<6>;
+def EltTyFloat32 : EltType<7>;
+def EltTyFloat64 : EltType<8>;
+def EltTyBool8 : EltType<9>;
+def EltTyBool16 : EltType<10>;
+def EltTyBool32 : EltType<11>;
+def EltTyBool64 : EltType<12>;
+def EltTyBFloat16 : EltType<13>;
+
+class MemEltType<int val> {
+ int Value = val;
+}
+def MemEltTyDefault : MemEltType<0>;
+def MemEltTyInt8 : MemEltType<1>;
+def MemEltTyInt16 : MemEltType<2>;
+def MemEltTyInt32 : MemEltType<3>;
+def MemEltTyInt64 : MemEltType<4>;
+
+class FlagType<int val> {
+ int Value = val;
+}
+
+// These must be kept in sync with the flags in utils/TableGen/SveEmitter.h
+// and include/clang/Basic/TargetBuiltins.h
+def NoFlags : FlagType<0x00000000>;
+def FirstEltType : FlagType<0x00000001>;
+// : :
+// : :
+def EltTypeMask : FlagType<0x0000000f>;
+def FirstMemEltType : FlagType<0x00000010>;
+// : :
+// : :
+def MemEltTypeMask : FlagType<0x00000070>;
+def FirstMergeTypeMask : FlagType<0x00000080>;
+// : :
+// : :
+def MergeTypeMask : FlagType<0x00000380>;
+def FirstSplatOperand : FlagType<0x00000400>;
+// : :
+// These flags are used to specify which scalar operand
+// needs to be duplicated/splatted into a vector.
+// : :
+def SplatOperandMask : FlagType<0x00001C00>;
+def IsLoad : FlagType<0x00002000>;
+def IsStore : FlagType<0x00004000>;
+def IsGatherLoad : FlagType<0x00008000>;
+def IsScatterStore : FlagType<0x00010000>;
+def IsStructLoad : FlagType<0x00020000>;
+def IsStructStore : FlagType<0x00040000>;
+def IsZExtReturn : FlagType<0x00080000>; // Return value is sign-extend by default
+def IsOverloadNone : FlagType<0x00100000>; // Intrinsic does not take any overloaded types.
+def IsOverloadWhileOrMultiVecCvt : FlagType<0x00200000>; // Use {default type, typeof(operand1)} as overloaded types.
+def IsOverloadWhileRW : FlagType<0x00400000>; // Use {pred(default type), typeof(operand0)} as overloaded types.
+def IsOverloadCvt : FlagType<0x00800000>; // Use {typeof(operand0), typeof(last operand)} as overloaded types.
+def OverloadKindMask : FlagType<0x00E00000>; // When the masked values are all '0', the default type is used as overload type.
+def IsByteIndexed : FlagType<0x01000000>;
+def IsAppendSVALL : FlagType<0x02000000>; // Appends SV_ALL as the last operand.
+def IsInsertOp1SVALL : FlagType<0x04000000>; // Inserts SV_ALL as the second operand.
+def IsPrefetch : FlagType<0x08000000>; // Contiguous prefetches.
+def IsGatherPrefetch : FlagType<0x10000000>;
+def ReverseCompare : FlagType<0x20000000>; // Compare operands must be swapped.
+def ReverseUSDOT : FlagType<0x40000000>; // Unsigned/signed operands must be swapped.
+def IsUndef : FlagType<0x80000000>; // Codegen `undef` of given type.
+def IsTupleCreate : FlagType<0x100000000>;
+def IsTupleGet : FlagType<0x200000000>;
+def IsTupleSet : FlagType<0x400000000>;
+def ReverseMergeAnyBinOp : FlagType<0x800000000>; // e.g. Implement SUBR_X using SUB_X.
+def ReverseMergeAnyAccOp : FlagType<0x1000000000>; // e.g. Implement MSB_X using MLS_X.
+def IsStreaming : FlagType<0x2000000000>;
+def IsStreamingCompatible : FlagType<0x4000000000>;
+def IsReadZA : FlagType<0x8000000000>;
+def IsWriteZA : FlagType<0x10000000000>;
+def IsReductionQV : FlagType<0x20000000000>;
+def IsStreamingOrSVE2p1 : FlagType<0x40000000000>; // Use for intrinsics that are common between sme/sme2 and sve2p1.
+def IsInZA : FlagType<0x80000000000>;
+def IsOutZA : FlagType<0x100000000000>;
+def IsInOutZA : FlagType<0x200000000000>;
+def IsInZT0 : FlagType<0x400000000000>;
+def IsOutZT0 : FlagType<0x800000000000>;
+def IsInOutZT0 : FlagType<0x1000000000000>;
+
+// These must be kept in sync with the flags in include/clang/Basic/TargetBuiltins.h
+class ImmCheckType<int val> {
+ int Value = val;
+}
+def ImmCheck0_31 : ImmCheckType<0>; // 0..31 (used for e.g. predicate patterns)
+def ImmCheck1_16 : ImmCheckType<1>; // 1..16
+def ImmCheckExtract : ImmCheckType<2>; // 0..(2048/sizeinbits(elt) - 1)
+def ImmCheckShiftRight : ImmCheckType<3>; // 1..sizeinbits(elt)
+def ImmCheckShiftRightNarrow : ImmCheckType<4>; // 1..sizeinbits(elt)/2
+def ImmCheckShiftLeft : ImmCheckType<5>; // 0..(sizeinbits(elt) - 1)
+def ImmCheck0_7 : ImmCheckType<6>; // 0..7
+def ImmCheckLaneIndex : ImmCheckType<7>; // 0..(128/(1*sizeinbits(elt)) - 1)
+def ImmCheckLaneIndexCompRotate : ImmCheckType<8>; // 0..(128/(2*sizeinbits(elt)) - 1)
+def ImmCheckLaneIndexDot : ImmCheckType<9>; // 0..(128/(4*sizeinbits(elt)) - 1)
+def ImmCheckComplexRot90_270 : ImmCheckType<10>; // [90,270]
+def ImmCheckComplexRotAll90 : ImmCheckType<11>; // [0, 90, 180,270]
+def ImmCheck0_13 : ImmCheckType<12>; // 0..13
+def ImmCheck0_1 : ImmCheckType<13>; // 0..1
+def ImmCheck0_2 : ImmCheckType<14>; // 0..2
+def ImmCheck0_3 : ImmCheckType<15>; // 0..3
+def ImmCheck0_0 : ImmCheckType<16>; // 0..0
+def ImmCheck0_15 : ImmCheckType<17>; // 0..15
+def ImmCheck0_255 : ImmCheckType<18>; // 0..255
+def ImmCheck2_4_Mul2 : ImmCheckType<19>; // 2, 4
+def ImmCheck1_1 : ImmCheckType<20>; // 1..1
+def ImmCheck1_3 : ImmCheckType<21>; // 1..3
+def ImmCheck1_7 : ImmCheckType<22>; // 1..7
+
+class ImmCheck<int arg, ImmCheckType kind, int eltSizeArg = -1> {
+ int Arg = arg;
+ int EltSizeArg = eltSizeArg;
+ ImmCheckType Kind = kind;
+}
+
+class Inst<string n, string p, string t, MergeType mt, string i,
+ list<FlagType> ft, list<ImmCheck> ch, MemEltType met = MemEltTyDefault> {
+ string Name = n;
+ string Prototype = p;
+ string Types = t;
+ string TargetGuard = "sve";
+ int Merge = mt.Value;
+ string MergeSuffix = mt.Suffix;
+ string LLVMIntrinsic = i;
+ list<FlagType> Flags = ft;
+ list<ImmCheck> ImmChecks = ch;
+ int MemEltType = met.Value;
+}
+
+// SInst: Instruction with signed/unsigned suffix (e.g., "s8", "u8")
+class SInst<string n, string p, string t, MergeType mt, string i = "",
+ list<FlagType> ft = [], list<ImmCheck> ch = []>
+ : Inst<n, p, t, mt, i, ft, ch, MemEltTyDefault> {
+}
+
+// MInst: Instructions which access memory
+class MInst<string n, string p, string t, list<FlagType> f,
+ MemEltType met = MemEltTyDefault, string i = "",
+ list<ImmCheck> ch = []>
+ : Inst<n, p, t, MergeNone, i, f, ch, met> {
+}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/riscv_sifive_vector.td b/contrib/llvm-project/clang/include/clang/Basic/riscv_sifive_vector.td
new file mode 100644
index 000000000000..ef5114d6105e
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/riscv_sifive_vector.td
@@ -0,0 +1,211 @@
+//==--- riscv_sifive_vector.td - RISC-V SiFive VCIX function list ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the builtins for RISC-V SiFive VCIX. See:
+//
+// https://sifive.cdn.prismic.io/sifive/c3829e36-8552-41f0-a841-79945784241b_vcix-spec-software.pdf
+//
+//===----------------------------------------------------------------------===//
+
+include "riscv_vector_common.td"
+
+//===----------------------------------------------------------------------===//
+// Instruction definitions
+//===----------------------------------------------------------------------===//
+
+class VCIXSuffix<string range> {
+ list<string> suffix = !cond(!eq(range, "c"): ["8mf8", "8mf4", "8mf2", "8m1", "8m2", "8m4", "8m8"],
+ !eq(range, "s"): ["16mf4", "16mf2", "16m1", "16m2", "16m4", "16m8"],
+ !eq(range, "i"): ["32mf2", "32m1", "32m2", "32m4", "32m8"],
+ !eq(range, "l"): ["64m1", "64m2", "64m4", "64m8"]);
+}
+
+class VCIXBuiltinSet<string name, string IR_name, string suffix,
+ string prototype, string type_range,
+ list<int> intrinsic_types>
+ : RVVBuiltin<suffix, prototype, type_range> {
+ let Name = name;
+ let OverloadedName = name;
+ let IRName = IR_name;
+ let HasMasked = false;
+ let IntrinsicTypes = intrinsic_types;
+}
+
+multiclass VCIXBuiltinSet<string name, string IR_name, string suffix,
+ string prototype, string type_range,
+ list<int> intrinsic_types> {
+ if !find(prototype, "0") then {
+ def : VCIXBuiltinSet<name, IR_name, suffix, prototype, type_range, intrinsic_types>;
+ }
+ def : VCIXBuiltinSet<name # "_se", IR_name # "_se", suffix, prototype, type_range, intrinsic_types>;
+}
+
+multiclass RVVVCIXBuiltinSet<list<string> range, string prototype,
+ list<int> intrinsic_types, bit UseGPR> {
+ foreach r = range in
+ let RequiredFeatures = !if(!and(UseGPR, !eq(r, "l")),
+ ["Xsfvcp", "RV64"], ["Xsfvcp"]) in
+ defm : VCIXBuiltinSet<NAME, NAME, "Uv", prototype, r, intrinsic_types>;
+}
+
+multiclass RVVVCIXBuiltinSetWVType<list<string> range, string prototype,
+ list<int> intrinsic_types, bit UseGPR> {
+ foreach r = range in
+ let RequiredFeatures = !if(!and(UseGPR, !eq(r, "l")),
+ ["Xsfvcp", "RV64"], ["Xsfvcp"]) in
+ // These intrinsics don't have any vector types in the output and inputs,
+ // but we still need to add vetvli for them. So we encode different
+ // VTYPE into the intrinsic names, and then will know which vsetvli is
+ // correct.
+ foreach s = VCIXSuffix<r>.suffix in
+ // Since we already encode the Vtype into the name, so just set
+ // Log2LMUL to zero. Otherwise the RISCVVEmitter will expand
+ // lots of redundant intrinsic but have same names.
+ let Log2LMUL = [0] in
+ def : VCIXBuiltinSet<NAME # "_u" # s, NAME # "_e" # s,
+ "", prototype, r, intrinsic_types>;
+}
+
+let SupportOverloading = false in {
+ defm sf_vc_x_se : RVVVCIXBuiltinSetWVType<["c", "s", "i", "l"], "0KzKzKzUe", [0, 3], UseGPR=1>;
+ defm sf_vc_i_se : RVVVCIXBuiltinSetWVType<["c", "s", "i", "l"], "0KzKzKzKz", [2, 3], UseGPR=0>;
+ defm sf_vc_xv : RVVVCIXBuiltinSet<["csi", "l"], "0KzKzUvUe", [0, 2, 3], UseGPR=1>;
+ defm sf_vc_iv : RVVVCIXBuiltinSet<["csi", "l"], "0KzKzUvKz", [0, 2, 3], UseGPR=0>;
+ defm sf_vc_vv : RVVVCIXBuiltinSet<["csi", "l"], "0KzKzUvUv", [0, 2, 3], UseGPR=0>;
+ defm sf_vc_fv : RVVVCIXBuiltinSet<["si", "l"], "0KzKzUvFe", [0, 2, 3], UseGPR=0>;
+ defm sf_vc_xvv : RVVVCIXBuiltinSet<["csi", "l"], "0KzUvUvUe", [0, 1, 2, 3], UseGPR=1>;
+ defm sf_vc_ivv : RVVVCIXBuiltinSet<["csi", "l"], "0KzUvUvKz", [0, 1, 2, 3], UseGPR=0>;
+ defm sf_vc_vvv : RVVVCIXBuiltinSet<["csi", "l"], "0KzUvUvUv", [0, 1, 2, 3], UseGPR=0>;
+ defm sf_vc_fvv : RVVVCIXBuiltinSet<["si", "l"], "0KzUvUvFe", [0, 1, 2, 3], UseGPR=0>;
+ defm sf_vc_v_x : RVVVCIXBuiltinSet<["csi", "l"], "UvKzKzUe", [-1, 1, 2], UseGPR=1>;
+ defm sf_vc_v_i : RVVVCIXBuiltinSet<["csi", "l"], "UvKzKzKz", [-1, 1, 2], UseGPR=0>;
+ defm sf_vc_v_xv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUe", [-1, 0, 1, 2], UseGPR=1>;
+ defm sf_vc_v_iv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvKz", [-1, 0, 1, 2], UseGPR=0>;
+ defm sf_vc_v_vv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUv", [-1, 0, 1, 2], UseGPR=0>;
+ defm sf_vc_v_fv : RVVVCIXBuiltinSet<["si", "l"], "UvKzUvFe", [-1, 0, 1, 2], UseGPR=0>;
+ defm sf_vc_v_xvv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUvUe", [-1, 0, 1, 2, 3], UseGPR=1>;
+ defm sf_vc_v_ivv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUvKz", [-1, 0, 1, 2, 3], UseGPR=0>;
+ defm sf_vc_v_vvv : RVVVCIXBuiltinSet<["csi", "l"], "UvKzUvUvUv", [-1, 0, 1, 2, 3], UseGPR=0>;
+ defm sf_vc_v_fvv : RVVVCIXBuiltinSet<["si", "l"], "UvKzUvUvFe", [-1, 0, 1, 2, 3], UseGPR=0>;
+ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+ defm sf_vc_xvw : RVVVCIXBuiltinSet<["csi"], "0KzUwUvUe", [0, 1, 2, 3], UseGPR=1>;
+ defm sf_vc_ivw : RVVVCIXBuiltinSet<["csi"], "0KzUwUvKz", [0, 1, 2, 3], UseGPR=0>;
+ defm sf_vc_vvw : RVVVCIXBuiltinSet<["csi"], "0KzUwUvUv", [0, 1, 2, 3], UseGPR=0>;
+ defm sf_vc_fvw : RVVVCIXBuiltinSet<["si"], "0KzUwUvFe", [0, 1, 2, 3], UseGPR=0>;
+ defm sf_vc_v_xvw : RVVVCIXBuiltinSet<["csi"], "UwKzUwUvUe", [-1, 0, 1, 2, 3], UseGPR=1>;
+ defm sf_vc_v_ivw : RVVVCIXBuiltinSet<["csi"], "UwKzUwUvKz", [-1, 0, 1, 2, 3], UseGPR=0>;
+ defm sf_vc_v_vvw : RVVVCIXBuiltinSet<["csi"], "UwKzUwUvUv", [-1, 0, 1, 2, 3], UseGPR=0>;
+ defm sf_vc_v_fvw : RVVVCIXBuiltinSet<["si"], "UwKzUwUvFe", [-1, 0, 1, 2, 3], UseGPR=0>;
+ }
+}
+
+multiclass RVVVFWMACCBuiltinSet<list<list<string>> suffixes_prototypes> {
+ let OverloadedName = NAME,
+ Name = NAME,
+ HasMasked = false,
+ Log2LMUL = [-2, -1, 0, 1, 2] in
+ defm NAME : RVVOutOp1Op2BuiltinSet<NAME, "y", suffixes_prototypes>;
+}
+
+multiclass RVVVQMACCDODBuiltinSet<list<list<string>> suffixes_prototypes> {
+ let OverloadedName = NAME,
+ Name = NAME,
+ HasMasked = false,
+ Log2LMUL = [0, 1, 2, 3] in
+ defm NAME : RVVOutOp1Op2BuiltinSet<NAME, "i", suffixes_prototypes>;
+}
+
+multiclass RVVVQMACCQOQBuiltinSet<list<list<string>> suffixes_prototypes> {
+ let OverloadedName = NAME,
+ Name = NAME,
+ HasMasked = false,
+ Log2LMUL = [-1, 0, 1, 2] in
+ defm NAME : RVVOutOp1Op2BuiltinSet<NAME, "s", suffixes_prototypes>;
+}
+
+multiclass RVVVFNRCLIPBuiltinSet<string suffix, string prototype, string type_range> {
+ let Log2LMUL = [-3, -2, -1, 0, 1, 2],
+ Name = NAME,
+ IRName = NAME,
+ MaskedIRName = NAME # "_mask" in
+ def : RVVConvBuiltin<suffix, prototype, type_range, NAME>;
+}
+
+let UnMaskedPolicyScheme = HasPolicyOperand in
+ let RequiredFeatures = ["Xsfvqmaccdod"] in {
+ defm sf_vqmaccu_2x8x2 : RVVVQMACCDODBuiltinSet<[["", "v", "vv(FixedSEW:8)SUv(FixedSEW:8)Uv"]]>;
+ defm sf_vqmacc_2x8x2 : RVVVQMACCDODBuiltinSet<[["", "v", "vv(FixedSEW:8)Sv(FixedSEW:8)v"]]>;
+ defm sf_vqmaccus_2x8x2 : RVVVQMACCDODBuiltinSet<[["", "v", "vv(FixedSEW:8)SUv(FixedSEW:8)v"]]>;
+ defm sf_vqmaccsu_2x8x2 : RVVVQMACCDODBuiltinSet<[["", "v", "vv(FixedSEW:8)Sv(FixedSEW:8)Uv"]]>;
+ }
+
+let UnMaskedPolicyScheme = HasPolicyOperand in
+ let RequiredFeatures = ["Xsfvqmaccqoq"] in {
+ defm sf_vqmaccu_4x8x4 : RVVVQMACCQOQBuiltinSet<[["", "w", "ww(FixedSEW:8)SUv(FixedSEW:8)Uv"]]>;
+ defm sf_vqmacc_4x8x4 : RVVVQMACCQOQBuiltinSet<[["", "w", "ww(FixedSEW:8)Sv(FixedSEW:8)v"]]>;
+ defm sf_vqmaccus_4x8x4 : RVVVQMACCQOQBuiltinSet<[["", "w", "ww(FixedSEW:8)SUv(FixedSEW:8)v"]]>;
+ defm sf_vqmaccsu_4x8x4 : RVVVQMACCQOQBuiltinSet<[["", "w", "ww(FixedSEW:8)Sv(FixedSEW:8)Uv"]]>;
+ }
+
+let UnMaskedPolicyScheme = HasPolicyOperand in
+ let RequiredFeatures = ["Xsfvfwmaccqqq"] in
+ defm sf_vfwmacc_4x4x4 : RVVVFWMACCBuiltinSet<[["", "Fw", "FwFwSvv"]]>;
+
+let UnMaskedPolicyScheme = HasPassthruOperand, RequiredFeatures = ["Xsfvfnrclipxfqf"] in {
+let ManualCodegen = [{
+ {
+ // LLVM intrinsic
+ // Unmasked: (passthru, vector_in, scalar_in, frm, vl)
+ // Masked: (passthru, vector_in, scalar_in, mask, frm, vl, policy)
+
+ SmallVector<llvm::Value*, 7> Operands;
+ bool HasMaskedOff = !(
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+ bool HasRoundModeOp = IsMasked ?
+ (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
+ (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
+
+ unsigned Offset = IsMasked ?
+ (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+ if (!HasMaskedOff)
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
+ else
+ Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+ Operands.push_back(Ops[Offset]); // op0
+ Operands.push_back(Ops[Offset + 1]); // op1
+
+ if (IsMasked)
+ Operands.push_back(Ops[0]); // mask
+
+ if (HasRoundModeOp) {
+ Operands.push_back(Ops[Offset + 2]); // frm
+ Operands.push_back(Ops[Offset + 3]); // vl
+ } else {
+ Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
+ Operands.push_back(Ops[Offset + 2]); // vl
+ }
+
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Operands.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
+ }
+}] in {
+ let HasFRMRoundModeOp = true in {
+ defm sf_vfnrclip_x_f_qf : RVVVFNRCLIPBuiltinSet<"v", "vFqfu", "c">;
+ defm sf_vfnrclip_xu_f_qf : RVVVFNRCLIPBuiltinSet<"Uv", "UvFqfu", "c">;
+ }
+ defm sf_vfnrclip_x_f_qf : RVVVFNRCLIPBuiltinSet<"v", "vFqf", "c">;
+ defm sf_vfnrclip_xu_f_qf : RVVVFNRCLIPBuiltinSet<"Uv", "UvFqf", "c">;
+}
+}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td b/contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td
index 48c032dd1422..8bde08105250 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td
+++ b/contrib/llvm-project/clang/include/clang/Basic/riscv_vector.td
@@ -12,539 +12,7 @@
//
//===----------------------------------------------------------------------===//
-//===----------------------------------------------------------------------===//
-// Instruction definitions
-//===----------------------------------------------------------------------===//
-// Each record of the class RVVBuiltin defines a collection of builtins (i.e.
-// "def vadd : RVVBuiltin" will be used to define things like "vadd_vv_i32m1",
-// "vadd_vv_i32m2", etc).
-//
-// The elements of this collection are defined by an instantiation process the
-// range of which is specified by the cross product of the LMUL attribute and
-// every element in the attribute TypeRange. By default builtins have LMUL = [1,
-// 2, 4, 8, 1/2, 1/4, 1/8] so the process is repeated 7 times. In tablegen we
-// use the Log2LMUL [0, 1, 2, 3, -1, -2, -3] to represent the LMUL.
-//
-// LMUL represents the fact that the types of values used by that builtin are
-// values generated by instructions that are executed under that LMUL. However,
-// this does not mean the builtin is necessarily lowered into an instruction
-// that executes under the specified LMUL. An example where this happens are
-// loads and stores of masks. A mask like `vbool8_t` can be generated, for
-// instance, by comparing two `__rvv_int8m1_t` (this is LMUL=1) or comparing two
-// `__rvv_int16m2_t` (this is LMUL=2). The actual load or store, however, will
-// be performed under LMUL=1 because mask registers are not grouped.
-//
-// TypeRange is a non-empty sequence of basic types:
-//
-// c: int8_t (i8)
-// s: int16_t (i16)
-// i: int32_t (i32)
-// l: int64_t (i64)
-// x: float16_t (half)
-// f: float32_t (float)
-// d: float64_t (double)
-//
-// This way, given an LMUL, a record with a TypeRange "sil" will cause the
-// definition of 3 builtins. Each type "t" in the TypeRange (in this example
-// they are int16_t, int32_t, int64_t) is used as a parameter that drives the
-// definition of that particular builtin (for the given LMUL).
-//
-// During the instantiation, types can be transformed or modified using type
-// transformers. Given a type "t" the following primitive type transformers can
-// be applied to it to yield another type.
-//
-// e: type of "t" as is (identity)
-// v: computes a vector type whose element type is "t" for the current LMUL
-// w: computes a vector type identical to what 'v' computes except for the
-// element type which is twice as wide as the element type of 'v'
-// q: computes a vector type identical to what 'v' computes except for the
-// element type which is four times as wide as the element type of 'v'
-// o: computes a vector type identical to what 'v' computes except for the
-// element type which is eight times as wide as the element type of 'v'
-// m: computes a vector type identical to what 'v' computes except for the
-// element type which is bool
-// 0: void type, ignores "t"
-// z: size_t, ignores "t"
-// t: ptrdiff_t, ignores "t"
-// u: unsigned long, ignores "t"
-// l: long, ignores "t"
-//
-// So for instance if t is "i", i.e. int, then "e" will yield int again. "v"
-// will yield an RVV vector type (assume LMUL=1), so __rvv_int32m1_t.
-// Accordingly "w" would yield __rvv_int64m2_t.
-//
-// A type transformer can be prefixed by other non-primitive type transformers.
-//
-// P: constructs a pointer to the current type
-// C: adds const to the type
-// K: requires the integer type to be a constant expression
-// U: given an integer type or vector type, computes its unsigned variant
-// I: given a vector type, compute the vector type with integer type
-// elements of the same width
-// F: given a vector type, compute the vector type with floating-point type
-// elements of the same width
-// S: given a vector type, computes its equivalent one for LMUL=1. This is a
-// no-op if the vector was already LMUL=1
-// (Log2EEW:Value): Log2EEW value could be 3/4/5/6 (8/16/32/64), given a
-// vector type (SEW and LMUL) and EEW (8/16/32/64), computes its
-// equivalent integer vector type with EEW and corresponding ELMUL (elmul =
-// (eew/sew) * lmul). For example, vector type is __rvv_float16m4
-// (SEW=16, LMUL=4) and Log2EEW is 3 (EEW=8), and then equivalent vector
-// type is __rvv_uint8m2_t (elmul=(8/16)*4 = 2). Ignore to define a new
-// builtins if its equivalent type has illegal lmul.
-// (FixedSEW:Value): Given a vector type (SEW and LMUL), and computes another
-// vector type which only changed SEW as given value. Ignore to define a new
-// builtin if its equivalent type has illegal lmul or the SEW does not changed.
-// (SFixedLog2LMUL:Value): Smaller Fixed Log2LMUL. Given a vector type (SEW
-// and LMUL), and computes another vector type which only changed LMUL as
-// given value. The new LMUL should be smaller than the old one. Ignore to
-// define a new builtin if its equivalent type has illegal lmul.
-// (LFixedLog2LMUL:Value): Larger Fixed Log2LMUL. Given a vector type (SEW
-// and LMUL), and computes another vector type which only changed LMUL as
-// given value. The new LMUL should be larger than the old one. Ignore to
-// define a new builtin if its equivalent type has illegal lmul.
-//
-// Following with the example above, if t is "i", then "Ue" will yield unsigned
-// int and "Fv" will yield __rvv_float32m1_t (again assuming LMUL=1), Fw would
-// yield __rvv_float64m2_t, etc.
-//
-// Each builtin is then defined by applying each type in TypeRange against the
-// sequence of type transformers described in Suffix and Prototype.
-//
-// The name of the builtin is defined by the Name attribute (which defaults to
-// the name of the class) appended (separated with an underscore) the Suffix
-// attribute. For instance with Name="foo", Suffix = "v" and TypeRange = "il",
-// the builtin generated will be __builtin_rvv_foo_i32m1 and
-// __builtin_rvv_foo_i64m1 (under LMUL=1). If Suffix contains more than one
-// type transformer (say "vv") each of the types is separated with an
-// underscore as in "__builtin_rvv_foo_i32m1_i32m1".
-//
-// The C/C++ prototype of the builtin is defined by the Prototype attribute.
-// Prototype is a non-empty sequence of type transformers, the first of which
-// is the return type of the builtin and the rest are the parameters of the
-// builtin, in order. For instance if Prototype is "wvv" and TypeRange is "si"
-// a first builtin will have type
-// __rvv_int32m2_t (__rvv_int16m1_t, __rvv_int16m1_t) and the second builtin
-// will have type __rvv_int64m2_t (__rvv_int32m1_t, __rvv_int32m1_t) (again
-// under LMUL=1).
-//
-// There are a number of attributes that are used to constraint the number and
-// shape of the builtins generated. Refer to the comments below for them.
-class RVVBuiltin<string suffix, string prototype, string type_range,
- string mangled_suffix = ""> {
- // Base name that will be prepended in __builtin_rvv_ and appended the
- // computed Suffix.
- string Name = NAME;
-
- // If not empty, each instantiated builtin will have this appended after an
- // underscore (_). It is instantiated like Prototype.
- string Suffix = suffix;
-
- // If empty, default MangledName is sub string of `Name` which end of first
- // '_'. For example, the default mangled name is `vadd` for Name `vadd_vv`.
- // It's used for describe some special naming cases.
- string MangledName = "";
-
- // If not empty, each MangledName will have this appended after an
- // underscore (_). It is instantiated like Prototype.
- string MangledSuffix = mangled_suffix;
-
- // The different variants of the builtin, parameterised with a type.
- string TypeRange = type_range;
-
- // We use each type described in TypeRange and LMUL with prototype to
- // instantiate a specific element of the set of builtins being defined.
- // Prototype attribute defines the C/C++ prototype of the builtin. It is a
- // non-empty sequence of type transformers, the first of which is the return
- // type of the builtin and the rest are the parameters of the builtin, in
- // order. For instance if Prototype is "wvv", TypeRange is "si" and LMUL=1, a
- // first builtin will have type
- // __rvv_int32m2_t (__rvv_int16m1_t, __rvv_int16m1_t), and the second builtin
- // will have type __rvv_int64m2_t (__rvv_int32m1_t, __rvv_int32m1_t).
- string Prototype = prototype;
-
- // This builtin has a masked form.
- bit HasMask = true;
-
- // If HasMask, this flag states that this builtin has a maskedoff operand. It
- // is always the first operand in builtin and IR intrinsic.
- bit HasMaskedOffOperand = true;
-
- // This builtin has a granted vector length parameter in the last position.
- bit HasVL = true;
-
- // This builtin supports non-masked function overloading api.
- // All masked operations support overloading api.
- bit HasNoMaskedOverloaded = true;
-
- // Reads or writes "memory" or has other side-effects.
- bit HasSideEffects = false;
-
- // This builtin is valid for the given Log2LMULs.
- list<int> Log2LMUL = [0, 1, 2, 3, -1, -2, -3];
-
- // Manual code in clang codegen riscv_vector_builtin_cg.inc
- code ManualCodegen = [{}];
- code ManualCodegenMask = [{}];
-
- // When emit the automatic clang codegen, it describes what types we have to use
- // to obtain the specific LLVM intrinsic. -1 means the return type, otherwise,
- // k >= 0 meaning the k-th operand (counting from zero) of the codegen'd
- // parameter of the unmasked version. k can't be the mask operand's position.
- list<int> IntrinsicTypes = [];
-
- // If these names are not empty, this is the ID of the LLVM intrinsic
- // we want to lower to.
- string IRName = NAME;
-
- // If HasMask, this is the ID of the LLVM intrinsic we want to lower to.
- string IRNameMask = NAME #"_mask";
-
- // If non empty, this is the code emitted in the header, otherwise
- // an automatic definition in header is emitted.
- string HeaderCode = "";
-
- // Sub extension of vector spec. Currently only support Zvamo or Zvlsseg.
- string RequiredExtension = "";
-
- // Number of fields for Zvlsseg.
- int NF = 1;
-}
-
-//===----------------------------------------------------------------------===//
-// Basic classes with automatic codegen.
-//===----------------------------------------------------------------------===//
-
-class RVVOutBuiltin<string suffix, string prototype, string type_range>
- : RVVBuiltin<suffix, prototype, type_range> {
- let IntrinsicTypes = [-1];
-}
-
-class RVVOp0Builtin<string suffix, string prototype, string type_range>
- : RVVBuiltin<suffix, prototype, type_range> {
- let IntrinsicTypes = [0];
-}
-
-class RVVOutOp1Builtin<string suffix, string prototype, string type_range>
- : RVVBuiltin<suffix, prototype, type_range> {
- let IntrinsicTypes = [-1, 1];
-}
-
-class RVVOutOp0Op1Builtin<string suffix, string prototype, string type_range>
- : RVVBuiltin<suffix, prototype, type_range> {
- let IntrinsicTypes = [-1, 0, 1];
-}
-
-multiclass RVVBuiltinSet<string intrinsic_name, string type_range,
- list<list<string>> suffixes_prototypes,
- list<int> intrinsic_types> {
- let IRName = intrinsic_name, IRNameMask = intrinsic_name # "_mask",
- IntrinsicTypes = intrinsic_types in {
- foreach s_p = suffixes_prototypes in {
- let Name = NAME # "_" # s_p[0] in {
- defvar suffix = s_p[1];
- defvar prototype = s_p[2];
- def : RVVBuiltin<suffix, prototype, type_range>;
- }
- }
- }
-}
-
-// IntrinsicTypes is output, op0, op1 [-1, 0, 1]
-multiclass RVVOutOp0Op1BuiltinSet<string intrinsic_name, string type_range,
- list<list<string>> suffixes_prototypes>
- : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes,
- [-1, 0, 1]>;
-
-multiclass RVVOutBuiltinSet<string intrinsic_name, string type_range,
- list<list<string>> suffixes_prototypes>
- : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1]>;
-
-multiclass RVVOp0BuiltinSet<string intrinsic_name, string type_range,
- list<list<string>> suffixes_prototypes>
- : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [0]>;
-
-// IntrinsicTypes is output, op1 [-1, 1]
-multiclass RVVOutOp1BuiltinSet<string intrinsic_name, string type_range,
- list<list<string>> suffixes_prototypes>
- : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1, 1]>;
-
-multiclass RVVOp0Op1BuiltinSet<string intrinsic_name, string type_range,
- list<list<string>> suffixes_prototypes>
- : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [0, 1]>;
-
-multiclass RVVOutOp1Op2BuiltinSet<string intrinsic_name, string type_range,
- list<list<string>> suffixes_prototypes>
- : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1, 1, 2]>;
-
-multiclass RVVSignedBinBuiltinSet
- : RVVOutOp1BuiltinSet<NAME, "csil",
- [["vv", "v", "vvv"],
- ["vx", "v", "vve"]]>;
-
-multiclass RVVUnsignedBinBuiltinSet
- : RVVOutOp1BuiltinSet<NAME, "csil",
- [["vv", "Uv", "UvUvUv"],
- ["vx", "Uv", "UvUvUe"]]>;
-
-multiclass RVVIntBinBuiltinSet
- : RVVSignedBinBuiltinSet,
- RVVUnsignedBinBuiltinSet;
-
-multiclass RVVSlideOneBuiltinSet
- : RVVOutOp1BuiltinSet<NAME, "csil",
- [["vx", "v", "vve"],
- ["vx", "Uv", "UvUve"]]>;
-
-multiclass RVVSignedShiftBuiltinSet
- : RVVOutOp1BuiltinSet<NAME, "csil",
- [["vv", "v", "vvUv"],
- ["vx", "v", "vvz"]]>;
-
-multiclass RVVUnsignedShiftBuiltinSet
- : RVVOutOp1BuiltinSet<NAME, "csil",
- [["vv", "Uv", "UvUvUv"],
- ["vx", "Uv", "UvUvz"]]>;
-
-multiclass RVVShiftBuiltinSet
- : RVVSignedShiftBuiltinSet,
- RVVUnsignedShiftBuiltinSet;
-
-let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
- multiclass RVVSignedNShiftBuiltinSet
- : RVVOutOp0Op1BuiltinSet<NAME, "csil",
- [["wv", "v", "vwUv"],
- ["wx", "v", "vwz"]]>;
- multiclass RVVUnsignedNShiftBuiltinSet
- : RVVOutOp0Op1BuiltinSet<NAME, "csil",
- [["wv", "Uv", "UvUwUv"],
- ["wx", "Uv", "UvUwz"]]>;
-}
-
-multiclass RVVCarryinBuiltinSet
- : RVVOutOp1BuiltinSet<NAME, "csil",
- [["vvm", "v", "vvvm"],
- ["vxm", "v", "vvem"],
- ["vvm", "Uv", "UvUvUvm"],
- ["vxm", "Uv", "UvUvUem"]]>;
-
-multiclass RVVCarryOutInBuiltinSet<string intrinsic_name>
- : RVVOp0Op1BuiltinSet<intrinsic_name, "csil",
- [["vvm", "vm", "mvvm"],
- ["vxm", "vm", "mvem"],
- ["vvm", "Uvm", "mUvUvm"],
- ["vxm", "Uvm", "mUvUem"]]>;
-
-multiclass RVVSignedMaskOutBuiltinSet
- : RVVOp0Op1BuiltinSet<NAME, "csil",
- [["vv", "vm", "mvv"],
- ["vx", "vm", "mve"]]>;
-
-multiclass RVVUnsignedMaskOutBuiltinSet
- : RVVOp0Op1BuiltinSet<NAME, "csil",
- [["vv", "Uvm", "mUvUv"],
- ["vx", "Uvm", "mUvUe"]]>;
-
-multiclass RVVIntMaskOutBuiltinSet
- : RVVSignedMaskOutBuiltinSet,
- RVVUnsignedMaskOutBuiltinSet;
-
-class RVVIntExt<string intrinsic_name, string suffix, string prototype,
- string type_range>
- : RVVBuiltin<suffix, prototype, type_range> {
- let IRName = intrinsic_name;
- let IRNameMask = intrinsic_name # "_mask";
- let MangledName = NAME;
- let IntrinsicTypes = [-1, 0];
-}
-
-let HasMaskedOffOperand = false in {
- multiclass RVVIntTerBuiltinSet {
- defm "" : RVVOutOp1BuiltinSet<NAME, "csil",
- [["vv", "v", "vvvv"],
- ["vx", "v", "vvev"],
- ["vv", "Uv", "UvUvUvUv"],
- ["vx", "Uv", "UvUvUeUv"]]>;
- }
- multiclass RVVFloatingTerBuiltinSet {
- defm "" : RVVOutOp1BuiltinSet<NAME, "xfd",
- [["vv", "v", "vvvv"],
- ["vf", "v", "vvev"]]>;
- }
-}
-
-let HasMaskedOffOperand = false, Log2LMUL = [-2, -1, 0, 1, 2] in {
- multiclass RVVFloatingWidenTerBuiltinSet {
- defm "" : RVVOutOp1Op2BuiltinSet<NAME, "xf",
- [["vv", "w", "wwvv"],
- ["vf", "w", "wwev"]]>;
- }
-}
-
-multiclass RVVFloatingBinBuiltinSet
- : RVVOutOp1BuiltinSet<NAME, "xfd",
- [["vv", "v", "vvv"],
- ["vf", "v", "vve"]]>;
-
-multiclass RVVFloatingBinVFBuiltinSet
- : RVVOutOp1BuiltinSet<NAME, "xfd",
- [["vf", "v", "vve"]]>;
-
-multiclass RVVFloatingMaskOutBuiltinSet
- : RVVOp0Op1BuiltinSet<NAME, "xfd",
- [["vv", "vm", "mvv"],
- ["vf", "vm", "mve"]]>;
-
-multiclass RVVFloatingMaskOutVFBuiltinSet
- : RVVOp0Op1BuiltinSet<NAME, "fd",
- [["vf", "vm", "mve"]]>;
-
-class RVVMaskBinBuiltin : RVVOutBuiltin<"m", "mmm", "c"> {
- let Name = NAME # "_mm";
- let HasMask = false;
-}
-
-class RVVMaskUnaryBuiltin : RVVOutBuiltin<"m", "mm", "c"> {
- let Name = NAME # "_m";
-}
-
-class RVVMaskNullaryBuiltin : RVVOutBuiltin<"m", "m", "c"> {
- let Name = NAME # "_m";
- let HasMask = false;
- let HasNoMaskedOverloaded = false;
-}
-
-class RVVMaskOp0Builtin<string prototype> : RVVOp0Builtin<"m", prototype, "c"> {
- let Name = NAME # "_m";
- let HasMaskedOffOperand = false;
-}
-
-let HasMaskedOffOperand = false in {
- multiclass RVVSlideBuiltinSet {
- defm "" : RVVOutBuiltinSet<NAME, "csilxfd",
- [["vx","v", "vvvz"]]>;
- defm "" : RVVOutBuiltinSet<NAME, "csil",
- [["vx","Uv", "UvUvUvz"]]>;
- }
-}
-
-class RVVFloatingUnaryBuiltin<string builtin_suffix, string ir_suffix,
- string prototype>
- : RVVOutBuiltin<ir_suffix, prototype, "xfd"> {
- let Name = NAME # "_" # builtin_suffix;
-}
-
-class RVVFloatingUnaryVVBuiltin : RVVFloatingUnaryBuiltin<"v", "v", "vv">;
-
-class RVVConvBuiltin<string suffix, string prototype, string type_range,
- string mangled_name>
- : RVVBuiltin<suffix, prototype, type_range> {
- let IntrinsicTypes = [-1, 0];
- let MangledName = mangled_name;
-}
-
-class RVVConvToSignedBuiltin<string mangled_name>
- : RVVConvBuiltin<"Iv", "Ivv", "xfd", mangled_name>;
-
-class RVVConvToUnsignedBuiltin<string mangled_name>
- : RVVConvBuiltin<"Uv", "Uvv", "xfd", mangled_name>;
-
-class RVVConvToWidenSignedBuiltin<string mangled_name>
- : RVVConvBuiltin<"Iw", "Iwv", "xf", mangled_name>;
-
-class RVVConvToWidenUnsignedBuiltin<string mangled_name>
- : RVVConvBuiltin<"Uw", "Uwv", "xf", mangled_name>;
-
-class RVVConvToNarrowingSignedBuiltin<string mangled_name>
- : RVVConvBuiltin<"Iv", "IvFw", "csi", mangled_name>;
-
-class RVVConvToNarrowingUnsignedBuiltin<string mangled_name>
- : RVVConvBuiltin<"Uv", "UvFw", "csi", mangled_name>;
-
-let HasMaskedOffOperand = false in {
- multiclass RVVSignedReductionBuiltin {
- defm "" : RVVOutOp1BuiltinSet<NAME, "csil",
- [["vs", "vSv", "SvSvvSv"]]>;
- }
- multiclass RVVUnsignedReductionBuiltin {
- defm "" : RVVOutOp1BuiltinSet<NAME, "csil",
- [["vs", "UvUSv", "USvUSvUvUSv"]]>;
- }
- multiclass RVVFloatingReductionBuiltin {
- defm "" : RVVOutOp1BuiltinSet<NAME, "xfd",
- [["vs", "vSv", "SvSvvSv"]]>;
- }
- multiclass RVVFloatingWidenReductionBuiltin {
- defm "" : RVVOutOp1BuiltinSet<NAME, "xf",
- [["vs", "vSw", "SwSwvSw"]]>;
- }
-}
-
-multiclass RVVIntReductionBuiltinSet
- : RVVSignedReductionBuiltin,
- RVVUnsignedReductionBuiltin;
-
-// For widen operation which has different mangling name.
-multiclass RVVWidenBuiltinSet<string intrinsic_name, string type_range,
- list<list<string>> suffixes_prototypes> {
- let Log2LMUL = [-3, -2, -1, 0, 1, 2],
- IRName = intrinsic_name, IRNameMask = intrinsic_name # "_mask" in {
- foreach s_p = suffixes_prototypes in {
- let Name = NAME # "_" # s_p[0],
- MangledName = NAME # "_" # s_p[0] in {
- defvar suffix = s_p[1];
- defvar prototype = s_p[2];
- def : RVVOutOp0Op1Builtin<suffix, prototype, type_range>;
- }
- }
- }
-}
-
-// For widen operation with widen operand which has different mangling name.
-multiclass RVVWidenWOp0BuiltinSet<string intrinsic_name, string type_range,
- list<list<string>> suffixes_prototypes> {
- let Log2LMUL = [-3, -2, -1, 0, 1, 2],
- IRName = intrinsic_name, IRNameMask = intrinsic_name # "_mask" in {
- foreach s_p = suffixes_prototypes in {
- let Name = NAME # "_" # s_p[0],
- MangledName = NAME # "_" # s_p[0] in {
- defvar suffix = s_p[1];
- defvar prototype = s_p[2];
- def : RVVOutOp1Builtin<suffix, prototype, type_range>;
- }
- }
- }
-}
-
-multiclass RVVSignedWidenBinBuiltinSet
- : RVVWidenBuiltinSet<NAME, "csi",
- [["vv", "w", "wvv"],
- ["vx", "w", "wve"]]>;
-
-multiclass RVVSignedWidenOp0BinBuiltinSet
- : RVVWidenWOp0BuiltinSet<NAME # "_w", "csi",
- [["wv", "w", "wwv"],
- ["wx", "w", "wwe"]]>;
-
-multiclass RVVUnsignedWidenBinBuiltinSet
- : RVVWidenBuiltinSet<NAME, "csi",
- [["vv", "Uw", "UwUvUv"],
- ["vx", "Uw", "UwUvUe"]]>;
-
-multiclass RVVUnsignedWidenOp0BinBuiltinSet
- : RVVWidenWOp0BuiltinSet<NAME # "_w", "csi",
- [["wv", "Uw", "UwUwUv"],
- ["wx", "Uw", "UwUwUe"]]>;
-
-multiclass RVVFloatingWidenBinBuiltinSet
- : RVVWidenBuiltinSet<NAME, "xf",
- [["vv", "w", "wvv"],
- ["vf", "w", "wve"]]>;
-
-multiclass RVVFloatingWidenOp0BinBuiltinSet
- : RVVWidenWOp0BuiltinSet<NAME # "_w", "xf",
- [["wv", "w", "wwv"],
- ["wf", "w", "wwe"]]>;
+include "riscv_vector_common.td"
defvar TypeList = ["c","s","i","l","x","f","d"];
defvar EEWList = [["8", "(Log2EEW:3)"],
@@ -556,30 +24,25 @@ class IsFloat<string type> {
bit val = !or(!eq(type, "x"), !eq(type, "f"), !eq(type, "d"));
}
-let HasNoMaskedOverloaded = false,
- ManualCodegen = [{
- IntrinsicTypes = {ResultType, Ops[1]->getType()};
- Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
- }],
- ManualCodegenMask= [{
- // Move mask to right before vl.
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
- IntrinsicTypes = {ResultType, Ops[3]->getType()};
- Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
- }] in {
- class RVVVLEMaskBuiltin : RVVBuiltin<"m", "mPCUe", "c"> {
- let Name = "vle1_v";
- let IRName = "vle1";
- let HasMask = false;
+let SupportOverloading = false,
+ MaskedPolicyScheme = NonePolicy in {
+ class RVVVLEMaskBuiltin : RVVOutBuiltin<"m", "mPCUe", "c"> {
+ let Name = "vlm_v";
+ let IRName = "vlm";
+ let HasMasked = false;
}
+}
+
+let SupportOverloading = false,
+ UnMaskedPolicyScheme = HasPassthruOperand in {
multiclass RVVVLEBuiltin<list<string> types> {
let Name = NAME # "_v",
IRName = "vle",
- IRNameMask ="vle_mask" in {
+ MaskedIRName ="vle_mask" in {
foreach type = types in {
- def : RVVBuiltin<"v", "vPCe", type>;
+ def : RVVOutBuiltin<"v", "vPCe", type>;
if !not(IsFloat<type>.val) then {
- def : RVVBuiltin<"Uv", "UvPCUe", type>;
+ def : RVVOutBuiltin<"Uv", "UvPCUe", type>;
}
}
}
@@ -589,41 +52,36 @@ let HasNoMaskedOverloaded = false,
multiclass RVVVLEFFBuiltin<list<string> types> {
let Name = NAME # "_v",
IRName = "vleff",
- IRNameMask = "vleff_mask",
- HasNoMaskedOverloaded = false,
+ MaskedIRName = "vleff_mask",
+ SupportOverloading = false,
+ UnMaskedPolicyScheme = HasPassthruOperand,
ManualCodegen = [{
{
- IntrinsicTypes = {ResultType, Ops[2]->getType()};
- Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
- Value *NewVL = Ops[1];
- Ops.erase(Ops.begin() + 1);
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
- llvm::Value *V = Builder.CreateExtractValue(LoadValue, {0});
- // Store new_vl.
- clang::CharUnits Align =
- CGM.getNaturalTypeAlignment(getContext().getSizeType());
- Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {1}),
- Address(NewVL, Align));
- return V;
- }
- }],
- ManualCodegenMask = [{
- {
- // Move mask to right before vl.
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
- IntrinsicTypes = {ResultType, Ops[4]->getType()};
- Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
+ if (IsMasked) {
+ // Move mask to right before vl.
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+ if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+ IntrinsicTypes = {ResultType, Ops[4]->getType()};
+ } else {
+ if (PolicyAttrs & RVV_VTA)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ IntrinsicTypes = {ResultType, Ops[3]->getType()};
+ }
Value *NewVL = Ops[2];
Ops.erase(Ops.begin() + 2);
llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
llvm::Value *V = Builder.CreateExtractValue(LoadValue, {0});
// Store new_vl.
- clang::CharUnits Align =
- CGM.getNaturalTypeAlignment(getContext().getSizeType());
- Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {1}),
- Address(NewVL, Align));
+ clang::CharUnits Align;
+ if (IsMasked)
+ Align = CGM.getNaturalPointeeTypeAlignment(E->getArg(E->getNumArgs()-2)->getType());
+ else
+ Align = CGM.getNaturalPointeeTypeAlignment(E->getArg(1)->getType());
+ llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {1});
+ Builder.CreateStore(Val, Address(NewVL, Val->getType(), Align));
return V;
}
}] in {
@@ -640,75 +98,71 @@ multiclass RVVVLEFFBuiltin<list<string> types> {
multiclass RVVVLSEBuiltin<list<string> types> {
let Name = NAME # "_v",
IRName = "vlse",
- IRNameMask ="vlse_mask",
- HasNoMaskedOverloaded = false,
- ManualCodegen = [{
- IntrinsicTypes = {ResultType, Ops[2]->getType()};
- Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
- }],
- ManualCodegenMask= [{
- // Move mask to right before vl.
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
- IntrinsicTypes = {ResultType, Ops[4]->getType()};
- Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
- }] in {
+ MaskedIRName ="vlse_mask",
+ SupportOverloading = false,
+ UnMaskedPolicyScheme = HasPassthruOperand in {
foreach type = types in {
- def : RVVBuiltin<"v", "vPCet", type>;
+ def : RVVOutBuiltin<"v", "vPCet", type>;
if !not(IsFloat<type>.val) then {
- def : RVVBuiltin<"Uv", "UvPCUet", type>;
+ def : RVVOutBuiltin<"Uv", "UvPCUet", type>;
}
}
}
}
multiclass RVVIndexedLoad<string op> {
- let ManualCodegen = [{
- IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType()};
- Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
- }],
- ManualCodegenMask = [{
- // Move mask to right before vl.
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
- IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops[4]->getType()};
- Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
- }] in {
- foreach type = TypeList in {
- foreach eew_list = EEWList in {
- defvar eew = eew_list[0];
- defvar eew_type = eew_list[1];
- let Name = op # eew # "_v", IRName = op, IRNameMask = op # "_mask" in {
- def: RVVBuiltin<"v", "vPCe" # eew_type # "Uv", type>;
- if !not(IsFloat<type>.val) then {
- def: RVVBuiltin<"Uv", "UvPCUe" # eew_type # "Uv", type>;
- }
- }
+ let UnMaskedPolicyScheme = HasPassthruOperand in {
+ foreach type = TypeList in {
+ foreach eew_list = EEWList[0-2] in {
+ defvar eew = eew_list[0];
+ defvar eew_type = eew_list[1];
+ let Name = op # eew # "_v", IRName = op, MaskedIRName = op # "_mask",
+ RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"],
+ []<string>) in {
+ def: RVVOutOp1Builtin<"v", "vPCe" # eew_type # "Uv", type>;
+ if !not(IsFloat<type>.val) then {
+ def: RVVOutOp1Builtin<"Uv", "UvPCUe" # eew_type # "Uv", type>;
+ }
}
}
+ defvar eew64 = "64";
+ defvar eew64_type = "(Log2EEW:6)";
+ let Name = op # eew64 # "_v", IRName = op, MaskedIRName = op # "_mask",
+ RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin", "RV64"],
+ ["RV64"]) in {
+ def: RVVOutOp1Builtin<"v", "vPCe" # eew64_type # "Uv", type>;
+ if !not(IsFloat<type>.val) then {
+ def: RVVOutOp1Builtin<"Uv", "UvPCUe" # eew64_type # "Uv", type>;
+ }
+ }
+ }
}
}
let HasMaskedOffOperand = false,
+ MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
- // Builtin: (ptr, value, vl). Intrinsic: (value, ptr, vl)
- std::swap(Ops[0], Ops[1]);
- Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
- IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType()};
- }],
- ManualCodegenMask= [{
- // Builtin: (mask, ptr, value, vl). Intrinsic: (value, ptr, mask, vl)
- std::swap(Ops[0], Ops[2]);
- Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
- IntrinsicTypes = {Ops[0]->getType(), Ops[3]->getType()};
+ if (IsMasked) {
+ // Builtin: (mask, ptr, value, vl). Intrinsic: (value, ptr, mask, vl)
+ std::swap(Ops[0], Ops[2]);
+ } else {
+ // Builtin: (ptr, value, vl). Intrinsic: (value, ptr, vl)
+ std::swap(Ops[0], Ops[1]);
+ }
+ if (IsMasked)
+ IntrinsicTypes = {Ops[0]->getType(), Ops[3]->getType()};
+ else
+ IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType()};
}] in {
class RVVVSEMaskBuiltin : RVVBuiltin<"m", "0PUem", "c"> {
- let Name = "vse1_v";
- let IRName = "vse1";
- let HasMask = false;
+ let Name = "vsm_v";
+ let IRName = "vsm";
+ let HasMasked = false;
}
multiclass RVVVSEBuiltin<list<string> types> {
let Name = NAME # "_v",
IRName = "vse",
- IRNameMask = "vse_mask" in {
+ MaskedIRName = "vse_mask" in {
foreach type = types in {
def : RVVBuiltin<"v", "0Pev", type>;
if !not(IsFloat<type>.val) then {
@@ -722,19 +176,21 @@ let HasMaskedOffOperand = false,
multiclass RVVVSSEBuiltin<list<string> types> {
let Name = NAME # "_v",
IRName = "vsse",
- IRNameMask = "vsse_mask",
+ MaskedIRName = "vsse_mask",
HasMaskedOffOperand = false,
+ MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
- // Builtin: (ptr, stride, value, vl). Intrinsic: (value, ptr, stride, vl)
- std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
- Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
- IntrinsicTypes = {Ops[0]->getType(), Ops[3]->getType()};
- }],
- ManualCodegenMask= [{
- // Builtin: (mask, ptr, stride, value, vl). Intrinsic: (value, ptr, stride, mask, vl)
- std::swap(Ops[0], Ops[3]);
- Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
- IntrinsicTypes = {Ops[0]->getType(), Ops[4]->getType()};
+ if (IsMasked) {
+ // Builtin: (mask, ptr, stride, value, vl). Intrinsic: (value, ptr, stride, mask, vl)
+ std::swap(Ops[0], Ops[3]);
+ } else {
+ // Builtin: (ptr, stride, value, vl). Intrinsic: (value, ptr, stride, vl)
+ std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
+ }
+ if (IsMasked)
+ IntrinsicTypes = {Ops[0]->getType(), Ops[4]->getType()};
+ else
+ IntrinsicTypes = {Ops[0]->getType(), Ops[3]->getType()};
}] in {
foreach type = types in {
def : RVVBuiltin<"v", "0Petv", type>;
@@ -747,34 +203,122 @@ multiclass RVVVSSEBuiltin<list<string> types> {
multiclass RVVIndexedStore<string op> {
let HasMaskedOffOperand = false,
+ MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
- // Builtin: (ptr, index, value, vl). Intrinsic: (value, ptr, index, vl)
- std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
- Ops[1] = Builder.CreateBitCast(Ops[1],Ops[0]->getType()->getPointerTo());
- IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType(), Ops[3]->getType()};
- }],
- ManualCodegenMask= [{
- // Builtin: (mask, ptr, index, value, vl). Intrinsic: (value, ptr, index, mask, vl)
- std::swap(Ops[0], Ops[3]);
- Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
- IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType(), Ops[4]->getType()};
+ if (IsMasked) {
+ // Builtin: (mask, ptr, index, value, vl). Intrinsic: (value, ptr, index, mask, vl)
+ std::swap(Ops[0], Ops[3]);
+ } else {
+ // Builtin: (ptr, index, value, vl). Intrinsic: (value, ptr, index, vl)
+ std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
+ }
+ if (IsMasked)
+ IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType(), Ops[4]->getType()};
+ else
+ IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType(), Ops[3]->getType()};
}] in {
foreach type = TypeList in {
- foreach eew_list = EEWList in {
+ foreach eew_list = EEWList[0-2] in {
defvar eew = eew_list[0];
defvar eew_type = eew_list[1];
- let Name = op # eew # "_v", IRName = op, IRNameMask = op # "_mask" in {
+ let Name = op # eew # "_v", IRName = op, MaskedIRName = op # "_mask",
+ RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"],
+ []<string>) in {
def : RVVBuiltin<"v", "0Pe" # eew_type # "Uvv", type>;
if !not(IsFloat<type>.val) then {
def : RVVBuiltin<"Uv", "0PUe" # eew_type # "UvUv", type>;
}
}
}
+ defvar eew64 = "64";
+ defvar eew64_type = "(Log2EEW:6)";
+ let Name = op # eew64 # "_v", IRName = op, MaskedIRName = op # "_mask",
+ RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin", "RV64"],
+ ["RV64"]) in {
+ def : RVVBuiltin<"v", "0Pe" # eew64_type # "Uvv", type>;
+ if !not(IsFloat<type>.val) then {
+ def : RVVBuiltin<"Uv", "0PUe" # eew64_type # "UvUv", type>;
+ }
+ }
}
}
}
defvar NFList = [2, 3, 4, 5, 6, 7, 8];
+/*
+A segment load builtin has different variants.
+
+Therefore a segment unit-stride load builtin can have 4 variants,
+1. When unmasked and the policies are all specified as agnostic:
+(Address0, ..., Address{NF - 1}, Ptr, VL)
+2. When masked and the policies are all specified as agnostic:
+(Address0, ..., Address{NF - 1}, Mask, Ptr, VL)
+3. When unmasked and one of the policies is specified as undisturbed:
+(Address0, ..., Address{NF - 1}, Maskedoff0, ..., Maskedoff{NF - 1},
+ Ptr, VL)
+4. When masked and one of the policies is specified as undisturbed:
+(Address0, ..., Address{NF - 1}, Mask, Maskedoff0, ..., Maskedoff{NF - 1},
+ Ptr, VL)
+
+Other variants of segment load builtin share the same structure, but they
+have their own extra parameter.
+
+The segment unit-stride fault-only-first load builtin has a 'NewVL'
+operand after the 'Ptr' operand.
+1. When unmasked and the policies are all specified as agnostic:
+(Address0, ..., Address{NF - 1}, Ptr, NewVL, VL)
+2. When masked and the policies are all specified as agnostic:
+(Address0, ..., Address{NF - 1}, Mask, Ptr, NewVL, VL)
+3. When unmasked and one of the policies is specified as undisturbed:
+(Address0, ..., Address{NF - 1}, Maskedoff0, ..., Maskedoff{NF - 1},
+ Ptr, NewVL, VL)
+4. When masked and one of the policies is specified as undisturbed:
+(Address0, ..., Address{NF - 1}, Mask, Maskedoff0, ..., Maskedoff{NF - 1},
+ Ptr, NewVL, VL)
+
+The segment strided load builtin has a 'Stride' operand after the 'Ptr'
+operand.
+1. When unmasked and the policies are all specified as agnostic:
+(Address0, ..., Address{NF - 1}, Ptr, Stride, VL)
+2. When masked and the policies are all specified as agnostic:
+(Address0, ..., Address{NF - 1}, Mask, Ptr, Stride, VL)
+3. When unmasked and one of the policies is specified as undisturbed:
+(Address0, ..., Address{NF - 1}, Maskedoff0, ..., Maskedoff{NF - 1},
+ Ptr, Stride, VL)
+4. When masked and one of the policies is specified as undisturbed:
+(Address0, ..., Address{NF - 1}, Mask, Maskedoff0, ..., Maskedoff{NF - 1},
+ Ptr, Stride, VL)
+
+The segment indexed load builtin has a 'Idx' operand after the 'Ptr' operand.
+1. When unmasked and the policies are all specified as agnostic:
+(Address0, ..., Address{NF - 1}, Ptr, Idx, VL)
+2. When masked and the policies are all specified as agnostic:
+(Address0, ..., Address{NF - 1}, Mask, Ptr, Idx, VL)
+3. When unmasked and one of the policies is specified as undisturbed:
+(Address0, ..., Address{NF - 1}, Maskedoff0, ..., Maskedoff{NF - 1},
+ Ptr, Idx, VL)
+4. When masked and one of the policies is specified as undisturbed:
+(Address0, ..., Address{NF - 1}, Mask, Maskedoff0, ..., Maskedoff{NF - 1},
+ Ptr, Idx, VL)
+
+Segment load intrinsics has different variants similar to their builtins.
+
+Segment unit-stride load intrinsic,
+ Masked: (Vector0, ..., Vector{NF - 1}, Ptr, Mask, VL, Policy)
+ Unmasked: (Vector0, ..., Vector{NF - 1}, Ptr, VL)
+Segment unit-stride fault-only-first load intrinsic,
+ Masked: (Vector0, ..., Vector{NF - 1}, Ptr, Mask, VL, Policy)
+ Unmasked: (Vector0, ..., Vector{NF - 1}, Ptr, VL)
+Segment strided load intrinsic,
+ Masked: (Vector0, ..., Vector{NF - 1}, Ptr, Stride, Mask, VL, Policy)
+ Unmasked: (Vector0, ..., Vector{NF - 1}, Ptr, Stride, VL)
+Segment indexed load intrinsic,
+ Masked: (Vector0, ..., Vector{NF - 1}, Ptr, Index, Mask, VL, Policy)
+ Unmasked: (Vector0, ..., Vector{NF - 1}, Ptr, Index, VL)
+
+The Vector(s) is poison when the policy behavior allows us to not care
+about any masked-off elements.
+*/
class PVString<int nf, bit signed> {
string S =
@@ -787,7 +331,400 @@ class PVString<int nf, bit signed> {
!eq(nf, 8): !if(signed, "PvPvPvPvPvPvPvPv", "PUvPUvPUvPUvPUvPUvPUvPUv"));
}
-multiclass RVVUnitStridedSegLoad<string op> {
+class VString<int nf, bit signed> {
+ string S = !cond(!eq(nf, 2): !if(signed, "vv", "UvUv"),
+ !eq(nf, 3): !if(signed, "vvv", "UvUvUv"),
+ !eq(nf, 4): !if(signed, "vvvv", "UvUvUvUv"),
+ !eq(nf, 5): !if(signed, "vvvvv", "UvUvUvUvUv"),
+ !eq(nf, 6): !if(signed, "vvvvvv", "UvUvUvUvUvUv"),
+ !eq(nf, 7): !if(signed, "vvvvvvv", "UvUvUvUvUvUvUv"),
+ !eq(nf, 8): !if(signed, "vvvvvvvv", "UvUvUvUvUvUvUvUv"));
+}
+
+
+class FixedVString<int fixed_lmul, int num, string vec> {
+ string V = "(LFixedLog2LMUL:" # fixed_lmul # ")" # vec;
+ string S = !interleave(!listsplat(V, num), "");
+}
+
+multiclass RVVNonTupleVCreateBuiltin<int dst_lmul, list<int> src_lmul_list> {
+ defvar dst_v = FixedVString<dst_lmul, 1, "v">.V;
+ defvar dst_uv = FixedVString<dst_lmul, 1, "Uv">.V;
+ foreach src_lmul = src_lmul_list in {
+ defvar num = !shl(1, !sub(dst_lmul, src_lmul));
+
+ defvar src_v = FixedVString<src_lmul, num, "v">.V;
+ defvar src_s = FixedVString<src_lmul, num, "v">.S;
+ def vcreate # src_v # dst_v : RVVBuiltin<src_v # dst_v,
+ dst_v # src_s,
+ "csilxfd", dst_v>;
+
+ defvar src_uv = FixedVString<src_lmul, num, "Uv">.V;
+ defvar src_us = FixedVString<src_lmul, num, "Uv">.S;
+ def vcreate_u # src_uv # dst_uv : RVVBuiltin<src_uv # dst_uv,
+ dst_uv # src_us,
+ "csil", dst_uv>;
+ }
+}
+
+multiclass RVVPseudoUnaryBuiltin<string IR, string type_range> {
+ let Name = NAME,
+ IRName = IR,
+ MaskedIRName = IR # "_mask",
+ UnMaskedPolicyScheme = HasPassthruOperand,
+ ManualCodegen = [{
+ {
+ if (IsMasked) {
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+ if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ } else {
+ if (PolicyAttrs & RVV_VTA)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ }
+ auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
+ Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy));
+
+ if (IsMasked) {
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+ // maskedoff, op1, op2, mask, vl, policy
+ IntrinsicTypes = {ResultType, ElemTy, Ops[4]->getType()};
+ } else {
+ // passthru, op1, op2, vl
+ IntrinsicTypes = {ResultType, ElemTy, Ops[3]->getType()};
+ }
+ break;
+ }
+ }] in {
+ def : RVVBuiltin<"v", "vv", type_range>;
+ }
+}
+
+multiclass RVVPseudoVNotBuiltin<string IR, string type_range> {
+ let Name = NAME,
+ IRName = IR,
+ MaskedIRName = IR # "_mask",
+ UnMaskedPolicyScheme = HasPassthruOperand,
+ ManualCodegen = [{
+ {
+ if (IsMasked) {
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+ if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ } else {
+ if (PolicyAttrs & RVV_VTA)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ }
+ auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
+ Ops.insert(Ops.begin() + 2,
+ llvm::Constant::getAllOnesValue(ElemTy));
+ if (IsMasked) {
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+ // maskedoff, op1, po2, mask, vl, policy
+ IntrinsicTypes = {ResultType,
+ ElemTy,
+ Ops[4]->getType()};
+ } else {
+ // passthru, op1, op2, vl
+ IntrinsicTypes = {ResultType,
+ ElemTy,
+ Ops[3]->getType()};
+ }
+ break;
+ }
+ }] in {
+ def : RVVBuiltin<"v", "vv", type_range>;
+ def : RVVBuiltin<"Uv", "UvUv", type_range>;
+ }
+}
+
+multiclass RVVPseudoMaskBuiltin<string IR, string type_range> {
+ let Name = NAME,
+ IRName = IR,
+ HasMasked = false,
+ ManualCodegen = [{
+ {
+ // op1, vl
+ IntrinsicTypes = {ResultType,
+ Ops[1]->getType()};
+ Ops.insert(Ops.begin() + 1, Ops[0]);
+ break;
+ }
+ }] in {
+ def : RVVBuiltin<"m", "mm", type_range>;
+ }
+}
+
+multiclass RVVPseudoVFUnaryBuiltin<string IR, string type_range> {
+ let Name = NAME,
+ IRName = IR,
+ MaskedIRName = IR # "_mask",
+ UnMaskedPolicyScheme = HasPassthruOperand,
+ ManualCodegen = [{
+ {
+ if (IsMasked) {
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+ if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ Ops.insert(Ops.begin() + 2, Ops[1]);
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+ // maskedoff, op1, op2, mask, vl
+ IntrinsicTypes = {ResultType,
+ Ops[2]->getType(),
+ Ops.back()->getType()};
+ } else {
+ if (PolicyAttrs & RVV_VTA)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ // op1, po2, vl
+ IntrinsicTypes = {ResultType,
+ Ops[1]->getType(), Ops[2]->getType()};
+ Ops.insert(Ops.begin() + 2, Ops[1]);
+ break;
+ }
+ break;
+ }
+ }] in {
+ def : RVVBuiltin<"v", "vv", type_range>;
+ }
+}
+
+multiclass RVVPseudoVWCVTBuiltin<string IR, string MName, string type_range,
+ list<list<string>> suffixes_prototypes> {
+ let Name = NAME,
+ OverloadedName = MName,
+ IRName = IR,
+ MaskedIRName = IR # "_mask",
+ UnMaskedPolicyScheme = HasPassthruOperand,
+ ManualCodegen = [{
+ {
+ if (IsMasked) {
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+ if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ } else {
+ if (PolicyAttrs & RVV_VTA)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ }
+ auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
+ Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy));
+ if (IsMasked) {
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+ // maskedoff, op1, op2, mask, vl, policy
+ IntrinsicTypes = {ResultType,
+ Ops[1]->getType(),
+ ElemTy,
+ Ops[4]->getType()};
+ } else {
+ // passtru, op1, op2, vl
+ IntrinsicTypes = {ResultType,
+ Ops[1]->getType(),
+ ElemTy,
+ Ops[3]->getType()};
+ }
+ break;
+ }
+ }] in {
+ foreach s_p = suffixes_prototypes in {
+ def : RVVBuiltin<s_p[0], s_p[1], type_range>;
+ }
+ }
+}
+
+multiclass RVVPseudoVNCVTBuiltin<string IR, string MName, string type_range,
+ list<list<string>> suffixes_prototypes> {
+ let Name = NAME,
+ OverloadedName = MName,
+ IRName = IR,
+ MaskedIRName = IR # "_mask",
+ UnMaskedPolicyScheme = HasPassthruOperand,
+ ManualCodegen = [{
+ {
+ if (IsMasked) {
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+ if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ } else {
+ if (PolicyAttrs & RVV_VTA)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ }
+ Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(Ops.back()->getType()));
+ if (IsMasked) {
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+ // maskedoff, op1, xlen, mask, vl
+ IntrinsicTypes = {ResultType,
+ Ops[1]->getType(),
+ Ops[4]->getType(),
+ Ops[4]->getType()};
+ } else {
+ // passthru, op1, xlen, vl
+ IntrinsicTypes = {ResultType,
+ Ops[1]->getType(),
+ Ops[3]->getType(),
+ Ops[3]->getType()};
+ }
+ break;
+ }
+ }] in {
+ foreach s_p = suffixes_prototypes in {
+ def : RVVBuiltin<s_p[0], s_p[1], type_range>;
+ }
+ }
+}
+
+let HeaderCode =
+[{
+#define __riscv_vlenb() __builtin_rvv_vlenb()
+}] in
+def vlenb_macro: RVVHeader;
+
+let HasBuiltinAlias = false, HasVL = false, HasMasked = false,
+ UnMaskedPolicyScheme = NonePolicy, MaskedPolicyScheme = NonePolicy,
+ Log2LMUL = [0], IRName = "",
+ ManualCodegen = [{
+ {
+ LLVMContext &Context = CGM.getLLVMContext();
+ llvm::MDBuilder MDHelper(Context);
+
+ llvm::Metadata *Ops[] = {llvm::MDString::get(Context, "vlenb")};
+ llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
+ llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
+ llvm::Function *F =
+ CGM.getIntrinsic(llvm::Intrinsic::read_register, {SizeTy});
+ return Builder.CreateCall(F, Metadata);
+ }
+ }] in
+{
+ def vlenb : RVVBuiltin<"", "u", "i">;
+}
+
+// 6. Configuration-Setting Instructions
+// 6.1. vsetvli/vsetvl instructions
+
+// vsetvl/vsetvlmax are a macro because they require constant integers in SEW
+// and LMUL.
+let HeaderCode =
+[{
+#define __riscv_vsetvl_e8mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 6)
+#define __riscv_vsetvl_e8mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 7)
+#define __riscv_vsetvl_e8m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 0)
+#define __riscv_vsetvl_e8m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 1)
+#define __riscv_vsetvl_e8m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 2)
+#define __riscv_vsetvl_e8m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 3)
+
+#define __riscv_vsetvl_e16mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 7)
+#define __riscv_vsetvl_e16m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 0)
+#define __riscv_vsetvl_e16m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 1)
+#define __riscv_vsetvl_e16m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 2)
+#define __riscv_vsetvl_e16m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 3)
+
+#define __riscv_vsetvl_e32m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 0)
+#define __riscv_vsetvl_e32m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 1)
+#define __riscv_vsetvl_e32m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 2)
+#define __riscv_vsetvl_e32m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 3)
+
+#if __riscv_v_elen >= 64
+#define __riscv_vsetvl_e8mf8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 5)
+#define __riscv_vsetvl_e16mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 6)
+#define __riscv_vsetvl_e32mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 7)
+
+#define __riscv_vsetvl_e64m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 0)
+#define __riscv_vsetvl_e64m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 1)
+#define __riscv_vsetvl_e64m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 2)
+#define __riscv_vsetvl_e64m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 3)
+#endif
+
+#define __riscv_vsetvlmax_e8mf4() __builtin_rvv_vsetvlimax(0, 6)
+#define __riscv_vsetvlmax_e8mf2() __builtin_rvv_vsetvlimax(0, 7)
+#define __riscv_vsetvlmax_e8m1() __builtin_rvv_vsetvlimax(0, 0)
+#define __riscv_vsetvlmax_e8m2() __builtin_rvv_vsetvlimax(0, 1)
+#define __riscv_vsetvlmax_e8m4() __builtin_rvv_vsetvlimax(0, 2)
+#define __riscv_vsetvlmax_e8m8() __builtin_rvv_vsetvlimax(0, 3)
+
+#define __riscv_vsetvlmax_e16mf2() __builtin_rvv_vsetvlimax(1, 7)
+#define __riscv_vsetvlmax_e16m1() __builtin_rvv_vsetvlimax(1, 0)
+#define __riscv_vsetvlmax_e16m2() __builtin_rvv_vsetvlimax(1, 1)
+#define __riscv_vsetvlmax_e16m4() __builtin_rvv_vsetvlimax(1, 2)
+#define __riscv_vsetvlmax_e16m8() __builtin_rvv_vsetvlimax(1, 3)
+
+#define __riscv_vsetvlmax_e32m1() __builtin_rvv_vsetvlimax(2, 0)
+#define __riscv_vsetvlmax_e32m2() __builtin_rvv_vsetvlimax(2, 1)
+#define __riscv_vsetvlmax_e32m4() __builtin_rvv_vsetvlimax(2, 2)
+#define __riscv_vsetvlmax_e32m8() __builtin_rvv_vsetvlimax(2, 3)
+
+#if __riscv_v_elen >= 64
+#define __riscv_vsetvlmax_e8mf8() __builtin_rvv_vsetvlimax(0, 5)
+#define __riscv_vsetvlmax_e16mf4() __builtin_rvv_vsetvlimax(1, 6)
+#define __riscv_vsetvlmax_e32mf2() __builtin_rvv_vsetvlimax(2, 7)
+
+#define __riscv_vsetvlmax_e64m1() __builtin_rvv_vsetvlimax(3, 0)
+#define __riscv_vsetvlmax_e64m2() __builtin_rvv_vsetvlimax(3, 1)
+#define __riscv_vsetvlmax_e64m4() __builtin_rvv_vsetvlimax(3, 2)
+#define __riscv_vsetvlmax_e64m8() __builtin_rvv_vsetvlimax(3, 3)
+#endif
+
+}] in
+def vsetvl_macro: RVVHeader;
+
+let HasBuiltinAlias = false,
+ HasVL = false,
+ HasMasked = false,
+ MaskedPolicyScheme = NonePolicy,
+ Log2LMUL = [0],
+ ManualCodegen = [{IntrinsicTypes = {ResultType};}] in // Set XLEN type
+{
+ def vsetvli : RVVBuiltin<"", "zzKzKz", "i">;
+ def vsetvlimax : RVVBuiltin<"", "zKzKz", "i">;
+}
+
+// 7. Vector Loads and Stores
+// 7.4. Vector Unit-Stride Instructions
+def vlm: RVVVLEMaskBuiltin;
+defm vle8: RVVVLEBuiltin<["c"]>;
+defm vle16: RVVVLEBuiltin<["s"]>;
+let Name = "vle16_v", RequiredFeatures = ["Zvfhmin"] in
+ defm vle16_h: RVVVLEBuiltin<["x"]>;
+defm vle32: RVVVLEBuiltin<["i","f"]>;
+defm vle64: RVVVLEBuiltin<["l","d"]>;
+
+def vsm : RVVVSEMaskBuiltin;
+defm vse8 : RVVVSEBuiltin<["c"]>;
+defm vse16: RVVVSEBuiltin<["s"]>;
+let Name = "vse16_v", RequiredFeatures = ["Zvfhmin"] in
+ defm vse16_h: RVVVSEBuiltin<["x"]>;
+defm vse32: RVVVSEBuiltin<["i","f"]>;
+defm vse64: RVVVSEBuiltin<["l","d"]>;
+
+// 7.5. Vector Strided Instructions
+defm vlse8: RVVVLSEBuiltin<["c"]>;
+defm vlse16: RVVVLSEBuiltin<["s"]>;
+let Name = "vlse16_v", RequiredFeatures = ["Zvfhmin"] in
+ defm vlse16_h: RVVVLSEBuiltin<["x"]>;
+defm vlse32: RVVVLSEBuiltin<["i","f"]>;
+defm vlse64: RVVVLSEBuiltin<["l","d"]>;
+
+defm vsse8 : RVVVSSEBuiltin<["c"]>;
+defm vsse16: RVVVSSEBuiltin<["s"]>;
+let Name = "vsse16_v", RequiredFeatures = ["Zvfhmin"] in
+ defm vsse16_h: RVVVSSEBuiltin<["x"]>;
+defm vsse32: RVVVSSEBuiltin<["i","f"]>;
+defm vsse64: RVVVSSEBuiltin<["l","d"]>;
+
+// 7.6. Vector Indexed Instructions
+defm : RVVIndexedLoad<"vluxei">;
+defm : RVVIndexedLoad<"vloxei">;
+
+defm : RVVIndexedStore<"vsuxei">;
+defm : RVVIndexedStore<"vsoxei">;
+
+// 7.7. Unit-stride Fault-Only-First Loads
+defm vle8ff: RVVVLEFFBuiltin<["c"]>;
+defm vle16ff: RVVVLEFFBuiltin<["s"]>;
+let Name = "vle16ff_v", RequiredFeatures = ["Zvfhmin"] in
+ defm vle16ff: RVVVLEFFBuiltin<["x"]>;
+defm vle32ff: RVVVLEFFBuiltin<["i", "f"]>;
+defm vle64ff: RVVVLEFFBuiltin<["l", "d"]>;
+
+multiclass RVVUnitStridedSegLoadTuple<string op> {
foreach type = TypeList in {
defvar eew = !cond(!eq(type, "c") : "8",
!eq(type, "s") : "16",
@@ -799,65 +736,56 @@ multiclass RVVUnitStridedSegLoad<string op> {
foreach nf = NFList in {
let Name = op # nf # "e" # eew # "_v",
IRName = op # nf,
- IRNameMask = op # nf # "_mask",
+ MaskedIRName = op # nf # "_mask",
NF = nf,
- HasNoMaskedOverloaded = false,
+ RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"],
+ []<string>),
ManualCodegen = [{
{
- // builtin: (val0 address, val1 address, ..., ptr, vl)
- IntrinsicTypes = {Ops[0]->getType()->getPointerElementType(),
- Ops[NF + 1]->getType()};
- // intrinsic: (ptr, vl)
- llvm::Value *Operands[] = {Ops[NF], Ops[NF + 1]};
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
- clang::CharUnits Align = CharUnits::fromQuantity(
- IntrinsicTypes[0]->getScalarSizeInBits() / 8);
- llvm::Value *V;
- for (unsigned I = 0; I < NF; ++I) {
- V = Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {I}),
- Address(Ops[I], Align));
- }
- return V;
- }
- }],
- ManualCodegenMask = [{
- {
- // builtin: (val0 address, ..., mask, maskedoff0, ..., ptr, vl)
- // intrinsic: (maskedoff0, ..., ptr, mask, vl)
- IntrinsicTypes = {Ops[0]->getType()->getPointerElementType(),
- Ops[2 * NF + 2]->getType()};
+ llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0];
+ IntrinsicTypes = {ElementVectorType, Ops.back()->getType()};
SmallVector<llvm::Value*, 12> Operands;
- for (unsigned I = 0; I < NF; ++I)
- Operands.push_back(Ops[NF + I + 1]);
- Operands.push_back(Ops[2 * NF + 1]);
- Operands.push_back(Ops[NF]);
- Operands.push_back(Ops[2 * NF + 2]);
- assert(Operands.size() == NF + 3);
+
+ bool NoPassthru =
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
+ (!IsMasked && (PolicyAttrs & RVV_VTA));
+ unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
+
+ if (NoPassthru) { // Push poison into passthru
+ Operands.append(NF, llvm::PoisonValue::get(ElementVectorType));
+ } else { // Push intrinsics operands into passthru
+ llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
+ for (unsigned I = 0; I < NF; ++I)
+ Operands.push_back(Builder.CreateExtractValue(PassthruOperand, {I}));
+ }
+
+ Operands.push_back(Ops[Offset]); // Ptr
+ if (IsMasked)
+ Operands.push_back(Ops[0]);
+ Operands.push_back(Ops[Offset + 1]); // VL
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+
llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
- clang::CharUnits Align = CharUnits::fromQuantity(
- IntrinsicTypes[0]->getScalarSizeInBits() / 8);
- llvm::Value *V;
- for (unsigned I = 0; I < NF; ++I) {
- V = Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {I}),
- Address(Ops[I], Align));
- }
- return V;
+ if (ReturnValue.isNull())
+ return LoadValue;
+ else
+ return Builder.CreateStore(LoadValue, ReturnValue.getValue());
}
- }] in {
- defvar PV = PVString<nf, /*signed=*/true>.S;
- defvar PUV = PVString<nf, /*signed=*/false>.S;
- def : RVVBuiltin<"v", "0" # PV # "PCe", type>;
- if !not(IsFloat<type>.val) then {
- def : RVVBuiltin<"Uv", "0" # PUV # "PCUe", type>;
+ }] in {
+ defvar T = "(Tuple:" # nf # ")";
+ def : RVVBuiltin<T # "v", T # "vPCe", type>;
+ if !not(IsFloat<type>.val) then {
+ def : RVVBuiltin<T # "Uv", T # "UvPCUe", type>;
}
}
}
}
}
-multiclass RVVUnitStridedSegLoadFF<string op> {
+multiclass RVVUnitStridedSegStoreTuple<string op> {
foreach type = TypeList in {
defvar eew = !cond(!eq(type, "c") : "8",
!eq(type, "s") : "16",
@@ -867,71 +795,50 @@ multiclass RVVUnitStridedSegLoadFF<string op> {
!eq(type, "f") : "32",
!eq(type, "d") : "64");
foreach nf = NFList in {
- let Name = op # nf # "e" # eew # "ff_v",
- IRName = op # nf # "ff",
- IRNameMask = op # nf # "ff_mask",
- NF = nf,
- HasNoMaskedOverloaded = false,
- ManualCodegen = [{
- {
- // builtin: (val0 address, val1 address, ..., ptr, new_vl, vl)
- IntrinsicTypes = {Ops[0]->getType()->getPointerElementType(),
- Ops[NF + 2]->getType()};
- // intrinsic: (ptr, vl)
- llvm::Value *Operands[] = {Ops[NF], Ops[NF + 2]};
- Value *NewVL = Ops[NF + 1];
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
- clang::CharUnits Align = CharUnits::fromQuantity(
- IntrinsicTypes[0]->getScalarSizeInBits() / 8);
- for (unsigned I = 0; I < NF; ++I) {
- Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {I}),
- Address(Ops[I], Align));
- }
- // Store new_vl.
- return Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {NF}),
- Address(NewVL, Align));
- }
- }],
- ManualCodegenMask = [{
+ let Name = op # nf # "e" # eew # "_v",
+ IRName = op # nf,
+ MaskedIRName = op # nf # "_mask",
+ NF = nf,
+ HasMaskedOffOperand = false,
+ RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"],
+ []<string>),
+ ManualCodegen = [{
{
- // builtin: (val0 address, ..., mask, maskedoff0, ..., ptr, new_vl, vl)
- // intrinsic: (maskedoff0, ..., ptr, mask, vl)
- IntrinsicTypes = {Ops[0]->getType()->getPointerElementType(),
- Ops[2 * NF + 3]->getType()};
+ // Masked
+ // Builtin: (mask, ptr, v_tuple, vl)
+ // Intrinsic: (val0, val1, ..., ptr, mask, vl)
+ // Unmasked
+ // Builtin: (ptr, v_tuple, vl)
+ // Intrinsic: (val0, val1, ..., ptr, vl)
+ unsigned Offset = IsMasked ? 1 : 0;
+ llvm::Value *VTupleOperand = Ops[Offset + 1];
+
SmallVector<llvm::Value*, 12> Operands;
- for (unsigned I = 0; I < NF; ++I)
- Operands.push_back(Ops[NF + I + 1]);
- Operands.push_back(Ops[2 * NF + 1]);
- Operands.push_back(Ops[NF]);
- Operands.push_back(Ops[2 * NF + 3]);
- Value *NewVL = Ops[2 * NF + 2];
- assert(Operands.size() == NF + 3);
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
- clang::CharUnits Align = CharUnits::fromQuantity(
- IntrinsicTypes[0]->getScalarSizeInBits() / 8);
for (unsigned I = 0; I < NF; ++I) {
- Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {I}),
- Address(Ops[I], Align));
+ llvm::Value *V = Builder.CreateExtractValue(VTupleOperand, {I});
+ Operands.push_back(V);
}
- // Store new_vl.
- return Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {NF}),
- Address(NewVL, Align));
- }
- }] in {
- defvar PV = PVString<nf, /*signed=*/true>.S;
- defvar PUV = PVString<nf, /*signed=*/false>.S;
- def : RVVBuiltin<"v", "0" # PV # "PCe" # "Pz", type>;
- if !not(IsFloat<type>.val) then {
- def : RVVBuiltin<"Uv", "0" # PUV # "PCUe" # "Pz", type>;
+ Operands.push_back(Ops[Offset]); // Ptr
+ if (IsMasked)
+ Operands.push_back(Ops[0]);
+ Operands.push_back(Ops[Offset + 2]); // VL
+
+ IntrinsicTypes = {Operands[0]->getType(), Operands.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
+ }
+ }] in {
+ defvar T = "(Tuple:" # nf # ")";
+ def : RVVBuiltin<T # "v", "0Pe" # T # "v", type>;
+ if !not(IsFloat<type>.val) then {
+ def : RVVBuiltin<T # "Uv", "0PUe" # T # "Uv", type>;
}
}
}
}
}
-multiclass RVVStridedSegLoad<string op> {
+multiclass RVVUnitStridedSegLoadFFTuple<string op> {
foreach type = TypeList in {
defvar eew = !cond(!eq(type, "c") : "8",
!eq(type, "s") : "16",
@@ -941,145 +848,134 @@ multiclass RVVStridedSegLoad<string op> {
!eq(type, "f") : "32",
!eq(type, "d") : "64");
foreach nf = NFList in {
- let Name = op # nf # "e" # eew # "_v",
- IRName = op # nf,
- IRNameMask = op # nf # "_mask",
+ let Name = op # nf # "e" # eew # "ff_v",
+ IRName = op # nf # "ff",
+ MaskedIRName = op # nf # "ff_mask",
NF = nf,
- HasNoMaskedOverloaded = false,
+ RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"],
+ []<string>),
ManualCodegen = [{
{
- // builtin: (val0 address, val1 address, ..., ptr, stride, vl)
- IntrinsicTypes = {Ops[0]->getType()->getPointerElementType(),
- Ops[NF + 2]->getType()};
- // intrinsic: (ptr, stride, vl)
- llvm::Value *Operands[] = {Ops[NF], Ops[NF + 1], Ops[NF + 2]};
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
- clang::CharUnits Align = CharUnits::fromQuantity(
- IntrinsicTypes[0]->getScalarSizeInBits() / 8);
- llvm::Value *V;
- for (unsigned I = 0; I < NF; ++I) {
- V = Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {I}),
- Address(Ops[I], Align));
- }
- return V;
- }
- }],
- ManualCodegenMask = [{
- {
- // builtin: (val0 address, ..., mask, maskedoff0, ..., ptr, stride, vl)
- // intrinsic: (maskedoff0, ..., ptr, stride, mask, vl)
- IntrinsicTypes = {Ops[0]->getType()->getPointerElementType(),
- Ops[2 * NF + 3]->getType()};
+ llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0];
+ IntrinsicTypes = {ElementVectorType, Ops.back()->getType()};
SmallVector<llvm::Value*, 12> Operands;
- for (unsigned I = 0; I < NF; ++I)
- Operands.push_back(Ops[NF + I + 1]);
- Operands.push_back(Ops[2 * NF + 1]);
- Operands.push_back(Ops[2 * NF + 2]);
- Operands.push_back(Ops[NF]);
- Operands.push_back(Ops[2 * NF + 3]);
- assert(Operands.size() == NF + 4);
+
+ bool NoPassthru =
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
+ (!IsMasked && (PolicyAttrs & RVV_VTA));
+ unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
+
+ if (NoPassthru) { // Push poison into passthru
+ Operands.append(NF, llvm::PoisonValue::get(ElementVectorType));
+ } else { // Push intrinsics operands into passthru
+ llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
+ for (unsigned I = 0; I < NF; ++I)
+ Operands.push_back(Builder.CreateExtractValue(PassthruOperand, {I}));
+ }
+
+ Operands.push_back(Ops[Offset]); // Ptr
+ if (IsMasked)
+ Operands.push_back(Ops[0]);
+ Operands.push_back(Ops[Offset + 2]); // vl
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+
llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
- clang::CharUnits Align = CharUnits::fromQuantity(
- IntrinsicTypes[0]->getScalarSizeInBits() / 8);
- llvm::Value *V;
+ // Get alignment from the new vl operand
+ clang::CharUnits Align =
+ CGM.getNaturalPointeeTypeAlignment(E->getArg(Offset + 1)->getType());
+
+ llvm::Value *ReturnTuple = llvm::PoisonValue::get(ResultType);
for (unsigned I = 0; I < NF; ++I) {
- V = Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {I}),
- Address(Ops[I], Align));
+ llvm::Value *V = Builder.CreateExtractValue(LoadValue, {I});
+ ReturnTuple = Builder.CreateInsertValue(ReturnTuple, V, {I});
}
- return V;
+
+ // Store new_vl
+ llvm::Value *V = Builder.CreateExtractValue(LoadValue, {NF});
+ Builder.CreateStore(V, Address(Ops[Offset + 1], V->getType(), Align));
+
+ if (ReturnValue.isNull())
+ return ReturnTuple;
+ else
+ return Builder.CreateStore(ReturnTuple, ReturnValue.getValue());
}
- }] in {
- defvar PV = PVString<nf, /*signed=*/true>.S;
- defvar PUV = PVString<nf, /*signed=*/false>.S;
- def : RVVBuiltin<"v", "0" # PV # "PCe" # "t", type>;
- if !not(IsFloat<type>.val) then {
- def : RVVBuiltin<"Uv", "0" # PUV # "PCUe" # "t", type>;
+ }] in {
+ defvar T = "(Tuple:" # nf # ")";
+ def : RVVBuiltin<T # "v", T # "vPCePz", type>;
+ if !not(IsFloat<type>.val) then {
+ def : RVVBuiltin<T # "Uv", T # "UvPCUePz", type>;
}
}
}
}
}
-multiclass RVVIndexedSegLoad<string op> {
+multiclass RVVStridedSegLoadTuple<string op> {
foreach type = TypeList in {
- foreach eew_info = EEWList in {
- defvar eew = eew_info[0];
- defvar eew_type = eew_info[1];
+ defvar eew = !cond(!eq(type, "c") : "8",
+ !eq(type, "s") : "16",
+ !eq(type, "i") : "32",
+ !eq(type, "l") : "64",
+ !eq(type, "x") : "16",
+ !eq(type, "f") : "32",
+ !eq(type, "d") : "64");
foreach nf = NFList in {
- let Name = op # nf # "ei" # eew # "_v",
+ let Name = op # nf # "e" # eew # "_v",
IRName = op # nf,
- IRNameMask = op # nf # "_mask",
+ MaskedIRName = op # nf # "_mask",
NF = nf,
+ RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"],
+ []<string>),
ManualCodegen = [{
{
- // builtin: (val0 address, val1 address, ..., ptr, index, vl)
- IntrinsicTypes = {Ops[0]->getType()->getPointerElementType(),
- Ops[NF + 1]->getType(), Ops[NF + 2]->getType()};
- // intrinsic: (ptr, index, vl)
- llvm::Value *Operands[] = {Ops[NF], Ops[NF + 1], Ops[NF + 2]};
- llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
- llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
- clang::CharUnits Align = CharUnits::fromQuantity(
- IntrinsicTypes[0]->getScalarSizeInBits() / 8);
- llvm::Value *V;
- for (unsigned I = 0; I < NF; ++I) {
- V = Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {I}),
- Address(Ops[I], Align));
- }
- return V;
- }
- }],
- ManualCodegenMask = [{
- {
- // builtin: (val0 address, ..., mask, maskedoff0, ..., ptr, index, vl)
- IntrinsicTypes = {Ops[0]->getType()->getPointerElementType(),
- Ops[2 * NF + 2]->getType(), Ops[2 * NF + 3]->getType()};
- // intrinsic: (maskedoff0, ..., ptr, index, mask, vl)
+ llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0];
+ IntrinsicTypes = {ElementVectorType, Ops.back()->getType()};
SmallVector<llvm::Value*, 12> Operands;
- for (unsigned I = 0; I < NF; ++I)
- Operands.push_back(Ops[NF + I + 1]);
- Operands.push_back(Ops[2 * NF + 1]);
- Operands.push_back(Ops[2 * NF + 2]);
- Operands.push_back(Ops[NF]);
- Operands.push_back(Ops[2 * NF + 3]);
- assert(Operands.size() == NF + 4);
+
+ bool NoPassthru =
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
+ (!IsMasked && (PolicyAttrs & RVV_VTA));
+ unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
+
+ if (NoPassthru) { // Push poison into passthru
+ Operands.append(NF, llvm::PoisonValue::get(ElementVectorType));
+ } else { // Push intrinsics operands into passthru
+ llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
+ for (unsigned I = 0; I < NF; ++I)
+ Operands.push_back(Builder.CreateExtractValue(PassthruOperand, {I}));
+ }
+
+ Operands.push_back(Ops[Offset]); // Ptr
+ Operands.push_back(Ops[Offset + 1]); // Stride
+ if (IsMasked)
+ Operands.push_back(Ops[0]);
+ Operands.push_back(Ops[Offset + 2]); // VL
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
- clang::CharUnits Align = CharUnits::fromQuantity(
- IntrinsicTypes[0]->getScalarSizeInBits() / 8);
- llvm::Value *V;
- for (unsigned I = 0; I < NF; ++I) {
- V = Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {I}),
- Address(Ops[I], Align));
- }
- return V;
+
+ if (ReturnValue.isNull())
+ return LoadValue;
+ else
+ return Builder.CreateStore(LoadValue, ReturnValue.getValue());
}
- }] in {
- defvar PV = PVString<nf, /*signed=*/true>.S;
- defvar PUV = PVString<nf, /*signed=*/false>.S;
- def : RVVBuiltin<"v", "0" # PV # "PCe" # eew_type # "Uv", type>;
- if !not(IsFloat<type>.val) then {
- def : RVVBuiltin<"Uv", "0" # PUV # "PCUe" # eew_type # "Uv", type>;
- }
+ }] in {
+ defvar T = "(Tuple:" # nf # ")";
+ def : RVVBuiltin<T # "v", T # "vPCet", type>;
+ if !not(IsFloat<type>.val) then {
+ def : RVVBuiltin<T # "Uv", T # "UvPCUet", type>;
}
}
}
}
}
-class VString<int nf, bit signed> {
- string S = !cond(!eq(nf, 2): !if(signed, "vv", "UvUv"),
- !eq(nf, 3): !if(signed, "vvv", "UvUvUv"),
- !eq(nf, 4): !if(signed, "vvvv", "UvUvUvUv"),
- !eq(nf, 5): !if(signed, "vvvvv", "UvUvUvUvUv"),
- !eq(nf, 6): !if(signed, "vvvvvv", "UvUvUvUvUvUv"),
- !eq(nf, 7): !if(signed, "vvvvvvv", "UvUvUvUvUvUvUv"),
- !eq(nf, 8): !if(signed, "vvvvvvvv", "UvUvUvUvUvUvUvUv"));
-}
-
-multiclass RVVUnitStridedSegStore<string op> {
+multiclass RVVStridedSegStoreTuple<string op> {
foreach type = TypeList in {
defvar eew = !cond(!eq(type, "c") : "8",
!eq(type, "s") : "16",
@@ -1091,85 +987,111 @@ multiclass RVVUnitStridedSegStore<string op> {
foreach nf = NFList in {
let Name = op # nf # "e" # eew # "_v",
IRName = op # nf,
- IRNameMask = op # nf # "_mask",
+ MaskedIRName = op # nf # "_mask",
NF = nf,
HasMaskedOffOperand = false,
+ MaskedPolicyScheme = NonePolicy,
+ RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"],
+ []<string>),
ManualCodegen = [{
{
- // Builtin: (ptr, val0, val1, ..., vl)
- // Intrinsic: (val0, val1, ..., ptr, vl)
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
- IntrinsicTypes = {Ops[0]->getType(), Ops[NF + 1]->getType()};
- assert(Ops.size() == NF + 2);
- }
- }],
- ManualCodegenMask = [{
- {
- // Builtin: (mask, ptr, val0, val1, ..., vl)
- // Intrinsic: (val0, val1, ..., ptr, mask, vl)
- std::rotate(Ops.begin(), Ops.begin() + 2, Ops.end() - 1);
- std::swap(Ops[NF], Ops[NF + 1]);
- IntrinsicTypes = {Ops[0]->getType(), Ops[NF + 2]->getType()};
- assert(Ops.size() == NF + 3);
+ // Masked
+ // Builtin: (mask, ptr, stride, v_tuple, vl)
+ // Intrinsic: (val0, val1, ..., ptr, stride, mask, vl)
+ // Unmasked
+ // Builtin: (ptr, stride, v_tuple, vl)
+ // Intrinsic: (val0, val1, ..., ptr, stride, vl)
+ unsigned Offset = IsMasked ? 1 : 0;
+ llvm::Value *VTupleOperand = Ops[Offset + 2];
+
+ SmallVector<llvm::Value*, 12> Operands;
+ for (unsigned I = 0; I < NF; ++I) {
+ llvm::Value *V = Builder.CreateExtractValue(VTupleOperand, {I});
+ Operands.push_back(V);
+ }
+ Operands.push_back(Ops[Offset]); // Ptr
+ Operands.push_back(Ops[Offset + 1]); // Stride
+ if (IsMasked)
+ Operands.push_back(Ops[0]);
+ Operands.push_back(Ops[Offset + 3]); // VL
+
+ IntrinsicTypes = {Operands[0]->getType(), Operands.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
}
}] in {
- defvar V = VString<nf, /*signed=*/true>.S;
- defvar UV = VString<nf, /*signed=*/false>.S;
- def : RVVBuiltin<"v", "0Pe" # V, type>;
- if !not(IsFloat<type>.val) then {
- def : RVVBuiltin<"Uv", "0PUe" # UV, type>;
+ defvar T = "(Tuple:" # nf # ")";
+ def : RVVBuiltin<T # "v", "0Pet" # T # "v", type>;
+ if !not(IsFloat<type>.val) then {
+ def : RVVBuiltin<T # "Uv", "0PUet" # T # "Uv", type>;
}
}
}
}
}
-multiclass RVVStridedSegStore<string op> {
+multiclass RVVIndexedSegLoadTuple<string op> {
foreach type = TypeList in {
- defvar eew = !cond(!eq(type, "c") : "8",
- !eq(type, "s") : "16",
- !eq(type, "i") : "32",
- !eq(type, "l") : "64",
- !eq(type, "x") : "16",
- !eq(type, "f") : "32",
- !eq(type, "d") : "64");
+ foreach eew_info = EEWList in {
+ defvar eew = eew_info[0];
+ defvar eew_type = eew_info[1];
foreach nf = NFList in {
- let Name = op # nf # "e" # eew # "_v",
+ let Name = op # nf # "ei" # eew # "_v",
IRName = op # nf,
- IRNameMask = op # nf # "_mask",
+ MaskedIRName = op # nf # "_mask",
NF = nf,
- HasMaskedOffOperand = false,
+ RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"],
+ []<string>),
ManualCodegen = [{
{
- // Builtin: (ptr, stride, val0, val1, ..., vl).
- // Intrinsic: (val0, val1, ..., ptr, stride, vl)
- std::rotate(Ops.begin(), Ops.begin() + 2, Ops.end() - 1);
- IntrinsicTypes = {Ops[0]->getType(), Ops[NF + 1]->getType()};
- assert(Ops.size() == NF + 3);
- }
- }],
- ManualCodegenMask = [{
- {
- // Builtin: (mask, ptr, stride, val0, val1, ..., vl).
- // Intrinsic: (val0, val1, ..., ptr, stride, mask, vl)
- std::rotate(Ops.begin(), Ops.begin() + 3, Ops.end() - 1);
- std::rotate(Ops.begin() + NF, Ops.begin() + NF + 1, Ops.begin() + NF + 3);
- IntrinsicTypes = {Ops[0]->getType(), Ops[NF + 1]->getType()};
- assert(Ops.size() == NF + 4);
+ llvm::Type *ElementVectorType = cast<StructType>(ResultType)->elements()[0];
+ IntrinsicTypes = {ElementVectorType, Ops.back()->getType()};
+ SmallVector<llvm::Value*, 12> Operands;
+
+ bool NoPassthru =
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) |
+ (!IsMasked && (PolicyAttrs & RVV_VTA));
+ unsigned Offset = IsMasked ? NoPassthru ? 1 : 2 : NoPassthru ? 0 : 1;
+
+ if (NoPassthru) { // Push poison into passthru
+ Operands.append(NF, llvm::PoisonValue::get(ElementVectorType));
+ } else { // Push intrinsics operands into passthru
+ llvm::Value *PassthruOperand = IsMasked ? Ops[1] : Ops[0];
+ for (unsigned I = 0; I < NF; ++I)
+ Operands.push_back(Builder.CreateExtractValue(PassthruOperand, {I}));
+ }
+
+ Operands.push_back(Ops[Offset]); // Ptr
+ Operands.push_back(Ops[Offset + 1]); // Idx
+ if (IsMasked)
+ Operands.push_back(Ops[0]);
+ Operands.push_back(Ops[Offset + 2]); // VL
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ IntrinsicTypes = {ElementVectorType, Ops[Offset + 1]->getType(),
+ Ops.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
+
+ if (ReturnValue.isNull())
+ return LoadValue;
+ else
+ return Builder.CreateStore(LoadValue, ReturnValue.getValue());
}
- }] in {
- defvar V = VString<nf, /*signed=*/true>.S;
- defvar UV = VString<nf, /*signed=*/false>.S;
- def : RVVBuiltin<"v", "0Pet" # V, type>;
+ }] in {
+ defvar T = "(Tuple:" # nf # ")";
+ def : RVVBuiltin<T # "v", T # "vPCe" # eew_type # "Uv", type>;
if !not(IsFloat<type>.val) then {
- def : RVVBuiltin<"Uv", "0PUet" # UV, type>;
+ def : RVVBuiltin<T # "Uv", T # "UvPCUe" # eew_type # "Uv", type>;
+ }
}
}
}
}
}
-multiclass RVVIndexedSegStore<string op> {
+multiclass RVVIndexedSegStoreTuple<string op> {
foreach type = TypeList in {
foreach eew_info = EEWList in {
defvar eew = eew_info[0];
@@ -1177,35 +1099,44 @@ multiclass RVVIndexedSegStore<string op> {
foreach nf = NFList in {
let Name = op # nf # "ei" # eew # "_v",
IRName = op # nf,
- IRNameMask = op # nf # "_mask",
+ MaskedIRName = op # nf # "_mask",
NF = nf,
HasMaskedOffOperand = false,
+ MaskedPolicyScheme = NonePolicy,
+ RequiredFeatures = !if(!eq(type, "x"), ["Zvfhmin"],
+ []<string>),
ManualCodegen = [{
{
- // Builtin: (ptr, index, val0, val1, ..., vl)
- // Intrinsic: (val0, val1, ..., ptr, index, vl)
- std::rotate(Ops.begin(), Ops.begin() + 2, Ops.end() - 1);
- IntrinsicTypes = {Ops[0]->getType(),
- Ops[NF + 1]->getType(), Ops[NF + 2]->getType()};
- assert(Ops.size() == NF + 3);
- }
- }],
- ManualCodegenMask = [{
- {
- // Builtin: (mask, ptr, index, val0, val1, ..., vl)
+ // Masked
+ // Builtin: (mask, ptr, index, v_tuple, vl)
// Intrinsic: (val0, val1, ..., ptr, index, mask, vl)
- std::rotate(Ops.begin(), Ops.begin() + 3, Ops.end() - 1);
- std::rotate(Ops.begin() + NF, Ops.begin() + NF + 1, Ops.begin() + NF + 3);
- IntrinsicTypes = {Ops[0]->getType(),
- Ops[NF + 1]->getType(), Ops[NF + 3]->getType()};
- assert(Ops.size() == NF + 4);
+ // Unmasked
+ // Builtin: (ptr, index, v_tuple, vl)
+ // Intrinsic: (val0, val1, ..., ptr, index, vl)
+ unsigned Offset = IsMasked ? 1 : 0;
+ llvm::Value *VTupleOperand = Ops[Offset + 2];
+
+ SmallVector<llvm::Value*, 12> Operands;
+ for (unsigned I = 0; I < NF; ++I) {
+ llvm::Value *V = Builder.CreateExtractValue(VTupleOperand, {I});
+ Operands.push_back(V);
+ }
+ Operands.push_back(Ops[Offset]); // Ptr
+ Operands.push_back(Ops[Offset + 1]); // Idx
+ if (IsMasked)
+ Operands.push_back(Ops[0]);
+ Operands.push_back(Ops[Offset + 3]); // VL
+
+ IntrinsicTypes = {Operands[0]->getType(), Ops[Offset + 1]->getType(),
+ Operands.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
}
}] in {
- defvar V = VString<nf, /*signed=*/true>.S;
- defvar UV = VString<nf, /*signed=*/false>.S;
- def : RVVBuiltin<"v", "0Pe" # eew_type # "Uv" # V, type>;
+ defvar T = "(Tuple:" # nf # ")";
+ def : RVVBuiltin<T # "v", "0Pe" # eew_type # "Uv" # T # "v", type>;
if !not(IsFloat<type>.val) then {
- def : RVVBuiltin<"Uv", "0PUe" # eew_type # "Uv" # UV, type>;
+ def : RVVBuiltin<T # "Uv", "0PUe" # eew_type # "Uv" # T # "Uv", type>;
}
}
}
@@ -1213,358 +1144,39 @@ multiclass RVVIndexedSegStore<string op> {
}
}
-multiclass RVVAMOBuiltinSet<bit has_signed = false, bit has_unsigned = false,
- bit has_fp = false> {
- defvar type_list = !if(has_fp, ["i","l","f","d"], ["i","l"]);
- foreach type = type_list in
- foreach eew_list = EEWList in {
- defvar eew = eew_list[0];
- defvar eew_index = eew_list[1];
- let Name = NAME # "ei" # eew # "_" # "v",
- IRName = NAME,
- IRNameMask = NAME # "_mask",
- HasMaskedOffOperand = false,
- ManualCodegen = [{
- // base, bindex, value, vl
- IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
- Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
- }],
- ManualCodegenMask = [{
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
- IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[4]->getType()};
- Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
- }] in {
- if has_signed then
- def : RVVBuiltin<"v", "vPe" # eew_index # "Uvv", type>;
- if !and(!not(IsFloat<type>.val), has_unsigned) then
- def : RVVBuiltin<"Uv", "UvPUe" # eew_index # "UvUv", type>;
- }
- }
-}
-
-multiclass RVVPseudoUnaryBuiltin<string IR, string type_range> {
- let Name = NAME,
- IRName = IR,
- IRNameMask = IR # "_mask",
- ManualCodegen = [{
- {
- // op1, vl
- IntrinsicTypes = {ResultType,
- cast<llvm::VectorType>(ResultType)->getElementType(),
- Ops[1]->getType()};
- Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[1]));
- break;
- }
- }],
- ManualCodegenMask = [{
- {
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
- // maskedoff, op1, mask, vl
- IntrinsicTypes = {ResultType,
- cast<llvm::VectorType>(ResultType)->getElementType(),
- Ops[3]->getType()};
- Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(IntrinsicTypes[1]));
- break;
- }
- }] in {
- def : RVVBuiltin<"v", "vv", type_range>;
- }
-}
-
-multiclass RVVPseudoVNotBuiltin<string IR, string type_range> {
- let Name = NAME,
- IRName = IR,
- IRNameMask = IR # "_mask",
- ManualCodegen = [{
- {
- // op1, vl
- IntrinsicTypes = {ResultType,
- cast<llvm::VectorType>(ResultType)->getElementType(),
- Ops[1]->getType()};
- Ops.insert(Ops.begin() + 1,
- llvm::Constant::getAllOnesValue(IntrinsicTypes[1]));
- break;
- }
- }],
- ManualCodegenMask = [{
- {
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
- // maskedoff, op1, mask, vl
- IntrinsicTypes = {ResultType,
- cast<llvm::VectorType>(ResultType)->getElementType(),
- Ops[3]->getType()};
- Ops.insert(Ops.begin() + 2,
- llvm::Constant::getAllOnesValue(IntrinsicTypes[1]));
- break;
- }
- }] in {
- def : RVVBuiltin<"v", "vv", type_range>;
- def : RVVBuiltin<"Uv", "UvUv", type_range>;
- }
-}
-
-multiclass RVVPseudoMaskBuiltin<string IR, string type_range> {
- let Name = NAME,
- IRName = IR,
- HasMask = false,
- ManualCodegen = [{
- {
- // op1, vl
- IntrinsicTypes = {ResultType,
- Ops[1]->getType()};
- Ops.insert(Ops.begin() + 1, Ops[0]);
- break;
- }
- }] in {
- def : RVVBuiltin<"m", "mm", type_range>;
- }
-}
-
-multiclass RVVPseudoVFUnaryBuiltin<string IR, string type_range> {
- let Name = NAME,
- IRName = IR,
- IRNameMask = IR # "_mask",
- ManualCodegen = [{
- {
- // op1, vl
- IntrinsicTypes = {ResultType,
- Ops[0]->getType(), Ops[1]->getType()};
- Ops.insert(Ops.begin() + 1, Ops[0]);
- break;
- }
- }],
- ManualCodegenMask = [{
- {
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
- // maskedoff, op1, mask, vl
- IntrinsicTypes = {ResultType,
- Ops[1]->getType(),
- Ops[3]->getType()};
- Ops.insert(Ops.begin() + 2, Ops[1]);
- break;
- }
- }] in {
- def : RVVBuiltin<"v", "vv", type_range>;
- }
-}
-
-multiclass RVVPseudoVWCVTBuiltin<string IR, string MName, string type_range,
- list<list<string>> suffixes_prototypes> {
- let Name = NAME,
- MangledName = MName,
- IRName = IR,
- IRNameMask = IR # "_mask",
- ManualCodegen = [{
- {
- // op1, vl
- IntrinsicTypes = {ResultType,
- Ops[0]->getType(),
- cast<llvm::VectorType>(Ops[0]->getType())->getElementType(),
- Ops[1]->getType()};
- Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[2]));
- break;
- }
- }],
- ManualCodegenMask = [{
- {
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
- // maskedoff, op1, mask, vl
- IntrinsicTypes = {ResultType,
- Ops[1]->getType(),
- cast<llvm::VectorType>(Ops[1]->getType())->getElementType(),
- Ops[3]->getType()};
- Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(IntrinsicTypes[2]));
- break;
- }
- }] in {
- foreach s_p = suffixes_prototypes in {
- def : RVVBuiltin<s_p[0], s_p[1], type_range>;
- }
- }
-}
-
-multiclass RVVPseudoVNCVTBuiltin<string IR, string MName, string type_range,
- list<list<string>> suffixes_prototypes> {
- let Name = NAME,
- MangledName = MName,
- IRName = IR,
- IRNameMask = IR # "_mask",
- ManualCodegen = [{
- {
- // op1, vl
- IntrinsicTypes = {ResultType,
- Ops[0]->getType(),
- Ops[1]->getType(),
- Ops[1]->getType()};
- Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[2]));
- break;
- }
- }],
- ManualCodegenMask = [{
- {
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
- // maskedoff, op1, mask, vl
- IntrinsicTypes = {ResultType,
- Ops[1]->getType(),
- Ops[3]->getType(),
- Ops[3]->getType()};
- Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(IntrinsicTypes[2]));
- break;
- }
- }] in {
- foreach s_p = suffixes_prototypes in {
- def : RVVBuiltin<s_p[0], s_p[1], type_range>;
- }
- }
-}
-
-// 6. Configuration-Setting Instructions
-// 6.1. vsetvli/vsetvl instructions
-let HasVL = false,
- HasMask = false,
- HasSideEffects = true,
- Log2LMUL = [0],
- ManualCodegen = [{IntrinsicTypes = {ResultType};}] in // Set XLEN type
-{
- // vsetvl is a macro because for it require constant integers in SEW and LMUL.
- let HeaderCode =
-[{
-#define vsetvl_e8mf8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 5)
-#define vsetvl_e8mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 6)
-#define vsetvl_e8mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 7)
-#define vsetvl_e8m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 0)
-#define vsetvl_e8m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 1)
-#define vsetvl_e8m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 2)
-#define vsetvl_e8m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 3)
-
-#define vsetvl_e16mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 6)
-#define vsetvl_e16mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 7)
-#define vsetvl_e16m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 0)
-#define vsetvl_e16m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 1)
-#define vsetvl_e16m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 2)
-#define vsetvl_e16m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 3)
-
-#define vsetvl_e32mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 7)
-#define vsetvl_e32m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 0)
-#define vsetvl_e32m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 1)
-#define vsetvl_e32m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 2)
-#define vsetvl_e32m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 3)
-
-#define vsetvl_e64m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 0)
-#define vsetvl_e64m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 1)
-#define vsetvl_e64m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 2)
-#define vsetvl_e64m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 3)
-
-}] in
- def vsetvli : RVVBuiltin<"", "zzKzKz", "i">;
-
- let HeaderCode =
-[{
-#define vsetvlmax_e8mf8() __builtin_rvv_vsetvlimax(0, 5)
-#define vsetvlmax_e8mf4() __builtin_rvv_vsetvlimax(0, 6)
-#define vsetvlmax_e8mf2() __builtin_rvv_vsetvlimax(0, 7)
-#define vsetvlmax_e8m1() __builtin_rvv_vsetvlimax(0, 0)
-#define vsetvlmax_e8m2() __builtin_rvv_vsetvlimax(0, 1)
-#define vsetvlmax_e8m4() __builtin_rvv_vsetvlimax(0, 2)
-#define vsetvlmax_e8m8() __builtin_rvv_vsetvlimax(0, 3)
-
-#define vsetvlmax_e16mf4() __builtin_rvv_vsetvlimax(1, 6)
-#define vsetvlmax_e16mf2() __builtin_rvv_vsetvlimax(1, 7)
-#define vsetvlmax_e16m1() __builtin_rvv_vsetvlimax(1, 0)
-#define vsetvlmax_e16m2() __builtin_rvv_vsetvlimax(1, 1)
-#define vsetvlmax_e16m4() __builtin_rvv_vsetvlimax(1, 2)
-#define vsetvlmax_e16m8() __builtin_rvv_vsetvlimax(1, 3)
-
-#define vsetvlmax_e32mf2() __builtin_rvv_vsetvlimax(2, 7)
-#define vsetvlmax_e32m1() __builtin_rvv_vsetvlimax(2, 0)
-#define vsetvlmax_e32m2() __builtin_rvv_vsetvlimax(2, 1)
-#define vsetvlmax_e32m4() __builtin_rvv_vsetvlimax(2, 2)
-#define vsetvlmax_e32m8() __builtin_rvv_vsetvlimax(2, 3)
-
-#define vsetvlmax_e64m1() __builtin_rvv_vsetvlimax(3, 0)
-#define vsetvlmax_e64m2() __builtin_rvv_vsetvlimax(3, 1)
-#define vsetvlmax_e64m4() __builtin_rvv_vsetvlimax(3, 2)
-#define vsetvlmax_e64m8() __builtin_rvv_vsetvlimax(3, 3)
-
-}] in
- def vsetvlimax : RVVBuiltin<"", "zKzKz", "i">;
-}
-
-// 7. Vector Loads and Stores
-// 7.4. Vector Unit-Stride Instructions
-def vle1: RVVVLEMaskBuiltin;
-defm vle8: RVVVLEBuiltin<["c"]>;
-defm vle16: RVVVLEBuiltin<["s","x"]>;
-defm vle32: RVVVLEBuiltin<["i","f"]>;
-defm vle64: RVVVLEBuiltin<["l","d"]>;
-
-def vse1 : RVVVSEMaskBuiltin;
-defm vse8 : RVVVSEBuiltin<["c"]>;
-defm vse16: RVVVSEBuiltin<["s","x"]>;
-defm vse32: RVVVSEBuiltin<["i","f"]>;
-defm vse64: RVVVSEBuiltin<["l","d"]>;
-
-// 7.5. Vector Strided Instructions
-defm vlse8: RVVVLSEBuiltin<["c"]>;
-defm vlse16: RVVVLSEBuiltin<["s","x"]>;
-defm vlse32: RVVVLSEBuiltin<["i","f"]>;
-defm vlse64: RVVVLSEBuiltin<["l","d"]>;
-
-defm vsse8 : RVVVSSEBuiltin<["c"]>;
-defm vsse16: RVVVSSEBuiltin<["s","x"]>;
-defm vsse32: RVVVSSEBuiltin<["i","f"]>;
-defm vsse64: RVVVSSEBuiltin<["l","d"]>;
-
-// 7.6. Vector Indexed Instructions
-defm : RVVIndexedLoad<"vluxei">;
-defm : RVVIndexedLoad<"vloxei">;
-
-defm : RVVIndexedStore<"vsuxei">;
-defm : RVVIndexedStore<"vsoxei">;
-
-// 7.7. Unit-stride Fault-Only-First Loads
-defm vle8ff: RVVVLEFFBuiltin<["c"]>;
-defm vle16ff: RVVVLEFFBuiltin<["s","x"]>;
-defm vle32ff: RVVVLEFFBuiltin<["i", "f"]>;
-defm vle64ff: RVVVLEFFBuiltin<["l", "d"]>;
-
// 7.8 Vector Load/Store Segment Instructions
-let RequiredExtension = "Zvlsseg" in {
-defm : RVVUnitStridedSegLoad<"vlseg">;
-defm : RVVUnitStridedSegLoadFF<"vlseg">;
-defm : RVVStridedSegLoad<"vlsseg">;
-defm : RVVIndexedSegLoad<"vluxseg">;
-defm : RVVIndexedSegLoad<"vloxseg">;
-defm : RVVUnitStridedSegStore<"vsseg">;
-defm : RVVStridedSegStore<"vssseg">;
-defm : RVVIndexedSegStore<"vsuxseg">;
-defm : RVVIndexedSegStore<"vsoxseg">;
-}
-
-// 8. Vector AMO Operations
-let RequiredExtension = "Zvamo" in {
-defm vamoswap : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true, /* hasFP */ true>;
-defm vamoadd : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true>;
-defm vamoxor : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true>;
-defm vamoand : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true>;
-defm vamoor : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true>;
-defm vamomin : RVVAMOBuiltinSet< /* hasSigned */ true>;
-defm vamomax : RVVAMOBuiltinSet< /* hasSigned */ true>;
-defm vamominu : RVVAMOBuiltinSet< /* hasSigned */ false, /* hasUnsigned */ true>;
-defm vamomaxu : RVVAMOBuiltinSet< /* hasSigned */ false, /* hasUnsigned */ true>;
-}
-
-// 12. Vector Integer Arithmetic Instructions
-// 12.1. Vector Single-Width Integer Add and Subtract
+let UnMaskedPolicyScheme = HasPassthruOperand,
+ IsTuple = true in {
+ defm : RVVUnitStridedSegLoadTuple<"vlseg">;
+ defm : RVVUnitStridedSegLoadFFTuple<"vlseg">;
+ defm : RVVStridedSegLoadTuple<"vlsseg">;
+ defm : RVVIndexedSegLoadTuple<"vluxseg">;
+ defm : RVVIndexedSegLoadTuple<"vloxseg">;
+}
+
+let UnMaskedPolicyScheme = NonePolicy,
+ MaskedPolicyScheme = NonePolicy,
+ IsTuple = true in {
+defm : RVVUnitStridedSegStoreTuple<"vsseg">;
+defm : RVVStridedSegStoreTuple<"vssseg">;
+defm : RVVIndexedSegStoreTuple<"vsuxseg">;
+defm : RVVIndexedSegStoreTuple<"vsoxseg">;
+}
+
+// 11. Vector Integer Arithmetic Instructions
+// 11.1. Vector Single-Width Integer Add and Subtract
+let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vadd : RVVIntBinBuiltinSet;
defm vsub : RVVIntBinBuiltinSet;
defm vrsub : RVVOutOp1BuiltinSet<"vrsub", "csil",
[["vx", "v", "vve"],
["vx", "Uv", "UvUvUe"]]>;
+}
defm vneg_v : RVVPseudoUnaryBuiltin<"vrsub", "csil">;
-// 12.2. Vector Widening Integer Add/Subtract
+// 11.2. Vector Widening Integer Add/Subtract
// Widening unsigned integer add/subtract, 2*SEW = SEW +/- SEW
+let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vwaddu : RVVUnsignedWidenBinBuiltinSet;
defm vwsubu : RVVUnsignedWidenBinBuiltinSet;
// Widening signed integer add/subtract, 2*SEW = SEW +/- SEW
@@ -1576,12 +1188,14 @@ defm vwsubu : RVVUnsignedWidenOp0BinBuiltinSet;
// Widening signed integer add/subtract, 2*SEW = 2*SEW +/- SEW
defm vwadd : RVVSignedWidenOp0BinBuiltinSet;
defm vwsub : RVVSignedWidenOp0BinBuiltinSet;
+}
defm vwcvtu_x_x_v : RVVPseudoVWCVTBuiltin<"vwaddu", "vwcvtu_x", "csi",
[["Uw", "UwUv"]]>;
defm vwcvt_x_x_v : RVVPseudoVWCVTBuiltin<"vwadd", "vwcvt_x", "csi",
[["w", "wv"]]>;
-// 12.3. Vector Integer Extension
+// 11.3. Vector Integer Extension
+let UnMaskedPolicyScheme = HasPassthruOperand in {
let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
def vsext_vf2 : RVVIntExt<"vsext", "w", "wv", "csi">;
def vzext_vf2 : RVVIntExt<"vzext", "Uw", "UwUv", "csi">;
@@ -1594,36 +1208,45 @@ let Log2LMUL = [-3, -2, -1, 0] in {
def vsext_vf8 : RVVIntExt<"vsext", "o", "ov", "c">;
def vzext_vf8 : RVVIntExt<"vzext", "Uo", "UoUv", "c">;
}
+}
-// 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
-let HasMask = false in {
- defm vadc : RVVCarryinBuiltinSet;
+// 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
+let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
+ let UnMaskedPolicyScheme = HasPassthruOperand in {
+ defm vadc : RVVCarryinBuiltinSet;
+ defm vsbc : RVVCarryinBuiltinSet;
+ }
defm vmadc : RVVCarryOutInBuiltinSet<"vmadc_carry_in">;
defm vmadc : RVVIntMaskOutBuiltinSet;
- defm vsbc : RVVCarryinBuiltinSet;
defm vmsbc : RVVCarryOutInBuiltinSet<"vmsbc_borrow_in">;
defm vmsbc : RVVIntMaskOutBuiltinSet;
}
-// 12.5. Vector Bitwise Logical Instructions
+// 11.5. Vector Bitwise Logical Instructions
+let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vand : RVVIntBinBuiltinSet;
defm vxor : RVVIntBinBuiltinSet;
defm vor : RVVIntBinBuiltinSet;
+}
defm vnot_v : RVVPseudoVNotBuiltin<"vxor", "csil">;
-// 12.6. Vector Single-Width Bit Shift Instructions
+// 11.6. Vector Single-Width Shift Instructions
+let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vsll : RVVShiftBuiltinSet;
defm vsrl : RVVUnsignedShiftBuiltinSet;
defm vsra : RVVSignedShiftBuiltinSet;
-// 12.7. Vector Narrowing Integer Right Shift Instructions
+// 11.7. Vector Narrowing Integer Right Shift Instructions
defm vnsrl : RVVUnsignedNShiftBuiltinSet;
defm vnsra : RVVSignedNShiftBuiltinSet;
+}
defm vncvt_x_x_w : RVVPseudoVNCVTBuiltin<"vnsrl", "vncvt_x", "csi",
[["v", "vw"],
["Uv", "UvUw"]]>;
-// 12.8. Vector Integer Comparison Instructions
+// 11.8. Vector Integer Compare Instructions
+let MaskedPolicyScheme = HasPassthruOperand,
+ HasTailPolicy = false in {
defm vmseq : RVVIntMaskOutBuiltinSet;
defm vmsne : RVVIntMaskOutBuiltinSet;
defm vmsltu : RVVUnsignedMaskOutBuiltinSet;
@@ -1634,14 +1257,16 @@ defm vmsgtu : RVVUnsignedMaskOutBuiltinSet;
defm vmsgt : RVVSignedMaskOutBuiltinSet;
defm vmsgeu : RVVUnsignedMaskOutBuiltinSet;
defm vmsge : RVVSignedMaskOutBuiltinSet;
+}
-// 12.9. Vector Integer Min/Max Instructions
+// 11.9. Vector Integer Min/Max Instructions
+let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vminu : RVVUnsignedBinBuiltinSet;
defm vmin : RVVSignedBinBuiltinSet;
defm vmaxu : RVVUnsignedBinBuiltinSet;
defm vmax : RVVSignedBinBuiltinSet;
-// 12.10. Vector Single-Width Integer Multiply Instructions
+// 11.10. Vector Single-Width Integer Multiply Instructions
defm vmul : RVVIntBinBuiltinSet;
defm vmulh : RVVSignedBinBuiltinSet;
defm vmulhu : RVVUnsignedBinBuiltinSet;
@@ -1649,14 +1274,15 @@ defm vmulhsu : RVVOutOp1BuiltinSet<"vmulhsu", "csil",
[["vv", "v", "vvUv"],
["vx", "v", "vvUe"]]>;
-// 12.11. Vector Integer Divide Instructions
+// 11.11. Vector Integer Divide Instructions
defm vdivu : RVVUnsignedBinBuiltinSet;
defm vdiv : RVVSignedBinBuiltinSet;
defm vremu : RVVUnsignedBinBuiltinSet;
defm vrem : RVVSignedBinBuiltinSet;
+}
-// 12.12. Vector Widening Integer Multiply Instructions
-let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+// 11.12. Vector Widening Integer Multiply Instructions
+let Log2LMUL = [-3, -2, -1, 0, 1, 2], UnMaskedPolicyScheme = HasPassthruOperand in {
defm vwmul : RVVOutOp0Op1BuiltinSet<"vwmul", "csi",
[["vv", "w", "wvv"],
["vx", "w", "wve"]]>;
@@ -1668,13 +1294,14 @@ defm vwmulsu : RVVOutOp0Op1BuiltinSet<"vwmulsu", "csi",
["vx", "w", "wvUe"]]>;
}
-// 12.13. Vector Single-Width Integer Multiply-Add Instructions
+// 11.13. Vector Single-Width Integer Multiply-Add Instructions
+let UnMaskedPolicyScheme = HasPolicyOperand in {
defm vmacc : RVVIntTerBuiltinSet;
defm vnmsac : RVVIntTerBuiltinSet;
defm vmadd : RVVIntTerBuiltinSet;
defm vnmsub : RVVIntTerBuiltinSet;
-// 12.14. Vector Widening Integer Multiply-Add Instructions
+// 11.14. Vector Widening Integer Multiply-Add Instructions
let HasMaskedOffOperand = false,
Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
defm vwmaccu : RVVOutOp1Op2BuiltinSet<"vwmaccu", "csi",
@@ -1689,184 +1316,713 @@ defm vwmaccsu : RVVOutOp1Op2BuiltinSet<"vwmaccsu", "csi",
defm vwmaccus : RVVOutOp1Op2BuiltinSet<"vwmaccus", "csi",
[["vx", "w", "wwUev"]]>;
}
+}
-// 12.15. Vector Integer Merge Instructions
-// C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (op1, op2, mask, vl)
-let HasMask = false,
+// 11.15. Vector Integer Merge Instructions
+// C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (passthru, op1, op2, mask, vl)
+let HasMasked = false,
+ UnMaskedPolicyScheme = HasPassthruOperand,
+ MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
- IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
+ // insert poison passthru
+ if (PolicyAttrs & RVV_VTA)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()};
}] in {
defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "csil",
- [["vvm", "v", "vmvv"],
- ["vxm", "v", "vmve"],
- ["vvm", "Uv", "UvmUvUv"],
- ["vxm", "Uv", "UvmUvUe"]]>;
+ [["vvm", "v", "vvvm"],
+ ["vxm", "v", "vvem"],
+ ["vvm", "Uv", "UvUvUvm"],
+ ["vxm", "Uv", "UvUvUem"]]>;
}
-// 12.16. Vector Integer Move Instructions
-let HasMask = false in {
- let MangledName = "vmv_v" in {
+// 11.16. Vector Integer Move Instructions
+let HasMasked = false,
+ UnMaskedPolicyScheme = HasPassthruOperand,
+ MaskedPolicyScheme = NonePolicy,
+ OverloadedName = "vmv_v" in {
defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csil",
[["v", "Uv", "UvUv"]]>;
- defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csilxfd",
+ defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csilfd",
[["v", "v", "vv"]]>;
- }
- let HasNoMaskedOverloaded = false in
+ let RequiredFeatures = ["Zvfhmin"] in
+ defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "x",
+ [["v", "v", "vv"]]>;
+ let SupportOverloading = false in
defm vmv_v : RVVOutBuiltinSet<"vmv_v_x", "csil",
[["x", "v", "ve"],
["x", "Uv", "UvUe"]]>;
}
-// 13. Vector Fixed-Point Arithmetic Instructions
-// 13.1. Vector Single-Width Saturating Add and Subtract
+// 12. Vector Fixed-Point Arithmetic Instructions
+let HeaderCode =
+[{
+enum __RISCV_VXRM {
+ __RISCV_VXRM_RNU = 0,
+ __RISCV_VXRM_RNE = 1,
+ __RISCV_VXRM_RDN = 2,
+ __RISCV_VXRM_ROD = 3,
+};
+}] in
+def vxrm_enum : RVVHeader;
+
+// 12.1. Vector Single-Width Saturating Add and Subtract
+let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vsaddu : RVVUnsignedBinBuiltinSet;
defm vsadd : RVVSignedBinBuiltinSet;
defm vssubu : RVVUnsignedBinBuiltinSet;
defm vssub : RVVSignedBinBuiltinSet;
-// 13.2. Vector Single-Width Averaging Add and Subtract
-defm vaaddu : RVVUnsignedBinBuiltinSet;
-defm vaadd : RVVSignedBinBuiltinSet;
-defm vasubu : RVVUnsignedBinBuiltinSet;
-defm vasub : RVVSignedBinBuiltinSet;
-
-// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
-defm vsmul : RVVSignedBinBuiltinSet;
-
-// 13.4. Vector Single-Width Scaling Shift Instructions
-defm vssrl : RVVUnsignedShiftBuiltinSet;
-defm vssra : RVVSignedShiftBuiltinSet;
-
-// 13.5. Vector Narrowing Fixed-Point Clip Instructions
-defm vnclipu : RVVUnsignedNShiftBuiltinSet;
-defm vnclip : RVVSignedNShiftBuiltinSet;
-
-// 14. Vector Floating-Point Instructions
-// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
-defm vfadd : RVVFloatingBinBuiltinSet;
-defm vfsub : RVVFloatingBinBuiltinSet;
-defm vfrsub : RVVFloatingBinVFBuiltinSet;
-
-// 14.3. Vector Widening Floating-Point Add/Subtract Instructions
-// Widening FP add/subtract, 2*SEW = SEW +/- SEW
-defm vfwadd : RVVFloatingWidenBinBuiltinSet;
-defm vfwsub : RVVFloatingWidenBinBuiltinSet;
-// Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW
-defm vfwadd : RVVFloatingWidenOp0BinBuiltinSet;
-defm vfwsub : RVVFloatingWidenOp0BinBuiltinSet;
-
-// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
-defm vfmul : RVVFloatingBinBuiltinSet;
-defm vfdiv : RVVFloatingBinBuiltinSet;
-defm vfrdiv : RVVFloatingBinVFBuiltinSet;
-
-// 14.5. Vector Widening Floating-Point Multiply
-let Log2LMUL = [-2, -1, 0, 1, 2] in {
- defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "xf",
- [["vv", "w", "wvv"],
- ["vf", "w", "wve"]]>;
-}
-
-// 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
-defm vfmacc : RVVFloatingTerBuiltinSet;
-defm vfnmacc : RVVFloatingTerBuiltinSet;
-defm vfmsac : RVVFloatingTerBuiltinSet;
-defm vfnmsac : RVVFloatingTerBuiltinSet;
-defm vfmadd : RVVFloatingTerBuiltinSet;
-defm vfnmadd : RVVFloatingTerBuiltinSet;
-defm vfmsub : RVVFloatingTerBuiltinSet;
-defm vfnmsub : RVVFloatingTerBuiltinSet;
-
-// 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
-defm vfwmacc : RVVFloatingWidenTerBuiltinSet;
-defm vfwnmacc : RVVFloatingWidenTerBuiltinSet;
-defm vfwmsac : RVVFloatingWidenTerBuiltinSet;
-defm vfwnmsac : RVVFloatingWidenTerBuiltinSet;
-
-// 14.8. Vector Floating-Point Square-Root Instruction
-def vfsqrt : RVVFloatingUnaryVVBuiltin;
-
-// 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
-def vfrsqrt7 : RVVFloatingUnaryVVBuiltin;
+let ManualCodegen = [{
+ {
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, op1, round_mode, vl)
+ // Masked: (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
+
+ SmallVector<llvm::Value*, 7> Operands;
+ bool HasMaskedOff = !(
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+ unsigned Offset = IsMasked ?
+ (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+ if (!HasMaskedOff)
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
+ else
+ Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+ Operands.push_back(Ops[Offset]); // op0
+ Operands.push_back(Ops[Offset + 1]); // op1
+
+ if (IsMasked)
+ Operands.push_back(Ops[0]); // mask
+
+ Operands.push_back(Ops[Offset + 2]); // vxrm
+ Operands.push_back(Ops[Offset + 3]); // vl
+
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(), Ops.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
+ }
+}] in {
+ // 12.2. Vector Single-Width Averaging Add and Subtract
+ defm vaaddu : RVVUnsignedBinBuiltinSetRoundingMode;
+ defm vaadd : RVVSignedBinBuiltinSetRoundingMode;
+ defm vasubu : RVVUnsignedBinBuiltinSetRoundingMode;
+ defm vasub : RVVSignedBinBuiltinSetRoundingMode;
+
+ // 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
+ defm vsmul : RVVSignedBinBuiltinSetRoundingMode;
+
+ // 12.4. Vector Single-Width Scaling Shift Instructions
+ defm vssrl : RVVUnsignedShiftBuiltinSetRoundingMode;
+ defm vssra : RVVSignedShiftBuiltinSetRoundingMode;
+}
+
+let ManualCodegen = [{
+ {
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, op1, round_mode, vl)
+ // Masked: (passthru, vector_in, vector_in/scalar_in, mask, vxrm, vl, policy)
+
+ SmallVector<llvm::Value*, 7> Operands;
+ bool HasMaskedOff = !(
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+ unsigned Offset = IsMasked ?
+ (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+ if (!HasMaskedOff)
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
+ else
+ Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+ Operands.push_back(Ops[Offset]); // op0
+ Operands.push_back(Ops[Offset + 1]); // op1
+
+ if (IsMasked)
+ Operands.push_back(Ops[0]); // mask
+
+ Operands.push_back(Ops[Offset + 2]); // vxrm
+ Operands.push_back(Ops[Offset + 3]); // vl
+
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[Offset + 1]->getType(),
+ Ops.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
+ }
+}] in {
+ // 12.5. Vector Narrowing Fixed-Point Clip Instructions
+ defm vnclipu : RVVUnsignedNShiftBuiltinSetRoundingMode;
+ defm vnclip : RVVSignedNShiftBuiltinSetRoundingMode;
+}
+}
+
+// 13. Vector Floating-Point Instructions
+let HeaderCode =
+[{
+enum __RISCV_FRM {
+ __RISCV_FRM_RNE = 0,
+ __RISCV_FRM_RTZ = 1,
+ __RISCV_FRM_RDN = 2,
+ __RISCV_FRM_RUP = 3,
+ __RISCV_FRM_RMM = 4,
+};
+}] in def frm_enum : RVVHeader;
+
+let UnMaskedPolicyScheme = HasPassthruOperand in {
+let ManualCodegen = [{
+ {
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, op1, round_mode, vl)
+ // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
+
+ SmallVector<llvm::Value*, 7> Operands;
+ bool HasMaskedOff = !(
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+ bool HasRoundModeOp = IsMasked ?
+ (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
+ (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
+
+ unsigned Offset = IsMasked ?
+ (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+ if (!HasMaskedOff)
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
+ else
+ Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+ Operands.push_back(Ops[Offset]); // op0
+ Operands.push_back(Ops[Offset + 1]); // op1
+
+ if (IsMasked)
+ Operands.push_back(Ops[0]); // mask
+
+ if (HasRoundModeOp) {
+ Operands.push_back(Ops[Offset + 2]); // frm
+ Operands.push_back(Ops[Offset + 3]); // vl
+ } else {
+ Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
+ Operands.push_back(Ops[Offset + 2]); // vl
+ }
+
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(),
+ Operands.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
+ }
+}] in {
+ let HasFRMRoundModeOp = true in {
+ // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
+ defm vfadd : RVVFloatingBinBuiltinSetRoundingMode;
+ defm vfsub : RVVFloatingBinBuiltinSetRoundingMode;
+ defm vfrsub : RVVFloatingBinVFBuiltinSetRoundingMode;
+
+ // 13.3. Vector Widening Floating-Point Add/Subtract Instructions
+ // Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW
+ defm vfwadd : RVVFloatingWidenOp0BinBuiltinSetRoundingMode;
+ defm vfwsub : RVVFloatingWidenOp0BinBuiltinSetRoundingMode;
+
+ // 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
+ defm vfmul : RVVFloatingBinBuiltinSetRoundingMode;
+ defm vfdiv : RVVFloatingBinBuiltinSetRoundingMode;
+ defm vfrdiv : RVVFloatingBinVFBuiltinSetRoundingMode;
+ }
+ // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
+ defm vfadd : RVVFloatingBinBuiltinSet;
+ defm vfsub : RVVFloatingBinBuiltinSet;
+ defm vfrsub : RVVFloatingBinVFBuiltinSet;
+
+ // 13.3. Vector Widening Floating-Point Add/Subtract Instructions
+ // Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW
+ defm vfwadd : RVVFloatingWidenOp0BinBuiltinSet;
+ defm vfwsub : RVVFloatingWidenOp0BinBuiltinSet;
+
+ // 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
+ defm vfmul : RVVFloatingBinBuiltinSet;
+ defm vfdiv : RVVFloatingBinBuiltinSet;
+ defm vfrdiv : RVVFloatingBinVFBuiltinSet;
+}
+
+let ManualCodegen = [{
+ {
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, op1, round_mode, vl)
+ // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
+
+ SmallVector<llvm::Value*, 7> Operands;
+ bool HasMaskedOff = !(
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+ bool HasRoundModeOp = IsMasked ?
+ (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
+ (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
+
+ unsigned Offset = IsMasked ?
+ (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+ if (!HasMaskedOff)
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
+ else
+ Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+ Operands.push_back(Ops[Offset]); // op0
+ Operands.push_back(Ops[Offset + 1]); // op1
+
+ if (IsMasked)
+ Operands.push_back(Ops[0]); // mask
+
+ if (HasRoundModeOp) {
+ Operands.push_back(Ops[Offset + 2]); // frm
+ Operands.push_back(Ops[Offset + 3]); // vl
+ } else {
+ Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
+ Operands.push_back(Ops[Offset + 2]); // vl
+ }
+
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[Offset + 1]->getType(),
+ Ops.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
+ }
+}] in {
+ let HasFRMRoundModeOp = true in {
+ // 13.3. Vector Widening Floating-Point Add/Subtract Instructions
+ // Widening FP add/subtract, 2*SEW = SEW +/- SEW
+ defm vfwadd : RVVFloatingWidenBinBuiltinSetRoundingMode;
+ defm vfwsub : RVVFloatingWidenBinBuiltinSetRoundingMode;
+
+ // 13.5. Vector Widening Floating-Point Multiply
+ let Log2LMUL = [-2, -1, 0, 1, 2] in {
+ defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "xf",
+ [["vv", "w", "wvvu"],
+ ["vf", "w", "wveu"]]>;
+ }
+ }
+ // 13.3. Vector Widening Floating-Point Add/Subtract Instructions
+ // Widening FP add/subtract, 2*SEW = SEW +/- SEW
+ defm vfwadd : RVVFloatingWidenBinBuiltinSet;
+ defm vfwsub : RVVFloatingWidenBinBuiltinSet;
+
+ // 13.5. Vector Widening Floating-Point Multiply
+ let Log2LMUL = [-2, -1, 0, 1, 2] in {
+ defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "xf",
+ [["vv", "w", "wvv"],
+ ["vf", "w", "wve"]]>;
+ }
+}
+}
+
+
+let UnMaskedPolicyScheme = HasPolicyOperand in {
+let ManualCodegen = [{
+ {
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, op1, round_mode, vl)
+ // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
+
+ SmallVector<llvm::Value*, 7> Operands;
+ bool HasRoundModeOp = IsMasked ? Ops.size() == 6 : Ops.size() == 5;
+
+ unsigned Offset = IsMasked ? 2 : 1;
+
+ Operands.push_back(Ops[IsMasked ? 1 : 0]); // passthrough
+
+ Operands.push_back(Ops[Offset]); // op0
+ Operands.push_back(Ops[Offset + 1]); // op1
+
+ if (IsMasked)
+ Operands.push_back(Ops[0]); // mask
+
+ if (HasRoundModeOp) {
+ Operands.push_back(Ops[Offset + 2]); // frm
+ Operands.push_back(Ops[Offset + 3]); // vl
+ } else {
+ Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
+ Operands.push_back(Ops[Offset + 2]); // vl
+ }
+
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
+ Operands.back()->getType()};
+
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+
+ return Builder.CreateCall(F, Operands, "");
+ }
+}] in {
+ let HasFRMRoundModeOp = 1 in {
+ // 13.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
+ defm vfmacc : RVVFloatingTerBuiltinSetRoundingMode;
+ defm vfnmacc : RVVFloatingTerBuiltinSetRoundingMode;
+ defm vfmsac : RVVFloatingTerBuiltinSetRoundingMode;
+ defm vfnmsac : RVVFloatingTerBuiltinSetRoundingMode;
+ defm vfmadd : RVVFloatingTerBuiltinSetRoundingMode;
+ defm vfnmadd : RVVFloatingTerBuiltinSetRoundingMode;
+ defm vfmsub : RVVFloatingTerBuiltinSetRoundingMode;
+ defm vfnmsub : RVVFloatingTerBuiltinSetRoundingMode;
+ }
+ // 13.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
+ defm vfmacc : RVVFloatingTerBuiltinSet;
+ defm vfnmacc : RVVFloatingTerBuiltinSet;
+ defm vfmsac : RVVFloatingTerBuiltinSet;
+ defm vfnmsac : RVVFloatingTerBuiltinSet;
+ defm vfmadd : RVVFloatingTerBuiltinSet;
+ defm vfnmadd : RVVFloatingTerBuiltinSet;
+ defm vfmsub : RVVFloatingTerBuiltinSet;
+ defm vfnmsub : RVVFloatingTerBuiltinSet;
+}
+
+let ManualCodegen = [{
+ {
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, op1, round_mode, vl)
+ // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
+
+ SmallVector<llvm::Value*, 7> Operands;
+ bool HasRoundModeOp = IsMasked ? Ops.size() == 6 : Ops.size() == 5;
+
+ unsigned Offset = IsMasked ? 2 : 1;
+
+ Operands.push_back(Ops[IsMasked ? 1 : 0]); // passthrough
+
+ Operands.push_back(Ops[Offset]); // op0
+ Operands.push_back(Ops[Offset + 1]); // op1
+
+ if (IsMasked)
+ Operands.push_back(Ops[0]); // mask
+
+ if (HasRoundModeOp) {
+ Operands.push_back(Ops[Offset + 2]); // frm
+ Operands.push_back(Ops[Offset + 3]); // vl
+ } else {
+ Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
+ Operands.push_back(Ops[Offset + 2]); // vl
+ }
+
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ IntrinsicTypes = {ResultType, Ops[Offset]->getType(), Ops[Offset + 1]->getType(),
+ Operands.back()->getType()};
+
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+
+ return Builder.CreateCall(F, Operands, "");
+ }
+}] in {
+ let HasFRMRoundModeOp = 1 in {
+ // 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
+ defm vfwmacc : RVVFloatingWidenTerBuiltinSetRoundingMode;
+ defm vfwnmacc : RVVFloatingWidenTerBuiltinSetRoundingMode;
+ defm vfwmsac : RVVFloatingWidenTerBuiltinSetRoundingMode;
+ defm vfwnmsac : RVVFloatingWidenTerBuiltinSetRoundingMode;
+ }
+ // 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
+ defm vfwmacc : RVVFloatingWidenTerBuiltinSet;
+ defm vfwnmacc : RVVFloatingWidenTerBuiltinSet;
+ defm vfwmsac : RVVFloatingWidenTerBuiltinSet;
+ defm vfwnmsac : RVVFloatingWidenTerBuiltinSet;
+}
+
+}
+
+let UnMaskedPolicyScheme = HasPassthruOperand in {
+let ManualCodegen = [{
+ {
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, round_mode, vl)
+ // Masked: (passthru, op0, mask, frm, vl, policy)
+
+ SmallVector<llvm::Value*, 7> Operands;
+ bool HasMaskedOff = !(
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+ bool HasRoundModeOp = IsMasked ?
+ (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4) :
+ (HasMaskedOff ? Ops.size() == 4 : Ops.size() == 3);
+
+ unsigned Offset = IsMasked ?
+ (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+ if (!HasMaskedOff)
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
+ else
+ Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+ Operands.push_back(Ops[Offset]); // op0
-// 14.10. Vector Floating-Point Reciprocal Estimate Instruction
-def vfrec7 : RVVFloatingUnaryVVBuiltin;
+ if (IsMasked)
+ Operands.push_back(Ops[0]); // mask
-// 14.11. Vector Floating-Point MIN/MAX Instructions
+ if (HasRoundModeOp) {
+ Operands.push_back(Ops[Offset + 1]); // frm
+ Operands.push_back(Ops[Offset + 2]); // vl
+ } else {
+ Operands.push_back(ConstantInt::get(Ops[Offset + 1]->getType(), 7)); // frm
+ Operands.push_back(Ops[Offset + 1]); // vl
+ }
+
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ IntrinsicTypes = {ResultType, Operands.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
+ }
+}] in {
+ let HasFRMRoundModeOp = 1 in {
+ // 13.8. Vector Floating-Point Square-Root Instruction
+ defm vfsqrt : RVVOutBuiltinSet<"vfsqrt", "xfd", [["v", "v", "vvu"]]>;
+
+ // 13.10. Vector Floating-Point Reciprocal Estimate Instruction
+ defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "xfd", [["v", "v", "vvu"]]>;
+ }
+ // 13.8. Vector Floating-Point Square-Root Instruction
+ defm vfsqrt : RVVOutBuiltinSet<"vfsqrt", "xfd", [["v", "v", "vv"]]>;
+
+ // 13.10. Vector Floating-Point Reciprocal Estimate Instruction
+ defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "xfd", [["v", "v", "vv"]]>;
+}
+
+// 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
+def vfrsqrt7 : RVVFloatingUnaryVVBuiltin;
+
+// 13.11. Vector Floating-Point MIN/MAX Instructions
defm vfmin : RVVFloatingBinBuiltinSet;
defm vfmax : RVVFloatingBinBuiltinSet;
-// 14.12. Vector Floating-Point Sign-Injection Instructions
+// 13.12. Vector Floating-Point Sign-Injection Instructions
defm vfsgnj : RVVFloatingBinBuiltinSet;
defm vfsgnjn : RVVFloatingBinBuiltinSet;
defm vfsgnjx : RVVFloatingBinBuiltinSet;
+}
defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "xfd">;
defm vfabs_v : RVVPseudoVFUnaryBuiltin<"vfsgnjx", "xfd">;
-// 14.13. Vector Floating-Point Compare Instructions
+// 13.13. Vector Floating-Point Compare Instructions
+let MaskedPolicyScheme = HasPassthruOperand,
+ HasTailPolicy = false in {
defm vmfeq : RVVFloatingMaskOutBuiltinSet;
defm vmfne : RVVFloatingMaskOutBuiltinSet;
defm vmflt : RVVFloatingMaskOutBuiltinSet;
defm vmfle : RVVFloatingMaskOutBuiltinSet;
defm vmfgt : RVVFloatingMaskOutBuiltinSet;
defm vmfge : RVVFloatingMaskOutBuiltinSet;
+}
-// 14.14. Vector Floating-Point Classify Instruction
-let Name = "vfclass_v" in
+// 13.14. Vector Floating-Point Classify Instruction
+let Name = "vfclass_v", UnMaskedPolicyScheme = HasPassthruOperand in
def vfclass : RVVOp0Builtin<"Uv", "Uvv", "xfd">;
-// 14.15. Vector Floating-Point Merge Instructio
+// 13.15. Vector Floating-Point Merge Instruction
// C/C++ Operand: (mask, op1, op2, vl), Builtin: (op1, op2, mask, vl)
-let HasMask = false,
+let HasMasked = false,
+ UnMaskedPolicyScheme = HasPassthruOperand,
+ MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
- IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
+ // insert poison passthru
+ if (PolicyAttrs & RVV_VTA)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()};
}] in {
- defm vmerge : RVVOutOp1BuiltinSet<"vfmerge", "xfd",
- [["vvm", "v", "vmvv"]]>;
+ defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "fd",
+ [["vvm", "v", "vvvm"]]>;
+ let RequiredFeatures = ["Zvfhmin"] in
+ defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "x",
+ [["vvm", "v", "vvvm"]]>;
defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "xfd",
- [["vfm", "v", "vmve"]]>;
+ [["vfm", "v", "vvem"]]>;
}
-// 14.16. Vector Floating-Point Move Instruction
-let HasMask = false, HasNoMaskedOverloaded = false in
+// 13.16. Vector Floating-Point Move Instruction
+let HasMasked = false,
+ UnMaskedPolicyScheme = HasPassthruOperand,
+ SupportOverloading = false,
+ MaskedPolicyScheme = NonePolicy,
+ OverloadedName = "vfmv_v" in
defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "xfd",
[["f", "v", "ve"]]>;
-// 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
-def vfcvt_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_xu">;
-def vfcvt_x_f_v : RVVConvToSignedBuiltin<"vfcvt_x">;
+// 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions
+let UnMaskedPolicyScheme = HasPassthruOperand in {
def vfcvt_rtz_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_rtz_xu">;
def vfcvt_rtz_x_f_v : RVVConvToSignedBuiltin<"vfcvt_rtz_x">;
-def vfcvt_f_xu_v : RVVConvBuiltin<"Fv", "FvUv", "sil", "vfcvt_f">;
-def vfcvt_f_x_v : RVVConvBuiltin<"Fv", "Fvv", "sil", "vfcvt_f">;
-// 14.18. Widening Floating-Point/Integer Type-Convert Instructions
+// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
- def vfwcvt_xu_f_v : RVVConvToWidenUnsignedBuiltin<"vfwcvt_xu">;
- def vfwcvt_x_f_v : RVVConvToWidenSignedBuiltin<"vfwcvt_x">;
def vfwcvt_rtz_xu_f_v : RVVConvToWidenUnsignedBuiltin<"vfwcvt_rtz_xu">;
def vfwcvt_rtz_x_f_v : RVVConvToWidenSignedBuiltin<"vfwcvt_rtz_x">;
def vfwcvt_f_xu_v : RVVConvBuiltin<"Fw", "FwUv", "csi", "vfwcvt_f">;
def vfwcvt_f_x_v : RVVConvBuiltin<"Fw", "Fwv", "csi", "vfwcvt_f">;
- def vfwcvt_f_f_v : RVVConvBuiltin<"w", "wv", "xf", "vfwcvt_f">;
+ def vfwcvt_f_f_v : RVVConvBuiltin<"w", "wv", "f", "vfwcvt_f">;
+ let RequiredFeatures = ["Zvfhmin"] in
+ def vfwcvt_f_f_v_fp16 : RVVConvBuiltin<"w", "wv", "x", "vfwcvt_f"> {
+ let Name = "vfwcvt_f_f_v";
+ let IRName = "vfwcvt_f_f_v";
+ let MaskedIRName = "vfwcvt_f_f_v_mask";
+ }
}
-// 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
+// 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
- def vfncvt_xu_f_w : RVVConvToNarrowingUnsignedBuiltin<"vfncvt_xu">;
- def vfncvt_x_f_w : RVVConvToNarrowingSignedBuiltin<"vfncvt_x">;
def vfncvt_rtz_xu_f_w : RVVConvToNarrowingUnsignedBuiltin<"vfncvt_rtz_xu">;
def vfncvt_rtz_x_f_w : RVVConvToNarrowingSignedBuiltin<"vfncvt_rtz_x">;
- def vfncvt_f_xu_w : RVVConvBuiltin<"Fv", "FvUw", "csi", "vfncvt_f">;
- def vfncvt_f_x_w : RVVConvBuiltin<"Fv", "Fvw", "csi", "vfncvt_f">;
- def vfncvt_f_f_w : RVVConvBuiltin<"v", "vw", "xf", "vfncvt_f">;
def vfncvt_rod_f_f_w : RVVConvBuiltin<"v", "vw", "xf", "vfncvt_rod_f">;
}
+let ManualCodegen = [{
+ {
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, frm, vl)
+ // Masked: (passthru, op0, mask, frm, vl, policy)
+ SmallVector<llvm::Value*, 7> Operands;
+ bool HasMaskedOff = !(
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+ bool HasRoundModeOp = IsMasked ?
+ (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4) :
+ (HasMaskedOff ? Ops.size() == 4 : Ops.size() == 3);
+
+ unsigned Offset = IsMasked ?
+ (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+ if (!HasMaskedOff)
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
+ else
+ Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+ Operands.push_back(Ops[Offset]); // op0
+
+ if (IsMasked)
+ Operands.push_back(Ops[0]); // mask
+
+ if (HasRoundModeOp) {
+ Operands.push_back(Ops[Offset + 1]); // frm
+ Operands.push_back(Ops[Offset + 2]); // vl
+ } else {
+ Operands.push_back(ConstantInt::get(Ops[Offset + 1]->getType(), 7)); // frm
+ Operands.push_back(Ops[Offset + 1]); // vl
+ }
-// 15. Vector Reduction Operations
-// 15.1. Vector Single-Width Integer Reduction Instructions
+ if (IsMasked)
+ Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+ IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
+ Operands.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
+ }
+}] in {
+ let HasFRMRoundModeOp = 1 in {
+ // 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
+ let OverloadedName = "vfcvt_x" in
+ defm :
+ RVVConvBuiltinSet<"vfcvt_x_f_v", "xfd", [["Iv", "Ivvu"]]>;
+ let OverloadedName = "vfcvt_xu" in
+ defm :
+ RVVConvBuiltinSet<"vfcvt_xu_f_v", "xfd", [["Uv", "Uvvu"]]>;
+ let OverloadedName = "vfcvt_f" in {
+ defm :
+ RVVConvBuiltinSet<"vfcvt_f_x_v", "sil", [["Fv", "Fvvu"]]>;
+ defm :
+ RVVConvBuiltinSet<"vfcvt_f_xu_v", "sil", [["Fv", "FvUvu"]]>;
+ }
+
+ // 13.18. Widening Floating-Point/Integer Type-Convert Instructions
+ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+ let OverloadedName = "vfwcvt_x" in
+ defm :
+ RVVConvBuiltinSet<"vfwcvt_x_f_v", "xf", [["Iw", "Iwvu"]]>;
+ let OverloadedName = "vfwcvt_xu" in
+ defm :
+ RVVConvBuiltinSet<"vfwcvt_xu_f_v", "xf", [["Uw", "Uwvu"]]>;
+ }
+ // 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
+ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+ let OverloadedName = "vfncvt_x" in
+ defm :
+ RVVConvBuiltinSet<"vfncvt_x_f_w", "csi", [["Iv", "IvFwu"]]>;
+ let OverloadedName = "vfncvt_xu" in
+ defm :
+ RVVConvBuiltinSet<"vfncvt_xu_f_w", "csi", [["Uv", "UvFwu"]]>;
+ let OverloadedName = "vfncvt_f" in {
+ defm :
+ RVVConvBuiltinSet<"vfncvt_f_x_w", "csi", [["Fv", "Fvwu"]]>;
+ defm :
+ RVVConvBuiltinSet<"vfncvt_f_xu_w", "csi", [["Fv", "FvUwu"]]>;
+ }
+ let OverloadedName = "vfncvt_f" in {
+ defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "f", [["v", "vwu"]]>;
+ let RequiredFeatures = ["Zvfhmin"] in
+ defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "x", [["v", "vwu"]]>;
+ }
+ }
+ }
+
+ // 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions
+ let OverloadedName = "vfcvt_x" in
+ defm :
+ RVVConvBuiltinSet<"vfcvt_x_f_v", "xfd", [["Iv", "Ivv"]]>;
+ let OverloadedName = "vfcvt_xu" in
+ defm :
+ RVVConvBuiltinSet<"vfcvt_xu_f_v", "xfd", [["Uv", "Uvv"]]>;
+ let OverloadedName = "vfcvt_f" in {
+ defm :
+ RVVConvBuiltinSet<"vfcvt_f_x_v", "sil", [["Fv", "Fvv"]]>;
+ defm :
+ RVVConvBuiltinSet<"vfcvt_f_xu_v", "sil", [["Fv", "FvUv"]]>;
+ }
+
+ // 13.18. Widening Floating-Point/Integer Type-Convert Instructions
+ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+ let OverloadedName = "vfwcvt_x" in
+ defm :
+ RVVConvBuiltinSet<"vfwcvt_x_f_v", "xf", [["Iw", "Iwv"]]>;
+ let OverloadedName = "vfwcvt_xu" in
+ defm :
+ RVVConvBuiltinSet<"vfwcvt_xu_f_v", "xf", [["Uw", "Uwv"]]>;
+ }
+ // 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
+ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+ let OverloadedName = "vfncvt_x" in
+ defm :
+ RVVConvBuiltinSet<"vfncvt_x_f_w", "csi", [["Iv", "IvFw"]]>;
+ let OverloadedName = "vfncvt_xu" in
+ defm :
+ RVVConvBuiltinSet<"vfncvt_xu_f_w", "csi", [["Uv", "UvFw"]]>;
+ let OverloadedName = "vfncvt_f" in {
+ defm :
+ RVVConvBuiltinSet<"vfncvt_f_x_w", "csi", [["Fv", "Fvw"]]>;
+ defm :
+ RVVConvBuiltinSet<"vfncvt_f_xu_w", "csi", [["Fv", "FvUw"]]>;
+ }
+ let OverloadedName = "vfncvt_f" in {
+ defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "f", [["v", "vw"]]>;
+ let RequiredFeatures = ["Zvfhmin"] in
+ defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "x", [["v", "vw"]]>;
+ }
+ }
+}
+}
+
+// 14. Vector Reduction Operations
+// 14.1. Vector Single-Width Integer Reduction Instructions
+let UnMaskedPolicyScheme = HasPassthruOperand,
+ MaskedPolicyScheme = HasPassthruOperand,
+ HasMaskPolicy = false in {
defm vredsum : RVVIntReductionBuiltinSet;
defm vredmaxu : RVVUnsignedReductionBuiltin;
defm vredmax : RVVSignedReductionBuiltin;
@@ -1876,34 +2032,88 @@ defm vredand : RVVIntReductionBuiltinSet;
defm vredor : RVVIntReductionBuiltinSet;
defm vredxor : RVVIntReductionBuiltinSet;
-// 15.2. Vector Widening Integer Reduction Instructions
+// 14.2. Vector Widening Integer Reduction Instructions
// Vector Widening Integer Reduction Operations
-let HasMaskedOffOperand = false in {
- defm vwredsum : RVVOutOp1BuiltinSet<"vwredsum", "csi",
- [["vs", "vSw", "SwSwvSw"]]>;
- defm vwredsumu : RVVOutOp1BuiltinSet<"vwredsumu", "csi",
- [["vs", "UvUSw", "USwUSwUvUSw"]]>;
+let HasMaskedOffOperand = true in {
+ defm vwredsum : RVVOutOp0BuiltinSet<"vwredsum", "csi",
+ [["vs", "vSw", "SwvSw"]]>;
+ defm vwredsumu : RVVOutOp0BuiltinSet<"vwredsumu", "csi",
+ [["vs", "UvUSw", "USwUvUSw"]]>;
}
-// 15.3. Vector Single-Width Floating-Point Reduction Instructions
+// 14.3. Vector Single-Width Floating-Point Reduction Instructions
defm vfredmax : RVVFloatingReductionBuiltin;
defm vfredmin : RVVFloatingReductionBuiltin;
-defm vfredsum : RVVFloatingReductionBuiltin;
-defm vfredosum : RVVFloatingReductionBuiltin;
+let ManualCodegen = [{
+ {
+ // LLVM intrinsic
+ // Unmasked: (passthru, op0, op1, round_mode, vl)
+ // Masked: (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)
+
+ SmallVector<llvm::Value*, 7> Operands;
+ bool HasMaskedOff = !(
+ (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+ (!IsMasked && PolicyAttrs & RVV_VTA));
+ bool HasRoundModeOp = IsMasked ?
+ (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
+ (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);
+
+ unsigned Offset = IsMasked ?
+ (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);
+
+ if (!HasMaskedOff)
+ Operands.push_back(llvm::PoisonValue::get(ResultType));
+ else
+ Operands.push_back(Ops[IsMasked ? 1 : 0]);
+
+ Operands.push_back(Ops[Offset]); // op0
+ Operands.push_back(Ops[Offset + 1]); // op1
+
+ if (IsMasked)
+ Operands.push_back(Ops[0]); // mask
+
+ if (HasRoundModeOp) {
+ Operands.push_back(Ops[Offset + 2]); // frm
+ Operands.push_back(Ops[Offset + 3]); // vl
+ } else {
+ Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
+ Operands.push_back(Ops[Offset + 2]); // vl
+ }
-// 15.4. Vector Widening Floating-Point Reduction Instructions
-defm vfwredsum : RVVFloatingWidenReductionBuiltin;
-defm vfwredosum : RVVFloatingWidenReductionBuiltin;
+ IntrinsicTypes = {ResultType, Ops[Offset]->getType(),
+ Ops.back()->getType()};
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Operands, "");
+ }
+}] in {
+ let HasFRMRoundModeOp = 1 in {
+ // 14.3. Vector Single-Width Floating-Point Reduction Instructions
+ defm vfredusum : RVVFloatingReductionBuiltinRoundingMode;
+ defm vfredosum : RVVFloatingReductionBuiltinRoundingMode;
+
+ // 14.4. Vector Widening Floating-Point Reduction Instructions
+ defm vfwredusum : RVVFloatingWidenReductionBuiltinRoundingMode;
+ defm vfwredosum : RVVFloatingWidenReductionBuiltinRoundingMode;
+ }
+ // 14.3. Vector Single-Width Floating-Point Reduction Instructions
+ defm vfredusum : RVVFloatingReductionBuiltin;
+ defm vfredosum : RVVFloatingReductionBuiltin;
-// 16. Vector Mask Instructions
-// 16.1. Vector Mask-Register Logical Instructions
+ // 14.4. Vector Widening Floating-Point Reduction Instructions
+ defm vfwredusum : RVVFloatingWidenReductionBuiltin;
+ defm vfwredosum : RVVFloatingWidenReductionBuiltin;
+}
+}
+
+// 15. Vector Mask Instructions
+// 15.1. Vector Mask-Register Logical Instructions
def vmand : RVVMaskBinBuiltin;
def vmnand : RVVMaskBinBuiltin;
-def vmandnot : RVVMaskBinBuiltin;
+def vmandn : RVVMaskBinBuiltin;
def vmxor : RVVMaskBinBuiltin;
def vmor : RVVMaskBinBuiltin;
def vmnor : RVVMaskBinBuiltin;
-def vmornot : RVVMaskBinBuiltin;
+def vmorn : RVVMaskBinBuiltin;
def vmxnor : RVVMaskBinBuiltin;
// pseudoinstructions
def vmclr : RVVMaskNullaryBuiltin;
@@ -1911,69 +2121,79 @@ def vmset : RVVMaskNullaryBuiltin;
defm vmmv_m : RVVPseudoMaskBuiltin<"vmand", "c">;
defm vmnot_m : RVVPseudoMaskBuiltin<"vmnand", "c">;
-// 16.2. Vector mask population count vpopc
-def vpopc : RVVMaskOp0Builtin<"um">;
+let MaskedPolicyScheme = NonePolicy in {
+// 15.2. Vector count population in mask vcpop.m
+def vcpop : RVVMaskOp0Builtin<"um">;
-// 16.3. vfirst find-first-set mask bit
+// 15.3. vfirst find-first-set mask bit
def vfirst : RVVMaskOp0Builtin<"lm">;
+}
-// 16.4. vmsbf.m set-before-first mask bit
+let MaskedPolicyScheme = HasPassthruOperand,
+ HasTailPolicy = false in {
+// 15.4. vmsbf.m set-before-first mask bit
def vmsbf : RVVMaskUnaryBuiltin;
-// 16.5. vmsif.m set-including-first mask bit
+// 15.5. vmsif.m set-including-first mask bit
def vmsif : RVVMaskUnaryBuiltin;
-// 16.6. vmsof.m set-only-first mask bit
+// 15.6. vmsof.m set-only-first mask bit
def vmsof : RVVMaskUnaryBuiltin;
+}
-let HasNoMaskedOverloaded = false in {
- // 16.8. Vector Iota Instruction
+let UnMaskedPolicyScheme = HasPassthruOperand, SupportOverloading = false in {
+ // 15.8. Vector Iota Instruction
defm viota : RVVOutBuiltinSet<"viota", "csil", [["m", "Uv", "Uvm"]]>;
- // 16.9. Vector Element Index Instruction
+ // 15.9. Vector Element Index Instruction
defm vid : RVVOutBuiltinSet<"vid", "csil", [["v", "v", "v"],
["v", "Uv", "Uv"]]>;
}
-// 17. Vector Permutation Instructions
-// 17.1. Integer Scalar Move Instructions
-let HasMask = false in {
- let HasVL = false, MangledName = "vmv_x" in
+// 16. Vector Permutation Instructions
+// 16.1. Integer Scalar Move Instructions
+let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
+ let HasVL = false, OverloadedName = "vmv_x" in
defm vmv_x : RVVOp0BuiltinSet<"vmv_x_s", "csil",
[["s", "ve", "ev"],
["s", "UvUe", "UeUv"]]>;
- let MangledName = "vmv_s" in
+ let OverloadedName = "vmv_s",
+ UnMaskedPolicyScheme = HasPassthruOperand,
+ SupportOverloading = false in
defm vmv_s : RVVOutBuiltinSet<"vmv_s_x", "csil",
- [["x", "v", "vve"],
- ["x", "Uv", "UvUvUe"]]>;
+ [["x", "v", "ve"],
+ ["x", "Uv", "UvUe"]]>;
}
-// 17.2. Floating-Point Scalar Move Instructions
-let HasMask = false in {
- let HasVL = false, MangledName = "vfmv_f" in
+// 16.2. Floating-Point Scalar Move Instructions
+let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
+ let HasVL = false, OverloadedName = "vfmv_f" in
defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "xfd",
[["s", "ve", "ev"]]>;
- let MangledName = "vfmv_s" in
+ let OverloadedName = "vfmv_s",
+ UnMaskedPolicyScheme = HasPassthruOperand,
+ SupportOverloading = false in
defm vfmv_s : RVVOutBuiltinSet<"vfmv_s_f", "xfd",
- [["f", "v", "vve"],
- ["x", "Uv", "UvUvUe"]]>;
+ [["f", "v", "ve"],
+ ["x", "Uv", "UvUe"]]>;
}
-// 17.3. Vector Slide Instructions
-// 17.3.1. Vector Slideup Instructions
-defm vslideup : RVVSlideBuiltinSet;
-// 17.3.2. Vector Slidedown Instructions
-defm vslidedown : RVVSlideBuiltinSet;
+// 16.3. Vector Slide Instructions
+// 16.3.1. Vector Slideup Instructions
+defm vslideup : RVVSlideUpBuiltinSet;
+// 16.3.2. Vector Slidedown Instructions
+defm vslidedown : RVVSlideDownBuiltinSet;
-// 17.3.3. Vector Slide1up Instructions
+// 16.3.3. Vector Slide1up Instructions
+let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vslide1up : RVVSlideOneBuiltinSet;
defm vfslide1up : RVVFloatingBinVFBuiltinSet;
-// 17.3.4. Vector Slide1down Instruction
+// 16.3.4. Vector Slide1down Instruction
defm vslide1down : RVVSlideOneBuiltinSet;
defm vfslide1down : RVVFloatingBinVFBuiltinSet;
-// 17.4. Vector Register Gather Instructions
+// 16.4. Vector Register Gather Instructions
// signed and floating type
defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "csilxfd",
[["vv", "v", "vvUv"]]>;
@@ -1988,34 +2208,75 @@ defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csil",
[["vx", "Uv", "UvUvz"]]>;
defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csil",
[["vv", "Uv", "UvUv(Log2EEW:4)Uv"]]>;
+}
-// 17.5. Vector Compress Instruction
-let HasMask = false,
+// 16.5. Vector Compress Instruction
+let HasMasked = false,
+ UnMaskedPolicyScheme = HasPassthruOperand,
+ MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
- std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
- IntrinsicTypes = {ResultType, Ops[3]->getType()};
+ // insert poison passthru
+ if (PolicyAttrs & RVV_VTA)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ IntrinsicTypes = {ResultType, Ops.back()->getType()};
}] in {
// signed and floating type
defm vcompress : RVVOutBuiltinSet<"vcompress", "csilxfd",
- [["vm", "v", "vmvv"]]>;
+ [["vm", "v", "vvm"]]>;
// unsigned type
defm vcompress : RVVOutBuiltinSet<"vcompress", "csil",
- [["vm", "Uv", "UvmUvUv"]]>;
+ [["vm", "Uv", "UvUvm"]]>;
}
// Miscellaneous
-let HasMask = false, HasVL = false, IRName = "" in {
- let Name = "vreinterpret_v",
+let HasMasked = false, HasVL = false, IRName = "" in {
+ let Name = "vreinterpret_v", MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
+ if (ResultType->isIntOrIntVectorTy(1) ||
+ Ops[0]->getType()->isIntOrIntVectorTy(1)) {
+ assert(isa<ScalableVectorType>(ResultType) &&
+ isa<ScalableVectorType>(Ops[0]->getType()));
+
+ LLVMContext &Context = CGM.getLLVMContext();
+ ScalableVectorType *Boolean64Ty =
+ ScalableVectorType::get(llvm::Type::getInt1Ty(Context), 64);
+
+ if (ResultType->isIntOrIntVectorTy(1)) {
+ // Casting from m1 vector integer -> vector boolean
+ // Ex: <vscale x 8 x i8>
+ // --(bitcast)--------> <vscale x 64 x i1>
+ // --(vector_extract)-> <vscale x 8 x i1>
+ llvm::Value *BitCast = Builder.CreateBitCast(Ops[0], Boolean64Ty);
+ return Builder.CreateExtractVector(ResultType, BitCast,
+ ConstantInt::get(Int64Ty, 0));
+ } else {
+ // Casting from vector boolean -> m1 vector integer
+ // Ex: <vscale x 1 x i1>
+ // --(vector_insert)-> <vscale x 64 x i1>
+ // --(bitcast)-------> <vscale x 8 x i8>
+ llvm::Value *Boolean64Val =
+ Builder.CreateInsertVector(Boolean64Ty,
+ llvm::PoisonValue::get(Boolean64Ty),
+ Ops[0],
+ ConstantInt::get(Int64Ty, 0));
+ return Builder.CreateBitCast(Boolean64Val, ResultType);
+ }
+ }
return Builder.CreateBitCast(Ops[0], ResultType);
}] in {
// Reinterpret between different type under the same SEW and LMUL
def vreinterpret_i_u : RVVBuiltin<"Uvv", "vUv", "csil", "v">;
- def vreinterpret_i_f : RVVBuiltin<"Fvv", "vFv", "sil", "v">;
+ def vreinterpret_i_f : RVVBuiltin<"Fvv", "vFv", "il", "v">;
def vreinterpret_u_i : RVVBuiltin<"vUv", "Uvv", "csil", "Uv">;
- def vreinterpret_u_f : RVVBuiltin<"FvUv", "UvFv", "sil", "Uv">;
- def vreinterpret_f_i : RVVBuiltin<"vFv", "Fvv", "sil", "Fv">;
- def vreinterpret_f_u : RVVBuiltin<"UvFv", "FvUv", "sil", "Fv">;
+ def vreinterpret_u_f : RVVBuiltin<"FvUv", "UvFv", "il", "Uv">;
+ def vreinterpret_f_i : RVVBuiltin<"vFv", "Fvv", "il", "Fv">;
+ def vreinterpret_f_u : RVVBuiltin<"UvFv", "FvUv", "il", "Fv">;
+ let RequiredFeatures = ["Zvfhmin"] in {
+ def vreinterpret_i_h : RVVBuiltin<"Fvv", "vFv", "s", "v">;
+ def vreinterpret_u_h : RVVBuiltin<"FvUv", "UvFv", "s", "Uv">;
+ def vreinterpret_h_i : RVVBuiltin<"vFv", "Fvv", "s", "Fv">;
+ def vreinterpret_h_u : RVVBuiltin<"UvFv", "FvUv", "s", "Fv">;
+ }
// Reinterpret between different SEW under the same LMUL
foreach dst_sew = ["(FixedSEW:8)", "(FixedSEW:16)", "(FixedSEW:32)",
@@ -2025,24 +2286,80 @@ let HasMask = false, HasVL = false, IRName = "" in {
def vreinterpret_u_ # dst_sew : RVVBuiltin<"Uv" # dst_sew # "Uv",
dst_sew # "UvUv", "csil", dst_sew # "Uv">;
}
+
+ // Existing users of FixedSEW - the reinterpretation between different SEW
+ // and same LMUL has the implicit assumption that if FixedSEW is set to the
+ // given element width, then the type will be identified as invalid, thus
+ // skipping definition of reinterpret of SEW=8 to SEW=8. However this blocks
+ // our usage here of defining all possible combinations of a fixed SEW to
+ // any boolean. So we need to separately define SEW=8 here.
+ // Reinterpret from LMUL=1 integer type to vector boolean type
+ def vreintrepret_m1_b8_signed :
+ RVVBuiltin<"Svm",
+ "mSv",
+ "c", "m">;
+ def vreintrepret_m1_b8_usigned :
+ RVVBuiltin<"USvm",
+ "mUSv",
+ "c", "m">;
+
+ // Reinterpret from vector boolean type to LMUL=1 integer type
+ def vreintrepret_b8_m1_signed :
+ RVVBuiltin<"mSv",
+ "Svm",
+ "c", "Sv">;
+ def vreintrepret_b8_m1_usigned :
+ RVVBuiltin<"mUSv",
+ "USvm",
+ "c", "USv">;
+
+ foreach dst_sew = ["16", "32", "64"] in {
+ // Reinterpret from LMUL=1 integer type to vector boolean type
+ def vreinterpret_m1_b # dst_sew # _signed:
+ RVVBuiltin<"(FixedSEW:" # dst_sew # ")Svm",
+ "m(FixedSEW:" # dst_sew # ")Sv",
+ "c", "m">;
+ def vreinterpret_m1_b # dst_sew # _unsigned:
+ RVVBuiltin<"(FixedSEW:" # dst_sew # ")USvm",
+ "m(FixedSEW:" # dst_sew # ")USv",
+ "c", "m">;
+ // Reinterpret from vector boolean type to LMUL=1 integer type
+ def vreinterpret_b # dst_sew # _m1_signed:
+ RVVBuiltin<"m(FixedSEW:" # dst_sew # ")Sv",
+ "(FixedSEW:" # dst_sew # ")Svm",
+ "c", "(FixedSEW:" # dst_sew # ")Sv">;
+ def vreinterpret_b # dst_sew # _m1_unsigned:
+ RVVBuiltin<"m(FixedSEW:" # dst_sew # ")USv",
+ "(FixedSEW:" # dst_sew # ")USvm",
+ "c", "(FixedSEW:" # dst_sew # ")USv">;
+ }
}
- let Name = "vundefined", HasNoMaskedOverloaded = false,
+ let Name = "vundefined", SupportOverloading = false,
+ MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
- return llvm::UndefValue::get(ResultType);
+ return llvm::PoisonValue::get(ResultType);
}] in {
def vundefined : RVVBuiltin<"v", "v", "csilxfd">;
def vundefined_u : RVVBuiltin<"Uv", "Uv", "csil">;
+
+ foreach nf = NFList in {
+ let NF = nf in {
+ defvar T = "(Tuple:" # nf # ")";
+ def : RVVBuiltin<T # "v", T # "v", "csilxfd">;
+ def : RVVBuiltin<T # "Uv", T # "Uv", "csil">;
+ }
+ }
+
}
// LMUL truncation
// C/C++ Operand: VecTy, IR Operand: VecTy, Index
- let Name = "vlmul_trunc_v", MangledName = "vlmul_trunc",
+ let Name = "vlmul_trunc_v", OverloadedName = "vlmul_trunc",
+ MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{ {
- ID = Intrinsic::experimental_vector_extract;
- IntrinsicTypes = {ResultType, Ops[0]->getType()};
- Ops.push_back(ConstantInt::get(Int64Ty, 0));
- return Builder.CreateCall(CGM.getIntrinsic(ID, IntrinsicTypes), Ops, "");
+ return Builder.CreateExtractVector(ResultType, Ops[0],
+ ConstantInt::get(Int64Ty, 0));
} }] in {
foreach dst_lmul = ["(SFixedLog2LMUL:-3)", "(SFixedLog2LMUL:-2)", "(SFixedLog2LMUL:-1)",
"(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in {
@@ -2055,14 +2372,12 @@ let HasMask = false, HasVL = false, IRName = "" in {
// LMUL extension
// C/C++ Operand: SubVecTy, IR Operand: VecTy, SubVecTy, Index
- let Name = "vlmul_ext_v", MangledName = "vlmul_ext",
+ let Name = "vlmul_ext_v", OverloadedName = "vlmul_ext",
+ MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
- ID = Intrinsic::experimental_vector_insert;
- IntrinsicTypes = {ResultType, Ops[0]->getType()};
- Ops.push_back(llvm::UndefValue::get(ResultType));
- std::swap(Ops[0], Ops[1]);
- Ops.push_back(ConstantInt::get(Int64Ty, 0));
- return Builder.CreateCall(CGM.getIntrinsic(ID, IntrinsicTypes), Ops, "");
+ return Builder.CreateInsertVector(ResultType,
+ llvm::PoisonValue::get(ResultType),
+ Ops[0], ConstantInt::get(Int64Ty, 0));
}] in {
foreach dst_lmul = ["(LFixedLog2LMUL:-2)", "(LFixedLog2LMUL:-1)", "(LFixedLog2LMUL:-0)",
"(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in {
@@ -2073,40 +2388,225 @@ let HasMask = false, HasVL = false, IRName = "" in {
}
}
- let Name = "vget_v",
+ let Name = "vget_v", MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
{
- ID = Intrinsic::experimental_vector_extract;
- ScalableVectorType *VecTy = cast<ScalableVectorType>(ResultType);
+ if (isa<StructType>(Ops[0]->getType())) // For tuple type
+ // Extract value from index (operand 1) of vtuple (operand 0)
+ return Builder.CreateExtractValue(
+ Ops[0],
+ {(unsigned)cast<ConstantInt>(Ops[1])->getZExtValue()});
+ auto *VecTy = cast<ScalableVectorType>(ResultType);
+ auto *OpVecTy = cast<ScalableVectorType>(Ops[0]->getType());
+ // Mask to only valid indices.
+ unsigned MaxIndex = OpVecTy->getMinNumElements() / VecTy->getMinNumElements();
+ assert(isPowerOf2_32(MaxIndex));
+ Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
+ Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
Ops[1] = Builder.CreateMul(Ops[1],
ConstantInt::get(Ops[1]->getType(),
VecTy->getMinNumElements()));
- IntrinsicTypes = {ResultType, Ops[0]->getType()};
- return Builder.CreateCall(CGM.getIntrinsic(ID, IntrinsicTypes), Ops, "");
+ return Builder.CreateExtractVector(ResultType, Ops[0], Ops[1]);
}
}] in {
foreach dst_lmul = ["(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in {
- def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vvKz", "csilfd", dst_lmul # "v">;
+ def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vvKz", "csilxfd", dst_lmul # "v">;
def : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "UvUvKz", "csil", dst_lmul # "Uv">;
}
+ foreach nf = NFList in {
+ defvar T = "(Tuple:" # nf # ")";
+ def : RVVBuiltin<T # "vv", "v" # T # "vKz", "csilxfd", "v">;
+ def : RVVBuiltin<T # "UvUv", "Uv" # T # "UvKz", "csil", "Uv">;
+ }
}
- let Name = "vset_v", Log2LMUL = [0, 1, 2],
+ let Name = "vset_v", MaskedPolicyScheme = NonePolicy,
ManualCodegen = [{
{
- ID = Intrinsic::experimental_vector_insert;
- IntrinsicTypes = {ResultType, Ops[2]->getType()};
- ScalableVectorType *VecTy = cast<ScalableVectorType>(Ops[2]->getType());
+ if (isa<StructType>(ResultType)) // For tuple type
+ // Insert value (operand 2) into index (operand 1) of vtuple (operand 0)
+ return Builder.CreateInsertValue(
+ Ops[0], Ops[2],
+ {(unsigned)cast<ConstantInt>(Ops[1])->getZExtValue()});
+ auto *ResVecTy = cast<ScalableVectorType>(ResultType);
+ auto *VecTy = cast<ScalableVectorType>(Ops[2]->getType());
+ // Mask to only valid indices.
+ unsigned MaxIndex = ResVecTy->getMinNumElements() / VecTy->getMinNumElements();
+ assert(isPowerOf2_32(MaxIndex));
+ Ops[1] = Builder.CreateZExt(Ops[1], Builder.getInt64Ty());
+ Ops[1] = Builder.CreateAnd(Ops[1], MaxIndex - 1);
Ops[1] = Builder.CreateMul(Ops[1],
ConstantInt::get(Ops[1]->getType(),
VecTy->getMinNumElements()));
- std::swap(Ops[1], Ops[2]);
- return Builder.CreateCall(CGM.getIntrinsic(ID, IntrinsicTypes), Ops, "");
+ return Builder.CreateInsertVector(ResultType, Ops[0], Ops[2], Ops[1]);
}
}] in {
foreach dst_lmul = ["(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in {
- def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "v" # dst_lmul # "vKzv", "csilfd">;
+ def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "v" # dst_lmul # "vKzv", "csilxfd">;
def : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "Uv" # dst_lmul #"UvKzUv", "csil">;
}
+ foreach nf = NFList in {
+ defvar T = "(Tuple:" # nf # ")";
+ def : RVVBuiltin<"v" # T # "v", T # "v" # T # "vKzv", "csilxfd">;
+ def : RVVBuiltin<"Uv" # T # "Uv", T # "Uv" # T # "UvKzUv", "csil">;
+ }
+ }
+
+ let Name = "vcreate_v",
+ UnMaskedPolicyScheme = NonePolicy,
+ MaskedPolicyScheme = NonePolicy,
+ SupportOverloading = false,
+ ManualCodegen = [{
+ {
+ if (isa<StructType>(ResultType)) {
+ unsigned NF = cast<StructType>(ResultType)->getNumElements();
+ llvm::Value *ReturnTuple = llvm::PoisonValue::get(ResultType);
+ for (unsigned I = 0; I < NF; ++I) {
+ ReturnTuple = Builder.CreateInsertValue(ReturnTuple, Ops[I], {I});
+ }
+ return ReturnTuple;
+ }
+ llvm::Value *ReturnVector = llvm::PoisonValue::get(ResultType);
+ auto *VecTy = cast<ScalableVectorType>(Ops[0]->getType());
+ for (unsigned I = 0, N = Ops.size(); I < N; ++I) {
+ llvm::Value *Idx =
+ ConstantInt::get(Builder.getInt64Ty(),
+ VecTy->getMinNumElements() * I);
+ ReturnVector =
+ Builder.CreateInsertVector(ResultType, ReturnVector, Ops[I], Idx);
+ }
+ return ReturnVector;
+ }
+ }] in {
+
+ defm : RVVNonTupleVCreateBuiltin<1, [0]>;
+ defm : RVVNonTupleVCreateBuiltin<2, [0, 1]>;
+ defm : RVVNonTupleVCreateBuiltin<3, [0, 1, 2]>;
+
+ foreach nf = NFList in {
+ let NF = nf in {
+ defvar T = "(Tuple:" # nf # ")";
+ defvar V = VString<nf, /*signed=*/true>.S;
+ defvar UV = VString<nf, /*signed=*/false>.S;
+ def : RVVBuiltin<T # "v", T # "v" # V, "csilxfd">;
+ def : RVVBuiltin<T # "Uv", T # "Uv" # UV, "csil">;
+ }
+ }
+ }
+}
+
+multiclass RVVOutBuiltinSetZvbb {
+ let OverloadedName = NAME in
+ defm "" : RVVOutBuiltinSet<NAME, "csil", [["v", "v", "vv"],
+ ["v", "Uv", "UvUv"]]>;
+}
+
+multiclass RVVOutBuiltinSetZvk<bit HasVV = 1, bit HasVS = 1> {
+ // vaesz only has 'vs' and vgmul only has 'vv' and they do not have ambiguous
+ // prototypes like other zvkned instructions (e.g. vaesdf), so we don't
+ // need to encode the operand mnemonics into its intrinsic function name.
+ if HasVV then {
+ defvar name = NAME # !if(!eq(NAME, "vgmul"), "", "_vv");
+ let OverloadedName = name in
+ defm "" : RVVOutBuiltinSet<NAME # "_vv", "i",
+ [["vv", "Uv", "UvUvUv"]]>;
+ }
+
+ if HasVS then {
+ foreach vs2_lmul = ["(SEFixedLog2LMUL:-1)", "(SEFixedLog2LMUL:0)",
+ "(SEFixedLog2LMUL:1)", "(SEFixedLog2LMUL:2)",
+ "(SEFixedLog2LMUL:3)"] in {
+ defvar name = NAME # !if(!eq(NAME, "vaesz"), "", "_vs");
+ let OverloadedName = name, IRName = NAME # "_vs", Name = NAME # "_vs",
+ IntrinsicTypes = [-1, 1] in
+ def NAME # vs2_lmul
+ : RVVBuiltin<vs2_lmul # "UvUv", "UvUv" # vs2_lmul # "Uv", "i">;
+ }
+ }
+}
+
+multiclass RVVOutOp2BuiltinSetVVZvk<string type_range = "i">
+ : RVVOutOp2BuiltinSet<NAME, type_range, [["vv", "Uv", "UvUvUvUv"]]>;
+
+multiclass RVVOutOp2BuiltinSetVIZvk<string type_range = "i">
+ : RVVOutOp2BuiltinSet<NAME, type_range, [["vi", "Uv", "UvUvUvKz"]]>;
+
+multiclass RVVSignedWidenBinBuiltinSetVwsll
+ : RVVWidenBuiltinSet<NAME, "csi",
+ [["vv", "Uw", "UwUvUv"],
+ ["vx", "Uw", "UwUvz"]]>;
+
+let UnMaskedPolicyScheme = HasPassthruOperand in {
+ // zvkb
+ let RequiredFeatures = ["Zvkb", "Experimental"] in {
+ defm vandn : RVVUnsignedBinBuiltinSet;
+ defm vbrev8 : RVVOutBuiltinSetZvbb;
+ defm vrev8 : RVVOutBuiltinSetZvbb;
+ defm vrol : RVVUnsignedShiftBuiltinSet;
+ defm vror : RVVUnsignedShiftBuiltinSet;
+ }
+
+ // zvbb
+ let RequiredFeatures = ["Zvbb", "Experimental"] in {
+ defm vbrev : RVVOutBuiltinSetZvbb;
+ defm vclz : RVVOutBuiltinSetZvbb;
+ defm vctz : RVVOutBuiltinSetZvbb;
+ defm vcpopv : RVVOutBuiltinSetZvbb;
+ let OverloadedName = "vwsll" in
+ defm vwsll : RVVSignedWidenBinBuiltinSetVwsll;
+ }
+
+ // zvbc
+ let RequiredFeatures = ["Zvbc", "Experimental"] in {
+ defm vclmul : RVVInt64BinBuiltinSet;
+ defm vclmulh : RVVInt64BinBuiltinSet;
+ }
+}
+
+let UnMaskedPolicyScheme = HasPolicyOperand, HasMasked = false in {
+ // zvkg
+ let RequiredFeatures = ["Zvkg", "Experimental"] in {
+ defm vghsh : RVVOutOp2BuiltinSetVVZvk;
+ defm vgmul : RVVOutBuiltinSetZvk<HasVV=1, HasVS=0>;
+ }
+
+ // zvkned
+ let RequiredFeatures = ["Zvkned", "Experimental"] in {
+ defm vaesdf : RVVOutBuiltinSetZvk;
+ defm vaesdm : RVVOutBuiltinSetZvk;
+ defm vaesef : RVVOutBuiltinSetZvk;
+ defm vaesem : RVVOutBuiltinSetZvk;
+ let UnMaskedPolicyScheme = HasPassthruOperand in
+ defm vaeskf1 : RVVOutOp1BuiltinSet<"vaeskf1", "i", [["vi", "Uv", "UvUvKz"]]>;
+ defm vaeskf2 : RVVOutOp2BuiltinSetVIZvk;
+ defm vaesz : RVVOutBuiltinSetZvk<HasVV=0>;
+ }
+
+ // zvknha
+ let RequiredFeatures = ["Zvknha", "Experimental"] in {
+ defm vsha2ch : RVVOutOp2BuiltinSetVVZvk<"i">;
+ defm vsha2cl : RVVOutOp2BuiltinSetVVZvk<"i">;
+ defm vsha2ms : RVVOutOp2BuiltinSetVVZvk<"i">;
+ }
+
+ // zvknhb
+ let RequiredFeatures = ["Zvknhb", "Experimental"] in {
+ defm vsha2ch : RVVOutOp2BuiltinSetVVZvk<"il">;
+ defm vsha2cl : RVVOutOp2BuiltinSetVVZvk<"il">;
+ defm vsha2ms : RVVOutOp2BuiltinSetVVZvk<"il">;
+ }
+
+ // zvksed
+ let RequiredFeatures = ["Zvksed", "Experimental"] in {
+ let UnMaskedPolicyScheme = HasPassthruOperand in
+ defm vsm4k : RVVOutOp1BuiltinSet<"vsm4k", "i", [["vi", "Uv", "UvUvKz"]]>;
+ defm vsm4r : RVVOutBuiltinSetZvk;
+ }
+
+ // zvksh
+ let RequiredFeatures = ["Zvksh", "Experimental"] in {
+ defm vsm3c : RVVOutOp2BuiltinSetVIZvk;
+ let UnMaskedPolicyScheme = HasPassthruOperand in
+ defm vsm3me : RVVOutOp1BuiltinSet<"vsm3me", "i", [["vv", "Uv", "UvUvUv"]]>;
}
}
diff --git a/contrib/llvm-project/clang/include/clang/Basic/riscv_vector_common.td b/contrib/llvm-project/clang/include/clang/Basic/riscv_vector_common.td
new file mode 100644
index 000000000000..040db6f0cdbf
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Basic/riscv_vector_common.td
@@ -0,0 +1,713 @@
+//==------ riscv_vector_common.td - RISC-V V-ext builtin class ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines RVV builtin base class for RISC-V V-extension.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Instruction definitions
+//===----------------------------------------------------------------------===//
+// Each record of the class RVVBuiltin defines a collection of builtins (i.e.
+// "def vadd : RVVBuiltin" will be used to define things like "vadd_vv_i32m1",
+// "vadd_vv_i32m2", etc).
+//
+// The elements of this collection are defined by an instantiation process the
+// range of which is specified by the cross product of the LMUL attribute and
+// every element in the attribute TypeRange. By default builtins have LMUL = [1,
+// 2, 4, 8, 1/2, 1/4, 1/8] so the process is repeated 7 times. In tablegen we
+// use the Log2LMUL [0, 1, 2, 3, -1, -2, -3] to represent the LMUL.
+//
+// LMUL represents the fact that the types of values used by that builtin are
+// values generated by instructions that are executed under that LMUL. However,
+// this does not mean the builtin is necessarily lowered into an instruction
+// that executes under the specified LMUL. An example where this happens are
+// loads and stores of masks. A mask like `vbool8_t` can be generated, for
+// instance, by comparing two `__rvv_int8m1_t` (this is LMUL=1) or comparing two
+// `__rvv_int16m2_t` (this is LMUL=2). The actual load or store, however, will
+// be performed under LMUL=1 because mask registers are not grouped.
+//
+// TypeRange is a non-empty sequence of basic types:
+//
+// c: int8_t (i8)
+// s: int16_t (i16)
+// i: int32_t (i32)
+// l: int64_t (i64)
+// x: float16_t (half)
+// f: float32_t (float)
+// d: float64_t (double)
+// y: bfloat16_t (bfloat16)
+//
+// This way, given an LMUL, a record with a TypeRange "sil" will cause the
+// definition of 3 builtins. Each type "t" in the TypeRange (in this example
+// they are int16_t, int32_t, int64_t) is used as a parameter that drives the
+// definition of that particular builtin (for the given LMUL).
+//
+// During the instantiation, types can be transformed or modified using type
+// transformers. Given a type "t" the following primitive type transformers can
+// be applied to it to yield another type.
+//
+// e: type of "t" as is (identity)
+// v: computes a vector type whose element type is "t" for the current LMUL
+// w: computes a vector type identical to what 'v' computes except for the
+// element type which is twice as wide as the element type of 'v'
+// q: computes a vector type identical to what 'v' computes except for the
+// element type which is four times as wide as the element type of 'v'
+// o: computes a vector type identical to what 'v' computes except for the
+// element type which is eight times as wide as the element type of 'v'
+// m: computes a vector type identical to what 'v' computes except for the
+// element type which is bool
+// 0: void type, ignores "t"
+// z: size_t, ignores "t"
+// t: ptrdiff_t, ignores "t"
+// u: unsigned long, ignores "t"
+// l: long, ignores "t"
+// f: float32, ignores "t"
+//
+// So for instance if t is "i", i.e. int, then "e" will yield int again. "v"
+// will yield an RVV vector type (assume LMUL=1), so __rvv_int32m1_t.
+// Accordingly "w" would yield __rvv_int64m2_t.
+//
+// A type transformer can be prefixed by other non-primitive type transformers.
+//
+// P: constructs a pointer to the current type
+// C: adds const to the type
+// K: requires the integer type to be a constant expression
+// U: given an integer type or vector type, computes its unsigned variant
+// I: given a vector type, compute the vector type with integer type
+// elements of the same width
+// F: given a vector type, compute the vector type with floating-point type
+// elements of the same width
+// S: given a vector type, computes its equivalent one for LMUL=1. This is a
+// no-op if the vector was already LMUL=1
+// (Log2EEW:Value): Log2EEW value could be 3/4/5/6 (8/16/32/64), given a
+// vector type (SEW and LMUL) and EEW (8/16/32/64), computes its
+// equivalent integer vector type with EEW and corresponding ELMUL (elmul =
+// (eew/sew) * lmul). For example, vector type is __rvv_float16m4
+// (SEW=16, LMUL=4) and Log2EEW is 3 (EEW=8), and then equivalent vector
+// type is __rvv_uint8m2_t (elmul=(8/16)*4 = 2). Ignore to define a new
+// builtins if its equivalent type has illegal lmul.
+// (FixedSEW:Value): Given a vector type (SEW and LMUL), and computes another
+// vector type which only changed SEW as given value. Ignore to define a new
+// builtin if its equivalent type has illegal lmul or the SEW does not changed.
+// (SFixedLog2LMUL:Value): Smaller Fixed Log2LMUL. Given a vector type (SEW
+// and LMUL), and computes another vector type which only changed LMUL as
+// given value. The new LMUL should be smaller than the old one. Ignore to
+// define a new builtin if its equivalent type has illegal lmul.
+// (SEFixedLog2LMUL:Value): Smaller or Equal Fixed Log2LMUL. Given a vector
+// type (SEW and LMUL), and computes another vector type which only
+// changed LMUL as given value. The new LMUL should be smaller than or
+// equal to the old one. Ignore to define a new builtin if its equivalent
+// type has illegal lmul.
+// (LFixedLog2LMUL:Value): Larger Fixed Log2LMUL. Given a vector type (SEW
+// and LMUL), and computes another vector type which only changed LMUL as
+// given value. The new LMUL should be larger than the old one. Ignore to
+// define a new builtin if its equivalent type has illegal lmul.
+//
+// Following with the example above, if t is "i", then "Ue" will yield unsigned
+// int and "Fv" will yield __rvv_float32m1_t (again assuming LMUL=1), Fw would
+// yield __rvv_float64m2_t, etc.
+//
+// Each builtin is then defined by applying each type in TypeRange against the
+// sequence of type transformers described in Suffix and Prototype.
+//
+// The name of the builtin is defined by the Name attribute (which defaults to
+// the name of the class) appended (separated with an underscore) the Suffix
+// attribute. For instance with Name="foo", Suffix = "v" and TypeRange = "il",
+// the builtin generated will be __builtin_rvv_foo_i32m1 and
+// __builtin_rvv_foo_i64m1 (under LMUL=1). If Suffix contains more than one
+// type transformer (say "vv") each of the types is separated with an
+// underscore as in "__builtin_rvv_foo_i32m1_i32m1".
+//
+// The C/C++ prototype of the builtin is defined by the Prototype attribute.
+// Prototype is a non-empty sequence of type transformers, the first of which
+// is the return type of the builtin and the rest are the parameters of the
+// builtin, in order. For instance if Prototype is "wvv" and TypeRange is "si"
+// a first builtin will have type
+// __rvv_int32m2_t (__rvv_int16m1_t, __rvv_int16m1_t) and the second builtin
+// will have type __rvv_int64m2_t (__rvv_int32m1_t, __rvv_int32m1_t) (again
+// under LMUL=1).
+//
+// There are a number of attributes that are used to constraint the number and
+// shape of the builtins generated. Refer to the comments below for them.
+
+class PolicyScheme<int val>{
+ int Value = val;
+}
+def NonePolicy : PolicyScheme<0>;
+def HasPassthruOperand : PolicyScheme<1>;
+def HasPolicyOperand : PolicyScheme<2>;
+
+class RVVBuiltin<string suffix, string prototype, string type_range,
+ string overloaded_suffix = ""> {
+ // Base name that will be prepended in __builtin_rvv_ and appended the
+ // computed Suffix.
+ string Name = NAME;
+
+ // If not empty, each instantiated builtin will have this appended after an
+ // underscore (_). It is instantiated like Prototype.
+ string Suffix = suffix;
+
+ // If empty, default OverloadedName is sub string of `Name` which end of first
+ // '_'. For example, the default overloaded name is `vadd` for Name `vadd_vv`.
+ // It's used for describe some special naming cases.
+ string OverloadedName = "";
+
+ // If not empty, each OverloadedName will have this appended after an
+ // underscore (_). It is instantiated like Prototype.
+ string OverloadedSuffix = overloaded_suffix;
+
+ // The different variants of the builtin, parameterised with a type.
+ string TypeRange = type_range;
+
+ // We use each type described in TypeRange and LMUL with prototype to
+ // instantiate a specific element of the set of builtins being defined.
+ // Prototype attribute defines the C/C++ prototype of the builtin. It is a
+ // non-empty sequence of type transformers, the first of which is the return
+ // type of the builtin and the rest are the parameters of the builtin, in
+ // order. For instance if Prototype is "wvv", TypeRange is "si" and LMUL=1, a
+ // first builtin will have type
+ // __rvv_int32m2_t (__rvv_int16m1_t, __rvv_int16m1_t), and the second builtin
+ // will have type __rvv_int64m2_t (__rvv_int32m1_t, __rvv_int32m1_t).
+ string Prototype = prototype;
+
+ // This builtin has a masked form.
+ bit HasMasked = true;
+
+ // If HasMasked, this flag states that this builtin has a maskedoff operand. It
+ // is always the first operand in builtin and IR intrinsic.
+ bit HasMaskedOffOperand = true;
+
+ // This builtin has a granted vector length parameter.
+ bit HasVL = true;
+
+ // The policy scheme for masked intrinsic IR.
+ // It could be NonePolicy or HasPolicyOperand.
+ // HasPolicyOperand: Has a policy operand. 0 is tail and mask undisturbed, 1 is
+ // tail agnostic, 2 is mask undisturbed, and 3 is tail and mask agnostic. The
+ // policy operand is located at the last position.
+ PolicyScheme MaskedPolicyScheme = HasPolicyOperand;
+
+ // The policy scheme for unmasked intrinsic IR.
+ // It could be NonePolicy, HasPassthruOperand or HasPolicyOperand.
+ // HasPassthruOperand: Has a passthru operand to decide tail policy. If it is
+ // poison, tail policy is tail agnostic, otherwise policy is tail undisturbed.
+ // HasPolicyOperand: Has a policy operand. 1 is tail agnostic and 0 is tail
+ // undisturbed.
+ PolicyScheme UnMaskedPolicyScheme = NonePolicy;
+
+ // This builtin support tail agnostic and undisturbed policy.
+ bit HasTailPolicy = true;
+ // This builtin support mask agnostic and undisturbed policy.
+ bit HasMaskPolicy = true;
+
+ // This builtin prototype with TA or TAMA policy could not support overloading
+ // API. Other policy intrinsic functions would support overloading API with
+ // suffix `_tu`, `tumu`, `tuma`, `tamu` and `tama`.
+ bit SupportOverloading = true;
+
+ // This builtin is valid for the given Log2LMULs.
+ list<int> Log2LMUL = [0, 1, 2, 3, -1, -2, -3];
+
+ // Manual code in clang codegen riscv_vector_builtin_cg.inc
+ code ManualCodegen = [{}];
+
+ // When emit the automatic clang codegen, it describes what types we have to use
+ // to obtain the specific LLVM intrinsic. -1 means the return type, otherwise,
+ // k >= 0 meaning the k-th operand (counting from zero) of the codegen'd
+ // parameter of the unmasked version. k can't be the mask operand's position.
+ list<int> IntrinsicTypes = [];
+
+ // If these names are not empty, this is the ID of the LLVM intrinsic
+ // we want to lower to.
+ string IRName = NAME;
+
+ // If HasMasked, this is the ID of the LLVM intrinsic we want to lower to.
+ string MaskedIRName = NAME #"_mask";
+
+ // Use clang_builtin_alias to save the number of builtins.
+ bit HasBuiltinAlias = true;
+
+ // Features required to enable for this builtin.
+ list<string> RequiredFeatures = [];
+
+ // Number of fields for Load/Store Segment instructions.
+ int NF = 1;
+
+ // Set to true if the builtin is associated with tuple types.
+ bit IsTuple = false;
+
+ // Set to true if the builtin has a parameter that models floating-point
+ // rounding mode control
+ bit HasFRMRoundModeOp = false;
+}
+
+// This is the code emitted in the header.
+class RVVHeader {
+ code HeaderCode;
+}
+
+//===----------------------------------------------------------------------===//
+// Basic classes with automatic codegen.
+//===----------------------------------------------------------------------===//
+
+class RVVOutBuiltin<string suffix, string prototype, string type_range>
+ : RVVBuiltin<suffix, prototype, type_range> {
+ let IntrinsicTypes = [-1];
+}
+
+class RVVOp0Builtin<string suffix, string prototype, string type_range>
+ : RVVBuiltin<suffix, prototype, type_range> {
+ let IntrinsicTypes = [0];
+}
+
+class RVVOutOp1Builtin<string suffix, string prototype, string type_range>
+ : RVVBuiltin<suffix, prototype, type_range> {
+ let IntrinsicTypes = [-1, 1];
+}
+
+class RVVOutOp0Op1Builtin<string suffix, string prototype, string type_range>
+ : RVVBuiltin<suffix, prototype, type_range> {
+ let IntrinsicTypes = [-1, 0, 1];
+}
+
+multiclass RVVBuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes,
+ list<int> intrinsic_types> {
+ let IRName = intrinsic_name, MaskedIRName = intrinsic_name # "_mask",
+ IntrinsicTypes = intrinsic_types in {
+ foreach s_p = suffixes_prototypes in {
+ let Name = NAME # "_" # s_p[0] in {
+ defvar suffix = s_p[1];
+ defvar prototype = s_p[2];
+ def : RVVBuiltin<suffix, prototype, type_range>;
+ }
+ }
+ }
+}
+
+// IntrinsicTypes is output, op0, op1 [-1, 0, 1]
+multiclass RVVOutOp0Op1BuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes>
+ : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes,
+ [-1, 0, 1]>;
+
+multiclass RVVOutBuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes>
+ : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1]>;
+
+multiclass RVVOp0BuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes>
+ : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [0]>;
+
+// IntrinsicTypes is output, op1 [-1, 0]
+multiclass RVVOutOp0BuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes>
+ : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1, 0]>;
+
+// IntrinsicTypes is output, op1 [-1, 1]
+multiclass RVVOutOp1BuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes>
+ : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1, 1]>;
+
+multiclass RVVOp0Op1BuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes>
+ : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [0, 1]>;
+
+multiclass RVVOutOp1Op2BuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes>
+ : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1, 1, 2]>;
+
+// IntrinsicTypes is output, op2 [-1, 2]
+multiclass RVVOutOp2BuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes>
+ : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1, 2]>;
+
+multiclass RVVSignedBinBuiltinSet
+ : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "v", "vvv"],
+ ["vx", "v", "vve"]]>;
+
+multiclass RVVSignedBinBuiltinSetRoundingMode
+ : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "v", "vvvu"],
+ ["vx", "v", "vveu"]]>;
+
+multiclass RVVUnsignedBinBuiltinSet
+ : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "Uv", "UvUvUv"],
+ ["vx", "Uv", "UvUvUe"]]>;
+
+multiclass RVVUnsignedBinBuiltinSetRoundingMode
+ : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "Uv", "UvUvUvu"],
+ ["vx", "Uv", "UvUvUeu"]]>;
+
+multiclass RVVIntBinBuiltinSet
+ : RVVSignedBinBuiltinSet,
+ RVVUnsignedBinBuiltinSet;
+
+multiclass RVVInt64BinBuiltinSet
+ : RVVOutOp1BuiltinSet<NAME, "l",
+ [["vv", "v", "vvv"],
+ ["vx", "v", "vve"]]>,
+ RVVOutOp1BuiltinSet<NAME, "l",
+ [["vv", "Uv", "UvUvUv"],
+ ["vx", "Uv", "UvUvUe"]]>;
+
+multiclass RVVSlideOneBuiltinSet
+ : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vx", "v", "vve"],
+ ["vx", "Uv", "UvUvUe"]]>;
+
+multiclass RVVSignedShiftBuiltinSet
+ : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "v", "vvUv"],
+ ["vx", "v", "vvz"]]>;
+
+multiclass RVVSignedShiftBuiltinSetRoundingMode
+ : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "v", "vvUvu"],
+ ["vx", "v", "vvzu"]]>;
+
+multiclass RVVUnsignedShiftBuiltinSet
+ : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "Uv", "UvUvUv"],
+ ["vx", "Uv", "UvUvz"]]>;
+
+multiclass RVVUnsignedShiftBuiltinSetRoundingMode
+ : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "Uv", "UvUvUvu"],
+ ["vx", "Uv", "UvUvzu"]]>;
+
+multiclass RVVShiftBuiltinSet
+ : RVVSignedShiftBuiltinSet,
+ RVVUnsignedShiftBuiltinSet;
+
+let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+ multiclass RVVSignedNShiftBuiltinSet
+ : RVVOutOp0Op1BuiltinSet<NAME, "csil",
+ [["wv", "v", "vwUv"],
+ ["wx", "v", "vwz"]]>;
+
+ multiclass RVVSignedNShiftBuiltinSetRoundingMode
+ : RVVOutOp0Op1BuiltinSet<NAME, "csil",
+ [["wv", "v", "vwUvu"],
+ ["wx", "v", "vwzu"]]>;
+
+ multiclass RVVUnsignedNShiftBuiltinSet
+ : RVVOutOp0Op1BuiltinSet<NAME, "csil",
+ [["wv", "Uv", "UvUwUv"],
+ ["wx", "Uv", "UvUwz"]]>;
+
+ multiclass RVVUnsignedNShiftBuiltinSetRoundingMode
+ : RVVOutOp0Op1BuiltinSet<NAME, "csil",
+ [["wv", "Uv", "UvUwUvu"],
+ ["wx", "Uv", "UvUwzu"]]>;
+
+}
+
+multiclass RVVCarryinBuiltinSet
+ : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vvm", "v", "vvvm"],
+ ["vxm", "v", "vvem"],
+ ["vvm", "Uv", "UvUvUvm"],
+ ["vxm", "Uv", "UvUvUem"]]>;
+
+multiclass RVVCarryOutInBuiltinSet<string intrinsic_name>
+ : RVVOp0Op1BuiltinSet<intrinsic_name, "csil",
+ [["vvm", "vm", "mvvm"],
+ ["vxm", "vm", "mvem"],
+ ["vvm", "Uvm", "mUvUvm"],
+ ["vxm", "Uvm", "mUvUem"]]>;
+
+multiclass RVVSignedMaskOutBuiltinSet
+ : RVVOp0Op1BuiltinSet<NAME, "csil",
+ [["vv", "vm", "mvv"],
+ ["vx", "vm", "mve"]]>;
+
+multiclass RVVUnsignedMaskOutBuiltinSet
+ : RVVOp0Op1BuiltinSet<NAME, "csil",
+ [["vv", "Uvm", "mUvUv"],
+ ["vx", "Uvm", "mUvUe"]]>;
+
+multiclass RVVIntMaskOutBuiltinSet
+ : RVVSignedMaskOutBuiltinSet,
+ RVVUnsignedMaskOutBuiltinSet;
+
+class RVVIntExt<string intrinsic_name, string suffix, string prototype,
+ string type_range>
+ : RVVBuiltin<suffix, prototype, type_range> {
+ let IRName = intrinsic_name;
+ let MaskedIRName = intrinsic_name # "_mask";
+ let OverloadedName = NAME;
+ let IntrinsicTypes = [-1, 0];
+}
+
+let HasMaskedOffOperand = false in {
+ multiclass RVVIntTerBuiltinSet {
+ defm "" : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "v", "vvvv"],
+ ["vx", "v", "vvev"],
+ ["vv", "Uv", "UvUvUvUv"],
+ ["vx", "Uv", "UvUvUeUv"]]>;
+ }
+ multiclass RVVFloatingTerBuiltinSet {
+ defm "" : RVVOutOp1BuiltinSet<NAME, "xfd",
+ [["vv", "v", "vvvv"],
+ ["vf", "v", "vvev"]]>;
+ }
+ multiclass RVVFloatingTerBuiltinSetRoundingMode {
+ defm "" : RVVOutOp1BuiltinSet<NAME, "xfd",
+ [["vv", "v", "vvvvu"],
+ ["vf", "v", "vvevu"]]>;
+ }
+}
+
+let HasMaskedOffOperand = false, Log2LMUL = [-2, -1, 0, 1, 2] in {
+ multiclass RVVFloatingWidenTerBuiltinSet {
+ defm "" : RVVOutOp1Op2BuiltinSet<NAME, "xf",
+ [["vv", "w", "wwvv"],
+ ["vf", "w", "wwev"]]>;
+ }
+ multiclass RVVFloatingWidenTerBuiltinSetRoundingMode {
+ defm "" : RVVOutOp1Op2BuiltinSet<NAME, "xf",
+ [["vv", "w", "wwvvu"],
+ ["vf", "w", "wwevu"]]>;
+ }
+}
+
+multiclass RVVFloatingBinBuiltinSet
+ : RVVOutOp1BuiltinSet<NAME, "xfd",
+ [["vv", "v", "vvv"],
+ ["vf", "v", "vve"]]>;
+
+multiclass RVVFloatingBinBuiltinSetRoundingMode
+ : RVVOutOp1BuiltinSet<NAME, "xfd",
+ [["vv", "v", "vvvu"],
+ ["vf", "v", "vveu"]]>;
+
+multiclass RVVFloatingBinVFBuiltinSet
+ : RVVOutOp1BuiltinSet<NAME, "xfd",
+ [["vf", "v", "vve"]]>;
+
+multiclass RVVFloatingBinVFBuiltinSetRoundingMode
+ : RVVOutOp1BuiltinSet<NAME, "xfd",
+ [["vf", "v", "vveu"]]>;
+
+multiclass RVVFloatingMaskOutBuiltinSet
+ : RVVOp0Op1BuiltinSet<NAME, "xfd",
+ [["vv", "vm", "mvv"],
+ ["vf", "vm", "mve"]]>;
+
+multiclass RVVFloatingMaskOutVFBuiltinSet
+ : RVVOp0Op1BuiltinSet<NAME, "fd",
+ [["vf", "vm", "mve"]]>;
+
+multiclass RVVConvBuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes> {
+let Name = intrinsic_name,
+ IRName = intrinsic_name,
+ MaskedIRName = intrinsic_name # "_mask",
+ IntrinsicTypes = [-1, 0] in {
+ foreach s_p = suffixes_prototypes in {
+ defvar suffix = s_p[0];
+ defvar prototype = s_p[1];
+ def : RVVBuiltin<suffix, prototype, type_range>;
+ }
+ }
+}
+
+
+class RVVMaskBinBuiltin : RVVOutBuiltin<"m", "mmm", "c"> {
+ let Name = NAME # "_mm";
+ let HasMasked = false;
+}
+
+class RVVMaskUnaryBuiltin : RVVOutBuiltin<"m", "mm", "c"> {
+ let Name = NAME # "_m";
+}
+
+class RVVMaskNullaryBuiltin : RVVOutBuiltin<"m", "m", "c"> {
+ let Name = NAME # "_m";
+ let HasMasked = false;
+ let SupportOverloading = false;
+}
+
+class RVVMaskOp0Builtin<string prototype> : RVVOp0Builtin<"m", prototype, "c"> {
+ let Name = NAME # "_m";
+ let HasMaskedOffOperand = false;
+}
+
+let UnMaskedPolicyScheme = HasPolicyOperand,
+ HasMaskedOffOperand = false in {
+ multiclass RVVSlideUpBuiltinSet {
+ defm "" : RVVOutBuiltinSet<NAME, "csilxfd",
+ [["vx","v", "vvvz"]]>;
+ defm "" : RVVOutBuiltinSet<NAME, "csil",
+ [["vx","Uv", "UvUvUvz"]]>;
+ }
+}
+
+let UnMaskedPolicyScheme = HasPassthruOperand,
+ ManualCodegen = [{
+ if (IsMasked) {
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+ if ((PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA))
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ } else {
+ if (PolicyAttrs & RVV_VTA)
+ Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+ }
+
+ Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+ IntrinsicTypes = {ResultType, Ops.back()->getType()};
+ }] in {
+ multiclass RVVSlideDownBuiltinSet {
+ defm "" : RVVOutBuiltinSet<NAME, "csilxfd",
+ [["vx","v", "vvz"]]>;
+ defm "" : RVVOutBuiltinSet<NAME, "csil",
+ [["vx","Uv", "UvUvz"]]>;
+ }
+}
+
+class RVVFloatingUnaryBuiltin<string builtin_suffix, string ir_suffix,
+ string prototype>
+ : RVVOutBuiltin<ir_suffix, prototype, "xfd"> {
+ let Name = NAME # "_" # builtin_suffix;
+}
+
+class RVVFloatingUnaryVVBuiltin : RVVFloatingUnaryBuiltin<"v", "v", "vv">;
+
+class RVVConvBuiltin<string suffix, string prototype, string type_range,
+ string overloaded_name>
+ : RVVBuiltin<suffix, prototype, type_range> {
+ let IntrinsicTypes = [-1, 0];
+ let OverloadedName = overloaded_name;
+}
+
+class RVVConvToSignedBuiltin<string overloaded_name>
+ : RVVConvBuiltin<"Iv", "Ivv", "xfd", overloaded_name>;
+
+class RVVConvToUnsignedBuiltin<string overloaded_name>
+ : RVVConvBuiltin<"Uv", "Uvv", "xfd", overloaded_name>;
+
+class RVVConvToWidenSignedBuiltin<string overloaded_name>
+ : RVVConvBuiltin<"Iw", "Iwv", "xf", overloaded_name>;
+
+class RVVConvToWidenUnsignedBuiltin<string overloaded_name>
+ : RVVConvBuiltin<"Uw", "Uwv", "xf", overloaded_name>;
+
+class RVVConvToNarrowingSignedBuiltin<string overloaded_name>
+ : RVVConvBuiltin<"Iv", "IvFw", "csi", overloaded_name>;
+
+class RVVConvToNarrowingUnsignedBuiltin<string overloaded_name>
+ : RVVConvBuiltin<"Uv", "UvFw", "csi", overloaded_name>;
+
+let HasMaskedOffOperand = true in {
+ multiclass RVVSignedReductionBuiltin {
+ defm "" : RVVOutOp0BuiltinSet<NAME, "csil",
+ [["vs", "vSv", "SvvSv"]]>;
+ }
+ multiclass RVVUnsignedReductionBuiltin {
+ defm "" : RVVOutOp0BuiltinSet<NAME, "csil",
+ [["vs", "UvUSv", "USvUvUSv"]]>;
+ }
+ multiclass RVVFloatingReductionBuiltin {
+ defm "" : RVVOutOp0BuiltinSet<NAME, "xfd",
+ [["vs", "vSv", "SvvSv"]]>;
+ }
+ multiclass RVVFloatingReductionBuiltinRoundingMode {
+ defm "" : RVVOutOp0BuiltinSet<NAME, "xfd",
+ [["vs", "vSv", "SvvSvu"]]>;
+ }
+ multiclass RVVFloatingWidenReductionBuiltin {
+ defm "" : RVVOutOp0BuiltinSet<NAME, "xf",
+ [["vs", "vSw", "SwvSw"]]>;
+ }
+ multiclass RVVFloatingWidenReductionBuiltinRoundingMode {
+ defm "" : RVVOutOp0BuiltinSet<NAME, "xf",
+ [["vs", "vSw", "SwvSwu"]]>;
+ }
+}
+
+multiclass RVVIntReductionBuiltinSet
+ : RVVSignedReductionBuiltin,
+ RVVUnsignedReductionBuiltin;
+
+// For widen operation which has different mangling name.
+multiclass RVVWidenBuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes> {
+ let Log2LMUL = [-3, -2, -1, 0, 1, 2],
+ IRName = intrinsic_name, MaskedIRName = intrinsic_name # "_mask" in {
+ foreach s_p = suffixes_prototypes in {
+ let Name = NAME # "_" # s_p[0],
+ OverloadedName = NAME # "_" # s_p[0] in {
+ defvar suffix = s_p[1];
+ defvar prototype = s_p[2];
+ def : RVVOutOp0Op1Builtin<suffix, prototype, type_range>;
+ }
+ }
+ }
+}
+
+// For widen operation with widen operand which has different mangling name.
+multiclass RVVWidenWOp0BuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes> {
+ let Log2LMUL = [-3, -2, -1, 0, 1, 2],
+ IRName = intrinsic_name, MaskedIRName = intrinsic_name # "_mask" in {
+ foreach s_p = suffixes_prototypes in {
+ let Name = NAME # "_" # s_p[0],
+ OverloadedName = NAME # "_" # s_p[0] in {
+ defvar suffix = s_p[1];
+ defvar prototype = s_p[2];
+ def : RVVOutOp1Builtin<suffix, prototype, type_range>;
+ }
+ }
+ }
+}
+
+multiclass RVVSignedWidenBinBuiltinSet
+ : RVVWidenBuiltinSet<NAME, "csi",
+ [["vv", "w", "wvv"],
+ ["vx", "w", "wve"]]>;
+
+multiclass RVVSignedWidenOp0BinBuiltinSet
+ : RVVWidenWOp0BuiltinSet<NAME # "_w", "csi",
+ [["wv", "w", "wwv"],
+ ["wx", "w", "wwe"]]>;
+
+multiclass RVVUnsignedWidenBinBuiltinSet
+ : RVVWidenBuiltinSet<NAME, "csi",
+ [["vv", "Uw", "UwUvUv"],
+ ["vx", "Uw", "UwUvUe"]]>;
+
+multiclass RVVUnsignedWidenOp0BinBuiltinSet
+ : RVVWidenWOp0BuiltinSet<NAME # "_w", "csi",
+ [["wv", "Uw", "UwUwUv"],
+ ["wx", "Uw", "UwUwUe"]]>;
+
+multiclass RVVFloatingWidenBinBuiltinSet
+ : RVVWidenBuiltinSet<NAME, "xf",
+ [["vv", "w", "wvv"],
+ ["vf", "w", "wve"]]>;
+
+multiclass RVVFloatingWidenBinBuiltinSetRoundingMode
+ : RVVWidenBuiltinSet<NAME, "xf",
+ [["vv", "w", "wvvu"],
+ ["vf", "w", "wveu"]]>;
+
+multiclass RVVFloatingWidenOp0BinBuiltinSet
+ : RVVWidenWOp0BuiltinSet<NAME # "_w", "xf",
+ [["wv", "w", "wwv"],
+ ["wf", "w", "wwe"]]>;
+
+multiclass RVVFloatingWidenOp0BinBuiltinSetRoundingMode
+ : RVVWidenWOp0BuiltinSet<NAME # "_w", "xf",
+ [["wv", "w", "wwvu"],
+ ["wf", "w", "wweu"]]>;
diff --git a/contrib/llvm-project/clang/include/clang/CodeGen/BackendUtil.h b/contrib/llvm-project/clang/include/clang/CodeGen/BackendUtil.h
index 77d500079f01..fc8ed4f011f9 100644
--- a/contrib/llvm-project/clang/include/clang/CodeGen/BackendUtil.h
+++ b/contrib/llvm-project/clang/include/clang/CodeGen/BackendUtil.h
@@ -16,8 +16,12 @@
namespace llvm {
class BitcodeModule;
template <typename T> class Expected;
+ template <typename T> class IntrusiveRefCntPtr;
class Module;
class MemoryBufferRef;
+ namespace vfs {
+ class FileSystem;
+ } // namespace vfs
}
namespace clang {
@@ -26,6 +30,7 @@ namespace clang {
class CodeGenOptions;
class TargetOptions;
class LangOptions;
+ class BackendConsumer;
enum BackendAction {
Backend_EmitAssembly, ///< Emit native assembly files
@@ -40,10 +45,15 @@ namespace clang {
const CodeGenOptions &CGOpts,
const TargetOptions &TOpts, const LangOptions &LOpts,
StringRef TDesc, llvm::Module *M, BackendAction Action,
- std::unique_ptr<raw_pwrite_stream> OS);
+ llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
+ std::unique_ptr<raw_pwrite_stream> OS,
+ BackendConsumer *BC = nullptr);
void EmbedBitcode(llvm::Module *M, const CodeGenOptions &CGOpts,
llvm::MemoryBufferRef Buf);
+
+ void EmbedObject(llvm::Module *M, const CodeGenOptions &CGOpts,
+ DiagnosticsEngine &Diags);
}
#endif
diff --git a/contrib/llvm-project/clang/include/clang/CodeGen/CGFunctionInfo.h b/contrib/llvm-project/clang/include/clang/CodeGen/CGFunctionInfo.h
index 4899c9deda6a..e388901b8a50 100644
--- a/contrib/llvm-project/clang/include/clang/CodeGen/CGFunctionInfo.h
+++ b/contrib/llvm-project/clang/include/clang/CodeGen/CGFunctionInfo.h
@@ -250,7 +250,7 @@ public:
static ABIArgInfo getCoerceAndExpand(llvm::StructType *coerceToType,
llvm::Type *unpaddedCoerceToType) {
#ifndef NDEBUG
- // Sanity checks on unpaddedCoerceToType.
+ // Check that unpaddedCoerceToType has roughly the right shape.
// Assert that we only have a struct type if there are multiple elements.
auto unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoerceToType);
@@ -371,7 +371,7 @@ public:
dyn_cast<llvm::StructType>(UnpaddedCoerceAndExpandType)) {
return structTy->elements();
} else {
- return llvm::makeArrayRef(&UnpaddedCoerceAndExpandType, 1);
+ return llvm::ArrayRef(&UnpaddedCoerceAndExpandType, 1);
}
}
@@ -527,6 +527,11 @@ public:
return NumRequired;
}
+ /// Return true if the argument at a given index is required.
+ bool isRequiredArg(unsigned argIdx) const {
+ return argIdx == ~0U || argIdx < NumRequired;
+ }
+
unsigned getOpaqueData() const { return NumRequired; }
static RequiredArgs getFromOpaqueData(unsigned value) {
if (value == ~0U) return All;
@@ -567,6 +572,10 @@ class CGFunctionInfo final
/// Whether this is a chain call.
unsigned ChainCall : 1;
+ /// Whether this function is called by forwarding arguments.
+ /// This doesn't support inalloca or varargs.
+ unsigned DelegateCall : 1;
+
/// Whether this function is a CMSE nonsecure call
unsigned CmseNSCall : 1;
@@ -586,6 +595,9 @@ class CGFunctionInfo final
/// Whether this function has nocf_check attribute.
unsigned NoCfCheck : 1;
+ /// Log 2 of the maximum vector width.
+ unsigned MaxVectorWidth : 4;
+
RequiredArgs Required;
/// The struct representing all arguments passed in memory. Only used when
@@ -613,14 +625,11 @@ class CGFunctionInfo final
CGFunctionInfo() : Required(RequiredArgs::All) {}
public:
- static CGFunctionInfo *create(unsigned llvmCC,
- bool instanceMethod,
- bool chainCall,
- const FunctionType::ExtInfo &extInfo,
- ArrayRef<ExtParameterInfo> paramInfos,
- CanQualType resultType,
- ArrayRef<CanQualType> argTypes,
- RequiredArgs required);
+ static CGFunctionInfo *
+ create(unsigned llvmCC, bool instanceMethod, bool chainCall,
+ bool delegateCall, const FunctionType::ExtInfo &extInfo,
+ ArrayRef<ExtParameterInfo> paramInfos, CanQualType resultType,
+ ArrayRef<CanQualType> argTypes, RequiredArgs required);
void operator delete(void *p) { ::operator delete(p); }
// Friending class TrailingObjects is apparently not good enough for MSVC,
@@ -660,6 +669,8 @@ public:
bool isChainCall() const { return ChainCall; }
+ bool isDelegateCall() const { return DelegateCall; }
+
bool isCmseNSCall() const { return CmseNSCall; }
bool isNoReturn() const { return NoReturn; }
@@ -710,7 +721,7 @@ public:
ArrayRef<ExtParameterInfo> getExtParameterInfos() const {
if (!HasExtParameterInfos) return {};
- return llvm::makeArrayRef(getExtParameterInfosBuffer(), NumArgs);
+ return llvm::ArrayRef(getExtParameterInfosBuffer(), NumArgs);
}
ExtParameterInfo getExtParameterInfo(unsigned argIndex) const {
assert(argIndex <= NumArgs);
@@ -731,10 +742,22 @@ public:
ArgStructAlign = Align.getQuantity();
}
+ /// Return the maximum vector width in the arguments.
+ unsigned getMaxVectorWidth() const {
+ return MaxVectorWidth ? 1U << (MaxVectorWidth - 1) : 0;
+ }
+
+ /// Set the maximum vector width in the arguments.
+ void setMaxVectorWidth(unsigned Width) {
+ assert(llvm::isPowerOf2_32(Width) && "Expected power of 2 vector");
+ MaxVectorWidth = llvm::countr_zero(Width) + 1;
+ }
+
void Profile(llvm::FoldingSetNodeID &ID) {
ID.AddInteger(getASTCallingConvention());
ID.AddBoolean(InstanceMethod);
ID.AddBoolean(ChainCall);
+ ID.AddBoolean(DelegateCall);
ID.AddBoolean(NoReturn);
ID.AddBoolean(ReturnsRetained);
ID.AddBoolean(NoCallerSavedRegs);
@@ -752,17 +775,16 @@ public:
for (const auto &I : arguments())
I.type.Profile(ID);
}
- static void Profile(llvm::FoldingSetNodeID &ID,
- bool InstanceMethod,
- bool ChainCall,
+ static void Profile(llvm::FoldingSetNodeID &ID, bool InstanceMethod,
+ bool ChainCall, bool IsDelegateCall,
const FunctionType::ExtInfo &info,
ArrayRef<ExtParameterInfo> paramInfos,
- RequiredArgs required,
- CanQualType resultType,
+ RequiredArgs required, CanQualType resultType,
ArrayRef<CanQualType> argTypes) {
ID.AddInteger(info.getCC());
ID.AddBoolean(InstanceMethod);
ID.AddBoolean(ChainCall);
+ ID.AddBoolean(IsDelegateCall);
ID.AddBoolean(info.getNoReturn());
ID.AddBoolean(info.getProducesResult());
ID.AddBoolean(info.getNoCallerSavedRegs());
diff --git a/contrib/llvm-project/clang/include/clang/CodeGen/CodeGenABITypes.h b/contrib/llvm-project/clang/include/clang/CodeGen/CodeGenABITypes.h
index 3c745fadbe78..fda0855dc868 100644
--- a/contrib/llvm-project/clang/include/clang/CodeGen/CodeGenABITypes.h
+++ b/contrib/llvm-project/clang/include/clang/CodeGen/CodeGenABITypes.h
@@ -32,26 +32,18 @@
namespace llvm {
class AttrBuilder;
class Constant;
-class DataLayout;
-class Module;
class Function;
class FunctionType;
class Type;
}
namespace clang {
-class ASTContext;
class CXXConstructorDecl;
class CXXDestructorDecl;
class CXXRecordDecl;
class CXXMethodDecl;
-class CodeGenOptions;
-class CoverageSourceInfo;
-class DiagnosticsEngine;
-class HeaderSearchOptions;
class ObjCMethodDecl;
class ObjCProtocolDecl;
-class PreprocessorOptions;
namespace CodeGen {
class CGFunctionInfo;
diff --git a/contrib/llvm-project/clang/include/clang/CodeGen/CodeGenAction.h b/contrib/llvm-project/clang/include/clang/CodeGen/CodeGenAction.h
index b5721344046d..7ad2988e589e 100644
--- a/contrib/llvm-project/clang/include/clang/CodeGen/CodeGenAction.h
+++ b/contrib/llvm-project/clang/include/clang/CodeGen/CodeGenAction.h
@@ -53,6 +53,9 @@ private:
std::unique_ptr<llvm::Module> loadModule(llvm::MemoryBufferRef MBRef);
+ /// Load bitcode modules to link into our module from the options.
+ bool loadLinkModules(CompilerInstance &CI);
+
protected:
/// Create a new code generation action. If the optional \p _VMContext
/// parameter is supplied, the action uses it without taking ownership,
@@ -80,7 +83,7 @@ public:
CodeGenerator *getCodeGenerator() const;
- BackendConsumer *BEConsumer;
+ BackendConsumer *BEConsumer = nullptr;
};
class EmitAssemblyAction : public CodeGenAction {
diff --git a/contrib/llvm-project/clang/include/clang/CodeGen/ConstantInitBuilder.h b/contrib/llvm-project/clang/include/clang/CodeGen/ConstantInitBuilder.h
index 88e357a0c29c..498acfd38013 100644
--- a/contrib/llvm-project/clang/include/clang/CodeGen/ConstantInitBuilder.h
+++ b/contrib/llvm-project/clang/include/clang/CodeGen/ConstantInitBuilder.h
@@ -41,7 +41,7 @@ class CodeGenModule;
/// for (auto &widget : widgets) {
/// auto widgetDesc = widgetArray.beginStruct();
/// widgetDesc.addInt(CGM.SizeTy, widget.getPower());
-/// widgetDesc.add(CGM.GetAddrOfConstantString(widget.getName()));
+/// widgetDesc.add(CGM.GetAddrOfConstantStringFromLiteral(widget.getName()));
/// widgetDesc.add(CGM.GetAddrOfGlobal(widget.getInitializerDecl()));
/// widgetDesc.finishAndAddTo(widgetArray);
/// }
@@ -204,11 +204,6 @@ public:
add(llvm::ConstantPointerNull::get(ptrTy));
}
- /// Add a bitcast of a value to a specific type.
- void addBitCast(llvm::Constant *value, llvm::Type *type) {
- add(llvm::ConstantExpr::getBitCast(value, type));
- }
-
/// Add a bunch of new values to this initializer.
void addAll(llvm::ArrayRef<llvm::Constant *> values) {
assert(!Finished && "cannot add more values after finishing builder");
diff --git a/contrib/llvm-project/clang/include/clang/CodeGen/ModuleBuilder.h b/contrib/llvm-project/clang/include/clang/CodeGen/ModuleBuilder.h
index f9d056ed8b1e..edacd82bf899 100644
--- a/contrib/llvm-project/clang/include/clang/CodeGen/ModuleBuilder.h
+++ b/contrib/llvm-project/clang/include/clang/CodeGen/ModuleBuilder.h
@@ -14,12 +14,17 @@
#define LLVM_CLANG_CODEGEN_MODULEBUILDER_H
#include "clang/AST/ASTConsumer.h"
+#include "clang/Basic/LLVM.h"
namespace llvm {
class Constant;
class LLVMContext;
class Module;
class StringRef;
+
+ namespace vfs {
+ class FileSystem;
+ }
}
namespace clang {
@@ -74,6 +79,10 @@ public:
/// This may return null if there was no matching declaration.
const Decl *GetDeclForMangledName(llvm::StringRef MangledName);
+ /// Given a global declaration, return a mangled name for this declaration
+ /// which has been added to this code generator via a Handle method.
+ llvm::StringRef GetMangledName(GlobalDecl GD);
+
/// Return the LLVM address of the given global entity.
///
/// \param isForDefinition If true, the caller intends to define the
@@ -94,10 +103,11 @@ public:
/// the allocated CodeGenerator instance.
CodeGenerator *CreateLLVMCodeGen(DiagnosticsEngine &Diags,
llvm::StringRef ModuleName,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
const HeaderSearchOptions &HeaderSearchOpts,
const PreprocessorOptions &PreprocessorOpts,
const CodeGenOptions &CGO,
- llvm::LLVMContext& C,
+ llvm::LLVMContext &C,
CoverageSourceInfo *CoverageInfo = nullptr);
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/CodeGen/ObjectFilePCHContainerOperations.h b/contrib/llvm-project/clang/include/clang/CodeGen/ObjectFilePCHContainerOperations.h
index 8821cd70362e..7a02d8725885 100644
--- a/contrib/llvm-project/clang/include/clang/CodeGen/ObjectFilePCHContainerOperations.h
+++ b/contrib/llvm-project/clang/include/clang/CodeGen/ObjectFilePCHContainerOperations.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_CODEGEN_OBJECT_FILE_PCH_CONTAINER_OPERATIONS_H
-#define LLVM_CLANG_CODEGEN_OBJECT_FILE_PCH_CONTAINER_OPERATIONS_H
+#ifndef LLVM_CLANG_CODEGEN_OBJECTFILEPCHCONTAINEROPERATIONS_H
+#define LLVM_CLANG_CODEGEN_OBJECTFILEPCHCONTAINEROPERATIONS_H
#include "clang/Frontend/PCHContainerOperations.h"
@@ -32,7 +32,7 @@ class ObjectFilePCHContainerWriter : public PCHContainerWriter {
/// A PCHContainerReader implementation that uses LLVM to
/// wraps Clang modules inside a COFF, ELF, or Mach-O container.
class ObjectFilePCHContainerReader : public PCHContainerReader {
- StringRef getFormat() const override { return "obj"; }
+ ArrayRef<StringRef> getFormats() const override;
/// Returns the serialized AST inside the PCH container Buffer.
StringRef ExtractPCH(llvm::MemoryBufferRef Buffer) const override;
diff --git a/contrib/llvm-project/clang/include/clang/CodeGen/SwiftCallingConv.h b/contrib/llvm-project/clang/include/clang/CodeGen/SwiftCallingConv.h
index b1a638a58a09..d7a0c84699ab 100644
--- a/contrib/llvm-project/clang/include/clang/CodeGen/SwiftCallingConv.h
+++ b/contrib/llvm-project/clang/include/clang/CodeGen/SwiftCallingConv.h
@@ -28,7 +28,6 @@ namespace llvm {
}
namespace clang {
-class Decl;
class FieldDecl;
class ASTRecordLayout;
diff --git a/contrib/llvm-project/clang/include/clang/CrossTU/CrossTranslationUnit.h b/contrib/llvm-project/clang/include/clang/CrossTU/CrossTranslationUnit.h
index d9f9c51fccd9..e6b608a10e61 100644
--- a/contrib/llvm-project/clang/include/clang/CrossTU/CrossTranslationUnit.h
+++ b/contrib/llvm-project/clang/include/clang/CrossTU/CrossTranslationUnit.h
@@ -18,11 +18,11 @@
#include "clang/Analysis/MacroExpansionContext.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/Path.h"
+#include <optional>
namespace clang {
class CompilerInstance;
@@ -101,7 +101,7 @@ std::string createCrossTUIndexString(const llvm::StringMap<std::string> &Index);
using InvocationListTy = llvm::StringMap<llvm::SmallVector<std::string, 32>>;
/// Parse the YAML formatted invocation list file content \p FileContent.
-/// The format is expected to be a mapping from from absolute source file
+/// The format is expected to be a mapping from absolute source file
/// paths in the filesystem to a list of command-line parts, which
/// constitute the invocation needed to compile that file. That invocation
/// will be used to produce the AST of the TU.
@@ -109,8 +109,10 @@ llvm::Expected<InvocationListTy> parseInvocationList(
StringRef FileContent,
llvm::sys::path::Style PathStyle = llvm::sys::path::Style::posix);
-// Returns true if the variable or any field of a record variable is const.
-bool containsConst(const VarDecl *VD, const ASTContext &ACtx);
+/// Returns true if it makes sense to import a foreign variable definition.
+/// For instance, we don't want to import variables that have non-trivial types
+/// because the constructor might have side-effects.
+bool shouldImport(const VarDecl *VD, const ASTContext &ACtx);
/// This class is used for tools that requires cross translation
/// unit capability.
@@ -179,7 +181,7 @@ public:
ASTUnit *Unit);
/// Get a name to identify a named decl.
- static llvm::Optional<std::string> getLookupName(const NamedDecl *ND);
+ static std::optional<std::string> getLookupName(const NamedDecl *ND);
/// Emit diagnostics for the user for potential configuration errors.
void emitCrossTUDiagnostics(const IndexError &IE);
@@ -191,10 +193,18 @@ public:
/// source-location, empty is returned.
/// \note Macro expansion tracking for imported TUs is not implemented yet.
/// It returns empty unconditionally.
- llvm::Optional<clang::MacroExpansionContext>
+ std::optional<clang::MacroExpansionContext>
getMacroExpansionContextForSourceLocation(
const clang::SourceLocation &ToLoc) const;
+ /// Returns true if the given Decl is newly created during the import.
+ bool isImportedAsNew(const Decl *ToDecl) const;
+
+ /// Returns true if the given Decl is mapped (or created) during an import
+ /// but there was an unrecoverable error (the AST node cannot be erased, it
+ /// is marked with an Error object in this case).
+ bool hasError(const Decl *ToDecl) const;
+
private:
void lazyInitImporterSharedSt(TranslationUnitDecl *ToTU);
ASTImporter &getOrCreateASTImporter(ASTUnit *Unit);
@@ -226,7 +236,7 @@ private:
StringRef InvocationListFilePath);
/// Load the ASTUnit by its identifier found in the index file. If the
- /// indentifier is suffixed with '.ast' it is considered a dump. Otherwise
+ /// identifier is suffixed with '.ast' it is considered a dump. Otherwise
/// it is treated as source-file, and on-demand parsed. Relative paths are
/// prefixed with CTUDir.
LoadResultTy load(StringRef Identifier);
@@ -253,7 +263,7 @@ private:
StringRef InvocationListFilePath;
/// In case of on-demand parsing, the invocations for parsing the source
/// files is stored.
- llvm::Optional<InvocationListTy> InvocationList;
+ std::optional<InvocationListTy> InvocationList;
index_error_code PreviousParsingResult = index_error_code::success;
};
@@ -291,7 +301,7 @@ private:
/// \param DisplayCTUProgress Display a message about loading new ASTs.
///
/// \return An Expected instance which contains the ASTUnit pointer or the
- /// error occured during the load.
+ /// error occurred during the load.
llvm::Expected<ASTUnit *> getASTUnitForFunction(StringRef FunctionName,
StringRef CrossTUDir,
StringRef IndexName,
diff --git a/contrib/llvm-project/clang/include/clang/DirectoryWatcher/DirectoryWatcher.h b/contrib/llvm-project/clang/include/clang/DirectoryWatcher/DirectoryWatcher.h
index 4475807dfce9..d879b6411c9e 100644
--- a/contrib/llvm-project/clang/include/clang/DirectoryWatcher/DirectoryWatcher.h
+++ b/contrib/llvm-project/clang/include/clang/DirectoryWatcher/DirectoryWatcher.h
@@ -20,7 +20,7 @@ namespace clang {
/// Provides notifications for file changes in a directory.
///
/// Invokes client-provided function on every filesystem event in the watched
-/// directory. Initially the the watched directory is scanned and for every file
+/// directory. Initially the watched directory is scanned and for every file
/// found, an event is synthesized as if the file was added.
///
/// This is not a general purpose directory monitoring tool - list of
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Action.h b/contrib/llvm-project/clang/include/clang/Driver/Action.h
index ba84d886a6cf..04fa8b01b418 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Action.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Action.h
@@ -58,7 +58,7 @@ public:
OffloadClass,
PreprocessJobClass,
PrecompileJobClass,
- HeaderModulePrecompileJobClass,
+ ExtractAPIJobClass,
AnalyzeJobClass,
MigrateJobClass,
CompileJobClass,
@@ -72,11 +72,13 @@ public:
VerifyPCHJobClass,
OffloadBundlingJobClass,
OffloadUnbundlingJobClass,
- OffloadWrapperJobClass,
+ OffloadPackagerJobClass,
+ LinkerWrapperJobClass,
StaticLibJobClass,
+ BinaryAnalyzeJobClass,
JobClassFirst = PreprocessJobClass,
- JobClassLast = StaticLibJobClass
+ JobClassLast = BinaryAnalyzeJobClass
};
// The offloading kind determines if this action is binded to a particular
@@ -126,6 +128,9 @@ protected:
/// The Offloading architecture associated with this action.
const char *OffloadingArch = nullptr;
+ /// The Offloading toolchain associated with this device action.
+ const ToolChain *OffloadingToolChain = nullptr;
+
Action(ActionClass Kind, types::ID Type) : Action(Kind, ActionList(), Type) {}
Action(ActionClass Kind, Action *Input, types::ID Type)
: Action(Kind, ActionList({Input}), Type) {}
@@ -182,12 +187,18 @@ public:
/// Set the device offload info of this action and propagate it to its
/// dependences.
- void propagateDeviceOffloadInfo(OffloadKind OKind, const char *OArch);
+ void propagateDeviceOffloadInfo(OffloadKind OKind, const char *OArch,
+ const ToolChain *OToolChain);
/// Append the host offload info of this action and propagate it to its
/// dependences.
void propagateHostOffloadInfo(unsigned OKinds, const char *OArch);
+ void setHostOffloadInfo(unsigned OKinds, const char *OArch) {
+ ActiveOffloadKindMask |= OKinds;
+ OffloadingArch = OArch;
+ }
+
/// Set the offload info of this action to be the same as the provided action,
/// and propagate it to its dependences.
void propagateOffloadInfo(const Action *A);
@@ -198,10 +209,13 @@ public:
OffloadKind getOffloadingDeviceKind() const { return OffloadingDeviceKind; }
const char *getOffloadingArch() const { return OffloadingArch; }
+ const ToolChain *getOffloadingToolChain() const {
+ return OffloadingToolChain;
+ }
/// Check if this action have any offload kinds. Note that host offload kinds
/// are only set if the action is a dependence to a host offload action.
- bool isHostOffloading(OffloadKind OKind) const {
+ bool isHostOffloading(unsigned int OKind) const {
return ActiveOffloadKindMask & OKind;
}
bool isDeviceOffloading(OffloadKind OKind) const {
@@ -282,11 +296,16 @@ public:
OffloadKindList DeviceOffloadKinds;
public:
- /// Add a action along with the associated toolchain, bound arch, and
+ /// Add an action along with the associated toolchain, bound arch, and
/// offload kind.
void add(Action &A, const ToolChain &TC, const char *BoundArch,
OffloadKind OKind);
+ /// Add an action along with the associated toolchain, bound arch, and
+ /// offload kinds.
+ void add(Action &A, const ToolChain &TC, const char *BoundArch,
+ unsigned OffloadKindMask);
+
/// Get each of the individual arrays.
const ActionList &getActions() const { return DeviceActions; }
const ToolChainList &getToolChains() const { return DeviceToolChains; }
@@ -412,29 +431,21 @@ public:
PrecompileJobAction(Action *Input, types::ID OutputType);
static bool classof(const Action *A) {
- return A->getKind() == PrecompileJobClass ||
- A->getKind() == HeaderModulePrecompileJobClass;
+ return A->getKind() == PrecompileJobClass;
}
};
-class HeaderModulePrecompileJobAction : public PrecompileJobAction {
+class ExtractAPIJobAction : public JobAction {
void anchor() override;
- const char *ModuleName;
-
public:
- HeaderModulePrecompileJobAction(Action *Input, types::ID OutputType,
- const char *ModuleName);
+ ExtractAPIJobAction(Action *Input, types::ID OutputType);
static bool classof(const Action *A) {
- return A->getKind() == HeaderModulePrecompileJobClass;
+ return A->getKind() == ExtractAPIJobClass;
}
- void addModuleHeaderInput(Action *Input) {
- getInputs().push_back(Input);
- }
-
- const char *getModuleName() const { return ModuleName; }
+ void addHeaderInput(Action *Input) { getInputs().push_back(Input); }
};
class AnalyzeJobAction : public JobAction {
@@ -631,14 +642,25 @@ public:
}
};
-class OffloadWrapperJobAction : public JobAction {
+class OffloadPackagerJobAction : public JobAction {
void anchor() override;
public:
- OffloadWrapperJobAction(ActionList &Inputs, types::ID Type);
+ OffloadPackagerJobAction(ActionList &Inputs, types::ID Type);
static bool classof(const Action *A) {
- return A->getKind() == OffloadWrapperJobClass;
+ return A->getKind() == OffloadPackagerJobClass;
+ }
+};
+
+class LinkerWrapperJobAction : public JobAction {
+ void anchor() override;
+
+public:
+ LinkerWrapperJobAction(ActionList &Inputs, types::ID Type);
+
+ static bool classof(const Action *A) {
+ return A->getKind() == LinkerWrapperJobClass;
}
};
@@ -653,6 +675,17 @@ public:
}
};
+class BinaryAnalyzeJobAction : public JobAction {
+ void anchor() override;
+
+public:
+ BinaryAnalyzeJobAction(Action *Input, types::ID Type);
+
+ static bool classof(const Action *A) {
+ return A->getKind() == BinaryAnalyzeJobClass;
+ }
+};
+
} // namespace driver
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Driver/ClangOptionDocs.td b/contrib/llvm-project/clang/include/clang/Driver/ClangOptionDocs.td
index 3f914afea735..a5ee577c5f45 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/ClangOptionDocs.td
+++ b/contrib/llvm-project/clang/include/clang/Driver/ClangOptionDocs.td
@@ -28,8 +28,10 @@ GCC-compatible ``clang`` and ``clang++`` drivers.
}];
string Program = "clang";
- list<string> ExcludedFlags = ["HelpHidden", "NoDriverOption",
- "CLOption", "Unsupported", "Ignored", "FlangOnlyOption"];
+ // Note: We *must* use DefaultVis and not ClangOption, since that's
+ // the name of the actual TableGen record. The alias will not work.
+ list<string> VisibilityMask = ["DefaultVis"];
+ list<string> IgnoreFlags = ["HelpHidden", "Unsupported", "Ignored"];
}
include "Options.td"
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Compilation.h b/contrib/llvm-project/clang/include/clang/Driver/Compilation.h
index 89a43b5b7dc0..36ae85c42451 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Compilation.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Compilation.h
@@ -15,13 +15,13 @@
#include "clang/Driver/Util.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Option/Option.h"
#include <cassert>
#include <iterator>
#include <map>
#include <memory>
+#include <optional>
#include <utility>
#include <vector>
@@ -112,8 +112,11 @@ class Compilation {
/// only be removed if we crash.
ArgStringMap FailureResultFiles;
+ /// -ftime-trace result files.
+ ArgStringMap TimeTraceFiles;
+
/// Optional redirection for stdin, stdout, stderr.
- std::vector<Optional<StringRef>> Redirects;
+ std::vector<std::optional<StringRef>> Redirects;
/// Callback called after compilation job has been finished.
/// Arguments of the callback are the compilation job as an instance of
@@ -143,6 +146,8 @@ public:
return ActiveOffloadMask & Kind;
}
+ unsigned getActiveOffloadKinds() const { return ActiveOffloadMask; }
+
/// Iterator that visits device toolchains of a given kind.
using const_offload_toolchains_iterator =
const std::multimap<Action::OffloadKind,
@@ -156,6 +161,11 @@ public:
return OrderedOffloadingToolchains.equal_range(Kind);
}
+ const_offload_toolchains_range
+ getOffloadToolChains(Action::OffloadKind Kind) const {
+ return OrderedOffloadingToolchains.equal_range(Kind);
+ }
+
/// Return true if an offloading tool chain of a given kind exists.
template <Action::OffloadKind Kind> bool hasOffloadToolChain() const {
return OrderedOffloadingToolchains.find(Kind) !=
@@ -209,6 +219,7 @@ public:
void addCommand(std::unique_ptr<Command> C) { Jobs.addJob(std::move(C)); }
+ llvm::opt::ArgStringList &getTempFiles() { return TempFiles; }
const llvm::opt::ArgStringList &getTempFiles() const { return TempFiles; }
const ArgStringMap &getResultFiles() const { return ResultFiles; }
@@ -261,6 +272,14 @@ public:
return Name;
}
+ const char *getTimeTraceFile(const JobAction *JA) const {
+ return TimeTraceFiles.lookup(JA);
+ }
+ void addTimeTraceFile(const char *Name, const JobAction *JA) {
+ assert(!TimeTraceFiles.contains(JA));
+ TimeTraceFiles[JA] = Name;
+ }
+
/// CleanupFile - Delete a given file.
///
/// \param IssueErrors - Report failures as errors.
@@ -288,16 +307,22 @@ public:
///
/// \param FailingCommand - For non-zero results, this will be set to the
/// Command which failed, if any.
+ /// \param LogOnly - When true, only tries to log the command, not actually
+ /// execute it.
/// \return The result code of the subprocess.
- int ExecuteCommand(const Command &C, const Command *&FailingCommand) const;
+ int ExecuteCommand(const Command &C, const Command *&FailingCommand,
+ bool LogOnly = false) const;
/// ExecuteJob - Execute a single job.
///
/// \param FailingCommands - For non-zero results, this will be a vector of
/// failing commands and their associated result code.
- void ExecuteJobs(
- const JobList &Jobs,
- SmallVectorImpl<std::pair<int, const Command *>> &FailingCommands) const;
+ /// \param LogOnly - When true, only tries to log the command, not actually
+ /// execute it.
+ void
+ ExecuteJobs(const JobList &Jobs,
+ SmallVectorImpl<std::pair<int, const Command *>> &FailingCommands,
+ bool LogOnly = false) const;
/// initCompilationForDiagnostics - Remove stale state and suppress output
/// so compilation can be reexecuted to generate additional diagnostic
@@ -318,8 +343,8 @@ public:
///
/// \param Redirects - array of optional paths. The array should have a size
/// of three. The inferior process's stdin(0), stdout(1), and stderr(2) will
- /// be redirected to the corresponding paths, if provided (not llvm::None).
- void Redirect(ArrayRef<Optional<StringRef>> Redirects);
+ /// be redirected to the corresponding paths, if provided (not std::nullopt).
+ void Redirect(ArrayRef<std::optional<StringRef>> Redirects);
};
} // namespace driver
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Distro.h b/contrib/llvm-project/clang/include/clang/Driver/Distro.h
index 0d2a0939639e..a8de94163e8b 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Distro.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Distro.h
@@ -9,8 +9,8 @@
#ifndef LLVM_CLANG_DRIVER_DISTRO_H
#define LLVM_CLANG_DRIVER_DISTRO_H
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace driver {
@@ -37,6 +37,8 @@ public:
DebianStretch,
DebianBuster,
DebianBullseye,
+ DebianBookworm,
+ DebianTrixie,
Exherbo,
RHEL5,
RHEL6,
@@ -72,6 +74,11 @@ public:
UbuntuGroovy,
UbuntuHirsute,
UbuntuImpish,
+ UbuntuJammy,
+ UbuntuKinetic,
+ UbuntuLunar,
+ UbuntuMantic,
+ UbuntuNoble,
UnknownDistro
};
@@ -119,11 +126,11 @@ public:
bool IsOpenSUSE() const { return DistroVal == OpenSUSE; }
bool IsDebian() const {
- return DistroVal >= DebianLenny && DistroVal <= DebianBullseye;
+ return DistroVal >= DebianLenny && DistroVal <= DebianTrixie;
}
bool IsUbuntu() const {
- return DistroVal >= UbuntuHardy && DistroVal <= UbuntuImpish;
+ return DistroVal >= UbuntuHardy && DistroVal <= UbuntuNoble;
}
bool IsAlpineLinux() const { return DistroVal == AlpineLinux; }
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Driver.h b/contrib/llvm-project/clang/include/clang/Driver/Driver.h
index da7e8386a151..3ee1bcf2a69c 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Driver.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Driver.h
@@ -10,13 +10,18 @@
#define LLVM_CLANG_DRIVER_DRIVER_H
#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/HeaderInclude.h"
#include "clang/Basic/LLVM.h"
#include "clang/Driver/Action.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/Phases.h"
#include "clang/Driver/ToolChain.h"
#include "clang/Driver/Types.h"
#include "clang/Driver/Util.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Option/Arg.h"
@@ -26,25 +31,28 @@
#include <list>
#include <map>
#include <string>
+#include <vector>
namespace llvm {
class Triple;
namespace vfs {
class FileSystem;
}
+namespace cl {
+class ExpansionContext;
+}
} // namespace llvm
namespace clang {
namespace driver {
- class Command;
- class Compilation;
- class InputInfo;
- class JobList;
- class JobAction;
- class SanitizerArgs;
- class ToolChain;
+typedef SmallVector<InputInfo, 4> InputInfoList;
+
+class Command;
+class Compilation;
+class JobAction;
+class ToolChain;
/// Describes the kind of LTO mode selected via -f(no-)?lto(=.*)? options.
enum LTOKind {
@@ -54,6 +62,16 @@ enum LTOKind {
LTOK_Unknown
};
+/// Whether headers used to construct C++20 module units should be looked
+/// up by the path supplied on the command line, or in the user or system
+/// search paths.
+enum ModuleHeaderMode {
+ HeaderMode_None,
+ HeaderMode_Default,
+ HeaderMode_User,
+ HeaderMode_System
+};
+
/// Driver - Encapsulate logic for constructing compilation processes
/// from a set of gcc-driver-like command line arguments.
class Driver {
@@ -66,7 +84,8 @@ class Driver {
GXXMode,
CPPMode,
CLMode,
- FlangMode
+ FlangMode,
+ DXCMode
} Mode;
enum SaveTempsMode {
@@ -81,6 +100,19 @@ class Driver {
EmbedBitcode
} BitcodeEmbed;
+ enum OffloadMode {
+ OffloadHostDevice,
+ OffloadHost,
+ OffloadDevice,
+ } Offload;
+
+ /// Header unit mode set by -fmodule-header={user,system}.
+ ModuleHeaderMode CXX20HeaderType;
+
+ /// Set if we should process inputs and jobs with C++20 module
+ /// interpretation.
+ bool ModulesModeCXX20;
+
/// LTO mode selected via -f(no-)?lto(=.*)? options.
LTOKind LTOMode;
@@ -162,6 +194,9 @@ public:
/// The file to log CC_PRINT_PROC_STAT_FILE output to, if enabled.
std::string CCPrintStatReportFilename;
+ /// The file to log CC_PRINT_INTERNAL_STAT_FILE output to, if enabled.
+ std::string CCPrintInternalStatReportFilename;
+
/// The file to log CC_PRINT_OPTIONS output to, if enabled.
std::string CCPrintOptionsFilename;
@@ -171,9 +206,11 @@ public:
/// The file to log CC_LOG_DIAGNOSTICS output to, if enabled.
std::string CCLogDiagnosticsFilename;
+ /// An input type and its arguments.
+ using InputTy = std::pair<types::ID, const llvm::opt::Arg *>;
+
/// A list of inputs and their types for the given arguments.
- typedef SmallVector<std::pair<types::ID, const llvm::opt::Arg *>, 16>
- InputList;
+ using InputList = SmallVector<InputTy, 16>;
/// Whether the driver should follow g++ like behavior.
bool CCCIsCXX() const { return Mode == GXXMode; }
@@ -191,6 +228,9 @@ public:
/// Other modes fall back to calling gcc which in turn calls gfortran.
bool IsFlangMode() const { return Mode == FlangMode; }
+ /// Whether the driver should follow dxc.exe like behavior.
+ bool IsDXCMode() const { return Mode == DXCMode; }
+
/// Only print tool bindings, don't build any jobs.
unsigned CCCPrintBindings : 1;
@@ -198,9 +238,16 @@ public:
/// CCPrintOptionsFilename or to stderr.
unsigned CCPrintOptions : 1;
- /// Set CC_PRINT_HEADERS mode, which causes the frontend to log header include
- /// information to CCPrintHeadersFilename or to stderr.
- unsigned CCPrintHeaders : 1;
+ /// The format of the header information that is emitted. If CC_PRINT_HEADERS
+ /// is set, the format is textual. Otherwise, the format is determined by the
+ /// enviroment variable CC_PRINT_HEADERS_FORMAT.
+ HeaderIncludeFormatKind CCPrintHeadersFormat = HIFMT_None;
+
+ /// This flag determines whether clang should filter the header information
+ /// that is emitted. If enviroment variable CC_PRINT_HEADERS_FILTERING is set
+ /// to "only-direct-system", only system headers that are directly included
+ /// from non-system headers are emitted.
+ HeaderIncludeFilteringKind CCPrintHeadersFiltering = HIFIL_None;
/// Set CC_LOG_DIAGNOSTICS mode, which causes the frontend to log diagnostics
/// to CCLogDiagnosticsFilename or to stderr, in a stable machine readable
@@ -214,11 +261,16 @@ public:
/// performance report to CC_PRINT_PROC_STAT_FILE or to stdout.
unsigned CCPrintProcessStats : 1;
+ /// Set CC_PRINT_INTERNAL_STAT mode, which causes the driver to dump internal
+ /// performance report to CC_PRINT_INTERNAL_STAT_FILE or to stdout.
+ unsigned CCPrintInternalStats : 1;
+
/// Pointer to the ExecuteCC1Tool function, if available.
/// When the clangDriver lib is used through clang.exe, this provides a
/// shortcut for executing the -cc1 command-line directly, in the same
/// process.
- typedef int (*CC1ToolFunc)(SmallVectorImpl<const char *> &ArgV);
+ using CC1ToolFunc =
+ llvm::function_ref<int(SmallVectorImpl<const char *> &ArgV)>;
CC1ToolFunc CC1Main = nullptr;
private:
@@ -228,8 +280,8 @@ private:
/// Name to use when invoking gcc/g++.
std::string CCCGenericGCCName;
- /// Name of configuration file if used.
- std::string ConfigFile;
+ /// Paths to configuration files used.
+ std::vector<std::string> ConfigFiles;
/// Allocator for string saver.
llvm::BumpPtrAllocator Alloc;
@@ -243,15 +295,27 @@ private:
/// Arguments originated from command line.
std::unique_ptr<llvm::opt::InputArgList> CLOptions;
+ /// If this is non-null, the driver will prepend this argument before
+ /// reinvoking clang. This is useful for the llvm-driver where clang's
+ /// realpath will be to the llvm binary and not clang, so it must pass
+ /// "clang" as it's first argument.
+ const char *PrependArg;
+
/// Whether to check that input files exist when constructing compilation
/// jobs.
unsigned CheckInputsExist : 1;
+ /// Whether to probe for PCH files on disk, in order to upgrade
+ /// -include foo.h to -include-pch foo.h.pch.
+ unsigned ProbePrecompiled : 1;
public:
- /// Force clang to emit reproducer for driver invocation. This is enabled
- /// indirectly by setting FORCE_CLANG_DIAGNOSTICS_CRASH environment variable
- /// or when using the -gen-reproducer driver flag.
- unsigned GenReproducer : 1;
+ // getFinalPhase - Determine which compilation mode we are in and record
+ // which option we used to determine the final phase.
+ // TODO: Much of what getFinalPhase returns are not actually true compiler
+ // modes. Fold this functionality into Types::getCompilationPhases and
+ // handleArguments.
+ phases::ID getFinalPhase(const llvm::opt::DerivedArgList &DAL,
+ llvm::opt::Arg **FinalPhaseArg = nullptr) const;
private:
/// Certain options suppress the 'no input files' warning.
@@ -264,20 +328,17 @@ private:
/// stored in it, and will clean them up when torn down.
mutable llvm::StringMap<std::unique_ptr<ToolChain>> ToolChains;
+ /// Cache of known offloading architectures for the ToolChain already derived.
+ /// This should only be modified when we first initialize the offloading
+ /// toolchains.
+ llvm::DenseMap<const ToolChain *, llvm::DenseSet<llvm::StringRef>> KnownArchs;
+
private:
/// TranslateInputArgs - Create a new derived argument list from the input
/// arguments, after applying the standard argument translations.
llvm::opt::DerivedArgList *
TranslateInputArgs(const llvm::opt::InputArgList &Args) const;
- // getFinalPhase - Determine which compilation mode we are in and record
- // which option we used to determine the final phase.
- // TODO: Much of what getFinalPhase returns are not actually true compiler
- // modes. Fold this functionality into Types::getCompilationPhases and
- // handleArguments.
- phases::ID getFinalPhase(const llvm::opt::DerivedArgList &DAL,
- llvm::opt::Arg **FinalPhaseArg = nullptr) const;
-
// handleArguments - All code related to claiming and printing diagnostics
// related to arguments to the driver are done here.
void handleArguments(Compilation &C, llvm::opt::DerivedArgList &Args,
@@ -320,7 +381,9 @@ public:
/// Name to use when invoking gcc/g++.
const std::string &getCCCGenericGCCName() const { return CCCGenericGCCName; }
- const std::string &getConfigFile() const { return ConfigFile; }
+ llvm::ArrayRef<std::string> getConfigFiles() const {
+ return ConfigFiles;
+ }
const llvm::opt::OptTable &getOpts() const { return getDriverOptTable(); }
@@ -332,6 +395,12 @@ public:
void setCheckInputsExist(bool Value) { CheckInputsExist = Value; }
+ bool getProbePrecompiled() const { return ProbePrecompiled; }
+ void setProbePrecompiled(bool Value) { ProbePrecompiled = Value; }
+
+ const char *getPrependArg() const { return PrependArg; }
+ void setPrependArg(const char *Value) { PrependArg = Value; }
+
void setTargetAndMode(const ParsedClangName &TM) { ClangNameParts = TM; }
const std::string &getTitle() { return DriverTitle; }
@@ -359,6 +428,9 @@ public:
bool embedBitcodeInObject() const { return (BitcodeEmbed == EmbedBitcode); }
bool embedBitcodeMarkerOnly() const { return (BitcodeEmbed == EmbedMarker); }
+ bool offloadHostOnly() const { return Offload == OffloadHost; }
+ bool offloadDeviceOnly() const { return Offload == OffloadDevice; }
+
/// Compute the desired OpenMP runtime from the flags provided.
OpenMPRuntimeKind getOpenMPRuntime(const llvm::opt::ArgList &Args) const;
@@ -383,7 +455,7 @@ public:
/// ParseArgStrings - Parse the given list of strings into an
/// ArgList.
llvm::opt::InputArgList ParseArgStrings(ArrayRef<const char *> Args,
- bool IsClCompatMode,
+ bool UseDriverMode,
bool &ContainsError);
/// BuildInputs - Construct the list of inputs and their types from
@@ -413,6 +485,26 @@ public:
void BuildUniversalActions(Compilation &C, const ToolChain &TC,
const InputList &BAInputs) const;
+ /// BuildOffloadingActions - Construct the list of actions to perform for the
+ /// offloading toolchain that will be embedded in the host.
+ ///
+ /// \param C - The compilation that is being built.
+ /// \param Args - The input arguments.
+ /// \param Input - The input type and arguments
+ /// \param HostAction - The host action used in the offloading toolchain.
+ Action *BuildOffloadingActions(Compilation &C,
+ llvm::opt::DerivedArgList &Args,
+ const InputTy &Input,
+ Action *HostAction) const;
+
+ /// Returns the set of bound architectures active for this offload kind.
+ /// If there are no bound architctures we return a set containing only the
+ /// empty string. The \p SuppressError option is used to suppress errors.
+ llvm::DenseSet<StringRef>
+ getOffloadArchs(Compilation &C, const llvm::opt::DerivedArgList &Args,
+ Action::OffloadKind Kind, const ToolChain *TC,
+ bool SuppressError = false) const;
+
/// Check that the file referenced by Value exists. If it doesn't,
/// issue a diagnostic and return false.
/// If TypoCorrect is true and the file does not exist, see if it looks
@@ -450,6 +542,35 @@ public:
StringRef AdditionalInformation = "",
CompilationDiagnosticReport *GeneratedReport = nullptr);
+ enum class CommandStatus {
+ Crash = 1,
+ Error,
+ Ok,
+ };
+
+ enum class ReproLevel {
+ Off = 0,
+ OnCrash = static_cast<int>(CommandStatus::Crash),
+ OnError = static_cast<int>(CommandStatus::Error),
+ Always = static_cast<int>(CommandStatus::Ok),
+ };
+
+ bool maybeGenerateCompilationDiagnostics(
+ CommandStatus CS, ReproLevel Level, Compilation &C,
+ const Command &FailingCommand, StringRef AdditionalInformation = "",
+ CompilationDiagnosticReport *GeneratedReport = nullptr) {
+ if (static_cast<int>(CS) > static_cast<int>(Level))
+ return false;
+ if (CS != CommandStatus::Crash)
+ Diags.Report(diag::err_drv_force_crash)
+ << !::getenv("FORCE_CLANG_DIAGNOSTICS_CRASH");
+ // Hack to ensure that diagnostic notes get emitted.
+ Diags.setLastDiagnosticIgnored(false);
+ generateCompilationDiagnostics(C, FailingCommand, AdditionalInformation,
+ GeneratedReport);
+ return true;
+ }
+
/// @}
/// @name Helper Methods
/// @{
@@ -503,17 +624,30 @@ public:
/// BuildJobsForAction - Construct the jobs to perform for the action \p A and
/// return an InputInfo for the result of running \p A. Will only construct
/// jobs for a given (Action, ToolChain, BoundArch, DeviceKind) tuple once.
- InputInfo
- BuildJobsForAction(Compilation &C, const Action *A, const ToolChain *TC,
- StringRef BoundArch, bool AtTopLevel, bool MultipleArchs,
- const char *LinkingOutput,
- std::map<std::pair<const Action *, std::string>, InputInfo>
- &CachedResults,
- Action::OffloadKind TargetDeviceOffloadKind) const;
+ InputInfoList BuildJobsForAction(
+ Compilation &C, const Action *A, const ToolChain *TC, StringRef BoundArch,
+ bool AtTopLevel, bool MultipleArchs, const char *LinkingOutput,
+ std::map<std::pair<const Action *, std::string>, InputInfoList>
+ &CachedResults,
+ Action::OffloadKind TargetDeviceOffloadKind) const;
/// Returns the default name for linked images (e.g., "a.out").
const char *getDefaultImageName() const;
+ /// Creates a temp file.
+ /// 1. If \p MultipleArch is false or \p BoundArch is empty, the temp file is
+ /// in the temporary directory with name $Prefix-%%%%%%.$Suffix.
+ /// 2. If \p MultipleArch is true and \p BoundArch is not empty,
+ /// 2a. If \p NeedUniqueDirectory is false, the temp file is in the
+ /// temporary directory with name $Prefix-$BoundArch-%%%%%.$Suffix.
+ /// 2b. If \p NeedUniqueDirectory is true, the temp file is in a unique
+ /// subdiretory with random name under the temporary directory, and
+ /// the temp file itself has name $Prefix-$BoundArch.$Suffix.
+ const char *CreateTempFile(Compilation &C, StringRef Prefix, StringRef Suffix,
+ bool MultipleArchs = false,
+ StringRef BoundArch = {},
+ bool NeedUniqueDirectory = false) const;
+
/// GetNamedOutputPath - Return the name to use for the output of
/// the action \p JA. The result is appended to the compilation's
/// list of temporary or result files, as appropriate.
@@ -555,6 +689,12 @@ public:
/// ShouldEmitStaticLibrary - Should the linker emit a static library.
bool ShouldEmitStaticLibrary(const llvm::opt::ArgList &Args) const;
+ /// Returns true if the user has indicated a C++20 header unit mode.
+ bool hasHeaderMode() const { return CXX20HeaderType != HeaderMode_None; }
+
+ /// Get the mode for handling headers as set by fmodule-header{=}.
+ ModuleHeaderMode getModuleHeaderMode() const { return CXX20HeaderType; }
+
/// Returns true if we are performing any kind of LTO.
bool isUsingLTO(bool IsOffload = false) const {
return getLTOMode(IsOffload) != LTOK_None;
@@ -567,16 +707,23 @@ public:
private:
- /// Tries to load options from configuration file.
+ /// Tries to load options from configuration files.
///
/// \returns true if error occurred.
- bool loadConfigFile();
+ bool loadConfigFiles();
+
+ /// Tries to load options from default configuration files (deduced from
+ /// executable filename).
+ ///
+ /// \returns true if error occurred.
+ bool loadDefaultConfigFiles(llvm::cl::ExpansionContext &ExpCtx);
/// Read options from the specified file.
///
/// \param [in] FileName File to read.
+ /// \param [in] Search and expansion options.
/// \returns true, if error occurred while reading.
- bool readConfigFile(StringRef FileName);
+ bool readConfigFile(StringRef FileName, llvm::cl::ExpansionContext &ExpCtx);
/// Set the driver mode (cl, gcc, etc) from the value of the `--driver-mode`
/// option.
@@ -595,20 +742,39 @@ private:
/// @}
+ /// Retrieves a ToolChain for a particular device \p Target triple
+ ///
+ /// \param[in] HostTC is the host ToolChain paired with the device
+ ///
+ /// \param[in] TargetDeviceOffloadKind (e.g. OFK_Cuda/OFK_OpenMP/OFK_SYCL) is
+ /// an Offloading action that is optionally passed to a ToolChain (used by
+ /// CUDA, to specify if it's used in conjunction with OpenMP)
+ ///
+ /// Will cache ToolChains for the life of the driver object, and create them
+ /// on-demand.
+ const ToolChain &getOffloadingDeviceToolChain(
+ const llvm::opt::ArgList &Args, const llvm::Triple &Target,
+ const ToolChain &HostTC,
+ const Action::OffloadKind &TargetDeviceOffloadKind) const;
+
/// Get bitmasks for which option flags to include and exclude based on
/// the driver mode.
- std::pair<unsigned, unsigned> getIncludeExcludeOptionFlagMasks(bool IsClCompatMode) const;
+ llvm::opt::Visibility
+ getOptionVisibilityMask(bool UseDriverMode = true) const;
/// Helper used in BuildJobsForAction. Doesn't use the cache when building
/// jobs specifically for the given action, but will use the cache when
/// building jobs for the Action's inputs.
- InputInfo BuildJobsForActionNoCache(
+ InputInfoList BuildJobsForActionNoCache(
Compilation &C, const Action *A, const ToolChain *TC, StringRef BoundArch,
bool AtTopLevel, bool MultipleArchs, const char *LinkingOutput,
- std::map<std::pair<const Action *, std::string>, InputInfo>
+ std::map<std::pair<const Action *, std::string>, InputInfoList>
&CachedResults,
Action::OffloadKind TargetDeviceOffloadKind) const;
+ /// Return the typical executable name for the specified driver \p Mode.
+ static const char *getExecutableForDriverMode(DriverMode Mode);
+
public:
/// GetReleaseVersion - Parse (([0-9]+)(.([0-9]+)(.([0-9]+)?))?)? and
/// return the grouped values as integers. Numbers which are not
@@ -650,6 +816,16 @@ llvm::StringRef getDriverMode(StringRef ProgName, ArrayRef<const char *> Args);
/// Checks whether the value produced by getDriverMode is for CL mode.
bool IsClangCL(StringRef DriverMode);
+/// Expand response files from a clang driver or cc1 invocation.
+///
+/// \param Args The arguments that will be expanded.
+/// \param ClangCLMode Whether clang is in CL mode.
+/// \param Alloc Allocator for new arguments.
+/// \param FS Filesystem to use when expanding files.
+llvm::Error expandResponseFiles(SmallVectorImpl<const char *> &Args,
+ bool ClangCLMode, llvm::BumpPtrAllocator &Alloc,
+ llvm::vfs::FileSystem *FS = nullptr);
+
} // end namespace driver
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Job.h b/contrib/llvm-project/clang/include/clang/Driver/Job.h
index 8b287638a271..df9449463c53 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Job.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Job.h
@@ -12,13 +12,13 @@
#include "clang/Basic/LLVM.h"
#include "clang/Driver/InputInfo.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/Program.h"
#include <memory>
+#include <optional>
#include <string>
#include <utility>
#include <vector>
@@ -116,6 +116,9 @@ class Command {
/// The executable to run.
const char *Executable;
+ /// Optional argument to prepend.
+ const char *PrependArg;
+
/// The list of program arguments (not including the implicit first
/// argument, which will be the executable).
llvm::opt::ArgStringList Arguments;
@@ -141,8 +144,11 @@ class Command {
/// See Command::setEnvironment
std::vector<const char *> Environment;
+ /// Optional redirection for stdin, stdout, stderr.
+ std::vector<std::optional<std::string>> RedirectFiles;
+
/// Information on executable run provided by OS.
- mutable Optional<llvm::sys::ProcessStatistics> ProcStat;
+ mutable std::optional<llvm::sys::ProcessStatistics> ProcStat;
/// When a response file is needed, we try to put most arguments in an
/// exclusive file, while others remains as regular command line arguments.
@@ -166,7 +172,8 @@ public:
Command(const Action &Source, const Tool &Creator,
ResponseFileSupport ResponseSupport, const char *Executable,
const llvm::opt::ArgStringList &Arguments, ArrayRef<InputInfo> Inputs,
- ArrayRef<InputInfo> Outputs = None);
+ ArrayRef<InputInfo> Outputs = std::nullopt,
+ const char *PrependArg = nullptr);
// FIXME: This really shouldn't be copyable, but is currently copied in some
// error handling in Driver::generateCompilationDiagnostics.
Command(const Command &) = default;
@@ -175,7 +182,7 @@ public:
virtual void Print(llvm::raw_ostream &OS, const char *Terminator, bool Quote,
CrashReportInfo *CrashInfo = nullptr) const;
- virtual int Execute(ArrayRef<Optional<StringRef>> Redirects,
+ virtual int Execute(ArrayRef<std::optional<StringRef>> Redirects,
std::string *ErrMsg, bool *ExecutionFailed) const;
/// getSource - Return the Action which caused the creation of this job.
@@ -204,6 +211,15 @@ public:
/// from the parent process will be used.
virtual void setEnvironment(llvm::ArrayRef<const char *> NewEnvironment);
+ void
+ setRedirectFiles(const std::vector<std::optional<std::string>> &Redirects);
+
+ void replaceArguments(llvm::opt::ArgStringList List) {
+ Arguments = std::move(List);
+ }
+
+ void replaceExecutable(const char *Exe) { Executable = Exe; }
+
const char *getExecutable() const { return Executable; }
const llvm::opt::ArgStringList &getArguments() const { return Arguments; }
@@ -214,7 +230,7 @@ public:
return OutputFilenames;
}
- Optional<llvm::sys::ProcessStatistics> getProcessStatistics() const {
+ std::optional<llvm::sys::ProcessStatistics> getProcessStatistics() const {
return ProcStat;
}
@@ -229,34 +245,19 @@ public:
CC1Command(const Action &Source, const Tool &Creator,
ResponseFileSupport ResponseSupport, const char *Executable,
const llvm::opt::ArgStringList &Arguments,
- ArrayRef<InputInfo> Inputs, ArrayRef<InputInfo> Outputs = None);
+ ArrayRef<InputInfo> Inputs,
+ ArrayRef<InputInfo> Outputs = std::nullopt,
+ const char *PrependArg = nullptr);
void Print(llvm::raw_ostream &OS, const char *Terminator, bool Quote,
CrashReportInfo *CrashInfo = nullptr) const override;
- int Execute(ArrayRef<Optional<StringRef>> Redirects, std::string *ErrMsg,
+ int Execute(ArrayRef<std::optional<StringRef>> Redirects, std::string *ErrMsg,
bool *ExecutionFailed) const override;
void setEnvironment(llvm::ArrayRef<const char *> NewEnvironment) override;
};
-/// Like Command, but always pretends that the wrapped command succeeded.
-class ForceSuccessCommand : public Command {
-public:
- ForceSuccessCommand(const Action &Source_, const Tool &Creator_,
- ResponseFileSupport ResponseSupport,
- const char *Executable_,
- const llvm::opt::ArgStringList &Arguments_,
- ArrayRef<InputInfo> Inputs,
- ArrayRef<InputInfo> Outputs = None);
-
- void Print(llvm::raw_ostream &OS, const char *Terminator, bool Quote,
- CrashReportInfo *CrashInfo = nullptr) const override;
-
- int Execute(ArrayRef<Optional<StringRef>> Redirects, std::string *ErrMsg,
- bool *ExecutionFailed) const override;
-};
-
/// JobList - A sequence of jobs to perform.
class JobList {
public:
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Multilib.h b/contrib/llvm-project/clang/include/clang/Driver/Multilib.h
index cf2dbf6ff58a..9a2cc9bb1ba1 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Multilib.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Multilib.h
@@ -13,7 +13,9 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/SourceMgr.h"
#include <cassert>
#include <functional>
#include <string>
@@ -24,7 +26,9 @@ namespace clang {
namespace driver {
/// This corresponds to a single GCC Multilib, or a segment of one controlled
-/// by a command line flag
+/// by a command line flag.
+/// See also MultilibBuilder for building a multilib by mutating it
+/// incrementally.
class Multilib {
public:
using flags_list = std::vector<std::string>;
@@ -34,74 +38,47 @@ private:
std::string OSSuffix;
std::string IncludeSuffix;
flags_list Flags;
- int Priority;
+
+ // Optionally, a multilib can be assigned a string tag indicating that it's
+ // part of a group of mutually exclusive possibilities. If two or more
+ // multilibs have the same non-empty value of ExclusiveGroup, then only the
+ // last matching one of them will be selected.
+ //
+ // Setting this to the empty string is a special case, indicating that the
+ // directory is not mutually exclusive with anything else.
+ std::string ExclusiveGroup;
public:
+ /// GCCSuffix, OSSuffix & IncludeSuffix will be appended directly to the
+ /// sysroot string so they must either be empty or begin with a '/' character.
+ /// This is enforced with an assert in the constructor.
Multilib(StringRef GCCSuffix = {}, StringRef OSSuffix = {},
- StringRef IncludeSuffix = {}, int Priority = 0);
+ StringRef IncludeSuffix = {}, const flags_list &Flags = flags_list(),
+ StringRef ExclusiveGroup = {});
/// Get the detected GCC installation path suffix for the multi-arch
/// target variant. Always starts with a '/', unless empty
- const std::string &gccSuffix() const {
- assert(GCCSuffix.empty() ||
- (StringRef(GCCSuffix).front() == '/' && GCCSuffix.size() > 1));
- return GCCSuffix;
- }
-
- /// Set the GCC installation path suffix.
- Multilib &gccSuffix(StringRef S);
+ const std::string &gccSuffix() const { return GCCSuffix; }
/// Get the detected os path suffix for the multi-arch
/// target variant. Always starts with a '/', unless empty
- const std::string &osSuffix() const {
- assert(OSSuffix.empty() ||
- (StringRef(OSSuffix).front() == '/' && OSSuffix.size() > 1));
- return OSSuffix;
- }
-
- /// Set the os path suffix.
- Multilib &osSuffix(StringRef S);
+ const std::string &osSuffix() const { return OSSuffix; }
/// Get the include directory suffix. Always starts with a '/', unless
/// empty
- const std::string &includeSuffix() const {
- assert(IncludeSuffix.empty() ||
- (StringRef(IncludeSuffix).front() == '/' && IncludeSuffix.size() > 1));
- return IncludeSuffix;
- }
-
- /// Set the include directory suffix
- Multilib &includeSuffix(StringRef S);
+ const std::string &includeSuffix() const { return IncludeSuffix; }
/// Get the flags that indicate or contraindicate this multilib's use
- /// All elements begin with either '+' or '-'
+ /// All elements begin with either '-' or '!'
const flags_list &flags() const { return Flags; }
- flags_list &flags() { return Flags; }
-
- /// Returns the multilib priority. When more than one multilib matches flags,
- /// the one with the highest priority is selected, with 0 being the default.
- int priority() const { return Priority; }
-
- /// Add a flag to the flags list
- /// \p Flag must be a flag accepted by the driver with its leading '-' removed,
- /// and replaced with either:
- /// '-' which contraindicates using this multilib with that flag
- /// or:
- /// '+' which promotes using this multilib in the presence of that flag
- /// otherwise '-print-multi-lib' will not emit them correctly.
- Multilib &flag(StringRef F) {
- assert(F.front() == '+' || F.front() == '-');
- Flags.push_back(std::string(F));
- return *this;
- }
+
+ /// Get the exclusive group label.
+ const std::string &exclusiveGroup() const { return ExclusiveGroup; }
LLVM_DUMP_METHOD void dump() const;
/// print summary of the Multilib
void print(raw_ostream &OS) const;
- /// Check whether any of the 'against' flags contradict the 'for' flags.
- bool isValid() const;
-
/// Check whether the default is selected
bool isDefault() const
{ return GCCSuffix.empty() && OSSuffix.empty() && IncludeSuffix.empty(); }
@@ -111,63 +88,57 @@ public:
raw_ostream &operator<<(raw_ostream &OS, const Multilib &M);
+/// See also MultilibSetBuilder for combining multilibs into a set.
class MultilibSet {
public:
using multilib_list = std::vector<Multilib>;
- using iterator = multilib_list::iterator;
using const_iterator = multilib_list::const_iterator;
using IncludeDirsFunc =
std::function<std::vector<std::string>(const Multilib &M)>;
using FilterCallback = llvm::function_ref<bool(const Multilib &)>;
+ /// Uses regular expressions to simplify flags used for multilib selection.
+ /// For example, we may wish both -mfloat-abi=soft and -mfloat-abi=softfp to
+ /// be treated as -mfloat-abi=soft.
+ struct FlagMatcher {
+ std::string Match;
+ std::vector<std::string> Flags;
+ };
+
private:
multilib_list Multilibs;
+ std::vector<FlagMatcher> FlagMatchers;
IncludeDirsFunc IncludeCallback;
IncludeDirsFunc FilePathsCallback;
public:
MultilibSet() = default;
+ MultilibSet(multilib_list &&Multilibs,
+ std::vector<FlagMatcher> &&FlagMatchers = {})
+ : Multilibs(Multilibs), FlagMatchers(FlagMatchers) {}
- /// Add an optional Multilib segment
- MultilibSet &Maybe(const Multilib &M);
-
- /// Add a set of mutually incompatible Multilib segments
- MultilibSet &Either(const Multilib &M1, const Multilib &M2);
- MultilibSet &Either(const Multilib &M1, const Multilib &M2,
- const Multilib &M3);
- MultilibSet &Either(const Multilib &M1, const Multilib &M2,
- const Multilib &M3, const Multilib &M4);
- MultilibSet &Either(const Multilib &M1, const Multilib &M2,
- const Multilib &M3, const Multilib &M4,
- const Multilib &M5);
- MultilibSet &Either(ArrayRef<Multilib> Ms);
+ const multilib_list &getMultilibs() { return Multilibs; }
/// Filter out some subset of the Multilibs using a user defined callback
MultilibSet &FilterOut(FilterCallback F);
- /// Filter out those Multilibs whose gccSuffix matches the given expression
- MultilibSet &FilterOut(const char *Regex);
-
/// Add a completed Multilib to the set
void push_back(const Multilib &M);
- /// Union this set of multilibs with another
- void combineWith(const MultilibSet &MS);
-
- /// Remove all of the multilibs from the set
- void clear() { Multilibs.clear(); }
-
- iterator begin() { return Multilibs.begin(); }
const_iterator begin() const { return Multilibs.begin(); }
-
- iterator end() { return Multilibs.end(); }
const_iterator end() const { return Multilibs.end(); }
- /// Pick the best multilib in the set, \returns false if none are compatible
- bool select(const Multilib::flags_list &Flags, Multilib &M) const;
+ /// Select compatible variants, \returns false if none are compatible
+ bool select(const Multilib::flags_list &Flags,
+ llvm::SmallVectorImpl<Multilib> &) const;
unsigned size() const { return Multilibs.size(); }
+ /// Get the given flags plus flags found by matching them against the
+ /// FlagMatchers and choosing the Flags of each accordingly. The select method
+ /// calls this method so in most cases it's not necessary to call it directly.
+ llvm::StringSet<> expandFlags(const Multilib::flags_list &) const;
+
LLVM_DUMP_METHOD void dump() const;
void print(raw_ostream &OS) const;
@@ -185,12 +156,9 @@ public:
const IncludeDirsFunc &filePathsCallback() const { return FilePathsCallback; }
-private:
- /// Apply the filter to Multilibs and return the subset that remains
- static multilib_list filterCopy(FilterCallback F, const multilib_list &Ms);
-
- /// Apply the filter to the multilib_list, removing those that don't match
- static void filterInPlace(FilterCallback F, multilib_list &Ms);
+ static llvm::ErrorOr<MultilibSet>
+ parseYaml(llvm::MemoryBufferRef, llvm::SourceMgr::DiagHandlerTy = nullptr,
+ void *DiagHandlerCtxt = nullptr);
};
raw_ostream &operator<<(raw_ostream &OS, const MultilibSet &MS);
diff --git a/contrib/llvm-project/clang/include/clang/Driver/MultilibBuilder.h b/contrib/llvm-project/clang/include/clang/Driver/MultilibBuilder.h
new file mode 100644
index 000000000000..61596c5c573f
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Driver/MultilibBuilder.h
@@ -0,0 +1,134 @@
+//===- MultilibBuilder.h
+//-----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_DRIVER_MULTILIBBUILDER_H
+#define LLVM_CLANG_DRIVER_MULTILIBBUILDER_H
+
+#include "clang/Driver/Multilib.h"
+
+namespace clang {
+namespace driver {
+
+/// This corresponds to a single GCC multilib, or a segment of one controlled
+/// by a command line flag. This class can be used to create a Multilib, and
+/// contains helper functions to mutate it before creating a Multilib instance
+/// with makeMultilib().
+class MultilibBuilder {
+public:
+ using flags_list = std::vector<std::string>;
+
+private:
+ std::string GCCSuffix;
+ std::string OSSuffix;
+ std::string IncludeSuffix;
+ flags_list Flags;
+
+public:
+ MultilibBuilder(StringRef GCCSuffix, StringRef OSSuffix,
+ StringRef IncludeSuffix);
+
+ /// Initializes GCCSuffix, OSSuffix & IncludeSuffix to the same value.
+ MultilibBuilder(StringRef Suffix = {});
+
+ /// Get the detected GCC installation path suffix for the multi-arch
+ /// target variant. Always starts with a '/', unless empty
+ const std::string &gccSuffix() const {
+ assert(GCCSuffix.empty() ||
+ (StringRef(GCCSuffix).front() == '/' && GCCSuffix.size() > 1));
+ return GCCSuffix;
+ }
+
+ /// Set the GCC installation path suffix.
+ MultilibBuilder &gccSuffix(StringRef S);
+
+ /// Get the detected os path suffix for the multi-arch
+ /// target variant. Always starts with a '/', unless empty
+ const std::string &osSuffix() const {
+ assert(OSSuffix.empty() ||
+ (StringRef(OSSuffix).front() == '/' && OSSuffix.size() > 1));
+ return OSSuffix;
+ }
+
+ /// Set the os path suffix.
+ MultilibBuilder &osSuffix(StringRef S);
+
+ /// Get the include directory suffix. Always starts with a '/', unless
+ /// empty
+ const std::string &includeSuffix() const {
+ assert(IncludeSuffix.empty() || (StringRef(IncludeSuffix).front() == '/' &&
+ IncludeSuffix.size() > 1));
+ return IncludeSuffix;
+ }
+
+ /// Set the include directory suffix
+ MultilibBuilder &includeSuffix(StringRef S);
+
+ /// Get the flags that indicate or contraindicate this multilib's use
+ /// All elements begin with either '-' or '!'
+ const flags_list &flags() const { return Flags; }
+ flags_list &flags() { return Flags; }
+
+ /// Add a flag to the flags list
+ /// \p Flag must be a flag accepted by the driver.
+ /// \p Disallow defines whether the flag is negated and therefore disallowed.
+ MultilibBuilder &flag(StringRef Flag, bool Disallow = false);
+
+ Multilib makeMultilib() const;
+
+ /// Check whether any of the 'against' flags contradict the 'for' flags.
+ bool isValid() const;
+
+ /// Check whether the default is selected
+ bool isDefault() const {
+ return GCCSuffix.empty() && OSSuffix.empty() && IncludeSuffix.empty();
+ }
+};
+
+/// This class can be used to create a MultilibSet, and contains helper
+/// functions to add combinations of multilibs before creating a MultilibSet
+/// instance with makeMultilibSet().
+class MultilibSetBuilder {
+public:
+ using multilib_list = std::vector<MultilibBuilder>;
+
+ MultilibSetBuilder() = default;
+
+ /// Add an optional Multilib segment
+ MultilibSetBuilder &Maybe(const MultilibBuilder &M);
+
+ /// Add a set of mutually incompatible Multilib segments
+ MultilibSetBuilder &Either(const MultilibBuilder &M1,
+ const MultilibBuilder &M2);
+ MultilibSetBuilder &Either(const MultilibBuilder &M1,
+ const MultilibBuilder &M2,
+ const MultilibBuilder &M3);
+ MultilibSetBuilder &Either(const MultilibBuilder &M1,
+ const MultilibBuilder &M2,
+ const MultilibBuilder &M3,
+ const MultilibBuilder &M4);
+ MultilibSetBuilder &Either(const MultilibBuilder &M1,
+ const MultilibBuilder &M2,
+ const MultilibBuilder &M3,
+ const MultilibBuilder &M4,
+ const MultilibBuilder &M5);
+ MultilibSetBuilder &Either(ArrayRef<MultilibBuilder> Ms);
+
+ /// Filter out those Multilibs whose gccSuffix matches the given expression
+ MultilibSetBuilder &FilterOut(const char *Regex);
+
+ MultilibSet makeMultilibSet() const;
+
+private:
+ multilib_list Multilibs;
+};
+
+} // namespace driver
+} // namespace clang
+
+#endif // LLVM_CLANG_DRIVER_MULTILIBBUILDER_H
diff --git a/contrib/llvm-project/clang/include/clang/Driver/OffloadBundler.h b/contrib/llvm-project/clang/include/clang/Driver/OffloadBundler.h
new file mode 100644
index 000000000000..84349abe185f
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Driver/OffloadBundler.h
@@ -0,0 +1,126 @@
+//===- OffloadBundler.h - File Bundling and Unbundling ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines an offload bundling API that bundles different files
+/// that relate with the same source code but different targets into a single
+/// one. Also the implements the opposite functionality, i.e. unbundle files
+/// previous created by this API.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_DRIVER_OFFLOADBUNDLER_H
+#define LLVM_CLANG_DRIVER_OFFLOADBUNDLER_H
+
+#include "llvm/Support/Error.h"
+#include "llvm/TargetParser/Triple.h"
+#include <llvm/Support/MemoryBuffer.h>
+#include <string>
+#include <vector>
+
+namespace clang {
+
+class OffloadBundlerConfig {
+public:
+ OffloadBundlerConfig();
+
+ bool AllowNoHost = false;
+ bool AllowMissingBundles = false;
+ bool CheckInputArchive = false;
+ bool PrintExternalCommands = false;
+ bool HipOpenmpCompatible = false;
+ bool Compress = false;
+ bool Verbose = false;
+
+ unsigned BundleAlignment = 1;
+ unsigned HostInputIndex = ~0u;
+
+ std::string FilesType;
+ std::string ObjcopyPath;
+
+ // TODO: Convert these to llvm::SmallVector
+ std::vector<std::string> TargetNames;
+ std::vector<std::string> InputFileNames;
+ std::vector<std::string> OutputFileNames;
+};
+
+class OffloadBundler {
+public:
+ const OffloadBundlerConfig &BundlerConfig;
+
+ // TODO: Add error checking from ClangOffloadBundler.cpp
+ OffloadBundler(const OffloadBundlerConfig &BC) : BundlerConfig(BC) {}
+
+ // List bundle IDs. Return true if an error was found.
+ static llvm::Error
+ ListBundleIDsInFile(llvm::StringRef InputFileName,
+ const OffloadBundlerConfig &BundlerConfig);
+
+ llvm::Error BundleFiles();
+ llvm::Error UnbundleFiles();
+ llvm::Error UnbundleArchive();
+};
+
+/// Obtain the offload kind, real machine triple, and an optional TargetID
+/// out of the target information specified by the user.
+/// Bundle Entry ID (or, Offload Target String) has following components:
+/// * Offload Kind - Host, OpenMP, or HIP
+/// * Triple - Standard LLVM Triple
+/// * TargetID (Optional) - target ID, like gfx906:xnack+ or sm_30
+struct OffloadTargetInfo {
+ llvm::StringRef OffloadKind;
+ llvm::Triple Triple;
+ llvm::StringRef TargetID;
+
+ const OffloadBundlerConfig &BundlerConfig;
+
+ OffloadTargetInfo(const llvm::StringRef Target,
+ const OffloadBundlerConfig &BC);
+ bool hasHostKind() const;
+ bool isOffloadKindValid() const;
+ bool isOffloadKindCompatible(const llvm::StringRef TargetOffloadKind) const;
+ bool isTripleValid() const;
+ bool operator==(const OffloadTargetInfo &Target) const;
+ std::string str() const;
+};
+
+// CompressedOffloadBundle represents the format for the compressed offload
+// bundles.
+//
+// The format is as follows:
+// - Magic Number (4 bytes) - A constant "CCOB".
+// - Version (2 bytes)
+// - Compression Method (2 bytes) - Uses the values from
+// llvm::compression::Format.
+// - Uncompressed Size (4 bytes).
+// - Truncated MD5 Hash (8 bytes).
+// - Compressed Data (variable length).
+
+class CompressedOffloadBundle {
+private:
+ static inline const size_t MagicSize = 4;
+ static inline const size_t VersionFieldSize = sizeof(uint16_t);
+ static inline const size_t MethodFieldSize = sizeof(uint16_t);
+ static inline const size_t SizeFieldSize = sizeof(uint32_t);
+ static inline const size_t HashFieldSize = 8;
+ static inline const size_t HeaderSize = MagicSize + VersionFieldSize +
+ MethodFieldSize + SizeFieldSize +
+ HashFieldSize;
+ static inline const llvm::StringRef MagicNumber = "CCOB";
+ static inline const uint16_t Version = 1;
+
+public:
+ static llvm::Expected<std::unique_ptr<llvm::MemoryBuffer>>
+ compress(const llvm::MemoryBuffer &Input, bool Verbose = false);
+ static llvm::Expected<std::unique_ptr<llvm::MemoryBuffer>>
+ decompress(const llvm::MemoryBuffer &Input, bool Verbose = false);
+};
+
+} // namespace clang
+
+#endif // LLVM_CLANG_DRIVER_OFFLOADBUNDLER_H
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Options.h b/contrib/llvm-project/clang/include/clang/Driver/Options.h
index 056660192ac5..0797410e9940 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Options.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Options.h
@@ -9,11 +9,8 @@
#ifndef LLVM_CLANG_DRIVER_OPTIONS_H
#define LLVM_CLANG_DRIVER_OPTIONS_H
-namespace llvm {
-namespace opt {
-class OptTable;
-}
-}
+#include "llvm/Option/OptTable.h"
+#include "llvm/Option/Option.h"
namespace clang {
namespace driver {
@@ -26,23 +23,27 @@ enum ClangFlags {
LinkerInput = (1 << 5),
NoArgumentUnused = (1 << 6),
Unsupported = (1 << 7),
- CoreOption = (1 << 8),
- CLOption = (1 << 9),
- CC1Option = (1 << 10),
- CC1AsOption = (1 << 11),
- NoDriverOption = (1 << 12),
- LinkOption = (1 << 13),
- FlangOption = (1 << 14),
- FC1Option = (1 << 15),
- FlangOnlyOption = (1 << 16),
- Ignored = (1 << 17),
+ LinkOption = (1 << 8),
+ Ignored = (1 << 9),
+ TargetSpecific = (1 << 10),
+};
+
+// Flags specifically for clang option visibility. We alias DefaultVis to
+// ClangOption, because "DefaultVis" is confusing in Options.td, which is used
+// for multiple drivers (clang, cl, flang, etc).
+enum ClangVisibility {
+ ClangOption = llvm::opt::DefaultVis,
+ CLOption = (1 << 1),
+ CC1Option = (1 << 2),
+ CC1AsOption = (1 << 3),
+ FlangOption = (1 << 4),
+ FC1Option = (1 << 5),
+ DXCOption = (1 << 6),
};
enum ID {
OPT_INVALID = 0, // This is not an option ID.
-#define OPTION(PREFIX, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES) \
- OPT_##ID,
+#define OPTION(...) LLVM_MAKE_OPT_ID(__VA_ARGS__),
#include "clang/Driver/Options.inc"
LastOption
#undef OPTION
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Options.td b/contrib/llvm-project/clang/include/clang/Driver/Options.td
index a0cbcae0bdc3..175bedbfb4d0 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Options.td
+++ b/contrib/llvm-project/clang/include/clang/Driver/Options.td
@@ -35,37 +35,51 @@ def Unsupported : OptionFlag;
// Ignored - The option is unsupported, and the driver will silently ignore it.
def Ignored : OptionFlag;
-// CoreOption - This is considered a "core" Clang option, available in both
-// clang and clang-cl modes.
-def CoreOption : OptionFlag;
+// If an option affects linking, but has a primary group (so Link_Group cannot
+// be used), add this flag.
+def LinkOption : OptionFlag;
+
+// This is a target-specific option for compilation. Using it on an unsupported
+// target will lead to an err_drv_unsupported_opt_for_target error.
+def TargetSpecific : OptionFlag;
+
+// Indicates that this warning is ignored, but accepted with a warning for
+// GCC compatibility.
+class IgnoredGCCCompat : Flags<[HelpHidden]> {}
+
+class TargetSpecific : Flags<[TargetSpecific]> {}
+
+/////////
+// Visibility
+
+// We prefer the name "ClangOption" here rather than "Default" to make
+// it clear that these options will be visible in the clang driver (as
+// opposed to clang -cc1, the CL driver, or the flang driver).
+defvar ClangOption = DefaultVis;
// CLOption - This is a cl.exe compatibility option. Options with this flag
// are made available when the driver is running in CL compatibility mode.
-def CLOption : OptionFlag;
+def CLOption : OptionVisibility;
// CC1Option - This option should be accepted by clang -cc1.
-def CC1Option : OptionFlag;
+def CC1Option : OptionVisibility;
// CC1AsOption - This option should be accepted by clang -cc1as.
-def CC1AsOption : OptionFlag;
-
-// NoDriverOption - This option should not be accepted by the driver.
-def NoDriverOption : OptionFlag;
-
-// If an option affects linking, but has a primary group (so Link_Group cannot
-// be used), add this flag.
-def LinkOption : OptionFlag;
+def CC1AsOption : OptionVisibility;
// FlangOption - This is considered a "core" Flang option, available in
// flang mode.
-def FlangOption : OptionFlag;
-
-// FlangOnlyOption - This option should only be used by Flang (i.e. it is not
-// available for Clang)
-def FlangOnlyOption : OptionFlag;
+def FlangOption : OptionVisibility;
// FC1Option - This option should be accepted by flang -fc1.
-def FC1Option : OptionFlag;
+def FC1Option : OptionVisibility;
+
+// DXCOption - This is a dxc.exe compatibility option. Options with this flag
+// are made available when the driver is running in DXC compatibility mode.
+def DXCOption : OptionVisibility;
+
+/////////
+// Docs
// A short name to show in documentation. The name will be interpreted as rST.
class DocName<string name> { string DocName = name; }
@@ -77,10 +91,6 @@ class DocBrief<code descr> { code DocBrief = descr; }
// documentation.
class DocFlatten { bit DocFlatten = 1; }
-// Indicates that this warning is ignored, but accepted with a warning for
-// GCC compatibility.
-class IgnoredGCCCompat : Flags<[HelpHidden]> {}
-
/////////
// Groups
@@ -90,13 +100,13 @@ def Action_Group : OptionGroup<"<action group>">, DocName<"Actions">,
// Meta-group for options which are only used for compilation,
// and not linking etc.
def CompileOnly_Group : OptionGroup<"<CompileOnly group>">,
- DocName<"Compilation flags">, DocBrief<[{
+ DocName<"Compilation options">, DocBrief<[{
Flags controlling the behavior of Clang during compilation. These flags have
no effect during actions that do not perform compilation.}]>;
def Preprocessor_Group : OptionGroup<"<Preprocessor group>">,
Group<CompileOnly_Group>,
- DocName<"Preprocessor flags">, DocBrief<[{
+ DocName<"Preprocessor options">, DocBrief<[{
Flags controlling the behavior of the Clang preprocessor.}]>;
def IncludePath_Group : OptionGroup<"<I/i group>">, Group<Preprocessor_Group>,
@@ -118,7 +128,7 @@ def d_Group : OptionGroup<"<d group>">, Group<Preprocessor_Group>,
Flags allowing the state of the preprocessor to be dumped in various ways.}]>;
def Diag_Group : OptionGroup<"<W/R group>">, Group<CompileOnly_Group>,
- DocName<"Diagnostic flags">, DocBrief<[{
+ DocName<"Diagnostic options">, DocBrief<[{
Flags controlling which warnings, errors, and remarks Clang will generate.
See the :doc:`full list of warning and remark flags <DiagnosticsReference>`.}]>;
@@ -136,14 +146,28 @@ def f_clang_Group : OptionGroup<"<f (clang-only) group>">,
Group<CompileOnly_Group>, DocFlatten;
def pedantic_Group : OptionGroup<"<pedantic group>">, Group<f_Group>,
DocFlatten;
+
+def offload_Group : OptionGroup<"<offload group>">, Group<f_Group>,
+ DocName<"Common Offloading options">,
+ Visibility<[ClangOption, CLOption]>;
+
def opencl_Group : OptionGroup<"<opencl group>">, Group<f_Group>,
- DocName<"OpenCL flags">;
+ DocName<"OpenCL options">;
def sycl_Group : OptionGroup<"<SYCL group>">, Group<f_Group>,
- DocName<"SYCL flags">;
+ DocName<"SYCL options">;
+
+def cuda_Group : OptionGroup<"<CUDA group>">, Group<f_Group>,
+ DocName<"CUDA options">,
+ Visibility<[ClangOption, CLOption]>;
+
+def hip_Group : OptionGroup<"<HIP group>">, Group<f_Group>,
+ DocName<"HIP options">,
+ Visibility<[ClangOption, CLOption]>;
def m_Group : OptionGroup<"<m group>">, Group<CompileOnly_Group>,
- DocName<"Target-dependent compilation options">;
+ DocName<"Target-dependent compilation options">,
+ Visibility<[ClangOption, CLOption]>;
// Feature groups - these take command line options that correspond directly to
// target specific features and can be translated directly from command line
@@ -156,6 +180,8 @@ def m_arm_Features_Group : OptionGroup<"<arm features group>">,
Group<m_Group>, DocName<"ARM">;
def m_hexagon_Features_Group : OptionGroup<"<hexagon features group>">,
Group<m_Group>, DocName<"Hexagon">;
+def m_sparc_Features_Group : OptionGroup<"<sparc features group>">,
+ Group<m_Group>, DocName<"SPARC">;
// The features added by this group will not be added to target features.
// These are explicitly handled.
def m_hexagon_Features_HVX_Group : OptionGroup<"<hexagon features group>">,
@@ -173,9 +199,17 @@ def m_wasm_Features_Group : OptionGroup<"<wasm features group>">,
def m_wasm_Features_Driver_Group : OptionGroup<"<wasm driver features group>">,
Group<m_Group>, DocName<"WebAssembly Driver">;
def m_x86_Features_Group : OptionGroup<"<x86 features group>">,
- Group<m_Group>, Flags<[CoreOption]>, DocName<"X86">;
+ Group<m_Group>, Visibility<[ClangOption, CLOption]>,
+ DocName<"X86">;
+def m_x86_AVX10_Features_Group : OptionGroup<"<x86 AVX10 features group>">,
+ Group<m_Group>, Visibility<[ClangOption, CLOption]>,
+ DocName<"X86 AVX10">;
def m_riscv_Features_Group : OptionGroup<"<riscv features group>">,
- Group<m_Group>, DocName<"RISCV">;
+ Group<m_Group>, DocName<"RISC-V">;
+def m_ve_Features_Group : OptionGroup<"<ve features group>">,
+ Group<m_Group>, DocName<"VE">;
+def m_loongarch_Features_Group : OptionGroup<"<loongarch features group>">,
+ Group<m_Group>, DocName<"LoongArch">;
def m_libc_Group : OptionGroup<"<m libc group>">, Group<m_mips_Features_Group>,
Flags<[HelpHidden]>;
@@ -197,20 +231,20 @@ def ggdbN_Group : OptionGroup<"<ggdbN group>">, Group<gN_Group>, DocFlatten;
def gTune_Group : OptionGroup<"<gTune group>">, Group<g_Group>,
DocName<"Debugger to tune debug information for">;
def g_flags_Group : OptionGroup<"<g flags group>">, Group<DebugInfo_Group>,
- DocName<"Debug information flags">;
+ DocName<"Debug information options">;
def StaticAnalyzer_Group : OptionGroup<"<Static analyzer group>">,
- DocName<"Static analyzer flags">, DocBrief<[{
+ DocName<"Static analyzer options">, DocBrief<[{
Flags controlling the behavior of the Clang Static Analyzer.}]>;
// gfortran options that we recognize in the driver and pass along when
// invoking GCC to compile Fortran code.
def gfortran_Group : OptionGroup<"<gfortran group>">,
- DocName<"Fortran compilation flags">, DocBrief<[{
+ DocName<"Fortran compilation options">, DocBrief<[{
Flags that will be passed onto the ``gfortran`` compiler when Clang is given
a Fortran input.}]>;
-def Link_Group : OptionGroup<"<T/e/s/t/u group>">, DocName<"Linker flags">,
+def Link_Group : OptionGroup<"<T/e/s/t/u group>">, DocName<"Linker options">,
DocBrief<[{Flags that are passed on to the linker}]>;
def T_Group : OptionGroup<"<T group>">, Group<Link_Group>, DocFlatten;
def u_Group : OptionGroup<"<u group>">, Group<Link_Group>, DocFlatten;
@@ -225,12 +259,20 @@ def clang_ignored_f_Group : OptionGroup<"<clang ignored f group>">,
def clang_ignored_m_Group : OptionGroup<"<clang ignored m group>">,
Group<m_Group>, Flags<[Ignored]>;
+// Unsupported flang groups
+def flang_ignored_w_Group : OptionGroup<"<flang ignored W group>">,
+ Group<W_Group>, Flags<[Ignored]>, Visibility<[FlangOption]>;
+
// Group for clang options in the process of deprecation.
// Please include the version that deprecated the flag as comment to allow
// easier garbage collection.
def clang_ignored_legacy_options_Group : OptionGroup<"<clang legacy flags>">,
Group<f_Group>, Flags<[Ignored]>;
+def LongDouble_Group : OptionGroup<"<LongDouble group>">, Group<m_Group>,
+ DocName<"Long double options">,
+ DocBrief<[{Selects the long double implementation}]>;
+
// Retired with clang-5.0
def : Flag<["-"], "fslp-vectorize-aggressive">, Group<clang_ignored_legacy_options_Group>;
def : Flag<["-"], "fno-slp-vectorize-aggressive">, Group<clang_ignored_legacy_options_Group>;
@@ -272,22 +314,47 @@ class MigratorOpts<string base>
// Args.hasArg(OPT_ffoo) can be used to check that the flag is enabled.
// This is useful if the option is usually disabled.
// Use this only when the option cannot be declared via BoolFOption.
-multiclass OptInFFlag<string name, string pos_prefix, string neg_prefix="",
- string help="", list<OptionFlag> flags=[]> {
- def f#NAME : Flag<["-"], "f"#name>, Flags<[CC1Option] # flags>,
+multiclass OptInCC1FFlag<string name, string pos_prefix, string neg_prefix="",
+ string help="",
+ list<OptionVisibility> vis=[ClangOption]> {
+ def f#NAME : Flag<["-"], "f"#name>, Visibility<[CC1Option] # vis>,
Group<f_Group>, HelpText<pos_prefix # help>;
- def fno_#NAME : Flag<["-"], "fno-"#name>, Flags<flags>,
+ def fno_#NAME : Flag<["-"], "fno-"#name>, Visibility<vis>,
Group<f_Group>, HelpText<neg_prefix # help>;
}
// A boolean option which is opt-out in CC1. The negative option exists in CC1 and
// Args.hasArg(OPT_fno_foo) can be used to check that the flag is disabled.
// Use this only when the option cannot be declared via BoolFOption.
-multiclass OptOutFFlag<string name, string pos_prefix, string neg_prefix,
- string help="", list<OptionFlag> flags=[]> {
- def f#NAME : Flag<["-"], "f"#name>, Flags<flags>,
+multiclass OptOutCC1FFlag<string name, string pos_prefix, string neg_prefix,
+ string help="",
+ list<OptionVisibility> vis=[ClangOption]> {
+ def f#NAME : Flag<["-"], "f"#name>, Visibility<vis>,
Group<f_Group>, HelpText<pos_prefix # help>;
- def fno_#NAME : Flag<["-"], "fno-"#name>, Flags<[CC1Option] # flags>,
+ def fno_#NAME : Flag<["-"], "fno-"#name>, Visibility<[CC1Option] # vis>,
+ Group<f_Group>, HelpText<neg_prefix # help>;
+}
+
+// A boolean option which is opt-in in FC1. The positive option exists in FC1 and
+// Args.hasArg(OPT_ffoo) can be used to check that the flag is enabled.
+// This is useful if the option is usually disabled.
+multiclass OptInFC1FFlag<string name, string pos_prefix, string neg_prefix="",
+ string help="",
+ list<OptionVisibility> vis=[ClangOption]> {
+ def f#NAME : Flag<["-"], "f"#name>, Visibility<[FC1Option] # vis>,
+ Group<f_Group>, HelpText<pos_prefix # help>;
+ def fno_#NAME : Flag<["-"], "fno-"#name>, Visibility<vis>,
+ Group<f_Group>, HelpText<neg_prefix # help>;
+}
+
+// A boolean option which is opt-out in FC1. The negative option exists in FC1 and
+// Args.hasArg(OPT_fno_foo) can be used to check that the flag is disabled.
+multiclass OptOutFC1FFlag<string name, string pos_prefix, string neg_prefix,
+ string help="",
+ list<OptionVisibility> vis=[ClangOption]> {
+ def f#NAME : Flag<["-"], "f"#name>, Visibility<vis>,
+ Group<f_Group>, HelpText<pos_prefix # help>;
+ def fno_#NAME : Flag<["-"], "fno-"#name>, Visibility<[FC1Option] # vis>,
Group<f_Group>, HelpText<neg_prefix # help>;
}
@@ -320,7 +387,8 @@ def SetFalse : Set<false> {}
// Definition of single command line flag. This is an implementation detail, use
// SetTrueBy or SetFalseBy instead.
-class FlagDef<bit polarity, bit value, list<OptionFlag> option_flags,
+class FlagDef<bit polarity, bit value,
+ list<OptionFlag> option_flags, list<OptionVisibility> option_vis,
string help, list<code> implied_by_expressions = []> {
// The polarity. Besides spelling, this also decides whether the TableGen
// record will be prefixed with "no_".
@@ -329,9 +397,12 @@ class FlagDef<bit polarity, bit value, list<OptionFlag> option_flags,
// The value assigned to key path when the flag is present on command line.
bit Value = value;
- // OptionFlags that control visibility of the flag in different tools.
+ // OptionFlags in different tools.
list<OptionFlag> OptionFlags = option_flags;
+ // OptionVisibility flags for different tools.
+ list<OptionVisibility> OptionVisibility = option_vis;
+
// The help text associated with the flag.
string Help = help;
@@ -340,8 +411,11 @@ class FlagDef<bit polarity, bit value, list<OptionFlag> option_flags,
}
// Additional information to be appended to both positive and negative flag.
-class BothFlags<list<OptionFlag> option_flags, string help = ""> {
+class BothFlags<list<OptionFlag> option_flags,
+ list<OptionVisibility> option_vis = [ClangOption],
+ string help = ""> {
list<OptionFlag> OptionFlags = option_flags;
+ list<OptionVisibility> OptionVisibility = option_vis;
string Help = help;
}
@@ -350,23 +424,26 @@ class ApplySuffix<FlagDef flag, BothFlags suffix> {
FlagDef Result
= FlagDef<flag.Polarity, flag.Value,
flag.OptionFlags # suffix.OptionFlags,
+ flag.OptionVisibility # suffix.OptionVisibility,
flag.Help # suffix.Help, flag.ImpliedBy>;
}
// Definition of the command line flag with positive spelling, e.g. "-ffoo".
-class PosFlag<Set value, list<OptionFlag> flags = [], string help = "",
- list<code> implied_by_expressions = []>
- : FlagDef<true, value.Value, flags, help, implied_by_expressions> {}
+class PosFlag<Set value,
+ list<OptionFlag> flags = [], list<OptionVisibility> vis = [],
+ string help = "", list<code> implied_by_expressions = []>
+ : FlagDef<true, value.Value, flags, vis, help, implied_by_expressions> {}
// Definition of the command line flag with negative spelling, e.g. "-fno-foo".
-class NegFlag<Set value, list<OptionFlag> flags = [], string help = "",
- list<code> implied_by_expressions = []>
- : FlagDef<false, value.Value, flags, help, implied_by_expressions> {}
+class NegFlag<Set value,
+ list<OptionFlag> flags = [], list<OptionVisibility> vis = [],
+ string help = "", list<code> implied_by_expressions = []>
+ : FlagDef<false, value.Value, flags, vis, help, implied_by_expressions> {}
// Expanded FlagDef that's convenient for creation of TableGen records.
class FlagDefExpanded<FlagDef flag, string prefix, string name, string spelling>
- : FlagDef<flag.Polarity, flag.Value, flag.OptionFlags, flag.Help,
- flag.ImpliedBy> {
+ : FlagDef<flag.Polarity, flag.Value, flag.OptionFlags, flag.OptionVisibility,
+ flag.Help, flag.ImpliedBy> {
// Name of the TableGen record.
string RecordName = prefix # !if(flag.Polarity, "", "no_") # name;
@@ -384,10 +461,10 @@ class FlagDefExpanded<FlagDef flag, string prefix, string name, string spelling>
class MarshalledFlagRec<FlagDefExpanded flag, FlagDefExpanded other,
FlagDefExpanded implied, KeyPathAndMacro kpm,
Default default>
- : Flag<["-"], flag.Spelling>, Flags<flag.OptionFlags>, HelpText<flag.Help>,
+ : Flag<["-"], flag.Spelling>, Flags<flag.OptionFlags>,
+ Visibility<flag.OptionVisibility>, HelpText<flag.Help>,
MarshallingInfoBooleanFlag<kpm, default.Value, flag.ValueAsCode,
- flag.RecordName, other.ValueAsCode,
- other.RecordName>,
+ other.ValueAsCode, other.RecordName>,
ImpliedByAnyOf<implied.ImpliedBy, implied.ValueAsCode> {}
// Generates TableGen records for two command line flags that control the same
@@ -399,7 +476,7 @@ class MarshalledFlagRec<FlagDefExpanded flag, FlagDefExpanded other,
multiclass BoolOption<string prefix = "", string spelling_base,
KeyPathAndMacro kpm, Default default,
FlagDef flag1_base, FlagDef flag2_base,
- BothFlags suffix = BothFlags<[], "">> {
+ BothFlags suffix = BothFlags<[]>> {
defvar flag1 = FlagDefExpanded<ApplySuffix<flag1_base, suffix>.Result, prefix,
NAME, spelling_base>;
@@ -430,7 +507,7 @@ multiclass BoolOption<string prefix = "", string spelling_base,
/// CompilerInvocation.
multiclass BoolFOption<string flag_base, KeyPathAndMacro kpm,
Default default, FlagDef flag1, FlagDef flag2,
- BothFlags both = BothFlags<[], "">> {
+ BothFlags both = BothFlags<[]>> {
defm NAME : BoolOption<"f", flag_base, kpm, default, flag1, flag2, both>,
Group<f_Group>;
}
@@ -441,11 +518,47 @@ multiclass BoolFOption<string flag_base, KeyPathAndMacro kpm,
// CompilerInvocation.
multiclass BoolGOption<string flag_base, KeyPathAndMacro kpm,
Default default, FlagDef flag1, FlagDef flag2,
- BothFlags both = BothFlags<[], "">> {
+ BothFlags both = BothFlags<[]>> {
defm NAME : BoolOption<"g", flag_base, kpm, default, flag1, flag2, both>,
Group<g_Group>;
}
+// Works like BoolOption except without marshalling
+multiclass BoolOptionWithoutMarshalling<string prefix = "", string spelling_base,
+ FlagDef flag1_base, FlagDef flag2_base,
+ BothFlags suffix = BothFlags<[]>> {
+ defvar flag1 = FlagDefExpanded<ApplySuffix<flag1_base, suffix>.Result, prefix,
+ NAME, spelling_base>;
+
+ defvar flag2 = FlagDefExpanded<ApplySuffix<flag2_base, suffix>.Result, prefix,
+ NAME, spelling_base>;
+
+ // The flags must have different polarity, different values, and only
+ // one can be implied.
+ assert !xor(flag1.Polarity, flag2.Polarity),
+ "the flags must have different polarity: flag1: " #
+ flag1.Polarity # ", flag2: " # flag2.Polarity;
+ assert !ne(flag1.Value, flag2.Value),
+ "the flags must have different values: flag1: " #
+ flag1.Value # ", flag2: " # flag2.Value;
+ assert !not(!and(flag1.CanBeImplied, flag2.CanBeImplied)),
+ "only one of the flags can be implied: flag1: " #
+ flag1.CanBeImplied # ", flag2: " # flag2.CanBeImplied;
+
+ defvar implied = !if(flag1.CanBeImplied, flag1, flag2);
+
+ def flag1.RecordName : Flag<["-"], flag1.Spelling>, Flags<flag1.OptionFlags>,
+ Visibility<flag1.OptionVisibility>,
+ HelpText<flag1.Help>,
+ ImpliedByAnyOf<implied.ImpliedBy, implied.ValueAsCode>
+ {}
+ def flag2.RecordName : Flag<["-"], flag2.Spelling>, Flags<flag2.OptionFlags>,
+ Visibility<flag2.OptionVisibility>,
+ HelpText<flag2.Help>,
+ ImpliedByAnyOf<implied.ImpliedBy, implied.ValueAsCode>
+ {}
+}
+
// FIXME: Diagnose if target does not support protected visibility.
class MarshallingInfoVisibility<KeyPathAndMacro kpm, code default>
: MarshallingInfoEnum<kpm, default>,
@@ -459,7 +572,7 @@ defvar cpp11 = LangOpts<"CPlusPlus11">;
defvar cpp17 = LangOpts<"CPlusPlus17">;
defvar cpp20 = LangOpts<"CPlusPlus20">;
defvar c99 = LangOpts<"C99">;
-defvar c2x = LangOpts<"C2x">;
+defvar c23 = LangOpts<"C23">;
defvar lang_std = LangOpts<"LangStd">;
defvar open_cl = LangOpts<"OpenCL">;
defvar cuda = LangOpts<"CUDA">;
@@ -467,7 +580,7 @@ defvar render_script = LangOpts<"RenderScript">;
defvar hip = LangOpts<"HIP">;
defvar gnu_mode = LangOpts<"GNUMode">;
defvar asm_preprocessor = LangOpts<"AsmPreprocessor">;
-defvar cpp_modules = LangOpts<"CPlusPlusModules">;
+defvar hlsl = LangOpts<"HLSL">;
defvar std = !strconcat("LangStandard::getLangStandardForKind(", lang_std.KeyPath, ")");
@@ -495,7 +608,8 @@ defvar std = !strconcat("LangStandard::getLangStandardForKind(", lang_std.KeyPat
// Developer Driver Options
-def internal_Group : OptionGroup<"<clang internal options>">, Flags<[HelpHidden]>;
+def internal_Group : OptionGroup<"<clang internal options>">,
+ Flags<[HelpHidden]>;
def internal_driver_Group : OptionGroup<"<clang driver internal options>">,
Group<internal_Group>, HelpText<"DRIVER OPTIONS">;
def internal_debug_Group :
@@ -505,20 +619,25 @@ def internal_debug_Group :
class InternalDriverOpt : Group<internal_driver_Group>,
Flags<[NoXarchOption, HelpHidden]>;
def driver_mode : Joined<["--"], "driver-mode=">, Group<internal_driver_Group>,
- Flags<[CoreOption, NoXarchOption, HelpHidden]>,
- HelpText<"Set the driver mode to either 'gcc', 'g++', 'cpp', or 'cl'">;
+ Flags<[NoXarchOption, HelpHidden]>,
+ Visibility<[ClangOption, FlangOption, CLOption, DXCOption]>,
+ HelpText<"Set the driver mode to either 'gcc', 'g++', 'cpp', 'cl' or 'flang'">;
def rsp_quoting : Joined<["--"], "rsp-quoting=">, Group<internal_driver_Group>,
- Flags<[CoreOption, NoXarchOption, HelpHidden]>,
+ Flags<[NoXarchOption, HelpHidden]>,
+ Visibility<[ClangOption, CLOption, DXCOption]>,
HelpText<"Set the rsp quoting to either 'posix', or 'windows'">;
def ccc_gcc_name : Separate<["-"], "ccc-gcc-name">, InternalDriverOpt,
HelpText<"Name for native GCC compiler">,
MetaVarName<"<gcc-path>">;
class InternalDebugOpt : Group<internal_debug_Group>,
- Flags<[NoXarchOption, HelpHidden, CoreOption]>;
+ Flags<[NoXarchOption, HelpHidden]>,
+ Visibility<[ClangOption, CLOption, DXCOption]>;
def ccc_install_dir : Separate<["-"], "ccc-install-dir">, InternalDebugOpt,
HelpText<"Simulate installation in the given directory">;
-def ccc_print_phases : Flag<["-"], "ccc-print-phases">, InternalDebugOpt,
+def ccc_print_phases : Flag<["-"], "ccc-print-phases">,
+ Flags<[NoXarchOption, HelpHidden]>, Visibility<[ClangOption, CLOption, DXCOption,
+ FlangOption]>,
HelpText<"Dump list of actions to perform">;
def ccc_print_bindings : Flag<["-"], "ccc-print-bindings">, InternalDebugOpt,
HelpText<"Show bindings of tools to actions">;
@@ -530,19 +649,26 @@ def ccc_arcmt_modify : Flag<["-"], "ccc-arcmt-modify">, InternalDriverOpt,
def ccc_arcmt_migrate : Separate<["-"], "ccc-arcmt-migrate">, InternalDriverOpt,
HelpText<"Apply modifications and produces temporary files that conform to ARC">;
def arcmt_migrate_report_output : Separate<["-"], "arcmt-migrate-report-output">,
- HelpText<"Output path for the plist report">, Flags<[CC1Option]>,
+ HelpText<"Output path for the plist report">,
+ Visibility<[ClangOption, CC1Option]>,
MarshallingInfoString<FrontendOpts<"ARCMTMigrateReportOut">>;
def arcmt_migrate_emit_arc_errors : Flag<["-"], "arcmt-migrate-emit-errors">,
- HelpText<"Emit ARC errors even if the migrator can fix them">, Flags<[CC1Option]>,
+ HelpText<"Emit ARC errors even if the migrator can fix them">,
+ Visibility<[ClangOption, CC1Option]>,
MarshallingInfoFlag<FrontendOpts<"ARCMTMigrateEmitARCErrors">>;
+def gen_reproducer_eq: Joined<["-"], "gen-reproducer=">,
+ Flags<[NoArgumentUnused]>, Visibility<[ClangOption, CLOption, DXCOption]>,
+ HelpText<"Emit reproducer on (option: off, crash (default), error, always)">;
def gen_reproducer: Flag<["-"], "gen-reproducer">, InternalDebugOpt,
+ Alias<gen_reproducer_eq>, AliasArgs<["always"]>,
HelpText<"Auto-generates preprocessed source files and a reproduction script">;
def gen_cdb_fragment_path: Separate<["-"], "gen-cdb-fragment-path">, InternalDebugOpt,
HelpText<"Emit a compilation database fragment to the specified directory">;
-def round_trip_args : Flag<["-"], "round-trip-args">, Flags<[CC1Option, NoDriverOption]>,
+def round_trip_args : Flag<["-"], "round-trip-args">, Visibility<[CC1Option]>,
HelpText<"Enable command line arguments round-trip.">;
-def no_round_trip_args : Flag<["-"], "no-round-trip-args">, Flags<[CC1Option, NoDriverOption]>,
+def no_round_trip_args : Flag<["-"], "no-round-trip-args">,
+ Visibility<[CC1Option]>,
HelpText<"Disable command line arguments round-trip.">;
def _migrate : Flag<["--"], "migrate">, Flags<[NoXarchOption]>,
@@ -552,99 +678,135 @@ def ccc_objcmt_migrate : Separate<["-"], "ccc-objcmt-migrate">,
HelpText<"Apply modifications and produces temporary files to migrate to "
"modern ObjC syntax">;
-def objcmt_migrate_literals : Flag<["-"], "objcmt-migrate-literals">, Flags<[CC1Option]>,
+def objcmt_migrate_literals : Flag<["-"], "objcmt-migrate-literals">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable migration to modern ObjC literals">,
MarshallingInfoBitfieldFlag<FrontendOpts<"ObjCMTAction">, "FrontendOptions::ObjCMT_Literals">;
-def objcmt_migrate_subscripting : Flag<["-"], "objcmt-migrate-subscripting">, Flags<[CC1Option]>,
+def objcmt_migrate_subscripting : Flag<["-"], "objcmt-migrate-subscripting">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable migration to modern ObjC subscripting">,
MarshallingInfoBitfieldFlag<FrontendOpts<"ObjCMTAction">, "FrontendOptions::ObjCMT_Subscripting">;
-def objcmt_migrate_property : Flag<["-"], "objcmt-migrate-property">, Flags<[CC1Option]>,
+def objcmt_migrate_property : Flag<["-"], "objcmt-migrate-property">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable migration to modern ObjC property">,
MarshallingInfoBitfieldFlag<FrontendOpts<"ObjCMTAction">, "FrontendOptions::ObjCMT_Property">;
-def objcmt_migrate_all : Flag<["-"], "objcmt-migrate-all">, Flags<[CC1Option]>,
+def objcmt_migrate_all : Flag<["-"], "objcmt-migrate-all">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable migration to modern ObjC">,
MarshallingInfoBitfieldFlag<FrontendOpts<"ObjCMTAction">, "FrontendOptions::ObjCMT_MigrateDecls">;
-def objcmt_migrate_readonly_property : Flag<["-"], "objcmt-migrate-readonly-property">, Flags<[CC1Option]>,
+def objcmt_migrate_readonly_property : Flag<["-"], "objcmt-migrate-readonly-property">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable migration to modern ObjC readonly property">,
MarshallingInfoBitfieldFlag<FrontendOpts<"ObjCMTAction">, "FrontendOptions::ObjCMT_ReadonlyProperty">;
-def objcmt_migrate_readwrite_property : Flag<["-"], "objcmt-migrate-readwrite-property">, Flags<[CC1Option]>,
+def objcmt_migrate_readwrite_property : Flag<["-"], "objcmt-migrate-readwrite-property">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable migration to modern ObjC readwrite property">,
MarshallingInfoBitfieldFlag<FrontendOpts<"ObjCMTAction">, "FrontendOptions::ObjCMT_ReadwriteProperty">;
-def objcmt_migrate_property_dot_syntax : Flag<["-"], "objcmt-migrate-property-dot-syntax">, Flags<[CC1Option]>,
+def objcmt_migrate_property_dot_syntax : Flag<["-"], "objcmt-migrate-property-dot-syntax">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable migration of setter/getter messages to property-dot syntax">,
MarshallingInfoBitfieldFlag<FrontendOpts<"ObjCMTAction">, "FrontendOptions::ObjCMT_PropertyDotSyntax">;
-def objcmt_migrate_annotation : Flag<["-"], "objcmt-migrate-annotation">, Flags<[CC1Option]>,
+def objcmt_migrate_annotation : Flag<["-"], "objcmt-migrate-annotation">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable migration to property and method annotations">,
MarshallingInfoBitfieldFlag<FrontendOpts<"ObjCMTAction">, "FrontendOptions::ObjCMT_Annotation">;
-def objcmt_migrate_instancetype : Flag<["-"], "objcmt-migrate-instancetype">, Flags<[CC1Option]>,
+def objcmt_migrate_instancetype : Flag<["-"], "objcmt-migrate-instancetype">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable migration to infer instancetype for method result type">,
MarshallingInfoBitfieldFlag<FrontendOpts<"ObjCMTAction">, "FrontendOptions::ObjCMT_Instancetype">;
-def objcmt_migrate_nsmacros : Flag<["-"], "objcmt-migrate-ns-macros">, Flags<[CC1Option]>,
+def objcmt_migrate_nsmacros : Flag<["-"], "objcmt-migrate-ns-macros">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable migration to NS_ENUM/NS_OPTIONS macros">,
MarshallingInfoBitfieldFlag<FrontendOpts<"ObjCMTAction">, "FrontendOptions::ObjCMT_NsMacros">;
-def objcmt_migrate_protocol_conformance : Flag<["-"], "objcmt-migrate-protocol-conformance">, Flags<[CC1Option]>,
+def objcmt_migrate_protocol_conformance : Flag<["-"], "objcmt-migrate-protocol-conformance">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable migration to add protocol conformance on classes">,
MarshallingInfoBitfieldFlag<FrontendOpts<"ObjCMTAction">, "FrontendOptions::ObjCMT_ProtocolConformance">;
-def objcmt_atomic_property : Flag<["-"], "objcmt-atomic-property">, Flags<[CC1Option]>,
+def objcmt_atomic_property : Flag<["-"], "objcmt-atomic-property">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Make migration to 'atomic' properties">,
MarshallingInfoBitfieldFlag<FrontendOpts<"ObjCMTAction">, "FrontendOptions::ObjCMT_AtomicProperty">;
-def objcmt_returns_innerpointer_property : Flag<["-"], "objcmt-returns-innerpointer-property">, Flags<[CC1Option]>,
+def objcmt_returns_innerpointer_property : Flag<["-"], "objcmt-returns-innerpointer-property">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable migration to annotate property with NS_RETURNS_INNER_POINTER">,
MarshallingInfoBitfieldFlag<FrontendOpts<"ObjCMTAction">, "FrontendOptions::ObjCMT_ReturnsInnerPointerProperty">;
-def objcmt_ns_nonatomic_iosonly: Flag<["-"], "objcmt-ns-nonatomic-iosonly">, Flags<[CC1Option]>,
+def objcmt_ns_nonatomic_iosonly: Flag<["-"], "objcmt-ns-nonatomic-iosonly">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable migration to use NS_NONATOMIC_IOSONLY macro for setting property's 'atomic' attribute">,
MarshallingInfoBitfieldFlag<FrontendOpts<"ObjCMTAction">, "FrontendOptions::ObjCMT_NsAtomicIOSOnlyProperty">;
-def objcmt_migrate_designated_init : Flag<["-"], "objcmt-migrate-designated-init">, Flags<[CC1Option]>,
+def objcmt_migrate_designated_init : Flag<["-"], "objcmt-migrate-designated-init">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable migration to infer NS_DESIGNATED_INITIALIZER for initializer methods">,
MarshallingInfoBitfieldFlag<FrontendOpts<"ObjCMTAction">, "FrontendOptions::ObjCMT_DesignatedInitializer">;
-def objcmt_whitelist_dir_path: Joined<["-"], "objcmt-whitelist-dir-path=">, Flags<[CC1Option]>,
+def objcmt_allowlist_dir_path: Joined<["-"], "objcmt-allowlist-dir-path=">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Only modify files with a filename contained in the provided directory path">,
- MarshallingInfoString<FrontendOpts<"ObjCMTWhiteListPath">>;
+ MarshallingInfoString<FrontendOpts<"ObjCMTAllowListPath">>;
+def : Joined<["-"], "objcmt-whitelist-dir-path=">,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Alias for -objcmt-allowlist-dir-path">,
+ Alias<objcmt_allowlist_dir_path>;
// The misspelt "white-list" [sic] alias is due for removal.
-def : Joined<["-"], "objcmt-white-list-dir-path=">, Flags<[CC1Option]>,
- Alias<objcmt_whitelist_dir_path>;
+def : Joined<["-"], "objcmt-white-list-dir-path=">,
+ Visibility<[ClangOption, CC1Option]>,
+ Alias<objcmt_allowlist_dir_path>;
// Make sure all other -ccc- options are rejected.
def ccc_ : Joined<["-"], "ccc-">, Group<internal_Group>, Flags<[Unsupported]>;
// Standard Options
-def _HASH_HASH_HASH : Flag<["-"], "###">, Flags<[NoXarchOption, CoreOption, FlangOption]>,
+def _HASH_HASH_HASH : Flag<["-"], "###">, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>,
HelpText<"Print (but do not run) the commands to run for this compilation">;
def _DASH_DASH : Option<["--"], "", KIND_REMAINING_ARGS>,
- Flags<[NoXarchOption, CoreOption]>;
-def A : JoinedOrSeparate<["-"], "A">, Flags<[RenderJoined]>, Group<gfortran_Group>;
+ Flags<[NoXarchOption]>, Visibility<[ClangOption, CLOption, DXCOption]>;
+def A : JoinedOrSeparate<["-"], "A">, Flags<[RenderJoined]>,
+ Group<gfortran_Group>;
def B : JoinedOrSeparate<["-"], "B">, MetaVarName<"<prefix>">,
- HelpText<"Search $prefix/$triple-$file and $prefix$file for executables, libraries, "
- "includes, and data files used by the compiler. $prefix may or may not be a directory">;
+ HelpText<"Search $prefix$file for executables, libraries, and data files. "
+ "If $prefix is a directory, search $prefix/$file">;
+def gcc_install_dir_EQ : Joined<["--"], "gcc-install-dir=">,
+ HelpText<"Use GCC installation in the specified directory. The directory ends with path components like 'lib{,32,64}/gcc{,-cross}/$triple/$version'. "
+ "Note: executables (e.g. ld) used by the compiler are not overridden by the selected GCC installation">;
def gcc_toolchain : Joined<["--"], "gcc-toolchain=">, Flags<[NoXarchOption]>,
- HelpText<"Search for GCC installation in the specified directory on targets which commonly use GCC. "
- "The directory usually contains 'lib{,32,64}/gcc{,-cross}/$triple' and 'include'. If specified, "
- "sysroot is skipped for GCC detection. Note: executables (e.g. ld) used by the compiler are not "
- "overridden by the selected GCC installation">;
-def CC : Flag<["-"], "CC">, Flags<[CC1Option]>, Group<Preprocessor_Group>,
+ HelpText<"Specify a directory where Clang can find 'include' and 'lib{,32,64}/gcc{,-cross}/$triple/$version'. "
+ "Clang will use the GCC installation with the largest version">;
+def gcc_triple_EQ : Joined<["--"], "gcc-triple=">,
+ HelpText<"Search for the GCC installation with the specified triple.">;
+def CC : Flag<["-"], "CC">, Visibility<[ClangOption, CC1Option]>,
+ Group<Preprocessor_Group>,
HelpText<"Include comments from within macros in preprocessed output">,
MarshallingInfoFlag<PreprocessorOutputOpts<"ShowMacroComments">>;
-def C : Flag<["-"], "C">, Flags<[CC1Option]>, Group<Preprocessor_Group>,
+def C : Flag<["-"], "C">, Visibility<[ClangOption, CC1Option]>,
+ Group<Preprocessor_Group>,
HelpText<"Include comments in preprocessed output">,
MarshallingInfoFlag<PreprocessorOutputOpts<"ShowComments">>;
def D : JoinedOrSeparate<["-"], "D">, Group<Preprocessor_Group>,
- Flags<[CC1Option, FlangOption, FC1Option]>, MetaVarName<"<macro>=<value>">,
+ Visibility<[ClangOption, CC1Option, FlangOption, FC1Option, DXCOption]>,
+ MetaVarName<"<macro>=<value>">,
HelpText<"Define <macro> to <value> (or 1 if <value> omitted)">;
-def E : Flag<["-"], "E">, Flags<[NoXarchOption,CC1Option, FlangOption, FC1Option]>, Group<Action_Group>,
+def E : Flag<["-"], "E">, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
+ Group<Action_Group>,
HelpText<"Only run the preprocessor">;
-def F : JoinedOrSeparate<["-"], "F">, Flags<[RenderJoined,CC1Option]>,
+def F : JoinedOrSeparate<["-"], "F">, Flags<[RenderJoined]>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Add directory to framework include search path">;
-def G : JoinedOrSeparate<["-"], "G">, Flags<[NoXarchOption]>, Group<m_Group>,
+def G : JoinedOrSeparate<["-"], "G">, Flags<[NoXarchOption, TargetSpecific]>,
+ Group<m_Group>,
MetaVarName<"<size>">, HelpText<"Put objects of at most <size> bytes "
"into small data section (MIPS / Hexagon)">;
-def G_EQ : Joined<["-"], "G=">, Flags<[NoXarchOption]>, Group<m_Group>, Alias<G>;
-def H : Flag<["-"], "H">, Flags<[CC1Option]>, Group<Preprocessor_Group>,
+def G_EQ : Joined<["-"], "G=">, Flags<[NoXarchOption]>,
+ Group<m_Group>, Alias<G>;
+def H : Flag<["-"], "H">, Visibility<[ClangOption, CC1Option]>,
+ Group<Preprocessor_Group>,
HelpText<"Show header includes and nesting depth">,
MarshallingInfoFlag<DependencyOutputOpts<"ShowHeaderIncludes">>;
def fshow_skipped_includes : Flag<["-"], "fshow-skipped-includes">,
- Flags<[CC1Option]>, HelpText<"Show skipped includes in -H output.">,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Show skipped includes in -H output.">,
DocBrief<[{#include files may be "skipped" due to include guard optimization
or #pragma once. This flag makes -H show also such includes.}]>,
MarshallingInfoFlag<DependencyOutputOpts<"ShowSkippedHeaderIncludes">>;
@@ -653,7 +815,8 @@ def I_ : Flag<["-"], "I-">, Group<I_Group>,
HelpText<"Restrict all prior -I flags to double-quoted inclusion and "
"remove current directory from include path">;
def I : JoinedOrSeparate<["-"], "I">, Group<I_Group>,
- Flags<[CC1Option,CC1AsOption,FlangOption,FC1Option]>, MetaVarName<"<dir>">,
+ Visibility<[ClangOption, CC1Option, CC1AsOption, FlangOption, FC1Option]>,
+ MetaVarName<"<dir>">,
HelpText<"Add directory to the end of the list of include search paths">,
DocBrief<[{Add directory to include search path. For C++ inputs, if
there are multiple -I options, these directories are searched
@@ -662,6 +825,7 @@ are searched. If the same directory is in the SYSTEM include search
paths, for example if also specified with -isystem, the -I option
will be ignored}]>;
def L : JoinedOrSeparate<["-"], "L">, Flags<[RenderJoined]>, Group<Link_Group>,
+ Visibility<[ClangOption, FlangOption]>,
MetaVarName<"<dir>">, HelpText<"Add directory to library search path">;
def MD : Flag<["-"], "MD">, Group<M_Group>,
HelpText<"Write a depfile containing user and system headers">;
@@ -674,99 +838,112 @@ def MM : Flag<["-"], "MM">, Group<M_Group>,
def MF : JoinedOrSeparate<["-"], "MF">, Group<M_Group>,
HelpText<"Write depfile output from -MMD, -MD, -MM, or -M to <file>">,
MetaVarName<"<file>">;
-def MG : Flag<["-"], "MG">, Group<M_Group>, Flags<[CC1Option]>,
+def MG : Flag<["-"], "MG">, Group<M_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Add missing headers to depfile">,
MarshallingInfoFlag<DependencyOutputOpts<"AddMissingHeaderDeps">>;
def MJ : JoinedOrSeparate<["-"], "MJ">, Group<M_Group>,
HelpText<"Write a compilation database entry per input">;
-def MP : Flag<["-"], "MP">, Group<M_Group>, Flags<[CC1Option]>,
+def MP : Flag<["-"], "MP">, Group<M_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Create phony target for each dependency (other than main file)">,
MarshallingInfoFlag<DependencyOutputOpts<"UsePhonyTargets">>;
-def MQ : JoinedOrSeparate<["-"], "MQ">, Group<M_Group>, Flags<[CC1Option]>,
+def MQ : JoinedOrSeparate<["-"], "MQ">, Group<M_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Specify name of main file output to quote in depfile">;
-def MT : JoinedOrSeparate<["-"], "MT">, Group<M_Group>, Flags<[CC1Option]>,
+def MT : JoinedOrSeparate<["-"], "MT">, Group<M_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Specify name of main file output in depfile">,
MarshallingInfoStringVector<DependencyOutputOpts<"Targets">>;
-def MV : Flag<["-"], "MV">, Group<M_Group>, Flags<[CC1Option]>,
+def MV : Flag<["-"], "MV">, Group<M_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Use NMake/Jom format for the depfile">,
MarshallingInfoFlag<DependencyOutputOpts<"OutputFormat">, "DependencyOutputFormat::Make">,
Normalizer<"makeFlagToValueNormalizer(DependencyOutputFormat::NMake)">;
def Mach : Flag<["-"], "Mach">, Group<Link_Group>;
-def O0 : Flag<["-"], "O0">, Group<O_Group>, Flags<[CC1Option, HelpHidden]>;
-def O4 : Flag<["-"], "O4">, Group<O_Group>, Flags<[CC1Option, HelpHidden]>;
+def O0 : Flag<["-"], "O0">, Group<O_Group>, Flags<[HelpHidden]>,
+ Visibility<[ClangOption, CC1Option, FC1Option, FlangOption]>;
+def O4 : Flag<["-"], "O4">, Group<O_Group>, Flags<[HelpHidden]>,
+ Visibility<[ClangOption, CC1Option, FC1Option, FlangOption]>;
def ObjCXX : Flag<["-"], "ObjC++">, Flags<[NoXarchOption]>,
HelpText<"Treat source input files as Objective-C++ inputs">;
def ObjC : Flag<["-"], "ObjC">, Flags<[NoXarchOption]>,
HelpText<"Treat source input files as Objective-C inputs">;
-def O : Joined<["-"], "O">, Group<O_Group>, Flags<[CC1Option]>;
-def O_flag : Flag<["-"], "O">, Flags<[CC1Option]>, Alias<O>, AliasArgs<["1"]>;
-def Ofast : Joined<["-"], "Ofast">, Group<O_Group>, Flags<[CC1Option]>;
-def P : Flag<["-"], "P">, Flags<[CC1Option]>, Group<Preprocessor_Group>,
+def O : Joined<["-"], "O">, Group<O_Group>,
+ Visibility<[ClangOption, CC1Option, FC1Option, FlangOption]>;
+def O_flag : Flag<["-"], "O">, Visibility<[ClangOption, CC1Option, FC1Option]>,
+ Alias<O>, AliasArgs<["1"]>;
+def Ofast : Joined<["-"], "Ofast">, Group<O_Group>,
+ Visibility<[ClangOption, CC1Option, FlangOption]>;
+def P : Flag<["-"], "P">,
+ Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
+ Group<Preprocessor_Group>,
HelpText<"Disable linemarker output in -E mode">,
MarshallingInfoNegativeFlag<PreprocessorOutputOpts<"ShowLineMarkers">>;
-def Qy : Flag<["-"], "Qy">, Flags<[CC1Option]>,
+def Qy : Flag<["-"], "Qy">, Visibility<[ClangOption, CC1Option]>,
HelpText<"Emit metadata containing compiler name and version">;
-def Qn : Flag<["-"], "Qn">, Flags<[CC1Option]>,
+def Qn : Flag<["-"], "Qn">, Visibility<[ClangOption, CC1Option]>,
HelpText<"Do not emit metadata containing compiler name and version">;
def : Flag<["-"], "fident">, Group<f_Group>, Alias<Qy>,
- Flags<[CoreOption, CC1Option]>;
+ Visibility<[ClangOption, CLOption, DXCOption, CC1Option]>;
def : Flag<["-"], "fno-ident">, Group<f_Group>, Alias<Qn>,
- Flags<[CoreOption, CC1Option]>;
-def Qunused_arguments : Flag<["-"], "Qunused-arguments">, Flags<[NoXarchOption, CoreOption]>,
+ Visibility<[ClangOption, CLOption, DXCOption, CC1Option]>;
+def Qunused_arguments : Flag<["-"], "Qunused-arguments">,
+ Flags<[NoXarchOption]>, Visibility<[ClangOption, CLOption, DXCOption]>,
HelpText<"Don't emit warning for unused driver arguments">;
def Q : Flag<["-"], "Q">, IgnoredGCCCompat;
-def Rpass_EQ : Joined<["-"], "Rpass=">, Group<R_value_Group>, Flags<[CC1Option]>,
- HelpText<"Report transformations performed by optimization passes whose "
- "name matches the given POSIX regular expression">;
-def Rpass_missed_EQ : Joined<["-"], "Rpass-missed=">, Group<R_value_Group>,
- Flags<[CC1Option]>,
- HelpText<"Report missed transformations by optimization passes whose "
- "name matches the given POSIX regular expression">;
-def Rpass_analysis_EQ : Joined<["-"], "Rpass-analysis=">, Group<R_value_Group>,
- Flags<[CC1Option]>,
- HelpText<"Report transformation analysis from optimization passes whose "
- "name matches the given POSIX regular expression">;
-def R_Joined : Joined<["-"], "R">, Group<R_Group>, Flags<[CC1Option, CoreOption]>,
- MetaVarName<"<remark>">, HelpText<"Enable the specified remark">;
-def S : Flag<["-"], "S">, Flags<[NoXarchOption,CC1Option]>, Group<Action_Group>,
+def S : Flag<["-"], "S">, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
+ Group<Action_Group>,
HelpText<"Only run preprocess and compilation steps">;
-def Tbss : JoinedOrSeparate<["-"], "Tbss">, Group<T_Group>,
- MetaVarName<"<addr>">, HelpText<"Set starting address of BSS to <addr>">;
-def Tdata : JoinedOrSeparate<["-"], "Tdata">, Group<T_Group>,
- MetaVarName<"<addr>">, HelpText<"Set starting address of DATA to <addr>">;
-def Ttext : JoinedOrSeparate<["-"], "Ttext">, Group<T_Group>,
- MetaVarName<"<addr>">, HelpText<"Set starting address of TEXT to <addr>">;
def T : JoinedOrSeparate<["-"], "T">, Group<T_Group>,
MetaVarName<"<script>">, HelpText<"Specify <script> as linker script">;
def U : JoinedOrSeparate<["-"], "U">, Group<Preprocessor_Group>,
- Flags<[CC1Option, FlangOption, FC1Option]>, MetaVarName<"<macro>">, HelpText<"Undefine macro <macro>">;
+ Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
+ MetaVarName<"<macro>">, HelpText<"Undefine macro <macro>">;
def V : JoinedOrSeparate<["-"], "V">, Flags<[NoXarchOption, Unsupported]>;
def Wa_COMMA : CommaJoined<["-"], "Wa,">,
HelpText<"Pass the comma separated arguments in <arg> to the assembler">,
MetaVarName<"<arg>">;
-def Wall : Flag<["-"], "Wall">, Group<W_Group>, Flags<[CC1Option, HelpHidden]>;
-def WCL4 : Flag<["-"], "WCL4">, Group<W_Group>, Flags<[CC1Option, HelpHidden]>;
-def Wdeprecated : Flag<["-"], "Wdeprecated">, Group<W_Group>, Flags<[CC1Option]>,
+def Wall : Flag<["-"], "Wall">, Group<W_Group>, Flags<[HelpHidden]>,
+ Visibility<[ClangOption, CC1Option, FlangOption]>;
+def WCL4 : Flag<["-"], "WCL4">, Group<W_Group>, Flags<[HelpHidden]>,
+ Visibility<[ClangOption, CC1Option]>;
+def Wsystem_headers : Flag<["-"], "Wsystem-headers">, Group<W_Group>,
+ Flags<[HelpHidden]>, Visibility<[ClangOption, CC1Option]>;
+def Wno_system_headers : Flag<["-"], "Wno-system-headers">, Group<W_Group>,
+ Flags<[HelpHidden]>, Visibility<[ClangOption, CC1Option]>;
+def Wsystem_headers_in_module_EQ : Joined<["-"], "Wsystem-headers-in-module=">,
+ Flags<[HelpHidden]>, Visibility<[ClangOption, CC1Option]>,
+ MetaVarName<"<module>">,
+ HelpText<"Enable -Wsystem-headers when building <module>">,
+ MarshallingInfoStringVector<DiagnosticOpts<"SystemHeaderWarningsModules">>;
+def Wdeprecated : Flag<["-"], "Wdeprecated">, Group<W_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable warnings for deprecated constructs and define __DEPRECATED">;
-def Wno_deprecated : Flag<["-"], "Wno-deprecated">, Group<W_Group>, Flags<[CC1Option]>;
-def Wl_COMMA : CommaJoined<["-"], "Wl,">, Flags<[LinkerInput, RenderAsInput]>,
+def Wno_deprecated : Flag<["-"], "Wno-deprecated">, Group<W_Group>,
+ Visibility<[ClangOption, CC1Option]>;
+def Wl_COMMA : CommaJoined<["-"], "Wl,">, Visibility<[ClangOption, FlangOption]>,
+ Flags<[LinkerInput, RenderAsInput]>,
HelpText<"Pass the comma separated arguments in <arg> to the linker">,
MetaVarName<"<arg>">, Group<Link_Group>;
// FIXME: This is broken; these should not be Joined arguments.
def Wno_nonportable_cfstrings : Joined<["-"], "Wno-nonportable-cfstrings">, Group<W_Group>,
- Flags<[CC1Option]>;
+ Visibility<[ClangOption, CC1Option]>;
def Wnonportable_cfstrings : Joined<["-"], "Wnonportable-cfstrings">, Group<W_Group>,
- Flags<[CC1Option]>;
+ Visibility<[ClangOption, CC1Option]>;
def Wp_COMMA : CommaJoined<["-"], "Wp,">,
HelpText<"Pass the comma separated arguments in <arg> to the preprocessor">,
MetaVarName<"<arg>">, Group<Preprocessor_Group>;
def Wundef_prefix_EQ : CommaJoined<["-"], "Wundef-prefix=">, Group<W_value_Group>,
- Flags<[CC1Option, CoreOption, HelpHidden]>, MetaVarName<"<arg>">,
+ Flags<[HelpHidden]>,
+ Visibility<[ClangOption, CC1Option, CLOption, DXCOption]>,
+ MetaVarName<"<arg>">,
HelpText<"Enable warnings for undefined macros with a prefix in the comma separated list <arg>">,
MarshallingInfoStringVector<DiagnosticOpts<"UndefPrefixes">>;
-def Wwrite_strings : Flag<["-"], "Wwrite-strings">, Group<W_Group>, Flags<[CC1Option, HelpHidden]>;
-def Wno_write_strings : Flag<["-"], "Wno-write-strings">, Group<W_Group>, Flags<[CC1Option, HelpHidden]>;
-def W_Joined : Joined<["-"], "W">, Group<W_Group>, Flags<[CC1Option, CoreOption, FC1Option, FlangOption]>,
+def Wwrite_strings : Flag<["-"], "Wwrite-strings">, Group<W_Group>,
+ Flags<[HelpHidden]>, Visibility<[ClangOption, CC1Option]>;
+def Wno_write_strings : Flag<["-"], "Wno-write-strings">, Group<W_Group>,
+ Flags<[HelpHidden]>, Visibility<[ClangOption, CC1Option]>;
+def W_Joined : Joined<["-"], "W">, Group<W_Group>,
+ Visibility<[ClangOption, CC1Option, CLOption, DXCOption, FC1Option, FlangOption]>,
MetaVarName<"<warning>">, HelpText<"Enable the specified warning">;
def Xanalyzer : Separate<["-"], "Xanalyzer">,
HelpText<"Pass <arg> to the static analyzer">, MetaVarName<"<arg>">,
@@ -780,131 +957,304 @@ def Xassembler : Separate<["-"], "Xassembler">,
HelpText<"Pass <arg> to the assembler">, MetaVarName<"<arg>">,
Group<CompileOnly_Group>;
def Xclang : Separate<["-"], "Xclang">,
- HelpText<"Pass <arg> to the clang compiler">, MetaVarName<"<arg>">,
- Flags<[NoXarchOption, CoreOption]>, Group<CompileOnly_Group>;
+ HelpText<"Pass <arg> to clang -cc1">, MetaVarName<"<arg>">,
+ Flags<[NoXarchOption]>, Visibility<[ClangOption, CLOption, DXCOption]>,
+ Group<CompileOnly_Group>;
+def : Joined<["-"], "Xclang=">, Group<CompileOnly_Group>,
+ Flags<[NoXarchOption]>, Visibility<[ClangOption, CLOption, DXCOption]>,
+ Alias<Xclang>,
+ HelpText<"Alias for -Xclang">, MetaVarName<"<arg>">;
def Xcuda_fatbinary : Separate<["-"], "Xcuda-fatbinary">,
HelpText<"Pass <arg> to fatbinary invocation">, MetaVarName<"<arg>">;
def Xcuda_ptxas : Separate<["-"], "Xcuda-ptxas">,
HelpText<"Pass <arg> to the ptxas assembler">, MetaVarName<"<arg>">;
-def Xopenmp_target : Separate<["-"], "Xopenmp-target">,
+def Xopenmp_target : Separate<["-"], "Xopenmp-target">, Group<CompileOnly_Group>,
HelpText<"Pass <arg> to the target offloading toolchain.">, MetaVarName<"<arg>">;
-def Xopenmp_target_EQ : JoinedAndSeparate<["-"], "Xopenmp-target=">,
+def Xopenmp_target_EQ : JoinedAndSeparate<["-"], "Xopenmp-target=">, Group<CompileOnly_Group>,
HelpText<"Pass <arg> to the target offloading toolchain identified by <triple>.">,
MetaVarName<"<triple> <arg>">;
-def z : Separate<["-"], "z">, Flags<[LinkerInput, RenderAsInput]>,
+def z : Separate<["-"], "z">, Flags<[LinkerInput]>,
HelpText<"Pass -z <arg> to the linker">, MetaVarName<"<arg>">,
Group<Link_Group>;
+def offload_link : Flag<["--"], "offload-link">, Group<Link_Group>,
+ HelpText<"Use the new offloading linker to perform the link job.">;
def Xlinker : Separate<["-"], "Xlinker">, Flags<[LinkerInput, RenderAsInput]>,
HelpText<"Pass <arg> to the linker">, MetaVarName<"<arg>">,
Group<Link_Group>;
+def Xoffload_linker : JoinedAndSeparate<["-"], "Xoffload-linker">,
+ HelpText<"Pass <arg> to the offload linkers or the ones idenfied by -<triple>">,
+ MetaVarName<"<triple> <arg>">, Group<Link_Group>;
def Xpreprocessor : Separate<["-"], "Xpreprocessor">, Group<Preprocessor_Group>,
HelpText<"Pass <arg> to the preprocessor">, MetaVarName<"<arg>">;
def X_Flag : Flag<["-"], "X">, Group<Link_Group>;
-def X_Joined : Joined<["-"], "X">, IgnoredGCCCompat;
+// Used by some macOS projects. IgnoredGCCCompat is a misnomer since GCC doesn't allow it.
+def : Flag<["-"], "Xparser">, IgnoredGCCCompat;
+// FIXME -Xcompiler is misused by some ChromeOS packages. Remove it after a while.
+def : Flag<["-"], "Xcompiler">, IgnoredGCCCompat;
def Z_Flag : Flag<["-"], "Z">, Group<Link_Group>;
-// FIXME: All we do with this is reject it. Remove.
-def Z_Joined : Joined<["-"], "Z">;
def all__load : Flag<["-"], "all_load">;
def allowable__client : Separate<["-"], "allowable_client">;
def ansi : Flag<["-", "--"], "ansi">, Group<CompileOnly_Group>;
def arch__errors__fatal : Flag<["-"], "arch_errors_fatal">;
-def arch : Separate<["-"], "arch">, Flags<[NoXarchOption]>;
+def arch : Separate<["-"], "arch">, Flags<[NoXarchOption,TargetSpecific]>;
def arch__only : Separate<["-"], "arch_only">;
-def a : Joined<["-"], "a">;
def autocomplete : Joined<["--"], "autocomplete=">;
def bind__at__load : Flag<["-"], "bind_at_load">;
def bundle__loader : Separate<["-"], "bundle_loader">;
def bundle : Flag<["-"], "bundle">;
-def b : JoinedOrSeparate<["-"], "b">, Flags<[Unsupported]>;
-def cl_opt_disable : Flag<["-"], "cl-opt-disable">, Group<opencl_Group>, Flags<[CC1Option]>,
+def b : JoinedOrSeparate<["-"], "b">, Flags<[LinkerInput]>,
+ HelpText<"Pass -b <arg> to the linker on AIX">, MetaVarName<"<arg>">,
+ Group<Link_Group>;
+
+defm offload_uniform_block : BoolFOption<"offload-uniform-block",
+ LangOpts<"OffloadUniformBlock">, Default<"LangOpts->CUDA">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Assume">,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option], "Don't assume">,
+ BothFlags<[], [ClangOption], " that kernels are launched with uniform block sizes (default true for CUDA/HIP and false otherwise)">>;
+
+def fcx_limited_range : Joined<["-"], "fcx-limited-range">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Basic algebraic expansions of complex arithmetic operations "
+ "involving are enabled.">;
+
+def fno_cx_limited_range : Joined<["-"], "fno-cx-limited-range">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Basic algebraic expansions of complex arithmetic operations "
+ "involving are disabled.">;
+
+def fcx_fortran_rules : Joined<["-"], "fcx-fortran-rules">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Range reduction is enabled for complex arithmetic operations.">;
+
+def fno_cx_fortran_rules : Joined<["-"], "fno-cx-fortran-rules">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Range reduction is disabled for complex arithmetic operations.">;
+
+def complex_range_EQ : Joined<["-"], "complex-range=">, Group<f_Group>,
+ Visibility<[CC1Option]>,
+ Values<"full,limited,fortran">, NormalizedValuesScope<"LangOptions">,
+ NormalizedValues<["CX_Full", "CX_Limited", "CX_Fortran"]>,
+ MarshallingInfoEnum<LangOpts<"ComplexRange">, "CX_Full">;
+
+// OpenCL-only Options
+def cl_opt_disable : Flag<["-"], "cl-opt-disable">, Group<opencl_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"OpenCL only. This option disables all optimizations. By default optimizations are enabled.">;
-def cl_strict_aliasing : Flag<["-"], "cl-strict-aliasing">, Group<opencl_Group>, Flags<[CC1Option]>,
+def cl_strict_aliasing : Flag<["-"], "cl-strict-aliasing">, Group<opencl_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"OpenCL only. This option is added for compatibility with OpenCL 1.0.">;
-def cl_single_precision_constant : Flag<["-"], "cl-single-precision-constant">, Group<opencl_Group>, Flags<[CC1Option]>,
+def cl_single_precision_constant : Flag<["-"], "cl-single-precision-constant">, Group<opencl_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"OpenCL only. Treat double precision floating-point constant as single precision constant.">,
MarshallingInfoFlag<LangOpts<"SinglePrecisionConstants">>;
-def cl_finite_math_only : Flag<["-"], "cl-finite-math-only">, Group<opencl_Group>, Flags<[CC1Option]>,
+def cl_finite_math_only : Flag<["-"], "cl-finite-math-only">, Group<opencl_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"OpenCL only. Allow floating-point optimizations that assume arguments and results are not NaNs or +-Inf.">,
MarshallingInfoFlag<LangOpts<"CLFiniteMathOnly">>;
-def cl_kernel_arg_info : Flag<["-"], "cl-kernel-arg-info">, Group<opencl_Group>, Flags<[CC1Option]>,
+def cl_kernel_arg_info : Flag<["-"], "cl-kernel-arg-info">, Group<opencl_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"OpenCL only. Generate kernel argument metadata.">,
MarshallingInfoFlag<CodeGenOpts<"EmitOpenCLArgMetadata">>;
-def cl_unsafe_math_optimizations : Flag<["-"], "cl-unsafe-math-optimizations">, Group<opencl_Group>, Flags<[CC1Option]>,
+def cl_unsafe_math_optimizations : Flag<["-"], "cl-unsafe-math-optimizations">, Group<opencl_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"OpenCL only. Allow unsafe floating-point optimizations. Also implies -cl-no-signed-zeros and -cl-mad-enable.">,
MarshallingInfoFlag<LangOpts<"CLUnsafeMath">>;
-def cl_fast_relaxed_math : Flag<["-"], "cl-fast-relaxed-math">, Group<opencl_Group>, Flags<[CC1Option]>,
+def cl_fast_relaxed_math : Flag<["-"], "cl-fast-relaxed-math">, Group<opencl_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"OpenCL only. Sets -cl-finite-math-only and -cl-unsafe-math-optimizations, and defines __FAST_RELAXED_MATH__.">,
MarshallingInfoFlag<LangOpts<"FastRelaxedMath">>;
-def cl_mad_enable : Flag<["-"], "cl-mad-enable">, Group<opencl_Group>, Flags<[CC1Option]>,
+def cl_mad_enable : Flag<["-"], "cl-mad-enable">, Group<opencl_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"OpenCL only. Allow use of less precise MAD computations in the generated binary.">,
MarshallingInfoFlag<CodeGenOpts<"LessPreciseFPMAD">>,
ImpliedByAnyOf<[cl_unsafe_math_optimizations.KeyPath, cl_fast_relaxed_math.KeyPath]>;
-def cl_no_signed_zeros : Flag<["-"], "cl-no-signed-zeros">, Group<opencl_Group>, Flags<[CC1Option]>,
+def cl_no_signed_zeros : Flag<["-"], "cl-no-signed-zeros">, Group<opencl_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"OpenCL only. Allow use of less precise no signed zeros computations in the generated binary.">,
MarshallingInfoFlag<LangOpts<"CLNoSignedZero">>;
-def cl_std_EQ : Joined<["-"], "cl-std=">, Group<opencl_Group>, Flags<[CC1Option]>,
- HelpText<"OpenCL language standard to compile for.">, Values<"cl,CL,cl1.0,CL1.0,cl1.1,CL1.1,cl1.2,CL1.2,cl2.0,CL2.0,cl3.0,CL3.0,clc++,CLC++">;
+def cl_std_EQ : Joined<["-"], "cl-std=">, Group<opencl_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"OpenCL language standard to compile for.">,
+ Values<"cl,CL,cl1.0,CL1.0,cl1.1,CL1.1,cl1.2,CL1.2,cl2.0,CL2.0,cl3.0,CL3.0,clc++,CLC++,clc++1.0,CLC++1.0,clc++2021,CLC++2021">;
def cl_denorms_are_zero : Flag<["-"], "cl-denorms-are-zero">, Group<opencl_Group>,
HelpText<"OpenCL only. Allow denormals to be flushed to zero.">;
-def cl_fp32_correctly_rounded_divide_sqrt : Flag<["-"], "cl-fp32-correctly-rounded-divide-sqrt">, Group<opencl_Group>, Flags<[CC1Option]>,
+def cl_fp32_correctly_rounded_divide_sqrt : Flag<["-"], "cl-fp32-correctly-rounded-divide-sqrt">, Group<opencl_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"OpenCL only. Specify that single precision floating-point divide and sqrt used in the program source are correctly rounded.">,
MarshallingInfoFlag<CodeGenOpts<"OpenCLCorrectlyRoundedDivSqrt">>;
-def cl_uniform_work_group_size : Flag<["-"], "cl-uniform-work-group-size">, Group<opencl_Group>, Flags<[CC1Option]>,
- HelpText<"OpenCL only. Defines that the global work-size be a multiple of the work-group size specified to clEnqueueNDRangeKernel">,
- MarshallingInfoFlag<CodeGenOpts<"UniformWGSize">>;
+def cl_uniform_work_group_size : Flag<["-"], "cl-uniform-work-group-size">, Group<opencl_Group>,
+ Visibility<[ClangOption, CC1Option]>, Alias<foffload_uniform_block>,
+ HelpText<"OpenCL only. Defines that the global work-size be a multiple of the work-group size specified to clEnqueueNDRangeKernel">;
def cl_no_stdinc : Flag<["-"], "cl-no-stdinc">, Group<opencl_Group>,
HelpText<"OpenCL only. Disables all standard includes containing non-native compiler types and functions.">;
+def cl_ext_EQ : CommaJoined<["-"], "cl-ext=">, Group<opencl_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"OpenCL only. Enable or disable OpenCL extensions/optional features. The argument is a comma-separated "
+ "sequence of one or more extension names, each prefixed by '+' or '-'.">,
+ MarshallingInfoStringVector<TargetOpts<"OpenCLExtensionsAsWritten">>;
+
def client__name : JoinedOrSeparate<["-"], "client_name">;
def combine : Flag<["-", "--"], "combine">, Flags<[NoXarchOption, Unsupported]>;
def compatibility__version : JoinedOrSeparate<["-"], "compatibility_version">;
-def config : Separate<["--"], "config">, Flags<[NoXarchOption]>,
- HelpText<"Specifies configuration file">;
-def config_system_dir_EQ : Joined<["--"], "config-system-dir=">, Flags<[NoXarchOption, HelpHidden]>,
+def config : Joined<["--"], "config=">, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CLOption, DXCOption]>, MetaVarName<"<file>">,
+ HelpText<"Specify configuration file">;
+def : Separate<["--"], "config">, Alias<config>;
+def no_default_config : Flag<["--"], "no-default-config">,
+ Flags<[NoXarchOption]>, Visibility<[ClangOption, CLOption, DXCOption]>,
+ HelpText<"Disable loading default configuration files">;
+def config_system_dir_EQ : Joined<["--"], "config-system-dir=">,
+ Flags<[NoXarchOption, HelpHidden]>,
+ Visibility<[ClangOption, CLOption, DXCOption]>,
HelpText<"System directory for configuration files">;
-def config_user_dir_EQ : Joined<["--"], "config-user-dir=">, Flags<[NoXarchOption, HelpHidden]>,
+def config_user_dir_EQ : Joined<["--"], "config-user-dir=">,
+ Flags<[NoXarchOption, HelpHidden]>,
+ Visibility<[ClangOption, CLOption, DXCOption]>,
HelpText<"User directory for configuration files">;
-def coverage : Flag<["-", "--"], "coverage">, Group<Link_Group>, Flags<[CoreOption]>;
+def coverage : Flag<["-", "--"], "coverage">, Group<Link_Group>,
+ Visibility<[ClangOption, CLOption]>;
def cpp_precomp : Flag<["-"], "cpp-precomp">, Group<clang_ignored_f_Group>;
def current__version : JoinedOrSeparate<["-"], "current_version">;
def cxx_isystem : JoinedOrSeparate<["-"], "cxx-isystem">, Group<clang_i_Group>,
- HelpText<"Add directory to the C++ SYSTEM include search path">, Flags<[CC1Option]>,
+ HelpText<"Add directory to the C++ SYSTEM include search path">,
+ Visibility<[ClangOption, CC1Option]>,
MetaVarName<"<directory>">;
-def c : Flag<["-"], "c">, Flags<[NoXarchOption, FlangOption]>, Group<Action_Group>,
+def c : Flag<["-"], "c">, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, FlangOption]>, Group<Action_Group>,
HelpText<"Only run preprocess, compile, and assemble steps">;
-def fconvergent_functions : Flag<["-"], "fconvergent-functions">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Assume functions may be convergent">;
+defm convergent_functions : BoolFOption<"convergent-functions",
+ LangOpts<"ConvergentFunctions">, DefaultFalse,
+ NegFlag<SetFalse, [], [ClangOption], "Assume all functions may be convergent.">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option]>>;
+
+// Common offloading options
+let Group = offload_Group in {
+def offload_arch_EQ : Joined<["--"], "offload-arch=">, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, FlangOption]>,
+ HelpText<"Specify an offloading device architecture for CUDA, HIP, or OpenMP. (e.g. sm_35). "
+ "If 'native' is used the compiler will detect locally installed architectures. "
+ "For HIP offloading, the device architecture can be followed by target ID features "
+ "delimited by a colon (e.g. gfx908:xnack+:sramecc-). May be specified more than once.">;
+def no_offload_arch_EQ : Joined<["--"], "no-offload-arch=">,
+ Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, FlangOption]>,
+ HelpText<"Remove CUDA/HIP offloading device architecture (e.g. sm_35, gfx906) from the list of devices to compile for. "
+ "'all' resets the list to its default value.">;
+
+def offload_new_driver : Flag<["--"], "offload-new-driver">,
+ Visibility<[ClangOption, CC1Option]>, Group<f_Group>,
+ MarshallingInfoFlag<LangOpts<"OffloadingNewDriver">>, HelpText<"Use the new driver for offloading compilation.">;
+def no_offload_new_driver : Flag<["--"], "no-offload-new-driver">,
+ Visibility<[ClangOption, CC1Option]>, Group<f_Group>,
+ HelpText<"Don't Use the new driver for offloading compilation.">;
+
+def offload_device_only : Flag<["--"], "offload-device-only">,
+ Visibility<[ClangOption, FlangOption]>,
+ HelpText<"Only compile for the offloading device.">;
+def offload_host_only : Flag<["--"], "offload-host-only">,
+ Visibility<[ClangOption, FlangOption]>,
+ HelpText<"Only compile for the offloading host.">;
+def offload_host_device : Flag<["--"], "offload-host-device">,
+ Visibility<[ClangOption, FlangOption]>,
+ HelpText<"Compile for both the offloading host and device (default).">;
def gpu_use_aux_triple_only : Flag<["--"], "gpu-use-aux-triple-only">,
InternalDriverOpt, HelpText<"Prepare '-aux-triple' only without populating "
"'-aux-target-cpu' and '-aux-target-feature'.">;
-def cuda_device_only : Flag<["--"], "cuda-device-only">,
- HelpText<"Compile CUDA code for device only">;
-def cuda_host_only : Flag<["--"], "cuda-host-only">,
- HelpText<"Compile CUDA code for host only. Has no effect on non-CUDA "
- "compilations.">;
-def cuda_compile_host_device : Flag<["--"], "cuda-compile-host-device">,
- HelpText<"Compile CUDA code for both host and device (default). Has no "
- "effect on non-CUDA compilations.">;
-def cuda_include_ptx_EQ : Joined<["--"], "cuda-include-ptx=">, Flags<[NoXarchOption]>,
+def amdgpu_arch_tool_EQ : Joined<["--"], "amdgpu-arch-tool=">,
+ HelpText<"Tool used for detecting AMD GPU arch in the system.">;
+def nvptx_arch_tool_EQ : Joined<["--"], "nvptx-arch-tool=">,
+ HelpText<"Tool used for detecting NVIDIA GPU arch in the system.">;
+
+defm gpu_rdc : BoolFOption<"gpu-rdc",
+ LangOpts<"GPURelocatableDeviceCode">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Generate relocatable device code, also known as separate compilation mode">,
+ NegFlag<SetFalse>>;
+
+defm offload_implicit_host_device_templates :
+ BoolFOption<"offload-implicit-host-device-templates",
+ LangOpts<"OffloadImplicitHostDeviceTemplates">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Template functions or specializations without host, device and "
+ "global attributes have implicit host device attributes (CUDA/HIP only)">,
+ NegFlag<SetFalse>>;
+
+def fgpu_default_stream_EQ : Joined<["-"], "fgpu-default-stream=">,
+ HelpText<"Specify default stream. The default value is 'legacy'. (CUDA/HIP only)">,
+ Visibility<[ClangOption, CC1Option]>,
+ Values<"legacy,per-thread">,
+ NormalizedValuesScope<"LangOptions::GPUDefaultStreamKind">,
+ NormalizedValues<["Legacy", "PerThread"]>,
+ MarshallingInfoEnum<LangOpts<"GPUDefaultStream">, "Legacy">;
+
+def fgpu_flush_denormals_to_zero : Flag<["-"], "fgpu-flush-denormals-to-zero">,
+ HelpText<"Flush denormal floating point values to zero in CUDA/HIP device mode.">;
+def fno_gpu_flush_denormals_to_zero : Flag<["-"], "fno-gpu-flush-denormals-to-zero">;
+
+defm gpu_defer_diag : BoolFOption<"gpu-defer-diag",
+ LangOpts<"GPUDeferDiag">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Defer">,
+ NegFlag<SetFalse, [], [ClangOption], "Don't defer">,
+ BothFlags<[], [ClangOption], " host/device related diagnostic messages for CUDA/HIP">>;
+
+defm gpu_exclude_wrong_side_overloads : BoolFOption<"gpu-exclude-wrong-side-overloads",
+ LangOpts<"GPUExcludeWrongSideOverloads">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Always exclude wrong side overloads">,
+ NegFlag<SetFalse, [], [ClangOption], "Exclude wrong side overloads only if there are same side overloads">,
+ BothFlags<[HelpHidden], [], " in overloading resolution for CUDA/HIP">>;
+
+def cuid_EQ : Joined<["-"], "cuid=">, Visibility<[ClangOption, CC1Option]>,
+ HelpText<"An ID for compilation unit, which should be the same for the same "
+ "compilation unit but different for different compilation units. "
+ "It is used to externalize device-side static variables for single "
+ "source offloading languages CUDA and HIP so that they can be "
+ "accessed by the host code of the same compilation unit.">,
+ MarshallingInfoString<LangOpts<"CUID">>;
+def fuse_cuid_EQ : Joined<["-"], "fuse-cuid=">,
+ HelpText<"Method to generate ID's for compilation units for single source "
+ "offloading languages CUDA and HIP: 'hash' (ID's generated by hashing "
+ "file path and command line options) | 'random' (ID's generated as "
+ "random numbers) | 'none' (disabled). Default is 'hash'. This option "
+ "will be overridden by option '-cuid=[ID]' if it is specified." >;
+
+def fgpu_inline_threshold_EQ : Joined<["-"], "fgpu-inline-threshold=">,
+ Flags<[HelpHidden]>,
+ HelpText<"Inline threshold for device compilation for CUDA/HIP">;
+
+def fgpu_sanitize : Flag<["-"], "fgpu-sanitize">, Group<f_Group>,
+ HelpText<"Enable sanitizer for supported offloading devices">;
+def fno_gpu_sanitize : Flag<["-"], "fno-gpu-sanitize">, Group<f_Group>;
+
+def offload_compress : Flag<["--"], "offload-compress">,
+ HelpText<"Compress offload device binaries (HIP only)">;
+def no_offload_compress : Flag<["--"], "no-offload-compress">;
+}
+
+// CUDA options
+let Group = cuda_Group in {
+def cuda_include_ptx_EQ : Joined<["--"], "cuda-include-ptx=">,
+ Flags<[NoXarchOption]>,
HelpText<"Include PTX for the following GPU architecture (e.g. sm_35) or 'all'. May be specified more than once.">;
-def no_cuda_include_ptx_EQ : Joined<["--"], "no-cuda-include-ptx=">, Flags<[NoXarchOption]>,
+def no_cuda_include_ptx_EQ : Joined<["--"], "no-cuda-include-ptx=">,
+ Flags<[NoXarchOption]>,
HelpText<"Do not include PTX for the following GPU architecture (e.g. sm_35) or 'all'. May be specified more than once.">;
-def offload_arch_EQ : Joined<["--"], "offload-arch=">, Flags<[NoXarchOption]>,
- HelpText<"CUDA offloading device architecture (e.g. sm_35), or HIP offloading target ID in the form of a "
- "device architecture followed by target ID features delimited by a colon. Each target ID feature "
- "is a pre-defined string followed by a plus or minus sign (e.g. gfx908:xnack+:sramecc-). May be "
- "specified more than once.">;
def cuda_gpu_arch_EQ : Joined<["--"], "cuda-gpu-arch=">, Flags<[NoXarchOption]>,
Alias<offload_arch_EQ>;
-def hip_link : Flag<["--"], "hip-link">,
- HelpText<"Link clang-offload-bundler bundles for HIP">;
-def no_offload_arch_EQ : Joined<["--"], "no-offload-arch=">, Flags<[NoXarchOption]>,
- HelpText<"Remove CUDA/HIP offloading device architecture (e.g. sm_35, gfx906) from the list of devices to compile for. "
- "'all' resets the list to its default value.">;
-def emit_static_lib : Flag<["--"], "emit-static-lib">,
- HelpText<"Enable linker job to emit a static library.">;
-def no_cuda_gpu_arch_EQ : Joined<["--"], "no-cuda-gpu-arch=">, Flags<[NoXarchOption]>,
+def cuda_feature_EQ : Joined<["--"], "cuda-feature=">, HelpText<"Manually specify the CUDA feature to use">;
+def no_cuda_gpu_arch_EQ : Joined<["--"], "no-cuda-gpu-arch=">,
+ Flags<[NoXarchOption]>,
Alias<no_offload_arch_EQ>;
+
+def cuda_device_only : Flag<["--"], "cuda-device-only">, Alias<offload_device_only>,
+ HelpText<"Compile CUDA code for device only">;
+def cuda_host_only : Flag<["--"], "cuda-host-only">, Alias<offload_host_only>,
+ HelpText<"Compile CUDA code for host only. Has no effect on non-CUDA compilations.">;
+def cuda_compile_host_device : Flag<["--"], "cuda-compile-host-device">, Alias<offload_host_device>,
+ HelpText<"Compile CUDA code for both host and device (default). Has no "
+ "effect on non-CUDA compilations.">;
+
def cuda_noopt_device_debug : Flag<["--"], "cuda-noopt-device-debug">,
HelpText<"Enable device-side debug info generation. Disables ptxas optimizations.">;
def no_cuda_version_check : Flag<["--"], "no-cuda-version-check">,
@@ -917,121 +1267,177 @@ def cuda_path_ignore_env : Flag<["--"], "cuda-path-ignore-env">, Group<i_Group>,
HelpText<"Ignore environment variables to detect CUDA installation">;
def ptxas_path_EQ : Joined<["--"], "ptxas-path=">, Group<i_Group>,
HelpText<"Path to ptxas (used for compiling CUDA code)">;
-def fgpu_flush_denormals_to_zero : Flag<["-"], "fgpu-flush-denormals-to-zero">,
- HelpText<"Flush denormal floating point values to zero in CUDA/HIP device mode.">;
-def fno_gpu_flush_denormals_to_zero : Flag<["-"], "fno-gpu-flush-denormals-to-zero">;
def fcuda_flush_denormals_to_zero : Flag<["-"], "fcuda-flush-denormals-to-zero">,
Alias<fgpu_flush_denormals_to_zero>;
def fno_cuda_flush_denormals_to_zero : Flag<["-"], "fno-cuda-flush-denormals-to-zero">,
Alias<fno_gpu_flush_denormals_to_zero>;
-defm gpu_rdc : BoolFOption<"gpu-rdc",
- LangOpts<"GPURelocatableDeviceCode">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Generate relocatable device code, also known as separate compilation mode">,
- NegFlag<SetFalse>>;
def : Flag<["-"], "fcuda-rdc">, Alias<fgpu_rdc>;
def : Flag<["-"], "fno-cuda-rdc">, Alias<fno_gpu_rdc>;
defm cuda_short_ptr : BoolFOption<"cuda-short-ptr",
TargetOpts<"NVPTXUseShortPointers">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Use 32-bit pointers for accessing const/local/shared address spaces">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Use 32-bit pointers for accessing const/local/shared address spaces">,
NegFlag<SetFalse>>;
-def rocm_path_EQ : Joined<["--"], "rocm-path=">, Group<i_Group>,
+}
+
+def emit_static_lib : Flag<["--"], "emit-static-lib">,
+ HelpText<"Enable linker job to emit a static library.">;
+
+def mprintf_kind_EQ : Joined<["-"], "mprintf-kind=">, Group<m_Group>,
+ HelpText<"Specify the printf lowering scheme (AMDGPU only), allowed values are "
+ "\"hostcall\"(printing happens during kernel execution, this scheme "
+ "relies on hostcalls which require system to support pcie atomics) "
+ "and \"buffered\"(printing happens after all kernel threads exit, "
+ "this uses a printf buffer and does not rely on pcie atomic support)">,
+ Visibility<[ClangOption, CC1Option]>,
+ Values<"hostcall,buffered">,
+ NormalizedValuesScope<"TargetOptions::AMDGPUPrintfKind">,
+ NormalizedValues<["Hostcall", "Buffered"]>,
+ MarshallingInfoEnum<TargetOpts<"AMDGPUPrintfKindVal">, "Hostcall">;
+
+// HIP options
+let Group = hip_Group in {
+def hip_link : Flag<["--"], "hip-link">, Group<opencl_Group>,
+ HelpText<"Link clang-offload-bundler bundles for HIP">;
+def no_hip_rt: Flag<["-"], "no-hip-rt">, Group<hip_Group>,
+ HelpText<"Do not link against HIP runtime libraries">;
+def rocm_path_EQ : Joined<["--"], "rocm-path=">, Group<hip_Group>,
HelpText<"ROCm installation path, used for finding and automatically linking required bitcode libraries.">;
-def hip_path_EQ : Joined<["--"], "hip-path=">, Group<i_Group>,
+def hip_path_EQ : Joined<["--"], "hip-path=">, Group<hip_Group>,
HelpText<"HIP runtime installation path, used for finding HIP version and adding HIP include path.">;
-def amdgpu_arch_tool_EQ : Joined<["--"], "amdgpu-arch-tool=">, Group<i_Group>,
- HelpText<"Tool used for detecting AMD GPU arch in the system.">;
-def rocm_device_lib_path_EQ : Joined<["--"], "rocm-device-lib-path=">, Group<Link_Group>,
+def hipstdpar : Flag<["--"], "hipstdpar">,
+ Visibility<[ClangOption, CC1Option]>,
+ Group<CompileOnly_Group>,
+ HelpText<"Enable HIP acceleration for standard parallel algorithms">,
+ MarshallingInfoFlag<LangOpts<"HIPStdPar">>;
+def hipstdpar_interpose_alloc : Flag<["--"], "hipstdpar-interpose-alloc">,
+ Visibility<[ClangOption, CC1Option]>,
+ Group<CompileOnly_Group>,
+ HelpText<"Replace all memory allocation / deallocation calls with "
+ "hipManagedMalloc / hipFree equivalents">,
+ MarshallingInfoFlag<LangOpts<"HIPStdParInterposeAlloc">>;
+// TODO: use MarshallingInfo here
+def hipstdpar_path_EQ : Joined<["--"], "hipstdpar-path=">, Group<i_Group>,
+ HelpText<
+ "HIP Standard Parallel Algorithm Acceleration library path, used for "
+ "finding and implicitly including the library header">;
+def hipstdpar_thrust_path_EQ : Joined<["--"], "hipstdpar-thrust-path=">,
+ Group<i_Group>,
+ HelpText<
+ "rocThrust path, required by the HIP Standard Parallel Algorithm "
+ "Acceleration library, used to implicitly include the rocThrust library">;
+def hipstdpar_prim_path_EQ : Joined<["--"], "hipstdpar-prim-path=">,
+ Group<i_Group>,
+ HelpText<
+ "rocPrim path, required by the HIP Standard Parallel Algorithm "
+ "Acceleration library, used to implicitly include the rocPrim library">;
+def rocm_device_lib_path_EQ : Joined<["--"], "rocm-device-lib-path=">, Group<hip_Group>,
HelpText<"ROCm device library path. Alternative to rocm-path.">;
def : Joined<["--"], "hip-device-lib-path=">, Alias<rocm_device_lib_path_EQ>;
-def hip_device_lib_EQ : Joined<["--"], "hip-device-lib=">, Group<Link_Group>,
+def hip_device_lib_EQ : Joined<["--"], "hip-device-lib=">, Group<hip_Group>,
HelpText<"HIP device library">;
-def hip_version_EQ : Joined<["--"], "hip-version=">,
+def hip_version_EQ : Joined<["--"], "hip-version=">, Group<hip_Group>,
HelpText<"HIP version in the format of major.minor.patch">;
def fhip_dump_offload_linker_script : Flag<["-"], "fhip-dump-offload-linker-script">,
- Group<f_Group>, Flags<[NoArgumentUnused, HelpHidden]>;
+ Group<hip_Group>, Flags<[NoArgumentUnused, HelpHidden]>;
defm hip_new_launch_api : BoolFOption<"hip-new-launch-api",
LangOpts<"HIPUseNewLaunchAPI">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Use">, NegFlag<SetFalse, [], "Don't use">,
- BothFlags<[], " new kernel launching API for HIP">>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Use">,
+ NegFlag<SetFalse, [], [ClangOption], "Don't use">,
+ BothFlags<[], [ClangOption], " new kernel launching API for HIP">>;
defm hip_fp32_correctly_rounded_divide_sqrt : BoolFOption<"hip-fp32-correctly-rounded-divide-sqrt",
CodeGenOpts<"HIPCorrectlyRoundedDivSqrt">, DefaultTrue,
- PosFlag<SetTrue, [], "Specify">,
- NegFlag<SetFalse, [CC1Option], "Don't specify">,
- BothFlags<[], " that single precision floating-point divide and sqrt used in "
+ PosFlag<SetTrue, [], [ClangOption], "Specify">,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option], "Don't specify">,
+ BothFlags<[], [ClangOption], " that single precision floating-point divide and sqrt used in "
"the program source are correctly rounded (HIP device compilation only)">>,
ShouldParseIf<hip.KeyPath>;
+defm hip_kernel_arg_name : BoolFOption<"hip-kernel-arg-name",
+ CodeGenOpts<"HIPSaveKernelArgName">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Specify">,
+ NegFlag<SetFalse, [], [ClangOption], "Don't specify">,
+ BothFlags<[], [ClangOption], " that kernel argument names are preserved (HIP only)">>,
+ ShouldParseIf<hip.KeyPath>;
+def hipspv_pass_plugin_EQ : Joined<["--"], "hipspv-pass-plugin=">,
+ Group<Link_Group>, MetaVarName<"<dsopath>">,
+ HelpText<"path to a pass plugin for HIP to SPIR-V passes.">;
defm gpu_allow_device_init : BoolFOption<"gpu-allow-device-init",
LangOpts<"GPUAllowDeviceInit">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Allow">, NegFlag<SetFalse, [], "Don't allow">,
- BothFlags<[], " device side init function in HIP (experimental)">>,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Allow">,
+ NegFlag<SetFalse, [], [ClangOption], "Don't allow">,
+ BothFlags<[], [ClangOption], " device side init function in HIP (experimental)">>,
ShouldParseIf<hip.KeyPath>;
-defm gpu_defer_diag : BoolFOption<"gpu-defer-diag",
- LangOpts<"GPUDeferDiag">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Defer">, NegFlag<SetFalse, [], "Don't defer">,
- BothFlags<[], " host/device related diagnostic messages for CUDA/HIP">>;
-defm gpu_exclude_wrong_side_overloads : BoolFOption<"gpu-exclude-wrong-side-overloads",
- LangOpts<"GPUExcludeWrongSideOverloads">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Always exclude wrong side overloads">,
- NegFlag<SetFalse, [], "Exclude wrong side overloads only if there are same side overloads">,
- BothFlags<[HelpHidden], " in overloading resolution for CUDA/HIP">>;
def gpu_max_threads_per_block_EQ : Joined<["--"], "gpu-max-threads-per-block=">,
- Flags<[CC1Option]>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Default max threads per block for kernel launch bounds for HIP">,
MarshallingInfoInt<LangOpts<"GPUMaxThreadsPerBlock">, "1024">,
ShouldParseIf<hip.KeyPath>;
-def fgpu_inline_threshold_EQ : Joined<["-"], "fgpu-inline-threshold=">,
- Flags<[HelpHidden]>,
- HelpText<"Inline threshold for device compilation for CUDA/HIP">;
def gpu_instrument_lib_EQ : Joined<["--"], "gpu-instrument-lib=">,
HelpText<"Instrument device library for HIP, which is a LLVM bitcode containing "
"__cyg_profile_func_enter and __cyg_profile_func_exit">;
-def fgpu_sanitize : Flag<["-"], "fgpu-sanitize">, Group<f_Group>,
- HelpText<"Enable sanitizer for AMDGPU target">;
-def fno_gpu_sanitize : Flag<["-"], "fno-gpu-sanitize">, Group<f_Group>;
def gpu_bundle_output : Flag<["--"], "gpu-bundle-output">,
- Group<f_Group>, HelpText<"Bundle output files of HIP device compilation">;
+ HelpText<"Bundle output files of HIP device compilation">;
def no_gpu_bundle_output : Flag<["--"], "no-gpu-bundle-output">,
- Group<f_Group>, HelpText<"Do not bundle output files of HIP device compilation">;
-def cuid_EQ : Joined<["-"], "cuid=">, Flags<[CC1Option]>,
- HelpText<"An ID for compilation unit, which should be the same for the same "
- "compilation unit but different for different compilation units. "
- "It is used to externalize device-side static variables for single "
- "source offloading languages CUDA and HIP so that they can be "
- "accessed by the host code of the same compilation unit.">,
- MarshallingInfoString<LangOpts<"CUID">>;
-def fuse_cuid_EQ : Joined<["-"], "fuse-cuid=">,
- HelpText<"Method to generate ID's for compilation units for single source "
- "offloading languages CUDA and HIP: 'hash' (ID's generated by hashing "
- "file path and command line options) | 'random' (ID's generated as "
- "random numbers) | 'none' (disabled). Default is 'hash'. This option "
- "will be overriden by option '-cuid=[ID]' if it is specified." >;
-def libomptarget_amdgcn_bc_path_EQ : Joined<["--"], "libomptarget-amdgcn-bc-path=">, Group<i_Group>,
+ Group<hip_Group>, HelpText<"Do not bundle output files of HIP device compilation">;
+def fhip_emit_relocatable : Flag<["-"], "fhip-emit-relocatable">,
+ HelpText<"Compile HIP source to relocatable">;
+def fno_hip_emit_relocatable : Flag<["-"], "fno-hip-emit-relocatable">,
+ HelpText<"Do not override toolchain to compile HIP source to relocatable">;
+}
+
+// Clang specific/exclusive options for OpenACC.
+def openacc_macro_override
+ : Separate<["-"], "fexperimental-openacc-macro-override">,
+ Visibility<[ClangOption, CC1Option]>,
+ Group<f_Group>,
+ HelpText<"Overrides the _OPENACC macro value for experimental testing "
+ "during OpenACC support development">;
+def openacc_macro_override_EQ
+ : Joined<["-"], "fexperimental-openacc-macro-override=">,
+ Alias<openacc_macro_override>;
+
+// End Clang specific/exclusive options for OpenACC.
+
+def libomptarget_amdgpu_bc_path_EQ : Joined<["--"], "libomptarget-amdgpu-bc-path=">, Group<i_Group>,
HelpText<"Path to libomptarget-amdgcn bitcode library">;
+def libomptarget_amdgcn_bc_path_EQ : Joined<["--"], "libomptarget-amdgcn-bc-path=">, Group<i_Group>,
+ HelpText<"Path to libomptarget-amdgcn bitcode library">, Alias<libomptarget_amdgpu_bc_path_EQ>;
def libomptarget_nvptx_bc_path_EQ : Joined<["--"], "libomptarget-nvptx-bc-path=">, Group<i_Group>,
HelpText<"Path to libomptarget-nvptx bitcode library">;
-def dD : Flag<["-"], "dD">, Group<d_Group>, Flags<[CC1Option]>,
+def dD : Flag<["-"], "dD">, Group<d_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Print macro definitions in -E mode in addition to normal output">;
-def dI : Flag<["-"], "dI">, Group<d_Group>, Flags<[CC1Option]>,
+def dI : Flag<["-"], "dI">, Group<d_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Print include directives in -E mode in addition to normal output">,
MarshallingInfoFlag<PreprocessorOutputOpts<"ShowIncludeDirectives">>;
-def dM : Flag<["-"], "dM">, Group<d_Group>, Flags<[CC1Option]>,
+def dM : Flag<["-"], "dM">, Group<d_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Print macro definitions in -E mode instead of normal output">;
def dead__strip : Flag<["-"], "dead_strip">;
-def dependency_file : Separate<["-"], "dependency-file">, Flags<[CC1Option]>,
+def dependency_file : Separate<["-"], "dependency-file">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Filename (or -) to write dependency output to">,
MarshallingInfoString<DependencyOutputOpts<"OutputFile">>;
-def dependency_dot : Separate<["-"], "dependency-dot">, Flags<[CC1Option]>,
+def dependency_dot : Separate<["-"], "dependency-dot">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Filename to write DOT-formatted header dependencies to">,
MarshallingInfoString<DependencyOutputOpts<"DOTOutputFile">>;
def module_dependency_dir : Separate<["-"], "module-dependency-dir">,
- Flags<[CC1Option]>, HelpText<"Directory to dump module dependencies to">,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Directory to dump module dependencies to">,
MarshallingInfoString<DependencyOutputOpts<"ModuleDependencyOutputDir">>;
def dsym_dir : JoinedOrSeparate<["-"], "dsym-dir">,
Flags<[NoXarchOption, RenderAsInput]>,
HelpText<"Directory to output dSYM's (if any) to">, MetaVarName<"<dir>">;
-def dumpmachine : Flag<["-"], "dumpmachine">;
+// GCC style -dumpdir. We intentionally don't implement the less useful -dumpbase{,-ext}.
+def dumpdir : Separate<["-"], "dumpdir">, Visibility<[ClangOption, CC1Option]>,
+ MetaVarName<"<dumppfx>">,
+ HelpText<"Use <dumpfpx> as a prefix to form auxiliary and dump file names">;
+def dumpmachine : Flag<["-"], "dumpmachine">,
+ Visibility<[ClangOption, FlangOption]>,
+ HelpText<"Display the compiler's target processor">;
+def dumpversion : Flag<["-"], "dumpversion">,
+ Visibility<[ClangOption, FlangOption]>,
+ HelpText<"Display the version of the compiler">;
def dumpspecs : Flag<["-"], "dumpspecs">, Flags<[Unsupported]>;
-def dumpversion : Flag<["-"], "dumpversion">;
def dylib__file : Separate<["-"], "dylib_file">;
def dylinker__install__name : JoinedOrSeparate<["-"], "dylinker_install_name">;
def dylinker : Flag<["-"], "dylinker">;
@@ -1040,54 +1446,97 @@ def dynamic : Flag<["-"], "dynamic">, Flags<[NoArgumentUnused]>;
def d_Flag : Flag<["-"], "d">, Group<d_Group>;
def d_Joined : Joined<["-"], "d">, Group<d_Group>;
def emit_ast : Flag<["-"], "emit-ast">,
+ Visibility<[ClangOption, CLOption, DXCOption]>,
HelpText<"Emit Clang AST files for source inputs">;
-def emit_llvm : Flag<["-"], "emit-llvm">, Flags<[CC1Option]>, Group<Action_Group>,
+def emit_llvm : Flag<["-"], "emit-llvm">,
+ Visibility<[ClangOption, CC1Option, FC1Option, FlangOption]>,
+ Group<Action_Group>,
HelpText<"Use the LLVM representation for assembler and object files">;
-def emit_interface_stubs : Flag<["-"], "emit-interface-stubs">, Flags<[CC1Option]>, Group<Action_Group>,
+def emit_interface_stubs : Flag<["-"], "emit-interface-stubs">,
+ Visibility<[ClangOption, CC1Option]>, Group<Action_Group>,
HelpText<"Generate Interface Stub Files.">;
def emit_merged_ifs : Flag<["-"], "emit-merged-ifs">,
- Flags<[CC1Option]>, Group<Action_Group>,
+ Visibility<[ClangOption, CC1Option]>, Group<Action_Group>,
HelpText<"Generate Interface Stub Files, emit merged text not binary.">;
-def interface_stub_version_EQ : JoinedOrSeparate<["-"], "interface-stub-version=">, Flags<[CC1Option]>;
+def end_no_unused_arguments : Flag<["--"], "end-no-unused-arguments">,
+ Visibility<[ClangOption, CLOption, DXCOption]>,
+ HelpText<"Start emitting warnings for unused driver arguments">;
+def interface_stub_version_EQ : JoinedOrSeparate<["-"], "interface-stub-version=">,
+ Visibility<[ClangOption, CC1Option]>;
def exported__symbols__list : Separate<["-"], "exported_symbols_list">;
-def e : JoinedOrSeparate<["-"], "e">, Flags<[LinkerInput]>, Group<Link_Group>;
-def fmax_tokens_EQ : Joined<["-"], "fmax-tokens=">, Group<f_Group>, Flags<[CC1Option]>,
+def extract_api : Flag<["-"], "extract-api">,
+ Visibility<[ClangOption, CC1Option]>, Group<Action_Group>,
+ HelpText<"Extract API information">;
+def product_name_EQ: Joined<["--"], "product-name=">,
+ Visibility<[ClangOption, CC1Option]>,
+ MarshallingInfoString<FrontendOpts<"ProductName">>;
+def emit_symbol_graph_EQ: JoinedOrSeparate<["--"], "emit-symbol-graph=">,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Generate Extract API information as a side effect of compilation.">,
+ MarshallingInfoString<FrontendOpts<"SymbolGraphOutputDir">>;
+def extract_api_ignores_EQ: CommaJoined<["--"], "extract-api-ignores=">,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Comma separated list of files containing a new line separated list of API symbols to ignore when extracting API information.">,
+ MarshallingInfoStringVector<FrontendOpts<"ExtractAPIIgnoresFileList">>;
+def e : Separate<["-"], "e">, Flags<[LinkerInput]>, Group<Link_Group>;
+def fmax_tokens_EQ : Joined<["-"], "fmax-tokens=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Max total number of preprocessed tokens for -Wmax-tokens.">,
MarshallingInfoInt<LangOpts<"MaxTokens">>;
-def fPIC : Flag<["-"], "fPIC">, Group<f_Group>;
-def fno_PIC : Flag<["-"], "fno-PIC">, Group<f_Group>;
-def fPIE : Flag<["-"], "fPIE">, Group<f_Group>;
-def fno_PIE : Flag<["-"], "fno-PIE">, Group<f_Group>;
defm access_control : BoolFOption<"access-control",
LangOpts<"AccessControl">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option], "Disable C++ access control">,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option],
+ "Disable C++ access control">,
PosFlag<SetTrue>>;
def falign_functions : Flag<["-"], "falign-functions">, Group<f_Group>;
def falign_functions_EQ : Joined<["-"], "falign-functions=">, Group<f_Group>;
+def falign_loops_EQ : Joined<["-"], "falign-loops=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>, MetaVarName<"<N>">,
+ HelpText<"N must be a power of two. Align loops to the boundary">,
+ MarshallingInfoInt<CodeGenOpts<"LoopAlignment">>;
def fno_align_functions: Flag<["-"], "fno-align-functions">, Group<f_Group>;
defm allow_editor_placeholders : BoolFOption<"allow-editor-placeholders",
LangOpts<"AllowEditorPlaceholders">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Treat editor placeholders as valid source code">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Treat editor placeholders as valid source code">,
NegFlag<SetFalse>>;
def fallow_unsupported : Flag<["-"], "fallow-unsupported">, Group<f_Group>;
-def fapple_kext : Flag<["-"], "fapple-kext">, Group<f_Group>, Flags<[CC1Option]>,
+def fapple_kext : Flag<["-"], "fapple-kext">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Use Apple's kernel extensions ABI">,
MarshallingInfoFlag<LangOpts<"AppleKext">>;
+def fstrict_flex_arrays_EQ : Joined<["-"], "fstrict-flex-arrays=">, Group<f_Group>,
+ MetaVarName<"<n>">, Values<"0,1,2,3">,
+ LangOpts<"StrictFlexArraysLevel">,
+ Visibility<[ClangOption, CC1Option]>,
+ NormalizedValuesScope<"LangOptions::StrictFlexArraysLevelKind">,
+ NormalizedValues<["Default", "OneZeroOrIncomplete", "ZeroOrIncomplete", "IncompleteOnly"]>,
+ HelpText<"Enable optimizations based on the strict definition of flexible arrays">,
+ MarshallingInfoEnum<LangOpts<"StrictFlexArraysLevel">, "Default">;
defm apple_pragma_pack : BoolFOption<"apple-pragma-pack",
LangOpts<"ApplePragmaPack">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Enable Apple gcc-compatible #pragma pack handling">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Enable Apple gcc-compatible #pragma pack handling">,
NegFlag<SetFalse>>;
defm xl_pragma_pack : BoolFOption<"xl-pragma-pack",
LangOpts<"XLPragmaPack">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Enable IBM XL #pragma pack handling">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Enable IBM XL #pragma pack handling">,
NegFlag<SetFalse>>;
def shared_libsan : Flag<["-"], "shared-libsan">,
HelpText<"Dynamically link the sanitizer runtime">;
def static_libsan : Flag<["-"], "static-libsan">,
- HelpText<"Statically link the sanitizer runtime">;
+ HelpText<"Statically link the sanitizer runtime (Not supported for ASan, TSan or UBSan on darwin)">;
def : Flag<["-"], "shared-libasan">, Alias<shared_libsan>;
def fasm : Flag<["-"], "fasm">, Group<f_Group>;
+defm assume_unique_vtables : BoolFOption<"assume-unique-vtables",
+ CodeGenOpts<"AssumeUniqueVTables">, DefaultTrue,
+ PosFlag<SetTrue>,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option],
+ "Disable optimizations based on vtable pointer identity">,
+ BothFlags<[], [ClangOption, CLOption]>>;
+
def fassume_sane_operator_new : Flag<["-"], "fassume-sane-operator-new">, Group<f_Group>;
def fastcp : Flag<["-"], "fastcp">, Group<f_Group>;
def fastf : Flag<["-"], "fastf">, Group<f_Group>;
@@ -1095,24 +1544,64 @@ def fast : Flag<["-"], "fast">, Group<f_Group>;
def fasynchronous_unwind_tables : Flag<["-"], "fasynchronous-unwind-tables">, Group<f_Group>;
defm double_square_bracket_attributes : BoolFOption<"double-square-bracket-attributes",
- LangOpts<"DoubleSquareBracketAttributes">, Default<!strconcat(cpp11.KeyPath, "||", c2x.KeyPath)>,
- PosFlag<SetTrue, [], "Enable">, NegFlag<SetFalse, [], "Disable">,
- BothFlags<[NoXarchOption, CC1Option], " '[[]]' attributes in all C and C++ language modes">>;
+ LangOpts<"DoubleSquareBracketAttributes">, DefaultTrue, PosFlag<SetTrue>,
+ NegFlag<SetFalse>>;
defm autolink : BoolFOption<"autolink",
CodeGenOpts<"Autolink">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option], "Disable generation of linker directives for automatic library linking">,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option],
+ "Disable generation of linker directives for automatic library linking">,
PosFlag<SetTrue>>;
-// C++ Coroutines TS
-defm coroutines_ts : BoolFOption<"coroutines-ts",
+let Flags = [TargetSpecific] in {
+defm auto_import : BoolFOption<"auto-import",
+ CodeGenOpts<"AutoImport">, DefaultTrue,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option],
+ "MinGW specific. Disable support for automatic dllimport in code generation "
+ "and linking">,
+ PosFlag<SetTrue, [], [], "MinGW specific. Enable code generation support for "
+ "automatic dllimport, and enable support for it in the linker. "
+ "Enabled by default.">>;
+} // let Flags = [TargetSpecific]
+
+// In the future this option will be supported by other offloading
+// languages and accept other values such as CPU/GPU architectures,
+// offload kinds and target aliases.
+def offload_EQ : CommaJoined<["--"], "offload=">, Flags<[NoXarchOption]>,
+ HelpText<"Specify comma-separated list of offloading target triples (CUDA and HIP only)">;
+
+// C++ Coroutines
+defm coroutines : BoolFOption<"coroutines",
LangOpts<"Coroutines">, Default<cpp20.KeyPath>,
- PosFlag<SetTrue, [CC1Option], "Enable support for the C++ Coroutines TS">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Enable support for the C++ Coroutines">,
NegFlag<SetFalse>>;
+defm coro_aligned_allocation : BoolFOption<"coro-aligned-allocation",
+ LangOpts<"CoroAlignedAllocation">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Prefer aligned allocation for C++ Coroutines">,
+ NegFlag<SetFalse>>;
+
+defm experimental_library : BoolFOption<"experimental-library",
+ LangOpts<"ExperimentalLibrary">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option, CLOption],
+ "Control whether unstable and experimental library features are enabled. "
+ "This option enables various library features that are either experimental (also known as TSes), or have been "
+ "but are not stable yet in the selected Standard Library implementation. It is not recommended to use this option "
+ "in production code, since neither ABI nor API stability are guaranteed. This is intended to provide a preview "
+ "of features that will ship in the future for experimentation purposes">,
+ NegFlag<SetFalse>>;
+
+def fembed_offload_object_EQ : Joined<["-"], "fembed-offload-object=">,
+ Group<f_Group>, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CC1Option, FC1Option]>,
+ HelpText<"Embed Offloading device-side binary into host object file as a section.">,
+ MarshallingInfoStringVector<CodeGenOpts<"OffloadObjects">>;
def fembed_bitcode_EQ : Joined<["-"], "fembed-bitcode=">,
- Group<f_Group>, Flags<[NoXarchOption, CC1Option, CC1AsOption]>, MetaVarName<"<option>">,
- HelpText<"Embed LLVM bitcode (option: off, all, bitcode, marker)">,
+ Group<f_Group>, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CC1Option, CC1AsOption]>, MetaVarName<"<option>">,
+ HelpText<"Embed LLVM bitcode">,
Values<"off,all,bitcode,marker">, NormalizedValuesScope<"CodeGenOptions">,
NormalizedValues<["Embed_Off", "Embed_All", "Embed_Bitcode", "Embed_Marker"]>,
MarshallingInfoEnum<CodeGenOpts<"EmbedBitcode">, "Embed_Off">;
@@ -1124,26 +1613,37 @@ def fembed_bitcode_marker : Flag<["-"], "fembed-bitcode-marker">,
HelpText<"Embed placeholder LLVM IR data as a marker">;
defm gnu_inline_asm : BoolFOption<"gnu-inline-asm",
LangOpts<"GNUAsm">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option], "Disable GNU style inline asm">, PosFlag<SetTrue>>;
+ NegFlag<SetFalse, [], [ClangOption, CC1Option],
+ "Disable GNU style inline asm">,
+ PosFlag<SetTrue>>;
def fprofile_sample_use : Flag<["-"], "fprofile-sample-use">, Group<f_Group>,
- Flags<[CoreOption]>;
+ Visibility<[ClangOption, CLOption]>;
def fno_profile_sample_use : Flag<["-"], "fno-profile-sample-use">, Group<f_Group>,
- Flags<[CoreOption]>;
+ Visibility<[ClangOption, CLOption]>;
def fprofile_sample_use_EQ : Joined<["-"], "fprofile-sample-use=">,
- Group<f_Group>, Flags<[NoXarchOption, CC1Option]>,
+ Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable sample-based profile guided optimizations">,
MarshallingInfoString<CodeGenOpts<"SampleProfileFile">>;
def fprofile_sample_accurate : Flag<["-"], "fprofile-sample-accurate">,
- Group<f_Group>, Flags<[NoXarchOption, CC1Option]>,
+ Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Specifies that the sample profile is accurate">,
DocBrief<[{Specifies that the sample profile is accurate. If the sample
profile is accurate, callsites without profile samples are marked
as cold. Otherwise, treat callsites without profile samples as if
we have no profile}]>,
MarshallingInfoFlag<CodeGenOpts<"ProfileSampleAccurate">>;
-def fno_profile_sample_accurate : Flag<["-"], "fno-profile-sample-accurate">,
- Group<f_Group>, Flags<[NoXarchOption]>;
+def fsample_profile_use_profi : Flag<["-"], "fsample-profile-use-profi">,
+ Visibility<[ClangOption, CC1Option]>,
+ Group<f_Group>,
+ HelpText<"Use profi to infer block and edge counts">,
+ DocBrief<[{Infer block and edge counts. If the profiles have errors or missing
+ blocks caused by sampling, profile inference (profi) can convert
+ basic block counts to branch probabilites to fix them by extended
+ and re-engineered classic MCMF (min-cost max-flow) approach.}]>;
+def fno_profile_sample_accurate : Flag<["-"], "fno-profile-sample-accurate">, Group<f_Group>;
def fauto_profile : Flag<["-"], "fauto-profile">, Group<f_Group>,
Alias<fprofile_sample_use>;
def fno_auto_profile : Flag<["-"], "fno-auto-profile">, Group<f_Group>,
@@ -1155,255 +1655,396 @@ def fauto_profile_accurate : Flag<["-"], "fauto-profile-accurate">,
def fno_auto_profile_accurate : Flag<["-"], "fno-auto-profile-accurate">,
Group<f_Group>, Alias<fno_profile_sample_accurate>;
def fdebug_compilation_dir_EQ : Joined<["-"], "fdebug-compilation-dir=">,
- Group<f_Group>, Flags<[CC1Option, CC1AsOption, CoreOption]>,
+ Group<f_Group>,
+ Visibility<[ClangOption, CC1Option, CC1AsOption, CLOption, DXCOption]>,
HelpText<"The compilation directory to embed in the debug info">,
MarshallingInfoString<CodeGenOpts<"DebugCompilationDir">>;
def fdebug_compilation_dir : Separate<["-"], "fdebug-compilation-dir">,
- Group<f_Group>, Flags<[CC1Option, CC1AsOption, CoreOption]>,
+ Group<f_Group>,
+ Visibility<[ClangOption, CC1Option, CC1AsOption, CLOption, DXCOption]>,
Alias<fdebug_compilation_dir_EQ>;
def fcoverage_compilation_dir_EQ : Joined<["-"], "fcoverage-compilation-dir=">,
- Group<f_Group>, Flags<[CC1Option, CC1AsOption, CoreOption]>,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option, CC1AsOption, CLOption]>,
HelpText<"The compilation directory to embed in the coverage mapping.">,
MarshallingInfoString<CodeGenOpts<"CoverageCompilationDir">>;
def ffile_compilation_dir_EQ : Joined<["-"], "ffile-compilation-dir=">, Group<f_Group>,
- Flags<[CoreOption]>,
+ Visibility<[ClangOption, CLOption, DXCOption]>,
HelpText<"The compilation directory to embed in the debug info and coverage mapping.">;
defm debug_info_for_profiling : BoolFOption<"debug-info-for-profiling",
CodeGenOpts<"DebugInfoForProfiling">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Emit extra debug info to make sample profile more accurate">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Emit extra debug info to make sample profile more accurate">,
NegFlag<SetFalse>>;
def fprofile_instr_generate : Flag<["-"], "fprofile-instr-generate">,
- Group<f_Group>, Flags<[CoreOption]>,
+ Group<f_Group>, Visibility<[ClangOption, CLOption]>,
HelpText<"Generate instrumented code to collect execution counts into default.profraw file (overridden by '=' form of option or LLVM_PROFILE_FILE env var)">;
def fprofile_instr_generate_EQ : Joined<["-"], "fprofile-instr-generate=">,
- Group<f_Group>, Flags<[CoreOption]>, MetaVarName<"<file>">,
+ Group<f_Group>, Visibility<[ClangOption, CLOption]>, MetaVarName<"<file>">,
HelpText<"Generate instrumented code to collect execution counts into <file> (overridden by LLVM_PROFILE_FILE env var)">;
def fprofile_instr_use : Flag<["-"], "fprofile-instr-use">, Group<f_Group>,
- Flags<[CoreOption]>;
+ Visibility<[ClangOption, CLOption]>;
def fprofile_instr_use_EQ : Joined<["-"], "fprofile-instr-use=">,
- Group<f_Group>, Flags<[CoreOption]>,
+ Group<f_Group>, Visibility<[ClangOption, CLOption]>,
HelpText<"Use instrumentation data for profile-guided optimization">;
def fprofile_remapping_file_EQ : Joined<["-"], "fprofile-remapping-file=">,
- Group<f_Group>, Flags<[CC1Option, CoreOption]>, MetaVarName<"<file>">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option, CLOption]>,
+ MetaVarName<"<file>">,
HelpText<"Use the remappings described in <file> to match the profile data against names in the program">,
MarshallingInfoString<CodeGenOpts<"ProfileRemappingFile">>;
defm coverage_mapping : BoolFOption<"coverage-mapping",
CodeGenOpts<"CoverageMapping">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Generate coverage mapping to enable code coverage analysis">,
- NegFlag<SetFalse, [], "Disable code coverage analysis">, BothFlags<[CoreOption]>>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Generate coverage mapping to enable code coverage analysis">,
+ NegFlag<SetFalse, [], [ClangOption], "Disable code coverage analysis">, BothFlags<
+ [], [ClangOption, CLOption]>>;
+defm mcdc_coverage : BoolFOption<"coverage-mcdc",
+ CodeGenOpts<"MCDCCoverage">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Enable MC/DC criteria when generating code coverage">,
+ NegFlag<SetFalse, [], [ClangOption], "Disable MC/DC coverage criteria">,
+ BothFlags<[], [ClangOption, CLOption]>>;
def fprofile_generate : Flag<["-"], "fprofile-generate">,
- Group<f_Group>, Flags<[CoreOption]>,
+ Group<f_Group>, Visibility<[ClangOption, CLOption]>,
HelpText<"Generate instrumented code to collect execution counts into default.profraw (overridden by LLVM_PROFILE_FILE env var)">;
def fprofile_generate_EQ : Joined<["-"], "fprofile-generate=">,
- Group<f_Group>, Flags<[CoreOption]>, MetaVarName<"<directory>">,
+ Group<f_Group>, Visibility<[ClangOption, CLOption]>,
+ MetaVarName<"<directory>">,
HelpText<"Generate instrumented code to collect execution counts into <directory>/default.profraw (overridden by LLVM_PROFILE_FILE env var)">;
def fcs_profile_generate : Flag<["-"], "fcs-profile-generate">,
- Group<f_Group>, Flags<[CoreOption]>,
+ Group<f_Group>, Visibility<[ClangOption, CLOption]>,
HelpText<"Generate instrumented code to collect context sensitive execution counts into default.profraw (overridden by LLVM_PROFILE_FILE env var)">;
def fcs_profile_generate_EQ : Joined<["-"], "fcs-profile-generate=">,
- Group<f_Group>, Flags<[CoreOption]>, MetaVarName<"<directory>">,
+ Group<f_Group>, Visibility<[ClangOption, CLOption]>,
+ MetaVarName<"<directory>">,
HelpText<"Generate instrumented code to collect context sensitive execution counts into <directory>/default.profraw (overridden by LLVM_PROFILE_FILE env var)">;
def fprofile_use : Flag<["-"], "fprofile-use">, Group<f_Group>,
- Alias<fprofile_instr_use>;
+ Visibility<[ClangOption, CLOption]>, Alias<fprofile_instr_use>;
def fprofile_use_EQ : Joined<["-"], "fprofile-use=">,
- Group<f_Group>, Flags<[NoXarchOption]>, MetaVarName<"<pathname>">,
+ Group<f_Group>,
+ Visibility<[ClangOption, CLOption]>,
+ MetaVarName<"<pathname>">,
HelpText<"Use instrumentation data for profile-guided optimization. If pathname is a directory, it reads from <pathname>/default.profdata. Otherwise, it reads from file <pathname>.">;
def fno_profile_instr_generate : Flag<["-"], "fno-profile-instr-generate">,
- Group<f_Group>, Flags<[CoreOption]>,
+ Group<f_Group>, Visibility<[ClangOption, CLOption]>,
HelpText<"Disable generation of profile instrumentation.">;
def fno_profile_generate : Flag<["-"], "fno-profile-generate">,
- Group<f_Group>, Flags<[CoreOption]>,
+ Group<f_Group>, Visibility<[ClangOption, CLOption]>,
HelpText<"Disable generation of profile instrumentation.">;
def fno_profile_instr_use : Flag<["-"], "fno-profile-instr-use">,
- Group<f_Group>, Flags<[CoreOption]>,
+ Group<f_Group>, Visibility<[ClangOption, CLOption]>,
HelpText<"Disable using instrumentation data for profile-guided optimization">;
def fno_profile_use : Flag<["-"], "fno-profile-use">,
Alias<fno_profile_instr_use>;
-defm profile_arcs : BoolFOption<"profile-arcs",
- CodeGenOpts<"EmitGcovArcs">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option, LinkOption]>, NegFlag<SetFalse>>;
-defm test_coverage : BoolFOption<"test-coverage",
- CodeGenOpts<"EmitGcovNotes">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option]>, NegFlag<SetFalse>>;
+def ftest_coverage : Flag<["-"], "ftest-coverage">, Group<f_Group>,
+ HelpText<"Produce gcov notes files (*.gcno)">;
+def fno_test_coverage : Flag<["-"], "fno-test-coverage">, Group<f_Group>;
+def fprofile_arcs : Flag<["-"], "fprofile-arcs">, Group<f_Group>,
+ HelpText<"Instrument code to produce gcov data files (*.gcda)">;
+def fno_profile_arcs : Flag<["-"], "fno-profile-arcs">, Group<f_Group>;
def fprofile_filter_files_EQ : Joined<["-"], "fprofile-filter-files=">,
- Group<f_Group>, Flags<[CC1Option, CoreOption]>,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option, CLOption]>,
HelpText<"Instrument only functions from files where names match any regex separated by a semi-colon">,
- MarshallingInfoString<CodeGenOpts<"ProfileFilterFiles">>,
- ShouldParseIf<!strconcat(fprofile_arcs.KeyPath, "||", ftest_coverage.KeyPath)>;
+ MarshallingInfoString<CodeGenOpts<"ProfileFilterFiles">>;
def fprofile_exclude_files_EQ : Joined<["-"], "fprofile-exclude-files=">,
- Group<f_Group>, Flags<[CC1Option, CoreOption]>,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option, CLOption]>,
HelpText<"Instrument only functions from files where names don't match all the regexes separated by a semi-colon">,
- MarshallingInfoString<CodeGenOpts<"ProfileExcludeFiles">>,
- ShouldParseIf<!strconcat(fprofile_arcs.KeyPath, "||", ftest_coverage.KeyPath)>;
+ MarshallingInfoString<CodeGenOpts<"ProfileExcludeFiles">>;
def fprofile_update_EQ : Joined<["-"], "fprofile-update=">,
- Group<f_Group>, Flags<[CC1Option, CoreOption]>, Values<"atomic,prefer-atomic,single">,
- MetaVarName<"<method>">, HelpText<"Set update method of profile counters (atomic,prefer-atomic,single)">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option, CLOption]>,
+ Values<"atomic,prefer-atomic,single">,
+ MetaVarName<"<method>">, HelpText<"Set update method of profile counters">,
MarshallingInfoFlag<CodeGenOpts<"AtomicProfileUpdate">>;
defm pseudo_probe_for_profiling : BoolFOption<"pseudo-probe-for-profiling",
CodeGenOpts<"PseudoProbeForProfiling">, DefaultFalse,
- PosFlag<SetTrue, [], "Emit">, NegFlag<SetFalse, [], "Do not emit">,
- BothFlags<[NoXarchOption, CC1Option], " pseudo probes for sample profiling">>;
+ PosFlag<SetTrue, [], [ClangOption], "Emit">,
+ NegFlag<SetFalse, [], [ClangOption], "Do not emit">,
+ BothFlags<[], [ClangOption, CC1Option],
+ " pseudo probes for sample profiling">>;
def forder_file_instrumentation : Flag<["-"], "forder-file-instrumentation">,
- Group<f_Group>, Flags<[CC1Option, CoreOption]>,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option, CLOption]>,
HelpText<"Generate instrumented code to collect order file into default.profraw file (overridden by '=' form of option or LLVM_PROFILE_FILE env var)">;
def fprofile_list_EQ : Joined<["-"], "fprofile-list=">,
- Group<f_Group>, Flags<[CC1Option, CoreOption]>,
- HelpText<"Filename defining the list of functions/files to instrument">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option, CLOption]>,
+ HelpText<"Filename defining the list of functions/files to instrument. "
+ "The file uses the sanitizer special case list format.">,
MarshallingInfoStringVector<LangOpts<"ProfileListFiles">>;
+def fprofile_function_groups : Joined<["-"], "fprofile-function-groups=">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>, MetaVarName<"<N>">,
+ HelpText<"Partition functions into N groups and select only functions in group i to be instrumented using -fprofile-selected-function-group">,
+ MarshallingInfoInt<CodeGenOpts<"ProfileTotalFunctionGroups">, "1">;
+def fprofile_selected_function_group :
+ Joined<["-"], "fprofile-selected-function-group=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>, MetaVarName<"<i>">,
+ HelpText<"Partition functions into N groups using -fprofile-function-groups and select only functions in group i to be instrumented. The valid range is 0 to N-1 inclusive">,
+ MarshallingInfoInt<CodeGenOpts<"ProfileSelectedFunctionGroup">>;
+def fswift_async_fp_EQ : Joined<["-"], "fswift-async-fp=">,
+ Group<f_Group>,
+ Visibility<[ClangOption, CC1Option, CC1AsOption, CLOption]>,
+ MetaVarName<"<option>">,
+ HelpText<"Control emission of Swift async extended frame info">,
+ Values<"auto,always,never">,
+ NormalizedValuesScope<"CodeGenOptions::SwiftAsyncFramePointerKind">,
+ NormalizedValues<["Auto", "Always", "Never"]>,
+ MarshallingInfoEnum<CodeGenOpts<"SwiftAsyncFramePointer">, "Always">;
+defm apinotes : BoolOption<"f", "apinotes",
+ LangOpts<"APINotes">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption], "Enable">,
+ NegFlag<SetFalse, [], [ClangOption], "Disable">,
+ BothFlags<[], [ClangOption, CC1Option], "external API notes support">>,
+ Group<f_clang_Group>;
+defm apinotes_modules : BoolOption<"f", "apinotes-modules",
+ LangOpts<"APINotesModules">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption], "Enable">,
+ NegFlag<SetFalse, [], [ClangOption], "Disable">,
+ BothFlags<[], [ClangOption, CC1Option], "module-based external API notes support">>,
+ Group<f_clang_Group>;
+def fapinotes_swift_version : Joined<["-"], "fapinotes-swift-version=">,
+ Group<f_clang_Group>, Visibility<[ClangOption, CC1Option]>,
+ MetaVarName<"<version>">,
+ HelpText<"Specify the Swift version to use when filtering API notes">;
defm addrsig : BoolFOption<"addrsig",
CodeGenOpts<"Addrsig">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Emit">, NegFlag<SetFalse, [], "Don't emit">,
- BothFlags<[CoreOption], " an address-significance table">>;
-defm blocks : OptInFFlag<"blocks", "Enable the 'blocks' language feature", "", "", [CoreOption]>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Emit">,
+ NegFlag<SetFalse, [], [ClangOption], "Don't emit">,
+ BothFlags<[], [ClangOption, CLOption],
+ " an address-significance table">>;
+defm blocks : OptInCC1FFlag<"blocks", "Enable the 'blocks' language feature",
+ "", "", [ClangOption, CLOption]>;
def fbootclasspath_EQ : Joined<["-"], "fbootclasspath=">, Group<f_Group>;
defm borland_extensions : BoolFOption<"borland-extensions",
LangOpts<"Borland">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Accept non-standard constructs supported by the Borland compiler">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Accept non-standard constructs supported by the Borland compiler">,
NegFlag<SetFalse>>;
-def fbuiltin : Flag<["-"], "fbuiltin">, Group<f_Group>, Flags<[CoreOption]>;
+def fbuiltin : Flag<["-"], "fbuiltin">, Group<f_Group>,
+ Visibility<[ClangOption, CLOption, DXCOption]>;
def fbuiltin_module_map : Flag <["-"], "fbuiltin-module-map">, Group<f_Group>,
- Flags<[NoXarchOption]>, HelpText<"Load the clang builtins module map file.">;
+ Flags<[]>, HelpText<"Load the clang builtins module map file.">;
defm caret_diagnostics : BoolFOption<"caret-diagnostics",
DiagnosticOpts<"ShowCarets">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option]>, PosFlag<SetTrue>>;
+ NegFlag<SetFalse, [], [ClangOption, CC1Option]>,
+ PosFlag<SetTrue>>;
def fclang_abi_compat_EQ : Joined<["-"], "fclang-abi-compat=">, Group<f_clang_Group>,
- Flags<[CC1Option]>, MetaVarName<"<version>">, Values<"<major>.<minor>,latest">,
+ Visibility<[ClangOption, CC1Option]>,
+ MetaVarName<"<version>">, Values<"<major>.<minor>,latest">,
HelpText<"Attempt to match the ABI of Clang <version>">;
def fclasspath_EQ : Joined<["-"], "fclasspath=">, Group<f_Group>;
-defm color_diagnostics : OptInFFlag<"color-diagnostics", "Enable", "Disable", " colors in diagnostics",
- [CoreOption, FlangOption]>;
-def fdiagnostics_color : Flag<["-"], "fdiagnostics-color">, Group<f_Group>,
- Flags<[CoreOption, NoXarchOption]>;
+def fcolor_diagnostics : Flag<["-"], "fcolor-diagnostics">, Group<f_Group>,
+
+ Visibility<[ClangOption, CLOption, DXCOption, CC1Option, FlangOption, FC1Option]>,
+ HelpText<"Enable colors in diagnostics">;
+def fno_color_diagnostics : Flag<["-"], "fno-color-diagnostics">, Group<f_Group>,
+ Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>,
+ HelpText<"Disable colors in diagnostics">;
+def : Flag<["-"], "fdiagnostics-color">, Group<f_Group>,
+ Visibility<[ClangOption, CLOption, DXCOption]>, Alias<fcolor_diagnostics>;
+def : Flag<["-"], "fno-diagnostics-color">, Group<f_Group>,
+ Visibility<[ClangOption, CLOption, DXCOption]>, Alias<fno_color_diagnostics>;
def fdiagnostics_color_EQ : Joined<["-"], "fdiagnostics-color=">, Group<f_Group>;
def fansi_escape_codes : Flag<["-"], "fansi-escape-codes">, Group<f_Group>,
- Flags<[CoreOption, CC1Option]>, HelpText<"Use ANSI escape codes for diagnostics">,
+ Visibility<[ClangOption, CLOption, DXCOption, CC1Option]>,
+ HelpText<"Use ANSI escape codes for diagnostics">,
MarshallingInfoFlag<DiagnosticOpts<"UseANSIEscapeCodes">>;
-def fcomment_block_commands : CommaJoined<["-"], "fcomment-block-commands=">, Group<f_clang_Group>, Flags<[CC1Option]>,
+def fcomment_block_commands : CommaJoined<["-"], "fcomment-block-commands=">, Group<f_clang_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Treat each comma separated argument in <arg> as a documentation comment block command">,
MetaVarName<"<arg>">, MarshallingInfoStringVector<LangOpts<"CommentOpts.BlockCommandNames">>;
-def fparse_all_comments : Flag<["-"], "fparse-all-comments">, Group<f_clang_Group>, Flags<[CC1Option]>,
+defm define_target_os_macros : OptInCC1FFlag<"define-target-os-macros",
+ "Enable", "Disable", " predefined target OS macros",
+ [ClangOption, CC1Option]>;
+def fparse_all_comments : Flag<["-"], "fparse-all-comments">, Group<f_clang_Group>,
+ Visibility<[ClangOption, CC1Option]>,
MarshallingInfoFlag<LangOpts<"CommentOpts.ParseAllComments">>;
def frecord_command_line : Flag<["-"], "frecord-command-line">,
+ DocBrief<[{Generate a section named ".GCC.command.line" containing the clang
+driver command-line. After linking, the section may contain multiple command
+lines, which will be individually terminated by null bytes. Separate arguments
+within a command line are combined with spaces; spaces and backslashes within an
+argument are escaped with backslashes. This format differs from the format of
+the equivalent section produced by GCC with the -frecord-gcc-switches flag.
+This option is currently only supported on ELF targets.}]>,
Group<f_clang_Group>;
def fno_record_command_line : Flag<["-"], "fno-record-command-line">,
Group<f_clang_Group>;
def : Flag<["-"], "frecord-gcc-switches">, Alias<frecord_command_line>;
def : Flag<["-"], "fno-record-gcc-switches">, Alias<fno_record_command_line>;
def fcommon : Flag<["-"], "fcommon">, Group<f_Group>,
- Flags<[CoreOption, CC1Option]>, HelpText<"Place uninitialized global variables in a common block">,
- MarshallingInfoNegativeFlag<CodeGenOpts<"NoCommon">>;
+ Visibility<[ClangOption, CLOption, CC1Option]>,
+ HelpText<"Place uninitialized global variables in a common block">,
+ MarshallingInfoNegativeFlag<CodeGenOpts<"NoCommon">>,
+ DocBrief<[{Place definitions of variables with no storage class and no initializer
+(tentative definitions) in a common block, instead of generating individual
+zero-initialized definitions (default -fno-common).}]>;
def fcompile_resource_EQ : Joined<["-"], "fcompile-resource=">, Group<f_Group>;
defm complete_member_pointers : BoolOption<"f", "complete-member-pointers",
LangOpts<"CompleteMemberPointers">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Require">, NegFlag<SetFalse, [], "Do not require">,
- BothFlags<[CoreOption], " member pointer base types to be complete if they"
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Require">,
+ NegFlag<SetFalse, [], [ClangOption], "Do not require">,
+ BothFlags<[], [ClangOption, CLOption],
+ " member pointer base types to be complete if they"
" would be significant under the Microsoft ABI">>,
Group<f_clang_Group>;
def fcf_runtime_abi_EQ : Joined<["-"], "fcf-runtime-abi=">, Group<f_Group>,
- Flags<[CC1Option]>, Values<"unspecified,standalone,objc,swift,swift-5.0,swift-4.2,swift-4.1">,
+ Visibility<[ClangOption, CC1Option]>,
+ Values<"unspecified,standalone,objc,swift,swift-5.0,swift-4.2,swift-4.1">,
NormalizedValuesScope<"LangOptions::CoreFoundationABI">,
NormalizedValues<["ObjectiveC", "ObjectiveC", "ObjectiveC", "Swift5_0", "Swift5_0", "Swift4_2", "Swift4_1"]>,
MarshallingInfoEnum<LangOpts<"CFRuntime">, "ObjectiveC">;
defm constant_cfstrings : BoolFOption<"constant-cfstrings",
LangOpts<"NoConstantCFStrings">, DefaultFalse,
- NegFlag<SetTrue, [CC1Option], "Disable creation of CodeFoundation-type constant strings">,
+ NegFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Disable creation of CodeFoundation-type constant strings">,
PosFlag<SetFalse>>;
def fconstant_string_class_EQ : Joined<["-"], "fconstant-string-class=">, Group<f_Group>;
-def fconstexpr_depth_EQ : Joined<["-"], "fconstexpr-depth=">, Group<f_Group>;
-def fconstexpr_steps_EQ : Joined<["-"], "fconstexpr-steps=">, Group<f_Group>;
+def fconstexpr_depth_EQ : Joined<["-"], "fconstexpr-depth=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Set the maximum depth of recursive constexpr function calls">,
+ MarshallingInfoInt<LangOpts<"ConstexprCallDepth">, "512">;
+def fconstexpr_steps_EQ : Joined<["-"], "fconstexpr-steps=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Set the maximum number of steps in constexpr function evaluation">,
+ MarshallingInfoInt<LangOpts<"ConstexprStepLimit">, "1048576">;
def fexperimental_new_constant_interpreter : Flag<["-"], "fexperimental-new-constant-interpreter">, Group<f_Group>,
- HelpText<"Enable the experimental new constant interpreter">, Flags<[CC1Option]>,
+ HelpText<"Enable the experimental new constant interpreter">,
+ Visibility<[ClangOption, CC1Option]>,
MarshallingInfoFlag<LangOpts<"EnableNewConstInterp">>;
-def fconstexpr_backtrace_limit_EQ : Joined<["-"], "fconstexpr-backtrace-limit=">,
- Group<f_Group>;
-def fno_crash_diagnostics : Flag<["-"], "fno-crash-diagnostics">, Group<f_clang_Group>, Flags<[NoArgumentUnused, CoreOption]>,
+def fconstexpr_backtrace_limit_EQ : Joined<["-"], "fconstexpr-backtrace-limit=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Set the maximum number of entries to print in a constexpr evaluation backtrace (0 = no limit)">,
+ MarshallingInfoInt<DiagnosticOpts<"ConstexprBacktraceLimit">, "DiagnosticOptions::DefaultConstexprBacktraceLimit">;
+def fcrash_diagnostics_EQ : Joined<["-"], "fcrash-diagnostics=">, Group<f_clang_Group>,
+ Flags<[NoArgumentUnused]>, Visibility<[ClangOption, CLOption, DXCOption]>,
+ HelpText<"Set level of crash diagnostic reporting, (option: off, compiler, all)">;
+def fcrash_diagnostics : Flag<["-"], "fcrash-diagnostics">, Group<f_clang_Group>,
+ Flags<[NoArgumentUnused]>, Visibility<[ClangOption, CLOption, DXCOption]>,
+ HelpText<"Enable crash diagnostic reporting (default)">, Alias<fcrash_diagnostics_EQ>, AliasArgs<["compiler"]>;
+def fno_crash_diagnostics : Flag<["-"], "fno-crash-diagnostics">, Group<f_clang_Group>,
+ Flags<[NoArgumentUnused]>, Visibility<[ClangOption, CLOption, DXCOption]>,
+ Alias<gen_reproducer_eq>, AliasArgs<["off"]>,
HelpText<"Disable auto-generation of preprocessed source files and a script for reproduction during a clang crash">;
def fcrash_diagnostics_dir : Joined<["-"], "fcrash-diagnostics-dir=">,
- Group<f_clang_Group>, Flags<[NoArgumentUnused, CoreOption]>,
+ Group<f_clang_Group>, Flags<[NoArgumentUnused]>,
+ Visibility<[ClangOption, CLOption, DXCOption]>,
HelpText<"Put crash-report files in <dir>">, MetaVarName<"<dir>">;
def fcreate_profile : Flag<["-"], "fcreate-profile">, Group<f_Group>;
defm cxx_exceptions: BoolFOption<"cxx-exceptions",
LangOpts<"CXXExceptions">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Enable C++ exceptions">, NegFlag<SetFalse>>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Enable C++ exceptions">,
+ NegFlag<SetFalse>>;
defm async_exceptions: BoolFOption<"async-exceptions",
LangOpts<"EHAsynch">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Enable EH Asynchronous exceptions">, NegFlag<SetFalse>>;
-def fcxx_modules : Flag <["-"], "fcxx-modules">, Group<f_Group>,
- Flags<[NoXarchOption]>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Enable EH Asynchronous exceptions">,
+ NegFlag<SetFalse>>;
+defm cxx_modules : BoolFOption<"cxx-modules",
+ LangOpts<"CPlusPlusModules">, Default<cpp20.KeyPath>,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option], "Disable">,
+ PosFlag<SetTrue, [], [ClangOption], "Enable">,
+ BothFlags<[], [], " modules for C++">>,
+ ShouldParseIf<cplusplus.KeyPath>;
def fdebug_pass_arguments : Flag<["-"], "fdebug-pass-arguments">, Group<f_Group>;
def fdebug_pass_structure : Flag<["-"], "fdebug-pass-structure">, Group<f_Group>;
def fdepfile_entry : Joined<["-"], "fdepfile-entry=">,
- Group<f_clang_Group>, Flags<[CC1Option]>;
+ Group<f_clang_Group>, Visibility<[ClangOption, CC1Option]>;
def fdiagnostics_fixit_info : Flag<["-"], "fdiagnostics-fixit-info">, Group<f_clang_Group>;
def fno_diagnostics_fixit_info : Flag<["-"], "fno-diagnostics-fixit-info">, Group<f_Group>,
- Flags<[CC1Option]>, HelpText<"Do not include fixit information in diagnostics">,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Do not include fixit information in diagnostics">,
MarshallingInfoNegativeFlag<DiagnosticOpts<"ShowFixits">>;
def fdiagnostics_parseable_fixits : Flag<["-"], "fdiagnostics-parseable-fixits">, Group<f_clang_Group>,
- Flags<[CoreOption, CC1Option]>, HelpText<"Print fix-its in machine parseable form">,
+ Visibility<[ClangOption, CLOption, DXCOption, CC1Option]>,
+ HelpText<"Print fix-its in machine parseable form">,
MarshallingInfoFlag<DiagnosticOpts<"ShowParseableFixits">>;
def fdiagnostics_print_source_range_info : Flag<["-"], "fdiagnostics-print-source-range-info">,
- Group<f_clang_Group>, Flags<[CC1Option]>,
+ Group<f_clang_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Print source range spans in numeric form">,
MarshallingInfoFlag<DiagnosticOpts<"ShowSourceRanges">>;
defm diagnostics_show_hotness : BoolFOption<"diagnostics-show-hotness",
CodeGenOpts<"DiagnosticsWithHotness">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Enable profile hotness information in diagnostic line">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Enable profile hotness information in diagnostic line">,
NegFlag<SetFalse>>;
def fdiagnostics_hotness_threshold_EQ : Joined<["-"], "fdiagnostics-hotness-threshold=">,
- Group<f_Group>, Flags<[CC1Option]>, MetaVarName<"<value>">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
+ MetaVarName<"<value>">,
HelpText<"Prevent optimization remarks from being output if they do not have at least this profile count. "
"Use 'auto' to apply the threshold from profile summary">;
+def fdiagnostics_misexpect_tolerance_EQ : Joined<["-"], "fdiagnostics-misexpect-tolerance=">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
+ MetaVarName<"<value>">,
+ HelpText<"Prevent misexpect diagnostics from being output if the profile counts are within N% of the expected. ">;
defm diagnostics_show_option : BoolFOption<"diagnostics-show-option",
DiagnosticOpts<"ShowOptionNames">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option]>, PosFlag<SetTrue, [], "Print option name with mappable diagnostics">>;
+ NegFlag<SetFalse, [], [ClangOption, CC1Option]>,
+ PosFlag<SetTrue, [], [ClangOption], "Print option name with mappable diagnostics">>;
defm diagnostics_show_note_include_stack : BoolFOption<"diagnostics-show-note-include-stack",
DiagnosticOpts<"ShowNoteIncludeStack">, DefaultFalse,
- PosFlag<SetTrue, [], "Display include stacks for diagnostic notes">,
- NegFlag<SetFalse>, BothFlags<[CC1Option]>>;
+ PosFlag<SetTrue, [], [ClangOption], "Display include stacks for diagnostic notes">,
+ NegFlag<SetFalse>, BothFlags<[], [ClangOption, CC1Option]>>;
def fdiagnostics_format_EQ : Joined<["-"], "fdiagnostics-format=">, Group<f_clang_Group>;
def fdiagnostics_show_category_EQ : Joined<["-"], "fdiagnostics-show-category=">, Group<f_clang_Group>;
def fdiagnostics_show_template_tree : Flag<["-"], "fdiagnostics-show-template-tree">,
- Group<f_Group>, Flags<[CC1Option]>,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Print a template comparison tree for differing templates">,
MarshallingInfoFlag<DiagnosticOpts<"ShowTemplateTree">>;
-def fdiscard_value_names : Flag<["-"], "fdiscard-value-names">, Group<f_clang_Group>,
- HelpText<"Discard value names in LLVM IR">, Flags<[NoXarchOption]>;
-def fno_discard_value_names : Flag<["-"], "fno-discard-value-names">, Group<f_clang_Group>,
- HelpText<"Do not discard value names in LLVM IR">, Flags<[NoXarchOption]>;
+defm safe_buffer_usage_suggestions : BoolFOption<"safe-buffer-usage-suggestions",
+ DiagnosticOpts<"ShowSafeBufferUsageSuggestions">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Display suggestions to update code associated with -Wunsafe-buffer-usage warnings">,
+ NegFlag<SetFalse>>;
+def fverify_intermediate_code : Flag<["-"], "fverify-intermediate-code">,
+ Group<f_clang_Group>, Visibility<[ClangOption, CLOption, DXCOption]>,
+ HelpText<"Enable verification of LLVM IR">;
+def fno_verify_intermediate_code : Flag<["-"], "fno-verify-intermediate-code">,
+ Group<f_clang_Group>, Visibility<[ClangOption, CLOption, DXCOption]>,
+ HelpText<"Disable verification of LLVM IR">;
+def fdiscard_value_names : Flag<["-"], "fdiscard-value-names">,
+ Group<f_clang_Group>, Visibility<[ClangOption, DXCOption]>,
+ HelpText<"Discard value names in LLVM IR">;
+def fno_discard_value_names : Flag<["-"], "fno-discard-value-names">,
+ Group<f_clang_Group>, Visibility<[ClangOption, DXCOption]>,
+ HelpText<"Do not discard value names in LLVM IR">;
defm dollars_in_identifiers : BoolFOption<"dollars-in-identifiers",
LangOpts<"DollarIdents">, Default<!strconcat("!", asm_preprocessor.KeyPath)>,
- PosFlag<SetTrue, [], "Allow">, NegFlag<SetFalse, [], "Disallow">,
- BothFlags<[CC1Option], " '$' in identifiers">>;
+ PosFlag<SetTrue, [], [ClangOption], "Allow">,
+ NegFlag<SetFalse, [], [ClangOption], "Disallow">,
+ BothFlags<[], [ClangOption, CC1Option], " '$' in identifiers">>;
def fdwarf2_cfi_asm : Flag<["-"], "fdwarf2-cfi-asm">, Group<clang_ignored_f_Group>;
def fno_dwarf2_cfi_asm : Flag<["-"], "fno-dwarf2-cfi-asm">, Group<clang_ignored_f_Group>;
defm dwarf_directory_asm : BoolFOption<"dwarf-directory-asm",
CodeGenOpts<"NoDwarfDirectoryAsm">, DefaultFalse,
- NegFlag<SetTrue, [CC1Option]>, PosFlag<SetFalse>>;
+ NegFlag<SetTrue, [], [ClangOption, CC1Option]>,
+ PosFlag<SetFalse>>;
defm elide_constructors : BoolFOption<"elide-constructors",
LangOpts<"ElideConstructors">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option], "Disable C++ copy constructor elision">,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option],
+ "Disable C++ copy constructor elision">,
PosFlag<SetTrue>>;
def fno_elide_type : Flag<["-"], "fno-elide-type">, Group<f_Group>,
- Flags<[CC1Option]>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Do not elide types when printing diagnostics">,
MarshallingInfoNegativeFlag<DiagnosticOpts<"ElideType">>;
def feliminate_unused_debug_symbols : Flag<["-"], "feliminate-unused-debug-symbols">, Group<f_Group>;
-defm eliminate_unused_debug_types : OptOutFFlag<"eliminate-unused-debug-types",
+defm eliminate_unused_debug_types : OptOutCC1FFlag<"eliminate-unused-debug-types",
"Do not emit ", "Emit ", " debug info for defined but unused types">;
-def femit_all_decls : Flag<["-"], "femit-all-decls">, Group<f_Group>, Flags<[CC1Option]>,
+def femit_all_decls : Flag<["-"], "femit-all-decls">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Emit all declarations, even if unused">,
MarshallingInfoFlag<LangOpts<"EmitAllDecls">>;
defm emulated_tls : BoolFOption<"emulated-tls",
CodeGenOpts<"EmulatedTLS">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Use emutls functions to access thread_local variables">,
- NegFlag<SetFalse>, BothFlags<[CC1Option]>>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Use emutls functions to access thread_local variables">,
+ NegFlag<SetFalse>>;
def fencoding_EQ : Joined<["-"], "fencoding=">, Group<f_Group>;
-def ferror_limit_EQ : Joined<["-"], "ferror-limit=">, Group<f_Group>, Flags<[CoreOption]>;
+def ferror_limit_EQ : Joined<["-"], "ferror-limit=">, Group<f_Group>,
+ Visibility<[ClangOption, CLOption, DXCOption]>;
defm exceptions : BoolFOption<"exceptions",
LangOpts<"Exceptions">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Enable">, NegFlag<SetFalse, [], "Disable">,
- BothFlags<[], " support for exception handling">>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Enable">,
+ NegFlag<SetFalse, [], [ClangOption], "Disable">,
+ BothFlags<[], [ClangOption], " support for exception handling">>;
def fdwarf_exceptions : Flag<["-"], "fdwarf-exceptions">, Group<f_Group>,
HelpText<"Use DWARF style exceptions">;
def fsjlj_exceptions : Flag<["-"], "fsjlj-exceptions">, Group<f_Group>,
@@ -1413,97 +2054,160 @@ def fseh_exceptions : Flag<["-"], "fseh-exceptions">, Group<f_Group>,
def fwasm_exceptions : Flag<["-"], "fwasm-exceptions">, Group<f_Group>,
HelpText<"Use WebAssembly style exceptions">;
def exception_model : Separate<["-"], "exception-model">,
- Flags<[CC1Option, NoDriverOption]>, HelpText<"The exception model: dwarf|sjlj|seh|wasm">,
+ Visibility<[CC1Option]>, HelpText<"The exception model">,
Values<"dwarf,sjlj,seh,wasm">,
NormalizedValuesScope<"LangOptions::ExceptionHandlingKind">,
NormalizedValues<["DwarfCFI", "SjLj", "WinEH", "Wasm"]>,
MarshallingInfoEnum<LangOpts<"ExceptionHandling">, "None">;
def exception_model_EQ : Joined<["-"], "exception-model=">,
- Flags<[CC1Option, NoDriverOption]>, Alias<exception_model>;
-def fignore_exceptions : Flag<["-"], "fignore-exceptions">, Group<f_Group>, Flags<[CC1Option]>,
+ Visibility<[CC1Option]>, Alias<exception_model>;
+def fignore_exceptions : Flag<["-"], "fignore-exceptions">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable support for ignoring exception handling constructs">,
MarshallingInfoFlag<LangOpts<"IgnoreExceptions">>;
-def fexcess_precision_EQ : Joined<["-"], "fexcess-precision=">,
- Group<clang_ignored_gcc_optimization_f_Group>;
+defm assume_nothrow_exception_dtor: BoolFOption<"assume-nothrow-exception-dtor",
+ LangOpts<"AssumeNothrowExceptionDtor">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Assume that exception objects' destructors are non-throwing">,
+ NegFlag<SetFalse>>;
+def fexcess_precision_EQ : Joined<["-"], "fexcess-precision=">, Group<f_Group>,
+ Visibility<[ClangOption, CLOption]>,
+ HelpText<"Allows control over excess precision on targets where native "
+ "support for the precision types is not available. By default, excess "
+ "precision is used to calculate intermediate results following the "
+ "rules specified in ISO C99.">,
+ Values<"standard,fast,none">, NormalizedValuesScope<"LangOptions">,
+ NormalizedValues<["FPP_Standard", "FPP_Fast", "FPP_None"]>;
+def ffloat16_excess_precision_EQ : Joined<["-"], "ffloat16-excess-precision=">,
+ Group<f_Group>, Visibility<[CC1Option]>,
+ HelpText<"Allows control over excess precision on targets where native "
+ "support for Float16 precision types is not available. By default, excess "
+ "precision is used to calculate intermediate results following the "
+ "rules specified in ISO C99.">,
+ Values<"standard,fast,none">, NormalizedValuesScope<"LangOptions">,
+ NormalizedValues<["FPP_Standard", "FPP_Fast", "FPP_None"]>,
+ MarshallingInfoEnum<LangOpts<"Float16ExcessPrecision">, "FPP_Standard">;
+def fbfloat16_excess_precision_EQ : Joined<["-"], "fbfloat16-excess-precision=">,
+ Group<f_Group>, Visibility<[CC1Option]>,
+ HelpText<"Allows control over excess precision on targets where native "
+ "support for BFloat16 precision types is not available. By default, excess "
+ "precision is used to calculate intermediate results following the "
+ "rules specified in ISO C99.">,
+ Values<"standard,fast,none">, NormalizedValuesScope<"LangOptions">,
+ NormalizedValues<["FPP_Standard", "FPP_Fast", "FPP_None"]>,
+ MarshallingInfoEnum<LangOpts<"BFloat16ExcessPrecision">, "FPP_Standard">;
def : Flag<["-"], "fexpensive-optimizations">, Group<clang_ignored_gcc_optimization_f_Group>;
def : Flag<["-"], "fno-expensive-optimizations">, Group<clang_ignored_gcc_optimization_f_Group>;
def fextdirs_EQ : Joined<["-"], "fextdirs=">, Group<f_Group>;
def : Flag<["-"], "fdefer-pop">, Group<clang_ignored_gcc_optimization_f_Group>;
def : Flag<["-"], "fno-defer-pop">, Group<clang_ignored_gcc_optimization_f_Group>;
def : Flag<["-"], "fextended-identifiers">, Group<clang_ignored_f_Group>;
-def : Flag<["-"], "fno-extended-identifiers">, Group<f_Group>, Flags<[Unsupported]>;
+def : Flag<["-"], "fno-extended-identifiers">, Group<f_Group>,
+ Flags<[Unsupported]>;
def fhosted : Flag<["-"], "fhosted">, Group<f_Group>;
-def fdenormal_fp_math_EQ : Joined<["-"], "fdenormal-fp-math=">, Group<f_Group>, Flags<[CC1Option]>;
-def ffp_model_EQ : Joined<["-"], "ffp-model=">, Group<f_Group>, Flags<[NoXarchOption]>,
+def fdenormal_fp_math_EQ : Joined<["-"], "fdenormal-fp-math=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>;
+def ffile_reproducible : Flag<["-"], "ffile-reproducible">, Group<f_Group>,
+ Visibility<[ClangOption, CLOption, CC1Option]>,
+ HelpText<"Use the target's platform-specific path separator character when "
+ "expanding the __FILE__ macro">;
+def fno_file_reproducible : Flag<["-"], "fno-file-reproducible">,
+ Group<f_Group>, Visibility<[ClangOption, CLOption, CC1Option]>,
+ HelpText<"Use the host's platform-specific path separator character when "
+ "expanding the __FILE__ macro">;
+def ffp_eval_method_EQ : Joined<["-"], "ffp-eval-method=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Specifies the evaluation method to use for floating-point arithmetic.">,
+ Values<"source,double,extended">, NormalizedValuesScope<"LangOptions">,
+ NormalizedValues<["FEM_Source", "FEM_Double", "FEM_Extended"]>,
+ MarshallingInfoEnum<LangOpts<"FPEvalMethod">, "FEM_UnsetOnCommandLine">;
+def ffp_model_EQ : Joined<["-"], "ffp-model=">, Group<f_Group>,
HelpText<"Controls the semantics of floating-point calculations.">;
-def ffp_exception_behavior_EQ : Joined<["-"], "ffp-exception-behavior=">, Group<f_Group>, Flags<[CC1Option]>,
+def ffp_exception_behavior_EQ : Joined<["-"], "ffp-exception-behavior=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Specifies the exception behavior of floating-point operations.">,
Values<"ignore,maytrap,strict">, NormalizedValuesScope<"LangOptions">,
NormalizedValues<["FPE_Ignore", "FPE_MayTrap", "FPE_Strict"]>,
- MarshallingInfoEnum<LangOpts<"FPExceptionMode">, "FPE_Ignore">;
+ MarshallingInfoEnum<LangOpts<"FPExceptionMode">, "FPE_Default">;
defm fast_math : BoolFOption<"fast-math",
LangOpts<"FastMath">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Allow aggressive, lossy floating-point optimizations",
+ PosFlag<SetTrue, [], [ClangOption, CC1Option, FC1Option, FlangOption],
+ "Allow aggressive, lossy floating-point optimizations",
[cl_fast_relaxed_math.KeyPath]>,
- NegFlag<SetFalse>>;
-def menable_unsafe_fp_math : Flag<["-"], "menable-unsafe-fp-math">, Flags<[CC1Option]>,
- HelpText<"Allow unsafe floating-point math optimizations which may decrease precision">,
- MarshallingInfoFlag<LangOpts<"UnsafeFPMath">>,
- ImpliedByAnyOf<[cl_unsafe_math_optimizations.KeyPath, ffast_math.KeyPath]>;
+ NegFlag<SetFalse, [], [ClangOption, CC1Option, FC1Option, FlangOption]>>;
defm math_errno : BoolFOption<"math-errno",
LangOpts<"MathErrno">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Require math functions to indicate errors by setting errno">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Require math functions to indicate errors by setting errno">,
NegFlag<SetFalse>>,
ShouldParseIf<!strconcat("!", open_cl.KeyPath)>;
def fextend_args_EQ : Joined<["-"], "fextend-arguments=">, Group<f_Group>,
- Flags<[CC1Option, NoArgumentUnused]>,
+ Flags<[NoArgumentUnused]>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Controls how scalar integer arguments are extended in calls "
"to unprototyped and varargs functions">,
Values<"32,64">,
NormalizedValues<["ExtendTo32", "ExtendTo64"]>,
NormalizedValuesScope<"LangOptions::ExtendArgsKind">,
MarshallingInfoEnum<LangOpts<"ExtendIntArgs">,"ExtendTo32">;
-def fbracket_depth_EQ : Joined<["-"], "fbracket-depth=">, Group<f_Group>, Flags<[CoreOption]>;
+def fbracket_depth_EQ : Joined<["-"], "fbracket-depth=">, Group<f_Group>,
+ Visibility<[ClangOption, CLOption]>;
def fsignaling_math : Flag<["-"], "fsignaling-math">, Group<f_Group>;
def fno_signaling_math : Flag<["-"], "fno-signaling-math">, Group<f_Group>;
defm jump_tables : BoolFOption<"jump-tables",
CodeGenOpts<"NoUseJumpTables">, DefaultFalse,
- NegFlag<SetTrue, [CC1Option], "Do not use">, PosFlag<SetFalse, [], "Use">,
- BothFlags<[], " jump tables for lowering switches">>;
+ NegFlag<SetTrue, [], [ClangOption, CC1Option], "Do not use">,
+ PosFlag<SetFalse, [], [ClangOption], "Use">,
+ BothFlags<[], [ClangOption], " jump tables for lowering switches">>;
defm force_enable_int128 : BoolFOption<"force-enable-int128",
TargetOpts<"ForceEnableInt128">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Enable">, NegFlag<SetFalse, [], "Disable">,
- BothFlags<[], " support for int128_t type">>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Enable">,
+ NegFlag<SetFalse, [], [ClangOption], "Disable">,
+ BothFlags<[], [ClangOption], " support for int128_t type">>;
defm keep_static_consts : BoolFOption<"keep-static-consts",
CodeGenOpts<"KeepStaticConsts">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Keep">, NegFlag<SetFalse, [], "Don't keep">,
- BothFlags<[NoXarchOption], " static const variables if unused">>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Keep">,
+ NegFlag<SetFalse, [], [ClangOption], "Don't keep">,
+ BothFlags<[], [], " static const variables even if unused">>;
+defm keep_persistent_storage_variables : BoolFOption<"keep-persistent-storage-variables",
+ CodeGenOpts<"KeepPersistentStorageVariables">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Enable">,
+ NegFlag<SetFalse, [], [ClangOption], "Disable">,
+ BothFlags<[], [],
+ " keeping all variables that have a persistent storage duration, including global, static and thread-local variables, to guarantee that they can be directly addressed">>;
defm fixed_point : BoolFOption<"fixed-point",
LangOpts<"FixedPoint">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Enable">, NegFlag<SetFalse, [], "Disable">,
- BothFlags<[], " fixed point types">>, ShouldParseIf<!strconcat("!", cplusplus.KeyPath)>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Enable">,
+ NegFlag<SetFalse, [], [ClangOption], "Disable">,
+ BothFlags<[], [ClangOption], " fixed point types">>;
defm cxx_static_destructors : BoolFOption<"c++-static-destructors",
LangOpts<"RegisterStaticDestructors">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option], "Disable C++ static destructor registration">,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option],
+ "Disable C++ static destructor registration">,
PosFlag<SetTrue>>;
def fsymbol_partition_EQ : Joined<["-"], "fsymbol-partition=">, Group<f_Group>,
- Flags<[CC1Option]>, MarshallingInfoString<CodeGenOpts<"SymbolPartition">>;
+ Visibility<[ClangOption, CC1Option]>,
+ MarshallingInfoString<CodeGenOpts<"SymbolPartition">>;
-defm memory_profile : OptInFFlag<"memory-profile", "Enable", "Disable", " heap memory profiling">;
+defm memory_profile : OptInCC1FFlag<"memory-profile", "Enable", "Disable", " heap memory profiling">;
def fmemory_profile_EQ : Joined<["-"], "fmemory-profile=">,
- Group<f_Group>, Flags<[CC1Option]>, MetaVarName<"<directory>">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
+ MetaVarName<"<directory>">,
HelpText<"Enable heap memory profiling and dump results into <directory>">;
+def fmemory_profile_use_EQ : Joined<["-"], "fmemory-profile-use=">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option, CLOption]>,
+ MetaVarName<"<pathname>">,
+ HelpText<"Use memory profile for profile-guided memory optimization">,
+ MarshallingInfoString<CodeGenOpts<"MemoryProfileUsePath">>;
// Begin sanitizer flags. These should all be core options exposed in all driver
// modes.
-let Flags = [CC1Option, CoreOption] in {
+let Visibility = [ClangOption, CC1Option, CLOption] in {
def fsanitize_EQ : CommaJoined<["-"], "fsanitize=">, Group<f_clang_Group>,
MetaVarName<"<check>">,
HelpText<"Turn on runtime checks for various forms of undefined "
"or suspicious behavior. See user manual for available checks">;
def fno_sanitize_EQ : CommaJoined<["-"], "fno-sanitize=">, Group<f_clang_Group>,
- Flags<[CoreOption, NoXarchOption]>;
+ Visibility<[ClangOption, CLOption]>;
def fsanitize_ignorelist_EQ : Joined<["-"], "fsanitize-ignorelist=">,
Group<f_clang_Group>, HelpText<"Path to ignorelist file for sanitizers">;
@@ -1512,10 +2216,8 @@ def : Joined<["-"], "fsanitize-blacklist=">,
HelpText<"Alias for -fsanitize-ignorelist=">;
def fsanitize_system_ignorelist_EQ : Joined<["-"], "fsanitize-system-ignorelist=">,
- HelpText<"Path to system ignorelist file for sanitizers">, Flags<[CC1Option]>;
-def : Joined<["-"], "fsanitize-system-blacklist=">,
- HelpText<"Alias for -fsanitize-system-ignorelist=">,
- Flags<[CC1Option, HelpHidden]>, Alias<fsanitize_system_ignorelist_EQ>;
+ HelpText<"Path to system ignorelist file for sanitizers">,
+ Visibility<[ClangOption, CC1Option]>;
def fno_sanitize_ignorelist : Flag<["-"], "fno-sanitize-ignorelist">,
Group<f_clang_Group>, HelpText<"Don't use ignorelist file for sanitizers">;
@@ -1526,37 +2228,41 @@ def fsanitize_coverage : CommaJoined<["-"], "fsanitize-coverage=">,
Group<f_clang_Group>,
HelpText<"Specify the type of coverage instrumentation for Sanitizers">;
def fno_sanitize_coverage : CommaJoined<["-"], "fno-sanitize-coverage=">,
- Group<f_clang_Group>, Flags<[CoreOption, NoXarchOption]>,
+ Group<f_clang_Group>, Visibility<[ClangOption, CLOption]>,
HelpText<"Disable features of coverage instrumentation for Sanitizers">,
Values<"func,bb,edge,indirect-calls,trace-bb,trace-cmp,trace-div,trace-gep,"
"8bit-counters,trace-pc,trace-pc-guard,no-prune,inline-8bit-counters,"
"inline-bool-flag">;
def fsanitize_coverage_allowlist : Joined<["-"], "fsanitize-coverage-allowlist=">,
- Group<f_clang_Group>, Flags<[CoreOption, NoXarchOption]>,
+ Group<f_clang_Group>, Visibility<[ClangOption, CLOption]>,
HelpText<"Restrict sanitizer coverage instrumentation exclusively to modules and functions that match the provided special case list, except the blocked ones">,
MarshallingInfoStringVector<CodeGenOpts<"SanitizeCoverageAllowlistFiles">>;
-def : Joined<["-"], "fsanitize-coverage-whitelist=">,
- Group<f_clang_Group>, Flags<[CoreOption, HelpHidden]>, Alias<fsanitize_coverage_allowlist>,
- HelpText<"Deprecated, use -fsanitize-coverage-allowlist= instead">;
def fsanitize_coverage_ignorelist : Joined<["-"], "fsanitize-coverage-ignorelist=">,
- Group<f_clang_Group>, Flags<[CoreOption, NoXarchOption]>,
+ Group<f_clang_Group>, Visibility<[ClangOption, CLOption]>,
HelpText<"Disable sanitizer coverage instrumentation for modules and functions "
"that match the provided special case list, even the allowed ones">,
MarshallingInfoStringVector<CodeGenOpts<"SanitizeCoverageIgnorelistFiles">>;
-def : Joined<["-"], "fsanitize-coverage-blacklist=">,
- Group<f_clang_Group>, Flags<[CoreOption, HelpHidden]>,
- Alias<fsanitize_coverage_ignorelist>,
- HelpText<"Deprecated, use -fsanitize-coverage-ignorelist= instead">;
+def fexperimental_sanitize_metadata_EQ : CommaJoined<["-"], "fexperimental-sanitize-metadata=">,
+ Group<f_Group>,
+ HelpText<"Specify the type of metadata to emit for binary analysis sanitizers">;
+def fno_experimental_sanitize_metadata_EQ : CommaJoined<["-"], "fno-experimental-sanitize-metadata=">,
+ Group<f_Group>, Visibility<[ClangOption, CLOption]>,
+ HelpText<"Disable emitting metadata for binary analysis sanitizers">;
+def fexperimental_sanitize_metadata_ignorelist_EQ : Joined<["-"], "fexperimental-sanitize-metadata-ignorelist=">,
+ Group<f_Group>, Visibility<[ClangOption, CLOption]>,
+ HelpText<"Disable sanitizer metadata for modules and functions that match the provided special case list">,
+ MarshallingInfoStringVector<CodeGenOpts<"SanitizeMetadataIgnorelistFiles">>;
def fsanitize_memory_track_origins_EQ : Joined<["-"], "fsanitize-memory-track-origins=">,
Group<f_clang_Group>,
HelpText<"Enable origins tracking in MemorySanitizer">,
MarshallingInfoInt<CodeGenOpts<"SanitizeMemoryTrackOrigins">>;
def fsanitize_memory_track_origins : Flag<["-"], "fsanitize-memory-track-origins">,
Group<f_clang_Group>,
+ Alias<fsanitize_memory_track_origins_EQ>, AliasArgs<["2"]>,
HelpText<"Enable origins tracking in MemorySanitizer">;
def fno_sanitize_memory_track_origins : Flag<["-"], "fno-sanitize-memory-track-origins">,
Group<f_clang_Group>,
- Flags<[CoreOption, NoXarchOption]>,
+ Visibility<[ClangOption, CLOption]>,
HelpText<"Disable origins tracking in MemorySanitizer">;
def fsanitize_address_outline_instrumentation : Flag<["-"], "fsanitize-address-outline-instrumentation">,
Group<f_clang_Group>,
@@ -1564,18 +2270,27 @@ def fsanitize_address_outline_instrumentation : Flag<["-"], "fsanitize-address-o
def fno_sanitize_address_outline_instrumentation : Flag<["-"], "fno-sanitize-address-outline-instrumentation">,
Group<f_clang_Group>,
HelpText<"Use default code inlining logic for the address sanitizer">;
+defm sanitize_stable_abi
+ : OptInCC1FFlag<"sanitize-stable-abi", "Stable ", "Conventional ",
+ "ABI instrumentation for sanitizer runtime. Default: Conventional">;
+
+def fsanitize_memtag_mode_EQ : Joined<["-"], "fsanitize-memtag-mode=">,
+ Group<f_clang_Group>,
+ HelpText<"Set default MTE mode to 'sync' (default) or 'async'">;
def fsanitize_hwaddress_experimental_aliasing
: Flag<["-"], "fsanitize-hwaddress-experimental-aliasing">,
Group<f_clang_Group>,
HelpText<"Enable aliasing mode in HWAddressSanitizer">;
def fno_sanitize_hwaddress_experimental_aliasing
: Flag<["-"], "fno-sanitize-hwaddress-experimental-aliasing">,
- Group<f_clang_Group>, Flags<[CoreOption, NoXarchOption]>,
+ Group<f_clang_Group>,
+ Visibility<[ClangOption, CLOption]>,
HelpText<"Disable aliasing mode in HWAddressSanitizer">;
defm sanitize_memory_use_after_dtor : BoolOption<"f", "sanitize-memory-use-after-dtor",
CodeGenOpts<"SanitizeMemoryUseAfterDtor">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Enable">, NegFlag<SetFalse, [], "Disable">,
- BothFlags<[], " use-after-destroy detection in MemorySanitizer">>,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Enable">,
+ NegFlag<SetFalse, [], [ClangOption], "Disable">,
+ BothFlags<[], [ClangOption], " use-after-destroy detection in MemorySanitizer">>,
Group<f_clang_Group>;
def fsanitize_address_field_padding : Joined<["-"], "fsanitize-address-field-padding=">,
Group<f_clang_Group>,
@@ -1583,14 +2298,16 @@ def fsanitize_address_field_padding : Joined<["-"], "fsanitize-address-field-pad
MarshallingInfoInt<LangOpts<"SanitizeAddressFieldPadding">>;
defm sanitize_address_use_after_scope : BoolOption<"f", "sanitize-address-use-after-scope",
CodeGenOpts<"SanitizeAddressUseAfterScope">, DefaultFalse,
- PosFlag<SetTrue, [], "Enable">, NegFlag<SetFalse, [CoreOption, NoXarchOption], "Disable">,
- BothFlags<[], " use-after-scope detection in AddressSanitizer">>,
+ PosFlag<SetTrue, [], [ClangOption], "Enable">,
+ NegFlag<SetFalse, [], [ClangOption, CLOption],
+ "Disable">,
+ BothFlags<[], [ClangOption], " use-after-scope detection in AddressSanitizer">>,
Group<f_clang_Group>;
def sanitize_address_use_after_return_EQ
: Joined<["-"], "fsanitize-address-use-after-return=">,
MetaVarName<"<mode>">,
- Flags<[CC1Option]>,
- HelpText<"Select the mode of detecting stack use-after-return in AddressSanitizer: never | runtime (default) | always">,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Select the mode of detecting stack use-after-return in AddressSanitizer">,
Group<f_clang_Group>,
Values<"never,runtime,always">,
NormalizedValuesScope<"llvm::AsanDetectStackUseAfterReturnMode">,
@@ -1598,28 +2315,52 @@ def sanitize_address_use_after_return_EQ
MarshallingInfoEnum<CodeGenOpts<"SanitizeAddressUseAfterReturn">, "Runtime">;
defm sanitize_address_poison_custom_array_cookie : BoolOption<"f", "sanitize-address-poison-custom-array-cookie",
CodeGenOpts<"SanitizeAddressPoisonCustomArrayCookie">, DefaultFalse,
- PosFlag<SetTrue, [], "Enable">, NegFlag<SetFalse, [], "Disable">,
- BothFlags<[], " poisoning array cookies when using custom operator new[] in AddressSanitizer">>,
+ PosFlag<SetTrue, [], [ClangOption], "Enable">,
+ NegFlag<SetFalse, [], [ClangOption], "Disable">,
+ BothFlags<[], [ClangOption], " poisoning array cookies when using custom operator new[] in AddressSanitizer">>,
+ DocBrief<[{Enable "poisoning" array cookies when allocating arrays with a
+custom operator new\[\] in Address Sanitizer, preventing accesses to the
+cookies from user code. An array cookie is a small implementation-defined
+header added to certain array allocations to record metadata such as the
+length of the array. Accesses to array cookies from user code are technically
+allowed by the standard but are more likely to be the result of an
+out-of-bounds array access.
+
+An operator new\[\] is "custom" if it is not one of the allocation functions
+provided by the C++ standard library. Array cookies from non-custom allocation
+functions are always poisoned.}]>,
+ Group<f_clang_Group>;
+defm sanitize_address_globals_dead_stripping : BoolOption<"f", "sanitize-address-globals-dead-stripping",
+ CodeGenOpts<"SanitizeAddressGlobalsDeadStripping">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption], "Enable linker dead stripping of globals in AddressSanitizer">,
+ NegFlag<SetFalse, [], [ClangOption], "Disable linker dead stripping of globals in AddressSanitizer">>,
Group<f_clang_Group>;
-def fsanitize_address_globals_dead_stripping : Flag<["-"], "fsanitize-address-globals-dead-stripping">,
- Group<f_clang_Group>, HelpText<"Enable linker dead stripping of globals in AddressSanitizer">,
- MarshallingInfoFlag<CodeGenOpts<"SanitizeAddressGlobalsDeadStripping">, "false">;
defm sanitize_address_use_odr_indicator : BoolOption<"f", "sanitize-address-use-odr-indicator",
- CodeGenOpts<"SanitizeAddressUseOdrIndicator">, DefaultFalse,
- PosFlag<SetTrue, [], "Enable ODR indicator globals to avoid false ODR violation"
+ CodeGenOpts<"SanitizeAddressUseOdrIndicator">, DefaultTrue,
+ PosFlag<SetTrue, [], [ClangOption], "Enable ODR indicator globals to avoid false ODR violation"
" reports in partially sanitized programs at the cost of an increase in binary size">,
- NegFlag<SetFalse, [], "Disable ODR indicator globals">>,
+ NegFlag<SetFalse, [], [ClangOption], "Disable ODR indicator globals">>,
Group<f_clang_Group>;
def sanitize_address_destructor_EQ
: Joined<["-"], "fsanitize-address-destructor=">,
- Flags<[CC1Option]>,
- HelpText<"Set destructor type used in ASan instrumentation">,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Set the kind of module destructors emitted by "
+ "AddressSanitizer instrumentation. These destructors are "
+ "emitted to unregister instrumented global variables when "
+ "code is unloaded (e.g. via `dlclose()`).">,
Group<f_clang_Group>,
Values<"none,global">,
NormalizedValuesScope<"llvm::AsanDtorKind">,
NormalizedValues<["None", "Global"]>,
MarshallingInfoEnum<CodeGenOpts<"SanitizeAddressDtor">, "Global">;
-// Note: This flag was introduced when it was necessary to distinguish between
+defm sanitize_memory_param_retval
+ : BoolFOption<"sanitize-memory-param-retval",
+ CodeGenOpts<"SanitizeMemoryParamRetval">,
+ DefaultTrue,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Enable">,
+ NegFlag<SetFalse, [], [ClangOption], "Disable">,
+ BothFlags<[], [ClangOption], " detection of uninitialized parameters and return values">>;
+//// Note: This flag was introduced when it was necessary to distinguish between
// ABI for correct codegen. This is no longer needed, but the flag is
// not removed since targeting either ABI will behave the same.
// This way we cause no disturbance to existing scripts & code, and if we
@@ -1633,24 +2374,26 @@ def fsanitize_recover_EQ : CommaJoined<["-"], "fsanitize-recover=">,
Group<f_clang_Group>,
HelpText<"Enable recovery for specified sanitizers">;
def fno_sanitize_recover_EQ : CommaJoined<["-"], "fno-sanitize-recover=">,
- Group<f_clang_Group>, Flags<[CoreOption, NoXarchOption]>,
+ Group<f_clang_Group>,
+ Visibility<[ClangOption, CLOption]>,
HelpText<"Disable recovery for specified sanitizers">;
def fsanitize_recover : Flag<["-"], "fsanitize-recover">, Group<f_clang_Group>,
Alias<fsanitize_recover_EQ>, AliasArgs<["all"]>;
def fno_sanitize_recover : Flag<["-"], "fno-sanitize-recover">,
- Flags<[CoreOption, NoXarchOption]>, Group<f_clang_Group>,
+ Visibility<[ClangOption, CLOption]>,
+ Group<f_clang_Group>,
Alias<fno_sanitize_recover_EQ>, AliasArgs<["all"]>;
def fsanitize_trap_EQ : CommaJoined<["-"], "fsanitize-trap=">, Group<f_clang_Group>,
HelpText<"Enable trapping for specified sanitizers">;
def fno_sanitize_trap_EQ : CommaJoined<["-"], "fno-sanitize-trap=">, Group<f_clang_Group>,
- Flags<[CoreOption, NoXarchOption]>,
+ Visibility<[ClangOption, CLOption]>,
HelpText<"Disable trapping for specified sanitizers">;
def fsanitize_trap : Flag<["-"], "fsanitize-trap">, Group<f_clang_Group>,
Alias<fsanitize_trap_EQ>, AliasArgs<["all"]>,
HelpText<"Enable trapping for all sanitizers">;
def fno_sanitize_trap : Flag<["-"], "fno-sanitize-trap">, Group<f_clang_Group>,
Alias<fno_sanitize_trap_EQ>, AliasArgs<["all"]>,
- Flags<[CoreOption, NoXarchOption]>,
+ Visibility<[ClangOption, CLOption]>,
HelpText<"Disable trapping for all sanitizers">;
def fsanitize_undefined_trap_on_error
: Flag<["-"], "fsanitize-undefined-trap-on-error">, Group<f_clang_Group>,
@@ -1660,7 +2403,8 @@ def fno_sanitize_undefined_trap_on_error
Alias<fno_sanitize_trap_EQ>, AliasArgs<["undefined"]>;
defm sanitize_minimal_runtime : BoolOption<"f", "sanitize-minimal-runtime",
CodeGenOpts<"SanitizeMinimalRuntime">, DefaultFalse,
- PosFlag<SetTrue>, NegFlag<SetFalse>>,
+ PosFlag<SetTrue>,
+ NegFlag<SetFalse>>,
Group<f_clang_Group>;
def fsanitize_link_runtime : Flag<["-"], "fsanitize-link-runtime">,
Group<f_clang_Group>;
@@ -1672,43 +2416,53 @@ def fno_sanitize_link_cxx_runtime : Flag<["-"], "fno-sanitize-link-c++-runtime">
Group<f_clang_Group>;
defm sanitize_cfi_cross_dso : BoolOption<"f", "sanitize-cfi-cross-dso",
CodeGenOpts<"SanitizeCfiCrossDso">, DefaultFalse,
- PosFlag<SetTrue, [], "Enable">, NegFlag<SetFalse, [CoreOption, NoXarchOption], "Disable">,
- BothFlags<[], " control flow integrity (CFI) checks for cross-DSO calls.">>,
+ PosFlag<SetTrue, [], [ClangOption], "Enable">,
+ NegFlag<SetFalse, [], [ClangOption, CLOption],
+ "Disable">,
+ BothFlags<[], [ClangOption], " control flow integrity (CFI) checks for cross-DSO calls.">>,
Group<f_clang_Group>;
def fsanitize_cfi_icall_generalize_pointers : Flag<["-"], "fsanitize-cfi-icall-generalize-pointers">,
Group<f_clang_Group>,
HelpText<"Generalize pointers in CFI indirect call type signature checks">,
MarshallingInfoFlag<CodeGenOpts<"SanitizeCfiICallGeneralizePointers">>;
+def fsanitize_cfi_icall_normalize_integers : Flag<["-"], "fsanitize-cfi-icall-experimental-normalize-integers">,
+ Group<f_clang_Group>,
+ HelpText<"Normalize integers in CFI indirect call type signature checks">,
+ MarshallingInfoFlag<CodeGenOpts<"SanitizeCfiICallNormalizeIntegers">>;
defm sanitize_cfi_canonical_jump_tables : BoolOption<"f", "sanitize-cfi-canonical-jump-tables",
CodeGenOpts<"SanitizeCfiCanonicalJumpTables">, DefaultFalse,
- PosFlag<SetTrue, [], "Make">, NegFlag<SetFalse, [CoreOption, NoXarchOption], "Do not make">,
- BothFlags<[], " the jump table addresses canonical in the symbol table">>,
+ PosFlag<SetTrue, [], [ClangOption], "Make">,
+ NegFlag<SetFalse, [], [ClangOption, CLOption],
+ "Do not make">,
+ BothFlags<[], [ClangOption], " the jump table addresses canonical in the symbol table">>,
Group<f_clang_Group>;
defm sanitize_stats : BoolOption<"f", "sanitize-stats",
CodeGenOpts<"SanitizeStats">, DefaultFalse,
- PosFlag<SetTrue, [], "Enable">, NegFlag<SetFalse, [CoreOption, NoXarchOption], "Disable">,
- BothFlags<[], " sanitizer statistics gathering.">>,
+ PosFlag<SetTrue, [], [ClangOption], "Enable">,
+ NegFlag<SetFalse, [], [ClangOption, CLOption],
+ "Disable">,
+ BothFlags<[], [ClangOption], " sanitizer statistics gathering.">>,
Group<f_clang_Group>;
def fsanitize_thread_memory_access : Flag<["-"], "fsanitize-thread-memory-access">,
Group<f_clang_Group>,
HelpText<"Enable memory access instrumentation in ThreadSanitizer (default)">;
def fno_sanitize_thread_memory_access : Flag<["-"], "fno-sanitize-thread-memory-access">,
Group<f_clang_Group>,
- Flags<[CoreOption, NoXarchOption]>,
+ Visibility<[ClangOption, CLOption]>,
HelpText<"Disable memory access instrumentation in ThreadSanitizer">;
def fsanitize_thread_func_entry_exit : Flag<["-"], "fsanitize-thread-func-entry-exit">,
Group<f_clang_Group>,
HelpText<"Enable function entry/exit instrumentation in ThreadSanitizer (default)">;
def fno_sanitize_thread_func_entry_exit : Flag<["-"], "fno-sanitize-thread-func-entry-exit">,
Group<f_clang_Group>,
- Flags<[CoreOption, NoXarchOption]>,
+ Visibility<[ClangOption, CLOption]>,
HelpText<"Disable function entry/exit instrumentation in ThreadSanitizer">;
def fsanitize_thread_atomics : Flag<["-"], "fsanitize-thread-atomics">,
Group<f_clang_Group>,
HelpText<"Enable atomic operations instrumentation in ThreadSanitizer (default)">;
def fno_sanitize_thread_atomics : Flag<["-"], "fno-sanitize-thread-atomics">,
Group<f_clang_Group>,
- Flags<[CoreOption, NoXarchOption]>,
+ Visibility<[ClangOption, CLOption]>,
HelpText<"Disable atomic operations instrumentation in ThreadSanitizer">;
def fsanitize_undefined_strip_path_components_EQ : Joined<["-"], "fsanitize-undefined-strip-path-components=">,
Group<f_clang_Group>, MetaVarName<"<number>">,
@@ -1719,58 +2473,83 @@ def fsanitize_undefined_strip_path_components_EQ : Joined<["-"], "fsanitize-unde
} // end -f[no-]sanitize* flags
def funsafe_math_optimizations : Flag<["-"], "funsafe-math-optimizations">,
- Group<f_Group>;
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Allow unsafe floating-point math optimizations which may decrease precision">,
+ MarshallingInfoFlag<LangOpts<"UnsafeFPMath">>,
+ ImpliedByAnyOf<[cl_unsafe_math_optimizations.KeyPath, ffast_math.KeyPath]>;
def fno_unsafe_math_optimizations : Flag<["-"], "fno-unsafe-math-optimizations">,
Group<f_Group>;
-def fassociative_math : Flag<["-"], "fassociative-math">, Group<f_Group>;
-def fno_associative_math : Flag<["-"], "fno-associative-math">, Group<f_Group>;
+def fassociative_math : Flag<["-"], "fassociative-math">, Visibility<[ClangOption, FlangOption]>, Group<f_Group>;
+def fno_associative_math : Flag<["-"], "fno-associative-math">, Visibility<[ClangOption, FlangOption]>, Group<f_Group>;
defm reciprocal_math : BoolFOption<"reciprocal-math",
LangOpts<"AllowRecip">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Allow division operations to be reassociated",
- [menable_unsafe_fp_math.KeyPath]>,
- NegFlag<SetFalse>>;
-def fapprox_func : Flag<["-"], "fapprox-func">, Group<f_Group>, Flags<[CC1Option, NoDriverOption]>,
- MarshallingInfoFlag<LangOpts<"ApproxFunc">>, ImpliedByAnyOf<[menable_unsafe_fp_math.KeyPath]>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option, FC1Option, FlangOption],
+ "Allow division operations to be reassociated",
+ [funsafe_math_optimizations.KeyPath]>,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option, FC1Option, FlangOption]>>;
+defm approx_func : BoolFOption<"approx-func", LangOpts<"ApproxFunc">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option, FC1Option, FlangOption],
+ "Allow certain math function calls to be replaced "
+ "with an approximately equivalent calculation",
+ [funsafe_math_optimizations.KeyPath]>,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option, FC1Option, FlangOption]>>;
defm finite_math_only : BoolFOption<"finite-math-only",
LangOpts<"FiniteMathOnly">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "", [cl_finite_math_only.KeyPath, ffast_math.KeyPath]>,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Allow floating-point optimizations that "
+ "assume arguments and results are not NaNs or +-inf. This defines "
+ "the \\_\\_FINITE\\_MATH\\_ONLY\\_\\_ preprocessor macro.",
+ [cl_finite_math_only.KeyPath, ffast_math.KeyPath]>,
NegFlag<SetFalse>>;
defm signed_zeros : BoolFOption<"signed-zeros",
LangOpts<"NoSignedZero">, DefaultFalse,
- NegFlag<SetTrue, [CC1Option], "Allow optimizations that ignore the sign of floating point zeros",
- [cl_no_signed_zeros.KeyPath, menable_unsafe_fp_math.KeyPath]>,
- PosFlag<SetFalse>>;
-def fhonor_nans : Flag<["-"], "fhonor-nans">, Group<f_Group>;
-def fno_honor_nans : Flag<["-"], "fno-honor-nans">, Group<f_Group>;
-def fhonor_infinities : Flag<["-"], "fhonor-infinities">, Group<f_Group>;
-def fno_honor_infinities : Flag<["-"], "fno-honor-infinities">, Group<f_Group>;
+ NegFlag<SetTrue, [], [ClangOption, CC1Option, FC1Option, FlangOption],
+ "Allow optimizations that ignore the sign of floating point zeros",
+ [cl_no_signed_zeros.KeyPath, funsafe_math_optimizations.KeyPath]>,
+ PosFlag<SetFalse, [], [ClangOption, CC1Option, FC1Option, FlangOption]>>;
+def fhonor_nans : Flag<["-"], "fhonor-nans">, Group<f_Group>,
+ Visibility<[ClangOption, FlangOption]>,
+ HelpText<"Specify that floating-point optimizations are not allowed that "
+ "assume arguments and results are not NANs.">;
+def fno_honor_nans : Flag<["-"], "fno-honor-nans">, Visibility<[ClangOption, FlangOption]>, Group<f_Group>;
+def fhonor_infinities : Flag<["-"], "fhonor-infinities">,
+ Group<f_Group>, Visibility<[ClangOption, FlangOption]>,
+ HelpText<"Specify that floating-point optimizations are not allowed that "
+ "assume arguments and results are not +-inf.">;
+def fno_honor_infinities : Flag<["-"], "fno-honor-infinities">,
+ Visibility<[ClangOption, FlangOption]>, Group<f_Group>;
// This option was originally misspelt "infinites" [sic].
def : Flag<["-"], "fhonor-infinites">, Alias<fhonor_infinities>;
def : Flag<["-"], "fno-honor-infinites">, Alias<fno_honor_infinities>;
-def frounding_math : Flag<["-"], "frounding-math">, Group<f_Group>, Flags<[CC1Option]>,
- MarshallingInfoFlag<LangOpts<"FPRoundingMode">, "llvm::RoundingMode::NearestTiesToEven">,
+def frounding_math : Flag<["-"], "frounding-math">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ MarshallingInfoFlag<LangOpts<"RoundingMath">>,
Normalizer<"makeFlagToValueNormalizer(llvm::RoundingMode::Dynamic)">;
-def fno_rounding_math : Flag<["-"], "fno-rounding-math">, Group<f_Group>, Flags<[CC1Option]>;
+def fno_rounding_math : Flag<["-"], "fno-rounding-math">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>;
def ftrapping_math : Flag<["-"], "ftrapping-math">, Group<f_Group>;
def fno_trapping_math : Flag<["-"], "fno-trapping-math">, Group<f_Group>;
def ffp_contract : Joined<["-"], "ffp-contract=">, Group<f_Group>,
- Flags<[CC1Option]>, HelpText<"Form fused FP ops (e.g. FMAs):"
+ Visibility<[ClangOption, CC1Option, FC1Option, FlangOption]>,
+ DocBrief<"Form fused FP ops (e.g. FMAs):"
" fast (fuses across statements disregarding pragmas)"
" | on (only fuses in the same statement unless dictated by pragmas)"
" | off (never fuses)"
- " | fast-honor-pragmas (fuses across statements unless diectated by pragmas)."
+ " | fast-honor-pragmas (fuses across statements unless dictated by pragmas)."
" Default is 'fast' for CUDA, 'fast-honor-pragmas' for HIP, and 'on' otherwise.">,
+ HelpText<"Form fused FP ops (e.g. FMAs)">,
Values<"fast,on,off,fast-honor-pragmas">;
defm strict_float_cast_overflow : BoolFOption<"strict-float-cast-overflow",
CodeGenOpts<"StrictFloatCastOverflow">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option], "Relax language rules and try to match the behavior"
+ NegFlag<SetFalse, [], [ClangOption, CC1Option],
+ "Relax language rules and try to match the behavior"
" of the target's native float-to-int conversion instructions">,
- PosFlag<SetTrue, [], "Assume that overflowing float-to-int casts are undefined (default)">>;
+ PosFlag<SetTrue, [], [ClangOption], "Assume that overflowing float-to-int casts are undefined (default)">>;
defm protect_parens : BoolFOption<"protect-parens",
LangOpts<"ProtectParens">, DefaultFalse,
- PosFlag<SetTrue, [CoreOption, CC1Option],
+ PosFlag<SetTrue, [], [ClangOption, CLOption, CC1Option],
"Determines whether the optimizer honors parentheses when "
"floating-point expressions are evaluated">,
NegFlag<SetFalse>>;
@@ -1780,32 +2559,61 @@ def fno_for_scope : Flag<["-"], "fno-for-scope">, Group<f_Group>;
defm rewrite_imports : BoolFOption<"rewrite-imports",
PreprocessorOutputOpts<"RewriteImports">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option]>, NegFlag<SetFalse>>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option]>,
+ NegFlag<SetFalse>>;
defm rewrite_includes : BoolFOption<"rewrite-includes",
PreprocessorOutputOpts<"RewriteIncludes">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option]>, NegFlag<SetFalse>>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option]>,
+ NegFlag<SetFalse>>;
+
+defm directives_only : OptInCC1FFlag<"directives-only", "">;
defm delete_null_pointer_checks : BoolFOption<"delete-null-pointer-checks",
CodeGenOpts<"NullPointerIsValid">, DefaultFalse,
- NegFlag<SetTrue, [CC1Option], "Do not treat usage of null pointers as undefined behavior">,
- PosFlag<SetFalse, [], "Treat usage of null pointers as undefined behavior (default)">,
- BothFlags<[CoreOption]>>;
-
-def frewrite_map_file_EQ : Joined<["-"], "frewrite-map-file=">,
- Group<f_Group>,
- Flags<[NoXarchOption, CC1Option]>,
- MarshallingInfoStringVector<CodeGenOpts<"RewriteMapFiles">>;
+ NegFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Do not treat usage of null pointers as undefined behavior">,
+ PosFlag<SetFalse, [], [ClangOption], "Treat usage of null pointers as undefined behavior (default)">,
+ BothFlags<[], [ClangOption, CLOption]>>,
+ DocBrief<[{When enabled, treat null pointer dereference, creation of a reference to null,
+or passing a null pointer to a function parameter annotated with the "nonnull"
+attribute as undefined behavior. (And, thus the optimizer may assume that any
+pointer used in such a way must not have been null and optimize away the
+branches accordingly.) On by default.}]>;
defm use_line_directives : BoolFOption<"use-line-directives",
PreprocessorOutputOpts<"UseLineDirectives">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Use #line in preprocessed output">, NegFlag<SetFalse>>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Use #line in preprocessed output">,
+ NegFlag<SetFalse>>;
+defm minimize_whitespace : BoolFOption<"minimize-whitespace",
+ PreprocessorOutputOpts<"MinimizeWhitespace">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Ignore the whitespace from the input file "
+ "when emitting preprocessor output. It will only contain whitespace "
+ "when necessary, e.g. to keep two minus signs from merging into to "
+ "an increment operator. Useful with the -P option to normalize "
+ "whitespace such that two files with only formatting changes are "
+ "equal.\n\nOnly valid with -E on C-like inputs and incompatible "
+ "with -traditional-cpp.">, NegFlag<SetFalse>>;
+defm keep_system_includes : BoolFOption<"keep-system-includes",
+ PreprocessorOutputOpts<"KeepSystemIncludes">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Instead of expanding system headers when emitting preprocessor "
+ "output, preserve the #include directive. Useful when producing "
+ "preprocessed output for test case reduction. May produce incorrect "
+ "output if preprocessor symbols that control the included content "
+ "(e.g. _XOPEN_SOURCE) are defined in the including source file. The "
+ "portability of the resulting source to other compilation environments "
+ "is not guaranteed.\n\nOnly valid with -E.">,
+ NegFlag<SetFalse>>;
-def ffreestanding : Flag<["-"], "ffreestanding">, Group<f_Group>, Flags<[CC1Option]>,
+def ffreestanding : Flag<["-"], "ffreestanding">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Assert that the compilation takes place in a freestanding environment">,
MarshallingInfoFlag<LangOpts<"Freestanding">>;
def fgnuc_version_EQ : Joined<["-"], "fgnuc-version=">, Group<f_Group>,
HelpText<"Sets various macros to claim compatibility with the given GCC version (default is 4.2.1)">,
- Flags<[CC1Option, CoreOption]>;
+ Visibility<[ClangOption, CC1Option, CLOption]>;
// We abuse '-f[no-]gnu-keywords' to force overriding all GNU-extension
// keywords. This behavior is provided by GCC's poorly named '-fasm' flag,
// while a subset (the non-C++ GNU keywords) is provided by GCC's
@@ -1813,164 +2621,193 @@ def fgnuc_version_EQ : Joined<["-"], "fgnuc-version=">, Group<f_Group>,
// name, as it doesn't seem a useful distinction.
defm gnu_keywords : BoolFOption<"gnu-keywords",
LangOpts<"GNUKeywords">, Default<gnu_mode.KeyPath>,
- PosFlag<SetTrue, [], "Allow GNU-extension keywords regardless of language standard">,
- NegFlag<SetFalse>, BothFlags<[CC1Option]>>;
+ PosFlag<SetTrue, [], [ClangOption], "Allow GNU-extension keywords regardless of language standard">,
+ NegFlag<SetFalse>, BothFlags<[], [ClangOption, CC1Option]>>;
defm gnu89_inline : BoolFOption<"gnu89-inline",
LangOpts<"GNUInline">, Default<!strconcat("!", c99.KeyPath, " && !", cplusplus.KeyPath)>,
- PosFlag<SetTrue, [CC1Option], "Use the gnu89 inline semantics">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Use the gnu89 inline semantics">,
NegFlag<SetFalse>>, ShouldParseIf<!strconcat("!", cplusplus.KeyPath)>;
def fgnu_runtime : Flag<["-"], "fgnu-runtime">, Group<f_Group>,
HelpText<"Generate output compatible with the standard GNU Objective-C runtime">;
-def fheinous_gnu_extensions : Flag<["-"], "fheinous-gnu-extensions">, Flags<[CC1Option]>,
+def fheinous_gnu_extensions : Flag<["-"], "fheinous-gnu-extensions">,
+ Visibility<[ClangOption, CC1Option]>,
MarshallingInfoFlag<LangOpts<"HeinousExtensions">>;
def filelist : Separate<["-"], "filelist">, Flags<[LinkerInput]>,
Group<Link_Group>;
def : Flag<["-"], "findirect-virtual-calls">, Alias<fapple_kext>;
-def finline_functions : Flag<["-"], "finline-functions">, Group<f_clang_Group>, Flags<[CC1Option]>,
+def finline_functions : Flag<["-"], "finline-functions">, Group<f_clang_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Inline suitable functions">;
-def finline_hint_functions: Flag<["-"], "finline-hint-functions">, Group<f_clang_Group>, Flags<[CC1Option]>,
+def finline_hint_functions: Flag<["-"], "finline-hint-functions">, Group<f_clang_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Inline functions which are (explicitly or implicitly) marked inline">;
def finline : Flag<["-"], "finline">, Group<clang_ignored_f_Group>;
+def finline_max_stacksize_EQ
+ : Joined<["-"], "finline-max-stacksize=">,
+ Group<f_Group>, Visibility<[ClangOption, CLOption, CC1Option]>,
+ HelpText<"Suppress inlining of functions whose stack size exceeds the given value">,
+ MarshallingInfoInt<CodeGenOpts<"InlineMaxStackSize">, "UINT_MAX">;
+defm jmc : BoolFOption<"jmc",
+ CodeGenOpts<"JMCInstrument">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Enable just-my-code debugging">,
+ NegFlag<SetFalse>>;
def fglobal_isel : Flag<["-"], "fglobal-isel">, Group<f_clang_Group>,
HelpText<"Enables the global instruction selector">;
def fexperimental_isel : Flag<["-"], "fexperimental-isel">, Group<f_clang_Group>,
Alias<fglobal_isel>;
-defm legacy_pass_manager : BoolOption<"f", "legacy-pass-manager",
- CodeGenOpts<"LegacyPassManager">, Default<"!static_cast<unsigned>(LLVM_ENABLE_NEW_PASS_MANAGER)">,
- PosFlag<SetTrue, [], "Use the legacy pass manager in LLVM">,
- NegFlag<SetFalse, [], "Use the new pass manager in LLVM">,
- BothFlags<[CC1Option]>>, Group<f_clang_Group>;
-def fexperimental_new_pass_manager : Flag<["-"], "fexperimental-new-pass-manager">,
- Group<f_clang_Group>, Flags<[CC1Option]>, Alias<fno_legacy_pass_manager>;
-def fno_experimental_new_pass_manager : Flag<["-"], "fno-experimental-new-pass-manager">,
- Group<f_clang_Group>, Flags<[CC1Option]>, Alias<flegacy_pass_manager>;
def fexperimental_strict_floating_point : Flag<["-"], "fexperimental-strict-floating-point">,
- Group<f_clang_Group>, Flags<[CC1Option]>,
- HelpText<"Enables experimental strict floating point in LLVM.">,
+ Group<f_clang_Group>, Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Enables the use of non-default rounding modes and non-default exception handling on targets that are not currently ready.">,
MarshallingInfoFlag<LangOpts<"ExpStrictFP">>;
-def finput_charset_EQ : Joined<["-"], "finput-charset=">, Flags<[FlangOption, FC1Option]>, Group<f_Group>,
+def finput_charset_EQ : Joined<["-"], "finput-charset=">,
+ Visibility<[ClangOption, FlangOption, FC1Option]>, Group<f_Group>,
HelpText<"Specify the default character set for source files">;
def fexec_charset_EQ : Joined<["-"], "fexec-charset=">, Group<f_Group>;
-def finstrument_functions : Flag<["-"], "finstrument-functions">, Group<f_Group>, Flags<[CC1Option]>,
+def finstrument_functions : Flag<["-"], "finstrument-functions">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Generate calls to instrument function entry and exit">,
MarshallingInfoFlag<CodeGenOpts<"InstrumentFunctions">>;
-def finstrument_functions_after_inlining : Flag<["-"], "finstrument-functions-after-inlining">, Group<f_Group>, Flags<[CC1Option]>,
+def finstrument_functions_after_inlining : Flag<["-"], "finstrument-functions-after-inlining">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Like -finstrument-functions, but insert the calls after inlining">,
MarshallingInfoFlag<CodeGenOpts<"InstrumentFunctionsAfterInlining">>;
-def finstrument_function_entry_bare : Flag<["-"], "finstrument-function-entry-bare">, Group<f_Group>, Flags<[CC1Option]>,
+def finstrument_function_entry_bare : Flag<["-"], "finstrument-function-entry-bare">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Instrument function entry only, after inlining, without arguments to the instrumentation call">,
MarshallingInfoFlag<CodeGenOpts<"InstrumentFunctionEntryBare">>;
-def fcf_protection_EQ : Joined<["-"], "fcf-protection=">, Flags<[CoreOption, CC1Option]>, Group<f_Group>,
- HelpText<"Instrument control-flow architecture protection. Options: return, branch, full, none.">, Values<"return,branch,full,none">;
-def fcf_protection : Flag<["-"], "fcf-protection">, Group<f_Group>, Flags<[CoreOption, CC1Option]>,
+def fcf_protection_EQ : Joined<["-"], "fcf-protection=">,
+ Visibility<[ClangOption, CLOption, CC1Option]>, Group<f_Group>,
+ HelpText<"Instrument control-flow architecture protection">, Values<"return,branch,full,none">;
+def fcf_protection : Flag<["-"], "fcf-protection">, Group<f_Group>,
+ Visibility<[ClangOption, CLOption, CC1Option]>,
Alias<fcf_protection_EQ>, AliasArgs<["full"]>,
HelpText<"Enable cf-protection in 'full' mode">;
+def mfunction_return_EQ : Joined<["-"], "mfunction-return=">,
+ Group<m_Group>, Visibility<[ClangOption, CLOption, CC1Option]>,
+ HelpText<"Replace returns with jumps to ``__x86_return_thunk`` (x86 only, error otherwise)">,
+ Values<"keep,thunk-extern">,
+ NormalizedValues<["Keep", "Extern"]>,
+ NormalizedValuesScope<"llvm::FunctionReturnThunksKind">,
+ MarshallingInfoEnum<CodeGenOpts<"FunctionReturnThunks">, "Keep">;
+def mindirect_branch_cs_prefix : Flag<["-"], "mindirect-branch-cs-prefix">,
+ Group<m_Group>, Visibility<[ClangOption, CLOption, CC1Option]>,
+ HelpText<"Add cs prefix to call and jmp to indirect thunk">,
+ MarshallingInfoFlag<CodeGenOpts<"IndirectBranchCSPrefix">>;
defm xray_instrument : BoolFOption<"xray-instrument",
LangOpts<"XRayInstrument">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Generate XRay instrumentation sleds on function entry and exit">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Generate XRay instrumentation sleds on function entry and exit">,
NegFlag<SetFalse>>;
def fxray_instruction_threshold_EQ :
- JoinedOrSeparate<["-"], "fxray-instruction-threshold=">,
- Group<f_Group>, Flags<[CC1Option]>,
+ Joined<["-"], "fxray-instruction-threshold=">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Sets the minimum function size to instrument with XRay">,
MarshallingInfoInt<CodeGenOpts<"XRayInstructionThreshold">, "200">;
-def fxray_instruction_threshold_ :
- JoinedOrSeparate<["-"], "fxray-instruction-threshold">,
- Group<f_Group>, Flags<[CC1Option]>;
def fxray_always_instrument :
- JoinedOrSeparate<["-"], "fxray-always-instrument=">,
- Group<f_Group>, Flags<[CC1Option]>,
+ Joined<["-"], "fxray-always-instrument=">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"DEPRECATED: Filename defining the whitelist for imbuing the 'always instrument' XRay attribute.">,
MarshallingInfoStringVector<LangOpts<"XRayAlwaysInstrumentFiles">>;
def fxray_never_instrument :
- JoinedOrSeparate<["-"], "fxray-never-instrument=">,
- Group<f_Group>, Flags<[CC1Option]>,
+ Joined<["-"], "fxray-never-instrument=">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"DEPRECATED: Filename defining the whitelist for imbuing the 'never instrument' XRay attribute.">,
MarshallingInfoStringVector<LangOpts<"XRayNeverInstrumentFiles">>;
def fxray_attr_list :
- JoinedOrSeparate<["-"], "fxray-attr-list=">,
- Group<f_Group>, Flags<[CC1Option]>,
+ Joined<["-"], "fxray-attr-list=">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Filename defining the list of functions/types for imbuing XRay attributes.">,
MarshallingInfoStringVector<LangOpts<"XRayAttrListFiles">>;
def fxray_modes :
- JoinedOrSeparate<["-"], "fxray-modes=">,
- Group<f_Group>, Flags<[CC1Option]>,
+ Joined<["-"], "fxray-modes=">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"List of modes to link in by default into XRay instrumented binaries.">;
defm xray_always_emit_customevents : BoolFOption<"xray-always-emit-customevents",
LangOpts<"XRayAlwaysEmitCustomEvents">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Always emit __xray_customevent(...) calls"
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Always emit __xray_customevent(...) calls"
" even if the containing function is not always instrumented">,
NegFlag<SetFalse>>;
defm xray_always_emit_typedevents : BoolFOption<"xray-always-emit-typedevents",
LangOpts<"XRayAlwaysEmitTypedEvents">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Always emit __xray_typedevent(...) calls"
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Always emit __xray_typedevent(...) calls"
" even if the containing function is not always instrumented">,
NegFlag<SetFalse>>;
defm xray_ignore_loops : BoolFOption<"xray-ignore-loops",
CodeGenOpts<"XRayIgnoreLoops">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Don't instrument functions with loops"
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Don't instrument functions with loops"
" unless they also meet the minimum function size">,
NegFlag<SetFalse>>;
defm xray_function_index : BoolFOption<"xray-function-index",
- CodeGenOpts<"XRayOmitFunctionIndex">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option], "Omit function index section at the"
- " expense of single-function patching performance">,
- PosFlag<SetTrue>>;
+ CodeGenOpts<"XRayFunctionIndex">, DefaultTrue,
+ PosFlag<SetTrue, [], [ClangOption]>,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option],
+ "Omit function index section at the"
+ " expense of single-function patching performance">>;
def fxray_link_deps : Flag<["-"], "fxray-link-deps">, Group<f_Group>,
- Flags<[CC1Option]>,
- HelpText<"Tells clang to add the link dependencies for XRay.">;
-def fnoxray_link_deps : Flag<["-"], "fnoxray-link-deps">, Group<f_Group>,
- Flags<[CC1Option]>;
+ HelpText<"Link XRay runtime library when -fxray-instrument is specified (default)">;
+def fno_xray_link_deps : Flag<["-"], "fno-xray-link-deps">, Group<f_Group>;
def fxray_instrumentation_bundle :
- JoinedOrSeparate<["-"], "fxray-instrumentation-bundle=">,
- Group<f_Group>, Flags<[CC1Option]>,
+ Joined<["-"], "fxray-instrumentation-bundle=">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Select which XRay instrumentation points to emit. Options: all, none, function-entry, function-exit, function, custom. Default is 'all'. 'function' includes both 'function-entry' and 'function-exit'.">;
def fxray_function_groups :
Joined<["-"], "fxray-function-groups=">,
- Group<f_Group>, Flags<[CC1Option]>,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Only instrument 1 of N groups">,
MarshallingInfoInt<CodeGenOpts<"XRayTotalFunctionGroups">, "1">;
def fxray_selected_function_group :
Joined<["-"], "fxray-selected-function-group=">,
- Group<f_Group>, Flags<[CC1Option]>,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"When using -fxray-function-groups, select which group of functions to instrument. Valid range is 0 to fxray-function-groups - 1">,
MarshallingInfoInt<CodeGenOpts<"XRaySelectedFunctionGroup">, "0">;
defm fine_grained_bitfield_accesses : BoolOption<"f", "fine-grained-bitfield-accesses",
CodeGenOpts<"FineGrainedBitfieldAccesses">, DefaultFalse,
- PosFlag<SetTrue, [], "Use separate accesses for consecutive bitfield runs with legal widths and alignments.">,
- NegFlag<SetFalse, [], "Use large-integer access for consecutive bitfield runs.">,
- BothFlags<[CC1Option]>>,
+ PosFlag<SetTrue, [], [ClangOption], "Use separate accesses for consecutive bitfield runs with legal widths and alignments.">,
+ NegFlag<SetFalse, [], [ClangOption], "Use large-integer access for consecutive bitfield runs.">,
+ BothFlags<[], [ClangOption, CC1Option]>>,
Group<f_clang_Group>;
def fexperimental_relative_cxx_abi_vtables :
Flag<["-"], "fexperimental-relative-c++-abi-vtables">,
- Group<f_clang_Group>, Flags<[CC1Option]>,
+ Group<f_clang_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Use the experimental C++ class ABI for classes with virtual tables">;
def fno_experimental_relative_cxx_abi_vtables :
Flag<["-"], "fno-experimental-relative-c++-abi-vtables">,
- Group<f_clang_Group>, Flags<[CC1Option]>,
+ Group<f_clang_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Do not use the experimental C++ class ABI for classes with virtual tables">;
+defm experimental_omit_vtable_rtti : BoolFOption<"experimental-omit-vtable-rtti",
+ LangOpts<"OmitVTableRTTI">, DefaultFalse,
+ PosFlag<SetTrue, [], [CC1Option], "Omit">,
+ NegFlag<SetFalse, [], [CC1Option], "Do not omit">,
+ BothFlags<[], [CC1Option], " the RTTI component from virtual tables">>;
+
def fcxx_abi_EQ : Joined<["-"], "fc++-abi=">,
- Group<f_clang_Group>, Flags<[CC1Option]>,
+ Group<f_clang_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"C++ ABI to use. This will override the target C++ ABI.">;
def flat__namespace : Flag<["-"], "flat_namespace">;
def flax_vector_conversions_EQ : Joined<["-"], "flax-vector-conversions=">, Group<f_Group>,
- HelpText<"Enable implicit vector bit-casts">, Values<"none,integer,all">, Flags<[CC1Option]>,
+ HelpText<"Enable implicit vector bit-casts">, Values<"none,integer,all">,
+ Visibility<[ClangOption, CC1Option]>,
NormalizedValues<["LangOptions::LaxVectorConversionKind::None",
"LangOptions::LaxVectorConversionKind::Integer",
"LangOptions::LaxVectorConversionKind::All"]>,
@@ -1983,71 +2820,132 @@ def flax_vector_conversions : Flag<["-"], "flax-vector-conversions">, Group<f_Gr
def flimited_precision_EQ : Joined<["-"], "flimited-precision=">, Group<f_Group>;
def fapple_link_rtlib : Flag<["-"], "fapple-link-rtlib">, Group<f_Group>,
HelpText<"Force linking the clang builtins runtime library">;
-def flto_EQ : Joined<["-"], "flto=">, Flags<[CoreOption, CC1Option]>, Group<f_Group>,
- HelpText<"Set LTO mode to either 'full' or 'thin'">, Values<"thin,full">;
-def flto_EQ_jobserver : Flag<["-"], "flto=jobserver">, Group<f_Group>;
-def flto_EQ_auto : Flag<["-"], "flto=auto">, Group<f_Group>;
-def flto : Flag<["-"], "flto">, Flags<[CoreOption, CC1Option]>, Group<f_Group>,
- HelpText<"Enable LTO in 'full' mode">;
-def fno_lto : Flag<["-"], "fno-lto">, Flags<[CoreOption, CC1Option]>, Group<f_Group>,
+def flto_EQ : Joined<["-"], "flto=">,
+ Visibility<[ClangOption, CLOption, CC1Option, FC1Option, FlangOption]>,
+ Group<f_Group>,
+ HelpText<"Set LTO mode">, Values<"thin,full">;
+def flto_EQ_jobserver : Flag<["-"], "flto=jobserver">, Visibility<[ClangOption, FlangOption]>, Group<f_Group>,
+ Alias<flto_EQ>, AliasArgs<["full"]>, HelpText<"Enable LTO in 'full' mode">;
+def flto_EQ_auto : Flag<["-"], "flto=auto">, Visibility<[ClangOption, FlangOption]>, Group<f_Group>,
+ Alias<flto_EQ>, AliasArgs<["full"]>, HelpText<"Enable LTO in 'full' mode">;
+def flto : Flag<["-"], "flto">,
+ Visibility<[ClangOption, CLOption, CC1Option, FC1Option, FlangOption]>,
+ Group<f_Group>,
+ Alias<flto_EQ>, AliasArgs<["full"]>, HelpText<"Enable LTO in 'full' mode">;
+defm unified_lto : BoolFOption<"unified-lto",
+ CodeGenOpts<"UnifiedLTO">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption], "Use the unified LTO pipeline">,
+ NegFlag<SetFalse, [], [ClangOption], "Use distinct LTO pipelines">,
+ BothFlags<[], [ClangOption, CC1Option], "">>;
+def fno_lto : Flag<["-"], "fno-lto">,
+ Visibility<[ClangOption, CLOption, DXCOption, CC1Option, FlangOption]>, Group<f_Group>,
HelpText<"Disable LTO mode (default)">;
-def foffload_lto_EQ : Joined<["-"], "foffload-lto=">, Flags<[CoreOption]>, Group<f_Group>,
- HelpText<"Set LTO mode to either 'full' or 'thin' for offload compilation">, Values<"thin,full">;
-def foffload_lto : Flag<["-"], "foffload-lto">, Flags<[CoreOption]>, Group<f_Group>,
- HelpText<"Enable LTO in 'full' mode for offload compilation">;
-def fno_offload_lto : Flag<["-"], "fno-offload-lto">, Flags<[CoreOption]>, Group<f_Group>,
+def foffload_lto_EQ : Joined<["-"], "foffload-lto=">,
+ Visibility<[ClangOption, CLOption]>, Group<f_Group>,
+ HelpText<"Set LTO mode for offload compilation">, Values<"thin,full">;
+def foffload_lto : Flag<["-"], "foffload-lto">,
+ Visibility<[ClangOption, CLOption]>, Group<f_Group>,
+ Alias<foffload_lto_EQ>, AliasArgs<["full"]>, HelpText<"Enable LTO in 'full' mode for offload compilation">;
+def fno_offload_lto : Flag<["-"], "fno-offload-lto">,
+ Visibility<[ClangOption, CLOption]>, Group<f_Group>,
HelpText<"Disable LTO mode (default) for offload compilation">;
def flto_jobs_EQ : Joined<["-"], "flto-jobs=">,
- Flags<[CC1Option]>, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>, Group<f_Group>,
HelpText<"Controls the backend parallelism of -flto=thin (default "
"of 0 means the number of threads will be derived from "
"the number of CPUs detected)">;
def fthinlto_index_EQ : Joined<["-"], "fthinlto-index=">,
- Flags<[CoreOption, CC1Option]>, Group<f_Group>,
+ Visibility<[ClangOption, CLOption, CC1Option]>, Group<f_Group>,
HelpText<"Perform ThinLTO importing using provided function summary index">;
def fthin_link_bitcode_EQ : Joined<["-"], "fthin-link-bitcode=">,
- Flags<[CoreOption, CC1Option]>, Group<f_Group>,
+ Visibility<[ClangOption, CLOption, CC1Option]>, Group<f_Group>,
HelpText<"Write minimized bitcode to <file> for the ThinLTO thin link only">,
MarshallingInfoString<CodeGenOpts<"ThinLinkBitcodeFile">>;
+defm fat_lto_objects : BoolFOption<"fat-lto-objects",
+ CodeGenOpts<"FatLTO">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Enable">,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option], "Disable">,
+ BothFlags<[], [ClangOption, CC1Option], " fat LTO object support">>;
def fmacro_backtrace_limit_EQ : Joined<["-"], "fmacro-backtrace-limit=">,
- Group<f_Group>, Flags<[NoXarchOption, CoreOption]>;
+ Group<f_Group>, Visibility<[ClangOption, CC1Option, CLOption]>,
+ HelpText<"Set the maximum number of entries to print in a macro expansion backtrace (0 = no limit)">,
+ MarshallingInfoInt<DiagnosticOpts<"MacroBacktraceLimit">, "DiagnosticOptions::DefaultMacroBacktraceLimit">;
+def fcaret_diagnostics_max_lines_EQ :
+ Joined<["-"], "fcaret-diagnostics-max-lines=">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option, CLOption, DXCOption]>,
+ HelpText<"Set the maximum number of source lines to show in a caret diagnostic (0 = no limit).">,
+ MarshallingInfoInt<DiagnosticOpts<"SnippetLineLimit">, "DiagnosticOptions::DefaultSnippetLineLimit">;
defm merge_all_constants : BoolFOption<"merge-all-constants",
CodeGenOpts<"MergeAllConstants">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option, CoreOption], "Allow">, NegFlag<SetFalse, [], "Disallow">,
- BothFlags<[], " merging of constants">>;
-def fmessage_length_EQ : Joined<["-"], "fmessage-length=">, Group<f_Group>, Flags<[CC1Option]>,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option, CLOption], "Allow">,
+ NegFlag<SetFalse, [], [ClangOption], "Disallow">,
+ BothFlags<[], [ClangOption], " merging of constants">>;
+def fmessage_length_EQ : Joined<["-"], "fmessage-length=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Format message diagnostics so that they fit within N columns">,
MarshallingInfoInt<DiagnosticOpts<"MessageLength">>;
-def fms_compatibility : Flag<["-"], "fms-compatibility">, Group<f_Group>, Flags<[CC1Option, CoreOption]>,
+def frandomize_layout_seed_EQ : Joined<["-"], "frandomize-layout-seed=">,
+ MetaVarName<"<seed>">, Group<f_clang_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"The seed used by the randomize structure layout feature">;
+def frandomize_layout_seed_file_EQ : Joined<["-"], "frandomize-layout-seed-file=">,
+ MetaVarName<"<file>">, Group<f_clang_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"File holding the seed used by the randomize structure layout feature">;
+def fms_compatibility : Flag<["-"], "fms-compatibility">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option, CLOption]>,
HelpText<"Enable full Microsoft Visual C++ compatibility">,
MarshallingInfoFlag<LangOpts<"MSVCCompat">>;
-def fms_extensions : Flag<["-"], "fms-extensions">, Group<f_Group>, Flags<[CC1Option, CoreOption]>,
+def fms_extensions : Flag<["-"], "fms-extensions">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option, CLOption]>,
HelpText<"Accept some non-standard constructs supported by the Microsoft compiler">,
MarshallingInfoFlag<LangOpts<"MicrosoftExt">>, ImpliedByAnyOf<[fms_compatibility.KeyPath]>;
defm asm_blocks : BoolFOption<"asm-blocks",
LangOpts<"AsmBlocks">, Default<fms_extensions.KeyPath>,
- PosFlag<SetTrue, [CC1Option]>, NegFlag<SetFalse>>;
-def fms_volatile : Flag<["-"], "fms-volatile">, Group<f_Group>, Flags<[CC1Option]>,
- MarshallingInfoFlag<CodeGenOpts<"MSVolatile">>;
-def fmsc_version : Joined<["-"], "fmsc-version=">, Group<f_Group>, Flags<[NoXarchOption, CoreOption]>,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option]>,
+ NegFlag<SetFalse>>;
+defm ms_volatile : BoolFOption<"ms-volatile",
+ LangOpts<"MSVolatile">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Volatile loads and stores have acquire and release semantics">,
+ NegFlag<SetFalse>>;
+def fmsc_version : Joined<["-"], "fmsc-version=">, Group<f_Group>,
+ Visibility<[ClangOption, CLOption]>,
HelpText<"Microsoft compiler version number to report in _MSC_VER (0 = don't define it (default))">;
def fms_compatibility_version
: Joined<["-"], "fms-compatibility-version=">,
Group<f_Group>,
- Flags<[ CC1Option, CoreOption ]>,
+ Visibility<[ClangOption, CC1Option, CLOption]>,
HelpText<"Dot-separated value representing the Microsoft compiler "
"version number to report in _MSC_VER (0 = don't define it "
"(default))">;
+def fms_runtime_lib_EQ : Joined<["-"], "fms-runtime-lib=">, Group<f_Group>,
+ Flags<[]>, Visibility<[ClangOption, CLOption, FlangOption]>,
+ Values<"static,static_dbg,dll,dll_dbg">,
+ HelpText<"Select Windows run-time library">,
+ DocBrief<[{
+Specify Visual Studio C runtime library. "static" and "static_dbg" correspond
+to the cl flags /MT and /MTd which use the multithread, static version. "dll"
+and "dll_dbg" correspond to the cl flags /MD and /MDd which use the multithread,
+dll version.}]>;
+def fms_omit_default_lib : Joined<["-"], "fms-omit-default-lib">,
+ Group<f_Group>, Flags<[]>,
+ Visibility<[ClangOption, CLOption]>;
defm delayed_template_parsing : BoolFOption<"delayed-template-parsing",
LangOpts<"DelayedTemplateParsing">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Parse templated function definitions at the end of the translation unit">,
- NegFlag<SetFalse, [NoXarchOption], "Disable delayed template parsing">,
- BothFlags<[CoreOption]>>;
-def fms_memptr_rep_EQ : Joined<["-"], "fms-memptr-rep=">, Group<f_Group>, Flags<[CC1Option]>,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Parse templated function definitions at the end of the translation unit">,
+ NegFlag<SetFalse, [], [], "Disable delayed template parsing">,
+ BothFlags<[], [ClangOption, CLOption]>>;
+def fms_memptr_rep_EQ : Joined<["-"], "fms-memptr-rep=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
Values<"single,multiple,virtual">, NormalizedValuesScope<"LangOptions">,
NormalizedValues<["PPTMK_FullGeneralitySingleInheritance", "PPTMK_FullGeneralityMultipleInheritance",
"PPTMK_FullGeneralityVirtualInheritance"]>,
MarshallingInfoEnum<LangOpts<"MSPointerToMemberRepresentationMethod">, "PPTMK_BestCase">;
+def fms_kernel : Flag<["-"], "fms-kernel">, Group<f_Group>,
+ Visibility<[CC1Option]>,
+ MarshallingInfoFlag<LangOpts<"Kernel">>;
// __declspec is enabled by default for the PS4 by the driver, and also
// enabled for Microsoft Extensions or Borland Extensions, here.
//
@@ -2058,132 +2956,211 @@ def fms_memptr_rep_EQ : Joined<["-"], "fms-memptr-rep=">, Group<f_Group>, Flags<
// term here.
defm declspec : BoolOption<"f", "declspec",
LangOpts<"DeclSpecKeyword">, DefaultFalse,
- PosFlag<SetTrue, [], "Allow", [fms_extensions.KeyPath, fborland_extensions.KeyPath, cuda.KeyPath]>,
- NegFlag<SetFalse, [], "Disallow">,
- BothFlags<[CC1Option], " __declspec as a keyword">>, Group<f_clang_Group>;
+ PosFlag<SetTrue, [], [ClangOption], "Allow", [fms_extensions.KeyPath, fborland_extensions.KeyPath, cuda.KeyPath]>,
+ NegFlag<SetFalse, [], [ClangOption], "Disallow">,
+ BothFlags<[], [ClangOption, CC1Option],
+ " __declspec as a keyword">>, Group<f_clang_Group>;
def fmodules_cache_path : Joined<["-"], "fmodules-cache-path=">, Group<i_Group>,
- Flags<[NoXarchOption, CC1Option]>, MetaVarName<"<directory>">,
+ Flags<[]>, Visibility<[ClangOption, CC1Option]>,
+ MetaVarName<"<directory>">,
HelpText<"Specify the module cache path">;
def fmodules_user_build_path : Separate<["-"], "fmodules-user-build-path">, Group<i_Group>,
- Flags<[NoXarchOption, CC1Option]>, MetaVarName<"<directory>">,
+ Flags<[]>, Visibility<[ClangOption, CC1Option]>,
+ MetaVarName<"<directory>">,
HelpText<"Specify the module user build path">,
MarshallingInfoString<HeaderSearchOpts<"ModuleUserBuildPath">>;
def fprebuilt_module_path : Joined<["-"], "fprebuilt-module-path=">, Group<i_Group>,
- Flags<[NoXarchOption, CC1Option]>, MetaVarName<"<directory>">,
+ Flags<[]>, Visibility<[ClangOption, CC1Option]>,
+ MetaVarName<"<directory>">,
HelpText<"Specify the prebuilt module path">;
defm prebuilt_implicit_modules : BoolFOption<"prebuilt-implicit-modules",
HeaderSearchOpts<"EnablePrebuiltImplicitModules">, DefaultFalse,
- PosFlag<SetTrue, [], "Look up implicit modules in the prebuilt module path">,
- NegFlag<SetFalse>, BothFlags<[NoXarchOption, CC1Option]>>;
+ PosFlag<SetTrue, [], [ClangOption], "Look up implicit modules in the prebuilt module path">,
+ NegFlag<SetFalse>, BothFlags<[], [ClangOption, CC1Option]>>;
+
+def fmodule_output_EQ : Joined<["-"], "fmodule-output=">,
+ Flags<[NoXarchOption]>, Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Save intermediate module file results when compiling a standard C++ module unit.">;
+def fmodule_output : Flag<["-"], "fmodule-output">, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Save intermediate module file results when compiling a standard C++ module unit.">;
+
+defm skip_odr_check_in_gmf : BoolOption<"f", "skip-odr-check-in-gmf",
+ LangOpts<"SkipODRCheckInGMF">, DefaultFalse,
+ PosFlag<SetTrue, [], [CC1Option],
+ "Skip ODR checks for decls in the global module fragment.">,
+ NegFlag<SetFalse, [], [CC1Option],
+ "Perform ODR checks for decls in the global module fragment.">>,
+ Group<f_Group>;
def fmodules_prune_interval : Joined<["-"], "fmodules-prune-interval=">, Group<i_Group>,
- Flags<[CC1Option]>, MetaVarName<"<seconds>">,
+ Visibility<[ClangOption, CC1Option]>, MetaVarName<"<seconds>">,
HelpText<"Specify the interval (in seconds) between attempts to prune the module cache">,
MarshallingInfoInt<HeaderSearchOpts<"ModuleCachePruneInterval">, "7 * 24 * 60 * 60">;
def fmodules_prune_after : Joined<["-"], "fmodules-prune-after=">, Group<i_Group>,
- Flags<[CC1Option]>, MetaVarName<"<seconds>">,
+ Visibility<[ClangOption, CC1Option]>, MetaVarName<"<seconds>">,
HelpText<"Specify the interval (in seconds) after which a module file will be considered unused">,
MarshallingInfoInt<HeaderSearchOpts<"ModuleCachePruneAfter">, "31 * 24 * 60 * 60">;
def fbuild_session_timestamp : Joined<["-"], "fbuild-session-timestamp=">,
- Group<i_Group>, Flags<[CC1Option]>, MetaVarName<"<time since Epoch in seconds>">,
+ Group<i_Group>, Visibility<[ClangOption, CC1Option]>,
+ MetaVarName<"<time since Epoch in seconds>">,
HelpText<"Time when the current build session started">,
MarshallingInfoInt<HeaderSearchOpts<"BuildSessionTimestamp">, "0", "uint64_t">;
def fbuild_session_file : Joined<["-"], "fbuild-session-file=">,
Group<i_Group>, MetaVarName<"<file>">,
HelpText<"Use the last modification time of <file> as the build session timestamp">;
def fmodules_validate_once_per_build_session : Flag<["-"], "fmodules-validate-once-per-build-session">,
- Group<i_Group>, Flags<[CC1Option]>,
+ Group<i_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Don't verify input files for the modules if the module has been "
"successfully validated or loaded during this build session">,
MarshallingInfoFlag<HeaderSearchOpts<"ModulesValidateOncePerBuildSession">>;
def fmodules_disable_diagnostic_validation : Flag<["-"], "fmodules-disable-diagnostic-validation">,
- Group<i_Group>, Flags<[CC1Option]>,
+ Group<i_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Disable validation of the diagnostic options when loading the module">,
MarshallingInfoNegativeFlag<HeaderSearchOpts<"ModulesValidateDiagnosticOptions">>;
defm modules_validate_system_headers : BoolOption<"f", "modules-validate-system-headers",
HeaderSearchOpts<"ModulesValidateSystemHeaders">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Validate the system headers that a module depends on when loading the module">,
- NegFlag<SetFalse, [NoXarchOption]>>, Group<i_Group>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Validate the system headers that a module depends on when loading the module">,
+ NegFlag<SetFalse, [], []>>, Group<i_Group>;
+def fno_modules_validate_textual_header_includes :
+ Flag<["-"], "fno-modules-validate-textual-header-includes">,
+ Group<f_Group>, Flags<[]>, Visibility<[ClangOption, CC1Option]>,
+ MarshallingInfoNegativeFlag<LangOpts<"ModulesValidateTextualHeaderIncludes">>,
+ HelpText<"Do not enforce -fmodules-decluse and private header restrictions for textual headers. "
+ "This flag will be removed in a future Clang release.">;
+defm modules_skip_diagnostic_options : BoolFOption<"modules-skip-diagnostic-options",
+ HeaderSearchOpts<"ModulesSkipDiagnosticOptions">, DefaultFalse,
+ PosFlag<SetTrue, [], [], "Disable writing diagnostic options">,
+ NegFlag<SetFalse>, BothFlags<[], [CC1Option]>>;
+defm modules_skip_header_search_paths : BoolFOption<"modules-skip-header-search-paths",
+ HeaderSearchOpts<"ModulesSkipHeaderSearchPaths">, DefaultFalse,
+ PosFlag<SetTrue, [], [], "Disable writing header search paths">,
+ NegFlag<SetFalse>, BothFlags<[], [CC1Option]>>;
+
+def fincremental_extensions :
+ Flag<["-"], "fincremental-extensions">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Enable incremental processing extensions such as processing"
+ "statements on the global scope.">,
+ MarshallingInfoFlag<LangOpts<"IncrementalExtensions">>;
def fvalidate_ast_input_files_content:
Flag <["-"], "fvalidate-ast-input-files-content">,
- Group<f_Group>, Flags<[CC1Option]>,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Compute and store the hash of input files used to build an AST."
" Files with mismatching mtime's are considered valid"
" if both contents is identical">,
MarshallingInfoFlag<HeaderSearchOpts<"ValidateASTInputFilesContent">>;
+def fforce_check_cxx20_modules_input_files:
+ Flag <["-"], "fforce-check-cxx20-modules-input-files">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Check the input source files from C++20 modules explicitly">,
+ MarshallingInfoFlag<HeaderSearchOpts<"ForceCheckCXX20ModulesInputFiles">>;
def fmodules_validate_input_files_content:
Flag <["-"], "fmodules-validate-input-files-content">,
- Group<f_Group>, Flags<[NoXarchOption]>,
+ Group<f_Group>,
HelpText<"Validate PCM input files based on content if mtime differs">;
def fno_modules_validate_input_files_content:
Flag <["-"], "fno_modules-validate-input-files-content">,
- Group<f_Group>, Flags<[NoXarchOption]>;
+ Group<f_Group>;
def fpch_validate_input_files_content:
Flag <["-"], "fpch-validate-input-files-content">,
- Group<f_Group>, Flags<[NoXarchOption]>,
+ Group<f_Group>,
HelpText<"Validate PCH input files based on content if mtime differs">;
def fno_pch_validate_input_files_content:
Flag <["-"], "fno_pch-validate-input-files-content">,
- Group<f_Group>, Flags<[NoXarchOption]>;
+ Group<f_Group>;
defm pch_instantiate_templates : BoolFOption<"pch-instantiate-templates",
LangOpts<"PCHInstantiateTemplates">, DefaultFalse,
- PosFlag<SetTrue, [], "Instantiate templates already while building a PCH">,
- NegFlag<SetFalse>, BothFlags<[CC1Option, CoreOption]>>;
-defm pch_codegen: OptInFFlag<"pch-codegen", "Generate ", "Do not generate ",
+ PosFlag<SetTrue, [], [ClangOption], "Instantiate templates already while building a PCH">,
+ NegFlag<SetFalse>, BothFlags<[], [ClangOption, CC1Option, CLOption]
+ >>;
+defm pch_codegen: OptInCC1FFlag<"pch-codegen", "Generate ", "Do not generate ",
"code for uses of this PCH that assumes an explicit object file will be built for the PCH">;
-defm pch_debuginfo: OptInFFlag<"pch-debuginfo", "Generate ", "Do not generate ",
+defm pch_debuginfo: OptInCC1FFlag<"pch-debuginfo", "Generate ", "Do not generate ",
"debug info for types in an object file built from this PCH and do not generate them elsewhere">;
def fimplicit_module_maps : Flag <["-"], "fimplicit-module-maps">, Group<f_Group>,
- Flags<[NoXarchOption, CC1Option, CoreOption]>,
+ Visibility<[ClangOption, CC1Option, CLOption]>,
HelpText<"Implicitly search the file system for module map files.">,
MarshallingInfoFlag<HeaderSearchOpts<"ImplicitModuleMaps">>;
-def fmodules_ts : Flag <["-"], "fmodules-ts">, Group<f_Group>,
- Flags<[CC1Option]>, HelpText<"Enable support for the C++ Modules TS">,
- MarshallingInfoFlag<LangOpts<"ModulesTS">>;
defm modules : BoolFOption<"modules",
- LangOpts<"Modules">, Default<!strconcat(fmodules_ts.KeyPath, "||", cpp_modules.KeyPath)>,
- PosFlag<SetTrue, [CC1Option], "Enable the 'modules' language feature">,
- NegFlag<SetFalse>, BothFlags<[NoXarchOption, CoreOption]>>;
-def fmodule_maps : Flag <["-"], "fmodule-maps">, Flags<[CoreOption]>, Alias<fimplicit_module_maps>;
+ LangOpts<"Modules">, Default<fcxx_modules.KeyPath>,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Enable the 'modules' language feature">,
+ NegFlag<SetFalse>, BothFlags<
+ [NoXarchOption], [ClangOption, CLOption]>>;
+def fbuiltin_headers_in_system_modules : Flag <["-"], "fbuiltin-headers-in-system-modules">,
+ Group<f_Group>,
+ Visibility<[CC1Option]>,
+ ShouldParseIf<fmodules.KeyPath>,
+ HelpText<"builtin headers belong to system modules, and _Builtin_ modules are ignored for cstdlib headers">,
+ MarshallingInfoFlag<LangOpts<"BuiltinHeadersInSystemModules">>;
+def fmodule_maps : Flag <["-"], "fmodule-maps">,
+ Visibility<[ClangOption, CLOption]>, Alias<fimplicit_module_maps>;
def fmodule_name_EQ : Joined<["-"], "fmodule-name=">, Group<f_Group>,
- Flags<[NoXarchOption,CC1Option,CoreOption]>, MetaVarName<"<name>">,
+ Visibility<[ClangOption, CC1Option, CLOption]>,
+ MetaVarName<"<name>">,
HelpText<"Specify the name of the module to build">,
MarshallingInfoString<LangOpts<"ModuleName">>;
def fmodule_implementation_of : Separate<["-"], "fmodule-implementation-of">,
- Flags<[CC1Option,CoreOption]>, Alias<fmodule_name_EQ>;
-def fsystem_module : Flag<["-"], "fsystem-module">, Flags<[CC1Option,CoreOption]>,
+ Visibility<[ClangOption, CC1Option, CLOption]>,
+ Alias<fmodule_name_EQ>;
+def fsystem_module : Flag<["-"], "fsystem-module">,
+ Visibility<[ClangOption, CC1Option, CLOption]>,
HelpText<"Build this module as a system module. Only used with -emit-module">,
MarshallingInfoFlag<FrontendOpts<"IsSystemModule">>;
def fmodule_map_file : Joined<["-"], "fmodule-map-file=">,
- Group<f_Group>, Flags<[NoXarchOption,CC1Option,CoreOption]>, MetaVarName<"<file>">,
+ Group<f_Group>,
+ Visibility<[ClangOption, CC1Option, CLOption]>,
+ MetaVarName<"<file>">,
HelpText<"Load this module map file">,
MarshallingInfoStringVector<FrontendOpts<"ModuleMapFiles">>;
def fmodule_file : Joined<["-"], "fmodule-file=">,
- Group<i_Group>, Flags<[NoXarchOption,CC1Option,CoreOption]>, MetaVarName<"[<name>=]<file>">,
+ Group<i_Group>,
+ Visibility<[ClangOption, CC1Option, CLOption]>,
+ MetaVarName<"[<name>=]<file>">,
HelpText<"Specify the mapping of module name to precompiled module file, or load a module file if name is omitted.">;
def fmodules_ignore_macro : Joined<["-"], "fmodules-ignore-macro=">, Group<f_Group>,
- Flags<[CC1Option,CoreOption]>,
+ Visibility<[ClangOption, CC1Option, CLOption]>,
HelpText<"Ignore the definition of the given macro when building and loading modules">;
def fmodules_strict_decluse : Flag <["-"], "fmodules-strict-decluse">, Group<f_Group>,
- Flags<[NoXarchOption,CC1Option,CoreOption]>,
+ Visibility<[ClangOption, CC1Option, CLOption]>,
HelpText<"Like -fmodules-decluse but requires all headers to be in modules">,
MarshallingInfoFlag<LangOpts<"ModulesStrictDeclUse">>;
defm modules_decluse : BoolFOption<"modules-decluse",
LangOpts<"ModulesDeclUse">, Default<fmodules_strict_decluse.KeyPath>,
- PosFlag<SetTrue, [CC1Option], "Require declaration of modules used within a module">,
- NegFlag<SetFalse>, BothFlags<[NoXarchOption,CoreOption]>>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Require declaration of modules used within a module">,
+ NegFlag<SetFalse>, BothFlags<[], [ClangOption, CLOption]>>;
defm modules_search_all : BoolFOption<"modules-search-all",
LangOpts<"ModulesSearchAll">, DefaultFalse,
- PosFlag<SetTrue, [], "Search even non-imported modules to resolve references">,
- NegFlag<SetFalse>, BothFlags<[NoXarchOption, CC1Option,CoreOption]>>,
+ PosFlag<SetTrue, [], [ClangOption], "Search even non-imported modules to resolve references">,
+ NegFlag<SetFalse>, BothFlags<[], [ClangOption, CC1Option, CLOption]>>,
ShouldParseIf<fmodules.KeyPath>;
defm implicit_modules : BoolFOption<"implicit-modules",
LangOpts<"ImplicitModules">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option]>, PosFlag<SetTrue>, BothFlags<[NoXarchOption,CoreOption]>>;
-def fretain_comments_from_system_headers : Flag<["-"], "fretain-comments-from-system-headers">, Group<f_Group>, Flags<[CC1Option]>,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option]>,
+ PosFlag<SetTrue>, BothFlags<
+ [NoXarchOption], [ClangOption, CLOption]>>;
+def fno_modules_check_relocated : Joined<["-"], "fno-modules-check-relocated">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Skip checks for relocated modules when loading PCM files">,
+ MarshallingInfoNegativeFlag<PreprocessorOpts<"ModulesCheckRelocated">>;
+def fretain_comments_from_system_headers : Flag<["-"], "fretain-comments-from-system-headers">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
MarshallingInfoFlag<LangOpts<"RetainCommentsFromSystemHeaders">>;
+def fmodule_header : Flag <["-"], "fmodule-header">, Group<f_Group>,
+ HelpText<"Build a C++20 Header Unit from a header">;
+def fmodule_header_EQ : Joined<["-"], "fmodule-header=">, Group<f_Group>,
+ MetaVarName<"<kind>">,
+ HelpText<"Build a C++20 Header Unit from a header that should be found in the user (fmodule-header=user) or system (fmodule-header=system) search path.">;
+
+def fno_knr_functions : Flag<["-"], "fno-knr-functions">, Group<f_Group>,
+ MarshallingInfoFlag<LangOpts<"DisableKNRFunctions">>,
+ HelpText<"Disable support for K&R C function declarations">,
+ Visibility<[ClangOption, CC1Option, CLOption]>;
def fmudflapth : Flag<["-"], "fmudflapth">, Group<f_Group>;
def fmudflap : Flag<["-"], "fmudflap">, Group<f_Group>;
@@ -2193,121 +3170,140 @@ def fno_asm : Flag<["-"], "fno-asm">, Group<f_Group>;
def fno_asynchronous_unwind_tables : Flag<["-"], "fno-asynchronous-unwind-tables">, Group<f_Group>;
def fno_assume_sane_operator_new : Flag<["-"], "fno-assume-sane-operator-new">, Group<f_Group>,
HelpText<"Don't assume that C++'s global operator new can't alias any pointer">,
- Flags<[CC1Option]>, MarshallingInfoNegativeFlag<CodeGenOpts<"AssumeSaneOperatorNew">>;
-def fno_builtin : Flag<["-"], "fno-builtin">, Group<f_Group>, Flags<[CC1Option, CoreOption]>,
+ Visibility<[ClangOption, CC1Option]>,
+ MarshallingInfoNegativeFlag<CodeGenOpts<"AssumeSaneOperatorNew">>;
+def fno_builtin : Flag<["-"], "fno-builtin">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option, CLOption, DXCOption]>,
HelpText<"Disable implicit builtin knowledge of functions">;
-def fno_builtin_ : Joined<["-"], "fno-builtin-">, Group<f_Group>, Flags<[CC1Option, CoreOption]>,
+def fno_builtin_ : Joined<["-"], "fno-builtin-">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option, CLOption, DXCOption]>,
HelpText<"Disable implicit builtin knowledge of a specific function">;
-def fno_diagnostics_color : Flag<["-"], "fno-diagnostics-color">, Group<f_Group>,
- Flags<[CoreOption, NoXarchOption]>;
-def fno_common : Flag<["-"], "fno-common">, Group<f_Group>, Flags<[CC1Option]>,
+def fno_common : Flag<["-"], "fno-common">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Compile common globals like normal definitions">;
-def fno_cxx_modules : Flag <["-"], "fno-cxx-modules">, Group<f_Group>,
- Flags<[NoXarchOption]>;
defm digraphs : BoolFOption<"digraphs",
LangOpts<"Digraphs">, Default<std#".hasDigraphs()">,
- PosFlag<SetTrue, [], "Enable alternative token representations '<:', ':>', '<%', '%>', '%:', '%:%:' (default)">,
- NegFlag<SetFalse, [], "Disallow alternative token representations '<:', ':>', '<%', '%>', '%:', '%:%:'">,
- BothFlags<[CC1Option]>>;
+ PosFlag<SetTrue, [], [ClangOption], "Enable alternative token representations '<:', ':>', '<%', '%>', '%:', '%:%:' (default)">,
+ NegFlag<SetFalse, [], [ClangOption], "Disallow alternative token representations '<:', ':>', '<%', '%>', '%:', '%:%:'">,
+ BothFlags<[], [ClangOption, CC1Option]>>;
def fno_eliminate_unused_debug_symbols : Flag<["-"], "fno-eliminate-unused-debug-symbols">, Group<f_Group>;
-def fno_inline_functions : Flag<["-"], "fno-inline-functions">, Group<f_clang_Group>, Flags<[CC1Option]>;
-def fno_inline : Flag<["-"], "fno-inline">, Group<f_clang_Group>, Flags<[CC1Option]>;
+def fno_inline_functions : Flag<["-"], "fno-inline-functions">, Group<f_clang_Group>,
+ Visibility<[ClangOption, CC1Option]>;
+def fno_inline : Flag<["-"], "fno-inline">, Group<f_clang_Group>,
+ Visibility<[ClangOption, CC1Option]>;
def fno_global_isel : Flag<["-"], "fno-global-isel">, Group<f_clang_Group>,
HelpText<"Disables the global instruction selector">;
def fno_experimental_isel : Flag<["-"], "fno-experimental-isel">, Group<f_clang_Group>,
Alias<fno_global_isel>;
-def fveclib : Joined<["-"], "fveclib=">, Group<f_Group>, Flags<[CC1Option]>,
+def fveclib : Joined<["-"], "fveclib=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
HelpText<"Use the given vector functions library">,
- Values<"Accelerate,libmvec,MASSV,SVML,Darwin_libsystem_m,none">,
- NormalizedValuesScope<"CodeGenOptions">,
- NormalizedValues<["Accelerate", "LIBMVEC", "MASSV", "SVML",
- "Darwin_libsystem_m", "NoLibrary"]>,
+ Values<"Accelerate,libmvec,MASSV,SVML,SLEEF,Darwin_libsystem_m,ArmPL,none">,
+ NormalizedValuesScope<"llvm::driver::VectorLibrary">,
+ NormalizedValues<["Accelerate", "LIBMVEC", "MASSV", "SVML", "SLEEF",
+ "Darwin_libsystem_m", "ArmPL", "NoLibrary"]>,
MarshallingInfoEnum<CodeGenOpts<"VecLib">, "NoLibrary">;
def fno_lax_vector_conversions : Flag<["-"], "fno-lax-vector-conversions">, Group<f_Group>,
Alias<flax_vector_conversions_EQ>, AliasArgs<["none"]>;
-def fno_implicit_module_maps : Flag <["-"], "fno-implicit-module-maps">, Group<f_Group>,
- Flags<[NoXarchOption]>;
+def fno_implicit_module_maps : Flag <["-"], "fno-implicit-module-maps">, Group<f_Group>;
def fno_module_maps : Flag <["-"], "fno-module-maps">, Alias<fno_implicit_module_maps>;
-def fno_modules_strict_decluse : Flag <["-"], "fno-strict-modules-decluse">, Group<f_Group>,
- Flags<[NoXarchOption]>;
-def fmodule_file_deps : Flag <["-"], "fmodule-file-deps">, Group<f_Group>,
- Flags<[NoXarchOption]>;
-def fno_module_file_deps : Flag <["-"], "fno-module-file-deps">, Group<f_Group>,
- Flags<[NoXarchOption]>;
+def fno_modules_strict_decluse : Flag <["-"], "fno-strict-modules-decluse">, Group<f_Group>;
+def fmodule_file_deps : Flag <["-"], "fmodule-file-deps">, Group<f_Group>;
+def fno_module_file_deps : Flag <["-"], "fno-module-file-deps">, Group<f_Group>;
def fno_ms_extensions : Flag<["-"], "fno-ms-extensions">, Group<f_Group>,
- Flags<[CoreOption]>;
+ Visibility<[ClangOption, CLOption]>;
def fno_ms_compatibility : Flag<["-"], "fno-ms-compatibility">, Group<f_Group>,
- Flags<[CoreOption]>;
+ Visibility<[ClangOption, CLOption]>;
def fno_objc_legacy_dispatch : Flag<["-"], "fno-objc-legacy-dispatch">, Group<f_Group>;
-def fno_objc_weak : Flag<["-"], "fno-objc-weak">, Group<f_Group>, Flags<[CC1Option]>;
-def fno_omit_frame_pointer : Flag<["-"], "fno-omit-frame-pointer">, Group<f_Group>;
+def fno_objc_weak : Flag<["-"], "fno-objc-weak">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>;
+def fno_omit_frame_pointer : Flag<["-"], "fno-omit-frame-pointer">, Group<f_Group>,
+ Visibility<[ClangOption, FlangOption]>;
defm operator_names : BoolFOption<"operator-names",
LangOpts<"CXXOperatorNames">, Default<cplusplus.KeyPath>,
- NegFlag<SetFalse, [CC1Option], "Do not treat C++ operator name keywords as synonyms for operators">,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option],
+ "Do not treat C++ operator name keywords as synonyms for operators">,
PosFlag<SetTrue>>;
def fdiagnostics_absolute_paths : Flag<["-"], "fdiagnostics-absolute-paths">, Group<f_Group>,
- Flags<[CC1Option, CoreOption]>, HelpText<"Print absolute paths in diagnostics">,
+ Visibility<[ClangOption, CC1Option, CLOption, DXCOption]>,
+ HelpText<"Print absolute paths in diagnostics">,
MarshallingInfoFlag<DiagnosticOpts<"AbsolutePath">>;
+defm diagnostics_show_line_numbers : BoolFOption<"diagnostics-show-line-numbers",
+ DiagnosticOpts<"ShowLineNumbers">, DefaultTrue,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option],
+ "Show line numbers in diagnostic code snippets">,
+ PosFlag<SetTrue>>;
def fno_stack_protector : Flag<["-"], "fno-stack-protector">, Group<f_Group>,
HelpText<"Disable the use of stack protectors">;
def fno_strict_aliasing : Flag<["-"], "fno-strict-aliasing">, Group<f_Group>,
- Flags<[NoXarchOption, CoreOption]>;
+ Visibility<[ClangOption, CLOption, DXCOption]>,
+ HelpText<"Disable optimizations based on strict aliasing rules">;
def fstruct_path_tbaa : Flag<["-"], "fstruct-path-tbaa">, Group<f_Group>;
def fno_struct_path_tbaa : Flag<["-"], "fno-struct-path-tbaa">, Group<f_Group>;
def fno_strict_enums : Flag<["-"], "fno-strict-enums">, Group<f_Group>;
def fno_strict_overflow : Flag<["-"], "fno-strict-overflow">, Group<f_Group>;
def fno_temp_file : Flag<["-"], "fno-temp-file">, Group<f_Group>,
- Flags<[CC1Option, CoreOption]>, HelpText<
+ Visibility<[ClangOption, CC1Option, CLOption, DXCOption]>, HelpText<
"Directly create compilation output files. This may lead to incorrect incremental builds if the compiler crashes">,
MarshallingInfoNegativeFlag<FrontendOpts<"UseTemporary">>;
defm use_cxa_atexit : BoolFOption<"use-cxa-atexit",
CodeGenOpts<"CXAAtExit">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option], "Don't use __cxa_atexit for calling destructors">,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option],
+ "Don't use __cxa_atexit for calling destructors">,
PosFlag<SetTrue>>;
-def fno_unit_at_a_time : Flag<["-"], "fno-unit-at-a-time">, Group<f_Group>;
def fno_unwind_tables : Flag<["-"], "fno-unwind-tables">, Group<f_Group>;
-def fno_verbose_asm : Flag<["-"], "fno-verbose-asm">, Group<f_Group>, Flags<[CC1Option]>,
+def fno_verbose_asm : Flag<["-"], "fno-verbose-asm">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
MarshallingInfoNegativeFlag<CodeGenOpts<"AsmVerbose">>;
def fno_working_directory : Flag<["-"], "fno-working-directory">, Group<f_Group>;
def fno_wrapv : Flag<["-"], "fno-wrapv">, Group<f_Group>;
-def fobjc_arc : Flag<["-"], "fobjc-arc">, Group<f_Group>, Flags<[CC1Option]>,
+def fobjc_arc : Flag<["-"], "fobjc-arc">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Synthesize retain and release calls for Objective-C pointers">;
def fno_objc_arc : Flag<["-"], "fno-objc-arc">, Group<f_Group>;
defm objc_encode_cxx_class_template_spec : BoolFOption<"objc-encode-cxx-class-template-spec",
LangOpts<"EncodeCXXClassTemplateSpec">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Fully encode c++ class template specialization">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Fully encode c++ class template specialization">,
NegFlag<SetFalse>>;
defm objc_convert_messages_to_runtime_calls : BoolFOption<"objc-convert-messages-to-runtime-calls",
CodeGenOpts<"ObjCConvertMessagesToRuntimeCalls">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option]>, PosFlag<SetTrue>>;
+ NegFlag<SetFalse, [], [ClangOption, CC1Option]>,
+ PosFlag<SetTrue>>;
defm objc_arc_exceptions : BoolFOption<"objc-arc-exceptions",
CodeGenOpts<"ObjCAutoRefCountExceptions">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Use EH-safe code when synthesizing retains and releases in -fobjc-arc">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Use EH-safe code when synthesizing retains and releases in -fobjc-arc">,
NegFlag<SetFalse>>;
def fobjc_atdefs : Flag<["-"], "fobjc-atdefs">, Group<clang_ignored_f_Group>;
def fobjc_call_cxx_cdtors : Flag<["-"], "fobjc-call-cxx-cdtors">, Group<clang_ignored_f_Group>;
defm objc_exceptions : BoolFOption<"objc-exceptions",
LangOpts<"ObjCExceptions">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Enable Objective-C exceptions">, NegFlag<SetFalse>>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Enable Objective-C exceptions">,
+ NegFlag<SetFalse>>;
defm application_extension : BoolFOption<"application-extension",
LangOpts<"AppExt">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Restrict code to those available for App Extensions">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Restrict code to those available for App Extensions">,
NegFlag<SetFalse>>;
defm relaxed_template_template_args : BoolFOption<"relaxed-template-template-args",
LangOpts<"RelaxedTemplateTemplateArgs">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Enable C++17 relaxed template template argument matching">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Enable C++17 relaxed template template argument matching">,
NegFlag<SetFalse>>;
defm sized_deallocation : BoolFOption<"sized-deallocation",
LangOpts<"SizedDeallocation">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Enable C++14 sized global deallocation functions">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Enable C++14 sized global deallocation functions">,
NegFlag<SetFalse>>;
defm aligned_allocation : BoolFOption<"aligned-allocation",
LangOpts<"AlignedAllocation">, Default<cpp17.KeyPath>,
- PosFlag<SetTrue, [], "Enable C++17 aligned allocation functions">,
- NegFlag<SetFalse>, BothFlags<[CC1Option]>>;
+ PosFlag<SetTrue, [], [ClangOption], "Enable C++17 aligned allocation functions">,
+ NegFlag<SetFalse>, BothFlags<[], [ClangOption, CC1Option]>>;
def fnew_alignment_EQ : Joined<["-"], "fnew-alignment=">,
HelpText<"Specifies the largest alignment guaranteed by '::operator new(size_t)'">,
- MetaVarName<"<align>">, Group<f_Group>, Flags<[CC1Option]>,
+ MetaVarName<"<align>">, Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
MarshallingInfoInt<LangOpts<"NewAlignOverride">>;
def : Separate<["-"], "fnew-alignment">, Alias<fnew_alignment_EQ>;
def : Flag<["-"], "faligned-new">, Alias<faligned_allocation>;
@@ -2318,14 +3314,17 @@ def fobjc_legacy_dispatch : Flag<["-"], "fobjc-legacy-dispatch">, Group<f_Group>
def fobjc_new_property : Flag<["-"], "fobjc-new-property">, Group<clang_ignored_f_Group>;
defm objc_infer_related_result_type : BoolFOption<"objc-infer-related-result-type",
LangOpts<"ObjCInferRelatedResultType">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option], "do not infer Objective-C related result type based on method family">,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option],
+ "do not infer Objective-C related result type based on method family">,
PosFlag<SetTrue>>;
def fobjc_link_runtime: Flag<["-"], "fobjc-link-runtime">, Group<f_Group>;
-def fobjc_weak : Flag<["-"], "fobjc-weak">, Group<f_Group>, Flags<[CC1Option]>,
+def fobjc_weak : Flag<["-"], "fobjc-weak">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable ARC-style weak references in Objective-C">;
// Objective-C ABI options.
-def fobjc_runtime_EQ : Joined<["-"], "fobjc-runtime=">, Group<f_Group>, Flags<[CC1Option, CoreOption]>,
+def fobjc_runtime_EQ : Joined<["-"], "fobjc-runtime=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option, CLOption]>,
HelpText<"Specify the target Objective-C runtime kind and version">;
def fobjc_abi_version_EQ : Joined<["-"], "fobjc-abi-version=">, Group<f_Group>;
def fobjc_nonfragile_abi_version_EQ : Joined<["-"], "fobjc-nonfragile-abi-version=">, Group<f_Group>;
@@ -2335,59 +3334,173 @@ def fno_objc_nonfragile_abi : Flag<["-"], "fno-objc-nonfragile-abi">, Group<f_Gr
def fobjc_sender_dependent_dispatch : Flag<["-"], "fobjc-sender-dependent-dispatch">, Group<f_Group>;
def fobjc_disable_direct_methods_for_testing :
Flag<["-"], "fobjc-disable-direct-methods-for-testing">,
- Group<f_Group>, Flags<[CC1Option]>,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Ignore attribute objc_direct so that direct methods can be tested">,
MarshallingInfoFlag<LangOpts<"ObjCDisableDirectMethodsForTesting">>;
-
-def fomit_frame_pointer : Flag<["-"], "fomit-frame-pointer">, Group<f_Group>;
-def fopenmp : Flag<["-"], "fopenmp">, Group<f_Group>, Flags<[CC1Option, NoArgumentUnused, FlangOption, FC1Option]>,
+defm objc_avoid_heapify_local_blocks : BoolFOption<"objc-avoid-heapify-local-blocks",
+ CodeGenOpts<"ObjCAvoidHeapifyLocalBlocks">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption], "Try">,
+ NegFlag<SetFalse, [], [ClangOption], "Don't try">,
+ BothFlags<[], [CC1Option], " to avoid heapifying local blocks">>;
+
+def fomit_frame_pointer : Flag<["-"], "fomit-frame-pointer">, Group<f_Group>,
+ Visibility<[ClangOption, FlangOption]>,
+ HelpText<"Omit the frame pointer from functions that don't need it. "
+ "Some stack unwinding cases, such as profilers and sanitizers, may prefer specifying -fno-omit-frame-pointer. "
+ "On many targets, -O1 and higher omit the frame pointer by default. "
+ "-m[no-]omit-leaf-frame-pointer takes precedence for leaf functions">;
+def fopenmp : Flag<["-"], "fopenmp">, Group<f_Group>,
+ Flags<[NoArgumentUnused]>,
+ Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
HelpText<"Parse OpenMP pragmas and generate parallel code.">;
-def fno_openmp : Flag<["-"], "fno-openmp">, Group<f_Group>, Flags<[NoArgumentUnused]>;
-def fopenmp_version_EQ : Joined<["-"], "fopenmp-version=">, Group<f_Group>, Flags<[CC1Option, NoArgumentUnused]>;
+def fno_openmp : Flag<["-"], "fno-openmp">, Group<f_Group>,
+ Flags<[NoArgumentUnused]>;
+def fopenmp_version_EQ : Joined<["-"], "fopenmp-version=">, Group<f_Group>,
+ Flags<[NoArgumentUnused]>,
+ Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
+ HelpText<"Set OpenMP version (e.g. 45 for OpenMP 4.5, 51 for OpenMP 5.1). Default value is 51 for Clang">;
+defm openmp_extensions: BoolFOption<"openmp-extensions",
+ LangOpts<"OpenMPExtensions">, DefaultTrue,
+ PosFlag<SetTrue, [NoArgumentUnused], [ClangOption, CC1Option],
+ "Enable all Clang extensions for OpenMP directives and clauses">,
+ NegFlag<SetFalse, [NoArgumentUnused], [ClangOption, CC1Option],
+ "Disable all Clang extensions for OpenMP directives and clauses">>;
def fopenmp_EQ : Joined<["-"], "fopenmp=">, Group<f_Group>;
def fopenmp_use_tls : Flag<["-"], "fopenmp-use-tls">, Group<f_Group>,
Flags<[NoArgumentUnused, HelpHidden]>;
def fnoopenmp_use_tls : Flag<["-"], "fnoopenmp-use-tls">, Group<f_Group>,
- Flags<[CC1Option, NoArgumentUnused, HelpHidden]>;
-def fopenmp_targets_EQ : CommaJoined<["-"], "fopenmp-targets=">, Flags<[NoXarchOption, CC1Option]>,
+ Flags<[NoArgumentUnused, HelpHidden]>, Visibility<[ClangOption, CC1Option]>;
+def fopenmp_targets_EQ : CommaJoined<["-"], "fopenmp-targets=">,
+ Flags<[NoXarchOption]>, Visibility<[ClangOption, CC1Option, FlangOption]>,
HelpText<"Specify comma-separated list of triples OpenMP offloading targets to be supported">;
def fopenmp_relocatable_target : Flag<["-"], "fopenmp-relocatable-target">,
- Group<f_Group>, Flags<[CC1Option, NoArgumentUnused, HelpHidden]>;
+ Group<f_Group>, Flags<[NoArgumentUnused, HelpHidden]>,
+ Visibility<[ClangOption, CC1Option]>;
def fnoopenmp_relocatable_target : Flag<["-"], "fnoopenmp-relocatable-target">,
- Group<f_Group>, Flags<[CC1Option, NoArgumentUnused, HelpHidden]>;
-def fopenmp_simd : Flag<["-"], "fopenmp-simd">, Group<f_Group>, Flags<[CC1Option, NoArgumentUnused]>,
+ Group<f_Group>, Flags<[NoArgumentUnused, HelpHidden]>,
+ Visibility<[ClangOption, CC1Option]>;
+def fopenmp_simd : Flag<["-"], "fopenmp-simd">, Group<f_Group>,
+ Flags<[NoArgumentUnused]>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Emit OpenMP code only for SIMD-based constructs.">;
-def fopenmp_enable_irbuilder : Flag<["-"], "fopenmp-enable-irbuilder">, Group<f_Group>, Flags<[CC1Option, NoArgumentUnused, HelpHidden]>,
+def fopenmp_enable_irbuilder : Flag<["-"], "fopenmp-enable-irbuilder">, Group<f_Group>,
+ Flags<[NoArgumentUnused, HelpHidden]>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Use the experimental OpenMP-IR-Builder codegen path.">;
-def fno_openmp_simd : Flag<["-"], "fno-openmp-simd">, Group<f_Group>, Flags<[CC1Option, NoArgumentUnused]>;
+def fno_openmp_simd : Flag<["-"], "fno-openmp-simd">, Group<f_Group>,
+ Flags<[NoArgumentUnused]>, Visibility<[ClangOption, CC1Option]>;
def fopenmp_cuda_mode : Flag<["-"], "fopenmp-cuda-mode">, Group<f_Group>,
- Flags<[CC1Option, NoArgumentUnused, HelpHidden]>;
+ Flags<[NoArgumentUnused, HelpHidden]>, Visibility<[ClangOption, CC1Option]>;
def fno_openmp_cuda_mode : Flag<["-"], "fno-openmp-cuda-mode">, Group<f_Group>,
Flags<[NoArgumentUnused, HelpHidden]>;
-def fopenmp_cuda_force_full_runtime : Flag<["-"], "fopenmp-cuda-force-full-runtime">, Group<f_Group>,
- Flags<[CC1Option, NoArgumentUnused, HelpHidden]>;
-def fno_openmp_cuda_force_full_runtime : Flag<["-"], "fno-openmp-cuda-force-full-runtime">, Group<f_Group>,
- Flags<[NoArgumentUnused, HelpHidden]>;
def fopenmp_cuda_number_of_sm_EQ : Joined<["-"], "fopenmp-cuda-number-of-sm=">, Group<f_Group>,
- Flags<[CC1Option, NoArgumentUnused, HelpHidden]>;
+ Flags<[NoArgumentUnused, HelpHidden]>, Visibility<[ClangOption, CC1Option]>;
def fopenmp_cuda_blocks_per_sm_EQ : Joined<["-"], "fopenmp-cuda-blocks-per-sm=">, Group<f_Group>,
- Flags<[CC1Option, NoArgumentUnused, HelpHidden]>;
+ Flags<[NoArgumentUnused, HelpHidden]>, Visibility<[ClangOption, CC1Option]>;
def fopenmp_cuda_teams_reduction_recs_num_EQ : Joined<["-"], "fopenmp-cuda-teams-reduction-recs-num=">, Group<f_Group>,
- Flags<[CC1Option, NoArgumentUnused, HelpHidden]>;
-defm openmp_target_new_runtime: BoolFOption<"openmp-target-new-runtime",
- LangOpts<"OpenMPTargetNewRuntime">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Use the new bitcode library for OpenMP offloading">,
- NegFlag<SetFalse>>;
+ Flags<[NoArgumentUnused, HelpHidden]>, Visibility<[ClangOption, CC1Option]>;
+
+//===----------------------------------------------------------------------===//
+// Shared cc1 + fc1 OpenMP Target Options
+//===----------------------------------------------------------------------===//
+
+let Flags = [NoArgumentUnused] in {
+let Visibility = [ClangOption, CC1Option, FC1Option, FlangOption] in {
+let Group = f_Group in {
+
+def fopenmp_target_debug : Flag<["-"], "fopenmp-target-debug">,
+ HelpText<"Enable debugging in the OpenMP offloading device RTL">;
+def fno_openmp_target_debug : Flag<["-"], "fno-openmp-target-debug">;
+
+} // let Group = f_Group
+} // let Visibility = [ClangOption, CC1Option, FC1Option]
+} // let Flags = [NoArgumentUnused]
+
+//===----------------------------------------------------------------------===//
+// FlangOption + FC1 + ClangOption + CC1Option
+//===----------------------------------------------------------------------===//
+let Visibility = [FC1Option, FlangOption, CC1Option, ClangOption] in {
+def fopenacc : Flag<["-"], "fopenacc">, Group<f_Group>,
+ HelpText<"Enable OpenACC">;
+} // let Visibility = [FC1Option, FlangOption, CC1Option, ClangOption]
+
+//===----------------------------------------------------------------------===//
+// Optimisation remark options
+//===----------------------------------------------------------------------===//
+
+let Visibility = [ClangOption, CC1Option, FC1Option, FlangOption] in {
+
+def Rpass_EQ : Joined<["-"], "Rpass=">, Group<R_value_Group>,
+ HelpText<"Report transformations performed by optimization passes whose "
+ "name matches the given POSIX regular expression">;
+def Rpass_missed_EQ : Joined<["-"], "Rpass-missed=">, Group<R_value_Group>,
+ HelpText<"Report missed transformations by optimization passes whose "
+ "name matches the given POSIX regular expression">;
+def Rpass_analysis_EQ : Joined<["-"], "Rpass-analysis=">, Group<R_value_Group>,
+ HelpText<"Report transformation analysis from optimization passes whose "
+ "name matches the given POSIX regular expression">;
+def R_Joined : Joined<["-"], "R">, Group<R_Group>,
+ Visibility<[ClangOption, CLOption, DXCOption]>,
+ MetaVarName<"<remark>">, HelpText<"Enable the specified remark">;
+
+} // let Visibility = [ClangOption, CC1Option, FC1Option, FlangOption]
+
+let Flags = [NoArgumentUnused, HelpHidden] in {
+let Visibility = [ClangOption, CC1Option, FC1Option, FlangOption] in {
+let Group = f_Group in {
+
+def fopenmp_target_debug_EQ : Joined<["-"], "fopenmp-target-debug=">;
+def fopenmp_assume_teams_oversubscription : Flag<["-"], "fopenmp-assume-teams-oversubscription">;
+def fopenmp_assume_threads_oversubscription : Flag<["-"], "fopenmp-assume-threads-oversubscription">;
+def fno_openmp_assume_teams_oversubscription : Flag<["-"], "fno-openmp-assume-teams-oversubscription">;
+def fno_openmp_assume_threads_oversubscription : Flag<["-"], "fno-openmp-assume-threads-oversubscription">;
+def fopenmp_assume_no_thread_state : Flag<["-"], "fopenmp-assume-no-thread-state">,
+ HelpText<"Assert no thread in a parallel region modifies an ICV">,
+ MarshallingInfoFlag<LangOpts<"OpenMPNoThreadState">>;
+def fopenmp_assume_no_nested_parallelism : Flag<["-"], "fopenmp-assume-no-nested-parallelism">,
+ HelpText<"Assert no nested parallel regions in the GPU">,
+ MarshallingInfoFlag<LangOpts<"OpenMPNoNestedParallelism">>;
+
+} // let Group = f_Group
+} // let Visibility = [ClangOption, CC1Option, FC1Option]
+} // let Flags = [NoArgumentUnused, HelpHidden]
+
+def fopenmp_offload_mandatory : Flag<["-"], "fopenmp-offload-mandatory">, Group<f_Group>,
+ Flags<[NoArgumentUnused]>, Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Do not create a host fallback if offloading to the device fails.">,
+ MarshallingInfoFlag<LangOpts<"OpenMPOffloadMandatory">>;
+def fopenmp_force_usm : Flag<["-"], "fopenmp-force-usm">, Group<f_Group>,
+ Flags<[NoArgumentUnused]>, Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Force behvaior as if the user specified pragma omp requires unified_shared_memory.">,
+ MarshallingInfoFlag<LangOpts<"OpenMPForceUSM">>;
+def fopenmp_target_jit : Flag<["-"], "fopenmp-target-jit">, Group<f_Group>,
+ Flags<[NoArgumentUnused]>, Visibility<[ClangOption, CLOption]>,
+ HelpText<"Emit code that can be JIT compiled for OpenMP offloading. Implies -foffload-lto=full">;
+def fno_openmp_target_jit : Flag<["-"], "fno-openmp-target-jit">, Group<f_Group>,
+ Flags<[NoArgumentUnused, HelpHidden]>,
+ Visibility<[ClangOption, CLOption]>;
+def fopenmp_target_new_runtime : Flag<["-"], "fopenmp-target-new-runtime">,
+ Group<f_Group>, Flags<[HelpHidden]>, Visibility<[ClangOption, CC1Option]>;
+def fno_openmp_target_new_runtime : Flag<["-"], "fno-openmp-target-new-runtime">,
+ Group<f_Group>, Flags<[HelpHidden]>, Visibility<[ClangOption, CC1Option]>;
defm openmp_optimistic_collapse : BoolFOption<"openmp-optimistic-collapse",
LangOpts<"OpenMPOptimisticCollapse">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option]>, NegFlag<SetFalse>, BothFlags<[NoArgumentUnused, HelpHidden]>>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option]>,
+ NegFlag<SetFalse>, BothFlags<[NoArgumentUnused, HelpHidden], []>>;
def static_openmp: Flag<["-"], "static-openmp">,
HelpText<"Use the static host OpenMP runtime while linking.">;
-def fno_optimize_sibling_calls : Flag<["-"], "fno-optimize-sibling-calls">, Group<f_Group>;
+def fopenmp_new_driver : Flag<["-"], "fopenmp-new-driver">, Flags<[HelpHidden]>,
+ HelpText<"Use the new driver for OpenMP offloading.">;
+def fno_openmp_new_driver : Flag<["-"], "fno-openmp-new-driver">,
+ Flags<[HelpHidden]>,
+ HelpText<"Don't use the new driver for OpenMP offloading.">;
+def fno_optimize_sibling_calls : Flag<["-"], "fno-optimize-sibling-calls">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Disable tail call optimization, keeping the call stack accurate">,
+ MarshallingInfoFlag<CodeGenOpts<"DisableTailCalls">>;
def foptimize_sibling_calls : Flag<["-"], "foptimize-sibling-calls">, Group<f_Group>;
defm escaping_block_tail_calls : BoolFOption<"escaping-block-tail-calls",
CodeGenOpts<"NoEscapingBlockTailCalls">, DefaultFalse,
- NegFlag<SetTrue, [CC1Option]>, PosFlag<SetFalse>>;
+ NegFlag<SetTrue, [], [ClangOption, CC1Option]>,
+ PosFlag<SetFalse>>;
def force__cpusubtype__ALL : Flag<["-"], "force_cpusubtype_ALL">;
def force__flat__namespace : Flag<["-"], "force_flat_namespace">;
def force__load : Separate<["-"], "force_load">;
@@ -2395,114 +3508,147 @@ def force_addr : Joined<["-"], "fforce-addr">, Group<clang_ignored_f_Group>;
def foutput_class_dir_EQ : Joined<["-"], "foutput-class-dir=">, Group<f_Group>;
def fpack_struct : Flag<["-"], "fpack-struct">, Group<f_Group>;
def fno_pack_struct : Flag<["-"], "fno-pack-struct">, Group<f_Group>;
-def fpack_struct_EQ : Joined<["-"], "fpack-struct=">, Group<f_Group>, Flags<[CC1Option]>,
+def fpack_struct_EQ : Joined<["-"], "fpack-struct=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Specify the default maximum struct packing alignment">,
MarshallingInfoInt<LangOpts<"PackStruct">>;
-def fmax_type_align_EQ : Joined<["-"], "fmax-type-align=">, Group<f_Group>, Flags<[CC1Option]>,
+def fmax_type_align_EQ : Joined<["-"], "fmax-type-align=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Specify the maximum alignment to enforce on pointers lacking an explicit alignment">,
MarshallingInfoInt<LangOpts<"MaxTypeAlign">>;
def fno_max_type_align : Flag<["-"], "fno-max-type-align">, Group<f_Group>;
defm pascal_strings : BoolFOption<"pascal-strings",
LangOpts<"PascalStrings">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Recognize and construct Pascal-style string literals">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Recognize and construct Pascal-style string literals">,
NegFlag<SetFalse>>;
// Note: This flag has different semantics in the driver and in -cc1. The driver accepts -fpatchable-function-entry=M,N
// and forwards it to -cc1 as -fpatchable-function-entry=M and -fpatchable-function-entry-offset=N. In -cc1, both flags
// are treated as a single integer.
-def fpatchable_function_entry_EQ : Joined<["-"], "fpatchable-function-entry=">, Group<f_Group>, Flags<[CC1Option]>,
+def fpatchable_function_entry_EQ : Joined<["-"], "fpatchable-function-entry=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
MetaVarName<"<N,M>">, HelpText<"Generate M NOPs before function entry and N-M NOPs after function entry">,
MarshallingInfoInt<CodeGenOpts<"PatchableFunctionEntryCount">>;
-def fpcc_struct_return : Flag<["-"], "fpcc-struct-return">, Group<f_Group>, Flags<[CC1Option]>,
+def fms_hotpatch : Flag<["-"], "fms-hotpatch">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option, CLOption]>,
+ HelpText<"Ensure that all functions can be hotpatched at runtime">,
+ MarshallingInfoFlag<CodeGenOpts<"HotPatch">>;
+def fpcc_struct_return : Flag<["-"], "fpcc-struct-return">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Override the default ABI to return all structs on the stack">;
def fpch_preprocess : Flag<["-"], "fpch-preprocess">, Group<f_Group>;
-def fpic : Flag<["-"], "fpic">, Group<f_Group>;
-def fno_pic : Flag<["-"], "fno-pic">, Group<f_Group>;
-def fpie : Flag<["-"], "fpie">, Group<f_Group>;
-def fno_pie : Flag<["-"], "fno-pie">, Group<f_Group>;
-def fdirect_access_external_data : Flag<["-"], "fdirect-access-external-data">, Group<f_Group>, Flags<[CC1Option]>,
+defm pic_data_is_text_relative : SimpleMFlag<"pic-data-is-text-relative",
+ "Assume", "Don't assume", " data segments are relative to text segment">;
+def fdirect_access_external_data : Flag<["-"], "fdirect-access-external-data">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Don't use GOT indirection to reference external data symbols">;
-def fno_direct_access_external_data : Flag<["-"], "fno-direct-access-external-data">, Group<f_Group>, Flags<[CC1Option]>,
+def fno_direct_access_external_data : Flag<["-"], "fno-direct-access-external-data">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Use GOT indirection to reference external data symbols">;
defm plt : BoolFOption<"plt",
CodeGenOpts<"NoPLT">, DefaultFalse,
- NegFlag<SetTrue, [CC1Option], "Use GOT indirection instead of PLT to make external function calls (x86 only)">,
+ NegFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Use GOT indirection instead of PLT to make external function calls (x86 only)">,
PosFlag<SetFalse>>;
defm ropi : BoolFOption<"ropi",
LangOpts<"ROPI">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Generate read-only position independent code (ARM only)">,
- NegFlag<SetFalse>>;
+ PosFlag<SetTrue, [], [ClangOption, FlangOption, CC1Option],
+ "Generate read-only position independent code (ARM only)">,
+ NegFlag<SetFalse, [], [ClangOption, FlangOption, CC1Option]>>;
defm rwpi : BoolFOption<"rwpi",
LangOpts<"RWPI">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Generate read-write position independent code (ARM only)">,
- NegFlag<SetFalse>>;
-def fplugin_EQ : Joined<["-"], "fplugin=">, Group<f_Group>, Flags<[NoXarchOption]>, MetaVarName<"<dsopath>">,
+ PosFlag<SetTrue, [], [ClangOption, FlangOption, CC1Option],
+ "Generate read-write position independent code (ARM only)">,
+ NegFlag<SetFalse, [], [ClangOption, FlangOption, CC1Option]>>;
+def fplugin_EQ : Joined<["-"], "fplugin=">, Group<f_Group>,
+ Flags<[NoXarchOption]>, MetaVarName<"<dsopath>">,
HelpText<"Load the named plugin (dynamic shared object)">;
+def fplugin_arg : Joined<["-"], "fplugin-arg-">,
+ MetaVarName<"<name>-<arg>">,
+ HelpText<"Pass <arg> to plugin <name>">;
def fpass_plugin_EQ : Joined<["-"], "fpass-plugin=">,
- Group<f_Group>, Flags<[CC1Option]>, MetaVarName<"<dsopath>">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
+ MetaVarName<"<dsopath>">,
HelpText<"Load pass plugin from a dynamic shared object file (only with new pass manager).">,
MarshallingInfoStringVector<CodeGenOpts<"PassPlugins">>;
defm preserve_as_comments : BoolFOption<"preserve-as-comments",
CodeGenOpts<"PreserveAsmComments">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option], "Do not preserve comments in inline assembly">,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option],
+ "Do not preserve comments in inline assembly">,
PosFlag<SetTrue>>;
def framework : Separate<["-"], "framework">, Flags<[LinkerInput]>;
def frandom_seed_EQ : Joined<["-"], "frandom-seed=">, Group<clang_ignored_f_Group>;
-def freg_struct_return : Flag<["-"], "freg-struct-return">, Group<f_Group>, Flags<[CC1Option]>,
+def freg_struct_return : Flag<["-"], "freg-struct-return">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Override the default ABI to return small structs in registers">;
defm rtti : BoolFOption<"rtti",
LangOpts<"RTTI">, Default<cplusplus.KeyPath>,
- NegFlag<SetFalse, [CC1Option], "Disable generation of rtti information">,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option],
+ "Disable generation of rtti information">,
PosFlag<SetTrue>>, ShouldParseIf<cplusplus.KeyPath>;
defm rtti_data : BoolFOption<"rtti-data",
LangOpts<"RTTIData">, Default<frtti.KeyPath>,
- NegFlag<SetFalse, [CC1Option], "Disable generation of RTTI data">,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option],
+ "Disable generation of RTTI data">,
PosFlag<SetTrue>>, ShouldParseIf<frtti.KeyPath>;
def : Flag<["-"], "fsched-interblock">, Group<clang_ignored_f_Group>;
defm short_enums : BoolFOption<"short-enums",
LangOpts<"ShortEnums">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Allocate to an enum type only as many bytes as it"
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Allocate to an enum type only as many bytes as it"
" needs for the declared range of possible values">,
NegFlag<SetFalse>>;
defm char8__t : BoolFOption<"char8_t",
LangOpts<"Char8">, Default<cpp20.KeyPath>,
- PosFlag<SetTrue, [], "Enable">, NegFlag<SetFalse, [], "Disable">,
- BothFlags<[CC1Option], " C++ builtin type char8_t">>;
+ PosFlag<SetTrue, [], [ClangOption], "Enable">,
+ NegFlag<SetFalse, [], [ClangOption], "Disable">,
+ BothFlags<[], [ClangOption, CC1Option], " C++ builtin type char8_t">>;
def fshort_wchar : Flag<["-"], "fshort-wchar">, Group<f_Group>,
HelpText<"Force wchar_t to be a short unsigned int">;
def fno_short_wchar : Flag<["-"], "fno-short-wchar">, Group<f_Group>,
HelpText<"Force wchar_t to be an unsigned int">;
-def fshow_overloads_EQ : Joined<["-"], "fshow-overloads=">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Which overload candidates to show when overload resolution fails: "
- "best|all; defaults to all">, Values<"best,all">,
+def fshow_overloads_EQ : Joined<["-"], "fshow-overloads=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Which overload candidates to show when overload resolution fails. Defaults to 'all'">,
+ Values<"best,all">,
NormalizedValues<["Ovl_Best", "Ovl_All"]>,
MarshallingInfoEnum<DiagnosticOpts<"ShowOverloads">, "Ovl_All">;
defm show_column : BoolFOption<"show-column",
DiagnosticOpts<"ShowColumn">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option], "Do not include column number on diagnostics">,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option],
+ "Do not include column number on diagnostics">,
PosFlag<SetTrue>>;
defm show_source_location : BoolFOption<"show-source-location",
DiagnosticOpts<"ShowLocation">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option], "Do not include source location information with diagnostics">,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option],
+ "Do not include source location information with diagnostics">,
PosFlag<SetTrue>>;
defm spell_checking : BoolFOption<"spell-checking",
LangOpts<"SpellChecking">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option], "Disable spell-checking">, PosFlag<SetTrue>>;
-def fspell_checking_limit_EQ : Joined<["-"], "fspell-checking-limit=">, Group<f_Group>;
+ NegFlag<SetFalse, [], [ClangOption, CC1Option], "Disable spell-checking">,
+ PosFlag<SetTrue>>;
+def fspell_checking_limit_EQ : Joined<["-"], "fspell-checking-limit=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Set the maximum number of times to perform spell checking on unrecognized identifiers (0 = no limit)">,
+ MarshallingInfoInt<DiagnosticOpts<"SpellCheckingLimit">, "DiagnosticOptions::DefaultSpellCheckingLimit">;
def fsigned_bitfields : Flag<["-"], "fsigned-bitfields">, Group<f_Group>;
defm signed_char : BoolFOption<"signed-char",
LangOpts<"CharIsSigned">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option], "char is unsigned">, PosFlag<SetTrue, [], "char is signed">>,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option], "char is unsigned">,
+ PosFlag<SetTrue, [], [ClangOption], "char is signed">>,
ShouldParseIf<!strconcat("!", open_cl.KeyPath)>;
defm split_stack : BoolFOption<"split-stack",
CodeGenOpts<"EnableSegmentedStacks">, DefaultFalse,
- NegFlag<SetFalse, [], "Wouldn't use segmented stack">,
- PosFlag<SetTrue, [CC1Option], "Use segmented stack">>;
+ NegFlag<SetFalse, [], [ClangOption], "Wouldn't use segmented stack">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Use segmented stack">>;
def fstack_protector_all : Flag<["-"], "fstack-protector-all">, Group<f_Group>,
HelpText<"Enable stack protectors for all functions">;
defm stack_clash_protection : BoolFOption<"stack-clash-protection",
CodeGenOpts<"StackClashProtector">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Enable">, NegFlag<SetFalse, [], "Disable">,
- BothFlags<[], " stack clash protection">>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Enable">,
+ NegFlag<SetFalse, [], [ClangOption], "Disable">,
+ BothFlags<[], [ClangOption], " stack clash protection">>,
+ DocBrief<"Instrument stack allocation to prevent stack clash attacks">;
def fstack_protector_strong : Flag<["-"], "fstack-protector-strong">, Group<f_Group>,
HelpText<"Enable stack protectors for some functions vulnerable to stack smashing. "
"Compared to -fstack-protector, this uses a stronger heuristic "
@@ -2520,61 +3666,91 @@ def fstack_protector : Flag<["-"], "fstack-protector">, Group<f_Group>,
"overwrite the guard value before overwriting the function's return "
"address. The reference stack guard value is stored in a global variable.">;
def ftrivial_auto_var_init : Joined<["-"], "ftrivial-auto-var-init=">, Group<f_Group>,
- Flags<[CC1Option, CoreOption]>, HelpText<"Initialize trivial automatic stack variables: uninitialized (default)"
- " | pattern">, Values<"uninitialized,zero,pattern">,
+ Visibility<[ClangOption, CC1Option, CLOption, DXCOption]>,
+ HelpText<"Initialize trivial automatic stack variables. Defaults to 'uninitialized'">,
+ Values<"uninitialized,zero,pattern">,
NormalizedValuesScope<"LangOptions::TrivialAutoVarInitKind">,
NormalizedValues<["Uninitialized", "Zero", "Pattern"]>,
MarshallingInfoEnum<LangOpts<"TrivialAutoVarInit">, "Uninitialized">;
-def enable_trivial_var_init_zero : Flag<["-"], "enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang">,
- Flags<[CC1Option, CoreOption]>,
- HelpText<"Trivial automatic variable initialization to zero is only here for benchmarks, it'll eventually be removed, and I'm OK with that because I'm only using it to benchmark">;
def ftrivial_auto_var_init_stop_after : Joined<["-"], "ftrivial-auto-var-init-stop-after=">, Group<f_Group>,
- Flags<[CC1Option, CoreOption]>, HelpText<"Stop initializing trivial automatic stack variables after the specified number of instances">,
+ Visibility<[ClangOption, CC1Option, CLOption, DXCOption]>,
+ HelpText<"Stop initializing trivial automatic stack variables after the specified number of instances">,
MarshallingInfoInt<LangOpts<"TrivialAutoVarInitStopAfter">>;
-def fstandalone_debug : Flag<["-"], "fstandalone-debug">, Group<f_Group>, Flags<[CoreOption]>,
+def ftrivial_auto_var_init_max_size : Joined<["-"], "ftrivial-auto-var-init-max-size=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option, CLOption, DXCOption]>,
+ HelpText<"Stop initializing trivial automatic stack variables if var size exceeds the specified number of instances (in bytes)">,
+ MarshallingInfoInt<LangOpts<"TrivialAutoVarInitMaxSize">>;
+def fstandalone_debug : Flag<["-"], "fstandalone-debug">, Group<f_Group>,
+ Visibility<[ClangOption, CLOption, DXCOption]>,
HelpText<"Emit full debug info for all types used by the program">;
-def fno_standalone_debug : Flag<["-"], "fno-standalone-debug">, Group<f_Group>, Flags<[CoreOption]>,
+def fno_standalone_debug : Flag<["-"], "fno-standalone-debug">, Group<f_Group>,
+ Visibility<[ClangOption, CLOption, DXCOption]>,
HelpText<"Limit debug information produced to reduce size of debug binary">;
-def flimit_debug_info : Flag<["-"], "flimit-debug-info">, Flags<[CoreOption]>, Alias<fno_standalone_debug>;
-def fno_limit_debug_info : Flag<["-"], "fno-limit-debug-info">, Flags<[CoreOption]>, Alias<fstandalone_debug>;
-def fdebug_macro : Flag<["-"], "fdebug-macro">, Group<f_Group>, Flags<[CoreOption]>,
+def flimit_debug_info : Flag<["-"], "flimit-debug-info">,
+ Visibility<[ClangOption, CLOption, DXCOption]>, Alias<fno_standalone_debug>;
+def fno_limit_debug_info : Flag<["-"], "fno-limit-debug-info">,
+ Visibility<[ClangOption, CLOption, DXCOption]>, Alias<fstandalone_debug>;
+def fdebug_macro : Flag<["-"], "fdebug-macro">, Group<f_Group>,
+ Visibility<[ClangOption, CLOption, DXCOption]>,
HelpText<"Emit macro debug information">;
-def fno_debug_macro : Flag<["-"], "fno-debug-macro">, Group<f_Group>, Flags<[CoreOption]>,
+def fno_debug_macro : Flag<["-"], "fno-debug-macro">, Group<f_Group>,
+ Visibility<[ClangOption, CLOption, DXCOption]>,
HelpText<"Do not emit macro debug information">;
def fstrict_aliasing : Flag<["-"], "fstrict-aliasing">, Group<f_Group>,
- Flags<[NoXarchOption, CoreOption]>;
-def fstrict_enums : Flag<["-"], "fstrict-enums">, Group<f_Group>, Flags<[CC1Option]>,
+ Visibility<[ClangOption, CLOption, DXCOption]>,
+ HelpText<"Enable optimizations based on strict aliasing rules">;
+def fstrict_enums : Flag<["-"], "fstrict-enums">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable optimizations based on the strict definition of an enum's "
"value range">,
MarshallingInfoFlag<CodeGenOpts<"StrictEnums">>;
defm strict_vtable_pointers : BoolFOption<"strict-vtable-pointers",
CodeGenOpts<"StrictVTablePointers">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Enable optimizations based on the strict rules for"
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Enable optimizations based on the strict rules for"
" overwriting polymorphic C++ objects">,
NegFlag<SetFalse>>;
def fstrict_overflow : Flag<["-"], "fstrict-overflow">, Group<f_Group>;
+def fdriver_only : Flag<["-"], "fdriver-only">, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CLOption, DXCOption]>,
+ Group<Action_Group>, HelpText<"Only run the driver.">;
def fsyntax_only : Flag<["-"], "fsyntax-only">,
- Flags<[NoXarchOption,CoreOption,CC1Option,FC1Option]>, Group<Action_Group>;
+ Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CLOption, DXCOption, CC1Option, FC1Option, FlangOption]>,
+ Group<Action_Group>,
+ HelpText<"Run the preprocessor, parser and semantic analysis stages">;
def ftabstop_EQ : Joined<["-"], "ftabstop=">, Group<f_Group>;
-def ftemplate_depth_EQ : Joined<["-"], "ftemplate-depth=">, Group<f_Group>;
-def ftemplate_depth_ : Joined<["-"], "ftemplate-depth-">, Group<f_Group>;
-def ftemplate_backtrace_limit_EQ : Joined<["-"], "ftemplate-backtrace-limit=">,
- Group<f_Group>;
-def foperator_arrow_depth_EQ : Joined<["-"], "foperator-arrow-depth=">,
- Group<f_Group>;
+def ftemplate_depth_EQ : Joined<["-"], "ftemplate-depth=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Set the maximum depth of recursive template instantiation">,
+ MarshallingInfoInt<LangOpts<"InstantiationDepth">, "1024">;
+def : Joined<["-"], "ftemplate-depth-">, Group<f_Group>, Alias<ftemplate_depth_EQ>;
+def ftemplate_backtrace_limit_EQ : Joined<["-"], "ftemplate-backtrace-limit=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Set the maximum number of entries to print in a template instantiation backtrace (0 = no limit)">,
+ MarshallingInfoInt<DiagnosticOpts<"TemplateBacktraceLimit">, "DiagnosticOptions::DefaultTemplateBacktraceLimit">;
+def foperator_arrow_depth_EQ : Joined<["-"], "foperator-arrow-depth=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Maximum number of 'operator->'s to call for a member access">,
+ MarshallingInfoInt<LangOpts<"ArrowDepth">, "256">;
def fsave_optimization_record : Flag<["-"], "fsave-optimization-record">,
+ Visibility<[ClangOption, FlangOption]>,
Group<f_Group>, HelpText<"Generate a YAML optimization record file">;
def fsave_optimization_record_EQ : Joined<["-"], "fsave-optimization-record=">,
+ Visibility<[ClangOption, FlangOption]>,
Group<f_Group>, HelpText<"Generate an optimization record file in a specific format">,
MetaVarName<"<format>">;
def fno_save_optimization_record : Flag<["-"], "fno-save-optimization-record">,
- Group<f_Group>, Flags<[NoArgumentUnused]>;
+ Group<f_Group>, Flags<[NoArgumentUnused]>,
+ Visibility<[ClangOption, FlangOption]>;
def foptimization_record_file_EQ : Joined<["-"], "foptimization-record-file=">,
+ Visibility<[ClangOption, FlangOption]>,
Group<f_Group>,
HelpText<"Specify the output name of the file containing the optimization remarks. Implies -fsave-optimization-record. On Darwin platforms, this cannot be used with multiple -arch <arch> options.">,
MetaVarName<"<file>">;
def foptimization_record_passes_EQ : Joined<["-"], "foptimization-record-passes=">,
+ Visibility<[ClangOption, FlangOption]>,
Group<f_Group>,
HelpText<"Only include passes which match a specified regular expression in the generated optimization record (by default, include all passes)">,
MetaVarName<"<regex>">;
@@ -2592,7 +3768,8 @@ def : Flag<["-"], "fno-tree-slp-vectorize">, Alias<fno_slp_vectorize>;
def Wlarge_by_value_copy_def : Flag<["-"], "Wlarge-by-value-copy">,
HelpText<"Warn if a function definition returns or accepts an object larger "
"in bytes than a given value">, Flags<[HelpHidden]>;
-def Wlarge_by_value_copy_EQ : Joined<["-"], "Wlarge-by-value-copy=">, Flags<[CC1Option]>,
+def Wlarge_by_value_copy_EQ : Joined<["-"], "Wlarge-by-value-copy=">,
+ Visibility<[ClangOption, CC1Option]>,
MarshallingInfoInt<LangOpts<"NumLargeByValueCopy">>;
// These "special" warning flags are effectively processed as f_Group flags by the driver:
@@ -2603,79 +3780,92 @@ def Wlarger_than_ : Joined<["-"], "Wlarger-than-">, Alias<Wlarger_than_EQ>;
// This is converted to -fwarn-stack-size=N and also passed through by the driver.
// FIXME: The driver should strip out the =<value> when passing W_value_Group through.
def Wframe_larger_than_EQ : Joined<["-"], "Wframe-larger-than=">, Group<W_value_Group>,
- Flags<[NoXarchOption, CC1Option]>;
+ Visibility<[ClangOption, CC1Option]>;
def Wframe_larger_than : Flag<["-"], "Wframe-larger-than">, Alias<Wframe_larger_than_EQ>;
def : Flag<["-"], "fterminated-vtables">, Alias<fapple_kext>;
defm threadsafe_statics : BoolFOption<"threadsafe-statics",
LangOpts<"ThreadsafeStatics">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option], "Do not emit code to make initialization of local statics thread safe">,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option],
+ "Do not emit code to make initialization of local statics thread safe">,
PosFlag<SetTrue>>;
-def ftime_report : Flag<["-"], "ftime-report">, Group<f_Group>, Flags<[CC1Option]>,
+def ftime_report : Flag<["-"], "ftime-report">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
MarshallingInfoFlag<CodeGenOpts<"TimePasses">>;
def ftime_report_EQ: Joined<["-"], "ftime-report=">, Group<f_Group>,
- Flags<[CC1Option]>, Values<"per-pass,per-pass-run">,
+ Visibility<[ClangOption, CC1Option]>, Values<"per-pass,per-pass-run">,
MarshallingInfoFlag<CodeGenOpts<"TimePassesPerRun">>,
- HelpText<"(For new pass manager) \"per-pass\": one report for each pass; "
- "\"per-pass-run\": one report for each pass invocation">;
+ HelpText<"(For new pass manager) 'per-pass': one report for each pass; "
+ "'per-pass-run': one report for each pass invocation">;
def ftime_trace : Flag<["-"], "ftime-trace">, Group<f_Group>,
HelpText<"Turn on time profiler. Generates JSON file based on output filename.">,
DocBrief<[{
Turn on time profiler. Generates JSON file based on output filename. Results
can be analyzed with chrome://tracing or `Speedscope App
<https://www.speedscope.app>`_ for flamegraph visualization.}]>,
- Flags<[CC1Option, CoreOption]>,
- MarshallingInfoFlag<FrontendOpts<"TimeTrace">>;
+ Visibility<[ClangOption, CLOption, DXCOption]>;
def ftime_trace_granularity_EQ : Joined<["-"], "ftime-trace-granularity=">, Group<f_Group>,
HelpText<"Minimum time granularity (in microseconds) traced by time profiler">,
- Flags<[CC1Option, CoreOption]>,
+ Visibility<[ClangOption, CC1Option, CLOption, DXCOption]>,
MarshallingInfoInt<FrontendOpts<"TimeTraceGranularity">, "500u">;
+def ftime_trace_EQ : Joined<["-"], "ftime-trace=">, Group<f_Group>,
+ HelpText<"Similar to -ftime-trace. Specify the JSON file or a directory which will contain the JSON file">,
+ Visibility<[ClangOption, CC1Option, CLOption, DXCOption]>,
+ MarshallingInfoString<FrontendOpts<"TimeTracePath">>;
def fproc_stat_report : Joined<["-"], "fproc-stat-report">, Group<f_Group>,
HelpText<"Print subprocess statistics">;
def fproc_stat_report_EQ : Joined<["-"], "fproc-stat-report=">, Group<f_Group>,
HelpText<"Save subprocess statistics to the given file">;
-def ftlsmodel_EQ : Joined<["-"], "ftls-model=">, Group<f_Group>, Flags<[CC1Option]>,
+def ftlsmodel_EQ : Joined<["-"], "ftls-model=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
Values<"global-dynamic,local-dynamic,initial-exec,local-exec">,
NormalizedValuesScope<"CodeGenOptions">,
NormalizedValues<["GeneralDynamicTLSModel", "LocalDynamicTLSModel", "InitialExecTLSModel", "LocalExecTLSModel"]>,
MarshallingInfoEnum<CodeGenOpts<"DefaultTLSModel">, "GeneralDynamicTLSModel">;
-def ftrapv : Flag<["-"], "ftrapv">, Group<f_Group>, Flags<[CC1Option]>,
+def ftrapv : Flag<["-"], "ftrapv">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Trap on integer overflow">;
def ftrapv_handler_EQ : Joined<["-"], "ftrapv-handler=">, Group<f_Group>,
MetaVarName<"<function name>">,
HelpText<"Specify the function to be called on overflow">;
-def ftrapv_handler : Separate<["-"], "ftrapv-handler">, Group<f_Group>, Flags<[CC1Option]>;
-def ftrap_function_EQ : Joined<["-"], "ftrap-function=">, Group<f_Group>, Flags<[CC1Option]>,
+def ftrapv_handler : Separate<["-"], "ftrapv-handler">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>;
+def ftrap_function_EQ : Joined<["-"], "ftrap-function=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Issue call to specified function rather than a trap instruction">,
MarshallingInfoString<CodeGenOpts<"TrapFuncName">>;
-def funit_at_a_time : Flag<["-"], "funit-at-a-time">, Group<f_Group>;
def funroll_loops : Flag<["-"], "funroll-loops">, Group<f_Group>,
- HelpText<"Turn on loop unroller">, Flags<[CC1Option]>;
+ HelpText<"Turn on loop unroller">, Visibility<[ClangOption, CC1Option]>;
def fno_unroll_loops : Flag<["-"], "fno-unroll-loops">, Group<f_Group>,
- HelpText<"Turn off loop unroller">, Flags<[CC1Option]>;
+ HelpText<"Turn off loop unroller">, Visibility<[ClangOption, CC1Option]>;
defm reroll_loops : BoolFOption<"reroll-loops",
CodeGenOpts<"RerollLoops">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Turn on loop reroller">, NegFlag<SetFalse>>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Turn on loop reroller">,
+ NegFlag<SetFalse>>;
def ffinite_loops: Flag<["-"], "ffinite-loops">, Group<f_Group>,
- HelpText<"Assume all loops are finite.">, Flags<[CC1Option]>;
+ HelpText<"Assume all loops are finite.">, Visibility<[ClangOption, CC1Option]>;
def fno_finite_loops: Flag<["-"], "fno-finite-loops">, Group<f_Group>,
- HelpText<"Do not assume that any loop is finite.">, Flags<[CC1Option]>;
+ HelpText<"Do not assume that any loop is finite.">,
+ Visibility<[ClangOption, CC1Option]>;
def ftrigraphs : Flag<["-"], "ftrigraphs">, Group<f_Group>,
- HelpText<"Process trigraph sequences">, Flags<[CC1Option]>;
+ HelpText<"Process trigraph sequences">, Visibility<[ClangOption, CC1Option]>;
def fno_trigraphs : Flag<["-"], "fno-trigraphs">, Group<f_Group>,
- HelpText<"Do not process trigraph sequences">, Flags<[CC1Option]>;
+ HelpText<"Do not process trigraph sequences">,
+ Visibility<[ClangOption, CC1Option]>;
def funsigned_bitfields : Flag<["-"], "funsigned-bitfields">, Group<f_Group>;
def funsigned_char : Flag<["-"], "funsigned-char">, Group<f_Group>;
def fno_unsigned_char : Flag<["-"], "fno-unsigned-char">;
def funwind_tables : Flag<["-"], "funwind-tables">, Group<f_Group>;
defm register_global_dtors_with_atexit : BoolFOption<"register-global-dtors-with-atexit",
CodeGenOpts<"RegisterGlobalDtorsWithAtExit">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Use">, NegFlag<SetFalse, [], "Don't use">,
- BothFlags<[], " atexit or __cxa_atexit to register global destructors">>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Use">,
+ NegFlag<SetFalse, [], [ClangOption], "Don't use">,
+ BothFlags<[], [ClangOption], " atexit or __cxa_atexit to register global destructors">>;
defm use_init_array : BoolFOption<"use-init-array",
CodeGenOpts<"UseInitArray">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option], "Use .ctors/.dtors instead of .init_array/.fini_array">,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option],
+ "Use .ctors/.dtors instead of .init_array/.fini_array">,
PosFlag<SetTrue>>;
def fno_var_tracking : Flag<["-"], "fno-var-tracking">, Group<clang_ignored_f_Group>;
def fverbose_asm : Flag<["-"], "fverbose-asm">, Group<f_Group>,
@@ -2683,166 +3873,256 @@ def fverbose_asm : Flag<["-"], "fverbose-asm">, Group<f_Group>,
def dA : Flag<["-"], "dA">, Alias<fverbose_asm>;
defm visibility_from_dllstorageclass : BoolFOption<"visibility-from-dllstorageclass",
LangOpts<"VisibilityFromDLLStorageClass">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Set the visiblity of symbols in the generated code from their DLL storage class">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Override the visibility of globals based on their final DLL storage class.">,
NegFlag<SetFalse>>;
-def fvisibility_dllexport_EQ : Joined<["-"], "fvisibility-dllexport=">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"The visibility for dllexport defintions [-fvisibility-from-dllstorageclass]">,
- MarshallingInfoVisibility<LangOpts<"DLLExportVisibility">, "DefaultVisibility">,
+class MarshallingInfoVisibilityFromDLLStorage<KeyPathAndMacro kpm, code default>
+ : MarshallingInfoEnum<kpm, default>,
+ Values<"keep,hidden,protected,default">,
+ NormalizedValuesScope<"LangOptions::VisibilityFromDLLStorageClassKinds">,
+ NormalizedValues<["Keep", "Hidden", "Protected", "Default"]> {}
+def fvisibility_dllexport_EQ : Joined<["-"], "fvisibility-dllexport=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"The visibility for dllexport definitions. If Keep is specified the visibility is not adjusted [-fvisibility-from-dllstorageclass]">,
+ MarshallingInfoVisibilityFromDLLStorage<LangOpts<"DLLExportVisibility">, "Default">,
ShouldParseIf<fvisibility_from_dllstorageclass.KeyPath>;
-def fvisibility_nodllstorageclass_EQ : Joined<["-"], "fvisibility-nodllstorageclass=">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"The visibility for defintiions without an explicit DLL export class [-fvisibility-from-dllstorageclass]">,
- MarshallingInfoVisibility<LangOpts<"NoDLLStorageClassVisibility">, "HiddenVisibility">,
+def fvisibility_nodllstorageclass_EQ : Joined<["-"], "fvisibility-nodllstorageclass=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"The visibility for definitions without an explicit DLL storage class. If Keep is specified the visibility is not adjusted [-fvisibility-from-dllstorageclass]">,
+ MarshallingInfoVisibilityFromDLLStorage<LangOpts<"NoDLLStorageClassVisibility">, "Hidden">,
ShouldParseIf<fvisibility_from_dllstorageclass.KeyPath>;
-def fvisibility_externs_dllimport_EQ : Joined<["-"], "fvisibility-externs-dllimport=">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"The visibility for dllimport external declarations [-fvisibility-from-dllstorageclass]">,
- MarshallingInfoVisibility<LangOpts<"ExternDeclDLLImportVisibility">, "DefaultVisibility">,
+def fvisibility_externs_dllimport_EQ : Joined<["-"], "fvisibility-externs-dllimport=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"The visibility for dllimport external declarations. If Keep is specified the visibility is not adjusted [-fvisibility-from-dllstorageclass]">,
+ MarshallingInfoVisibilityFromDLLStorage<LangOpts<"ExternDeclDLLImportVisibility">, "Default">,
ShouldParseIf<fvisibility_from_dllstorageclass.KeyPath>;
-def fvisibility_externs_nodllstorageclass_EQ : Joined<["-"], "fvisibility-externs-nodllstorageclass=">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"The visibility for external declarations without an explicit DLL dllstorageclass [-fvisibility-from-dllstorageclass]">,
- MarshallingInfoVisibility<LangOpts<"ExternDeclNoDLLStorageClassVisibility">, "HiddenVisibility">,
+def fvisibility_externs_nodllstorageclass_EQ : Joined<["-"], "fvisibility-externs-nodllstorageclass=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"The visibility for external declarations without an explicit DLL storage class. If Keep is specified the visibility is not adjusted [-fvisibility-from-dllstorageclass]">,
+ MarshallingInfoVisibilityFromDLLStorage<LangOpts<"ExternDeclNoDLLStorageClassVisibility">, "Hidden">,
ShouldParseIf<fvisibility_from_dllstorageclass.KeyPath>;
def fvisibility_EQ : Joined<["-"], "fvisibility=">, Group<f_Group>,
- HelpText<"Set the default symbol visibility for all global declarations">, Values<"hidden,default">;
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Set the default symbol visibility for all global definitions">,
+ MarshallingInfoVisibility<LangOpts<"ValueVisibilityMode">, "DefaultVisibility">;
defm visibility_inlines_hidden : BoolFOption<"visibility-inlines-hidden",
LangOpts<"InlineVisibilityHidden">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Give inline C++ member functions hidden visibility by default">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Give inline C++ member functions hidden visibility by default">,
NegFlag<SetFalse>>;
defm visibility_inlines_hidden_static_local_var : BoolFOption<"visibility-inlines-hidden-static-local-var",
LangOpts<"VisibilityInlinesHiddenStaticLocalVar">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "When -fvisibility-inlines-hidden is enabled, static variables in"
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "When -fvisibility-inlines-hidden is enabled, static variables in"
" inline C++ member functions will also be given hidden visibility by default">,
- NegFlag<SetFalse, [], "Disables -fvisibility-inlines-hidden-static-local-var"
- " (this is the default on non-darwin targets)">, BothFlags<[CC1Option]>>;
+ NegFlag<SetFalse, [], [ClangOption], "Disables -fvisibility-inlines-hidden-static-local-var"
+ " (this is the default on non-darwin targets)">, BothFlags<
+ [], [ClangOption, CC1Option]>>;
def fvisibility_ms_compat : Flag<["-"], "fvisibility-ms-compat">, Group<f_Group>,
HelpText<"Give global types 'default' visibility and global functions and "
"variables 'hidden' visibility by default">;
def fvisibility_global_new_delete_hidden : Flag<["-"], "fvisibility-global-new-delete-hidden">, Group<f_Group>,
- HelpText<"Give global C++ operator new and delete declarations hidden visibility">, Flags<[CC1Option]>,
- MarshallingInfoFlag<LangOpts<"GlobalAllocationFunctionVisibilityHidden">>;
+ HelpText<"Give global C++ operator new and delete declarations hidden visibility">;
+class MarshallingInfoVisibilityGlobalNewDelete<KeyPathAndMacro kpm, code default>
+ : MarshallingInfoEnum<kpm, default>,
+ Values<"force-default,force-protected,force-hidden,source">,
+ NormalizedValuesScope<"LangOptions::VisibilityForcedKinds">,
+ NormalizedValues<["ForceDefault", "ForceProtected", "ForceHidden", "Source"]> {}
+def fvisibility_global_new_delete_EQ : Joined<["-"], "fvisibility-global-new-delete=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"The visibility for global C++ operator new and delete declarations. If 'source' is specified the visibility is not adjusted">,
+ MarshallingInfoVisibilityGlobalNewDelete<LangOpts<"GlobalAllocationFunctionVisibility">, "ForceDefault">;
+def mdefault_visibility_export_mapping_EQ : Joined<["-"], "mdefault-visibility-export-mapping=">,
+ Values<"none,explicit,all">,
+ NormalizedValuesScope<"LangOptions::DefaultVisiblityExportMapping">,
+ NormalizedValues<["None", "Explicit", "All"]>,
+ HelpText<"Mapping between default visibility and export">,
+ Group<m_Group>, Flags<[TargetSpecific]>,
+ Visibility<[ClangOption, CC1Option]>,
+ MarshallingInfoEnum<LangOpts<"DefaultVisibilityExportMapping">,"None">;
+defm new_infallible : BoolFOption<"new-infallible",
+ LangOpts<"NewInfallible">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption], "Enable">,
+ NegFlag<SetFalse, [], [ClangOption], "Disable">,
+ BothFlags<[], [ClangOption, CC1Option],
+ " treating throwing global C++ operator new as always returning valid memory "
+ "(annotates with __attribute__((returns_nonnull)) and throw()). This is detectable in source.">>;
defm whole_program_vtables : BoolFOption<"whole-program-vtables",
CodeGenOpts<"WholeProgramVTables">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Enables whole-program vtable optimization. Requires -flto">,
- NegFlag<SetFalse>, BothFlags<[CoreOption]>>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Enables whole-program vtable optimization. Requires -flto">,
+ NegFlag<SetFalse>, BothFlags<[], [ClangOption, CLOption]>>;
defm split_lto_unit : BoolFOption<"split-lto-unit",
CodeGenOpts<"EnableSplitLTOUnit">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Enables splitting of the LTO unit">,
- NegFlag<SetFalse>, BothFlags<[CoreOption]>>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Enables splitting of the LTO unit">,
+ NegFlag<SetFalse>, BothFlags<[], [ClangOption, CLOption]>>;
defm force_emit_vtables : BoolFOption<"force-emit-vtables",
CodeGenOpts<"ForceEmitVTables">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Emits more virtual tables to improve devirtualization">,
- NegFlag<SetFalse>, BothFlags<[CoreOption]>>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Emits more virtual tables to improve devirtualization">,
+ NegFlag<SetFalse>, BothFlags<[], [ClangOption, CLOption]>>,
+ DocBrief<[{In order to improve devirtualization, forces emitting of vtables even in
+modules where it isn't necessary. It causes more inline virtual functions
+to be emitted.}]>;
defm virtual_function_elimination : BoolFOption<"virtual-function-elimination",
CodeGenOpts<"VirtualFunctionElimination">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Enables dead virtual function elimination optimization. Requires -flto=full">,
- NegFlag<SetFalse>, BothFlags<[CoreOption]>>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Enables dead virtual function elimination optimization. Requires -flto=full">,
+ NegFlag<SetFalse>, BothFlags<[], [ClangOption, CLOption]>>;
-def fwrapv : Flag<["-"], "fwrapv">, Group<f_Group>, Flags<[CC1Option]>,
+def fwrapv : Flag<["-"], "fwrapv">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Treat signed integer overflow as two's complement">;
-def fwritable_strings : Flag<["-"], "fwritable-strings">, Group<f_Group>, Flags<[CC1Option]>,
+def fwritable_strings : Flag<["-"], "fwritable-strings">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Store string literals as writable data">,
MarshallingInfoFlag<LangOpts<"WritableStrings">>;
defm zero_initialized_in_bss : BoolFOption<"zero-initialized-in-bss",
CodeGenOpts<"NoZeroInitializedInBSS">, DefaultFalse,
- NegFlag<SetTrue, [CC1Option], "Don't place zero initialized data in BSS">,
+ NegFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Don't place zero initialized data in BSS">,
PosFlag<SetFalse>>;
defm function_sections : BoolFOption<"function-sections",
CodeGenOpts<"FunctionSections">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Place each function in its own section">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Place each function in its own section">,
NegFlag<SetFalse>>;
def fbasic_block_sections_EQ : Joined<["-"], "fbasic-block-sections=">, Group<f_Group>,
- Flags<[CC1Option, CC1AsOption]>,
- HelpText<"Place each function's basic blocks in unique sections (ELF Only) : all | labels | none | list=<file>">,
+ Visibility<[ClangOption, CC1Option, CC1AsOption]>,
+ HelpText<"Place each function's basic blocks in unique sections (ELF Only)">,
DocBrief<[{Generate labels for each basic block or place each basic block or a subset of basic blocks in its own section.}]>,
Values<"all,labels,none,list=">,
MarshallingInfoString<CodeGenOpts<"BBSections">, [{"none"}]>;
defm data_sections : BoolFOption<"data-sections",
CodeGenOpts<"DataSections">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Place each data in its own section">, NegFlag<SetFalse>>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Place each data in its own section">,
+ NegFlag<SetFalse>>;
defm stack_size_section : BoolFOption<"stack-size-section",
CodeGenOpts<"StackSizeSection">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Emit section containing metadata on function stack sizes">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Emit section containing metadata on function stack sizes">,
NegFlag<SetFalse>>;
def fstack_usage : Flag<["-"], "fstack-usage">, Group<f_Group>,
HelpText<"Emit .su file containing information on function stack sizes">;
def stack_usage_file : Separate<["-"], "stack-usage-file">,
- Flags<[CC1Option, NoDriverOption]>,
+ Visibility<[CC1Option]>,
HelpText<"Filename (or -) to write stack usage output to">,
MarshallingInfoString<CodeGenOpts<"StackUsageOutput">>;
defm unique_basic_block_section_names : BoolFOption<"unique-basic-block-section-names",
CodeGenOpts<"UniqueBasicBlockSectionNames">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Use unique names for basic block sections (ELF Only)">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Use unique names for basic block sections (ELF Only)">,
NegFlag<SetFalse>>;
defm unique_internal_linkage_names : BoolFOption<"unique-internal-linkage-names",
CodeGenOpts<"UniqueInternalLinkageNames">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Uniqueify Internal Linkage Symbol Names by appending"
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Uniqueify Internal Linkage Symbol Names by appending"
" the MD5 hash of the module path">,
NegFlag<SetFalse>>;
defm unique_section_names : BoolFOption<"unique-section-names",
CodeGenOpts<"UniqueSectionNames">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option], "Don't use unique names for text and data sections">,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option],
+ "Don't use unique names for text and data sections">,
PosFlag<SetTrue>>;
defm split_machine_functions: BoolFOption<"split-machine-functions",
CodeGenOpts<"SplitMachineFunctions">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Enable">, NegFlag<SetFalse, [], "Disable">,
- BothFlags<[], " late function splitting using profile information (x86 ELF)">>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Enable">,
+ NegFlag<SetFalse, [], [ClangOption], "Disable">,
+ BothFlags<[], [ClangOption], " late function splitting using profile information (x86 ELF)">>;
defm strict_return : BoolFOption<"strict-return",
CodeGenOpts<"StrictReturn">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option], "Don't treat control flow paths that fall off the end"
+ NegFlag<SetFalse, [], [ClangOption, CC1Option],
+ "Don't treat control flow paths that fall off the end"
" of a non-void function as unreachable">,
PosFlag<SetTrue>>;
def fenable_matrix : Flag<["-"], "fenable-matrix">, Group<f_Group>,
- Flags<[CC1Option]>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable matrix data type and related builtin functions">,
MarshallingInfoFlag<LangOpts<"MatrixTypes">>;
+def fzero_call_used_regs_EQ
+ : Joined<["-"], "fzero-call-used-regs=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Clear call-used registers upon function return (AArch64/x86 only)">,
+ Values<"skip,used-gpr-arg,used-gpr,used-arg,used,all-gpr-arg,all-gpr,all-arg,all">,
+ NormalizedValues<["Skip", "UsedGPRArg", "UsedGPR", "UsedArg", "Used",
+ "AllGPRArg", "AllGPR", "AllArg", "All"]>,
+ NormalizedValuesScope<"llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind">,
+ MarshallingInfoEnum<CodeGenOpts<"ZeroCallUsedRegs">, "Skip">;
def fdebug_types_section: Flag <["-"], "fdebug-types-section">, Group<f_Group>,
HelpText<"Place debug types in their own section (ELF Only)">;
def fno_debug_types_section: Flag<["-"], "fno-debug-types-section">, Group<f_Group>;
defm debug_ranges_base_address : BoolFOption<"debug-ranges-base-address",
CodeGenOpts<"DebugRangesBaseAddress">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Use DWARF base address selection entries in .debug_ranges">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Use DWARF base address selection entries in .debug_ranges">,
NegFlag<SetFalse>>;
defm split_dwarf_inlining : BoolFOption<"split-dwarf-inlining",
CodeGenOpts<"SplitDwarfInlining">, DefaultFalse,
- NegFlag<SetFalse, []>,
- PosFlag<SetTrue, [CC1Option], "Provide minimal debug info in the object/executable"
+ NegFlag<SetFalse, [], [ClangOption]>,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Provide minimal debug info in the object/executable"
" to facilitate online symbolication/stack traces in the absence of"
" .dwo/.dwp files when using Split DWARF">>;
def fdebug_default_version: Joined<["-"], "fdebug-default-version=">, Group<f_Group>,
HelpText<"Default DWARF version to use, if a -g option caused DWARF debug info to be produced">;
def fdebug_prefix_map_EQ
: Joined<["-"], "fdebug-prefix-map=">, Group<f_Group>,
- Flags<[CC1Option,CC1AsOption]>,
- HelpText<"remap file source paths in debug info">;
+ Visibility<[ClangOption, CC1Option, CC1AsOption]>,
+ MetaVarName<"<old>=<new>">,
+ HelpText<"For paths in debug info, remap directory <old> to <new>. If multiple options match a path, the last option wins">;
def fcoverage_prefix_map_EQ
: Joined<["-"], "fcoverage-prefix-map=">, Group<f_Group>,
- Flags<[CC1Option]>,
- HelpText<"remap file source paths in coverage mapping">;
+ Visibility<[ClangOption, CC1Option]>,
+ MetaVarName<"<old>=<new>">,
+ HelpText<"remap file source paths <old> to <new> in coverage mapping. If there are multiple options, prefix replacement is applied in reverse order starting from the last one">;
def ffile_prefix_map_EQ
: Joined<["-"], "ffile-prefix-map=">, Group<f_Group>,
- HelpText<"remap file source paths in debug info, predefined preprocessor macros and __builtin_FILE()">;
+ HelpText<"remap file source paths in debug info, predefined preprocessor "
+ "macros and __builtin_FILE(). Implies -ffile-reproducible.">;
def fmacro_prefix_map_EQ
- : Joined<["-"], "fmacro-prefix-map=">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"remap file source paths in predefined preprocessor macros and __builtin_FILE()">;
+ : Joined<["-"], "fmacro-prefix-map=">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"remap file source paths in predefined preprocessor macros and "
+ "__builtin_FILE(). Implies -ffile-reproducible.">;
defm force_dwarf_frame : BoolFOption<"force-dwarf-frame",
CodeGenOpts<"ForceDwarfFrameSection">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Always emit a debug frame section">, NegFlag<SetFalse>>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Always emit a debug frame section">,
+ NegFlag<SetFalse>>;
+def femit_dwarf_unwind_EQ : Joined<["-"], "femit-dwarf-unwind=">,
+ Group<f_Group>, Visibility<[ClangOption, CC1Option, CC1AsOption]>,
+ HelpText<"When to emit DWARF unwind (EH frame) info">,
+ Values<"always,no-compact-unwind,default">,
+ NormalizedValues<["Always", "NoCompactUnwind", "Default"]>,
+ NormalizedValuesScope<"llvm::EmitDwarfUnwindType">,
+ MarshallingInfoEnum<CodeGenOpts<"EmitDwarfUnwind">, "Default">;
+defm emit_compact_unwind_non_canonical : BoolFOption<"emit-compact-unwind-non-canonical",
+ CodeGenOpts<"EmitCompactUnwindNonCanonical">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option, CC1AsOption],
+ "Try emitting Compact-Unwind for non-canonical entries. Maybe overriden by other constraints">,
+ NegFlag<SetFalse>>;
def g_Flag : Flag<["-"], "g">, Group<g_Group>,
- HelpText<"Generate source-level debug information">;
+ Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>,
+ HelpText<"Generate source-level debug information">;
def gline_tables_only : Flag<["-"], "gline-tables-only">, Group<gN_Group>,
- Flags<[CoreOption]>, HelpText<"Emit debug line number tables only">;
+ Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>,
+ HelpText<"Emit debug line number tables only">;
def gline_directives_only : Flag<["-"], "gline-directives-only">, Group<gN_Group>,
- Flags<[CoreOption]>, HelpText<"Emit debug line info directives only">;
+ Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>,
+ HelpText<"Emit debug line info directives only">;
def gmlt : Flag<["-"], "gmlt">, Alias<gline_tables_only>;
-def g0 : Flag<["-"], "g0">, Group<gN_Group>;
-def g1 : Flag<["-"], "g1">, Group<gN_Group>, Alias<gline_tables_only>;
-def g2 : Flag<["-"], "g2">, Group<gN_Group>;
-def g3 : Flag<["-"], "g3">, Group<gN_Group>;
+def g0 : Flag<["-"], "g0">, Group<gN_Group>, Visibility<[ClangOption, FlangOption]>;
+def g1 : Flag<["-"], "g1">, Group<gN_Group>, Visibility<[ClangOption, FlangOption]>, Alias<gline_tables_only>;
+def g2 : Flag<["-"], "g2">, Group<gN_Group>, Visibility<[ClangOption, FlangOption]>;
+def g3 : Flag<["-"], "g3">, Group<gN_Group>, Visibility<[ClangOption, FlangOption]>;
def ggdb : Flag<["-"], "ggdb">, Group<gTune_Group>;
def ggdb0 : Flag<["-"], "ggdb0">, Group<ggdbN_Group>;
def ggdb1 : Flag<["-"], "ggdb1">, Group<ggdbN_Group>;
@@ -2853,7 +4133,8 @@ def gsce : Flag<["-"], "gsce">, Group<gTune_Group>;
def gdbx : Flag<["-"], "gdbx">, Group<gTune_Group>;
// Equivalent to our default dwarf version. Forces usual dwarf emission when
// CodeView is enabled.
-def gdwarf : Flag<["-"], "gdwarf">, Group<g_Group>, Flags<[CoreOption]>,
+def gdwarf : Flag<["-"], "gdwarf">, Group<g_Group>,
+ Visibility<[ClangOption, CLOption, DXCOption]>,
HelpText<"Generate source-level debug information with the default dwarf version">;
def gdwarf_2 : Flag<["-"], "gdwarf-2">, Group<g_Group>,
HelpText<"Generate source-level debug information with dwarf version 2">;
@@ -2864,25 +4145,32 @@ def gdwarf_4 : Flag<["-"], "gdwarf-4">, Group<g_Group>,
def gdwarf_5 : Flag<["-"], "gdwarf-5">, Group<g_Group>,
HelpText<"Generate source-level debug information with dwarf version 5">;
def gdwarf64 : Flag<["-"], "gdwarf64">, Group<g_Group>,
- Flags<[CC1Option, CC1AsOption]>,
+ Visibility<[ClangOption, CC1Option, CC1AsOption]>,
HelpText<"Enables DWARF64 format for ELF binaries, if debug information emission is enabled.">,
MarshallingInfoFlag<CodeGenOpts<"Dwarf64">>;
def gdwarf32 : Flag<["-"], "gdwarf32">, Group<g_Group>,
- Flags<[CC1Option, CC1AsOption]>,
+ Visibility<[ClangOption, CC1Option, CC1AsOption]>,
HelpText<"Enables DWARF32 format for ELF binaries, if debug information emission is enabled.">;
def gcodeview : Flag<["-"], "gcodeview">,
HelpText<"Generate CodeView debug information">,
- Flags<[CC1Option, CC1AsOption, CoreOption]>,
+ Visibility<[ClangOption, CC1Option, CC1AsOption, CLOption, DXCOption]>,
MarshallingInfoFlag<CodeGenOpts<"EmitCodeView">>;
defm codeview_ghash : BoolOption<"g", "codeview-ghash",
CodeGenOpts<"CodeViewGHash">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Emit type record hashes in a .debug$H section">,
- NegFlag<SetFalse>, BothFlags<[CoreOption]>>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Emit type record hashes in a .debug$H section">,
+ NegFlag<SetFalse>, BothFlags<[], [ClangOption, CLOption, DXCOption]>>;
+defm codeview_command_line : BoolOption<"g", "codeview-command-line",
+ CodeGenOpts<"CodeViewCommandLine">, DefaultTrue,
+ PosFlag<SetTrue, [], [ClangOption], "Emit compiler path and command line into CodeView debug information">,
+ NegFlag<SetFalse, [], [ClangOption], "Don't emit compiler path and command line into CodeView debug information">,
+ BothFlags<[], [ClangOption, CLOption, DXCOption, CC1Option]>>;
defm inline_line_tables : BoolGOption<"inline-line-tables",
CodeGenOpts<"NoInlineLineTables">, DefaultFalse,
- NegFlag<SetTrue, [CC1Option], "Don't emit inline line tables.">,
- PosFlag<SetFalse>, BothFlags<[CoreOption]>>;
+ NegFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Don't emit inline line tables.">,
+ PosFlag<SetFalse>, BothFlags<[], [ClangOption, CLOption, DXCOption]>>;
def gfull : Flag<["-"], "gfull">, Group<g_Group>;
def gused : Flag<["-"], "gused">, Group<g_Group>;
@@ -2899,148 +4187,216 @@ def : Flag<["-"], "grecord-gcc-switches">, Alias<grecord_command_line>;
def : Flag<["-"], "gno-record-gcc-switches">, Alias<gno_record_command_line>;
defm strict_dwarf : BoolOption<"g", "strict-dwarf",
CodeGenOpts<"DebugStrictDwarf">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option]>, NegFlag<SetFalse>, BothFlags<[CoreOption]>>,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Restrict DWARF features to those defined in "
+ "the specified version, avoiding features from later versions.">,
+ NegFlag<SetFalse>, BothFlags<[], [ClangOption, CLOption, DXCOption]>>,
Group<g_flags_Group>;
defm column_info : BoolOption<"g", "column-info",
CodeGenOpts<"DebugColumnInfo">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option]>, PosFlag<SetTrue>, BothFlags<[CoreOption]>>,
+ NegFlag<SetFalse, [], [ClangOption, CC1Option]>,
+ PosFlag<SetTrue>, BothFlags<[], [ClangOption, CLOption, DXCOption]>>,
Group<g_flags_Group>;
-def gsplit_dwarf : Flag<["-"], "gsplit-dwarf">, Group<g_flags_Group>;
+def gsplit_dwarf : Flag<["-"], "gsplit-dwarf">, Group<g_flags_Group>,
+ Visibility<[ClangOption, CLOption, DXCOption]>;
def gsplit_dwarf_EQ : Joined<["-"], "gsplit-dwarf=">, Group<g_flags_Group>,
- HelpText<"Set DWARF fission mode to either 'split' or 'single'">,
+ Visibility<[ClangOption, CLOption, DXCOption]>,
+ HelpText<"Set DWARF fission mode">,
Values<"split,single">;
-def gno_split_dwarf : Flag<["-"], "gno-split-dwarf">, Group<g_flags_Group>;
-def ggnu_pubnames : Flag<["-"], "ggnu-pubnames">, Group<g_flags_Group>, Flags<[CC1Option]>;
+def gno_split_dwarf : Flag<["-"], "gno-split-dwarf">, Group<g_flags_Group>,
+ Visibility<[ClangOption, CLOption, DXCOption]>;
+def gsimple_template_names : Flag<["-"], "gsimple-template-names">, Group<g_flags_Group>;
+def gsimple_template_names_EQ
+ : Joined<["-"], "gsimple-template-names=">,
+ HelpText<"Use simple template names in DWARF, or include the full "
+ "template name with a modified prefix for validation">,
+ Values<"simple,mangled">, Visibility<[CC1Option]>;
+def gsrc_hash_EQ : Joined<["-"], "gsrc-hash=">,
+ Group<g_flags_Group>, Visibility<[CC1Option]>,
+ Values<"md5,sha1,sha256">,
+ NormalizedValues<["DSH_MD5", "DSH_SHA1", "DSH_SHA256"]>,
+ NormalizedValuesScope<"CodeGenOptions">,
+ MarshallingInfoEnum<CodeGenOpts<"DebugSrcHash">, "DSH_MD5">;
+def gno_simple_template_names : Flag<["-"], "gno-simple-template-names">,
+ Group<g_flags_Group>;
+def ggnu_pubnames : Flag<["-"], "ggnu-pubnames">, Group<g_flags_Group>,
+ Visibility<[ClangOption, CC1Option]>;
def gno_gnu_pubnames : Flag<["-"], "gno-gnu-pubnames">, Group<g_flags_Group>;
-def gpubnames : Flag<["-"], "gpubnames">, Group<g_flags_Group>, Flags<[CC1Option]>;
+def gpubnames : Flag<["-"], "gpubnames">, Group<g_flags_Group>,
+ Visibility<[ClangOption, CC1Option]>;
def gno_pubnames : Flag<["-"], "gno-pubnames">, Group<g_flags_Group>;
def gdwarf_aranges : Flag<["-"], "gdwarf-aranges">, Group<g_flags_Group>;
def gmodules : Flag <["-"], "gmodules">, Group<gN_Group>,
HelpText<"Generate debug info with external references to clang modules"
" or precompiled headers">;
+def gno_modules : Flag <["-"], "gno-modules">, Group<g_flags_Group>;
def gz_EQ : Joined<["-"], "gz=">, Group<g_flags_Group>,
HelpText<"DWARF debug sections compression type">;
def gz : Flag<["-"], "gz">, Alias<gz_EQ>, AliasArgs<["zlib"]>, Group<g_flags_Group>;
-def gembed_source : Flag<["-"], "gembed-source">, Group<g_flags_Group>, Flags<[CC1Option]>,
+def gembed_source : Flag<["-"], "gembed-source">, Group<g_flags_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Embed source text in DWARF debug sections">,
MarshallingInfoFlag<CodeGenOpts<"EmbedSource">>;
def gno_embed_source : Flag<["-"], "gno-embed-source">, Group<g_flags_Group>,
Flags<[NoXarchOption]>,
HelpText<"Restore the default behavior of not embedding source text in DWARF debug sections">;
def headerpad__max__install__names : Joined<["-"], "headerpad_max_install_names">;
-def help : Flag<["-", "--"], "help">, Flags<[CC1Option,CC1AsOption, FC1Option,
- FlangOption]>, HelpText<"Display available options">,
+def help : Flag<["-", "--"], "help">,
+ Visibility<[ClangOption, CC1Option, CC1AsOption,
+ FC1Option, FlangOption, DXCOption]>,
+ HelpText<"Display available options">,
MarshallingInfoFlag<FrontendOpts<"ShowHelp">>;
-def ibuiltininc : Flag<["-"], "ibuiltininc">,
+def ibuiltininc : Flag<["-"], "ibuiltininc">, Group<clang_i_Group>,
HelpText<"Enable builtin #include directories even when -nostdinc is used "
"before or after -ibuiltininc. "
"Using -nobuiltininc after the option disables it">;
-def index_header_map : Flag<["-"], "index-header-map">, Flags<[CC1Option]>,
+def index_header_map : Flag<["-"], "index-header-map">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Make the next included directory (-I or -F) an indexer header map">;
-def idirafter : JoinedOrSeparate<["-"], "idirafter">, Group<clang_i_Group>, Flags<[CC1Option]>,
+def iapinotes_modules : JoinedOrSeparate<["-"], "iapinotes-modules">, Group<clang_i_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Add directory to the API notes search path referenced by module name">, MetaVarName<"<directory>">;
+def idirafter : JoinedOrSeparate<["-"], "idirafter">, Group<clang_i_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Add directory to AFTER include search path">;
-def iframework : JoinedOrSeparate<["-"], "iframework">, Group<clang_i_Group>, Flags<[CC1Option]>,
+def iframework : JoinedOrSeparate<["-"], "iframework">, Group<clang_i_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Add directory to SYSTEM framework search path">;
def iframeworkwithsysroot : JoinedOrSeparate<["-"], "iframeworkwithsysroot">,
Group<clang_i_Group>,
HelpText<"Add directory to SYSTEM framework search path, "
"absolute paths are relative to -isysroot">,
- MetaVarName<"<directory>">, Flags<[CC1Option]>;
-def imacros : JoinedOrSeparate<["-", "--"], "imacros">, Group<clang_i_Group>, Flags<[CC1Option]>,
+ MetaVarName<"<directory>">, Visibility<[ClangOption, CC1Option]>;
+def imacros : JoinedOrSeparate<["-", "--"], "imacros">, Group<clang_i_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Include macros from file before parsing">, MetaVarName<"<file>">,
MarshallingInfoStringVector<PreprocessorOpts<"MacroIncludes">>;
def image__base : Separate<["-"], "image_base">;
def include_ : JoinedOrSeparate<["-", "--"], "include">, Group<clang_i_Group>, EnumName<"include">,
- MetaVarName<"<file>">, HelpText<"Include file before parsing">, Flags<[CC1Option]>;
-def include_pch : Separate<["-"], "include-pch">, Group<clang_i_Group>, Flags<[CC1Option]>,
+ MetaVarName<"<file>">, HelpText<"Include file before parsing">,
+ Visibility<[ClangOption, CC1Option]>;
+def include_pch : Separate<["-"], "include-pch">, Group<clang_i_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Include precompiled header file">, MetaVarName<"<file>">,
MarshallingInfoString<PreprocessorOpts<"ImplicitPCHInclude">>;
-def relocatable_pch : Flag<["-", "--"], "relocatable-pch">, Flags<[CC1Option]>,
+def relocatable_pch : Flag<["-", "--"], "relocatable-pch">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Whether to build a relocatable precompiled header">,
MarshallingInfoFlag<FrontendOpts<"RelocatablePCH">>;
-def verify_pch : Flag<["-"], "verify-pch">, Group<Action_Group>, Flags<[CC1Option]>,
+def verify_pch : Flag<["-"], "verify-pch">, Group<Action_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Load and verify that a pre-compiled header file is not stale">;
def init : Separate<["-"], "init">;
def install__name : Separate<["-"], "install_name">;
-def iprefix : JoinedOrSeparate<["-"], "iprefix">, Group<clang_i_Group>, Flags<[CC1Option]>,
+def iprefix : JoinedOrSeparate<["-"], "iprefix">, Group<clang_i_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Set the -iwithprefix/-iwithprefixbefore prefix">, MetaVarName<"<dir>">;
-def iquote : JoinedOrSeparate<["-"], "iquote">, Group<clang_i_Group>, Flags<[CC1Option]>,
+def iquote : JoinedOrSeparate<["-"], "iquote">, Group<clang_i_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Add directory to QUOTE include search path">, MetaVarName<"<directory>">;
-def isysroot : JoinedOrSeparate<["-"], "isysroot">, Group<clang_i_Group>, Flags<[CC1Option]>,
+def isysroot : JoinedOrSeparate<["-"], "isysroot">, Group<clang_i_Group>,
+ Visibility<[ClangOption, CC1Option, FlangOption]>,
HelpText<"Set the system root directory (usually /)">, MetaVarName<"<dir>">,
MarshallingInfoString<HeaderSearchOpts<"Sysroot">, [{"/"}]>;
def isystem : JoinedOrSeparate<["-"], "isystem">, Group<clang_i_Group>,
- Flags<[CC1Option]>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Add directory to SYSTEM include search path">, MetaVarName<"<directory>">;
def isystem_after : JoinedOrSeparate<["-"], "isystem-after">,
Group<clang_i_Group>, Flags<[NoXarchOption]>, MetaVarName<"<directory>">,
HelpText<"Add directory to end of the SYSTEM include search path">;
def iwithprefixbefore : JoinedOrSeparate<["-"], "iwithprefixbefore">, Group<clang_i_Group>,
HelpText<"Set directory to include search path with prefix">, MetaVarName<"<dir>">,
- Flags<[CC1Option]>;
-def iwithprefix : JoinedOrSeparate<["-"], "iwithprefix">, Group<clang_i_Group>, Flags<[CC1Option]>,
+ Visibility<[ClangOption, CC1Option]>;
+def iwithprefix : JoinedOrSeparate<["-"], "iwithprefix">, Group<clang_i_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Set directory to SYSTEM include search path with prefix">, MetaVarName<"<dir>">;
def iwithsysroot : JoinedOrSeparate<["-"], "iwithsysroot">, Group<clang_i_Group>,
HelpText<"Add directory to SYSTEM include search path, "
"absolute paths are relative to -isysroot">, MetaVarName<"<directory>">,
- Flags<[CC1Option]>;
-def ivfsoverlay : JoinedOrSeparate<["-"], "ivfsoverlay">, Group<clang_i_Group>, Flags<[CC1Option]>,
+ Visibility<[ClangOption, CC1Option]>;
+def ivfsoverlay : JoinedOrSeparate<["-"], "ivfsoverlay">, Group<clang_i_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Overlay the virtual filesystem described by file over the real file system">;
+def vfsoverlay : JoinedOrSeparate<["-", "--"], "vfsoverlay">,
+ Visibility<[ClangOption, CC1Option, CLOption, DXCOption]>,
+ HelpText<"Overlay the virtual filesystem described by file over the real file system. "
+ "Additionally, pass this overlay file to the linker if it supports it">;
def imultilib : Separate<["-"], "imultilib">, Group<gfortran_Group>;
+def K : Flag<["-"], "K">, Flags<[LinkerInput]>;
def keep__private__externs : Flag<["-"], "keep_private_externs">;
def l : JoinedOrSeparate<["-"], "l">, Flags<[LinkerInput, RenderJoined]>,
- Group<Link_Group>;
+ Visibility<[ClangOption, FlangOption]>, Group<Link_Group>;
def lazy__framework : Separate<["-"], "lazy_framework">, Flags<[LinkerInput]>;
def lazy__library : Separate<["-"], "lazy_library">, Flags<[LinkerInput]>;
-def mlittle_endian : Flag<["-"], "mlittle-endian">, Flags<[NoXarchOption]>;
+def mlittle_endian : Flag<["-"], "mlittle-endian">, Group<m_Group>,
+ Flags<[NoXarchOption, TargetSpecific]>;
def EL : Flag<["-"], "EL">, Alias<mlittle_endian>;
-def mbig_endian : Flag<["-"], "mbig-endian">, Flags<[NoXarchOption]>;
+def mbig_endian : Flag<["-"], "mbig-endian">, Group<m_Group>,
+ Flags<[NoXarchOption, TargetSpecific]>;
def EB : Flag<["-"], "EB">, Alias<mbig_endian>;
-def m16 : Flag<["-"], "m16">, Group<m_Group>, Flags<[NoXarchOption, CoreOption]>;
-def m32 : Flag<["-"], "m32">, Group<m_Group>, Flags<[NoXarchOption, CoreOption]>;
-def mqdsp6_compat : Flag<["-"], "mqdsp6-compat">, Group<m_Group>, Flags<[NoXarchOption,CC1Option]>,
+def m16 : Flag<["-"], "m16">, Group<m_Group>, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CLOption, DXCOption]>;
+def m32 : Flag<["-"], "m32">, Group<m_Group>, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CLOption, DXCOption]>;
+def maix32 : Flag<["-"], "maix32">, Group<m_Group>, Flags<[NoXarchOption]>;
+def mqdsp6_compat : Flag<["-"], "mqdsp6-compat">, Group<m_Group>,
+ Flags<[NoXarchOption]>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable hexagon-qdsp6 backward compatibility">,
MarshallingInfoFlag<LangOpts<"HexagonQdsp6Compat">>;
-def m64 : Flag<["-"], "m64">, Group<m_Group>, Flags<[NoXarchOption, CoreOption]>;
-def mx32 : Flag<["-"], "mx32">, Group<m_Group>, Flags<[NoXarchOption, CoreOption]>;
-def mabi_EQ : Joined<["-"], "mabi=">, Group<m_Group>;
-def miamcu : Flag<["-"], "miamcu">, Group<m_Group>, Flags<[NoXarchOption, CoreOption]>,
+def m64 : Flag<["-"], "m64">, Group<m_Group>, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CLOption, DXCOption]>;
+def maix64 : Flag<["-"], "maix64">, Group<m_Group>, Flags<[NoXarchOption]>;
+def mx32 : Flag<["-"], "mx32">, Group<m_Group>, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CLOption, DXCOption]>;
+def miamcu : Flag<["-"], "miamcu">, Group<m_Group>, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CLOption]>,
HelpText<"Use Intel MCU ABI">;
-def mno_iamcu : Flag<["-"], "mno-iamcu">, Group<m_Group>, Flags<[NoXarchOption, CoreOption]>;
+def mno_iamcu : Flag<["-"], "mno-iamcu">, Group<m_Group>,
+ Flags<[NoXarchOption]>, Visibility<[ClangOption, CLOption]>;
def malign_functions_EQ : Joined<["-"], "malign-functions=">, Group<clang_ignored_m_Group>;
def malign_loops_EQ : Joined<["-"], "malign-loops=">, Group<clang_ignored_m_Group>;
def malign_jumps_EQ : Joined<["-"], "malign-jumps=">, Group<clang_ignored_m_Group>;
-def malign_branch_EQ : CommaJoined<["-"], "malign-branch=">, Group<m_Group>, Flags<[NoXarchOption]>,
+
+let Flags = [TargetSpecific] in {
+def mabi_EQ : Joined<["-"], "mabi=">, Group<m_Group>;
+def malign_branch_EQ : CommaJoined<["-"], "malign-branch=">, Group<m_Group>,
HelpText<"Specify types of branches to align">;
-def malign_branch_boundary_EQ : Joined<["-"], "malign-branch-boundary=">, Group<m_Group>, Flags<[NoXarchOption]>,
+def malign_branch_boundary_EQ : Joined<["-"], "malign-branch-boundary=">, Group<m_Group>,
HelpText<"Specify the boundary's size to align branches">;
-def mpad_max_prefix_size_EQ : Joined<["-"], "mpad-max-prefix-size=">, Group<m_Group>, Flags<[NoXarchOption]>,
+def mpad_max_prefix_size_EQ : Joined<["-"], "mpad-max-prefix-size=">, Group<m_Group>,
HelpText<"Specify maximum number of prefixes to use for padding">;
-def mbranches_within_32B_boundaries : Flag<["-"], "mbranches-within-32B-boundaries">, Flags<[NoXarchOption]>, Group<m_Group>,
+def mbranches_within_32B_boundaries : Flag<["-"], "mbranches-within-32B-boundaries">, Group<m_Group>,
HelpText<"Align selected branches (fused, jcc, jmp) within 32-byte boundary">;
def mfancy_math_387 : Flag<["-"], "mfancy-math-387">, Group<clang_ignored_m_Group>;
def mlong_calls : Flag<["-"], "mlong-calls">, Group<m_Group>,
HelpText<"Generate branches with extended addressability, usually via indirect jumps.">;
-def mdouble_EQ : Joined<["-"], "mdouble=">, Group<m_Group>, Values<"32,64">, Flags<[CC1Option]>,
- HelpText<"Force double to be 32 bits or 64 bits">,
+} // let Flags = [TargetSpecific]
+def mdouble_EQ : Joined<["-"], "mdouble=">, Group<m_Group>,
+ MetaVarName<"<n">, Values<"32,64">, Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Force double to be <n> bits">,
MarshallingInfoInt<LangOpts<"DoubleSize">, "0">;
-def LongDouble_Group : OptionGroup<"<LongDouble group>">, Group<m_Group>,
- DocName<"Long double flags">,
- DocBrief<[{Selects the long double implementation}]>;
-def mlong_double_64 : Flag<["-"], "mlong-double-64">, Group<LongDouble_Group>, Flags<[CC1Option]>,
+def mlong_double_64 : Flag<["-"], "mlong-double-64">, Group<LongDouble_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Force long double to be 64 bits">;
-def mlong_double_80 : Flag<["-"], "mlong-double-80">, Group<LongDouble_Group>, Flags<[CC1Option]>,
+def mlong_double_80 : Flag<["-"], "mlong-double-80">, Group<LongDouble_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Force long double to be 80 bits, padded to 128 bits for storage">;
-def mlong_double_128 : Flag<["-"], "mlong-double-128">, Group<LongDouble_Group>, Flags<[CC1Option]>,
+def mlong_double_128 : Flag<["-"], "mlong-double-128">, Group<LongDouble_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Force long double to be 128 bits">;
+let Flags = [TargetSpecific] in {
def mno_long_calls : Flag<["-"], "mno-long-calls">, Group<m_Group>,
HelpText<"Restore the default behaviour of not generating long calls">;
+} // let Flags = [TargetSpecific]
def mexecute_only : Flag<["-"], "mexecute-only">, Group<m_arm_Features_Group>,
HelpText<"Disallow generation of data access to code sections (ARM only)">;
def mno_execute_only : Flag<["-"], "mno-execute-only">, Group<m_arm_Features_Group>,
HelpText<"Allow generation of data access to code sections (ARM only)">;
-def mtp_mode_EQ : Joined<["-"], "mtp=">, Group<m_arm_Features_Group>, Values<"soft,cp15,el0,el1,el2,el3">,
- HelpText<"Thread pointer access method (AArch32/AArch64 only)">;
+let Flags = [TargetSpecific] in {
+def mtp_mode_EQ : Joined<["-"], "mtp=">, Group<m_arm_Features_Group>, Values<"soft,cp15,tpidrurw,tpidruro,tpidrprw,el0,el1,el2,el3,tpidr_el0,tpidr_el1,tpidr_el2,tpidr_el3,tpidrro_el0">,
+ HelpText<"Thread pointer access method. "
+ "For AArch32: 'soft' uses a function call, or 'tpidrurw', 'tpidruro' or 'tpidrprw' use the three CP15 registers. 'cp15' is an alias for 'tpidruro'. "
+ "For AArch64: 'tpidr_el0', 'tpidr_el1', 'tpidr_el2', 'tpidr_el3' or 'tpidrro_el0' use the five system registers. 'elN' is an alias for 'tpidr_elN'.">;
def mpure_code : Flag<["-"], "mpure-code">, Alias<mexecute_only>; // Alias for GCC compatibility
def mno_pure_code : Flag<["-"], "mno-pure-code">, Alias<mno_execute_only>;
def mtvos_version_min_EQ : Joined<["-"], "mtvos-version-min=">, Group<m_Group>;
@@ -3048,124 +4404,198 @@ def mappletvos_version_min_EQ : Joined<["-"], "mappletvos-version-min=">, Alias<
def mtvos_simulator_version_min_EQ : Joined<["-"], "mtvos-simulator-version-min=">;
def mappletvsimulator_version_min_EQ : Joined<["-"], "mappletvsimulator-version-min=">, Alias<mtvos_simulator_version_min_EQ>;
def mwatchos_version_min_EQ : Joined<["-"], "mwatchos-version-min=">, Group<m_Group>;
-def mwatchos_simulator_version_min_EQ : Joined<["-"], "mwatchos-simulator-version-min=">;
+def mwatchos_simulator_version_min_EQ : Joined<["-"], "mwatchos-simulator-version-min=">, Group<m_Group>;
def mwatchsimulator_version_min_EQ : Joined<["-"], "mwatchsimulator-version-min=">, Alias<mwatchos_simulator_version_min_EQ>;
-def march_EQ : Joined<["-"], "march=">, Group<m_Group>, Flags<[CoreOption]>;
-def masm_EQ : Joined<["-"], "masm=">, Group<m_Group>, Flags<[NoXarchOption]>;
-def mcmodel_EQ : Joined<["-"], "mcmodel=">, Group<m_Group>, Flags<[CC1Option]>,
+} // let Flags = [TargetSpecific]
+def march_EQ : Joined<["-"], "march=">, Group<m_Group>,
+ Flags<[TargetSpecific]>, Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>,
+ HelpText<"For a list of available architectures for the target use '-mcpu=help'">;
+def masm_EQ : Joined<["-"], "masm=">, Group<m_Group>;
+def inline_asm_EQ : Joined<["-"], "inline-asm=">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ Values<"att,intel">,
+ NormalizedValuesScope<"CodeGenOptions">, NormalizedValues<["IAD_ATT", "IAD_Intel"]>,
+ MarshallingInfoEnum<CodeGenOpts<"InlineAsmDialect">, "IAD_ATT">;
+def mcmodel_EQ : Joined<["-"], "mcmodel=">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
MarshallingInfoString<TargetOpts<"CodeModel">, [{"default"}]>;
-def mtls_size_EQ : Joined<["-"], "mtls-size=">, Group<m_Group>, Flags<[NoXarchOption, CC1Option]>,
+def mlarge_data_threshold_EQ : Joined<["-"], "mlarge-data-threshold=">, Group<m_Group>,
+ Flags<[TargetSpecific]>, Visibility<[ClangOption, CC1Option]>,
+ MarshallingInfoInt<TargetOpts<"LargeDataThreshold">, "0">;
+def mtls_size_EQ : Joined<["-"], "mtls-size=">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Specify bit size of immediate TLS offsets (AArch64 ELF only): "
"12 (for 4KB) | 24 (for 16MB, default) | 32 (for 4GB) | 48 (for 256TB, needs -mcmodel=large)">,
MarshallingInfoInt<CodeGenOpts<"TLSSize">>;
+def mtls_dialect_EQ : Joined<["-"], "mtls-dialect=">, Group<m_Group>,
+ Flags<[TargetSpecific]>, HelpText<"Which thread-local storage dialect to use for dynamic accesses of TLS variables">;
def mimplicit_it_EQ : Joined<["-"], "mimplicit-it=">, Group<m_Group>;
def mdefault_build_attributes : Joined<["-"], "mdefault-build-attributes">, Group<m_Group>;
def mno_default_build_attributes : Joined<["-"], "mno-default-build-attributes">, Group<m_Group>;
+let Flags = [TargetSpecific] in {
def mconstant_cfstrings : Flag<["-"], "mconstant-cfstrings">, Group<clang_ignored_m_Group>;
-def mconsole : Joined<["-"], "mconsole">, Group<m_Group>, Flags<[NoXarchOption]>;
-def mwindows : Joined<["-"], "mwindows">, Group<m_Group>, Flags<[NoXarchOption]>;
-def mdll : Joined<["-"], "mdll">, Group<m_Group>, Flags<[NoXarchOption]>;
-def municode : Joined<["-"], "municode">, Group<m_Group>, Flags<[NoXarchOption]>;
-def mthreads : Joined<["-"], "mthreads">, Group<m_Group>, Flags<[NoXarchOption]>;
-def mcpu_EQ : Joined<["-"], "mcpu=">, Group<m_Group>;
+def mconsole : Joined<["-"], "mconsole">, Group<m_Group>;
+def mwindows : Joined<["-"], "mwindows">, Group<m_Group>;
+def mdll : Joined<["-"], "mdll">, Group<m_Group>;
+def municode : Joined<["-"], "municode">, Group<m_Group>;
+def mthreads : Joined<["-"], "mthreads">, Group<m_Group>;
+def mguard_EQ : Joined<["-"], "mguard=">, Group<m_Group>,
+ HelpText<"Enable or disable Control Flow Guard checks and guard tables emission">,
+ Values<"none,cf,cf-nochecks">;
def mmcu_EQ : Joined<["-"], "mmcu=">, Group<m_Group>;
def msim : Flag<["-"], "msim">, Group<m_Group>;
-def mdynamic_no_pic : Joined<["-"], "mdynamic-no-pic">, Group<m_Group>;
def mfix_and_continue : Flag<["-"], "mfix-and-continue">, Group<clang_ignored_m_Group>;
def mieee_fp : Flag<["-"], "mieee-fp">, Group<clang_ignored_m_Group>;
def minline_all_stringops : Flag<["-"], "minline-all-stringops">, Group<clang_ignored_m_Group>;
def mno_inline_all_stringops : Flag<["-"], "mno-inline-all-stringops">, Group<clang_ignored_m_Group>;
-def malign_double : Flag<["-"], "malign-double">, Group<m_Group>, Flags<[CC1Option]>,
+} // let Flags = [TargetSpecific]
+def malign_double : Flag<["-"], "malign-double">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Align doubles to two words in structs (x86 only)">,
MarshallingInfoFlag<LangOpts<"AlignDouble">>;
+let Flags = [TargetSpecific] in {
def mfloat_abi_EQ : Joined<["-"], "mfloat-abi=">, Group<m_Group>, Values<"soft,softfp,hard">;
def mfpmath_EQ : Joined<["-"], "mfpmath=">, Group<m_Group>;
def mfpu_EQ : Joined<["-"], "mfpu=">, Group<m_Group>;
def mhwdiv_EQ : Joined<["-"], "mhwdiv=">, Group<m_Group>;
def mhwmult_EQ : Joined<["-"], "mhwmult=">, Group<m_Group>;
-def mglobal_merge : Flag<["-"], "mglobal-merge">, Group<m_Group>, Flags<[CC1Option]>,
+} // let Flags = [TargetSpecific]
+def mglobal_merge : Flag<["-"], "mglobal-merge">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable merging of globals">;
+let Flags = [TargetSpecific] in {
def mhard_float : Flag<["-"], "mhard-float">, Group<m_Group>;
-def miphoneos_version_min_EQ : Joined<["-"], "miphoneos-version-min=">, Group<m_Group>;
def mios_version_min_EQ : Joined<["-"], "mios-version-min=">,
- Alias<miphoneos_version_min_EQ>, HelpText<"Set iOS deployment target">;
-def mios_simulator_version_min_EQ : Joined<["-"], "mios-simulator-version-min=">;
-def miphonesimulator_version_min_EQ : Joined<["-"], "miphonesimulator-version-min=">, Alias<mios_simulator_version_min_EQ>;
+ Group<m_Group>, HelpText<"Set iOS deployment target">;
+def : Joined<["-"], "miphoneos-version-min=">,
+ Group<m_Group>, Alias<mios_version_min_EQ>;
+def mios_simulator_version_min_EQ : Joined<["-"], "mios-simulator-version-min=">, Group<m_Group>;
+def : Joined<["-"], "miphonesimulator-version-min=">, Alias<mios_simulator_version_min_EQ>;
def mkernel : Flag<["-"], "mkernel">, Group<m_Group>;
-def mlinker_version_EQ : Joined<["-"], "mlinker-version=">,
+def mlinker_version_EQ : Joined<["-"], "mlinker-version=">, Group<m_Group>,
Flags<[NoXarchOption]>;
-def mllvm : Separate<["-"], "mllvm">, Flags<[CC1Option,CC1AsOption,CoreOption]>,
+} // let Flags = [TargetSpecific]
+def mllvm : Separate<["-"], "mllvm">,
+ Visibility<[ClangOption, CC1Option, CC1AsOption, CLOption, DXCOption, FC1Option, FlangOption]>,
HelpText<"Additional arguments to forward to LLVM's option processing">,
MarshallingInfoStringVector<FrontendOpts<"LLVMArgs">>;
-def mmacosx_version_min_EQ : Joined<["-"], "mmacosx-version-min=">,
- Group<m_Group>, HelpText<"Set Mac OS X deployment target">;
+def : Joined<["-"], "mllvm=">,
+ Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>, Alias<mllvm>,
+ HelpText<"Alias for -mllvm">, MetaVarName<"<arg>">;
+def mmlir : Separate<["-"], "mmlir">,
+ Visibility<[ClangOption, CLOption, FC1Option, FlangOption]>,
+ HelpText<"Additional arguments to forward to MLIR's option processing">;
+def ffuchsia_api_level_EQ : Joined<["-"], "ffuchsia-api-level=">,
+ Group<m_Group>, Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Set Fuchsia API level">,
+ MarshallingInfoInt<LangOpts<"FuchsiaAPILevel">>;
def mmacos_version_min_EQ : Joined<["-"], "mmacos-version-min=">,
- Group<m_Group>, Alias<mmacosx_version_min_EQ>;
-def mms_bitfields : Flag<["-"], "mms-bitfields">, Group<m_Group>, Flags<[CC1Option]>,
+ Group<m_Group>, HelpText<"Set macOS deployment target">;
+def : Joined<["-"], "mmacosx-version-min=">,
+ Group<m_Group>, Alias<mmacos_version_min_EQ>;
+def mms_bitfields : Flag<["-"], "mms-bitfields">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Set the default structure layout to be compatible with the Microsoft compiler standard">,
MarshallingInfoFlag<LangOpts<"MSBitfields">>;
-def moutline : Flag<["-"], "moutline">, Group<f_clang_Group>, Flags<[CC1Option]>,
+def moutline : Flag<["-"], "moutline">, Group<f_clang_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable function outlining (AArch64 only)">;
-def mno_outline : Flag<["-"], "mno-outline">, Group<f_clang_Group>, Flags<[CC1Option]>,
+def mno_outline : Flag<["-"], "mno-outline">, Group<f_clang_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Disable function outlining (AArch64 only)">;
def mno_ms_bitfields : Flag<["-"], "mno-ms-bitfields">, Group<m_Group>,
HelpText<"Do not set the default structure layout to be compatible with the Microsoft compiler standard">;
-def mstackrealign : Flag<["-"], "mstackrealign">, Group<m_Group>, Flags<[CC1Option]>,
+def mskip_rax_setup : Flag<["-"], "mskip-rax-setup">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Skip setting up RAX register when passing variable arguments (x86 only)">,
+ MarshallingInfoFlag<CodeGenOpts<"SkipRaxSetup">>;
+def mno_skip_rax_setup : Flag<["-"], "mno-skip-rax-setup">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>;
+def mstackrealign : Flag<["-"], "mstackrealign">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Force realign the stack at entry to every function">,
MarshallingInfoFlag<CodeGenOpts<"StackRealignment">>;
-def mstack_alignment : Joined<["-"], "mstack-alignment=">, Group<m_Group>, Flags<[CC1Option]>,
+def mstack_alignment : Joined<["-"], "mstack-alignment=">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Set the stack alignment">,
MarshallingInfoInt<CodeGenOpts<"StackAlignment">>;
-def mstack_probe_size : Joined<["-"], "mstack-probe-size=">, Group<m_Group>, Flags<[CC1Option]>,
+def mstack_probe_size : Joined<["-"], "mstack-probe-size=">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Set the stack probe size">,
MarshallingInfoInt<CodeGenOpts<"StackProbeSize">, "4096">;
def mstack_arg_probe : Flag<["-"], "mstack-arg-probe">, Group<m_Group>,
HelpText<"Enable stack probes">;
-def mno_stack_arg_probe : Flag<["-"], "mno-stack-arg-probe">, Group<m_Group>, Flags<[CC1Option]>,
+def mzos_sys_include_EQ : Joined<["-"], "mzos-sys-include=">, MetaVarName<"<SysInclude>">,
+ HelpText<"Path to system headers on z/OS">;
+def mno_stack_arg_probe : Flag<["-"], "mno-stack-arg-probe">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Disable stack probes which are enabled by default">,
MarshallingInfoFlag<CodeGenOpts<"NoStackArgProbe">>;
-def mthread_model : Separate<["-"], "mthread-model">, Group<m_Group>, Flags<[CC1Option]>,
- HelpText<"The thread model to use, e.g. posix, single (posix by default)">, Values<"posix,single">,
+def mthread_model : Separate<["-"], "mthread-model">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"The thread model to use. Defaults to 'posix')">, Values<"posix,single">,
NormalizedValues<["POSIX", "Single"]>, NormalizedValuesScope<"LangOptions::ThreadModelKind">,
MarshallingInfoEnum<LangOpts<"ThreadModel">, "POSIX">;
-def meabi : Separate<["-"], "meabi">, Group<m_Group>, Flags<[CC1Option]>,
- HelpText<"Set EABI type, e.g. 4, 5 or gnu (default depends on triple)">, Values<"default,4,5,gnu">,
+def meabi : Separate<["-"], "meabi">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Set EABI type. Default depends on triple)">, Values<"default,4,5,gnu">,
MarshallingInfoEnum<TargetOpts<"EABIVersion">, "Default">,
NormalizedValuesScope<"llvm::EABI">,
NormalizedValues<["Default", "EABI4", "EABI5", "GNU"]>;
+def mtargetos_EQ : Joined<["-"], "mtargetos=">, Group<m_Group>,
+ HelpText<"Set the deployment target to be the specified OS and OS version">;
+def mzos_hlq_le_EQ : Joined<["-"], "mzos-hlq-le=">, MetaVarName<"<LeHLQ>">,
+ HelpText<"High level qualifier for z/OS Language Environment datasets">;
+def mzos_hlq_clang_EQ : Joined<["-"], "mzos-hlq-clang=">, MetaVarName<"<ClangHLQ>">,
+ HelpText<"High level qualifier for z/OS C++RT side deck datasets">;
+def mzos_hlq_csslib_EQ : Joined<["-"], "mzos-hlq-csslib=">, MetaVarName<"<CsslibHLQ>">,
+ HelpText<"High level qualifier for z/OS CSSLIB dataset">;
def mno_constant_cfstrings : Flag<["-"], "mno-constant-cfstrings">, Group<m_Group>;
-def mno_global_merge : Flag<["-"], "mno-global-merge">, Group<m_Group>, Flags<[CC1Option]>,
+def mno_global_merge : Flag<["-"], "mno-global-merge">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Disable merging of globals">;
def mno_pascal_strings : Flag<["-"], "mno-pascal-strings">,
Alias<fno_pascal_strings>;
def mno_red_zone : Flag<["-"], "mno-red-zone">, Group<m_Group>;
-def mno_tls_direct_seg_refs : Flag<["-"], "mno-tls-direct-seg-refs">, Group<m_Group>, Flags<[CC1Option]>,
+def mno_tls_direct_seg_refs : Flag<["-"], "mno-tls-direct-seg-refs">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Disable direct TLS access through segment registers">,
MarshallingInfoFlag<CodeGenOpts<"IndirectTlsSegRefs">>;
def mno_relax_all : Flag<["-"], "mno-relax-all">, Group<m_Group>;
+let Flags = [TargetSpecific] in {
def mno_rtd: Flag<["-"], "mno-rtd">, Group<m_Group>;
def mno_soft_float : Flag<["-"], "mno-soft-float">, Group<m_Group>;
def mno_stackrealign : Flag<["-"], "mno-stackrealign">, Group<m_Group>;
+} // let Flags = [TargetSpecific]
-def mretpoline : Flag<["-"], "mretpoline">, Group<m_Group>, Flags<[CoreOption,NoXarchOption]>;
-def mno_retpoline : Flag<["-"], "mno-retpoline">, Group<m_Group>, Flags<[CoreOption,NoXarchOption]>;
+def mretpoline : Flag<["-"], "mretpoline">, Group<m_Group>,
+ Visibility<[ClangOption, CLOption]>;
+def mno_retpoline : Flag<["-"], "mno-retpoline">, Group<m_Group>,
+ Visibility<[ClangOption, CLOption]>;
defm speculative_load_hardening : BoolOption<"m", "speculative-load-hardening",
CodeGenOpts<"SpeculativeLoadHardening">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option]>, NegFlag<SetFalse>, BothFlags<[CoreOption]>>,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option]>,
+ NegFlag<SetFalse>, BothFlags<[], [ClangOption, CLOption]>>,
Group<m_Group>;
-def mlvi_hardening : Flag<["-"], "mlvi-hardening">, Group<m_Group>, Flags<[CoreOption,NoXarchOption]>,
+def mlvi_hardening : Flag<["-"], "mlvi-hardening">, Group<m_Group>,
+ Visibility<[ClangOption, CLOption]>,
HelpText<"Enable all mitigations for Load Value Injection (LVI)">;
-def mno_lvi_hardening : Flag<["-"], "mno-lvi-hardening">, Group<m_Group>, Flags<[CoreOption,NoXarchOption]>,
+def mno_lvi_hardening : Flag<["-"], "mno-lvi-hardening">, Group<m_Group>,
+ Visibility<[ClangOption, CLOption]>,
HelpText<"Disable mitigations for Load Value Injection (LVI)">;
-def mlvi_cfi : Flag<["-"], "mlvi-cfi">, Group<m_Group>, Flags<[CoreOption,NoXarchOption]>,
+def mlvi_cfi : Flag<["-"], "mlvi-cfi">, Group<m_Group>, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CLOption]>,
HelpText<"Enable only control-flow mitigations for Load Value Injection (LVI)">;
-def mno_lvi_cfi : Flag<["-"], "mno-lvi-cfi">, Group<m_Group>, Flags<[CoreOption,NoXarchOption]>,
+def mno_lvi_cfi : Flag<["-"], "mno-lvi-cfi">, Group<m_Group>,
+ Visibility<[ClangOption, CLOption]>,
HelpText<"Disable control-flow mitigations for Load Value Injection (LVI)">;
-def m_seses : Flag<["-"], "mseses">, Group<m_Group>, Flags<[CoreOption, NoXarchOption]>,
+def m_seses : Flag<["-"], "mseses">, Group<m_Group>, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CLOption]>,
HelpText<"Enable speculative execution side effect suppression (SESES). "
"Includes LVI control flow integrity mitigations">;
-def mno_seses : Flag<["-"], "mno-seses">, Group<m_Group>, Flags<[CoreOption, NoXarchOption]>,
+def mno_seses : Flag<["-"], "mno-seses">, Group<m_Group>,
+ Visibility<[ClangOption, CLOption]>,
HelpText<"Disable speculative execution side effect suppression (SESES)">;
def mrelax : Flag<["-"], "mrelax">, Group<m_Group>,
@@ -3175,30 +4605,41 @@ def mno_relax : Flag<["-"], "mno-relax">, Group<m_Group>,
def msmall_data_limit_EQ : Joined<["-"], "msmall-data-limit=">, Group<m_Group>,
Alias<G>,
HelpText<"Put global and static data smaller than the limit into a special section">;
+let Flags = [TargetSpecific] in {
def msave_restore : Flag<["-"], "msave-restore">, Group<m_riscv_Features_Group>,
HelpText<"Enable using library calls for save and restore">;
def mno_save_restore : Flag<["-"], "mno-save-restore">, Group<m_riscv_Features_Group>,
HelpText<"Disable using library calls for save and restore">;
-def mcmodel_EQ_medlow : Flag<["-"], "mcmodel=medlow">, Group<m_riscv_Features_Group>,
- Flags<[CC1Option]>, Alias<mcmodel_EQ>, AliasArgs<["small"]>,
- HelpText<"Equivalent to -mcmodel=small, compatible with RISC-V gcc.">;
-def mcmodel_EQ_medany : Flag<["-"], "mcmodel=medany">, Group<m_riscv_Features_Group>,
- Flags<[CC1Option]>, Alias<mcmodel_EQ>, AliasArgs<["medium"]>,
- HelpText<"Equivalent to -mcmodel=medium, compatible with RISC-V gcc.">;
+} // let Flags = [TargetSpecific]
+let Flags = [TargetSpecific] in {
def menable_experimental_extensions : Flag<["-"], "menable-experimental-extensions">, Group<m_Group>,
HelpText<"Enable use of experimental RISC-V extensions.">;
-
-def munaligned_access : Flag<["-"], "munaligned-access">, Group<m_arm_Features_Group>,
- HelpText<"Allow memory accesses to be unaligned (AArch32/AArch64 only)">;
-def mno_unaligned_access : Flag<["-"], "mno-unaligned-access">, Group<m_arm_Features_Group>,
- HelpText<"Force all memory accesses to be aligned (AArch32/AArch64 only)">;
-def mstrict_align : Flag<["-"], "mstrict-align">, Alias<mno_unaligned_access>, Flags<[CC1Option,HelpHidden]>,
+def mrvv_vector_bits_EQ : Joined<["-"], "mrvv-vector-bits=">, Group<m_Group>,
+ Visibility<[ClangOption, FlangOption]>,
+ HelpText<"Specify the size in bits of an RVV vector register">,
+ DocBrief<"Defaults to the vector length agnostic value of \"scalable\". "
+ "Accepts power of 2 values between 64 and 65536. Also accepts "
+ "\"zvl\" to use the value implied by -march/-mcpu. On Clang, value "
+ "will be reflected in __riscv_v_fixed_vlen preprocessor define "
+ "(RISC-V only)">;
+
+def munaligned_access : Flag<["-"], "munaligned-access">, Group<m_Group>,
+ HelpText<"Allow memory accesses to be unaligned (AArch32/AArch64/LoongArch/RISC-V only)">;
+def mno_unaligned_access : Flag<["-"], "mno-unaligned-access">, Group<m_Group>,
+ HelpText<"Force all memory accesses to be aligned (AArch32/AArch64/LoongArch/RISC-V only)">;
+} // let Flags = [TargetSpecific]
+def mstrict_align : Flag<["-"], "mstrict-align">, Alias<mno_unaligned_access>,
+ Flags<[HelpHidden]>, Visibility<[ClangOption, CC1Option]>,
HelpText<"Force all memory accesses to be aligned (same as mno-unaligned-access)">;
+def mno_strict_align : Flag<["-"], "mno-strict-align">, Alias<munaligned_access>,
+ Flags<[HelpHidden]>, Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Allow memory accesses to be unaligned (same as munaligned-access)">;
+let Flags = [TargetSpecific] in {
def mno_thumb : Flag<["-"], "mno-thumb">, Group<m_arm_Features_Group>;
def mrestrict_it: Flag<["-"], "mrestrict-it">, Group<m_arm_Features_Group>,
- HelpText<"Disallow generation of deprecated IT blocks for ARMv8. It is on by default for ARMv8 Thumb mode.">;
+ HelpText<"Disallow generation of complex IT blocks. It is off by default.">;
def mno_restrict_it: Flag<["-"], "mno-restrict-it">, Group<m_arm_Features_Group>,
- HelpText<"Allow generation of deprecated IT blocks for ARMv8. It is off by default for ARMv8 Thumb mode">;
+ HelpText<"Allow generation of complex IT blocks.">;
def marm : Flag<["-"], "marm">, Alias<mno_thumb>;
def ffixed_r9 : Flag<["-"], "ffixed-r9">, Group<m_arm_Features_Group>,
HelpText<"Reserve the r9 register (ARM only)">;
@@ -3210,23 +4651,49 @@ def mnocrc : Flag<["-"], "mnocrc">, Group<m_arm_Features_Group>,
HelpText<"Disallow use of CRC instructions (ARM only)">;
def mno_neg_immediates: Flag<["-"], "mno-neg-immediates">, Group<m_arm_Features_Group>,
HelpText<"Disallow converting instructions with negative immediates to their negation or inversion.">;
+} // let Flags = [TargetSpecific]
def mcmse : Flag<["-"], "mcmse">, Group<m_arm_Features_Group>,
- Flags<[NoXarchOption,CC1Option]>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Allow use of CMSE (Armv8-M Security Extensions)">,
MarshallingInfoFlag<LangOpts<"Cmse">>;
def ForceAAPCSBitfieldLoad : Flag<["-"], "faapcs-bitfield-load">, Group<m_arm_Features_Group>,
- Flags<[NoXarchOption,CC1Option]>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Follows the AAPCS standard that all volatile bit-field write generates at least one load. (ARM only).">,
MarshallingInfoFlag<CodeGenOpts<"ForceAAPCSBitfieldLoad">>;
defm aapcs_bitfield_width : BoolOption<"f", "aapcs-bitfield-width",
CodeGenOpts<"AAPCSBitfieldWidth">, DefaultTrue,
- NegFlag<SetFalse, [], "Do not follow">, PosFlag<SetTrue, [], "Follow">,
- BothFlags<[NoXarchOption, CC1Option], " the AAPCS standard requirement stating that"
+ NegFlag<SetFalse, [], [ClangOption], "Do not follow">,
+ PosFlag<SetTrue, [], [ClangOption], "Follow">,
+ BothFlags<[], [ClangOption, CC1Option],
+ " the AAPCS standard requirement stating that"
" volatile bit-field width is dictated by the field container type. (ARM only).">>,
Group<m_arm_Features_Group>;
-
+let Flags = [TargetSpecific] in {
+def mframe_chain : Joined<["-"], "mframe-chain=">,
+ Group<m_arm_Features_Group>, Values<"none,aapcs,aapcs+leaf">,
+ HelpText<"Select the frame chain model used to emit frame records (Arm only).">;
def mgeneral_regs_only : Flag<["-"], "mgeneral-regs-only">, Group<m_Group>,
HelpText<"Generate code which only uses the general purpose registers (AArch64/x86 only)">;
+def mfix_cmse_cve_2021_35465 : Flag<["-"], "mfix-cmse-cve-2021-35465">,
+ Group<m_arm_Features_Group>,
+ HelpText<"Work around VLLDM erratum CVE-2021-35465 (ARM only)">;
+def mno_fix_cmse_cve_2021_35465 : Flag<["-"], "mno-fix-cmse-cve-2021-35465">,
+ Group<m_arm_Features_Group>,
+ HelpText<"Don't work around VLLDM erratum CVE-2021-35465 (ARM only)">;
+def mfix_cortex_a57_aes_1742098 : Flag<["-"], "mfix-cortex-a57-aes-1742098">,
+ Group<m_arm_Features_Group>,
+ HelpText<"Work around Cortex-A57 Erratum 1742098 (ARM only)">;
+def mno_fix_cortex_a57_aes_1742098 : Flag<["-"], "mno-fix-cortex-a57-aes-1742098">,
+ Group<m_arm_Features_Group>,
+ HelpText<"Don't work around Cortex-A57 Erratum 1742098 (ARM only)">;
+def mfix_cortex_a72_aes_1655431 : Flag<["-"], "mfix-cortex-a72-aes-1655431">,
+ Group<m_arm_Features_Group>,
+ HelpText<"Work around Cortex-A72 Erratum 1655431 (ARM only)">,
+ Alias<mfix_cortex_a57_aes_1742098>;
+def mno_fix_cortex_a72_aes_1655431 : Flag<["-"], "mno-fix-cortex-a72-aes-1655431">,
+ Group<m_arm_Features_Group>,
+ HelpText<"Don't work around Cortex-A72 Erratum 1655431 (ARM only)">,
+ Alias<mno_fix_cortex_a57_aes_1742098>;
def mfix_cortex_a53_835769 : Flag<["-"], "mfix-cortex-a53-835769">,
Group<m_aarch64_Features_Group>,
HelpText<"Workaround Cortex-A53 erratum 835769 (AArch64 only)">;
@@ -3236,6 +4703,11 @@ def mno_fix_cortex_a53_835769 : Flag<["-"], "mno-fix-cortex-a53-835769">,
def mmark_bti_property : Flag<["-"], "mmark-bti-property">,
Group<m_aarch64_Features_Group>,
HelpText<"Add .note.gnu.property with BTI to assembly files (AArch64 only)">;
+def mno_bti_at_return_twice : Flag<["-"], "mno-bti-at-return-twice">,
+ Group<m_arm_Features_Group>,
+ HelpText<"Do not add a BTI instruction after a setjmp or other"
+ " return-twice construct (Arm/AArch64 only)">;
+
foreach i = {1-31} in
def ffixed_x#i : Flag<["-"], "ffixed-x"#i>, Group<m_Group>,
HelpText<"Reserve the x"#i#" register (AArch64/RISC-V only)">;
@@ -3244,25 +4716,41 @@ foreach i = {8-15,18} in
def fcall_saved_x#i : Flag<["-"], "fcall-saved-x"#i>, Group<m_aarch64_Features_Group>,
HelpText<"Make the x"#i#" register call-saved (AArch64 only)">;
-def msve_vector_bits_EQ : Joined<["-"], "msve-vector-bits=">,
- Group<m_aarch64_Features_Group>, Flags<[NoXarchOption,CC1Option]>,
+def msve_vector_bits_EQ : Joined<["-"], "msve-vector-bits=">, Group<m_aarch64_Features_Group>,
+ Visibility<[ClangOption, FlangOption]>,
HelpText<"Specify the size in bits of an SVE vector register. Defaults to the"
- " vector length agnostic value of \"scalable\". (AArch64 only)">,
- Values<"128,256,512,1024,2048,scalable">,
- NormalizedValues<["128", "256", "512", "1024", "2048", "0"]>,
- MarshallingInfoEnum<LangOpts<"ArmSveVectorBits">, "0">;
+ " vector length agnostic value of \"scalable\". (AArch64 only)">;
+} // let Flags = [TargetSpecific]
+
+def mvscale_min_EQ : Joined<["-"], "mvscale-min=">,
+ Visibility<[CC1Option, FC1Option]>,
+ HelpText<"Specify the vscale minimum. Defaults to \"1\". (AArch64/RISC-V only)">,
+ MarshallingInfoInt<LangOpts<"VScaleMin">>;
+def mvscale_max_EQ : Joined<["-"], "mvscale-max=">,
+ Visibility<[CC1Option, FC1Option]>,
+ HelpText<"Specify the vscale maximum. Defaults to the"
+ " vector length agnostic value of \"0\". (AArch64/RISC-V only)">,
+ MarshallingInfoInt<LangOpts<"VScaleMax">>;
def msign_return_address_EQ : Joined<["-"], "msign-return-address=">,
- Flags<[CC1Option]>, Group<m_Group>, Values<"none,all,non-leaf">,
+ Visibility<[ClangOption, CC1Option]>,
+ Group<m_Group>, Values<"none,all,non-leaf">,
HelpText<"Select return address signing scope">;
+let Flags = [TargetSpecific] in {
def mbranch_protection_EQ : Joined<["-"], "mbranch-protection=">,
+ Group<m_Group>,
HelpText<"Enforce targets of indirect branches and function returns">;
-def mharden_sls_EQ : Joined<["-"], "mharden-sls=">,
- HelpText<"Select straight-line speculation hardening scope">;
+def mharden_sls_EQ : Joined<["-"], "mharden-sls=">, Group<m_Group>,
+ HelpText<"Select straight-line speculation hardening scope (ARM/AArch64/X86"
+ " only). <arg> must be: all, none, retbr(ARM/AArch64),"
+ " blr(ARM/AArch64), comdat(ARM/AArch64), nocomdat(ARM/AArch64),"
+ " return(X86), indirect-jmp(X86)">;
def msimd128 : Flag<["-"], "msimd128">, Group<m_wasm_Features_Group>;
def mno_simd128 : Flag<["-"], "mno-simd128">, Group<m_wasm_Features_Group>;
+def mrelaxed_simd : Flag<["-"], "mrelaxed-simd">, Group<m_wasm_Features_Group>;
+def mno_relaxed_simd : Flag<["-"], "mno-relaxed-simd">, Group<m_wasm_Features_Group>;
def mnontrapping_fptoint : Flag<["-"], "mnontrapping-fptoint">, Group<m_wasm_Features_Group>;
def mno_nontrapping_fptoint : Flag<["-"], "mno-nontrapping-fptoint">, Group<m_wasm_Features_Group>;
def msign_ext : Flag<["-"], "msign-ext">, Group<m_wasm_Features_Group>;
@@ -3281,26 +4769,35 @@ def mtail_call : Flag<["-"], "mtail-call">, Group<m_wasm_Features_Group>;
def mno_tail_call : Flag<["-"], "mno-tail-call">, Group<m_wasm_Features_Group>;
def mreference_types : Flag<["-"], "mreference-types">, Group<m_wasm_Features_Group>;
def mno_reference_types : Flag<["-"], "mno-reference-types">, Group<m_wasm_Features_Group>;
+def mextended_const : Flag<["-"], "mextended-const">, Group<m_wasm_Features_Group>;
+def mno_extended_const : Flag<["-"], "mno-extended-const">, Group<m_wasm_Features_Group>;
+def mmultimemory : Flag<["-"], "mmultimemory">, Group<m_wasm_Features_Group>;
+def mno_multimemory : Flag<["-"], "mno-multimemory">, Group<m_wasm_Features_Group>;
def mexec_model_EQ : Joined<["-"], "mexec-model=">, Group<m_wasm_Features_Driver_Group>,
Values<"command,reactor">,
- HelpText<"Execution model (WebAssembly only)">;
+ HelpText<"Execution model (WebAssembly only)">,
+ DocBrief<"Select between \"command\" and \"reactor\" executable models. "
+ "Commands have a main-function which scopes the lifetime of the "
+ "program. Reactors are activated and remain active until "
+ "explicitly terminated.">;
+} // let Flags = [TargetSpecific]
defm amdgpu_ieee : BoolOption<"m", "amdgpu-ieee",
CodeGenOpts<"EmitIEEENaNCompliantInsts">, DefaultTrue,
- PosFlag<SetTrue, [], "Sets the IEEE bit in the expected default floating point "
+ PosFlag<SetTrue, [], [ClangOption], "Sets the IEEE bit in the expected default floating point "
" mode register. Floating point opcodes that support exception flag "
"gathering quiet and propagate signaling NaN inputs per IEEE 754-2008. "
"This option changes the ABI. (AMDGPU only)">,
- NegFlag<SetFalse, [CC1Option]>>, Group<m_Group>;
+ NegFlag<SetFalse, [], [ClangOption, CC1Option]>>, Group<m_Group>;
def mcode_object_version_EQ : Joined<["-"], "mcode-object-version=">, Group<m_Group>,
- HelpText<"Specify code object ABI version. Defaults to 3. (AMDGPU only)">,
- MetaVarName<"<version>">, Values<"2,3,4">;
+ HelpText<"Specify code object ABI version. Defaults to 5. (AMDGPU only)">,
+ Visibility<[ClangOption, FlangOption, CC1Option, FC1Option]>,
+ Values<"none,4,5">,
+ NormalizedValuesScope<"llvm::CodeObjectVersionKind">,
+ NormalizedValues<["COV_None", "COV_4", "COV_5"]>,
+ MarshallingInfoEnum<TargetOpts<"CodeObjectVersion">, "COV_5">;
-defm code_object_v3_legacy : SimpleMFlag<"code-object-v3",
- "Legacy option to specify code object ABI V3",
- "Legacy option to specify code object ABI V2",
- " (AMDGPU only)">;
defm cumode : SimpleMFlag<"cumode",
"Specify CU wavefront", "Specify WGP wavefront",
" execution mode (AMDGPU only)", m_amdgpu_Features_Group>;
@@ -3312,12 +4809,18 @@ defm wavefrontsize64 : SimpleMFlag<"wavefrontsize64",
defm unsafe_fp_atomics : BoolOption<"m", "unsafe-fp-atomics",
TargetOpts<"AllowAMDGPUUnsafeFPAtomics">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Enable unsafe floating point atomic instructions (AMDGPU only)">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Enable generation of unsafe floating point "
+ "atomic instructions. May generate more efficient code, but may not "
+ "respect rounding and denormal modes, and may give incorrect results "
+ "for certain memory destinations. (AMDGPU only)">,
NegFlag<SetFalse>>, Group<m_Group>;
-def faltivec : Flag<["-"], "faltivec">, Group<f_Group>, Flags<[NoXarchOption]>;
-def fno_altivec : Flag<["-"], "fno-altivec">, Group<f_Group>, Flags<[NoXarchOption]>;
-def maltivec : Flag<["-"], "maltivec">, Group<m_ppc_Features_Group>;
+def faltivec : Flag<["-"], "faltivec">, Group<f_Group>;
+def fno_altivec : Flag<["-"], "fno-altivec">, Group<f_Group>;
+let Flags = [TargetSpecific] in {
+def maltivec : Flag<["-"], "maltivec">, Group<m_ppc_Features_Group>,
+ HelpText<"Enable AltiVec vector initializer syntax">;
def mno_altivec : Flag<["-"], "mno-altivec">, Group<m_ppc_Features_Group>;
def mpcrel: Flag<["-"], "mpcrel">, Group<m_ppc_Features_Group>;
def mno_pcrel: Flag<["-"], "mno-pcrel">, Group<m_ppc_Features_Group>;
@@ -3326,11 +4829,12 @@ def mno_prefixed: Flag<["-"], "mno-prefixed">, Group<m_ppc_Features_Group>;
def mspe : Flag<["-"], "mspe">, Group<m_ppc_Features_Group>;
def mno_spe : Flag<["-"], "mno-spe">, Group<m_ppc_Features_Group>;
def mefpu2 : Flag<["-"], "mefpu2">, Group<m_ppc_Features_Group>;
-def mabi_EQ_vec_extabi : Flag<["-"], "mabi=vec-extabi">, Group<m_Group>, Flags<[CC1Option]>,
- HelpText<"Enable the extended Altivec ABI on AIX (AIX only). Uses volatile and nonvolatile vector registers">,
- MarshallingInfoFlag<LangOpts<"EnableAIXExtendedAltivecABI">>;
-def mabi_EQ_vec_default : Flag<["-"], "mabi=vec-default">, Group<m_Group>, Flags<[CC1Option]>,
- HelpText<"Enable the default Altivec ABI on AIX (AIX only). Uses only volatile vector registers.">;
+} // let Flags = [TargetSpecific]
+def mabi_EQ_quadword_atomics : Flag<["-"], "mabi=quadword-atomics">,
+ Group<m_Group>, Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Enable quadword atomics ABI on AIX (AIX PPC64 only). Uses lqarx/stqcx. instructions.">,
+ MarshallingInfoFlag<LangOpts<"EnableAIXQuadwordAtomicsABI">>;
+let Flags = [TargetSpecific] in {
def mvsx : Flag<["-"], "mvsx">, Group<m_ppc_Features_Group>;
def mno_vsx : Flag<["-"], "mno-vsx">, Group<m_ppc_Features_Group>;
def msecure_plt : Flag<["-"], "msecure-plt">, Group<m_ppc_Features_Group>;
@@ -3372,7 +4876,11 @@ def mno_mfocrf : Flag<["-"], "mno-mfocrf">, Group<m_ppc_Features_Group>;
def mno_mfcrf : Flag<["-"], "mno-mfcrf">, Alias<mno_mfocrf>;
def mpopcntd : Flag<["-"], "mpopcntd">, Group<m_ppc_Features_Group>;
def mno_popcntd : Flag<["-"], "mno-popcntd">, Group<m_ppc_Features_Group>;
-def mcrbits : Flag<["-"], "mcrbits">, Group<m_ppc_Features_Group>;
+def mcrbits : Flag<["-"], "mcrbits">, Group<m_ppc_Features_Group>,
+ HelpText<"Control the CR-bit tracking feature on PowerPC. ``-mcrbits`` "
+ "(the enablement of CR-bit tracking support) is the default for "
+ "POWER8 and above, as well as for all other CPUs when "
+ "optimization is applied (-O2 and above).">;
def mno_crbits : Flag<["-"], "mno-crbits">, Group<m_ppc_Features_Group>;
def minvariant_function_descriptors :
Flag<["-"], "minvariant-function-descriptors">, Group<m_ppc_Features_Group>;
@@ -3393,30 +4901,57 @@ def mrop_protect : Flag<["-"], "mrop-protect">,
Group<m_ppc_Features_Group>;
def mprivileged : Flag<["-"], "mprivileged">,
Group<m_ppc_Features_Group>;
+
+defm regnames : BoolOption<"m", "regnames",
+ CodeGenOpts<"PPCUseFullRegisterNames">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Use full register names when writing assembly output">,
+ NegFlag<SetFalse, [], [ClangOption], "Use only register numbers when writing assembly output">>,
+ Group<m_Group>;
+} // let Flags = [TargetSpecific]
+def maix_small_local_exec_tls : Flag<["-"], "maix-small-local-exec-tls">,
+ Group<m_ppc_Features_Group>,
+ HelpText<"Produce a faster access sequence for local-exec TLS variables "
+ "where the offset from the TLS base is encoded as an "
+ "immediate operand (AIX 64-bit only). "
+ "This access sequence is not used for variables larger than 32KB.">;
def maix_struct_return : Flag<["-"], "maix-struct-return">,
- Group<m_Group>, Flags<[CC1Option]>,
- HelpText<"Return all structs in memory (PPC32 only)">;
+ Group<m_Group>, Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Return all structs in memory (PPC32 only)">,
+ DocBrief<"Override the default ABI for 32-bit targets to return all "
+ "structs in memory, as in the Power 32-bit ABI for Linux (2011), "
+ "and on AIX and Darwin.">;
def msvr4_struct_return : Flag<["-"], "msvr4-struct-return">,
- Group<m_Group>, Flags<[CC1Option]>,
- HelpText<"Return small structs in registers (PPC32 only)">;
-
+ Group<m_Group>, Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Return small structs in registers (PPC32 only)">,
+ DocBrief<"Override the default ABI for 32-bit targets to return small "
+ "structs in registers, as in the System V ABI (1995).">;
+def mxcoff_roptr : Flag<["-"], "mxcoff-roptr">, Group<m_Group>,
+ Flags<[TargetSpecific]>, Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Place constant objects with relocatable address values in the RO data section and add -bforceimprw to the linker flags (AIX only)">;
+def mno_xcoff_roptr : Flag<["-"], "mno-xcoff-roptr">, Group<m_Group>, TargetSpecific;
+
+let Flags = [TargetSpecific] in {
def mvx : Flag<["-"], "mvx">, Group<m_Group>;
def mno_vx : Flag<["-"], "mno-vx">, Group<m_Group>;
+} // let Flags = [TargetSpecific]
defm zvector : BoolFOption<"zvector",
LangOpts<"ZVector">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Enable System z vector language extension">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Enable System z vector language extension">,
NegFlag<SetFalse>>;
def mzvector : Flag<["-"], "mzvector">, Alias<fzvector>;
def mno_zvector : Flag<["-"], "mno-zvector">, Alias<fno_zvector>;
+def mxcoff_build_id_EQ : Joined<["-"], "mxcoff-build-id=">, Group<Link_Group>, MetaVarName<"<0xHEXSTRING>">,
+ HelpText<"On AIX, request creation of a build-id string, \"0xHEXSTRING\", in the string table of the loader section inside the linked binary">;
def mignore_xcoff_visibility : Flag<["-"], "mignore-xcoff-visibility">, Group<m_Group>,
HelpText<"Not emit the visibility attribute for asm in AIX OS or give all symbols 'unspecified' visibility in XCOFF object file">,
- Flags<[CC1Option]>;
+ Flags<[TargetSpecific]>, Visibility<[ClangOption, CC1Option]>;
defm backchain : BoolOption<"m", "backchain",
CodeGenOpts<"Backchain">, DefaultFalse,
- PosFlag<SetTrue, [], "Link stack frames through backchain on System Z">,
- NegFlag<SetFalse>, BothFlags<[NoXarchOption,CC1Option]>>, Group<m_Group>;
+ PosFlag<SetTrue, [], [ClangOption], "Link stack frames through backchain on System Z">,
+ NegFlag<SetFalse>, BothFlags<[], [ClangOption, CC1Option]>>, Group<m_Group>;
def mno_warn_nonportable_cfstrings : Flag<["-"], "mno-warn-nonportable-cfstrings">, Group<m_Group>;
def mno_omit_leaf_frame_pointer : Flag<["-"], "mno-omit-leaf-frame-pointer">, Group<m_Group>;
@@ -3428,57 +4963,89 @@ def mred_zone : Flag<["-"], "mred-zone">, Group<m_Group>;
def mtls_direct_seg_refs : Flag<["-"], "mtls-direct-seg-refs">, Group<m_Group>,
HelpText<"Enable direct TLS access through segment registers (default)">;
def mregparm_EQ : Joined<["-"], "mregparm=">, Group<m_Group>;
-def mrelax_all : Flag<["-"], "mrelax-all">, Group<m_Group>, Flags<[CC1Option,CC1AsOption]>,
+def mrelax_all : Flag<["-"], "mrelax-all">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option, CC1AsOption]>,
HelpText<"(integrated-as) Relax all machine instructions">,
MarshallingInfoFlag<CodeGenOpts<"RelaxAll">>;
def mincremental_linker_compatible : Flag<["-"], "mincremental-linker-compatible">, Group<m_Group>,
- Flags<[CC1Option,CC1AsOption]>,
+ Visibility<[ClangOption, CC1Option, CC1AsOption]>,
HelpText<"(integrated-as) Emit an object file which can be used with an incremental linker">,
MarshallingInfoFlag<CodeGenOpts<"IncrementalLinkerCompatible">>;
def mno_incremental_linker_compatible : Flag<["-"], "mno-incremental-linker-compatible">, Group<m_Group>,
HelpText<"(integrated-as) Emit an object file which cannot be used with an incremental linker">;
-def mrtd : Flag<["-"], "mrtd">, Group<m_Group>, Flags<[CC1Option]>,
+def mrtd : Flag<["-"], "mrtd">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Make StdCall calling convention the default">;
def msmall_data_threshold_EQ : Joined <["-"], "msmall-data-threshold=">,
Group<m_Group>, Alias<G>;
-def msoft_float : Flag<["-"], "msoft-float">, Group<m_Group>, Flags<[CC1Option]>,
+def msoft_float : Flag<["-"], "msoft-float">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Use software floating point">,
MarshallingInfoFlag<CodeGenOpts<"SoftFloat">>;
-def moutline_atomics : Flag<["-"], "moutline-atomics">, Group<f_clang_Group>, Flags<[CC1Option]>,
+def mno_fmv : Flag<["-"], "mno-fmv">, Group<f_clang_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Disable function multiversioning">;
+def moutline_atomics : Flag<["-"], "moutline-atomics">, Group<f_clang_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Generate local calls to out-of-line atomic operations">;
-def mno_outline_atomics : Flag<["-"], "mno-outline-atomics">, Group<f_clang_Group>, Flags<[CC1Option]>,
+def mno_outline_atomics : Flag<["-"], "mno-outline-atomics">, Group<f_clang_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Don't generate local calls to out-of-line atomic operations">;
def mno_implicit_float : Flag<["-"], "mno-implicit-float">, Group<m_Group>,
- HelpText<"Don't generate implicit floating point instructions">;
+ HelpText<"Don't generate implicit floating point or vector instructions">;
def mimplicit_float : Flag<["-"], "mimplicit-float">, Group<m_Group>;
-def mrecip : Flag<["-"], "mrecip">, Group<m_Group>;
-def mrecip_EQ : CommaJoined<["-"], "mrecip=">, Group<m_Group>, Flags<[CC1Option]>,
+def mrecip : Flag<["-"], "mrecip">, Group<m_Group>,
+ HelpText<"Equivalent to '-mrecip=all'">;
+def mrecip_EQ : CommaJoined<["-"], "mrecip=">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Control use of approximate reciprocal and reciprocal square root instructions followed by <n> iterations of "
+ "Newton-Raphson refinement. "
+ "<value> = ( ['!'] ['vec-'] ('rcp'|'sqrt') [('h'|'s'|'d')] [':'<n>] ) | 'all' | 'default' | 'none'">,
MarshallingInfoStringVector<CodeGenOpts<"Reciprocals">>;
-def mprefer_vector_width_EQ : Joined<["-"], "mprefer-vector-width=">, Group<m_Group>, Flags<[CC1Option]>,
+def mprefer_vector_width_EQ : Joined<["-"], "mprefer-vector-width=">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Specifies preferred vector width for auto-vectorization. Defaults to 'none' which allows target specific decisions.">,
MarshallingInfoString<CodeGenOpts<"PreferVectorWidth">>;
-def mstack_protector_guard_EQ : Joined<["-"], "mstack-protector-guard=">, Group<m_Group>, Flags<[CC1Option]>,
+def mstack_protector_guard_EQ : Joined<["-"], "mstack-protector-guard=">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Use the given guard (global, tls) for addressing the stack-protector guard">,
MarshallingInfoString<CodeGenOpts<"StackProtectorGuard">>;
-def mstack_protector_guard_offset_EQ : Joined<["-"], "mstack-protector-guard-offset=">, Group<m_Group>, Flags<[CC1Option]>,
+def mstack_protector_guard_offset_EQ : Joined<["-"], "mstack-protector-guard-offset=">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Use the given offset for addressing the stack-protector guard">,
MarshallingInfoInt<CodeGenOpts<"StackProtectorGuardOffset">, "INT_MAX", "int">;
-def mstack_protector_guard_reg_EQ : Joined<["-"], "mstack-protector-guard-reg=">, Group<m_Group>, Flags<[CC1Option]>,
+def mstack_protector_guard_symbol_EQ : Joined<["-"], "mstack-protector-guard-symbol=">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Use the given symbol for addressing the stack-protector guard">,
+ MarshallingInfoString<CodeGenOpts<"StackProtectorGuardSymbol">>;
+def mstack_protector_guard_reg_EQ : Joined<["-"], "mstack-protector-guard-reg=">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Use the given reg for addressing the stack-protector guard">,
MarshallingInfoString<CodeGenOpts<"StackProtectorGuardReg">>;
def mfentry : Flag<["-"], "mfentry">, HelpText<"Insert calls to fentry at function entry (x86/SystemZ only)">,
- Flags<[CC1Option]>, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>, Group<m_Group>,
MarshallingInfoFlag<CodeGenOpts<"CallFEntry">>;
+def mlsx : Flag<["-"], "mlsx">, Group<m_loongarch_Features_Group>,
+ HelpText<"Enable Loongson SIMD Extension (LSX).">;
+def mno_lsx : Flag<["-"], "mno-lsx">, Group<m_loongarch_Features_Group>,
+ HelpText<"Disable Loongson SIMD Extension (LSX).">;
+def mlasx : Flag<["-"], "mlasx">, Group<m_loongarch_Features_Group>,
+ HelpText<"Enable Loongson Advanced SIMD Extension (LASX).">;
+def mno_lasx : Flag<["-"], "mno-lasx">, Group<m_loongarch_Features_Group>,
+ HelpText<"Disable Loongson Advanced SIMD Extension (LASX).">;
def mnop_mcount : Flag<["-"], "mnop-mcount">, HelpText<"Generate mcount/__fentry__ calls as nops. To activate they need to be patched in.">,
- Flags<[CC1Option]>, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>, Group<m_Group>,
MarshallingInfoFlag<CodeGenOpts<"MNopMCount">>;
def mrecord_mcount : Flag<["-"], "mrecord-mcount">, HelpText<"Generate a __mcount_loc section entry for each __fentry__ call.">,
- Flags<[CC1Option]>, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>, Group<m_Group>,
MarshallingInfoFlag<CodeGenOpts<"RecordMCount">>;
def mpacked_stack : Flag<["-"], "mpacked-stack">, HelpText<"Use packed stack layout (SystemZ only).">,
- Flags<[CC1Option]>, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>, Group<m_Group>,
MarshallingInfoFlag<CodeGenOpts<"PackedStack">>;
-def mno_packed_stack : Flag<["-"], "mno-packed-stack">, Flags<[CC1Option]>, Group<m_Group>;
+def mno_packed_stack : Flag<["-"], "mno-packed-stack">,
+ Visibility<[ClangOption, CC1Option]>, Group<m_Group>;
+
+let Flags = [TargetSpecific] in {
def mips16 : Flag<["-"], "mips16">, Group<m_mips_Features_Group>;
def mno_mips16 : Flag<["-"], "mno-mips16">, Group<m_mips_Features_Group>;
def mmicromips : Flag<["-"], "mmicromips">, Group<m_mips_Features_Group>;
@@ -3491,12 +5058,15 @@ def mcheck_zero_division : Flag<["-"], "mcheck-zero-division">,
Group<m_mips_Features_Group>;
def mno_check_zero_division : Flag<["-"], "mno-check-zero-division">,
Group<m_mips_Features_Group>;
+def mfix4300 : Flag<["-"], "mfix4300">, Group<m_mips_Features_Group>;
def mcompact_branches_EQ : Joined<["-"], "mcompact-branches=">,
Group<m_mips_Features_Group>;
+} // let Flags = [TargetSpecific]
def mbranch_likely : Flag<["-"], "mbranch-likely">, Group<m_Group>,
IgnoredGCCCompat;
def mno_branch_likely : Flag<["-"], "mno-branch-likely">, Group<m_Group>,
IgnoredGCCCompat;
+let Flags = [TargetSpecific] in {
def mindirect_jump_EQ : Joined<["-"], "mindirect-jump=">,
Group<m_mips_Features_Group>,
HelpText<"Change indirect jump instructions to inhibit speculation">;
@@ -3504,8 +5074,8 @@ def mdsp : Flag<["-"], "mdsp">, Group<m_mips_Features_Group>;
def mno_dsp : Flag<["-"], "mno-dsp">, Group<m_mips_Features_Group>;
def mdspr2 : Flag<["-"], "mdspr2">, Group<m_mips_Features_Group>;
def mno_dspr2 : Flag<["-"], "mno-dspr2">, Group<m_mips_Features_Group>;
-def msingle_float : Flag<["-"], "msingle-float">, Group<m_mips_Features_Group>;
-def mdouble_float : Flag<["-"], "mdouble-float">, Group<m_mips_Features_Group>;
+def msingle_float : Flag<["-"], "msingle-float">, Group<m_Group>;
+def mdouble_float : Flag<["-"], "mdouble-float">, Group<m_Group>;
def mmadd4 : Flag<["-"], "mmadd4">, Group<m_mips_Features_Group>,
HelpText<"Enable the generation of 4-operand madd.s, madd.d and related instructions.">;
def mno_madd4 : Flag<["-"], "mno-madd4">, Group<m_mips_Features_Group>,
@@ -3562,6 +5132,7 @@ def mvirt : Flag<["-"], "mvirt">, Group<m_mips_Features_Group>;
def mno_virt : Flag<["-"], "mno-virt">, Group<m_mips_Features_Group>;
def mginv : Flag<["-"], "mginv">, Group<m_mips_Features_Group>;
def mno_ginv : Flag<["-"], "mno-ginv">, Group<m_mips_Features_Group>;
+} // let Flags = [TargetSpecific]
def mips1 : Flag<["-"], "mips1">,
Alias<march_EQ>, AliasArgs<["mips1"]>, Group<m_mips_Features_Group>,
HelpText<"Equivalent to -march=mips1">, Flags<[HelpHidden]>;
@@ -3626,59 +5197,88 @@ def mno_relax_pic_calls : Flag<["-"], "mno-relax-pic-calls">,
"call sequences into direct calls (MIPS only)">, Flags<[HelpHidden]>;
def mglibc : Flag<["-"], "mglibc">, Group<m_libc_Group>, Flags<[HelpHidden]>;
def muclibc : Flag<["-"], "muclibc">, Group<m_libc_Group>, Flags<[HelpHidden]>;
-def module_file_info : Flag<["-"], "module-file-info">, Flags<[NoXarchOption,CC1Option]>, Group<Action_Group>,
+def module_file_info : Flag<["-"], "module-file-info">, Flags<[]>,
+ Visibility<[ClangOption, CC1Option]>, Group<Action_Group>,
HelpText<"Provide information about a particular module file">;
def mthumb : Flag<["-"], "mthumb">, Group<m_Group>;
def mtune_EQ : Joined<["-"], "mtune=">, Group<m_Group>,
- HelpText<"Only supported on X86 and RISC-V. Otherwise accepted for compatibility with GCC.">;
+ HelpText<"Only supported on AArch64, PowerPC, RISC-V, SPARC, SystemZ, and X86">;
def multi__module : Flag<["-"], "multi_module">;
def multiply__defined__unused : Separate<["-"], "multiply_defined_unused">;
def multiply__defined : Separate<["-"], "multiply_defined">;
def mwarn_nonportable_cfstrings : Flag<["-"], "mwarn-nonportable-cfstrings">, Group<m_Group>;
-def no_canonical_prefixes : Flag<["-"], "no-canonical-prefixes">, Flags<[HelpHidden, CoreOption]>,
- HelpText<"Use relative instead of canonical paths">;
+def canonical_prefixes : Flag<["-"], "canonical-prefixes">,
+ Flags<[HelpHidden]>, Visibility<[ClangOption, CLOption, DXCOption]>,
+ HelpText<"Use absolute paths for invoking subcommands (default)">;
+def no_canonical_prefixes : Flag<["-"], "no-canonical-prefixes">,
+ Flags<[HelpHidden]>, Visibility<[ClangOption, CLOption, DXCOption]>,
+ HelpText<"Use relative paths for invoking subcommands">;
def no_cpp_precomp : Flag<["-"], "no-cpp-precomp">, Group<clang_ignored_f_Group>;
-def no_integrated_cpp : Flag<["-", "--"], "no-integrated-cpp">, Flags<[NoXarchOption]>;
+def no_integrated_cpp : Flag<["-", "--"], "no-integrated-cpp">;
def no_pedantic : Flag<["-", "--"], "no-pedantic">, Group<pedantic_Group>;
def no__dead__strip__inits__and__terms : Flag<["-"], "no_dead_strip_inits_and_terms">;
-def nobuiltininc : Flag<["-"], "nobuiltininc">, Flags<[CC1Option, CoreOption]>,
+def nobuiltininc : Flag<["-"], "nobuiltininc">,
+ Visibility<[ClangOption, CC1Option, CLOption, DXCOption]>,
+ Group<IncludePath_Group>,
HelpText<"Disable builtin #include directories">,
MarshallingInfoNegativeFlag<HeaderSearchOpts<"UseBuiltinIncludes">>;
-def nogpuinc : Flag<["-"], "nogpuinc">, HelpText<"Do not add include paths for CUDA/HIP and"
+def nogpuinc : Flag<["-"], "nogpuinc">, Group<IncludePath_Group>,
+ HelpText<"Do not add include paths for CUDA/HIP and"
" do not include the default CUDA/HIP wrapper headers">;
+def nohipwrapperinc : Flag<["-"], "nohipwrapperinc">, Group<IncludePath_Group>,
+ HelpText<"Do not include the default HIP wrapper headers and include paths">;
def : Flag<["-"], "nocudainc">, Alias<nogpuinc>;
-def nogpulib : Flag<["-"], "nogpulib">,
+def nogpulib : Flag<["-"], "nogpulib">, MarshallingInfoFlag<LangOpts<"NoGPULib">>,
+ Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
HelpText<"Do not link device library for CUDA/HIP device compilation">;
def : Flag<["-"], "nocudalib">, Alias<nogpulib>;
+def gpulibc : Flag<["-"], "gpulibc">, Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
+ HelpText<"Link the LLVM C Library for GPUs">;
+def nogpulibc : Flag<["-"], "nogpulibc">, Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>;
def nodefaultlibs : Flag<["-"], "nodefaultlibs">;
+def nodriverkitlib : Flag<["-"], "nodriverkitlib">;
def nofixprebinding : Flag<["-"], "nofixprebinding">;
def nolibc : Flag<["-"], "nolibc">;
def nomultidefs : Flag<["-"], "nomultidefs">;
-def nopie : Flag<["-"], "nopie">;
-def no_pie : Flag<["-"], "no-pie">, Alias<nopie>;
+def nopie : Flag<["-"], "nopie">, Visibility<[ClangOption, FlangOption]>, Flags<[TargetSpecific]>; // OpenBSD
+def no_pie : Flag<["-"], "no-pie">, Visibility<[ClangOption, FlangOption]>;
def noprebind : Flag<["-"], "noprebind">;
def noprofilelib : Flag<["-"], "noprofilelib">;
def noseglinkedit : Flag<["-"], "noseglinkedit">;
def nostartfiles : Flag<["-"], "nostartfiles">, Group<Link_Group>;
-def nostdinc : Flag<["-"], "nostdinc">, Flags<[CoreOption]>;
-def nostdlibinc : Flag<["-"], "nostdlibinc">;
-def nostdincxx : Flag<["-"], "nostdinc++">, Flags<[CC1Option]>,
+def nostdinc : Flag<["-"], "nostdinc">,
+ Visibility<[ClangOption, CLOption, DXCOption]>, Group<IncludePath_Group>;
+def nostdlibinc : Flag<["-"], "nostdlibinc">, Group<IncludePath_Group>;
+def nostdincxx : Flag<["-"], "nostdinc++">, Visibility<[ClangOption, CC1Option]>,
+ Group<IncludePath_Group>,
HelpText<"Disable standard #include directories for the C++ standard library">,
MarshallingInfoNegativeFlag<HeaderSearchOpts<"UseStandardCXXIncludes">>;
def nostdlib : Flag<["-"], "nostdlib">, Group<Link_Group>;
def nostdlibxx : Flag<["-"], "nostdlib++">;
def object : Flag<["-"], "object">;
-def o : JoinedOrSeparate<["-"], "o">, Flags<[NoXarchOption, RenderAsInput,
- CC1Option, CC1AsOption, FC1Option, FlangOption]>,
+def o : JoinedOrSeparate<["-"], "o">,
+ Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CC1Option, CC1AsOption, FC1Option, FlangOption]>,
HelpText<"Write output to <file>">, MetaVarName<"<file>">,
MarshallingInfoString<FrontendOpts<"OutputFile">>;
+def object_file_name_EQ : Joined<["-"], "object-file-name=">,
+ Visibility<[ClangOption, CC1Option, CC1AsOption, CLOption, DXCOption]>,
+ HelpText<"Set the output <file> for debug infos">, MetaVarName<"<file>">,
+ MarshallingInfoString<CodeGenOpts<"ObjectFilenameForDebug">>;
+def object_file_name : Separate<["-"], "object-file-name">,
+ Visibility<[ClangOption, CC1Option, CC1AsOption, CLOption, DXCOption]>,
+ Alias<object_file_name_EQ>;
def pagezero__size : JoinedOrSeparate<["-"], "pagezero_size">;
def pass_exit_codes : Flag<["-", "--"], "pass-exit-codes">, Flags<[Unsupported]>;
-def pedantic_errors : Flag<["-", "--"], "pedantic-errors">, Group<pedantic_Group>, Flags<[CC1Option]>,
+def pedantic_errors : Flag<["-", "--"], "pedantic-errors">, Group<pedantic_Group>,
+ Visibility<[ClangOption, CC1Option]>,
MarshallingInfoFlag<DiagnosticOpts<"PedanticErrors">>;
-def pedantic : Flag<["-", "--"], "pedantic">, Group<pedantic_Group>, Flags<[CC1Option,FlangOption,FC1Option]>,
+def pedantic : Flag<["-", "--"], "pedantic">, Group<pedantic_Group>,
+ Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
HelpText<"Warn on language extensions">, MarshallingInfoFlag<DiagnosticOpts<"Pedantic">>;
-def pg : Flag<["-"], "pg">, HelpText<"Enable mcount instrumentation">, Flags<[CC1Option]>,
+def p : Flag<["-"], "p">, HelpText<"Enable mcount instrumentation with prof">;
+def pg : Flag<["-"], "pg">, HelpText<"Enable mcount instrumentation">,
+ Visibility<[ClangOption, CC1Option]>,
MarshallingInfoFlag<CodeGenOpts<"InstrumentForProfiling">>;
def pipe : Flag<["-", "--"], "pipe">,
HelpText<"Use pipes between commands, when possible">;
@@ -3686,69 +5286,107 @@ def prebind__all__twolevel__modules : Flag<["-"], "prebind_all_twolevel_modules"
def prebind : Flag<["-"], "prebind">;
def preload : Flag<["-"], "preload">;
def print_file_name_EQ : Joined<["-", "--"], "print-file-name=">,
- HelpText<"Print the full library path of <file>">, MetaVarName<"<file>">;
-def print_ivar_layout : Flag<["-"], "print-ivar-layout">, Flags<[CC1Option]>,
+ HelpText<"Print the full library path of <file>">, MetaVarName<"<file>">,
+ Visibility<[ClangOption, CLOption]>;
+def print_ivar_layout : Flag<["-"], "print-ivar-layout">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable Objective-C Ivar layout bitmap print trace">,
MarshallingInfoFlag<LangOpts<"ObjCGCBitmapPrint">>;
def print_libgcc_file_name : Flag<["-", "--"], "print-libgcc-file-name">,
HelpText<"Print the library path for the currently used compiler runtime "
- "library (\"libgcc.a\" or \"libclang_rt.builtins.*.a\")">;
+ "library (\"libgcc.a\" or \"libclang_rt.builtins.*.a\")">,
+ Visibility<[ClangOption, CLOption]>;
def print_multi_directory : Flag<["-", "--"], "print-multi-directory">;
def print_multi_lib : Flag<["-", "--"], "print-multi-lib">;
+def print_multi_flags : Flag<["-", "--"], "print-multi-flags-experimental">,
+ HelpText<"Print the flags used for selecting multilibs (experimental)">;
def print_multi_os_directory : Flag<["-", "--"], "print-multi-os-directory">,
Flags<[Unsupported]>;
def print_target_triple : Flag<["-", "--"], "print-target-triple">,
- HelpText<"Print the normalized target triple">;
+ HelpText<"Print the normalized target triple">,
+ Visibility<[ClangOption, FlangOption, CLOption]>;
def print_effective_triple : Flag<["-", "--"], "print-effective-triple">,
- HelpText<"Print the effective target triple">;
-def print_multiarch : Flag<["-", "--"], "print-multiarch">,
- HelpText<"Print the multiarch target triple">;
+ HelpText<"Print the effective target triple">,
+ Visibility<[ClangOption, FlangOption, CLOption]>;
+// GCC --disable-multiarch, GCC --enable-multiarch (upstream and Debian
+// specific) have different behaviors. We choose not to support the option.
+def : Flag<["-", "--"], "print-multiarch">, Flags<[Unsupported]>;
def print_prog_name_EQ : Joined<["-", "--"], "print-prog-name=">,
- HelpText<"Print the full program path of <name>">, MetaVarName<"<name>">;
+ HelpText<"Print the full program path of <name>">, MetaVarName<"<name>">,
+ Visibility<[ClangOption, CLOption]>;
def print_resource_dir : Flag<["-", "--"], "print-resource-dir">,
- HelpText<"Print the resource directory pathname">;
+ HelpText<"Print the resource directory pathname">,
+ Visibility<[ClangOption, CLOption]>;
def print_search_dirs : Flag<["-", "--"], "print-search-dirs">,
- HelpText<"Print the paths used for finding libraries and programs">;
+ HelpText<"Print the paths used for finding libraries and programs">,
+ Visibility<[ClangOption, CLOption]>;
def print_targets : Flag<["-", "--"], "print-targets">,
- HelpText<"Print the registered targets">;
+ HelpText<"Print the registered targets">,
+ Visibility<[ClangOption, CLOption]>;
def print_rocm_search_dirs : Flag<["-", "--"], "print-rocm-search-dirs">,
- HelpText<"Print the paths used for finding ROCm installation">;
+ HelpText<"Print the paths used for finding ROCm installation">,
+ Visibility<[ClangOption, CLOption]>;
def print_runtime_dir : Flag<["-", "--"], "print-runtime-dir">,
- HelpText<"Print the directory pathname containing clangs runtime libraries">;
+ HelpText<"Print the directory pathname containing clangs runtime libraries">,
+ Visibility<[ClangOption, CLOption]>;
+def print_diagnostic_options : Flag<["-", "--"], "print-diagnostic-options">,
+ HelpText<"Print all of Clang's warning options">,
+ Visibility<[ClangOption, CLOption]>;
def private__bundle : Flag<["-"], "private_bundle">;
def pthreads : Flag<["-"], "pthreads">;
defm pthread : BoolOption<"", "pthread",
LangOpts<"POSIXThreads">, DefaultFalse,
- PosFlag<SetTrue, [], "Support POSIX threads in generated code">,
- NegFlag<SetFalse>, BothFlags<[CC1Option]>>;
-def p : Flag<["-"], "p">;
+ PosFlag<SetTrue, [], [ClangOption], "Support POSIX threads in generated code">,
+ NegFlag<SetFalse>,
+ BothFlags<[], [ClangOption, CC1Option, FlangOption, FC1Option]>>;
def pie : Flag<["-"], "pie">, Group<Link_Group>;
def static_pie : Flag<["-"], "static-pie">, Group<Link_Group>;
def read__only__relocs : Separate<["-"], "read_only_relocs">;
def remap : Flag<["-"], "remap">;
-def rewrite_objc : Flag<["-"], "rewrite-objc">, Flags<[NoXarchOption,CC1Option]>,
+def rewrite_objc : Flag<["-"], "rewrite-objc">, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Rewrite Objective-C source to C++">, Group<Action_Group>;
-def rewrite_legacy_objc : Flag<["-"], "rewrite-legacy-objc">, Flags<[NoXarchOption]>,
+def rewrite_legacy_objc : Flag<["-"], "rewrite-legacy-objc">,
+ Flags<[NoXarchOption]>,
HelpText<"Rewrite Legacy Objective-C source to C++">;
-def rdynamic : Flag<["-"], "rdynamic">, Group<Link_Group>;
+def rdynamic : Flag<["-"], "rdynamic">, Group<Link_Group>,
+ Visibility<[ClangOption, FlangOption]>;
def resource_dir : Separate<["-"], "resource-dir">,
- Flags<[NoXarchOption, CC1Option, CoreOption, HelpHidden]>,
+ Flags<[NoXarchOption, HelpHidden]>,
+ Visibility<[ClangOption, CC1Option, CLOption, DXCOption]>,
HelpText<"The directory which holds the compiler resource files">,
MarshallingInfoString<HeaderSearchOpts<"ResourceDir">>;
-def resource_dir_EQ : Joined<["-"], "resource-dir=">, Flags<[NoXarchOption, CoreOption]>,
+def resource_dir_EQ : Joined<["-"], "resource-dir=">, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CLOption, DXCOption]>,
Alias<resource_dir>;
-def rpath : Separate<["-"], "rpath">, Flags<[LinkerInput]>, Group<Link_Group>;
-def rtlib_EQ : Joined<["-", "--"], "rtlib=">,
+def rpath : Separate<["-"], "rpath">, Flags<[LinkerInput]>, Group<Link_Group>,
+ Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>;
+def rtlib_EQ : Joined<["-", "--"], "rtlib=">, Visibility<[ClangOption, CLOption]>,
HelpText<"Compiler runtime library to use">;
def frtlib_add_rpath: Flag<["-"], "frtlib-add-rpath">, Flags<[NoArgumentUnused]>,
- HelpText<"Add -rpath with architecture-specific resource directory to the linker flags">;
-def fno_rtlib_add_rpath: Flag<["-"], "fno-rtlib-add-rpath">, Flags<[NoArgumentUnused]>,
- HelpText<"Do not add -rpath with architecture-specific resource directory to the linker flags">;
-def r : Flag<["-"], "r">, Flags<[LinkerInput,NoArgumentUnused]>,
+ HelpText<"Add -rpath with architecture-specific resource directory to the linker flags. "
+ "When --hip-link is specified, also add -rpath with HIP runtime library directory to the linker flags">;
+def fno_rtlib_add_rpath: Flag<["-"], "fno-rtlib-add-rpath">,
+ Flags<[NoArgumentUnused]>,
+ HelpText<"Do not add -rpath with architecture-specific resource directory to the linker flags. "
+ "When --hip-link is specified, do not add -rpath with HIP runtime library directory to the linker flags">;
+def offload_add_rpath: Flag<["--"], "offload-add-rpath">,
+ Flags<[NoArgumentUnused]>,
+ Alias<frtlib_add_rpath>;
+def no_offload_add_rpath: Flag<["--"], "no-offload-add-rpath">,
+ Flags<[NoArgumentUnused]>,
+ Alias<frtlib_add_rpath>;
+def r : Flag<["-"], "r">, Flags<[LinkerInput, NoArgumentUnused]>,
Group<Link_Group>;
-def save_temps_EQ : Joined<["-", "--"], "save-temps=">, Flags<[CC1Option, NoXarchOption]>,
+def regcall4 : Flag<["-"], "regcall4">, Group<m_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Set __regcall4 as a default calling convention to respect __regcall ABI v.4">,
+ MarshallingInfoFlag<LangOpts<"RegCall4">>;
+def save_temps_EQ : Joined<["-", "--"], "save-temps=">, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
HelpText<"Save intermediate compilation results.">;
def save_temps : Flag<["-", "--"], "save-temps">, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, FlangOption, FC1Option]>,
Alias<save_temps_EQ>, AliasArgs<["cwd"]>,
HelpText<"Save intermediate compilation results">;
def save_stats_EQ : Joined<["-", "--"], "save-stats=">, Flags<[NoXarchOption]>,
@@ -3773,56 +5411,77 @@ def segs__read__only__addr : Separate<["-"], "segs_read_only_addr">;
def segs__read__write__addr : Separate<["-"], "segs_read_write_addr">;
def segs__read__ : Joined<["-"], "segs_read_">;
def shared_libgcc : Flag<["-"], "shared-libgcc">;
-def shared : Flag<["-", "--"], "shared">, Group<Link_Group>;
+def shared : Flag<["-", "--"], "shared">, Group<Link_Group>,
+ Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>;
def single__module : Flag<["-"], "single_module">;
def specs_EQ : Joined<["-", "--"], "specs=">, Group<Link_Group>;
def specs : Separate<["-", "--"], "specs">, Flags<[Unsupported]>;
+def start_no_unused_arguments : Flag<["--"], "start-no-unused-arguments">,
+ Visibility<[ClangOption, CLOption, DXCOption]>,
+ HelpText<"Don't emit warnings about unused arguments for the following arguments">;
def static_libgcc : Flag<["-"], "static-libgcc">;
def static_libstdcxx : Flag<["-"], "static-libstdc++">;
-def static : Flag<["-", "--"], "static">, Group<Link_Group>, Flags<[NoArgumentUnused]>;
+def static : Flag<["-", "--"], "static">, Group<Link_Group>,
+ Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>,
+ Flags<[NoArgumentUnused]>;
def std_default_EQ : Joined<["-"], "std-default=">;
-def std_EQ : Joined<["-", "--"], "std=">, Flags<[CC1Option,FlangOption,FC1Option]>,
+def std_EQ : Joined<["-", "--"], "std=">,
+ Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
Group<CompileOnly_Group>, HelpText<"Language standard to compile for">,
ValuesCode<[{
- const char *Values =
+ static constexpr const char VALUES_CODE [] =
#define LANGSTANDARD(id, name, lang, desc, features) name ","
#define LANGSTANDARD_ALIAS(id, alias) alias ","
#include "clang/Basic/LangStandards.def"
;
}]>;
-def stdlib_EQ : Joined<["-", "--"], "stdlib=">, Flags<[CC1Option]>,
+def stdlib_EQ : Joined<["-", "--"], "stdlib=">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"C++ standard library to use">, Values<"libc++,libstdc++,platform">;
def stdlibxx_isystem : JoinedOrSeparate<["-"], "stdlib++-isystem">,
Group<clang_i_Group>,
HelpText<"Use directory as the C++ standard library include path">,
Flags<[NoXarchOption]>, MetaVarName<"<directory>">;
-def unwindlib_EQ : Joined<["-", "--"], "unwindlib=">, Flags<[CC1Option]>,
+def unwindlib_EQ : Joined<["-", "--"], "unwindlib=">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Unwind library to use">, Values<"libgcc,unwindlib,platform">;
def sub__library : JoinedOrSeparate<["-"], "sub_library">;
def sub__umbrella : JoinedOrSeparate<["-"], "sub_umbrella">;
def system_header_prefix : Joined<["--"], "system-header-prefix=">,
- Group<clang_i_Group>, Flags<[CC1Option]>, MetaVarName<"<prefix>">,
+ Group<clang_i_Group>, Visibility<[ClangOption, CC1Option]>,
+ MetaVarName<"<prefix>">,
HelpText<"Treat all #include paths starting with <prefix> as including a "
"system header.">;
def : Separate<["--"], "system-header-prefix">, Alias<system_header_prefix>;
def no_system_header_prefix : Joined<["--"], "no-system-header-prefix=">,
- Group<clang_i_Group>, Flags<[CC1Option]>, MetaVarName<"<prefix>">,
+ Group<clang_i_Group>, Visibility<[ClangOption, CC1Option]>,
+ MetaVarName<"<prefix>">,
HelpText<"Treat all #include paths starting with <prefix> as not including a "
"system header.">;
def : Separate<["--"], "no-system-header-prefix">, Alias<no_system_header_prefix>;
def s : Flag<["-"], "s">, Group<Link_Group>;
-def target : Joined<["--"], "target=">, Flags<[NoXarchOption, CoreOption]>,
+def target : Joined<["--"], "target=">, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>,
HelpText<"Generate code for the given target">;
+def darwin_target_variant : Separate<["-"], "darwin-target-variant">,
+ Flags<[NoXarchOption]>, Visibility<[ClangOption, CLOption]>,
+ HelpText<"Generate code for an additional runtime variant of the deployment target">;
def print_supported_cpus : Flag<["-", "--"], "print-supported-cpus">,
- Group<CompileOnly_Group>, Flags<[CC1Option, CoreOption]>,
+ Group<CompileOnly_Group>,
+ Visibility<[ClangOption, CC1Option, CLOption]>,
HelpText<"Print supported cpu models for the given target (if target is not specified,"
" it will print the supported cpus for the default target)">,
MarshallingInfoFlag<FrontendOpts<"PrintSupportedCPUs">>;
-def mcpu_EQ_QUESTION : Flag<["-"], "mcpu=?">, Alias<print_supported_cpus>;
-def mtune_EQ_QUESTION : Flag<["-"], "mtune=?">, Alias<print_supported_cpus>;
+def print_supported_extensions : Flag<["-", "--"], "print-supported-extensions">,
+ Visibility<[ClangOption, CC1Option, CLOption]>,
+ HelpText<"Print supported -march extensions (RISC-V, AArch64 and ARM only)">,
+ MarshallingInfoFlag<FrontendOpts<"PrintSupportedExtensions">>;
+def : Flag<["-"], "mcpu=help">, Alias<print_supported_cpus>;
+def : Flag<["-"], "mtune=help">, Alias<print_supported_cpus>;
def time : Flag<["-"], "time">,
HelpText<"Time individual commands">;
-def traditional_cpp : Flag<["-", "--"], "traditional-cpp">, Flags<[CC1Option]>,
+def traditional_cpp : Flag<["-", "--"], "traditional-cpp">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Enable some traditional CPP emulation">,
MarshallingInfoFlag<LangOpts<"TraditionalCPP">>;
def traditional : Flag<["-", "--"], "traditional">;
@@ -3833,16 +5492,18 @@ def twolevel__namespace : Flag<["-"], "twolevel_namespace">;
def t : Flag<["-"], "t">, Group<Link_Group>;
def umbrella : Separate<["-"], "umbrella">;
def undefined : JoinedOrSeparate<["-"], "undefined">, Group<u_Group>;
-def undef : Flag<["-"], "undef">, Group<u_Group>, Flags<[CC1Option]>,
+def undef : Flag<["-"], "undef">, Group<u_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"undef all system defines">,
MarshallingInfoNegativeFlag<PreprocessorOpts<"UsePredefines">>;
def unexported__symbols__list : Separate<["-"], "unexported_symbols_list">;
def u : JoinedOrSeparate<["-"], "u">, Group<u_Group>;
-def v : Flag<["-"], "v">, Flags<[CC1Option, CoreOption]>,
+def v : Flag<["-"], "v">,
+ Visibility<[ClangOption, CC1Option, CLOption, DXCOption, FlangOption]>,
HelpText<"Show commands to run and use verbose output">,
MarshallingInfoFlag<HeaderSearchOpts<"Verbose">>;
def altivec_src_compat : Joined<["-"], "faltivec-src-compat=">,
- Flags<[CC1Option]>, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>, Group<f_Group>,
HelpText<"Source-level compatibility for Altivec vectors (for PowerPC "
"targets). This includes results of vector comparison (scalar for "
"'xl', vector for 'gcc') as well as behavior when initializing with "
@@ -3853,7 +5514,8 @@ def altivec_src_compat : Joined<["-"], "faltivec-src-compat=">,
NormalizedValuesScope<"LangOptions::AltivecSrcCompatKind">,
NormalizedValues<["Mixed", "GCC", "XL"]>,
MarshallingInfoEnum<LangOpts<"AltivecSrcCompat">, "Mixed">;
-def verify_debug_info : Flag<["--"], "verify-debug-info">, Flags<[NoXarchOption]>,
+def verify_debug_info : Flag<["--"], "verify-debug-info">,
+ Flags<[NoXarchOption]>,
HelpText<"Verify the binary representation of debug output">;
def weak_l : Joined<["-"], "weak-l">, Flags<[LinkerInput]>;
def weak__framework : Separate<["-"], "weak_framework">, Flags<[LinkerInput]>;
@@ -3862,33 +5524,50 @@ def weak__reference__mismatches : Separate<["-"], "weak_reference_mismatches">;
def whatsloaded : Flag<["-"], "whatsloaded">;
def why_load : Flag<["-"], "why_load">;
def whyload : Flag<["-"], "whyload">, Alias<why_load>;
-def w : Flag<["-"], "w">, HelpText<"Suppress all warnings">, Flags<[CC1Option]>,
+def w : Flag<["-"], "w">, HelpText<"Suppress all warnings">,
+ Visibility<[ClangOption, CC1Option]>,
MarshallingInfoFlag<DiagnosticOpts<"IgnoreWarnings">>;
-def x : JoinedOrSeparate<["-"], "x">, Flags<[NoXarchOption,CC1Option]>,
+def x : JoinedOrSeparate<["-"], "x">,
+Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CC1Option, FlangOption, FC1Option, CLOption]>,
HelpText<"Treat subsequent input files as having type <language>">,
MetaVarName<"<language>">;
def y : Joined<["-"], "y">;
defm integrated_as : BoolFOption<"integrated-as",
CodeGenOpts<"DisableIntegratedAS">, DefaultFalse,
- NegFlag<SetTrue, [CC1Option], "Disable">, PosFlag<SetFalse, [], "Enable">,
- BothFlags<[], " the integrated assembler">>;
+ NegFlag<SetTrue, [], [ClangOption, CC1Option, FlangOption], "Disable">,
+ PosFlag<SetFalse, [], [ClangOption, CC1Option, FlangOption], "Enable">,
+ BothFlags<[], [ClangOption], " the integrated assembler">>;
def fintegrated_cc1 : Flag<["-"], "fintegrated-cc1">,
- Flags<[CoreOption, NoXarchOption]>, Group<f_Group>,
+ Visibility<[ClangOption, CLOption, DXCOption]>,
+ Group<f_Group>,
HelpText<"Run cc1 in-process">;
def fno_integrated_cc1 : Flag<["-"], "fno-integrated-cc1">,
- Flags<[CoreOption, NoXarchOption]>, Group<f_Group>,
+ Visibility<[ClangOption, CLOption, DXCOption]>,
+ Group<f_Group>,
HelpText<"Spawn a separate process for each cc1">;
-def : Flag<["-"], "integrated-as">, Alias<fintegrated_as>, Flags<[NoXarchOption]>;
+def fintegrated_objemitter : Flag<["-"], "fintegrated-objemitter">,
+ Visibility<[ClangOption, CLOption]>,
+ Group<f_Group>,
+ HelpText<"Use internal machine object code emitter.">;
+def fno_integrated_objemitter : Flag<["-"], "fno-integrated-objemitter">,
+ Visibility<[ClangOption, CLOption]>,
+ Group<f_Group>,
+ HelpText<"Use external machine object code emitter.">;
+
+def : Flag<["-"], "integrated-as">, Alias<fintegrated_as>;
def : Flag<["-"], "no-integrated-as">, Alias<fno_integrated_as>,
- Flags<[CC1Option, NoXarchOption]>;
+ Visibility<[ClangOption, CC1Option, FlangOption]>;
-def working_directory : JoinedOrSeparate<["-"], "working-directory">, Flags<[CC1Option]>,
+def working_directory : Separate<["-"], "working-directory">,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Resolve file paths relative to the specified directory">,
MarshallingInfoString<FileSystemOpts<"WorkingDir">>;
-def working_directory_EQ : Joined<["-"], "working-directory=">, Flags<[CC1Option]>,
+def working_directory_EQ : Joined<["-"], "working-directory=">,
+ Visibility<[ClangOption, CC1Option]>,
Alias<working_directory>;
// Double dash options, which are usually an alias for one of the previous
@@ -3899,10 +5578,13 @@ def _mhwdiv : Separate<["--"], "mhwdiv">, Alias<mhwdiv_EQ>;
def _CLASSPATH_EQ : Joined<["--"], "CLASSPATH=">, Alias<fclasspath_EQ>;
def _CLASSPATH : Separate<["--"], "CLASSPATH">, Alias<fclasspath_EQ>;
def _all_warnings : Flag<["--"], "all-warnings">, Alias<Wall>;
-def _analyzer_no_default_checks : Flag<["--"], "analyzer-no-default-checks">, Flags<[NoXarchOption]>;
-def _analyzer_output : JoinedOrSeparate<["--"], "analyzer-output">, Flags<[NoXarchOption]>,
+def _analyzer_no_default_checks : Flag<["--"], "analyzer-no-default-checks">,
+ Flags<[NoXarchOption]>;
+def _analyzer_output : JoinedOrSeparate<["--"], "analyzer-output">,
+ Flags<[NoXarchOption]>,
HelpText<"Static analyzer report output format (html|plist|plist-multi-file|plist-html|sarif|sarif-html|text).">;
-def _analyze : Flag<["--"], "analyze">, Flags<[NoXarchOption, CoreOption]>,
+def _analyze : Flag<["--"], "analyze">, Flags<[NoXarchOption]>,
+ Visibility<[ClangOption, CLOption]>,
HelpText<"Run the static analyzer">;
def _assemble : Flag<["--"], "assemble">, Alias<S>;
def _assert_EQ : Joined<["--"], "assert=">, Alias<A>;
@@ -3932,8 +5614,6 @@ def _for_linker_EQ : Joined<["--"], "for-linker=">, Alias<Xlinker>;
def _for_linker : Separate<["--"], "for-linker">, Alias<Xlinker>;
def _force_link_EQ : Joined<["--"], "force-link=">, Alias<u>;
def _force_link : Separate<["--"], "force-link">, Alias<u>;
-def _help_hidden : Flag<["--"], "help-hidden">,
- HelpText<"Display help for hidden options">;
def _imacros_EQ : Joined<["--"], "imacros=">, Alias<imacros>;
def _include_barrier : Flag<["--"], "include-barrier">, Alias<I_>;
def _include_directory_after_EQ : Joined<["--"], "include-directory-after=">, Alias<idirafter>;
@@ -3975,22 +5655,20 @@ def _print_diagnostic_categories : Flag<["--"], "print-diagnostic-categories">;
def _print_file_name : Separate<["--"], "print-file-name">, Alias<print_file_name_EQ>;
def _print_missing_file_dependencies : Flag<["--"], "print-missing-file-dependencies">, Alias<MG>;
def _print_prog_name : Separate<["--"], "print-prog-name">, Alias<print_prog_name_EQ>;
-def _profile_blocks : Flag<["--"], "profile-blocks">, Alias<a>;
def _profile : Flag<["--"], "profile">, Alias<p>;
def _resource_EQ : Joined<["--"], "resource=">, Alias<fcompile_resource_EQ>;
def _resource : Separate<["--"], "resource">, Alias<fcompile_resource_EQ>;
def _rtlib : Separate<["--"], "rtlib">, Alias<rtlib_EQ>;
-def _serialize_diags : Separate<["-", "--"], "serialize-diagnostics">, Flags<[NoXarchOption]>,
+def _serialize_diags : Separate<["-", "--"], "serialize-diagnostics">,
+ Flags<[NoXarchOption]>,
HelpText<"Serialize compiler diagnostics to a file">;
// We give --version different semantics from -version.
def _version : Flag<["--"], "version">,
- Flags<[CoreOption, CC1Option, FC1Option, FlangOption]>,
+ Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>,
HelpText<"Print version information">;
def _signed_char : Flag<["--"], "signed-char">, Alias<fsigned_char>;
def _std : Separate<["--"], "std">, Alias<std_EQ>;
def _stdlib : Separate<["--"], "stdlib">, Alias<stdlib_EQ>;
-def _sysroot_EQ : Joined<["--"], "sysroot=">;
-def _sysroot : Separate<["--"], "sysroot">, Alias<_sysroot_EQ>;
def _target_help : Flag<["--"], "target-help">;
def _trace_includes : Flag<["--"], "trace-includes">, Alias<H>;
def _undefine_macro_EQ : Joined<["--"], "undefine-macro=">, Alias<U>;
@@ -4002,9 +5680,44 @@ def _warn__EQ : Joined<["--"], "warn-=">, Alias<W_Joined>;
def _warn_ : Joined<["--"], "warn-">, Alias<W_Joined>;
def _write_dependencies : Flag<["--"], "write-dependencies">, Alias<MD>;
def _write_user_dependencies : Flag<["--"], "write-user-dependencies">, Alias<MMD>;
-def _ : Joined<["--"], "">, Flags<[Unsupported]>;
+
+def _help_hidden : Flag<["--"], "help-hidden">,
+ Visibility<[ClangOption, FlangOption]>,
+ HelpText<"Display help for hidden options">;
+def _sysroot_EQ : Joined<["--"], "sysroot=">, Visibility<[ClangOption, FlangOption]>;
+def _sysroot : Separate<["--"], "sysroot">, Alias<_sysroot_EQ>;
+
+//===----------------------------------------------------------------------===//
+// pie/pic options (clang + flang-new)
+//===----------------------------------------------------------------------===//
+let Visibility = [ClangOption, FlangOption] in {
+
+def fPIC : Flag<["-"], "fPIC">, Group<f_Group>;
+def fno_PIC : Flag<["-"], "fno-PIC">, Group<f_Group>;
+def fPIE : Flag<["-"], "fPIE">, Group<f_Group>;
+def fno_PIE : Flag<["-"], "fno-PIE">, Group<f_Group>;
+def fpic : Flag<["-"], "fpic">, Group<f_Group>;
+def fno_pic : Flag<["-"], "fno-pic">, Group<f_Group>;
+def fpie : Flag<["-"], "fpie">, Group<f_Group>;
+def fno_pie : Flag<["-"], "fno-pie">, Group<f_Group>;
+
+} // let Vis = [Default, FlangOption]
+
+//===----------------------------------------------------------------------===//
+// Target Options (clang + flang-new)
+//===----------------------------------------------------------------------===//
+let Flags = [TargetSpecific] in {
+let Visibility = [ClangOption, FlangOption] in {
+
+def mcpu_EQ : Joined<["-"], "mcpu=">, Group<m_Group>,
+ HelpText<"For a list of available CPUs for the target use '-mcpu=help'">;
+def mdynamic_no_pic : Joined<["-"], "mdynamic-no-pic">, Group<m_Group>;
+
+} // let Vis = [Default, FlangOption]
+} // let Flags = [TargetSpecific]
// Hexagon feature flags.
+let Flags = [TargetSpecific] in {
def mieee_rnd_near : Flag<["-"], "mieee-rnd-near">,
Group<m_hexagon_Features_Group>;
def mv5 : Flag<["-"], "mv5">, Group<m_hexagon_Features_Group>, Alias<mcpu_EQ>,
@@ -4025,6 +5738,14 @@ def mv67t : Flag<["-"], "mv67t">, Group<m_hexagon_Features_Group>,
Alias<mcpu_EQ>, AliasArgs<["hexagonv67t"]>;
def mv68 : Flag<["-"], "mv68">, Group<m_hexagon_Features_Group>,
Alias<mcpu_EQ>, AliasArgs<["hexagonv68"]>;
+def mv69 : Flag<["-"], "mv69">, Group<m_hexagon_Features_Group>,
+ Alias<mcpu_EQ>, AliasArgs<["hexagonv69"]>;
+def mv71 : Flag<["-"], "mv71">, Group<m_hexagon_Features_Group>,
+ Alias<mcpu_EQ>, AliasArgs<["hexagonv71"]>;
+def mv71t : Flag<["-"], "mv71t">, Group<m_hexagon_Features_Group>,
+ Alias<mcpu_EQ>, AliasArgs<["hexagonv71t"]>;
+def mv73 : Flag<["-"], "mv73">, Group<m_hexagon_Features_Group>,
+ Alias<mcpu_EQ>, AliasArgs<["hexagonv73"]>;
def mhexagon_hvx : Flag<["-"], "mhvx">, Group<m_hexagon_Features_HVX_Group>,
HelpText<"Enable Hexagon Vector eXtensions">;
def mhexagon_hvx_EQ : Joined<["-"], "mhvx=">,
@@ -4036,26 +5757,80 @@ def mno_hexagon_hvx : Flag<["-"], "mno-hvx">,
def mhexagon_hvx_length_EQ : Joined<["-"], "mhvx-length=">,
Group<m_hexagon_Features_HVX_Group>, HelpText<"Set Hexagon Vector Length">,
Values<"64B,128B">;
-def ffixed_r19: Flag<["-"], "ffixed-r19">,
+def mhexagon_hvx_qfloat : Flag<["-"], "mhvx-qfloat">,
+ Group<m_hexagon_Features_HVX_Group>,
+ HelpText<"Enable Hexagon HVX QFloat instructions">;
+def mno_hexagon_hvx_qfloat : Flag<["-"], "mno-hvx-qfloat">,
+ Group<m_hexagon_Features_HVX_Group>,
+ HelpText<"Disable Hexagon HVX QFloat instructions">;
+def mhexagon_hvx_ieee_fp : Flag<["-"], "mhvx-ieee-fp">,
+ Group<m_hexagon_Features_Group>,
+ HelpText<"Enable Hexagon HVX IEEE floating-point">;
+def mno_hexagon_hvx_ieee_fp : Flag<["-"], "mno-hvx-ieee-fp">,
+ Group<m_hexagon_Features_Group>,
+ HelpText<"Disable Hexagon HVX IEEE floating-point">;
+def ffixed_r19: Flag<["-"], "ffixed-r19">, Group<f_Group>,
HelpText<"Reserve register r19 (Hexagon only)">;
+} // let Flags = [TargetSpecific]
def mmemops : Flag<["-"], "mmemops">, Group<m_hexagon_Features_Group>,
- Flags<[CC1Option]>, HelpText<"Enable generation of memop instructions">;
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Enable generation of memop instructions">;
def mno_memops : Flag<["-"], "mno-memops">, Group<m_hexagon_Features_Group>,
- Flags<[CC1Option]>, HelpText<"Disable generation of memop instructions">;
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Disable generation of memop instructions">;
def mpackets : Flag<["-"], "mpackets">, Group<m_hexagon_Features_Group>,
- Flags<[CC1Option]>, HelpText<"Enable generation of instruction packets">;
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Enable generation of instruction packets">;
def mno_packets : Flag<["-"], "mno-packets">, Group<m_hexagon_Features_Group>,
- Flags<[CC1Option]>, HelpText<"Disable generation of instruction packets">;
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Disable generation of instruction packets">;
def mnvj : Flag<["-"], "mnvj">, Group<m_hexagon_Features_Group>,
- Flags<[CC1Option]>, HelpText<"Enable generation of new-value jumps">;
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Enable generation of new-value jumps">;
def mno_nvj : Flag<["-"], "mno-nvj">, Group<m_hexagon_Features_Group>,
- Flags<[CC1Option]>, HelpText<"Disable generation of new-value jumps">;
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Disable generation of new-value jumps">;
def mnvs : Flag<["-"], "mnvs">, Group<m_hexagon_Features_Group>,
- Flags<[CC1Option]>, HelpText<"Enable generation of new-value stores">;
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Enable generation of new-value stores">;
def mno_nvs : Flag<["-"], "mno-nvs">, Group<m_hexagon_Features_Group>,
- Flags<[CC1Option]>, HelpText<"Disable generation of new-value stores">;
+ Visibility<[ClangOption, CC1Option]>,
+ HelpText<"Disable generation of new-value stores">;
+def mcabac: Flag<["-"], "mcabac">, Group<m_hexagon_Features_Group>,
+ HelpText<"Enable CABAC instructions">;
+
+// SPARC feature flags
+let Flags = [TargetSpecific] in {
+def mfpu : Flag<["-"], "mfpu">, Group<m_sparc_Features_Group>;
+def mno_fpu : Flag<["-"], "mno-fpu">, Group<m_sparc_Features_Group>;
+def mfsmuld : Flag<["-"], "mfsmuld">, Group<m_sparc_Features_Group>;
+def mno_fsmuld : Flag<["-"], "mno-fsmuld">, Group<m_sparc_Features_Group>;
+def mpopc : Flag<["-"], "mpopc">, Group<m_sparc_Features_Group>;
+def mno_popc : Flag<["-"], "mno-popc">, Group<m_sparc_Features_Group>;
+def mvis : Flag<["-"], "mvis">, Group<m_sparc_Features_Group>;
+def mno_vis : Flag<["-"], "mno-vis">, Group<m_sparc_Features_Group>;
+def mvis2 : Flag<["-"], "mvis2">, Group<m_sparc_Features_Group>;
+def mno_vis2 : Flag<["-"], "mno-vis2">, Group<m_sparc_Features_Group>;
+def mvis3 : Flag<["-"], "mvis3">, Group<m_sparc_Features_Group>;
+def mno_vis3 : Flag<["-"], "mno-vis3">, Group<m_sparc_Features_Group>;
+def mhard_quad_float : Flag<["-"], "mhard-quad-float">, Group<m_sparc_Features_Group>;
+def msoft_quad_float : Flag<["-"], "msoft-quad-float">, Group<m_sparc_Features_Group>;
+foreach i = 1 ... 7 in
+ def ffixed_g#i : Flag<["-"], "ffixed-g"#i>, Group<m_sparc_Features_Group>,
+ HelpText<"Reserve the G"#i#" register (SPARC only)">;
+foreach i = 0 ... 5 in
+ def ffixed_o#i : Flag<["-"], "ffixed-o"#i>, Group<m_sparc_Features_Group>,
+ HelpText<"Reserve the O"#i#" register (SPARC only)">;
+foreach i = 0 ... 7 in
+ def ffixed_l#i : Flag<["-"], "ffixed-l"#i>, Group<m_sparc_Features_Group>,
+ HelpText<"Reserve the L"#i#" register (SPARC only)">;
+foreach i = 0 ... 5 in
+ def ffixed_i#i : Flag<["-"], "ffixed-i"#i>, Group<m_sparc_Features_Group>,
+ HelpText<"Reserve the I"#i#" register (SPARC only)">;
+} // let Flags = [TargetSpecific]
// M68k features flags
+let Flags = [TargetSpecific] in {
def m68000 : Flag<["-"], "m68000">, Group<m_m68k_Features_Group>;
def m68010 : Flag<["-"], "m68010">, Group<m_m68k_Features_Group>;
def m68020 : Flag<["-"], "m68020">, Group<m_m68k_Features_Group>;
@@ -4063,18 +5838,23 @@ def m68030 : Flag<["-"], "m68030">, Group<m_m68k_Features_Group>;
def m68040 : Flag<["-"], "m68040">, Group<m_m68k_Features_Group>;
def m68060 : Flag<["-"], "m68060">, Group<m_m68k_Features_Group>;
+def m68881 : Flag<["-"], "m68881">, Group<m_m68k_Features_Group>;
+
foreach i = {0-6} in
def ffixed_a#i : Flag<["-"], "ffixed-a"#i>, Group<m_m68k_Features_Group>,
HelpText<"Reserve the a"#i#" register (M68k only)">;
foreach i = {0-7} in
def ffixed_d#i : Flag<["-"], "ffixed-d"#i>, Group<m_m68k_Features_Group>,
HelpText<"Reserve the d"#i#" register (M68k only)">;
+} // let Flags = [TargetSpecific]
// X86 feature flags
+let Flags = [TargetSpecific] in {
def mx87 : Flag<["-"], "mx87">, Group<m_x86_Features_Group>;
def mno_x87 : Flag<["-"], "mno-x87">, Group<m_x86_Features_Group>;
def m80387 : Flag<["-"], "m80387">, Alias<mx87>;
def mno_80387 : Flag<["-"], "mno-80387">, Alias<mno_x87>;
+def mno_fp_ret_in_387 : Flag<["-"], "mno-fp-ret-in-387">, Alias<mno_x87>;
def mmmx : Flag<["-"], "mmmx">, Group<m_x86_Features_Group>;
def mno_mmx : Flag<["-"], "mno-mmx">, Group<m_x86_Features_Group>;
def m3dnow : Flag<["-"], "m3dnow">, Group<m_x86_Features_Group>;
@@ -4083,10 +5863,16 @@ def m3dnowa : Flag<["-"], "m3dnowa">, Group<m_x86_Features_Group>;
def mno_3dnowa : Flag<["-"], "mno-3dnowa">, Group<m_x86_Features_Group>;
def mamx_bf16 : Flag<["-"], "mamx-bf16">, Group<m_x86_Features_Group>;
def mno_amx_bf16 : Flag<["-"], "mno-amx-bf16">, Group<m_x86_Features_Group>;
-def mtamx_int8 : Flag<["-"], "mamx-int8">, Group<m_x86_Features_Group>;
+def mamx_complex : Flag<["-"], "mamx-complex">, Group<m_x86_Features_Group>;
+def mno_amx_complex : Flag<["-"], "mno-amx-complex">, Group<m_x86_Features_Group>;
+def mamx_fp16 : Flag<["-"], "mamx-fp16">, Group<m_x86_Features_Group>;
+def mno_amx_fp16 : Flag<["-"], "mno-amx-fp16">, Group<m_x86_Features_Group>;
+def mamx_int8 : Flag<["-"], "mamx-int8">, Group<m_x86_Features_Group>;
def mno_amx_int8 : Flag<["-"], "mno-amx-int8">, Group<m_x86_Features_Group>;
def mamx_tile : Flag<["-"], "mamx-tile">, Group<m_x86_Features_Group>;
def mno_amx_tile : Flag<["-"], "mno-amx-tile">, Group<m_x86_Features_Group>;
+def mcmpccxadd : Flag<["-"], "mcmpccxadd">, Group<m_x86_Features_Group>;
+def mno_cmpccxadd : Flag<["-"], "mno-cmpccxadd">, Group<m_x86_Features_Group>;
def msse : Flag<["-"], "msse">, Group<m_x86_Features_Group>;
def mno_sse : Flag<["-"], "mno-sse">, Group<m_x86_Features_Group>;
def msse2 : Flag<["-"], "msse2">, Group<m_x86_Features_Group>;
@@ -4097,7 +5883,11 @@ def mssse3 : Flag<["-"], "mssse3">, Group<m_x86_Features_Group>;
def mno_ssse3 : Flag<["-"], "mno-ssse3">, Group<m_x86_Features_Group>;
def msse4_1 : Flag<["-"], "msse4.1">, Group<m_x86_Features_Group>;
def mno_sse4_1 : Flag<["-"], "mno-sse4.1">, Group<m_x86_Features_Group>;
+} // let Flags = [TargetSpecific]
+// TODO: Make -msse4.2 TargetSpecific after
+// https://github.com/llvm/llvm-project/issues/63270 is fixed.
def msse4_2 : Flag<["-"], "msse4.2">, Group<m_x86_Features_Group>;
+let Flags = [TargetSpecific] in {
def mno_sse4_2 : Flag<["-"], "mno-sse4.2">, Group<m_x86_Features_Group>;
def msse4 : Flag<["-"], "msse4">, Alias<msse4_2>;
// -mno-sse4 turns off sse4.1 which has the effect of turning off everything
@@ -4108,6 +5898,12 @@ def msse4a : Flag<["-"], "msse4a">, Group<m_x86_Features_Group>;
def mno_sse4a : Flag<["-"], "mno-sse4a">, Group<m_x86_Features_Group>;
def mavx : Flag<["-"], "mavx">, Group<m_x86_Features_Group>;
def mno_avx : Flag<["-"], "mno-avx">, Group<m_x86_Features_Group>;
+def mavx10_1_256 : Flag<["-"], "mavx10.1-256">, Group<m_x86_AVX10_Features_Group>;
+def mno_avx10_1_256 : Flag<["-"], "mno-avx10.1-256">, Group<m_x86_AVX10_Features_Group>;
+def mavx10_1_512 : Flag<["-"], "mavx10.1-512">, Group<m_x86_AVX10_Features_Group>;
+def mno_avx10_1_512 : Flag<["-"], "mno-avx10.1-512">, Group<m_x86_AVX10_Features_Group>;
+def mavx10_1 : Flag<["-"], "mavx10.1">, Alias<mavx10_1_256>;
+def mno_avx10_1 : Flag<["-"], "mno-avx10.1">, Alias<mno_avx10_1_256>;
def mavx2 : Flag<["-"], "mavx2">, Group<m_x86_Features_Group>;
def mno_avx2 : Flag<["-"], "mno-avx2">, Group<m_x86_Features_Group>;
def mavx512f : Flag<["-"], "mavx512f">, Group<m_x86_Features_Group>;
@@ -4124,6 +5920,8 @@ def mavx512dq : Flag<["-"], "mavx512dq">, Group<m_x86_Features_Group>;
def mno_avx512dq : Flag<["-"], "mno-avx512dq">, Group<m_x86_Features_Group>;
def mavx512er : Flag<["-"], "mavx512er">, Group<m_x86_Features_Group>;
def mno_avx512er : Flag<["-"], "mno-avx512er">, Group<m_x86_Features_Group>;
+def mavx512fp16 : Flag<["-"], "mavx512fp16">, Group<m_x86_Features_Group>;
+def mno_avx512fp16 : Flag<["-"], "mno-avx512fp16">, Group<m_x86_Features_Group>;
def mavx512ifma : Flag<["-"], "mavx512ifma">, Group<m_x86_Features_Group>;
def mno_avx512ifma : Flag<["-"], "mno-avx512ifma">, Group<m_x86_Features_Group>;
def mavx512pf : Flag<["-"], "mavx512pf">, Group<m_x86_Features_Group>;
@@ -4140,6 +5938,14 @@ def mavx512vpopcntdq : Flag<["-"], "mavx512vpopcntdq">, Group<m_x86_Features_Gro
def mno_avx512vpopcntdq : Flag<["-"], "mno-avx512vpopcntdq">, Group<m_x86_Features_Group>;
def mavx512vp2intersect : Flag<["-"], "mavx512vp2intersect">, Group<m_x86_Features_Group>;
def mno_avx512vp2intersect : Flag<["-"], "mno-avx512vp2intersect">, Group<m_x86_Features_Group>;
+def mavxifma : Flag<["-"], "mavxifma">, Group<m_x86_Features_Group>;
+def mno_avxifma : Flag<["-"], "mno-avxifma">, Group<m_x86_Features_Group>;
+def mavxneconvert : Flag<["-"], "mavxneconvert">, Group<m_x86_Features_Group>;
+def mno_avxneconvert : Flag<["-"], "mno-avxneconvert">, Group<m_x86_Features_Group>;
+def mavxvnniint16 : Flag<["-"], "mavxvnniint16">, Group<m_x86_Features_Group>;
+def mno_avxvnniint16 : Flag<["-"], "mno-avxvnniint16">, Group<m_x86_Features_Group>;
+def mavxvnniint8 : Flag<["-"], "mavxvnniint8">, Group<m_x86_Features_Group>;
+def mno_avxvnniint8 : Flag<["-"], "mno-avxvnniint8">, Group<m_x86_Features_Group>;
def mavxvnni : Flag<["-"], "mavxvnni">, Group<m_x86_Features_Group>;
def mno_avxvnni : Flag<["-"], "mno-avxvnni">, Group<m_x86_Features_Group>;
def madx : Flag<["-"], "madx">, Group<m_x86_Features_Group>;
@@ -4160,10 +5966,14 @@ def mwbnoinvd : Flag<["-"], "mwbnoinvd">, Group<m_x86_Features_Group>;
def mno_wbnoinvd : Flag<["-"], "mno-wbnoinvd">, Group<m_x86_Features_Group>;
def mclzero : Flag<["-"], "mclzero">, Group<m_x86_Features_Group>;
def mno_clzero : Flag<["-"], "mno-clzero">, Group<m_x86_Features_Group>;
+def mcrc32 : Flag<["-"], "mcrc32">, Group<m_x86_Features_Group>;
+def mno_crc32 : Flag<["-"], "mno-crc32">, Group<m_x86_Features_Group>;
def mcx16 : Flag<["-"], "mcx16">, Group<m_x86_Features_Group>;
def mno_cx16 : Flag<["-"], "mno-cx16">, Group<m_x86_Features_Group>;
def menqcmd : Flag<["-"], "menqcmd">, Group<m_x86_Features_Group>;
def mno_enqcmd : Flag<["-"], "mno-enqcmd">, Group<m_x86_Features_Group>;
+def mevex512 : Flag<["-"], "mevex512">, Group<m_x86_Features_Group>;
+def mno_evex512 : Flag<["-"], "mno-evex512">, Group<m_x86_Features_Group>;
def mf16c : Flag<["-"], "mf16c">, Group<m_x86_Features_Group>;
def mno_f16c : Flag<["-"], "mno-f16c">, Group<m_x86_Features_Group>;
def mfma : Flag<["-"], "mfma">, Group<m_x86_Features_Group>;
@@ -4204,14 +6014,20 @@ def mpconfig : Flag<["-"], "mpconfig">, Group<m_x86_Features_Group>;
def mno_pconfig : Flag<["-"], "mno-pconfig">, Group<m_x86_Features_Group>;
def mpopcnt : Flag<["-"], "mpopcnt">, Group<m_x86_Features_Group>;
def mno_popcnt : Flag<["-"], "mno-popcnt">, Group<m_x86_Features_Group>;
+def mprefetchi : Flag<["-"], "mprefetchi">, Group<m_x86_Features_Group>;
+def mno_prefetchi : Flag<["-"], "mno-prefetchi">, Group<m_x86_Features_Group>;
def mprefetchwt1 : Flag<["-"], "mprefetchwt1">, Group<m_x86_Features_Group>;
def mno_prefetchwt1 : Flag<["-"], "mno-prefetchwt1">, Group<m_x86_Features_Group>;
def mprfchw : Flag<["-"], "mprfchw">, Group<m_x86_Features_Group>;
def mno_prfchw : Flag<["-"], "mno-prfchw">, Group<m_x86_Features_Group>;
def mptwrite : Flag<["-"], "mptwrite">, Group<m_x86_Features_Group>;
def mno_ptwrite : Flag<["-"], "mno-ptwrite">, Group<m_x86_Features_Group>;
+def mraoint : Flag<["-"], "mraoint">, Group<m_x86_Features_Group>;
+def mno_raoint : Flag<["-"], "mno-raoint">, Group<m_x86_Features_Group>;
def mrdpid : Flag<["-"], "mrdpid">, Group<m_x86_Features_Group>;
def mno_rdpid : Flag<["-"], "mno-rdpid">, Group<m_x86_Features_Group>;
+def mrdpru : Flag<["-"], "mrdpru">, Group<m_x86_Features_Group>;
+def mno_rdpru : Flag<["-"], "mno-rdpru">, Group<m_x86_Features_Group>;
def mrdrnd : Flag<["-"], "mrdrnd">, Group<m_x86_Features_Group>;
def mno_rdrnd : Flag<["-"], "mno-rdrnd">, Group<m_x86_Features_Group>;
def mrtm : Flag<["-"], "mrtm">, Group<m_x86_Features_Group>;
@@ -4226,12 +6042,20 @@ def msgx : Flag<["-"], "msgx">, Group<m_x86_Features_Group>;
def mno_sgx : Flag<["-"], "mno-sgx">, Group<m_x86_Features_Group>;
def msha : Flag<["-"], "msha">, Group<m_x86_Features_Group>;
def mno_sha : Flag<["-"], "mno-sha">, Group<m_x86_Features_Group>;
+def msha512 : Flag<["-"], "msha512">, Group<m_x86_Features_Group>;
+def mno_sha512 : Flag<["-"], "mno-sha512">, Group<m_x86_Features_Group>;
+def msm3 : Flag<["-"], "msm3">, Group<m_x86_Features_Group>;
+def mno_sm3 : Flag<["-"], "mno-sm3">, Group<m_x86_Features_Group>;
+def msm4 : Flag<["-"], "msm4">, Group<m_x86_Features_Group>;
+def mno_sm4 : Flag<["-"], "mno-sm4">, Group<m_x86_Features_Group>;
def mtbm : Flag<["-"], "mtbm">, Group<m_x86_Features_Group>;
def mno_tbm : Flag<["-"], "mno-tbm">, Group<m_x86_Features_Group>;
def mtsxldtrk : Flag<["-"], "mtsxldtrk">, Group<m_x86_Features_Group>;
def mno_tsxldtrk : Flag<["-"], "mno-tsxldtrk">, Group<m_x86_Features_Group>;
def muintr : Flag<["-"], "muintr">, Group<m_x86_Features_Group>;
def mno_uintr : Flag<["-"], "mno-uintr">, Group<m_x86_Features_Group>;
+def musermsr : Flag<["-"], "musermsr">, Group<m_x86_Features_Group>;
+def mno_usermsr : Flag<["-"], "mno-usermsr">, Group<m_x86_Features_Group>;
def mvaes : Flag<["-"], "mvaes">, Group<m_x86_Features_Group>;
def mno_vaes : Flag<["-"], "mno-vaes">, Group<m_x86_Features_Group>;
def mvpclmulqdq : Flag<["-"], "mvpclmulqdq">, Group<m_x86_Features_Group>;
@@ -4254,12 +6078,34 @@ def mretpoline_external_thunk : Flag<["-"], "mretpoline-external-thunk">, Group<
def mno_retpoline_external_thunk : Flag<["-"], "mno-retpoline-external-thunk">, Group<m_x86_Features_Group>;
def mvzeroupper : Flag<["-"], "mvzeroupper">, Group<m_x86_Features_Group>;
def mno_vzeroupper : Flag<["-"], "mno-vzeroupper">, Group<m_x86_Features_Group>;
+def mno_gather : Flag<["-"], "mno-gather">, Group<m_Group>,
+ HelpText<"Disable generation of gather instructions in auto-vectorization(x86 only)">;
+def mno_scatter : Flag<["-"], "mno-scatter">, Group<m_Group>,
+ HelpText<"Disable generation of scatter instructions in auto-vectorization(x86 only)">;
+def mapx_features_EQ : CommaJoined<["-"], "mapx-features=">, Group<m_x86_Features_Group>,
+ HelpText<"Enable features of APX">, Values<"egpr,push2pop2,ppx,ndd,ccmp,cf">;
+def mno_apx_features_EQ : CommaJoined<["-"], "mno-apx-features=">, Group<m_x86_Features_Group>,
+ HelpText<"Disable features of APX">, Values<"egpr,push2pop2,ppx,ndd,ccmp,cf">;
+// Features egpr, push2pop2, ppx and ndd are validated with llvm-test-suite && cpu2017 on Intel SDE.
+// For stability, we turn on these features only for -mapxf. After a feature pass the validation,
+// we will add it to -mapxf.
+def mapxf : Flag<["-"], "mapxf">, Alias<mapx_features_EQ>, AliasArgs<["egpr","push2pop2","ppx", "ndd"]>;
+def mno_apxf : Flag<["-"], "mno-apxf">, Alias<mno_apx_features_EQ>, AliasArgs<["egpr","push2pop2","ppx","ndd"]>;
+} // let Flags = [TargetSpecific]
+
+// VE feature flags
+let Flags = [TargetSpecific] in {
+def mvevpu : Flag<["-"], "mvevpu">, Group<m_ve_Features_Group>,
+ HelpText<"Emit VPU instructions for VE">;
+def mno_vevpu : Flag<["-"], "mno-vevpu">, Group<m_ve_Features_Group>;
+} // let Flags = [TargetSpecific]
// These are legacy user-facing driver-level option spellings. They are always
// aliases for options that are spelled using the more common Unix / GNU flag
// style of double-dash and equals-joined flags.
-def gcc_toolchain_legacy_spelling : Separate<["-"], "gcc-toolchain">, Alias<gcc_toolchain>;
-def target_legacy_spelling : Separate<["-"], "target">, Alias<target>;
+def target_legacy_spelling : Separate<["-"], "target">,
+ Alias<target>,
+ Visibility<[ClangOption, CLOption, DXCOption, FlangOption]>;
// Special internal option to handle -Xlinker --no-demangle.
def Z_Xlinker__no_demangle : Flag<["-"], "Z-Xlinker-no-demangle">,
@@ -4271,9 +6117,11 @@ def Zlinker_input : Separate<["-"], "Zlinker-input">,
// Reserved library options.
def Z_reserved_lib_stdcxx : Flag<["-"], "Z-reserved-lib-stdc++">,
- Flags<[LinkerInput, NoArgumentUnused, Unsupported]>, Group<reserved_lib_Group>;
+ Flags<[LinkerInput, NoArgumentUnused, Unsupported]>,
+ Group<reserved_lib_Group>;
def Z_reserved_lib_cckext : Flag<["-"], "Z-reserved-lib-cckext">,
- Flags<[LinkerInput, NoArgumentUnused, Unsupported]>, Group<reserved_lib_Group>;
+ Flags<[LinkerInput, NoArgumentUnused, Unsupported]>,
+ Group<reserved_lib_Group>;
// Ignored options
multiclass BooleanFFlag<string name> {
@@ -4281,24 +6129,30 @@ multiclass BooleanFFlag<string name> {
def fno_#NAME : Flag<["-"], "fno-"#name>;
}
+multiclass FlangIgnoredDiagOpt<string name> {
+ def unsupported_warning_w#NAME : Flag<["-", "--"], "W"#name>,
+ Visibility<[FlangOption]>, Group<flang_ignored_w_Group>;
+}
+
defm : BooleanFFlag<"keep-inline-functions">, Group<clang_ignored_gcc_optimization_f_Group>;
def fprofile_dir : Joined<["-"], "fprofile-dir=">, Group<f_Group>;
// The default value matches BinutilsVersion in MCAsmInfo.h.
def fbinutils_version_EQ : Joined<["-"], "fbinutils-version=">,
- MetaVarName<"<major.minor>">, Group<f_Group>, Flags<[CC1Option]>,
+ MetaVarName<"<major.minor>">, Group<f_Group>,
+ Visibility<[ClangOption, CC1Option]>,
HelpText<"Produced object files can use all ELF features supported by this "
"binutils version and newer. If -fno-integrated-as is specified, the "
"generated assembly will consider GNU as support. 'none' means that all ELF "
"features can be used, regardless of binutils support. Defaults to 2.26.">;
-def fuse_ld_EQ : Joined<["-"], "fuse-ld=">, Group<f_Group>, Flags<[CoreOption, LinkOption]>;
+def fuse_ld_EQ : Joined<["-"], "fuse-ld=">, Group<f_Group>,
+ Flags<[LinkOption]>, Visibility<[ClangOption, FlangOption, CLOption]>;
def ld_path_EQ : Joined<["--"], "ld-path=">, Group<Link_Group>;
defm align_labels : BooleanFFlag<"align-labels">, Group<clang_ignored_gcc_optimization_f_Group>;
def falign_labels_EQ : Joined<["-"], "falign-labels=">, Group<clang_ignored_gcc_optimization_f_Group>;
defm align_loops : BooleanFFlag<"align-loops">, Group<clang_ignored_gcc_optimization_f_Group>;
-def falign_loops_EQ : Joined<["-"], "falign-loops=">, Group<clang_ignored_gcc_optimization_f_Group>;
defm align_jumps : BooleanFFlag<"align-jumps">, Group<clang_ignored_gcc_optimization_f_Group>;
def falign_jumps_EQ : Joined<["-"], "falign-jumps=">, Group<clang_ignored_gcc_optimization_f_Group>;
@@ -4306,12 +6160,15 @@ def falign_jumps_EQ : Joined<["-"], "falign-jumps=">, Group<clang_ignored_gcc_op
// ignore it for now to avoid breaking builds that use it.
def fdiagnostics_show_location_EQ : Joined<["-"], "fdiagnostics-show-location=">, Group<clang_ignored_f_Group>;
-defm fcheck_new : BooleanFFlag<"check-new">, Group<clang_ignored_f_Group>;
+defm check_new : BoolOption<"f", "check-new",
+ LangOpts<"CheckNew">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption], "Do not assume C++ operator new may not return NULL">,
+ NegFlag<SetFalse>, BothFlags<[], [ClangOption, CC1Option]>>;
+
defm caller_saves : BooleanFFlag<"caller-saves">, Group<clang_ignored_gcc_optimization_f_Group>;
defm reorder_blocks : BooleanFFlag<"reorder-blocks">, Group<clang_ignored_gcc_optimization_f_Group>;
defm branch_count_reg : BooleanFFlag<"branch-count-reg">, Group<clang_ignored_gcc_optimization_f_Group>;
defm default_inline : BooleanFFlag<"default-inline">, Group<clang_ignored_gcc_optimization_f_Group>;
-defm fat_lto_objects : BooleanFFlag<"fat-lto-objects">, Group<clang_ignored_gcc_optimization_f_Group>;
defm float_store : BooleanFFlag<"float-store">, Group<clang_ignored_gcc_optimization_f_Group>;
defm friend_injection : BooleanFFlag<"friend-injection">, Group<clang_ignored_f_Group>;
defm function_attribute_list : BooleanFFlag<"function-attribute-list">, Group<clang_ignored_f_Group>;
@@ -4337,7 +6194,11 @@ defm ipa_cp : BooleanFFlag<"ipa-cp">,
defm ivopts : BooleanFFlag<"ivopts">, Group<clang_ignored_gcc_optimization_f_Group>;
defm semantic_interposition : BoolFOption<"semantic-interposition",
LangOpts<"SemanticInterposition">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option]>, NegFlag<SetFalse>>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option]>,
+ NegFlag<SetFalse>>,
+ DocBrief<[{Enable semantic interposition. Semantic interposition allows for the
+interposition of a symbol by another at runtime, thus preventing a range of
+inter-procedural optimisation.}]>;
defm non_call_exceptions : BooleanFFlag<"non-call-exceptions">, Group<clang_ignored_f_Group>;
defm peel_loops : BooleanFFlag<"peel-loops">, Group<clang_ignored_gcc_optimization_f_Group>;
defm permissive : BooleanFFlag<"permissive">, Group<clang_ignored_f_Group>;
@@ -4368,6 +6229,7 @@ defm tree_salias : BooleanFFlag<"tree-salias">, Group<clang_ignored_f_Group>;
defm tree_ter : BooleanFFlag<"tree-ter">, Group<clang_ignored_gcc_optimization_f_Group>;
defm tree_vectorizer_verbose : BooleanFFlag<"tree-vectorizer-verbose">, Group<clang_ignored_f_Group>;
defm tree_vrp : BooleanFFlag<"tree-vrp">, Group<clang_ignored_gcc_optimization_f_Group>;
+defm : BooleanFFlag<"unit-at-a-time">, Group<clang_ignored_gcc_optimization_f_Group>;
defm unroll_all_loops : BooleanFFlag<"unroll-all-loops">, Group<clang_ignored_gcc_optimization_f_Group>;
defm unsafe_loop_optimizations : BooleanFFlag<"unsafe-loop-optimizations">,
Group<clang_ignored_gcc_optimization_f_Group>;
@@ -4390,7 +6252,6 @@ def static_libgfortran : Flag<["-"], "static-libgfortran">, Group<gfortran_Group
def fblas_matmul_limit_EQ : Joined<["-"], "fblas-matmul-limit=">, Group<gfortran_Group>;
def fcheck_EQ : Joined<["-"], "fcheck=">, Group<gfortran_Group>;
def fcoarray_EQ : Joined<["-"], "fcoarray=">, Group<gfortran_Group>;
-def fconvert_EQ : Joined<["-"], "fconvert=">, Group<gfortran_Group>;
def ffpe_trap_EQ : Joined<["-"], "ffpe-trap=">, Group<gfortran_Group>;
def ffree_line_length_VALUE : Joined<["-"], "ffree-line-length-">, Group<gfortran_Group>;
def finit_character_EQ : Joined<["-"], "finit-character=">, Group<gfortran_Group>;
@@ -4407,7 +6268,7 @@ def frecord_marker_EQ : Joined<["-"], "frecord-marker=">, Group<gfortran_Group>;
defm aggressive_function_elimination : BooleanFFlag<"aggressive-function-elimination">, Group<gfortran_Group>;
defm align_commons : BooleanFFlag<"align-commons">, Group<gfortran_Group>;
defm all_intrinsics : BooleanFFlag<"all-intrinsics">, Group<gfortran_Group>;
-defm automatic : BooleanFFlag<"automatic">, Group<gfortran_Group>;
+def fautomatic : Flag<["-"], "fautomatic">; // -fno-automatic is significant
defm backtrace : BooleanFFlag<"backtrace">, Group<gfortran_Group>;
defm bounds_check : BooleanFFlag<"bounds-check">, Group<gfortran_Group>;
defm check_array_temporaries : BooleanFFlag<"check-array-temporaries">, Group<gfortran_Group>;
@@ -4439,35 +6300,88 @@ defm recursive : BooleanFFlag<"recursive">, Group<gfortran_Group>;
defm repack_arrays : BooleanFFlag<"repack-arrays">, Group<gfortran_Group>;
defm second_underscore : BooleanFFlag<"second-underscore">, Group<gfortran_Group>;
defm sign_zero : BooleanFFlag<"sign-zero">, Group<gfortran_Group>;
-defm stack_arrays : BooleanFFlag<"stack-arrays">, Group<gfortran_Group>;
-defm underscoring : BooleanFFlag<"underscoring">, Group<gfortran_Group>;
defm whole_file : BooleanFFlag<"whole-file">, Group<gfortran_Group>;
+// -W <arg> options unsupported by the flang compiler
+// If any of these options are passed into flang's compiler driver,
+// a warning will be raised and the argument will be claimed
+defm : FlangIgnoredDiagOpt<"extra">;
+defm : FlangIgnoredDiagOpt<"aliasing">;
+defm : FlangIgnoredDiagOpt<"ampersand">;
+defm : FlangIgnoredDiagOpt<"array-bounds">;
+defm : FlangIgnoredDiagOpt<"c-binding-type">;
+defm : FlangIgnoredDiagOpt<"character-truncation">;
+defm : FlangIgnoredDiagOpt<"conversion">;
+defm : FlangIgnoredDiagOpt<"do-subscript">;
+defm : FlangIgnoredDiagOpt<"function-elimination">;
+defm : FlangIgnoredDiagOpt<"implicit-interface">;
+defm : FlangIgnoredDiagOpt<"implicit-procedure">;
+defm : FlangIgnoredDiagOpt<"intrinsic-shadow">;
+defm : FlangIgnoredDiagOpt<"use-without-only">;
+defm : FlangIgnoredDiagOpt<"intrinsics-std">;
+defm : FlangIgnoredDiagOpt<"line-truncation">;
+defm : FlangIgnoredDiagOpt<"no-align-commons">;
+defm : FlangIgnoredDiagOpt<"no-overwrite-recursive">;
+defm : FlangIgnoredDiagOpt<"no-tabs">;
+defm : FlangIgnoredDiagOpt<"real-q-constant">;
+defm : FlangIgnoredDiagOpt<"surprising">;
+defm : FlangIgnoredDiagOpt<"underflow">;
+defm : FlangIgnoredDiagOpt<"unused-parameter">;
+defm : FlangIgnoredDiagOpt<"realloc-lhs">;
+defm : FlangIgnoredDiagOpt<"realloc-lhs-all">;
+defm : FlangIgnoredDiagOpt<"frontend-loop-interchange">;
+defm : FlangIgnoredDiagOpt<"target-lifetime">;
+
// C++ SYCL options
-def fsycl : Flag<["-"], "fsycl">, Flags<[NoXarchOption, CoreOption]>,
+def fsycl : Flag<["-"], "fsycl">,
+ Visibility<[ClangOption, CLOption]>,
Group<sycl_Group>, HelpText<"Enables SYCL kernels compilation for device">;
-def fno_sycl : Flag<["-"], "fno-sycl">, Flags<[NoXarchOption, CoreOption]>,
+def fno_sycl : Flag<["-"], "fno-sycl">,
+ Visibility<[ClangOption, CLOption]>,
Group<sycl_Group>, HelpText<"Disables SYCL kernels compilation for device">;
+// OS-specific options
+let Flags = [TargetSpecific] in {
+defm android_pad_segment : BooleanFFlag<"android-pad-segment">, Group<f_Group>;
+} // let Flags = [TargetSpecific]
+
+//===----------------------------------------------------------------------===//
+// FLangOption + NoXarchOption
+//===----------------------------------------------------------------------===//
+
+def flang_experimental_hlfir : Flag<["-"], "flang-experimental-hlfir">,
+ Flags<[HelpHidden]>, Visibility<[FlangOption, FC1Option]>,
+ HelpText<"Use HLFIR lowering (experimental)">;
+
+def flang_deprecated_no_hlfir : Flag<["-"], "flang-deprecated-no-hlfir">,
+ Flags<[HelpHidden]>, Visibility<[FlangOption, FC1Option]>,
+ HelpText<"Do not use HLFIR lowering (deprecated)">;
+
+def flang_experimental_polymorphism : Flag<["-"], "flang-experimental-polymorphism">,
+ Flags<[HelpHidden]>, Visibility<[FlangOption, FC1Option]>,
+ HelpText<"Enable Fortran 2003 polymorphism (experimental)">;
+
+
//===----------------------------------------------------------------------===//
// FLangOption + CoreOption + NoXarchOption
//===----------------------------------------------------------------------===//
-let Flags = [FlangOption, FlangOnlyOption, NoXarchOption, CoreOption] in {
+
def Xflang : Separate<["-"], "Xflang">,
HelpText<"Pass <arg> to the flang compiler">, MetaVarName<"<arg>">,
- Flags<[NoXarchOption, CoreOption]>, Group<CompileOnly_Group>;
-}
+ Flags<[NoXarchOption]>, Visibility<[FlangOption, CLOption]>,
+ Group<CompileOnly_Group>;
//===----------------------------------------------------------------------===//
// FlangOption and FC1 Options
//===----------------------------------------------------------------------===//
-let Flags = [FC1Option, FlangOption, FlangOnlyOption] in {
+
+let Visibility = [FC1Option, FlangOption] in {
def cpp : Flag<["-"], "cpp">, Group<f_Group>,
HelpText<"Enable predefined and command line preprocessor macros">;
def nocpp : Flag<["-"], "nocpp">, Group<f_Group>,
HelpText<"Disable predefined and command line preprocessor macros">;
-def module_dir : Separate<["-"], "module-dir">, MetaVarName<"<dir>">,
+def module_dir : JoinedOrSeparate<["-"], "module-dir">, MetaVarName<"<dir>">,
HelpText<"Put MODULE files in <dir>">,
DocBrief<[{This option specifies where to put .mod files for compiled modules.
It is also added to the list of directories to be searched by an USE statement.
@@ -4482,47 +6396,60 @@ def ffixed_line_length_EQ : Joined<["-"], "ffixed-line-length=">, Group<f_Group>
DocBrief<[{Set column after which characters are ignored in typical fixed-form lines in the source
file}]>;
def ffixed_line_length_VALUE : Joined<["-"], "ffixed-line-length-">, Group<f_Group>, Alias<ffixed_line_length_EQ>;
-def fopenacc : Flag<["-"], "fopenacc">, Group<f_Group>,
- HelpText<"Enable OpenACC">;
+def fconvert_EQ : Joined<["-"], "fconvert=">, Group<f_Group>,
+ HelpText<"Set endian conversion of data for unformatted files">;
def fdefault_double_8 : Flag<["-"],"fdefault-double-8">, Group<f_Group>,
HelpText<"Set the default double precision kind to an 8 byte wide type">;
def fdefault_integer_8 : Flag<["-"],"fdefault-integer-8">, Group<f_Group>,
- HelpText<"Set the default integer kind to an 8 byte wide type">;
+ HelpText<"Set the default integer and logical kind to an 8 byte wide type">;
def fdefault_real_8 : Flag<["-"],"fdefault-real-8">, Group<f_Group>,
HelpText<"Set the default real kind to an 8 byte wide type">;
def flarge_sizes : Flag<["-"],"flarge-sizes">, Group<f_Group>,
HelpText<"Use INTEGER(KIND=8) for the result type in size-related intrinsics">;
-def fbackslash : Flag<["-"], "fbackslash">, Group<f_Group>,
- HelpText<"Specify that backslash in string introduces an escape character">,
- DocBrief<[{Change the interpretation of backslashes in string literals from
-a single backslash character to "C-style" escape characters.}]>;
-def fno_backslash : Flag<["-"], "fno-backslash">, Group<f_Group>;
-def fxor_operator : Flag<["-"], "fxor-operator">, Group<f_Group>,
- HelpText<"Enable .XOR. as a synonym of .NEQV.">;
-def fno_xor_operator : Flag<["-"], "fno-xor-operator">, Group<f_Group>;
-def flogical_abbreviations : Flag<["-"], "flogical-abbreviations">, Group<f_Group>,
- HelpText<"Enable logical abbreviations">;
-def fno_logical_abbreviations : Flag<["-"], "fno-logical-abbreviations">, Group<f_Group>;
-def fimplicit_none : Flag<["-"], "fimplicit-none">, Group<f_Group>,
- HelpText<"No implicit typing allowed unless overridden by IMPLICIT statements">;
-def fno_implicit_none : Flag<["-"], "fno-implicit-none">, Group<f_Group>;
+
def falternative_parameter_statement : Flag<["-"], "falternative-parameter-statement">, Group<f_Group>,
HelpText<"Enable the old style PARAMETER statement">;
def fintrinsic_modules_path : Separate<["-"], "fintrinsic-modules-path">, Group<f_Group>, MetaVarName<"<dir>">,
HelpText<"Specify where to find the compiled intrinsic modules">,
- DocBrief<[{This option specifies the location of pre-compiled intrinsic modules,
+ DocBrief<[{This option specifies the location of pre-compiled intrinsic modules,
if they are not in the default location expected by the compiler.}]>;
-}
+
+defm backslash : OptInFC1FFlag<"backslash", "Specify that backslash in string introduces an escape character">;
+defm xor_operator : OptInFC1FFlag<"xor-operator", "Enable .XOR. as a synonym of .NEQV.">;
+defm logical_abbreviations : OptInFC1FFlag<"logical-abbreviations", "Enable logical abbreviations">;
+defm implicit_none : OptInFC1FFlag<"implicit-none", "No implicit typing allowed unless overridden by IMPLICIT statements">;
+defm underscoring : OptInFC1FFlag<"underscoring", "Appends one trailing underscore to external names">;
+defm ppc_native_vec_elem_order: BoolOptionWithoutMarshalling<"f", "ppc-native-vector-element-order",
+ PosFlag<SetTrue, [], [ClangOption], "Specifies PowerPC native vector element order (default)">,
+ NegFlag<SetFalse, [], [ClangOption], "Specifies PowerPC non-native vector element order">>;
+
+def fno_automatic : Flag<["-"], "fno-automatic">, Group<f_Group>,
+ HelpText<"Implies the SAVE attribute for non-automatic local objects in subprograms unless RECURSIVE">;
+
+defm stack_arrays : BoolOptionWithoutMarshalling<"f", "stack-arrays",
+ PosFlag<SetTrue, [], [ClangOption], "Attempt to allocate array temporaries on the stack, no matter their size">,
+ NegFlag<SetFalse, [], [ClangOption], "Allocate array temporaries on the heap (default)">>;
+defm loop_versioning : BoolOptionWithoutMarshalling<"f", "version-loops-for-stride",
+ PosFlag<SetTrue, [], [ClangOption], "Create unit-strided versions of loops">,
+ NegFlag<SetFalse, [], [ClangOption], "Do not create unit-strided loops (default)">>;
+} // let Visibility = [FC1Option, FlangOption]
def J : JoinedOrSeparate<["-"], "J">,
- Flags<[RenderJoined, FlangOption, FC1Option, FlangOnlyOption]>,
+ Flags<[RenderJoined]>, Visibility<[FlangOption, FC1Option]>,
Group<gfortran_Group>,
Alias<module_dir>;
+let Visibility = [FlangOption] in {
+def no_fortran_main : Flag<["-"], "fno-fortran-main">,
+ Visibility<[FlangOption]>, Group<f_Group>,
+ HelpText<"Do not include Fortran_main.a (provided by Flang) when linking">;
+} // let Visibility = [ FlangOption ]
+
//===----------------------------------------------------------------------===//
// FC1 Options
//===----------------------------------------------------------------------===//
-let Flags = [FC1Option, FlangOnlyOption] in {
+
+let Visibility = [FC1Option] in {
def fget_definition : MultiArg<["-"], "fget-definition", 3>,
HelpText<"Get the symbol definition from <line> <start-column> <end-column>">,
@@ -4545,6 +6472,8 @@ def fdebug_dump_parse_tree : Flag<["-"], "fdebug-dump-parse-tree">, Group<Action
HelpText<"Dump the parse tree">,
DocBrief<[{Run the Parser and the semantic checks, and then output the
parse tree.}]>;
+def fdebug_dump_pft : Flag<["-"], "fdebug-dump-pft">, Group<Action_Group>,
+ HelpText<"Dump the pre-fir parse tree">;
def fdebug_dump_parse_tree_no_sema : Flag<["-"], "fdebug-dump-parse-tree-no-sema">, Group<Action_Group>,
HelpText<"Dump the parse tree (skips the semantic checks)">,
DocBrief<[{Run the Parser and then output the parse tree. Semantic
@@ -4559,39 +6488,64 @@ def fdebug_measure_parse_tree : Flag<["-"], "fdebug-measure-parse-tree">, Group<
HelpText<"Measure the parse tree">;
def fdebug_pre_fir_tree : Flag<["-"], "fdebug-pre-fir-tree">, Group<Action_Group>,
HelpText<"Dump the pre-FIR tree">;
-def fdebug_module_writer : Flag<["-"],"fdebug-module-writer">,
+def fdebug_module_writer : Flag<["-"],"fdebug-module-writer">,
HelpText<"Enable debug messages while writing module files">;
def fget_symbols_sources : Flag<["-"], "fget-symbols-sources">, Group<Action_Group>,
HelpText<"Dump symbols and their source code locations">;
def module_suffix : Separate<["-"], "module-suffix">, Group<f_Group>, MetaVarName<"<suffix>">,
HelpText<"Use <suffix> as the suffix for module files (the default value is `.mod`)">;
-def fanalyzed_objects_for_unparse : Flag<["-"],
- "fanalyzed-objects-for-unparse">, Group<f_Group>;
-def fno_analyzed_objects_for_unparse : Flag<["-"],
- "fno-analyzed-objects-for-unparse">, Group<f_Group>,
- HelpText<"Do not use the analyzed objects when unparsing">;
+def fno_reformat : Flag<["-"], "fno-reformat">, Group<Preprocessor_Group>,
+ HelpText<"Dump the cooked character stream in -E mode">;
+defm analyzed_objects_for_unparse : OptOutFC1FFlag<"analyzed-objects-for-unparse", "", "Do not use the analyzed objects when unparsing">;
-}
+def emit_fir : Flag<["-"], "emit-fir">, Group<Action_Group>,
+ HelpText<"Build the parse tree, then lower it to FIR">;
+def emit_mlir : Flag<["-"], "emit-mlir">, Alias<emit_fir>;
+
+def emit_hlfir : Flag<["-"], "emit-hlfir">, Group<Action_Group>,
+ HelpText<"Build the parse tree, then lower it to HLFIR">;
+
+} // let Visibility = [FC1Option]
//===----------------------------------------------------------------------===//
-// CC1 Options
+// Target Options (cc1 + cc1as)
//===----------------------------------------------------------------------===//
-let Flags = [CC1Option, NoDriverOption] in {
+let Visibility = [CC1Option, CC1AsOption] in {
+
+def tune_cpu : Separate<["-"], "tune-cpu">,
+ HelpText<"Tune for a specific cpu type">,
+ MarshallingInfoString<TargetOpts<"TuneCPU">>;
+def target_abi : Separate<["-"], "target-abi">,
+ HelpText<"Target a particular ABI type">,
+ MarshallingInfoString<TargetOpts<"ABI">>;
+def target_sdk_version_EQ : Joined<["-"], "target-sdk-version=">,
+ HelpText<"The version of target SDK used for compilation">;
+def darwin_target_variant_sdk_version_EQ : Joined<["-"],
+ "darwin-target-variant-sdk-version=">,
+ HelpText<"The version of darwin target variant SDK used for compilation">;
+
+} // let Visibility = [CC1Option, CC1AsOption]
+
+let Visibility = [ClangOption, CC1Option, CC1AsOption] in {
+
+def darwin_target_variant_triple : Separate<["-"], "darwin-target-variant-triple">,
+ HelpText<"Specify the darwin target variant triple">,
+ MarshallingInfoString<TargetOpts<"DarwinTargetVariantTriple">>,
+ Normalizer<"normalizeTriple">;
+
+} // let Visibility = [ClangOption, CC1Option, CC1AsOption]
//===----------------------------------------------------------------------===//
-// Target Options
+// Target Options (cc1 + cc1as + fc1)
//===----------------------------------------------------------------------===//
-let Flags = [CC1Option, CC1AsOption, NoDriverOption] in {
+let Visibility = [CC1Option, CC1AsOption, FC1Option] in {
def target_cpu : Separate<["-"], "target-cpu">,
HelpText<"Target a specific cpu type">,
MarshallingInfoString<TargetOpts<"CPU">>;
-def tune_cpu : Separate<["-"], "tune-cpu">,
- HelpText<"Tune for a specific cpu type">,
- MarshallingInfoString<TargetOpts<"TuneCPU">>;
def target_feature : Separate<["-"], "target-feature">,
HelpText<"Target specific attributes">,
MarshallingInfoStringVector<TargetOpts<"FeaturesAsWritten">>;
@@ -4599,13 +6553,14 @@ def triple : Separate<["-"], "triple">,
HelpText<"Specify target triple (e.g. i686-apple-darwin9)">,
MarshallingInfoString<TargetOpts<"Triple">, "llvm::Triple::normalize(llvm::sys::getDefaultTargetTriple())">,
AlwaysEmit, Normalizer<"normalizeTriple">;
-def target_abi : Separate<["-"], "target-abi">,
- HelpText<"Target a particular ABI type">,
- MarshallingInfoString<TargetOpts<"ABI">>;
-def target_sdk_version_EQ : Joined<["-"], "target-sdk-version=">,
- HelpText<"The version of target SDK used for compilation">;
-}
+} // let Visibility = [CC1Option, CC1AsOption, FC1Option]
+
+//===----------------------------------------------------------------------===//
+// Target Options (other)
+//===----------------------------------------------------------------------===//
+
+let Visibility = [CC1Option] in {
def target_linker_version : Separate<["-"], "target-linker-version">,
HelpText<"Target linker version">,
@@ -4617,24 +6572,24 @@ def mfpmath : Separate<["-"], "mfpmath">,
defm padding_on_unsigned_fixed_point : BoolOption<"f", "padding-on-unsigned-fixed-point",
LangOpts<"PaddingOnUnsignedFixedPoint">, DefaultFalse,
- PosFlag<SetTrue, [], "Force each unsigned fixed point type to have an extra bit of padding to align their scales with those of signed fixed point types">,
+ PosFlag<SetTrue, [], [ClangOption], "Force each unsigned fixed point type to have an extra bit of padding to align their scales with those of signed fixed point types">,
NegFlag<SetFalse>>,
ShouldParseIf<ffixed_point.KeyPath>;
+} // let Visibility = [CC1Option]
+
//===----------------------------------------------------------------------===//
// Analyzer Options
//===----------------------------------------------------------------------===//
+let Visibility = [CC1Option] in {
+
def analysis_UnoptimizedCFG : Flag<["-"], "unoptimized-cfg">,
HelpText<"Generate unoptimized CFGs for all analyses">,
MarshallingInfoFlag<AnalyzerOpts<"UnoptimizedCFG">>;
def analysis_CFGAddImplicitDtors : Flag<["-"], "cfg-add-implicit-dtors">,
HelpText<"Add C++ implicit destructors to CFGs for all analyses">;
-def analyzer_store : Separate<["-"], "analyzer-store">,
- HelpText<"Source Code Analysis - Abstract Memory Store Models">;
-def analyzer_store_EQ : Joined<["-"], "analyzer-store=">, Alias<analyzer_store>;
-
def analyzer_constraints : Separate<["-"], "analyzer-constraints">,
HelpText<"Source Code Analysis - Symbolic Constraint Engines">;
def analyzer_constraints_EQ : Joined<["-"], "analyzer-constraints=">,
@@ -4652,9 +6607,6 @@ def analyzer_purge_EQ : Joined<["-"], "analyzer-purge=">, Alias<analyzer_purge>;
def analyzer_opt_analyze_headers : Flag<["-"], "analyzer-opt-analyze-headers">,
HelpText<"Force the static analyzer to analyze functions defined in header files">,
MarshallingInfoFlag<AnalyzerOpts<"AnalyzeAll">>;
-def analyzer_opt_analyze_nested_blocks : Flag<["-"], "analyzer-opt-analyze-nested-blocks">,
- HelpText<"Analyze the definitions of blocks in addition to functions">,
- MarshallingInfoFlag<AnalyzerOpts<"AnalyzeNestedBlocks">>;
def analyzer_display_progress : Flag<["-"], "analyzer-display-progress">,
HelpText<"Emit verbose output about the analyzer's progress">,
MarshallingInfoFlag<AnalyzerOpts<"AnalyzerDisplayProgress">>;
@@ -4698,7 +6650,7 @@ def analyzer_stats : Flag<["-"], "analyzer-stats">,
def analyzer_checker : Separate<["-"], "analyzer-checker">,
HelpText<"Choose analyzer checkers to enable">,
ValuesCode<[{
- const char *Values =
+ static constexpr const char VALUES_CODE [] =
#define GET_CHECKERS
#define CHECKER(FULLNAME, CLASS, HT, DOC_URI, IS_HIDDEN) FULLNAME ","
#include "clang/StaticAnalyzer/Checkers/Checkers.inc"
@@ -4775,23 +6727,39 @@ def analyzer_werror : Flag<["-"], "analyzer-werror">,
HelpText<"Emit analyzer results as errors rather than warnings">,
MarshallingInfoFlag<AnalyzerOpts<"AnalyzerWerror">>;
+} // let Visibility = [CC1Option]
+
//===----------------------------------------------------------------------===//
// Migrator Options
//===----------------------------------------------------------------------===//
+
def migrator_no_nsalloc_error : Flag<["-"], "no-ns-alloc-error">,
HelpText<"Do not error on use of NSAllocateCollectable/NSReallocateCollectable">,
+ Visibility<[CC1Option]>,
MarshallingInfoFlag<MigratorOpts<"NoNSAllocReallocError">>;
def migrator_no_finalize_removal : Flag<["-"], "no-finalize-removal">,
HelpText<"Do not remove finalize method in gc mode">,
+ Visibility<[CC1Option]>,
MarshallingInfoFlag<MigratorOpts<"NoFinalizeRemoval">>;
//===----------------------------------------------------------------------===//
// CodeGen Options
//===----------------------------------------------------------------------===//
-let Flags = [CC1Option, CC1AsOption, NoDriverOption] in {
+let Visibility = [CC1Option, CC1AsOption, FC1Option] in {
+
+def mrelocation_model : Separate<["-"], "mrelocation-model">,
+ HelpText<"The relocation model to use">, Values<"static,pic,ropi,rwpi,ropi-rwpi,dynamic-no-pic">,
+ NormalizedValuesScope<"llvm::Reloc">,
+ NormalizedValues<["Static", "PIC_", "ROPI", "RWPI", "ROPI_RWPI", "DynamicNoPIC"]>,
+ MarshallingInfoEnum<CodeGenOpts<"RelocationModel">, "PIC_">;
def debug_info_kind_EQ : Joined<["-"], "debug-info-kind=">;
+
+} // let Visibility = [CC1Option, CC1AsOption, FC1Option]
+
+let Visibility = [CC1Option, CC1AsOption] in {
+
def debug_info_macro : Flag<["-"], "debug-info-macro">,
HelpText<"Emit macro debug information">,
MarshallingInfoFlag<CodeGenOpts<"MacroDebugInfo">>;
@@ -4811,8 +6779,8 @@ def record_command_line : Separate<["-"], "record-command-line">,
HelpText<"The string to embed in the .LLVM.command.line section.">,
MarshallingInfoString<CodeGenOpts<"RecordCommandLine">>;
def compress_debug_sections_EQ : Joined<["-", "--"], "compress-debug-sections=">,
- HelpText<"DWARF debug sections compression type">, Values<"none,zlib,zlib-gnu">,
- NormalizedValuesScope<"llvm::DebugCompressionType">, NormalizedValues<["None", "Z", "GNU"]>,
+ HelpText<"DWARF debug sections compression type">, Values<"none,zlib,zstd">,
+ NormalizedValuesScope<"llvm::DebugCompressionType">, NormalizedValues<["None", "Zlib", "Zstd"]>,
MarshallingInfoEnum<CodeGenOpts<"CompressDebugSections">, "None">;
def compress_debug_sections : Flag<["-", "--"], "compress-debug-sections">,
Alias<compress_debug_sections_EQ>, AliasArgs<["zlib"]>;
@@ -4825,19 +6793,17 @@ def massembler_no_warn : Flag<["-"], "massembler-no-warn">,
def massembler_fatal_warnings : Flag<["-"], "massembler-fatal-warnings">,
HelpText<"Make assembler warnings fatal">,
MarshallingInfoFlag<CodeGenOpts<"FatalWarnings">>;
-def mrelax_relocations : Flag<["--"], "mrelax-relocations">,
- HelpText<"Use relaxable elf relocations">,
- MarshallingInfoFlag<CodeGenOpts<"RelaxELFRelocations">>;
+def mrelax_relocations_no : Flag<["-"], "mrelax-relocations=no">,
+ HelpText<"Disable x86 relax relocations">,
+ MarshallingInfoNegativeFlag<CodeGenOpts<"RelaxELFRelocations">>;
def msave_temp_labels : Flag<["-"], "msave-temp-labels">,
HelpText<"Save temporary labels in the symbol table. "
"Note this may change .s semantics and shouldn't generally be used "
"on compiler-generated code.">,
MarshallingInfoFlag<CodeGenOpts<"SaveTempLabels">>;
-def mrelocation_model : Separate<["-"], "mrelocation-model">,
- HelpText<"The relocation model to use">, Values<"static,pic,ropi,rwpi,ropi-rwpi,dynamic-no-pic">,
- NormalizedValuesScope<"llvm::Reloc">,
- NormalizedValues<["Static", "PIC_", "ROPI", "RWPI", "ROPI_RWPI", "DynamicNoPIC"]>,
- MarshallingInfoEnum<CodeGenOpts<"RelocationModel">, "PIC_">;
+def mno_type_check : Flag<["-"], "mno-type-check">,
+ HelpText<"Don't perform type checking of the assembly code (wasm only)">,
+ MarshallingInfoFlag<CodeGenOpts<"NoTypeCheck">>;
def fno_math_builtin : Flag<["-"], "fno-math-builtin">,
HelpText<"Disable implicit builtin knowledge of math functions">,
MarshallingInfoFlag<LangOpts<"NoMathBuiltin">>;
@@ -4845,8 +6811,17 @@ def fno_use_ctor_homing: Flag<["-"], "fno-use-ctor-homing">,
HelpText<"Don't use constructor homing for debug info">;
def fuse_ctor_homing: Flag<["-"], "fuse-ctor-homing">,
HelpText<"Use constructor homing if we are using limited debug info already">;
-}
+def as_secure_log_file : Separate<["-"], "as-secure-log-file">,
+ HelpText<"Emit .secure_log_unique directives to this filename.">,
+ MarshallingInfoString<CodeGenOpts<"AsSecureLogFile">>;
+
+} // let Visibility = [CC1Option, CC1AsOption]
+
+let Visibility = [CC1Option] in {
+def llvm_verify_each : Flag<["-"], "llvm-verify-each">,
+ HelpText<"Run the LLVM verifier after every LLVM pass">,
+ MarshallingInfoFlag<CodeGenOpts<"VerifyEach">>;
def disable_llvm_verifier : Flag<["-"], "disable-llvm-verifier">,
HelpText<"Don't run the LLVM IR verifier pass">,
MarshallingInfoNegativeFlag<CodeGenOpts<"VerifyModule">>;
@@ -4882,7 +6857,7 @@ def fforbid_guard_variables : Flag<["-"], "fforbid-guard-variables">,
HelpText<"Emit an error if a C++ static local initializer would need a guard variable">,
MarshallingInfoFlag<CodeGenOpts<"ForbidGuardVariables">>;
def no_implicit_float : Flag<["-"], "no-implicit-float">,
- HelpText<"Don't generate implicit floating point instructions">,
+ HelpText<"Don't generate implicit floating point or vector instructions">,
MarshallingInfoFlag<CodeGenOpts<"NoImplicitFloat">>;
def fdump_vtable_layouts : Flag<["-"], "fdump-vtable-layouts">,
HelpText<"Dump the layouts of all vtables that will be emitted in a translation unit">,
@@ -4890,18 +6865,10 @@ def fdump_vtable_layouts : Flag<["-"], "fdump-vtable-layouts">,
def fmerge_functions : Flag<["-"], "fmerge-functions">,
HelpText<"Permit merging of identical functions when optimizing.">,
MarshallingInfoFlag<CodeGenOpts<"MergeFunctions">>;
-def coverage_data_file : Separate<["-"], "coverage-data-file">,
- HelpText<"Emit coverage data to this filename.">,
- MarshallingInfoString<CodeGenOpts<"CoverageDataFile">>,
- ShouldParseIf<!strconcat(fprofile_arcs.KeyPath, "||", ftest_coverage.KeyPath)>;
-def coverage_data_file_EQ : Joined<["-"], "coverage-data-file=">,
- Alias<coverage_data_file>;
-def coverage_notes_file : Separate<["-"], "coverage-notes-file">,
- HelpText<"Emit coverage notes to this filename.">,
- MarshallingInfoString<CodeGenOpts<"CoverageNotesFile">>,
- ShouldParseIf<!strconcat(fprofile_arcs.KeyPath, "||", ftest_coverage.KeyPath)>;
-def coverage_notes_file_EQ : Joined<["-"], "coverage-notes-file=">,
- Alias<coverage_notes_file>;
+def : Joined<["-"], "coverage-data-file=">,
+ MarshallingInfoString<CodeGenOpts<"CoverageDataFile">>;
+def : Joined<["-"], "coverage-notes-file=">,
+ MarshallingInfoString<CodeGenOpts<"CoverageNotesFile">>;
def coverage_version_EQ : Joined<["-"], "coverage-version=">,
HelpText<"Four-byte version string for gcov files.">;
def dump_coverage_mapping : Flag<["-"], "dump-coverage-mapping">,
@@ -4921,25 +6888,12 @@ def new_struct_path_tbaa : Flag<["-"], "new-struct-path-tbaa">,
def mdebug_pass : Separate<["-"], "mdebug-pass">,
HelpText<"Enable additional debug output">,
MarshallingInfoString<CodeGenOpts<"DebugPass">>;
-def mframe_pointer_EQ : Joined<["-"], "mframe-pointer=">,
- HelpText<"Specify which frame pointers to retain (all, non-leaf, none).">, Values<"all,non-leaf,none">,
- NormalizedValuesScope<"CodeGenOptions::FramePointerKind">, NormalizedValues<["All", "NonLeaf", "None"]>,
- MarshallingInfoEnum<CodeGenOpts<"FramePointer">, "None">;
-def mdisable_tail_calls : Flag<["-"], "mdisable-tail-calls">,
- HelpText<"Disable tail call optimization, keeping the call stack accurate">,
- MarshallingInfoFlag<CodeGenOpts<"DisableTailCalls">>;
-def menable_no_infinities : Flag<["-"], "menable-no-infs">,
- HelpText<"Allow optimization to assume there are no infinities.">,
- MarshallingInfoFlag<LangOpts<"NoHonorInfs">>, ImpliedByAnyOf<[ffinite_math_only.KeyPath]>;
-def menable_no_nans : Flag<["-"], "menable-no-nans">,
- HelpText<"Allow optimization to assume there are no NaNs.">,
- MarshallingInfoFlag<LangOpts<"NoHonorNaNs">>, ImpliedByAnyOf<[ffinite_math_only.KeyPath]>;
-def mreassociate : Flag<["-"], "mreassociate">,
- HelpText<"Allow reassociation transformations for floating-point instructions">,
- MarshallingInfoFlag<LangOpts<"AllowFPReassoc">>, ImpliedByAnyOf<[menable_unsafe_fp_math.KeyPath]>;
def mabi_EQ_ieeelongdouble : Flag<["-"], "mabi=ieeelongdouble">,
HelpText<"Use IEEE 754 quadruple-precision for long double">,
MarshallingInfoFlag<LangOpts<"PPCIEEELongDouble">>;
+def mabi_EQ_vec_extabi : Flag<["-"], "mabi=vec-extabi">,
+ HelpText<"Enable the extended Altivec ABI on AIX. Use volatile and nonvolatile vector registers">,
+ MarshallingInfoFlag<LangOpts<"EnableAIXExtendedAltivecABI">>;
def mfloat_abi : Separate<["-"], "mfloat-abi">,
HelpText<"The float ABI to use">,
MarshallingInfoString<CodeGenOpts<"FloatABI">>;
@@ -4954,28 +6908,26 @@ def mregparm : Separate<["-"], "mregparm">,
def msmall_data_limit : Separate<["-"], "msmall-data-limit">,
HelpText<"Put global and static data smaller than the limit into a special section">,
MarshallingInfoInt<CodeGenOpts<"SmallDataLimit">>;
-def munwind_tables : Flag<["-"], "munwind-tables">,
+def funwind_tables_EQ : Joined<["-"], "funwind-tables=">,
HelpText<"Generate unwinding tables for all functions">,
- MarshallingInfoFlag<CodeGenOpts<"UnwindTables">>;
-def mconstructor_aliases : Flag<["-"], "mconstructor-aliases">,
- HelpText<"Emit complete constructors and destructors as aliases when possible">,
- MarshallingInfoFlag<CodeGenOpts<"CXXCtorDtorAliases">>;
+ MarshallingInfoInt<CodeGenOpts<"UnwindTables">>;
+defm constructor_aliases : BoolOption<"m", "constructor-aliases",
+ CodeGenOpts<"CXXCtorDtorAliases">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption], "Enable">,
+ NegFlag<SetFalse, [], [ClangOption], "Disable">,
+ BothFlags<[], [ClangOption, CC1Option],
+ " emitting complete constructors and destructors as aliases when possible">>;
def mlink_bitcode_file : Separate<["-"], "mlink-bitcode-file">,
HelpText<"Link the given bitcode file before performing optimizations.">;
def mlink_builtin_bitcode : Separate<["-"], "mlink-builtin-bitcode">,
HelpText<"Link and internalize needed symbols from the given bitcode file "
"before performing optimizations.">;
-def mlink_cuda_bitcode : Separate<["-"], "mlink-cuda-bitcode">,
- Alias<mlink_builtin_bitcode>;
def vectorize_loops : Flag<["-"], "vectorize-loops">,
HelpText<"Run the Loop vectorization passes">,
MarshallingInfoFlag<CodeGenOpts<"VectorizeLoop">>;
def vectorize_slp : Flag<["-"], "vectorize-slp">,
HelpText<"Run the SLP vectorization passes">,
MarshallingInfoFlag<CodeGenOpts<"VectorizeSLP">>;
-def dependent_lib : Joined<["--"], "dependent-lib=">,
- HelpText<"Add dependent library">,
- MarshallingInfoStringVector<CodeGenOpts<"DependentLibraries">>;
def linker_option : Joined<["--"], "linker-option=">,
HelpText<"Add linker option">,
MarshallingInfoStringVector<CodeGenOpts<"LinkerOptions">>;
@@ -5018,6 +6970,10 @@ def fsanitize_coverage_pc_table
: Flag<["-"], "fsanitize-coverage-pc-table">,
HelpText<"Create a table of coverage-instrumented PCs">,
MarshallingInfoFlag<CodeGenOpts<"SanitizeCoveragePCTable">>;
+def fsanitize_coverage_control_flow
+ : Flag<["-"], "fsanitize-coverage-control-flow">,
+ HelpText<"Collect control flow of function">,
+ MarshallingInfoFlag<CodeGenOpts<"SanitizeCoverageControlFlow">>;
def fsanitize_coverage_trace_pc
: Flag<["-"], "fsanitize-coverage-trace-pc">,
HelpText<"Enable PC tracing in sanitizer coverage">,
@@ -5034,13 +6990,32 @@ def fsanitize_coverage_stack_depth
: Flag<["-"], "fsanitize-coverage-stack-depth">,
HelpText<"Enable max stack depth tracing">,
MarshallingInfoFlag<CodeGenOpts<"SanitizeCoverageStackDepth">>;
+def fsanitize_coverage_trace_loads
+ : Flag<["-"], "fsanitize-coverage-trace-loads">,
+ HelpText<"Enable tracing of loads">,
+ MarshallingInfoFlag<CodeGenOpts<"SanitizeCoverageTraceLoads">>;
+def fsanitize_coverage_trace_stores
+ : Flag<["-"], "fsanitize-coverage-trace-stores">,
+ HelpText<"Enable tracing of stores">,
+ MarshallingInfoFlag<CodeGenOpts<"SanitizeCoverageTraceStores">>;
+def fexperimental_sanitize_metadata_EQ_covered
+ : Flag<["-"], "fexperimental-sanitize-metadata=covered">,
+ HelpText<"Emit PCs for code covered with binary analysis sanitizers">,
+ MarshallingInfoFlag<CodeGenOpts<"SanitizeBinaryMetadataCovered">>;
+def fexperimental_sanitize_metadata_EQ_atomics
+ : Flag<["-"], "fexperimental-sanitize-metadata=atomics">,
+ HelpText<"Emit PCs for atomic operations used by binary analysis sanitizers">,
+ MarshallingInfoFlag<CodeGenOpts<"SanitizeBinaryMetadataAtomics">>;
+def fexperimental_sanitize_metadata_EQ_uar
+ : Flag<["-"], "fexperimental-sanitize-metadata=uar">,
+ HelpText<"Emit PCs for start of functions that are subject for use-after-return checking.">,
+ MarshallingInfoFlag<CodeGenOpts<"SanitizeBinaryMetadataUAR">>;
def fpatchable_function_entry_offset_EQ
: Joined<["-"], "fpatchable-function-entry-offset=">, MetaVarName<"<M>">,
HelpText<"Generate M NOPs before function entry">,
MarshallingInfoInt<CodeGenOpts<"PatchableFunctionEntryOffset">>;
def fprofile_instrument_EQ : Joined<["-"], "fprofile-instrument=">,
- HelpText<"Enable PGO instrumentation. The accepted value is clang, llvm, "
- "or none">, Values<"none,clang,llvm,csllvm">,
+ HelpText<"Enable PGO instrumentation">, Values<"none,clang,llvm,csllvm">,
NormalizedValuesScope<"CodeGenOptions">,
NormalizedValues<["ProfileNone", "ProfileClangInstr", "ProfileIRInstr", "ProfileCSIRInstr"]>,
MarshallingInfoEnum<CodeGenOpts<"ProfileInstr">, "ProfileNone">;
@@ -5058,16 +7033,9 @@ def flto_visibility_public_std:
MarshallingInfoFlag<CodeGenOpts<"LTOVisibilityPublicStd">>;
defm lto_unit : BoolOption<"f", "lto-unit",
CodeGenOpts<"LTOUnit">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Emit IR to support LTO unit features (CFI, whole program vtable opt)">,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option],
+ "Emit IR to support LTO unit features (CFI, whole program vtable opt)">,
NegFlag<SetFalse>>;
-defm debug_pass_manager : BoolOption<"f", "debug-pass-manager",
- CodeGenOpts<"DebugPassManager">, DefaultFalse,
- PosFlag<SetTrue, [], "Prints debug information for the new pass manager">,
- NegFlag<SetFalse, [], "Disables debug printing for the new pass manager">>;
-def fexperimental_debug_variable_locations : Flag<["-"],
- "fexperimental-debug-variable-locations">,
- HelpText<"Use experimental new value-tracking variable locations">,
- MarshallingInfoFlag<CodeGenOpts<"ValueTrackingVariableLocations">>;
def fverify_debuginfo_preserve
: Flag<["-"], "fverify-debuginfo-preserve">,
HelpText<"Enable Debug Info Metadata preservation testing in "
@@ -5090,6 +7058,10 @@ def msign_return_address_key_EQ : Joined<["-"], "msign-return-address-key=">,
Values<"a_key,b_key">;
def mbranch_target_enforce : Flag<["-"], "mbranch-target-enforce">,
MarshallingInfoFlag<LangOpts<"BranchTargetEnforcement">>;
+def mbranch_protection_pauth_lr : Flag<["-"], "mbranch-protection-pauth-lr">,
+ MarshallingInfoFlag<LangOpts<"BranchProtectionPAuthLR">>;
+def mguarded_control_stack : Flag<["-"], "mguarded-control-stack">,
+ MarshallingInfoFlag<LangOpts<"GuardedControlStack">>;
def fno_dllexport_inlines : Flag<["-"], "fno-dllexport-inlines">,
MarshallingInfoNegativeFlag<LangOpts<"DllExportInlines">>;
def cfguard_no_checks : Flag<["-"], "cfguard-no-checks">,
@@ -5105,10 +7077,28 @@ def ehcontguard : Flag<["-"], "ehcontguard">,
def fdenormal_fp_math_f32_EQ : Joined<["-"], "fdenormal-fp-math-f32=">,
Group<f_Group>;
+def fctor_dtor_return_this : Flag<["-"], "fctor-dtor-return-this">,
+ HelpText<"Change the C++ ABI to returning `this` pointer from constructors "
+ "and non-deleting destructors. (No effect on Microsoft ABI)">,
+ MarshallingInfoFlag<CodeGenOpts<"CtorDtorReturnThis">>;
+
+def fexperimental_assignment_tracking_EQ : Joined<["-"], "fexperimental-assignment-tracking=">,
+ Group<f_Group>, CodeGenOpts<"EnableAssignmentTracking">,
+ NormalizedValuesScope<"CodeGenOptions::AssignmentTrackingOpts">,
+ Values<"disabled,enabled,forced">, NormalizedValues<["Disabled","Enabled","Forced"]>,
+ MarshallingInfoEnum<CodeGenOpts<"AssignmentTrackingMode">, "Enabled">;
+
+def enable_tlsdesc : Flag<["-"], "enable-tlsdesc">,
+ MarshallingInfoFlag<CodeGenOpts<"EnableTLSDESC">>;
+
+} // let Visibility = [CC1Option]
+
//===----------------------------------------------------------------------===//
// Dependency Output Options
//===----------------------------------------------------------------------===//
+let Visibility = [CC1Option] in {
+
def sys_header_deps : Flag<["-"], "sys-header-deps">,
HelpText<"Include system headers in dependency output">,
MarshallingInfoFlag<DependencyOutputOpts<"IncludeSystemHeaders">>;
@@ -5118,13 +7108,25 @@ def module_file_deps : Flag<["-"], "module-file-deps">,
def header_include_file : Separate<["-"], "header-include-file">,
HelpText<"Filename (or -) to write header include output to">,
MarshallingInfoString<DependencyOutputOpts<"HeaderIncludeOutputFile">>;
+def header_include_format_EQ : Joined<["-"], "header-include-format=">,
+ HelpText<"set format in which header info is emitted">,
+ Values<"textual,json">, NormalizedValues<["HIFMT_Textual", "HIFMT_JSON"]>,
+ MarshallingInfoEnum<DependencyOutputOpts<"HeaderIncludeFormat">, "HIFMT_Textual">;
+def header_include_filtering_EQ : Joined<["-"], "header-include-filtering=">,
+ HelpText<"set the flag that enables filtering header information">,
+ Values<"none,only-direct-system">, NormalizedValues<["HIFIL_None", "HIFIL_Only_Direct_System"]>,
+ MarshallingInfoEnum<DependencyOutputOpts<"HeaderIncludeFiltering">, "HIFIL_None">;
def show_includes : Flag<["--"], "show-includes">,
HelpText<"Print cl.exe style /showIncludes to stdout">;
+} // let Visibility = [CC1Option]
+
//===----------------------------------------------------------------------===//
// Diagnostic Options
//===----------------------------------------------------------------------===//
+let Visibility = [CC1Option] in {
+
def diagnostic_log_file : Separate<["-"], "diagnostic-log-file">,
HelpText<"Filename (or -) to log diagnostics to">,
MarshallingInfoString<DiagnosticOpts<"DiagnosticLogFile">>;
@@ -5133,11 +7135,13 @@ def diagnostic_serialized_file : Separate<["-"], "serialize-diagnostic-file">,
HelpText<"File for serializing diagnostics in a binary format">;
def fdiagnostics_format : Separate<["-"], "fdiagnostics-format">,
- HelpText<"Change diagnostic formatting to match IDE and command line tools">, Values<"clang,msvc,vi">,
- NormalizedValuesScope<"DiagnosticOptions">, NormalizedValues<["Clang", "MSVC", "Vi"]>,
+ HelpText<"Change diagnostic formatting to match IDE and command line tools">,
+ Values<"clang,msvc,vi,sarif,SARIF">,
+ NormalizedValuesScope<"DiagnosticOptions">, NormalizedValues<["Clang", "MSVC", "Vi", "SARIF", "SARIF"]>,
MarshallingInfoEnum<DiagnosticOpts<"Format">, "Clang">;
def fdiagnostics_show_category : Separate<["-"], "fdiagnostics-show-category">,
- HelpText<"Print diagnostic category">, Values<"none,id,name">,
+ HelpText<"Print diagnostic category">,
+ Values<"none,id,name">,
NormalizedValues<["0", "1", "2"]>,
MarshallingInfoEnum<DiagnosticOpts<"ShowCategories">, "0">;
def fno_diagnostics_use_presumed_location : Flag<["-"], "fno-diagnostics-use-presumed-location">,
@@ -5149,22 +7153,6 @@ def ftabstop : Separate<["-"], "ftabstop">, MetaVarName<"<N>">,
def ferror_limit : Separate<["-"], "ferror-limit">, MetaVarName<"<N>">,
HelpText<"Set the maximum number of errors to emit before stopping (0 = no limit).">,
MarshallingInfoInt<DiagnosticOpts<"ErrorLimit">>;
-def fmacro_backtrace_limit : Separate<["-"], "fmacro-backtrace-limit">, MetaVarName<"<N>">,
- HelpText<"Set the maximum number of entries to print in a macro expansion backtrace (0 = no limit).">,
- MarshallingInfoInt<DiagnosticOpts<"MacroBacktraceLimit">, "DiagnosticOptions::DefaultMacroBacktraceLimit">;
-def ftemplate_backtrace_limit : Separate<["-"], "ftemplate-backtrace-limit">, MetaVarName<"<N>">,
- HelpText<"Set the maximum number of entries to print in a template instantiation backtrace (0 = no limit).">,
- MarshallingInfoInt<DiagnosticOpts<"TemplateBacktraceLimit">, "DiagnosticOptions::DefaultTemplateBacktraceLimit">;
-def fconstexpr_backtrace_limit : Separate<["-"], "fconstexpr-backtrace-limit">, MetaVarName<"<N>">,
- HelpText<"Set the maximum number of entries to print in a constexpr evaluation backtrace (0 = no limit).">,
- MarshallingInfoInt<DiagnosticOpts<"ConstexprBacktraceLimit">, "DiagnosticOptions::DefaultConstexprBacktraceLimit">;
-def fspell_checking_limit : Separate<["-"], "fspell-checking-limit">, MetaVarName<"<N>">,
- HelpText<"Set the maximum number of times to perform spell checking on unrecognized identifiers (0 = no limit).">,
- MarshallingInfoInt<DiagnosticOpts<"SpellCheckingLimit">, "DiagnosticOptions::DefaultSpellCheckingLimit">;
-def fcaret_diagnostics_max_lines :
- Separate<["-"], "fcaret-diagnostics-max-lines">, MetaVarName<"<N>">,
- HelpText<"Set the maximum number of source lines to show in a caret diagnostic">,
- MarshallingInfoInt<DiagnosticOpts<"SnippetLineLimit">, "DiagnosticOptions::DefaultSnippetLineLimit">;
def verify_EQ : CommaJoined<["-"], "verify=">,
MetaVarName<"<prefixes>">,
HelpText<"Verify diagnostic output using comment directives that start with"
@@ -5179,10 +7167,14 @@ def Wno_rewrite_macros : Flag<["-"], "Wno-rewrite-macros">,
HelpText<"Silence ObjC rewriting warnings">,
MarshallingInfoFlag<DiagnosticOpts<"NoRewriteMacros">>;
+} // let Visibility = [CC1Option]
+
//===----------------------------------------------------------------------===//
// Frontend Options
//===----------------------------------------------------------------------===//
+let Visibility = [CC1Option] in {
+
// This isn't normally used, it is just here so we can parse a
// CompilerInvocation out of a driver-derived argument vector.
def cc1 : Flag<["-"], "cc1">;
@@ -5228,16 +7220,23 @@ def code_completion_with_fixits : Flag<["-"], "code-completion-with-fixits">,
def disable_free : Flag<["-"], "disable-free">,
HelpText<"Disable freeing of memory on exit">,
MarshallingInfoFlag<FrontendOpts<"DisableFree">>;
-def enable_noundef_analysis : Flag<["-"], "enable-noundef-analysis">, Group<f_Group>,
- HelpText<"Enable analyzing function argument and return types for mandatory definedness">,
- MarshallingInfoFlag<CodeGenOpts<"EnableNoundefAttrs">>;
+defm clear_ast_before_backend : BoolOption<"",
+ "clear-ast-before-backend",
+ CodeGenOpts<"ClearASTBeforeBackend">,
+ DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption], "Clear">,
+ NegFlag<SetFalse, [], [ClangOption], "Don't clear">,
+ BothFlags<[], [ClangOption], " the Clang AST before running backend code generation">>;
+defm enable_noundef_analysis : BoolOption<"",
+ "enable-noundef-analysis",
+ CodeGenOpts<"EnableNoundefAttrs">,
+ DefaultTrue,
+ PosFlag<SetTrue, [], [ClangOption], "Enable">,
+ NegFlag<SetFalse, [], [ClangOption], "Disable">,
+ BothFlags<[], [ClangOption], " analyzing function argument and return types for mandatory definedness">>;
def discard_value_names : Flag<["-"], "discard-value-names">,
HelpText<"Discard value names in LLVM IR">,
MarshallingInfoFlag<CodeGenOpts<"DiscardValueNames">>;
-def load : Separate<["-"], "load">, MetaVarName<"<dsopath>">,
- HelpText<"Load the named plugin (dynamic shared object)">;
-def plugin : Separate<["-"], "plugin">, MetaVarName<"<name>">,
- HelpText<"Use the named plugin action instead of the default action (use \"help\" to list available options)">;
def plugin_arg : JoinedAndSeparate<["-"], "plugin-arg-">,
MetaVarName<"<name> <arg>">,
HelpText<"Pass <arg> to plugin <name>">;
@@ -5250,6 +7249,8 @@ def ast_dump_filter : Separate<["-"], "ast-dump-filter">,
" nodes having a certain substring in a qualified name. Use"
" -ast-list to list all filterable declaration node names.">,
MarshallingInfoString<FrontendOpts<"ASTDumpFilter">>;
+def ast_dump_filter_EQ : Joined<["-"], "ast-dump-filter=">,
+ Alias<ast_dump_filter>;
def fno_modules_global_index : Flag<["-"], "fno-modules-global-index">,
HelpText<"Do not automatically generate or update the global module index">,
MarshallingInfoNegativeFlag<FrontendOpts<"UseGlobalModuleIndex">>;
@@ -5260,6 +7261,10 @@ def fmodule_map_file_home_is_cwd : Flag<["-"], "fmodule-map-file-home-is-cwd">,
HelpText<"Use the current working directory as the home directory of "
"module maps specified by -fmodule-map-file=<FILE>">,
MarshallingInfoFlag<HeaderSearchOpts<"ModuleMapFileHomeIsCwd">>;
+def fmodule_file_home_is_cwd : Flag<["-"], "fmodule-file-home-is-cwd">,
+ HelpText<"Use the current working directory as the base directory of "
+ "compiled module files.">,
+ MarshallingInfoFlag<HeaderSearchOpts<"ModuleFileHomeIsCwd">>;
def fmodule_feature : Separate<["-"], "fmodule-feature">,
MetaVarName<"<feature>">,
HelpText<"Enable <feature> in module map requires declarations">,
@@ -5273,14 +7278,20 @@ def fmodules_embed_all_files : Joined<["-"], "fmodules-embed-all-files">,
HelpText<"Embed the contents of all files read by this compilation into "
"the produced module file.">,
MarshallingInfoFlag<FrontendOpts<"ModulesEmbedAllFiles">>;
-// FIXME: We only need this in C++ modules / Modules TS if we might textually
+defm fimplicit_modules_use_lock : BoolOption<"f", "implicit-modules-use-lock",
+ FrontendOpts<"BuildingImplicitModuleUsesLock">, DefaultTrue,
+ NegFlag<SetFalse>,
+ PosFlag<SetTrue, [], [ClangOption],
+ "Use filesystem locks for implicit modules builds to avoid "
+ "duplicating work in competing clang invocations.">>;
+// FIXME: We only need this in C++ modules if we might textually
// enter a different module (eg, when building a header unit).
def fmodules_local_submodule_visibility :
Flag<["-"], "fmodules-local-submodule-visibility">,
HelpText<"Enforce name visibility rules across submodules of the same "
"top-level module.">,
MarshallingInfoFlag<LangOpts<"ModulesLocalVisibility">>,
- ImpliedByAnyOf<[fmodules_ts.KeyPath, cpp_modules.KeyPath]>;
+ ImpliedByAnyOf<[fcxx_modules.KeyPath]>;
def fmodules_codegen :
Flag<["-"], "fmodules-codegen">,
HelpText<"Generate code for uses of this module that assumes an explicit "
@@ -5299,20 +7310,16 @@ def ftest_module_file_extension_EQ :
Joined<["-"], "ftest-module-file-extension=">,
HelpText<"introduce a module file extension for testing purposes. "
"The argument is parsed as blockname:major:minor:hashed:user info">;
-def fconcepts_ts : Flag<["-"], "fconcepts-ts">,
- HelpText<"Enable C++ Extensions for Concepts. (deprecated - use -std=c++2a)">;
-def fno_concept_satisfaction_caching : Flag<["-"],
- "fno-concept-satisfaction-caching">,
- HelpText<"Disable satisfaction caching for C++2a Concepts.">,
- MarshallingInfoNegativeFlag<LangOpts<"ConceptSatisfactionCaching">>;
defm recovery_ast : BoolOption<"f", "recovery-ast",
LangOpts<"RecoveryAST">, DefaultTrue,
- NegFlag<SetFalse>, PosFlag<SetTrue, [], "Preserve expressions in AST rather "
+ NegFlag<SetFalse>,
+ PosFlag<SetTrue, [], [ClangOption], "Preserve expressions in AST rather "
"than dropping them when encountering semantic errors">>;
defm recovery_ast_type : BoolOption<"f", "recovery-ast-type",
LangOpts<"RecoveryASTType">, DefaultTrue,
- NegFlag<SetFalse>, PosFlag<SetTrue, [], "Preserve the type for recovery "
+ NegFlag<SetFalse>,
+ PosFlag<SetTrue, [], [ClangOption], "Preserve the type for recovery "
"expressions when possible">>;
let Group = Action_Group in {
@@ -5362,12 +7369,10 @@ def emit_module : Flag<["-"], "emit-module">,
HelpText<"Generate pre-compiled module file from a module map">;
def emit_module_interface : Flag<["-"], "emit-module-interface">,
HelpText<"Generate pre-compiled module file from a C++ module interface">;
-def emit_header_module : Flag<["-"], "emit-header-module">,
- HelpText<"Generate pre-compiled module file from a set of header files">;
+def emit_header_unit : Flag<["-"], "emit-header-unit">,
+ HelpText<"Generate C++20 header units from header files">;
def emit_pch : Flag<["-"], "emit-pch">,
HelpText<"Generate pre-compiled header file">;
-def emit_llvm_bc : Flag<["-"], "emit-llvm-bc">,
- HelpText<"Build ASTs then convert to LLVM, emit .bc file">;
def emit_llvm_only : Flag<["-"], "emit-llvm-only">,
HelpText<"Build ASTs and convert to LLVM, discarding output">;
def emit_codegen_only : Flag<["-"], "emit-codegen-only">,
@@ -5387,34 +7392,30 @@ def print_dependency_directives_minimized_source : Flag<["-"],
defm emit_llvm_uselists : BoolOption<"", "emit-llvm-uselists",
CodeGenOpts<"EmitLLVMUseLists">, DefaultFalse,
- PosFlag<SetTrue, [], "Preserve">,
- NegFlag<SetFalse, [], "Don't preserve">,
- BothFlags<[], " order of LLVM use-lists when serializing">>;
+ PosFlag<SetTrue, [], [ClangOption], "Preserve">,
+ NegFlag<SetFalse, [], [ClangOption], "Don't preserve">,
+ BothFlags<[], [ClangOption], " order of LLVM use-lists when serializing">>;
def mt_migrate_directory : Separate<["-"], "mt-migrate-directory">,
HelpText<"Directory for temporary files produced during ARC or ObjC migration">,
MarshallingInfoString<FrontendOpts<"MTMigrateDir">>;
-def arcmt_action_EQ : Joined<["-"], "arcmt-action=">, Flags<[CC1Option, NoDriverOption]>,
- HelpText<"The ARC migration action to take">, Values<"check,modify,migrate">,
+def arcmt_action_EQ : Joined<["-"], "arcmt-action=">, Visibility<[CC1Option]>,
+ HelpText<"The ARC migration action to take">,
+ Values<"check,modify,migrate">,
NormalizedValuesScope<"FrontendOptions">,
NormalizedValues<["ARCMT_Check", "ARCMT_Modify", "ARCMT_Migrate"]>,
MarshallingInfoEnum<FrontendOpts<"ARCMTAction">, "ARCMT_None">;
-def opt_record_file : Separate<["-"], "opt-record-file">,
- HelpText<"File name to use for YAML optimization record output">,
- MarshallingInfoString<CodeGenOpts<"OptRecordFile">>;
-def opt_record_passes : Separate<["-"], "opt-record-passes">,
- HelpText<"Only record remark information for passes whose names match the given regular expression">;
-def opt_record_format : Separate<["-"], "opt-record-format">,
- HelpText<"The format used for serializing remarks (default: YAML)">;
-
def print_stats : Flag<["-"], "print-stats">,
HelpText<"Print performance metrics and statistics">,
MarshallingInfoFlag<FrontendOpts<"ShowStats">>;
def stats_file : Joined<["-"], "stats-file=">,
HelpText<"Filename to write statistics to">,
MarshallingInfoString<FrontendOpts<"StatsFile">>;
+def stats_file_append : Flag<["-"], "stats-file-append">,
+ HelpText<"If stats should be appended to stats-file instead of overwriting it">,
+ MarshallingInfoFlag<FrontendOpts<"AppendStats">>;
def fdump_record_layouts_simple : Flag<["-"], "fdump-record-layouts-simple">,
HelpText<"Dump record layout information in a simple form used for testing">,
MarshallingInfoFlag<LangOpts<"DumpRecordLayoutsSimple">>;
@@ -5465,23 +7466,58 @@ def aligned_alloc_unavailable : Flag<["-"], "faligned-alloc-unavailable">,
MarshallingInfoFlag<LangOpts<"AlignedAllocationUnavailable">>,
ShouldParseIf<faligned_allocation.KeyPath>;
+} // let Visibility = [CC1Option]
+
//===----------------------------------------------------------------------===//
// Language Options
//===----------------------------------------------------------------------===//
-let Flags = [CC1Option, CC1AsOption, NoDriverOption] in {
-
def version : Flag<["-"], "version">,
HelpText<"Print the compiler version">,
+ Visibility<[CC1Option, CC1AsOption, FC1Option]>,
MarshallingInfoFlag<FrontendOpts<"ShowVersion">>;
+
def main_file_name : Separate<["-"], "main-file-name">,
HelpText<"Main file name to use for debug info and source if missing">,
+ Visibility<[CC1Option, CC1AsOption]>,
MarshallingInfoString<CodeGenOpts<"MainFileName">>;
def split_dwarf_output : Separate<["-"], "split-dwarf-output">,
HelpText<"File name to use for split dwarf debug info output">,
+ Visibility<[CC1Option, CC1AsOption]>,
MarshallingInfoString<CodeGenOpts<"SplitDwarfOutput">>;
-}
+let Visibility = [CC1Option, FC1Option] in {
+
+def mreassociate : Flag<["-"], "mreassociate">,
+ HelpText<"Allow reassociation transformations for floating-point instructions">,
+ MarshallingInfoFlag<LangOpts<"AllowFPReassoc">>, ImpliedByAnyOf<[funsafe_math_optimizations.KeyPath]>;
+def menable_no_nans : Flag<["-"], "menable-no-nans">,
+ HelpText<"Allow optimization to assume there are no NaNs.">,
+ MarshallingInfoFlag<LangOpts<"NoHonorNaNs">>, ImpliedByAnyOf<[ffinite_math_only.KeyPath]>;
+def menable_no_infinities : Flag<["-"], "menable-no-infs">,
+ HelpText<"Allow optimization to assume there are no infinities.">,
+ MarshallingInfoFlag<LangOpts<"NoHonorInfs">>, ImpliedByAnyOf<[ffinite_math_only.KeyPath]>;
+
+def pic_level : Separate<["-"], "pic-level">,
+ HelpText<"Value for __PIC__">,
+ MarshallingInfoInt<LangOpts<"PICLevel">>;
+def pic_is_pie : Flag<["-"], "pic-is-pie">,
+ HelpText<"File is for a position independent executable">,
+ MarshallingInfoFlag<LangOpts<"PIE">>;
+
+def mframe_pointer_EQ : Joined<["-"], "mframe-pointer=">,
+ HelpText<"Specify which frame pointers to retain.">, Values<"all,non-leaf,none">,
+ NormalizedValuesScope<"CodeGenOptions::FramePointerKind">, NormalizedValues<["All", "NonLeaf", "None"]>,
+ MarshallingInfoEnum<CodeGenOpts<"FramePointer">, "None">;
+
+
+def dependent_lib : Joined<["--"], "dependent-lib=">,
+ HelpText<"Add dependent library">,
+ MarshallingInfoStringVector<CodeGenOpts<"DependentLibraries">>;
+
+} // let Visibility = [CC1Option, FC1Option]
+
+let Visibility = [CC1Option] in {
def fblocks_runtime_optional : Flag<["-"], "fblocks-runtime-optional">,
HelpText<"Weakly link in the blocks runtime">,
@@ -5501,13 +7537,15 @@ def fconstant_string_class : Separate<["-"], "fconstant-string-class">,
HelpText<"Specify the class to use for constant Objective-C string objects.">,
MarshallingInfoString<LangOpts<"ObjCConstantStringClass">>;
def fobjc_arc_cxxlib_EQ : Joined<["-"], "fobjc-arc-cxxlib=">,
- HelpText<"Objective-C++ Automatic Reference Counting standard library kind">, Values<"libc++,libstdc++,none">,
+ HelpText<"Objective-C++ Automatic Reference Counting standard library kind">,
+ Values<"libc++,libstdc++,none">,
NormalizedValues<["ARCXX_libcxx", "ARCXX_libstdcxx", "ARCXX_nolib"]>,
MarshallingInfoEnum<PreprocessorOpts<"ObjCXXARCStandardLibrary">, "ARCXX_nolib">;
def fobjc_runtime_has_weak : Flag<["-"], "fobjc-runtime-has-weak">,
HelpText<"The target Objective-C runtime supports ARC weak operations">;
def fobjc_dispatch_method_EQ : Joined<["-"], "fobjc-dispatch-method=">,
- HelpText<"Objective-C dispatch method to use">, Values<"legacy,non-legacy,mixed">,
+ HelpText<"Objective-C dispatch method to use">,
+ Values<"legacy,non-legacy,mixed">,
NormalizedValuesScope<"CodeGenOptions">, NormalizedValues<["Legacy", "NonLegacy", "Mixed"]>,
MarshallingInfoEnum<CodeGenOpts<"ObjCDispatchMethod">, "Legacy">;
def disable_objc_default_synthesize_properties : Flag<["-"], "disable-objc-default-synthesize-properties">,
@@ -5519,12 +7557,6 @@ def fencode_extended_block_signature : Flag<["-"], "fencode-extended-block-signa
def function_alignment : Separate<["-"], "function-alignment">,
HelpText<"default alignment for functions">,
MarshallingInfoInt<LangOpts<"FunctionAlignment">>;
-def pic_level : Separate<["-"], "pic-level">,
- HelpText<"Value for __PIC__">,
- MarshallingInfoInt<LangOpts<"PICLevel">>;
-def pic_is_pie : Flag<["-"], "pic-is-pie">,
- HelpText<"File is for a position independent executable">,
- MarshallingInfoFlag<LangOpts<"PIE">>;
def fhalf_no_semantic_interposition : Flag<["-"], "fhalf-no-semantic-interposition">,
HelpText<"Like -fno-semantic-interposition but don't use local aliases">,
MarshallingInfoFlag<LangOpts<"HalfNoSemanticInterposition">>;
@@ -5543,6 +7575,9 @@ def fallow_pch_with_different_modules_cache_path :
Flag<["-"], "fallow-pch-with-different-modules-cache-path">,
HelpText<"Accept a PCH file that was created with a different modules cache path">,
MarshallingInfoFlag<PreprocessorOpts<"AllowPCHWithDifferentModulesCachePath">>;
+def fno_modules_share_filemanager : Flag<["-"], "fno-modules-share-filemanager">,
+ HelpText<"Disable sharing the FileManager when building a module implicitly">,
+ MarshallingInfoNegativeFlag<FrontendOpts<"ModulesShareFileManager">>;
def dump_deserialized_pch_decls : Flag<["-"], "dump-deserialized-decls">,
HelpText<"Dump declarations that are deserialized from PCH, for testing">,
MarshallingInfoFlag<PreprocessorOpts<"DumpDeserializedPCHDecls">>;
@@ -5554,52 +7589,38 @@ def static_define : Flag<["-"], "static-define">,
HelpText<"Should __STATIC__ be defined">,
MarshallingInfoFlag<LangOpts<"Static">>;
def stack_protector : Separate<["-"], "stack-protector">,
- HelpText<"Enable stack protectors">, Values<"0,1,2,3">,
+ HelpText<"Enable stack protectors">,
+ Values<"0,1,2,3">,
NormalizedValuesScope<"LangOptions">,
NormalizedValues<["SSPOff", "SSPOn", "SSPStrong", "SSPReq"]>,
MarshallingInfoEnum<LangOpts<"StackProtector">, "SSPOff">;
def stack_protector_buffer_size : Separate<["-"], "stack-protector-buffer-size">,
HelpText<"Lower bound for a buffer to be considered for stack protection">,
MarshallingInfoInt<CodeGenOpts<"SSPBufferSize">, "8">;
-def fvisibility : Separate<["-"], "fvisibility">,
- HelpText<"Default type and symbol visibility">,
- MarshallingInfoVisibility<LangOpts<"ValueVisibilityMode">, "DefaultVisibility">,
- // Always emitting because of the relation to `-mignore-xcoff-visibility`.
- AlwaysEmit;
-def ftype_visibility : Separate<["-"], "ftype-visibility">,
+def ftype_visibility : Joined<["-"], "ftype-visibility=">,
HelpText<"Default type visibility">,
- MarshallingInfoVisibility<LangOpts<"TypeVisibilityMode">, fvisibility.KeyPath>;
+ MarshallingInfoVisibility<LangOpts<"TypeVisibilityMode">, fvisibility_EQ.KeyPath>;
def fapply_global_visibility_to_externs : Flag<["-"], "fapply-global-visibility-to-externs">,
HelpText<"Apply global symbol visibility to external declarations without an explicit visibility">,
MarshallingInfoFlag<LangOpts<"SetVisibilityForExternDecls">>;
-def ftemplate_depth : Separate<["-"], "ftemplate-depth">,
- HelpText<"Maximum depth of recursive template instantiation">,
- MarshallingInfoInt<LangOpts<"InstantiationDepth">, "1024">;
-def foperator_arrow_depth : Separate<["-"], "foperator-arrow-depth">,
- HelpText<"Maximum number of 'operator->'s to call for a member access">,
- MarshallingInfoInt<LangOpts<"ArrowDepth">, "256">;
-def fconstexpr_depth : Separate<["-"], "fconstexpr-depth">,
- HelpText<"Maximum depth of recursive constexpr function calls">,
- MarshallingInfoInt<LangOpts<"ConstexprCallDepth">, "512">;
-def fconstexpr_steps : Separate<["-"], "fconstexpr-steps">,
- HelpText<"Maximum number of steps in constexpr function evaluation">,
- MarshallingInfoInt<LangOpts<"ConstexprStepLimit">, "1048576">;
def fbracket_depth : Separate<["-"], "fbracket-depth">,
HelpText<"Maximum nesting level for parentheses, brackets, and braces">,
MarshallingInfoInt<LangOpts<"BracketDepth">, "256">;
defm const_strings : BoolOption<"f", "const-strings",
LangOpts<"ConstStrings">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Use">, NegFlag<SetFalse, [], "Don't use">,
- BothFlags<[], " a const qualified type for string literals in C and ObjC">>;
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Use">,
+ NegFlag<SetFalse, [], [ClangOption], "Don't use">,
+ BothFlags<[], [ClangOption], " a const qualified type for string literals in C and ObjC">>;
def fno_bitfield_type_align : Flag<["-"], "fno-bitfield-type-align">,
HelpText<"Ignore bit-field types when aligning structures">,
MarshallingInfoFlag<LangOpts<"NoBitFieldTypeAlign">>;
def ffake_address_space_map : Flag<["-"], "ffake-address-space-map">,
HelpText<"Use a fake address space map; OpenCL testing purposes only">,
MarshallingInfoFlag<LangOpts<"FakeAddressSpaceMap">>;
-def faddress_space_map_mangling_EQ : Joined<["-"], "faddress-space-map-mangling=">, MetaVarName<"<yes|no|target>">,
+def faddress_space_map_mangling_EQ : Joined<["-"], "faddress-space-map-mangling=">,
HelpText<"Set the mode for address space map based mangling; OpenCL testing purposes only">,
- Values<"target,no,yes">, NormalizedValuesScope<"LangOptions">,
+ Values<"target,no,yes">,
+ NormalizedValuesScope<"LangOptions">,
NormalizedValues<["ASMM_Target", "ASMM_Off", "ASMM_On"]>,
MarshallingInfoEnum<LangOpts<"AddressSpaceMapMangling">, "ASMM_Target">;
def funknown_anytype : Flag<["-"], "funknown-anytype">,
@@ -5616,8 +7637,9 @@ def fdebugger_objc_literal : Flag<["-"], "fdebugger-objc-literal">,
MarshallingInfoFlag<LangOpts<"DebuggerObjCLiteral">>;
defm deprecated_macro : BoolOption<"f", "deprecated-macro",
LangOpts<"Deprecated">, DefaultFalse,
- PosFlag<SetTrue, [], "Defines">, NegFlag<SetFalse, [], "Undefines">,
- BothFlags<[], " the __DEPRECATED macro">>;
+ PosFlag<SetTrue, [], [ClangOption], "Defines">,
+ NegFlag<SetFalse, [], [ClangOption], "Undefines">,
+ BothFlags<[], [ClangOption], " the __DEPRECATED macro">>;
def fobjc_subscripting_legacy_runtime : Flag<["-"], "fobjc-subscripting-legacy-runtime">,
HelpText<"Allow Objective-C array and dictionary subscripting in legacy runtime">;
// TODO: Enforce values valid for MSVtorDispMode.
@@ -5631,34 +7653,34 @@ def fnative_half_type: Flag<["-"], "fnative-half-type">,
def fnative_half_arguments_and_returns : Flag<["-"], "fnative-half-arguments-and-returns">,
HelpText<"Use the native __fp16 type for arguments and returns (and skip ABI-specific lowering)">,
MarshallingInfoFlag<LangOpts<"NativeHalfArgsAndReturns">>,
- ImpliedByAnyOf<[open_cl.KeyPath, render_script.KeyPath]>;
-def fallow_half_arguments_and_returns : Flag<["-"], "fallow-half-arguments-and-returns">,
- HelpText<"Allow function arguments and returns of type half">,
- MarshallingInfoFlag<LangOpts<"HalfArgsAndReturns">>,
- ImpliedByAnyOf<[fnative_half_arguments_and_returns.KeyPath]>;
+ ImpliedByAnyOf<[open_cl.KeyPath, render_script.KeyPath, hlsl.KeyPath]>;
def fdefault_calling_conv_EQ : Joined<["-"], "fdefault-calling-conv=">,
- HelpText<"Set default calling convention">, Values<"cdecl,fastcall,stdcall,vectorcall,regcall">,
+ HelpText<"Set default calling convention">,
+ Values<"cdecl,fastcall,stdcall,vectorcall,regcall,rtdcall">,
NormalizedValuesScope<"LangOptions">,
- NormalizedValues<["DCC_CDecl", "DCC_FastCall", "DCC_StdCall", "DCC_VectorCall", "DCC_RegCall"]>,
+ NormalizedValues<["DCC_CDecl", "DCC_FastCall", "DCC_StdCall", "DCC_VectorCall", "DCC_RegCall", "DCC_RtdCall"]>,
MarshallingInfoEnum<LangOpts<"DefaultCallingConv">, "DCC_None">;
// These options cannot be marshalled, because they are used to set up the LangOptions defaults.
def finclude_default_header : Flag<["-"], "finclude-default-header">,
- HelpText<"Include default header file for OpenCL">;
+ HelpText<"Include default header file for OpenCL and HLSL">;
def fdeclare_opencl_builtins : Flag<["-"], "fdeclare-opencl-builtins">,
HelpText<"Add OpenCL builtin function declarations (experimental)">;
def fpreserve_vec3_type : Flag<["-"], "fpreserve-vec3-type">,
HelpText<"Preserve 3-component vector type">,
- MarshallingInfoFlag<CodeGenOpts<"PreserveVec3Type">>;
+ MarshallingInfoFlag<CodeGenOpts<"PreserveVec3Type">>,
+ ImpliedByAnyOf<[hlsl.KeyPath]>;
def fwchar_type_EQ : Joined<["-"], "fwchar-type=">,
- HelpText<"Select underlying type for wchar_t">, Values<"char,short,int">,
+ HelpText<"Select underlying type for wchar_t">,
+ Values<"char,short,int">,
NormalizedValues<["1", "2", "4"]>,
MarshallingInfoEnum<LangOpts<"WCharSize">, "0">;
defm signed_wchar : BoolOption<"f", "signed-wchar",
LangOpts<"WCharIsSigned">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option], "Use an unsigned">, PosFlag<SetTrue, [], "Use a signed">,
- BothFlags<[], " type for wchar_t">>;
+ NegFlag<SetFalse, [], [ClangOption, CC1Option], "Use an unsigned">,
+ PosFlag<SetTrue, [], [ClangOption], "Use a signed">,
+ BothFlags<[], [ClangOption], " type for wchar_t">>;
def fcompatibility_qualified_id_block_param_type_checking : Flag<["-"], "fcompatibility-qualified-id-block-type-checking">,
HelpText<"Allow using blocks with parameters of more specific type than "
"the type system guarantees when a parameter is qualified id">,
@@ -5674,10 +7696,20 @@ def fobjc_gc_only : Flag<["-"], "fobjc-gc-only">, Group<f_Group>,
def fobjc_gc : Flag<["-"], "fobjc-gc">, Group<f_Group>,
HelpText<"Enable Objective-C garbage collection">;
+def fexperimental_max_bitint_width_EQ:
+ Joined<["-"], "fexperimental-max-bitint-width=">, Group<f_Group>,
+ MetaVarName<"<N>">,
+ HelpText<"Set the maximum bitwidth for _BitInt (this option is expected to be removed in the future)">,
+ MarshallingInfoInt<LangOpts<"MaxBitIntWidth">>;
+
+} // let Visibility = [CC1Option]
+
//===----------------------------------------------------------------------===//
// Header Search Options
//===----------------------------------------------------------------------===//
+let Visibility = [CC1Option] in {
+
def nostdsysteminc : Flag<["-"], "nostdsysteminc">,
HelpText<"Disable standard system #include directories">,
MarshallingInfoNegativeFlag<HeaderSearchOpts<"UseStandardSystemIncludes">>;
@@ -5691,30 +7723,34 @@ def fmodules_strict_context_hash : Flag<["-"], "fmodules-strict-context-hash">,
HelpText<"Enable hashing of all compiler options that could impact the "
"semantics of a module in an implicit build">,
MarshallingInfoFlag<HeaderSearchOpts<"ModulesStrictContextHash">>;
-def c_isystem : JoinedOrSeparate<["-"], "c-isystem">, MetaVarName<"<directory>">,
+def c_isystem : Separate<["-"], "c-isystem">, MetaVarName<"<directory>">,
HelpText<"Add directory to the C SYSTEM include search path">;
-def objc_isystem : JoinedOrSeparate<["-"], "objc-isystem">,
+def objc_isystem : Separate<["-"], "objc-isystem">,
MetaVarName<"<directory>">,
HelpText<"Add directory to the ObjC SYSTEM include search path">;
-def objcxx_isystem : JoinedOrSeparate<["-"], "objcxx-isystem">,
+def objcxx_isystem : Separate<["-"], "objcxx-isystem">,
MetaVarName<"<directory>">,
HelpText<"Add directory to the ObjC++ SYSTEM include search path">;
-def internal_isystem : JoinedOrSeparate<["-"], "internal-isystem">,
+def internal_isystem : Separate<["-"], "internal-isystem">,
MetaVarName<"<directory>">,
HelpText<"Add directory to the internal system include search path; these "
"are assumed to not be user-provided and are used to model system "
"and standard headers' paths.">;
-def internal_externc_isystem : JoinedOrSeparate<["-"], "internal-externc-isystem">,
+def internal_externc_isystem : Separate<["-"], "internal-externc-isystem">,
MetaVarName<"<directory>">,
HelpText<"Add directory to the internal system include search path with "
"implicit extern \"C\" semantics; these are assumed to not be "
"user-provided and are used to model system and standard headers' "
"paths.">;
+} // let Visibility = [CC1Option]
+
//===----------------------------------------------------------------------===//
// Preprocessor Options
//===----------------------------------------------------------------------===//
+let Visibility = [CC1Option] in {
+
def chain_include : Separate<["-"], "chain-include">, MetaVarName<"<file>">,
HelpText<"Include and chain a header file after turning it into PCH">;
def preamble_bytes_EQ : Joined<["-"], "preamble-bytes=">,
@@ -5729,19 +7765,18 @@ def setup_static_analyzer : Flag<["-"], "setup-static-analyzer">,
def disable_pragma_debug_crash : Flag<["-"], "disable-pragma-debug-crash">,
HelpText<"Disable any #pragma clang __debug that can lead to crashing behavior. This is meant for testing.">,
MarshallingInfoFlag<PreprocessorOpts<"DisablePragmaDebugCrash">>;
+def source_date_epoch : Separate<["-"], "source-date-epoch">,
+ MetaVarName<"<time since Epoch in seconds>">,
+ HelpText<"Time to be used in __DATE__, __TIME__, and __TIMESTAMP__ macros">;
-//===----------------------------------------------------------------------===//
-// OpenCL Options
-//===----------------------------------------------------------------------===//
-
-def cl_ext_EQ : CommaJoined<["-"], "cl-ext=">,
- HelpText<"OpenCL only. Enable or disable OpenCL extensions. The argument is a comma-separated sequence of one or more extension names, each prefixed by '+' or '-'.">,
- MarshallingInfoStringVector<TargetOpts<"OpenCLExtensionsAsWritten">>;
+} // let Visibility = [CC1Option]
//===----------------------------------------------------------------------===//
// CUDA Options
//===----------------------------------------------------------------------===//
+let Visibility = [CC1Option] in {
+
def fcuda_is_device : Flag<["-"], "fcuda-is-device">,
HelpText<"Generate code for CUDA device">,
MarshallingInfoFlag<LangOpts<"CUDAIsDevice">>;
@@ -5755,30 +7790,38 @@ def fno_cuda_host_device_constexpr : Flag<["-"], "fno-cuda-host-device-constexpr
HelpText<"Don't treat unattributed constexpr functions as __host__ __device__.">,
MarshallingInfoNegativeFlag<LangOpts<"CUDAHostDeviceConstexpr">>;
+} // let Visibility = [CC1Option]
+
//===----------------------------------------------------------------------===//
// OpenMP Options
//===----------------------------------------------------------------------===//
-def fopenmp_is_device : Flag<["-"], "fopenmp-is-device">,
+let Visibility = [CC1Option, FC1Option] in {
+
+def fopenmp_is_target_device : Flag<["-"], "fopenmp-is-target-device">,
HelpText<"Generate code only for an OpenMP target device.">;
+def : Flag<["-"], "fopenmp-is-device">, Alias<fopenmp_is_target_device>;
def fopenmp_host_ir_file_path : Separate<["-"], "fopenmp-host-ir-file-path">,
HelpText<"Path to the IR file produced by the frontend for the host.">;
+} // let Visibility = [CC1Option, FC1Option]
+
//===----------------------------------------------------------------------===//
// SYCL Options
//===----------------------------------------------------------------------===//
def fsycl_is_device : Flag<["-"], "fsycl-is-device">,
HelpText<"Generate code for SYCL device.">,
+ Visibility<[CC1Option]>,
MarshallingInfoFlag<LangOpts<"SYCLIsDevice">>;
def fsycl_is_host : Flag<["-"], "fsycl-is-host">,
HelpText<"SYCL host compilation">,
+ Visibility<[CC1Option]>,
MarshallingInfoFlag<LangOpts<"SYCLIsHost">>;
-} // let Flags = [CC1Option, NoDriverOption]
-
def sycl_std_EQ : Joined<["-"], "sycl-std=">, Group<sycl_Group>,
- Flags<[CC1Option, NoArgumentUnused, CoreOption]>,
+ Flags<[NoArgumentUnused]>,
+ Visibility<[ClangOption, CC1Option, CLOption]>,
HelpText<"SYCL language standard to compile for.">,
Values<"2020,2017,121,1.2.1,sycl-1.2.1">,
NormalizedValues<["SYCL_2020", "SYCL_2017", "SYCL_2017", "SYCL_2017", "SYCL_2017"]>,
@@ -5786,31 +7829,54 @@ def sycl_std_EQ : Joined<["-"], "sycl-std=">, Group<sycl_Group>,
MarshallingInfoEnum<LangOpts<"SYCLVersion">, "SYCL_None">,
ShouldParseIf<!strconcat(fsycl_is_device.KeyPath, "||", fsycl_is_host.KeyPath)>;
-defm cuda_approx_transcendentals : BoolFOption<"cuda-approx-transcendentals",
- LangOpts<"CUDADeviceApproxTranscendentals">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Use">, NegFlag<SetFalse, [], "Don't use">,
- BothFlags<[], " approximate transcendental functions">>,
- ShouldParseIf<fcuda_is_device.KeyPath>;
+defm gpu_approx_transcendentals : BoolFOption<"gpu-approx-transcendentals",
+ LangOpts<"GPUDeviceApproxTranscendentals">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption, CC1Option], "Use">,
+ NegFlag<SetFalse, [], [ClangOption], "Don't use">,
+ BothFlags<[], [ClangOption], " approximate transcendental functions">>;
+def : Flag<["-"], "fcuda-approx-transcendentals">, Alias<fgpu_approx_transcendentals>;
+def : Flag<["-"], "fno-cuda-approx-transcendentals">, Alias<fno_gpu_approx_transcendentals>;
//===----------------------------------------------------------------------===//
// Frontend Options - cc1 + fc1
//===----------------------------------------------------------------------===//
-let Flags = [CC1Option, FC1Option, NoDriverOption] in {
+
+let Visibility = [CC1Option, FC1Option] in {
let Group = Action_Group in {
def emit_obj : Flag<["-"], "emit-obj">,
HelpText<"Emit native object files">;
def init_only : Flag<["-"], "init-only">,
HelpText<"Only execute frontend initialization">;
+def emit_llvm_bc : Flag<["-"], "emit-llvm-bc">,
+ HelpText<"Build ASTs then convert to LLVM, emit .bc file">;
} // let Group = Action_Group
-} // let Flags = [CC1Option, FC1Option, NoDriverOption]
+
+def load : Separate<["-"], "load">, MetaVarName<"<dsopath>">,
+ HelpText<"Load the named plugin (dynamic shared object)">;
+def plugin : Separate<["-"], "plugin">, MetaVarName<"<name>">,
+ HelpText<"Use the named plugin action instead of the default action (use \"help\" to list available options)">;
+defm debug_pass_manager : BoolOption<"f", "debug-pass-manager",
+ CodeGenOpts<"DebugPassManager">, DefaultFalse,
+ PosFlag<SetTrue, [], [ClangOption], "Prints debug information for the new pass manager">,
+ NegFlag<SetFalse, [], [ClangOption], "Disables debug printing for the new pass manager">>;
+
+def opt_record_file : Separate<["-"], "opt-record-file">,
+ HelpText<"File name to use for YAML optimization record output">,
+ MarshallingInfoString<CodeGenOpts<"OptRecordFile">>;
+def opt_record_passes : Separate<["-"], "opt-record-passes">,
+ HelpText<"Only record remark information for passes whose names match the given regular expression">;
+def opt_record_format : Separate<["-"], "opt-record-format">,
+ HelpText<"The format used for serializing remarks (default: YAML)">;
+
+} // let Visibility = [CC1Option, FC1Option]
//===----------------------------------------------------------------------===//
// cc1as-only Options
//===----------------------------------------------------------------------===//
-let Flags = [CC1AsOption, NoDriverOption] in {
+let Visibility = [CC1AsOption] in {
// Language Options
def n : Flag<["-"], "n">,
@@ -5835,13 +7901,13 @@ def dwarf_debug_producer : Separate<["-"], "dwarf-debug-producer">,
def defsym : Separate<["-"], "defsym">,
HelpText<"Define a value for a symbol">;
-} // let Flags = [CC1AsOption]
+} // let Visibility = [CC1AsOption]
//===----------------------------------------------------------------------===//
// clang-cl Options
//===----------------------------------------------------------------------===//
-def cl_Group : OptionGroup<"<clang-cl options>">, Flags<[CLOption]>,
+def cl_Group : OptionGroup<"<clang-cl options>">,
HelpText<"CL.EXE COMPATIBILITY OPTIONS">;
def cl_compile_Group : OptionGroup<"<clang-cl compile-only options>">,
@@ -5850,33 +7916,43 @@ def cl_compile_Group : OptionGroup<"<clang-cl compile-only options>">,
def cl_ignored_Group : OptionGroup<"<clang-cl ignored options>">,
Group<cl_Group>;
-class CLFlag<string name> : Option<["/", "-"], name, KIND_FLAG>,
- Group<cl_Group>, Flags<[CLOption, NoXarchOption]>;
+class CLFlag<string name, list<OptionVisibility> vis = [CLOption]> :
+ Option<["/", "-"], name, KIND_FLAG>,
+ Group<cl_Group>, Flags<[NoXarchOption]>, Visibility<vis>;
-class CLCompileFlag<string name> : Option<["/", "-"], name, KIND_FLAG>,
- Group<cl_compile_Group>, Flags<[CLOption, NoXarchOption]>;
+class CLCompileFlag<string name, list<OptionVisibility> vis = [CLOption]> :
+ Option<["/", "-"], name, KIND_FLAG>,
+ Group<cl_compile_Group>, Flags<[NoXarchOption]>, Visibility<vis>;
-class CLIgnoredFlag<string name> : Option<["/", "-"], name, KIND_FLAG>,
- Group<cl_ignored_Group>, Flags<[CLOption, NoXarchOption]>;
+class CLIgnoredFlag<string name, list<OptionVisibility> vis = [CLOption]> :
+ Option<["/", "-"], name, KIND_FLAG>,
+ Group<cl_ignored_Group>, Flags<[NoXarchOption]>, Visibility<vis>;
-class CLJoined<string name> : Option<["/", "-"], name, KIND_JOINED>,
- Group<cl_Group>, Flags<[CLOption, NoXarchOption]>;
+class CLJoined<string name, list<OptionVisibility> vis = [CLOption]> :
+ Option<["/", "-"], name, KIND_JOINED>,
+ Group<cl_Group>, Flags<[NoXarchOption]>, Visibility<vis>;
-class CLCompileJoined<string name> : Option<["/", "-"], name, KIND_JOINED>,
- Group<cl_compile_Group>, Flags<[CLOption, NoXarchOption]>;
+class CLCompileJoined<string name, list<OptionVisibility> vis = [CLOption]> :
+ Option<["/", "-"], name, KIND_JOINED>,
+ Group<cl_compile_Group>, Flags<[NoXarchOption]>, Visibility<vis>;
-class CLIgnoredJoined<string name> : Option<["/", "-"], name, KIND_JOINED>,
- Group<cl_ignored_Group>, Flags<[CLOption, NoXarchOption, HelpHidden]>;
+class CLIgnoredJoined<string name, list<OptionVisibility> vis = [CLOption]> :
+ Option<["/", "-"], name, KIND_JOINED>,
+ Group<cl_ignored_Group>, Flags<[NoXarchOption, HelpHidden]>, Visibility<vis>;
-class CLJoinedOrSeparate<string name> : Option<["/", "-"], name,
- KIND_JOINED_OR_SEPARATE>, Group<cl_Group>, Flags<[CLOption, NoXarchOption]>;
+class CLJoinedOrSeparate<string name, list<OptionVisibility> vis = [CLOption]> :
+ Option<["/", "-"], name, KIND_JOINED_OR_SEPARATE>,
+ Group<cl_Group>, Flags<[NoXarchOption]>, Visibility<vis>;
-class CLCompileJoinedOrSeparate<string name> : Option<["/", "-"], name,
- KIND_JOINED_OR_SEPARATE>, Group<cl_compile_Group>,
- Flags<[CLOption, NoXarchOption]>;
+class CLCompileJoinedOrSeparate<string name,
+ list<OptionVisibility> vis = [CLOption]> :
+ Option<["/", "-"], name, KIND_JOINED_OR_SEPARATE>,
+ Group<cl_compile_Group>, Flags<[NoXarchOption]>, Visibility<vis>;
-class CLRemainingArgsJoined<string name> : Option<["/", "-"], name,
- KIND_REMAINING_ARGS_JOINED>, Group<cl_Group>, Flags<[CLOption, NoXarchOption]>;
+class CLRemainingArgsJoined<string name,
+ list<OptionVisibility> vis = [CLOption]> :
+ Option<["/", "-"], name, KIND_REMAINING_ARGS_JOINED>,
+ Group<cl_Group>, Flags<[NoXarchOption]>, Visibility<vis>;
// Aliases:
// (We don't put any of these in cl_compile_Group as the options they alias are
@@ -5902,19 +7978,18 @@ def _SLASH_diagnostics_column : CLFlag<"diagnostics:column">,
HelpText<"Disable caret diagnostics but keep column info">;
def _SLASH_diagnostics_classic : CLFlag<"diagnostics:classic">,
HelpText<"Disable column and caret diagnostics">;
-def _SLASH_D : CLJoinedOrSeparate<"D">, HelpText<"Define macro">,
- MetaVarName<"<macro[=value]>">, Alias<D>;
+def _SLASH_D : CLJoinedOrSeparate<"D", [CLOption, DXCOption]>,
+ HelpText<"Define macro">, MetaVarName<"<macro[=value]>">, Alias<D>;
def _SLASH_E : CLFlag<"E">, HelpText<"Preprocess to stdout">, Alias<E>;
def _SLASH_external_COLON_I : CLJoinedOrSeparate<"external:I">, Alias<isystem>,
HelpText<"Add directory to include search path with warnings suppressed">,
MetaVarName<"<dir>">;
-def _SLASH_fp_except : CLFlag<"fp:except">, HelpText<"">, Alias<ftrapping_math>;
-def _SLASH_fp_except_ : CLFlag<"fp:except-">,
- HelpText<"">, Alias<fno_trapping_math>;
+def _SLASH_fp_contract : CLFlag<"fp:contract">, HelpText<"">, Alias<ffp_contract>, AliasArgs<["on"]>;
+def _SLASH_fp_except : CLFlag<"fp:except">, HelpText<"">, Alias<ffp_exception_behavior_EQ>, AliasArgs<["strict"]>;
+def _SLASH_fp_except_ : CLFlag<"fp:except-">, HelpText<"">, Alias<ffp_exception_behavior_EQ>, AliasArgs<["ignore"]>;
def _SLASH_fp_fast : CLFlag<"fp:fast">, HelpText<"">, Alias<ffast_math>;
-def _SLASH_fp_precise : CLFlag<"fp:precise">,
- HelpText<"">, Alias<fno_fast_math>;
-def _SLASH_fp_strict : CLFlag<"fp:strict">, HelpText<"">, Alias<fno_fast_math>;
+def _SLASH_fp_precise : CLFlag<"fp:precise">, HelpText<"">, Alias<ffp_model_EQ>, AliasArgs<["precise"]>;
+def _SLASH_fp_strict : CLFlag<"fp:strict">, HelpText<"">, Alias<ffp_model_EQ>, AliasArgs<["strict"]>;
def _SLASH_fsanitize_EQ_address : CLFlag<"fsanitize=address">,
HelpText<"Enable AddressSanitizer">,
Alias<fsanitize_EQ>, AliasArgs<["address"]>;
@@ -5943,10 +8018,12 @@ def _SLASH_Gw : CLFlag<"Gw">, HelpText<"Put each data item in its own section">,
def _SLASH_Gw_ : CLFlag<"Gw-">,
HelpText<"Do not put each data item in its own section (default)">,
Alias<fno_data_sections>;
-def _SLASH_help : CLFlag<"help">, Alias<help>,
+def _SLASH_help : CLFlag<"help", [CLOption, DXCOption]>, Alias<help>,
HelpText<"Display available options">;
def _SLASH_HELP : CLFlag<"HELP">, Alias<help>;
-def _SLASH_I : CLJoinedOrSeparate<"I">,
+def _SLASH_hotpatch : CLFlag<"hotpatch">, Alias<fms_hotpatch>,
+ HelpText<"Create hotpatchable image">;
+def _SLASH_I : CLJoinedOrSeparate<"I", [CLOption, DXCOption]>,
HelpText<"Add directory to include search path">, MetaVarName<"<dir>">,
Alias<I>;
def _SLASH_J : CLFlag<"J">, HelpText<"Make char type unsigned">,
@@ -5954,7 +8031,7 @@ def _SLASH_J : CLFlag<"J">, HelpText<"Make char type unsigned">,
// The _SLASH_O option handles all the /O flags, but we also provide separate
// aliased options to provide separate help messages.
-def _SLASH_O : CLJoined<"O">,
+def _SLASH_O : CLJoined<"O", [CLOption, DXCOption]>,
HelpText<"Set multiple /O flags at once; e.g. '/O2y-' for '/O2 /Oy-'">,
MetaVarName<"<flags>">;
def : CLFlag<"O1">, Alias<_SLASH_O>, AliasArgs<["1"]>,
@@ -5967,7 +8044,7 @@ def : CLFlag<"Ob1">, Alias<_SLASH_O>, AliasArgs<["b1"]>,
HelpText<"Only inline functions explicitly or implicitly marked inline">;
def : CLFlag<"Ob2">, Alias<_SLASH_O>, AliasArgs<["b2"]>,
HelpText<"Inline functions as deemed beneficial by the compiler">;
-def : CLFlag<"Od">, Alias<_SLASH_O>, AliasArgs<["d"]>,
+def : CLFlag<"Od", [CLOption, DXCOption]>, Alias<_SLASH_O>, AliasArgs<["d"]>,
HelpText<"Disable optimization">;
def : CLFlag<"Og">, Alias<_SLASH_O>, AliasArgs<["g"]>,
HelpText<"No effect">;
@@ -6014,6 +8091,11 @@ def _SLASH_validate_charset : CLFlag<"validate-charset">,
Alias<W_Joined>, AliasArgs<["invalid-source-encoding"]>;
def _SLASH_validate_charset_ : CLFlag<"validate-charset-">,
Alias<W_Joined>, AliasArgs<["no-invalid-source-encoding"]>;
+def _SLASH_external_W0 : CLFlag<"external:W0">, HelpText<"Ignore warnings from system headers (default)">, Alias<Wno_system_headers>;
+def _SLASH_external_W1 : CLFlag<"external:W1">, HelpText<"Enable -Wsystem-headers">, Alias<Wsystem_headers>;
+def _SLASH_external_W2 : CLFlag<"external:W2">, HelpText<"Enable -Wsystem-headers">, Alias<Wsystem_headers>;
+def _SLASH_external_W3 : CLFlag<"external:W3">, HelpText<"Enable -Wsystem-headers">, Alias<Wsystem_headers>;
+def _SLASH_external_W4 : CLFlag<"external:W4">, HelpText<"Enable -Wsystem-headers">, Alias<Wsystem_headers>;
def _SLASH_W0 : CLFlag<"W0">, HelpText<"Disable all warnings">, Alias<w>;
def _SLASH_W1 : CLFlag<"W1">, HelpText<"Enable -Wall">, Alias<Wall>;
def _SLASH_W2 : CLFlag<"W2">, HelpText<"Enable -Wall">, Alias<Wall>;
@@ -6027,16 +8109,7 @@ def _SLASH_WX_ : CLFlag<"WX-">,
HelpText<"Do not treat warnings as errors (default)">,
Alias<W_Joined>, AliasArgs<["no-error"]>;
def _SLASH_w_flag : CLFlag<"w">, HelpText<"Disable all warnings">, Alias<w>;
-def _SLASH_wd4005 : CLFlag<"wd4005">, Alias<W_Joined>,
- AliasArgs<["no-macro-redefined"]>;
-def _SLASH_wd4018 : CLFlag<"wd4018">, Alias<W_Joined>,
- AliasArgs<["no-sign-compare"]>;
-def _SLASH_wd4100 : CLFlag<"wd4100">, Alias<W_Joined>,
- AliasArgs<["no-unused-parameter"]>;
-def _SLASH_wd4910 : CLFlag<"wd4910">, Alias<W_Joined>,
- AliasArgs<["no-dllexport-explicit-instantiation-decl"]>;
-def _SLASH_wd4996 : CLFlag<"wd4996">, Alias<W_Joined>,
- AliasArgs<["no-deprecated-declarations"]>;
+def _SLASH_wd : CLCompileJoined<"wd">;
def _SLASH_vd : CLJoined<"vd">, HelpText<"Control vtordisp placement">,
Alias<vtordisp_mode_EQ>;
def _SLASH_X : CLFlag<"X">,
@@ -6078,9 +8151,22 @@ def _SLASH_Zc_twoPhase : CLFlag<"Zc:twoPhase">,
def _SLASH_Zc_twoPhase_ : CLFlag<"Zc:twoPhase-">,
HelpText<"Disable two-phase name lookup in templates (default)">,
Alias<fdelayed_template_parsing>;
-def _SLASH_Z7 : CLFlag<"Z7">,
+def _SLASH_Zc_wchar_t : CLFlag<"Zc:wchar_t">,
+ HelpText<"Enable C++ builtin type wchar_t (default)">;
+def _SLASH_Zc_wchar_t_ : CLFlag<"Zc:wchar_t-">,
+ HelpText<"Disable C++ builtin type wchar_t">;
+def _SLASH_Z7 : CLFlag<"Z7">, Alias<g_Flag>,
HelpText<"Enable CodeView debug information in object files">;
-def _SLASH_Zi : CLFlag<"Zi">, Alias<_SLASH_Z7>,
+def _SLASH_ZH_MD5 : CLFlag<"ZH:MD5">,
+ HelpText<"Use MD5 for file checksums in debug info (default)">,
+ Alias<gsrc_hash_EQ>, AliasArgs<["md5"]>;
+def _SLASH_ZH_SHA1 : CLFlag<"ZH:SHA1">,
+ HelpText<"Use SHA1 for file checksums in debug info">,
+ Alias<gsrc_hash_EQ>, AliasArgs<["sha1"]>;
+def _SLASH_ZH_SHA_256 : CLFlag<"ZH:SHA_256">,
+ HelpText<"Use SHA256 for file checksums in debug info">,
+ Alias<gsrc_hash_EQ>, AliasArgs<["sha256"]>;
+def _SLASH_Zi : CLFlag<"Zi", [CLOption, DXCOption]>, Alias<g_Flag>,
HelpText<"Like /Z7">;
def _SLASH_Zp : CLJoined<"Zp">,
HelpText<"Set default maximum struct packing alignment">,
@@ -6088,7 +8174,7 @@ def _SLASH_Zp : CLJoined<"Zp">,
def _SLASH_Zp_flag : CLFlag<"Zp">,
HelpText<"Set default maximum struct packing alignment to 1">,
Alias<fpack_struct_EQ>, AliasArgs<["1"]>;
-def _SLASH_Zs : CLFlag<"Zs">, HelpText<"Syntax-check only">,
+def _SLASH_Zs : CLFlag<"Zs">, HelpText<"Run the preprocessor, parser and semantic analysis stages">,
Alias<fsyntax_only>;
def _SLASH_openmp_ : CLFlag<"openmp-">,
HelpText<"Disable OpenMP support">, Alias<fno_openmp>;
@@ -6103,6 +8189,12 @@ def _SLASH_tune : CLCompileJoined<"tune:">,
def _SLASH_QIntel_jcc_erratum : CLFlag<"QIntel-jcc-erratum">,
HelpText<"Align branches within 32-byte boundaries to mitigate the performance impact of the Intel JCC erratum.">,
Alias<mbranches_within_32B_boundaries>;
+def _SLASH_arm64EC : CLFlag<"arm64EC">,
+ HelpText<"Set build target to arm64ec">;
+def : CLFlag<"Qgather-">, Alias<mno_gather>,
+ HelpText<"Disable generation of gather instructions in auto-vectorization(x86 only)">;
+def : CLFlag<"Qscatter-">, Alias<mno_scatter>,
+ HelpText<"Disable generation of scatter instructions in auto-vectorization(x86 only)">;
// Non-aliases:
@@ -6119,7 +8211,7 @@ def _SLASH_EP : CLFlag<"EP">,
def _SLASH_external_env : CLJoined<"external:env:">,
HelpText<"Add dirs in env var <var> to include search path with warnings suppressed">,
MetaVarName<"<var>">;
-def _SLASH_FA : CLFlag<"FA">,
+def _SLASH_FA : CLJoined<"FA">,
HelpText<"Output assembly code file during compilation">;
def _SLASH_Fa : CLJoined<"Fa">,
HelpText<"Set assembly output file name (with /FA)">,
@@ -6146,18 +8238,25 @@ def _SLASH_GX_ : CLFlag<"GX-">,
def _SLASH_imsvc : CLJoinedOrSeparate<"imsvc">,
HelpText<"Add <dir> to system include search path, as if in %INCLUDE%">,
MetaVarName<"<dir>">;
+def _SLASH_JMC : CLFlag<"JMC">, Alias<fjmc>,
+ HelpText<"Enable just-my-code debugging">;
+def _SLASH_JMC_ : CLFlag<"JMC-">, Alias<fno_jmc>,
+ HelpText<"Disable just-my-code debugging (default)">;
def _SLASH_LD : CLFlag<"LD">, HelpText<"Create DLL">;
def _SLASH_LDd : CLFlag<"LDd">, HelpText<"Create debug DLL">;
def _SLASH_link : CLRemainingArgsJoined<"link">,
HelpText<"Forward options to the linker">, MetaVarName<"<options>">;
def _SLASH_MD : Option<["/", "-"], "MD", KIND_FLAG>, Group<_SLASH_M_Group>,
- Flags<[CLOption, NoXarchOption]>, HelpText<"Use DLL run-time">;
+ Flags<[NoXarchOption]>, Visibility<[CLOption]>, HelpText<"Use DLL run-time">;
def _SLASH_MDd : Option<["/", "-"], "MDd", KIND_FLAG>, Group<_SLASH_M_Group>,
- Flags<[CLOption, NoXarchOption]>, HelpText<"Use DLL debug run-time">;
+ Flags<[NoXarchOption]>, Visibility<[CLOption]>,
+ HelpText<"Use DLL debug run-time">;
def _SLASH_MT : Option<["/", "-"], "MT", KIND_FLAG>, Group<_SLASH_M_Group>,
- Flags<[CLOption, NoXarchOption]>, HelpText<"Use static run-time">;
+ Flags<[NoXarchOption]>, Visibility<[CLOption]>,
+ HelpText<"Use static run-time">;
def _SLASH_MTd : Option<["/", "-"], "MTd", KIND_FLAG>, Group<_SLASH_M_Group>,
- Flags<[CLOption, NoXarchOption]>, HelpText<"Use static debug run-time">;
+ Flags<[NoXarchOption]>, Visibility<[CLOption]>,
+ HelpText<"Use static debug run-time">;
def _SLASH_o : CLJoinedOrSeparate<"o">,
HelpText<"Deprecated (set output file name); use /Fe or /Fe">,
MetaVarName<"<file or dir/>">;
@@ -6172,6 +8271,8 @@ def _SLASH_TC : CLCompileFlag<"TC">, HelpText<"Treat all source files as C">;
def _SLASH_Tp : CLCompileJoinedOrSeparate<"Tp">,
HelpText<"Treat <file> as C++ source file">, MetaVarName<"<file>">;
def _SLASH_TP : CLCompileFlag<"TP">, HelpText<"Treat all source files as C++">;
+def _SLASH_diasdkdir : CLJoinedOrSeparate<"diasdkdir">,
+ HelpText<"Path to the DIA SDK">, MetaVarName<"<dir>">;
def _SLASH_vctoolsdir : CLJoinedOrSeparate<"vctoolsdir">,
HelpText<"Path to the VCToolChain">, MetaVarName<"<dir>">;
def _SLASH_vctoolsversion : CLJoinedOrSeparate<"vctoolsversion">,
@@ -6181,10 +8282,10 @@ def _SLASH_winsdkdir : CLJoinedOrSeparate<"winsdkdir">,
def _SLASH_winsdkversion : CLJoinedOrSeparate<"winsdkversion">,
HelpText<"Full version of the Windows SDK, defaults to newest found">;
def _SLASH_winsysroot : CLJoinedOrSeparate<"winsysroot">,
- HelpText<"Same as /vctoolsdir <dir>/VC/Tools/MSVC/<vctoolsversion> /winsdkdir <dir>/Windows Kits/10">,
+ HelpText<"Same as \"/diasdkdir <dir>/DIA SDK\" /vctoolsdir <dir>/VC/Tools/MSVC/<vctoolsversion> \"/winsdkdir <dir>/Windows Kits/10\"">,
MetaVarName<"<dir>">;
def _SLASH_volatile_iso : Option<["/", "-"], "volatile:iso", KIND_FLAG>,
- Group<_SLASH_volatile_Group>, Flags<[CLOption, NoXarchOption]>,
+ Visibility<[CLOption]>, Alias<fno_ms_volatile>,
HelpText<"Volatile loads and stores have standard semantics">;
def _SLASH_vmb : CLFlag<"vmb">,
HelpText<"Use a best-case representation method for member pointers">;
@@ -6199,11 +8300,11 @@ def _SLASH_vmv : CLFlag<"vmv">,
HelpText<"Set the default most-general representation to "
"virtual inheritance">;
def _SLASH_volatile_ms : Option<["/", "-"], "volatile:ms", KIND_FLAG>,
- Group<_SLASH_volatile_Group>, Flags<[CLOption, NoXarchOption]>,
+ Visibility<[CLOption]>, Alias<fms_volatile>,
HelpText<"Volatile loads and stores have acquire and release semantics">;
def _SLASH_clang : CLJoined<"clang:">,
HelpText<"Pass <arg> to the clang driver">, MetaVarName<"<arg>">;
-def _SLASH_Zl : CLFlag<"Zl">,
+def _SLASH_Zl : CLFlag<"Zl">, Alias<fms_omit_default_lib>,
HelpText<"Do not let object file auto-link default libraries">;
def _SLASH_Yc : CLJoined<"Yc">,
@@ -6232,6 +8333,18 @@ def _SLASH_Gv : CLFlag<"Gv">,
HelpText<"Set __vectorcall as a default calling convention">;
def _SLASH_Gregcall : CLFlag<"Gregcall">,
HelpText<"Set __regcall as a default calling convention">;
+def _SLASH_Gregcall4 : CLFlag<"Gregcall4">,
+ HelpText<"Set __regcall4 as a default calling convention to respect __regcall ABI v.4">;
+
+// GNU Driver aliases
+
+def : Separate<["-"], "Xmicrosoft-visualc-tools-root">, Alias<_SLASH_vctoolsdir>;
+def : Separate<["-"], "Xmicrosoft-visualc-tools-version">,
+ Alias<_SLASH_vctoolsversion>;
+def : Separate<["-"], "Xmicrosoft-windows-sdk-root">,
+ Alias<_SLASH_winsdkdir>;
+def : Separate<["-"], "Xmicrosoft-windows-sdk-version">,
+ Alias<_SLASH_winsdkversion>;
// Ignored:
@@ -6244,7 +8357,6 @@ def _SLASH_errorReport : CLIgnoredJoined<"errorReport">;
def _SLASH_FC : CLIgnoredFlag<"FC">;
def _SLASH_Fd : CLIgnoredJoined<"Fd">;
def _SLASH_FS : CLIgnoredFlag<"FS">;
-def _SLASH_JMC : CLIgnoredFlag<"JMC">;
def _SLASH_kernel_ : CLIgnoredFlag<"kernel-">;
def _SLASH_nologo : CLIgnoredFlag<"nologo">;
def _SLASH_RTC : CLIgnoredJoined<"RTC">;
@@ -6253,16 +8365,13 @@ def _SLASH_sdl_ : CLIgnoredFlag<"sdl-">;
def _SLASH_utf8 : CLIgnoredFlag<"utf-8">,
HelpText<"Set source and runtime encoding to UTF-8 (default)">;
def _SLASH_w : CLIgnoredJoined<"w">;
+def _SLASH_Wv_ : CLIgnoredJoined<"Wv">;
def _SLASH_Zc___cplusplus : CLIgnoredFlag<"Zc:__cplusplus">;
def _SLASH_Zc_auto : CLIgnoredFlag<"Zc:auto">;
def _SLASH_Zc_forScope : CLIgnoredFlag<"Zc:forScope">;
def _SLASH_Zc_inline : CLIgnoredFlag<"Zc:inline">;
def _SLASH_Zc_rvalueCast : CLIgnoredFlag<"Zc:rvalueCast">;
def _SLASH_Zc_ternary : CLIgnoredFlag<"Zc:ternary">;
-def _SLASH_Zc_wchar_t : CLIgnoredFlag<"Zc:wchar_t">;
-def _SLASH_ZH_MD5 : CLIgnoredFlag<"ZH:MD5">;
-def _SLASH_ZH_SHA1 : CLIgnoredFlag<"ZH:SHA1">;
-def _SLASH_ZH_SHA_256 : CLIgnoredFlag<"ZH:SHA_256">;
def _SLASH_Zm : CLIgnoredJoined<"Zm">;
def _SLASH_Zo : CLIgnoredFlag<"Zo">;
def _SLASH_Zo_ : CLIgnoredFlag<"Zo-">;
@@ -6277,12 +8386,12 @@ def _SLASH_AI : CLJoinedOrSeparate<"AI">;
def _SLASH_Bt : CLFlag<"Bt">;
def _SLASH_Bt_plus : CLFlag<"Bt+">;
def _SLASH_clr : CLJoined<"clr">;
+def _SLASH_d1 : CLJoined<"d1">;
def _SLASH_d2 : CLJoined<"d2">;
def _SLASH_doc : CLJoined<"doc">;
def _SLASH_experimental : CLJoined<"experimental:">;
def _SLASH_exportHeader : CLFlag<"exportHeader">;
def _SLASH_external : CLJoined<"external:">;
-def _SLASH_FA_joined : CLJoined<"FA">;
def _SLASH_favor : CLJoined<"favor">;
def _SLASH_fsanitize_address_use_after_return : CLJoined<"fsanitize-address-use-after-return">;
def _SLASH_fno_sanitize_address_vcasan_lib : CLJoined<"fno-sanitize-address-vcasan-lib">;
@@ -6309,7 +8418,6 @@ def _SLASH_headerUnit : CLJoinedOrSeparate<"headerUnit">;
def _SLASH_headerUnitAngle : CLJoinedOrSeparate<"headerUnit:angle">;
def _SLASH_headerUnitQuote : CLJoinedOrSeparate<"headerUnit:quote">;
def _SLASH_homeparams : CLFlag<"homeparams">;
-def _SLASH_hotpatch : CLFlag<"hotpatch">;
def _SLASH_kernel : CLFlag<"kernel">;
def _SLASH_LN : CLFlag<"LN">;
def _SLASH_MP : CLJoined<"MP">;
@@ -6339,3 +8447,67 @@ def _SLASH_Ze : CLFlag<"Ze">;
def _SLASH_Zg : CLFlag<"Zg">;
def _SLASH_ZI : CLFlag<"ZI">;
def _SLASH_ZW : CLJoined<"ZW">;
+
+//===----------------------------------------------------------------------===//
+// clang-dxc Options
+//===----------------------------------------------------------------------===//
+
+def dxc_Group : OptionGroup<"<clang-dxc options>">, Visibility<[DXCOption]>,
+ HelpText<"dxc compatibility options">;
+class DXCFlag<string name> : Option<["/", "-"], name, KIND_FLAG>,
+ Group<dxc_Group>, Visibility<[DXCOption]>;
+class DXCJoinedOrSeparate<string name> : Option<["/", "-"], name,
+ KIND_JOINED_OR_SEPARATE>, Group<dxc_Group>,
+ Visibility<[DXCOption]>;
+
+def dxc_no_stdinc : DXCFlag<"hlsl-no-stdinc">,
+ HelpText<"HLSL only. Disables all standard includes containing non-native compiler types and functions.">;
+def dxc_Fo : DXCJoinedOrSeparate<"Fo">,
+ HelpText<"Output object file">;
+def dxc_Fc : DXCJoinedOrSeparate<"Fc">,
+ HelpText<"Output assembly listing file">;
+def dxil_validator_version : Option<["/", "-"], "validator-version", KIND_SEPARATE>,
+ Group<dxc_Group>, Flags<[HelpHidden]>,
+ Visibility<[DXCOption, ClangOption, CC1Option]>,
+ HelpText<"Override validator version for module. Format: <major.minor>;"
+ "Default: DXIL.dll version or current internal version">,
+ MarshallingInfoString<TargetOpts<"DxilValidatorVersion">>;
+def target_profile : DXCJoinedOrSeparate<"T">, MetaVarName<"<profile>">,
+ HelpText<"Set target profile">,
+ Values<"ps_6_0, ps_6_1, ps_6_2, ps_6_3, ps_6_4, ps_6_5, ps_6_6, ps_6_7,"
+ "vs_6_0, vs_6_1, vs_6_2, vs_6_3, vs_6_4, vs_6_5, vs_6_6, vs_6_7,"
+ "gs_6_0, gs_6_1, gs_6_2, gs_6_3, gs_6_4, gs_6_5, gs_6_6, gs_6_7,"
+ "hs_6_0, hs_6_1, hs_6_2, hs_6_3, hs_6_4, hs_6_5, hs_6_6, hs_6_7,"
+ "ds_6_0, ds_6_1, ds_6_2, ds_6_3, ds_6_4, ds_6_5, ds_6_6, ds_6_7,"
+ "cs_6_0, cs_6_1, cs_6_2, cs_6_3, cs_6_4, cs_6_5, cs_6_6, cs_6_7,"
+ "lib_6_3, lib_6_4, lib_6_5, lib_6_6, lib_6_7, lib_6_x,"
+ "ms_6_5, ms_6_6, ms_6_7,"
+ "as_6_5, as_6_6, as_6_7">;
+def emit_pristine_llvm : DXCFlag<"emit-pristine-llvm">,
+ HelpText<"Emit pristine LLVM IR from the frontend by not running any LLVM passes at all."
+ "Same as -S + -emit-llvm + -disable-llvm-passes.">;
+def fcgl : DXCFlag<"fcgl">, Alias<emit_pristine_llvm>;
+def enable_16bit_types : DXCFlag<"enable-16bit-types">, Alias<fnative_half_type>,
+ HelpText<"Enable 16-bit types and disable min precision types."
+ "Available in HLSL 2018 and shader model 6.2.">;
+def hlsl_entrypoint : Option<["-"], "hlsl-entry", KIND_SEPARATE>,
+ Group<dxc_Group>,
+ Visibility<[ClangOption, CC1Option]>,
+ MarshallingInfoString<TargetOpts<"HLSLEntry">, "\"main\"">,
+ HelpText<"Entry point name for hlsl">;
+def dxc_entrypoint : Option<["--", "/", "-"], "E", KIND_JOINED_OR_SEPARATE>,
+ Group<dxc_Group>,
+ Visibility<[DXCOption]>,
+ HelpText<"Entry point name">;
+def dxc_validator_path_EQ : Joined<["--"], "dxv-path=">, Group<dxc_Group>,
+ HelpText<"DXIL validator installation path">;
+def dxc_disable_validation : DXCFlag<"Vd">,
+ HelpText<"Disable validation">;
+def : Option<["/", "-"], "Qembed_debug", KIND_FLAG>, Group<dxc_Group>,
+ Flags<[Ignored]>, Visibility<[DXCOption]>,
+ HelpText<"Embed PDB in shader container (ignored)">;
+def spirv : DXCFlag<"spirv">,
+ HelpText<"Generate SPIR-V code">;
+def fspv_target_env_EQ : Joined<["-"], "fspv-target-env=">, Group<dxc_Group>,
+ HelpText<"Specify the target environment">,
+ Values<"vulkan1.2, vulkan1.3">;
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Phases.h b/contrib/llvm-project/clang/include/clang/Driver/Phases.h
index ce914dd70514..9003c5857351 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Phases.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Phases.h
@@ -22,11 +22,10 @@ namespace phases {
Assemble,
Link,
IfsMerge,
- LastPhase = IfsMerge,
};
enum {
- MaxNumberOfPhases = LastPhase + 1
+ MaxNumberOfPhases = IfsMerge + 1
};
const char *getPhaseName(ID Id);
diff --git a/contrib/llvm-project/clang/include/clang/Driver/SanitizerArgs.h b/contrib/llvm-project/clang/include/clang/Driver/SanitizerArgs.h
index e9e329e7cb53..07070ec4fc06 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/SanitizerArgs.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/SanitizerArgs.h
@@ -30,14 +30,19 @@ class SanitizerArgs {
std::vector<std::string> SystemIgnorelistFiles;
std::vector<std::string> CoverageAllowlistFiles;
std::vector<std::string> CoverageIgnorelistFiles;
+ std::vector<std::string> BinaryMetadataIgnorelistFiles;
int CoverageFeatures = 0;
+ int BinaryMetadataFeatures = 0;
int MsanTrackOrigins = 0;
bool MsanUseAfterDtor = true;
+ bool MsanParamRetval = true;
bool CfiCrossDso = false;
bool CfiICallGeneralizePointers = false;
+ bool CfiICallNormalizeIntegers = false;
bool CfiCanonicalJumpTables = false;
int AsanFieldPadding = 0;
bool SharedRuntime = false;
+ bool StableABI = false;
bool AsanUseAfterScope = true;
bool AsanPoisonCustomArrayCookie = false;
bool AsanGlobalsDeadStripping = false;
@@ -63,11 +68,15 @@ class SanitizerArgs {
llvm::AsanDetectStackUseAfterReturnMode AsanUseAfterReturn =
llvm::AsanDetectStackUseAfterReturnMode::Invalid;
+ std::string MemtagMode;
+
public:
/// Parses the sanitizer arguments from an argument list.
- SanitizerArgs(const ToolChain &TC, const llvm::opt::ArgList &Args);
+ SanitizerArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
+ bool DiagnoseErrors = true);
bool needsSharedRt() const { return SharedRuntime; }
+ bool needsStableAbi() const { return StableABI; }
bool needsMemProfRt() const { return NeedsMemProfRt; }
bool needsAsanRt() const { return Sanitizers.has(SanitizerKind::Address); }
@@ -95,6 +104,27 @@ public:
bool needsStatsRt() const { return Stats; }
bool needsScudoRt() const { return Sanitizers.has(SanitizerKind::Scudo); }
+ bool hasMemTag() const {
+ return hasMemtagHeap() || hasMemtagStack() || hasMemtagGlobals();
+ }
+ bool hasMemtagHeap() const {
+ return Sanitizers.has(SanitizerKind::MemtagHeap);
+ }
+ bool hasMemtagStack() const {
+ return Sanitizers.has(SanitizerKind::MemtagStack);
+ }
+ bool hasMemtagGlobals() const {
+ return Sanitizers.has(SanitizerKind::MemtagGlobals);
+ }
+ const std::string &getMemtagMode() const {
+ assert(!MemtagMode.empty());
+ return MemtagMode;
+ }
+
+ bool hasShadowCallStack() const {
+ return Sanitizers.has(SanitizerKind::ShadowCallStack);
+ }
+
bool requiresPIE() const;
bool needsUnwindTables() const;
bool needsLTO() const;
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Tool.h b/contrib/llvm-project/clang/include/clang/Driver/Tool.h
index cc0a09fb2747..42cf99a4a970 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Tool.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Tool.h
@@ -52,6 +52,7 @@ public:
const ToolChain &getToolChain() const { return TheToolChain; }
virtual bool hasIntegratedAssembler() const { return false; }
+ virtual bool hasIntegratedBackend() const { return true; }
virtual bool canEmitIR() const { return false; }
virtual bool hasIntegratedCPP() const = 0;
virtual bool isLinkJob() const { return false; }
diff --git a/contrib/llvm-project/clang/include/clang/Driver/ToolChain.h b/contrib/llvm-project/clang/include/clang/Driver/ToolChain.h
index 882ae40086ce..2d0c1f826c17 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/ToolChain.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/ToolChain.h
@@ -9,7 +9,6 @@
#ifndef LLVM_CLANG_DRIVER_TOOLCHAIN_H
#define LLVM_CLANG_DRIVER_TOOLCHAIN_H
-#include "clang/Basic/DebugInfoOptions.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Sanitizers.h"
@@ -21,14 +20,16 @@
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/Frontend/Debug/Options.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/VersionTuple.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/TargetParser/Triple.h"
#include <cassert>
#include <climits>
#include <memory>
+#include <optional>
#include <string>
#include <utility>
@@ -108,11 +109,24 @@ public:
UNW_Libgcc
};
+ enum class UnwindTableLevel {
+ None,
+ Synchronous,
+ Asynchronous,
+ };
+
enum RTTIMode {
RM_Enabled,
RM_Disabled,
};
+ struct BitCodeLibraryInfo {
+ std::string Path;
+ bool ShouldInternalize;
+ BitCodeLibraryInfo(StringRef Path, bool ShouldInternalize = true)
+ : Path(Path), ShouldInternalize(ShouldInternalize) {}
+ };
+
enum FileType { FT_Object, FT_Static, FT_Shared };
private:
@@ -143,7 +157,8 @@ private:
mutable std::unique_ptr<Tool> StaticLibTool;
mutable std::unique_ptr<Tool> IfsMerge;
mutable std::unique_ptr<Tool> OffloadBundler;
- mutable std::unique_ptr<Tool> OffloadWrapper;
+ mutable std::unique_ptr<Tool> OffloadPackager;
+ mutable std::unique_ptr<Tool> LinkerWrapper;
Tool *getClang() const;
Tool *getFlang() const;
@@ -153,9 +168,10 @@ private:
Tool *getIfsMerge() const;
Tool *getClangAs() const;
Tool *getOffloadBundler() const;
- Tool *getOffloadWrapper() const;
+ Tool *getOffloadPackager() const;
+ Tool *getLinkerWrapper() const;
- mutable std::unique_ptr<SanitizerArgs> SanitizerArguments;
+ mutable bool SanitizerArgsChecked = false;
mutable std::unique_ptr<XRayArgs> XRayArguments;
/// The effective clang triple for the current Job.
@@ -166,17 +182,24 @@ private:
EffectiveTriple = std::move(ET);
}
- mutable llvm::Optional<CXXStdlibType> cxxStdlibType;
- mutable llvm::Optional<RuntimeLibType> runtimeLibType;
- mutable llvm::Optional<UnwindLibType> unwindLibType;
+ std::optional<std::string>
+ getFallbackAndroidTargetPath(StringRef BaseDir) const;
+
+ mutable std::optional<CXXStdlibType> cxxStdlibType;
+ mutable std::optional<RuntimeLibType> runtimeLibType;
+ mutable std::optional<UnwindLibType> unwindLibType;
protected:
MultilibSet Multilibs;
- Multilib SelectedMultilib;
+ llvm::SmallVector<Multilib> SelectedMultilibs;
ToolChain(const Driver &D, const llvm::Triple &T,
const llvm::opt::ArgList &Args);
+ /// Executes the given \p Executable and returns the stdout.
+ llvm::Expected<std::unique_ptr<llvm::MemoryBuffer>>
+ executeToolChainProgram(StringRef Executable) const;
+
void setTripleEnvironment(llvm::Triple::EnvironmentType Env);
virtual Tool *buildAssembler() const;
@@ -189,6 +212,11 @@ protected:
FileType Type,
bool AddArch) const;
+ /// Find the target-specific subdirectory for the current target triple under
+ /// \p BaseDir, doing fallback triple searches as necessary.
+ /// \return The subdirectory path if it exists.
+ std::optional<std::string> getTargetSubDirPath(StringRef BaseDir) const;
+
/// \name Utilities for implementing subclasses.
///@{
static void addSystemInclude(const llvm::opt::ArgList &DriverArgs,
@@ -204,6 +232,9 @@ protected:
static void addSystemIncludes(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
ArrayRef<StringRef> Paths);
+
+ static std::string concat(StringRef Path, const Twine &A, const Twine &B = "",
+ const Twine &C = "", const Twine &D = "");
///@}
public:
@@ -246,6 +277,10 @@ public:
return EffectiveTriple;
}
+ bool hasEffectiveTriple() const {
+ return !EffectiveTriple.getTriple().empty();
+ }
+
path_list &getLibraryPaths() { return LibraryPaths; }
const path_list &getLibraryPaths() const { return LibraryPaths; }
@@ -257,9 +292,23 @@ public:
const MultilibSet &getMultilibs() const { return Multilibs; }
- const Multilib &getMultilib() const { return SelectedMultilib; }
+ const llvm::SmallVector<Multilib> &getSelectedMultilibs() const {
+ return SelectedMultilibs;
+ }
- const SanitizerArgs& getSanitizerArgs() const;
+ /// Get flags suitable for multilib selection, based on the provided clang
+ /// command line arguments. The command line arguments aren't suitable to be
+ /// used directly for multilib selection because they are not normalized and
+ /// normalization is a complex process. The result of this function is similar
+ /// to clang command line arguments except that the list of arguments is
+ /// incomplete. Only certain command line arguments are processed. If more
+ /// command line arguments are needed for multilib selection then this
+ /// function should be extended.
+ /// To allow users to find out what flags are returned, clang accepts a
+ /// -print-multi-flags-experimental argument.
+ Multilib::flags_list getMultilibFlags(const llvm::opt::ArgList &) const;
+
+ SanitizerArgs getSanitizerArgs(const llvm::opt::ArgList &JobArgs) const;
const XRayArgs& getXRayArgs() const;
@@ -341,10 +390,7 @@ public:
/// is LLD. If it's set, it can be assumed that the linker is LLD built
/// at the same revision as clang, and clang can make assumptions about
/// LLD's supported flags, error output, etc.
- /// If LinkerIsLLDDarwinNew is non-nullptr, it's set if the linker is
- /// the new version in lld/MachO.
- std::string GetLinkerPath(bool *LinkerIsLLD = nullptr,
- bool *LinkerIsLLDDarwinNew = nullptr) const;
+ std::string GetLinkerPath(bool *LinkerIsLLD = nullptr) const;
/// Returns the linker path for emitting a static library.
std::string GetStaticLibToolPath() const;
@@ -375,11 +421,26 @@ public:
/// IsIntegratedAssemblerDefault - Does this tool chain enable -integrated-as
/// by default.
- virtual bool IsIntegratedAssemblerDefault() const { return false; }
+ virtual bool IsIntegratedAssemblerDefault() const { return true; }
+
+ /// IsIntegratedBackendDefault - Does this tool chain enable
+ /// -fintegrated-objemitter by default.
+ virtual bool IsIntegratedBackendDefault() const { return true; }
+
+ /// IsIntegratedBackendSupported - Does this tool chain support
+ /// -fintegrated-objemitter.
+ virtual bool IsIntegratedBackendSupported() const { return true; }
+
+ /// IsNonIntegratedBackendSupported - Does this tool chain support
+ /// -fno-integrated-objemitter.
+ virtual bool IsNonIntegratedBackendSupported() const { return false; }
/// Check if the toolchain should use the integrated assembler.
virtual bool useIntegratedAs() const;
+ /// Check if the toolchain should use the integrated backend.
+ virtual bool useIntegratedBackend() const;
+
/// Check if the toolchain should use AsmParser to parse inlineAsm when
/// integrated assembler is not default.
virtual bool parseInlineAsmUsingAsmParser() const { return false; }
@@ -402,6 +463,9 @@ public:
/// Check whether to enable x86 relax relocations by default.
virtual bool useRelaxRelocations() const;
+ /// Check whether use IEEE binary128 as long double format by default.
+ bool defaultToIEEELongDouble() const;
+
/// GetDefaultStackProtectorLevel - Get the default stack protector level for
/// this tool chain.
virtual LangOptions::StackProtectorMode
@@ -445,15 +509,15 @@ public:
StringRef Component,
FileType Type = ToolChain::FT_Static) const;
- // Returns target specific runtime path if it exists.
- virtual std::string getRuntimePath() const;
+ // Returns the target specific runtime path if it exists.
+ std::optional<std::string> getRuntimePath() const;
// Returns target specific standard library path if it exists.
- virtual std::string getStdlibPath() const;
+ std::optional<std::string> getStdlibPath() const;
- // Returns <ResourceDir>/lib/<OSName>/<arch>. This is used by runtimes (such
- // as OpenMP) to find arch-specific libraries.
- std::string getArchSpecificLibPath() const;
+ // Returns <ResourceDir>/lib/<OSName>/<arch> or <ResourceDir>/lib/<triple>.
+ // This is used by runtimes (such as OpenMP) to find arch-specific libraries.
+ virtual path_list getArchSpecificLibPaths() const;
// Returns <OSname> part of above.
virtual StringRef getOSLibName() const;
@@ -464,9 +528,9 @@ public:
/// Returns true if gcov instrumentation (-fprofile-arcs or --coverage) is on.
static bool needsGCovInstrumentation(const llvm::opt::ArgList &Args);
- /// IsUnwindTablesDefault - Does this tool chain use -funwind-tables
- /// by default.
- virtual bool IsUnwindTablesDefault(const llvm::opt::ArgList &Args) const;
+ /// How detailed should the unwind tables be by default.
+ virtual UnwindTableLevel
+ getDefaultUnwindTableLevel(const llvm::opt::ArgList &Args) const;
/// Test whether this toolchain supports outline atomics by default.
virtual bool
@@ -478,15 +542,12 @@ public:
virtual bool isPICDefault() const = 0;
/// Test whether this toolchain defaults to PIE.
- virtual bool isPIEDefault() const = 0;
-
- /// Test whether this toolchaind defaults to non-executable stacks.
- virtual bool isNoExecStackDefault() const;
+ virtual bool isPIEDefault(const llvm::opt::ArgList &Args) const = 0;
/// Tests whether this toolchain forces its default for PIC, PIE or
/// non-PIC. If this returns true, any PIC related flags should be ignored
- /// and instead the results of \c isPICDefault() and \c isPIEDefault() are
- /// used exclusively.
+ /// and instead the results of \c isPICDefault() and \c isPIEDefault(const
+ /// llvm::opt::ArgList &Args) are used exclusively.
virtual bool isPICDefaultForced() const = 0;
/// SupportsProfiling - Does this tool chain support -pg.
@@ -496,17 +557,20 @@ public:
virtual void CheckObjCARC() const {}
/// Get the default debug info format. Typically, this is DWARF.
- virtual codegenoptions::DebugInfoFormat getDefaultDebugFormat() const {
- return codegenoptions::DIF_DWARF;
+ virtual llvm::codegenoptions::DebugInfoFormat getDefaultDebugFormat() const {
+ return llvm::codegenoptions::DIF_DWARF;
}
/// UseDwarfDebugFlags - Embed the compile options to clang into the Dwarf
/// compile unit information.
virtual bool UseDwarfDebugFlags() const { return false; }
+ /// Add an additional -fdebug-prefix-map entry.
+ virtual std::string GetGlobalDebugPathRemapping() const { return {}; }
+
// Return the DWARF version to emit, in the absence of arguments
// to the contrary.
- virtual unsigned GetDefaultDwarfVersion() const { return 4; }
+ virtual unsigned GetDefaultDwarfVersion() const;
// Some toolchains may have different restrictions on the DWARF version and
// may need to adjust it. E.g. NVPTX may need to enforce DWARF2 even when host
@@ -530,8 +594,9 @@ public:
}
/// Adjust debug information kind considering all passed options.
- virtual void adjustDebugInfoKind(codegenoptions::DebugInfoKind &DebugInfoKind,
- const llvm::opt::ArgList &Args) const {}
+ virtual void
+ adjustDebugInfoKind(llvm::codegenoptions::DebugInfoKind &DebugInfoKind,
+ const llvm::opt::ArgList &Args) const {}
/// GetExceptionModel - Return the tool chain exception model.
virtual llvm::ExceptionHandling
@@ -546,6 +611,9 @@ public:
/// isThreadModelSupported() - Does this target support a thread model?
virtual bool isThreadModelSupported(const StringRef Model) const;
+ /// isBareMetal - Is this a bare metal target.
+ virtual bool isBareMetal() const { return false; }
+
virtual std::string getMultiarchTriple(const Driver &D,
const llvm::Triple &TargetTriple,
StringRef SysRoot) const {
@@ -597,6 +665,11 @@ public:
llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadKind) const;
+ /// Add options that need to be passed to cc1as for this target.
+ virtual void
+ addClangCC1ASTargetOptions(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CC1ASArgs) const;
+
/// Add warning options that need to be passed to cc1 for this target.
virtual void addClangWarningOptions(llvm::opt::ArgStringList &CC1Args) const;
@@ -659,6 +732,10 @@ public:
bool addFastMathRuntimeIfAvailable(
const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CmdArgs) const;
+ /// getSystemGPUArchs - Use a tool to detect the user's availible GPUs.
+ virtual Expected<SmallVector<std::string>>
+ getSystemGPUArchs(const llvm::opt::ArgList &Args) const;
+
/// addProfileRTLibs - When -fprofile-instr-profile is specified, try to pass
/// a suitable profile runtime library to the linker.
virtual void addProfileRTLibs(const llvm::opt::ArgList &Args,
@@ -680,9 +757,14 @@ public:
virtual VersionTuple computeMSVCVersion(const Driver *D,
const llvm::opt::ArgList &Args) const;
- /// Get paths of HIP device libraries.
- virtual llvm::SmallVector<std::string, 12>
- getHIPDeviceLibs(const llvm::opt::ArgList &Args) const;
+ /// Get paths for device libraries.
+ virtual llvm::SmallVector<BitCodeLibraryInfo, 12>
+ getDeviceLibs(const llvm::opt::ArgList &Args) const;
+
+ /// Add the system specific linker arguments to use
+ /// for the given HIP runtime library type.
+ virtual void AddHIPRuntimeLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const {}
/// Return sanitizers which are available in this toolchain.
virtual SanitizerMask getSupportedSanitizers() const;
@@ -704,6 +786,22 @@ public:
const llvm::fltSemantics *FPType = nullptr) const {
return llvm::DenormalMode::getIEEE();
}
+
+ // We want to expand the shortened versions of the triples passed in to
+ // the values used for the bitcode libraries.
+ static llvm::Triple getOpenMPTriple(StringRef TripleStr) {
+ llvm::Triple TT(TripleStr);
+ if (TT.getVendor() == llvm::Triple::UnknownVendor ||
+ TT.getOS() == llvm::Triple::UnknownOS) {
+ if (TT.getArch() == llvm::Triple::nvptx)
+ return llvm::Triple("nvptx-nvidia-cuda");
+ if (TT.getArch() == llvm::Triple::nvptx64)
+ return llvm::Triple("nvptx64-nvidia-cuda");
+ if (TT.getArch() == llvm::Triple::amdgcn)
+ return llvm::Triple("amdgcn-amd-amdhsa");
+ }
+ return TT;
+ }
};
/// Set a ToolChain's effective triple. Reset it when the registration object
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Types.def b/contrib/llvm-project/clang/include/clang/Driver/Types.def
index 997eea445c22..f72c27e1ee70 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Types.def
+++ b/contrib/llvm-project/clang/include/clang/Driver/Types.def
@@ -37,14 +37,16 @@
// C family source language (with and without preprocessing).
TYPE("cpp-output", PP_C, INVALID, "i", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("c", C, PP_C, "c", phases::Preprocess, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
-TYPE("cl", CL, PP_C, "cl", phases::Preprocess, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
-TYPE("clcpp", CLCXX, PP_CXX, "clcpp", phases::Preprocess, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
+TYPE("cl", CL, PP_CL, "cl", phases::Preprocess, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
+TYPE("cl-cpp-output", PP_CL, INVALID, "cli", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
+TYPE("clcpp", CLCXX, PP_CLCXX, "clcpp", phases::Preprocess, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
+TYPE("clcpp-cpp-output", PP_CLCXX, INVALID, "clii", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("cuda-cpp-output", PP_CUDA, INVALID, "cui", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("cuda", CUDA, PP_CUDA, "cu", phases::Preprocess, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("cuda", CUDA_DEVICE, PP_CUDA, "cu", phases::Preprocess, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
-TYPE("hip-cpp-output", PP_HIP, INVALID, "cui", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
-TYPE("hip", HIP, PP_HIP, "cu", phases::Preprocess, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
-TYPE("hip", HIP_DEVICE, PP_HIP, "cu", phases::Preprocess, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
+TYPE("hip-cpp-output", PP_HIP, INVALID, "hipi", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
+TYPE("hip", HIP, PP_HIP, "hip", phases::Preprocess, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
+TYPE("hip", HIP_DEVICE, PP_HIP, "hip", phases::Preprocess, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("objective-c-cpp-output", PP_ObjC, INVALID, "mi", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("objc-cpp-output", PP_ObjC_Alias, INVALID, "mi", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("objective-c", ObjC, PP_ObjC, "m", phases::Preprocess, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
@@ -54,6 +56,7 @@ TYPE("objective-c++-cpp-output", PP_ObjCXX, INVALID, "mii", phases
TYPE("objc++-cpp-output", PP_ObjCXX_Alias, INVALID, "mii", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("objective-c++", ObjCXX, PP_ObjCXX, "mm", phases::Preprocess, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("renderscript", RenderScript, PP_C, "rs", phases::Preprocess, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
+TYPE("hlsl", HLSL, PP_CXX, "hlsl", phases::Preprocess, phases::Compile, phases::Backend, phases::Assemble)
// C family input files to precompile.
TYPE("c-header-cpp-output", PP_CHeader, INVALID, "i", phases::Precompile)
@@ -63,7 +66,11 @@ TYPE("objective-c-header-cpp-output", PP_ObjCHeader, INVALID, "mi", phases
TYPE("objective-c-header", ObjCHeader, PP_ObjCHeader, "h", phases::Preprocess, phases::Precompile)
TYPE("c++-header-cpp-output", PP_CXXHeader, INVALID, "ii", phases::Precompile)
TYPE("c++-header", CXXHeader, PP_CXXHeader, "hh", phases::Preprocess, phases::Precompile)
-TYPE("objective-c++-header-cpp-output", PP_ObjCXXHeader, INVALID, "mii", phases::Precompile)
+TYPE("c++-header-unit-cpp-output", PP_CXXHeaderUnit,INVALID, "iih", phases::Precompile)
+TYPE("c++-header-unit-header", CXXHUHeader, PP_CXXHeaderUnit,"hh", phases::Preprocess, phases::Precompile)
+TYPE("c++-system-header", CXXSHeader, PP_CXXHeaderUnit,"hh", phases::Preprocess, phases::Precompile)
+TYPE("c++-user-header", CXXUHeader, PP_CXXHeaderUnit,"hh", phases::Preprocess, phases::Precompile)
+TYPE("objective-c++-header-cpp-output", PP_ObjCXXHeader, INVALID,"mii", phases::Precompile)
TYPE("objective-c++-header", ObjCXXHeader, PP_ObjCXXHeader, "h", phases::Preprocess, phases::Precompile)
TYPE("c++-module", CXXModule, PP_CXXModule, "cppm", phases::Preprocess, phases::Precompile, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("c++-module-cpp-output", PP_CXXModule, INVALID, "iim", phases::Precompile, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
@@ -72,7 +79,7 @@ TYPE("c++-module-cpp-output", PP_CXXModule, INVALID, "iim", phases
TYPE("ada", Ada, INVALID, nullptr, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("assembler", PP_Asm, INVALID, "s", phases::Assemble, phases::Link)
TYPE("assembler-with-cpp", Asm, PP_Asm, "S", phases::Preprocess, phases::Assemble, phases::Link)
-TYPE("f95", PP_Fortran, INVALID, nullptr, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
+TYPE("f95", PP_Fortran, INVALID, "i", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("f95-cpp-input", Fortran, PP_Fortran, nullptr, phases::Preprocess, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("java", Java, INVALID, nullptr, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
@@ -88,11 +95,12 @@ TYPE("ast", AST, INVALID, "ast", phases
TYPE("ifs", IFS, INVALID, "ifs", phases::IfsMerge)
TYPE("ifs-cpp", IFS_CPP, INVALID, "ifs", phases::Compile, phases::IfsMerge)
TYPE("pcm", ModuleFile, INVALID, "pcm", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
+TYPE("header-unit", HeaderUnit, INVALID, "pcm", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("plist", Plist, INVALID, "plist", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("rewritten-objc", RewrittenObjC,INVALID, "cpp", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("rewritten-legacy-objc", RewrittenLegacyObjC,INVALID, "cpp", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("remap", Remap, INVALID, "remap", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
-TYPE("precompiled-header", PCH, INVALID, "gch", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
+TYPE("precompiled-header", PCH, INVALID, "pch", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("object", Object, INVALID, "o", phases::Link)
TYPE("treelang", Treelang, INVALID, nullptr, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("image", Image, INVALID, "out", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
@@ -100,4 +108,6 @@ TYPE("dSYM", dSYM, INVALID, "dSYM", phases
TYPE("dependencies", Dependencies, INVALID, "d", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("cuda-fatbin", CUDA_FATBIN, INVALID, "fatbin", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("hip-fatbin", HIP_FATBIN, INVALID, "hipfb", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
+TYPE("api-information", API_INFO, INVALID, "json", phases::Precompile)
+TYPE("dx-container", DX_CONTAINER, INVALID, "dxo", phases::Compile, phases::Backend)
TYPE("none", Nothing, INVALID, nullptr, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Types.h b/contrib/llvm-project/clang/include/clang/Driver/Types.h
index 6a1f57416ae5..121b58a6b477 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Types.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Types.h
@@ -43,7 +43,7 @@ namespace types {
/// getTypeTempSuffix - Return the suffix to use when creating a
/// temp file of this type, or null if unspecified.
- const char *getTypeTempSuffix(ID Id, bool CLMode = false);
+ const char *getTypeTempSuffix(ID Id, bool CLStyle = false);
/// onlyPrecompileType - Should this type only be precompiled.
bool onlyPrecompileType(ID Id);
@@ -66,6 +66,17 @@ namespace types {
/// isAcceptedByClang - Can clang handle this input type.
bool isAcceptedByClang(ID Id);
+ /// isAcceptedByFlang - Can flang handle this input type.
+ bool isAcceptedByFlang(ID Id);
+
+ /// isDerivedFromC - Is the input derived from C.
+ ///
+ /// That is, does the lexer follow the rules of
+ /// TokenConcatenation::AvoidConcat. If this is the case, the preprocessor may
+ /// add and remove whitespace between tokens. Used to determine whether the
+ /// input can be processed by -fminimize-whitespace.
+ bool isDerivedFromC(ID Id);
+
/// isCXX - Is this a "C++" input (C++ and Obj-C++ sources and headers).
bool isCXX(ID Id);
@@ -84,8 +95,8 @@ namespace types {
/// isOpenCL - Is this an "OpenCL" input.
bool isOpenCL(ID Id);
- /// isFortran - Is this a Fortran input.
- bool isFortran(ID Id);
+ /// isHLSL - Is this an HLSL input.
+ bool isHLSL(ID Id);
/// isSrcFile - Is this a source file, i.e. something that still has to be
/// preprocessed. The logic behind this is the same that decides if the first
@@ -103,7 +114,7 @@ namespace types {
/// getCompilationPhases - Get the list of compilation phases ('Phases') to be
/// done for type 'Id' up until including LastPhase.
llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases>
- getCompilationPhases(ID Id, phases::ID LastPhase = phases::LastPhase);
+ getCompilationPhases(ID Id, phases::ID LastPhase = phases::IfsMerge);
llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases>
getCompilationPhases(const clang::driver::Driver &Driver,
llvm::opt::DerivedArgList &DAL, ID Id);
diff --git a/contrib/llvm-project/clang/include/clang/Driver/Util.h b/contrib/llvm-project/clang/include/clang/Driver/Util.h
index 6788420912a1..92d3d40433a3 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/Util.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/Util.h
@@ -13,7 +13,6 @@
#include "llvm/ADT/DenseMap.h"
namespace clang {
-class DiagnosticsEngine;
namespace driver {
class Action;
diff --git a/contrib/llvm-project/clang/include/clang/Driver/XRayArgs.h b/contrib/llvm-project/clang/include/clang/Driver/XRayArgs.h
index 6ed99a127669..bdd3d979547e 100644
--- a/contrib/llvm-project/clang/include/clang/Driver/XRayArgs.h
+++ b/contrib/llvm-project/clang/include/clang/Driver/XRayArgs.h
@@ -25,15 +25,8 @@ class XRayArgs {
std::vector<std::string> ExtraDeps;
std::vector<std::string> Modes;
XRayInstrSet InstrumentationBundle;
- bool XRayInstrument = false;
- int InstructionThreshold = 200;
- bool XRayAlwaysEmitCustomEvents = false;
- bool XRayAlwaysEmitTypedEvents = false;
+ llvm::opt::Arg *XRayInstrument = nullptr;
bool XRayRT = true;
- bool XRayIgnoreLoops = false;
- bool XRayFunctionIndex;
- int XRayFunctionGroups = 1;
- int XRaySelectedFunctionGroup = 0;
public:
/// Parses the XRay arguments from an argument list.
diff --git a/contrib/llvm-project/clang/include/clang/ExtractAPI/API.h b/contrib/llvm-project/clang/include/clang/ExtractAPI/API.h
new file mode 100644
index 000000000000..0a0f1bd1e95f
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/ExtractAPI/API.h
@@ -0,0 +1,1675 @@
+//===- ExtractAPI/API.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the APIRecord-based structs and the APISet class.
+///
+/// Clang ExtractAPI is a tool to collect API information from a given set of
+/// header files. The structures in this file describe data representations of
+/// the API information collected for various kinds of symbols.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_EXTRACTAPI_API_H
+#define LLVM_CLANG_EXTRACTAPI_API_H
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RawCommentList.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/Specifiers.h"
+#include "clang/ExtractAPI/AvailabilityInfo.h"
+#include "clang/ExtractAPI/DeclarationFragments.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/TargetParser/Triple.h"
+#include <memory>
+#include <type_traits>
+
+namespace clang {
+namespace extractapi {
+
+class Template {
+ struct TemplateParameter {
+ // "class", "typename", or concept name
+ std::string Type;
+ std::string Name;
+ unsigned int Index;
+ unsigned int Depth;
+ bool IsParameterPack;
+
+ TemplateParameter(std::string Type, std::string Name, unsigned int Index,
+ unsigned int Depth, bool IsParameterPack)
+ : Type(Type), Name(Name), Index(Index), Depth(Depth),
+ IsParameterPack(IsParameterPack) {}
+ };
+
+ struct TemplateConstraint {
+ // type name of the constraint, if it has one
+ std::string Type;
+ std::string Kind;
+ std::string LHS, RHS;
+ };
+ llvm::SmallVector<TemplateParameter> Parameters;
+ llvm::SmallVector<TemplateConstraint> Constraints;
+
+public:
+ Template() = default;
+
+ Template(const TemplateDecl *Decl) {
+ for (auto *const Parameter : *Decl->getTemplateParameters()) {
+ const auto *Param = dyn_cast<TemplateTypeParmDecl>(Parameter);
+ if (!Param) // some params are null
+ continue;
+ std::string Type;
+ if (Param->hasTypeConstraint())
+ Type = Param->getTypeConstraint()->getNamedConcept()->getName().str();
+ else if (Param->wasDeclaredWithTypename())
+ Type = "typename";
+ else
+ Type = "class";
+
+ addTemplateParameter(Type, Param->getName().str(), Param->getIndex(),
+ Param->getDepth(), Param->isParameterPack());
+ }
+ }
+
+ Template(const ClassTemplatePartialSpecializationDecl *Decl) {
+ for (auto *const Parameter : *Decl->getTemplateParameters()) {
+ const auto *Param = dyn_cast<TemplateTypeParmDecl>(Parameter);
+ if (!Param) // some params are null
+ continue;
+ std::string Type;
+ if (Param->hasTypeConstraint())
+ Type = Param->getTypeConstraint()->getNamedConcept()->getName().str();
+ else if (Param->wasDeclaredWithTypename())
+ Type = "typename";
+ else
+ Type = "class";
+
+ addTemplateParameter(Type, Param->getName().str(), Param->getIndex(),
+ Param->getDepth(), Param->isParameterPack());
+ }
+ }
+
+ Template(const VarTemplatePartialSpecializationDecl *Decl) {
+ for (auto *const Parameter : *Decl->getTemplateParameters()) {
+ const auto *Param = dyn_cast<TemplateTypeParmDecl>(Parameter);
+ if (!Param) // some params are null
+ continue;
+ std::string Type;
+ if (Param->hasTypeConstraint())
+ Type = Param->getTypeConstraint()->getNamedConcept()->getName().str();
+ else if (Param->wasDeclaredWithTypename())
+ Type = "typename";
+ else
+ Type = "class";
+
+ addTemplateParameter(Type, Param->getName().str(), Param->getIndex(),
+ Param->getDepth(), Param->isParameterPack());
+ }
+ }
+
+ const llvm::SmallVector<TemplateParameter> &getParameters() const {
+ return Parameters;
+ }
+
+ const llvm::SmallVector<TemplateConstraint> &getConstraints() const {
+ return Constraints;
+ }
+
+ void addTemplateParameter(std::string Type, std::string Name,
+ unsigned int Index, unsigned int Depth,
+ bool IsParameterPack) {
+ Parameters.emplace_back(Type, Name, Index, Depth, IsParameterPack);
+ }
+
+ bool empty() const { return Parameters.empty() && Constraints.empty(); }
+};
+
+/// DocComment is a vector of RawComment::CommentLine.
+///
+/// Each line represents one line of striped documentation comment,
+/// with source range information. This simplifies calculating the source
+/// location of a character in the doc comment for pointing back to the source
+/// file.
+/// e.g.
+/// \code
+/// /// This is a documentation comment
+/// ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' First line.
+/// /// with multiple lines.
+/// ^~~~~~~~~~~~~~~~~~~~~~~' Second line.
+/// \endcode
+using DocComment = std::vector<RawComment::CommentLine>;
+
+// Classes deriving from APIRecord need to have USR be the first constructor
+// argument. This is so that they are compatible with `addTopLevelRecord`
+// defined in API.cpp
+/// The base representation of an API record. Holds common symbol information.
+struct APIRecord {
+ /// Discriminator for LLVM-style RTTI (dyn_cast<> et al.)
+ enum RecordKind {
+ RK_Unknown,
+ RK_Namespace,
+ RK_GlobalFunction,
+ RK_GlobalFunctionTemplate,
+ RK_GlobalFunctionTemplateSpecialization,
+ RK_GlobalVariable,
+ RK_GlobalVariableTemplate,
+ RK_GlobalVariableTemplateSpecialization,
+ RK_GlobalVariableTemplatePartialSpecialization,
+ RK_EnumConstant,
+ RK_Enum,
+ RK_StructField,
+ RK_Struct,
+ RK_UnionField,
+ RK_Union,
+ RK_StaticField,
+ RK_CXXField,
+ RK_CXXFieldTemplate,
+ RK_CXXClass,
+ RK_ClassTemplate,
+ RK_ClassTemplateSpecialization,
+ RK_ClassTemplatePartialSpecialization,
+ RK_Concept,
+ RK_CXXStaticMethod,
+ RK_CXXInstanceMethod,
+ RK_CXXConstructorMethod,
+ RK_CXXDestructorMethod,
+ RK_CXXMethodTemplate,
+ RK_CXXMethodTemplateSpecialization,
+ RK_ObjCInstanceProperty,
+ RK_ObjCClassProperty,
+ RK_ObjCIvar,
+ RK_ObjCClassMethod,
+ RK_ObjCInstanceMethod,
+ RK_ObjCInterface,
+ RK_ObjCCategory,
+ RK_ObjCCategoryModule,
+ RK_ObjCProtocol,
+ RK_MacroDefinition,
+ RK_Typedef,
+ };
+
+ /// Stores information about the context of the declaration of this API.
+ /// This is roughly analogous to the DeclContext hierarchy for an AST Node.
+ struct HierarchyInformation {
+ /// The USR of the parent API.
+ StringRef ParentUSR;
+ /// The name of the parent API.
+ StringRef ParentName;
+ /// The record kind of the parent API.
+ RecordKind ParentKind = RK_Unknown;
+ /// A pointer to the parent APIRecord if known.
+ APIRecord *ParentRecord = nullptr;
+
+ HierarchyInformation() = default;
+ HierarchyInformation(StringRef ParentUSR, StringRef ParentName,
+ RecordKind Kind, APIRecord *ParentRecord = nullptr)
+ : ParentUSR(ParentUSR), ParentName(ParentName), ParentKind(Kind),
+ ParentRecord(ParentRecord) {}
+
+ bool empty() const {
+ return ParentUSR.empty() && ParentName.empty() &&
+ ParentKind == RK_Unknown && ParentRecord == nullptr;
+ }
+ };
+
+ StringRef USR;
+ StringRef Name;
+ PresumedLoc Location;
+ AvailabilityInfo Availability;
+ LinkageInfo Linkage;
+
+ /// Documentation comment lines attached to this symbol declaration.
+ DocComment Comment;
+
+ /// Declaration fragments of this symbol declaration.
+ DeclarationFragments Declaration;
+
+ /// SubHeading provides a more detailed representation than the plain
+ /// declaration name.
+ ///
+ /// SubHeading is an array of declaration fragments of tagged declaration
+ /// name, with potentially more tokens (for example the \c +/- symbol for
+ /// Objective-C class/instance methods).
+ DeclarationFragments SubHeading;
+
+ /// Information about the parent record of this record.
+ HierarchyInformation ParentInformation;
+
+ /// Whether the symbol was defined in a system header.
+ bool IsFromSystemHeader;
+
+private:
+ const RecordKind Kind;
+
+public:
+ RecordKind getKind() const { return Kind; }
+
+ APIRecord() = delete;
+
+ APIRecord(RecordKind Kind, StringRef USR, StringRef Name,
+ PresumedLoc Location, AvailabilityInfo Availability,
+ LinkageInfo Linkage, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ bool IsFromSystemHeader)
+ : USR(USR), Name(Name), Location(Location),
+ Availability(std::move(Availability)), Linkage(Linkage),
+ Comment(Comment), Declaration(Declaration), SubHeading(SubHeading),
+ IsFromSystemHeader(IsFromSystemHeader), Kind(Kind) {}
+
+ APIRecord(RecordKind Kind, StringRef USR, StringRef Name)
+ : USR(USR), Name(Name), Kind(Kind) {}
+
+ // Pure virtual destructor to make APIRecord abstract
+ virtual ~APIRecord() = 0;
+};
+
+struct NamespaceRecord : APIRecord {
+ NamespaceRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, bool IsFromSystemHeader)
+ : APIRecord(RK_Namespace, USR, Name, Loc, std::move(Availability),
+ Linkage, Comment, Declaration, SubHeading,
+ IsFromSystemHeader) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_Namespace;
+ }
+};
+
+/// This holds information associated with global functions.
+struct GlobalFunctionRecord : APIRecord {
+ FunctionSignature Signature;
+
+ GlobalFunctionRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ FunctionSignature Signature, bool IsFromSystemHeader)
+ : APIRecord(RK_GlobalFunction, USR, Name, Loc, std::move(Availability),
+ Linkage, Comment, Declaration, SubHeading,
+ IsFromSystemHeader),
+ Signature(Signature) {}
+
+ GlobalFunctionRecord(RecordKind Kind, StringRef USR, StringRef Name,
+ PresumedLoc Loc, AvailabilityInfo Availability,
+ LinkageInfo Linkage, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ FunctionSignature Signature, bool IsFromSystemHeader)
+ : APIRecord(Kind, USR, Name, Loc, std::move(Availability), Linkage,
+ Comment, Declaration, SubHeading, IsFromSystemHeader),
+ Signature(Signature) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_GlobalFunction;
+ }
+
+private:
+ virtual void anchor();
+};
+
+struct GlobalFunctionTemplateRecord : GlobalFunctionRecord {
+ Template Templ;
+
+ GlobalFunctionTemplateRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability,
+ LinkageInfo Linkage, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ FunctionSignature Signature, Template Template,
+ bool IsFromSystemHeader)
+ : GlobalFunctionRecord(RK_GlobalFunctionTemplate, USR, Name, Loc,
+ std::move(Availability), Linkage, Comment,
+ Declaration, SubHeading, Signature,
+ IsFromSystemHeader),
+ Templ(Template) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_GlobalFunctionTemplate;
+ }
+};
+
+struct GlobalFunctionTemplateSpecializationRecord : GlobalFunctionRecord {
+ GlobalFunctionTemplateSpecializationRecord(
+ StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, FunctionSignature Signature,
+ bool IsFromSystemHeader)
+ : GlobalFunctionRecord(RK_GlobalFunctionTemplateSpecialization, USR, Name,
+ Loc, std::move(Availability), Linkage, Comment,
+ Declaration, SubHeading, Signature,
+ IsFromSystemHeader) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_GlobalFunctionTemplateSpecialization;
+ }
+};
+
+/// This holds information associated with global functions.
+struct GlobalVariableRecord : APIRecord {
+ GlobalVariableRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, bool IsFromSystemHeader)
+ : APIRecord(RK_GlobalVariable, USR, Name, Loc, std::move(Availability),
+ Linkage, Comment, Declaration, SubHeading,
+ IsFromSystemHeader) {}
+
+ GlobalVariableRecord(RecordKind Kind, StringRef USR, StringRef Name,
+ PresumedLoc Loc, AvailabilityInfo Availability,
+ LinkageInfo Linkage, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, bool IsFromSystemHeader)
+ : APIRecord(Kind, USR, Name, Loc, std::move(Availability), Linkage,
+ Comment, Declaration, SubHeading, IsFromSystemHeader) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_GlobalVariable;
+ }
+
+private:
+ virtual void anchor();
+};
+
+struct GlobalVariableTemplateRecord : GlobalVariableRecord {
+ Template Templ;
+
+ GlobalVariableTemplateRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability,
+ LinkageInfo Linkage, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ class Template Template, bool IsFromSystemHeader)
+ : GlobalVariableRecord(RK_GlobalVariableTemplate, USR, Name, Loc,
+ std::move(Availability), Linkage, Comment,
+ Declaration, SubHeading, IsFromSystemHeader),
+ Templ(Template) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_GlobalVariableTemplate;
+ }
+};
+
+struct GlobalVariableTemplateSpecializationRecord : GlobalVariableRecord {
+ GlobalVariableTemplateSpecializationRecord(
+ StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, bool IsFromSystemHeader)
+ : GlobalVariableRecord(RK_GlobalVariableTemplateSpecialization, USR, Name,
+ Loc, std::move(Availability), Linkage, Comment,
+ Declaration, SubHeading, IsFromSystemHeader) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_GlobalVariableTemplateSpecialization;
+ }
+};
+
+struct GlobalVariableTemplatePartialSpecializationRecord
+ : GlobalVariableRecord {
+ Template Templ;
+
+ GlobalVariableTemplatePartialSpecializationRecord(
+ StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, class Template Template,
+ bool IsFromSystemHeader)
+ : GlobalVariableRecord(RK_GlobalVariableTemplatePartialSpecialization,
+ USR, Name, Loc, std::move(Availability), Linkage,
+ Comment, Declaration, SubHeading,
+ IsFromSystemHeader),
+ Templ(Template) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_GlobalVariableTemplatePartialSpecialization;
+ }
+};
+
+/// This holds information associated with enum constants.
+struct EnumConstantRecord : APIRecord {
+ EnumConstantRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, bool IsFromSystemHeader)
+ : APIRecord(RK_EnumConstant, USR, Name, Loc, std::move(Availability),
+ LinkageInfo::none(), Comment, Declaration, SubHeading,
+ IsFromSystemHeader) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_EnumConstant;
+ }
+
+private:
+ virtual void anchor();
+};
+
+/// This holds information associated with enums.
+struct EnumRecord : APIRecord {
+ SmallVector<std::unique_ptr<EnumConstantRecord>> Constants;
+
+ EnumRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ bool IsFromSystemHeader)
+ : APIRecord(RK_Enum, USR, Name, Loc, std::move(Availability),
+ LinkageInfo::none(), Comment, Declaration, SubHeading,
+ IsFromSystemHeader) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_Enum;
+ }
+
+private:
+ virtual void anchor();
+};
+
+/// This holds information associated with struct fields.
+struct RecordFieldRecord : APIRecord {
+ RecordFieldRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, RecordKind Kind,
+ bool IsFromSystemHeader)
+ : APIRecord(Kind, USR, Name, Loc, std::move(Availability),
+ LinkageInfo::none(), Comment, Declaration, SubHeading,
+ IsFromSystemHeader) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_StructField ||
+ Record->getKind() == RK_UnionField;
+ }
+
+private:
+ virtual void anchor();
+};
+
+/// This holds information associated with structs.
+struct RecordRecord : APIRecord {
+ SmallVector<std::unique_ptr<RecordFieldRecord>> Fields;
+
+ RecordRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, RecordKind Kind,
+ bool IsFromSystemHeader)
+ : APIRecord(Kind, USR, Name, Loc, std::move(Availability),
+ LinkageInfo::none(), Comment, Declaration, SubHeading,
+ IsFromSystemHeader) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_Struct || Record->getKind() == RK_Union;
+ }
+
+private:
+ virtual void anchor();
+};
+
+struct CXXFieldRecord : APIRecord {
+ AccessControl Access;
+
+ CXXFieldRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, AccessControl Access,
+ bool IsFromSystemHeader)
+ : APIRecord(RK_CXXField, USR, Name, Loc, std::move(Availability),
+ LinkageInfo::none(), Comment, Declaration, SubHeading,
+ IsFromSystemHeader),
+ Access(Access) {}
+
+ CXXFieldRecord(RecordKind Kind, StringRef USR, StringRef Name,
+ PresumedLoc Loc, AvailabilityInfo Availability,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, AccessControl Access,
+ bool IsFromSystemHeader)
+ : APIRecord(Kind, USR, Name, Loc, std::move(Availability),
+ LinkageInfo::none(), Comment, Declaration, SubHeading,
+ IsFromSystemHeader),
+ Access(Access) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_CXXField;
+ }
+
+private:
+ virtual void anchor();
+};
+
+struct CXXFieldTemplateRecord : CXXFieldRecord {
+ Template Templ;
+
+ CXXFieldTemplateRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, AccessControl Access,
+ Template Template, bool IsFromSystemHeader)
+ : CXXFieldRecord(RK_CXXFieldTemplate, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration,
+ SubHeading, Access, IsFromSystemHeader),
+ Templ(Template) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_CXXFieldTemplate;
+ }
+};
+
+struct CXXMethodRecord : APIRecord {
+ FunctionSignature Signature;
+ AccessControl Access;
+
+ CXXMethodRecord() = delete;
+
+ CXXMethodRecord(RecordKind Kind, StringRef USR, StringRef Name,
+ PresumedLoc Loc, AvailabilityInfo Availability,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, FunctionSignature Signature,
+ AccessControl Access, bool IsFromSystemHeader)
+ : APIRecord(Kind, USR, Name, Loc, std::move(Availability),
+ LinkageInfo::none(), Comment, Declaration, SubHeading,
+ IsFromSystemHeader),
+ Signature(Signature), Access(Access) {}
+
+ virtual ~CXXMethodRecord() = 0;
+};
+
+struct CXXConstructorRecord : CXXMethodRecord {
+ CXXConstructorRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ FunctionSignature Signature, AccessControl Access,
+ bool IsFromSystemHeader)
+ : CXXMethodRecord(RK_CXXConstructorMethod, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration,
+ SubHeading, Signature, Access, IsFromSystemHeader) {}
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_CXXConstructorMethod;
+ }
+
+private:
+ virtual void anchor();
+};
+
+struct CXXDestructorRecord : CXXMethodRecord {
+ CXXDestructorRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ FunctionSignature Signature, AccessControl Access,
+ bool IsFromSystemHeader)
+ : CXXMethodRecord(RK_CXXDestructorMethod, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration,
+ SubHeading, Signature, Access, IsFromSystemHeader) {}
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_CXXDestructorMethod;
+ }
+
+private:
+ virtual void anchor();
+};
+
+struct CXXStaticMethodRecord : CXXMethodRecord {
+ CXXStaticMethodRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ FunctionSignature Signature, AccessControl Access,
+ bool IsFromSystemHeader)
+ : CXXMethodRecord(RK_CXXStaticMethod, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration,
+ SubHeading, Signature, Access, IsFromSystemHeader) {}
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_CXXStaticMethod;
+ }
+
+private:
+ virtual void anchor();
+};
+
+struct CXXInstanceMethodRecord : CXXMethodRecord {
+ CXXInstanceMethodRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ FunctionSignature Signature, AccessControl Access,
+ bool IsFromSystemHeader)
+ : CXXMethodRecord(RK_CXXInstanceMethod, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration,
+ SubHeading, Signature, Access, IsFromSystemHeader) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_CXXInstanceMethod;
+ }
+
+private:
+ virtual void anchor();
+};
+
+struct CXXMethodTemplateRecord : CXXMethodRecord {
+ Template Templ;
+
+ CXXMethodTemplateRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ FunctionSignature Signature, AccessControl Access,
+ Template Template, bool IsFromSystemHeader)
+ : CXXMethodRecord(RK_CXXMethodTemplate, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration,
+ SubHeading, Signature, Access, IsFromSystemHeader),
+ Templ(Template) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_CXXMethodTemplate;
+ }
+};
+
+struct CXXMethodTemplateSpecializationRecord : CXXMethodRecord {
+ CXXMethodTemplateSpecializationRecord(
+ StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ FunctionSignature Signature, AccessControl Access,
+ bool IsFromSystemHeader)
+ : CXXMethodRecord(RK_CXXMethodTemplateSpecialization, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration,
+ SubHeading, Signature, Access, IsFromSystemHeader) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_CXXMethodTemplateSpecialization;
+ }
+};
+
+/// This holds information associated with Objective-C properties.
+struct ObjCPropertyRecord : APIRecord {
+ /// The attributes associated with an Objective-C property.
+ enum AttributeKind : unsigned {
+ NoAttr = 0,
+ ReadOnly = 1,
+ Dynamic = 1 << 2,
+ };
+
+ AttributeKind Attributes;
+ StringRef GetterName;
+ StringRef SetterName;
+ bool IsOptional;
+
+ ObjCPropertyRecord(RecordKind Kind, StringRef USR, StringRef Name,
+ PresumedLoc Loc, AvailabilityInfo Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, AttributeKind Attributes,
+ StringRef GetterName, StringRef SetterName,
+ bool IsOptional, bool IsFromSystemHeader)
+ : APIRecord(Kind, USR, Name, Loc, std::move(Availability),
+ LinkageInfo::none(), Comment, Declaration, SubHeading,
+ IsFromSystemHeader),
+ Attributes(Attributes), GetterName(GetterName), SetterName(SetterName),
+ IsOptional(IsOptional) {}
+
+ bool isReadOnly() const { return Attributes & ReadOnly; }
+ bool isDynamic() const { return Attributes & Dynamic; }
+
+ virtual ~ObjCPropertyRecord() = 0;
+};
+
+struct ObjCInstancePropertyRecord : ObjCPropertyRecord {
+ ObjCInstancePropertyRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ AttributeKind Attributes, StringRef GetterName,
+ StringRef SetterName, bool IsOptional,
+ bool IsFromSystemHeader)
+ : ObjCPropertyRecord(RK_ObjCInstanceProperty, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration,
+ SubHeading, Attributes, GetterName, SetterName,
+ IsOptional, IsFromSystemHeader) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_ObjCInstanceProperty;
+ }
+
+private:
+ virtual void anchor();
+};
+
+struct ObjCClassPropertyRecord : ObjCPropertyRecord {
+ ObjCClassPropertyRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ AttributeKind Attributes, StringRef GetterName,
+ StringRef SetterName, bool IsOptional,
+ bool IsFromSystemHeader)
+ : ObjCPropertyRecord(RK_ObjCClassProperty, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration,
+ SubHeading, Attributes, GetterName, SetterName,
+ IsOptional, IsFromSystemHeader) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_ObjCClassProperty;
+ }
+
+private:
+ virtual void anchor();
+};
+
+/// This holds information associated with Objective-C instance variables.
+struct ObjCInstanceVariableRecord : APIRecord {
+ using AccessControl = ObjCIvarDecl::AccessControl;
+ AccessControl Access;
+
+ ObjCInstanceVariableRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ AccessControl Access, bool IsFromSystemHeader)
+ : APIRecord(RK_ObjCIvar, USR, Name, Loc, std::move(Availability),
+ LinkageInfo::none(), Comment, Declaration, SubHeading,
+ IsFromSystemHeader),
+ Access(Access) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_ObjCIvar;
+ }
+
+private:
+ virtual void anchor();
+};
+
+/// This holds information associated with Objective-C methods.
+struct ObjCMethodRecord : APIRecord {
+ FunctionSignature Signature;
+
+ ObjCMethodRecord() = delete;
+
+ ObjCMethodRecord(RecordKind Kind, StringRef USR, StringRef Name,
+ PresumedLoc Loc, AvailabilityInfo Availability,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, FunctionSignature Signature,
+ bool IsFromSystemHeader)
+ : APIRecord(Kind, USR, Name, Loc, std::move(Availability),
+ LinkageInfo::none(), Comment, Declaration, SubHeading,
+ IsFromSystemHeader),
+ Signature(Signature) {}
+
+ virtual ~ObjCMethodRecord() = 0;
+};
+
+struct ObjCInstanceMethodRecord : ObjCMethodRecord {
+ ObjCInstanceMethodRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ FunctionSignature Signature, bool IsFromSystemHeader)
+ : ObjCMethodRecord(RK_ObjCInstanceMethod, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration,
+ SubHeading, Signature, IsFromSystemHeader) {}
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_ObjCInstanceMethod;
+ }
+
+private:
+ virtual void anchor();
+};
+
+struct ObjCClassMethodRecord : ObjCMethodRecord {
+ ObjCClassMethodRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ FunctionSignature Signature, bool IsFromSystemHeader)
+ : ObjCMethodRecord(RK_ObjCClassMethod, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration,
+ SubHeading, Signature, IsFromSystemHeader) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_ObjCClassMethod;
+ }
+
+private:
+ virtual void anchor();
+};
+
+/// This represents a reference to another symbol that might come from external
+/// sources.
+struct SymbolReference {
+ StringRef Name;
+ StringRef USR;
+
+ /// The source project/module/product of the referred symbol.
+ StringRef Source;
+
+ SymbolReference() = default;
+ SymbolReference(StringRef Name, StringRef USR = "", StringRef Source = "")
+ : Name(Name), USR(USR), Source(Source) {}
+ SymbolReference(const APIRecord &Record)
+ : Name(Record.Name), USR(Record.USR) {}
+ SymbolReference(const APIRecord *Record)
+ : Name(Record->Name), USR(Record->USR) {}
+
+ /// Determine if this SymbolReference is empty.
+ ///
+ /// \returns true if and only if all \c Name, \c USR, and \c Source is empty.
+ bool empty() const { return Name.empty() && USR.empty() && Source.empty(); }
+};
+
+struct StaticFieldRecord : CXXFieldRecord {
+ SymbolReference Context;
+
+ StaticFieldRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, SymbolReference Context,
+ AccessControl Access, bool IsFromSystemHeader)
+ : CXXFieldRecord(RK_StaticField, USR, Name, Loc, std::move(Availability),
+ Comment, Declaration, SubHeading, Access,
+ IsFromSystemHeader),
+ Context(Context) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_StaticField;
+ }
+};
+
+/// The base representation of an Objective-C container record. Holds common
+/// information associated with Objective-C containers.
+struct ObjCContainerRecord : APIRecord {
+ SmallVector<std::unique_ptr<ObjCMethodRecord>> Methods;
+ SmallVector<std::unique_ptr<ObjCPropertyRecord>> Properties;
+ SmallVector<std::unique_ptr<ObjCInstanceVariableRecord>> Ivars;
+ SmallVector<SymbolReference> Protocols;
+
+ ObjCContainerRecord() = delete;
+
+ ObjCContainerRecord(RecordKind Kind, StringRef USR, StringRef Name,
+ PresumedLoc Loc, AvailabilityInfo Availability,
+ LinkageInfo Linkage, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, bool IsFromSystemHeader)
+ : APIRecord(Kind, USR, Name, Loc, std::move(Availability), Linkage,
+ Comment, Declaration, SubHeading, IsFromSystemHeader) {}
+
+ virtual ~ObjCContainerRecord() = 0;
+};
+
+struct CXXClassRecord : APIRecord {
+ SmallVector<std::unique_ptr<CXXFieldRecord>> Fields;
+ SmallVector<std::unique_ptr<CXXMethodRecord>> Methods;
+ SmallVector<SymbolReference> Bases;
+ AccessControl Access;
+
+ CXXClassRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, RecordKind Kind,
+ AccessControl Access, bool IsFromSystemHeader)
+ : APIRecord(Kind, USR, Name, Loc, std::move(Availability),
+ LinkageInfo::none(), Comment, Declaration, SubHeading,
+ IsFromSystemHeader),
+ Access(Access) {}
+
+ static bool classof(const APIRecord *Record) {
+ return (Record->getKind() == RK_CXXClass);
+ }
+
+private:
+ virtual void anchor();
+};
+
+struct ClassTemplateRecord : CXXClassRecord {
+ Template Templ;
+
+ ClassTemplateRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, Template Template,
+ AccessControl Access, bool IsFromSystemHeader)
+ : CXXClassRecord(USR, Name, Loc, std::move(Availability), Comment,
+ Declaration, SubHeading, RK_ClassTemplate, Access,
+ IsFromSystemHeader),
+ Templ(Template) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_ClassTemplate;
+ }
+};
+
+struct ClassTemplateSpecializationRecord : CXXClassRecord {
+ ClassTemplateSpecializationRecord(
+ StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ AccessControl Access, bool IsFromSystemHeader)
+ : CXXClassRecord(USR, Name, Loc, std::move(Availability), Comment,
+ Declaration, SubHeading, RK_ClassTemplateSpecialization,
+ Access, IsFromSystemHeader) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_ClassTemplateSpecialization;
+ }
+};
+
+struct ClassTemplatePartialSpecializationRecord : CXXClassRecord {
+ Template Templ;
+ ClassTemplatePartialSpecializationRecord(
+ StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ Template Template, AccessControl Access, bool IsFromSystemHeader)
+ : CXXClassRecord(USR, Name, Loc, std::move(Availability), Comment,
+ Declaration, SubHeading, RK_ClassTemplateSpecialization,
+ Access, IsFromSystemHeader),
+ Templ(Template) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_ClassTemplatePartialSpecialization;
+ }
+};
+
+struct ConceptRecord : APIRecord {
+ Template Templ;
+
+ ConceptRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, Template Template,
+ bool IsFromSystemHeader)
+ : APIRecord(RK_Concept, USR, Name, Loc, std::move(Availability),
+ LinkageInfo::none(), Comment, Declaration, SubHeading,
+ IsFromSystemHeader),
+ Templ(Template) {}
+};
+
+/// This holds information associated with Objective-C categories.
+struct ObjCCategoryRecord : ObjCContainerRecord {
+ SymbolReference Interface;
+ /// Determine whether the Category is derived from external class interface.
+ bool IsFromExternalModule = false;
+
+ ObjCCategoryRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, SymbolReference Interface,
+ bool IsFromSystemHeader)
+ : ObjCContainerRecord(RK_ObjCCategory, USR, Name, Loc,
+ std::move(Availability), LinkageInfo::none(),
+ Comment, Declaration, SubHeading,
+ IsFromSystemHeader),
+ Interface(Interface) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_ObjCCategory;
+ }
+
+private:
+ virtual void anchor();
+};
+
+/// This holds information associated with Objective-C interfaces/classes.
+struct ObjCInterfaceRecord : ObjCContainerRecord {
+ SymbolReference SuperClass;
+ // ObjCCategoryRecord%s are stored in and owned by APISet.
+ SmallVector<ObjCCategoryRecord *> Categories;
+
+ ObjCInterfaceRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ SymbolReference SuperClass, bool IsFromSystemHeader)
+ : ObjCContainerRecord(RK_ObjCInterface, USR, Name, Loc,
+ std::move(Availability), Linkage, Comment,
+ Declaration, SubHeading, IsFromSystemHeader),
+ SuperClass(SuperClass) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_ObjCInterface;
+ }
+
+private:
+ virtual void anchor();
+};
+
+/// This holds information associated with Objective-C protocols.
+struct ObjCProtocolRecord : ObjCContainerRecord {
+ ObjCProtocolRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, bool IsFromSystemHeader)
+ : ObjCContainerRecord(RK_ObjCProtocol, USR, Name, Loc,
+ std::move(Availability), LinkageInfo::none(),
+ Comment, Declaration, SubHeading,
+ IsFromSystemHeader) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_ObjCProtocol;
+ }
+
+private:
+ virtual void anchor();
+};
+
+/// This holds information associated with macro definitions.
+struct MacroDefinitionRecord : APIRecord {
+ MacroDefinitionRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ bool IsFromSystemHeader)
+ : APIRecord(RK_MacroDefinition, USR, Name, Loc, AvailabilityInfo(),
+ LinkageInfo(), {}, Declaration, SubHeading,
+ IsFromSystemHeader) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_MacroDefinition;
+ }
+
+private:
+ virtual void anchor();
+};
+
+/// This holds information associated with typedefs.
+///
+/// Note: Typedefs for anonymous enums and structs typically don't get emitted
+/// by the serializers but still get a TypedefRecord. Instead we use the
+/// typedef name as a name for the underlying anonymous struct or enum.
+struct TypedefRecord : APIRecord {
+ SymbolReference UnderlyingType;
+
+ TypedefRecord(StringRef USR, StringRef Name, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, SymbolReference UnderlyingType,
+ bool IsFromSystemHeader)
+ : APIRecord(RK_Typedef, USR, Name, Loc, std::move(Availability),
+ LinkageInfo(), Comment, Declaration, SubHeading,
+ IsFromSystemHeader),
+ UnderlyingType(UnderlyingType) {}
+
+ static bool classof(const APIRecord *Record) {
+ return Record->getKind() == RK_Typedef;
+ }
+
+private:
+ virtual void anchor();
+};
+
+/// Check if a record type has a function signature mixin.
+///
+/// This is denoted by the record type having a ``Signature`` field of type
+/// FunctionSignature.
+template <typename RecordTy>
+struct has_function_signature : public std::false_type {};
+template <>
+struct has_function_signature<GlobalFunctionRecord> : public std::true_type {};
+template <>
+struct has_function_signature<ObjCMethodRecord> : public std::true_type {};
+template <>
+struct has_function_signature<ObjCInstanceMethodRecord>
+ : public std::true_type {};
+template <>
+struct has_function_signature<ObjCClassMethodRecord> : public std::true_type {};
+template <>
+struct has_function_signature<CXXInstanceMethodRecord> : public std::true_type {};
+template <>
+struct has_function_signature<CXXStaticMethodRecord> : public std::true_type {};
+template <>
+struct has_function_signature<CXXMethodTemplateRecord> : public std::true_type {
+};
+template <>
+struct has_function_signature<CXXMethodTemplateSpecializationRecord>
+ : public std::true_type {};
+
+template <typename RecordTy> struct has_access : public std::false_type {};
+template <> struct has_access<CXXInstanceMethodRecord> : public std::true_type {};
+template <> struct has_access<CXXStaticMethodRecord> : public std::true_type {};
+template <> struct has_access<CXXFieldRecord> : public std::true_type {};
+template <>
+struct has_access<CXXMethodTemplateRecord> : public std::true_type {};
+template <>
+struct has_access<CXXMethodTemplateSpecializationRecord>
+ : public std::true_type {};
+template <>
+struct has_access<CXXFieldTemplateRecord> : public std::true_type {};
+template <> struct has_access<CXXClassRecord> : public std::true_type {};
+template <> struct has_access<ClassTemplateRecord> : public std::true_type {};
+template <>
+struct has_access<ClassTemplateSpecializationRecord> : public std::true_type {};
+template <>
+struct has_access<ClassTemplatePartialSpecializationRecord>
+ : public std::true_type {};
+
+template <typename RecordTy> struct has_template : public std::false_type {};
+template <> struct has_template<ClassTemplateRecord> : public std::true_type {};
+template <>
+struct has_template<ClassTemplatePartialSpecializationRecord>
+ : public std::true_type {};
+template <> struct has_template<ConceptRecord> : public std::true_type {};
+template <>
+struct has_template<GlobalVariableTemplateRecord> : public std::true_type {};
+template <>
+struct has_template<GlobalVariableTemplatePartialSpecializationRecord>
+ : public std::true_type {};
+template <>
+struct has_template<CXXMethodTemplateRecord> : public std::true_type {};
+template <>
+struct has_template<CXXFieldTemplateRecord> : public std::true_type {};
+
+template <>
+struct has_template<GlobalFunctionTemplateRecord> : public std::true_type {};
+template <>
+struct has_function_signature<GlobalFunctionTemplateRecord>
+ : public std::true_type {};
+template <>
+struct has_function_signature<GlobalFunctionTemplateSpecializationRecord>
+ : public std::true_type {};
+
+/// APISet holds the set of API records collected from given inputs.
+class APISet {
+public:
+ NamespaceRecord *addNamespace(APIRecord *Parent, StringRef Name,
+ StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability,
+ LinkageInfo Linkage, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ bool IsFromSystemHeaderg);
+ /// Create and add a global variable record into the API set.
+ ///
+ /// Note: the caller is responsible for keeping the StringRef \p Name and
+ /// \p USR alive. APISet::copyString provides a way to copy strings into
+ /// APISet itself, and APISet::recordUSR(const Decl *D) is a helper method
+ /// to generate the USR for \c D and keep it alive in APISet.
+ GlobalVariableRecord *
+ addGlobalVar(StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeadin, bool IsFromSystemHeaderg);
+
+ GlobalVariableTemplateRecord *
+ addGlobalVariableTemplate(StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, Template Template,
+ bool IsFromSystemHeader);
+
+ /// Create and add a function record into the API set.
+ ///
+ /// Note: the caller is responsible for keeping the StringRef \p Name and
+ /// \p USR alive. APISet::copyString provides a way to copy strings into
+ /// APISet itself, and APISet::recordUSR(const Decl *D) is a helper method
+ /// to generate the USR for \c D and keep it alive in APISet.
+ GlobalFunctionRecord *
+ addGlobalFunction(StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ FunctionSignature Signature, bool IsFromSystemHeader);
+
+ GlobalFunctionTemplateRecord *addGlobalFunctionTemplate(
+ StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, FunctionSignature Signature,
+ Template Template, bool IsFromSystemHeader);
+
+ GlobalFunctionTemplateSpecializationRecord *
+ addGlobalFunctionTemplateSpecialization(
+ StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, FunctionSignature Signature,
+ bool IsFromSystemHeader);
+
+ /// Create and add an enum constant record into the API set.
+ ///
+ /// Note: the caller is responsible for keeping the StringRef \p Name and
+ /// \p USR alive. APISet::copyString provides a way to copy strings into
+ /// APISet itself, and APISet::recordUSR(const Decl *D) is a helper method
+ /// to generate the USR for \c D and keep it alive in APISet.
+ EnumConstantRecord *
+ addEnumConstant(EnumRecord *Enum, StringRef Name, StringRef USR,
+ PresumedLoc Loc, AvailabilityInfo Availability,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, bool IsFromSystemHeader);
+
+ /// Create and add an enum record into the API set.
+ ///
+ /// Note: the caller is responsible for keeping the StringRef \p Name and
+ /// \p USR alive. APISet::copyString provides a way to copy strings into
+ /// APISet itself, and APISet::recordUSR(const Decl *D) is a helper method
+ /// to generate the USR for \c D and keep it alive in APISet.
+ EnumRecord *addEnum(StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, bool IsFromSystemHeader);
+
+ /// Create and add a record field record into the API set.
+ ///
+ /// Note: the caller is responsible for keeping the StringRef \p Name and
+ /// \p USR alive. APISet::copyString provides a way to copy strings into
+ /// APISet itself, and APISet::recordUSR(const Decl *D) is a helper method
+ /// to generate the USR for \c D and keep it alive in APISet.
+ RecordFieldRecord *
+ addRecordField(RecordRecord *Record, StringRef Name, StringRef USR,
+ PresumedLoc Loc, AvailabilityInfo Availability,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, APIRecord::RecordKind Kind,
+ bool IsFromSystemHeader);
+
+ /// Create and add a record record into the API set.
+ ///
+ /// Note: the caller is responsible for keeping the StringRef \p Name and
+ /// \p USR alive. APISet::copyString provides a way to copy strings into
+ /// APISet itself, and APISet::recordUSR(const Decl *D) is a helper method
+ /// to generate the USR for \c D and keep it alive in APISet.
+ RecordRecord *addRecord(StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ APIRecord::RecordKind Kind, bool IsFromSystemHeader);
+
+ StaticFieldRecord *
+ addStaticField(StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, SymbolReference Context,
+ AccessControl Access, bool IsFromSystemHeaderg);
+
+ CXXFieldRecord *addCXXField(APIRecord *CXXClass, StringRef Name,
+ StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ AccessControl Access, bool IsFromSystemHeader);
+
+ CXXFieldTemplateRecord *addCXXFieldTemplate(
+ APIRecord *Parent, StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ AccessControl Access, Template Template, bool IsFromSystemHeader);
+
+ CXXClassRecord *addCXXClass(APIRecord *Parent, StringRef Name, StringRef USR,
+ PresumedLoc Loc, AvailabilityInfo Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ APIRecord::RecordKind Kind, AccessControl Access,
+ bool IsFromSystemHeader);
+
+ ClassTemplateRecord *
+ addClassTemplate(APIRecord *Parent, StringRef Name, StringRef USR,
+ PresumedLoc Loc, AvailabilityInfo Availability,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, Template Template,
+ AccessControl Access, bool IsFromSystemHeader);
+
+ ClassTemplateSpecializationRecord *addClassTemplateSpecialization(
+ APIRecord *Parent, StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ AccessControl Access, bool IsFromSystemHeader);
+
+ ClassTemplatePartialSpecializationRecord *
+ addClassTemplatePartialSpecialization(
+ APIRecord *Parent, StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ Template Template, AccessControl Access, bool IsFromSystemHeader);
+
+ GlobalVariableTemplateSpecializationRecord *
+ addGlobalVariableTemplateSpecialization(
+ StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, bool IsFromSystemHeader);
+
+ GlobalVariableTemplatePartialSpecializationRecord *
+ addGlobalVariableTemplatePartialSpecialization(
+ StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, Template Template,
+ bool IsFromSystemHeader);
+
+ CXXMethodRecord *addCXXInstanceMethod(
+ APIRecord *Parent, StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ FunctionSignature Signature, AccessControl Access,
+ bool IsFromSystemHeader);
+
+ CXXMethodRecord *addCXXStaticMethod(
+ APIRecord *Parent, StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ FunctionSignature Signature, AccessControl Access,
+ bool IsFromSystemHeader);
+
+ CXXMethodRecord *addCXXSpecialMethod(
+ APIRecord *Parent, StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ FunctionSignature Signature, AccessControl Access,
+ bool IsFromSystemHeader);
+
+ CXXMethodTemplateRecord *addCXXMethodTemplate(
+ APIRecord *Parent, StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ FunctionSignature Signature, AccessControl Access, Template Template,
+ bool IsFromSystemHeader);
+
+ CXXMethodTemplateSpecializationRecord *addCXXMethodTemplateSpec(
+ APIRecord *Parent, StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ FunctionSignature Signature, AccessControl Access,
+ bool IsFromSystemHeader);
+
+ ConceptRecord *addConcept(StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, Template Template,
+ bool IsFromSystemHeader);
+
+ /// Create and add an Objective-C category record into the API set.
+ ///
+ /// Note: the caller is responsible for keeping the StringRef \p Name and
+ /// \p USR alive. APISet::copyString provides a way to copy strings into
+ /// APISet itself, and APISet::recordUSR(const Decl *D) is a helper method
+ /// to generate the USR for \c D and keep it alive in APISet.
+ ObjCCategoryRecord *
+ addObjCCategory(StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, SymbolReference Interface,
+ bool IsFromSystemHeader, bool IsFromExternalModule);
+
+ /// Create and add an Objective-C interface record into the API set.
+ ///
+ /// Note: the caller is responsible for keeping the StringRef \p Name and
+ /// \p USR alive. APISet::copyString provides a way to copy strings into
+ /// APISet itself, and APISet::recordUSR(const Decl *D) is a helper method
+ /// to generate the USR for \c D and keep it alive in APISet.
+ ObjCInterfaceRecord *
+ addObjCInterface(StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, SymbolReference SuperClass,
+ bool IsFromSystemHeader);
+
+ /// Create and add an Objective-C method record into the API set.
+ ///
+ /// Note: the caller is responsible for keeping the StringRef \p Name and
+ /// \p USR alive. APISet::copyString provides a way to copy strings into
+ /// APISet itself, and APISet::recordUSR(const Decl *D) is a helper method
+ /// to generate the USR for \c D and keep it alive in APISet.
+ ObjCMethodRecord *
+ addObjCMethod(ObjCContainerRecord *Container, StringRef Name, StringRef USR,
+ PresumedLoc Loc, AvailabilityInfo Availability,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, FunctionSignature Signature,
+ bool IsInstanceMethod, bool IsFromSystemHeader);
+
+ /// Create and add an Objective-C property record into the API set.
+ ///
+ /// Note: the caller is responsible for keeping the StringRef \p Name and
+ /// \p USR alive. APISet::copyString provides a way to copy strings into
+ /// APISet itself, and APISet::recordUSR(const Decl *D) is a helper method
+ /// to generate the USR for \c D and keep it alive in APISet.
+ ObjCPropertyRecord *
+ addObjCProperty(ObjCContainerRecord *Container, StringRef Name, StringRef USR,
+ PresumedLoc Loc, AvailabilityInfo Availability,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ ObjCPropertyRecord::AttributeKind Attributes,
+ StringRef GetterName, StringRef SetterName, bool IsOptional,
+ bool IsInstanceProperty, bool IsFromSystemHeader);
+
+ /// Create and add an Objective-C instance variable record into the API set.
+ ///
+ /// Note: the caller is responsible for keeping the StringRef \p Name and
+ /// \p USR alive. APISet::copyString provides a way to copy strings into
+ /// APISet itself, and APISet::recordUSR(const Decl *D) is a helper method
+ /// to generate the USR for \c D and keep it alive in APISet.
+ ObjCInstanceVariableRecord *addObjCInstanceVariable(
+ ObjCContainerRecord *Container, StringRef Name, StringRef USR,
+ PresumedLoc Loc, AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ ObjCInstanceVariableRecord::AccessControl Access,
+ bool IsFromSystemHeader);
+
+ /// Create and add an Objective-C protocol record into the API set.
+ ///
+ /// Note: the caller is responsible for keeping the StringRef \p Name and
+ /// \p USR alive. APISet::copyString provides a way to copy strings into
+ /// APISet itself, and APISet::recordUSR(const Decl *D) is a helper method
+ /// to generate the USR for \c D and keep it alive in APISet.
+ ObjCProtocolRecord *
+ addObjCProtocol(StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, bool IsFromSystemHeader);
+
+ /// Create a macro definition record into the API set.
+ ///
+ /// Note: the caller is responsible for keeping the StringRef \p Name and
+ /// \p USR alive. APISet::copyString provides a way to copy strings into
+ /// APISet itself, and APISet::recordUSRForMacro(StringRef Name,
+ /// SourceLocation SL, const SourceManager &SM) is a helper method to generate
+ /// the USR for the macro and keep it alive in APISet.
+ MacroDefinitionRecord *addMacroDefinition(StringRef Name, StringRef USR,
+ PresumedLoc Loc,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ bool IsFromSystemHeader);
+
+ /// Create a typedef record into the API set.
+ ///
+ /// Note: the caller is responsible for keeping the StringRef \p Name and
+ /// \p USR alive. APISet::copyString provides a way to copy strings into
+ /// APISet itself, and APISet::recordUSR(const Decl *D) is a helper method
+ /// to generate the USR for \c D and keep it alive in APISet.
+ TypedefRecord *
+ addTypedef(StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ SymbolReference UnderlyingType, bool IsFromSystemHeader);
+
+ /// A mapping type to store a set of APIRecord%s with the USR as the key.
+ template <typename RecordTy,
+ typename =
+ std::enable_if_t<std::is_base_of<APIRecord, RecordTy>::value>>
+ using RecordMap = llvm::MapVector<StringRef, std::unique_ptr<RecordTy>>;
+
+ /// Get the target triple for the ExtractAPI invocation.
+ const llvm::Triple &getTarget() const { return Target; }
+
+ /// Get the language used by the APIs.
+ Language getLanguage() const { return Lang; }
+
+ const RecordMap<NamespaceRecord> &getNamespaces() const { return Namespaces; }
+ const RecordMap<GlobalFunctionRecord> &getGlobalFunctions() const {
+ return GlobalFunctions;
+ }
+ const RecordMap<GlobalFunctionTemplateRecord> &
+ getGlobalFunctionTemplates() const {
+ return GlobalFunctionTemplates;
+ }
+ const RecordMap<GlobalFunctionTemplateSpecializationRecord> &
+ getGlobalFunctionTemplateSpecializations() const {
+ return GlobalFunctionTemplateSpecializations;
+ }
+ const RecordMap<GlobalVariableRecord> &getGlobalVariables() const {
+ return GlobalVariables;
+ }
+ const RecordMap<GlobalVariableTemplateRecord> &
+ getGlobalVariableTemplates() const {
+ return GlobalVariableTemplates;
+ }
+ const RecordMap<StaticFieldRecord> &getStaticFields() const {
+ return StaticFields;
+ }
+ const RecordMap<GlobalVariableTemplateSpecializationRecord> &
+ getGlobalVariableTemplateSpecializations() const {
+ return GlobalVariableTemplateSpecializations;
+ }
+ const RecordMap<GlobalVariableTemplatePartialSpecializationRecord> &
+ getGlobalVariableTemplatePartialSpecializations() const {
+ return GlobalVariableTemplatePartialSpecializations;
+ }
+ const RecordMap<EnumRecord> &getEnums() const { return Enums; }
+ const RecordMap<RecordRecord> &getRecords() const { return Records; }
+ const RecordMap<CXXClassRecord> &getCXXClasses() const { return CXXClasses; }
+ const RecordMap<CXXMethodTemplateRecord> &getCXXMethodTemplates() const {
+ return CXXMethodTemplates;
+ }
+ const RecordMap<CXXInstanceMethodRecord> &getCXXInstanceMethods() const {
+ return CXXInstanceMethods;
+ }
+ const RecordMap<CXXStaticMethodRecord> &getCXXStaticMethods() const {
+ return CXXStaticMethods;
+ }
+ const RecordMap<CXXFieldRecord> &getCXXFields() const { return CXXFields; }
+ const RecordMap<CXXMethodTemplateSpecializationRecord> &
+ getCXXMethodTemplateSpecializations() const {
+ return CXXMethodTemplateSpecializations;
+ }
+ const RecordMap<CXXFieldTemplateRecord> &getCXXFieldTemplates() const {
+ return CXXFieldTemplates;
+ }
+ const RecordMap<ClassTemplateRecord> &getClassTemplates() const {
+ return ClassTemplates;
+ }
+ const RecordMap<ClassTemplateSpecializationRecord> &
+ getClassTemplateSpecializations() const {
+ return ClassTemplateSpecializations;
+ }
+ const RecordMap<ClassTemplatePartialSpecializationRecord> &
+ getClassTemplatePartialSpecializations() const {
+ return ClassTemplatePartialSpecializations;
+ }
+ const RecordMap<ConceptRecord> &getConcepts() const { return Concepts; }
+ const RecordMap<ObjCCategoryRecord> &getObjCCategories() const {
+ return ObjCCategories;
+ }
+ const RecordMap<ObjCInterfaceRecord> &getObjCInterfaces() const {
+ return ObjCInterfaces;
+ }
+ const RecordMap<ObjCProtocolRecord> &getObjCProtocols() const {
+ return ObjCProtocols;
+ }
+ const RecordMap<MacroDefinitionRecord> &getMacros() const { return Macros; }
+ const RecordMap<TypedefRecord> &getTypedefs() const { return Typedefs; }
+
+ /// Finds the APIRecord for a given USR.
+ ///
+ /// \returns a pointer to the APIRecord associated with that USR or nullptr.
+ APIRecord *findRecordForUSR(StringRef USR) const;
+
+ /// Generate and store the USR of declaration \p D.
+ ///
+ /// Note: The USR string is stored in and owned by Allocator.
+ ///
+ /// \returns a StringRef of the generated USR string.
+ StringRef recordUSR(const Decl *D);
+
+ /// Generate and store the USR for a macro \p Name.
+ ///
+ /// Note: The USR string is stored in and owned by Allocator.
+ ///
+ /// \returns a StringRef to the generate USR string.
+ StringRef recordUSRForMacro(StringRef Name, SourceLocation SL,
+ const SourceManager &SM);
+
+ /// Copy \p String into the Allocator in this APISet.
+ ///
+ /// \returns a StringRef of the copied string in APISet::Allocator.
+ StringRef copyString(StringRef String);
+
+ APISet(const llvm::Triple &Target, Language Lang,
+ const std::string &ProductName)
+ : Target(Target), Lang(Lang), ProductName(ProductName) {}
+
+private:
+ /// BumpPtrAllocator to store generated/copied strings.
+ ///
+ /// Note: The main use for this is being able to deduplicate strings.
+ llvm::BumpPtrAllocator StringAllocator;
+
+ const llvm::Triple Target;
+ const Language Lang;
+
+ llvm::DenseMap<StringRef, APIRecord *> USRBasedLookupTable;
+ RecordMap<NamespaceRecord> Namespaces;
+ RecordMap<GlobalFunctionRecord> GlobalFunctions;
+ RecordMap<GlobalFunctionTemplateRecord> GlobalFunctionTemplates;
+ RecordMap<GlobalFunctionTemplateSpecializationRecord>
+ GlobalFunctionTemplateSpecializations;
+ RecordMap<GlobalVariableRecord> GlobalVariables;
+ RecordMap<GlobalVariableTemplateRecord> GlobalVariableTemplates;
+ RecordMap<GlobalVariableTemplateSpecializationRecord>
+ GlobalVariableTemplateSpecializations;
+ RecordMap<GlobalVariableTemplatePartialSpecializationRecord>
+ GlobalVariableTemplatePartialSpecializations;
+ RecordMap<ConceptRecord> Concepts;
+ RecordMap<StaticFieldRecord> StaticFields;
+ RecordMap<EnumRecord> Enums;
+ RecordMap<RecordRecord> Records;
+ RecordMap<CXXClassRecord> CXXClasses;
+ RecordMap<CXXFieldRecord> CXXFields;
+ RecordMap<CXXMethodRecord> CXXMethods;
+ RecordMap<CXXInstanceMethodRecord> CXXInstanceMethods;
+ RecordMap<CXXStaticMethodRecord> CXXStaticMethods;
+ RecordMap<CXXMethodTemplateRecord> CXXMethodTemplates;
+ RecordMap<CXXMethodTemplateSpecializationRecord>
+ CXXMethodTemplateSpecializations;
+ RecordMap<CXXFieldTemplateRecord> CXXFieldTemplates;
+ RecordMap<ClassTemplateRecord> ClassTemplates;
+ RecordMap<ClassTemplateSpecializationRecord> ClassTemplateSpecializations;
+ RecordMap<ClassTemplatePartialSpecializationRecord>
+ ClassTemplatePartialSpecializations;
+ RecordMap<ObjCCategoryRecord> ObjCCategories;
+ RecordMap<ObjCInterfaceRecord> ObjCInterfaces;
+ RecordMap<ObjCProtocolRecord> ObjCProtocols;
+ RecordMap<MacroDefinitionRecord> Macros;
+ RecordMap<TypedefRecord> Typedefs;
+
+public:
+ const std::string ProductName;
+};
+
+} // namespace extractapi
+} // namespace clang
+
+#endif // LLVM_CLANG_EXTRACTAPI_API_H
diff --git a/contrib/llvm-project/clang/include/clang/ExtractAPI/APIIgnoresList.h b/contrib/llvm-project/clang/include/clang/ExtractAPI/APIIgnoresList.h
new file mode 100644
index 000000000000..3eee8e336cb6
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/ExtractAPI/APIIgnoresList.h
@@ -0,0 +1,76 @@
+//===- ExtractAPI/APIIgnoresList.h ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file This file defines APIIgnoresList which is a type that allows querying
+/// files containing symbols to ignore when extracting API information.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_API_IGNORES_LIST_H
+#define LLVM_CLANG_API_IGNORES_LIST_H
+
+#include "clang/Basic/FileManager.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <memory>
+#include <system_error>
+
+namespace llvm {
+class MemoryBuffer;
+} // namespace llvm
+
+namespace clang {
+namespace extractapi {
+
+struct IgnoresFileNotFound : public llvm::ErrorInfo<IgnoresFileNotFound> {
+ std::string Path;
+ static char ID;
+
+ explicit IgnoresFileNotFound(StringRef Path) : Path(Path) {}
+
+ virtual void log(llvm::raw_ostream &os) const override;
+
+ virtual std::error_code convertToErrorCode() const override;
+};
+
+/// A type that provides access to a new line separated list of symbol names to
+/// ignore when extracting API information.
+struct APIIgnoresList {
+ using FilePathList = std::vector<std::string>;
+
+ /// The API to use for generating from the files at \p IgnoresFilePathList.
+ ///
+ /// \returns an initialized APIIgnoresList or an Error.
+ static llvm::Expected<APIIgnoresList>
+ create(const FilePathList &IgnoresFilePathList, FileManager &FM);
+
+ APIIgnoresList() = default;
+
+ /// Check if \p SymbolName is specified in the APIIgnoresList and if it should
+ /// therefore be ignored.
+ bool shouldIgnore(llvm::StringRef SymbolName) const;
+
+private:
+ using SymbolNameList = llvm::SmallVector<llvm::StringRef, 32>;
+ using BufferList = llvm::SmallVector<std::unique_ptr<llvm::MemoryBuffer>>;
+
+ APIIgnoresList(SymbolNameList SymbolsToIgnore, BufferList Buffers)
+ : SymbolsToIgnore(std::move(SymbolsToIgnore)),
+ Buffers(std::move(Buffers)) {}
+
+ SymbolNameList SymbolsToIgnore;
+ BufferList Buffers;
+};
+
+} // namespace extractapi
+} // namespace clang
+
+#endif // LLVM_CLANG_API_IGNORES_LIST_H
diff --git a/contrib/llvm-project/clang/include/clang/ExtractAPI/AvailabilityInfo.h b/contrib/llvm-project/clang/include/clang/ExtractAPI/AvailabilityInfo.h
new file mode 100644
index 000000000000..3b8d6f46ed56
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/ExtractAPI/AvailabilityInfo.h
@@ -0,0 +1,76 @@
+//===- ExtractAPI/AvailabilityInfo.h - Availability Info --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the AvailabilityInfo struct that collects availability
+/// attributes of a symbol.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_EXTRACTAPI_AVAILABILITY_INFO_H
+#define LLVM_CLANG_EXTRACTAPI_AVAILABILITY_INFO_H
+
+#include "clang/AST/Decl.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/VersionTuple.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+namespace extractapi {
+
+/// Stores availability attributes of a symbol.
+struct AvailabilityInfo {
+ /// The domain for which this availability info item applies
+ std::string Domain;
+ VersionTuple Introduced;
+ VersionTuple Deprecated;
+ VersionTuple Obsoleted;
+ bool UnconditionallyDeprecated = false;
+ bool UnconditionallyUnavailable = false;
+
+ AvailabilityInfo() = default;
+
+ /// Determine if this AvailabilityInfo represents the default availability.
+ bool isDefault() const { return *this == AvailabilityInfo(); }
+ /// Check if the symbol is unconditionally deprecated.
+ ///
+ /// i.e. \code __attribute__((deprecated)) \endcode
+ bool isUnconditionallyDeprecated() const { return UnconditionallyDeprecated; }
+ /// Check if the symbol is unconditionally unavailable.
+ ///
+ /// i.e. \code __attribute__((unavailable)) \endcode
+ bool isUnconditionallyUnavailable() const {
+ return UnconditionallyUnavailable;
+ }
+
+ AvailabilityInfo(StringRef Domain, VersionTuple I, VersionTuple D,
+ VersionTuple O, bool UD, bool UU)
+ : Domain(Domain), Introduced(I), Deprecated(D), Obsoleted(O),
+ UnconditionallyDeprecated(UD), UnconditionallyUnavailable(UU) {}
+
+ friend bool operator==(const AvailabilityInfo &Lhs,
+ const AvailabilityInfo &Rhs);
+
+public:
+ static AvailabilityInfo createFromDecl(const Decl *Decl);
+};
+
+inline bool operator==(const AvailabilityInfo &Lhs,
+ const AvailabilityInfo &Rhs) {
+ return std::tie(Lhs.Introduced, Lhs.Deprecated, Lhs.Obsoleted,
+ Lhs.UnconditionallyDeprecated,
+ Lhs.UnconditionallyUnavailable) ==
+ std::tie(Rhs.Introduced, Rhs.Deprecated, Rhs.Obsoleted,
+ Rhs.UnconditionallyDeprecated,
+ Rhs.UnconditionallyUnavailable);
+}
+
+} // namespace extractapi
+} // namespace clang
+
+#endif // LLVM_CLANG_EXTRACTAPI_AVAILABILITY_INFO_H
diff --git a/contrib/llvm-project/clang/include/clang/ExtractAPI/DeclarationFragments.h b/contrib/llvm-project/clang/include/clang/ExtractAPI/DeclarationFragments.h
new file mode 100644
index 000000000000..1b78c8b5931e
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/ExtractAPI/DeclarationFragments.h
@@ -0,0 +1,454 @@
+//===- ExtractAPI/DeclarationFragments.h ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the Declaration Fragments related classes.
+///
+/// Declaration Fragments represent parts of a symbol declaration tagged with
+/// syntactic/semantic information.
+/// See https://github.com/apple/swift-docc-symbolkit
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_EXTRACTAPI_DECLARATION_FRAGMENTS_H
+#define LLVM_CLANG_EXTRACTAPI_DECLARATION_FRAGMENTS_H
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/Specifiers.h"
+#include "clang/Lex/MacroInfo.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include <vector>
+
+namespace clang {
+namespace extractapi {
+
+/// DeclarationFragments is a vector of tagged important parts of a symbol's
+/// declaration.
+///
+/// The fragments sequence can be joined to form spans of declaration text, with
+/// attached information useful for purposes like syntax-highlighting etc.
+/// For example:
+/// \code
+/// const -> keyword "const"
+/// int -> type "int"
+/// pi; -> identifier "pi"
+/// \endcode
+class DeclarationFragments {
+public:
+ DeclarationFragments() = default;
+
+ /// The kind of a fragment.
+ enum class FragmentKind {
+ /// Unknown fragment kind.
+ None,
+
+ Keyword,
+ Attribute,
+ NumberLiteral,
+ StringLiteral,
+ Identifier,
+
+ /// Identifier that refers to a type in the context.
+ TypeIdentifier,
+
+ /// Parameter that's used as generics in the context. For example template
+ /// parameters.
+ GenericParameter,
+
+ /// External parameters in Objective-C methods.
+ /// For example, \c forKey in
+ /// \code{.m}
+ /// - (void) setValue:(Value)value forKey(Key)key
+ /// \endcode
+ ExternalParam,
+
+ /// Internal/local parameters in Objective-C methods.
+ /// For example, \c key in
+ /// \code{.m}
+ /// - (void) setValue:(Value)value forKey(Key)key
+ /// \endcode
+ InternalParam,
+
+ Text,
+ };
+
+ /// Fragment holds information of a single fragment.
+ struct Fragment {
+ std::string Spelling;
+ FragmentKind Kind;
+
+ /// The USR of the fragment symbol, if applicable.
+ std::string PreciseIdentifier;
+
+ /// The associated declaration, if applicable. This is not intended to be
+ /// used outside of libclang.
+ const Decl *Declaration;
+
+ Fragment(StringRef Spelling, FragmentKind Kind, StringRef PreciseIdentifier,
+ const Decl *Declaration)
+ : Spelling(Spelling), Kind(Kind), PreciseIdentifier(PreciseIdentifier),
+ Declaration(Declaration) {}
+ };
+
+ using FragmentIterator = std::vector<Fragment>::iterator;
+ using ConstFragmentIterator = std::vector<Fragment>::const_iterator;
+
+ const std::vector<Fragment> &getFragments() const { return Fragments; }
+
+ FragmentIterator begin() { return Fragments.begin(); }
+
+ FragmentIterator end() { return Fragments.end(); }
+
+ ConstFragmentIterator cbegin() const { return Fragments.cbegin(); }
+
+ ConstFragmentIterator cend() const { return Fragments.cend(); }
+
+ // Add a new Fragment at an arbitrary offset.
+ DeclarationFragments &insert(FragmentIterator It, StringRef Spelling,
+ FragmentKind Kind,
+ StringRef PreciseIdentifier = "",
+ const Decl *Declaration = nullptr) {
+ Fragments.insert(It,
+ Fragment(Spelling, Kind, PreciseIdentifier, Declaration));
+ return *this;
+ }
+
+ DeclarationFragments &insert(FragmentIterator It,
+ DeclarationFragments &&Other) {
+ Fragments.insert(It, std::make_move_iterator(Other.Fragments.begin()),
+ std::make_move_iterator(Other.Fragments.end()));
+ Other.Fragments.clear();
+ return *this;
+ }
+
+ /// Append a new Fragment to the end of the Fragments.
+ ///
+ /// \returns a reference to the DeclarationFragments object itself after
+ /// appending to chain up consecutive appends.
+ DeclarationFragments &append(StringRef Spelling, FragmentKind Kind,
+ StringRef PreciseIdentifier = "",
+ const Decl *Declaration = nullptr) {
+ if (Kind == FragmentKind::Text && !Fragments.empty() &&
+ Fragments.back().Kind == FragmentKind::Text) {
+ // If appending a text fragment, and the last fragment is also text,
+ // merge into the last fragment.
+ Fragments.back().Spelling.append(Spelling.data(), Spelling.size());
+ } else {
+ Fragments.emplace_back(Spelling, Kind, PreciseIdentifier, Declaration);
+ }
+ return *this;
+ }
+
+ /// Append another DeclarationFragments to the end.
+ ///
+ /// Note: \p Other is moved from and cannot be used after a call to this
+ /// method.
+ ///
+ /// \returns a reference to the DeclarationFragments object itself after
+ /// appending to chain up consecutive appends.
+ DeclarationFragments &append(DeclarationFragments &&Other) {
+ Fragments.insert(Fragments.end(),
+ std::make_move_iterator(Other.Fragments.begin()),
+ std::make_move_iterator(Other.Fragments.end()));
+ Other.Fragments.clear();
+ return *this;
+ }
+
+ DeclarationFragments &pop_back() {
+ Fragments.pop_back();
+ return *this;
+ }
+
+ DeclarationFragments &replace(std::string NewSpelling, unsigned Position) {
+ Fragments.at(Position).Spelling = NewSpelling;
+ return *this;
+ }
+
+ /// Append a text Fragment of a space character.
+ ///
+ /// \returns a reference to the DeclarationFragments object itself after
+ /// appending to chain up consecutive appends.
+ DeclarationFragments &appendSpace();
+
+ /// Get the string description of a FragmentKind \p Kind.
+ static StringRef getFragmentKindString(FragmentKind Kind);
+
+ /// Get the corresponding FragmentKind from string \p S.
+ static FragmentKind parseFragmentKindFromString(StringRef S);
+
+ static DeclarationFragments
+ getExceptionSpecificationString(ExceptionSpecificationType ExceptionSpec);
+
+ static DeclarationFragments getStructureTypeFragment(const RecordDecl *Decl);
+
+private:
+ std::vector<Fragment> Fragments;
+};
+
+class AccessControl {
+public:
+ AccessControl(std::string Access) : Access(Access) {}
+
+ const std::string &getAccess() const { return Access; }
+
+ bool empty() const { return Access.empty(); }
+
+private:
+ std::string Access;
+};
+
+/// Store function signature information with DeclarationFragments of the
+/// return type and parameters.
+class FunctionSignature {
+public:
+ FunctionSignature() = default;
+
+ /// Parameter holds the name and DeclarationFragments of a single parameter.
+ struct Parameter {
+ std::string Name;
+ DeclarationFragments Fragments;
+
+ Parameter(StringRef Name, DeclarationFragments Fragments)
+ : Name(Name), Fragments(Fragments) {}
+ };
+
+ const std::vector<Parameter> &getParameters() const { return Parameters; }
+ const DeclarationFragments &getReturnType() const { return ReturnType; }
+
+ FunctionSignature &addParameter(StringRef Name,
+ DeclarationFragments Fragments) {
+ Parameters.emplace_back(Name, Fragments);
+ return *this;
+ }
+
+ void setReturnType(DeclarationFragments RT) { ReturnType = RT; }
+
+ /// Determine if the FunctionSignature is empty.
+ ///
+ /// \returns true if the return type DeclarationFragments is empty and there
+ /// is no parameter, otherwise false.
+ bool empty() const {
+ return Parameters.empty() && ReturnType.getFragments().empty();
+ }
+
+private:
+ std::vector<Parameter> Parameters;
+ DeclarationFragments ReturnType;
+};
+
+/// A factory class to build DeclarationFragments for different kinds of Decl.
+class DeclarationFragmentsBuilder {
+public:
+ /// Build FunctionSignature for a function-like declaration \c FunctionT like
+ /// FunctionDecl, ObjCMethodDecl, or CXXMethodDecl.
+ ///
+ /// The logic and implementation of building a signature for a FunctionDecl,
+ /// CXXMethodDecl, and ObjCMethodDecl are exactly the same, but they do not
+ /// share a common base. This template helps reuse the code.
+ template <typename FunctionT>
+ static FunctionSignature getFunctionSignature(const FunctionT *Function);
+
+ static AccessControl getAccessControl(const Decl *Decl) {
+ switch (Decl->getAccess()) {
+ case AS_public:
+ case AS_none:
+ return AccessControl("public");
+ case AS_private:
+ return AccessControl("private");
+ case AS_protected:
+ return AccessControl("protected");
+ }
+ llvm_unreachable("Unhandled access control");
+ }
+
+ static DeclarationFragments
+ getFragmentsForNamespace(const NamespaceDecl *Decl);
+
+ /// Build DeclarationFragments for a variable declaration VarDecl.
+ static DeclarationFragments getFragmentsForVar(const VarDecl *);
+
+ static DeclarationFragments getFragmentsForVarTemplate(const VarDecl *);
+
+ /// Build DeclarationFragments for a function declaration FunctionDecl.
+ static DeclarationFragments getFragmentsForFunction(const FunctionDecl *);
+
+ /// Build DeclarationFragments for an enum constant declaration
+ /// EnumConstantDecl.
+ static DeclarationFragments
+ getFragmentsForEnumConstant(const EnumConstantDecl *);
+
+ /// Build DeclarationFragments for an enum declaration EnumDecl.
+ static DeclarationFragments getFragmentsForEnum(const EnumDecl *);
+
+ /// Build DeclarationFragments for a field declaration FieldDecl.
+ static DeclarationFragments getFragmentsForField(const FieldDecl *);
+
+ /// Build DeclarationFragments for a struct/union record declaration
+ /// RecordDecl.
+ static DeclarationFragments getFragmentsForRecordDecl(const RecordDecl *);
+
+ static DeclarationFragments getFragmentsForCXXClass(const CXXRecordDecl *);
+
+ static DeclarationFragments
+ getFragmentsForSpecialCXXMethod(const CXXMethodDecl *);
+
+ static DeclarationFragments getFragmentsForCXXMethod(const CXXMethodDecl *);
+
+ static DeclarationFragments
+ getFragmentsForConversionFunction(const CXXConversionDecl *);
+
+ static DeclarationFragments
+ getFragmentsForOverloadedOperator(const CXXMethodDecl *);
+
+ static DeclarationFragments
+ getFragmentsForTemplateParameters(ArrayRef<NamedDecl *>);
+
+ static std::string
+ getNameForTemplateArgument(const ArrayRef<NamedDecl *>, std::string);
+
+ static DeclarationFragments
+ getFragmentsForTemplateArguments(const ArrayRef<TemplateArgument>,
+ ASTContext &,
+ const std::optional<ArrayRef<NamedDecl *>>);
+
+ static DeclarationFragments getFragmentsForConcept(const ConceptDecl *);
+
+ static DeclarationFragments
+ getFragmentsForRedeclarableTemplate(const RedeclarableTemplateDecl *);
+
+ static DeclarationFragments getFragmentsForClassTemplateSpecialization(
+ const ClassTemplateSpecializationDecl *);
+
+ static DeclarationFragments getFragmentsForClassTemplatePartialSpecialization(
+ const ClassTemplatePartialSpecializationDecl *);
+
+ static DeclarationFragments getFragmentsForVarTemplateSpecialization(
+ const VarTemplateSpecializationDecl *);
+
+ static DeclarationFragments getFragmentsForVarTemplatePartialSpecialization(
+ const VarTemplatePartialSpecializationDecl *);
+
+ static DeclarationFragments
+ getFragmentsForFunctionTemplate(const FunctionTemplateDecl *Decl);
+
+ static DeclarationFragments
+ getFragmentsForFunctionTemplateSpecialization(const FunctionDecl *Decl);
+
+ /// Build DeclarationFragments for an Objective-C category declaration
+ /// ObjCCategoryDecl.
+ static DeclarationFragments
+ getFragmentsForObjCCategory(const ObjCCategoryDecl *);
+
+ /// Build DeclarationFragments for an Objective-C interface declaration
+ /// ObjCInterfaceDecl.
+ static DeclarationFragments
+ getFragmentsForObjCInterface(const ObjCInterfaceDecl *);
+
+ /// Build DeclarationFragments for an Objective-C method declaration
+ /// ObjCMethodDecl.
+ static DeclarationFragments getFragmentsForObjCMethod(const ObjCMethodDecl *);
+
+ /// Build DeclarationFragments for an Objective-C property declaration
+ /// ObjCPropertyDecl.
+ static DeclarationFragments
+ getFragmentsForObjCProperty(const ObjCPropertyDecl *);
+
+ /// Build DeclarationFragments for an Objective-C protocol declaration
+ /// ObjCProtocolDecl.
+ static DeclarationFragments
+ getFragmentsForObjCProtocol(const ObjCProtocolDecl *);
+
+ /// Build DeclarationFragments for a macro.
+ ///
+ /// \param Name name of the macro.
+ /// \param MD the associated MacroDirective.
+ static DeclarationFragments getFragmentsForMacro(StringRef Name,
+ const MacroDirective *MD);
+
+ /// Build DeclarationFragments for a typedef \p TypedefNameDecl.
+ static DeclarationFragments
+ getFragmentsForTypedef(const TypedefNameDecl *Decl);
+
+ /// Build sub-heading fragments for a NamedDecl.
+ static DeclarationFragments getSubHeading(const NamedDecl *);
+
+ /// Build sub-heading fragments for an Objective-C method.
+ static DeclarationFragments getSubHeading(const ObjCMethodDecl *);
+
+ /// Build a sub-heading for macro \p Name.
+ static DeclarationFragments getSubHeadingForMacro(StringRef Name);
+
+private:
+ DeclarationFragmentsBuilder() = delete;
+
+ /// Build DeclarationFragments for a QualType.
+ static DeclarationFragments getFragmentsForType(const QualType, ASTContext &,
+ DeclarationFragments &);
+
+ /// Build DeclarationFragments for a Type.
+ static DeclarationFragments getFragmentsForType(const Type *, ASTContext &,
+ DeclarationFragments &);
+
+ /// Build DeclarationFragments for a NestedNameSpecifier.
+ static DeclarationFragments getFragmentsForNNS(const NestedNameSpecifier *,
+ ASTContext &,
+ DeclarationFragments &);
+
+ /// Build DeclarationFragments for Qualifiers.
+ static DeclarationFragments getFragmentsForQualifiers(const Qualifiers quals);
+
+ /// Build DeclarationFragments for a parameter variable declaration
+ /// ParmVarDecl.
+ static DeclarationFragments getFragmentsForParam(const ParmVarDecl *);
+
+ static DeclarationFragments
+ getFragmentsForBlock(const NamedDecl *BlockDecl, FunctionTypeLoc &Block,
+ FunctionProtoTypeLoc &BlockProto,
+ DeclarationFragments &After);
+};
+
+template <typename FunctionT>
+FunctionSignature
+DeclarationFragmentsBuilder::getFunctionSignature(const FunctionT *Function) {
+ FunctionSignature Signature;
+
+ DeclarationFragments ReturnType, After;
+ ReturnType = getFragmentsForType(Function->getReturnType(),
+ Function->getASTContext(), After);
+ if (isa<FunctionDecl>(Function) &&
+ dyn_cast<FunctionDecl>(Function)->getDescribedFunctionTemplate() &&
+ ReturnType.begin()->Spelling.substr(0, 14).compare("type-parameter") ==
+ 0) {
+ std::string ProperArgName =
+ getNameForTemplateArgument(dyn_cast<FunctionDecl>(Function)
+ ->getDescribedFunctionTemplate()
+ ->getTemplateParameters()
+ ->asArray(),
+ ReturnType.begin()->Spelling);
+ ReturnType.begin()->Spelling.swap(ProperArgName);
+ }
+ ReturnType.append(std::move(After));
+ Signature.setReturnType(ReturnType);
+
+ for (const auto *Param : Function->parameters())
+ Signature.addParameter(Param->getName(), getFragmentsForParam(Param));
+
+ return Signature;
+}
+
+} // namespace extractapi
+} // namespace clang
+
+#endif // LLVM_CLANG_EXTRACTAPI_DECLARATION_FRAGMENTS_H
diff --git a/contrib/llvm-project/clang/include/clang/ExtractAPI/ExtractAPIActionBase.h b/contrib/llvm-project/clang/include/clang/ExtractAPI/ExtractAPIActionBase.h
new file mode 100644
index 000000000000..ac4f391db5f1
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/ExtractAPI/ExtractAPIActionBase.h
@@ -0,0 +1,54 @@
+//===- ExtractAPI/ExtractAPIActionBase.h -----------------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the ExtractAPIActionBase class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_EXTRACTAPI_ACTION_BASE_H
+#define LLVM_CLANG_EXTRACTAPI_ACTION_BASE_H
+
+#include "clang/ExtractAPI/API.h"
+#include "clang/ExtractAPI/APIIgnoresList.h"
+
+namespace clang {
+
+/// Base class to be used by front end actions to generate ExtarctAPI info
+///
+/// Deriving from this class equips an action with all the necessary tools to
+/// generate ExractAPI information in form of symbol-graphs
+class ExtractAPIActionBase {
+protected:
+ /// A representation of the APIs this action extracts.
+ std::unique_ptr<extractapi::APISet> API;
+
+ /// A stream to the output file of this action.
+ std::unique_ptr<raw_pwrite_stream> OS;
+
+ /// The product this action is extracting API information for.
+ std::string ProductName;
+
+ /// The synthesized input buffer that contains all the provided input header
+ /// files.
+ std::unique_ptr<llvm::MemoryBuffer> Buffer;
+
+ /// The list of symbols to ignore during serialization
+ extractapi::APIIgnoresList IgnoresList;
+
+ /// Implements EndSourceFileAction for Symbol-Graph generation
+ ///
+ /// Use the serializer to generate output symbol graph files from
+ /// the information gathered during the execution of Action.
+ void ImplEndSourceFileAction();
+};
+
+} // namespace clang
+
+#endif // LLVM_CLANG_EXTRACTAPI_ACTION_BASE_H
diff --git a/contrib/llvm-project/clang/include/clang/ExtractAPI/ExtractAPIVisitor.h b/contrib/llvm-project/clang/include/clang/ExtractAPI/ExtractAPIVisitor.h
new file mode 100644
index 000000000000..ac6f4e313540
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/ExtractAPI/ExtractAPIVisitor.h
@@ -0,0 +1,1426 @@
+//===- ExtractAPI/ExtractAPIVisitor.h ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the ExtractAPVisitor AST visitation interface.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_EXTRACTAPI_EXTRACT_API_VISITOR_H
+#define LLVM_CLANG_EXTRACTAPI_EXTRACT_API_VISITOR_H
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/Basic/OperatorKinds.h"
+#include "clang/Basic/Specifiers.h"
+#include "clang/ExtractAPI/AvailabilityInfo.h"
+#include "clang/ExtractAPI/DeclarationFragments.h"
+#include "llvm/ADT/FunctionExtras.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ParentMapContext.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/ExtractAPI/API.h"
+#include "clang/ExtractAPI/TypedefUnderlyingTypeResolver.h"
+#include "clang/Index/USRGeneration.h"
+#include "llvm/ADT/StringRef.h"
+#include <type_traits>
+
+namespace clang {
+namespace extractapi {
+namespace impl {
+
+template <typename Derived>
+class ExtractAPIVisitorBase : public RecursiveASTVisitor<Derived> {
+protected:
+ ExtractAPIVisitorBase(ASTContext &Context, APISet &API)
+ : Context(Context), API(API) {}
+
+public:
+ const APISet &getAPI() const { return API; }
+
+ bool VisitVarDecl(const VarDecl *Decl);
+
+ bool VisitFunctionDecl(const FunctionDecl *Decl);
+
+ bool VisitEnumDecl(const EnumDecl *Decl);
+
+ bool WalkUpFromFunctionDecl(const FunctionDecl *Decl);
+
+ bool WalkUpFromRecordDecl(const RecordDecl *Decl);
+
+ bool WalkUpFromCXXRecordDecl(const CXXRecordDecl *Decl);
+
+ bool WalkUpFromCXXMethodDecl(const CXXMethodDecl *Decl);
+
+ bool WalkUpFromClassTemplateSpecializationDecl(
+ const ClassTemplateSpecializationDecl *Decl);
+
+ bool WalkUpFromClassTemplatePartialSpecializationDecl(
+ const ClassTemplatePartialSpecializationDecl *Decl);
+
+ bool WalkUpFromVarTemplateDecl(const VarTemplateDecl *Decl);
+
+ bool WalkUpFromVarTemplateSpecializationDecl(
+ const VarTemplateSpecializationDecl *Decl);
+
+ bool WalkUpFromVarTemplatePartialSpecializationDecl(
+ const VarTemplatePartialSpecializationDecl *Decl);
+
+ bool WalkUpFromFunctionTemplateDecl(const FunctionTemplateDecl *Decl);
+
+ bool WalkUpFromNamespaceDecl(const NamespaceDecl *Decl);
+
+ bool VisitNamespaceDecl(const NamespaceDecl *Decl);
+
+ bool VisitRecordDecl(const RecordDecl *Decl);
+
+ bool VisitCXXRecordDecl(const CXXRecordDecl *Decl);
+
+ bool VisitCXXMethodDecl(const CXXMethodDecl *Decl);
+
+ bool VisitFieldDecl(const FieldDecl *Decl);
+
+ bool VisitCXXConversionDecl(const CXXConversionDecl *Decl);
+
+ bool VisitCXXConstructorDecl(const CXXConstructorDecl *Decl);
+
+ bool VisitCXXDestructorDecl(const CXXDestructorDecl *Decl);
+
+ bool VisitConceptDecl(const ConceptDecl *Decl);
+
+ bool VisitClassTemplateSpecializationDecl(
+ const ClassTemplateSpecializationDecl *Decl);
+
+ bool VisitClassTemplatePartialSpecializationDecl(
+ const ClassTemplatePartialSpecializationDecl *Decl);
+
+ bool VisitVarTemplateDecl(const VarTemplateDecl *Decl);
+
+ bool
+ VisitVarTemplateSpecializationDecl(const VarTemplateSpecializationDecl *Decl);
+
+ bool VisitVarTemplatePartialSpecializationDecl(
+ const VarTemplatePartialSpecializationDecl *Decl);
+
+ bool VisitFunctionTemplateDecl(const FunctionTemplateDecl *Decl);
+
+ bool VisitObjCInterfaceDecl(const ObjCInterfaceDecl *Decl);
+
+ bool VisitObjCProtocolDecl(const ObjCProtocolDecl *Decl);
+
+ bool VisitTypedefNameDecl(const TypedefNameDecl *Decl);
+
+ bool VisitObjCCategoryDecl(const ObjCCategoryDecl *Decl);
+
+ bool shouldDeclBeIncluded(const Decl *Decl) const;
+
+ const RawComment *fetchRawCommentForDecl(const Decl *Decl) const;
+
+protected:
+ /// Collect API information for the enum constants and associate with the
+ /// parent enum.
+ void recordEnumConstants(EnumRecord *EnumRecord,
+ const EnumDecl::enumerator_range Constants);
+
+ /// Collect API information for the record fields and associate with the
+ /// parent struct.
+ void recordRecordFields(RecordRecord *RecordRecord,
+ APIRecord::RecordKind FieldKind,
+ const RecordDecl::field_range Fields);
+
+ /// Collect API information for the Objective-C methods and associate with the
+ /// parent container.
+ void recordObjCMethods(ObjCContainerRecord *Container,
+ const ObjCContainerDecl::method_range Methods);
+
+ void recordObjCProperties(ObjCContainerRecord *Container,
+ const ObjCContainerDecl::prop_range Properties);
+
+ void recordObjCInstanceVariables(
+ ObjCContainerRecord *Container,
+ const llvm::iterator_range<
+ DeclContext::specific_decl_iterator<ObjCIvarDecl>>
+ Ivars);
+
+ void recordObjCProtocols(ObjCContainerRecord *Container,
+ ObjCInterfaceDecl::protocol_range Protocols);
+
+ ASTContext &Context;
+ APISet &API;
+
+ StringRef getTypedefName(const TagDecl *Decl) {
+ if (const auto *TypedefDecl = Decl->getTypedefNameForAnonDecl())
+ return TypedefDecl->getName();
+
+ return {};
+ }
+
+ bool isInSystemHeader(const Decl *D) {
+ return Context.getSourceManager().isInSystemHeader(D->getLocation());
+ }
+
+private:
+ Derived &getDerivedExtractAPIVisitor() {
+ return *static_cast<Derived *>(this);
+ }
+
+ SmallVector<SymbolReference> getBases(const CXXRecordDecl *Decl) {
+ // FIXME: store AccessSpecifier given by inheritance
+ SmallVector<SymbolReference> Bases;
+ for (const auto &BaseSpecifier : Decl->bases()) {
+ // skip classes not inherited as public
+ if (BaseSpecifier.getAccessSpecifier() != AccessSpecifier::AS_public)
+ continue;
+ SymbolReference BaseClass;
+ if (BaseSpecifier.getType().getTypePtr()->isTemplateTypeParmType()) {
+ BaseClass.Name = API.copyString(BaseSpecifier.getType().getAsString());
+ BaseClass.USR = API.recordUSR(
+ BaseSpecifier.getType()->getAs<TemplateTypeParmType>()->getDecl());
+ } else {
+ CXXRecordDecl *BaseClassDecl =
+ BaseSpecifier.getType().getTypePtr()->getAsCXXRecordDecl();
+ BaseClass.Name = BaseClassDecl->getName();
+ BaseClass.USR = API.recordUSR(BaseClassDecl);
+ }
+ Bases.emplace_back(BaseClass);
+ }
+ return Bases;
+ }
+
+ APIRecord *determineParentRecord(const DeclContext *Context) {
+ SmallString<128> ParentUSR;
+ if (Context->getDeclKind() == Decl::TranslationUnit)
+ return nullptr;
+
+ index::generateUSRForDecl(dyn_cast<Decl>(Context), ParentUSR);
+
+ APIRecord *Parent = API.findRecordForUSR(ParentUSR);
+ return Parent;
+ }
+};
+
+template <typename T>
+static void modifyRecords(const T &Records, const StringRef &Name) {
+ for (const auto &Record : Records) {
+ if (Name == Record.second.get()->Name) {
+ auto &DeclFragment = Record.second->Declaration;
+ DeclFragment.insert(DeclFragment.begin(), " ",
+ DeclarationFragments::FragmentKind::Text);
+ DeclFragment.insert(DeclFragment.begin(), "typedef",
+ DeclarationFragments::FragmentKind::Keyword, "",
+ nullptr);
+ DeclFragment.insert(--DeclFragment.end(), " { ... } ",
+ DeclarationFragments::FragmentKind::Text);
+ DeclFragment.insert(--DeclFragment.end(), Name,
+ DeclarationFragments::FragmentKind::Identifier);
+ break;
+ }
+ }
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitVarDecl(const VarDecl *Decl) {
+ // skip function parameters.
+ if (isa<ParmVarDecl>(Decl))
+ return true;
+
+ // Skip non-global variables in records (struct/union/class) but not static
+ // members.
+ if (Decl->getDeclContext()->isRecord() && !Decl->isStaticDataMember())
+ return true;
+
+ // Skip local variables inside function or method.
+ if (!Decl->isDefinedOutsideFunctionOrMethod())
+ return true;
+
+ // If this is a template but not specialization or instantiation, skip.
+ if (Decl->getASTContext().getTemplateOrSpecializationInfo(Decl) &&
+ Decl->getTemplateSpecializationKind() == TSK_Undeclared)
+ return true;
+
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ LinkageInfo Linkage = Decl->getLinkageAndVisibility();
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the variable.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForVar(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+ if (Decl->isStaticDataMember()) {
+ SymbolReference Context;
+ // getDeclContext() should return a RecordDecl since we
+ // are currently handling a static data member.
+ auto *Record = cast<RecordDecl>(Decl->getDeclContext());
+ Context.Name = Record->getName();
+ Context.USR = API.recordUSR(Record);
+ auto Access = DeclarationFragmentsBuilder::getAccessControl(Decl);
+ API.addStaticField(Name, USR, Loc, AvailabilityInfo::createFromDecl(Decl),
+ Linkage, Comment, Declaration, SubHeading, Context,
+ Access, isInSystemHeader(Decl));
+ } else
+ // Add the global variable record to the API set.
+ API.addGlobalVar(Name, USR, Loc, AvailabilityInfo::createFromDecl(Decl),
+ Linkage, Comment, Declaration, SubHeading,
+ isInSystemHeader(Decl));
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitFunctionDecl(
+ const FunctionDecl *Decl) {
+ if (const auto *Method = dyn_cast<CXXMethodDecl>(Decl)) {
+ // Skip member function in class templates.
+ if (Method->getParent()->getDescribedClassTemplate() != nullptr)
+ return true;
+
+ // Skip methods in records.
+ for (const auto &P : Context.getParents(*Method)) {
+ if (P.template get<CXXRecordDecl>())
+ return true;
+ }
+
+ // Skip ConstructorDecl and DestructorDecl.
+ if (isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method))
+ return true;
+ }
+
+ // Skip templated functions.
+ switch (Decl->getTemplatedKind()) {
+ case FunctionDecl::TK_NonTemplate:
+ case FunctionDecl::TK_DependentNonTemplate:
+ case FunctionDecl::TK_FunctionTemplateSpecialization:
+ break;
+ case FunctionDecl::TK_FunctionTemplate:
+ case FunctionDecl::TK_DependentFunctionTemplateSpecialization:
+ case FunctionDecl::TK_MemberSpecialization:
+ return true;
+ }
+
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ LinkageInfo Linkage = Decl->getLinkageAndVisibility();
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments, sub-heading, and signature of the function.
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+ FunctionSignature Signature =
+ DeclarationFragmentsBuilder::getFunctionSignature(Decl);
+ if (Decl->getTemplateSpecializationInfo())
+ API.addGlobalFunctionTemplateSpecialization(
+ Name, USR, Loc, AvailabilityInfo::createFromDecl(Decl), Linkage,
+ Comment,
+ DeclarationFragmentsBuilder::
+ getFragmentsForFunctionTemplateSpecialization(Decl),
+ SubHeading, Signature, isInSystemHeader(Decl));
+ else
+ // Add the function record to the API set.
+ API.addGlobalFunction(
+ Name, USR, Loc, AvailabilityInfo::createFromDecl(Decl), Linkage,
+ Comment, DeclarationFragmentsBuilder::getFragmentsForFunction(Decl),
+ SubHeading, Signature, isInSystemHeader(Decl));
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitEnumDecl(const EnumDecl *Decl) {
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+
+ SmallString<128> QualifiedNameBuffer;
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ if (Name.empty())
+ Name = getTypedefName(Decl);
+ if (Name.empty()) {
+ llvm::raw_svector_ostream OS(QualifiedNameBuffer);
+ Decl->printQualifiedName(OS);
+ Name = QualifiedNameBuffer.str();
+ }
+
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the enum.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForEnum(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+ EnumRecord *EnumRecord = API.addEnum(
+ API.copyString(Name), USR, Loc, AvailabilityInfo::createFromDecl(Decl),
+ Comment, Declaration, SubHeading, isInSystemHeader(Decl));
+
+ // Now collect information about the enumerators in this enum.
+ getDerivedExtractAPIVisitor().recordEnumConstants(EnumRecord,
+ Decl->enumerators());
+
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::WalkUpFromFunctionDecl(
+ const FunctionDecl *Decl) {
+ getDerivedExtractAPIVisitor().VisitFunctionDecl(Decl);
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::WalkUpFromRecordDecl(
+ const RecordDecl *Decl) {
+ getDerivedExtractAPIVisitor().VisitRecordDecl(Decl);
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::WalkUpFromCXXRecordDecl(
+ const CXXRecordDecl *Decl) {
+ getDerivedExtractAPIVisitor().VisitCXXRecordDecl(Decl);
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::WalkUpFromCXXMethodDecl(
+ const CXXMethodDecl *Decl) {
+ getDerivedExtractAPIVisitor().VisitCXXMethodDecl(Decl);
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::WalkUpFromClassTemplateSpecializationDecl(
+ const ClassTemplateSpecializationDecl *Decl) {
+ getDerivedExtractAPIVisitor().VisitClassTemplateSpecializationDecl(Decl);
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::
+ WalkUpFromClassTemplatePartialSpecializationDecl(
+ const ClassTemplatePartialSpecializationDecl *Decl) {
+ getDerivedExtractAPIVisitor().VisitClassTemplatePartialSpecializationDecl(
+ Decl);
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::WalkUpFromVarTemplateDecl(
+ const VarTemplateDecl *Decl) {
+ getDerivedExtractAPIVisitor().VisitVarTemplateDecl(Decl);
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::WalkUpFromVarTemplateSpecializationDecl(
+ const VarTemplateSpecializationDecl *Decl) {
+ getDerivedExtractAPIVisitor().VisitVarTemplateSpecializationDecl(Decl);
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::
+ WalkUpFromVarTemplatePartialSpecializationDecl(
+ const VarTemplatePartialSpecializationDecl *Decl) {
+ getDerivedExtractAPIVisitor().VisitVarTemplatePartialSpecializationDecl(Decl);
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::WalkUpFromFunctionTemplateDecl(
+ const FunctionTemplateDecl *Decl) {
+ getDerivedExtractAPIVisitor().VisitFunctionTemplateDecl(Decl);
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::WalkUpFromNamespaceDecl(
+ const NamespaceDecl *Decl) {
+ getDerivedExtractAPIVisitor().VisitNamespaceDecl(Decl);
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitNamespaceDecl(
+ const NamespaceDecl *Decl) {
+
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+ if (Decl->isAnonymousNamespace())
+ return true;
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ LinkageInfo Linkage = Decl->getLinkageAndVisibility();
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the struct.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForNamespace(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+ APIRecord *Parent = determineParentRecord(Decl->getDeclContext());
+ API.addNamespace(Parent, Name, USR, Loc,
+ AvailabilityInfo::createFromDecl(Decl), Linkage, Comment,
+ Declaration, SubHeading, isInSystemHeader(Decl));
+
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitRecordDecl(const RecordDecl *Decl) {
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ if (Name.empty())
+ Name = getTypedefName(Decl);
+ if (Name.empty())
+ return true;
+
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the struct.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForRecordDecl(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+
+ auto RecordKind = APIRecord::RK_Struct;
+ auto FieldRecordKind = APIRecord::RK_StructField;
+
+ if (Decl->isUnion()) {
+ RecordKind = APIRecord::RK_Union;
+ FieldRecordKind = APIRecord::RK_UnionField;
+ }
+
+ RecordRecord *RecordRecord = API.addRecord(
+ Name, USR, Loc, AvailabilityInfo::createFromDecl(Decl), Comment,
+ Declaration, SubHeading, RecordKind, isInSystemHeader(Decl));
+
+ // Now collect information about the fields in this struct.
+ getDerivedExtractAPIVisitor().recordRecordFields(
+ RecordRecord, FieldRecordKind, Decl->fields());
+
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitCXXRecordDecl(
+ const CXXRecordDecl *Decl) {
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl) ||
+ Decl->isImplicit())
+ return true;
+
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForCXXClass(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+
+ APIRecord::RecordKind Kind;
+ if (Decl->isUnion())
+ Kind = APIRecord::RecordKind::RK_Union;
+ else if (Decl->isStruct())
+ Kind = APIRecord::RecordKind::RK_Struct;
+ else
+ Kind = APIRecord::RecordKind::RK_CXXClass;
+ auto Access = DeclarationFragmentsBuilder::getAccessControl(Decl);
+
+ APIRecord *Parent = determineParentRecord(Decl->getDeclContext());
+ CXXClassRecord *CXXClassRecord;
+ if (Decl->getDescribedClassTemplate()) {
+ // Inject template fragments before class fragments.
+ Declaration.insert(
+ Declaration.begin(),
+ DeclarationFragmentsBuilder::getFragmentsForRedeclarableTemplate(
+ Decl->getDescribedClassTemplate()));
+ CXXClassRecord = API.addClassTemplate(
+ Parent, Name, USR, Loc, AvailabilityInfo::createFromDecl(Decl), Comment,
+ Declaration, SubHeading, Template(Decl->getDescribedClassTemplate()),
+ Access, isInSystemHeader(Decl));
+ } else
+ CXXClassRecord = API.addCXXClass(
+ Parent, Name, USR, Loc, AvailabilityInfo::createFromDecl(Decl), Comment,
+ Declaration, SubHeading, Kind, Access, isInSystemHeader(Decl));
+
+ CXXClassRecord->Bases = getBases(Decl);
+
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitCXXMethodDecl(
+ const CXXMethodDecl *Decl) {
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl) ||
+ Decl->isImplicit())
+ return true;
+
+ if (isa<CXXConversionDecl>(Decl))
+ return true;
+ if (isa<CXXConstructorDecl>(Decl) || isa<CXXDestructorDecl>(Decl))
+ return true;
+
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+ auto Access = DeclarationFragmentsBuilder::getAccessControl(Decl);
+ auto Signature = DeclarationFragmentsBuilder::getFunctionSignature(Decl);
+
+ SmallString<128> ParentUSR;
+ index::generateUSRForDecl(dyn_cast<CXXRecordDecl>(Decl->getDeclContext()),
+ ParentUSR);
+ auto *Parent = API.findRecordForUSR(ParentUSR);
+ if (Decl->isTemplated()) {
+ FunctionTemplateDecl *TemplateDecl = Decl->getDescribedFunctionTemplate();
+ API.addCXXMethodTemplate(
+ API.findRecordForUSR(ParentUSR), Decl->getName(), USR, Loc,
+ AvailabilityInfo::createFromDecl(Decl), Comment,
+ DeclarationFragmentsBuilder::getFragmentsForFunctionTemplate(
+ TemplateDecl),
+ SubHeading, DeclarationFragmentsBuilder::getFunctionSignature(Decl),
+ DeclarationFragmentsBuilder::getAccessControl(TemplateDecl),
+ Template(TemplateDecl), isInSystemHeader(Decl));
+ } else if (Decl->getTemplateSpecializationInfo())
+ API.addCXXMethodTemplateSpec(
+ Parent, Decl->getName(), USR, Loc,
+ AvailabilityInfo::createFromDecl(Decl), Comment,
+ DeclarationFragmentsBuilder::
+ getFragmentsForFunctionTemplateSpecialization(Decl),
+ SubHeading, Signature, Access, isInSystemHeader(Decl));
+ else if (Decl->isOverloadedOperator())
+ API.addCXXInstanceMethod(
+ Parent, API.copyString(Decl->getNameAsString()), USR, Loc,
+ AvailabilityInfo::createFromDecl(Decl), Comment,
+ DeclarationFragmentsBuilder::getFragmentsForOverloadedOperator(Decl),
+ SubHeading, Signature, Access, isInSystemHeader(Decl));
+ else if (Decl->isStatic())
+ API.addCXXStaticMethod(
+ Parent, Decl->getName(), USR, Loc,
+ AvailabilityInfo::createFromDecl(Decl), Comment,
+ DeclarationFragmentsBuilder::getFragmentsForCXXMethod(Decl), SubHeading,
+ Signature, Access, isInSystemHeader(Decl));
+ else
+ API.addCXXInstanceMethod(
+ Parent, Decl->getName(), USR, Loc,
+ AvailabilityInfo::createFromDecl(Decl), Comment,
+ DeclarationFragmentsBuilder::getFragmentsForCXXMethod(Decl), SubHeading,
+ Signature, Access, isInSystemHeader(Decl));
+
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitCXXConstructorDecl(
+ const CXXConstructorDecl *Decl) {
+
+ StringRef Name = API.copyString(Decl->getNameAsString());
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments, sub-heading, and signature for the method.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForSpecialCXXMethod(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+ FunctionSignature Signature =
+ DeclarationFragmentsBuilder::getFunctionSignature(Decl);
+ AccessControl Access = DeclarationFragmentsBuilder::getAccessControl(Decl);
+ SmallString<128> ParentUSR;
+ index::generateUSRForDecl(dyn_cast<CXXRecordDecl>(Decl->getDeclContext()),
+ ParentUSR);
+ API.addCXXInstanceMethod(API.findRecordForUSR(ParentUSR), Name, USR, Loc,
+ AvailabilityInfo::createFromDecl(Decl), Comment,
+ Declaration, SubHeading, Signature, Access,
+ isInSystemHeader(Decl));
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitCXXDestructorDecl(
+ const CXXDestructorDecl *Decl) {
+
+ StringRef Name = API.copyString(Decl->getNameAsString());
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments, sub-heading, and signature for the method.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForSpecialCXXMethod(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+ FunctionSignature Signature =
+ DeclarationFragmentsBuilder::getFunctionSignature(Decl);
+ AccessControl Access = DeclarationFragmentsBuilder::getAccessControl(Decl);
+ SmallString<128> ParentUSR;
+ index::generateUSRForDecl(dyn_cast<CXXRecordDecl>(Decl->getDeclContext()),
+ ParentUSR);
+ API.addCXXInstanceMethod(API.findRecordForUSR(ParentUSR), Name, USR, Loc,
+ AvailabilityInfo::createFromDecl(Decl), Comment,
+ Declaration, SubHeading, Signature, Access,
+ isInSystemHeader(Decl));
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitConceptDecl(const ConceptDecl *Decl) {
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForConcept(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+ API.addConcept(Name, USR, Loc, AvailabilityInfo::createFromDecl(Decl),
+ Comment, Declaration, SubHeading, Template(Decl),
+ isInSystemHeader(Decl));
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitClassTemplateSpecializationDecl(
+ const ClassTemplateSpecializationDecl *Decl) {
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForClassTemplateSpecialization(
+ Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+
+ APIRecord *Parent = determineParentRecord(Decl->getDeclContext());
+ auto *ClassTemplateSpecializationRecord = API.addClassTemplateSpecialization(
+ Parent, Name, USR, Loc, AvailabilityInfo::createFromDecl(Decl), Comment,
+ Declaration, SubHeading,
+ DeclarationFragmentsBuilder::getAccessControl(Decl),
+ isInSystemHeader(Decl));
+
+ ClassTemplateSpecializationRecord->Bases = getBases(Decl);
+
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::
+ VisitClassTemplatePartialSpecializationDecl(
+ const ClassTemplatePartialSpecializationDecl *Decl) {
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+ DeclarationFragments Declaration = DeclarationFragmentsBuilder::
+ getFragmentsForClassTemplatePartialSpecialization(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+ APIRecord *Parent = determineParentRecord(Decl->getDeclContext());
+ auto *ClassTemplatePartialSpecRecord =
+ API.addClassTemplatePartialSpecialization(
+ Parent, Name, USR, Loc, AvailabilityInfo::createFromDecl(Decl),
+ Comment, Declaration, SubHeading, Template(Decl),
+ DeclarationFragmentsBuilder::getAccessControl(Decl),
+ isInSystemHeader(Decl));
+
+ ClassTemplatePartialSpecRecord->Bases = getBases(Decl);
+
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitVarTemplateDecl(
+ const VarTemplateDecl *Decl) {
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ LinkageInfo Linkage = Decl->getLinkageAndVisibility();
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the variable.
+ DeclarationFragments Declaration;
+ Declaration
+ .append(DeclarationFragmentsBuilder::getFragmentsForRedeclarableTemplate(
+ Decl))
+ .append(DeclarationFragmentsBuilder::getFragmentsForVarTemplate(
+ Decl->getTemplatedDecl()));
+ // Inject template fragments before var fragments.
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+
+ SmallString<128> ParentUSR;
+ index::generateUSRForDecl(dyn_cast<CXXRecordDecl>(Decl->getDeclContext()),
+ ParentUSR);
+ if (Decl->getDeclContext()->getDeclKind() == Decl::CXXRecord)
+ API.addCXXFieldTemplate(API.findRecordForUSR(ParentUSR), Name, USR, Loc,
+ AvailabilityInfo::createFromDecl(Decl), Comment,
+ Declaration, SubHeading,
+ DeclarationFragmentsBuilder::getAccessControl(Decl),
+ Template(Decl), isInSystemHeader(Decl));
+ else
+ API.addGlobalVariableTemplate(Name, USR, Loc,
+ AvailabilityInfo::createFromDecl(Decl),
+ Linkage, Comment, Declaration, SubHeading,
+ Template(Decl), isInSystemHeader(Decl));
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitVarTemplateSpecializationDecl(
+ const VarTemplateSpecializationDecl *Decl) {
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ LinkageInfo Linkage = Decl->getLinkageAndVisibility();
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the variable.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForVarTemplateSpecialization(
+ Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+ API.addGlobalVariableTemplateSpecialization(
+ Name, USR, Loc, AvailabilityInfo::createFromDecl(Decl), Linkage, Comment,
+ Declaration, SubHeading, isInSystemHeader(Decl));
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitVarTemplatePartialSpecializationDecl(
+ const VarTemplatePartialSpecializationDecl *Decl) {
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ LinkageInfo Linkage = Decl->getLinkageAndVisibility();
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the variable.
+ DeclarationFragments Declaration = DeclarationFragmentsBuilder::
+ getFragmentsForVarTemplatePartialSpecialization(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+ API.addGlobalVariableTemplatePartialSpecialization(
+ Name, USR, Loc, AvailabilityInfo::createFromDecl(Decl), Linkage, Comment,
+ Declaration, SubHeading, Template(Decl), isInSystemHeader(Decl));
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitFunctionTemplateDecl(
+ const FunctionTemplateDecl *Decl) {
+ if (isa<CXXMethodDecl>(Decl->getTemplatedDecl()))
+ return true;
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ LinkageInfo Linkage = Decl->getLinkageAndVisibility();
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+ FunctionSignature Signature =
+ DeclarationFragmentsBuilder::getFunctionSignature(
+ Decl->getTemplatedDecl());
+ API.addGlobalFunctionTemplate(
+ Name, USR, Loc, AvailabilityInfo::createFromDecl(Decl), Linkage, Comment,
+ DeclarationFragmentsBuilder::getFragmentsForFunctionTemplate(Decl),
+ SubHeading, Signature, Template(Decl), isInSystemHeader(Decl));
+
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitObjCInterfaceDecl(
+ const ObjCInterfaceDecl *Decl) {
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ LinkageInfo Linkage = Decl->getLinkageAndVisibility();
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the interface.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForObjCInterface(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+
+ // Collect super class information.
+ SymbolReference SuperClass;
+ if (const auto *SuperClassDecl = Decl->getSuperClass()) {
+ SuperClass.Name = SuperClassDecl->getObjCRuntimeNameAsString();
+ SuperClass.USR = API.recordUSR(SuperClassDecl);
+ }
+
+ ObjCInterfaceRecord *ObjCInterfaceRecord = API.addObjCInterface(
+ Name, USR, Loc, AvailabilityInfo::createFromDecl(Decl), Linkage, Comment,
+ Declaration, SubHeading, SuperClass, isInSystemHeader(Decl));
+
+ // Record all methods (selectors). This doesn't include automatically
+ // synthesized property methods.
+ getDerivedExtractAPIVisitor().recordObjCMethods(ObjCInterfaceRecord,
+ Decl->methods());
+ getDerivedExtractAPIVisitor().recordObjCProperties(ObjCInterfaceRecord,
+ Decl->properties());
+ getDerivedExtractAPIVisitor().recordObjCInstanceVariables(ObjCInterfaceRecord,
+ Decl->ivars());
+ getDerivedExtractAPIVisitor().recordObjCProtocols(ObjCInterfaceRecord,
+ Decl->protocols());
+
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitObjCProtocolDecl(
+ const ObjCProtocolDecl *Decl) {
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the protocol.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForObjCProtocol(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+
+ ObjCProtocolRecord *ObjCProtocolRecord = API.addObjCProtocol(
+ Name, USR, Loc, AvailabilityInfo::createFromDecl(Decl), Comment,
+ Declaration, SubHeading, isInSystemHeader(Decl));
+
+ getDerivedExtractAPIVisitor().recordObjCMethods(ObjCProtocolRecord,
+ Decl->methods());
+ getDerivedExtractAPIVisitor().recordObjCProperties(ObjCProtocolRecord,
+ Decl->properties());
+ getDerivedExtractAPIVisitor().recordObjCProtocols(ObjCProtocolRecord,
+ Decl->protocols());
+
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitTypedefNameDecl(
+ const TypedefNameDecl *Decl) {
+ // Skip ObjC Type Parameter for now.
+ if (isa<ObjCTypeParamDecl>(Decl))
+ return true;
+
+ if (!Decl->isDefinedOutsideFunctionOrMethod())
+ return true;
+
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+
+ // Add the notion of typedef for tag type (struct or enum) of the same name.
+ if (const ElaboratedType *ET =
+ dyn_cast<ElaboratedType>(Decl->getUnderlyingType())) {
+ if (const TagType *TagTy = dyn_cast<TagType>(ET->desugar())) {
+ if (Decl->getName() == TagTy->getDecl()->getName()) {
+ if (isa<RecordDecl>(TagTy->getDecl())) {
+ modifyRecords(API.getRecords(), Decl->getName());
+ }
+ if (TagTy->getDecl()->isEnum()) {
+ modifyRecords(API.getEnums(), Decl->getName());
+ }
+ }
+ }
+ }
+
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ QualType Type = Decl->getUnderlyingType();
+ SymbolReference SymRef =
+ TypedefUnderlyingTypeResolver(Context).getSymbolReferenceForType(Type,
+ API);
+
+ API.addTypedef(Name, USR, Loc, AvailabilityInfo::createFromDecl(Decl),
+ Comment,
+ DeclarationFragmentsBuilder::getFragmentsForTypedef(Decl),
+ DeclarationFragmentsBuilder::getSubHeading(Decl), SymRef,
+ isInSystemHeader(Decl));
+
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitObjCCategoryDecl(
+ const ObjCCategoryDecl *Decl) {
+ if (!getDerivedExtractAPIVisitor().shouldDeclBeIncluded(Decl))
+ return true;
+
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+ // Build declaration fragments and sub-heading for the category.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForObjCCategory(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+
+ const ObjCInterfaceDecl *InterfaceDecl = Decl->getClassInterface();
+ SymbolReference Interface(InterfaceDecl->getName(),
+ API.recordUSR(InterfaceDecl));
+
+ bool IsFromExternalModule = true;
+ for (const auto &Interface : API.getObjCInterfaces()) {
+ if (InterfaceDecl->getName() == Interface.second.get()->Name) {
+ IsFromExternalModule = false;
+ break;
+ }
+ }
+
+ ObjCCategoryRecord *ObjCCategoryRecord = API.addObjCCategory(
+ Name, USR, Loc, AvailabilityInfo::createFromDecl(Decl), Comment,
+ Declaration, SubHeading, Interface, isInSystemHeader(Decl),
+ IsFromExternalModule);
+
+ getDerivedExtractAPIVisitor().recordObjCMethods(ObjCCategoryRecord,
+ Decl->methods());
+ getDerivedExtractAPIVisitor().recordObjCProperties(ObjCCategoryRecord,
+ Decl->properties());
+ getDerivedExtractAPIVisitor().recordObjCInstanceVariables(ObjCCategoryRecord,
+ Decl->ivars());
+ getDerivedExtractAPIVisitor().recordObjCProtocols(ObjCCategoryRecord,
+ Decl->protocols());
+
+ return true;
+}
+
+/// Collect API information for the enum constants and associate with the
+/// parent enum.
+template <typename Derived>
+void ExtractAPIVisitorBase<Derived>::recordEnumConstants(
+ EnumRecord *EnumRecord, const EnumDecl::enumerator_range Constants) {
+ for (const auto *Constant : Constants) {
+ // Collect symbol information.
+ StringRef Name = Constant->getName();
+ StringRef USR = API.recordUSR(Constant);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Constant->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Constant))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the enum constant.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForEnumConstant(Constant);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Constant);
+
+ API.addEnumConstant(EnumRecord, Name, USR, Loc,
+ AvailabilityInfo::createFromDecl(Constant), Comment,
+ Declaration, SubHeading, isInSystemHeader(Constant));
+ }
+}
+
+/// Collect API information for the struct fields and associate with the
+/// parent struct.
+template <typename Derived>
+void ExtractAPIVisitorBase<Derived>::recordRecordFields(
+ RecordRecord *RecordRecord, APIRecord::RecordKind FieldKind,
+ const RecordDecl::field_range Fields) {
+ for (const auto *Field : Fields) {
+ // Collect symbol information.
+ StringRef Name = Field->getName();
+ StringRef USR = API.recordUSR(Field);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Field->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Field))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the struct field.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForField(Field);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Field);
+
+ API.addRecordField(
+ RecordRecord, Name, USR, Loc, AvailabilityInfo::createFromDecl(Field),
+ Comment, Declaration, SubHeading, FieldKind, isInSystemHeader(Field));
+ }
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitFieldDecl(const FieldDecl *Decl) {
+ if (Decl->getDeclContext()->getDeclKind() == Decl::Record)
+ return true;
+ if (isa<ObjCIvarDecl>(Decl))
+ return true;
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the struct field.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForField(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+ AccessControl Access = DeclarationFragmentsBuilder::getAccessControl(Decl);
+
+ SmallString<128> ParentUSR;
+ index::generateUSRForDecl(dyn_cast<CXXRecordDecl>(Decl->getDeclContext()),
+ ParentUSR);
+ API.addCXXField(API.findRecordForUSR(ParentUSR), Name, USR, Loc,
+ AvailabilityInfo::createFromDecl(Decl), Comment, Declaration,
+ SubHeading, Access, isInSystemHeader(Decl));
+ return true;
+}
+
+template <typename Derived>
+bool ExtractAPIVisitorBase<Derived>::VisitCXXConversionDecl(
+ const CXXConversionDecl *Decl) {
+ StringRef Name = API.copyString(Decl->getNameAsString());
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments, sub-heading, and signature for the method.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForConversionFunction(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+ FunctionSignature Signature =
+ DeclarationFragmentsBuilder::getFunctionSignature(Decl);
+ AccessControl Access = DeclarationFragmentsBuilder::getAccessControl(Decl);
+
+ SmallString<128> ParentUSR;
+ index::generateUSRForDecl(dyn_cast<CXXRecordDecl>(Decl->getDeclContext()),
+ ParentUSR);
+ if (Decl->isStatic())
+ API.addCXXStaticMethod(API.findRecordForUSR(ParentUSR), Name, USR, Loc,
+ AvailabilityInfo::createFromDecl(Decl), Comment,
+ Declaration, SubHeading, Signature, Access,
+ isInSystemHeader(Decl));
+ else
+ API.addCXXInstanceMethod(API.findRecordForUSR(ParentUSR), Name, USR, Loc,
+ AvailabilityInfo::createFromDecl(Decl), Comment,
+ Declaration, SubHeading, Signature, Access,
+ isInSystemHeader(Decl));
+ return true;
+}
+
+/// Collect API information for the Objective-C methods and associate with the
+/// parent container.
+template <typename Derived>
+void ExtractAPIVisitorBase<Derived>::recordObjCMethods(
+ ObjCContainerRecord *Container,
+ const ObjCContainerDecl::method_range Methods) {
+ for (const auto *Method : Methods) {
+ // Don't record selectors for properties.
+ if (Method->isPropertyAccessor())
+ continue;
+
+ StringRef Name = API.copyString(Method->getSelector().getAsString());
+ StringRef USR = API.recordUSR(Method);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Method->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Method))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments, sub-heading, and signature for the method.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForObjCMethod(Method);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Method);
+ FunctionSignature Signature =
+ DeclarationFragmentsBuilder::getFunctionSignature(Method);
+
+ API.addObjCMethod(Container, Name, USR, Loc,
+ AvailabilityInfo::createFromDecl(Method), Comment,
+ Declaration, SubHeading, Signature,
+ Method->isInstanceMethod(), isInSystemHeader(Method));
+ }
+}
+
+template <typename Derived>
+void ExtractAPIVisitorBase<Derived>::recordObjCProperties(
+ ObjCContainerRecord *Container,
+ const ObjCContainerDecl::prop_range Properties) {
+ for (const auto *Property : Properties) {
+ StringRef Name = Property->getName();
+ StringRef USR = API.recordUSR(Property);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Property->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Property))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the property.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForObjCProperty(Property);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Property);
+
+ StringRef GetterName =
+ API.copyString(Property->getGetterName().getAsString());
+ StringRef SetterName =
+ API.copyString(Property->getSetterName().getAsString());
+
+ // Get the attributes for property.
+ unsigned Attributes = ObjCPropertyRecord::NoAttr;
+ if (Property->getPropertyAttributes() &
+ ObjCPropertyAttribute::kind_readonly)
+ Attributes |= ObjCPropertyRecord::ReadOnly;
+
+ API.addObjCProperty(
+ Container, Name, USR, Loc, AvailabilityInfo::createFromDecl(Property),
+ Comment, Declaration, SubHeading,
+ static_cast<ObjCPropertyRecord::AttributeKind>(Attributes), GetterName,
+ SetterName, Property->isOptional(),
+ !(Property->getPropertyAttributes() &
+ ObjCPropertyAttribute::kind_class),
+ isInSystemHeader(Property));
+ }
+}
+
+template <typename Derived>
+void ExtractAPIVisitorBase<Derived>::recordObjCInstanceVariables(
+ ObjCContainerRecord *Container,
+ const llvm::iterator_range<
+ DeclContext::specific_decl_iterator<ObjCIvarDecl>>
+ Ivars) {
+ for (const auto *Ivar : Ivars) {
+ StringRef Name = Ivar->getName();
+ StringRef USR = API.recordUSR(Ivar);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Ivar->getLocation());
+ DocComment Comment;
+ if (auto *RawComment =
+ getDerivedExtractAPIVisitor().fetchRawCommentForDecl(Ivar))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the instance variable.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForField(Ivar);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Ivar);
+
+ ObjCInstanceVariableRecord::AccessControl Access =
+ Ivar->getCanonicalAccessControl();
+
+ API.addObjCInstanceVariable(
+ Container, Name, USR, Loc, AvailabilityInfo::createFromDecl(Ivar),
+ Comment, Declaration, SubHeading, Access, isInSystemHeader(Ivar));
+ }
+}
+
+template <typename Derived>
+void ExtractAPIVisitorBase<Derived>::recordObjCProtocols(
+ ObjCContainerRecord *Container,
+ ObjCInterfaceDecl::protocol_range Protocols) {
+ for (const auto *Protocol : Protocols)
+ Container->Protocols.emplace_back(Protocol->getName(),
+ API.recordUSR(Protocol));
+}
+
+} // namespace impl
+
+/// The RecursiveASTVisitor to traverse symbol declarations and collect API
+/// information.
+template <typename Derived = void>
+class ExtractAPIVisitor
+ : public impl::ExtractAPIVisitorBase<std::conditional_t<
+ std::is_same_v<Derived, void>, ExtractAPIVisitor<>, Derived>> {
+ using Base = impl::ExtractAPIVisitorBase<std::conditional_t<
+ std::is_same_v<Derived, void>, ExtractAPIVisitor<>, Derived>>;
+
+public:
+ ExtractAPIVisitor(ASTContext &Context, APISet &API) : Base(Context, API) {}
+
+ bool shouldDeclBeIncluded(const Decl *D) const { return true; }
+ const RawComment *fetchRawCommentForDecl(const Decl *D) const {
+ return this->Context.getRawCommentForDeclNoCache(D);
+ }
+};
+
+} // namespace extractapi
+} // namespace clang
+
+#endif // LLVM_CLANG_EXTRACTAPI_EXTRACT_API_VISITOR_H
diff --git a/contrib/llvm-project/clang/include/clang/ExtractAPI/FrontendActions.h b/contrib/llvm-project/clang/include/clang/ExtractAPI/FrontendActions.h
new file mode 100644
index 000000000000..c67864aac9af
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/ExtractAPI/FrontendActions.h
@@ -0,0 +1,95 @@
+//===- ExtractAPI/FrontendActions.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the ExtractAPIAction and WrappingExtractAPIAction frontend
+/// actions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_EXTRACTAPI_FRONTEND_ACTIONS_H
+#define LLVM_CLANG_EXTRACTAPI_FRONTEND_ACTIONS_H
+
+#include "clang/ExtractAPI/ExtractAPIActionBase.h"
+#include "clang/Frontend/FrontendAction.h"
+
+namespace clang {
+
+/// ExtractAPIAction sets up the output file and creates the ExtractAPIVisitor.
+class ExtractAPIAction : public ASTFrontendAction,
+ private ExtractAPIActionBase {
+protected:
+ std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) override;
+
+private:
+
+ /// The input file originally provided on the command line.
+ ///
+ /// This captures the spelling used to include the file and whether the
+ /// include is quoted or not.
+ SmallVector<std::pair<SmallString<32>, bool>> KnownInputFiles;
+
+ /// Prepare to execute the action on the given CompilerInstance.
+ ///
+ /// This is called before executing the action on any inputs. This generates a
+ /// single header that includes all of CI's inputs and replaces CI's input
+ /// list with it before actually executing the action.
+ bool PrepareToExecuteAction(CompilerInstance &CI) override;
+
+ /// Called after executing the action on the synthesized input buffer.
+ ///
+ /// Note: Now that we have gathered all the API definitions to surface we can
+ /// emit them in this callback.
+ void EndSourceFileAction() override;
+
+ static StringRef getInputBufferName() { return "<extract-api-includes>"; }
+
+ static std::unique_ptr<llvm::raw_pwrite_stream>
+ CreateOutputFile(CompilerInstance &CI, StringRef InFile);
+};
+
+/// Wrap ExtractAPIAction on top of a pre-existing action
+///
+/// Used when the ExtractAPI action needs to be executed as a side effect of a
+/// regular compilation job. Unlike ExtarctAPIAction, this is meant to be used
+/// on regular source files ( .m , .c files) instead of header files
+class WrappingExtractAPIAction : public WrapperFrontendAction,
+ private ExtractAPIActionBase {
+public:
+ WrappingExtractAPIAction(std::unique_ptr<FrontendAction> WrappedAction)
+ : WrapperFrontendAction(std::move(WrappedAction)) {}
+
+protected:
+ /// Create ExtractAPI consumer multiplexed on another consumer.
+ ///
+ /// This allows us to execute ExtractAPI action while on top of
+ std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) override;
+
+private:
+ /// Flag to check if the wrapper front end action's consumer is
+ /// craeted or not
+ bool CreatedASTConsumer = false;
+
+ void EndSourceFile() override { FrontendAction::EndSourceFile(); }
+
+ /// Called after executing the action on the synthesized input buffer.
+ ///
+ /// Executes both Wrapper and ExtractAPIBase end source file
+ /// actions. This is the place where all the gathered symbol graph
+ /// information is emited.
+ void EndSourceFileAction() override;
+
+ static std::unique_ptr<llvm::raw_pwrite_stream>
+ CreateOutputFile(CompilerInstance &CI, StringRef InFile);
+};
+
+} // namespace clang
+
+#endif // LLVM_CLANG_EXTRACTAPI_FRONTEND_ACTIONS_H
diff --git a/contrib/llvm-project/clang/include/clang/ExtractAPI/Serialization/SerializerBase.h b/contrib/llvm-project/clang/include/clang/ExtractAPI/Serialization/SerializerBase.h
new file mode 100644
index 000000000000..f0629a9ad56b
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/ExtractAPI/Serialization/SerializerBase.h
@@ -0,0 +1,314 @@
+//===- ExtractAPI/Serialization/SerializerBase.h ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the ExtractAPI APISetVisitor interface.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_EXTRACTAPI_SERIALIZATION_SERIALIZERBASE_H
+#define LLVM_CLANG_EXTRACTAPI_SERIALIZATION_SERIALIZERBASE_H
+
+#include "clang/ExtractAPI/API.h"
+
+namespace clang {
+namespace extractapi {
+
+/// The base interface of visitors for API information.
+template <typename Derived> class APISetVisitor {
+public:
+ void traverseAPISet() {
+ getDerived()->traverseNamespaces();
+
+ getDerived()->traverseGlobalVariableRecords();
+
+ getDerived()->traverseGlobalFunctionRecords();
+
+ getDerived()->traverseEnumRecords();
+
+ getDerived()->traverseStaticFieldRecords();
+
+ getDerived()->traverseCXXClassRecords();
+
+ getDerived()->traverseClassTemplateRecords();
+
+ getDerived()->traverseClassTemplateSpecializationRecords();
+
+ getDerived()->traverseClassTemplatePartialSpecializationRecords();
+
+ getDerived()->traverseCXXInstanceMethods();
+
+ getDerived()->traverseCXXStaticMethods();
+
+ getDerived()->traverseCXXMethodTemplates();
+
+ getDerived()->traverseCXXMethodTemplateSpecializations();
+
+ getDerived()->traverseCXXFields();
+
+ getDerived()->traverseCXXFieldTemplates();
+
+ getDerived()->traverseConcepts();
+
+ getDerived()->traverseGlobalVariableTemplateRecords();
+
+ getDerived()->traverseGlobalVariableTemplateSpecializationRecords();
+
+ getDerived()->traverseGlobalVariableTemplatePartialSpecializationRecords();
+
+ getDerived()->traverseGlobalFunctionTemplateRecords();
+
+ getDerived()->traverseGlobalFunctionTemplateSpecializationRecords();
+
+ getDerived()->traverseRecordRecords();
+
+ getDerived()->traverseObjCInterfaces();
+
+ getDerived()->traverseObjCProtocols();
+
+ getDerived()->traverseObjCCategories();
+
+ getDerived()->traverseMacroDefinitionRecords();
+
+ getDerived()->traverseTypedefRecords();
+ }
+
+ void traverseNamespaces() {
+ for (const auto &Namespace : API.getNamespaces())
+ getDerived()->visitNamespaceRecord(*Namespace.second);
+ }
+
+ void traverseGlobalFunctionRecords() {
+ for (const auto &GlobalFunction : API.getGlobalFunctions())
+ getDerived()->visitGlobalFunctionRecord(*GlobalFunction.second);
+ }
+
+ void traverseGlobalVariableRecords() {
+ for (const auto &GlobalVariable : API.getGlobalVariables())
+ getDerived()->visitGlobalVariableRecord(*GlobalVariable.second);
+ }
+
+ void traverseEnumRecords() {
+ for (const auto &Enum : API.getEnums())
+ getDerived()->visitEnumRecord(*Enum.second);
+ }
+
+ void traverseRecordRecords() {
+ for (const auto &Record : API.getRecords())
+ getDerived()->visitRecordRecord(*Record.second);
+ }
+
+ void traverseStaticFieldRecords() {
+ for (const auto &StaticField : API.getStaticFields())
+ getDerived()->visitStaticFieldRecord(*StaticField.second);
+ }
+
+ void traverseCXXClassRecords() {
+ for (const auto &Class : API.getCXXClasses())
+ getDerived()->visitCXXClassRecord(*Class.second);
+ }
+
+ void traverseCXXMethodTemplates() {
+ for (const auto &MethodTemplate : API.getCXXMethodTemplates())
+ getDerived()->visitMethodTemplateRecord(*MethodTemplate.second);
+ }
+
+ void traverseCXXMethodTemplateSpecializations() {
+ for (const auto &MethodTemplateSpecialization :
+ API.getCXXMethodTemplateSpecializations())
+ getDerived()->visitMethodTemplateSpecializationRecord(
+ *MethodTemplateSpecialization.second);
+ }
+
+ void traverseClassTemplateRecords() {
+ for (const auto &ClassTemplate : API.getClassTemplates())
+ getDerived()->visitClassTemplateRecord(*ClassTemplate.second);
+ }
+
+ void traverseClassTemplateSpecializationRecords() {
+ for (const auto &ClassTemplateSpecialization :
+ API.getClassTemplateSpecializations())
+ getDerived()->visitClassTemplateSpecializationRecord(
+ *ClassTemplateSpecialization.second);
+ }
+
+ void traverseClassTemplatePartialSpecializationRecords() {
+ for (const auto &ClassTemplatePartialSpecialization :
+ API.getClassTemplatePartialSpecializations())
+ getDerived()->visitClassTemplatePartialSpecializationRecord(
+ *ClassTemplatePartialSpecialization.second);
+ }
+
+ void traverseCXXInstanceMethods() {
+ for (const auto &InstanceMethod : API.getCXXInstanceMethods())
+ getDerived()->visitCXXInstanceMethodRecord(*InstanceMethod.second);
+ }
+
+ void traverseCXXStaticMethods() {
+ for (const auto &InstanceMethod : API.getCXXStaticMethods())
+ getDerived()->visitCXXStaticMethodRecord(*InstanceMethod.second);
+ }
+
+ void traverseCXXFields() {
+ for (const auto &CXXField : API.getCXXFields())
+ getDerived()->visitCXXFieldRecord(*CXXField.second);
+ }
+
+ void traverseCXXFieldTemplates() {
+ for (const auto &CXXFieldTemplate : API.getCXXFieldTemplates())
+ getDerived()->visitCXXFieldTemplateRecord(*CXXFieldTemplate.second);
+ }
+
+ void traverseGlobalVariableTemplateRecords() {
+ for (const auto &GlobalVariableTemplate : API.getGlobalVariableTemplates())
+ getDerived()->visitGlobalVariableTemplateRecord(
+ *GlobalVariableTemplate.second);
+ }
+
+ void traverseGlobalVariableTemplateSpecializationRecords() {
+ for (const auto &GlobalVariableTemplateSpecialization :
+ API.getGlobalVariableTemplateSpecializations())
+ getDerived()->visitGlobalVariableTemplateSpecializationRecord(
+ *GlobalVariableTemplateSpecialization.second);
+ }
+
+ void traverseGlobalVariableTemplatePartialSpecializationRecords() {
+ for (const auto &GlobalVariableTemplatePartialSpecialization :
+ API.getGlobalVariableTemplatePartialSpecializations())
+ getDerived()->visitGlobalVariableTemplatePartialSpecializationRecord(
+ *GlobalVariableTemplatePartialSpecialization.second);
+ }
+
+ void traverseGlobalFunctionTemplateRecords() {
+ for (const auto &GlobalFunctionTemplate : API.getGlobalFunctionTemplates())
+ getDerived()->visitGlobalFunctionTemplateRecord(
+ *GlobalFunctionTemplate.second);
+ }
+
+ void traverseGlobalFunctionTemplateSpecializationRecords() {
+ for (const auto &GlobalFunctionTemplateSpecialization :
+ API.getGlobalFunctionTemplateSpecializations())
+ getDerived()->visitGlobalFunctionTemplateSpecializationRecord(
+ *GlobalFunctionTemplateSpecialization.second);
+ }
+
+ void traverseConcepts() {
+ for (const auto &Concept : API.getConcepts())
+ getDerived()->visitConceptRecord(*Concept.second);
+ }
+
+ void traverseObjCInterfaces() {
+ for (const auto &Interface : API.getObjCInterfaces())
+ getDerived()->visitObjCContainerRecord(*Interface.second);
+ }
+
+ void traverseObjCProtocols() {
+ for (const auto &Protocol : API.getObjCProtocols())
+ getDerived()->visitObjCContainerRecord(*Protocol.second);
+ }
+
+ void traverseObjCCategories() {
+ for (const auto &Category : API.getObjCCategories())
+ getDerived()->visitObjCCategoryRecord(*Category.second);
+ }
+
+ void traverseMacroDefinitionRecords() {
+ for (const auto &Macro : API.getMacros())
+ getDerived()->visitMacroDefinitionRecord(*Macro.second);
+ }
+
+ void traverseTypedefRecords() {
+ for (const auto &Typedef : API.getTypedefs())
+ getDerived()->visitTypedefRecord(*Typedef.second);
+ }
+
+ void visitNamespaceRecord(const NamespaceRecord &Record){};
+
+ /// Visit a global function record.
+ void visitGlobalFunctionRecord(const GlobalFunctionRecord &Record){};
+
+ /// Visit a global variable record.
+ void visitGlobalVariableRecord(const GlobalVariableRecord &Record){};
+
+ /// Visit an enum record.
+ void visitEnumRecord(const EnumRecord &Record){};
+
+ /// Visit a record record.
+ void visitRecordRecord(const RecordRecord &Record){};
+
+ void visitStaticFieldRecord(const StaticFieldRecord &Record){};
+
+ void visitCXXClassRecord(const CXXClassRecord &Record){};
+
+ void visitClassTemplateRecord(const ClassTemplateRecord &Record){};
+
+ void visitClassTemplateSpecializationRecord(
+ const ClassTemplateSpecializationRecord &Record){};
+
+ void visitClassTemplatePartialSpecializationRecord(
+ const ClassTemplatePartialSpecializationRecord &Record){};
+
+ void visitCXXInstanceRecord(const CXXInstanceMethodRecord &Record){};
+
+ void visitCXXStaticRecord(const CXXStaticMethodRecord &Record){};
+
+ void visitMethodTemplateRecord(const CXXMethodTemplateRecord &Record){};
+
+ void visitMethodTemplateSpecializationRecord(
+ const CXXMethodTemplateSpecializationRecord &Record){};
+
+ void visitCXXFieldTemplateRecord(const CXXFieldTemplateRecord &Record){};
+
+ void visitGlobalVariableTemplateRecord(
+ const GlobalVariableTemplateRecord &Record) {}
+
+ void visitGlobalVariableTemplateSpecializationRecord(
+ const GlobalVariableTemplateSpecializationRecord &Record){};
+
+ void visitGlobalVariableTemplatePartialSpecializationRecord(
+ const GlobalVariableTemplatePartialSpecializationRecord &Record){};
+
+ void visitGlobalFunctionTemplateRecord(
+ const GlobalFunctionTemplateRecord &Record){};
+
+ void visitGlobalFunctionTemplateSpecializationRecord(
+ const GlobalFunctionTemplateSpecializationRecord &Record){};
+
+ /// Visit an Objective-C container record.
+ void visitObjCContainerRecord(const ObjCContainerRecord &Record){};
+
+ /// Visit an Objective-C category record.
+ void visitObjCCategoryRecord(const ObjCCategoryRecord &Record){};
+
+ /// Visit a macro definition record.
+ void visitMacroDefinitionRecord(const MacroDefinitionRecord &Record){};
+
+ /// Visit a typedef record.
+ void visitTypedefRecord(const TypedefRecord &Record){};
+
+protected:
+ const APISet &API;
+
+public:
+ APISetVisitor() = delete;
+ APISetVisitor(const APISetVisitor &) = delete;
+ APISetVisitor(APISetVisitor &&) = delete;
+ APISetVisitor &operator=(const APISetVisitor &) = delete;
+ APISetVisitor &operator=(APISetVisitor &&) = delete;
+
+protected:
+ APISetVisitor(const APISet &API) : API(API) {}
+ ~APISetVisitor() = default;
+
+ Derived *getDerived() { return static_cast<Derived *>(this); };
+};
+
+} // namespace extractapi
+} // namespace clang
+
+#endif // LLVM_CLANG_EXTRACTAPI_SERIALIZATION_SERIALIZERBASE_H
diff --git a/contrib/llvm-project/clang/include/clang/ExtractAPI/Serialization/SymbolGraphSerializer.h b/contrib/llvm-project/clang/include/clang/ExtractAPI/Serialization/SymbolGraphSerializer.h
new file mode 100644
index 000000000000..4249ac405fd2
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/ExtractAPI/Serialization/SymbolGraphSerializer.h
@@ -0,0 +1,243 @@
+//===- ExtractAPI/Serialization/SymbolGraphSerializer.h ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the SymbolGraphSerializer class.
+///
+/// Implement an APISetVisitor to serialize the APISet into the Symbol Graph
+/// format for ExtractAPI. See https://github.com/apple/swift-docc-symbolkit.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_EXTRACTAPI_SERIALIZATION_SYMBOLGRAPHSERIALIZER_H
+#define LLVM_CLANG_EXTRACTAPI_SERIALIZATION_SYMBOLGRAPHSERIALIZER_H
+
+#include "clang/ExtractAPI/API.h"
+#include "clang/ExtractAPI/APIIgnoresList.h"
+#include "clang/ExtractAPI/Serialization/SerializerBase.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/JSON.h"
+#include "llvm/Support/VersionTuple.h"
+#include "llvm/Support/raw_ostream.h"
+#include <optional>
+
+namespace clang {
+namespace extractapi {
+
+using namespace llvm::json;
+
+/// Common options to customize the visitor output.
+struct SymbolGraphSerializerOption {
+ /// Do not include unnecessary whitespaces to save space.
+ bool Compact;
+};
+
+/// The visitor that organizes API information in the Symbol Graph format.
+///
+/// The Symbol Graph format (https://github.com/apple/swift-docc-symbolkit)
+/// models an API set as a directed graph, where nodes are symbol declarations,
+/// and edges are relationships between the connected symbols.
+class SymbolGraphSerializer : public APISetVisitor<SymbolGraphSerializer> {
+ /// A JSON array of formatted symbols in \c APISet.
+ Array Symbols;
+
+ /// A JSON array of formatted symbol relationships in \c APISet.
+ Array Relationships;
+
+ /// The Symbol Graph format version used by this serializer.
+ static const VersionTuple FormatVersion;
+
+ /// Indicates whether child symbols should be visited. This is mainly
+ /// useful for \c serializeSingleSymbolSGF.
+ bool ShouldRecurse;
+
+public:
+ /// Serialize the APIs in \c APISet in the Symbol Graph format.
+ ///
+ /// \returns a JSON object that contains the root of the formatted
+ /// Symbol Graph.
+ Object serialize();
+
+ /// Wrap serialize(void) and write out the serialized JSON object to \p os.
+ void serialize(raw_ostream &os);
+
+ /// Serialize a single symbol SGF. This is primarily used for libclang.
+ ///
+ /// \returns an optional JSON Object representing the payload that libclang
+ /// expects for providing symbol information for a single symbol. If this is
+ /// not a known symbol returns \c std::nullopt.
+ static std::optional<Object> serializeSingleSymbolSGF(StringRef USR,
+ const APISet &API);
+
+ /// The kind of a relationship between two symbols.
+ enum RelationshipKind {
+ /// The source symbol is a member of the target symbol.
+ /// For example enum constants are members of the enum, class/instance
+ /// methods are members of the class, etc.
+ MemberOf,
+
+ /// The source symbol is inherited from the target symbol.
+ InheritsFrom,
+
+ /// The source symbol conforms to the target symbol.
+ /// For example Objective-C protocol conformances.
+ ConformsTo,
+
+ /// The source symbol is an extension to the target symbol.
+ /// For example Objective-C categories extending an external type.
+ ExtensionTo,
+ };
+
+ /// Get the string representation of the relationship kind.
+ static StringRef getRelationshipString(RelationshipKind Kind);
+
+ enum ConstraintKind { Conformance, ConditionalConformance };
+
+ static StringRef getConstraintString(ConstraintKind Kind);
+
+private:
+ /// Just serialize the currently recorded objects in Symbol Graph format.
+ Object serializeCurrentGraph();
+
+ /// Synthesize the metadata section of the Symbol Graph format.
+ ///
+ /// The metadata section describes information about the Symbol Graph itself,
+ /// including the format version and the generator information.
+ Object serializeMetadata() const;
+
+ /// Synthesize the module section of the Symbol Graph format.
+ ///
+ /// The module section contains information about the product that is defined
+ /// by the given API set.
+ /// Note that "module" here is not to be confused with the Clang/C++ module
+ /// concept.
+ Object serializeModule() const;
+
+ /// Determine if the given \p Record should be skipped during serialization.
+ bool shouldSkip(const APIRecord &Record) const;
+
+ /// Format the common API information for \p Record.
+ ///
+ /// This handles the shared information of all kinds of API records,
+ /// for example identifier and source location. The resulting object is then
+ /// augmented with kind-specific symbol information by the caller.
+ /// This method also checks if the given \p Record should be skipped during
+ /// serialization.
+ ///
+ /// \returns \c std::nullopt if this \p Record should be skipped, or a JSON
+ /// object containing common symbol information of \p Record.
+ template <typename RecordTy>
+ std::optional<Object> serializeAPIRecord(const RecordTy &Record) const;
+
+ /// Helper method to serialize second-level member records of \p Record and
+ /// the member-of relationships.
+ template <typename MemberTy>
+ void serializeMembers(const APIRecord &Record,
+ const SmallVector<std::unique_ptr<MemberTy>> &Members);
+
+ /// Serialize the \p Kind relationship between \p Source and \p Target.
+ ///
+ /// Record the relationship between the two symbols in
+ /// SymbolGraphSerializer::Relationships.
+ void serializeRelationship(RelationshipKind Kind, SymbolReference Source,
+ SymbolReference Target);
+
+protected:
+ /// The list of symbols to ignore.
+ ///
+ /// Note: This should be consulted before emitting a symbol.
+ const APIIgnoresList &IgnoresList;
+
+ SymbolGraphSerializerOption Options;
+
+ llvm::StringSet<> visitedCategories;
+
+public:
+ void visitNamespaceRecord(const NamespaceRecord &Record);
+
+ /// Visit a global function record.
+ void visitGlobalFunctionRecord(const GlobalFunctionRecord &Record);
+
+ /// Visit a global variable record.
+ void visitGlobalVariableRecord(const GlobalVariableRecord &Record);
+
+ /// Visit an enum record.
+ void visitEnumRecord(const EnumRecord &Record);
+
+ /// Visit a record record.
+ void visitRecordRecord(const RecordRecord &Record);
+
+ void visitStaticFieldRecord(const StaticFieldRecord &Record);
+
+ void visitCXXClassRecord(const CXXClassRecord &Record);
+
+ void visitClassTemplateRecord(const ClassTemplateRecord &Record);
+
+ void visitClassTemplateSpecializationRecord(
+ const ClassTemplateSpecializationRecord &Record);
+
+ void visitClassTemplatePartialSpecializationRecord(
+ const ClassTemplatePartialSpecializationRecord &Record);
+
+ void visitCXXInstanceMethodRecord(const CXXInstanceMethodRecord &Record);
+
+ void visitCXXStaticMethodRecord(const CXXStaticMethodRecord &Record);
+
+ void visitMethodTemplateRecord(const CXXMethodTemplateRecord &Record);
+
+ void visitMethodTemplateSpecializationRecord(
+ const CXXMethodTemplateSpecializationRecord &Record);
+
+ void visitCXXFieldRecord(const CXXFieldRecord &Record);
+
+ void visitCXXFieldTemplateRecord(const CXXFieldTemplateRecord &Record);
+
+ void visitConceptRecord(const ConceptRecord &Record);
+
+ void
+ visitGlobalVariableTemplateRecord(const GlobalVariableTemplateRecord &Record);
+
+ void visitGlobalVariableTemplateSpecializationRecord(
+ const GlobalVariableTemplateSpecializationRecord &Record);
+
+ void visitGlobalVariableTemplatePartialSpecializationRecord(
+ const GlobalVariableTemplatePartialSpecializationRecord &Record);
+
+ void
+ visitGlobalFunctionTemplateRecord(const GlobalFunctionTemplateRecord &Record);
+
+ void visitGlobalFunctionTemplateSpecializationRecord(
+ const GlobalFunctionTemplateSpecializationRecord &Record);
+
+ /// Visit an Objective-C container record.
+ void visitObjCContainerRecord(const ObjCContainerRecord &Record);
+
+ /// Visit an Objective-C category record.
+ void visitObjCCategoryRecord(const ObjCCategoryRecord &Record);
+
+ /// Visit a macro definition record.
+ void visitMacroDefinitionRecord(const MacroDefinitionRecord &Record);
+
+ /// Visit a typedef record.
+ void visitTypedefRecord(const TypedefRecord &Record);
+
+ /// Serialize a single record.
+ void serializeSingleRecord(const APIRecord *Record);
+
+ SymbolGraphSerializer(const APISet &API, const APIIgnoresList &IgnoresList,
+ SymbolGraphSerializerOption Options = {},
+ bool ShouldRecurse = true)
+ : APISetVisitor(API), ShouldRecurse(ShouldRecurse),
+ IgnoresList(IgnoresList), Options(Options) {}
+};
+
+} // namespace extractapi
+} // namespace clang
+
+#endif // LLVM_CLANG_EXTRACTAPI_SERIALIZATION_SYMBOLGRAPHSERIALIZER_H
diff --git a/contrib/llvm-project/clang/include/clang/ExtractAPI/TypedefUnderlyingTypeResolver.h b/contrib/llvm-project/clang/include/clang/ExtractAPI/TypedefUnderlyingTypeResolver.h
new file mode 100644
index 000000000000..54aa11c354c0
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/ExtractAPI/TypedefUnderlyingTypeResolver.h
@@ -0,0 +1,48 @@
+//===- ExtractAPI/TypedefUnderlyingTypeResolver.h ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the UnderlyingTypeResolver which is a helper type for
+/// resolving the undelrying type for a given QualType and exposing that
+/// information in various forms.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_UNDERLYING_TYPE_RESOLVER_H
+#define LLVM_CLANG_UNDERLYING_TYPE_RESOLVER_H
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/ExtractAPI/API.h"
+
+#include <string>
+
+namespace clang {
+namespace extractapi {
+
+struct TypedefUnderlyingTypeResolver {
+ /// Gets the underlying type declaration.
+ const NamedDecl *getUnderlyingTypeDecl(QualType Type) const;
+
+ /// Get a SymbolReference for the given type.
+ SymbolReference getSymbolReferenceForType(QualType Type, APISet &API) const;
+
+ /// Get a USR for the given type.
+ std::string getUSRForType(QualType Type) const;
+
+ explicit TypedefUnderlyingTypeResolver(ASTContext &Context)
+ : Context(Context) {}
+
+private:
+ ASTContext &Context;
+};
+
+} // namespace extractapi
+} // namespace clang
+
+#endif // LLVM_CLANG_UNDERLYING_TYPE_RESOLVER_H
diff --git a/contrib/llvm-project/clang/include/clang/Format/Format.h b/contrib/llvm-project/clang/include/clang/Format/Format.h
index c424e79a971c..efcb4e1d87ea 100755
--- a/contrib/llvm-project/clang/include/clang/Format/Format.h
+++ b/contrib/llvm-project/clang/include/clang/Format/Format.h
@@ -20,6 +20,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/SourceMgr.h"
+#include <optional>
#include <system_error>
namespace llvm {
@@ -29,18 +30,17 @@ class FileSystem;
} // namespace llvm
namespace clang {
-
-class Lexer;
-class SourceManager;
-class DiagnosticConsumer;
-
namespace format {
enum class ParseError {
Success = 0,
Error,
Unsuitable,
- BinPackTrailingCommaConflict
+ BinPackTrailingCommaConflict,
+ InvalidQualifierSpecified,
+ DuplicateQualifierSpecified,
+ MissingQualifierType,
+ MissingQualifierOrder
};
class ParseErrorCategory final : public std::error_category {
public:
@@ -59,10 +59,11 @@ struct FormatStyle {
bool InheritsParentConfig;
/// The extra indent or outdent of access modifiers, e.g. ``public:``.
+ /// \version 3.3
int AccessModifierOffset;
/// Different styles for aligning after open brackets.
- enum BracketAlignmentStyle : unsigned char {
+ enum BracketAlignmentStyle : int8_t {
/// Align parameters on the open bracket, e.g.:
/// \code
/// someLongFunction(argument1,
@@ -82,16 +83,31 @@ struct FormatStyle {
/// argument1, argument2);
/// \endcode
BAS_AlwaysBreak,
+ /// Always break after an open bracket, if the parameters don't fit
+ /// on a single line. Closing brackets will be placed on a new line.
+ /// E.g.:
+ /// \code
+ /// someLongFunction(
+ /// argument1, argument2
+ /// )
+ /// \endcode
+ ///
+ /// \note
+ /// This currently only applies to braced initializer lists (when
+ /// ``Cpp11BracedListStyle`` is ``true``) and parentheses.
+ /// \endnote
+ BAS_BlockIndent,
};
/// If ``true``, horizontally aligns arguments after an open bracket.
///
/// This applies to round brackets (parentheses), angle brackets and square
/// brackets.
+ /// \version 3.8
BracketAlignmentStyle AlignAfterOpenBracket;
/// Different style for aligning array initializers.
- enum ArrayInitializerAlignmentStyle {
+ enum ArrayInitializerAlignmentStyle : int8_t {
/// Align array column and left justify the columns e.g.:
/// \code
/// struct test demo[] =
@@ -117,20 +133,143 @@ struct FormatStyle {
};
/// if not ``None``, when using initialization for an array of structs
/// aligns the fields into columns.
+ ///
+ /// \note
+ /// As of clang-format 15 this option only applied to arrays with equal
+ /// number of columns per row.
+ /// \endnote
+ ///
+ /// \version 13
ArrayInitializerAlignmentStyle AlignArrayOfStructures;
- /// Styles for alignment of consecutive tokens. Tokens can be assignment signs
- /// (see
- /// ``AlignConsecutiveAssignments``), bitfield member separators (see
- /// ``AlignConsecutiveBitFields``), names in declarations (see
- /// ``AlignConsecutiveDeclarations``) or macro definitions (see
- /// ``AlignConsecutiveMacros``).
- enum AlignConsecutiveStyle {
- ACS_None,
- ACS_Consecutive,
- ACS_AcrossEmptyLines,
- ACS_AcrossComments,
- ACS_AcrossEmptyLinesAndComments
+ /// Alignment options.
+ ///
+ /// They can also be read as a whole for compatibility. The choices are:
+ /// - None
+ /// - Consecutive
+ /// - AcrossEmptyLines
+ /// - AcrossComments
+ /// - AcrossEmptyLinesAndComments
+ ///
+ /// For example, to align across empty lines and not across comments, either
+ /// of these work.
+ /// \code
+ /// AlignConsecutiveMacros: AcrossEmptyLines
+ ///
+ /// AlignConsecutiveMacros:
+ /// Enabled: true
+ /// AcrossEmptyLines: true
+ /// AcrossComments: false
+ /// \endcode
+ struct AlignConsecutiveStyle {
+ /// Whether aligning is enabled.
+ /// \code
+ /// #define SHORT_NAME 42
+ /// #define LONGER_NAME 0x007f
+ /// #define EVEN_LONGER_NAME (2)
+ /// #define foo(x) (x * x)
+ /// #define bar(y, z) (y + z)
+ ///
+ /// int a = 1;
+ /// int somelongname = 2;
+ /// double c = 3;
+ ///
+ /// int aaaa : 1;
+ /// int b : 12;
+ /// int ccc : 8;
+ ///
+ /// int aaaa = 12;
+ /// float b = 23;
+ /// std::string ccc;
+ /// \endcode
+ bool Enabled;
+ /// Whether to align across empty lines.
+ /// \code
+ /// true:
+ /// int a = 1;
+ /// int somelongname = 2;
+ /// double c = 3;
+ ///
+ /// int d = 3;
+ ///
+ /// false:
+ /// int a = 1;
+ /// int somelongname = 2;
+ /// double c = 3;
+ ///
+ /// int d = 3;
+ /// \endcode
+ bool AcrossEmptyLines;
+ /// Whether to align across comments.
+ /// \code
+ /// true:
+ /// int d = 3;
+ /// /* A comment. */
+ /// double e = 4;
+ ///
+ /// false:
+ /// int d = 3;
+ /// /* A comment. */
+ /// double e = 4;
+ /// \endcode
+ bool AcrossComments;
+ /// Only for ``AlignConsecutiveAssignments``. Whether compound assignments
+ /// like ``+=`` are aligned along with ``=``.
+ /// \code
+ /// true:
+ /// a &= 2;
+ /// bbb = 2;
+ ///
+ /// false:
+ /// a &= 2;
+ /// bbb = 2;
+ /// \endcode
+ bool AlignCompound;
+ /// Only for ``AlignConsecutiveDeclarations``. Whether function pointers are
+ /// aligned.
+ /// \code
+ /// true:
+ /// unsigned i;
+ /// int &r;
+ /// int *p;
+ /// int (*f)();
+ ///
+ /// false:
+ /// unsigned i;
+ /// int &r;
+ /// int *p;
+ /// int (*f)();
+ /// \endcode
+ bool AlignFunctionPointers;
+ /// Only for ``AlignConsecutiveAssignments``. Whether short assignment
+ /// operators are left-padded to the same length as long ones in order to
+ /// put all assignment operators to the right of the left hand side.
+ /// \code
+ /// true:
+ /// a >>= 2;
+ /// bbb = 2;
+ ///
+ /// a = 2;
+ /// bbb >>= 2;
+ ///
+ /// false:
+ /// a >>= 2;
+ /// bbb = 2;
+ ///
+ /// a = 2;
+ /// bbb >>= 2;
+ /// \endcode
+ bool PadOperators;
+ bool operator==(const AlignConsecutiveStyle &R) const {
+ return Enabled == R.Enabled && AcrossEmptyLines == R.AcrossEmptyLines &&
+ AcrossComments == R.AcrossComments &&
+ AlignCompound == R.AlignCompound &&
+ AlignFunctionPointers == R.AlignFunctionPointers &&
+ PadOperators == R.PadOperators;
+ }
+ bool operator!=(const AlignConsecutiveStyle &R) const {
+ return !(*this == R);
+ }
};
/// Style of aligning consecutive macro definitions.
@@ -143,66 +282,8 @@ struct FormatStyle {
/// #define foo(x) (x * x)
/// #define bar(y, z) (y + z)
/// \endcode
- ///
- /// Possible values:
- ///
- /// * ``ACS_None`` (in configuration: ``None``)
- /// Do not align macro definitions on consecutive lines.
- ///
- /// * ``ACS_Consecutive`` (in configuration: ``Consecutive``)
- /// Align macro definitions on consecutive lines. This will result in
- /// formattings like:
- /// \code
- /// #define SHORT_NAME 42
- /// #define LONGER_NAME 0x007f
- /// #define EVEN_LONGER_NAME (2)
- ///
- /// #define foo(x) (x * x)
- /// /* some comment */
- /// #define bar(y, z) (y + z)
- /// \endcode
- ///
- /// * ``ACS_AcrossEmptyLines`` (in configuration: ``AcrossEmptyLines``)
- /// Same as ACS_Consecutive, but also spans over empty lines, e.g.
- /// \code
- /// #define SHORT_NAME 42
- /// #define LONGER_NAME 0x007f
- /// #define EVEN_LONGER_NAME (2)
- ///
- /// #define foo(x) (x * x)
- /// /* some comment */
- /// #define bar(y, z) (y + z)
- /// \endcode
- ///
- /// * ``ACS_AcrossComments`` (in configuration: ``AcrossComments``)
- /// Same as ACS_Consecutive, but also spans over lines only containing
- /// comments, e.g.
- /// \code
- /// #define SHORT_NAME 42
- /// #define LONGER_NAME 0x007f
- /// #define EVEN_LONGER_NAME (2)
- ///
- /// #define foo(x) (x * x)
- /// /* some comment */
- /// #define bar(y, z) (y + z)
- /// \endcode
- ///
- /// * ``ACS_AcrossEmptyLinesAndComments``
- /// (in configuration: ``AcrossEmptyLinesAndComments``)
- ///
- /// Same as ACS_Consecutive, but also spans over lines only containing
- /// comments and empty lines, e.g.
- /// \code
- /// #define SHORT_NAME 42
- /// #define LONGER_NAME 0x007f
- /// #define EVEN_LONGER_NAME (2)
- ///
- /// #define foo(x) (x * x)
- /// /* some comment */
- /// #define bar(y, z) (y + z)
- /// \endcode
+ /// \version 9
AlignConsecutiveStyle AlignConsecutiveMacros;
-
/// Style of aligning consecutive assignments.
///
/// ``Consecutive`` will result in formattings like:
@@ -211,67 +292,9 @@ struct FormatStyle {
/// int somelongname = 2;
/// double c = 3;
/// \endcode
- ///
- /// Possible values:
- ///
- /// * ``ACS_None`` (in configuration: ``None``)
- /// Do not align assignments on consecutive lines.
- ///
- /// * ``ACS_Consecutive`` (in configuration: ``Consecutive``)
- /// Align assignments on consecutive lines. This will result in
- /// formattings like:
- /// \code
- /// int a = 1;
- /// int somelongname = 2;
- /// double c = 3;
- ///
- /// int d = 3;
- /// /* A comment. */
- /// double e = 4;
- /// \endcode
- ///
- /// * ``ACS_AcrossEmptyLines`` (in configuration: ``AcrossEmptyLines``)
- /// Same as ACS_Consecutive, but also spans over empty lines, e.g.
- /// \code
- /// int a = 1;
- /// int somelongname = 2;
- /// double c = 3;
- ///
- /// int d = 3;
- /// /* A comment. */
- /// double e = 4;
- /// \endcode
- ///
- /// * ``ACS_AcrossComments`` (in configuration: ``AcrossComments``)
- /// Same as ACS_Consecutive, but also spans over lines only containing
- /// comments, e.g.
- /// \code
- /// int a = 1;
- /// int somelongname = 2;
- /// double c = 3;
- ///
- /// int d = 3;
- /// /* A comment. */
- /// double e = 4;
- /// \endcode
- ///
- /// * ``ACS_AcrossEmptyLinesAndComments``
- /// (in configuration: ``AcrossEmptyLinesAndComments``)
- ///
- /// Same as ACS_Consecutive, but also spans over lines only containing
- /// comments and empty lines, e.g.
- /// \code
- /// int a = 1;
- /// int somelongname = 2;
- /// double c = 3;
- ///
- /// int d = 3;
- /// /* A comment. */
- /// double e = 4;
- /// \endcode
+ /// \version 3.8
AlignConsecutiveStyle AlignConsecutiveAssignments;
-
- /// Style of aligning consecutive bit field.
+ /// Style of aligning consecutive bit fields.
///
/// ``Consecutive`` will align the bitfield separators of consecutive lines.
/// This will result in formattings like:
@@ -280,66 +303,8 @@ struct FormatStyle {
/// int b : 12;
/// int ccc : 8;
/// \endcode
- ///
- /// Possible values:
- ///
- /// * ``ACS_None`` (in configuration: ``None``)
- /// Do not align bit fields on consecutive lines.
- ///
- /// * ``ACS_Consecutive`` (in configuration: ``Consecutive``)
- /// Align bit fields on consecutive lines. This will result in
- /// formattings like:
- /// \code
- /// int aaaa : 1;
- /// int b : 12;
- /// int ccc : 8;
- ///
- /// int d : 2;
- /// /* A comment. */
- /// int ee : 3;
- /// \endcode
- ///
- /// * ``ACS_AcrossEmptyLines`` (in configuration: ``AcrossEmptyLines``)
- /// Same as ACS_Consecutive, but also spans over empty lines, e.g.
- /// \code
- /// int aaaa : 1;
- /// int b : 12;
- /// int ccc : 8;
- ///
- /// int d : 2;
- /// /* A comment. */
- /// int ee : 3;
- /// \endcode
- ///
- /// * ``ACS_AcrossComments`` (in configuration: ``AcrossComments``)
- /// Same as ACS_Consecutive, but also spans over lines only containing
- /// comments, e.g.
- /// \code
- /// int aaaa : 1;
- /// int b : 12;
- /// int ccc : 8;
- ///
- /// int d : 2;
- /// /* A comment. */
- /// int ee : 3;
- /// \endcode
- ///
- /// * ``ACS_AcrossEmptyLinesAndComments``
- /// (in configuration: ``AcrossEmptyLinesAndComments``)
- ///
- /// Same as ACS_Consecutive, but also spans over lines only containing
- /// comments and empty lines, e.g.
- /// \code
- /// int aaaa : 1;
- /// int b : 12;
- /// int ccc : 8;
- ///
- /// int d : 2;
- /// /* A comment. */
- /// int ee : 3;
- /// \endcode
+ /// \version 11
AlignConsecutiveStyle AlignConsecutiveBitFields;
-
/// Style of aligning consecutive declarations.
///
/// ``Consecutive`` will align the declaration names of consecutive lines.
@@ -349,68 +314,108 @@ struct FormatStyle {
/// float b = 23;
/// std::string ccc;
/// \endcode
- ///
- /// Possible values:
- ///
- /// * ``ACS_None`` (in configuration: ``None``)
- /// Do not align bit declarations on consecutive lines.
- ///
- /// * ``ACS_Consecutive`` (in configuration: ``Consecutive``)
- /// Align declarations on consecutive lines. This will result in
- /// formattings like:
- /// \code
- /// int aaaa = 12;
- /// float b = 23;
- /// std::string ccc;
- ///
- /// int a = 42;
- /// /* A comment. */
- /// bool c = false;
- /// \endcode
- ///
- /// * ``ACS_AcrossEmptyLines`` (in configuration: ``AcrossEmptyLines``)
- /// Same as ACS_Consecutive, but also spans over empty lines, e.g.
- /// \code
- /// int aaaa = 12;
- /// float b = 23;
- /// std::string ccc;
- ///
- /// int a = 42;
- /// /* A comment. */
- /// bool c = false;
- /// \endcode
- ///
- /// * ``ACS_AcrossComments`` (in configuration: ``AcrossComments``)
- /// Same as ACS_Consecutive, but also spans over lines only containing
- /// comments, e.g.
- /// \code
- /// int aaaa = 12;
- /// float b = 23;
- /// std::string ccc;
- ///
- /// int a = 42;
- /// /* A comment. */
- /// bool c = false;
- /// \endcode
- ///
- /// * ``ACS_AcrossEmptyLinesAndComments``
- /// (in configuration: ``AcrossEmptyLinesAndComments``)
- ///
- /// Same as ACS_Consecutive, but also spans over lines only containing
- /// comments and empty lines, e.g.
- /// \code
- /// int aaaa = 12;
- /// float b = 23;
- /// std::string ccc;
- ///
- /// int a = 42;
- /// /* A comment. */
- /// bool c = false;
- /// \endcode
+ /// \version 3.8
AlignConsecutiveStyle AlignConsecutiveDeclarations;
+ /// Alignment options.
+ ///
+ struct ShortCaseStatementsAlignmentStyle {
+ /// Whether aligning is enabled.
+ /// \code
+ /// true:
+ /// switch (level) {
+ /// case log::info: return "info:";
+ /// case log::warning: return "warning:";
+ /// default: return "";
+ /// }
+ ///
+ /// false:
+ /// switch (level) {
+ /// case log::info: return "info:";
+ /// case log::warning: return "warning:";
+ /// default: return "";
+ /// }
+ /// \endcode
+ bool Enabled;
+ /// Whether to align across empty lines.
+ /// \code
+ /// true:
+ /// switch (level) {
+ /// case log::info: return "info:";
+ /// case log::warning: return "warning:";
+ ///
+ /// default: return "";
+ /// }
+ ///
+ /// false:
+ /// switch (level) {
+ /// case log::info: return "info:";
+ /// case log::warning: return "warning:";
+ ///
+ /// default: return "";
+ /// }
+ /// \endcode
+ bool AcrossEmptyLines;
+ /// Whether to align across comments.
+ /// \code
+ /// true:
+ /// switch (level) {
+ /// case log::info: return "info:";
+ /// case log::warning: return "warning:";
+ /// /* A comment. */
+ /// default: return "";
+ /// }
+ ///
+ /// false:
+ /// switch (level) {
+ /// case log::info: return "info:";
+ /// case log::warning: return "warning:";
+ /// /* A comment. */
+ /// default: return "";
+ /// }
+ /// \endcode
+ bool AcrossComments;
+ /// Whether aligned case labels are aligned on the colon, or on the
+ /// , or on the tokens after the colon.
+ /// \code
+ /// true:
+ /// switch (level) {
+ /// case log::info : return "info:";
+ /// case log::warning: return "warning:";
+ /// default : return "";
+ /// }
+ ///
+ /// false:
+ /// switch (level) {
+ /// case log::info: return "info:";
+ /// case log::warning: return "warning:";
+ /// default: return "";
+ /// }
+ /// \endcode
+ bool AlignCaseColons;
+ bool operator==(const ShortCaseStatementsAlignmentStyle &R) const {
+ return Enabled == R.Enabled && AcrossEmptyLines == R.AcrossEmptyLines &&
+ AcrossComments == R.AcrossComments &&
+ AlignCaseColons == R.AlignCaseColons;
+ }
+ };
+
+ /// Style of aligning consecutive short case labels.
+ /// Only applies if ``AllowShortCaseLabelsOnASingleLine`` is ``true``.
+ ///
+ /// \code{.yaml}
+ /// # Example of usage:
+ /// AlignConsecutiveShortCaseStatements:
+ /// Enabled: true
+ /// AcrossEmptyLines: true
+ /// AcrossComments: true
+ /// AlignCaseColons: false
+ /// \endcode
+ /// \version 17
+ ShortCaseStatementsAlignmentStyle AlignConsecutiveShortCaseStatements;
+
/// Different styles for aligning escaped newlines.
- enum EscapedNewlineAlignmentStyle : unsigned char {
+ enum EscapedNewlineAlignmentStyle : int8_t {
/// Don't align escaped newlines.
/// \code
/// #define A \
@@ -441,10 +446,11 @@ struct FormatStyle {
};
/// Options for aligning backslashes in escaped newlines.
+ /// \version 5
EscapedNewlineAlignmentStyle AlignEscapedNewlines;
/// Different styles for aligning operands.
- enum OperandAlignmentStyle : unsigned char {
+ enum OperandAlignmentStyle : int8_t {
/// Do not align operands of binary and ternary expressions.
/// The wrapped lines are indented ``ContinuationIndentWidth`` spaces from
/// the start of the line.
@@ -479,15 +485,95 @@ struct FormatStyle {
/// If ``true``, horizontally align operands of binary and ternary
/// expressions.
+ /// \version 3.5
OperandAlignmentStyle AlignOperands;
- /// If ``true``, aligns trailing comments.
- /// \code
- /// true: false:
- /// int a; // My comment a vs. int a; // My comment a
- /// int b = 2; // comment b int b = 2; // comment about b
+ /// Enums for AlignTrailingComments
+ enum TrailingCommentsAlignmentKinds : int8_t {
+ /// Leave trailing comments as they are.
+ /// \code
+ /// int a; // comment
+ /// int ab; // comment
+ ///
+ /// int abc; // comment
+ /// int abcd; // comment
+ /// \endcode
+ TCAS_Leave,
+ /// Align trailing comments.
+ /// \code
+ /// int a; // comment
+ /// int ab; // comment
+ ///
+ /// int abc; // comment
+ /// int abcd; // comment
+ /// \endcode
+ TCAS_Always,
+ /// Don't align trailing comments but other formatter applies.
+ /// \code
+ /// int a; // comment
+ /// int ab; // comment
+ ///
+ /// int abc; // comment
+ /// int abcd; // comment
+ /// \endcode
+ TCAS_Never,
+ };
+
+ /// Alignment options
+ struct TrailingCommentsAlignmentStyle {
+ /// Specifies the way to align trailing comments.
+ TrailingCommentsAlignmentKinds Kind;
+ /// How many empty lines to apply alignment.
+ /// When both ``MaxEmptyLinesToKeep`` and ``OverEmptyLines`` are set to 2,
+ /// it formats like below.
+ /// \code
+ /// int a; // all these
+ ///
+ /// int ab; // comments are
+ ///
+ ///
+ /// int abcdef; // aligned
+ /// \endcode
+ ///
+ /// When ``MaxEmptyLinesToKeep`` is set to 2 and ``OverEmptyLines`` is set
+ /// to 1, it formats like below.
+ /// \code
+ /// int a; // these are
+ ///
+ /// int ab; // aligned
+ ///
+ ///
+ /// int abcdef; // but this isn't
+ /// \endcode
+ unsigned OverEmptyLines;
+
+ bool operator==(const TrailingCommentsAlignmentStyle &R) const {
+ return Kind == R.Kind && OverEmptyLines == R.OverEmptyLines;
+ }
+ bool operator!=(const TrailingCommentsAlignmentStyle &R) const {
+ return !(*this == R);
+ }
+ };
+
+ /// Control of trailing comments.
+ ///
+ /// The alignment stops at closing braces after a line break, and only
+ /// followed by other closing braces, a (``do-``) ``while``, a lambda call, or
+ /// a semicolon.
+ ///
+ /// \note
+ /// As of clang-format 16 this option is not a bool but can be set
+ /// to the options. Conventional bool options still can be parsed as before.
+ /// \endnote
+ ///
+ /// \code{.yaml}
+ /// # Example of usage:
+ /// AlignTrailingComments:
+ /// Kind: Always
+ /// OverEmptyLines: 2
/// \endcode
- bool AlignTrailingComments;
+ /// \version 3.7
+ TrailingCommentsAlignmentStyle AlignTrailingComments;
/// \brief If a function call or braced initializer list doesn't fit on a
/// line, allow putting all arguments onto the next line, even if
@@ -503,23 +589,13 @@ struct FormatStyle {
/// c,
/// d);
/// \endcode
+ /// \version 9
bool AllowAllArgumentsOnNextLine;
- /// \brief If a constructor definition with a member initializer list doesn't
- /// fit on a single line, allow putting all member initializers onto the next
- /// line, if ```ConstructorInitializerAllOnOneLineOrOnePerLine``` is true.
- /// Note that this parameter has no effect if
- /// ```ConstructorInitializerAllOnOneLineOrOnePerLine``` is false.
- /// \code
- /// true:
- /// MyClass::MyClass() :
- /// member0(0), member1(2) {}
- ///
- /// false:
- /// MyClass::MyClass() :
- /// member0(0),
- /// member1(2) {}
- bool AllowAllConstructorInitializersOnNextLine;
+ /// This option is **deprecated**. See ``NextLine`` of
+ /// ``PackConstructorInitializers``.
+ /// \version 9
+ // bool AllowAllConstructorInitializersOnNextLine;
/// If the function declaration doesn't fit on a line,
/// allow putting all parameters of a function declaration onto
@@ -536,25 +612,53 @@ struct FormatStyle {
/// int d,
/// int e);
/// \endcode
+ /// \version 3.3
bool AllowAllParametersOfDeclarationOnNextLine;
- /// Allow short enums on a single line.
- /// \code
- /// true:
- /// enum { A, B } myEnum;
- ///
- /// false:
- /// enum
- /// {
- /// A,
- /// B
- /// } myEnum;
- /// \endcode
- bool AllowShortEnumsOnASingleLine;
+ /// Different ways to break before a noexcept specifier.
+ enum BreakBeforeNoexceptSpecifierStyle : int8_t {
+ /// No line break allowed.
+ /// \code
+ /// void foo(int arg1,
+ /// double arg2) noexcept;
+ ///
+ /// void bar(int arg1, double arg2) noexcept(
+ /// noexcept(baz(arg1)) &&
+ /// noexcept(baz(arg2)));
+ /// \endcode
+ BBNSS_Never,
+ /// For a simple ``noexcept`` there is no line break allowed, but when we
+ /// have a condition it is.
+ /// \code
+ /// void foo(int arg1,
+ /// double arg2) noexcept;
+ ///
+ /// void bar(int arg1, double arg2)
+ /// noexcept(noexcept(baz(arg1)) &&
+ /// noexcept(baz(arg2)));
+ /// \endcode
+ BBNSS_OnlyWithParen,
+ /// Line breaks are allowed. But note that because of the associated
+ /// penalties ``clang-format`` often prefers not to break before the
+ /// ``noexcept``.
+ /// \code
+ /// void foo(int arg1,
+ /// double arg2) noexcept;
+ ///
+ /// void bar(int arg1, double arg2)
+ /// noexcept(noexcept(baz(arg1)) &&
+ /// noexcept(baz(arg2)));
+ /// \endcode
+ BBNSS_Always,
+ };
+
+ /// Controls if there could be a line break before a ``noexcept`` specifier.
+ /// \version 18
+ BreakBeforeNoexceptSpecifierStyle AllowBreakBeforeNoexceptSpecifier;
/// Different styles for merging short blocks containing at most one
/// statement.
- enum ShortBlockStyle : unsigned char {
+ enum ShortBlockStyle : int8_t {
/// Never merge blocks into a single line.
/// \code
/// while (true) {
@@ -582,6 +686,7 @@ struct FormatStyle {
/// Dependent on the value, ``while (true) { continue; }`` can be put on a
/// single line.
+ /// \version 3.5
ShortBlockStyle AllowShortBlocksOnASingleLine;
/// If ``true``, short case labels will be contracted to a single line.
@@ -595,11 +700,45 @@ struct FormatStyle {
/// return;
/// }
/// \endcode
+ /// \version 3.6
bool AllowShortCaseLabelsOnASingleLine;
+ /// Allow short compound requirement on a single line.
+ /// \code
+ /// true:
+ /// template <typename T>
+ /// concept c = requires(T x) {
+ /// { x + 1 } -> std::same_as<int>;
+ /// };
+ ///
+ /// false:
+ /// template <typename T>
+ /// concept c = requires(T x) {
+ /// {
+ /// x + 1
+ /// } -> std::same_as<int>;
+ /// };
+ /// \endcode
+ /// \version 18
+ bool AllowShortCompoundRequirementOnASingleLine;
+
+ /// Allow short enums on a single line.
+ /// \code
+ /// true:
+ /// enum { A, B } myEnum;
+ ///
+ /// false:
+ /// enum {
+ /// A,
+ /// B
+ /// } myEnum;
+ /// \endcode
+ /// \version 11
+ bool AllowShortEnumsOnASingleLine;
+
/// Different styles for merging short functions containing at most one
/// statement.
- enum ShortFunctionStyle : unsigned char {
+ enum ShortFunctionStyle : int8_t {
/// Never merge functions into a single line.
SFS_None,
/// Only merge functions defined inside a class. Same as "inline",
@@ -647,10 +786,11 @@ struct FormatStyle {
/// Dependent on the value, ``int f() { return 0; }`` can be put on a
/// single line.
+ /// \version 3.5
ShortFunctionStyle AllowShortFunctionsOnASingleLine;
/// Different styles for handling short if statements.
- enum ShortIfStyle : unsigned char {
+ enum ShortIfStyle : int8_t {
/// Never put short ifs on the same line.
/// \code
/// if (a)
@@ -717,32 +857,33 @@ struct FormatStyle {
};
/// Dependent on the value, ``if (a) return;`` can be put on a single line.
+ /// \version 3.3
ShortIfStyle AllowShortIfStatementsOnASingleLine;
/// Different styles for merging short lambdas containing at most one
/// statement.
- enum ShortLambdaStyle : unsigned char {
+ enum ShortLambdaStyle : int8_t {
/// Never merge lambdas into a single line.
SLS_None,
/// Only merge empty lambdas.
/// \code
- /// auto lambda = [](int a) {}
+ /// auto lambda = [](int a) {};
/// auto lambda2 = [](int a) {
/// return a;
/// };
/// \endcode
SLS_Empty,
- /// Merge lambda into a single line if argument of a function.
+ /// Merge lambda into a single line if the lambda is argument of a function.
/// \code
- /// auto lambda = [](int a) {
- /// return a;
+ /// auto lambda = [](int x, int y) {
+ /// return x < y;
/// };
- /// sort(a.begin(), a.end(), ()[] { return x < y; })
+ /// sort(a.begin(), a.end(), [](int x, int y) { return x < y; });
/// \endcode
SLS_Inline,
/// Merge all lambdas fitting on a single line.
/// \code
- /// auto lambda = [](int a) {}
+ /// auto lambda = [](int a) {};
/// auto lambda2 = [](int a) { return a; };
/// \endcode
SLS_All,
@@ -750,15 +891,17 @@ struct FormatStyle {
/// Dependent on the value, ``auto lambda []() { return 0; }`` can be put on a
/// single line.
+ /// \version 9
ShortLambdaStyle AllowShortLambdasOnASingleLine;
/// If ``true``, ``while (true) continue;`` can be put on a single
/// line.
+ /// \version 3.7
bool AllowShortLoopsOnASingleLine;
/// Different ways to break after the function definition return type.
/// This option is **deprecated** and is retained for backwards compatibility.
- enum DefinitionReturnTypeBreakingStyle : unsigned char {
+ enum DefinitionReturnTypeBreakingStyle : int8_t {
/// Break after return type automatically.
/// ``PenaltyReturnTypeOnItsOwnLine`` is taken into account.
DRTBS_None,
@@ -770,7 +913,7 @@ struct FormatStyle {
/// Different ways to break after the function definition or
/// declaration return type.
- enum ReturnTypeBreakingStyle : unsigned char {
+ enum ReturnTypeBreakingStyle : int8_t {
/// Break after return type automatically.
/// ``PenaltyReturnTypeOnItsOwnLine`` is taken into account.
/// \code
@@ -841,9 +984,11 @@ struct FormatStyle {
/// The function definition return type breaking style to use. This
/// option is **deprecated** and is retained for backwards compatibility.
+ /// \version 3.7
DefinitionReturnTypeBreakingStyle AlwaysBreakAfterDefinitionReturnType;
/// The function declaration return type breaking style to use.
+ /// \version 3.8
ReturnTypeBreakingStyle AlwaysBreakAfterReturnType;
/// If ``true``, always break before multiline string literals.
@@ -858,10 +1003,11 @@ struct FormatStyle {
/// "bbbb" "cccc";
/// "cccc";
/// \endcode
+ /// \version 3.4
bool AlwaysBreakBeforeMultilineStrings;
/// Different ways to break after the template declaration.
- enum BreakTemplateDeclarationsStyle : unsigned char {
+ enum BreakTemplateDeclarationsStyle : int8_t {
/// Do not force break before declaration.
/// ``PenaltyBreakTemplateDeclaration`` is taken into account.
/// \code
@@ -897,6 +1043,7 @@ struct FormatStyle {
};
/// The template declaration breaking style to use.
+ /// \version 3.4
BreakTemplateDeclarationsStyle AlwaysBreakTemplateDeclarations;
/// A vector of strings that should be interpreted as attributes/qualifiers
@@ -906,15 +1053,16 @@ struct FormatStyle {
/// For example:
/// \code
/// x = (char *__capability)&y;
- /// int function(void) __ununsed;
+ /// int function(void) __unused;
/// void only_writes_to_buffer(char *__output buffer);
/// \endcode
///
/// In the .clang-format configuration file, this can be configured like:
/// \code{.yaml}
- /// AttributeMacros: ['__capability', '__output', '__ununsed']
+ /// AttributeMacros: ['__capability', '__output', '__unused']
/// \endcode
///
+ /// \version 12
std::vector<std::string> AttributeMacros;
/// If ``false``, a function call's arguments will either be all on the
@@ -933,37 +1081,9 @@ struct FormatStyle {
/// aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa);
/// }
/// \endcode
+ /// \version 3.7
bool BinPackArguments;
- /// The style of inserting trailing commas into container literals.
- enum TrailingCommaStyle : unsigned char {
- /// Do not insert trailing commas.
- TCS_None,
- /// Insert trailing commas in container literals that were wrapped over
- /// multiple lines. Note that this is conceptually incompatible with
- /// bin-packing, because the trailing comma is used as an indicator
- /// that a container should be formatted one-per-line (i.e. not bin-packed).
- /// So inserting a trailing comma counteracts bin-packing.
- TCS_Wrapped,
- };
-
- /// If set to ``TCS_Wrapped`` will insert trailing commas in container
- /// literals (arrays and objects) that wrap across multiple lines.
- /// It is currently only available for JavaScript
- /// and disabled by default ``TCS_None``.
- /// ``InsertTrailingCommas`` cannot be used together with ``BinPackArguments``
- /// as inserting the comma disables bin-packing.
- /// \code
- /// TSC_Wrapped:
- /// const someArray = [
- /// aaaaaaaaaaaaaaaaaaaaaaaaaa,
- /// aaaaaaaaaaaaaaaaaaaaaaaaaa,
- /// aaaaaaaaaaaaaaaaaaaaaaaaaa,
- /// // ^ inserted
- /// ]
- /// \endcode
- TrailingCommaStyle InsertTrailingCommas;
-
/// If ``false``, a function declaration's or function definition's
/// parameters will either all be on the same line or will have one line each.
/// \code
@@ -976,11 +1096,474 @@ struct FormatStyle {
/// int aaaaaaaaaaaaaaaaaaaa,
/// int aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa) {}
/// \endcode
+ /// \version 3.7
bool BinPackParameters;
+ /// Styles for adding spacing around ``:`` in bitfield definitions.
+ enum BitFieldColonSpacingStyle : int8_t {
+ /// Add one space on each side of the ``:``
+ /// \code
+ /// unsigned bf : 2;
+ /// \endcode
+ BFCS_Both,
+ /// Add no space around the ``:`` (except when needed for
+ /// ``AlignConsecutiveBitFields``).
+ /// \code
+ /// unsigned bf:2;
+ /// \endcode
+ BFCS_None,
+ /// Add space before the ``:`` only
+ /// \code
+ /// unsigned bf :2;
+ /// \endcode
+ BFCS_Before,
+ /// Add space after the ``:`` only (space may be added before if
+ /// needed for ``AlignConsecutiveBitFields``).
+ /// \code
+ /// unsigned bf: 2;
+ /// \endcode
+ BFCS_After
+ };
+ /// The BitFieldColonSpacingStyle to use for bitfields.
+ /// \version 12
+ BitFieldColonSpacingStyle BitFieldColonSpacing;
+
+ /// The number of columns to use to indent the contents of braced init lists.
+ /// If unset, ``ContinuationIndentWidth`` is used.
+ /// \code
+ /// AlignAfterOpenBracket: AlwaysBreak
+ /// BracedInitializerIndentWidth: 2
+ ///
+ /// void f() {
+ /// SomeClass c{
+ /// "foo",
+ /// "bar",
+ /// "baz",
+ /// };
+ /// auto s = SomeStruct{
+ /// .foo = "foo",
+ /// .bar = "bar",
+ /// .baz = "baz",
+ /// };
+ /// SomeArrayT a[3] = {
+ /// {
+ /// foo,
+ /// bar,
+ /// },
+ /// {
+ /// foo,
+ /// bar,
+ /// },
+ /// SomeArrayT{},
+ /// };
+ /// }
+ /// \endcode
+ /// \version 17
+ std::optional<unsigned> BracedInitializerIndentWidth;
+
+ /// Different ways to wrap braces after control statements.
+ enum BraceWrappingAfterControlStatementStyle : int8_t {
+ /// Never wrap braces after a control statement.
+ /// \code
+ /// if (foo()) {
+ /// } else {
+ /// }
+ /// for (int i = 0; i < 10; ++i) {
+ /// }
+ /// \endcode
+ BWACS_Never,
+ /// Only wrap braces after a multi-line control statement.
+ /// \code
+ /// if (foo && bar &&
+ /// baz)
+ /// {
+ /// quux();
+ /// }
+ /// while (foo || bar) {
+ /// }
+ /// \endcode
+ BWACS_MultiLine,
+ /// Always wrap braces after a control statement.
+ /// \code
+ /// if (foo())
+ /// {
+ /// } else
+ /// {}
+ /// for (int i = 0; i < 10; ++i)
+ /// {}
+ /// \endcode
+ BWACS_Always
+ };
+
+ /// Precise control over the wrapping of braces.
+ /// \code
+ /// # Should be declared this way:
+ /// BreakBeforeBraces: Custom
+ /// BraceWrapping:
+ /// AfterClass: true
+ /// \endcode
+ struct BraceWrappingFlags {
+ /// Wrap case labels.
+ /// \code
+ /// false: true:
+ /// switch (foo) { vs. switch (foo) {
+ /// case 1: { case 1:
+ /// bar(); {
+ /// break; bar();
+ /// } break;
+ /// default: { }
+ /// plop(); default:
+ /// } {
+ /// } plop();
+ /// }
+ /// }
+ /// \endcode
+ bool AfterCaseLabel;
+ /// Wrap class definitions.
+ /// \code
+ /// true:
+ /// class foo
+ /// {};
+ ///
+ /// false:
+ /// class foo {};
+ /// \endcode
+ bool AfterClass;
+
+ /// Wrap control statements (``if``/``for``/``while``/``switch``/..).
+ BraceWrappingAfterControlStatementStyle AfterControlStatement;
+ /// Wrap enum definitions.
+ /// \code
+ /// true:
+ /// enum X : int
+ /// {
+ /// B
+ /// };
+ ///
+ /// false:
+ /// enum X : int { B };
+ /// \endcode
+ bool AfterEnum;
+ /// Wrap function definitions.
+ /// \code
+ /// true:
+ /// void foo()
+ /// {
+ /// bar();
+ /// bar2();
+ /// }
+ ///
+ /// false:
+ /// void foo() {
+ /// bar();
+ /// bar2();
+ /// }
+ /// \endcode
+ bool AfterFunction;
+ /// Wrap namespace definitions.
+ /// \code
+ /// true:
+ /// namespace
+ /// {
+ /// int foo();
+ /// int bar();
+ /// }
+ ///
+ /// false:
+ /// namespace {
+ /// int foo();
+ /// int bar();
+ /// }
+ /// \endcode
+ bool AfterNamespace;
+ /// Wrap ObjC definitions (interfaces, implementations...).
+ /// \note
+ /// @autoreleasepool and @synchronized blocks are wrapped
+ /// according to ``AfterControlStatement`` flag.
+ /// \endnote
+ bool AfterObjCDeclaration;
+ /// Wrap struct definitions.
+ /// \code
+ /// true:
+ /// struct foo
+ /// {
+ /// int x;
+ /// };
+ ///
+ /// false:
+ /// struct foo {
+ /// int x;
+ /// };
+ /// \endcode
+ bool AfterStruct;
+ /// Wrap union definitions.
+ /// \code
+ /// true:
+ /// union foo
+ /// {
+ /// int x;
+ /// }
+ ///
+ /// false:
+ /// union foo {
+ /// int x;
+ /// }
+ /// \endcode
+ bool AfterUnion;
+ /// Wrap extern blocks.
+ /// \code
+ /// true:
+ /// extern "C"
+ /// {
+ /// int foo();
+ /// }
+ ///
+ /// false:
+ /// extern "C" {
+ /// int foo();
+ /// }
+ /// \endcode
+ bool AfterExternBlock; // Partially superseded by IndentExternBlock
+ /// Wrap before ``catch``.
+ /// \code
+ /// true:
+ /// try {
+ /// foo();
+ /// }
+ /// catch () {
+ /// }
+ ///
+ /// false:
+ /// try {
+ /// foo();
+ /// } catch () {
+ /// }
+ /// \endcode
+ bool BeforeCatch;
+ /// Wrap before ``else``.
+ /// \code
+ /// true:
+ /// if (foo()) {
+ /// }
+ /// else {
+ /// }
+ ///
+ /// false:
+ /// if (foo()) {
+ /// } else {
+ /// }
+ /// \endcode
+ bool BeforeElse;
+ /// Wrap lambda block.
+ /// \code
+ /// true:
+ /// connect(
+ /// []()
+ /// {
+ /// foo();
+ /// bar();
+ /// });
+ ///
+ /// false:
+ /// connect([]() {
+ /// foo();
+ /// bar();
+ /// });
+ /// \endcode
+ bool BeforeLambdaBody;
+ /// Wrap before ``while``.
+ /// \code
+ /// true:
+ /// do {
+ /// foo();
+ /// }
+ /// while (1);
+ ///
+ /// false:
+ /// do {
+ /// foo();
+ /// } while (1);
+ /// \endcode
+ bool BeforeWhile;
+ /// Indent the wrapped braces themselves.
+ bool IndentBraces;
+ /// If ``false``, empty function body can be put on a single line.
+ /// This option is used only if the opening brace of the function has
+ /// already been wrapped, i.e. the ``AfterFunction`` brace wrapping mode is
+ /// set, and the function could/should not be put on a single line (as per
+ /// ``AllowShortFunctionsOnASingleLine`` and constructor formatting
+ /// options).
+ /// \code
+ /// false: true:
+ /// int f() vs. int f()
+ /// {} {
+ /// }
+ /// \endcode
+ ///
+ bool SplitEmptyFunction;
+ /// If ``false``, empty record (e.g. class, struct or union) body
+ /// can be put on a single line. This option is used only if the opening
+ /// brace of the record has already been wrapped, i.e. the ``AfterClass``
+ /// (for classes) brace wrapping mode is set.
+ /// \code
+ /// false: true:
+ /// class Foo vs. class Foo
+ /// {} {
+ /// }
+ /// \endcode
+ ///
+ bool SplitEmptyRecord;
+ /// If ``false``, empty namespace body can be put on a single line.
+ /// This option is used only if the opening brace of the namespace has
+ /// already been wrapped, i.e. the ``AfterNamespace`` brace wrapping mode is
+ /// set.
+ /// \code
+ /// false: true:
+ /// namespace Foo vs. namespace Foo
+ /// {} {
+ /// }
+ /// \endcode
+ ///
+ bool SplitEmptyNamespace;
+ };
+
+ /// Control of individual brace wrapping cases.
+ ///
+ /// If ``BreakBeforeBraces`` is set to ``BS_Custom``, use this to specify how
+ /// each individual brace case should be handled. Otherwise, this is ignored.
+ /// \code{.yaml}
+ /// # Example of usage:
+ /// BreakBeforeBraces: Custom
+ /// BraceWrapping:
+ /// AfterEnum: true
+ /// AfterStruct: false
+ /// SplitEmptyFunction: false
+ /// \endcode
+ /// \version 3.8
+ BraceWrappingFlags BraceWrapping;
+
+ /// Break between adjacent string literals.
+ /// \code
+ /// true:
+ /// return "Code"
+ /// "\0\52\26\55\55\0"
+ /// "x013"
+ /// "\02\xBA";
+ /// false:
+ /// return "Code" "\0\52\26\55\55\0" "x013" "\02\xBA";
+ /// \endcode
+ /// \version 18
+ bool BreakAdjacentStringLiterals;
+
+ /// Different ways to break after attributes.
+ enum AttributeBreakingStyle : int8_t {
+ /// Always break after attributes.
+ /// \code
+ /// [[maybe_unused]]
+ /// const int i;
+ /// [[gnu::const]] [[maybe_unused]]
+ /// int j;
+ ///
+ /// [[nodiscard]]
+ /// inline int f();
+ /// [[gnu::const]] [[nodiscard]]
+ /// int g();
+ ///
+ /// [[likely]]
+ /// if (a)
+ /// f();
+ /// else
+ /// g();
+ ///
+ /// switch (b) {
+ /// [[unlikely]]
+ /// case 1:
+ /// ++b;
+ /// break;
+ /// [[likely]]
+ /// default:
+ /// return;
+ /// }
+ /// \endcode
+ ABS_Always,
+ /// Leave the line breaking after attributes as is.
+ /// \code
+ /// [[maybe_unused]] const int i;
+ /// [[gnu::const]] [[maybe_unused]]
+ /// int j;
+ ///
+ /// [[nodiscard]] inline int f();
+ /// [[gnu::const]] [[nodiscard]]
+ /// int g();
+ ///
+ /// [[likely]] if (a)
+ /// f();
+ /// else
+ /// g();
+ ///
+ /// switch (b) {
+ /// [[unlikely]] case 1:
+ /// ++b;
+ /// break;
+ /// [[likely]]
+ /// default:
+ /// return;
+ /// }
+ /// \endcode
+ ABS_Leave,
+ /// Never break after attributes.
+ /// \code
+ /// [[maybe_unused]] const int i;
+ /// [[gnu::const]] [[maybe_unused]] int j;
+ ///
+ /// [[nodiscard]] inline int f();
+ /// [[gnu::const]] [[nodiscard]] int g();
+ ///
+ /// [[likely]] if (a)
+ /// f();
+ /// else
+ /// g();
+ ///
+ /// switch (b) {
+ /// [[unlikely]] case 1:
+ /// ++b;
+ /// break;
+ /// [[likely]] default:
+ /// return;
+ /// }
+ /// \endcode
+ ABS_Never,
+ };
+
+ /// Break after a group of C++11 attributes before variable or function
+ /// (including constructor/destructor) declaration/definition names or before
+ /// control statements, i.e. ``if``, ``switch`` (including ``case`` and
+ /// ``default`` labels), ``for``, and ``while`` statements.
+ /// \version 16
+ AttributeBreakingStyle BreakAfterAttributes;
+
+ /// If ``true``, clang-format will always break after a Json array ``[``
+ /// otherwise it will scan until the closing ``]`` to determine if it should
+ /// add newlines between elements (prettier compatible).
+ ///
+ /// \note
+ /// This is currently only for formatting JSON.
+ /// \endnote
+ /// \code
+ /// true: false:
+ /// [ vs. [1, 2, 3, 4]
+ /// 1,
+ /// 2,
+ /// 3,
+ /// 4
+ /// ]
+ /// \endcode
+ /// \version 16
+ bool BreakArrays;
+
/// The style of wrapping parameters on the same line (bin-packed) or
/// on one line each.
- enum BinPackStyle : unsigned char {
+ enum BinPackStyle : int8_t {
/// Automatically determine parameter bin-packing behavior.
BPS_Auto,
/// Always bin-pack parameters.
@@ -990,7 +1573,7 @@ struct FormatStyle {
};
/// The style of breaking before or after binary operators.
- enum BinaryOperatorStyle : unsigned char {
+ enum BinaryOperatorStyle : int8_t {
/// Break after operators.
/// \code
/// LooooooooooongType loooooooooooooooooooooongVariable =
@@ -1030,10 +1613,11 @@ struct FormatStyle {
};
/// The way to wrap binary operators.
+ /// \version 3.6
BinaryOperatorStyle BreakBeforeBinaryOperators;
/// Different ways to attach braces to their surrounding context.
- enum BraceBreakingStyle : unsigned char {
+ enum BraceBreakingStyle : int8_t {
/// Always attach braces to surrounding context.
/// \code
/// namespace N {
@@ -1459,297 +2043,66 @@ struct FormatStyle {
/// } // namespace N
/// \endcode
BS_WebKit,
- /// Configure each individual brace in `BraceWrapping`.
+ /// Configure each individual brace in ``BraceWrapping``.
BS_Custom
};
/// The brace breaking style to use.
+ /// \version 3.7
BraceBreakingStyle BreakBeforeBraces;
- /// Different ways to wrap braces after control statements.
- enum BraceWrappingAfterControlStatementStyle : unsigned char {
- /// Never wrap braces after a control statement.
+ /// Different ways to break before concept declarations.
+ enum BreakBeforeConceptDeclarationsStyle : int8_t {
+ /// Keep the template declaration line together with ``concept``.
/// \code
- /// if (foo()) {
- /// } else {
- /// }
- /// for (int i = 0; i < 10; ++i) {
- /// }
+ /// template <typename T> concept C = ...;
/// \endcode
- BWACS_Never,
- /// Only wrap braces after a multi-line control statement.
+ BBCDS_Never,
+ /// Breaking between template declaration and ``concept`` is allowed. The
+ /// actual behavior depends on the content and line breaking rules and
+ /// penalties.
+ BBCDS_Allowed,
+ /// Always break before ``concept``, putting it in the line after the
+ /// template declaration.
/// \code
- /// if (foo && bar &&
- /// baz)
- /// {
- /// quux();
- /// }
- /// while (foo || bar) {
- /// }
+ /// template <typename T>
+ /// concept C = ...;
/// \endcode
- BWACS_MultiLine,
- /// Always wrap braces after a control statement.
- /// \code
- /// if (foo())
- /// {
- /// } else
- /// {}
- /// for (int i = 0; i < 10; ++i)
- /// {}
- /// \endcode
- BWACS_Always
+ BBCDS_Always,
};
- /// Precise control over the wrapping of braces.
- /// \code
- /// # Should be declared this way:
- /// BreakBeforeBraces: Custom
- /// BraceWrapping:
- /// AfterClass: true
- /// \endcode
- struct BraceWrappingFlags {
- /// Wrap case labels.
- /// \code
- /// false: true:
- /// switch (foo) { vs. switch (foo) {
- /// case 1: { case 1:
- /// bar(); {
- /// break; bar();
- /// } break;
- /// default: { }
- /// plop(); default:
- /// } {
- /// } plop();
- /// }
- /// }
- /// \endcode
- bool AfterCaseLabel;
- /// Wrap class definitions.
- /// \code
- /// true:
- /// class foo {};
- ///
- /// false:
- /// class foo
- /// {};
- /// \endcode
- bool AfterClass;
+ /// The concept declaration style to use.
+ /// \version 12
+ BreakBeforeConceptDeclarationsStyle BreakBeforeConceptDeclarations;
- /// Wrap control statements (``if``/``for``/``while``/``switch``/..).
- BraceWrappingAfterControlStatementStyle AfterControlStatement;
- /// Wrap enum definitions.
- /// \code
- /// true:
- /// enum X : int
- /// {
- /// B
- /// };
- ///
- /// false:
- /// enum X : int { B };
- /// \endcode
- bool AfterEnum;
- /// Wrap function definitions.
- /// \code
- /// true:
- /// void foo()
- /// {
- /// bar();
- /// bar2();
- /// }
- ///
- /// false:
- /// void foo() {
- /// bar();
- /// bar2();
- /// }
- /// \endcode
- bool AfterFunction;
- /// Wrap namespace definitions.
- /// \code
- /// true:
- /// namespace
- /// {
- /// int foo();
- /// int bar();
- /// }
- ///
- /// false:
- /// namespace {
- /// int foo();
- /// int bar();
- /// }
- /// \endcode
- bool AfterNamespace;
- /// Wrap ObjC definitions (interfaces, implementations...).
- /// \note @autoreleasepool and @synchronized blocks are wrapped
- /// according to `AfterControlStatement` flag.
- bool AfterObjCDeclaration;
- /// Wrap struct definitions.
+ /// Different ways to break ASM parameters.
+ enum BreakBeforeInlineASMColonStyle : int8_t {
+ /// No break before inline ASM colon.
/// \code
- /// true:
- /// struct foo
- /// {
- /// int x;
- /// };
- ///
- /// false:
- /// struct foo {
- /// int x;
- /// };
+ /// asm volatile("string", : : val);
/// \endcode
- bool AfterStruct;
- /// Wrap union definitions.
+ BBIAS_Never,
+ /// Break before inline ASM colon if the line length is longer than column
+ /// limit.
/// \code
- /// true:
- /// union foo
- /// {
- /// int x;
- /// }
- ///
- /// false:
- /// union foo {
- /// int x;
- /// }
+ /// asm volatile("string", : : val);
+ /// asm("cmoveq %1, %2, %[result]"
+ /// : [result] "=r"(result)
+ /// : "r"(test), "r"(new), "[result]"(old));
/// \endcode
- bool AfterUnion;
- /// Wrap extern blocks.
+ BBIAS_OnlyMultiline,
+ /// Always break before inline ASM colon.
/// \code
- /// true:
- /// extern "C"
- /// {
- /// int foo();
- /// }
- ///
- /// false:
- /// extern "C" {
- /// int foo();
- /// }
+ /// asm volatile("string",
+ /// :
+ /// : val);
/// \endcode
- bool AfterExternBlock; // Partially superseded by IndentExternBlock
- /// Wrap before ``catch``.
- /// \code
- /// true:
- /// try {
- /// foo();
- /// }
- /// catch () {
- /// }
- ///
- /// false:
- /// try {
- /// foo();
- /// } catch () {
- /// }
- /// \endcode
- bool BeforeCatch;
- /// Wrap before ``else``.
- /// \code
- /// true:
- /// if (foo()) {
- /// }
- /// else {
- /// }
- ///
- /// false:
- /// if (foo()) {
- /// } else {
- /// }
- /// \endcode
- bool BeforeElse;
- /// Wrap lambda block.
- /// \code
- /// true:
- /// connect(
- /// []()
- /// {
- /// foo();
- /// bar();
- /// });
- ///
- /// false:
- /// connect([]() {
- /// foo();
- /// bar();
- /// });
- /// \endcode
- bool BeforeLambdaBody;
- /// Wrap before ``while``.
- /// \code
- /// true:
- /// do {
- /// foo();
- /// }
- /// while (1);
- ///
- /// false:
- /// do {
- /// foo();
- /// } while (1);
- /// \endcode
- bool BeforeWhile;
- /// Indent the wrapped braces themselves.
- bool IndentBraces;
- /// If ``false``, empty function body can be put on a single line.
- /// This option is used only if the opening brace of the function has
- /// already been wrapped, i.e. the `AfterFunction` brace wrapping mode is
- /// set, and the function could/should not be put on a single line (as per
- /// `AllowShortFunctionsOnASingleLine` and constructor formatting options).
- /// \code
- /// int f() vs. int f()
- /// {} {
- /// }
- /// \endcode
- ///
- bool SplitEmptyFunction;
- /// If ``false``, empty record (e.g. class, struct or union) body
- /// can be put on a single line. This option is used only if the opening
- /// brace of the record has already been wrapped, i.e. the `AfterClass`
- /// (for classes) brace wrapping mode is set.
- /// \code
- /// class Foo vs. class Foo
- /// {} {
- /// }
- /// \endcode
- ///
- bool SplitEmptyRecord;
- /// If ``false``, empty namespace body can be put on a single line.
- /// This option is used only if the opening brace of the namespace has
- /// already been wrapped, i.e. the `AfterNamespace` brace wrapping mode is
- /// set.
- /// \code
- /// namespace Foo vs. namespace Foo
- /// {} {
- /// }
- /// \endcode
- ///
- bool SplitEmptyNamespace;
+ BBIAS_Always,
};
- /// Control of individual brace wrapping cases.
- ///
- /// If ``BreakBeforeBraces`` is set to ``BS_Custom``, use this to specify how
- /// each individual brace case should be handled. Otherwise, this is ignored.
- /// \code{.yaml}
- /// # Example of usage:
- /// BreakBeforeBraces: Custom
- /// BraceWrapping:
- /// AfterEnum: true
- /// AfterStruct: false
- /// SplitEmptyFunction: false
- /// \endcode
- BraceWrappingFlags BraceWrapping;
-
- /// If ``true``, concept will be placed on a new line.
- /// \code
- /// true:
- /// template<typename T>
- /// concept ...
- ///
- /// false:
- /// template<typename T> concept ...
- /// \endcode
- bool BreakBeforeConceptDeclarations;
+ /// The inline ASM colon style to use.
+ /// \version 16
+ BreakBeforeInlineASMColonStyle BreakBeforeInlineASMColon;
/// If ``true``, ternary operators will be placed after line breaks.
/// \code
@@ -1763,10 +2116,11 @@ struct FormatStyle {
/// firstValue :
/// SecondValueVeryVeryVeryVeryLong;
/// \endcode
+ /// \version 3.7
bool BreakBeforeTernaryOperators;
/// Different ways to break initializers.
- enum BreakConstructorInitializersStyle : unsigned char {
+ enum BreakConstructorInitializersStyle : int8_t {
/// Break constructor initializers before the colon and after the commas.
/// \code
/// Constructor()
@@ -1791,7 +2145,8 @@ struct FormatStyle {
BCIS_AfterColon
};
- /// The constructor initializers style to use.
+ /// The break constructor initializers style to use.
+ /// \version 5
BreakConstructorInitializersStyle BreakConstructorInitializers;
/// Break after each annotation on a field in Java files.
@@ -1801,9 +2156,12 @@ struct FormatStyle {
/// @Mock
/// DataLoad loader;
/// \endcode
+ /// \version 3.8
bool BreakAfterJavaFieldAnnotations;
/// Allow breaking string literals when formatting.
+ ///
+ /// In C, C++, and Objective-C:
/// \code
/// true:
/// const char* x = "veryVeryVeryVeryVeryVe"
@@ -1812,8 +2170,36 @@ struct FormatStyle {
///
/// false:
/// const char* x =
- /// "veryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongString";
+ /// "veryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongString";
/// \endcode
+ ///
+ /// In C# and Java:
+ /// \code
+ /// true:
+ /// string x = "veryVeryVeryVeryVeryVe" +
+ /// "ryVeryVeryVeryVeryVery" +
+ /// "VeryLongString";
+ ///
+ /// false:
+ /// string x =
+ /// "veryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongString";
+ /// \endcode
+ ///
+ /// C# interpolated strings are not broken.
+ ///
+ /// In Verilog:
+ /// \code
+ /// true:
+ /// string x = {"veryVeryVeryVeryVeryVe",
+ /// "ryVeryVeryVeryVeryVery",
+ /// "VeryLongString"};
+ ///
+ /// false:
+ /// string x =
+ /// "veryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongString";
+ /// \endcode
+ ///
+ /// \version 3.9
bool BreakStringLiterals;
/// The column limit.
@@ -1821,6 +2207,7 @@ struct FormatStyle {
/// A column limit of ``0`` means that there is no column limit. In this case,
/// clang-format will respect the input's line breaking decisions within
/// statements unless they contradict other rules.
+ /// \version 3.7
unsigned ColumnLimit;
/// A regular expression that describes comments with special meaning,
@@ -1830,10 +2217,11 @@ struct FormatStyle {
/// // Will leave the following line unaffected
/// #include <vector> // FOOBAR pragma: keep
/// \endcode
+ /// \version 3.7
std::string CommentPragmas;
/// Different ways to break inheritance list.
- enum BreakInheritanceListStyle : unsigned char {
+ enum BreakInheritanceListStyle : int8_t {
/// Break inheritance list before the colon and after the commas.
/// \code
/// class Foo
@@ -1869,6 +2257,7 @@ struct FormatStyle {
};
/// The inheritance list style to use.
+ /// \version 7
BreakInheritanceListStyle BreakInheritanceList;
/// If ``true``, consecutive namespace declarations will be on the same
@@ -1892,30 +2281,17 @@ struct FormatStyle {
/// namespace Extra {
/// }}}
/// \endcode
+ /// \version 5
bool CompactNamespaces;
- // clang-format off
- /// If the constructor initializers don't fit on a line, put each
- /// initializer on its own line.
- /// \code
- /// true:
- /// SomeClass::Constructor()
- /// : aaaaaaaa(aaaaaaaa), aaaaaaaa(aaaaaaaa), aaaaaaaa(aaaaaaaaaaaaaaaaaaaaaaaaa) {
- /// return 0;
- /// }
- ///
- /// false:
- /// SomeClass::Constructor()
- /// : aaaaaaaa(aaaaaaaa), aaaaaaaa(aaaaaaaa),
- /// aaaaaaaa(aaaaaaaaaaaaaaaaaaaaaaaaa) {
- /// return 0;
- /// }
- /// \endcode
- bool ConstructorInitializerAllOnOneLineOrOnePerLine;
- // clang-format on
+ /// This option is **deprecated**. See ``CurrentLine`` of
+ /// ``PackConstructorInitializers``.
+ /// \version 3.7
+ // bool ConstructorInitializerAllOnOneLineOrOnePerLine;
/// The number of characters to use for indentation of constructor
/// initializer lists as well as inheritance lists.
+ /// \version 3.7
unsigned ConstructorInitializerIndentWidth;
/// Indent width for line continuations.
@@ -1926,6 +2302,7 @@ struct FormatStyle {
/// longFunction( // Again a long comment
/// arg);
/// \endcode
+ /// \version 3.7
unsigned ContinuationIndentWidth;
/// If ``true``, format braced lists as best suited for C++11 braced
@@ -1948,26 +2325,30 @@ struct FormatStyle {
/// f(MyMap[{composite, key}]); f(MyMap[{ composite, key }]);
/// new int[3]{1, 2, 3}; new int[3]{ 1, 2, 3 };
/// \endcode
+ /// \version 3.4
bool Cpp11BracedListStyle;
- /// \brief Analyze the formatted file for the most used line ending (``\r\n``
- /// or ``\n``). ``UseCRLF`` is only used as a fallback if none can be derived.
- bool DeriveLineEnding;
+ /// This option is **deprecated**. See ``DeriveLF`` and ``DeriveCRLF`` of
+ /// ``LineEnding``.
+ /// \version 10
+ // bool DeriveLineEnding;
/// If ``true``, analyze the formatted file for the most common
/// alignment of ``&`` and ``*``.
/// Pointer and reference alignment styles are going to be updated according
/// to the preferences found in the file.
/// ``PointerAlignment`` is then used only as fallback.
+ /// \version 3.7
bool DerivePointerAlignment;
/// Disables formatting completely.
+ /// \version 3.7
bool DisableFormat;
/// Different styles for empty line after access modifiers.
/// ``EmptyLineBeforeAccessModifier`` configuration handles the number of
/// empty lines between two access modifiers.
- enum EmptyLineAfterAccessModifierStyle : unsigned char {
+ enum EmptyLineAfterAccessModifierStyle : int8_t {
/// Remove all empty lines after access modifiers.
/// \code
/// struct foo {
@@ -2012,10 +2393,11 @@ struct FormatStyle {
/// Defines when to put an empty line after access modifiers.
/// ``EmptyLineBeforeAccessModifier`` configuration handles the number of
/// empty lines between two access modifiers.
+ /// \version 13
EmptyLineAfterAccessModifierStyle EmptyLineAfterAccessModifier;
/// Different styles for empty line before access modifiers.
- enum EmptyLineBeforeAccessModifierStyle : unsigned char {
+ enum EmptyLineBeforeAccessModifierStyle : int8_t {
/// Remove all empty lines before access modifiers.
/// \code
/// struct foo {
@@ -2074,6 +2456,7 @@ struct FormatStyle {
};
/// Defines in which cases to put empty line before access modifiers.
+ /// \version 12
EmptyLineBeforeAccessModifierStyle EmptyLineBeforeAccessModifier;
/// If ``true``, clang-format detects whether function calls and
@@ -2084,20 +2467,27 @@ struct FormatStyle {
/// made, clang-format analyzes whether there are other bin-packed cases in
/// the input file and act accordingly.
///
- /// NOTE: This is an experimental flag, that might go away or be renamed. Do
- /// not use this in config files, etc. Use at your own risk.
+ /// \note
+ /// This is an experimental flag, that might go away or be renamed. Do
+ /// not use this in config files, etc. Use at your own risk.
+ /// \endnote
+ /// \version 3.7
bool ExperimentalAutoDetectBinPacking;
/// If ``true``, clang-format adds missing namespace end comments for
- /// short namespaces and fixes invalid existing ones. Short ones are
- /// controlled by "ShortNamespaceLines".
+ /// namespaces and fixes invalid existing ones. This doesn't affect short
+ /// namespaces, which are controlled by ``ShortNamespaceLines``.
/// \code
/// true: false:
- /// namespace a { vs. namespace a {
- /// foo(); foo();
- /// bar(); bar();
+ /// namespace longNamespace { vs. namespace longNamespace {
+ /// void foo(); void foo();
+ /// void bar(); void bar();
/// } // namespace a }
+ /// namespace shortNamespace { namespace shortNamespace {
+ /// void baz(); void baz();
+ /// } }
/// \endcode
+ /// \version 5
bool FixNamespaceComments;
/// A vector of macros that should be interpreted as foreach loops
@@ -2115,8 +2505,11 @@ struct FormatStyle {
/// \endcode
///
/// For example: BOOST_FOREACH.
+ /// \version 3.7
std::vector<std::string> ForEachMacros;
+ tooling::IncludeStyle IncludeStyle;
+
/// A vector of macros that should be interpreted as conditionals
/// instead of as function calls.
///
@@ -2135,64 +2528,9 @@ struct FormatStyle {
///
/// For example: `KJ_IF_MAYBE
/// <https://github.com/capnproto/capnproto/blob/master/kjdoc/tour.md#maybes>`_
+ /// \version 13
std::vector<std::string> IfMacros;
- /// \brief A vector of macros that should be interpreted as type declarations
- /// instead of as function calls.
- ///
- /// These are expected to be macros of the form:
- /// \code
- /// STACK_OF(...)
- /// \endcode
- ///
- /// In the .clang-format configuration file, this can be configured like:
- /// \code{.yaml}
- /// TypenameMacros: ['STACK_OF', 'LIST']
- /// \endcode
- ///
- /// For example: OpenSSL STACK_OF, BSD LIST_ENTRY.
- std::vector<std::string> TypenameMacros;
-
- /// A vector of macros that should be interpreted as complete
- /// statements.
- ///
- /// Typical macros are expressions, and require a semi-colon to be
- /// added; sometimes this is not the case, and this allows to make
- /// clang-format aware of such cases.
- ///
- /// For example: Q_UNUSED
- std::vector<std::string> StatementMacros;
-
- /// A vector of macros which are used to open namespace blocks.
- ///
- /// These are expected to be macros of the form:
- /// \code
- /// NAMESPACE(<namespace-name>, ...) {
- /// <namespace-content>
- /// }
- /// \endcode
- ///
- /// For example: TESTSUITE
- std::vector<std::string> NamespaceMacros;
-
- /// A vector of macros which are whitespace-sensitive and should not
- /// be touched.
- ///
- /// These are expected to be macros of the form:
- /// \code
- /// STRINGIZE(...)
- /// \endcode
- ///
- /// In the .clang-format configuration file, this can be configured like:
- /// \code{.yaml}
- /// WhitespaceSensitiveMacros: ['STRINGIZE', 'PP_STRINGIZE']
- /// \endcode
- ///
- /// For example: BOOST_PP_STRINGIZE
- std::vector<std::string> WhitespaceSensitiveMacros;
-
- tooling::IncludeStyle IncludeStyle;
-
/// Specify whether access modifiers should have their own indentation level.
///
/// When ``false``, access modifiers are indented (or outdented) relative to
@@ -2217,26 +2555,9 @@ struct FormatStyle {
/// return 1; return 1;
/// } }
/// \endcode
+ /// \version 13
bool IndentAccessModifiers;
- /// Indent case labels one level from the switch statement.
- ///
- /// When ``false``, use the same indentation level as for the switch
- /// statement. Switch statement body is always indented one level more than
- /// case labels (except the first block following the case label, which
- /// itself indents the code - unless IndentCaseBlocks is enabled).
- /// \code
- /// false: true:
- /// switch (fool) { vs. switch (fool) {
- /// case 1: case 1:
- /// bar(); bar();
- /// break; break;
- /// default: default:
- /// plop(); plop();
- /// } }
- /// \endcode
- bool IndentCaseLabels;
-
/// Indent case label blocks one level from the case label.
///
/// When ``false``, the block following the case label uses the same
@@ -2257,8 +2578,28 @@ struct FormatStyle {
/// }
/// }
/// \endcode
+ /// \version 11
bool IndentCaseBlocks;
+ /// Indent case labels one level from the switch statement.
+ ///
+ /// When ``false``, use the same indentation level as for the switch
+ /// statement. Switch statement body is always indented one level more than
+ /// case labels (except the first block following the case label, which
+ /// itself indents the code - unless IndentCaseBlocks is enabled).
+ /// \code
+ /// false: true:
+ /// switch (fool) { vs. switch (fool) {
+ /// case 1: case 1:
+ /// bar(); bar();
+ /// break; break;
+ /// default: default:
+ /// plop(); plop();
+ /// } }
+ /// \endcode
+ /// \version 3.3
+ bool IndentCaseLabels;
+
/// Indent goto labels.
///
/// When ``false``, goto labels are flushed left.
@@ -2273,44 +2614,11 @@ struct FormatStyle {
/// return 1; return 1;
/// } }
/// \endcode
+ /// \version 10
bool IndentGotoLabels;
- /// Options for indenting preprocessor directives.
- enum PPDirectiveIndentStyle : unsigned char {
- /// Does not indent any directives.
- /// \code
- /// #if FOO
- /// #if BAR
- /// #include <foo>
- /// #endif
- /// #endif
- /// \endcode
- PPDIS_None,
- /// Indents directives after the hash.
- /// \code
- /// #if FOO
- /// # if BAR
- /// # include <foo>
- /// # endif
- /// #endif
- /// \endcode
- PPDIS_AfterHash,
- /// Indents directives before the hash.
- /// \code
- /// #if FOO
- /// #if BAR
- /// #include <foo>
- /// #endif
- /// #endif
- /// \endcode
- PPDIS_BeforeHash
- };
-
- /// The preprocessor directive indenting style to use.
- PPDirectiveIndentStyle IndentPPDirectives;
-
/// Indents extern blocks
- enum IndentExternBlockStyle : unsigned char {
+ enum IndentExternBlockStyle : int8_t {
/// Backwards compatible with AfterExternBlock's indenting.
/// \code
/// IndentExternBlock: AfterExternBlock
@@ -2346,9 +2654,48 @@ struct FormatStyle {
};
/// IndentExternBlockStyle is the type of indenting of extern blocks.
+ /// \version 11
IndentExternBlockStyle IndentExternBlock;
- /// Indent the requires clause in a template
+ /// Options for indenting preprocessor directives.
+ enum PPDirectiveIndentStyle : int8_t {
+ /// Does not indent any directives.
+ /// \code
+ /// #if FOO
+ /// #if BAR
+ /// #include <foo>
+ /// #endif
+ /// #endif
+ /// \endcode
+ PPDIS_None,
+ /// Indents directives after the hash.
+ /// \code
+ /// #if FOO
+ /// # if BAR
+ /// # include <foo>
+ /// # endif
+ /// #endif
+ /// \endcode
+ PPDIS_AfterHash,
+ /// Indents directives before the hash.
+ /// \code
+ /// #if FOO
+ /// #if BAR
+ /// #include <foo>
+ /// #endif
+ /// #endif
+ /// \endcode
+ PPDIS_BeforeHash
+ };
+
+ /// The preprocessor directive indenting style to use.
+ /// \version 6
+ PPDirectiveIndentStyle IndentPPDirectives;
+
+ /// Indent the requires clause in a template. This only applies when
+ /// ``RequiresClausePosition`` is ``OwnLine``, or ``WithFollowing``.
+ ///
+ /// In clang-format 12, 13 and 14 it was named ``IndentRequires``.
/// \code
/// true:
/// template <typename It>
@@ -2364,7 +2711,8 @@ struct FormatStyle {
/// //....
/// }
/// \endcode
- bool IndentRequires;
+ /// \version 15
+ bool IndentRequiresClause;
/// The number of columns to use for indentation.
/// \code
@@ -2377,6 +2725,7 @@ struct FormatStyle {
/// }
/// }
/// \endcode
+ /// \version 3.7
unsigned IndentWidth;
/// Indent if a function definition or declaration is wrapped after the
@@ -2390,8 +2739,154 @@ struct FormatStyle {
/// LoooooooooooooooooooooooooooooooooooooooongReturnType
/// LoooooooooooooooooooooooooooooooongFunctionDeclaration();
/// \endcode
+ /// \version 3.7
bool IndentWrappedFunctionNames;
+ /// Insert braces after control statements (``if``, ``else``, ``for``, ``do``,
+ /// and ``while``) in C++ unless the control statements are inside macro
+ /// definitions or the braces would enclose preprocessor directives.
+ /// \warning
+ /// Setting this option to ``true`` could lead to incorrect code formatting
+ /// due to clang-format's lack of complete semantic information. As such,
+ /// extra care should be taken to review code changes made by this option.
+ /// \endwarning
+ /// \code
+ /// false: true:
+ ///
+ /// if (isa<FunctionDecl>(D)) vs. if (isa<FunctionDecl>(D)) {
+ /// handleFunctionDecl(D); handleFunctionDecl(D);
+ /// else if (isa<VarDecl>(D)) } else if (isa<VarDecl>(D)) {
+ /// handleVarDecl(D); handleVarDecl(D);
+ /// else } else {
+ /// return; return;
+ /// }
+ ///
+ /// while (i--) vs. while (i--) {
+ /// for (auto *A : D.attrs()) for (auto *A : D.attrs()) {
+ /// handleAttr(A); handleAttr(A);
+ /// }
+ /// }
+ ///
+ /// do vs. do {
+ /// --i; --i;
+ /// while (i); } while (i);
+ /// \endcode
+ /// \version 15
+ bool InsertBraces;
+
+ /// Insert a newline at end of file if missing.
+ /// \version 16
+ bool InsertNewlineAtEOF;
+
+ /// The style of inserting trailing commas into container literals.
+ enum TrailingCommaStyle : int8_t {
+ /// Do not insert trailing commas.
+ TCS_None,
+ /// Insert trailing commas in container literals that were wrapped over
+ /// multiple lines. Note that this is conceptually incompatible with
+ /// bin-packing, because the trailing comma is used as an indicator
+ /// that a container should be formatted one-per-line (i.e. not bin-packed).
+ /// So inserting a trailing comma counteracts bin-packing.
+ TCS_Wrapped,
+ };
+
+ /// If set to ``TCS_Wrapped`` will insert trailing commas in container
+ /// literals (arrays and objects) that wrap across multiple lines.
+ /// It is currently only available for JavaScript
+ /// and disabled by default ``TCS_None``.
+ /// ``InsertTrailingCommas`` cannot be used together with ``BinPackArguments``
+ /// as inserting the comma disables bin-packing.
+ /// \code
+ /// TSC_Wrapped:
+ /// const someArray = [
+ /// aaaaaaaaaaaaaaaaaaaaaaaaaa,
+ /// aaaaaaaaaaaaaaaaaaaaaaaaaa,
+ /// aaaaaaaaaaaaaaaaaaaaaaaaaa,
+ /// // ^ inserted
+ /// ]
+ /// \endcode
+ /// \version 11
+ TrailingCommaStyle InsertTrailingCommas;
+
+ /// Separator format of integer literals of different bases.
+ ///
+ /// If negative, remove separators. If ``0``, leave the literal as is. If
+ /// positive, insert separators between digits starting from the rightmost
+ /// digit.
+ ///
+ /// For example, the config below will leave separators in binary literals
+ /// alone, insert separators in decimal literals to separate the digits into
+ /// groups of 3, and remove separators in hexadecimal literals.
+ /// \code
+ /// IntegerLiteralSeparator:
+ /// Binary: 0
+ /// Decimal: 3
+ /// Hex: -1
+ /// \endcode
+ ///
+ /// You can also specify a minimum number of digits (``BinaryMinDigits``,
+ /// ``DecimalMinDigits``, and ``HexMinDigits``) the integer literal must
+ /// have in order for the separators to be inserted.
+ struct IntegerLiteralSeparatorStyle {
+ /// Format separators in binary literals.
+ /// \code{.text}
+ /// /* -1: */ b = 0b100111101101;
+ /// /* 0: */ b = 0b10011'11'0110'1;
+ /// /* 3: */ b = 0b100'111'101'101;
+ /// /* 4: */ b = 0b1001'1110'1101;
+ /// \endcode
+ int8_t Binary;
+ /// Format separators in binary literals with a minimum number of digits.
+ /// \code{.text}
+ /// // Binary: 3
+ /// // BinaryMinDigits: 7
+ /// b1 = 0b101101;
+ /// b2 = 0b1'101'101;
+ /// \endcode
+ int8_t BinaryMinDigits;
+ /// Format separators in decimal literals.
+ /// \code{.text}
+ /// /* -1: */ d = 18446744073709550592ull;
+ /// /* 0: */ d = 184467'440737'0'95505'92ull;
+ /// /* 3: */ d = 18'446'744'073'709'550'592ull;
+ /// \endcode
+ int8_t Decimal;
+ /// Format separators in decimal literals with a minimum number of digits.
+ /// \code{.text}
+ /// // Decimal: 3
+ /// // DecimalMinDigits: 5
+ /// d1 = 2023;
+ /// d2 = 10'000;
+ /// \endcode
+ int8_t DecimalMinDigits;
+ /// Format separators in hexadecimal literals.
+ /// \code{.text}
+ /// /* -1: */ h = 0xDEADBEEFDEADBEEFuz;
+ /// /* 0: */ h = 0xDEAD'BEEF'DE'AD'BEE'Fuz;
+ /// /* 2: */ h = 0xDE'AD'BE'EF'DE'AD'BE'EFuz;
+ /// \endcode
+ int8_t Hex;
+ /// Format separators in hexadecimal literals with a minimum number of
+ /// digits.
+ /// \code{.text}
+ /// // Hex: 2
+ /// // HexMinDigits: 6
+ /// h1 = 0xABCDE;
+ /// h2 = 0xAB'CD'EF;
+ /// \endcode
+ int8_t HexMinDigits;
+ bool operator==(const IntegerLiteralSeparatorStyle &R) const {
+ return Binary == R.Binary && BinaryMinDigits == R.BinaryMinDigits &&
+ Decimal == R.Decimal && DecimalMinDigits == R.DecimalMinDigits &&
+ Hex == R.Hex && HexMinDigits == R.HexMinDigits;
+ }
+ };
+
+ /// Format integer literal separators (``'`` for C++ and ``_`` for C#, Java,
+ /// and JavaScript).
+ /// \version 16
+ IntegerLiteralSeparatorStyle IntegerLiteralSeparator;
+
/// A vector of prefixes ordered by the desired groups for Java imports.
///
/// One group's prefix can be a subset of another - the longest prefix is
@@ -2423,11 +2918,12 @@ struct FormatStyle {
///
/// import org.example.ClassD;
/// \endcode
+ /// \version 8
std::vector<std::string> JavaImportGroups;
/// Quotation styles for JavaScript strings. Does not affect template
/// strings.
- enum JavaScriptQuoteStyle : unsigned char {
+ enum JavaScriptQuoteStyle : int8_t {
/// Leave string quotes as they are.
/// \code{.js}
/// string1 = "foo";
@@ -2449,6 +2945,7 @@ struct FormatStyle {
};
/// The JavaScriptQuoteStyle to use for JavaScript strings.
+ /// \version 3.9
JavaScriptQuoteStyle JavaScriptQuotes;
// clang-format off
@@ -2464,9 +2961,14 @@ struct FormatStyle {
/// false:
/// import {VeryLongImportsAreAnnoying, VeryLongImportsAreAnnoying, VeryLongImportsAreAnnoying,} from "some/module.js"
/// \endcode
+ /// \version 3.9
bool JavaScriptWrapImports;
// clang-format on
+ /// Keep empty lines (up to ``MaxEmptyLinesToKeep``) at end of file.
+ /// \version 17
+ bool KeepEmptyLinesAtEOF;
+
/// If true, the empty line at the start of blocks is kept.
/// \code
/// true: false:
@@ -2475,14 +2977,49 @@ struct FormatStyle {
/// bar(); }
/// }
/// \endcode
+ /// \version 3.7
bool KeepEmptyLinesAtTheStartOfBlocks;
+ /// Indentation logic for lambda bodies.
+ enum LambdaBodyIndentationKind : int8_t {
+ /// Align lambda body relative to the lambda signature. This is the default.
+ /// \code
+ /// someMethod(
+ /// [](SomeReallyLongLambdaSignatureArgument foo) {
+ /// return;
+ /// });
+ /// \endcode
+ LBI_Signature,
+ /// For statements within block scope, align lambda body relative to the
+ /// indentation level of the outer scope the lambda signature resides in.
+ /// \code
+ /// someMethod(
+ /// [](SomeReallyLongLambdaSignatureArgument foo) {
+ /// return;
+ /// });
+ ///
+ /// someMethod(someOtherMethod(
+ /// [](SomeReallyLongLambdaSignatureArgument foo) {
+ /// return;
+ /// }));
+ /// \endcode
+ LBI_OuterScope,
+ };
+
+ /// The indentation style of lambda bodies. ``Signature`` (the default)
+ /// causes the lambda body to be indented one additional level relative to
+ /// the indentation level of the signature. ``OuterScope`` forces the lambda
+ /// body to be indented one additional level relative to the parent scope
+ /// containing the lambda signature.
+ /// \version 13
+ LambdaBodyIndentationKind LambdaBodyIndentation;
+
/// Supported languages.
///
/// When stored in a configuration file, specifies the language, that the
/// configuration targets. When passed to the ``reformat()`` function, enables
/// syntax features specific to the language.
- enum LanguageKind : unsigned char {
+ enum LanguageKind : int8_t {
/// Do not use.
LK_None,
/// Should be used for C, C++.
@@ -2504,46 +3041,41 @@ struct FormatStyle {
LK_TableGen,
/// Should be used for Protocol Buffer messages in text format
/// (https://developers.google.com/protocol-buffers/).
- LK_TextProto
+ LK_TextProto,
+ /// Should be used for Verilog and SystemVerilog.
+ /// https://standards.ieee.org/ieee/1800/6700/
+ /// https://sci-hub.st/10.1109/IEEESTD.2018.8299595
+ LK_Verilog
};
bool isCpp() const { return Language == LK_Cpp || Language == LK_ObjC; }
bool isCSharp() const { return Language == LK_CSharp; }
bool isJson() const { return Language == LK_Json; }
+ bool isJavaScript() const { return Language == LK_JavaScript; }
+ bool isVerilog() const { return Language == LK_Verilog; }
+ bool isProto() const {
+ return Language == LK_Proto || Language == LK_TextProto;
+ }
+ bool isTableGen() const { return Language == LK_TableGen; }
/// Language, this format style is targeted at.
+ /// \version 3.5
LanguageKind Language;
- /// Indentation logic for lambda bodies.
- enum LambdaBodyIndentationKind : unsigned char {
- /// Align lambda body relative to the lambda signature. This is the default.
- /// \code
- /// someMethod(
- /// [](SomeReallyLongLambdaSignatureArgument foo) {
- /// return;
- /// });
- /// \endcode
- LBI_Signature,
- /// Align lambda body relative to the indentation level of the outer scope
- /// the lambda signature resides in.
- /// \code
- /// someMethod(
- /// [](SomeReallyLongLambdaSignatureArgument foo) {
- /// return;
- /// });
- /// \endcode
- LBI_OuterScope,
+ /// Line ending style.
+ enum LineEndingStyle : int8_t {
+ /// Use ``\n``.
+ LE_LF,
+ /// Use ``\r\n``.
+ LE_CRLF,
+ /// Use ``\n`` unless the input has more lines ending in ``\r\n``.
+ LE_DeriveLF,
+ /// Use ``\r\n`` unless the input has more lines ending in ``\n``.
+ LE_DeriveCRLF,
};
- /// The indentation style of lambda bodies. ``Signature`` (the default)
- /// causes the lambda body to be indented one additional level relative to
- /// the indentation level of the signature. ``OuterScope`` forces the lambda
- /// body to be indented one additional level relative to the parent scope
- /// containing the lambda signature. For callback-heavy code, it may improve
- /// readability to have the signature indented two levels and to use
- /// ``OuterScope``. The KJ style guide requires ``OuterScope``.
- /// `KJ style guide
- /// <https://github.com/capnproto/capnproto/blob/master/kjdoc/style-guide.md>`_
- LambdaBodyIndentationKind LambdaBodyIndentation;
+ /// Line ending style (``\n`` or ``\r\n``) to use.
+ /// \version 16
+ LineEndingStyle LineEnding;
/// A regular expression matching macros that start a block.
/// \code
@@ -2571,11 +3103,53 @@ struct FormatStyle {
/// bar();
/// NS_TABLE_FOO_END
/// \endcode
+ /// \version 3.7
std::string MacroBlockBegin;
/// A regular expression matching macros that end a block.
+ /// \version 3.7
std::string MacroBlockEnd;
+ /// A list of macros of the form \c <definition>=<expansion> .
+ ///
+ /// Code will be parsed with macros expanded, in order to determine how to
+ /// interpret and format the macro arguments.
+ ///
+ /// For example, the code:
+ /// \code
+ /// A(a*b);
+ /// \endcode
+ ///
+ /// will usually be interpreted as a call to a function A, and the
+ /// multiplication expression will be formatted as ``a * b``.
+ ///
+ /// If we specify the macro definition:
+ /// \code{.yaml}
+ /// Macros:
+ /// - A(x)=x
+ /// \endcode
+ ///
+ /// the code will now be parsed as a declaration of the variable b of type a*,
+ /// and formatted as ``a* b`` (depending on pointer-binding rules).
+ ///
+ /// Features and restrictions:
+ /// * Both function-like macros and object-like macros are supported.
+ /// * Macro arguments must be used exactly once in the expansion.
+ /// * No recursive expansion; macros referencing other macros will be
+ /// ignored.
+ /// * Overloading by arity is supported: for example, given the macro
+ /// definitions A=x, A()=y, A(a)=a
+ ///
+ /// \code
+ /// A; -> x;
+ /// A(); -> y;
+ /// A(z); -> z;
+ /// A(a, b); // will not be expanded.
+ /// \endcode
+ ///
+ /// \version 17
+ std::vector<std::string> Macros;
+
/// The maximum number of consecutive empty lines to keep.
/// \code
/// MaxEmptyLinesToKeep: 1 vs. MaxEmptyLinesToKeep: 0
@@ -2587,10 +3161,11 @@ struct FormatStyle {
/// return i;
/// }
/// \endcode
+ /// \version 3.7
unsigned MaxEmptyLinesToKeep;
/// Different ways to indent namespace contents.
- enum NamespaceIndentationKind : unsigned char {
+ enum NamespaceIndentationKind : int8_t {
/// Don't indent in namespaces.
/// \code
/// namespace out {
@@ -2624,8 +3199,22 @@ struct FormatStyle {
};
/// The indentation used for namespaces.
+ /// \version 3.7
NamespaceIndentationKind NamespaceIndentation;
+ /// A vector of macros which are used to open namespace blocks.
+ ///
+ /// These are expected to be macros of the form:
+ /// \code
+ /// NAMESPACE(<namespace-name>, ...) {
+ /// <namespace-content>
+ /// }
+ /// \endcode
+ ///
+ /// For example: TESTSUITE
+ /// \version 9
+ std::vector<std::string> NamespaceMacros;
+
/// Controls bin-packing Objective-C protocol conformance list
/// items into as few lines as possible when they go over ``ColumnLimit``.
///
@@ -2656,6 +3245,7 @@ struct FormatStyle {
/// ddddddddddddd> {
/// }
/// \endcode
+ /// \version 7
BinPackStyle ObjCBinPackProtocolList;
/// The number of characters to use for indentation of ObjC blocks.
@@ -2666,12 +3256,9 @@ struct FormatStyle {
/// [self onOperationDone];
/// }];
/// \endcode
+ /// \version 3.7
unsigned ObjCBlockIndentWidth;
- /// Add a space after ``@property`` in Objective-C, i.e. use
- /// ``@property (readonly)`` instead of ``@property(readonly)``.
- bool ObjCSpaceAfterProperty;
-
/// Break parameters list into lines when there is nested block
/// parameters in a function call.
/// \code
@@ -2693,43 +3280,151 @@ struct FormatStyle {
/// }]
/// }
/// \endcode
+ /// \version 11
bool ObjCBreakBeforeNestedBlockParam;
+ /// The order in which ObjC property attributes should appear.
+ ///
+ /// Attributes in code will be sorted in the order specified. Any attributes
+ /// encountered that are not mentioned in this array will be sorted last, in
+ /// stable order. Comments between attributes will leave the attributes
+ /// untouched.
+ /// \warning
+ /// Using this option could lead to incorrect code formatting due to
+ /// clang-format's lack of complete semantic information. As such, extra
+ /// care should be taken to review code changes made by this option.
+ /// \endwarning
+ /// \code{.yaml}
+ /// ObjCPropertyAttributeOrder: [
+ /// class, direct,
+ /// atomic, nonatomic,
+ /// assign, retain, strong, copy, weak, unsafe_unretained,
+ /// readonly, readwrite, getter, setter,
+ /// nullable, nonnull, null_resettable, null_unspecified
+ /// ]
+ /// \endcode
+ /// \version 18
+ std::vector<std::string> ObjCPropertyAttributeOrder;
+
+ /// Add a space after ``@property`` in Objective-C, i.e. use
+ /// ``@property (readonly)`` instead of ``@property(readonly)``.
+ /// \version 3.7
+ bool ObjCSpaceAfterProperty;
+
/// Add a space in front of an Objective-C protocol list, i.e. use
/// ``Foo <Protocol>`` instead of ``Foo<Protocol>``.
+ /// \version 3.7
bool ObjCSpaceBeforeProtocolList;
+ /// Different ways to try to fit all constructor initializers on a line.
+ enum PackConstructorInitializersStyle : int8_t {
+ /// Always put each constructor initializer on its own line.
+ /// \code
+ /// Constructor()
+ /// : a(),
+ /// b()
+ /// \endcode
+ PCIS_Never,
+ /// Bin-pack constructor initializers.
+ /// \code
+ /// Constructor()
+ /// : aaaaaaaaaaaaaaaaaaaa(), bbbbbbbbbbbbbbbbbbbb(),
+ /// cccccccccccccccccccc()
+ /// \endcode
+ PCIS_BinPack,
+ /// Put all constructor initializers on the current line if they fit.
+ /// Otherwise, put each one on its own line.
+ /// \code
+ /// Constructor() : a(), b()
+ ///
+ /// Constructor()
+ /// : aaaaaaaaaaaaaaaaaaaa(),
+ /// bbbbbbbbbbbbbbbbbbbb(),
+ /// ddddddddddddd()
+ /// \endcode
+ PCIS_CurrentLine,
+ /// Same as ``PCIS_CurrentLine`` except that if all constructor initializers
+ /// do not fit on the current line, try to fit them on the next line.
+ /// \code
+ /// Constructor() : a(), b()
+ ///
+ /// Constructor()
+ /// : aaaaaaaaaaaaaaaaaaaa(), bbbbbbbbbbbbbbbbbbbb(), ddddddddddddd()
+ ///
+ /// Constructor()
+ /// : aaaaaaaaaaaaaaaaaaaa(),
+ /// bbbbbbbbbbbbbbbbbbbb(),
+ /// cccccccccccccccccccc()
+ /// \endcode
+ PCIS_NextLine,
+ /// Put all constructor initializers on the next line if they fit.
+ /// Otherwise, put each one on its own line.
+ /// \code
+ /// Constructor()
+ /// : a(), b()
+ ///
+ /// Constructor()
+ /// : aaaaaaaaaaaaaaaaaaaa(), bbbbbbbbbbbbbbbbbbbb(), ddddddddddddd()
+ ///
+ /// Constructor()
+ /// : aaaaaaaaaaaaaaaaaaaa(),
+ /// bbbbbbbbbbbbbbbbbbbb(),
+ /// cccccccccccccccccccc()
+ /// \endcode
+ PCIS_NextLineOnly,
+ };
+
+ /// The pack constructor initializers style to use.
+ /// \version 14
+ PackConstructorInitializersStyle PackConstructorInitializers;
+
/// The penalty for breaking around an assignment operator.
+ /// \version 5
unsigned PenaltyBreakAssignment;
/// The penalty for breaking a function call after ``call(``.
+ /// \version 3.7
unsigned PenaltyBreakBeforeFirstCallParameter;
/// The penalty for each line break introduced inside a comment.
+ /// \version 3.7
unsigned PenaltyBreakComment;
/// The penalty for breaking before the first ``<<``.
+ /// \version 3.7
unsigned PenaltyBreakFirstLessLess;
+ /// The penalty for breaking after ``(``.
+ /// \version 14
+ unsigned PenaltyBreakOpenParenthesis;
+
+ /// The penalty for breaking after ``::``.
+ /// \version 18
+ unsigned PenaltyBreakScopeResolution;
+
/// The penalty for each line break introduced inside a string literal.
+ /// \version 3.7
unsigned PenaltyBreakString;
/// The penalty for breaking after template declaration.
+ /// \version 7
unsigned PenaltyBreakTemplateDeclaration;
/// The penalty for each character outside of the column limit.
+ /// \version 3.7
unsigned PenaltyExcessCharacter;
- /// Penalty for putting the return type of a function onto its own
- /// line.
- unsigned PenaltyReturnTypeOnItsOwnLine;
-
/// Penalty for each character of whitespace indentation
/// (counted relative to leading non-whitespace column).
+ /// \version 12
unsigned PenaltyIndentedWhitespace;
+ /// Penalty for putting the return type of a function onto its own line.
+ /// \version 3.7
+ unsigned PenaltyReturnTypeOnItsOwnLine;
+
/// The ``&``, ``&&`` and ``*`` alignment style.
- enum PointerAlignmentStyle : unsigned char {
+ enum PointerAlignmentStyle : int8_t {
/// Align pointer to the left.
/// \code
/// int* a;
@@ -2748,6 +3443,7 @@ struct FormatStyle {
};
/// Pointer and reference alignment style.
+ /// \version 3.7
PointerAlignmentStyle PointerAlignment;
/// The number of columns to use for indentation of preprocessor statements.
@@ -2762,8 +3458,81 @@ struct FormatStyle {
/// # define BAR
/// #endif
/// \endcode
+ /// \version 13
int PPIndentWidth;
+ /// Different specifiers and qualifiers alignment styles.
+ enum QualifierAlignmentStyle : int8_t {
+ /// Don't change specifiers/qualifiers to either Left or Right alignment
+ /// (default).
+ /// \code
+ /// int const a;
+ /// const int *a;
+ /// \endcode
+ QAS_Leave,
+ /// Change specifiers/qualifiers to be left-aligned.
+ /// \code
+ /// const int a;
+ /// const int *a;
+ /// \endcode
+ QAS_Left,
+ /// Change specifiers/qualifiers to be right-aligned.
+ /// \code
+ /// int const a;
+ /// int const *a;
+ /// \endcode
+ QAS_Right,
+ /// Change specifiers/qualifiers to be aligned based on ``QualifierOrder``.
+ /// With:
+ /// \code{.yaml}
+ /// QualifierOrder: ['inline', 'static', 'type', 'const']
+ /// \endcode
+ ///
+ /// \code
+ ///
+ /// int const a;
+ /// int const *a;
+ /// \endcode
+ QAS_Custom
+ };
+
+ /// Different ways to arrange specifiers and qualifiers (e.g. const/volatile).
+ /// \warning
+ /// Setting ``QualifierAlignment`` to something other than ``Leave``, COULD
+ /// lead to incorrect code formatting due to incorrect decisions made due to
+ /// clang-formats lack of complete semantic information.
+ /// As such extra care should be taken to review code changes made by the use
+ /// of this option.
+ /// \endwarning
+ /// \version 14
+ QualifierAlignmentStyle QualifierAlignment;
+
+ /// The order in which the qualifiers appear.
+ /// Order is an array that can contain any of the following:
+ ///
+ /// * const
+ /// * inline
+ /// * static
+ /// * friend
+ /// * constexpr
+ /// * volatile
+ /// * restrict
+ /// * type
+ ///
+ /// \note
+ /// it MUST contain 'type'.
+ /// \endnote
+ ///
+ /// Items to the left of 'type' will be placed to the left of the type and
+ /// aligned in the order supplied. Items to the right of 'type' will be
+ /// placed to the right of the type and aligned in the order supplied.
+ ///
+ /// \code{.yaml}
+ /// QualifierOrder: ['inline', 'static', 'type', 'const', 'volatile' ]
+ /// \endcode
+ /// \version 14
+ std::vector<std::string> QualifierOrder;
+
/// See documentation of ``RawStringFormats``.
struct RawStringFormat {
/// The language of this raw string.
@@ -2821,10 +3590,11 @@ struct FormatStyle {
/// BasedOnStyle: llvm
/// CanonicalDelimiter: 'cc'
/// \endcode
+ /// \version 6
std::vector<RawStringFormat> RawStringFormats;
/// \brief The ``&`` and ``&&`` alignment style.
- enum ReferenceAlignmentStyle {
+ enum ReferenceAlignmentStyle : int8_t {
/// Align reference like ``PointerAlignment``.
RAS_Pointer,
/// Align reference to the left.
@@ -2846,10 +3616,13 @@ struct FormatStyle {
/// \brief Reference alignment style (overrides ``PointerAlignment`` for
/// references).
+ /// \version 13
ReferenceAlignmentStyle ReferenceAlignment;
// clang-format off
- /// If ``true``, clang-format will attempt to re-flow comments.
+ /// If ``true``, clang-format will attempt to re-flow comments. That is it
+ /// will touch a comment and *reflow* long comments into new lines, trying to
+ /// obey the ``ColumnLimit``.
/// \code
/// false:
/// // veryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongComment with plenty of information
@@ -2861,9 +3634,282 @@ struct FormatStyle {
/// /* second veryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongComment with plenty of
/// * information */
/// \endcode
+ /// \version 3.8
bool ReflowComments;
// clang-format on
+ /// Remove optional braces of control statements (``if``, ``else``, ``for``,
+ /// and ``while``) in C++ according to the LLVM coding style.
+ /// \warning
+ /// This option will be renamed and expanded to support other styles.
+ /// \endwarning
+ /// \warning
+ /// Setting this option to ``true`` could lead to incorrect code formatting
+ /// due to clang-format's lack of complete semantic information. As such,
+ /// extra care should be taken to review code changes made by this option.
+ /// \endwarning
+ /// \code
+ /// false: true:
+ ///
+ /// if (isa<FunctionDecl>(D)) { vs. if (isa<FunctionDecl>(D))
+ /// handleFunctionDecl(D); handleFunctionDecl(D);
+ /// } else if (isa<VarDecl>(D)) { else if (isa<VarDecl>(D))
+ /// handleVarDecl(D); handleVarDecl(D);
+ /// }
+ ///
+ /// if (isa<VarDecl>(D)) { vs. if (isa<VarDecl>(D)) {
+ /// for (auto *A : D.attrs()) { for (auto *A : D.attrs())
+ /// if (shouldProcessAttr(A)) { if (shouldProcessAttr(A))
+ /// handleAttr(A); handleAttr(A);
+ /// } }
+ /// }
+ /// }
+ ///
+ /// if (isa<FunctionDecl>(D)) { vs. if (isa<FunctionDecl>(D))
+ /// for (auto *A : D.attrs()) { for (auto *A : D.attrs())
+ /// handleAttr(A); handleAttr(A);
+ /// }
+ /// }
+ ///
+ /// if (auto *D = (T)(D)) { vs. if (auto *D = (T)(D)) {
+ /// if (shouldProcess(D)) { if (shouldProcess(D))
+ /// handleVarDecl(D); handleVarDecl(D);
+ /// } else { else
+ /// markAsIgnored(D); markAsIgnored(D);
+ /// } }
+ /// }
+ ///
+ /// if (a) { vs. if (a)
+ /// b(); b();
+ /// } else { else if (c)
+ /// if (c) { d();
+ /// d(); else
+ /// } else { e();
+ /// e();
+ /// }
+ /// }
+ /// \endcode
+ /// \version 14
+ bool RemoveBracesLLVM;
+
+ /// Types of redundant parentheses to remove.
+ enum RemoveParenthesesStyle : int8_t {
+ /// Do not remove parentheses.
+ /// \code
+ /// class __declspec((dllimport)) X {};
+ /// co_return (((0)));
+ /// return ((a + b) - ((c + d)));
+ /// \endcode
+ RPS_Leave,
+ /// Replace multiple parentheses with single parentheses.
+ /// \code
+ /// class __declspec(dllimport) X {};
+ /// co_return (0);
+ /// return ((a + b) - (c + d));
+ /// \endcode
+ RPS_MultipleParentheses,
+ /// Also remove parentheses enclosing the expression in a
+ /// ``return``/``co_return`` statement.
+ /// \code
+ /// class __declspec(dllimport) X {};
+ /// co_return 0;
+ /// return (a + b) - (c + d);
+ /// \endcode
+ RPS_ReturnStatement,
+ };
+
+ /// Remove redundant parentheses.
+ /// \warning
+ /// Setting this option to any value other than ``Leave`` could lead to
+ /// incorrect code formatting due to clang-format's lack of complete semantic
+ /// information. As such, extra care should be taken to review code changes
+ /// made by this option.
+ /// \endwarning
+ /// \version 17
+ RemoveParenthesesStyle RemoveParentheses;
+
+ /// Remove semicolons after the closing brace of a non-empty function.
+ /// \warning
+ /// Setting this option to ``true`` could lead to incorrect code formatting
+ /// due to clang-format's lack of complete semantic information. As such,
+ /// extra care should be taken to review code changes made by this option.
+ /// \endwarning
+ /// \code
+ /// false: true:
+ ///
+ /// int max(int a, int b) { int max(int a, int b) {
+ /// return a > b ? a : b; return a > b ? a : b;
+ /// }; }
+ ///
+ /// \endcode
+ /// \version 16
+ bool RemoveSemicolon;
+
+ /// \brief The possible positions for the requires clause. The
+ /// ``IndentRequires`` option is only used if the ``requires`` is put on the
+ /// start of a line.
+ enum RequiresClausePositionStyle : int8_t {
+ /// Always put the ``requires`` clause on its own line.
+ /// \code
+ /// template <typename T>
+ /// requires C<T>
+ /// struct Foo {...
+ ///
+ /// template <typename T>
+ /// requires C<T>
+ /// void bar(T t) {...
+ ///
+ /// template <typename T>
+ /// void baz(T t)
+ /// requires C<T>
+ /// {...
+ /// \endcode
+ RCPS_OwnLine,
+ /// Try to put the clause together with the preceding part of a declaration.
+ /// For class templates: stick to the template declaration.
+ /// For function templates: stick to the template declaration.
+ /// For function declaration followed by a requires clause: stick to the
+ /// parameter list.
+ /// \code
+ /// template <typename T> requires C<T>
+ /// struct Foo {...
+ ///
+ /// template <typename T> requires C<T>
+ /// void bar(T t) {...
+ ///
+ /// template <typename T>
+ /// void baz(T t) requires C<T>
+ /// {...
+ /// \endcode
+ RCPS_WithPreceding,
+ /// Try to put the ``requires`` clause together with the class or function
+ /// declaration.
+ /// \code
+ /// template <typename T>
+ /// requires C<T> struct Foo {...
+ ///
+ /// template <typename T>
+ /// requires C<T> void bar(T t) {...
+ ///
+ /// template <typename T>
+ /// void baz(T t)
+ /// requires C<T> {...
+ /// \endcode
+ RCPS_WithFollowing,
+ /// Try to put everything in the same line if possible. Otherwise normal
+ /// line breaking rules take over.
+ /// \code
+ /// // Fitting:
+ /// template <typename T> requires C<T> struct Foo {...
+ ///
+ /// template <typename T> requires C<T> void bar(T t) {...
+ ///
+ /// template <typename T> void bar(T t) requires C<T> {...
+ ///
+ /// // Not fitting, one possible example:
+ /// template <typename LongName>
+ /// requires C<LongName>
+ /// struct Foo {...
+ ///
+ /// template <typename LongName>
+ /// requires C<LongName>
+ /// void bar(LongName ln) {
+ ///
+ /// template <typename LongName>
+ /// void bar(LongName ln)
+ /// requires C<LongName> {
+ /// \endcode
+ RCPS_SingleLine,
+ };
+
+ /// \brief The position of the ``requires`` clause.
+ /// \version 15
+ RequiresClausePositionStyle RequiresClausePosition;
+
+ /// Indentation logic for requires expression bodies.
+ enum RequiresExpressionIndentationKind : int8_t {
+ /// Align requires expression body relative to the indentation level of the
+ /// outer scope the requires expression resides in.
+ /// This is the default.
+ /// \code
+ /// template <typename T>
+ /// concept C = requires(T t) {
+ /// ...
+ /// }
+ /// \endcode
+ REI_OuterScope,
+ /// Align requires expression body relative to the ``requires`` keyword.
+ /// \code
+ /// template <typename T>
+ /// concept C = requires(T t) {
+ /// ...
+ /// }
+ /// \endcode
+ REI_Keyword,
+ };
+
+ /// The indentation used for requires expression bodies.
+ /// \version 16
+ RequiresExpressionIndentationKind RequiresExpressionIndentation;
+
+ /// \brief The style if definition blocks should be separated.
+ enum SeparateDefinitionStyle : int8_t {
+ /// Leave definition blocks as they are.
+ SDS_Leave,
+ /// Insert an empty line between definition blocks.
+ SDS_Always,
+ /// Remove any empty line between definition blocks.
+ SDS_Never
+ };
+
+ /// Specifies the use of empty lines to separate definition blocks, including
+ /// classes, structs, enums, and functions.
+ /// \code
+ /// Never v.s. Always
+ /// #include <cstring> #include <cstring>
+ /// struct Foo {
+ /// int a, b, c; struct Foo {
+ /// }; int a, b, c;
+ /// namespace Ns { };
+ /// class Bar {
+ /// public: namespace Ns {
+ /// struct Foobar { class Bar {
+ /// int a; public:
+ /// int b; struct Foobar {
+ /// }; int a;
+ /// private: int b;
+ /// int t; };
+ /// int method1() {
+ /// // ... private:
+ /// } int t;
+ /// enum List {
+ /// ITEM1, int method1() {
+ /// ITEM2 // ...
+ /// }; }
+ /// template<typename T>
+ /// int method2(T x) { enum List {
+ /// // ... ITEM1,
+ /// } ITEM2
+ /// int i, j, k; };
+ /// int method3(int par) {
+ /// // ... template<typename T>
+ /// } int method2(T x) {
+ /// }; // ...
+ /// class C {}; }
+ /// }
+ /// int i, j, k;
+ ///
+ /// int method3(int par) {
+ /// // ...
+ /// }
+ /// };
+ ///
+ /// class C {};
+ /// }
+ /// \endcode
+ /// \version 14
+ SeparateDefinitionStyle SeparateDefinitionBlocks;
+
/// The maximal number of unwrapped lines that a short namespace spans.
/// Defaults to 1.
///
@@ -2883,10 +3929,15 @@ struct FormatStyle {
/// int bar; int bar;
/// } // namespace b } // namespace b
/// \endcode
+ /// \version 13
unsigned ShortNamespaceLines;
+ /// Do not format macro definition body.
+ /// \version 18
+ bool SkipMacroDefinitionBody;
+
/// Include sorting options.
- enum SortIncludesOptions : unsigned char {
+ enum SortIncludesOptions : int8_t {
/// Includes are never sorted.
/// \code
/// #include "B/A.h"
@@ -2917,15 +3968,11 @@ struct FormatStyle {
};
/// Controls if and how clang-format will sort ``#includes``.
- /// If ``Never``, includes are never sorted.
- /// If ``CaseInsensitive``, includes are sorted in an ASCIIbetical or case
- /// insensitive fashion.
- /// If ``CaseSensitive``, includes are sorted in an alphabetical or case
- /// sensitive fashion.
+ /// \version 3.8
SortIncludesOptions SortIncludes;
/// Position for Java Static imports.
- enum SortJavaStaticImportOptions : unsigned char {
+ enum SortJavaStaticImportOptions : int8_t {
/// Static imports are placed before non-static imports.
/// \code{.java}
/// import static org.example.function1;
@@ -2945,29 +3992,59 @@ struct FormatStyle {
/// When sorting Java imports, by default static imports are placed before
/// non-static imports. If ``JavaStaticImportAfterImport`` is ``After``,
/// static imports are placed after non-static imports.
+ /// \version 12
SortJavaStaticImportOptions SortJavaStaticImport;
- /// If ``true``, clang-format will sort using declarations.
- ///
- /// The order of using declarations is defined as follows:
- /// Split the strings by "::" and discard any initial empty strings. The last
- /// element of each list is a non-namespace name; all others are namespace
- /// names. Sort the lists of names lexicographically, where the sort order of
- /// individual names is that all non-namespace names come before all namespace
- /// names, and within those groups, names are in case-insensitive
- /// lexicographic order.
- /// \code
- /// false: true:
- /// using std::cout; vs. using std::cin;
- /// using std::cin; using std::cout;
- /// \endcode
- bool SortUsingDeclarations;
+ /// Using declaration sorting options.
+ enum SortUsingDeclarationsOptions : int8_t {
+ /// Using declarations are never sorted.
+ /// \code
+ /// using std::chrono::duration_cast;
+ /// using std::move;
+ /// using boost::regex;
+ /// using boost::regex_constants::icase;
+ /// using std::string;
+ /// \endcode
+ SUD_Never,
+ /// Using declarations are sorted in the order defined as follows:
+ /// Split the strings by "::" and discard any initial empty strings. Sort
+ /// the lists of names lexicographically, and within those groups, names are
+ /// in case-insensitive lexicographic order.
+ /// \code
+ /// using boost::regex;
+ /// using boost::regex_constants::icase;
+ /// using std::chrono::duration_cast;
+ /// using std::move;
+ /// using std::string;
+ /// \endcode
+ SUD_Lexicographic,
+ /// Using declarations are sorted in the order defined as follows:
+ /// Split the strings by "::" and discard any initial empty strings. The
+ /// last element of each list is a non-namespace name; all others are
+ /// namespace names. Sort the lists of names lexicographically, where the
+ /// sort order of individual names is that all non-namespace names come
+ /// before all namespace names, and within those groups, names are in
+ /// case-insensitive lexicographic order.
+ /// \code
+ /// using boost::regex;
+ /// using boost::regex_constants::icase;
+ /// using std::move;
+ /// using std::string;
+ /// using std::chrono::duration_cast;
+ /// \endcode
+ SUD_LexicographicNumeric,
+ };
+
+ /// Controls if and how clang-format will sort using declarations.
+ /// \version 5
+ SortUsingDeclarationsOptions SortUsingDeclarations;
/// If ``true``, a space is inserted after C style casts.
/// \code
/// true: false:
/// (int) i; vs. (int)i;
/// \endcode
+ /// \version 3.5
bool SpaceAfterCStyleCast;
/// If ``true``, a space is inserted after the logical not operator (``!``).
@@ -2975,6 +4052,7 @@ struct FormatStyle {
/// true: false:
/// ! someExpression(); vs. !someExpression();
/// \endcode
+ /// \version 9
bool SpaceAfterLogicalNot;
/// If \c true, a space will be inserted after the 'template' keyword.
@@ -2982,10 +4060,11 @@ struct FormatStyle {
/// true: false:
/// template <int> void foo(); vs. template<int> void foo();
/// \endcode
+ /// \version 4
bool SpaceAfterTemplateKeyword;
/// Different ways to put a space before opening parentheses.
- enum SpaceAroundPointerQualifiersStyle : unsigned char {
+ enum SpaceAroundPointerQualifiersStyle : int8_t {
/// Don't ensure spaces around pointer qualifiers and use PointerAlignment
/// instead.
/// \code
@@ -3014,6 +4093,7 @@ struct FormatStyle {
};
/// Defines in which cases to put a space before or after pointer qualifiers
+ /// \version 12
SpaceAroundPointerQualifiersStyle SpaceAroundPointerQualifiers;
/// If ``false``, spaces will be removed before assignment operators.
@@ -3022,6 +4102,7 @@ struct FormatStyle {
/// int a = 5; vs. int a= 5;
/// a += 42; a+= 42;
/// \endcode
+ /// \version 3.7
bool SpaceBeforeAssignmentOperators;
/// If ``false``, spaces will be removed before case colon.
@@ -3031,6 +4112,7 @@ struct FormatStyle {
/// case 1 : break; case 1: break;
/// } }
/// \endcode
+ /// \version 12
bool SpaceBeforeCaseColon;
/// If ``true``, a space will be inserted before a C++11 braced list
@@ -3042,6 +4124,7 @@ struct FormatStyle {
/// vector<int> { 1, 2, 3 }; vector<int>{ 1, 2, 3 };
/// new int[3] { 1, 2, 3 }; new int[3]{ 1, 2, 3 };
/// \endcode
+ /// \version 7
bool SpaceBeforeCpp11BracedList;
/// If ``false``, spaces will be removed before constructor initializer
@@ -3050,6 +4133,7 @@ struct FormatStyle {
/// true: false:
/// Foo::Foo() : a(a) {} Foo::Foo(): a(a) {}
/// \endcode
+ /// \version 7
bool SpaceBeforeCtorInitializerColon;
/// If ``false``, spaces will be removed before inheritance colon.
@@ -3057,18 +4141,25 @@ struct FormatStyle {
/// true: false:
/// class Foo : Bar {} vs. class Foo: Bar {}
/// \endcode
+ /// \version 7
bool SpaceBeforeInheritanceColon;
+ /// If ``true``, a space will be added before a JSON colon. For other
+ /// languages, e.g. JavaScript, use ``SpacesInContainerLiterals`` instead.
+ /// \code
+ /// true: false:
+ /// { {
+ /// "key" : "value" vs. "key": "value"
+ /// } }
+ /// \endcode
+ /// \version 17
+ bool SpaceBeforeJsonColon;
+
/// Different ways to put a space before opening parentheses.
- enum SpaceBeforeParensOptions : unsigned char {
- /// Never put a space before opening parentheses.
- /// \code
- /// void f() {
- /// if(true) {
- /// f();
- /// }
- /// }
- /// \endcode
+ enum SpaceBeforeParensStyle : int8_t {
+ /// This is **deprecated** and replaced by ``Custom`` below, with all
+ /// ``SpaceBeforeParensOptions`` but ``AfterPlacementOperator`` set to
+ /// ``false``.
SBPO_Never,
/// Put a space before opening parentheses only after control statement
/// keywords (``for/if/while...``).
@@ -3084,7 +4175,7 @@ struct FormatStyle {
/// ForEach and If macros. This is useful in projects where ForEach/If
/// macros are treated as function calls instead of control statements.
/// ``SBPO_ControlStatementsExceptForEachMacros`` remains an alias for
- /// backward compatability.
+ /// backward compatibility.
/// \code
/// void f() {
/// Q_FOREACH(...) {
@@ -3115,11 +4206,151 @@ struct FormatStyle {
/// }
/// }
/// \endcode
- SBPO_Always
+ SBPO_Always,
+ /// Configure each individual space before parentheses in
+ /// ``SpaceBeforeParensOptions``.
+ SBPO_Custom,
};
/// Defines in which cases to put a space before opening parentheses.
- SpaceBeforeParensOptions SpaceBeforeParens;
+ /// \version 3.5
+ SpaceBeforeParensStyle SpaceBeforeParens;
+
+ /// Precise control over the spacing before parentheses.
+ /// \code
+ /// # Should be declared this way:
+ /// SpaceBeforeParens: Custom
+ /// SpaceBeforeParensOptions:
+ /// AfterControlStatements: true
+ /// AfterFunctionDefinitionName: true
+ /// \endcode
+ struct SpaceBeforeParensCustom {
+ /// If ``true``, put space between control statement keywords
+ /// (for/if/while...) and opening parentheses.
+ /// \code
+ /// true: false:
+ /// if (...) {} vs. if(...) {}
+ /// \endcode
+ bool AfterControlStatements;
+ /// If ``true``, put space between foreach macros and opening parentheses.
+ /// \code
+ /// true: false:
+ /// FOREACH (...) vs. FOREACH(...)
+ /// <loop-body> <loop-body>
+ /// \endcode
+ bool AfterForeachMacros;
+ /// If ``true``, put a space between function declaration name and opening
+ /// parentheses.
+ /// \code
+ /// true: false:
+ /// void f (); vs. void f();
+ /// \endcode
+ bool AfterFunctionDeclarationName;
+ /// If ``true``, put a space between function definition name and opening
+ /// parentheses.
+ /// \code
+ /// true: false:
+ /// void f () {} vs. void f() {}
+ /// \endcode
+ bool AfterFunctionDefinitionName;
+ /// If ``true``, put space between if macros and opening parentheses.
+ /// \code
+ /// true: false:
+ /// IF (...) vs. IF(...)
+ /// <conditional-body> <conditional-body>
+ /// \endcode
+ bool AfterIfMacros;
+ /// If ``true``, put a space between operator overloading and opening
+ /// parentheses.
+ /// \code
+ /// true: false:
+ /// void operator++ (int a); vs. void operator++(int a);
+ /// object.operator++ (10); object.operator++(10);
+ /// \endcode
+ bool AfterOverloadedOperator;
+ /// If ``true``, put a space between operator ``new``/``delete`` and opening
+ /// parenthesis.
+ /// \code
+ /// true: false:
+ /// new (buf) T; vs. new(buf) T;
+ /// delete (buf) T; delete(buf) T;
+ /// \endcode
+ bool AfterPlacementOperator;
+ /// If ``true``, put space between requires keyword in a requires clause and
+ /// opening parentheses, if there is one.
+ /// \code
+ /// true: false:
+ /// template<typename T> vs. template<typename T>
+ /// requires (A<T> && B<T>) requires(A<T> && B<T>)
+ /// ... ...
+ /// \endcode
+ bool AfterRequiresInClause;
+ /// If ``true``, put space between requires keyword in a requires expression
+ /// and opening parentheses.
+ /// \code
+ /// true: false:
+ /// template<typename T> vs. template<typename T>
+ /// concept C = requires (T t) { concept C = requires(T t) {
+ /// ... ...
+ /// } }
+ /// \endcode
+ bool AfterRequiresInExpression;
+ /// If ``true``, put a space before opening parentheses only if the
+ /// parentheses are not empty.
+ /// \code
+ /// true: false:
+ /// void f (int a); vs. void f();
+ /// f (a); f();
+ /// \endcode
+ bool BeforeNonEmptyParentheses;
+
+ SpaceBeforeParensCustom()
+ : AfterControlStatements(false), AfterForeachMacros(false),
+ AfterFunctionDeclarationName(false),
+ AfterFunctionDefinitionName(false), AfterIfMacros(false),
+ AfterOverloadedOperator(false), AfterPlacementOperator(true),
+ AfterRequiresInClause(false), AfterRequiresInExpression(false),
+ BeforeNonEmptyParentheses(false) {}
+
+ bool operator==(const SpaceBeforeParensCustom &Other) const {
+ return AfterControlStatements == Other.AfterControlStatements &&
+ AfterForeachMacros == Other.AfterForeachMacros &&
+ AfterFunctionDeclarationName ==
+ Other.AfterFunctionDeclarationName &&
+ AfterFunctionDefinitionName == Other.AfterFunctionDefinitionName &&
+ AfterIfMacros == Other.AfterIfMacros &&
+ AfterOverloadedOperator == Other.AfterOverloadedOperator &&
+ AfterPlacementOperator == Other.AfterPlacementOperator &&
+ AfterRequiresInClause == Other.AfterRequiresInClause &&
+ AfterRequiresInExpression == Other.AfterRequiresInExpression &&
+ BeforeNonEmptyParentheses == Other.BeforeNonEmptyParentheses;
+ }
+ };
+
+ /// Control of individual space before parentheses.
+ ///
+ /// If ``SpaceBeforeParens`` is set to ``Custom``, use this to specify
+ /// how each individual space before parentheses case should be handled.
+ /// Otherwise, this is ignored.
+ /// \code{.yaml}
+ /// # Example of usage:
+ /// SpaceBeforeParens: Custom
+ /// SpaceBeforeParensOptions:
+ /// AfterControlStatements: true
+ /// AfterFunctionDefinitionName: true
+ /// \endcode
+ /// \version 14
+ SpaceBeforeParensCustom SpaceBeforeParensOptions;
+
+ /// If ``true``, spaces will be before ``[``.
+ /// Lambdas will not be affected. Only the first ``[`` will get a space added.
+ /// \code
+ /// true: false:
+ /// int a [5]; vs. int a[5];
+ /// int a [5][5]; vs. int a[5][5];
+ /// \endcode
+ /// \version 10
+ bool SpaceBeforeSquareBrackets;
/// If ``false``, spaces will be removed before range-based for loop
/// colon.
@@ -3127,6 +4358,7 @@ struct FormatStyle {
/// true: false:
/// for (auto v : values) {} vs. for(auto v: values) {}
/// \endcode
+ /// \version 7
bool SpaceBeforeRangeBasedForLoopColon;
/// If ``true``, spaces will be inserted into ``{}``.
@@ -3135,26 +4367,24 @@ struct FormatStyle {
/// void f() { } vs. void f() {}
/// while (true) { } while (true) {}
/// \endcode
+ /// \version 10
bool SpaceInEmptyBlock;
/// If ``true``, spaces may be inserted into ``()``.
- /// \code
- /// true: false:
- /// void f( ) { vs. void f() {
- /// int x[] = {foo( ), bar( )}; int x[] = {foo(), bar()};
- /// if (true) { if (true) {
- /// f( ); f();
- /// } }
- /// } }
- /// \endcode
- bool SpaceInEmptyParentheses;
+ /// This option is **deprecated**. See ``InEmptyParentheses`` of
+ /// ``SpacesInParensOptions``.
+ /// \version 3.7
+ // bool SpaceInEmptyParentheses;
/// The number of spaces before trailing line comments
/// (``//`` - comments).
///
- /// This does not affect trailing block comments (``/*`` - comments) as
- /// those commonly have different usage patterns and a number of special
- /// cases.
+ /// This does not affect trailing block comments (``/*`` - comments) as those
+ /// commonly have different usage patterns and a number of special cases. In
+ /// the case of Verilog, it doesn't affect a comment right after the opening
+ /// parenthesis in the port or parameter list in a module header, because it
+ /// is probably for the port on the following line instead of the parenthesis
+ /// it follows.
/// \code
/// SpacesBeforeTrailingComments: 3
/// void f() {
@@ -3163,11 +4393,12 @@ struct FormatStyle {
/// } // foo
/// }
/// \endcode
+ /// \version 3.7
unsigned SpacesBeforeTrailingComments;
- /// Styles for adding spacing after ``<`` and before ``>`
+ /// Styles for adding spacing after ``<`` and before ``>``
/// in template argument lists.
- enum SpacesInAnglesStyle : unsigned char {
+ enum SpacesInAnglesStyle : int8_t {
/// Remove spaces after ``<`` and before ``>``.
/// \code
/// static_cast<int>(arg);
@@ -3185,34 +4416,34 @@ struct FormatStyle {
SIAS_Leave
};
/// The SpacesInAnglesStyle to use for template argument lists.
+ /// \version 3.4
SpacesInAnglesStyle SpacesInAngles;
/// If ``true``, spaces will be inserted around if/for/switch/while
/// conditions.
- /// \code
- /// true: false:
- /// if ( a ) { ... } vs. if (a) { ... }
- /// while ( i < 5 ) { ... } while (i < 5) { ... }
- /// \endcode
- bool SpacesInConditionalStatement;
-
- /// If ``true``, spaces are inserted inside container literals (e.g.
- /// ObjC and Javascript array and dict literals).
+ /// This option is **deprecated**. See ``InConditionalStatements`` of
+ /// ``SpacesInParensOptions``.
+ /// \version 10
+ // bool SpacesInConditionalStatement;
+
+ /// If ``true``, spaces are inserted inside container literals (e.g. ObjC and
+ /// Javascript array and dict literals). For JSON, use
+ /// ``SpaceBeforeJsonColon`` instead.
/// \code{.js}
/// true: false:
/// var arr = [ 1, 2, 3 ]; vs. var arr = [1, 2, 3];
/// f({a : 1, b : 2, c : 3}); f({a: 1, b: 2, c: 3});
/// \endcode
+ /// \version 3.7
bool SpacesInContainerLiterals;
/// If ``true``, spaces may be inserted into C style casts.
- /// \code
- /// true: false:
- /// x = ( int32 )y vs. x = (int32)y
- /// \endcode
- bool SpacesInCStyleCastParentheses;
+ /// This option is **deprecated**. See ``InCStyleCasts`` of
+ /// ``SpacesInParensOptions``.
+ /// \version 3.7
+ // bool SpacesInCStyleCastParentheses;
- /// Control of spaces within a single line comment
+ /// Control of spaces within a single line comment.
struct SpacesInLineComment {
/// The minimum number of spaces at the start of the comment.
unsigned Minimum;
@@ -3223,38 +4454,141 @@ struct FormatStyle {
/// How many spaces are allowed at the start of a line comment. To disable the
/// maximum set it to ``-1``, apart from that the maximum takes precedence
/// over the minimum.
- /// \code Minimum = 1 Maximum = -1
- /// // One space is forced
+ /// \code
+ /// Minimum = 1
+ /// Maximum = -1
+ /// // One space is forced
///
- /// // but more spaces are possible
+ /// // but more spaces are possible
///
- /// Minimum = 0
- /// Maximum = 0
- /// //Forces to start every comment directly after the slashes
+ /// Minimum = 0
+ /// Maximum = 0
+ /// //Forces to start every comment directly after the slashes
/// \endcode
///
/// Note that in line comment sections the relative indent of the subsequent
/// lines is kept, that means the following:
/// \code
- /// before: after:
- /// Minimum: 1
- /// //if (b) { // if (b) {
- /// // return true; // return true;
- /// //} // }
- ///
- /// Maximum: 0
- /// /// List: ///List:
- /// /// - Foo /// - Foo
- /// /// - Bar /// - Bar
+ /// before: after:
+ /// Minimum: 1
+ /// //if (b) { // if (b) {
+ /// // return true; // return true;
+ /// //} // }
+ ///
+ /// Maximum: 0
+ /// /// List: ///List:
+ /// /// - Foo /// - Foo
+ /// /// - Bar /// - Bar
/// \endcode
+ ///
+ /// This option has only effect if ``ReflowComments`` is set to ``true``.
+ /// \version 13
SpacesInLineComment SpacesInLineCommentPrefix;
+ /// Different ways to put a space before opening and closing parentheses.
+ enum SpacesInParensStyle : int8_t {
+ /// Never put a space in parentheses.
+ /// \code
+ /// void f() {
+ /// if(true) {
+ /// f();
+ /// }
+ /// }
+ /// \endcode
+ SIPO_Never,
+ /// Configure each individual space in parentheses in
+ /// `SpacesInParensOptions`.
+ SIPO_Custom,
+ };
+
/// If ``true``, spaces will be inserted after ``(`` and before ``)``.
+ /// This option is **deprecated**. The previous behavior is preserved by using
+ /// ``SpacesInParens`` with ``Custom`` and by setting all
+ /// ``SpacesInParensOptions`` to ``true`` except for ``InCStyleCasts`` and
+ /// ``InEmptyParentheses``.
+ /// \version 3.7
+ // bool SpacesInParentheses;
+
+ /// Defines in which cases spaces will be inserted after ``(`` and before
+ /// ``)``.
+ /// \version 17
+ SpacesInParensStyle SpacesInParens;
+
+ /// Precise control over the spacing in parentheses.
/// \code
- /// true: false:
- /// t f( Deleted & ) & = delete; vs. t f(Deleted &) & = delete;
+ /// # Should be declared this way:
+ /// SpacesInParens: Custom
+ /// SpacesInParensOptions:
+ /// InConditionalStatements: true
+ /// Other: true
/// \endcode
- bool SpacesInParentheses;
+ struct SpacesInParensCustom {
+ /// Put a space in parentheses only inside conditional statements
+ /// (``for/if/while/switch...``).
+ /// \code
+ /// true: false:
+ /// if ( a ) { ... } vs. if (a) { ... }
+ /// while ( i < 5 ) { ... } while (i < 5) { ... }
+ /// \endcode
+ bool InConditionalStatements;
+ /// Put a space in C style casts.
+ /// \code
+ /// true: false:
+ /// x = ( int32 )y vs. x = (int32)y
+ /// \endcode
+ bool InCStyleCasts;
+ /// Put a space in parentheses only if the parentheses are empty i.e. '()'
+ /// \code
+ /// true: false:
+ /// void f( ) { vs. void f() {
+ /// int x[] = {foo( ), bar( )}; int x[] = {foo(), bar()};
+ /// if (true) { if (true) {
+ /// f( ); f();
+ /// } }
+ /// } }
+ /// \endcode
+ bool InEmptyParentheses;
+ /// Put a space in parentheses not covered by preceding options.
+ /// \code
+ /// true: false:
+ /// t f( Deleted & ) & = delete; vs. t f(Deleted &) & = delete;
+ /// \endcode
+ bool Other;
+
+ SpacesInParensCustom()
+ : InConditionalStatements(false), InCStyleCasts(false),
+ InEmptyParentheses(false), Other(false) {}
+
+ SpacesInParensCustom(bool InConditionalStatements, bool InCStyleCasts,
+ bool InEmptyParentheses, bool Other)
+ : InConditionalStatements(InConditionalStatements),
+ InCStyleCasts(InCStyleCasts), InEmptyParentheses(InEmptyParentheses),
+ Other(Other) {}
+
+ bool operator==(const SpacesInParensCustom &R) const {
+ return InConditionalStatements == R.InConditionalStatements &&
+ InCStyleCasts == R.InCStyleCasts &&
+ InEmptyParentheses == R.InEmptyParentheses && Other == R.Other;
+ }
+ bool operator!=(const SpacesInParensCustom &R) const {
+ return !(*this == R);
+ }
+ };
+
+ /// Control of individual spaces in parentheses.
+ ///
+ /// If ``SpacesInParens`` is set to ``Custom``, use this to specify
+ /// how each individual space in parentheses case should be handled.
+ /// Otherwise, this is ignored.
+ /// \code{.yaml}
+ /// # Example of usage:
+ /// SpacesInParens: Custom
+ /// SpacesInParensOptions:
+ /// InConditionalStatements: true
+ /// InEmptyParentheses: true
+ /// \endcode
+ /// \version 17
+ SpacesInParensCustom SpacesInParensOptions;
/// If ``true``, spaces will be inserted after ``[`` and before ``]``.
/// Lambdas without arguments or unspecified size array declarations will not
@@ -3264,45 +4598,9 @@ struct FormatStyle {
/// int a[ 5 ]; vs. int a[5];
/// std::unique_ptr<int[]> foo() {} // Won't be affected
/// \endcode
+ /// \version 3.7
bool SpacesInSquareBrackets;
- /// If ``true``, spaces will be before ``[``.
- /// Lambdas will not be affected. Only the first ``[`` will get a space added.
- /// \code
- /// true: false:
- /// int a [5]; vs. int a[5];
- /// int a [5][5]; vs. int a[5][5];
- /// \endcode
- bool SpaceBeforeSquareBrackets;
-
- /// Styles for adding spacing around ``:`` in bitfield definitions.
- enum BitFieldColonSpacingStyle : unsigned char {
- /// Add one space on each side of the ``:``
- /// \code
- /// unsigned bf : 2;
- /// \endcode
- BFCS_Both,
- /// Add no space around the ``:`` (except when needed for
- /// ``AlignConsecutiveBitFields``).
- /// \code
- /// unsigned bf:2;
- /// \endcode
- BFCS_None,
- /// Add space before the ``:`` only
- /// \code
- /// unsigned bf :2;
- /// \endcode
- BFCS_Before,
- /// Add space after the ``:`` only (space may be added before if
- /// needed for ``AlignConsecutiveBitFields``).
- /// \code
- /// unsigned bf: 2;
- /// \endcode
- BFCS_After
- };
- /// The BitFieldColonSpacingStyle to use for bitfields.
- BitFieldColonSpacingStyle BitFieldColonSpacing;
-
/// Supported language standards for parsing and formatting C++ constructs.
/// \code
/// Latest: vector<set<int>>
@@ -3311,7 +4609,7 @@ struct FormatStyle {
///
/// The correct way to spell a specific language version is e.g. ``c++11``.
/// The historical aliases ``Cpp03`` and ``Cpp11`` are deprecated.
- enum LanguageStandard : unsigned char {
+ enum LanguageStandard : int8_t {
/// Parse and format as C++03.
/// ``Cpp03`` is a deprecated alias for ``c++03``
LS_Cpp03, // c++03
@@ -3335,6 +4633,7 @@ struct FormatStyle {
/// c++03: latest:
/// vector<set<int> > x; vs. vector<set<int>> x;
/// \endcode
+ /// \version 3.7
LanguageStandard Standard;
/// Macros which are ignored in front of a statement, as if they were an
@@ -3351,13 +4650,57 @@ struct FormatStyle {
/// unsigned char data = 'x';
/// emit signal(data); // Now it's fine again.
/// \endcode
+ /// \version 12
std::vector<std::string> StatementAttributeLikeMacros;
+ /// A vector of macros that should be interpreted as complete
+ /// statements.
+ ///
+ /// Typical macros are expressions, and require a semi-colon to be
+ /// added; sometimes this is not the case, and this allows to make
+ /// clang-format aware of such cases.
+ ///
+ /// For example: Q_UNUSED
+ /// \version 8
+ std::vector<std::string> StatementMacros;
+
/// The number of columns used for tab stops.
+ /// \version 3.7
unsigned TabWidth;
+ /// A vector of non-keyword identifiers that should be interpreted as type
+ /// names.
+ ///
+ /// A ``*``, ``&``, or ``&&`` between a type name and another non-keyword
+ /// identifier is annotated as a pointer or reference token instead of a
+ /// binary operator.
+ ///
+ /// \version 17
+ std::vector<std::string> TypeNames;
+
+ /// \brief A vector of macros that should be interpreted as type declarations
+ /// instead of as function calls.
+ ///
+ /// These are expected to be macros of the form:
+ /// \code
+ /// STACK_OF(...)
+ /// \endcode
+ ///
+ /// In the .clang-format configuration file, this can be configured like:
+ /// \code{.yaml}
+ /// TypenameMacros: ['STACK_OF', 'LIST']
+ /// \endcode
+ ///
+ /// For example: OpenSSL STACK_OF, BSD LIST_ENTRY.
+ /// \version 9
+ std::vector<std::string> TypenameMacros;
+
+ /// This option is **deprecated**. See ``LF`` and ``CRLF`` of ``LineEnding``.
+ /// \version 10
+ // bool UseCRLF;
+
/// Different ways to use tab in formatting.
- enum UseTabStyle : unsigned char {
+ enum UseTabStyle : int8_t {
/// Never use tab.
UT_Never,
/// Use tabs only for indentation.
@@ -3373,13 +4716,41 @@ struct FormatStyle {
UT_Always
};
- /// \brief Use ``\r\n`` instead of ``\n`` for line breaks.
- /// Also used as fallback if ``DeriveLineEnding`` is true.
- bool UseCRLF;
-
/// The way to use tab characters in the resulting file.
+ /// \version 3.7
UseTabStyle UseTab;
+ /// For Verilog, put each port on its own line in module instantiations.
+ /// \code
+ /// true:
+ /// ffnand ff1(.q(),
+ /// .qbar(out1),
+ /// .clear(in1),
+ /// .preset(in2));
+ ///
+ /// false:
+ /// ffnand ff1(.q(), .qbar(out1), .clear(in1), .preset(in2));
+ /// \endcode
+ /// \version 17
+ bool VerilogBreakBetweenInstancePorts;
+
+ /// A vector of macros which are whitespace-sensitive and should not
+ /// be touched.
+ ///
+ /// These are expected to be macros of the form:
+ /// \code
+ /// STRINGIZE(...)
+ /// \endcode
+ ///
+ /// In the .clang-format configuration file, this can be configured like:
+ /// \code{.yaml}
+ /// WhitespaceSensitiveMacros: ['STRINGIZE', 'PP_STRINGIZE']
+ /// \endcode
+ ///
+ /// For example: BOOST_PP_STRINGIZE
+ /// \version 11
+ std::vector<std::string> WhitespaceSensitiveMacros;
+
bool operator==(const FormatStyle &R) const {
return AccessModifierOffset == R.AccessModifierOffset &&
AlignAfterOpenBracket == R.AlignAfterOpenBracket &&
@@ -3388,18 +4759,22 @@ struct FormatStyle {
AlignConsecutiveBitFields == R.AlignConsecutiveBitFields &&
AlignConsecutiveDeclarations == R.AlignConsecutiveDeclarations &&
AlignConsecutiveMacros == R.AlignConsecutiveMacros &&
+ AlignConsecutiveShortCaseStatements ==
+ R.AlignConsecutiveShortCaseStatements &&
AlignEscapedNewlines == R.AlignEscapedNewlines &&
AlignOperands == R.AlignOperands &&
AlignTrailingComments == R.AlignTrailingComments &&
AllowAllArgumentsOnNextLine == R.AllowAllArgumentsOnNextLine &&
- AllowAllConstructorInitializersOnNextLine ==
- R.AllowAllConstructorInitializersOnNextLine &&
AllowAllParametersOfDeclarationOnNextLine ==
R.AllowAllParametersOfDeclarationOnNextLine &&
- AllowShortEnumsOnASingleLine == R.AllowShortEnumsOnASingleLine &&
+ AllowBreakBeforeNoexceptSpecifier ==
+ R.AllowBreakBeforeNoexceptSpecifier &&
AllowShortBlocksOnASingleLine == R.AllowShortBlocksOnASingleLine &&
AllowShortCaseLabelsOnASingleLine ==
R.AllowShortCaseLabelsOnASingleLine &&
+ AllowShortCompoundRequirementOnASingleLine ==
+ R.AllowShortCompoundRequirementOnASingleLine &&
+ AllowShortEnumsOnASingleLine == R.AllowShortEnumsOnASingleLine &&
AllowShortFunctionsOnASingleLine ==
R.AllowShortFunctionsOnASingleLine &&
AllowShortIfStatementsOnASingleLine ==
@@ -3414,23 +4789,26 @@ struct FormatStyle {
AttributeMacros == R.AttributeMacros &&
BinPackArguments == R.BinPackArguments &&
BinPackParameters == R.BinPackParameters &&
+ BitFieldColonSpacing == R.BitFieldColonSpacing &&
+ BracedInitializerIndentWidth == R.BracedInitializerIndentWidth &&
+ BreakAdjacentStringLiterals == R.BreakAdjacentStringLiterals &&
+ BreakAfterAttributes == R.BreakAfterAttributes &&
+ BreakAfterJavaFieldAnnotations == R.BreakAfterJavaFieldAnnotations &&
+ BreakArrays == R.BreakArrays &&
BreakBeforeBinaryOperators == R.BreakBeforeBinaryOperators &&
BreakBeforeBraces == R.BreakBeforeBraces &&
BreakBeforeConceptDeclarations == R.BreakBeforeConceptDeclarations &&
+ BreakBeforeInlineASMColon == R.BreakBeforeInlineASMColon &&
BreakBeforeTernaryOperators == R.BreakBeforeTernaryOperators &&
BreakConstructorInitializers == R.BreakConstructorInitializers &&
- CompactNamespaces == R.CompactNamespaces &&
- BreakAfterJavaFieldAnnotations == R.BreakAfterJavaFieldAnnotations &&
+ BreakInheritanceList == R.BreakInheritanceList &&
BreakStringLiterals == R.BreakStringLiterals &&
ColumnLimit == R.ColumnLimit && CommentPragmas == R.CommentPragmas &&
- BreakInheritanceList == R.BreakInheritanceList &&
- ConstructorInitializerAllOnOneLineOrOnePerLine ==
- R.ConstructorInitializerAllOnOneLineOrOnePerLine &&
+ CompactNamespaces == R.CompactNamespaces &&
ConstructorInitializerIndentWidth ==
R.ConstructorInitializerIndentWidth &&
ContinuationIndentWidth == R.ContinuationIndentWidth &&
Cpp11BracedListStyle == R.Cpp11BracedListStyle &&
- DeriveLineEnding == R.DeriveLineEnding &&
DerivePointerAlignment == R.DerivePointerAlignment &&
DisableFormat == R.DisableFormat &&
EmptyLineAfterAccessModifier == R.EmptyLineAfterAccessModifier &&
@@ -3446,22 +4824,27 @@ struct FormatStyle {
IncludeStyle.IncludeIsMainSourceRegex ==
R.IncludeStyle.IncludeIsMainSourceRegex &&
IndentAccessModifiers == R.IndentAccessModifiers &&
- IndentCaseLabels == R.IndentCaseLabels &&
IndentCaseBlocks == R.IndentCaseBlocks &&
+ IndentCaseLabels == R.IndentCaseLabels &&
+ IndentExternBlock == R.IndentExternBlock &&
IndentGotoLabels == R.IndentGotoLabels &&
IndentPPDirectives == R.IndentPPDirectives &&
- IndentExternBlock == R.IndentExternBlock &&
- IndentRequires == R.IndentRequires && IndentWidth == R.IndentWidth &&
- Language == R.Language &&
+ IndentRequiresClause == R.IndentRequiresClause &&
+ IndentWidth == R.IndentWidth &&
IndentWrappedFunctionNames == R.IndentWrappedFunctionNames &&
+ InsertBraces == R.InsertBraces &&
+ InsertNewlineAtEOF == R.InsertNewlineAtEOF &&
+ IntegerLiteralSeparator == R.IntegerLiteralSeparator &&
JavaImportGroups == R.JavaImportGroups &&
JavaScriptQuotes == R.JavaScriptQuotes &&
JavaScriptWrapImports == R.JavaScriptWrapImports &&
+ KeepEmptyLinesAtEOF == R.KeepEmptyLinesAtEOF &&
KeepEmptyLinesAtTheStartOfBlocks ==
R.KeepEmptyLinesAtTheStartOfBlocks &&
+ Language == R.Language &&
LambdaBodyIndentation == R.LambdaBodyIndentation &&
- MacroBlockBegin == R.MacroBlockBegin &&
- MacroBlockEnd == R.MacroBlockEnd &&
+ LineEnding == R.LineEnding && MacroBlockBegin == R.MacroBlockBegin &&
+ MacroBlockEnd == R.MacroBlockEnd && Macros == R.Macros &&
MaxEmptyLinesToKeep == R.MaxEmptyLinesToKeep &&
NamespaceIndentation == R.NamespaceIndentation &&
NamespaceMacros == R.NamespaceMacros &&
@@ -3469,22 +4852,35 @@ struct FormatStyle {
ObjCBlockIndentWidth == R.ObjCBlockIndentWidth &&
ObjCBreakBeforeNestedBlockParam ==
R.ObjCBreakBeforeNestedBlockParam &&
+ ObjCPropertyAttributeOrder == R.ObjCPropertyAttributeOrder &&
ObjCSpaceAfterProperty == R.ObjCSpaceAfterProperty &&
ObjCSpaceBeforeProtocolList == R.ObjCSpaceBeforeProtocolList &&
+ PackConstructorInitializers == R.PackConstructorInitializers &&
PenaltyBreakAssignment == R.PenaltyBreakAssignment &&
PenaltyBreakBeforeFirstCallParameter ==
R.PenaltyBreakBeforeFirstCallParameter &&
PenaltyBreakComment == R.PenaltyBreakComment &&
PenaltyBreakFirstLessLess == R.PenaltyBreakFirstLessLess &&
+ PenaltyBreakOpenParenthesis == R.PenaltyBreakOpenParenthesis &&
+ PenaltyBreakScopeResolution == R.PenaltyBreakScopeResolution &&
PenaltyBreakString == R.PenaltyBreakString &&
- PenaltyExcessCharacter == R.PenaltyExcessCharacter &&
- PenaltyReturnTypeOnItsOwnLine == R.PenaltyReturnTypeOnItsOwnLine &&
PenaltyBreakTemplateDeclaration ==
R.PenaltyBreakTemplateDeclaration &&
+ PenaltyExcessCharacter == R.PenaltyExcessCharacter &&
+ PenaltyReturnTypeOnItsOwnLine == R.PenaltyReturnTypeOnItsOwnLine &&
PointerAlignment == R.PointerAlignment &&
+ QualifierAlignment == R.QualifierAlignment &&
+ QualifierOrder == R.QualifierOrder &&
RawStringFormats == R.RawStringFormats &&
ReferenceAlignment == R.ReferenceAlignment &&
+ RemoveBracesLLVM == R.RemoveBracesLLVM &&
+ RemoveParentheses == R.RemoveParentheses &&
+ RemoveSemicolon == R.RemoveSemicolon &&
+ RequiresClausePosition == R.RequiresClausePosition &&
+ RequiresExpressionIndentation == R.RequiresExpressionIndentation &&
+ SeparateDefinitionBlocks == R.SeparateDefinitionBlocks &&
ShortNamespaceLines == R.ShortNamespaceLines &&
+ SkipMacroDefinitionBody == R.SkipMacroDefinitionBody &&
SortIncludes == R.SortIncludes &&
SortJavaStaticImport == R.SortJavaStaticImport &&
SpaceAfterCStyleCast == R.SpaceAfterCStyleCast &&
@@ -3496,33 +4892,35 @@ struct FormatStyle {
SpaceBeforeCtorInitializerColon ==
R.SpaceBeforeCtorInitializerColon &&
SpaceBeforeInheritanceColon == R.SpaceBeforeInheritanceColon &&
+ SpaceBeforeJsonColon == R.SpaceBeforeJsonColon &&
SpaceBeforeParens == R.SpaceBeforeParens &&
+ SpaceBeforeParensOptions == R.SpaceBeforeParensOptions &&
SpaceAroundPointerQualifiers == R.SpaceAroundPointerQualifiers &&
SpaceBeforeRangeBasedForLoopColon ==
R.SpaceBeforeRangeBasedForLoopColon &&
+ SpaceBeforeSquareBrackets == R.SpaceBeforeSquareBrackets &&
SpaceInEmptyBlock == R.SpaceInEmptyBlock &&
- SpaceInEmptyParentheses == R.SpaceInEmptyParentheses &&
SpacesBeforeTrailingComments == R.SpacesBeforeTrailingComments &&
SpacesInAngles == R.SpacesInAngles &&
- SpacesInConditionalStatement == R.SpacesInConditionalStatement &&
SpacesInContainerLiterals == R.SpacesInContainerLiterals &&
- SpacesInCStyleCastParentheses == R.SpacesInCStyleCastParentheses &&
SpacesInLineCommentPrefix.Minimum ==
R.SpacesInLineCommentPrefix.Minimum &&
SpacesInLineCommentPrefix.Maximum ==
R.SpacesInLineCommentPrefix.Maximum &&
- SpacesInParentheses == R.SpacesInParentheses &&
+ SpacesInParens == R.SpacesInParens &&
+ SpacesInParensOptions == R.SpacesInParensOptions &&
SpacesInSquareBrackets == R.SpacesInSquareBrackets &&
- SpaceBeforeSquareBrackets == R.SpaceBeforeSquareBrackets &&
- BitFieldColonSpacing == R.BitFieldColonSpacing &&
Standard == R.Standard &&
StatementAttributeLikeMacros == R.StatementAttributeLikeMacros &&
StatementMacros == R.StatementMacros && TabWidth == R.TabWidth &&
- UseTab == R.UseTab && UseCRLF == R.UseCRLF &&
- TypenameMacros == R.TypenameMacros;
+ TypeNames == R.TypeNames && TypenameMacros == R.TypenameMacros &&
+ UseTab == R.UseTab &&
+ VerilogBreakBetweenInstancePorts ==
+ R.VerilogBreakBetweenInstancePorts &&
+ WhitespaceSensitiveMacros == R.WhitespaceSensitiveMacros;
}
- llvm::Optional<FormatStyle> GetLanguageStyle(LanguageKind Language) const;
+ std::optional<FormatStyle> GetLanguageStyle(LanguageKind Language) const;
// Stores per-language styles. A FormatStyle instance inside has an empty
// StyleSet. A FormatStyle instance returned by the Get method has its
@@ -3534,7 +4932,7 @@ struct FormatStyle {
struct FormatStyleSet {
typedef std::map<FormatStyle::LanguageKind, FormatStyle> MapType;
- llvm::Optional<FormatStyle> Get(FormatStyle::LanguageKind Language) const;
+ std::optional<FormatStyle> Get(FormatStyle::LanguageKind Language) const;
// Adds \p Style to this FormatStyleSet. Style must not have an associated
// FormatStyleSet.
@@ -3580,7 +4978,7 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language);
FormatStyle getChromiumStyle(FormatStyle::LanguageKind Language);
/// Returns a format style complying with Mozilla's style guide:
-/// https://developer.mozilla.org/en-US/docs/Developer_Guide/Coding_Style.
+/// https://firefox-source-docs.mozilla.org/code-quality/coding-style/index.html.
FormatStyle getMozillaStyle();
/// Returns a format style complying with Webkit's style guide:
@@ -3595,6 +4993,8 @@ FormatStyle getGNUStyle();
/// https://docs.microsoft.com/en-us/visualstudio/ide/editorconfig-code-style-settings-reference?view=vs-2017
FormatStyle getMicrosoftStyle(FormatStyle::LanguageKind Language);
+FormatStyle getClangFormatStyle();
+
/// Returns style indicating formatting should be not applied at all.
FormatStyle getNoStyle();
@@ -3618,7 +5018,7 @@ bool getPredefinedStyle(StringRef Name, FormatStyle::LanguageKind Language,
/// document, are retained in \p Style.
///
/// If AllowUnknownOptions is true, no errors are emitted if unknown
-/// format options are occured.
+/// format options are occurred.
///
/// If set all diagnostics are emitted through the DiagHandler.
std::error_code
@@ -3661,7 +5061,7 @@ formatReplacements(StringRef Code, const tooling::Replacements &Replaces,
/// - If a replacement has offset UINT_MAX, length 1, and a replacement text
/// that is the name of the header to be removed, the header will be removed
/// from \p Code if it exists.
-/// The include manipulation is done via `tooling::HeaderInclude`, see its
+/// The include manipulation is done via ``tooling::HeaderInclude``, see its
/// documentation for more details on how include insertion points are found and
/// what edits are produced.
llvm::Expected<tooling::Replacements>
@@ -3720,6 +5120,17 @@ tooling::Replacements fixNamespaceEndComments(const FormatStyle &Style,
ArrayRef<tooling::Range> Ranges,
StringRef FileName = "<stdin>");
+/// Inserts or removes empty lines separating definition blocks including
+/// classes, structs, functions, namespaces, and enums in the given \p Ranges in
+/// \p Code.
+///
+/// Returns the ``Replacements`` that inserts or removes empty lines separating
+/// definition blocks in all \p Ranges in \p Code.
+tooling::Replacements separateDefinitionBlocks(const FormatStyle &Style,
+ StringRef Code,
+ ArrayRef<tooling::Range> Ranges,
+ StringRef FileName = "<stdin>");
+
/// Sort consecutive using declarations in the given \p Ranges in
/// \p Code.
///
@@ -3741,11 +5152,11 @@ LangOptions getFormattingLangOpts(const FormatStyle &Style = getLLVMStyle());
extern const char *StyleOptionHelpDescription;
/// The suggested format style to use by default. This allows tools using
-/// `getStyle` to have a consistent default style.
+/// ``getStyle`` to have a consistent default style.
/// Different builds can modify the value to the preferred styles.
extern const char *DefaultFormatStyle;
-/// The suggested predefined style to use as the fallback style in `getStyle`.
+/// The suggested predefined style to use as the fallback style in ``getStyle``.
/// Different builds can modify the value to the preferred styles.
extern const char *DefaultFallbackStyle;
@@ -3758,6 +5169,8 @@ extern const char *DefaultFallbackStyle;
/// * "file" - Load style configuration from a file called ``.clang-format``
/// located in one of the parent directories of ``FileName`` or the current
/// directory if ``FileName`` is empty.
+/// * "file:<format_file_path>" to explicitly specify the configuration file to
+/// use.
///
/// \param[in] StyleName Style name to interpret according to the description
/// above.
@@ -3807,11 +5220,16 @@ inline StringRef getLanguageName(FormatStyle::LanguageKind Language) {
return "TableGen";
case FormatStyle::LK_TextProto:
return "TextProto";
+ case FormatStyle::LK_Verilog:
+ return "Verilog";
default:
return "Unknown";
}
}
+bool isClangFormatOn(StringRef Comment);
+bool isClangFormatOff(StringRef Comment);
+
} // end namespace format
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/ASTConsumers.h b/contrib/llvm-project/clang/include/clang/Frontend/ASTConsumers.h
index 98cfc7cadc0d..0e068bf5cccb 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/ASTConsumers.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/ASTConsumers.h
@@ -20,12 +20,6 @@
namespace clang {
class ASTConsumer;
-class CodeGenOptions;
-class DiagnosticsEngine;
-class FileManager;
-class LangOptions;
-class Preprocessor;
-class TargetOptions;
// AST pretty-printer: prints out the AST in a format that is close to the
// original C code. The output is intended to be in a format such that
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/ASTUnit.h b/contrib/llvm-project/clang/include/clang/Frontend/ASTUnit.h
index 6cf9f3ff936f..6af712afdcb6 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/ASTUnit.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/ASTUnit.h
@@ -31,8 +31,6 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
@@ -42,6 +40,7 @@
#include <cstddef>
#include <cstdint>
#include <memory>
+#include <optional>
#include <string>
#include <utility>
#include <vector>
@@ -78,6 +77,7 @@ class Preprocessor;
class PreprocessorOptions;
class Sema;
class TargetInfo;
+class SyntaxOnlyAction;
/// \brief Enumerates the available scopes for skipping function bodies.
enum class SkipFunctionBodiesScope { None, Preamble, PreambleAndMainFile };
@@ -120,11 +120,13 @@ private:
std::shared_ptr<PreprocessorOptions> PPOpts;
IntrusiveRefCntPtr<ASTReader> Reader;
bool HadModuleLoaderFatalFailure = false;
+ bool StorePreamblesInMemory = false;
struct ASTWriterData;
std::unique_ptr<ASTWriterData> WriterData;
FileSystemOptions FileSystemOpts;
+ std::string PreambleStoragePath;
/// The AST consumer that received information about the translation
/// unit as it was parsed or loaded.
@@ -222,7 +224,7 @@ private:
llvm::StringMap<SourceLocation> PreambleSrcLocCache;
/// The contents of the preamble.
- llvm::Optional<PrecompiledPreamble> Preamble;
+ std::optional<PrecompiledPreamble> Preamble;
/// When non-NULL, this is the buffer used to store the contents of
/// the main file when it has been padded for use with the precompiled
@@ -353,6 +355,7 @@ private:
/// Bit used by CIndex to mark when a translation unit may be in an
/// inconsistent state, and is not safe to free.
+ LLVM_PREFERRED_TYPE(bool)
unsigned UnsafeToFree : 1;
/// \brief Enumerator specifying the scope for skipping function bodies.
@@ -642,7 +645,7 @@ public:
bool visitLocalTopLevelDecls(void *context, DeclVisitorFn Fn);
/// Get the PCH file if one was included.
- const FileEntry *getPCHFile();
+ OptionalFileEntryRef getPCHFile();
/// Returns true if the ASTUnit was constructed from a serialized
/// module file.
@@ -688,15 +691,16 @@ public:
/// lifetime is expected to extend past that of the returned ASTUnit.
///
/// \returns - The initialized ASTUnit or null if the AST failed to load.
- static std::unique_ptr<ASTUnit>
- LoadFromASTFile(const std::string &Filename,
- const PCHContainerReader &PCHContainerRdr, WhatToLoad ToLoad,
- IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
- const FileSystemOptions &FileSystemOpts,
- bool UseDebugInfo = false, bool OnlyLocalDecls = false,
- CaptureDiagsKind CaptureDiagnostics = CaptureDiagsKind::None,
- bool AllowASTWithCompilerErrors = false,
- bool UserFilesAreVolatile = false);
+ static std::unique_ptr<ASTUnit> LoadFromASTFile(
+ const std::string &Filename, const PCHContainerReader &PCHContainerRdr,
+ WhatToLoad ToLoad, IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ const FileSystemOptions &FileSystemOpts,
+ std::shared_ptr<HeaderSearchOptions> HSOpts, bool OnlyLocalDecls = false,
+ CaptureDiagsKind CaptureDiagnostics = CaptureDiagsKind::None,
+ bool AllowASTWithCompilerErrors = false,
+ bool UserFilesAreVolatile = false,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS =
+ llvm::vfs::getRealFileSystem());
private:
/// Helper function for \c LoadFromCompilerInvocation() and
@@ -801,6 +805,13 @@ public:
///
/// \param ResourceFilesPath - The path to the compiler resource files.
///
+ /// \param StorePreamblesInMemory - Whether to store PCH in memory. If false,
+ /// PCH are stored in temporary files.
+ ///
+ /// \param PreambleStoragePath - The path to a directory, in which to create
+ /// temporary PCH files. If empty, the default system temporary directory is
+ /// used. This parameter is ignored if \p StorePreamblesInMemory is true.
+ ///
/// \param ModuleFormat - If provided, uses the specific module format.
///
/// \param ErrAST - If non-null and parsing failed without any AST to return
@@ -815,13 +826,14 @@ public:
///
// FIXME: Move OnlyLocalDecls, UseBumpAllocator to setters on the ASTUnit, we
// shouldn't need to specify them at construction time.
- static ASTUnit *LoadFromCommandLine(
+ static std::unique_ptr<ASTUnit> LoadFromCommandLine(
const char **ArgBegin, const char **ArgEnd,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags, StringRef ResourceFilesPath,
- bool OnlyLocalDecls = false,
+ bool StorePreamblesInMemory = false,
+ StringRef PreambleStoragePath = StringRef(), bool OnlyLocalDecls = false,
CaptureDiagsKind CaptureDiagnostics = CaptureDiagsKind::None,
- ArrayRef<RemappedFile> RemappedFiles = None,
+ ArrayRef<RemappedFile> RemappedFiles = std::nullopt,
bool RemappedFilesKeepOriginalName = true,
unsigned PrecompilePreambleAfterNParses = 0,
TranslationUnitKind TUKind = TU_Complete,
@@ -833,7 +845,7 @@ public:
bool SingleFileParse = false, bool UserFilesAreVolatile = false,
bool ForSerialization = false,
bool RetainExcludedConditionalBlocks = false,
- llvm::Optional<StringRef> ModuleFormat = llvm::None,
+ std::optional<StringRef> ModuleFormat = std::nullopt,
std::unique_ptr<ASTUnit> *ErrAST = nullptr,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS = nullptr);
@@ -849,7 +861,7 @@ public:
/// \returns True if a failure occurred that causes the ASTUnit not to
/// contain any translation-unit information, false otherwise.
bool Reparse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
- ArrayRef<RemappedFile> RemappedFiles = None,
+ ArrayRef<RemappedFile> RemappedFiles = std::nullopt,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS = nullptr);
/// Free data that will be re-generated on the next parse.
@@ -875,6 +887,10 @@ public:
/// \param IncludeBriefComments Whether to include brief documentation within
/// the set of code completions returned.
///
+ /// \param Act If supplied, this argument is used to parse the input file,
+ /// allowing customized parsing by overriding SyntaxOnlyAction lifecycle
+ /// methods.
+ ///
/// FIXME: The Diag, LangOpts, SourceMgr, FileMgr, StoredDiagnostics, and
/// OwnedBuffers parameters are all disgusting hacks. They will go away.
void CodeComplete(StringRef File, unsigned Line, unsigned Column,
@@ -885,7 +901,8 @@ public:
DiagnosticsEngine &Diag, LangOptions &LangOpts,
SourceManager &SourceMgr, FileManager &FileMgr,
SmallVectorImpl<StoredDiagnostic> &StoredDiagnostics,
- SmallVectorImpl<const llvm::MemoryBuffer *> &OwnedBuffers);
+ SmallVectorImpl<const llvm::MemoryBuffer *> &OwnedBuffers,
+ std::unique_ptr<SyntaxOnlyAction> Act);
/// Save this translation unit to a file with the given name.
///
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/CommandLineSourceLoc.h b/contrib/llvm-project/clang/include/clang/Frontend/CommandLineSourceLoc.h
index dfc4454b4baf..074800a881a8 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/CommandLineSourceLoc.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/CommandLineSourceLoc.h
@@ -17,6 +17,7 @@
#include "clang/Basic/LLVM.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
+#include <optional>
namespace clang {
@@ -67,8 +68,8 @@ struct ParsedSourceRange {
/// second element is the column.
std::pair<unsigned, unsigned> End;
- /// Returns a parsed source range from a string or None if the string is
- /// invalid.
+ /// Returns a parsed source range from a string or std::nullopt if the string
+ /// is invalid.
///
/// These source string has the following format:
///
@@ -76,7 +77,7 @@ struct ParsedSourceRange {
///
/// If the end line and column are omitted, the starting line and columns
/// are used as the end values.
- static Optional<ParsedSourceRange> fromString(StringRef Str) {
+ static std::optional<ParsedSourceRange> fromString(StringRef Str) {
std::pair<StringRef, StringRef> RangeSplit = Str.rsplit('-');
unsigned EndLine, EndColumn;
bool HasEndLoc = false;
@@ -93,7 +94,7 @@ struct ParsedSourceRange {
}
auto Begin = ParsedSourceLocation::FromString(RangeSplit.first);
if (Begin.FileName.empty())
- return None;
+ return std::nullopt;
if (!HasEndLoc) {
EndLine = Begin.Line;
EndColumn = Begin.Column;
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/CompilerInstance.h b/contrib/llvm-project/clang/include/clang/Frontend/CompilerInstance.h
index 861b15020329..ac2f940769fb 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/CompilerInstance.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/CompilerInstance.h
@@ -12,6 +12,7 @@
#include "clang/AST/ASTConsumer.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/PCHContainerOperations.h"
#include "clang/Frontend/Utils.h"
@@ -26,6 +27,7 @@
#include <cassert>
#include <list>
#include <memory>
+#include <optional>
#include <string>
#include <utility>
@@ -38,11 +40,14 @@ class TimerGroup;
namespace clang {
class ASTContext;
class ASTReader;
+
+namespace serialization {
+class ModuleFile;
+}
+
class CodeCompleteConsumer;
class DiagnosticsEngine;
class DiagnosticConsumer;
-class ExternalASTSource;
-class FileEntry;
class FileManager;
class FrontendAction;
class InMemoryModuleCache;
@@ -166,9 +171,10 @@ class CompilerInstance : public ModuleLoader {
/// failed.
struct OutputFile {
std::string Filename;
- Optional<llvm::sys::fs::TempFile> File;
+ std::optional<llvm::sys::fs::TempFile> File;
- OutputFile(std::string filename, Optional<llvm::sys::fs::TempFile> file)
+ OutputFile(std::string filename,
+ std::optional<llvm::sys::fs::TempFile> file)
: Filename(std::move(filename)), File(std::move(file)) {}
};
@@ -188,7 +194,7 @@ public:
~CompilerInstance() override;
/// @name High-Level Operations
- /// {
+ /// @{
/// ExecuteAction - Execute the provided action against the compiler's
/// CompilerInvocation object.
@@ -219,9 +225,12 @@ public:
// of the context or else not CompilerInstance specific.
bool ExecuteAction(FrontendAction &Act);
- /// }
+ /// Load the list of plugins requested in the \c FrontendOptions.
+ void LoadRequestedPlugins();
+
+ /// @}
/// @name Compiler Invocation and Options
- /// {
+ /// @{
bool hasInvocation() const { return Invocation != nullptr; }
@@ -230,6 +239,8 @@ public:
return *Invocation;
}
+ std::shared_ptr<CompilerInvocation> getInvocationPtr() { return Invocation; }
+
/// setInvocation - Replace the current invocation.
void setInvocation(std::shared_ptr<CompilerInvocation> Value);
@@ -242,13 +253,11 @@ public:
BuildGlobalModuleIndex = Build;
}
- /// }
+ /// @}
/// @name Forwarding Methods
- /// {
+ /// @{
- AnalyzerOptionsRef getAnalyzerOpts() {
- return Invocation->getAnalyzerOpts();
- }
+ AnalyzerOptions &getAnalyzerOpts() { return Invocation->getAnalyzerOpts(); }
CodeGenOptions &getCodeGenOpts() {
return Invocation->getCodeGenOpts();
@@ -295,13 +304,14 @@ public:
return Invocation->getHeaderSearchOptsPtr();
}
- LangOptions &getLangOpts() {
- return *Invocation->getLangOpts();
- }
- const LangOptions &getLangOpts() const {
- return *Invocation->getLangOpts();
+ APINotesOptions &getAPINotesOpts() { return Invocation->getAPINotesOpts(); }
+ const APINotesOptions &getAPINotesOpts() const {
+ return Invocation->getAPINotesOpts();
}
+ LangOptions &getLangOpts() { return Invocation->getLangOpts(); }
+ const LangOptions &getLangOpts() const { return Invocation->getLangOpts(); }
+
PreprocessorOptions &getPreprocessorOpts() {
return Invocation->getPreprocessorOpts();
}
@@ -323,9 +333,9 @@ public:
return Invocation->getTargetOpts();
}
- /// }
+ /// @}
/// @name Diagnostics Engine
- /// {
+ /// @{
bool hasDiagnostics() const { return Diagnostics != nullptr; }
@@ -335,6 +345,11 @@ public:
return *Diagnostics;
}
+ IntrusiveRefCntPtr<DiagnosticsEngine> getDiagnosticsPtr() const {
+ assert(Diagnostics && "Compiler instance has no diagnostics!");
+ return Diagnostics;
+ }
+
/// setDiagnostics - Replace the current diagnostics engine.
void setDiagnostics(DiagnosticsEngine *Value);
@@ -344,9 +359,9 @@ public:
return *Diagnostics->getClient();
}
- /// }
+ /// @}
/// @name VerboseOutputStream
- /// }
+ /// @{
/// Replace the current stream for verbose output.
void setVerboseOutputStream(raw_ostream &Value);
@@ -359,9 +374,9 @@ public:
return *VerboseOutputStream;
}
- /// }
+ /// @}
/// @name Target Info
- /// {
+ /// @{
bool hasTarget() const { return Target != nullptr; }
@@ -370,12 +385,17 @@ public:
return *Target;
}
+ IntrusiveRefCntPtr<TargetInfo> getTargetPtr() const {
+ assert(Target && "Compiler instance has no target!");
+ return Target;
+ }
+
/// Replace the current Target.
void setTarget(TargetInfo *Value);
- /// }
+ /// @}
/// @name AuxTarget Info
- /// {
+ /// @{
TargetInfo *getAuxTarget() const { return AuxTarget.get(); }
@@ -385,15 +405,15 @@ public:
// Create Target and AuxTarget based on current options
bool createTarget();
- /// }
+ /// @}
/// @name Virtual File System
- /// {
+ /// @{
llvm::vfs::FileSystem &getVirtualFileSystem() const;
- /// }
+ /// @}
/// @name File Manager
- /// {
+ /// @{
bool hasFileManager() const { return FileMgr != nullptr; }
@@ -403,6 +423,11 @@ public:
return *FileMgr;
}
+ IntrusiveRefCntPtr<FileManager> getFileManagerPtr() const {
+ assert(FileMgr && "Compiler instance has no file manager!");
+ return FileMgr;
+ }
+
void resetAndLeakFileManager() {
llvm::BuryPointer(FileMgr.get());
FileMgr.resetWithoutRelease();
@@ -411,9 +436,9 @@ public:
/// Replace the current file manager and virtual file system.
void setFileManager(FileManager *Value);
- /// }
+ /// @}
/// @name Source Manager
- /// {
+ /// @{
bool hasSourceManager() const { return SourceMgr != nullptr; }
@@ -423,6 +448,11 @@ public:
return *SourceMgr;
}
+ IntrusiveRefCntPtr<SourceManager> getSourceManagerPtr() const {
+ assert(SourceMgr && "Compiler instance has no source manager!");
+ return SourceMgr;
+ }
+
void resetAndLeakSourceManager() {
llvm::BuryPointer(SourceMgr.get());
SourceMgr.resetWithoutRelease();
@@ -431,9 +461,9 @@ public:
/// setSourceManager - Replace the current source manager.
void setSourceManager(SourceManager *Value);
- /// }
+ /// @}
/// @name Preprocessor
- /// {
+ /// @{
bool hasPreprocessor() const { return PP != nullptr; }
@@ -452,9 +482,9 @@ public:
/// Replace the current preprocessor.
void setPreprocessor(std::shared_ptr<Preprocessor> Value);
- /// }
+ /// @}
/// @name ASTContext
- /// {
+ /// @{
bool hasASTContext() const { return Context != nullptr; }
@@ -463,6 +493,11 @@ public:
return *Context;
}
+ IntrusiveRefCntPtr<ASTContext> getASTContextPtr() const {
+ assert(Context && "Compiler instance has no AST context!");
+ return Context;
+ }
+
void resetAndLeakASTContext() {
llvm::BuryPointer(Context.get());
Context.resetWithoutRelease();
@@ -475,9 +510,9 @@ public:
/// of S.
void setSema(Sema *S);
- /// }
+ /// @}
/// @name ASTConsumer
- /// {
+ /// @{
bool hasASTConsumer() const { return (bool)Consumer; }
@@ -494,9 +529,9 @@ public:
/// takes ownership of \p Value.
void setASTConsumer(std::unique_ptr<ASTConsumer> Value);
- /// }
+ /// @}
/// @name Semantic analysis
- /// {
+ /// @{
bool hasSema() const { return (bool)TheSema; }
Sema &getSema() const {
@@ -507,9 +542,9 @@ public:
std::unique_ptr<Sema> takeSema();
void resetAndLeakSema();
- /// }
+ /// @}
/// @name Module Management
- /// {
+ /// @{
IntrusiveRefCntPtr<ASTReader> getASTReader() const;
void setASTReader(IntrusiveRefCntPtr<ASTReader> Reader);
@@ -550,9 +585,9 @@ public:
return *Reader;
}
- /// }
+ /// @}
/// @name Code Completion
- /// {
+ /// @{
bool hasCodeCompletionConsumer() const { return (bool)CompletionConsumer; }
@@ -566,9 +601,9 @@ public:
/// the compiler instance takes ownership of \p Value.
void setCodeCompletionConsumer(CodeCompleteConsumer *Value);
- /// }
+ /// @}
/// @name Frontend timer
- /// {
+ /// @{
bool hasFrontendTimer() const { return (bool)FrontendTimer; }
@@ -577,9 +612,9 @@ public:
return *FrontendTimer;
}
- /// }
+ /// @}
/// @name Output Files
- /// {
+ /// @{
/// clearOutputFiles - Clear the output file list. The underlying output
/// streams must have been closed beforehand.
@@ -587,9 +622,9 @@ public:
/// \param EraseFiles - If true, attempt to erase the files from disk.
void clearOutputFiles(bool EraseFiles);
- /// }
+ /// @}
/// @name Construction Utility Methods
- /// {
+ /// @{
/// Create the diagnostics engine using the invocation's diagnostic options
/// and replace any existing one with it.
@@ -738,9 +773,9 @@ private:
public:
std::unique_ptr<raw_pwrite_stream> createNullOutputFile();
- /// }
+ /// @}
/// @name Initialization Utility Methods
- /// {
+ /// @{
/// InitializeSourceManager - Initialize the source manager to set InputFile
/// as the main file.
@@ -757,7 +792,7 @@ public:
FileManager &FileMgr,
SourceManager &SourceMgr);
- /// }
+ /// @}
void setOutputStream(std::unique_ptr<llvm::raw_pwrite_stream> OutStream) {
OutputStream = std::move(OutStream);
@@ -769,7 +804,8 @@ public:
void createASTReader();
- bool loadModuleFile(StringRef FileName);
+ bool loadModuleFile(StringRef FileName,
+ serialization::ModuleFile *&LoadedModuleFile);
private:
/// Find a module, potentially compiling it, before reading its AST. This is
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/CompilerInvocation.h b/contrib/llvm-project/clang/include/clang/Frontend/CompilerInvocation.h
index 2245439d0632..c6528779bde7 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/CompilerInvocation.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/CompilerInvocation.h
@@ -9,6 +9,7 @@
#ifndef LLVM_CLANG_FRONTEND_COMPILERINVOCATION_H
#define LLVM_CLANG_FRONTEND_COMPILERINVOCATION_H
+#include "clang/APINotes/APINotesOptions.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/FileSystemOptions.h"
@@ -50,6 +51,11 @@ class HeaderSearchOptions;
class PreprocessorOptions;
class TargetOptions;
+// This lets us create the DiagnosticsEngine with a properly-filled-out
+// DiagnosticOptions instance.
+std::unique_ptr<DiagnosticOptions>
+CreateAndPopulateDiagOpts(ArrayRef<const char *> Argv);
+
/// Fill out Opts based on the options given in Args.
///
/// Args must have been created from the OptTable returned by
@@ -61,16 +67,12 @@ bool ParseDiagnosticArgs(DiagnosticOptions &Opts, llvm::opt::ArgList &Args,
DiagnosticsEngine *Diags = nullptr,
bool DefaultDiagColor = true);
-/// The base class of CompilerInvocation with reference semantics.
-///
-/// This class stores option objects behind reference-counted pointers. This is
-/// useful for clients that want to keep some option object around even after
-/// CompilerInvocation gets destroyed, without making a copy.
-///
-/// This is a separate class so that we can implement the copy constructor and
-/// assignment here and leave them defaulted in the rest of CompilerInvocation.
-class CompilerInvocationRefBase {
-public:
+/// The base class of CompilerInvocation. It keeps individual option objects
+/// behind reference-counted pointers, which is useful for clients that want to
+/// keep select option objects alive (even after CompilerInvocation gets
+/// destroyed) without making a copy.
+class CompilerInvocationBase {
+protected:
/// Options controlling the language variant.
std::shared_ptr<LangOptions> LangOpts;
@@ -81,103 +83,122 @@ public:
IntrusiveRefCntPtr<DiagnosticOptions> DiagnosticOpts;
/// Options controlling the \#include directive.
- std::shared_ptr<HeaderSearchOptions> HeaderSearchOpts;
+ std::shared_ptr<HeaderSearchOptions> HSOpts;
/// Options controlling the preprocessor (aside from \#include handling).
- std::shared_ptr<PreprocessorOptions> PreprocessorOpts;
+ std::shared_ptr<PreprocessorOptions> PPOpts;
/// Options controlling the static analyzer.
AnalyzerOptionsRef AnalyzerOpts;
- CompilerInvocationRefBase();
- CompilerInvocationRefBase(const CompilerInvocationRefBase &X);
- CompilerInvocationRefBase(CompilerInvocationRefBase &&X);
- CompilerInvocationRefBase &operator=(CompilerInvocationRefBase X);
- CompilerInvocationRefBase &operator=(CompilerInvocationRefBase &&X);
- ~CompilerInvocationRefBase();
-
- LangOptions *getLangOpts() { return LangOpts.get(); }
- const LangOptions *getLangOpts() const { return LangOpts.get(); }
-
- TargetOptions &getTargetOpts() { return *TargetOpts.get(); }
- const TargetOptions &getTargetOpts() const { return *TargetOpts.get(); }
+ std::shared_ptr<MigratorOptions> MigratorOpts;
- DiagnosticOptions &getDiagnosticOpts() const { return *DiagnosticOpts; }
-
- HeaderSearchOptions &getHeaderSearchOpts() { return *HeaderSearchOpts; }
-
- const HeaderSearchOptions &getHeaderSearchOpts() const {
- return *HeaderSearchOpts;
- }
-
- std::shared_ptr<HeaderSearchOptions> getHeaderSearchOptsPtr() const {
- return HeaderSearchOpts;
- }
-
- std::shared_ptr<PreprocessorOptions> getPreprocessorOptsPtr() {
- return PreprocessorOpts;
- }
-
- PreprocessorOptions &getPreprocessorOpts() { return *PreprocessorOpts; }
-
- const PreprocessorOptions &getPreprocessorOpts() const {
- return *PreprocessorOpts;
- }
-
- AnalyzerOptionsRef getAnalyzerOpts() const { return AnalyzerOpts; }
-};
-
-/// The base class of CompilerInvocation with value semantics.
-class CompilerInvocationValueBase {
-protected:
- MigratorOptions MigratorOpts;
+ /// Options controlling API notes.
+ std::shared_ptr<APINotesOptions> APINotesOpts;
/// Options controlling IRgen and the backend.
- CodeGenOptions CodeGenOpts;
-
- /// Options controlling dependency output.
- DependencyOutputOptions DependencyOutputOpts;
+ std::shared_ptr<CodeGenOptions> CodeGenOpts;
/// Options controlling file system operations.
- FileSystemOptions FileSystemOpts;
+ std::shared_ptr<FileSystemOptions> FSOpts;
/// Options controlling the frontend itself.
- FrontendOptions FrontendOpts;
+ std::shared_ptr<FrontendOptions> FrontendOpts;
+
+ /// Options controlling dependency output.
+ std::shared_ptr<DependencyOutputOptions> DependencyOutputOpts;
/// Options controlling preprocessed output.
- PreprocessorOutputOptions PreprocessorOutputOpts;
+ std::shared_ptr<PreprocessorOutputOptions> PreprocessorOutputOpts;
+
+ /// Dummy tag type whose instance can be passed into the constructor to
+ /// prevent creation of the reference-counted option objects.
+ struct EmptyConstructor {};
+
+ CompilerInvocationBase();
+ CompilerInvocationBase(EmptyConstructor) {}
+ CompilerInvocationBase(const CompilerInvocationBase &X) = delete;
+ CompilerInvocationBase(CompilerInvocationBase &&X) = default;
+ CompilerInvocationBase &operator=(const CompilerInvocationBase &X) = delete;
+ CompilerInvocationBase &deep_copy_assign(const CompilerInvocationBase &X);
+ CompilerInvocationBase &shallow_copy_assign(const CompilerInvocationBase &X);
+ CompilerInvocationBase &operator=(CompilerInvocationBase &&X) = default;
+ ~CompilerInvocationBase() = default;
public:
- MigratorOptions &getMigratorOpts() { return MigratorOpts; }
- const MigratorOptions &getMigratorOpts() const { return MigratorOpts; }
-
- CodeGenOptions &getCodeGenOpts() { return CodeGenOpts; }
- const CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; }
-
- DependencyOutputOptions &getDependencyOutputOpts() {
- return DependencyOutputOpts;
+ /// Const getters.
+ /// @{
+ const LangOptions &getLangOpts() const { return *LangOpts; }
+ const TargetOptions &getTargetOpts() const { return *TargetOpts; }
+ const DiagnosticOptions &getDiagnosticOpts() const { return *DiagnosticOpts; }
+ const HeaderSearchOptions &getHeaderSearchOpts() const { return *HSOpts; }
+ const PreprocessorOptions &getPreprocessorOpts() const { return *PPOpts; }
+ const AnalyzerOptions &getAnalyzerOpts() const { return *AnalyzerOpts; }
+ const MigratorOptions &getMigratorOpts() const { return *MigratorOpts; }
+ const APINotesOptions &getAPINotesOpts() const { return *APINotesOpts; }
+ const CodeGenOptions &getCodeGenOpts() const { return *CodeGenOpts; }
+ const FileSystemOptions &getFileSystemOpts() const { return *FSOpts; }
+ const FrontendOptions &getFrontendOpts() const { return *FrontendOpts; }
+ const DependencyOutputOptions &getDependencyOutputOpts() const {
+ return *DependencyOutputOpts;
+ }
+ const PreprocessorOutputOptions &getPreprocessorOutputOpts() const {
+ return *PreprocessorOutputOpts;
}
+ /// @}
- const DependencyOutputOptions &getDependencyOutputOpts() const {
- return DependencyOutputOpts;
+ /// Command line generation.
+ /// @{
+ using StringAllocator = llvm::function_ref<const char *(const Twine &)>;
+ /// Generate cc1-compatible command line arguments from this instance.
+ ///
+ /// \param [out] Args - The generated arguments. Note that the caller is
+ /// responsible for inserting the path to the clang executable and "-cc1" if
+ /// desired.
+ /// \param SA - A function that given a Twine can allocate storage for a given
+ /// command line argument and return a pointer to the newly allocated string.
+ /// The returned pointer is what gets appended to Args.
+ void generateCC1CommandLine(llvm::SmallVectorImpl<const char *> &Args,
+ StringAllocator SA) const {
+ generateCC1CommandLine([&](const Twine &Arg) {
+ // No need to allocate static string literals.
+ Args.push_back(Arg.isSingleStringLiteral()
+ ? Arg.getSingleStringRef().data()
+ : SA(Arg));
+ });
}
- FileSystemOptions &getFileSystemOpts() { return FileSystemOpts; }
+ using ArgumentConsumer = llvm::function_ref<void(const Twine &)>;
+ /// Generate cc1-compatible command line arguments from this instance.
+ ///
+ /// \param Consumer - Callback that gets invoked for every single generated
+ /// command line argument.
+ void generateCC1CommandLine(ArgumentConsumer Consumer) const;
- const FileSystemOptions &getFileSystemOpts() const {
- return FileSystemOpts;
- }
+ /// Generate cc1-compatible command line arguments from this instance,
+ /// wrapping the result as a std::vector<std::string>.
+ ///
+ /// This is a (less-efficient) wrapper over generateCC1CommandLine().
+ std::vector<std::string> getCC1CommandLine() const;
- FrontendOptions &getFrontendOpts() { return FrontendOpts; }
- const FrontendOptions &getFrontendOpts() const { return FrontendOpts; }
+private:
+ /// Generate command line options from DiagnosticOptions.
+ static void GenerateDiagnosticArgs(const DiagnosticOptions &Opts,
+ ArgumentConsumer Consumer,
+ bool DefaultDiagColor);
- PreprocessorOutputOptions &getPreprocessorOutputOpts() {
- return PreprocessorOutputOpts;
- }
+ /// Generate command line options from LangOptions.
+ static void GenerateLangArgs(const LangOptions &Opts,
+ ArgumentConsumer Consumer, const llvm::Triple &T,
+ InputKind IK);
- const PreprocessorOutputOptions &getPreprocessorOutputOpts() const {
- return PreprocessorOutputOpts;
- }
+ // Generate command line options from CodeGenOptions.
+ static void GenerateCodeGenArgs(const CodeGenOptions &Opts,
+ ArgumentConsumer Consumer,
+ const llvm::Triple &T,
+ const std::string &OutputFile,
+ const LangOptions *LangOpts);
+ /// @}
};
/// Helper class for holding the data necessary to invoke the compiler.
@@ -185,9 +206,73 @@ public:
/// This class is designed to represent an abstract "invocation" of the
/// compiler, including data such as the include paths, the code generation
/// options, the warning flags, and so on.
-class CompilerInvocation : public CompilerInvocationRefBase,
- public CompilerInvocationValueBase {
+class CompilerInvocation : public CompilerInvocationBase {
public:
+ CompilerInvocation() = default;
+ CompilerInvocation(const CompilerInvocation &X)
+ : CompilerInvocationBase(EmptyConstructor{}) {
+ deep_copy_assign(X);
+ }
+ CompilerInvocation(CompilerInvocation &&) = default;
+ CompilerInvocation &operator=(const CompilerInvocation &X) {
+ deep_copy_assign(X);
+ return *this;
+ }
+ ~CompilerInvocation() = default;
+
+ /// Const getters.
+ /// @{
+ // Note: These need to be pulled in manually. Otherwise, they get hidden by
+ // the mutable getters with the same names.
+ using CompilerInvocationBase::getLangOpts;
+ using CompilerInvocationBase::getTargetOpts;
+ using CompilerInvocationBase::getDiagnosticOpts;
+ using CompilerInvocationBase::getHeaderSearchOpts;
+ using CompilerInvocationBase::getPreprocessorOpts;
+ using CompilerInvocationBase::getAnalyzerOpts;
+ using CompilerInvocationBase::getMigratorOpts;
+ using CompilerInvocationBase::getAPINotesOpts;
+ using CompilerInvocationBase::getCodeGenOpts;
+ using CompilerInvocationBase::getFileSystemOpts;
+ using CompilerInvocationBase::getFrontendOpts;
+ using CompilerInvocationBase::getDependencyOutputOpts;
+ using CompilerInvocationBase::getPreprocessorOutputOpts;
+ /// @}
+
+ /// Mutable getters.
+ /// @{
+ LangOptions &getLangOpts() { return *LangOpts; }
+ TargetOptions &getTargetOpts() { return *TargetOpts; }
+ DiagnosticOptions &getDiagnosticOpts() { return *DiagnosticOpts; }
+ HeaderSearchOptions &getHeaderSearchOpts() { return *HSOpts; }
+ PreprocessorOptions &getPreprocessorOpts() { return *PPOpts; }
+ AnalyzerOptions &getAnalyzerOpts() { return *AnalyzerOpts; }
+ MigratorOptions &getMigratorOpts() { return *MigratorOpts; }
+ APINotesOptions &getAPINotesOpts() { return *APINotesOpts; }
+ CodeGenOptions &getCodeGenOpts() { return *CodeGenOpts; }
+ FileSystemOptions &getFileSystemOpts() { return *FSOpts; }
+ FrontendOptions &getFrontendOpts() { return *FrontendOpts; }
+ DependencyOutputOptions &getDependencyOutputOpts() {
+ return *DependencyOutputOpts;
+ }
+ PreprocessorOutputOptions &getPreprocessorOutputOpts() {
+ return *PreprocessorOutputOpts;
+ }
+ /// @}
+
+ /// Base class internals.
+ /// @{
+ using CompilerInvocationBase::LangOpts;
+ using CompilerInvocationBase::TargetOpts;
+ using CompilerInvocationBase::DiagnosticOpts;
+ std::shared_ptr<HeaderSearchOptions> getHeaderSearchOptsPtr() {
+ return HSOpts;
+ }
+ std::shared_ptr<PreprocessorOptions> getPreprocessorOptsPtr() {
+ return PPOpts;
+ }
+ /// @}
+
/// Create a compiler invocation from a list of input options.
/// \returns true on success.
///
@@ -214,70 +299,89 @@ public:
/// executable), for finding the builtin compiler path.
static std::string GetResourcesPath(const char *Argv0, void *MainAddr);
- /// Set language defaults for the given input language and
- /// language standard in the given LangOptions object.
- ///
- /// \param Opts - The LangOptions object to set up.
- /// \param IK - The input language.
- /// \param T - The target triple.
- /// \param Includes - The affected list of included files.
- /// \param LangStd - The input language standard.
- static void
- setLangDefaults(LangOptions &Opts, InputKind IK, const llvm::Triple &T,
- std::vector<std::string> &Includes,
- LangStandard::Kind LangStd = LangStandard::lang_unspecified);
-
/// Retrieve a module hash string that is suitable for uniquely
/// identifying the conditions under which the module was built.
std::string getModuleHash() const;
- using StringAllocator = llvm::function_ref<const char *(const llvm::Twine &)>;
- /// Generate a cc1-compatible command line arguments from this instance.
+ /// Check that \p Args can be parsed and re-serialized without change,
+ /// emiting diagnostics for any differences.
///
- /// \param [out] Args - The generated arguments. Note that the caller is
- /// responsible for inserting the path to the clang executable and "-cc1" if
- /// desired.
- /// \param SA - A function that given a Twine can allocate storage for a given
- /// command line argument and return a pointer to the newly allocated string.
- /// The returned pointer is what gets appended to Args.
- void generateCC1CommandLine(llvm::SmallVectorImpl<const char *> &Args,
- StringAllocator SA) const;
+ /// This check is only suitable for command-lines that are expected to already
+ /// be canonical.
+ ///
+ /// \return false if there are any errors.
+ static bool checkCC1RoundTrip(ArrayRef<const char *> Args,
+ DiagnosticsEngine &Diags,
+ const char *Argv0 = nullptr);
+
+ /// Reset all of the options that are not considered when building a
+ /// module.
+ void resetNonModularOptions();
+
+ /// Disable implicit modules and canonicalize options that are only used by
+ /// implicit modules.
+ void clearImplicitModuleBuildOptions();
private:
static bool CreateFromArgsImpl(CompilerInvocation &Res,
ArrayRef<const char *> CommandLineArgs,
DiagnosticsEngine &Diags, const char *Argv0);
- /// Generate command line options from DiagnosticOptions.
- static void GenerateDiagnosticArgs(const DiagnosticOptions &Opts,
- SmallVectorImpl<const char *> &Args,
- StringAllocator SA, bool DefaultDiagColor);
-
/// Parse command line options that map to LangOptions.
static bool ParseLangArgs(LangOptions &Opts, llvm::opt::ArgList &Args,
InputKind IK, const llvm::Triple &T,
std::vector<std::string> &Includes,
DiagnosticsEngine &Diags);
- /// Generate command line options from LangOptions.
- static void GenerateLangArgs(const LangOptions &Opts,
- SmallVectorImpl<const char *> &Args,
- StringAllocator SA, const llvm::Triple &T,
- InputKind IK);
-
/// Parse command line options that map to CodeGenOptions.
static bool ParseCodeGenArgs(CodeGenOptions &Opts, llvm::opt::ArgList &Args,
InputKind IK, DiagnosticsEngine &Diags,
const llvm::Triple &T,
const std::string &OutputFile,
const LangOptions &LangOptsRef);
+};
- // Generate command line options from CodeGenOptions.
- static void GenerateCodeGenArgs(const CodeGenOptions &Opts,
- SmallVectorImpl<const char *> &Args,
- StringAllocator SA, const llvm::Triple &T,
- const std::string &OutputFile,
- const LangOptions *LangOpts);
+/// Same as \c CompilerInvocation, but with copy-on-write optimization.
+class CowCompilerInvocation : public CompilerInvocationBase {
+public:
+ CowCompilerInvocation() = default;
+ CowCompilerInvocation(const CowCompilerInvocation &X)
+ : CompilerInvocationBase(EmptyConstructor{}) {
+ shallow_copy_assign(X);
+ }
+ CowCompilerInvocation(CowCompilerInvocation &&) = default;
+ CowCompilerInvocation &operator=(const CowCompilerInvocation &X) {
+ shallow_copy_assign(X);
+ return *this;
+ }
+ ~CowCompilerInvocation() = default;
+
+ CowCompilerInvocation(const CompilerInvocation &X)
+ : CompilerInvocationBase(EmptyConstructor{}) {
+ deep_copy_assign(X);
+ }
+
+ CowCompilerInvocation(CompilerInvocation &&X)
+ : CompilerInvocationBase(std::move(X)) {}
+
+ // Const getters are inherited from the base class.
+
+ /// Mutable getters.
+ /// @{
+ LangOptions &getMutLangOpts();
+ TargetOptions &getMutTargetOpts();
+ DiagnosticOptions &getMutDiagnosticOpts();
+ HeaderSearchOptions &getMutHeaderSearchOpts();
+ PreprocessorOptions &getMutPreprocessorOpts();
+ AnalyzerOptions &getMutAnalyzerOpts();
+ MigratorOptions &getMutMigratorOpts();
+ APINotesOptions &getMutAPINotesOpts();
+ CodeGenOptions &getMutCodeGenOpts();
+ FileSystemOptions &getMutFileSystemOpts();
+ FrontendOptions &getMutFrontendOpts();
+ DependencyOutputOptions &getMutDependencyOutputOpts();
+ PreprocessorOutputOptions &getMutPreprocessorOutputOpts();
+ /// @}
};
IntrusiveRefCntPtr<llvm::vfs::FileSystem>
@@ -288,6 +392,11 @@ IntrusiveRefCntPtr<llvm::vfs::FileSystem> createVFSFromCompilerInvocation(
const CompilerInvocation &CI, DiagnosticsEngine &Diags,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> BaseFS);
+IntrusiveRefCntPtr<llvm::vfs::FileSystem>
+createVFSFromOverlayFiles(ArrayRef<std::string> VFSOverlayFiles,
+ DiagnosticsEngine &Diags,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> BaseFS);
+
} // namespace clang
#endif // LLVM_CLANG_FRONTEND_COMPILERINVOCATION_H
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/DependencyOutputOptions.h b/contrib/llvm-project/clang/include/clang/Frontend/DependencyOutputOptions.h
index 78a2841d1e10..d92a87d78d7c 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/DependencyOutputOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/DependencyOutputOptions.h
@@ -9,6 +9,7 @@
#ifndef LLVM_CLANG_FRONTEND_DEPENDENCYOUTPUTOPTIONS_H
#define LLVM_CLANG_FRONTEND_DEPENDENCYOUTPUTOPTIONS_H
+#include "clang/Basic/HeaderInclude.h"
#include <string>
#include <vector>
@@ -32,18 +33,30 @@ enum ExtraDepKind {
/// file generation.
class DependencyOutputOptions {
public:
+ LLVM_PREFERRED_TYPE(bool)
unsigned IncludeSystemHeaders : 1; ///< Include system header dependencies.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ShowHeaderIncludes : 1; ///< Show header inclusions (-H).
+ LLVM_PREFERRED_TYPE(bool)
unsigned UsePhonyTargets : 1; ///< Include phony targets for each
/// dependency, which can avoid some 'make'
/// problems.
+ LLVM_PREFERRED_TYPE(bool)
unsigned AddMissingHeaderDeps : 1; ///< Add missing headers to dependency list
+ LLVM_PREFERRED_TYPE(bool)
unsigned IncludeModuleFiles : 1; ///< Include module file dependencies.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ShowSkippedHeaderIncludes : 1; ///< With ShowHeaderIncludes, show
/// also includes that were skipped
/// due to the "include guard
/// optimization" or #pragma once.
+ /// The format of header information.
+ HeaderIncludeFormatKind HeaderIncludeFormat = HIFMT_Textual;
+
+ /// Determine whether header information should be filtered.
+ HeaderIncludeFilteringKind HeaderIncludeFiltering = HIFIL_None;
+
/// Destination of cl.exe style /showIncludes info.
ShowIncludesDestination ShowIncludesDest = ShowIncludesDestination::None;
@@ -67,9 +80,6 @@ public:
/// target.
std::vector<std::pair<std::string, ExtraDepKind>> ExtraDeps;
- /// In /showIncludes mode, pretend the main TU is a header with this name.
- std::string ShowIncludesPretendHeader;
-
/// The file to write GraphViz-formatted header dependencies to.
std::string DOTOutputFile;
@@ -80,7 +90,8 @@ public:
DependencyOutputOptions()
: IncludeSystemHeaders(0), ShowHeaderIncludes(0), UsePhonyTargets(0),
AddMissingHeaderDeps(0), IncludeModuleFiles(0),
- ShowSkippedHeaderIncludes(0) {}
+ ShowSkippedHeaderIncludes(0), HeaderIncludeFormat(HIFMT_Textual),
+ HeaderIncludeFiltering(HIFIL_None) {}
};
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/FrontendAction.h b/contrib/llvm-project/clang/include/clang/Frontend/FrontendAction.h
index dfefddfb4527..039f6f247b6d 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/FrontendAction.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/FrontendAction.h
@@ -270,17 +270,18 @@ public:
const std::vector<std::string> &arg) = 0;
enum ActionType {
- Cmdline, ///< Action is determined by the cc1 command-line
- ReplaceAction, ///< Replace the main action
- AddBeforeMainAction, ///< Execute the action before the main action
- AddAfterMainAction ///< Execute the action after the main action
+ CmdlineBeforeMainAction, ///< Execute the action before the main action if
+ ///< on the command line
+ CmdlineAfterMainAction, ///< Execute the action after the main action if on
+ ///< the command line
+ ReplaceAction, ///< Replace the main action
+ AddBeforeMainAction, ///< Execute the action before the main action
+ AddAfterMainAction ///< Execute the action after the main action
};
/// Get the action type for this plugin
///
- /// \return The action type. If the type is Cmdline then by default the
- /// plugin does nothing and what it does is determined by the cc1
- /// command-line.
- virtual ActionType getActionType() { return Cmdline; }
+ /// \return The action type. By default we use CmdlineAfterMainAction.
+ virtual ActionType getActionType() { return CmdlineAfterMainAction; }
};
/// Abstract base class to use for preprocessor-based frontend actions.
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/FrontendActions.h b/contrib/llvm-project/clang/include/clang/Frontend/FrontendActions.h
index ff8d4417eaa4..fcce31ac0590 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/FrontendActions.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/FrontendActions.h
@@ -10,14 +10,12 @@
#define LLVM_CLANG_FRONTEND_FRONTENDACTIONS_H
#include "clang/Frontend/FrontendAction.h"
+#include <memory>
#include <string>
#include <vector>
namespace clang {
-class Module;
-class FileEntry;
-
//===----------------------------------------------------------------------===//
// Custom Consumer Actions
//===----------------------------------------------------------------------===//
@@ -153,17 +151,16 @@ class GenerateModuleInterfaceAction : public GenerateModuleAction {
private:
bool BeginSourceFileAction(CompilerInstance &CI) override;
+ std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) override;
+
std::unique_ptr<raw_pwrite_stream>
CreateOutputFile(CompilerInstance &CI, StringRef InFile) override;
};
-class GenerateHeaderModuleAction : public GenerateModuleAction {
- /// The synthesized module input buffer for the current compilation.
- std::unique_ptr<llvm::MemoryBuffer> Buffer;
- std::vector<std::string> ModuleHeaders;
+class GenerateHeaderUnitAction : public GenerateModuleAction {
private:
- bool PrepareToExecuteAction(CompilerInstance &CI) override;
bool BeginSourceFileAction(CompilerInstance &CI) override;
std::unique_ptr<raw_pwrite_stream>
@@ -183,6 +180,9 @@ public:
/// Dump information about the given module file, to be used for
/// basic debugging and discovery.
class DumpModuleInfoAction : public ASTFrontendAction {
+ // Allow other tools (ex lldb) to direct output for their use.
+ std::shared_ptr<llvm::raw_ostream> OutputStream;
+
protected:
std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) override;
@@ -190,6 +190,9 @@ protected:
void ExecuteAction() override;
public:
+ DumpModuleInfoAction() = default;
+ explicit DumpModuleInfoAction(std::shared_ptr<llvm::raw_ostream> Out)
+ : OutputStream(Out) {}
bool hasPCHSupport() const override { return false; }
bool hasASTFileSupport() const override { return true; }
bool hasIRSupport() const override { return false; }
@@ -299,6 +302,15 @@ protected:
bool hasPCHSupport() const override { return true; }
};
+class GetDependenciesByModuleNameAction : public PreprocessOnlyAction {
+ StringRef ModuleName;
+ void ExecuteAction() override;
+
+public:
+ GetDependenciesByModuleNameAction(StringRef ModuleName)
+ : ModuleName(ModuleName) {}
+};
+
} // end namespace clang
#endif
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/FrontendOptions.h b/contrib/llvm-project/clang/include/clang/Frontend/FrontendOptions.h
index 15c905d712a3..53a8681cfdbb 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/FrontendOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/FrontendOptions.h
@@ -19,6 +19,7 @@
#include <cassert>
#include <map>
#include <memory>
+#include <optional>
#include <string>
#include <vector>
@@ -75,6 +76,9 @@ enum ActionKind {
/// Emit a .o file.
EmitObj,
+ // Extract API information
+ ExtractAPI,
+
/// Parse and apply any fixits to the source.
FixIt,
@@ -84,8 +88,8 @@ enum ActionKind {
/// Generate pre-compiled module from a C++ module interface file.
GenerateModuleInterface,
- /// Generate pre-compiled module from a set of header files.
- GenerateHeaderModule,
+ /// Generate a C++20 header unit module from a header file.
+ GenerateHeaderUnit,
/// Generate pre-compiled header.
GeneratePCH,
@@ -143,11 +147,6 @@ enum ActionKind {
/// The kind of a file that we've been handed as an input.
class InputKind {
-private:
- Language Lang;
- unsigned Fmt : 3;
- unsigned Preprocessed : 1;
-
public:
/// The input file format.
enum Format {
@@ -156,13 +155,41 @@ public:
Precompiled
};
+ // If we are building a header unit, what kind it is; this affects whether
+ // we look for the file in the user or system include search paths before
+ // flagging a missing input.
+ enum HeaderUnitKind {
+ HeaderUnit_None,
+ HeaderUnit_User,
+ HeaderUnit_System,
+ HeaderUnit_Abs
+ };
+
+private:
+ Language Lang;
+ LLVM_PREFERRED_TYPE(Format)
+ unsigned Fmt : 3;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned Preprocessed : 1;
+ LLVM_PREFERRED_TYPE(HeaderUnitKind)
+ unsigned HeaderUnit : 3;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsHeader : 1;
+
+public:
constexpr InputKind(Language L = Language::Unknown, Format F = Source,
- bool PP = false)
- : Lang(L), Fmt(F), Preprocessed(PP) {}
+ bool PP = false, HeaderUnitKind HU = HeaderUnit_None,
+ bool HD = false)
+ : Lang(L), Fmt(F), Preprocessed(PP), HeaderUnit(HU), IsHeader(HD) {}
Language getLanguage() const { return static_cast<Language>(Lang); }
Format getFormat() const { return static_cast<Format>(Fmt); }
+ HeaderUnitKind getHeaderUnitKind() const {
+ return static_cast<HeaderUnitKind>(HeaderUnit);
+ }
bool isPreprocessed() const { return Preprocessed; }
+ bool isHeader() const { return IsHeader; }
+ bool isHeaderUnit() const { return HeaderUnit != HeaderUnit_None; }
/// Is the input kind fully-unknown?
bool isUnknown() const { return Lang == Language::Unknown && Fmt == Source; }
@@ -173,11 +200,23 @@ public:
}
InputKind getPreprocessed() const {
- return InputKind(getLanguage(), getFormat(), true);
+ return InputKind(getLanguage(), getFormat(), true, getHeaderUnitKind(),
+ isHeader());
+ }
+
+ InputKind getHeader() const {
+ return InputKind(getLanguage(), getFormat(), isPreprocessed(),
+ getHeaderUnitKind(), true);
+ }
+
+ InputKind withHeaderUnit(HeaderUnitKind HU) const {
+ return InputKind(getLanguage(), getFormat(), isPreprocessed(), HU,
+ isHeader());
}
InputKind withFormat(Format F) const {
- return InputKind(getLanguage(), F, isPreprocessed());
+ return InputKind(getLanguage(), F, isPreprocessed(), getHeaderUnitKind(),
+ isHeader());
}
};
@@ -189,7 +228,7 @@ class FrontendInputFile {
/// The input, if it comes from a buffer rather than a file. This object
/// does not own the buffer, and the caller is responsible for ensuring
/// that it outlives any users.
- llvm::Optional<llvm::MemoryBufferRef> Buffer;
+ std::optional<llvm::MemoryBufferRef> Buffer;
/// The kind of input, e.g., C source, AST file, LLVM IR.
InputKind Kind;
@@ -208,10 +247,14 @@ public:
InputKind getKind() const { return Kind; }
bool isSystem() const { return IsSystem; }
- bool isEmpty() const { return File.empty() && Buffer == None; }
+ bool isEmpty() const { return File.empty() && Buffer == std::nullopt; }
bool isFile() const { return !isBuffer(); }
- bool isBuffer() const { return Buffer != None; }
+ bool isBuffer() const { return Buffer != std::nullopt; }
bool isPreprocessed() const { return Kind.isPreprocessed(); }
+ bool isHeader() const { return Kind.isHeader(); }
+ InputKind::HeaderUnitKind getHeaderUnitKind() const {
+ return Kind.getHeaderUnitKind();
+ }
StringRef getFile() const {
assert(isFile());
@@ -228,82 +271,118 @@ public:
class FrontendOptions {
public:
/// Disable memory freeing on exit.
+ LLVM_PREFERRED_TYPE(bool)
unsigned DisableFree : 1;
/// When generating PCH files, instruct the AST writer to create relocatable
/// PCH files.
+ LLVM_PREFERRED_TYPE(bool)
unsigned RelocatablePCH : 1;
/// Show the -help text.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ShowHelp : 1;
/// Show frontend performance metrics and statistics.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ShowStats : 1;
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned AppendStats : 1;
+
/// print the supported cpus for the current target
+ LLVM_PREFERRED_TYPE(bool)
unsigned PrintSupportedCPUs : 1;
- /// Output time trace profile.
- unsigned TimeTrace : 1;
+ /// Print the supported extensions for the current target.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned PrintSupportedExtensions : 1;
/// Show the -version text.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ShowVersion : 1;
/// Apply fixes even if there are unfixable errors.
+ LLVM_PREFERRED_TYPE(bool)
unsigned FixWhatYouCan : 1;
/// Apply fixes only for warnings.
+ LLVM_PREFERRED_TYPE(bool)
unsigned FixOnlyWarnings : 1;
/// Apply fixes and recompile.
+ LLVM_PREFERRED_TYPE(bool)
unsigned FixAndRecompile : 1;
/// Apply fixes to temporary files.
+ LLVM_PREFERRED_TYPE(bool)
unsigned FixToTemporaries : 1;
/// Emit ARC errors even if the migrator can fix them.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ARCMTMigrateEmitARCErrors : 1;
/// Skip over function bodies to speed up parsing in cases you do not need
/// them (e.g. with code completion).
+ LLVM_PREFERRED_TYPE(bool)
unsigned SkipFunctionBodies : 1;
/// Whether we can use the global module index if available.
+ LLVM_PREFERRED_TYPE(bool)
unsigned UseGlobalModuleIndex : 1;
/// Whether we can generate the global module index if needed.
+ LLVM_PREFERRED_TYPE(bool)
unsigned GenerateGlobalModuleIndex : 1;
/// Whether we include declaration dumps in AST dumps.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ASTDumpDecls : 1;
/// Whether we deserialize all decls when forming AST dumps.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ASTDumpAll : 1;
/// Whether we include lookup table dumps in AST dumps.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ASTDumpLookups : 1;
/// Whether we include declaration type dumps in AST dumps.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ASTDumpDeclTypes : 1;
/// Whether we are performing an implicit module build.
+ LLVM_PREFERRED_TYPE(bool)
unsigned BuildingImplicitModule : 1;
+ /// Whether to use a filesystem lock when building implicit modules.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned BuildingImplicitModuleUsesLock : 1;
+
/// Whether we should embed all used files into the PCM file.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ModulesEmbedAllFiles : 1;
/// Whether timestamps should be written to the produced PCH file.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IncludeTimestamps : 1;
/// Should a temporary file be used during compilation.
+ LLVM_PREFERRED_TYPE(bool)
unsigned UseTemporary : 1;
/// When using -emit-module, treat the modulemap as a system module.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsSystemModule : 1;
/// Output (and read) PCM files regardless of compiler errors.
+ LLVM_PREFERRED_TYPE(bool)
unsigned AllowPCMWithCompilerErrors : 1;
+ /// Whether to share the FileManager when building modules.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned ModulesShareFileManager : 1;
+
CodeCompleteOptions CodeCompleteOpts;
/// Specifies the output format of the AST.
@@ -370,7 +449,7 @@ public:
ObjCMT_MigrateDecls | ObjCMT_PropertyDotSyntax)
};
unsigned ObjCMTAction = ObjCMT_None;
- std::string ObjCMTWhiteListPath;
+ std::string ObjCMTAllowListPath;
std::string MTMigrateDir;
std::string ARCMTMigrateReportOut;
@@ -404,6 +483,21 @@ public:
/// The name of the action to run when using a plugin action.
std::string ActionName;
+ // Currently this is only used as part of the `-extract-api` action.
+ /// The name of the product the input files belong too.
+ std::string ProductName;
+
+ // Currently this is only used as part of the `-extract-api` action.
+ // A comma seperated list of files providing a list of APIs to
+ // ignore when extracting documentation.
+ std::vector<std::string> ExtractAPIIgnoresFileList;
+
+ // Currently this is only used as part of the `-emit-symbol-graph`
+ // action.
+ // Location of output directory where symbol graph information would
+ // be dumped
+ std::string SymbolGraphOutputDir;
+
/// Args to pass to the plugins
std::map<std::string, std::vector<std::string>> PluginArgs;
@@ -441,10 +535,10 @@ public:
std::string AuxTriple;
/// Auxiliary target CPU for CUDA/HIP compilation.
- Optional<std::string> AuxTargetCPU;
+ std::optional<std::string> AuxTargetCPU;
/// Auxiliary target features for CUDA/HIP compilation.
- Optional<std::vector<std::string>> AuxTargetFeatures;
+ std::optional<std::vector<std::string>> AuxTargetFeatures;
/// Filename to write statistics to.
std::string StatsFile;
@@ -452,17 +546,21 @@ public:
/// Minimum time granularity (in microseconds) traced by time profiler.
unsigned TimeTraceGranularity;
+ /// Path which stores the output files for -ftime-trace
+ std::string TimeTracePath;
+
public:
FrontendOptions()
: DisableFree(false), RelocatablePCH(false), ShowHelp(false),
- ShowStats(false), TimeTrace(false), ShowVersion(false),
+ ShowStats(false), AppendStats(false), ShowVersion(false),
FixWhatYouCan(false), FixOnlyWarnings(false), FixAndRecompile(false),
FixToTemporaries(false), ARCMTMigrateEmitARCErrors(false),
SkipFunctionBodies(false), UseGlobalModuleIndex(true),
GenerateGlobalModuleIndex(true), ASTDumpDecls(false),
ASTDumpLookups(false), BuildingImplicitModule(false),
- ModulesEmbedAllFiles(false), IncludeTimestamps(true),
- UseTemporary(true), AllowPCMWithCompilerErrors(false),
+ BuildingImplicitModuleUsesLock(true), ModulesEmbedAllFiles(false),
+ IncludeTimestamps(true), UseTemporary(true),
+ AllowPCMWithCompilerErrors(false), ModulesShareFileManager(true),
TimeTraceGranularity(500) {}
/// getInputKindForExtension - Return the appropriate input kind for a file
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/LayoutOverrideSource.h b/contrib/llvm-project/clang/include/clang/Frontend/LayoutOverrideSource.h
index ea1611470a76..c6e2d7311183 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/LayoutOverrideSource.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/LayoutOverrideSource.h
@@ -30,6 +30,12 @@ namespace clang {
/// The alignment of the record.
uint64_t Align;
+ /// The offsets of non-virtual base classes in the record.
+ SmallVector<CharUnits, 8> BaseOffsets;
+
+ /// The offsets of virtual base classes in the record.
+ SmallVector<CharUnits, 8> VBaseOffsets;
+
/// The offsets of the fields, in source order.
SmallVector<uint64_t, 8> FieldOffsets;
};
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/MigratorOptions.h b/contrib/llvm-project/clang/include/clang/Frontend/MigratorOptions.h
index cf50ffcf0c4f..da899643f0b4 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/MigratorOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/MigratorOptions.h
@@ -14,11 +14,15 @@
#ifndef LLVM_CLANG_FRONTEND_MIGRATOROPTIONS_H
#define LLVM_CLANG_FRONTEND_MIGRATOROPTIONS_H
+#include "llvm/Support/Compiler.h"
+
namespace clang {
class MigratorOptions {
public:
+ LLVM_PREFERRED_TYPE(bool)
unsigned NoNSAllocReallocError : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned NoFinalizeRemoval : 1;
MigratorOptions() {
NoNSAllocReallocError = 0;
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/MultiplexConsumer.h b/contrib/llvm-project/clang/include/clang/Frontend/MultiplexConsumer.h
index 3054e1842811..7f8d2858b386 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/MultiplexConsumer.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/MultiplexConsumer.h
@@ -40,6 +40,8 @@ public:
void MacroDefinitionRead(serialization::PreprocessedEntityID,
MacroDefinitionRecord *MD) override;
void ModuleRead(serialization::SubmoduleID ID, Module *Mod) override;
+ void ModuleImportRead(serialization::SubmoduleID ID,
+ SourceLocation ImportLoc) override;
private:
std::vector<ASTDeserializationListener *> Listeners;
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/PCHContainerOperations.h b/contrib/llvm-project/clang/include/clang/Frontend/PCHContainerOperations.h
index fa977a63f32e..098d32ec3869 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/PCHContainerOperations.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/PCHContainerOperations.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_PCH_CONTAINER_OPERATIONS_H
-#define LLVM_CLANG_PCH_CONTAINER_OPERATIONS_H
+#ifndef LLVM_CLANG_FRONTEND_PCHCONTAINEROPERATIONS_H
+#define LLVM_CLANG_FRONTEND_PCHCONTAINEROPERATIONS_H
#include "clang/Serialization/PCHContainerOperations.h"
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/PrecompiledPreamble.h b/contrib/llvm-project/clang/include/clang/Frontend/PrecompiledPreamble.h
index bb7fd97fe5df..798870bf24fe 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/PrecompiledPreamble.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/PrecompiledPreamble.h
@@ -10,14 +10,13 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_FRONTEND_PRECOMPILED_PREAMBLE_H
-#define LLVM_CLANG_FRONTEND_PRECOMPILED_PREAMBLE_H
+#ifndef LLVM_CLANG_FRONTEND_PRECOMPILEDPREAMBLE_H
+#define LLVM_CLANG_FRONTEND_PRECOMPILEDPREAMBLE_H
#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/AlignOf.h"
#include "llvm/Support/MD5.h"
#include <cstddef>
#include <memory>
@@ -76,6 +75,10 @@ public:
/// \param StoreInMemory Store PCH in memory. If false, PCH will be stored in
/// a temporary file.
///
+ /// \param StoragePath The path to a directory, in which to create a temporary
+ /// file to store PCH in. If empty, the default system temporary directory is
+ /// used. This parameter is ignored if \p StoreInMemory is true.
+ ///
/// \param Callbacks A set of callbacks to be executed when building
/// the preamble.
static llvm::ErrorOr<PrecompiledPreamble>
@@ -84,10 +87,12 @@ public:
DiagnosticsEngine &Diagnostics,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
- bool StoreInMemory, PreambleCallbacks &Callbacks);
+ bool StoreInMemory, StringRef StoragePath,
+ PreambleCallbacks &Callbacks);
- PrecompiledPreamble(PrecompiledPreamble &&) = default;
- PrecompiledPreamble &operator=(PrecompiledPreamble &&) = default;
+ PrecompiledPreamble(PrecompiledPreamble &&);
+ PrecompiledPreamble &operator=(PrecompiledPreamble &&);
+ ~PrecompiledPreamble();
/// PreambleBounds used to build the preamble.
PreambleBounds getBounds() const;
@@ -128,79 +133,12 @@ public:
llvm::MemoryBuffer *MainFileBuffer) const;
private:
- PrecompiledPreamble(PCHStorage Storage, std::vector<char> PreambleBytes,
+ PrecompiledPreamble(std::unique_ptr<PCHStorage> Storage,
+ std::vector<char> PreambleBytes,
bool PreambleEndsAtStartOfLine,
llvm::StringMap<PreambleFileHash> FilesInPreamble,
llvm::StringSet<> MissingFiles);
- /// A temp file that would be deleted on destructor call. If destructor is not
- /// called for any reason, the file will be deleted at static objects'
- /// destruction.
- /// An assertion will fire if two TempPCHFiles are created with the same name,
- /// so it's not intended to be used outside preamble-handling.
- class TempPCHFile {
- public:
- // A main method used to construct TempPCHFile.
- static llvm::ErrorOr<TempPCHFile> CreateNewPreamblePCHFile();
-
- private:
- TempPCHFile(std::string FilePath);
-
- public:
- TempPCHFile(TempPCHFile &&Other);
- TempPCHFile &operator=(TempPCHFile &&Other);
-
- TempPCHFile(const TempPCHFile &) = delete;
- ~TempPCHFile();
-
- /// A path where temporary file is stored.
- llvm::StringRef getFilePath() const;
-
- private:
- void RemoveFileIfPresent();
-
- private:
- llvm::Optional<std::string> FilePath;
- };
-
- class InMemoryPreamble {
- public:
- std::string Data;
- };
-
- class PCHStorage {
- public:
- enum class Kind { Empty, InMemory, TempFile };
-
- PCHStorage() = default;
- PCHStorage(TempPCHFile File);
- PCHStorage(InMemoryPreamble Memory);
-
- PCHStorage(const PCHStorage &) = delete;
- PCHStorage &operator=(const PCHStorage &) = delete;
-
- PCHStorage(PCHStorage &&Other);
- PCHStorage &operator=(PCHStorage &&Other);
-
- ~PCHStorage();
-
- Kind getKind() const;
-
- TempPCHFile &asFile();
- const TempPCHFile &asFile() const;
-
- InMemoryPreamble &asMemory();
- const InMemoryPreamble &asMemory() const;
-
- private:
- void destroy();
- void setEmpty();
-
- private:
- Kind StorageKind = Kind::Empty;
- llvm::AlignedCharArrayUnion<TempPCHFile, InMemoryPreamble> Storage = {};
- };
-
/// Data used to determine if a file used in the preamble has been changed.
struct PreambleFileHash {
/// All files have size set.
@@ -245,7 +183,7 @@ private:
IntrusiveRefCntPtr<llvm::vfs::FileSystem> &VFS);
/// Manages the memory buffer or temporary file that stores the PCH.
- PCHStorage Storage;
+ std::unique_ptr<PCHStorage> Storage;
/// Keeps track of the files that were used when computing the
/// preamble, with both their buffer size and their modification time.
///
@@ -274,7 +212,7 @@ class PreambleCallbacks {
public:
virtual ~PreambleCallbacks() = default;
- /// Called before FrontendAction::BeginSourceFile.
+ /// Called before FrontendAction::Execute.
/// Can be used to store references to various CompilerInstance fields
/// (e.g. SourceManager) that may be interesting to the consumers of other
/// callbacks.
@@ -291,7 +229,7 @@ public:
/// used instead, but having only this method allows a simpler API.
virtual void HandleTopLevelDecl(DeclGroupRef DG);
/// Creates wrapper class for PPCallbacks so we can also process information
- /// about includes that are inside of a preamble
+ /// about includes that are inside of a preamble. Called after BeforeExecute.
virtual std::unique_ptr<PPCallbacks> createPPCallbacks();
/// The returned CommentHandler will be added to the preprocessor if not null.
virtual CommentHandler *getCommentHandler();
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/PreprocessorOutputOptions.h b/contrib/llvm-project/clang/include/clang/Frontend/PreprocessorOutputOptions.h
index 72e5ad1137fb..6e19cae33cf2 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/PreprocessorOutputOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/PreprocessorOutputOptions.h
@@ -9,21 +9,38 @@
#ifndef LLVM_CLANG_FRONTEND_PREPROCESSOROUTPUTOPTIONS_H
#define LLVM_CLANG_FRONTEND_PREPROCESSOROUTPUTOPTIONS_H
+#include <llvm/Support/Compiler.h>
+
namespace clang {
/// PreprocessorOutputOptions - Options for controlling the C preprocessor
/// output (e.g., -E).
class PreprocessorOutputOptions {
public:
+ LLVM_PREFERRED_TYPE(bool)
unsigned ShowCPP : 1; ///< Print normal preprocessed output.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ShowComments : 1; ///< Show comments.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ShowLineMarkers : 1; ///< Show \#line markers.
+ LLVM_PREFERRED_TYPE(bool)
unsigned UseLineDirectives : 1; ///< Use \#line instead of GCC-style \# N.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ShowMacroComments : 1; ///< Show comments, even in macros.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ShowMacros : 1; ///< Print macro definitions.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ShowIncludeDirectives : 1; ///< Print includes, imports etc. within preprocessed output.
+ LLVM_PREFERRED_TYPE(bool)
unsigned RewriteIncludes : 1; ///< Preprocess include directives only.
+ LLVM_PREFERRED_TYPE(bool)
unsigned RewriteImports : 1; ///< Include contents of transitively-imported modules.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned MinimizeWhitespace : 1; ///< Ignore whitespace from input.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned DirectivesOnly : 1; ///< Process directives but do not expand macros.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned KeepSystemIncludes : 1; ///< Do not expand system headers.
public:
PreprocessorOutputOptions() {
@@ -36,6 +53,9 @@ public:
ShowIncludeDirectives = 0;
RewriteIncludes = 0;
RewriteImports = 0;
+ MinimizeWhitespace = 0;
+ DirectivesOnly = 0;
+ KeepSystemIncludes = 0;
}
};
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/SARIFDiagnostic.h b/contrib/llvm-project/clang/include/clang/Frontend/SARIFDiagnostic.h
new file mode 100644
index 000000000000..ec1d0b8e6a7c
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Frontend/SARIFDiagnostic.h
@@ -0,0 +1,74 @@
+//===--- SARIFDiagnostic.h - SARIF Diagnostic Formatting -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a utility class that provides support for constructing a SARIF object
+// containing diagnostics.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_SARIFDIAGNOSTIC_H
+#define LLVM_CLANG_FRONTEND_SARIFDIAGNOSTIC_H
+
+#include "clang/Basic/Sarif.h"
+#include "clang/Frontend/DiagnosticRenderer.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace clang {
+
+class SARIFDiagnostic : public DiagnosticRenderer {
+public:
+ SARIFDiagnostic(raw_ostream &OS, const LangOptions &LangOpts,
+ DiagnosticOptions *DiagOpts, SarifDocumentWriter *Writer);
+
+ ~SARIFDiagnostic() = default;
+
+ SARIFDiagnostic &operator=(const SARIFDiagnostic &&) = delete;
+ SARIFDiagnostic(SARIFDiagnostic &&) = delete;
+ SARIFDiagnostic &operator=(const SARIFDiagnostic &) = delete;
+ SARIFDiagnostic(const SARIFDiagnostic &) = delete;
+
+protected:
+ void emitDiagnosticMessage(FullSourceLoc Loc, PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level, StringRef Message,
+ ArrayRef<CharSourceRange> Ranges,
+ DiagOrStoredDiag D) override;
+
+ void emitDiagnosticLoc(FullSourceLoc Loc, PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ ArrayRef<CharSourceRange> Ranges) override;
+
+ void emitCodeContext(FullSourceLoc Loc, DiagnosticsEngine::Level Level,
+ SmallVectorImpl<CharSourceRange> &Ranges,
+ ArrayRef<FixItHint> Hints) override {}
+
+ void emitIncludeLocation(FullSourceLoc Loc, PresumedLoc PLoc) override;
+
+ void emitImportLocation(FullSourceLoc Loc, PresumedLoc PLoc,
+ StringRef ModuleName) override;
+
+ void emitBuildingModuleLocation(FullSourceLoc Loc, PresumedLoc PLoc,
+ StringRef ModuleName) override;
+
+private:
+ // Shared between SARIFDiagnosticPrinter and this renderer.
+ SarifDocumentWriter *Writer;
+
+ SarifResult addLocationToResult(SarifResult Result, FullSourceLoc Loc,
+ PresumedLoc PLoc,
+ ArrayRef<CharSourceRange> Ranges,
+ const Diagnostic &Diag);
+
+ SarifRule addDiagnosticLevelToRule(SarifRule Rule,
+ DiagnosticsEngine::Level Level);
+
+ llvm::StringRef emitFilename(StringRef Filename, const SourceManager &SM);
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/SARIFDiagnosticPrinter.h b/contrib/llvm-project/clang/include/clang/Frontend/SARIFDiagnosticPrinter.h
new file mode 100644
index 000000000000..f2652833b3c1
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Frontend/SARIFDiagnosticPrinter.h
@@ -0,0 +1,76 @@
+//===-- SARIFDiagnosticPrinter.h - SARIF Diagnostic Client -------*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a concrete diagnostic client, which prints the diagnostics to
+// standard error in SARIF format.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_SARIFDIAGNOSTICPRINTER_H
+#define LLVM_CLANG_FRONTEND_SARIFDIAGNOSTICPRINTER_H
+
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/Sarif.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/StringRef.h"
+#include <memory>
+
+namespace clang {
+class DiagnosticOptions;
+class LangOptions;
+class SARIFDiagnostic;
+class SarifDocumentWriter;
+
+class SARIFDiagnosticPrinter : public DiagnosticConsumer {
+public:
+ SARIFDiagnosticPrinter(raw_ostream &OS, DiagnosticOptions *Diags);
+ ~SARIFDiagnosticPrinter() = default;
+
+ SARIFDiagnosticPrinter &operator=(const SARIFDiagnosticPrinter &&) = delete;
+ SARIFDiagnosticPrinter(SARIFDiagnosticPrinter &&) = delete;
+ SARIFDiagnosticPrinter &operator=(const SARIFDiagnosticPrinter &) = delete;
+ SARIFDiagnosticPrinter(const SARIFDiagnosticPrinter &) = delete;
+
+ /// setPrefix - Set the diagnostic printer prefix string, which will be
+ /// printed at the start of any diagnostics. If empty, no prefix string is
+ /// used.
+ void setPrefix(llvm::StringRef Value) { Prefix = Value; }
+
+ bool hasSarifWriter() const { return Writer != nullptr; }
+
+ SarifDocumentWriter &getSarifWriter() const {
+ assert(Writer && "SarifWriter not set!");
+ return *Writer;
+ }
+
+ void setSarifWriter(std::unique_ptr<SarifDocumentWriter> SarifWriter) {
+ Writer = std::move(SarifWriter);
+ }
+
+ void BeginSourceFile(const LangOptions &LO, const Preprocessor *PP) override;
+ void EndSourceFile() override;
+ void HandleDiagnostic(DiagnosticsEngine::Level Level,
+ const Diagnostic &Info) override;
+
+private:
+ raw_ostream &OS;
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts;
+
+ /// Handle to the currently active SARIF diagnostic emitter.
+ std::unique_ptr<SARIFDiagnostic> SARIFDiag;
+
+ /// A string to prefix to error messages.
+ std::string Prefix;
+
+ std::unique_ptr<SarifDocumentWriter> Writer;
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h b/contrib/llvm-project/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h
index 58954dc6bafa..5586ef65e393 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h
@@ -19,7 +19,6 @@ class raw_ostream;
namespace clang {
class DiagnosticConsumer;
-class DiagnosticsEngine;
class DiagnosticOptions;
namespace serialized_diags {
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/SerializedDiagnostics.h b/contrib/llvm-project/clang/include/clang/Frontend/SerializedDiagnostics.h
index 4e67fd13ac5b..6464693c1482 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/SerializedDiagnostics.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/SerializedDiagnostics.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_FRONTEND_SERIALIZE_DIAGNOSTICS_H_
-#define LLVM_CLANG_FRONTEND_SERIALIZE_DIAGNOSTICS_H_
+#ifndef LLVM_CLANG_FRONTEND_SERIALIZEDDIAGNOSTICS_H
+#define LLVM_CLANG_FRONTEND_SERIALIZEDDIAGNOSTICS_H
#include "llvm/Bitstream/BitCodes.h"
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/TextDiagnostic.h b/contrib/llvm-project/clang/include/clang/Frontend/TextDiagnostic.h
index a2eec46beccd..7eb0ab0cdc9b 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/TextDiagnostic.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/TextDiagnostic.h
@@ -103,7 +103,8 @@ private:
SmallVectorImpl<CharSourceRange> &Ranges,
ArrayRef<FixItHint> Hints);
- void emitSnippet(StringRef SourceLine);
+ void emitSnippet(StringRef SourceLine, unsigned MaxLineNoDisplayWidth,
+ unsigned LineNo);
void emitParseableFixits(ArrayRef<FixItHint> Hints, const SourceManager &SM);
};
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/TextDiagnosticPrinter.h b/contrib/llvm-project/clang/include/clang/Frontend/TextDiagnosticPrinter.h
index ba756fa18c30..2610bde7513a 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/TextDiagnosticPrinter.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/TextDiagnosticPrinter.h
@@ -34,6 +34,7 @@ class TextDiagnosticPrinter : public DiagnosticConsumer {
/// A string to prefix to error messages.
std::string Prefix;
+ LLVM_PREFERRED_TYPE(bool)
unsigned OwnsOutputStream : 1;
public:
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/Utils.h b/contrib/llvm-project/clang/include/clang/Frontend/Utils.h
index da2d79af2eba..604e42067a3f 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/Utils.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/Utils.h
@@ -22,7 +22,6 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
-#include "llvm/Option/OptSpecifier.h"
#include "llvm/Support/FileCollector.h"
#include "llvm/Support/VirtualFileSystem.h"
#include <cstdint>
@@ -32,12 +31,6 @@
#include <utility>
#include <vector>
-namespace llvm {
-
-class Triple;
-
-} // namespace llvm
-
namespace clang {
class ASTReader;
@@ -46,25 +39,18 @@ class CompilerInvocation;
class DiagnosticsEngine;
class ExternalSemaSource;
class FrontendOptions;
-class HeaderSearch;
-class HeaderSearchOptions;
-class LangOptions;
class PCHContainerReader;
class Preprocessor;
class PreprocessorOptions;
class PreprocessorOutputOptions;
-
-/// Apply the header search options to get given HeaderSearch object.
-void ApplyHeaderSearchOptions(HeaderSearch &HS,
- const HeaderSearchOptions &HSOpts,
- const LangOptions &Lang,
- const llvm::Triple &triple);
+class CodeGenOptions;
/// InitializePreprocessor - Initialize the preprocessor getting it and the
/// environment ready to process a single file.
void InitializePreprocessor(Preprocessor &PP, const PreprocessorOptions &PPOpts,
const PCHContainerReader &PCHContainerRdr,
- const FrontendOptions &FEOpts);
+ const FrontendOptions &FEOpts,
+ const CodeGenOptions &CodeGenOpts);
/// DoPrintPreprocessedInput - Implement -E mode.
void DoPrintPreprocessedInput(Preprocessor &PP, raw_ostream *OS,
@@ -123,10 +109,10 @@ public:
void finishedMainFile(DiagnosticsEngine &Diags) override;
- bool needSystemDependencies() final override { return IncludeSystemHeaders; }
+ bool needSystemDependencies() final { return IncludeSystemHeaders; }
bool sawDependency(StringRef Filename, bool FromModule, bool IsSystem,
- bool IsModuleFile, bool IsMissing) final override;
+ bool IsModuleFile, bool IsMissing) final;
protected:
void outputDependencyFile(llvm::raw_ostream &OS);
@@ -205,27 +191,50 @@ IntrusiveRefCntPtr<ExternalSemaSource>
createChainedIncludesSource(CompilerInstance &CI,
IntrusiveRefCntPtr<ExternalSemaSource> &Reader);
-/// createInvocationFromCommandLine - Construct a compiler invocation object for
-/// a command line argument vector.
+/// Optional inputs to createInvocation.
+struct CreateInvocationOptions {
+ /// Receives diagnostics encountered while parsing command-line flags.
+ /// If not provided, these are printed to stderr.
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags = nullptr;
+ /// Used e.g. to probe for system headers locations.
+ /// If not provided, the real filesystem is used.
+ /// FIXME: the driver does perform some non-virtualized IO.
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS = nullptr;
+ /// Whether to attempt to produce a non-null (possibly incorrect) invocation
+ /// if any errors were encountered.
+ /// By default, always return null on errors.
+ bool RecoverOnError = false;
+ /// Allow the driver to probe the filesystem for PCH files.
+ /// This is used to replace -include with -include-pch in the cc1 args.
+ /// FIXME: ProbePrecompiled=true is a poor, historical default.
+ /// It misbehaves if the PCH file is from GCC, has the wrong version, etc.
+ bool ProbePrecompiled = false;
+ /// If set, the target is populated with the cc1 args produced by the driver.
+ /// This may be populated even if createInvocation returns nullptr.
+ std::vector<std::string> *CC1Args = nullptr;
+};
+
+/// Interpret clang arguments in preparation to parse a file.
+///
+/// This simulates a number of steps Clang takes when its driver is invoked:
+/// - choosing actions (e.g compile + link) to run
+/// - probing the system for settings like standard library locations
+/// - spawning a cc1 subprocess to compile code, with more explicit arguments
+/// - in the cc1 process, assembling those arguments into a CompilerInvocation
+/// which is used to configure the parser
///
-/// \param ShouldRecoverOnErrors - whether we should attempt to return a
-/// non-null (and possibly incorrect) CompilerInvocation if any errors were
-/// encountered. When this flag is false, always return null on errors.
+/// This simulation is lossy, e.g. in some situations one driver run would
+/// result in multiple parses. (Multi-arch, CUDA, ...).
+/// This function tries to select a reasonable invocation that tools should use.
///
-/// \param CC1Args - if non-null, will be populated with the args to cc1
-/// expanded from \p Args. May be set even if nullptr is returned.
+/// Args[0] should be the driver name, such as "clang" or "/usr/bin/g++".
+/// Absolute path is preferred - this affects searching for system headers.
///
-/// \return A CompilerInvocation, or nullptr if none was built for the given
-/// argument vector.
-std::unique_ptr<CompilerInvocation> createInvocationFromCommandLine(
- ArrayRef<const char *> Args,
- IntrusiveRefCntPtr<DiagnosticsEngine> Diags =
- IntrusiveRefCntPtr<DiagnosticsEngine>(),
- IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS = nullptr,
- bool ShouldRecoverOnErrors = false,
- std::vector<std::string> *CC1Args = nullptr);
-
-// Frontend timing utils
+/// May return nullptr if an invocation could not be determined.
+/// See CreateInvocationOptions::ShouldRecoverOnErrors to try harder!
+std::unique_ptr<CompilerInvocation>
+createInvocation(ArrayRef<const char *> Args,
+ CreateInvocationOptions Opts = {});
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h b/contrib/llvm-project/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h
index a97cd138d159..ddfae2666c4c 100644
--- a/contrib/llvm-project/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h
+++ b/contrib/llvm-project/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h
@@ -32,156 +32,8 @@ class TextDiagnosticBuffer;
/// VerifyDiagnosticConsumer - Create a diagnostic client which will use
/// markers in the input source to check that all the emitted diagnostics match
-/// those expected.
-///
-/// INVOKING THE DIAGNOSTIC CHECKER:
-///
-/// VerifyDiagnosticConsumer is typically invoked via the "-verify" option to
-/// "clang -cc1". "-verify" is equivalent to "-verify=expected", so all
-/// diagnostics are typically specified with the prefix "expected". For
-/// example:
-///
-/// \code
-/// int A = B; // expected-error {{use of undeclared identifier 'B'}}
-/// \endcode
-///
-/// Custom prefixes can be specified as a comma-separated sequence. Each
-/// prefix must start with a letter and contain only alphanumeric characters,
-/// hyphens, and underscores. For example, given just "-verify=foo,bar",
-/// the above diagnostic would be ignored, but the following diagnostics would
-/// be recognized:
-///
-/// \code
-/// int A = B; // foo-error {{use of undeclared identifier 'B'}}
-/// int C = D; // bar-error {{use of undeclared identifier 'D'}}
-/// \endcode
-///
-/// Multiple occurrences accumulate prefixes. For example,
-/// "-verify -verify=foo,bar -verify=baz" is equivalent to
-/// "-verify=expected,foo,bar,baz".
-///
-/// SPECIFYING DIAGNOSTICS:
-///
-/// Indicating that a line expects an error or a warning is simple. Put a
-/// comment on the line that has the diagnostic, use:
-///
-/// \code
-/// expected-{error,warning,remark,note}
-/// \endcode
-///
-/// to tag if it's an expected error, remark or warning, and place the expected
-/// text between {{ and }} markers. The full text doesn't have to be included,
-/// only enough to ensure that the correct diagnostic was emitted.
-///
-/// Here's an example:
-///
-/// \code
-/// int A = B; // expected-error {{use of undeclared identifier 'B'}}
-/// \endcode
-///
-/// You can place as many diagnostics on one line as you wish. To make the code
-/// more readable, you can use slash-newline to separate out the diagnostics.
-///
-/// Alternatively, it is possible to specify the line on which the diagnostic
-/// should appear by appending "@<line>" to "expected-<type>", for example:
-///
-/// \code
-/// #warning some text
-/// // expected-warning@10 {{some text}}
-/// \endcode
-///
-/// The line number may be absolute (as above), or relative to the current
-/// line by prefixing the number with either '+' or '-'.
-///
-/// If the diagnostic is generated in a separate file, for example in a shared
-/// header file, it may be beneficial to be able to declare the file in which
-/// the diagnostic will appear, rather than placing the expected-* directive in
-/// the actual file itself. This can be done using the following syntax:
-///
-/// \code
-/// // expected-error@path/include.h:15 {{error message}}
-/// \endcode
-///
-/// The path can be absolute or relative and the same search paths will be used
-/// as for #include directives. The line number in an external file may be
-/// substituted with '*' meaning that any line number will match (useful where
-/// the included file is, for example, a system header where the actual line
-/// number may change and is not critical).
-///
-/// As an alternative to specifying a fixed line number, the location of a
-/// diagnostic can instead be indicated by a marker of the form "#<marker>".
-/// Markers are specified by including them in a comment, and then referenced
-/// by appending the marker to the diagnostic with "@#<marker>":
-///
-/// \code
-/// #warning some text // #1
-/// // expected-warning@#1 {{some text}}
-/// \endcode
-///
-/// The name of a marker used in a directive must be unique within the
-/// compilation.
-///
-/// The simple syntax above allows each specification to match exactly one
-/// error. You can use the extended syntax to customize this. The extended
-/// syntax is "expected-<type> <n> {{diag text}}", where \<type> is one of
-/// "error", "warning" or "note", and \<n> is a positive integer. This allows
-/// the diagnostic to appear as many times as specified. Example:
-///
-/// \code
-/// void f(); // expected-note 2 {{previous declaration is here}}
-/// \endcode
-///
-/// Where the diagnostic is expected to occur a minimum number of times, this
-/// can be specified by appending a '+' to the number. Example:
-///
-/// \code
-/// void f(); // expected-note 0+ {{previous declaration is here}}
-/// void g(); // expected-note 1+ {{previous declaration is here}}
-/// \endcode
-///
-/// In the first example, the diagnostic becomes optional, i.e. it will be
-/// swallowed if it occurs, but will not generate an error if it does not
-/// occur. In the second example, the diagnostic must occur at least once.
-/// As a short-hand, "one or more" can be specified simply by '+'. Example:
-///
-/// \code
-/// void g(); // expected-note + {{previous declaration is here}}
-/// \endcode
-///
-/// A range can also be specified by "<n>-<m>". Example:
-///
-/// \code
-/// void f(); // expected-note 0-1 {{previous declaration is here}}
-/// \endcode
-///
-/// In this example, the diagnostic may appear only once, if at all.
-///
-/// Regex matching mode may be selected by appending '-re' to type and
-/// including regexes wrapped in double curly braces in the directive, such as:
-///
-/// \code
-/// expected-error-re {{format specifies type 'wchar_t **' (aka '{{.+}}')}}
-/// \endcode
-///
-/// Examples matching error: "variable has incomplete type 'struct s'"
-///
-/// \code
-/// // expected-error {{variable has incomplete type 'struct s'}}
-/// // expected-error {{variable has incomplete type}}
-///
-/// // expected-error-re {{variable has type 'struct {{.}}'}}
-/// // expected-error-re {{variable has type 'struct {{.*}}'}}
-/// // expected-error-re {{variable has type 'struct {{(.*)}}'}}
-/// // expected-error-re {{variable has type 'struct{{[[:space:]](.*)}}'}}
-/// \endcode
-///
-/// VerifyDiagnosticConsumer expects at least one expected-* directive to
-/// be found inside the source code. If no diagnostics are expected the
-/// following directive can be used to indicate this:
-///
-/// \code
-/// // expected-no-diagnostics
-/// \endcode
+/// those expected. See clang/docs/InternalsManual.rst for details about how to
+/// write tests to verify diagnostics.
///
class VerifyDiagnosticConsumer: public DiagnosticConsumer,
public CommentHandler {
@@ -278,14 +130,15 @@ private:
// These facilities are used for validation in debug builds.
class UnparsedFileStatus {
- llvm::PointerIntPair<const FileEntry *, 1, bool> Data;
+ OptionalFileEntryRef File;
+ bool FoundDirectives;
public:
- UnparsedFileStatus(const FileEntry *File, bool FoundDirectives)
- : Data(File, FoundDirectives) {}
+ UnparsedFileStatus(OptionalFileEntryRef File, bool FoundDirectives)
+ : File(File), FoundDirectives(FoundDirectives) {}
- const FileEntry *getFile() const { return Data.getPointer(); }
- bool foundDirectives() const { return Data.getInt(); }
+ OptionalFileEntryRef getFile() const { return File; }
+ bool foundDirectives() const { return FoundDirectives; }
};
using ParsedFilesMap = llvm::DenseMap<FileID, const FileEntry *>;
diff --git a/contrib/llvm-project/clang/include/clang/Index/IndexSymbol.h b/contrib/llvm-project/clang/include/clang/Index/IndexSymbol.h
index 2ba81986c2fe..59e90fced3dd 100644
--- a/contrib/llvm-project/clang/include/clang/Index/IndexSymbol.h
+++ b/contrib/llvm-project/clang/include/clang/Index/IndexSymbol.h
@@ -57,6 +57,8 @@ enum class SymbolKind : uint8_t {
TemplateTypeParm,
TemplateTemplateParm,
NonTypeTemplateParm,
+
+ Concept, /// C++20 concept.
};
enum class SymbolLanguage : uint8_t {
diff --git a/contrib/llvm-project/clang/include/clang/Index/IndexingOptions.h b/contrib/llvm-project/clang/include/clang/Index/IndexingOptions.h
index d19653848d59..97847dd7d5d8 100644
--- a/contrib/llvm-project/clang/include/clang/Index/IndexingOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Index/IndexingOptions.h
@@ -29,9 +29,9 @@ struct IndexingOptions {
bool IndexFunctionLocals = false;
bool IndexImplicitInstantiation = false;
bool IndexMacros = true;
- // Whether to index macro definitions in the Preprocesor when preprocessor
+ // Whether to index macro definitions in the Preprocessor when preprocessor
// callback is not available (e.g. after parsing has finished). Note that
- // macro references are not available in Proprocessor.
+ // macro references are not available in Preprocessor.
bool IndexMacrosInPreprocessor = false;
// Has no effect if IndexFunctionLocals are false.
bool IndexParametersInDeclarations = false;
diff --git a/contrib/llvm-project/clang/include/clang/IndexSerialization/SerializablePathCollection.h b/contrib/llvm-project/clang/include/clang/IndexSerialization/SerializablePathCollection.h
index 20cf8fbdad96..6337a8119668 100644
--- a/contrib/llvm-project/clang/include/clang/IndexSerialization/SerializablePathCollection.h
+++ b/contrib/llvm-project/clang/include/clang/IndexSerialization/SerializablePathCollection.h
@@ -6,13 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_INDEX_SerializablePathCollection_H
-#define LLVM_CLANG_INDEX_SerializablePathCollection_H
+#ifndef LLVM_CLANG_INDEXSERIALIZATION_SERIALIZABLEPATHCOLLECTION_H
+#define LLVM_CLANG_INDEXSERIALIZATION_SERIALIZABLEPATHCOLLECTION_H
#include "clang/Basic/FileManager.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
@@ -111,7 +110,7 @@ public:
/// Stores path to \p FE if it hasn't been stored yet.
/// \returns index to array exposed by getPathsBuffer().
- size_t tryStoreFilePath(const clang::FileEntry &FE);
+ size_t tryStoreFilePath(FileEntryRef FE);
private:
/// Stores \p Path if it is non-empty.
@@ -126,4 +125,4 @@ private:
} // namespace index
} // namespace clang
-#endif // LLVM_CLANG_INDEX_SerializablePathCollection_H
+#endif // LLVM_CLANG_INDEXSERIALIZATION_SERIALIZABLEPATHCOLLECTION_H
diff --git a/contrib/llvm-project/clang/include/clang/Interpreter/CodeCompletion.h b/contrib/llvm-project/clang/include/clang/Interpreter/CodeCompletion.h
new file mode 100644
index 000000000000..c64aa899759f
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Interpreter/CodeCompletion.h
@@ -0,0 +1,49 @@
+//===----- CodeCompletion.h - Code Completion for ClangRepl ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the classes which performs code completion at the REPL.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INTERPRETER_CODE_COMPLETION_H
+#define LLVM_CLANG_INTERPRETER_CODE_COMPLETION_H
+#include <string>
+#include <vector>
+
+namespace llvm {
+class StringRef;
+} // namespace llvm
+
+namespace clang {
+class CodeCompletionResult;
+class CompilerInstance;
+
+struct ReplCodeCompleter {
+ ReplCodeCompleter() = default;
+ std::string Prefix;
+
+ /// \param InterpCI [in] The compiler instance that is used to trigger code
+ /// completion
+
+ /// \param Content [in] The string where code completion is triggered.
+
+ /// \param Line [in] The line number of the code completion point.
+
+ /// \param Col [in] The column number of the code completion point.
+
+ /// \param ParentCI [in] The running interpreter compiler instance that
+ /// provides ASTContexts.
+
+ /// \param CCResults [out] The completion results.
+ void codeComplete(CompilerInstance *InterpCI, llvm::StringRef Content,
+ unsigned Line, unsigned Col,
+ const CompilerInstance *ParentCI,
+ std::vector<std::string> &CCResults);
+};
+} // namespace clang
+#endif
diff --git a/contrib/llvm-project/clang/include/clang/Interpreter/Interpreter.h b/contrib/llvm-project/clang/include/clang/Interpreter/Interpreter.h
index 020cbe2db3d0..292fa566ae70 100644
--- a/contrib/llvm-project/clang/include/clang/Interpreter/Interpreter.h
+++ b/contrib/llvm-project/clang/include/clang/Interpreter/Interpreter.h
@@ -14,32 +14,62 @@
#ifndef LLVM_CLANG_INTERPRETER_INTERPRETER_H
#define LLVM_CLANG_INTERPRETER_INTERPRETER_H
+#include "clang/AST/Decl.h"
+#include "clang/AST/GlobalDecl.h"
#include "clang/Interpreter/PartialTranslationUnit.h"
+#include "clang/Interpreter/Value.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include "llvm/Support/Error.h"
-
#include <memory>
#include <vector>
namespace llvm {
namespace orc {
+class LLJIT;
class ThreadSafeContext;
-}
-class Module;
+} // namespace orc
} // namespace llvm
namespace clang {
class CompilerInstance;
-class DeclGroupRef;
class IncrementalExecutor;
class IncrementalParser;
/// Create a pre-configured \c CompilerInstance for incremental processing.
class IncrementalCompilerBuilder {
public:
+ IncrementalCompilerBuilder() {}
+
+ void SetCompilerArgs(const std::vector<const char *> &Args) {
+ UserArgs = Args;
+ }
+
+ // General C++
+ llvm::Expected<std::unique_ptr<CompilerInstance>> CreateCpp();
+
+ // Offload options
+ void SetOffloadArch(llvm::StringRef Arch) { OffloadArch = Arch; };
+
+ // CUDA specific
+ void SetCudaSDK(llvm::StringRef path) { CudaSDKPath = path; };
+
+ llvm::Expected<std::unique_ptr<CompilerInstance>> CreateCudaHost();
+ llvm::Expected<std::unique_ptr<CompilerInstance>> CreateCudaDevice();
+
+private:
static llvm::Expected<std::unique_ptr<CompilerInstance>>
create(std::vector<const char *> &ClangArgv);
+
+ llvm::Expected<std::unique_ptr<CompilerInstance>> createCuda(bool device);
+
+ std::vector<const char *> UserArgs;
+
+ llvm::StringRef OffloadArch;
+ llvm::StringRef CudaSDKPath;
};
/// Provides top-level interfaces for incremental compilation and execution.
@@ -48,23 +78,73 @@ class Interpreter {
std::unique_ptr<IncrementalParser> IncrParser;
std::unique_ptr<IncrementalExecutor> IncrExecutor;
+ // An optional parser for CUDA offloading
+ std::unique_ptr<IncrementalParser> DeviceParser;
+
Interpreter(std::unique_ptr<CompilerInstance> CI, llvm::Error &Err);
+ llvm::Error CreateExecutor();
+ unsigned InitPTUSize = 0;
+
+ // This member holds the last result of the value printing. It's a class
+ // member because we might want to access it after more inputs. If no value
+ // printing happens, it's in an invalid state.
+ Value LastValue;
+
public:
~Interpreter();
static llvm::Expected<std::unique_ptr<Interpreter>>
create(std::unique_ptr<CompilerInstance> CI);
+ static llvm::Expected<std::unique_ptr<Interpreter>>
+ createWithCUDA(std::unique_ptr<CompilerInstance> CI,
+ std::unique_ptr<CompilerInstance> DCI);
+ const ASTContext &getASTContext() const;
+ ASTContext &getASTContext();
const CompilerInstance *getCompilerInstance() const;
+ CompilerInstance *getCompilerInstance();
+ llvm::Expected<llvm::orc::LLJIT &> getExecutionEngine();
+
llvm::Expected<PartialTranslationUnit &> Parse(llvm::StringRef Code);
llvm::Error Execute(PartialTranslationUnit &T);
- llvm::Error ParseAndExecute(llvm::StringRef Code) {
- auto PTU = Parse(Code);
- if (!PTU)
- return PTU.takeError();
- if (PTU->TheModule)
- return Execute(*PTU);
- return llvm::Error::success();
+ llvm::Error ParseAndExecute(llvm::StringRef Code, Value *V = nullptr);
+ llvm::Expected<llvm::orc::ExecutorAddr> CompileDtorCall(CXXRecordDecl *CXXRD);
+
+ /// Undo N previous incremental inputs.
+ llvm::Error Undo(unsigned N = 1);
+
+ /// Link a dynamic library
+ llvm::Error LoadDynamicLibrary(const char *name);
+
+ /// \returns the \c ExecutorAddr of a \c GlobalDecl. This interface uses
+ /// the CodeGenModule's internal mangling cache to avoid recomputing the
+ /// mangled name.
+ llvm::Expected<llvm::orc::ExecutorAddr> getSymbolAddress(GlobalDecl GD) const;
+
+ /// \returns the \c ExecutorAddr of a given name as written in the IR.
+ llvm::Expected<llvm::orc::ExecutorAddr>
+ getSymbolAddress(llvm::StringRef IRName) const;
+
+ /// \returns the \c ExecutorAddr of a given name as written in the object
+ /// file.
+ llvm::Expected<llvm::orc::ExecutorAddr>
+ getSymbolAddressFromLinkerName(llvm::StringRef LinkerName) const;
+
+ enum InterfaceKind { NoAlloc, WithAlloc, CopyArray, NewTag };
+
+ const llvm::SmallVectorImpl<Expr *> &getValuePrintingInfo() const {
+ return ValuePrintingInfo;
}
+
+ Expr *SynthesizeExpr(Expr *E);
+
+private:
+ size_t getEffectivePTUSize() const;
+
+ bool FindRuntimeInterface();
+
+ llvm::DenseMap<CXXRecordDecl *, llvm::orc::ExecutorAddr> Dtors;
+
+ llvm::SmallVector<Expr *, 4> ValuePrintingInfo;
};
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Interpreter/Value.h b/contrib/llvm-project/clang/include/clang/Interpreter/Value.h
new file mode 100644
index 000000000000..c380cd91550d
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Interpreter/Value.h
@@ -0,0 +1,208 @@
+//===--- Value.h - Definition of interpreter value --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Value is a lightweight struct that is used for carrying execution results in
+// clang-repl. It's a special runtime that acts like a messager between compiled
+// code and interpreted code. This makes it possible to exchange interesting
+// information between the compiled & interpreted world.
+//
+// A typical usage is like the below:
+//
+// Value V;
+// Interp.ParseAndExecute("int x = 42;");
+// Interp.ParseAndExecute("x", &V);
+// V.getType(); // <-- Yields a clang::QualType.
+// V.getInt(); // <-- Yields 42.
+//
+// The current design is still highly experimental and nobody should rely on the
+// API being stable because we're hopefully going to make significant changes to
+// it in the relatively near future. For example, Value also intends to be used
+// as an exchange token for JIT support enabling remote execution on the embed
+// devices where the JIT infrastructure cannot fit. To support that we will need
+// to split the memory storage in a different place and perhaps add a resource
+// header is similar to intrinsics headers which have stricter performance
+// constraints.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INTERPRETER_VALUE_H
+#define LLVM_CLANG_INTERPRETER_VALUE_H
+
+#include "llvm/Support/Compiler.h"
+#include <cstdint>
+
+// NOTE: Since the REPL itself could also include this runtime, extreme caution
+// should be taken when MAKING CHANGES to this file, especially when INCLUDE NEW
+// HEADERS, like <string>, <memory> and etc. (That pulls a large number of
+// tokens and will impact the runtime performance of the REPL)
+
+namespace llvm {
+class raw_ostream;
+
+} // namespace llvm
+
+namespace clang {
+
+class ASTContext;
+class Interpreter;
+class QualType;
+
+#if defined(_WIN32)
+// REPL_EXTERNAL_VISIBILITY are symbols that we need to be able to locate
+// at runtime. On Windows, this requires them to be exported from any of the
+// modules loaded at runtime. Marking them as dllexport achieves this; both
+// for DLLs (that normally export symbols as part of their interface) and for
+// EXEs (that normally don't export anything).
+// For a build with libclang-cpp.dll, this doesn't make any difference - the
+// functions would have been exported anyway. But for cases when these are
+// statically linked into an EXE, it makes sure that they're exported.
+#define REPL_EXTERNAL_VISIBILITY __declspec(dllexport)
+#elif __has_attribute(visibility)
+#if defined(LLVM_BUILD_LLVM_DYLIB) || defined(LLVM_BUILD_SHARED_LIBS)
+#define REPL_EXTERNAL_VISIBILITY __attribute__((visibility("default")))
+#else
+#define REPL_EXTERNAL_VISIBILITY
+#endif
+#else
+#define REPL_EXTERNAL_VISIBILITY
+#endif
+
+#define REPL_BUILTIN_TYPES \
+ X(bool, Bool) \
+ X(char, Char_S) \
+ X(signed char, SChar) \
+ X(unsigned char, UChar) \
+ X(short, Short) \
+ X(unsigned short, UShort) \
+ X(int, Int) \
+ X(unsigned int, UInt) \
+ X(long, Long) \
+ X(unsigned long, ULong) \
+ X(long long, LongLong) \
+ X(unsigned long long, ULongLong) \
+ X(float, Float) \
+ X(double, Double) \
+ X(long double, LongDouble)
+
+class REPL_EXTERNAL_VISIBILITY Value {
+ union Storage {
+#define X(type, name) type m_##name;
+ REPL_BUILTIN_TYPES
+#undef X
+ void *m_Ptr;
+ };
+
+public:
+ enum Kind {
+#define X(type, name) K_##name,
+ REPL_BUILTIN_TYPES
+#undef X
+
+ K_Void,
+ K_PtrOrObj,
+ K_Unspecified
+ };
+
+ Value() = default;
+ Value(Interpreter *In, void *Ty);
+ Value(const Value &RHS);
+ Value(Value &&RHS) noexcept;
+ Value &operator=(const Value &RHS);
+ Value &operator=(Value &&RHS) noexcept;
+ ~Value();
+
+ void printType(llvm::raw_ostream &Out) const;
+ void printData(llvm::raw_ostream &Out) const;
+ void print(llvm::raw_ostream &Out) const;
+ void dump() const;
+ void clear();
+
+ ASTContext &getASTContext();
+ const ASTContext &getASTContext() const;
+ Interpreter &getInterpreter();
+ const Interpreter &getInterpreter() const;
+ QualType getType() const;
+
+ bool isValid() const { return ValueKind != K_Unspecified; }
+ bool isVoid() const { return ValueKind == K_Void; }
+ bool hasValue() const { return isValid() && !isVoid(); }
+ bool isManuallyAlloc() const { return IsManuallyAlloc; }
+ Kind getKind() const { return ValueKind; }
+ void setKind(Kind K) { ValueKind = K; }
+ void setOpaqueType(void *Ty) { OpaqueType = Ty; }
+
+ void *getPtr() const;
+ void setPtr(void *Ptr) { Data.m_Ptr = Ptr; }
+
+#define X(type, name) \
+ void set##name(type Val) { Data.m_##name = Val; } \
+ type get##name() const { return Data.m_##name; }
+ REPL_BUILTIN_TYPES
+#undef X
+
+ /// \brief Get the value with cast.
+ //
+ /// Get the value cast to T. This is similar to reinterpret_cast<T>(value),
+ /// casting the value of builtins (except void), enums and pointers.
+ /// Values referencing an object are treated as pointers to the object.
+ template <typename T> T convertTo() const {
+ return convertFwd<T>::cast(*this);
+ }
+
+protected:
+ bool isPointerOrObjectType() const { return ValueKind == K_PtrOrObj; }
+
+ /// \brief Get to the value with type checking casting the underlying
+ /// stored value to T.
+ template <typename T> T as() const {
+ switch (ValueKind) {
+ default:
+ return T();
+#define X(type, name) \
+ case Value::K_##name: \
+ return (T)Data.m_##name;
+ REPL_BUILTIN_TYPES
+#undef X
+ }
+ }
+
+ // Allow convertTo to be partially specialized.
+ template <typename T> struct convertFwd {
+ static T cast(const Value &V) {
+ if (V.isPointerOrObjectType())
+ return (T)(uintptr_t)V.as<void *>();
+ if (!V.isValid() || V.isVoid()) {
+ return T();
+ }
+ return V.as<T>();
+ }
+ };
+
+ template <typename T> struct convertFwd<T *> {
+ static T *cast(const Value &V) {
+ if (V.isPointerOrObjectType())
+ return (T *)(uintptr_t)V.as<void *>();
+ return nullptr;
+ }
+ };
+
+ Interpreter *Interp = nullptr;
+ void *OpaqueType = nullptr;
+ Storage Data;
+ Kind ValueKind = K_Unspecified;
+ bool IsManuallyAlloc = false;
+};
+
+template <> inline void *Value::as() const {
+ if (isPointerOrObjectType())
+ return Data.m_Ptr;
+ return (void *)as<uintptr_t>();
+}
+
+} // namespace clang
+#endif
diff --git a/contrib/llvm-project/clang/include/clang/Lex/DependencyDirectivesScanner.h b/contrib/llvm-project/clang/include/clang/Lex/DependencyDirectivesScanner.h
new file mode 100644
index 000000000000..0e115906fbfe
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Lex/DependencyDirectivesScanner.h
@@ -0,0 +1,140 @@
+//===- clang/Lex/DependencyDirectivesScanner.h ---------------------*- C++ -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This is the interface for scanning header and source files to get the
+/// minimum necessary preprocessor directives for evaluating includes. It
+/// reduces the source down to #define, #include, #import, @import, and any
+/// conditional preprocessor logic that contains one of those.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LEX_DEPENDENCYDIRECTIVESSCANNER_H
+#define LLVM_CLANG_LEX_DEPENDENCYDIRECTIVESSCANNER_H
+
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/ArrayRef.h"
+
+namespace clang {
+
+namespace tok {
+enum TokenKind : unsigned short;
+}
+
+class DiagnosticsEngine;
+
+namespace dependency_directives_scan {
+
+/// Token lexed as part of dependency directive scanning.
+struct Token {
+ /// Offset into the original source input.
+ unsigned Offset;
+ unsigned Length;
+ tok::TokenKind Kind;
+ unsigned short Flags;
+
+ Token(unsigned Offset, unsigned Length, tok::TokenKind Kind,
+ unsigned short Flags)
+ : Offset(Offset), Length(Length), Kind(Kind), Flags(Flags) {}
+
+ unsigned getEnd() const { return Offset + Length; }
+
+ bool is(tok::TokenKind K) const { return Kind == K; }
+ bool isNot(tok::TokenKind K) const { return Kind != K; }
+ bool isOneOf(tok::TokenKind K1, tok::TokenKind K2) const {
+ return is(K1) || is(K2);
+ }
+ template <typename... Ts> bool isOneOf(tok::TokenKind K1, Ts... Ks) const {
+ return is(K1) || isOneOf(Ks...);
+ }
+};
+
+/// Represents the kind of preprocessor directive or a module declaration that
+/// is tracked by the scanner in its token output.
+enum DirectiveKind : uint8_t {
+ pp_none,
+ pp_include,
+ pp___include_macros,
+ pp_define,
+ pp_undef,
+ pp_import,
+ pp_pragma_import,
+ pp_pragma_once,
+ pp_pragma_push_macro,
+ pp_pragma_pop_macro,
+ pp_pragma_include_alias,
+ pp_pragma_system_header,
+ pp_include_next,
+ pp_if,
+ pp_ifdef,
+ pp_ifndef,
+ pp_elif,
+ pp_elifdef,
+ pp_elifndef,
+ pp_else,
+ pp_endif,
+ decl_at_import,
+ cxx_module_decl,
+ cxx_import_decl,
+ cxx_export_module_decl,
+ cxx_export_import_decl,
+ /// Indicates that there are tokens present between the last scanned directive
+ /// and eof. The \p Directive::Tokens array will be empty for this kind.
+ tokens_present_before_eof,
+ pp_eof,
+};
+
+/// Represents a directive that's lexed as part of the dependency directives
+/// scanning. It's used to track various preprocessor directives that could
+/// potentially have an effect on the dependencies.
+struct Directive {
+ ArrayRef<Token> Tokens;
+
+ /// The kind of token.
+ DirectiveKind Kind = pp_none;
+
+ Directive() = default;
+ Directive(DirectiveKind K, ArrayRef<Token> Tokens)
+ : Tokens(Tokens), Kind(K) {}
+};
+
+} // end namespace dependency_directives_scan
+
+/// Scan the input for the preprocessor directives that might have
+/// an effect on the dependencies for a compilation unit.
+///
+/// This function ignores all non-preprocessor code and anything that
+/// can't affect what gets included.
+///
+/// \returns false on success, true on error. If the diagnostic engine is not
+/// null, an appropriate error is reported using the given input location
+/// with the offset that corresponds to the \p Input buffer offset.
+bool scanSourceForDependencyDirectives(
+ StringRef Input, SmallVectorImpl<dependency_directives_scan::Token> &Tokens,
+ SmallVectorImpl<dependency_directives_scan::Directive> &Directives,
+ DiagnosticsEngine *Diags = nullptr,
+ SourceLocation InputSourceLoc = SourceLocation());
+
+/// Print the previously scanned dependency directives as minimized source text.
+///
+/// \param Source The original source text that the dependency directives were
+/// scanned from.
+/// \param Directives The previously scanned dependency
+/// directives.
+/// \param OS the stream to print the dependency directives on.
+///
+/// This is used primarily for testing purposes, during dependency scanning the
+/// \p Lexer uses the tokens directly, not their printed version.
+void printDependencyDirectivesAsSource(
+ StringRef Source,
+ ArrayRef<dependency_directives_scan::Directive> Directives,
+ llvm::raw_ostream &OS);
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_LEX_DEPENDENCYDIRECTIVESSCANNER_H
diff --git a/contrib/llvm-project/clang/include/clang/Lex/DependencyDirectivesSourceMinimizer.h b/contrib/llvm-project/clang/include/clang/Lex/DependencyDirectivesSourceMinimizer.h
deleted file mode 100644
index 9bb820156c25..000000000000
--- a/contrib/llvm-project/clang/include/clang/Lex/DependencyDirectivesSourceMinimizer.h
+++ /dev/null
@@ -1,112 +0,0 @@
-//===- clang/Lex/DependencyDirectivesSourceMinimizer.h - ----------*- C++ -*-//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// \file
-/// This is the interface for minimizing header and source files to the
-/// minimum necessary preprocessor directives for evaluating includes. It
-/// reduces the source down to #define, #include, #import, @import, and any
-/// conditional preprocessor logic that contains one of those.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LEX_DEPENDENCY_DIRECTIVES_SOURCE_MINIMIZER_H
-#define LLVM_CLANG_LEX_DEPENDENCY_DIRECTIVES_SOURCE_MINIMIZER_H
-
-#include "clang/Basic/SourceLocation.h"
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringRef.h"
-
-namespace clang {
-
-class DiagnosticsEngine;
-
-namespace minimize_source_to_dependency_directives {
-
-/// Represents the kind of preprocessor directive or a module declaration that
-/// is tracked by the source minimizer in its token output.
-enum TokenKind {
- pp_none,
- pp_include,
- pp___include_macros,
- pp_define,
- pp_undef,
- pp_import,
- pp_pragma_import,
- pp_pragma_once,
- pp_include_next,
- pp_if,
- pp_ifdef,
- pp_ifndef,
- pp_elif,
- pp_elifdef,
- pp_elifndef,
- pp_else,
- pp_endif,
- decl_at_import,
- cxx_export_decl,
- cxx_module_decl,
- cxx_import_decl,
- pp_eof,
-};
-
-/// Represents a simplified token that's lexed as part of the source
-/// minimization. It's used to track the location of various preprocessor
-/// directives that could potentially have an effect on the depedencies.
-struct Token {
- /// The kind of token.
- TokenKind K = pp_none;
-
- /// Offset into the output byte stream of where the directive begins.
- int Offset = -1;
-
- Token(TokenKind K, int Offset) : K(K), Offset(Offset) {}
-};
-
-/// Simplified token range to track the range of a potentially skippable PP
-/// directive.
-struct SkippedRange {
- /// Offset into the output byte stream of where the skipped directive begins.
- int Offset;
-
- /// The number of bytes that can be skipped before the preprocessing must
- /// resume.
- int Length;
-};
-
-/// Computes the potential source ranges that can be skipped by the preprocessor
-/// when skipping a directive like #if, #ifdef or #elsif.
-///
-/// \returns false on success, true on error.
-bool computeSkippedRanges(ArrayRef<Token> Input,
- llvm::SmallVectorImpl<SkippedRange> &Range);
-
-} // end namespace minimize_source_to_dependency_directives
-
-/// Minimize the input down to the preprocessor directives that might have
-/// an effect on the dependencies for a compilation unit.
-///
-/// This function deletes all non-preprocessor code, and strips anything that
-/// can't affect what gets included. It canonicalizes whitespace where
-/// convenient to stabilize the output against formatting changes in the input.
-///
-/// Clears the output vectors at the beginning of the call.
-///
-/// \returns false on success, true on error. If the diagnostic engine is not
-/// null, an appropriate error is reported using the given input location
-/// with the offset that corresponds to the minimizer's current buffer offset.
-bool minimizeSourceToDependencyDirectives(
- llvm::StringRef Input, llvm::SmallVectorImpl<char> &Output,
- llvm::SmallVectorImpl<minimize_source_to_dependency_directives::Token>
- &Tokens,
- DiagnosticsEngine *Diags = nullptr,
- SourceLocation InputSourceLoc = SourceLocation());
-
-} // end namespace clang
-
-#endif // LLVM_CLANG_LEX_DEPENDENCY_DIRECTIVES_SOURCE_MINIMIZER_H
diff --git a/contrib/llvm-project/clang/include/clang/Lex/DirectoryLookup.h b/contrib/llvm-project/clang/include/clang/Lex/DirectoryLookup.h
index da2ae9fce1aa..81680d3b271e 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/DirectoryLookup.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/DirectoryLookup.h
@@ -50,17 +50,21 @@ private:
/// DirCharacteristic - The type of directory this is: this is an instance of
/// SrcMgr::CharacteristicKind.
- unsigned DirCharacteristic : 2;
+ LLVM_PREFERRED_TYPE(SrcMgr::CharacteristicKind)
+ unsigned DirCharacteristic : 3;
/// LookupType - This indicates whether this DirectoryLookup object is a
/// normal directory, a framework, or a headermap.
+ LLVM_PREFERRED_TYPE(LookupType_t)
unsigned LookupType : 2;
/// Whether this is a header map used when building a framework.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsIndexHeaderMap : 1;
/// Whether we've performed an exhaustive search for module maps
/// within the subdirectories of this directory.
+ LLVM_PREFERRED_TYPE(bool)
unsigned SearchedAllModuleMaps : 1;
public:
@@ -91,14 +95,18 @@ public:
return isNormalDir() ? &u.Dir.getDirEntry() : nullptr;
}
+ OptionalDirectoryEntryRef getDirRef() const {
+ return isNormalDir() ? OptionalDirectoryEntryRef(u.Dir) : std::nullopt;
+ }
+
/// getFrameworkDir - Return the directory that this framework refers to.
///
const DirectoryEntry *getFrameworkDir() const {
return isFramework() ? &u.Dir.getDirEntry() : nullptr;
}
- Optional<DirectoryEntryRef> getFrameworkDirRef() const {
- return isFramework() ? Optional<DirectoryEntryRef>(u.Dir) : None;
+ OptionalDirectoryEntryRef getFrameworkDirRef() const {
+ return isFramework() ? OptionalDirectoryEntryRef(u.Dir) : std::nullopt;
}
/// getHeaderMap - Return the directory that this entry refers to.
@@ -176,16 +184,17 @@ public:
/// \param [out] MappedName if this is a headermap which maps the filename to
/// a framework include ("Foo.h" -> "Foo/Foo.h"), set the new name to this
/// vector and point Filename to it.
- Optional<FileEntryRef>
+ OptionalFileEntryRef
LookupFile(StringRef &Filename, HeaderSearch &HS, SourceLocation IncludeLoc,
SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath, Module *RequestingModule,
ModuleMap::KnownHeader *SuggestedModule,
bool &InUserSpecifiedSystemFramework, bool &IsFrameworkFound,
- bool &IsInHeaderMap, SmallVectorImpl<char> &MappedName) const;
+ bool &IsInHeaderMap, SmallVectorImpl<char> &MappedName,
+ bool OpenFile = true) const;
private:
- Optional<FileEntryRef> DoFrameworkLookup(
+ OptionalFileEntryRef DoFrameworkLookup(
StringRef Filename, HeaderSearch &HS, SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath, Module *RequestingModule,
ModuleMap::KnownHeader *SuggestedModule,
diff --git a/contrib/llvm-project/clang/include/clang/Lex/HeaderMap.h b/contrib/llvm-project/clang/include/clang/Lex/HeaderMap.h
index 53108b00bd16..9d88b36bfd8e 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/HeaderMap.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/HeaderMap.h
@@ -15,11 +15,12 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LLVM.h"
-#include "llvm/ADT/Optional.h"
+#include "clang/Lex/HeaderMapTypes.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
#include <memory>
+#include <optional>
namespace clang {
@@ -39,6 +40,19 @@ public:
// Check for a valid header and extract the byte swap.
static bool checkHeader(const llvm::MemoryBuffer &File, bool &NeedsByteSwap);
+ // Make a call for every Key in the map.
+ template <typename Func> void forEachKey(Func Callback) const {
+ const HMapHeader &Hdr = getHeader();
+ unsigned NumBuckets = getEndianAdjustedWord(Hdr.NumBuckets);
+
+ for (unsigned Bucket = 0; Bucket < NumBuckets; ++Bucket) {
+ HMapBucket B = getBucket(Bucket);
+ if (B.Key != HMAP_EmptyBucketKey)
+ if (std::optional<StringRef> Key = getString(B.Key))
+ Callback(*Key);
+ }
+ }
+
/// If the specified relative filename is located in this HeaderMap return
/// the filename it is mapped to, otherwise return an empty StringRef.
StringRef lookupFilename(StringRef Filename,
@@ -59,8 +73,8 @@ private:
HMapBucket getBucket(unsigned BucketNo) const;
/// Look up the specified string in the string table. If the string index is
- /// not valid, return None.
- Optional<StringRef> getString(unsigned StrTabIdx) const;
+ /// not valid, return std::nullopt.
+ std::optional<StringRef> getString(unsigned StrTabIdx) const;
};
/// This class represents an Apple concept known as a 'header map'. To the
@@ -74,17 +88,10 @@ class HeaderMap : private HeaderMapImpl {
public:
/// This attempts to load the specified file as a header map. If it doesn't
/// look like a HeaderMap, it gives up and returns null.
- static std::unique_ptr<HeaderMap> Create(const FileEntry *FE,
- FileManager &FM);
-
- /// Check to see if the specified relative filename is located in this
- /// HeaderMap. If so, open it and return its FileEntry. If RawPath is not
- /// NULL and the file is found, RawPath will be set to the raw path at which
- /// the file was found in the file system. For example, for a search path
- /// ".." and a filename "../file.h" this would be "../../file.h".
- Optional<FileEntryRef> LookupFile(StringRef Filename, FileManager &FM) const;
+ static std::unique_ptr<HeaderMap> Create(FileEntryRef FE, FileManager &FM);
using HeaderMapImpl::dump;
+ using HeaderMapImpl::forEachKey;
using HeaderMapImpl::getFileName;
using HeaderMapImpl::lookupFilename;
using HeaderMapImpl::reverseLookupFilename;
diff --git a/contrib/llvm-project/clang/include/clang/Lex/HeaderSearch.h b/contrib/llvm-project/clang/include/clang/Lex/HeaderSearch.h
index a35a394f719b..a2c33842924b 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/HeaderSearch.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/HeaderSearch.h
@@ -20,8 +20,6 @@
#include "clang/Lex/ModuleMap.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SetVector.h"
-#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
@@ -34,6 +32,12 @@
#include <utility>
#include <vector>
+namespace llvm {
+
+class Triple;
+
+} // namespace llvm
+
namespace clang {
class DiagnosticsEngine;
@@ -41,6 +45,7 @@ class DirectoryEntry;
class ExternalPreprocessorSource;
class FileEntry;
class FileManager;
+class HeaderSearch;
class HeaderSearchOptions;
class IdentifierInfo;
class LangOptions;
@@ -51,30 +56,39 @@ class TargetInfo;
/// The preprocessor keeps track of this information for each
/// file that is \#included.
struct HeaderFileInfo {
- /// True if this is a \#import'd or \#pragma once file.
+ // TODO: Whether the file was imported is not a property of the file itself.
+ // It's a preprocessor state, move it there.
+ /// True if this is a \#import'd file.
+ LLVM_PREFERRED_TYPE(bool)
unsigned isImport : 1;
/// True if this is a \#pragma once file.
+ LLVM_PREFERRED_TYPE(bool)
unsigned isPragmaOnce : 1;
/// Keep track of whether this is a system header, and if so,
/// whether it is C++ clean or not. This can be set by the include paths or
/// by \#pragma gcc system_header. This is an instance of
/// SrcMgr::CharacteristicKind.
+ LLVM_PREFERRED_TYPE(SrcMgr::CharacteristicKind)
unsigned DirInfo : 3;
/// Whether this header file info was supplied by an external source,
/// and has not changed since.
+ LLVM_PREFERRED_TYPE(bool)
unsigned External : 1;
/// Whether this header is part of a module.
+ LLVM_PREFERRED_TYPE(bool)
unsigned isModuleHeader : 1;
/// Whether this header is part of the module that we are building.
+ LLVM_PREFERRED_TYPE(bool)
unsigned isCompilingModuleHeader : 1;
/// Whether this structure is considered to already have been
/// "resolved", meaning that it was loaded from the external source.
+ LLVM_PREFERRED_TYPE(bool)
unsigned Resolved : 1;
/// Whether this is a header inside a framework that is currently
@@ -84,14 +98,13 @@ struct HeaderFileInfo {
/// into the appropriate framework subdirectories, and therefore are
/// provided via a header map. This bit indicates when this is one of
/// those framework headers.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IndexHeaderMapHeader : 1;
/// Whether this file has been looked up as a header.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsValid : 1;
- /// The number of times the file has been included already.
- unsigned short NumIncludes = 0;
-
/// The ID number of the controlling macro.
///
/// This ID number will be non-zero when there is a controlling
@@ -113,14 +126,6 @@ struct HeaderFileInfo {
/// of the framework.
StringRef Framework;
- /// List of aliases that this header is known as.
- /// Most headers should only have at most one alias, but a handful
- /// have two.
- llvm::SetVector<llvm::SmallString<32>,
- llvm::SmallVector<llvm::SmallString<32>, 2>,
- llvm::SmallSet<llvm::SmallString<32>, 2>>
- Aliases;
-
HeaderFileInfo()
: isImport(false), isPragmaOnce(false), DirInfo(SrcMgr::C_User),
External(false), isModuleHeader(false), isCompilingModuleHeader(false),
@@ -130,13 +135,6 @@ struct HeaderFileInfo {
/// any.
const IdentifierInfo *
getControllingMacro(ExternalPreprocessorSource *External);
-
- /// Determine whether this is a non-default header file info, e.g.,
- /// it corresponds to an actual header we've included or tried to include.
- bool isNonDefault() const {
- return isImport || isPragmaOnce || NumIncludes || ControllingMacro ||
- ControllingMacroID;
- }
};
/// An external source of header file information, which may supply
@@ -150,13 +148,13 @@ public:
/// \returns Header file information for the given file entry, with the
/// \c External bit set. If the file entry is not known, return a
/// default-constructed \c HeaderFileInfo.
- virtual HeaderFileInfo GetHeaderFileInfo(const FileEntry *FE) = 0;
+ virtual HeaderFileInfo GetHeaderFileInfo(FileEntryRef FE) = 0;
};
/// This structure is used to record entries in our framework cache.
struct FrameworkCacheEntry {
/// The directory entry which should be used for the cached framework.
- const DirectoryEntry *Directory;
+ OptionalDirectoryEntryRef Directory;
/// Whether this framework has been "user-specified" to be treated as if it
/// were a system framework (even if it was found outside a system framework
@@ -164,27 +162,106 @@ struct FrameworkCacheEntry {
bool IsUserSpecifiedSystemFramework;
};
+namespace detail {
+template <bool Const, typename T>
+using Qualified = std::conditional_t<Const, const T, T>;
+
+/// Forward iterator over the search directories of \c HeaderSearch.
+template <bool IsConst>
+struct SearchDirIteratorImpl
+ : llvm::iterator_facade_base<SearchDirIteratorImpl<IsConst>,
+ std::forward_iterator_tag,
+ Qualified<IsConst, DirectoryLookup>> {
+ /// Const -> non-const iterator conversion.
+ template <typename Enable = std::enable_if<IsConst, bool>>
+ SearchDirIteratorImpl(const SearchDirIteratorImpl<false> &Other)
+ : HS(Other.HS), Idx(Other.Idx) {}
+
+ SearchDirIteratorImpl(const SearchDirIteratorImpl &) = default;
+
+ SearchDirIteratorImpl &operator=(const SearchDirIteratorImpl &) = default;
+
+ bool operator==(const SearchDirIteratorImpl &RHS) const {
+ return HS == RHS.HS && Idx == RHS.Idx;
+ }
+
+ SearchDirIteratorImpl &operator++() {
+ assert(*this && "Invalid iterator.");
+ ++Idx;
+ return *this;
+ }
+
+ Qualified<IsConst, DirectoryLookup> &operator*() const {
+ assert(*this && "Invalid iterator.");
+ return HS->SearchDirs[Idx];
+ }
+
+ /// Creates an invalid iterator.
+ SearchDirIteratorImpl(std::nullptr_t) : HS(nullptr), Idx(0) {}
+
+ /// Checks whether the iterator is valid.
+ explicit operator bool() const { return HS != nullptr; }
+
+private:
+ /// The parent \c HeaderSearch. This is \c nullptr for invalid iterator.
+ Qualified<IsConst, HeaderSearch> *HS;
+
+ /// The index of the current element.
+ size_t Idx;
+
+ /// The constructor that creates a valid iterator.
+ SearchDirIteratorImpl(Qualified<IsConst, HeaderSearch> &HS, size_t Idx)
+ : HS(&HS), Idx(Idx) {}
+
+ /// Only HeaderSearch is allowed to instantiate valid iterators.
+ friend HeaderSearch;
+
+ /// Enables const -> non-const conversion.
+ friend SearchDirIteratorImpl<!IsConst>;
+};
+} // namespace detail
+
+using ConstSearchDirIterator = detail::SearchDirIteratorImpl<true>;
+using SearchDirIterator = detail::SearchDirIteratorImpl<false>;
+
+using ConstSearchDirRange = llvm::iterator_range<ConstSearchDirIterator>;
+using SearchDirRange = llvm::iterator_range<SearchDirIterator>;
+
/// Encapsulates the information needed to find the file referenced
/// by a \#include or \#include_next, (sub-)framework lookup, etc.
class HeaderSearch {
friend class DirectoryLookup;
+ friend ConstSearchDirIterator;
+ friend SearchDirIterator;
+
/// Header-search options used to initialize this header search.
std::shared_ptr<HeaderSearchOptions> HSOpts;
+ /// Mapping from SearchDir to HeaderSearchOptions::UserEntries indices.
+ llvm::DenseMap<unsigned, unsigned> SearchDirToHSEntry;
+
DiagnosticsEngine &Diags;
FileManager &FileMgr;
/// \#include search path information. Requests for \#include "x" search the
/// directory of the \#including file first, then each directory in SearchDirs
/// consecutively. Requests for <x> search the current dir first, then each
- /// directory in SearchDirs, starting at AngledDirIdx, consecutively. If
- /// NoCurDirSearch is true, then the check for the file in the current
- /// directory is suppressed.
+ /// directory in SearchDirs, starting at AngledDirIdx, consecutively.
std::vector<DirectoryLookup> SearchDirs;
+ /// Whether the DirectoryLookup at the corresponding index in SearchDirs has
+ /// been successfully used to lookup a file.
+ std::vector<bool> SearchDirsUsage;
unsigned AngledDirIdx = 0;
unsigned SystemDirIdx = 0;
- bool NoCurDirSearch = false;
+
+ /// Maps HeaderMap keys to SearchDir indices. When HeaderMaps are used
+ /// heavily, SearchDirs can start with thousands of HeaderMaps, so this Index
+ /// lets us avoid scanning them all to find a match.
+ llvm::StringMap<unsigned, llvm::BumpPtrAllocator> SearchDirHeaderMapIndex;
+
+ /// The index of the first SearchDir that isn't a header map.
+ unsigned FirstNonHeaderMapSearchDirIdx = 0;
/// \#include prefixes for which the 'system header' property is
/// overridden.
@@ -206,13 +283,16 @@ class HeaderSearch {
/// Keeps track of each lookup performed by LookupFile.
struct LookupFileCacheInfo {
- /// Starting index in SearchDirs that the cached search was performed from.
- /// If there is a hit and this value doesn't match the current query, the
- /// cache has to be ignored.
- unsigned StartIdx = 0;
+ // The requesting module for the lookup we cached.
+ const Module *RequestingModule = nullptr;
+
+ /// Starting search directory iterator that the cached search was performed
+ /// from. If there is a hit and this value doesn't match the current query,
+ /// the cache has to be ignored.
+ ConstSearchDirIterator StartIt = nullptr;
- /// The entry in SearchDirs that satisfied the query.
- unsigned HitIdx = 0;
+ /// The search directory iterator that satisfied the query.
+ ConstSearchDirIterator HitIt = nullptr;
/// This is non-null if the original filename was mapped to a framework
/// include via a headermap.
@@ -221,9 +301,11 @@ class HeaderSearch {
/// Default constructor -- Initialize all members with zero.
LookupFileCacheInfo() = default;
- void reset(unsigned StartIdx) {
- this->StartIdx = StartIdx;
- this->MappedName = nullptr;
+ void reset(const Module *NewRequestingModule,
+ ConstSearchDirIterator NewStartIt) {
+ RequestingModule = NewRequestingModule;
+ StartIt = NewStartIt;
+ MappedName = nullptr;
}
};
llvm::StringMap<LookupFileCacheInfo, llvm::BumpPtrAllocator> LookupFileCache;
@@ -240,7 +322,7 @@ class HeaderSearch {
std::unique_ptr<IncludeAliasMap> IncludeAliases;
/// This is a mapping from FileEntry -> HeaderMap, uniquing headermaps.
- std::vector<std::pair<const FileEntry *, std::unique_ptr<HeaderMap>>> HeaderMaps;
+ std::vector<std::pair<FileEntryRef, std::unique_ptr<HeaderMap>>> HeaderMaps;
/// The mapping between modules and headers.
mutable ModuleMap ModMap;
@@ -252,6 +334,9 @@ class HeaderSearch {
/// whether they were valid or not.
llvm::DenseMap<const FileEntry *, bool> LoadedModuleMaps;
+ // A map of discovered headers with their associated include file name.
+ llvm::DenseMap<const FileEntry *, llvm::SmallString<64>> IncludeNames;
+
/// Uniqued set of framework names, which is used to track which
/// headers were included as framework headers.
llvm::StringSet<llvm::BumpPtrAllocator> FrameworkNames;
@@ -264,6 +349,10 @@ class HeaderSearch {
/// Entity used to look up stored header file information.
ExternalHeaderFileInfoSource *ExternalSource = nullptr;
+ /// Scan all of the header maps at the beginning of SearchDirs and
+ /// map their keys to the SearchDir index of their header map.
+ void indexInitialHeaderMaps();
+
public:
HeaderSearch(std::shared_ptr<HeaderSearchOptions> HSOpts,
SourceManager &SourceMgr, DiagnosticsEngine &Diags,
@@ -280,25 +369,17 @@ public:
DiagnosticsEngine &getDiags() const { return Diags; }
/// Interface for setting the file search paths.
- void SetSearchPaths(const std::vector<DirectoryLookup> &dirs,
- unsigned angledDirIdx, unsigned systemDirIdx,
- bool noCurDirSearch) {
- assert(angledDirIdx <= systemDirIdx && systemDirIdx <= dirs.size() &&
- "Directory indices are unordered");
- SearchDirs = dirs;
- AngledDirIdx = angledDirIdx;
- SystemDirIdx = systemDirIdx;
- NoCurDirSearch = noCurDirSearch;
- //LookupFileCache.clear();
- }
+ void SetSearchPaths(std::vector<DirectoryLookup> dirs, unsigned angledDirIdx,
+ unsigned systemDirIdx,
+ llvm::DenseMap<unsigned, unsigned> searchDirToHSEntry);
/// Add an additional search path.
- void AddSearchPath(const DirectoryLookup &dir, bool isAngled) {
- unsigned idx = isAngled ? SystemDirIdx : AngledDirIdx;
- SearchDirs.insert(SearchDirs.begin() + idx, dir);
- if (!isAngled)
- AngledDirIdx++;
- SystemDirIdx++;
+ void AddSearchPath(const DirectoryLookup &dir, bool isAngled);
+
+ /// Add an additional system search path.
+ void AddSystemSearchPath(const DirectoryLookup &dir) {
+ SearchDirs.push_back(dir);
+ SearchDirsUsage.push_back(false);
}
/// Set the list of system header prefixes.
@@ -409,14 +490,15 @@ public:
/// found in any of searched SearchDirs. Will be set to false if a framework
/// is found only through header maps. Doesn't guarantee the requested file is
/// found.
- Optional<FileEntryRef> LookupFile(
+ OptionalFileEntryRef LookupFile(
StringRef Filename, SourceLocation IncludeLoc, bool isAngled,
- const DirectoryLookup *FromDir, const DirectoryLookup *&CurDir,
- ArrayRef<std::pair<const FileEntry *, const DirectoryEntry *>> Includers,
+ ConstSearchDirIterator FromDir, ConstSearchDirIterator *CurDir,
+ ArrayRef<std::pair<OptionalFileEntryRef, DirectoryEntryRef>> Includers,
SmallVectorImpl<char> *SearchPath, SmallVectorImpl<char> *RelativePath,
Module *RequestingModule, ModuleMap::KnownHeader *SuggestedModule,
bool *IsMapped, bool *IsFrameworkFound, bool SkipCache = false,
- bool BuildSystemModule = false);
+ bool BuildSystemModule = false, bool OpenFile = true,
+ bool CacheFailures = true);
/// Look up a subframework for the specified \#include file.
///
@@ -424,8 +506,8 @@ public:
/// within ".../Carbon.framework/Headers/Carbon.h", check to see if
/// HIToolbox is a subframework within Carbon.framework. If so, return
/// the FileEntry for the designated file, otherwise return null.
- Optional<FileEntryRef> LookupSubframeworkHeader(
- StringRef Filename, const FileEntry *ContextFileEnt,
+ OptionalFileEntryRef LookupSubframeworkHeader(
+ StringRef Filename, FileEntryRef ContextFileEnt,
SmallVectorImpl<char> *SearchPath, SmallVectorImpl<char> *RelativePath,
Module *RequestingModule, ModuleMap::KnownHeader *SuggestedModule);
@@ -440,76 +522,63 @@ public:
///
/// \return false if \#including the file will have no effect or true
/// if we should include it.
- bool ShouldEnterIncludeFile(Preprocessor &PP, const FileEntry *File,
- bool isImport, bool ModulesEnabled,
- Module *M);
+ bool ShouldEnterIncludeFile(Preprocessor &PP, FileEntryRef File,
+ bool isImport, bool ModulesEnabled, Module *M,
+ bool &IsFirstIncludeOfFile);
/// Return whether the specified file is a normal header,
/// a system header, or a C++ friendly system header.
- SrcMgr::CharacteristicKind getFileDirFlavor(const FileEntry *File) {
+ SrcMgr::CharacteristicKind getFileDirFlavor(FileEntryRef File) {
return (SrcMgr::CharacteristicKind)getFileInfo(File).DirInfo;
}
- /// Mark the specified file as a "once only" file, e.g. due to
+ /// Mark the specified file as a "once only" file due to
/// \#pragma once.
- void MarkFileIncludeOnce(const FileEntry *File) {
+ void MarkFileIncludeOnce(FileEntryRef File) {
HeaderFileInfo &FI = getFileInfo(File);
- FI.isImport = true;
FI.isPragmaOnce = true;
}
/// Mark the specified file as a system header, e.g. due to
/// \#pragma GCC system_header.
- void MarkFileSystemHeader(const FileEntry *File) {
+ void MarkFileSystemHeader(FileEntryRef File) {
getFileInfo(File).DirInfo = SrcMgr::C_System;
}
- void AddFileAlias(const FileEntry *File, StringRef Alias) {
- getFileInfo(File).Aliases.insert(Alias);
- }
-
/// Mark the specified file as part of a module.
- void MarkFileModuleHeader(const FileEntry *FE,
- ModuleMap::ModuleHeaderRole Role,
+ void MarkFileModuleHeader(FileEntryRef FE, ModuleMap::ModuleHeaderRole Role,
bool isCompilingModuleHeader);
- /// Increment the count for the number of times the specified
- /// FileEntry has been entered.
- void IncrementIncludeCount(const FileEntry *File) {
- ++getFileInfo(File).NumIncludes;
- }
-
/// Mark the specified file as having a controlling macro.
///
/// This is used by the multiple-include optimization to eliminate
/// no-op \#includes.
- void SetFileControllingMacro(const FileEntry *File,
+ void SetFileControllingMacro(FileEntryRef File,
const IdentifierInfo *ControllingMacro) {
getFileInfo(File).ControllingMacro = ControllingMacro;
}
- /// Return true if this is the first time encountering this header.
- bool FirstTimeLexingFile(const FileEntry *File) {
- return getFileInfo(File).NumIncludes == 1;
- }
-
/// Determine whether this file is intended to be safe from
/// multiple inclusions, e.g., it has \#pragma once or a controlling
/// macro.
///
/// This routine does not consider the effect of \#import
- bool isFileMultipleIncludeGuarded(const FileEntry *File);
+ bool isFileMultipleIncludeGuarded(FileEntryRef File) const;
- /// Determine whether the given file is known to have ever been \#imported
- /// (or if it has been \#included and we've encountered a \#pragma once).
- bool hasFileBeenImported(const FileEntry *File) {
+ /// Determine whether the given file is known to have ever been \#imported.
+ bool hasFileBeenImported(FileEntryRef File) const {
const HeaderFileInfo *FI = getExistingFileInfo(File);
return FI && FI->isImport;
}
+ /// Determine which HeaderSearchOptions::UserEntries have been successfully
+ /// used so far and mark their index with 'true' in the resulting bit vector.
+ /// Note: implicit module maps don't contribute to entry usage.
+ std::vector<bool> computeUserEntryUsage() const;
+
/// This method returns a HeaderMap for the specified
/// FileEntry, uniquing them through the 'HeaderMaps' datastructure.
- const HeaderMap *CreateHeaderMap(const FileEntry *FE);
+ const HeaderMap *CreateHeaderMap(FileEntryRef FE);
/// Get filenames for all registered header maps.
void getHeaderMapFileNames(SmallVectorImpl<std::string> &Names) const;
@@ -562,6 +631,8 @@ public:
///
/// \param ModuleName The name of the module we're looking for.
///
+ /// \param ImportLoc Location of the module include/import.
+ ///
/// \param AllowSearch Whether we are allowed to search in the various
/// search directories to produce a module definition. If not, this lookup
/// will only return an already-known module.
@@ -570,13 +641,15 @@ public:
/// in subdirectories.
///
/// \returns The module with the given name.
- Module *lookupModule(StringRef ModuleName, bool AllowSearch = true,
+ Module *lookupModule(StringRef ModuleName,
+ SourceLocation ImportLoc = SourceLocation(),
+ bool AllowSearch = true,
bool AllowExtraModuleMapSearch = false);
/// Try to find a module map file in the given directory, returning
- /// \c nullptr if none is found.
- const FileEntry *lookupModuleMapFile(const DirectoryEntry *Dir,
- bool IsFramework);
+ /// \c nullopt if none is found.
+ OptionalFileEntryRef lookupModuleMapFile(DirectoryEntryRef Dir,
+ bool IsFramework);
/// Determine whether there is a module map that may map the header
/// with the given file name to a (sub)module.
@@ -596,14 +669,20 @@ public:
///
/// \param File The header that we wish to map to a module.
/// \param AllowTextual Whether we want to find textual headers too.
- ModuleMap::KnownHeader findModuleForHeader(const FileEntry *File,
- bool AllowTextual = false) const;
+ ModuleMap::KnownHeader findModuleForHeader(FileEntryRef File,
+ bool AllowTextual = false,
+ bool AllowExcluded = false) const;
/// Retrieve all the modules corresponding to the given file.
///
/// \ref findModuleForHeader should typically be used instead of this.
ArrayRef<ModuleMap::KnownHeader>
- findAllModulesForHeader(const FileEntry *File) const;
+ findAllModulesForHeader(FileEntryRef File) const;
+
+ /// Like \ref findAllModulesForHeader, but do not attempt to infer module
+ /// ownership from umbrella headers if we've not already done so.
+ ArrayRef<ModuleMap::KnownHeader>
+ findResolvedModulesForHeader(FileEntryRef File) const;
/// Read the contents of the given module map file.
///
@@ -618,8 +697,8 @@ public:
/// used to resolve paths within the module (this is required when
/// building the module from preprocessed source).
/// \returns true if an error occurred, false otherwise.
- bool loadModuleMapFile(const FileEntry *File, bool IsSystem,
- FileID ID = FileID(), unsigned *Offset = nullptr,
+ bool loadModuleMapFile(FileEntryRef File, bool IsSystem, FileID ID = FileID(),
+ unsigned *Offset = nullptr,
StringRef OriginalModuleMapFile = StringRef());
/// Collect the set of all known, top-level modules.
@@ -640,11 +719,14 @@ private:
/// but for compatibility with some buggy frameworks, additional attempts
/// may be made to find the module under a related-but-different search-name.
///
+ /// \param ImportLoc Location of the module include/import.
+ ///
/// \param AllowExtraModuleMapSearch Whether we allow to search modulemaps
/// in subdirectories.
///
/// \returns The module named ModuleName.
Module *lookupModule(StringRef ModuleName, StringRef SearchName,
+ SourceLocation ImportLoc,
bool AllowExtraModuleMapSearch = false);
/// Retrieve the name of the (to-be-)cached module file that should
@@ -674,8 +756,7 @@ private:
/// frameworks.
///
/// \returns The module, if found; otherwise, null.
- Module *loadFrameworkModule(StringRef Name,
- const DirectoryEntry *Dir,
+ Module *loadFrameworkModule(StringRef Name, DirectoryEntryRef Dir,
bool IsSystem);
/// Load all of the module maps within the immediate subdirectories
@@ -686,8 +767,7 @@ private:
///
/// \return \c true if the file can be used, \c false if we are not permitted to
/// find this file due to requirements from \p RequestingModule.
- bool findUsableModuleForHeader(const FileEntry *File,
- const DirectoryEntry *Root,
+ bool findUsableModuleForHeader(FileEntryRef File, const DirectoryEntry *Root,
Module *RequestingModule,
ModuleMap::KnownHeader *SuggestedModule,
bool IsSystemHeaderDir);
@@ -698,16 +778,27 @@ private:
/// \return \c true if the file can be used, \c false if we are not permitted to
/// find this file due to requirements from \p RequestingModule.
bool findUsableModuleForFrameworkHeader(
- const FileEntry *File, StringRef FrameworkName, Module *RequestingModule,
+ FileEntryRef File, StringRef FrameworkName, Module *RequestingModule,
ModuleMap::KnownHeader *SuggestedModule, bool IsSystemFramework);
/// Look up the file with the specified name and determine its owning
/// module.
- Optional<FileEntryRef>
+ OptionalFileEntryRef
getFileAndSuggestModule(StringRef FileName, SourceLocation IncludeLoc,
const DirectoryEntry *Dir, bool IsSystemHeaderDir,
Module *RequestingModule,
- ModuleMap::KnownHeader *SuggestedModule);
+ ModuleMap::KnownHeader *SuggestedModule,
+ bool OpenFile = true, bool CacheFailures = true);
+
+ /// Cache the result of a successful lookup at the given include location
+ /// using the search path at \c HitIt.
+ void cacheLookupSuccess(LookupFileCacheInfo &CacheLookup,
+ ConstSearchDirIterator HitIt,
+ SourceLocation IncludeLoc);
+
+ /// Note that a lookup at the given include location was successful using the
+ /// search path at index `HitIdx`.
+ void noteLookupUsage(unsigned HitIdx, SourceLocation IncludeLoc);
public:
/// Retrieve the module map.
@@ -720,47 +811,61 @@ public:
/// Return the HeaderFileInfo structure for the specified FileEntry,
/// in preparation for updating it in some way.
- HeaderFileInfo &getFileInfo(const FileEntry *FE);
+ HeaderFileInfo &getFileInfo(FileEntryRef FE);
/// Return the HeaderFileInfo structure for the specified FileEntry,
/// if it has ever been filled in.
/// \param WantExternal Whether the caller wants purely-external header file
/// info (where \p External is true).
- const HeaderFileInfo *getExistingFileInfo(const FileEntry *FE,
+ const HeaderFileInfo *getExistingFileInfo(FileEntryRef FE,
bool WantExternal = true) const;
- // Used by external tools
- using search_dir_iterator = std::vector<DirectoryLookup>::const_iterator;
-
- search_dir_iterator search_dir_begin() const { return SearchDirs.begin(); }
- search_dir_iterator search_dir_end() const { return SearchDirs.end(); }
- unsigned search_dir_size() const { return SearchDirs.size(); }
-
- search_dir_iterator quoted_dir_begin() const {
- return SearchDirs.begin();
+ SearchDirIterator search_dir_begin() { return {*this, 0}; }
+ SearchDirIterator search_dir_end() { return {*this, SearchDirs.size()}; }
+ SearchDirRange search_dir_range() {
+ return {search_dir_begin(), search_dir_end()};
}
- search_dir_iterator quoted_dir_end() const {
- return SearchDirs.begin() + AngledDirIdx;
+ ConstSearchDirIterator search_dir_begin() const { return quoted_dir_begin(); }
+ ConstSearchDirIterator search_dir_nth(size_t n) const {
+ assert(n < SearchDirs.size());
+ return {*this, n};
}
-
- search_dir_iterator angled_dir_begin() const {
- return SearchDirs.begin() + AngledDirIdx;
+ ConstSearchDirIterator search_dir_end() const { return system_dir_end(); }
+ ConstSearchDirRange search_dir_range() const {
+ return {search_dir_begin(), search_dir_end()};
}
- search_dir_iterator angled_dir_end() const {
- return SearchDirs.begin() + SystemDirIdx;
+ unsigned search_dir_size() const { return SearchDirs.size(); }
+
+ ConstSearchDirIterator quoted_dir_begin() const { return {*this, 0}; }
+ ConstSearchDirIterator quoted_dir_end() const { return angled_dir_begin(); }
+
+ ConstSearchDirIterator angled_dir_begin() const {
+ return {*this, AngledDirIdx};
}
+ ConstSearchDirIterator angled_dir_end() const { return system_dir_begin(); }
- search_dir_iterator system_dir_begin() const {
- return SearchDirs.begin() + SystemDirIdx;
+ ConstSearchDirIterator system_dir_begin() const {
+ return {*this, SystemDirIdx};
+ }
+ ConstSearchDirIterator system_dir_end() const {
+ return {*this, SearchDirs.size()};
}
- search_dir_iterator system_dir_end() const { return SearchDirs.end(); }
+ /// Get the index of the given search directory.
+ unsigned searchDirIdx(const DirectoryLookup &DL) const;
/// Retrieve a uniqued framework name.
StringRef getUniqueFrameworkName(StringRef Framework);
+ /// Retrieve the include name for the header.
+ ///
+ /// \param File The entry for a given header.
+ /// \returns The name of how the file was included when the header's location
+ /// was resolved.
+ StringRef getIncludeNameForHeader(const FileEntry *File) const;
+
/// Suggest a path by which the specified file could be found, for use in
/// diagnostics to suggest a #include. Returned path will only contain forward
/// slashes as separators. MainFile is the absolute path of the file that we
@@ -768,11 +873,11 @@ public:
/// MainFile location, if none of the include search directories were prefix
/// of File.
///
- /// \param IsSystem If non-null, filled in to indicate whether the suggested
- /// path is relative to a system header directory.
- std::string suggestPathToFileForDiagnostics(const FileEntry *File,
+ /// \param IsAngled If non-null, filled in to indicate whether the suggested
+ /// path should be referenced as <Header.h> instead of "Header.h".
+ std::string suggestPathToFileForDiagnostics(FileEntryRef File,
llvm::StringRef MainFile,
- bool *IsSystem = nullptr);
+ bool *IsAngled = nullptr) const;
/// Suggest a path by which the specified file could be found, for use in
/// diagnostics to suggest a #include. Returned path will only contain forward
@@ -786,7 +891,7 @@ public:
std::string suggestPathToFileForDiagnostics(llvm::StringRef File,
llvm::StringRef WorkingDir,
llvm::StringRef MainFile,
- bool *IsSystem = nullptr);
+ bool *IsAngled = nullptr) const;
void PrintStats();
@@ -809,9 +914,8 @@ private:
LMM_InvalidModuleMap
};
- LoadModuleMapResult loadModuleMapFileImpl(const FileEntry *File,
- bool IsSystem,
- const DirectoryEntry *Dir,
+ LoadModuleMapResult loadModuleMapFileImpl(FileEntryRef File, bool IsSystem,
+ DirectoryEntryRef Dir,
FileID ID = FileID(),
unsigned *Offset = nullptr);
@@ -835,10 +939,16 @@ private:
///
/// \returns The result of attempting to load the module map file from the
/// named directory.
- LoadModuleMapResult loadModuleMapFile(const DirectoryEntry *Dir,
- bool IsSystem, bool IsFramework);
+ LoadModuleMapResult loadModuleMapFile(DirectoryEntryRef Dir, bool IsSystem,
+ bool IsFramework);
};
+/// Apply the header search options to get given HeaderSearch object.
+void ApplyHeaderSearchOptions(HeaderSearch &HS,
+ const HeaderSearchOptions &HSOpts,
+ const LangOptions &Lang,
+ const llvm::Triple &triple);
+
} // namespace clang
#endif // LLVM_CLANG_LEX_HEADERSEARCH_H
diff --git a/contrib/llvm-project/clang/include/clang/Lex/HeaderSearchOptions.h b/contrib/llvm-project/clang/include/clang/Lex/HeaderSearchOptions.h
index 42f3cff8c57a..fa2d0b502d72 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/HeaderSearchOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/HeaderSearchOptions.h
@@ -14,10 +14,11 @@
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/HashBuilder.h"
#include <cstdint>
+#include <map>
#include <string>
#include <vector>
-#include <map>
namespace clang {
@@ -69,11 +70,13 @@ public:
struct Entry {
std::string Path;
frontend::IncludeDirGroup Group;
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsFramework : 1;
/// IgnoreSysRoot - This is false if an absolute path should be treated
/// relative to the sysroot, or true if it should always be the absolute
/// path.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IgnoreSysRoot : 1;
Entry(StringRef path, frontend::IncludeDirGroup group, bool isFramework,
@@ -127,10 +130,12 @@ public:
/// module cache.
///
/// Note: Only used for testing!
+ LLVM_PREFERRED_TYPE(bool)
unsigned DisableModuleHash : 1;
/// Implicit module maps. This option is enabld by default when
/// modules is enabled.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ImplicitModuleMaps : 1;
/// Set the 'home directory' of a module map file to the current
@@ -140,10 +145,19 @@ public:
//
/// The home directory is where we look for files named in the module map
/// file.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ModuleMapFileHomeIsCwd : 1;
+ /// Set the base path of a built module file to be the current working
+ /// directory. This is useful for sharing module files across machines
+ /// that build with different paths without having to rewrite all
+ /// modulemap files to have working directory relative paths.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned ModuleFileHomeIsCwd : 1;
+
/// Also search for prebuilt implicit modules in the prebuilt module cache
/// path.
+ LLVM_PREFERRED_TYPE(bool)
unsigned EnablePrebuiltImplicitModules : 1;
/// The interval (in seconds) between pruning operations.
@@ -178,37 +192,67 @@ public:
std::vector<std::string> VFSOverlayFiles;
/// Include the compiler builtin includes.
+ LLVM_PREFERRED_TYPE(bool)
unsigned UseBuiltinIncludes : 1;
/// Include the system standard include search directories.
+ LLVM_PREFERRED_TYPE(bool)
unsigned UseStandardSystemIncludes : 1;
/// Include the system standard C++ library include search directories.
+ LLVM_PREFERRED_TYPE(bool)
unsigned UseStandardCXXIncludes : 1;
/// Use libc++ instead of the default libstdc++.
+ LLVM_PREFERRED_TYPE(bool)
unsigned UseLibcxx : 1;
/// Whether header search information should be output as for -v.
+ LLVM_PREFERRED_TYPE(bool)
unsigned Verbose : 1;
/// If true, skip verifying input files used by modules if the
/// module was already verified during this build session (see
/// \c BuildSessionTimestamp).
+ LLVM_PREFERRED_TYPE(bool)
unsigned ModulesValidateOncePerBuildSession : 1;
/// Whether to validate system input files when a module is loaded.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ModulesValidateSystemHeaders : 1;
// Whether the content of input files should be hashed and used to
// validate consistency.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ValidateASTInputFilesContent : 1;
+ // Whether the input files from C++20 Modules should be checked.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned ForceCheckCXX20ModulesInputFiles : 1;
+
/// Whether the module includes debug information (-gmodules).
+ LLVM_PREFERRED_TYPE(bool)
unsigned UseDebugInfo : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned ModulesValidateDiagnosticOptions : 1;
+ /// Whether to entirely skip writing diagnostic options.
+ /// Primarily used to speed up deserialization during dependency scanning.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned ModulesSkipDiagnosticOptions : 1;
+
+ /// Whether to entirely skip writing header search paths.
+ /// Primarily used to speed up deserialization during dependency scanning.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned ModulesSkipHeaderSearchPaths : 1;
+
+ /// Whether to entirely skip writing pragma diagnostic mappings.
+ /// Primarily used to speed up deserialization during dependency scanning.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned ModulesSkipPragmaDiagnosticMappings : 1;
+
+ LLVM_PREFERRED_TYPE(bool)
unsigned ModulesHashContent : 1;
/// Whether we should include all things that could impact the module in the
@@ -216,18 +260,23 @@ public:
///
/// This includes things like the full header search path, and enabled
/// diagnostics.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ModulesStrictContextHash : 1;
HeaderSearchOptions(StringRef _Sysroot = "/")
: Sysroot(_Sysroot), ModuleFormat("raw"), DisableModuleHash(false),
ImplicitModuleMaps(false), ModuleMapFileHomeIsCwd(false),
- EnablePrebuiltImplicitModules(false), UseBuiltinIncludes(true),
- UseStandardSystemIncludes(true), UseStandardCXXIncludes(true),
- UseLibcxx(false), Verbose(false),
+ ModuleFileHomeIsCwd(false), EnablePrebuiltImplicitModules(false),
+ UseBuiltinIncludes(true), UseStandardSystemIncludes(true),
+ UseStandardCXXIncludes(true), UseLibcxx(false), Verbose(false),
ModulesValidateOncePerBuildSession(false),
ModulesValidateSystemHeaders(false),
- ValidateASTInputFilesContent(false), UseDebugInfo(false),
- ModulesValidateDiagnosticOptions(true), ModulesHashContent(false),
+ ValidateASTInputFilesContent(false),
+ ForceCheckCXX20ModulesInputFiles(false), UseDebugInfo(false),
+ ModulesValidateDiagnosticOptions(true),
+ ModulesSkipDiagnosticOptions(false),
+ ModulesSkipHeaderSearchPaths(false),
+ ModulesSkipPragmaDiagnosticMappings(false), ModulesHashContent(false),
ModulesStrictContextHash(false) {}
/// AddPath - Add the \p Path path to the specified \p Group list.
@@ -256,11 +305,23 @@ inline llvm::hash_code hash_value(const HeaderSearchOptions::Entry &E) {
return llvm::hash_combine(E.Path, E.Group, E.IsFramework, E.IgnoreSysRoot);
}
+template <typename HasherT, llvm::endianness Endianness>
+inline void addHash(llvm::HashBuilder<HasherT, Endianness> &HBuilder,
+ const HeaderSearchOptions::Entry &E) {
+ HBuilder.add(E.Path, E.Group, E.IsFramework, E.IgnoreSysRoot);
+}
+
inline llvm::hash_code
hash_value(const HeaderSearchOptions::SystemHeaderPrefix &SHP) {
return llvm::hash_combine(SHP.Prefix, SHP.IsSystemHeader);
}
+template <typename HasherT, llvm::endianness Endianness>
+inline void addHash(llvm::HashBuilder<HasherT, Endianness> &HBuilder,
+ const HeaderSearchOptions::SystemHeaderPrefix &SHP) {
+ HBuilder.add(SHP.Prefix, SHP.IsSystemHeader);
+}
+
} // namespace clang
#endif // LLVM_CLANG_LEX_HEADERSEARCHOPTIONS_H
diff --git a/contrib/llvm-project/clang/include/clang/Lex/Lexer.h b/contrib/llvm-project/clang/include/clang/Lex/Lexer.h
index a291520ae5ca..b6ecc7e5ded9 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/Lexer.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/Lexer.h
@@ -16,13 +16,14 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/TokenKinds.h"
+#include "clang/Lex/DependencyDirectivesScanner.h"
#include "clang/Lex/PreprocessorLexer.h"
#include "clang/Lex/Token.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>
#include <cstdint>
+#include <optional>
#include <string>
namespace llvm {
@@ -36,6 +37,7 @@ namespace clang {
class DiagnosticBuilder;
class Preprocessor;
class SourceManager;
+class LangOptions;
/// ConflictMarkerKind - Kinds of conflict marker which the lexer might be
/// recovering from.
@@ -90,8 +92,18 @@ class Lexer : public PreprocessorLexer {
// Location for start of file.
SourceLocation FileLoc;
- // LangOpts enabled by this language (cache).
- LangOptions LangOpts;
+ // LangOpts enabled by this language.
+ // Storing LangOptions as reference here is important from performance point
+ // of view. Lack of reference means that LangOptions copy constructor would be
+ // called by Lexer(..., const LangOptions &LangOpts,...). Given that local
+ // Lexer objects are created thousands times (in Lexer::getRawToken,
+ // Preprocessor::EnterSourceFile and other places) during single module
+ // processing in frontend it would make std::vector<std::string> copy
+ // constructors surprisingly hot.
+ const LangOptions &LangOpts;
+
+ // True if '//' line comments are enabled.
+ bool LineComment;
// True if lexer for _Pragma handling.
bool Is_PragmaLexer;
@@ -128,6 +140,9 @@ class Lexer : public PreprocessorLexer {
bool HasLeadingEmptyMacro;
+ /// True if this is the first time we're lexing the input file.
+ bool IsFirstTimeLexingFile;
+
// NewLinePtr - A pointer to new line character '\n' being lexed. For '\r\n',
// it also points to '\n.'
const char *NewLinePtr;
@@ -135,6 +150,13 @@ class Lexer : public PreprocessorLexer {
// CurrentConflictMarkerState - The kind of conflict marker we are handling.
ConflictMarkerKind CurrentConflictMarkerState;
+ /// Non-empty if this \p Lexer is \p isDependencyDirectivesLexer().
+ ArrayRef<dependency_directives_scan::Directive> DepDirectives;
+
+ /// If this \p Lexer is \p isDependencyDirectivesLexer(), it represents the
+ /// next token to use from the current dependency directive.
+ unsigned NextDepDirectiveTokenIndex = 0;
+
void InitLexer(const char *BufStart, const char *BufPtr, const char *BufEnd);
public:
@@ -142,19 +164,22 @@ public:
/// with the specified preprocessor managing the lexing process. This lexer
/// assumes that the associated file buffer and Preprocessor objects will
/// outlive it, so it doesn't take ownership of either of them.
- Lexer(FileID FID, const llvm::MemoryBufferRef &InputFile, Preprocessor &PP);
+ Lexer(FileID FID, const llvm::MemoryBufferRef &InputFile, Preprocessor &PP,
+ bool IsFirstIncludeOfFile = true);
/// Lexer constructor - Create a new raw lexer object. This object is only
/// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the
/// text range will outlive it, so it doesn't take ownership of it.
Lexer(SourceLocation FileLoc, const LangOptions &LangOpts,
- const char *BufStart, const char *BufPtr, const char *BufEnd);
+ const char *BufStart, const char *BufPtr, const char *BufEnd,
+ bool IsFirstIncludeOfFile = true);
/// Lexer constructor - Create a new raw lexer object. This object is only
/// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the
/// text range will outlive it, so it doesn't take ownership of it.
Lexer(FileID FID, const llvm::MemoryBufferRef &FromFile,
- const SourceManager &SM, const LangOptions &LangOpts);
+ const SourceManager &SM, const LangOptions &LangOpts,
+ bool IsFirstIncludeOfFile = true);
Lexer(const Lexer &) = delete;
Lexer &operator=(const Lexer &) = delete;
@@ -167,21 +192,34 @@ public:
SourceLocation ExpansionLocEnd,
unsigned TokLen, Preprocessor &PP);
- /// getLangOpts - Return the language features currently enabled.
- /// NOTE: this lexer modifies features as a file is parsed!
- const LangOptions &getLangOpts() const { return LangOpts; }
-
/// getFileLoc - Return the File Location for the file we are lexing out of.
/// The physical location encodes the location where the characters come from,
/// the virtual location encodes where we should *claim* the characters came
/// from. Currently this is only used by _Pragma handling.
SourceLocation getFileLoc() const { return FileLoc; }
-private:
/// Lex - Return the next token in the file. If this is the end of file, it
/// return the tok::eof token. This implicitly involves the preprocessor.
bool Lex(Token &Result);
+private:
+ /// Called when the preprocessor is in 'dependency scanning lexing mode'.
+ bool LexDependencyDirectiveToken(Token &Result);
+
+ /// Called when the preprocessor is in 'dependency scanning lexing mode' and
+ /// is skipping a conditional block.
+ bool LexDependencyDirectiveTokenWhileSkipping(Token &Result);
+
+ /// True when the preprocessor is in 'dependency scanning lexing mode' and
+ /// created this \p Lexer for lexing a set of dependency directive tokens.
+ bool isDependencyDirectivesLexer() const { return !DepDirectives.empty(); }
+
+ /// Initializes \p Result with data from \p DDTok and advances \p BufferPtr to
+ /// the position just after the token.
+ /// \returns the buffer pointer at the beginning of the token.
+ const char *convertDependencyDirectiveToken(
+ const dependency_directives_scan::Token &DDTok, Token &Result);
+
public:
/// isPragmaLexer - Returns true if this Lexer is being used to lex a pragma.
bool isPragmaLexer() const { return Is_PragmaLexer; }
@@ -275,14 +313,8 @@ public:
return BufferPtr - BufferStart;
}
- /// Skip over \p NumBytes bytes.
- ///
- /// If the skip is successful, the next token will be lexed from the new
- /// offset. The lexer also assumes that we skipped to the start of the line.
- ///
- /// \returns true if the skip failed (new offset would have been past the
- /// end of the buffer), false otherwise.
- bool skipOver(unsigned NumBytes);
+ /// Set the lexer's buffer pointer to \p Offset.
+ void seek(unsigned Offset, bool IsAtStartOfLine);
/// Stringify - Convert the specified string into a C string by i) escaping
/// '\\' and " characters and ii) replacing newline character(s) with "\\n".
@@ -519,10 +551,10 @@ public:
/// Finds the token that comes right after the given location.
///
- /// Returns the next token, or none if the location is inside a macro.
- static Optional<Token> findNextToken(SourceLocation Loc,
- const SourceManager &SM,
- const LangOptions &LangOpts);
+ /// Returns the next token, or std::nullopt if the location is inside a macro.
+ static std::optional<Token> findNextToken(SourceLocation Loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts);
/// Checks that the given token is the first token that occurs after
/// the given location (this excludes comments and whitespace). Returns the
@@ -536,25 +568,30 @@ public:
bool SkipTrailingWhitespaceAndNewLine);
/// Returns true if the given character could appear in an identifier.
- static bool isIdentifierBodyChar(char c, const LangOptions &LangOpts);
+ static bool isAsciiIdentifierContinueChar(char c,
+ const LangOptions &LangOpts);
/// Checks whether new line pointed by Str is preceded by escape
/// sequence.
static bool isNewLineEscaped(const char *BufferStart, const char *Str);
+ /// Represents a char and the number of bytes parsed to produce it.
+ struct SizedChar {
+ char Char;
+ unsigned Size;
+ };
+
/// getCharAndSizeNoWarn - Like the getCharAndSize method, but does not ever
/// emit a warning.
- static inline char getCharAndSizeNoWarn(const char *Ptr, unsigned &Size,
- const LangOptions &LangOpts) {
+ static inline SizedChar getCharAndSizeNoWarn(const char *Ptr,
+ const LangOptions &LangOpts) {
// If this is not a trigraph and not a UCN or escaped newline, return
// quickly.
if (isObviouslySimpleCharacter(Ptr[0])) {
- Size = 1;
- return *Ptr;
+ return {*Ptr, 1u};
}
- Size = 0;
- return getCharAndSizeSlowNoWarn(Ptr, Size, LangOpts);
+ return getCharAndSizeSlowNoWarn(Ptr, LangOpts);
}
/// Returns the leading whitespace for line that corresponds to the given
@@ -562,6 +599,9 @@ public:
static StringRef getIndentationForLine(SourceLocation Loc,
const SourceManager &SM);
+ /// Check if this is the first time we're lexing the input file.
+ bool isFirstTimeLexingFile() const { return IsFirstTimeLexingFile; }
+
private:
//===--------------------------------------------------------------------===//
// Internal implementation interfaces.
@@ -573,10 +613,7 @@ private:
bool CheckUnicodeWhitespace(Token &Result, uint32_t C, const char *CurPtr);
- /// Given that a token begins with the Unicode character \p C, figure out
- /// what kind of token it is and dispatch to the appropriate lexing helper
- /// function.
- bool LexUnicode(Token &Result, uint32_t C, const char *CurPtr);
+ bool LexUnicodeIdentifierStart(Token &Result, uint32_t C, const char *CurPtr);
/// FormTokenWithChars - When we lex a token, we have identified a span
/// starting at BufferPtr, going to TokEnd that forms the token. This method
@@ -632,8 +669,7 @@ private:
// quickly.
if (isObviouslySimpleCharacter(Ptr[0])) return *Ptr++;
- unsigned Size = 0;
- char C = getCharAndSizeSlow(Ptr, Size, &Tok);
+ auto [C, Size] = getCharAndSizeSlow(Ptr, &Tok);
Ptr += Size;
return C;
}
@@ -649,9 +685,7 @@ private:
// Otherwise, re-lex the character with a current token, allowing
// diagnostics to be emitted and flags to be set.
- Size = 0;
- getCharAndSizeSlow(Ptr, Size, &Tok);
- return Ptr+Size;
+ return Ptr + getCharAndSizeSlow(Ptr, &Tok).Size;
}
/// getCharAndSize - Peek a single 'character' from the specified buffer,
@@ -666,14 +700,14 @@ private:
return *Ptr;
}
- Size = 0;
- return getCharAndSizeSlow(Ptr, Size);
+ auto CharAndSize = getCharAndSizeSlow(Ptr);
+ Size = CharAndSize.Size;
+ return CharAndSize.Char;
}
/// getCharAndSizeSlow - Handle the slow/uncommon case of the getCharAndSize
/// method.
- char getCharAndSizeSlow(const char *Ptr, unsigned &Size,
- Token *Tok = nullptr);
+ SizedChar getCharAndSizeSlow(const char *Ptr, Token *Tok = nullptr);
/// getEscapedNewLineSize - Return the size of the specified escaped newline,
/// or 0 if it is not an escaped newline. P[-1] is known to be a "\" on entry
@@ -687,8 +721,8 @@ private:
/// getCharAndSizeSlowNoWarn - Same as getCharAndSizeSlow, but never emits a
/// diagnostic.
- static char getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
- const LangOptions &LangOpts);
+ static SizedChar getCharAndSizeSlowNoWarn(const char *Ptr,
+ const LangOptions &LangOpts);
//===--------------------------------------------------------------------===//
// Other lexer functions.
@@ -701,7 +735,11 @@ private:
bool IsStringLiteral);
// Helper functions to lex the remainder of a token of the specific type.
- bool LexIdentifier (Token &Result, const char *CurPtr);
+
+ // This function handles both ASCII and Unicode identifiers after
+ // the first codepoint of the identifyier has been parsed.
+ bool LexIdentifierContinue(Token &Result, const char *CurPtr);
+
bool LexNumericConstant (Token &Result, const char *CurPtr);
bool LexStringLiteral (Token &Result, const char *CurPtr,
tok::TokenKind Kind);
@@ -732,10 +770,15 @@ private:
void codeCompleteIncludedFile(const char *PathStart,
const char *CompletionPoint, bool IsAngled);
+ std::optional<uint32_t>
+ tryReadNumericUCN(const char *&StartPtr, const char *SlashLoc, Token *Result);
+ std::optional<uint32_t> tryReadNamedUCN(const char *&StartPtr,
+ const char *SlashLoc, Token *Result);
+
/// Read a universal character name.
///
/// \param StartPtr The position in the source buffer after the initial '\'.
- /// If the UCN is syntactically well-formed (but not
+ /// If the UCN is syntactically well-formed (but not
/// necessarily valid), this parameter will be updated to
/// point to the character after the UCN.
/// \param SlashLoc The position in the source buffer of the '\'.
@@ -763,9 +806,10 @@ private:
/// Try to consume an identifier character encoded in UTF-8.
/// \param CurPtr Points to the start of the (potential) UTF-8 code unit
/// sequence. On success, updated to point past the end of it.
+ /// \param Result The token being formed.
/// \return \c true if a UTF-8 sequence mapping to an acceptable identifier
/// character was lexed, \c false otherwise.
- bool tryConsumeIdentifierUTF8Char(const char *&CurPtr);
+ bool tryConsumeIdentifierUTF8Char(const char *&CurPtr, Token &Result);
};
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Lex/LiteralSupport.h b/contrib/llvm-project/clang/include/clang/Lex/LiteralSupport.h
index f131f045a73e..643ddbdad8c8 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/LiteralSupport.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/LiteralSupport.h
@@ -36,6 +36,15 @@ class LangOptions;
/// Copy characters from Input to Buf, expanding any UCNs.
void expandUCNs(SmallVectorImpl<char> &Buf, StringRef Input);
+/// Return true if the token corresponds to a function local predefined macro,
+/// which expands to a string literal, that can be concatenated with other
+/// string literals (only in Microsoft mode).
+bool isFunctionLocalStringLiteralMacro(tok::TokenKind K, const LangOptions &LO);
+
+/// Return true if the token is a string literal, or a function local
+/// predefined macro, which expands to a string literal.
+bool tokenIsLikeStringLiteral(const Token &Tok, const LangOptions &LO);
+
/// NumericLiteralParser - This performs strict semantic analysis of the content
/// of a ppnumber, classifying it as either integer, floating, or erroneous,
/// determines the radix of the value and can convert it to a useful value.
@@ -63,16 +72,17 @@ public:
bool isUnsigned : 1;
bool isLong : 1; // This is *not* set for long long.
bool isLongLong : 1;
- bool isSizeT : 1; // 1z, 1uz (C++2b)
+ bool isSizeT : 1; // 1z, 1uz (C++23)
bool isHalf : 1; // 1.0h
bool isFloat : 1; // 1.0f
bool isImaginary : 1; // 1.0i
bool isFloat16 : 1; // 1.0f16
bool isFloat128 : 1; // 1.0q
- uint8_t MicrosoftInteger; // Microsoft suffix extension i8, i16, i32, or i64.
-
bool isFract : 1; // 1.0hr/r/lr/uhr/ur/ulr
bool isAccum : 1; // 1.0hk/k/lk/uhk/uk/ulk
+ bool isBitInt : 1; // 1wb, 1uwb (C23)
+ uint8_t MicrosoftInteger; // Microsoft suffix extension i8, i16, i32, or i64.
+
bool isFixedPointLiteral() const {
return (saw_period || saw_exponent) && saw_fixed_point_suffix;
@@ -120,6 +130,13 @@ public:
/// calculating the digit sequence of the exponent.
bool GetFixedPointValue(llvm::APInt &StoreVal, unsigned Scale);
+ /// Get the digits that comprise the literal. This excludes any prefix or
+ /// suffix associated with the literal.
+ StringRef getLiteralDigits() const {
+ assert(!hadError && "cannot reliably get the literal digits with an error");
+ return StringRef(DigitsBegin, SuffixBegin - DigitsBegin);
+ }
+
private:
void ParseNumberStartingWithZero(SourceLocation TokLoc);
@@ -190,7 +207,7 @@ public:
tok::TokenKind kind);
bool hadError() const { return HadError; }
- bool isAscii() const { return Kind == tok::char_constant; }
+ bool isOrdinary() const { return Kind == tok::char_constant; }
bool isWide() const { return Kind == tok::wide_char_constant; }
bool isUTF8() const { return Kind == tok::utf8_char_constant; }
bool isUTF16() const { return Kind == tok::utf16_char_constant; }
@@ -204,6 +221,11 @@ public:
}
};
+enum class StringLiteralEvalMethod {
+ Evaluated,
+ Unevaluated,
+};
+
/// StringLiteralParser - This decodes string escape characters and performs
/// wide string analysis and Translation Phase #6 (concatenation of string
/// literals) (C99 5.1.1.2p1).
@@ -222,20 +244,23 @@ class StringLiteralParser {
SmallString<32> UDSuffixBuf;
unsigned UDSuffixToken;
unsigned UDSuffixOffset;
+ StringLiteralEvalMethod EvalMethod;
+
public:
- StringLiteralParser(ArrayRef<Token> StringToks,
- Preprocessor &PP, bool Complain = true);
- StringLiteralParser(ArrayRef<Token> StringToks,
- const SourceManager &sm, const LangOptions &features,
- const TargetInfo &target,
+ StringLiteralParser(ArrayRef<Token> StringToks, Preprocessor &PP,
+ StringLiteralEvalMethod StringMethod =
+ StringLiteralEvalMethod::Evaluated);
+ StringLiteralParser(ArrayRef<Token> StringToks, const SourceManager &sm,
+ const LangOptions &features, const TargetInfo &target,
DiagnosticsEngine *diags = nullptr)
- : SM(sm), Features(features), Target(target), Diags(diags),
- MaxTokenLength(0), SizeBound(0), CharByteWidth(0), Kind(tok::unknown),
- ResultPtr(ResultBuf.data()), hadError(false), Pascal(false) {
+ : SM(sm), Features(features), Target(target), Diags(diags),
+ MaxTokenLength(0), SizeBound(0), CharByteWidth(0), Kind(tok::unknown),
+ ResultPtr(ResultBuf.data()),
+ EvalMethod(StringLiteralEvalMethod::Evaluated), hadError(false),
+ Pascal(false) {
init(StringToks);
}
-
bool hadError;
bool Pascal;
@@ -255,12 +280,15 @@ public:
/// checking of the string literal and emit errors and warnings.
unsigned getOffsetOfStringByte(const Token &TheTok, unsigned ByteNo) const;
- bool isAscii() const { return Kind == tok::string_literal; }
+ bool isOrdinary() const { return Kind == tok::string_literal; }
bool isWide() const { return Kind == tok::wide_string_literal; }
bool isUTF8() const { return Kind == tok::utf8_string_literal; }
bool isUTF16() const { return Kind == tok::utf16_string_literal; }
bool isUTF32() const { return Kind == tok::utf32_string_literal; }
bool isPascal() const { return Pascal; }
+ bool isUnevaluated() const {
+ return EvalMethod == StringLiteralEvalMethod::Unevaluated;
+ }
StringRef getUDSuffix() const { return UDSuffixBuf; }
diff --git a/contrib/llvm-project/clang/include/clang/Lex/MacroInfo.h b/contrib/llvm-project/clang/include/clang/Lex/MacroInfo.h
index 0347a7a37186..1237fc62eb6c 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/MacroInfo.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/MacroInfo.h
@@ -54,11 +54,14 @@ class MacroInfo {
/// macro, this includes the \c __VA_ARGS__ identifier on the list.
IdentifierInfo **ParameterList = nullptr;
+ /// This is the list of tokens that the macro is defined to.
+ const Token *ReplacementTokens = nullptr;
+
/// \see ParameterList
unsigned NumParameters = 0;
- /// This is the list of tokens that the macro is defined to.
- SmallVector<Token, 8> ReplacementTokens;
+ /// \see ReplacementTokens
+ unsigned NumReplacementTokens = 0;
/// Length in characters of the macro definition.
mutable unsigned DefinitionLength;
@@ -114,9 +117,8 @@ class MacroInfo {
/// Whether this macro was used as header guard.
bool UsedForHeaderGuard : 1;
- // Only the Preprocessor gets to create and destroy these.
+ // Only the Preprocessor gets to create these.
MacroInfo(SourceLocation DefLoc);
- ~MacroInfo() = default;
public:
/// Return the location that the macro was defined at.
@@ -204,7 +206,7 @@ public:
void setIsGNUVarargs() { IsGNUVarargs = true; }
bool isC99Varargs() const { return IsC99Varargs; }
bool isGNUVarargs() const { return IsGNUVarargs; }
- bool isVariadic() const { return IsC99Varargs | IsGNUVarargs; }
+ bool isVariadic() const { return IsC99Varargs || IsGNUVarargs; }
/// Return true if this macro requires processing before expansion.
///
@@ -230,26 +232,47 @@ public:
bool isWarnIfUnused() const { return IsWarnIfUnused; }
/// Return the number of tokens that this macro expands to.
- unsigned getNumTokens() const { return ReplacementTokens.size(); }
+ unsigned getNumTokens() const { return NumReplacementTokens; }
const Token &getReplacementToken(unsigned Tok) const {
- assert(Tok < ReplacementTokens.size() && "Invalid token #");
+ assert(Tok < NumReplacementTokens && "Invalid token #");
return ReplacementTokens[Tok];
}
- using tokens_iterator = SmallVectorImpl<Token>::const_iterator;
+ using const_tokens_iterator = const Token *;
- tokens_iterator tokens_begin() const { return ReplacementTokens.begin(); }
- tokens_iterator tokens_end() const { return ReplacementTokens.end(); }
- bool tokens_empty() const { return ReplacementTokens.empty(); }
- ArrayRef<Token> tokens() const { return ReplacementTokens; }
+ const_tokens_iterator tokens_begin() const { return ReplacementTokens; }
+ const_tokens_iterator tokens_end() const {
+ return ReplacementTokens + NumReplacementTokens;
+ }
+ bool tokens_empty() const { return NumReplacementTokens == 0; }
+ ArrayRef<Token> tokens() const {
+ return llvm::ArrayRef(ReplacementTokens, NumReplacementTokens);
+ }
- /// Add the specified token to the replacement text for the macro.
- void AddTokenToBody(const Token &Tok) {
+ llvm::MutableArrayRef<Token>
+ allocateTokens(unsigned NumTokens, llvm::BumpPtrAllocator &PPAllocator) {
+ assert(ReplacementTokens == nullptr && NumReplacementTokens == 0 &&
+ "Token list already allocated!");
+ NumReplacementTokens = NumTokens;
+ Token *NewReplacementTokens = PPAllocator.Allocate<Token>(NumTokens);
+ ReplacementTokens = NewReplacementTokens;
+ return llvm::MutableArrayRef(NewReplacementTokens, NumTokens);
+ }
+
+ void setTokens(ArrayRef<Token> Tokens, llvm::BumpPtrAllocator &PPAllocator) {
assert(
!IsDefinitionLengthCached &&
"Changing replacement tokens after definition length got calculated");
- ReplacementTokens.push_back(Tok);
+ assert(ReplacementTokens == nullptr && NumReplacementTokens == 0 &&
+ "Token list already set!");
+ if (Tokens.empty())
+ return;
+
+ NumReplacementTokens = Tokens.size();
+ Token *NewReplacementTokens = PPAllocator.Allocate<Token>(Tokens.size());
+ std::copy(Tokens.begin(), Tokens.end(), NewReplacementTokens);
+ ReplacementTokens = NewReplacementTokens;
}
/// Return true if this macro is enabled.
@@ -302,15 +325,18 @@ protected:
SourceLocation Loc;
/// MacroDirective kind.
+ LLVM_PREFERRED_TYPE(Kind)
unsigned MDKind : 2;
/// True if the macro directive was loaded from a PCH file.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsFromPCH : 1;
// Used by VisibilityMacroDirective ----------------------------------------//
/// Whether the macro has public visibility (when described in a
/// module).
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsPublic : 1;
MacroDirective(Kind K, SourceLocation Loc)
@@ -549,7 +575,7 @@ public:
}
ArrayRef<ModuleMacro *> overrides() const {
- return llvm::makeArrayRef(overrides_begin(), overrides_end());
+ return llvm::ArrayRef(overrides_begin(), overrides_end());
}
/// \}
diff --git a/contrib/llvm-project/clang/include/clang/Lex/ModuleLoader.h b/contrib/llvm-project/clang/include/clang/Lex/ModuleLoader.h
index bf044e0e5f50..f880a9091a2e 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/ModuleLoader.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/ModuleLoader.h
@@ -51,6 +51,11 @@ public:
ModuleLoadResult() = default;
ModuleLoadResult(Module *M) : Storage(M, Normal) {}
ModuleLoadResult(LoadResultKind Kind) : Storage(nullptr, Kind) {}
+ ModuleLoadResult(Module *M, LoadResultKind Kind) : Storage(M, Kind) {}
+
+ operator bool() const {
+ return Storage.getInt() == Normal && Storage.getPointer();
+ }
operator Module *() const { return Storage.getPointer(); }
diff --git a/contrib/llvm-project/clang/include/clang/Lex/ModuleMap.h b/contrib/llvm-project/clang/include/clang/Lex/ModuleMap.h
index 41f85a1f572d..867cb6eab42f 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/ModuleMap.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/ModuleMap.h
@@ -20,8 +20,8 @@
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/PointerIntPair.h"
-#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
@@ -30,6 +30,7 @@
#include "llvm/ADT/Twine.h"
#include <ctime>
#include <memory>
+#include <optional>
#include <string>
#include <utility>
@@ -56,8 +57,8 @@ public:
/// contents.
/// \param File The file itself.
/// \param IsSystem Whether this is a module map from a system include path.
- virtual void moduleMapFileRead(SourceLocation FileStart,
- const FileEntry &File, bool IsSystem) {}
+ virtual void moduleMapFileRead(SourceLocation FileStart, FileEntryRef File,
+ bool IsSystem) {}
/// Called when a header is added during module map parsing.
///
@@ -66,10 +67,8 @@ public:
/// Called when an umbrella header is added during module map parsing.
///
- /// \param FileMgr FileManager instance
/// \param Header The umbrella header to collect.
- virtual void moduleMapAddUmbrellaHeader(FileManager *FileMgr,
- const FileEntry *Header) {}
+ virtual void moduleMapAddUmbrellaHeader(FileEntryRef Header) {}
};
class ModuleMap {
@@ -83,7 +82,7 @@ class ModuleMap {
/// The directory used for Clang-supplied, builtin include headers,
/// such as "stdint.h".
- const DirectoryEntry *BuiltinIncludeDir = nullptr;
+ OptionalDirectoryEntryRef BuiltinIncludeDir;
/// Language options used to parse the module map itself.
///
@@ -136,9 +135,11 @@ public:
/// should be textually included.
TextualHeader = 0x2,
+ /// This header is explicitly excluded from the module.
+ ExcludedHeader = 0x4,
+
// Caution: Adding an enumerator needs other changes.
// Adjust the number of bits for KnownHeader::Storage.
- // Adjust the bitfield HeaderFileInfo::HeaderRole size.
// Adjust the HeaderFileInfoTrait::ReadData streaming.
// Adjust the HeaderFileInfoTrait::EmitData streaming.
// Adjust ModuleMap::addHeader.
@@ -150,10 +151,13 @@ public:
/// Convert a header role to a kind.
static Module::HeaderKind headerRoleToKind(ModuleHeaderRole Role);
+ /// Check if the header with the given role is a modular one.
+ static bool isModular(ModuleHeaderRole Role);
+
/// A header that is known to reside within a given module,
/// whether it was included or excluded.
class KnownHeader {
- llvm::PointerIntPair<Module *, 2, ModuleHeaderRole> Storage;
+ llvm::PointerIntPair<Module *, 3, ModuleHeaderRole> Storage;
public:
KnownHeader() : Storage(nullptr, NormalHeader) {}
@@ -174,7 +178,7 @@ public:
/// Whether this header is available in the module.
bool isAvailable() const {
- return getModule()->isAvailable();
+ return getRole() != ExcludedHeader && getModule()->isAvailable();
}
/// Whether this header is accessible from the specified module.
@@ -190,13 +194,12 @@ public:
}
};
- using AdditionalModMapsSet = llvm::SmallPtrSet<const FileEntry *, 1>;
+ using AdditionalModMapsSet = llvm::DenseSet<FileEntryRef>;
private:
friend class ModuleMapParser;
- using HeadersMap =
- llvm::DenseMap<const FileEntry *, SmallVector<KnownHeader, 1>>;
+ using HeadersMap = llvm::DenseMap<FileEntryRef, SmallVector<KnownHeader, 1>>;
/// Mapping from each header to the module that owns the contents of
/// that header.
@@ -229,16 +232,20 @@ private:
/// The set of attributes that can be attached to a module.
struct Attributes {
/// Whether this is a system module.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsSystem : 1;
/// Whether this is an extern "C" module.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsExternC : 1;
/// Whether this is an exhaustive set of configuration macros.
+ LLVM_PREFERRED_TYPE(bool)
unsigned IsExhaustive : 1;
/// Whether files in this module can only include non-modular headers
/// and headers from used modules.
+ LLVM_PREFERRED_TYPE(bool)
unsigned NoUndeclaredIncludes : 1;
Attributes()
@@ -249,14 +256,15 @@ private:
/// A directory for which framework modules can be inferred.
struct InferredDirectory {
/// Whether to infer modules from this directory.
+ LLVM_PREFERRED_TYPE(bool)
unsigned InferModules : 1;
/// The attributes to use for inferred modules.
Attributes Attrs;
/// If \c InferModules is non-zero, the module map file that allowed
- /// inferred modules. Otherwise, nullptr.
- const FileEntry *ModuleMapFile;
+ /// inferred modules. Otherwise, nullopt.
+ OptionalFileEntryRef ModuleMapFile;
/// The names of modules that cannot be inferred within this
/// directory.
@@ -271,7 +279,8 @@ private:
/// A mapping from an inferred module to the module map that allowed the
/// inference.
- llvm::DenseMap<const Module *, const FileEntry *> InferredModuleAllowedBy;
+ // FIXME: Consider making the values non-optional.
+ llvm::DenseMap<const Module *, OptionalFileEntryRef> InferredModuleAllowedBy;
llvm::DenseMap<const Module *, AdditionalModMapsSet> AdditionalModMaps;
@@ -328,7 +337,7 @@ private:
/// \param NeedsFramework If M is not a framework but a missing header would
/// be found in case M was, set it to true. False otherwise.
/// \return The resolved file, if any.
- Optional<FileEntryRef>
+ OptionalFileEntryRef
findHeader(Module *M, const Module::UnresolvedHeaderDirective &Header,
SmallVectorImpl<char> &RelativePathName, bool &NeedsFramework);
@@ -352,7 +361,7 @@ private:
/// If \p File represents a builtin header within Clang's builtin include
/// directory, this also loads all of the module maps to see if it will get
/// associated with a specific module (e.g. in /usr/include).
- HeadersMap::iterator findKnownHeader(const FileEntry *File);
+ HeadersMap::iterator findKnownHeader(FileEntryRef File);
/// Searches for a module whose umbrella directory contains \p File.
///
@@ -360,22 +369,22 @@ private:
///
/// \param IntermediateDirs On success, contains the set of directories
/// searched before finding \p File.
- KnownHeader findHeaderInUmbrellaDirs(const FileEntry *File,
- SmallVectorImpl<const DirectoryEntry *> &IntermediateDirs);
+ KnownHeader findHeaderInUmbrellaDirs(
+ FileEntryRef File, SmallVectorImpl<DirectoryEntryRef> &IntermediateDirs);
/// Given that \p File is not in the Headers map, look it up within
/// umbrella directories and find or create a module for it.
- KnownHeader findOrCreateModuleForHeaderInUmbrellaDir(const FileEntry *File);
+ KnownHeader findOrCreateModuleForHeaderInUmbrellaDir(FileEntryRef File);
/// A convenience method to determine if \p File is (possibly nested)
/// in an umbrella directory.
- bool isHeaderInUmbrellaDirs(const FileEntry *File) {
- SmallVector<const DirectoryEntry *, 2> IntermediateDirs;
+ bool isHeaderInUmbrellaDirs(FileEntryRef File) {
+ SmallVector<DirectoryEntryRef, 2> IntermediateDirs;
return static_cast<bool>(findHeaderInUmbrellaDirs(File, IntermediateDirs));
}
- Module *inferFrameworkModule(const DirectoryEntry *FrameworkDir,
- Attributes Attrs, Module *Parent);
+ Module *inferFrameworkModule(DirectoryEntryRef FrameworkDir, Attributes Attrs,
+ Module *Parent);
public:
/// Construct a new module map.
@@ -399,20 +408,18 @@ public:
/// Set the target information.
void setTarget(const TargetInfo &Target);
- /// Set the directory that contains Clang-supplied include
- /// files, such as our stdarg.h or tgmath.h.
- void setBuiltinIncludeDir(const DirectoryEntry *Dir) {
- BuiltinIncludeDir = Dir;
- }
+ /// Set the directory that contains Clang-supplied include files, such as our
+ /// stdarg.h or tgmath.h.
+ void setBuiltinIncludeDir(DirectoryEntryRef Dir) { BuiltinIncludeDir = Dir; }
/// Get the directory that contains Clang-supplied include files.
- const DirectoryEntry *getBuiltinDir() const {
- return BuiltinIncludeDir;
- }
+ OptionalDirectoryEntryRef getBuiltinDir() const { return BuiltinIncludeDir; }
/// Is this a compiler builtin header?
- static bool isBuiltinHeader(StringRef FileName);
- bool isBuiltinHeader(const FileEntry *File);
+ bool isBuiltinHeader(FileEntryRef File);
+
+ bool shouldImportRelativeToBuiltinIncludeDir(StringRef FileName,
+ Module *Module) const;
/// Add a module map callback.
void addModuleMapCallbacks(std::unique_ptr<ModuleMapCallbacks> Callback) {
@@ -433,8 +440,8 @@ public:
/// \returns The module KnownHeader, which provides the module that owns the
/// given header file. The KnownHeader is default constructed to indicate
/// that no module owns this header file.
- KnownHeader findModuleForHeader(const FileEntry *File,
- bool AllowTextual = false);
+ KnownHeader findModuleForHeader(FileEntryRef File, bool AllowTextual = false,
+ bool AllowExcluded = false);
/// Retrieve all the modules that contain the given header file. Note that
/// this does not implicitly load module maps, except for builtin headers,
@@ -443,12 +450,11 @@ public:
///
/// Typically, \ref findModuleForHeader should be used instead, as it picks
/// the preferred module for the header.
- ArrayRef<KnownHeader> findAllModulesForHeader(const FileEntry *File);
+ ArrayRef<KnownHeader> findAllModulesForHeader(FileEntryRef File);
/// Like \ref findAllModulesForHeader, but do not attempt to infer module
/// ownership from umbrella headers if we've not already done so.
- ArrayRef<KnownHeader>
- findResolvedModulesForHeader(const FileEntry *File) const;
+ ArrayRef<KnownHeader> findResolvedModulesForHeader(FileEntryRef File) const;
/// Resolve all lazy header directives for the specified file.
///
@@ -456,8 +462,11 @@ public:
/// is effectively internal, but is exposed so HeaderSearch can call it.
void resolveHeaderDirectives(const FileEntry *File) const;
- /// Resolve all lazy header directives for the specified module.
- void resolveHeaderDirectives(Module *Mod) const;
+ /// Resolve lazy header directives for the specified module. If File is
+ /// provided, only headers with same size and modtime are resolved. If File
+ /// is not set, all headers are resolved.
+ void resolveHeaderDirectives(Module *Mod,
+ std::optional<const FileEntry *> File) const;
/// Reports errors if a module must not include a specific file.
///
@@ -476,15 +485,15 @@ public:
void diagnoseHeaderInclusion(Module *RequestingModule,
bool RequestingModuleIsModuleInterface,
SourceLocation FilenameLoc, StringRef Filename,
- const FileEntry *File);
+ FileEntryRef File);
/// Determine whether the given header is part of a module
/// marked 'unavailable'.
- bool isHeaderInUnavailableModule(const FileEntry *Header) const;
+ bool isHeaderInUnavailableModule(FileEntryRef Header) const;
/// Determine whether the given header is unavailable as part
/// of the specified module.
- bool isHeaderUnavailableInModule(const FileEntry *Header,
+ bool isHeaderUnavailableInModule(FileEntryRef Header,
const Module *RequestingModule) const;
/// Retrieve a module with the given name.
@@ -538,13 +547,23 @@ public:
///
/// We model the global module fragment as a submodule of the module
/// interface unit. Unfortunately, we can't create the module interface
- /// unit's Module until later, because we don't know what it will be called.
- Module *createGlobalModuleFragmentForModuleUnit(SourceLocation Loc);
+ /// unit's Module until later, because we don't know what it will be called
+ /// usually. See C++20 [module.unit]/7.2 for the case we could know its
+ /// parent.
+ Module *createGlobalModuleFragmentForModuleUnit(SourceLocation Loc,
+ Module *Parent = nullptr);
+ Module *createImplicitGlobalModuleFragmentForModuleUnit(SourceLocation Loc,
+ Module *Parent);
/// Create a global module fragment for a C++ module interface unit.
Module *createPrivateModuleFragmentForInterfaceUnit(Module *Parent,
SourceLocation Loc);
+ /// Create a new C++ module with the specified kind, and reparent any pending
+ /// global module fragment(s) to it.
+ Module *createModuleUnitWithKind(SourceLocation Loc, StringRef Name,
+ Module::ModuleKind Kind);
+
/// Create a new module for a C++ module interface unit.
/// The module must not already exist, and will be configured for the current
/// compilation.
@@ -552,16 +571,23 @@ public:
/// Note that this also sets the current module to the newly-created module.
///
/// \returns The newly-created module.
- Module *createModuleForInterfaceUnit(SourceLocation Loc, StringRef Name,
- Module *GlobalModule);
+ Module *createModuleForInterfaceUnit(SourceLocation Loc, StringRef Name);
- /// Create a header module from the specified list of headers.
- Module *createHeaderModule(StringRef Name, ArrayRef<Module::Header> Headers);
+ /// Create a new module for a C++ module implementation unit.
+ /// The interface module for this implementation (implicitly imported) must
+ /// exist and be loaded and present in the modules map.
+ ///
+ /// \returns The newly-created module.
+ Module *createModuleForImplementationUnit(SourceLocation Loc, StringRef Name);
+
+ /// Create a C++20 header unit.
+ Module *createHeaderUnit(SourceLocation Loc, StringRef Name,
+ Module::Header H);
/// Infer the contents of a framework module map from the given
/// framework directory.
- Module *inferFrameworkModule(const DirectoryEntry *FrameworkDir,
- bool IsSystem, Module *Parent);
+ Module *inferFrameworkModule(DirectoryEntryRef FrameworkDir, bool IsSystem,
+ Module *Parent);
/// Create a new top-level module that is shadowed by
/// \p ShadowingModule.
@@ -581,6 +607,12 @@ public:
return ModuleScopeIDs[ExistingModule] < CurrentModuleScopeID;
}
+ /// Check whether a framework module can be inferred in the given directory.
+ bool canInferFrameworkModule(const DirectoryEntry *Dir) const {
+ auto It = InferredDirectories.find(Dir);
+ return It != InferredDirectories.end() && It->getSecond().InferModules;
+ }
+
/// Retrieve the module map file containing the definition of the given
/// module.
///
@@ -588,7 +620,7 @@ public:
///
/// \returns The file entry for the module map file containing the given
/// module, or nullptr if the module definition was inferred.
- const FileEntry *getContainingModuleMapFile(const Module *Module) const;
+ OptionalFileEntryRef getContainingModuleMapFile(const Module *Module) const;
/// Get the module map file that (along with the module name) uniquely
/// identifies this module.
@@ -599,9 +631,18 @@ public:
/// of inferred modules, returns the module map that allowed the inference
/// (e.g. contained 'module *'). Otherwise, returns
/// getContainingModuleMapFile().
- const FileEntry *getModuleMapFileForUniquing(const Module *M) const;
+ OptionalFileEntryRef getModuleMapFileForUniquing(const Module *M) const;
- void setInferredModuleAllowedBy(Module *M, const FileEntry *ModMap);
+ void setInferredModuleAllowedBy(Module *M, OptionalFileEntryRef ModMap);
+
+ /// Canonicalize \p Path in a manner suitable for a module map file. In
+ /// particular, this canonicalizes the parent directory separately from the
+ /// filename so that it does not affect header resolution relative to the
+ /// modulemap.
+ ///
+ /// \returns an error code if any filesystem operations failed. In this case
+ /// \p Path is not modified.
+ std::error_code canonicalizeModuleMapPath(SmallVectorImpl<char> &Path);
/// Get any module map files other than getModuleMapFileForUniquing(M)
/// that define submodules of a top-level module \p M. This is cheaper than
@@ -614,7 +655,7 @@ public:
return &I->second;
}
- void addAdditionalModuleMapFile(const Module *M, const FileEntry *ModuleMap);
+ void addAdditionalModuleMapFile(const Module *M, FileEntryRef ModuleMap);
/// Resolve all of the unresolved exports in the given module.
///
@@ -646,26 +687,22 @@ public:
/// false otherwise.
bool resolveConflicts(Module *Mod, bool Complain);
- /// Sets the umbrella header of the given module to the given
- /// header.
- void setUmbrellaHeader(Module *Mod, const FileEntry *UmbrellaHeader,
- const Twine &NameAsWritten,
- const Twine &PathRelativeToRootModuleDirectory);
+ /// Sets the umbrella header of the given module to the given header.
+ void
+ setUmbrellaHeaderAsWritten(Module *Mod, FileEntryRef UmbrellaHeader,
+ const Twine &NameAsWritten,
+ const Twine &PathRelativeToRootModuleDirectory);
- /// Sets the umbrella directory of the given module to the given
- /// directory.
- void setUmbrellaDir(Module *Mod, const DirectoryEntry *UmbrellaDir,
- const Twine &NameAsWritten,
- const Twine &PathRelativeToRootModuleDirectory);
+ /// Sets the umbrella directory of the given module to the given directory.
+ void setUmbrellaDirAsWritten(Module *Mod, DirectoryEntryRef UmbrellaDir,
+ const Twine &NameAsWritten,
+ const Twine &PathRelativeToRootModuleDirectory);
/// Adds this header to the given module.
/// \param Role The role of the header wrt the module.
void addHeader(Module *Mod, Module::Header Header,
ModuleHeaderRole Role, bool Imported = false);
- /// Marks this header as being excluded from the given module.
- void excludeHeader(Module *Mod, Module::Header Header);
-
/// Parse the given module map file, and record any modules we
/// encounter.
///
@@ -686,9 +723,9 @@ public:
/// that caused us to load this module map file, if any.
///
/// \returns true if an error occurred, false otherwise.
- bool parseModuleMapFile(const FileEntry *File, bool IsSystem,
- const DirectoryEntry *HomeDir,
- FileID ID = FileID(), unsigned *Offset = nullptr,
+ bool parseModuleMapFile(FileEntryRef File, bool IsSystem,
+ DirectoryEntryRef HomeDir, FileID ID = FileID(),
+ unsigned *Offset = nullptr,
SourceLocation ExternModuleLoc = SourceLocation());
/// Dump the contents of the module map, for debugging purposes.
@@ -708,10 +745,10 @@ public:
}
/// Return a cached module load.
- llvm::Optional<Module *> getCachedModuleLoad(const IdentifierInfo &II) {
+ std::optional<Module *> getCachedModuleLoad(const IdentifierInfo &II) {
auto I = CachedModuleLoads.find(&II);
if (I == CachedModuleLoads.end())
- return None;
+ return std::nullopt;
return I->second;
}
};
diff --git a/contrib/llvm-project/clang/include/clang/Lex/MultipleIncludeOpt.h b/contrib/llvm-project/clang/include/clang/Lex/MultipleIncludeOpt.h
index 7ceb7e53c75d..8e570226c4b2 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/MultipleIncludeOpt.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/MultipleIncludeOpt.h
@@ -108,6 +108,12 @@ public:
ImmediatelyAfterTopLevelIfndef = false;
}
+ /// SetReadToken - Set whether the value of 'ReadAnyTokens'. Called to
+ /// override when encountering tokens outside of the include guard that have
+ /// no effect if the file in question is is included multiple times (e.g. the
+ /// null directive).
+ void SetReadToken(bool Value) { ReadAnyTokens = Value; }
+
/// ExpandedMacro - When a macro is expanded with this lexer as the current
/// buffer, this method is called to disable the MIOpt if needed.
void ExpandedMacro() { DidMacroExpansion = true; }
diff --git a/contrib/llvm-project/clang/include/clang/Lex/PPCallbacks.h b/contrib/llvm-project/clang/include/clang/Lex/PPCallbacks.h
index bcf49c577735..e3942af7be28 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/PPCallbacks.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/PPCallbacks.h
@@ -22,11 +22,11 @@
#include "llvm/ADT/StringRef.h"
namespace clang {
- class Token;
- class IdentifierInfo;
- class MacroDefinition;
- class MacroDirective;
- class MacroArgs;
+class Token;
+class IdentifierInfo;
+class MacroDefinition;
+class MacroDirective;
+class MacroArgs;
/// This interface provides a way to observe the actions of the
/// preprocessor as it does its thing.
@@ -43,12 +43,34 @@ public:
/// Callback invoked whenever a source file is entered or exited.
///
/// \param Loc Indicates the new location.
- /// \param PrevFID the file that was exited if \p Reason is ExitFile.
+ /// \param PrevFID the file that was exited if \p Reason is ExitFile or the
+ /// the file before the new one entered for \p Reason EnterFile.
virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
FileID PrevFID = FileID()) {
}
+ enum class LexedFileChangeReason { EnterFile, ExitFile };
+
+ /// Callback invoked whenever the \p Lexer moves to a different file for
+ /// lexing. Unlike \p FileChanged line number directives and other related
+ /// pragmas do not trigger callbacks to \p LexedFileChanged.
+ ///
+ /// \param FID The \p FileID that the \p Lexer moved to.
+ ///
+ /// \param Reason Whether the \p Lexer entered a new file or exited one.
+ ///
+ /// \param FileType The \p CharacteristicKind of the file the \p Lexer moved
+ /// to.
+ ///
+ /// \param PrevFID The \p FileID the \p Lexer was using before the change.
+ ///
+ /// \param Loc The location where the \p Lexer entered a new file from or the
+ /// location that the \p Lexer moved into after exiting a file.
+ virtual void LexedFileChanged(FileID FID, LexedFileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType,
+ FileID PrevFID, SourceLocation Loc) {}
+
/// Callback invoked whenever a source file is skipped as the result
/// of header guard optimization.
///
@@ -61,22 +83,15 @@ public:
const Token &FilenameTok,
SrcMgr::CharacteristicKind FileType) {}
- /// Callback invoked whenever an inclusion directive results in a
- /// file-not-found error.
+ /// Callback invoked whenever the preprocessor cannot find a file for an
+ /// inclusion directive.
///
/// \param FileName The name of the file being included, as written in the
/// source code.
///
- /// \param RecoveryPath If this client indicates that it can recover from
- /// this missing file, the client should set this as an additional header
- /// search patch.
- ///
- /// \returns true to indicate that the preprocessor should attempt to recover
- /// by adding \p RecoveryPath as a header search path.
- virtual bool FileNotFound(StringRef FileName,
- SmallVectorImpl<char> &RecoveryPath) {
- return false;
- }
+ /// \returns true to indicate that the preprocessor should skip this file
+ /// and not issue any diagnostic.
+ virtual bool FileNotFound(StringRef FileName) { return false; }
/// Callback invoked whenever an inclusion directive of
/// any kind (\c \#include, \c \#import, etc.) has been processed, regardless
@@ -120,16 +135,12 @@ public:
/// implicitly 'extern "C"' in C++ mode.
///
virtual void InclusionDirective(SourceLocation HashLoc,
- const Token &IncludeTok,
- StringRef FileName,
- bool IsAngled,
- CharSourceRange FilenameRange,
- const FileEntry *File,
- StringRef SearchPath,
- StringRef RelativePath,
+ const Token &IncludeTok, StringRef FileName,
+ bool IsAngled, CharSourceRange FilenameRange,
+ OptionalFileEntryRef File,
+ StringRef SearchPath, StringRef RelativePath,
const Module *Imported,
- SrcMgr::CharacteristicKind FileType) {
- }
+ SrcMgr::CharacteristicKind FileType) {}
/// Callback invoked whenever a submodule was entered.
///
@@ -252,9 +263,20 @@ public:
}
/// Callback invoked when a \#pragma warning directive is read.
- virtual void PragmaWarning(SourceLocation Loc, StringRef WarningSpec,
- ArrayRef<int> Ids) {
- }
+ enum PragmaWarningSpecifier {
+ PWS_Default,
+ PWS_Disable,
+ PWS_Error,
+ PWS_Once,
+ PWS_Suppress,
+ PWS_Level1,
+ PWS_Level2,
+ PWS_Level3,
+ PWS_Level4,
+ };
+ virtual void PragmaWarning(SourceLocation Loc,
+ PragmaWarningSpecifier WarningSpec,
+ ArrayRef<int> Ids) {}
/// Callback invoked when a \#pragma warning(push) directive is read.
virtual void PragmaWarningPush(SourceLocation Loc, int Level) {
@@ -311,7 +333,7 @@ public:
/// Hook called when a '__has_include' or '__has_include_next' directive is
/// read.
virtual void HasInclude(SourceLocation Loc, StringRef FileName, bool IsAngled,
- Optional<FileEntryRef> File,
+ OptionalFileEntryRef File,
SrcMgr::CharacteristicKind FileType);
/// Hook called when a source range is skipped.
@@ -426,23 +448,32 @@ public:
Second->FileChanged(Loc, Reason, FileType, PrevFID);
}
+ void LexedFileChanged(FileID FID, LexedFileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType, FileID PrevFID,
+ SourceLocation Loc) override {
+ First->LexedFileChanged(FID, Reason, FileType, PrevFID, Loc);
+ Second->LexedFileChanged(FID, Reason, FileType, PrevFID, Loc);
+ }
+
void FileSkipped(const FileEntryRef &SkippedFile, const Token &FilenameTok,
SrcMgr::CharacteristicKind FileType) override {
First->FileSkipped(SkippedFile, FilenameTok, FileType);
Second->FileSkipped(SkippedFile, FilenameTok, FileType);
}
- bool FileNotFound(StringRef FileName,
- SmallVectorImpl<char> &RecoveryPath) override {
- return First->FileNotFound(FileName, RecoveryPath) ||
- Second->FileNotFound(FileName, RecoveryPath);
+ bool FileNotFound(StringRef FileName) override {
+ bool Skip = First->FileNotFound(FileName);
+ // Make sure to invoke the second callback, no matter if the first already
+ // returned true to skip the file.
+ Skip |= Second->FileNotFound(FileName);
+ return Skip;
}
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
- CharSourceRange FilenameRange, const FileEntry *File,
- StringRef SearchPath, StringRef RelativePath,
- const Module *Imported,
+ CharSourceRange FilenameRange,
+ OptionalFileEntryRef File, StringRef SearchPath,
+ StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) override {
First->InclusionDirective(HashLoc, IncludeTok, FileName, IsAngled,
FilenameRange, File, SearchPath, RelativePath,
@@ -492,6 +523,11 @@ public:
Second->PragmaComment(Loc, Kind, Str);
}
+ void PragmaMark(SourceLocation Loc, StringRef Trivia) override {
+ First->PragmaMark(Loc, Trivia);
+ Second->PragmaMark(Loc, Trivia);
+ }
+
void PragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value) override {
First->PragmaDetectMismatch(Loc, Name, Value);
@@ -526,7 +562,7 @@ public:
}
void HasInclude(SourceLocation Loc, StringRef FileName, bool IsAngled,
- Optional<FileEntryRef> File,
+ OptionalFileEntryRef File,
SrcMgr::CharacteristicKind FileType) override;
void PragmaOpenCLExtension(SourceLocation NameLoc, const IdentifierInfo *Name,
@@ -535,7 +571,7 @@ public:
Second->PragmaOpenCLExtension(NameLoc, Name, StateLoc, State);
}
- void PragmaWarning(SourceLocation Loc, StringRef WarningSpec,
+ void PragmaWarning(SourceLocation Loc, PragmaWarningSpecifier WarningSpec,
ArrayRef<int> Ids) override {
First->PragmaWarning(Loc, WarningSpec, Ids);
Second->PragmaWarning(Loc, WarningSpec, Ids);
diff --git a/contrib/llvm-project/clang/include/clang/Lex/Pragma.h b/contrib/llvm-project/clang/include/clang/Lex/Pragma.h
index cf8cca5414ea..67eca618f6c4 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/Pragma.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/Pragma.h
@@ -123,6 +123,13 @@ public:
PragmaNamespace *getIfNamespace() override { return this; }
};
+/// Destringize a \c _Pragma("") string according to C11 6.10.9.1:
+/// "The string literal is destringized by deleting any encoding prefix,
+/// deleting the leading and trailing double-quotes, replacing each escape
+/// sequence \" by a double-quote, and replacing each escape sequence \\ by a
+/// single backslash."
+void prepare_PragmaString(SmallVectorImpl<char> &StrVal);
+
} // namespace clang
#endif // LLVM_CLANG_LEX_PRAGMA_H
diff --git a/contrib/llvm-project/clang/include/clang/Lex/PreprocessingRecord.h b/contrib/llvm-project/clang/include/clang/Lex/PreprocessingRecord.h
index 0137d871e916..5ddf024186f8 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/PreprocessingRecord.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/PreprocessingRecord.h
@@ -19,8 +19,6 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Lex/PPCallbacks.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
@@ -30,6 +28,7 @@
#include <cassert>
#include <cstddef>
#include <iterator>
+#include <optional>
#include <utility>
#include <vector>
@@ -49,7 +48,6 @@ void operator delete(void *ptr, clang::PreprocessingRecord &PR,
namespace clang {
-class FileEntry;
class IdentifierInfo;
class MacroInfo;
class SourceManager;
@@ -230,25 +228,27 @@ class Token;
/// Whether the file name was in quotation marks; otherwise, it was
/// in angle brackets.
+ LLVM_PREFERRED_TYPE(bool)
unsigned InQuotes : 1;
/// The kind of inclusion directive we have.
///
/// This is a value of type InclusionKind.
+ LLVM_PREFERRED_TYPE(InclusionKind)
unsigned Kind : 2;
/// Whether the inclusion directive was automatically turned into
/// a module import.
+ LLVM_PREFERRED_TYPE(bool)
unsigned ImportedModule : 1;
/// The file that was included.
- const FileEntry *File;
+ OptionalFileEntryRef File;
public:
- InclusionDirective(PreprocessingRecord &PPRec,
- InclusionKind Kind, StringRef FileName,
- bool InQuotes, bool ImportedModule,
- const FileEntry *File, SourceRange Range);
+ InclusionDirective(PreprocessingRecord &PPRec, InclusionKind Kind,
+ StringRef FileName, bool InQuotes, bool ImportedModule,
+ OptionalFileEntryRef File, SourceRange Range);
/// Determine what kind of inclusion directive this is.
InclusionKind getKind() const { return static_cast<InclusionKind>(Kind); }
@@ -266,7 +266,7 @@ class Token;
/// Retrieve the file entry for the actual file that was included
/// by this directive.
- const FileEntry *getFile() const { return File; }
+ OptionalFileEntryRef getFile() const { return File; }
// Implement isa/cast/dyncast/etc.
static bool classof(const PreprocessedEntity *PE) {
@@ -293,9 +293,9 @@ class Token;
/// Optionally returns true or false if the preallocated preprocessed
/// entity with index \p Index came from file \p FID.
- virtual Optional<bool> isPreprocessedEntityInFileID(unsigned Index,
- FileID FID) {
- return None;
+ virtual std::optional<bool> isPreprocessedEntityInFileID(unsigned Index,
+ FileID FID) {
+ return std::nullopt;
}
/// Read a preallocated skipped range from the external source.
@@ -531,7 +531,7 @@ class Token;
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
CharSourceRange FilenameRange,
- const FileEntry *File, StringRef SearchPath,
+ OptionalFileEntryRef File, StringRef SearchPath,
StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) override;
void Ifdef(SourceLocation Loc, const Token &MacroNameTok,
diff --git a/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h b/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h
index fe2327f0a480..b0a8ec0fec5e 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/Preprocessor.h
@@ -15,6 +15,7 @@
#define LLVM_CLANG_LEX_PREPROCESSOR_H
#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticIDs.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
@@ -22,20 +23,18 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TokenKinds.h"
+#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/ModuleLoader.h"
#include "clang/Lex/ModuleMap.h"
#include "clang/Lex/PPCallbacks.h"
-#include "clang/Lex/PreprocessorExcludedConditionalDirectiveSkipMapping.h"
#include "clang/Lex/Token.h"
#include "clang/Lex/TokenLexer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/FunctionExtras.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -51,6 +50,7 @@
#include <cstdint>
#include <map>
#include <memory>
+#include <optional>
#include <string>
#include <utility>
#include <vector>
@@ -66,7 +66,6 @@ namespace clang {
class CodeCompletionHandler;
class CommentHandler;
class DirectoryEntry;
-class DirectoryLookup;
class EmptylineHandler;
class ExternalPreprocessorSource;
class FileEntry;
@@ -133,7 +132,7 @@ class Preprocessor {
llvm::unique_function<void(const clang::Token &)> OnToken;
std::shared_ptr<PreprocessorOptions> PPOpts;
DiagnosticsEngine *Diags;
- LangOptions &LangOpts;
+ const LangOptions &LangOpts;
const TargetInfo *Target = nullptr;
const TargetInfo *AuxTarget = nullptr;
FileManager &FileMgr;
@@ -164,6 +163,7 @@ class Preprocessor {
IdentifierInfo *Ident__has_feature; // __has_feature
IdentifierInfo *Ident__has_extension; // __has_extension
IdentifierInfo *Ident__has_builtin; // __has_builtin
+ IdentifierInfo *Ident__has_constexpr_builtin; // __has_constexpr_builtin
IdentifierInfo *Ident__has_attribute; // __has_attribute
IdentifierInfo *Ident__has_include; // __has_include
IdentifierInfo *Ident__has_include_next; // __has_include_next
@@ -178,12 +178,29 @@ class Preprocessor {
IdentifierInfo *Ident__is_target_vendor; // __is_target_vendor
IdentifierInfo *Ident__is_target_os; // __is_target_os
IdentifierInfo *Ident__is_target_environment; // __is_target_environment
+ IdentifierInfo *Ident__is_target_variant_os;
+ IdentifierInfo *Ident__is_target_variant_environment;
+ IdentifierInfo *Ident__FLT_EVAL_METHOD__; // __FLT_EVAL_METHOD
// Weak, only valid (and set) while InMacroArgs is true.
Token* ArgMacro;
SourceLocation DATELoc, TIMELoc;
+ // FEM_UnsetOnCommandLine means that an explicit evaluation method was
+ // not specified on the command line. The target is queried to set the
+ // default evaluation method.
+ LangOptions::FPEvalMethodKind CurrentFPEvalMethod =
+ LangOptions::FPEvalMethodKind::FEM_UnsetOnCommandLine;
+
+ // The most recent pragma location where the floating point evaluation
+ // method was modified. This is used to determine whether the
+ // 'pragma clang fp eval_method' was used whithin the current scope.
+ SourceLocation LastFPEvalPragmaLocation;
+
+ LangOptions::FPEvalMethodKind TUFPEvalMethod =
+ LangOptions::FPEvalMethodKind::FEM_UnsetOnCommandLine;
+
// Next __COUNTER__ value, starts at 0.
unsigned CounterValue = 0;
@@ -260,8 +277,7 @@ class Preprocessor {
/// Empty line handler.
EmptylineHandler *Emptyline = nullptr;
- /// True if we want to ignore EOF token and continue later on (thus
- /// avoid tearing the Lexer and etc. down).
+ /// True to avoid tearing down the lexer etc on EOF
bool IncrementalProcessing = false;
public:
@@ -292,14 +308,17 @@ private:
/// lexed, if any.
SourceLocation ModuleImportLoc;
- /// The module import path that we're currently processing.
- SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> ModuleImportPath;
+ /// The import path for named module that we're currently processing.
+ SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> NamedModuleImportPath;
+
+ /// Whether the import is an `@import` or a standard c++ modules import.
+ bool IsAtImport = false;
/// Whether the last token we lexed was an '@'.
bool LastTokenWasAt = false;
/// A position within a C++20 import-seq.
- class ImportSeq {
+ class StdCXXImportSeq {
public:
enum State : int {
// Positive values represent a number of unclosed brackets.
@@ -309,7 +328,7 @@ private:
AfterImportSeq = -3,
};
- ImportSeq(State S) : S(S) {}
+ StdCXXImportSeq(State S) : S(S) {}
/// Saw any kind of open bracket.
void handleOpenBracket() {
@@ -364,6 +383,7 @@ private:
bool atTopLevel() { return S <= 0; }
bool afterImportSeq() { return S == AfterImportSeq; }
+ bool afterTopLevelSeq() { return S == AfterTopLevelTokenSeq; }
private:
State S;
@@ -374,7 +394,206 @@ private:
};
/// Our current position within a C++20 import-seq.
- ImportSeq ImportSeqState = ImportSeq::AfterTopLevelTokenSeq;
+ StdCXXImportSeq StdCXXImportSeqState = StdCXXImportSeq::AfterTopLevelTokenSeq;
+
+ /// Track whether we are in a Global Module Fragment
+ class TrackGMF {
+ public:
+ enum GMFState : int {
+ GMFActive = 1,
+ MaybeGMF = 0,
+ BeforeGMFIntroducer = -1,
+ GMFAbsentOrEnded = -2,
+ };
+
+ TrackGMF(GMFState S) : S(S) {}
+
+ /// Saw a semicolon.
+ void handleSemi() {
+ // If it is immediately after the first instance of the module keyword,
+ // then that introduces the GMF.
+ if (S == MaybeGMF)
+ S = GMFActive;
+ }
+
+ /// Saw an 'export' identifier.
+ void handleExport() {
+ // The presence of an 'export' keyword always ends or excludes a GMF.
+ S = GMFAbsentOrEnded;
+ }
+
+ /// Saw an 'import' identifier.
+ void handleImport(bool AfterTopLevelTokenSeq) {
+ // If we see this before any 'module' kw, then we have no GMF.
+ if (AfterTopLevelTokenSeq && S == BeforeGMFIntroducer)
+ S = GMFAbsentOrEnded;
+ }
+
+ /// Saw a 'module' identifier.
+ void handleModule(bool AfterTopLevelTokenSeq) {
+ // This was the first module identifier and not preceded by any token
+ // that would exclude a GMF. It could begin a GMF, but only if directly
+ // followed by a semicolon.
+ if (AfterTopLevelTokenSeq && S == BeforeGMFIntroducer)
+ S = MaybeGMF;
+ else
+ S = GMFAbsentOrEnded;
+ }
+
+ /// Saw any other token.
+ void handleMisc() {
+ // We saw something other than ; after the 'module' kw, so not a GMF.
+ if (S == MaybeGMF)
+ S = GMFAbsentOrEnded;
+ }
+
+ bool inGMF() { return S == GMFActive; }
+
+ private:
+ /// Track the transitions into and out of a Global Module Fragment,
+ /// if one is present.
+ GMFState S;
+ };
+
+ TrackGMF TrackGMFState = TrackGMF::BeforeGMFIntroducer;
+
+ /// Track the status of the c++20 module decl.
+ ///
+ /// module-declaration:
+ /// 'export'[opt] 'module' module-name module-partition[opt]
+ /// attribute-specifier-seq[opt] ';'
+ ///
+ /// module-name:
+ /// module-name-qualifier[opt] identifier
+ ///
+ /// module-partition:
+ /// ':' module-name-qualifier[opt] identifier
+ ///
+ /// module-name-qualifier:
+ /// identifier '.'
+ /// module-name-qualifier identifier '.'
+ ///
+ /// Transition state:
+ ///
+ /// NotAModuleDecl --- export ---> FoundExport
+ /// NotAModuleDecl --- module ---> ImplementationCandidate
+ /// FoundExport --- module ---> InterfaceCandidate
+ /// ImplementationCandidate --- Identifier ---> ImplementationCandidate
+ /// ImplementationCandidate --- period ---> ImplementationCandidate
+ /// ImplementationCandidate --- colon ---> ImplementationCandidate
+ /// InterfaceCandidate --- Identifier ---> InterfaceCandidate
+ /// InterfaceCandidate --- period ---> InterfaceCandidate
+ /// InterfaceCandidate --- colon ---> InterfaceCandidate
+ /// ImplementationCandidate --- Semi ---> NamedModuleImplementation
+ /// NamedModuleInterface --- Semi ---> NamedModuleInterface
+ /// NamedModuleImplementation --- Anything ---> NamedModuleImplementation
+ /// NamedModuleInterface --- Anything ---> NamedModuleInterface
+ ///
+ /// FIXME: We haven't handle attribute-specifier-seq here. It may not be bad
+ /// soon since we don't support any module attributes yet.
+ class ModuleDeclSeq {
+ enum ModuleDeclState : int {
+ NotAModuleDecl,
+ FoundExport,
+ InterfaceCandidate,
+ ImplementationCandidate,
+ NamedModuleInterface,
+ NamedModuleImplementation,
+ };
+
+ public:
+ ModuleDeclSeq() = default;
+
+ void handleExport() {
+ if (State == NotAModuleDecl)
+ State = FoundExport;
+ else if (!isNamedModule())
+ reset();
+ }
+
+ void handleModule() {
+ if (State == FoundExport)
+ State = InterfaceCandidate;
+ else if (State == NotAModuleDecl)
+ State = ImplementationCandidate;
+ else if (!isNamedModule())
+ reset();
+ }
+
+ void handleIdentifier(IdentifierInfo *Identifier) {
+ if (isModuleCandidate() && Identifier)
+ Name += Identifier->getName().str();
+ else if (!isNamedModule())
+ reset();
+ }
+
+ void handleColon() {
+ if (isModuleCandidate())
+ Name += ":";
+ else if (!isNamedModule())
+ reset();
+ }
+
+ void handlePeriod() {
+ if (isModuleCandidate())
+ Name += ".";
+ else if (!isNamedModule())
+ reset();
+ }
+
+ void handleSemi() {
+ if (!Name.empty() && isModuleCandidate()) {
+ if (State == InterfaceCandidate)
+ State = NamedModuleInterface;
+ else if (State == ImplementationCandidate)
+ State = NamedModuleImplementation;
+ else
+ llvm_unreachable("Unimaged ModuleDeclState.");
+ } else if (!isNamedModule())
+ reset();
+ }
+
+ void handleMisc() {
+ if (!isNamedModule())
+ reset();
+ }
+
+ bool isModuleCandidate() const {
+ return State == InterfaceCandidate || State == ImplementationCandidate;
+ }
+
+ bool isNamedModule() const {
+ return State == NamedModuleInterface ||
+ State == NamedModuleImplementation;
+ }
+
+ bool isNamedInterface() const { return State == NamedModuleInterface; }
+
+ bool isImplementationUnit() const {
+ return State == NamedModuleImplementation && !getName().contains(':');
+ }
+
+ StringRef getName() const {
+ assert(isNamedModule() && "Can't get name from a non named module");
+ return Name;
+ }
+
+ StringRef getPrimaryName() const {
+ assert(isNamedModule() && "Can't get name from a non named module");
+ return getName().split(':').first;
+ }
+
+ void reset() {
+ Name.clear();
+ State = NotAModuleDecl;
+ }
+
+ private:
+ ModuleDeclState State = NotAModuleDecl;
+ std::string Name;
+ };
+
+ ModuleDeclSeq ModuleDeclState;
/// Whether the module import expects an identifier next. Otherwise,
/// it expects a '.' or ';'.
@@ -388,6 +607,14 @@ private:
/// \#pragma clang assume_nonnull begin.
SourceLocation PragmaAssumeNonNullLoc;
+ /// Set only for preambles which end with an active
+ /// \#pragma clang assume_nonnull begin.
+ ///
+ /// When the preamble is loaded into the main file,
+ /// `PragmaAssumeNonNullLoc` will be set to this to
+ /// replay the unterminated assume_nonnull.
+ SourceLocation PreambleRecordedPragmaAssumeNonNullLoc;
+
/// True if we hit the code-completion point.
bool CodeCompletionReached = false;
@@ -401,7 +628,7 @@ private:
/// The directory that the main file should be considered to occupy,
/// if it does not correspond to a real file (as happens when building a
/// module).
- const DirectoryEntry *MainFileDir = nullptr;
+ OptionalDirectoryEntryRef MainFileDir;
/// The number of bytes that we will initially skip when entering the
/// main file, along with a flag that indicates whether skipping this number
@@ -449,6 +676,8 @@ public:
ElseLoc(ElseLoc) {}
};
+ using IncludedFilesSet = llvm::DenseSet<const FileEntry *>;
+
private:
friend class ASTReader;
friend class MacroArgs;
@@ -486,11 +715,11 @@ private:
bool hasRecordedPreamble() const { return !ConditionalStack.empty(); }
- bool reachedEOFWhileSkipping() const { return SkipInfo.hasValue(); }
+ bool reachedEOFWhileSkipping() const { return SkipInfo.has_value(); }
void clearSkipInfo() { SkipInfo.reset(); }
- llvm::Optional<PreambleSkipInfo> SkipInfo;
+ std::optional<PreambleSkipInfo> SkipInfo;
private:
SmallVector<PPConditionalInfo, 4> ConditionalStack;
@@ -503,7 +732,7 @@ private:
/// Only one of CurLexer, or CurTokenLexer will be non-null.
std::unique_ptr<Lexer> CurLexer;
- /// The current top of the stack what we're lexing from
+ /// The current top of the stack that we're lexing from
/// if not expanding a macro.
///
/// This is an alias for CurLexer.
@@ -514,7 +743,7 @@ private:
///
/// This allows us to implement \#include_next and find directory-specific
/// properties.
- const DirectoryLookup *CurDirLookup = nullptr;
+ ConstSearchDirIterator CurDirLookup = nullptr;
/// The current macro we are expanding, if we are expanding a macro.
///
@@ -522,12 +751,8 @@ private:
std::unique_ptr<TokenLexer> CurTokenLexer;
/// The kind of lexer we're currently working with.
- enum CurLexerKind {
- CLK_Lexer,
- CLK_TokenLexer,
- CLK_CachingLexer,
- CLK_LexAfterModuleImport
- } CurLexerKind = CLK_Lexer;
+ typedef bool (*LexerCallback)(Preprocessor &, Token &);
+ LexerCallback CurLexerCallback = &CLK_Lexer;
/// If the current lexer is for a submodule that is being built, this
/// is that submodule.
@@ -537,21 +762,21 @@ private:
/// \#included, and macros currently being expanded from, not counting
/// CurLexer/CurTokenLexer.
struct IncludeStackInfo {
- enum CurLexerKind CurLexerKind;
+ LexerCallback CurLexerCallback;
Module *TheSubmodule;
std::unique_ptr<Lexer> TheLexer;
PreprocessorLexer *ThePPLexer;
std::unique_ptr<TokenLexer> TheTokenLexer;
- const DirectoryLookup *TheDirLookup;
+ ConstSearchDirIterator TheDirLookup;
// The following constructors are completely useless copies of the default
// versions, only needed to pacify MSVC.
- IncludeStackInfo(enum CurLexerKind CurLexerKind, Module *TheSubmodule,
+ IncludeStackInfo(LexerCallback CurLexerCallback, Module *TheSubmodule,
std::unique_ptr<Lexer> &&TheLexer,
PreprocessorLexer *ThePPLexer,
std::unique_ptr<TokenLexer> &&TheTokenLexer,
- const DirectoryLookup *TheDirLookup)
- : CurLexerKind(std::move(CurLexerKind)),
+ ConstSearchDirIterator TheDirLookup)
+ : CurLexerCallback(std::move(CurLexerCallback)),
TheSubmodule(std::move(TheSubmodule)), TheLexer(std::move(TheLexer)),
ThePPLexer(std::move(ThePPLexer)),
TheTokenLexer(std::move(TheTokenLexer)),
@@ -665,7 +890,7 @@ private:
getActiveModuleMacros(Preprocessor &PP, const IdentifierInfo *II) const {
if (auto *Info = getModuleInfo(PP, II))
return Info->ActiveModuleMacros;
- return None;
+ return std::nullopt;
}
MacroDirective::DefInfo findDirectiveAtLoc(SourceLocation Loc,
@@ -689,7 +914,7 @@ private:
ArrayRef<ModuleMacro*> getOverriddenMacros() const {
if (auto *Info = State.dyn_cast<ModuleMacroInfo*>())
return Info->OverriddenMacros;
- return None;
+ return std::nullopt;
}
void setOverriddenMacros(Preprocessor &PP,
@@ -764,6 +989,13 @@ private:
/// in a submodule.
SubmoduleState *CurSubmoduleState;
+ /// The files that have been included.
+ IncludedFilesSet IncludedFiles;
+
+ /// The set of top-level modules that affected preprocessing, but were not
+ /// imported.
+ llvm::SmallSetVector<Module *, 2> AffectingClangModules;
+
/// The set of known macros exported from modules.
llvm::FoldingSet<ModuleMacro> ModuleMacros;
@@ -786,6 +1018,42 @@ private:
using WarnUnusedMacroLocsTy = llvm::SmallDenseSet<SourceLocation, 32>;
WarnUnusedMacroLocsTy WarnUnusedMacroLocs;
+ /// This is a pair of an optional message and source location used for pragmas
+ /// that annotate macros like pragma clang restrict_expansion and pragma clang
+ /// deprecated. This pair stores the optional message and the location of the
+ /// annotation pragma for use producing diagnostics and notes.
+ using MsgLocationPair = std::pair<std::string, SourceLocation>;
+
+ struct MacroAnnotationInfo {
+ SourceLocation Location;
+ std::string Message;
+ };
+
+ struct MacroAnnotations {
+ std::optional<MacroAnnotationInfo> DeprecationInfo;
+ std::optional<MacroAnnotationInfo> RestrictExpansionInfo;
+ std::optional<SourceLocation> FinalAnnotationLoc;
+
+ static MacroAnnotations makeDeprecation(SourceLocation Loc,
+ std::string Msg) {
+ return MacroAnnotations{MacroAnnotationInfo{Loc, std::move(Msg)},
+ std::nullopt, std::nullopt};
+ }
+
+ static MacroAnnotations makeRestrictExpansion(SourceLocation Loc,
+ std::string Msg) {
+ return MacroAnnotations{
+ std::nullopt, MacroAnnotationInfo{Loc, std::move(Msg)}, std::nullopt};
+ }
+
+ static MacroAnnotations makeFinal(SourceLocation Loc) {
+ return MacroAnnotations{std::nullopt, std::nullopt, Loc};
+ }
+ };
+
+ /// Warning information for macro annotations.
+ llvm::DenseMap<const IdentifierInfo *, MacroAnnotations> AnnotationInfos;
+
/// A "freelist" of MacroArg objects that can be
/// reused for quick allocation.
MacroArgs *MacroArgCache = nullptr;
@@ -872,21 +1140,25 @@ private:
/// invoked (at which point the last position is popped).
std::vector<CachedTokensTy::size_type> BacktrackPositions;
- struct MacroInfoChain {
- MacroInfo MI;
- MacroInfoChain *Next;
- };
+ /// True if \p Preprocessor::SkipExcludedConditionalBlock() is running.
+ /// This is used to guard against calling this function recursively.
+ ///
+ /// See comments at the use-site for more context about why it is needed.
+ bool SkippingExcludedConditionalBlock = false;
- /// MacroInfos are managed as a chain for easy disposal. This is the head
- /// of that list.
- MacroInfoChain *MIChainHead = nullptr;
+ /// Keeps track of skipped range mappings that were recorded while skipping
+ /// excluded conditional directives. It maps the source buffer pointer at
+ /// the beginning of a skipped block, to the number of bytes that should be
+ /// skipped.
+ llvm::DenseMap<const char *, unsigned> RecordedSkippedRanges;
void updateOutOfDateIdentifier(IdentifierInfo &II) const;
public:
Preprocessor(std::shared_ptr<PreprocessorOptions> PPOpts,
- DiagnosticsEngine &diags, LangOptions &opts, SourceManager &SM,
- HeaderSearch &Headers, ModuleLoader &TheModuleLoader,
+ DiagnosticsEngine &diags, const LangOptions &LangOpts,
+ SourceManager &SM, HeaderSearch &Headers,
+ ModuleLoader &TheModuleLoader,
IdentifierInfoLookup *IILookup = nullptr,
bool OwnsHeaderSearch = false,
TranslationUnitKind TUKind = TU_Complete);
@@ -1161,7 +1433,7 @@ public:
auto I = LeafModuleMacros.find(II);
if (I != LeafModuleMacros.end())
return I->second;
- return None;
+ return std::nullopt;
}
/// Get the list of submodules that we're currently building.
@@ -1187,19 +1459,55 @@ public:
/// \}
+ /// Mark the given clang module as affecting the current clang module or translation unit.
+ void markClangModuleAsAffecting(Module *M) {
+ assert(M->isModuleMapModule());
+ if (!BuildingSubmoduleStack.empty()) {
+ if (M != BuildingSubmoduleStack.back().M)
+ BuildingSubmoduleStack.back().M->AffectingClangModules.insert(M);
+ } else {
+ AffectingClangModules.insert(M);
+ }
+ }
+
+ /// Get the set of top-level clang modules that affected preprocessing, but were not
+ /// imported.
+ const llvm::SmallSetVector<Module *, 2> &getAffectingClangModules() const {
+ return AffectingClangModules;
+ }
+
+ /// Mark the file as included.
+ /// Returns true if this is the first time the file was included.
+ bool markIncluded(FileEntryRef File) {
+ HeaderInfo.getFileInfo(File);
+ return IncludedFiles.insert(File).second;
+ }
+
+ /// Return true if this header has already been included.
+ bool alreadyIncluded(FileEntryRef File) const {
+ HeaderInfo.getFileInfo(File);
+ return IncludedFiles.count(File);
+ }
+
+ /// Get the set of included files.
+ IncludedFilesSet &getIncludedFiles() { return IncludedFiles; }
+ const IncludedFilesSet &getIncludedFiles() const { return IncludedFiles; }
+
/// Return the name of the macro defined before \p Loc that has
/// spelling \p Tokens. If there are multiple macros with same spelling,
/// return the last one defined.
StringRef getLastMacroWithSpelling(SourceLocation Loc,
ArrayRef<TokenValue> Tokens) const;
+ /// Get the predefines for this processor.
+ /// Used by some third-party tools to inspect and add predefines (see
+ /// https://github.com/llvm/llvm-project/issues/57483).
const std::string &getPredefines() const { return Predefines; }
/// Set the predefines for this Preprocessor.
///
/// These predefines are automatically injected when parsing the main file.
- void setPredefines(const char *P) { Predefines = P; }
- void setPredefines(StringRef P) { Predefines = std::string(P); }
+ void setPredefines(std::string P) { Predefines = std::move(P); }
/// Return information about the specified preprocessor
/// identifier token.
@@ -1330,8 +1638,8 @@ public:
/// start lexing tokens from it instead of the current buffer.
///
/// Emits a diagnostic, doesn't enter the file, and returns true on error.
- bool EnterSourceFile(FileID FID, const DirectoryLookup *Dir,
- SourceLocation Loc);
+ bool EnterSourceFile(FileID FID, ConstSearchDirIterator Dir,
+ SourceLocation Loc, bool IsFirstIncludeOfFile = true);
/// Add a Macro to the top of the include stack and start lexing
/// tokens from it instead of the current buffer.
@@ -1409,6 +1717,9 @@ public:
/// Lex the next token for this preprocessor.
void Lex(Token &Result);
+ /// Lex all tokens for this preprocessor until (and excluding) end of file.
+ void LexTokensUntilEOF(std::vector<Token> *Tokens = nullptr);
+
/// Lex a token, forming a header-name token if possible.
bool LexHeaderName(Token &Result, bool AllowMacroExpansion = true);
@@ -1585,7 +1896,7 @@ public:
/// Determine whether it's possible for a future call to Lex to produce an
/// annotation token created by a previous call to EnterAnnotationToken.
bool mightHavePendingAnnotationTokens() {
- return CurLexerKind != CLK_Lexer;
+ return CurLexerCallback != CLK_Lexer;
}
/// Update the current token to represent the provided
@@ -1622,8 +1933,8 @@ public:
/// (1-based).
///
/// \returns true if an error occurred, false otherwise.
- bool SetCodeCompletionPoint(const FileEntry *File,
- unsigned Line, unsigned Column);
+ bool SetCodeCompletionPoint(FileEntryRef File, unsigned Line,
+ unsigned Column);
/// Determine if we are performing code completion.
bool isCodeCompletionEnabled() const { return CodeCompletionFile != nullptr; }
@@ -1684,11 +1995,24 @@ public:
PragmaAssumeNonNullLoc = Loc;
}
+ /// Get the location of the recorded unterminated \#pragma clang
+ /// assume_nonnull begin in the preamble, if one exists.
+ ///
+ /// Returns an invalid location if the premable did not end with
+ /// such a pragma active or if there is no recorded preamble.
+ SourceLocation getPreambleRecordedPragmaAssumeNonNullLoc() const {
+ return PreambleRecordedPragmaAssumeNonNullLoc;
+ }
+
+ /// Record the location of the unterminated \#pragma clang
+ /// assume_nonnull begin in the preamble.
+ void setPreambleRecordedPragmaAssumeNonNullLoc(SourceLocation Loc) {
+ PreambleRecordedPragmaAssumeNonNullLoc = Loc;
+ }
+
/// Set the directory in which the main file should be considered
/// to have been found, if it is not a real file.
- void setMainFileDir(const DirectoryEntry *Dir) {
- MainFileDir = Dir;
- }
+ void setMainFileDir(DirectoryEntryRef Dir) { MainFileDir = Dir; }
/// Instruct the preprocessor to skip part of the main source file.
///
@@ -1953,8 +2277,7 @@ public:
/// This either returns the EOF token and returns true, or
/// pops a level off the include stack and returns false, at which point the
/// client should call lex again.
- bool HandleEndOfFile(Token &Result, SourceLocation Loc,
- bool isEndOfMacro = false);
+ bool HandleEndOfFile(Token &Result, bool isEndOfMacro = false);
/// Callback invoked when the current TokenLexer hits the end of its
/// token stream.
@@ -1990,9 +2313,74 @@ public:
unsigned getCounterValue() const { return CounterValue; }
void setCounterValue(unsigned V) { CounterValue = V; }
+ LangOptions::FPEvalMethodKind getCurrentFPEvalMethod() const {
+ assert(CurrentFPEvalMethod != LangOptions::FEM_UnsetOnCommandLine &&
+ "FPEvalMethod should be set either from command line or from the "
+ "target info");
+ return CurrentFPEvalMethod;
+ }
+
+ LangOptions::FPEvalMethodKind getTUFPEvalMethod() const {
+ return TUFPEvalMethod;
+ }
+
+ SourceLocation getLastFPEvalPragmaLocation() const {
+ return LastFPEvalPragmaLocation;
+ }
+
+ void setCurrentFPEvalMethod(SourceLocation PragmaLoc,
+ LangOptions::FPEvalMethodKind Val) {
+ assert(Val != LangOptions::FEM_UnsetOnCommandLine &&
+ "FPEvalMethod should never be set to FEM_UnsetOnCommandLine");
+ // This is the location of the '#pragma float_control" where the
+ // execution state is modifed.
+ LastFPEvalPragmaLocation = PragmaLoc;
+ CurrentFPEvalMethod = Val;
+ TUFPEvalMethod = Val;
+ }
+
+ void setTUFPEvalMethod(LangOptions::FPEvalMethodKind Val) {
+ assert(Val != LangOptions::FEM_UnsetOnCommandLine &&
+ "TUPEvalMethod should never be set to FEM_UnsetOnCommandLine");
+ TUFPEvalMethod = Val;
+ }
+
/// Retrieves the module that we're currently building, if any.
Module *getCurrentModule();
+ /// Retrieves the module whose implementation we're current compiling, if any.
+ Module *getCurrentModuleImplementation();
+
+ /// If we are preprocessing a named module.
+ bool isInNamedModule() const { return ModuleDeclState.isNamedModule(); }
+
+ /// If we are proprocessing a named interface unit.
+ /// Note that a module implementation partition is not considered as an
+ /// named interface unit here although it is importable
+ /// to ease the parsing.
+ bool isInNamedInterfaceUnit() const {
+ return ModuleDeclState.isNamedInterface();
+ }
+
+ /// Get the named module name we're preprocessing.
+ /// Requires we're preprocessing a named module.
+ StringRef getNamedModuleName() const { return ModuleDeclState.getName(); }
+
+ /// If we are implementing an implementation module unit.
+ /// Note that the module implementation partition is not considered as an
+ /// implementation unit.
+ bool isInImplementationUnit() const {
+ return ModuleDeclState.isImplementationUnit();
+ }
+
+ /// If we're importing a standard C++20 Named Modules.
+ bool isInImportingCXXNamedModules() const {
+ // NamedModuleImportPath will be non-empty only if we're importing
+ // Standard C++ named modules.
+ return !NamedModuleImportPath.empty() && getLangOpts().CPlusPlusModules &&
+ !IsAtImport;
+ }
+
/// Allocate a new MacroInfo object with the provided SourceLocation.
MacroInfo *AllocateMacroInfo(SourceLocation L);
@@ -2009,22 +2397,16 @@ public:
/// Given a "foo" or \<foo> reference, look up the indicated file.
///
- /// Returns None on failure. \p isAngled indicates whether the file
+ /// Returns std::nullopt on failure. \p isAngled indicates whether the file
/// reference is for system \#include's or not (i.e. using <> instead of "").
- Optional<FileEntryRef>
+ OptionalFileEntryRef
LookupFile(SourceLocation FilenameLoc, StringRef Filename, bool isAngled,
- const DirectoryLookup *FromDir, const FileEntry *FromFile,
- const DirectoryLookup *&CurDir, SmallVectorImpl<char> *SearchPath,
+ ConstSearchDirIterator FromDir, const FileEntry *FromFile,
+ ConstSearchDirIterator *CurDir, SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath,
ModuleMap::KnownHeader *SuggestedModule, bool *IsMapped,
- bool *IsFrameworkFound, bool SkipCache = false);
-
- /// Get the DirectoryLookup structure used to find the current
- /// FileEntry, if CurLexer is non-null and if applicable.
- ///
- /// This allows us to implement \#include_next and find directory-specific
- /// properties.
- const DirectoryLookup *GetCurDirLookup() { return CurDirLookup; }
+ bool *IsFrameworkFound, bool SkipCache = false,
+ bool OpenFile = true, bool CacheFailures = true);
/// Return true if we're in the top-level file, not in a \#include.
bool isInPrimaryFile() const;
@@ -2043,8 +2425,9 @@ private:
friend void TokenLexer::ExpandFunctionArguments();
void PushIncludeMacroStack() {
- assert(CurLexerKind != CLK_CachingLexer && "cannot push a caching lexer");
- IncludeMacroStack.emplace_back(CurLexerKind, CurLexerSubmodule,
+ assert(CurLexerCallback != CLK_CachingLexer &&
+ "cannot push a caching lexer");
+ IncludeMacroStack.emplace_back(CurLexerCallback, CurLexerSubmodule,
std::move(CurLexer), CurPPLexer,
std::move(CurTokenLexer), CurDirLookup);
CurPPLexer = nullptr;
@@ -2056,7 +2439,7 @@ private:
CurTokenLexer = std::move(IncludeMacroStack.back().TheTokenLexer);
CurDirLookup = IncludeMacroStack.back().TheDirLookup;
CurLexerSubmodule = IncludeMacroStack.back().TheSubmodule;
- CurLexerKind = IncludeMacroStack.back().CurLexerKind;
+ CurLexerCallback = IncludeMacroStack.back().CurLexerCallback;
IncludeMacroStack.pop_back();
}
@@ -2107,6 +2490,13 @@ private:
/// Return true if an error occurs parsing the arg list.
bool ReadMacroParameterList(MacroInfo *MI, Token& LastTok);
+ /// Provide a suggestion for a typoed directive. If there is no typo, then
+ /// just skip suggesting.
+ ///
+ /// \param Tok - Token that represents the directive
+ /// \param Directive - String reference for the directive name
+ void SuggestTypoedDirective(const Token &Tok, StringRef Directive) const;
+
/// We just read a \#if or related directive and decided that the
/// subsequent tokens are in the \#if'd out portion of the
/// file. Lex the rest of the file, until we see an \#endif. If \p
@@ -2139,6 +2529,20 @@ private:
/// If the expression is equivalent to "!defined(X)" return X in IfNDefMacro.
DirectiveEvalResult EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro);
+ /// Process a '__has_include("path")' expression.
+ ///
+ /// Returns true if successful.
+ bool EvaluateHasInclude(Token &Tok, IdentifierInfo *II);
+
+ /// Process '__has_include_next("path")' expression.
+ ///
+ /// Returns true if successful.
+ bool EvaluateHasIncludeNext(Token &Tok, IdentifierInfo *II);
+
+ /// Get the directory and file from which to start \#include_next lookup.
+ std::pair<ConstSearchDirIterator, const FileEntry *>
+ getIncludeNextStart(const Token &IncludeNextTok) const;
+
/// Install the standard preprocessor pragmas:
/// \#pragma GCC poison/system_header/dependency and \#pragma once.
void RegisterBuiltinPragmas();
@@ -2186,7 +2590,7 @@ private:
/// Add a lexer to the top of the include stack and
/// start lexing tokens from it instead of the current buffer.
- void EnterSourceFileWithLexer(Lexer *TheLexer, const DirectoryLookup *Dir);
+ void EnterSourceFileWithLexer(Lexer *TheLexer, ConstSearchDirIterator Dir);
/// Set the FileID for the preprocessor predefines.
void setPredefinesFileID(FileID FID) {
@@ -2250,6 +2654,7 @@ private:
None,
ModuleBegin,
ModuleImport,
+ HeaderUnitImport,
SkippedModuleImport,
Failure,
} Kind;
@@ -2262,23 +2667,23 @@ private:
}
};
- Optional<FileEntryRef> LookupHeaderIncludeOrImport(
- const DirectoryLookup *&CurDir, StringRef &Filename,
+ OptionalFileEntryRef LookupHeaderIncludeOrImport(
+ ConstSearchDirIterator *CurDir, StringRef &Filename,
SourceLocation FilenameLoc, CharSourceRange FilenameRange,
const Token &FilenameTok, bool &IsFrameworkFound, bool IsImportDecl,
- bool &IsMapped, const DirectoryLookup *LookupFrom,
+ bool &IsMapped, ConstSearchDirIterator LookupFrom,
const FileEntry *LookupFromFile, StringRef &LookupFilename,
SmallVectorImpl<char> &RelativePath, SmallVectorImpl<char> &SearchPath,
ModuleMap::KnownHeader &SuggestedModule, bool isAngled);
// File inclusion.
void HandleIncludeDirective(SourceLocation HashLoc, Token &Tok,
- const DirectoryLookup *LookupFrom = nullptr,
+ ConstSearchDirIterator LookupFrom = nullptr,
const FileEntry *LookupFromFile = nullptr);
ImportAction
HandleHeaderIncludeOrImport(SourceLocation HashLoc, Token &IncludeTok,
Token &FilenameTok, SourceLocation EndLoc,
- const DirectoryLookup *LookupFrom = nullptr,
+ ConstSearchDirIterator LookupFrom = nullptr,
const FileEntry *LookupFromFile = nullptr);
void HandleIncludeNextDirective(SourceLocation HashLoc, Token &Tok);
void HandleIncludeMacrosDirective(SourceLocation HashLoc, Token &Tok);
@@ -2291,13 +2696,13 @@ public:
/// \c false if the module appears to be usable.
static bool checkModuleIsAvailable(const LangOptions &LangOpts,
const TargetInfo &TargetInfo,
- DiagnosticsEngine &Diags, Module *M);
+ const Module &M, DiagnosticsEngine &Diags);
// Module inclusion testing.
/// Find the module that owns the source or header file that
/// \p Loc points to. If the location is in a file that was included
/// into a module, or is outside any module, returns nullptr.
- Module *getModuleForLocation(SourceLocation Loc);
+ Module *getModuleForLocation(SourceLocation Loc, bool AllowTextual);
/// We want to produce a diagnostic at location IncLoc concerning an
/// unreachable effect at location MLoc (eg, where a desired entity was
@@ -2313,8 +2718,8 @@ public:
/// \return A file that can be #included to provide the desired effect. Null
/// if no such file could be determined or if a #include is not
/// appropriate (eg, if a module should be imported instead).
- const FileEntry *getHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
- SourceLocation MLoc);
+ OptionalFileEntryRef getHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
+ SourceLocation MLoc);
bool isRecordingPreamble() const {
return PreambleConditionalStack.isRecording();
@@ -2332,14 +2737,14 @@ public:
PreambleConditionalStack.setStack(s);
}
- void setReplayablePreambleConditionalStack(ArrayRef<PPConditionalInfo> s,
- llvm::Optional<PreambleSkipInfo> SkipInfo) {
+ void setReplayablePreambleConditionalStack(
+ ArrayRef<PPConditionalInfo> s, std::optional<PreambleSkipInfo> SkipInfo) {
PreambleConditionalStack.startReplaying();
PreambleConditionalStack.setStack(s);
PreambleConditionalStack.SkipInfo = SkipInfo;
}
- llvm::Optional<PreambleSkipInfo> getPreambleSkipInfo() const {
+ std::optional<PreambleSkipInfo> getPreambleSkipInfo() const {
return PreambleConditionalStack.SkipInfo;
}
@@ -2364,14 +2769,12 @@ private:
// Pragmas.
void HandlePragmaDirective(PragmaIntroducer Introducer);
- void ResolvePragmaIncludeInstead(SourceLocation Location) const;
public:
void HandlePragmaOnce(Token &OnceTok);
void HandlePragmaMark(Token &MarkTok);
void HandlePragmaPoison();
void HandlePragmaSystemHeader(Token &SysHeaderTok);
- void HandlePragmaIncludeInstead(Token &Tok);
void HandlePragmaDependency(Token &DependencyTok);
void HandlePragmaPushMacro(Token &Tok);
void HandlePragmaPopMacro(Token &Tok);
@@ -2388,14 +2791,141 @@ public:
/// warnings.
void markMacroAsUsed(MacroInfo *MI);
+ void addMacroDeprecationMsg(const IdentifierInfo *II, std::string Msg,
+ SourceLocation AnnotationLoc) {
+ auto Annotations = AnnotationInfos.find(II);
+ if (Annotations == AnnotationInfos.end())
+ AnnotationInfos.insert(std::make_pair(
+ II,
+ MacroAnnotations::makeDeprecation(AnnotationLoc, std::move(Msg))));
+ else
+ Annotations->second.DeprecationInfo =
+ MacroAnnotationInfo{AnnotationLoc, std::move(Msg)};
+ }
+
+ void addRestrictExpansionMsg(const IdentifierInfo *II, std::string Msg,
+ SourceLocation AnnotationLoc) {
+ auto Annotations = AnnotationInfos.find(II);
+ if (Annotations == AnnotationInfos.end())
+ AnnotationInfos.insert(
+ std::make_pair(II, MacroAnnotations::makeRestrictExpansion(
+ AnnotationLoc, std::move(Msg))));
+ else
+ Annotations->second.RestrictExpansionInfo =
+ MacroAnnotationInfo{AnnotationLoc, std::move(Msg)};
+ }
+
+ void addFinalLoc(const IdentifierInfo *II, SourceLocation AnnotationLoc) {
+ auto Annotations = AnnotationInfos.find(II);
+ if (Annotations == AnnotationInfos.end())
+ AnnotationInfos.insert(
+ std::make_pair(II, MacroAnnotations::makeFinal(AnnotationLoc)));
+ else
+ Annotations->second.FinalAnnotationLoc = AnnotationLoc;
+ }
+
+ const MacroAnnotations &getMacroAnnotations(const IdentifierInfo *II) const {
+ return AnnotationInfos.find(II)->second;
+ }
+
+ void emitMacroExpansionWarnings(const Token &Identifier,
+ bool IsIfnDef = false) const {
+ IdentifierInfo *Info = Identifier.getIdentifierInfo();
+ if (Info->isDeprecatedMacro())
+ emitMacroDeprecationWarning(Identifier);
+
+ if (Info->isRestrictExpansion() &&
+ !SourceMgr.isInMainFile(Identifier.getLocation()))
+ emitRestrictExpansionWarning(Identifier);
+
+ if (!IsIfnDef) {
+ if (Info->getName() == "INFINITY" && getLangOpts().NoHonorInfs)
+ emitRestrictInfNaNWarning(Identifier, 0);
+ if (Info->getName() == "NAN" && getLangOpts().NoHonorNaNs)
+ emitRestrictInfNaNWarning(Identifier, 1);
+ }
+ }
+
+ static void processPathForFileMacro(SmallVectorImpl<char> &Path,
+ const LangOptions &LangOpts,
+ const TargetInfo &TI);
+
+ static void processPathToFileName(SmallVectorImpl<char> &FileName,
+ const PresumedLoc &PLoc,
+ const LangOptions &LangOpts,
+ const TargetInfo &TI);
+
private:
- Optional<unsigned>
- getSkippedRangeForExcludedConditionalBlock(SourceLocation HashLoc);
+ void emitMacroDeprecationWarning(const Token &Identifier) const;
+ void emitRestrictExpansionWarning(const Token &Identifier) const;
+ void emitFinalMacroWarning(const Token &Identifier, bool IsUndef) const;
+ void emitRestrictInfNaNWarning(const Token &Identifier,
+ unsigned DiagSelection) const;
+
+ /// This boolean state keeps track if the current scanned token (by this PP)
+ /// is in an "-Wunsafe-buffer-usage" opt-out region. Assuming PP scans a
+ /// translation unit in a linear order.
+ bool InSafeBufferOptOutRegion = false;
+
+ /// Hold the start location of the current "-Wunsafe-buffer-usage" opt-out
+ /// region if PP is currently in such a region. Hold undefined value
+ /// otherwise.
+ SourceLocation CurrentSafeBufferOptOutStart; // It is used to report the start location of an never-closed region.
+
+ // An ordered sequence of "-Wunsafe-buffer-usage" opt-out regions in one
+ // translation unit. Each region is represented by a pair of start and end
+ // locations. A region is "open" if its' start and end locations are
+ // identical.
+ SmallVector<std::pair<SourceLocation, SourceLocation>, 8> SafeBufferOptOutMap;
- /// Contains the currently active skipped range mappings for skipping excluded
- /// conditional directives.
- ExcludedPreprocessorDirectiveSkipMapping
- *ExcludedConditionalDirectiveSkipMappings;
+public:
+ /// \return true iff the given `Loc` is in a "-Wunsafe-buffer-usage" opt-out
+ /// region. This `Loc` must be a source location that has been pre-processed.
+ bool isSafeBufferOptOut(const SourceManager&SourceMgr, const SourceLocation &Loc) const;
+
+ /// Alter the state of whether this PP currently is in a
+ /// "-Wunsafe-buffer-usage" opt-out region.
+ ///
+ /// \param isEnter true if this PP is entering a region; otherwise, this PP
+ /// is exiting a region
+ /// \param Loc the location of the entry or exit of a
+ /// region
+ /// \return true iff it is INVALID to enter or exit a region, i.e.,
+ /// attempt to enter a region before exiting a previous region, or exiting a
+ /// region that PP is not currently in.
+ bool enterOrExitSafeBufferOptOutRegion(bool isEnter,
+ const SourceLocation &Loc);
+
+ /// \return true iff this PP is currently in a "-Wunsafe-buffer-usage"
+ /// opt-out region
+ bool isPPInSafeBufferOptOutRegion();
+
+ /// \param StartLoc output argument. It will be set to the start location of
+ /// the current "-Wunsafe-buffer-usage" opt-out region iff this function
+ /// returns true.
+ /// \return true iff this PP is currently in a "-Wunsafe-buffer-usage"
+ /// opt-out region
+ bool isPPInSafeBufferOptOutRegion(SourceLocation &StartLoc);
+
+private:
+ /// Helper functions to forward lexing to the actual lexer. They all share the
+ /// same signature.
+ static bool CLK_Lexer(Preprocessor &P, Token &Result) {
+ return P.CurLexer->Lex(Result);
+ }
+ static bool CLK_TokenLexer(Preprocessor &P, Token &Result) {
+ return P.CurTokenLexer->Lex(Result);
+ }
+ static bool CLK_CachingLexer(Preprocessor &P, Token &Result) {
+ P.CachingLex(Result);
+ return true;
+ }
+ static bool CLK_DependencyDirectivesLexer(Preprocessor &P, Token &Result) {
+ return P.CurLexer->LexDependencyDirectiveToken(Result);
+ }
+ static bool CLK_LexAfterModuleImport(Preprocessor &P, Token &Result) {
+ return P.LexAfterModuleImport(Result);
+ }
};
/// Abstract base class that describes a handler that will receive
diff --git a/contrib/llvm-project/clang/include/clang/Lex/PreprocessorExcludedConditionalDirectiveSkipMapping.h b/contrib/llvm-project/clang/include/clang/Lex/PreprocessorExcludedConditionalDirectiveSkipMapping.h
deleted file mode 100644
index 1a0d5ed57b28..000000000000
--- a/contrib/llvm-project/clang/include/clang/Lex/PreprocessorExcludedConditionalDirectiveSkipMapping.h
+++ /dev/null
@@ -1,30 +0,0 @@
-//===- PreprocessorExcludedConditionalDirectiveSkipMapping.h - --*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LEX_PREPROCESSOR_EXCLUDED_COND_DIRECTIVE_SKIP_MAPPING_H
-#define LLVM_CLANG_LEX_PREPROCESSOR_EXCLUDED_COND_DIRECTIVE_SKIP_MAPPING_H
-
-#include "clang/Basic/LLVM.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/Support/MemoryBuffer.h"
-
-namespace clang {
-
-/// A mapping from an offset into a buffer to the number of bytes that can be
-/// skipped by the preprocessor when skipping over excluded conditional
-/// directive ranges.
-using PreprocessorSkippedRangeMapping = llvm::DenseMap<unsigned, unsigned>;
-
-/// The datastructure that holds the mapping between the active memory buffers
-/// and the individual skip mappings.
-using ExcludedPreprocessorDirectiveSkipMapping =
- llvm::DenseMap<const char *, const PreprocessorSkippedRangeMapping *>;
-
-} // end namespace clang
-
-#endif // LLVM_CLANG_LEX_PREPROCESSOR_EXCLUDED_COND_DIRECTIVE_SKIP_MAPPING_H
diff --git a/contrib/llvm-project/clang/include/clang/Lex/PreprocessorLexer.h b/contrib/llvm-project/clang/include/clang/Lex/PreprocessorLexer.h
index b43197a6031c..d71fe708ab20 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/PreprocessorLexer.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/PreprocessorLexer.h
@@ -14,13 +14,12 @@
#ifndef LLVM_CLANG_LEX_PREPROCESSORLEXER_H
#define LLVM_CLANG_LEX_PREPROCESSORLEXER_H
+#include "clang/Basic/FileEntry.h"
#include "clang/Basic/SourceLocation.h"
-#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/MultipleIncludeOpt.h"
#include "clang/Lex/Token.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringMap.h"
#include <cassert>
namespace clang {
@@ -76,13 +75,6 @@ protected:
/// we are currently in.
SmallVector<PPConditionalInfo, 4> ConditionalStack;
- struct IncludeInfo {
- const FileEntry *File;
- SourceLocation Location;
- };
- // A complete history of all the files included by the current file.
- llvm::StringMap<IncludeInfo> IncludeHistory;
-
PreprocessorLexer() : FID() {}
PreprocessorLexer(Preprocessor *pp, FileID fid);
virtual ~PreprocessorLexer() = default;
@@ -165,7 +157,7 @@ public:
/// getFileEntry - Return the FileEntry corresponding to this FileID. Like
/// getFileID(), this only works for lexers with attached preprocessors.
- const FileEntry *getFileEntry() const;
+ OptionalFileEntryRef getFileEntry() const;
/// Iterator that traverses the current stack of preprocessor
/// conditional directives (\#if/\#ifdef/\#ifndef).
@@ -184,15 +176,6 @@ public:
ConditionalStack.clear();
ConditionalStack.append(CL.begin(), CL.end());
}
-
- void addInclude(StringRef Filename, const FileEntry &File,
- SourceLocation Location) {
- IncludeHistory.insert({Filename, {&File, Location}});
- }
-
- const llvm::StringMap<IncludeInfo> &getIncludeHistory() const {
- return IncludeHistory;
- }
};
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Lex/PreprocessorOptions.h b/contrib/llvm-project/clang/include/clang/Lex/PreprocessorOptions.h
index a7aabc3e1df2..f841e4a028df 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/PreprocessorOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/PreprocessorOptions.h
@@ -10,13 +10,15 @@
#define LLVM_CLANG_LEX_PREPROCESSOROPTIONS_H_
#include "clang/Basic/BitmaskEnum.h"
+#include "clang/Basic/FileEntry.h"
#include "clang/Basic/LLVM.h"
-#include "clang/Lex/PreprocessorExcludedConditionalDirectiveSkipMapping.h"
+#include "clang/Lex/DependencyDirectivesScanner.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include <functional>
#include <map>
#include <memory>
+#include <optional>
#include <set>
#include <string>
#include <utility>
@@ -67,10 +69,16 @@ public:
std::vector<std::string> Includes;
std::vector<std::string> MacroIncludes;
+ /// Perform extra checks when loading PCM files for mutable file systems.
+ bool ModulesCheckRelocated = true;
+
/// Initialize the preprocessor with the compiler and target specific
/// predefines.
bool UsePredefines = true;
+ /// Indicates whether to predefine target OS macros.
+ bool DefineTargetOSMacros = false;
+
/// Whether we should maintain a detailed record of all macro
/// definitions and expansions.
bool DetailedRecord = false;
@@ -128,7 +136,8 @@ public:
///
/// When the lexer is done, one of the things that need to be preserved is the
/// conditional #if stack, so the ASTWriter/ASTReader can save/restore it when
- /// processing the rest of the file.
+ /// processing the rest of the file. Similarly, we track an unterminated
+ /// #pragma assume_nonnull.
bool GeneratePreamble = false;
/// Whether to write comment locations into the PCH when building it.
@@ -199,13 +208,18 @@ public:
/// build it again.
std::shared_ptr<FailedModulesSet> FailedModules;
- /// Contains the currently active skipped range mappings for skipping excluded
- /// conditional directives.
+ /// Function for getting the dependency preprocessor directives of a file.
///
- /// The pointer is passed to the Preprocessor when it's constructed. The
- /// pointer is unowned, the client is responsible for its lifetime.
- ExcludedPreprocessorDirectiveSkipMapping
- *ExcludedConditionalDirectiveSkipMappings = nullptr;
+ /// These are directives derived from a special form of lexing where the
+ /// source input is scanned for the preprocessor directives that might have an
+ /// effect on the dependencies for a compilation unit.
+ ///
+ /// Enables a client to cache the directives for a file and provide them
+ /// across multiple compiler invocations.
+ /// FIXME: Allow returning an error.
+ std::function<std::optional<ArrayRef<dependency_directives_scan::Directive>>(
+ FileEntryRef)>
+ DependencyDirectivesForFile;
/// Set up preprocessor for RunAnalysis action.
bool SetUpStaticAnalyzer = false;
@@ -213,6 +227,9 @@ public:
/// Prevents intended crashes when using #pragma clang __debug. For testing.
bool DisablePragmaDebugCrash = false;
+ /// If set, the UNIX timestamp specified by SOURCE_DATE_EPOCH.
+ std::optional<uint64_t> SourceDateEpoch;
+
public:
PreprocessorOptions() : PrecompiledPreambleBytes(0, false) {}
diff --git a/contrib/llvm-project/clang/include/clang/Lex/Token.h b/contrib/llvm-project/clang/include/clang/Lex/Token.h
index 00fbe6d18f72..1409e2c58b55 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/Token.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/Token.h
@@ -15,6 +15,7 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/TokenKinds.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>
@@ -99,9 +100,8 @@ public:
bool isOneOf(tok::TokenKind K1, tok::TokenKind K2) const {
return is(K1) || is(K2);
}
- template <typename... Ts>
- bool isOneOf(tok::TokenKind K1, tok::TokenKind K2, Ts... Ks) const {
- return is(K1) || isOneOf(K2, Ks...);
+ template <typename... Ts> bool isOneOf(tok::TokenKind K1, Ts... Ks) const {
+ return is(K1) || isOneOf(Ks...);
}
/// Return true if this is a raw identifier (when lexing
@@ -117,8 +117,13 @@ public:
}
/// Return true if this is any of tok::annot_* kind tokens.
- bool isAnnotation() const {
- return tok::isAnnotation(getKind());
+ bool isAnnotation() const { return tok::isAnnotation(getKind()); }
+
+ /// Return true if the token is a keyword that is parsed in the same
+ /// position as a standard attribute, but that has semantic meaning
+ /// and so cannot be a true attribute.
+ bool isRegularKeywordAttribute() const {
+ return tok::isRegularKeywordAttribute(getKind());
}
/// Return a source location identifier for the specified
@@ -176,6 +181,8 @@ public:
Loc = SourceLocation().getRawEncoding();
}
+ bool hasPtrData() const { return PtrData != nullptr; }
+
IdentifierInfo *getIdentifierInfo() const {
assert(isNot(tok::raw_identifier) &&
"getIdentifierInfo() on a tok::raw_identifier token!");
@@ -329,6 +336,12 @@ struct PPConditionalInfo {
bool FoundElse;
};
+// Extra information needed for annonation tokens.
+struct PragmaLoopHintInfo {
+ Token PragmaName;
+ Token Option;
+ ArrayRef<Token> Toks;
+};
} // end namespace clang
#endif // LLVM_CLANG_LEX_TOKEN_H
diff --git a/contrib/llvm-project/clang/include/clang/Lex/VariadicMacroSupport.h b/contrib/llvm-project/clang/include/clang/Lex/VariadicMacroSupport.h
index 119f02201fc6..cf86a00c6d66 100644
--- a/contrib/llvm-project/clang/include/clang/Lex/VariadicMacroSupport.h
+++ b/contrib/llvm-project/clang/include/clang/Lex/VariadicMacroSupport.h
@@ -129,11 +129,16 @@ namespace clang {
// the function-like macro's new replacement list.
int NumOfTokensPriorToVAOpt = -1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned LeadingSpaceForStringifiedToken : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned StringifyBefore : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned CharifyBefore : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned BeginsWithPlaceholder : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned EndsWithPlaceholder : 1;
bool hasStringifyBefore() const {
diff --git a/contrib/llvm-project/clang/include/clang/Parse/LoopHint.h b/contrib/llvm-project/clang/include/clang/Parse/LoopHint.h
index 6e363f72b658..cec5605ea361 100644
--- a/contrib/llvm-project/clang/include/clang/Parse/LoopHint.h
+++ b/contrib/llvm-project/clang/include/clang/Parse/LoopHint.h
@@ -9,13 +9,13 @@
#ifndef LLVM_CLANG_PARSE_LOOPHINT_H
#define LLVM_CLANG_PARSE_LOOPHINT_H
-#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/SourceLocation.h"
-#include "clang/Sema/Ownership.h"
-#include "clang/Sema/ParsedAttr.h"
namespace clang {
+class Expr;
+struct IdentifierLoc;
+
/// Loop optimization hint for loop and unroll pragmas.
struct LoopHint {
// Source range of the directive.
@@ -23,20 +23,18 @@ struct LoopHint {
// Identifier corresponding to the name of the pragma. "loop" for
// "#pragma clang loop" directives and "unroll" for "#pragma unroll"
// hints.
- IdentifierLoc *PragmaNameLoc;
+ IdentifierLoc *PragmaNameLoc = nullptr;
// Name of the loop hint. Examples: "unroll", "vectorize". In the
// "#pragma unroll" and "#pragma nounroll" cases, this is identical to
// PragmaNameLoc.
- IdentifierLoc *OptionLoc;
+ IdentifierLoc *OptionLoc = nullptr;
// Identifier for the hint state argument. If null, then the state is
// default value such as for "#pragma unroll".
- IdentifierLoc *StateLoc;
+ IdentifierLoc *StateLoc = nullptr;
// Expression for the hint argument if it exists, null otherwise.
- Expr *ValueExpr;
+ Expr *ValueExpr = nullptr;
- LoopHint()
- : PragmaNameLoc(nullptr), OptionLoc(nullptr), StateLoc(nullptr),
- ValueExpr(nullptr) {}
+ LoopHint() = default;
};
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Parse/Parser.h b/contrib/llvm-project/clang/include/clang/Parse/Parser.h
index 8eb3f9029d9d..ffbde370e8f9 100644
--- a/contrib/llvm-project/clang/include/clang/Parse/Parser.h
+++ b/contrib/llvm-project/clang/include/clang/Parse/Parser.h
@@ -13,21 +13,15 @@
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
-#include "clang/AST/Availability.h"
-#include "clang/Basic/BitmaskEnum.h"
-#include "clang/Basic/OpenMPKinds.h"
+#include "clang/Basic/OpenACCKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
-#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
-#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
-#include <memory>
+#include <optional>
#include <stack>
namespace clang {
@@ -60,7 +54,9 @@ namespace clang {
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class ParsingOpenMPDirectiveRAII;
+ friend class ParsingOpenACCDirectiveRAII;
friend class InMessageExpressionRAIIObject;
+ friend class OffsetOfStateRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
@@ -155,7 +151,7 @@ class Parser : public CodeCompletionHandler {
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
- *Ident_generated_declaration;
+ *Ident_generated_declaration, *Ident_USR;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
@@ -181,9 +177,11 @@ class Parser : public CodeCompletionHandler {
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
+ std::unique_ptr<PragmaHandler> OpenACCHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
+ std::unique_ptr<PragmaHandler> FPEvalMethodHandler;
std::unique_ptr<PragmaHandler> FloatControlHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
@@ -193,9 +191,13 @@ class Parser : public CodeCompletionHandler {
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
+ std::unique_ptr<PragmaHandler> MSStrictGuardStackCheck;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
+ std::unique_ptr<PragmaHandler> MSFunction;
std::unique_ptr<PragmaHandler> MSOptimize;
+ std::unique_ptr<PragmaHandler> MSFenvAccess;
+ std::unique_ptr<PragmaHandler> MSAllocText;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
@@ -211,6 +213,7 @@ class Parser : public CodeCompletionHandler {
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler;
+ std::unique_ptr<PragmaHandler> RISCVPragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
@@ -229,6 +232,29 @@ class Parser : public CodeCompletionHandler {
/// Parsing OpenMP directive mode.
bool OpenMPDirectiveParsing = false;
+ /// Parsing OpenACC directive mode.
+ bool OpenACCDirectiveParsing = false;
+
+ /// Currently parsing a situation where an OpenACC array section could be
+ /// legal, such as a 'var-list'.
+ bool AllowOpenACCArraySections = false;
+
+ /// RAII object to set reset OpenACC parsing a context where Array Sections
+ /// are allowed.
+ class OpenACCArraySectionRAII {
+ Parser &P;
+
+ public:
+ OpenACCArraySectionRAII(Parser &P) : P(P) {
+ assert(!P.AllowOpenACCArraySections);
+ P.AllowOpenACCArraySections = true;
+ }
+ ~OpenACCArraySectionRAII() {
+ assert(P.AllowOpenACCArraySections);
+ P.AllowOpenACCArraySections = false;
+ }
+ };
+
/// When true, we are directly inside an Objective-C message
/// send expression.
///
@@ -241,6 +267,8 @@ class Parser : public CodeCompletionHandler {
/// function call.
bool CalledSignatureHelp = false;
+ Sema::OffsetOfKind OffsetOfState = Sema::OffsetOfKind::OOK_Outside;
+
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
@@ -408,18 +436,15 @@ class Parser : public CodeCompletionHandler {
/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
- /// This context permits declarations in language modes where declarations
- /// are not statements.
- AllowDeclarationsInC = 0x1,
/// This context permits standalone OpenMP directives.
- AllowStandaloneOpenMPDirectives = 0x2,
+ AllowStandaloneOpenMPDirectives = 0x1,
/// This context is at the top level of a GNU statement expression.
- InStmtExpr = 0x4,
+ InStmtExpr = 0x2,
/// The context of a regular substatement.
SubStmt = 0,
/// The context of a compound-statement.
- Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,
+ Compound = AllowStandaloneOpenMPDirectives,
LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
@@ -445,7 +470,9 @@ public:
return Actions.incrementMSManglingNumber();
}
- Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
+ ObjCContainerDecl *getObjCDeclContext() const {
+ return Actions.getObjCDeclContext();
+ }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
@@ -456,6 +483,9 @@ public:
typedef Sema::FullExprArg FullExprArg;
+ /// A SmallVector of statements.
+ typedef SmallVector<Stmt *, 32> StmtVector;
+
// Parsing methods.
/// Initialize - Warm up the parser.
@@ -463,14 +493,17 @@ public:
void Initialize();
/// Parse the first top-level declaration in a translation unit.
- bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
+ bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result,
+ Sema::ModuleImportState &ImportState);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
- bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
+ bool ParseTopLevelDecl(DeclGroupPtrTy &Result,
+ Sema::ModuleImportState &ImportState);
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
- return ParseTopLevelDecl(Result);
+ Sema::ModuleImportState IS = Sema::ModuleImportState::NotACXX20Module;
+ return ParseTopLevelDecl(Result, IS);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
@@ -653,9 +686,9 @@ private:
return PrevTokLocation;
}
- ///\ brief When we are consuming a code-completion token without having
- /// matched specific position in the grammar, provide code-completion results
- /// based on context.
+ /// When we are consuming a code-completion token without having matched
+ /// specific position in the grammar, provide code-completion results based
+ /// on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
@@ -674,7 +707,8 @@ private:
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
- Kind == tok::annot_module_end || Kind == tok::annot_module_include;
+ Kind == tok::annot_module_end || Kind == tok::annot_module_include ||
+ Kind == tok::annot_repl_input_end;
}
/// Checks if the \p Level is valid for use in a fold expression.
@@ -715,6 +749,14 @@ private:
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
+ bool HandlePragmaMSStrictGuardStackCheck(StringRef PragmaName,
+ SourceLocation PragmaLocation);
+ bool HandlePragmaMSFunction(StringRef PragmaName,
+ SourceLocation PragmaLocation);
+ bool HandlePragmaMSAllocText(StringRef PragmaName,
+ SourceLocation PragmaLocation);
+ bool HandlePragmaMSOptimize(StringRef PragmaName,
+ SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
@@ -749,6 +791,10 @@ private:
void HandlePragmaFEnvRound();
/// Handle the annotation token produced for
+ /// #pragma STDC CX_LIMITED_RANGE...
+ void HandlePragmaCXLimitedRange();
+
+ /// Handle the annotation token produced for
/// #pragma float_control
void HandlePragmaFloatControl();
@@ -838,16 +884,20 @@ private:
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
- bool TryAnnotateTypeOrScopeToken();
- bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
- bool IsNewScope);
+ bool
+ TryAnnotateTypeOrScopeToken(ImplicitTypenameContext AllowImplicitTypename =
+ ImplicitTypenameContext::No);
+ bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(
+ CXXScopeSpec &SS, bool IsNewScope,
+ ImplicitTypenameContext AllowImplicitTypename);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
bool MightBeCXXScopeToken() {
- return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
- (Tok.is(tok::annot_template_id) &&
- NextToken().is(tok::coloncolon)) ||
- Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super);
+ return getLangOpts().CPlusPlus &&
+ (Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
+ (Tok.is(tok::annot_template_id) &&
+ NextToken().is(tok::coloncolon)) ||
+ Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super));
}
bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext);
@@ -866,7 +916,11 @@ private:
/// Annotation was successful.
ANK_Success
};
- AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);
+
+ AnnotatedNameKind
+ TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr,
+ ImplicitTypenameContext AllowImplicitTypename =
+ ImplicitTypenameContext::No);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
@@ -996,18 +1050,18 @@ private:
/// back.
class ObjCDeclContextSwitch {
Parser &P;
- Decl *DC;
+ ObjCContainerDecl *DC;
SaveAndRestore<bool> WithinObjCContainer;
public:
explicit ObjCDeclContextSwitch(Parser &p)
: P(p), DC(p.getObjCDeclContext()),
WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
if (DC)
- P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
+ P.Actions.ActOnObjCTemporaryExitContainerContext(DC);
}
~ObjCDeclContextSwitch() {
if (DC)
- P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
+ P.Actions.ActOnObjCReenterContainerContext(DC);
}
};
@@ -1028,7 +1082,7 @@ private:
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
- bool ExpectAndConsumeSemi(unsigned DiagID);
+ bool ExpectAndConsumeSemi(unsigned DiagID , StringRef TokenUsed = "");
/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
@@ -1055,9 +1109,9 @@ private:
StmtExprBegin,
/// A '}' ')' ending a statement-expression.
StmtExprEnd,
- /// A '[' '[' beginning a C++11 or C2x attribute.
+ /// A '[' '[' beginning a C++11 or C23 attribute.
AttrBegin,
- /// A ']' ']' ending a C++11 or C2x attribute.
+ /// A ']' ']' ending a C++11 or C23 attribute.
AttrEnd,
/// A '::' '*' forming a C++ pointer-to-member declaration.
MemberPtr,
@@ -1152,7 +1206,7 @@ private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
Scope *CurScope;
- unsigned OldFlags;
+ unsigned OldFlags = 0;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
@@ -1203,7 +1257,7 @@ public:
/// returns false.
bool SkipUntil(tok::TokenKind T,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
- return SkipUntil(llvm::makeArrayRef(T), Flags);
+ return SkipUntil(llvm::ArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
@@ -1474,8 +1528,7 @@ private:
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
- ParsedTemplateInfo()
- : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }
+ ParsedTemplateInfo() : Kind(NonTemplate), TemplateParams(nullptr) {}
ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
bool isSpecialization,
@@ -1540,7 +1593,7 @@ private:
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
- ParsedAttributes &AccessAttrs,
+ const ParsedAttributesView &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
@@ -1576,15 +1629,16 @@ private:
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
- DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
+ DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributes &DeclAttrs,
+ ParsedAttributes &DeclSpecAttrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
- ParsedAttributesWithRange &attrs,
- ParsingDeclSpec *DS = nullptr,
- AccessSpecifier AS = AS_none);
- DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
+ ParsedAttributes &DeclAttrs, ParsedAttributes &DeclSpecAttrs,
+ ParsingDeclSpec *DS = nullptr, AccessSpecifier AS = AS_none);
+ DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributes &Attrs,
+ ParsedAttributes &DeclSpecAttrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
@@ -1599,7 +1653,8 @@ private:
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
- DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
+ DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributes &DeclAttrs,
+ ParsedAttributes &DeclSpecAttrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
@@ -1610,11 +1665,12 @@ private:
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
- void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
+ void HelperActionsForIvarDeclarations(ObjCContainerDecl *interfaceDecl,
+ SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
- void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
+ void ParseObjCClassInstanceVariables(ObjCContainerDecl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
@@ -1736,7 +1792,8 @@ public:
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
- ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
+ ExprResult ParseConstantExpression();
+ ExprResult ParseArrayBoundExpression();
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
ExprResult
@@ -1750,8 +1807,12 @@ public:
bool IsUnevaluated);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
+ ExprResult ParseUnevaluatedStringLiteralExpression();
private:
+ ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral,
+ bool Unevaluated);
+
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
@@ -1808,19 +1869,16 @@ private:
ParsedType &CastTy,
SourceRange &CastRange);
- typedef SmallVector<SourceLocation, 20> CommaLocsTy;
-
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
- SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
- llvm::function_ref<void()>());
+ llvm::function_ref<void()>(),
+ bool FailImmediatelyOnInvalidExpr = false,
+ bool EarlyTypoCorrection = false);
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
- bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
- SmallVectorImpl<SourceLocation> &CommaLocs);
-
+ bool ParseSimpleExpressionList(SmallVectorImpl<Expr *> &Exprs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
@@ -1958,7 +2016,8 @@ private:
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
- bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
+ bool ParseCXXTypeSpecifierSeq(
+ DeclSpec &DS, DeclaratorContext Context = DeclaratorContext::TypeName);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
@@ -1975,8 +2034,11 @@ private:
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
+ bool MissingOK,
ForRangeInfo *FRI = nullptr,
bool EnterForConditionScope = false);
+ DeclGroupPtrTy ParseAliasDeclarationInInitStatement(DeclaratorContext Context,
+ ParsedAttributes &Attrs);
//===--------------------------------------------------------------------===//
// C++ Coroutines
@@ -2041,13 +2103,8 @@ private:
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
- /// A SmallVector of statements, with stack size 32 (as that is the only one
- /// used.)
- typedef SmallVector<Stmt*, 32> StmtVector;
- /// A SmallVector of expressions, with stack size 12 (the maximum used.)
+ /// A SmallVector of expressions.
typedef SmallVector<Expr*, 12> ExprVector;
- /// A SmallVector of types.
- typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
@@ -2056,12 +2113,11 @@ private:
StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
- StmtVector &Stmts,
- ParsedStmtContext StmtCtx,
- SourceLocation *TrailingElseLoc,
- ParsedAttributesWithRange &Attrs);
+ StmtVector &Stmts, ParsedStmtContext StmtCtx,
+ SourceLocation *TrailingElseLoc, ParsedAttributes &DeclAttrs,
+ ParsedAttributes &DeclSpecAttrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
- StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
+ StmtResult ParseLabeledStatement(ParsedAttributes &Attrs,
ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase = false,
@@ -2071,13 +2127,14 @@ private:
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
+ void DiagnoseLabelAtEndOfCompoundStatement();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc, Sema::ConditionKind CK,
- SourceLocation *LParenLoc = nullptr,
- SourceLocation *RParenLoc = nullptr);
+ SourceLocation &LParenLoc,
+ SourceLocation &RParenLoc);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
@@ -2089,10 +2146,9 @@ private:
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
- StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
- ParsedStmtContext StmtCtx,
+ StmtResult ParsePragmaLoopHint(StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
- ParsedAttributesWithRange &Attrs);
+ ParsedAttributes &Attrs);
/// Describes the behavior that should be taken for an __if_exists
/// block.
@@ -2171,16 +2227,21 @@ private:
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum class DeclSpecContext {
- DSC_normal, // normal context
- DSC_class, // class context, enables 'friend'
+ DSC_normal, // normal context
+ DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
- DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
- DSC_top_level, // top-level/namespace declaration context
- DSC_template_param, // template parameter context
- DSC_template_type_arg, // template type argument context
- DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
- DSC_condition // condition declaration context
+ DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
+ DSC_conv_operator, // C++ type-specifier-seq in an conversion operator
+ DSC_top_level, // top-level/namespace declaration context
+ DSC_template_param, // template parameter context
+ DSC_template_arg, // template argument context
+ DSC_template_type_arg, // template type argument context
+ DSC_objc_method_result, // ObjC method result context, enables
+ // 'instancetype'
+ DSC_condition, // condition declaration context
+ DSC_association, // A _Generic selection expression's type association
+ DSC_new, // C++ new expression
};
/// Is this a context in which we are parsing just a type-specifier (or
@@ -2189,6 +2250,7 @@ private:
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
+ case DeclSpecContext::DSC_template_arg:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_objc_method_result:
@@ -2197,8 +2259,11 @@ private:
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
+ case DeclSpecContext::DSC_conv_operator:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
+ case DeclSpecContext::DSC_association:
+ case DeclSpecContext::DSC_new:
return true;
}
llvm_unreachable("Missing DeclSpecContext case");
@@ -2223,7 +2288,7 @@ private:
/// so permit class and enum definitions in addition to non-defining class and
/// enum elaborated-type-specifiers)?
static AllowDefiningTypeSpec
- isDefiningTypeSpecifierContext(DeclSpecContext DSC) {
+ isDefiningTypeSpecifierContext(DeclSpecContext DSC, bool IsCPlusPlus) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_class:
@@ -2240,7 +2305,14 @@ private:
case DeclSpecContext::DSC_type_specifier:
return AllowDefiningTypeSpec::NoButErrorRecovery;
+ case DeclSpecContext::DSC_association:
+ return IsCPlusPlus ? AllowDefiningTypeSpec::NoButErrorRecovery
+ : AllowDefiningTypeSpec::Yes;
+
case DeclSpecContext::DSC_trailing:
+ case DeclSpecContext::DSC_conv_operator:
+ case DeclSpecContext::DSC_template_arg:
+ case DeclSpecContext::DSC_new:
return AllowDefiningTypeSpec::No;
}
llvm_unreachable("Missing DeclSpecContext case");
@@ -2261,6 +2333,11 @@ private:
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
case DeclSpecContext::DSC_trailing:
+ case DeclSpecContext::DSC_association:
+ case DeclSpecContext::DSC_conv_operator:
+ case DeclSpecContext::DSC_template_arg:
+ case DeclSpecContext::DSC_new:
+
return false;
}
llvm_unreachable("Missing DeclSpecContext case");
@@ -2272,10 +2349,14 @@ private:
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
+ case DeclSpecContext::DSC_template_arg:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_type_specifier:
+ case DeclSpecContext::DSC_association:
+ case DeclSpecContext::DSC_conv_operator:
+ case DeclSpecContext::DSC_new:
return true;
case DeclSpecContext::DSC_objc_method_result:
@@ -2287,6 +2368,31 @@ private:
llvm_unreachable("Missing DeclSpecContext case");
}
+ // Is this a context in which an implicit 'typename' is allowed?
+ static ImplicitTypenameContext
+ getImplicitTypenameContext(DeclSpecContext DSC) {
+ switch (DSC) {
+ case DeclSpecContext::DSC_class:
+ case DeclSpecContext::DSC_top_level:
+ case DeclSpecContext::DSC_type_specifier:
+ case DeclSpecContext::DSC_template_type_arg:
+ case DeclSpecContext::DSC_trailing:
+ case DeclSpecContext::DSC_alias_declaration:
+ case DeclSpecContext::DSC_template_param:
+ case DeclSpecContext::DSC_new:
+ return ImplicitTypenameContext::Yes;
+
+ case DeclSpecContext::DSC_normal:
+ case DeclSpecContext::DSC_objc_method_result:
+ case DeclSpecContext::DSC_condition:
+ case DeclSpecContext::DSC_template_arg:
+ case DeclSpecContext::DSC_conv_operator:
+ case DeclSpecContext::DSC_association:
+ return ImplicitTypenameContext::No;
+ }
+ llvm_unreachable("Missing DeclSpecContext case");
+ }
+
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
@@ -2301,15 +2407,18 @@ private:
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
- ParsedAttributesWithRange &attrs,
+ ParsedAttributes &DeclAttrs,
+ ParsedAttributes &DeclSpecAttrs,
SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
- ParsedAttributesWithRange &attrs, bool RequireSemi,
+ ParsedAttributes &DeclAttrs,
+ ParsedAttributes &DeclSpecAttrs, bool RequireSemi,
ForRangeInit *FRI = nullptr,
SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
+ ParsedAttributes &Attrs,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
@@ -2331,7 +2440,7 @@ private:
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
- ParsedAttributesWithRange &Attrs);
+ ParsedAttributes &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
@@ -2339,13 +2448,28 @@ private:
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
- LateParsedAttrList *LateAttrs = nullptr);
+ LateParsedAttrList *LateAttrs = nullptr) {
+ return ParseDeclarationSpecifiers(DS, TemplateInfo, AS, DSC, LateAttrs,
+ getImplicitTypenameContext(DSC));
+ }
+ void ParseDeclarationSpecifiers(
+ DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS,
+ DeclSpecContext DSC, LateParsedAttrList *LateAttrs,
+ ImplicitTypenameContext AllowImplicitTypename);
+
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
+ DeclSpecContext DSC = DeclSpecContext::DSC_normal) {
+ ParseSpecifierQualifierList(DS, getImplicitTypenameContext(DSC), AS, DSC);
+ }
+
+ void ParseSpecifierQualifierList(
+ DeclSpec &DS, ImplicitTypenameContext AllowImplicitTypename,
+ AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
@@ -2362,7 +2486,10 @@ private:
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
- bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
+ DeclGroupPtrTy ParseTopLevelStmtDecl();
+
+ bool isDeclarationSpecifier(ImplicitTypenameContext AllowImplicitTypename,
+ bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
@@ -2375,17 +2502,21 @@ private:
/// cast. Return false if it's no a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
if (getLangOpts().CPlusPlus)
- return isCXXDeclarationSpecifier() == TPResult::True;
- return isDeclarationSpecifier(true);
+ return isCXXDeclarationSpecifier(ImplicitTypenameContext::No) ==
+ TPResult::True;
+ return isDeclarationSpecifier(ImplicitTypenameContext::No, true);
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
+ ///
+ /// \param DisambiguatingWithExpression - True to indicate that the purpose of
+ /// this check is to disambiguate between an expression and a declaration.
/// Returns true for declaration, false for expression.
- bool isDeclarationStatement() {
+ bool isDeclarationStatement(bool DisambiguatingWithExpression = false) {
if (getLangOpts().CPlusPlus)
- return isCXXDeclarationStatement();
- return isDeclarationSpecifier(true);
+ return isCXXDeclarationStatement(DisambiguatingWithExpression);
+ return isDeclarationSpecifier(ImplicitTypenameContext::No, true);
}
/// isForInitDeclaration - Disambiguates between a declaration or an
@@ -2396,8 +2527,9 @@ private:
if (getLangOpts().OpenMP)
Actions.startOpenMPLoop();
if (getLangOpts().CPlusPlus)
- return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
- return isDeclarationSpecifier(true);
+ return Tok.is(tok::kw_using) ||
+ isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
+ return isDeclarationSpecifier(ImplicitTypenameContext::No, true);
}
/// Determine whether this is a C++1z for-range-identifier.
@@ -2410,17 +2542,21 @@ private:
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
- bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
+ bool isConstructorDeclarator(
+ bool Unqualified, bool DeductionGuide = false,
+ DeclSpec::FriendSpecified IsFriend = DeclSpec::FriendSpecified::No,
+ const ParsedTemplateInfo *TemplateInfo = nullptr);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
- TypeIdAsTemplateArgument
+ TypeIdAsTemplateArgument,
+ TypeIdInTrailingReturnType,
+ TypeIdAsGenericSelectionArgument,
};
-
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
@@ -2435,20 +2571,36 @@ private:
return isTypeIdInParens(isAmbiguous);
}
+ /// Checks whether the current tokens form a type-id or an expression for the
+ /// purposes of use as the initial operand to a generic selection expression.
+ /// This requires special handling in C++ because it accepts either a type or
+ /// an expression, and we need to disambiguate which is which. However, we
+ /// cannot use the same logic as we've used for sizeof expressions, because
+ /// that logic relies on the operator only accepting a single argument,
+ /// whereas _Generic accepts a list of arguments.
+ bool isTypeIdForGenericSelection() {
+ if (getLangOpts().CPlusPlus) {
+ bool isAmbiguous;
+ return isCXXTypeId(TypeIdAsGenericSelectionArgument, isAmbiguous);
+ }
+ return isTypeSpecifierQualifier();
+ }
+
/// Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not suppose that type-id
/// is in parenthesis.
bool isTypeIdUnambiguously() {
- bool IsAmbiguous;
- if (getLangOpts().CPlusPlus)
- return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
+ if (getLangOpts().CPlusPlus) {
+ bool isAmbiguous;
+ return isCXXTypeId(TypeIdUnambiguous, isAmbiguous);
+ }
return isTypeSpecifierQualifier();
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
- bool isCXXDeclarationStatement();
+ bool isCXXDeclarationStatement(bool DisambiguatingWithExpression = false);
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
@@ -2464,7 +2616,9 @@ private:
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
- bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
+ bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr,
+ ImplicitTypenameContext AllowImplicitTypename =
+ ImplicitTypenameContext::No);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
@@ -2510,7 +2664,8 @@ private:
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
- isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
+ isCXXDeclarationSpecifier(ImplicitTypenameContext AllowImplicitTypename,
+ TPResult BracedCastResult = TPResult::False,
bool *InvalidAsDeclSpec = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
@@ -2545,13 +2700,16 @@ private:
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
- TPResult TryParseInitDeclaratorList();
+ TPResult TryParseInitDeclaratorList(bool MayHaveTrailingReturnType = false);
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
- bool mayHaveDirectInit = false);
- TPResult
- TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
- bool VersusTemplateArg = false);
- TPResult TryParseFunctionDeclarator();
+ bool mayHaveDirectInit = false,
+ bool mayHaveTrailingReturnType = false);
+ TPResult TryParseParameterDeclarationClause(
+ bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false,
+ ImplicitTypenameContext AllowImplicitTypename =
+ ImplicitTypenameContext::No);
+ TPResult TryParseFunctionDeclarator(bool MayHaveTrailingReturnType = false);
+ bool NameAfterArrowIsNonType();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
@@ -2559,6 +2717,10 @@ private:
/// full validation of the syntactic structure of attributes.
bool TrySkipAttributes();
+ /// Diagnoses use of _ExtInt as being deprecated, and diagnoses use of
+ /// _BitInt as an extension when appropriate.
+ void DiagnoseBitIntUse(const Token &Tok);
+
public:
TypeResult
ParseTypeName(SourceRange *Range = nullptr,
@@ -2569,75 +2731,90 @@ public:
private:
void ParseBlockId(SourceLocation CaretLoc);
- /// Are [[]] attributes enabled?
- bool standardAttributesAllowed() const {
- const LangOptions &LO = getLangOpts();
- return LO.DoubleSquareBracketAttributes;
+ /// Return true if the next token should be treated as a [[]] attribute,
+ /// or as a keyword that behaves like one. The former is only true if
+ /// [[]] attributes are enabled, whereas the latter is true whenever
+ /// such a keyword appears. The arguments are as for
+ /// isCXX11AttributeSpecifier.
+ bool isAllowedCXX11AttributeSpecifier(bool Disambiguate = false,
+ bool OuterMightBeMessageSend = false) {
+ return (Tok.isRegularKeywordAttribute() ||
+ isCXX11AttributeSpecifier(Disambiguate, OuterMightBeMessageSend));
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
assert(Tok.is(tok::l_square));
- if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
+ if (NextToken().isNot(tok::l_square))
return false;
return DiagnoseProhibitedCXX11Attribute();
}
bool DiagnoseProhibitedCXX11Attribute();
- void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
+ void CheckMisplacedCXX11Attribute(ParsedAttributes &Attrs,
SourceLocation CorrectLocation) {
- if (!standardAttributesAllowed())
- return;
- if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
+ if (!Tok.isRegularKeywordAttribute() &&
+ (Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
Tok.isNot(tok::kw_alignas))
return;
DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
- void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
+ void DiagnoseMisplacedCXX11Attribute(ParsedAttributes &Attrs,
SourceLocation CorrectLocation);
- void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
- DeclSpec &DS, Sema::TagUseKind TUK);
+ void stripTypeAttributesOffDeclSpec(ParsedAttributes &Attrs, DeclSpec &DS,
+ Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
- void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
+ void ProhibitAttributes(ParsedAttributes &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
- DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
+ DiagnoseProhibitedAttributes(Attrs, FixItLoc);
Attrs.clear();
}
- void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
+ void ProhibitAttributes(ParsedAttributesView &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
- DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
+ DiagnoseProhibitedAttributes(Attrs, FixItLoc);
Attrs.clearListOnly();
}
- void DiagnoseProhibitedAttributes(const SourceRange &Range,
+ void DiagnoseProhibitedAttributes(const ParsedAttributesView &Attrs,
SourceLocation FixItLoc);
- // Forbid C++11 and C2x attributes that appear on certain syntactic locations
+ // Forbid C++11 and C23 attributes that appear on certain syntactic locations
// which standard permits but we don't supported yet, for example, attributes
// appertain to decl specifiers.
- void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
- unsigned DiagID,
- bool DiagnoseEmptyAttrs = false);
-
- /// Skip C++11 and C2x attributes and return the end location of the
+ // For the most cases we don't want to warn on unknown type attributes, but
+ // left them to later diagnoses. However, for a few cases like module
+ // declarations and module import declarations, we should do it.
+ void ProhibitCXX11Attributes(ParsedAttributes &Attrs, unsigned AttrDiagID,
+ unsigned KeywordDiagId,
+ bool DiagnoseEmptyAttrs = false,
+ bool WarnOnUnknownAttrs = false);
+
+ /// Skip C++11 and C23 attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
- /// Diagnose and skip C++11 and C2x attributes that appear in syntactic
+ /// Diagnose and skip C++11 and C23 attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
- /// Emit warnings for C++11 and C2x attributes that are in a position that
+ /// Emit warnings for C++11 and C23 attributes that are in a position that
/// clang accepts as an extension.
- void DiagnoseCXX11AttributeExtension(ParsedAttributesWithRange &Attrs);
+ void DiagnoseCXX11AttributeExtension(ParsedAttributes &Attrs);
+
+ ExprResult ParseUnevaluatedStringInAttribute(const IdentifierInfo &AttrName);
+
+ bool
+ ParseAttributeArgumentList(const clang::IdentifierInfo &AttrName,
+ SmallVectorImpl<Expr *> &Exprs,
+ ParsedAttributeArgumentsProperties ArgsProperties);
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
@@ -2647,7 +2824,7 @@ private:
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax);
+ ParsedAttr::Form Form);
enum ParseAttrKindMask {
PAKM_GNU = 1 << 0,
@@ -2669,36 +2846,15 @@ private:
/// __attribute__(()) [[]] int i; // Not OK
///
/// Such situations should use the specific attribute parsing functionality.
- void ParseAttributes(unsigned WhichAttrKinds,
- ParsedAttributesWithRange &Attrs,
- SourceLocation *End = nullptr,
- LateParsedAttrList *LateAttrs = nullptr);
void ParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
- SourceLocation *End = nullptr,
- LateParsedAttrList *LateAttrs = nullptr) {
- ParsedAttributesWithRange AttrsWithRange(AttrFactory);
- ParseAttributes(WhichAttrKinds, AttrsWithRange, End, LateAttrs);
- Attrs.takeAllFrom(AttrsWithRange);
- }
+ LateParsedAttrList *LateAttrs = nullptr);
/// \brief Possibly parse attributes based on what syntaxes are desired,
/// allowing for the order to vary.
- bool MaybeParseAttributes(unsigned WhichAttrKinds,
- ParsedAttributesWithRange &Attrs,
- SourceLocation *End = nullptr,
- LateParsedAttrList *LateAttrs = nullptr) {
- if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
- (standardAttributesAllowed() && isCXX11AttributeSpecifier())) {
- ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs);
- return true;
- }
- return false;
- }
bool MaybeParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
- SourceLocation *End = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
- (standardAttributesAllowed() && isCXX11AttributeSpecifier())) {
- ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs);
+ isAllowedCXX11AttributeSpecifier()) {
+ ParseAttributes(WhichAttrKinds, Attrs, LateAttrs);
return true;
}
return false;
@@ -2707,70 +2863,36 @@ private:
void MaybeParseGNUAttributes(Declarator &D,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute)) {
- ParsedAttributes attrs(AttrFactory);
- SourceLocation endLoc;
- ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
- D.takeAttributes(attrs, endLoc);
+ ParsedAttributes Attrs(AttrFactory);
+ ParseGNUAttributes(Attrs, LateAttrs, &D);
+ D.takeAttributes(Attrs);
}
}
- /// Parses GNU-style attributes and returns them without source range
- /// information.
- ///
- /// This API is discouraged. Use the version that takes a
- /// ParsedAttributesWithRange instead.
bool MaybeParseGNUAttributes(ParsedAttributes &Attrs,
- SourceLocation *EndLoc = nullptr,
- LateParsedAttrList *LateAttrs = nullptr) {
- if (Tok.is(tok::kw___attribute)) {
- ParsedAttributesWithRange AttrsWithRange(AttrFactory);
- ParseGNUAttributes(Attrs, EndLoc, LateAttrs);
- Attrs.takeAllFrom(AttrsWithRange);
- return true;
- }
- return false;
- }
-
- bool MaybeParseGNUAttributes(ParsedAttributesWithRange &Attrs,
- SourceLocation *EndLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute)) {
- ParseGNUAttributes(Attrs, EndLoc, LateAttrs);
+ ParseGNUAttributes(Attrs, LateAttrs);
return true;
}
return false;
}
- /// Parses GNU-style attributes and returns them without source range
- /// information.
- ///
- /// This API is discouraged. Use the version that takes a
- /// ParsedAttributesWithRange instead.
void ParseGNUAttributes(ParsedAttributes &Attrs,
- SourceLocation *EndLoc = nullptr,
- LateParsedAttrList *LateAttrs = nullptr,
- Declarator *D = nullptr) {
- ParsedAttributesWithRange AttrsWithRange(AttrFactory);
- ParseGNUAttributes(AttrsWithRange, EndLoc, LateAttrs, D);
- Attrs.takeAllFrom(AttrsWithRange);
- }
-
- void ParseGNUAttributes(ParsedAttributesWithRange &Attrs,
- SourceLocation *EndLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax, Declarator *D);
+ ParsedAttr::Form Form, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax);
+ ParsedAttr::Form Form);
void ReplayOpenMPAttributeTokens(CachedTokens &OpenMPTokens) {
// If parsing the attributes found an OpenMP directive, emit those tokens
@@ -2783,35 +2905,23 @@ private:
}
}
void MaybeParseCXX11Attributes(Declarator &D) {
- if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
- ParsedAttributesWithRange attrs(AttrFactory);
- SourceLocation endLoc;
- ParseCXX11Attributes(attrs, &endLoc);
- D.takeAttributes(attrs, endLoc);
- }
- }
- bool MaybeParseCXX11Attributes(ParsedAttributes &attrs,
- SourceLocation *endLoc = nullptr) {
- if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
- ParsedAttributesWithRange attrsWithRange(AttrFactory);
- ParseCXX11Attributes(attrsWithRange, endLoc);
- attrs.takeAllFrom(attrsWithRange);
- return true;
+ if (isAllowedCXX11AttributeSpecifier()) {
+ ParsedAttributes Attrs(AttrFactory);
+ ParseCXX11Attributes(Attrs);
+ D.takeAttributes(Attrs);
}
- return false;
}
- bool MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
- SourceLocation *endLoc = nullptr,
+
+ bool MaybeParseCXX11Attributes(ParsedAttributes &Attrs,
bool OuterMightBeMessageSend = false) {
- if (standardAttributesAllowed() &&
- isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) {
- ParseCXX11Attributes(attrs, endLoc);
+ if (isAllowedCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) {
+ ParseCXX11Attributes(Attrs);
return true;
}
return false;
}
- void ParseOpenMPAttributeArgs(IdentifierInfo *AttrName,
+ void ParseOpenMPAttributeArgs(const IdentifierInfo *AttrName,
CachedTokens &OpenMPTokens);
void ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
@@ -2823,9 +2933,8 @@ private:
ParseCXX11AttributeSpecifierInternal(Attrs, OpenMPTokens, EndLoc);
ReplayOpenMPAttributeTokens(OpenMPTokens);
}
- void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
- SourceLocation *EndLoc = nullptr);
- /// Parses a C++11 (or C2x)-style attribute argument list. Returns true
+ void ParseCXX11Attributes(ParsedAttributes &attrs);
+ /// Parses a C++11 (or C23)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
@@ -2834,31 +2943,55 @@ private:
SourceLocation ScopeLoc,
CachedTokens &OpenMPTokens);
- IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
+ IdentifierInfo *TryParseCXX11AttributeIdentifier(
+ SourceLocation &Loc,
+ Sema::AttributeCompletion Completion = Sema::AttributeCompletion::None,
+ const IdentifierInfo *EnclosingScope = nullptr);
+
+ void MaybeParseHLSLSemantics(Declarator &D,
+ SourceLocation *EndLoc = nullptr) {
+ assert(getLangOpts().HLSL && "MaybeParseHLSLSemantics is for HLSL only");
+ if (Tok.is(tok::colon)) {
+ ParsedAttributes Attrs(AttrFactory);
+ ParseHLSLSemantics(Attrs, EndLoc);
+ D.takeAttributes(Attrs);
+ }
+ }
- void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
- SourceLocation *endLoc = nullptr) {
- if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
- ParseMicrosoftAttributes(attrs, endLoc);
+ void MaybeParseHLSLSemantics(ParsedAttributes &Attrs,
+ SourceLocation *EndLoc = nullptr) {
+ assert(getLangOpts().HLSL && "MaybeParseHLSLSemantics is for HLSL only");
+ if (getLangOpts().HLSL && Tok.is(tok::colon))
+ ParseHLSLSemantics(Attrs, EndLoc);
+ }
+
+ void ParseHLSLSemantics(ParsedAttributes &Attrs,
+ SourceLocation *EndLoc = nullptr);
+ Decl *ParseHLSLBuffer(SourceLocation &DeclEnd);
+
+ void MaybeParseMicrosoftAttributes(ParsedAttributes &Attrs) {
+ if ((getLangOpts().MicrosoftExt || getLangOpts().HLSL) &&
+ Tok.is(tok::l_square)) {
+ ParsedAttributes AttrsWithRange(AttrFactory);
+ ParseMicrosoftAttributes(AttrsWithRange);
+ Attrs.takeAllFrom(AttrsWithRange);
+ }
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
- void ParseMicrosoftAttributes(ParsedAttributes &attrs,
- SourceLocation *endLoc = nullptr);
- bool MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
- SourceLocation *End = nullptr) {
- const auto &LO = getLangOpts();
- if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) {
- ParseMicrosoftDeclSpecs(Attrs, End);
+ void ParseMicrosoftAttributes(ParsedAttributes &Attrs);
+ bool MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs) {
+ if (getLangOpts().DeclSpecKeyword && Tok.is(tok::kw___declspec)) {
+ ParseMicrosoftDeclSpecs(Attrs);
return true;
}
return false;
}
- void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
- SourceLocation *End = nullptr);
+ void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
+ void ParseWebAssemblyFuncrefTypeAttribute(ParsedAttributes &Attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
@@ -2866,6 +2999,9 @@ private:
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
+ void ParseCUDAFunctionAttributes(ParsedAttributes &attrs);
+ bool isHLSLQualifier(const Token &Tok) const;
+ void ParseHLSLQualifiers(ParsedAttributes &Attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
@@ -2874,9 +3010,9 @@ private:
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax);
+ ParsedAttr::Form Form);
- Optional<AvailabilitySpec> ParseAvailabilitySpec();
+ std::optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
@@ -2885,15 +3021,15 @@ private:
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax);
+ ParsedAttr::Form Form);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
- ParsedAttributes &attrs,
- SourceLocation *endLoc,
+ ParsedAttributes &Attrs,
+ SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax);
+ ParsedAttr::Form Form);
void ParseSwiftNewTypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
@@ -2901,7 +3037,7 @@ private:
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax);
+ ParsedAttr::Form Form);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
@@ -2909,24 +3045,25 @@ private:
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax);
+ ParsedAttr::Form Form);
- void
- ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
- SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
- SourceLocation *EndLoc, IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
+ void ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
+ SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs,
+ IdentifierInfo *ScopeName,
+ SourceLocation ScopeLoc,
+ ParsedAttr::Form Form);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
- void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
- ExprResult ParseAlignArgument(SourceLocation Start,
- SourceLocation &EllipsisLoc);
+ ExprResult ParseAlignArgument(StringRef KWName, SourceLocation Start,
+ SourceLocation &EllipsisLoc, bool &IsType,
+ ParsedType &Ty);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
ExprResult ParseExtIntegerArgument();
@@ -2997,18 +3134,17 @@ private:
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
- Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
+ std::optional<llvm::function_ref<void()>> CodeCompletionHandler =
+ std::nullopt);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
- void ParseFunctionDeclarator(Declarator &D,
- ParsedAttributes &attrs,
+ void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &FirstArgAttrs,
BalancedDelimiterTracker &Tracker,
- bool IsAmbiguous,
- bool RequiresArg = false);
+ bool IsAmbiguous, bool RequiresArg = false);
void InitCXXThisScopeForDeclaratorIfRelevant(
const Declarator &D, const DeclSpec &DS,
- llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
+ std::optional<Sema::CXXThisScopeRAII> &ThisScope);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
@@ -3016,12 +3152,23 @@ private:
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
- DeclaratorContext DeclaratorContext,
- ParsedAttributes &attrs,
- SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
- SourceLocation &EllipsisLoc);
+ Declarator &D, ParsedAttributes &attrs,
+ SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
+ SourceLocation &EllipsisLoc) {
+ return ParseParameterDeclarationClause(
+ D.getContext(), attrs, ParamInfo, EllipsisLoc,
+ D.getCXXScopeSpec().isSet() &&
+ D.isFunctionDeclaratorAFunctionDeclaration());
+ }
+ void ParseParameterDeclarationClause(
+ DeclaratorContext DeclaratorContext, ParsedAttributes &attrs,
+ SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
+ SourceLocation &EllipsisLoc, bool IsACXXFunctionDeclaration = false);
+
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
+ bool MaybeParseTypeTransformTypeSpecifier(DeclSpec &DS);
+ DeclSpec::TST TypeTransformTokToDeclSpec();
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
@@ -3062,7 +3209,7 @@ private:
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
- SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
+ SourceLocation &DeclEnd, ParsedAttributes &Attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
@@ -3086,7 +3233,7 @@ private:
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
- ParsedAttributesWithRange &Attrs,
+ ParsedAttributes &Attrs,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
@@ -3104,16 +3251,14 @@ private:
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
- DeclSpecContext DSC,
- ParsedAttributesWithRange &Attributes);
+ DeclSpecContext DSC, ParsedAttributes &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
- ParsedAttributesWithRange &Attrs,
- unsigned TagType,
+ ParsedAttributes &Attrs, unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
@@ -3128,9 +3273,10 @@ private:
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
- DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
- AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
- DeclSpec::TST TagType, Decl *Tag);
+ DeclGroupPtrTy
+ ParseCXXClassMemberDeclarationWithPragmas(AccessSpecifier &AS,
+ ParsedAttributes &AccessAttrs,
+ DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
@@ -3197,6 +3343,9 @@ private:
/// Parses OpenMP context selectors.
bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI);
+ /// Parse an 'append_args' clause for '#pragma omp declare variant'.
+ bool parseOpenMPAppendArgs(SmallVectorImpl<OMPInteropInfo> &InteropInfos);
+
/// Parse a `match` clause for an '#pragma omp declare variant'. Return true
/// if there was an error.
bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI,
@@ -3213,6 +3362,15 @@ private:
/// Parse 'omp end assumes' directive.
void ParseOpenMPEndAssumesDirective(SourceLocation Loc);
+ /// Parses clauses for directive.
+ ///
+ /// \param DKind Kind of current directive.
+ /// \param clauses for current directive.
+ /// \param start location for clauses of current directive
+ void ParseOpenMPClauses(OpenMPDirectiveKind DKind,
+ SmallVectorImpl<clang::OMPClause *> &Clauses,
+ SourceLocation Loc);
+
/// Parse clauses for '#pragma omp [begin] declare target'.
void ParseOMPDeclareTargetClauses(Sema::DeclareTargetContextInfo &DTCI);
@@ -3238,8 +3396,8 @@ private:
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
- AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
- bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified,
+ AccessSpecifier &AS, ParsedAttributes &Attrs, bool Delayed = false,
+ DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
@@ -3273,8 +3431,10 @@ private:
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
- StmtResult
- ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
+ /// \param ReadDirectiveWithinMetadirective true if directive is within a
+ /// metadirective and therefore ends on the closing paren.
+ StmtResult ParseOpenMPDeclarativeOrExecutableDirective(
+ ParsedStmtContext StmtCtx, bool ReadDirectiveWithinMetadirective = false);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
@@ -3299,6 +3459,11 @@ private:
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
+ /// Parses indirect clause
+ /// \param ParseOnly true to skip the clause's semantic actions and return
+ // false;
+ bool ParseOpenMPIndirectClause(Sema::DeclareTargetContextInfo &DTCI,
+ bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
@@ -3340,6 +3505,9 @@ private:
/// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')'
OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind);
+ /// Parses the 'interop' parts of the 'append_args' and 'init' clauses.
+ bool ParseOMPInteropInfo(OMPInteropInfo &InteropInfo, OpenMPClauseKind Kind);
+
/// Parses clause with an interop variable of kind \a Kind.
///
/// \param Kind Kind of current clause.
@@ -3348,6 +3516,13 @@ private:
//
OMPClause *ParseOpenMPInteropClause(OpenMPClauseKind Kind, bool ParseOnly);
+ /// Parses a ompx_attribute clause
+ ///
+ /// \param ParseOnly true to skip the clause's semantic actions and return
+ /// nullptr.
+ //
+ OMPClause *ParseOpenMPOMPXAttributesClause(bool ParseOnly);
+
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
@@ -3355,30 +3530,14 @@ public:
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
bool IsAddressOfOperand = false);
- /// Data used for parsing list of variables in OpenMP clauses.
- struct OpenMPVarListDataTy {
- Expr *DepModOrTailExpr = nullptr;
- SourceLocation ColonLoc;
- SourceLocation RLoc;
- CXXScopeSpec ReductionOrMapperIdScopeSpec;
- DeclarationNameInfo ReductionOrMapperId;
- int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or
- ///< lastprivate clause.
- SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
- MapTypeModifiers;
- SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers>
- MapTypeModifiersLoc;
- SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers>
- MotionModifiers;
- SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc;
- bool IsMapTypeImplicit = false;
- SourceLocation ExtraModifierLoc;
- };
-
+ /// Parses a reserved locator like 'omp_all_memory'.
+ bool ParseOpenMPReservedLocator(OpenMPClauseKind Kind,
+ Sema::OpenMPVarListDataTy &Data,
+ const LangOptions &LangOpts);
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
- OpenMPVarListDataTy &Data);
+ Sema::OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
bool ObjectHadErrors, bool EnteringContext,
bool AllowDestructorName, bool AllowConstructorName,
@@ -3386,11 +3545,50 @@ public:
SourceLocation *TemplateKWLoc, UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
- bool parseMapperModifier(OpenMPVarListDataTy &Data);
+ bool parseMapperModifier(Sema::OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
- bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
+ bool parseMapTypeModifiers(Sema::OpenMPVarListDataTy &Data);
+
+ //===--------------------------------------------------------------------===//
+ // OpenACC Parsing.
+
+ /// Placeholder for now, should just ignore the directives after emitting a
+ /// diagnostic. Eventually will be split into a few functions to parse
+ /// different situations.
+public:
+ DeclGroupPtrTy ParseOpenACCDirectiveDecl();
+ StmtResult ParseOpenACCDirectiveStmt();
+
+private:
+ void ParseOpenACCDirective();
+ /// Helper that parses an ID Expression based on the language options.
+ ExprResult ParseOpenACCIDExpression();
+ /// Parses the variable list for the `cache` construct.
+ void ParseOpenACCCacheVarList();
+ /// Parses a single variable in a variable list for OpenACC.
+ bool ParseOpenACCVar();
+ /// Parses the variable list for the variety of clauses that take a var-list,
+ /// including the optional Special Token listed for some,based on clause type.
+ bool ParseOpenACCClauseVarList(OpenACCClauseKind Kind);
+ /// Parses any parameters for an OpenACC Clause, including required/optional
+ /// parens.
+ bool ParseOpenACCClauseParams(OpenACCDirectiveKind DirKind,
+ OpenACCClauseKind Kind);
+ /// Parses a single clause in a clause-list for OpenACC.
+ bool ParseOpenACCClause(OpenACCDirectiveKind DirKind);
+ /// Parses the clause-list for an OpenACC directive.
+ void ParseOpenACCClauseList(OpenACCDirectiveKind DirKind);
+ bool ParseOpenACCWaitArgument();
+ /// Parses the clause of the 'bind' argument, which can be a string literal or
+ /// an ID expression.
+ ExprResult ParseOpenACCBindClauseArgument();
+ /// Parses the clause kind of 'int-expr', which can be any integral
+ /// expression.
+ ExprResult ParseOpenACCIntExpr();
+ /// Parses the 'device-type-list', which is a list of identifiers.
+ bool ParseOpenACCDeviceTypeList();
private:
//===--------------------------------------------------------------------===//
@@ -3438,7 +3636,8 @@ private:
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
- SourceLocation &RAngleLoc);
+ SourceLocation &RAngleLoc,
+ TemplateTy NameHint = nullptr);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
@@ -3446,9 +3645,12 @@ private:
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true,
bool TypeConstraint = false);
- void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
- bool IsClassName = false);
- bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
+ void
+ AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
+ ImplicitTypenameContext AllowImplicitTypename,
+ bool IsClassName = false);
+ bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs,
+ TemplateTy Template, SourceLocation OpenLoc);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
@@ -3462,10 +3664,23 @@ private:
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd);
+ /// Parse the given string as a type.
+ ///
+ /// This is a dangerous utility function currently employed only by API notes.
+ /// It is not a general entry-point for safely parsing types from strings.
+ ///
+ /// \param TypeStr The string to be parsed as a type.
+ /// \param Context The name of the context in which this string is being
+ /// parsed, which will be used in diagnostics.
+ /// \param IncludeLoc The location at which this parse was triggered.
+ TypeResult ParseTypeFromString(StringRef TypeStr, StringRef Context,
+ SourceLocation IncludeLoc);
+
//===--------------------------------------------------------------------===//
// Modules
- DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
- Decl *ParseModuleImport(SourceLocation AtLoc);
+ DeclGroupPtrTy ParseModuleDecl(Sema::ModuleImportState &ImportState);
+ Decl *ParseModuleImport(SourceLocation AtLoc,
+ Sema::ModuleImportState &ImportState);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
diff --git a/contrib/llvm-project/clang/include/clang/Parse/RAIIObjectsForParser.h b/contrib/llvm-project/clang/include/clang/Parse/RAIIObjectsForParser.h
index bc1754614ad9..e1626a7870bb 100644
--- a/contrib/llvm-project/clang/include/clang/Parse/RAIIObjectsForParser.h
+++ b/contrib/llvm-project/clang/include/clang/Parse/RAIIObjectsForParser.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_LIB_PARSE_RAIIOBJECTSFORPARSER_H
-#define LLVM_CLANG_LIB_PARSE_RAIIOBJECTSFORPARSER_H
+#ifndef LLVM_CLANG_PARSE_RAIIOBJECTSFORPARSER_H
+#define LLVM_CLANG_PARSE_RAIIOBJECTSFORPARSER_H
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
@@ -201,9 +201,11 @@ namespace clang {
ParsingDeclRAIIObject ParsingRAII;
public:
- ParsingDeclarator(Parser &P, const ParsingDeclSpec &DS, DeclaratorContext C)
- : Declarator(DS, C), ParsingRAII(P, &DS.getDelayedDiagnosticPool()) {
- }
+ ParsingDeclarator(Parser &P, const ParsingDeclSpec &DS,
+ const ParsedAttributes &DeclarationAttrs,
+ DeclaratorContext C)
+ : Declarator(DS, DeclarationAttrs, C),
+ ParsingRAII(P, &DS.getDelayedDiagnosticPool()) {}
const ParsingDeclSpec &getDeclSpec() const {
return static_cast<const ParsingDeclSpec&>(Declarator::getDeclSpec());
@@ -228,9 +230,10 @@ namespace clang {
ParsingDeclRAIIObject ParsingRAII;
public:
- ParsingFieldDeclarator(Parser &P, const ParsingDeclSpec &DS)
- : FieldDeclarator(DS), ParsingRAII(P, &DS.getDelayedDiagnosticPool()) {
- }
+ ParsingFieldDeclarator(Parser &P, const ParsingDeclSpec &DS,
+ const ParsedAttributes &DeclarationAttrs)
+ : FieldDeclarator(DS, DeclarationAttrs),
+ ParsingRAII(P, &DS.getDelayedDiagnosticPool()) {}
const ParsingDeclSpec &getDeclSpec() const {
return static_cast<const ParsingDeclSpec&>(D.getDeclSpec());
@@ -306,6 +309,25 @@ namespace clang {
~ParsingOpenMPDirectiveRAII() { restore(); }
};
+ /// Activates OpenACC parsing mode to preseve OpenACC specific annotation
+ /// tokens.
+ class ParsingOpenACCDirectiveRAII {
+ Parser &P;
+ bool OldVal;
+
+ public:
+ ParsingOpenACCDirectiveRAII(Parser &P, bool Value = true)
+ : P(P), OldVal(P.OpenACCDirectiveParsing) {
+ P.OpenACCDirectiveParsing = Value;
+ }
+
+ /// This can be used to restore the state early, before the dtor
+ /// is run.
+ void restore() { P.OpenMPDirectiveParsing = OldVal; }
+
+ ~ParsingOpenACCDirectiveRAII() { restore(); }
+ };
+
/// RAII object that makes '>' behave either as an operator
/// or as the closing angle bracket for a template argument list.
class GreaterThanIsOperatorScope {
@@ -338,6 +360,19 @@ namespace clang {
}
};
+ class OffsetOfStateRAIIObject {
+ Sema::OffsetOfKind &OffsetOfState;
+ Sema::OffsetOfKind OldValue;
+
+ public:
+ OffsetOfStateRAIIObject(Parser &P, Sema::OffsetOfKind Value)
+ : OffsetOfState(P.OffsetOfState), OldValue(P.OffsetOfState) {
+ OffsetOfState = Value;
+ }
+
+ ~OffsetOfStateRAIIObject() { OffsetOfState = OldValue; }
+ };
+
/// RAII object that makes sure paren/bracket/brace count is correct
/// after declaration/statement parsing, even when there's a parsing error.
class ParenBraceBracketBalancer {
diff --git a/contrib/llvm-project/clang/include/clang/Rewrite/Core/RewriteRope.h b/contrib/llvm-project/clang/include/clang/Rewrite/Core/RewriteRope.h
index 8fa7af245eb8..73e66e111f57 100644
--- a/contrib/llvm-project/clang/include/clang/Rewrite/Core/RewriteRope.h
+++ b/contrib/llvm-project/clang/include/clang/Rewrite/Core/RewriteRope.h
@@ -181,6 +181,10 @@ public:
RewriteRope() = default;
RewriteRope(const RewriteRope &RHS) : Chunks(RHS.Chunks) {}
+ // The copy assignment operator is defined as deleted pending further
+ // motivation.
+ RewriteRope &operator=(const RewriteRope &) = delete;
+
using iterator = RopePieceBTree::iterator;
using const_iterator = RopePieceBTree::iterator;
diff --git a/contrib/llvm-project/clang/include/clang/Sema/AnalysisBasedWarnings.h b/contrib/llvm-project/clang/include/clang/Sema/AnalysisBasedWarnings.h
index 49b69c585ff7..020ddd36cf73 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/AnalysisBasedWarnings.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/AnalysisBasedWarnings.h
@@ -13,15 +13,14 @@
#ifndef LLVM_CLANG_SEMA_ANALYSISBASEDWARNINGS_H
#define LLVM_CLANG_SEMA_ANALYSISBASEDWARNINGS_H
+#include "clang/AST/Decl.h"
#include "llvm/ADT/DenseMap.h"
#include <memory>
namespace clang {
-class BlockExpr;
class Decl;
class FunctionDecl;
-class ObjCMethodDecl;
class QualType;
class Sema;
namespace sema {
@@ -97,6 +96,9 @@ public:
void IssueWarnings(Policy P, FunctionScopeInfo *fscope,
const Decl *D, QualType BlockType);
+ // Issue warnings that require whole-translation-unit analysis.
+ void IssueWarnings(TranslationUnitDecl *D);
+
Policy getDefaultPolicy() { return DefaultPolicy; }
void PrintStats() const;
diff --git a/contrib/llvm-project/clang/include/clang/Sema/CleanupInfo.h b/contrib/llvm-project/clang/include/clang/Sema/CleanupInfo.h
index ea9df49f77e1..45d16fea93e0 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/CleanupInfo.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/CleanupInfo.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_SEMA_CLEANUP_INFO_H
-#define LLVM_CLANG_SEMA_CLEANUP_INFO_H
+#ifndef LLVM_CLANG_SEMA_CLEANUPINFO_H
+#define LLVM_CLANG_SEMA_CLEANUPINFO_H
namespace clang {
diff --git a/contrib/llvm-project/clang/include/clang/Sema/CodeCompleteConsumer.h b/contrib/llvm-project/clang/include/clang/Sema/CodeCompleteConsumer.h
index 87646ab95025..274eaac819af 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/CodeCompleteConsumer.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/CodeCompleteConsumer.h
@@ -21,8 +21,6 @@
#include "clang/Sema/DeclSpec.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
@@ -30,6 +28,7 @@
#include "llvm/Support/type_traits.h"
#include <cassert>
#include <memory>
+#include <optional>
#include <string>
#include <utility>
@@ -329,9 +328,20 @@ public:
/// Code completion inside the filename part of a #include directive.
CCC_IncludedFile,
+ /// Code completion of an attribute name.
+ CCC_Attribute,
+
/// An unknown context, in which we are recovering from a parsing
/// error and don't know which completions we should give.
- CCC_Recovery
+ CCC_Recovery,
+
+ /// Code completion in a @class forward declaration.
+ CCC_ObjCClassForwardDecl,
+
+ /// Code completion at a top level, i.e. in a namespace or global scope,
+ /// but also in expression statements. This is because REPL inputs can be
+ /// declarations or expression statements.
+ CCC_TopLevelOrExpression,
};
using VisitedContextSet = llvm::SmallPtrSet<DeclContext *, 8>;
@@ -356,7 +366,7 @@ private:
/// The scope specifier that comes before the completion token e.g.
/// "a::b::"
- llvm::Optional<CXXScopeSpec> ScopeSpecifier;
+ std::optional<CXXScopeSpec> ScopeSpecifier;
/// A set of declaration contexts visited by Sema when doing lookup for
/// code completion.
@@ -365,11 +375,11 @@ private:
public:
/// Construct a new code-completion context of the given kind.
CodeCompletionContext(Kind CCKind)
- : CCKind(CCKind), IsUsingDeclaration(false), SelIdents(None) {}
+ : CCKind(CCKind), IsUsingDeclaration(false), SelIdents(std::nullopt) {}
/// Construct a new code-completion context of the given kind.
CodeCompletionContext(Kind CCKind, QualType T,
- ArrayRef<IdentifierInfo *> SelIdents = None)
+ ArrayRef<IdentifierInfo *> SelIdents = std::nullopt)
: CCKind(CCKind), IsUsingDeclaration(false), SelIdents(SelIdents) {
if (CCKind == CCC_DotMemberAccess || CCKind == CCC_ArrowMemberAccess ||
CCKind == CCC_ObjCPropertyAccess || CCKind == CCC_ObjCClassMessage ||
@@ -419,14 +429,14 @@ public:
return VisitedContexts;
}
- llvm::Optional<const CXXScopeSpec *> getCXXScopeSpecifier() {
+ std::optional<const CXXScopeSpec *> getCXXScopeSpecifier() {
if (ScopeSpecifier)
- return ScopeSpecifier.getPointer();
- return llvm::None;
+ return &*ScopeSpecifier;
+ return std::nullopt;
}
};
-/// Get string representation of \p Kind, useful for for debugging.
+/// Get string representation of \p Kind, useful for debugging.
llvm::StringRef getCompletionKindString(CodeCompletionContext::Kind Kind);
/// A "string" used to describe how code completion can
@@ -603,9 +613,12 @@ public:
return begin()[I];
}
- /// Returns the text in the TypedText chunk.
+ /// Returns the text in the first TypedText chunk.
const char *getTypedText() const;
+ /// Returns the combined text from all TypedText chunks.
+ std::string getAllTypedText() const;
+
/// Retrieve the priority of this code completion result.
unsigned getPriority() const { return Priority; }
@@ -844,6 +857,12 @@ public:
/// rather than a use of that entity.
bool DeclaringEntity : 1;
+ /// When completing a function, whether it can be a call. This will usually be
+ /// true, but we have some heuristics, e.g. when a pointer to a non-static
+ /// member function is completed outside of that class' scope, it can never
+ /// be a call.
+ bool FunctionCanBeCall : 1;
+
/// If the result should have a nested-name-specifier, this is it.
/// When \c QualifierIsInformative, the nested-name-specifier is
/// informative rather than required.
@@ -870,7 +889,7 @@ public:
FixIts(std::move(FixIts)), Hidden(false), InBaseClass(false),
QualifierIsInformative(QualifierIsInformative),
StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
- DeclaringEntity(false), Qualifier(Qualifier) {
+ DeclaringEntity(false), FunctionCanBeCall(true), Qualifier(Qualifier) {
// FIXME: Add assert to check FixIts range requirements.
computeCursorKindAndAvailability(Accessible);
}
@@ -880,7 +899,8 @@ public:
: Keyword(Keyword), Priority(Priority), Kind(RK_Keyword),
CursorKind(CXCursor_NotImplemented), Hidden(false), InBaseClass(false),
QualifierIsInformative(false), StartsNestedNameSpecifier(false),
- AllParametersAreInformative(false), DeclaringEntity(false) {}
+ AllParametersAreInformative(false), DeclaringEntity(false),
+ FunctionCanBeCall(true) {}
/// Build a result that refers to a macro.
CodeCompletionResult(const IdentifierInfo *Macro,
@@ -890,7 +910,7 @@ public:
CursorKind(CXCursor_MacroDefinition), Hidden(false), InBaseClass(false),
QualifierIsInformative(false), StartsNestedNameSpecifier(false),
AllParametersAreInformative(false), DeclaringEntity(false),
- MacroDefInfo(MI) {}
+ FunctionCanBeCall(true), MacroDefInfo(MI) {}
/// Build a result that refers to a pattern.
CodeCompletionResult(
@@ -902,7 +922,7 @@ public:
CursorKind(CursorKind), Availability(Availability), Hidden(false),
InBaseClass(false), QualifierIsInformative(false),
StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
- DeclaringEntity(false) {}
+ DeclaringEntity(false), FunctionCanBeCall(true) {}
/// Build a result that refers to a pattern with an associated
/// declaration.
@@ -911,7 +931,7 @@ public:
: Declaration(D), Pattern(Pattern), Priority(Priority), Kind(RK_Pattern),
Hidden(false), InBaseClass(false), QualifierIsInformative(false),
StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
- DeclaringEntity(false) {
+ DeclaringEntity(false), FunctionCanBeCall(true) {
computeCursorKindAndAvailability();
}
@@ -1006,12 +1026,22 @@ public:
/// The candidate is a function declaration.
CK_Function,
- /// The candidate is a function template.
+ /// The candidate is a function template, arguments are being completed.
CK_FunctionTemplate,
/// The "candidate" is actually a variable, expression, or block
/// for which we only have a function prototype.
- CK_FunctionType
+ CK_FunctionType,
+
+ /// The candidate is a variable or expression of function type
+ /// for which we have the location of the prototype declaration.
+ CK_FunctionProtoTypeLoc,
+
+ /// The candidate is a template, template arguments are being completed.
+ CK_Template,
+
+ /// The candidate is aggregate initialization of a record type.
+ CK_Aggregate,
};
private:
@@ -1030,17 +1060,48 @@ public:
/// The function type that describes the entity being called,
/// when Kind == CK_FunctionType.
const FunctionType *Type;
+
+ /// The location of the function prototype that describes the entity being
+ /// called, when Kind == CK_FunctionProtoTypeLoc.
+ FunctionProtoTypeLoc ProtoTypeLoc;
+
+ /// The template overload candidate, available when
+ /// Kind == CK_Template.
+ const TemplateDecl *Template;
+
+ /// The class being aggregate-initialized,
+ /// when Kind == CK_Aggregate
+ const RecordDecl *AggregateType;
};
public:
OverloadCandidate(FunctionDecl *Function)
- : Kind(CK_Function), Function(Function) {}
+ : Kind(CK_Function), Function(Function) {
+ assert(Function != nullptr);
+ }
OverloadCandidate(FunctionTemplateDecl *FunctionTemplateDecl)
- : Kind(CK_FunctionTemplate), FunctionTemplate(FunctionTemplateDecl) {}
+ : Kind(CK_FunctionTemplate), FunctionTemplate(FunctionTemplateDecl) {
+ assert(FunctionTemplateDecl != nullptr);
+ }
OverloadCandidate(const FunctionType *Type)
- : Kind(CK_FunctionType), Type(Type) {}
+ : Kind(CK_FunctionType), Type(Type) {
+ assert(Type != nullptr);
+ }
+
+ OverloadCandidate(FunctionProtoTypeLoc Prototype)
+ : Kind(CK_FunctionProtoTypeLoc), ProtoTypeLoc(Prototype) {
+ assert(!Prototype.isNull());
+ }
+
+ OverloadCandidate(const RecordDecl *Aggregate)
+ : Kind(CK_Aggregate), AggregateType(Aggregate) {
+ assert(Aggregate != nullptr);
+ }
+
+ OverloadCandidate(const TemplateDecl *Template)
+ : Kind(CK_Template), Template(Template) {}
/// Determine the kind of overload candidate.
CandidateKind getKind() const { return Kind; }
@@ -1059,13 +1120,40 @@ public:
/// function is stored.
const FunctionType *getFunctionType() const;
+ /// Retrieve the function ProtoTypeLoc candidate.
+ /// This can be called for any Kind, but returns null for kinds
+ /// other than CK_FunctionProtoTypeLoc.
+ const FunctionProtoTypeLoc getFunctionProtoTypeLoc() const;
+
+ const TemplateDecl *getTemplate() const {
+ assert(getKind() == CK_Template && "Not a template");
+ return Template;
+ }
+
+ /// Retrieve the aggregate type being initialized.
+ const RecordDecl *getAggregate() const {
+ assert(getKind() == CK_Aggregate);
+ return AggregateType;
+ }
+
+ /// Get the number of parameters in this signature.
+ unsigned getNumParams() const;
+
+ /// Get the type of the Nth parameter.
+ /// Returns null if the type is unknown or N is out of range.
+ QualType getParamType(unsigned N) const;
+
+ /// Get the declaration of the Nth parameter.
+ /// Returns null if the decl is unknown or N is out of range.
+ const NamedDecl *getParamDecl(unsigned N) const;
+
/// Create a new code-completion string that describes the function
/// signature of this overload candidate.
- CodeCompletionString *CreateSignatureString(unsigned CurrentArg,
- Sema &S,
- CodeCompletionAllocator &Allocator,
- CodeCompletionTUInfo &CCTUInfo,
- bool IncludeBriefComments) const;
+ CodeCompletionString *
+ CreateSignatureString(unsigned CurrentArg, Sema &S,
+ CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo,
+ bool IncludeBriefComments, bool Braced) const;
};
CodeCompleteConsumer(const CodeCompleteOptions &CodeCompleteOpts)
@@ -1139,7 +1227,8 @@ public:
virtual void ProcessOverloadCandidates(Sema &S, unsigned CurrentArg,
OverloadCandidate *Candidates,
unsigned NumCandidates,
- SourceLocation OpenParLoc) {}
+ SourceLocation OpenParLoc,
+ bool Braced) {}
//@}
/// Retrieve the allocator that will be used to allocate
@@ -1190,7 +1279,8 @@ public:
void ProcessOverloadCandidates(Sema &S, unsigned CurrentArg,
OverloadCandidate *Candidates,
unsigned NumCandidates,
- SourceLocation OpenParLoc) override;
+ SourceLocation OpenParLoc,
+ bool Braced) override;
bool isResultFilteredOut(StringRef Filter, CodeCompletionResult Results) override;
diff --git a/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h b/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h
index 423f4f4ee7b7..4561cca929c0 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/DeclSpec.h
@@ -32,6 +32,7 @@
#include "clang/Lex/Token.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/ParsedAttr.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
@@ -61,9 +62,18 @@ namespace clang {
/// often used as if it meant "present".
///
/// The actual scope is described by getScopeRep().
+///
+/// If the kind of getScopeRep() is TypeSpec then TemplateParamLists may be empty
+/// or contain the template parameter lists attached to the current declaration.
+/// Consider the following example:
+/// template <class T> void SomeType<T>::some_method() {}
+/// If CXXScopeSpec refers to SomeType<T> then TemplateParamLists will contain
+/// a single element referring to template <class T>.
+
class CXXScopeSpec {
SourceRange Range;
NestedNameSpecifierLocBuilder Builder;
+ ArrayRef<TemplateParameterList *> TemplateParamLists;
public:
SourceRange getRange() const { return Range; }
@@ -73,6 +83,13 @@ public:
SourceLocation getBeginLoc() const { return Range.getBegin(); }
SourceLocation getEndLoc() const { return Range.getEnd(); }
+ void setTemplateParamLists(ArrayRef<TemplateParameterList *> L) {
+ TemplateParamLists = L;
+ }
+ ArrayRef<TemplateParameterList *> getTemplateParamLists() const {
+ return TemplateParamLists;
+ }
+
/// Retrieve the representation of the nested-name-specifier.
NestedNameSpecifier *getScopeRep() const {
return Builder.getRepresentation();
@@ -266,7 +283,7 @@ public:
static const TST TST_char32 = clang::TST_char32;
static const TST TST_int = clang::TST_int;
static const TST TST_int128 = clang::TST_int128;
- static const TST TST_extint = clang::TST_extint;
+ static const TST TST_bitint = clang::TST_bitint;
static const TST TST_half = clang::TST_half;
static const TST TST_BFloat16 = clang::TST_BFloat16;
static const TST TST_float = clang::TST_float;
@@ -275,6 +292,7 @@ public:
static const TST TST_accum = clang::TST_Accum;
static const TST TST_fract = clang::TST_Fract;
static const TST TST_float128 = clang::TST_float128;
+ static const TST TST_ibm128 = clang::TST_ibm128;
static const TST TST_bool = clang::TST_bool;
static const TST TST_decimal32 = clang::TST_decimal32;
static const TST TST_decimal64 = clang::TST_decimal64;
@@ -287,9 +305,13 @@ public:
static const TST TST_typename = clang::TST_typename;
static const TST TST_typeofType = clang::TST_typeofType;
static const TST TST_typeofExpr = clang::TST_typeofExpr;
+ static const TST TST_typeof_unqualType = clang::TST_typeof_unqualType;
+ static const TST TST_typeof_unqualExpr = clang::TST_typeof_unqualExpr;
static const TST TST_decltype = clang::TST_decltype;
static const TST TST_decltype_auto = clang::TST_decltype_auto;
- static const TST TST_underlyingType = clang::TST_underlyingType;
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) \
+ static const TST TST_##Trait = clang::TST_##Trait;
+#include "clang/Basic/TransformTypeTraits.def"
static const TST TST_auto = clang::TST_auto;
static const TST TST_auto_type = clang::TST_auto_type;
static const TST TST_unknown_anytype = clang::TST_unknown_anytype;
@@ -322,6 +344,11 @@ public:
// FIXME: Attributes should be included here.
};
+ enum FriendSpecified : bool {
+ No,
+ Yes,
+ };
+
private:
// storage-class-specifier
/*SCS*/unsigned StorageClassSpec : 3;
@@ -332,7 +359,7 @@ private:
/*TypeSpecifierWidth*/ unsigned TypeSpecWidth : 2;
/*TSC*/unsigned TypeSpecComplex : 2;
/*TSS*/unsigned TypeSpecSign : 2;
- /*TST*/unsigned TypeSpecType : 6;
+ /*TST*/unsigned TypeSpecType : 7;
unsigned TypeAltiVecVector : 1;
unsigned TypeAltiVecPixel : 1;
unsigned TypeAltiVecBool : 1;
@@ -399,11 +426,12 @@ private:
ObjCDeclSpec *ObjCQualifiers;
static bool isTypeRep(TST T) {
- return (T == TST_typename || T == TST_typeofType ||
- T == TST_underlyingType || T == TST_atomic);
+ return T == TST_atomic || T == TST_typename || T == TST_typeofType ||
+ T == TST_typeof_unqualType || isTransformTypeTrait(T);
}
static bool isExprRep(TST T) {
- return (T == TST_typeofExpr || T == TST_decltype || T == TST_extint);
+ return T == TST_typeofExpr || T == TST_typeof_unqualExpr ||
+ T == TST_decltype || T == TST_bitint;
}
static bool isTemplateIdRep(TST T) {
return (T == TST_auto || T == TST_decltype_auto);
@@ -417,6 +445,14 @@ public:
T == TST_interface || T == TST_union ||
T == TST_class);
}
+ static bool isTransformTypeTrait(TST T) {
+ constexpr std::array<TST, 16> Traits = {
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) TST_##Trait,
+#include "clang/Basic/TransformTypeTraits.def"
+ };
+
+ return T >= Traits.front() && T <= Traits.back();
+ }
DeclSpec(AttributeFactory &attrFactory)
: StorageClassSpec(SCS_unspecified),
@@ -433,8 +469,7 @@ public:
FS_noreturn_specified(false), Friend_specified(false),
ConstexprSpecifier(
static_cast<unsigned>(ConstexprSpecKind::Unspecified)),
- FS_explicit_specifier(), Attrs(attrFactory), writtenBS(),
- ObjCQualifiers(nullptr) {}
+ Attrs(attrFactory), writtenBS(), ObjCQualifiers(nullptr) {}
// storage-class-specifier
SCS getStorageClassSpec() const { return (SCS)StorageClassSpec; }
@@ -516,12 +551,13 @@ public:
SourceLocation getTypeSpecSatLoc() const { return TSSatLoc; }
SourceLocation getTypeSpecTypeNameLoc() const {
- assert(isDeclRep((TST) TypeSpecType) || TypeSpecType == TST_typename);
+ assert(isDeclRep((TST)TypeSpecType) || isTypeRep((TST)TypeSpecType) ||
+ isExprRep((TST)TypeSpecType));
return TSTNameLoc;
}
SourceRange getTypeofParensRange() const { return TypeofParensRange; }
- void setTypeofParensRange(SourceRange range) { TypeofParensRange = range; }
+ void setTypeArgumentRange(SourceRange range) { TypeofParensRange = range; }
bool hasAutoTypeSpec() const {
return (TypeSpecType == TST_auto || TypeSpecType == TST_auto_type ||
@@ -702,7 +738,7 @@ public:
bool SetTypePipe(bool isPipe, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
const PrintingPolicy &Policy);
- bool SetExtIntType(SourceLocation KWLoc, Expr *BitWidth,
+ bool SetBitIntType(SourceLocation KWLoc, Expr *BitWidth,
const char *&PrevSpec, unsigned &DiagID,
const PrintingPolicy &Policy);
bool SetTypeSpecSat(SourceLocation Loc, const char *&PrevSpec,
@@ -745,7 +781,10 @@ public:
bool SetConstexprSpec(ConstexprSpecKind ConstexprKind, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID);
- bool isFriendSpecified() const { return Friend_specified; }
+ FriendSpecified isFriendSpecified() const {
+ return static_cast<FriendSpecified>(Friend_specified);
+ }
+
SourceLocation getFriendSpecLoc() const { return FriendLoc; }
bool isModulePrivateSpecified() const { return ModulePrivateLoc.isValid(); }
@@ -786,7 +825,7 @@ public:
/// int __attribute__((may_alias)) __attribute__((aligned(16))) var;
/// \endcode
///
- void addAttributes(ParsedAttributesView &AL) {
+ void addAttributes(const ParsedAttributesView &AL) {
Attrs.addAll(AL.begin(), AL.end());
}
@@ -952,10 +991,10 @@ private:
UnqualifiedId(const UnqualifiedId &Other) = delete;
const UnqualifiedId &operator=(const UnqualifiedId &) = delete;
-public:
/// Describes the kind of unqualified-id parsed.
UnqualifiedIdKind Kind;
+public:
struct OFI {
/// The kind of overloaded operator.
OverloadedOperatorKind Operator;
@@ -1342,7 +1381,7 @@ struct DeclaratorChunk {
/// DeclSpec for the function with the qualifier related info.
DeclSpec *MethodQualifiers;
- /// AtttibuteFactory for the MethodQualifiers.
+ /// AttributeFactory for the MethodQualifiers.
AttributeFactory *QualAttrFactory;
union {
@@ -1490,7 +1529,7 @@ struct DeclaratorChunk {
/// prototype. Typically these are tag declarations.
ArrayRef<NamedDecl *> getDeclsInPrototype() const {
assert(ExceptionSpecType == EST_None);
- return llvm::makeArrayRef(DeclsInPrototype, NumExceptionsOrDecls);
+ return llvm::ArrayRef(DeclsInPrototype, NumExceptionsOrDecls);
}
/// Determine whether this function declarator had a
@@ -1737,7 +1776,7 @@ public:
}
ArrayRef<Binding> bindings() const {
- return llvm::makeArrayRef(Bindings, NumBindings);
+ return llvm::ArrayRef(Bindings, NumBindings);
}
bool isSet() const { return LSquareLoc.isValid(); }
@@ -1785,7 +1824,15 @@ enum class DeclaratorContext {
TemplateTypeArg, // Template type argument (in default argument).
AliasDecl, // C++11 alias-declaration.
AliasTemplate, // C++11 alias-declaration template.
- RequiresExpr // C++2a requires-expression.
+ RequiresExpr, // C++2a requires-expression.
+ Association // C11 _Generic selection expression association.
+};
+
+// Describes whether the current context is a context where an implicit
+// typename is allowed (C++2a [temp.res]p5]).
+enum class ImplicitTypenameContext {
+ No,
+ Yes,
};
/// Information about one declarator, including the parsed type
@@ -1850,9 +1897,13 @@ private:
/// Indicates whether this declarator has an initializer.
unsigned HasInitializer : 1;
- /// Attrs - Attributes.
+ /// Attributes attached to the declarator.
ParsedAttributes Attrs;
+ /// Attributes attached to the declaration. See also documentation for the
+ /// corresponding constructor parameter.
+ const ParsedAttributesView &DeclarationAttrs;
+
/// The asm label, if specified.
Expr *AsmLabel;
@@ -1891,16 +1942,41 @@ private:
friend struct DeclaratorChunk;
public:
- Declarator(const DeclSpec &ds, DeclaratorContext C)
- : DS(ds), Range(ds.getSourceRange()), Context(C),
+ /// `DS` and `DeclarationAttrs` must outlive the `Declarator`. In particular,
+ /// take care not to pass temporary objects for these parameters.
+ ///
+ /// `DeclarationAttrs` contains [[]] attributes from the
+ /// attribute-specifier-seq at the beginning of a declaration, which appertain
+ /// to the declared entity itself. Attributes with other syntax (e.g. GNU)
+ /// should not be placed in this attribute list; if they occur at the
+ /// beginning of a declaration, they apply to the `DeclSpec` and should be
+ /// attached to that instead.
+ ///
+ /// Here is an example of an attribute associated with a declaration:
+ ///
+ /// [[deprecated]] int x, y;
+ ///
+ /// This attribute appertains to all of the entities declared in the
+ /// declaration, i.e. `x` and `y` in this case.
+ Declarator(const DeclSpec &DS, const ParsedAttributesView &DeclarationAttrs,
+ DeclaratorContext C)
+ : DS(DS), Range(DS.getSourceRange()), Context(C),
InvalidType(DS.getTypeSpecType() == DeclSpec::TST_error),
GroupingParens(false), FunctionDefinition(static_cast<unsigned>(
FunctionDefinitionKind::Declaration)),
Redeclaration(false), Extension(false), ObjCIvar(false),
ObjCWeakProperty(false), InlineStorageUsed(false),
- HasInitializer(false), Attrs(ds.getAttributePool().getFactory()),
- AsmLabel(nullptr), TrailingRequiresClause(nullptr),
- InventedTemplateParameterList(nullptr) {}
+ HasInitializer(false), Attrs(DS.getAttributePool().getFactory()),
+ DeclarationAttrs(DeclarationAttrs), AsmLabel(nullptr),
+ TrailingRequiresClause(nullptr),
+ InventedTemplateParameterList(nullptr) {
+ assert(llvm::all_of(DeclarationAttrs,
+ [](const ParsedAttr &AL) {
+ return (AL.isStandardAttributeSyntax() ||
+ AL.isRegularKeywordAttribute());
+ }) &&
+ "DeclarationAttrs may only contain [[]] and keyword attributes");
+ }
~Declarator() {
clear();
@@ -2023,6 +2099,7 @@ public:
case DeclaratorContext::TrailingReturn:
case DeclaratorContext::TrailingReturnVar:
case DeclaratorContext::RequiresExpr:
+ case DeclaratorContext::Association:
return true;
}
llvm_unreachable("unknown context kind!");
@@ -2062,6 +2139,7 @@ public:
case DeclaratorContext::TemplateTypeArg:
case DeclaratorContext::TrailingReturn:
case DeclaratorContext::TrailingReturnVar:
+ case DeclaratorContext::Association:
return false;
}
llvm_unreachable("unknown context kind!");
@@ -2105,6 +2183,7 @@ public:
case DeclaratorContext::TemplateTypeArg:
case DeclaratorContext::TrailingReturn:
case DeclaratorContext::TrailingReturnVar:
+ case DeclaratorContext::Association:
return false;
}
llvm_unreachable("unknown context kind!");
@@ -2161,6 +2240,7 @@ public:
case DeclaratorContext::TemplateTypeArg:
case DeclaratorContext::TrailingReturn:
case DeclaratorContext::RequiresExpr:
+ case DeclaratorContext::Association:
return false;
}
llvm_unreachable("unknown context kind!");
@@ -2383,6 +2463,7 @@ public:
case DeclaratorContext::TrailingReturn:
case DeclaratorContext::TrailingReturnVar:
case DeclaratorContext::RequiresExpr:
+ case DeclaratorContext::Association:
return false;
}
llvm_unreachable("unknown context kind!");
@@ -2417,6 +2498,7 @@ public:
case DeclaratorContext::TrailingReturnVar:
case DeclaratorContext::TemplateTypeArg:
case DeclaratorContext::RequiresExpr:
+ case DeclaratorContext::Association:
return false;
case DeclaratorContext::Block:
@@ -2513,19 +2595,24 @@ public:
/// __attribute__((common,deprecated));
///
/// Also extends the range of the declarator.
- void takeAttributes(ParsedAttributes &attrs, SourceLocation lastLoc) {
+ void takeAttributes(ParsedAttributes &attrs) {
Attrs.takeAllFrom(attrs);
- if (!lastLoc.isInvalid())
- SetRangeEnd(lastLoc);
+ if (attrs.Range.getEnd().isValid())
+ SetRangeEnd(attrs.Range.getEnd());
}
const ParsedAttributes &getAttributes() const { return Attrs; }
ParsedAttributes &getAttributes() { return Attrs; }
+ const ParsedAttributesView &getDeclarationAttributes() const {
+ return DeclarationAttrs;
+ }
+
/// hasAttributes - do we contain any attributes?
bool hasAttributes() const {
- if (!getAttributes().empty() || getDeclSpec().hasAttributes())
+ if (!getAttributes().empty() || !getDeclarationAttributes().empty() ||
+ getDeclSpec().hasAttributes())
return true;
for (unsigned i = 0, e = getNumTypeObjects(); i != e; ++i)
if (!getTypeObject(i).getAttrs().empty())
@@ -2533,14 +2620,6 @@ public:
return false;
}
- /// Return a source range list of C++11 attributes associated
- /// with the declarator.
- void getCXX11AttributeRanges(SmallVectorImpl<SourceRange> &Ranges) {
- for (const ParsedAttr &AL : Attrs)
- if (AL.isCXX11Attribute())
- Ranges.push_back(AL.getRange());
- }
-
void setAsmLabel(Expr *E) { AsmLabel = E; }
Expr *getAsmLabel() const { return AsmLabel; }
@@ -2595,6 +2674,8 @@ public:
/// redeclaration time if the decl is static.
bool isStaticMember();
+ bool isExplicitObjectMemberFunction();
+
/// Returns true if this declares a constructor or a destructor.
bool isCtorOrDtor();
@@ -2607,8 +2688,10 @@ public:
struct FieldDeclarator {
Declarator D;
Expr *BitfieldSize;
- explicit FieldDeclarator(const DeclSpec &DS)
- : D(DS, DeclaratorContext::Member), BitfieldSize(nullptr) {}
+ explicit FieldDeclarator(const DeclSpec &DS,
+ const ParsedAttributes &DeclarationAttrs)
+ : D(DS, DeclarationAttrs, DeclaratorContext::Member),
+ BitfieldSize(nullptr) {}
};
/// Represents a C++11 virt-specifier-seq.
@@ -2624,7 +2707,7 @@ public:
VS_Abstract = 16
};
- VirtSpecifiers() : Specifiers(0), LastSpecifier(VS_None) { }
+ VirtSpecifiers() = default;
bool SetSpecifier(Specifier VS, SourceLocation Loc,
const char *&PrevSpec);
@@ -2648,8 +2731,8 @@ public:
Specifier getLastSpecifier() const { return LastSpecifier; }
private:
- unsigned Specifiers;
- Specifier LastSpecifier;
+ unsigned Specifiers = 0;
+ Specifier LastSpecifier = VS_None;
SourceLocation VS_overrideLoc, VS_finalLoc, VS_abstractLoc;
SourceLocation FirstLocation;
@@ -2688,11 +2771,14 @@ struct LambdaIntroducer {
SourceRange Range;
SourceLocation DefaultLoc;
- LambdaCaptureDefault Default;
+ LambdaCaptureDefault Default = LCD_None;
SmallVector<LambdaCapture, 4> Captures;
- LambdaIntroducer()
- : Default(LCD_None) {}
+ LambdaIntroducer() = default;
+
+ bool hasLambdaCapture() const {
+ return Captures.size() > 0 || Default != LCD_None;
+ }
/// Append a capture in a lambda introducer.
void addCapture(LambdaCaptureKind Kind,
diff --git a/contrib/llvm-project/clang/include/clang/Sema/DelayedDiagnostic.h b/contrib/llvm-project/clang/include/clang/Sema/DelayedDiagnostic.h
index 50abcc421d45..9de7131f74c7 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/DelayedDiagnostic.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/DelayedDiagnostic.h
@@ -190,8 +190,8 @@ public:
ArrayRef<SourceLocation> getAvailabilitySelectorLocs() const {
assert(Kind == Availability && "Not an availability diagnostic.");
- return llvm::makeArrayRef(AvailabilityData.SelectorLocs,
- AvailabilityData.NumSelectorLocs);
+ return llvm::ArrayRef(AvailabilityData.SelectorLocs,
+ AvailabilityData.NumSelectorLocs);
}
AvailabilityResult getAvailabilityResult() const {
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Designator.h b/contrib/llvm-project/clang/include/clang/Sema/Designator.h
index 84837bfeba5b..244535978d4b 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Designator.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Designator.h
@@ -21,60 +21,108 @@ namespace clang {
class Expr;
class IdentifierInfo;
-class Sema;
/// Designator - A designator in a C99 designated initializer.
///
/// This class is a discriminated union which holds the various
-/// different sorts of designators possible. A Designation is an array of
+/// different sorts of designators possible. A Designation is an array of
/// these. An example of a designator are things like this:
-/// [8] .field [47] // C99 designation: 3 designators
-/// [8 ... 47] field: // GNU extensions: 2 designators
+///
+/// [8] .field [47] // C99 designation: 3 designators
+/// [8 ... 47] field: // GNU extensions: 2 designators
+///
/// These occur in initializers, e.g.:
-/// int a[10] = {2, 4, [8]=9, 10};
+///
+/// int a[10] = {2, 4, [8]=9, 10};
///
class Designator {
-public:
- enum DesignatorKind {
- FieldDesignator, ArrayDesignator, ArrayRangeDesignator
- };
-private:
- Designator() {};
-
- DesignatorKind Kind;
-
+ /// A field designator, e.g., ".x = 42".
struct FieldDesignatorInfo {
- const IdentifierInfo *II;
+ /// Refers to the field being initialized.
+ const IdentifierInfo *FieldName;
+
+ /// The location of the '.' in the designated initializer.
SourceLocation DotLoc;
- SourceLocation NameLoc;
+
+ /// The location of the field name in the designated initializer.
+ SourceLocation FieldLoc;
+
+ FieldDesignatorInfo(const IdentifierInfo *FieldName, SourceLocation DotLoc,
+ SourceLocation FieldLoc)
+ : FieldName(FieldName), DotLoc(DotLoc), FieldLoc(FieldLoc) {}
};
+
+ /// An array designator, e.g., "[42] = 0".
struct ArrayDesignatorInfo {
Expr *Index;
+
+ // The location of the '[' in the designated initializer.
SourceLocation LBracketLoc;
+
+ // The location of the ']' in the designated initializer.
mutable SourceLocation RBracketLoc;
+
+ ArrayDesignatorInfo(Expr *Index, SourceLocation LBracketLoc)
+ : Index(Index), LBracketLoc(LBracketLoc) {}
};
+
+ /// An array range designator, e.g. "[42 ... 50] = 1".
struct ArrayRangeDesignatorInfo {
- Expr *Start, *End;
- SourceLocation LBracketLoc, EllipsisLoc;
+ Expr *Start;
+ Expr *End;
+
+ // The location of the '[' in the designated initializer.
+ SourceLocation LBracketLoc;
+
+ // The location of the '...' in the designated initializer.
+ SourceLocation EllipsisLoc;
+
+ // The location of the ']' in the designated initializer.
mutable SourceLocation RBracketLoc;
+
+ ArrayRangeDesignatorInfo(Expr *Start, Expr *End, SourceLocation LBracketLoc,
+ SourceLocation EllipsisLoc)
+ : Start(Start), End(End), LBracketLoc(LBracketLoc),
+ EllipsisLoc(EllipsisLoc) {}
};
+ /// The kind of designator this describes.
+ enum DesignatorKind {
+ FieldDesignator,
+ ArrayDesignator,
+ ArrayRangeDesignator
+ };
+
+ DesignatorKind Kind;
+
union {
FieldDesignatorInfo FieldInfo;
ArrayDesignatorInfo ArrayInfo;
ArrayRangeDesignatorInfo ArrayRangeInfo;
};
-public:
+ Designator(DesignatorKind Kind) : Kind(Kind) {}
- DesignatorKind getKind() const { return Kind; }
+public:
bool isFieldDesignator() const { return Kind == FieldDesignator; }
bool isArrayDesignator() const { return Kind == ArrayDesignator; }
bool isArrayRangeDesignator() const { return Kind == ArrayRangeDesignator; }
- const IdentifierInfo *getField() const {
+ //===--------------------------------------------------------------------===//
+ // FieldDesignatorInfo
+
+ /// Creates a field designator.
+ static Designator CreateFieldDesignator(const IdentifierInfo *FieldName,
+ SourceLocation DotLoc,
+ SourceLocation FieldLoc) {
+ Designator D(FieldDesignator);
+ new (&D.FieldInfo) FieldDesignatorInfo(FieldName, DotLoc, FieldLoc);
+ return D;
+ }
+
+ const IdentifierInfo *getFieldDecl() const {
assert(isFieldDesignator() && "Invalid accessor");
- return FieldInfo.II;
+ return FieldInfo.FieldName;
}
SourceLocation getDotLoc() const {
@@ -84,7 +132,18 @@ public:
SourceLocation getFieldLoc() const {
assert(isFieldDesignator() && "Invalid accessor");
- return FieldInfo.NameLoc;
+ return FieldInfo.FieldLoc;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // ArrayDesignatorInfo:
+
+ /// Creates an array designator.
+ static Designator CreateArrayDesignator(Expr *Index,
+ SourceLocation LBracketLoc) {
+ Designator D(ArrayDesignator);
+ new (&D.ArrayInfo) ArrayDesignatorInfo(Index, LBracketLoc);
+ return D;
}
Expr *getArrayIndex() const {
@@ -92,73 +151,46 @@ public:
return ArrayInfo.Index;
}
- Expr *getArrayRangeStart() const {
- assert(isArrayRangeDesignator() && "Invalid accessor");
- return ArrayRangeInfo.Start;
- }
- Expr *getArrayRangeEnd() const {
- assert(isArrayRangeDesignator() && "Invalid accessor");
- return ArrayRangeInfo.End;
- }
-
SourceLocation getLBracketLoc() const {
assert((isArrayDesignator() || isArrayRangeDesignator()) &&
"Invalid accessor");
- if (isArrayDesignator())
- return ArrayInfo.LBracketLoc;
- else
- return ArrayRangeInfo.LBracketLoc;
+ return isArrayDesignator() ? ArrayInfo.LBracketLoc
+ : ArrayRangeInfo.LBracketLoc;
}
SourceLocation getRBracketLoc() const {
assert((isArrayDesignator() || isArrayRangeDesignator()) &&
"Invalid accessor");
- if (isArrayDesignator())
- return ArrayInfo.RBracketLoc;
- else
- return ArrayRangeInfo.RBracketLoc;
+ return isArrayDesignator() ? ArrayInfo.RBracketLoc
+ : ArrayRangeInfo.RBracketLoc;
}
- SourceLocation getEllipsisLoc() const {
- assert(isArrayRangeDesignator() && "Invalid accessor");
- return ArrayRangeInfo.EllipsisLoc;
- }
+ //===--------------------------------------------------------------------===//
+ // ArrayRangeDesignatorInfo:
- static Designator getField(const IdentifierInfo *II, SourceLocation DotLoc,
- SourceLocation NameLoc) {
- Designator D;
- D.Kind = FieldDesignator;
- new (&D.FieldInfo) FieldDesignatorInfo;
- D.FieldInfo.II = II;
- D.FieldInfo.DotLoc = DotLoc;
- D.FieldInfo.NameLoc = NameLoc;
+ /// Creates a GNU array-range designator.
+ static Designator CreateArrayRangeDesignator(Expr *Start, Expr *End,
+ SourceLocation LBracketLoc,
+ SourceLocation EllipsisLoc) {
+ Designator D(ArrayRangeDesignator);
+ new (&D.ArrayRangeInfo)
+ ArrayRangeDesignatorInfo(Start, End, LBracketLoc, EllipsisLoc);
return D;
}
- static Designator getArray(Expr *Index,
- SourceLocation LBracketLoc) {
- Designator D;
- D.Kind = ArrayDesignator;
- new (&D.ArrayInfo) ArrayDesignatorInfo;
- D.ArrayInfo.Index = Index;
- D.ArrayInfo.LBracketLoc = LBracketLoc;
- D.ArrayInfo.RBracketLoc = SourceLocation();
- return D;
+ Expr *getArrayRangeStart() const {
+ assert(isArrayRangeDesignator() && "Invalid accessor");
+ return ArrayRangeInfo.Start;
}
- static Designator getArrayRange(Expr *Start,
- Expr *End,
- SourceLocation LBracketLoc,
- SourceLocation EllipsisLoc) {
- Designator D;
- D.Kind = ArrayRangeDesignator;
- new (&D.ArrayRangeInfo) ArrayRangeDesignatorInfo;
- D.ArrayRangeInfo.Start = Start;
- D.ArrayRangeInfo.End = End;
- D.ArrayRangeInfo.LBracketLoc = LBracketLoc;
- D.ArrayRangeInfo.EllipsisLoc = EllipsisLoc;
- D.ArrayRangeInfo.RBracketLoc = SourceLocation();
- return D;
+ Expr *getArrayRangeEnd() const {
+ assert(isArrayRangeDesignator() && "Invalid accessor");
+ return ArrayRangeInfo.End;
+ }
+
+ SourceLocation getEllipsisLoc() const {
+ assert(isArrayRangeDesignator() && "Invalid accessor");
+ return ArrayRangeInfo.EllipsisLoc;
}
void setRBracketLoc(SourceLocation RBracketLoc) const {
@@ -169,17 +201,8 @@ public:
else
ArrayRangeInfo.RBracketLoc = RBracketLoc;
}
-
- /// ClearExprs - Null out any expression references, which prevents
- /// them from being 'delete'd later.
- void ClearExprs(Sema &Actions) {}
-
- /// FreeExprs - Release any unclaimed memory for the expressions in
- /// this designator.
- void FreeExprs(Sema &Actions) {}
};
-
/// Designation - Represent a full designation, which is a sequence of
/// designators. This class is mostly a helper for InitListDesignations.
class Designation {
@@ -188,9 +211,7 @@ class Designation {
public:
/// AddDesignator - Add a designator to the end of this list.
- void AddDesignator(Designator D) {
- Designators.push_back(D);
- }
+ void AddDesignator(Designator D) { Designators.push_back(D); }
bool empty() const { return Designators.empty(); }
@@ -199,14 +220,6 @@ public:
assert(Idx < Designators.size());
return Designators[Idx];
}
-
- /// ClearExprs - Null out any expression references, which prevents them from
- /// being 'delete'd later.
- void ClearExprs(Sema &Actions) {}
-
- /// FreeExprs - Release any unclaimed memory for the expressions in this
- /// designation.
- void FreeExprs(Sema &Actions) {}
};
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Sema/EnterExpressionEvaluationContext.h b/contrib/llvm-project/clang/include/clang/Sema/EnterExpressionEvaluationContext.h
new file mode 100644
index 000000000000..5eca797b8842
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Sema/EnterExpressionEvaluationContext.h
@@ -0,0 +1,69 @@
+//===--- EnterExpressionEvaluationContext.h ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_ENTEREXPRESSIONEVALUATIONCONTEXT_H
+#define LLVM_CLANG_SEMA_ENTEREXPRESSIONEVALUATIONCONTEXT_H
+
+#include "clang/Sema/Sema.h"
+
+namespace clang {
+
+class Decl;
+
+/// RAII object that enters a new expression evaluation context.
+class EnterExpressionEvaluationContext {
+ Sema &Actions;
+ bool Entered = true;
+
+public:
+ EnterExpressionEvaluationContext(
+ Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
+ Decl *LambdaContextDecl = nullptr,
+ Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
+ Sema::ExpressionEvaluationContextRecord::EK_Other,
+ bool ShouldEnter = true)
+ : Actions(Actions), Entered(ShouldEnter) {
+ if (Entered)
+ Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
+ ExprContext);
+ }
+ EnterExpressionEvaluationContext(
+ Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
+ Sema::ReuseLambdaContextDecl_t,
+ Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
+ Sema::ExpressionEvaluationContextRecord::EK_Other)
+ : Actions(Actions) {
+ Actions.PushExpressionEvaluationContext(
+ NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
+ }
+
+ enum InitListTag { InitList };
+ EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
+ bool ShouldEnter = true)
+ : Actions(Actions), Entered(false) {
+ // In C++11 onwards, narrowing checks are performed on the contents of
+ // braced-init-lists, even when they occur within unevaluated operands.
+ // Therefore we still need to instantiate constexpr functions used in such
+ // a context.
+ if (ShouldEnter && Actions.isUnevaluatedContext() &&
+ Actions.getLangOpts().CPlusPlus11) {
+ Actions.PushExpressionEvaluationContext(
+ Sema::ExpressionEvaluationContext::UnevaluatedList);
+ Entered = true;
+ }
+ }
+
+ ~EnterExpressionEvaluationContext() {
+ if (Entered)
+ Actions.PopExpressionEvaluationContext();
+ }
+};
+
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/include/clang/Sema/ExternalSemaSource.h b/contrib/llvm-project/clang/include/clang/Sema/ExternalSemaSource.h
index 9c18aa1398d3..22d1ee2df115 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/ExternalSemaSource.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/ExternalSemaSource.h
@@ -26,11 +26,9 @@ template <class T, unsigned n> class SmallSetVector;
namespace clang {
class CXXConstructorDecl;
-class CXXDeleteExpr;
class CXXRecordDecl;
class DeclaratorDecl;
class LookupResult;
-struct ObjCMethodList;
class Scope;
class Sema;
class TypedefNameDecl;
@@ -232,6 +230,11 @@ public:
return false;
}
+ /// Notify the external source that a lambda was assigned a mangling number.
+ /// This enables the external source to track the correspondence between
+ /// lambdas and mangling numbers if necessary.
+ virtual void AssignedLambdaNumbering(const CXXRecordDecl *Lambda) {}
+
/// LLVM-style RTTI.
/// \{
bool isA(const void *ClassID) const override {
diff --git a/contrib/llvm-project/clang/include/clang/Sema/HLSLExternalSemaSource.h b/contrib/llvm-project/clang/include/clang/Sema/HLSLExternalSemaSource.h
new file mode 100644
index 000000000000..c0bfff327139
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Sema/HLSLExternalSemaSource.h
@@ -0,0 +1,55 @@
+//===--- HLSLExternalSemaSource.h - HLSL Sema Source ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the HLSLExternalSemaSource interface.
+//
+//===----------------------------------------------------------------------===//
+#ifndef CLANG_SEMA_HLSLEXTERNALSEMASOURCE_H
+#define CLANG_SEMA_HLSLEXTERNALSEMASOURCE_H
+
+#include "llvm/ADT/DenseMap.h"
+
+#include "clang/Sema/ExternalSemaSource.h"
+
+namespace clang {
+class NamespaceDecl;
+class Sema;
+
+class HLSLExternalSemaSource : public ExternalSemaSource {
+ Sema *SemaPtr = nullptr;
+ NamespaceDecl *HLSLNamespace = nullptr;
+ CXXRecordDecl *ResourceDecl = nullptr;
+
+ using CompletionFunction = std::function<void(CXXRecordDecl *)>;
+ llvm::DenseMap<CXXRecordDecl *, CompletionFunction> Completions;
+
+ void defineHLSLVectorAlias();
+ void defineTrivialHLSLTypes();
+ void defineHLSLTypesWithForwardDeclarations();
+
+ void onCompletion(CXXRecordDecl *Record, CompletionFunction Fn);
+
+public:
+ ~HLSLExternalSemaSource() override;
+
+ /// Initialize the semantic source with the Sema instance
+ /// being used to perform semantic analysis on the abstract syntax
+ /// tree.
+ void InitializeSema(Sema &S) override;
+
+ /// Inform the semantic consumer that Sema is no longer available.
+ void ForgetSema() override { SemaPtr = nullptr; }
+
+ using ExternalASTSource::CompleteType;
+ /// Complete an incomplete HLSL builtin type
+ void CompleteType(TagDecl *Tag) override;
+};
+
+} // namespace clang
+
+#endif // CLANG_SEMA_HLSLEXTERNALSEMASOURCE_H
diff --git a/contrib/llvm-project/clang/include/clang/Sema/IdentifierResolver.h b/contrib/llvm-project/clang/include/clang/Sema/IdentifierResolver.h
index 7c8dc46307d4..557f51314640 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/IdentifierResolver.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/IdentifierResolver.h
@@ -134,13 +134,17 @@ public:
explicit IdentifierResolver(Preprocessor &PP);
~IdentifierResolver();
- /// begin - Returns an iterator for decls with the name 'Name'.
+ IdentifierResolver(const IdentifierResolver &) = delete;
+ IdentifierResolver &operator=(const IdentifierResolver &) = delete;
+
+ /// Returns a range of decls with the name 'Name'.
+ llvm::iterator_range<iterator> decls(DeclarationName Name);
+
+ /// Returns an iterator over decls with the name 'Name'.
iterator begin(DeclarationName Name);
- /// end - Returns an iterator that has 'finished'.
- iterator end() {
- return iterator();
- }
+ /// Returns the end iterator.
+ iterator end() { return iterator(); }
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Initialization.h b/contrib/llvm-project/clang/include/clang/Sema/Initialization.h
index 420803f8e33c..2072cd8d1c3e 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Initialization.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Initialization.h
@@ -38,7 +38,6 @@
namespace clang {
-class APValue;
class CXXBaseSpecifier;
class CXXConstructorDecl;
class ObjCMethodDecl;
@@ -124,6 +123,10 @@ public:
/// decomposition declaration.
EK_Binding,
+ /// The entity being initialized is a non-static data member subobject of an
+ /// object initialized via parenthesized aggregate initialization.
+ EK_ParenAggInitMember,
+
// Note: err_init_conversion_failed in DiagnosticSemaKinds.td uses this
// enum as an index for its first %select. When modifying this list,
// that diagnostic text needs to be updated as well.
@@ -228,8 +231,10 @@ private:
/// Create the initialization entity for a member subobject.
InitializedEntity(FieldDecl *Member, const InitializedEntity *Parent,
- bool Implicit, bool DefaultMemberInit)
- : Kind(EK_Member), Parent(Parent), Type(Member->getType()),
+ bool Implicit, bool DefaultMemberInit,
+ bool IsParenAggInit = false)
+ : Kind(IsParenAggInit ? EK_ParenAggInitMember : EK_Member),
+ Parent(Parent), Type(Member->getType()),
Variable{Member, Implicit, DefaultMemberInit} {}
/// Create the initialization entity for an array element.
@@ -335,8 +340,15 @@ public:
}
/// Create the initialization entity for a temporary.
- static InitializedEntity InitializeTemporary(TypeSourceInfo *TypeInfo) {
- return InitializeTemporary(TypeInfo, TypeInfo->getType());
+ static InitializedEntity InitializeTemporary(ASTContext &Context,
+ TypeSourceInfo *TypeInfo) {
+ QualType Type = TypeInfo->getType();
+ if (Context.getLangOpts().OpenCLCPlusPlus) {
+ assert(!Type.hasAddressSpace() && "Temporary already has address space!");
+ Type = Context.getAddrSpaceQualType(Type, LangAS::opencl_private);
+ }
+
+ return InitializeTemporary(TypeInfo, Type);
}
/// Create the initialization entity for a temporary.
@@ -382,6 +394,14 @@ public:
return InitializedEntity(Member->getAnonField(), Parent, Implicit, false);
}
+ /// Create the initialization entity for a member subobject initialized via
+ /// parenthesized aggregate init.
+ static InitializedEntity InitializeMemberFromParenAggInit(FieldDecl *Member) {
+ return InitializedEntity(Member, /*Parent=*/nullptr, /*Implicit=*/false,
+ /*DefaultMemberInit=*/false,
+ /*IsParenAggInit=*/true);
+ }
+
/// Create the initialization entity for a default member initializer.
static InitializedEntity
InitializeMemberFromDefaultMemberInitializer(FieldDecl *Member) {
@@ -481,7 +501,7 @@ public:
/// Determine whether this is an array new with an unknown bound.
bool isVariableLengthArrayNew() const {
- return getKind() == EK_New && dyn_cast_or_null<IncompleteArrayType>(
+ return getKind() == EK_New && isa_and_nonnull<IncompleteArrayType>(
getType()->getAsArrayTypeUnsafe());
}
@@ -917,7 +937,11 @@ public:
SK_OCLSamplerInit,
/// Initialize an opaque OpenCL type (event_t, queue_t, etc.) with zero
- SK_OCLZeroOpaqueType
+ SK_OCLZeroOpaqueType,
+
+ /// Initialize an aggreagate with parenthesized list of values.
+ /// This is a C++20 feature.
+ SK_ParenthesizedListInit
};
/// A single step in the initialization sequence.
@@ -1093,6 +1117,13 @@ public:
/// List-copy-initialization chose an explicit constructor.
FK_ExplicitConstructor,
+
+ /// Parenthesized list initialization failed at some point.
+ /// This is a C++20 feature.
+ FK_ParenthesizedListInitFailed,
+
+ // A designated initializer was provided for a non-aggregate type.
+ FK_DesignatedInitForNonAggregate,
};
private:
@@ -1351,6 +1382,8 @@ public:
/// from a zero constant.
void AddOCLZeroOpaqueTypeStep(QualType T);
+ void AddParenthesizedListInitStep(QualType T);
+
/// Add steps to unwrap a initializer list for a reference around a
/// single element and rewrap it at the end.
void RewrapReferenceInitList(QualType T, InitListExpr *Syntactic);
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Lookup.h b/contrib/llvm-project/clang/include/clang/Sema/Lookup.h
index c6edc2df5b9f..2f2f2607a937 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Lookup.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Lookup.h
@@ -26,10 +26,10 @@
#include "clang/Basic/Specifiers.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/MapVector.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Casting.h"
#include <cassert>
+#include <optional>
#include <utility>
namespace clang {
@@ -117,6 +117,17 @@ public:
/// @endcode
AmbiguousReference,
+ /// Name lookup results in an ambiguity because multiple placeholder
+ /// variables were found in the same scope.
+ /// @code
+ /// void f() {
+ /// int _ = 0;
+ /// int _ = 0;
+ /// return _; // ambiguous use of placeholder variable
+ /// }
+ /// @endcode
+ AmbiguousReferenceToPlaceholderVariable,
+
/// Name lookup results in an ambiguity because an entity with a
/// tag name was hidden by an entity with an ordinary name from
/// a different context.
@@ -148,20 +159,22 @@ public:
: SemaPtr(&SemaRef), NameInfo(NameInfo), LookupKind(LookupKind),
Redecl(Redecl != Sema::NotForRedeclaration),
ExternalRedecl(Redecl == Sema::ForExternalRedeclaration),
- Diagnose(Redecl == Sema::NotForRedeclaration) {
+ DiagnoseAccess(Redecl == Sema::NotForRedeclaration),
+ DiagnoseAmbiguous(Redecl == Sema::NotForRedeclaration) {
configure();
}
// TODO: consider whether this constructor should be restricted to take
// as input a const IdentifierInfo* (instead of Name),
// forcing other cases towards the constructor taking a DNInfo.
- LookupResult(Sema &SemaRef, DeclarationName Name,
- SourceLocation NameLoc, Sema::LookupNameKind LookupKind,
+ LookupResult(Sema &SemaRef, DeclarationName Name, SourceLocation NameLoc,
+ Sema::LookupNameKind LookupKind,
Sema::RedeclarationKind Redecl = Sema::NotForRedeclaration)
: SemaPtr(&SemaRef), NameInfo(Name, NameLoc), LookupKind(LookupKind),
Redecl(Redecl != Sema::NotForRedeclaration),
ExternalRedecl(Redecl == Sema::ForExternalRedeclaration),
- Diagnose(Redecl == Sema::NotForRedeclaration) {
+ DiagnoseAccess(Redecl == Sema::NotForRedeclaration),
+ DiagnoseAmbiguous(Redecl == Sema::NotForRedeclaration) {
configure();
}
@@ -192,12 +205,14 @@ public:
Redecl(std::move(Other.Redecl)),
ExternalRedecl(std::move(Other.ExternalRedecl)),
HideTags(std::move(Other.HideTags)),
- Diagnose(std::move(Other.Diagnose)),
+ DiagnoseAccess(std::move(Other.DiagnoseAccess)),
+ DiagnoseAmbiguous(std::move(Other.DiagnoseAmbiguous)),
AllowHidden(std::move(Other.AllowHidden)),
Shadowed(std::move(Other.Shadowed)),
TemplateNameLookup(std::move(Other.TemplateNameLookup)) {
Other.Paths = nullptr;
- Other.Diagnose = false;
+ Other.DiagnoseAccess = false;
+ Other.DiagnoseAmbiguous = false;
}
LookupResult &operator=(LookupResult &&Other) {
@@ -215,17 +230,22 @@ public:
Redecl = std::move(Other.Redecl);
ExternalRedecl = std::move(Other.ExternalRedecl);
HideTags = std::move(Other.HideTags);
- Diagnose = std::move(Other.Diagnose);
+ DiagnoseAccess = std::move(Other.DiagnoseAccess);
+ DiagnoseAmbiguous = std::move(Other.DiagnoseAmbiguous);
AllowHidden = std::move(Other.AllowHidden);
Shadowed = std::move(Other.Shadowed);
TemplateNameLookup = std::move(Other.TemplateNameLookup);
Other.Paths = nullptr;
- Other.Diagnose = false;
+ Other.DiagnoseAccess = false;
+ Other.DiagnoseAmbiguous = false;
return *this;
}
~LookupResult() {
- if (Diagnose) diagnose();
+ if (DiagnoseAccess)
+ diagnoseAccess();
+ if (DiagnoseAmbiguous)
+ diagnoseAmbiguous();
if (Paths) deletePaths(Paths);
}
@@ -319,7 +339,7 @@ public:
}
LookupResultKind getResultKind() const {
- assert(sanity());
+ assert(checkDebugAssumptions());
return ResultKind;
}
@@ -346,30 +366,56 @@ public:
/// Determine whether the given declaration is visible to the
/// program.
- static bool isVisible(Sema &SemaRef, NamedDecl *D) {
- // If this declaration is not hidden, it's visible.
- if (D->isUnconditionallyVisible())
- return true;
+ static bool isVisible(Sema &SemaRef, NamedDecl *D);
- // During template instantiation, we can refer to hidden declarations, if
- // they were visible in any module along the path of instantiation.
- return isVisibleSlow(SemaRef, D);
+ static bool isReachable(Sema &SemaRef, NamedDecl *D);
+
+ static bool isAcceptable(Sema &SemaRef, NamedDecl *D,
+ Sema::AcceptableKind Kind) {
+ return Kind == Sema::AcceptableKind::Visible ? isVisible(SemaRef, D)
+ : isReachable(SemaRef, D);
}
+ /// Determine whether this lookup is permitted to see the declaration.
+ /// Note that a reachable but not visible declaration inhabiting a namespace
+ /// is not allowed to be seen during name lookup.
+ ///
+ /// For example:
+ /// ```
+ /// // m.cppm
+ /// export module m;
+ /// struct reachable { int v; }
+ /// export auto func() { return reachable{43}; }
+ /// // Use.cpp
+ /// import m;
+ /// auto Use() {
+ /// // Not valid. We couldn't see reachable here.
+ /// // So isAvailableForLookup would return false when we look
+ /// up 'reachable' here.
+ /// // return reachable(43).v;
+ /// // Valid. The field name 'v' is allowed during name lookup.
+ /// // So isAvailableForLookup would return true when we look up 'v' here.
+ /// return func().v;
+ /// }
+ /// ```
+ static bool isAvailableForLookup(Sema &SemaRef, NamedDecl *ND);
+
/// Retrieve the accepted (re)declaration of the given declaration,
/// if there is one.
NamedDecl *getAcceptableDecl(NamedDecl *D) const {
if (!D->isInIdentifierNamespace(IDNS))
return nullptr;
- if (isVisible(getSema(), D) || isHiddenDeclarationVisible(D))
+ if (isAvailableForLookup(getSema(), D) || isHiddenDeclarationVisible(D))
return D;
return getAcceptableDeclSlow(D);
}
private:
- static bool isVisibleSlow(Sema &SemaRef, NamedDecl *D);
+ static bool isAcceptableSlow(Sema &SemaRef, NamedDecl *D,
+ Sema::AcceptableKind Kind);
+ static bool isReachableSlow(Sema &SemaRef, NamedDecl *D);
NamedDecl *getAcceptableDeclSlow(NamedDecl *D) const;
public:
@@ -481,7 +527,7 @@ public:
Paths = nullptr;
}
} else {
- llvm::Optional<AmbiguityKind> SavedAK;
+ std::optional<AmbiguityKind> SavedAK;
bool WasAmbiguous = false;
if (ResultKind == Ambiguous) {
SavedAK = Ambiguity;
@@ -495,7 +541,7 @@ public:
if (ResultKind == Ambiguous) {
(void)WasAmbiguous;
assert(WasAmbiguous);
- Ambiguity = SavedAK.getValue();
+ Ambiguity = *SavedAK;
} else if (Paths) {
deletePaths(Paths);
Paths = nullptr;
@@ -581,13 +627,20 @@ public:
/// Suppress the diagnostics that would normally fire because of this
/// lookup. This happens during (e.g.) redeclaration lookups.
void suppressDiagnostics() {
- Diagnose = false;
+ DiagnoseAccess = false;
+ DiagnoseAmbiguous = false;
}
- /// Determines whether this lookup is suppressing diagnostics.
- bool isSuppressingDiagnostics() const {
- return !Diagnose;
- }
+ /// Suppress the diagnostics that would normally fire because of this
+ /// lookup due to access control violations.
+ void suppressAccessDiagnostics() { DiagnoseAccess = false; }
+
+ /// Determines whether this lookup is suppressing access control diagnostics.
+ bool isSuppressingAccessDiagnostics() const { return !DiagnoseAccess; }
+
+ /// Determines whether this lookup is suppressing ambiguous lookup
+ /// diagnostics.
+ bool isSuppressingAmbiguousDiagnostics() const { return !DiagnoseAmbiguous; }
/// Sets a 'context' source range.
void setContextRange(SourceRange SR) {
@@ -631,6 +684,15 @@ public:
F.CalledDone = true;
}
+ // The move assignment operator is defined as deleted pending
+ // further motivation.
+ Filter &operator=(Filter &&) = delete;
+
+ // The copy constrcutor and copy assignment operator is defined as deleted
+ // pending further motivation.
+ Filter(const Filter &) = delete;
+ Filter &operator=(const Filter &) = delete;
+
~Filter() {
assert(CalledDone &&
"LookupResult::Filter destroyed without done() call");
@@ -691,11 +753,15 @@ public:
}
private:
- void diagnose() {
+ void diagnoseAccess() {
+ if (!isAmbiguous() && isClassLookup() &&
+ getSema().getLangOpts().AccessControl)
+ getSema().CheckLookupAccess(*this);
+ }
+
+ void diagnoseAmbiguous() {
if (isAmbiguous())
getSema().DiagnoseAmbiguousLookup(*this);
- else if (isClassLookup() && getSema().getLangOpts().AccessControl)
- getSema().CheckLookupAccess(*this);
}
void setAmbiguous(AmbiguityKind AK) {
@@ -706,10 +772,9 @@ private:
void addDeclsFromBasePaths(const CXXBasePaths &P);
void configure();
- // Sanity checks.
- bool sanity() const;
+ bool checkDebugAssumptions() const;
- bool sanityCheckUnresolved() const {
+ bool checkUnresolved() const {
for (iterator I = begin(), E = end(); I != E; ++I)
if (isa<UnresolvedUsingValueDecl>((*I)->getUnderlyingDecl()))
return true;
@@ -742,7 +807,8 @@ private:
/// are present
bool HideTags = true;
- bool Diagnose = false;
+ bool DiagnoseAccess = false;
+ bool DiagnoseAmbiguous = false;
/// True if we should allow hidden declarations to be 'visible'.
bool AllowHidden = false;
diff --git a/contrib/llvm-project/clang/include/clang/Sema/MultiplexExternalSemaSource.h b/contrib/llvm-project/clang/include/clang/Sema/MultiplexExternalSemaSource.h
index 78658dcf990c..2bf91cb5212c 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/MultiplexExternalSemaSource.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/MultiplexExternalSemaSource.h
@@ -40,25 +40,24 @@ class MultiplexExternalSemaSource : public ExternalSemaSource {
static char ID;
private:
- SmallVector<ExternalSemaSource *, 2> Sources; // doesn't own them.
+ SmallVector<ExternalSemaSource *, 2> Sources;
public:
-
- ///Constructs a new multiplexing external sema source and appends the
+ /// Constructs a new multiplexing external sema source and appends the
/// given element to it.
///
- ///\param[in] s1 - A non-null (old) ExternalSemaSource.
- ///\param[in] s2 - A non-null (new) ExternalSemaSource.
+ ///\param[in] S1 - A non-null (old) ExternalSemaSource.
+ ///\param[in] S2 - A non-null (new) ExternalSemaSource.
///
- MultiplexExternalSemaSource(ExternalSemaSource& s1, ExternalSemaSource& s2);
+ MultiplexExternalSemaSource(ExternalSemaSource *S1, ExternalSemaSource *S2);
~MultiplexExternalSemaSource() override;
- ///Appends new source to the source list.
+ /// Appends new source to the source list.
///
- ///\param[in] source - An ExternalSemaSource.
+ ///\param[in] Source - An ExternalSemaSource.
///
- void addSource(ExternalSemaSource &source);
+ void AddSource(ExternalSemaSource *Source);
//===--------------------------------------------------------------------===//
// ExternalASTSource.
@@ -361,6 +360,9 @@ public:
bool MaybeDiagnoseMissingCompleteType(SourceLocation Loc,
QualType T) override;
+ // Inform all attached sources that a mangling number was assigned.
+ void AssignedLambdaNumbering(const CXXRecordDecl *Lambda) override;
+
/// LLVM-style RTTI.
/// \{
bool isA(const void *ClassID) const override {
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Overload.h b/contrib/llvm-project/clang/include/clang/Sema/Overload.h
index 82661cb3d12a..6ccabad3af54 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Overload.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Overload.h
@@ -26,7 +26,6 @@
#include "clang/Sema/SemaFixItUtils.h"
#include "clang/Sema/TemplateDeduction.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
@@ -163,6 +162,9 @@ class Sema;
/// Arm SVE Vector conversions
ICK_SVE_Vector_Conversion,
+ /// RISC-V RVV Vector conversions
+ ICK_RVV_Vector_Conversion,
+
/// A vector splat from an arithmetic type
ICK_Vector_Splat,
@@ -190,6 +192,9 @@ class Sema;
/// C-only conversion between pointers with incompatible types
ICK_Incompatible_Pointer_Conversion,
+ /// Fixed point type conversions according to N1169.
+ ICK_Fixed_Point_Conversion,
+
/// The number of conversion kinds
ICK_Num_Conversion_Kinds,
};
@@ -252,10 +257,7 @@ class Sema;
/// sequence (C++ 13.3.3.1.1). A standard conversion sequence
/// contains between zero and three conversions. If a particular
/// conversion is not needed, it will be set to the identity conversion
- /// (ICK_Identity). Note that the three conversions are
- /// specified as separate members (rather than in an array) so that
- /// we can keep the size of a standard conversion sequence to a
- /// single word.
+ /// (ICK_Identity).
class StandardConversionSequence {
public:
/// First -- The first conversion can be an lvalue-to-rvalue
@@ -469,7 +471,9 @@ class Sema;
unrelated_class,
bad_qualifiers,
lvalue_ref_to_rvalue,
- rvalue_ref_to_lvalue
+ rvalue_ref_to_lvalue,
+ too_few_initializers,
+ too_many_initializers,
};
// This can be null, e.g. for implicit object arguments.
@@ -519,8 +523,12 @@ class Sema;
/// specifies that there is no conversion from the source type to
/// the target type. AmbiguousConversion represents the unique
/// ambiguous conversion (C++0x [over.best.ics]p10).
+ /// StaticObjectArgumentConversion represents the conversion rules for
+ /// the synthesized first argument of calls to static member functions
+ /// ([over.best.ics.general]p8).
enum Kind {
StandardConversion = 0,
+ StaticObjectArgumentConversion,
UserDefinedConversion,
AmbiguousConversion,
EllipsisConversion,
@@ -533,11 +541,17 @@ class Sema;
};
/// ConversionKind - The kind of implicit conversion sequence.
- unsigned ConversionKind : 30;
+ unsigned ConversionKind : 31;
+
+ // Whether the initializer list was of an incomplete array.
+ unsigned InitializerListOfIncompleteArray : 1;
- /// Whether the target is really a std::initializer_list, and the
- /// sequence only represents the worst element conversion.
- unsigned StdInitializerListElement : 1;
+ /// When initializing an array or std::initializer_list from an
+ /// initializer-list, this is the array or std::initializer_list type being
+ /// initialized. The remainder of the conversion sequence, including ToType,
+ /// describe the worst conversion of an initializer to an element of the
+ /// array or std::initializer_list. (Note, 'worst' is not well defined.)
+ QualType InitializerListContainerType;
void setKind(Kind K) {
destruct();
@@ -568,16 +582,21 @@ class Sema;
};
ImplicitConversionSequence()
- : ConversionKind(Uninitialized), StdInitializerListElement(false) {
+ : ConversionKind(Uninitialized),
+ InitializerListOfIncompleteArray(false) {
Standard.setAsIdentityConversion();
}
ImplicitConversionSequence(const ImplicitConversionSequence &Other)
: ConversionKind(Other.ConversionKind),
- StdInitializerListElement(Other.StdInitializerListElement) {
+ InitializerListOfIncompleteArray(
+ Other.InitializerListOfIncompleteArray),
+ InitializerListContainerType(Other.InitializerListContainerType) {
switch (ConversionKind) {
case Uninitialized: break;
case StandardConversion: Standard = Other.Standard; break;
+ case StaticObjectArgumentConversion:
+ break;
case UserDefinedConversion: UserDefined = Other.UserDefined; break;
case AmbiguousConversion: Ambiguous.copyFrom(Other.Ambiguous); break;
case EllipsisConversion: break;
@@ -611,6 +630,7 @@ class Sema;
unsigned getKindRank() const {
switch (getKind()) {
case StandardConversion:
+ case StaticObjectArgumentConversion:
return 0;
case UserDefinedConversion:
@@ -629,6 +649,9 @@ class Sema;
bool isBad() const { return getKind() == BadConversion; }
bool isStandard() const { return getKind() == StandardConversion; }
+ bool isStaticObjectArgument() const {
+ return getKind() == StaticObjectArgumentConversion;
+ }
bool isEllipsis() const { return getKind() == EllipsisConversion; }
bool isAmbiguous() const { return getKind() == AmbiguousConversion; }
bool isUserDefined() const { return getKind() == UserDefinedConversion; }
@@ -654,6 +677,7 @@ class Sema;
}
void setStandard() { setKind(StandardConversion); }
+ void setStaticObjectArgument() { setKind(StaticObjectArgumentConversion); }
void setEllipsis() { setKind(EllipsisConversion); }
void setUserDefined() { setKind(UserDefinedConversion); }
@@ -670,14 +694,22 @@ class Sema;
Standard.setAllToTypes(T);
}
- /// Whether the target is really a std::initializer_list, and the
- /// sequence only represents the worst element conversion.
- bool isStdInitializerListElement() const {
- return StdInitializerListElement;
+ // True iff this is a conversion sequence from an initializer list to an
+ // array or std::initializer.
+ bool hasInitializerListContainerType() const {
+ return !InitializerListContainerType.isNull();
}
-
- void setStdInitializerListElement(bool V = true) {
- StdInitializerListElement = V;
+ void setInitializerListContainerType(QualType T, bool IA) {
+ InitializerListContainerType = T;
+ InitializerListOfIncompleteArray = IA;
+ }
+ bool isInitializerListOfIncompleteArray() const {
+ return InitializerListOfIncompleteArray;
+ }
+ QualType getInitializerListContainerType() const {
+ assert(hasInitializerListContainerType() &&
+ "not initializer list container");
+ return InitializerListContainerType;
}
/// Form an "implicit" conversion sequence from nullptr_t to bool, for a
@@ -776,6 +808,10 @@ class Sema;
/// This candidate was not viable because its associated constraints were
/// not satisfied.
ovl_fail_constraints_not_satisfied,
+
+ /// This candidate was not viable because it has internal linkage and is
+ /// from a different module unit than the use.
+ ovl_fail_module_mismatched,
};
/// A list of implicit conversion sequences for the arguments of an
@@ -905,6 +941,8 @@ class Sema;
return ExplicitCallArguments;
}
+ bool NotValidBecauseConstraintExprHasError() const;
+
private:
friend class OverloadCandidateSet;
OverloadCandidate()
@@ -941,12 +979,16 @@ class Sema;
/// functions to a candidate set.
struct OperatorRewriteInfo {
OperatorRewriteInfo()
- : OriginalOperator(OO_None), AllowRewrittenCandidates(false) {}
- OperatorRewriteInfo(OverloadedOperatorKind Op, bool AllowRewritten)
- : OriginalOperator(Op), AllowRewrittenCandidates(AllowRewritten) {}
+ : OriginalOperator(OO_None), OpLoc(), AllowRewrittenCandidates(false) {}
+ OperatorRewriteInfo(OverloadedOperatorKind Op, SourceLocation OpLoc,
+ bool AllowRewritten)
+ : OriginalOperator(Op), OpLoc(OpLoc),
+ AllowRewrittenCandidates(AllowRewritten) {}
/// The original operator as written in the source.
OverloadedOperatorKind OriginalOperator;
+ /// The source location of the operator.
+ SourceLocation OpLoc;
/// Whether we should include rewritten candidates in the overload set.
bool AllowRewrittenCandidates;
@@ -982,22 +1024,23 @@ class Sema;
CRK = OverloadCandidateRewriteKind(CRK | CRK_Reversed);
return CRK;
}
-
/// Determines whether this operator could be implemented by a function
/// with reversed parameter order.
bool isReversible() {
return AllowRewrittenCandidates && OriginalOperator &&
(getRewrittenOverloadedOperator(OriginalOperator) != OO_None ||
- shouldAddReversed(OriginalOperator));
+ allowsReversed(OriginalOperator));
}
- /// Determine whether we should consider looking for and adding reversed
- /// candidates for operator Op.
- bool shouldAddReversed(OverloadedOperatorKind Op);
+ /// Determine whether reversing parameter order is allowed for operator
+ /// Op.
+ bool allowsReversed(OverloadedOperatorKind Op);
/// Determine whether we should add a rewritten candidate for \p FD with
/// reversed parameter order.
- bool shouldAddReversed(ASTContext &Ctx, const FunctionDecl *FD);
+ /// \param OriginalArgs are the original non reversed arguments.
+ bool shouldAddReversed(Sema &S, ArrayRef<Expr *> OriginalArgs,
+ FunctionDecl *FD);
};
private:
@@ -1105,8 +1148,9 @@ class Sema;
/// Add a new candidate with NumConversions conversion sequence slots
/// to the overload set.
- OverloadCandidate &addCandidate(unsigned NumConversions = 0,
- ConversionSequenceList Conversions = None) {
+ OverloadCandidate &
+ addCandidate(unsigned NumConversions = 0,
+ ConversionSequenceList Conversions = std::nullopt) {
assert((Conversions.empty() || Conversions.size() == NumConversions) &&
"preallocated conversion sequence has wrong length");
@@ -1184,6 +1228,20 @@ class Sema;
return Info;
}
+ // Returns false if signature help is relevant despite number of arguments
+ // exceeding parameters. Specifically, it returns false when
+ // PartialOverloading is true and one of the following:
+ // * Function is variadic
+ // * Function is template variadic
+ // * Function is an instantiation of template variadic function
+ // The last case may seem strange. The idea is that if we added one more
+ // argument, we'd end up with a function similar to Function. Since, in the
+ // context of signature help and/or code completion, we do not know what the
+ // type of the next argument (that the user is typing) will be, this is as
+ // good candidate as we can get, despite the fact that it takes one less
+ // parameter.
+ bool shouldEnforceArgLimit(bool PartialOverloading, FunctionDecl *Function);
+
} // namespace clang
#endif // LLVM_CLANG_SEMA_OVERLOAD_H
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Ownership.h b/contrib/llvm-project/clang/include/clang/Sema/Ownership.h
index 5c7b010ed736..0752f5de7e33 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Ownership.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Ownership.h
@@ -132,7 +132,6 @@ namespace llvm {
namespace clang {
- // Basic
class StreamingDiagnostic;
// Determines whether the low bit of the result pointer for the
@@ -140,164 +139,147 @@ class StreamingDiagnostic;
// for it's "invalid" flag.
template <class Ptr> struct IsResultPtrLowBitFree {
static const bool value = false;
- };
-
- /// ActionResult - This structure is used while parsing/acting on
- /// expressions, stmts, etc. It encapsulates both the object returned by
- /// the action, plus a sense of whether or not it is valid.
- /// When CompressInvalid is true, the "invalid" flag will be
- /// stored in the low bit of the Val pointer.
- template<class PtrTy,
- bool CompressInvalid = IsResultPtrLowBitFree<PtrTy>::value>
- class ActionResult {
- PtrTy Val;
- bool Invalid;
-
- public:
- ActionResult(bool Invalid = false) : Val(PtrTy()), Invalid(Invalid) {}
- ActionResult(PtrTy val) : Val(val), Invalid(false) {}
- ActionResult(const DiagnosticBuilder &) : Val(PtrTy()), Invalid(true) {}
-
- // These two overloads prevent void* -> bool conversions.
- ActionResult(const void *) = delete;
- ActionResult(volatile void *) = delete;
-
- bool isInvalid() const { return Invalid; }
- bool isUsable() const { return !Invalid && Val; }
- bool isUnset() const { return !Invalid && !Val; }
-
- PtrTy get() const { return Val; }
- template <typename T> T *getAs() { return static_cast<T*>(get()); }
-
- void set(PtrTy V) { Val = V; }
-
- const ActionResult &operator=(PtrTy RHS) {
- Val = RHS;
- Invalid = false;
- return *this;
- }
- };
-
- // This ActionResult partial specialization places the "invalid"
- // flag into the low bit of the pointer.
- template<typename PtrTy>
- class ActionResult<PtrTy, true> {
- // A pointer whose low bit is 1 if this result is invalid, 0
- // otherwise.
- uintptr_t PtrWithInvalid;
-
- using PtrTraits = llvm::PointerLikeTypeTraits<PtrTy>;
-
- public:
- ActionResult(bool Invalid = false)
- : PtrWithInvalid(static_cast<uintptr_t>(Invalid)) {}
-
- ActionResult(PtrTy V) {
- void *VP = PtrTraits::getAsVoidPointer(V);
- PtrWithInvalid = reinterpret_cast<uintptr_t>(VP);
- assert((PtrWithInvalid & 0x01) == 0 && "Badly aligned pointer");
- }
-
- ActionResult(const DiagnosticBuilder &) : PtrWithInvalid(0x01) {}
-
- // These two overloads prevent void* -> bool conversions.
- ActionResult(const void *) = delete;
- ActionResult(volatile void *) = delete;
-
- bool isInvalid() const { return PtrWithInvalid & 0x01; }
- bool isUsable() const { return PtrWithInvalid > 0x01; }
- bool isUnset() const { return PtrWithInvalid == 0; }
-
- PtrTy get() const {
- void *VP = reinterpret_cast<void *>(PtrWithInvalid & ~0x01);
- return PtrTraits::getFromVoidPointer(VP);
- }
-
- template <typename T> T *getAs() { return static_cast<T*>(get()); }
-
- void set(PtrTy V) {
- void *VP = PtrTraits::getAsVoidPointer(V);
- PtrWithInvalid = reinterpret_cast<uintptr_t>(VP);
- assert((PtrWithInvalid & 0x01) == 0 && "Badly aligned pointer");
- }
-
- const ActionResult &operator=(PtrTy RHS) {
- void *VP = PtrTraits::getAsVoidPointer(RHS);
- PtrWithInvalid = reinterpret_cast<uintptr_t>(VP);
- assert((PtrWithInvalid & 0x01) == 0 && "Badly aligned pointer");
- return *this;
- }
-
- // For types where we can fit a flag in with the pointer, provide
- // conversions to/from pointer type.
- static ActionResult getFromOpaquePointer(void *P) {
- ActionResult Result;
- Result.PtrWithInvalid = (uintptr_t)P;
- return Result;
- }
- void *getAsOpaquePointer() const { return (void*)PtrWithInvalid; }
- };
+};
+
+/// The result of parsing/analyzing an expression, statement etc.
+///
+/// It may be:
+/// - usable: a valid pointer to the result object
+/// - unset (null but valid): for constructs that may legitimately be absent
+/// (for example, the condition of a for loop)
+/// - invalid: indicating an error
+/// (no detail is provided, usually the error has already been diagnosed)
+template <class PtrTy, bool Compress = IsResultPtrLowBitFree<PtrTy>::value>
+class ActionResult {
+ PtrTy Val = {};
+ bool Invalid = false;
+
+public:
+ ActionResult(bool Invalid = false) : Val(PtrTy()), Invalid(Invalid) {}
+ ActionResult(PtrTy Val) { *this = Val; }
+ ActionResult(const DiagnosticBuilder &) : ActionResult(/*Invalid=*/true) {}
+
+ // These two overloads prevent void* -> bool conversions.
+ ActionResult(const void *) = delete;
+ ActionResult(volatile void *) = delete;
+
+ bool isInvalid() const { return Invalid; }
+ bool isUnset() const { return !Invalid && !Val; }
+ bool isUsable() const { return !isInvalid() && !isUnset(); }
+
+ PtrTy get() const { return Val; }
+ template <typename T> T *getAs() { return static_cast<T *>(get()); }
+
+ ActionResult &operator=(PtrTy RHS) {
+ Val = RHS;
+ Invalid = false;
+ return *this;
+ }
+};
- /// An opaque type for threading parsed type information through the
- /// parser.
- using ParsedType = OpaquePtr<QualType>;
- using UnionParsedType = UnionOpaquePtr<QualType>;
+// If we PtrTy has a free bit, we can represent "invalid" as nullptr|1.
+template <typename PtrTy> class ActionResult<PtrTy, true> {
+ static constexpr uintptr_t UnsetValue = 0x0;
+ static constexpr uintptr_t InvalidValue = 0x1;
- // We can re-use the low bit of expression, statement, base, and
- // member-initializer pointers for the "invalid" flag of
- // ActionResult.
- template<> struct IsResultPtrLowBitFree<Expr*> {
- static const bool value = true;
- };
- template<> struct IsResultPtrLowBitFree<Stmt*> {
- static const bool value = true;
- };
- template<> struct IsResultPtrLowBitFree<CXXBaseSpecifier*> {
- static const bool value = true;
- };
- template<> struct IsResultPtrLowBitFree<CXXCtorInitializer*> {
- static const bool value = true;
- };
+ uintptr_t Value = UnsetValue;
- using ExprResult = ActionResult<Expr *>;
- using StmtResult = ActionResult<Stmt *>;
- using TypeResult = ActionResult<ParsedType>;
- using BaseResult = ActionResult<CXXBaseSpecifier *>;
- using MemInitResult = ActionResult<CXXCtorInitializer *>;
+ using PtrTraits = llvm::PointerLikeTypeTraits<PtrTy>;
- using DeclResult = ActionResult<Decl *>;
- using ParsedTemplateTy = OpaquePtr<TemplateName>;
- using UnionParsedTemplateTy = UnionOpaquePtr<TemplateName>;
+public:
+ ActionResult(bool Invalid = false)
+ : Value(Invalid ? InvalidValue : UnsetValue) {}
+ ActionResult(PtrTy V) { *this = V; }
+ ActionResult(const DiagnosticBuilder &) : ActionResult(/*Invalid=*/true) {}
- using MultiExprArg = MutableArrayRef<Expr *>;
- using MultiStmtArg = MutableArrayRef<Stmt *>;
- using ASTTemplateArgsPtr = MutableArrayRef<ParsedTemplateArgument>;
- using MultiTypeArg = MutableArrayRef<ParsedType>;
- using MultiTemplateParamsArg = MutableArrayRef<TemplateParameterList *>;
+ // These two overloads prevent void* -> bool conversions.
+ ActionResult(const void *) = delete;
+ ActionResult(volatile void *) = delete;
- inline ExprResult ExprError() { return ExprResult(true); }
- inline StmtResult StmtError() { return StmtResult(true); }
- inline TypeResult TypeError() { return TypeResult(true); }
+ bool isInvalid() const { return Value == InvalidValue; }
+ bool isUnset() const { return Value == UnsetValue; }
+ bool isUsable() const { return !isInvalid() && !isUnset(); }
- inline ExprResult ExprError(const StreamingDiagnostic &) {
- return ExprError();
+ PtrTy get() const {
+ void *VP = reinterpret_cast<void *>(Value & ~0x01);
+ return PtrTraits::getFromVoidPointer(VP);
}
- inline StmtResult StmtError(const StreamingDiagnostic &) {
- return StmtError();
- }
-
- inline ExprResult ExprEmpty() { return ExprResult(false); }
- inline StmtResult StmtEmpty() { return StmtResult(false); }
+ template <typename T> T *getAs() { return static_cast<T *>(get()); }
- inline Expr *AssertSuccess(ExprResult R) {
- assert(!R.isInvalid() && "operation was asserted to never fail!");
- return R.get();
+ ActionResult &operator=(PtrTy RHS) {
+ void *VP = PtrTraits::getAsVoidPointer(RHS);
+ Value = reinterpret_cast<uintptr_t>(VP);
+ assert((Value & 0x01) == 0 && "Badly aligned pointer");
+ return *this;
}
- inline Stmt *AssertSuccess(StmtResult R) {
- assert(!R.isInvalid() && "operation was asserted to never fail!");
- return R.get();
+ // For types where we can fit a flag in with the pointer, provide
+ // conversions to/from pointer type.
+ static ActionResult getFromOpaquePointer(void *P) {
+ ActionResult Result;
+ Result.Value = (uintptr_t)P;
+ assert(Result.isInvalid() ||
+ PtrTraits::getAsVoidPointer(Result.get()) == P);
+ return Result;
}
+ void *getAsOpaquePointer() const { return (void *)Value; }
+};
+
+/// An opaque type for threading parsed type information through the parser.
+using ParsedType = OpaquePtr<QualType>;
+using UnionParsedType = UnionOpaquePtr<QualType>;
+
+// We can re-use the low bit of expression, statement, base, and
+// member-initializer pointers for the "invalid" flag of
+// ActionResult.
+template <> struct IsResultPtrLowBitFree<Expr *> {
+ static const bool value = true;
+};
+template <> struct IsResultPtrLowBitFree<Stmt *> {
+ static const bool value = true;
+};
+template <> struct IsResultPtrLowBitFree<CXXBaseSpecifier *> {
+ static const bool value = true;
+};
+template <> struct IsResultPtrLowBitFree<CXXCtorInitializer *> {
+ static const bool value = true;
+};
+
+using ExprResult = ActionResult<Expr *>;
+using StmtResult = ActionResult<Stmt *>;
+using TypeResult = ActionResult<ParsedType>;
+using BaseResult = ActionResult<CXXBaseSpecifier *>;
+using MemInitResult = ActionResult<CXXCtorInitializer *>;
+
+using DeclResult = ActionResult<Decl *>;
+using ParsedTemplateTy = OpaquePtr<TemplateName>;
+using UnionParsedTemplateTy = UnionOpaquePtr<TemplateName>;
+
+using MultiExprArg = MutableArrayRef<Expr *>;
+using MultiStmtArg = MutableArrayRef<Stmt *>;
+using ASTTemplateArgsPtr = MutableArrayRef<ParsedTemplateArgument>;
+using MultiTypeArg = MutableArrayRef<ParsedType>;
+using MultiTemplateParamsArg = MutableArrayRef<TemplateParameterList *>;
+
+inline ExprResult ExprError() { return ExprResult(true); }
+inline StmtResult StmtError() { return StmtResult(true); }
+inline TypeResult TypeError() { return TypeResult(true); }
+
+inline ExprResult ExprError(const StreamingDiagnostic &) { return ExprError(); }
+inline StmtResult StmtError(const StreamingDiagnostic &) { return StmtError(); }
+
+inline ExprResult ExprEmpty() { return ExprResult(false); }
+inline StmtResult StmtEmpty() { return StmtResult(false); }
+
+inline Expr *AssertSuccess(ExprResult R) {
+ assert(!R.isInvalid() && "operation was asserted to never fail!");
+ return R.get();
+}
+
+inline Stmt *AssertSuccess(StmtResult R) {
+ assert(!R.isInvalid() && "operation was asserted to never fail!");
+ return R.get();
+}
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Sema/ParsedAttr.h b/contrib/llvm-project/clang/include/clang/Sema/ParsedAttr.h
index f47f557adeb1..8c0edca1ebc5 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/ParsedAttr.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/ParsedAttr.h
@@ -11,20 +11,20 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_SEMA_ATTRIBUTELIST_H
-#define LLVM_CLANG_SEMA_ATTRIBUTELIST_H
+#ifndef LLVM_CLANG_SEMA_PARSEDATTR_H
+#define LLVM_CLANG_SEMA_PARSEDATTR_H
#include "clang/Basic/AttrSubjectMatchRules.h"
#include "clang/Basic/AttributeCommonInfo.h"
#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/ParsedAttrInfo.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Sema/Ownership.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Support/Allocator.h"
-#include "llvm/Support/Registry.h"
#include "llvm/Support/VersionTuple.h"
+#include <bitset>
#include <cassert>
#include <cstddef>
#include <cstring>
@@ -37,98 +37,10 @@ class Decl;
class Expr;
class IdentifierInfo;
class LangOptions;
-class ParsedAttr;
class Sema;
class Stmt;
class TargetInfo;
-struct ParsedAttrInfo {
- /// Corresponds to the Kind enum.
- unsigned AttrKind : 16;
- /// The number of required arguments of this attribute.
- unsigned NumArgs : 4;
- /// The number of optional arguments of this attributes.
- unsigned OptArgs : 4;
- /// True if the parsing does not match the semantic content.
- unsigned HasCustomParsing : 1;
- /// True if this attribute is only available for certain targets.
- unsigned IsTargetSpecific : 1;
- /// True if this attribute applies to types.
- unsigned IsType : 1;
- /// True if this attribute applies to statements.
- unsigned IsStmt : 1;
- /// True if this attribute has any spellings that are known to gcc.
- unsigned IsKnownToGCC : 1;
- /// True if this attribute is supported by #pragma clang attribute.
- unsigned IsSupportedByPragmaAttribute : 1;
- /// The syntaxes supported by this attribute and how they're spelled.
- struct Spelling {
- AttributeCommonInfo::Syntax Syntax;
- const char *NormalizedFullName;
- };
- ArrayRef<Spelling> Spellings;
-
- ParsedAttrInfo(AttributeCommonInfo::Kind AttrKind =
- AttributeCommonInfo::NoSemaHandlerAttribute)
- : AttrKind(AttrKind), NumArgs(0), OptArgs(0), HasCustomParsing(0),
- IsTargetSpecific(0), IsType(0), IsStmt(0), IsKnownToGCC(0),
- IsSupportedByPragmaAttribute(0) {}
-
- virtual ~ParsedAttrInfo() = default;
-
- /// Check if this attribute appertains to D, and issue a diagnostic if not.
- virtual bool diagAppertainsToDecl(Sema &S, const ParsedAttr &Attr,
- const Decl *D) const {
- return true;
- }
- /// Check if this attribute appertains to St, and issue a diagnostic if not.
- virtual bool diagAppertainsToStmt(Sema &S, const ParsedAttr &Attr,
- const Stmt *St) const {
- return true;
- }
- /// Check if the given attribute is mutually exclusive with other attributes
- /// already applied to the given declaration.
- virtual bool diagMutualExclusion(Sema &S, const ParsedAttr &A,
- const Decl *D) const {
- return true;
- }
- /// Check if this attribute is allowed by the language we are compiling, and
- /// issue a diagnostic if not.
- virtual bool diagLangOpts(Sema &S, const ParsedAttr &Attr) const {
- return true;
- }
- /// Check if this attribute is allowed when compiling for the given target.
- virtual bool existsInTarget(const TargetInfo &Target) const {
- return true;
- }
- /// Convert the spelling index of Attr to a semantic spelling enum value.
- virtual unsigned
- spellingIndexToSemanticSpelling(const ParsedAttr &Attr) const {
- return UINT_MAX;
- }
- /// Populate Rules with the match rules of this attribute.
- virtual void getPragmaAttributeMatchRules(
- llvm::SmallVectorImpl<std::pair<attr::SubjectMatchRule, bool>> &Rules,
- const LangOptions &LangOpts) const {
- }
- enum AttrHandling {
- NotHandled,
- AttributeApplied,
- AttributeNotApplied
- };
- /// If this ParsedAttrInfo knows how to handle this ParsedAttr applied to this
- /// Decl then do so and return either AttributeApplied if it was applied or
- /// AttributeNotApplied if it wasn't. Otherwise return NotHandled.
- virtual AttrHandling handleDeclAttribute(Sema &S, Decl *D,
- const ParsedAttr &Attr) const {
- return NotHandled;
- }
-
- static const ParsedAttrInfo &get(const AttributeCommonInfo &A);
-};
-
-typedef llvm::Registry<ParsedAttrInfo> ParsedAttrInfoRegistry;
-
/// Represents information about a change in availability for
/// an entity, which is part of the encoding of the 'availability'
/// attribute.
@@ -293,10 +205,9 @@ private:
/// Constructor for attributes with expression arguments.
ParsedAttr(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
- ArgsUnion *args, unsigned numArgs, Syntax syntaxUsed,
+ ArgsUnion *args, unsigned numArgs, Form formUsed,
SourceLocation ellipsisLoc)
- : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc,
- syntaxUsed),
+ : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc, formUsed),
EllipsisLoc(ellipsisLoc), NumArgs(numArgs), Invalid(false),
UsedAsTypeAttr(false), IsAvailability(false),
IsTypeTagForDatatype(false), IsProperty(false), HasParsedType(false),
@@ -312,10 +223,9 @@ private:
IdentifierLoc *Parm, const AvailabilityChange &introduced,
const AvailabilityChange &deprecated,
const AvailabilityChange &obsoleted, SourceLocation unavailable,
- const Expr *messageExpr, Syntax syntaxUsed, SourceLocation strict,
+ const Expr *messageExpr, Form formUsed, SourceLocation strict,
const Expr *replacementExpr)
- : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc,
- syntaxUsed),
+ : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc, formUsed),
NumArgs(1), Invalid(false), UsedAsTypeAttr(false), IsAvailability(true),
IsTypeTagForDatatype(false), IsProperty(false), HasParsedType(false),
HasProcessingCache(false), IsPragmaClangAttribute(false),
@@ -331,9 +241,8 @@ private:
ParsedAttr(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
IdentifierLoc *Parm1, IdentifierLoc *Parm2, IdentifierLoc *Parm3,
- Syntax syntaxUsed)
- : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc,
- syntaxUsed),
+ Form formUsed)
+ : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc, formUsed),
NumArgs(3), Invalid(false), UsedAsTypeAttr(false),
IsAvailability(false), IsTypeTagForDatatype(false), IsProperty(false),
HasParsedType(false), HasProcessingCache(false),
@@ -348,9 +257,8 @@ private:
ParsedAttr(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
IdentifierLoc *ArgKind, ParsedType matchingCType,
- bool layoutCompatible, bool mustBeNull, Syntax syntaxUsed)
- : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc,
- syntaxUsed),
+ bool layoutCompatible, bool mustBeNull, Form formUsed)
+ : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc, formUsed),
NumArgs(1), Invalid(false), UsedAsTypeAttr(false),
IsAvailability(false), IsTypeTagForDatatype(true), IsProperty(false),
HasParsedType(false), HasProcessingCache(false),
@@ -366,23 +274,21 @@ private:
/// Constructor for attributes with a single type argument.
ParsedAttr(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
- ParsedType typeArg, Syntax syntaxUsed)
- : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc,
- syntaxUsed),
- NumArgs(0), Invalid(false), UsedAsTypeAttr(false),
- IsAvailability(false), IsTypeTagForDatatype(false), IsProperty(false),
- HasParsedType(true), HasProcessingCache(false),
- IsPragmaClangAttribute(false), Info(ParsedAttrInfo::get(*this)) {
+ ParsedType typeArg, Form formUsed, SourceLocation ellipsisLoc)
+ : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc, formUsed),
+ EllipsisLoc(ellipsisLoc), NumArgs(0), Invalid(false),
+ UsedAsTypeAttr(false), IsAvailability(false),
+ IsTypeTagForDatatype(false), IsProperty(false), HasParsedType(true),
+ HasProcessingCache(false), IsPragmaClangAttribute(false),
+ Info(ParsedAttrInfo::get(*this)) {
new (&getTypeBuffer()) ParsedType(typeArg);
}
/// Constructor for microsoft __declspec(property) attribute.
ParsedAttr(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
- IdentifierInfo *getterId, IdentifierInfo *setterId,
- Syntax syntaxUsed)
- : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc,
- syntaxUsed),
+ IdentifierInfo *getterId, IdentifierInfo *setterId, Form formUsed)
+ : AttributeCommonInfo(attrName, scopeName, attrRange, scopeLoc, formUsed),
NumArgs(0), Invalid(false), UsedAsTypeAttr(false),
IsAvailability(false), IsTypeTagForDatatype(false), IsProperty(true),
HasParsedType(false), HasProcessingCache(false),
@@ -407,7 +313,7 @@ private:
return *getTrailingObjects<ParsedType>();
}
- /// The property data immediately follows the object is is mutually exclusive
+ /// The property data immediately follows the object is mutually exclusive
/// with arguments.
detail::PropertyData &getPropertyDataBuffer() {
assert(IsProperty);
@@ -600,9 +506,13 @@ public:
bool isStmtAttr() const;
bool hasCustomParsing() const;
+ bool acceptsExprPack() const;
+ bool isParamExpr(size_t N) const;
unsigned getMinArgs() const;
unsigned getMaxArgs() const;
+ unsigned getNumArgMembers() const;
bool hasVariadicArg() const;
+ void handleAttrWithDelayedArgs(Sema &S, Decl *D) const;
bool diagnoseAppertainsTo(class Sema &S, const Decl *D) const;
bool diagnoseAppertainsTo(class Sema &S, const Stmt *St) const;
bool diagnoseMutualExclusion(class Sema &S, const Decl *D) const;
@@ -621,6 +531,18 @@ public:
bool isKnownToGCC() const;
bool isSupportedByPragmaAttribute() const;
+ /// Returns whether a [[]] attribute, if specified ahead of a declaration,
+ /// should be applied to the decl-specifier-seq instead (i.e. whether it
+ /// "slides" to the decl-specifier-seq).
+ ///
+ /// By the standard, attributes specified before the declaration always
+ /// appertain to the declaration, but historically we have allowed some of
+ /// these attributes to slide to the decl-specifier-seq, so we need to keep
+ /// supporting this behavior.
+ ///
+ /// This may only be called if isStandardAttributeSyntax() returns true.
+ bool slidesFromDeclToDeclSpecLegacyBehavior() const;
+
/// If the parsed attribute has a semantic equivalent, and it would
/// have a semantic Spelling enumeration (due to having semantically-distinct
/// spelling variations), return the value of that semantic spelling. If the
@@ -628,7 +550,7 @@ public:
/// a Spelling enumeration, the value UINT_MAX is returned.
unsigned getSemanticSpelling() const;
- /// If this is an OpenCL address space attribute returns its representation
+ /// If this is an OpenCL address space attribute, returns its representation
/// in LangAS, otherwise returns default address space.
LangAS asOpenCLLangAS() const {
switch (getParsedKind()) {
@@ -651,7 +573,7 @@ public:
}
}
- /// If this is an OpenCL address space attribute returns its SYCL
+ /// If this is an OpenCL address space attribute, returns its SYCL
/// representation in LangAS, otherwise returns default address space.
LangAS asSYCLLangAS() const {
switch (getKind()) {
@@ -671,6 +593,17 @@ public:
}
}
+ /// If this is an HLSL address space attribute, returns its representation
+ /// in LangAS, otherwise returns default address space.
+ LangAS asHLSLLangAS() const {
+ switch (getParsedKind()) {
+ case ParsedAttr::AT_HLSLGroupSharedAddressSpace:
+ return LangAS::hlsl_groupshared;
+ default:
+ return LangAS::Default;
+ }
+ }
+
AttributeCommonInfo::Kind getKind() const {
return AttributeCommonInfo::Kind(Info.AttrKind);
}
@@ -741,7 +674,7 @@ class AttributePool {
friend class AttributeFactory;
friend class ParsedAttributes;
AttributeFactory &Factory;
- llvm::TinyPtrVector<ParsedAttr *> Attrs;
+ llvm::SmallVector<ParsedAttr *> Attrs;
void *allocate(size_t size) {
return Factory.allocate(size);
@@ -765,12 +698,19 @@ public:
AttributePool(AttributeFactory &factory) : Factory(factory) {}
AttributePool(const AttributePool &) = delete;
+ // The copy assignment operator is defined as deleted pending further
+ // motivation.
+ AttributePool &operator=(const AttributePool &) = delete;
~AttributePool() { Factory.reclaimPool(*this); }
/// Move the given pool's allocations to this pool.
AttributePool(AttributePool &&pool) = default;
+ // The move assignment operator is defined as deleted pending further
+ // motivation.
+ AttributePool &operator=(AttributePool &&pool) = delete;
+
AttributeFactory &getFactory() const { return Factory; }
void clear() {
@@ -786,8 +726,7 @@ public:
ParsedAttr *create(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
- ArgsUnion *args, unsigned numArgs,
- ParsedAttr::Syntax syntax,
+ ArgsUnion *args, unsigned numArgs, ParsedAttr::Form form,
SourceLocation ellipsisLoc = SourceLocation()) {
size_t temp =
ParsedAttr::totalSizeToAlloc<ArgsUnion, detail::AvailabilityData,
@@ -800,7 +739,7 @@ public:
detail::PropertyData>(numArgs, 0, 0, 0,
0));
return add(new (memory) ParsedAttr(attrName, attrRange, scopeName, scopeLoc,
- args, numArgs, syntax, ellipsisLoc));
+ args, numArgs, form, ellipsisLoc));
}
ParsedAttr *create(IdentifierInfo *attrName, SourceRange attrRange,
@@ -809,24 +748,24 @@ public:
const AvailabilityChange &deprecated,
const AvailabilityChange &obsoleted,
SourceLocation unavailable, const Expr *MessageExpr,
- ParsedAttr::Syntax syntax, SourceLocation strict,
+ ParsedAttr::Form form, SourceLocation strict,
const Expr *ReplacementExpr) {
void *memory = allocate(AttributeFactory::AvailabilityAllocSize);
return add(new (memory) ParsedAttr(
attrName, attrRange, scopeName, scopeLoc, Param, introduced, deprecated,
- obsoleted, unavailable, MessageExpr, syntax, strict, ReplacementExpr));
+ obsoleted, unavailable, MessageExpr, form, strict, ReplacementExpr));
}
ParsedAttr *create(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
IdentifierLoc *Param1, IdentifierLoc *Param2,
- IdentifierLoc *Param3, ParsedAttr::Syntax syntax) {
+ IdentifierLoc *Param3, ParsedAttr::Form form) {
void *memory = allocate(
ParsedAttr::totalSizeToAlloc<ArgsUnion, detail::AvailabilityData,
detail::TypeTagForDatatypeData, ParsedType,
detail::PropertyData>(3, 0, 0, 0, 0));
return add(new (memory) ParsedAttr(attrName, attrRange, scopeName, scopeLoc,
- Param1, Param2, Param3, syntax));
+ Param1, Param2, Param3, form));
}
ParsedAttr *
@@ -834,42 +773,50 @@ public:
IdentifierInfo *scopeName, SourceLocation scopeLoc,
IdentifierLoc *argumentKind,
ParsedType matchingCType, bool layoutCompatible,
- bool mustBeNull, ParsedAttr::Syntax syntax) {
+ bool mustBeNull, ParsedAttr::Form form) {
void *memory = allocate(AttributeFactory::TypeTagForDatatypeAllocSize);
return add(new (memory) ParsedAttr(attrName, attrRange, scopeName, scopeLoc,
argumentKind, matchingCType,
- layoutCompatible, mustBeNull, syntax));
+ layoutCompatible, mustBeNull, form));
}
ParsedAttr *createTypeAttribute(IdentifierInfo *attrName,
SourceRange attrRange,
IdentifierInfo *scopeName,
SourceLocation scopeLoc, ParsedType typeArg,
- ParsedAttr::Syntax syntaxUsed) {
+ ParsedAttr::Form formUsed,
+ SourceLocation ellipsisLoc) {
void *memory = allocate(
ParsedAttr::totalSizeToAlloc<ArgsUnion, detail::AvailabilityData,
detail::TypeTagForDatatypeData, ParsedType,
detail::PropertyData>(0, 0, 0, 1, 0));
return add(new (memory) ParsedAttr(attrName, attrRange, scopeName, scopeLoc,
- typeArg, syntaxUsed));
+ typeArg, formUsed, ellipsisLoc));
}
ParsedAttr *
createPropertyAttribute(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
IdentifierInfo *getterId, IdentifierInfo *setterId,
- ParsedAttr::Syntax syntaxUsed) {
+ ParsedAttr::Form formUsed) {
void *memory = allocate(AttributeFactory::PropertyAllocSize);
return add(new (memory) ParsedAttr(attrName, attrRange, scopeName, scopeLoc,
- getterId, setterId, syntaxUsed));
+ getterId, setterId, formUsed));
}
};
class ParsedAttributesView {
- using VecTy = llvm::TinyPtrVector<ParsedAttr *>;
+ using VecTy = llvm::SmallVector<ParsedAttr *>;
using SizeType = decltype(std::declval<VecTy>().size());
public:
+ SourceRange Range;
+
+ static const ParsedAttributesView &none() {
+ static const ParsedAttributesView Attrs;
+ return Attrs;
+ }
+
bool empty() const { return AttrList.empty(); }
SizeType size() const { return AttrList.size(); }
ParsedAttr &operator[](SizeType pos) { return *AttrList[pos]; }
@@ -893,7 +840,7 @@ public:
ParsedAttr> {
iterator() : iterator_adaptor_base(nullptr) {}
iterator(VecTy::iterator I) : iterator_adaptor_base(I) {}
- reference operator*() { return **I; }
+ reference operator*() const { return **I; }
friend class ParsedAttributesView;
};
struct const_iterator
@@ -951,10 +898,34 @@ public:
});
}
+ const ParsedAttr *getMSPropertyAttr() const {
+ auto It = llvm::find_if(AttrList, [](const ParsedAttr *AL) {
+ return AL->isDeclspecPropertyAttribute();
+ });
+ if (It != AttrList.end())
+ return *It;
+ return nullptr;
+ }
+ bool hasMSPropertyAttr() const { return getMSPropertyAttr(); }
+
private:
VecTy AttrList;
};
+struct ParsedAttributeArgumentsProperties {
+ ParsedAttributeArgumentsProperties(uint32_t StringLiteralBits)
+ : StringLiterals(StringLiteralBits) {}
+ bool isStringLiteralArg(unsigned I) const {
+ // If the last bit is set, assume we have a variadic parameter
+ if (I >= StringLiterals.size())
+ return StringLiterals.test(StringLiterals.size() - 1);
+ return StringLiterals.test(I);
+ }
+
+private:
+ std::bitset<32> StringLiterals;
+};
+
/// ParsedAttributes - A collection of parsed attributes. Currently
/// we don't differentiate between the various attribute syntaxes,
/// which is basically silly.
@@ -965,18 +936,23 @@ class ParsedAttributes : public ParsedAttributesView {
public:
ParsedAttributes(AttributeFactory &factory) : pool(factory) {}
ParsedAttributes(const ParsedAttributes &) = delete;
+ ParsedAttributes &operator=(const ParsedAttributes &) = delete;
AttributePool &getPool() const { return pool; }
- void takeAllFrom(ParsedAttributes &attrs) {
- addAll(attrs.begin(), attrs.end());
- attrs.clearListOnly();
- pool.takeAllFrom(attrs.pool);
+ void takeAllFrom(ParsedAttributes &Other) {
+ assert(&Other != this &&
+ "ParsedAttributes can't take attributes from itself");
+ addAll(Other.begin(), Other.end());
+ Other.clearListOnly();
+ pool.takeAllFrom(Other.pool);
}
- void takeOneFrom(ParsedAttributes &Attrs, ParsedAttr *PA) {
- Attrs.getPool().remove(PA);
- Attrs.remove(PA);
+ void takeOneFrom(ParsedAttributes &Other, ParsedAttr *PA) {
+ assert(&Other != this &&
+ "ParsedAttributes can't take attribute from itself");
+ Other.getPool().remove(PA);
+ Other.remove(PA);
getPool().add(PA);
addAtEnd(PA);
}
@@ -984,16 +960,16 @@ public:
void clear() {
clearListOnly();
pool.clear();
+ Range = SourceRange();
}
/// Add attribute with expression arguments.
ParsedAttr *addNew(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
- ArgsUnion *args, unsigned numArgs,
- ParsedAttr::Syntax syntax,
+ ArgsUnion *args, unsigned numArgs, ParsedAttr::Form form,
SourceLocation ellipsisLoc = SourceLocation()) {
ParsedAttr *attr = pool.create(attrName, attrRange, scopeName, scopeLoc,
- args, numArgs, syntax, ellipsisLoc);
+ args, numArgs, form, ellipsisLoc);
addAtEnd(attr);
return attr;
}
@@ -1005,11 +981,11 @@ public:
const AvailabilityChange &deprecated,
const AvailabilityChange &obsoleted,
SourceLocation unavailable, const Expr *MessageExpr,
- ParsedAttr::Syntax syntax, SourceLocation strict,
+ ParsedAttr::Form form, SourceLocation strict,
const Expr *ReplacementExpr) {
ParsedAttr *attr = pool.create(
attrName, attrRange, scopeName, scopeLoc, Param, introduced, deprecated,
- obsoleted, unavailable, MessageExpr, syntax, strict, ReplacementExpr);
+ obsoleted, unavailable, MessageExpr, form, strict, ReplacementExpr);
addAtEnd(attr);
return attr;
}
@@ -1018,9 +994,9 @@ public:
ParsedAttr *addNew(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
IdentifierLoc *Param1, IdentifierLoc *Param2,
- IdentifierLoc *Param3, ParsedAttr::Syntax syntax) {
+ IdentifierLoc *Param3, ParsedAttr::Form form) {
ParsedAttr *attr = pool.create(attrName, attrRange, scopeName, scopeLoc,
- Param1, Param2, Param3, syntax);
+ Param1, Param2, Param3, form);
addAtEnd(attr);
return attr;
}
@@ -1031,10 +1007,10 @@ public:
IdentifierInfo *scopeName, SourceLocation scopeLoc,
IdentifierLoc *argumentKind,
ParsedType matchingCType, bool layoutCompatible,
- bool mustBeNull, ParsedAttr::Syntax syntax) {
+ bool mustBeNull, ParsedAttr::Form form) {
ParsedAttr *attr = pool.createTypeTagForDatatype(
attrName, attrRange, scopeName, scopeLoc, argumentKind, matchingCType,
- layoutCompatible, mustBeNull, syntax);
+ layoutCompatible, mustBeNull, form);
addAtEnd(attr);
return attr;
}
@@ -1042,10 +1018,11 @@ public:
/// Add an attribute with a single type argument.
ParsedAttr *addNewTypeAttr(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
- ParsedType typeArg,
- ParsedAttr::Syntax syntaxUsed) {
- ParsedAttr *attr = pool.createTypeAttribute(attrName, attrRange, scopeName,
- scopeLoc, typeArg, syntaxUsed);
+ ParsedType typeArg, ParsedAttr::Form formUsed,
+ SourceLocation ellipsisLoc = SourceLocation()) {
+ ParsedAttr *attr =
+ pool.createTypeAttribute(attrName, attrRange, scopeName, scopeLoc,
+ typeArg, formUsed, ellipsisLoc);
addAtEnd(attr);
return attr;
}
@@ -1055,10 +1032,9 @@ public:
addNewPropertyAttr(IdentifierInfo *attrName, SourceRange attrRange,
IdentifierInfo *scopeName, SourceLocation scopeLoc,
IdentifierInfo *getterId, IdentifierInfo *setterId,
- ParsedAttr::Syntax syntaxUsed) {
- ParsedAttr *attr =
- pool.createPropertyAttribute(attrName, attrRange, scopeName, scopeLoc,
- getterId, setterId, syntaxUsed);
+ ParsedAttr::Form formUsed) {
+ ParsedAttr *attr = pool.createPropertyAttribute(
+ attrName, attrRange, scopeName, scopeLoc, getterId, setterId, formUsed);
addAtEnd(attr);
return attr;
}
@@ -1067,26 +1043,10 @@ private:
mutable AttributePool pool;
};
-struct ParsedAttributesWithRange : ParsedAttributes {
- ParsedAttributesWithRange(AttributeFactory &factory)
- : ParsedAttributes(factory) {}
-
- void clear() {
- ParsedAttributes::clear();
- Range = SourceRange();
- }
-
- SourceRange Range;
-};
-struct ParsedAttributesViewWithRange : ParsedAttributesView {
- ParsedAttributesViewWithRange() : ParsedAttributesView() {}
- void clearListOnly() {
- ParsedAttributesView::clearListOnly();
- Range = SourceRange();
- }
-
- SourceRange Range;
-};
+/// Consumes the attributes from `First` and `Second` and concatenates them into
+/// `Result`. Sets `Result.Range` to the combined range of `First` and `Second`.
+void takeAndConcatenateAttrs(ParsedAttributes &First, ParsedAttributes &Second,
+ ParsedAttributes &Result);
/// These constants match the enumerated choices of
/// err_attribute_argument_n_type and err_attribute_argument_type.
@@ -1096,6 +1056,7 @@ enum AttributeArgumentNType {
AANT_ArgumentString,
AANT_ArgumentIdentifier,
AANT_ArgumentConstantExpr,
+ AANT_ArgumentBuiltinFunction,
};
/// These constants match the enumerated choices of
@@ -1118,14 +1079,14 @@ enum AttributeDeclKind {
inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
const ParsedAttr &At) {
- DB.AddTaggedVal(reinterpret_cast<intptr_t>(At.getAttrName()),
+ DB.AddTaggedVal(reinterpret_cast<uint64_t>(At.getAttrName()),
DiagnosticsEngine::ak_identifierinfo);
return DB;
}
inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
const ParsedAttr *At) {
- DB.AddTaggedVal(reinterpret_cast<intptr_t>(At->getAttrName()),
+ DB.AddTaggedVal(reinterpret_cast<uint64_t>(At->getAttrName()),
DiagnosticsEngine::ak_identifierinfo);
return DB;
}
@@ -1135,26 +1096,26 @@ inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
/// it explicit is hard. This constructor causes ambiguity with
/// DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB, SourceRange R).
/// We use SFINAE to disable any conversion and remove any ambiguity.
-template <typename ACI,
- typename std::enable_if_t<
- std::is_same<ACI, AttributeCommonInfo>::value, int> = 0>
+template <
+ typename ACI,
+ std::enable_if_t<std::is_same<ACI, AttributeCommonInfo>::value, int> = 0>
inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
- const ACI &CI) {
- DB.AddTaggedVal(reinterpret_cast<intptr_t>(CI.getAttrName()),
+ const ACI &CI) {
+ DB.AddTaggedVal(reinterpret_cast<uint64_t>(CI.getAttrName()),
DiagnosticsEngine::ak_identifierinfo);
return DB;
}
-template <typename ACI,
- typename std::enable_if_t<
- std::is_same<ACI, AttributeCommonInfo>::value, int> = 0>
+template <
+ typename ACI,
+ std::enable_if_t<std::is_same<ACI, AttributeCommonInfo>::value, int> = 0>
inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &DB,
- const ACI* CI) {
- DB.AddTaggedVal(reinterpret_cast<intptr_t>(CI->getAttrName()),
+ const ACI *CI) {
+ DB.AddTaggedVal(reinterpret_cast<uint64_t>(CI->getAttrName()),
DiagnosticsEngine::ak_identifierinfo);
return DB;
}
} // namespace clang
-#endif // LLVM_CLANG_SEMA_ATTRIBUTELIST_H
+#endif // LLVM_CLANG_SEMA_PARSEDATTR_H
diff --git a/contrib/llvm-project/clang/include/clang/Sema/ParsedTemplate.h b/contrib/llvm-project/clang/include/clang/Sema/ParsedTemplate.h
index f0245b93c7eb..65182d57246a 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/ParsedTemplate.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/ParsedTemplate.h
@@ -63,8 +63,7 @@ namespace clang {
ParsedTemplateTy Template,
SourceLocation TemplateLoc)
: Kind(ParsedTemplateArgument::Template),
- Arg(Template.getAsOpaquePtr()),
- SS(SS), Loc(TemplateLoc), EllipsisLoc() { }
+ Arg(Template.getAsOpaquePtr()), SS(SS), Loc(TemplateLoc) {}
/// Determine whether the given template argument is invalid.
bool isInvalid() const { return Arg == nullptr; }
@@ -213,9 +212,9 @@ namespace clang {
}
void Destroy() {
- std::for_each(
- getTemplateArgs(), getTemplateArgs() + NumArgs,
- [](ParsedTemplateArgument &A) { A.~ParsedTemplateArgument(); });
+ for (ParsedTemplateArgument &A :
+ llvm::make_range(getTemplateArgs(), getTemplateArgs() + NumArgs))
+ A.~ParsedTemplateArgument();
this->~TemplateIdAnnotation();
free(this);
}
diff --git a/contrib/llvm-project/clang/include/clang/Sema/RISCVIntrinsicManager.h b/contrib/llvm-project/clang/include/clang/Sema/RISCVIntrinsicManager.h
new file mode 100644
index 000000000000..2a3dd1e7c469
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Sema/RISCVIntrinsicManager.h
@@ -0,0 +1,41 @@
+//===- RISCVIntrinsicManager.h - RISC-V Intrinsic Handler -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RISCVIntrinsicManager, which handles RISC-V vector
+// intrinsic functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_RISCVINTRINSICMANAGER_H
+#define LLVM_CLANG_SEMA_RISCVINTRINSICMANAGER_H
+
+#include <cstdint>
+
+namespace clang {
+class LookupResult;
+class IdentifierInfo;
+class Preprocessor;
+
+namespace sema {
+class RISCVIntrinsicManager {
+public:
+ enum class IntrinsicKind : uint8_t { RVV, SIFIVE_VECTOR };
+
+ virtual ~RISCVIntrinsicManager() = default;
+
+ virtual void InitIntrinsicList() = 0;
+
+ // Create RISC-V intrinsic and insert into symbol table and return true if
+ // found, otherwise return false.
+ virtual bool CreateIntrinsicIfFound(LookupResult &LR, IdentifierInfo *II,
+ Preprocessor &PP) = 0;
+};
+} // end namespace sema
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Scope.h b/contrib/llvm-project/clang/include/clang/Sema/Scope.h
index b499ba1e7c2a..9e81706cd2aa 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Scope.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Scope.h
@@ -20,6 +20,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include <cassert>
+#include <optional>
namespace llvm {
@@ -44,11 +45,11 @@ public:
enum ScopeFlags {
/// This indicates that the scope corresponds to a function, which
/// means that labels are set here.
- FnScope = 0x01,
+ FnScope = 0x01,
/// This is a while, do, switch, for, etc that can have break
/// statements embedded into it.
- BreakScope = 0x02,
+ BreakScope = 0x02,
/// This is a while, do, for, which can have continue statements
/// embedded into it.
@@ -140,6 +141,15 @@ public:
/// parsed. If such a scope is a ContinueScope, it's invalid to jump to the
/// continue block from here.
ConditionVarScope = 0x2000000,
+
+ /// This is a scope of some OpenMP directive with
+ /// order clause which specifies concurrent
+ OpenMPOrderClauseScope = 0x4000000,
+ /// This is the scope for a lambda, after the lambda introducer.
+ /// Lambdas need two FunctionPrototypeScope scopes (because there is a
+ /// template scope in between), the outer scope does not increase the
+ /// depth of recursion.
+ LambdaScope = 0x8000000,
};
private:
@@ -210,9 +220,19 @@ private:
/// Used to determine if errors occurred in this scope.
DiagnosticErrorTrap ErrorTrap;
- /// A lattice consisting of undefined, a single NRVO candidate variable in
- /// this scope, or over-defined. The bit is true when over-defined.
- llvm::PointerIntPair<VarDecl *, 1, bool> NRVO;
+ /// A single NRVO candidate variable in this scope.
+ /// There are three possible values:
+ /// 1) pointer to VarDecl that denotes NRVO candidate itself.
+ /// 2) nullptr value means that NRVO is not allowed in this scope
+ /// (e.g. return a function parameter).
+ /// 3) std::nullopt value means that there is no NRVO candidate in this scope
+ /// (i.e. there are no return statements in this scope).
+ std::optional<VarDecl *> NRVO;
+
+ /// Represents return slots for NRVO candidates in the current scope.
+ /// If a variable is present in this set, it means that a return slot is
+ /// available for this variable in the current scope.
+ llvm::SmallPtrSet<VarDecl *, 8> ReturnSlots;
void setFlags(Scope *Parent, unsigned F);
@@ -304,12 +324,14 @@ public:
bool decl_empty() const { return DeclsInScope.empty(); }
void AddDecl(Decl *D) {
+ if (auto *VD = dyn_cast<VarDecl>(D))
+ if (!isa<ParmVarDecl>(VD))
+ ReturnSlots.insert(VD);
+
DeclsInScope.insert(D);
}
- void RemoveDecl(Decl *D) {
- DeclsInScope.erase(D);
- }
+ void RemoveDecl(Decl *D) { DeclsInScope.erase(D); }
void incrementMSManglingNumber() {
if (Scope *MSLMP = getMSLastManglingParent()) {
@@ -337,7 +359,7 @@ public:
/// isDeclScope - Return true if this is the scope that the specified decl is
/// declared in.
- bool isDeclScope(const Decl *D) const { return DeclsInScope.count(D) != 0; }
+ bool isDeclScope(const Decl *D) const { return DeclsInScope.contains(D); }
/// Get the entity corresponding to this scope.
DeclContext *getEntity() const {
@@ -364,11 +386,15 @@ public:
}
/// isFunctionScope() - Return true if this scope is a function scope.
- bool isFunctionScope() const { return (getFlags() & Scope::FnScope); }
+ bool isFunctionScope() const { return getFlags() & Scope::FnScope; }
/// isClassScope - Return true if this scope is a class/struct/union scope.
- bool isClassScope() const {
- return (getFlags() & Scope::ClassScope);
+ bool isClassScope() const { return getFlags() & Scope::ClassScope; }
+
+ /// Determines whether this scope is between inheritance colon and the real
+ /// class/struct definition.
+ bool isClassInheritanceScope() const {
+ return getFlags() & Scope::ClassInheritanceScope;
}
/// isInCXXInlineMethodScope - Return true if this scope is a C++ inline
@@ -426,6 +452,9 @@ public:
return getFlags() & Scope::AtCatchScope;
}
+ /// isCatchScope - Return true if this scope is a C++ catch statement.
+ bool isCatchScope() const { return getFlags() & Scope::CatchScope; }
+
/// isSwitchScope - Return true if this scope is a switch scope.
bool isSwitchScope() const {
for (const Scope *S = this; S; S = S->getParent()) {
@@ -469,9 +498,26 @@ public:
return P && P->isOpenMPLoopDirectiveScope();
}
+ /// Determine whether this scope is some OpenMP directive with
+ /// order clause which specifies concurrent scope.
+ bool isOpenMPOrderClauseScope() const {
+ return getFlags() & Scope::OpenMPOrderClauseScope;
+ }
+
+ /// Determine whether this scope is a while/do/for statement, which can have
+ /// continue statements embedded into it.
+ bool isContinueScope() const {
+ return getFlags() & ScopeFlags::ContinueScope;
+ }
+
/// Determine whether this scope is a C++ 'try' block.
bool isTryScope() const { return getFlags() & Scope::TryScope; }
+ /// Determine whether this scope is a function-level C++ try or catch scope.
+ bool isFnTryCatchScope() const {
+ return getFlags() & ScopeFlags::FnTryCatchScope;
+ }
+
/// Determine whether this scope is a SEH '__try' block.
bool isSEHTryScope() const { return getFlags() & Scope::SEHTryScope; }
@@ -483,6 +529,10 @@ public:
return getFlags() & Scope::CompoundStmtScope;
}
+ /// Determine whether this scope is a controlling scope in a
+ /// if/switch/while/for statement.
+ bool isControlScope() const { return getFlags() & Scope::ControlScope; }
+
/// Returns if rhs has a higher scope depth than this.
///
/// The caller is responsible for calling this only if one of the two scopes
@@ -505,23 +555,9 @@ public:
UsingDirectives.end());
}
- void addNRVOCandidate(VarDecl *VD) {
- if (NRVO.getInt())
- return;
- if (NRVO.getPointer() == nullptr) {
- NRVO.setPointer(VD);
- return;
- }
- if (NRVO.getPointer() != VD)
- setNoNRVO();
- }
-
- void setNoNRVO() {
- NRVO.setInt(true);
- NRVO.setPointer(nullptr);
- }
+ void updateNRVOCandidate(VarDecl *VD);
- void mergeNRVOIntoParent();
+ void applyNRVO();
/// Init - This is used by the parser to implement scope caching.
void Init(Scope *parent, unsigned flags);
diff --git a/contrib/llvm-project/clang/include/clang/Sema/ScopeInfo.h b/contrib/llvm-project/clang/include/clang/Sema/ScopeInfo.h
index 98ed75acd9d2..06e47eed4e93 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/ScopeInfo.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/ScopeInfo.h
@@ -58,7 +58,6 @@ class Scope;
class Stmt;
class SwitchStmt;
class TemplateParameterList;
-class TemplateTypeParmDecl;
class VarDecl;
namespace sema {
@@ -67,7 +66,7 @@ namespace sema {
/// parsed.
class CompoundScopeInfo {
public:
- /// Whether this compound stamement contains `for' or `while' loops
+ /// Whether this compound statement contains `for' or `while' loops
/// with empty bodies.
bool HasEmptyLoopBodies = false;
@@ -75,7 +74,12 @@ public:
/// expression.
bool IsStmtExpr;
- CompoundScopeInfo(bool IsStmtExpr) : IsStmtExpr(IsStmtExpr) {}
+ /// FP options at the beginning of the compound statement, prior to
+ /// any pragma.
+ FPOptions InitialFPFeatures;
+
+ CompoundScopeInfo(bool IsStmtExpr, FPOptions FPO)
+ : IsStmtExpr(IsStmtExpr), InitialFPFeatures(FPO) {}
void setHasEmptyLoopBodies() {
HasEmptyLoopBodies = true;
@@ -164,10 +168,13 @@ public:
/// to build, the initial and final coroutine suspend points
bool NeedsCoroutineSuspends : 1;
- /// An enumeration represeting the kind of the first coroutine statement
+ /// An enumeration representing the kind of the first coroutine statement
/// in the function. One of co_return, co_await, or co_yield.
unsigned char FirstCoroutineStmtKind : 2;
+ /// Whether we found an immediate-escalating expression.
+ bool FoundImmediateEscalatingExpression : 1;
+
/// First coroutine statement in the current function.
/// (ex co_return, co_await, co_yield)
SourceLocation FirstCoroutineStmtLoc;
@@ -175,12 +182,16 @@ public:
/// First 'return' statement in the current function.
SourceLocation FirstReturnLoc;
- /// First C++ 'try' statement in the current function.
- SourceLocation FirstCXXTryLoc;
+ /// First C++ 'try' or ObjC @try statement in the current function.
+ SourceLocation FirstCXXOrObjCTryLoc;
+ enum { TryLocIsCXX, TryLocIsObjC, Unknown } FirstTryType = Unknown;
/// First SEH '__try' statement in the current function.
SourceLocation FirstSEHTryLoc;
+ /// First use of a VLA within the current function.
+ SourceLocation FirstVLALoc;
+
private:
/// Used to determine if errors occurred in this function or block.
DiagnosticErrorTrap ErrorTrap;
@@ -209,7 +220,7 @@ public:
/// The initial and final coroutine suspend points.
std::pair<Stmt *, Stmt *> CoroutineSuspends;
- /// The stack of currently active compound stamement scopes in the
+ /// The stack of currently active compound statement scopes in the
/// function.
SmallVector<CompoundScopeInfo, 4> CompoundScopes;
@@ -228,6 +239,9 @@ public:
/// modified in the function.
llvm::SmallPtrSet<const ParmVarDecl *, 8> ModifiedNonNullParams;
+ /// The set of GNU address of label extension "&&label".
+ llvm::SmallVector<AddrLabelExpr *, 4> AddrLabels;
+
public:
/// Represents a simple identification of a weak object.
///
@@ -380,7 +394,8 @@ public:
HasPotentialAvailabilityViolations(false), ObjCShouldCallSuper(false),
ObjCIsDesignatedInit(false), ObjCWarnForNoDesignatedInitChain(false),
ObjCIsSecondaryInit(false), ObjCWarnForNoInitDelegation(false),
- NeedsCoroutineSuspends(true), ErrorTrap(Diag) {}
+ NeedsCoroutineSuspends(true), FoundImmediateEscalatingExpression(false),
+ ErrorTrap(Diag) {}
virtual ~FunctionScopeInfo();
@@ -446,7 +461,14 @@ public:
void setHasCXXTry(SourceLocation TryLoc) {
setHasBranchProtectedScope();
- FirstCXXTryLoc = TryLoc;
+ FirstCXXOrObjCTryLoc = TryLoc;
+ FirstTryType = TryLocIsCXX;
+ }
+
+ void setHasObjCTry(SourceLocation TryLoc) {
+ setHasBranchProtectedScope();
+ FirstCXXOrObjCTryLoc = TryLoc;
+ FirstTryType = TryLocIsObjC;
}
void setHasSEHTry(SourceLocation TryLoc) {
@@ -454,6 +476,11 @@ public:
FirstSEHTryLoc = TryLoc;
}
+ void setHasVLA(SourceLocation VLALoc) {
+ if (FirstVLALoc.isInvalid())
+ FirstVLALoc = VLALoc;
+ }
+
bool NeedsScopeChecking() const {
return !HasDroppedStmt && (HasIndirectGoto || HasMustTail ||
(HasBranchProtectedScope && HasBranchIntoScope));
@@ -541,7 +568,7 @@ class Capture {
const VariableArrayType *CapturedVLA;
/// Otherwise, the captured variable (if any).
- VarDecl *CapturedVar;
+ ValueDecl *CapturedVar;
};
/// The source location at which the first capture occurred.
@@ -577,12 +604,13 @@ class Capture {
unsigned Invalid : 1;
public:
- Capture(VarDecl *Var, bool Block, bool ByRef, bool IsNested,
+ Capture(ValueDecl *Var, bool Block, bool ByRef, bool IsNested,
SourceLocation Loc, SourceLocation EllipsisLoc, QualType CaptureType,
bool Invalid)
: CapturedVar(Var), Loc(Loc), EllipsisLoc(EllipsisLoc),
- CaptureType(CaptureType),
- Kind(Block ? Cap_Block : ByRef ? Cap_ByRef : Cap_ByCopy),
+ CaptureType(CaptureType), Kind(Block ? Cap_Block
+ : ByRef ? Cap_ByRef
+ : Cap_ByCopy),
Nested(IsNested), CapturesThis(false), ODRUsed(false),
NonODRUsed(false), Invalid(Invalid) {}
@@ -627,7 +655,7 @@ public:
NonODRUsed = true;
}
- VarDecl *getVariable() const {
+ ValueDecl *getVariable() const {
assert(isVariableCapture());
return CapturedVar;
}
@@ -666,7 +694,7 @@ public:
: FunctionScopeInfo(Diag), ImpCaptureStyle(Style) {}
/// CaptureMap - A map of captured variables to (index+1) into Captures.
- llvm::DenseMap<VarDecl*, unsigned> CaptureMap;
+ llvm::DenseMap<ValueDecl *, unsigned> CaptureMap;
/// CXXThisCaptureIndex - The (index+1) of the capture of 'this';
/// zero if 'this' is not captured.
@@ -683,7 +711,7 @@ public:
/// or null if unknown.
QualType ReturnType;
- void addCapture(VarDecl *Var, bool isBlock, bool isByref, bool isNested,
+ void addCapture(ValueDecl *Var, bool isBlock, bool isByref, bool isNested,
SourceLocation Loc, SourceLocation EllipsisLoc,
QualType CaptureType, bool Invalid) {
Captures.push_back(Capture(Var, isBlock, isByref, isNested, Loc,
@@ -710,23 +738,21 @@ public:
}
/// Determine whether the given variable has been captured.
- bool isCaptured(VarDecl *Var) const {
- return CaptureMap.count(Var);
- }
+ bool isCaptured(ValueDecl *Var) const { return CaptureMap.count(Var); }
/// Determine whether the given variable-array type has been captured.
bool isVLATypeCaptured(const VariableArrayType *VAT) const;
/// Retrieve the capture of the given variable, if it has been
/// captured already.
- Capture &getCapture(VarDecl *Var) {
+ Capture &getCapture(ValueDecl *Var) {
assert(isCaptured(Var) && "Variable has not been captured");
return Captures[CaptureMap[Var] - 1];
}
- const Capture &getCapture(VarDecl *Var) const {
- llvm::DenseMap<VarDecl*, unsigned>::const_iterator Known
- = CaptureMap.find(Var);
+ const Capture &getCapture(ValueDecl *Var) const {
+ llvm::DenseMap<ValueDecl *, unsigned>::const_iterator Known =
+ CaptureMap.find(Var);
assert(Known != CaptureMap.end() && "Variable has not been captured");
return Captures[Known->second - 1];
}
@@ -824,6 +850,13 @@ public:
/// The lambda's compiler-generated \c operator().
CXXMethodDecl *CallOperator = nullptr;
+ /// Indicate that we parsed the parameter list
+ /// at which point the mutability of the lambda
+ /// is known.
+ bool AfterParameterList = true;
+
+ ParmVarDecl *ExplicitObjectParameter = nullptr;
+
/// Source range covering the lambda introducer [...].
SourceRange IntroducerRange;
@@ -835,8 +868,9 @@ public:
/// explicit captures.
unsigned NumExplicitCaptures = 0;
- /// Whether this is a mutable lambda.
- bool Mutable = false;
+ /// Whether this is a mutable lambda. Until the mutable keyword is parsed,
+ /// we assume the lambda is mutable.
+ bool Mutable = true;
/// Whether the (empty) parameter list is explicit.
bool ExplicitParams = false;
@@ -872,7 +906,7 @@ public:
/// This is specifically useful for generic lambdas or
/// lambdas within a potentially evaluated-if-used context.
/// If an enclosing variable is named in an expression of a lambda nested
- /// within a generic lambda, we don't always know know whether the variable
+ /// within a generic lambda, we don't always know whether the variable
/// will truly be odr-used (i.e. need to be captured) by that nested lambda,
/// until its instantiation. But we still need to capture it in the
/// enclosing lambda if all intervening lambdas can capture the variable.
@@ -891,8 +925,8 @@ public:
/// that were defined in parent contexts. Used to avoid warnings when the
/// shadowed variables are uncaptured by this lambda.
struct ShadowedOuterDecl {
- const VarDecl *VD;
- const VarDecl *ShadowedDecl;
+ const NamedDecl *VD;
+ const NamedDecl *ShadowedDecl;
};
llvm::SmallVector<ShadowedOuterDecl, 4> ShadowingDecls;
@@ -1001,10 +1035,7 @@ public:
return NonODRUsedCapturingExprs.count(CapturingVarExpr);
}
void removePotentialCapture(Expr *E) {
- PotentiallyCapturingExprs.erase(
- std::remove(PotentiallyCapturingExprs.begin(),
- PotentiallyCapturingExprs.end(), E),
- PotentiallyCapturingExprs.end());
+ llvm::erase(PotentiallyCapturingExprs, E);
}
void clearPotentialCaptures() {
PotentiallyCapturingExprs.clear();
@@ -1020,7 +1051,9 @@ public:
}
void visitPotentialCaptures(
- llvm::function_ref<void(VarDecl *, Expr *)> Callback) const;
+ llvm::function_ref<void(ValueDecl *, Expr *)> Callback) const;
+
+ bool lambdaCaptureShouldBeConst() const;
};
FunctionScopeInfo::WeakObjectProfileTy::WeakObjectProfileTy()
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Sema.h b/contrib/llvm-project/clang/include/clang/Sema/Sema.h
index d8b2546b81a3..6adb8fb7966b 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Sema.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Sema.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
+#include "clang/APINotes/APINotesManager.h"
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
@@ -58,7 +59,6 @@
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -68,13 +68,13 @@
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <memory>
+#include <optional>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
- template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
@@ -227,6 +227,7 @@ namespace sema {
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
+ class RISCVIntrinsicManager;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
@@ -238,8 +239,9 @@ namespace threadSafety {
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
-typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
- SourceLocation> UnexpandedParameterPack;
+typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType *, NamedDecl *>,
+ SourceLocation>
+ UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
@@ -306,7 +308,7 @@ public:
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
- /// Clients should be very careful when using this funciton, as it stores a
+ /// Clients should be very careful when using this function, as it stores a
/// function_ref, clients should make sure all calls to get() with the same
/// location happen while function_ref is alive.
///
@@ -357,15 +359,10 @@ class Sema final {
void operator=(const Sema &) = delete;
///Source of additional semantic information.
- ExternalSemaSource *ExternalSource;
-
- ///Whether Sema has generated a multiplexer and has to delete it.
- bool isMultiplexExternalSource;
+ IntrusiveRefCntPtr<ExternalSemaSource> ExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
- bool isVisibleSlow(const NamedDecl *D);
-
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
@@ -396,8 +393,8 @@ public:
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
- static const unsigned MaxAlignmentExponent = 29;
- static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
+ static const unsigned MaxAlignmentExponent = 32;
+ static const uint64_t MaximumAlignment = 1ull << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
@@ -412,6 +409,7 @@ public:
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
+ api_notes::APINotesManager APINotes;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
@@ -485,6 +483,12 @@ public:
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
+ struct PragmaPackInfo {
+ PragmaMsStackAction Action;
+ StringRef SlotLabel;
+ Token Alignment;
+ };
+
// #pragma pack and align.
class AlignPackInfo {
public:
@@ -693,6 +697,9 @@ public:
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
+ // #pragma strict_gs_check.
+ PragmaStack<bool> StrictGuardStackCheckStack;
+
// This stack tracks the current state of Sema.CurFPFeatures.
PragmaStack<FPOptionsOverride> FpPragmaStack;
FPOptionsOverride CurFPFeatureOverrides() {
@@ -705,6 +712,25 @@ public:
return result;
}
+ // Saves the current floating-point pragma stack and clear it in this Sema.
+ class FpPragmaStackSaveRAII {
+ public:
+ FpPragmaStackSaveRAII(Sema &S)
+ : S(S), SavedStack(std::move(S.FpPragmaStack)) {
+ S.FpPragmaStack.Stack.clear();
+ }
+ ~FpPragmaStackSaveRAII() { S.FpPragmaStack = std::move(SavedStack); }
+
+ private:
+ Sema &S;
+ PragmaStack<FPOptionsOverride> SavedStack;
+ };
+
+ void resetFPOptions(FPOptions FPO) {
+ CurFPFeatures = FPO;
+ FpPragmaStack.CurrentValue = FPO.getChangesFrom(FPOptions(LangOpts));
+ }
+
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
@@ -725,6 +751,9 @@ public:
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
+ /// Sections used with #pragma alloc_text.
+ llvm::StringMap<std::tuple<StringRef, SourceLocation>> FunctionToSectionMap;
+
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
@@ -756,6 +785,16 @@ public:
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
+ /// The "on" or "off" argument passed by \#pragma optimize, that denotes
+ /// whether the optimizations in the list passed to the pragma should be
+ /// turned off or on. This boolean is true by default because command line
+ /// options are honored when `#pragma optimize("", on)`.
+ /// (i.e. `ModifyFnAttributeMSPragmaOptimze()` does nothing)
+ bool MSPragmaOptimizeIsOn = true;
+
+ /// Set of no-builtin functions listed by \#pragma function.
+ llvm::SmallSetVector<StringRef, 4> MSFunctionNoBuiltins;
+
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
@@ -774,8 +813,7 @@ public:
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
- using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
- llvm::SmallPtrSet<Expr *, 4>>;
+ using MaybeODRUseExprSet = llvm::SmallSetVector<Expr *, 4>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
@@ -788,9 +826,12 @@ public:
/// context.
unsigned FunctionScopesStart = 0;
+ /// Track the number of currently active capturing scopes.
+ unsigned CapturingFunctionScopes = 0;
+
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
- return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
- FunctionScopes.end());
+ return llvm::ArrayRef(FunctionScopes.begin() + FunctionScopesStart,
+ FunctionScopes.end());
}
/// Stack containing information needed when in C++2a an 'auto' is encountered
@@ -805,9 +846,9 @@ public:
unsigned InventedParameterInfosStart = 0;
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
- return llvm::makeArrayRef(InventedParameterInfos.begin() +
- InventedParameterInfosStart,
- InventedParameterInfos.end());
+ return llvm::ArrayRef(InventedParameterInfos.begin() +
+ InventedParameterInfosStart,
+ InventedParameterInfos.end());
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
@@ -822,7 +863,7 @@ public:
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
- typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
+ typedef llvm::SmallSetVector<const NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
@@ -914,14 +955,14 @@ public:
OpaqueParser = P;
}
- // Does the work necessary to deal with a SYCL kernel lambda. At the moment,
- // this just marks the list of lambdas required to name the kernel.
- void AddSYCLKernelLambda(const FunctionDecl *FD);
+ /// Callback to the parser to parse a type expressed as a string.
+ std::function<TypeResult(StringRef, StringRef, SourceLocation)>
+ ParseTypeFromStringCallback;
class DelayedDiagnostics;
class DelayedDiagnosticsState {
- sema::DelayedDiagnosticPool *SavedPool;
+ sema::DelayedDiagnosticPool *SavedPool = nullptr;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
@@ -932,10 +973,10 @@ public:
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
- sema::DelayedDiagnosticPool *CurPool;
+ sema::DelayedDiagnosticPool *CurPool = nullptr;
public:
- DelayedDiagnostics() : CurPool(nullptr) {}
+ DelayedDiagnostics() = default;
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
@@ -980,6 +1021,14 @@ public:
}
} DelayedDiagnostics;
+ enum CUDAFunctionTarget {
+ CFT_Device,
+ CFT_Global,
+ CFT_Host,
+ CFT_HostDevice,
+ CFT_InvalidTarget
+ };
+
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
@@ -1022,20 +1071,6 @@ public:
}
};
- /// Whether the AST is currently being rebuilt to correct immediate
- /// invocations. Immediate invocation candidates and references to consteval
- /// functions aren't tracked when this is set.
- bool RebuildingImmediateInvocation = false;
-
- /// Used to change context to isConstantEvaluated without pushing a heavy
- /// ExpressionEvaluationContextRecord object.
- bool isConstantEvaluatedOverride;
-
- bool isConstantEvaluated() {
- return ExprEvalContexts.back().isConstantEvaluated() ||
- isConstantEvaluatedOverride;
- }
-
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
@@ -1046,12 +1081,21 @@ public:
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
+ auto *FD = dyn_cast<FunctionDecl>(DC);
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
- Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
- if (auto *FD = dyn_cast<FunctionDecl>(DC))
+ (FD && FD->isConsteval())
+ ? ExpressionEvaluationContext::ImmediateFunctionContext
+ : ExpressionEvaluationContext::PotentiallyEvaluated);
+ if (FD) {
FD->setWillHaveBody(true);
- else
+ S.ExprEvalContexts.back().InImmediateFunctionContext =
+ FD->isImmediateFunction() ||
+ S.ExprEvalContexts[S.ExprEvalContexts.size() - 2]
+ .isConstantEvaluated();
+ S.ExprEvalContexts.back().InImmediateEscalatingFunctionContext =
+ S.getLangOpts().CPlusPlus20 && FD->isImmediateEscalating();
+ } else
assert(isa<ObjCMethodDecl>(DC));
}
@@ -1070,17 +1114,29 @@ public:
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
- if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
+ if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) {
FD->setWillHaveBody(false);
+ S.CheckImmediateEscalatingFunctionDefinition(FD, S.getCurFunction());
+ }
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
- /// WeakUndeclaredIdentifiers - Identifiers contained in
- /// \#pragma weak before declared. rare. may alias another
- /// identifier, declared or undeclared
- llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
+ /// WeakUndeclaredIdentifiers - Identifiers contained in \#pragma weak before
+ /// declared. Rare. May alias another identifier, declared or undeclared.
+ ///
+ /// For aliases, the target identifier is used as a key for eventual
+ /// processing when the target is declared. For the single-identifier form,
+ /// the sole identifier is used as the key. Each entry is a `SetVector`
+ /// (ordered by parse order) of aliases (identified by the alias name) in case
+ /// of multiple aliases to the same undeclared identifier.
+ llvm::MapVector<
+ IdentifierInfo *,
+ llvm::SetVector<
+ WeakInfo, llvm::SmallVector<WeakInfo, 1u>,
+ llvm::SmallDenseSet<WeakInfo, 2u, WeakInfo::DenseMapInfoByAliasOnly>>>
+ WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
@@ -1117,10 +1173,6 @@ public:
/// standard library.
LazyDeclPtr StdAlignValT;
- /// The C++ "std::experimental" namespace, where the experimental parts
- /// of the standard library resides.
- NamespaceDecl *StdExperimentalNamespaceCache;
-
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
@@ -1132,8 +1184,9 @@ public:
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
- /// The MSVC "_GUID" struct, which is defined in MSVC header files.
- RecordDecl *MSVCGuidDecl;
+ /// The C++ "std::source_location::__impl" struct, defined in
+ /// \<source_location>.
+ RecordDecl *StdSourceLocationImplDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
@@ -1218,6 +1271,11 @@ public:
/// cases in a switch statement).
ConstantEvaluated,
+ /// In addition of being constant evaluated, the current expression
+ /// occurs in an immediate function context - either a consteval function
+ /// or a consteval if statement.
+ ImmediateFunctionContext,
+
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
@@ -1292,6 +1350,36 @@ public:
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
+ // A context can be nested in both a discarded statement context and
+ // an immediate function context, so they need to be tracked independently.
+ bool InDiscardedStatement;
+ bool InImmediateFunctionContext;
+ bool InImmediateEscalatingFunctionContext;
+
+ bool IsCurrentlyCheckingDefaultArgumentOrInitializer = false;
+
+ // We are in a constant context, but we also allow
+ // non constant expressions, for example for array bounds (which may be
+ // VLAs).
+ bool InConditionallyConstantEvaluateContext = false;
+
+ // When evaluating immediate functions in the initializer of a default
+ // argument or default member initializer, this is the declaration whose
+ // default initializer is being evaluated and the location of the call
+ // or constructor definition.
+ struct InitializationContext {
+ InitializationContext(SourceLocation Loc, ValueDecl *Decl,
+ DeclContext *Context)
+ : Loc(Loc), Decl(Decl), Context(Context) {
+ assert(Decl && Context && "invalid initialization context");
+ }
+
+ SourceLocation Loc;
+ ValueDecl *Decl = nullptr;
+ DeclContext *Context = nullptr;
+ };
+ std::optional<InitializationContext> DelayedDefaultInitializationContext;
+
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
@@ -1299,21 +1387,50 @@ public:
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
- ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
+ ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext),
+ InDiscardedStatement(false), InImmediateFunctionContext(false),
+ InImmediateEscalatingFunctionContext(false) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
+
bool isConstantEvaluated() const {
- return Context == ExpressionEvaluationContext::ConstantEvaluated;
+ return Context == ExpressionEvaluationContext::ConstantEvaluated ||
+ Context == ExpressionEvaluationContext::ImmediateFunctionContext;
+ }
+
+ bool isImmediateFunctionContext() const {
+ return Context == ExpressionEvaluationContext::ImmediateFunctionContext ||
+ (Context == ExpressionEvaluationContext::DiscardedStatement &&
+ InImmediateFunctionContext) ||
+ // C++23 [expr.const]p14:
+ // An expression or conversion is in an immediate function
+ // context if it is potentially evaluated and either:
+ // * its innermost enclosing non-block scope is a function
+ // parameter scope of an immediate function, or
+ // * its enclosing statement is enclosed by the compound-
+ // statement of a consteval if statement.
+ (Context == ExpressionEvaluationContext::PotentiallyEvaluated &&
+ InImmediateFunctionContext);
+ }
+
+ bool isDiscardedStatementContext() const {
+ return Context == ExpressionEvaluationContext::DiscardedStatement ||
+ (Context ==
+ ExpressionEvaluationContext::ImmediateFunctionContext &&
+ InDiscardedStatement);
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
+ // Set of failed immediate invocations to avoid double diagnosing.
+ llvm::SmallPtrSet<ConstantExpr *, 4> FailedImmediateInvocations;
+
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
@@ -1340,10 +1457,10 @@ public:
};
private:
- llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
+ llvm::PointerIntPair<CXXMethodDecl *, 2> Pair;
public:
- SpecialMemberOverloadResult() : Pair() {}
+ SpecialMemberOverloadResult() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
@@ -1408,7 +1525,7 @@ public:
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
- bool isExternalWithNoLinkageType(ValueDecl *VD);
+ bool isExternalWithNoLinkageType(const ValueDecl *VD) const;
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
@@ -1419,8 +1536,22 @@ public:
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
- typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
- typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
+ class GlobalMethodPool {
+ public:
+ using Lists = std::pair<ObjCMethodList, ObjCMethodList>;
+ using iterator = llvm::DenseMap<Selector, Lists>::iterator;
+ iterator begin() { return Methods.begin(); }
+ iterator end() { return Methods.end(); }
+ iterator find(Selector Sel) { return Methods.find(Sel); }
+ std::pair<iterator, bool> insert(std::pair<Selector, Lists> &&Val) {
+ return Methods.insert(Val);
+ }
+ int count(Selector Sel) const { return Methods.count(Sel); }
+ bool empty() const { return Methods.empty(); }
+
+ private:
+ llvm::DenseMap<Selector, Lists> Methods;
+ };
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
@@ -1502,19 +1633,16 @@ public:
/// statements.
class FPFeaturesStateRAII {
public:
- FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {
- OldOverrides = S.FpPragmaStack.CurrentValue;
- }
- ~FPFeaturesStateRAII() {
- S.CurFPFeatures = OldFPFeaturesState;
- S.FpPragmaStack.CurrentValue = OldOverrides;
- }
+ FPFeaturesStateRAII(Sema &S);
+ ~FPFeaturesStateRAII();
FPOptionsOverride getOverrides() { return OldOverrides; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
FPOptionsOverride OldOverrides;
+ LangOptions::FPEvalMethodKind OldEvalMethod;
+ SourceLocation OldFPPragmaLocation;
};
void addImplicitTypedef(StringRef Name, QualType T);
@@ -1526,7 +1654,18 @@ public:
/// assignment.
llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments;
- Optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo;
+ /// Indicate RISC-V vector builtin functions enabled or not.
+ bool DeclareRISCVVBuiltins = false;
+
+ /// Indicate RISC-V SiFive vector builtin functions enabled or not.
+ bool DeclareRISCVSiFiveVectorBuiltins = false;
+
+private:
+ std::unique_ptr<sema::RISCVIntrinsicManager> RVIntrinsicManager;
+
+ std::optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo;
+
+ bool WarnedDarwinSDKInfoMissing = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
@@ -1555,9 +1694,11 @@ public:
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
- ExternalSemaSource* getExternalSource() const { return ExternalSource; }
+ ExternalSemaSource *getExternalSource() const { return ExternalSource.get(); }
+
DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
StringRef Platform);
+ DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking();
///Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
@@ -1635,8 +1776,8 @@ public:
// It is necessary to limit this to rvalue reference to avoid calling this
// function with a bitfield lvalue argument since non-const reference to
// bitfield is not allowed.
- template <typename T, typename = typename std::enable_if<
- !std::is_lvalue_reference<T>::value>::type>
+ template <typename T,
+ typename = std::enable_if_t<!std::is_lvalue_reference<T>::value>>
const ImmediateDiagBuilder &operator<<(T &&V) const {
const DiagnosticBuilder &BaseDiag = *this;
BaseDiag << std::move(V);
@@ -1676,12 +1817,18 @@ public:
};
SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
- FunctionDecl *Fn, Sema &S);
+ const FunctionDecl *Fn, Sema &S);
SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D);
SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;
+
+ // The copy and move assignment operator is defined as deleted pending
+ // further motivation.
+ SemaDiagnosticBuilder &operator=(const SemaDiagnosticBuilder &) = delete;
+ SemaDiagnosticBuilder &operator=(SemaDiagnosticBuilder &&) = delete;
+
~SemaDiagnosticBuilder();
- bool isImmediate() const { return ImmediateDiag.hasValue(); }
+ bool isImmediate() const { return ImmediateDiag.has_value(); }
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
@@ -1698,9 +1845,9 @@ public:
template <typename T>
friend const SemaDiagnosticBuilder &
operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
- if (Diag.ImmediateDiag.hasValue())
+ if (Diag.ImmediateDiag)
*Diag.ImmediateDiag << Value;
- else if (Diag.PartialDiagId.hasValue())
+ else if (Diag.PartialDiagId)
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
@@ -1709,29 +1856,29 @@ public:
// It is necessary to limit this to rvalue reference to avoid calling this
// function with a bitfield lvalue argument since non-const reference to
// bitfield is not allowed.
- template <typename T, typename = typename std::enable_if<
- !std::is_lvalue_reference<T>::value>::type>
+ template <typename T,
+ typename = std::enable_if_t<!std::is_lvalue_reference<T>::value>>
const SemaDiagnosticBuilder &operator<<(T &&V) const {
- if (ImmediateDiag.hasValue())
+ if (ImmediateDiag)
*ImmediateDiag << std::move(V);
- else if (PartialDiagId.hasValue())
+ else if (PartialDiagId)
S.DeviceDeferredDiags[Fn][*PartialDiagId].second << std::move(V);
return *this;
}
friend const SemaDiagnosticBuilder &
operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) {
- if (Diag.ImmediateDiag.hasValue())
+ if (Diag.ImmediateDiag)
PD.Emit(*Diag.ImmediateDiag);
- else if (Diag.PartialDiagId.hasValue())
+ else if (Diag.PartialDiagId)
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD;
return Diag;
}
void AddFixItHint(const FixItHint &Hint) const {
- if (ImmediateDiag.hasValue())
+ if (ImmediateDiag)
ImmediateDiag->AddFixItHint(Hint);
- else if (PartialDiagId.hasValue())
+ else if (PartialDiagId)
S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint);
}
@@ -1751,13 +1898,13 @@ public:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
- FunctionDecl *Fn;
+ const FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
- llvm::Optional<ImmediateDiagBuilder> ImmediateDiag;
- llvm::Optional<unsigned> PartialDiagId;
+ std::optional<ImmediateDiagBuilder> ImmediateDiag;
+ std::optional<unsigned> PartialDiagId;
};
/// Is the last error level diagnostic immediate. This is used to determined
@@ -1944,9 +2091,9 @@ public:
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
- QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
- Expr *ArraySize, unsigned Quals,
- SourceRange Brackets, DeclarationName Entity);
+ QualType BuildArrayType(QualType T, ArraySizeModifier ASM, Expr *ArraySize,
+ unsigned Quals, SourceRange Brackets,
+ DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
@@ -1960,10 +2107,17 @@ public:
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
+ CodeAlignAttr *BuildCodeAlignAttr(const AttributeCommonInfo &CI, Expr *E);
+ bool CheckRebuiltStmtAttributes(ArrayRef<const Attr *> Attrs);
+
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
+ /// Check an argument list for placeholders that we won't try to
+ /// handle later.
+ bool CheckArgsForPlaceholders(MultiExprArg args);
+
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
@@ -2007,9 +2161,9 @@ public:
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
- QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
+ QualType BuildBitIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
- TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
+ TypeSourceInfo *GetTypeForDeclarator(Declarator &D);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
@@ -2038,22 +2192,19 @@ public:
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
- bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
- const PartialDiagnostic &NestedDiagID,
- const PartialDiagnostic &NoteID,
- const PartialDiagnostic &NoThrowDiagID,
- const FunctionProtoType *Superset,
- SourceLocation SuperLoc,
- const FunctionProtoType *Subset,
- SourceLocation SubLoc);
- bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
- const PartialDiagnostic &NoteID,
- const FunctionProtoType *Target,
- SourceLocation TargetLoc,
- const FunctionProtoType *Source,
- SourceLocation SourceLoc);
-
- TypeResult ActOnTypeName(Scope *S, Declarator &D);
+ bool CheckExceptionSpecSubset(
+ const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID,
+ const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID,
+ const FunctionProtoType *Superset, bool SkipSupersetFirstParameter,
+ SourceLocation SuperLoc, const FunctionProtoType *Subset,
+ bool SkipSubsetFirstParameter, SourceLocation SubLoc);
+ bool CheckParamExceptionSpec(
+ const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID,
+ const FunctionProtoType *Target, bool SkipTargetFirstParameter,
+ SourceLocation TargetLoc, const FunctionProtoType *Source,
+ bool SkipSourceFirstParameter, SourceLocation SourceLoc);
+
+ TypeResult ActOnTypeName(Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
@@ -2150,6 +2301,8 @@ public:
Default = AcceptSizeless
};
+ enum class AcceptableKind { Visible, Reachable };
+
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
@@ -2168,24 +2321,74 @@ private:
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
- bool ModuleInterface = false;
- bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
+ /// For an interface unit, this is the implicitly imported interface unit.
+ clang::Module *ThePrimaryInterface = nullptr;
+
+ /// The explicit global module fragment of the current translation unit.
+ /// The explicit Global Module Fragment, as specified in C++
+ /// [module.global.frag].
+ clang::Module *TheGlobalModuleFragment = nullptr;
+
+ /// The implicit global module fragments of the current translation unit.
+ ///
+ /// The contents in the implicit global module fragment can't be discarded.
+ clang::Module *TheImplicitGlobalModuleFragment = nullptr;
+
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
- /// Get the module whose scope we are currently within.
+ /// In a C++ standard module, inline declarations require a definition to be
+ /// present at the end of a definition domain. This set holds the decls to
+ /// be checked at the end of the TU.
+ llvm::SmallPtrSet<const FunctionDecl *, 8> PendingInlineFuncDecls;
+
+ /// Helper function to judge if we are in module purview.
+ /// Return false if we are not in a module.
+ bool isCurrentModulePurview() const;
+
+ /// Enter the scope of the explicit global module fragment.
+ Module *PushGlobalModuleFragment(SourceLocation BeginLoc);
+ /// Leave the scope of the explicit global module fragment.
+ void PopGlobalModuleFragment();
+
+ /// Enter the scope of an implicit global module fragment.
+ Module *PushImplicitGlobalModuleFragment(SourceLocation BeginLoc);
+ /// Leave the scope of an implicit global module fragment.
+ void PopImplicitGlobalModuleFragment();
+
+ VisibleModuleSet VisibleModules;
+
+ /// Cache for module units which is usable for current module.
+ llvm::DenseSet<const Module *> UsableModuleUnitsCache;
+
+ bool isUsableModule(const Module *M);
+
+ bool isAcceptableSlow(const NamedDecl *D, AcceptableKind Kind);
+
+public:
+ /// Get the module unit whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
- VisibleModuleSet VisibleModules;
+ /// Is the module scope we are an implementation unit?
+ bool currentModuleIsImplementation() const {
+ return ModuleScopes.empty()
+ ? false
+ : ModuleScopes.back().Module->isModuleImplementation();
+ }
+
+ /// Is the module scope we are in a C++ Header Unit?
+ bool currentModuleIsHeaderUnit() const {
+ return ModuleScopes.empty() ? false
+ : ModuleScopes.back().Module->isHeaderUnit();
+ }
-public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
@@ -2205,7 +2408,20 @@ public:
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
- return D->isUnconditionallyVisible() || isVisibleSlow(D);
+ return D->isUnconditionallyVisible() ||
+ isAcceptableSlow(D, AcceptableKind::Visible);
+ }
+
+ /// Determine whether a declaration is reachable.
+ bool isReachable(const NamedDecl *D) {
+ // All visible declarations are reachable.
+ return D->isUnconditionallyVisible() ||
+ isAcceptableSlow(D, AcceptableKind::Reachable);
+ }
+
+ /// Determine whether a declaration is acceptable (visible/reachable).
+ bool isAcceptable(const NamedDecl *D, AcceptableKind Kind) {
+ return Kind == AcceptableKind::Visible ? isVisible(D) : isReachable(D);
}
/// Determine whether any declaration of an entity is visible.
@@ -2214,11 +2430,20 @@ public:
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
+
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
+ /// Determine whether any declaration of an entity is reachable.
+ bool
+ hasReachableDeclaration(const NamedDecl *D,
+ llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
+ return isReachable(D) || hasReachableDeclarationSlow(D, Modules);
+ }
+ bool hasReachableDeclarationSlow(
+ const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
- bool hasVisibleMergedDefinition(NamedDecl *Def);
- bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
+ bool hasVisibleMergedDefinition(const NamedDecl *Def);
+ bool hasMergedDefinitionInCurrentModule(const NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
@@ -2233,21 +2458,54 @@ public:
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
+ /// Determine if \p D has a reachable definition. If not, suggest a
+ /// declaration that should be made reachable to expose the definition.
+ bool hasReachableDefinition(NamedDecl *D, NamedDecl **Suggested,
+ bool OnlyNeedComplete = false);
+ bool hasReachableDefinition(NamedDecl *D) {
+ NamedDecl *Hidden;
+ return hasReachableDefinition(D, &Hidden);
+ }
+
+ bool hasAcceptableDefinition(NamedDecl *D, NamedDecl **Suggested,
+ AcceptableKind Kind,
+ bool OnlyNeedComplete = false);
+ bool hasAcceptableDefinition(NamedDecl *D, AcceptableKind Kind) {
+ NamedDecl *Hidden;
+ return hasAcceptableDefinition(D, &Hidden, Kind);
+ }
+
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
+ /// Determine if the template parameter \p D has a reachable default argument.
+ bool hasReachableDefaultArgument(
+ const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
+ /// Determine if the template parameter \p D has a reachable default argument.
+ bool hasAcceptableDefaultArgument(const NamedDecl *D,
+ llvm::SmallVectorImpl<Module *> *Modules,
+ Sema::AcceptableKind Kind);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
+ /// Determine if there is a reachable declaration of \p D that is an explicit
+ /// specialization declaration for a specialization of a template. (For a
+ /// member specialization, use hasReachableMemberSpecialization.)
+ bool hasReachableExplicitSpecialization(
+ const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
+ /// Determine if there is a reachable declaration of \p D that is a member
+ /// specialization declaration (as opposed to an instantiated declaration).
+ bool hasReachableMemberSpecialization(
+ const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
@@ -2260,6 +2518,10 @@ public:
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
+ // Check whether the size of array element of type \p EltTy is a multiple of
+ // its alignment and return false if it isn't.
+ bool checkArrayElementAlignment(QualType EltTy, SourceLocation Loc);
+
bool isCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind = CompleteTypeKind::Default) {
return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
@@ -2334,14 +2596,30 @@ public:
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
- QualType getDecltypeForParenthesizedExpr(Expr *E);
- QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
+ // Returns the underlying type of a decltype with the given expression.
+ QualType getDecltypeForExpr(Expr *E);
+
+ QualType BuildTypeofExprType(Expr *E, TypeOfKind Kind);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
- QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
- bool AsUnevaluated = true);
- QualType BuildUnaryTransformType(QualType BaseType,
- UnaryTransformType::UTTKind UKind,
+ QualType BuildDecltypeType(Expr *E, bool AsUnevaluated = true);
+
+ using UTTKind = UnaryTransformType::UTTKind;
+ QualType BuildUnaryTransformType(QualType BaseType, UTTKind UKind,
+ SourceLocation Loc);
+ QualType BuiltinEnumUnderlyingType(QualType BaseType, SourceLocation Loc);
+ QualType BuiltinAddPointer(QualType BaseType, SourceLocation Loc);
+ QualType BuiltinRemovePointer(QualType BaseType, SourceLocation Loc);
+ QualType BuiltinDecay(QualType BaseType, SourceLocation Loc);
+ QualType BuiltinAddReference(QualType BaseType, UTTKind UKind,
+ SourceLocation Loc);
+ QualType BuiltinRemoveExtent(QualType BaseType, UTTKind UKind,
+ SourceLocation Loc);
+ QualType BuiltinRemoveReference(QualType BaseType, UTTKind UKind,
+ SourceLocation Loc);
+ QualType BuiltinChangeCVRQualifiers(QualType BaseType, UTTKind UKind,
+ SourceLocation Loc);
+ QualType BuiltinChangeSignedness(QualType BaseType, UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
@@ -2349,13 +2627,11 @@ public:
//
struct SkipBodyInfo {
- SkipBodyInfo()
- : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
- New(nullptr) {}
- bool ShouldSkip;
- bool CheckSameAsPrevious;
- NamedDecl *Previous;
- NamedDecl *New;
+ SkipBodyInfo() = default;
+ bool ShouldSkip = false;
+ bool CheckSameAsPrevious = false;
+ NamedDecl *Previous = nullptr;
+ NamedDecl *New = nullptr;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
@@ -2371,6 +2647,8 @@ public:
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
+ ImplicitTypenameContext AllowImplicitTypename =
+ ImplicitTypenameContext::No,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
@@ -2682,15 +2960,17 @@ public:
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
- NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
- TypeSourceInfo *TInfo,
- LookupResult &Previous,
- MultiTemplateParamsArg TemplateParamLists,
- bool &AddToScope,
- ArrayRef<BindingDecl *> Bindings = None);
+ NamedDecl *ActOnVariableDeclarator(
+ Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo,
+ LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists,
+ bool &AddToScope, ArrayRef<BindingDecl *> Bindings = std::nullopt);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
+ void DiagPlaceholderVariableDefinition(SourceLocation Loc);
+ bool DiagRedefinedPlaceholderFieldDecl(SourceLocation Loc,
+ RecordDecl *ClassDecl,
+ const IdentifierInfo *Name);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
@@ -2726,19 +3006,30 @@ public:
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
- bool IsMemberSpecialization);
+ bool IsMemberSpecialization, bool DeclIsDefn);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
+ void ActOnHLSLTopLevelFunction(FunctionDecl *FD);
+ void CheckHLSLEntryPoint(FunctionDecl *FD);
+ void CheckHLSLSemanticAnnotation(FunctionDecl *EntryPoint, const Decl *Param,
+ const HLSLAnnotationAttr *AnnotationAttr);
+ void DiagnoseHLSLAttrStageMismatch(
+ const Attr *A, HLSLShaderAttr::ShaderType Stage,
+ std::initializer_list<HLSLShaderAttr::ShaderType> AllowedStages);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
- Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
+ Decl *ActOnParamDeclarator(Scope *S, Declarator &D,
+ SourceLocation ExplicitThisLoc = {});
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
+ QualType AdjustParameterTypeForObjCAutoRefCount(QualType T,
+ SourceLocation NameLoc,
+ TypeSourceInfo *TSInfo);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
@@ -2748,7 +3039,8 @@ public:
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
SourceLocation ArgLoc);
- void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
+ void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc,
+ Expr* DefaultArg);
ExprResult ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
@@ -2804,11 +3096,11 @@ public:
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
- ParsedAttributes &Attrs,
- SourceLocation AttrEnd);
+ ParsedAttributes &Attrs);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
+ void CheckThreadLocalForLargeAlignment(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
@@ -2819,6 +3111,18 @@ public:
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
+ enum class FnBodyKind {
+ /// C++ [dcl.fct.def.general]p1
+ /// function-body:
+ /// ctor-initializer[opt] compound-statement
+ /// function-try-block
+ Other,
+ /// = default ;
+ Default,
+ /// = delete ;
+ Delete
+ };
+
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
@@ -2826,9 +3130,12 @@ public:
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
- SkipBodyInfo *SkipBody = nullptr);
+ SkipBodyInfo *SkipBody = nullptr,
+ FnBodyKind BodyKind = FnBodyKind::Other);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
- SkipBodyInfo *SkipBody = nullptr);
+ SkipBodyInfo *SkipBody = nullptr,
+ FnBodyKind BodyKind = FnBodyKind::Other);
+ void SetFunctionBodyKind(Decl *D, SourceLocation Loc, FnBodyKind BodyKind);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
ExprResult ActOnRequiresClause(ExprResult ConstraintExpr);
@@ -2856,6 +3163,10 @@ public:
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
+ /// Determine whether \param D is function like (function or function
+ /// template) for parsing.
+ bool isDeclaratorFunctionLike(Declarator &D);
+
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
@@ -2882,20 +3193,46 @@ public:
SourceLocation AsmLoc,
SourceLocation RParenLoc);
+ Decl *ActOnTopLevelStmtDecl(Stmt *Statement);
+
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
- Interface, ///< 'export module X;'
- Implementation, ///< 'module X;'
+ Interface, ///< 'export module X;'
+ Implementation, ///< 'module X;'
+ PartitionInterface, ///< 'export module X:Y;'
+ PartitionImplementation, ///< 'module X:Y;'
+ };
+
+ /// An enumeration to represent the transition of states in parsing module
+ /// fragments and imports. If we are not parsing a C++20 TU, or we find
+ /// an error in state transition, the state is set to NotACXX20Module.
+ enum class ModuleImportState {
+ FirstDecl, ///< Parsing the first decl in a TU.
+ GlobalFragment, ///< after 'module;' but before 'module X;'
+ ImportAllowed, ///< after 'module X;' but before any non-import decl.
+ ImportFinished, ///< after any non-import decl.
+ PrivateFragmentImportAllowed, ///< after 'module :private;' but before any
+ ///< non-import decl.
+ PrivateFragmentImportFinished, ///< after 'module :private;' but a
+ ///< non-import decl has already been seen.
+ NotACXX20Module ///< Not a C++20 TU, or an invalid state was found.
};
+private:
+ /// The parser has begun a translation unit to be compiled as a C++20
+ /// Header Unit, helper for ActOnStartOfTranslationUnit() only.
+ void HandleStartOfHeaderUnit();
+
+public:
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
- ModuleIdPath Path, bool IsFirstDecl);
+ ModuleIdPath Path, ModuleIdPath Partition,
+ ModuleImportState &ImportState);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
@@ -2915,10 +3252,12 @@ public:
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
- /// \param Path The module access path.
+ /// \param Path The module toplevel name as an access path.
+ /// \param IsPartition If the name is for a partition.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
- SourceLocation ImportLoc, ModuleIdPath Path);
+ SourceLocation ImportLoc, ModuleIdPath Path,
+ bool IsPartition = false);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
@@ -2955,9 +3294,9 @@ public:
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
- void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
+ void diagnoseMissingImport(SourceLocation Loc, const NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
- void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
+ void diagnoseMissingImport(SourceLocation Loc, const NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
@@ -2968,8 +3307,9 @@ public:
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
- /// and partial specializations are visible, and diagnose if not.
+ /// and partial specializations are visible/reachable, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
+ void checkSpecializationReachability(SourceLocation Loc, NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
@@ -2985,8 +3325,10 @@ public:
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
+ const ParsedAttributesView &DeclAttrs,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
+ const ParsedAttributesView &DeclAttrs,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
@@ -2996,6 +3338,12 @@ public:
RecordDecl *Record,
const PrintingPolicy &Policy);
+ /// Called once it is known whether
+ /// a tag declaration is an anonymous union or struct.
+ void ActOnDefinedDeclarationSpecifier(Decl *D);
+
+ void DiagPlaceholderFieldDeclDefinitions(RecordDecl *Record);
+
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
@@ -3029,22 +3377,34 @@ public:
TUK_Friend // Friend declaration: 'friend struct foo;'
};
- Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
- SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
- SourceLocation NameLoc, const ParsedAttributesView &Attr,
- AccessSpecifier AS, SourceLocation ModulePrivateLoc,
- MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
- bool &IsDependent, SourceLocation ScopedEnumKWLoc,
- bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
- bool IsTypeSpecifier, bool IsTemplateParamOrArg,
- SkipBodyInfo *SkipBody = nullptr);
-
- Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
- unsigned TagSpec, SourceLocation TagLoc,
- CXXScopeSpec &SS, IdentifierInfo *Name,
- SourceLocation NameLoc,
- const ParsedAttributesView &Attr,
- MultiTemplateParamsArg TempParamLists);
+ enum OffsetOfKind {
+ // Not parsing a type within __builtin_offsetof.
+ OOK_Outside,
+ // Parsing a type within __builtin_offsetof.
+ OOK_Builtin,
+ // Parsing a type within macro "offsetof", defined in __buitin_offsetof
+ // To improve our diagnostic message.
+ OOK_Macro,
+ };
+
+ DeclResult ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
+ SourceLocation KWLoc, CXXScopeSpec &SS,
+ IdentifierInfo *Name, SourceLocation NameLoc,
+ const ParsedAttributesView &Attr, AccessSpecifier AS,
+ SourceLocation ModulePrivateLoc,
+ MultiTemplateParamsArg TemplateParameterLists,
+ bool &OwnedDecl, bool &IsDependent,
+ SourceLocation ScopedEnumKWLoc,
+ bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
+ bool IsTypeSpecifier, bool IsTemplateParamOrArg,
+ OffsetOfKind OOK, SkipBodyInfo *SkipBody = nullptr);
+
+ DeclResult ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
+ unsigned TagSpec, SourceLocation TagLoc,
+ CXXScopeSpec &SS, IdentifierInfo *Name,
+ SourceLocation NameLoc,
+ const ParsedAttributesView &Attr,
+ MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
@@ -3142,9 +3502,8 @@ public:
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
- Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
- Declarator &D, Expr *BitfieldWidth,
- tok::ObjCKeywordKind visibility);
+ Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D,
+ Expr *BitWidth, tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
@@ -3159,15 +3518,28 @@ public:
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
- bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
- SkipBodyInfo &SkipBody);
+ bool ActOnDuplicateDefinition(Decl *Prev, SkipBodyInfo &SkipBody);
+
+ /// Check ODR hashes for C/ObjC when merging types from modules.
+ /// Differently from C++, actually parse the body and reject in case
+ /// of a mismatch.
+ template <typename T,
+ typename = std::enable_if_t<std::is_base_of<NamedDecl, T>::value>>
+ bool ActOnDuplicateODRHashDefinition(T *Duplicate, T *Previous) {
+ if (Duplicate->getODRHash() != Previous->getODRHash())
+ return false;
+
+ // Make the previous decl visible.
+ makeMergedDefinitionVisible(Previous);
+ return true;
+ }
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
- Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
+ void ActOnObjCContainerStartDefinition(ObjCContainerDecl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
@@ -3191,8 +3563,8 @@ public:
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
- void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
- void ActOnObjCReenterContainerContext(DeclContext *DC);
+ void ActOnObjCTemporaryExitContainerContext(ObjCContainerDecl *ObjCCtx);
+ void ActOnObjCReenterContainerContext(ObjCContainerDecl *ObjCCtx);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
@@ -3239,12 +3611,14 @@ public:
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
- DeclContext *getFunctionLevelDeclContext();
+ /// If \p AllowLambda is true, treat lambda as function.
+ DeclContext *getFunctionLevelDeclContext(bool AllowLambda = false) const;
- /// getCurFunctionDecl - If inside of a function body, this returns a pointer
- /// to the function decl for the function being parsed. If we're currently
- /// in a 'block', this returns the containing context.
- FunctionDecl *getCurFunctionDecl();
+ /// Returns a pointer to the innermost enclosing function, or nullptr if the
+ /// current context is not inside a function. If \p AllowLambda is true,
+ /// this can return the call operator of an enclosing lambda, otherwise
+ /// lambdas are skipped when looking for an enclosing function.
+ FunctionDecl *getCurFunctionDecl(bool AllowLambda = false) const;
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
@@ -3254,7 +3628,7 @@ public:
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
- NamedDecl *getCurFunctionOrMethodDecl();
+ NamedDecl *getCurFunctionOrMethodDecl() const;
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
@@ -3267,7 +3641,7 @@ public:
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
- bool AllowInlineNamespace = false);
+ bool AllowInlineNamespace = false) const;
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
@@ -3343,6 +3717,8 @@ public:
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
+ ErrorAttr *mergeErrorAttr(Decl *D, const AttributeCommonInfo &CI,
+ StringRef NewUserDiagnostic);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
@@ -3368,13 +3744,22 @@ public:
EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL);
EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D,
const EnforceTCBLeafAttr &AL);
+ BTFDeclTagAttr *mergeBTFDeclTagAttr(Decl *D, const BTFDeclTagAttr &AL);
+ HLSLNumThreadsAttr *mergeHLSLNumThreadsAttr(Decl *D,
+ const AttributeCommonInfo &AL,
+ int X, int Y, int Z);
+ HLSLShaderAttr *mergeHLSLShaderAttr(Decl *D, const AttributeCommonInfo &AL,
+ HLSLShaderAttr::ShaderType ShaderType);
+ HLSLParamModifierAttr *
+ mergeHLSLParamModifierAttr(Decl *D, const AttributeCommonInfo &AL,
+ HLSLParamModifierAttr::Spelling Spelling);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
- bool MergeTypeWithOld);
+ bool MergeTypeWithOld, bool NewDeclIsDefn);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
@@ -3416,10 +3801,30 @@ public:
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
- bool IsForUsingDecl);
- bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
- bool ConsiderCudaAttrs = true,
- bool ConsiderRequiresClauses = true);
+ bool UseMemberUsingDeclRules);
+ bool IsOverload(FunctionDecl *New, FunctionDecl *Old,
+ bool UseMemberUsingDeclRules, bool ConsiderCudaAttrs = true);
+
+ // Checks whether MD constitutes an override the base class method BaseMD.
+ // When checking for overrides, the object object members are ignored.
+ bool IsOverride(FunctionDecl *MD, FunctionDecl *BaseMD,
+ bool UseMemberUsingDeclRules, bool ConsiderCudaAttrs = true);
+
+ // Calculates whether the expression Constraint depends on an enclosing
+ // template, for the purposes of [temp.friend] p9.
+ // TemplateDepth is the 'depth' of the friend function, which is used to
+ // compare whether a declaration reference is referring to a containing
+ // template, or just the current friend function. A 'lower' TemplateDepth in
+ // the AST refers to a 'containing' template. As the constraint is
+ // uninstantiated, this is relative to the 'top' of the TU.
+ bool
+ ConstraintExpressionDependsOnEnclosingTemplate(const FunctionDecl *Friend,
+ unsigned TemplateDepth,
+ const Expr *Constraint);
+
+ // Calculates whether the friend function depends on an enclosing template for
+ // the purposes of [temp.friend] p9.
+ bool FriendConstraintsDependOnEnclosingTemplate(const FunctionDecl *FD);
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
@@ -3450,9 +3855,22 @@ public:
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
+
+ bool FunctionParamTypesAreEqual(ArrayRef<QualType> Old,
+ ArrayRef<QualType> New,
+ unsigned *ArgPos = nullptr,
+ bool Reversed = false);
+
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
- unsigned *ArgPos = nullptr);
+ unsigned *ArgPos = nullptr,
+ bool Reversed = false);
+
+ bool FunctionNonObjectParamTypesAreEqual(const FunctionDecl *OldFunction,
+ const FunctionDecl *NewFunction,
+ unsigned *ArgPos = nullptr,
+ bool Reversed = false);
+
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
@@ -3475,7 +3893,7 @@ public:
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
- bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
+ bool isSameOrCompatibleFunctionType(QualType Param, QualType Arg);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
@@ -3489,10 +3907,11 @@ public:
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
- ExprResult PerformObjectArgumentInitialization(Expr *From,
- NestedNameSpecifier *Qualifier,
- NamedDecl *FoundDecl,
- CXXMethodDecl *Method);
+ ExprResult InitializeExplicitObjectArgument(Sema &S, Expr *Obj,
+ FunctionDecl *Fun);
+ ExprResult PerformImplicitObjectArgumentInitialization(
+ Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl,
+ CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
@@ -3504,18 +3923,33 @@ public:
/// Contexts in which a converted constant expression is required.
enum CCEKind {
- CCEK_CaseValue, ///< Expression in a case label.
- CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
- CCEK_TemplateArg, ///< Value of a non-type template parameter.
- CCEK_ArrayBound, ///< Array bound in array declarator or new-expression.
- CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
- };
+ CCEK_CaseValue, ///< Expression in a case label.
+ CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
+ CCEK_TemplateArg, ///< Value of a non-type template parameter.
+ CCEK_ArrayBound, ///< Array bound in array declarator or new-expression.
+ CCEK_ExplicitBool, ///< Condition in an explicit(bool) specifier.
+ CCEK_Noexcept, ///< Condition in a noexcept(bool) specifier.
+ CCEK_StaticAssertMessageSize, ///< Call to size() in a static assert
+ ///< message.
+ CCEK_StaticAssertMessageData, ///< Call to data() in a static assert
+ ///< message.
+ };
+
+ ExprResult BuildConvertedConstantExpression(Expr *From, QualType T,
+ CCEKind CCE,
+ NamedDecl *Dest = nullptr);
+
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE,
NamedDecl *Dest = nullptr);
+ ExprResult
+ EvaluateConvertedConstantExpression(Expr *E, QualType T, APValue &Value,
+ CCEKind CCE, bool RequireInt,
+ const APValue &PreNarrowingValue);
+
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
@@ -3626,16 +4060,15 @@ public:
using ADLCallKind = CallExpr::ADLCallKind;
- void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
- ArrayRef<Expr *> Args,
- OverloadCandidateSet &CandidateSet,
- bool SuppressUserConversions = false,
- bool PartialOverloading = false,
- bool AllowExplicit = true,
- bool AllowExplicitConversion = false,
- ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
- ConversionSequenceList EarlyConversions = None,
- OverloadCandidateParamOrder PO = {});
+ void AddOverloadCandidate(
+ FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args,
+ OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
+ bool PartialOverloading = false, bool AllowExplicit = true,
+ bool AllowExplicitConversion = false,
+ ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
+ ConversionSequenceList EarlyConversions = std::nullopt,
+ OverloadCandidateParamOrder PO = {},
+ bool AggregateCandidateDeduction = false);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
@@ -3650,16 +4083,15 @@ public:
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
- void AddMethodCandidate(CXXMethodDecl *Method,
- DeclAccessPair FoundDecl,
- CXXRecordDecl *ActingContext, QualType ObjectType,
- Expr::Classification ObjectClassification,
- ArrayRef<Expr *> Args,
- OverloadCandidateSet& CandidateSet,
- bool SuppressUserConversions = false,
- bool PartialOverloading = false,
- ConversionSequenceList EarlyConversions = None,
- OverloadCandidateParamOrder PO = {});
+ void
+ AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
+ CXXRecordDecl *ActingContext, QualType ObjectType,
+ Expr::Classification ObjectClassification,
+ ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
+ bool SuppressUserConversions = false,
+ bool PartialOverloading = false,
+ ConversionSequenceList EarlyConversions = std::nullopt,
+ OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
@@ -3677,7 +4109,8 @@ public:
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
- OverloadCandidateParamOrder PO = {});
+ OverloadCandidateParamOrder PO = {},
+ bool AggregateCandidateDeduction = false);
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
@@ -3725,7 +4158,7 @@ public:
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
- NamedDecl *Found, FunctionDecl *Fn,
+ const NamedDecl *Found, const FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
@@ -3795,23 +4228,18 @@ public:
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
- FunctionDecl *
- ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
- bool Complain = false,
- DeclAccessPair *Found = nullptr);
+ FunctionDecl *ResolveSingleFunctionTemplateSpecialization(
+ OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr,
+ TemplateSpecCandidateSet *FailedTSC = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
- ExprResult &SrcExpr,
- bool DoFunctionPointerConverion = false,
- bool Complain = false,
- SourceRange OpRangeForComplaining = SourceRange(),
- QualType DestTypeForComplaining = QualType(),
- unsigned DiagIDForComplaining = 0);
+ ExprResult &SrcExpr, bool DoFunctionPointerConversion = false,
+ bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(),
+ QualType DestTypeForComplaining = QualType(),
+ unsigned DiagIDForComplaining = 0);
-
- Expr *FixOverloadedFunctionReference(Expr *E,
- DeclAccessPair FoundDecl,
- FunctionDecl *Fn);
+ ExprResult FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl,
+ FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
@@ -3881,13 +4309,15 @@ public:
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
- SourceLocation RLoc,
- Expr *Base,Expr *Idx);
+ SourceLocation RLoc, Expr *Base,
+ MultiExprArg Args);
ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
+ Expr *ExecConfig = nullptr,
+ bool IsExecConfig = false,
bool AllowRecovery = false);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
@@ -4006,7 +4436,7 @@ public:
ForExternalRedeclaration
};
- RedeclarationKind forRedeclarationInCurContext() {
+ RedeclarationKind forRedeclarationInCurContext() const {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
@@ -4111,8 +4541,8 @@ public:
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID);
- bool LookupName(LookupResult &R, Scope *S,
- bool AllowBuiltinCreation = false);
+ bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false,
+ bool ForceNoCPlusPlus = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
@@ -4160,7 +4590,7 @@ public:
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
- FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
+ FunctionEmissionStatus getEmissionStatus(const FunctionDecl *Decl,
bool Final = false);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
@@ -4258,6 +4688,10 @@ public:
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
+ bool CheckRedeclarationExported(NamedDecl *New, NamedDecl *Old);
+ bool CheckRedeclarationInModule(NamedDecl *New, NamedDecl *Old);
+ bool IsRedefinitionInModule(const NamedDecl *New,
+ const NamedDecl *Old) const;
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
@@ -4289,8 +4723,38 @@ public:
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
- void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
- bool IncludeCXX11Attributes = true);
+
+ // Options for ProcessDeclAttributeList().
+ struct ProcessDeclAttributeOptions {
+ ProcessDeclAttributeOptions()
+ : IncludeCXX11Attributes(true), IgnoreTypeAttributes(false) {}
+
+ ProcessDeclAttributeOptions WithIncludeCXX11Attributes(bool Val) {
+ ProcessDeclAttributeOptions Result = *this;
+ Result.IncludeCXX11Attributes = Val;
+ return Result;
+ }
+
+ ProcessDeclAttributeOptions WithIgnoreTypeAttributes(bool Val) {
+ ProcessDeclAttributeOptions Result = *this;
+ Result.IgnoreTypeAttributes = Val;
+ return Result;
+ }
+
+ // Should C++11 attributes be processed?
+ bool IncludeCXX11Attributes;
+
+ // Should any type attributes encountered be ignored?
+ // If this option is false, a diagnostic will be emitted for any type
+ // attributes of a kind that does not "slide" from the declaration to
+ // the decl-specifier-seq.
+ bool IgnoreTypeAttributes;
+ };
+
+ void ProcessDeclAttributeList(Scope *S, Decl *D,
+ const ParsedAttributesView &AttrList,
+ const ProcessDeclAttributeOptions &Options =
+ ProcessDeclAttributeOptions());
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
@@ -4300,8 +4764,10 @@ public:
/// such as checking whether a parameter was properly specified, or the
/// correct number of arguments were passed, etc. Returns true if the
/// attribute has been diagnosed.
- bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A);
- bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A);
+ bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A,
+ bool SkipArgCountCheck = false);
+ bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A,
+ bool SkipArgCountCheck = false);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
@@ -4310,27 +4776,49 @@ public:
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
+
+ /// Check validaty of calling convention attribute \p attr. If \p FD
+ /// is not null pointer, use \p FD to determine the CUDA/HIP host/device
+ /// target. Otherwise, it is specified by \p CFT.
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
- const FunctionDecl *FD = nullptr);
+ const FunctionDecl *FD = nullptr,
+ CUDAFunctionTarget CFT = CFT_InvalidTarget);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
+ bool checkStringLiteralArgumentAttr(const AttributeCommonInfo &CI,
+ const Expr *E, StringRef &Str,
+ SourceLocation *ArgLocation = nullptr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
llvm::Error isValidSectionSpecifier(StringRef Str);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
+ bool checkTargetVersionAttr(SourceLocation LiteralLoc, StringRef &Str,
+ bool &isDefault);
+ bool
+ checkTargetClonesAttrString(SourceLocation LiteralLoc, StringRef Str,
+ const StringLiteral *Literal, bool &HasDefault,
+ bool &HasCommas, bool &HasNotDefault,
+ SmallVectorImpl<SmallString<64>> &StringsBuffer);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
+ bool CheckNoInlineAttr(const Stmt *OrigSt, const Stmt *CurSt,
+ const AttributeCommonInfo &A);
+ bool CheckAlwaysInlineAttr(const Stmt *OrigSt, const Stmt *CurSt,
+ const AttributeCommonInfo &A);
+
+ bool CheckCountedByAttr(Scope *Scope, const FieldDecl *FD);
+
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
- void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
- SourceLocation Loc);
+ void adjustMemberFunctionCC(QualType &T, bool HasThisPointer,
+ bool IsCtorOrDtor, SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
@@ -4341,10 +4829,32 @@ public:
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
+ /// Check whether a nullability type specifier can be added to the given
+ /// type through some means not written in source (e.g. API notes).
+ ///
+ /// \param Type The type to which the nullability specifier will be
+ /// added. On success, this type will be updated appropriately.
+ ///
+ /// \param Nullability The nullability specifier to add.
+ ///
+ /// \param DiagLoc The location to use for diagnostics.
+ ///
+ /// \param AllowArrayTypes Whether to accept nullability specifiers on an
+ /// array type (e.g., because it will decay to a pointer).
+ ///
+ /// \param OverrideExisting Whether to override an existing, locally-specified
+ /// nullability specifier rather than complaining about the conflict.
+ ///
+ /// \returns true if nullability cannot be applied, false otherwise.
+ bool CheckImplicitNullabilityTypeSpecifier(QualType &Type,
+ NullabilityKind Nullability,
+ SourceLocation DiagLoc,
+ bool AllowArrayTypes,
+ bool OverrideExisting);
+
/// Process the attributes before creating an attributed statement. Returns
/// the semantic attributes that have been processed.
- void ProcessStmtAttributes(Stmt *Stmt,
- const ParsedAttributesWithRange &InAttrs,
+ void ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributes &InAttrs,
SmallVectorImpl<const Attr *> &OutAttrs);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
@@ -4686,15 +5196,16 @@ public:
StmtResult BuildAttributedStmt(SourceLocation AttrsLoc,
ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
- StmtResult ActOnAttributedStmt(const ParsedAttributesWithRange &AttrList,
+ StmtResult ActOnAttributedStmt(const ParsedAttributes &AttrList,
Stmt *SubStmt);
class ConditionResult;
- StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
+
+ StmtResult ActOnIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
- StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
+ StmtResult BuildIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
@@ -4799,7 +5310,8 @@ public:
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
- StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
+ StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
+ bool AllowRecovery = false);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
NamedReturnInfo &NRInfo,
bool SupressSimplerImplicitMoves);
@@ -4891,15 +5403,21 @@ public:
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
+ typedef llvm::function_ref<void(SourceLocation Loc, PartialDiagnostic PD)>
+ DiagReceiverTy;
+
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
- void DiagnoseUnusedExprResult(const Stmt *S);
+ void DiagnoseUnusedExprResult(const Stmt *S, unsigned DiagID);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
+ void DiagnoseUnusedNestedTypedefs(const RecordDecl *D,
+ DiagReceiverTy DiagReceiver);
void DiagnoseUnusedDecl(const NamedDecl *ND);
+ void DiagnoseUnusedDecl(const NamedDecl *ND, DiagReceiverTy DiagReceiver);
/// If VD is set but not otherwise used, diagnose, for a parameter or a
/// variable.
- void DiagnoseUnusedButSetDecl(const VarDecl *VD);
+ void DiagnoseUnusedButSetDecl(const VarDecl *VD, DiagReceiverTy DiagReceiver);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
@@ -4920,6 +5438,11 @@ public:
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
+ /// Returns a field in a CXXRecordDecl that has the same name as the decl \p
+ /// SelfAssigned when inside a CXXMethodDecl.
+ const FieldDecl *
+ getSelfAssignmentClassMemberCandidate(const ValueDecl *SelfAssigned);
+
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
@@ -4963,17 +5486,28 @@ public:
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
+ // A version of DiagnoseUseOfDecl that should be used if overload resolution
+ // has been used to find this declaration, which means we don't have to bother
+ // checking the trailing requires clause.
+ bool DiagnoseUseOfOverloadedDecl(NamedDecl *D, SourceLocation Loc) {
+ return DiagnoseUseOfDecl(
+ D, Loc, /*UnknownObjCClass=*/nullptr, /*ObjCPropertyAccess=*/false,
+ /*AvoidPartialAvailabilityChecks=*/false, /*ClassReceiver=*/nullptr,
+ /*SkipTrailingRequiresClause=*/true);
+ }
+
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
- ObjCInterfaceDecl *ClassReciever = nullptr);
+ ObjCInterfaceDecl *ClassReciever = nullptr,
+ bool SkipTrailingRequiresClause = false);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
- void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
+ void DiagnoseSentinelCalls(const NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
@@ -4990,6 +5524,7 @@ public:
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
+ TypeSourceInfo *TransformToPotentiallyEvaluated(TypeSourceInfo *TInfo);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
@@ -5016,7 +5551,7 @@ public:
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
- void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
+ void MarkCaptureUsedInEnclosingContext(ValueDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
@@ -5059,30 +5594,31 @@ public:
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
- bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
- SourceLocation EllipsisLoc, bool BuildAndDiagnose,
- QualType &CaptureType,
+ bool tryCaptureVariable(ValueDecl *Var, SourceLocation Loc,
+ TryCaptureKind Kind, SourceLocation EllipsisLoc,
+ bool BuildAndDiagnose, QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
- bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
+ bool tryCaptureVariable(ValueDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
- bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
+ bool NeedToCaptureVariable(ValueDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
- QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
+ QualType getCapturedDeclRefType(ValueDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
- void MarkDeclarationsReferencedInExpr(Expr *E,
- bool SkipLocalVariables = false);
+ void MarkDeclarationsReferencedInExpr(
+ Expr *E, bool SkipLocalVariables = false,
+ ArrayRef<const Expr *> StopAt = std::nullopt);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
@@ -5099,6 +5635,16 @@ public:
/// conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);
+ /// Conditionally issue a diagnostic based on the statements's reachability
+ /// analysis.
+ ///
+ /// \param Stmts If Stmts is non-empty, delay reporting the diagnostic until
+ /// the function body is parsed, and then do a basic reachability analysis to
+ /// determine if the statement is reachable. If it is unreachable, the
+ /// diagnostic will not be emitted.
+ bool DiagIfReachable(SourceLocation Loc, ArrayRef<const Stmt *> Stmts,
+ const PartialDiagnostic &PD);
+
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
@@ -5127,13 +5673,15 @@ public:
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
- bool DiagnoseDependentMemberLookup(LookupResult &R);
+ bool DiagnoseDependentMemberLookup(const LookupResult &R);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
- ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
+ ArrayRef<Expr *> Args = std::nullopt,
+ DeclContext *LookupCtx = nullptr,
+ TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
@@ -5221,8 +5769,12 @@ public:
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
- ExprResult BuildPredefinedExpr(SourceLocation Loc,
- PredefinedExpr::IdentKind IK);
+ // ExpandFunctionLocalPredefinedMacros - Returns a new vector of Tokens,
+ // where Tokens representing function local predefined macros (such as
+ // __FUNCTION__) are replaced (expanded) with string-literal Tokens.
+ std::vector<Token> ExpandFunctionLocalPredefinedMacros(ArrayRef<Token> Toks);
+
+ ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedIdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
@@ -5250,30 +5802,54 @@ public:
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
+ ExprResult ActOnUnevaluatedStringLiteral(ArrayRef<Token> StringToks);
+
+ /// ControllingExprOrType is either an opaque pointer coming out of a
+ /// ParsedType or an Expr *. FIXME: it'd be better to split this interface
+ /// into two so we don't take a void *, but that's awkward because one of
+ /// the operands is either a ParsedType or an Expr *, which doesn't lend
+ /// itself to generic code very well.
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
- Expr *ControllingExpr,
+ bool PredicateIsExpr,
+ void *ControllingExprOrType,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
+ /// ControllingExprOrType is either a TypeSourceInfo * or an Expr *. FIXME:
+ /// it'd be better to split this interface into two so we don't take a
+ /// void *, but see the FIXME on ActOnGenericSelectionExpr as to why that
+ /// isn't a trivial change.
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
- Expr *ControllingExpr,
+ bool PredicateIsExpr,
+ void *ControllingExprOrType,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
- Expr *InputExpr);
- ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
- UnaryOperatorKind Opc, Expr *Input);
- ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
- tok::TokenKind Op, Expr *Input);
+ Expr *InputExpr, bool IsAfterAmp = false);
+ ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc,
+ Expr *Input, bool IsAfterAmp = false);
+ ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op,
+ Expr *Input, bool IsAfterAmp = false);
bool isQualifiedMemberAccess(Expr *E);
+ bool CheckUseOfCXXMethodAsAddressOfOperand(SourceLocation OpLoc,
+ const Expr *Op,
+ const CXXMethodDecl *MD);
+
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
+ bool CheckTypeTraitArity(unsigned Arity, SourceLocation Loc, size_t N);
+
+ bool ActOnAlignasTypeArgument(StringRef KWName, ParsedType Ty,
+ SourceLocation OpLoc, SourceRange R);
+ bool CheckAlignasTypeArgument(StringRef KWName, TypeSourceInfo *TInfo,
+ SourceLocation OpLoc, SourceRange R);
+
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
@@ -5292,7 +5868,8 @@ public:
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
- UnaryExprOrTypeTrait ExprKind);
+ UnaryExprOrTypeTrait ExprKind,
+ StringRef KWName);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
@@ -5302,7 +5879,8 @@ public:
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
- Expr *Idx, SourceLocation RLoc);
+ MultiExprArg ArgExprs,
+ SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
@@ -5556,14 +6134,14 @@ public:
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
- // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
- // __builtin_COLUMN()
- ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
+ // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FUNCSIG(),
+ // __builtin_FILE(), __builtin_COLUMN(), __builtin_source_location()
+ ExprResult ActOnSourceLocExpr(SourceLocIdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
- ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
+ ExprResult BuildSourceLocExpr(SourceLocIdentKind Kind, QualType ResultTy,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
@@ -5644,6 +6222,12 @@ public:
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
+ //===---------------------------- HLSL Features -------------------------===//
+ Decl *ActOnStartHLSLBuffer(Scope *BufferScope, bool CBuffer,
+ SourceLocation KwLoc, IdentifierInfo *Ident,
+ SourceLocation IdentLoc, SourceLocation LBrace);
+ void ActOnFinishHLSLBuffer(Decl *Dcl, SourceLocation RBrace);
+
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
@@ -5652,17 +6236,18 @@ public:
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
- UsingDirectiveDecl *&UsingDecl);
+ UsingDirectiveDecl *&UsingDecl, bool IsNested);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
- NamespaceDecl *lookupStdExperimentalNamespace();
-
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
+ ValueDecl *tryLookupUnambiguousFieldDecl(RecordDecl *ClassDecl,
+ const IdentifierInfo *MemberOrBase);
+
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
@@ -5755,7 +6340,8 @@ public:
NamedDecl *BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation EnumLoc,
- SourceLocation NameLoc, EnumDecl *ED);
+ SourceLocation NameLoc,
+ TypeSourceInfo *EnumType, EnumDecl *ED);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
@@ -5775,7 +6361,9 @@ public:
const ParsedAttributesView &AttrList);
Decl *ActOnUsingEnumDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
- SourceLocation EnumLoc, const DeclSpec &);
+ SourceLocation EnumLoc,
+ SourceLocation IdentLoc, IdentifierInfo &II,
+ CXXScopeSpec *SS = nullptr);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
@@ -5786,36 +6374,33 @@ public:
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
- ExprResult
- BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
- NamedDecl *FoundDecl,
- CXXConstructorDecl *Constructor, MultiExprArg Exprs,
- bool HadMultipleCandidates, bool IsListInitialization,
- bool IsStdInitListInitialization,
- bool RequiresZeroInit, unsigned ConstructKind,
- SourceRange ParenRange);
+ ExprResult BuildCXXConstructExpr(
+ SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl,
+ CXXConstructorDecl *Constructor, MultiExprArg Exprs,
+ bool HadMultipleCandidates, bool IsListInitialization,
+ bool IsStdInitListInitialization, bool RequiresZeroInit,
+ CXXConstructionKind ConstructKind, SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
- ExprResult
- BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
- CXXConstructorDecl *Constructor, bool Elidable,
- MultiExprArg Exprs,
- bool HadMultipleCandidates, bool IsListInitialization,
- bool IsStdInitListInitialization,
- bool RequiresZeroInit, unsigned ConstructKind,
- SourceRange ParenRange);
+ ExprResult BuildCXXConstructExpr(
+ SourceLocation ConstructLoc, QualType DeclInitType,
+ CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs,
+ bool HadMultipleCandidates, bool IsListInitialization,
+ bool IsStdInitListInitialization, bool RequiresZeroInit,
+ CXXConstructionKind ConstructKind, SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
- ExprResult
- BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
- NamedDecl *FoundDecl,
- CXXConstructorDecl *Constructor, bool Elidable,
- MultiExprArg Exprs, bool HadMultipleCandidates,
- bool IsListInitialization,
- bool IsStdInitListInitialization, bool RequiresZeroInit,
- unsigned ConstructKind, SourceRange ParenRange);
+ ExprResult BuildCXXConstructExpr(
+ SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl,
+ CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs,
+ bool HadMultipleCandidates, bool IsListInitialization,
+ bool IsStdInitListInitialization, bool RequiresZeroInit,
+ CXXConstructionKind ConstructKind, SourceRange ParenRange);
+
+ ExprResult ConvertMemberDefaultInitExpression(FieldDecl *FD, Expr *InitExpr,
+ SourceLocation InitLoc);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
@@ -5823,13 +6408,13 @@ public:
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
- ParmVarDecl *Param);
+ ParmVarDecl *Param, Expr *Init = nullptr,
+ bool SkipImmediateInvocations = true);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
- ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
- FunctionDecl *FD,
- ParmVarDecl *Param);
+ ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
+ ParmVarDecl *Param, Expr *Init = nullptr);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
@@ -5909,7 +6494,7 @@ public:
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
- ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
+ ExprResult ActOnNoexceptSpec(Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
@@ -6072,6 +6657,13 @@ public:
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
+ bool CheckImmediateEscalatingFunctionDefinition(
+ FunctionDecl *FD, const sema::FunctionScopeInfo *FSI);
+
+ void MarkExpressionAsImmediateEscalating(Expr *E);
+
+ void DiagnoseImmediateEscalatingReason(FunctionDecl *FD);
+
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
QualType DeclInitType, MultiExprArg ArgsPtr,
SourceLocation Loc,
@@ -6086,11 +6678,9 @@ public:
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
- ParsedType getDestructorName(SourceLocation TildeLoc,
- IdentifierInfo &II, SourceLocation NameLoc,
+ ParsedType getDestructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
- ParsedType ObjectType,
- bool EnteringContext);
+ ParsedType ObjectType, bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
@@ -6105,6 +6695,13 @@ public:
// AltiVecPixel and AltiVecBool when -faltivec-src-compat=xl is specified.
bool ShouldSplatAltivecScalarInCast(const VectorType *VecTy);
+ // Checks if the -faltivec-src-compat=gcc option is specified.
+ // If so, AltiVecVector, AltiVecBool and AltiVecPixel types are
+ // treated the same way as they are when trying to initialize
+ // these vectors on gcc (an error is emitted).
+ bool CheckAltivecInitFromScalar(SourceRange R, QualType VecTy,
+ QualType SrcTy);
+
/// ActOnCXXNamedCast - Parse
/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
@@ -6170,7 +6767,7 @@ public:
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
- Optional<unsigned> NumExpansions);
+ std::optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
@@ -6277,16 +6874,12 @@ public:
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
- ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
- SourceLocation PlacementLParen,
- MultiExprArg PlacementArgs,
- SourceLocation PlacementRParen,
- SourceRange TypeIdParens,
- QualType AllocType,
- TypeSourceInfo *AllocTypeInfo,
- Optional<Expr *> ArraySize,
- SourceRange DirectInitRange,
- Expr *Initializer);
+ ExprResult
+ BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen,
+ MultiExprArg PlacementArgs, SourceLocation PlacementRParen,
+ SourceRange TypeIdParens, QualType AllocType,
+ TypeSourceInfo *AllocTypeInfo, std::optional<Expr *> ArraySize,
+ SourceRange DirectInitRange, Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
@@ -6327,8 +6920,9 @@ public:
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
- DeclarationName Name, FunctionDecl* &Operator,
- bool Diagnose = true);
+ DeclarationName Name, FunctionDecl *&Operator,
+ bool Diagnose = true, bool WantSize = false,
+ bool WantAligned = false);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
@@ -6431,7 +7025,8 @@ public:
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
- bool DiscardedValue, bool IsConstexpr = false);
+ bool DiscardedValue, bool IsConstexpr = false,
+ bool IsTemplateArgument = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
@@ -6503,9 +7098,6 @@ public:
}
};
- bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
- NestedNameSpecInfo &IdInfo);
-
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
@@ -6530,9 +7122,6 @@ public:
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
- /// \param ErrorRecoveryLookup If true, then this method is called to improve
- /// error recovery. In this case do not emit error message.
- ///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
@@ -6544,7 +7133,6 @@ public:
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
- bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
@@ -6558,6 +7146,8 @@ public:
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
+ bool IsInvalidSMECallConversion(QualType FromType, QualType ToType);
+
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
@@ -6647,33 +7237,37 @@ public:
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
- bool KnownDependent,
+ unsigned LambdaDependencyKind,
LambdaCaptureDefault CaptureDefault);
- /// Start the definition of a lambda expression.
- CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
- SourceRange IntroducerRange,
- TypeSourceInfo *MethodType,
- SourceLocation EndLoc,
- ArrayRef<ParmVarDecl *> Params,
- ConstexprSpecKind ConstexprKind,
- Expr *TrailingRequiresClause);
-
/// Number lambda for linkage purposes if necessary.
- void handleLambdaNumbering(
- CXXRecordDecl *Class, CXXMethodDecl *Method,
- Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None);
+ void handleLambdaNumbering(CXXRecordDecl *Class, CXXMethodDecl *Method,
+ std::optional<CXXRecordDecl::LambdaNumbering>
+ NumberingOverride = std::nullopt);
/// Endow the lambda scope info with the relevant properties.
- void buildLambdaScope(sema::LambdaScopeInfo *LSI,
- CXXMethodDecl *CallOperator,
+ void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
- SourceLocation CaptureDefaultLoc,
- bool ExplicitParams,
- bool ExplicitResultType,
+ SourceLocation CaptureDefaultLoc, bool ExplicitParams,
bool Mutable);
+ CXXMethodDecl *CreateLambdaCallOperator(SourceRange IntroducerRange,
+ CXXRecordDecl *Class);
+
+ void AddTemplateParametersToLambdaCallOperator(
+ CXXMethodDecl *CallOperator, CXXRecordDecl *Class,
+ TemplateParameterList *TemplateParams);
+
+ void CompleteLambdaCallOperator(
+ CXXMethodDecl *Method, SourceLocation LambdaLoc,
+ SourceLocation CallOperatorLoc, Expr *TrailingRequiresClause,
+ TypeSourceInfo *MethodTyInfo, ConstexprSpecKind ConstexprKind,
+ StorageClass SC, ArrayRef<ParmVarDecl *> Params,
+ bool HasExplicitResultType);
+
+ void DiagnoseInvalidExplicitObjectParameterInLambda(CXXMethodDecl *Method);
+
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
@@ -6681,54 +7275,62 @@ public:
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
- Loc, ByRef, EllipsisLoc, None, Id,
+ Loc, ByRef, EllipsisLoc, std::nullopt, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
- Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
- Expr *&Init);
+ std::optional<unsigned> NumExpansions, IdentifierInfo *Id,
+ bool DirectInit, Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
- VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
- QualType InitCaptureType,
- SourceLocation EllipsisLoc,
- IdentifierInfo *Id,
- unsigned InitStyle, Expr *Init);
+ VarDecl *createLambdaInitCaptureVarDecl(
+ SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc,
+ IdentifierInfo *Id, unsigned InitStyle, Expr *Init, DeclContext *DeclCtx);
/// Add an init-capture to a lambda scope.
- void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
+ void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var, bool ByRef);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
- /// \brief This is called after parsing the explicit template parameter list
+ /// Deduce a block or lambda's return type based on the return
+ /// statements present in the body.
+ void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
+
+ /// Once the Lambdas capture are known, we can start to create the closure,
+ /// call operator method, and keep track of the captures.
+ /// We do the capture lookup here, but they are not actually captured until
+ /// after we know what the qualifiers of the call operator are.
+ void ActOnLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro,
+ Scope *CurContext);
+
+ /// This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
- void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
+ void ActOnLambdaExplicitTemplateParameterList(LambdaIntroducer &Intro,
+ SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc,
ExprResult RequiresClause);
- /// Introduce the lambda parameters into scope.
- void addLambdaParameters(
- ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
- CXXMethodDecl *CallOperator, Scope *CurScope);
+ void ActOnLambdaClosureQualifiers(LambdaIntroducer &Intro,
+ SourceLocation MutableLoc);
- /// Deduce a block or lambda's return type based on the return
- /// statements present in the body.
- void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
+ void ActOnLambdaClosureParameters(
+ Scope *LambdaScope,
+ MutableArrayRef<DeclaratorChunk::ParamInfo> ParamInfo);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
- Declarator &ParamInfo, Scope *CurScope);
+ Declarator &ParamInfo, const DeclSpec &DS);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
@@ -6737,8 +7339,7 @@ public:
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
- ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
- Scope *CurScope);
+ ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
@@ -6792,6 +7393,17 @@ public:
CXXConversionDecl *Conv,
Expr *Src);
+ sema::LambdaScopeInfo *RebuildLambdaScopeInfo(CXXMethodDecl *CallOperator);
+
+ class LambdaScopeForCallOperatorInstantiationRAII
+ : private FunctionScopeRAII {
+ public:
+ LambdaScopeForCallOperatorInstantiationRAII(
+ Sema &SemasRef, FunctionDecl *FD, MultiLevelTemplateArgumentList MLTAL,
+ LocalInstantiationScope &Scope,
+ bool ShouldAddDeclsFromParentScope = true);
+ };
+
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
@@ -6815,7 +7427,88 @@ private:
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
+ /// Introduce the instantiated local variables into the local
+ /// instantiation scope.
+ void addInstantiatedLocalVarsToScope(FunctionDecl *Function,
+ const FunctionDecl *PatternDecl,
+ LocalInstantiationScope &Scope);
+ /// Introduce the instantiated function parameters into the local
+ /// instantiation scope, and set the parameter names to those used
+ /// in the template.
+ bool addInstantiatedParametersToScope(
+ FunctionDecl *Function, const FunctionDecl *PatternDecl,
+ LocalInstantiationScope &Scope,
+ const MultiLevelTemplateArgumentList &TemplateArgs);
+
+ /// Introduce the instantiated captures of the lambda into the local
+ /// instantiation scope.
+ bool addInstantiatedCapturesToScope(
+ FunctionDecl *Function, const FunctionDecl *PatternDecl,
+ LocalInstantiationScope &Scope,
+ const MultiLevelTemplateArgumentList &TemplateArgs);
+
+ /// used by SetupConstraintCheckingTemplateArgumentsAndScope to recursively(in
+ /// the case of lambdas) set up the LocalInstantiationScope of the current
+ /// function.
+ bool SetupConstraintScope(
+ FunctionDecl *FD, std::optional<ArrayRef<TemplateArgument>> TemplateArgs,
+ MultiLevelTemplateArgumentList MLTAL, LocalInstantiationScope &Scope);
+
+ /// Used during constraint checking, sets up the constraint template argument
+ /// lists, and calls SetupConstraintScope to set up the
+ /// LocalInstantiationScope to have the proper set of ParVarDecls configured.
+ std::optional<MultiLevelTemplateArgumentList>
+ SetupConstraintCheckingTemplateArgumentsAndScope(
+ FunctionDecl *FD, std::optional<ArrayRef<TemplateArgument>> TemplateArgs,
+ LocalInstantiationScope &Scope);
+
+private:
+ // The current stack of constraint satisfactions, so we can exit-early.
+ using SatisfactionStackEntryTy =
+ std::pair<const NamedDecl *, llvm::FoldingSetNodeID>;
+ llvm::SmallVector<SatisfactionStackEntryTy, 10>
+ SatisfactionStack;
+
public:
+ void PushSatisfactionStackEntry(const NamedDecl *D,
+ const llvm::FoldingSetNodeID &ID) {
+ const NamedDecl *Can = cast<NamedDecl>(D->getCanonicalDecl());
+ SatisfactionStack.emplace_back(Can, ID);
+ }
+
+ void PopSatisfactionStackEntry() { SatisfactionStack.pop_back(); }
+
+ bool SatisfactionStackContains(const NamedDecl *D,
+ const llvm::FoldingSetNodeID &ID) const {
+ const NamedDecl *Can = cast<NamedDecl>(D->getCanonicalDecl());
+ return llvm::find(SatisfactionStack,
+ SatisfactionStackEntryTy{Can, ID}) !=
+ SatisfactionStack.end();
+ }
+
+ // Resets the current SatisfactionStack for cases where we are instantiating
+ // constraints as a 'side effect' of normal instantiation in a way that is not
+ // indicative of recursive definition.
+ class SatisfactionStackResetRAII {
+ llvm::SmallVector<SatisfactionStackEntryTy, 10>
+ BackupSatisfactionStack;
+ Sema &SemaRef;
+
+ public:
+ SatisfactionStackResetRAII(Sema &S) : SemaRef(S) {
+ SemaRef.SwapSatisfactionStack(BackupSatisfactionStack);
+ }
+
+ ~SatisfactionStackResetRAII() {
+ SemaRef.SwapSatisfactionStack(BackupSatisfactionStack);
+ }
+ };
+
+ void SwapSatisfactionStack(
+ llvm::SmallVectorImpl<SatisfactionStackEntryTy> &NewSS) {
+ SatisfactionStack.swap(NewSS);
+ }
+
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
@@ -6828,8 +7521,8 @@ public:
/// at least constrained than D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
- bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
- NamedDecl *D2, ArrayRef<const Expr *> AC2,
+ bool IsAtLeastAsConstrained(NamedDecl *D1, MutableArrayRef<const Expr *> AC1,
+ NamedDecl *D2, MutableArrayRef<const Expr *> AC2,
bool &Result);
/// If D1 was not at least as constrained as D2, but would've been if a pair
@@ -6845,8 +7538,8 @@ public:
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
- /// \param TemplateArgs the list of template arguments to substitute into the
- /// constraint expression.
+ /// \param TemplateArgLists the list of template arguments to substitute into
+ /// the constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
@@ -6856,13 +7549,46 @@ public:
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
- ArrayRef<TemplateArgument> TemplateArgs,
+ const MultiLevelTemplateArgumentList &TemplateArgLists,
+ SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction) {
+ llvm::SmallVector<Expr *, 4> Converted;
+ return CheckConstraintSatisfaction(Template, ConstraintExprs, Converted,
+ TemplateArgLists, TemplateIDRange,
+ Satisfaction);
+ }
+
+ /// \brief Check whether the given list of constraint expressions are
+ /// satisfied (as if in a 'conjunction') given template arguments.
+ /// Additionally, takes an empty list of Expressions which is populated with
+ /// the instantiated versions of the ConstraintExprs.
+ /// \param Template the template-like entity that triggered the constraints
+ /// check (either a concept or a constrained entity).
+ /// \param ConstraintExprs a list of constraint expressions, treated as if
+ /// they were 'AND'ed together.
+ /// \param ConvertedConstraints a out parameter that will get populated with
+ /// the instantiated version of the ConstraintExprs if we successfully checked
+ /// satisfaction.
+ /// \param TemplateArgList the multi-level list of template arguments to
+ /// substitute into the constraint expression. This should be relative to the
+ /// top-level (hence multi-level), since we need to instantiate fully at the
+ /// time of checking.
+ /// \param TemplateIDRange The source range of the template id that
+ /// caused the constraints check.
+ /// \param Satisfaction if true is returned, will contain details of the
+ /// satisfaction, with enough information to diagnose an unsatisfied
+ /// expression.
+ /// \returns true if an error occurred and satisfaction could not be checked,
+ /// false otherwise.
+ bool CheckConstraintSatisfaction(
+ const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
+ llvm::SmallVectorImpl<Expr *> &ConvertedConstraints,
+ const MultiLevelTemplateArgumentList &TemplateArgList,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
- /// occured and satisfaction could not be determined.
+ /// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
@@ -6871,13 +7597,13 @@ public:
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true if
- /// an error occured and satisfaction could not be determined.
+ /// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
- SourceLocation UsageLoc = SourceLocation());
-
+ SourceLocation UsageLoc = SourceLocation(),
+ bool ForOverloadResolution = false);
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
@@ -6892,9 +7618,10 @@ public:
///
/// \returns true if the constrains are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
- bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
- ArrayRef<TemplateArgument> TemplateArgs,
- SourceRange TemplateIDRange);
+ bool EnsureTemplateArgumentListConstraints(
+ TemplateDecl *Template,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
@@ -7052,8 +7779,9 @@ public:
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
- bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
- ArrayRef<CXXCtorInitializer *> Initializers = None);
+ bool SetCtorInitializers(
+ CXXConstructorDecl *Constructor, bool AnyErrors,
+ ArrayRef<CXXCtorInitializer *> Initializers = std::nullopt);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
@@ -7178,15 +7906,17 @@ public:
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
+ bool EvaluateStaticAssertMessageAsString(Expr *Message, std::string &Result,
+ ASTContext &Ctx,
+ bool ErrorOnInvalidMessage);
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
- Expr *AssertExpr,
- StringLiteral *AssertMessageExpr,
- SourceLocation RParenLoc,
- bool Failed);
+ Expr *AssertExpr, Expr *AssertMessageExpr,
+ SourceLocation RParenLoc, bool Failed);
+ void DiagnoseStaticAssertDetails(const Expr *E);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
@@ -7205,14 +7935,15 @@ public:
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
- void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
+ bool CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
- CXXSpecialMember CSM);
+ CXXSpecialMember CSM,
+ SourceLocation DefaultLoc);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
@@ -7222,6 +7953,13 @@ public:
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
+ void CheckExplicitObjectMemberFunction(Declarator &D, DeclarationName Name,
+ QualType R, bool IsLambda,
+ DeclContext *DC = nullptr);
+ void CheckExplicitObjectMemberFunction(DeclContext *DC, Declarator &D,
+ DeclarationName Name, QualType R);
+ void CheckExplicitObjectLambda(Declarator &D);
+
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
@@ -7233,11 +7971,9 @@ public:
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
- BaseResult ActOnBaseSpecifier(Decl *classdecl,
- SourceRange SpecifierRange,
- ParsedAttributes &Attrs,
- bool Virtual, AccessSpecifier Access,
- ParsedType basetype,
+ BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
+ const ParsedAttributesView &Attrs, bool Virtual,
+ AccessSpecifier Access, ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
@@ -7275,6 +8011,10 @@ public:
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
+ // Check that the overriding method has no explicit object parameter.
+ bool CheckExplicitObjectOverride(CXXMethodDecl *New,
+ const CXXMethodDecl *Old);
+
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
@@ -7342,10 +8082,16 @@ public:
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
+ AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr,
+ const SourceRange &,
+ DeclAccessPair FoundDecl);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
+ AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr,
+ ArrayRef<Expr *> ArgExprs,
+ DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
@@ -7439,17 +8185,17 @@ public:
RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
: TemplateKW(TemplateKWLoc) {}
/// Template name is unconditionally required.
- RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}
+ RequiredTemplateKind(TemplateNameIsRequiredTag) {}
SourceLocation getTemplateKeywordLoc() const {
- return TemplateKW.getValueOr(SourceLocation());
+ return TemplateKW.value_or(SourceLocation());
}
bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
bool isRequired() const { return TemplateKW != SourceLocation(); }
explicit operator bool() const { return isRequired(); }
private:
- llvm::Optional<SourceLocation> TemplateKW;
+ std::optional<SourceLocation> TemplateKW;
};
enum class AssumedTemplateKind {
@@ -7494,7 +8240,7 @@ public:
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
- SourceLocation NameLoc,
+ SourceLocation NameLoc, CXXScopeSpec &SS,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
@@ -7524,6 +8270,8 @@ public:
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
+ bool CheckTypeConstraint(TemplateIdAnnotation *TypeConstraint);
+
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
@@ -7542,7 +8290,8 @@ public:
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
- NonTypeTemplateParmDecl *ConstrainedParameter,
+ NonTypeTemplateParmDecl *NewConstrainedParm,
+ NonTypeTemplateParmDecl *OrigConstrainedParm,
SourceLocation EllipsisLoc);
bool RequireStructuralType(QualType T, SourceLocation Loc);
@@ -7634,7 +8383,9 @@ public:
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
- bool IsCtorOrDtorName = false, bool IsClassName = false);
+ bool IsCtorOrDtorName = false, bool IsClassName = false,
+ ImplicitTypenameContext AllowImplicitTypename =
+ ImplicitTypenameContext::No);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
@@ -7724,9 +8475,9 @@ public:
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
- bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
- const TemplateArgumentListInfo &ExplicitTemplateArgs,
- LookupResult &Previous);
+ bool CheckDependentFunctionTemplateSpecialization(
+ FunctionDecl *FD, const TemplateArgumentListInfo *ExplicitTemplateArgs,
+ LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
@@ -7753,14 +8504,13 @@ public:
SourceLocation TemplateLoc,
Declarator &D);
- TemplateArgumentLoc
- SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
- SourceLocation TemplateLoc,
- SourceLocation RAngleLoc,
- Decl *Param,
- SmallVectorImpl<TemplateArgument>
- &Converted,
- bool &HasDefaultArg);
+ TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(
+ TemplateDecl *Template, SourceLocation TemplateLoc,
+ SourceLocation RAngleLoc, Decl *Param,
+ ArrayRef<TemplateArgument> SugaredConverted,
+ ArrayRef<TemplateArgument> CanonicalConverted, bool &HasDefaultArg);
+
+ SourceLocation getTopMostPointOfInstantiation(const NamedDecl *) const;
/// Specifies the context in which a particular template
/// argument is being checked.
@@ -7778,16 +8528,15 @@ public:
CTAK_DeducedFromArrayBound
};
- bool CheckTemplateArgument(NamedDecl *Param,
- TemplateArgumentLoc &Arg,
- NamedDecl *Template,
- SourceLocation TemplateLoc,
- SourceLocation RAngleLoc,
- unsigned ArgumentPackIndex,
- SmallVectorImpl<TemplateArgument> &Converted,
- CheckTemplateArgumentKind CTAK = CTAK_Specified);
-
- /// Check that the given template arguments can be be provided to
+ bool
+ CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg,
+ NamedDecl *Template, SourceLocation TemplateLoc,
+ SourceLocation RAngleLoc, unsigned ArgumentPackIndex,
+ SmallVectorImpl<TemplateArgument> &SugaredConverted,
+ SmallVectorImpl<TemplateArgument> &CanonicalConverted,
+ CheckTemplateArgumentKind CTAK);
+
+ /// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
@@ -7811,39 +8560,45 @@ public:
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
- /// \param ConstraintsNotSatisfied If provided, and an error occured, will
+ /// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
- bool CheckTemplateArgumentList(TemplateDecl *Template,
- SourceLocation TemplateLoc,
- TemplateArgumentListInfo &TemplateArgs,
- bool PartialTemplateArgs,
- SmallVectorImpl<TemplateArgument> &Converted,
- bool UpdateArgsWithConversions = true,
- bool *ConstraintsNotSatisfied = nullptr);
-
- bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
- TemplateArgumentLoc &Arg,
- SmallVectorImpl<TemplateArgument> &Converted);
+ bool CheckTemplateArgumentList(
+ TemplateDecl *Template, SourceLocation TemplateLoc,
+ TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs,
+ SmallVectorImpl<TemplateArgument> &SugaredConverted,
+ SmallVectorImpl<TemplateArgument> &CanonicalConverted,
+ bool UpdateArgsWithConversions = true,
+ bool *ConstraintsNotSatisfied = nullptr);
+
+ bool CheckTemplateTypeArgument(
+ TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg,
+ SmallVectorImpl<TemplateArgument> &SugaredConverted,
+ SmallVectorImpl<TemplateArgument> &CanonicalConverted);
bool CheckTemplateArgument(TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
- TemplateArgument &Converted,
- CheckTemplateArgumentKind CTAK = CTAK_Specified);
+ TemplateArgument &SugaredConverted,
+ TemplateArgument &CanonicalConverted,
+ CheckTemplateArgumentKind CTAK);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
+ void NoteTemplateLocation(const NamedDecl &Decl,
+ std::optional<SourceRange> ParamRange = {});
+ void NoteTemplateParameterLocation(const NamedDecl &Decl);
+
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
- BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
- SourceLocation Loc);
+ BuildExpressionFromNonTypeTemplateArgument(const TemplateArgument &Arg,
+ SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
@@ -7876,15 +8631,83 @@ public:
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
- TPL_TemplateTemplateArgumentMatch
+ TPL_TemplateTemplateArgumentMatch,
+
+ /// We are determining whether the template-parameters are equivalent
+ /// according to C++ [temp.over.link]/6. This comparison does not consider
+ /// constraints.
+ ///
+ /// \code
+ /// template<C1 T> void f(T);
+ /// template<C2 T> void f(T);
+ /// \endcode
+ TPL_TemplateParamsEquivalent,
+ };
+
+ // A struct to represent the 'new' declaration, which is either itself just
+ // the named decl, or the important information we need about it in order to
+ // do constraint comparisons.
+ class TemplateCompareNewDeclInfo {
+ const NamedDecl *ND = nullptr;
+ const DeclContext *DC = nullptr;
+ const DeclContext *LexicalDC = nullptr;
+ SourceLocation Loc;
+
+ public:
+ TemplateCompareNewDeclInfo(const NamedDecl *ND) : ND(ND) {}
+ TemplateCompareNewDeclInfo(const DeclContext *DeclCtx,
+ const DeclContext *LexicalDeclCtx,
+ SourceLocation Loc)
+
+ : DC(DeclCtx), LexicalDC(LexicalDeclCtx), Loc(Loc) {
+ assert(DC && LexicalDC &&
+ "Constructor only for cases where we have the information to put "
+ "in here");
+ }
+
+ // If this was constructed with no information, we cannot do substitution
+ // for constraint comparison, so make sure we can check that.
+ bool isInvalid() const { return !ND && !DC; }
+
+ const NamedDecl *getDecl() const { return ND; }
+
+ bool ContainsDecl(const NamedDecl *ND) const { return this->ND == ND; }
+
+ const DeclContext *getLexicalDeclContext() const {
+ return ND ? ND->getLexicalDeclContext() : LexicalDC;
+ }
+
+ const DeclContext *getDeclContext() const {
+ return ND ? ND->getDeclContext() : DC;
+ }
+
+ SourceLocation getLocation() const { return ND ? ND->getLocation() : Loc; }
};
- bool TemplateParameterListsAreEqual(TemplateParameterList *New,
- TemplateParameterList *Old,
- bool Complain,
- TemplateParameterListEqualKind Kind,
- SourceLocation TemplateArgLoc
- = SourceLocation());
+ bool TemplateParameterListsAreEqual(
+ const TemplateCompareNewDeclInfo &NewInstFrom, TemplateParameterList *New,
+ const NamedDecl *OldInstFrom, TemplateParameterList *Old, bool Complain,
+ TemplateParameterListEqualKind Kind,
+ SourceLocation TemplateArgLoc = SourceLocation());
+
+ bool TemplateParameterListsAreEqual(
+ TemplateParameterList *New, TemplateParameterList *Old, bool Complain,
+ TemplateParameterListEqualKind Kind,
+ SourceLocation TemplateArgLoc = SourceLocation()) {
+ return TemplateParameterListsAreEqual(nullptr, New, nullptr, Old, Complain,
+ Kind, TemplateArgLoc);
+ }
+
+ // Calculates whether two constraint expressions are equal irrespective of a
+ // difference in 'depth'. This takes a pair of optional 'NamedDecl's 'Old' and
+ // 'New', which are the "source" of the constraint, since this is necessary
+ // for figuring out the relative 'depth' of the constraint. The depth of the
+ // 'primary template' and the 'instantiated from' templates aren't necessarily
+ // the same, such as a case when one is a 'friend' defined in a class.
+ bool AreConstraintExpressionsEqual(const NamedDecl *Old,
+ const Expr *OldConstr,
+ const TemplateCompareNewDeclInfo &New,
+ const Expr *NewConstr);
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
@@ -7896,10 +8719,11 @@ public:
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
- TypeResult
- ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
- const CXXScopeSpec &SS, const IdentifierInfo &II,
- SourceLocation IdLoc);
+ /// \param IsImplicitTypename context where T::type refers to a type.
+ TypeResult ActOnTypenameType(
+ Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS,
+ const IdentifierInfo &II, SourceLocation IdLoc,
+ ImplicitTypenameContext IsImplicitTypename = ImplicitTypenameContext::No);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
@@ -7967,6 +8791,9 @@ public:
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
+ void CheckConceptRedefinition(ConceptDecl *NewDecl, LookupResult &Previous,
+ bool &AddToScope);
+
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
@@ -7998,11 +8825,13 @@ public:
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
concepts::NestedRequirement *
- BuildNestedRequirement(
- concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
+ BuildNestedRequirement(StringRef InvalidConstraintEntity,
+ const ASTConstraintSatisfaction &Satisfaction);
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
+ SourceLocation LParenLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
+ SourceLocation RParenLoc,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
@@ -8065,6 +8894,9 @@ public:
/// The type of an exception.
UPPC_ExceptionType,
+ /// Explicit specialization.
+ UPPC_ExplicitSpecialization,
+
/// Partial specialization.
UPPC_PartialSpecialization,
@@ -8249,14 +9081,13 @@ public:
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
- Optional<unsigned> NumExpansions);
+ std::optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
- QualType CheckPackExpansion(QualType Pattern,
- SourceRange PatternRange,
+ QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange,
SourceLocation EllipsisLoc,
- Optional<unsigned> NumExpansions);
+ std::optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
@@ -8275,7 +9106,7 @@ public:
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
- Optional<unsigned> NumExpansions);
+ std::optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
@@ -8311,13 +9142,11 @@ public:
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
- bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
- SourceRange PatternRange,
- ArrayRef<UnexpandedParameterPack> Unexpanded,
- const MultiLevelTemplateArgumentList &TemplateArgs,
- bool &ShouldExpand,
- bool &RetainExpansion,
- Optional<unsigned> &NumExpansions);
+ bool CheckParameterPacksForExpansion(
+ SourceLocation EllipsisLoc, SourceRange PatternRange,
+ ArrayRef<UnexpandedParameterPack> Unexpanded,
+ const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand,
+ bool &RetainExpansion, std::optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
@@ -8326,8 +9155,8 @@ public:
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
- Optional<unsigned> getNumArgumentsInExpansion(QualType T,
- const MultiLevelTemplateArgumentList &TemplateArgs);
+ std::optional<unsigned> getNumArgumentsInExpansion(
+ QualType T, const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
@@ -8355,9 +9184,8 @@ public:
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
- TemplateArgumentLoc OrigLoc,
- SourceLocation &Ellipsis,
- Optional<unsigned> &NumExpansions) const;
+ TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis,
+ std::optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
@@ -8365,7 +9193,7 @@ public:
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
- Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
+ std::optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
@@ -8439,7 +9267,9 @@ public:
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
- TDK_CUDATargetMismatch
+ TDK_CUDATargetMismatch,
+ /// Some error which was already diagnosed.
+ TDK_AlreadyDiagnosed
};
TemplateDeductionResult
@@ -8487,7 +9317,8 @@ public:
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
- bool PartialOverloading,
+ bool PartialOverloading, bool AggregateDeductionCandidate,
+ QualType ObjectType, Expr::Classification ObjectClassification,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
@@ -8498,11 +9329,10 @@ public:
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
- TemplateDeductionResult
- DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
- QualType ToType,
- CXXConversionDecl *&Specialization,
- sema::TemplateDeductionInfo &Info);
+ TemplateDeductionResult DeduceTemplateArguments(
+ FunctionTemplateDecl *FunctionTemplate, QualType ObjectType,
+ Expr::Classification ObjectClassification, QualType ToType,
+ CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
@@ -8516,35 +9346,42 @@ public:
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
+
+ // Substitute auto in TypeWithAuto for a Dependent auto type
+ QualType SubstAutoTypeDependent(QualType TypeWithAuto);
+
+ // Substitute auto in TypeWithAuto for a Dependent auto type
+ TypeSourceInfo *
+ SubstAutoTypeSourceInfoDependent(TypeSourceInfo *TypeWithAuto);
+
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
- /// Result type of DeduceAutoType.
- enum DeduceAutoResult {
- DAR_Succeeded,
- DAR_Failed,
- DAR_FailedAlreadyDiagnosed
- };
-
- DeduceAutoResult
- DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
- Optional<unsigned> DependentDeductionDepth = None,
- bool IgnoreConstraints = false);
- DeduceAutoResult
- DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
- Optional<unsigned> DependentDeductionDepth = None,
- bool IgnoreConstraints = false);
+ TemplateDeductionResult
+ DeduceAutoType(TypeLoc AutoTypeLoc, Expr *Initializer, QualType &Result,
+ sema::TemplateDeductionInfo &Info,
+ bool DependentDeduction = false,
+ bool IgnoreConstraints = false,
+ TemplateSpecCandidateSet *FailedTSC = nullptr);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
+ bool CheckIfFunctionSpecializationIsImmediate(FunctionDecl *FD,
+ SourceLocation Loc);
+
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
+ FunctionTemplateDecl *DeclareImplicitDeductionGuideFromInitList(
+ TemplateDecl *Template, MutableArrayRef<QualType> ParamTypes,
+ SourceLocation Loc);
+ llvm::DenseMap<unsigned, CXXDeductionGuideDecl *>
+ AggregateDeductionCandidates;
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
@@ -8558,8 +9395,8 @@ public:
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
- SourceLocation ReturnLoc,
- Expr *&RetExpr, AutoType *AT);
+ SourceLocation ReturnLoc, Expr *RetExpr,
+ const AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
@@ -8613,11 +9450,12 @@ public:
// C++ Template Instantiation
//
- MultiLevelTemplateArgumentList
- getTemplateInstantiationArgs(NamedDecl *D,
- const TemplateArgumentList *Innermost = nullptr,
- bool RelativeToPrimary = false,
- const FunctionDecl *Pattern = nullptr);
+ MultiLevelTemplateArgumentList getTemplateInstantiationArgs(
+ const NamedDecl *D, const DeclContext *DC = nullptr, bool Final = false,
+ const TemplateArgumentList *Innermost = nullptr,
+ bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr,
+ bool ForConstraintInstantiation = false,
+ bool SkipForSpecialization = false);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
@@ -8652,6 +9490,9 @@ public:
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
+ /// We are substituting into a lambda expression.
+ LambdaExpressionSubstitution,
+
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
@@ -8699,6 +9540,9 @@ public:
// We are normalizing a constraint expression.
ConstraintNormalization,
+ // Instantiating a Requires Expression parameter clause.
+ RequirementParameterInstantiation,
+
// We are substituting into the parameter mapping of an atomic constraint
// during normalization.
ParameterMappingSubstitution,
@@ -8712,11 +9556,18 @@ public:
/// We are marking a class as __dllexport.
MarkingClassDllexported,
+ /// We are building an implied call from __builtin_dump_struct. The
+ /// arguments are in CallArgs.
+ BuildingBuiltinDumpStructCall,
+
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
- Memoization
+ Memoization,
+
+ /// We are building deduction guides for a class.
+ BuildingDeductionGuides,
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
@@ -8733,9 +9584,14 @@ public:
/// arguments.
NamedDecl *Template;
- /// The list of template arguments we are substituting, if they
- /// are not part of the entity.
- const TemplateArgument *TemplateArgs;
+ union {
+ /// The list of template arguments we are substituting, if they
+ /// are not part of the entity.
+ const TemplateArgument *TemplateArgs;
+
+ /// The list of argument expressions in a synthesized call.
+ const Expr *const *CallArgs;
+ };
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
@@ -8743,6 +9599,9 @@ public:
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
+ /// The number of expressions in CallArgs.
+ unsigned NumCallArgs;
+
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
@@ -9016,6 +9875,18 @@ public:
concepts::NestedRequirement *Req, ConstraintsCheck,
SourceRange InstantiationRange = SourceRange());
+ /// \brief Note that we are checking a requires clause.
+ InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
+ const RequiresExpr *E,
+ sema::TemplateDeductionInfo &DeductionInfo,
+ SourceRange InstantiationRange);
+
+ struct BuildingDeductionGuidesTag {};
+ /// \brief Note that we are building deduction guides.
+ InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
+ TemplateDecl *Entity, BuildingDeductionGuidesTag,
+ SourceRange InstantiationRange = SourceRange());
+
/// Note that we have finished instantiating this template.
void Clear();
@@ -9040,7 +9911,7 @@ public:
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
- ArrayRef<TemplateArgument> TemplateArgs = None,
+ ArrayRef<TemplateArgument> TemplateArgs = std::nullopt,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
@@ -9078,14 +9949,81 @@ public:
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
- Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
+ std::optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
+
+ /// Whether the AST is currently being rebuilt to correct immediate
+ /// invocations. Immediate invocation candidates and references to consteval
+ /// functions aren't tracked when this is set.
+ bool RebuildingImmediateInvocation = false;
+
+ /// Used to change context to isConstantEvaluated without pushing a heavy
+ /// ExpressionEvaluationContextRecord object.
+ bool isConstantEvaluatedOverride = false;
+
+ const ExpressionEvaluationContextRecord &currentEvaluationContext() const {
+ assert(!ExprEvalContexts.empty() &&
+ "Must be in an expression evaluation context");
+ return ExprEvalContexts.back();
+ };
+
+ bool isConstantEvaluatedContext() const {
+ return currentEvaluationContext().isConstantEvaluated() ||
+ isConstantEvaluatedOverride;
+ }
+
+ bool isAlwaysConstantEvaluatedContext() const {
+ const ExpressionEvaluationContextRecord &Ctx = currentEvaluationContext();
+ return (Ctx.isConstantEvaluated() || isConstantEvaluatedOverride) &&
+ !Ctx.InConditionallyConstantEvaluateContext;
+ }
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
+ return currentEvaluationContext().isUnevaluated();
+ }
+
+ bool isImmediateFunctionContext() const {
+ return currentEvaluationContext().isImmediateFunctionContext();
+ }
+
+ bool isCheckingDefaultArgumentOrInitializer() const {
+ const ExpressionEvaluationContextRecord &Ctx = currentEvaluationContext();
+ return (Ctx.Context ==
+ ExpressionEvaluationContext::PotentiallyEvaluatedIfUsed) ||
+ Ctx.IsCurrentlyCheckingDefaultArgumentOrInitializer;
+ }
+
+ std::optional<ExpressionEvaluationContextRecord::InitializationContext>
+ InnermostDeclarationWithDelayedImmediateInvocations() const {
+ assert(!ExprEvalContexts.empty() &&
+ "Must be in an expression evaluation context");
+ for (const auto &Ctx : llvm::reverse(ExprEvalContexts)) {
+ if (Ctx.Context == ExpressionEvaluationContext::PotentiallyEvaluated &&
+ Ctx.DelayedDefaultInitializationContext)
+ return Ctx.DelayedDefaultInitializationContext;
+ if (Ctx.isConstantEvaluated() || Ctx.isImmediateFunctionContext() ||
+ Ctx.isUnevaluated())
+ break;
+ }
+ return std::nullopt;
+ }
+
+ std::optional<ExpressionEvaluationContextRecord::InitializationContext>
+ OutermostDeclarationWithDelayedImmediateInvocations() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
- return ExprEvalContexts.back().isUnevaluated();
+ std::optional<ExpressionEvaluationContextRecord::InitializationContext> Res;
+ for (auto &Ctx : llvm::reverse(ExprEvalContexts)) {
+ if (Ctx.Context == ExpressionEvaluationContext::PotentiallyEvaluated &&
+ !Ctx.DelayedDefaultInitializationContext && Res)
+ break;
+ if (Ctx.isConstantEvaluated() || Ctx.isImmediateFunctionContext() ||
+ Ctx.isUnevaluated())
+ break;
+ Res = Ctx.DelayedDefaultInitializationContext;
+ }
+ return Res;
}
/// RAII class used to determine whether SFINAE has
@@ -9188,14 +10126,21 @@ public:
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
+ SmallVector<SmallVector<VTableUse, 16>, 8> SavedVTableUses;
+ SmallVector<std::deque<PendingImplicitInstantiation>, 8>
+ SavedPendingInstantiations;
+
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
- SavedPendingInstantiations.swap(S.PendingInstantiations);
- SavedVTableUses.swap(S.VTableUses);
+ S.SavedPendingInstantiations.emplace_back();
+ S.SavedPendingInstantiations.back().swap(S.PendingInstantiations);
+
+ S.SavedVTableUses.emplace_back();
+ S.SavedVTableUses.back().swap(S.VTableUses);
}
void perform() {
@@ -9211,26 +10156,28 @@ public:
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
- S.VTableUses.swap(SavedVTableUses);
+ S.VTableUses.swap(S.SavedVTableUses.back());
+ S.SavedVTableUses.pop_back();
// Restore the set of pending implicit instantiations.
if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
- S.PendingInstantiations.swap(SavedPendingInstantiations);
+ S.PendingInstantiations.swap(S.SavedPendingInstantiations.back());
+ S.SavedPendingInstantiations.pop_back();
} else {
// Template instantiations in the PCH may be delayed until the TU.
- S.PendingInstantiations.swap(SavedPendingInstantiations);
- S.PendingInstantiations.insert(S.PendingInstantiations.end(),
- SavedPendingInstantiations.begin(),
- SavedPendingInstantiations.end());
+ S.PendingInstantiations.swap(S.SavedPendingInstantiations.back());
+ S.PendingInstantiations.insert(
+ S.PendingInstantiations.end(),
+ S.SavedPendingInstantiations.back().begin(),
+ S.SavedPendingInstantiations.back().end());
+ S.SavedPendingInstantiations.pop_back();
}
}
private:
Sema &S;
- SmallVector<VTableUse, 16> SavedVTableUses;
- std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
@@ -9307,32 +10254,54 @@ public:
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
- TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
- const MultiLevelTemplateArgumentList &TemplateArgs,
- SourceLocation Loc,
- DeclarationName Entity,
- CXXRecordDecl *ThisContext,
- Qualifiers ThisTypeQuals);
+ TypeSourceInfo *SubstFunctionDeclType(
+ TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs,
+ SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext,
+ Qualifiers ThisTypeQuals, bool EvaluateConstraints = true);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
- ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
- const MultiLevelTemplateArgumentList &TemplateArgs,
- int indexAdjustment,
- Optional<unsigned> NumExpansions,
- bool ExpectParameterPack);
+ ParmVarDecl *
+ SubstParmVarDecl(ParmVarDecl *D,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ int indexAdjustment, std::optional<unsigned> NumExpansions,
+ bool ExpectParameterPack, bool EvaluateConstraints = true);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
+ bool SubstDefaultArgument(SourceLocation Loc, ParmVarDecl *Param,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ bool ForCallExpr = false);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
+ // A RAII type used by the TemplateDeclInstantiator and TemplateInstantiator
+ // to disable constraint evaluation, then restore the state.
+ template <typename InstTy> struct ConstraintEvalRAII {
+ InstTy &TI;
+ bool OldValue;
+
+ ConstraintEvalRAII(InstTy &TI)
+ : TI(TI), OldValue(TI.getEvaluateConstraints()) {
+ TI.setEvaluateConstraints(false);
+ }
+ ~ConstraintEvalRAII() { TI.setEvaluateConstraints(OldValue); }
+ };
+
+ // Must be used instead of SubstExpr at 'constraint checking' time.
+ ExprResult
+ SubstConstraintExpr(Expr *E,
+ const MultiLevelTemplateArgumentList &TemplateArgs);
+ // Unlike the above, this does not evaluates constraints.
+ ExprResult SubstConstraintExprWithoutSatisfaction(
+ Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs);
+
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
@@ -9355,14 +10324,14 @@ public:
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
- const MultiLevelTemplateArgumentList &TemplateArgs);
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ bool EvaluateConstraints = true);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
-
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
@@ -9412,6 +10381,7 @@ public:
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
+ void updateAttrsForLateParsedTemplate(const Decl *Pattern, Decl *Inst);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
@@ -9451,9 +10421,10 @@ public:
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
- bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
- TemplateArgumentListInfo &Result,
- const MultiLevelTemplateArgumentList &TemplateArgs);
+
+ bool SubstTypeConstraint(TemplateTypeParmDecl *Inst, const TypeConstraint *TC,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ bool EvaluateConstraint);
bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
@@ -9503,6 +10474,9 @@ public:
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
+ ExplicitSpecifier instantiateExplicitSpecifier(
+ const MultiLevelTemplateArgumentList &TemplateArgs, ExplicitSpecifier ES);
+
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
@@ -9535,14 +10509,14 @@ public:
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
- Decl *ActOnStartClassInterface(
+ ObjCInterfaceDecl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
- const ParsedAttributesView &AttrList);
+ const ParsedAttributesView &AttrList, SkipBodyInfo *SkipBody);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
@@ -9569,13 +10543,14 @@ public:
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
- Decl *ActOnStartProtocolInterface(
+ ObjCProtocolDecl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
- SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
+ SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList,
+ SkipBodyInfo *SkipBody);
- Decl *ActOnStartCategoryInterface(
+ ObjCCategoryDecl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
@@ -9583,19 +10558,15 @@ public:
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
- Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
- IdentifierInfo *ClassName,
- SourceLocation ClassLoc,
- IdentifierInfo *SuperClassname,
- SourceLocation SuperClassLoc,
- const ParsedAttributesView &AttrList);
+ ObjCImplementationDecl *ActOnStartClassImplementation(
+ SourceLocation AtClassImplLoc, IdentifierInfo *ClassName,
+ SourceLocation ClassLoc, IdentifierInfo *SuperClassname,
+ SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList);
- Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
- IdentifierInfo *ClassName,
- SourceLocation ClassLoc,
- IdentifierInfo *CatName,
- SourceLocation CatLoc,
- const ParsedAttributesView &AttrList);
+ ObjCCategoryImplDecl *ActOnStartCategoryImplementation(
+ SourceLocation AtCatImplLoc, IdentifierInfo *ClassName,
+ SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc,
+ const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
@@ -9669,16 +10640,12 @@ public:
bool FailOnError = false);
/// Build an Objective-C object pointer type.
- QualType BuildObjCObjectType(QualType BaseType,
- SourceLocation Loc,
- SourceLocation TypeArgsLAngleLoc,
- ArrayRef<TypeSourceInfo *> TypeArgs,
- SourceLocation TypeArgsRAngleLoc,
- SourceLocation ProtocolLAngleLoc,
- ArrayRef<ObjCProtocolDecl *> Protocols,
- ArrayRef<SourceLocation> ProtocolLocs,
- SourceLocation ProtocolRAngleLoc,
- bool FailOnError = false);
+ QualType BuildObjCObjectType(
+ QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc,
+ ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc,
+ SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols,
+ ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc,
+ bool FailOnError, bool Rebuilding);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
@@ -9703,8 +10670,8 @@ public:
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
- ArrayRef<Decl *> allMethods = None,
- ArrayRef<DeclGroupPtrTy> allTUVars = None);
+ ArrayRef<Decl *> allMethods = std::nullopt,
+ ArrayRef<DeclGroupPtrTy> allTUVars = std::nullopt);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
@@ -9950,6 +10917,12 @@ public:
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaAlignPack();
+ /// ActOnPragmaMSStrictGuardStackCheck - Called on well formed \#pragma
+ /// strict_gs_check.
+ void ActOnPragmaMSStrictGuardStackCheck(SourceLocation PragmaLocation,
+ PragmaMsStackAction Action,
+ bool Value);
+
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
@@ -9998,9 +10971,18 @@ public:
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
+ /// Called on well-formed \#pragma alloc_text().
+ void ActOnPragmaMSAllocText(
+ SourceLocation PragmaLocation, StringRef Section,
+ const SmallVector<std::tuple<IdentifierInfo *, SourceLocation>>
+ &Functions);
+
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
+ /// Called on #pragma clang __debug dump E
+ void ActOnPragmaDump(Expr *E);
+
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
@@ -10013,6 +10995,9 @@ public:
!CurFPFeatures.getAllowApproxFunc();
}
+ void ActOnPragmaFPEvalMethod(SourceLocation Loc,
+ LangOptions::FPEvalMethodKind Value);
+
/// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
PragmaFloatControlKind Value);
@@ -10026,9 +11011,9 @@ public:
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
- NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
+ NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, const IdentifierInfo *II,
SourceLocation Loc);
- void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
+ void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, const WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
@@ -10057,18 +11042,26 @@ public:
/// Called on well formed
/// \#pragma clang fp reassociate
- void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled);
+ /// or
+ /// \#pragma clang fp reciprocal
+ void ActOnPragmaFPValueChangingOption(SourceLocation Loc, PragmaFPKind Kind,
+ bool IsEnabled);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);
+ /// ActOnPragmaCXLimitedRange - Called on well formed
+ /// \#pragma STDC CX_LIMITED_RANGE
+ void ActOnPragmaCXLimitedRange(SourceLocation Loc,
+ LangOptions::ComplexRangeKind Range);
+
/// Called on well formed '\#pragma clang fp' that has option 'exceptions'.
void ActOnPragmaFPExceptions(SourceLocation Loc,
LangOptions::FPExceptionModeKind);
/// Called to set constant rounding mode for floating point operations.
- void setRoundingMode(SourceLocation Loc, llvm::RoundingMode);
+ void ActOnPragmaFEnvRound(SourceLocation Loc, llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind);
@@ -10120,6 +11113,14 @@ public:
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
+ /// #pragma optimize("[optimization-list]", on | off).
+ void ActOnPragmaMSOptimize(SourceLocation Loc, bool IsOn);
+
+ /// Call on well formed \#pragma function.
+ void
+ ActOnPragmaMSFunction(SourceLocation Loc,
+ const llvm::SmallVectorImpl<StringRef> &NoBuiltins);
+
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
@@ -10131,11 +11132,26 @@ public:
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
+ /// Only called on function definitions; if there is a `#pragma alloc_text`
+ /// that decides which code section the function should be in, add
+ /// attribute section to the function.
+ void AddSectionMSAllocText(FunctionDecl *FD);
+
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
+ /// Only called on function definitions; if there is a MSVC #pragma optimize
+ /// in scope, consider changing the function's attributes based on the
+ /// optimization list passed to the pragma.
+ void ModifyFnAttributesMSPragmaOptimize(FunctionDecl *FD);
+
+ /// Only called on function definitions; if there is a pragma in scope
+ /// with the effect of a range-based no_builtin, consider marking the function
+ /// with attribute no_builtin.
+ void AddImplicitMSFunctionNoBuiltinAttr(FunctionDecl *FD);
+
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
@@ -10160,10 +11176,23 @@ public:
void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Annot, MutableArrayRef<Expr *> Args);
+ /// ConstantFoldAttrArgs - Folds attribute arguments into ConstantExprs
+ /// (unless they are value dependent or type dependent). Returns false
+ /// and emits a diagnostic if one or more of the arguments could not be
+ /// folded into a constant.
+ bool ConstantFoldAttrArgs(const AttributeCommonInfo &CI,
+ MutableArrayRef<Expr *> Args);
+
+ /// Create an CUDALaunchBoundsAttr attribute.
+ CUDALaunchBoundsAttr *CreateLaunchBoundsAttr(const AttributeCommonInfo &CI,
+ Expr *MaxThreads,
+ Expr *MinBlocks,
+ Expr *MaxBlocks);
+
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
- Expr *MaxThreads, Expr *MinBlocks);
+ Expr *MaxThreads, Expr *MinBlocks, Expr *MaxBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
@@ -10176,11 +11205,21 @@ public:
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
+ /// Create an AMDGPUWavesPerEUAttr attribute.
+ AMDGPUFlatWorkGroupSizeAttr *
+ CreateAMDGPUFlatWorkGroupSizeAttr(const AttributeCommonInfo &CI, Expr *Min,
+ Expr *Max);
+
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
+ /// Create an AMDGPUWavesPerEUAttr attribute.
+ AMDGPUWavesPerEUAttr *
+ CreateAMDGPUWavesPerEUAttr(const AttributeCommonInfo &CI, Expr *Min,
+ Expr *Max);
+
/// addAMDGPUWavePersEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
@@ -10189,7 +11228,7 @@ public:
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
- // C++ Coroutines TS
+ // C++ Coroutines
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
@@ -10197,10 +11236,13 @@ public:
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
- ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
- bool IsImplicit = false);
- ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
- UnresolvedLookupExpr* Lookup);
+ ExprResult BuildOperatorCoawaitLookupExpr(Scope *S, SourceLocation Loc);
+ ExprResult BuildOperatorCoawaitCall(SourceLocation Loc, Expr *E,
+ UnresolvedLookupExpr *Lookup);
+ ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *Operand,
+ Expr *Awaiter, bool IsImplicit = false);
+ ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *Operand,
+ UnresolvedLookupExpr *Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
@@ -10208,6 +11250,19 @@ public:
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
+
+ // Heuristically tells if the function is `get_return_object` member of a
+ // coroutine promise_type by matching the function name.
+ static bool CanBeGetReturnObject(const FunctionDecl *FD);
+ static bool CanBeGetReturnTypeOnAllocFailure(const FunctionDecl *FD);
+
+ // As a clang extension, enforces that a non-coroutine function must be marked
+ // with [[clang::coro_wrapper]] if it returns a type marked with
+ // [[clang::coro_return_type]].
+ // Expects that FD is not a coroutine.
+ void CheckCoroutineWrapper(FunctionDecl *FD);
+ /// Lookup 'coroutine_traits' in std namespace and std::experimental
+ /// namespace. The namespace found is recorded in Namespace.
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
/// Check that the expression co_await promise.final_suspend() shall not be
@@ -10234,6 +11289,9 @@ private:
/// The directive kind, `begin declare target` or `declare target`.
OpenMPDirectiveKind Kind;
+ /// The directive with indirect clause.
+ std::optional<Expr *> Indirect;
+
/// The directive location.
SourceLocation Loc;
@@ -10247,10 +11305,7 @@ private:
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
- ExprResult
- VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
- bool StrictlyPositive = true,
- bool SuppressExprDiags = false);
+
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
@@ -10314,6 +11369,26 @@ private:
/// All `omp assumes` we encountered so far.
SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal;
+ /// OMPD_loop is mapped to OMPD_for, OMPD_distribute or OMPD_simd depending
+ /// on the parameter of the bind clause. In the methods for the
+ /// mapped directives, check the parameters of the lastprivate clause.
+ bool checkLastPrivateForMappedDirectives(ArrayRef<OMPClause *> Clauses);
+ /// Depending on the bind clause of OMPD_loop map the directive to new
+ /// directives.
+ /// 1) loop bind(parallel) --> OMPD_for
+ /// 2) loop bind(teams) --> OMPD_distribute
+ /// 3) loop bind(thread) --> OMPD_simd
+ /// This is being handled in Sema instead of Codegen because of the need for
+ /// rigorous semantic checking in the new mapped directives.
+ bool mapLoopConstruct(llvm::SmallVector<OMPClause *> &ClausesWithoutBind,
+ ArrayRef<OMPClause *> Clauses,
+ OpenMPBindClauseKind &BindKind,
+ OpenMPDirectiveKind &Kind,
+ OpenMPDirectiveKind &PrevMappedDirective,
+ SourceLocation StartLoc, SourceLocation EndLoc,
+ const DeclarationNameInfo &DirName,
+ OpenMPDirectiveKind CancelRegion);
+
public:
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
@@ -10336,6 +11411,11 @@ public:
return !OMPDeclareVariantScopes.empty();
}
+ ExprResult
+ VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
+ bool StrictlyPositive = true,
+ bool SuppressExprDiags = false);
+
/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
@@ -10376,6 +11456,13 @@ public:
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
+
+ /// The member expression(this->fd) needs to be rebuilt in the template
+ /// instantiation to generate private copy for OpenMP when default
+ /// clause is used. The function will return true if default
+ /// cluse is used.
+ bool isOpenMPRebuildMemberExpr(ValueDecl *D);
+
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
@@ -10430,6 +11517,12 @@ public:
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
+ /// Called on well-formed '\#pragma omp metadirective' after parsing
+ /// of the associated statement.
+ StmtResult ActOnOpenMPMetaDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc);
+
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
@@ -10452,7 +11545,7 @@ public:
/// Called on well-formed '#pragma omp [begin] assume[s]'.
void ActOnOpenMPAssumesDirective(SourceLocation Loc,
OpenMPDirectiveKind DKind,
- ArrayRef<StringRef> Assumptions,
+ ArrayRef<std::string> Assumptions,
bool SkippedClauses);
/// Check if there is an active global `omp begin assumes` directive.
@@ -10510,6 +11603,7 @@ public:
QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
+ void ActOnOpenMPIteratorVarDecl(VarDecl *VD);
bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const;
const ValueDecl *getOpenMPDeclareMapperVarName() const;
@@ -10525,6 +11619,10 @@ public:
/// encountered.
void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI);
+ /// Report unterminated 'omp declare target' or 'omp begin declare target' at
+ /// the end of a compilation unit.
+ void DiagnoseUnterminatedOpenMPDeclareTarget();
+
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope,
@@ -10534,17 +11632,26 @@ public:
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
- OMPDeclareTargetDeclAttr::DevTypeTy DT);
+ DeclareTargetContextInfo &DTCI);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
+
+ /// Adds OMPDeclareTargetDeclAttr to referenced variables in declare target
+ /// directive.
+ void ActOnOpenMPDeclareTargetInitializer(Decl *D);
+
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
+
+ /// Return true if currently in OpenMP task with untied clause context.
+ bool isInOpenMPTaskUntiedContext() const;
+
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return !DeclareTargetNesting.empty();
@@ -10562,6 +11669,11 @@ public:
/// an OpenMP loop directive.
StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt);
+ /// Process a canonical OpenMP loop nest that can either be a canonical
+ /// literal loop (ForStmt or CXXForRangeStmt), or the generated loop of an
+ /// OpenMP loop transformation construct.
+ StmtResult ActOnOpenMPLoopnest(Stmt *AStmt);
+
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
@@ -10572,7 +11684,8 @@ public:
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
+ Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc,
+ OpenMPDirectiveKind PrevMappedDirective = llvm::omp::OMPD_unknown);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
@@ -10618,6 +11731,11 @@ public:
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
+ /// Called on well-formed '\#pragma omp scope' after parsing of the
+ /// associated statement.
+ StmtResult ActOnOpenMPScopeDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
@@ -10649,6 +11767,12 @@ public:
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
+ /// Called on well-formed '\#pragma omp parallel masked' after
+ /// parsing of the associated statement.
+ StmtResult ActOnOpenMPParallelMaskedDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
@@ -10663,11 +11787,19 @@ public:
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
+ /// Called on well-formed '\#pragma omp error'.
+ /// Error direcitive is allowed in both declared and excutable contexts.
+ /// Adding InExContext to identify which context is called from.
+ StmtResult ActOnOpenMPErrorDirective(ArrayRef<OMPClause *> Clauses,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ bool InExContext = true);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
- StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
+ StmtResult ActOnOpenMPTaskwaitDirective(ArrayRef<OMPClause *> Clauses,
+ SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
@@ -10733,6 +11865,26 @@ public:
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
+ /// Called on well-formed '\#pragma omp teams loop' after parsing of the
+ /// associated statement.
+ StmtResult ActOnOpenMPTeamsGenericLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
+ /// Called on well-formed '\#pragma omp target teams loop' after parsing of
+ /// the associated statement.
+ StmtResult ActOnOpenMPTargetTeamsGenericLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
+ /// Called on well-formed '\#pragma omp parallel loop' after parsing of the
+ /// associated statement.
+ StmtResult ActOnOpenMPParallelGenericLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
+ /// Called on well-formed '\#pragma omp target parallel loop' after parsing
+ /// of the associated statement.
+ StmtResult ActOnOpenMPTargetParallelGenericLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
@@ -10774,6 +11926,26 @@ public:
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
+ /// Called on well-formed '\#pragma omp masked taskloop' after parsing of the
+ /// associated statement.
+ StmtResult ActOnOpenMPMaskedTaskLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
+ /// Called on well-formed '\#pragma omp masked taskloop simd' after parsing of
+ /// the associated statement.
+ StmtResult ActOnOpenMPMaskedTaskLoopSimdDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
+ /// Called on well-formed '\#pragma omp parallel masked taskloop' after
+ /// parsing of the associated statement.
+ StmtResult ActOnOpenMPParallelMaskedTaskLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
+ /// Called on well-formed '\#pragma omp parallel masked taskloop simd' after
+ /// parsing of the associated statement.
+ StmtResult ActOnOpenMPParallelMaskedTaskLoopSimdDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
@@ -10872,6 +12044,12 @@ public:
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
+ /// Called on well-formed '\#pragma omp loop' after parsing of the
+ /// associated statement.
+ StmtResult ActOnOpenMPGenericLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
+
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
@@ -10896,11 +12074,15 @@ public:
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
- /// \returns None, if the function/variant function are not compatible with
- /// the pragma, pair of original function/variant ref expression otherwise.
- Optional<std::pair<FunctionDecl *, Expr *>>
+ /// \param NumAppendArgs The number of omp_interop_t arguments to account for
+ /// in checking.
+ /// \returns std::nullopt, if the function/variant function are not compatible
+ /// with the pragma, pair of original function/variant ref expression
+ /// otherwise.
+ std::optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
- OMPTraitInfo &TI, SourceRange SR);
+ OMPTraitInfo &TI, unsigned NumAppendArgs,
+ SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
@@ -10909,8 +12091,18 @@ public:
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The context traits associated with the function variant.
- void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
- OMPTraitInfo &TI, SourceRange SR);
+ /// \param AdjustArgsNothing The list of 'nothing' arguments.
+ /// \param AdjustArgsNeedDevicePtr The list of 'need_device_ptr' arguments.
+ /// \param AppendArgs The list of 'append_args' arguments.
+ /// \param AdjustArgsLoc The Location of an 'adjust_args' clause.
+ /// \param AppendArgsLoc The Location of an 'append_args' clause.
+ /// \param SR The SourceRange of the 'declare variant' directive.
+ void ActOnOpenMPDeclareVariantDirective(
+ FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI,
+ ArrayRef<Expr *> AdjustArgsNothing,
+ ArrayRef<Expr *> AdjustArgsNeedDevicePtr,
+ ArrayRef<OMPInteropInfo> AppendArgs, SourceLocation AdjustArgsLoc,
+ SourceLocation AppendArgsLoc, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
@@ -10938,6 +12130,10 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
+ /// Called on well-formed 'align' clause.
+ OMPClause *ActOnOpenMPAlignClause(Expr *Alignment, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
@@ -10970,12 +12166,16 @@ public:
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
- OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
+ OMPClause *ActOnOpenMPGrainsizeClause(OpenMPGrainsizeClauseModifier Modifier,
+ Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
+ SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
- OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
+ OMPClause *ActOnOpenMPNumTasksClause(OpenMPNumTasksClauseModifier Modifier,
+ Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
+ SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
@@ -10992,6 +12192,10 @@ public:
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
+ /// Called on well-formed 'when' clause.
+ OMPClause *ActOnOpenMPWhenClause(OMPTraitInfo &TI, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
@@ -11005,10 +12209,11 @@ public:
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
- OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
- SourceLocation KindLoc,
+ OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseModifier Modifier,
+ OpenMPOrderClauseKind Kind,
SourceLocation StartLoc,
SourceLocation LParenLoc,
+ SourceLocation MLoc, SourceLocation KindLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
@@ -11052,6 +12257,16 @@ public:
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
+ /// Called on well-formed 'compare' clause.
+ OMPClause *ActOnOpenMPCompareClause(SourceLocation StartLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'fail' clause.
+ OMPClause *ActOnOpenMPFailClause(SourceLocation StartLoc,
+ SourceLocation EndLoc);
+ OMPClause *ActOnOpenMPFailClause(
+ OpenMPClauseKind Kind, SourceLocation KindLoc,
+ SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
+
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
@@ -11069,12 +12284,10 @@ public:
SourceLocation EndLoc);
/// Called on well-formed 'init' clause.
- OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs,
- bool IsTarget, bool IsTargetSync,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation VarLoc,
- SourceLocation EndLoc);
+ OMPClause *
+ ActOnOpenMPInitClause(Expr *InteropVar, OMPInteropInfo &InteropInfo,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation VarLoc, SourceLocation EndLoc);
/// Called on well-formed 'use' clause.
OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
@@ -11130,16 +12343,54 @@ public:
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
- OMPClause *ActOnOpenMPVarListClause(
- OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
- const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
- CXXScopeSpec &ReductionOrMapperIdScopeSpec,
- DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
- ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
- ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
- SourceLocation ExtraModifierLoc,
- ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
- ArrayRef<SourceLocation> MotionModifiersLoc);
+ /// Called on well-formed 'at' clause.
+ OMPClause *ActOnOpenMPAtClause(OpenMPAtClauseKind Kind,
+ SourceLocation KindLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+
+ /// Called on well-formed 'severity' clause.
+ OMPClause *ActOnOpenMPSeverityClause(OpenMPSeverityClauseKind Kind,
+ SourceLocation KindLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+
+ /// Called on well-formed 'message' clause.
+ /// passing string for message.
+ OMPClause *ActOnOpenMPMessageClause(Expr *MS, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+
+ /// Data used for processing a list of variables in OpenMP clauses.
+ struct OpenMPVarListDataTy final {
+ Expr *DepModOrTailExpr = nullptr;
+ Expr *IteratorExpr = nullptr;
+ SourceLocation ColonLoc;
+ SourceLocation RLoc;
+ CXXScopeSpec ReductionOrMapperIdScopeSpec;
+ DeclarationNameInfo ReductionOrMapperId;
+ int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or
+ ///< lastprivate clause.
+ SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
+ MapTypeModifiers;
+ SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers>
+ MapTypeModifiersLoc;
+ SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers>
+ MotionModifiers;
+ SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc;
+ bool IsMapTypeImplicit = false;
+ SourceLocation ExtraModifierLoc;
+ SourceLocation OmpAllMemoryLoc;
+ SourceLocation
+ StepModifierLoc; /// 'step' modifier location for linear clause
+ };
+
+ OMPClause *ActOnOpenMPVarListClause(OpenMPClauseKind Kind,
+ ArrayRef<Expr *> Vars,
+ const OMPVarListLocTy &Locs,
+ OpenMPVarListDataTy &Data);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
@@ -11182,27 +12433,27 @@ public:
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
- ArrayRef<Expr *> UnresolvedReductions = llvm::None);
+ ArrayRef<Expr *> UnresolvedReductions = std::nullopt);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
- ArrayRef<Expr *> UnresolvedReductions = llvm::None);
+ ArrayRef<Expr *> UnresolvedReductions = std::nullopt);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
- ArrayRef<Expr *> UnresolvedReductions = llvm::None);
+ ArrayRef<Expr *> UnresolvedReductions = std::nullopt);
/// Called on well-formed 'linear' clause.
- OMPClause *
- ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
- SourceLocation StartLoc, SourceLocation LParenLoc,
- OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
- SourceLocation ColonLoc, SourceLocation EndLoc);
+ OMPClause *ActOnOpenMPLinearClause(
+ ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc,
+ SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind,
+ SourceLocation LinLoc, SourceLocation ColonLoc,
+ SourceLocation StepModifierLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
@@ -11230,11 +12481,12 @@ public:
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
- OMPClause *
- ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
- SourceLocation DepLoc, SourceLocation ColonLoc,
- ArrayRef<Expr *> VarList, SourceLocation StartLoc,
- SourceLocation LParenLoc, SourceLocation EndLoc);
+ OMPClause *ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data,
+ Expr *DepModifier,
+ ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
Expr *Device, SourceLocation StartLoc,
@@ -11242,15 +12494,14 @@ public:
SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
- OMPClause *
- ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
- ArrayRef<SourceLocation> MapTypeModifiersLoc,
- CXXScopeSpec &MapperIdScopeSpec,
- DeclarationNameInfo &MapperId,
- OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
- SourceLocation MapLoc, SourceLocation ColonLoc,
- ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
- ArrayRef<Expr *> UnresolvedMappers = llvm::None);
+ OMPClause *ActOnOpenMPMapClause(
+ Expr *IteratorModifier, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
+ ArrayRef<SourceLocation> MapTypeModifiersLoc,
+ CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId,
+ OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
+ SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
+ const OMPVarListLocTy &Locs, bool NoDiagnose = false,
+ ArrayRef<Expr *> UnresolvedMappers = std::nullopt);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
@@ -11281,7 +12532,7 @@ public:
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
- ArrayRef<Expr *> UnresolvedMappers = llvm::None);
+ ArrayRef<Expr *> UnresolvedMappers = std::nullopt);
/// Called on well-formed 'from' clause.
OMPClause *
ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
@@ -11289,7 +12540,7 @@ public:
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
- ArrayRef<Expr *> UnresolvedMappers = llvm::None);
+ ArrayRef<Expr *> UnresolvedMappers = std::nullopt);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
@@ -11299,6 +12550,9 @@ public:
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
+ /// Called on well-formed 'has_device_addr' clause.
+ OMPClause *ActOnOpenMPHasDeviceAddrClause(ArrayRef<Expr *> VarList,
+ const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
@@ -11325,6 +12579,34 @@ public:
SourceLocation ColonLoc,
SourceLocation EndLoc, Expr *Modifier,
ArrayRef<Expr *> Locators);
+ /// Called on a well-formed 'bind' clause.
+ OMPClause *ActOnOpenMPBindClause(OpenMPBindClauseKind Kind,
+ SourceLocation KindLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+
+ /// Called on a well-formed 'ompx_dyn_cgroup_mem' clause.
+ OMPClause *ActOnOpenMPXDynCGroupMemClause(Expr *Size, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+
+ /// Called on well-formed 'doacross' clause.
+ OMPClause *
+ ActOnOpenMPDoacrossClause(OpenMPDoacrossClauseModifier DepType,
+ SourceLocation DepLoc, SourceLocation ColonLoc,
+ ArrayRef<Expr *> VarList, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc);
+
+ /// Called on a well-formed 'ompx_attribute' clause.
+ OMPClause *ActOnOpenMPXAttributeClause(ArrayRef<const Attr *> Attrs,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+
+ /// Called on a well-formed 'ompx_bare' clause.
+ OMPClause *ActOnOpenMPXBareClause(SourceLocation StartLoc,
+ SourceLocation EndLoc);
/// The kind of conversion being performed.
enum CheckedConversionKind {
@@ -11508,6 +12790,12 @@ public:
/// extension.
IncompatibleFunctionPointer,
+ /// IncompatibleFunctionPointerStrict - The assignment is between two
+ /// function pointer types that are not identical, but are compatible,
+ /// unless compiled with -fsanitize=cfi, in which case the type mismatch
+ /// may trip an indirect call runtime check.
+ IncompatibleFunctionPointerStrict,
+
/// IncompatiblePointerSign - The assignment is between two pointers types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
@@ -11680,7 +12968,8 @@ public:
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
- Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
+ Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType,
+ BinaryOperatorKind Opc);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
@@ -11699,6 +12988,10 @@ public:
QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
+
+ QualType CheckSizelessVectorConditionalTypes(ExprResult &Cond,
+ ExprResult &LHS, ExprResult &RHS,
+ SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
@@ -11715,7 +13008,7 @@ public:
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
- bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
+ bool DiagnoseConditionalForNull(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
@@ -11725,14 +13018,24 @@ public:
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
- bool AllowBothBool, bool AllowBoolConversion);
+ bool AllowBothBool, bool AllowBoolConversion,
+ bool AllowBoolOperation, bool ReportInvalid);
QualType GetSignedVectorType(QualType V);
+ QualType GetSignedSizelessVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
+ QualType CheckSizelessVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc,
+ BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
+ // type checking for sizeless vector binary operators.
+ QualType CheckSizelessVectorOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc, bool IsCompAssign,
+ ArithConvKind OperationKind);
+
/// Type checking for matrix binary operators.
QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
@@ -11741,12 +13044,14 @@ public:
SourceLocation Loc, bool IsCompAssign);
bool isValidSveBitcast(QualType srcType, QualType destType);
+ bool isValidRVVBitcast(QualType srcType, QualType destType);
bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy);
bool areVectorTypesSameSize(QualType srcType, QualType destType);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
+ bool anyAltivecTypes(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
@@ -11898,20 +13203,22 @@ public:
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
- bool HasKnownValue;
- bool KnownValue;
+ std::optional<bool> KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
- : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
- HasKnownValue(IsConstexpr && Condition.get() &&
- !Condition.get()->isValueDependent()),
- KnownValue(HasKnownValue &&
- !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
+ : ConditionVar(ConditionVar), Condition(Condition), Invalid(false) {
+ if (IsConstexpr && Condition.get()) {
+ if (std::optional<llvm::APSInt> Val =
+ Condition.get()->getIntegerConstantExpr(S.Context)) {
+ KnownValue = !!(*Val);
+ }
+ }
+ }
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
- HasKnownValue(false), KnownValue(false) {}
+ KnownValue(std::nullopt) {}
public:
ConditionResult() : ConditionResult(false) {}
@@ -11920,11 +13227,7 @@ public:
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
- llvm::Optional<bool> getKnownValue() const {
- if (!HasKnownValue)
- return None;
- return KnownValue;
- }
+ std::optional<bool> getKnownValue() const { return KnownValue; }
};
static ConditionResult ConditionError() { return ConditionResult(true); }
@@ -11933,9 +13236,12 @@ public:
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
+ QualType PreferredConditionType(ConditionKind K) const {
+ return K == ConditionKind::Switch ? Context.IntTy : Context.BoolTy;
+ }
- ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
- Expr *SubExpr, ConditionKind CK);
+ ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr,
+ ConditionKind CK, bool MissingOK = false);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
@@ -11978,13 +13284,6 @@ public:
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
- /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
- /// the specified width and sign. If an overflow occurs, detect it and emit
- /// the specified diagnostic.
- void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
- unsigned NewWidth, bool NewSign,
- SourceLocation Loc, unsigned DiagID);
-
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
@@ -12031,10 +13330,8 @@ public:
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
- /// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
- QualType FieldTy, bool IsMsStruct,
- Expr *BitWidth, bool *ZeroWidth = nullptr);
+ QualType FieldTy, bool IsMsStruct, Expr *BitWidth);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
@@ -12053,14 +13350,14 @@ public:
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
- llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
+ llvm::DenseMap<CanonicalDeclPtr<const FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
- CanonicalDeclPtr<FunctionDecl> FD;
+ CanonicalDeclPtr<const FunctionDecl> FD;
SourceLocation Loc;
};
@@ -12074,7 +13371,7 @@ public:
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
- llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
+ llvm::DenseMap</* Callee = */ CanonicalDeclPtr<const FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
@@ -12119,8 +13416,9 @@ public:
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
- SemaDiagnosticBuilder
- diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD);
+ SemaDiagnosticBuilder diagIfOpenMPDeviceCode(SourceLocation Loc,
+ unsigned DiagID,
+ const FunctionDecl *FD);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
@@ -12136,27 +13434,20 @@ public:
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc,
- unsigned DiagID, FunctionDecl *FD);
+ unsigned DiagID,
+ const FunctionDecl *FD);
SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID,
- FunctionDecl *FD = nullptr);
+ const FunctionDecl *FD = nullptr);
SemaDiagnosticBuilder targetDiag(SourceLocation Loc,
const PartialDiagnostic &PD,
- FunctionDecl *FD = nullptr) {
+ const FunctionDecl *FD = nullptr) {
return targetDiag(Loc, PD.getDiagID(), FD) << PD;
}
- /// Check if the expression is allowed to be used in expressions for the
- /// offloading devices.
- void checkDeviceDecl(ValueDecl *D, SourceLocation Loc);
-
- enum CUDAFunctionTarget {
- CFT_Device,
- CFT_Global,
- CFT_Host,
- CFT_HostDevice,
- CFT_InvalidTarget
- };
+ /// Check if the type is allowed to be used for the current target.
+ void checkTypeSupport(QualType Ty, SourceLocation Loc,
+ ValueDecl *D = nullptr);
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
@@ -12176,6 +13467,29 @@ public:
/// Determines whether the given variable is emitted on host or device side.
CUDAVariableTarget IdentifyCUDATarget(const VarDecl *D);
+ /// Defines kinds of CUDA global host/device context where a function may be
+ /// called.
+ enum CUDATargetContextKind {
+ CTCK_Unknown, /// Unknown context
+ CTCK_InitGlobalVar, /// Function called during global variable
+ /// initialization
+ };
+
+ /// Define the current global CUDA host/device context where a function may be
+ /// called. Only used when a function is called outside of any functions.
+ struct CUDATargetContext {
+ CUDAFunctionTarget Target = CFT_HostDevice;
+ CUDATargetContextKind Kind = CTCK_Unknown;
+ Decl *D = nullptr;
+ } CurCUDATargetCtx;
+
+ struct CUDATargetContextRAII {
+ Sema &S;
+ CUDATargetContext SavedCtx;
+ CUDATargetContextRAII(Sema &S_, CUDATargetContextKind K, Decl *D);
+ ~CUDATargetContextRAII() { S.CurCUDATargetCtx = SavedCtx; }
+ };
+
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
@@ -12251,6 +13565,10 @@ public:
/// host or device attribute.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
+ /// Record \p FD if it is a CUDA/HIP implicit host device function used on
+ /// device side in device compilation.
+ void CUDARecordImplicitHostDeviceFuncUsedByDevice(const FunctionDecl *FD);
+
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
@@ -12347,7 +13665,9 @@ public:
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
- PCC_LocalDeclarationSpecifiers
+ PCC_LocalDeclarationSpecifiers,
+ /// Code completion occurs at top-level in a REPL session
+ PCC_TopLevelOrExpression,
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
@@ -12374,6 +13694,15 @@ public:
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
+ enum class AttributeCompletion {
+ Attribute,
+ Scope,
+ None,
+ };
+ void CodeCompleteAttribute(
+ AttributeCommonInfo::Syntax Syntax,
+ AttributeCompletion Completion = AttributeCompletion::Attribute,
+ const IdentifierInfo *Scope = nullptr);
/// Determines the preferred type of the current function argument, by
/// examining the signatures of all possible overloads.
/// Returns null if unknown or ambiguous, or if code completion is off.
@@ -12382,18 +13711,18 @@ public:
/// signatures that were considered.
///
/// FIXME: rename to GuessCallArgumentType to reduce confusion.
- QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
+ QualType ProduceCallSignatureHelp(Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
- QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
- SourceLocation Loc,
+ QualType ProduceConstructorSignatureHelp(QualType Type, SourceLocation Loc,
ArrayRef<Expr *> Args,
- SourceLocation OpenParLoc);
- QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
- CXXScopeSpec SS,
- ParsedType TemplateTypeTy,
- ArrayRef<Expr *> ArgExprs,
- IdentifierInfo *II,
- SourceLocation OpenParLoc);
+ SourceLocation OpenParLoc,
+ bool Braced);
+ QualType ProduceCtorInitMemberSignatureHelp(
+ Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy,
+ ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc,
+ bool Braced);
+ QualType ProduceTemplateArgumentSignatureHelp(
+ TemplateTy, ArrayRef<ParsedTemplateArgument>, SourceLocation LAngleLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
@@ -12448,6 +13777,7 @@ public:
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
+ void CodeCompleteObjCClassForwardDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
@@ -12461,7 +13791,8 @@ public:
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
- void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
+ void CodeCompleteObjCMethodDecl(Scope *S,
+ std::optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
@@ -12494,21 +13825,29 @@ public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
-private:
- void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
- const ArraySubscriptExpr *ASE=nullptr,
- bool AllowOnePastEnd=true, bool IndexNegated=false);
- void CheckArrayAccess(const Expr *E);
+ enum FormatArgumentPassingKind {
+ FAPK_Fixed, // values to format are fixed (no C-style variadic arguments)
+ FAPK_Variadic, // values to format are passed as variadic arguments
+ FAPK_VAList, // values to format are passed in a va_list
+ };
+
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
- bool HasVAListArg;
+ FormatArgumentPassingKind ArgPassingKind;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
- FormatStringInfo *FSI);
+ bool IsVariadic, FormatStringInfo *FSI);
+
+private:
+ void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
+ const ArraySubscriptExpr *ASE = nullptr,
+ bool AllowOnePastEnd = true, bool IndexNegated = false);
+ void CheckArrayAccess(const Expr *E);
+
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
@@ -12520,6 +13859,8 @@ private:
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto, SourceLocation Loc);
+ void checkAIXMemberAlignment(SourceLocation Loc, const Expr *Arg);
+
void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
StringRef ParamName, QualType ArgTy, QualType ParamTy);
@@ -12545,6 +13886,9 @@ private:
CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+ bool ParseSVEImmChecks(CallExpr *TheCall,
+ SmallVector<std::tuple<int, int, int>, 3> &ImmChecks);
+ bool CheckSMEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
@@ -12579,11 +13923,20 @@ private:
bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum);
bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
+ void checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D);
+ bool CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID, CallExpr *TheCall);
+ bool CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall);
+ bool CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
- bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
- bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
+ bool SemaBuiltinUnorderedCompare(CallExpr *TheCall, unsigned BuiltinID);
+ bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs,
+ unsigned BuiltinID);
bool SemaBuiltinComplex(CallExpr *TheCall);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
@@ -12625,10 +13978,18 @@ private:
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
- bool SemaBuiltinPPCMMACall(CallExpr *TheCall, const char *TypeDesc);
+ bool SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
+ const char *TypeDesc);
bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc);
+ bool SemaBuiltinElementwiseMath(CallExpr *TheCall);
+ bool SemaBuiltinElementwiseTernaryMath(CallExpr *TheCall);
+ bool PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall);
+ bool PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall);
+
+ bool SemaBuiltinNonDeterministicValue(CallExpr *TheCall);
+
// Matrix builtin handling.
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult);
@@ -12637,6 +13998,16 @@ private:
ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
ExprResult CallResult);
+ // WebAssembly builtin handling.
+ bool BuiltinWasmRefNullExtern(CallExpr *TheCall);
+ bool BuiltinWasmRefNullFunc(CallExpr *TheCall);
+ bool BuiltinWasmTableGet(CallExpr *TheCall);
+ bool BuiltinWasmTableSet(CallExpr *TheCall);
+ bool BuiltinWasmTableSize(CallExpr *TheCall);
+ bool BuiltinWasmTableGrow(CallExpr *TheCall);
+ bool BuiltinWasmTableFill(CallExpr *TheCall);
+ bool BuiltinWasmTableCopy(CallExpr *TheCall);
+
public:
enum FormatStringType {
FST_Scanf,
@@ -12658,18 +14029,19 @@ public:
private:
bool CheckFormatArguments(const FormatAttr *Format,
- ArrayRef<const Expr *> Args,
- bool IsCXXMember,
- VariadicCallType CallType,
- SourceLocation Loc, SourceRange Range,
+ ArrayRef<const Expr *> Args, bool IsCXXMember,
+ VariadicCallType CallType, SourceLocation Loc,
+ SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
- bool HasVAListArg, unsigned format_idx,
+ FormatArgumentPassingKind FAPK, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
- VariadicCallType CallType,
- SourceLocation Loc, SourceRange range,
+ VariadicCallType CallType, SourceLocation Loc,
+ SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
+ void CheckInfNaNFunction(const CallExpr *Call, const FunctionDecl *FDecl);
+
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
@@ -12694,12 +14066,13 @@ private:
const FunctionDecl *FD = nullptr);
public:
- void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
+ void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS,
+ BinaryOperatorKind Opcode);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
- void CheckForIntOverflow(Expr *E);
+ void CheckForIntOverflow(const Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
@@ -12724,7 +14097,8 @@ private:
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
- void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee);
+ void CheckTCBEnforcement(const SourceLocation CallExprLoc,
+ const NamedDecl *Callee);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
@@ -12747,7 +14121,9 @@ public:
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
+ LLVM_PREFERRED_TYPE(bool)
unsigned LayoutCompatible : 1;
+ LLVM_PREFERRED_TYPE(bool)
unsigned MustBeNull : 1;
};
@@ -12776,7 +14152,6 @@ private:
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
- mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
@@ -12825,9 +14200,8 @@ public:
}
IdentifierInfo *getSuperIdentifier() const;
- IdentifierInfo *getFloat128Identifier() const;
- Decl *getObjCDeclContext() const;
+ ObjCContainerDecl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
@@ -12901,7 +14275,7 @@ private:
ValueDecl *MD;
CharUnits Alignment;
- MisalignedMember() : E(), RD(), MD(), Alignment() {}
+ MisalignedMember() : E(), RD(), MD() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
@@ -12968,69 +14342,9 @@ public:
SemaDiagnosticBuilder SYCLDiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID);
- /// Check whether we're allowed to call Callee from the current context.
- ///
- /// - If the call is never allowed in a semantically-correct program
- /// emits an error and returns false.
- ///
- /// - If the call is allowed in semantically-correct programs, but only if
- /// it's never codegen'ed, creates a deferred diagnostic to be emitted if
- /// and when the caller is codegen'ed, and returns true.
- ///
- /// - Otherwise, returns true without emitting any diagnostics.
- ///
- /// Adds Callee to DeviceCallGraph if we don't know if its caller will be
- /// codegen'ed yet.
- bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
-};
-
-/// RAII object that enters a new expression evaluation context.
-class EnterExpressionEvaluationContext {
- Sema &Actions;
- bool Entered = true;
-
-public:
- EnterExpressionEvaluationContext(
- Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
- Decl *LambdaContextDecl = nullptr,
- Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
- Sema::ExpressionEvaluationContextRecord::EK_Other,
- bool ShouldEnter = true)
- : Actions(Actions), Entered(ShouldEnter) {
- if (Entered)
- Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
- ExprContext);
- }
- EnterExpressionEvaluationContext(
- Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
- Sema::ReuseLambdaContextDecl_t,
- Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
- Sema::ExpressionEvaluationContextRecord::EK_Other)
- : Actions(Actions) {
- Actions.PushExpressionEvaluationContext(
- NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
- }
-
- enum InitListTag { InitList };
- EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
- bool ShouldEnter = true)
- : Actions(Actions), Entered(false) {
- // In C++11 onwards, narrowing checks are performed on the contents of
- // braced-init-lists, even when they occur within unevaluated operands.
- // Therefore we still need to instantiate constexpr functions used in such
- // a context.
- if (ShouldEnter && Actions.isUnevaluatedContext() &&
- Actions.getLangOpts().CPlusPlus11) {
- Actions.PushExpressionEvaluationContext(
- Sema::ExpressionEvaluationContext::UnevaluatedList);
- Entered = true;
- }
- }
-
- ~EnterExpressionEvaluationContext() {
- if (Entered)
- Actions.PopExpressionEvaluationContext();
- }
+ void deepTypeCheckForSYCLDevice(SourceLocation UsedAt,
+ llvm::DenseSet<QualType> Visited,
+ ValueDecl *DeclToCheck);
};
DeductionFailureInfo
@@ -13043,6 +14357,8 @@ struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
+ /// Floating-point options in the point of definition.
+ FPOptions FPO;
};
template <>
@@ -13051,6 +14367,8 @@ void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation,
llvm::StringRef StackSlotLabel,
AlignPackInfo Value);
+std::unique_ptr<sema::RISCVIntrinsicManager>
+CreateRISCVIntrinsicManager(Sema &S);
} // end namespace clang
namespace llvm {
@@ -13058,7 +14376,8 @@ namespace llvm {
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
- using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
+ using FDBaseInfo =
+ DenseMapInfo<clang::CanonicalDeclPtr<const clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
diff --git a/contrib/llvm-project/clang/include/clang/Sema/SemaConcept.h b/contrib/llvm-project/clang/include/clang/Sema/SemaConcept.h
index c5f9fc45612a..711443505174 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/SemaConcept.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/SemaConcept.h
@@ -1,9 +1,8 @@
//===-- SemaConcept.h - Semantic Analysis for Constraints and Concepts ----===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -19,8 +18,8 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/PointerUnion.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
+#include <optional>
#include <string>
#include <utility>
@@ -29,7 +28,7 @@ class Sema;
struct AtomicConstraint {
const Expr *ConstraintExpr;
- Optional<MutableArrayRef<TemplateArgumentLoc>> ParameterMapping;
+ std::optional<ArrayRef<TemplateArgumentLoc>> ParameterMapping;
AtomicConstraint(Sema &S, const Expr *ConstraintExpr) :
ConstraintExpr(ConstraintExpr) { };
@@ -145,12 +144,12 @@ struct NormalizedConstraint {
}
private:
- static Optional<NormalizedConstraint>
+ static std::optional<NormalizedConstraint>
fromConstraintExprs(Sema &S, NamedDecl *D, ArrayRef<const Expr *> E);
- static Optional<NormalizedConstraint>
+ static std::optional<NormalizedConstraint>
fromConstraintExpr(Sema &S, NamedDecl *D, const Expr *E);
};
} // clang
-#endif //LLVM_CLANG_SEMA_SEMACONCEPT_H
+#endif // LLVM_CLANG_SEMA_SEMACONCEPT_H
diff --git a/contrib/llvm-project/clang/include/clang/Sema/SemaLambda.h b/contrib/llvm-project/clang/include/clang/Sema/SemaLambda.h
index e8eaa46b88a2..3c9d22df70c0 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/SemaLambda.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/SemaLambda.h
@@ -16,6 +16,7 @@
#define LLVM_CLANG_SEMA_SEMALAMBDA_H
#include "clang/AST/ASTLambda.h"
+#include <optional>
namespace clang {
namespace sema {
@@ -30,9 +31,9 @@ class Sema;
/// of the capture-capable lambda's LambdaScopeInfo.
/// See Implementation for more detailed comments.
-Optional<unsigned> getStackIndexOfNearestEnclosingCaptureCapableLambda(
+std::optional<unsigned> getStackIndexOfNearestEnclosingCaptureCapableLambda(
ArrayRef<const sema::FunctionScopeInfo *> FunctionScopes,
- VarDecl *VarToCapture, Sema &S);
+ ValueDecl *VarToCapture, Sema &S);
} // clang
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Template.h b/contrib/llvm-project/clang/include/clang/Sema/Template.h
index 540d2c9aa87e..ce44aca797b0 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Template.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Template.h
@@ -23,6 +23,7 @@
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallVector.h"
#include <cassert>
+#include <optional>
#include <utility>
namespace clang {
@@ -74,11 +75,20 @@ enum class TemplateSubstitutionKind : char {
/// template argument list (17) at depth 1.
class MultiLevelTemplateArgumentList {
/// The template argument list at a certain template depth
+
using ArgList = ArrayRef<TemplateArgument>;
+ struct ArgumentListLevel {
+ llvm::PointerIntPair<Decl *, 1, bool> AssociatedDeclAndFinal;
+ ArgList Args;
+ };
+ using ContainerType = SmallVector<ArgumentListLevel, 4>;
+
+ using ArgListsIterator = ContainerType::iterator;
+ using ConstArgListsIterator = ContainerType::const_iterator;
/// The template argument lists, stored from the innermost template
/// argument list (first) to the outermost template argument list (last).
- SmallVector<ArgList, 4> TemplateArgumentLists;
+ ContainerType TemplateArgumentLists;
/// The number of outer levels of template arguments that are not
/// being substituted.
@@ -92,9 +102,8 @@ enum class TemplateSubstitutionKind : char {
MultiLevelTemplateArgumentList() = default;
/// Construct a single-level template argument list.
- explicit
- MultiLevelTemplateArgumentList(const TemplateArgumentList &TemplateArgs) {
- addOuterTemplateArguments(&TemplateArgs);
+ MultiLevelTemplateArgumentList(Decl *D, ArgList Args, bool Final) {
+ addOuterTemplateArguments(D, Args, Final);
}
void setKind(TemplateSubstitutionKind K) { Kind = K; }
@@ -121,6 +130,12 @@ enum class TemplateSubstitutionKind : char {
return TemplateArgumentLists.size();
}
+ // Determine the number of substituted args at 'Depth'.
+ unsigned getNumSubsitutedArgs(unsigned Depth) const {
+ assert(NumRetainedOuterLevels <= Depth && Depth < getNumLevels());
+ return TemplateArgumentLists[getNumLevels() - Depth - 1].Args.size();
+ }
+
unsigned getNumRetainedOuterLevels() const {
return NumRetainedOuterLevels;
}
@@ -138,8 +153,19 @@ enum class TemplateSubstitutionKind : char {
/// Retrieve the template argument at a given depth and index.
const TemplateArgument &operator()(unsigned Depth, unsigned Index) const {
assert(NumRetainedOuterLevels <= Depth && Depth < getNumLevels());
- assert(Index < TemplateArgumentLists[getNumLevels() - Depth - 1].size());
- return TemplateArgumentLists[getNumLevels() - Depth - 1][Index];
+ assert(Index <
+ TemplateArgumentLists[getNumLevels() - Depth - 1].Args.size());
+ return TemplateArgumentLists[getNumLevels() - Depth - 1].Args[Index];
+ }
+
+ /// A template-like entity which owns the whole pattern being substituted.
+ /// This will usually own a set of template parameters, or in some
+ /// cases might even be a template parameter itself.
+ std::pair<Decl *, bool> getAssociatedDecl(unsigned Depth) const {
+ assert(NumRetainedOuterLevels <= Depth && Depth < getNumLevels());
+ auto AD = TemplateArgumentLists[getNumLevels() - Depth - 1]
+ .AssociatedDeclAndFinal;
+ return {AD.getPointer(), AD.getInt()};
}
/// Determine whether there is a non-NULL template argument at the
@@ -152,35 +178,77 @@ enum class TemplateSubstitutionKind : char {
if (Depth < NumRetainedOuterLevels)
return false;
- if (Index >= TemplateArgumentLists[getNumLevels() - Depth - 1].size())
+ if (Index >=
+ TemplateArgumentLists[getNumLevels() - Depth - 1].Args.size())
return false;
return !(*this)(Depth, Index).isNull();
}
+ bool isAnyArgInstantiationDependent() const {
+ for (ArgumentListLevel ListLevel : TemplateArgumentLists)
+ for (const TemplateArgument &TA : ListLevel.Args)
+ if (TA.isInstantiationDependent())
+ return true;
+ return false;
+ }
+
/// Clear out a specific template argument.
void setArgument(unsigned Depth, unsigned Index,
TemplateArgument Arg) {
assert(NumRetainedOuterLevels <= Depth && Depth < getNumLevels());
- assert(Index < TemplateArgumentLists[getNumLevels() - Depth - 1].size());
- const_cast<TemplateArgument&>(
- TemplateArgumentLists[getNumLevels() - Depth - 1][Index])
- = Arg;
+ assert(Index <
+ TemplateArgumentLists[getNumLevels() - Depth - 1].Args.size());
+ const_cast<TemplateArgument &>(
+ TemplateArgumentLists[getNumLevels() - Depth - 1].Args[Index]) = Arg;
}
- /// Add a new outermost level to the multi-level template argument
+ /// Add a new outmost level to the multi-level template argument
/// list.
- void addOuterTemplateArguments(const TemplateArgumentList *TemplateArgs) {
- addOuterTemplateArguments(ArgList(TemplateArgs->data(),
- TemplateArgs->size()));
+ /// A 'Final' substitution means that Subst* nodes won't be built
+ /// for the replacements.
+ void addOuterTemplateArguments(Decl *AssociatedDecl, ArgList Args,
+ bool Final) {
+ assert(!NumRetainedOuterLevels &&
+ "substituted args outside retained args?");
+ assert(getKind() == TemplateSubstitutionKind::Specialization);
+ TemplateArgumentLists.push_back(
+ {{AssociatedDecl ? AssociatedDecl->getCanonicalDecl() : nullptr,
+ Final},
+ Args});
}
- /// Add a new outmost level to the multi-level template argument
- /// list.
void addOuterTemplateArguments(ArgList Args) {
assert(!NumRetainedOuterLevels &&
"substituted args outside retained args?");
- TemplateArgumentLists.push_back(Args);
+ assert(getKind() == TemplateSubstitutionKind::Rewrite);
+ TemplateArgumentLists.push_back({{}, Args});
+ }
+
+ void addOuterTemplateArguments(std::nullopt_t) {
+ assert(!NumRetainedOuterLevels &&
+ "substituted args outside retained args?");
+ TemplateArgumentLists.push_back({});
+ }
+
+ /// Replaces the current 'innermost' level with the provided argument list.
+ /// This is useful for type deduction cases where we need to get the entire
+ /// list from the AST, but then add the deduced innermost list.
+ void replaceInnermostTemplateArguments(Decl *AssociatedDecl, ArgList Args) {
+ assert((!TemplateArgumentLists.empty() || NumRetainedOuterLevels) &&
+ "Replacing in an empty list?");
+
+ if (!TemplateArgumentLists.empty()) {
+ assert((TemplateArgumentLists[0].AssociatedDeclAndFinal.getPointer() ||
+ TemplateArgumentLists[0].AssociatedDeclAndFinal.getPointer() ==
+ AssociatedDecl) &&
+ "Trying to change incorrect declaration?");
+ TemplateArgumentLists[0].Args = Args;
+ } else {
+ --NumRetainedOuterLevels;
+ TemplateArgumentLists.push_back(
+ {{AssociatedDecl, /*Final=*/false}, Args});
+ }
}
/// Add an outermost level that we are not substituting. We have no
@@ -195,7 +263,34 @@ enum class TemplateSubstitutionKind : char {
/// Retrieve the innermost template argument list.
const ArgList &getInnermost() const {
- return TemplateArgumentLists.front();
+ return TemplateArgumentLists.front().Args;
+ }
+ /// Retrieve the outermost template argument list.
+ const ArgList &getOutermost() const {
+ return TemplateArgumentLists.back().Args;
+ }
+ ArgListsIterator begin() { return TemplateArgumentLists.begin(); }
+ ConstArgListsIterator begin() const {
+ return TemplateArgumentLists.begin();
+ }
+ ArgListsIterator end() { return TemplateArgumentLists.end(); }
+ ConstArgListsIterator end() const { return TemplateArgumentLists.end(); }
+
+ LLVM_DUMP_METHOD void dump() const {
+ LangOptions LO;
+ LO.CPlusPlus = true;
+ LO.Bool = true;
+ PrintingPolicy PP(LO);
+ llvm::errs() << "NumRetainedOuterLevels: " << NumRetainedOuterLevels
+ << "\n";
+ for (unsigned Depth = NumRetainedOuterLevels; Depth < getNumLevels();
+ ++Depth) {
+ llvm::errs() << Depth << ": ";
+ printTemplateArgumentList(
+ llvm::errs(),
+ TemplateArgumentLists[getNumLevels() - Depth - 1].Args, PP);
+ llvm::errs() << "\n";
+ }
}
};
@@ -398,7 +493,7 @@ enum class TemplateSubstitutionKind : char {
return newScope;
}
- /// deletes the given scope, and all otuer scopes, down to the
+ /// deletes the given scope, and all outer scopes, down to the
/// given outermost scope.
static void deleteScopes(LocalInstantiationScope *Scope,
LocalInstantiationScope *Outermost) {
@@ -469,6 +564,8 @@ enum class TemplateSubstitutionKind : char {
const MultiLevelTemplateArgumentList &TemplateArgs;
Sema::LateInstantiatedAttrVec* LateAttrs = nullptr;
LocalInstantiationScope *StartingScope = nullptr;
+ // Whether to evaluate the C++20 constraints or simply substitute into them.
+ bool EvaluateConstraints = true;
/// A list of out-of-line class template partial
/// specializations that will need to be instantiated after the
@@ -492,6 +589,13 @@ enum class TemplateSubstitutionKind : char {
SubstIndex(SemaRef, SemaRef.ArgumentPackSubstitutionIndex),
Owner(Owner), TemplateArgs(TemplateArgs) {}
+ void setEvaluateConstraints(bool B) {
+ EvaluateConstraints = B;
+ }
+ bool getEvaluateConstraints() {
+ return EvaluateConstraints;
+ }
+
// Define all the decl visitors using DeclNodes.inc
#define DECL(DERIVED, BASE) \
Decl *Visit ## DERIVED ## Decl(DERIVED ## Decl *D);
@@ -500,6 +604,7 @@ enum class TemplateSubstitutionKind : char {
// Decls which never appear inside a class or function.
#define OBJCCONTAINER(DERIVED, BASE)
#define FILESCOPEASM(DERIVED, BASE)
+#define TOPLEVELSTMT(DERIVED, BASE)
#define IMPORT(DERIVED, BASE)
#define EXPORT(DERIVED, BASE)
#define LINKAGESPEC(DERIVED, BASE)
@@ -528,8 +633,6 @@ enum class TemplateSubstitutionKind : char {
// A few supplemental visitor functions.
Decl *VisitCXXMethodDecl(CXXMethodDecl *D,
TemplateParameterList *TemplateParams,
- Optional<const ASTTemplateArgumentListInfo *>
- ClassScopeSpecializationArgs = llvm::None,
RewriteKind RK = RewriteKind::None);
Decl *VisitFunctionDecl(FunctionDecl *D,
TemplateParameterList *TemplateParams,
diff --git a/contrib/llvm-project/clang/include/clang/Sema/TemplateDeduction.h b/contrib/llvm-project/clang/include/clang/Sema/TemplateDeduction.h
index c0af9f3260b6..85691c66a044 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/TemplateDeduction.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/TemplateDeduction.h
@@ -22,10 +22,10 @@
#include "clang/AST/TemplateBase.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <cstddef>
+#include <optional>
#include <utility>
namespace clang {
@@ -41,7 +41,7 @@ namespace sema {
/// TemplateDeductionResult value.
class TemplateDeductionInfo {
/// The deduced template argument list.
- TemplateArgumentList *Deduced = nullptr;
+ TemplateArgumentList *DeducedSugared = nullptr, *DeducedCanonical = nullptr;
/// The source location at which template argument
/// deduction is occurring.
@@ -71,8 +71,8 @@ public:
/// Create temporary template deduction info for speculatively deducing
/// against a base class of an argument's type.
TemplateDeductionInfo(ForBaseTag, const TemplateDeductionInfo &Info)
- : Deduced(Info.Deduced), Loc(Info.Loc), DeducedDepth(Info.DeducedDepth),
- ExplicitArgs(Info.ExplicitArgs) {}
+ : DeducedSugared(Info.DeducedSugared), Loc(Info.Loc),
+ DeducedDepth(Info.DeducedDepth), ExplicitArgs(Info.ExplicitArgs) {}
/// Returns the location at which template argument is
/// occurring.
@@ -91,10 +91,15 @@ public:
return ExplicitArgs;
}
- /// Take ownership of the deduced template argument list.
- TemplateArgumentList *take() {
- TemplateArgumentList *Result = Deduced;
- Deduced = nullptr;
+ /// Take ownership of the deduced template argument lists.
+ TemplateArgumentList *takeSugared() {
+ TemplateArgumentList *Result = DeducedSugared;
+ DeducedSugared = nullptr;
+ return Result;
+ }
+ TemplateArgumentList *takeCanonical() {
+ TemplateArgumentList *Result = DeducedCanonical;
+ DeducedCanonical = nullptr;
return Result;
}
@@ -120,15 +125,20 @@ public:
/// Provide an initial template argument list that contains the
/// explicitly-specified arguments.
- void setExplicitArgs(TemplateArgumentList *NewDeduced) {
- Deduced = NewDeduced;
- ExplicitArgs = Deduced->size();
+ void setExplicitArgs(TemplateArgumentList *NewDeducedSugared,
+ TemplateArgumentList *NewDeducedCanonical) {
+ assert(NewDeducedSugared->size() == NewDeducedCanonical->size());
+ DeducedSugared = NewDeducedSugared;
+ DeducedCanonical = NewDeducedCanonical;
+ ExplicitArgs = DeducedSugared->size();
}
/// Provide a new template argument list that contains the
/// results of template argument deduction.
- void reset(TemplateArgumentList *NewDeduced) {
- Deduced = NewDeduced;
+ void reset(TemplateArgumentList *NewDeducedSugared,
+ TemplateArgumentList *NewDeducedCanonical) {
+ DeducedSugared = NewDeducedSugared;
+ DeducedCanonical = NewDeducedCanonical;
}
/// Is a SFINAE diagnostic available?
@@ -224,6 +234,13 @@ public:
/// different argument type from its substituted parameter type.
unsigned CallArgIndex = 0;
+ // C++20 [over.match.class.deduct]p5.2:
+ // During template argument deduction for the aggregate deduction
+ // candidate, the number of elements in a trailing parameter pack is only
+ // deduced from the number of remaining function arguments if it is not
+ // otherwise deduced.
+ bool AggregateDeductionCandidateHasMismatchedArity = false;
+
/// Information on packs that we're currently expanding.
///
/// FIXME: This should be kept internal to SemaTemplateDeduction.
@@ -274,7 +291,7 @@ struct DeductionFailureInfo {
/// Return the index of the call argument that this deduction
/// failure refers to, if any.
- llvm::Optional<unsigned> getCallArgIndex();
+ std::optional<unsigned> getCallArgIndex();
/// Free any memory associated with this deduction failure.
void Destroy();
diff --git a/contrib/llvm-project/clang/include/clang/Sema/TemplateInstCallback.h b/contrib/llvm-project/clang/include/clang/Sema/TemplateInstCallback.h
index 3ab0e8c6be9f..9258a7f41ac1 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/TemplateInstCallback.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/TemplateInstCallback.h
@@ -11,8 +11,8 @@
//
//===---------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TEMPLATE_INST_CALLBACK_H
-#define LLVM_CLANG_TEMPLATE_INST_CALLBACK_H
+#ifndef LLVM_CLANG_SEMA_TEMPLATEINSTCALLBACK_H
+#define LLVM_CLANG_SEMA_TEMPLATEINSTCALLBACK_H
#include "clang/Sema/Sema.h"
diff --git a/contrib/llvm-project/clang/include/clang/Sema/TypoCorrection.h b/contrib/llvm-project/clang/include/clang/Sema/TypoCorrection.h
index e0f8d152dbe5..09de164297e7 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/TypoCorrection.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/TypoCorrection.h
@@ -282,7 +282,7 @@ class CorrectionCandidateCallback {
public:
static const unsigned InvalidDistance = TypoCorrection::InvalidDistance;
- explicit CorrectionCandidateCallback(IdentifierInfo *Typo = nullptr,
+ explicit CorrectionCandidateCallback(const IdentifierInfo *Typo = nullptr,
NestedNameSpecifier *TypoNNS = nullptr)
: Typo(Typo), TypoNNS(TypoNNS) {}
@@ -319,7 +319,7 @@ public:
/// this method.
virtual std::unique_ptr<CorrectionCandidateCallback> clone() = 0;
- void setTypoName(IdentifierInfo *II) { Typo = II; }
+ void setTypoName(const IdentifierInfo *II) { Typo = II; }
void setTypoNNS(NestedNameSpecifier *NNS) { TypoNNS = NNS; }
// Flags for context-dependent keywords. WantFunctionLikeCasts is only
@@ -345,13 +345,13 @@ protected:
candidate.getCorrectionSpecifier() == TypoNNS;
}
- IdentifierInfo *Typo;
+ const IdentifierInfo *Typo;
NestedNameSpecifier *TypoNNS;
};
class DefaultFilterCCC final : public CorrectionCandidateCallback {
public:
- explicit DefaultFilterCCC(IdentifierInfo *Typo = nullptr,
+ explicit DefaultFilterCCC(const IdentifierInfo *Typo = nullptr,
NestedNameSpecifier *TypoNNS = nullptr)
: CorrectionCandidateCallback(Typo, TypoNNS) {}
@@ -365,6 +365,10 @@ public:
template <class C>
class DeclFilterCCC final : public CorrectionCandidateCallback {
public:
+ explicit DeclFilterCCC(const IdentifierInfo *Typo = nullptr,
+ NestedNameSpecifier *TypoNNS = nullptr)
+ : CorrectionCandidateCallback(Typo, TypoNNS) {}
+
bool ValidateCandidate(const TypoCorrection &candidate) override {
return candidate.getCorrectionDeclAs<C>();
}
diff --git a/contrib/llvm-project/clang/include/clang/Sema/Weak.h b/contrib/llvm-project/clang/include/clang/Sema/Weak.h
index 434393677d42..877b47d2474e 100644
--- a/contrib/llvm-project/clang/include/clang/Sema/Weak.h
+++ b/contrib/llvm-project/clang/include/clang/Sema/Weak.h
@@ -15,6 +15,7 @@
#define LLVM_CLANG_SEMA_WEAK_H
#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/DenseMapInfo.h"
namespace clang {
@@ -22,22 +23,32 @@ class IdentifierInfo;
/// Captures information about a \#pragma weak directive.
class WeakInfo {
- IdentifierInfo *alias; // alias (optional)
- SourceLocation loc; // for diagnostics
- bool used; // identifier later declared?
+ const IdentifierInfo *alias = nullptr; // alias (optional)
+ SourceLocation loc; // for diagnostics
public:
- WeakInfo()
- : alias(nullptr), loc(SourceLocation()), used(false) {}
- WeakInfo(IdentifierInfo *Alias, SourceLocation Loc)
- : alias(Alias), loc(Loc), used(false) {}
- inline IdentifierInfo * getAlias() const { return alias; }
+ WeakInfo() = default;
+ WeakInfo(const IdentifierInfo *Alias, SourceLocation Loc)
+ : alias(Alias), loc(Loc) {}
+ inline const IdentifierInfo *getAlias() const { return alias; }
inline SourceLocation getLocation() const { return loc; }
- void setUsed(bool Used=true) { used = Used; }
- inline bool getUsed() { return used; }
- bool operator==(WeakInfo RHS) const {
- return alias == RHS.getAlias() && loc == RHS.getLocation();
- }
- bool operator!=(WeakInfo RHS) const { return !(*this == RHS); }
+ bool operator==(WeakInfo RHS) const = delete;
+ bool operator!=(WeakInfo RHS) const = delete;
+
+ struct DenseMapInfoByAliasOnly
+ : private llvm::DenseMapInfo<const IdentifierInfo *> {
+ static inline WeakInfo getEmptyKey() {
+ return WeakInfo(DenseMapInfo::getEmptyKey(), SourceLocation());
+ }
+ static inline WeakInfo getTombstoneKey() {
+ return WeakInfo(DenseMapInfo::getTombstoneKey(), SourceLocation());
+ }
+ static unsigned getHashValue(const WeakInfo &W) {
+ return DenseMapInfo::getHashValue(W.getAlias());
+ }
+ static bool isEqual(const WeakInfo &LHS, const WeakInfo &RHS) {
+ return DenseMapInfo::isEqual(LHS.getAlias(), RHS.getAlias());
+ }
+ };
};
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ASTBitCodes.h b/contrib/llvm-project/clang/include/clang/Serialization/ASTBitCodes.h
index 027a981df22c..fdd64f2abbe9 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ASTBitCodes.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ASTBitCodes.h
@@ -41,7 +41,7 @@ namespace serialization {
/// Version 4 of AST files also requires that the version control branch and
/// revision match exactly, since there is no backward compatibility of
/// AST files at this time.
-const unsigned VERSION_MAJOR = 15;
+const unsigned VERSION_MAJOR = 29;
/// AST file minor version number supported by this version of
/// Clang.
@@ -51,7 +51,7 @@ const unsigned VERSION_MAJOR = 15;
/// for the previous version could still support reading the new
/// version by ignoring new kinds of subblocks), this number
/// should be increased.
-const unsigned VERSION_MINOR = 0;
+const unsigned VERSION_MINOR = 1;
/// An ID number that refers to an identifier in an AST file.
///
@@ -343,9 +343,6 @@ enum ControlRecordTypes {
/// name.
ORIGINAL_FILE,
- /// The directory that the PCH was originally created in.
- ORIGINAL_PCH_DIR,
-
/// Record code for file ID of the file or buffer that was used to
/// generate the AST file.
ORIGINAL_FILE_ID,
@@ -400,8 +397,14 @@ enum UnhashedControlBlockRecordTypes {
/// Record code for the diagnostic options table.
DIAGNOSTIC_OPTIONS,
+ /// Record code for the headers search paths.
+ HEADER_SEARCH_PATHS,
+
/// Record code for \#pragma diagnostic mappings.
DIAG_PRAGMA_MAPPINGS,
+
+ /// Record code for the indices of used header search entries.
+ HEADER_SEARCH_ENTRY_USAGE,
};
/// Record code for extension blocks.
@@ -521,13 +524,7 @@ enum ASTRecordTypes {
/// of source-location information.
SOURCE_LOCATION_OFFSETS = 14,
- /// Record code for the set of source location entries
- /// that need to be preloaded by the AST reader.
- ///
- /// This set contains the source location entry for the
- /// predefines buffer and for any file entries that need to be
- /// preloaded.
- SOURCE_LOCATION_PRELOADS = 15,
+ // ID 15 used to be for source location entry preloads.
/// Record code for the set of ext_vector type names.
EXT_VECTOR_DECLS = 16,
@@ -692,6 +689,12 @@ enum ASTRecordTypes {
/// Record code for \#pragma float_control options.
FLOAT_CONTROL_PRAGMA_OPTIONS = 65,
+
+ /// ID 66 used to be the list of included files.
+
+ /// Record code for an unterminated \#pragma clang assume_nonnull begin
+ /// recorded in a preamble.
+ PP_ASSUME_NONNULL_LOC = 67,
};
/// Record types used within a source manager block.
@@ -822,6 +825,9 @@ enum SubmoduleRecordTypes {
/// Specifies the name of the module that will eventually
/// re-export the entities in this module.
SUBMODULE_EXPORT_AS = 17,
+
+ /// Specifies affecting modules that were not imported.
+ SUBMODULE_AFFECTING_MODULES = 18,
};
/// Record types used within a comments block.
@@ -1064,6 +1070,9 @@ enum PredefinedTypeIDs {
/// \brief The '__bf16' type
PREDEF_TYPE_BFLOAT16_ID = 73,
+ /// \brief The '__ibm128' type
+ PREDEF_TYPE_IBM128_ID = 74,
+
/// OpenCL image types with auto numeration
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
PREDEF_TYPE_##Id##_ID,
@@ -1080,6 +1089,11 @@ enum PredefinedTypeIDs {
// \brief RISC-V V types with auto numeration
#define RVV_TYPE(Name, Id, SingletonId) PREDEF_TYPE_##Id##_ID,
#include "clang/Basic/RISCVVTypes.def"
+// \brief WebAssembly reference types with auto numeration
+#define WASM_TYPE(Name, Id, SingletonId) PREDEF_TYPE_##Id##_ID,
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
+ // Sentinel value. Considered a predefined type but not useable as one.
+ PREDEF_TYPE_LAST_ID
};
/// The number of predefined type IDs that are reserved for
@@ -1087,7 +1101,13 @@ enum PredefinedTypeIDs {
///
/// Type IDs for non-predefined types will start at
/// NUM_PREDEF_TYPE_IDs.
-const unsigned NUM_PREDEF_TYPE_IDS = 300;
+const unsigned NUM_PREDEF_TYPE_IDS = 502;
+
+// Ensure we do not overrun the predefined types we reserved
+// in the enum PredefinedTypeIDs above.
+static_assert(PREDEF_TYPE_LAST_ID < NUM_PREDEF_TYPE_IDS,
+ "Too many enumerators in PredefinedTypeIDs. Review the value of "
+ "NUM_PREDEF_TYPE_IDS");
/// Record codes for each kind of type.
///
@@ -1302,6 +1322,9 @@ enum DeclCode {
/// A FileScopeAsmDecl record.
DECL_FILE_SCOPE_ASM,
+ /// A TopLevelStmtDecl record.
+ DECL_TOP_LEVEL_STMT_DECL,
+
/// A BlockDecl record.
DECL_BLOCK,
@@ -1452,10 +1475,6 @@ enum DeclCode {
/// template template parameter pack.
DECL_EXPANDED_TEMPLATE_TEMPLATE_PARM_PACK,
- /// A ClassScopeFunctionSpecializationDecl record a class scope
- /// function specialization. (Microsoft extension).
- DECL_CLASS_SCOPE_FUNCTION_SPECIALIZATION,
-
/// An ImportDecl recording a module import.
DECL_IMPORT,
@@ -1495,7 +1514,16 @@ enum DeclCode {
/// An OMPDeclareReductionDecl record.
DECL_OMP_DECLARE_REDUCTION,
- DECL_LAST = DECL_OMP_DECLARE_REDUCTION
+ /// A UnnamedGlobalConstantDecl record.
+ DECL_UNNAMED_GLOBAL_CONSTANT,
+
+ /// A HLSLBufferDecl record.
+ DECL_HLSL_BUFFER,
+
+ /// An ImplicitConceptSpecializationDecl record.
+ DECL_IMPLICIT_CONCEPT_SPECIALIZATION,
+
+ DECL_LAST = DECL_IMPLICIT_CONCEPT_SPECIALIZATION
};
/// Record codes for each kind of statement or expression.
@@ -1833,6 +1861,9 @@ enum StmtCode {
/// A CXXBoolLiteralExpr record.
EXPR_CXX_BOOL_LITERAL,
+ /// A CXXParenListInitExpr record.
+ EXPR_CXX_PAREN_LIST_INIT,
+
EXPR_CXX_NULL_PTR_LITERAL, // CXXNullPtrLiteralExpr
EXPR_CXX_TYPEID_EXPR, // CXXTypeidExpr (of expr).
EXPR_CXX_TYPEID_TYPE, // CXXTypeidExpr (of type).
@@ -1890,6 +1921,7 @@ enum StmtCode {
STMT_SEH_TRY, // SEHTryStmt
// OpenMP directives
+ STMT_OMP_META_DIRECTIVE,
STMT_OMP_CANONICAL_LOOP,
STMT_OMP_PARALLEL_DIRECTIVE,
STMT_OMP_SIMD_DIRECTIVE,
@@ -1905,9 +1937,11 @@ enum StmtCode {
STMT_OMP_PARALLEL_FOR_DIRECTIVE,
STMT_OMP_PARALLEL_FOR_SIMD_DIRECTIVE,
STMT_OMP_PARALLEL_MASTER_DIRECTIVE,
+ STMT_OMP_PARALLEL_MASKED_DIRECTIVE,
STMT_OMP_PARALLEL_SECTIONS_DIRECTIVE,
STMT_OMP_TASK_DIRECTIVE,
STMT_OMP_TASKYIELD_DIRECTIVE,
+ STMT_OMP_ERROR_DIRECTIVE,
STMT_OMP_BARRIER_DIRECTIVE,
STMT_OMP_TASKWAIT_DIRECTIVE,
STMT_OMP_FLUSH_DIRECTIVE,
@@ -1931,6 +1965,10 @@ enum StmtCode {
STMT_OMP_MASTER_TASKLOOP_SIMD_DIRECTIVE,
STMT_OMP_PARALLEL_MASTER_TASKLOOP_DIRECTIVE,
STMT_OMP_PARALLEL_MASTER_TASKLOOP_SIMD_DIRECTIVE,
+ STMT_OMP_MASKED_TASKLOOP_DIRECTIVE,
+ STMT_OMP_MASKED_TASKLOOP_SIMD_DIRECTIVE,
+ STMT_OMP_PARALLEL_MASKED_TASKLOOP_DIRECTIVE,
+ STMT_OMP_PARALLEL_MASKED_TASKLOOP_SIMD_DIRECTIVE,
STMT_OMP_DISTRIBUTE_DIRECTIVE,
STMT_OMP_TARGET_UPDATE_DIRECTIVE,
STMT_OMP_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE,
@@ -1947,9 +1985,15 @@ enum StmtCode {
STMT_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE,
STMT_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_DIRECTIVE,
STMT_OMP_TARGET_TEAMS_DISTRIBUTE_SIMD_DIRECTIVE,
+ STMT_OMP_SCOPE_DIRECTIVE,
STMT_OMP_INTEROP_DIRECTIVE,
STMT_OMP_DISPATCH_DIRECTIVE,
STMT_OMP_MASKED_DIRECTIVE,
+ STMT_OMP_GENERIC_LOOP_DIRECTIVE,
+ STMT_OMP_TEAMS_GENERIC_LOOP_DIRECTIVE,
+ STMT_OMP_TARGET_TEAMS_GENERIC_LOOP_DIRECTIVE,
+ STMT_OMP_PARALLEL_GENERIC_LOOP_DIRECTIVE,
+ STMT_OMP_TARGET_PARALLEL_GENERIC_LOOP_DIRECTIVE,
EXPR_OMP_ARRAY_SECTION,
EXPR_OMP_ARRAY_SHAPING,
EXPR_OMP_ITERATOR,
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ASTReader.h b/contrib/llvm-project/clang/include/clang/Serialization/ASTReader.h
index 242b75baca6c..62c25f5b7a0d 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ASTReader.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ASTReader.h
@@ -32,12 +32,13 @@
#include "clang/Serialization/ModuleFile.h"
#include "clang/Serialization/ModuleFileExtension.h"
#include "clang/Serialization/ModuleManager.h"
+#include "clang/Serialization/SourceLocationEncoding.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/MapVector.h"
-#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/PagedVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -56,6 +57,7 @@
#include <ctime>
#include <deque>
#include <memory>
+#include <optional>
#include <set>
#include <string>
#include <utility>
@@ -84,7 +86,6 @@ class GlobalModuleIndex;
struct HeaderFileInfo;
class HeaderSearchOptions;
class LangOptions;
-class LazyASTUnresolvedSet;
class MacroInfo;
class InMemoryModuleCache;
class NamedDecl;
@@ -94,7 +95,6 @@ class ObjCInterfaceDecl;
class PCHContainerReader;
class Preprocessor;
class PreprocessorOptions;
-struct QualifierInfo;
class Sema;
class SourceManager;
class Stmt;
@@ -165,6 +165,10 @@ public:
/// Receives the header search options.
///
+ /// \param HSOpts The read header search options. The following fields are
+ /// missing and are reported in ReadHeaderSearchPaths():
+ /// UserEntries, SystemHeaderPrefixes, VFSOverlayFiles.
+ ///
/// \returns true to indicate the header search options are invalid, or false
/// otherwise.
virtual bool ReadHeaderSearchOptions(const HeaderSearchOptions &HSOpts,
@@ -173,6 +177,20 @@ public:
return false;
}
+ /// Receives the header search paths.
+ ///
+ /// \param HSOpts The read header search paths. Only the following fields are
+ /// initialized: UserEntries, SystemHeaderPrefixes,
+ /// VFSOverlayFiles. The rest is reported in
+ /// ReadHeaderSearchOptions().
+ ///
+ /// \returns true to indicate the header search paths are invalid, or false
+ /// otherwise.
+ virtual bool ReadHeaderSearchPaths(const HeaderSearchOptions &HSOpts,
+ bool Complain) {
+ return false;
+ }
+
/// Receives the preprocessor options.
///
/// \param SuggestedPredefines Can be filled in with the set of predefines
@@ -182,7 +200,7 @@ public:
/// \returns true to indicate the preprocessor options are invalid, or false
/// otherwise.
virtual bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
- bool Complain,
+ bool ReadMacros, bool Complain,
std::string &SuggestedPredefines) {
return false;
}
@@ -257,7 +275,7 @@ public:
StringRef SpecificModuleCachePath,
bool Complain) override;
bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
- bool Complain,
+ bool ReadMacros, bool Complain,
std::string &SuggestedPredefines) override;
void ReadCounter(const serialization::ModuleFile &M, unsigned Value) override;
@@ -287,15 +305,13 @@ public:
bool AllowCompatibleDifferences) override;
bool ReadDiagnosticOptions(IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts,
bool Complain) override;
- bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts, bool Complain,
+ bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
+ bool ReadMacros, bool Complain,
std::string &SuggestedPredefines) override;
bool ReadHeaderSearchOptions(const HeaderSearchOptions &HSOpts,
StringRef SpecificModuleCachePath,
bool Complain) override;
void ReadCounter(const serialization::ModuleFile &M, unsigned Value) override;
-
-private:
- void Error(const char *Msg);
};
/// ASTReaderListenter implementation to set SuggestedPredefines of
@@ -308,7 +324,8 @@ class SimpleASTReaderListener : public ASTReaderListener {
public:
SimpleASTReaderListener(Preprocessor &PP) : PP(PP) {}
- bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts, bool Complain,
+ bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
+ bool ReadMacros, bool Complain,
std::string &SuggestedPredefines) override;
};
@@ -382,7 +399,7 @@ public:
/// The AST file was written by a different version of Clang.
VersionMismatch,
- /// The AST file was writtten with a different language/target
+ /// The AST file was written with a different language/target
/// configuration.
ConfigurationMismatch,
@@ -398,6 +415,8 @@ public:
using ModuleReverseIterator = ModuleManager::ModuleReverseIterator;
private:
+ using LocSeq = SourceLocationSequence;
+
/// The receiver of some callbacks invoked by ASTReader.
std::unique_ptr<ASTReaderListener> Listener;
@@ -443,7 +462,7 @@ private:
SourceLocation CurrentImportLoc;
/// The module kind that is currently deserializing.
- Optional<ModuleKind> CurrentDeserializingModuleKind;
+ std::optional<ModuleKind> CurrentDeserializingModuleKind;
/// The global module index, if loaded.
std::unique_ptr<GlobalModuleIndex> GlobalIndex;
@@ -466,7 +485,7 @@ private:
///
/// When the pointer at index I is non-NULL, the type with
/// ID = (I + 1) << FastQual::Width has already been loaded
- std::vector<QualType> TypesLoaded;
+ llvm::PagedVector<QualType> TypesLoaded;
using GlobalTypeMapType =
ContinuousRangeMap<serialization::TypeID, ModuleFile *, 4>;
@@ -480,7 +499,7 @@ private:
///
/// When the pointer at index I is non-NULL, the declaration with ID
/// = I + 1 has already been loaded.
- std::vector<Decl *> DeclsLoaded;
+ llvm::PagedVector<Decl *> DeclsLoaded;
using GlobalDeclMapType =
ContinuousRangeMap<serialization::DeclID, ModuleFile *, 4>;
@@ -531,6 +550,10 @@ private:
/// declaration and the value is the deduced return type.
llvm::SmallMapVector<FunctionDecl *, QualType, 4> PendingDeducedTypeUpdates;
+ /// Functions has undededuced return type and we wish we can find the deduced
+ /// return type by iterating the redecls in other modules.
+ llvm::SmallVector<FunctionDecl *, 4> PendingUndeducedFunctionDecls;
+
/// Declarations that have been imported and have typedef names for
/// linkage purposes.
llvm::DenseMap<std::pair<DeclContext *, IdentifierInfo *>, NamedDecl *>
@@ -541,6 +564,10 @@ private:
llvm::DenseMap<Decl*, llvm::SmallVector<NamedDecl*, 2>>
AnonymousDeclarationsForMerging;
+ /// Map from numbering information for lambdas to the corresponding lambdas.
+ llvm::DenseMap<std::pair<const Decl *, unsigned>, NamedDecl *>
+ LambdaDeclarationsForMerging;
+
/// Key used to identify LifetimeExtendedTemporaryDecl for merging,
/// containing the lifetime-extending declaration and the mangling number.
using LETemporaryKey = std::pair<Decl *, unsigned>;
@@ -688,7 +715,7 @@ private:
Module *Mod;
/// The kind of module reference.
- enum { Import, Export, Conflict } Kind;
+ enum { Import, Export, Conflict, Affecting } Kind;
/// The local ID of the module that is being exported.
unsigned ID;
@@ -863,7 +890,7 @@ private:
SourceLocation PointersToMembersPragmaLocation;
/// The pragma float_control state.
- Optional<FPOptionsOverride> FpPragmaCurrentValue;
+ std::optional<FPOptionsOverride> FpPragmaCurrentValue;
SourceLocation FpPragmaCurrentLocation;
struct FpPragmaStackEntry {
FPOptionsOverride Value;
@@ -875,7 +902,7 @@ private:
llvm::SmallVector<std::string, 2> FpPragmaStrings;
/// The pragma align/pack state.
- Optional<Sema::AlignPackInfo> PragmaAlignPackCurrentValue;
+ std::optional<Sema::AlignPackInfo> PragmaAlignPackCurrentValue;
SourceLocation PragmaAlignPackCurrentLocation;
struct PragmaAlignPackStackEntry {
Sema::AlignPackInfo Value;
@@ -914,7 +941,7 @@ private:
/// Sema tracks these to emit deferred diags.
llvm::SmallSetVector<serialization::DeclID, 4> DeclsToCheckForDeferredDiags;
-public:
+private:
struct ImportedSubmodule {
serialization::SubmoduleID ID;
SourceLocation ImportLoc;
@@ -923,10 +950,15 @@ public:
: ID(ID), ImportLoc(ImportLoc) {}
};
-private:
/// A list of modules that were imported by precompiled headers or
- /// any other non-module AST file.
- SmallVector<ImportedSubmodule, 2> ImportedModules;
+ /// any other non-module AST file and have not yet been made visible. If a
+ /// module is made visible in the ASTReader, it will be transfered to
+ /// \c PendingImportedModulesSema.
+ SmallVector<ImportedSubmodule, 2> PendingImportedModules;
+
+ /// A list of modules that were imported by precompiled headers or
+ /// any other non-module AST file and have not yet been made visible for Sema.
+ SmallVector<ImportedSubmodule, 2> PendingImportedModulesSema;
//@}
/// The system include root to be used when loading the
@@ -1082,7 +1114,13 @@ private:
/// they might contain a deduced return type that refers to a local type
/// declared within the function.
SmallVector<std::pair<FunctionDecl *, serialization::TypeID>, 16>
- PendingFunctionTypes;
+ PendingDeducedFunctionTypes;
+
+ /// The list of deduced variable types that we have not yet read, because
+ /// they might contain a deduced type that refers to a local type declared
+ /// within the variable.
+ SmallVector<std::pair<VarDecl *, serialization::TypeID>, 16>
+ PendingDeducedVarTypes;
/// The list of redeclaration chains that still need to be
/// reconstructed, and the local offset to the corresponding list
@@ -1108,6 +1146,23 @@ private:
/// been completed.
std::deque<PendingDeclContextInfo> PendingDeclContextInfos;
+ template <typename DeclTy>
+ using DuplicateObjCDecls = std::pair<DeclTy *, DeclTy *>;
+
+ /// When resolving duplicate ivars from Objective-C extensions we don't error
+ /// out immediately but check if can merge identical extensions. Not checking
+ /// extensions for equality immediately because ivar deserialization isn't
+ /// over yet at that point.
+ llvm::SmallMapVector<DuplicateObjCDecls<ObjCCategoryDecl>,
+ llvm::SmallVector<DuplicateObjCDecls<ObjCIvarDecl>, 4>,
+ 2>
+ PendingObjCExtensionIvarRedeclarations;
+
+ /// Members that have been added to classes, for which the class has not yet
+ /// been notified. CXXRecordDecl::addedMember will be called for each of
+ /// these once recursive deserialization is complete.
+ SmallVector<std::pair<CXXRecordDecl*, Decl*>, 4> PendingAddedClassMembers;
+
/// The set of NamedDecls that have been loaded, but are members of a
/// context that has been merged into another context where the corresponding
/// declaration is either missing or has not yet been loaded.
@@ -1118,11 +1173,20 @@ private:
using DataPointers =
std::pair<CXXRecordDecl *, struct CXXRecordDecl::DefinitionData *>;
+ using ObjCInterfaceDataPointers =
+ std::pair<ObjCInterfaceDecl *,
+ struct ObjCInterfaceDecl::DefinitionData *>;
+ using ObjCProtocolDataPointers =
+ std::pair<ObjCProtocolDecl *, struct ObjCProtocolDecl::DefinitionData *>;
/// Record definitions in which we found an ODR violation.
llvm::SmallDenseMap<CXXRecordDecl *, llvm::SmallVector<DataPointers, 2>, 2>
PendingOdrMergeFailures;
+ /// C/ObjC record definitions in which we found an ODR violation.
+ llvm::SmallDenseMap<RecordDecl *, llvm::SmallVector<RecordDecl *, 2>, 2>
+ PendingRecordOdrMergeFailures;
+
/// Function definitions in which we found an ODR violation.
llvm::SmallDenseMap<FunctionDecl *, llvm::SmallVector<FunctionDecl *, 2>, 2>
PendingFunctionOdrMergeFailures;
@@ -1131,6 +1195,16 @@ private:
llvm::SmallDenseMap<EnumDecl *, llvm::SmallVector<EnumDecl *, 2>, 2>
PendingEnumOdrMergeFailures;
+ /// ObjCInterfaceDecl in which we found an ODR violation.
+ llvm::SmallDenseMap<ObjCInterfaceDecl *,
+ llvm::SmallVector<ObjCInterfaceDataPointers, 2>, 2>
+ PendingObjCInterfaceOdrMergeFailures;
+
+ /// ObjCProtocolDecl in which we found an ODR violation.
+ llvm::SmallDenseMap<ObjCProtocolDecl *,
+ llvm::SmallVector<ObjCProtocolDataPointers, 2>, 2>
+ PendingObjCProtocolOdrMergeFailures;
+
/// DeclContexts in which we have diagnosed an ODR violation.
llvm::SmallPtrSet<DeclContext*, 2> DiagnosedOdrMergeFailures;
@@ -1162,6 +1236,10 @@ private:
/// definitions. Only populated when using modules in C++.
llvm::DenseMap<EnumDecl *, EnumDecl *> EnumDefinitions;
+ /// A mapping from canonical declarations of records to their canonical
+ /// definitions. Doesn't cover CXXRecordDecl.
+ llvm::DenseMap<RecordDecl *, RecordDecl *> RecordDefinitions;
+
/// When reading a Stmt tree, Stmt operands are placed in this stack.
SmallVector<Stmt *, 16> StmtStack;
@@ -1223,18 +1301,8 @@ private:
/// Reads a statement from the specified cursor.
Stmt *ReadStmtFromStream(ModuleFile &F);
- struct InputFileInfo {
- std::string Filename;
- uint64_t ContentHash;
- off_t StoredSize;
- time_t StoredTime;
- bool Overridden;
- bool Transient;
- bool TopLevelModuleMap;
- };
-
- /// Reads the stored information about an input file.
- InputFileInfo readInputFileInfo(ModuleFile &F, unsigned ID);
+ /// Retrieve the stored information about an input file.
+ serialization::InputFileInfo getInputFileInfo(ModuleFile &F, unsigned ID);
/// Retrieve the file entry and 'overridden' bit for an input
/// file in the given module file.
@@ -1320,18 +1388,17 @@ private:
ASTReaderListener *Listener,
bool ValidateDiagnosticOptions);
- ASTReadResult ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities);
- ASTReadResult ReadExtensionBlock(ModuleFile &F);
+ llvm::Error ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities);
+ llvm::Error ReadExtensionBlock(ModuleFile &F);
void ReadModuleOffsetMap(ModuleFile &F) const;
- bool ParseLineTable(ModuleFile &F, const RecordData &Record);
- bool ReadSourceManagerBlock(ModuleFile &F);
- llvm::BitstreamCursor &SLocCursorForID(int ID);
+ void ParseLineTable(ModuleFile &F, const RecordData &Record);
+ llvm::Error ReadSourceManagerBlock(ModuleFile &F);
SourceLocation getImportLocation(ModuleFile *F);
ASTReadResult ReadModuleMapFileBlock(RecordData &Record, ModuleFile &F,
const ModuleFile *ImportedBy,
unsigned ClientLoadCapabilities);
- ASTReadResult ReadSubmoduleBlock(ModuleFile &F,
- unsigned ClientLoadCapabilities);
+ llvm::Error ReadSubmoduleBlock(ModuleFile &F,
+ unsigned ClientLoadCapabilities);
static bool ParseLanguageOptions(const RecordData &Record, bool Complain,
ASTReaderListener &Listener,
bool AllowCompatibleDifferences);
@@ -1344,6 +1411,8 @@ private:
ASTReaderListener &Listener);
static bool ParseHeaderSearchOptions(const RecordData &Record, bool Complain,
ASTReaderListener &Listener);
+ static bool ParseHeaderSearchPaths(const RecordData &Record, bool Complain,
+ ASTReaderListener &Listener);
static bool ParsePreprocessorOptions(const RecordData &Record, bool Complain,
ASTReaderListener &Listener,
std::string &SuggestedPredefines);
@@ -1359,7 +1428,7 @@ private:
RecordLocation TypeCursorForIndex(unsigned Index);
void LoadedDecl(unsigned Index, Decl *D);
Decl *ReadDeclRecord(serialization::DeclID ID);
- void markIncompleteDeclChain(Decl *Canon);
+ void markIncompleteDeclChain(Decl *D);
/// Returns the most recent declaration of a declaration (which must be
/// of a redeclarable kind) that is either local or has already been loaded
@@ -1566,12 +1635,17 @@ public:
/// capabilities, represented as a bitset of the enumerators of
/// LoadFailureCapabilities.
///
- /// \param Imported optional out-parameter to append the list of modules
- /// that were imported by precompiled headers or any other non-module AST file
+ /// \param LoadedModuleFile The optional out-parameter refers to the new
+ /// loaded modules. In case the module specified by FileName is already
+ /// loaded, the module file pointer referred by NewLoadedModuleFile wouldn't
+ /// change. Otherwise if the AST file get loaded successfully,
+ /// NewLoadedModuleFile would refer to the address of the new loaded top level
+ /// module. The state of NewLoadedModuleFile is unspecified if the AST file
+ /// isn't loaded successfully.
ASTReadResult ReadAST(StringRef FileName, ModuleKind Type,
SourceLocation ImportLoc,
unsigned ClientLoadCapabilities,
- SmallVectorImpl<ImportedSubmodule> *Imported = nullptr);
+ ModuleFile **NewLoadedModuleFile = nullptr);
/// Make the entities in the given module and any of its (non-explicit)
/// submodules visible to name lookup.
@@ -1706,21 +1780,23 @@ public:
/// Read the control block for the named AST file.
///
/// \returns true if an error occurred, false otherwise.
- static bool
- readASTFileControlBlock(StringRef Filename, FileManager &FileMgr,
- const PCHContainerReader &PCHContainerRdr,
- bool FindModuleFileExtensions,
- ASTReaderListener &Listener,
- bool ValidateDiagnosticOptions);
+ static bool readASTFileControlBlock(StringRef Filename, FileManager &FileMgr,
+ const InMemoryModuleCache &ModuleCache,
+ const PCHContainerReader &PCHContainerRdr,
+ bool FindModuleFileExtensions,
+ ASTReaderListener &Listener,
+ bool ValidateDiagnosticOptions);
/// Determine whether the given AST file is acceptable to load into a
/// translation unit with the given language and target options.
static bool isAcceptableASTFile(StringRef Filename, FileManager &FileMgr,
+ const InMemoryModuleCache &ModuleCache,
const PCHContainerReader &PCHContainerRdr,
const LangOptions &LangOpts,
const TargetOptions &TargetOpts,
const PreprocessorOptions &PPOpts,
- StringRef ExistingModuleCachePath);
+ StringRef ExistingModuleCachePath,
+ bool RequireStrictOptionMatches = false);
/// Returns the suggested contents of the predefines buffer,
/// which contains a (typically-empty) subset of the predefines
@@ -1740,14 +1816,14 @@ public:
/// Optionally returns true or false if the preallocated preprocessed
/// entity with index \p Index came from file \p FID.
- Optional<bool> isPreprocessedEntityInFileID(unsigned Index,
- FileID FID) override;
+ std::optional<bool> isPreprocessedEntityInFileID(unsigned Index,
+ FileID FID) override;
/// Read a preallocated skipped range from the external source.
SourceRange ReadSkippedRange(unsigned Index) override;
/// Read the header file information for the given file entry.
- HeaderFileInfo GetHeaderFileInfo(const FileEntry *FE) override;
+ HeaderFileInfo GetHeaderFileInfo(FileEntryRef FE) override;
void ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag);
@@ -1826,10 +1902,6 @@ public:
/// if the declaration is not from a module file.
ModuleFile *getOwningModuleFile(const Decl *D);
- /// Get the best name we know for the module that owns the given
- /// declaration, or an empty string if the declaration is not from a module.
- std::string getOwningModuleNameForDiagnostic(const Decl *D);
-
/// Returns the source location for the decl \p ID.
SourceLocation getSourceLocationForDeclID(serialization::GlobalDeclID ID);
@@ -1904,8 +1976,9 @@ public:
/// ReadBlockAbbrevs - Enter a subblock of the specified BlockID with the
/// specified cursor. Read the abbreviations that are at the top of the block
/// and then leave the cursor pointing into the block.
- static bool ReadBlockAbbrevs(llvm::BitstreamCursor &Cursor, unsigned BlockID,
- uint64_t *StartOfBlockOffset = nullptr);
+ static llvm::Error ReadBlockAbbrevs(llvm::BitstreamCursor &Cursor,
+ unsigned BlockID,
+ uint64_t *StartOfBlockOffset = nullptr);
/// Finds all the visible declarations with a given name.
/// The current implementation of this method just loads the entire
@@ -2024,7 +2097,7 @@ public:
SmallVectorImpl<std::pair<Selector, SourceLocation>> &Sels) override;
void ReadWeakUndeclaredIdentifiers(
- SmallVectorImpl<std::pair<IdentifierInfo *, WeakInfo>> &WI) override;
+ SmallVectorImpl<std::pair<IdentifierInfo *, WeakInfo>> &WeakIDs) override;
void ReadUsedVTables(SmallVectorImpl<ExternalVTableUse> &VTables) override;
@@ -2036,6 +2109,8 @@ public:
llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>>
&LPTMap) override;
+ void AssignedLambdaNumbering(const CXXRecordDecl *Lambda) override;
+
/// Load a selector from disk, registering its ID if it exists.
void LoadSelector(Selector Sel);
@@ -2080,6 +2155,12 @@ public:
/// Read the source location entry with index ID.
bool ReadSLocEntry(int ID) override;
+ /// Get the index ID for the loaded SourceLocation offset.
+ int getSLocEntryID(SourceLocation::UIntTy SLocOffset) override;
+ /// Try to read the offset of the SLocEntry at the given index in the given
+ /// module file.
+ llvm::Expected<SourceLocation::UIntTy> readSLocOffset(ModuleFile *F,
+ unsigned Index);
/// Retrieve the module import location and module name for the
/// given source manager entry ID.
@@ -2107,7 +2188,7 @@ public:
unsigned getModuleFileID(ModuleFile *M);
/// Return a descriptor for the corresponding module.
- llvm::Optional<ASTSourceDescriptor> getSourceDescriptor(unsigned ID) override;
+ std::optional<ASTSourceDescriptor> getSourceDescriptor(unsigned ID) override;
ExtKind hasExternalDefinitions(const Decl *D) override;
@@ -2126,7 +2207,7 @@ public:
/// Retrieve the global selector ID that corresponds to this
/// the local selector ID in a given module.
- serialization::SelectorID getGlobalSelectorID(ModuleFile &F,
+ serialization::SelectorID getGlobalSelectorID(ModuleFile &M,
unsigned LocalID) const;
/// Read the contents of a CXXCtorInitializer array.
@@ -2139,16 +2220,16 @@ public:
/// Read a source location from raw form and return it in its
/// originating module file's source location space.
- SourceLocation
- ReadUntranslatedSourceLocation(SourceLocation::UIntTy Raw) const {
- return SourceLocation::getFromRawEncoding((Raw >> 1) |
- (Raw << (8 * sizeof(Raw) - 1)));
+ SourceLocation ReadUntranslatedSourceLocation(SourceLocation::UIntTy Raw,
+ LocSeq *Seq = nullptr) const {
+ return SourceLocationEncoding::decode(Raw, Seq);
}
/// Read a source location from raw form.
SourceLocation ReadSourceLocation(ModuleFile &ModuleFile,
- SourceLocation::UIntTy Raw) const {
- SourceLocation Loc = ReadUntranslatedSourceLocation(Raw);
+ SourceLocation::UIntTy Raw,
+ LocSeq *Seq = nullptr) const {
+ SourceLocation Loc = ReadUntranslatedSourceLocation(Raw, Seq);
return TranslateSourceLocation(ModuleFile, Loc);
}
@@ -2168,17 +2249,29 @@ public:
/// Read a source location.
SourceLocation ReadSourceLocation(ModuleFile &ModuleFile,
- const RecordDataImpl &Record,
- unsigned &Idx) {
- return ReadSourceLocation(ModuleFile, Record[Idx++]);
+ const RecordDataImpl &Record, unsigned &Idx,
+ LocSeq *Seq = nullptr) {
+ return ReadSourceLocation(ModuleFile, Record[Idx++], Seq);
+ }
+
+ /// Read a FileID.
+ FileID ReadFileID(ModuleFile &F, const RecordDataImpl &Record,
+ unsigned &Idx) const {
+ return TranslateFileID(F, FileID::get(Record[Idx++]));
+ }
+
+ /// Translate a FileID from another module file's FileID space into ours.
+ FileID TranslateFileID(ModuleFile &F, FileID FID) const {
+ assert(FID.ID >= 0 && "Reading non-local FileID.");
+ return FileID::get(F.SLocEntryBaseID + FID.ID - 1);
}
/// Read a source range.
- SourceRange ReadSourceRange(ModuleFile &F,
- const RecordData &Record, unsigned &Idx);
+ SourceRange ReadSourceRange(ModuleFile &F, const RecordData &Record,
+ unsigned &Idx, LocSeq *Seq = nullptr);
// Read a string
- static std::string ReadString(const RecordData &Record, unsigned &Idx);
+ static std::string ReadString(const RecordDataImpl &Record, unsigned &Idx);
// Skip a string
static void SkipString(const RecordData &Record, unsigned &Idx) {
@@ -2294,6 +2387,13 @@ public:
/// Loads comments ranges.
void ReadComments() override;
+ /// Visit all the input file infos of the given module file.
+ void visitInputFileInfos(
+ serialization::ModuleFile &MF, bool IncludeSystem,
+ llvm::function_ref<void(const serialization::InputFileInfo &IFI,
+ bool IsSystem)>
+ Visitor);
+
/// Visit all the input files of the given module file.
void visitInputFiles(serialization::ModuleFile &MF,
bool IncludeSystem, bool Complain,
@@ -2303,12 +2403,54 @@ public:
/// Visit all the top-level module maps loaded when building the given module
/// file.
void visitTopLevelModuleMaps(serialization::ModuleFile &MF,
- llvm::function_ref<
- void(const FileEntry *)> Visitor);
+ llvm::function_ref<void(FileEntryRef)> Visitor);
bool isProcessingUpdateRecords() { return ProcessingUpdateRecords; }
};
+/// A simple helper class to unpack an integer to bits and consuming
+/// the bits in order.
+class BitsUnpacker {
+ constexpr static uint32_t BitsIndexUpbound = 32;
+
+public:
+ BitsUnpacker(uint32_t V) { updateValue(V); }
+ BitsUnpacker(const BitsUnpacker &) = delete;
+ BitsUnpacker(BitsUnpacker &&) = delete;
+ BitsUnpacker operator=(const BitsUnpacker &) = delete;
+ BitsUnpacker operator=(BitsUnpacker &&) = delete;
+ ~BitsUnpacker() = default;
+
+ void updateValue(uint32_t V) {
+ Value = V;
+ CurrentBitsIndex = 0;
+ }
+
+ void advance(uint32_t BitsWidth) { CurrentBitsIndex += BitsWidth; }
+
+ bool getNextBit() {
+ assert(isValid());
+ return Value & (1 << CurrentBitsIndex++);
+ }
+
+ uint32_t getNextBits(uint32_t Width) {
+ assert(isValid());
+ assert(Width < BitsIndexUpbound);
+ uint32_t Ret = (Value >> CurrentBitsIndex) & ((1 << Width) - 1);
+ CurrentBitsIndex += Width;
+ return Ret;
+ }
+
+ bool canGetNextNBits(uint32_t Width) const {
+ return CurrentBitsIndex + Width < BitsIndexUpbound;
+ }
+
+private:
+ bool isValid() const { return CurrentBitsIndex < BitsIndexUpbound; }
+
+ uint32_t Value;
+ uint32_t CurrentBitsIndex = ~0;
+};
} // namespace clang
#endif // LLVM_CLANG_SERIALIZATION_ASTREADER_H
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ASTRecordReader.h b/contrib/llvm-project/clang/include/clang/Serialization/ASTRecordReader.h
index b85609bf4e05..80a1359fd16a 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ASTRecordReader.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ASTRecordReader.h
@@ -18,6 +18,7 @@
#include "clang/AST/AbstractBasicReader.h"
#include "clang/Lex/Token.h"
#include "clang/Serialization/ASTReader.h"
+#include "clang/Serialization/SourceLocationEncoding.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
@@ -30,6 +31,7 @@ class OMPChildren;
class ASTRecordReader
: public serialization::DataStreamBasicReader<ASTRecordReader> {
using ModuleFile = serialization::ModuleFile;
+ using LocSeq = SourceLocationSequence;
ASTReader *Reader;
ModuleFile *F;
@@ -72,7 +74,7 @@ public:
uint64_t readInt() { return Record[Idx++]; }
ArrayRef<uint64_t> readIntArray(unsigned Len) {
- auto Array = llvm::makeArrayRef(Record).slice(Idx, Len);
+ auto Array = llvm::ArrayRef(Record).slice(Idx, Len);
Idx += Len;
return Array;
}
@@ -153,15 +155,19 @@ public:
/// Reads a TemplateArgumentLoc, advancing Idx.
TemplateArgumentLoc readTemplateArgumentLoc();
+ void readTemplateArgumentListInfo(TemplateArgumentListInfo &Result);
+
const ASTTemplateArgumentListInfo*
readASTTemplateArgumentListInfo();
+ // Reads a concept reference from the given record.
+ ConceptReference *readConceptReference();
+
/// Reads a declarator info from the given record, advancing Idx.
TypeSourceInfo *readTypeSourceInfo();
/// Reads the location information for a type.
- void readTypeLoc(TypeLoc TL);
-
+ void readTypeLoc(TypeLoc TL, LocSeq *Seq = nullptr);
/// Map a local type ID within a given AST file to a global type ID.
serialization::TypeID getGlobalTypeID(unsigned LocalID) const {
@@ -271,13 +277,13 @@ public:
void readOMPChildren(OMPChildren *Data);
/// Read a source location, advancing Idx.
- SourceLocation readSourceLocation() {
- return Reader->ReadSourceLocation(*F, Record, Idx);
+ SourceLocation readSourceLocation(LocSeq *Seq = nullptr) {
+ return Reader->ReadSourceLocation(*F, Record, Idx, Seq);
}
/// Read a source range, advancing Idx.
- SourceRange readSourceRange() {
- return Reader->ReadSourceRange(*F, Record, Idx);
+ SourceRange readSourceRange(LocSeq *Seq = nullptr) {
+ return Reader->ReadSourceRange(*F, Record, Idx, Seq);
}
/// Read an arbitrary constant value, advancing Idx.
@@ -326,6 +332,11 @@ public:
/// Reads attributes from the current stream position, advancing Idx.
void readAttributes(AttrVec &Attrs);
+ /// Read an BTFTypeTagAttr object.
+ BTFTypeTagAttr *readBTFTypeTagAttr() {
+ return cast<BTFTypeTagAttr>(readAttr());
+ }
+
/// Reads a token out of a record, advancing Idx.
Token readToken() {
return Reader->ReadToken(*F, Record, Idx);
@@ -350,7 +361,7 @@ struct SavedStreamPosition {
~SavedStreamPosition() {
if (llvm::Error Err = Cursor.JumpToBit(Offset))
llvm::report_fatal_error(
- "Cursor should always be able to go back, failed: " +
+ llvm::Twine("Cursor should always be able to go back, failed: ") +
toString(std::move(Err)));
}
@@ -359,10 +370,6 @@ private:
uint64_t Offset;
};
-inline void PCHValidator::Error(const char *Msg) {
- Reader.Error(Msg);
-}
-
} // namespace clang
#endif
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ASTRecordWriter.h b/contrib/llvm-project/clang/include/clang/Serialization/ASTRecordWriter.h
index 0dc69bd3f3bd..9a64735c9fa5 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ASTRecordWriter.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ASTRecordWriter.h
@@ -17,6 +17,7 @@
#include "clang/AST/AbstractBasicWriter.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/Serialization/ASTWriter.h"
+#include "clang/Serialization/SourceLocationEncoding.h"
namespace clang {
@@ -25,6 +26,8 @@ class TypeLoc;
/// An object for streaming information to a record.
class ASTRecordWriter
: public serialization::DataStreamBasicWriter<ASTRecordWriter> {
+ using LocSeq = SourceLocationSequence;
+
ASTWriter *Writer;
ASTWriter::RecordDataImpl *Record;
@@ -123,21 +126,24 @@ public:
AddStmt(const_cast<Stmt*>(S));
}
+ /// Write an BTFTypeTagAttr object.
+ void writeBTFTypeTagAttr(const BTFTypeTagAttr *A) { AddAttr(A); }
+
/// Add a definition for the given function to the queue of statements
/// to emit.
void AddFunctionDefinition(const FunctionDecl *FD);
/// Emit a source location.
- void AddSourceLocation(SourceLocation Loc) {
- return Writer->AddSourceLocation(Loc, *Record);
+ void AddSourceLocation(SourceLocation Loc, LocSeq *Seq = nullptr) {
+ return Writer->AddSourceLocation(Loc, *Record, Seq);
}
void writeSourceLocation(SourceLocation Loc) {
AddSourceLocation(Loc);
}
/// Emit a source range.
- void AddSourceRange(SourceRange Range) {
- return Writer->AddSourceRange(Range, *Record);
+ void AddSourceRange(SourceRange Range, LocSeq *Seq = nullptr) {
+ return Writer->AddSourceRange(Range, *Record, Seq);
}
void writeBool(bool Value) {
@@ -203,7 +209,7 @@ public:
void AddTypeSourceInfo(TypeSourceInfo *TInfo);
/// Emits source location information for a type. Does not emit the type.
- void AddTypeLoc(TypeLoc TL);
+ void AddTypeLoc(TypeLoc TL, LocSeq *Seq = nullptr);
/// Emits a template argument location info.
void AddTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind,
@@ -216,6 +222,9 @@ public:
void AddASTTemplateArgumentListInfo(
const ASTTemplateArgumentListInfo *ASTTemplArgList);
+ // Emits a concept reference.
+ void AddConceptReference(const ConceptReference *CR);
+
/// Emit a reference to a declaration.
void AddDeclRef(const Decl *D) {
return Writer->AddDeclRef(D, *Record);
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h b/contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h
index ac88cb0a3177..de69f99003d8 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ASTWriter.h
@@ -18,12 +18,14 @@
#include "clang/AST/Decl.h"
#include "clang/AST/Type.h"
#include "clang/Basic/LLVM.h"
+#include "clang/Basic/Module.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaConsumer.h"
#include "clang/Serialization/ASTBitCodes.h"
#include "clang/Serialization/ASTDeserializationListener.h"
#include "clang/Serialization/PCHContainerOperations.h"
+#include "clang/Serialization/SourceLocationEncoding.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
@@ -43,26 +45,13 @@
#include <utility>
#include <vector>
-namespace llvm {
-
-class APFloat;
-class APInt;
-class APSInt;
-
-} // namespace llvm
-
namespace clang {
class ASTContext;
class ASTReader;
-class ASTUnresolvedSet;
class Attr;
-class CXXBaseSpecifier;
-class CXXCtorInitializer;
class CXXRecordDecl;
-class CXXTemporary;
class FileEntry;
-class FPOptions;
class FPOptionsOverride;
class FunctionDecl;
class HeaderSearch;
@@ -79,16 +68,13 @@ class NamedDecl;
class ObjCInterfaceDecl;
class PreprocessingRecord;
class Preprocessor;
-struct QualifierInfo;
class RecordDecl;
class Sema;
class SourceManager;
class Stmt;
class StoredDeclsList;
class SwitchCase;
-class TemplateParameterList;
class Token;
-class TypeSourceInfo;
/// Writes an AST file containing the contents of a translation unit.
///
@@ -119,6 +105,8 @@ private:
using TypeIdxMap = llvm::DenseMap<QualType, serialization::TypeIdx,
serialization::UnsafeQualTypeDenseMapInfo>;
+ using LocSeq = SourceLocationSequence;
+
/// The bitstream writer used to emit this precompiled header.
llvm::BitstreamWriter &Stream;
@@ -140,10 +128,17 @@ private:
/// The module we're currently writing, if any.
Module *WritingModule = nullptr;
- /// The offset of the first bit inside the AST_BLOCK.
+ /// The byte range representing all the UNHASHED_CONTROL_BLOCK.
+ std::pair<uint64_t, uint64_t> UnhashedControlBlockRange;
+ /// The bit offset of the AST block hash blob.
+ uint64_t ASTBlockHashOffset = 0;
+ /// The bit offset of the signature blob.
+ uint64_t SignatureOffset = 0;
+
+ /// The bit offset of the first bit inside the AST_BLOCK.
uint64_t ASTBlockStartOffset = 0;
- /// The range representing all the AST_BLOCK.
+ /// The byte range representing all the AST_BLOCK.
std::pair<uint64_t, uint64_t> ASTBlockRange;
/// The base directory for any relative paths we emit.
@@ -155,6 +150,11 @@ private:
/// file is up to date, but not otherwise.
bool IncludeTimestamps;
+ /// Indicates whether the AST file being written is an implicit module.
+ /// If that's the case, we may be able to skip writing some information that
+ /// are guaranteed to be the same in the importer by the context hash.
+ bool BuildingImplicitModule = false;
+
/// Indicates when the AST writing is actively performing
/// serialization, rather than just queueing updates.
bool WritingAST = false;
@@ -456,6 +456,41 @@ private:
std::vector<std::unique_ptr<ModuleFileExtensionWriter>>
ModuleFileExtensionWriters;
+ /// Mapping from a source location entry to whether it is affecting or not.
+ llvm::BitVector IsSLocAffecting;
+
+ /// Mapping from \c FileID to an index into the FileID adjustment table.
+ std::vector<FileID> NonAffectingFileIDs;
+ std::vector<unsigned> NonAffectingFileIDAdjustments;
+
+ /// Mapping from an offset to an index into the offset adjustment table.
+ std::vector<SourceRange> NonAffectingRanges;
+ std::vector<SourceLocation::UIntTy> NonAffectingOffsetAdjustments;
+
+ /// Collects input files that didn't affect compilation of the current module,
+ /// and initializes data structures necessary for leaving those files out
+ /// during \c SourceManager serialization.
+ void collectNonAffectingInputFiles();
+
+ /// Returns an adjusted \c FileID, accounting for any non-affecting input
+ /// files.
+ FileID getAdjustedFileID(FileID FID) const;
+ /// Returns an adjusted number of \c FileIDs created within the specified \c
+ /// FileID, accounting for any non-affecting input files.
+ unsigned getAdjustedNumCreatedFIDs(FileID FID) const;
+ /// Returns an adjusted \c SourceLocation, accounting for any non-affecting
+ /// input files.
+ SourceLocation getAdjustedLocation(SourceLocation Loc) const;
+ /// Returns an adjusted \c SourceRange, accounting for any non-affecting input
+ /// files.
+ SourceRange getAdjustedRange(SourceRange Range) const;
+ /// Returns an adjusted \c SourceLocation offset, accounting for any
+ /// non-affecting input files.
+ SourceLocation::UIntTy getAdjustedOffset(SourceLocation::UIntTy Offset) const;
+ /// Returns an adjustment for offset into SourceManager, accounting for any
+ /// non-affecting input files.
+ SourceLocation::UIntTy getAdjustment(SourceLocation::UIntTy Offset) const;
+
/// Retrieve or create a submodule ID for this module.
unsigned getSubmoduleID(Module *Mod);
@@ -464,18 +499,16 @@ private:
void WriteBlockInfoBlock();
void WriteControlBlock(Preprocessor &PP, ASTContext &Context,
- StringRef isysroot, const std::string &OutputFile);
+ StringRef isysroot);
/// Write out the signature and diagnostic options, and return the signature.
- ASTFileSignature writeUnhashedControlBlock(Preprocessor &PP,
- ASTContext &Context);
+ void writeUnhashedControlBlock(Preprocessor &PP, ASTContext &Context);
+ ASTFileSignature backpatchSignature();
/// Calculate hash of the pcm content.
- static std::pair<ASTFileSignature, ASTFileSignature>
- createSignature(StringRef AllBytes, StringRef ASTBlockBytes);
+ std::pair<ASTFileSignature, ASTFileSignature> createSignature() const;
- void WriteInputFiles(SourceManager &SourceMgr, HeaderSearchOptions &HSOpts,
- bool Modules);
+ void WriteInputFiles(SourceManager &SourceMgr, HeaderSearchOptions &HSOpts);
void WriteSourceManagerBlock(SourceManager &SourceMgr,
const Preprocessor &PP);
void WritePreprocessor(const Preprocessor &PP, bool IsModule);
@@ -488,7 +521,6 @@ private:
bool isModule);
unsigned TypeExtQualAbbrev = 0;
- unsigned TypeFunctionProtoAbbrev = 0;
void WriteTypeAbbrevs();
void WriteType(QualType T);
@@ -532,17 +564,30 @@ private:
unsigned DeclEnumAbbrev = 0;
unsigned DeclObjCIvarAbbrev = 0;
unsigned DeclCXXMethodAbbrev = 0;
+ unsigned DeclDependentNonTemplateCXXMethodAbbrev = 0;
+ unsigned DeclTemplateCXXMethodAbbrev = 0;
+ unsigned DeclMemberSpecializedCXXMethodAbbrev = 0;
+ unsigned DeclTemplateSpecializedCXXMethodAbbrev = 0;
+ unsigned DeclDependentSpecializationCXXMethodAbbrev = 0;
+ unsigned DeclTemplateTypeParmAbbrev = 0;
+ unsigned DeclUsingShadowAbbrev = 0;
unsigned DeclRefExprAbbrev = 0;
unsigned CharacterLiteralAbbrev = 0;
unsigned IntegerLiteralAbbrev = 0;
unsigned ExprImplicitCastAbbrev = 0;
+ unsigned BinaryOperatorAbbrev = 0;
+ unsigned CompoundAssignOperatorAbbrev = 0;
+ unsigned CallExprAbbrev = 0;
+ unsigned CXXOperatorCallExprAbbrev = 0;
+ unsigned CXXMemberCallExprAbbrev = 0;
+
+ unsigned CompoundStmtAbbrev = 0;
void WriteDeclAbbrevs();
void WriteDecl(ASTContext &Context, Decl *D);
ASTFileSignature WriteASTCore(Sema &SemaRef, StringRef isysroot,
- const std::string &OutputFile,
Module *WritingModule);
public:
@@ -551,7 +596,7 @@ public:
ASTWriter(llvm::BitstreamWriter &Stream, SmallVectorImpl<char> &Buffer,
InMemoryModuleCache &ModuleCache,
ArrayRef<std::shared_ptr<ModuleFileExtension>> Extensions,
- bool IncludeTimestamps = true);
+ bool IncludeTimestamps = true, bool BuildingImplicitModule = false);
~ASTWriter() override;
ASTContext &getASTContext() const {
@@ -580,9 +625,8 @@ public:
///
/// \return the module signature, which eventually will be a hash of
/// the module but currently is merely a random 32-bit number.
- ASTFileSignature WriteAST(Sema &SemaRef, const std::string &OutputFile,
+ ASTFileSignature WriteAST(Sema &SemaRef, StringRef OutputFile,
Module *WritingModule, StringRef isysroot,
- bool hasErrors = false,
bool ShouldCacheASTInMemory = false);
/// Emit a token.
@@ -592,11 +636,16 @@ public:
void AddAlignPackInfo(const Sema::AlignPackInfo &Info,
RecordDataImpl &Record);
+ /// Emit a FileID.
+ void AddFileID(FileID FID, RecordDataImpl &Record);
+
/// Emit a source location.
- void AddSourceLocation(SourceLocation Loc, RecordDataImpl &Record);
+ void AddSourceLocation(SourceLocation Loc, RecordDataImpl &Record,
+ LocSeq *Seq = nullptr);
/// Emit a source range.
- void AddSourceRange(SourceRange Range, RecordDataImpl &Record);
+ void AddSourceRange(SourceRange Range, RecordDataImpl &Record,
+ LocSeq *Seq = nullptr);
/// Emit a reference to an identifier.
void AddIdentifierRef(const IdentifierInfo *II, RecordDataImpl &Record);
@@ -693,10 +742,6 @@ public:
return TypeExtQualAbbrev;
}
- unsigned getTypeFunctionProtoAbbrev() const {
- return TypeFunctionProtoAbbrev;
- }
-
unsigned getDeclParmVarAbbrev() const { return DeclParmVarAbbrev; }
unsigned getDeclRecordAbbrev() const { return DeclRecordAbbrev; }
unsigned getDeclTypedefAbbrev() const { return DeclTypedefAbbrev; }
@@ -704,16 +749,49 @@ public:
unsigned getDeclFieldAbbrev() const { return DeclFieldAbbrev; }
unsigned getDeclEnumAbbrev() const { return DeclEnumAbbrev; }
unsigned getDeclObjCIvarAbbrev() const { return DeclObjCIvarAbbrev; }
- unsigned getDeclCXXMethodAbbrev() const { return DeclCXXMethodAbbrev; }
+ unsigned getDeclCXXMethodAbbrev(FunctionDecl::TemplatedKind Kind) const {
+ switch (Kind) {
+ case FunctionDecl::TK_NonTemplate:
+ return DeclCXXMethodAbbrev;
+ case FunctionDecl::TK_FunctionTemplate:
+ return DeclTemplateCXXMethodAbbrev;
+ case FunctionDecl::TK_MemberSpecialization:
+ return DeclMemberSpecializedCXXMethodAbbrev;
+ case FunctionDecl::TK_FunctionTemplateSpecialization:
+ return DeclTemplateSpecializedCXXMethodAbbrev;
+ case FunctionDecl::TK_DependentNonTemplate:
+ return DeclDependentNonTemplateCXXMethodAbbrev;
+ case FunctionDecl::TK_DependentFunctionTemplateSpecialization:
+ return DeclDependentSpecializationCXXMethodAbbrev;
+ }
+ llvm_unreachable("Unknwon Template Kind!");
+ }
+ unsigned getDeclTemplateTypeParmAbbrev() const {
+ return DeclTemplateTypeParmAbbrev;
+ }
+ unsigned getDeclUsingShadowAbbrev() const { return DeclUsingShadowAbbrev; }
unsigned getDeclRefExprAbbrev() const { return DeclRefExprAbbrev; }
unsigned getCharacterLiteralAbbrev() const { return CharacterLiteralAbbrev; }
unsigned getIntegerLiteralAbbrev() const { return IntegerLiteralAbbrev; }
unsigned getExprImplicitCastAbbrev() const { return ExprImplicitCastAbbrev; }
+ unsigned getBinaryOperatorAbbrev() const { return BinaryOperatorAbbrev; }
+ unsigned getCompoundAssignOperatorAbbrev() const {
+ return CompoundAssignOperatorAbbrev;
+ }
+ unsigned getCallExprAbbrev() const { return CallExprAbbrev; }
+ unsigned getCXXOperatorCallExprAbbrev() { return CXXOperatorCallExprAbbrev; }
+ unsigned getCXXMemberCallExprAbbrev() { return CXXMemberCallExprAbbrev; }
+
+ unsigned getCompoundStmtAbbrev() const { return CompoundStmtAbbrev; }
bool hasChain() const { return Chain; }
ASTReader *getChain() const { return Chain; }
+ bool isWritingStdCXXNamedModules() const {
+ return WritingModule && WritingModule->isNamedModule();
+ }
+
private:
// ASTDeserializationListener implementation
void ReaderInitialized(ASTReader *Reader) override;
@@ -784,6 +862,7 @@ public:
std::shared_ptr<PCHBuffer> Buffer,
ArrayRef<std::shared_ptr<ModuleFileExtension>> Extensions,
bool AllowASTWithErrors = false, bool IncludeTimestamps = true,
+ bool BuildingImplicitModule = false,
bool ShouldCacheASTInMemory = false);
~PCHGenerator() override;
@@ -794,6 +873,46 @@ public:
bool hasEmittedPCH() const { return Buffer->IsComplete; }
};
+/// A simple helper class to pack several bits in order into (a) 32 bit
+/// integer(s).
+class BitsPacker {
+ constexpr static uint32_t BitIndexUpbound = 32u;
+
+public:
+ BitsPacker() = default;
+ BitsPacker(const BitsPacker &) = delete;
+ BitsPacker(BitsPacker &&) = delete;
+ BitsPacker operator=(const BitsPacker &) = delete;
+ BitsPacker operator=(BitsPacker &&) = delete;
+ ~BitsPacker() = default;
+
+ bool canWriteNextNBits(uint32_t BitsWidth) const {
+ return CurrentBitIndex + BitsWidth < BitIndexUpbound;
+ }
+
+ void reset(uint32_t Value) {
+ UnderlyingValue = Value;
+ CurrentBitIndex = 0;
+ }
+
+ void addBit(bool Value) { addBits(Value, 1); }
+ void addBits(uint32_t Value, uint32_t BitsWidth) {
+ assert(BitsWidth < BitIndexUpbound);
+ assert((Value < (1u << BitsWidth)) && "Passing narrower bit width!");
+ assert(canWriteNextNBits(BitsWidth) &&
+ "Inserting too much bits into a value!");
+
+ UnderlyingValue |= Value << CurrentBitIndex;
+ CurrentBitIndex += BitsWidth;
+ }
+
+ operator uint32_t() { return UnderlyingValue; }
+
+private:
+ uint32_t UnderlyingValue = 0;
+ uint32_t CurrentBitIndex = 0;
+};
+
} // namespace clang
#endif // LLVM_CLANG_SERIALIZATION_ASTWRITER_H
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/GlobalModuleIndex.h b/contrib/llvm-project/clang/include/clang/Serialization/GlobalModuleIndex.h
index 5f4812626224..93d674e44003 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/GlobalModuleIndex.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/GlobalModuleIndex.h
@@ -31,8 +31,6 @@ class MemoryBuffer;
namespace clang {
-class DirectoryEntry;
-class FileEntry;
class FileManager;
class IdentifierIterator;
class PCHContainerOperations;
@@ -69,20 +67,20 @@ class GlobalModuleIndex {
/// Information about a given module file.
struct ModuleInfo {
- ModuleInfo() : File(), Size(), ModTime() { }
+ ModuleInfo() = default;
/// The module file, once it has been resolved.
- ModuleFile *File;
+ ModuleFile *File = nullptr;
/// The module file name.
std::string FileName;
/// Size of the module file at the time the global index was built.
- off_t Size;
+ off_t Size = 0;
/// Modification time of the module file at the time the global
/// index was built.
- time_t ModTime;
+ time_t ModTime = 0;
/// The module IDs on which this module directly depends.
/// FIXME: We don't really need a vector here.
@@ -138,12 +136,6 @@ public:
/// The caller accepts ownership of the returned object.
IdentifierIterator *createIdentifierIterator() const;
- /// Retrieve the set of modules that have up-to-date indexes.
- ///
- /// \param ModuleFiles Will be populated with the set of module files that
- /// have been indexed.
- void getKnownModules(llvm::SmallVectorImpl<ModuleFile *> &ModuleFiles);
-
/// Retrieve the set of module files on which the given module file
/// directly depends.
void getModuleDependencies(ModuleFile *File,
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/InMemoryModuleCache.h b/contrib/llvm-project/clang/include/clang/Serialization/InMemoryModuleCache.h
index 7b5b5c1cf1be..fc3ba334fc64 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/InMemoryModuleCache.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/InMemoryModuleCache.h
@@ -10,7 +10,6 @@
#define LLVM_CLANG_SERIALIZATION_INMEMORYMODULECACHE_H
#include "llvm/ADT/IntrusiveRefCntPtr.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/MemoryBuffer.h"
#include <memory>
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ModuleFile.h b/contrib/llvm-project/clang/include/clang/Serialization/ModuleFile.h
index b1c8a8c8e72b..9a14129d72ff 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ModuleFile.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ModuleFile.h
@@ -20,6 +20,7 @@
#include "clang/Serialization/ASTBitCodes.h"
#include "clang/Serialization/ContinuousRangeMap.h"
#include "clang/Serialization/ModuleFileExtension.h"
+#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SetVector.h"
@@ -58,6 +59,19 @@ enum ModuleKind {
MK_PrebuiltModule
};
+/// The input file info that has been loaded from an AST file.
+struct InputFileInfo {
+ std::string FilenameAsRequested;
+ std::string Filename;
+ uint64_t ContentHash;
+ off_t StoredSize;
+ time_t StoredTime;
+ bool Overridden;
+ bool Transient;
+ bool TopLevel;
+ bool ModuleMap;
+};
+
/// The input file that has been loaded from this AST file, along with
/// bools indicating whether this was an overridden buffer or if it was
/// out-of-date or not-found.
@@ -90,10 +104,10 @@ public:
return File;
}
- OptionalFileEntryRefDegradesToFileEntryPtr getFile() const {
+ OptionalFileEntryRef getFile() const {
if (auto *P = Val.getPointer())
return FileEntryRef(*P);
- return None;
+ return std::nullopt;
}
bool isOverridden() const { return Val.getInt() == Overridden; }
bool isOutOfDate() const { return Val.getInt() == OutOfDate; }
@@ -109,8 +123,8 @@ public:
/// other modules.
class ModuleFile {
public:
- ModuleFile(ModuleKind Kind, unsigned Generation)
- : Kind(Kind), Generation(Generation) {}
+ ModuleFile(ModuleKind Kind, FileEntryRef File, unsigned Generation)
+ : Kind(Kind), File(File), Generation(Generation) {}
~ModuleFile();
// === General information ===
@@ -147,15 +161,14 @@ public:
/// build this AST file.
FileID OriginalSourceFileID;
- /// The directory that the PCH was originally created in. Used to
- /// allow resolving headers even after headers+PCH was moved to a new path.
- std::string OriginalDir;
-
std::string ModuleMapPath;
/// Whether this precompiled header is a relocatable PCH file.
bool RelocatablePCH = false;
+ /// Whether this module file is a standard C++ module.
+ bool StandardCXXModule = false;
+
/// Whether timestamps are included in this module file.
bool HasTimestamps = false;
@@ -163,7 +176,7 @@ public:
bool DidReadTopLevelSubmodule = false;
/// The file entry for the module file.
- OptionalFileEntryRefDegradesToFileEntryPtr File;
+ FileEntryRef File;
/// The signature of the module file, which may be used instead of the size
/// and modification time to identify this particular file.
@@ -173,6 +186,9 @@ public:
/// unique module files based on AST contents.
ASTFileSignature ASTBlockHash;
+ /// The bit vector denoting usage of each header search entry (true = used).
+ llvm::BitVector SearchPathUsage;
+
/// Whether this module has been directly imported by the
/// user.
bool DirectlyImported = false;
@@ -182,7 +198,7 @@ public:
/// The memory buffer that stores the data associated with
/// this AST file, owned by the InMemoryModuleCache.
- llvm::MemoryBuffer *Buffer;
+ llvm::MemoryBuffer *Buffer = nullptr;
/// The size of this file, in bits.
uint64_t SizeInBits = 0;
@@ -229,12 +245,18 @@ public:
/// The cursor to the start of the input-files block.
llvm::BitstreamCursor InputFilesCursor;
- /// Offsets for all of the input file entries in the AST file.
+ /// Absolute offset of the start of the input-files block.
+ uint64_t InputFilesOffsetBase = 0;
+
+ /// Relative offsets for all of the input file entries in the AST file.
const llvm::support::unaligned_uint64_t *InputFileOffsets = nullptr;
/// The input files that have been loaded from this AST file.
std::vector<InputFile> InputFilesLoaded;
+ /// The input file infos that have been loaded from this AST file.
+ std::vector<InputFileInfo> InputFileInfosLoaded;
+
// All user input files reside at the index range [0, NumUserInputFiles), and
// system input files reside at [NumUserInputFiles, InputFilesLoaded.size()).
unsigned NumUserInputFiles = 0;
@@ -270,9 +292,6 @@ public:
/// AST file.
const uint32_t *SLocEntryOffsets = nullptr;
- /// SLocEntries that we're going to preload.
- SmallVector<uint64_t, 4> PreloadSLocEntries;
-
/// Remapping table for source locations in this module.
ContinuousRangeMap<SourceLocation::UIntTy, SourceLocation::IntTy, 2>
SLocRemap;
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ModuleFileExtension.h b/contrib/llvm-project/clang/include/clang/Serialization/ModuleFileExtension.h
index 34ea870724a4..d7d456c8b5db 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ModuleFileExtension.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ModuleFileExtension.h
@@ -11,13 +11,14 @@
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/Support/ExtensibleRTTI.h"
+#include "llvm/Support/HashBuilder.h"
+#include "llvm/Support/MD5.h"
#include <memory>
#include <string>
namespace llvm {
class BitstreamCursor;
class BitstreamWriter;
-class hash_code;
class raw_ostream;
}
@@ -74,19 +75,19 @@ public:
virtual ModuleFileExtensionMetadata getExtensionMetadata() const = 0;
/// Hash information about the presence of this extension into the
- /// module hash code.
+ /// module hash.
///
- /// The module hash code is used to distinguish different variants
- /// of a module that are incompatible. If the presence, absence, or
- /// version of the module file extension should force the creation
- /// of a separate set of module files, override this method to
- /// combine that distinguishing information into the module hash
- /// code.
+ /// The module hash is used to distinguish different variants of a module that
+ /// are incompatible. If the presence, absence, or version of the module file
+ /// extension should force the creation of a separate set of module files,
+ /// override this method to combine that distinguishing information into the
+ /// module hash.
///
- /// The default implementation of this function simply returns the
- /// hash code as given, so the presence/absence of this extension
- /// does not distinguish module files.
- virtual llvm::hash_code hashExtension(llvm::hash_code c) const;
+ /// The default implementation of this function simply does nothing, so the
+ /// presence/absence of this extension does not distinguish module files.
+ using ExtensionHashBuilder =
+ llvm::HashBuilder<llvm::MD5, llvm::endianness::native>;
+ virtual void hashExtension(ExtensionHashBuilder &HBuilder) const;
/// Create a new module file extension writer, which will be
/// responsible for writing the extension contents into a particular
@@ -152,4 +153,4 @@ public:
} // end namespace clang
-#endif // LLVM_CLANG_FRONTEND_MODULEFILEEXTENSION_H
+#endif // LLVM_CLANG_SERIALIZATION_MODULEFILEEXTENSION_H
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/ModuleManager.h b/contrib/llvm-project/clang/include/clang/Serialization/ModuleManager.h
index 7081eedad4b4..3bd379acf7ed 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/ModuleManager.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/ModuleManager.h
@@ -39,7 +39,6 @@ class FileManager;
class GlobalModuleIndex;
class HeaderSearch;
class InMemoryModuleCache;
-class ModuleMap;
class PCHContainerReader;
namespace serialization {
@@ -105,10 +104,6 @@ class ModuleManager {
Stack.reserve(N);
}
- ~VisitState() {
- delete NextState;
- }
-
/// The stack used when marking the imports of a particular module
/// as not-to-be-visited.
SmallVector<ModuleFile *, 4> Stack;
@@ -121,14 +116,14 @@ class ModuleManager {
unsigned NextVisitNumber = 1;
/// The next visit state.
- VisitState *NextState = nullptr;
+ std::unique_ptr<VisitState> NextState;
};
/// The first visit() state in the chain.
- VisitState *FirstVisitState = nullptr;
+ std::unique_ptr<VisitState> FirstVisitState;
- VisitState *allocateVisitState();
- void returnVisitState(VisitState *State);
+ std::unique_ptr<VisitState> allocateVisitState();
+ void returnVisitState(std::unique_ptr<VisitState> State);
public:
using ModuleIterator = llvm::pointee_iterator<
@@ -142,7 +137,6 @@ public:
explicit ModuleManager(FileManager &FileMgr, InMemoryModuleCache &ModuleCache,
const PCHContainerReader &PCHContainerRdr,
const HeaderSearch &HeaderSearchInfo);
- ~ModuleManager();
/// Forward iterator to traverse all loaded modules.
ModuleIterator begin() { return Chain.begin(); }
@@ -255,7 +249,7 @@ public:
std::string &ErrorStr);
/// Remove the modules starting from First (to the end).
- void removeModules(ModuleIterator First, ModuleMap *modMap);
+ void removeModules(ModuleIterator First);
/// Add an in-memory buffer the list of known buffers
void addInMemoryBuffer(StringRef FileName,
@@ -308,7 +302,7 @@ public:
/// modification time criteria, false if the file is either available and
/// suitable, or is missing.
bool lookupModuleFile(StringRef FileName, off_t ExpectedSize,
- time_t ExpectedModTime, Optional<FileEntryRef> &File);
+ time_t ExpectedModTime, OptionalFileEntryRef &File);
/// View the graphviz representation of the module graph.
void viewGraph();
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/PCHContainerOperations.h b/contrib/llvm-project/clang/include/clang/Serialization/PCHContainerOperations.h
index 33fc4a0a24e0..be10feb5e351 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/PCHContainerOperations.h
+++ b/contrib/llvm-project/clang/include/clang/Serialization/PCHContainerOperations.h
@@ -22,8 +22,6 @@ class raw_pwrite_stream;
namespace clang {
class ASTConsumer;
-class CodeGenOptions;
-class DiagnosticsEngine;
class CompilerInstance;
struct PCHBuffer {
@@ -58,7 +56,7 @@ class PCHContainerReader {
public:
virtual ~PCHContainerReader() = 0;
/// Equivalent to the format passed to -fmodule-format=
- virtual llvm::StringRef getFormat() const = 0;
+ virtual llvm::ArrayRef<llvm::StringRef> getFormats() const = 0;
/// Returns the serialized AST inside the PCH container Buffer.
virtual llvm::StringRef ExtractPCH(llvm::MemoryBufferRef Buffer) const = 0;
@@ -80,8 +78,7 @@ class RawPCHContainerWriter : public PCHContainerWriter {
/// Implements read operations for a raw pass-through PCH container.
class RawPCHContainerReader : public PCHContainerReader {
- llvm::StringRef getFormat() const override { return "raw"; }
-
+ llvm::ArrayRef<llvm::StringRef> getFormats() const override;
/// Simply returns the buffer contained in Buffer.
llvm::StringRef ExtractPCH(llvm::MemoryBufferRef Buffer) const override;
};
@@ -89,7 +86,9 @@ class RawPCHContainerReader : public PCHContainerReader {
/// A registry of PCHContainerWriter and -Reader objects for different formats.
class PCHContainerOperations {
llvm::StringMap<std::unique_ptr<PCHContainerWriter>> Writers;
- llvm::StringMap<std::unique_ptr<PCHContainerReader>> Readers;
+ llvm::StringMap<PCHContainerReader *> Readers;
+ llvm::SmallVector<std::unique_ptr<PCHContainerReader>> OwnedReaders;
+
public:
/// Automatically registers a RawPCHContainerWriter and
/// RawPCHContainerReader.
@@ -98,13 +97,17 @@ public:
Writers[Writer->getFormat()] = std::move(Writer);
}
void registerReader(std::unique_ptr<PCHContainerReader> Reader) {
- Readers[Reader->getFormat()] = std::move(Reader);
+ assert(!Reader->getFormats().empty() &&
+ "PCHContainerReader must handle >=1 format");
+ for (llvm::StringRef Fmt : Reader->getFormats())
+ Readers[Fmt] = Reader.get();
+ OwnedReaders.push_back(std::move(Reader));
}
const PCHContainerWriter *getWriterOrNull(llvm::StringRef Format) {
return Writers[Format].get();
}
const PCHContainerReader *getReaderOrNull(llvm::StringRef Format) {
- return Readers[Format].get();
+ return Readers[Format];
}
const PCHContainerReader &getRawReader() {
return *getReaderOrNull("raw");
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/SourceLocationEncoding.h b/contrib/llvm-project/clang/include/clang/Serialization/SourceLocationEncoding.h
new file mode 100644
index 000000000000..9bb0dbe2e4d6
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Serialization/SourceLocationEncoding.h
@@ -0,0 +1,163 @@
+//===--- SourceLocationEncoding.h - Small serialized locations --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Source locations are stored pervasively in the AST, making up a third of
+// the size of typical serialized files. Storing them efficiently is important.
+//
+// We use integers optimized by VBR-encoding, because:
+// - when abbreviations cannot be used, VBR6 encoding is our only choice
+// - in the worst case a SourceLocation can be ~any 32-bit number, but in
+// practice they are highly predictable
+//
+// We encode the integer so that likely values encode as small numbers that
+// turn into few VBR chunks:
+// - the invalid sentinel location is a very common value: it encodes as 0
+// - the "macro or not" bit is stored at the bottom of the integer
+// (rather than at the top, as in memory), so macro locations can have
+// small representations.
+// - related locations (e.g. of a left and right paren pair) are usually
+// similar, so when encoding a sequence of locations we store only
+// differences between successive elements.
+//
+//===----------------------------------------------------------------------===//
+
+#include <climits>
+#include "clang/Basic/SourceLocation.h"
+
+#ifndef LLVM_CLANG_SERIALIZATION_SOURCELOCATIONENCODING_H
+#define LLVM_CLANG_SERIALIZATION_SOURCELOCATIONENCODING_H
+
+namespace clang {
+class SourceLocationSequence;
+
+/// Serialized encoding of SourceLocations without context.
+/// Optimized to have small unsigned values (=> small after VBR encoding).
+///
+// Macro locations have the top bit set, we rotate by one so it is the low bit.
+class SourceLocationEncoding {
+ using UIntTy = SourceLocation::UIntTy;
+ constexpr static unsigned UIntBits = CHAR_BIT * sizeof(UIntTy);
+
+ static UIntTy encodeRaw(UIntTy Raw) {
+ return (Raw << 1) | (Raw >> (UIntBits - 1));
+ }
+ static UIntTy decodeRaw(UIntTy Raw) {
+ return (Raw >> 1) | (Raw << (UIntBits - 1));
+ }
+ friend SourceLocationSequence;
+
+public:
+ static uint64_t encode(SourceLocation Loc,
+ SourceLocationSequence * = nullptr);
+ static SourceLocation decode(uint64_t, SourceLocationSequence * = nullptr);
+};
+
+/// Serialized encoding of a sequence of SourceLocations.
+///
+/// Optimized to produce small values when locations with the sequence are
+/// similar. Each element can be delta-encoded against the last nonzero element.
+///
+/// Sequences should be started by creating a SourceLocationSequence::State,
+/// and then passed around as SourceLocationSequence*. Example:
+///
+/// // establishes a sequence
+/// void EmitTopLevelThing() {
+/// SourceLocationSequence::State Seq;
+/// EmitContainedThing(Seq);
+/// EmitRecursiveThing(Seq);
+/// }
+///
+/// // optionally part of a sequence
+/// void EmitContainedThing(SourceLocationSequence *Seq = nullptr) {
+/// Record.push_back(SourceLocationEncoding::encode(SomeLoc, Seq));
+/// }
+///
+/// // establishes a sequence if there isn't one already
+/// void EmitRecursiveThing(SourceLocationSequence *ParentSeq = nullptr) {
+/// SourceLocationSequence::State Seq(ParentSeq);
+/// Record.push_back(SourceLocationEncoding::encode(SomeLoc, Seq));
+/// EmitRecursiveThing(Seq);
+/// }
+///
+class SourceLocationSequence {
+ using UIntTy = SourceLocation::UIntTy;
+ using EncodedTy = uint64_t;
+ constexpr static auto UIntBits = SourceLocationEncoding::UIntBits;
+ static_assert(sizeof(EncodedTy) > sizeof(UIntTy), "Need one extra bit!");
+
+ // Prev stores the rotated last nonzero location.
+ UIntTy &Prev;
+
+ // Zig-zag encoding turns small signed integers into small unsigned integers.
+ // 0 => 0, -1 => 1, 1 => 2, -2 => 3, ...
+ static UIntTy zigZag(UIntTy V) {
+ UIntTy Sign = (V & (1 << (UIntBits - 1))) ? UIntTy(-1) : UIntTy(0);
+ return Sign ^ (V << 1);
+ }
+ static UIntTy zagZig(UIntTy V) { return (V >> 1) ^ -(V & 1); }
+
+ SourceLocationSequence(UIntTy &Prev) : Prev(Prev) {}
+
+ EncodedTy encodeRaw(UIntTy Raw) {
+ if (Raw == 0)
+ return 0;
+ UIntTy Rotated = SourceLocationEncoding::encodeRaw(Raw);
+ if (Prev == 0)
+ return Prev = Rotated;
+ UIntTy Delta = Rotated - Prev;
+ Prev = Rotated;
+ // Exactly one 33 bit value is possible! (1 << 32).
+ // This is because we have two representations of zero: trivial & relative.
+ return 1 + EncodedTy{zigZag(Delta)};
+ }
+ UIntTy decodeRaw(EncodedTy Encoded) {
+ if (Encoded == 0)
+ return 0;
+ if (Prev == 0)
+ return SourceLocationEncoding::decodeRaw(Prev = Encoded);
+ return SourceLocationEncoding::decodeRaw(Prev += zagZig(Encoded - 1));
+ }
+
+public:
+ SourceLocation decode(EncodedTy Encoded) {
+ return SourceLocation::getFromRawEncoding(decodeRaw(Encoded));
+ }
+ EncodedTy encode(SourceLocation Loc) {
+ return encodeRaw(Loc.getRawEncoding());
+ }
+
+ class State;
+};
+
+/// This object establishes a SourceLocationSequence.
+class SourceLocationSequence::State {
+ UIntTy Prev = 0;
+ SourceLocationSequence Seq;
+
+public:
+ // If Parent is provided and non-null, then this root becomes part of that
+ // enclosing sequence instead of establishing a new one.
+ State(SourceLocationSequence *Parent = nullptr)
+ : Seq(Parent ? Parent->Prev : Prev) {}
+
+ // Implicit conversion for uniform use of roots vs propagated sequences.
+ operator SourceLocationSequence *() { return &Seq; }
+};
+
+inline uint64_t SourceLocationEncoding::encode(SourceLocation Loc,
+ SourceLocationSequence *Seq) {
+ return Seq ? Seq->encode(Loc) : encodeRaw(Loc.getRawEncoding());
+}
+inline SourceLocation
+SourceLocationEncoding::decode(uint64_t Encoded, SourceLocationSequence *Seq) {
+ return Seq ? Seq->decode(Encoded)
+ : SourceLocation::getFromRawEncoding(decodeRaw(Encoded));
+}
+
+} // namespace clang
+#endif
diff --git a/contrib/llvm-project/clang/include/clang/Serialization/TypeBitCodes.def b/contrib/llvm-project/clang/include/clang/Serialization/TypeBitCodes.def
index e92e05810648..89ae1a2fa395 100644
--- a/contrib/llvm-project/clang/include/clang/Serialization/TypeBitCodes.def
+++ b/contrib/llvm-project/clang/include/clang/Serialization/TypeBitCodes.def
@@ -58,9 +58,11 @@ TYPE_BIT_CODE(DependentSizedExtVector, DEPENDENT_SIZED_EXT_VECTOR, 46)
TYPE_BIT_CODE(DependentAddressSpace, DEPENDENT_ADDRESS_SPACE, 47)
TYPE_BIT_CODE(DependentVector, DEPENDENT_SIZED_VECTOR, 48)
TYPE_BIT_CODE(MacroQualified, MACRO_QUALIFIED, 49)
-TYPE_BIT_CODE(ExtInt, EXT_INT, 50)
-TYPE_BIT_CODE(DependentExtInt, DEPENDENT_EXT_INT, 51)
+TYPE_BIT_CODE(BitInt, BIT_INT, 50)
+TYPE_BIT_CODE(DependentBitInt, DEPENDENT_BIT_INT, 51)
TYPE_BIT_CODE(ConstantMatrix, CONSTANT_MATRIX, 52)
TYPE_BIT_CODE(DependentSizedMatrix, DEPENDENT_SIZE_MATRIX, 53)
+TYPE_BIT_CODE(Using, USING, 54)
+TYPE_BIT_CODE(BTFTagAttributed, BTFTAG_ATTRIBUTED, 55)
#undef TYPE_BIT_CODE
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h
index e2be957821b9..bdfe3901c5b8 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h
@@ -11,19 +11,15 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_CLANGSACHECKERS_H
-#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_CLANGSACHECKERS_H
+#ifndef LLVM_CLANG_STATICANALYZER_CHECKERS_BUILTINCHECKERREGISTRATION_H
+#define LLVM_CLANG_STATICANALYZER_CHECKERS_BUILTINCHECKERREGISTRATION_H
#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
namespace clang {
-
-class LangOptions;
-
namespace ento {
class CheckerManager;
-class CheckerRegistry;
#define GET_CHECKERS
#define CHECKER(FULLNAME, CLASS, HELPTEXT, DOC_URI, IS_HIDDEN) \
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/CheckerBase.td b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/CheckerBase.td
index 98d26aaa637d..bc1da9bb3f90 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/CheckerBase.td
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/CheckerBase.td
@@ -86,15 +86,14 @@ class ParentPackage<Package P> { Package ParentPackage = P; }
class HelpText<string text> { string HelpText = text; }
/// Describes what kind of documentation exists for the checker.
-class DocumentationEnum<bits<2> val> {
- bits<2> Documentation = val;
+class DocumentationEnum<bits<1> val> {
+ bits<1> Documentation = val;
}
def NotDocumented : DocumentationEnum<0>;
def HasDocumentation : DocumentationEnum<1>;
-def HasAlphaDocumentation : DocumentationEnum<2>;
class Documentation<DocumentationEnum val> {
- bits<2> Documentation = val.Documentation;
+ bits<1> Documentation = val.Documentation;
}
/// Describes a checker. Every builtin checker has to be registered with the use
@@ -114,7 +113,7 @@ class Checker<string name = ""> {
list<Checker> Dependencies;
// This field is optional.
list<Checker> WeakDependencies;
- bits<2> Documentation;
+ bits<1> Documentation;
Package ParentPackage;
bit Hidden = 0;
}
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td
index 444b00d73f0b..e7774e5a9392 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Checkers.td
@@ -36,6 +36,7 @@ def CoreAlpha : Package<"core">, ParentPackage<Alpha>;
// Note: OptIn is *not* intended for checkers that are too noisy to be on by
// default. Such checkers belong in the alpha package.
def OptIn : Package<"optin">;
+def CoreOptIn : Package<"core">, ParentPackage<OptIn>;
// In the Portability package reside checkers for finding code that relies on
// implementation-defined behavior. Such checks are wanted for cross-platform
@@ -71,8 +72,11 @@ def InsecureAPI : Package<"insecureAPI">, ParentPackage<Security>;
def SecurityAlpha : Package<"security">, ParentPackage<Alpha>;
def Taint : Package<"taint">, ParentPackage<SecurityAlpha>;
-def CERT : Package<"cert">, ParentPackage<SecurityAlpha>;
-def POS : Package<"pos">, ParentPackage<CERT>;
+def CERT : Package<"cert">, ParentPackage<Security>;
+def ENV : Package<"env">, ParentPackage<CERT>;
+
+def CERTAlpha : Package<"cert">, ParentPackage<SecurityAlpha>;
+def POSAlpha : Package<"pos">, ParentPackage<CERTAlpha>;
def Unix : Package<"unix">;
def UnixAlpha : Package<"unix">, ParentPackage<Alpha>;
@@ -125,6 +129,19 @@ def WebKitAlpha : Package<"webkit">, ParentPackage<Alpha>;
let ParentPackage = Core in {
+def BitwiseShiftChecker : Checker<"BitwiseShift">,
+ HelpText<"Finds cases where bitwise shift operation causes undefined behaviour.">,
+ CheckerOptions<[
+ CmdLineOption<Boolean,
+ "Pedantic",
+ "If set to true, the checker reports undefined behavior even "
+ "if it is supported by most compilers. (This flag has no "
+ "effect in C++20 where these constructs are legal.)",
+ "false",
+ Released>,
+ ]>,
+ Documentation<HasDocumentation>;
+
def CallAndMessageModeling : Checker<"CallAndMessageModeling">,
HelpText<"Responsible for essential modeling and assumptions after a "
"function/method call. For instance, if we can't reason about the "
@@ -190,6 +207,13 @@ def CallAndMessageChecker : Checker<"CallAndMessage">,
def DereferenceChecker : Checker<"NullDereference">,
HelpText<"Check for dereferences of null pointers">,
+ CheckerOptions<[
+ CmdLineOption<Boolean,
+ "SuppressAddressSpaces",
+ "Suppresses warning when pointer dereferences an address space",
+ "true",
+ Released>
+ ]>,
Documentation<HasDocumentation>;
def NonNullParamChecker : Checker<"NonNullParamChecker">,
@@ -235,57 +259,57 @@ let ParentPackage = CoreAlpha in {
def BoolAssignmentChecker : Checker<"BoolAssignment">,
HelpText<"Warn about assigning non-{0,1} values to Boolean variables">,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def CastSizeChecker : Checker<"CastSize">,
HelpText<"Check when casting a malloc'ed type T, whether the size is a "
"multiple of the size of T">,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def CastToStructChecker : Checker<"CastToStruct">,
HelpText<"Check for cast from non-struct pointer to struct pointer">,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def ConversionChecker : Checker<"Conversion">,
HelpText<"Loss of sign/precision in implicit conversions">,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def IdenticalExprChecker : Checker<"IdenticalExpr">,
HelpText<"Warn about unintended use of identical expressions in operators">,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def FixedAddressChecker : Checker<"FixedAddr">,
HelpText<"Check for assignment of a fixed address to a pointer">,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def PointerArithChecker : Checker<"PointerArithm">,
HelpText<"Check for pointer arithmetic on locations other than array "
"elements">,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def PointerSubChecker : Checker<"PointerSub">,
HelpText<"Check for pointer subtractions on two pointers pointing to "
"different memory chunks">,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def SizeofPointerChecker : Checker<"SizeofPtr">,
HelpText<"Warn about unintended use of sizeof() on pointer expressions">,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def TestAfterDivZeroChecker : Checker<"TestAfterDivZero">,
HelpText<"Check for division by variable that is later compared against 0. "
"Either the comparison is useless or there is division by zero.">,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def DynamicTypeChecker : Checker<"DynamicTypeChecker">,
HelpText<"Check for cases where the dynamic and the static type of an object "
"are unrelated.">,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def StackAddrAsyncEscapeChecker : Checker<"StackAddressAsyncEscape">,
HelpText<"Check that addresses to stack memory do not escape the function">,
Dependencies<[StackAddrEscapeBase]>,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def PthreadLockBase : Checker<"PthreadLockBase">,
HelpText<"Helper registering multiple checks.">,
@@ -295,7 +319,11 @@ def PthreadLockBase : Checker<"PthreadLockBase">,
def C11LockChecker : Checker<"C11Lock">,
HelpText<"Simple lock -> unlock checker">,
Dependencies<[PthreadLockBase]>,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
+
+def StdVariantChecker : Checker<"StdVariant">,
+ HelpText<"Check for bad type access for std::variant.">,
+ Documentation<HasDocumentation>;
} // end "alpha.core"
@@ -347,34 +375,20 @@ def NullableReturnedFromNonnullChecker : Checker<"NullableReturnedFromNonnull">,
let ParentPackage = APIModeling in {
-def StdCLibraryFunctionsChecker : Checker<"StdCLibraryFunctions">,
- HelpText<"Improve modeling of the C standard library functions">,
- // Uninitialized value check is a mandatory dependency. This Checker asserts
- // that arguments are always initialized.
- Dependencies<[CallAndMessageModeling]>,
- CheckerOptions<[
- CmdLineOption<Boolean,
- "DisplayLoadedSummaries",
- "If set to true, the checker displays the found summaries "
- "for the given translation unit.",
- "false",
- Released,
- Hide>,
- CmdLineOption<Boolean,
- "ModelPOSIX",
- "If set to true, the checker models functions from the "
- "POSIX standard.",
- "false",
- InAlpha>
- ]>,
- Documentation<NotDocumented>,
- Hidden;
+def ErrnoModeling : Checker<"Errno">,
+ HelpText<"Make the special value 'errno' available to other checkers.">,
+ Documentation<NotDocumented>;
def TrustNonnullChecker : Checker<"TrustNonnull">,
HelpText<"Trust that returns from framework methods annotated with _Nonnull "
"are not null">,
Documentation<NotDocumented>;
+def TrustReturnsNonnullChecker : Checker<"TrustReturnsNonnull">,
+ HelpText<"Trust that returns from methods annotated with returns_nonnull "
+ "are not null">,
+ Documentation<NotDocumented>;
+
} // end "apiModeling"
//===----------------------------------------------------------------------===//
@@ -420,9 +434,25 @@ def ReturnUndefChecker : Checker<"UndefReturn">,
HelpText<"Check for uninitialized values being returned to the caller">,
Documentation<HasDocumentation>;
+def UndefinedNewArraySizeChecker : Checker<"NewArraySize">,
+ HelpText<"Check if the size of the array in a new[] expression is undefined">,
+ Documentation<HasDocumentation>;
+
} // end "core.uninitialized"
//===----------------------------------------------------------------------===//
+// Optin checkers for core language features
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = CoreOptIn in {
+
+def EnumCastOutOfRangeChecker : Checker<"EnumCastOutOfRange">,
+ HelpText<"Check integer to enumeration casts for out of range values">,
+ Documentation<HasDocumentation>;
+
+} // end "optin.core"
+
+//===----------------------------------------------------------------------===//
// Unix API checkers.
//===----------------------------------------------------------------------===//
@@ -454,17 +484,22 @@ let ParentPackage = CStringAlpha in {
def CStringOutOfBounds : Checker<"OutOfBounds">,
HelpText<"Check for out-of-bounds access in string functions">,
Dependencies<[CStringModeling]>,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def CStringBufferOverlap : Checker<"BufferOverlap">,
HelpText<"Checks for overlap in two buffer arguments">,
Dependencies<[CStringModeling]>,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def CStringNotNullTerm : Checker<"NotNullTerminated">,
HelpText<"Check for arguments which are not null-terminating strings">,
Dependencies<[CStringModeling]>,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
+
+def CStringUninitializedRead : Checker<"UninitializedRead">,
+ HelpText<"Checks if the string manipulation function would read uninitialized bytes">,
+ Dependencies<[CStringModeling]>,
+ Documentation<HasDocumentation>;
} // end "alpha.unix.cstring"
@@ -485,12 +520,34 @@ def DynamicMemoryModeling: Checker<"DynamicMemoryModeling">,
"allocating and deallocating functions are annotated with "
"ownership_holds, ownership_takes and ownership_returns.",
"false",
- InAlpha>
+ InAlpha>,
+ CmdLineOption<Boolean,
+ "AddNoOwnershipChangeNotes",
+ "Add an additional note to the bug report for leak-like "
+ "bugs. Dynamically allocated objects passed to functions "
+ "that neither deallocated it, or have taken responsibility "
+ "of the ownership are noted, similarly to "
+ "NoStoreFuncVisitor.",
+ "true",
+ Released,
+ Hide>
]>,
Dependencies<[CStringModeling]>,
Documentation<NotDocumented>,
Hidden;
+def ErrnoChecker : Checker<"Errno">,
+ HelpText<"Check for improper use of 'errno'">,
+ Dependencies<[ErrnoModeling]>,
+ CheckerOptions<[
+ CmdLineOption<Boolean,
+ "AllowErrnoReadOutsideConditionExpressions",
+ "Allow read of undefined value from errno outside of conditions",
+ "true",
+ InAlpha>,
+ ]>,
+ Documentation<HasDocumentation>;
+
def MallocChecker: Checker<"Malloc">,
HelpText<"Check for memory leaks, double free, and use-after-free problems. "
"Traces memory managed by malloc()/free().">,
@@ -506,6 +563,27 @@ def MismatchedDeallocatorChecker : Checker<"MismatchedDeallocator">,
Dependencies<[DynamicMemoryModeling]>,
Documentation<HasDocumentation>;
+def StdCLibraryFunctionsChecker : Checker<"StdCLibraryFunctions">,
+ HelpText<"Check for invalid arguments of C standard library functions, "
+ "and apply relations between arguments and return value">,
+ CheckerOptions<[
+ CmdLineOption<Boolean,
+ "DisplayLoadedSummaries",
+ "If set to true, the checker displays the found summaries "
+ "for the given translation unit.",
+ "false",
+ Released,
+ Hide>,
+ CmdLineOption<Boolean,
+ "ModelPOSIX",
+ "If set to true, the checker models additional functions "
+ "from the POSIX standard.",
+ "false",
+ InAlpha>
+ ]>,
+ WeakDependencies<[CallAndMessageChecker, NonNullParamChecker]>,
+ Documentation<HasDocumentation>;
+
def VforkChecker : Checker<"Vfork">,
HelpText<"Check for proper usage of vfork">,
Documentation<HasDocumentation>;
@@ -516,32 +594,25 @@ let ParentPackage = UnixAlpha in {
def ChrootChecker : Checker<"Chroot">,
HelpText<"Check improper use of chroot">,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def PthreadLockChecker : Checker<"PthreadLock">,
HelpText<"Simple lock -> unlock checker">,
Dependencies<[PthreadLockBase]>,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def StreamChecker : Checker<"Stream">,
HelpText<"Check stream handling functions">,
- Documentation<HasAlphaDocumentation>;
+ WeakDependencies<[NonNullParamChecker]>,
+ Documentation<HasDocumentation>;
def SimpleStreamChecker : Checker<"SimpleStream">,
HelpText<"Check for misuses of stream APIs">,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def BlockInCriticalSectionChecker : Checker<"BlockInCriticalSection">,
HelpText<"Check for calls to blocking functions inside a critical section">,
- Documentation<HasAlphaDocumentation>;
-
-def StdCLibraryFunctionArgsChecker : Checker<"StdCLibraryFunctionArgs">,
- HelpText<"Check constraints of arguments of C standard library functions, "
- "such as whether the parameter of isalpha is in the range [0, 255] "
- "or is EOF.">,
- Dependencies<[StdCLibraryFunctionsChecker]>,
- WeakDependencies<[CallAndMessageChecker, NonNullParamChecker, StreamChecker]>,
- Documentation<NotDocumented>;
+ Documentation<HasDocumentation>;
} // end "alpha.unix"
@@ -592,6 +663,10 @@ def SmartPtrModeling: Checker<"SmartPtrModeling">,
]>,
Hidden;
+def StringChecker: Checker<"StringChecker">,
+ HelpText<"Checks C++ std::string bugs">,
+ Documentation<HasDocumentation>;
+
def MoveChecker: Checker<"Move">,
HelpText<"Find use-after-move bugs in C++">,
CheckerOptions<[
@@ -672,7 +747,7 @@ def UninitializedObjectChecker: Checker<"UninitializedObject">,
"false",
InAlpha>
]>,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def VirtualCallChecker : Checker<"VirtualCall">,
HelpText<"Check virtual function calls during construction/destruction">,
@@ -702,14 +777,15 @@ def ContainerModeling : Checker<"ContainerModeling">,
Documentation<NotDocumented>,
Hidden;
+def CXXArrayDeleteChecker : Checker<"ArrayDelete">,
+ HelpText<"Reports destructions of arrays of polymorphic objects that are "
+ "destructed as their base class.">,
+ Documentation<HasDocumentation>;
+
def DeleteWithNonVirtualDtorChecker : Checker<"DeleteWithNonVirtualDtor">,
HelpText<"Reports destructions of polymorphic objects with a non-virtual "
"destructor in their base class">,
- Documentation<HasAlphaDocumentation>;
-
-def EnumCastOutOfRangeChecker : Checker<"EnumCastOutOfRange">,
- HelpText<"Check integer to enumeration casts for out of range values">,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def IteratorModeling : Checker<"IteratorModeling">,
HelpText<"Models iterators of C++ containers">,
@@ -733,23 +809,23 @@ def STLAlgorithmModeling : Checker<"STLAlgorithmModeling">,
def InvalidatedIteratorChecker : Checker<"InvalidatedIterator">,
HelpText<"Check for use of invalidated iterators">,
Dependencies<[IteratorModeling]>,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def IteratorRangeChecker : Checker<"IteratorRange">,
HelpText<"Check for iterators used outside their valid ranges">,
Dependencies<[IteratorModeling]>,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def MismatchedIteratorChecker : Checker<"MismatchedIterator">,
HelpText<"Check for use of iterators of different containers where iterators "
"of the same container are expected">,
Dependencies<[IteratorModeling]>,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def SmartPtrChecker: Checker<"SmartPtr">,
HelpText<"Find the dereference of null SmrtPtr">,
Dependencies<[SmartPtrModeling]>,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
} // end: "alpha.cplusplus"
@@ -812,7 +888,7 @@ let ParentPackage = DeadCodeAlpha in {
def UnreachableCodeChecker : Checker<"UnreachableCode">,
HelpText<"Check unreachable code">,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
} // end "alpha.deadcode"
@@ -928,7 +1004,24 @@ def FloatLoopCounter : Checker<"FloatLoopCounter">,
} // end "security"
-let ParentPackage = POS in {
+let ParentPackage = ENV in {
+
+ def InvalidPtrChecker : Checker<"InvalidPtr">,
+ HelpText<"Finds usages of possibly invalidated pointers">,
+ CheckerOptions<[
+ CmdLineOption<Boolean,
+ "InvalidatingGetEnv",
+ "Regard getenv as an invalidating call (as per POSIX "
+ "standard), which can lead to false positives depending on "
+ "implementation.",
+ "false",
+ Released>,
+ ]>,
+ Documentation<HasDocumentation>;
+
+} // end "security.cert.env"
+
+let ParentPackage = POSAlpha in {
def PutenvWithAuto : Checker<"34c">,
HelpText<"Finds calls to the 'putenv' function which pass a pointer to "
@@ -941,19 +1034,19 @@ let ParentPackage = SecurityAlpha in {
def ArrayBoundChecker : Checker<"ArrayBound">,
HelpText<"Warn about buffer overflows (older checker)">,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def ArrayBoundCheckerV2 : Checker<"ArrayBoundV2">,
HelpText<"Warn about buffer overflows (newer checker)">,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def ReturnPointerRangeChecker : Checker<"ReturnPtrRange">,
HelpText<"Check for an out-of-bound pointer being returned to callers">,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def MallocOverflowSecurityChecker : Checker<"MallocOverflow">,
HelpText<"Check for overflows in the arguments to malloc()">,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def MmapWriteExecChecker : Checker<"MmapWriteExec">,
HelpText<"Warn on mmap() calls that are both writable and executable">,
@@ -969,7 +1062,7 @@ def MmapWriteExecChecker : Checker<"MmapWriteExec">,
"0x01",
Released>
]>,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
} // end "alpha.security"
@@ -988,7 +1081,7 @@ def GenericTaintChecker : Checker<"TaintPropagation">,
"",
InAlpha>,
]>,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
} // end "alpha.security.taint"
@@ -1179,13 +1272,13 @@ def InstanceVariableInvalidation : Checker<"InstanceVariableInvalidation">,
HelpText<"Check that the invalidatable instance variables are invalidated in "
"the methods annotated with objc_instance_variable_invalidator">,
Dependencies<[IvarInvalidationModeling]>,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def MissingInvalidationMethod : Checker<"MissingInvalidationMethod">,
HelpText<"Check that the invalidation methods are present in classes that "
"contain invalidatable instance variables">,
Dependencies<[IvarInvalidationModeling]>,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
def DirectIvarAssignment : Checker<"DirectIvarAssignment">,
HelpText<"Check for direct assignments to instance variables">,
@@ -1198,7 +1291,7 @@ def DirectIvarAssignment : Checker<"DirectIvarAssignment">,
"false",
InAlpha>
]>,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
} // end "alpha.osx.cocoa"
@@ -1264,7 +1357,7 @@ let ParentPackage = LocalizabilityAlpha in {
def PluralMisuseChecker : Checker<"PluralMisuseChecker">,
HelpText<"Warns against using one vs. many plural pattern in code when "
"generating localized strings.">,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
} // end "alpha.osx.cocoa.localizability"
@@ -1284,7 +1377,7 @@ let ParentPackage = LLVMAlpha in {
def LLVMConventionsChecker : Checker<"Conventions">,
HelpText<"Check code for LLVM codebase conventions">,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
} // end "llvm"
@@ -1528,6 +1621,11 @@ def StreamTesterChecker : Checker<"StreamTester">,
"purposes.">,
Documentation<NotDocumented>;
+def ErrnoTesterChecker : Checker<"ErrnoTest">,
+ HelpText<"Check modeling aspects of 'errno'.">,
+ Dependencies<[ErrnoModeling]>,
+ Documentation<NotDocumented>;
+
def ExprInspectionChecker : Checker<"ExprInspection">,
HelpText<"Check the analyzer's understanding of expressions">,
Documentation<NotDocumented>;
@@ -1553,7 +1651,7 @@ def DebugIteratorModeling : Checker<"DebugIteratorModeling">,
def StdCLibraryFunctionsTesterChecker : Checker<"StdCLibraryFunctionsTester">,
HelpText<"Add test functions to the summary map, so testing of individual "
"summary constituents becomes possible.">,
- Dependencies<[StdCLibraryFunctionsChecker]>,
+ WeakDependencies<[StdCLibraryFunctionsChecker]>,
Documentation<NotDocumented>;
} // end "debug"
@@ -1589,7 +1687,7 @@ def CloneChecker : Checker<"CloneChecker">,
"\"\"",
Released>
]>,
- Documentation<HasAlphaDocumentation>;
+ Documentation<HasDocumentation>;
} // end "clone"
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/LocalCheckers.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/LocalCheckers.h
deleted file mode 100644
index 8f7148fde19a..000000000000
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/LocalCheckers.h
+++ /dev/null
@@ -1,27 +0,0 @@
-//==- LocalCheckers.h - Intra-Procedural+Flow-Sensitive Checkers -*- C++ -*-==//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the interface to call a set of intra-procedural (local)
-// checkers that use flow/path-sensitive analyses to find bugs.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_STATICANALYZER_CHECKERS_LOCALCHECKERS_H
-#define LLVM_CLANG_STATICANALYZER_CHECKERS_LOCALCHECKERS_H
-
-namespace clang {
-namespace ento {
-
-class ExprEngine;
-
-void RegisterCallInliner(ExprEngine &Eng);
-
-} // end namespace ento
-} // end namespace clang
-
-#endif
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/MPIFunctionClassifier.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/MPIFunctionClassifier.h
index bbc5111ccacc..6243bbd5d53b 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/MPIFunctionClassifier.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/MPIFunctionClassifier.h
@@ -11,8 +11,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPIFUNCTIONCLASSIFIER_H
-#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPIFUNCTIONCLASSIFIER_H
+#ifndef LLVM_CLANG_STATICANALYZER_CHECKERS_MPIFUNCTIONCLASSIFIER_H
+#define LLVM_CLANG_STATICANALYZER_CHECKERS_MPIFUNCTIONCLASSIFIER_H
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/SValExplainer.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/SValExplainer.h
index 31a4ed50a723..43a70f596a4d 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/SValExplainer.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/SValExplainer.h
@@ -32,7 +32,7 @@ private:
std::string Str;
llvm::raw_string_ostream OS(Str);
S->printPretty(OS, nullptr, PrintingPolicy(ACtx.getLangOpts()));
- return OS.str();
+ return Str;
}
bool isThisObject(const SymbolicRegion *R) {
@@ -42,6 +42,18 @@ private:
return false;
}
+ bool isThisObject(const ElementRegion *R) {
+ if (const auto *Idx = R->getIndex().getAsInteger()) {
+ if (const auto *SR = R->getSuperRegion()->getAs<SymbolicRegion>()) {
+ QualType Ty = SR->getPointeeStaticType();
+ bool IsNotReinterpretCast = R->getValueType() == Ty;
+ if (Idx->isZero() && IsNotReinterpretCast)
+ return isThisObject(SR);
+ }
+ }
+ return false;
+ }
+
public:
SValExplainer(ASTContext &Ctx) : ACtx(Ctx) {}
@@ -53,7 +65,7 @@ public:
return "undefined value";
}
- std::string VisitLocMemRegionVal(loc::MemRegionVal V) {
+ std::string VisitMemRegionVal(loc::MemRegionVal V) {
const MemRegion *R = V.getRegion();
// Avoid the weird "pointer to pointee of ...".
if (auto SR = dyn_cast<SymbolicRegion>(R)) {
@@ -64,28 +76,28 @@ public:
return "pointer to " + Visit(R);
}
- std::string VisitLocConcreteInt(loc::ConcreteInt V) {
+ std::string VisitConcreteInt(loc::ConcreteInt V) {
const llvm::APSInt &I = V.getValue();
std::string Str;
llvm::raw_string_ostream OS(Str);
OS << "concrete memory address '" << I << "'";
- return OS.str();
+ return Str;
}
- std::string VisitNonLocSymbolVal(nonloc::SymbolVal V) {
+ std::string VisitSymbolVal(nonloc::SymbolVal V) {
return Visit(V.getSymbol());
}
- std::string VisitNonLocConcreteInt(nonloc::ConcreteInt V) {
+ std::string VisitConcreteInt(nonloc::ConcreteInt V) {
const llvm::APSInt &I = V.getValue();
std::string Str;
llvm::raw_string_ostream OS(Str);
OS << (I.isSigned() ? "signed " : "unsigned ") << I.getBitWidth()
<< "-bit integer '" << I << "'";
- return OS.str();
+ return Str;
}
- std::string VisitNonLocLazyCompoundVal(nonloc::LazyCompoundVal V) {
+ std::string VisitLazyCompoundVal(nonloc::LazyCompoundVal V) {
return "lazily frozen compound value of " + Visit(V.getRegion());
}
@@ -123,7 +135,7 @@ public:
OS << "(" << Visit(S->getLHS()) << ") "
<< std::string(BinaryOperator::getOpcodeStr(S->getOpcode())) << " "
<< S->getRHS();
- return OS.str();
+ return Str;
}
// TODO: IntSymExpr doesn't appear in practice.
@@ -135,11 +147,16 @@ public:
" (" + Visit(S->getRHS()) + ")";
}
+ std::string VisitUnarySymExpr(const UnarySymExpr *S) {
+ return std::string(UnaryOperator::getOpcodeStr(S->getOpcode())) + " (" +
+ Visit(S->getOperand()) + ")";
+ }
+
// TODO: SymbolCast doesn't appear in practice.
// Add the relevant code once it does.
std::string VisitSymbolicRegion(const SymbolicRegion *R) {
- // Explain 'this' object here.
+ // Explain 'this' object here - if it's not wrapped by an ElementRegion.
// TODO: Explain CXXThisRegion itself, find a way to test it.
if (isThisObject(R))
return "'this' object";
@@ -169,15 +186,21 @@ public:
std::string VisitElementRegion(const ElementRegion *R) {
std::string Str;
llvm::raw_string_ostream OS(Str);
- OS << "element of type '" << R->getElementType().getAsString()
- << "' with index ";
+
+ // Explain 'this' object here.
+ // They are represented by a SymRegion wrapped by an ElementRegion; so
+ // match and handle it here.
+ if (isThisObject(R))
+ return "'this' object";
+
+ OS << "element of type '" << R->getElementType() << "' with index ";
// For concrete index: omit type of the index integer.
if (auto I = R->getIndex().getAs<nonloc::ConcreteInt>())
OS << I->getValue();
else
OS << "'" << Visit(R->getIndex()) << "'";
OS << " of " + Visit(R->getSuperRegion());
- return OS.str();
+ return Str;
}
std::string VisitNonParamVarRegion(const NonParamVarRegion *R) {
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Taint.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Taint.h
new file mode 100644
index 000000000000..3ec8dbfb09ee
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Checkers/Taint.h
@@ -0,0 +1,128 @@
+//=== Taint.h - Taint tracking and basic propagation rules. --------*- C++ -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines basic, non-domain-specific mechanisms for tracking tainted values.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_TAINT_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_TAINT_H
+
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+
+namespace clang {
+namespace ento {
+namespace taint {
+
+/// The type of taint, which helps to differentiate between different types of
+/// taint.
+using TaintTagType = unsigned;
+
+static constexpr TaintTagType TaintTagGeneric = 0;
+
+/// Create a new state in which the value of the statement is marked as tainted.
+[[nodiscard]] ProgramStateRef addTaint(ProgramStateRef State, const Stmt *S,
+ const LocationContext *LCtx,
+ TaintTagType Kind = TaintTagGeneric);
+
+/// Create a new state in which the value is marked as tainted.
+[[nodiscard]] ProgramStateRef addTaint(ProgramStateRef State, SVal V,
+ TaintTagType Kind = TaintTagGeneric);
+
+/// Create a new state in which the symbol is marked as tainted.
+[[nodiscard]] ProgramStateRef addTaint(ProgramStateRef State, SymbolRef Sym,
+ TaintTagType Kind = TaintTagGeneric);
+
+/// Create a new state in which the pointer represented by the region
+/// is marked as tainted.
+[[nodiscard]] ProgramStateRef addTaint(ProgramStateRef State,
+ const MemRegion *R,
+ TaintTagType Kind = TaintTagGeneric);
+
+[[nodiscard]] ProgramStateRef removeTaint(ProgramStateRef State, SVal V);
+
+[[nodiscard]] ProgramStateRef removeTaint(ProgramStateRef State,
+ const MemRegion *R);
+
+[[nodiscard]] ProgramStateRef removeTaint(ProgramStateRef State, SymbolRef Sym);
+
+/// Create a new state in a which a sub-region of a given symbol is tainted.
+/// This might be necessary when referring to regions that can not have an
+/// individual symbol, e.g. if they are represented by the default binding of
+/// a LazyCompoundVal.
+[[nodiscard]] ProgramStateRef
+addPartialTaint(ProgramStateRef State, SymbolRef ParentSym,
+ const SubRegion *SubRegion,
+ TaintTagType Kind = TaintTagGeneric);
+
+/// Check if the statement has a tainted value in the given state.
+bool isTainted(ProgramStateRef State, const Stmt *S,
+ const LocationContext *LCtx,
+ TaintTagType Kind = TaintTagGeneric);
+
+/// Check if the value is tainted in the given state.
+bool isTainted(ProgramStateRef State, SVal V,
+ TaintTagType Kind = TaintTagGeneric);
+
+/// Check if the symbol is tainted in the given state.
+bool isTainted(ProgramStateRef State, SymbolRef Sym,
+ TaintTagType Kind = TaintTagGeneric);
+
+/// Check if the pointer represented by the region is tainted in the given
+/// state.
+bool isTainted(ProgramStateRef State, const MemRegion *Reg,
+ TaintTagType Kind = TaintTagGeneric);
+
+/// Returns the tainted Symbols for a given Statement and state.
+std::vector<SymbolRef> getTaintedSymbols(ProgramStateRef State, const Stmt *S,
+ const LocationContext *LCtx,
+ TaintTagType Kind = TaintTagGeneric);
+
+/// Returns the tainted Symbols for a given SVal and state.
+std::vector<SymbolRef> getTaintedSymbols(ProgramStateRef State, SVal V,
+ TaintTagType Kind = TaintTagGeneric);
+
+/// Returns the tainted Symbols for a SymbolRef and state.
+std::vector<SymbolRef> getTaintedSymbols(ProgramStateRef State, SymbolRef Sym,
+ TaintTagType Kind = TaintTagGeneric);
+
+/// Returns the tainted (index, super/sub region, symbolic region) symbols
+/// for a given memory region.
+std::vector<SymbolRef> getTaintedSymbols(ProgramStateRef State,
+ const MemRegion *Reg,
+ TaintTagType Kind = TaintTagGeneric);
+
+std::vector<SymbolRef> getTaintedSymbolsImpl(ProgramStateRef State,
+ const Stmt *S,
+ const LocationContext *LCtx,
+ TaintTagType Kind,
+ bool returnFirstOnly);
+
+std::vector<SymbolRef> getTaintedSymbolsImpl(ProgramStateRef State, SVal V,
+ TaintTagType Kind,
+ bool returnFirstOnly);
+
+std::vector<SymbolRef> getTaintedSymbolsImpl(ProgramStateRef State,
+ SymbolRef Sym, TaintTagType Kind,
+ bool returnFirstOnly);
+
+std::vector<SymbolRef> getTaintedSymbolsImpl(ProgramStateRef State,
+ const MemRegion *Reg,
+ TaintTagType Kind,
+ bool returnFirstOnly);
+
+void printTaint(ProgramStateRef State, raw_ostream &Out, const char *nl = "\n",
+ const char *sep = "");
+
+LLVM_DUMP_METHOD void dumpTaint(ProgramStateRef State);
+} // namespace taint
+} // namespace ento
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Analyses.def b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Analyses.def
index 88c375ce0925..51803e7c1f0d 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Analyses.def
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Analyses.def
@@ -10,13 +10,6 @@
//
//===----------------------------------------------------------------------===//
-#ifndef ANALYSIS_STORE
-#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATFN)
-#endif
-
-ANALYSIS_STORE(RegionStore, "region", "Use region-based analyzer store",
- CreateRegionStoreManager)
-
#ifndef ANALYSIS_CONSTRAINTS
#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATFN)
#endif
@@ -94,7 +87,6 @@ ANALYSIS_INLINING_MODE(
NoRedundancy, "noredundancy",
"Do not analyze a function which has been previously inlined")
-#undef ANALYSIS_STORE
#undef ANALYSIS_CONSTRAINTS
#undef ANALYSIS_DIAGNOSTICS
#undef ANALYSIS_PURGE
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def
index f0359d2dbb3c..2fc825c2af9c 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.def
@@ -128,7 +128,8 @@ ANALYZER_OPTION(bool, MayInlineCXXStandardLibrary, "c++-stdlib-inlining",
true)
ANALYZER_OPTION(bool, MayInlineCXXAllocator, "c++-allocator-inlining",
- "Whether or not allocator call may be considered for inlining.",
+ "Whether or not allocator and deallocator calls may be "
+ "considered for inlining.",
true)
ANALYZER_OPTION(
@@ -190,7 +191,13 @@ ANALYZER_OPTION(bool, ShouldReportIssuesInMainSourceFile,
false)
ANALYZER_OPTION(bool, ShouldWriteStableReportFilename, "stable-report-filename",
- "Whether or not the report filename should be random or not.",
+ "Deprecated: report filenames are now always stable. "
+ "See also 'verbose-report-filename'.",
+ false)
+
+ANALYZER_OPTION(bool, ShouldWriteVerboseReportFilename, "verbose-report-filename",
+ "Whether or not the report filename should contain extra "
+ "information about the issue.",
false)
ANALYZER_OPTION(
@@ -314,6 +321,31 @@ ANALYZER_OPTION(bool, ShouldDisplayCheckerNameForText, "display-checker-name",
"Display the checker name for textual outputs",
true)
+ANALYZER_OPTION(bool, ShouldSupportSymbolicIntegerCasts,
+ "support-symbolic-integer-casts",
+ "Produce cast symbols for integral types.",
+ false)
+
+ANALYZER_OPTION(
+ bool, ShouldAssumeControlledEnvironment, "assume-controlled-environment",
+ "Whether the analyzed application runs in a controlled environment. "
+ "We will assume that environment variables exist in queries and they hold "
+ "no malicious data. For instance, if this option is enabled, 'getenv()' "
+ "might be modeled by the analyzer to never return NULL.",
+ false)
+
+ANALYZER_OPTION(
+ bool, ShouldIgnoreBisonGeneratedFiles, "ignore-bison-generated-files",
+ "If enabled, any files containing the \"/* A Bison parser, made by\" "
+ "won't be analyzed.",
+ true)
+
+ANALYZER_OPTION(
+ bool, ShouldIgnoreFlexGeneratedFiles, "ignore-flex-generated-files",
+ "If enabled, any files containing the \"/* A lexical scanner generated by "
+ "flex\" won't be analyzed.",
+ true)
+
//===----------------------------------------------------------------------===//
// Unsigned analyzer options.
//===----------------------------------------------------------------------===//
@@ -370,13 +402,49 @@ ANALYZER_OPTION_DEPENDS_ON_USER_MODE(
/* SHALLOW_VAL */ 75000, /* DEEP_VAL */ 225000)
ANALYZER_OPTION(
+ unsigned, CTUMaxNodesPercentage, "ctu-max-nodes-pct",
+ "The percentage of single-TU analysed nodes that the CTU analysis is "
+ "allowed to visit.", 50)
+
+ANALYZER_OPTION(
+ unsigned, CTUMaxNodesMin, "ctu-max-nodes-min",
+ "The maximum number of nodes in CTU mode is determinded by "
+ "'ctu-max-nodes-pct'. However, if the number of nodes in single-TU "
+ "analysis is too low, it is meaningful to provide a minimum value that "
+ "serves as an upper bound instead.", 10000)
+
+ANALYZER_OPTION(
+ StringRef, CTUPhase1InliningMode, "ctu-phase1-inlining",
+ "Controls which functions will be inlined during the first phase of the ctu "
+ "analysis. "
+ "If the value is set to 'all' then all foreign functions are inlinied "
+ "immediately during the first phase, thus rendering the second phase a noop. "
+ "The 'ctu-max-nodes-*' budge has no effect in this case. "
+ "If the value is 'small' then only functions with a linear CFG and with a "
+ "limited number of statements would be inlined during the first phase. The "
+ "long and/or nontrivial functions are handled in the second phase and are "
+ "controlled by the 'ctu-max-nodes-*' budge. "
+ "The value 'none' means that all foreign functions are inlined only in the "
+ "second phase, 'ctu-max-nodes-*' budge limits the second phase. "
+ "Value: \"none\", \"small\", \"all\".",
+ "small")
+
+ANALYZER_OPTION(
unsigned, RegionStoreSmallStructLimit, "region-store-small-struct-limit",
"The largest number of fields a struct can have and still be considered "
- "small This is currently used to decide whether or not it is worth forcing "
+ "small. This is currently used to decide whether or not it is worth forcing "
"a LazyCompoundVal on bind. To disable all small-struct-dependent "
"behavior, set the option to 0.",
2)
+ANALYZER_OPTION(
+ unsigned, RegionStoreSmallArrayLimit, "region-store-small-array-limit",
+ "The largest number of elements an array can have and still be considered "
+ "small. This is currently used to decide whether or not it is worth forcing "
+ "a LazyCompoundVal on bind. To disable all small-array-dependent "
+ "behavior, set the option to 0.",
+ 5)
+
//===----------------------------------------------------------------------===//
// String analyzer options.
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h
index ccf35e0a81ec..276d11e80a5b 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/AnalyzerOptions.h
@@ -17,10 +17,8 @@
#include "clang/Analysis/PathDiagnostic.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/StringSwitch.h"
#include <string>
#include <utility>
#include <vector>
@@ -33,20 +31,6 @@ class CheckerBase;
} // namespace ento
-/// Analysis - Set of available source code analyses.
-enum Analyses {
-#define ANALYSIS(NAME, CMDFLAG, DESC, SCOPE) NAME,
-#include "clang/StaticAnalyzer/Core/Analyses.def"
-NumAnalyses
-};
-
-/// AnalysisStores - Set of available analysis store models.
-enum AnalysisStores {
-#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATFN) NAME##Model,
-#include "clang/StaticAnalyzer/Core/Analyses.def"
-NumStores
-};
-
/// AnalysisConstraints - Set of available constraint models.
enum AnalysisConstraints {
#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATFN) NAME##Model,
@@ -138,6 +122,8 @@ enum UserModeKind {
UMK_Deep = 2
};
+enum class CTUPhase1InliningKind { None, Small, All };
+
/// Stores options for the analyzer from the command line.
///
/// Some options are frontend flags (e.g.: -analyzer-output), but some are
@@ -205,7 +191,6 @@ public:
/// A key-value table of use-specified configuration values.
// TODO: This shouldn't be public.
ConfigTable Config;
- AnalysisStores AnalysisStoreOpt = RegionStoreModel;
AnalysisConstraints AnalysisConstraintsOpt = RangeConstraintsModel;
AnalysisDiagClients AnalysisDiagOpt = PD_HTML;
AnalysisPurgeMode AnalysisPurgeOpt = PurgeStmt;
@@ -242,7 +227,6 @@ public:
unsigned ShouldEmitErrorsOnInvalidConfigValue : 1;
unsigned AnalyzeAll : 1;
unsigned AnalyzerDisplayProgress : 1;
- unsigned AnalyzeNestedBlocks : 1;
unsigned eagerlyAssumeBinOpBifurcation : 1;
@@ -276,9 +260,10 @@ public:
#undef ANALYZER_OPTION
#undef ANALYZER_OPTION_DEPENDS_ON_USER_MODE
- // Create an array of all -analyzer-config command line options. Sort it in
- // the constructor.
- std::vector<llvm::StringLiteral> AnalyzerConfigCmdFlags = {
+ bool isUnknownAnalyzerConfig(llvm::StringRef Name) {
+ static std::vector<llvm::StringLiteral> AnalyzerConfigCmdFlags = []() {
+ // Create an array of all -analyzer-config command line options.
+ std::vector<llvm::StringLiteral> AnalyzerConfigCmdFlags = {
#define ANALYZER_OPTION_DEPENDS_ON_USER_MODE(TYPE, NAME, CMDFLAG, DESC, \
SHALLOW_VAL, DEEP_VAL) \
ANALYZER_OPTION(TYPE, NAME, CMDFLAG, DESC, SHALLOW_VAL)
@@ -289,10 +274,11 @@ public:
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.def"
#undef ANALYZER_OPTION
#undef ANALYZER_OPTION_DEPENDS_ON_USER_MODE
- };
-
- bool isUnknownAnalyzerConfig(StringRef Name) const {
- assert(llvm::is_sorted(AnalyzerConfigCmdFlags));
+ };
+ // FIXME: Sort this at compile-time when we get constexpr sort (C++20).
+ llvm::sort(AnalyzerConfigCmdFlags);
+ return AnalyzerConfigCmdFlags;
+ }();
return !std::binary_search(AnalyzerConfigCmdFlags.begin(),
AnalyzerConfigCmdFlags.end(), Name);
@@ -303,13 +289,12 @@ public:
ShowCheckerHelpAlpha(false), ShowCheckerHelpDeveloper(false),
ShowCheckerOptionList(false), ShowCheckerOptionAlphaList(false),
ShowCheckerOptionDeveloperList(false), ShowEnabledCheckerList(false),
- ShowConfigOptionsList(false), AnalyzeAll(false),
- AnalyzerDisplayProgress(false), AnalyzeNestedBlocks(false),
- eagerlyAssumeBinOpBifurcation(false), TrimGraph(false),
- visualizeExplodedGraphWithGraphViz(false), UnoptimizedCFG(false),
- PrintStats(false), NoRetryExhausted(false), AnalyzerWerror(false) {
- llvm::sort(AnalyzerConfigCmdFlags);
- }
+ ShowConfigOptionsList(false),
+ ShouldEmitErrorsOnInvalidConfigValue(false), AnalyzeAll(false),
+ AnalyzerDisplayProgress(false), eagerlyAssumeBinOpBifurcation(false),
+ TrimGraph(false), visualizeExplodedGraphWithGraphViz(false),
+ UnoptimizedCFG(false), PrintStats(false), NoRetryExhausted(false),
+ AnalyzerWerror(false) {}
/// Interprets an option's string value as a boolean. The "true" string is
/// interpreted as true and the "false" string is interpreted as false.
@@ -373,12 +358,8 @@ public:
StringRef OptionName,
bool SearchInParents = false) const;
- /// Retrieves and sets the UserMode. This is a high-level option,
- /// which is used to set other low-level options. It is not accessible
- /// outside of AnalyzerOptions.
- UserModeKind getUserMode() const;
-
ExplorationStrategyKind getExplorationStrategy() const;
+ CTUPhase1InliningKind getCTUPhase1Inlining() const;
/// Returns the inter-procedural analysis mode.
IPAKind getIPAMode() const;
@@ -395,7 +376,11 @@ public:
return {FullCompilerInvocation,
ShouldDisplayMacroExpansions,
ShouldSerializeStats,
- ShouldWriteStableReportFilename,
+ // The stable report filename option is deprecated because
+ // file names are now always stable. Now the old option acts as
+ // an alias to the new verbose filename option because this
+ // closely mimics the behavior under the old option.
+ ShouldWriteStableReportFilename || ShouldWriteVerboseReportFilename,
AnalyzerWerror,
ShouldApplyFixIts,
ShouldDisplayCheckerNameForText};
@@ -412,15 +397,6 @@ using AnalyzerOptionsRef = IntrusiveRefCntPtr<AnalyzerOptions>;
// For this reason, implement some methods in this header file.
//===----------------------------------------------------------------------===//
-inline UserModeKind AnalyzerOptions::getUserMode() const {
- auto K = llvm::StringSwitch<llvm::Optional<UserModeKind>>(UserMode)
- .Case("shallow", UMK_Shallow)
- .Case("deep", UMK_Deep)
- .Default(None);
- assert(K.hasValue() && "User mode is invalid.");
- return K.getValue();
-}
-
inline std::vector<StringRef>
AnalyzerOptions::getRegisteredCheckers(bool IncludeExperimental) {
static constexpr llvm::StringLiteral StaticAnalyzerCheckerNames[] = {
@@ -433,8 +409,8 @@ AnalyzerOptions::getRegisteredCheckers(bool IncludeExperimental) {
};
std::vector<StringRef> Checkers;
for (StringRef CheckerName : StaticAnalyzerCheckerNames) {
- if (!CheckerName.startswith("debug.") &&
- (IncludeExperimental || !CheckerName.startswith("alpha.")))
+ if (!CheckerName.starts_with("debug.") &&
+ (IncludeExperimental || !CheckerName.starts_with("alpha.")))
Checkers.push_back(CheckerName);
}
return Checkers;
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
index 3c93ebeccde8..e762f7548e0b 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
@@ -19,6 +19,7 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugSuppression.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
@@ -26,10 +27,8 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableSet.h"
-#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
@@ -39,6 +38,7 @@
#include "llvm/ADT/iterator_range.h"
#include <cassert>
#include <memory>
+#include <optional>
#include <string>
#include <utility>
#include <vector>
@@ -48,7 +48,6 @@ namespace clang {
class AnalyzerOptions;
class ASTContext;
class Decl;
-class DiagnosticsEngine;
class LocationContext;
class SourceManager;
class Stmt;
@@ -61,7 +60,6 @@ class ExplodedGraph;
class ExplodedNode;
class ExprEngine;
class MemRegion;
-class SValBuilder;
//===----------------------------------------------------------------------===//
// Interface for individual bug reports.
@@ -455,13 +453,13 @@ public:
bool isInteresting(SVal V) const;
bool isInteresting(const LocationContext *LC) const;
- Optional<bugreporter::TrackingKind>
+ std::optional<bugreporter::TrackingKind>
getInterestingnessKind(SymbolRef sym) const;
- Optional<bugreporter::TrackingKind>
+ std::optional<bugreporter::TrackingKind>
getInterestingnessKind(const MemRegion *R) const;
- Optional<bugreporter::TrackingKind> getInterestingnessKind(SVal V) const;
+ std::optional<bugreporter::TrackingKind> getInterestingnessKind(SVal V) const;
/// Returns whether or not this report should be considered valid.
///
@@ -597,6 +595,9 @@ private:
/// A vector of BugReports for tracking the allocated pointers and cleanup.
std::vector<BugReportEquivClass *> EQClassesVector;
+ /// User-provided in-code suppressions.
+ BugSuppression UserSuppressions;
+
public:
BugReporter(BugReporterData &d);
virtual ~BugReporter();
@@ -610,8 +611,9 @@ public:
/// Iterator over the set of BugReports tracked by the BugReporter.
using EQClasses_iterator = llvm::FoldingSet<BugReportEquivClass>::iterator;
- EQClasses_iterator EQClasses_begin() { return EQClasses.begin(); }
- EQClasses_iterator EQClasses_end() { return EQClasses.end(); }
+ llvm::iterator_range<EQClasses_iterator> equivalenceClasses() {
+ return EQClasses;
+ }
ASTContext &getContext() { return D.getASTContext(); }
@@ -631,14 +633,14 @@ public:
void EmitBasicReport(const Decl *DeclWithIssue, const CheckerBase *Checker,
StringRef BugName, StringRef BugCategory,
StringRef BugStr, PathDiagnosticLocation Loc,
- ArrayRef<SourceRange> Ranges = None,
- ArrayRef<FixItHint> Fixits = None);
+ ArrayRef<SourceRange> Ranges = std::nullopt,
+ ArrayRef<FixItHint> Fixits = std::nullopt);
void EmitBasicReport(const Decl *DeclWithIssue, CheckerNameRef CheckerName,
StringRef BugName, StringRef BugCategory,
StringRef BugStr, PathDiagnosticLocation Loc,
- ArrayRef<SourceRange> Ranges = None,
- ArrayRef<FixItHint> Fixits = None);
+ ArrayRef<SourceRange> Ranges = std::nullopt,
+ ArrayRef<FixItHint> Fixits = std::nullopt);
private:
llvm::StringMap<std::unique_ptr<BugType>> StrBugTypes;
@@ -781,11 +783,11 @@ public:
return T->getTagKind() == &Kind;
}
- Optional<std::string> generateMessage(BugReporterContext &BRC,
- PathSensitiveBugReport &R) const {
+ std::optional<std::string> generateMessage(BugReporterContext &BRC,
+ PathSensitiveBugReport &R) const {
std::string Msg = Cb(BRC, R);
if (Msg.empty())
- return None;
+ return std::nullopt;
return std::move(Msg);
}
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h
index 24cae12af24a..d9b3d9352d32 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h
@@ -21,9 +21,11 @@
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringRef.h"
#include <list>
#include <memory>
+#include <optional>
#include <utility>
namespace clang {
@@ -49,6 +51,12 @@ public:
BugReporterVisitor() = default;
BugReporterVisitor(const BugReporterVisitor &) = default;
BugReporterVisitor(BugReporterVisitor &&) {}
+
+ // The copy and move assignment operator is defined as deleted pending further
+ // motivation.
+ BugReporterVisitor &operator=(const BugReporterVisitor &) = delete;
+ BugReporterVisitor &operator=(BugReporterVisitor &&) = delete;
+
virtual ~BugReporterVisitor();
/// Return a diagnostic piece which should be associated with the
@@ -384,19 +392,19 @@ const Expr *getDerefExpr(const Stmt *S);
} // namespace bugreporter
class TrackConstraintBRVisitor final : public BugReporterVisitor {
- DefinedSVal Constraint;
- bool Assumption;
+ const SmallString<64> Message;
+ const DefinedSVal Constraint;
+ const bool Assumption;
bool IsSatisfied = false;
- bool IsZeroCheck;
/// We should start tracking from the last node along the path in which the
/// value is constrained.
bool IsTrackingTurnedOn = false;
public:
- TrackConstraintBRVisitor(DefinedSVal constraint, bool assumption)
- : Constraint(constraint), Assumption(assumption),
- IsZeroCheck(!Assumption && Constraint.getAs<Loc>()) {}
+ TrackConstraintBRVisitor(DefinedSVal constraint, bool assumption,
+ StringRef Message)
+ : Message(Message), Constraint(constraint), Assumption(assumption) {}
void Profile(llvm::FoldingSetNodeID &ID) const override;
@@ -409,6 +417,9 @@ public:
PathSensitiveBugReport &BR) override;
private:
+ /// Checks if the constraint refers to a null-location.
+ bool isZeroCheck() const;
+
/// Checks if the constraint is valid in the current state.
bool isUnderconstrained(const ExplodedNode *N) const;
};
@@ -501,13 +512,9 @@ public:
bool printValue(const Expr *CondVarExpr, raw_ostream &Out,
const ExplodedNode *N, bool TookTrue, bool IsAssuming);
- bool patternMatch(const Expr *Ex,
- const Expr *ParentEx,
- raw_ostream &Out,
- BugReporterContext &BRC,
- PathSensitiveBugReport &R,
- const ExplodedNode *N,
- Optional<bool> &prunable,
+ bool patternMatch(const Expr *Ex, const Expr *ParentEx, raw_ostream &Out,
+ BugReporterContext &BRC, PathSensitiveBugReport &R,
+ const ExplodedNode *N, std::optional<bool> &prunable,
bool IsSameFieldName);
static bool isPieceMessageGeneric(const PathDiagnosticPiece *Piece);
@@ -622,8 +629,118 @@ public:
PathSensitiveBugReport &R) override;
};
-} // namespace ento
+class ObjCMethodCall;
+class CXXConstructorCall;
+
+/// Put a diagnostic on return statement (or on } in its absence) of all inlined
+/// functions for which some property remained unchanged.
+/// Resulting diagnostics may read such as "Returning without writing to X".
+///
+/// Descendants can define what a "state change is", like a change of value
+/// to a memory region, liveness, etc. For function calls where the state did
+/// not change as defined, a custom note may be constructed.
+///
+/// For a minimal example, check out
+/// clang/unittests/StaticAnalyzer/NoStateChangeFuncVisitorTest.cpp.
+class NoStateChangeFuncVisitor : public BugReporterVisitor {
+private:
+ /// Frames modifying the state as defined in \c wasModifiedBeforeCallExit.
+ /// This visitor generates a note only if a function does *not* change the
+ /// state that way. This information is not immediately available
+ /// by looking at the node associated with the exit from the function
+ /// (usually the return statement). To avoid recomputing the same information
+ /// many times (going up the path for each node and checking whether the
+ /// region was written into) we instead lazily compute the stack frames
+ /// along the path.
+ // TODO: Can't we just use a map instead? This is likely not as cheap as it
+ // makes the code difficult to read.
+ llvm::SmallPtrSet<const StackFrameContext *, 32> FramesModifying;
+ llvm::SmallPtrSet<const StackFrameContext *, 32> FramesModifyingCalculated;
+
+ /// Check and lazily calculate whether the state is modified in the stack
+ /// frame to which \p CallExitBeginN belongs.
+ /// The calculation is cached in FramesModifying.
+ bool isModifiedInFrame(const ExplodedNode *CallExitBeginN);
+
+ void markFrameAsModifying(const StackFrameContext *SCtx);
+
+ /// Write to \c FramesModifying all stack frames along the path in the current
+ /// stack frame which modifies the state.
+ void findModifyingFrames(const ExplodedNode *const CallExitBeginN);
+
+protected:
+ bugreporter::TrackingKind TKind;
+
+ /// \return Whether the state was modified from the current node, \p CurrN, to
+ /// the end of the stack frame, at \p CallExitBeginN. \p CurrN and
+ /// \p CallExitBeginN are always in the same stack frame.
+ /// Clients should override this callback when a state change is important
+ /// not only on the entire function call, but inside of it as well.
+ /// Example: we may want to leave a note about the lack of locking/unlocking
+ /// on a particular mutex, but not if inside the function its state was
+ /// changed, but also restored. wasModifiedInFunction() wouldn't know of this
+ /// change.
+ virtual bool wasModifiedBeforeCallExit(const ExplodedNode *CurrN,
+ const ExplodedNode *CallExitBeginN) {
+ return false;
+ }
+
+ /// \return Whether the state was modified in the inlined function call in
+ /// between \p CallEnterN and \p CallExitEndN. Mind that the stack frame
+ /// retrieved from a CallEnterN and CallExitEndN is the *caller's* stack
+ /// frame! The inlined function's stack should be retrieved from either the
+ /// immediate successor to \p CallEnterN or immediate predecessor to
+ /// \p CallExitEndN.
+ /// Clients should override this function if a state changes local to the
+ /// inlined function are not interesting, only the change occuring as a
+ /// result of it.
+ /// Example: we want to leave a not about a leaked resource object not being
+ /// deallocated / its ownership changed inside a function, and we don't care
+ /// if it was assigned to a local variable (its change in ownership is
+ /// inconsequential).
+ virtual bool wasModifiedInFunction(const ExplodedNode *CallEnterN,
+ const ExplodedNode *CallExitEndN) {
+ return false;
+ }
+
+ /// Consume the information on the non-modifying stack frame in order to
+ /// either emit a note or not. May suppress the report entirely.
+ /// \return Diagnostics piece for the unmodified state in the current
+ /// function, if it decides to emit one. A good description might start with
+ /// "Returning without...".
+ virtual PathDiagnosticPieceRef
+ maybeEmitNoteForObjCSelf(PathSensitiveBugReport &R,
+ const ObjCMethodCall &Call,
+ const ExplodedNode *N) = 0;
+
+ /// Consume the information on the non-modifying stack frame in order to
+ /// either emit a note or not. May suppress the report entirely.
+ /// \return Diagnostics piece for the unmodified state in the current
+ /// function, if it decides to emit one. A good description might start with
+ /// "Returning without...".
+ virtual PathDiagnosticPieceRef
+ maybeEmitNoteForCXXThis(PathSensitiveBugReport &R,
+ const CXXConstructorCall &Call,
+ const ExplodedNode *N) = 0;
+
+ /// Consume the information on the non-modifying stack frame in order to
+ /// either emit a note or not. May suppress the report entirely.
+ /// \return Diagnostics piece for the unmodified state in the current
+ /// function, if it decides to emit one. A good description might start with
+ /// "Returning without...".
+ virtual PathDiagnosticPieceRef
+ maybeEmitNoteForParameters(PathSensitiveBugReport &R, const CallEvent &Call,
+ const ExplodedNode *N) = 0;
+
+public:
+ NoStateChangeFuncVisitor(bugreporter::TrackingKind TKind) : TKind(TKind) {}
+ PathDiagnosticPieceRef VisitNode(const ExplodedNode *N,
+ BugReporterContext &BR,
+ PathSensitiveBugReport &R) final;
+};
+
+} // namespace ento
} // namespace clang
#endif // LLVM_CLANG_STATICANALYZER_CORE_BUGREPORTER_BUGREPORTERVISITORS_H
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugSuppression.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugSuppression.h
new file mode 100644
index 000000000000..4fd81b627519
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugSuppression.h
@@ -0,0 +1,53 @@
+//===- BugSuppression.h - Suppression interface -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BugSuppression, a simple interface class encapsulating
+// all user provided in-code suppressions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_STATICANALYZER_CORE_BUGREPORTER_SUPPRESSION_H
+#define LLVM_CLANG_STATICANALYZER_CORE_BUGREPORTER_SUPPRESSION_H
+
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang {
+class Decl;
+
+namespace ento {
+class BugReport;
+class PathDiagnosticLocation;
+
+class BugSuppression {
+public:
+ using DiagnosticIdentifierList = llvm::ArrayRef<llvm::StringRef>;
+
+ /// Return true if the given bug report was explicitly suppressed by the user.
+ bool isSuppressed(const BugReport &);
+
+ /// Return true if the bug reported at the given location was explicitly
+ /// suppressed by the user.
+ bool isSuppressed(const PathDiagnosticLocation &Location,
+ const Decl *DeclWithIssue,
+ DiagnosticIdentifierList DiagnosticIdentification);
+
+private:
+ // Overly pessimistic number, to be honest.
+ static constexpr unsigned EXPECTED_NUMBER_OF_SUPPRESSIONS = 8;
+ using CachedRanges =
+ llvm::SmallVector<SourceRange, EXPECTED_NUMBER_OF_SUPPRESSIONS>;
+
+ llvm::DenseMap<const Decl *, CachedRanges> CachedSuppressionLocations;
+};
+
+} // end namespace ento
+} // end namespace clang
+
+#endif // LLVM_CLANG_STATICANALYZER_CORE_BUGREPORTER_SUPPRESSION_H
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h
index 49ab25eca2dd..e50afd6d0da7 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h
@@ -23,8 +23,6 @@ namespace clang {
namespace ento {
class BugReporter;
-class ExplodedNode;
-class ExprEngine;
class BugType {
private:
@@ -37,13 +35,13 @@ private:
virtual void anchor();
public:
- BugType(CheckerNameRef CheckerName, StringRef Name, StringRef Cat,
- bool SuppressOnSink = false)
- : CheckerName(CheckerName), Description(Name), Category(Cat),
+ BugType(CheckerNameRef CheckerName, StringRef Desc,
+ StringRef Cat = categories::LogicError, bool SuppressOnSink = false)
+ : CheckerName(CheckerName), Description(Desc), Category(Cat),
Checker(nullptr), SuppressOnSink(SuppressOnSink) {}
- BugType(const CheckerBase *Checker, StringRef Name, StringRef Cat,
- bool SuppressOnSink = false)
- : CheckerName(Checker->getCheckerName()), Description(Name),
+ BugType(const CheckerBase *Checker, StringRef Desc,
+ StringRef Cat = categories::LogicError, bool SuppressOnSink = false)
+ : CheckerName(Checker->getCheckerName()), Description(Desc),
Category(Cat), Checker(Checker), SuppressOnSink(SuppressOnSink) {}
virtual ~BugType() = default;
@@ -66,27 +64,6 @@ public:
bool isSuppressOnSink() const { return SuppressOnSink; }
};
-class BuiltinBug : public BugType {
- const std::string desc;
- void anchor() override;
-public:
- BuiltinBug(class CheckerNameRef checker, const char *name,
- const char *description)
- : BugType(checker, name, categories::LogicError), desc(description) {}
-
- BuiltinBug(const CheckerBase *checker, const char *name,
- const char *description)
- : BugType(checker, name, categories::LogicError), desc(description) {}
-
- BuiltinBug(class CheckerNameRef checker, const char *name)
- : BugType(checker, name, categories::LogicError), desc(name) {}
-
- BuiltinBug(const CheckerBase *checker, const char *name)
- : BugType(checker, name, categories::LogicError), desc(name) {}
-
- StringRef getDescription() const { return desc; }
-};
-
} // namespace ento
} // end clang namespace
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h
index 392bc484bf62..45187433c069 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h
@@ -13,6 +13,7 @@
namespace clang {
namespace ento {
namespace categories {
+extern const char *const AppleAPIMisuse;
extern const char *const CoreFoundationObjectiveC;
extern const char *const LogicError;
extern const char *const MemoryRefCount;
@@ -22,6 +23,7 @@ extern const char *const CXXObjectLifecycle;
extern const char *const CXXMoveSemantics;
extern const char *const SecurityError;
extern const char *const UnusedCode;
+extern const char *const TaintedData;
} // namespace categories
} // namespace ento
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Checker.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Checker.h
index fdba49664615..2ec54a837c42 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Checker.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/Checker.h
@@ -193,9 +193,8 @@ public:
class Location {
template <typename CHECKER>
- static void _checkLocation(void *checker,
- const SVal &location, bool isLoad, const Stmt *S,
- CheckerContext &C) {
+ static void _checkLocation(void *checker, SVal location, bool isLoad,
+ const Stmt *S, CheckerContext &C) {
((const CHECKER *)checker)->checkLocation(location, isLoad, S, C);
}
@@ -209,8 +208,7 @@ public:
class Bind {
template <typename CHECKER>
- static void _checkBind(void *checker,
- const SVal &location, const SVal &val, const Stmt *S,
+ static void _checkBind(void *checker, SVal location, SVal val, const Stmt *S,
CheckerContext &C) {
((const CHECKER *)checker)->checkBind(location, val, S, C);
}
@@ -370,13 +368,12 @@ class PointerEscape {
Kind);
InvalidatedSymbols RegularEscape;
- for (InvalidatedSymbols::const_iterator I = Escaped.begin(),
- E = Escaped.end(); I != E; ++I)
- if (!ETraits->hasTrait(*I,
- RegionAndSymbolInvalidationTraits::TK_PreserveContents) &&
- !ETraits->hasTrait(*I,
- RegionAndSymbolInvalidationTraits::TK_SuppressEscape))
- RegularEscape.insert(*I);
+ for (SymbolRef Sym : Escaped)
+ if (!ETraits->hasTrait(
+ Sym, RegionAndSymbolInvalidationTraits::TK_PreserveContents) &&
+ !ETraits->hasTrait(
+ Sym, RegionAndSymbolInvalidationTraits::TK_SuppressEscape))
+ RegularEscape.insert(Sym);
if (RegularEscape.empty())
return State;
@@ -410,13 +407,13 @@ class ConstPointerEscape {
return State;
InvalidatedSymbols ConstEscape;
- for (InvalidatedSymbols::const_iterator I = Escaped.begin(),
- E = Escaped.end(); I != E; ++I)
- if (ETraits->hasTrait(*I,
- RegionAndSymbolInvalidationTraits::TK_PreserveContents) &&
- !ETraits->hasTrait(*I,
- RegionAndSymbolInvalidationTraits::TK_SuppressEscape))
- ConstEscape.insert(*I);
+ for (SymbolRef Sym : Escaped) {
+ if (ETraits->hasTrait(
+ Sym, RegionAndSymbolInvalidationTraits::TK_PreserveContents) &&
+ !ETraits->hasTrait(
+ Sym, RegionAndSymbolInvalidationTraits::TK_SuppressEscape))
+ ConstEscape.insert(Sym);
+ }
if (ConstEscape.empty())
return State;
@@ -457,10 +454,8 @@ namespace eval {
class Assume {
template <typename CHECKER>
- static ProgramStateRef _evalAssume(void *checker,
- ProgramStateRef state,
- const SVal &cond,
- bool assumption) {
+ static ProgramStateRef _evalAssume(void *checker, ProgramStateRef state,
+ SVal cond, bool assumption) {
return ((const CHECKER *)checker)->evalAssume(state, cond, assumption);
}
@@ -534,9 +529,9 @@ public:
template <typename EVENT>
class EventDispatcher {
- CheckerManager *Mgr;
+ CheckerManager *Mgr = nullptr;
public:
- EventDispatcher() : Mgr(nullptr) { }
+ EventDispatcher() = default;
template <typename CHECKER>
static void _register(CHECKER *checker, CheckerManager &mgr) {
@@ -563,18 +558,6 @@ struct ImplicitNullDerefEvent {
static int Tag;
};
-/// A helper class which wraps a boolean value set to false by default.
-///
-/// This class should behave exactly like 'bool' except that it doesn't need to
-/// be explicitly initialized.
-struct DefaultBool {
- bool val;
- DefaultBool() : val(false) {}
- /*implicit*/ operator bool&() { return val; }
- /*implicit*/ operator const bool&() const { return val; }
- DefaultBool &operator=(bool b) { val = b; return *this; }
-};
-
} // end ento namespace
} // end clang namespace
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
index d2f71baa56a4..a45ba1bc573e 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
@@ -28,7 +28,6 @@ namespace clang {
class AnalyzerOptions;
class CallExpr;
-class CXXNewExpr;
class Decl;
class LocationContext;
class Stmt;
@@ -154,7 +153,7 @@ public:
/// Constructs a CheckerManager without requiring an AST. No checker
/// registration will take place. Only useful when one needs to print the
- /// help flags through CheckerRegistryData, and the AST is unavalaible.
+ /// help flags through CheckerRegistryData, and the AST is unavailable.
CheckerManager(AnalyzerOptions &AOptions, const LangOptions &LangOpts,
DiagnosticsEngine &Diags, ArrayRef<std::string> plugins);
@@ -489,13 +488,11 @@ public:
using CheckCallFunc =
CheckerFn<void (const CallEvent &, CheckerContext &)>;
- using CheckLocationFunc =
- CheckerFn<void (const SVal &location, bool isLoad, const Stmt *S,
- CheckerContext &)>;
+ using CheckLocationFunc = CheckerFn<void(SVal location, bool isLoad,
+ const Stmt *S, CheckerContext &)>;
using CheckBindFunc =
- CheckerFn<void (const SVal &location, const SVal &val, const Stmt *S,
- CheckerContext &)>;
+ CheckerFn<void(SVal location, SVal val, const Stmt *S, CheckerContext &)>;
using CheckEndAnalysisFunc =
CheckerFn<void (ExplodedGraph &, BugReporter &, ExprEngine &)>;
@@ -531,8 +528,7 @@ public:
RegionAndSymbolInvalidationTraits *ITraits)>;
using EvalAssumeFunc =
- CheckerFn<ProgramStateRef (ProgramStateRef, const SVal &cond,
- bool assumption)>;
+ CheckerFn<ProgramStateRef(ProgramStateRef, SVal cond, bool assumption)>;
using EvalCallFunc = CheckerFn<bool (const CallEvent &, CheckerContext &)>;
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h
index 71a590d9e9a2..2694aac478cd 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h
@@ -20,7 +20,6 @@
namespace clang {
-class AnalyzerOptions;
class MacroExpansionContext;
class Preprocessor;
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h
index 4b7d6054cd87..f1c50e721937 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h
@@ -21,8 +21,8 @@ class APSIntType {
bool IsUnsigned;
public:
- APSIntType(uint32_t Width, bool Unsigned)
- : BitWidth(Width), IsUnsigned(Unsigned) {}
+ constexpr APSIntType(uint32_t Width, bool Unsigned)
+ : BitWidth(Width), IsUnsigned(Unsigned) {}
/* implicit */ APSIntType(const llvm::APSInt &Value)
: BitWidth(Value.getBitWidth()), IsUnsigned(Value.isUnsigned()) {}
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
index bb598af68166..ec503b41b381 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
@@ -34,7 +34,6 @@
namespace clang {
class CXXBaseSpecifier;
-class DeclaratorDecl;
namespace ento {
@@ -67,10 +66,14 @@ class LazyCompoundValData : public llvm::FoldingSetNode {
public:
LazyCompoundValData(const StoreRef &st, const TypedValueRegion *r)
: store(st), region(r) {
+ assert(r);
assert(NonLoc::isCompoundType(r->getValueType()));
}
+ /// It might return null.
const void *getStore() const { return store.getStore(); }
+
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const TypedValueRegion *getRegion() const { return region; }
static void Profile(llvm::FoldingSetNodeID& ID,
@@ -98,6 +101,8 @@ public:
llvm::ImmutableList<const CXXBaseSpecifier *> L);
void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, D, L); }
+
+ /// It might return null.
const NamedDecl *getDeclaratorDecl() const { return D; }
llvm::ImmutableList<const CXXBaseSpecifier *> getCXXBaseList() const {
@@ -147,9 +152,15 @@ public:
T = AT->getValueType();
}
- assert(T->isIntegralOrEnumerationType() || Loc::isLocType(T));
- return APSIntType(Ctx.getIntWidth(T),
- !T->isSignedIntegerOrEnumerationType());
+ if (T->isIntegralOrEnumerationType() || Loc::isLocType(T)) {
+ return APSIntType(Ctx.getIntWidth(T),
+ !T->isSignedIntegerOrEnumerationType());
+ } else {
+ // implicitly handle case of T->isFixedPointType()
+ return APSIntType(Ctx.getIntWidth(T), T->isUnsignedFixedPointType());
+ }
+
+ llvm_unreachable("Unsupported type in getAPSIntType!");
}
/// Convert - Create a new persistent APSInt with the same value as 'From'
@@ -221,14 +232,6 @@ public:
return getValue(0, Ctx.getTypeSize(T), true);
}
- const llvm::APSInt &getZeroWithPtrWidth(bool isUnsigned = true) {
- return getValue(0, Ctx.getTypeSize(Ctx.VoidPtrTy), isUnsigned);
- }
-
- const llvm::APSInt &getIntWithPtrWidth(uint64_t X, bool isUnsigned) {
- return getValue(X, Ctx.getTypeSize(Ctx.VoidPtrTy), isUnsigned);
- }
-
const llvm::APSInt &getTruthValue(bool b, QualType T) {
return getValue(b ? 1 : 0, Ctx.getIntWidth(T),
T->isUnsignedIntegerOrEnumerationType());
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h
new file mode 100644
index 000000000000..965838a4408c
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h
@@ -0,0 +1,257 @@
+//===- CallDescription.h - function/method call matching --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file defines a generic mechanism for matching for function and
+/// method calls of C, C++, and Objective-C languages. Instances of these
+/// classes are frequently used together with the CallEvent classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_CALLDESCRIPTION_H
+#define LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_CALLDESCRIPTION_H
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/Compiler.h"
+#include <optional>
+#include <vector>
+
+namespace clang {
+class IdentifierInfo;
+} // namespace clang
+
+namespace clang {
+namespace ento {
+
+enum CallDescriptionFlags : unsigned {
+ CDF_None = 0,
+
+ /// Describes a C standard function that is sometimes implemented as a macro
+ /// that expands to a compiler builtin with some __builtin prefix.
+ /// The builtin may as well have a few extra arguments on top of the requested
+ /// number of arguments.
+ CDF_MaybeBuiltin = 1 << 0,
+};
+
+/// This class represents a description of a function call using the number of
+/// arguments and the name of the function.
+class CallDescription {
+ friend class CallEvent;
+ using MaybeCount = std::optional<unsigned>;
+
+ mutable std::optional<const IdentifierInfo *> II;
+ // The list of the qualified names used to identify the specified CallEvent,
+ // e.g. "{a, b}" represent the qualified names, like "a::b".
+ std::vector<std::string> QualifiedName;
+ MaybeCount RequiredArgs;
+ MaybeCount RequiredParams;
+ int Flags;
+
+public:
+ /// Constructs a CallDescription object.
+ ///
+ /// @param QualifiedName The list of the name qualifiers of the function that
+ /// will be matched. The user is allowed to skip any of the qualifiers.
+ /// For example, {"std", "basic_string", "c_str"} would match both
+ /// std::basic_string<...>::c_str() and std::__1::basic_string<...>::c_str().
+ ///
+ /// @param RequiredArgs The number of arguments that is expected to match a
+ /// call. Omit this parameter to match every occurrence of call with a given
+ /// name regardless the number of arguments.
+ CallDescription(CallDescriptionFlags Flags, ArrayRef<StringRef> QualifiedName,
+ MaybeCount RequiredArgs = std::nullopt,
+ MaybeCount RequiredParams = std::nullopt);
+
+ /// Construct a CallDescription with default flags.
+ CallDescription(ArrayRef<StringRef> QualifiedName,
+ MaybeCount RequiredArgs = std::nullopt,
+ MaybeCount RequiredParams = std::nullopt);
+
+ CallDescription(std::nullptr_t) = delete;
+
+ /// Get the name of the function that this object matches.
+ StringRef getFunctionName() const { return QualifiedName.back(); }
+
+ /// Get the qualified name parts in reversed order.
+ /// E.g. { "std", "vector", "data" } -> "vector", "std"
+ auto begin_qualified_name_parts() const {
+ return std::next(QualifiedName.rbegin());
+ }
+ auto end_qualified_name_parts() const { return QualifiedName.rend(); }
+
+ /// It's false, if and only if we expect a single identifier, such as
+ /// `getenv`. It's true for `std::swap`, or `my::detail::container::data`.
+ bool hasQualifiedNameParts() const { return QualifiedName.size() > 1; }
+
+ /// @name Matching CallDescriptions against a CallEvent
+ /// @{
+
+ /// Returns true if the CallEvent is a call to a function that matches
+ /// the CallDescription.
+ ///
+ /// \note This function is not intended to be used to match Obj-C method
+ /// calls.
+ bool matches(const CallEvent &Call) const;
+
+ /// Returns true whether the CallEvent matches on any of the CallDescriptions
+ /// supplied.
+ ///
+ /// \note This function is not intended to be used to match Obj-C method
+ /// calls.
+ friend bool matchesAny(const CallEvent &Call, const CallDescription &CD1) {
+ return CD1.matches(Call);
+ }
+
+ /// \copydoc clang::ento::CallDescription::matchesAny(const CallEvent &, const CallDescription &)
+ template <typename... Ts>
+ friend bool matchesAny(const CallEvent &Call, const CallDescription &CD1,
+ const Ts &...CDs) {
+ return CD1.matches(Call) || matchesAny(Call, CDs...);
+ }
+ /// @}
+
+ /// @name Matching CallDescriptions against a CallExpr
+ /// @{
+
+ /// Returns true if the CallExpr is a call to a function that matches the
+ /// CallDescription.
+ ///
+ /// When available, always prefer matching with a CallEvent! This function
+ /// exists only when that is not available, for example, when _only_
+ /// syntactic check is done on a piece of code.
+ ///
+ /// Also, StdLibraryFunctionsChecker::Signature is likely a better candicade
+ /// for syntactic only matching if you are writing a new checker. This is
+ /// handy if a CallDescriptionMap is already there.
+ ///
+ /// The function is imprecise because CallEvent may know path sensitive
+ /// information, such as the precise argument count (see comments for
+ /// CallEvent::getNumArgs), the called function if it was called through a
+ /// function pointer, and other information not available syntactically.
+ bool matchesAsWritten(const CallExpr &CE) const;
+
+ /// Returns true whether the CallExpr matches on any of the CallDescriptions
+ /// supplied.
+ ///
+ /// \note This function is not intended to be used to match Obj-C method
+ /// calls.
+ friend bool matchesAnyAsWritten(const CallExpr &CE,
+ const CallDescription &CD1) {
+ return CD1.matchesAsWritten(CE);
+ }
+
+ /// \copydoc clang::ento::CallDescription::matchesAnyAsWritten(const CallExpr &, const CallDescription &)
+ template <typename... Ts>
+ friend bool matchesAnyAsWritten(const CallExpr &CE,
+ const CallDescription &CD1,
+ const Ts &...CDs) {
+ return CD1.matchesAsWritten(CE) || matchesAnyAsWritten(CE, CDs...);
+ }
+ /// @}
+
+private:
+ bool matchesImpl(const FunctionDecl *Callee, size_t ArgCount,
+ size_t ParamCount) const;
+};
+
+/// An immutable map from CallDescriptions to arbitrary data. Provides a unified
+/// way for checkers to react on function calls.
+template <typename T> class CallDescriptionMap {
+ friend class CallDescriptionSet;
+
+ // Some call descriptions aren't easily hashable (eg., the ones with qualified
+ // names in which some sections are omitted), so let's put them
+ // in a simple vector and use linear lookup.
+ // TODO: Implement an actual map for fast lookup for "hashable" call
+ // descriptions (eg., the ones for C functions that just match the name).
+ std::vector<std::pair<CallDescription, T>> LinearMap;
+
+public:
+ CallDescriptionMap(
+ std::initializer_list<std::pair<CallDescription, T>> &&List)
+ : LinearMap(List) {}
+
+ template <typename InputIt>
+ CallDescriptionMap(InputIt First, InputIt Last) : LinearMap(First, Last) {}
+
+ ~CallDescriptionMap() = default;
+
+ // These maps are usually stored once per checker, so let's make sure
+ // we don't do redundant copies.
+ CallDescriptionMap(const CallDescriptionMap &) = delete;
+ CallDescriptionMap &operator=(const CallDescription &) = delete;
+
+ CallDescriptionMap(CallDescriptionMap &&) = default;
+ CallDescriptionMap &operator=(CallDescriptionMap &&) = default;
+
+ [[nodiscard]] const T *lookup(const CallEvent &Call) const {
+ // Slow path: linear lookup.
+ // TODO: Implement some sort of fast path.
+ for (const std::pair<CallDescription, T> &I : LinearMap)
+ if (I.first.matches(Call))
+ return &I.second;
+
+ return nullptr;
+ }
+
+ /// When available, always prefer lookup with a CallEvent! This function
+ /// exists only when that is not available, for example, when _only_
+ /// syntactic check is done on a piece of code.
+ ///
+ /// Also, StdLibraryFunctionsChecker::Signature is likely a better candicade
+ /// for syntactic only matching if you are writing a new checker. This is
+ /// handy if a CallDescriptionMap is already there.
+ ///
+ /// The function is imprecise because CallEvent may know path sensitive
+ /// information, such as the precise argument count (see comments for
+ /// CallEvent::getNumArgs), the called function if it was called through a
+ /// function pointer, and other information not available syntactically.
+ [[nodiscard]] const T *lookupAsWritten(const CallExpr &Call) const {
+ // Slow path: linear lookup.
+ // TODO: Implement some sort of fast path.
+ for (const std::pair<CallDescription, T> &I : LinearMap)
+ if (I.first.matchesAsWritten(Call))
+ return &I.second;
+
+ return nullptr;
+ }
+};
+
+/// An immutable set of CallDescriptions.
+/// Checkers can efficiently decide if a given CallEvent matches any
+/// CallDescription in the set.
+class CallDescriptionSet {
+ CallDescriptionMap<bool /*unused*/> Impl = {};
+
+public:
+ CallDescriptionSet(std::initializer_list<CallDescription> &&List);
+
+ CallDescriptionSet(const CallDescriptionSet &) = delete;
+ CallDescriptionSet &operator=(const CallDescription &) = delete;
+
+ [[nodiscard]] bool contains(const CallEvent &Call) const;
+
+ /// When available, always prefer lookup with a CallEvent! This function
+ /// exists only when that is not available, for example, when _only_
+ /// syntactic check is done on a piece of code.
+ ///
+ /// Also, StdLibraryFunctionsChecker::Signature is likely a better candicade
+ /// for syntactic only matching if you are writing a new checker. This is
+ /// handy if a CallDescriptionMap is already there.
+ ///
+ /// The function is imprecise because CallEvent may know path sensitive
+ /// information, such as the precise argument count (see comments for
+ /// CallEvent::getNumArgs), the called function if it was called through a
+ /// function pointer, and other information not available syntactically.
+ [[nodiscard]] bool containsAsWritten(const CallExpr &CE) const;
+};
+
+} // namespace ento
+} // namespace clang
+
+#endif // LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_CALLDESCRIPTION_H
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
index 060fff1a7407..0d36587484bf 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h
@@ -45,6 +45,7 @@
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <limits>
+#include <optional>
#include <utility>
namespace clang {
@@ -76,22 +77,24 @@ enum CallEventKind {
};
class CallEvent;
-class CallDescription;
-template<typename T = CallEvent>
+template <typename T = CallEvent>
class CallEventRef : public IntrusiveRefCntPtr<const T> {
public:
CallEventRef(const T *Call) : IntrusiveRefCntPtr<const T>(Call) {}
CallEventRef(const CallEventRef &Orig) : IntrusiveRefCntPtr<const T>(Orig) {}
+ // The copy assignment operator is defined as deleted pending further
+ // motivation.
+ CallEventRef &operator=(const CallEventRef &) = delete;
+
CallEventRef<T> cloneWithState(ProgramStateRef State) const {
return this->get()->template cloneWithState<T>(State);
}
// Allow implicit conversions to a superclass type, since CallEventRef
// behaves like a pointer-to-const.
- template <typename SuperT>
- operator CallEventRef<SuperT> () const {
+ template <typename SuperT> operator CallEventRef<SuperT>() const {
return this->get();
}
};
@@ -114,12 +117,18 @@ class RuntimeDefinition {
/// precise.
const MemRegion *R = nullptr;
+ /// A definition is foreign if it has been imported and newly created by the
+ /// ASTImporter. This can be true only if CTU is enabled.
+ const bool Foreign = false;
+
public:
RuntimeDefinition() = default;
- RuntimeDefinition(const Decl *InD): D(InD) {}
- RuntimeDefinition(const Decl *InD, const MemRegion *InR): D(InD), R(InR) {}
+ RuntimeDefinition(const Decl *InD) : D(InD) {}
+ RuntimeDefinition(const Decl *InD, bool Foreign) : D(InD), Foreign(Foreign) {}
+ RuntimeDefinition(const Decl *InD, const MemRegion *InR) : D(InD), R(InR) {}
const Decl *getDecl() { return D; }
+ bool isForeign() const { return Foreign; }
/// Check if the definition we have is precise.
/// If not, it is possible that the call dispatches to another definition at
@@ -148,6 +157,8 @@ private:
ProgramStateRef State;
const LocationContext *LCtx;
llvm::PointerUnion<const Expr *, const Decl *> Origin;
+ CFGBlock::ConstCFGElementRef ElemRef = {nullptr, 0};
+ mutable std::optional<bool> Foreign; // Set by CTU analysis.
protected:
// This is user data for subclasses.
@@ -169,16 +180,19 @@ private:
protected:
friend class CallEventManager;
- CallEvent(const Expr *E, ProgramStateRef state, const LocationContext *lctx)
- : State(std::move(state)), LCtx(lctx), Origin(E) {}
+ CallEvent(const Expr *E, ProgramStateRef state, const LocationContext *lctx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : State(std::move(state)), LCtx(lctx), Origin(E), ElemRef(ElemRef) {}
- CallEvent(const Decl *D, ProgramStateRef state, const LocationContext *lctx)
- : State(std::move(state)), LCtx(lctx), Origin(D) {}
+ CallEvent(const Decl *D, ProgramStateRef state, const LocationContext *lctx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : State(std::move(state)), LCtx(lctx), Origin(D), ElemRef(ElemRef) {}
// DO NOT MAKE PUBLIC
CallEvent(const CallEvent &Original)
: State(Original.State), LCtx(Original.LCtx), Origin(Original.Origin),
- Data(Original.Data), Location(Original.Location) {}
+ ElemRef(Original.ElemRef), Data(Original.Data),
+ Location(Original.Location) {}
/// Copies this CallEvent, with vtable intact, into a new block of memory.
virtual void cloneTo(void *Dest) const = 0;
@@ -192,8 +206,9 @@ protected:
/// Used to specify non-argument regions that will be invalidated as a
/// result of this call.
- virtual void getExtraInvalidatedValues(ValueList &Values,
- RegionAndSymbolInvalidationTraits *ETraits) const {}
+ virtual void
+ getExtraInvalidatedValues(ValueList &Values,
+ RegionAndSymbolInvalidationTraits *ETraits) const {}
public:
CallEvent &operator=(const CallEvent &) = delete;
@@ -209,14 +224,20 @@ public:
return Origin.dyn_cast<const Decl *>();
}
- /// The state in which the call is being evaluated.
- const ProgramStateRef &getState() const {
- return State;
+ bool isForeign() const {
+ assert(Foreign && "Foreign must be set before querying");
+ return *Foreign;
}
+ void setForeign(bool B) const { Foreign = B; }
+
+ /// The state in which the call is being evaluated.
+ const ProgramStateRef &getState() const { return State; }
/// The context in which the call is being evaluated.
- const LocationContext *getLocationContext() const {
- return LCtx;
+ const LocationContext *getLocationContext() const { return LCtx; }
+
+ const CFGBlock::ConstCFGElementRef &getCFGElementRef() const {
+ return ElemRef;
}
/// Returns the definition of the function or method that will be
@@ -245,7 +266,7 @@ public:
SourceLocation Loc = D->getLocation();
if (Loc.isValid()) {
const SourceManager &SM =
- getState()->getStateManager().getContext().getSourceManager();
+ getState()->getStateManager().getContext().getSourceManager();
return SM.isInSystemHeader(D->getLocation());
}
@@ -257,20 +278,6 @@ public:
return false;
}
- /// Returns true if the CallEvent is a call to a function that matches
- /// the CallDescription.
- ///
- /// Note that this function is not intended to be used to match Obj-C method
- /// calls.
- bool isCalled(const CallDescription &CD) const;
-
- /// Returns true whether the CallEvent is any of the CallDescriptions supplied
- /// as a parameter.
- template <typename FirstCallDesc, typename... CallDescs>
- bool isCalled(const FirstCallDesc &First, const CallDescs &... Rest) const {
- return isCalled(First) || isCalled(Rest...);
- }
-
/// Returns a source range for the entire call, suitable for
/// outputting in diagnostics.
virtual SourceRange getSourceRange() const {
@@ -313,9 +320,7 @@ public:
// NOTE: The exact semantics of this are still being defined!
// We don't really want a list of hardcoded exceptions in the long run,
// but we don't want duplicated lists of known APIs in the short term either.
- virtual bool argumentsMayEscape() const {
- return hasNonZeroCallbackArg();
- }
+ virtual bool argumentsMayEscape() const { return hasNonZeroCallbackArg(); }
/// Returns true if the callee is an externally-visible function in the
/// top-level namespace, such as \c malloc.
@@ -416,14 +421,15 @@ public:
bool isArgumentConstructedDirectly(unsigned Index) const {
// This assumes that the object was not yet removed from the state.
return ExprEngine::getObjectUnderConstruction(
- getState(), {getOriginExpr(), Index}, getLocationContext()).hasValue();
+ getState(), {getOriginExpr(), Index}, getLocationContext())
+ .has_value();
}
/// Some calls have parameter numbering mismatched from argument numbering.
/// This function converts an argument index to the corresponding
- /// parameter index. Returns None is the argument doesn't correspond
+ /// parameter index. Returns std::nullopt is the argument doesn't correspond
/// to any parameter variable.
- virtual Optional<unsigned>
+ virtual std::optional<unsigned>
getAdjustedParameterIndex(unsigned ASTArgumentIndex) const {
return ASTArgumentIndex;
}
@@ -442,7 +448,15 @@ public:
/// If the call returns a C++ record type then the region of its return value
/// can be retrieved from its construction context.
- Optional<SVal> getReturnValueUnderConstruction() const;
+ std::optional<SVal> getReturnValueUnderConstruction() const;
+
+ // Returns the CallEvent representing the caller of this function
+ const CallEventRef<> getCaller() const;
+
+ // Returns true if the function was called from a standard library function.
+ // If not or could not get the caller (it may be a top level function)
+ // returns false.
+ bool isCalledFromSystemHeader() const;
// Iterator access to formal parameters and their types.
private:
@@ -484,11 +498,13 @@ public:
class AnyFunctionCall : public CallEvent {
protected:
AnyFunctionCall(const Expr *E, ProgramStateRef St,
- const LocationContext *LCtx)
- : CallEvent(E, St, LCtx) {}
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : CallEvent(E, St, LCtx, ElemRef) {}
AnyFunctionCall(const Decl *D, ProgramStateRef St,
- const LocationContext *LCtx)
- : CallEvent(D, St, LCtx) {}
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : CallEvent(D, St, LCtx, ElemRef) {}
AnyFunctionCall(const AnyFunctionCall &Other) = default;
public:
@@ -521,8 +537,9 @@ class SimpleFunctionCall : public AnyFunctionCall {
protected:
SimpleFunctionCall(const CallExpr *CE, ProgramStateRef St,
- const LocationContext *LCtx)
- : AnyFunctionCall(CE, St, LCtx) {}
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : AnyFunctionCall(CE, St, LCtx, ElemRef) {}
SimpleFunctionCall(const SimpleFunctionCall &Other) = default;
void cloneTo(void *Dest) const override {
@@ -557,15 +574,16 @@ class BlockCall : public CallEvent {
friend class CallEventManager;
protected:
- BlockCall(const CallExpr *CE, ProgramStateRef St,
- const LocationContext *LCtx)
- : CallEvent(CE, St, LCtx) {}
+ BlockCall(const CallExpr *CE, ProgramStateRef St, const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : CallEvent(CE, St, LCtx, ElemRef) {}
BlockCall(const BlockCall &Other) = default;
void cloneTo(void *Dest) const override { new (Dest) BlockCall(*this); }
- void getExtraInvalidatedValues(ValueList &Values,
- RegionAndSymbolInvalidationTraits *ETraits) const override;
+ void getExtraInvalidatedValues(
+ ValueList &Values,
+ RegionAndSymbolInvalidationTraits *ETraits) const override;
public:
const CallExpr *getOriginExpr() const override {
@@ -605,10 +623,9 @@ public:
const BlockDataRegion *BR = getBlockRegion();
assert(BR && "Block converted from lambda must have a block region");
- auto I = BR->referenced_vars_begin();
- assert(I != BR->referenced_vars_end());
-
- return I.getCapturedRegion();
+ auto ReferencedVars = BR->referenced_vars();
+ assert(!ReferencedVars.empty());
+ return ReferencedVars.begin().getCapturedRegion();
}
RuntimeDefinition getRuntimeDefinition() const override {
@@ -636,14 +653,12 @@ public:
// the block body and analyze the operator() method on the captured lambda.
const VarDecl *LambdaVD = getRegionStoringCapturedLambda()->getDecl();
const CXXRecordDecl *LambdaDecl = LambdaVD->getType()->getAsCXXRecordDecl();
- CXXMethodDecl* LambdaCallOperator = LambdaDecl->getLambdaCallOperator();
+ CXXMethodDecl *LambdaCallOperator = LambdaDecl->getLambdaCallOperator();
return RuntimeDefinition(LambdaCallOperator);
}
- bool argumentsMayEscape() const override {
- return true;
- }
+ bool argumentsMayEscape() const override { return true; }
void getInitialStackFrameContents(const StackFrameContext *CalleeCtx,
BindingsTy &Bindings) const override;
@@ -661,15 +676,18 @@ public:
class CXXInstanceCall : public AnyFunctionCall {
protected:
CXXInstanceCall(const CallExpr *CE, ProgramStateRef St,
- const LocationContext *LCtx)
- : AnyFunctionCall(CE, St, LCtx) {}
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : AnyFunctionCall(CE, St, LCtx, ElemRef) {}
CXXInstanceCall(const FunctionDecl *D, ProgramStateRef St,
- const LocationContext *LCtx)
- : AnyFunctionCall(D, St, LCtx) {}
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : AnyFunctionCall(D, St, LCtx, ElemRef) {}
CXXInstanceCall(const CXXInstanceCall &Other) = default;
- void getExtraInvalidatedValues(ValueList &Values,
- RegionAndSymbolInvalidationTraits *ETraits) const override;
+ void getExtraInvalidatedValues(
+ ValueList &Values,
+ RegionAndSymbolInvalidationTraits *ETraits) const override;
public:
/// Returns the expression representing the implicit 'this' object.
@@ -699,8 +717,9 @@ class CXXMemberCall : public CXXInstanceCall {
protected:
CXXMemberCall(const CXXMemberCallExpr *CE, ProgramStateRef St,
- const LocationContext *LCtx)
- : CXXInstanceCall(CE, St, LCtx) {}
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : CXXInstanceCall(CE, St, LCtx, ElemRef) {}
CXXMemberCall(const CXXMemberCall &Other) = default;
void cloneTo(void *Dest) const override { new (Dest) CXXMemberCall(*this); }
@@ -741,8 +760,9 @@ class CXXMemberOperatorCall : public CXXInstanceCall {
protected:
CXXMemberOperatorCall(const CXXOperatorCallExpr *CE, ProgramStateRef St,
- const LocationContext *LCtx)
- : CXXInstanceCall(CE, St, LCtx) {}
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : CXXInstanceCall(CE, St, LCtx, ElemRef) {}
CXXMemberOperatorCall(const CXXMemberOperatorCall &Other) = default;
void cloneTo(void *Dest) const override {
@@ -771,12 +791,13 @@ public:
return CA->getKind() == CE_CXXMemberOperator;
}
- Optional<unsigned>
+ std::optional<unsigned>
getAdjustedParameterIndex(unsigned ASTArgumentIndex) const override {
// For member operator calls argument 0 on the expression corresponds
// to implicit this-parameter on the declaration.
- return (ASTArgumentIndex > 0) ? Optional<unsigned>(ASTArgumentIndex - 1)
- : None;
+ return (ASTArgumentIndex > 0)
+ ? std::optional<unsigned>(ASTArgumentIndex - 1)
+ : std::nullopt;
}
unsigned getASTArgumentIndex(unsigned CallArgumentIndex) const override {
@@ -807,17 +828,26 @@ protected:
/// \param Target The object region to be destructed.
/// \param St The path-sensitive state at this point in the program.
/// \param LCtx The location context at this point in the program.
+ /// \param ElemRef The reference to this destructor in the CFG.
+ ///
+ /// FIXME: Eventually we want to drop \param Target and deduce it from
+ /// \param ElemRef. To do that we need to migrate the logic for target
+ /// region lookup from ExprEngine::ProcessImplicitDtor() and make it
+ /// independent from ExprEngine.
CXXDestructorCall(const CXXDestructorDecl *DD, const Stmt *Trigger,
const MemRegion *Target, bool IsBaseDestructor,
- ProgramStateRef St, const LocationContext *LCtx)
- : CXXInstanceCall(DD, St, LCtx) {
+ ProgramStateRef St, const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : CXXInstanceCall(DD, St, LCtx, ElemRef) {
Data = DtorDataTy(Target, IsBaseDestructor).getOpaqueValue();
Location = Trigger->getEndLoc();
}
CXXDestructorCall(const CXXDestructorCall &Other) = default;
- void cloneTo(void *Dest) const override {new (Dest) CXXDestructorCall(*this);}
+ void cloneTo(void *Dest) const override {
+ new (Dest) CXXDestructorCall(*this);
+ }
public:
SourceRange getSourceRange() const override { return Location; }
@@ -846,15 +876,17 @@ public:
class AnyCXXConstructorCall : public AnyFunctionCall {
protected:
AnyCXXConstructorCall(const Expr *E, const MemRegion *Target,
- ProgramStateRef St, const LocationContext *LCtx)
- : AnyFunctionCall(E, St, LCtx) {
+ ProgramStateRef St, const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : AnyFunctionCall(E, St, LCtx, ElemRef) {
assert(E && (isa<CXXConstructExpr>(E) || isa<CXXInheritedCtorInitExpr>(E)));
// Target may be null when the region is unknown.
Data = Target;
}
- void getExtraInvalidatedValues(ValueList &Values,
- RegionAndSymbolInvalidationTraits *ETraits) const override;
+ void getExtraInvalidatedValues(
+ ValueList &Values,
+ RegionAndSymbolInvalidationTraits *ETraits) const override;
void getInitialStackFrameContents(const StackFrameContext *CalleeCtx,
BindingsTy &Bindings) const override;
@@ -883,13 +915,20 @@ protected:
/// a new symbolic region will be used.
/// \param St The path-sensitive state at this point in the program.
/// \param LCtx The location context at this point in the program.
+ /// \param ElemRef The reference to this constructor in the CFG.
+ ///
+ /// FIXME: Eventually we want to drop \param Target and deduce it from
+ /// \param ElemRef.
CXXConstructorCall(const CXXConstructExpr *CE, const MemRegion *Target,
- ProgramStateRef St, const LocationContext *LCtx)
- : AnyCXXConstructorCall(CE, Target, St, LCtx) {}
+ ProgramStateRef St, const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : AnyCXXConstructorCall(CE, Target, St, LCtx, ElemRef) {}
CXXConstructorCall(const CXXConstructorCall &Other) = default;
- void cloneTo(void *Dest) const override { new (Dest) CXXConstructorCall(*this); }
+ void cloneTo(void *Dest) const override {
+ new (Dest) CXXConstructorCall(*this);
+ }
public:
const CXXConstructExpr *getOriginExpr() const override {
@@ -940,8 +979,9 @@ class CXXInheritedConstructorCall : public AnyCXXConstructorCall {
protected:
CXXInheritedConstructorCall(const CXXInheritedCtorInitExpr *CE,
const MemRegion *Target, ProgramStateRef St,
- const LocationContext *LCtx)
- : AnyCXXConstructorCall(CE, Target, St, LCtx) {}
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : AnyCXXConstructorCall(CE, Target, St, LCtx, ElemRef) {}
CXXInheritedConstructorCall(const CXXInheritedConstructorCall &Other) =
default;
@@ -1002,11 +1042,14 @@ class CXXAllocatorCall : public AnyFunctionCall {
protected:
CXXAllocatorCall(const CXXNewExpr *E, ProgramStateRef St,
- const LocationContext *LCtx)
- : AnyFunctionCall(E, St, LCtx) {}
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : AnyFunctionCall(E, St, LCtx, ElemRef) {}
CXXAllocatorCall(const CXXAllocatorCall &Other) = default;
- void cloneTo(void *Dest) const override { new (Dest) CXXAllocatorCall(*this); }
+ void cloneTo(void *Dest) const override {
+ new (Dest) CXXAllocatorCall(*this);
+ }
public:
const CXXNewExpr *getOriginExpr() const override {
@@ -1018,9 +1061,8 @@ public:
}
SVal getObjectUnderConstruction() const {
- return ExprEngine::getObjectUnderConstruction(getState(), getOriginExpr(),
- getLocationContext())
- .getValue();
+ return *ExprEngine::getObjectUnderConstruction(getState(), getOriginExpr(),
+ getLocationContext());
}
/// Number of non-placement arguments to the call. It is equal to 2 for
@@ -1034,6 +1076,18 @@ public:
return getOriginExpr()->getNumPlacementArgs() + getNumImplicitArgs();
}
+ bool isArray() const { return getOriginExpr()->isArray(); }
+
+ std::optional<const clang::Expr *> getArraySizeExpr() const {
+ return getOriginExpr()->getArraySize();
+ }
+
+ SVal getArraySizeVal() const {
+ assert(isArray() && "The allocator call doesn't allocate and array!");
+
+ return getState()->getSVal(*getArraySizeExpr(), getLocationContext());
+ }
+
const Expr *getArgExpr(unsigned Index) const override {
// The first argument of an allocator call is the size of the allocation.
if (Index < getNumImplicitArgs())
@@ -1072,8 +1126,9 @@ class CXXDeallocatorCall : public AnyFunctionCall {
protected:
CXXDeallocatorCall(const CXXDeleteExpr *E, ProgramStateRef St,
- const LocationContext *LCtx)
- : AnyFunctionCall(E, St, LCtx) {}
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : AnyFunctionCall(E, St, LCtx, ElemRef) {}
CXXDeallocatorCall(const CXXDeallocatorCall &Other) = default;
void cloneTo(void *Dest) const override {
@@ -1108,11 +1163,7 @@ public:
//
// Note to maintainers: OCM_Message should always be last, since it does not
// need to fit in the Data field's low bits.
-enum ObjCMessageKind {
- OCM_PropertyAccess,
- OCM_Subscript,
- OCM_Message
-};
+enum ObjCMessageKind { OCM_PropertyAccess, OCM_Subscript, OCM_Message };
/// Represents any expression that calls an Objective-C method.
///
@@ -1124,8 +1175,9 @@ class ObjCMethodCall : public CallEvent {
protected:
ObjCMethodCall(const ObjCMessageExpr *Msg, ProgramStateRef St,
- const LocationContext *LCtx)
- : CallEvent(Msg, St, LCtx) {
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef)
+ : CallEvent(Msg, St, LCtx, ElemRef) {
Data = nullptr;
}
@@ -1133,8 +1185,9 @@ protected:
void cloneTo(void *Dest) const override { new (Dest) ObjCMethodCall(*this); }
- void getExtraInvalidatedValues(ValueList &Values,
- RegionAndSymbolInvalidationTraits *ETraits) const override;
+ void getExtraInvalidatedValues(
+ ValueList &Values,
+ RegionAndSymbolInvalidationTraits *ETraits) const override;
/// Check if the selector may have multiple definitions (may have overrides).
virtual bool canBeOverridenInSubclass(ObjCInterfaceDecl *IDecl,
@@ -1149,9 +1202,7 @@ public:
return getOriginExpr()->getMethodDecl();
}
- unsigned getNumArgs() const override {
- return getOriginExpr()->getNumArgs();
- }
+ unsigned getNumArgs() const override { return getOriginExpr()->getNumArgs(); }
const Expr *getArgExpr(unsigned Index) const override {
return getOriginExpr()->getArg(Index);
@@ -1165,9 +1216,7 @@ public:
return getOriginExpr()->getMethodFamily();
}
- Selector getSelector() const {
- return getOriginExpr()->getSelector();
- }
+ Selector getSelector() const { return getOriginExpr()->getSelector(); }
SourceRange getSourceRange() const override;
@@ -1215,7 +1264,7 @@ public:
void getInitialStackFrameContents(const StackFrameContext *CalleeCtx,
BindingsTy &Bindings) const override;
- ArrayRef<ParmVarDecl*> parameters() const override;
+ ArrayRef<ParmVarDecl *> parameters() const override;
Kind getKind() const override { return CE_ObjCMessage; }
StringRef getKindAsString() const override { return "ObjCMethodCall"; }
@@ -1225,99 +1274,6 @@ public:
}
};
-enum CallDescriptionFlags : int {
- /// Describes a C standard function that is sometimes implemented as a macro
- /// that expands to a compiler builtin with some __builtin prefix.
- /// The builtin may as well have a few extra arguments on top of the requested
- /// number of arguments.
- CDF_MaybeBuiltin = 1 << 0,
-};
-
-/// This class represents a description of a function call using the number of
-/// arguments and the name of the function.
-class CallDescription {
- friend CallEvent;
-
- mutable IdentifierInfo *II = nullptr;
- mutable bool IsLookupDone = false;
- // The list of the qualified names used to identify the specified CallEvent,
- // e.g. "{a, b}" represent the qualified names, like "a::b".
- std::vector<const char *> QualifiedName;
- Optional<unsigned> RequiredArgs;
- Optional<size_t> RequiredParams;
- int Flags;
-
- // A constructor helper.
- static Optional<size_t> readRequiredParams(Optional<unsigned> RequiredArgs,
- Optional<size_t> RequiredParams) {
- if (RequiredParams)
- return RequiredParams;
- if (RequiredArgs)
- return static_cast<size_t>(*RequiredArgs);
- return None;
- }
-
-public:
- /// Constructs a CallDescription object.
- ///
- /// @param QualifiedName The list of the name qualifiers of the function that
- /// will be matched. The user is allowed to skip any of the qualifiers.
- /// For example, {"std", "basic_string", "c_str"} would match both
- /// std::basic_string<...>::c_str() and std::__1::basic_string<...>::c_str().
- ///
- /// @param RequiredArgs The number of arguments that is expected to match a
- /// call. Omit this parameter to match every occurrence of call with a given
- /// name regardless the number of arguments.
- CallDescription(int Flags, ArrayRef<const char *> QualifiedName,
- Optional<unsigned> RequiredArgs = None,
- Optional<size_t> RequiredParams = None)
- : QualifiedName(QualifiedName), RequiredArgs(RequiredArgs),
- RequiredParams(readRequiredParams(RequiredArgs, RequiredParams)),
- Flags(Flags) {}
-
- /// Construct a CallDescription with default flags.
- CallDescription(ArrayRef<const char *> QualifiedName,
- Optional<unsigned> RequiredArgs = None,
- Optional<size_t> RequiredParams = None)
- : CallDescription(0, QualifiedName, RequiredArgs, RequiredParams) {}
-
- /// Get the name of the function that this object matches.
- StringRef getFunctionName() const { return QualifiedName.back(); }
-};
-
-/// An immutable map from CallDescriptions to arbitrary data. Provides a unified
-/// way for checkers to react on function calls.
-template <typename T> class CallDescriptionMap {
- // Some call descriptions aren't easily hashable (eg., the ones with qualified
- // names in which some sections are omitted), so let's put them
- // in a simple vector and use linear lookup.
- // TODO: Implement an actual map for fast lookup for "hashable" call
- // descriptions (eg., the ones for C functions that just match the name).
- std::vector<std::pair<CallDescription, T>> LinearMap;
-
-public:
- CallDescriptionMap(
- std::initializer_list<std::pair<CallDescription, T>> &&List)
- : LinearMap(List) {}
-
- ~CallDescriptionMap() = default;
-
- // These maps are usually stored once per checker, so let's make sure
- // we don't do redundant copies.
- CallDescriptionMap(const CallDescriptionMap &) = delete;
- CallDescriptionMap &operator=(const CallDescription &) = delete;
-
- const T *lookup(const CallEvent &Call) const {
- // Slow path: linear lookup.
- // TODO: Implement some sort of fast path.
- for (const std::pair<CallDescription, T> &I : LinearMap)
- if (Call.isCalled(I.first))
- return &I.second;
-
- return nullptr;
- }
-};
-
/// Manages the lifetime of CallEvent objects.
///
/// CallEventManager provides a way to create arbitrary CallEvents "on the
@@ -1346,89 +1302,98 @@ class CallEventManager {
}
template <typename T, typename Arg>
- T *create(Arg A, ProgramStateRef St, const LocationContext *LCtx) {
+ T *create(Arg A, ProgramStateRef St, const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef) {
static_assert(sizeof(T) == sizeof(CallEventTemplateTy),
"CallEvent subclasses are not all the same size");
- return new (allocate()) T(A, St, LCtx);
+ return new (allocate()) T(A, St, LCtx, ElemRef);
}
template <typename T, typename Arg1, typename Arg2>
- T *create(Arg1 A1, Arg2 A2, ProgramStateRef St, const LocationContext *LCtx) {
+ T *create(Arg1 A1, Arg2 A2, ProgramStateRef St, const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef) {
static_assert(sizeof(T) == sizeof(CallEventTemplateTy),
"CallEvent subclasses are not all the same size");
- return new (allocate()) T(A1, A2, St, LCtx);
+ return new (allocate()) T(A1, A2, St, LCtx, ElemRef);
}
template <typename T, typename Arg1, typename Arg2, typename Arg3>
T *create(Arg1 A1, Arg2 A2, Arg3 A3, ProgramStateRef St,
- const LocationContext *LCtx) {
+ const LocationContext *LCtx, CFGBlock::ConstCFGElementRef ElemRef) {
static_assert(sizeof(T) == sizeof(CallEventTemplateTy),
"CallEvent subclasses are not all the same size");
- return new (allocate()) T(A1, A2, A3, St, LCtx);
+ return new (allocate()) T(A1, A2, A3, St, LCtx, ElemRef);
}
template <typename T, typename Arg1, typename Arg2, typename Arg3,
typename Arg4>
T *create(Arg1 A1, Arg2 A2, Arg3 A3, Arg4 A4, ProgramStateRef St,
- const LocationContext *LCtx) {
+ const LocationContext *LCtx, CFGBlock::ConstCFGElementRef ElemRef) {
static_assert(sizeof(T) == sizeof(CallEventTemplateTy),
"CallEvent subclasses are not all the same size");
- return new (allocate()) T(A1, A2, A3, A4, St, LCtx);
+ return new (allocate()) T(A1, A2, A3, A4, St, LCtx, ElemRef);
}
public:
CallEventManager(llvm::BumpPtrAllocator &alloc) : Alloc(alloc) {}
/// Gets an outside caller given a callee context.
- CallEventRef<>
- getCaller(const StackFrameContext *CalleeCtx, ProgramStateRef State);
+ CallEventRef<> getCaller(const StackFrameContext *CalleeCtx,
+ ProgramStateRef State);
/// Gets a call event for a function call, Objective-C method call,
- /// or a 'new' call.
- CallEventRef<>
- getCall(const Stmt *S, ProgramStateRef State,
- const LocationContext *LC);
+ /// a 'new', or a 'delete' call.
+ CallEventRef<> getCall(const Stmt *S, ProgramStateRef State,
+ const LocationContext *LC,
+ CFGBlock::ConstCFGElementRef ElemRef);
- CallEventRef<>
- getSimpleCall(const CallExpr *E, ProgramStateRef State,
- const LocationContext *LCtx);
+ CallEventRef<> getSimpleCall(const CallExpr *E, ProgramStateRef State,
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef);
CallEventRef<ObjCMethodCall>
getObjCMethodCall(const ObjCMessageExpr *E, ProgramStateRef State,
- const LocationContext *LCtx) {
- return create<ObjCMethodCall>(E, State, LCtx);
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef) {
+ return create<ObjCMethodCall>(E, State, LCtx, ElemRef);
}
CallEventRef<CXXConstructorCall>
getCXXConstructorCall(const CXXConstructExpr *E, const MemRegion *Target,
- ProgramStateRef State, const LocationContext *LCtx) {
- return create<CXXConstructorCall>(E, Target, State, LCtx);
+ ProgramStateRef State, const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef) {
+ return create<CXXConstructorCall>(E, Target, State, LCtx, ElemRef);
}
CallEventRef<CXXInheritedConstructorCall>
getCXXInheritedConstructorCall(const CXXInheritedCtorInitExpr *E,
const MemRegion *Target, ProgramStateRef State,
- const LocationContext *LCtx) {
- return create<CXXInheritedConstructorCall>(E, Target, State, LCtx);
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef) {
+ return create<CXXInheritedConstructorCall>(E, Target, State, LCtx, ElemRef);
}
CallEventRef<CXXDestructorCall>
getCXXDestructorCall(const CXXDestructorDecl *DD, const Stmt *Trigger,
const MemRegion *Target, bool IsBase,
- ProgramStateRef State, const LocationContext *LCtx) {
- return create<CXXDestructorCall>(DD, Trigger, Target, IsBase, State, LCtx);
+ ProgramStateRef State, const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef) {
+ return create<CXXDestructorCall>(DD, Trigger, Target, IsBase, State, LCtx,
+ ElemRef);
}
CallEventRef<CXXAllocatorCall>
getCXXAllocatorCall(const CXXNewExpr *E, ProgramStateRef State,
- const LocationContext *LCtx) {
- return create<CXXAllocatorCall>(E, State, LCtx);
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef) {
+ return create<CXXAllocatorCall>(E, State, LCtx, ElemRef);
}
CallEventRef<CXXDeallocatorCall>
getCXXDeallocatorCall(const CXXDeleteExpr *E, ProgramStateRef State,
- const LocationContext *LCtx) {
- return create<CXXDeallocatorCall>(E, State, LCtx);
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef) {
+ return create<CXXDeallocatorCall>(E, State, LCtx, ElemRef);
}
};
@@ -1470,11 +1435,10 @@ inline void CallEvent::Release() const {
namespace llvm {
// Support isa<>, cast<>, and dyn_cast<> for CallEventRef.
-template<class T> struct simplify_type< clang::ento::CallEventRef<T>> {
+template <class T> struct simplify_type<clang::ento::CallEventRef<T>> {
using SimpleType = const T *;
- static SimpleType
- getSimplifiedValue(clang::ento::CallEventRef<T> Val) {
+ static SimpleType getSimplifiedValue(clang::ento::CallEventRef<T> Val) {
return Val.get();
}
};
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
index a383012dc351..9923c41e6ad2 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
@@ -16,6 +16,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include <optional>
namespace clang {
namespace ento {
@@ -84,6 +85,8 @@ public:
return Eng.getContext();
}
+ const ASTContext &getASTContext() const { return Eng.getContext(); }
+
const LangOptions &getLangOpts() const {
return Eng.getContext().getLangOpts();
}
@@ -137,7 +140,7 @@ public:
/// example, for finding variables that the given symbol was assigned to.
static const MemRegion *getLocationRegionIfPostStore(const ExplodedNode *N) {
ProgramPoint L = N->getLocation();
- if (Optional<PostStore> PSL = L.getAs<PostStore>())
+ if (std::optional<PostStore> PSL = L.getAs<PostStore>())
return reinterpret_cast<const MemRegion*>(PSL->getLocationValue());
return nullptr;
}
@@ -210,6 +213,22 @@ public:
}
/// Generate a transition to a node that will be used to report
+ /// an error. This node will be a sink. That is, it will stop exploration of
+ /// the given path.
+ ///
+ /// @param State The state of the generated node.
+ /// @param Pred The transition will be generated from the specified Pred node
+ /// to the newly generated node.
+ /// @param Tag The tag to uniquely identify the creation site. If null,
+ /// the default tag for the checker will be used.
+ ExplodedNode *generateErrorNode(ProgramStateRef State,
+ ExplodedNode *Pred,
+ const ProgramPointTag *Tag = nullptr) {
+ return generateSink(State, Pred,
+ (Tag ? Tag : Location.getTag()));
+ }
+
+ /// Generate a transition to a node that will be used to report
/// an error. This node will not be a sink. That is, exploration will
/// continue along this path.
///
@@ -254,6 +273,7 @@ public:
/// @param IsPrunable Whether the note is prunable. It allows BugReporter
/// to omit the note from the report if it would make the displayed
/// bug path significantly shorter.
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const NoteTag *getNoteTag(NoteTag::Callback &&Cb, bool IsPrunable = false) {
return Eng.getDataTags().make<NoteTag>(std::move(Cb), IsPrunable);
}
@@ -296,8 +316,8 @@ public:
/// bug path significantly shorter.
const NoteTag *getNoteTag(StringRef Note, bool IsPrunable = false) {
return getNoteTag(
- [Note](BugReporterContext &,
- PathSensitiveBugReport &) { return std::string(Note); },
+ [Note = std::string(Note)](BugReporterContext &,
+ PathSensitiveBugReport &) { return Note; },
IsPrunable);
}
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h
index a81d67ab3063..65982457ad83 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h
@@ -16,7 +16,7 @@
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/Basic/OperatorKinds.h"
-#include "llvm/ADT/Optional.h"
+#include <optional>
#include <tuple>
namespace clang {
@@ -24,7 +24,6 @@ namespace clang {
class Expr;
class VarDecl;
class QualType;
-class AttributedType;
class Preprocessor;
namespace ento {
@@ -68,8 +67,9 @@ Nullability getNullabilityAnnotation(QualType Type);
/// Try to parse the value of a defined preprocessor macro. We can only parse
/// simple expressions that consist of an optional minus sign token and then a
-/// token for an integer. If we cannot parse the value then None is returned.
-llvm::Optional<int> tryExpandAsInteger(StringRef Macro, const Preprocessor &PP);
+/// token for an integer. If we cannot parse the value then std::nullopt is
+/// returned.
+std::optional<int> tryExpandAsInteger(StringRef Macro, const Preprocessor &PP);
class OperatorKind {
union {
@@ -88,7 +88,7 @@ public:
return Op.Bin;
}
- Optional<BinaryOperatorKind> GetBinaryOp() const {
+ std::optional<BinaryOperatorKind> GetBinaryOp() const {
if (IsBinary)
return Op.Bin;
return {};
@@ -100,7 +100,7 @@ public:
return Op.Un;
}
- Optional<UnaryOperatorKind> GetUnaryOp() const {
+ std::optional<UnaryOperatorKind> GetUnaryOp() const {
if (!IsBinary)
return Op.Un;
return {};
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h
index 335536b6a310..4de04bc4d397 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h
@@ -17,9 +17,9 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
+#include <optional>
#include <utility>
namespace llvm {
@@ -36,7 +36,7 @@ class ExprEngine;
class SymbolReaper;
class ConditionTruthVal {
- Optional<bool> Val;
+ std::optional<bool> Val;
public:
/// Construct a ConditionTruthVal indicating the constraint is constrained
@@ -53,25 +53,17 @@ public:
}
/// Return true if the constraint is perfectly constrained to 'true'.
- bool isConstrainedTrue() const {
- return Val.hasValue() && Val.getValue();
- }
+ bool isConstrainedTrue() const { return Val && *Val; }
/// Return true if the constraint is perfectly constrained to 'false'.
- bool isConstrainedFalse() const {
- return Val.hasValue() && !Val.getValue();
- }
+ bool isConstrainedFalse() const { return Val && !*Val; }
/// Return true if the constrained is perfectly constrained.
- bool isConstrained() const {
- return Val.hasValue();
- }
+ bool isConstrained() const { return Val.has_value(); }
/// Return true if the constrained is underconstrained and we do not know
/// if the constraint is true of value.
- bool isUnderconstrained() const {
- return !Val.hasValue();
- }
+ bool isUnderconstrained() const { return !Val.has_value(); }
};
class ConstraintManager {
@@ -82,77 +74,58 @@ public:
virtual bool haveEqualConstraints(ProgramStateRef S1,
ProgramStateRef S2) const = 0;
- virtual ProgramStateRef assume(ProgramStateRef state,
- DefinedSVal Cond,
- bool Assumption) = 0;
+ ProgramStateRef assume(ProgramStateRef state, DefinedSVal Cond,
+ bool Assumption);
using ProgramStatePair = std::pair<ProgramStateRef, ProgramStateRef>;
/// Returns a pair of states (StTrue, StFalse) where the given condition is
/// assumed to be true or false, respectively.
- ProgramStatePair assumeDual(ProgramStateRef State, DefinedSVal Cond) {
- ProgramStateRef StTrue = assume(State, Cond, true);
-
- // If StTrue is infeasible, asserting the falseness of Cond is unnecessary
- // because the existing constraints already establish this.
- if (!StTrue) {
-#ifdef EXPENSIVE_CHECKS
- assert(assume(State, Cond, false) && "System is over constrained.");
-#endif
- return ProgramStatePair((ProgramStateRef)nullptr, State);
- }
-
- ProgramStateRef StFalse = assume(State, Cond, false);
- if (!StFalse) {
- // We are careful to return the original state, /not/ StTrue,
- // because we want to avoid having callers generate a new node
- // in the ExplodedGraph.
- return ProgramStatePair(State, (ProgramStateRef)nullptr);
- }
-
- return ProgramStatePair(StTrue, StFalse);
- }
-
- virtual ProgramStateRef assumeInclusiveRange(ProgramStateRef State,
- NonLoc Value,
- const llvm::APSInt &From,
- const llvm::APSInt &To,
- bool InBound) = 0;
-
- virtual ProgramStatePair assumeInclusiveRangeDual(ProgramStateRef State,
- NonLoc Value,
- const llvm::APSInt &From,
- const llvm::APSInt &To) {
- ProgramStateRef StInRange =
- assumeInclusiveRange(State, Value, From, To, true);
-
- // If StTrue is infeasible, asserting the falseness of Cond is unnecessary
- // because the existing constraints already establish this.
- if (!StInRange)
- return ProgramStatePair((ProgramStateRef)nullptr, State);
-
- ProgramStateRef StOutOfRange =
- assumeInclusiveRange(State, Value, From, To, false);
- if (!StOutOfRange) {
- // We are careful to return the original state, /not/ StTrue,
- // because we want to avoid having callers generate a new node
- // in the ExplodedGraph.
- return ProgramStatePair(State, (ProgramStateRef)nullptr);
- }
-
- return ProgramStatePair(StInRange, StOutOfRange);
- }
+ /// (Note that these two states might be equal if the parent state turns out
+ /// to be infeasible. This may happen if the underlying constraint solver is
+ /// not perfectly precise and this may happen very rarely.)
+ ProgramStatePair assumeDual(ProgramStateRef State, DefinedSVal Cond);
+
+ ProgramStateRef assumeInclusiveRange(ProgramStateRef State, NonLoc Value,
+ const llvm::APSInt &From,
+ const llvm::APSInt &To, bool InBound);
+
+ /// Returns a pair of states (StInRange, StOutOfRange) where the given value
+ /// is assumed to be in the range or out of the range, respectively.
+ /// (Note that these two states might be equal if the parent state turns out
+ /// to be infeasible. This may happen if the underlying constraint solver is
+ /// not perfectly precise and this may happen very rarely.)
+ ProgramStatePair assumeInclusiveRangeDual(ProgramStateRef State, NonLoc Value,
+ const llvm::APSInt &From,
+ const llvm::APSInt &To);
/// If a symbol is perfectly constrained to a constant, attempt
/// to return the concrete value.
///
/// Note that a ConstraintManager is not obligated to return a concretized
/// value for a symbol, even if it is perfectly constrained.
+ /// It might return null.
virtual const llvm::APSInt* getSymVal(ProgramStateRef state,
SymbolRef sym) const {
return nullptr;
}
+ /// Attempt to return the minimal possible value for a given symbol. Note
+ /// that a ConstraintManager is not obligated to return a lower bound, it may
+ /// also return nullptr.
+ virtual const llvm::APSInt *getSymMinVal(ProgramStateRef state,
+ SymbolRef sym) const {
+ return nullptr;
+ }
+
+ /// Attempt to return the minimal possible value for a given symbol. Note
+ /// that a ConstraintManager is not obligated to return a lower bound, it may
+ /// also return nullptr.
+ virtual const llvm::APSInt *getSymMaxVal(ProgramStateRef state,
+ SymbolRef sym) const {
+ return nullptr;
+ }
+
/// Scan all symbols referenced by the constraints. If the symbol is not
/// alive, remove it.
virtual ProgramStateRef removeDeadBindings(ProgramStateRef state,
@@ -162,22 +135,38 @@ public:
const char *NL, unsigned int Space,
bool IsDot) const = 0;
+ virtual void printValue(raw_ostream &Out, ProgramStateRef State,
+ SymbolRef Sym) {}
+
/// Convenience method to query the state to see if a symbol is null or
/// not null, or if neither assumption can be made.
ConditionTruthVal isNull(ProgramStateRef State, SymbolRef Sym) {
- SaveAndRestore<bool> DisableNotify(NotifyAssumeClients, false);
-
return checkNull(State, Sym);
}
protected:
- /// A flag to indicate that clients should be notified of assumptions.
- /// By default this is the case, but sometimes this needs to be restricted
- /// to avoid infinite recursions within the ConstraintManager.
- ///
- /// Note that this flag allows the ConstraintManager to be re-entrant,
- /// but not thread-safe.
- bool NotifyAssumeClients = true;
+ /// A helper class to simulate the call stack of nested assume calls.
+ class AssumeStackTy {
+ public:
+ void push(const ProgramState *S) { Aux.push_back(S); }
+ void pop() { Aux.pop_back(); }
+ bool contains(const ProgramState *S) const {
+ return llvm::is_contained(Aux, S);
+ }
+
+ private:
+ llvm::SmallVector<const ProgramState *, 4> Aux;
+ };
+ AssumeStackTy AssumeStack;
+
+ virtual ProgramStateRef assumeInternal(ProgramStateRef state,
+ DefinedSVal Cond, bool Assumption) = 0;
+
+ virtual ProgramStateRef assumeInclusiveRangeInternal(ProgramStateRef State,
+ NonLoc Value,
+ const llvm::APSInt &From,
+ const llvm::APSInt &To,
+ bool InBound) = 0;
/// canReasonAbout - Not all ConstraintManagers can accurately reason about
/// all SVal values. This method returns true if the ConstraintManager can
@@ -189,6 +178,10 @@ protected:
/// Returns whether or not a symbol is known to be null ("true"), known to be
/// non-null ("false"), or may be either ("underconstrained").
virtual ConditionTruthVal checkNull(ProgramStateRef State, SymbolRef Sym);
+
+ template <typename AssumeFunction>
+ ProgramStatePair assumeDualImpl(ProgramStateRef &State,
+ AssumeFunction &Assume);
};
std::unique_ptr<ConstraintManager>
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
index 9898b9b42f4b..8dbe767cef9d 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
@@ -25,6 +25,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/WorkList.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <memory>
@@ -78,6 +79,7 @@ private:
/// worklist algorithm. It is up to the implementation of WList to decide
/// the order that nodes are processed.
std::unique_ptr<WorkList> WList;
+ std::unique_ptr<WorkList> CTUWList;
/// BCounterFactory - A factory object for created BlockCounter objects.
/// These are used to record for key nodes in the ExplodedGraph the
@@ -101,6 +103,8 @@ private:
/// tags.
DataTag::Factory DataTags;
+ void setBlockCounter(BlockCounter C);
+
void generateNode(const ProgramPoint &Loc,
ProgramStateRef State,
ExplodedNode *Pred);
@@ -170,22 +174,13 @@ public:
}
WorkList *getWorkList() const { return WList.get(); }
+ WorkList *getCTUWorkList() const { return CTUWList.get(); }
- BlocksExhausted::const_iterator blocks_exhausted_begin() const {
- return blocksExhausted.begin();
+ auto exhausted_blocks() const {
+ return llvm::iterator_range(blocksExhausted);
}
- BlocksExhausted::const_iterator blocks_exhausted_end() const {
- return blocksExhausted.end();
- }
-
- BlocksAborted::const_iterator blocks_aborted_begin() const {
- return blocksAborted.begin();
- }
-
- BlocksAborted::const_iterator blocks_aborted_end() const {
- return blocksAborted.end();
- }
+ auto aborted_blocks() const { return llvm::iterator_range(blocksAborted); }
/// Enqueue the given set of nodes onto the work list.
void enqueue(ExplodedNodeSet &Set);
@@ -210,8 +205,14 @@ struct NodeBuilderContext {
const CFGBlock *Block;
const LocationContext *LC;
+ NodeBuilderContext(const CoreEngine &E, const CFGBlock *B,
+ const LocationContext *L)
+ : Eng(E), Block(B), LC(L) {
+ assert(B);
+ }
+
NodeBuilderContext(const CoreEngine &E, const CFGBlock *B, ExplodedNode *N)
- : Eng(E), Block(B), LC(N->getLocationContext()) { assert(B); }
+ : NodeBuilderContext(E, B, N->getLocationContext()) {}
/// Return the CFGBlock associated with this builder.
const CFGBlock *getBlock() const { return Block; }
@@ -290,7 +291,9 @@ public:
ExplodedNode *generateNode(const ProgramPoint &PP,
ProgramStateRef State,
ExplodedNode *Pred) {
- return generateNodeImpl(PP, State, Pred, false);
+ return generateNodeImpl(
+ PP, State, Pred,
+ /*MarkAsSink=*/State->isPosteriorlyOverconstrained());
}
/// Generates a sink in the ExplodedGraph.
@@ -495,6 +498,11 @@ public:
iterator(CFGBlock::const_succ_iterator i) : I(i) {}
public:
+ // This isn't really a conventional iterator.
+ // We just implement the deref as a no-op for now to make range-based for
+ // loops work.
+ const iterator &operator*() const { return *this; }
+
iterator &operator++() { ++I; return *this; }
bool operator!=(const iterator &X) const { return I != X.I; }
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h
index cfd7aa9664b6..50d5d254415a 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h
@@ -53,6 +53,11 @@ ProgramStateRef setDynamicExtent(ProgramStateRef State, const MemRegion *MR,
/// (bufptr) // extent is unknown
SVal getDynamicExtentWithOffset(ProgramStateRef State, SVal BufV);
+/// \returns The stored element count of the region represented by a symbolic
+/// value \p BufV.
+DefinedOrUnknownSVal getDynamicElementCountWithOffset(ProgramStateRef State,
+ SVal BufV, QualType Ty);
+
} // namespace ento
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h
index ffe1fe846be1..52d1526b1acf 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h
@@ -22,8 +22,6 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
-#include "llvm/ADT/ImmutableMap.h"
-#include "llvm/ADT/Optional.h"
namespace clang {
namespace ento {
@@ -32,6 +30,7 @@ namespace ento {
DynamicTypeInfo getDynamicTypeInfo(ProgramStateRef State, const MemRegion *MR);
/// Get raw dynamic type information for the region \p MR.
+/// It might return null.
const DynamicTypeInfo *getRawDynamicTypeInfo(ProgramStateRef State,
const MemRegion *MR);
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h
index 6d2b495dc0f5..3ff453a8de4f 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h
@@ -18,7 +18,7 @@ namespace ento {
/// of a region in a given state along the analysis path.
class DynamicTypeInfo {
public:
- DynamicTypeInfo() : DynTy(QualType()) {}
+ DynamicTypeInfo() {}
DynamicTypeInfo(QualType Ty, bool CanBeSub = true)
: DynTy(Ty), CanBeASubClass(CanBeSub) {}
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h
index e87772c04b9b..2fb05ac46e8f 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h
@@ -30,14 +30,15 @@
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/GraphTraits.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Compiler.h"
#include <cassert>
#include <cstdint>
#include <memory>
+#include <optional>
#include <utility>
#include <vector>
@@ -161,15 +162,13 @@ public:
return getLocationContext()->getParentMap();
}
- template <typename T>
- T &getAnalysis() const {
+ template <typename T> T &getAnalysis() const {
return *getLocationContext()->getAnalysis<T>();
}
const ProgramStateRef &getState() const { return State; }
- template <typename T>
- Optional<T> getLocationAs() const LLVM_LVALUE_FUNCTION {
+ template <typename T> std::optional<T> getLocationAs() const & {
return Location.getAs<T>();
}
@@ -397,13 +396,9 @@ public:
using node_iterator = AllNodesTy::iterator;
using const_node_iterator = AllNodesTy::const_iterator;
- node_iterator nodes_begin() { return Nodes.begin(); }
+ llvm::iterator_range<node_iterator> nodes() { return Nodes; }
- node_iterator nodes_end() { return Nodes.end(); }
-
- const_node_iterator nodes_begin() const { return Nodes.begin(); }
-
- const_node_iterator nodes_end() const { return Nodes.end(); }
+ llvm::iterator_range<const_node_iterator> nodes() const { return Nodes; }
roots_iterator roots_begin() { return Roots.begin(); }
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
index cef7dda172f3..ed5c4adb5e3d 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
@@ -36,6 +36,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/WorkList.h"
#include "llvm/ADT/ArrayRef.h"
#include <cassert>
+#include <optional>
#include <utility>
namespace clang {
@@ -77,13 +78,9 @@ namespace ento {
class AnalysisManager;
class BasicValueFactory;
-class BlockCounter;
-class BranchNodeBuilder;
class CallEvent;
class CheckerManager;
class ConstraintManager;
-class CXXTempObjectRegion;
-class EndOfFunctionNodeBuilder;
class ExplodedNodeSet;
class ExplodedNode;
class IndirectGotoNodeBuilder;
@@ -139,6 +136,7 @@ public:
private:
cross_tu::CrossTranslationUnitContext &CTU;
+ bool IsCTUEnabled;
AnalysisManager &AMgr;
@@ -231,10 +229,15 @@ public:
const Stmt *getStmt() const;
- void GenerateAutoTransition(ExplodedNode *N);
- void enqueueEndOfPath(ExplodedNodeSet &S);
- void GenerateCallExitNode(ExplodedNode *N);
+ const LocationContext *getRootLocationContext() const {
+ assert(G.roots_begin() != G.roots_end());
+ return (*G.roots_begin())->getLocation().getLocationContext();
+ }
+ CFGBlock::ConstCFGElementRef getCFGElementRef() const {
+ const CFGBlock *blockPtr = currBldrCtx ? currBldrCtx->getBlock() : nullptr;
+ return {blockPtr, currStmtIdx};
+ }
/// Dump graph to the specified filename.
/// If filename is empty, generate a temporary one.
@@ -358,13 +361,13 @@ public:
void processSwitch(SwitchNodeBuilder& builder);
/// Called by CoreEngine. Used to notify checkers that processing a
- /// function has begun. Called for both inlined and and top-level functions.
+ /// function has begun. Called for both inlined and top-level functions.
void processBeginOfFunction(NodeBuilderContext &BC,
ExplodedNode *Pred, ExplodedNodeSet &Dst,
const BlockEdge &L);
/// Called by CoreEngine. Used to notify checkers that processing a
- /// function has ended. Called for both inlined and and top-level functions.
+ /// function has ended. Called for both inlined and top-level functions.
void processEndOfFunction(NodeBuilderContext& BC,
ExplodedNode *Pred,
const ReturnStmt *RS = nullptr);
@@ -442,6 +445,10 @@ public:
/// other functions that handle specific kinds of statements.
void Visit(const Stmt *S, ExplodedNode *Pred, ExplodedNodeSet &Dst);
+ /// VisitArrayInitLoopExpr - Transfer function for array init loop.
+ void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *Ex, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
+
/// VisitArraySubscriptExpr - Transfer function for array accesses.
void VisitArraySubscriptExpr(const ArraySubscriptExpr *Ex,
ExplodedNode *Pred,
@@ -589,51 +596,40 @@ public:
static std::pair<const ProgramPointTag *, const ProgramPointTag *>
geteagerlyAssumeBinOpBifurcationTags();
- SVal evalMinus(SVal X) {
- return X.isValid() ? svalBuilder.evalMinus(X.castAs<NonLoc>()) : X;
- }
-
- SVal evalComplement(SVal X) {
- return X.isValid() ? svalBuilder.evalComplement(X.castAs<NonLoc>()) : X;
- }
-
ProgramStateRef handleLValueBitCast(ProgramStateRef state, const Expr *Ex,
const LocationContext *LCtx, QualType T,
QualType ExTy, const CastExpr *CastE,
StmtNodeBuilder &Bldr,
ExplodedNode *Pred);
- ProgramStateRef handleLVectorSplat(ProgramStateRef state,
- const LocationContext *LCtx,
- const CastExpr *CastE,
- StmtNodeBuilder &Bldr,
- ExplodedNode *Pred);
-
- void handleUOExtension(ExplodedNodeSet::iterator I,
- const UnaryOperator* U,
+ void handleUOExtension(ExplodedNode *N, const UnaryOperator *U,
StmtNodeBuilder &Bldr);
public:
- SVal evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
- NonLoc L, NonLoc R, QualType T) {
- return svalBuilder.evalBinOpNN(state, op, L, R, T);
- }
-
- SVal evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
- NonLoc L, SVal R, QualType T) {
- return R.isValid() ? svalBuilder.evalBinOpNN(state, op, L,
- R.castAs<NonLoc>(), T) : R;
- }
-
SVal evalBinOp(ProgramStateRef ST, BinaryOperator::Opcode Op,
SVal LHS, SVal RHS, QualType T) {
return svalBuilder.evalBinOp(ST, Op, LHS, RHS, T);
}
+ /// Retreives which element is being constructed in a non-POD type array.
+ static std::optional<unsigned>
+ getIndexOfElementToConstruct(ProgramStateRef State, const CXXConstructExpr *E,
+ const LocationContext *LCtx);
+
+ /// Retreives which element is being destructed in a non-POD type array.
+ static std::optional<unsigned>
+ getPendingArrayDestruction(ProgramStateRef State,
+ const LocationContext *LCtx);
+
+ /// Retreives the size of the array in the pending ArrayInitLoopExpr.
+ static std::optional<unsigned>
+ getPendingInitLoop(ProgramStateRef State, const CXXConstructExpr *E,
+ const LocationContext *LCtx);
+
/// By looking at a certain item that may be potentially part of an object's
/// ConstructionContext, retrieve such object's location. A particular
/// statement can be transparently passed as \p Item in most cases.
- static Optional<SVal>
+ static std::optional<SVal>
getObjectUnderConstruction(ProgramStateRef State,
const ConstructionContextItem &Item,
const LocationContext *LC);
@@ -721,10 +717,20 @@ public:
/// fully implemented it sometimes indicates that it failed via its
/// out-parameter CallOpts; in such cases a fake temporary region is
/// returned, which is better than nothing but does not represent
- /// the actual behavior of the program.
- SVal computeObjectUnderConstruction(
- const Expr *E, ProgramStateRef State, const LocationContext *LCtx,
- const ConstructionContext *CC, EvalCallOptions &CallOpts);
+ /// the actual behavior of the program. The Idx parameter is used if we
+ /// construct an array of objects. In that case it points to the index
+ /// of the continuous memory region.
+ /// E.g.:
+ /// For `int arr[4]` this index can be 0,1,2,3.
+ /// For `int arr2[3][3]` this index can be 0,1,...,7,8.
+ /// A multi-dimensional array is also a continuous memory location in a
+ /// row major order, so for arr[0][0] Idx is 0 and for arr[2][2] Idx is 8.
+ SVal computeObjectUnderConstruction(const Expr *E, ProgramStateRef State,
+ const NodeBuilderContext *BldrCtx,
+ const LocationContext *LCtx,
+ const ConstructionContext *CC,
+ EvalCallOptions &CallOpts,
+ unsigned Idx = 0);
/// Update the program state with all the path-sensitive information
/// that's necessary to perform construction of an object with a given
@@ -738,11 +744,15 @@ public:
/// A convenient wrapper around computeObjectUnderConstruction
/// and updateObjectsUnderConstruction.
std::pair<ProgramStateRef, SVal> handleConstructionContext(
- const Expr *E, ProgramStateRef State, const LocationContext *LCtx,
- const ConstructionContext *CC, EvalCallOptions &CallOpts) {
- SVal V = computeObjectUnderConstruction(E, State, LCtx, CC, CallOpts);
- return std::make_pair(
- updateObjectsUnderConstruction(V, E, State, LCtx, CC, CallOpts), V);
+ const Expr *E, ProgramStateRef State, const NodeBuilderContext *BldrCtx,
+ const LocationContext *LCtx, const ConstructionContext *CC,
+ EvalCallOptions &CallOpts, unsigned Idx = 0) {
+
+ SVal V = computeObjectUnderConstruction(E, State, BldrCtx, LCtx, CC,
+ CallOpts, Idx);
+ State = updateObjectsUnderConstruction(V, E, State, LCtx, CC, CallOpts);
+
+ return std::make_pair(State, V);
}
private:
@@ -751,15 +761,6 @@ private:
void finishArgumentConstruction(ExplodedNodeSet &Dst, ExplodedNode *Pred,
const CallEvent &Call);
- void evalLoadCommon(ExplodedNodeSet &Dst,
- const Expr *NodeEx, /* Eventually will be a CFGStmt */
- const Expr *BoundEx,
- ExplodedNode *Pred,
- ProgramStateRef St,
- SVal location,
- const ProgramPointTag *tag,
- QualType LoadTy);
-
void evalLocation(ExplodedNodeSet &Dst,
const Stmt *NodeEx, /* This will eventually be a CFGStmt */
const Stmt *BoundEx,
@@ -809,8 +810,46 @@ private:
const ExplodedNode *Pred,
const EvalCallOptions &CallOpts = {});
- bool inlineCall(const CallEvent &Call, const Decl *D, NodeBuilder &Bldr,
- ExplodedNode *Pred, ProgramStateRef State);
+ /// Checks whether our policies allow us to inline a non-POD type array
+ /// construction.
+ bool shouldInlineArrayConstruction(const ProgramStateRef State,
+ const CXXConstructExpr *CE,
+ const LocationContext *LCtx);
+
+ /// Checks whether our policies allow us to inline a non-POD type array
+ /// destruction.
+ /// \param Size The size of the array.
+ bool shouldInlineArrayDestruction(uint64_t Size);
+
+ /// Prepares the program state for array destruction. If no error happens
+ /// the function binds a 'PendingArrayDestruction' entry to the state, which
+ /// it returns along with the index. If any error happens (we fail to read
+ /// the size, the index would be -1, etc.) the function will return the
+ /// original state along with an index of 0. The actual element count of the
+ /// array can be accessed by the optional 'ElementCountVal' parameter. \param
+ /// State The program state. \param Region The memory region where the array
+ /// is stored. \param ElementTy The type an element in the array. \param LCty
+ /// The location context. \param ElementCountVal A pointer to an optional
+ /// SVal. If specified, the size of the array will be returned in it. It can
+ /// be Unknown.
+ std::pair<ProgramStateRef, uint64_t> prepareStateForArrayDestruction(
+ const ProgramStateRef State, const MemRegion *Region,
+ const QualType &ElementTy, const LocationContext *LCtx,
+ SVal *ElementCountVal = nullptr);
+
+ /// Checks whether we construct an array of non-POD type, and decides if the
+ /// constructor should be inkoved once again.
+ bool shouldRepeatCtorCall(ProgramStateRef State, const CXXConstructExpr *E,
+ const LocationContext *LCtx);
+
+ void inlineCall(WorkList *WList, const CallEvent &Call, const Decl *D,
+ NodeBuilder &Bldr, ExplodedNode *Pred, ProgramStateRef State);
+
+ void ctuBifurcate(const CallEvent &Call, const Decl *D, NodeBuilder &Bldr,
+ ExplodedNode *Pred, ProgramStateRef State);
+
+ /// Returns true if the CTU analysis is running its second phase.
+ bool isSecondPhaseCTU() { return IsCTUEnabled && !Engine.getCTUWorkList(); }
/// Conservatively evaluate call by invalidating regions and binding
/// a conjured return value.
@@ -845,7 +884,7 @@ private:
const Expr *InitWithAdjustments, const Expr *Result = nullptr,
const SubRegion **OutRegionWithAdjustments = nullptr);
- /// Returns a region representing the first element of a (possibly
+ /// Returns a region representing the `Idx`th element of a (possibly
/// multi-dimensional) array, for the purposes of element construction or
/// destruction.
///
@@ -853,15 +892,8 @@ private:
///
/// If the type is not an array type at all, the original value is returned.
/// Otherwise the "IsArray" flag is set.
- static SVal makeZeroElementRegion(ProgramStateRef State, SVal LValue,
- QualType &Ty, bool &IsArray);
-
- /// For a DeclStmt or CXXInitCtorInitializer, walk backward in the current CFG
- /// block to find the constructor expression that directly constructed into
- /// the storage for this statement. Returns null if the constructor for this
- /// statement created a temporary object region rather than directly
- /// constructing into an existing region.
- const CXXConstructExpr *findDirectConstructorForCurrentCFGElement();
+ static SVal makeElementRegion(ProgramStateRef State, SVal LValue,
+ QualType &Ty, bool &IsArray, unsigned Idx = 0);
/// Common code that handles either a CXXConstructExpr or a
/// CXXInheritedCtorInitExpr.
@@ -872,19 +904,56 @@ public:
/// Note whether this loop has any more iteratios to model. These methods are
/// essentially an interface for a GDM trait. Further reading in
/// ExprEngine::VisitObjCForCollectionStmt().
- LLVM_NODISCARD static ProgramStateRef
+ [[nodiscard]] static ProgramStateRef
setWhetherHasMoreIteration(ProgramStateRef State,
const ObjCForCollectionStmt *O,
const LocationContext *LC, bool HasMoreIteraton);
- LLVM_NODISCARD static ProgramStateRef
+ [[nodiscard]] static ProgramStateRef
removeIterationState(ProgramStateRef State, const ObjCForCollectionStmt *O,
const LocationContext *LC);
- LLVM_NODISCARD static bool hasMoreIteration(ProgramStateRef State,
- const ObjCForCollectionStmt *O,
- const LocationContext *LC);
+ [[nodiscard]] static bool hasMoreIteration(ProgramStateRef State,
+ const ObjCForCollectionStmt *O,
+ const LocationContext *LC);
+
private:
+ /// Assuming we construct an array of non-POD types, this method allows us
+ /// to store which element is to be constructed next.
+ static ProgramStateRef
+ setIndexOfElementToConstruct(ProgramStateRef State, const CXXConstructExpr *E,
+ const LocationContext *LCtx, unsigned Idx);
+
+ static ProgramStateRef
+ removeIndexOfElementToConstruct(ProgramStateRef State,
+ const CXXConstructExpr *E,
+ const LocationContext *LCtx);
+
+ /// Assuming we destruct an array of non-POD types, this method allows us
+ /// to store which element is to be destructed next.
+ static ProgramStateRef setPendingArrayDestruction(ProgramStateRef State,
+ const LocationContext *LCtx,
+ unsigned Idx);
+
+ static ProgramStateRef
+ removePendingArrayDestruction(ProgramStateRef State,
+ const LocationContext *LCtx);
+
+ /// Sets the size of the array in a pending ArrayInitLoopExpr.
+ static ProgramStateRef setPendingInitLoop(ProgramStateRef State,
+ const CXXConstructExpr *E,
+ const LocationContext *LCtx,
+ unsigned Idx);
+
+ static ProgramStateRef removePendingInitLoop(ProgramStateRef State,
+ const CXXConstructExpr *E,
+ const LocationContext *LCtx);
+
+ static ProgramStateRef
+ removeStateTraitsUsedForArrayEvaluation(ProgramStateRef State,
+ const CXXConstructExpr *E,
+ const LocationContext *LCtx);
+
/// Store the location of a C++ object corresponding to a statement
/// until the statement is actually encountered. For example, if a DeclStmt
/// has CXXConstructExpr as its initializer, the object would be considered
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h
index 53b4bf605871..3ee0d229cfc2 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h
@@ -17,11 +17,10 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallBitVector.h"
#include <cassert>
#include <deque>
+#include <optional>
#include <utility>
namespace clang {
@@ -86,11 +85,11 @@ public:
markShouldNotInline(D);
}
- Optional<bool> mayInline(const Decl *D) {
+ std::optional<bool> mayInline(const Decl *D) {
MapTy::const_iterator I = Map.find(D);
if (I != Map.end() && I->second.InlineChecked)
return I->second.MayInline;
- return None;
+ return std::nullopt;
}
void markVisitedBasicBlock(unsigned ID, const Decl* D, unsigned TotalIDs) {
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopUnrolling.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopUnrolling.h
index 53b221cb53c9..eb2b0b343428 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopUnrolling.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopUnrolling.h
@@ -28,7 +28,6 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
namespace clang {
namespace ento {
-class AnalysisManager;
/// Returns if the given State indicates that is inside a completely unrolled
/// loop.
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
index 9f85347db5df..151d3e57c1cb 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
@@ -30,13 +30,14 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <cstdint>
#include <limits>
+#include <optional>
#include <string>
#include <utility>
@@ -74,6 +75,7 @@ public:
RegionOffset() = default;
RegionOffset(const MemRegion *r, int64_t off) : R(r), Offset(off) {}
+ /// It might return null.
const MemRegion *getRegion() const { return R; }
bool hasSymbolicOffset() const { return Offset == Symbolic; }
@@ -101,7 +103,7 @@ public:
private:
const Kind kind;
- mutable Optional<RegionOffset> cachedOffset;
+ mutable std::optional<RegionOffset> cachedOffset;
protected:
MemRegion(Kind k) : kind(k) {}
@@ -114,26 +116,27 @@ public:
virtual MemRegionManager &getMemRegionManager() const = 0;
- const MemSpaceRegion *getMemorySpace() const;
+ LLVM_ATTRIBUTE_RETURNS_NONNULL const MemSpaceRegion *getMemorySpace() const;
- const MemRegion *getBaseRegion() const;
+ LLVM_ATTRIBUTE_RETURNS_NONNULL const MemRegion *getBaseRegion() const;
/// Recursively retrieve the region of the most derived class instance of
/// regions of C++ base class instances.
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const MemRegion *getMostDerivedObjectRegion() const;
/// Check if the region is a subregion of the given region.
/// Each region is a subregion of itself.
virtual bool isSubRegionOf(const MemRegion *R) const;
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const MemRegion *StripCasts(bool StripBaseAndDerivedCasts = true) const;
/// If this is a symbolic region, returns the region. Otherwise,
/// goes up the base chain looking for the first symbolic base region.
+ /// It might return null.
const SymbolicRegion *getSymbolicBase() const;
- bool hasGlobalsOrParametersStorage() const;
-
bool hasStackStorage() const;
bool hasStackNonParametersStorage() const;
@@ -169,7 +172,8 @@ public:
Kind getKind() const { return kind; }
template<typename RegionTy> const RegionTy* getAs() const;
- template<typename RegionTy> const RegionTy* castAs() const;
+ template <typename RegionTy>
+ LLVM_ATTRIBUTE_RETURNS_NONNULL const RegionTy *castAs() const;
virtual bool isBoundable() const { return false; }
@@ -268,6 +272,7 @@ public:
void dumpToStream(raw_ostream &os) const override;
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const CodeTextRegion *getCodeRegion() const { return CR; }
static bool classof(const MemRegion *R) {
@@ -391,6 +396,7 @@ protected:
}
public:
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const StackFrameContext *getStackFrame() const { return SFC; }
void Profile(llvm::FoldingSetNodeID &ID) const override;
@@ -444,6 +450,7 @@ protected:
}
public:
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const MemRegion* getSuperRegion() const {
return superRegion;
}
@@ -481,6 +488,7 @@ class AllocaRegion : public SubRegion {
unsigned Cnt, const MemRegion *superRegion);
public:
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const Expr *getExpr() const { return Ex; }
bool isBoundable() const override { return true; }
@@ -639,10 +647,12 @@ public:
return locTy;
}
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const BlockDecl *getDecl() const {
return BD;
}
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
AnalysisDeclContext *getAnalysisDeclContext() const { return AC; }
void dumpToStream(raw_ostream &os) const override;
@@ -674,6 +684,7 @@ class BlockDataRegion : public TypedRegion {
: TypedRegion(sreg, BlockDataRegionKind), BC(bc), LC(lc),
BlockCount(count) {
assert(bc);
+ assert(bc->getDecl());
assert(lc);
assert(isa<GlobalImmutableSpaceRegion>(sreg) ||
isa<StackLocalsSpaceRegion>(sreg) ||
@@ -685,8 +696,10 @@ class BlockDataRegion : public TypedRegion {
const MemRegion *);
public:
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const BlockCodeRegion *getCodeRegion() const { return BC; }
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const BlockDecl *getDecl() const { return BC->getDecl(); }
QualType getLocationType() const override { return BC->getLocationType(); }
@@ -700,10 +713,12 @@ public:
const MemRegion * const *originalR)
: R(r), OriginalR(originalR) {}
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const VarRegion *getCapturedRegion() const {
return cast<VarRegion>(*R);
}
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const VarRegion *getOriginalRegion() const {
return cast<VarRegion>(*OriginalR);
}
@@ -723,14 +738,20 @@ public:
++OriginalR;
return *this;
}
+
+ // This isn't really a conventional iterator.
+ // We just implement the deref as a no-op for now to make range-based for
+ // loops work.
+ const referenced_vars_iterator &operator*() const { return *this; }
};
/// Return the original region for a captured region, if
- /// one exists.
+ /// one exists. It might return null.
const VarRegion *getOriginalRegion(const VarRegion *VR) const;
referenced_vars_iterator referenced_vars_begin() const;
referenced_vars_iterator referenced_vars_end() const;
+ llvm::iterator_range<referenced_vars_iterator> referenced_vars() const;
void dumpToStream(raw_ostream &os) const override;
@@ -764,12 +785,26 @@ class SymbolicRegion : public SubRegion {
assert(s->getType()->isAnyPointerType() ||
s->getType()->isReferenceType() ||
s->getType()->isBlockPointerType());
- assert(isa<UnknownSpaceRegion>(sreg) || isa<HeapSpaceRegion>(sreg));
+ assert(isa<UnknownSpaceRegion>(sreg) || isa<HeapSpaceRegion>(sreg) ||
+ isa<GlobalSystemSpaceRegion>(sreg));
}
public:
+ /// It might return null.
SymbolRef getSymbol() const { return sym; }
+ /// Gets the type of the wrapped symbol.
+ /// This type might not be accurate at all times - it's just our best guess.
+ /// Consider these cases:
+ /// void foo(void *data, char *str, base *obj) {...}
+ /// The type of the pointee of `data` is of course not `void`, yet that's our
+ /// best guess. `str` might point to any object and `obj` might point to some
+ /// derived instance. `TypedRegions` other hand are representing the cases
+ /// when we actually know their types.
+ QualType getPointeeStaticType() const {
+ return sym->getType()->getPointeeType();
+ }
+
bool isBoundable() const override { return true; }
void Profile(llvm::FoldingSetNodeID& ID) const override;
@@ -801,6 +836,7 @@ class StringRegion : public TypedValueRegion {
const MemRegion *superRegion);
public:
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const StringLiteral *getStringLiteral() const { return Str; }
QualType getValueType() const override { return Str->getType(); }
@@ -835,6 +871,7 @@ class ObjCStringRegion : public TypedValueRegion {
const MemRegion *superRegion);
public:
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const ObjCStringLiteral *getObjCStringLiteral() const { return Str; }
QualType getValueType() const override { return Str->getType(); }
@@ -881,6 +918,7 @@ public:
void dumpToStream(raw_ostream &os) const override;
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const CompoundLiteralExpr *getLiteralExpr() const { return CL; }
static bool classof(const MemRegion* R) {
@@ -895,6 +933,7 @@ protected:
}
public:
+ // TODO what does this return?
virtual const ValueDecl *getDecl() const = 0;
static bool classof(const MemRegion* R) {
@@ -918,8 +957,10 @@ protected:
}
public:
+ // TODO what does this return?
const VarDecl *getDecl() const override = 0;
+ /// It might return null.
const StackFrameContext *getStackFrame() const;
QualType getValueType() const override {
@@ -947,6 +988,7 @@ class NonParamVarRegion : public VarRegion {
// which, unlike everything else on this list, are not memory spaces.
assert(isa<GlobalsSpaceRegion>(sReg) || isa<StackSpaceRegion>(sReg) ||
isa<BlockDataRegion>(sReg) || isa<UnknownSpaceRegion>(sReg));
+ assert(vd);
}
static void ProfileRegion(llvm::FoldingSetNodeID &ID, const VarDecl *VD,
@@ -955,6 +997,7 @@ class NonParamVarRegion : public VarRegion {
public:
void Profile(llvm::FoldingSetNodeID &ID) const override;
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const VarDecl *getDecl() const override { return VD; }
QualType getValueType() const override {
@@ -992,12 +1035,14 @@ class ParamVarRegion : public VarRegion {
ParamVarRegion(const Expr *OE, unsigned Idx, const MemRegion *SReg)
: VarRegion(SReg, ParamVarRegionKind), OriginExpr(OE), Index(Idx) {
assert(!cast<StackSpaceRegion>(SReg)->getStackFrame()->inTopFrame());
+ assert(OriginExpr);
}
static void ProfileRegion(llvm::FoldingSetNodeID &ID, const Expr *OE,
unsigned Idx, const MemRegion *SReg);
public:
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const Expr *getOriginExpr() const { return OriginExpr; }
unsigned getIndex() const { return Index; }
@@ -1006,6 +1051,8 @@ public:
void dumpToStream(raw_ostream &os) const override;
QualType getValueType() const override;
+
+ /// TODO: What does this return?
const ParmVarDecl *getDecl() const override;
bool canPrintPrettyAsExpr() const override;
@@ -1057,7 +1104,9 @@ class FieldRegion : public DeclRegion {
const FieldDecl *FD;
FieldRegion(const FieldDecl *fd, const SubRegion *sReg)
- : DeclRegion(sReg, FieldRegionKind), FD(fd) {}
+ : DeclRegion(sReg, FieldRegionKind), FD(fd) {
+ assert(FD);
+ }
static void ProfileRegion(llvm::FoldingSetNodeID &ID, const FieldDecl *FD,
const MemRegion* superRegion) {
@@ -1067,6 +1116,7 @@ class FieldRegion : public DeclRegion {
}
public:
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const FieldDecl *getDecl() const override { return FD; }
void Profile(llvm::FoldingSetNodeID &ID) const override;
@@ -1099,6 +1149,7 @@ class ObjCIvarRegion : public DeclRegion {
const MemRegion* superRegion);
public:
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const ObjCIvarDecl *getDecl() const override;
void Profile(llvm::FoldingSetNodeID& ID) const override;
@@ -1131,6 +1182,8 @@ class RegionRawOffset {
public:
// FIXME: Eventually support symbolic offsets.
CharUnits getOffset() const { return Offset; }
+
+ // It might return null.
const MemRegion *getRegion() const { return Region; }
void dumpToStream(raw_ostream &os) const;
@@ -1147,7 +1200,7 @@ class ElementRegion : public TypedValueRegion {
ElementRegion(QualType elementType, NonLoc Idx, const SubRegion *sReg)
: TypedValueRegion(sReg, ElementRegionKind), ElementType(elementType),
Index(Idx) {
- assert((!Idx.getAs<nonloc::ConcreteInt>() ||
+ assert((!isa<nonloc::ConcreteInt>(Idx) ||
Idx.castAs<nonloc::ConcreteInt>().getValue().isSigned()) &&
"The index must be signed");
assert(!elementType.isNull() && !elementType->isVoidType() &&
@@ -1185,16 +1238,19 @@ class CXXTempObjectRegion : public TypedValueRegion {
CXXTempObjectRegion(Expr const *E, MemSpaceRegion const *sReg)
: TypedValueRegion(sReg, CXXTempObjectRegionKind), Ex(E) {
assert(E);
- assert(isa<StackLocalsSpaceRegion>(sReg) ||
- isa<GlobalInternalSpaceRegion>(sReg));
+ assert(isa<StackLocalsSpaceRegion>(sReg));
}
static void ProfileRegion(llvm::FoldingSetNodeID &ID,
Expr const *E, const MemRegion *sReg);
public:
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const Expr *getExpr() const { return Ex; }
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
+ const StackFrameContext *getStackFrame() const;
+
QualType getValueType() const override { return Ex->getType(); }
void dumpToStream(raw_ostream &os) const override;
@@ -1206,6 +1262,45 @@ public:
}
};
+// C++ temporary object that have lifetime extended to lifetime of the
+// variable. Usually they represent temporary bounds to reference variables.
+class CXXLifetimeExtendedObjectRegion : public TypedValueRegion {
+ friend class MemRegionManager;
+
+ Expr const *Ex;
+ ValueDecl const *ExD;
+
+ CXXLifetimeExtendedObjectRegion(Expr const *E, ValueDecl const *D,
+ MemSpaceRegion const *sReg)
+ : TypedValueRegion(sReg, CXXLifetimeExtendedObjectRegionKind), Ex(E),
+ ExD(D) {
+ assert(E);
+ assert(D);
+ assert((isa<StackLocalsSpaceRegion, GlobalInternalSpaceRegion>(sReg)));
+ }
+
+ static void ProfileRegion(llvm::FoldingSetNodeID &ID, Expr const *E,
+ ValueDecl const *D, const MemRegion *sReg);
+
+public:
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
+ const Expr *getExpr() const { return Ex; }
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
+ const ValueDecl *getExtendingDecl() const { return ExD; }
+ /// It might return null.
+ const StackFrameContext *getStackFrame() const;
+
+ QualType getValueType() const override { return Ex->getType(); }
+
+ void dumpToStream(raw_ostream &os) const override;
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override;
+
+ static bool classof(const MemRegion *R) {
+ return R->getKind() == CXXLifetimeExtendedObjectRegionKind;
+ }
+};
+
// CXXBaseObjectRegion represents a base object within a C++ object. It is
// identified by the base class declaration and the region of its parent object.
class CXXBaseObjectRegion : public TypedValueRegion {
@@ -1223,6 +1318,7 @@ class CXXBaseObjectRegion : public TypedValueRegion {
bool IsVirtual, const MemRegion *SReg);
public:
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const CXXRecordDecl *getDecl() const { return Data.getPointer(); }
bool isVirtual() const { return Data.getInt(); }
@@ -1265,6 +1361,7 @@ class CXXDerivedObjectRegion : public TypedValueRegion {
const MemRegion *SReg);
public:
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const CXXRecordDecl *getDecl() const { return DerivedD; }
QualType getValueType() const override;
@@ -1290,8 +1387,8 @@ const RegionTy* MemRegion::getAs() const {
return nullptr;
}
-template<typename RegionTy>
-const RegionTy* MemRegion::castAs() const {
+template <typename RegionTy>
+LLVM_ATTRIBUTE_RETURNS_NONNULL const RegionTy *MemRegion::castAs() const {
return cast<RegionTy>(this);
}
@@ -1325,6 +1422,7 @@ public:
~MemRegionManager();
ASTContext &getContext() { return Ctx; }
+ const ASTContext &getContext() const { return Ctx; }
llvm::BumpPtrAllocator &getAllocator() { return A; }
@@ -1375,7 +1473,9 @@ public:
const LocationContext *LC);
/// Retrieve or create a "symbolic" memory region.
- const SymbolicRegion* getSymbolicRegion(SymbolRef Sym);
+ /// If no memory space is specified, `UnknownSpaceRegion` will be used.
+ const SymbolicRegion *
+ getSymbolicRegion(SymbolRef Sym, const MemSpaceRegion *MemSpace = nullptr);
/// Return a unique symbolic region belonging to heap memory space.
const SymbolicRegion *getSymbolicHeapRegion(SymbolRef sym);
@@ -1433,6 +1533,19 @@ public:
const CXXTempObjectRegion *getCXXTempObjectRegion(Expr const *Ex,
LocationContext const *LC);
+ /// Create a CXXLifetimeExtendedObjectRegion for temporaries which are
+ /// lifetime-extended by local references.
+ const CXXLifetimeExtendedObjectRegion *
+ getCXXLifetimeExtendedObjectRegion(Expr const *Ex, ValueDecl const *VD,
+ LocationContext const *LC);
+
+ /// Create a CXXLifetimeExtendedObjectRegion for temporaries which are
+ /// lifetime-extended by *static* references.
+ /// This differs from \ref getCXXLifetimeExtendedObjectRegion(Expr const *,
+ /// ValueDecl const *, LocationContext const *) in the super-region used.
+ const CXXLifetimeExtendedObjectRegion *
+ getCXXStaticLifetimeExtendedObjectRegion(const Expr *Ex, ValueDecl const *VD);
+
/// Create a CXXBaseObjectRegion with the given base class for region
/// \p Super.
///
@@ -1471,11 +1584,6 @@ public:
const LocationContext *lc,
unsigned blockCount);
- /// Create a CXXTempObjectRegion for temporaries which are lifetime-extended
- /// by static references. This differs from getCXXTempObjectRegion in the
- /// super-region used.
- const CXXTempObjectRegion *getCXXStaticTempObjectRegion(const Expr *Ex);
-
private:
template <typename RegionTy, typename SuperTy,
typename Arg1Ty>
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
index 9a34639e2707..ca75c2a756a4 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
@@ -23,6 +23,7 @@
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/Support/Allocator.h"
+#include <optional>
#include <utility>
namespace llvm {
@@ -47,8 +48,6 @@ typedef std::unique_ptr<StoreManager>(*StoreManagerCreator)(
// ProgramStateTrait - Traits used by the Generic Data Map of a ProgramState.
//===----------------------------------------------------------------------===//
-template <typename T> struct ProgramStatePartialTrait;
-
template <typename T> struct ProgramStateTrait {
typedef typename T::data_type data_type;
static inline void *MakeVoidPtr(data_type D) { return (void*) D; }
@@ -80,11 +79,49 @@ private:
friend class ProgramStateManager;
friend class ExplodedGraph;
friend class ExplodedNode;
+ friend class NodeBuilder;
ProgramStateManager *stateMgr;
Environment Env; // Maps a Stmt to its current SVal.
Store store; // Maps a location to its current value.
GenericDataMap GDM; // Custom data stored by a client of this class.
+
+ // A state is infeasible if there is a contradiction among the constraints.
+ // An infeasible state is represented by a `nullptr`.
+ // In the sense of `assumeDual`, a state can have two children by adding a
+ // new constraint and the negation of that new constraint. A parent state is
+ // over-constrained if both of its children are infeasible. In the
+ // mathematical sense, it means that the parent is infeasible and we should
+ // have realized that at the moment when we have created it. However, we
+ // could not recognize that because of the imperfection of the underlying
+ // constraint solver. We say it is posteriorly over-constrained because we
+ // recognize that a parent is infeasible only *after* a new and more specific
+ // constraint and its negation are evaluated.
+ //
+ // Example:
+ //
+ // x * x = 4 and x is in the range [0, 1]
+ // This is an already infeasible state, but the constraint solver is not
+ // capable of handling sqrt, thus we don't know it yet.
+ //
+ // Then a new constraint `x = 0` is added. At this moment the constraint
+ // solver re-evaluates the existing constraints and realizes the
+ // contradiction `0 * 0 = 4`.
+ // We also evaluate the negated constraint `x != 0`; the constraint solver
+ // deduces `x = 1` and then realizes the contradiction `1 * 1 = 4`.
+ // Both children are infeasible, thus the parent state is marked as
+ // posteriorly over-constrained. These parents are handled with special care:
+ // we do not allow transitions to exploded nodes with such states.
+ bool PosteriorlyOverconstrained = false;
+ // Make internal constraint solver entities friends so they can access the
+ // overconstrained-related functions. We want to keep this API inaccessible
+ // for Checkers.
+ friend class ConstraintManager;
+ bool isPosteriorlyOverconstrained() const {
+ return PosteriorlyOverconstrained;
+ }
+ ProgramStateRef cloneAsPosteriorlyOverconstrained() const;
+
unsigned refCount;
/// makeWithStore - Return a ProgramState with the same values as the current
@@ -137,6 +174,7 @@ public:
V->Env.Profile(ID);
ID.AddPointer(V->store);
V->GDM.Profile(ID);
+ ID.AddBoolean(V->PosteriorlyOverconstrained);
}
/// Profile - Used to profile the contents of this object for inclusion
@@ -179,18 +217,22 @@ public:
///
/// This returns a new state with the added constraint on \p cond.
/// If no new state is feasible, NULL is returned.
- LLVM_NODISCARD ProgramStateRef assume(DefinedOrUnknownSVal cond,
- bool assumption) const;
+ [[nodiscard]] ProgramStateRef assume(DefinedOrUnknownSVal cond,
+ bool assumption) const;
/// Assumes both "true" and "false" for \p cond, and returns both
/// corresponding states (respectively).
///
/// This is more efficient than calling assume() twice. Note that one (but not
/// both) of the returned states may be NULL.
- LLVM_NODISCARD std::pair<ProgramStateRef, ProgramStateRef>
+ [[nodiscard]] std::pair<ProgramStateRef, ProgramStateRef>
assume(DefinedOrUnknownSVal cond) const;
- LLVM_NODISCARD ProgramStateRef
+ [[nodiscard]] std::pair<ProgramStateRef, ProgramStateRef>
+ assumeInBoundDual(DefinedOrUnknownSVal idx, DefinedOrUnknownSVal upperBound,
+ QualType IndexType = QualType()) const;
+
+ [[nodiscard]] ProgramStateRef
assumeInBound(DefinedOrUnknownSVal idx, DefinedOrUnknownSVal upperBound,
bool assumption, QualType IndexType = QualType()) const;
@@ -200,17 +242,17 @@ public:
///
/// This returns a new state with the added constraint on \p cond.
/// If no new state is feasible, NULL is returned.
- LLVM_NODISCARD ProgramStateRef assumeInclusiveRange(DefinedOrUnknownSVal Val,
- const llvm::APSInt &From,
- const llvm::APSInt &To,
- bool assumption) const;
+ [[nodiscard]] ProgramStateRef assumeInclusiveRange(DefinedOrUnknownSVal Val,
+ const llvm::APSInt &From,
+ const llvm::APSInt &To,
+ bool assumption) const;
/// Assumes given range both "true" and "false" for \p Val, and returns both
/// corresponding states (respectively).
///
/// This is more efficient than calling assume() twice. Note that one (but not
/// both) of the returned states may be NULL.
- LLVM_NODISCARD std::pair<ProgramStateRef, ProgramStateRef>
+ [[nodiscard]] std::pair<ProgramStateRef, ProgramStateRef>
assumeInclusiveRange(DefinedOrUnknownSVal Val, const llvm::APSInt &From,
const llvm::APSInt &To) const;
@@ -226,6 +268,7 @@ public:
ConditionTruthVal areEqual(SVal Lhs, SVal Rhs) const;
/// Utility method for getting regions.
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const VarRegion* getRegion(const VarDecl *D, const LocationContext *LC) const;
//==---------------------------------------------------------------------==//
@@ -234,16 +277,16 @@ public:
/// Create a new state by binding the value 'V' to the statement 'S' in the
/// state's environment.
- LLVM_NODISCARD ProgramStateRef BindExpr(const Stmt *S,
- const LocationContext *LCtx, SVal V,
- bool Invalidate = true) const;
+ [[nodiscard]] ProgramStateRef BindExpr(const Stmt *S,
+ const LocationContext *LCtx, SVal V,
+ bool Invalidate = true) const;
- LLVM_NODISCARD ProgramStateRef bindLoc(Loc location, SVal V,
- const LocationContext *LCtx,
- bool notifyChanges = true) const;
+ [[nodiscard]] ProgramStateRef bindLoc(Loc location, SVal V,
+ const LocationContext *LCtx,
+ bool notifyChanges = true) const;
- LLVM_NODISCARD ProgramStateRef bindLoc(SVal location, SVal V,
- const LocationContext *LCtx) const;
+ [[nodiscard]] ProgramStateRef bindLoc(SVal location, SVal V,
+ const LocationContext *LCtx) const;
/// Initializes the region of memory represented by \p loc with an initial
/// value. Once initialized, all values loaded from any sub-regions of that
@@ -251,15 +294,15 @@ public:
/// This method should not be used on regions that are already initialized.
/// If you need to indicate that memory contents have suddenly become unknown
/// within a certain region of memory, consider invalidateRegions().
- LLVM_NODISCARD ProgramStateRef
+ [[nodiscard]] ProgramStateRef
bindDefaultInitial(SVal loc, SVal V, const LocationContext *LCtx) const;
/// Performs C++ zero-initialization procedure on the region of memory
/// represented by \p loc.
- LLVM_NODISCARD ProgramStateRef
+ [[nodiscard]] ProgramStateRef
bindDefaultZero(SVal loc, const LocationContext *LCtx) const;
- LLVM_NODISCARD ProgramStateRef killBinding(Loc LV) const;
+ [[nodiscard]] ProgramStateRef killBinding(Loc LV) const;
/// Returns the state with bindings for the given regions
/// cleared from the store.
@@ -279,24 +322,25 @@ public:
/// the call and should be considered directly invalidated.
/// \param ITraits information about special handling for a particular
/// region/symbol.
- LLVM_NODISCARD ProgramStateRef
+ [[nodiscard]] ProgramStateRef
invalidateRegions(ArrayRef<const MemRegion *> Regions, const Expr *E,
unsigned BlockCount, const LocationContext *LCtx,
bool CausesPointerEscape, InvalidatedSymbols *IS = nullptr,
const CallEvent *Call = nullptr,
RegionAndSymbolInvalidationTraits *ITraits = nullptr) const;
- LLVM_NODISCARD ProgramStateRef
- invalidateRegions(ArrayRef<SVal> Regions, const Expr *E,
- unsigned BlockCount, const LocationContext *LCtx,
- bool CausesPointerEscape, InvalidatedSymbols *IS = nullptr,
+ [[nodiscard]] ProgramStateRef
+ invalidateRegions(ArrayRef<SVal> Regions, const Expr *E, unsigned BlockCount,
+ const LocationContext *LCtx, bool CausesPointerEscape,
+ InvalidatedSymbols *IS = nullptr,
const CallEvent *Call = nullptr,
RegionAndSymbolInvalidationTraits *ITraits = nullptr) const;
/// enterStackFrame - Returns the state for entry to the given stack frame,
/// preserving the current state.
- LLVM_NODISCARD ProgramStateRef enterStackFrame(
- const CallEvent &Call, const StackFrameContext *CalleeCtx) const;
+ [[nodiscard]] ProgramStateRef
+ enterStackFrame(const CallEvent &Call,
+ const StackFrameContext *CalleeCtx) const;
/// Return the value of 'self' if available in the given context.
SVal getSelfSVal(const LocationContext *LC) const;
@@ -308,10 +352,6 @@ public:
Loc getLValue(const CXXRecordDecl *BaseClass, const SubRegion *Super,
bool IsVirtual) const;
- /// Get the lvalue for a parameter.
- Loc getLValue(const Expr *Call, unsigned Index,
- const LocationContext *LC) const;
-
/// Get the lvalue for a variable reference.
Loc getLValue(const VarDecl *D, const LocationContext *LC) const;
@@ -379,7 +419,7 @@ public:
void *const* FindGDM(void *K) const;
template <typename T>
- LLVM_NODISCARD ProgramStateRef
+ [[nodiscard]] ProgramStateRef
add(typename ProgramStateTrait<T>::key_type K) const;
template <typename T>
@@ -399,27 +439,27 @@ public:
typename ProgramStateTrait<T>::context_type get_context() const;
template <typename T>
- LLVM_NODISCARD ProgramStateRef
+ [[nodiscard]] ProgramStateRef
remove(typename ProgramStateTrait<T>::key_type K) const;
template <typename T>
- LLVM_NODISCARD ProgramStateRef
+ [[nodiscard]] ProgramStateRef
remove(typename ProgramStateTrait<T>::key_type K,
typename ProgramStateTrait<T>::context_type C) const;
- template <typename T> LLVM_NODISCARD ProgramStateRef remove() const;
+ template <typename T> [[nodiscard]] ProgramStateRef remove() const;
template <typename T>
- LLVM_NODISCARD ProgramStateRef
+ [[nodiscard]] ProgramStateRef
set(typename ProgramStateTrait<T>::data_type D) const;
template <typename T>
- LLVM_NODISCARD ProgramStateRef
+ [[nodiscard]] ProgramStateRef
set(typename ProgramStateTrait<T>::key_type K,
typename ProgramStateTrait<T>::value_type E) const;
template <typename T>
- LLVM_NODISCARD ProgramStateRef
+ [[nodiscard]] ProgramStateRef
set(typename ProgramStateTrait<T>::key_type K,
typename ProgramStateTrait<T>::value_type E,
typename ProgramStateTrait<T>::context_type C) const;
@@ -687,7 +727,7 @@ inline ProgramStateRef ProgramState::assumeInclusiveRange(
if (Val.isUnknown())
return this;
- assert(Val.getAs<NonLoc>() && "Only NonLocs are supported!");
+ assert(isa<NonLoc>(Val) && "Only NonLocs are supported!");
return getStateManager().ConstraintMgr->assumeInclusiveRange(
this, Val.castAs<NonLoc>(), From, To, Assumption);
@@ -700,14 +740,14 @@ ProgramState::assumeInclusiveRange(DefinedOrUnknownSVal Val,
if (Val.isUnknown())
return std::make_pair(this, this);
- assert(Val.getAs<NonLoc>() && "Only NonLocs are supported!");
+ assert(isa<NonLoc>(Val) && "Only NonLocs are supported!");
return getStateManager().ConstraintMgr->assumeInclusiveRangeDual(
this, Val.castAs<NonLoc>(), From, To);
}
inline ProgramStateRef ProgramState::bindLoc(SVal LV, SVal V, const LocationContext *LCtx) const {
- if (Optional<Loc> L = LV.getAs<Loc>())
+ if (std::optional<Loc> L = LV.getAs<Loc>())
return bindLoc(*L, V, LCtx);
return this;
}
@@ -757,7 +797,7 @@ inline SVal ProgramState::getLValue(const IndirectFieldDecl *D,
}
inline SVal ProgramState::getLValue(QualType ElementType, SVal Idx, SVal Base) const{
- if (Optional<NonLoc> N = Idx.getAs<NonLoc>())
+ if (std::optional<NonLoc> N = Idx.getAs<NonLoc>())
return getStateManager().StoreMgr->getLValueElement(ElementType, *N, Base);
return UnknownVal();
}
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h
index da82a55e3625..15bec97c5be8 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h
@@ -21,29 +21,32 @@
#include "llvm/ADT/ImmutableSet.h"
#include "llvm/Support/Allocator.h"
#include <cstdint>
+#include <type_traits>
namespace clang {
namespace ento {
- template <typename T> struct ProgramStatePartialTrait;
-
- /// Declares a program state trait for type \p Type called \p Name, and
- /// introduce a type named \c NameTy.
- /// The macro should not be used inside namespaces.
- #define REGISTER_TRAIT_WITH_PROGRAMSTATE(Name, Type) \
- namespace { \
- class Name {}; \
- using Name ## Ty = Type; \
- } \
- namespace clang { \
- namespace ento { \
- template <> \
- struct ProgramStateTrait<Name> \
- : public ProgramStatePartialTrait<Name ## Ty> { \
- static void *GDMIndex() { static int Index; return &Index; } \
- }; \
- } \
- }
+template <typename T, typename Enable = void> struct ProgramStatePartialTrait;
+
+/// Declares a program state trait for type \p Type called \p Name, and
+/// introduce a type named \c NameTy.
+/// The macro should not be used inside namespaces.
+#define REGISTER_TRAIT_WITH_PROGRAMSTATE(Name, Type) \
+ namespace { \
+ class Name {}; \
+ using Name##Ty = Type; \
+ } \
+ namespace clang { \
+ namespace ento { \
+ template <> \
+ struct ProgramStateTrait<Name> : public ProgramStatePartialTrait<Name##Ty> { \
+ static void *GDMIndex() { \
+ static int Index; \
+ return &Index; \
+ } \
+ }; \
+ } \
+ }
/// Declares a factory for objects of type \p Type in the program state
/// manager. The type must provide a ::Factory sub-class. Commonly used for
@@ -267,60 +270,27 @@ namespace ento {
}
};
- // Partial specialization for bool.
- template <> struct ProgramStatePartialTrait<bool> {
- using data_type = bool;
-
- static data_type MakeData(void *const *p) {
- return p ? (data_type) (uintptr_t) *p
- : data_type();
- }
-
- static void *MakeVoidPtr(data_type d) {
- return (void *) (uintptr_t) d;
- }
+ template <typename T> struct DefaultProgramStatePartialTraitImpl {
+ using data_type = T;
+ static T MakeData(void *const *P) { return P ? (T)(uintptr_t)*P : T{}; }
+ static void *MakeVoidPtr(T D) { return (void *)(uintptr_t)D; }
};
- // Partial specialization for unsigned.
- template <> struct ProgramStatePartialTrait<unsigned> {
- using data_type = unsigned;
-
- static data_type MakeData(void *const *p) {
- return p ? (data_type) (uintptr_t) *p
- : data_type();
- }
-
- static void *MakeVoidPtr(data_type d) {
- return (void *) (uintptr_t) d;
- }
- };
-
- // Partial specialization for void*.
- template <> struct ProgramStatePartialTrait<void *> {
- using data_type = void *;
-
- static data_type MakeData(void *const *p) {
- return p ? *p
- : data_type();
- }
-
- static void *MakeVoidPtr(data_type d) {
- return d;
- }
- };
-
- // Partial specialization for const void *.
- template <> struct ProgramStatePartialTrait<const void *> {
- using data_type = const void *;
+ // Partial specialization for integral types.
+ template <typename T>
+ struct ProgramStatePartialTrait<T,
+ std::enable_if_t<std::is_integral<T>::value>>
+ : DefaultProgramStatePartialTraitImpl<T> {};
- static data_type MakeData(void *const *p) {
- return p ? *p : data_type();
- }
+ // Partial specialization for enums.
+ template <typename T>
+ struct ProgramStatePartialTrait<T, std::enable_if_t<std::is_enum<T>::value>>
+ : DefaultProgramStatePartialTraitImpl<T> {};
- static void *MakeVoidPtr(data_type d) {
- return const_cast<void *>(d);
- }
- };
+ // Partial specialization for pointers.
+ template <typename T>
+ struct ProgramStatePartialTrait<T *, void>
+ : DefaultProgramStatePartialTraitImpl<T *> {};
} // namespace ento
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h
index c67df1e51b4f..49ea006e27aa 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_LIB_STATICANALYZER_CORE_RANGEDCONSTRAINTMANAGER_H
-#define LLVM_CLANG_LIB_STATICANALYZER_CORE_RANGEDCONSTRAINTMANAGER_H
+#ifndef LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_RANGEDCONSTRAINTMANAGER_H
+#define LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_RANGEDCONSTRAINTMANAGER_H
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
@@ -48,6 +48,7 @@ public:
ID.AddPointer(&To());
}
void dump(raw_ostream &OS) const;
+ void dump() const;
// In order to keep non-overlapping ranges sorted, we can compare only From
// points.
@@ -139,6 +140,30 @@ public:
/// Complexity: O(N)
/// where N = size(Original)
RangeSet add(RangeSet Original, const llvm::APSInt &Point);
+ /// Create a new set which is a union of two given ranges.
+ /// Possible intersections are not checked here.
+ ///
+ /// Complexity: O(N + M)
+ /// where N = size(LHS), M = size(RHS)
+ RangeSet unite(RangeSet LHS, RangeSet RHS);
+ /// Create a new set by uniting given range set with the given range.
+ /// All intersections and adjacent ranges are handled here.
+ ///
+ /// Complexity: O(N)
+ /// where N = size(Original)
+ RangeSet unite(RangeSet Original, Range Element);
+ /// Create a new set by uniting given range set with the given point.
+ /// All intersections and adjacent ranges are handled here.
+ ///
+ /// Complexity: O(N)
+ /// where N = size(Original)
+ RangeSet unite(RangeSet Original, llvm::APSInt Point);
+ /// Create a new set by uniting given range set with the given range
+ /// between points. All intersections and adjacent ranges are handled here.
+ ///
+ /// Complexity: O(N)
+ /// where N = size(Original)
+ RangeSet unite(RangeSet Original, llvm::APSInt From, llvm::APSInt To);
RangeSet getEmptySet() { return &EmptySet; }
@@ -212,6 +237,29 @@ public:
/// Complexity: O(N)
/// where N = size(What)
RangeSet negate(RangeSet What);
+ /// Performs promotions, truncations and conversions of the given set.
+ ///
+ /// This function is optimized for each of the six cast cases:
+ /// - noop
+ /// - conversion
+ /// - truncation
+ /// - truncation-conversion
+ /// - promotion
+ /// - promotion-conversion
+ ///
+ /// NOTE: This function is NOT self-inverse for truncations, because of
+ /// the higher bits loss:
+ /// - castTo(castTo(OrigRangeOfInt, char), int) != OrigRangeOfInt.
+ /// - castTo(castTo(OrigRangeOfChar, int), char) == OrigRangeOfChar.
+ /// But it is self-inverse for all the rest casts.
+ ///
+ /// Complexity:
+ /// - Noop O(1);
+ /// - Truncation O(N^2);
+ /// - Another case O(N);
+ /// where N = size(What)
+ RangeSet castTo(RangeSet What, APSIntType Ty);
+ RangeSet castTo(RangeSet What, QualType T);
/// Return associated value factory.
BasicValueFactory &getValueFactory() const { return ValueFactory; }
@@ -223,6 +271,25 @@ public:
ContainerType *construct(ContainerType &&From);
RangeSet intersect(const ContainerType &LHS, const ContainerType &RHS);
+ /// NOTE: This function relies on the fact that all values in the
+ /// containers are persistent (created via BasicValueFactory::getValue).
+ ContainerType unite(const ContainerType &LHS, const ContainerType &RHS);
+
+ /// This is a helper function for `castTo` method. Implies not to be used
+ /// separately.
+ /// Performs a truncation case of a cast operation.
+ ContainerType truncateTo(RangeSet What, APSIntType Ty);
+
+ /// This is a helper function for `castTo` method. Implies not to be used
+ /// separately.
+ /// Performs a conversion case and a promotion-conversion case for signeds
+ /// of a cast operation.
+ ContainerType convertTo(RangeSet What, APSIntType Ty);
+
+ /// This is a helper function for `castTo` method. Implies not to be used
+ /// separately.
+ /// Performs a promotion for unsigneds only.
+ ContainerType promoteTo(RangeSet What, APSIntType Ty);
// Many operations include producing new APSInt values and that's why
// we need this factory.
@@ -275,13 +342,37 @@ public:
/// Complexity: O(1)
const llvm::APSInt &getMaxValue() const;
+ bool isUnsigned() const;
+ uint32_t getBitWidth() const;
+ APSIntType getAPSIntType() const;
+
/// Test whether the given point is contained by any of the ranges.
///
/// Complexity: O(logN)
/// where N = size(this)
bool contains(llvm::APSInt Point) const { return containsImpl(Point); }
+ bool containsZero() const {
+ APSIntType T{getMinValue()};
+ return contains(T.getZeroValue());
+ }
+
+ /// Test if the range is the [0,0] range.
+ ///
+ /// Complexity: O(1)
+ bool encodesFalseRange() const {
+ const llvm::APSInt *Constant = getConcreteValue();
+ return Constant && Constant->isZero();
+ }
+
+ /// Test if the range doesn't contain zero.
+ ///
+ /// Complexity: O(logN)
+ /// where N = size(this)
+ bool encodesTrueRange() const { return !containsZero(); }
+
void dump(raw_ostream &OS) const;
+ void dump() const;
bool operator==(const RangeSet &Other) const { return *Impl == *Other.Impl; }
bool operator!=(const RangeSet &Other) const { return !(*this == Other); }
@@ -387,11 +478,22 @@ private:
static void computeAdjustment(SymbolRef &Sym, llvm::APSInt &Adjustment);
};
-/// Try to simplify a given symbolic expression's associated value based on the
-/// constraints in State. This is needed because the Environment bindings are
-/// not getting updated when a new constraint is added to the State.
+/// Try to simplify a given symbolic expression based on the constraints in
+/// State. This is needed because the Environment bindings are not getting
+/// updated when a new constraint is added to the State. If the symbol is
+/// simplified to a non-symbol (e.g. to a constant) then the original symbol
+/// is returned. We use this function in the family of assumeSymNE/EQ/LT/../GE
+/// functions where we can work only with symbols. Use the other function
+/// (simplifyToSVal) if you are interested in a simplification that may yield
+/// a concrete constant value.
SymbolRef simplify(ProgramStateRef State, SymbolRef Sym);
+/// Try to simplify a given symbolic expression's associated `SVal` based on the
+/// constraints in State. This is very similar to `simplify`, but this function
+/// always returns the simplified SVal. The simplified SVal might be a single
+/// constant (i.e. `ConcreteInt`).
+SVal simplifyToSVal(ProgramStateRef State, SymbolRef Sym);
+
} // namespace ento
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Regions.def b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Regions.def
index 44ab31fc9f2e..245828a2fcc0 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Regions.def
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Regions.def
@@ -69,6 +69,7 @@ ABSTRACT_REGION(SubRegion, MemRegion)
REGION(CXXBaseObjectRegion, TypedValueRegion)
REGION(CXXDerivedObjectRegion, TypedValueRegion)
REGION(CXXTempObjectRegion, TypedValueRegion)
+ REGION(CXXLifetimeExtendedObjectRegion, TypedValueRegion)
REGION(CXXThisRegion, TypedValueRegion)
ABSTRACT_REGION(DeclRegion, TypedValueRegion)
REGION(FieldRegion, DeclRegion)
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h
index e4878d4e0156..5116a4c06850 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h
@@ -18,6 +18,7 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h"
+#include <optional>
typedef llvm::ImmutableSet<
std::pair<clang::ento::SymbolRef, const llvm::SMTExpr *>>
@@ -128,8 +129,8 @@ public:
addStateConstraints(State);
// Constraints are unsatisfiable
- Optional<bool> isSat = Solver->check();
- if (!isSat.hasValue() || !isSat.getValue())
+ std::optional<bool> isSat = Solver->check();
+ if (!isSat || !*isSat)
return nullptr;
// Model does not assign interpretation
@@ -145,8 +146,8 @@ public:
Solver->addConstraint(NotExp);
- Optional<bool> isNotSat = Solver->check();
- if (!isNotSat.hasValue() || isNotSat.getValue())
+ std::optional<bool> isNotSat = Solver->check();
+ if (!isNotSat || *isNotSat)
return nullptr;
// This is the only solution, store it
@@ -202,9 +203,9 @@ public:
auto CZ = State->get<ConstraintSMT>();
auto &CZFactory = State->get_context<ConstraintSMT>();
- for (auto I = CZ.begin(), E = CZ.end(); I != E; ++I) {
- if (SymReaper.isDead(I->first))
- CZ = CZFactory.remove(CZ, *I);
+ for (const auto &Entry : CZ) {
+ if (SymReaper.isDead(Entry.first))
+ CZ = CZFactory.remove(CZ, Entry);
}
return State->set<ConstraintSMT>(CZ);
@@ -246,7 +247,7 @@ public:
bool canReasonAbout(SVal X) const override {
const TargetInfo &TI = getBasicVals().getContext().getTargetInfo();
- Optional<nonloc::SymbolVal> SymVal = X.getAs<nonloc::SymbolVal>();
+ std::optional<nonloc::SymbolVal> SymVal = X.getAs<nonloc::SymbolVal>();
if (!SymVal)
return true;
@@ -340,11 +341,11 @@ protected:
Solver->reset();
addStateConstraints(NewState);
- Optional<bool> res = Solver->check();
- if (!res.hasValue())
+ std::optional<bool> res = Solver->check();
+ if (!res)
Cached[hash] = ConditionTruthVal();
else
- Cached[hash] = ConditionTruthVal(res.getValue());
+ Cached[hash] = ConditionTruthVal(*res);
return Cached[hash];
}
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h
index 2d0f169260a4..fcc9c02999b3 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConv.h
@@ -446,6 +446,30 @@ public:
return getCastExpr(Solver, Ctx, Exp, FromTy, Sym->getType());
}
+ if (const UnarySymExpr *USE = dyn_cast<UnarySymExpr>(Sym)) {
+ if (RetTy)
+ *RetTy = Sym->getType();
+
+ QualType OperandTy;
+ llvm::SMTExprRef OperandExp =
+ getSymExpr(Solver, Ctx, USE->getOperand(), &OperandTy, hasComparison);
+ llvm::SMTExprRef UnaryExp =
+ OperandTy->isRealFloatingType()
+ ? fromFloatUnOp(Solver, USE->getOpcode(), OperandExp)
+ : fromUnOp(Solver, USE->getOpcode(), OperandExp);
+
+ // Currently, without the `support-symbolic-integer-casts=true` option,
+ // we do not emit `SymbolCast`s for implicit casts.
+ // One such implicit cast is missing if the operand of the unary operator
+ // has a different type than the unary itself.
+ if (Ctx.getTypeSize(OperandTy) != Ctx.getTypeSize(Sym->getType())) {
+ if (hasComparison)
+ *hasComparison = false;
+ return getCastExpr(Solver, Ctx, UnaryExp, OperandTy, Sym->getType());
+ }
+ return UnaryExp;
+ }
+
if (const BinarySymExpr *BSE = dyn_cast<BinarySymExpr>(Sym)) {
llvm::SMTExprRef Exp =
getSymBinExpr(Solver, Ctx, BSE, hasComparison, RetTy);
@@ -654,14 +678,14 @@ public:
assert(!LTy.isNull() && !RTy.isNull() && "Input type is null!");
// Always perform integer promotion before checking type equality.
// Otherwise, e.g. (bool) a + (bool) b could trigger a backend assertion
- if (LTy->isPromotableIntegerType()) {
+ if (Ctx.isPromotableIntegerType(LTy)) {
QualType NewTy = Ctx.getPromotedIntegerType(LTy);
uint64_t NewBitWidth = Ctx.getTypeSize(NewTy);
LHS = (*doCast)(Solver, LHS, NewTy, NewBitWidth, LTy, LBitWidth);
LTy = NewTy;
LBitWidth = NewBitWidth;
}
- if (RTy->isPromotableIntegerType()) {
+ if (Ctx.isPromotableIntegerType(RTy)) {
QualType NewTy = Ctx.getPromotedIntegerType(RTy);
uint64_t NewBitWidth = Ctx.getTypeSize(NewTy);
RHS = (*doCast)(Solver, RHS, NewTy, NewBitWidth, RTy, RBitWidth);
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
index 87a49cf4ffe9..d7cff49036cb 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
@@ -28,11 +28,12 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "llvm/ADT/ImmutableList.h"
-#include "llvm/ADT/Optional.h"
#include <cstdint>
+#include <optional>
namespace clang {
+class AnalyzerOptions;
class BlockDecl;
class CXXBoolLiteralExpr;
class CXXMethodDecl;
@@ -66,65 +67,28 @@ protected:
ProgramStateManager &StateMgr;
+ const AnalyzerOptions &AnOpts;
+
/// The scalar type to use for array indices.
const QualType ArrayIndexTy;
/// The width of the scalar type used for array indices.
const unsigned ArrayIndexWidth;
- SVal evalCastKind(UndefinedVal V, QualType CastTy, QualType OriginalTy);
- SVal evalCastKind(UnknownVal V, QualType CastTy, QualType OriginalTy);
- SVal evalCastKind(Loc V, QualType CastTy, QualType OriginalTy);
- SVal evalCastKind(NonLoc V, QualType CastTy, QualType OriginalTy);
- SVal evalCastSubKind(loc::ConcreteInt V, QualType CastTy,
- QualType OriginalTy);
- SVal evalCastSubKind(loc::GotoLabel V, QualType CastTy, QualType OriginalTy);
- SVal evalCastSubKind(loc::MemRegionVal V, QualType CastTy,
- QualType OriginalTy);
- SVal evalCastSubKind(nonloc::CompoundVal V, QualType CastTy,
- QualType OriginalTy);
- SVal evalCastSubKind(nonloc::ConcreteInt V, QualType CastTy,
- QualType OriginalTy);
- SVal evalCastSubKind(nonloc::LazyCompoundVal V, QualType CastTy,
- QualType OriginalTy);
- SVal evalCastSubKind(nonloc::LocAsInteger V, QualType CastTy,
- QualType OriginalTy);
- SVal evalCastSubKind(nonloc::SymbolVal V, QualType CastTy,
- QualType OriginalTy);
- SVal evalCastSubKind(nonloc::PointerToMember V, QualType CastTy,
- QualType OriginalTy);
-
public:
SValBuilder(llvm::BumpPtrAllocator &alloc, ASTContext &context,
- ProgramStateManager &stateMgr)
- : Context(context), BasicVals(context, alloc),
- SymMgr(context, BasicVals, alloc), MemMgr(context, alloc),
- StateMgr(stateMgr), ArrayIndexTy(context.LongLongTy),
- ArrayIndexWidth(context.getTypeSize(ArrayIndexTy)) {}
+ ProgramStateManager &stateMgr);
virtual ~SValBuilder() = default;
- bool haveSameType(const SymExpr *Sym1, const SymExpr *Sym2) {
- return haveSameType(Sym1->getType(), Sym2->getType());
- }
-
- bool haveSameType(QualType Ty1, QualType Ty2) {
- // FIXME: Remove the second disjunct when we support symbolic
- // truncation/extension.
- return (Context.getCanonicalType(Ty1) == Context.getCanonicalType(Ty2) ||
- (Ty1->isIntegralOrEnumerationType() &&
- Ty2->isIntegralOrEnumerationType()));
- }
-
SVal evalCast(SVal V, QualType CastTy, QualType OriginalTy);
// Handles casts of type CK_IntegralCast.
SVal evalIntegralCast(ProgramStateRef state, SVal val, QualType castTy,
QualType originalType);
- virtual SVal evalMinus(NonLoc val) = 0;
-
- virtual SVal evalComplement(NonLoc val) = 0;
+ SVal evalMinus(NonLoc val);
+ SVal evalComplement(NonLoc val);
/// Create a new value which represents a binary expression with two non-
/// location operands.
@@ -146,6 +110,14 @@ public:
/// that value is returned. Otherwise, returns NULL.
virtual const llvm::APSInt *getKnownValue(ProgramStateRef state, SVal val) = 0;
+ /// Tries to get the minimal possible (integer) value of a given SVal. If the
+ /// constraint manager cannot provide an useful answer, this returns NULL.
+ virtual const llvm::APSInt *getMinValue(ProgramStateRef state, SVal val) = 0;
+
+ /// Tries to get the maximal possible (integer) value of a given SVal. If the
+ /// constraint manager cannot provide an useful answer, this returns NULL.
+ virtual const llvm::APSInt *getMaxValue(ProgramStateRef state, SVal val) = 0;
+
/// Simplify symbolic expressions within a given SVal. Return an SVal
/// that represents the same value, but is hopefully easier to work with
/// than the original SVal.
@@ -155,6 +127,9 @@ public:
SVal makeSymExprValNN(BinaryOperator::Opcode op,
NonLoc lhs, NonLoc rhs, QualType resultTy);
+ SVal evalUnaryOp(ProgramStateRef state, UnaryOperator::Opcode opc,
+ SVal operand, QualType type);
+
SVal evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
SVal lhs, SVal rhs, QualType type);
@@ -188,6 +163,8 @@ public:
MemRegionManager &getRegionManager() { return MemMgr; }
const MemRegionManager &getRegionManager() const { return MemMgr; }
+ const AnalyzerOptions &getAnalyzerOptions() const { return AnOpts; }
+
// Forwarding methods to SymbolManager.
const SymbolConjured* conjureSymbol(const Stmt *stmt,
@@ -246,6 +223,15 @@ public:
const LocationContext *LCtx,
QualType type, unsigned Count);
+ /// Create an SVal representing the result of an alloca()-like call, that is,
+ /// an AllocaRegion on the stack.
+ ///
+ /// After calling this function, it's a good idea to set the extent of the
+ /// returned AllocaRegion.
+ loc::MemRegionVal getAllocaRegionVal(const Expr *E,
+ const LocationContext *LCtx,
+ unsigned Count);
+
DefinedOrUnknownSVal getDerivedRegionValueSymbolVal(
SymbolRef parentSymbol, const TypedValueRegion *region);
@@ -266,8 +252,8 @@ public:
/// Returns the value of \p E, if it can be determined in a non-path-sensitive
/// manner.
///
- /// If \p E is not a constant or cannot be modeled, returns \c None.
- Optional<SVal> getConstantVal(const Expr *E);
+ /// If \p E is not a constant or cannot be modeled, returns \c std::nullopt.
+ std::optional<SVal> getConstantVal(const Expr *E);
NonLoc makeCompoundVal(QualType type, llvm::ImmutableList<SVal> vals) {
return nonloc::CompoundVal(BasicVals.getCompoundValData(type, vals));
@@ -332,26 +318,30 @@ public:
return nonloc::ConcreteInt(BasicVals.getIntValue(integer, isUnsigned));
}
- NonLoc makeIntValWithPtrWidth(uint64_t integer, bool isUnsigned) {
- return nonloc::ConcreteInt(
- BasicVals.getIntWithPtrWidth(integer, isUnsigned));
+ NonLoc makeIntValWithWidth(QualType ptrType, uint64_t integer) {
+ return nonloc::ConcreteInt(BasicVals.getValue(integer, ptrType));
}
NonLoc makeLocAsInteger(Loc loc, unsigned bits) {
return nonloc::LocAsInteger(BasicVals.getPersistentSValWithData(loc, bits));
}
- NonLoc makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
- const llvm::APSInt& rhs, QualType type);
+ nonloc::SymbolVal makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
+ const llvm::APSInt &rhs, QualType type);
+
+ nonloc::SymbolVal makeNonLoc(const llvm::APSInt &rhs,
+ BinaryOperator::Opcode op, const SymExpr *lhs,
+ QualType type);
- NonLoc makeNonLoc(const llvm::APSInt& rhs, BinaryOperator::Opcode op,
- const SymExpr *lhs, QualType type);
+ nonloc::SymbolVal makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
+ const SymExpr *rhs, QualType type);
- NonLoc makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
- const SymExpr *rhs, QualType type);
+ NonLoc makeNonLoc(const SymExpr *operand, UnaryOperator::Opcode op,
+ QualType type);
/// Create a NonLoc value for cast.
- NonLoc makeNonLoc(const SymExpr *operand, QualType fromTy, QualType toTy);
+ nonloc::SymbolVal makeNonLoc(const SymExpr *operand, QualType fromTy,
+ QualType toTy);
nonloc::ConcreteInt makeTruthVal(bool b, QualType type) {
return nonloc::ConcreteInt(BasicVals.getTruthValue(b, type));
@@ -364,38 +354,46 @@ public:
/// Create NULL pointer, with proper pointer bit-width for given address
/// space.
/// \param type pointer type.
- Loc makeNullWithType(QualType type) {
+ loc::ConcreteInt makeNullWithType(QualType type) {
+ // We cannot use the `isAnyPointerType()`.
+ assert((type->isPointerType() || type->isObjCObjectPointerType() ||
+ type->isBlockPointerType() || type->isNullPtrType() ||
+ type->isReferenceType()) &&
+ "makeNullWithType must use pointer type");
+
+ // The `sizeof(T&)` is `sizeof(T)`, thus we replace the reference with a
+ // pointer. Here we assume that references are actually implemented by
+ // pointers under-the-hood.
+ type = type->isReferenceType()
+ ? Context.getPointerType(type->getPointeeType())
+ : type;
return loc::ConcreteInt(BasicVals.getZeroWithTypeSize(type));
}
- Loc makeNull() {
- return loc::ConcreteInt(BasicVals.getZeroWithPtrWidth());
- }
-
- Loc makeLoc(SymbolRef sym) {
+ loc::MemRegionVal makeLoc(SymbolRef sym) {
return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
}
- Loc makeLoc(const MemRegion* region) {
+ loc::MemRegionVal makeLoc(const MemRegion *region) {
return loc::MemRegionVal(region);
}
- Loc makeLoc(const AddrLabelExpr *expr) {
+ loc::GotoLabel makeLoc(const AddrLabelExpr *expr) {
return loc::GotoLabel(expr->getLabel());
}
- Loc makeLoc(const llvm::APSInt& integer) {
+ loc::ConcreteInt makeLoc(const llvm::APSInt &integer) {
return loc::ConcreteInt(BasicVals.getValue(integer));
}
- /// Return MemRegionVal on success cast, otherwise return None.
- Optional<loc::MemRegionVal> getCastedMemRegionVal(const MemRegion *region,
- QualType type);
+ /// Return MemRegionVal on success cast, otherwise return std::nullopt.
+ std::optional<loc::MemRegionVal>
+ getCastedMemRegionVal(const MemRegion *region, QualType type);
/// Make an SVal that represents the given symbol. This follows the convention
/// of representing Loc-type symbols (symbolic pointers and references)
/// as Loc values wrapping the symbol rather than as plain symbol values.
- SVal makeSymbolVal(SymbolRef Sym) {
+ DefinedSVal makeSymbolVal(SymbolRef Sym) {
if (Loc::isLocType(Sym->getType()))
return makeLoc(Sym);
return nonloc::SymbolVal(Sym);
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValVisitor.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValVisitor.h
index fc83e26183b3..b10f416f4435 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValVisitor.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValVisitor.h
@@ -14,9 +14,9 @@
#ifndef LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_SVALVISITOR_H
#define LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_SVALVISITOR_H
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
namespace clang {
@@ -25,49 +25,40 @@ namespace ento {
/// SValVisitor - this class implements a simple visitor for SVal
/// subclasses.
template <typename ImplClass, typename RetTy = void> class SValVisitor {
-public:
-
-#define DISPATCH(NAME, CLASS) \
- return static_cast<ImplClass *>(this)->Visit ## NAME(V.castAs<CLASS>())
+ ImplClass &derived() { return *static_cast<ImplClass *>(this); }
+public:
RetTy Visit(SVal V) {
// Dispatch to VisitFooVal for each FooVal.
- // Take namespaces (loc:: and nonloc::) into account.
- switch (V.getBaseKind()) {
-#define BASIC_SVAL(Id, Parent) case SVal::Id ## Kind: DISPATCH(Id, Id);
+ switch (V.getKind()) {
+#define BASIC_SVAL(Id, Parent) \
+ case SVal::Id##Kind: \
+ return derived().Visit##Id(V.castAs<Id>());
+#define LOC_SVAL(Id, Parent) \
+ case SVal::Loc##Id##Kind: \
+ return derived().Visit##Id(V.castAs<loc::Id>());
+#define NONLOC_SVAL(Id, Parent) \
+ case SVal::NonLoc##Id##Kind: \
+ return derived().Visit##Id(V.castAs<nonloc::Id>());
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.def"
- case SVal::LocKind:
- switch (V.getSubKind()) {
-#define LOC_SVAL(Id, Parent) \
- case loc::Id ## Kind: DISPATCH(Loc ## Id, loc :: Id);
-#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.def"
- }
- llvm_unreachable("Unknown Loc sub-kind!");
- case SVal::NonLocKind:
- switch (V.getSubKind()) {
-#define NONLOC_SVAL(Id, Parent) \
- case nonloc::Id ## Kind: DISPATCH(NonLoc ## Id, nonloc :: Id);
-#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.def"
- }
- llvm_unreachable("Unknown NonLoc sub-kind!");
}
llvm_unreachable("Unknown SVal kind!");
}
-#define BASIC_SVAL(Id, Parent) \
- RetTy Visit ## Id(Id V) { DISPATCH(Parent, Id); }
-#define ABSTRACT_SVAL(Id, Parent) \
- BASIC_SVAL(Id, Parent)
-#define LOC_SVAL(Id, Parent) \
- RetTy VisitLoc ## Id(loc::Id V) { DISPATCH(Parent, Parent); }
-#define NONLOC_SVAL(Id, Parent) \
- RetTy VisitNonLoc ## Id(nonloc::Id V) { DISPATCH(Parent, Parent); }
+ // Dispatch to the more generic handler as a default implementation.
+#define BASIC_SVAL(Id, Parent) \
+ RetTy Visit##Id(Id V) { return derived().Visit##Parent(V.castAs<Id>()); }
+#define ABSTRACT_SVAL(Id, Parent) BASIC_SVAL(Id, Parent)
+#define LOC_SVAL(Id, Parent) \
+ RetTy Visit##Id(loc::Id V) { return derived().VisitLoc(V.castAs<Loc>()); }
+#define NONLOC_SVAL(Id, Parent) \
+ RetTy Visit##Id(nonloc::Id V) { \
+ return derived().VisitNonLoc(V.castAs<NonLoc>()); \
+ }
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.def"
// Base case, ignore it. :)
RetTy VisitSVal(SVal V) { return RetTy(); }
-
-#undef DISPATCH
};
/// SymExprVisitor - this class implements a simple visitor for SymExpr
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.def b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.def
index eb05de6d9933..36d2425d155a 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.def
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.def
@@ -6,28 +6,24 @@
//
//===----------------------------------------------------------------------===//
//
-// The list of symbolic values (SVal kinds and sub-kinds) used in the Static
-// Analyzer. The distinction between loc:: and nonloc:: SVal namespaces is
+// The list of symbolic values (SVal kinds) used in the Static Analyzer.
+// The distinction between `loc::` and `nonloc::` SVal namespaces is
// currently hardcoded, because it is too peculiar and explicit to be handled
// uniformly. In order to use this information, users of this file must define
// one or more of the following macros:
//
-// BASIC_SVAL(Id, Parent) - for specific SVal sub-kinds, which are
-// neither in loc:: nor in nonloc:: namespace; these classes occupy
-// their own base kind IdKind.
+// BASIC_SVAL(Id, Parent) - for specific SVal kinds, which are
+// neither in `loc::` nor in `nonloc::` namespace.
//
// ABSTRACT_SVAL(Id, Parent) - for abstract SVal classes which are
-// neither in loc:: nor in nonloc:: namespace,
+// neither in `loc::` nor in `nonloc::` namespace,
//
-// ABSTRACT_SVAL_WITH_KIND(Id, Parent) - for SVal classes which are also
-// neither in loc:: nor in nonloc:: namespace, but occupy a whole base kind
-// identifier IdKind, much like BASIC_SVALs.
+// LOC_SVAL(Id, Parent) - for values in `loc::` namespace.
//
-// LOC_SVAL(Id, Parent) - for values in loc:: namespace, which occupy a sub-kind
-// loc::IdKind.
+// NONLOC_SVAL(Id, Parent) - for values in `nonloc::` namespace.
//
-// NONLOC_SVAL(Id, Parent) - for values in nonloc:: namespace, which occupy a
-// sub-kind nonloc::IdKind.
+// SVAL_RANGE(Id, First, Last) - for defining range of subtypes of
+// the abstract class `Id`.
//
//===----------------------------------------------------------------------===//
@@ -39,10 +35,6 @@
#define ABSTRACT_SVAL(Id, Parent)
#endif
-#ifndef ABSTRACT_SVAL_WITH_KIND
-#define ABSTRACT_SVAL_WITH_KIND(Id, Parent) ABSTRACT_SVAL(Id, Parent)
-#endif
-
#ifndef LOC_SVAL
#define LOC_SVAL(Id, Parent)
#endif
@@ -51,24 +43,30 @@
#define NONLOC_SVAL(Id, Parent)
#endif
+#ifndef SVAL_RANGE
+#define SVAL_RANGE(Id, First, Last)
+#endif
+
BASIC_SVAL(UndefinedVal, SVal)
ABSTRACT_SVAL(DefinedOrUnknownSVal, SVal)
BASIC_SVAL(UnknownVal, DefinedOrUnknownSVal)
ABSTRACT_SVAL(DefinedSVal, DefinedOrUnknownSVal)
- ABSTRACT_SVAL_WITH_KIND(Loc, DefinedSVal)
+ ABSTRACT_SVAL(Loc, DefinedSVal)
LOC_SVAL(ConcreteInt, Loc)
LOC_SVAL(GotoLabel, Loc)
LOC_SVAL(MemRegionVal, Loc)
- ABSTRACT_SVAL_WITH_KIND(NonLoc, DefinedSVal)
+ SVAL_RANGE(Loc, ConcreteInt, MemRegionVal)
+ ABSTRACT_SVAL(NonLoc, DefinedSVal)
NONLOC_SVAL(CompoundVal, NonLoc)
NONLOC_SVAL(ConcreteInt, NonLoc)
NONLOC_SVAL(LazyCompoundVal, NonLoc)
NONLOC_SVAL(LocAsInteger, NonLoc)
NONLOC_SVAL(SymbolVal, NonLoc)
NONLOC_SVAL(PointerToMember, NonLoc)
+ SVAL_RANGE(NonLoc, CompoundVal, PointerToMember)
+#undef SVAL_RANGE
#undef NONLOC_SVAL
#undef LOC_SVAL
-#undef ABSTRACT_SVAL_WITH_KIND
#undef ABSTRACT_SVAL
#undef BASIC_SVAL
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
index 6199c8d8d179..c60528b7685f 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
@@ -18,14 +18,16 @@
#include "clang/AST/Type.h"
#include "clang/Basic/LLVM.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
+#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableList.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/STLForwardCompat.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <cstdint>
+#include <optional>
#include <utility>
//==------------------------------------------------------------------------==//
@@ -35,13 +37,11 @@
namespace clang {
class CXXBaseSpecifier;
-class DeclaratorDecl;
class FunctionDecl;
class LabelDecl;
namespace ento {
-class BasicValueFactory;
class CompoundValData;
class LazyCompoundValData;
class MemRegion;
@@ -49,105 +49,63 @@ class PointerToMemberData;
class SValBuilder;
class TypedValueRegion;
-namespace nonloc {
-
-/// Sub-kinds for NonLoc values.
-enum Kind {
-#define NONLOC_SVAL(Id, Parent) Id ## Kind,
-#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.def"
-};
-
-} // namespace nonloc
-
-namespace loc {
-
-/// Sub-kinds for Loc values.
-enum Kind {
-#define LOC_SVAL(Id, Parent) Id ## Kind,
-#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.def"
-};
-
-} // namespace loc
-
/// SVal - This represents a symbolic expression, which can be either
/// an L-value or an R-value.
///
class SVal {
public:
- enum BaseKind {
- // The enumerators must be representable using 2 bits.
-#define BASIC_SVAL(Id, Parent) Id ## Kind,
-#define ABSTRACT_SVAL_WITH_KIND(Id, Parent) Id ## Kind,
+ enum SValKind : unsigned char {
+#define BASIC_SVAL(Id, Parent) Id##Kind,
+#define LOC_SVAL(Id, Parent) Loc##Id##Kind,
+#define NONLOC_SVAL(Id, Parent) NonLoc##Id##Kind,
+#define SVAL_RANGE(Id, First, Last) \
+ BEGIN_##Id = Id##First##Kind, END_##Id = Id##Last##Kind,
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.def"
};
- enum { BaseBits = 2, BaseMask = 0b11 };
protected:
const void *Data = nullptr;
+ SValKind Kind = UndefinedValKind;
- /// The lowest 2 bits are a BaseKind (0 -- 3).
- /// The higher bits are an unsigned "kind" value.
- unsigned Kind = 0;
+ explicit SVal(SValKind Kind, const void *Data = nullptr)
+ : Data(Data), Kind(Kind) {}
- explicit SVal(const void *d, bool isLoc, unsigned ValKind)
- : Data(d), Kind((isLoc ? LocKind : NonLocKind) | (ValKind << BaseBits)) {}
-
- explicit SVal(BaseKind k, const void *D = nullptr) : Data(D), Kind(k) {}
+ template <typename T> const T *castDataAs() const {
+ return static_cast<const T *>(Data);
+ }
public:
explicit SVal() = default;
/// Convert to the specified SVal type, asserting that this SVal is of
/// the desired type.
- template<typename T>
- T castAs() const {
- assert(T::isKind(*this));
- return *static_cast<const T *>(this);
- }
+ template <typename T> T castAs() const { return llvm::cast<T>(*this); }
- /// Convert to the specified SVal type, returning None if this SVal is
+ /// Convert to the specified SVal type, returning std::nullopt if this SVal is
/// not of the desired type.
- template<typename T>
- Optional<T> getAs() const {
- if (!T::isKind(*this))
- return None;
- return *static_cast<const T *>(this);
+ template <typename T> std::optional<T> getAs() const {
+ return llvm::dyn_cast<T>(*this);
}
- unsigned getRawKind() const { return Kind; }
- BaseKind getBaseKind() const { return (BaseKind) (Kind & BaseMask); }
- unsigned getSubKind() const { return Kind >> BaseBits; }
+ SValKind getKind() const { return Kind; }
// This method is required for using SVal in a FoldingSetNode. It
// extracts a unique signature for this SVal object.
void Profile(llvm::FoldingSetNodeID &ID) const {
- ID.AddInteger((unsigned) getRawKind());
ID.AddPointer(Data);
+ ID.AddInteger(llvm::to_underlying(getKind()));
}
- bool operator==(const SVal &R) const {
- return getRawKind() == R.getRawKind() && Data == R.Data;
- }
+ bool operator==(SVal R) const { return Kind == R.Kind && Data == R.Data; }
+ bool operator!=(SVal R) const { return !(*this == R); }
- bool operator!=(const SVal &R) const {
- return !(*this == R);
- }
+ bool isUnknown() const { return getKind() == UnknownValKind; }
- bool isUnknown() const {
- return getRawKind() == UnknownValKind;
- }
+ bool isUndef() const { return getKind() == UndefinedValKind; }
- bool isUndef() const {
- return getRawKind() == UndefinedValKind;
- }
+ bool isUnknownOrUndef() const { return isUnknown() || isUndef(); }
- bool isUnknownOrUndef() const {
- return getRawKind() <= UnknownValKind;
- }
-
- bool isValid() const {
- return getRawKind() > UnknownValKind;
- }
+ bool isValid() const { return !isUnknownOrUndef(); }
bool isConstant() const;
@@ -155,9 +113,6 @@ public:
bool isZeroConstant() const;
- /// hasConjuredSymbol - If this SVal wraps a conjured symbol, return true;
- bool hasConjuredSymbol() const;
-
/// getAsFunctionDecl - If this SVal is a MemRegionVal and wraps a
/// CodeTextRegion wrapping a FunctionDecl, return that FunctionDecl.
/// Otherwise return 0.
@@ -182,6 +137,11 @@ public:
/// should continue to the base regions if the region is not symbolic.
SymbolRef getAsSymbol(bool IncludeBaseRegions = false) const;
+ /// If this SVal is loc::ConcreteInt or nonloc::ConcreteInt,
+ /// return a pointer to APSInt which is held in it.
+ /// Otherwise, return nullptr.
+ const llvm::APSInt *getAsInteger() const;
+
const MemRegion *getAsRegion() const;
/// printJson - Pretty-prints in JSON format.
@@ -190,16 +150,11 @@ public:
void dumpToStream(raw_ostream &OS) const;
void dump() const;
- SymExpr::symbol_iterator symbol_begin() const {
- const SymExpr *SE = getAsSymbol(/*IncludeBaseRegions=*/true);
- if (SE)
- return SE->symbol_begin();
- else
- return SymExpr::symbol_iterator();
- }
-
- SymExpr::symbol_iterator symbol_end() const {
- return SymExpr::symbol_end();
+ llvm::iterator_range<SymExpr::symbol_iterator> symbols() const {
+ if (const SymExpr *SE = getAsSymbol(/*IncludeBaseRegions=*/true))
+ return SE->symbols();
+ SymExpr::symbol_iterator end{};
+ return llvm::make_range(end, end);
}
/// Try to get a reasonable type for the given value.
@@ -221,16 +176,24 @@ inline raw_ostream &operator<<(raw_ostream &os, clang::ento::SVal V) {
return os;
}
+namespace nonloc {
+/// Sub-kinds for NonLoc values.
+#define NONLOC_SVAL(Id, Parent) \
+ inline constexpr auto Id##Kind = SVal::SValKind::NonLoc##Id##Kind;
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.def"
+} // namespace nonloc
+
+namespace loc {
+/// Sub-kinds for Loc values.
+#define LOC_SVAL(Id, Parent) \
+ inline constexpr auto Id##Kind = SVal::SValKind::Loc##Id##Kind;
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.def"
+} // namespace loc
+
class UndefinedVal : public SVal {
public:
UndefinedVal() : SVal(UndefinedValKind) {}
-
-private:
- friend class SVal;
-
- static bool isKind(const SVal& V) {
- return V.getBaseKind() == UndefinedValKind;
- }
+ static bool classof(SVal V) { return V.getKind() == UndefinedValKind; }
};
class DefinedOrUnknownSVal : public SVal {
@@ -240,30 +203,18 @@ public:
bool isUndef() const = delete;
bool isValid() const = delete;
-protected:
- DefinedOrUnknownSVal() = default;
- explicit DefinedOrUnknownSVal(const void *d, bool isLoc, unsigned ValKind)
- : SVal(d, isLoc, ValKind) {}
- explicit DefinedOrUnknownSVal(BaseKind k, void *D = nullptr) : SVal(k, D) {}
-
-private:
- friend class SVal;
+ static bool classof(SVal V) { return !V.isUndef(); }
- static bool isKind(const SVal& V) {
- return !V.isUndef();
- }
+protected:
+ explicit DefinedOrUnknownSVal(SValKind Kind, const void *Data = nullptr)
+ : SVal(Kind, Data) {}
};
class UnknownVal : public DefinedOrUnknownSVal {
public:
explicit UnknownVal() : DefinedOrUnknownSVal(UnknownValKind) {}
-private:
- friend class SVal;
-
- static bool isKind(const SVal &V) {
- return V.getBaseKind() == UnknownValKind;
- }
+ static bool classof(SVal V) { return V.getKind() == UnknownValKind; }
};
class DefinedSVal : public DefinedOrUnknownSVal {
@@ -274,39 +225,24 @@ public:
bool isUnknownOrUndef() const = delete;
bool isValid() const = delete;
-protected:
- DefinedSVal() = default;
- explicit DefinedSVal(const void *d, bool isLoc, unsigned ValKind)
- : DefinedOrUnknownSVal(d, isLoc, ValKind) {}
-
-private:
- friend class SVal;
+ static bool classof(SVal V) { return !V.isUnknownOrUndef(); }
- static bool isKind(const SVal& V) {
- return !V.isUnknownOrUndef();
- }
+protected:
+ explicit DefinedSVal(SValKind Kind, const void *Data)
+ : DefinedOrUnknownSVal(Kind, Data) {}
};
/// Represents an SVal that is guaranteed to not be UnknownVal.
class KnownSVal : public SVal {
- friend class SVal;
-
- KnownSVal() = default;
-
- static bool isKind(const SVal &V) {
- return !V.isUnknown();
- }
-
public:
- KnownSVal(const DefinedSVal &V) : SVal(V) {}
- KnownSVal(const UndefinedVal &V) : SVal(V) {}
+ /*implicit*/ KnownSVal(DefinedSVal V) : SVal(V) {}
+ /*implicit*/ KnownSVal(UndefinedVal V) : SVal(V) {}
+ static bool classof(SVal V) { return !V.isUnknown(); }
};
class NonLoc : public DefinedSVal {
protected:
- NonLoc() = default;
- explicit NonLoc(unsigned SubKind, const void *d)
- : DefinedSVal(d, false, SubKind) {}
+ NonLoc(SValKind Kind, const void *Data) : DefinedSVal(Kind, Data) {}
public:
void dumpToStream(raw_ostream &Out) const;
@@ -316,19 +252,14 @@ public:
T->isAnyComplexType() || T->isVectorType();
}
-private:
- friend class SVal;
-
- static bool isKind(const SVal& V) {
- return V.getBaseKind() == NonLocKind;
+ static bool classof(SVal V) {
+ return BEGIN_NonLoc <= V.getKind() && V.getKind() <= END_NonLoc;
}
};
class Loc : public DefinedSVal {
protected:
- Loc() = default;
- explicit Loc(unsigned SubKind, const void *D)
- : DefinedSVal(const_cast<void *>(D), true, SubKind) {}
+ Loc(SValKind Kind, const void *Data) : DefinedSVal(Kind, Data) {}
public:
void dumpToStream(raw_ostream &Out) const;
@@ -338,11 +269,8 @@ public:
T->isReferenceType() || T->isNullPtrType();
}
-private:
- friend class SVal;
-
- static bool isKind(const SVal& V) {
- return V.getBaseKind() == LocKind;
+ static bool classof(SVal V) {
+ return BEGIN_Loc <= V.getKind() && V.getKind() <= END_Loc;
}
};
@@ -356,11 +284,12 @@ namespace nonloc {
class SymbolVal : public NonLoc {
public:
SymbolVal() = delete;
- SymbolVal(SymbolRef sym) : NonLoc(SymbolValKind, sym) {
- assert(sym);
- assert(!Loc::isLocType(sym->getType()));
+ explicit SymbolVal(SymbolRef Sym) : NonLoc(SymbolValKind, Sym) {
+ assert(Sym);
+ assert(!Loc::isLocType(Sym->getType()));
}
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
SymbolRef getSymbol() const {
return (const SymExpr *) Data;
}
@@ -369,49 +298,17 @@ public:
return !isa<SymbolData>(getSymbol());
}
-private:
- friend class SVal;
-
- static bool isKind(const SVal& V) {
- return V.getBaseKind() == NonLocKind &&
- V.getSubKind() == SymbolValKind;
- }
-
- static bool isKind(const NonLoc& V) {
- return V.getSubKind() == SymbolValKind;
- }
+ static bool classof(SVal V) { return V.getKind() == SymbolValKind; }
};
/// Value representing integer constant.
class ConcreteInt : public NonLoc {
public:
- explicit ConcreteInt(const llvm::APSInt& V) : NonLoc(ConcreteIntKind, &V) {}
-
- const llvm::APSInt& getValue() const {
- return *static_cast<const llvm::APSInt *>(Data);
- }
-
- // Transfer functions for binary/unary operations on ConcreteInts.
- SVal evalBinOp(SValBuilder &svalBuilder, BinaryOperator::Opcode Op,
- const ConcreteInt& R) const;
-
- ConcreteInt evalComplement(SValBuilder &svalBuilder) const;
+ explicit ConcreteInt(const llvm::APSInt &V) : NonLoc(ConcreteIntKind, &V) {}
- ConcreteInt evalMinus(SValBuilder &svalBuilder) const;
+ const llvm::APSInt &getValue() const { return *castDataAs<llvm::APSInt>(); }
-private:
- friend class SVal;
-
- ConcreteInt() = default;
-
- static bool isKind(const SVal& V) {
- return V.getBaseKind() == NonLocKind &&
- V.getSubKind() == ConcreteIntKind;
- }
-
- static bool isKind(const NonLoc& V) {
- return V.getSubKind() == ConcreteIntKind;
- }
+ static bool classof(SVal V) { return V.getKind() == ConcreteIntKind; }
};
class LocAsInteger : public NonLoc {
@@ -421,102 +318,63 @@ class LocAsInteger : public NonLoc {
: NonLoc(LocAsIntegerKind, &data) {
// We do not need to represent loc::ConcreteInt as LocAsInteger,
// as it'd collapse into a nonloc::ConcreteInt instead.
- assert(data.first.getBaseKind() == LocKind &&
- (data.first.getSubKind() == loc::MemRegionValKind ||
- data.first.getSubKind() == loc::GotoLabelKind));
+ [[maybe_unused]] SValKind K = data.first.getKind();
+ assert(K == loc::MemRegionValKind || K == loc::GotoLabelKind);
}
public:
Loc getLoc() const {
- const std::pair<SVal, uintptr_t> *D =
- static_cast<const std::pair<SVal, uintptr_t> *>(Data);
- return D->first.castAs<Loc>();
- }
-
- Loc getPersistentLoc() const {
- const std::pair<SVal, uintptr_t> *D =
- static_cast<const std::pair<SVal, uintptr_t> *>(Data);
- const SVal& V = D->first;
- return V.castAs<Loc>();
+ return castDataAs<std::pair<SVal, uintptr_t>>()->first.castAs<Loc>();
}
unsigned getNumBits() const {
- const std::pair<SVal, uintptr_t> *D =
- static_cast<const std::pair<SVal, uintptr_t> *>(Data);
- return D->second;
- }
-
-private:
- friend class SVal;
-
- LocAsInteger() = default;
-
- static bool isKind(const SVal& V) {
- return V.getBaseKind() == NonLocKind &&
- V.getSubKind() == LocAsIntegerKind;
+ return castDataAs<std::pair<SVal, uintptr_t>>()->second;
}
- static bool isKind(const NonLoc& V) {
- return V.getSubKind() == LocAsIntegerKind;
- }
+ static bool classof(SVal V) { return V.getKind() == LocAsIntegerKind; }
};
class CompoundVal : public NonLoc {
friend class ento::SValBuilder;
- explicit CompoundVal(const CompoundValData* D) : NonLoc(CompoundValKind, D) {}
+ explicit CompoundVal(const CompoundValData *D) : NonLoc(CompoundValKind, D) {
+ assert(D);
+ }
public:
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const CompoundValData* getValue() const {
- return static_cast<const CompoundValData *>(Data);
+ return castDataAs<CompoundValData>();
}
using iterator = llvm::ImmutableList<SVal>::iterator;
-
iterator begin() const;
iterator end() const;
-private:
- friend class SVal;
-
- CompoundVal() = default;
-
- static bool isKind(const SVal& V) {
- return V.getBaseKind() == NonLocKind && V.getSubKind() == CompoundValKind;
- }
-
- static bool isKind(const NonLoc& V) {
- return V.getSubKind() == CompoundValKind;
- }
+ static bool classof(SVal V) { return V.getKind() == CompoundValKind; }
};
class LazyCompoundVal : public NonLoc {
friend class ento::SValBuilder;
explicit LazyCompoundVal(const LazyCompoundValData *D)
- : NonLoc(LazyCompoundValKind, D) {}
+ : NonLoc(LazyCompoundValKind, D) {
+ assert(D);
+ }
public:
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const LazyCompoundValData *getCVData() const {
- return static_cast<const LazyCompoundValData *>(Data);
+ return castDataAs<LazyCompoundValData>();
}
+ /// It might return null.
const void *getStore() const;
- const TypedValueRegion *getRegion() const;
-
-private:
- friend class SVal;
-
- LazyCompoundVal() = default;
- static bool isKind(const SVal& V) {
- return V.getBaseKind() == NonLocKind &&
- V.getSubKind() == LazyCompoundValKind;
- }
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
+ const TypedValueRegion *getRegion() const;
- static bool isKind(const NonLoc& V) {
- return V.getSubKind() == LazyCompoundValKind;
- }
+ static bool classof(SVal V) { return V.getKind() == LazyCompoundValKind; }
};
/// Value representing pointer-to-member.
@@ -554,21 +412,11 @@ public:
iterator begin() const;
iterator end() const;
-private:
- friend class SVal;
+ static bool classof(SVal V) { return V.getKind() == PointerToMemberKind; }
- PointerToMember() = default;
+private:
explicit PointerToMember(const PTMDataType D)
: NonLoc(PointerToMemberKind, D.getOpaqueValue()) {}
-
- static bool isKind(const SVal& V) {
- return V.getBaseKind() == NonLocKind &&
- V.getSubKind() == PointerToMemberKind;
- }
-
- static bool isKind(const NonLoc& V) {
- return V.getSubKind() == PointerToMemberKind;
- }
};
} // namespace nonloc
@@ -585,36 +433,23 @@ public:
assert(Label);
}
- const LabelDecl *getLabel() const {
- return static_cast<const LabelDecl *>(Data);
- }
+ const LabelDecl *getLabel() const { return castDataAs<LabelDecl>(); }
-private:
- friend class SVal;
-
- GotoLabel() = default;
-
- static bool isKind(const SVal& V) {
- return V.getBaseKind() == LocKind && V.getSubKind() == GotoLabelKind;
- }
-
- static bool isKind(const Loc& V) {
- return V.getSubKind() == GotoLabelKind;
- }
+ static bool classof(SVal V) { return V.getKind() == GotoLabelKind; }
};
class MemRegionVal : public Loc {
public:
- explicit MemRegionVal(const MemRegion* r) : Loc(MemRegionValKind, r) {
+ explicit MemRegionVal(const MemRegion *r) : Loc(MemRegionValKind, r) {
assert(r);
}
/// Get the underlining region.
- const MemRegion *getRegion() const {
- return static_cast<const MemRegion *>(Data);
- }
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
+ const MemRegion *getRegion() const { return castDataAs<MemRegion>(); }
/// Get the underlining region and strip casts.
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const MemRegion* stripCasts(bool StripBaseCasts = true) const;
template <typename REGION>
@@ -630,52 +465,44 @@ public:
return getRegion() != R.getRegion();
}
-private:
- friend class SVal;
-
- MemRegionVal() = default;
-
- static bool isKind(const SVal& V) {
- return V.getBaseKind() == LocKind &&
- V.getSubKind() == MemRegionValKind;
- }
-
- static bool isKind(const Loc& V) {
- return V.getSubKind() == MemRegionValKind;
- }
+ static bool classof(SVal V) { return V.getKind() == MemRegionValKind; }
};
class ConcreteInt : public Loc {
public:
- explicit ConcreteInt(const llvm::APSInt& V) : Loc(ConcreteIntKind, &V) {}
-
- const llvm::APSInt &getValue() const {
- return *static_cast<const llvm::APSInt *>(Data);
- }
-
- // Transfer functions for binary/unary operations on ConcreteInts.
- SVal evalBinOp(BasicValueFactory& BasicVals, BinaryOperator::Opcode Op,
- const ConcreteInt& R) const;
-
-private:
- friend class SVal;
+ explicit ConcreteInt(const llvm::APSInt &V) : Loc(ConcreteIntKind, &V) {}
- ConcreteInt() = default;
+ const llvm::APSInt &getValue() const { return *castDataAs<llvm::APSInt>(); }
- static bool isKind(const SVal& V) {
- return V.getBaseKind() == LocKind &&
- V.getSubKind() == ConcreteIntKind;
- }
-
- static bool isKind(const Loc& V) {
- return V.getSubKind() == ConcreteIntKind;
- }
+ static bool classof(SVal V) { return V.getKind() == ConcreteIntKind; }
};
} // namespace loc
-
} // namespace ento
-
} // namespace clang
+namespace llvm {
+template <typename To, typename From>
+struct CastInfo<
+ To, From,
+ std::enable_if_t<std::is_base_of<::clang::ento::SVal, From>::value>>
+ : public CastIsPossible<To, ::clang::ento::SVal> {
+ using Self = CastInfo<
+ To, From,
+ std::enable_if_t<std::is_base_of<::clang::ento::SVal, From>::value>>;
+ static bool isPossible(const From &V) {
+ return To::classof(*static_cast<const ::clang::ento::SVal *>(&V));
+ }
+ static std::optional<To> castFailed() { return std::optional<To>{}; }
+ static To doCast(const From &f) {
+ return *static_cast<const To *>(cast<::clang::ento::SVal>(&f));
+ }
+ static std::optional<To> doCastIfPossible(const From &f) {
+ if (!Self::isPossible(f))
+ return Self::castFailed();
+ return doCast(f);
+ }
+};
+} // namespace llvm
+
#endif // LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_SVALS_H
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SimpleConstraintManager.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SimpleConstraintManager.h
index 87e927f5b480..725140e073c6 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SimpleConstraintManager.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SimpleConstraintManager.h
@@ -34,16 +34,6 @@ public:
// Implementation for interface from ConstraintManager.
//===------------------------------------------------------------------===//
- /// Ensures that the DefinedSVal conditional is expressed as a NonLoc by
- /// creating boolean casts to handle Loc's.
- ProgramStateRef assume(ProgramStateRef State, DefinedSVal Cond,
- bool Assumption) override;
-
- ProgramStateRef assumeInclusiveRange(ProgramStateRef State, NonLoc Value,
- const llvm::APSInt &From,
- const llvm::APSInt &To,
- bool InRange) override;
-
protected:
//===------------------------------------------------------------------===//
// Interface that subclasses must implement.
@@ -74,6 +64,17 @@ protected:
// Internal implementation.
//===------------------------------------------------------------------===//
+ /// Ensures that the DefinedSVal conditional is expressed as a NonLoc by
+ /// creating boolean casts to handle Loc's.
+ ProgramStateRef assumeInternal(ProgramStateRef State, DefinedSVal Cond,
+ bool Assumption) override;
+
+ ProgramStateRef assumeInclusiveRangeInternal(ProgramStateRef State,
+ NonLoc Value,
+ const llvm::APSInt &From,
+ const llvm::APSInt &To,
+ bool InRange) override;
+
SValBuilder &getSValBuilder() const { return SVB; }
BasicValueFactory &getBasicVals() const { return SVB.getBasicValueFactory(); }
SymbolManager &getSymbolManager() const { return SVB.getSymbolManager(); }
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
index d2461705d128..fac0c04ae2ca 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
@@ -23,11 +23,11 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include <cassert>
#include <cstdint>
#include <memory>
+#include <optional>
namespace clang {
@@ -83,7 +83,8 @@ public:
/// \param[in] R The region to find the default binding for.
/// \return The default value bound to the region in the store, if a default
/// binding exists.
- virtual Optional<SVal> getDefaultBinding(Store store, const MemRegion *R) = 0;
+ virtual std::optional<SVal> getDefaultBinding(Store store,
+ const MemRegion *R) = 0;
/// Return the default value bound to a LazyCompoundVal. The default binding
/// is used to represent the value of any fields or elements within the
@@ -93,7 +94,7 @@ public:
/// \param[in] lcv The lazy compound value.
/// \return The default value bound to the LazyCompoundVal \c lcv, if a
/// default binding exists.
- Optional<SVal> getDefaultBinding(nonloc::LazyCompoundVal lcv) {
+ std::optional<SVal> getDefaultBinding(nonloc::LazyCompoundVal lcv) {
return getDefaultBinding(lcv.getStore(), lcv.getRegion());
}
@@ -172,17 +173,17 @@ public:
/// dynamic_cast.
/// - We don't know (base is a symbolic region and we don't have
/// enough info to determine if the cast will succeed at run time).
- /// The function returns an SVal representing the derived class; it's
- /// valid only if Failed flag is set to false.
- SVal attemptDownCast(SVal Base, QualType DerivedPtrType, bool &Failed);
+ /// The function returns an optional with SVal representing the derived class
+ /// in case of a successful cast and `std::nullopt` otherwise.
+ std::optional<SVal> evalBaseToDerived(SVal Base, QualType DerivedPtrType);
const ElementRegion *GetElementZeroRegion(const SubRegion *R, QualType T);
/// castRegion - Used by ExprEngine::VisitCast to handle casts from
/// a MemRegion* to a specific location type. 'R' is the region being
/// casted and 'CastToTy' the result type of the cast.
- Optional<const MemRegion *> castRegion(const MemRegion *region,
- QualType CastToTy);
+ std::optional<const MemRegion *> castRegion(const MemRegion *region,
+ QualType CastToTy);
virtual StoreRef removeDeadBindings(Store store, const StackFrameContext *LCtx,
SymbolReaper &SymReaper) = 0;
@@ -316,8 +317,6 @@ inline StoreRef &StoreRef::operator=(StoreRef const &newStore) {
// FIXME: Do we need to pass ProgramStateManager anymore?
std::unique_ptr<StoreManager>
CreateRegionStoreManager(ProgramStateManager &StMgr);
-std::unique_ptr<StoreManager>
-CreateFieldsOnlyRegionStoreManager(ProgramStateManager &StMgr);
} // namespace ento
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h
index 2f4ac6ba5f97..862a30c0e736 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h
@@ -17,6 +17,7 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator_range.h"
#include <cassert>
namespace clang {
@@ -83,8 +84,9 @@ public:
bool operator!=(const symbol_iterator &X) const;
};
- symbol_iterator symbol_begin() const { return symbol_iterator(this); }
- static symbol_iterator symbol_end() { return symbol_iterator(); }
+ llvm::iterator_range<symbol_iterator> symbols() const {
+ return llvm::make_range(symbol_iterator(this), symbol_iterator());
+ }
virtual unsigned computeComplexity() const = 0;
@@ -98,6 +100,7 @@ public:
/// the beginning of the analysis, and SymbolDerived which denotes the value
/// of a certain memory region after its super region (a memory space or
/// a larger record region) is default-bound with a certain symbol.
+ /// It might return null.
virtual const MemRegion *getOriginRegion() const { return nullptr; }
};
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
index c71cb88f5574..3b64d38ee2b2 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
@@ -24,6 +24,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Allocator.h"
#include <cassert>
@@ -48,6 +49,7 @@ public:
assert(isValidTypeForSymbol(r->getValueType()));
}
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const TypedValueRegion* getRegion() const { return R; }
static void Profile(llvm::FoldingSetNodeID& profile, const TypedValueRegion* R) {
@@ -95,8 +97,10 @@ public:
assert(isValidTypeForSymbol(t));
}
+ /// It might return null.
const Stmt *getStmt() const { return S; }
unsigned getCount() const { return Count; }
+ /// It might return null.
const void *getTag() const { return SymbolTag; }
QualType getType() const override;
@@ -140,7 +144,9 @@ public:
assert(isValidTypeForSymbol(r->getValueType()));
}
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
SymbolRef getParentSymbol() const { return parentSymbol; }
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const TypedValueRegion *getRegion() const { return R; }
QualType getType() const override;
@@ -179,6 +185,7 @@ public:
assert(r);
}
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const SubRegion *getRegion() const { return R; }
QualType getType() const override;
@@ -226,29 +233,37 @@ public:
assert(tag);
}
- const MemRegion *getRegion() const { return R; }
- const Stmt *getStmt() const { return S; }
- const LocationContext *getLocationContext() const { return LCtx; }
- unsigned getCount() const { return Count; }
- const void *getTag() const { return Tag; }
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
+ const MemRegion *getRegion() const { return R; }
- QualType getType() const override;
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
+ const Stmt *getStmt() const { return S; }
- StringRef getKindStr() const override;
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
+ const LocationContext *getLocationContext() const { return LCtx; }
- void dumpToStream(raw_ostream &os) const override;
+ unsigned getCount() const { return Count; }
- static void Profile(llvm::FoldingSetNodeID& profile, const MemRegion *R,
- const Stmt *S, QualType T, const LocationContext *LCtx,
- unsigned Count, const void *Tag) {
- profile.AddInteger((unsigned) SymbolMetadataKind);
- profile.AddPointer(R);
- profile.AddPointer(S);
- profile.Add(T);
- profile.AddPointer(LCtx);
- profile.AddInteger(Count);
- profile.AddPointer(Tag);
- }
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
+ const void *getTag() const { return Tag; }
+
+ QualType getType() const override;
+
+ StringRef getKindStr() const override;
+
+ void dumpToStream(raw_ostream &os) const override;
+
+ static void Profile(llvm::FoldingSetNodeID &profile, const MemRegion *R,
+ const Stmt *S, QualType T, const LocationContext *LCtx,
+ unsigned Count, const void *Tag) {
+ profile.AddInteger((unsigned)SymbolMetadataKind);
+ profile.AddPointer(R);
+ profile.AddPointer(S);
+ profile.Add(T);
+ profile.AddPointer(LCtx);
+ profile.AddInteger(Count);
+ profile.AddPointer(Tag);
+ }
void Profile(llvm::FoldingSetNodeID& profile) override {
Profile(profile, R, S, T, LCtx, Count, Tag);
@@ -287,6 +302,7 @@ public:
QualType getType() const override { return ToTy; }
+ LLVM_ATTRIBUTE_RETURNS_NONNULL
const SymExpr *getOperand() const { return Operand; }
void dumpToStream(raw_ostream &os) const override;
@@ -309,6 +325,55 @@ public:
}
};
+/// Represents a symbolic expression involving a unary operator.
+class UnarySymExpr : public SymExpr {
+ const SymExpr *Operand;
+ UnaryOperator::Opcode Op;
+ QualType T;
+
+public:
+ UnarySymExpr(const SymExpr *In, UnaryOperator::Opcode Op, QualType T)
+ : SymExpr(UnarySymExprKind), Operand(In), Op(Op), T(T) {
+ // Note, some unary operators are modeled as a binary operator. E.g. ++x is
+ // modeled as x + 1.
+ assert((Op == UO_Minus || Op == UO_Not) && "non-supported unary expression");
+ // Unary expressions are results of arithmetic. Pointer arithmetic is not
+ // handled by unary expressions, but it is instead handled by applying
+ // sub-regions to regions.
+ assert(isValidTypeForSymbol(T) && "non-valid type for unary symbol");
+ assert(!Loc::isLocType(T) && "unary symbol should be nonloc");
+ }
+
+ unsigned computeComplexity() const override {
+ if (Complexity == 0)
+ Complexity = 1 + Operand->computeComplexity();
+ return Complexity;
+ }
+
+ const SymExpr *getOperand() const { return Operand; }
+ UnaryOperator::Opcode getOpcode() const { return Op; }
+ QualType getType() const override { return T; }
+
+ void dumpToStream(raw_ostream &os) const override;
+
+ static void Profile(llvm::FoldingSetNodeID &ID, const SymExpr *In,
+ UnaryOperator::Opcode Op, QualType T) {
+ ID.AddInteger((unsigned)UnarySymExprKind);
+ ID.AddPointer(In);
+ ID.AddInteger(Op);
+ ID.Add(T);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) override {
+ Profile(ID, Operand, Op, T);
+ }
+
+ // Implement isa<T> support.
+ static bool classof(const SymExpr *SE) {
+ return SE->getKind() == UnarySymExprKind;
+ }
+};
+
/// Represents a symbolic expression involving a binary operator
class BinarySymExpr : public SymExpr {
BinaryOperator::Opcode Op;
@@ -486,6 +551,9 @@ public:
const SymSymExpr *getSymSymExpr(const SymExpr *lhs, BinaryOperator::Opcode op,
const SymExpr *rhs, QualType t);
+ const UnarySymExpr *getUnarySymExpr(const SymExpr *operand,
+ UnaryOperator::Opcode op, QualType t);
+
QualType getType(const SymExpr *SE) const {
return SE->getType();
}
@@ -515,7 +583,12 @@ class SymbolReaper {
SymbolMapTy TheLiving;
SymbolSetTy MetadataInUse;
- RegionSetTy RegionRoots;
+ RegionSetTy LiveRegionRoots;
+ // The lazily copied regions are locations for which a program
+ // can access the value stored at that location, but not its address.
+ // These regions are constructed as a set of regions referred to by
+ // lazyCompoundVal.
+ RegionSetTy LazilyCopiedRegionRoots;
const StackFrameContext *LCtx;
const Stmt *Loc;
@@ -535,6 +608,7 @@ public:
SymbolManager &symmgr, StoreManager &storeMgr)
: LCtx(Ctx), Loc(s), SymMgr(symmgr), reapedStore(nullptr, storeMgr) {}
+ /// It might return null.
const LocationContext *getLocationContext() const { return LCtx; }
bool isLive(SymbolRef sym);
@@ -558,10 +632,9 @@ public:
/// symbol marking has occurred, i.e. in the MarkLiveSymbols callback.
void markInUse(SymbolRef sym);
- using region_iterator = RegionSetTy::const_iterator;
-
- region_iterator region_begin() const { return RegionRoots.begin(); }
- region_iterator region_end() const { return RegionRoots.end(); }
+ llvm::iterator_range<RegionSetTy::const_iterator> regions() const {
+ return LiveRegionRoots;
+ }
/// Returns whether or not a symbol has been confirmed dead.
///
@@ -572,6 +645,7 @@ public:
}
void markLive(const MemRegion *region);
+ void markLazilyCopied(const MemRegion *region);
void markElementIndicesLive(const MemRegion *region);
/// Set to the value of the symbolic store after
@@ -579,6 +653,12 @@ public:
void setReapedStore(StoreRef st) { reapedStore = st; }
private:
+ bool isLazilyCopiedRegion(const MemRegion *region) const;
+ // A readable region is a region that live or lazily copied.
+ // Any symbols that refer to values in regions are alive if the region
+ // is readable.
+ bool isReadableRegion(const MemRegion *region);
+
/// Mark the symbols dependent on the input symbol as live.
void markDependentsLive(SymbolRef sym);
};
@@ -592,6 +672,11 @@ public:
SymbolVisitor(const SymbolVisitor &) = default;
SymbolVisitor(SymbolVisitor &&) {}
+ // The copy and move assignment operator is defined as deleted pending further
+ // motivation.
+ SymbolVisitor &operator=(const SymbolVisitor &) = delete;
+ SymbolVisitor &operator=(SymbolVisitor &&) = delete;
+
/// A visitor method invoked by ProgramStateManager::scanReachableSymbols.
///
/// The method returns \c true if symbols should continue be scanned and \c
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Symbols.def b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Symbols.def
index 7163a16263ab..b93f8e250155 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Symbols.def
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Symbols.def
@@ -33,6 +33,8 @@
#define SYMBOL_RANGE(Id, First, Last)
#endif
+SYMBOL(UnarySymExpr, SymExpr)
+
ABSTRACT_SYMBOL(BinarySymExpr, SymExpr)
SYMBOL(IntSymExpr, BinarySymExpr)
SYMBOL(SymIntExpr, BinarySymExpr)
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/AnalysisConsumer.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/AnalysisConsumer.h
index bcc29a60ad70..f3b1c1f20645 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/AnalysisConsumer.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/AnalysisConsumer.h
@@ -21,14 +21,10 @@
namespace clang {
-class Preprocessor;
-class DiagnosticsEngine;
-class CodeInjector;
class CompilerInstance;
namespace ento {
class PathDiagnosticConsumer;
-class CheckerManager;
class CheckerRegistry;
class AnalysisASTConsumer : public ASTConsumer {
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/FrontendActions.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/FrontendActions.h
index 2b12330e4f2d..31b1c245200e 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/FrontendActions.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/FrontendActions.h
@@ -16,12 +16,9 @@
namespace clang {
class Stmt;
-class AnalyzerOptions;
namespace ento {
-class CheckerManager;
-
//===----------------------------------------------------------------------===//
// AST Consumer Actions
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/ModelConsumer.h b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/ModelConsumer.h
index 5f9ae78dac63..7b7087622bc2 100644
--- a/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/ModelConsumer.h
+++ b/contrib/llvm-project/clang/include/clang/StaticAnalyzer/Frontend/ModelConsumer.h
@@ -12,8 +12,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_GR_MODELCONSUMER_H
-#define LLVM_CLANG_GR_MODELCONSUMER_H
+#ifndef LLVM_CLANG_STATICANALYZER_FRONTEND_MODELCONSUMER_H
+#define LLVM_CLANG_STATICANALYZER_FRONTEND_MODELCONSUMER_H
#include "clang/AST/ASTConsumer.h"
#include "llvm/ADT/StringMap.h"
diff --git a/contrib/llvm-project/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/contrib/llvm-project/clang/include/clang/Support/RISCVVIntrinsicUtils.h
new file mode 100644
index 000000000000..05a5e02e1390
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -0,0 +1,570 @@
+//===--- RISCVVIntrinsicUtils.h - RISC-V Vector Intrinsic Utils -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_SUPPORT_RISCVVINTRINSICUTILS_H
+#define CLANG_SUPPORT_RISCVVINTRINSICUTILS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitmaskEnum.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include <cstdint>
+#include <optional>
+#include <set>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+namespace llvm {
+class raw_ostream;
+} // end namespace llvm
+
+namespace clang {
+namespace RISCV {
+
+using VScaleVal = std::optional<unsigned>;
+
+// Modifier for vector type.
+enum class VectorTypeModifier : uint8_t {
+ NoModifier,
+ Widening2XVector,
+ Widening4XVector,
+ Widening8XVector,
+ MaskVector,
+ Log2EEW3,
+ Log2EEW4,
+ Log2EEW5,
+ Log2EEW6,
+ FixedSEW8,
+ FixedSEW16,
+ FixedSEW32,
+ FixedSEW64,
+ LFixedLog2LMULN3,
+ LFixedLog2LMULN2,
+ LFixedLog2LMULN1,
+ LFixedLog2LMUL0,
+ LFixedLog2LMUL1,
+ LFixedLog2LMUL2,
+ LFixedLog2LMUL3,
+ SFixedLog2LMULN3,
+ SFixedLog2LMULN2,
+ SFixedLog2LMULN1,
+ SFixedLog2LMUL0,
+ SFixedLog2LMUL1,
+ SFixedLog2LMUL2,
+ SFixedLog2LMUL3,
+ SEFixedLog2LMULN3,
+ SEFixedLog2LMULN2,
+ SEFixedLog2LMULN1,
+ SEFixedLog2LMUL0,
+ SEFixedLog2LMUL1,
+ SEFixedLog2LMUL2,
+ SEFixedLog2LMUL3,
+ Tuple2,
+ Tuple3,
+ Tuple4,
+ Tuple5,
+ Tuple6,
+ Tuple7,
+ Tuple8,
+};
+
+// Similar to basic type but used to describe what's kind of type related to
+// basic vector type, used to compute type info of arguments.
+enum class BaseTypeModifier : uint8_t {
+ Invalid,
+ Scalar,
+ Vector,
+ Void,
+ SizeT,
+ Ptrdiff,
+ UnsignedLong,
+ SignedLong,
+ Float32
+};
+
+// Modifier for type, used for both scalar and vector types.
+enum class TypeModifier : uint8_t {
+ NoModifier = 0,
+ Pointer = 1 << 0,
+ Const = 1 << 1,
+ Immediate = 1 << 2,
+ UnsignedInteger = 1 << 3,
+ SignedInteger = 1 << 4,
+ Float = 1 << 5,
+ BFloat = 1 << 6,
+ // LMUL1 should be kind of VectorTypeModifier, but that might come with
+ // Widening2XVector for widening reduction.
+ // However that might require VectorTypeModifier become bitmask rather than
+ // simple enum, so we decide keek LMUL1 in TypeModifier for code size
+ // optimization of clang binary size.
+ LMUL1 = 1 << 7,
+ MaxOffset = 7,
+ LLVM_MARK_AS_BITMASK_ENUM(LMUL1),
+};
+
+class Policy {
+public:
+ enum PolicyType {
+ Undisturbed,
+ Agnostic,
+ };
+
+private:
+ // The default assumption for an RVV instruction is TAMA, as an undisturbed
+ // policy generally will affect the performance of an out-of-order core.
+ const PolicyType TailPolicy = Agnostic;
+ const PolicyType MaskPolicy = Agnostic;
+
+public:
+ Policy() = default;
+ Policy(PolicyType TailPolicy) : TailPolicy(TailPolicy) {}
+ Policy(PolicyType TailPolicy, PolicyType MaskPolicy)
+ : TailPolicy(TailPolicy), MaskPolicy(MaskPolicy) {}
+
+ bool isTAMAPolicy() const {
+ return TailPolicy == Agnostic && MaskPolicy == Agnostic;
+ }
+
+ bool isTAMUPolicy() const {
+ return TailPolicy == Agnostic && MaskPolicy == Undisturbed;
+ }
+
+ bool isTUMAPolicy() const {
+ return TailPolicy == Undisturbed && MaskPolicy == Agnostic;
+ }
+
+ bool isTUMUPolicy() const {
+ return TailPolicy == Undisturbed && MaskPolicy == Undisturbed;
+ }
+
+ bool isTAPolicy() const { return TailPolicy == Agnostic; }
+
+ bool isTUPolicy() const { return TailPolicy == Undisturbed; }
+
+ bool isMAPolicy() const { return MaskPolicy == Agnostic; }
+
+ bool isMUPolicy() const { return MaskPolicy == Undisturbed; }
+
+ bool operator==(const Policy &Other) const {
+ return TailPolicy == Other.TailPolicy && MaskPolicy == Other.MaskPolicy;
+ }
+
+ bool operator!=(const Policy &Other) const { return !(*this == Other); }
+
+ bool operator<(const Policy &Other) const {
+ // Just for maintain the old order for quick test.
+ if (MaskPolicy != Other.MaskPolicy)
+ return Other.MaskPolicy < MaskPolicy;
+ return TailPolicy < Other.TailPolicy;
+ }
+};
+
+// PrototypeDescriptor is used to compute type info of arguments or return
+// value.
+struct PrototypeDescriptor {
+ constexpr PrototypeDescriptor() = default;
+ constexpr PrototypeDescriptor(
+ BaseTypeModifier PT,
+ VectorTypeModifier VTM = VectorTypeModifier::NoModifier,
+ TypeModifier TM = TypeModifier::NoModifier)
+ : PT(static_cast<uint8_t>(PT)), VTM(static_cast<uint8_t>(VTM)),
+ TM(static_cast<uint8_t>(TM)) {}
+ constexpr PrototypeDescriptor(uint8_t PT, uint8_t VTM, uint8_t TM)
+ : PT(PT), VTM(VTM), TM(TM) {}
+
+ uint8_t PT = static_cast<uint8_t>(BaseTypeModifier::Invalid);
+ uint8_t VTM = static_cast<uint8_t>(VectorTypeModifier::NoModifier);
+ uint8_t TM = static_cast<uint8_t>(TypeModifier::NoModifier);
+
+ bool operator!=(const PrototypeDescriptor &PD) const {
+ return !(*this == PD);
+ }
+ bool operator==(const PrototypeDescriptor &PD) const {
+ return PD.PT == PT && PD.VTM == VTM && PD.TM == TM;
+ }
+ bool operator<(const PrototypeDescriptor &PD) const {
+ return std::tie(PT, VTM, TM) < std::tie(PD.PT, PD.VTM, PD.TM);
+ }
+ static const PrototypeDescriptor Mask;
+ static const PrototypeDescriptor Vector;
+ static const PrototypeDescriptor VL;
+ static std::optional<PrototypeDescriptor>
+ parsePrototypeDescriptor(llvm::StringRef PrototypeStr);
+};
+
+llvm::SmallVector<PrototypeDescriptor>
+parsePrototypes(llvm::StringRef Prototypes);
+
+// Basic type of vector type.
+enum class BasicType : uint8_t {
+ Unknown = 0,
+ Int8 = 1 << 0,
+ Int16 = 1 << 1,
+ Int32 = 1 << 2,
+ Int64 = 1 << 3,
+ BFloat16 = 1 << 4,
+ Float16 = 1 << 5,
+ Float32 = 1 << 6,
+ Float64 = 1 << 7,
+ MaxOffset = 7,
+ LLVM_MARK_AS_BITMASK_ENUM(Float64),
+};
+
+// Type of vector type.
+enum ScalarTypeKind : uint8_t {
+ Void,
+ Size_t,
+ Ptrdiff_t,
+ UnsignedLong,
+ SignedLong,
+ Boolean,
+ SignedInteger,
+ UnsignedInteger,
+ Float,
+ BFloat,
+ Invalid,
+ Undefined,
+};
+
+// Exponential LMUL
+struct LMULType {
+ int Log2LMUL;
+ LMULType(int Log2LMUL);
+ // Return the C/C++ string representation of LMUL
+ std::string str() const;
+ std::optional<unsigned> getScale(unsigned ElementBitwidth) const;
+ void MulLog2LMUL(int Log2LMUL);
+};
+
+class RVVType;
+using RVVTypePtr = RVVType *;
+using RVVTypes = std::vector<RVVTypePtr>;
+class RVVTypeCache;
+
+// This class is compact representation of a valid and invalid RVVType.
+class RVVType {
+ friend class RVVTypeCache;
+
+ BasicType BT;
+ ScalarTypeKind ScalarType = Undefined;
+ LMULType LMUL;
+ bool IsPointer = false;
+ // IsConstant indices are "int", but have the constant expression.
+ bool IsImmediate = false;
+ // Const qualifier for pointer to const object or object of const type.
+ bool IsConstant = false;
+ unsigned ElementBitwidth = 0;
+ VScaleVal Scale = 0;
+ bool Valid;
+ bool IsTuple = false;
+ unsigned NF = 0;
+
+ std::string BuiltinStr;
+ std::string ClangBuiltinStr;
+ std::string Str;
+ std::string ShortStr;
+
+ enum class FixedLMULType { LargerThan, SmallerThan, SmallerOrEqual };
+
+ RVVType(BasicType BT, int Log2LMUL, const PrototypeDescriptor &Profile);
+
+public:
+ // Return the string representation of a type, which is an encoded string for
+ // passing to the BUILTIN() macro in Builtins.def.
+ const std::string &getBuiltinStr() const { return BuiltinStr; }
+
+ // Return the clang builtin type for RVV vector type which are used in the
+ // riscv_vector.h header file.
+ const std::string &getClangBuiltinStr() const { return ClangBuiltinStr; }
+
+ // Return the C/C++ string representation of a type for use in the
+ // riscv_vector.h header file.
+ const std::string &getTypeStr() const { return Str; }
+
+ // Return the short name of a type for C/C++ name suffix.
+ const std::string &getShortStr() {
+ // Not all types are used in short name, so compute the short name by
+ // demanded.
+ if (ShortStr.empty())
+ initShortStr();
+ return ShortStr;
+ }
+
+ bool isValid() const { return Valid; }
+ bool isScalar() const { return Scale && *Scale == 0; }
+ bool isVector() const { return Scale && *Scale != 0; }
+ bool isVector(unsigned Width) const {
+ return isVector() && ElementBitwidth == Width;
+ }
+ bool isFloat() const { return ScalarType == ScalarTypeKind::Float; }
+ bool isBFloat() const { return ScalarType == ScalarTypeKind::BFloat; }
+ bool isSignedInteger() const {
+ return ScalarType == ScalarTypeKind::SignedInteger;
+ }
+ bool isFloatVector(unsigned Width) const {
+ return isVector() && isFloat() && ElementBitwidth == Width;
+ }
+ bool isFloat(unsigned Width) const {
+ return isFloat() && ElementBitwidth == Width;
+ }
+ bool isConstant() const { return IsConstant; }
+ bool isPointer() const { return IsPointer; }
+ bool isTuple() const { return IsTuple; }
+ unsigned getElementBitwidth() const { return ElementBitwidth; }
+
+ ScalarTypeKind getScalarType() const { return ScalarType; }
+ VScaleVal getScale() const { return Scale; }
+ unsigned getNF() const {
+ assert(NF > 1 && NF <= 8 && "Only legal NF should be fetched");
+ return NF;
+ }
+
+private:
+ // Verify RVV vector type and set Valid.
+ bool verifyType() const;
+
+ // Creates a type based on basic types of TypeRange
+ void applyBasicType();
+
+ // Applies a prototype modifier to the current type. The result maybe an
+ // invalid type.
+ void applyModifier(const PrototypeDescriptor &prototype);
+
+ void applyLog2EEW(unsigned Log2EEW);
+ void applyFixedSEW(unsigned NewSEW);
+ void applyFixedLog2LMUL(int Log2LMUL, enum FixedLMULType Type);
+
+ // Compute and record a string for legal type.
+ void initBuiltinStr();
+ // Compute and record a builtin RVV vector type string.
+ void initClangBuiltinStr();
+ // Compute and record a type string for used in the header.
+ void initTypeStr();
+ // Compute and record a short name of a type for C/C++ name suffix.
+ void initShortStr();
+};
+
+// This class is used to manage RVVType, RVVType should only created by this
+// class, also provided thread-safe cache capability.
+class RVVTypeCache {
+private:
+ std::unordered_map<uint64_t, RVVType> LegalTypes;
+ std::set<uint64_t> IllegalTypes;
+
+public:
+ /// Compute output and input types by applying different config (basic type
+ /// and LMUL with type transformers). It also record result of type in legal
+ /// or illegal set to avoid compute the same config again. The result maybe
+ /// have illegal RVVType.
+ std::optional<RVVTypes>
+ computeTypes(BasicType BT, int Log2LMUL, unsigned NF,
+ llvm::ArrayRef<PrototypeDescriptor> Prototype);
+ std::optional<RVVTypePtr> computeType(BasicType BT, int Log2LMUL,
+ PrototypeDescriptor Proto);
+};
+
+enum PolicyScheme : uint8_t {
+ SchemeNone,
+ // Passthru operand is at first parameter in C builtin.
+ HasPassthruOperand,
+ HasPolicyOperand,
+};
+
+// TODO refactor RVVIntrinsic class design after support all intrinsic
+// combination. This represents an instantiation of an intrinsic with a
+// particular type and prototype
+class RVVIntrinsic {
+
+private:
+ std::string BuiltinName; // Builtin name
+ std::string Name; // C intrinsic name.
+ std::string OverloadedName;
+ std::string IRName;
+ bool IsMasked;
+ bool HasMaskedOffOperand;
+ bool HasVL;
+ PolicyScheme Scheme;
+ bool SupportOverloading;
+ bool HasBuiltinAlias;
+ std::string ManualCodegen;
+ RVVTypePtr OutputType; // Builtin output type
+ RVVTypes InputTypes; // Builtin input types
+ // The types we use to obtain the specific LLVM intrinsic. They are index of
+ // InputTypes. -1 means the return type.
+ std::vector<int64_t> IntrinsicTypes;
+ unsigned NF = 1;
+ Policy PolicyAttrs;
+
+public:
+ RVVIntrinsic(llvm::StringRef Name, llvm::StringRef Suffix,
+ llvm::StringRef OverloadedName, llvm::StringRef OverloadedSuffix,
+ llvm::StringRef IRName, bool IsMasked, bool HasMaskedOffOperand,
+ bool HasVL, PolicyScheme Scheme, bool SupportOverloading,
+ bool HasBuiltinAlias, llvm::StringRef ManualCodegen,
+ const RVVTypes &Types,
+ const std::vector<int64_t> &IntrinsicTypes,
+ const std::vector<llvm::StringRef> &RequiredFeatures,
+ unsigned NF, Policy PolicyAttrs, bool HasFRMRoundModeOp);
+ ~RVVIntrinsic() = default;
+
+ RVVTypePtr getOutputType() const { return OutputType; }
+ const RVVTypes &getInputTypes() const { return InputTypes; }
+ llvm::StringRef getBuiltinName() const { return BuiltinName; }
+ llvm::StringRef getName() const { return Name; }
+ llvm::StringRef getOverloadedName() const { return OverloadedName; }
+ bool hasMaskedOffOperand() const { return HasMaskedOffOperand; }
+ bool hasVL() const { return HasVL; }
+ bool hasPolicy() const { return Scheme != PolicyScheme::SchemeNone; }
+ bool hasPassthruOperand() const {
+ return Scheme == PolicyScheme::HasPassthruOperand;
+ }
+ bool hasPolicyOperand() const {
+ return Scheme == PolicyScheme::HasPolicyOperand;
+ }
+ bool supportOverloading() const { return SupportOverloading; }
+ bool hasBuiltinAlias() const { return HasBuiltinAlias; }
+ bool hasManualCodegen() const { return !ManualCodegen.empty(); }
+ bool isMasked() const { return IsMasked; }
+ llvm::StringRef getIRName() const { return IRName; }
+ llvm::StringRef getManualCodegen() const { return ManualCodegen; }
+ PolicyScheme getPolicyScheme() const { return Scheme; }
+ unsigned getNF() const { return NF; }
+ const std::vector<int64_t> &getIntrinsicTypes() const {
+ return IntrinsicTypes;
+ }
+ Policy getPolicyAttrs() const {
+ return PolicyAttrs;
+ }
+ unsigned getPolicyAttrsBits() const {
+ // CGBuiltin.cpp
+ // The 0th bit simulates the `vta` of RVV
+ // The 1st bit simulates the `vma` of RVV
+ // int PolicyAttrs = 0;
+
+ if (PolicyAttrs.isTUMAPolicy())
+ return 2;
+ if (PolicyAttrs.isTAMAPolicy())
+ return 3;
+ if (PolicyAttrs.isTUMUPolicy())
+ return 0;
+ if (PolicyAttrs.isTAMUPolicy())
+ return 1;
+
+ llvm_unreachable("unsupport policy");
+ return 0;
+ }
+
+ // Return the type string for a BUILTIN() macro in Builtins.def.
+ std::string getBuiltinTypeStr() const;
+
+ static std::string
+ getSuffixStr(RVVTypeCache &TypeCache, BasicType Type, int Log2LMUL,
+ llvm::ArrayRef<PrototypeDescriptor> PrototypeDescriptors);
+
+ static llvm::SmallVector<PrototypeDescriptor>
+ computeBuiltinTypes(llvm::ArrayRef<PrototypeDescriptor> Prototype,
+ bool IsMasked, bool HasMaskedOffOperand, bool HasVL,
+ unsigned NF, PolicyScheme DefaultScheme,
+ Policy PolicyAttrs, bool IsTuple);
+
+ static llvm::SmallVector<Policy> getSupportedUnMaskedPolicies();
+ static llvm::SmallVector<Policy>
+ getSupportedMaskedPolicies(bool HasTailPolicy, bool HasMaskPolicy);
+
+ static void updateNamesAndPolicy(bool IsMasked, bool HasPolicy,
+ std::string &Name, std::string &BuiltinName,
+ std::string &OverloadedName,
+ Policy &PolicyAttrs, bool HasFRMRoundModeOp);
+};
+
+// RVVRequire should be sync'ed with target features, but only
+// required features used in riscv_vector.td.
+enum RVVRequire : uint32_t {
+ RVV_REQ_None = 0,
+ RVV_REQ_RV64 = 1 << 0,
+ RVV_REQ_Zvfhmin = 1 << 1,
+ RVV_REQ_Xsfvcp = 1 << 2,
+ RVV_REQ_Xsfvfnrclipxfqf = 1 << 3,
+ RVV_REQ_Xsfvfwmaccqqq = 1 << 4,
+ RVV_REQ_Xsfvqmaccdod = 1 << 5,
+ RVV_REQ_Xsfvqmaccqoq = 1 << 6,
+ RVV_REQ_Zvbb = 1 << 7,
+ RVV_REQ_Zvbc = 1 << 8,
+ RVV_REQ_Zvkb = 1 << 9,
+ RVV_REQ_Zvkg = 1 << 10,
+ RVV_REQ_Zvkned = 1 << 11,
+ RVV_REQ_Zvknha = 1 << 12,
+ RVV_REQ_Zvknhb = 1 << 13,
+ RVV_REQ_Zvksed = 1 << 14,
+ RVV_REQ_Zvksh = 1 << 15,
+ RVV_REQ_Experimental = 1 << 16,
+
+ LLVM_MARK_AS_BITMASK_ENUM(RVV_REQ_Experimental)
+};
+
+// Raw RVV intrinsic info, used to expand later.
+// This struct is highly compact for minimized code size.
+struct RVVIntrinsicRecord {
+ // Intrinsic name, e.g. vadd_vv
+ const char *Name;
+
+ // Overloaded intrinsic name, could be empty if it can be computed from Name.
+ // e.g. vadd
+ const char *OverloadedName;
+
+ // Prototype for this intrinsic, index of RVVSignatureTable.
+ uint16_t PrototypeIndex;
+
+ // Suffix of intrinsic name, index of RVVSignatureTable.
+ uint16_t SuffixIndex;
+
+ // Suffix of overloaded intrinsic name, index of RVVSignatureTable.
+ uint16_t OverloadedSuffixIndex;
+
+ // Length of the prototype.
+ uint8_t PrototypeLength;
+
+ // Length of intrinsic name suffix.
+ uint8_t SuffixLength;
+
+ // Length of overloaded intrinsic suffix.
+ uint8_t OverloadedSuffixSize;
+
+ // Required target features for this intrinsic.
+ uint32_t RequiredExtensions;
+
+ // Supported type, mask of BasicType.
+ uint8_t TypeRangeMask;
+
+ // Supported LMUL.
+ uint8_t Log2LMULMask;
+
+ // Number of fields, greater than 1 if it's segment load/store.
+ uint8_t NF;
+
+ bool HasMasked : 1;
+ bool HasVL : 1;
+ bool HasMaskedOffOperand : 1;
+ bool HasTailPolicy : 1;
+ bool HasMaskPolicy : 1;
+ bool HasFRMRoundModeOp : 1;
+ bool IsTuple : 1;
+ uint8_t UnMaskedPolicyScheme : 2;
+ uint8_t MaskedPolicyScheme : 2;
+};
+
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
+ const RVVIntrinsicRecord &RVVInstrRecord);
+
+LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
+} // end namespace RISCV
+
+} // end namespace clang
+
+#endif // CLANG_SUPPORT_RISCVVINTRINSICUTILS_H
diff --git a/contrib/llvm-project/clang/include/clang/Testing/CommandLineArgs.h b/contrib/llvm-project/clang/include/clang/Testing/CommandLineArgs.h
index 95979a2bfb80..4dd28718dfa6 100644
--- a/contrib/llvm-project/clang/include/clang/Testing/CommandLineArgs.h
+++ b/contrib/llvm-project/clang/include/clang/Testing/CommandLineArgs.h
@@ -29,13 +29,20 @@ enum TestLanguage {
Lang_CXX17,
Lang_CXX20,
Lang_OpenCL,
+ Lang_OBJC,
Lang_OBJCXX
};
std::vector<std::string> getCommandLineArgsForTesting(TestLanguage Lang);
+std::vector<std::string> getCC1ArgsForTesting(TestLanguage Lang);
StringRef getFilenameForTesting(TestLanguage Lang);
+/// Find a target name such that looking for it in TargetRegistry by that name
+/// returns the same target. We expect that there is at least one target
+/// configured with this property.
+std::string getAnyTargetForTesting();
+
} // end namespace clang
#endif
diff --git a/contrib/llvm-project/clang/include/clang/Testing/TestAST.h b/contrib/llvm-project/clang/include/clang/Testing/TestAST.h
new file mode 100644
index 000000000000..845e31f65438
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Testing/TestAST.h
@@ -0,0 +1,103 @@
+//===--- TestAST.h - Build clang ASTs for testing -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// In normal operation of Clang, the FrontendAction's lifecycle both creates
+// and destroys the AST, and code should operate on it during callbacks in
+// between (e.g. via ASTConsumer).
+//
+// For tests it is often more convenient to parse an AST from code, and keep it
+// alive as a normal local object, with assertions as straight-line code.
+// TestAST provides such an interface.
+// (ASTUnit can be used for this purpose, but is a production library with
+// broad scope and complicated API).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TESTING_TESTAST_H
+#define LLVM_CLANG_TESTING_TESTAST_H
+
+#include "clang/Basic/LLVM.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Testing/CommandLineArgs.h"
+#include "llvm/ADT/StringRef.h"
+#include <string>
+#include <vector>
+
+namespace clang {
+
+/// Specifies a virtual source file to be parsed as part of a test.
+struct TestInputs {
+ TestInputs() = default;
+ TestInputs(StringRef Code) : Code(Code) {}
+
+ /// The source code of the input file to be parsed.
+ std::string Code;
+
+ /// The language to parse as.
+ /// This affects the -x and -std flags used, and the filename.
+ TestLanguage Language = TestLanguage::Lang_OBJCXX;
+
+ /// Extra argv to pass to clang -cc1.
+ std::vector<std::string> ExtraArgs = {};
+
+ /// Extra virtual files that are available to be #included.
+ /// Keys are plain filenames ("foo.h"), values are file content.
+ llvm::StringMap<std::string> ExtraFiles = {};
+
+ /// Filename to use for translation unit. A default will be used when empty.
+ std::string FileName;
+
+ /// By default, error diagnostics during parsing are reported as gtest errors.
+ /// To suppress this, set ErrorOK or include "error-ok" in a comment in Code.
+ /// In either case, all diagnostics appear in TestAST::diagnostics().
+ bool ErrorOK = false;
+
+ /// The action used to parse the code.
+ /// By default, a SyntaxOnlyAction is used.
+ std::function<std::unique_ptr<FrontendAction>()> MakeAction;
+};
+
+/// The result of parsing a file specified by TestInputs.
+///
+/// The ASTContext, Sema etc are valid as long as this object is alive.
+class TestAST {
+public:
+ /// Constructing a TestAST parses the virtual file.
+ ///
+ /// To keep tests terse, critical errors (e.g. invalid flags) are reported as
+ /// unit test failures with ADD_FAILURE() and produce an empty ASTContext,
+ /// Sema etc. This frees the test code from handling these explicitly.
+ TestAST(const TestInputs &);
+ TestAST(StringRef Code) : TestAST(TestInputs(Code)) {}
+ TestAST(TestAST &&M);
+ TestAST &operator=(TestAST &&);
+ ~TestAST();
+
+ /// Provides access to the AST context and other parts of Clang.
+
+ ASTContext &context() { return Clang->getASTContext(); }
+ Sema &sema() { return Clang->getSema(); }
+ SourceManager &sourceManager() { return Clang->getSourceManager(); }
+ FileManager &fileManager() { return Clang->getFileManager(); }
+ Preprocessor &preprocessor() { return Clang->getPreprocessor(); }
+ FrontendAction &action() { return *Action; }
+
+ /// Returns diagnostics emitted during parsing.
+ /// (By default, errors cause test failures, see TestInputs::ErrorOK).
+ llvm::ArrayRef<StoredDiagnostic> diagnostics() { return Diagnostics; }
+
+private:
+ void clear();
+ std::unique_ptr<FrontendAction> Action;
+ std::unique_ptr<CompilerInstance> Clang;
+ std::vector<StoredDiagnostic> Diagnostics;
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/include/clang/Testing/TestClangConfig.h b/contrib/llvm-project/clang/include/clang/Testing/TestClangConfig.h
index 5d6be4f65d0a..92d5cc3cff99 100644
--- a/contrib/llvm-project/clang/include/clang/Testing/TestClangConfig.h
+++ b/contrib/llvm-project/clang/include/clang/Testing/TestClangConfig.h
@@ -73,7 +73,7 @@ struct TestClangConfig {
std::string Result;
llvm::raw_string_ostream OS(Result);
OS << "{ Language=" << Language << ", Target=" << Target << " }";
- return OS.str();
+ return Result;
}
friend std::ostream &operator<<(std::ostream &OS,
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiff.h b/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiff.h
index c772ad84c139..6b351e25db20 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiff.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiff.h
@@ -20,6 +20,7 @@
#define LLVM_CLANG_TOOLING_ASTDIFF_ASTDIFF_H
#include "clang/Tooling/ASTDiff/ASTDiffInternal.h"
+#include <optional>
namespace clang {
namespace diff {
@@ -44,22 +45,8 @@ struct Node {
ASTNodeKind getType() const;
StringRef getTypeLabel() const;
bool isLeaf() const { return Children.empty(); }
- llvm::Optional<StringRef> getIdentifier() const;
- llvm::Optional<std::string> getQualifiedIdentifier() const;
-};
-
-class ASTDiff {
-public:
- ASTDiff(SyntaxTree &Src, SyntaxTree &Dst, const ComparisonOptions &Options);
- ~ASTDiff();
-
- // Returns the ID of the node that is mapped to the given node in SourceTree.
- NodeId getMapped(const SyntaxTree &SourceTree, NodeId Id) const;
-
- class Impl;
-
-private:
- std::unique_ptr<Impl> DiffImpl;
+ std::optional<StringRef> getIdentifier() const;
+ std::optional<std::string> getQualifiedIdentifier() const;
};
/// SyntaxTree objects represent subtrees of the AST.
@@ -120,6 +107,20 @@ struct ComparisonOptions {
}
};
+class ASTDiff {
+public:
+ ASTDiff(SyntaxTree &Src, SyntaxTree &Dst, const ComparisonOptions &Options);
+ ~ASTDiff();
+
+ // Returns the ID of the node that is mapped to the given node in SourceTree.
+ NodeId getMapped(const SyntaxTree &SourceTree, NodeId Id) const;
+
+ class Impl;
+
+private:
+ std::unique_ptr<Impl> DiffImpl;
+};
+
} // end namespace diff
} // end namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiffInternal.h b/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiffInternal.h
index 1e784ef43ac1..b74af5e8f24f 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiffInternal.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/ASTDiff/ASTDiffInternal.h
@@ -17,10 +17,6 @@ namespace diff {
using DynTypedNode = DynTypedNode;
-class SyntaxTree;
-class SyntaxTreeImpl;
-struct ComparisonOptions;
-
/// Within a tree, this identifies a node by its preorder offset.
struct NodeId {
private:
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/AllTUsExecution.h b/contrib/llvm-project/clang/include/clang/Tooling/AllTUsExecution.h
index 43f2792457e7..03cfc9c67f5c 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/AllTUsExecution.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/AllTUsExecution.h
@@ -16,6 +16,7 @@
#include "clang/Tooling/ArgumentsAdjusters.h"
#include "clang/Tooling/Execution.h"
+#include <optional>
namespace clang {
namespace tooling {
@@ -61,7 +62,7 @@ public:
private:
// Used to store the parser when the executor is initialized with parser.
- llvm::Optional<CommonOptionsParser> OptionsParser;
+ std::optional<CommonOptionsParser> OptionsParser;
const CompilationDatabase &Compilations;
std::unique_ptr<ToolResults> Results;
ExecutionContext Context;
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/CommonOptionsParser.h b/contrib/llvm-project/clang/include/clang/Tooling/CommonOptionsParser.h
index 0f072c2886ab..3c0480af3779 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/CommonOptionsParser.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/CommonOptionsParser.h
@@ -141,4 +141,4 @@ private:
} // namespace tooling
} // namespace clang
-#endif // LLVM_TOOLS_CLANG_INCLUDE_CLANG_TOOLING_COMMONOPTIONSPARSER_H
+#endif // LLVM_CLANG_TOOLING_COMMONOPTIONSPARSER_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/CompilationDatabase.h b/contrib/llvm-project/clang/include/clang/Tooling/CompilationDatabase.h
index 90af15536961..fee584acb486 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/CompilationDatabase.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/CompilationDatabase.h
@@ -216,6 +216,8 @@ private:
/// Transforms a compile command so that it applies the same configuration to
/// a different file. Most args are left intact, but tweaks may be needed
/// to certain flags (-x, -std etc).
+///
+/// The output command will always end in {"--", Filename}.
tooling::CompileCommand transferCompileCommand(tooling::CompileCommand,
StringRef Filename);
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Core/Replacement.h b/contrib/llvm-project/clang/include/clang/Tooling/Core/Replacement.h
index 09374c5b1c17..f9452111e147 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Core/Replacement.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Core/Replacement.h
@@ -20,12 +20,12 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
+#include <optional>
#include <set>
#include <string>
#include <system_error>
@@ -173,11 +173,11 @@ public:
static char ID;
- const llvm::Optional<Replacement> &getNewReplacement() const {
+ const std::optional<Replacement> &getNewReplacement() const {
return NewReplacement;
}
- const llvm::Optional<Replacement> &getExistingReplacement() const {
+ const std::optional<Replacement> &getExistingReplacement() const {
return ExistingReplacement;
}
@@ -191,10 +191,10 @@ private:
// A new replacement, which is to expected be added into a set of
// replacements, that is causing problem.
- llvm::Optional<Replacement> NewReplacement;
+ std::optional<Replacement> NewReplacement;
// An existing replacement in a replacements set that is causing problem.
- llvm::Optional<Replacement> ExistingReplacement;
+ std::optional<Replacement> ExistingReplacement;
};
/// Less-than operator between two Replacements.
@@ -202,6 +202,9 @@ bool operator<(const Replacement &LHS, const Replacement &RHS);
/// Equal-to operator between two Replacements.
bool operator==(const Replacement &LHS, const Replacement &RHS);
+inline bool operator!=(const Replacement &LHS, const Replacement &RHS) {
+ return !(LHS == RHS);
+}
/// Maintains a set of replacements that are conflict-free.
/// Two replacements are considered conflicts if they overlap or have the same
@@ -259,7 +262,7 @@ public:
/// Merges \p Replaces into the current replacements. \p Replaces
/// refers to code after applying the current replacements.
- LLVM_NODISCARD Replacements merge(const Replacements &Replaces) const;
+ [[nodiscard]] Replacements merge(const Replacements &Replaces) const;
// Returns the affected ranges in the changed code.
std::vector<Range> getAffectedRanges() const;
@@ -301,7 +304,7 @@ private:
// applied.
Replacements getCanonicalReplacements() const;
- // If `R` and all existing replacements are order-indepedent, then merge it
+ // If `R` and all existing replacements are order-independent, then merge it
// with `Replaces` and returns the merged replacements; otherwise, returns an
// error.
llvm::Expected<Replacements>
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h
index c52da3305f7c..9a2aea5d6efa 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h
@@ -6,176 +6,274 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_FILESYSTEM_H
-#define LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_FILESYSTEM_H
+#ifndef LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGFILESYSTEM_H
+#define LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGFILESYSTEM_H
#include "clang/Basic/LLVM.h"
-#include "clang/Lex/PreprocessorExcludedConditionalDirectiveSkipMapping.h"
+#include "clang/Lex/DependencyDirectivesScanner.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/VirtualFileSystem.h"
#include <mutex>
+#include <optional>
namespace clang {
namespace tooling {
namespace dependencies {
+using DependencyDirectivesTy =
+ SmallVector<dependency_directives_scan::Directive, 20>;
+
+/// Contents and directive tokens of a cached file entry. Single instance can
+/// be shared between multiple entries.
+struct CachedFileContents {
+ CachedFileContents(std::unique_ptr<llvm::MemoryBuffer> Contents)
+ : Original(std::move(Contents)), DepDirectives(nullptr) {}
+
+ /// Owning storage for the original contents.
+ std::unique_ptr<llvm::MemoryBuffer> Original;
+
+ /// The mutex that must be locked before mutating directive tokens.
+ std::mutex ValueLock;
+ SmallVector<dependency_directives_scan::Token, 10> DepDirectiveTokens;
+ /// Accessor to the directive tokens that's atomic to avoid data races.
+ /// \p CachedFileContents has ownership of the pointer.
+ std::atomic<const std::optional<DependencyDirectivesTy> *> DepDirectives;
+
+ ~CachedFileContents() { delete DepDirectives.load(); }
+};
+
/// An in-memory representation of a file system entity that is of interest to
/// the dependency scanning filesystem.
///
/// It represents one of the following:
-/// - an opened source file with minimized contents and a stat value.
-/// - an opened source file with original contents and a stat value.
-/// - a directory entry with its stat value.
-/// - an error value to represent a file system error.
-/// - a placeholder with an invalid stat indicating a not yet initialized entry.
+/// - opened file with contents and a stat value,
+/// - opened file with contents, directive tokens and a stat value,
+/// - directory entry with its stat value,
+/// - filesystem error.
+///
+/// Single instance of this class can be shared across different filenames (e.g.
+/// a regular file and a symlink). For this reason the status filename is empty
+/// and is only materialized by \c EntryRef that knows the requested filename.
class CachedFileSystemEntry {
public:
- /// Default constructor creates an entry with an invalid stat.
- CachedFileSystemEntry() : MaybeStat(llvm::vfs::Status()) {}
+ /// Creates an entry without contents: either a filesystem error or
+ /// a directory with stat value.
+ CachedFileSystemEntry(llvm::ErrorOr<llvm::vfs::Status> Stat)
+ : MaybeStat(std::move(Stat)), Contents(nullptr) {
+ clearStatName();
+ }
+
+ /// Creates an entry representing a file with contents.
+ CachedFileSystemEntry(llvm::ErrorOr<llvm::vfs::Status> Stat,
+ CachedFileContents *Contents)
+ : MaybeStat(std::move(Stat)), Contents(std::move(Contents)) {
+ clearStatName();
+ }
- CachedFileSystemEntry(std::error_code Error) : MaybeStat(std::move(Error)) {}
+ /// \returns True if the entry is a filesystem error.
+ bool isError() const { return !MaybeStat; }
- /// Create an entry that represents an opened source file with minimized or
- /// original contents.
- ///
- /// The filesystem opens the file even for `stat` calls open to avoid the
- /// issues with stat + open of minimized files that might lead to a
- /// mismatching size of the file. If file is not minimized, the full file is
- /// read and copied into memory to ensure that it's not memory mapped to avoid
- /// running out of file descriptors.
- static CachedFileSystemEntry createFileEntry(StringRef Filename,
- llvm::vfs::FileSystem &FS,
- bool Minimize = true);
-
- /// Create an entry that represents a directory on the filesystem.
- static CachedFileSystemEntry createDirectoryEntry(llvm::vfs::Status &&Stat);
-
- /// \returns True if the entry is valid.
- bool isValid() const { return !MaybeStat || MaybeStat->isStatusKnown(); }
-
- /// \returns True if the current entry points to a directory.
- bool isDirectory() const { return MaybeStat && MaybeStat->isDirectory(); }
-
- /// \returns The error or the file's contents.
- llvm::ErrorOr<StringRef> getContents() const {
- if (!MaybeStat)
- return MaybeStat.getError();
+ /// \returns True if the current entry represents a directory.
+ bool isDirectory() const { return !isError() && MaybeStat->isDirectory(); }
+
+ /// \returns Original contents of the file.
+ StringRef getOriginalContents() const {
+ assert(!isError() && "error");
assert(!MaybeStat->isDirectory() && "not a file");
- assert(isValid() && "not initialized");
- return Contents.str();
+ assert(Contents && "contents not initialized");
+ return Contents->Original->getBuffer();
}
- /// \returns The error or the status of the entry.
- llvm::ErrorOr<llvm::vfs::Status> getStatus() const {
- assert(isValid() && "not initialized");
- return MaybeStat;
+ /// \returns The scanned preprocessor directive tokens of the file that are
+ /// used to speed up preprocessing, if available.
+ std::optional<ArrayRef<dependency_directives_scan::Directive>>
+ getDirectiveTokens() const {
+ assert(!isError() && "error");
+ assert(!isDirectory() && "not a file");
+ assert(Contents && "contents not initialized");
+ if (auto *Directives = Contents->DepDirectives.load()) {
+ if (Directives->has_value())
+ return ArrayRef<dependency_directives_scan::Directive>(**Directives);
+ }
+ return std::nullopt;
}
- /// \returns the name of the file.
- StringRef getName() const {
- assert(isValid() && "not initialized");
- return MaybeStat->getName();
- }
+ /// \returns The error.
+ std::error_code getError() const { return MaybeStat.getError(); }
- /// Return the mapping between location -> distance that is used to speed up
- /// the block skipping in the preprocessor.
- const PreprocessorSkippedRangeMapping &getPPSkippedRangeMapping() const {
- return PPSkippedRangeMapping;
+ /// \returns The entry status with empty filename.
+ llvm::vfs::Status getStatus() const {
+ assert(!isError() && "error");
+ assert(MaybeStat->getName().empty() && "stat name must be empty");
+ return *MaybeStat;
}
- CachedFileSystemEntry(CachedFileSystemEntry &&) = default;
- CachedFileSystemEntry &operator=(CachedFileSystemEntry &&) = default;
+ /// \returns The unique ID of the entry.
+ llvm::sys::fs::UniqueID getUniqueID() const {
+ assert(!isError() && "error");
+ return MaybeStat->getUniqueID();
+ }
- CachedFileSystemEntry(const CachedFileSystemEntry &) = delete;
- CachedFileSystemEntry &operator=(const CachedFileSystemEntry &) = delete;
+ /// \returns The data structure holding both contents and directive tokens.
+ CachedFileContents *getCachedContents() const {
+ assert(!isError() && "error");
+ assert(!isDirectory() && "not a file");
+ return Contents;
+ }
private:
+ void clearStatName() {
+ if (MaybeStat)
+ MaybeStat = llvm::vfs::Status::copyWithNewName(*MaybeStat, "");
+ }
+
+ /// Either the filesystem error or status of the entry.
+ /// The filename is empty and only materialized by \c EntryRef.
llvm::ErrorOr<llvm::vfs::Status> MaybeStat;
- // Store the contents in a small string to allow a
- // move from the small string for the minimized contents.
- // Note: small size of 1 allows us to store an empty string with an implicit
- // null terminator without any allocations.
- llvm::SmallString<1> Contents;
- PreprocessorSkippedRangeMapping PPSkippedRangeMapping;
+
+ /// Non-owning pointer to the file contents.
+ ///
+ /// We're using pointer here to keep the size of this class small. Instances
+ /// representing directories and filesystem errors don't hold any contents
+ /// anyway.
+ CachedFileContents *Contents;
};
/// This class is a shared cache, that caches the 'stat' and 'open' calls to the
-/// underlying real file system. It distinguishes between minimized and original
+/// underlying real file system, and the scanned preprocessor directives of
/// files.
///
/// It is sharded based on the hash of the key to reduce the lock contention for
/// the worker threads.
class DependencyScanningFilesystemSharedCache {
public:
- struct SharedFileSystemEntry {
- std::mutex ValueLock;
- CachedFileSystemEntry Value;
+ struct CacheShard {
+ /// The mutex that needs to be locked before mutation of any member.
+ mutable std::mutex CacheLock;
+
+ /// Map from filenames to cached entries.
+ llvm::StringMap<const CachedFileSystemEntry *, llvm::BumpPtrAllocator>
+ EntriesByFilename;
+
+ /// Map from unique IDs to cached entries.
+ llvm::DenseMap<llvm::sys::fs::UniqueID, const CachedFileSystemEntry *>
+ EntriesByUID;
+
+ /// The backing storage for cached entries.
+ llvm::SpecificBumpPtrAllocator<CachedFileSystemEntry> EntryStorage;
+
+ /// The backing storage for cached contents.
+ llvm::SpecificBumpPtrAllocator<CachedFileContents> ContentsStorage;
+
+ /// Returns entry associated with the filename or nullptr if none is found.
+ const CachedFileSystemEntry *findEntryByFilename(StringRef Filename) const;
+
+ /// Returns entry associated with the unique ID or nullptr if none is found.
+ const CachedFileSystemEntry *
+ findEntryByUID(llvm::sys::fs::UniqueID UID) const;
+
+ /// Returns entry associated with the filename if there is some. Otherwise,
+ /// constructs new one with the given status, associates it with the
+ /// filename and returns the result.
+ const CachedFileSystemEntry &
+ getOrEmplaceEntryForFilename(StringRef Filename,
+ llvm::ErrorOr<llvm::vfs::Status> Stat);
+
+ /// Returns entry associated with the unique ID if there is some. Otherwise,
+ /// constructs new one with the given status and contents, associates it
+ /// with the unique ID and returns the result.
+ const CachedFileSystemEntry &
+ getOrEmplaceEntryForUID(llvm::sys::fs::UniqueID UID, llvm::vfs::Status Stat,
+ std::unique_ptr<llvm::MemoryBuffer> Contents);
+
+ /// Returns entry associated with the filename if there is some. Otherwise,
+ /// associates the given entry with the filename and returns it.
+ const CachedFileSystemEntry &
+ getOrInsertEntryForFilename(StringRef Filename,
+ const CachedFileSystemEntry &Entry);
};
- /// Returns a cache entry for the corresponding key.
- ///
- /// A new cache entry is created if the key is not in the cache. This is a
- /// thread safe call.
- SharedFileSystemEntry &get(StringRef Key, bool Minimized);
+ DependencyScanningFilesystemSharedCache();
-private:
- class SingleCache {
- public:
- SingleCache();
-
- SharedFileSystemEntry &get(StringRef Key);
-
- private:
- struct CacheShard {
- std::mutex CacheLock;
- llvm::StringMap<SharedFileSystemEntry, llvm::BumpPtrAllocator> Cache;
- };
- std::unique_ptr<CacheShard[]> CacheShards;
- unsigned NumShards;
- };
+ /// Returns shard for the given key.
+ CacheShard &getShardForFilename(StringRef Filename) const;
+ CacheShard &getShardForUID(llvm::sys::fs::UniqueID UID) const;
- SingleCache CacheMinimized;
- SingleCache CacheOriginal;
+private:
+ std::unique_ptr<CacheShard[]> CacheShards;
+ unsigned NumShards;
};
/// This class is a local cache, that caches the 'stat' and 'open' calls to the
-/// underlying real file system. It distinguishes between minimized and original
-/// files.
+/// underlying real file system.
class DependencyScanningFilesystemLocalCache {
-private:
- using SingleCache =
- llvm::StringMap<const CachedFileSystemEntry *, llvm::BumpPtrAllocator>;
+ llvm::StringMap<const CachedFileSystemEntry *, llvm::BumpPtrAllocator> Cache;
- SingleCache CacheMinimized;
- SingleCache CacheOriginal;
+public:
+ /// Returns entry associated with the filename or nullptr if none is found.
+ const CachedFileSystemEntry *findEntryByFilename(StringRef Filename) const {
+ assert(llvm::sys::path::is_absolute_gnu(Filename));
+ auto It = Cache.find(Filename);
+ return It == Cache.end() ? nullptr : It->getValue();
+ }
- SingleCache &selectCache(bool Minimized) {
- return Minimized ? CacheMinimized : CacheOriginal;
+ /// Associates the given entry with the filename and returns the given entry
+ /// pointer (for convenience).
+ const CachedFileSystemEntry &
+ insertEntryForFilename(StringRef Filename,
+ const CachedFileSystemEntry &Entry) {
+ assert(llvm::sys::path::is_absolute_gnu(Filename));
+ const auto *InsertedEntry = Cache.insert({Filename, &Entry}).first->second;
+ assert(InsertedEntry == &Entry && "entry already present");
+ return *InsertedEntry;
}
+};
+
+/// Reference to a CachedFileSystemEntry.
+/// If the underlying entry is an opened file, this wrapper returns the file
+/// contents and the scanned preprocessor directives.
+class EntryRef {
+ /// The filename used to access this entry.
+ std::string Filename;
+
+ /// The underlying cached entry.
+ const CachedFileSystemEntry &Entry;
public:
- void setCachedEntry(StringRef Filename, bool Minimized,
- const CachedFileSystemEntry *Entry) {
- SingleCache &Cache = selectCache(Minimized);
- bool IsInserted = Cache.try_emplace(Filename, Entry).second;
- (void)IsInserted;
- assert(IsInserted && "local cache is updated more than once");
+ EntryRef(StringRef Name, const CachedFileSystemEntry &Entry)
+ : Filename(Name), Entry(Entry) {}
+
+ llvm::vfs::Status getStatus() const {
+ llvm::vfs::Status Stat = Entry.getStatus();
+ if (!Stat.isDirectory())
+ Stat = llvm::vfs::Status::copyWithNewSize(Stat, getContents().size());
+ return llvm::vfs::Status::copyWithNewName(Stat, Filename);
}
- const CachedFileSystemEntry *getCachedEntry(StringRef Filename,
- bool Minimized) {
- SingleCache &Cache = selectCache(Minimized);
- auto It = Cache.find(Filename);
- return It == Cache.end() ? nullptr : It->getValue();
+ bool isError() const { return Entry.isError(); }
+ bool isDirectory() const { return Entry.isDirectory(); }
+
+ /// If the cached entry represents an error, promotes it into `ErrorOr`.
+ llvm::ErrorOr<EntryRef> unwrapError() const {
+ if (isError())
+ return Entry.getError();
+ return *this;
+ }
+
+ StringRef getContents() const { return Entry.getOriginalContents(); }
+
+ std::optional<ArrayRef<dependency_directives_scan::Directive>>
+ getDirectiveTokens() const {
+ return Entry.getDirectiveTokens();
}
};
/// A virtual file system optimized for the dependency discovery.
///
-/// It is primarily designed to work with source files whose contents was was
+/// It is primarily designed to work with source files whose contents was
/// preprocessed to remove any tokens that are unlikely to affect the dependency
/// computation.
///
@@ -186,39 +284,126 @@ class DependencyScanningWorkerFilesystem : public llvm::vfs::ProxyFileSystem {
public:
DependencyScanningWorkerFilesystem(
DependencyScanningFilesystemSharedCache &SharedCache,
- IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
- ExcludedPreprocessorDirectiveSkipMapping *PPSkipMappings)
- : ProxyFileSystem(std::move(FS)), SharedCache(SharedCache),
- PPSkipMappings(PPSkipMappings) {}
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS);
llvm::ErrorOr<llvm::vfs::Status> status(const Twine &Path) override;
llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>>
openFileForRead(const Twine &Path) override;
- void clearIgnoredFiles() { IgnoredFiles.clear(); }
- void ignoreFile(StringRef Filename);
+ std::error_code setCurrentWorkingDirectory(const Twine &Path) override;
+
+ /// Returns entry for the given filename.
+ ///
+ /// Attempts to use the local and shared caches first, then falls back to
+ /// using the underlying filesystem.
+ llvm::ErrorOr<EntryRef>
+ getOrCreateFileSystemEntry(StringRef Filename,
+ bool DisableDirectivesScanning = false);
private:
- bool shouldIgnoreFile(StringRef Filename);
+ /// Check whether the file should be scanned for preprocessor directives.
+ bool shouldScanForDirectives(StringRef Filename);
+
+ /// For a filename that's not yet associated with any entry in the caches,
+ /// uses the underlying filesystem to either look up the entry based in the
+ /// shared cache indexed by unique ID, or creates new entry from scratch.
+ /// \p FilenameForLookup will always be an absolute path, and different than
+ /// \p OriginalFilename if \p OriginalFilename is relative.
+ llvm::ErrorOr<const CachedFileSystemEntry &>
+ computeAndStoreResult(StringRef OriginalFilename,
+ StringRef FilenameForLookup);
+
+ /// Scan for preprocessor directives for the given entry if necessary and
+ /// returns a wrapper object with reference semantics.
+ EntryRef scanForDirectivesIfNecessary(const CachedFileSystemEntry &Entry,
+ StringRef Filename, bool Disable);
+
+ /// Represents a filesystem entry that has been stat-ed (and potentially read)
+ /// and that's about to be inserted into the cache as `CachedFileSystemEntry`.
+ struct TentativeEntry {
+ llvm::vfs::Status Status;
+ std::unique_ptr<llvm::MemoryBuffer> Contents;
+
+ TentativeEntry(llvm::vfs::Status Status,
+ std::unique_ptr<llvm::MemoryBuffer> Contents = nullptr)
+ : Status(std::move(Status)), Contents(std::move(Contents)) {}
+ };
+
+ /// Reads file at the given path. Enforces consistency between the file size
+ /// in status and size of read contents.
+ llvm::ErrorOr<TentativeEntry> readFile(StringRef Filename);
- llvm::ErrorOr<const CachedFileSystemEntry *>
- getOrCreateFileSystemEntry(const StringRef Filename);
+ /// Returns entry associated with the unique ID of the given tentative entry
+ /// if there is some in the shared cache. Otherwise, constructs new one,
+ /// associates it with the unique ID and returns the result.
+ const CachedFileSystemEntry &
+ getOrEmplaceSharedEntryForUID(TentativeEntry TEntry);
+
+ /// Returns entry associated with the filename or nullptr if none is found.
+ ///
+ /// Returns entry from local cache if there is some. Otherwise, if the entry
+ /// is found in the shared cache, writes it through the local cache and
+ /// returns it. Otherwise returns nullptr.
+ const CachedFileSystemEntry *
+ findEntryByFilenameWithWriteThrough(StringRef Filename);
+
+ /// Returns entry associated with the unique ID in the shared cache or nullptr
+ /// if none is found.
+ const CachedFileSystemEntry *
+ findSharedEntryByUID(llvm::vfs::Status Stat) const {
+ return SharedCache.getShardForUID(Stat.getUniqueID())
+ .findEntryByUID(Stat.getUniqueID());
+ }
+
+ /// Associates the given entry with the filename in the local cache and
+ /// returns it.
+ const CachedFileSystemEntry &
+ insertLocalEntryForFilename(StringRef Filename,
+ const CachedFileSystemEntry &Entry) {
+ return LocalCache.insertEntryForFilename(Filename, Entry);
+ }
+
+ /// Returns entry associated with the filename in the shared cache if there is
+ /// some. Otherwise, constructs new one with the given error code, associates
+ /// it with the filename and returns the result.
+ const CachedFileSystemEntry &
+ getOrEmplaceSharedEntryForFilename(StringRef Filename, std::error_code EC) {
+ return SharedCache.getShardForFilename(Filename)
+ .getOrEmplaceEntryForFilename(Filename, EC);
+ }
+
+ /// Returns entry associated with the filename in the shared cache if there is
+ /// some. Otherwise, associates the given entry with the filename and returns
+ /// it.
+ const CachedFileSystemEntry &
+ getOrInsertSharedEntryForFilename(StringRef Filename,
+ const CachedFileSystemEntry &Entry) {
+ return SharedCache.getShardForFilename(Filename)
+ .getOrInsertEntryForFilename(Filename, Entry);
+ }
+
+ void printImpl(raw_ostream &OS, PrintType Type,
+ unsigned IndentLevel) const override {
+ printIndent(OS, IndentLevel);
+ OS << "DependencyScanningFilesystem\n";
+ getUnderlyingFS().print(OS, Type, IndentLevel + 1);
+ }
/// The global cache shared between worker threads.
DependencyScanningFilesystemSharedCache &SharedCache;
/// The local cache is used by the worker thread to cache file system queries
/// locally instead of querying the global cache every time.
- DependencyScanningFilesystemLocalCache Cache;
- /// The optional mapping structure which records information about the
- /// excluded conditional directive skip mappings that are used by the
- /// currently active preprocessor.
- ExcludedPreprocessorDirectiveSkipMapping *PPSkipMappings;
- /// The set of files that should not be minimized.
- llvm::StringSet<> IgnoredFiles;
+ DependencyScanningFilesystemLocalCache LocalCache;
+
+ /// The working directory to use for making relative paths absolute before
+ /// using them for cache lookups.
+ llvm::ErrorOr<std::string> WorkingDirForCacheLookup;
+
+ void updateWorkingDirForCacheLookup();
};
} // end namespace dependencies
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_FILESYSTEM_H
+#endif // LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGFILESYSTEM_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningService.h b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningService.h
index 76edf150dbee..dcdf1c171f6d 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningService.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningService.h
@@ -6,10 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_SERVICE_H
-#define LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_SERVICE_H
+#ifndef LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGSERVICE_H
+#define LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGSERVICE_H
#include "clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h"
+#include "llvm/ADT/BitmaskEnum.h"
namespace clang {
namespace tooling {
@@ -19,15 +20,13 @@ namespace dependencies {
/// dependencies.
enum class ScanningMode {
/// This mode is used to compute the dependencies by running the preprocessor
- /// over
- /// the unmodified source files.
+ /// over the source files.
CanonicalPreprocessing,
/// This mode is used to compute the dependencies by running the preprocessor
- /// over
- /// the source files that have been minimized to contents that might affect
- /// the dependencies.
- MinimizedSourcePreprocessing
+ /// with special kind of lexing after scanning header and source files to get
+ /// the minimum necessary preprocessor directives for evaluating includes.
+ DependencyDirectivesScan,
};
/// The format that is output by the dependency scanner.
@@ -37,26 +36,45 @@ enum class ScanningOutputFormat {
/// intermodule dependency information.
Make,
- /// This outputs the full module dependency graph suitable for use for
+ /// This outputs the full clang module dependency graph suitable for use for
/// explicitly building modules.
Full,
+
+ /// This outputs the dependency graph for standard c++ modules in P1689R5
+ /// format.
+ P1689,
+};
+
+enum class ScanningOptimizations {
+ None = 0,
+
+ /// Remove unused header search paths including header maps.
+ HeaderSearch = 1,
+
+ /// Remove warnings from system modules.
+ SystemWarnings = 2,
+
+ LLVM_MARK_AS_BITMASK_ENUM(SystemWarnings),
+ All = HeaderSearch | SystemWarnings,
+ Default = All
};
-/// The dependency scanning service contains the shared state that is used by
-/// the invidual dependency scanning workers.
+/// The dependency scanning service contains shared configuration and state that
+/// is used by the individual dependency scanning workers.
class DependencyScanningService {
public:
- DependencyScanningService(ScanningMode Mode, ScanningOutputFormat Format,
- bool ReuseFileManager = true,
- bool SkipExcludedPPRanges = true);
+ DependencyScanningService(
+ ScanningMode Mode, ScanningOutputFormat Format,
+ ScanningOptimizations OptimizeArgs = ScanningOptimizations::Default,
+ bool EagerLoadModules = false);
ScanningMode getMode() const { return Mode; }
ScanningOutputFormat getFormat() const { return Format; }
- bool canReuseFileManager() const { return ReuseFileManager; }
+ ScanningOptimizations getOptimizeArgs() const { return OptimizeArgs; }
- bool canSkipExcludedPPRanges() const { return SkipExcludedPPRanges; }
+ bool shouldEagerLoadModules() const { return EagerLoadModules; }
DependencyScanningFilesystemSharedCache &getSharedCache() {
return SharedCache;
@@ -65,11 +83,10 @@ public:
private:
const ScanningMode Mode;
const ScanningOutputFormat Format;
- const bool ReuseFileManager;
- /// Set to true to use the preprocessor optimization that skips excluded PP
- /// ranges by bumping the buffer pointer in the lexer instead of lexing the
- /// tokens in the range until reaching the corresponding directive.
- const bool SkipExcludedPPRanges;
+ /// Whether to optimize the modules' command-line arguments.
+ const ScanningOptimizations OptimizeArgs;
+ /// Whether to set up command-lines to load PCM files eagerly.
+ const bool EagerLoadModules;
/// The global file system cache.
DependencyScanningFilesystemSharedCache SharedCache;
};
@@ -78,4 +95,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_SERVICE_H
+#endif // LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGSERVICE_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h
index f88dc472c80b..cb9476d1550d 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h
@@ -6,22 +6,35 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_TOOL_H
-#define LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_TOOL_H
+#ifndef LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGTOOL_H
+#define LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGTOOL_H
#include "clang/Tooling/DependencyScanning/DependencyScanningService.h"
#include "clang/Tooling/DependencyScanning/DependencyScanningWorker.h"
#include "clang/Tooling/DependencyScanning/ModuleDepCollector.h"
#include "clang/Tooling/JSONCompilationDatabase.h"
-#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/MapVector.h"
+#include <optional>
#include <string>
+#include <vector>
-namespace clang{
-namespace tooling{
-namespace dependencies{
+namespace clang {
+namespace tooling {
+namespace dependencies {
+
+/// A callback to lookup module outputs for "-fmodule-file=", "-o" etc.
+using LookupModuleOutputCallback =
+ llvm::function_ref<std::string(const ModuleID &, ModuleOutputKind)>;
+
+/// Graph of modular dependencies.
+using ModuleDepsGraph = std::vector<ModuleDeps>;
/// The full dependencies and module graph for a specific input.
-struct FullDependencies {
+struct TranslationUnitDeps {
+ /// The graph of direct and transitive modular dependencies.
+ ModuleDepsGraph ModuleGraph;
+
/// The identifier of the C++20 module this translation unit exports.
///
/// If the translation unit is not a module then \c ID.ModuleName is empty.
@@ -42,30 +55,22 @@ struct FullDependencies {
/// determined that the differences are benign for this compilation.
std::vector<ModuleID> ClangModuleDeps;
- /// Get additional arguments suitable for appending to the original Clang
- /// command line.
+ /// The sequence of commands required to build the translation unit. Commands
+ /// should be executed in order.
///
- /// \param LookupPCMPath This function is called to fill in "-fmodule-file="
- /// arguments and the "-o" argument. It needs to return
- /// a path for where the PCM for the given module is to
- /// be located.
- /// \param LookupModuleDeps This function is called to collect the full
- /// transitive set of dependencies for this
- /// compilation and fill in "-fmodule-map-file="
- /// arguments.
- std::vector<std::string> getAdditionalArgs(
- std::function<StringRef(ModuleID)> LookupPCMPath,
- std::function<const ModuleDeps &(ModuleID)> LookupModuleDeps) const;
-
- /// Get additional arguments suitable for appending to the original Clang
- /// command line, excluding arguments containing modules-related paths:
- /// "-fmodule-file=", "-fmodule-map-file=".
- std::vector<std::string> getAdditionalArgsWithoutModulePaths() const;
+ /// FIXME: If we add support for multi-arch builds in clang-scan-deps, we
+ /// should make the dependencies between commands explicit to enable parallel
+ /// builds of each architecture.
+ std::vector<Command> Commands;
+
+ /// Deprecated driver command-line. This will be removed in a future version.
+ std::vector<std::string> DriverCommandLine;
};
-struct FullDependenciesResult {
- FullDependencies FullDeps;
- std::vector<ModuleDeps> DiscoveredModules;
+struct P1689Rule {
+ std::string PrimaryOutput;
+ std::optional<P1689ModuleInfo> Provides;
+ std::vector<P1689ModuleInfo> Requires;
};
/// The high-level implementation of the dependency discovery tool that runs on
@@ -73,7 +78,9 @@ struct FullDependenciesResult {
class DependencyScanningTool {
public:
/// Construct a dependency scanning tool.
- DependencyScanningTool(DependencyScanningService &Service);
+ DependencyScanningTool(DependencyScanningService &Service,
+ llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS =
+ llvm::vfs::createPhysicalFileSystem());
/// Print out the dependency information into a string using the dependency
/// file format that is specified in the options (-MD is the default) and
@@ -82,30 +89,137 @@ public:
/// \returns A \c StringError with the diagnostic output if clang errors
/// occurred, dependency file contents otherwise.
llvm::Expected<std::string>
- getDependencyFile(const tooling::CompilationDatabase &Compilations,
- StringRef CWD);
+ getDependencyFile(const std::vector<std::string> &CommandLine, StringRef CWD);
- /// Collect the full module dependency graph for the input, ignoring any
- /// modules which have already been seen.
+ /// Collect the module dependency in P1689 format for C++20 named modules.
+ ///
+ /// \param MakeformatOutput The output parameter for dependency information
+ /// in make format if the command line requires to generate make-format
+ /// dependency information by `-MD -MF <dep_file>`.
+ ///
+ /// \param MakeformatOutputPath The output parameter for the path to
+ /// \param MakeformatOutput.
+ ///
+ /// \returns A \c StringError with the diagnostic output if clang errors
+ /// occurred, P1689 dependency format rules otherwise.
+ llvm::Expected<P1689Rule>
+ getP1689ModuleDependencyFile(const clang::tooling::CompileCommand &Command,
+ StringRef CWD, std::string &MakeformatOutput,
+ std::string &MakeformatOutputPath);
+ llvm::Expected<P1689Rule>
+ getP1689ModuleDependencyFile(const clang::tooling::CompileCommand &Command,
+ StringRef CWD) {
+ std::string MakeformatOutput;
+ std::string MakeformatOutputPath;
+
+ return getP1689ModuleDependencyFile(Command, CWD, MakeformatOutput,
+ MakeformatOutputPath);
+ }
+
+ /// Given a Clang driver command-line for a translation unit, gather the
+ /// modular dependencies and return the information needed for explicit build.
///
/// \param AlreadySeen This stores modules which have previously been
/// reported. Use the same instance for all calls to this
/// function for a single \c DependencyScanningTool in a
/// single build. Use a different one for different tools,
/// and clear it between builds.
+ /// \param LookupModuleOutput This function is called to fill in
+ /// "-fmodule-file=", "-o" and other output
+ /// arguments for dependencies.
///
/// \returns a \c StringError with the diagnostic output if clang errors
- /// occurred, \c FullDependencies otherwise.
- llvm::Expected<FullDependenciesResult>
- getFullDependencies(const tooling::CompilationDatabase &Compilations,
- StringRef CWD, const llvm::StringSet<> &AlreadySeen);
+ /// occurred, \c TranslationUnitDeps otherwise.
+ llvm::Expected<TranslationUnitDeps>
+ getTranslationUnitDependencies(const std::vector<std::string> &CommandLine,
+ StringRef CWD,
+ const llvm::DenseSet<ModuleID> &AlreadySeen,
+ LookupModuleOutputCallback LookupModuleOutput);
+
+ /// Given a compilation context specified via the Clang driver command-line,
+ /// gather modular dependencies of module with the given name, and return the
+ /// information needed for explicit build.
+ llvm::Expected<ModuleDepsGraph> getModuleDependencies(
+ StringRef ModuleName, const std::vector<std::string> &CommandLine,
+ StringRef CWD, const llvm::DenseSet<ModuleID> &AlreadySeen,
+ LookupModuleOutputCallback LookupModuleOutput);
private:
DependencyScanningWorker Worker;
};
+class FullDependencyConsumer : public DependencyConsumer {
+public:
+ FullDependencyConsumer(const llvm::DenseSet<ModuleID> &AlreadySeen)
+ : AlreadySeen(AlreadySeen) {}
+
+ void handleBuildCommand(Command Cmd) override {
+ Commands.push_back(std::move(Cmd));
+ }
+
+ void handleDependencyOutputOpts(const DependencyOutputOptions &) override {}
+
+ void handleFileDependency(StringRef File) override {
+ Dependencies.push_back(std::string(File));
+ }
+
+ void handlePrebuiltModuleDependency(PrebuiltModuleDep PMD) override {
+ PrebuiltModuleDeps.emplace_back(std::move(PMD));
+ }
+
+ void handleModuleDependency(ModuleDeps MD) override {
+ ClangModuleDeps[MD.ID] = std::move(MD);
+ }
+
+ void handleDirectModuleDependency(ModuleID ID) override {
+ DirectModuleDeps.push_back(ID);
+ }
+
+ void handleContextHash(std::string Hash) override {
+ ContextHash = std::move(Hash);
+ }
+
+ TranslationUnitDeps takeTranslationUnitDeps();
+ ModuleDepsGraph takeModuleGraphDeps();
+
+private:
+ std::vector<std::string> Dependencies;
+ std::vector<PrebuiltModuleDep> PrebuiltModuleDeps;
+ llvm::MapVector<ModuleID, ModuleDeps> ClangModuleDeps;
+ std::vector<ModuleID> DirectModuleDeps;
+ std::vector<Command> Commands;
+ std::string ContextHash;
+ std::vector<std::string> OutputPaths;
+ const llvm::DenseSet<ModuleID> &AlreadySeen;
+};
+
+/// A simple dependency action controller that uses a callback. If no callback
+/// is provided, it is assumed that looking up module outputs is unreachable.
+class CallbackActionController : public DependencyActionController {
+public:
+ virtual ~CallbackActionController();
+
+ CallbackActionController(LookupModuleOutputCallback LMO)
+ : LookupModuleOutput(std::move(LMO)) {
+ if (!LookupModuleOutput) {
+ LookupModuleOutput = [](const ModuleID &,
+ ModuleOutputKind) -> std::string {
+ llvm::report_fatal_error("unexpected call to lookupModuleOutput");
+ };
+ }
+ }
+
+ std::string lookupModuleOutput(const ModuleID &ID,
+ ModuleOutputKind Kind) override {
+ return LookupModuleOutput(ID, Kind);
+ }
+
+private:
+ LookupModuleOutputCallback LookupModuleOutput;
+};
+
} // end namespace dependencies
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_TOOL_H
+#endif // LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGTOOL_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h
index 5903ad13c1d8..0f607862194b 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h
@@ -6,19 +6,18 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_WORKER_H
-#define LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_WORKER_H
+#ifndef LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGWORKER_H
+#define LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGWORKER_H
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LLVM.h"
#include "clang/Frontend/PCHContainerOperations.h"
-#include "clang/Lex/PreprocessorExcludedConditionalDirectiveSkipMapping.h"
-#include "clang/Tooling/CompilationDatabase.h"
#include "clang/Tooling/DependencyScanning/DependencyScanningService.h"
#include "clang/Tooling/DependencyScanning/ModuleDepCollector.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FileSystem.h"
+#include <optional>
#include <string>
namespace clang {
@@ -30,10 +29,24 @@ namespace dependencies {
class DependencyScanningWorkerFilesystem;
+/// A command-line tool invocation that is part of building a TU.
+///
+/// \see TranslationUnitDeps::Commands.
+struct Command {
+ std::string Executable;
+ std::vector<std::string> Arguments;
+};
+
class DependencyConsumer {
public:
virtual ~DependencyConsumer() {}
+ virtual void handleProvidedAndRequiredStdCXXModules(
+ std::optional<P1689ModuleInfo> Provided,
+ std::vector<P1689ModuleInfo> Requires) {}
+
+ virtual void handleBuildCommand(Command Cmd) {}
+
virtual void
handleDependencyOutputOpts(const DependencyOutputOptions &Opts) = 0;
@@ -43,9 +56,21 @@ public:
virtual void handleModuleDependency(ModuleDeps MD) = 0;
+ virtual void handleDirectModuleDependency(ModuleID MD) = 0;
+
virtual void handleContextHash(std::string Hash) = 0;
};
+/// Dependency scanner callbacks that are used during scanning to influence the
+/// behaviour of the scan - for example, to customize the scanned invocations.
+class DependencyActionController {
+public:
+ virtual ~DependencyActionController();
+
+ virtual std::string lookupModuleOutput(const ModuleID &ID,
+ ModuleOutputKind Kind) = 0;
+};
+
/// An individual dependency scanning worker that is able to run on its own
/// thread.
///
@@ -54,37 +79,50 @@ public:
/// using the regular processing run.
class DependencyScanningWorker {
public:
- DependencyScanningWorker(DependencyScanningService &Service);
+ DependencyScanningWorker(DependencyScanningService &Service,
+ llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS);
- /// Run the dependency scanning tool for a given clang driver invocation (as
- /// specified for the given Input in the CDB), and report the discovered
- /// dependencies to the provided consumer.
+ /// Run the dependency scanning tool for a given clang driver command-line,
+ /// and report the discovered dependencies to the provided consumer. If \p
+ /// ModuleName isn't empty, this function reports the dependencies of module
+ /// \p ModuleName.
///
+ /// \returns false if clang errors occurred (with diagnostics reported to
+ /// \c DiagConsumer), true otherwise.
+ bool computeDependencies(StringRef WorkingDirectory,
+ const std::vector<std::string> &CommandLine,
+ DependencyConsumer &DepConsumer,
+ DependencyActionController &Controller,
+ DiagnosticConsumer &DiagConsumer,
+ std::optional<StringRef> ModuleName = std::nullopt);
/// \returns A \c StringError with the diagnostic output if clang errors
/// occurred, success otherwise.
- llvm::Error computeDependencies(const std::string &Input,
- StringRef WorkingDirectory,
- const CompilationDatabase &CDB,
- DependencyConsumer &Consumer);
+ llvm::Error computeDependencies(
+ StringRef WorkingDirectory, const std::vector<std::string> &CommandLine,
+ DependencyConsumer &Consumer, DependencyActionController &Controller,
+ std::optional<StringRef> ModuleName = std::nullopt);
+
+ bool shouldEagerLoadModules() const { return EagerLoadModules; }
private:
- IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts;
std::shared_ptr<PCHContainerOperations> PCHContainerOps;
- std::unique_ptr<ExcludedPreprocessorDirectiveSkipMapping> PPSkipMappings;
-
- llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> RealFS;
- /// The file system that is used by each worker when scanning for
- /// dependencies. This filesystem persists accross multiple compiler
- /// invocations.
+ /// The file system to be used during the scan.
+ /// This is either \c FS passed in the constructor (when performing canonical
+ /// preprocessing), or \c DepFS (when performing dependency directives scan).
+ llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> BaseFS;
+ /// When performing dependency directives scan, this is the caching (and
+ /// dependency-directives-extracting) filesystem overlaid on top of \c FS
+ /// (passed in the constructor).
llvm::IntrusiveRefCntPtr<DependencyScanningWorkerFilesystem> DepFS;
- /// The file manager that is reused accross multiple invocations by this
- /// worker. If null, the file manager will not be reused.
- llvm::IntrusiveRefCntPtr<FileManager> Files;
ScanningOutputFormat Format;
+ /// Whether to optimize the modules' command-line arguments.
+ ScanningOptimizations OptimizeArgs;
+ /// Whether to set up command-lines to load PCM files eagerly.
+ bool EagerLoadModules;
};
} // end namespace dependencies
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_WORKER_H
+#endif // LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_DEPENDENCYSCANNINGWORKER_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h
index a9f2b4d0c6fc..051363b075de 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h
@@ -1,14 +1,13 @@
//===- ModuleDepCollector.h - Callbacks to collect deps ---------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_MODULE_DEP_COLLECTOR_H
-#define LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_MODULE_DEP_COLLECTOR_H
+#ifndef LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_MODULEDEPCOLLECTOR_H
+#define LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_MODULEDEPCOLLECTOR_H
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
@@ -17,16 +16,21 @@
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/PPCallbacks.h"
#include "clang/Serialization/ASTReader.h"
+#include "clang/Tooling/DependencyScanning/DependencyScanningService.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/raw_ostream.h"
+#include <optional>
#include <string>
#include <unordered_map>
+#include <variant>
namespace clang {
namespace tooling {
namespace dependencies {
+class DependencyActionController;
class DependencyConsumer;
/// Modular dependency that has already been built prior to the dependency scan.
@@ -47,25 +51,61 @@ struct ModuleID {
/// or a header-name for C++20 header units.
std::string ModuleName;
- /// The context hash of a module represents the set of compiler options that
- /// may make one version of a module incompatible with another. This includes
- /// things like language mode, predefined macros, header search paths, etc...
+ /// The context hash of a module represents the compiler options that affect
+ /// the resulting command-line invocation.
+ ///
+ /// Modules with the same name and ContextHash but different invocations could
+ /// cause non-deterministic build results.
///
/// Modules with the same name but a different \c ContextHash should be
/// treated as separate modules for the purpose of a build.
std::string ContextHash;
bool operator==(const ModuleID &Other) const {
- return ModuleName == Other.ModuleName && ContextHash == Other.ContextHash;
+ return std::tie(ModuleName, ContextHash) ==
+ std::tie(Other.ModuleName, Other.ContextHash);
}
-};
-struct ModuleIDHasher {
- std::size_t operator()(const ModuleID &MID) const {
- return llvm::hash_combine(MID.ModuleName, MID.ContextHash);
+ bool operator<(const ModuleID& Other) const {
+ return std::tie(ModuleName, ContextHash) <
+ std::tie(Other.ModuleName, Other.ContextHash);
}
};
+/// P1689ModuleInfo - Represents the needed information of standard C++20
+/// modules for P1689 format.
+struct P1689ModuleInfo {
+ /// The name of the module. This may include `:` for partitions.
+ std::string ModuleName;
+
+ /// Optional. The source path to the module.
+ std::string SourcePath;
+
+ /// If this module is a standard c++ interface unit.
+ bool IsStdCXXModuleInterface = true;
+
+ enum class ModuleType {
+ NamedCXXModule
+ // To be supported
+ // AngleHeaderUnit,
+ // QuoteHeaderUnit
+ };
+ ModuleType Type = ModuleType::NamedCXXModule;
+};
+
+/// An output from a module compilation, such as the path of the module file.
+enum class ModuleOutputKind {
+ /// The module file (.pcm). Required.
+ ModuleFile,
+ /// The path of the dependency file (.d), if any.
+ DependencyFile,
+ /// The null-separated list of names to use as the targets in the dependency
+ /// file, if any. Defaults to the value of \c ModuleFile, as in the driver.
+ DependencyTargets,
+ /// The path of the serialized diagnostic file (.dia), if any.
+ DiagnosticSerializationFile,
+};
+
struct ModuleDeps {
/// The identifier of the module.
ModuleID ID;
@@ -79,13 +119,14 @@ struct ModuleDeps {
/// additionally appear in \c FileDeps as a dependency.
std::string ClangModuleMapFile;
- /// The path to where an implicit build would put the PCM for this module.
- std::string ImplicitModulePCMPath;
-
/// A collection of absolute paths to files that this module directly depends
/// on, not including transitive dependencies.
llvm::StringSet<> FileDeps;
+ /// A collection of absolute paths to module map files that this module needs
+ /// to know about. The ordering is significant.
+ std::vector<std::string> ModuleMapFileDeps;
+
/// A collection of prebuilt modular dependencies this module directly depends
/// on, not including transitive dependencies.
std::vector<PrebuiltModuleDep> PrebuiltModuleDeps;
@@ -97,43 +138,17 @@ struct ModuleDeps {
/// determined that the differences are benign for this compilation.
std::vector<ModuleID> ClangModuleDeps;
- // Used to track which modules that were discovered were directly imported by
- // the primary TU.
- bool ImportedByMainFile = false;
+ /// Get (or compute) the compiler invocation that can be used to build this
+ /// module. Does not include argv[0].
+ const std::vector<std::string> &getBuildArguments();
- /// Compiler invocation that can be used to build this module (without paths).
- CompilerInvocation Invocation;
+private:
+ friend class ModuleDepCollectorPP;
- /// Gets the canonical command line suitable for passing to clang.
- ///
- /// \param LookupPCMPath This function is called to fill in "-fmodule-file="
- /// arguments and the "-o" argument. It needs to return
- /// a path for where the PCM for the given module is to
- /// be located.
- /// \param LookupModuleDeps This function is called to collect the full
- /// transitive set of dependencies for this
- /// compilation and fill in "-fmodule-map-file="
- /// arguments.
- std::vector<std::string> getCanonicalCommandLine(
- std::function<StringRef(ModuleID)> LookupPCMPath,
- std::function<const ModuleDeps &(ModuleID)> LookupModuleDeps) const;
-
- /// Gets the canonical command line suitable for passing to clang, excluding
- /// arguments containing modules-related paths: "-fmodule-file=", "-o",
- /// "-fmodule-map-file=".
- std::vector<std::string> getCanonicalCommandLineWithoutModulePaths() const;
+ std::variant<std::monostate, CowCompilerInvocation, std::vector<std::string>>
+ BuildInfo;
};
-namespace detail {
-/// Collect the paths of PCM and module map files for the modules in \c Modules
-/// transitively.
-void collectPCMAndModuleMapPaths(
- llvm::ArrayRef<ModuleID> Modules,
- std::function<StringRef(ModuleID)> LookupPCMPath,
- std::function<const ModuleDeps &(ModuleID)> LookupModuleDeps,
- std::vector<std::string> &PCMPaths, std::vector<std::string> &ModMapPaths);
-} // namespace detail
-
class ModuleDepCollector;
/// Callback that records textual includes and direct modular includes/imports
@@ -142,17 +157,16 @@ class ModuleDepCollector;
/// \c DependencyConsumer of the parent \c ModuleDepCollector.
class ModuleDepCollectorPP final : public PPCallbacks {
public:
- ModuleDepCollectorPP(CompilerInstance &I, ModuleDepCollector &MDC)
- : Instance(I), MDC(MDC) {}
+ ModuleDepCollectorPP(ModuleDepCollector &MDC) : MDC(MDC) {}
- void FileChanged(SourceLocation Loc, FileChangeReason Reason,
- SrcMgr::CharacteristicKind FileType,
- FileID PrevFID) override;
+ void LexedFileChanged(FileID FID, LexedFileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType, FileID PrevFID,
+ SourceLocation Loc) override;
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
- CharSourceRange FilenameRange, const FileEntry *File,
- StringRef SearchPath, StringRef RelativePath,
- const Module *Imported,
+ CharSourceRange FilenameRange,
+ OptionalFileEntryRef File, StringRef SearchPath,
+ StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) override;
void moduleImport(SourceLocation ImportLoc, ModuleIdPath Path,
const Module *Imported) override;
@@ -160,29 +174,35 @@ public:
void EndOfMainFile() override;
private:
- /// The compiler instance for the current translation unit.
- CompilerInstance &Instance;
/// The parent dependency collector.
ModuleDepCollector &MDC;
- /// Working set of direct modular dependencies.
- llvm::DenseSet<const Module *> DirectModularDeps;
- /// Working set of direct modular dependencies that have already been built.
- llvm::DenseSet<const Module *> DirectPrebuiltModularDeps;
void handleImport(const Module *Imported);
/// Adds direct modular dependencies that have already been built to the
/// ModuleDeps instance.
- void addDirectPrebuiltModuleDeps(const Module *M, ModuleDeps &MD);
+ void
+ addAllSubmodulePrebuiltDeps(const Module *M, ModuleDeps &MD,
+ llvm::DenseSet<const Module *> &SeenSubmodules);
+ void addModulePrebuiltDeps(const Module *M, ModuleDeps &MD,
+ llvm::DenseSet<const Module *> &SeenSubmodules);
/// Traverses the previously collected direct modular dependencies to discover
/// transitive modular dependencies and fills the parent \c ModuleDepCollector
/// with both.
- ModuleID handleTopLevelModule(const Module *M);
+ /// Returns the ID or nothing if the dependency is spurious and is ignored.
+ std::optional<ModuleID> handleTopLevelModule(const Module *M);
void addAllSubmoduleDeps(const Module *M, ModuleDeps &MD,
llvm::DenseSet<const Module *> &AddedModules);
void addModuleDep(const Module *M, ModuleDeps &MD,
llvm::DenseSet<const Module *> &AddedModules);
+
+ /// Traverses the affecting modules and updates \c MD with references to the
+ /// parent \c ModuleDepCollector info.
+ void addAllAffectingClangModules(const Module *M, ModuleDeps &MD,
+ llvm::DenseSet<const Module *> &AddedModules);
+ void addAffectingClangModule(const Module *M, ModuleDeps &MD,
+ llvm::DenseSet<const Module *> &AddedModules);
};
/// Collects modular and non-modular dependencies of the main file by attaching
@@ -190,19 +210,28 @@ private:
class ModuleDepCollector final : public DependencyCollector {
public:
ModuleDepCollector(std::unique_ptr<DependencyOutputOptions> Opts,
- CompilerInstance &I, DependencyConsumer &C,
- CompilerInvocation &&OriginalCI);
+ CompilerInstance &ScanInstance, DependencyConsumer &C,
+ DependencyActionController &Controller,
+ CompilerInvocation OriginalCI,
+ ScanningOptimizations OptimizeArgs, bool EagerLoadModules,
+ bool IsStdModuleP1689Format);
void attachToPreprocessor(Preprocessor &PP) override;
void attachToASTReader(ASTReader &R) override;
+ /// Apply any changes implied by the discovered dependencies to the given
+ /// invocation, (e.g. disable implicit modules, add explicit module paths).
+ void applyDiscoveredDependencies(CompilerInvocation &CI);
+
private:
friend ModuleDepCollectorPP;
- /// The compiler instance for the current translation unit.
- CompilerInstance &Instance;
+ /// The compiler instance for scanning the current translation unit.
+ CompilerInstance &ScanInstance;
/// The consumer of collected dependency information.
DependencyConsumer &Consumer;
+ /// Callbacks for computing dependency information.
+ DependencyActionController &Controller;
/// Path to the main source file.
std::string MainFile;
/// Hash identifying the compilation conditions of the current TU.
@@ -211,24 +240,88 @@ private:
/// textually included header files.
std::vector<std::string> FileDeps;
/// Direct and transitive modular dependencies of the main source file.
- std::unordered_map<const Module *, ModuleDeps> ModularDeps;
+ llvm::MapVector<const Module *, std::unique_ptr<ModuleDeps>> ModularDeps;
+ /// Secondary mapping for \c ModularDeps allowing lookup by ModuleID without
+ /// a preprocessor. Storage owned by \c ModularDeps.
+ llvm::DenseMap<ModuleID, ModuleDeps *> ModuleDepsByID;
+ /// Direct modular dependencies that have already been built.
+ llvm::MapVector<const Module *, PrebuiltModuleDep> DirectPrebuiltModularDeps;
+ /// Working set of direct modular dependencies.
+ llvm::SetVector<const Module *> DirectModularDeps;
/// Options that control the dependency output generation.
std::unique_ptr<DependencyOutputOptions> Opts;
- /// The original Clang invocation passed to dependency scanner.
- CompilerInvocation OriginalInvocation;
+ /// A Clang invocation that's based on the original TU invocation and that has
+ /// been partially transformed into one that can perform explicit build of
+ /// a discovered modular dependency. Note that this still needs to be adjusted
+ /// for each individual module.
+ CowCompilerInvocation CommonInvocation;
+ /// Whether to optimize the modules' command-line arguments.
+ ScanningOptimizations OptimizeArgs;
+ /// Whether to set up command-lines to load PCM files eagerly.
+ bool EagerLoadModules;
+ /// If we're generating dependency output in P1689 format
+ /// for standard C++ modules.
+ bool IsStdModuleP1689Format;
+
+ std::optional<P1689ModuleInfo> ProvidedStdCXXModule;
+ std::vector<P1689ModuleInfo> RequiredStdCXXModules;
/// Checks whether the module is known as being prebuilt.
bool isPrebuiltModule(const Module *M);
- /// Constructs a CompilerInvocation that can be used to build the given
- /// module, excluding paths to discovered modular dependencies that are yet to
- /// be built.
- CompilerInvocation
- makeInvocationForModuleBuildWithoutPaths(const ModuleDeps &Deps) const;
+ /// Adds \p Path to \c FileDeps, making it absolute if necessary.
+ void addFileDep(StringRef Path);
+ /// Adds \p Path to \c MD.FileDeps, making it absolute if necessary.
+ void addFileDep(ModuleDeps &MD, StringRef Path);
+
+ /// Get a Clang invocation adjusted to build the given modular dependency.
+ /// This excludes paths that are yet-to-be-provided by the build system.
+ CowCompilerInvocation getInvocationAdjustedForModuleBuildWithoutOutputs(
+ const ModuleDeps &Deps,
+ llvm::function_ref<void(CowCompilerInvocation &)> Optimize) const;
+
+ /// Collect module map files for given modules.
+ llvm::DenseSet<const FileEntry *>
+ collectModuleMapFiles(ArrayRef<ModuleID> ClangModuleDeps) const;
+
+ /// Add module map files to the invocation, if needed.
+ void addModuleMapFiles(CompilerInvocation &CI,
+ ArrayRef<ModuleID> ClangModuleDeps) const;
+ /// Add module files (pcm) to the invocation, if needed.
+ void addModuleFiles(CompilerInvocation &CI,
+ ArrayRef<ModuleID> ClangModuleDeps) const;
+ void addModuleFiles(CowCompilerInvocation &CI,
+ ArrayRef<ModuleID> ClangModuleDeps) const;
+
+ /// Add paths that require looking up outputs to the given dependencies.
+ void addOutputPaths(CowCompilerInvocation &CI, ModuleDeps &Deps);
+
+ /// Compute the context hash for \p Deps, and create the mapping
+ /// \c ModuleDepsByID[Deps.ID] = &Deps.
+ void associateWithContextHash(const CowCompilerInvocation &CI,
+ ModuleDeps &Deps);
};
} // end namespace dependencies
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_DEPENDENCY_SCANNING_MODULE_DEP_COLLECTOR_H
+namespace llvm {
+inline hash_code hash_value(const clang::tooling::dependencies::ModuleID &ID) {
+ return hash_combine(ID.ModuleName, ID.ContextHash);
+}
+
+template <> struct DenseMapInfo<clang::tooling::dependencies::ModuleID> {
+ using ModuleID = clang::tooling::dependencies::ModuleID;
+ static inline ModuleID getEmptyKey() { return ModuleID{"", ""}; }
+ static inline ModuleID getTombstoneKey() {
+ return ModuleID{"~", "~"}; // ~ is not a valid module name or context hash
+ }
+ static unsigned getHashValue(const ModuleID &ID) { return hash_value(ID); }
+ static bool isEqual(const ModuleID &LHS, const ModuleID &RHS) {
+ return LHS == RHS;
+ }
+};
+} // namespace llvm
+
+#endif // LLVM_CLANG_TOOLING_DEPENDENCYSCANNING_MODULEDEPCOLLECTOR_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/DiagnosticsYaml.h b/contrib/llvm-project/clang/include/clang/Tooling/DiagnosticsYaml.h
index 3f257d84f813..88f81e1f6299 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/DiagnosticsYaml.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/DiagnosticsYaml.h
@@ -42,8 +42,7 @@ template <> struct MappingTraits<clang::tooling::DiagnosticMessage> {
Io.mapOptional("FileOffset", M.FileOffset);
std::vector<clang::tooling::Replacement> Fixes;
for (auto &Replacements : M.Fix) {
- for (auto &Replacement : Replacements.second)
- Fixes.push_back(Replacement);
+ llvm::append_range(Fixes, Replacements.second);
}
Io.mapRequired("Replacements", Fixes);
for (auto &Fix : Fixes) {
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/FixIt.h b/contrib/llvm-project/clang/include/clang/Tooling/FixIt.h
index 5fce71f2d8f7..1624c2d6be36 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/FixIt.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/FixIt.h
@@ -76,4 +76,4 @@ FixItHint createReplacement(const D &Destination, StringRef Source) {
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_FIXINT_H
+#endif // LLVM_CLANG_TOOLING_FIXIT_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/HeaderAnalysis.h b/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/HeaderAnalysis.h
new file mode 100644
index 000000000000..34ec2d80d06a
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/HeaderAnalysis.h
@@ -0,0 +1,46 @@
+//===--- HeaderAnalysis.h -----------------------------------------*-C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLING_INCLUSIONS_HEADER_ANALYSIS_H
+#define LLVM_CLANG_TOOLING_INCLUSIONS_HEADER_ANALYSIS_H
+
+#include "clang/Basic/FileEntry.h"
+#include "llvm/ADT/StringRef.h"
+#include <optional>
+
+namespace clang {
+class SourceManager;
+class HeaderSearch;
+
+namespace tooling {
+
+/// Returns true if the given physical file is a self-contained header.
+///
+/// A header is considered self-contained if
+// - it has a proper header guard or has been #imported or contains #import(s)
+// - *and* it doesn't have a dont-include-me pattern.
+///
+/// This function can be expensive as it may scan the source code to find out
+/// dont-include-me pattern heuristically.
+bool isSelfContainedHeader(FileEntryRef FE, const SourceManager &SM,
+ const HeaderSearch &HeaderInfo);
+
+/// This scans the given source code to see if it contains #import(s).
+bool codeContainsImports(llvm::StringRef Code);
+
+/// If Text begins an Include-What-You-Use directive, returns it.
+/// Given "// IWYU pragma: keep", returns "keep".
+/// Input is a null-terminated char* as provided by SM.getCharacterData().
+/// (This should not be StringRef as we do *not* want to scan for its length).
+/// For multi-line comments, we return only the first line.
+std::optional<llvm::StringRef> parseIWYUPragma(const char *Text);
+
+} // namespace tooling
+} // namespace clang
+
+#endif // LLVM_CLANG_TOOLING_INCLUSIONS_HEADER_ANALYSIS_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/HeaderIncludes.h b/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/HeaderIncludes.h
index 02fb2875671a..d5439dd2c84e 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/HeaderIncludes.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/HeaderIncludes.h
@@ -14,6 +14,8 @@
#include "clang/Tooling/Inclusions/IncludeStyle.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Regex.h"
+#include <list>
+#include <optional>
#include <unordered_map>
namespace clang {
@@ -43,6 +45,8 @@ private:
SmallVector<llvm::Regex, 4> CategoryRegexs;
};
+enum class IncludeDirective { Include, Import };
+
/// Generates replacements for inserting or deleting #include directives in a
/// file.
class HeaderIncludes {
@@ -50,9 +54,9 @@ public:
HeaderIncludes(llvm::StringRef FileName, llvm::StringRef Code,
const IncludeStyle &Style);
- /// Inserts an #include directive of \p Header into the code. If \p IsAngled
- /// is true, \p Header will be quoted with <> in the directive; otherwise, it
- /// will be quoted with "".
+ /// Inserts an #include or #import directive of \p Header into the code.
+ /// If \p IsAngled is true, \p Header will be quoted with <> in the directive;
+ /// otherwise, it will be quoted with "".
///
/// When searching for points to insert new header, this ignores #include's
/// after the #include block(s) in the beginning of a file to avoid inserting
@@ -68,25 +72,32 @@ public:
/// this will simply insert the #include in front of the first #include of the
/// same category in the code that should be sorted after \p IncludeName. If
/// \p IncludeName already exists (with exactly the same spelling), this
- /// returns None.
- llvm::Optional<tooling::Replacement> insert(llvm::StringRef Header,
- bool IsAngled) const;
-
- /// Removes all existing #includes of \p Header quoted with <> if \p IsAngled
- /// is true or "" if \p IsAngled is false.
- /// This doesn't resolve the header file path; it only deletes #includes with
- /// exactly the same spelling.
+ /// returns std::nullopt.
+ std::optional<tooling::Replacement> insert(llvm::StringRef Header,
+ bool IsAngled,
+ IncludeDirective Directive) const;
+
+ /// Removes all existing #includes and #imports of \p Header quoted with <> if
+ /// \p IsAngled is true or "" if \p IsAngled is false.
+ /// This doesn't resolve the header file path; it only deletes #includes and
+ /// #imports with exactly the same spelling.
tooling::Replacements remove(llvm::StringRef Header, bool IsAngled) const;
+ // Matches a whole #include directive.
+ static const llvm::Regex IncludeRegex;
+
private:
struct Include {
- Include(StringRef Name, tooling::Range R) : Name(Name), R(R) {}
+ Include(StringRef Name, tooling::Range R, IncludeDirective D)
+ : Name(Name), R(R), Directive(D) {}
// An include header quoted with either <> or "".
std::string Name;
- // The range of the whole line of include directive including any eading
+ // The range of the whole line of include directive including any leading
// whitespaces and trailing comment.
tooling::Range R;
+ // Either #include or #import.
+ IncludeDirective Directive;
};
void addExistingInclude(Include IncludeToAdd, unsigned NextLineOffset);
@@ -97,7 +108,8 @@ private:
// Map from include name (quotation trimmed) to a list of existing includes
// (in case there are more than one) with the name in the current file. <x>
// and "x" will be treated as the same header when deleting #includes.
- llvm::StringMap<llvm::SmallVector<Include, 1>> ExistingIncludes;
+ // std::list is used for pointers stability (see IncludesByPriority)
+ llvm::StringMap<std::list<Include>> ExistingIncludes;
/// Map from priorities of #include categories to all #includes in the same
/// category. This is used to find #includes of the same category when
@@ -116,18 +128,16 @@ private:
// inserting new #includes into the actual code section (e.g. after a
// declaration).
unsigned MaxInsertOffset;
+ // True if we find the main-file header in the Code.
+ bool MainIncludeFound;
IncludeCategoryManager Categories;
// Record the offset of the end of the last include in each category.
std::unordered_map<int, int> CategoryEndOffsets;
// All possible priorities.
std::set<int> Priorities;
-
- // Matches a whole #include directive.
- llvm::Regex IncludeRegex;
};
-
} // namespace tooling
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/IncludeStyle.h b/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/IncludeStyle.h
index 4caaf4121f15..d6b2b0192477 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/IncludeStyle.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/IncludeStyle.h
@@ -50,6 +50,7 @@ struct IncludeStyle {
/// Dependent on the value, multiple ``#include`` blocks can be sorted
/// as one and divided based on category.
+ /// \version 6
IncludeBlocksStyle IncludeBlocks;
/// See documentation of ``IncludeCategories``.
@@ -105,7 +106,7 @@ struct IncludeStyle {
/// Priority: 2
/// SortPriority: 2
/// CaseSensitive: true
- /// - Regex: '^(<|"(gtest|gmock|isl|json)/)'
+ /// - Regex: '^((<|")(gtest|gmock|isl|json)/)'
/// Priority: 3
/// - Regex: '<[[:alnum:].]+>'
/// Priority: 4
@@ -113,6 +114,7 @@ struct IncludeStyle {
/// Priority: 1
/// SortPriority: 0
/// \endcode
+ /// \version 3.8
std::vector<IncludeCategory> IncludeCategories;
/// Specify a regular expression of suffixes that are allowed in the
@@ -126,6 +128,7 @@ struct IncludeStyle {
///
/// For example, if configured to "(_test)?$", then a header a.h would be seen
/// as the "main" include in both a.cc and a_test.cc.
+ /// \version 3.9
std::string IncludeIsMainRegex;
/// Specify a regular expression for files being formatted
@@ -146,6 +149,7 @@ struct IncludeStyle {
/// also being respected in later phase). Without this option set,
/// ``ClassImpl.hpp`` would not have the main include file put on top
/// before any other include.
+ /// \version 10
std::string IncludeIsMainSourceRegex;
};
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/StandardLibrary.h b/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/StandardLibrary.h
new file mode 100644
index 000000000000..a39ceb520dcf
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Inclusions/StandardLibrary.h
@@ -0,0 +1,158 @@
+//===--- StandardLibrary.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Provides an interface for querying information about C and C++ Standard
+/// Library headers and symbols.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLING_INCLUSIONS_STANDARDLIBRARY_H
+#define LLVM_CLANG_TOOLING_INCLUSIONS_STANDARDLIBRARY_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/raw_ostream.h"
+#include <optional>
+#include <string>
+
+namespace clang {
+class Decl;
+class NamespaceDecl;
+class DeclContext;
+namespace tooling {
+namespace stdlib {
+
+class Symbol;
+enum class Lang { C = 0, CXX, LastValue = CXX };
+
+// A standard library header, such as <iostream>
+// Lightweight class, in fact just an index into a table.
+// C++ and C Library compatibility headers are considered different: e.g.
+// "<cstdio>" and "<stdio.h>" (and their symbols) are treated differently.
+class Header {
+public:
+ static std::vector<Header> all(Lang L = Lang::CXX);
+ // Name should contain the angle brackets, e.g. "<vector>".
+ static std::optional<Header> named(llvm::StringRef Name,
+ Lang Language = Lang::CXX);
+
+ friend llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Header &H) {
+ return OS << H.name();
+ }
+ llvm::StringRef name() const;
+
+private:
+ Header(unsigned ID, Lang Language) : ID(ID), Language(Language) {}
+ unsigned ID;
+ Lang Language;
+
+ friend Symbol;
+ friend llvm::DenseMapInfo<Header>;
+ friend bool operator==(const Header &L, const Header &R) {
+ return L.ID == R.ID;
+ }
+};
+
+// A top-level standard library symbol, such as std::vector
+// Lightweight class, in fact just an index into a table.
+// C++ and C Standard Library symbols are considered distinct: e.g. std::printf
+// and ::printf are not treated as the same symbol.
+// The symbols do not contain macros right now, we don't have a reliable index
+// for them.
+class Symbol {
+public:
+ static std::vector<Symbol> all(Lang L = Lang::CXX);
+ /// \p Scope should have the trailing "::", for example:
+ /// named("std::chrono::", "system_clock")
+ static std::optional<Symbol>
+ named(llvm::StringRef Scope, llvm::StringRef Name, Lang Language = Lang::CXX);
+
+ friend llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Symbol &S) {
+ return OS << S.qualifiedName();
+ }
+ llvm::StringRef scope() const;
+ llvm::StringRef name() const;
+ llvm::StringRef qualifiedName() const;
+ // The preferred header for this symbol (e.g. the suggested insertion).
+ std::optional<Header> header() const;
+ // Some symbols may be provided by multiple headers.
+ llvm::SmallVector<Header> headers() const;
+
+private:
+ Symbol(unsigned ID, Lang Language) : ID(ID), Language(Language) {}
+ unsigned ID;
+ Lang Language;
+
+ friend class Recognizer;
+ friend llvm::DenseMapInfo<Symbol>;
+ friend bool operator==(const Symbol &L, const Symbol &R) {
+ return L.ID == R.ID;
+ }
+};
+
+// A functor to find the stdlib::Symbol associated with a decl.
+//
+// For non-top-level decls (std::vector<int>::iterator), returns the top-level
+// symbol (std::vector).
+class Recognizer {
+public:
+ Recognizer();
+ std::optional<Symbol> operator()(const Decl *D);
+
+private:
+ using NSSymbolMap = llvm::DenseMap<llvm::StringRef, unsigned>;
+ NSSymbolMap *namespaceSymbols(const DeclContext *DC, Lang L);
+ llvm::DenseMap<const DeclContext *, NSSymbolMap *> NamespaceCache;
+};
+
+} // namespace stdlib
+} // namespace tooling
+} // namespace clang
+
+namespace llvm {
+
+template <> struct DenseMapInfo<clang::tooling::stdlib::Header> {
+ static inline clang::tooling::stdlib::Header getEmptyKey() {
+ return clang::tooling::stdlib::Header(-1,
+ clang::tooling::stdlib::Lang::CXX);
+ }
+ static inline clang::tooling::stdlib::Header getTombstoneKey() {
+ return clang::tooling::stdlib::Header(-2,
+ clang::tooling::stdlib::Lang::CXX);
+ }
+ static unsigned getHashValue(const clang::tooling::stdlib::Header &H) {
+ return hash_value(H.ID);
+ }
+ static bool isEqual(const clang::tooling::stdlib::Header &LHS,
+ const clang::tooling::stdlib::Header &RHS) {
+ return LHS == RHS;
+ }
+};
+
+template <> struct DenseMapInfo<clang::tooling::stdlib::Symbol> {
+ static inline clang::tooling::stdlib::Symbol getEmptyKey() {
+ return clang::tooling::stdlib::Symbol(-1,
+ clang::tooling::stdlib::Lang::CXX);
+ }
+ static inline clang::tooling::stdlib::Symbol getTombstoneKey() {
+ return clang::tooling::stdlib::Symbol(-2,
+ clang::tooling::stdlib::Lang::CXX);
+ }
+ static unsigned getHashValue(const clang::tooling::stdlib::Symbol &S) {
+ return hash_value(S.ID);
+ }
+ static bool isEqual(const clang::tooling::stdlib::Symbol &LHS,
+ const clang::tooling::stdlib::Symbol &RHS) {
+ return LHS == RHS;
+ }
+};
+} // namespace llvm
+
+#endif // LLVM_CLANG_TOOLING_INCLUSIONS_STANDARDLIBRARY_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/ASTSelection.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/ASTSelection.h
index 239be36012c3..009437fde03f 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/ASTSelection.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/ASTSelection.h
@@ -6,14 +6,15 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_AST_SELECTION_H
-#define LLVM_CLANG_TOOLING_REFACTOR_AST_SELECTION_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_ASTSELECTION_H
+#define LLVM_CLANG_TOOLING_REFACTORING_ASTSELECTION_H
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Stmt.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/Support/raw_ostream.h"
+#include <optional>
#include <vector>
namespace clang {
@@ -65,10 +66,10 @@ struct SelectedASTNode {
/// Traverses the given ASTContext and creates a tree of selected AST nodes.
///
-/// \returns None if no nodes are selected in the AST, or a selected AST node
-/// that corresponds to the TranslationUnitDecl otherwise.
-Optional<SelectedASTNode> findSelectedASTNodes(const ASTContext &Context,
- SourceRange SelectionRange);
+/// \returns std::nullopt if no nodes are selected in the AST, or a selected AST
+/// node that corresponds to the TranslationUnitDecl otherwise.
+std::optional<SelectedASTNode> findSelectedASTNodes(const ASTContext &Context,
+ SourceRange SelectionRange);
/// An AST selection value that corresponds to a selection of a set of
/// statements that belong to one body of code (like one function).
@@ -130,7 +131,7 @@ public:
/// declaration doesn't exist.
const Decl *getFunctionLikeNearestParent() const;
- static Optional<CodeRangeASTSelection>
+ static std::optional<CodeRangeASTSelection>
create(SourceRange SelectionRange, const SelectedASTNode &ASTSelection);
private:
@@ -152,4 +153,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_AST_SELECTION_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_ASTSELECTION_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/AtomicChange.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/AtomicChange.h
index f1034a3d0579..92f322ef7d80 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/AtomicChange.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/AtomicChange.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_ATOMICCHANGE_H
-#define LLVM_CLANG_TOOLING_REFACTOR_ATOMICCHANGE_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_ATOMICCHANGE_H
+#define LLVM_CLANG_TOOLING_REFACTORING_ATOMICCHANGE_H
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
@@ -116,6 +116,8 @@ public:
/// Returns a const reference to existing replacements.
const Replacements &getReplacements() const { return Replaces; }
+ Replacements &getReplacements() { return Replaces; }
+
llvm::ArrayRef<std::string> getInsertedHeaders() const {
return InsertedHeaders;
}
@@ -187,4 +189,4 @@ applyAtomicChanges(llvm::StringRef FilePath, llvm::StringRef Code,
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_ATOMICCHANGE_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_ATOMICCHANGE_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/Extract.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/Extract.h
index 930991328ca0..695ca3879c10 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/Extract.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/Extract.h
@@ -6,11 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_EXTRACT_EXTRACT_H
-#define LLVM_CLANG_TOOLING_REFACTOR_EXTRACT_EXTRACT_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_EXTRACT_EXTRACT_H
+#define LLVM_CLANG_TOOLING_REFACTORING_EXTRACT_EXTRACT_H
#include "clang/Tooling/Refactoring/ASTSelection.h"
#include "clang/Tooling/Refactoring/RefactoringActionRules.h"
+#include <optional>
namespace clang {
namespace tooling {
@@ -22,16 +23,17 @@ public:
/// Initiates the extract function refactoring operation.
///
/// \param Code The selected set of statements.
- /// \param DeclName The name name of the extract function. If None,
+ /// \param DeclName The name of the extract function. If None,
/// "extracted" is used.
- static Expected<ExtractFunction> initiate(RefactoringRuleContext &Context,
- CodeRangeASTSelection Code,
- Optional<std::string> DeclName);
+ static Expected<ExtractFunction>
+ initiate(RefactoringRuleContext &Context, CodeRangeASTSelection Code,
+ std::optional<std::string> DeclName);
static const RefactoringDescriptor &describe();
private:
- ExtractFunction(CodeRangeASTSelection Code, Optional<std::string> DeclName)
+ ExtractFunction(CodeRangeASTSelection Code,
+ std::optional<std::string> DeclName)
: Code(std::move(Code)),
DeclName(DeclName ? std::move(*DeclName) : "extracted") {}
@@ -49,4 +51,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_EXTRACT_EXTRACT_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_EXTRACT_EXTRACT_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/SourceExtraction.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/SourceExtraction.h
index 034a0aaaf6db..be44518d4bce 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/SourceExtraction.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Extract/SourceExtraction.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTORING_EXTRACT_SOURCE_EXTRACTION_H
-#define LLVM_CLANG_TOOLING_REFACTORING_EXTRACT_SOURCE_EXTRACTION_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_EXTRACT_SOURCEEXTRACTION_H
+#define LLVM_CLANG_TOOLING_REFACTORING_EXTRACT_SOURCEEXTRACTION_H
#include "clang/Basic/LLVM.h"
@@ -48,4 +48,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif //LLVM_CLANG_TOOLING_REFACTORING_EXTRACT_SOURCE_EXTRACTION_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_EXTRACT_SOURCEEXTRACTION_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Lookup.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Lookup.h
index 448bc422c4e7..dcb40b7eee66 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Lookup.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Lookup.h
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_LOOKUP_H
-#define LLVM_CLANG_TOOLING_REFACTOR_LOOKUP_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_LOOKUP_H
+#define LLVM_CLANG_TOOLING_REFACTORING_LOOKUP_H
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
@@ -47,4 +47,4 @@ std::string replaceNestedName(const NestedNameSpecifier *Use,
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_LOOKUP_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_LOOKUP_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h
index 63d46abc2034..015dbba26f68 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h
@@ -12,8 +12,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_RECURSIVE_SYMBOL_VISITOR_H
-#define LLVM_CLANG_TOOLING_REFACTOR_RECURSIVE_SYMBOL_VISITOR_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_RECURSIVESYMBOLVISITOR_H
+#define LLVM_CLANG_TOOLING_REFACTORING_RECURSIVESYMBOLVISITOR_H
#include "clang/AST/AST.h"
#include "clang/AST/RecursiveASTVisitor.h"
@@ -124,10 +124,11 @@ public:
bool VisitDesignatedInitExpr(const DesignatedInitExpr *E) {
for (const DesignatedInitExpr::Designator &D : E->designators()) {
- if (D.isFieldDesignator() && D.getField()) {
- const FieldDecl *Decl = D.getField();
- if (!visit(Decl, D.getFieldLoc(), D.getFieldLoc()))
- return false;
+ if (D.isFieldDesignator()) {
+ if (const FieldDecl *Decl = D.getFieldDecl()) {
+ if (!visit(Decl, D.getFieldLoc(), D.getFieldLoc()))
+ return false;
+ }
}
}
return true;
@@ -150,4 +151,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_RECURSIVE_SYMBOL_VISITOR_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_RECURSIVESYMBOLVISITOR_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringAction.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringAction.h
index d4294ddb2f66..b362f655965e 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringAction.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringAction.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_H
-#define LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTION_H
+#define LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTION_H
#include "clang/Basic/LLVM.h"
#include "clang/Tooling/Refactoring/RefactoringActionRules.h"
@@ -60,4 +60,4 @@ std::vector<std::unique_ptr<RefactoringAction>> createRefactoringActions();
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTION_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRule.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRule.h
index 57dffa945acc..c6a6c4f6093a 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRule.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRule.h
@@ -6,11 +6,10 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULE_H
-#define LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULE_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULE_H
+#define LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULE_H
#include "clang/Basic/LLVM.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
namespace clang {
@@ -69,4 +68,4 @@ public:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULE_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULE_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRuleRequirements.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRuleRequirements.h
index 6a6dd83731e9..1a318da3acca 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRuleRequirements.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRuleRequirements.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULE_REQUIREMENTS_H
-#define LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULE_REQUIREMENTS_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULEREQUIREMENTS_H
+#define LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULEREQUIREMENTS_H
#include "clang/Basic/LLVM.h"
#include "clang/Tooling/Refactoring/ASTSelection.h"
@@ -98,7 +98,7 @@ public:
OptionRequirement() : Opt(createRefactoringOption<OptionType>()) {}
ArrayRef<std::shared_ptr<RefactoringOption>>
- getRefactoringOptions() const final override {
+ getRefactoringOptions() const final {
return Opt;
}
@@ -119,4 +119,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULE_REQUIREMENTS_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULEREQUIREMENTS_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRules.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRules.h
index e9606fd6018e..5cb051d53433 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRules.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRules.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULES_H
-#define LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULES_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULES_H
+#define LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULES_H
#include "clang/Tooling/Refactoring/RefactoringActionRule.h"
#include "clang/Tooling/Refactoring/RefactoringActionRulesInternal.h"
@@ -52,7 +52,7 @@ using RefactoringActionRules =
class SourceChangeRefactoringRule : public RefactoringActionRuleBase {
public:
void invoke(RefactoringResultConsumer &Consumer,
- RefactoringRuleContext &Context) final override {
+ RefactoringRuleContext &Context) final {
Expected<AtomicChanges> Changes = createSourceReplacements(Context);
if (!Changes)
Consumer.handleError(Changes.takeError());
@@ -74,7 +74,7 @@ private:
class FindSymbolOccurrencesRefactoringRule : public RefactoringActionRuleBase {
public:
void invoke(RefactoringResultConsumer &Consumer,
- RefactoringRuleContext &Context) final override {
+ RefactoringRuleContext &Context) final {
Expected<SymbolOccurrences> Occurrences = findSymbolOccurrences(Context);
if (!Occurrences)
Consumer.handleError(Occurrences.takeError());
@@ -90,4 +90,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULES_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULES_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRulesInternal.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRulesInternal.h
index fb373fcf5029..33194c401ea1 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRulesInternal.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringActionRulesInternal.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULES_INTERNAL_H
-#define LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULES_INTERNAL_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULESINTERNAL_H
+#define LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULESINTERNAL_H
#include "clang/Basic/LLVM.h"
#include "clang/Tooling/Refactoring/RefactoringActionRule.h"
@@ -94,9 +94,9 @@ void visitRefactoringOptions(
/// A type trait that returns true when the given type list has at least one
/// type whose base is the given base type.
template <typename Base, typename First, typename... Rest>
-struct HasBaseOf : std::conditional<HasBaseOf<Base, First>::value ||
- HasBaseOf<Base, Rest...>::value,
- std::true_type, std::false_type>::type {};
+struct HasBaseOf : std::conditional_t<HasBaseOf<Base, First>::value ||
+ HasBaseOf<Base, Rest...>::value,
+ std::true_type, std::false_type> {};
template <typename Base, typename T>
struct HasBaseOf<Base, T> : std::is_base_of<Base, T> {};
@@ -104,9 +104,9 @@ struct HasBaseOf<Base, T> : std::is_base_of<Base, T> {};
/// A type trait that returns true when the given type list contains types that
/// derive from Base.
template <typename Base, typename First, typename... Rest>
-struct AreBaseOf : std::conditional<AreBaseOf<Base, First>::value &&
- AreBaseOf<Base, Rest...>::value,
- std::true_type, std::false_type>::type {};
+struct AreBaseOf : std::conditional_t<AreBaseOf<Base, First>::value &&
+ AreBaseOf<Base, Rest...>::value,
+ std::true_type, std::false_type> {};
template <typename Base, typename T>
struct AreBaseOf<Base, T> : std::is_base_of<Base, T> {};
@@ -154,4 +154,4 @@ createRefactoringActionRule(const RequirementTypes &... Requirements) {
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_ACTION_RULES_INTERNAL_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGACTIONRULESINTERNAL_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOption.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOption.h
index 659e02b48e5c..b022c5d61b03 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOption.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOption.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_OPTION_H
-#define LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_OPTION_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGOPTION_H
+#define LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGOPTION_H
#include "clang/Basic/LLVM.h"
#include <memory>
@@ -60,4 +60,4 @@ std::shared_ptr<OptionType> createRefactoringOption() {
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_OPTION_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGOPTION_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptionVisitor.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptionVisitor.h
index d58b11355a26..3234b0976a8e 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptionVisitor.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptionVisitor.h
@@ -6,10 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_OPTION_VISITOR_H
-#define LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_OPTION_VISITOR_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGOPTIONVISITOR_H
+#define LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGOPTIONVISITOR_H
#include "clang/Basic/LLVM.h"
+#include <optional>
#include <type_traits>
namespace clang {
@@ -27,7 +28,7 @@ public:
virtual ~RefactoringOptionVisitor() {}
virtual void visit(const RefactoringOption &Opt,
- Optional<std::string> &Value) = 0;
+ std::optional<std::string> &Value) = 0;
};
namespace traits {
@@ -38,7 +39,8 @@ private:
template <typename ClassT>
static auto check(ClassT *) -> typename std::is_same<
decltype(std::declval<RefactoringOptionVisitor>().visit(
- std::declval<RefactoringOption>(), *std::declval<Optional<T> *>())),
+ std::declval<RefactoringOption>(),
+ *std::declval<std::optional<T> *>())),
void>::type;
template <typename> static std::false_type check(...);
@@ -58,4 +60,4 @@ struct IsValidOptionType : internal::HasHandle<T>::Type {};
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_OPTION_VISITOR_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGOPTIONVISITOR_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptions.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptions.h
index 84122b111ee1..62c01e75b1c7 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptions.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringOptions.h
@@ -6,14 +6,15 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_OPTIONS_H
-#define LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_OPTIONS_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGOPTIONS_H
+#define LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGOPTIONS_H
#include "clang/Basic/LLVM.h"
#include "clang/Tooling/Refactoring/RefactoringActionRuleRequirements.h"
#include "clang/Tooling/Refactoring/RefactoringOption.h"
#include "clang/Tooling/Refactoring/RefactoringOptionVisitor.h"
#include "llvm/Support/Error.h"
+#include <optional>
#include <type_traits>
namespace clang {
@@ -24,18 +25,18 @@ template <typename T,
typename = std::enable_if_t<traits::IsValidOptionType<T>::value>>
class OptionalRefactoringOption : public RefactoringOption {
public:
- void passToVisitor(RefactoringOptionVisitor &Visitor) final override {
+ void passToVisitor(RefactoringOptionVisitor &Visitor) final {
Visitor.visit(*this, Value);
}
bool isRequired() const override { return false; }
- using ValueType = Optional<T>;
+ using ValueType = std::optional<T>;
const ValueType &getValue() const { return Value; }
protected:
- Optional<T> Value;
+ std::optional<T> Value;
};
/// A required refactoring option that stores a value of type \c T.
@@ -48,10 +49,10 @@ public:
const ValueType &getValue() const {
return *OptionalRefactoringOption<T>::Value;
}
- bool isRequired() const final override { return true; }
+ bool isRequired() const final { return true; }
};
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_OPTIONS_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGOPTIONS_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringResultConsumer.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringResultConsumer.h
index 2035c02bc17a..016eff80ca7b 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringResultConsumer.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringResultConsumer.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_RESULT_CONSUMER_H
-#define LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_RESULT_CONSUMER_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGRESULTCONSUMER_H
+#define LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGRESULTCONSUMER_H
#include "clang/Basic/LLVM.h"
#include "clang/Tooling/Refactoring/AtomicChange.h"
@@ -48,4 +48,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_RESULT_CONSUMER_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGRESULTCONSUMER_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringRuleContext.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringRuleContext.h
index e0da9469deb5..7d97f811f024 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringRuleContext.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/RefactoringRuleContext.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_RULE_CONTEXT_H
-#define LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_RULE_CONTEXT_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGRULECONTEXT_H
+#define LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGRULECONTEXT_H
#include "clang/Basic/DiagnosticError.h"
#include "clang/Basic/SourceManager.h"
@@ -86,4 +86,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_REFACTORING_RULE_CONTEXT_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_REFACTORINGRULECONTEXT_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/RenamingAction.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/RenamingAction.h
index b04bc3e2d202..43a8d56e4e71 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/RenamingAction.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/RenamingAction.h
@@ -11,8 +11,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_RENAME_RENAMING_ACTION_H
-#define LLVM_CLANG_TOOLING_REFACTOR_RENAME_RENAMING_ACTION_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_RENAME_RENAMINGACTION_H
+#define LLVM_CLANG_TOOLING_REFACTORING_RENAME_RENAMINGACTION_H
#include "clang/Tooling/Refactoring.h"
#include "clang/Tooling/Refactoring/AtomicChange.h"
@@ -23,7 +23,6 @@
namespace clang {
class ASTConsumer;
-class CompilerInstance;
namespace tooling {
@@ -120,4 +119,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_RENAME_RENAMING_ACTION_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_RENAME_RENAMINGACTION_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/SymbolName.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/SymbolName.h
index 9131a4565da7..6c28d40f3679 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/SymbolName.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/SymbolName.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_RENAME_SYMBOL_NAME_H
-#define LLVM_CLANG_TOOLING_REFACTOR_RENAME_SYMBOL_NAME_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_RENAME_SYMBOLNAME_H
+#define LLVM_CLANG_TOOLING_REFACTORING_RENAME_SYMBOLNAME_H
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/ArrayRef.h"
@@ -45,4 +45,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_RENAME_SYMBOL_NAME_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_RENAME_SYMBOLNAME_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/SymbolOccurrences.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/SymbolOccurrences.h
index c4bfaa9cc377..aff965edb07c 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/SymbolOccurrences.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/SymbolOccurrences.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_RENAME_SYMBOL_OCCURRENCES_H
-#define LLVM_CLANG_TOOLING_REFACTOR_RENAME_SYMBOL_OCCURRENCES_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_RENAME_SYMBOLOCCURRENCES_H
+#define LLVM_CLANG_TOOLING_REFACTORING_RENAME_SYMBOLOCCURRENCES_H
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
@@ -70,7 +70,7 @@ public:
ArrayRef<SourceRange> getNameRanges() const {
if (MultipleRanges)
- return llvm::makeArrayRef(MultipleRanges.get(), NumRanges);
+ return llvm::ArrayRef(MultipleRanges.get(), NumRanges);
return SingleRange;
}
@@ -88,4 +88,4 @@ using SymbolOccurrences = std::vector<SymbolOccurrence>;
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_RENAME_SYMBOL_OCCURRENCES_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_RENAME_SYMBOLOCCURRENCES_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRFinder.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRFinder.h
index 30f7f0a0008c..a7ffa8556888 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRFinder.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRFinder.h
@@ -12,8 +12,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_RENAME_USR_FINDER_H
-#define LLVM_CLANG_TOOLING_REFACTOR_RENAME_USR_FINDER_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_RENAME_USRFINDER_H
+#define LLVM_CLANG_TOOLING_REFACTORING_RENAME_USRFINDER_H
#include "clang/AST/AST.h"
#include "clang/AST/ASTContext.h"
@@ -46,4 +46,4 @@ std::string getUSRForDecl(const Decl *Decl);
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_RENAME_USR_FINDER_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_RENAME_USRFINDER_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRFindingAction.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRFindingAction.h
index 726987d9d46a..e81b5c2345c9 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRFindingAction.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRFindingAction.h
@@ -11,8 +11,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_RENAME_USR_FINDING_ACTION_H
-#define LLVM_CLANG_TOOLING_REFACTOR_RENAME_USR_FINDING_ACTION_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_RENAME_USRFINDINGACTION_H
+#define LLVM_CLANG_TOOLING_REFACTORING_RENAME_USRFINDINGACTION_H
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/ArrayRef.h"
@@ -23,7 +23,6 @@
namespace clang {
class ASTConsumer;
class ASTContext;
-class CompilerInstance;
class NamedDecl;
namespace tooling {
@@ -64,4 +63,4 @@ private:
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_RENAME_USR_FINDING_ACTION_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_RENAME_USRFINDINGACTION_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRLocFinder.h b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRLocFinder.h
index 7a7dd76c4238..c3ffb4421e00 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRLocFinder.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Refactoring/Rename/USRLocFinder.h
@@ -12,8 +12,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_RENAME_USR_LOC_FINDER_H
-#define LLVM_CLANG_TOOLING_REFACTOR_RENAME_USR_LOC_FINDER_H
+#ifndef LLVM_CLANG_TOOLING_REFACTORING_RENAME_USRLOCFINDER_H
+#define LLVM_CLANG_TOOLING_REFACTORING_RENAME_USRLOCFINDER_H
#include "clang/AST/AST.h"
#include "clang/Tooling/Core/Replacement.h"
@@ -49,4 +49,4 @@ SymbolOccurrences getOccurrencesOfUSRs(ArrayRef<std::string> USRs,
} // end namespace tooling
} // end namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_RENAME_USR_LOC_FINDER_H
+#endif // LLVM_CLANG_TOOLING_REFACTORING_RENAME_USRLOCFINDER_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/ReplacementsYaml.h b/contrib/llvm-project/clang/include/clang/Tooling/ReplacementsYaml.h
index 83e35d623255..838f87fd1978 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/ReplacementsYaml.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/ReplacementsYaml.h
@@ -30,8 +30,7 @@ template <> struct MappingTraits<clang::tooling::Replacement> {
/// Helper to (de)serialize a Replacement since we don't have direct
/// access to its data members.
struct NormalizedReplacement {
- NormalizedReplacement(const IO &)
- : FilePath(""), Offset(0), Length(0), ReplacementText("") {}
+ NormalizedReplacement(const IO &) : Offset(0), Length(0) {}
NormalizedReplacement(const IO &, const clang::tooling::Replacement &R)
: FilePath(R.getFilePath()), Offset(R.getOffset()),
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/StandaloneExecution.h b/contrib/llvm-project/clang/include/clang/Tooling/StandaloneExecution.h
index 8db6229acf7f..cdbe65a95b9d 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/StandaloneExecution.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/StandaloneExecution.h
@@ -15,6 +15,7 @@
#include "clang/Tooling/ArgumentsAdjusters.h"
#include "clang/Tooling/Execution.h"
+#include <optional>
namespace clang {
namespace tooling {
@@ -83,7 +84,7 @@ public:
private:
// Used to store the parser when the executor is initialized with parser.
- llvm::Optional<CommonOptionsParser> OptionsParser;
+ std::optional<CommonOptionsParser> OptionsParser;
// FIXME: The standalone executor is currently just a wrapper of `ClangTool`.
// Merge `ClangTool` implementation into the this.
ClangTool Tool;
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Syntax/BuildTree.h b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/BuildTree.h
index 3c8dd8ceed09..273d03ddc233 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Syntax/BuildTree.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/BuildTree.h
@@ -7,12 +7,13 @@
//===----------------------------------------------------------------------===//
// Functions to construct a syntax tree from an AST.
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_SYNTAX_TREE_H
-#define LLVM_CLANG_TOOLING_SYNTAX_TREE_H
+#ifndef LLVM_CLANG_TOOLING_SYNTAX_BUILDTREE_H
+#define LLVM_CLANG_TOOLING_SYNTAX_BUILDTREE_H
#include "clang/AST/Decl.h"
#include "clang/Basic/TokenKinds.h"
#include "clang/Tooling/Syntax/Nodes.h"
+#include "clang/Tooling/Syntax/TokenBufferTokenManager.h"
#include "clang/Tooling/Syntax/Tree.h"
namespace clang {
@@ -21,19 +22,21 @@ namespace syntax {
/// Build a syntax tree for the main file.
/// This usually covers the whole TranslationUnitDecl, but can be restricted by
/// the ASTContext's traversal scope.
-syntax::TranslationUnit *buildSyntaxTree(Arena &A, ASTContext &Context);
+syntax::TranslationUnit *
+buildSyntaxTree(Arena &A, TokenBufferTokenManager &TBTM, ASTContext &Context);
// Create syntax trees from subtrees not backed by the source code.
// Synthesis of Leafs
/// Create `Leaf` from token with `Spelling` and assert it has the desired
/// `TokenKind`.
-syntax::Leaf *createLeaf(syntax::Arena &A, tok::TokenKind K,
- StringRef Spelling);
+syntax::Leaf *createLeaf(syntax::Arena &A, TokenBufferTokenManager &TBTM,
+ tok::TokenKind K, StringRef Spelling);
/// Infer the token spelling from its `TokenKind`, then create `Leaf` from
/// this token
-syntax::Leaf *createLeaf(syntax::Arena &A, tok::TokenKind K);
+syntax::Leaf *createLeaf(syntax::Arena &A, TokenBufferTokenManager &TBTM,
+ tok::TokenKind K);
// Synthesis of Trees
/// Creates the concrete syntax node according to the specified `NodeKind` `K`.
@@ -44,7 +47,8 @@ createTree(syntax::Arena &A,
syntax::NodeKind K);
// Synthesis of Syntax Nodes
-syntax::EmptyStatement *createEmptyStatement(syntax::Arena &A);
+syntax::EmptyStatement *createEmptyStatement(syntax::Arena &A,
+ TokenBufferTokenManager &TBTM);
/// Creates a completely independent copy of `N` with its macros expanded.
///
@@ -52,7 +56,9 @@ syntax::EmptyStatement *createEmptyStatement(syntax::Arena &A);
/// * Detached, i.e. `Parent == NextSibling == nullptr` and
/// `Role == Detached`.
/// * Synthesized, i.e. `Original == false`.
-syntax::Node *deepCopyExpandingMacros(syntax::Arena &A, const syntax::Node *N);
+syntax::Node *deepCopyExpandingMacros(syntax::Arena &A,
+ TokenBufferTokenManager &TBTM,
+ const syntax::Node *N);
} // namespace syntax
} // namespace clang
#endif
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Mutations.h b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Mutations.h
index 8fd58ae34fff..6db9c88ca000 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Mutations.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Mutations.h
@@ -13,6 +13,7 @@
#include "clang/Tooling/Core/Replacement.h"
#include "clang/Tooling/Syntax/Nodes.h"
+#include "clang/Tooling/Syntax/TokenBufferTokenManager.h"
#include "clang/Tooling/Syntax/Tree.h"
namespace clang {
@@ -20,7 +21,7 @@ namespace syntax {
/// Computes textual replacements required to mimic the tree modifications made
/// to the syntax tree.
-tooling::Replacements computeReplacements(const Arena &A,
+tooling::Replacements computeReplacements(const TokenBufferTokenManager &TBTM,
const syntax::TranslationUnit &TU);
/// Removes a statement or replaces it with an empty statement where one is
@@ -29,7 +30,8 @@ tooling::Replacements computeReplacements(const Arena &A,
/// One can remove `foo();` completely and to remove `bar();` we would need to
/// replace it with an empty statement.
/// EXPECTS: S->canModify() == true
-void removeStatement(syntax::Arena &A, syntax::Statement *S);
+void removeStatement(syntax::Arena &A, TokenBufferTokenManager &TBTM,
+ syntax::Statement *S);
} // namespace syntax
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Nodes.h b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Nodes.h
index edb6d4d4381d..c4f31900d0ce 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Nodes.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Nodes.h
@@ -21,13 +21,8 @@
#ifndef LLVM_CLANG_TOOLING_SYNTAX_NODES_H
#define LLVM_CLANG_TOOLING_SYNTAX_NODES_H
-#include "clang/Basic/TokenKinds.h"
-#include "clang/Lex/Token.h"
-#include "clang/Tooling/Syntax/Tokens.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Tooling/Syntax/Tree.h"
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/raw_ostream.h"
namespace clang {
namespace syntax {
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Syntax/TokenBufferTokenManager.h b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/TokenBufferTokenManager.h
new file mode 100644
index 000000000000..6522af584e9a
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/TokenBufferTokenManager.h
@@ -0,0 +1,70 @@
+//===- TokenBufferTokenManager.h -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLING_SYNTAX_TOKEN_BUFFER_TOKEN_MANAGER_H
+#define LLVM_CLANG_TOOLING_SYNTAX_TOKEN_BUFFER_TOKEN_MANAGER_H
+
+#include "clang/Tooling/Syntax/TokenManager.h"
+#include "clang/Tooling/Syntax/Tokens.h"
+
+namespace clang {
+namespace syntax {
+
+/// A TokenBuffer-powered token manager.
+/// It tracks the underlying token buffers, source manager, etc.
+class TokenBufferTokenManager : public TokenManager {
+public:
+ TokenBufferTokenManager(const TokenBuffer &Tokens,
+ const LangOptions &LangOpts, SourceManager &SourceMgr)
+ : Tokens(Tokens), LangOpts(LangOpts), SM(SourceMgr) {}
+
+ static bool classof(const TokenManager *N) { return N->kind() == Kind; }
+ llvm::StringLiteral kind() const override { return Kind; }
+
+ llvm::StringRef getText(Key I) const override {
+ const auto *Token = getToken(I);
+ assert(Token);
+ // Handle 'eof' separately, calling text() on it produces an empty string.
+ // FIXME: this special logic is for syntax::Leaf dump, move it when we
+ // have a direct way to retrive token kind in the syntax::Leaf.
+ if (Token->kind() == tok::eof)
+ return "<eof>";
+ return Token->text(SM);
+ }
+
+ const syntax::Token *getToken(Key I) const {
+ return reinterpret_cast<const syntax::Token *>(I);
+ }
+ SourceManager &sourceManager() { return SM; }
+ const SourceManager &sourceManager() const { return SM; }
+ const TokenBuffer &tokenBuffer() const { return Tokens; }
+
+private:
+ // This manager is powered by the TokenBuffer.
+ static constexpr llvm::StringLiteral Kind = "TokenBuffer";
+
+ /// Add \p Buffer to the underlying source manager, tokenize it and store the
+ /// resulting tokens. Used exclusively in `FactoryImpl` to materialize tokens
+ /// that were not written in user code.
+ std::pair<FileID, ArrayRef<Token>>
+ lexBuffer(std::unique_ptr<llvm::MemoryBuffer> Buffer);
+ friend class FactoryImpl;
+
+ const TokenBuffer &Tokens;
+ const LangOptions &LangOpts;
+
+ /// The underlying source manager for the ExtraTokens.
+ SourceManager &SM;
+ /// IDs and storage for additional tokenized files.
+ llvm::DenseMap<FileID, std::vector<Token>> ExtraTokens;
+};
+
+} // namespace syntax
+} // namespace clang
+
+#endif // LLVM_CLANG_TOOLING_SYNTAX_TOKEN_BUFFER_TOKEN_MANAGER_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Syntax/TokenManager.h b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/TokenManager.h
new file mode 100644
index 000000000000..6f0d11ce0d6b
--- /dev/null
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/TokenManager.h
@@ -0,0 +1,47 @@
+//===- TokenManager.h - Manage Tokens for syntax-tree ------------*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines Token interfaces for the clang syntax-tree. This is the level of
+// abstraction that the syntax-tree uses to operate on Token.
+//
+// TokenManager decouples the syntax-tree from a particular token
+// implementation. For example, a TokenBuffer captured from a clang parser may
+// track macro expansions and associate tokens with clang's SourceManager, while
+// a clang pseudoparser would use a flat array of raw-lexed tokens in memory.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLING_SYNTAX_TOKEN_MANAGER_H
+#define LLVM_CLANG_TOOLING_SYNTAX_TOKEN_MANAGER_H
+
+#include "llvm/ADT/StringRef.h"
+#include <cstdint>
+
+namespace clang {
+namespace syntax {
+
+/// Defines interfaces for operating "Token" in the clang syntax-tree.
+class TokenManager {
+public:
+ virtual ~TokenManager() = default;
+
+ /// Describes what the exact class kind of the TokenManager is.
+ virtual llvm::StringLiteral kind() const = 0;
+
+ /// A key to identify a specific token. The token concept depends on the
+ /// underlying implementation -- it can be a spelled token from the original
+ /// source file or an expanded token.
+ /// The syntax-tree Leaf node holds a Key.
+ using Key = uintptr_t;
+ virtual llvm::StringRef getText(Key K) const = 0;
+};
+
+} // namespace syntax
+} // namespace clang
+
+#endif // LLVM_CLANG_TOOLING_SYNTAX_TOKEN_MANAGER_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tokens.h b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tokens.h
index e4bc1553c2d6..b1bdefed7c97 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tokens.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tokens.h
@@ -27,7 +27,6 @@
#ifndef LLVM_CLANG_TOOLING_SYNTAX_TOKENS_H
#define LLVM_CLANG_TOOLING_SYNTAX_TOKENS_H
-#include "clang/Basic/FileManager.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
@@ -35,7 +34,6 @@
#include "clang/Lex/Token.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
@@ -205,7 +203,7 @@ public:
/// Returns the subrange of spelled tokens corresponding to AST node spanning
/// \p Expanded. This is the text that should be replaced if a refactoring
/// were to rewrite the node. If \p Expanded is empty, the returned value is
- /// llvm::None.
+ /// std::nullopt.
///
/// Will fail if the expanded tokens do not correspond to a sequence of
/// spelled tokens. E.g. for the following example:
@@ -230,7 +228,7 @@ public:
///
/// EXPECTS: \p Expanded is a subrange of expandedTokens().
/// Complexity is logarithmic.
- llvm::Optional<llvm::ArrayRef<syntax::Token>>
+ std::optional<llvm::ArrayRef<syntax::Token>>
spelledForExpanded(llvm::ArrayRef<syntax::Token> Expanded) const;
/// Find the subranges of expanded tokens, corresponding to \p Spelled.
@@ -279,7 +277,7 @@ public:
/// If \p Spelled starts a mapping (e.g. if it's a macro name or '#' starting
/// a preprocessor directive) return the subrange of expanded tokens that the
/// macro expands to.
- llvm::Optional<Expansion>
+ std::optional<Expansion>
expansionStartingAt(const syntax::Token *Spelled) const;
/// Returns all expansions (partially) expanded from the specified tokens.
/// This is the expansions whose Spelled range intersects \p Spelled.
@@ -426,7 +424,7 @@ public:
/// Finalizes token collection. Should be called after preprocessing is
/// finished, i.e. after running Execute().
- LLVM_NODISCARD TokenBuffer consume() &&;
+ [[nodiscard]] TokenBuffer consume() &&;
private:
/// Maps from a start to an end spelling location of transformations
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tree.h b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tree.h
index b92e92305417..a80b7bf2a3de 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tree.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Syntax/Tree.h
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
// Defines the basic structure of the syntax tree. There are two kinds of nodes:
-// - leaf nodes correspond to a token in the expanded token stream,
+// - leaf nodes correspond to tokens,
// - tree nodes correspond to language grammar constructs.
//
// The tree is initially built from an AST. Each node of a newly built tree
-// covers a continous subrange of expanded tokens (i.e. tokens after
+// covers a continuous subrange of expanded tokens (i.e. tokens after
// preprocessing), the specific tokens coverered are stored in the leaf nodes of
// a tree. A post-order traversal of a tree will visit leaf nodes in an order
// corresponding the original order of expanded tokens.
@@ -18,51 +18,25 @@
// This is still work in progress and highly experimental, we leave room for
// ourselves to completely change the design and/or implementation.
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_SYNTAX_TREE_CASCADE_H
-#define LLVM_CLANG_TOOLING_SYNTAX_TREE_CASCADE_H
+#ifndef LLVM_CLANG_TOOLING_SYNTAX_TREE_H
+#define LLVM_CLANG_TOOLING_SYNTAX_TREE_H
-#include "clang/Basic/LangOptions.h"
-#include "clang/Basic/SourceLocation.h"
-#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TokenKinds.h"
-#include "clang/Tooling/Syntax/Tokens.h"
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/DenseMap.h"
+#include "clang/Tooling/Syntax/TokenManager.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Support/Allocator.h"
#include <cstdint>
-#include <iterator>
+#include <vector>
namespace clang {
namespace syntax {
-/// A memory arena for syntax trees. Also tracks the underlying token buffers,
-/// source manager, etc.
+/// A memory arena for syntax trees.
+// FIXME: use BumpPtrAllocator directly.
class Arena {
public:
- Arena(SourceManager &SourceMgr, const LangOptions &LangOpts,
- const TokenBuffer &Tokens);
-
- const SourceManager &getSourceManager() const { return SourceMgr; }
- const LangOptions &getLangOptions() const { return LangOpts; }
-
- const TokenBuffer &getTokenBuffer() const;
llvm::BumpPtrAllocator &getAllocator() { return Allocator; }
-
private:
- /// Add \p Buffer to the underlying source manager, tokenize it and store the
- /// resulting tokens. Used exclusively in `FactoryImpl` to materialize tokens
- /// that were not written in user code.
- std::pair<FileID, ArrayRef<Token>>
- lexBuffer(std::unique_ptr<llvm::MemoryBuffer> Buffer);
- friend class FactoryImpl;
-
-private:
- SourceManager &SourceMgr;
- const LangOptions &LangOpts;
- const TokenBuffer &Tokens;
- /// IDs and storage for additional tokenized files.
- llvm::DenseMap<FileID, std::vector<Token>> ExtraTokens;
/// Keeps all the allocated nodes and their intermediate data structures.
llvm::BumpPtrAllocator Allocator;
};
@@ -122,9 +96,9 @@ public:
Node *getPreviousSibling() { return PreviousSibling; }
/// Dumps the structure of a subtree. For debugging and testing purposes.
- std::string dump(const SourceManager &SM) const;
+ std::string dump(const TokenManager &SM) const;
/// Dumps the tokens forming this subtree.
- std::string dumpTokens(const SourceManager &SM) const;
+ std::string dumpTokens(const TokenManager &SM) const;
/// Asserts invariants on this node of the tree and its immediate children.
/// Will not recurse into the subtree. No-op if NDEBUG is set.
@@ -153,16 +127,17 @@ private:
unsigned CanModify : 1;
};
-/// A leaf node points to a single token inside the expanded token stream.
+/// A leaf node points to a single token.
+// FIXME: add TokenKind field (borrow some bits from the Node::kind).
class Leaf final : public Node {
public:
- Leaf(const Token *T);
+ Leaf(TokenManager::Key K);
static bool classof(const Node *N);
- const Token *getToken() const { return Tok; }
+ TokenManager::Key getTokenKey() const { return K; }
private:
- const Token *Tok;
+ TokenManager::Key K;
};
/// A node that has children and represents a syntactic language construct.
@@ -181,7 +156,10 @@ class Tree : public Node {
ChildIteratorBase() = default;
explicit ChildIteratorBase(NodeT *N) : N(N) {}
- bool operator==(const DerivedT &O) const { return O.N == N; }
+ friend bool operator==(const DerivedT &LHS, const DerivedT &RHS) {
+ return LHS.N == RHS.N;
+ }
+
NodeT &operator*() const { return *N; }
DerivedT &operator++() {
N = N->getNextSibling();
@@ -269,14 +247,6 @@ private:
Node *LastChild = nullptr;
};
-// Provide missing non_const == const overload.
-// iterator_facade_base requires == to be a member, but implicit conversions
-// don't work on the LHS of a member operator.
-inline bool operator==(const Tree::ConstChildIterator &A,
- const Tree::ConstChildIterator &B) {
- return A.operator==(B);
-}
-
/// A list of Elements separated or terminated by a fixed token.
///
/// This type models the following grammar construct:
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Tooling.h b/contrib/llvm-project/clang/include/clang/Tooling/Tooling.h
index 73d09662562b..070706e8fa6d 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Tooling.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Tooling.h
@@ -54,7 +54,6 @@ class CompilerInstance;
class CompilerInvocation;
class DiagnosticConsumer;
class DiagnosticsEngine;
-class SourceManager;
namespace driver {
@@ -115,7 +114,7 @@ public:
/// T must derive from clang::FrontendAction.
///
/// Example:
-/// FrontendActionFactory *Factory =
+/// std::unique_ptr<FrontendActionFactory> Factory =
/// newFrontendActionFactory<clang::SyntaxOnlyAction>();
template <typename T>
std::unique_ptr<FrontendActionFactory> newFrontendActionFactory();
@@ -145,7 +144,7 @@ public:
///
/// Example:
/// struct ProvidesASTConsumers {
-/// clang::ASTConsumer *newASTConsumer();
+/// std::unique_ptr<clang::ASTConsumer> newASTConsumer();
/// } Factory;
/// std::unique_ptr<FrontendActionFactory> FactoryAdapter(
/// newFrontendActionFactory(&Factory));
@@ -268,11 +267,20 @@ public:
~ToolInvocation();
- /// Set a \c DiagnosticConsumer to use during parsing.
+ ToolInvocation(const ToolInvocation &) = delete;
+ ToolInvocation &operator=(const ToolInvocation &) = delete;
+
+ /// Set a \c DiagnosticConsumer to use during driver command-line parsing and
+ /// the action invocation itself.
void setDiagnosticConsumer(DiagnosticConsumer *DiagConsumer) {
this->DiagConsumer = DiagConsumer;
}
+ /// Set a \c DiagnosticOptions to use during driver command-line parsing.
+ void setDiagnosticOptions(DiagnosticOptions *DiagOpts) {
+ this->DiagOpts = DiagOpts;
+ }
+
/// Run the clang invocation.
///
/// \returns True if there were no errors during execution.
@@ -290,6 +298,7 @@ public:
FileManager *Files;
std::shared_ptr<PCHContainerOperations> PCHContainerOps;
DiagnosticConsumer *DiagConsumer = nullptr;
+ DiagnosticOptions *DiagOpts = nullptr;
};
/// Utility to run a FrontendAction over a set of files.
@@ -355,11 +364,6 @@ public:
/// append them to ASTs.
int buildASTs(std::vector<std::unique_ptr<ASTUnit>> &ASTs);
- /// Sets whether working directory should be restored after calling run(). By
- /// default, working directory is restored. However, it could be useful to
- /// turn this off when running on multiple threads to avoid the raciness.
- void setRestoreWorkingDir(bool RestoreCWD);
-
/// Sets whether an error message should be printed out if an action fails. By
/// default, if an action fails, a message is printed out to stderr.
void setPrintErrorMessage(bool PrintErrorMessage);
@@ -389,7 +393,6 @@ private:
DiagnosticConsumer *DiagConsumer = nullptr;
- bool RestoreCWD = true;
bool PrintErrorMessage = true;
};
@@ -500,9 +503,15 @@ llvm::Expected<std::string> getAbsolutePath(llvm::vfs::FileSystem &FS,
void addTargetAndModeForProgramName(std::vector<std::string> &CommandLine,
StringRef InvokedAs);
+/// Helper function that expands response files in command line.
+void addExpandedResponseFiles(std::vector<std::string> &CommandLine,
+ llvm::StringRef WorkingDir,
+ llvm::cl::TokenizerCallback Tokenizer,
+ llvm::vfs::FileSystem &FS);
+
/// Creates a \c CompilerInvocation.
CompilerInvocation *newInvocation(DiagnosticsEngine *Diagnostics,
- const llvm::opt::ArgStringList &CC1Args,
+ ArrayRef<const char *> CC1Args,
const char *const BinaryName);
} // namespace tooling
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/MatchConsumer.h b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/MatchConsumer.h
index cb0a5f684b7d..fb57dabb0a6f 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/MatchConsumer.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/MatchConsumer.h
@@ -12,8 +12,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_TRANSFORMER_MATCH_CONSUMER_H_
-#define LLVM_CLANG_TOOLING_TRANSFORMER_MATCH_CONSUMER_H_
+#ifndef LLVM_CLANG_TOOLING_TRANSFORMER_MATCHCONSUMER_H
+#define LLVM_CLANG_TOOLING_TRANSFORMER_MATCHCONSUMER_H
#include "clang/AST/ASTTypeTraits.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
@@ -100,4 +100,4 @@ llvm::Expected<T> MatchComputation<T>::eval(
}
} // namespace transformer
} // namespace clang
-#endif // LLVM_CLANG_TOOLING_TRANSFORMER_MATCH_CONSUMER_H_
+#endif // LLVM_CLANG_TOOLING_TRANSFORMER_MATCHCONSUMER_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Parsing.h b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Parsing.h
index b143f63d8ca8..177eca6a044d 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Parsing.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Parsing.h
@@ -13,8 +13,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_PARSING_H_
-#define LLVM_CLANG_TOOLING_REFACTOR_PARSING_H_
+#ifndef LLVM_CLANG_TOOLING_TRANSFORMER_PARSING_H
+#define LLVM_CLANG_TOOLING_TRANSFORMER_PARSING_H
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/Basic/SourceLocation.h"
@@ -37,4 +37,4 @@ llvm::Expected<RangeSelector> parseRangeSelector(llvm::StringRef Input);
} // namespace transformer
} // namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_PARSING_H_
+#endif // LLVM_CLANG_TOOLING_TRANSFORMER_PARSING_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RangeSelector.h b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RangeSelector.h
index 8ff31f7a0342..1e288043f0a8 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RangeSelector.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RangeSelector.h
@@ -12,8 +12,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_REFACTOR_RANGE_SELECTOR_H_
-#define LLVM_CLANG_TOOLING_REFACTOR_RANGE_SELECTOR_H_
+#ifndef LLVM_CLANG_TOOLING_TRANSFORMER_RANGESELECTOR_H
+#define LLVM_CLANG_TOOLING_TRANSFORMER_RANGESELECTOR_H
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/Basic/SourceLocation.h"
@@ -50,7 +50,7 @@ inline RangeSelector range(std::string BeginID, std::string EndID) {
/// Selects the (empty) range [B,B) when \p Selector selects the range [B,E).
RangeSelector before(RangeSelector Selector);
-/// Selects the the point immediately following \p Selector. That is, the
+/// Selects the point immediately following \p Selector. That is, the
/// (empty) range [E,E), when \p Selector selects either
/// * the CharRange [B,E) or
/// * the TokenRange [B,E'] where the token at E' spans the range [E',E).
@@ -105,4 +105,4 @@ RangeSelector expansion(RangeSelector S);
} // namespace transformer
} // namespace clang
-#endif // LLVM_CLANG_TOOLING_REFACTOR_RANGE_SELECTOR_H_
+#endif // LLVM_CLANG_TOOLING_TRANSFORMER_RANGESELECTOR_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RewriteRule.h b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RewriteRule.h
index ac93db8446df..50f460d65e7d 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RewriteRule.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/RewriteRule.h
@@ -12,8 +12,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_TRANSFORMER_REWRITE_RULE_H_
-#define LLVM_CLANG_TOOLING_TRANSFORMER_REWRITE_RULE_H_
+#ifndef LLVM_CLANG_TOOLING_TRANSFORMER_REWRITERULE_H
+#define LLVM_CLANG_TOOLING_TRANSFORMER_REWRITERULE_H
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/ASTMatchers/ASTMatchers.h"
@@ -46,6 +46,7 @@ struct Edit {
EditKind Kind = EditKind::Range;
CharSourceRange Range;
std::string Replacement;
+ std::string Note;
llvm::Any Metadata;
};
@@ -61,7 +62,9 @@ enum class IncludeFormat {
/// of `EditGenerator`.
using EditGenerator = MatchConsumer<llvm::SmallVector<Edit, 1>>;
-using TextGenerator = std::shared_ptr<MatchComputation<std::string>>;
+template <typename T> using Generator = std::shared_ptr<MatchComputation<T>>;
+
+using TextGenerator = Generator<std::string>;
using AnyGenerator = MatchConsumer<llvm::Any>;
@@ -136,6 +139,10 @@ inline EditGenerator noEdits() { return editList({}); }
/// diagnostic `Explanation` with the rule.
EditGenerator noopEdit(RangeSelector Anchor);
+/// Generates a single, no-op edit with the associated note anchored at the
+/// start location of the specified range.
+ASTEdit note(RangeSelector Anchor, TextGenerator Note);
+
/// Version of `ifBound` specialized to `ASTEdit`.
inline EditGenerator ifBound(std::string ID, ASTEdit TrueEdit,
ASTEdit FalseEdit) {
@@ -262,12 +269,9 @@ inline EditGenerator shrinkTo(RangeSelector outer, RangeSelector inner) {
//
// * Edits: a set of Edits to the source code, described with ASTEdits.
//
-// * Explanation: explanation of the rewrite. This will be displayed to the
-// user, where possible; for example, in clang-tidy diagnostics.
-//
// However, rules can also consist of (sub)rules, where the first that matches
-// is applied and the rest are ignored. So, the above components are gathered
-// as a `Case` and a rule is a list of cases.
+// is applied and the rest are ignored. So, the above components together form
+// a logical "case" and a rule is a sequence of cases.
//
// Rule cases have an additional, implicit, component: the parameters. These are
// portions of the pattern which are left unspecified, yet bound in the pattern
@@ -275,38 +279,83 @@ inline EditGenerator shrinkTo(RangeSelector outer, RangeSelector inner) {
//
// The \c Transformer class can be used to apply the rewrite rule and obtain the
// corresponding replacements.
-struct RewriteRule {
+struct RewriteRuleBase {
struct Case {
ast_matchers::internal::DynTypedMatcher Matcher;
EditGenerator Edits;
- TextGenerator Explanation;
};
// We expect RewriteRules will most commonly include only one case.
SmallVector<Case, 1> Cases;
+};
- /// DEPRECATED: use `::clang::transformer::RootID` instead.
- static const llvm::StringRef RootID;
+/// A source-code transformation with accompanying metadata.
+///
+/// When a case of the rule matches, the \c Transformer invokes the
+/// corresponding metadata generator and provides it alongside the edits.
+template <typename MetadataT> struct RewriteRuleWith : RewriteRuleBase {
+ SmallVector<Generator<MetadataT>, 1> Metadata;
};
-/// Constructs a simple \c RewriteRule.
+template <> struct RewriteRuleWith<void> : RewriteRuleBase {};
+
+using RewriteRule = RewriteRuleWith<void>;
+
+namespace detail {
+
RewriteRule makeRule(ast_matchers::internal::DynTypedMatcher M,
- EditGenerator Edits, TextGenerator Explanation = nullptr);
-
-/// Constructs a \c RewriteRule from multiple `ASTEdit`s.
-inline RewriteRule makeRule(ast_matchers::internal::DynTypedMatcher M,
- llvm::SmallVector<ASTEdit, 1> Edits,
- TextGenerator Explanation = nullptr) {
- return makeRule(std::move(M), editList(std::move(Edits)),
- std::move(Explanation));
+ EditGenerator Edits);
+
+template <typename MetadataT>
+RewriteRuleWith<MetadataT> makeRule(ast_matchers::internal::DynTypedMatcher M,
+ EditGenerator Edits,
+ Generator<MetadataT> Metadata) {
+ RewriteRuleWith<MetadataT> R;
+ R.Cases = {{std::move(M), std::move(Edits)}};
+ R.Metadata = {std::move(Metadata)};
+ return R;
}
-/// Overload of \c makeRule for common case of only one edit.
-inline RewriteRule makeRule(ast_matchers::internal::DynTypedMatcher M,
- ASTEdit Edit,
- TextGenerator Explanation = nullptr) {
- return makeRule(std::move(M), edit(std::move(Edit)), std::move(Explanation));
+inline EditGenerator makeEditGenerator(EditGenerator Edits) { return Edits; }
+EditGenerator makeEditGenerator(llvm::SmallVector<ASTEdit, 1> Edits);
+EditGenerator makeEditGenerator(ASTEdit Edit);
+
+} // namespace detail
+
+/// Constructs a simple \c RewriteRule. \c Edits can be an \c EditGenerator,
+/// multiple \c ASTEdits, or a single \c ASTEdit.
+/// @{
+template <int &..., typename EditsT>
+RewriteRule makeRule(ast_matchers::internal::DynTypedMatcher M,
+ EditsT &&Edits) {
+ return detail::makeRule(
+ std::move(M), detail::makeEditGenerator(std::forward<EditsT>(Edits)));
}
+RewriteRule makeRule(ast_matchers::internal::DynTypedMatcher M,
+ std::initializer_list<ASTEdit> Edits);
+/// @}
+
+/// Overloads of \c makeRule that also generate metadata when matching.
+/// @{
+template <typename MetadataT, int &..., typename EditsT>
+RewriteRuleWith<MetadataT> makeRule(ast_matchers::internal::DynTypedMatcher M,
+ EditsT &&Edits,
+ Generator<MetadataT> Metadata) {
+ return detail::makeRule(
+ std::move(M), detail::makeEditGenerator(std::forward<EditsT>(Edits)),
+ std::move(Metadata));
+}
+
+template <typename MetadataT>
+RewriteRuleWith<MetadataT> makeRule(ast_matchers::internal::DynTypedMatcher M,
+ std::initializer_list<ASTEdit> Edits,
+ Generator<MetadataT> Metadata) {
+ return detail::makeRule(std::move(M),
+ detail::makeEditGenerator(std::move(Edits)),
+ std::move(Metadata));
+}
+/// @}
+
/// For every case in Rule, adds an include directive for the given header. The
/// common use is assumed to be a rule with only one case. For example, to
/// replace a function call and add headers corresponding to the new code, one
@@ -317,7 +366,7 @@ inline RewriteRule makeRule(ast_matchers::internal::DynTypedMatcher M,
/// addInclude(R, "path/to/bar_header.h");
/// addInclude(R, "vector", IncludeFormat::Angled);
/// \endcode
-void addInclude(RewriteRule &Rule, llvm::StringRef Header,
+void addInclude(RewriteRuleBase &Rule, llvm::StringRef Header,
IncludeFormat Format = IncludeFormat::Quoted);
/// Applies the first rule whose pattern matches; other rules are ignored. If
@@ -359,7 +408,45 @@ void addInclude(RewriteRule &Rule, llvm::StringRef Header,
// makeRule(left_call, left_call_action),
// makeRule(right_call, right_call_action)});
// ```
-RewriteRule applyFirst(ArrayRef<RewriteRule> Rules);
+/// @{
+template <typename MetadataT>
+RewriteRuleWith<MetadataT>
+applyFirst(ArrayRef<RewriteRuleWith<MetadataT>> Rules) {
+ RewriteRuleWith<MetadataT> R;
+ for (auto &Rule : Rules) {
+ assert(Rule.Cases.size() == Rule.Metadata.size() &&
+ "mis-match in case and metadata array size");
+ R.Cases.append(Rule.Cases.begin(), Rule.Cases.end());
+ R.Metadata.append(Rule.Metadata.begin(), Rule.Metadata.end());
+ }
+ return R;
+}
+
+template <>
+RewriteRuleWith<void> applyFirst(ArrayRef<RewriteRuleWith<void>> Rules);
+
+template <typename MetadataT>
+RewriteRuleWith<MetadataT>
+applyFirst(const std::vector<RewriteRuleWith<MetadataT>> &Rules) {
+ return applyFirst(llvm::ArrayRef(Rules));
+}
+
+template <typename MetadataT>
+RewriteRuleWith<MetadataT>
+applyFirst(std::initializer_list<RewriteRuleWith<MetadataT>> Rules) {
+ return applyFirst(llvm::ArrayRef(Rules.begin(), Rules.end()));
+}
+/// @}
+
+/// Converts a \c RewriteRuleWith<T> to a \c RewriteRule by stripping off the
+/// metadata generators.
+template <int &..., typename MetadataT>
+std::enable_if_t<!std::is_same<MetadataT, void>::value, RewriteRule>
+stripMetadata(RewriteRuleWith<MetadataT> Rule) {
+ RewriteRule R;
+ R.Cases = std::move(Rule.Cases);
+ return R;
+}
/// Applies `Rule` to all descendants of the node bound to `NodeId`. `Rule` can
/// refer to nodes bound by the calling rule. `Rule` is not applied to the node
@@ -423,7 +510,8 @@ rewriteDescendants(const DynTypedNode &Node, RewriteRule Rule,
/// Only supports Rules whose cases' matchers share the same base "kind"
/// (`Stmt`, `Decl`, etc.) Deprecated: use `buildMatchers` instead, which
/// supports mixing matchers of different kinds.
-ast_matchers::internal::DynTypedMatcher buildMatcher(const RewriteRule &Rule);
+ast_matchers::internal::DynTypedMatcher
+buildMatcher(const RewriteRuleBase &Rule);
/// Builds a set of matchers that cover the rule.
///
@@ -433,7 +521,7 @@ ast_matchers::internal::DynTypedMatcher buildMatcher(const RewriteRule &Rule);
/// for rewriting. If any such matchers are included, will return an empty
/// vector.
std::vector<ast_matchers::internal::DynTypedMatcher>
-buildMatchers(const RewriteRule &Rule);
+buildMatchers(const RewriteRuleBase &Rule);
/// Gets the beginning location of the source matched by a rewrite rule. If the
/// match occurs within a macro expansion, returns the beginning of the
@@ -441,13 +529,12 @@ buildMatchers(const RewriteRule &Rule);
SourceLocation
getRuleMatchLoc(const ast_matchers::MatchFinder::MatchResult &Result);
-/// Returns the \c Case of \c Rule that was selected in the match result.
-/// Assumes a matcher built with \c buildMatcher.
-const RewriteRule::Case &
-findSelectedCase(const ast_matchers::MatchFinder::MatchResult &Result,
- const RewriteRule &Rule);
+/// Returns the index of the \c Case of \c Rule that was selected in the match
+/// result. Assumes a matcher built with \c buildMatcher.
+size_t findSelectedCase(const ast_matchers::MatchFinder::MatchResult &Result,
+ const RewriteRuleBase &Rule);
} // namespace detail
} // namespace transformer
} // namespace clang
-#endif // LLVM_CLANG_TOOLING_TRANSFORMER_REWRITE_RULE_H_
+#endif // LLVM_CLANG_TOOLING_TRANSFORMER_REWRITERULE_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCode.h b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCode.h
index 2c7eb65371cf..004c614b0b9d 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCode.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCode.h
@@ -10,12 +10,13 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_TRANSFORMER_SOURCE_CODE_H
-#define LLVM_CLANG_TOOLING_TRANSFORMER_SOURCE_CODE_H
+#ifndef LLVM_CLANG_TOOLING_TRANSFORMER_SOURCECODE_H
+#define LLVM_CLANG_TOOLING_TRANSFORMER_SOURCECODE_H
#include "clang/AST/ASTContext.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/TokenKinds.h"
+#include <optional>
namespace clang {
namespace tooling {
@@ -41,6 +42,10 @@ CharSourceRange getExtendedRange(const T &Node, tok::TokenKind Next,
/// terminators. The returned range consists of file locations, if valid file
/// locations can be found for the associated content; otherwise, an invalid
/// range is returned.
+///
+/// Note that parsing comments is disabled by default. In order to select a
+/// range containing associated comments, you may need to invoke the tool with
+/// `-fparse-all-comments`.
CharSourceRange getAssociatedRange(const Decl &D, ASTContext &Context);
/// Returns the source-code text in the specified range.
@@ -86,18 +91,55 @@ StringRef getExtendedText(const T &Node, tok::TokenKind Next,
llvm::Error validateEditRange(const CharSourceRange &Range,
const SourceManager &SM);
+/// Determines whether \p Range is one that can be read from. If
+/// `AllowSystemHeaders` is false, a range that falls within a system header
+/// fails validation.
+llvm::Error validateRange(const CharSourceRange &Range, const SourceManager &SM,
+ bool AllowSystemHeaders);
+
/// Attempts to resolve the given range to one that can be edited by a rewrite;
-/// generally, one that starts and ends within a particular file. It supports a
-/// limited set of cases involving source locations in macro expansions. If a
-/// value is returned, it satisfies \c validateEditRange.
-llvm::Optional<CharSourceRange>
-getRangeForEdit(const CharSourceRange &EditRange, const SourceManager &SM,
- const LangOptions &LangOpts);
-inline llvm::Optional<CharSourceRange>
-getRangeForEdit(const CharSourceRange &EditRange, const ASTContext &Context) {
- return getRangeForEdit(EditRange, Context.getSourceManager(),
- Context.getLangOpts());
+/// generally, one that starts and ends within a particular file. If a value is
+/// returned, it satisfies \c validateEditRange.
+///
+/// If \c IncludeMacroExpansion is true, a limited set of cases involving source
+/// locations in macro expansions is supported. For example, if we're looking to
+/// rewrite the int literal 3 to 6, and we have the following definition:
+/// #define DO_NOTHING(x) x
+/// then
+/// foo(DO_NOTHING(3))
+/// will be rewritten to
+/// foo(6)
+std::optional<CharSourceRange>
+getFileRangeForEdit(const CharSourceRange &EditRange, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ bool IncludeMacroExpansion = true);
+inline std::optional<CharSourceRange>
+getFileRangeForEdit(const CharSourceRange &EditRange, const ASTContext &Context,
+ bool IncludeMacroExpansion = true) {
+ return getFileRangeForEdit(EditRange, Context.getSourceManager(),
+ Context.getLangOpts(), IncludeMacroExpansion);
}
+
+/// Attempts to resolve the given range to one that starts and ends in a
+/// particular file.
+///
+/// If \c IncludeMacroExpansion is true, a limited set of cases involving source
+/// locations in macro expansions is supported. For example, if we're looking to
+/// get the range of the int literal 3, and we have the following definition:
+/// #define DO_NOTHING(x) x
+/// foo(DO_NOTHING(3))
+/// the returned range will hold the source text `DO_NOTHING(3)`.
+std::optional<CharSourceRange> getFileRange(const CharSourceRange &EditRange,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ bool IncludeMacroExpansion);
+inline std::optional<CharSourceRange>
+getFileRange(const CharSourceRange &EditRange, const ASTContext &Context,
+ bool IncludeMacroExpansion) {
+ return getFileRange(EditRange, Context.getSourceManager(),
+ Context.getLangOpts(), IncludeMacroExpansion);
+}
+
} // namespace tooling
} // namespace clang
-#endif // LLVM_CLANG_TOOLING_TRANSFORMER_SOURCE_CODE_H
+#endif // LLVM_CLANG_TOOLING_TRANSFORMER_SOURCECODE_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCodeBuilders.h b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCodeBuilders.h
index 6c79a7588f28..22fc644dfac5 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCodeBuilders.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/SourceCodeBuilders.h
@@ -11,8 +11,8 @@
///
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_TOOLING_TRANSFORMER_SOURCE_CODE_BUILDERS_H_
-#define LLVM_CLANG_TOOLING_TRANSFORMER_SOURCE_CODE_BUILDERS_H_
+#ifndef LLVM_CLANG_TOOLING_TRANSFORMER_SOURCECODEBUILDERS_H
+#define LLVM_CLANG_TOOLING_TRANSFORMER_SOURCECODEBUILDERS_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
@@ -43,6 +43,15 @@ inline bool needParensBeforeDotOrArrow(const Expr &E) {
/// Determines whether printing this expression to the right of a unary operator
/// requires a parentheses to preserve its meaning.
bool needParensAfterUnaryOperator(const Expr &E);
+
+// Recognizes known types (and sugared versions thereof) that overload the `*`
+// and `->` operator. Below is the list of currently included types, but it is
+// subject to change:
+//
+// * std::unique_ptr, std::shared_ptr, std::weak_ptr,
+// * std::optional, absl::optional, llvm::Optional,
+// * absl::StatusOr, llvm::Expected.
+bool isKnownPointerLikeType(QualType Ty, ASTContext &Context);
/// @}
/// \name Basic code-string generation utilities.
@@ -50,18 +59,18 @@ bool needParensAfterUnaryOperator(const Expr &E);
/// Builds source for an expression, adding parens if needed for unambiguous
/// parsing.
-llvm::Optional<std::string> buildParens(const Expr &E,
- const ASTContext &Context);
+std::optional<std::string> buildParens(const Expr &E,
+ const ASTContext &Context);
/// Builds idiomatic source for the dereferencing of `E`: prefix with `*` but
/// simplify when it already begins with `&`. \returns empty string on failure.
-llvm::Optional<std::string> buildDereference(const Expr &E,
- const ASTContext &Context);
+std::optional<std::string> buildDereference(const Expr &E,
+ const ASTContext &Context);
/// Builds idiomatic source for taking the address of `E`: prefix with `&` but
/// simplify when it already begins with `*`. \returns empty string on failure.
-llvm::Optional<std::string> buildAddressOf(const Expr &E,
- const ASTContext &Context);
+std::optional<std::string> buildAddressOf(const Expr &E,
+ const ASTContext &Context);
/// Adds a dot to the end of the given expression, but adds parentheses when
/// needed by the syntax, and simplifies to `->` when possible, e.g.:
@@ -69,7 +78,9 @@ llvm::Optional<std::string> buildAddressOf(const Expr &E,
/// `x` becomes `x.`
/// `*a` becomes `a->`
/// `a+b` becomes `(a+b).`
-llvm::Optional<std::string> buildDot(const Expr &E, const ASTContext &Context);
+///
+/// DEPRECATED. Use `buildAccess`.
+std::optional<std::string> buildDot(const Expr &E, const ASTContext &Context);
/// Adds an arrow to the end of the given expression, but adds parentheses
/// when needed by the syntax, and simplifies to `.` when possible, e.g.:
@@ -77,10 +88,33 @@ llvm::Optional<std::string> buildDot(const Expr &E, const ASTContext &Context);
/// `x` becomes `x->`
/// `&a` becomes `a.`
/// `a+b` becomes `(a+b)->`
-llvm::Optional<std::string> buildArrow(const Expr &E,
- const ASTContext &Context);
+///
+/// DEPRECATED. Use `buildAccess`.
+std::optional<std::string> buildArrow(const Expr &E, const ASTContext &Context);
+
+/// Specifies how to classify pointer-like types -- like values or like pointers
+/// -- with regard to generating member-access syntax.
+enum class PLTClass : bool {
+ Value,
+ Pointer,
+};
+
+/// Adds an appropriate access operator (`.`, `->` or nothing, in the case of
+/// implicit `this`) to the end of the given expression. Adds parentheses when
+/// needed by the syntax and simplifies when possible. If `PLTypeClass` is
+/// `Pointer`, for known pointer-like types (see `isKnownPointerLikeType`),
+/// treats `operator->` and `operator*` like the built-in `->` and `*`
+/// operators.
+///
+/// `x` becomes `x->` or `x.`, depending on `E`'s type
+/// `a+b` becomes `(a+b)->` or `(a+b).`, depending on `E`'s type
+/// `&a` becomes `a.`
+/// `*a` becomes `a->`
+std::optional<std::string>
+buildAccess(const Expr &E, ASTContext &Context,
+ PLTClass Classification = PLTClass::Pointer);
/// @}
} // namespace tooling
} // namespace clang
-#endif // LLVM_CLANG_TOOLING_TRANSFORMER_SOURCE_CODE_BUILDERS_H_
+#endif // LLVM_CLANG_TOOLING_TRANSFORMER_SOURCECODEBUILDERS_H
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Stencil.h b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Stencil.h
index 1b7495eb0262..249f95b7391d 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Stencil.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Stencil.h
@@ -117,6 +117,38 @@ inline Stencil ifBound(llvm::StringRef Id, llvm::StringRef TrueText,
detail::makeStencil(FalseText));
}
+/// Chooses between multiple stencils, based on the presence of bound nodes. \p
+/// CaseStencils takes a vector of (ID, \c Stencil) pairs and checks each ID in
+/// order to see if it's bound to a node. If so, the associated \c Stencil is
+/// run and all other cases are ignored. An optional \p DefaultStencil can be
+/// provided to be run if all cases are exhausted beacause none of the provided
+/// IDs are bound. If no default case is provided and all cases are exhausted,
+/// the stencil will fail with error `llvm::errc::result_out_of_range`.
+///
+/// For example, say one matches a statement's type with:
+/// anyOf(
+/// qualType(isInteger()).bind("int"),
+/// qualType(realFloatingPointType()).bind("float"),
+/// qualType(isAnyCharacter()).bind("char"),
+/// booleanType().bind("bool"))
+///
+/// Then, one can decide in a stencil how to construct a literal.
+/// cat("a = ",
+/// selectBound(
+/// {{"int", cat("0")},
+/// {"float", cat("0.0")},
+/// {"char", cat("'\\0'")},
+/// {"bool", cat("false")}}))
+///
+/// In addition, one could supply a default case for all other types:
+/// selectBound(
+/// {{"int", cat("0")},
+/// ...
+/// {"bool", cat("false")}},
+/// cat("{}"))
+Stencil selectBound(std::vector<std::pair<std::string, Stencil>> CaseStencils,
+ Stencil DefaultStencil = nullptr);
+
/// Wraps a \c MatchConsumer in a \c Stencil, so that it can be used in a \c
/// Stencil. This supports user-defined extensions to the \c Stencil language.
Stencil run(MatchConsumer<std::string> C);
diff --git a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Transformer.h b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Transformer.h
index 31feacba5e28..71b1fe81b951 100644
--- a/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Transformer.h
+++ b/contrib/llvm-project/clang/include/clang/Tooling/Transformer/Transformer.h
@@ -18,20 +18,86 @@
namespace clang {
namespace tooling {
+
+namespace detail {
+/// Implementation details of \c Transformer with type erasure around
+/// \c RewriteRule<T> as well as the corresponding consumers.
+class TransformerImpl {
+public:
+ virtual ~TransformerImpl() = default;
+
+ void onMatch(const ast_matchers::MatchFinder::MatchResult &Result);
+
+ virtual std::vector<ast_matchers::internal::DynTypedMatcher>
+ buildMatchers() const = 0;
+
+protected:
+ /// Converts a set of \c Edit into a \c AtomicChange per file modified.
+ /// Returns an error if the edits fail to compose, e.g. overlapping edits.
+ static llvm::Expected<llvm::SmallVector<AtomicChange, 1>>
+ convertToAtomicChanges(const llvm::SmallVectorImpl<transformer::Edit> &Edits,
+ const ast_matchers::MatchFinder::MatchResult &Result);
+
+private:
+ virtual void
+ onMatchImpl(const ast_matchers::MatchFinder::MatchResult &Result) = 0;
+};
+
+// FIXME: Use std::type_identity or backport when available.
+template <class T> struct type_identity {
+ using type = T;
+};
+} // namespace detail
+
+template <typename T> struct TransformerResult {
+ llvm::MutableArrayRef<AtomicChange> Changes;
+ T Metadata;
+};
+
+template <> struct TransformerResult<void> {
+ llvm::MutableArrayRef<AtomicChange> Changes;
+};
+
/// Handles the matcher and callback registration for a single `RewriteRule`, as
/// defined by the arguments of the constructor.
class Transformer : public ast_matchers::MatchFinder::MatchCallback {
public:
- using ChangeConsumer =
- std::function<void(Expected<clang::tooling::AtomicChange> Change)>;
+ /// Provides the set of changes to the consumer. The callback is free to move
+ /// or destructively consume the changes as needed.
+ ///
+ /// We use \c MutableArrayRef as an abstraction to provide decoupling, and we
+ /// expect the majority of consumers to copy or move the individual values
+ /// into a separate data structure.
+ using ChangeSetConsumer = std::function<void(
+ Expected<llvm::MutableArrayRef<AtomicChange>> Changes)>;
- /// \param Consumer Receives each rewrite or error. Will not necessarily be
- /// called for each match; for example, if the rewrite is not applicable
- /// because of macros, but doesn't fail. Note that clients are responsible
+ /// \param Consumer receives all rewrites for a single match, or an error.
+ /// Will not necessarily be called for each match; for example, if the rule
+ /// generates no edits but does not fail. Note that clients are responsible
/// for handling the case that independent \c AtomicChanges conflict with each
/// other.
- Transformer(transformer::RewriteRule Rule, ChangeConsumer Consumer)
- : Rule(std::move(Rule)), Consumer(std::move(Consumer)) {}
+ explicit Transformer(transformer::RewriteRuleWith<void> Rule,
+ ChangeSetConsumer Consumer)
+ : Transformer(std::move(Rule),
+ [Consumer = std::move(Consumer)](
+ llvm::Expected<TransformerResult<void>> Result) {
+ if (Result)
+ Consumer(Result->Changes);
+ else
+ Consumer(Result.takeError());
+ }) {}
+
+ /// \param Consumer receives all rewrites and the associated metadata for a
+ /// single match, or an error. Will always be called for each match, even if
+ /// the rule generates no edits. Note that clients are responsible for
+ /// handling the case that independent \c AtomicChanges conflict with each
+ /// other.
+ template <typename MetadataT>
+ explicit Transformer(
+ transformer::RewriteRuleWith<MetadataT> Rule,
+ std::function<void(llvm::Expected<TransformerResult<
+ typename detail::type_identity<MetadataT>::type>>)>
+ Consumer);
/// N.B. Passes `this` pointer to `MatchFinder`. So, this object should not
/// be moved after this call.
@@ -42,10 +108,104 @@ public:
void run(const ast_matchers::MatchFinder::MatchResult &Result) override;
private:
- transformer::RewriteRule Rule;
- /// Receives each successful rewrites as an \c AtomicChange.
- ChangeConsumer Consumer;
+ std::unique_ptr<detail::TransformerImpl> Impl;
};
+
+namespace detail {
+/// Runs the metadata generator on \c Rule and stuffs it into \c Result.
+/// @{
+template <typename T>
+llvm::Error
+populateMetadata(const transformer::RewriteRuleWith<T> &Rule,
+ size_t SelectedCase,
+ const ast_matchers::MatchFinder::MatchResult &Match,
+ TransformerResult<T> &Result) {
+ // Silence a false positive GCC -Wunused-but-set-parameter warning in constexpr
+ // cases, by marking SelectedCase as used. See
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85827 for details. The issue is
+ // fixed in GCC 10.
+ (void)SelectedCase;
+ if constexpr (!std::is_void_v<T>) {
+ auto Metadata = Rule.Metadata[SelectedCase]->eval(Match);
+ if (!Metadata)
+ return Metadata.takeError();
+ Result.Metadata = std::move(*Metadata);
+ }
+ return llvm::Error::success();
+}
+/// @}
+
+/// Implementation when metadata is generated as a part of the rewrite. This
+/// happens when we have a \c RewriteRuleWith<T>.
+template <typename T> class WithMetadataImpl final : public TransformerImpl {
+ transformer::RewriteRuleWith<T> Rule;
+ std::function<void(llvm::Expected<TransformerResult<T>>)> Consumer;
+
+public:
+ explicit WithMetadataImpl(
+ transformer::RewriteRuleWith<T> R,
+ std::function<void(llvm::Expected<TransformerResult<T>>)> Consumer)
+ : Rule(std::move(R)), Consumer(std::move(Consumer)) {
+ assert(llvm::all_of(Rule.Cases,
+ [](const transformer::RewriteRuleBase::Case &Case)
+ -> bool { return !!Case.Edits; }) &&
+ "edit generator must be provided for each rule");
+ if constexpr (!std::is_void_v<T>)
+ assert(llvm::all_of(Rule.Metadata,
+ [](const typename transformer::Generator<T> &Metadata)
+ -> bool { return !!Metadata; }) &&
+ "metadata generator must be provided for each rule");
+ }
+
+private:
+ void onMatchImpl(const ast_matchers::MatchFinder::MatchResult &Result) final {
+ size_t I = transformer::detail::findSelectedCase(Result, Rule);
+ auto Transformations = Rule.Cases[I].Edits(Result);
+ if (!Transformations) {
+ Consumer(Transformations.takeError());
+ return;
+ }
+
+ llvm::SmallVector<AtomicChange, 1> Changes;
+ if (!Transformations->empty()) {
+ auto C = convertToAtomicChanges(*Transformations, Result);
+ if (C) {
+ Changes = std::move(*C);
+ } else {
+ Consumer(C.takeError());
+ return;
+ }
+ } else if (std::is_void<T>::value) {
+ // If we don't have metadata and we don't have any edits, skip.
+ return;
+ }
+
+ TransformerResult<T> RewriteResult;
+ if (auto E = populateMetadata(Rule, I, Result, RewriteResult)) {
+ Consumer(std::move(E));
+ return;
+ }
+
+ RewriteResult.Changes = llvm::MutableArrayRef<AtomicChange>(Changes);
+ Consumer(std::move(RewriteResult));
+ }
+
+ std::vector<ast_matchers::internal::DynTypedMatcher>
+ buildMatchers() const final {
+ return transformer::detail::buildMatchers(Rule);
+ }
+};
+} // namespace detail
+
+template <typename MetadataT>
+Transformer::Transformer(
+ transformer::RewriteRuleWith<MetadataT> Rule,
+ std::function<void(llvm::Expected<TransformerResult<
+ typename detail::type_identity<MetadataT>::type>>)>
+ Consumer)
+ : Impl(std::make_unique<detail::WithMetadataImpl<MetadataT>>(
+ std::move(Rule), std::move(Consumer))) {}
+
} // namespace tooling
} // namespace clang
diff --git a/contrib/llvm-project/clang/include/clang/module.modulemap b/contrib/llvm-project/clang/include/clang/module.modulemap
deleted file mode 100644
index 33fcf9dc7576..000000000000
--- a/contrib/llvm-project/clang/include/clang/module.modulemap
+++ /dev/null
@@ -1,182 +0,0 @@
-module Clang_Analysis {
- requires cplusplus
- umbrella "Analysis"
-
- textual header "Analysis/Analyses/ThreadSafetyOps.def"
-
- module * { export * }
-
- // FIXME: Exclude these headers to avoid pulling all of the AST matchers
- // library into clang. Due to inline key functions in the headers,
- // importing the AST matchers library gives a link dependency on the AST
- // matchers (and thus the AST), which clang-format should not have.
- exclude header "Analysis/Analyses/ExprMutationAnalyzer.h"
-}
-
-module Clang_AST {
- requires cplusplus
- umbrella "AST"
-
- textual header "AST/BuiltinTypes.def"
- textual header "AST/CXXRecordDeclDefinitionBits.def"
- textual header "AST/OperationKinds.def"
- textual header "AST/TypeLocNodes.def"
-
- module * { export * }
-}
-
-module Clang_ASTMatchers { requires cplusplus umbrella "ASTMatchers" module * { export * } }
-
-module Clang_Basic {
- requires cplusplus
- umbrella "Basic"
-
- textual header "Basic/AArch64SVEACLETypes.def"
- textual header "Basic/BuiltinsAArch64.def"
- textual header "Basic/BuiltinsAMDGPU.def"
- textual header "Basic/BuiltinsARM.def"
- textual header "Basic/BuiltinsBPF.def"
- textual header "Basic/Builtins.def"
- textual header "Basic/BuiltinsHexagon.def"
- textual header "Basic/BuiltinsHexagonDep.def"
- textual header "Basic/BuiltinsHexagonMapCustomDep.def"
- textual header "Basic/BuiltinsMips.def"
- textual header "Basic/BuiltinsNEON.def"
- textual header "Basic/BuiltinsNVPTX.def"
- textual header "Basic/BuiltinsPPC.def"
- textual header "Basic/BuiltinsRISCV.def"
- textual header "Basic/BuiltinsSVE.def"
- textual header "Basic/BuiltinsSystemZ.def"
- textual header "Basic/BuiltinsWebAssembly.def"
- textual header "Basic/BuiltinsX86.def"
- textual header "Basic/BuiltinsX86_64.def"
- textual header "Basic/BuiltinsXCore.def"
- textual header "Basic/CodeGenOptions.def"
- textual header "Basic/DiagnosticOptions.def"
- textual header "Basic/Features.def"
- textual header "Basic/FPOptions.def"
- textual header "Basic/MSP430Target.def"
- textual header "Basic/LangOptions.def"
- textual header "Basic/OpenCLExtensions.def"
- textual header "Basic/OpenCLImageTypes.def"
- textual header "Basic/OpenCLExtensionTypes.def"
- textual header "Basic/OpenMPKinds.def"
- textual header "Basic/OperatorKinds.def"
- textual header "Basic/PPCTypes.def"
- textual header "Basic/RISCVVTypes.def"
- textual header "Basic/Sanitizers.def"
- textual header "Basic/TargetCXXABI.def"
- textual header "Basic/TokenKinds.def"
- textual header "Basic/X86Target.def"
-
- module * { export * }
-}
-
-module Clang_CodeGen { requires cplusplus umbrella "CodeGen" module * { export * } }
-module Clang_Config { requires cplusplus umbrella "Config" module * { export * } }
-
-// Files for diagnostic groups are spread all over the include/clang/ tree, but
-// logically form a single module.
-module Clang_Diagnostics {
- requires cplusplus
-
- module All { header "Basic/AllDiagnostics.h" export * }
- module Analysis { header "Analysis/AnalysisDiagnostic.h" export * }
- module AST { header "AST/ASTDiagnostic.h" export * }
- module Comment { header "AST/CommentDiagnostic.h" export * }
- module Driver { header "Driver/DriverDiagnostic.h" export * }
- module Frontend { header "Frontend/FrontendDiagnostic.h" export * }
- module Lex { header "Lex/LexDiagnostic.h" export * }
- module Parse { header "Parse/ParseDiagnostic.h" export * }
- module Sema { header "Sema/SemaDiagnostic.h" export * }
- module Serialization { header "Serialization/SerializationDiagnostic.h" export * }
- module Refactoring { header "Tooling/Refactoring/RefactoringDiagnostic.h" export * }
-}
-
-module Clang_Driver {
- requires cplusplus
- umbrella "Driver"
-
- textual header "Driver/Types.def"
-
- module * { export * }
-}
-
-module Clang_Edit { requires cplusplus umbrella "Edit" module * { export * } }
-module Clang_Format { requires cplusplus umbrella "Format" module * { export * } }
-
-module Clang_Frontend {
- requires cplusplus
- umbrella "Frontend"
-
- textual header "Basic/LangStandards.def"
-
- module * { export * }
-}
-
-module Clang_FrontendTool { requires cplusplus umbrella "FrontendTool" module * { export * } }
-module Clang_Index { requires cplusplus umbrella "Index" module * { export * } }
-module Clang_Lex { requires cplusplus umbrella "Lex" module * { export * } }
-module Clang_Parse { requires cplusplus umbrella "Parse" module * { export * } }
-module Clang_Rewrite { requires cplusplus umbrella "Rewrite/Core" module * { export * } }
-module Clang_RewriteFrontend { requires cplusplus umbrella "Rewrite/Frontend" module * { export * } }
-module Clang_Sema { requires cplusplus umbrella "Sema" module * { export * } }
-
-module Clang_Serialization {
- requires cplusplus
- umbrella "Serialization"
-
- textual header "Serialization/TypeBitCodes.def"
-
- module * { export * }
-}
-
-module Clang_StaticAnalyzer_Core {
- requires cplusplus
- umbrella "StaticAnalyzer/Core"
-
- textual header "StaticAnalyzer/Core/Analyses.def"
- textual header "StaticAnalyzer/Core/AnalyzerOptions.def"
- textual header "StaticAnalyzer/Core/PathSensitive/SVals.def"
- textual header "StaticAnalyzer/Core/PathSensitive/Symbols.def"
- textual header "StaticAnalyzer/Core/PathSensitive/Regions.def"
-
- module * { export * }
-}
-
-module Clang_StaticAnalyzer_Checkers {
- requires cplusplus
- umbrella "StaticAnalyzer/Checkers"
- module * { export * }
-}
-
-module Clang_StaticAnalyzer_Frontend {
- requires cplusplus
- umbrella "StaticAnalyzer/Frontend"
- module * { export * }
-}
-
-module Clang_Testing {
- requires cplusplus
- umbrella "Testing"
- module * { export * }
-}
-
-module Clang_Tooling {
- requires cplusplus umbrella "Tooling" module * { export * }
- // FIXME: Exclude these headers to avoid pulling all of the AST matchers
- // library into clang-format. Due to inline key functions in the headers,
- // importing the AST matchers library gives a link dependency on the AST
- // matchers (and thus the AST), which clang-format should not have.
- exclude header "Tooling/RefactoringCallbacks.h"
-}
-
-module Clang_ToolingCore {
- requires cplusplus
- umbrella "Tooling/Core" module * { export * }
-}
-
-module Clang_ToolingInclusions {
- requires cplusplus
- umbrella "Tooling/Inclusions" module * { export * }
-}
diff --git a/contrib/llvm-project/clang/include/module.modulemap b/contrib/llvm-project/clang/include/module.modulemap
new file mode 100644
index 000000000000..52395ee9b0fc
--- /dev/null
+++ b/contrib/llvm-project/clang/include/module.modulemap
@@ -0,0 +1,209 @@
+module Clang_C {
+ umbrella "clang-c"
+ module * { export * }
+}
+
+module Clang_Analysis {
+ requires cplusplus
+ umbrella "clang/Analysis"
+
+ textual header "clang/Analysis/Analyses/ThreadSafetyOps.def"
+
+ module * { export * }
+
+ // FIXME: Exclude these headers to avoid pulling all of the AST matchers
+ // library into clang. Due to inline key functions in the headers,
+ // importing the AST matchers library gives a link dependency on the AST
+ // matchers (and thus the AST), which clang-format should not have.
+ exclude header "clang/Analysis/Analyses/ExprMutationAnalyzer.h"
+}
+
+module Clang_AST {
+ requires cplusplus
+ umbrella "clang/AST"
+
+ textual header "clang/AST/BuiltinTypes.def"
+ textual header "clang/AST/CXXRecordDeclDefinitionBits.def"
+ textual header "clang/AST/OperationKinds.def"
+ textual header "clang/AST/TypeLocNodes.def"
+
+ module * { export * }
+}
+
+module Clang_ASTMatchers { requires cplusplus umbrella "clang/ASTMatchers" module * { export * } }
+
+module Clang_Basic {
+ requires cplusplus
+ umbrella "clang/Basic"
+
+ textual header "clang/Basic/AArch64SVEACLETypes.def"
+ textual header "clang/Basic/BuiltinsAArch64.def"
+ textual header "clang/Basic/BuiltinsAMDGPU.def"
+ textual header "clang/Basic/BuiltinsAArch64NeonSVEBridge.def"
+ textual header "clang/Basic/BuiltinsAArch64NeonSVEBridge_cg.def"
+ textual header "clang/Basic/BuiltinsARM.def"
+ textual header "clang/Basic/BuiltinsBPF.def"
+ textual header "clang/Basic/Builtins.def"
+ textual header "clang/Basic/BuiltinHeaders.def"
+ textual header "clang/Basic/BuiltinsHexagon.def"
+ textual header "clang/Basic/BuiltinsHexagonDep.def"
+ textual header "clang/Basic/BuiltinsHexagonMapCustomDep.def"
+ textual header "clang/Basic/BuiltinsLoongArch.def"
+ textual header "clang/Basic/BuiltinsLoongArchBase.def"
+ textual header "clang/Basic/BuiltinsLoongArchLSX.def"
+ textual header "clang/Basic/BuiltinsLoongArchLASX.def"
+ textual header "clang/Basic/BuiltinsMips.def"
+ textual header "clang/Basic/BuiltinsNEON.def"
+ textual header "clang/Basic/BuiltinsNVPTX.def"
+ textual header "clang/Basic/BuiltinsPPC.def"
+ textual header "clang/Basic/BuiltinsRISCV.def"
+ textual header "clang/Basic/BuiltinsRISCVVector.def"
+ textual header "clang/Basic/BuiltinsSME.def"
+ textual header "clang/Basic/BuiltinsSVE.def"
+ textual header "clang/Basic/BuiltinsSystemZ.def"
+ textual header "clang/Basic/BuiltinsVE.def"
+ textual header "clang/Basic/BuiltinsVEVL.gen.def"
+ textual header "clang/Basic/BuiltinsWebAssembly.def"
+ textual header "clang/Basic/BuiltinsX86.def"
+ textual header "clang/Basic/BuiltinsX86_64.def"
+ textual header "clang/Basic/BuiltinsXCore.def"
+ textual header "clang/Basic/CodeGenOptions.def"
+ textual header "clang/Basic/DebugOptions.def"
+ textual header "clang/Basic/DiagnosticOptions.def"
+ textual header "clang/Basic/Features.def"
+ textual header "clang/Basic/FPOptions.def"
+ textual header "clang/Basic/MSP430Target.def"
+ textual header "clang/Basic/LangOptions.def"
+ textual header "clang/Basic/OpenCLExtensions.def"
+ textual header "clang/Basic/OpenCLImageTypes.def"
+ textual header "clang/Basic/OpenCLExtensionTypes.def"
+ textual header "clang/Basic/OpenMPKinds.def"
+ textual header "clang/Basic/OperatorKinds.def"
+ textual header "clang/Basic/PPCTypes.def"
+ textual header "clang/Basic/RISCVVTypes.def"
+ textual header "clang/Basic/Sanitizers.def"
+ textual header "clang/Basic/TargetCXXABI.def"
+ textual header "clang/Basic/TransformTypeTraits.def"
+ textual header "clang/Basic/TokenKinds.def"
+ textual header "clang/Basic/WebAssemblyReferenceTypes.def"
+
+ module * { export * }
+}
+module Clang_Basic_TokenKinds {
+ requires cplusplus
+
+ header "clang/Basic/TokenKinds.h"
+ textual header "clang/Basic/TokenKinds.def"
+
+ export *
+}
+
+module Clang_CodeGen { requires cplusplus umbrella "clang/CodeGen" module * { export * } }
+module Clang_Config { requires cplusplus umbrella "clang/Config" module * { export * } }
+
+// Files for diagnostic groups are spread all over the include/clang/ tree, but
+// logically form a single module.
+module Clang_Diagnostics {
+ requires cplusplus
+
+ module All { header "clang/Basic/AllDiagnostics.h" export * }
+ module Analysis { textual header "clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def" }
+ module AST { header "clang/AST/ASTDiagnostic.h" export * }
+ module Comment { header "clang/AST/CommentDiagnostic.h" export * }
+ module Driver { header "clang/Driver/DriverDiagnostic.h" export * }
+ module Frontend { header "clang/Frontend/FrontendDiagnostic.h" export * }
+ module Lex { header "clang/Lex/LexDiagnostic.h" export * }
+ module Parse { header "clang/Parse/ParseDiagnostic.h" export * }
+ module Sema { header "clang/Sema/SemaDiagnostic.h" export * }
+ module Serialization { header "clang/Serialization/SerializationDiagnostic.h" export * }
+ module Refactoring { header "clang/Tooling/Refactoring/RefactoringDiagnostic.h" export * }
+}
+
+module Clang_Driver {
+ requires cplusplus
+ umbrella "clang/Driver"
+
+ textual header "clang/Driver/Types.def"
+
+ module * { export * }
+}
+
+module Clang_Edit { requires cplusplus umbrella "clang/Edit" module * { export * } }
+module Clang_Format { requires cplusplus umbrella "clang/Format" module * { export * } }
+
+module Clang_Frontend {
+ requires cplusplus
+ umbrella "clang/Frontend"
+
+ textual header "clang/Basic/LangStandards.def"
+
+ module * { export * }
+}
+
+module Clang_FrontendTool { requires cplusplus umbrella "clang/FrontendTool" module * { export * } }
+module Clang_Index { requires cplusplus umbrella "clang/Index" module * { export * } }
+module Clang_Lex { requires cplusplus umbrella "clang/Lex" module * { export * } }
+module Clang_Parse { requires cplusplus umbrella "clang/Parse" module * { export * } }
+module Clang_Rewrite { requires cplusplus umbrella "clang/Rewrite/Core" module * { export * } }
+module Clang_RewriteFrontend { requires cplusplus umbrella "clang/Rewrite/Frontend" module * { export * } }
+module Clang_Sema { requires cplusplus umbrella "clang/Sema" module * { export * } }
+
+module Clang_Serialization {
+ requires cplusplus
+ umbrella "clang/Serialization"
+
+ textual header "clang/Serialization/TypeBitCodes.def"
+
+ module * { export * }
+}
+
+module Clang_StaticAnalyzer_Core {
+ requires cplusplus
+ umbrella "clang/StaticAnalyzer/Core"
+
+ textual header "clang/StaticAnalyzer/Core/Analyses.def"
+ textual header "clang/StaticAnalyzer/Core/AnalyzerOptions.def"
+ textual header "clang/StaticAnalyzer/Core/PathSensitive/SVals.def"
+ textual header "clang/StaticAnalyzer/Core/PathSensitive/Symbols.def"
+ textual header "clang/StaticAnalyzer/Core/PathSensitive/Regions.def"
+
+ module * { export * }
+}
+
+module Clang_StaticAnalyzer_Checkers {
+ requires cplusplus
+ umbrella "clang/StaticAnalyzer/Checkers"
+ module * { export * }
+}
+
+module Clang_StaticAnalyzer_Frontend {
+ requires cplusplus
+ umbrella "clang/StaticAnalyzer/Frontend"
+ module * { export * }
+}
+
+module Clang_Testing {
+ requires cplusplus
+ umbrella "clang/Testing"
+ module * { export * }
+}
+
+module Clang_Tooling {
+ requires cplusplus umbrella "clang/Tooling" module * { export * }
+ // FIXME: Exclude these headers to avoid pulling all of the AST matchers
+ // library into clang-format. Due to inline key functions in the headers,
+ // importing the AST matchers library gives a link dependency on the AST
+ // matchers (and thus the AST), which clang-format should not have.
+ exclude header "clang/Tooling/RefactoringCallbacks.h"
+}
+
+module Clang_ToolingCore {
+ requires cplusplus
+ umbrella "clang/Tooling/Core" module * { export * }
+}
+
+module Clang_ToolingInclusions {
+ requires cplusplus
+ umbrella "clang/Tooling/Inclusions"
+ module * { export * }
+}
diff --git a/contrib/llvm-project/clang/lib/APINotes/APINotesFormat.h b/contrib/llvm-project/clang/lib/APINotes/APINotesFormat.h
index 6b76ecfc2567..615314c46f09 100644
--- a/contrib/llvm-project/clang/lib/APINotes/APINotesFormat.h
+++ b/contrib/llvm-project/clang/lib/APINotes/APINotesFormat.h
@@ -9,6 +9,7 @@
#ifndef LLVM_CLANG_LIB_APINOTES_APINOTESFORMAT_H
#define LLVM_CLANG_LIB_APINOTES_APINOTESFORMAT_H
+#include "clang/APINotes/Types.h"
#include "llvm/ADT/PointerEmbeddedInt.h"
#include "llvm/Bitcode/BitcodeConvenience.h"
@@ -23,7 +24,7 @@ const uint16_t VERSION_MAJOR = 0;
/// API notes file minor version number.
///
/// When the format changes IN ANY WAY, this number should be incremented.
-const uint16_t VERSION_MINOR = 24; // EnumExtensibility + FlagEnum
+const uint16_t VERSION_MINOR = 25; // SwiftImportAs
using IdentifierID = llvm::PointerEmbeddedInt<unsigned, 31>;
using IdentifierIDField = llvm::BCVBR<16>;
@@ -220,7 +221,7 @@ using TagDataLayout =
// below)
llvm::BCBlob // map from name to tag information
>;
-}; // namespace tag_block
+} // namespace tag_block
namespace typedef_block {
enum { TYPEDEF_DATA = 1 };
@@ -231,7 +232,7 @@ using TypedefDataLayout =
// below)
llvm::BCBlob // map from name to typedef information
>;
-}; // namespace typedef_block
+} // namespace typedef_block
namespace enum_constant_block {
enum { ENUM_CONSTANT_DATA = 1 };
@@ -246,10 +247,97 @@ using EnumConstantDataLayout =
/// A stored Objective-C selector.
struct StoredObjCSelector {
- unsigned NumPieces;
+ unsigned NumArgs;
llvm::SmallVector<IdentifierID, 2> Identifiers;
};
+
+/// A stored Objective-C or C++ context, represented by the ID of its parent
+/// context, the kind of this context (Objective-C class / C++ namespace / etc),
+/// and the ID of this context.
+struct ContextTableKey {
+ uint32_t parentContextID;
+ uint8_t contextKind;
+ uint32_t contextID;
+
+ ContextTableKey() : parentContextID(-1), contextKind(-1), contextID(-1) {}
+
+ ContextTableKey(uint32_t parentContextID, uint8_t contextKind,
+ uint32_t contextID)
+ : parentContextID(parentContextID), contextKind(contextKind),
+ contextID(contextID) {}
+
+ ContextTableKey(std::optional<Context> context, IdentifierID nameID)
+ : parentContextID(context ? context->id.Value : (uint32_t)-1),
+ contextKind(context ? static_cast<uint8_t>(context->kind)
+ : static_cast<uint8_t>(-1)),
+ contextID(nameID) {}
+
+ llvm::hash_code hashValue() const {
+ return llvm::hash_value(
+ std::tuple{parentContextID, contextKind, contextID});
+ }
+};
+
+inline bool operator==(const ContextTableKey &lhs, const ContextTableKey &rhs) {
+ return lhs.parentContextID == rhs.parentContextID &&
+ lhs.contextKind == rhs.contextKind && lhs.contextID == rhs.contextID;
+}
+
} // namespace api_notes
} // namespace clang
+namespace llvm {
+template <> struct DenseMapInfo<clang::api_notes::StoredObjCSelector> {
+ typedef DenseMapInfo<unsigned> UnsignedInfo;
+
+ static inline clang::api_notes::StoredObjCSelector getEmptyKey() {
+ return clang::api_notes::StoredObjCSelector{UnsignedInfo::getEmptyKey(),
+ {}};
+ }
+
+ static inline clang::api_notes::StoredObjCSelector getTombstoneKey() {
+ return clang::api_notes::StoredObjCSelector{UnsignedInfo::getTombstoneKey(),
+ {}};
+ }
+
+ static unsigned
+ getHashValue(const clang::api_notes::StoredObjCSelector &Selector) {
+ auto hash = llvm::hash_value(Selector.NumArgs);
+ hash = hash_combine(hash, Selector.Identifiers.size());
+ for (auto piece : Selector.Identifiers)
+ hash = hash_combine(hash, static_cast<unsigned>(piece));
+ // FIXME: Mix upper/lower 32-bit values together to produce
+ // unsigned rather than truncating.
+ return hash;
+ }
+
+ static bool isEqual(const clang::api_notes::StoredObjCSelector &LHS,
+ const clang::api_notes::StoredObjCSelector &RHS) {
+ return LHS.NumArgs == RHS.NumArgs && LHS.Identifiers == RHS.Identifiers;
+ }
+};
+
+template <> struct DenseMapInfo<clang::api_notes::ContextTableKey> {
+ static inline clang::api_notes::ContextTableKey getEmptyKey() {
+ return clang::api_notes::ContextTableKey();
+ }
+
+ static inline clang::api_notes::ContextTableKey getTombstoneKey() {
+ return clang::api_notes::ContextTableKey{
+ DenseMapInfo<uint32_t>::getTombstoneKey(),
+ DenseMapInfo<uint8_t>::getTombstoneKey(),
+ DenseMapInfo<uint32_t>::getTombstoneKey()};
+ }
+
+ static unsigned getHashValue(const clang::api_notes::ContextTableKey &value) {
+ return value.hashValue();
+ }
+
+ static bool isEqual(const clang::api_notes::ContextTableKey &lhs,
+ const clang::api_notes::ContextTableKey &rhs) {
+ return lhs == rhs;
+ }
+};
+} // namespace llvm
+
#endif
diff --git a/contrib/llvm-project/clang/lib/APINotes/APINotesManager.cpp b/contrib/llvm-project/clang/lib/APINotes/APINotesManager.cpp
new file mode 100644
index 000000000000..d3aef09dac91
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/APINotes/APINotesManager.cpp
@@ -0,0 +1,458 @@
+//===--- APINotesManager.cpp - Manage API Notes Files ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/APINotes/APINotesManager.h"
+#include "clang/APINotes/APINotesReader.h"
+#include "clang/APINotes/APINotesYAMLCompiler.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/SourceMgrAdapter.h"
+#include "clang/Basic/Version.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/PrettyStackTrace.h"
+
+using namespace clang;
+using namespace api_notes;
+
+#define DEBUG_TYPE "API Notes"
+STATISTIC(NumHeaderAPINotes, "non-framework API notes files loaded");
+STATISTIC(NumPublicFrameworkAPINotes, "framework public API notes loaded");
+STATISTIC(NumPrivateFrameworkAPINotes, "framework private API notes loaded");
+STATISTIC(NumFrameworksSearched, "frameworks searched");
+STATISTIC(NumDirectoriesSearched, "header directories searched");
+STATISTIC(NumDirectoryCacheHits, "directory cache hits");
+
+namespace {
+/// Prints two successive strings, which much be kept alive as long as the
+/// PrettyStackTrace entry.
+class PrettyStackTraceDoubleString : public llvm::PrettyStackTraceEntry {
+ StringRef First, Second;
+
+public:
+ PrettyStackTraceDoubleString(StringRef First, StringRef Second)
+ : First(First), Second(Second) {}
+ void print(raw_ostream &OS) const override { OS << First << Second; }
+};
+} // namespace
+
+APINotesManager::APINotesManager(SourceManager &SM, const LangOptions &LangOpts)
+ : SM(SM), ImplicitAPINotes(LangOpts.APINotes) {}
+
+APINotesManager::~APINotesManager() {
+ // Free the API notes readers.
+ for (const auto &Entry : Readers) {
+ if (auto Reader = Entry.second.dyn_cast<APINotesReader *>())
+ delete Reader;
+ }
+
+ delete CurrentModuleReaders[ReaderKind::Public];
+ delete CurrentModuleReaders[ReaderKind::Private];
+}
+
+std::unique_ptr<APINotesReader>
+APINotesManager::loadAPINotes(FileEntryRef APINotesFile) {
+ PrettyStackTraceDoubleString Trace("Loading API notes from ",
+ APINotesFile.getName());
+
+ // Open the source file.
+ auto SourceFileID = SM.getOrCreateFileID(APINotesFile, SrcMgr::C_User);
+ auto SourceBuffer = SM.getBufferOrNone(SourceFileID, SourceLocation());
+ if (!SourceBuffer)
+ return nullptr;
+
+ // Compile the API notes source into a buffer.
+ // FIXME: Either propagate OSType through or, better yet, improve the binary
+ // APINotes format to maintain complete availability information.
+ // FIXME: We don't even really need to go through the binary format at all;
+ // we're just going to immediately deserialize it again.
+ llvm::SmallVector<char, 1024> APINotesBuffer;
+ std::unique_ptr<llvm::MemoryBuffer> CompiledBuffer;
+ {
+ SourceMgrAdapter SMAdapter(
+ SM, SM.getDiagnostics(), diag::err_apinotes_message,
+ diag::warn_apinotes_message, diag::note_apinotes_message, APINotesFile);
+ llvm::raw_svector_ostream OS(APINotesBuffer);
+ if (api_notes::compileAPINotes(
+ SourceBuffer->getBuffer(), SM.getFileEntryForID(SourceFileID), OS,
+ SMAdapter.getDiagHandler(), SMAdapter.getDiagContext()))
+ return nullptr;
+
+ // Make a copy of the compiled form into the buffer.
+ CompiledBuffer = llvm::MemoryBuffer::getMemBufferCopy(
+ StringRef(APINotesBuffer.data(), APINotesBuffer.size()));
+ }
+
+ // Load the binary form we just compiled.
+ auto Reader = APINotesReader::Create(std::move(CompiledBuffer), SwiftVersion);
+ assert(Reader && "Could not load the API notes we just generated?");
+ return Reader;
+}
+
+std::unique_ptr<APINotesReader>
+APINotesManager::loadAPINotes(StringRef Buffer) {
+ llvm::SmallVector<char, 1024> APINotesBuffer;
+ std::unique_ptr<llvm::MemoryBuffer> CompiledBuffer;
+ SourceMgrAdapter SMAdapter(
+ SM, SM.getDiagnostics(), diag::err_apinotes_message,
+ diag::warn_apinotes_message, diag::note_apinotes_message, std::nullopt);
+ llvm::raw_svector_ostream OS(APINotesBuffer);
+
+ if (api_notes::compileAPINotes(Buffer, nullptr, OS,
+ SMAdapter.getDiagHandler(),
+ SMAdapter.getDiagContext()))
+ return nullptr;
+
+ CompiledBuffer = llvm::MemoryBuffer::getMemBufferCopy(
+ StringRef(APINotesBuffer.data(), APINotesBuffer.size()));
+ auto Reader = APINotesReader::Create(std::move(CompiledBuffer), SwiftVersion);
+ assert(Reader && "Could not load the API notes we just generated?");
+ return Reader;
+}
+
+bool APINotesManager::loadAPINotes(const DirectoryEntry *HeaderDir,
+ FileEntryRef APINotesFile) {
+ assert(!Readers.contains(HeaderDir));
+ if (auto Reader = loadAPINotes(APINotesFile)) {
+ Readers[HeaderDir] = Reader.release();
+ return false;
+ }
+
+ Readers[HeaderDir] = nullptr;
+ return true;
+}
+
+OptionalFileEntryRef
+APINotesManager::findAPINotesFile(DirectoryEntryRef Directory,
+ StringRef Basename, bool WantPublic) {
+ FileManager &FM = SM.getFileManager();
+
+ llvm::SmallString<128> Path(Directory.getName());
+
+ StringRef Suffix = WantPublic ? "" : "_private";
+
+ // Look for the source API notes file.
+ llvm::sys::path::append(Path, llvm::Twine(Basename) + Suffix + "." +
+ SOURCE_APINOTES_EXTENSION);
+ return FM.getOptionalFileRef(Path, /*Open*/ true);
+}
+
+OptionalDirectoryEntryRef APINotesManager::loadFrameworkAPINotes(
+ llvm::StringRef FrameworkPath, llvm::StringRef FrameworkName, bool Public) {
+ FileManager &FM = SM.getFileManager();
+
+ llvm::SmallString<128> Path(FrameworkPath);
+ unsigned FrameworkNameLength = Path.size();
+
+ StringRef Suffix = Public ? "" : "_private";
+
+ // Form the path to the APINotes file.
+ llvm::sys::path::append(Path, "APINotes");
+ llvm::sys::path::append(Path, (llvm::Twine(FrameworkName) + Suffix + "." +
+ SOURCE_APINOTES_EXTENSION));
+
+ // Try to open the APINotes file.
+ auto APINotesFile = FM.getOptionalFileRef(Path);
+ if (!APINotesFile)
+ return std::nullopt;
+
+ // Form the path to the corresponding header directory.
+ Path.resize(FrameworkNameLength);
+ llvm::sys::path::append(Path, Public ? "Headers" : "PrivateHeaders");
+
+ // Try to access the header directory.
+ auto HeaderDir = FM.getOptionalDirectoryRef(Path);
+ if (!HeaderDir)
+ return std::nullopt;
+
+ // Try to load the API notes.
+ if (loadAPINotes(*HeaderDir, *APINotesFile))
+ return std::nullopt;
+
+ // Success: return the header directory.
+ if (Public)
+ ++NumPublicFrameworkAPINotes;
+ else
+ ++NumPrivateFrameworkAPINotes;
+ return *HeaderDir;
+}
+
+static void checkPrivateAPINotesName(DiagnosticsEngine &Diags,
+ const FileEntry *File, const Module *M) {
+ if (File->tryGetRealPathName().empty())
+ return;
+
+ StringRef RealFileName =
+ llvm::sys::path::filename(File->tryGetRealPathName());
+ StringRef RealStem = llvm::sys::path::stem(RealFileName);
+ if (RealStem.ends_with("_private"))
+ return;
+
+ unsigned DiagID = diag::warn_apinotes_private_case;
+ if (M->IsSystem)
+ DiagID = diag::warn_apinotes_private_case_system;
+
+ Diags.Report(SourceLocation(), DiagID) << M->Name << RealFileName;
+}
+
+/// \returns true if any of \p module's immediate submodules are defined in a
+/// private module map
+static bool hasPrivateSubmodules(const Module *M) {
+ return llvm::any_of(M->submodules(), [](const Module *Submodule) {
+ return Submodule->ModuleMapIsPrivate;
+ });
+}
+
+llvm::SmallVector<FileEntryRef, 2>
+APINotesManager::getCurrentModuleAPINotes(Module *M, bool LookInModule,
+ ArrayRef<std::string> SearchPaths) {
+ FileManager &FM = SM.getFileManager();
+ auto ModuleName = M->getTopLevelModuleName();
+ llvm::SmallVector<FileEntryRef, 2> APINotes;
+
+ // First, look relative to the module itself.
+ if (LookInModule) {
+ // Local function to try loading an API notes file in the given directory.
+ auto tryAPINotes = [&](DirectoryEntryRef Dir, bool WantPublic) {
+ if (auto File = findAPINotesFile(Dir, ModuleName, WantPublic)) {
+ if (!WantPublic)
+ checkPrivateAPINotesName(SM.getDiagnostics(), *File, M);
+
+ APINotes.push_back(*File);
+ }
+ };
+
+ if (M->IsFramework) {
+ // For frameworks, we search in the "Headers" or "PrivateHeaders"
+ // subdirectory.
+ //
+ // Public modules:
+ // - Headers/Foo.apinotes
+ // - PrivateHeaders/Foo_private.apinotes (if there are private submodules)
+ // Private modules:
+ // - PrivateHeaders/Bar.apinotes (except that 'Bar' probably already has
+ // the word "Private" in it in practice)
+ llvm::SmallString<128> Path(M->Directory->getName());
+
+ if (!M->ModuleMapIsPrivate) {
+ unsigned PathLen = Path.size();
+
+ llvm::sys::path::append(Path, "Headers");
+ if (auto APINotesDir = FM.getOptionalDirectoryRef(Path))
+ tryAPINotes(*APINotesDir, /*wantPublic=*/true);
+
+ Path.resize(PathLen);
+ }
+
+ if (M->ModuleMapIsPrivate || hasPrivateSubmodules(M)) {
+ llvm::sys::path::append(Path, "PrivateHeaders");
+ if (auto PrivateAPINotesDir = FM.getOptionalDirectoryRef(Path))
+ tryAPINotes(*PrivateAPINotesDir,
+ /*wantPublic=*/M->ModuleMapIsPrivate);
+ }
+ } else {
+ // Public modules:
+ // - Foo.apinotes
+ // - Foo_private.apinotes (if there are private submodules)
+ // Private modules:
+ // - Bar.apinotes (except that 'Bar' probably already has the word
+ // "Private" in it in practice)
+ tryAPINotes(*M->Directory, /*wantPublic=*/true);
+ if (!M->ModuleMapIsPrivate && hasPrivateSubmodules(M))
+ tryAPINotes(*M->Directory, /*wantPublic=*/false);
+ }
+
+ if (!APINotes.empty())
+ return APINotes;
+ }
+
+ // Second, look for API notes for this module in the module API
+ // notes search paths.
+ for (const auto &SearchPath : SearchPaths) {
+ if (auto SearchDir = FM.getOptionalDirectoryRef(SearchPath)) {
+ if (auto File = findAPINotesFile(*SearchDir, ModuleName)) {
+ APINotes.push_back(*File);
+ return APINotes;
+ }
+ }
+ }
+
+ // Didn't find any API notes.
+ return APINotes;
+}
+
+bool APINotesManager::loadCurrentModuleAPINotes(
+ Module *M, bool LookInModule, ArrayRef<std::string> SearchPaths) {
+ assert(!CurrentModuleReaders[ReaderKind::Public] &&
+ "Already loaded API notes for the current module?");
+
+ auto APINotes = getCurrentModuleAPINotes(M, LookInModule, SearchPaths);
+ unsigned NumReaders = 0;
+ for (auto File : APINotes) {
+ CurrentModuleReaders[NumReaders++] = loadAPINotes(File).release();
+ if (!getCurrentModuleReaders().empty())
+ M->APINotesFile = File.getName().str();
+ }
+
+ return NumReaders > 0;
+}
+
+bool APINotesManager::loadCurrentModuleAPINotesFromBuffer(
+ ArrayRef<StringRef> Buffers) {
+ unsigned NumReader = 0;
+ for (auto Buf : Buffers) {
+ auto Reader = loadAPINotes(Buf);
+ assert(Reader && "Could not load the API notes we just generated?");
+
+ CurrentModuleReaders[NumReader++] = Reader.release();
+ }
+ return NumReader;
+}
+
+llvm::SmallVector<APINotesReader *, 2>
+APINotesManager::findAPINotes(SourceLocation Loc) {
+ llvm::SmallVector<APINotesReader *, 2> Results;
+
+ // If there are readers for the current module, return them.
+ if (!getCurrentModuleReaders().empty()) {
+ Results.append(getCurrentModuleReaders().begin(),
+ getCurrentModuleReaders().end());
+ return Results;
+ }
+
+ // If we're not allowed to implicitly load API notes files, we're done.
+ if (!ImplicitAPINotes)
+ return Results;
+
+ // If we don't have source location information, we're done.
+ if (Loc.isInvalid())
+ return Results;
+
+ // API notes are associated with the expansion location. Retrieve the
+ // file for this location.
+ SourceLocation ExpansionLoc = SM.getExpansionLoc(Loc);
+ FileID ID = SM.getFileID(ExpansionLoc);
+ if (ID.isInvalid())
+ return Results;
+ OptionalFileEntryRef File = SM.getFileEntryRefForID(ID);
+ if (!File)
+ return Results;
+
+ // Look for API notes in the directory corresponding to this file, or one of
+ // its its parent directories.
+ OptionalDirectoryEntryRef Dir = File->getDir();
+ FileManager &FileMgr = SM.getFileManager();
+ llvm::SetVector<const DirectoryEntry *,
+ SmallVector<const DirectoryEntry *, 4>,
+ llvm::SmallPtrSet<const DirectoryEntry *, 4>>
+ DirsVisited;
+ do {
+ // Look for an API notes reader for this header search directory.
+ auto Known = Readers.find(*Dir);
+
+ // If we already know the answer, chase it.
+ if (Known != Readers.end()) {
+ ++NumDirectoryCacheHits;
+
+ // We've been redirected to another directory for answers. Follow it.
+ if (Known->second && Known->second.is<DirectoryEntryRef>()) {
+ DirsVisited.insert(*Dir);
+ Dir = Known->second.get<DirectoryEntryRef>();
+ continue;
+ }
+
+ // We have the answer.
+ if (auto Reader = Known->second.dyn_cast<APINotesReader *>())
+ Results.push_back(Reader);
+ break;
+ }
+
+ // Look for API notes corresponding to this directory.
+ StringRef Path = Dir->getName();
+ if (llvm::sys::path::extension(Path) == ".framework") {
+ // If this is a framework directory, check whether there are API notes
+ // in the APINotes subdirectory.
+ auto FrameworkName = llvm::sys::path::stem(Path);
+ ++NumFrameworksSearched;
+
+ // Look for API notes for both the public and private headers.
+ OptionalDirectoryEntryRef PublicDir =
+ loadFrameworkAPINotes(Path, FrameworkName, /*Public=*/true);
+ OptionalDirectoryEntryRef PrivateDir =
+ loadFrameworkAPINotes(Path, FrameworkName, /*Public=*/false);
+
+ if (PublicDir || PrivateDir) {
+ // We found API notes: don't ever look past the framework directory.
+ Readers[*Dir] = nullptr;
+
+ // Pretend we found the result in the public or private directory,
+ // as appropriate. All headers should be in one of those two places,
+ // but be defensive here.
+ if (!DirsVisited.empty()) {
+ if (PublicDir && DirsVisited.back() == *PublicDir) {
+ DirsVisited.pop_back();
+ Dir = *PublicDir;
+ } else if (PrivateDir && DirsVisited.back() == *PrivateDir) {
+ DirsVisited.pop_back();
+ Dir = *PrivateDir;
+ }
+ }
+
+ // Grab the result.
+ if (auto Reader = Readers[*Dir].dyn_cast<APINotesReader *>())
+ Results.push_back(Reader);
+ break;
+ }
+ } else {
+ // Look for an APINotes file in this directory.
+ llvm::SmallString<128> APINotesPath(Dir->getName());
+ llvm::sys::path::append(
+ APINotesPath, (llvm::Twine("APINotes.") + SOURCE_APINOTES_EXTENSION));
+
+ // If there is an API notes file here, try to load it.
+ ++NumDirectoriesSearched;
+ if (auto APINotesFile = FileMgr.getOptionalFileRef(APINotesPath)) {
+ if (!loadAPINotes(*Dir, *APINotesFile)) {
+ ++NumHeaderAPINotes;
+ if (auto Reader = Readers[*Dir].dyn_cast<APINotesReader *>())
+ Results.push_back(Reader);
+ break;
+ }
+ }
+ }
+
+ // We didn't find anything. Look at the parent directory.
+ if (!DirsVisited.insert(*Dir)) {
+ Dir = std::nullopt;
+ break;
+ }
+
+ StringRef ParentPath = llvm::sys::path::parent_path(Path);
+ while (llvm::sys::path::stem(ParentPath) == "..")
+ ParentPath = llvm::sys::path::parent_path(ParentPath);
+
+ Dir = ParentPath.empty() ? std::nullopt
+ : FileMgr.getOptionalDirectoryRef(ParentPath);
+ } while (Dir);
+
+ // Path compression for all of the directories we visited, redirecting
+ // them to the directory we ended on. If no API notes were found, the
+ // resulting directory will be NULL, indicating no API notes.
+ for (const auto Visited : DirsVisited)
+ Readers[Visited] = Dir ? ReaderEntry(*Dir) : ReaderEntry();
+
+ return Results;
+}
diff --git a/contrib/llvm-project/clang/lib/APINotes/APINotesReader.cpp b/contrib/llvm-project/clang/lib/APINotes/APINotesReader.cpp
new file mode 100644
index 000000000000..ff9b95d9bf75
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/APINotes/APINotesReader.cpp
@@ -0,0 +1,2049 @@
+//===--- APINotesReader.cpp - API Notes Reader ------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/APINotes/APINotesReader.h"
+#include "APINotesFormat.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Bitstream/BitstreamReader.h"
+#include "llvm/Support/DJB.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/OnDiskHashTable.h"
+
+namespace clang {
+namespace api_notes {
+using namespace llvm::support;
+
+namespace {
+/// Deserialize a version tuple.
+llvm::VersionTuple ReadVersionTuple(const uint8_t *&Data) {
+ uint8_t NumVersions = (*Data++) & 0x03;
+
+ unsigned Major =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ if (NumVersions == 0)
+ return llvm::VersionTuple(Major);
+
+ unsigned Minor =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ if (NumVersions == 1)
+ return llvm::VersionTuple(Major, Minor);
+
+ unsigned Subminor =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ if (NumVersions == 2)
+ return llvm::VersionTuple(Major, Minor, Subminor);
+
+ unsigned Build =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ return llvm::VersionTuple(Major, Minor, Subminor, Build);
+}
+
+/// An on-disk hash table whose data is versioned based on the Swift version.
+template <typename Derived, typename KeyType, typename UnversionedDataType>
+class VersionedTableInfo {
+public:
+ using internal_key_type = KeyType;
+ using external_key_type = KeyType;
+ using data_type =
+ llvm::SmallVector<std::pair<llvm::VersionTuple, UnversionedDataType>, 1>;
+ using hash_value_type = size_t;
+ using offset_type = unsigned;
+
+ internal_key_type GetInternalKey(external_key_type Key) { return Key; }
+
+ external_key_type GetExternalKey(internal_key_type Key) { return Key; }
+
+ static bool EqualKey(internal_key_type LHS, internal_key_type RHS) {
+ return LHS == RHS;
+ }
+
+ static std::pair<unsigned, unsigned> ReadKeyDataLength(const uint8_t *&Data) {
+ unsigned KeyLength =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ unsigned DataLength =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ return {KeyLength, DataLength};
+ }
+
+ static data_type ReadData(internal_key_type Key, const uint8_t *Data,
+ unsigned Length) {
+ unsigned NumElements =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ data_type Result;
+ Result.reserve(NumElements);
+ for (unsigned i = 0; i != NumElements; ++i) {
+ auto version = ReadVersionTuple(Data);
+ const auto *DataBefore = Data;
+ (void)DataBefore;
+ assert(Data != DataBefore &&
+ "Unversioned data reader didn't move pointer");
+ auto UnversionedData = Derived::readUnversioned(Key, Data);
+ Result.push_back({version, UnversionedData});
+ }
+ return Result;
+ }
+};
+
+/// Read serialized CommonEntityInfo.
+void ReadCommonEntityInfo(const uint8_t *&Data, CommonEntityInfo &Info) {
+ uint8_t UnavailableBits = *Data++;
+ Info.Unavailable = (UnavailableBits >> 1) & 0x01;
+ Info.UnavailableInSwift = UnavailableBits & 0x01;
+ if ((UnavailableBits >> 2) & 0x01)
+ Info.setSwiftPrivate(static_cast<bool>((UnavailableBits >> 3) & 0x01));
+
+ unsigned MsgLength =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ Info.UnavailableMsg =
+ std::string(reinterpret_cast<const char *>(Data),
+ reinterpret_cast<const char *>(Data) + MsgLength);
+ Data += MsgLength;
+
+ unsigned SwiftNameLength =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ Info.SwiftName =
+ std::string(reinterpret_cast<const char *>(Data),
+ reinterpret_cast<const char *>(Data) + SwiftNameLength);
+ Data += SwiftNameLength;
+}
+
+/// Read serialized CommonTypeInfo.
+void ReadCommonTypeInfo(const uint8_t *&Data, CommonTypeInfo &Info) {
+ ReadCommonEntityInfo(Data, Info);
+
+ unsigned SwiftBridgeLength =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ if (SwiftBridgeLength > 0) {
+ Info.setSwiftBridge(std::string(reinterpret_cast<const char *>(Data),
+ SwiftBridgeLength - 1));
+ Data += SwiftBridgeLength - 1;
+ }
+
+ unsigned ErrorDomainLength =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ if (ErrorDomainLength > 0) {
+ Info.setNSErrorDomain(std::optional<std::string>(std::string(
+ reinterpret_cast<const char *>(Data), ErrorDomainLength - 1)));
+ Data += ErrorDomainLength - 1;
+ }
+}
+
+/// Used to deserialize the on-disk identifier table.
+class IdentifierTableInfo {
+public:
+ using internal_key_type = llvm::StringRef;
+ using external_key_type = llvm::StringRef;
+ using data_type = IdentifierID;
+ using hash_value_type = uint32_t;
+ using offset_type = unsigned;
+
+ internal_key_type GetInternalKey(external_key_type Key) { return Key; }
+
+ external_key_type GetExternalKey(internal_key_type Key) { return Key; }
+
+ hash_value_type ComputeHash(internal_key_type Key) {
+ return llvm::hash_value(Key);
+ }
+
+ static bool EqualKey(internal_key_type LHS, internal_key_type RHS) {
+ return LHS == RHS;
+ }
+
+ static std::pair<unsigned, unsigned> ReadKeyDataLength(const uint8_t *&Data) {
+ unsigned KeyLength =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ unsigned DataLength =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ return {KeyLength, DataLength};
+ }
+
+ static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) {
+ return llvm::StringRef(reinterpret_cast<const char *>(Data), Length);
+ }
+
+ static data_type ReadData(internal_key_type key, const uint8_t *Data,
+ unsigned Length) {
+ return endian::readNext<uint32_t, llvm::endianness::little, unaligned>(
+ Data);
+ }
+};
+
+/// Used to deserialize the on-disk Objective-C class table.
+class ObjCContextIDTableInfo {
+public:
+ using internal_key_type = ContextTableKey;
+ using external_key_type = internal_key_type;
+ using data_type = unsigned;
+ using hash_value_type = size_t;
+ using offset_type = unsigned;
+
+ internal_key_type GetInternalKey(external_key_type Key) { return Key; }
+
+ external_key_type GetExternalKey(internal_key_type Key) { return Key; }
+
+ hash_value_type ComputeHash(internal_key_type Key) {
+ return static_cast<size_t>(Key.hashValue());
+ }
+
+ static bool EqualKey(internal_key_type LHS, internal_key_type RHS) {
+ return LHS == RHS;
+ }
+
+ static std::pair<unsigned, unsigned> ReadKeyDataLength(const uint8_t *&Data) {
+ unsigned KeyLength =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ unsigned DataLength =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ return {KeyLength, DataLength};
+ }
+
+ static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) {
+ auto ParentCtxID =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ auto ContextKind =
+ endian::readNext<uint8_t, llvm::endianness::little, unaligned>(Data);
+ auto NameID =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ return {ParentCtxID, ContextKind, NameID};
+ }
+
+ static data_type ReadData(internal_key_type Key, const uint8_t *Data,
+ unsigned Length) {
+ return endian::readNext<uint32_t, llvm::endianness::little, unaligned>(
+ Data);
+ }
+};
+
+/// Used to deserialize the on-disk Objective-C property table.
+class ObjCContextInfoTableInfo
+ : public VersionedTableInfo<ObjCContextInfoTableInfo, unsigned,
+ ObjCContextInfo> {
+public:
+ static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) {
+ return endian::readNext<uint32_t, llvm::endianness::little, unaligned>(
+ Data);
+ }
+
+ hash_value_type ComputeHash(internal_key_type Key) {
+ return static_cast<size_t>(llvm::hash_value(Key));
+ }
+
+ static ObjCContextInfo readUnversioned(internal_key_type Key,
+ const uint8_t *&Data) {
+ ObjCContextInfo Info;
+ ReadCommonTypeInfo(Data, Info);
+ uint8_t Payload = *Data++;
+
+ if (Payload & 0x01)
+ Info.setHasDesignatedInits(true);
+ Payload = Payload >> 1;
+
+ if (Payload & 0x4)
+ Info.setDefaultNullability(static_cast<NullabilityKind>(Payload & 0x03));
+ Payload >>= 3;
+
+ if (Payload & (1 << 1))
+ Info.setSwiftObjCMembers(Payload & 1);
+ Payload >>= 2;
+
+ if (Payload & (1 << 1))
+ Info.setSwiftImportAsNonGeneric(Payload & 1);
+
+ return Info;
+ }
+};
+
+/// Read serialized VariableInfo.
+void ReadVariableInfo(const uint8_t *&Data, VariableInfo &Info) {
+ ReadCommonEntityInfo(Data, Info);
+ if (*Data++) {
+ Info.setNullabilityAudited(static_cast<NullabilityKind>(*Data));
+ }
+ ++Data;
+
+ auto TypeLen =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ Info.setType(std::string(Data, Data + TypeLen));
+ Data += TypeLen;
+}
+
+/// Used to deserialize the on-disk Objective-C property table.
+class ObjCPropertyTableInfo
+ : public VersionedTableInfo<ObjCPropertyTableInfo,
+ std::tuple<uint32_t, uint32_t, uint8_t>,
+ ObjCPropertyInfo> {
+public:
+ static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) {
+ auto ClassID =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ auto NameID =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ char IsInstance =
+ endian::readNext<uint8_t, llvm::endianness::little, unaligned>(Data);
+ return {ClassID, NameID, IsInstance};
+ }
+
+ hash_value_type ComputeHash(internal_key_type Key) {
+ return static_cast<size_t>(llvm::hash_value(Key));
+ }
+
+ static ObjCPropertyInfo readUnversioned(internal_key_type Key,
+ const uint8_t *&Data) {
+ ObjCPropertyInfo Info;
+ ReadVariableInfo(Data, Info);
+ uint8_t Flags = *Data++;
+ if (Flags & (1 << 0))
+ Info.setSwiftImportAsAccessors(Flags & (1 << 1));
+ return Info;
+ }
+};
+
+/// Read serialized ParamInfo.
+void ReadParamInfo(const uint8_t *&Data, ParamInfo &Info) {
+ ReadVariableInfo(Data, Info);
+
+ uint8_t Payload =
+ endian::readNext<uint8_t, llvm::endianness::little, unaligned>(Data);
+ if (auto RawConvention = Payload & 0x7) {
+ auto Convention = static_cast<RetainCountConventionKind>(RawConvention - 1);
+ Info.setRetainCountConvention(Convention);
+ }
+ Payload >>= 3;
+ if (Payload & 0x01)
+ Info.setNoEscape(Payload & 0x02);
+ Payload >>= 2;
+ assert(Payload == 0 && "Bad API notes");
+}
+
+/// Read serialized FunctionInfo.
+void ReadFunctionInfo(const uint8_t *&Data, FunctionInfo &Info) {
+ ReadCommonEntityInfo(Data, Info);
+
+ uint8_t Payload =
+ endian::readNext<uint8_t, llvm::endianness::little, unaligned>(Data);
+ if (auto RawConvention = Payload & 0x7) {
+ auto Convention = static_cast<RetainCountConventionKind>(RawConvention - 1);
+ Info.setRetainCountConvention(Convention);
+ }
+ Payload >>= 3;
+ Info.NullabilityAudited = Payload & 0x1;
+ Payload >>= 1;
+ assert(Payload == 0 && "Bad API notes");
+
+ Info.NumAdjustedNullable =
+ endian::readNext<uint8_t, llvm::endianness::little, unaligned>(Data);
+ Info.NullabilityPayload =
+ endian::readNext<uint64_t, llvm::endianness::little, unaligned>(Data);
+
+ unsigned NumParams =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ while (NumParams > 0) {
+ ParamInfo pi;
+ ReadParamInfo(Data, pi);
+ Info.Params.push_back(pi);
+ --NumParams;
+ }
+
+ unsigned ResultTypeLen =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ Info.ResultType = std::string(Data, Data + ResultTypeLen);
+ Data += ResultTypeLen;
+}
+
+/// Used to deserialize the on-disk Objective-C method table.
+class ObjCMethodTableInfo
+ : public VersionedTableInfo<ObjCMethodTableInfo,
+ std::tuple<uint32_t, uint32_t, uint8_t>,
+ ObjCMethodInfo> {
+public:
+ static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) {
+ auto ClassID =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ auto SelectorID =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ auto IsInstance =
+ endian::readNext<uint8_t, llvm::endianness::little, unaligned>(Data);
+ return {ClassID, SelectorID, IsInstance};
+ }
+
+ hash_value_type ComputeHash(internal_key_type Key) {
+ return static_cast<size_t>(llvm::hash_value(Key));
+ }
+
+ static ObjCMethodInfo readUnversioned(internal_key_type Key,
+ const uint8_t *&Data) {
+ ObjCMethodInfo Info;
+ uint8_t Payload = *Data++;
+ Info.RequiredInit = Payload & 0x01;
+ Payload >>= 1;
+ Info.DesignatedInit = Payload & 0x01;
+ Payload >>= 1;
+
+ ReadFunctionInfo(Data, Info);
+ return Info;
+ }
+};
+
+/// Used to deserialize the on-disk Objective-C selector table.
+class ObjCSelectorTableInfo {
+public:
+ using internal_key_type = StoredObjCSelector;
+ using external_key_type = internal_key_type;
+ using data_type = SelectorID;
+ using hash_value_type = unsigned;
+ using offset_type = unsigned;
+
+ internal_key_type GetInternalKey(external_key_type Key) { return Key; }
+
+ external_key_type GetExternalKey(internal_key_type Key) { return Key; }
+
+ hash_value_type ComputeHash(internal_key_type Key) {
+ return llvm::DenseMapInfo<StoredObjCSelector>::getHashValue(Key);
+ }
+
+ static bool EqualKey(internal_key_type LHS, internal_key_type RHS) {
+ return llvm::DenseMapInfo<StoredObjCSelector>::isEqual(LHS, RHS);
+ }
+
+ static std::pair<unsigned, unsigned> ReadKeyDataLength(const uint8_t *&Data) {
+ unsigned KeyLength =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ unsigned DataLength =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ return {KeyLength, DataLength};
+ }
+
+ static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) {
+ internal_key_type Key;
+ Key.NumArgs =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ unsigned NumIdents = (Length - sizeof(uint16_t)) / sizeof(uint32_t);
+ for (unsigned i = 0; i != NumIdents; ++i) {
+ Key.Identifiers.push_back(
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(
+ Data));
+ }
+ return Key;
+ }
+
+ static data_type ReadData(internal_key_type Key, const uint8_t *Data,
+ unsigned Length) {
+ return endian::readNext<uint32_t, llvm::endianness::little, unaligned>(
+ Data);
+ }
+};
+
+/// Used to deserialize the on-disk global variable table.
+class GlobalVariableTableInfo
+ : public VersionedTableInfo<GlobalVariableTableInfo, ContextTableKey,
+ GlobalVariableInfo> {
+public:
+ static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) {
+ auto CtxID =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ auto ContextKind =
+ endian::readNext<uint8_t, llvm::endianness::little, unaligned>(Data);
+ auto NameID =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ return {CtxID, ContextKind, NameID};
+ }
+
+ hash_value_type ComputeHash(internal_key_type Key) {
+ return static_cast<size_t>(Key.hashValue());
+ }
+
+ static GlobalVariableInfo readUnversioned(internal_key_type Key,
+ const uint8_t *&Data) {
+ GlobalVariableInfo Info;
+ ReadVariableInfo(Data, Info);
+ return Info;
+ }
+};
+
+/// Used to deserialize the on-disk global function table.
+class GlobalFunctionTableInfo
+ : public VersionedTableInfo<GlobalFunctionTableInfo, ContextTableKey,
+ GlobalFunctionInfo> {
+public:
+ static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) {
+ auto CtxID =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ auto ContextKind =
+ endian::readNext<uint8_t, llvm::endianness::little, unaligned>(Data);
+ auto NameID =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ return {CtxID, ContextKind, NameID};
+ }
+
+ hash_value_type ComputeHash(internal_key_type Key) {
+ return static_cast<size_t>(Key.hashValue());
+ }
+
+ static GlobalFunctionInfo readUnversioned(internal_key_type Key,
+ const uint8_t *&Data) {
+ GlobalFunctionInfo Info;
+ ReadFunctionInfo(Data, Info);
+ return Info;
+ }
+};
+
+/// Used to deserialize the on-disk enumerator table.
+class EnumConstantTableInfo
+ : public VersionedTableInfo<EnumConstantTableInfo, uint32_t,
+ EnumConstantInfo> {
+public:
+ static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) {
+ auto NameID =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ return NameID;
+ }
+
+ hash_value_type ComputeHash(internal_key_type Key) {
+ return static_cast<size_t>(llvm::hash_value(Key));
+ }
+
+ static EnumConstantInfo readUnversioned(internal_key_type Key,
+ const uint8_t *&Data) {
+ EnumConstantInfo Info;
+ ReadCommonEntityInfo(Data, Info);
+ return Info;
+ }
+};
+
+/// Used to deserialize the on-disk tag table.
+class TagTableInfo
+ : public VersionedTableInfo<TagTableInfo, ContextTableKey, TagInfo> {
+public:
+ static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) {
+ auto CtxID =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ auto ContextKind =
+ endian::readNext<uint8_t, llvm::endianness::little, unaligned>(Data);
+ auto NameID =
+ endian::readNext<IdentifierID, llvm::endianness::little, unaligned>(
+ Data);
+ return {CtxID, ContextKind, NameID};
+ }
+
+ hash_value_type ComputeHash(internal_key_type Key) {
+ return static_cast<size_t>(Key.hashValue());
+ }
+
+ static TagInfo readUnversioned(internal_key_type Key, const uint8_t *&Data) {
+ TagInfo Info;
+
+ uint8_t Payload = *Data++;
+ if (Payload & 1)
+ Info.setFlagEnum(Payload & 2);
+ Payload >>= 2;
+ if (Payload > 0)
+ Info.EnumExtensibility =
+ static_cast<EnumExtensibilityKind>((Payload & 0x3) - 1);
+
+ unsigned ImportAsLength =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ if (ImportAsLength > 0) {
+ Info.SwiftImportAs =
+ std::string(reinterpret_cast<const char *>(Data), ImportAsLength - 1);
+ Data += ImportAsLength - 1;
+ }
+ unsigned RetainOpLength =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ if (RetainOpLength > 0) {
+ Info.SwiftRetainOp =
+ std::string(reinterpret_cast<const char *>(Data), RetainOpLength - 1);
+ Data += RetainOpLength - 1;
+ }
+ unsigned ReleaseOpLength =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
+ if (ReleaseOpLength > 0) {
+ Info.SwiftReleaseOp = std::string(reinterpret_cast<const char *>(Data),
+ ReleaseOpLength - 1);
+ Data += ReleaseOpLength - 1;
+ }
+
+ ReadCommonTypeInfo(Data, Info);
+ return Info;
+ }
+};
+
+/// Used to deserialize the on-disk typedef table.
+class TypedefTableInfo
+ : public VersionedTableInfo<TypedefTableInfo, ContextTableKey,
+ TypedefInfo> {
+public:
+ static internal_key_type ReadKey(const uint8_t *Data, unsigned Length) {
+ auto CtxID =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
+ auto ContextKind =
+ endian::readNext<uint8_t, llvm::endianness::little, unaligned>(Data);
+ auto nameID =
+ endian::readNext<IdentifierID, llvm::endianness::little, unaligned>(
+ Data);
+ return {CtxID, ContextKind, nameID};
+ }
+
+ hash_value_type ComputeHash(internal_key_type Key) {
+ return static_cast<size_t>(Key.hashValue());
+ }
+
+ static TypedefInfo readUnversioned(internal_key_type Key,
+ const uint8_t *&Data) {
+ TypedefInfo Info;
+
+ uint8_t Payload = *Data++;
+ if (Payload > 0)
+ Info.SwiftWrapper = static_cast<SwiftNewTypeKind>((Payload & 0x3) - 1);
+
+ ReadCommonTypeInfo(Data, Info);
+ return Info;
+ }
+};
+} // end anonymous namespace
+
+class APINotesReader::Implementation {
+public:
+ /// The input buffer for the API notes data.
+ llvm::MemoryBuffer *InputBuffer;
+
+ /// The Swift version to use for filtering.
+ llvm::VersionTuple SwiftVersion;
+
+ /// The name of the module that we read from the control block.
+ std::string ModuleName;
+
+ // The size and modification time of the source file from
+ // which this API notes file was created, if known.
+ std::optional<std::pair<off_t, time_t>> SourceFileSizeAndModTime;
+
+ using SerializedIdentifierTable =
+ llvm::OnDiskIterableChainedHashTable<IdentifierTableInfo>;
+
+ /// The identifier table.
+ std::unique_ptr<SerializedIdentifierTable> IdentifierTable;
+
+ using SerializedObjCContextIDTable =
+ llvm::OnDiskIterableChainedHashTable<ObjCContextIDTableInfo>;
+
+ /// The Objective-C context ID table.
+ std::unique_ptr<SerializedObjCContextIDTable> ObjCContextIDTable;
+
+ using SerializedObjCContextInfoTable =
+ llvm::OnDiskIterableChainedHashTable<ObjCContextInfoTableInfo>;
+
+ /// The Objective-C context info table.
+ std::unique_ptr<SerializedObjCContextInfoTable> ObjCContextInfoTable;
+
+ using SerializedObjCPropertyTable =
+ llvm::OnDiskIterableChainedHashTable<ObjCPropertyTableInfo>;
+
+ /// The Objective-C property table.
+ std::unique_ptr<SerializedObjCPropertyTable> ObjCPropertyTable;
+
+ using SerializedObjCMethodTable =
+ llvm::OnDiskIterableChainedHashTable<ObjCMethodTableInfo>;
+
+ /// The Objective-C method table.
+ std::unique_ptr<SerializedObjCMethodTable> ObjCMethodTable;
+
+ using SerializedObjCSelectorTable =
+ llvm::OnDiskIterableChainedHashTable<ObjCSelectorTableInfo>;
+
+ /// The Objective-C selector table.
+ std::unique_ptr<SerializedObjCSelectorTable> ObjCSelectorTable;
+
+ using SerializedGlobalVariableTable =
+ llvm::OnDiskIterableChainedHashTable<GlobalVariableTableInfo>;
+
+ /// The global variable table.
+ std::unique_ptr<SerializedGlobalVariableTable> GlobalVariableTable;
+
+ using SerializedGlobalFunctionTable =
+ llvm::OnDiskIterableChainedHashTable<GlobalFunctionTableInfo>;
+
+ /// The global function table.
+ std::unique_ptr<SerializedGlobalFunctionTable> GlobalFunctionTable;
+
+ using SerializedEnumConstantTable =
+ llvm::OnDiskIterableChainedHashTable<EnumConstantTableInfo>;
+
+ /// The enumerator table.
+ std::unique_ptr<SerializedEnumConstantTable> EnumConstantTable;
+
+ using SerializedTagTable = llvm::OnDiskIterableChainedHashTable<TagTableInfo>;
+
+ /// The tag table.
+ std::unique_ptr<SerializedTagTable> TagTable;
+
+ using SerializedTypedefTable =
+ llvm::OnDiskIterableChainedHashTable<TypedefTableInfo>;
+
+ /// The typedef table.
+ std::unique_ptr<SerializedTypedefTable> TypedefTable;
+
+ /// Retrieve the identifier ID for the given string, or an empty
+ /// optional if the string is unknown.
+ std::optional<IdentifierID> getIdentifier(llvm::StringRef Str);
+
+ /// Retrieve the selector ID for the given selector, or an empty
+ /// optional if the string is unknown.
+ std::optional<SelectorID> getSelector(ObjCSelectorRef Selector);
+
+ bool readControlBlock(llvm::BitstreamCursor &Cursor,
+ llvm::SmallVectorImpl<uint64_t> &Scratch);
+ bool readIdentifierBlock(llvm::BitstreamCursor &Cursor,
+ llvm::SmallVectorImpl<uint64_t> &Scratch);
+ bool readObjCContextBlock(llvm::BitstreamCursor &Cursor,
+ llvm::SmallVectorImpl<uint64_t> &Scratch);
+ bool readObjCPropertyBlock(llvm::BitstreamCursor &Cursor,
+ llvm::SmallVectorImpl<uint64_t> &Scratch);
+ bool readObjCMethodBlock(llvm::BitstreamCursor &Cursor,
+ llvm::SmallVectorImpl<uint64_t> &Scratch);
+ bool readObjCSelectorBlock(llvm::BitstreamCursor &Cursor,
+ llvm::SmallVectorImpl<uint64_t> &Scratch);
+ bool readGlobalVariableBlock(llvm::BitstreamCursor &Cursor,
+ llvm::SmallVectorImpl<uint64_t> &Scratch);
+ bool readGlobalFunctionBlock(llvm::BitstreamCursor &Cursor,
+ llvm::SmallVectorImpl<uint64_t> &Scratch);
+ bool readEnumConstantBlock(llvm::BitstreamCursor &Cursor,
+ llvm::SmallVectorImpl<uint64_t> &Scratch);
+ bool readTagBlock(llvm::BitstreamCursor &Cursor,
+ llvm::SmallVectorImpl<uint64_t> &Scratch);
+ bool readTypedefBlock(llvm::BitstreamCursor &Cursor,
+ llvm::SmallVectorImpl<uint64_t> &Scratch);
+};
+
+std::optional<IdentifierID>
+APINotesReader::Implementation::getIdentifier(llvm::StringRef Str) {
+ if (!IdentifierTable)
+ return std::nullopt;
+
+ if (Str.empty())
+ return IdentifierID(0);
+
+ auto Known = IdentifierTable->find(Str);
+ if (Known == IdentifierTable->end())
+ return std::nullopt;
+
+ return *Known;
+}
+
+std::optional<SelectorID>
+APINotesReader::Implementation::getSelector(ObjCSelectorRef Selector) {
+ if (!ObjCSelectorTable || !IdentifierTable)
+ return std::nullopt;
+
+ // Translate the identifiers.
+ StoredObjCSelector Key;
+ Key.NumArgs = Selector.NumArgs;
+ for (auto Ident : Selector.Identifiers) {
+ if (auto IdentID = getIdentifier(Ident)) {
+ Key.Identifiers.push_back(*IdentID);
+ } else {
+ return std::nullopt;
+ }
+ }
+
+ auto Known = ObjCSelectorTable->find(Key);
+ if (Known == ObjCSelectorTable->end())
+ return std::nullopt;
+
+ return *Known;
+}
+
+bool APINotesReader::Implementation::readControlBlock(
+ llvm::BitstreamCursor &Cursor, llvm::SmallVectorImpl<uint64_t> &Scratch) {
+ if (Cursor.EnterSubBlock(CONTROL_BLOCK_ID))
+ return true;
+
+ bool SawMetadata = false;
+
+ llvm::Expected<llvm::BitstreamEntry> MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ llvm::BitstreamEntry Next = MaybeNext.get();
+
+ while (Next.Kind != llvm::BitstreamEntry::EndBlock) {
+ if (Next.Kind == llvm::BitstreamEntry::Error)
+ return true;
+
+ if (Next.Kind == llvm::BitstreamEntry::SubBlock) {
+ // Unknown metadata sub-block, possibly for use by a future version of the
+ // API notes format.
+ if (Cursor.SkipBlock())
+ return true;
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ continue;
+ }
+
+ Scratch.clear();
+ llvm::StringRef BlobData;
+ llvm::Expected<unsigned> MaybeKind =
+ Cursor.readRecord(Next.ID, Scratch, &BlobData);
+ if (!MaybeKind) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeKind.takeError());
+ return false;
+ }
+ unsigned Kind = MaybeKind.get();
+
+ switch (Kind) {
+ case control_block::METADATA:
+ // Already saw metadata.
+ if (SawMetadata)
+ return true;
+
+ if (Scratch[0] != VERSION_MAJOR || Scratch[1] != VERSION_MINOR)
+ return true;
+
+ SawMetadata = true;
+ break;
+
+ case control_block::MODULE_NAME:
+ ModuleName = BlobData.str();
+ break;
+
+ case control_block::MODULE_OPTIONS:
+ break;
+
+ case control_block::SOURCE_FILE:
+ SourceFileSizeAndModTime = {Scratch[0], Scratch[1]};
+ break;
+
+ default:
+ // Unknown metadata record, possibly for use by a future version of the
+ // module format.
+ break;
+ }
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ }
+
+ return !SawMetadata;
+}
+
+bool APINotesReader::Implementation::readIdentifierBlock(
+ llvm::BitstreamCursor &Cursor, llvm::SmallVectorImpl<uint64_t> &Scratch) {
+ if (Cursor.EnterSubBlock(IDENTIFIER_BLOCK_ID))
+ return true;
+
+ llvm::Expected<llvm::BitstreamEntry> MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ llvm::BitstreamEntry Next = MaybeNext.get();
+
+ while (Next.Kind != llvm::BitstreamEntry::EndBlock) {
+ if (Next.Kind == llvm::BitstreamEntry::Error)
+ return true;
+
+ if (Next.Kind == llvm::BitstreamEntry::SubBlock) {
+ // Unknown sub-block, possibly for use by a future version of the
+ // API notes format.
+ if (Cursor.SkipBlock())
+ return true;
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ continue;
+ }
+
+ Scratch.clear();
+ llvm::StringRef BlobData;
+ llvm::Expected<unsigned> MaybeKind =
+ Cursor.readRecord(Next.ID, Scratch, &BlobData);
+ if (!MaybeKind) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeKind.takeError());
+ return false;
+ }
+ unsigned Kind = MaybeKind.get();
+ switch (Kind) {
+ case identifier_block::IDENTIFIER_DATA: {
+ // Already saw identifier table.
+ if (IdentifierTable)
+ return true;
+
+ uint32_t tableOffset;
+ identifier_block::IdentifierDataLayout::readRecord(Scratch, tableOffset);
+ auto base = reinterpret_cast<const uint8_t *>(BlobData.data());
+
+ IdentifierTable.reset(SerializedIdentifierTable::Create(
+ base + tableOffset, base + sizeof(uint32_t), base));
+ break;
+ }
+
+ default:
+ // Unknown record, possibly for use by a future version of the
+ // module format.
+ break;
+ }
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ }
+
+ return false;
+}
+
+bool APINotesReader::Implementation::readObjCContextBlock(
+ llvm::BitstreamCursor &Cursor, llvm::SmallVectorImpl<uint64_t> &Scratch) {
+ if (Cursor.EnterSubBlock(OBJC_CONTEXT_BLOCK_ID))
+ return true;
+
+ llvm::Expected<llvm::BitstreamEntry> MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ llvm::BitstreamEntry Next = MaybeNext.get();
+
+ while (Next.Kind != llvm::BitstreamEntry::EndBlock) {
+ if (Next.Kind == llvm::BitstreamEntry::Error)
+ return true;
+
+ if (Next.Kind == llvm::BitstreamEntry::SubBlock) {
+ // Unknown sub-block, possibly for use by a future version of the
+ // API notes format.
+ if (Cursor.SkipBlock())
+ return true;
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ continue;
+ }
+
+ Scratch.clear();
+ llvm::StringRef BlobData;
+ llvm::Expected<unsigned> MaybeKind =
+ Cursor.readRecord(Next.ID, Scratch, &BlobData);
+ if (!MaybeKind) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeKind.takeError());
+ return false;
+ }
+ unsigned Kind = MaybeKind.get();
+ switch (Kind) {
+ case objc_context_block::OBJC_CONTEXT_ID_DATA: {
+ // Already saw Objective-C context ID table.
+ if (ObjCContextIDTable)
+ return true;
+
+ uint32_t tableOffset;
+ objc_context_block::ObjCContextIDLayout::readRecord(Scratch, tableOffset);
+ auto base = reinterpret_cast<const uint8_t *>(BlobData.data());
+
+ ObjCContextIDTable.reset(SerializedObjCContextIDTable::Create(
+ base + tableOffset, base + sizeof(uint32_t), base));
+ break;
+ }
+
+ case objc_context_block::OBJC_CONTEXT_INFO_DATA: {
+ // Already saw Objective-C context info table.
+ if (ObjCContextInfoTable)
+ return true;
+
+ uint32_t tableOffset;
+ objc_context_block::ObjCContextInfoLayout::readRecord(Scratch,
+ tableOffset);
+ auto base = reinterpret_cast<const uint8_t *>(BlobData.data());
+
+ ObjCContextInfoTable.reset(SerializedObjCContextInfoTable::Create(
+ base + tableOffset, base + sizeof(uint32_t), base));
+ break;
+ }
+
+ default:
+ // Unknown record, possibly for use by a future version of the
+ // module format.
+ break;
+ }
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ }
+
+ return false;
+}
+
+bool APINotesReader::Implementation::readObjCPropertyBlock(
+ llvm::BitstreamCursor &Cursor, llvm::SmallVectorImpl<uint64_t> &Scratch) {
+ if (Cursor.EnterSubBlock(OBJC_PROPERTY_BLOCK_ID))
+ return true;
+
+ llvm::Expected<llvm::BitstreamEntry> MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ llvm::BitstreamEntry Next = MaybeNext.get();
+
+ while (Next.Kind != llvm::BitstreamEntry::EndBlock) {
+ if (Next.Kind == llvm::BitstreamEntry::Error)
+ return true;
+
+ if (Next.Kind == llvm::BitstreamEntry::SubBlock) {
+ // Unknown sub-block, possibly for use by a future version of the
+ // API notes format.
+ if (Cursor.SkipBlock())
+ return true;
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ continue;
+ }
+
+ Scratch.clear();
+ llvm::StringRef BlobData;
+ llvm::Expected<unsigned> MaybeKind =
+ Cursor.readRecord(Next.ID, Scratch, &BlobData);
+ if (!MaybeKind) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeKind.takeError());
+ return false;
+ }
+ unsigned Kind = MaybeKind.get();
+ switch (Kind) {
+ case objc_property_block::OBJC_PROPERTY_DATA: {
+ // Already saw Objective-C property table.
+ if (ObjCPropertyTable)
+ return true;
+
+ uint32_t tableOffset;
+ objc_property_block::ObjCPropertyDataLayout::readRecord(Scratch,
+ tableOffset);
+ auto base = reinterpret_cast<const uint8_t *>(BlobData.data());
+
+ ObjCPropertyTable.reset(SerializedObjCPropertyTable::Create(
+ base + tableOffset, base + sizeof(uint32_t), base));
+ break;
+ }
+
+ default:
+ // Unknown record, possibly for use by a future version of the
+ // module format.
+ break;
+ }
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ }
+
+ return false;
+}
+
+bool APINotesReader::Implementation::readObjCMethodBlock(
+ llvm::BitstreamCursor &Cursor, llvm::SmallVectorImpl<uint64_t> &Scratch) {
+ if (Cursor.EnterSubBlock(OBJC_METHOD_BLOCK_ID))
+ return true;
+
+ llvm::Expected<llvm::BitstreamEntry> MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ llvm::BitstreamEntry Next = MaybeNext.get();
+ while (Next.Kind != llvm::BitstreamEntry::EndBlock) {
+ if (Next.Kind == llvm::BitstreamEntry::Error)
+ return true;
+
+ if (Next.Kind == llvm::BitstreamEntry::SubBlock) {
+ // Unknown sub-block, possibly for use by a future version of the
+ // API notes format.
+ if (Cursor.SkipBlock())
+ return true;
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ continue;
+ }
+
+ Scratch.clear();
+ llvm::StringRef BlobData;
+ llvm::Expected<unsigned> MaybeKind =
+ Cursor.readRecord(Next.ID, Scratch, &BlobData);
+ if (!MaybeKind) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeKind.takeError());
+ return false;
+ }
+ unsigned Kind = MaybeKind.get();
+ switch (Kind) {
+ case objc_method_block::OBJC_METHOD_DATA: {
+ // Already saw Objective-C method table.
+ if (ObjCMethodTable)
+ return true;
+
+ uint32_t tableOffset;
+ objc_method_block::ObjCMethodDataLayout::readRecord(Scratch, tableOffset);
+ auto base = reinterpret_cast<const uint8_t *>(BlobData.data());
+
+ ObjCMethodTable.reset(SerializedObjCMethodTable::Create(
+ base + tableOffset, base + sizeof(uint32_t), base));
+ break;
+ }
+
+ default:
+ // Unknown record, possibly for use by a future version of the
+ // module format.
+ break;
+ }
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ }
+
+ return false;
+}
+
+bool APINotesReader::Implementation::readObjCSelectorBlock(
+ llvm::BitstreamCursor &Cursor, llvm::SmallVectorImpl<uint64_t> &Scratch) {
+ if (Cursor.EnterSubBlock(OBJC_SELECTOR_BLOCK_ID))
+ return true;
+
+ llvm::Expected<llvm::BitstreamEntry> MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ llvm::BitstreamEntry Next = MaybeNext.get();
+ while (Next.Kind != llvm::BitstreamEntry::EndBlock) {
+ if (Next.Kind == llvm::BitstreamEntry::Error)
+ return true;
+
+ if (Next.Kind == llvm::BitstreamEntry::SubBlock) {
+ // Unknown sub-block, possibly for use by a future version of the
+ // API notes format.
+ if (Cursor.SkipBlock())
+ return true;
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ continue;
+ }
+
+ Scratch.clear();
+ llvm::StringRef BlobData;
+ llvm::Expected<unsigned> MaybeKind =
+ Cursor.readRecord(Next.ID, Scratch, &BlobData);
+ if (!MaybeKind) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeKind.takeError());
+ return false;
+ }
+ unsigned Kind = MaybeKind.get();
+ switch (Kind) {
+ case objc_selector_block::OBJC_SELECTOR_DATA: {
+ // Already saw Objective-C selector table.
+ if (ObjCSelectorTable)
+ return true;
+
+ uint32_t tableOffset;
+ objc_selector_block::ObjCSelectorDataLayout::readRecord(Scratch,
+ tableOffset);
+ auto base = reinterpret_cast<const uint8_t *>(BlobData.data());
+
+ ObjCSelectorTable.reset(SerializedObjCSelectorTable::Create(
+ base + tableOffset, base + sizeof(uint32_t), base));
+ break;
+ }
+
+ default:
+ // Unknown record, possibly for use by a future version of the
+ // module format.
+ break;
+ }
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ }
+
+ return false;
+}
+
+bool APINotesReader::Implementation::readGlobalVariableBlock(
+ llvm::BitstreamCursor &Cursor, llvm::SmallVectorImpl<uint64_t> &Scratch) {
+ if (Cursor.EnterSubBlock(GLOBAL_VARIABLE_BLOCK_ID))
+ return true;
+
+ llvm::Expected<llvm::BitstreamEntry> MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ llvm::BitstreamEntry Next = MaybeNext.get();
+ while (Next.Kind != llvm::BitstreamEntry::EndBlock) {
+ if (Next.Kind == llvm::BitstreamEntry::Error)
+ return true;
+
+ if (Next.Kind == llvm::BitstreamEntry::SubBlock) {
+ // Unknown sub-block, possibly for use by a future version of the
+ // API notes format.
+ if (Cursor.SkipBlock())
+ return true;
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ continue;
+ }
+
+ Scratch.clear();
+ llvm::StringRef BlobData;
+ llvm::Expected<unsigned> MaybeKind =
+ Cursor.readRecord(Next.ID, Scratch, &BlobData);
+ if (!MaybeKind) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeKind.takeError());
+ return false;
+ }
+ unsigned Kind = MaybeKind.get();
+ switch (Kind) {
+ case global_variable_block::GLOBAL_VARIABLE_DATA: {
+ // Already saw global variable table.
+ if (GlobalVariableTable)
+ return true;
+
+ uint32_t tableOffset;
+ global_variable_block::GlobalVariableDataLayout::readRecord(Scratch,
+ tableOffset);
+ auto base = reinterpret_cast<const uint8_t *>(BlobData.data());
+
+ GlobalVariableTable.reset(SerializedGlobalVariableTable::Create(
+ base + tableOffset, base + sizeof(uint32_t), base));
+ break;
+ }
+
+ default:
+ // Unknown record, possibly for use by a future version of the
+ // module format.
+ break;
+ }
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ }
+
+ return false;
+}
+
+bool APINotesReader::Implementation::readGlobalFunctionBlock(
+ llvm::BitstreamCursor &Cursor, llvm::SmallVectorImpl<uint64_t> &Scratch) {
+ if (Cursor.EnterSubBlock(GLOBAL_FUNCTION_BLOCK_ID))
+ return true;
+
+ llvm::Expected<llvm::BitstreamEntry> MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ llvm::BitstreamEntry Next = MaybeNext.get();
+ while (Next.Kind != llvm::BitstreamEntry::EndBlock) {
+ if (Next.Kind == llvm::BitstreamEntry::Error)
+ return true;
+
+ if (Next.Kind == llvm::BitstreamEntry::SubBlock) {
+ // Unknown sub-block, possibly for use by a future version of the
+ // API notes format.
+ if (Cursor.SkipBlock())
+ return true;
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ continue;
+ }
+
+ Scratch.clear();
+ llvm::StringRef BlobData;
+ llvm::Expected<unsigned> MaybeKind =
+ Cursor.readRecord(Next.ID, Scratch, &BlobData);
+ if (!MaybeKind) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeKind.takeError());
+ return false;
+ }
+ unsigned Kind = MaybeKind.get();
+ switch (Kind) {
+ case global_function_block::GLOBAL_FUNCTION_DATA: {
+ // Already saw global function table.
+ if (GlobalFunctionTable)
+ return true;
+
+ uint32_t tableOffset;
+ global_function_block::GlobalFunctionDataLayout::readRecord(Scratch,
+ tableOffset);
+ auto base = reinterpret_cast<const uint8_t *>(BlobData.data());
+
+ GlobalFunctionTable.reset(SerializedGlobalFunctionTable::Create(
+ base + tableOffset, base + sizeof(uint32_t), base));
+ break;
+ }
+
+ default:
+ // Unknown record, possibly for use by a future version of the
+ // module format.
+ break;
+ }
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ }
+
+ return false;
+}
+
+bool APINotesReader::Implementation::readEnumConstantBlock(
+ llvm::BitstreamCursor &Cursor, llvm::SmallVectorImpl<uint64_t> &Scratch) {
+ if (Cursor.EnterSubBlock(ENUM_CONSTANT_BLOCK_ID))
+ return true;
+
+ llvm::Expected<llvm::BitstreamEntry> MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ llvm::BitstreamEntry Next = MaybeNext.get();
+ while (Next.Kind != llvm::BitstreamEntry::EndBlock) {
+ if (Next.Kind == llvm::BitstreamEntry::Error)
+ return true;
+
+ if (Next.Kind == llvm::BitstreamEntry::SubBlock) {
+ // Unknown sub-block, possibly for use by a future version of the
+ // API notes format.
+ if (Cursor.SkipBlock())
+ return true;
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ continue;
+ }
+
+ Scratch.clear();
+ llvm::StringRef BlobData;
+ llvm::Expected<unsigned> MaybeKind =
+ Cursor.readRecord(Next.ID, Scratch, &BlobData);
+ if (!MaybeKind) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeKind.takeError());
+ return false;
+ }
+ unsigned Kind = MaybeKind.get();
+ switch (Kind) {
+ case enum_constant_block::ENUM_CONSTANT_DATA: {
+ // Already saw enumerator table.
+ if (EnumConstantTable)
+ return true;
+
+ uint32_t tableOffset;
+ enum_constant_block::EnumConstantDataLayout::readRecord(Scratch,
+ tableOffset);
+ auto base = reinterpret_cast<const uint8_t *>(BlobData.data());
+
+ EnumConstantTable.reset(SerializedEnumConstantTable::Create(
+ base + tableOffset, base + sizeof(uint32_t), base));
+ break;
+ }
+
+ default:
+ // Unknown record, possibly for use by a future version of the
+ // module format.
+ break;
+ }
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ }
+
+ return false;
+}
+
+bool APINotesReader::Implementation::readTagBlock(
+ llvm::BitstreamCursor &Cursor, llvm::SmallVectorImpl<uint64_t> &Scratch) {
+ if (Cursor.EnterSubBlock(TAG_BLOCK_ID))
+ return true;
+
+ llvm::Expected<llvm::BitstreamEntry> MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ llvm::BitstreamEntry Next = MaybeNext.get();
+ while (Next.Kind != llvm::BitstreamEntry::EndBlock) {
+ if (Next.Kind == llvm::BitstreamEntry::Error)
+ return true;
+
+ if (Next.Kind == llvm::BitstreamEntry::SubBlock) {
+ // Unknown sub-block, possibly for use by a future version of the
+ // API notes format.
+ if (Cursor.SkipBlock())
+ return true;
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ continue;
+ }
+
+ Scratch.clear();
+ llvm::StringRef BlobData;
+ llvm::Expected<unsigned> MaybeKind =
+ Cursor.readRecord(Next.ID, Scratch, &BlobData);
+ if (!MaybeKind) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeKind.takeError());
+ return false;
+ }
+ unsigned Kind = MaybeKind.get();
+ switch (Kind) {
+ case tag_block::TAG_DATA: {
+ // Already saw tag table.
+ if (TagTable)
+ return true;
+
+ uint32_t tableOffset;
+ tag_block::TagDataLayout::readRecord(Scratch, tableOffset);
+ auto base = reinterpret_cast<const uint8_t *>(BlobData.data());
+
+ TagTable.reset(SerializedTagTable::Create(base + tableOffset,
+ base + sizeof(uint32_t), base));
+ break;
+ }
+
+ default:
+ // Unknown record, possibly for use by a future version of the
+ // module format.
+ break;
+ }
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ }
+
+ return false;
+}
+
+bool APINotesReader::Implementation::readTypedefBlock(
+ llvm::BitstreamCursor &Cursor, llvm::SmallVectorImpl<uint64_t> &Scratch) {
+ if (Cursor.EnterSubBlock(TYPEDEF_BLOCK_ID))
+ return true;
+
+ llvm::Expected<llvm::BitstreamEntry> MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ llvm::BitstreamEntry Next = MaybeNext.get();
+ while (Next.Kind != llvm::BitstreamEntry::EndBlock) {
+ if (Next.Kind == llvm::BitstreamEntry::Error)
+ return true;
+
+ if (Next.Kind == llvm::BitstreamEntry::SubBlock) {
+ // Unknown sub-block, possibly for use by a future version of the
+ // API notes format.
+ if (Cursor.SkipBlock())
+ return true;
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ continue;
+ }
+
+ Scratch.clear();
+ llvm::StringRef BlobData;
+ llvm::Expected<unsigned> MaybeKind =
+ Cursor.readRecord(Next.ID, Scratch, &BlobData);
+ if (!MaybeKind) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeKind.takeError());
+ return false;
+ }
+ unsigned Kind = MaybeKind.get();
+ switch (Kind) {
+ case typedef_block::TYPEDEF_DATA: {
+ // Already saw typedef table.
+ if (TypedefTable)
+ return true;
+
+ uint32_t tableOffset;
+ typedef_block::TypedefDataLayout::readRecord(Scratch, tableOffset);
+ auto base = reinterpret_cast<const uint8_t *>(BlobData.data());
+
+ TypedefTable.reset(SerializedTypedefTable::Create(
+ base + tableOffset, base + sizeof(uint32_t), base));
+ break;
+ }
+
+ default:
+ // Unknown record, possibly for use by a future version of the
+ // module format.
+ break;
+ }
+
+ MaybeNext = Cursor.advance();
+ if (!MaybeNext) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeNext.takeError());
+ return false;
+ }
+ Next = MaybeNext.get();
+ }
+
+ return false;
+}
+
+APINotesReader::APINotesReader(llvm::MemoryBuffer *InputBuffer,
+ llvm::VersionTuple SwiftVersion, bool &Failed)
+ : Implementation(new class Implementation) {
+ Failed = false;
+
+ // Initialize the input buffer.
+ Implementation->InputBuffer = InputBuffer;
+ Implementation->SwiftVersion = SwiftVersion;
+ llvm::BitstreamCursor Cursor(*Implementation->InputBuffer);
+
+ // Validate signature.
+ for (auto byte : API_NOTES_SIGNATURE) {
+ if (Cursor.AtEndOfStream()) {
+ Failed = true;
+ return;
+ }
+ if (llvm::Expected<llvm::SimpleBitstreamCursor::word_t> maybeRead =
+ Cursor.Read(8)) {
+ if (maybeRead.get() != byte) {
+ Failed = true;
+ return;
+ }
+ } else {
+ // FIXME this drops the error on the floor.
+ consumeError(maybeRead.takeError());
+ Failed = true;
+ return;
+ }
+ }
+
+ // Look at all of the blocks.
+ bool HasValidControlBlock = false;
+ llvm::SmallVector<uint64_t, 64> Scratch;
+ while (!Cursor.AtEndOfStream()) {
+ llvm::Expected<llvm::BitstreamEntry> MaybeTopLevelEntry = Cursor.advance();
+ if (!MaybeTopLevelEntry) {
+ // FIXME this drops the error on the floor.
+ consumeError(MaybeTopLevelEntry.takeError());
+ Failed = true;
+ return;
+ }
+ llvm::BitstreamEntry TopLevelEntry = MaybeTopLevelEntry.get();
+
+ if (TopLevelEntry.Kind != llvm::BitstreamEntry::SubBlock)
+ break;
+
+ switch (TopLevelEntry.ID) {
+ case llvm::bitc::BLOCKINFO_BLOCK_ID:
+ if (!Cursor.ReadBlockInfoBlock()) {
+ Failed = true;
+ break;
+ }
+ break;
+
+ case CONTROL_BLOCK_ID:
+ // Only allow a single control block.
+ if (HasValidControlBlock ||
+ Implementation->readControlBlock(Cursor, Scratch)) {
+ Failed = true;
+ return;
+ }
+
+ HasValidControlBlock = true;
+ break;
+
+ case IDENTIFIER_BLOCK_ID:
+ if (!HasValidControlBlock ||
+ Implementation->readIdentifierBlock(Cursor, Scratch)) {
+ Failed = true;
+ return;
+ }
+ break;
+
+ case OBJC_CONTEXT_BLOCK_ID:
+ if (!HasValidControlBlock ||
+ Implementation->readObjCContextBlock(Cursor, Scratch)) {
+ Failed = true;
+ return;
+ }
+
+ break;
+
+ case OBJC_PROPERTY_BLOCK_ID:
+ if (!HasValidControlBlock ||
+ Implementation->readObjCPropertyBlock(Cursor, Scratch)) {
+ Failed = true;
+ return;
+ }
+ break;
+
+ case OBJC_METHOD_BLOCK_ID:
+ if (!HasValidControlBlock ||
+ Implementation->readObjCMethodBlock(Cursor, Scratch)) {
+ Failed = true;
+ return;
+ }
+ break;
+
+ case OBJC_SELECTOR_BLOCK_ID:
+ if (!HasValidControlBlock ||
+ Implementation->readObjCSelectorBlock(Cursor, Scratch)) {
+ Failed = true;
+ return;
+ }
+ break;
+
+ case GLOBAL_VARIABLE_BLOCK_ID:
+ if (!HasValidControlBlock ||
+ Implementation->readGlobalVariableBlock(Cursor, Scratch)) {
+ Failed = true;
+ return;
+ }
+ break;
+
+ case GLOBAL_FUNCTION_BLOCK_ID:
+ if (!HasValidControlBlock ||
+ Implementation->readGlobalFunctionBlock(Cursor, Scratch)) {
+ Failed = true;
+ return;
+ }
+ break;
+
+ case ENUM_CONSTANT_BLOCK_ID:
+ if (!HasValidControlBlock ||
+ Implementation->readEnumConstantBlock(Cursor, Scratch)) {
+ Failed = true;
+ return;
+ }
+ break;
+
+ case TAG_BLOCK_ID:
+ if (!HasValidControlBlock ||
+ Implementation->readTagBlock(Cursor, Scratch)) {
+ Failed = true;
+ return;
+ }
+ break;
+
+ case TYPEDEF_BLOCK_ID:
+ if (!HasValidControlBlock ||
+ Implementation->readTypedefBlock(Cursor, Scratch)) {
+ Failed = true;
+ return;
+ }
+ break;
+
+ default:
+ // Unknown top-level block, possibly for use by a future version of the
+ // module format.
+ if (Cursor.SkipBlock()) {
+ Failed = true;
+ return;
+ }
+ break;
+ }
+ }
+
+ if (!Cursor.AtEndOfStream()) {
+ Failed = true;
+ return;
+ }
+}
+
+APINotesReader::~APINotesReader() { delete Implementation->InputBuffer; }
+
+std::unique_ptr<APINotesReader>
+APINotesReader::Create(std::unique_ptr<llvm::MemoryBuffer> InputBuffer,
+ llvm::VersionTuple SwiftVersion) {
+ bool Failed = false;
+ std::unique_ptr<APINotesReader> Reader(
+ new APINotesReader(InputBuffer.release(), SwiftVersion, Failed));
+ if (Failed)
+ return nullptr;
+
+ return Reader;
+}
+
+template <typename T>
+APINotesReader::VersionedInfo<T>::VersionedInfo(
+ llvm::VersionTuple Version,
+ llvm::SmallVector<std::pair<llvm::VersionTuple, T>, 1> Results)
+ : Results(std::move(Results)) {
+
+ assert(!Results.empty());
+ assert(std::is_sorted(
+ Results.begin(), Results.end(),
+ [](const std::pair<llvm::VersionTuple, T> &left,
+ const std::pair<llvm::VersionTuple, T> &right) -> bool {
+ assert(left.first != right.first && "two entries for the same version");
+ return left.first < right.first;
+ }));
+
+ Selected = std::nullopt;
+ for (unsigned i = 0, n = Results.size(); i != n; ++i) {
+ if (!Version.empty() && Results[i].first >= Version) {
+ // If the current version is "4", then entries for 4 are better than
+ // entries for 5, but both are valid. Because entries are sorted, we get
+ // that behavior by picking the first match.
+ Selected = i;
+ break;
+ }
+ }
+
+ // If we didn't find a match but we have an unversioned result, use the
+ // unversioned result. This will always be the first entry because we encode
+ // it as version 0.
+ if (!Selected && Results[0].first.empty())
+ Selected = 0;
+}
+
+auto APINotesReader::lookupObjCClassID(llvm::StringRef Name)
+ -> std::optional<ContextID> {
+ if (!Implementation->ObjCContextIDTable)
+ return std::nullopt;
+
+ std::optional<IdentifierID> ClassID = Implementation->getIdentifier(Name);
+ if (!ClassID)
+ return std::nullopt;
+
+ // ObjC classes can't be declared in C++ namespaces, so use -1 as the global
+ // context.
+ auto KnownID = Implementation->ObjCContextIDTable->find(
+ ContextTableKey(-1, (uint8_t)ContextKind::ObjCClass, *ClassID));
+ if (KnownID == Implementation->ObjCContextIDTable->end())
+ return std::nullopt;
+
+ return ContextID(*KnownID);
+}
+
+auto APINotesReader::lookupObjCClassInfo(llvm::StringRef Name)
+ -> VersionedInfo<ObjCContextInfo> {
+ if (!Implementation->ObjCContextInfoTable)
+ return std::nullopt;
+
+ std::optional<ContextID> CtxID = lookupObjCClassID(Name);
+ if (!CtxID)
+ return std::nullopt;
+
+ auto KnownInfo = Implementation->ObjCContextInfoTable->find(CtxID->Value);
+ if (KnownInfo == Implementation->ObjCContextInfoTable->end())
+ return std::nullopt;
+
+ return {Implementation->SwiftVersion, *KnownInfo};
+}
+
+auto APINotesReader::lookupObjCProtocolID(llvm::StringRef Name)
+ -> std::optional<ContextID> {
+ if (!Implementation->ObjCContextIDTable)
+ return std::nullopt;
+
+ std::optional<IdentifierID> classID = Implementation->getIdentifier(Name);
+ if (!classID)
+ return std::nullopt;
+
+ // ObjC classes can't be declared in C++ namespaces, so use -1 as the global
+ // context.
+ auto KnownID = Implementation->ObjCContextIDTable->find(
+ ContextTableKey(-1, (uint8_t)ContextKind::ObjCProtocol, *classID));
+ if (KnownID == Implementation->ObjCContextIDTable->end())
+ return std::nullopt;
+
+ return ContextID(*KnownID);
+}
+
+auto APINotesReader::lookupObjCProtocolInfo(llvm::StringRef Name)
+ -> VersionedInfo<ObjCContextInfo> {
+ if (!Implementation->ObjCContextInfoTable)
+ return std::nullopt;
+
+ std::optional<ContextID> CtxID = lookupObjCProtocolID(Name);
+ if (!CtxID)
+ return std::nullopt;
+
+ auto KnownInfo = Implementation->ObjCContextInfoTable->find(CtxID->Value);
+ if (KnownInfo == Implementation->ObjCContextInfoTable->end())
+ return std::nullopt;
+
+ return {Implementation->SwiftVersion, *KnownInfo};
+}
+
+auto APINotesReader::lookupObjCProperty(ContextID CtxID, llvm::StringRef Name,
+ bool IsInstance)
+ -> VersionedInfo<ObjCPropertyInfo> {
+ if (!Implementation->ObjCPropertyTable)
+ return std::nullopt;
+
+ std::optional<IdentifierID> PropertyID = Implementation->getIdentifier(Name);
+ if (!PropertyID)
+ return std::nullopt;
+
+ auto Known = Implementation->ObjCPropertyTable->find(
+ std::make_tuple(CtxID.Value, *PropertyID, (char)IsInstance));
+ if (Known == Implementation->ObjCPropertyTable->end())
+ return std::nullopt;
+
+ return {Implementation->SwiftVersion, *Known};
+}
+
+auto APINotesReader::lookupObjCMethod(ContextID CtxID, ObjCSelectorRef Selector,
+ bool IsInstanceMethod)
+ -> VersionedInfo<ObjCMethodInfo> {
+ if (!Implementation->ObjCMethodTable)
+ return std::nullopt;
+
+ std::optional<SelectorID> SelID = Implementation->getSelector(Selector);
+ if (!SelID)
+ return std::nullopt;
+
+ auto Known = Implementation->ObjCMethodTable->find(
+ ObjCMethodTableInfo::internal_key_type{CtxID.Value, *SelID,
+ IsInstanceMethod});
+ if (Known == Implementation->ObjCMethodTable->end())
+ return std::nullopt;
+
+ return {Implementation->SwiftVersion, *Known};
+}
+
+auto APINotesReader::lookupGlobalVariable(llvm::StringRef Name,
+ std::optional<Context> Ctx)
+ -> VersionedInfo<GlobalVariableInfo> {
+ if (!Implementation->GlobalVariableTable)
+ return std::nullopt;
+
+ std::optional<IdentifierID> NameID = Implementation->getIdentifier(Name);
+ if (!NameID)
+ return std::nullopt;
+
+ ContextTableKey Key(Ctx, *NameID);
+
+ auto Known = Implementation->GlobalVariableTable->find(Key);
+ if (Known == Implementation->GlobalVariableTable->end())
+ return std::nullopt;
+
+ return {Implementation->SwiftVersion, *Known};
+}
+
+auto APINotesReader::lookupGlobalFunction(llvm::StringRef Name,
+ std::optional<Context> Ctx)
+ -> VersionedInfo<GlobalFunctionInfo> {
+ if (!Implementation->GlobalFunctionTable)
+ return std::nullopt;
+
+ std::optional<IdentifierID> NameID = Implementation->getIdentifier(Name);
+ if (!NameID)
+ return std::nullopt;
+
+ ContextTableKey Key(Ctx, *NameID);
+
+ auto Known = Implementation->GlobalFunctionTable->find(Key);
+ if (Known == Implementation->GlobalFunctionTable->end())
+ return std::nullopt;
+
+ return {Implementation->SwiftVersion, *Known};
+}
+
+auto APINotesReader::lookupEnumConstant(llvm::StringRef Name)
+ -> VersionedInfo<EnumConstantInfo> {
+ if (!Implementation->EnumConstantTable)
+ return std::nullopt;
+
+ std::optional<IdentifierID> NameID = Implementation->getIdentifier(Name);
+ if (!NameID)
+ return std::nullopt;
+
+ auto Known = Implementation->EnumConstantTable->find(*NameID);
+ if (Known == Implementation->EnumConstantTable->end())
+ return std::nullopt;
+
+ return {Implementation->SwiftVersion, *Known};
+}
+
+auto APINotesReader::lookupTag(llvm::StringRef Name, std::optional<Context> Ctx)
+ -> VersionedInfo<TagInfo> {
+ if (!Implementation->TagTable)
+ return std::nullopt;
+
+ std::optional<IdentifierID> NameID = Implementation->getIdentifier(Name);
+ if (!NameID)
+ return std::nullopt;
+
+ ContextTableKey Key(Ctx, *NameID);
+
+ auto Known = Implementation->TagTable->find(Key);
+ if (Known == Implementation->TagTable->end())
+ return std::nullopt;
+
+ return {Implementation->SwiftVersion, *Known};
+}
+
+auto APINotesReader::lookupTypedef(llvm::StringRef Name,
+ std::optional<Context> Ctx)
+ -> VersionedInfo<TypedefInfo> {
+ if (!Implementation->TypedefTable)
+ return std::nullopt;
+
+ std::optional<IdentifierID> NameID = Implementation->getIdentifier(Name);
+ if (!NameID)
+ return std::nullopt;
+
+ ContextTableKey Key(Ctx, *NameID);
+
+ auto Known = Implementation->TypedefTable->find(Key);
+ if (Known == Implementation->TypedefTable->end())
+ return std::nullopt;
+
+ return {Implementation->SwiftVersion, *Known};
+}
+
+auto APINotesReader::lookupNamespaceID(
+ llvm::StringRef Name, std::optional<ContextID> ParentNamespaceID)
+ -> std::optional<ContextID> {
+ if (!Implementation->ObjCContextIDTable)
+ return std::nullopt;
+
+ std::optional<IdentifierID> NamespaceID = Implementation->getIdentifier(Name);
+ if (!NamespaceID)
+ return std::nullopt;
+
+ uint32_t RawParentNamespaceID =
+ ParentNamespaceID ? ParentNamespaceID->Value : -1;
+ auto KnownID = Implementation->ObjCContextIDTable->find(
+ {RawParentNamespaceID, (uint8_t)ContextKind::Namespace, *NamespaceID});
+ if (KnownID == Implementation->ObjCContextIDTable->end())
+ return std::nullopt;
+
+ return ContextID(*KnownID);
+}
+
+} // namespace api_notes
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/APINotes/APINotesWriter.cpp b/contrib/llvm-project/clang/lib/APINotes/APINotesWriter.cpp
new file mode 100644
index 000000000000..62a2ab179991
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/APINotes/APINotesWriter.cpp
@@ -0,0 +1,1384 @@
+//===-- APINotesWriter.cpp - API Notes Writer -------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/APINotes/APINotesWriter.h"
+#include "APINotesFormat.h"
+#include "clang/APINotes/Types.h"
+#include "clang/Basic/FileManager.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Bitstream/BitstreamWriter.h"
+#include "llvm/Support/DJB.h"
+#include "llvm/Support/OnDiskHashTable.h"
+#include "llvm/Support/VersionTuple.h"
+
+namespace clang {
+namespace api_notes {
+class APINotesWriter::Implementation {
+ friend class APINotesWriter;
+
+ template <typename T>
+ using VersionedSmallVector =
+ llvm::SmallVector<std::pair<llvm::VersionTuple, T>, 1>;
+
+ std::string ModuleName;
+ const FileEntry *SourceFile;
+
+ /// Scratch space for bitstream writing.
+ llvm::SmallVector<uint64_t, 64> Scratch;
+
+ /// Mapping from strings to identifier IDs.
+ llvm::StringMap<IdentifierID> IdentifierIDs;
+
+ /// Information about contexts (Objective-C classes or protocols or C++
+ /// namespaces).
+ ///
+ /// Indexed by the parent context ID, context kind and the identifier ID of
+ /// this context and provides both the context ID and information describing
+ /// the context within that module.
+ llvm::DenseMap<ContextTableKey,
+ std::pair<unsigned, VersionedSmallVector<ObjCContextInfo>>>
+ ObjCContexts;
+
+ /// Information about parent contexts for each context.
+ ///
+ /// Indexed by context ID, provides the parent context ID.
+ llvm::DenseMap<uint32_t, uint32_t> ParentContexts;
+
+ /// Mapping from context IDs to the identifier ID holding the name.
+ llvm::DenseMap<unsigned, unsigned> ObjCContextNames;
+
+ /// Information about Objective-C properties.
+ ///
+ /// Indexed by the context ID, property name, and whether this is an
+ /// instance property.
+ llvm::DenseMap<
+ std::tuple<unsigned, unsigned, char>,
+ llvm::SmallVector<std::pair<VersionTuple, ObjCPropertyInfo>, 1>>
+ ObjCProperties;
+
+ /// Information about Objective-C methods.
+ ///
+ /// Indexed by the context ID, selector ID, and Boolean (stored as a char)
+ /// indicating whether this is a class or instance method.
+ llvm::DenseMap<std::tuple<unsigned, unsigned, char>,
+ llvm::SmallVector<std::pair<VersionTuple, ObjCMethodInfo>, 1>>
+ ObjCMethods;
+
+ /// Mapping from selectors to selector ID.
+ llvm::DenseMap<StoredObjCSelector, SelectorID> SelectorIDs;
+
+ /// Information about global variables.
+ ///
+ /// Indexed by the context ID, contextKind, identifier ID.
+ llvm::DenseMap<
+ ContextTableKey,
+ llvm::SmallVector<std::pair<VersionTuple, GlobalVariableInfo>, 1>>
+ GlobalVariables;
+
+ /// Information about global functions.
+ ///
+ /// Indexed by the context ID, contextKind, identifier ID.
+ llvm::DenseMap<
+ ContextTableKey,
+ llvm::SmallVector<std::pair<VersionTuple, GlobalFunctionInfo>, 1>>
+ GlobalFunctions;
+
+ /// Information about enumerators.
+ ///
+ /// Indexed by the identifier ID.
+ llvm::DenseMap<
+ unsigned, llvm::SmallVector<std::pair<VersionTuple, EnumConstantInfo>, 1>>
+ EnumConstants;
+
+ /// Information about tags.
+ ///
+ /// Indexed by the context ID, contextKind, identifier ID.
+ llvm::DenseMap<ContextTableKey,
+ llvm::SmallVector<std::pair<VersionTuple, TagInfo>, 1>>
+ Tags;
+
+ /// Information about typedefs.
+ ///
+ /// Indexed by the context ID, contextKind, identifier ID.
+ llvm::DenseMap<ContextTableKey,
+ llvm::SmallVector<std::pair<VersionTuple, TypedefInfo>, 1>>
+ Typedefs;
+
+ /// Retrieve the ID for the given identifier.
+ IdentifierID getIdentifier(StringRef Identifier) {
+ if (Identifier.empty())
+ return 0;
+
+ auto Known = IdentifierIDs.find(Identifier);
+ if (Known != IdentifierIDs.end())
+ return Known->second;
+
+ // Add to the identifier table.
+ Known = IdentifierIDs.insert({Identifier, IdentifierIDs.size() + 1}).first;
+ return Known->second;
+ }
+
+ /// Retrieve the ID for the given selector.
+ SelectorID getSelector(ObjCSelectorRef SelectorRef) {
+ // Translate the selector reference into a stored selector.
+ StoredObjCSelector Selector;
+ Selector.Identifiers.reserve(SelectorRef.Identifiers.size());
+ for (auto piece : SelectorRef.Identifiers)
+ Selector.Identifiers.push_back(getIdentifier(piece));
+
+ // Look for the stored selector.
+ auto Known = SelectorIDs.find(Selector);
+ if (Known != SelectorIDs.end())
+ return Known->second;
+
+ // Add to the selector table.
+ Known = SelectorIDs.insert({Selector, SelectorIDs.size()}).first;
+ return Known->second;
+ }
+
+private:
+ void writeBlockInfoBlock(llvm::BitstreamWriter &Stream);
+ void writeControlBlock(llvm::BitstreamWriter &Stream);
+ void writeIdentifierBlock(llvm::BitstreamWriter &Stream);
+ void writeObjCContextBlock(llvm::BitstreamWriter &Stream);
+ void writeObjCPropertyBlock(llvm::BitstreamWriter &Stream);
+ void writeObjCMethodBlock(llvm::BitstreamWriter &Stream);
+ void writeObjCSelectorBlock(llvm::BitstreamWriter &Stream);
+ void writeGlobalVariableBlock(llvm::BitstreamWriter &Stream);
+ void writeGlobalFunctionBlock(llvm::BitstreamWriter &Stream);
+ void writeEnumConstantBlock(llvm::BitstreamWriter &Stream);
+ void writeTagBlock(llvm::BitstreamWriter &Stream);
+ void writeTypedefBlock(llvm::BitstreamWriter &Stream);
+
+public:
+ Implementation(llvm::StringRef ModuleName, const FileEntry *SF)
+ : ModuleName(std::string(ModuleName)), SourceFile(SF) {}
+
+ void writeToStream(llvm::raw_ostream &OS);
+};
+
+void APINotesWriter::Implementation::writeToStream(llvm::raw_ostream &OS) {
+ llvm::SmallVector<char, 0> Buffer;
+
+ {
+ llvm::BitstreamWriter Stream(Buffer);
+
+ // Emit the signature.
+ for (unsigned char Byte : API_NOTES_SIGNATURE)
+ Stream.Emit(Byte, 8);
+
+ // Emit the blocks.
+ writeBlockInfoBlock(Stream);
+ writeControlBlock(Stream);
+ writeIdentifierBlock(Stream);
+ writeObjCContextBlock(Stream);
+ writeObjCPropertyBlock(Stream);
+ writeObjCMethodBlock(Stream);
+ writeObjCSelectorBlock(Stream);
+ writeGlobalVariableBlock(Stream);
+ writeGlobalFunctionBlock(Stream);
+ writeEnumConstantBlock(Stream);
+ writeTagBlock(Stream);
+ writeTypedefBlock(Stream);
+ }
+
+ OS.write(Buffer.data(), Buffer.size());
+ OS.flush();
+}
+
+namespace {
+/// Record the name of a block.
+void emitBlockID(llvm::BitstreamWriter &Stream, unsigned ID,
+ llvm::StringRef Name) {
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_SETBID,
+ llvm::ArrayRef<unsigned>{ID});
+
+ // Emit the block name if present.
+ if (Name.empty())
+ return;
+ Stream.EmitRecord(
+ llvm::bitc::BLOCKINFO_CODE_BLOCKNAME,
+ llvm::ArrayRef<unsigned char>(
+ const_cast<unsigned char *>(
+ reinterpret_cast<const unsigned char *>(Name.data())),
+ Name.size()));
+}
+
+/// Record the name of a record within a block.
+void emitRecordID(llvm::BitstreamWriter &Stream, unsigned ID,
+ llvm::StringRef Name) {
+ assert(ID < 256 && "can't fit record ID in next to name");
+
+ llvm::SmallVector<unsigned char, 64> Buffer;
+ Buffer.resize(Name.size() + 1);
+ Buffer[0] = ID;
+ memcpy(Buffer.data() + 1, Name.data(), Name.size());
+
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_SETRECORDNAME, Buffer);
+}
+} // namespace
+
+void APINotesWriter::Implementation::writeBlockInfoBlock(
+ llvm::BitstreamWriter &Stream) {
+ llvm::BCBlockRAII Scope(Stream, llvm::bitc::BLOCKINFO_BLOCK_ID, 2);
+
+#define BLOCK(Block) emitBlockID(Stream, Block##_ID, #Block)
+#define BLOCK_RECORD(NameSpace, Block) \
+ emitRecordID(Stream, NameSpace::Block, #Block)
+ BLOCK(CONTROL_BLOCK);
+ BLOCK_RECORD(control_block, METADATA);
+ BLOCK_RECORD(control_block, MODULE_NAME);
+
+ BLOCK(IDENTIFIER_BLOCK);
+ BLOCK_RECORD(identifier_block, IDENTIFIER_DATA);
+
+ BLOCK(OBJC_CONTEXT_BLOCK);
+ BLOCK_RECORD(objc_context_block, OBJC_CONTEXT_ID_DATA);
+
+ BLOCK(OBJC_PROPERTY_BLOCK);
+ BLOCK_RECORD(objc_property_block, OBJC_PROPERTY_DATA);
+
+ BLOCK(OBJC_METHOD_BLOCK);
+ BLOCK_RECORD(objc_method_block, OBJC_METHOD_DATA);
+
+ BLOCK(OBJC_SELECTOR_BLOCK);
+ BLOCK_RECORD(objc_selector_block, OBJC_SELECTOR_DATA);
+
+ BLOCK(GLOBAL_VARIABLE_BLOCK);
+ BLOCK_RECORD(global_variable_block, GLOBAL_VARIABLE_DATA);
+
+ BLOCK(GLOBAL_FUNCTION_BLOCK);
+ BLOCK_RECORD(global_function_block, GLOBAL_FUNCTION_DATA);
+#undef BLOCK_RECORD
+#undef BLOCK
+}
+
+void APINotesWriter::Implementation::writeControlBlock(
+ llvm::BitstreamWriter &Stream) {
+ llvm::BCBlockRAII Scope(Stream, CONTROL_BLOCK_ID, 3);
+
+ control_block::MetadataLayout Metadata(Stream);
+ Metadata.emit(Scratch, VERSION_MAJOR, VERSION_MINOR);
+
+ control_block::ModuleNameLayout ModuleName(Stream);
+ ModuleName.emit(Scratch, this->ModuleName);
+
+ if (SourceFile) {
+ control_block::SourceFileLayout SourceFile(Stream);
+ SourceFile.emit(Scratch, this->SourceFile->getSize(),
+ this->SourceFile->getModificationTime());
+ }
+}
+
+namespace {
+/// Used to serialize the on-disk identifier table.
+class IdentifierTableInfo {
+public:
+ using key_type = StringRef;
+ using key_type_ref = key_type;
+ using data_type = IdentifierID;
+ using data_type_ref = const data_type &;
+ using hash_value_type = uint32_t;
+ using offset_type = unsigned;
+
+ hash_value_type ComputeHash(key_type_ref Key) { return llvm::djbHash(Key); }
+
+ std::pair<unsigned, unsigned>
+ EmitKeyDataLength(raw_ostream &OS, key_type_ref Key, data_type_ref) {
+ uint32_t KeyLength = Key.size();
+ uint32_t DataLength = sizeof(uint32_t);
+
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+ writer.write<uint16_t>(KeyLength);
+ writer.write<uint16_t>(DataLength);
+ return {KeyLength, DataLength};
+ }
+
+ void EmitKey(raw_ostream &OS, key_type_ref Key, unsigned) { OS << Key; }
+
+ void EmitData(raw_ostream &OS, key_type_ref, data_type_ref Data, unsigned) {
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+ writer.write<uint32_t>(Data);
+ }
+};
+} // namespace
+
+void APINotesWriter::Implementation::writeIdentifierBlock(
+ llvm::BitstreamWriter &Stream) {
+ llvm::BCBlockRAII restoreBlock(Stream, IDENTIFIER_BLOCK_ID, 3);
+
+ if (IdentifierIDs.empty())
+ return;
+
+ llvm::SmallString<4096> HashTableBlob;
+ uint32_t Offset;
+ {
+ llvm::OnDiskChainedHashTableGenerator<IdentifierTableInfo> Generator;
+ for (auto &II : IdentifierIDs)
+ Generator.insert(II.first(), II.second);
+
+ llvm::raw_svector_ostream BlobStream(HashTableBlob);
+ // Make sure that no bucket is at offset 0
+ llvm::support::endian::write<uint32_t>(BlobStream, 0,
+ llvm::endianness::little);
+ Offset = Generator.Emit(BlobStream);
+ }
+
+ identifier_block::IdentifierDataLayout IdentifierData(Stream);
+ IdentifierData.emit(Scratch, Offset, HashTableBlob);
+}
+
+namespace {
+/// Used to serialize the on-disk Objective-C context table.
+class ObjCContextIDTableInfo {
+public:
+ using key_type = ContextTableKey;
+ using key_type_ref = key_type;
+ using data_type = unsigned;
+ using data_type_ref = const data_type &;
+ using hash_value_type = size_t;
+ using offset_type = unsigned;
+
+ hash_value_type ComputeHash(key_type_ref Key) {
+ return static_cast<size_t>(Key.hashValue());
+ }
+
+ std::pair<unsigned, unsigned> EmitKeyDataLength(raw_ostream &OS, key_type_ref,
+ data_type_ref) {
+ uint32_t KeyLength = sizeof(uint32_t) + sizeof(uint8_t) + sizeof(uint32_t);
+ uint32_t DataLength = sizeof(uint32_t);
+
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+ writer.write<uint16_t>(KeyLength);
+ writer.write<uint16_t>(DataLength);
+ return {KeyLength, DataLength};
+ }
+
+ void EmitKey(raw_ostream &OS, key_type_ref Key, unsigned) {
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+ writer.write<uint32_t>(Key.parentContextID);
+ writer.write<uint8_t>(Key.contextKind);
+ writer.write<uint32_t>(Key.contextID);
+ }
+
+ void EmitData(raw_ostream &OS, key_type_ref, data_type_ref Data, unsigned) {
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+ writer.write<uint32_t>(Data);
+ }
+};
+
+/// Localized helper to make a type dependent, thwarting template argument
+/// deduction.
+template <typename T> struct MakeDependent { typedef T Type; };
+
+/// Retrieve the serialized size of the given VersionTuple, for use in
+/// on-disk hash tables.
+unsigned getVersionTupleSize(const VersionTuple &VT) {
+ unsigned size = sizeof(uint8_t) + /*major*/ sizeof(uint32_t);
+ if (VT.getMinor())
+ size += sizeof(uint32_t);
+ if (VT.getSubminor())
+ size += sizeof(uint32_t);
+ if (VT.getBuild())
+ size += sizeof(uint32_t);
+ return size;
+}
+
+/// Determine the size of an array of versioned information,
+template <typename T>
+unsigned getVersionedInfoSize(
+ const llvm::SmallVectorImpl<std::pair<llvm::VersionTuple, T>> &VI,
+ llvm::function_ref<unsigned(const typename MakeDependent<T>::Type &)>
+ getInfoSize) {
+ unsigned result = sizeof(uint16_t); // # of elements
+ for (const auto &E : VI) {
+ result += getVersionTupleSize(E.first);
+ result += getInfoSize(E.second);
+ }
+ return result;
+}
+
+/// Emit a serialized representation of a version tuple.
+void emitVersionTuple(raw_ostream &OS, const VersionTuple &VT) {
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+
+ // First byte contains the number of components beyond the 'major' component.
+ uint8_t descriptor;
+ if (VT.getBuild())
+ descriptor = 3;
+ else if (VT.getSubminor())
+ descriptor = 2;
+ else if (VT.getMinor())
+ descriptor = 1;
+ else
+ descriptor = 0;
+ writer.write<uint8_t>(descriptor);
+
+ // Write the components.
+ writer.write<uint32_t>(VT.getMajor());
+ if (auto minor = VT.getMinor())
+ writer.write<uint32_t>(*minor);
+ if (auto subminor = VT.getSubminor())
+ writer.write<uint32_t>(*subminor);
+ if (auto build = VT.getBuild())
+ writer.write<uint32_t>(*build);
+}
+
+/// Emit versioned information.
+template <typename T>
+void emitVersionedInfo(
+ raw_ostream &OS, llvm::SmallVectorImpl<std::pair<VersionTuple, T>> &VI,
+ llvm::function_ref<void(raw_ostream &,
+ const typename MakeDependent<T>::Type &)>
+ emitInfo) {
+ std::sort(VI.begin(), VI.end(),
+ [](const std::pair<VersionTuple, T> &LHS,
+ const std::pair<VersionTuple, T> &RHS) -> bool {
+ assert(LHS.first != RHS.first &&
+ "two entries for the same version");
+ return LHS.first < RHS.first;
+ });
+
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+ writer.write<uint16_t>(VI.size());
+ for (const auto &E : VI) {
+ emitVersionTuple(OS, E.first);
+ emitInfo(OS, E.second);
+ }
+}
+
+/// On-disk hash table info key base for handling versioned data.
+template <typename Derived, typename KeyType, typename UnversionedDataType>
+class VersionedTableInfo {
+ Derived &asDerived() { return *static_cast<Derived *>(this); }
+
+ const Derived &asDerived() const {
+ return *static_cast<const Derived *>(this);
+ }
+
+public:
+ using key_type = KeyType;
+ using key_type_ref = key_type;
+ using data_type =
+ llvm::SmallVector<std::pair<llvm::VersionTuple, UnversionedDataType>, 1>;
+ using data_type_ref = data_type &;
+ using hash_value_type = size_t;
+ using offset_type = unsigned;
+
+ std::pair<unsigned, unsigned>
+ EmitKeyDataLength(raw_ostream &OS, key_type_ref Key, data_type_ref Data) {
+ uint32_t KeyLength = asDerived().getKeyLength(Key);
+ uint32_t DataLength =
+ getVersionedInfoSize(Data, [this](const UnversionedDataType &UI) {
+ return asDerived().getUnversionedInfoSize(UI);
+ });
+
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+ writer.write<uint16_t>(KeyLength);
+ writer.write<uint16_t>(DataLength);
+ return {KeyLength, DataLength};
+ }
+
+ void EmitData(raw_ostream &OS, key_type_ref, data_type_ref Data, unsigned) {
+ emitVersionedInfo(
+ OS, Data, [this](llvm::raw_ostream &OS, const UnversionedDataType &UI) {
+ asDerived().emitUnversionedInfo(OS, UI);
+ });
+ }
+};
+
+/// Emit a serialized representation of the common entity information.
+void emitCommonEntityInfo(raw_ostream &OS, const CommonEntityInfo &CEI) {
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+
+ uint8_t payload = 0;
+ if (auto swiftPrivate = CEI.isSwiftPrivate()) {
+ payload |= 0x01;
+ if (*swiftPrivate)
+ payload |= 0x02;
+ }
+ payload <<= 1;
+ payload |= CEI.Unavailable;
+ payload <<= 1;
+ payload |= CEI.UnavailableInSwift;
+
+ writer.write<uint8_t>(payload);
+
+ writer.write<uint16_t>(CEI.UnavailableMsg.size());
+ OS.write(CEI.UnavailableMsg.c_str(), CEI.UnavailableMsg.size());
+
+ writer.write<uint16_t>(CEI.SwiftName.size());
+ OS.write(CEI.SwiftName.c_str(), CEI.SwiftName.size());
+}
+
+/// Retrieve the serialized size of the given CommonEntityInfo, for use in
+/// on-disk hash tables.
+unsigned getCommonEntityInfoSize(const CommonEntityInfo &CEI) {
+ return 5 + CEI.UnavailableMsg.size() + CEI.SwiftName.size();
+}
+
+// Retrieve the serialized size of the given CommonTypeInfo, for use
+// in on-disk hash tables.
+unsigned getCommonTypeInfoSize(const CommonTypeInfo &CTI) {
+ return 2 + (CTI.getSwiftBridge() ? CTI.getSwiftBridge()->size() : 0) + 2 +
+ (CTI.getNSErrorDomain() ? CTI.getNSErrorDomain()->size() : 0) +
+ getCommonEntityInfoSize(CTI);
+}
+
+/// Emit a serialized representation of the common type information.
+void emitCommonTypeInfo(raw_ostream &OS, const CommonTypeInfo &CTI) {
+ emitCommonEntityInfo(OS, CTI);
+
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+ if (auto swiftBridge = CTI.getSwiftBridge()) {
+ writer.write<uint16_t>(swiftBridge->size() + 1);
+ OS.write(swiftBridge->c_str(), swiftBridge->size());
+ } else {
+ writer.write<uint16_t>(0);
+ }
+ if (auto nsErrorDomain = CTI.getNSErrorDomain()) {
+ writer.write<uint16_t>(nsErrorDomain->size() + 1);
+ OS.write(nsErrorDomain->c_str(), CTI.getNSErrorDomain()->size());
+ } else {
+ writer.write<uint16_t>(0);
+ }
+}
+
+/// Used to serialize the on-disk Objective-C property table.
+class ObjCContextInfoTableInfo
+ : public VersionedTableInfo<ObjCContextInfoTableInfo, unsigned,
+ ObjCContextInfo> {
+public:
+ unsigned getKeyLength(key_type_ref) { return sizeof(uint32_t); }
+
+ void EmitKey(raw_ostream &OS, key_type_ref Key, unsigned) {
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+ writer.write<uint32_t>(Key);
+ }
+
+ hash_value_type ComputeHash(key_type_ref Key) {
+ return static_cast<size_t>(llvm::hash_value(Key));
+ }
+
+ unsigned getUnversionedInfoSize(const ObjCContextInfo &OCI) {
+ return getCommonTypeInfoSize(OCI) + 1;
+ }
+
+ void emitUnversionedInfo(raw_ostream &OS, const ObjCContextInfo &OCI) {
+ emitCommonTypeInfo(OS, OCI);
+
+ uint8_t payload = 0;
+ if (auto swiftImportAsNonGeneric = OCI.getSwiftImportAsNonGeneric())
+ payload |= (0x01 << 1) | (uint8_t)swiftImportAsNonGeneric.value();
+ payload <<= 2;
+ if (auto swiftObjCMembers = OCI.getSwiftObjCMembers())
+ payload |= (0x01 << 1) | (uint8_t)swiftObjCMembers.value();
+ payload <<= 3;
+ if (auto nullable = OCI.getDefaultNullability())
+ payload |= (0x01 << 2) | static_cast<uint8_t>(*nullable);
+ payload = (payload << 1) | (OCI.hasDesignatedInits() ? 1 : 0);
+
+ OS << payload;
+ }
+};
+} // namespace
+
+void APINotesWriter::Implementation::writeObjCContextBlock(
+ llvm::BitstreamWriter &Stream) {
+ llvm::BCBlockRAII restoreBlock(Stream, OBJC_CONTEXT_BLOCK_ID, 3);
+
+ if (ObjCContexts.empty())
+ return;
+
+ {
+ llvm::SmallString<4096> HashTableBlob;
+ uint32_t Offset;
+ {
+ llvm::OnDiskChainedHashTableGenerator<ObjCContextIDTableInfo> Generator;
+ for (auto &OC : ObjCContexts)
+ Generator.insert(OC.first, OC.second.first);
+
+ llvm::raw_svector_ostream BlobStream(HashTableBlob);
+ // Make sure that no bucket is at offset 0
+ llvm::support::endian::write<uint32_t>(BlobStream, 0,
+ llvm::endianness::little);
+ Offset = Generator.Emit(BlobStream);
+ }
+
+ objc_context_block::ObjCContextIDLayout ObjCContextID(Stream);
+ ObjCContextID.emit(Scratch, Offset, HashTableBlob);
+ }
+
+ {
+ llvm::SmallString<4096> HashTableBlob;
+ uint32_t Offset;
+ {
+ llvm::OnDiskChainedHashTableGenerator<ObjCContextInfoTableInfo> Generator;
+ for (auto &OC : ObjCContexts)
+ Generator.insert(OC.second.first, OC.second.second);
+
+ llvm::raw_svector_ostream BlobStream(HashTableBlob);
+ // Make sure that no bucket is at offset 0
+ llvm::support::endian::write<uint32_t>(BlobStream, 0,
+ llvm::endianness::little);
+ Offset = Generator.Emit(BlobStream);
+ }
+
+ objc_context_block::ObjCContextInfoLayout ObjCContextInfo(Stream);
+ ObjCContextInfo.emit(Scratch, Offset, HashTableBlob);
+ }
+}
+
+namespace {
+/// Retrieve the serialized size of the given VariableInfo, for use in
+/// on-disk hash tables.
+unsigned getVariableInfoSize(const VariableInfo &VI) {
+ return 2 + getCommonEntityInfoSize(VI) + 2 + VI.getType().size();
+}
+
+/// Emit a serialized representation of the variable information.
+void emitVariableInfo(raw_ostream &OS, const VariableInfo &VI) {
+ emitCommonEntityInfo(OS, VI);
+
+ uint8_t bytes[2] = {0, 0};
+ if (auto nullable = VI.getNullability()) {
+ bytes[0] = 1;
+ bytes[1] = static_cast<uint8_t>(*nullable);
+ } else {
+ // Nothing to do.
+ }
+
+ OS.write(reinterpret_cast<const char *>(bytes), 2);
+
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+ writer.write<uint16_t>(VI.getType().size());
+ OS.write(VI.getType().data(), VI.getType().size());
+}
+
+/// Used to serialize the on-disk Objective-C property table.
+class ObjCPropertyTableInfo
+ : public VersionedTableInfo<ObjCPropertyTableInfo,
+ std::tuple<unsigned, unsigned, char>,
+ ObjCPropertyInfo> {
+public:
+ unsigned getKeyLength(key_type_ref) {
+ return sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint8_t);
+ }
+
+ void EmitKey(raw_ostream &OS, key_type_ref Key, unsigned) {
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+ writer.write<uint32_t>(std::get<0>(Key));
+ writer.write<uint32_t>(std::get<1>(Key));
+ writer.write<uint8_t>(std::get<2>(Key));
+ }
+
+ hash_value_type ComputeHash(key_type_ref Key) {
+ return static_cast<size_t>(llvm::hash_value(Key));
+ }
+
+ unsigned getUnversionedInfoSize(const ObjCPropertyInfo &OPI) {
+ return getVariableInfoSize(OPI) + 1;
+ }
+
+ void emitUnversionedInfo(raw_ostream &OS, const ObjCPropertyInfo &OPI) {
+ emitVariableInfo(OS, OPI);
+
+ uint8_t flags = 0;
+ if (auto value = OPI.getSwiftImportAsAccessors()) {
+ flags |= 1 << 0;
+ flags |= value.value() << 1;
+ }
+ OS << flags;
+ }
+};
+} // namespace
+
+void APINotesWriter::Implementation::writeObjCPropertyBlock(
+ llvm::BitstreamWriter &Stream) {
+ llvm::BCBlockRAII Scope(Stream, OBJC_PROPERTY_BLOCK_ID, 3);
+
+ if (ObjCProperties.empty())
+ return;
+
+ {
+ llvm::SmallString<4096> HashTableBlob;
+ uint32_t Offset;
+ {
+ llvm::OnDiskChainedHashTableGenerator<ObjCPropertyTableInfo> Generator;
+ for (auto &OP : ObjCProperties)
+ Generator.insert(OP.first, OP.second);
+
+ llvm::raw_svector_ostream BlobStream(HashTableBlob);
+ // Make sure that no bucket is at offset 0
+ llvm::support::endian::write<uint32_t>(BlobStream, 0,
+ llvm::endianness::little);
+ Offset = Generator.Emit(BlobStream);
+ }
+
+ objc_property_block::ObjCPropertyDataLayout ObjCPropertyData(Stream);
+ ObjCPropertyData.emit(Scratch, Offset, HashTableBlob);
+ }
+}
+
+namespace {
+unsigned getFunctionInfoSize(const FunctionInfo &);
+void emitFunctionInfo(llvm::raw_ostream &, const FunctionInfo &);
+
+/// Used to serialize the on-disk Objective-C method table.
+class ObjCMethodTableInfo
+ : public VersionedTableInfo<ObjCMethodTableInfo,
+ std::tuple<unsigned, unsigned, char>,
+ ObjCMethodInfo> {
+public:
+ unsigned getKeyLength(key_type_ref) {
+ return sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint8_t);
+ }
+
+ void EmitKey(raw_ostream &OS, key_type_ref Key, unsigned) {
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+ writer.write<uint32_t>(std::get<0>(Key));
+ writer.write<uint32_t>(std::get<1>(Key));
+ writer.write<uint8_t>(std::get<2>(Key));
+ }
+
+ hash_value_type ComputeHash(key_type_ref key) {
+ return static_cast<size_t>(llvm::hash_value(key));
+ }
+
+ unsigned getUnversionedInfoSize(const ObjCMethodInfo &OMI) {
+ return getFunctionInfoSize(OMI) + 1;
+ }
+
+ void emitUnversionedInfo(raw_ostream &OS, const ObjCMethodInfo &OMI) {
+ uint8_t flags = 0;
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+ flags = (flags << 1) | OMI.DesignatedInit;
+ flags = (flags << 1) | OMI.RequiredInit;
+ writer.write<uint8_t>(flags);
+
+ emitFunctionInfo(OS, OMI);
+ }
+};
+} // namespace
+
+void APINotesWriter::Implementation::writeObjCMethodBlock(
+ llvm::BitstreamWriter &Stream) {
+ llvm::BCBlockRAII Scope(Stream, OBJC_METHOD_BLOCK_ID, 3);
+
+ if (ObjCMethods.empty())
+ return;
+
+ {
+ llvm::SmallString<4096> HashTableBlob;
+ uint32_t Offset;
+ {
+ llvm::OnDiskChainedHashTableGenerator<ObjCMethodTableInfo> Generator;
+ for (auto &OM : ObjCMethods)
+ Generator.insert(OM.first, OM.second);
+
+ llvm::raw_svector_ostream BlobStream(HashTableBlob);
+ // Make sure that no bucket is at offset 0
+ llvm::support::endian::write<uint32_t>(BlobStream, 0,
+ llvm::endianness::little);
+ Offset = Generator.Emit(BlobStream);
+ }
+
+ objc_method_block::ObjCMethodDataLayout ObjCMethodData(Stream);
+ ObjCMethodData.emit(Scratch, Offset, HashTableBlob);
+ }
+}
+
+namespace {
+/// Used to serialize the on-disk Objective-C selector table.
+class ObjCSelectorTableInfo {
+public:
+ using key_type = StoredObjCSelector;
+ using key_type_ref = const key_type &;
+ using data_type = SelectorID;
+ using data_type_ref = data_type;
+ using hash_value_type = unsigned;
+ using offset_type = unsigned;
+
+ hash_value_type ComputeHash(key_type_ref Key) {
+ return llvm::DenseMapInfo<StoredObjCSelector>::getHashValue(Key);
+ }
+
+ std::pair<unsigned, unsigned>
+ EmitKeyDataLength(raw_ostream &OS, key_type_ref Key, data_type_ref) {
+ uint32_t KeyLength =
+ sizeof(uint16_t) + sizeof(uint32_t) * Key.Identifiers.size();
+ uint32_t DataLength = sizeof(uint32_t);
+
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+ writer.write<uint16_t>(KeyLength);
+ writer.write<uint16_t>(DataLength);
+ return {KeyLength, DataLength};
+ }
+
+ void EmitKey(raw_ostream &OS, key_type_ref Key, unsigned) {
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+ writer.write<uint16_t>(Key.NumArgs);
+ for (auto Identifier : Key.Identifiers)
+ writer.write<uint32_t>(Identifier);
+ }
+
+ void EmitData(raw_ostream &OS, key_type_ref, data_type_ref Data, unsigned) {
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+ writer.write<uint32_t>(Data);
+ }
+};
+} // namespace
+
+void APINotesWriter::Implementation::writeObjCSelectorBlock(
+ llvm::BitstreamWriter &Stream) {
+ llvm::BCBlockRAII Scope(Stream, OBJC_SELECTOR_BLOCK_ID, 3);
+
+ if (SelectorIDs.empty())
+ return;
+
+ {
+ llvm::SmallString<4096> HashTableBlob;
+ uint32_t Offset;
+ {
+ llvm::OnDiskChainedHashTableGenerator<ObjCSelectorTableInfo> Generator;
+ for (auto &S : SelectorIDs)
+ Generator.insert(S.first, S.second);
+
+ llvm::raw_svector_ostream BlobStream(HashTableBlob);
+ // Make sure that no bucket is at offset 0
+ llvm::support::endian::write<uint32_t>(BlobStream, 0,
+ llvm::endianness::little);
+ Offset = Generator.Emit(BlobStream);
+ }
+
+ objc_selector_block::ObjCSelectorDataLayout ObjCSelectorData(Stream);
+ ObjCSelectorData.emit(Scratch, Offset, HashTableBlob);
+ }
+}
+
+namespace {
+/// Used to serialize the on-disk global variable table.
+class GlobalVariableTableInfo
+ : public VersionedTableInfo<GlobalVariableTableInfo, ContextTableKey,
+ GlobalVariableInfo> {
+public:
+ unsigned getKeyLength(key_type_ref) {
+ return sizeof(uint32_t) + sizeof(uint8_t) + sizeof(uint32_t);
+ }
+
+ void EmitKey(raw_ostream &OS, key_type_ref Key, unsigned) {
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+ writer.write<uint32_t>(Key.parentContextID);
+ writer.write<uint8_t>(Key.contextKind);
+ writer.write<uint32_t>(Key.contextID);
+ }
+
+ hash_value_type ComputeHash(key_type_ref Key) {
+ return static_cast<size_t>(Key.hashValue());
+ }
+
+ unsigned getUnversionedInfoSize(const GlobalVariableInfo &GVI) {
+ return getVariableInfoSize(GVI);
+ }
+
+ void emitUnversionedInfo(raw_ostream &OS, const GlobalVariableInfo &GVI) {
+ emitVariableInfo(OS, GVI);
+ }
+};
+} // namespace
+
+void APINotesWriter::Implementation::writeGlobalVariableBlock(
+ llvm::BitstreamWriter &Stream) {
+ llvm::BCBlockRAII Scope(Stream, GLOBAL_VARIABLE_BLOCK_ID, 3);
+
+ if (GlobalVariables.empty())
+ return;
+
+ {
+ llvm::SmallString<4096> HashTableBlob;
+ uint32_t Offset;
+ {
+ llvm::OnDiskChainedHashTableGenerator<GlobalVariableTableInfo> Generator;
+ for (auto &GV : GlobalVariables)
+ Generator.insert(GV.first, GV.second);
+
+ llvm::raw_svector_ostream BlobStream(HashTableBlob);
+ // Make sure that no bucket is at offset 0
+ llvm::support::endian::write<uint32_t>(BlobStream, 0,
+ llvm::endianness::little);
+ Offset = Generator.Emit(BlobStream);
+ }
+
+ global_variable_block::GlobalVariableDataLayout GlobalVariableData(Stream);
+ GlobalVariableData.emit(Scratch, Offset, HashTableBlob);
+ }
+}
+
+namespace {
+unsigned getParamInfoSize(const ParamInfo &PI) {
+ return getVariableInfoSize(PI) + 1;
+}
+
+void emitParamInfo(raw_ostream &OS, const ParamInfo &PI) {
+ emitVariableInfo(OS, PI);
+
+ uint8_t flags = 0;
+ if (auto noescape = PI.isNoEscape()) {
+ flags |= 0x01;
+ if (*noescape)
+ flags |= 0x02;
+ }
+ flags <<= 3;
+ if (auto RCC = PI.getRetainCountConvention())
+ flags |= static_cast<uint8_t>(RCC.value()) + 1;
+
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+ writer.write<uint8_t>(flags);
+}
+
+/// Retrieve the serialized size of the given FunctionInfo, for use in on-disk
+/// hash tables.
+unsigned getFunctionInfoSize(const FunctionInfo &FI) {
+ unsigned size = getCommonEntityInfoSize(FI) + 2 + sizeof(uint64_t);
+ size += sizeof(uint16_t);
+ for (const auto &P : FI.Params)
+ size += getParamInfoSize(P);
+ size += sizeof(uint16_t) + FI.ResultType.size();
+ return size;
+}
+
+/// Emit a serialized representation of the function information.
+void emitFunctionInfo(raw_ostream &OS, const FunctionInfo &FI) {
+ emitCommonEntityInfo(OS, FI);
+
+ uint8_t flags = 0;
+ flags |= FI.NullabilityAudited;
+ flags <<= 3;
+ if (auto RCC = FI.getRetainCountConvention())
+ flags |= static_cast<uint8_t>(RCC.value()) + 1;
+
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+
+ writer.write<uint8_t>(flags);
+ writer.write<uint8_t>(FI.NumAdjustedNullable);
+ writer.write<uint64_t>(FI.NullabilityPayload);
+
+ writer.write<uint16_t>(FI.Params.size());
+ for (const auto &PI : FI.Params)
+ emitParamInfo(OS, PI);
+
+ writer.write<uint16_t>(FI.ResultType.size());
+ writer.write(ArrayRef<char>{FI.ResultType.data(), FI.ResultType.size()});
+}
+
+/// Used to serialize the on-disk global function table.
+class GlobalFunctionTableInfo
+ : public VersionedTableInfo<GlobalFunctionTableInfo, ContextTableKey,
+ GlobalFunctionInfo> {
+public:
+ unsigned getKeyLength(key_type_ref) {
+ return sizeof(uint32_t) + sizeof(uint8_t) + sizeof(uint32_t);
+ }
+
+ void EmitKey(raw_ostream &OS, key_type_ref Key, unsigned) {
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+ writer.write<uint32_t>(Key.parentContextID);
+ writer.write<uint8_t>(Key.contextKind);
+ writer.write<uint32_t>(Key.contextID);
+ }
+
+ hash_value_type ComputeHash(key_type_ref Key) {
+ return static_cast<size_t>(Key.hashValue());
+ }
+
+ unsigned getUnversionedInfoSize(const GlobalFunctionInfo &GFI) {
+ return getFunctionInfoSize(GFI);
+ }
+
+ void emitUnversionedInfo(raw_ostream &OS, const GlobalFunctionInfo &GFI) {
+ emitFunctionInfo(OS, GFI);
+ }
+};
+} // namespace
+
+void APINotesWriter::Implementation::writeGlobalFunctionBlock(
+ llvm::BitstreamWriter &Stream) {
+ llvm::BCBlockRAII Scope(Stream, GLOBAL_FUNCTION_BLOCK_ID, 3);
+
+ if (GlobalFunctions.empty())
+ return;
+
+ {
+ llvm::SmallString<4096> HashTableBlob;
+ uint32_t Offset;
+ {
+ llvm::OnDiskChainedHashTableGenerator<GlobalFunctionTableInfo> Generator;
+ for (auto &F : GlobalFunctions)
+ Generator.insert(F.first, F.second);
+
+ llvm::raw_svector_ostream BlobStream(HashTableBlob);
+ // Make sure that no bucket is at offset 0
+ llvm::support::endian::write<uint32_t>(BlobStream, 0,
+ llvm::endianness::little);
+ Offset = Generator.Emit(BlobStream);
+ }
+
+ global_function_block::GlobalFunctionDataLayout GlobalFunctionData(Stream);
+ GlobalFunctionData.emit(Scratch, Offset, HashTableBlob);
+ }
+}
+
+namespace {
+/// Used to serialize the on-disk global enum constant.
+class EnumConstantTableInfo
+ : public VersionedTableInfo<EnumConstantTableInfo, unsigned,
+ EnumConstantInfo> {
+public:
+ unsigned getKeyLength(key_type_ref) { return sizeof(uint32_t); }
+
+ void EmitKey(raw_ostream &OS, key_type_ref Key, unsigned) {
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+ writer.write<uint32_t>(Key);
+ }
+
+ hash_value_type ComputeHash(key_type_ref Key) {
+ return static_cast<size_t>(llvm::hash_value(Key));
+ }
+
+ unsigned getUnversionedInfoSize(const EnumConstantInfo &ECI) {
+ return getCommonEntityInfoSize(ECI);
+ }
+
+ void emitUnversionedInfo(raw_ostream &OS, const EnumConstantInfo &ECI) {
+ emitCommonEntityInfo(OS, ECI);
+ }
+};
+} // namespace
+
+void APINotesWriter::Implementation::writeEnumConstantBlock(
+ llvm::BitstreamWriter &Stream) {
+ llvm::BCBlockRAII Scope(Stream, ENUM_CONSTANT_BLOCK_ID, 3);
+
+ if (EnumConstants.empty())
+ return;
+
+ {
+ llvm::SmallString<4096> HashTableBlob;
+ uint32_t Offset;
+ {
+ llvm::OnDiskChainedHashTableGenerator<EnumConstantTableInfo> Generator;
+ for (auto &EC : EnumConstants)
+ Generator.insert(EC.first, EC.second);
+
+ llvm::raw_svector_ostream BlobStream(HashTableBlob);
+ // Make sure that no bucket is at offset 0
+ llvm::support::endian::write<uint32_t>(BlobStream, 0,
+ llvm::endianness::little);
+ Offset = Generator.Emit(BlobStream);
+ }
+
+ enum_constant_block::EnumConstantDataLayout EnumConstantData(Stream);
+ EnumConstantData.emit(Scratch, Offset, HashTableBlob);
+ }
+}
+
+namespace {
+template <typename Derived, typename UnversionedDataType>
+class CommonTypeTableInfo
+ : public VersionedTableInfo<Derived, ContextTableKey, UnversionedDataType> {
+public:
+ using key_type_ref = typename CommonTypeTableInfo::key_type_ref;
+ using hash_value_type = typename CommonTypeTableInfo::hash_value_type;
+
+ unsigned getKeyLength(key_type_ref) {
+ return sizeof(uint32_t) + sizeof(uint8_t) + sizeof(IdentifierID);
+ }
+
+ void EmitKey(raw_ostream &OS, key_type_ref Key, unsigned) {
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+ writer.write<uint32_t>(Key.parentContextID);
+ writer.write<uint8_t>(Key.contextKind);
+ writer.write<IdentifierID>(Key.contextID);
+ }
+
+ hash_value_type ComputeHash(key_type_ref Key) {
+ return static_cast<size_t>(Key.hashValue());
+ }
+
+ unsigned getUnversionedInfoSize(const UnversionedDataType &UDT) {
+ return getCommonTypeInfoSize(UDT);
+ }
+
+ void emitUnversionedInfo(raw_ostream &OS, const UnversionedDataType &UDT) {
+ emitCommonTypeInfo(OS, UDT);
+ }
+};
+
+/// Used to serialize the on-disk tag table.
+class TagTableInfo : public CommonTypeTableInfo<TagTableInfo, TagInfo> {
+public:
+ unsigned getUnversionedInfoSize(const TagInfo &TI) {
+ return 2 + (TI.SwiftImportAs ? TI.SwiftImportAs->size() : 0) +
+ 2 + (TI.SwiftRetainOp ? TI.SwiftRetainOp->size() : 0) +
+ 2 + (TI.SwiftReleaseOp ? TI.SwiftReleaseOp->size() : 0) +
+ 1 + getCommonTypeInfoSize(TI);
+ }
+
+ void emitUnversionedInfo(raw_ostream &OS, const TagInfo &TI) {
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+
+ uint8_t Flags = 0;
+ if (auto extensibility = TI.EnumExtensibility) {
+ Flags |= static_cast<uint8_t>(extensibility.value()) + 1;
+ assert((Flags < (1 << 2)) && "must fit in two bits");
+ }
+
+ Flags <<= 2;
+ if (auto value = TI.isFlagEnum())
+ Flags |= (value.value() << 1 | 1 << 0);
+
+ writer.write<uint8_t>(Flags);
+
+ if (auto ImportAs = TI.SwiftImportAs) {
+ writer.write<uint16_t>(ImportAs->size() + 1);
+ OS.write(ImportAs->c_str(), ImportAs->size());
+ } else {
+ writer.write<uint16_t>(0);
+ }
+ if (auto RetainOp = TI.SwiftRetainOp) {
+ writer.write<uint16_t>(RetainOp->size() + 1);
+ OS.write(RetainOp->c_str(), RetainOp->size());
+ } else {
+ writer.write<uint16_t>(0);
+ }
+ if (auto ReleaseOp = TI.SwiftReleaseOp) {
+ writer.write<uint16_t>(ReleaseOp->size() + 1);
+ OS.write(ReleaseOp->c_str(), ReleaseOp->size());
+ } else {
+ writer.write<uint16_t>(0);
+ }
+
+ emitCommonTypeInfo(OS, TI);
+ }
+};
+} // namespace
+
+void APINotesWriter::Implementation::writeTagBlock(
+ llvm::BitstreamWriter &Stream) {
+ llvm::BCBlockRAII Scope(Stream, TAG_BLOCK_ID, 3);
+
+ if (Tags.empty())
+ return;
+
+ {
+ llvm::SmallString<4096> HashTableBlob;
+ uint32_t Offset;
+ {
+ llvm::OnDiskChainedHashTableGenerator<TagTableInfo> Generator;
+ for (auto &T : Tags)
+ Generator.insert(T.first, T.second);
+
+ llvm::raw_svector_ostream BlobStream(HashTableBlob);
+ // Make sure that no bucket is at offset 0
+ llvm::support::endian::write<uint32_t>(BlobStream, 0,
+ llvm::endianness::little);
+ Offset = Generator.Emit(BlobStream);
+ }
+
+ tag_block::TagDataLayout TagData(Stream);
+ TagData.emit(Scratch, Offset, HashTableBlob);
+ }
+}
+
+namespace {
+/// Used to serialize the on-disk typedef table.
+class TypedefTableInfo
+ : public CommonTypeTableInfo<TypedefTableInfo, TypedefInfo> {
+public:
+ unsigned getUnversionedInfoSize(const TypedefInfo &TI) {
+ return 1 + getCommonTypeInfoSize(TI);
+ }
+
+ void emitUnversionedInfo(raw_ostream &OS, const TypedefInfo &TI) {
+ llvm::support::endian::Writer writer(OS, llvm::endianness::little);
+
+ uint8_t Flags = 0;
+ if (auto swiftWrapper = TI.SwiftWrapper)
+ Flags |= static_cast<uint8_t>(*swiftWrapper) + 1;
+
+ writer.write<uint8_t>(Flags);
+
+ emitCommonTypeInfo(OS, TI);
+ }
+};
+} // namespace
+
+void APINotesWriter::Implementation::writeTypedefBlock(
+ llvm::BitstreamWriter &Stream) {
+ llvm::BCBlockRAII Scope(Stream, TYPEDEF_BLOCK_ID, 3);
+
+ if (Typedefs.empty())
+ return;
+
+ {
+ llvm::SmallString<4096> HashTableBlob;
+ uint32_t Offset;
+ {
+ llvm::OnDiskChainedHashTableGenerator<TypedefTableInfo> Generator;
+ for (auto &T : Typedefs)
+ Generator.insert(T.first, T.second);
+
+ llvm::raw_svector_ostream BlobStream(HashTableBlob);
+ // Make sure that no bucket is at offset 0
+ llvm::support::endian::write<uint32_t>(BlobStream, 0,
+ llvm::endianness::little);
+ Offset = Generator.Emit(BlobStream);
+ }
+
+ typedef_block::TypedefDataLayout TypedefData(Stream);
+ TypedefData.emit(Scratch, Offset, HashTableBlob);
+ }
+}
+
+// APINotesWriter
+
+APINotesWriter::APINotesWriter(llvm::StringRef ModuleName, const FileEntry *SF)
+ : Implementation(new class Implementation(ModuleName, SF)) {}
+
+APINotesWriter::~APINotesWriter() = default;
+
+void APINotesWriter::writeToStream(llvm::raw_ostream &OS) {
+ Implementation->writeToStream(OS);
+}
+
+ContextID APINotesWriter::addObjCContext(std::optional<ContextID> ParentCtxID,
+ StringRef Name, ContextKind Kind,
+ const ObjCContextInfo &Info,
+ VersionTuple SwiftVersion) {
+ IdentifierID NameID = Implementation->getIdentifier(Name);
+
+ uint32_t RawParentCtxID = ParentCtxID ? ParentCtxID->Value : -1;
+ ContextTableKey Key(RawParentCtxID, static_cast<uint8_t>(Kind), NameID);
+ auto Known = Implementation->ObjCContexts.find(Key);
+ if (Known == Implementation->ObjCContexts.end()) {
+ unsigned NextID = Implementation->ObjCContexts.size() + 1;
+
+ Implementation::VersionedSmallVector<ObjCContextInfo> EmptyVersionedInfo;
+ Known = Implementation->ObjCContexts
+ .insert(std::make_pair(
+ Key, std::make_pair(NextID, EmptyVersionedInfo)))
+ .first;
+
+ Implementation->ObjCContextNames[NextID] = NameID;
+ Implementation->ParentContexts[NextID] = RawParentCtxID;
+ }
+
+ // Add this version information.
+ auto &VersionedVec = Known->second.second;
+ bool Found = false;
+ for (auto &Versioned : VersionedVec) {
+ if (Versioned.first == SwiftVersion) {
+ Versioned.second |= Info;
+ Found = true;
+ break;
+ }
+ }
+
+ if (!Found)
+ VersionedVec.push_back({SwiftVersion, Info});
+
+ return ContextID(Known->second.first);
+}
+
+void APINotesWriter::addObjCProperty(ContextID CtxID, StringRef Name,
+ bool IsInstanceProperty,
+ const ObjCPropertyInfo &Info,
+ VersionTuple SwiftVersion) {
+ IdentifierID NameID = Implementation->getIdentifier(Name);
+ Implementation
+ ->ObjCProperties[std::make_tuple(CtxID.Value, NameID, IsInstanceProperty)]
+ .push_back({SwiftVersion, Info});
+}
+
+void APINotesWriter::addObjCMethod(ContextID CtxID, ObjCSelectorRef Selector,
+ bool IsInstanceMethod,
+ const ObjCMethodInfo &Info,
+ VersionTuple SwiftVersion) {
+ SelectorID SelID = Implementation->getSelector(Selector);
+ auto Key = std::tuple<unsigned, unsigned, char>{CtxID.Value, SelID,
+ IsInstanceMethod};
+ Implementation->ObjCMethods[Key].push_back({SwiftVersion, Info});
+
+ // If this method is a designated initializer, update the class to note that
+ // it has designated initializers.
+ if (Info.DesignatedInit) {
+ assert(Implementation->ParentContexts.contains(CtxID.Value));
+ uint32_t ParentCtxID = Implementation->ParentContexts[CtxID.Value];
+ ContextTableKey CtxKey(ParentCtxID,
+ static_cast<uint8_t>(ContextKind::ObjCClass),
+ Implementation->ObjCContextNames[CtxID.Value]);
+ assert(Implementation->ObjCContexts.contains(CtxKey));
+ auto &VersionedVec = Implementation->ObjCContexts[CtxKey].second;
+ bool Found = false;
+ for (auto &Versioned : VersionedVec) {
+ if (Versioned.first == SwiftVersion) {
+ Versioned.second.setHasDesignatedInits(true);
+ Found = true;
+ break;
+ }
+ }
+
+ if (!Found) {
+ VersionedVec.push_back({SwiftVersion, ObjCContextInfo()});
+ VersionedVec.back().second.setHasDesignatedInits(true);
+ }
+ }
+}
+
+void APINotesWriter::addGlobalVariable(std::optional<Context> Ctx,
+ llvm::StringRef Name,
+ const GlobalVariableInfo &Info,
+ VersionTuple SwiftVersion) {
+ IdentifierID VariableID = Implementation->getIdentifier(Name);
+ ContextTableKey Key(Ctx, VariableID);
+ Implementation->GlobalVariables[Key].push_back({SwiftVersion, Info});
+}
+
+void APINotesWriter::addGlobalFunction(std::optional<Context> Ctx,
+ llvm::StringRef Name,
+ const GlobalFunctionInfo &Info,
+ VersionTuple SwiftVersion) {
+ IdentifierID NameID = Implementation->getIdentifier(Name);
+ ContextTableKey Key(Ctx, NameID);
+ Implementation->GlobalFunctions[Key].push_back({SwiftVersion, Info});
+}
+
+void APINotesWriter::addEnumConstant(llvm::StringRef Name,
+ const EnumConstantInfo &Info,
+ VersionTuple SwiftVersion) {
+ IdentifierID EnumConstantID = Implementation->getIdentifier(Name);
+ Implementation->EnumConstants[EnumConstantID].push_back({SwiftVersion, Info});
+}
+
+void APINotesWriter::addTag(std::optional<Context> Ctx, llvm::StringRef Name,
+ const TagInfo &Info, VersionTuple SwiftVersion) {
+ IdentifierID TagID = Implementation->getIdentifier(Name);
+ ContextTableKey Key(Ctx, TagID);
+ Implementation->Tags[Key].push_back({SwiftVersion, Info});
+}
+
+void APINotesWriter::addTypedef(std::optional<Context> Ctx,
+ llvm::StringRef Name, const TypedefInfo &Info,
+ VersionTuple SwiftVersion) {
+ IdentifierID TypedefID = Implementation->getIdentifier(Name);
+ ContextTableKey Key(Ctx, TypedefID);
+ Implementation->Typedefs[Key].push_back({SwiftVersion, Info});
+}
+} // namespace api_notes
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/APINotes/APINotesYAMLCompiler.cpp b/contrib/llvm-project/clang/lib/APINotes/APINotesYAMLCompiler.cpp
index 75100fde59b8..57d6da7a1775 100644
--- a/contrib/llvm-project/clang/lib/APINotes/APINotesYAMLCompiler.cpp
+++ b/contrib/llvm-project/clang/lib/APINotes/APINotesYAMLCompiler.cpp
@@ -14,22 +14,23 @@
//
#include "clang/APINotes/APINotesYAMLCompiler.h"
+#include "clang/APINotes/APINotesWriter.h"
#include "clang/APINotes/Types.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/Specifiers.h"
-#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/VersionTuple.h"
-#include "llvm/Support/YAMLParser.h"
#include "llvm/Support/YAMLTraits.h"
+#include <optional>
#include <vector>
+
using namespace clang;
using namespace api_notes;
namespace {
enum class APIAvailability {
Available = 0,
- OSX,
- IOS,
None,
NonSwift,
};
@@ -39,8 +40,6 @@ namespace llvm {
namespace yaml {
template <> struct ScalarEnumerationTraits<APIAvailability> {
static void enumeration(IO &IO, APIAvailability &AA) {
- IO.enumCase(AA, "OSX", APIAvailability::OSX);
- IO.enumCase(AA, "iOS", APIAvailability::IOS);
IO.enumCase(AA, "none", APIAvailability::None);
IO.enumCase(AA, "nonswift", APIAvailability::NonSwift);
IO.enumCase(AA, "available", APIAvailability::Available);
@@ -70,9 +69,9 @@ template <> struct ScalarEnumerationTraits<MethodKind> {
namespace {
struct Param {
unsigned Position;
- Optional<bool> NoEscape = false;
- Optional<NullabilityKind> Nullability;
- Optional<RetainCountConventionKind> RetainCountConvention;
+ std::optional<bool> NoEscape = false;
+ std::optional<NullabilityKind> Nullability;
+ std::optional<RetainCountConventionKind> RetainCountConvention;
StringRef Type;
};
@@ -119,7 +118,7 @@ template <> struct ScalarEnumerationTraits<RetainCountConventionKind> {
template <> struct MappingTraits<Param> {
static void mapping(IO &IO, Param &P) {
IO.mapRequired("Position", P.Position);
- IO.mapOptional("Nullability", P.Nullability, llvm::None);
+ IO.mapOptional("Nullability", P.Nullability, std::nullopt);
IO.mapOptional("RetainCountConvention", P.RetainCountConvention);
IO.mapOptional("NoEscape", P.NoEscape);
IO.mapOptional("Type", P.Type, StringRef(""));
@@ -151,10 +150,10 @@ struct Method {
MethodKind Kind;
ParamsSeq Params;
NullabilitySeq Nullability;
- Optional<NullabilityKind> NullabilityOfRet;
- Optional<RetainCountConventionKind> RetainCountConvention;
+ std::optional<NullabilityKind> NullabilityOfRet;
+ std::optional<RetainCountConventionKind> RetainCountConvention;
AvailabilityItem Availability;
- Optional<bool> SwiftPrivate;
+ std::optional<bool> SwiftPrivate;
StringRef SwiftName;
FactoryAsInitKind FactoryAsInit = FactoryAsInitKind::Infer;
bool DesignatedInit = false;
@@ -183,7 +182,7 @@ template <> struct MappingTraits<Method> {
IO.mapRequired("MethodKind", M.Kind);
IO.mapOptional("Parameters", M.Params);
IO.mapOptional("Nullability", M.Nullability);
- IO.mapOptional("NullabilityOfRet", M.NullabilityOfRet, llvm::None);
+ IO.mapOptional("NullabilityOfRet", M.NullabilityOfRet, std::nullopt);
IO.mapOptional("RetainCountConvention", M.RetainCountConvention);
IO.mapOptional("Availability", M.Availability.Mode,
APIAvailability::Available);
@@ -202,12 +201,12 @@ template <> struct MappingTraits<Method> {
namespace {
struct Property {
StringRef Name;
- llvm::Optional<MethodKind> Kind;
- llvm::Optional<NullabilityKind> Nullability;
+ std::optional<MethodKind> Kind;
+ std::optional<NullabilityKind> Nullability;
AvailabilityItem Availability;
- Optional<bool> SwiftPrivate;
+ std::optional<bool> SwiftPrivate;
StringRef SwiftName;
- Optional<bool> SwiftImportAsAccessors;
+ std::optional<bool> SwiftImportAsAccessors;
StringRef Type;
};
@@ -222,7 +221,7 @@ template <> struct MappingTraits<Property> {
static void mapping(IO &IO, Property &P) {
IO.mapRequired("Name", P.Name);
IO.mapOptional("PropertyKind", P.Kind);
- IO.mapOptional("Nullability", P.Nullability, llvm::None);
+ IO.mapOptional("Nullability", P.Nullability, std::nullopt);
IO.mapOptional("Availability", P.Availability.Mode,
APIAvailability::Available);
IO.mapOptional("AvailabilityMsg", P.Availability.Msg, StringRef(""));
@@ -240,12 +239,12 @@ struct Class {
StringRef Name;
bool AuditedForNullability = false;
AvailabilityItem Availability;
- Optional<bool> SwiftPrivate;
+ std::optional<bool> SwiftPrivate;
StringRef SwiftName;
- Optional<StringRef> SwiftBridge;
- Optional<StringRef> NSErrorDomain;
- Optional<bool> SwiftImportAsNonGeneric;
- Optional<bool> SwiftObjCMembers;
+ std::optional<StringRef> SwiftBridge;
+ std::optional<StringRef> NSErrorDomain;
+ std::optional<bool> SwiftImportAsNonGeneric;
+ std::optional<bool> SwiftObjCMembers;
MethodsSeq Methods;
PropertiesSeq Properties;
};
@@ -282,10 +281,10 @@ struct Function {
StringRef Name;
ParamsSeq Params;
NullabilitySeq Nullability;
- Optional<NullabilityKind> NullabilityOfRet;
- Optional<api_notes::RetainCountConventionKind> RetainCountConvention;
+ std::optional<NullabilityKind> NullabilityOfRet;
+ std::optional<api_notes::RetainCountConventionKind> RetainCountConvention;
AvailabilityItem Availability;
- Optional<bool> SwiftPrivate;
+ std::optional<bool> SwiftPrivate;
StringRef SwiftName;
StringRef Type;
StringRef ResultType;
@@ -303,7 +302,7 @@ template <> struct MappingTraits<Function> {
IO.mapRequired("Name", F.Name);
IO.mapOptional("Parameters", F.Params);
IO.mapOptional("Nullability", F.Nullability);
- IO.mapOptional("NullabilityOfRet", F.NullabilityOfRet, llvm::None);
+ IO.mapOptional("NullabilityOfRet", F.NullabilityOfRet, std::nullopt);
IO.mapOptional("RetainCountConvention", F.RetainCountConvention);
IO.mapOptional("Availability", F.Availability.Mode,
APIAvailability::Available);
@@ -319,9 +318,9 @@ template <> struct MappingTraits<Function> {
namespace {
struct GlobalVariable {
StringRef Name;
- llvm::Optional<NullabilityKind> Nullability;
+ std::optional<NullabilityKind> Nullability;
AvailabilityItem Availability;
- Optional<bool> SwiftPrivate;
+ std::optional<bool> SwiftPrivate;
StringRef SwiftName;
StringRef Type;
};
@@ -336,7 +335,7 @@ namespace yaml {
template <> struct MappingTraits<GlobalVariable> {
static void mapping(IO &IO, GlobalVariable &GV) {
IO.mapRequired("Name", GV.Name);
- IO.mapOptional("Nullability", GV.Nullability, llvm::None);
+ IO.mapOptional("Nullability", GV.Nullability, std::nullopt);
IO.mapOptional("Availability", GV.Availability.Mode,
APIAvailability::Available);
IO.mapOptional("AvailabilityMsg", GV.Availability.Msg, StringRef(""));
@@ -352,7 +351,7 @@ namespace {
struct EnumConstant {
StringRef Name;
AvailabilityItem Availability;
- Optional<bool> SwiftPrivate;
+ std::optional<bool> SwiftPrivate;
StringRef SwiftName;
};
@@ -411,12 +410,15 @@ struct Tag {
StringRef Name;
AvailabilityItem Availability;
StringRef SwiftName;
- Optional<bool> SwiftPrivate;
- Optional<StringRef> SwiftBridge;
- Optional<StringRef> NSErrorDomain;
- Optional<EnumExtensibilityKind> EnumExtensibility;
- Optional<bool> FlagEnum;
- Optional<EnumConvenienceAliasKind> EnumConvenienceKind;
+ std::optional<bool> SwiftPrivate;
+ std::optional<StringRef> SwiftBridge;
+ std::optional<StringRef> NSErrorDomain;
+ std::optional<std::string> SwiftImportAs;
+ std::optional<std::string> SwiftRetainOp;
+ std::optional<std::string> SwiftReleaseOp;
+ std::optional<EnumExtensibilityKind> EnumExtensibility;
+ std::optional<bool> FlagEnum;
+ std::optional<EnumConvenienceAliasKind> EnumConvenienceKind;
};
typedef std::vector<Tag> TagsSeq;
@@ -444,6 +446,9 @@ template <> struct MappingTraits<Tag> {
IO.mapOptional("SwiftName", T.SwiftName, StringRef(""));
IO.mapOptional("SwiftBridge", T.SwiftBridge);
IO.mapOptional("NSErrorDomain", T.NSErrorDomain);
+ IO.mapOptional("SwiftImportAs", T.SwiftImportAs);
+ IO.mapOptional("SwiftReleaseOp", T.SwiftReleaseOp);
+ IO.mapOptional("SwiftRetainOp", T.SwiftRetainOp);
IO.mapOptional("EnumExtensibility", T.EnumExtensibility);
IO.mapOptional("FlagEnum", T.FlagEnum);
IO.mapOptional("EnumKind", T.EnumConvenienceKind);
@@ -457,10 +462,10 @@ struct Typedef {
StringRef Name;
AvailabilityItem Availability;
StringRef SwiftName;
- Optional<bool> SwiftPrivate;
- Optional<StringRef> SwiftBridge;
- Optional<StringRef> NSErrorDomain;
- Optional<SwiftNewTypeKind> SwiftType;
+ std::optional<bool> SwiftPrivate;
+ std::optional<StringRef> SwiftBridge;
+ std::optional<StringRef> NSErrorDomain;
+ std::optional<SwiftNewTypeKind> SwiftType;
};
typedef std::vector<Typedef> TypedefsSeq;
@@ -495,6 +500,9 @@ template <> struct MappingTraits<Typedef> {
} // namespace llvm
namespace {
+struct Namespace;
+typedef std::vector<Namespace> NamespacesSeq;
+
struct TopLevelItems {
ClassesSeq Classes;
ClassesSeq Protocols;
@@ -503,6 +511,7 @@ struct TopLevelItems {
EnumConstantsSeq EnumConstants;
TagsSeq Tags;
TypedefsSeq Typedefs;
+ NamespacesSeq Namespaces;
};
} // namespace
@@ -516,11 +525,40 @@ static void mapTopLevelItems(IO &IO, TopLevelItems &TLI) {
IO.mapOptional("Enumerators", TLI.EnumConstants);
IO.mapOptional("Tags", TLI.Tags);
IO.mapOptional("Typedefs", TLI.Typedefs);
+ IO.mapOptional("Namespaces", TLI.Namespaces);
}
} // namespace yaml
} // namespace llvm
namespace {
+struct Namespace {
+ StringRef Name;
+ AvailabilityItem Availability;
+ StringRef SwiftName;
+ std::optional<bool> SwiftPrivate;
+ TopLevelItems Items;
+};
+} // namespace
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(Namespace)
+
+namespace llvm {
+namespace yaml {
+template <> struct MappingTraits<Namespace> {
+ static void mapping(IO &IO, Namespace &T) {
+ IO.mapRequired("Name", T.Name);
+ IO.mapOptional("Availability", T.Availability.Mode,
+ APIAvailability::Available);
+ IO.mapOptional("AvailabilityMsg", T.Availability.Msg, StringRef(""));
+ IO.mapOptional("SwiftPrivate", T.SwiftPrivate);
+ IO.mapOptional("SwiftName", T.SwiftName, StringRef(""));
+ mapTopLevelItems(IO, T.Items);
+ }
+};
+} // namespace yaml
+} // namespace llvm
+
+namespace {
struct Versioned {
VersionTuple Version;
TopLevelItems Items;
@@ -549,7 +587,7 @@ struct Module {
TopLevelItems TopLevel;
VersionedSeq SwiftVersions;
- llvm::Optional<bool> SwiftInferImportAsMember = {llvm::None};
+ std::optional<bool> SwiftInferImportAsMember;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void dump() /*const*/;
@@ -600,3 +638,476 @@ bool clang::api_notes::parseAndDumpAPINotes(StringRef YI,
return false;
}
+
+namespace {
+using namespace api_notes;
+
+class YAMLConverter {
+ const Module &M;
+ APINotesWriter Writer;
+ llvm::raw_ostream &OS;
+ llvm::SourceMgr::DiagHandlerTy DiagHandler;
+ void *DiagHandlerCtxt;
+ bool ErrorOccured;
+
+ /// Emit a diagnostic
+ bool emitError(llvm::Twine Message) {
+ DiagHandler(
+ llvm::SMDiagnostic("", llvm::SourceMgr::DK_Error, Message.str()),
+ DiagHandlerCtxt);
+ ErrorOccured = true;
+ return true;
+ }
+
+public:
+ YAMLConverter(const Module &TheModule, const FileEntry *SourceFile,
+ llvm::raw_ostream &OS,
+ llvm::SourceMgr::DiagHandlerTy DiagHandler,
+ void *DiagHandlerCtxt)
+ : M(TheModule), Writer(TheModule.Name, SourceFile), OS(OS),
+ DiagHandler(DiagHandler), DiagHandlerCtxt(DiagHandlerCtxt),
+ ErrorOccured(false) {}
+
+ void convertAvailability(const AvailabilityItem &Availability,
+ CommonEntityInfo &CEI, llvm::StringRef APIName) {
+ // Populate the unavailability information.
+ CEI.Unavailable = (Availability.Mode == APIAvailability::None);
+ CEI.UnavailableInSwift = (Availability.Mode == APIAvailability::NonSwift);
+ if (CEI.Unavailable || CEI.UnavailableInSwift) {
+ CEI.UnavailableMsg = std::string(Availability.Msg);
+ } else {
+ if (!Availability.Msg.empty())
+ emitError(llvm::Twine("availability message for available API '") +
+ APIName + "' will not be used");
+ }
+ }
+
+ void convertParams(const ParamsSeq &Params, FunctionInfo &OutInfo) {
+ for (const auto &P : Params) {
+ ParamInfo PI;
+ if (P.Nullability)
+ PI.setNullabilityAudited(*P.Nullability);
+ PI.setNoEscape(P.NoEscape);
+ PI.setType(std::string(P.Type));
+ PI.setRetainCountConvention(P.RetainCountConvention);
+ if (OutInfo.Params.size() <= P.Position)
+ OutInfo.Params.resize(P.Position + 1);
+ OutInfo.Params[P.Position] |= PI;
+ }
+ }
+
+ void convertNullability(const NullabilitySeq &Nullability,
+ std::optional<NullabilityKind> ReturnNullability,
+ FunctionInfo &OutInfo, llvm::StringRef APIName) {
+ if (Nullability.size() > FunctionInfo::getMaxNullabilityIndex()) {
+ emitError(llvm::Twine("nullability info for '") + APIName +
+ "' does not fit");
+ return;
+ }
+
+ bool audited = false;
+ unsigned int idx = 1;
+ for (const auto &N : Nullability)
+ OutInfo.addTypeInfo(idx++, N);
+ audited = Nullability.size() > 0 || ReturnNullability;
+ if (audited)
+ OutInfo.addTypeInfo(0, ReturnNullability ? *ReturnNullability
+ : NullabilityKind::NonNull);
+ if (!audited)
+ return;
+ OutInfo.NullabilityAudited = audited;
+ OutInfo.NumAdjustedNullable = idx;
+ }
+
+ /// Convert the common parts of an entity from YAML.
+ template <typename T>
+ void convertCommonEntity(const T &Common, CommonEntityInfo &Info,
+ StringRef APIName) {
+ convertAvailability(Common.Availability, Info, APIName);
+ Info.setSwiftPrivate(Common.SwiftPrivate);
+ Info.SwiftName = std::string(Common.SwiftName);
+ }
+
+ /// Convert the common parts of a type entity from YAML.
+ template <typename T>
+ void convertCommonType(const T &Common, CommonTypeInfo &Info,
+ StringRef APIName) {
+ convertCommonEntity(Common, Info, APIName);
+ if (Common.SwiftBridge)
+ Info.setSwiftBridge(std::string(*Common.SwiftBridge));
+ Info.setNSErrorDomain(Common.NSErrorDomain);
+ }
+
+ // Translate from Method into ObjCMethodInfo and write it out.
+ void convertMethod(const Method &M, ContextID ClassID, StringRef ClassName,
+ VersionTuple SwiftVersion) {
+ ObjCMethodInfo MI;
+ convertCommonEntity(M, MI, M.Selector);
+
+ // Check if the selector ends with ':' to determine if it takes arguments.
+ bool takesArguments = M.Selector.ends_with(":");
+
+ // Split the selector into pieces.
+ llvm::SmallVector<StringRef, 4> Args;
+ M.Selector.split(Args, ":", /*MaxSplit*/ -1, /*KeepEmpty*/ false);
+ if (!takesArguments && Args.size() > 1) {
+ emitError("selector '" + M.Selector + "' is missing a ':' at the end");
+ return;
+ }
+
+ // Construct ObjCSelectorRef.
+ api_notes::ObjCSelectorRef Selector;
+ Selector.NumArgs = !takesArguments ? 0 : Args.size();
+ Selector.Identifiers = Args;
+
+ // Translate the initializer info.
+ MI.DesignatedInit = M.DesignatedInit;
+ MI.RequiredInit = M.Required;
+ if (M.FactoryAsInit != FactoryAsInitKind::Infer)
+ emitError("'FactoryAsInit' is no longer valid; use 'SwiftName' instead");
+
+ MI.ResultType = std::string(M.ResultType);
+
+ // Translate parameter information.
+ convertParams(M.Params, MI);
+
+ // Translate nullability info.
+ convertNullability(M.Nullability, M.NullabilityOfRet, MI, M.Selector);
+
+ MI.setRetainCountConvention(M.RetainCountConvention);
+
+ // Write it.
+ Writer.addObjCMethod(ClassID, Selector, M.Kind == MethodKind::Instance, MI,
+ SwiftVersion);
+ }
+
+ void convertContext(std::optional<ContextID> ParentContextID, const Class &C,
+ ContextKind Kind, VersionTuple SwiftVersion) {
+ // Write the class.
+ ObjCContextInfo CI;
+ convertCommonType(C, CI, C.Name);
+
+ if (C.AuditedForNullability)
+ CI.setDefaultNullability(NullabilityKind::NonNull);
+ if (C.SwiftImportAsNonGeneric)
+ CI.setSwiftImportAsNonGeneric(*C.SwiftImportAsNonGeneric);
+ if (C.SwiftObjCMembers)
+ CI.setSwiftObjCMembers(*C.SwiftObjCMembers);
+
+ ContextID CtxID =
+ Writer.addObjCContext(ParentContextID, C.Name, Kind, CI, SwiftVersion);
+
+ // Write all methods.
+ llvm::StringMap<std::pair<bool, bool>> KnownMethods;
+ for (const auto &method : C.Methods) {
+ // Check for duplicate method definitions.
+ bool IsInstanceMethod = method.Kind == MethodKind::Instance;
+ bool &Known = IsInstanceMethod ? KnownMethods[method.Selector].first
+ : KnownMethods[method.Selector].second;
+ if (Known) {
+ emitError(llvm::Twine("duplicate definition of method '") +
+ (IsInstanceMethod ? "-" : "+") + "[" + C.Name + " " +
+ method.Selector + "]'");
+ continue;
+ }
+ Known = true;
+
+ convertMethod(method, CtxID, C.Name, SwiftVersion);
+ }
+
+ // Write all properties.
+ llvm::StringSet<> KnownInstanceProperties;
+ llvm::StringSet<> KnownClassProperties;
+ for (const auto &Property : C.Properties) {
+ // Check for duplicate property definitions.
+ if ((!Property.Kind || *Property.Kind == MethodKind::Instance) &&
+ !KnownInstanceProperties.insert(Property.Name).second) {
+ emitError(llvm::Twine("duplicate definition of instance property '") +
+ C.Name + "." + Property.Name + "'");
+ continue;
+ }
+
+ if ((!Property.Kind || *Property.Kind == MethodKind::Class) &&
+ !KnownClassProperties.insert(Property.Name).second) {
+ emitError(llvm::Twine("duplicate definition of class property '") +
+ C.Name + "." + Property.Name + "'");
+ continue;
+ }
+
+ // Translate from Property into ObjCPropertyInfo.
+ ObjCPropertyInfo PI;
+ convertAvailability(Property.Availability, PI, Property.Name);
+ PI.setSwiftPrivate(Property.SwiftPrivate);
+ PI.SwiftName = std::string(Property.SwiftName);
+ if (Property.Nullability)
+ PI.setNullabilityAudited(*Property.Nullability);
+ if (Property.SwiftImportAsAccessors)
+ PI.setSwiftImportAsAccessors(*Property.SwiftImportAsAccessors);
+ PI.setType(std::string(Property.Type));
+
+ // Add both instance and class properties with this name.
+ if (Property.Kind) {
+ Writer.addObjCProperty(CtxID, Property.Name,
+ *Property.Kind == MethodKind::Instance, PI,
+ SwiftVersion);
+ } else {
+ Writer.addObjCProperty(CtxID, Property.Name, true, PI, SwiftVersion);
+ Writer.addObjCProperty(CtxID, Property.Name, false, PI, SwiftVersion);
+ }
+ }
+ }
+
+ void convertNamespaceContext(std::optional<ContextID> ParentContextID,
+ const Namespace &TheNamespace,
+ VersionTuple SwiftVersion) {
+ // Write the namespace.
+ ObjCContextInfo CI;
+ convertCommonEntity(TheNamespace, CI, TheNamespace.Name);
+
+ ContextID CtxID =
+ Writer.addObjCContext(ParentContextID, TheNamespace.Name,
+ ContextKind::Namespace, CI, SwiftVersion);
+
+ convertTopLevelItems(Context(CtxID, ContextKind::Namespace),
+ TheNamespace.Items, SwiftVersion);
+ }
+
+ void convertTopLevelItems(std::optional<Context> Ctx,
+ const TopLevelItems &TLItems,
+ VersionTuple SwiftVersion) {
+ std::optional<ContextID> CtxID =
+ Ctx ? std::optional(Ctx->id) : std::nullopt;
+
+ // Write all classes.
+ llvm::StringSet<> KnownClasses;
+ for (const auto &Class : TLItems.Classes) {
+ // Check for duplicate class definitions.
+ if (!KnownClasses.insert(Class.Name).second) {
+ emitError(llvm::Twine("multiple definitions of class '") + Class.Name +
+ "'");
+ continue;
+ }
+
+ convertContext(CtxID, Class, ContextKind::ObjCClass, SwiftVersion);
+ }
+
+ // Write all protocols.
+ llvm::StringSet<> KnownProtocols;
+ for (const auto &Protocol : TLItems.Protocols) {
+ // Check for duplicate protocol definitions.
+ if (!KnownProtocols.insert(Protocol.Name).second) {
+ emitError(llvm::Twine("multiple definitions of protocol '") +
+ Protocol.Name + "'");
+ continue;
+ }
+
+ convertContext(CtxID, Protocol, ContextKind::ObjCProtocol, SwiftVersion);
+ }
+
+ // Write all namespaces.
+ llvm::StringSet<> KnownNamespaces;
+ for (const auto &Namespace : TLItems.Namespaces) {
+ // Check for duplicate namespace definitions.
+ if (!KnownNamespaces.insert(Namespace.Name).second) {
+ emitError(llvm::Twine("multiple definitions of namespace '") +
+ Namespace.Name + "'");
+ continue;
+ }
+
+ convertNamespaceContext(CtxID, Namespace, SwiftVersion);
+ }
+
+ // Write all global variables.
+ llvm::StringSet<> KnownGlobals;
+ for (const auto &Global : TLItems.Globals) {
+ // Check for duplicate global variables.
+ if (!KnownGlobals.insert(Global.Name).second) {
+ emitError(llvm::Twine("multiple definitions of global variable '") +
+ Global.Name + "'");
+ continue;
+ }
+
+ GlobalVariableInfo GVI;
+ convertAvailability(Global.Availability, GVI, Global.Name);
+ GVI.setSwiftPrivate(Global.SwiftPrivate);
+ GVI.SwiftName = std::string(Global.SwiftName);
+ if (Global.Nullability)
+ GVI.setNullabilityAudited(*Global.Nullability);
+ GVI.setType(std::string(Global.Type));
+ Writer.addGlobalVariable(Ctx, Global.Name, GVI, SwiftVersion);
+ }
+
+ // Write all global functions.
+ llvm::StringSet<> KnownFunctions;
+ for (const auto &Function : TLItems.Functions) {
+ // Check for duplicate global functions.
+ if (!KnownFunctions.insert(Function.Name).second) {
+ emitError(llvm::Twine("multiple definitions of global function '") +
+ Function.Name + "'");
+ continue;
+ }
+
+ GlobalFunctionInfo GFI;
+ convertAvailability(Function.Availability, GFI, Function.Name);
+ GFI.setSwiftPrivate(Function.SwiftPrivate);
+ GFI.SwiftName = std::string(Function.SwiftName);
+ convertParams(Function.Params, GFI);
+ convertNullability(Function.Nullability, Function.NullabilityOfRet, GFI,
+ Function.Name);
+ GFI.ResultType = std::string(Function.ResultType);
+ GFI.setRetainCountConvention(Function.RetainCountConvention);
+ Writer.addGlobalFunction(Ctx, Function.Name, GFI, SwiftVersion);
+ }
+
+ // Write all enumerators.
+ llvm::StringSet<> KnownEnumConstants;
+ for (const auto &EnumConstant : TLItems.EnumConstants) {
+ // Check for duplicate enumerators
+ if (!KnownEnumConstants.insert(EnumConstant.Name).second) {
+ emitError(llvm::Twine("multiple definitions of enumerator '") +
+ EnumConstant.Name + "'");
+ continue;
+ }
+
+ EnumConstantInfo ECI;
+ convertAvailability(EnumConstant.Availability, ECI, EnumConstant.Name);
+ ECI.setSwiftPrivate(EnumConstant.SwiftPrivate);
+ ECI.SwiftName = std::string(EnumConstant.SwiftName);
+ Writer.addEnumConstant(EnumConstant.Name, ECI, SwiftVersion);
+ }
+
+ // Write all tags.
+ llvm::StringSet<> KnownTags;
+ for (const auto &Tag : TLItems.Tags) {
+ // Check for duplicate tag definitions.
+ if (!KnownTags.insert(Tag.Name).second) {
+ emitError(llvm::Twine("multiple definitions of tag '") + Tag.Name +
+ "'");
+ continue;
+ }
+
+ TagInfo TI;
+ convertCommonType(Tag, TI, Tag.Name);
+
+ if ((Tag.SwiftRetainOp || Tag.SwiftReleaseOp) && !Tag.SwiftImportAs) {
+ emitError(llvm::Twine("should declare SwiftImportAs to use "
+ "SwiftRetainOp and SwiftReleaseOp (for ") +
+ Tag.Name + ")");
+ continue;
+ }
+ if (Tag.SwiftReleaseOp.has_value() != Tag.SwiftRetainOp.has_value()) {
+ emitError(llvm::Twine("should declare both SwiftReleaseOp and "
+ "SwiftRetainOp (for ") +
+ Tag.Name + ")");
+ continue;
+ }
+
+ if (Tag.SwiftImportAs)
+ TI.SwiftImportAs = Tag.SwiftImportAs;
+ if (Tag.SwiftRetainOp)
+ TI.SwiftRetainOp = Tag.SwiftRetainOp;
+ if (Tag.SwiftReleaseOp)
+ TI.SwiftReleaseOp = Tag.SwiftReleaseOp;
+
+ if (Tag.EnumConvenienceKind) {
+ if (Tag.EnumExtensibility) {
+ emitError(
+ llvm::Twine("cannot mix EnumKind and EnumExtensibility (for ") +
+ Tag.Name + ")");
+ continue;
+ }
+ if (Tag.FlagEnum) {
+ emitError(llvm::Twine("cannot mix EnumKind and FlagEnum (for ") +
+ Tag.Name + ")");
+ continue;
+ }
+ switch (*Tag.EnumConvenienceKind) {
+ case EnumConvenienceAliasKind::None:
+ TI.EnumExtensibility = EnumExtensibilityKind::None;
+ TI.setFlagEnum(false);
+ break;
+ case EnumConvenienceAliasKind::CFEnum:
+ TI.EnumExtensibility = EnumExtensibilityKind::Open;
+ TI.setFlagEnum(false);
+ break;
+ case EnumConvenienceAliasKind::CFOptions:
+ TI.EnumExtensibility = EnumExtensibilityKind::Open;
+ TI.setFlagEnum(true);
+ break;
+ case EnumConvenienceAliasKind::CFClosedEnum:
+ TI.EnumExtensibility = EnumExtensibilityKind::Closed;
+ TI.setFlagEnum(false);
+ break;
+ }
+ } else {
+ TI.EnumExtensibility = Tag.EnumExtensibility;
+ TI.setFlagEnum(Tag.FlagEnum);
+ }
+
+ Writer.addTag(Ctx, Tag.Name, TI, SwiftVersion);
+ }
+
+ // Write all typedefs.
+ llvm::StringSet<> KnownTypedefs;
+ for (const auto &Typedef : TLItems.Typedefs) {
+ // Check for duplicate typedef definitions.
+ if (!KnownTypedefs.insert(Typedef.Name).second) {
+ emitError(llvm::Twine("multiple definitions of typedef '") +
+ Typedef.Name + "'");
+ continue;
+ }
+
+ TypedefInfo TInfo;
+ convertCommonType(Typedef, TInfo, Typedef.Name);
+ TInfo.SwiftWrapper = Typedef.SwiftType;
+
+ Writer.addTypedef(Ctx, Typedef.Name, TInfo, SwiftVersion);
+ }
+ }
+
+ bool convertModule() {
+ // Write the top-level items.
+ convertTopLevelItems(/* context */ std::nullopt, M.TopLevel,
+ VersionTuple());
+
+ // Convert the versioned information.
+ for (const auto &Versioned : M.SwiftVersions)
+ convertTopLevelItems(/* context */ std::nullopt, Versioned.Items,
+ Versioned.Version);
+
+ if (!ErrorOccured)
+ Writer.writeToStream(OS);
+
+ return ErrorOccured;
+ }
+};
+} // namespace
+
+static bool compile(const Module &M, const FileEntry *SourceFile,
+ llvm::raw_ostream &OS,
+ llvm::SourceMgr::DiagHandlerTy DiagHandler,
+ void *DiagHandlerCtxt) {
+ YAMLConverter C(M, SourceFile, OS, DiagHandler, DiagHandlerCtxt);
+ return C.convertModule();
+}
+
+/// Simple diagnostic handler that prints diagnostics to standard error.
+static void printDiagnostic(const llvm::SMDiagnostic &Diag, void *Context) {
+ Diag.print(nullptr, llvm::errs());
+}
+
+bool api_notes::compileAPINotes(StringRef YAMLInput,
+ const FileEntry *SourceFile,
+ llvm::raw_ostream &OS,
+ llvm::SourceMgr::DiagHandlerTy DiagHandler,
+ void *DiagHandlerCtxt) {
+ Module TheModule;
+
+ if (!DiagHandler)
+ DiagHandler = &printDiagnostic;
+
+ if (parseAPINotes(YAMLInput, TheModule, DiagHandler, DiagHandlerCtxt))
+ return true;
+
+ return compile(TheModule, SourceFile, OS, DiagHandler, DiagHandlerCtxt);
+}
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp
index 36fbe90e1e3a..b410d5f3b42a 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/ARCMT.cpp
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#include "Internals.h"
#include "clang/ARCMigrate/ARCMT.h"
+#include "Internals.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/Basic/DiagnosticCategories.h"
#include "clang/Frontend/ASTUnit.h"
@@ -20,8 +20,8 @@
#include "clang/Rewrite/Core/Rewriter.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Serialization/ASTReader.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/TargetParser/Triple.h"
#include <utility>
using namespace clang;
using namespace arcmt;
@@ -65,7 +65,7 @@ bool CapturedDiagList::hasDiagnostic(ArrayRef<unsigned> IDs,
while (I != List.end()) {
FullSourceLoc diagLoc = I->getLocation();
if ((IDs.empty() || // empty means any diagnostic in the range.
- llvm::find(IDs, I->getID()) != IDs.end()) &&
+ llvm::is_contained(IDs, I->getID())) &&
!diagLoc.isBeforeInTranslationUnitThan(range.getBegin()) &&
(diagLoc == range.getEnd() ||
diagLoc.isBeforeInTranslationUnitThan(range.getEnd()))) {
@@ -162,9 +162,7 @@ static bool HasARCRuntime(CompilerInvocation &origCI) {
return triple.getOSMajorVersion() >= 11;
if (triple.getOS() == llvm::Triple::MacOSX) {
- unsigned Major, Minor, Micro;
- triple.getOSVersion(Major, Minor, Micro);
- return Major > 10 || (Major == 10 && Minor >= 7);
+ return triple.getOSVersion() >= VersionTuple(10, 7);
}
return false;
@@ -193,8 +191,8 @@ createInvocationForMigration(CompilerInvocation &origCI,
std::string define = std::string(getARCMTMacroName());
define += '=';
CInvok->getPreprocessorOpts().addMacroDef(define);
- CInvok->getLangOpts()->ObjCAutoRefCount = true;
- CInvok->getLangOpts()->setGC(LangOptions::NonGC);
+ CInvok->getLangOpts().ObjCAutoRefCount = true;
+ CInvok->getLangOpts().setGC(LangOptions::NonGC);
CInvok->getDiagnosticOpts().ErrorLimit = 0;
CInvok->getDiagnosticOpts().PedanticErrors = 0;
@@ -203,14 +201,14 @@ createInvocationForMigration(CompilerInvocation &origCI,
for (std::vector<std::string>::iterator
I = CInvok->getDiagnosticOpts().Warnings.begin(),
E = CInvok->getDiagnosticOpts().Warnings.end(); I != E; ++I) {
- if (!StringRef(*I).startswith("error"))
+ if (!StringRef(*I).starts_with("error"))
WarnOpts.push_back(*I);
}
WarnOpts.push_back("error=arc-unsafe-retained-assign");
CInvok->getDiagnosticOpts().Warnings = std::move(WarnOpts);
- CInvok->getLangOpts()->ObjCWeakRuntime = HasARCRuntime(origCI);
- CInvok->getLangOpts()->ObjCWeak = CInvok->getLangOpts()->ObjCWeakRuntime;
+ CInvok->getLangOpts().ObjCWeakRuntime = HasARCRuntime(origCI);
+ CInvok->getLangOpts().ObjCWeak = CInvok->getLangOpts().ObjCWeakRuntime;
return CInvok.release();
}
@@ -239,10 +237,10 @@ bool arcmt::checkForManualIssues(
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
DiagnosticConsumer *DiagClient, bool emitPremigrationARCErrors,
StringRef plistOut) {
- if (!origCI.getLangOpts()->ObjC)
+ if (!origCI.getLangOpts().ObjC)
return false;
- LangOptions::GCMode OrigGCMode = origCI.getLangOpts()->getGC();
+ LangOptions::GCMode OrigGCMode = origCI.getLangOpts().getGC();
bool NoNSAllocReallocError = origCI.getMigratorOpts().NoNSAllocReallocError;
bool NoFinalizeRemoval = origCI.getMigratorOpts().NoFinalizeRemoval;
@@ -340,10 +338,10 @@ applyTransforms(CompilerInvocation &origCI, const FrontendInputFile &Input,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
DiagnosticConsumer *DiagClient, StringRef outputDir,
bool emitPremigrationARCErrors, StringRef plistOut) {
- if (!origCI.getLangOpts()->ObjC)
+ if (!origCI.getLangOpts().ObjC)
return false;
- LangOptions::GCMode OrigGCMode = origCI.getLangOpts()->getGC();
+ LangOptions::GCMode OrigGCMode = origCI.getLangOpts().getGC();
// Make sure checking is successful first.
CompilerInvocation CInvokForCheck(origCI);
@@ -374,7 +372,7 @@ applyTransforms(CompilerInvocation &origCI, const FrontendInputFile &Input,
DiagClient, /*ShouldOwnClient=*/false));
if (outputDir.empty()) {
- origCI.getLangOpts()->ObjCAutoRefCount = true;
+ origCI.getLangOpts().ObjCAutoRefCount = true;
return migration.getRemapper().overwriteOriginal(*Diags);
} else {
return migration.getRemapper().flushToDisk(outputDir, *Diags);
@@ -507,7 +505,7 @@ public:
MigrationProcess::RewriteListener::~RewriteListener() { }
MigrationProcess::MigrationProcess(
- const CompilerInvocation &CI,
+ CompilerInvocation &CI,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
DiagnosticConsumer *diagClient, StringRef outputDir)
: OrigCI(CI), PCHContainerOps(std::move(PCHContainerOps)),
@@ -579,7 +577,7 @@ bool MigrationProcess::applyTransform(TransformFn trans,
Rewriter rewriter(Ctx.getSourceManager(), Ctx.getLangOpts());
TransformActions TA(*Diags, capturedDiags, Ctx, Unit->getPreprocessor());
- MigrationPass pass(Ctx, OrigCI.getLangOpts()->getGC(),
+ MigrationPass pass(Ctx, OrigCI.getLangOpts().getGC(),
Unit->getSema(), TA, capturedDiags, ARCMTMacroLocs);
trans(pass);
@@ -599,7 +597,8 @@ bool MigrationProcess::applyTransform(TransformFn trans,
I = rewriter.buffer_begin(), E = rewriter.buffer_end(); I != E; ++I) {
FileID FID = I->first;
RewriteBuffer &buf = I->second;
- const FileEntry *file = Ctx.getSourceManager().getFileEntryForID(FID);
+ OptionalFileEntryRef file =
+ Ctx.getSourceManager().getFileEntryRefForID(FID);
assert(file);
std::string newFname = std::string(file->getName());
newFname += "-trans";
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/ARCMTActions.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/ARCMTActions.cpp
index d72f53806e37..0805d90d25aa 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/ARCMTActions.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/ARCMTActions.cpp
@@ -39,7 +39,7 @@ ModifyAction::ModifyAction(std::unique_ptr<FrontendAction> WrappedAction)
bool MigrateAction::BeginInvocation(CompilerInstance &CI) {
if (arcmt::migrateWithTemporaryFiles(
CI.getInvocation(), getCurrentInput(), CI.getPCHContainerOperations(),
- CI.getDiagnostics().getClient(), MigrateDir, EmitPremigrationARCErros,
+ CI.getDiagnostics().getClient(), MigrateDir, EmitPremigrationARCErrors,
PlistOut))
return false; // errors, stop the action.
@@ -53,7 +53,7 @@ MigrateAction::MigrateAction(std::unique_ptr<FrontendAction> WrappedAction,
StringRef plistOut,
bool emitPremigrationARCErrors)
: WrapperFrontendAction(std::move(WrappedAction)), MigrateDir(migrateDir),
- PlistOut(plistOut), EmitPremigrationARCErros(emitPremigrationARCErrors) {
+ PlistOut(plistOut), EmitPremigrationARCErrors(emitPremigrationARCErrors) {
if (MigrateDir.empty())
MigrateDir = "."; // user current directory if none is given.
}
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/FileRemapper.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/FileRemapper.cpp
index 92027fe4f1f4..84024c3bafdc 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/FileRemapper.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/FileRemapper.cpp
@@ -43,7 +43,7 @@ std::string FileRemapper::getRemapInfoFile(StringRef outputDir) {
assert(!outputDir.empty());
SmallString<128> InfoFile = outputDir;
llvm::sys::path::append(InfoFile, "remap");
- return std::string(InfoFile.str());
+ return std::string(InfoFile);
}
bool FileRemapper::initFromDisk(StringRef outputDir, DiagnosticsEngine &Diag,
@@ -60,7 +60,7 @@ bool FileRemapper::initFromFile(StringRef filePath, DiagnosticsEngine &Diag,
if (!llvm::sys::fs::exists(infoFile))
return false;
- std::vector<std::pair<const FileEntry *, const FileEntry *> > pairs;
+ std::vector<std::pair<FileEntryRef, FileEntryRef>> pairs;
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileBuf =
llvm::MemoryBuffer::getFile(infoFile, /*IsText=*/true);
@@ -78,20 +78,20 @@ bool FileRemapper::initFromFile(StringRef filePath, DiagnosticsEngine &Diag,
Diag);
StringRef toFilename = lines[idx+2];
- llvm::ErrorOr<const FileEntry *> origFE = FileMgr->getFile(fromFilename);
+ auto origFE = FileMgr->getOptionalFileRef(fromFilename);
if (!origFE) {
if (ignoreIfFilesChanged)
continue;
return report("File does not exist: " + fromFilename, Diag);
}
- llvm::ErrorOr<const FileEntry *> newFE = FileMgr->getFile(toFilename);
+ auto newFE = FileMgr->getOptionalFileRef(toFilename);
if (!newFE) {
if (ignoreIfFilesChanged)
continue;
return report("File does not exist: " + toFilename, Diag);
}
- if ((uint64_t)(*origFE)->getModificationTime() != timeModified) {
+ if ((uint64_t)origFE->getModificationTime() != timeModified) {
if (ignoreIfFilesChanged)
continue;
return report("File was modified: " + fromFilename, Diag);
@@ -128,13 +128,13 @@ bool FileRemapper::flushToFile(StringRef outputPath, DiagnosticsEngine &Diag) {
for (MappingsTy::iterator
I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) {
- const FileEntry *origFE = I->first;
- SmallString<200> origPath = StringRef(origFE->getName());
+ FileEntryRef origFE = I->first;
+ SmallString<200> origPath = StringRef(origFE.getName());
fs::make_absolute(origPath);
infoOut << origPath << '\n';
- infoOut << (uint64_t)origFE->getModificationTime() << '\n';
+ infoOut << (uint64_t)origFE.getModificationTime() << '\n';
- if (const FileEntry *FE = I->second.dyn_cast<const FileEntry *>()) {
+ if (const auto *FE = std::get_if<FileEntryRef>(&I->second)) {
SmallString<200> newPath = StringRef(FE->getName());
fs::make_absolute(newPath);
infoOut << newPath << '\n';
@@ -143,20 +143,20 @@ bool FileRemapper::flushToFile(StringRef outputPath, DiagnosticsEngine &Diag) {
SmallString<64> tempPath;
int fd;
if (fs::createTemporaryFile(
- path::filename(origFE->getName()),
- path::extension(origFE->getName()).drop_front(), fd, tempPath,
+ path::filename(origFE.getName()),
+ path::extension(origFE.getName()).drop_front(), fd, tempPath,
llvm::sys::fs::OF_Text))
return report("Could not create file: " + tempPath.str(), Diag);
llvm::raw_fd_ostream newOut(fd, /*shouldClose=*/true);
- llvm::MemoryBuffer *mem = I->second.get<llvm::MemoryBuffer *>();
+ llvm::MemoryBuffer *mem = std::get<llvm::MemoryBuffer *>(I->second);
newOut.write(mem->getBufferStart(), mem->getBufferSize());
newOut.close();
- auto newE = FileMgr->getFile(tempPath);
+ auto newE = FileMgr->getOptionalFileRef(tempPath);
if (newE) {
remap(origFE, *newE);
- infoOut << (*newE)->getName() << '\n';
+ infoOut << newE->getName() << '\n';
}
}
}
@@ -171,18 +171,18 @@ bool FileRemapper::overwriteOriginal(DiagnosticsEngine &Diag,
for (MappingsTy::iterator
I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) {
- const FileEntry *origFE = I->first;
- assert(I->second.is<llvm::MemoryBuffer *>());
- if (!fs::exists(origFE->getName()))
- return report(StringRef("File does not exist: ") + origFE->getName(),
+ FileEntryRef origFE = I->first;
+ assert(std::holds_alternative<llvm::MemoryBuffer *>(I->second));
+ if (!fs::exists(origFE.getName()))
+ return report(StringRef("File does not exist: ") + origFE.getName(),
Diag);
std::error_code EC;
- llvm::raw_fd_ostream Out(origFE->getName(), EC, llvm::sys::fs::OF_None);
+ llvm::raw_fd_ostream Out(origFE.getName(), EC, llvm::sys::fs::OF_None);
if (EC)
return report(EC.message(), Diag);
- llvm::MemoryBuffer *mem = I->second.get<llvm::MemoryBuffer *>();
+ llvm::MemoryBuffer *mem = std::get<llvm::MemoryBuffer *>(I->second);
Out.write(mem->getBufferStart(), mem->getBufferSize());
Out.close();
}
@@ -196,24 +196,24 @@ void FileRemapper::forEachMapping(
llvm::function_ref<void(StringRef, const llvm::MemoryBufferRef &)>
CaptureBuffer) const {
for (auto &Mapping : FromToMappings) {
- if (const FileEntry *FE = Mapping.second.dyn_cast<const FileEntry *>()) {
- CaptureFile(Mapping.first->getName(), FE->getName());
+ if (const auto *FE = std::get_if<FileEntryRef>(&Mapping.second)) {
+ CaptureFile(Mapping.first.getName(), FE->getName());
continue;
}
CaptureBuffer(
- Mapping.first->getName(),
- Mapping.second.get<llvm::MemoryBuffer *>()->getMemBufferRef());
+ Mapping.first.getName(),
+ std::get<llvm::MemoryBuffer *>(Mapping.second)->getMemBufferRef());
}
}
void FileRemapper::applyMappings(PreprocessorOptions &PPOpts) const {
for (MappingsTy::const_iterator
I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) {
- if (const FileEntry *FE = I->second.dyn_cast<const FileEntry *>()) {
- PPOpts.addRemappedFile(I->first->getName(), FE->getName());
+ if (const auto *FE = std::get_if<FileEntryRef>(&I->second)) {
+ PPOpts.addRemappedFile(I->first.getName(), FE->getName());
} else {
- llvm::MemoryBuffer *mem = I->second.get<llvm::MemoryBuffer *>();
- PPOpts.addRemappedFile(I->first->getName(), mem);
+ llvm::MemoryBuffer *mem = std::get<llvm::MemoryBuffer *>(I->second);
+ PPOpts.addRemappedFile(I->first.getName(), mem);
}
}
@@ -222,49 +222,47 @@ void FileRemapper::applyMappings(PreprocessorOptions &PPOpts) const {
void FileRemapper::remap(StringRef filePath,
std::unique_ptr<llvm::MemoryBuffer> memBuf) {
- remap(getOriginalFile(filePath), std::move(memBuf));
+ OptionalFileEntryRef File = getOriginalFile(filePath);
+ assert(File);
+ remap(*File, std::move(memBuf));
}
-void FileRemapper::remap(const FileEntry *file,
- std::unique_ptr<llvm::MemoryBuffer> memBuf) {
- assert(file);
- Target &targ = FromToMappings[file];
- resetTarget(targ);
- targ = memBuf.release();
+void FileRemapper::remap(FileEntryRef File,
+ std::unique_ptr<llvm::MemoryBuffer> MemBuf) {
+ auto [It, New] = FromToMappings.insert({File, nullptr});
+ if (!New)
+ resetTarget(It->second);
+ It->second = MemBuf.release();
}
-void FileRemapper::remap(const FileEntry *file, const FileEntry *newfile) {
- assert(file && newfile);
- Target &targ = FromToMappings[file];
- resetTarget(targ);
- targ = newfile;
- ToFromMappings[newfile] = file;
+void FileRemapper::remap(FileEntryRef File, FileEntryRef NewFile) {
+ auto [It, New] = FromToMappings.insert({File, nullptr});
+ if (!New)
+ resetTarget(It->second);
+ It->second = NewFile;
+ ToFromMappings.insert({NewFile, File});
}
-const FileEntry *FileRemapper::getOriginalFile(StringRef filePath) {
- const FileEntry *file = nullptr;
- if (auto fileOrErr = FileMgr->getFile(filePath))
- file = *fileOrErr;
+OptionalFileEntryRef FileRemapper::getOriginalFile(StringRef filePath) {
+ OptionalFileEntryRef File = FileMgr->getOptionalFileRef(filePath);
+ if (!File)
+ return std::nullopt;
// If we are updating a file that overridden an original file,
// actually update the original file.
- llvm::DenseMap<const FileEntry *, const FileEntry *>::iterator
- I = ToFromMappings.find(file);
+ auto I = ToFromMappings.find(*File);
if (I != ToFromMappings.end()) {
- file = I->second;
- assert(FromToMappings.find(file) != FromToMappings.end() &&
- "Original file not in mappings!");
+ *File = I->second;
+ assert(FromToMappings.contains(*File) && "Original file not in mappings!");
}
- return file;
+ return File;
}
void FileRemapper::resetTarget(Target &targ) {
- if (!targ)
- return;
-
- if (llvm::MemoryBuffer *oldmem = targ.dyn_cast<llvm::MemoryBuffer *>()) {
+ if (std::holds_alternative<llvm::MemoryBuffer *>(targ)) {
+ llvm::MemoryBuffer *oldmem = std::get<llvm::MemoryBuffer *>(targ);
delete oldmem;
} else {
- const FileEntry *toFE = targ.get<const FileEntry *>();
+ FileEntryRef toFE = std::get<FileEntryRef>(targ);
ToFromMappings.erase(toFE);
}
}
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/Internals.h b/contrib/llvm-project/clang/lib/ARCMigrate/Internals.h
index ed0136e4867a..d790c7c02189 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/Internals.h
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/Internals.h
@@ -13,8 +13,8 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Frontend/MigratorOptions.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/Optional.h"
#include <list>
+#include <optional>
namespace clang {
class ASTContext;
@@ -74,7 +74,7 @@ public:
bool clearDiagnostic(ArrayRef<unsigned> IDs, SourceRange range);
bool clearAllDiagnostics(SourceRange range) {
- return clearDiagnostic(None, range);
+ return clearDiagnostic(std::nullopt, range);
}
bool clearDiagnostic(unsigned ID1, unsigned ID2, SourceRange range) {
unsigned IDs[] = { ID1, ID2 };
@@ -152,15 +152,13 @@ public:
TransformActions &TA;
const CapturedDiagList &CapturedDiags;
std::vector<SourceLocation> &ARCMTMacroLocs;
- Optional<bool> EnableCFBridgeFns;
+ std::optional<bool> EnableCFBridgeFns;
- MigrationPass(ASTContext &Ctx, LangOptions::GCMode OrigGCMode,
- Sema &sema, TransformActions &TA,
- const CapturedDiagList &capturedDiags,
+ MigrationPass(ASTContext &Ctx, LangOptions::GCMode OrigGCMode, Sema &sema,
+ TransformActions &TA, const CapturedDiagList &capturedDiags,
std::vector<SourceLocation> &ARCMTMacroLocs)
- : Ctx(Ctx), OrigGCMode(OrigGCMode), MigOptions(),
- SemaRef(sema), TA(TA), CapturedDiags(capturedDiags),
- ARCMTMacroLocs(ARCMTMacroLocs) { }
+ : Ctx(Ctx), OrigGCMode(OrigGCMode), SemaRef(sema), TA(TA),
+ CapturedDiags(capturedDiags), ARCMTMacroLocs(ARCMTMacroLocs) {}
const CapturedDiagList &getDiags() const { return CapturedDiags; }
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/ObjCMT.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/ObjCMT.cpp
index c8069b51567c..0786c81516b2 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/ObjCMT.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/ObjCMT.cpp
@@ -104,7 +104,7 @@ public:
bool FoundationIncluded;
llvm::SmallPtrSet<ObjCProtocolDecl *, 32> ObjCProtocolDecls;
llvm::SmallVector<const Decl *, 8> CFFunctionIBCandidates;
- llvm::StringSet<> WhiteListFilenames;
+ llvm::StringSet<> AllowListFilenames;
RetainSummaryManager &getSummaryManager(ASTContext &Ctx) {
if (!Summaries)
@@ -118,14 +118,12 @@ public:
FileRemapper &remapper, FileManager &fileMgr,
const PPConditionalDirectiveRecord *PPRec,
Preprocessor &PP, bool isOutputFile,
- ArrayRef<std::string> WhiteList)
+ ArrayRef<std::string> AllowList)
: MigrateDir(migrateDir), ASTMigrateActions(astMigrateActions),
NSIntegerTypedefed(nullptr), NSUIntegerTypedefed(nullptr),
Remapper(remapper), FileMgr(fileMgr), PPRec(PPRec), PP(PP),
IsOutputFile(isOutputFile), FoundationIncluded(false) {
- // FIXME: StringSet should have insert(iter, iter) to use here.
- for (const std::string &Val : WhiteList)
- WhiteListFilenames.insert(Val);
+ AllowListFilenames.insert(AllowList.begin(), AllowList.end());
}
protected:
@@ -151,12 +149,11 @@ protected:
void HandleTranslationUnit(ASTContext &Ctx) override;
bool canModifyFile(StringRef Path) {
- if (WhiteListFilenames.empty())
+ if (AllowListFilenames.empty())
return true;
- return WhiteListFilenames.find(llvm::sys::path::filename(Path))
- != WhiteListFilenames.end();
+ return AllowListFilenames.contains(llvm::sys::path::filename(Path));
}
- bool canModifyFile(Optional<FileEntryRef> FE) {
+ bool canModifyFile(OptionalFileEntryRef FE) {
if (!FE)
return false;
return canModifyFile(FE->getName());
@@ -202,7 +199,7 @@ ObjCMigrateAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
Consumers.push_back(WrapperFrontendAction::CreateASTConsumer(CI, InFile));
Consumers.push_back(std::make_unique<ObjCMigrateASTConsumer>(
MigrateDir, ObjCMigAction, Remapper, CompInst->getFileManager(), PPRec,
- CompInst->getPreprocessor(), false, None));
+ CompInst->getPreprocessor(), false, std::nullopt));
return std::make_unique<MultiplexConsumer>(std::move(Consumers));
}
@@ -487,9 +484,8 @@ static void rewriteToObjCProperty(const ObjCMethodDecl *Getter,
// Short circuit 'delegate' properties that contain the name "delegate" or
// "dataSource", or have exact name "target" to have 'assign' attribute.
- if (PropertyName.equals("target") ||
- (PropertyName.find("delegate") != StringRef::npos) ||
- (PropertyName.find("dataSource") != StringRef::npos)) {
+ if (PropertyName.equals("target") || PropertyName.contains("delegate") ||
+ PropertyName.contains("dataSource")) {
QualType QT = Getter->getReturnType();
if (!QT->isRealType())
append_attr(PropertyString, "assign", LParenAdded);
@@ -506,7 +502,7 @@ static void rewriteToObjCProperty(const ObjCMethodDecl *Getter,
if (LParenAdded)
PropertyString += ')';
QualType RT = Getter->getReturnType();
- if (!isa<TypedefType>(RT)) {
+ if (!RT->getAs<TypedefType>()) {
// strip off any ARC lifetime qualifier.
QualType CanResultTy = Context.getCanonicalType(RT);
if (CanResultTy.getQualifiers().hasObjCLifetime()) {
@@ -566,7 +562,7 @@ static void rewriteToObjCProperty(const ObjCMethodDecl *Getter,
static bool IsCategoryNameWithDeprecatedSuffix(ObjCContainerDecl *D) {
if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(D)) {
StringRef Name = CatDecl->getName();
- return Name.endswith("Deprecated");
+ return Name.ends_with("Deprecated");
}
return false;
}
@@ -640,7 +636,7 @@ ClassImplementsAllMethodsAndProperties(ASTContext &Ctx,
for (const auto *MD : PDecl->methods()) {
if (MD->isImplicit())
continue;
- if (MD->getImplementationControl() == ObjCMethodDecl::Optional)
+ if (MD->getImplementationControl() == ObjCImplementationControl::Optional)
continue;
DeclContext::lookup_result R = ImpDecl->lookup(MD->getDeclName());
if (R.empty())
@@ -793,7 +789,7 @@ static bool UseNSOptionsMacro(Preprocessor &PP, ASTContext &Ctx,
bool PowerOfTwo = true;
bool AllHexdecimalEnumerator = true;
uint64_t MaxPowerOfTwoVal = 0;
- for (auto Enumerator : EnumDcl->enumerators()) {
+ for (auto *Enumerator : EnumDcl->enumerators()) {
const Expr *InitExpr = Enumerator->getInitExpr();
if (!InitExpr) {
PowerOfTwo = false;
@@ -1054,7 +1050,7 @@ static bool TypeIsInnerPointer(QualType T) {
// Also, typedef-of-pointer-to-incomplete-struct is something that we assume
// is not an innter pointer type.
QualType OrigT = T;
- while (const TypedefType *TD = dyn_cast<TypedefType>(T.getTypePtr()))
+ while (const auto *TD = T->getAs<TypedefType>())
T = TD->getDecl()->getUnderlyingType();
if (OrigT == T || !T->isPointerType())
return true;
@@ -1144,7 +1140,7 @@ static bool AttributesMatch(const Decl *Decl1, const Decl *Decl2,
static bool IsValidIdentifier(ASTContext &Ctx,
const char *Name) {
- if (!isIdentifierHead(Name[0]))
+ if (!isAsciiIdentifierStart(Name[0]))
return false;
std::string NameString = Name;
NameString[0] = toLowercase(NameString[0]);
@@ -1180,12 +1176,12 @@ bool ObjCMigrateASTConsumer::migrateProperty(ASTContext &Ctx,
if (!SetterMethod) {
// try a different naming convention for getter: isXxxxx
StringRef getterNameString = getterName->getName();
- bool IsPrefix = getterNameString.startswith("is");
+ bool IsPrefix = getterNameString.starts_with("is");
// Note that we don't want to change an isXXX method of retainable object
// type to property (readonly or otherwise).
if (IsPrefix && GRT->isObjCRetainableType())
return false;
- if (IsPrefix || getterNameString.startswith("get")) {
+ if (IsPrefix || getterNameString.starts_with("get")) {
LengthOfPrefix = (IsPrefix ? 2 : 3);
const char *CGetterName = getterNameString.data() + LengthOfPrefix;
// Make sure that first character after "is" or "get" prefix can
@@ -1324,11 +1320,11 @@ void ObjCMigrateASTConsumer::migrateFactoryMethod(ASTContext &Ctx,
if (OIT_Family == OIT_Singleton || OIT_Family == OIT_ReturnsSelf) {
StringRef STRefMethodName(MethodName);
size_t len = 0;
- if (STRefMethodName.startswith("standard"))
+ if (STRefMethodName.starts_with("standard"))
len = strlen("standard");
- else if (STRefMethodName.startswith("shared"))
+ else if (STRefMethodName.starts_with("shared"))
len = strlen("shared");
- else if (STRefMethodName.startswith("default"))
+ else if (STRefMethodName.starts_with("default"))
len = strlen("default");
else
return;
@@ -1345,7 +1341,7 @@ void ObjCMigrateASTConsumer::migrateFactoryMethod(ASTContext &Ctx,
StringRef LoweredMethodName(MethodName);
std::string StringLoweredMethodName = LoweredMethodName.lower();
LoweredMethodName = StringLoweredMethodName;
- if (!LoweredMethodName.startswith(ClassNamePostfix))
+ if (!LoweredMethodName.starts_with(ClassNamePostfix))
return;
if (OIT_Family == OIT_ReturnsSelf)
ReplaceWithClasstype(*this, OM);
@@ -1357,9 +1353,6 @@ static bool IsVoidStarType(QualType Ty) {
if (!Ty->isPointerType())
return false;
- while (const TypedefType *TD = dyn_cast<TypedefType>(Ty.getTypePtr()))
- Ty = TD->getDecl()->getUnderlyingType();
-
// Is the type void*?
const PointerType* PT = Ty->castAs<PointerType>();
if (PT->getPointeeType().getUnqualifiedType()->isVoidType())
@@ -1790,7 +1783,7 @@ private:
std::tie(FID, Offset) = SourceMgr.getDecomposedLoc(Loc);
assert(FID.isValid());
SmallString<200> Path =
- StringRef(SourceMgr.getFileEntryForID(FID)->getName());
+ StringRef(SourceMgr.getFileEntryRefForID(FID)->getName());
llvm::sys::fs::make_absolute(Path);
OS << " \"file\": \"";
OS.write_escaped(Path.str()) << "\",\n";
@@ -1962,7 +1955,8 @@ void ObjCMigrateASTConsumer::HandleTranslationUnit(ASTContext &Ctx) {
I = rewriter.buffer_begin(), E = rewriter.buffer_end(); I != E; ++I) {
FileID FID = I->first;
RewriteBuffer &buf = I->second;
- Optional<FileEntryRef> file = Ctx.getSourceManager().getFileEntryRefForID(FID);
+ OptionalFileEntryRef file =
+ Ctx.getSourceManager().getFileEntryRefForID(FID);
assert(file);
SmallString<512> newText;
llvm::raw_svector_ostream vecOS(newText);
@@ -1987,7 +1981,7 @@ bool MigrateSourceAction::BeginInvocation(CompilerInstance &CI) {
return true;
}
-static std::vector<std::string> getWhiteListFilenames(StringRef DirPath) {
+static std::vector<std::string> getAllowListFilenames(StringRef DirPath) {
using namespace llvm::sys::fs;
using namespace llvm::sys::path;
@@ -2018,21 +2012,21 @@ MigrateSourceAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
if (ObjCMTOpts == FrontendOptions::ObjCMT_None) {
// If no specific option was given, enable literals+subscripting transforms
// by default.
- ObjCMTAction |= FrontendOptions::ObjCMT_Literals |
- FrontendOptions::ObjCMT_Subscripting;
+ ObjCMTAction |=
+ FrontendOptions::ObjCMT_Literals | FrontendOptions::ObjCMT_Subscripting;
}
CI.getPreprocessor().addPPCallbacks(std::unique_ptr<PPCallbacks>(PPRec));
- std::vector<std::string> WhiteList =
- getWhiteListFilenames(CI.getFrontendOpts().ObjCMTWhiteListPath);
+ std::vector<std::string> AllowList =
+ getAllowListFilenames(CI.getFrontendOpts().ObjCMTAllowListPath);
return std::make_unique<ObjCMigrateASTConsumer>(
CI.getFrontendOpts().OutputFile, ObjCMTAction, Remapper,
CI.getFileManager(), PPRec, CI.getPreprocessor(),
- /*isOutputFile=*/true, WhiteList);
+ /*isOutputFile=*/true, AllowList);
}
namespace {
struct EditEntry {
- Optional<FileEntryRef> File;
+ OptionalFileEntryRef File;
unsigned Offset = 0;
unsigned RemoveLen = 0;
std::string Text;
@@ -2207,7 +2201,7 @@ static std::string applyEditsToTemp(FileEntryRef FE,
TmpOut.write(NewText.data(), NewText.size());
TmpOut.close();
- return std::string(TempPath.str());
+ return std::string(TempPath);
}
bool arcmt::getFileRemappingsFromFileList(
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/PlistReporter.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/PlistReporter.cpp
index c233d6bd9002..f78ca5e1c9bd 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/PlistReporter.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/PlistReporter.cpp
@@ -72,7 +72,7 @@ void arcmt::writeARCDiagsToPlist(const std::string &outPath,
" <array>\n";
for (FileID FID : Fids)
- EmitString(o << " ", SM.getFileEntryForID(FID)->getName()) << '\n';
+ EmitString(o << " ", SM.getFileEntryRefForID(FID)->getName()) << '\n';
o << " </array>\n"
" <key>diagnostics</key>\n"
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/TransAutoreleasePool.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/TransAutoreleasePool.cpp
index 393adcd85a3f..6d501228e712 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/TransAutoreleasePool.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/TransAutoreleasePool.cpp
@@ -229,8 +229,9 @@ private:
bool IsFollowedBySimpleReturnStmt;
SmallVector<ObjCMessageExpr *, 4> Releases;
- PoolScope() : PoolVar(nullptr), CompoundParent(nullptr), Begin(), End(),
- IsFollowedBySimpleReturnStmt(false) { }
+ PoolScope()
+ : PoolVar(nullptr), CompoundParent(nullptr),
+ IsFollowedBySimpleReturnStmt(false) {}
SourceRange getIndentedRange() const {
Stmt::child_iterator rangeS = Begin;
@@ -416,11 +417,11 @@ private:
Selector DrainSel;
struct PoolVarInfo {
- DeclStmt *Dcl;
+ DeclStmt *Dcl = nullptr;
ExprSet Refs;
SmallVector<PoolScope, 2> Scopes;
- PoolVarInfo() : Dcl(nullptr) { }
+ PoolVarInfo() = default;
};
std::map<VarDecl *, PoolVarInfo> PoolVars;
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/TransGCAttrs.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/TransGCAttrs.cpp
index 99a61e0842a7..28d1db7f4376 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/TransGCAttrs.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/TransGCAttrs.cpp
@@ -46,7 +46,7 @@ public:
if (!D || D->isImplicit())
return true;
- SaveAndRestore<bool> Save(FullyMigratable, isMigratable(D));
+ SaveAndRestore Save(FullyMigratable, isMigratable(D));
if (ObjCPropertyDecl *PropD = dyn_cast<ObjCPropertyDecl>(D)) {
lookForAttribute(PropD, PropD->getTypeSourceInfo());
@@ -158,7 +158,7 @@ public:
if (!D)
return false;
- for (auto I : D->redecls())
+ for (auto *I : D->redecls())
if (!isInMainFile(I->getLocation()))
return false;
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/TransProperties.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/TransProperties.cpp
index e5ccf1cf79b1..6d1d950821a0 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/TransProperties.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/TransProperties.cpp
@@ -45,7 +45,7 @@ namespace {
class PropertiesRewriter {
MigrationContext &MigrateCtx;
MigrationPass &Pass;
- ObjCImplementationDecl *CurImplD;
+ ObjCImplementationDecl *CurImplD = nullptr;
enum PropActionKind {
PropAction_None,
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
index b76fc65574dd..baa503d8a39d 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
@@ -78,7 +78,7 @@ public:
}
}
// Pass through.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case OMF_retain:
case OMF_release:
if (E->getReceiverKind() == ObjCMessageExpr::Instance)
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp
index e767ad5346c3..1e6354f71e29 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp
@@ -146,9 +146,8 @@ private:
ento::cocoa::isRefType(E->getSubExpr()->getType(), "CF",
FD->getIdentifier()->getName())) {
StringRef fname = FD->getIdentifier()->getName();
- if (fname.endswith("Retain") ||
- fname.find("Create") != StringRef::npos ||
- fname.find("Copy") != StringRef::npos) {
+ if (fname.ends_with("Retain") || fname.contains("Create") ||
+ fname.contains("Copy")) {
// Do not migrate to couple of bridge transfer casts which
// cancel each other out. Leave it unchanged so error gets user
// attention instead.
@@ -168,7 +167,7 @@ private:
return;
}
- if (fname.find("Get") != StringRef::npos) {
+ if (fname.contains("Get")) {
castToObjCObject(E, /*retained=*/false);
return;
}
@@ -253,7 +252,8 @@ private:
SourceManager &SM = Pass.Ctx.getSourceManager();
char PrevChar = *SM.getCharacterData(InsertLoc.getLocWithOffset(-1));
- if (Lexer::isIdentifierBodyChar(PrevChar, Pass.Ctx.getLangOpts()))
+ if (Lexer::isAsciiIdentifierContinueChar(PrevChar,
+ Pass.Ctx.getLangOpts()))
BridgeCall += ' ';
if (Kind == OBC_BridgeTransfer)
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/TransformActions.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/TransformActions.cpp
index 2c48e479dce8..6bc6fed1a903 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/TransformActions.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/TransformActions.cpp
@@ -431,7 +431,7 @@ bool TransformActionsImpl::canReplaceText(SourceLocation loc, StringRef text) {
if (invalidTemp)
return false;
- return file.substr(locInfo.second).startswith(text);
+ return file.substr(locInfo.second).starts_with(text);
}
void TransformActionsImpl::commitInsert(SourceLocation loc, StringRef text) {
@@ -540,7 +540,7 @@ void TransformActionsImpl::addRemoval(CharSourceRange range) {
return;
case Range_Contains:
RI->End = newRange.End;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Range_ExtendsBegin:
newRange.End = RI->End;
Removals.erase(RI);
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp
index e274a540e408..2808e35135dc 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp
@@ -25,7 +25,7 @@ using namespace trans;
ASTTraverser::~ASTTraverser() { }
bool MigrationPass::CFBridgingFunctionsDefined() {
- if (!EnableCFBridgeFns.hasValue())
+ if (!EnableCFBridgeFns)
EnableCFBridgeFns = SemaRef.isKnownName("CFBridgingRetain") &&
SemaRef.isKnownName("CFBridgingRelease");
return *EnableCFBridgeFns;
@@ -95,11 +95,9 @@ bool trans::isPlusOne(const Expr *E) {
ento::cocoa::isRefType(callE->getType(), "CF",
FD->getIdentifier()->getName())) {
StringRef fname = FD->getIdentifier()->getName();
- if (fname.endswith("Retain") ||
- fname.find("Create") != StringRef::npos ||
- fname.find("Copy") != StringRef::npos) {
+ if (fname.ends_with("Retain") || fname.contains("Create") ||
+ fname.contains("Copy"))
return true;
- }
}
}
}
@@ -419,7 +417,7 @@ bool MigrationContext::rewritePropertyAttribute(StringRef fromAttr,
if (tok.is(tok::r_paren))
return false;
- while (1) {
+ while (true) {
if (tok.isNot(tok::raw_identifier)) return false;
if (tok.getRawIdentifier() == fromAttr) {
if (!toAttr.empty()) {
diff --git a/contrib/llvm-project/clang/lib/AST/APValue.cpp b/contrib/llvm-project/clang/lib/AST/APValue.cpp
index 9a9233bc1ea7..4eae308ef5b3 100644
--- a/contrib/llvm-project/clang/lib/AST/APValue.cpp
+++ b/contrib/llvm-project/clang/lib/AST/APValue.cpp
@@ -156,10 +156,10 @@ void APValue::LValuePathEntry::Profile(llvm::FoldingSetNodeID &ID) const {
APValue::LValuePathSerializationHelper::LValuePathSerializationHelper(
ArrayRef<LValuePathEntry> Path, QualType ElemTy)
- : ElemTy((const void *)ElemTy.getTypePtrOrNull()), Path(Path) {}
+ : Ty((const void *)ElemTy.getTypePtrOrNull()), Path(Path) {}
QualType APValue::LValuePathSerializationHelper::getType() {
- return QualType::getFromOpaquePtr(ElemTy);
+ return QualType::getFromOpaquePtr(Ty);
}
namespace {
@@ -390,11 +390,13 @@ APValue &APValue::operator=(const APValue &RHS) {
}
APValue &APValue::operator=(APValue &&RHS) {
- if (Kind != None && Kind != Indeterminate)
- DestroyDataAndMakeUninit();
- Kind = RHS.Kind;
- Data = RHS.Data;
- RHS.Kind = None;
+ if (this != &RHS) {
+ if (Kind != None && Kind != Indeterminate)
+ DestroyDataAndMakeUninit();
+ Kind = RHS.Kind;
+ Data = RHS.Data;
+ RHS.Kind = None;
+ }
return *this;
}
@@ -625,6 +627,69 @@ static double GetApproxValue(const llvm::APFloat &F) {
return V.convertToDouble();
}
+static bool TryPrintAsStringLiteral(raw_ostream &Out,
+ const PrintingPolicy &Policy,
+ const ArrayType *ATy,
+ ArrayRef<APValue> Inits) {
+ if (Inits.empty())
+ return false;
+
+ QualType Ty = ATy->getElementType();
+ if (!Ty->isAnyCharacterType())
+ return false;
+
+ // Nothing we can do about a sequence that is not null-terminated
+ if (!Inits.back().isInt() || !Inits.back().getInt().isZero())
+ return false;
+
+ Inits = Inits.drop_back();
+
+ llvm::SmallString<40> Buf;
+ Buf.push_back('"');
+
+ // Better than printing a two-digit sequence of 10 integers.
+ constexpr size_t MaxN = 36;
+ StringRef Ellipsis;
+ if (Inits.size() > MaxN && !Policy.EntireContentsOfLargeArray) {
+ Ellipsis = "[...]";
+ Inits =
+ Inits.take_front(std::min(MaxN - Ellipsis.size() / 2, Inits.size()));
+ }
+
+ for (auto &Val : Inits) {
+ if (!Val.isInt())
+ return false;
+ int64_t Char64 = Val.getInt().getExtValue();
+ if (!isASCII(Char64))
+ return false; // Bye bye, see you in integers.
+ auto Ch = static_cast<unsigned char>(Char64);
+ // The diagnostic message is 'quoted'
+ StringRef Escaped = escapeCStyle<EscapeChar::SingleAndDouble>(Ch);
+ if (Escaped.empty()) {
+ if (!isPrintable(Ch))
+ return false;
+ Buf.emplace_back(Ch);
+ } else {
+ Buf.append(Escaped);
+ }
+ }
+
+ Buf.append(Ellipsis);
+ Buf.push_back('"');
+
+ if (Ty->isWideCharType())
+ Out << 'L';
+ else if (Ty->isChar8Type())
+ Out << "u8";
+ else if (Ty->isChar16Type())
+ Out << 'u';
+ else if (Ty->isChar32Type())
+ Out << 'U';
+
+ Out << Buf;
+ return true;
+}
+
void APValue::printPretty(raw_ostream &Out, const ASTContext &Ctx,
QualType Ty) const {
printPretty(Out, Ctx.getPrintingPolicy(), Ty, &Ctx);
@@ -700,7 +765,9 @@ void APValue::printPretty(raw_ostream &Out, const PrintingPolicy &Policy,
if (!hasLValuePath()) {
// No lvalue path: just print the offset.
CharUnits O = getLValueOffset();
- CharUnits S = Ctx ? Ctx->getTypeSizeInChars(InnerTy) : CharUnits::Zero();
+ CharUnits S = Ctx ? Ctx->getTypeSizeInCharsIfKnown(InnerTy).value_or(
+ CharUnits::Zero())
+ : CharUnits::Zero();
if (!O.isZero()) {
if (IsReference)
Out << "*(";
@@ -774,6 +841,10 @@ void APValue::printPretty(raw_ostream &Out, const PrintingPolicy &Policy,
Out << *VD;
ElemTy = VD->getType();
}
+ } else if (ElemTy->isAnyComplexType()) {
+ // The lvalue refers to a complex type
+ Out << (Path[I].getAsArrayIndex() == 0 ? ".real" : ".imag");
+ ElemTy = ElemTy->castAs<ComplexType>()->getElementType();
} else {
// The lvalue must refer to an array.
Out << '[' << Path[I].getAsArrayIndex() << ']';
@@ -793,17 +864,23 @@ void APValue::printPretty(raw_ostream &Out, const PrintingPolicy &Policy,
}
case APValue::Array: {
const ArrayType *AT = Ty->castAsArrayTypeUnsafe();
+ unsigned N = getArrayInitializedElts();
+ if (N != 0 && TryPrintAsStringLiteral(Out, Policy, AT,
+ {&getArrayInitializedElt(0), N}))
+ return;
QualType ElemTy = AT->getElementType();
Out << '{';
- if (unsigned N = getArrayInitializedElts()) {
- getArrayInitializedElt(0).printPretty(Out, Policy, ElemTy, Ctx);
- for (unsigned I = 1; I != N; ++I) {
+ unsigned I = 0;
+ switch (N) {
+ case 0:
+ for (; I != N; ++I) {
Out << ", ";
- if (I == 10) {
- // Avoid printing out the entire contents of large arrays.
- Out << "...";
- break;
+ if (I == 10 && !Policy.EntireContentsOfLargeArray) {
+ Out << "...}";
+ return;
}
+ [[fallthrough]];
+ default:
getArrayInitializedElt(I).printPretty(Out, Policy, ElemTy, Ctx);
}
}
@@ -913,7 +990,7 @@ bool APValue::hasLValuePath() const {
ArrayRef<APValue::LValuePathEntry> APValue::getLValuePath() const {
assert(isLValue() && hasLValuePath() && "Invalid accessor");
const LV &LVal = *((const LV *)(const char *)&Data);
- return llvm::makeArrayRef(LVal.getPath(), LVal.PathLength);
+ return llvm::ArrayRef(LVal.getPath(), LVal.PathLength);
}
unsigned APValue::getLValueCallIndex() const {
@@ -991,7 +1068,7 @@ ArrayRef<const CXXRecordDecl*> APValue::getMemberPointerPath() const {
assert(isMemberPointer() && "Invalid accessor");
const MemberPointerData &MPD =
*((const MemberPointerData *)(const char *)&Data);
- return llvm::makeArrayRef(MPD.getPath(), MPD.PathLength);
+ return llvm::ArrayRef(MPD.getPath(), MPD.PathLength);
}
void APValue::MakeLValue() {
@@ -1038,7 +1115,7 @@ LinkageInfo LinkageComputer::getLVForValue(const APValue &V,
auto MergeLV = [&](LinkageInfo MergeLV) {
LV.merge(MergeLV);
- return LV.getLinkage() == InternalLinkage;
+ return LV.getLinkage() == Linkage::Internal;
};
auto Merge = [&](const APValue &V) {
return MergeLV(getLVForValue(V, computation));
diff --git a/contrib/llvm-project/clang/lib/AST/ASTConcept.cpp b/contrib/llvm-project/clang/lib/AST/ASTConcept.cpp
index 549088ad4a8a..b3ec99448b3e 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTConcept.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTConcept.cpp
@@ -1,9 +1,8 @@
//===--- ASTConcept.cpp - Concepts Related AST Data Structures --*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -14,39 +13,54 @@
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTContext.h"
-#include "clang/AST/Decl.h"
-#include "clang/AST/TemplateBase.h"
+#include "clang/AST/PrettyPrinter.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/FoldingSet.h"
+
using namespace clang;
-ASTConstraintSatisfaction::ASTConstraintSatisfaction(const ASTContext &C,
- const ConstraintSatisfaction &Satisfaction):
- NumRecords{Satisfaction.Details.size()},
- IsSatisfied{Satisfaction.IsSatisfied} {
- for (unsigned I = 0; I < NumRecords; ++I) {
- auto &Detail = Satisfaction.Details[I];
- if (Detail.second.is<Expr *>())
- new (getTrailingObjects<UnsatisfiedConstraintRecord>() + I)
- UnsatisfiedConstraintRecord{Detail.first,
- UnsatisfiedConstraintRecord::second_type(
- Detail.second.get<Expr *>())};
- else {
- auto &SubstitutionDiagnostic =
- *Detail.second.get<std::pair<SourceLocation, StringRef> *>();
- unsigned MessageSize = SubstitutionDiagnostic.second.size();
- char *Mem = new (C) char[MessageSize];
- memcpy(Mem, SubstitutionDiagnostic.second.data(), MessageSize);
- auto *NewSubstDiag = new (C) std::pair<SourceLocation, StringRef>(
- SubstitutionDiagnostic.first, StringRef(Mem, MessageSize));
- new (getTrailingObjects<UnsatisfiedConstraintRecord>() + I)
- UnsatisfiedConstraintRecord{Detail.first,
- UnsatisfiedConstraintRecord::second_type(
- NewSubstDiag)};
- }
+namespace {
+void CreatUnsatisfiedConstraintRecord(
+ const ASTContext &C, const UnsatisfiedConstraintRecord &Detail,
+ UnsatisfiedConstraintRecord *TrailingObject) {
+ if (Detail.second.is<Expr *>())
+ new (TrailingObject) UnsatisfiedConstraintRecord{
+ Detail.first,
+ UnsatisfiedConstraintRecord::second_type(Detail.second.get<Expr *>())};
+ else {
+ auto &SubstitutionDiagnostic =
+ *Detail.second.get<std::pair<SourceLocation, StringRef> *>();
+ unsigned MessageSize = SubstitutionDiagnostic.second.size();
+ char *Mem = new (C) char[MessageSize];
+ memcpy(Mem, SubstitutionDiagnostic.second.data(), MessageSize);
+ auto *NewSubstDiag = new (C) std::pair<SourceLocation, StringRef>(
+ SubstitutionDiagnostic.first, StringRef(Mem, MessageSize));
+ new (TrailingObject) UnsatisfiedConstraintRecord{
+ Detail.first, UnsatisfiedConstraintRecord::second_type(NewSubstDiag)};
}
}
+} // namespace
+
+ASTConstraintSatisfaction::ASTConstraintSatisfaction(
+ const ASTContext &C, const ConstraintSatisfaction &Satisfaction)
+ : NumRecords{Satisfaction.Details.size()},
+ IsSatisfied{Satisfaction.IsSatisfied}, ContainsErrors{
+ Satisfaction.ContainsErrors} {
+ for (unsigned I = 0; I < NumRecords; ++I)
+ CreatUnsatisfiedConstraintRecord(
+ C, Satisfaction.Details[I],
+ getTrailingObjects<UnsatisfiedConstraintRecord>() + I);
+}
+ASTConstraintSatisfaction::ASTConstraintSatisfaction(
+ const ASTContext &C, const ASTConstraintSatisfaction &Satisfaction)
+ : NumRecords{Satisfaction.NumRecords},
+ IsSatisfied{Satisfaction.IsSatisfied},
+ ContainsErrors{Satisfaction.ContainsErrors} {
+ for (unsigned I = 0; I < NumRecords; ++I)
+ CreatUnsatisfiedConstraintRecord(
+ C, *(Satisfaction.begin() + I),
+ getTrailingObjects<UnsatisfiedConstraintRecord>() + I);
+}
ASTConstraintSatisfaction *
ASTConstraintSatisfaction::Create(const ASTContext &C,
@@ -58,6 +72,14 @@ ASTConstraintSatisfaction::Create(const ASTContext &C,
return new (Mem) ASTConstraintSatisfaction(C, Satisfaction);
}
+ASTConstraintSatisfaction *ASTConstraintSatisfaction::Rebuild(
+ const ASTContext &C, const ASTConstraintSatisfaction &Satisfaction) {
+ std::size_t size =
+ totalSizeToAlloc<UnsatisfiedConstraintRecord>(Satisfaction.NumRecords);
+ void *Mem = C.Allocate(size, alignof(ASTConstraintSatisfaction));
+ return new (Mem) ASTConstraintSatisfaction(C, Satisfaction);
+}
+
void ConstraintSatisfaction::Profile(
llvm::FoldingSetNodeID &ID, const ASTContext &C,
const NamedDecl *ConstraintOwner, ArrayRef<TemplateArgument> TemplateArgs) {
@@ -66,3 +88,27 @@ void ConstraintSatisfaction::Profile(
for (auto &Arg : TemplateArgs)
Arg.Profile(ID, C);
}
+
+ConceptReference *
+ConceptReference::Create(const ASTContext &C, NestedNameSpecifierLoc NNS,
+ SourceLocation TemplateKWLoc,
+ DeclarationNameInfo ConceptNameInfo,
+ NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
+ const ASTTemplateArgumentListInfo *ArgsAsWritten) {
+ return new (C) ConceptReference(NNS, TemplateKWLoc, ConceptNameInfo,
+ FoundDecl, NamedConcept, ArgsAsWritten);
+}
+
+void ConceptReference::print(llvm::raw_ostream &OS,
+ const PrintingPolicy &Policy) const {
+ if (NestedNameSpec)
+ NestedNameSpec.getNestedNameSpecifier()->print(OS, Policy);
+ ConceptName.printName(OS, Policy);
+ if (hasExplicitTemplateArgs()) {
+ OS << "<";
+ // FIXME: Find corresponding parameter for argument
+ for (auto &ArgLoc : ArgsAsWritten->arguments())
+ ArgLoc.getArgument().print(Policy, OS, /*IncludeType*/ false);
+ OS << ">";
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/AST/ASTContext.cpp b/contrib/llvm-project/clang/lib/AST/ASTContext.cpp
index 9a51cace3c14..cc5de9a6295e 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTContext.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTContext.cpp
@@ -58,6 +58,7 @@
#include "clang/Basic/Module.h"
#include "clang/Basic/NoSanitizeList.h"
#include "clang/Basic/ObjCRuntime.h"
+#include "clang/Basic/ProfileList.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
@@ -71,15 +72,13 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
@@ -87,6 +86,7 @@
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
@@ -94,6 +94,7 @@
#include <cstdlib>
#include <map>
#include <memory>
+#include <optional>
#include <string>
#include <tuple>
#include <utility>
@@ -101,13 +102,20 @@
using namespace clang;
enum FloatingRank {
- BFloat16Rank, Float16Rank, HalfRank, FloatRank, DoubleRank, LongDoubleRank, Float128Rank
+ BFloat16Rank,
+ Float16Rank,
+ HalfRank,
+ FloatRank,
+ DoubleRank,
+ LongDoubleRank,
+ Float128Rank,
+ Ibm128Rank
};
-/// \returns location that is relevant when searching for Doc comments related
-/// to \p D.
-static SourceLocation getDeclLocForCommentSearch(const Decl *D,
- SourceManager &SourceMgr) {
+/// \returns The locations that are relevant when searching for Doc comments
+/// related to \p D.
+static SmallVector<SourceLocation, 2>
+getDeclLocsForCommentSearch(const Decl *D, SourceManager &SourceMgr) {
assert(D);
// User can not attach documentation to implicit declarations.
@@ -159,42 +167,48 @@ static SourceLocation getDeclLocForCommentSearch(const Decl *D,
isa<TemplateTemplateParmDecl>(D))
return {};
+ SmallVector<SourceLocation, 2> Locations;
// Find declaration location.
// For Objective-C declarations we generally don't expect to have multiple
// declarators, thus use declaration starting location as the "declaration
// location".
// For all other declarations multiple declarators are used quite frequently,
// so we use the location of the identifier as the "declaration location".
+ SourceLocation BaseLocation;
if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
- isa<ObjCPropertyDecl>(D) ||
- isa<RedeclarableTemplateDecl>(D) ||
+ isa<ObjCPropertyDecl>(D) || isa<RedeclarableTemplateDecl>(D) ||
isa<ClassTemplateSpecializationDecl>(D) ||
// Allow association with Y across {} in `typedef struct X {} Y`.
isa<TypedefDecl>(D))
- return D->getBeginLoc();
- else {
- const SourceLocation DeclLoc = D->getLocation();
- if (DeclLoc.isMacroID()) {
- if (isa<TypedefDecl>(D)) {
- // If location of the typedef name is in a macro, it is because being
- // declared via a macro. Try using declaration's starting location as
- // the "declaration location".
- return D->getBeginLoc();
- } else if (const auto *TD = dyn_cast<TagDecl>(D)) {
- // If location of the tag decl is inside a macro, but the spelling of
- // the tag name comes from a macro argument, it looks like a special
- // macro like NS_ENUM is being used to define the tag decl. In that
- // case, adjust the source location to the expansion loc so that we can
- // attach the comment to the tag decl.
- if (SourceMgr.isMacroArgExpansion(DeclLoc) &&
- TD->isCompleteDefinition())
- return SourceMgr.getExpansionLoc(DeclLoc);
- }
+ BaseLocation = D->getBeginLoc();
+ else
+ BaseLocation = D->getLocation();
+
+ if (!D->getLocation().isMacroID()) {
+ Locations.emplace_back(BaseLocation);
+ } else {
+ const auto *DeclCtx = D->getDeclContext();
+
+ // When encountering definitions generated from a macro (that are not
+ // contained by another declaration in the macro) we need to try and find
+ // the comment at the location of the expansion but if there is no comment
+ // there we should retry to see if there is a comment inside the macro as
+ // well. To this end we return first BaseLocation to first look at the
+ // expansion site, the second value is the spelling location of the
+ // beginning of the declaration defined inside the macro.
+ if (!(DeclCtx &&
+ Decl::castFromDeclContext(DeclCtx)->getLocation().isMacroID())) {
+ Locations.emplace_back(SourceMgr.getExpansionLoc(BaseLocation));
}
- return DeclLoc;
+
+ // We use Decl::getBeginLoc() and not just BaseLocation here to ensure that
+ // we don't refer to the macro argument location at the expansion site (this
+ // can happen if the name's spelling is provided via macro argument), and
+ // always to the declaration itself.
+ Locations.emplace_back(SourceMgr.getSpellingLoc(D->getBeginLoc()));
}
- return {};
+ return Locations;
}
RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
@@ -269,34 +283,43 @@ RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
// There should be no other declarations or preprocessor directives between
// comment and declaration.
- if (Text.find_first_of(";{}#@") != StringRef::npos)
+ if (Text.find_last_of(";{}#@") != StringRef::npos)
return nullptr;
return CommentBeforeDecl;
}
RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
- const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);
+ const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);
- // If the declaration doesn't map directly to a location in a file, we
- // can't find the comment.
- if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
- return nullptr;
+ for (const auto DeclLoc : DeclLocs) {
+ // If the declaration doesn't map directly to a location in a file, we
+ // can't find the comment.
+ if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
+ continue;
- if (ExternalSource && !CommentsLoaded) {
- ExternalSource->ReadComments();
- CommentsLoaded = true;
- }
+ if (ExternalSource && !CommentsLoaded) {
+ ExternalSource->ReadComments();
+ CommentsLoaded = true;
+ }
- if (Comments.empty())
- return nullptr;
+ if (Comments.empty())
+ continue;
- const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
- const auto CommentsInThisFile = Comments.getCommentsInFile(File);
- if (!CommentsInThisFile || CommentsInThisFile->empty())
- return nullptr;
+ const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
+ if (!File.isValid())
+ continue;
- return getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile);
+ const auto CommentsInThisFile = Comments.getCommentsInFile(File);
+ if (!CommentsInThisFile || CommentsInThisFile->empty())
+ continue;
+
+ if (RawComment *Comment =
+ getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile))
+ return Comment;
+ }
+
+ return nullptr;
}
void ASTContext::addComment(const RawComment &RC) {
@@ -416,10 +439,7 @@ const RawComment *ASTContext::getRawCommentForAnyRedecl(
// Any redeclarations of D that we haven't checked for comments yet?
// We can't use DenseMap::iterator directly since it'd get invalid.
auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * {
- auto LookupRes = CommentlessRedeclChains.find(CanonicalD);
- if (LookupRes != CommentlessRedeclChains.end())
- return LookupRes->second;
- return nullptr;
+ return CommentlessRedeclChains.lookup(CanonicalD);
}();
for (const auto Redecl : D->redecls()) {
@@ -478,7 +498,11 @@ void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
return;
FileID File;
- for (Decl *D : Decls) {
+ for (const Decl *D : Decls) {
+ if (D->isInvalidDecl())
+ continue;
+
+ D = &adjustDeclToTemplate(*D);
SourceLocation Loc = D->getLocation();
if (Loc.isValid()) {
// See if there are any new comments that are not attached to a decl.
@@ -503,7 +527,6 @@ void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
// declaration, but also comments that *follow* the declaration -- thanks to
// the lookahead in the lexer: we've consumed the semicolon and looked
// ahead through comments.
-
for (const Decl *D : Decls) {
assert(D);
if (D->isInvalidDecl())
@@ -511,19 +534,22 @@ void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
D = &adjustDeclToTemplate(*D);
- const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);
-
- if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
- continue;
-
if (DeclRawComments.count(D) > 0)
continue;
- if (RawComment *const DocComment =
- getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile)) {
- cacheRawCommentForDecl(*D, *DocComment);
- comments::FullComment *FC = DocComment->parse(*this, PP, D);
- ParsedComments[D->getCanonicalDecl()] = FC;
+ const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);
+
+ for (const auto DeclLoc : DeclLocs) {
+ if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
+ continue;
+
+ if (RawComment *const DocComment = getRawCommentForDeclNoCacheImpl(
+ D, DeclLoc, *CommentsInThisFile)) {
+ cacheRawCommentForDecl(*D, *DocComment);
+ comments::FullComment *FC = DocComment->parse(*this, PP, D);
+ ParsedComments[D->getCanonicalDecl()] = FC;
+ break;
+ }
}
}
}
@@ -671,11 +697,6 @@ ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
ID.AddInteger(0);
ID.AddBoolean(TTP->isParameterPack());
- const TypeConstraint *TC = TTP->getTypeConstraint();
- ID.AddBoolean(TC != nullptr);
- if (TC)
- TC->getImmediatelyDeclaredConstraint()->Profile(ID, C,
- /*Canonical=*/true);
if (TTP->isExpandedParameterPack()) {
ID.AddBoolean(true);
ID.AddInteger(TTP->getNumExpansionParameters());
@@ -687,7 +708,8 @@ ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
ID.AddInteger(1);
ID.AddBoolean(NTTP->isParameterPack());
- ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
+ ID.AddPointer(C.getUnconstrainedType(C.getCanonicalType(NTTP->getType()))
+ .getAsOpaquePtr());
if (NTTP->isExpandedParameterPack()) {
ID.AddBoolean(true);
ID.AddInteger(NTTP->getNumExpansionTypes());
@@ -704,61 +726,6 @@ ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
ID.AddInteger(2);
Profile(ID, C, TTP);
}
- Expr *RequiresClause = Parm->getTemplateParameters()->getRequiresClause();
- ID.AddBoolean(RequiresClause != nullptr);
- if (RequiresClause)
- RequiresClause->Profile(ID, C, /*Canonical=*/true);
-}
-
-static Expr *
-canonicalizeImmediatelyDeclaredConstraint(const ASTContext &C, Expr *IDC,
- QualType ConstrainedType) {
- // This is a bit ugly - we need to form a new immediately-declared
- // constraint that references the new parameter; this would ideally
- // require semantic analysis (e.g. template<C T> struct S {}; - the
- // converted arguments of C<T> could be an argument pack if C is
- // declared as template<typename... T> concept C = ...).
- // We don't have semantic analysis here so we dig deep into the
- // ready-made constraint expr and change the thing manually.
- ConceptSpecializationExpr *CSE;
- if (const auto *Fold = dyn_cast<CXXFoldExpr>(IDC))
- CSE = cast<ConceptSpecializationExpr>(Fold->getLHS());
- else
- CSE = cast<ConceptSpecializationExpr>(IDC);
- ArrayRef<TemplateArgument> OldConverted = CSE->getTemplateArguments();
- SmallVector<TemplateArgument, 3> NewConverted;
- NewConverted.reserve(OldConverted.size());
- if (OldConverted.front().getKind() == TemplateArgument::Pack) {
- // The case:
- // template<typename... T> concept C = true;
- // template<C<int> T> struct S; -> constraint is C<{T, int}>
- NewConverted.push_back(ConstrainedType);
- for (auto &Arg : OldConverted.front().pack_elements().drop_front(1))
- NewConverted.push_back(Arg);
- TemplateArgument NewPack(NewConverted);
-
- NewConverted.clear();
- NewConverted.push_back(NewPack);
- assert(OldConverted.size() == 1 &&
- "Template parameter pack should be the last parameter");
- } else {
- assert(OldConverted.front().getKind() == TemplateArgument::Type &&
- "Unexpected first argument kind for immediately-declared "
- "constraint");
- NewConverted.push_back(ConstrainedType);
- for (auto &Arg : OldConverted.drop_front(1))
- NewConverted.push_back(Arg);
- }
- Expr *NewIDC = ConceptSpecializationExpr::Create(
- C, CSE->getNamedConcept(), NewConverted, nullptr,
- CSE->isInstantiationDependent(), CSE->containsUnexpandedParameterPack());
-
- if (auto *OrigFold = dyn_cast<CXXFoldExpr>(IDC))
- NewIDC = new (C) CXXFoldExpr(
- OrigFold->getType(), /*Callee*/nullptr, SourceLocation(), NewIDC,
- BinaryOperatorKind::BO_LAnd, SourceLocation(), /*RHS=*/nullptr,
- SourceLocation(), /*NumExpansions=*/None);
- return NewIDC;
}
TemplateTemplateParmDecl *
@@ -780,35 +747,19 @@ ASTContext::getCanonicalTemplateTemplateParmDecl(
for (TemplateParameterList::const_iterator P = Params->begin(),
PEnd = Params->end();
P != PEnd; ++P) {
+ // Note that, per C++20 [temp.over.link]/6, when determining whether
+ // template-parameters are equivalent, constraints are ignored.
if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
- TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(*this,
- getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
+ TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(
+ *this, getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
TTP->getDepth(), TTP->getIndex(), nullptr, false,
- TTP->isParameterPack(), TTP->hasTypeConstraint(),
- TTP->isExpandedParameterPack() ?
- llvm::Optional<unsigned>(TTP->getNumExpansionParameters()) : None);
- if (const auto *TC = TTP->getTypeConstraint()) {
- QualType ParamAsArgument(NewTTP->getTypeForDecl(), 0);
- Expr *NewIDC = canonicalizeImmediatelyDeclaredConstraint(
- *this, TC->getImmediatelyDeclaredConstraint(),
- ParamAsArgument);
- TemplateArgumentListInfo CanonArgsAsWritten;
- if (auto *Args = TC->getTemplateArgsAsWritten())
- for (const auto &ArgLoc : Args->arguments())
- CanonArgsAsWritten.addArgument(
- TemplateArgumentLoc(ArgLoc.getArgument(),
- TemplateArgumentLocInfo()));
- NewTTP->setTypeConstraint(
- NestedNameSpecifierLoc(),
- DeclarationNameInfo(TC->getNamedConcept()->getDeclName(),
- SourceLocation()), /*FoundDecl=*/nullptr,
- // Actually canonicalizing a TemplateArgumentLoc is difficult so we
- // simply omit the ArgsAsWritten
- TC->getNamedConcept(), /*ArgsAsWritten=*/nullptr, NewIDC);
- }
+ TTP->isParameterPack(), /*HasTypeConstraint=*/false,
+ TTP->isExpandedParameterPack()
+ ? std::optional<unsigned>(TTP->getNumExpansionParameters())
+ : std::nullopt);
CanonParams.push_back(NewTTP);
} else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
- QualType T = getCanonicalType(NTTP->getType());
+ QualType T = getUnconstrainedType(getCanonicalType(NTTP->getType()));
TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
NonTypeTemplateParmDecl *Param;
if (NTTP->isExpandedParameterPack()) {
@@ -839,35 +790,18 @@ ASTContext::getCanonicalTemplateTemplateParmDecl(
NTTP->isParameterPack(),
TInfo);
}
- if (AutoType *AT = T->getContainedAutoType()) {
- if (AT->isConstrained()) {
- Param->setPlaceholderTypeConstraint(
- canonicalizeImmediatelyDeclaredConstraint(
- *this, NTTP->getPlaceholderTypeConstraint(), T));
- }
- }
CanonParams.push_back(Param);
-
} else
CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
cast<TemplateTemplateParmDecl>(*P)));
}
- Expr *CanonRequiresClause = nullptr;
- if (Expr *RequiresClause = TTP->getTemplateParameters()->getRequiresClause())
- CanonRequiresClause = RequiresClause;
-
- TemplateTemplateParmDecl *CanonTTP
- = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
- SourceLocation(), TTP->getDepth(),
- TTP->getPosition(),
- TTP->isParameterPack(),
- nullptr,
- TemplateParameterList::Create(*this, SourceLocation(),
- SourceLocation(),
- CanonParams,
- SourceLocation(),
- CanonRequiresClause));
+ TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create(
+ *this, getTranslationUnitDecl(), SourceLocation(), TTP->getDepth(),
+ TTP->getPosition(), TTP->isParameterPack(), nullptr,
+ TemplateParameterList::Create(*this, SourceLocation(), SourceLocation(),
+ CanonParams, SourceLocation(),
+ /*RequiresClause=*/nullptr));
// Get the new insert position for the node we care about.
Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
@@ -882,7 +816,7 @@ ASTContext::getCanonicalTemplateTemplateParmDecl(
TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
auto Kind = getTargetInfo().getCXXABI().getKind();
- return getLangOpts().CXXABI.getValueOr(Kind);
+ return getLangOpts().CXXABI.value_or(Kind);
}
CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
@@ -919,38 +853,6 @@ ParentMapContext &ASTContext::getParentMapContext() {
return *ParentMapCtx.get();
}
-static const LangASMap *getAddressSpaceMap(const TargetInfo &T,
- const LangOptions &LOpts) {
- if (LOpts.FakeAddressSpaceMap) {
- // The fake address space map must have a distinct entry for each
- // language-specific address space.
- static const unsigned FakeAddrSpaceMap[] = {
- 0, // Default
- 1, // opencl_global
- 3, // opencl_local
- 2, // opencl_constant
- 0, // opencl_private
- 4, // opencl_generic
- 5, // opencl_global_device
- 6, // opencl_global_host
- 7, // cuda_device
- 8, // cuda_constant
- 9, // cuda_shared
- 1, // sycl_global
- 5, // sycl_global_device
- 6, // sycl_global_host
- 3, // sycl_local
- 0, // sycl_private
- 10, // ptr32_sptr
- 11, // ptr32_uptr
- 12 // ptr64
- };
- return &FakeAddrSpaceMap;
- } else {
- return &T.getAddressSpaceMap();
- }
-}
-
static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
const LangOptions &LangOpts) {
switch (LangOpts.getAddressSpaceMapMangling()) {
@@ -967,10 +869,15 @@ static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
IdentifierTable &idents, SelectorTable &sels,
Builtin::Context &builtins, TranslationUnitKind TUKind)
- : ConstantArrayTypes(this_()), FunctionProtoTypes(this_()),
+ : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
+ DependentSizedArrayTypes(this_()), DependentSizedExtVectorTypes(this_()),
+ DependentAddressSpaceTypes(this_()), DependentVectorTypes(this_()),
+ DependentSizedMatrixTypes(this_()),
+ FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
+ DependentTypeOfExprTypes(this_()), DependentDecltypeTypes(this_()),
TemplateSpecializationTypes(this_()),
DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
- SubstTemplateTemplateParmPacks(this_()),
+ DependentBitIntTypes(this_()), SubstTemplateTemplateParmPacks(this_()),
CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
@@ -984,7 +891,7 @@ ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
addTranslationUnitDecl();
}
-ASTContext::~ASTContext() {
+void ASTContext::cleanup() {
// Release the DenseMaps associated with DeclContext objects.
// FIXME: Is this the ideal solution?
ReleaseDeclContextMaps();
@@ -992,6 +899,7 @@ ASTContext::~ASTContext() {
// Call all of the deallocation functions on all of their targets.
for (auto &Pair : Deallocations)
(Pair.first)(Pair.second);
+ Deallocations.clear();
// ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
// because they can contain DenseMaps.
@@ -1001,6 +909,7 @@ ASTContext::~ASTContext() {
// Increment in loop to prevent using deallocated memory.
if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
R->Destroy(*this);
+ ObjCLayouts.clear();
for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
@@ -1008,16 +917,21 @@ ASTContext::~ASTContext() {
if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
R->Destroy(*this);
}
+ ASTRecordLayouts.clear();
for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
AEnd = DeclAttrs.end();
A != AEnd; ++A)
A->second->~AttrVec();
+ DeclAttrs.clear();
for (const auto &Value : ModuleInitializers)
Value.second->~PerModuleInitializers();
+ ModuleInitializers.clear();
}
+ASTContext::~ASTContext() { cleanup(); }
+
void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
TraversalScope = TopLevelDecls;
getParentMapContext().clear();
@@ -1112,7 +1026,7 @@ void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
for (Module *&M : Merged)
if (!Found.insert(M).second)
M = nullptr;
- Merged.erase(std::remove(Merged.begin(), Merged.end(), nullptr), Merged.end());
+ llvm::erase(Merged, nullptr);
}
ArrayRef<Module *>
@@ -1120,7 +1034,7 @@ ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
auto MergedIt =
MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
if (MergedIt == MergedDefModules.end())
- return None;
+ return std::nullopt;
return MergedIt->second;
}
@@ -1178,13 +1092,20 @@ void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) {
ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
auto It = ModuleInitializers.find(M);
if (It == ModuleInitializers.end())
- return None;
+ return std::nullopt;
auto *Inits = It->second;
Inits->resolve(*this);
return Inits->Initializers;
}
+void ASTContext::setCurrentNamedModule(Module *M) {
+ assert(M->isNamedModule());
+ assert(!CurrentCXXNamedModule &&
+ "We should set named module for ASTContext for only once");
+ CurrentCXXNamedModule = M;
+}
+
ExternCContextDecl *ASTContext::getExternCContextDecl() const {
if (!ExternCContext)
ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());
@@ -1258,7 +1179,7 @@ TypedefDecl *ASTContext::getUInt128Decl() const {
}
void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
- auto *Ty = new (*this, TypeAlignment) BuiltinType(K);
+ auto *Ty = new (*this, alignof(BuiltinType)) BuiltinType(K);
R = CanQualType::CreateUnsafe(QualType(Ty, 0));
Types.push_back(Ty);
}
@@ -1273,7 +1194,6 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
this->AuxTarget = AuxTarget;
ABI.reset(createCXXABI(Target));
- AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);
AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);
// C99 6.2.5p19.
@@ -1308,6 +1228,9 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
// GNU extension, __float128 for IEEE quadruple precision
InitBuiltinType(Float128Ty, BuiltinType::Float128);
+ // __ibm128 for IBM extended precision
+ InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128);
+
// C11 extension ISO/IEC TS 18661-3
InitBuiltinType(Float16Ty, BuiltinType::Float16);
@@ -1399,15 +1322,16 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
}
+ // Placeholder type for OpenACC array sections.
+ if (LangOpts.OpenACC) {
+ // FIXME: Once we implement OpenACC array sections in Sema, this will either
+ // be combined with the OpenMP type, or given its own type. In the meantime,
+ // just use the OpenMP type so that parsing can work.
+ InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
+ }
if (LangOpts.MatrixTypes)
InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);
- // C99 6.2.5p11.
- FloatComplexTy = getComplexType(FloatTy);
- DoubleComplexTy = getComplexType(DoubleTy);
- LongDoubleComplexTy = getComplexType(LongDoubleTy);
- Float128ComplexTy = getComplexType(Float128Ty);
-
// Builtin types for 'id', 'Class', and 'SEL'.
InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
@@ -1435,13 +1359,10 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
#include "clang/Basic/AArch64SVEACLETypes.def"
}
- if (Target.getTriple().isPPC64() &&
- Target.hasFeature("paired-vector-memops")) {
- if (Target.hasFeature("mma")) {
+ if (Target.getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
- }
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
@@ -1453,6 +1374,12 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
#include "clang/Basic/RISCVVTypes.def"
}
+ if (Target.getTriple().isWasm() && Target.hasFeature("reference-types")) {
+#define WASM_TYPE(Name, Id, SingletonId) \
+ InitBuiltinType(SingletonId, BuiltinType::Id);
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
+ }
+
// Builtin type for __objc_yes and __objc_no
ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
SignedCharTy : BoolTy);
@@ -1550,11 +1477,7 @@ ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
NamedDecl *
ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
- auto Pos = InstantiatedFromUsingDecl.find(UUD);
- if (Pos == InstantiatedFromUsingDecl.end())
- return nullptr;
-
- return Pos->second;
+ return InstantiatedFromUsingDecl.lookup(UUD);
}
void
@@ -1573,11 +1496,7 @@ ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
UsingEnumDecl *
ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
- auto Pos = InstantiatedFromUsingEnumDecl.find(UUD);
- if (Pos == InstantiatedFromUsingEnumDecl.end())
- return nullptr;
-
- return Pos->second;
+ return InstantiatedFromUsingEnumDecl.lookup(UUD);
}
void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
@@ -1588,12 +1507,7 @@ void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
UsingShadowDecl *
ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
- llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
- = InstantiatedFromUsingShadowDecl.find(Inst);
- if (Pos == InstantiatedFromUsingShadowDecl.end())
- return nullptr;
-
- return Pos->second;
+ return InstantiatedFromUsingShadowDecl.lookup(Inst);
}
void
@@ -1604,12 +1518,7 @@ ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
}
FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
- llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos
- = InstantiatedFromUnnamedFieldDecl.find(Field);
- if (Pos == InstantiatedFromUnnamedFieldDecl.end())
- return nullptr;
-
- return Pos->second;
+ return InstantiatedFromUnnamedFieldDecl.lookup(Field);
}
void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
@@ -1700,16 +1609,27 @@ const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
case BuiltinType::BFloat16:
return Target->getBFloat16Format();
case BuiltinType::Float16:
- case BuiltinType::Half:
return Target->getHalfFormat();
+ case BuiltinType::Half:
+ // For HLSL, when the native half type is disabled, half will be treat as
+ // float.
+ if (getLangOpts().HLSL)
+ if (getLangOpts().NativeHalfType)
+ return Target->getHalfFormat();
+ else
+ return Target->getFloatFormat();
+ else
+ return Target->getHalfFormat();
case BuiltinType::Float: return Target->getFloatFormat();
case BuiltinType::Double: return Target->getDoubleFormat();
+ case BuiltinType::Ibm128:
+ return Target->getIbm128Format();
case BuiltinType::LongDouble:
- if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
+ if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
return AuxTarget->getLongDoubleFormat();
return Target->getLongDoubleFormat();
case BuiltinType::Float128:
- if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice)
+ if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
return AuxTarget->getFloat128Format();
return Target->getFloat128Format();
}
@@ -1718,28 +1638,22 @@ const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
unsigned Align = Target->getCharWidth();
- bool UseAlignAttrOnly = false;
- if (unsigned AlignFromAttr = D->getMaxAlignment()) {
+ const unsigned AlignFromAttr = D->getMaxAlignment();
+ if (AlignFromAttr)
Align = AlignFromAttr;
- // __attribute__((aligned)) can increase or decrease alignment
- // *except* on a struct or struct member, where it only increases
- // alignment unless 'packed' is also specified.
- //
- // It is an error for alignas to decrease alignment, so we can
- // ignore that possibility; Sema should diagnose it.
- if (isa<FieldDecl>(D)) {
- UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
- cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
- } else {
- UseAlignAttrOnly = true;
- }
- }
- else if (isa<FieldDecl>(D))
- UseAlignAttrOnly =
- D->hasAttr<PackedAttr>() ||
- cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
-
+ // __attribute__((aligned)) can increase or decrease alignment
+ // *except* on a struct or struct member, where it only increases
+ // alignment unless 'packed' is also specified.
+ //
+ // It is an error for alignas to decrease alignment, so we can
+ // ignore that possibility; Sema should diagnose it.
+ bool UseAlignAttrOnly;
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(D))
+ UseAlignAttrOnly =
+ FD->hasAttr<PackedAttr>() || FD->getParent()->hasAttr<PackedAttr>();
+ else
+ UseAlignAttrOnly = AlignFromAttr != 0;
// If we're using the align attribute only, just ignore everything
// else about the declaration and its type.
if (UseAlignAttrOnly) {
@@ -1771,14 +1685,16 @@ CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
if (BaseT.getQualifiers().hasUnaligned())
Align = Target->getCharWidth();
- if (const auto *VD = dyn_cast<VarDecl>(D)) {
- if (VD->hasGlobalStorage() && !ForAlignof) {
- uint64_t TypeSize = getTypeSize(T.getTypePtr());
- Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize));
- }
- }
}
+ // Ensure miminum alignment for global variables.
+ if (const auto *VD = dyn_cast<VarDecl>(D))
+ if (VD->hasGlobalStorage() && !ForAlignof) {
+ uint64_t TypeSize =
+ !BaseT->isIncompleteType() ? getTypeSize(T.getTypePtr()) : 0;
+ Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize));
+ }
+
// Fields can be subject to extra alignment constraints, like if
// the field is packed, the struct is packed, or the struct has a
// a max-field-alignment constraint (#pragma pack). So calculate
@@ -1855,11 +1771,11 @@ static getConstantArrayInfoInChars(const ASTContext &Context,
uint64_t Width = EltInfo.Width.getQuantity() * Size;
unsigned Align = EltInfo.Align.getQuantity();
if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
- Context.getTargetInfo().getPointerWidth(0) == 64)
+ Context.getTargetInfo().getPointerWidth(LangAS::Default) == 64)
Width = llvm::alignTo(Width, Align);
return TypeInfoChars(CharUnits::fromQuantity(Width),
CharUnits::fromQuantity(Align),
- EltInfo.AlignIsRequired);
+ EltInfo.AlignRequirement);
}
TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const {
@@ -1867,16 +1783,53 @@ TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const {
return getConstantArrayInfoInChars(*this, CAT);
TypeInfo Info = getTypeInfo(T);
return TypeInfoChars(toCharUnitsFromBits(Info.Width),
- toCharUnitsFromBits(Info.Align),
- Info.AlignIsRequired);
+ toCharUnitsFromBits(Info.Align), Info.AlignRequirement);
}
TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const {
return getTypeInfoInChars(T.getTypePtr());
}
+bool ASTContext::isPromotableIntegerType(QualType T) const {
+ // HLSL doesn't promote all small integer types to int, it
+ // just uses the rank-based promotion rules for all types.
+ if (getLangOpts().HLSL)
+ return false;
+
+ if (const auto *BT = T->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Bool:
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ case BuiltinType::Char8:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ return true;
+ default:
+ return false;
+ }
+
+ // Enumerated types are promotable to their compatible integer types
+ // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2).
+ if (const auto *ET = T->getAs<EnumType>()) {
+ if (T->isDependentType() || ET->getDecl()->getPromotionType().isNull() ||
+ ET->getDecl()->isScoped())
+ return false;
+
+ return true;
+ }
+
+ return false;
+}
+
bool ASTContext::isAlignmentRequired(const Type *T) const {
- return getTypeInfo(T).AlignIsRequired;
+ return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None;
}
bool ASTContext::isAlignmentRequired(QualType T) const {
@@ -1928,8 +1881,8 @@ TypeInfo ASTContext::getTypeInfo(const Type *T) const {
TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
uint64_t Width = 0;
unsigned Align = 8;
- bool AlignIsRequired = false;
- unsigned AS = 0;
+ AlignRequirementKind AlignRequirement = AlignRequirementKind::None;
+ LangAS AS = LangAS::Default;
switch (T->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
@@ -1962,9 +1915,9 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
"Overflow in array type bit size evaluation");
Width = EltInfo.Width * Size;
Align = EltInfo.Align;
- AlignIsRequired = EltInfo.AlignIsRequired;
+ AlignRequirement = EltInfo.AlignRequirement;
if (!getTargetInfo().getCXXABI().isMicrosoft() ||
- getTargetInfo().getPointerWidth(0) == 64)
+ getTargetInfo().getPointerWidth(LangAS::Default) == 64)
Width = llvm::alignTo(Width, Align);
break;
}
@@ -1973,25 +1926,33 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
case Type::Vector: {
const auto *VT = cast<VectorType>(T);
TypeInfo EltInfo = getTypeInfo(VT->getElementType());
- Width = EltInfo.Width * VT->getNumElements();
- Align = Width;
+ Width = VT->isExtVectorBoolType() ? VT->getNumElements()
+ : EltInfo.Width * VT->getNumElements();
+ // Enforce at least byte size and alignment.
+ Width = std::max<unsigned>(8, Width);
+ Align = std::max<unsigned>(8, Width);
+
// If the alignment is not a power of 2, round up to the next power of 2.
// This happens for non-power-of-2 length vectors.
if (Align & (Align-1)) {
- Align = llvm::NextPowerOf2(Align);
+ Align = llvm::bit_ceil(Align);
Width = llvm::alignTo(Width, Align);
}
// Adjust the alignment based on the target max.
uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
if (TargetVectorAlign && TargetVectorAlign < Align)
Align = TargetVectorAlign;
- if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector)
+ if (VT->getVectorKind() == VectorKind::SveFixedLengthData)
// Adjust the alignment for fixed-length SVE vectors. This is important
// for non-power-of-2 vector lengths.
Align = 128;
- else if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
+ else if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
// Adjust the alignment for fixed-length SVE predicates.
Align = 16;
+ else if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
+ VT->getVectorKind() == VectorKind::RVVFixedLengthMask)
+ // Adjust the alignment for fixed-length RVV vectors.
+ Align = std::min<unsigned>(64, Width);
break;
}
@@ -2062,7 +2023,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
case BuiltinType::Int128:
case BuiltinType::UInt128:
Width = 128;
- Align = 128; // int128_t is 128-bit aligned on all targets.
+ Align = Target->getInt128Align();
break;
case BuiltinType::ShortAccum:
case BuiltinType::UShortAccum:
@@ -2107,17 +2068,25 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
Align = Target->getLongFractAlign();
break;
case BuiltinType::BFloat16:
- Width = Target->getBFloat16Width();
- Align = Target->getBFloat16Align();
+ if (Target->hasBFloat16Type()) {
+ Width = Target->getBFloat16Width();
+ Align = Target->getBFloat16Align();
+ } else if ((getLangOpts().SYCLIsDevice ||
+ (getLangOpts().OpenMP &&
+ getLangOpts().OpenMPIsTargetDevice)) &&
+ AuxTarget->hasBFloat16Type()) {
+ Width = AuxTarget->getBFloat16Width();
+ Align = AuxTarget->getBFloat16Align();
+ }
break;
case BuiltinType::Float16:
case BuiltinType::Half:
if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
- !getLangOpts().OpenMPIsDevice) {
+ !getLangOpts().OpenMPIsTargetDevice) {
Width = Target->getHalfWidth();
Align = Target->getHalfAlign();
} else {
- assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
+ assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
"Expected OpenMP device compilation.");
Width = AuxTarget->getHalfWidth();
Align = AuxTarget->getHalfAlign();
@@ -2131,8 +2100,12 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
Width = Target->getDoubleWidth();
Align = Target->getDoubleAlign();
break;
+ case BuiltinType::Ibm128:
+ Width = Target->getIbm128Width();
+ Align = Target->getIbm128Align();
+ break;
case BuiltinType::LongDouble:
- if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
+ if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
(Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() ||
Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) {
Width = AuxTarget->getLongDoubleWidth();
@@ -2144,25 +2117,26 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
break;
case BuiltinType::Float128:
if (Target->hasFloat128Type() || !getLangOpts().OpenMP ||
- !getLangOpts().OpenMPIsDevice) {
+ !getLangOpts().OpenMPIsTargetDevice) {
Width = Target->getFloat128Width();
Align = Target->getFloat128Align();
} else {
- assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
+ assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
"Expected OpenMP device compilation.");
Width = AuxTarget->getFloat128Width();
Align = AuxTarget->getFloat128Align();
}
break;
case BuiltinType::NullPtr:
- Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t)
- Align = Target->getPointerAlign(0); // == sizeof(void*)
+ // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*)
+ Width = Target->getPointerWidth(LangAS::Default);
+ Align = Target->getPointerAlign(LangAS::Default);
break;
case BuiltinType::ObjCId:
case BuiltinType::ObjCClass:
case BuiltinType::ObjCSel:
- Width = Target->getPointerWidth(0);
- Align = Target->getPointerAlign(0);
+ Width = Target->getPointerWidth(LangAS::Default);
+ Align = Target->getPointerAlign(LangAS::Default);
break;
case BuiltinType::OCLSampler:
case BuiltinType::OCLEvent:
@@ -2175,8 +2149,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
- AS = getTargetAddressSpace(
- Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)));
+ AS = Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T));
Width = Target->getPointerWidth(AS);
Align = Target->getPointerAlign(AS);
break;
@@ -2199,6 +2172,11 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
Width = 0; \
Align = 16; \
break;
+#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId) \
+ case BuiltinType::Id: \
+ Width = 0; \
+ Align = 16; \
+ break;
#include "clang/Basic/AArch64SVEACLETypes.def"
#define PPC_VECTOR_TYPE(Name, Id, Size) \
case BuiltinType::Id: \
@@ -2207,7 +2185,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
break;
#include "clang/Basic/PPCTypes.def"
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \
- IsFP) \
+ IsFP, IsBF) \
case BuiltinType::Id: \
Width = 0; \
Align = ElBits; \
@@ -2218,14 +2196,20 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
Align = 8; \
break;
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) \
+ case BuiltinType::Id: \
+ Width = 0; \
+ Align = 8; \
+ break;
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
}
break;
case Type::ObjCObjectPointer:
- Width = Target->getPointerWidth(0);
- Align = Target->getPointerAlign(0);
+ Width = Target->getPointerWidth(LangAS::Default);
+ Align = Target->getPointerAlign(LangAS::Default);
break;
case Type::BlockPointer:
- AS = getTargetAddressSpace(cast<BlockPointerType>(T)->getPointeeType());
+ AS = cast<BlockPointerType>(T)->getPointeeType().getAddressSpace();
Width = Target->getPointerWidth(AS);
Align = Target->getPointerAlign(AS);
break;
@@ -2233,12 +2217,12 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
case Type::RValueReference:
// alignof and sizeof should never enter this code path here, so we go
// the pointer route.
- AS = getTargetAddressSpace(cast<ReferenceType>(T)->getPointeeType());
+ AS = cast<ReferenceType>(T)->getPointeeType().getAddressSpace();
Width = Target->getPointerWidth(AS);
Align = Target->getPointerAlign(AS);
break;
case Type::Pointer:
- AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType());
+ AS = cast<PointerType>(T)->getPointeeType().getAddressSpace();
Width = Target->getPointerWidth(AS);
Align = Target->getPointerAlign(AS);
break;
@@ -2274,12 +2258,10 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
Align = toBits(Layout.getAlignment());
break;
}
- case Type::ExtInt: {
- const auto *EIT = cast<ExtIntType>(T);
- Align =
- std::min(static_cast<unsigned>(std::max(
- getCharWidth(), llvm::PowerOf2Ceil(EIT->getNumBits()))),
- Target->getLongLongAlign());
+ case Type::BitInt: {
+ const auto *EIT = cast<BitIntType>(T);
+ Align = std::clamp<unsigned>(llvm::PowerOf2Ceil(EIT->getNumBits()),
+ getCharWidth(), Target->getLongLongAlign());
Width = llvm::alignTo(EIT->getNumBits(), Align);
break;
}
@@ -2299,7 +2281,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType());
if (unsigned AttrAlign = ED->getMaxAlignment()) {
Info.Align = AttrAlign;
- Info.AlignIsRequired = true;
+ Info.AlignRequirement = AlignRequirementKind::RequiredByEnum;
}
return Info;
}
@@ -2309,7 +2291,9 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
const ASTRecordLayout &Layout = getASTRecordLayout(RD);
Width = toBits(Layout.getSize());
Align = toBits(Layout.getAlignment());
- AlignIsRequired = RD->hasAttr<AlignedAttr>();
+ AlignRequirement = RD->hasAttr<AlignedAttr>()
+ ? AlignRequirementKind::RequiredByRecord
+ : AlignRequirementKind::None;
break;
}
@@ -2335,18 +2319,21 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
case Type::ObjCTypeParam:
return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr());
+ case Type::Using:
+ return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr());
+
case Type::Typedef: {
- const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl();
- TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr());
+ const auto *TT = cast<TypedefType>(T);
+ TypeInfo Info = getTypeInfo(TT->desugar().getTypePtr());
// If the typedef has an aligned attribute on it, it overrides any computed
// alignment we have. This violates the GCC documentation (which says that
// attribute(aligned) can only round up) but matches its implementation.
- if (unsigned AttrAlign = Typedef->getMaxAlignment()) {
+ if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) {
Align = AttrAlign;
- AlignIsRequired = true;
+ AlignRequirement = AlignRequirementKind::RequiredByTypedef;
} else {
Align = Info.Align;
- AlignIsRequired = Info.AlignIsRequired;
+ AlignRequirement = Info.AlignRequirement;
}
Width = Info.Width;
break;
@@ -2359,6 +2346,10 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
return getTypeInfo(
cast<AttributedType>(T)->getEquivalentType().getTypePtr());
+ case Type::BTFTagAttributed:
+ return getTypeInfo(
+ cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr());
+
case Type::Atomic: {
// Start with the base type information.
TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
@@ -2376,8 +2367,7 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
// favorable to atomic operations:
// Round the size up to a power of 2.
- if (!llvm::isPowerOf2_64(Width))
- Width = llvm::NextPowerOf2(Width);
+ Width = llvm::bit_ceil(Width);
// Set the alignment equal to the size.
Align = static_cast<unsigned>(Width);
@@ -2386,13 +2376,13 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
break;
case Type::Pipe:
- Width = Target->getPointerWidth(getTargetAddressSpace(LangAS::opencl_global));
- Align = Target->getPointerAlign(getTargetAddressSpace(LangAS::opencl_global));
+ Width = Target->getPointerWidth(LangAS::opencl_global);
+ Align = Target->getPointerAlign(LangAS::opencl_global);
break;
}
assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
- return TypeInfo(Width, Align, AlignIsRequired);
+ return TypeInfo(Width, Align, AlignRequirement);
}
unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
@@ -2417,7 +2407,8 @@ unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
}
unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
- unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign();
+ unsigned SimdAlign = llvm::OpenMPIRBuilder::getOpenMPDefaultSimdAlign(
+ getTargetInfo().getTriple(), Target->getTargetOpts().FeatureMap);
return SimdAlign;
}
@@ -2450,7 +2441,7 @@ CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
}
/// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
-/// type, in characters, before alignment adustments. This method does
+/// type, in characters, before alignment adjustments. This method does
/// not work on incomplete types.
CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
@@ -2478,11 +2469,18 @@ unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
return ABIAlign;
if (const auto *RT = T->getAs<RecordType>()) {
- if (TI.AlignIsRequired || RT->getDecl()->isInvalidDecl())
+ const RecordDecl *RD = RT->getDecl();
+
+ // When used as part of a typedef, or together with a 'packed' attribute,
+ // the 'aligned' attribute can be used to decrease alignment. Note that the
+ // 'packed' case is already taken into consideration when computing the
+ // alignment, we only need to handle the typedef case here.
+ if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef ||
+ RD->isInvalidDecl())
return ABIAlign;
unsigned PreferredAlign = static_cast<unsigned>(
- toBits(getASTRecordLayout(RT->getDecl()).PreferredAlignment));
+ toBits(getASTRecordLayout(RD).PreferredAlignment));
assert(PreferredAlign >= ABIAlign &&
"PreferredAlign should be at least as large as ABIAlign.");
return PreferredAlign;
@@ -2502,7 +2500,7 @@ unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
Target->defaultsToAIXPowerAlignment()))
// Don't increase the alignment if an alignment attribute was specified on a
// typedef declaration.
- if (!TI.AlignIsRequired)
+ if (!TI.isAlignRequired())
return std::max(ABIAlign, (unsigned)getTypeSize(T));
return ABIAlign;
@@ -2569,8 +2567,7 @@ void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
DeepCollectObjCIvars(SuperClass, false, Ivars);
if (!leafClass) {
- for (const auto *I : OI->ivars())
- Ivars.push_back(I);
+ llvm::append_range(Ivars, OI->ivars());
} else {
auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
@@ -2615,12 +2612,14 @@ void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
}
static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
- const RecordDecl *RD) {
+ const RecordDecl *RD,
+ bool CheckIfTriviallyCopyable) {
assert(RD->isUnion() && "Must be union type");
CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl());
for (const auto *Field : RD->fields()) {
- if (!Context.hasUniqueObjectRepresentations(Field->getType()))
+ if (!Context.hasUniqueObjectRepresentations(Field->getType(),
+ CheckIfTriviallyCopyable))
return false;
CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType());
if (FieldSize != UnionSize)
@@ -2629,124 +2628,178 @@ static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
return !RD->field_empty();
}
-static bool isStructEmpty(QualType Ty) {
- const RecordDecl *RD = Ty->castAs<RecordType>()->getDecl();
+static int64_t getSubobjectOffset(const FieldDecl *Field,
+ const ASTContext &Context,
+ const clang::ASTRecordLayout & /*Layout*/) {
+ return Context.getFieldOffset(Field);
+}
- if (!RD->field_empty())
- return false;
+static int64_t getSubobjectOffset(const CXXRecordDecl *RD,
+ const ASTContext &Context,
+ const clang::ASTRecordLayout &Layout) {
+ return Context.toBits(Layout.getBaseClassOffset(RD));
+}
- if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD))
- return ClassDecl->isEmpty();
+static std::optional<int64_t>
+structHasUniqueObjectRepresentations(const ASTContext &Context,
+ const RecordDecl *RD,
+ bool CheckIfTriviallyCopyable);
+
+static std::optional<int64_t>
+getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context,
+ bool CheckIfTriviallyCopyable) {
+ if (Field->getType()->isRecordType()) {
+ const RecordDecl *RD = Field->getType()->getAsRecordDecl();
+ if (!RD->isUnion())
+ return structHasUniqueObjectRepresentations(Context, RD,
+ CheckIfTriviallyCopyable);
+ }
+
+ // A _BitInt type may not be unique if it has padding bits
+ // but if it is a bitfield the padding bits are not used.
+ bool IsBitIntType = Field->getType()->isBitIntType();
+ if (!Field->getType()->isReferenceType() && !IsBitIntType &&
+ !Context.hasUniqueObjectRepresentations(Field->getType(),
+ CheckIfTriviallyCopyable))
+ return std::nullopt;
+
+ int64_t FieldSizeInBits =
+ Context.toBits(Context.getTypeSizeInChars(Field->getType()));
+ if (Field->isBitField()) {
+ // If we have explicit padding bits, they don't contribute bits
+ // to the actual object representation, so return 0.
+ if (Field->isUnnamedBitfield())
+ return 0;
- return true;
+ int64_t BitfieldSize = Field->getBitWidthValue(Context);
+ if (IsBitIntType) {
+ if ((unsigned)BitfieldSize >
+ cast<BitIntType>(Field->getType())->getNumBits())
+ return std::nullopt;
+ } else if (BitfieldSize > FieldSizeInBits) {
+ return std::nullopt;
+ }
+ FieldSizeInBits = BitfieldSize;
+ } else if (IsBitIntType && !Context.hasUniqueObjectRepresentations(
+ Field->getType(), CheckIfTriviallyCopyable)) {
+ return std::nullopt;
+ }
+ return FieldSizeInBits;
+}
+
+static std::optional<int64_t>
+getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context,
+ bool CheckIfTriviallyCopyable) {
+ return structHasUniqueObjectRepresentations(Context, RD,
+ CheckIfTriviallyCopyable);
+}
+
+template <typename RangeT>
+static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations(
+ const RangeT &Subobjects, int64_t CurOffsetInBits,
+ const ASTContext &Context, const clang::ASTRecordLayout &Layout,
+ bool CheckIfTriviallyCopyable) {
+ for (const auto *Subobject : Subobjects) {
+ std::optional<int64_t> SizeInBits =
+ getSubobjectSizeInBits(Subobject, Context, CheckIfTriviallyCopyable);
+ if (!SizeInBits)
+ return std::nullopt;
+ if (*SizeInBits != 0) {
+ int64_t Offset = getSubobjectOffset(Subobject, Context, Layout);
+ if (Offset != CurOffsetInBits)
+ return std::nullopt;
+ CurOffsetInBits += *SizeInBits;
+ }
+ }
+ return CurOffsetInBits;
}
-static llvm::Optional<int64_t>
+static std::optional<int64_t>
structHasUniqueObjectRepresentations(const ASTContext &Context,
- const RecordDecl *RD) {
+ const RecordDecl *RD,
+ bool CheckIfTriviallyCopyable) {
assert(!RD->isUnion() && "Must be struct/class type");
const auto &Layout = Context.getASTRecordLayout(RD);
int64_t CurOffsetInBits = 0;
if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) {
if (ClassDecl->isDynamicClass())
- return llvm::None;
+ return std::nullopt;
- SmallVector<std::pair<QualType, int64_t>, 4> Bases;
+ SmallVector<CXXRecordDecl *, 4> Bases;
for (const auto &Base : ClassDecl->bases()) {
// Empty types can be inherited from, and non-empty types can potentially
// have tail padding, so just make sure there isn't an error.
- if (!isStructEmpty(Base.getType())) {
- llvm::Optional<int64_t> Size = structHasUniqueObjectRepresentations(
- Context, Base.getType()->castAs<RecordType>()->getDecl());
- if (!Size)
- return llvm::None;
- Bases.emplace_back(Base.getType(), Size.getValue());
- }
+ Bases.emplace_back(Base.getType()->getAsCXXRecordDecl());
}
- llvm::sort(Bases, [&](const std::pair<QualType, int64_t> &L,
- const std::pair<QualType, int64_t> &R) {
- return Layout.getBaseClassOffset(L.first->getAsCXXRecordDecl()) <
- Layout.getBaseClassOffset(R.first->getAsCXXRecordDecl());
+ llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
+ return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R);
});
- for (const auto &Base : Bases) {
- int64_t BaseOffset = Context.toBits(
- Layout.getBaseClassOffset(Base.first->getAsCXXRecordDecl()));
- int64_t BaseSize = Base.second;
- if (BaseOffset != CurOffsetInBits)
- return llvm::None;
- CurOffsetInBits = BaseOffset + BaseSize;
- }
+ std::optional<int64_t> OffsetAfterBases =
+ structSubobjectsHaveUniqueObjectRepresentations(
+ Bases, CurOffsetInBits, Context, Layout, CheckIfTriviallyCopyable);
+ if (!OffsetAfterBases)
+ return std::nullopt;
+ CurOffsetInBits = *OffsetAfterBases;
}
- for (const auto *Field : RD->fields()) {
- if (!Field->getType()->isReferenceType() &&
- !Context.hasUniqueObjectRepresentations(Field->getType()))
- return llvm::None;
-
- int64_t FieldSizeInBits =
- Context.toBits(Context.getTypeSizeInChars(Field->getType()));
- if (Field->isBitField()) {
- int64_t BitfieldSize = Field->getBitWidthValue(Context);
-
- if (BitfieldSize > FieldSizeInBits)
- return llvm::None;
- FieldSizeInBits = BitfieldSize;
- }
-
- int64_t FieldOffsetInBits = Context.getFieldOffset(Field);
-
- if (FieldOffsetInBits != CurOffsetInBits)
- return llvm::None;
-
- CurOffsetInBits = FieldSizeInBits + FieldOffsetInBits;
- }
+ std::optional<int64_t> OffsetAfterFields =
+ structSubobjectsHaveUniqueObjectRepresentations(
+ RD->fields(), CurOffsetInBits, Context, Layout,
+ CheckIfTriviallyCopyable);
+ if (!OffsetAfterFields)
+ return std::nullopt;
+ CurOffsetInBits = *OffsetAfterFields;
return CurOffsetInBits;
}
-bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const {
+bool ASTContext::hasUniqueObjectRepresentations(
+ QualType Ty, bool CheckIfTriviallyCopyable) const {
// C++17 [meta.unary.prop]:
// The predicate condition for a template specialization
- // has_unique_object_representations<T> shall be
- // satisfied if and only if:
+ // has_unique_object_representations<T> shall be satisfied if and only if:
// (9.1) - T is trivially copyable, and
// (9.2) - any two objects of type T with the same value have the same
- // object representation, where two objects
- // of array or non-union class type are considered to have the same value
- // if their respective sequences of
- // direct subobjects have the same values, and two objects of union type
- // are considered to have the same
- // value if they have the same active member and the corresponding members
- // have the same value.
+ // object representation, where:
+ // - two objects of array or non-union class type are considered to have
+ // the same value if their respective sequences of direct subobjects
+ // have the same values, and
+ // - two objects of union type are considered to have the same value if
+ // they have the same active member and the corresponding members have
+ // the same value.
// The set of scalar types for which this condition holds is
- // implementation-defined. [ Note: If a type has padding
- // bits, the condition does not hold; otherwise, the condition holds true
- // for unsigned integral types. -- end note ]
+ // implementation-defined. [ Note: If a type has padding bits, the condition
+ // does not hold; otherwise, the condition holds true for unsigned integral
+ // types. -- end note ]
assert(!Ty.isNull() && "Null QualType sent to unique object rep check");
// Arrays are unique only if their element type is unique.
if (Ty->isArrayType())
- return hasUniqueObjectRepresentations(getBaseElementType(Ty));
+ return hasUniqueObjectRepresentations(getBaseElementType(Ty),
+ CheckIfTriviallyCopyable);
// (9.1) - T is trivially copyable...
- if (!Ty.isTriviallyCopyableType(*this))
+ if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(*this))
return false;
// All integrals and enums are unique.
- if (Ty->isIntegralOrEnumerationType())
+ if (Ty->isIntegralOrEnumerationType()) {
+ // Except _BitInt types that have padding bits.
+ if (const auto *BIT = Ty->getAs<BitIntType>())
+ return getTypeSize(BIT) == BIT->getNumBits();
+
return true;
+ }
// All other pointers are unique.
if (Ty->isPointerType())
return true;
- if (Ty->isMemberPointerType()) {
- const auto *MPT = Ty->getAs<MemberPointerType>();
+ if (const auto *MPT = Ty->getAs<MemberPointerType>())
return !ABI->getMemberPointerInfo(MPT).HasPadding;
- }
if (Ty->isRecordType()) {
const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl();
@@ -2755,13 +2808,13 @@ bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const {
return false;
if (Record->isUnion())
- return unionHasUniqueObjectRepresentations(*this, Record);
+ return unionHasUniqueObjectRepresentations(*this, Record,
+ CheckIfTriviallyCopyable);
- Optional<int64_t> StructSize =
- structHasUniqueObjectRepresentations(*this, Record);
+ std::optional<int64_t> StructSize = structHasUniqueObjectRepresentations(
+ *this, Record, CheckIfTriviallyCopyable);
- return StructSize &&
- StructSize.getValue() == static_cast<int64_t>(getTypeSize(Ty));
+ return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(Ty));
}
// FIXME: More cases to handle here (list by rsmith):
@@ -2898,7 +2951,7 @@ TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
auto *TInfo =
(TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
- new (TInfo) TypeSourceInfo(T);
+ new (TInfo) TypeSourceInfo(T, DataSize);
return TInfo;
}
@@ -2920,6 +2973,18 @@ ASTContext::getASTObjCImplementationLayout(
return getObjCLayout(D->getClassInterface(), D);
}
+static auto getCanonicalTemplateArguments(const ASTContext &C,
+ ArrayRef<TemplateArgument> Args,
+ bool &AnyNonCanonArgs) {
+ SmallVector<TemplateArgument, 16> CanonArgs(Args);
+ for (auto &Arg : CanonArgs) {
+ TemplateArgument OrigArg = Arg;
+ Arg = C.getCanonicalTemplateArgument(Arg);
+ AnyNonCanonArgs |= !Arg.structurallyEquals(OrigArg);
+ }
+ return CanonArgs;
+}
+
//===----------------------------------------------------------------------===//
// Type creation/memoization methods
//===----------------------------------------------------------------------===//
@@ -2949,7 +3014,7 @@ ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
(void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
}
- auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals);
+ auto *eq = new (*this, alignof(ExtQuals)) ExtQuals(baseType, canon, quals);
ExtQualNodes.InsertNode(eq, insertPos);
return QualType(eq, fastQuals);
}
@@ -3085,7 +3150,7 @@ void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
/// declaration of a function with an exception specification is permitted
/// and preserved. Other type sugar (for instance, typedefs) is not.
QualType ASTContext::getFunctionTypeWithExceptionSpec(
- QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) {
+ QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const {
// Might have some parens.
if (const auto *PT = dyn_cast<ParenType>(Orig))
return getParenType(
@@ -3113,7 +3178,7 @@ QualType ASTContext::getFunctionTypeWithExceptionSpec(
}
bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T,
- QualType U) {
+ QualType U) const {
return hasSameType(T, U) ||
(getLangOpts().CPlusPlus17 &&
hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None),
@@ -3123,9 +3188,9 @@ bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T,
QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) {
if (const auto *Proto = T->getAs<FunctionProtoType>()) {
QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
- SmallVector<QualType, 16> Args(Proto->param_types());
+ SmallVector<QualType, 16> Args(Proto->param_types().size());
for (unsigned i = 0, n = Args.size(); i != n; ++i)
- Args[i] = removePtrSizeAddrSpace(Args[i]);
+ Args[i] = removePtrSizeAddrSpace(Proto->param_types()[i]);
return getFunctionType(RetTy, Args, Proto->getExtProtoInfo());
}
@@ -3193,7 +3258,7 @@ QualType ASTContext::getComplexType(QualType T) const {
ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical);
+ auto *New = new (*this, alignof(ComplexType)) ComplexType(T, Canonical);
Types.push_back(New);
ComplexTypes.InsertNode(New, InsertPos);
return QualType(New, 0);
@@ -3221,7 +3286,7 @@ QualType ASTContext::getPointerType(QualType T) const {
PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- auto *New = new (*this, TypeAlignment) PointerType(T, Canonical);
+ auto *New = new (*this, alignof(PointerType)) PointerType(T, Canonical);
Types.push_back(New);
PointerTypes.InsertNode(New, InsertPos);
return QualType(New, 0);
@@ -3241,13 +3306,33 @@ QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
assert(!AT && "Shouldn't be in the map!");
- AT = new (*this, TypeAlignment)
+ AT = new (*this, alignof(AdjustedType))
AdjustedType(Type::Adjusted, Orig, New, Canonical);
Types.push_back(AT);
AdjustedTypes.InsertNode(AT, InsertPos);
return QualType(AT, 0);
}
+QualType ASTContext::getDecayedType(QualType Orig, QualType Decayed) const {
+ llvm::FoldingSetNodeID ID;
+ AdjustedType::Profile(ID, Orig, Decayed);
+ void *InsertPos = nullptr;
+ AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (AT)
+ return QualType(AT, 0);
+
+ QualType Canonical = getCanonicalType(Decayed);
+
+ // Get the new insert position for the node we care about.
+ AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!AT && "Shouldn't be in the map!");
+
+ AT = new (*this, alignof(DecayedType)) DecayedType(Orig, Decayed, Canonical);
+ Types.push_back(AT);
+ AdjustedTypes.InsertNode(AT, InsertPos);
+ return QualType(AT, 0);
+}
+
QualType ASTContext::getDecayedType(QualType T) const {
assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");
@@ -3268,23 +3353,7 @@ QualType ASTContext::getDecayedType(QualType T) const {
if (T->isFunctionType())
Decayed = getPointerType(T);
- llvm::FoldingSetNodeID ID;
- AdjustedType::Profile(ID, T, Decayed);
- void *InsertPos = nullptr;
- AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
- if (AT)
- return QualType(AT, 0);
-
- QualType Canonical = getCanonicalType(Decayed);
-
- // Get the new insert position for the node we care about.
- AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
- assert(!AT && "Shouldn't be in the map!");
-
- AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical);
- Types.push_back(AT);
- AdjustedTypes.InsertNode(AT, InsertPos);
- return QualType(AT, 0);
+ return getDecayedType(T, Decayed);
}
/// getBlockPointerType - Return the uniqued reference to the type for
@@ -3312,7 +3381,8 @@ QualType ASTContext::getBlockPointerType(QualType T) const {
BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical);
+ auto *New =
+ new (*this, alignof(BlockPointerType)) BlockPointerType(T, Canonical);
Types.push_back(New);
BlockPointerTypes.InsertNode(New, InsertPos);
return QualType(New, 0);
@@ -3322,8 +3392,9 @@ QualType ASTContext::getBlockPointerType(QualType T) const {
/// lvalue reference to the specified type.
QualType
ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
- assert(getCanonicalType(T) != OverloadTy &&
- "Unresolved overloaded function type");
+ assert((!T->isPlaceholderType() ||
+ T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
+ "Unresolved placeholder type");
// Unique pointers, to guarantee there is only one pointer of a particular
// structure.
@@ -3350,8 +3421,8 @@ ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical,
- SpelledAsLValue);
+ auto *New = new (*this, alignof(LValueReferenceType))
+ LValueReferenceType(T, Canonical, SpelledAsLValue);
Types.push_back(New);
LValueReferenceTypes.InsertNode(New, InsertPos);
@@ -3361,6 +3432,10 @@ ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
/// getRValueReferenceType - Return the uniqued reference to the type for an
/// rvalue reference to the specified type.
QualType ASTContext::getRValueReferenceType(QualType T) const {
+ assert((!T->isPlaceholderType() ||
+ T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
+ "Unresolved placeholder type");
+
// Unique pointers, to guarantee there is only one pointer of a particular
// structure.
llvm::FoldingSetNodeID ID;
@@ -3386,7 +3461,8 @@ QualType ASTContext::getRValueReferenceType(QualType T) const {
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical);
+ auto *New = new (*this, alignof(RValueReferenceType))
+ RValueReferenceType(T, Canonical);
Types.push_back(New);
RValueReferenceTypes.InsertNode(New, InsertPos);
return QualType(New, 0);
@@ -3416,7 +3492,8 @@ QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
+ auto *New = new (*this, alignof(MemberPointerType))
+ MemberPointerType(T, Cls, Canonical);
Types.push_back(New);
MemberPointerTypes.InsertNode(New, InsertPos);
return QualType(New, 0);
@@ -3427,7 +3504,7 @@ QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
QualType ASTContext::getConstantArrayType(QualType EltTy,
const llvm::APInt &ArySizeIn,
const Expr *SizeExpr,
- ArrayType::ArraySizeModifier ASM,
+ ArraySizeModifier ASM,
unsigned IndexTypeQuals) const {
assert((EltTy->isDependentType() ||
EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
@@ -3455,6 +3532,7 @@ QualType ASTContext::getConstantArrayType(QualType EltTy,
// is instantiation-dependent, this won't be a canonical type either, so fill
// in the canonical type field.
QualType Canon;
+ // FIXME: Check below should look for qualifiers behind sugar.
if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) {
SplitQualType canonSplit = getCanonicalType(EltTy).split();
Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr,
@@ -3469,7 +3547,7 @@ QualType ASTContext::getConstantArrayType(QualType EltTy,
void *Mem = Allocate(
ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0),
- TypeAlignment);
+ alignof(ConstantArrayType));
auto *New = new (Mem)
ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals);
ConstantArrayTypes.InsertNode(New, InsertPos);
@@ -3524,8 +3602,8 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
case Type::Auto:
case Type::DeducedTemplateSpecialization:
case Type::PackExpansion:
- case Type::ExtInt:
- case Type::DependentExtInt:
+ case Type::BitInt:
+ case Type::DependentBitInt:
llvm_unreachable("type should never be variably-modified");
// These types can be variably-modified but should never need to
@@ -3592,12 +3670,10 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
// Turn incomplete types into [*] types.
case Type::IncompleteArray: {
const auto *iat = cast<IncompleteArrayType>(ty);
- result = getVariableArrayType(
- getVariableArrayDecayedType(iat->getElementType()),
- /*size*/ nullptr,
- ArrayType::Normal,
- iat->getIndexTypeCVRQualifiers(),
- SourceRange());
+ result =
+ getVariableArrayType(getVariableArrayDecayedType(iat->getElementType()),
+ /*size*/ nullptr, ArraySizeModifier::Normal,
+ iat->getIndexTypeCVRQualifiers(), SourceRange());
break;
}
@@ -3605,11 +3681,9 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
case Type::VariableArray: {
const auto *vat = cast<VariableArrayType>(ty);
result = getVariableArrayType(
- getVariableArrayDecayedType(vat->getElementType()),
- /*size*/ nullptr,
- ArrayType::Star,
- vat->getIndexTypeCVRQualifiers(),
- vat->getBracketsRange());
+ getVariableArrayDecayedType(vat->getElementType()),
+ /*size*/ nullptr, ArraySizeModifier::Star,
+ vat->getIndexTypeCVRQualifiers(), vat->getBracketsRange());
break;
}
}
@@ -3620,9 +3694,8 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
/// getVariableArrayType - Returns a non-unique reference to the type for a
/// variable array of the specified element type.
-QualType ASTContext::getVariableArrayType(QualType EltTy,
- Expr *NumElts,
- ArrayType::ArraySizeModifier ASM,
+QualType ASTContext::getVariableArrayType(QualType EltTy, Expr *NumElts,
+ ArraySizeModifier ASM,
unsigned IndexTypeQuals,
SourceRange Brackets) const {
// Since we don't unique expressions, it isn't possible to unique VLA's
@@ -3630,6 +3703,7 @@ QualType ASTContext::getVariableArrayType(QualType EltTy,
QualType Canon;
// Be sure to pull qualifiers off the element type.
+ // FIXME: Check below should look for qualifiers behind sugar.
if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
SplitQualType canonSplit = getCanonicalType(EltTy).split();
Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
@@ -3637,8 +3711,8 @@ QualType ASTContext::getVariableArrayType(QualType EltTy,
Canon = getQualifiedType(Canon, canonSplit.Quals);
}
- auto *New = new (*this, TypeAlignment)
- VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);
+ auto *New = new (*this, alignof(VariableArrayType))
+ VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);
VariableArrayTypes.push_back(New);
Types.push_back(New);
@@ -3650,7 +3724,7 @@ QualType ASTContext::getVariableArrayType(QualType EltTy,
/// type.
QualType ASTContext::getDependentSizedArrayType(QualType elementType,
Expr *numElements,
- ArrayType::ArraySizeModifier ASM,
+ ArraySizeModifier ASM,
unsigned elementTypeQuals,
SourceRange brackets) const {
assert((!numElements || numElements->isTypeDependent() ||
@@ -3662,11 +3736,9 @@ QualType ASTContext::getDependentSizedArrayType(QualType elementType,
// initializer. We do no canonicalization here at all, which is okay
// because they can't be used in most locations.
if (!numElements) {
- auto *newType
- = new (*this, TypeAlignment)
- DependentSizedArrayType(*this, elementType, QualType(),
- numElements, ASM, elementTypeQuals,
- brackets);
+ auto *newType = new (*this, alignof(DependentSizedArrayType))
+ DependentSizedArrayType(elementType, QualType(), numElements, ASM,
+ elementTypeQuals, brackets);
Types.push_back(newType);
return QualType(newType, 0);
}
@@ -3688,10 +3760,9 @@ QualType ASTContext::getDependentSizedArrayType(QualType elementType,
// If we don't have one, build one.
if (!canonTy) {
- canonTy = new (*this, TypeAlignment)
- DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0),
- QualType(), numElements, ASM, elementTypeQuals,
- brackets);
+ canonTy = new (*this, alignof(DependentSizedArrayType))
+ DependentSizedArrayType(QualType(canonElementType.Ty, 0), QualType(),
+ numElements, ASM, elementTypeQuals, brackets);
DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
Types.push_back(canonTy);
}
@@ -3708,16 +3779,15 @@ QualType ASTContext::getDependentSizedArrayType(QualType elementType,
// Otherwise, we need to build a type which follows the spelling
// of the element type.
- auto *sugaredType
- = new (*this, TypeAlignment)
- DependentSizedArrayType(*this, elementType, canon, numElements,
- ASM, elementTypeQuals, brackets);
+ auto *sugaredType = new (*this, alignof(DependentSizedArrayType))
+ DependentSizedArrayType(elementType, canon, numElements, ASM,
+ elementTypeQuals, brackets);
Types.push_back(sugaredType);
return QualType(sugaredType, 0);
}
QualType ASTContext::getIncompleteArrayType(QualType elementType,
- ArrayType::ArraySizeModifier ASM,
+ ArraySizeModifier ASM,
unsigned elementTypeQuals) const {
llvm::FoldingSetNodeID ID;
IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);
@@ -3732,6 +3802,7 @@ QualType ASTContext::getIncompleteArrayType(QualType elementType,
// qualifiers off the element type.
QualType canon;
+ // FIXME: Check below should look for qualifiers behind sugar.
if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
SplitQualType canonSplit = getCanonicalType(elementType).split();
canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
@@ -3744,8 +3815,8 @@ QualType ASTContext::getIncompleteArrayType(QualType elementType,
assert(!existing && "Shouldn't be in the map!"); (void) existing;
}
- auto *newType = new (*this, TypeAlignment)
- IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
+ auto *newType = new (*this, alignof(IncompleteArrayType))
+ IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
IncompleteArrayTypes.InsertNode(newType, insertPos);
Types.push_back(newType);
@@ -3830,6 +3901,10 @@ ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const {
return SVE_INT_ELTTY(64, 2, false, 4);
case BuiltinType::SveBool:
return SVE_ELTTY(BoolTy, 16, 1);
+ case BuiltinType::SveBoolx2:
+ return SVE_ELTTY(BoolTy, 16, 2);
+ case BuiltinType::SveBoolx4:
+ return SVE_ELTTY(BoolTy, 16, 4);
case BuiltinType::SveFloat16:
return SVE_ELTTY(HalfTy, 8, 1);
case BuiltinType::SveFloat16x2:
@@ -3871,6 +3946,9 @@ ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const {
case BuiltinType::Id: \
return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \
llvm::ElementCount::getScalable(NumEls), NF};
+#define RVV_VECTOR_TYPE_BFLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \
+ case BuiltinType::Id: \
+ return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
case BuiltinType::Id: \
return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1};
@@ -3878,11 +3956,24 @@ ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const {
}
}
+/// getExternrefType - Return a WebAssembly externref type, which represents an
+/// opaque reference to a host value.
+QualType ASTContext::getWebAssemblyExternrefType() const {
+ if (Target->getTriple().isWasm() && Target->hasFeature("reference-types")) {
+#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \
+ if (BuiltinType::Id == BuiltinType::WasmExternRef) \
+ return SingletonId;
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
+ }
+ llvm_unreachable(
+ "shouldn't try to generate type externref outside WebAssembly target");
+}
+
/// getScalableVectorType - Return the unique reference to a scalable vector
/// type of the specified element type and size. VectorType must be a built-in
/// type.
-QualType ASTContext::getScalableVectorType(QualType EltTy,
- unsigned NumElts) const {
+QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts,
+ unsigned NumFields) const {
if (Target->hasAArch64SVETypes()) {
uint64_t EltTySize = getTypeSize(EltTy);
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
@@ -3900,20 +3991,24 @@ QualType ASTContext::getScalableVectorType(QualType EltTy,
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \
if (EltTy->isBooleanType() && NumElts == NumEls) \
return SingletonId;
+#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingleTonId)
#include "clang/Basic/AArch64SVEACLETypes.def"
} else if (Target->hasRISCVVTypes()) {
uint64_t EltTySize = getTypeSize(EltTy);
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \
- IsFP) \
- if (!EltTy->isBooleanType() && \
- ((EltTy->hasIntegerRepresentation() && \
- EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
- (EltTy->hasFloatingRepresentation() && IsFP)) && \
- EltTySize == ElBits && NumElts == NumEls) \
- return SingletonId;
+ IsFP, IsBF) \
+ if (!EltTy->isBooleanType() && \
+ ((EltTy->hasIntegerRepresentation() && \
+ EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
+ (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \
+ IsFP && !IsBF) || \
+ (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \
+ IsBF && !IsFP)) && \
+ EltTySize == ElBits && NumElts == NumEls && NumFields == NF) \
+ return SingletonId;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
- if (EltTy->isBooleanType() && NumElts == NumEls) \
- return SingletonId;
+ if (EltTy->isBooleanType() && NumElts == NumEls) \
+ return SingletonId;
#include "clang/Basic/RISCVVTypes.def"
}
return QualType();
@@ -3922,8 +4017,12 @@ QualType ASTContext::getScalableVectorType(QualType EltTy,
/// getVectorType - Return the unique reference to a vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
- VectorType::VectorKind VecKind) const {
- assert(vecType->isBuiltinType());
+ VectorKind VecKind) const {
+ assert(vecType->isBuiltinType() ||
+ (vecType->isBitIntType() &&
+ // Only support _BitInt elements with byte-sized power of 2 NumBits.
+ llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) &&
+ vecType->castAs<BitIntType>()->getNumBits() >= 8));
// Check if we've already instantiated a vector of this type.
llvm::FoldingSetNodeID ID;
@@ -3943,17 +4042,16 @@ QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- auto *New = new (*this, TypeAlignment)
- VectorType(vecType, NumElts, Canonical, VecKind);
+ auto *New = new (*this, alignof(VectorType))
+ VectorType(vecType, NumElts, Canonical, VecKind);
VectorTypes.InsertNode(New, InsertPos);
Types.push_back(New);
return QualType(New, 0);
}
-QualType
-ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
- SourceLocation AttrLoc,
- VectorType::VectorKind VecKind) const {
+QualType ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
+ SourceLocation AttrLoc,
+ VectorKind VecKind) const {
llvm::FoldingSetNodeID ID;
DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr,
VecKind);
@@ -3963,13 +4061,13 @@ ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
DependentVectorType *New;
if (Canon) {
- New = new (*this, TypeAlignment) DependentVectorType(
- *this, VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
+ New = new (*this, alignof(DependentVectorType)) DependentVectorType(
+ VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
} else {
QualType CanonVecTy = getCanonicalType(VecType);
if (CanonVecTy == VecType) {
- New = new (*this, TypeAlignment) DependentVectorType(
- *this, VecType, QualType(), SizeExpr, AttrLoc, VecKind);
+ New = new (*this, alignof(DependentVectorType))
+ DependentVectorType(VecType, QualType(), SizeExpr, AttrLoc, VecKind);
DependentVectorType *CanonCheck =
DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
@@ -3980,8 +4078,8 @@ ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
} else {
QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr,
SourceLocation(), VecKind);
- New = new (*this, TypeAlignment) DependentVectorType(
- *this, VecType, CanonTy, SizeExpr, AttrLoc, VecKind);
+ New = new (*this, alignof(DependentVectorType))
+ DependentVectorType(VecType, CanonTy, SizeExpr, AttrLoc, VecKind);
}
}
@@ -3991,14 +4089,18 @@ ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
/// getExtVectorType - Return the unique reference to an extended vector type of
/// the specified element type and size. VectorType must be a built-in type.
-QualType
-ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const {
- assert(vecType->isBuiltinType() || vecType->isDependentType());
+QualType ASTContext::getExtVectorType(QualType vecType,
+ unsigned NumElts) const {
+ assert(vecType->isBuiltinType() || vecType->isDependentType() ||
+ (vecType->isBitIntType() &&
+ // Only support _BitInt elements with byte-sized power of 2 NumBits.
+ llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) &&
+ vecType->castAs<BitIntType>()->getNumBits() >= 8));
// Check if we've already instantiated a vector of this type.
llvm::FoldingSetNodeID ID;
VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
- VectorType::GenericVector);
+ VectorKind::Generic);
void *InsertPos = nullptr;
if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(VTP, 0);
@@ -4013,8 +4115,8 @@ ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const {
VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- auto *New = new (*this, TypeAlignment)
- ExtVectorType(vecType, NumElts, Canonical);
+ auto *New = new (*this, alignof(ExtVectorType))
+ ExtVectorType(vecType, NumElts, Canonical);
VectorTypes.InsertNode(New, InsertPos);
Types.push_back(New);
return QualType(New, 0);
@@ -4035,15 +4137,14 @@ ASTContext::getDependentSizedExtVectorType(QualType vecType,
if (Canon) {
// We already have a canonical version of this array type; use it as
// the canonical type for a newly-built type.
- New = new (*this, TypeAlignment)
- DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0),
- SizeExpr, AttrLoc);
+ New = new (*this, alignof(DependentSizedExtVectorType))
+ DependentSizedExtVectorType(vecType, QualType(Canon, 0), SizeExpr,
+ AttrLoc);
} else {
QualType CanonVecTy = getCanonicalType(vecType);
if (CanonVecTy == vecType) {
- New = new (*this, TypeAlignment)
- DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr,
- AttrLoc);
+ New = new (*this, alignof(DependentSizedExtVectorType))
+ DependentSizedExtVectorType(vecType, QualType(), SizeExpr, AttrLoc);
DependentSizedExtVectorType *CanonCheck
= DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
@@ -4053,8 +4154,8 @@ ASTContext::getDependentSizedExtVectorType(QualType vecType,
} else {
QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
SourceLocation());
- New = new (*this, TypeAlignment) DependentSizedExtVectorType(
- *this, vecType, CanonExtTy, SizeExpr, AttrLoc);
+ New = new (*this, alignof(DependentSizedExtVectorType))
+ DependentSizedExtVectorType(vecType, CanonExtTy, SizeExpr, AttrLoc);
}
}
@@ -4087,7 +4188,7 @@ QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows,
(void)NewIP;
}
- auto *New = new (*this, TypeAlignment)
+ auto *New = new (*this, alignof(ConstantMatrixType))
ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical);
MatrixTypes.InsertNode(New, InsertPos);
Types.push_back(New);
@@ -4108,8 +4209,9 @@ QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy,
DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
if (!Canon) {
- Canon = new (*this, TypeAlignment) DependentSizedMatrixType(
- *this, CanonElementTy, QualType(), RowExpr, ColumnExpr, AttrLoc);
+ Canon = new (*this, alignof(DependentSizedMatrixType))
+ DependentSizedMatrixType(CanonElementTy, QualType(), RowExpr,
+ ColumnExpr, AttrLoc);
#ifndef NDEBUG
DependentSizedMatrixType *CanonCheck =
DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
@@ -4127,8 +4229,8 @@ QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy,
return QualType(Canon, 0);
// Use Canon as the canonical type for newly-built type.
- DependentSizedMatrixType *New = new (*this, TypeAlignment)
- DependentSizedMatrixType(*this, ElementTy, QualType(Canon, 0), RowExpr,
+ DependentSizedMatrixType *New = new (*this, alignof(DependentSizedMatrixType))
+ DependentSizedMatrixType(ElementTy, QualType(Canon, 0), RowExpr,
ColumnExpr, AttrLoc);
Types.push_back(New);
return QualType(New, 0);
@@ -4150,9 +4252,9 @@ QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);
if (!canonTy) {
- canonTy = new (*this, TypeAlignment)
- DependentAddressSpaceType(*this, canonPointeeType,
- QualType(), AddrSpaceExpr, AttrLoc);
+ canonTy = new (*this, alignof(DependentAddressSpaceType))
+ DependentAddressSpaceType(canonPointeeType, QualType(), AddrSpaceExpr,
+ AttrLoc);
DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
Types.push_back(canonTy);
}
@@ -4161,10 +4263,9 @@ QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
return QualType(canonTy, 0);
- auto *sugaredType
- = new (*this, TypeAlignment)
- DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0),
- AddrSpaceExpr, AttrLoc);
+ auto *sugaredType = new (*this, alignof(DependentAddressSpaceType))
+ DependentAddressSpaceType(PointeeType, QualType(canonTy, 0),
+ AddrSpaceExpr, AttrLoc);
Types.push_back(sugaredType);
return QualType(sugaredType, 0);
}
@@ -4180,6 +4281,13 @@ static bool isCanonicalResultType(QualType T) {
QualType
ASTContext::getFunctionNoProtoType(QualType ResultTy,
const FunctionType::ExtInfo &Info) const {
+ // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter
+ // functionality creates a function without a prototype regardless of
+ // language mode (so it makes them even in C++). Once the rewriter has been
+ // fixed, this assertion can be enabled again.
+ //assert(!LangOpts.requiresStrictPrototypes() &&
+ // "strict prototypes are disabled");
+
// Unique functions, to guarantee there is only one function of a particular
// structure.
llvm::FoldingSetNodeID ID;
@@ -4201,8 +4309,8 @@ ASTContext::getFunctionNoProtoType(QualType ResultTy,
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- auto *New = new (*this, TypeAlignment)
- FunctionNoProtoType(ResultTy, Canonical, Info);
+ auto *New = new (*this, alignof(FunctionNoProtoType))
+ FunctionNoProtoType(ResultTy, Canonical, Info);
Types.push_back(New);
FunctionNoProtoTypes.InsertNode(New, InsertPos);
return QualType(New, 0);
@@ -4327,7 +4435,7 @@ QualType ASTContext::getFunctionTypeInternal(
case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated:
// We don't know yet. It shouldn't matter what we pick here; no-one
// should ever look at this.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case EST_None: case EST_MSAny: case EST_NoexceptFalse:
CanonicalEPI.ExceptionSpec.Type = EST_None;
break;
@@ -4381,15 +4489,15 @@ QualType ASTContext::getFunctionTypeInternal(
EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
size_t Size = FunctionProtoType::totalSizeToAlloc<
QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields,
- FunctionType::ExceptionType, Expr *, FunctionDecl *,
- FunctionProtoType::ExtParameterInfo, Qualifiers>(
- NumArgs, EPI.Variadic,
- FunctionProtoType::hasExtraBitfields(EPI.ExceptionSpec.Type),
- ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr,
+ FunctionType::FunctionTypeArmAttributes, FunctionType::ExceptionType,
+ Expr *, FunctionDecl *, FunctionProtoType::ExtParameterInfo, Qualifiers>(
+ NumArgs, EPI.Variadic, EPI.requiresFunctionProtoTypeExtraBitfields(),
+ EPI.requiresFunctionProtoTypeArmAttributes(), ESH.NumExceptionType,
+ ESH.NumExprPtr, ESH.NumFunctionDeclPtr,
EPI.ExtParameterInfos ? NumArgs : 0,
EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0);
- auto *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment);
+ auto *FTP = (FunctionProtoType *)Allocate(Size, alignof(FunctionProtoType));
FunctionProtoType::ExtProtoInfo newEPI = EPI;
new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
Types.push_back(FTP);
@@ -4417,7 +4525,7 @@ QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
assert(!NewIP && "Shouldn't be in the map!");
(void)NewIP;
}
- auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly);
+ auto *New = new (*this, alignof(PipeType)) PipeType(T, Canonical, ReadOnly);
Types.push_back(New);
PipeTypes.InsertNode(New, InsertPos);
return QualType(New, 0);
@@ -4437,34 +4545,34 @@ QualType ASTContext::getWritePipeType(QualType T) const {
return getPipeType(T, false);
}
-QualType ASTContext::getExtIntType(bool IsUnsigned, unsigned NumBits) const {
+QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const {
llvm::FoldingSetNodeID ID;
- ExtIntType::Profile(ID, IsUnsigned, NumBits);
+ BitIntType::Profile(ID, IsUnsigned, NumBits);
void *InsertPos = nullptr;
- if (ExtIntType *EIT = ExtIntTypes.FindNodeOrInsertPos(ID, InsertPos))
+ if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(EIT, 0);
- auto *New = new (*this, TypeAlignment) ExtIntType(IsUnsigned, NumBits);
- ExtIntTypes.InsertNode(New, InsertPos);
+ auto *New = new (*this, alignof(BitIntType)) BitIntType(IsUnsigned, NumBits);
+ BitIntTypes.InsertNode(New, InsertPos);
Types.push_back(New);
return QualType(New, 0);
}
-QualType ASTContext::getDependentExtIntType(bool IsUnsigned,
+QualType ASTContext::getDependentBitIntType(bool IsUnsigned,
Expr *NumBitsExpr) const {
assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent");
llvm::FoldingSetNodeID ID;
- DependentExtIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr);
+ DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr);
void *InsertPos = nullptr;
- if (DependentExtIntType *Existing =
- DependentExtIntTypes.FindNodeOrInsertPos(ID, InsertPos))
+ if (DependentBitIntType *Existing =
+ DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(Existing, 0);
- auto *New = new (*this, TypeAlignment)
- DependentExtIntType(*this, IsUnsigned, NumBitsExpr);
- DependentExtIntTypes.InsertNode(New, InsertPos);
+ auto *New = new (*this, alignof(DependentBitIntType))
+ DependentBitIntType(IsUnsigned, NumBitsExpr);
+ DependentBitIntTypes.InsertNode(New, InsertPos);
Types.push_back(New);
return QualType(New, 0);
@@ -4495,8 +4603,8 @@ QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
Decl->TypeForDecl = PrevDecl->TypeForDecl;
assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
} else {
- Type *newType =
- new (*this, TypeAlignment) InjectedClassNameType(Decl, TST);
+ Type *newType = new (*this, alignof(InjectedClassNameType))
+ InjectedClassNameType(Decl, TST);
Decl->TypeForDecl = newType;
Types.push_back(newType);
}
@@ -4523,9 +4631,7 @@ QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
assert(Enum->isFirstDecl() && "enum has previous declaration");
return getEnumType(Enum);
} else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
- Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using);
- Decl->TypeForDecl = newType;
- Types.push_back(newType);
+ return getUnresolvedUsingType(Using);
} else
llvm_unreachable("TypeDecl without a type?");
@@ -4536,16 +4642,63 @@ QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
/// specified typedef name decl.
QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl,
QualType Underlying) const {
- if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
+ if (!Decl->TypeForDecl) {
+ if (Underlying.isNull())
+ Underlying = Decl->getUnderlyingType();
+ auto *NewType = new (*this, alignof(TypedefType)) TypedefType(
+ Type::Typedef, Decl, QualType(), getCanonicalType(Underlying));
+ Decl->TypeForDecl = NewType;
+ Types.push_back(NewType);
+ return QualType(NewType, 0);
+ }
+ if (Underlying.isNull() || Decl->getUnderlyingType() == Underlying)
+ return QualType(Decl->TypeForDecl, 0);
+ assert(hasSameType(Decl->getUnderlyingType(), Underlying));
- if (Underlying.isNull())
- Underlying = Decl->getUnderlyingType();
- QualType Canonical = getCanonicalType(Underlying);
- auto *newType = new (*this, TypeAlignment)
- TypedefType(Type::Typedef, Decl, Underlying, Canonical);
- Decl->TypeForDecl = newType;
- Types.push_back(newType);
- return QualType(newType, 0);
+ llvm::FoldingSetNodeID ID;
+ TypedefType::Profile(ID, Decl, Underlying);
+
+ void *InsertPos = nullptr;
+ if (TypedefType *T = TypedefTypes.FindNodeOrInsertPos(ID, InsertPos)) {
+ assert(!T->typeMatchesDecl() &&
+ "non-divergent case should be handled with TypeDecl");
+ return QualType(T, 0);
+ }
+
+ void *Mem = Allocate(TypedefType::totalSizeToAlloc<QualType>(true),
+ alignof(TypedefType));
+ auto *NewType = new (Mem) TypedefType(Type::Typedef, Decl, Underlying,
+ getCanonicalType(Underlying));
+ TypedefTypes.InsertNode(NewType, InsertPos);
+ Types.push_back(NewType);
+ return QualType(NewType, 0);
+}
+
+QualType ASTContext::getUsingType(const UsingShadowDecl *Found,
+ QualType Underlying) const {
+ llvm::FoldingSetNodeID ID;
+ UsingType::Profile(ID, Found, Underlying);
+
+ void *InsertPos = nullptr;
+ if (UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(T, 0);
+
+ const Type *TypeForDecl =
+ cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl();
+
+ assert(!Underlying.hasLocalQualifiers());
+ QualType Canon = Underlying->getCanonicalTypeInternal();
+ assert(TypeForDecl->getCanonicalTypeInternal() == Canon);
+
+ if (Underlying.getTypePtr() == TypeForDecl)
+ Underlying = QualType();
+ void *Mem =
+ Allocate(UsingType::totalSizeToAlloc<QualType>(!Underlying.isNull()),
+ alignof(UsingType));
+ UsingType *NewType = new (Mem) UsingType(Found, Underlying, Canon);
+ Types.push_back(NewType);
+ UsingTypes.InsertNode(NewType, InsertPos);
+ return QualType(NewType, 0);
}
QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
@@ -4555,7 +4708,7 @@ QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
if (PrevDecl->TypeForDecl)
return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
- auto *newType = new (*this, TypeAlignment) RecordType(Decl);
+ auto *newType = new (*this, alignof(RecordType)) RecordType(Decl);
Decl->TypeForDecl = newType;
Types.push_back(newType);
return QualType(newType, 0);
@@ -4568,7 +4721,24 @@ QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
if (PrevDecl->TypeForDecl)
return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
- auto *newType = new (*this, TypeAlignment) EnumType(Decl);
+ auto *newType = new (*this, alignof(EnumType)) EnumType(Decl);
+ Decl->TypeForDecl = newType;
+ Types.push_back(newType);
+ return QualType(newType, 0);
+}
+
+QualType ASTContext::getUnresolvedUsingType(
+ const UnresolvedUsingTypenameDecl *Decl) const {
+ if (Decl->TypeForDecl)
+ return QualType(Decl->TypeForDecl, 0);
+
+ if (const UnresolvedUsingTypenameDecl *CanonicalDecl =
+ Decl->getCanonicalDecl())
+ if (CanonicalDecl->TypeForDecl)
+ return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0);
+
+ Type *newType =
+ new (*this, alignof(UnresolvedUsingType)) UnresolvedUsingType(Decl);
Decl->TypeForDecl = newType;
Types.push_back(newType);
return QualType(newType, 0);
@@ -4576,7 +4746,7 @@ QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
QualType ASTContext::getAttributedType(attr::Kind attrKind,
QualType modifiedType,
- QualType equivalentType) {
+ QualType equivalentType) const {
llvm::FoldingSetNodeID id;
AttributedType::Profile(id, attrKind, modifiedType, equivalentType);
@@ -4585,7 +4755,7 @@ QualType ASTContext::getAttributedType(attr::Kind attrKind,
if (type) return QualType(type, 0);
QualType canon = getCanonicalType(equivalentType);
- type = new (*this, TypeAlignment)
+ type = new (*this, alignof(AttributedType))
AttributedType(canon, attrKind, modifiedType, equivalentType);
Types.push_back(type);
@@ -4594,22 +4764,44 @@ QualType ASTContext::getAttributedType(attr::Kind attrKind,
return QualType(type, 0);
}
-/// Retrieve a substitution-result type.
-QualType
-ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
- QualType Replacement) const {
- assert(Replacement.isCanonical()
- && "replacement types must always be canonical");
+QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr,
+ QualType Wrapped) {
+ llvm::FoldingSetNodeID ID;
+ BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr);
+
+ void *InsertPos = nullptr;
+ BTFTagAttributedType *Ty =
+ BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (Ty)
+ return QualType(Ty, 0);
+
+ QualType Canon = getCanonicalType(Wrapped);
+ Ty = new (*this, alignof(BTFTagAttributedType))
+ BTFTagAttributedType(Canon, Wrapped, BTFAttr);
+ Types.push_back(Ty);
+ BTFTagAttributedTypes.InsertNode(Ty, InsertPos);
+
+ return QualType(Ty, 0);
+}
+
+/// Retrieve a substitution-result type.
+QualType ASTContext::getSubstTemplateTypeParmType(
+ QualType Replacement, Decl *AssociatedDecl, unsigned Index,
+ std::optional<unsigned> PackIndex) const {
llvm::FoldingSetNodeID ID;
- SubstTemplateTypeParmType::Profile(ID, Parm, Replacement);
+ SubstTemplateTypeParmType::Profile(ID, Replacement, AssociatedDecl, Index,
+ PackIndex);
void *InsertPos = nullptr;
- SubstTemplateTypeParmType *SubstParm
- = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
+ SubstTemplateTypeParmType *SubstParm =
+ SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
if (!SubstParm) {
- SubstParm = new (*this, TypeAlignment)
- SubstTemplateTypeParmType(Parm, Replacement);
+ void *Mem = Allocate(SubstTemplateTypeParmType::totalSizeToAlloc<QualType>(
+ !Replacement.isCanonical()),
+ alignof(SubstTemplateTypeParmType));
+ SubstParm = new (Mem) SubstTemplateTypeParmType(Replacement, AssociatedDecl,
+ Index, PackIndex);
Types.push_back(SubstParm);
SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
}
@@ -4618,34 +4810,39 @@ ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
}
/// Retrieve a
-QualType ASTContext::getSubstTemplateTypeParmPackType(
- const TemplateTypeParmType *Parm,
- const TemplateArgument &ArgPack) {
+QualType
+ASTContext::getSubstTemplateTypeParmPackType(Decl *AssociatedDecl,
+ unsigned Index, bool Final,
+ const TemplateArgument &ArgPack) {
#ifndef NDEBUG
- for (const auto &P : ArgPack.pack_elements()) {
- assert(P.getKind() == TemplateArgument::Type &&"Pack contains a non-type");
- assert(P.getAsType().isCanonical() && "Pack contains non-canonical type");
- }
+ for (const auto &P : ArgPack.pack_elements())
+ assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type");
#endif
llvm::FoldingSetNodeID ID;
- SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack);
+ SubstTemplateTypeParmPackType::Profile(ID, AssociatedDecl, Index, Final,
+ ArgPack);
void *InsertPos = nullptr;
- if (SubstTemplateTypeParmPackType *SubstParm
- = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
+ if (SubstTemplateTypeParmPackType *SubstParm =
+ SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(SubstParm, 0);
QualType Canon;
- if (!Parm->isCanonicalUnqualified()) {
- Canon = getCanonicalType(QualType(Parm, 0));
- Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
- ArgPack);
- SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
+ {
+ TemplateArgument CanonArgPack = getCanonicalTemplateArgument(ArgPack);
+ if (!AssociatedDecl->isCanonicalDecl() ||
+ !CanonArgPack.structurallyEquals(ArgPack)) {
+ Canon = getSubstTemplateTypeParmPackType(
+ AssociatedDecl->getCanonicalDecl(), Index, Final, CanonArgPack);
+ [[maybe_unused]] const auto *Nothing =
+ SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!Nothing);
+ }
}
- auto *SubstParm
- = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
- ArgPack);
+ auto *SubstParm = new (*this, alignof(SubstTemplateTypeParmPackType))
+ SubstTemplateTypeParmPackType(Canon, AssociatedDecl, Index, Final,
+ ArgPack);
Types.push_back(SubstParm);
SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
return QualType(SubstParm, 0);
@@ -4668,15 +4865,16 @@ QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
if (TTPDecl) {
QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
- TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);
+ TypeParm = new (*this, alignof(TemplateTypeParmType))
+ TemplateTypeParmType(TTPDecl, Canon);
TemplateTypeParmType *TypeCheck
= TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
assert(!TypeCheck && "Template type parameter canonical type broken");
(void)TypeCheck;
} else
- TypeParm = new (*this, TypeAlignment)
- TemplateTypeParmType(Depth, Index, ParameterPack);
+ TypeParm = new (*this, alignof(TemplateTypeParmType))
+ TemplateTypeParmType(Depth, Index, ParameterPack);
Types.push_back(TypeParm);
TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);
@@ -4691,7 +4889,8 @@ ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
QualType Underlying) const {
assert(!Name.getAsDependentTemplateName() &&
"No dependent template names here!");
- QualType TST = getTemplateSpecializationType(Name, Args, Underlying);
+ QualType TST =
+ getTemplateSpecializationType(Name, Args.arguments(), Underlying);
TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
TemplateSpecializationTypeLoc TL =
@@ -4707,14 +4906,14 @@ ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
- const TemplateArgumentListInfo &Args,
+ ArrayRef<TemplateArgumentLoc> Args,
QualType Underlying) const {
assert(!Template.getAsDependentTemplateName() &&
"No dependent template names here!");
SmallVector<TemplateArgument, 4> ArgVec;
ArgVec.reserve(Args.size());
- for (const TemplateArgumentLoc &Arg : Args.arguments())
+ for (const TemplateArgumentLoc &Arg : Args)
ArgVec.push_back(Arg.getArgument());
return getTemplateSpecializationType(Template, ArgVec, Underlying);
@@ -4738,11 +4937,10 @@ ASTContext::getTemplateSpecializationType(TemplateName Template,
"No dependent template names here!");
// Look through qualified template names.
if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
- Template = TemplateName(QTN->getTemplateDecl());
+ Template = QTN->getUnderlyingTemplate();
- bool IsTypeAlias =
- Template.getAsTemplateDecl() &&
- isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
+ const auto *TD = Template.getAsTemplateDecl();
+ bool IsTypeAlias = TD && TD->isTypeAlias();
QualType CanonType;
if (!Underlying.isNull())
CanonType = getCanonicalType(Underlying);
@@ -4759,9 +4957,9 @@ ASTContext::getTemplateSpecializationType(TemplateName Template,
// try to unique it: these types typically have location information that
// we don't unique and don't want to lose.
void *Mem = Allocate(sizeof(TemplateSpecializationType) +
- sizeof(TemplateArgument) * Args.size() +
- (IsTypeAlias? sizeof(QualType) : 0),
- TypeAlignment);
+ sizeof(TemplateArgument) * Args.size() +
+ (IsTypeAlias ? sizeof(QualType) : 0),
+ alignof(TemplateSpecializationType));
auto *Spec
= new (Mem) TemplateSpecializationType(Template, Args, CanonType,
IsTypeAlias ? Underlying : QualType());
@@ -4777,15 +4975,13 @@ QualType ASTContext::getCanonicalTemplateSpecializationType(
// Look through qualified template names.
if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
- Template = TemplateName(QTN->getTemplateDecl());
+ Template = TemplateName(QTN->getUnderlyingTemplate());
// Build the canonical template specialization type.
TemplateName CanonTemplate = getCanonicalTemplateName(Template);
- SmallVector<TemplateArgument, 4> CanonArgs;
- unsigned NumArgs = Args.size();
- CanonArgs.reserve(NumArgs);
- for (const TemplateArgument &Arg : Args)
- CanonArgs.push_back(getCanonicalTemplateArgument(Arg));
+ bool AnyNonCanonArgs = false;
+ auto CanonArgs =
+ ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs);
// Determine whether this canonical template specialization type already
// exists.
@@ -4800,8 +4996,8 @@ QualType ASTContext::getCanonicalTemplateSpecializationType(
if (!Spec) {
// Allocate a new canonical template specialization type.
void *Mem = Allocate((sizeof(TemplateSpecializationType) +
- sizeof(TemplateArgument) * NumArgs),
- TypeAlignment);
+ sizeof(TemplateArgument) * CanonArgs.size()),
+ alignof(TemplateSpecializationType));
Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
CanonArgs,
QualType(), QualType());
@@ -4834,8 +5030,9 @@ QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
(void)CheckT;
}
- void *Mem = Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl),
- TypeAlignment);
+ void *Mem =
+ Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl),
+ alignof(ElaboratedType));
T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl);
Types.push_back(T);
@@ -4861,7 +5058,7 @@ ASTContext::getParenType(QualType InnerType) const {
(void)CheckT;
}
- T = new (*this, TypeAlignment) ParenType(InnerType, Canon);
+ T = new (*this, alignof(ParenType)) ParenType(InnerType, Canon);
Types.push_back(T);
ParenTypes.InsertNode(T, InsertPos);
return QualType(T, 0);
@@ -4874,7 +5071,7 @@ ASTContext::getMacroQualifiedType(QualType UnderlyingTy,
if (!Canon.isCanonical())
Canon = getCanonicalType(UnderlyingTy);
- auto *newType = new (*this, TypeAlignment)
+ auto *newType = new (*this, alignof(MacroQualifiedType))
MacroQualifiedType(UnderlyingTy, Canon, MacroII);
Types.push_back(newType);
return QualType(newType, 0);
@@ -4899,18 +5096,16 @@ QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
if (T)
return QualType(T, 0);
- T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon);
+ T = new (*this, alignof(DependentNameType))
+ DependentNameType(Keyword, NNS, Name, Canon);
Types.push_back(T);
DependentNameTypes.InsertNode(T, InsertPos);
return QualType(T, 0);
}
-QualType
-ASTContext::getDependentTemplateSpecializationType(
- ElaboratedTypeKeyword Keyword,
- NestedNameSpecifier *NNS,
- const IdentifierInfo *Name,
- const TemplateArgumentListInfo &Args) const {
+QualType ASTContext::getDependentTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name, ArrayRef<TemplateArgumentLoc> Args) const {
// TODO: avoid this copy
SmallVector<TemplateArgument, 16> ArgCopy;
for (unsigned I = 0, E = Args.size(); I != E; ++I)
@@ -4940,16 +5135,12 @@ ASTContext::getDependentTemplateSpecializationType(
NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
ElaboratedTypeKeyword CanonKeyword = Keyword;
- if (Keyword == ETK_None) CanonKeyword = ETK_Typename;
+ if (Keyword == ElaboratedTypeKeyword::None)
+ CanonKeyword = ElaboratedTypeKeyword::Typename;
bool AnyNonCanonArgs = false;
- unsigned NumArgs = Args.size();
- SmallVector<TemplateArgument, 16> CanonArgs(NumArgs);
- for (unsigned I = 0; I != NumArgs; ++I) {
- CanonArgs[I] = getCanonicalTemplateArgument(Args[I]);
- if (!CanonArgs[I].structurallyEquals(Args[I]))
- AnyNonCanonArgs = true;
- }
+ auto CanonArgs =
+ ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs);
QualType Canon;
if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
@@ -4958,12 +5149,14 @@ ASTContext::getDependentTemplateSpecializationType(
CanonArgs);
// Find the insert position again.
- DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
+ [[maybe_unused]] auto *Nothing =
+ DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!Nothing && "canonical type broken");
}
void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) +
- sizeof(TemplateArgument) * NumArgs),
- TypeAlignment);
+ sizeof(TemplateArgument) * Args.size()),
+ alignof(DependentTemplateSpecializationType));
T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
Name, Args, Canon);
Types.push_back(T);
@@ -4976,7 +5169,7 @@ TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {
if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
QualType ArgType = getTypeDeclType(TTP);
if (TTP->isParameterPack())
- ArgType = getPackExpansionType(ArgType, None);
+ ArgType = getPackExpansionType(ArgType, std::nullopt);
Arg = TemplateArgument(ArgType);
} else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
@@ -4989,17 +5182,17 @@ TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {
if (T->isRecordType())
T.addConst();
Expr *E = new (*this) DeclRefExpr(
- *this, NTTP, /*enclosing*/ false, T,
+ *this, NTTP, /*RefersToEnclosingVariableOrCapture*/ false, T,
Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation());
if (NTTP->isParameterPack())
- E = new (*this) PackExpansionExpr(DependentTy, E, NTTP->getLocation(),
- None);
+ E = new (*this)
+ PackExpansionExpr(DependentTy, E, NTTP->getLocation(), std::nullopt);
Arg = TemplateArgument(E);
} else {
auto *TTP = cast<TemplateTemplateParmDecl>(Param);
if (TTP->isParameterPack())
- Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>());
+ Arg = TemplateArgument(TemplateName(TTP), std::optional<unsigned>());
else
Arg = TemplateArgument(TemplateName(TTP));
}
@@ -5020,7 +5213,7 @@ ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params,
}
QualType ASTContext::getPackExpansionType(QualType Pattern,
- Optional<unsigned> NumExpansions,
+ std::optional<unsigned> NumExpansions,
bool ExpectPackInType) {
assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) &&
"Pack expansions must expand one or more parameter packs");
@@ -5043,7 +5236,7 @@ QualType ASTContext::getPackExpansionType(QualType Pattern,
PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
}
- T = new (*this, TypeAlignment)
+ T = new (*this, alignof(PackExpansionType))
PackExpansionType(Pattern, Canon, NumExpansions);
Types.push_back(T);
PackExpansionTypes.InsertNode(T, InsertPos);
@@ -5088,7 +5281,7 @@ QualType ASTContext::getObjCObjectType(QualType BaseType,
ObjCProtocolDecl * const *Protocols,
unsigned NumProtocols) const {
return getObjCObjectType(BaseType, {},
- llvm::makeArrayRef(Protocols, NumProtocols),
+ llvm::ArrayRef(Protocols, NumProtocols),
/*isKindOf=*/false);
}
@@ -5123,11 +5316,8 @@ QualType ASTContext::getObjCObjectType(
// sorted-and-uniqued list of protocols and the type arguments
// canonicalized.
QualType canonical;
- bool typeArgsAreCanonical = std::all_of(effectiveTypeArgs.begin(),
- effectiveTypeArgs.end(),
- [&](QualType type) {
- return type.isCanonical();
- });
+ bool typeArgsAreCanonical = llvm::all_of(
+ effectiveTypeArgs, [&](QualType type) { return type.isCanonical(); });
bool protocolsSorted = areSortedAndUniqued(protocols);
if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
// Determine the canonical type arguments.
@@ -5162,7 +5352,7 @@ QualType ASTContext::getObjCObjectType(
unsigned size = sizeof(ObjCObjectTypeImpl);
size += typeArgs.size() * sizeof(QualType);
size += protocols.size() * sizeof(ObjCProtocolDecl *);
- void *mem = Allocate(size, TypeAlignment);
+ void *mem = Allocate(size, alignof(ObjCObjectTypeImpl));
auto *T =
new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
isKindOf);
@@ -5269,7 +5459,7 @@ ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
unsigned size = sizeof(ObjCTypeParamType);
size += protocols.size() * sizeof(ObjCProtocolDecl *);
- void *mem = Allocate(size, TypeAlignment);
+ void *mem = Allocate(size, alignof(ObjCTypeParamType));
auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols);
Types.push_back(newType);
@@ -5375,7 +5565,8 @@ QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
}
// No match.
- void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment);
+ void *Mem =
+ Allocate(sizeof(ObjCObjectPointerType), alignof(ObjCObjectPointerType));
auto *QType =
new (Mem) ObjCObjectPointerType(Canonical, ObjectT);
@@ -5401,7 +5592,7 @@ QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
Decl = Def;
- void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment);
+ void *Mem = Allocate(sizeof(ObjCInterfaceType), alignof(ObjCInterfaceType));
auto *T = new (Mem) ObjCInterfaceType(Decl);
Decl->TypeForDecl = T;
Types.push_back(T);
@@ -5413,30 +5604,32 @@ QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
/// multiple declarations that refer to "typeof(x)" all contain different
/// DeclRefExpr's. This doesn't effect the type checker, since it operates
/// on canonical type's (which are always unique).
-QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const {
+QualType ASTContext::getTypeOfExprType(Expr *tofExpr, TypeOfKind Kind) const {
TypeOfExprType *toe;
if (tofExpr->isTypeDependent()) {
llvm::FoldingSetNodeID ID;
- DependentTypeOfExprType::Profile(ID, *this, tofExpr);
+ DependentTypeOfExprType::Profile(ID, *this, tofExpr,
+ Kind == TypeOfKind::Unqualified);
void *InsertPos = nullptr;
- DependentTypeOfExprType *Canon
- = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
+ DependentTypeOfExprType *Canon =
+ DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
if (Canon) {
// We already have a "canonical" version of an identical, dependent
// typeof(expr) type. Use that as our canonical type.
- toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr,
- QualType((TypeOfExprType*)Canon, 0));
+ toe = new (*this, alignof(TypeOfExprType))
+ TypeOfExprType(tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0));
} else {
// Build a new, canonical typeof(expr) type.
- Canon
- = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr);
+ Canon = new (*this, alignof(DependentTypeOfExprType))
+ DependentTypeOfExprType(tofExpr, Kind);
DependentTypeOfExprTypes.InsertNode(Canon, InsertPos);
toe = Canon;
}
} else {
QualType Canonical = getCanonicalType(tofExpr->getType());
- toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical);
+ toe = new (*this, alignof(TypeOfExprType))
+ TypeOfExprType(tofExpr, Kind, Canonical);
}
Types.push_back(toe);
return QualType(toe, 0);
@@ -5447,13 +5640,37 @@ QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const {
/// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
/// an issue. This doesn't affect the type checker, since it operates
/// on canonical types (which are always unique).
-QualType ASTContext::getTypeOfType(QualType tofType) const {
+QualType ASTContext::getTypeOfType(QualType tofType, TypeOfKind Kind) const {
QualType Canonical = getCanonicalType(tofType);
- auto *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical);
+ auto *tot =
+ new (*this, alignof(TypeOfType)) TypeOfType(tofType, Canonical, Kind);
Types.push_back(tot);
return QualType(tot, 0);
}
+/// getReferenceQualifiedType - Given an expr, will return the type for
+/// that expression, as in [dcl.type.simple]p4 but without taking id-expressions
+/// and class member access into account.
+QualType ASTContext::getReferenceQualifiedType(const Expr *E) const {
+ // C++11 [dcl.type.simple]p4:
+ // [...]
+ QualType T = E->getType();
+ switch (E->getValueKind()) {
+ // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the
+ // type of e;
+ case VK_XValue:
+ return getRValueReferenceType(T);
+ // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the
+ // type of e;
+ case VK_LValue:
+ return getLValueReferenceType(T);
+ // - otherwise, decltype(e) is the type of e.
+ case VK_PRValue:
+ return T;
+ }
+ llvm_unreachable("Unknown value kind");
+}
+
/// Unlike many "get<Type>" functions, we don't unique DecltypeType
/// nodes. This would never be helpful, since each such type has its own
/// expression, and would not give a significant memory saving, since there
@@ -5474,13 +5691,14 @@ QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const {
= DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos);
if (!Canon) {
// Build a new, canonical decltype(expr) type.
- Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e);
+ Canon = new (*this, alignof(DependentDecltypeType))
+ DependentDecltypeType(e, DependentTy);
DependentDecltypeTypes.InsertNode(Canon, InsertPos);
}
- dt = new (*this, TypeAlignment)
+ dt = new (*this, alignof(DecltypeType))
DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0));
} else {
- dt = new (*this, TypeAlignment)
+ dt = new (*this, alignof(DecltypeType))
DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType));
}
Types.push_back(dt);
@@ -5506,33 +5724,25 @@ QualType ASTContext::getUnaryTransformType(QualType BaseType,
if (!Canon) {
// Build a new, canonical __underlying_type(type) type.
- Canon = new (*this, TypeAlignment)
- DependentUnaryTransformType(*this, getCanonicalType(BaseType),
- Kind);
+ Canon = new (*this, alignof(DependentUnaryTransformType))
+ DependentUnaryTransformType(*this, getCanonicalType(BaseType), Kind);
DependentUnaryTransformTypes.InsertNode(Canon, InsertPos);
}
- ut = new (*this, TypeAlignment) UnaryTransformType (BaseType,
- QualType(), Kind,
- QualType(Canon, 0));
+ ut = new (*this, alignof(UnaryTransformType))
+ UnaryTransformType(BaseType, QualType(), Kind, QualType(Canon, 0));
} else {
QualType CanonType = getCanonicalType(UnderlyingType);
- ut = new (*this, TypeAlignment) UnaryTransformType (BaseType,
- UnderlyingType, Kind,
- CanonType);
+ ut = new (*this, alignof(UnaryTransformType))
+ UnaryTransformType(BaseType, UnderlyingType, Kind, CanonType);
}
Types.push_back(ut);
return QualType(ut, 0);
}
-/// getAutoType - Return the uniqued reference to the 'auto' type which has been
-/// deduced to the given type, or to the canonical undeduced 'auto' type, or the
-/// canonical deduced-but-dependent 'auto' type.
-QualType
-ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
- bool IsDependent, bool IsPack,
- ConceptDecl *TypeConstraintConcept,
- ArrayRef<TemplateArgument> TypeConstraintArgs) const {
- assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack");
+QualType ASTContext::getAutoTypeInternal(
+ QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent,
+ bool IsPack, ConceptDecl *TypeConstraintConcept,
+ ArrayRef<TemplateArgument> TypeConstraintArgs, bool IsCanon) const {
if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto &&
!TypeConstraintConcept && !IsDependent)
return getAutoDeductType();
@@ -5545,21 +5755,76 @@ ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(AT, 0);
+ QualType Canon;
+ if (!IsCanon) {
+ if (!DeducedType.isNull()) {
+ Canon = DeducedType.getCanonicalType();
+ } else if (TypeConstraintConcept) {
+ bool AnyNonCanonArgs = false;
+ ConceptDecl *CanonicalConcept = TypeConstraintConcept->getCanonicalDecl();
+ auto CanonicalConceptArgs = ::getCanonicalTemplateArguments(
+ *this, TypeConstraintArgs, AnyNonCanonArgs);
+ if (CanonicalConcept != TypeConstraintConcept || AnyNonCanonArgs) {
+ Canon =
+ getAutoTypeInternal(QualType(), Keyword, IsDependent, IsPack,
+ CanonicalConcept, CanonicalConceptArgs, true);
+ // Find the insert position again.
+ [[maybe_unused]] auto *Nothing =
+ AutoTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!Nothing && "canonical type broken");
+ }
+ }
+ }
+
void *Mem = Allocate(sizeof(AutoType) +
- sizeof(TemplateArgument) * TypeConstraintArgs.size(),
- TypeAlignment);
+ sizeof(TemplateArgument) * TypeConstraintArgs.size(),
+ alignof(AutoType));
auto *AT = new (Mem) AutoType(
DeducedType, Keyword,
(IsDependent ? TypeDependence::DependentInstantiation
: TypeDependence::None) |
(IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None),
- TypeConstraintConcept, TypeConstraintArgs);
+ Canon, TypeConstraintConcept, TypeConstraintArgs);
Types.push_back(AT);
- if (InsertPos)
- AutoTypes.InsertNode(AT, InsertPos);
+ AutoTypes.InsertNode(AT, InsertPos);
return QualType(AT, 0);
}
+/// getAutoType - Return the uniqued reference to the 'auto' type which has been
+/// deduced to the given type, or to the canonical undeduced 'auto' type, or the
+/// canonical deduced-but-dependent 'auto' type.
+QualType
+ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
+ bool IsDependent, bool IsPack,
+ ConceptDecl *TypeConstraintConcept,
+ ArrayRef<TemplateArgument> TypeConstraintArgs) const {
+ assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack");
+ assert((!IsDependent || DeducedType.isNull()) &&
+ "A dependent auto should be undeduced");
+ return getAutoTypeInternal(DeducedType, Keyword, IsDependent, IsPack,
+ TypeConstraintConcept, TypeConstraintArgs);
+}
+
+QualType ASTContext::getUnconstrainedType(QualType T) const {
+ QualType CanonT = T.getCanonicalType();
+
+ // Remove a type-constraint from a top-level auto or decltype(auto).
+ if (auto *AT = CanonT->getAs<AutoType>()) {
+ if (!AT->isConstrained())
+ return T;
+ return getQualifiedType(getAutoType(QualType(), AT->getKeyword(), false,
+ AT->containsUnexpandedParameterPack()),
+ T.getQualifiers());
+ }
+
+ // FIXME: We only support constrained auto at the top level in the type of a
+ // non-type template parameter at the moment. Once we lift that restriction,
+ // we'll need to recursively build types containing auto here.
+ assert(!CanonT->getContainedAutoType() ||
+ !CanonT->getContainedAutoType()->isConstrained());
+ return T;
+}
+
/// Return the uniqued reference to the deduced template specialization type
/// which has been deduced to the given type, or to the canonical undeduced
/// such type, or the canonical deduced-but-dependent such type.
@@ -5574,11 +5839,13 @@ QualType ASTContext::getDeducedTemplateSpecializationType(
DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(DTST, 0);
- auto *DTST = new (*this, TypeAlignment)
+ auto *DTST = new (*this, alignof(DeducedTemplateSpecializationType))
DeducedTemplateSpecializationType(Template, DeducedType, IsDependent);
+ llvm::FoldingSetNodeID TempID;
+ DTST->Profile(TempID);
+ assert(ID == TempID && "ID does not match");
Types.push_back(DTST);
- if (InsertPos)
- DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos);
+ DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos);
return QualType(DTST, 0);
}
@@ -5604,7 +5871,7 @@ QualType ASTContext::getAtomicType(QualType T) const {
AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
}
- auto *New = new (*this, TypeAlignment) AtomicType(T, Canonical);
+ auto *New = new (*this, alignof(AtomicType)) AtomicType(T, Canonical);
Types.push_back(New);
AtomicTypes.InsertNode(New, InsertPos);
return QualType(New, 0);
@@ -5613,9 +5880,9 @@ QualType ASTContext::getAtomicType(QualType T) const {
/// getAutoDeductType - Get type pattern for deducing against 'auto'.
QualType ASTContext::getAutoDeductType() const {
if (AutoDeductTy.isNull())
- AutoDeductTy = QualType(new (*this, TypeAlignment)
+ AutoDeductTy = QualType(new (*this, alignof(AutoType))
AutoType(QualType(), AutoTypeKeyword::Auto,
- TypeDependence::None,
+ TypeDependence::None, QualType(),
/*concept*/ nullptr, /*args*/ {}),
0);
return AutoDeductTy;
@@ -5686,14 +5953,14 @@ QualType ASTContext::getUIntPtrType() const {
/// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
/// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
QualType ASTContext::getPointerDiffType() const {
- return getFromTargetType(Target->getPtrDiffType(0));
+ return getFromTargetType(Target->getPtrDiffType(LangAS::Default));
}
/// Return the unique unsigned counterpart of "ptrdiff_t"
/// integer type. The standard (C11 7.21.6.1p7) refers to this type
/// in the definition of %tu format specifier.
QualType ASTContext::getUnsignedPointerDiffType() const {
- return getFromTargetType(Target->getUnsignedPtrDiffType(0));
+ return getFromTargetType(Target->getUnsignedPtrDiffType(LangAS::Default));
}
/// Return the unique type for "pid_t" defined in
@@ -5783,7 +6050,11 @@ QualType ASTContext::getUnqualifiedArrayType(QualType type,
/// Attempt to unwrap two types that may both be array types with the same bound
/// (or both be array types of unknown bound) for the purpose of comparing the
/// cv-decomposition of two types per C++ [conv.qual].
-void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2) {
+///
+/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
+/// C++20 [conv.qual], if permitted by the current language mode.
+void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2,
+ bool AllowPiMismatch) {
while (true) {
auto *AT1 = getAsArrayType(T1);
if (!AT1)
@@ -5795,12 +6066,21 @@ void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2) {
// If we don't have two array types with the same constant bound nor two
// incomplete array types, we've unwrapped everything we can.
+ // C++20 also permits one type to be a constant array type and the other
+ // to be an incomplete array type.
+ // FIXME: Consider also unwrapping array of unknown bound and VLA.
if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) {
auto *CAT2 = dyn_cast<ConstantArrayType>(AT2);
- if (!CAT2 || CAT1->getSize() != CAT2->getSize())
+ if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) ||
+ (AllowPiMismatch && getLangOpts().CPlusPlus20 &&
+ isa<IncompleteArrayType>(AT2))))
+ return;
+ } else if (isa<IncompleteArrayType>(AT1)) {
+ if (!(isa<IncompleteArrayType>(AT2) ||
+ (AllowPiMismatch && getLangOpts().CPlusPlus20 &&
+ isa<ConstantArrayType>(AT2))))
return;
- } else if (!isa<IncompleteArrayType>(AT1) ||
- !isa<IncompleteArrayType>(AT2)) {
+ } else {
return;
}
@@ -5819,10 +6099,14 @@ void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2) {
/// "unwraps" pointer and pointer-to-member types to compare them at each
/// level.
///
+/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
+/// C++20 [conv.qual], if permitted by the current language mode.
+///
/// \return \c true if a pointer type was unwrapped, \c false if we reached a
/// pair of types that can't be unwrapped further.
-bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2) {
- UnwrapSimilarArrayTypes(T1, T2);
+bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2,
+ bool AllowPiMismatch) {
+ UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch);
const auto *T1PtrType = T1->getAs<PointerType>();
const auto *T2PtrType = T2->getAs<PointerType>();
@@ -5883,7 +6167,7 @@ bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) {
if (hasSameType(T1, T2))
return true;
- if (!UnwrapSimilarTypes(T1, T2))
+ if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false))
return false;
}
}
@@ -5937,13 +6221,18 @@ ASTContext::getNameForTemplate(TemplateName Name,
return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
NameLoc);
}
+ case TemplateName::UsingTemplate:
+ return DeclarationNameInfo(Name.getAsUsingShadowDecl()->getDeclName(),
+ NameLoc);
}
llvm_unreachable("bad template name kind!");
}
-TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const {
+TemplateName
+ASTContext::getCanonicalTemplateName(const TemplateName &Name) const {
switch (Name.getKind()) {
+ case TemplateName::UsingTemplate:
case TemplateName::QualifiedTemplate:
case TemplateName::Template: {
TemplateDecl *Template = Name.getAsTemplateDecl();
@@ -5971,23 +6260,472 @@ TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const {
}
case TemplateName::SubstTemplateTemplateParmPack: {
- SubstTemplateTemplateParmPackStorage *subst
- = Name.getAsSubstTemplateTemplateParmPack();
- TemplateTemplateParmDecl *canonParameter
- = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack());
- TemplateArgument canonArgPack
- = getCanonicalTemplateArgument(subst->getArgumentPack());
- return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack);
+ SubstTemplateTemplateParmPackStorage *subst =
+ Name.getAsSubstTemplateTemplateParmPack();
+ TemplateArgument canonArgPack =
+ getCanonicalTemplateArgument(subst->getArgumentPack());
+ return getSubstTemplateTemplateParmPack(
+ canonArgPack, subst->getAssociatedDecl()->getCanonicalDecl(),
+ subst->getFinal(), subst->getIndex());
}
}
llvm_unreachable("bad template name!");
}
-bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) {
- X = getCanonicalTemplateName(X);
- Y = getCanonicalTemplateName(Y);
- return X.getAsVoidPointer() == Y.getAsVoidPointer();
+bool ASTContext::hasSameTemplateName(const TemplateName &X,
+ const TemplateName &Y) const {
+ return getCanonicalTemplateName(X).getAsVoidPointer() ==
+ getCanonicalTemplateName(Y).getAsVoidPointer();
+}
+
+bool ASTContext::isSameConstraintExpr(const Expr *XCE, const Expr *YCE) const {
+ if (!XCE != !YCE)
+ return false;
+
+ if (!XCE)
+ return true;
+
+ llvm::FoldingSetNodeID XCEID, YCEID;
+ XCE->Profile(XCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
+ YCE->Profile(YCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
+ return XCEID == YCEID;
+}
+
+bool ASTContext::isSameTypeConstraint(const TypeConstraint *XTC,
+ const TypeConstraint *YTC) const {
+ if (!XTC != !YTC)
+ return false;
+
+ if (!XTC)
+ return true;
+
+ auto *NCX = XTC->getNamedConcept();
+ auto *NCY = YTC->getNamedConcept();
+ if (!NCX || !NCY || !isSameEntity(NCX, NCY))
+ return false;
+ if (XTC->getConceptReference()->hasExplicitTemplateArgs() !=
+ YTC->getConceptReference()->hasExplicitTemplateArgs())
+ return false;
+ if (XTC->getConceptReference()->hasExplicitTemplateArgs())
+ if (XTC->getConceptReference()
+ ->getTemplateArgsAsWritten()
+ ->NumTemplateArgs !=
+ YTC->getConceptReference()->getTemplateArgsAsWritten()->NumTemplateArgs)
+ return false;
+
+ // Compare slowly by profiling.
+ //
+ // We couldn't compare the profiling result for the template
+ // args here. Consider the following example in different modules:
+ //
+ // template <__integer_like _Tp, C<_Tp> Sentinel>
+ // constexpr _Tp operator()(_Tp &&__t, Sentinel &&last) const {
+ // return __t;
+ // }
+ //
+ // When we compare the profiling result for `C<_Tp>` in different
+ // modules, it will compare the type of `_Tp` in different modules.
+ // However, the type of `_Tp` in different modules refer to different
+ // types here naturally. So we couldn't compare the profiling result
+ // for the template args directly.
+ return isSameConstraintExpr(XTC->getImmediatelyDeclaredConstraint(),
+ YTC->getImmediatelyDeclaredConstraint());
+}
+
+bool ASTContext::isSameTemplateParameter(const NamedDecl *X,
+ const NamedDecl *Y) const {
+ if (X->getKind() != Y->getKind())
+ return false;
+
+ if (auto *TX = dyn_cast<TemplateTypeParmDecl>(X)) {
+ auto *TY = cast<TemplateTypeParmDecl>(Y);
+ if (TX->isParameterPack() != TY->isParameterPack())
+ return false;
+ if (TX->hasTypeConstraint() != TY->hasTypeConstraint())
+ return false;
+ return isSameTypeConstraint(TX->getTypeConstraint(),
+ TY->getTypeConstraint());
+ }
+
+ if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) {
+ auto *TY = cast<NonTypeTemplateParmDecl>(Y);
+ return TX->isParameterPack() == TY->isParameterPack() &&
+ TX->getASTContext().hasSameType(TX->getType(), TY->getType()) &&
+ isSameConstraintExpr(TX->getPlaceholderTypeConstraint(),
+ TY->getPlaceholderTypeConstraint());
+ }
+
+ auto *TX = cast<TemplateTemplateParmDecl>(X);
+ auto *TY = cast<TemplateTemplateParmDecl>(Y);
+ return TX->isParameterPack() == TY->isParameterPack() &&
+ isSameTemplateParameterList(TX->getTemplateParameters(),
+ TY->getTemplateParameters());
+}
+
+bool ASTContext::isSameTemplateParameterList(
+ const TemplateParameterList *X, const TemplateParameterList *Y) const {
+ if (X->size() != Y->size())
+ return false;
+
+ for (unsigned I = 0, N = X->size(); I != N; ++I)
+ if (!isSameTemplateParameter(X->getParam(I), Y->getParam(I)))
+ return false;
+
+ return isSameConstraintExpr(X->getRequiresClause(), Y->getRequiresClause());
+}
+
+bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X,
+ const NamedDecl *Y) const {
+ // If the type parameter isn't the same already, we don't need to check the
+ // default argument further.
+ if (!isSameTemplateParameter(X, Y))
+ return false;
+
+ if (auto *TTPX = dyn_cast<TemplateTypeParmDecl>(X)) {
+ auto *TTPY = cast<TemplateTypeParmDecl>(Y);
+ if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
+ return false;
+
+ return hasSameType(TTPX->getDefaultArgument(), TTPY->getDefaultArgument());
+ }
+
+ if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(X)) {
+ auto *NTTPY = cast<NonTypeTemplateParmDecl>(Y);
+ if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument())
+ return false;
+
+ Expr *DefaultArgumentX = NTTPX->getDefaultArgument()->IgnoreImpCasts();
+ Expr *DefaultArgumentY = NTTPY->getDefaultArgument()->IgnoreImpCasts();
+ llvm::FoldingSetNodeID XID, YID;
+ DefaultArgumentX->Profile(XID, *this, /*Canonical=*/true);
+ DefaultArgumentY->Profile(YID, *this, /*Canonical=*/true);
+ return XID == YID;
+ }
+
+ auto *TTPX = cast<TemplateTemplateParmDecl>(X);
+ auto *TTPY = cast<TemplateTemplateParmDecl>(Y);
+
+ if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
+ return false;
+
+ const TemplateArgument &TAX = TTPX->getDefaultArgument().getArgument();
+ const TemplateArgument &TAY = TTPY->getDefaultArgument().getArgument();
+ return hasSameTemplateName(TAX.getAsTemplate(), TAY.getAsTemplate());
+}
+
+static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) {
+ if (auto *NS = X->getAsNamespace())
+ return NS;
+ if (auto *NAS = X->getAsNamespaceAlias())
+ return NAS->getNamespace();
+ return nullptr;
+}
+
+static bool isSameQualifier(const NestedNameSpecifier *X,
+ const NestedNameSpecifier *Y) {
+ if (auto *NSX = getNamespace(X)) {
+ auto *NSY = getNamespace(Y);
+ if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl())
+ return false;
+ } else if (X->getKind() != Y->getKind())
+ return false;
+
+ // FIXME: For namespaces and types, we're permitted to check that the entity
+ // is named via the same tokens. We should probably do so.
+ switch (X->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ if (X->getAsIdentifier() != Y->getAsIdentifier())
+ return false;
+ break;
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ // We've already checked that we named the same namespace.
+ break;
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ if (X->getAsType()->getCanonicalTypeInternal() !=
+ Y->getAsType()->getCanonicalTypeInternal())
+ return false;
+ break;
+ case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Super:
+ return true;
+ }
+
+ // Recurse into earlier portion of NNS, if any.
+ auto *PX = X->getPrefix();
+ auto *PY = Y->getPrefix();
+ if (PX && PY)
+ return isSameQualifier(PX, PY);
+ return !PX && !PY;
+}
+
+/// Determine whether the attributes we can overload on are identical for A and
+/// B. Will ignore any overloadable attrs represented in the type of A and B.
+static bool hasSameOverloadableAttrs(const FunctionDecl *A,
+ const FunctionDecl *B) {
+ // Note that pass_object_size attributes are represented in the function's
+ // ExtParameterInfo, so we don't need to check them here.
+
+ llvm::FoldingSetNodeID Cand1ID, Cand2ID;
+ auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>();
+ auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>();
+
+ for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) {
+ std::optional<EnableIfAttr *> Cand1A = std::get<0>(Pair);
+ std::optional<EnableIfAttr *> Cand2A = std::get<1>(Pair);
+
+ // Return false if the number of enable_if attributes is different.
+ if (!Cand1A || !Cand2A)
+ return false;
+
+ Cand1ID.clear();
+ Cand2ID.clear();
+
+ (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true);
+ (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true);
+
+ // Return false if any of the enable_if expressions of A and B are
+ // different.
+ if (Cand1ID != Cand2ID)
+ return false;
+ }
+ return true;
+}
+
+bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const {
+ // Caution: this function is called by the AST reader during deserialization,
+ // so it cannot rely on AST invariants being met. Non-trivial accessors
+ // should be avoided, along with any traversal of redeclaration chains.
+
+ if (X == Y)
+ return true;
+
+ if (X->getDeclName() != Y->getDeclName())
+ return false;
+
+ // Must be in the same context.
+ //
+ // Note that we can't use DeclContext::Equals here, because the DeclContexts
+ // could be two different declarations of the same function. (We will fix the
+ // semantic DC to refer to the primary definition after merging.)
+ if (!declaresSameEntity(cast<Decl>(X->getDeclContext()->getRedeclContext()),
+ cast<Decl>(Y->getDeclContext()->getRedeclContext())))
+ return false;
+
+ // Two typedefs refer to the same entity if they have the same underlying
+ // type.
+ if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(X))
+ if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Y))
+ return hasSameType(TypedefX->getUnderlyingType(),
+ TypedefY->getUnderlyingType());
+
+ // Must have the same kind.
+ if (X->getKind() != Y->getKind())
+ return false;
+
+ // Objective-C classes and protocols with the same name always match.
+ if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X))
+ return true;
+
+ if (isa<ClassTemplateSpecializationDecl>(X)) {
+ // No need to handle these here: we merge them when adding them to the
+ // template.
+ return false;
+ }
+
+ // Compatible tags match.
+ if (const auto *TagX = dyn_cast<TagDecl>(X)) {
+ const auto *TagY = cast<TagDecl>(Y);
+ return (TagX->getTagKind() == TagY->getTagKind()) ||
+ ((TagX->getTagKind() == TagTypeKind::Struct ||
+ TagX->getTagKind() == TagTypeKind::Class ||
+ TagX->getTagKind() == TagTypeKind::Interface) &&
+ (TagY->getTagKind() == TagTypeKind::Struct ||
+ TagY->getTagKind() == TagTypeKind::Class ||
+ TagY->getTagKind() == TagTypeKind::Interface));
+ }
+
+ // Functions with the same type and linkage match.
+ // FIXME: This needs to cope with merging of prototyped/non-prototyped
+ // functions, etc.
+ if (const auto *FuncX = dyn_cast<FunctionDecl>(X)) {
+ const auto *FuncY = cast<FunctionDecl>(Y);
+ if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(X)) {
+ const auto *CtorY = cast<CXXConstructorDecl>(Y);
+ if (CtorX->getInheritedConstructor() &&
+ !isSameEntity(CtorX->getInheritedConstructor().getConstructor(),
+ CtorY->getInheritedConstructor().getConstructor()))
+ return false;
+ }
+
+ if (FuncX->isMultiVersion() != FuncY->isMultiVersion())
+ return false;
+
+ // Multiversioned functions with different feature strings are represented
+ // as separate declarations.
+ if (FuncX->isMultiVersion()) {
+ const auto *TAX = FuncX->getAttr<TargetAttr>();
+ const auto *TAY = FuncY->getAttr<TargetAttr>();
+ assert(TAX && TAY && "Multiversion Function without target attribute");
+
+ if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
+ return false;
+ }
+
+ // Per C++20 [temp.over.link]/4, friends in different classes are sometimes
+ // not the same entity if they are constrained.
+ if ((FuncX->isMemberLikeConstrainedFriend() ||
+ FuncY->isMemberLikeConstrainedFriend()) &&
+ !FuncX->getLexicalDeclContext()->Equals(
+ FuncY->getLexicalDeclContext())) {
+ return false;
+ }
+
+ if (!isSameConstraintExpr(FuncX->getTrailingRequiresClause(),
+ FuncY->getTrailingRequiresClause()))
+ return false;
+
+ auto GetTypeAsWritten = [](const FunctionDecl *FD) {
+ // Map to the first declaration that we've already merged into this one.
+ // The TSI of redeclarations might not match (due to calling conventions
+ // being inherited onto the type but not the TSI), but the TSI type of
+ // the first declaration of the function should match across modules.
+ FD = FD->getCanonicalDecl();
+ return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
+ : FD->getType();
+ };
+ QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
+ if (!hasSameType(XT, YT)) {
+ // We can get functions with different types on the redecl chain in C++17
+ // if they have differing exception specifications and at least one of
+ // the excpetion specs is unresolved.
+ auto *XFPT = XT->getAs<FunctionProtoType>();
+ auto *YFPT = YT->getAs<FunctionProtoType>();
+ if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
+ (isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) ||
+ isUnresolvedExceptionSpec(YFPT->getExceptionSpecType())) &&
+ hasSameFunctionTypeIgnoringExceptionSpec(XT, YT))
+ return true;
+ return false;
+ }
+
+ return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
+ hasSameOverloadableAttrs(FuncX, FuncY);
+ }
+
+ // Variables with the same type and linkage match.
+ if (const auto *VarX = dyn_cast<VarDecl>(X)) {
+ const auto *VarY = cast<VarDecl>(Y);
+ if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
+ // During deserialization, we might compare variables before we load
+ // their types. Assume the types will end up being the same.
+ if (VarX->getType().isNull() || VarY->getType().isNull())
+ return true;
+
+ if (hasSameType(VarX->getType(), VarY->getType()))
+ return true;
+
+ // We can get decls with different types on the redecl chain. Eg.
+ // template <typename T> struct S { static T Var[]; }; // #1
+ // template <typename T> T S<T>::Var[sizeof(T)]; // #2
+ // Only? happens when completing an incomplete array type. In this case
+ // when comparing #1 and #2 we should go through their element type.
+ const ArrayType *VarXTy = getAsArrayType(VarX->getType());
+ const ArrayType *VarYTy = getAsArrayType(VarY->getType());
+ if (!VarXTy || !VarYTy)
+ return false;
+ if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType())
+ return hasSameType(VarXTy->getElementType(), VarYTy->getElementType());
+ }
+ return false;
+ }
+
+ // Namespaces with the same name and inlinedness match.
+ if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(X)) {
+ const auto *NamespaceY = cast<NamespaceDecl>(Y);
+ return NamespaceX->isInline() == NamespaceY->isInline();
+ }
+
+ // Identical template names and kinds match if their template parameter lists
+ // and patterns match.
+ if (const auto *TemplateX = dyn_cast<TemplateDecl>(X)) {
+ const auto *TemplateY = cast<TemplateDecl>(Y);
+
+ // ConceptDecl wouldn't be the same if their constraint expression differs.
+ if (const auto *ConceptX = dyn_cast<ConceptDecl>(X)) {
+ const auto *ConceptY = cast<ConceptDecl>(Y);
+ if (!isSameConstraintExpr(ConceptX->getConstraintExpr(),
+ ConceptY->getConstraintExpr()))
+ return false;
+ }
+
+ return isSameEntity(TemplateX->getTemplatedDecl(),
+ TemplateY->getTemplatedDecl()) &&
+ isSameTemplateParameterList(TemplateX->getTemplateParameters(),
+ TemplateY->getTemplateParameters());
+ }
+
+ // Fields with the same name and the same type match.
+ if (const auto *FDX = dyn_cast<FieldDecl>(X)) {
+ const auto *FDY = cast<FieldDecl>(Y);
+ // FIXME: Also check the bitwidth is odr-equivalent, if any.
+ return hasSameType(FDX->getType(), FDY->getType());
+ }
+
+ // Indirect fields with the same target field match.
+ if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) {
+ const auto *IFDY = cast<IndirectFieldDecl>(Y);
+ return IFDX->getAnonField()->getCanonicalDecl() ==
+ IFDY->getAnonField()->getCanonicalDecl();
+ }
+
+ // Enumerators with the same name match.
+ if (isa<EnumConstantDecl>(X))
+ // FIXME: Also check the value is odr-equivalent.
+ return true;
+
+ // Using shadow declarations with the same target match.
+ if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) {
+ const auto *USY = cast<UsingShadowDecl>(Y);
+ return USX->getTargetDecl() == USY->getTargetDecl();
+ }
+
+ // Using declarations with the same qualifier match. (We already know that
+ // the name matches.)
+ if (const auto *UX = dyn_cast<UsingDecl>(X)) {
+ const auto *UY = cast<UsingDecl>(Y);
+ return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
+ UX->hasTypename() == UY->hasTypename() &&
+ UX->isAccessDeclaration() == UY->isAccessDeclaration();
+ }
+ if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) {
+ const auto *UY = cast<UnresolvedUsingValueDecl>(Y);
+ return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
+ UX->isAccessDeclaration() == UY->isAccessDeclaration();
+ }
+ if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X)) {
+ return isSameQualifier(
+ UX->getQualifier(),
+ cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier());
+ }
+
+ // Using-pack declarations are only created by instantiation, and match if
+ // they're instantiated from matching UnresolvedUsing...Decls.
+ if (const auto *UX = dyn_cast<UsingPackDecl>(X)) {
+ return declaresSameEntity(
+ UX->getInstantiatedFromUsingDecl(),
+ cast<UsingPackDecl>(Y)->getInstantiatedFromUsingDecl());
+ }
+
+ // Namespace alias definitions with the same target match.
+ if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) {
+ const auto *NAY = cast<NamespaceAliasDecl>(Y);
+ return NAX->getNamespace()->Equals(NAY->getNamespace());
+ }
+
+ return false;
}
TemplateArgument
@@ -6001,39 +6739,43 @@ ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
case TemplateArgument::Declaration: {
auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
- return TemplateArgument(D, Arg.getParamTypeForDecl());
+ return TemplateArgument(D, getCanonicalType(Arg.getParamTypeForDecl()),
+ Arg.getIsDefaulted());
}
case TemplateArgument::NullPtr:
return TemplateArgument(getCanonicalType(Arg.getNullPtrType()),
- /*isNullPtr*/true);
+ /*isNullPtr*/ true, Arg.getIsDefaulted());
case TemplateArgument::Template:
- return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()));
+ return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()),
+ Arg.getIsDefaulted());
case TemplateArgument::TemplateExpansion:
- return TemplateArgument(getCanonicalTemplateName(
- Arg.getAsTemplateOrTemplatePattern()),
- Arg.getNumTemplateExpansions());
+ return TemplateArgument(
+ getCanonicalTemplateName(Arg.getAsTemplateOrTemplatePattern()),
+ Arg.getNumTemplateExpansions(), Arg.getIsDefaulted());
case TemplateArgument::Integral:
return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType()));
+ case TemplateArgument::StructuralValue:
+ return TemplateArgument(*this,
+ getCanonicalType(Arg.getStructuralValueType()),
+ Arg.getAsStructuralValue());
+
case TemplateArgument::Type:
- return TemplateArgument(getCanonicalType(Arg.getAsType()));
+ return TemplateArgument(getCanonicalType(Arg.getAsType()),
+ /*isNullPtr*/ false, Arg.getIsDefaulted());
case TemplateArgument::Pack: {
- if (Arg.pack_size() == 0)
+ bool AnyNonCanonArgs = false;
+ auto CanonArgs = ::getCanonicalTemplateArguments(
+ *this, Arg.pack_elements(), AnyNonCanonArgs);
+ if (!AnyNonCanonArgs)
return Arg;
-
- auto *CanonArgs = new (*this) TemplateArgument[Arg.pack_size()];
- unsigned Idx = 0;
- for (TemplateArgument::pack_iterator A = Arg.pack_begin(),
- AEnd = Arg.pack_end();
- A != AEnd; (void)++A, ++Idx)
- CanonArgs[Idx] = getCanonicalTemplateArgument(*A);
-
- return TemplateArgument(llvm::makeArrayRef(CanonArgs, Arg.pack_size()));
+ return TemplateArgument::CreatePackCopy(const_cast<ASTContext &>(*this),
+ CanonArgs);
}
}
@@ -6205,7 +6947,7 @@ QualType ASTContext::getArrayDecayedType(QualType Ty) const {
PrettyArrayType->getIndexTypeQualifiers());
// int x[_Nullable] -> int * _Nullable
- if (auto Nullability = Ty->getNullability(*this)) {
+ if (auto Nullability = Ty->getNullability()) {
Result = const_cast<ASTContext *>(this)->getAttributedType(
AttributedType::getNullabilityAttrKind(*Nullability), Result, Result);
}
@@ -6242,6 +6984,21 @@ ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const {
return ElementCount;
}
+uint64_t ASTContext::getArrayInitLoopExprElementCount(
+ const ArrayInitLoopExpr *AILE) const {
+ if (!AILE)
+ return 0;
+
+ uint64_t ElementCount = 1;
+
+ do {
+ ElementCount *= AILE->getArraySize().getZExtValue();
+ AILE = dyn_cast<ArrayInitLoopExpr>(AILE->getSubExpr());
+ } while (AILE);
+
+ return ElementCount;
+}
+
/// getFloatingRank - Return a relative rank for floating point types.
/// This routine will assert if passed a built-in type that isn't a float.
static FloatingRank getFloatingRank(QualType T) {
@@ -6257,41 +7014,10 @@ static FloatingRank getFloatingRank(QualType T) {
case BuiltinType::LongDouble: return LongDoubleRank;
case BuiltinType::Float128: return Float128Rank;
case BuiltinType::BFloat16: return BFloat16Rank;
+ case BuiltinType::Ibm128: return Ibm128Rank;
}
}
-/// getFloatingTypeOfSizeWithinDomain - Returns a real floating
-/// point or a complex type (based on typeDomain/typeSize).
-/// 'typeDomain' is a real floating point or complex type.
-/// 'typeSize' is a real floating point or complex type.
-QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size,
- QualType Domain) const {
- FloatingRank EltRank = getFloatingRank(Size);
- if (Domain->isComplexType()) {
- switch (EltRank) {
- case BFloat16Rank: llvm_unreachable("Complex bfloat16 is not supported");
- case Float16Rank:
- case HalfRank: llvm_unreachable("Complex half is not supported");
- case FloatRank: return FloatComplexTy;
- case DoubleRank: return DoubleComplexTy;
- case LongDoubleRank: return LongDoubleComplexTy;
- case Float128Rank: return Float128ComplexTy;
- }
- }
-
- assert(Domain->isRealFloatingType() && "Unknown domain!");
- switch (EltRank) {
- case Float16Rank: return HalfTy;
- case BFloat16Rank: return BFloat16Ty;
- case HalfRank: return HalfTy;
- case FloatRank: return FloatTy;
- case DoubleRank: return DoubleTy;
- case LongDoubleRank: return LongDoubleTy;
- case Float128Rank: return Float128Ty;
- }
- llvm_unreachable("getFloatingRank(): illegal value for rank");
-}
-
/// getFloatingTypeOrder - Compare the rank of the two specified floating
/// point types, ignoring the domain of the type (i.e. 'double' ==
/// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
@@ -6321,7 +7047,7 @@ unsigned ASTContext::getIntegerRank(const Type *T) const {
// Results in this 'losing' to any type of the same size, but winning if
// larger.
- if (const auto *EIT = dyn_cast<ExtIntType>(T))
+ if (const auto *EIT = dyn_cast<BitIntType>(T))
return 0 + (EIT->getNumBits() << 3);
switch (cast<BuiltinType>(T)->getKind()) {
@@ -6348,6 +7074,21 @@ unsigned ASTContext::getIntegerRank(const Type *T) const {
case BuiltinType::Int128:
case BuiltinType::UInt128:
return 7 + (getIntWidth(Int128Ty) << 3);
+
+ // "The ranks of char8_t, char16_t, char32_t, and wchar_t equal the ranks of
+ // their underlying types" [c++20 conv.rank]
+ case BuiltinType::Char8:
+ return getIntegerRank(UnsignedCharTy.getTypePtr());
+ case BuiltinType::Char16:
+ return getIntegerRank(
+ getFromTargetType(Target->getChar16Type()).getTypePtr());
+ case BuiltinType::Char32:
+ return getIntegerRank(
+ getFromTargetType(Target->getChar32Type()).getTypePtr());
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ return getIntegerRank(
+ getFromTargetType(Target->getWCharType()).getTypePtr());
}
}
@@ -6413,7 +7154,7 @@ QualType ASTContext::isPromotableBitField(Expr *E) const {
/// integer type.
QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
assert(!Promotable.isNull());
- assert(Promotable->isPromotableIntegerType());
+ assert(isPromotableIntegerType(Promotable));
if (const auto *ET = Promotable->getAs<EnumType>())
return ET->getDecl()->getPromotionType();
@@ -6433,12 +7174,11 @@ QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
uint64_t FromSize = getTypeSize(BT);
QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy,
LongLongTy, UnsignedLongLongTy };
- for (size_t Idx = 0; Idx < llvm::array_lengthof(PromoteTypes); ++Idx) {
- uint64_t ToSize = getTypeSize(PromoteTypes[Idx]);
+ for (const auto &PT : PromoteTypes) {
+ uint64_t ToSize = getTypeSize(PT);
if (FromSize < ToSize ||
- (FromSize == ToSize &&
- FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType()))
- return PromoteTypes[Idx];
+ (FromSize == ToSize && FromIsSigned == PT->isSignedIntegerType()))
+ return PT;
}
llvm_unreachable("char type should fit into long long");
}
@@ -6923,7 +7663,7 @@ std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
// FIXME: There might(should) be a better way of doing this computation!
CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
CharUnits ParmOffset = PtrSize;
- for (auto PI : Decl->parameters()) {
+ for (auto *PI : Decl->parameters()) {
QualType PType = PI->getType();
CharUnits sz = getObjCEncodingTypeSize(PType);
if (sz.isZero())
@@ -6938,7 +7678,7 @@ std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
// Argument types.
ParmOffset = PtrSize;
- for (auto PVDecl : Decl->parameters()) {
+ for (auto *PVDecl : Decl->parameters()) {
QualType PType = PVDecl->getOriginalType();
if (const auto *AT =
dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
@@ -6967,7 +7707,7 @@ ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const {
getObjCEncodingForType(Decl->getReturnType(), S);
CharUnits ParmOffset;
// Compute size of all parameters.
- for (auto PI : Decl->parameters()) {
+ for (auto *PI : Decl->parameters()) {
QualType PType = PI->getType();
CharUnits sz = getObjCEncodingTypeSize(PType);
if (sz.isZero())
@@ -6981,7 +7721,7 @@ ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const {
ParmOffset = CharUnits::Zero();
// Argument types.
- for (auto PVDecl : Decl->parameters()) {
+ for (auto *PVDecl : Decl->parameters()) {
QualType PType = PVDecl->getOriginalType();
if (const auto *AT =
dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
@@ -7005,7 +7745,7 @@ ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const {
void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT,
QualType T, std::string& S,
bool Extended) const {
- // Encode type qualifer, 'in', 'inout', etc. for the parameter.
+ // Encode type qualifier, 'in', 'inout', etc. for the parameter.
getObjCEncodingForTypeQualifier(QT, S);
// Encode parameter type.
ObjCEncOptions Options = ObjCEncOptions()
@@ -7113,6 +7853,7 @@ ASTContext::getObjCPropertyImplDeclForPropertyDecl(
/// kPropertyWeak = 'W' // 'weak' property
/// kPropertyStrong = 'P' // property GC'able
/// kPropertyNonAtomic = 'N' // property non-atomic
+/// kPropertyOptional = '?' // property optional
/// };
/// @endcode
std::string
@@ -7138,6 +7879,9 @@ ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
// closely resembles encoding of ivars.
getObjCEncodingForPropertyType(PD->getType(), S);
+ if (PD->isOptional())
+ S += ",?";
+
if (PD->isReadOnly()) {
S += ",R";
if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy)
@@ -7188,7 +7932,7 @@ ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
/// 'l' or 'L' , but not always. For typedefs, we need to use
/// 'i' or 'I' instead if encoding a struct field, or a pointer!
void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const {
- if (isa<TypedefType>(PointeeTy.getTypePtr())) {
+ if (PointeeTy->getAs<TypedefType>()) {
if (const auto *BT = PointeeTy->getAs<BuiltinType>()) {
if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32)
PointeeTy = UnsignedIntTy;
@@ -7263,6 +8007,7 @@ static char getObjCEncodingForPrimitiveType(const ASTContext *C,
case BuiltinType::BFloat16:
case BuiltinType::Float16:
case BuiltinType::Float128:
+ case BuiltinType::Ibm128:
case BuiltinType::Half:
case BuiltinType::ShortAccum:
case BuiltinType::Accum:
@@ -7296,6 +8041,8 @@ static char getObjCEncodingForPrimitiveType(const ASTContext *C,
#include "clang/Basic/AArch64SVEACLETypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
{
DiagnosticsEngine &Diags = C->getDiagnostics();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
@@ -7410,7 +8157,7 @@ static bool hasTemplateSpecializationInEncodedString(const Type *T,
if (!CXXRD->hasDefinition() || !VisitBasesAndFields)
return false;
- for (auto B : CXXRD->bases())
+ for (const auto &B : CXXRD->bases())
if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(),
true))
return true;
@@ -7475,7 +8222,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
// pointee gets emitted _before_ the '^'. The read-only qualifier of
// the pointer itself gets ignored, _unless_ we are looking at a typedef!
// Also, do not emit the 'r' for anything but the outermost type!
- if (isa<TypedefType>(T.getTypePtr())) {
+ if (T->getAs<TypedefType>()) {
if (Options.IsOutermostType() && T.isConstQualified()) {
isReadOnly = true;
S += 'r';
@@ -7493,7 +8240,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
// Another legacy compatibility encoding. Some ObjC qualifier and type
// combinations need to be rearranged.
// Rewrite "in const" from "nr" to "rn"
- if (StringRef(S).endswith("nr"))
+ if (StringRef(S).ends_with("nr"))
S.replace(S.end()-2, S.end(), "rn");
}
@@ -7657,7 +8404,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
return;
}
// TODO: Double check to make sure this intentionally falls through.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Type::ObjCInterface: {
@@ -7709,7 +8456,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
.setExpandStructures()),
FD);
if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) {
- // Note that we do extended encoding of protocol qualifer list
+ // Note that we do extended encoding of protocol qualifier list
// Only when doing ivar or property encoding.
S += '"';
for (const auto *I : OPT->quals()) {
@@ -7754,6 +8501,11 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
*NotEncodedT = T;
return;
+ case Type::BitInt:
+ if (NotEncodedT)
+ *NotEncodedT = T;
+ return;
+
// We could see an undeduced auto type here during error recovery.
// Just ignore it.
case Type::Auto:
@@ -7761,7 +8513,6 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
return;
case Type::Pipe:
- case Type::ExtInt:
#define ABSTRACT_TYPE(KIND, BASE)
#define TYPE(KIND, BASE)
#define DEPENDENT_TYPE(KIND, BASE) \
@@ -7803,14 +8554,12 @@ void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
}
}
- unsigned i = 0;
for (FieldDecl *Field : RDecl->fields()) {
if (!Field->isZeroLengthBitField(*this) && Field->isZeroSize(*this))
continue;
- uint64_t offs = layout.getFieldOffset(i);
+ uint64_t offs = layout.getFieldOffset(Field->getFieldIndex());
FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
std::make_pair(offs, Field));
- ++i;
}
if (CXXRec && includeVBases) {
@@ -8006,12 +8755,11 @@ CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) {
RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list");
if (Context->getLangOpts().CPlusPlus) {
// namespace std { struct __va_list {
- NamespaceDecl *NS;
- NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context),
- Context->getTranslationUnitDecl(),
- /*Inline*/ false, SourceLocation(),
- SourceLocation(), &Context->Idents.get("std"),
- /*PrevDecl*/ nullptr);
+ auto *NS = NamespaceDecl::Create(
+ const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(),
+ /*Inline=*/false, SourceLocation(), SourceLocation(),
+ &Context->Idents.get("std"),
+ /*PrevDecl=*/nullptr, /*Nested=*/false);
NS->setImplicit();
VaListTagDecl->setDeclContext(NS);
}
@@ -8121,9 +8869,8 @@ static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) {
// typedef __va_list_tag __builtin_va_list[1];
llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
- QualType VaListTagArrayType
- = Context->getConstantArrayType(VaListTagTypedefType,
- Size, nullptr, ArrayType::Normal, 0);
+ QualType VaListTagArrayType = Context->getConstantArrayType(
+ VaListTagTypedefType, Size, nullptr, ArraySizeModifier::Normal, 0);
return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
}
@@ -8177,7 +8924,7 @@ CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) {
// typedef struct __va_list_tag __builtin_va_list[1];
llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
QualType VaListTagArrayType = Context->getConstantArrayType(
- VaListTagType, Size, nullptr, ArrayType::Normal, 0);
+ VaListTagType, Size, nullptr, ArraySizeModifier::Normal, 0);
return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
}
@@ -8185,7 +8932,7 @@ static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) {
// typedef int __builtin_va_list[4];
llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4);
QualType IntArrayType = Context->getConstantArrayType(
- Context->IntTy, Size, nullptr, ArrayType::Normal, 0);
+ Context->IntTy, Size, nullptr, ArraySizeModifier::Normal, 0);
return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list");
}
@@ -8198,9 +8945,9 @@ CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) {
NamespaceDecl *NS;
NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context),
Context->getTranslationUnitDecl(),
- /*Inline*/false, SourceLocation(),
+ /*Inline=*/false, SourceLocation(),
SourceLocation(), &Context->Idents.get("std"),
- /*PrevDecl*/ nullptr);
+ /*PrevDecl=*/nullptr, /*Nested=*/false);
NS->setImplicit();
VaListDecl->setDeclContext(NS);
}
@@ -8280,7 +9027,7 @@ CreateSystemZBuiltinVaListDecl(const ASTContext *Context) {
// typedef __va_list_tag __builtin_va_list[1];
llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
QualType VaListTagArrayType = Context->getConstantArrayType(
- VaListTagType, Size, nullptr, ArrayType::Normal, 0);
+ VaListTagType, Size, nullptr, ArraySizeModifier::Normal, 0);
return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
}
@@ -8312,8 +9059,8 @@ static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) {
FieldDecl *Field = FieldDecl::Create(
const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(),
SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i],
- /*TInfo=*/0,
- /*BitWidth=*/0,
+ /*TInfo=*/nullptr,
+ /*BitWidth=*/nullptr,
/*Mutable=*/false, ICIS_NoInit);
Field->setAccess(AS_public);
VaListTagDecl->addDecl(Field);
@@ -8331,7 +9078,7 @@ static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) {
// typedef __va_list_tag __builtin_va_list[1];
llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1);
QualType VaListTagArrayType = Context->getConstantArrayType(
- VaListTagTypedefType, Size, nullptr, ArrayType::Normal, 0);
+ VaListTagTypedefType, Size, nullptr, ArraySizeModifier::Normal, 0);
return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list");
}
@@ -8388,6 +9135,10 @@ TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const {
}
bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const {
+ // Allow redecl custom type checking builtin for HLSL.
+ if (LangOpts.HLSL && FD->getBuiltinID() != Builtin::NotBuiltin &&
+ BuiltinInfo.hasCustomTypechecking(FD->getBuiltinID()))
+ return true;
return BuiltinInfo.canBeRedeclared(FD->getBuiltinID());
}
@@ -8432,10 +9183,9 @@ TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const {
/// Retrieve the template name that represents a qualified
/// template name such as \c std::vector.
-TemplateName
-ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
- bool TemplateKeyword,
- TemplateDecl *Template) const {
+TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
+ bool TemplateKeyword,
+ TemplateName Template) const {
assert(NNS && "Missing nested-name-specifier in qualified template name");
// FIXME: Canonicalization?
@@ -8527,18 +9277,20 @@ ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
return TemplateName(QTN);
}
-TemplateName
-ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param,
- TemplateName replacement) const {
+TemplateName ASTContext::getSubstTemplateTemplateParm(
+ TemplateName Replacement, Decl *AssociatedDecl, unsigned Index,
+ std::optional<unsigned> PackIndex) const {
llvm::FoldingSetNodeID ID;
- SubstTemplateTemplateParmStorage::Profile(ID, param, replacement);
+ SubstTemplateTemplateParmStorage::Profile(ID, Replacement, AssociatedDecl,
+ Index, PackIndex);
void *insertPos = nullptr;
SubstTemplateTemplateParmStorage *subst
= SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos);
if (!subst) {
- subst = new (*this) SubstTemplateTemplateParmStorage(param, replacement);
+ subst = new (*this) SubstTemplateTemplateParmStorage(
+ Replacement, AssociatedDecl, Index, PackIndex);
SubstTemplateTemplateParms.InsertNode(subst, insertPos);
}
@@ -8546,20 +9298,21 @@ ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param,
}
TemplateName
-ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param,
- const TemplateArgument &ArgPack) const {
+ASTContext::getSubstTemplateTemplateParmPack(const TemplateArgument &ArgPack,
+ Decl *AssociatedDecl,
+ unsigned Index, bool Final) const {
auto &Self = const_cast<ASTContext &>(*this);
llvm::FoldingSetNodeID ID;
- SubstTemplateTemplateParmPackStorage::Profile(ID, Self, Param, ArgPack);
+ SubstTemplateTemplateParmPackStorage::Profile(ID, Self, ArgPack,
+ AssociatedDecl, Index, Final);
void *InsertPos = nullptr;
SubstTemplateTemplateParmPackStorage *Subst
= SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos);
if (!Subst) {
- Subst = new (*this) SubstTemplateTemplateParmPackStorage(Param,
- ArgPack.pack_size(),
- ArgPack.pack_begin());
+ Subst = new (*this) SubstTemplateTemplateParmPackStorage(
+ ArgPack.pack_elements(), AssociatedDecl, Index, Final);
SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos);
}
@@ -8659,14 +9412,18 @@ bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
const auto *Second = SecondVec->castAs<VectorType>();
if (First->getNumElements() == Second->getNumElements() &&
hasSameType(First->getElementType(), Second->getElementType()) &&
- First->getVectorKind() != VectorType::AltiVecPixel &&
- First->getVectorKind() != VectorType::AltiVecBool &&
- Second->getVectorKind() != VectorType::AltiVecPixel &&
- Second->getVectorKind() != VectorType::AltiVecBool &&
- First->getVectorKind() != VectorType::SveFixedLengthDataVector &&
- First->getVectorKind() != VectorType::SveFixedLengthPredicateVector &&
- Second->getVectorKind() != VectorType::SveFixedLengthDataVector &&
- Second->getVectorKind() != VectorType::SveFixedLengthPredicateVector)
+ First->getVectorKind() != VectorKind::AltiVecPixel &&
+ First->getVectorKind() != VectorKind::AltiVecBool &&
+ Second->getVectorKind() != VectorKind::AltiVecPixel &&
+ Second->getVectorKind() != VectorKind::AltiVecBool &&
+ First->getVectorKind() != VectorKind::SveFixedLengthData &&
+ First->getVectorKind() != VectorKind::SveFixedLengthPredicate &&
+ Second->getVectorKind() != VectorKind::SveFixedLengthData &&
+ Second->getVectorKind() != VectorKind::SveFixedLengthPredicate &&
+ First->getVectorKind() != VectorKind::RVVFixedLengthData &&
+ Second->getVectorKind() != VectorKind::RVVFixedLengthData &&
+ First->getVectorKind() != VectorKind::RVVFixedLengthMask &&
+ Second->getVectorKind() != VectorKind::RVVFixedLengthMask)
return true;
return false;
@@ -8674,29 +9431,31 @@ bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
/// getSVETypeSize - Return SVE vector or predicate register size.
static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) {
- assert(Ty->isVLSTBuiltinType() && "Invalid SVE Type");
- return Ty->getKind() == BuiltinType::SveBool
- ? Context.getLangOpts().ArmSveVectorBits / Context.getCharWidth()
- : Context.getLangOpts().ArmSveVectorBits;
+ assert(Ty->isSveVLSBuiltinType() && "Invalid SVE Type");
+ if (Ty->getKind() == BuiltinType::SveBool ||
+ Ty->getKind() == BuiltinType::SveCount)
+ return (Context.getLangOpts().VScaleMin * 128) / Context.getCharWidth();
+ return Context.getLangOpts().VScaleMin * 128;
}
bool ASTContext::areCompatibleSveTypes(QualType FirstType,
QualType SecondType) {
- assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) ||
- (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) &&
- "Expected SVE builtin type and vector type!");
+ assert(
+ ((FirstType->isSVESizelessBuiltinType() && SecondType->isVectorType()) ||
+ (FirstType->isVectorType() && SecondType->isSVESizelessBuiltinType())) &&
+ "Expected SVE builtin type and vector type!");
auto IsValidCast = [this](QualType FirstType, QualType SecondType) {
if (const auto *BT = FirstType->getAs<BuiltinType>()) {
if (const auto *VT = SecondType->getAs<VectorType>()) {
// Predicates have the same representation as uint8 so we also have to
// check the kind to make these types incompatible.
- if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
+ if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
return BT->getKind() == BuiltinType::SveBool;
- else if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector)
+ else if (VT->getVectorKind() == VectorKind::SveFixedLengthData)
return VT->getElementType().getCanonicalType() ==
FirstType->getSveEltType(*this);
- else if (VT->getVectorKind() == VectorType::GenericVector)
+ else if (VT->getVectorKind() == VectorKind::Generic)
return getTypeSize(SecondType) == getSVETypeSize(*this, BT) &&
hasSameType(VT->getElementType(),
getBuiltinVectorTypeInfo(BT).ElementType);
@@ -8711,9 +9470,10 @@ bool ASTContext::areCompatibleSveTypes(QualType FirstType,
bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType,
QualType SecondType) {
- assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) ||
- (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) &&
- "Expected SVE builtin type and vector type!");
+ assert(
+ ((FirstType->isSVESizelessBuiltinType() && SecondType->isVectorType()) ||
+ (FirstType->isVectorType() && SecondType->isSVESizelessBuiltinType())) &&
+ "Expected SVE builtin type and vector type!");
auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) {
const auto *BT = FirstType->getAs<BuiltinType>();
@@ -8721,16 +9481,15 @@ bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType,
return false;
const auto *VecTy = SecondType->getAs<VectorType>();
- if (VecTy &&
- (VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector ||
- VecTy->getVectorKind() == VectorType::GenericVector)) {
+ if (VecTy && (VecTy->getVectorKind() == VectorKind::SveFixedLengthData ||
+ VecTy->getVectorKind() == VectorKind::Generic)) {
const LangOptions::LaxVectorConversionKind LVCKind =
getLangOpts().getLaxVectorConversions();
// Can not convert between sve predicates and sve vectors because of
// different size.
if (BT->getKind() == BuiltinType::SveBool &&
- VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector)
+ VecTy->getVectorKind() == VectorKind::SveFixedLengthData)
return false;
// If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion.
@@ -8738,7 +9497,7 @@ bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType,
// converts to VLAT and VLAT implicitly converts to GNUT."
// ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and
// predicates.
- if (VecTy->getVectorKind() == VectorType::GenericVector &&
+ if (VecTy->getVectorKind() == VectorKind::Generic &&
getTypeSize(SecondType) != getSVETypeSize(*this, BT))
return false;
@@ -8761,6 +9520,97 @@ bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType,
IsLaxCompatible(SecondType, FirstType);
}
+/// getRVVTypeSize - Return RVV vector register size.
+static uint64_t getRVVTypeSize(ASTContext &Context, const BuiltinType *Ty) {
+ assert(Ty->isRVVVLSBuiltinType() && "Invalid RVV Type");
+ auto VScale = Context.getTargetInfo().getVScaleRange(Context.getLangOpts());
+ if (!VScale)
+ return 0;
+
+ ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(Ty);
+
+ unsigned EltSize = Context.getTypeSize(Info.ElementType);
+ if (Info.ElementType == Context.BoolTy)
+ EltSize = 1;
+
+ unsigned MinElts = Info.EC.getKnownMinValue();
+ return VScale->first * MinElts * EltSize;
+}
+
+bool ASTContext::areCompatibleRVVTypes(QualType FirstType,
+ QualType SecondType) {
+ assert(
+ ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) ||
+ (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) &&
+ "Expected RVV builtin type and vector type!");
+
+ auto IsValidCast = [this](QualType FirstType, QualType SecondType) {
+ if (const auto *BT = FirstType->getAs<BuiltinType>()) {
+ if (const auto *VT = SecondType->getAs<VectorType>()) {
+ if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask) {
+ BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(BT);
+ return FirstType->isRVVVLSBuiltinType() &&
+ Info.ElementType == BoolTy &&
+ getTypeSize(SecondType) == getRVVTypeSize(*this, BT);
+ }
+ if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
+ VT->getVectorKind() == VectorKind::Generic)
+ return FirstType->isRVVVLSBuiltinType() &&
+ getTypeSize(SecondType) == getRVVTypeSize(*this, BT) &&
+ hasSameType(VT->getElementType(),
+ getBuiltinVectorTypeInfo(BT).ElementType);
+ }
+ }
+ return false;
+ };
+
+ return IsValidCast(FirstType, SecondType) ||
+ IsValidCast(SecondType, FirstType);
+}
+
+bool ASTContext::areLaxCompatibleRVVTypes(QualType FirstType,
+ QualType SecondType) {
+ assert(
+ ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) ||
+ (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) &&
+ "Expected RVV builtin type and vector type!");
+
+ auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) {
+ const auto *BT = FirstType->getAs<BuiltinType>();
+ if (!BT)
+ return false;
+
+ if (!BT->isRVVVLSBuiltinType())
+ return false;
+
+ const auto *VecTy = SecondType->getAs<VectorType>();
+ if (VecTy && VecTy->getVectorKind() == VectorKind::Generic) {
+ const LangOptions::LaxVectorConversionKind LVCKind =
+ getLangOpts().getLaxVectorConversions();
+
+ // If __riscv_v_fixed_vlen != N do not allow vector lax conversion.
+ if (getTypeSize(SecondType) != getRVVTypeSize(*this, BT))
+ return false;
+
+ // If -flax-vector-conversions=all is specified, the types are
+ // certainly compatible.
+ if (LVCKind == LangOptions::LaxVectorConversionKind::All)
+ return true;
+
+ // If -flax-vector-conversions=integer is specified, the types are
+ // compatible if the elements are integer types.
+ if (LVCKind == LangOptions::LaxVectorConversionKind::Integer)
+ return VecTy->getElementType().getCanonicalType()->isIntegerType() &&
+ FirstType->getRVVEltType(*this)->isIntegerType();
+ }
+
+ return false;
+ };
+
+ return IsLaxCompatible(FirstType, SecondType) ||
+ IsLaxCompatible(SecondType, FirstType);
+}
+
bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const {
while (true) {
// __strong id
@@ -9076,7 +9926,7 @@ void getIntersectionOfProtocols(ASTContext &Context,
llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet;
// Start with the protocol qualifiers.
- for (auto proto : LHS->quals()) {
+ for (auto *proto : LHS->quals()) {
Context.CollectInheritedProtocols(proto, LHSProtocolSet);
}
@@ -9087,7 +9937,7 @@ void getIntersectionOfProtocols(ASTContext &Context,
llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet;
// Start with the protocol qualifiers.
- for (auto proto : RHS->quals()) {
+ for (auto *proto : RHS->quals()) {
Context.CollectInheritedProtocols(proto, RHSProtocolSet);
}
@@ -9095,7 +9945,7 @@ void getIntersectionOfProtocols(ASTContext &Context,
Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet);
// Compute the intersection of the collected protocol sets.
- for (auto proto : LHSProtocolSet) {
+ for (auto *proto : LHSProtocolSet) {
if (RHSProtocolSet.count(proto))
IntersectionSet.push_back(proto);
}
@@ -9107,13 +9957,9 @@ void getIntersectionOfProtocols(ASTContext &Context,
// Remove any implied protocols from the list of inherited protocols.
if (!ImpliedProtocols.empty()) {
- IntersectionSet.erase(
- std::remove_if(IntersectionSet.begin(),
- IntersectionSet.end(),
- [&](ObjCProtocolDecl *proto) -> bool {
- return ImpliedProtocols.count(proto) > 0;
- }),
- IntersectionSet.end());
+ llvm::erase_if(IntersectionSet, [&](ObjCProtocolDecl *proto) -> bool {
+ return ImpliedProtocols.contains(proto);
+ });
}
// Sort the remaining protocols by name.
@@ -9155,6 +10001,9 @@ static bool sameObjCTypeArgs(ASTContext &ctx,
return false;
ObjCTypeParamList *typeParams = iface->getTypeParamList();
+ if (!typeParams)
+ return false;
+
for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) {
if (ctx.hasSameType(lhsArgs[i], rhsArgs[i]))
continue;
@@ -9450,7 +10299,8 @@ QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs,
QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
bool OfBlockPointer, bool Unqualified,
- bool AllowCXX) {
+ bool AllowCXX,
+ bool IsConditionalOperator) {
const auto *lbase = lhs->castAs<FunctionType>();
const auto *rbase = rhs->castAs<FunctionType>();
const auto *lproto = dyn_cast<FunctionProtoType>(lbase);
@@ -9513,9 +10363,27 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck())
return {};
- // FIXME: some uses, e.g. conditional exprs, really want this to be 'both'.
- bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn();
-
+ // When merging declarations, it's common for supplemental information like
+ // attributes to only be present in one of the declarations, and we generally
+ // want type merging to preserve the union of information. So a merged
+ // function type should be noreturn if it was noreturn in *either* operand
+ // type.
+ //
+ // But for the conditional operator, this is backwards. The result of the
+ // operator could be either operand, and its type should conservatively
+ // reflect that. So a function type in a composite type is noreturn only
+ // if it's noreturn in *both* operand types.
+ //
+ // Arguably, noreturn is a kind of subtype, and the conditional operator
+ // ought to produce the most specific common supertype of its operand types.
+ // That would differ from this rule in contravariant positions. However,
+ // neither C nor C++ generally uses this kind of subtype reasoning. Also,
+ // as a practical matter, it would only affect C code that does abstraction of
+ // higher-order functions (taking noreturn callbacks!), which is uncommon to
+ // say the least. So we use the simpler rule.
+ bool NoReturn = IsConditionalOperator
+ ? lbaseInfo.getNoReturn() && rbaseInfo.getNoReturn()
+ : lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn();
if (lbaseInfo.getNoReturn() != NoReturn)
allLTypes = false;
if (rbaseInfo.getNoReturn() != NoReturn)
@@ -9608,7 +10476,7 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
return {};
}
- if (paramTy->isPromotableIntegerType() ||
+ if (isPromotableIntegerType(paramTy) ||
getCanonicalType(paramTy).getUnqualifiedType() == FloatTy)
return {};
}
@@ -9648,9 +10516,9 @@ static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET,
return {};
}
-QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
- bool OfBlockPointer,
- bool Unqualified, bool BlockReturnType) {
+QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer,
+ bool Unqualified, bool BlockReturnType,
+ bool IsConditionalOperator) {
// For C++ we will not reach this code with reference types (see below),
// for OpenMP variant call overloading we might.
//
@@ -9659,12 +10527,13 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
// designates the object or function denoted by the reference, and the
// expression is an lvalue unless the reference is an rvalue reference and
// the expression is a function call (possibly inside parentheses).
- if (LangOpts.OpenMP && LHS->getAs<ReferenceType>() &&
- RHS->getAs<ReferenceType>() && LHS->getTypeClass() == RHS->getTypeClass())
- return mergeTypes(LHS->getAs<ReferenceType>()->getPointeeType(),
- RHS->getAs<ReferenceType>()->getPointeeType(),
+ auto *LHSRefTy = LHS->getAs<ReferenceType>();
+ auto *RHSRefTy = RHS->getAs<ReferenceType>();
+ if (LangOpts.OpenMP && LHSRefTy && RHSRefTy &&
+ LHS->getTypeClass() == RHS->getTypeClass())
+ return mergeTypes(LHSRefTy->getPointeeType(), RHSRefTy->getPointeeType(),
OfBlockPointer, Unqualified, BlockReturnType);
- if (LHS->getAs<ReferenceType>() || RHS->getAs<ReferenceType>())
+ if (LHSRefTy || RHSRefTy)
return {};
if (Unqualified) {
@@ -9753,7 +10622,16 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
if (RHS->isObjCIdType() && LHS->isBlockPointerType())
return RHS;
}
-
+ // Allow __auto_type to match anything; it merges to the type with more
+ // information.
+ if (const auto *AT = LHS->getAs<AutoType>()) {
+ if (!AT->isDeduced() && AT->isGNUAutoType())
+ return RHS;
+ }
+ if (const auto *AT = RHS->getAs<AutoType>()) {
+ if (!AT->isDeduced() && AT->isGNUAutoType())
+ return LHS;
+ }
return {};
}
@@ -9880,7 +10758,7 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
const ConstantArrayType* CAT)
-> std::pair<bool,llvm::APInt> {
if (VAT) {
- Optional<llvm::APSInt> TheInt;
+ std::optional<llvm::APSInt> TheInt;
Expr *E = VAT->getSizeExpr();
if (E && (TheInt = E->getIntegerConstantExpr(*this)))
return std::make_pair(true, *TheInt);
@@ -9905,12 +10783,10 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
return RHS;
if (LCAT)
return getConstantArrayType(ResultType, LCAT->getSize(),
- LCAT->getSizeExpr(),
- ArrayType::ArraySizeModifier(), 0);
+ LCAT->getSizeExpr(), ArraySizeModifier(), 0);
if (RCAT)
return getConstantArrayType(ResultType, RCAT->getSize(),
- RCAT->getSizeExpr(),
- ArrayType::ArraySizeModifier(), 0);
+ RCAT->getSizeExpr(), ArraySizeModifier(), 0);
if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType))
return LHS;
if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType))
@@ -9929,11 +10805,11 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
}
if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS;
if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS;
- return getIncompleteArrayType(ResultType,
- ArrayType::ArraySizeModifier(), 0);
+ return getIncompleteArrayType(ResultType, ArraySizeModifier(), 0);
}
case Type::FunctionNoProto:
- return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified);
+ return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified,
+ /*AllowCXX=*/false, IsConditionalOperator);
case Type::Record:
case Type::Enum:
return {};
@@ -9979,14 +10855,14 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
assert(LHS != RHS &&
"Equivalent pipe types should have already been handled!");
return {};
- case Type::ExtInt: {
- // Merge two ext-int types, while trying to preserve typedef info.
- bool LHSUnsigned = LHS->castAs<ExtIntType>()->isUnsigned();
- bool RHSUnsigned = RHS->castAs<ExtIntType>()->isUnsigned();
- unsigned LHSBits = LHS->castAs<ExtIntType>()->getNumBits();
- unsigned RHSBits = RHS->castAs<ExtIntType>()->getNumBits();
-
- // Like unsigned/int, shouldn't have a type if they dont match.
+ case Type::BitInt: {
+ // Merge two bit-precise int types, while trying to preserve typedef info.
+ bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned();
+ bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned();
+ unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits();
+ unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits();
+
+ // Like unsigned/int, shouldn't have a type if they don't match.
if (LHSUnsigned != RHSUnsigned)
return {};
@@ -10134,14 +11010,15 @@ unsigned ASTContext::getIntWidth(QualType T) const {
T = ET->getDecl()->getIntegerType();
if (T->isBooleanType())
return 1;
- if(const auto *EIT = T->getAs<ExtIntType>())
+ if (const auto *EIT = T->getAs<BitIntType>())
return EIT->getNumBits();
// For builtin types, just use the standard type sizing method
return (unsigned)getTypeSize(T);
}
QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
- assert((T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) &&
+ assert((T->hasIntegerRepresentation() || T->isEnumeralType() ||
+ T->isFixedPointType()) &&
"Unexpected type");
// Turn <4 x signed int> -> <4 x unsigned int>
@@ -10149,9 +11026,9 @@ QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()),
VTy->getNumElements(), VTy->getVectorKind());
- // For _ExtInt, return an unsigned _ExtInt with same width.
- if (const auto *EITy = T->getAs<ExtIntType>())
- return getExtIntType(/*IsUnsigned=*/true, EITy->getNumBits());
+ // For _BitInt, return an unsigned _BitInt with same width.
+ if (const auto *EITy = T->getAs<BitIntType>())
+ return getBitIntType(/*Unsigned=*/true, EITy->getNumBits());
// For enums, get the underlying integer type of the enum, and let the general
// integer type signchanging code handle it.
@@ -10159,8 +11036,11 @@ QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
T = ETy->getDecl()->getIntegerType();
switch (T->castAs<BuiltinType>()->getKind()) {
+ case BuiltinType::Char_U:
+ // Plain `char` is mapped to `unsigned char` even if it's already unsigned
case BuiltinType::Char_S:
case BuiltinType::SChar:
+ case BuiltinType::Char8:
return UnsignedCharTy;
case BuiltinType::Short:
return UnsignedShortTy;
@@ -10174,7 +11054,7 @@ QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
return UnsignedInt128Ty;
// wchar_t is special. It is either signed or not, but when it's signed,
// there's no matching "unsigned wchar_t". Therefore we return the unsigned
- // version of it's underlying type instead.
+ // version of its underlying type instead.
case BuiltinType::WChar_S:
return getUnsignedWCharType();
@@ -10203,13 +11083,16 @@ QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
case BuiltinType::SatLongFract:
return SatUnsignedLongFractTy;
default:
- llvm_unreachable("Unexpected signed integer or fixed point type");
+ assert((T->hasUnsignedIntegerRepresentation() ||
+ T->isUnsignedFixedPointType()) &&
+ "Unexpected signed integer or fixed point type");
+ return T;
}
}
QualType ASTContext::getCorrespondingSignedType(QualType T) const {
- assert((T->hasUnsignedIntegerRepresentation() ||
- T->isUnsignedFixedPointType()) &&
+ assert((T->hasIntegerRepresentation() || T->isEnumeralType() ||
+ T->isFixedPointType()) &&
"Unexpected type");
// Turn <4 x unsigned int> -> <4 x signed int>
@@ -10217,9 +11100,9 @@ QualType ASTContext::getCorrespondingSignedType(QualType T) const {
return getVectorType(getCorrespondingSignedType(VTy->getElementType()),
VTy->getNumElements(), VTy->getVectorKind());
- // For _ExtInt, return a signed _ExtInt with same width.
- if (const auto *EITy = T->getAs<ExtIntType>())
- return getExtIntType(/*IsUnsigned=*/false, EITy->getNumBits());
+ // For _BitInt, return a signed _BitInt with same width.
+ if (const auto *EITy = T->getAs<BitIntType>())
+ return getBitIntType(/*Unsigned=*/false, EITy->getNumBits());
// For enums, get the underlying integer type of the enum, and let the general
// integer type signchanging code handle it.
@@ -10227,8 +11110,11 @@ QualType ASTContext::getCorrespondingSignedType(QualType T) const {
T = ETy->getDecl()->getIntegerType();
switch (T->castAs<BuiltinType>()->getKind()) {
+ case BuiltinType::Char_S:
+ // Plain `char` is mapped to `signed char` even if it's already signed
case BuiltinType::Char_U:
case BuiltinType::UChar:
+ case BuiltinType::Char8:
return SignedCharTy;
case BuiltinType::UShort:
return ShortTy;
@@ -10242,7 +11128,7 @@ QualType ASTContext::getCorrespondingSignedType(QualType T) const {
return Int128Ty;
// wchar_t is special. It is either unsigned or not, but when it's unsigned,
// there's no matching "signed wchar_t". Therefore we return the signed
- // version of it's underlying type instead.
+ // version of its underlying type instead.
case BuiltinType::WChar_U:
return getSignedWCharType();
@@ -10271,7 +11157,10 @@ QualType ASTContext::getCorrespondingSignedType(QualType T) const {
case BuiltinType::SatULongFract:
return SatLongFractTy;
default:
- llvm_unreachable("Unexpected unsigned integer or fixed point type");
+ assert(
+ (T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) &&
+ "Unexpected signed integer or fixed point type");
+ return T;
}
}
@@ -10513,6 +11402,17 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
Type = Context.getScalableVectorType(ElementType, NumElements);
break;
}
+ case 'Q': {
+ switch (*Str++) {
+ case 'a': {
+ Type = Context.SveCountTy;
+ break;
+ }
+ default:
+ llvm_unreachable("Unexpected target builtin type");
+ }
+ break;
+ }
case 'V': {
char *End;
unsigned NumElements = strtoul(Str, &End, 10);
@@ -10524,8 +11424,7 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
assert(!RequiresICE && "Can't require vector ICE");
// TODO: No way to make AltiVec vectors in builtins yet.
- Type = Context.getVectorType(ElementType, NumElements,
- VectorType::GenericVector);
+ Type = Context.getVectorType(ElementType, NumElements, VectorKind::Generic);
break;
}
case 'E': {
@@ -10627,7 +11526,7 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
}
// On some targets such as PowerPC, some of the builtins are defined with custom
-// type decriptors for target-dependent types. These descriptors are decoded in
+// type descriptors for target-dependent types. These descriptors are decoded in
// other functions, but it may be useful to be able to fall back to default
// descriptor decoding to define builtins mixing target-dependent and target-
// independent types. This function allows decoding one type descriptor with
@@ -10690,7 +11589,7 @@ QualType ASTContext::GetBuiltinType(unsigned Id,
// We really shouldn't be making a no-proto type here.
- if (ArgTypes.empty() && Variadic && !getLangOpts().CPlusPlus)
+ if (ArgTypes.empty() && Variadic && !getLangOpts().requiresStrictPrototypes())
return getFunctionNoProtoType(ResType, EI);
FunctionProtoType::ExtProtoInfo EPI;
@@ -10710,9 +11609,8 @@ static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
// Non-user-provided functions get emitted as weak definitions with every
// use, no matter whether they've been explicitly instantiated etc.
- if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
- if (!MD->isUserProvided())
- return GVA_DiscardableODR;
+ if (!FD->isUserProvided())
+ return GVA_DiscardableODR;
GVALinkage External;
switch (FD->getTemplateSpecializationKind()) {
@@ -10762,6 +11660,14 @@ static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
if (FD->isMSExternInline())
return GVA_StrongODR;
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ isa<CXXConstructorDecl>(FD) &&
+ cast<CXXConstructorDecl>(FD)->isInheritingConstructor())
+ // Our approach to inheriting constructors is fundamentally different from
+ // that used by the MS ABI, so keep our inheriting constructor thunks
+ // internal rather than trying to pick an unambiguous mangling for them.
+ return GVA_Internal;
+
return GVA_DiscardableODR;
}
@@ -10787,7 +11693,7 @@ static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context,
// name between the host and device compilation which is the same for the
// same compilation unit whereas different among different compilation
// units.
- if (Context.shouldExternalizeStaticVar(D))
+ if (Context.shouldExternalize(D))
return GVA_StrongExternal;
}
return L;
@@ -10826,6 +11732,16 @@ GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const {
static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
const VarDecl *VD) {
+ // As an extension for interactive REPLs, make sure constant variables are
+ // only emitted once instead of LinkageComputer::getLVForNamespaceScopeDecl
+ // marking them as internal.
+ if (Context.getLangOpts().CPlusPlus &&
+ Context.getLangOpts().IncrementalExtensions &&
+ VD->getType().isConstQualified() &&
+ !VD->getType().isVolatileQualified() && !VD->isInline() &&
+ !isa<VarTemplateSpecializationDecl>(VD) && !VD->getDescribedVarTemplate())
+ return GVA_DiscardableODR;
+
if (!VD->isExternallyVisible())
return GVA_Internal;
@@ -10901,7 +11817,7 @@ static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
llvm_unreachable("Invalid Linkage!");
}
-GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) {
+GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) const {
return adjustGVALinkageForExternalDefinitionKind(*this, VD,
adjustGVALinkageForAttributes(*this, VD,
basicGVALinkageForVariable(*this, VD)));
@@ -10993,6 +11909,10 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
!isMSStaticDataMemberInlineDefinition(VD))
return false;
+ // Variables in other module units shouldn't be forced to be emitted.
+ if (VD->isInAnotherModuleUnit())
+ return false;
+
// Variables that can be needed in other TUs are required.
auto Linkage = GetGVALinkageForVariable(VD);
if (!isDiscardableGVALinkage(Linkage))
@@ -11036,7 +11956,7 @@ void ASTContext::forEachMultiversionedFunctionVersion(
FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) {
FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl();
if (CurFD && hasSameType(CurFD->getType(), FD->getType()) &&
- std::end(SeenDecls) == llvm::find(SeenDecls, CurFD)) {
+ !SeenDecls.contains(CurFD)) {
SeenDecls.insert(CurFD);
Pred(CurFD);
}
@@ -11076,6 +11996,10 @@ CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic,
if (!IsVariadic)
return CC_X86RegCall;
break;
+ case LangOptions::DCC_RtdCall:
+ if (!IsVariadic)
+ return CC_M68kRTD;
+ break;
}
}
return Target->getDefaultCallingConv();
@@ -11138,13 +12062,15 @@ MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) {
case TargetCXXABI::XL:
return ItaniumMangleContext::create(
*this, getDiagnostics(),
- [](ASTContext &, const NamedDecl *ND) -> llvm::Optional<unsigned> {
+ [](ASTContext &, const NamedDecl *ND) -> std::optional<unsigned> {
if (const auto *RD = dyn_cast<CXXRecordDecl>(ND))
return RD->getDeviceLambdaManglingNumber();
- return llvm::None;
- });
+ return std::nullopt;
+ },
+ /*IsAux=*/true);
case TargetCXXABI::Microsoft:
- return MicrosoftMangleContext::create(*this, getDiagnostics());
+ return MicrosoftMangleContext::create(*this, getDiagnostics(),
+ /*IsAux=*/true);
}
llvm_unreachable("Unsupported ABI");
}
@@ -11184,19 +12110,23 @@ QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth,
/// sets floating point QualTy according to specified bitwidth.
/// Returns empty type if there is no appropriate target types.
QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
- bool ExplicitIEEE) const {
- TargetInfo::RealType Ty =
- getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitIEEE);
+ FloatModeKind ExplicitType) const {
+ FloatModeKind Ty =
+ getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitType);
switch (Ty) {
- case TargetInfo::Float:
+ case FloatModeKind::Half:
+ return HalfTy;
+ case FloatModeKind::Float:
return FloatTy;
- case TargetInfo::Double:
+ case FloatModeKind::Double:
return DoubleTy;
- case TargetInfo::LongDouble:
+ case FloatModeKind::LongDouble:
return LongDoubleTy;
- case TargetInfo::Float128:
+ case FloatModeKind::Float128:
return Float128Ty;
- case TargetInfo::NoFloat:
+ case FloatModeKind::Ibm128:
+ return Ibm128Ty;
+ case FloatModeKind::NoFloat:
return {};
}
@@ -11208,9 +12138,19 @@ void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
MangleNumbers[ND] = Number;
}
-unsigned ASTContext::getManglingNumber(const NamedDecl *ND) const {
+unsigned ASTContext::getManglingNumber(const NamedDecl *ND,
+ bool ForAuxTarget) const {
auto I = MangleNumbers.find(ND);
- return I != MangleNumbers.end() ? I->second : 1;
+ unsigned Res = I != MangleNumbers.end() ? I->second : 1;
+ // CUDA/HIP host compilation encodes host and device mangling numbers
+ // as lower and upper half of 32 bit integer.
+ if (LangOpts.CUDA && !LangOpts.CUDAIsDevice) {
+ Res = ForAuxTarget ? Res >> 16 : Res & 0xFFFF;
+ } else {
+ assert(!ForAuxTarget && "Only CUDA/HIP host compilation supports mangling "
+ "number for aux target");
+ }
+ return Res > 1 ? Res : 1;
}
void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
@@ -11301,7 +12241,7 @@ QualType ASTContext::getStringLiteralArrayType(QualType EltTy,
// Get an array type for the string, according to C99 6.4.5. This includes
// the null terminator character.
return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr,
- ArrayType::Normal, /*IndexTypeQuals*/ 0);
+ ArraySizeModifier::Normal, /*IndexTypeQuals*/ 0);
}
StringLiteral *
@@ -11309,7 +12249,7 @@ ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
StringLiteral *&Result = StringLiteralCache[Key];
if (!Result)
Result = StringLiteral::Create(
- *this, Key, StringLiteral::Ascii,
+ *this, Key, StringLiteralKind::Ordinary,
/*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()),
SourceLocation());
return Result;
@@ -11332,6 +12272,23 @@ ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
return New;
}
+UnnamedGlobalConstantDecl *
+ASTContext::getUnnamedGlobalConstantDecl(QualType Ty,
+ const APValue &APVal) const {
+ llvm::FoldingSetNodeID ID;
+ UnnamedGlobalConstantDecl::Profile(ID, Ty, APVal);
+
+ void *InsertPos;
+ if (UnnamedGlobalConstantDecl *Existing =
+ UnnamedGlobalConstantDecls.FindNodeOrInsertPos(ID, InsertPos))
+ return Existing;
+
+ UnnamedGlobalConstantDecl *New =
+ UnnamedGlobalConstantDecl::Create(*this, Ty, APVal);
+ UnnamedGlobalConstantDecls.InsertNode(New, InsertPos);
+ return New;
+}
+
TemplateParamObjectDecl *
ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const {
assert(T->isRecordType() && "template param object of unexpected type");
@@ -11413,10 +12370,894 @@ uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const {
}
unsigned ASTContext::getTargetAddressSpace(LangAS AS) const {
- if (isTargetAddressSpace(AS))
- return toTargetAddressSpace(AS);
+ return getTargetInfo().getTargetAddressSpace(AS);
+}
+
+bool ASTContext::hasSameExpr(const Expr *X, const Expr *Y) const {
+ if (X == Y)
+ return true;
+ if (!X || !Y)
+ return false;
+ llvm::FoldingSetNodeID IDX, IDY;
+ X->Profile(IDX, *this, /*Canonical=*/true);
+ Y->Profile(IDY, *this, /*Canonical=*/true);
+ return IDX == IDY;
+}
+
+// The getCommon* helpers return, for given 'same' X and Y entities given as
+// inputs, another entity which is also the 'same' as the inputs, but which
+// is closer to the canonical form of the inputs, each according to a given
+// criteria.
+// The getCommon*Checked variants are 'null inputs not-allowed' equivalents of
+// the regular ones.
+
+static Decl *getCommonDecl(Decl *X, Decl *Y) {
+ if (!declaresSameEntity(X, Y))
+ return nullptr;
+ for (const Decl *DX : X->redecls()) {
+ // If we reach Y before reaching the first decl, that means X is older.
+ if (DX == Y)
+ return X;
+ // If we reach the first decl, then Y is older.
+ if (DX->isFirstDecl())
+ return Y;
+ }
+ llvm_unreachable("Corrupt redecls chain");
+}
+
+template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true>
+static T *getCommonDecl(T *X, T *Y) {
+ return cast_or_null<T>(
+ getCommonDecl(const_cast<Decl *>(cast_or_null<Decl>(X)),
+ const_cast<Decl *>(cast_or_null<Decl>(Y))));
+}
+
+template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true>
+static T *getCommonDeclChecked(T *X, T *Y) {
+ return cast<T>(getCommonDecl(const_cast<Decl *>(cast<Decl>(X)),
+ const_cast<Decl *>(cast<Decl>(Y))));
+}
+
+static TemplateName getCommonTemplateName(ASTContext &Ctx, TemplateName X,
+ TemplateName Y) {
+ if (X.getAsVoidPointer() == Y.getAsVoidPointer())
+ return X;
+ // FIXME: There are cases here where we could find a common template name
+ // with more sugar. For example one could be a SubstTemplateTemplate*
+ // replacing the other.
+ TemplateName CX = Ctx.getCanonicalTemplateName(X);
+ if (CX.getAsVoidPointer() !=
+ Ctx.getCanonicalTemplateName(Y).getAsVoidPointer())
+ return TemplateName();
+ return CX;
+}
+
+static TemplateName
+getCommonTemplateNameChecked(ASTContext &Ctx, TemplateName X, TemplateName Y) {
+ TemplateName R = getCommonTemplateName(Ctx, X, Y);
+ assert(R.getAsVoidPointer() != nullptr);
+ return R;
+}
+
+static auto getCommonTypes(ASTContext &Ctx, ArrayRef<QualType> Xs,
+ ArrayRef<QualType> Ys, bool Unqualified = false) {
+ assert(Xs.size() == Ys.size());
+ SmallVector<QualType, 8> Rs(Xs.size());
+ for (size_t I = 0; I < Rs.size(); ++I)
+ Rs[I] = Ctx.getCommonSugaredType(Xs[I], Ys[I], Unqualified);
+ return Rs;
+}
+
+template <class T>
+static SourceLocation getCommonAttrLoc(const T *X, const T *Y) {
+ return X->getAttributeLoc() == Y->getAttributeLoc() ? X->getAttributeLoc()
+ : SourceLocation();
+}
+
+static TemplateArgument getCommonTemplateArgument(ASTContext &Ctx,
+ const TemplateArgument &X,
+ const TemplateArgument &Y) {
+ if (X.getKind() != Y.getKind())
+ return TemplateArgument();
+
+ switch (X.getKind()) {
+ case TemplateArgument::ArgKind::Type:
+ if (!Ctx.hasSameType(X.getAsType(), Y.getAsType()))
+ return TemplateArgument();
+ return TemplateArgument(
+ Ctx.getCommonSugaredType(X.getAsType(), Y.getAsType()));
+ case TemplateArgument::ArgKind::NullPtr:
+ if (!Ctx.hasSameType(X.getNullPtrType(), Y.getNullPtrType()))
+ return TemplateArgument();
+ return TemplateArgument(
+ Ctx.getCommonSugaredType(X.getNullPtrType(), Y.getNullPtrType()),
+ /*Unqualified=*/true);
+ case TemplateArgument::ArgKind::Expression:
+ if (!Ctx.hasSameType(X.getAsExpr()->getType(), Y.getAsExpr()->getType()))
+ return TemplateArgument();
+ // FIXME: Try to keep the common sugar.
+ return X;
+ case TemplateArgument::ArgKind::Template: {
+ TemplateName TX = X.getAsTemplate(), TY = Y.getAsTemplate();
+ TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY);
+ if (!CTN.getAsVoidPointer())
+ return TemplateArgument();
+ return TemplateArgument(CTN);
+ }
+ case TemplateArgument::ArgKind::TemplateExpansion: {
+ TemplateName TX = X.getAsTemplateOrTemplatePattern(),
+ TY = Y.getAsTemplateOrTemplatePattern();
+ TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY);
+ if (!CTN.getAsVoidPointer())
+ return TemplateName();
+ auto NExpX = X.getNumTemplateExpansions();
+ assert(NExpX == Y.getNumTemplateExpansions());
+ return TemplateArgument(CTN, NExpX);
+ }
+ default:
+ // FIXME: Handle the other argument kinds.
+ return X;
+ }
+}
+
+static bool getCommonTemplateArguments(ASTContext &Ctx,
+ SmallVectorImpl<TemplateArgument> &R,
+ ArrayRef<TemplateArgument> Xs,
+ ArrayRef<TemplateArgument> Ys) {
+ if (Xs.size() != Ys.size())
+ return true;
+ R.resize(Xs.size());
+ for (size_t I = 0; I < R.size(); ++I) {
+ R[I] = getCommonTemplateArgument(Ctx, Xs[I], Ys[I]);
+ if (R[I].isNull())
+ return true;
+ }
+ return false;
+}
+
+static auto getCommonTemplateArguments(ASTContext &Ctx,
+ ArrayRef<TemplateArgument> Xs,
+ ArrayRef<TemplateArgument> Ys) {
+ SmallVector<TemplateArgument, 8> R;
+ bool Different = getCommonTemplateArguments(Ctx, R, Xs, Ys);
+ assert(!Different);
+ (void)Different;
+ return R;
+}
+
+template <class T>
+static ElaboratedTypeKeyword getCommonTypeKeyword(const T *X, const T *Y) {
+ return X->getKeyword() == Y->getKeyword() ? X->getKeyword()
+ : ElaboratedTypeKeyword::None;
+}
+
+template <class T>
+static NestedNameSpecifier *getCommonNNS(ASTContext &Ctx, const T *X,
+ const T *Y) {
+ // FIXME: Try to keep the common NNS sugar.
+ return X->getQualifier() == Y->getQualifier()
+ ? X->getQualifier()
+ : Ctx.getCanonicalNestedNameSpecifier(X->getQualifier());
+}
+
+template <class T>
+static QualType getCommonElementType(ASTContext &Ctx, const T *X, const T *Y) {
+ return Ctx.getCommonSugaredType(X->getElementType(), Y->getElementType());
+}
+
+template <class T>
+static QualType getCommonArrayElementType(ASTContext &Ctx, const T *X,
+ Qualifiers &QX, const T *Y,
+ Qualifiers &QY) {
+ QualType EX = X->getElementType(), EY = Y->getElementType();
+ QualType R = Ctx.getCommonSugaredType(EX, EY,
+ /*Unqualified=*/true);
+ Qualifiers RQ = R.getQualifiers();
+ QX += EX.getQualifiers() - RQ;
+ QY += EY.getQualifiers() - RQ;
+ return R;
+}
+
+template <class T>
+static QualType getCommonPointeeType(ASTContext &Ctx, const T *X, const T *Y) {
+ return Ctx.getCommonSugaredType(X->getPointeeType(), Y->getPointeeType());
+}
+
+template <class T> static auto *getCommonSizeExpr(ASTContext &Ctx, T *X, T *Y) {
+ assert(Ctx.hasSameExpr(X->getSizeExpr(), Y->getSizeExpr()));
+ return X->getSizeExpr();
+}
+
+static auto getCommonSizeModifier(const ArrayType *X, const ArrayType *Y) {
+ assert(X->getSizeModifier() == Y->getSizeModifier());
+ return X->getSizeModifier();
+}
+
+static auto getCommonIndexTypeCVRQualifiers(const ArrayType *X,
+ const ArrayType *Y) {
+ assert(X->getIndexTypeCVRQualifiers() == Y->getIndexTypeCVRQualifiers());
+ return X->getIndexTypeCVRQualifiers();
+}
+
+// Merges two type lists such that the resulting vector will contain
+// each type (in a canonical sense) only once, in the order they appear
+// from X to Y. If they occur in both X and Y, the result will contain
+// the common sugared type between them.
+static void mergeTypeLists(ASTContext &Ctx, SmallVectorImpl<QualType> &Out,
+ ArrayRef<QualType> X, ArrayRef<QualType> Y) {
+ llvm::DenseMap<QualType, unsigned> Found;
+ for (auto Ts : {X, Y}) {
+ for (QualType T : Ts) {
+ auto Res = Found.try_emplace(Ctx.getCanonicalType(T), Out.size());
+ if (!Res.second) {
+ QualType &U = Out[Res.first->second];
+ U = Ctx.getCommonSugaredType(U, T);
+ } else {
+ Out.emplace_back(T);
+ }
+ }
+ }
+}
+
+FunctionProtoType::ExceptionSpecInfo
+ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1,
+ FunctionProtoType::ExceptionSpecInfo ESI2,
+ SmallVectorImpl<QualType> &ExceptionTypeStorage,
+ bool AcceptDependent) {
+ ExceptionSpecificationType EST1 = ESI1.Type, EST2 = ESI2.Type;
+
+ // If either of them can throw anything, that is the result.
+ for (auto I : {EST_None, EST_MSAny, EST_NoexceptFalse}) {
+ if (EST1 == I)
+ return ESI1;
+ if (EST2 == I)
+ return ESI2;
+ }
+
+ // If either of them is non-throwing, the result is the other.
+ for (auto I :
+ {EST_NoThrow, EST_DynamicNone, EST_BasicNoexcept, EST_NoexceptTrue}) {
+ if (EST1 == I)
+ return ESI2;
+ if (EST2 == I)
+ return ESI1;
+ }
+
+ // If we're left with value-dependent computed noexcept expressions, we're
+ // stuck. Before C++17, we can just drop the exception specification entirely,
+ // since it's not actually part of the canonical type. And this should never
+ // happen in C++17, because it would mean we were computing the composite
+ // pointer type of dependent types, which should never happen.
+ if (EST1 == EST_DependentNoexcept || EST2 == EST_DependentNoexcept) {
+ assert(AcceptDependent &&
+ "computing composite pointer type of dependent types");
+ return FunctionProtoType::ExceptionSpecInfo();
+ }
+
+ // Switch over the possibilities so that people adding new values know to
+ // update this function.
+ switch (EST1) {
+ case EST_None:
+ case EST_DynamicNone:
+ case EST_MSAny:
+ case EST_BasicNoexcept:
+ case EST_DependentNoexcept:
+ case EST_NoexceptFalse:
+ case EST_NoexceptTrue:
+ case EST_NoThrow:
+ llvm_unreachable("These ESTs should be handled above");
+
+ case EST_Dynamic: {
+ // This is the fun case: both exception specifications are dynamic. Form
+ // the union of the two lists.
+ assert(EST2 == EST_Dynamic && "other cases should already be handled");
+ mergeTypeLists(*this, ExceptionTypeStorage, ESI1.Exceptions,
+ ESI2.Exceptions);
+ FunctionProtoType::ExceptionSpecInfo Result(EST_Dynamic);
+ Result.Exceptions = ExceptionTypeStorage;
+ return Result;
+ }
+
+ case EST_Unevaluated:
+ case EST_Uninstantiated:
+ case EST_Unparsed:
+ llvm_unreachable("shouldn't see unresolved exception specifications here");
+ }
+
+ llvm_unreachable("invalid ExceptionSpecificationType");
+}
+
+static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X,
+ Qualifiers &QX, const Type *Y,
+ Qualifiers &QY) {
+ Type::TypeClass TC = X->getTypeClass();
+ assert(TC == Y->getTypeClass());
+ switch (TC) {
+#define UNEXPECTED_TYPE(Class, Kind) \
+ case Type::Class: \
+ llvm_unreachable("Unexpected " Kind ": " #Class);
+
+#define NON_CANONICAL_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "non-canonical")
+#define TYPE(Class, Base)
+#include "clang/AST/TypeNodes.inc"
+
+#define SUGAR_FREE_TYPE(Class) UNEXPECTED_TYPE(Class, "sugar-free")
+ SUGAR_FREE_TYPE(Builtin)
+ SUGAR_FREE_TYPE(DeducedTemplateSpecialization)
+ SUGAR_FREE_TYPE(DependentBitInt)
+ SUGAR_FREE_TYPE(Enum)
+ SUGAR_FREE_TYPE(BitInt)
+ SUGAR_FREE_TYPE(ObjCInterface)
+ SUGAR_FREE_TYPE(Record)
+ SUGAR_FREE_TYPE(SubstTemplateTypeParmPack)
+ SUGAR_FREE_TYPE(UnresolvedUsing)
+#undef SUGAR_FREE_TYPE
+#define NON_UNIQUE_TYPE(Class) UNEXPECTED_TYPE(Class, "non-unique")
+ NON_UNIQUE_TYPE(TypeOfExpr)
+ NON_UNIQUE_TYPE(VariableArray)
+#undef NON_UNIQUE_TYPE
+
+ UNEXPECTED_TYPE(TypeOf, "sugar")
+
+#undef UNEXPECTED_TYPE
+
+ case Type::Auto: {
+ const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y);
+ assert(AX->getDeducedType().isNull());
+ assert(AY->getDeducedType().isNull());
+ assert(AX->getKeyword() == AY->getKeyword());
+ assert(AX->isInstantiationDependentType() ==
+ AY->isInstantiationDependentType());
+ auto As = getCommonTemplateArguments(Ctx, AX->getTypeConstraintArguments(),
+ AY->getTypeConstraintArguments());
+ return Ctx.getAutoType(QualType(), AX->getKeyword(),
+ AX->isInstantiationDependentType(),
+ AX->containsUnexpandedParameterPack(),
+ getCommonDeclChecked(AX->getTypeConstraintConcept(),
+ AY->getTypeConstraintConcept()),
+ As);
+ }
+ case Type::IncompleteArray: {
+ const auto *AX = cast<IncompleteArrayType>(X),
+ *AY = cast<IncompleteArrayType>(Y);
+ return Ctx.getIncompleteArrayType(
+ getCommonArrayElementType(Ctx, AX, QX, AY, QY),
+ getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY));
+ }
+ case Type::DependentSizedArray: {
+ const auto *AX = cast<DependentSizedArrayType>(X),
+ *AY = cast<DependentSizedArrayType>(Y);
+ return Ctx.getDependentSizedArrayType(
+ getCommonArrayElementType(Ctx, AX, QX, AY, QY),
+ getCommonSizeExpr(Ctx, AX, AY), getCommonSizeModifier(AX, AY),
+ getCommonIndexTypeCVRQualifiers(AX, AY),
+ AX->getBracketsRange() == AY->getBracketsRange()
+ ? AX->getBracketsRange()
+ : SourceRange());
+ }
+ case Type::ConstantArray: {
+ const auto *AX = cast<ConstantArrayType>(X),
+ *AY = cast<ConstantArrayType>(Y);
+ assert(AX->getSize() == AY->getSize());
+ const Expr *SizeExpr = Ctx.hasSameExpr(AX->getSizeExpr(), AY->getSizeExpr())
+ ? AX->getSizeExpr()
+ : nullptr;
+ return Ctx.getConstantArrayType(
+ getCommonArrayElementType(Ctx, AX, QX, AY, QY), AX->getSize(), SizeExpr,
+ getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY));
+ }
+ case Type::Atomic: {
+ const auto *AX = cast<AtomicType>(X), *AY = cast<AtomicType>(Y);
+ return Ctx.getAtomicType(
+ Ctx.getCommonSugaredType(AX->getValueType(), AY->getValueType()));
+ }
+ case Type::Complex: {
+ const auto *CX = cast<ComplexType>(X), *CY = cast<ComplexType>(Y);
+ return Ctx.getComplexType(getCommonArrayElementType(Ctx, CX, QX, CY, QY));
+ }
+ case Type::Pointer: {
+ const auto *PX = cast<PointerType>(X), *PY = cast<PointerType>(Y);
+ return Ctx.getPointerType(getCommonPointeeType(Ctx, PX, PY));
+ }
+ case Type::BlockPointer: {
+ const auto *PX = cast<BlockPointerType>(X), *PY = cast<BlockPointerType>(Y);
+ return Ctx.getBlockPointerType(getCommonPointeeType(Ctx, PX, PY));
+ }
+ case Type::ObjCObjectPointer: {
+ const auto *PX = cast<ObjCObjectPointerType>(X),
+ *PY = cast<ObjCObjectPointerType>(Y);
+ return Ctx.getObjCObjectPointerType(getCommonPointeeType(Ctx, PX, PY));
+ }
+ case Type::MemberPointer: {
+ const auto *PX = cast<MemberPointerType>(X),
+ *PY = cast<MemberPointerType>(Y);
+ return Ctx.getMemberPointerType(
+ getCommonPointeeType(Ctx, PX, PY),
+ Ctx.getCommonSugaredType(QualType(PX->getClass(), 0),
+ QualType(PY->getClass(), 0))
+ .getTypePtr());
+ }
+ case Type::LValueReference: {
+ const auto *PX = cast<LValueReferenceType>(X),
+ *PY = cast<LValueReferenceType>(Y);
+ // FIXME: Preserve PointeeTypeAsWritten.
+ return Ctx.getLValueReferenceType(getCommonPointeeType(Ctx, PX, PY),
+ PX->isSpelledAsLValue() ||
+ PY->isSpelledAsLValue());
+ }
+ case Type::RValueReference: {
+ const auto *PX = cast<RValueReferenceType>(X),
+ *PY = cast<RValueReferenceType>(Y);
+ // FIXME: Preserve PointeeTypeAsWritten.
+ return Ctx.getRValueReferenceType(getCommonPointeeType(Ctx, PX, PY));
+ }
+ case Type::DependentAddressSpace: {
+ const auto *PX = cast<DependentAddressSpaceType>(X),
+ *PY = cast<DependentAddressSpaceType>(Y);
+ assert(Ctx.hasSameExpr(PX->getAddrSpaceExpr(), PY->getAddrSpaceExpr()));
+ return Ctx.getDependentAddressSpaceType(getCommonPointeeType(Ctx, PX, PY),
+ PX->getAddrSpaceExpr(),
+ getCommonAttrLoc(PX, PY));
+ }
+ case Type::FunctionNoProto: {
+ const auto *FX = cast<FunctionNoProtoType>(X),
+ *FY = cast<FunctionNoProtoType>(Y);
+ assert(FX->getExtInfo() == FY->getExtInfo());
+ return Ctx.getFunctionNoProtoType(
+ Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType()),
+ FX->getExtInfo());
+ }
+ case Type::FunctionProto: {
+ const auto *FX = cast<FunctionProtoType>(X),
+ *FY = cast<FunctionProtoType>(Y);
+ FunctionProtoType::ExtProtoInfo EPIX = FX->getExtProtoInfo(),
+ EPIY = FY->getExtProtoInfo();
+ assert(EPIX.ExtInfo == EPIY.ExtInfo);
+ assert(EPIX.ExtParameterInfos == EPIY.ExtParameterInfos);
+ assert(EPIX.RefQualifier == EPIY.RefQualifier);
+ assert(EPIX.TypeQuals == EPIY.TypeQuals);
+ assert(EPIX.Variadic == EPIY.Variadic);
+
+ // FIXME: Can we handle an empty EllipsisLoc?
+ // Use emtpy EllipsisLoc if X and Y differ.
+
+ EPIX.HasTrailingReturn = EPIX.HasTrailingReturn && EPIY.HasTrailingReturn;
+
+ QualType R =
+ Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType());
+ auto P = getCommonTypes(Ctx, FX->param_types(), FY->param_types(),
+ /*Unqualified=*/true);
+
+ SmallVector<QualType, 8> Exceptions;
+ EPIX.ExceptionSpec = Ctx.mergeExceptionSpecs(
+ EPIX.ExceptionSpec, EPIY.ExceptionSpec, Exceptions, true);
+ return Ctx.getFunctionType(R, P, EPIX);
+ }
+ case Type::ObjCObject: {
+ const auto *OX = cast<ObjCObjectType>(X), *OY = cast<ObjCObjectType>(Y);
+ assert(
+ std::equal(OX->getProtocols().begin(), OX->getProtocols().end(),
+ OY->getProtocols().begin(), OY->getProtocols().end(),
+ [](const ObjCProtocolDecl *P0, const ObjCProtocolDecl *P1) {
+ return P0->getCanonicalDecl() == P1->getCanonicalDecl();
+ }) &&
+ "protocol lists must be the same");
+ auto TAs = getCommonTypes(Ctx, OX->getTypeArgsAsWritten(),
+ OY->getTypeArgsAsWritten());
+ return Ctx.getObjCObjectType(
+ Ctx.getCommonSugaredType(OX->getBaseType(), OY->getBaseType()), TAs,
+ OX->getProtocols(),
+ OX->isKindOfTypeAsWritten() && OY->isKindOfTypeAsWritten());
+ }
+ case Type::ConstantMatrix: {
+ const auto *MX = cast<ConstantMatrixType>(X),
+ *MY = cast<ConstantMatrixType>(Y);
+ assert(MX->getNumRows() == MY->getNumRows());
+ assert(MX->getNumColumns() == MY->getNumColumns());
+ return Ctx.getConstantMatrixType(getCommonElementType(Ctx, MX, MY),
+ MX->getNumRows(), MX->getNumColumns());
+ }
+ case Type::DependentSizedMatrix: {
+ const auto *MX = cast<DependentSizedMatrixType>(X),
+ *MY = cast<DependentSizedMatrixType>(Y);
+ assert(Ctx.hasSameExpr(MX->getRowExpr(), MY->getRowExpr()));
+ assert(Ctx.hasSameExpr(MX->getColumnExpr(), MY->getColumnExpr()));
+ return Ctx.getDependentSizedMatrixType(
+ getCommonElementType(Ctx, MX, MY), MX->getRowExpr(),
+ MX->getColumnExpr(), getCommonAttrLoc(MX, MY));
+ }
+ case Type::Vector: {
+ const auto *VX = cast<VectorType>(X), *VY = cast<VectorType>(Y);
+ assert(VX->getNumElements() == VY->getNumElements());
+ assert(VX->getVectorKind() == VY->getVectorKind());
+ return Ctx.getVectorType(getCommonElementType(Ctx, VX, VY),
+ VX->getNumElements(), VX->getVectorKind());
+ }
+ case Type::ExtVector: {
+ const auto *VX = cast<ExtVectorType>(X), *VY = cast<ExtVectorType>(Y);
+ assert(VX->getNumElements() == VY->getNumElements());
+ return Ctx.getExtVectorType(getCommonElementType(Ctx, VX, VY),
+ VX->getNumElements());
+ }
+ case Type::DependentSizedExtVector: {
+ const auto *VX = cast<DependentSizedExtVectorType>(X),
+ *VY = cast<DependentSizedExtVectorType>(Y);
+ return Ctx.getDependentSizedExtVectorType(getCommonElementType(Ctx, VX, VY),
+ getCommonSizeExpr(Ctx, VX, VY),
+ getCommonAttrLoc(VX, VY));
+ }
+ case Type::DependentVector: {
+ const auto *VX = cast<DependentVectorType>(X),
+ *VY = cast<DependentVectorType>(Y);
+ assert(VX->getVectorKind() == VY->getVectorKind());
+ return Ctx.getDependentVectorType(
+ getCommonElementType(Ctx, VX, VY), getCommonSizeExpr(Ctx, VX, VY),
+ getCommonAttrLoc(VX, VY), VX->getVectorKind());
+ }
+ case Type::InjectedClassName: {
+ const auto *IX = cast<InjectedClassNameType>(X),
+ *IY = cast<InjectedClassNameType>(Y);
+ return Ctx.getInjectedClassNameType(
+ getCommonDeclChecked(IX->getDecl(), IY->getDecl()),
+ Ctx.getCommonSugaredType(IX->getInjectedSpecializationType(),
+ IY->getInjectedSpecializationType()));
+ }
+ case Type::TemplateSpecialization: {
+ const auto *TX = cast<TemplateSpecializationType>(X),
+ *TY = cast<TemplateSpecializationType>(Y);
+ auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(),
+ TY->template_arguments());
+ return Ctx.getTemplateSpecializationType(
+ ::getCommonTemplateNameChecked(Ctx, TX->getTemplateName(),
+ TY->getTemplateName()),
+ As, X->getCanonicalTypeInternal());
+ }
+ case Type::Decltype: {
+ const auto *DX = cast<DecltypeType>(X);
+ [[maybe_unused]] const auto *DY = cast<DecltypeType>(Y);
+ assert(DX->isDependentType());
+ assert(DY->isDependentType());
+ assert(Ctx.hasSameExpr(DX->getUnderlyingExpr(), DY->getUnderlyingExpr()));
+ // As Decltype is not uniqued, building a common type would be wasteful.
+ return QualType(DX, 0);
+ }
+ case Type::DependentName: {
+ const auto *NX = cast<DependentNameType>(X),
+ *NY = cast<DependentNameType>(Y);
+ assert(NX->getIdentifier() == NY->getIdentifier());
+ return Ctx.getDependentNameType(
+ getCommonTypeKeyword(NX, NY), getCommonNNS(Ctx, NX, NY),
+ NX->getIdentifier(), NX->getCanonicalTypeInternal());
+ }
+ case Type::DependentTemplateSpecialization: {
+ const auto *TX = cast<DependentTemplateSpecializationType>(X),
+ *TY = cast<DependentTemplateSpecializationType>(Y);
+ assert(TX->getIdentifier() == TY->getIdentifier());
+ auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(),
+ TY->template_arguments());
+ return Ctx.getDependentTemplateSpecializationType(
+ getCommonTypeKeyword(TX, TY), getCommonNNS(Ctx, TX, TY),
+ TX->getIdentifier(), As);
+ }
+ case Type::UnaryTransform: {
+ const auto *TX = cast<UnaryTransformType>(X),
+ *TY = cast<UnaryTransformType>(Y);
+ assert(TX->getUTTKind() == TY->getUTTKind());
+ return Ctx.getUnaryTransformType(
+ Ctx.getCommonSugaredType(TX->getBaseType(), TY->getBaseType()),
+ Ctx.getCommonSugaredType(TX->getUnderlyingType(),
+ TY->getUnderlyingType()),
+ TX->getUTTKind());
+ }
+ case Type::PackExpansion: {
+ const auto *PX = cast<PackExpansionType>(X),
+ *PY = cast<PackExpansionType>(Y);
+ assert(PX->getNumExpansions() == PY->getNumExpansions());
+ return Ctx.getPackExpansionType(
+ Ctx.getCommonSugaredType(PX->getPattern(), PY->getPattern()),
+ PX->getNumExpansions(), false);
+ }
+ case Type::Pipe: {
+ const auto *PX = cast<PipeType>(X), *PY = cast<PipeType>(Y);
+ assert(PX->isReadOnly() == PY->isReadOnly());
+ auto MP = PX->isReadOnly() ? &ASTContext::getReadPipeType
+ : &ASTContext::getWritePipeType;
+ return (Ctx.*MP)(getCommonElementType(Ctx, PX, PY));
+ }
+ case Type::TemplateTypeParm: {
+ const auto *TX = cast<TemplateTypeParmType>(X),
+ *TY = cast<TemplateTypeParmType>(Y);
+ assert(TX->getDepth() == TY->getDepth());
+ assert(TX->getIndex() == TY->getIndex());
+ assert(TX->isParameterPack() == TY->isParameterPack());
+ return Ctx.getTemplateTypeParmType(
+ TX->getDepth(), TX->getIndex(), TX->isParameterPack(),
+ getCommonDecl(TX->getDecl(), TY->getDecl()));
+ }
+ }
+ llvm_unreachable("Unknown Type Class");
+}
+
+static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X,
+ const Type *Y,
+ SplitQualType Underlying) {
+ Type::TypeClass TC = X->getTypeClass();
+ if (TC != Y->getTypeClass())
+ return QualType();
+ switch (TC) {
+#define UNEXPECTED_TYPE(Class, Kind) \
+ case Type::Class: \
+ llvm_unreachable("Unexpected " Kind ": " #Class);
+#define TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "dependent")
+#include "clang/AST/TypeNodes.inc"
+
+#define CANONICAL_TYPE(Class) UNEXPECTED_TYPE(Class, "canonical")
+ CANONICAL_TYPE(Atomic)
+ CANONICAL_TYPE(BitInt)
+ CANONICAL_TYPE(BlockPointer)
+ CANONICAL_TYPE(Builtin)
+ CANONICAL_TYPE(Complex)
+ CANONICAL_TYPE(ConstantArray)
+ CANONICAL_TYPE(ConstantMatrix)
+ CANONICAL_TYPE(Enum)
+ CANONICAL_TYPE(ExtVector)
+ CANONICAL_TYPE(FunctionNoProto)
+ CANONICAL_TYPE(FunctionProto)
+ CANONICAL_TYPE(IncompleteArray)
+ CANONICAL_TYPE(LValueReference)
+ CANONICAL_TYPE(MemberPointer)
+ CANONICAL_TYPE(ObjCInterface)
+ CANONICAL_TYPE(ObjCObject)
+ CANONICAL_TYPE(ObjCObjectPointer)
+ CANONICAL_TYPE(Pipe)
+ CANONICAL_TYPE(Pointer)
+ CANONICAL_TYPE(Record)
+ CANONICAL_TYPE(RValueReference)
+ CANONICAL_TYPE(VariableArray)
+ CANONICAL_TYPE(Vector)
+#undef CANONICAL_TYPE
+
+#undef UNEXPECTED_TYPE
+
+ case Type::Adjusted: {
+ const auto *AX = cast<AdjustedType>(X), *AY = cast<AdjustedType>(Y);
+ QualType OX = AX->getOriginalType(), OY = AY->getOriginalType();
+ if (!Ctx.hasSameType(OX, OY))
+ return QualType();
+ // FIXME: It's inefficient to have to unify the original types.
+ return Ctx.getAdjustedType(Ctx.getCommonSugaredType(OX, OY),
+ Ctx.getQualifiedType(Underlying));
+ }
+ case Type::Decayed: {
+ const auto *DX = cast<DecayedType>(X), *DY = cast<DecayedType>(Y);
+ QualType OX = DX->getOriginalType(), OY = DY->getOriginalType();
+ if (!Ctx.hasSameType(OX, OY))
+ return QualType();
+ // FIXME: It's inefficient to have to unify the original types.
+ return Ctx.getDecayedType(Ctx.getCommonSugaredType(OX, OY),
+ Ctx.getQualifiedType(Underlying));
+ }
+ case Type::Attributed: {
+ const auto *AX = cast<AttributedType>(X), *AY = cast<AttributedType>(Y);
+ AttributedType::Kind Kind = AX->getAttrKind();
+ if (Kind != AY->getAttrKind())
+ return QualType();
+ QualType MX = AX->getModifiedType(), MY = AY->getModifiedType();
+ if (!Ctx.hasSameType(MX, MY))
+ return QualType();
+ // FIXME: It's inefficient to have to unify the modified types.
+ return Ctx.getAttributedType(Kind, Ctx.getCommonSugaredType(MX, MY),
+ Ctx.getQualifiedType(Underlying));
+ }
+ case Type::BTFTagAttributed: {
+ const auto *BX = cast<BTFTagAttributedType>(X);
+ const BTFTypeTagAttr *AX = BX->getAttr();
+ // The attribute is not uniqued, so just compare the tag.
+ if (AX->getBTFTypeTag() !=
+ cast<BTFTagAttributedType>(Y)->getAttr()->getBTFTypeTag())
+ return QualType();
+ return Ctx.getBTFTagAttributedType(AX, Ctx.getQualifiedType(Underlying));
+ }
+ case Type::Auto: {
+ const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y);
+
+ AutoTypeKeyword KW = AX->getKeyword();
+ if (KW != AY->getKeyword())
+ return QualType();
+
+ ConceptDecl *CD = ::getCommonDecl(AX->getTypeConstraintConcept(),
+ AY->getTypeConstraintConcept());
+ SmallVector<TemplateArgument, 8> As;
+ if (CD &&
+ getCommonTemplateArguments(Ctx, As, AX->getTypeConstraintArguments(),
+ AY->getTypeConstraintArguments())) {
+ CD = nullptr; // The arguments differ, so make it unconstrained.
+ As.clear();
+ }
+
+ // Both auto types can't be dependent, otherwise they wouldn't have been
+ // sugar. This implies they can't contain unexpanded packs either.
+ return Ctx.getAutoType(Ctx.getQualifiedType(Underlying), AX->getKeyword(),
+ /*IsDependent=*/false, /*IsPack=*/false, CD, As);
+ }
+ case Type::Decltype:
+ return QualType();
+ case Type::DeducedTemplateSpecialization:
+ // FIXME: Try to merge these.
+ return QualType();
+
+ case Type::Elaborated: {
+ const auto *EX = cast<ElaboratedType>(X), *EY = cast<ElaboratedType>(Y);
+ return Ctx.getElaboratedType(
+ ::getCommonTypeKeyword(EX, EY), ::getCommonNNS(Ctx, EX, EY),
+ Ctx.getQualifiedType(Underlying),
+ ::getCommonDecl(EX->getOwnedTagDecl(), EY->getOwnedTagDecl()));
+ }
+ case Type::MacroQualified: {
+ const auto *MX = cast<MacroQualifiedType>(X),
+ *MY = cast<MacroQualifiedType>(Y);
+ const IdentifierInfo *IX = MX->getMacroIdentifier();
+ if (IX != MY->getMacroIdentifier())
+ return QualType();
+ return Ctx.getMacroQualifiedType(Ctx.getQualifiedType(Underlying), IX);
+ }
+ case Type::SubstTemplateTypeParm: {
+ const auto *SX = cast<SubstTemplateTypeParmType>(X),
+ *SY = cast<SubstTemplateTypeParmType>(Y);
+ Decl *CD =
+ ::getCommonDecl(SX->getAssociatedDecl(), SY->getAssociatedDecl());
+ if (!CD)
+ return QualType();
+ unsigned Index = SX->getIndex();
+ if (Index != SY->getIndex())
+ return QualType();
+ auto PackIndex = SX->getPackIndex();
+ if (PackIndex != SY->getPackIndex())
+ return QualType();
+ return Ctx.getSubstTemplateTypeParmType(Ctx.getQualifiedType(Underlying),
+ CD, Index, PackIndex);
+ }
+ case Type::ObjCTypeParam:
+ // FIXME: Try to merge these.
+ return QualType();
+ case Type::Paren:
+ return Ctx.getParenType(Ctx.getQualifiedType(Underlying));
+
+ case Type::TemplateSpecialization: {
+ const auto *TX = cast<TemplateSpecializationType>(X),
+ *TY = cast<TemplateSpecializationType>(Y);
+ TemplateName CTN = ::getCommonTemplateName(Ctx, TX->getTemplateName(),
+ TY->getTemplateName());
+ if (!CTN.getAsVoidPointer())
+ return QualType();
+ SmallVector<TemplateArgument, 8> Args;
+ if (getCommonTemplateArguments(Ctx, Args, TX->template_arguments(),
+ TY->template_arguments()))
+ return QualType();
+ return Ctx.getTemplateSpecializationType(CTN, Args,
+ Ctx.getQualifiedType(Underlying));
+ }
+ case Type::Typedef: {
+ const auto *TX = cast<TypedefType>(X), *TY = cast<TypedefType>(Y);
+ const TypedefNameDecl *CD = ::getCommonDecl(TX->getDecl(), TY->getDecl());
+ if (!CD)
+ return QualType();
+ return Ctx.getTypedefType(CD, Ctx.getQualifiedType(Underlying));
+ }
+ case Type::TypeOf: {
+ // The common sugar between two typeof expressions, where one is
+ // potentially a typeof_unqual and the other is not, we unify to the
+ // qualified type as that retains the most information along with the type.
+ // We only return a typeof_unqual type when both types are unqual types.
+ TypeOfKind Kind = TypeOfKind::Qualified;
+ if (cast<TypeOfType>(X)->getKind() == cast<TypeOfType>(Y)->getKind() &&
+ cast<TypeOfType>(X)->getKind() == TypeOfKind::Unqualified)
+ Kind = TypeOfKind::Unqualified;
+ return Ctx.getTypeOfType(Ctx.getQualifiedType(Underlying), Kind);
+ }
+ case Type::TypeOfExpr:
+ return QualType();
+
+ case Type::UnaryTransform: {
+ const auto *UX = cast<UnaryTransformType>(X),
+ *UY = cast<UnaryTransformType>(Y);
+ UnaryTransformType::UTTKind KX = UX->getUTTKind();
+ if (KX != UY->getUTTKind())
+ return QualType();
+ QualType BX = UX->getBaseType(), BY = UY->getBaseType();
+ if (!Ctx.hasSameType(BX, BY))
+ return QualType();
+ // FIXME: It's inefficient to have to unify the base types.
+ return Ctx.getUnaryTransformType(Ctx.getCommonSugaredType(BX, BY),
+ Ctx.getQualifiedType(Underlying), KX);
+ }
+ case Type::Using: {
+ const auto *UX = cast<UsingType>(X), *UY = cast<UsingType>(Y);
+ const UsingShadowDecl *CD =
+ ::getCommonDecl(UX->getFoundDecl(), UY->getFoundDecl());
+ if (!CD)
+ return QualType();
+ return Ctx.getUsingType(CD, Ctx.getQualifiedType(Underlying));
+ }
+ }
+ llvm_unreachable("Unhandled Type Class");
+}
+
+static auto unwrapSugar(SplitQualType &T, Qualifiers &QTotal) {
+ SmallVector<SplitQualType, 8> R;
+ while (true) {
+ QTotal.addConsistentQualifiers(T.Quals);
+ QualType NT = T.Ty->getLocallyUnqualifiedSingleStepDesugaredType();
+ if (NT == QualType(T.Ty, 0))
+ break;
+ R.push_back(T);
+ T = NT.split();
+ }
+ return R;
+}
+
+QualType ASTContext::getCommonSugaredType(QualType X, QualType Y,
+ bool Unqualified) {
+ assert(Unqualified ? hasSameUnqualifiedType(X, Y) : hasSameType(X, Y));
+ if (X == Y)
+ return X;
+ if (!Unqualified) {
+ if (X.isCanonical())
+ return X;
+ if (Y.isCanonical())
+ return Y;
+ }
+
+ SplitQualType SX = X.split(), SY = Y.split();
+ Qualifiers QX, QY;
+ // Desugar SX and SY, setting the sugar and qualifiers aside into Xs and Ys,
+ // until we reach their underlying "canonical nodes". Note these are not
+ // necessarily canonical types, as they may still have sugared properties.
+ // QX and QY will store the sum of all qualifiers in Xs and Ys respectively.
+ auto Xs = ::unwrapSugar(SX, QX), Ys = ::unwrapSugar(SY, QY);
+ if (SX.Ty != SY.Ty) {
+ // The canonical nodes differ. Build a common canonical node out of the two,
+ // unifying their sugar. This may recurse back here.
+ SX.Ty =
+ ::getCommonNonSugarTypeNode(*this, SX.Ty, QX, SY.Ty, QY).getTypePtr();
+ } else {
+ // The canonical nodes were identical: We may have desugared too much.
+ // Add any common sugar back in.
+ while (!Xs.empty() && !Ys.empty() && Xs.back().Ty == Ys.back().Ty) {
+ QX -= SX.Quals;
+ QY -= SY.Quals;
+ SX = Xs.pop_back_val();
+ SY = Ys.pop_back_val();
+ }
+ }
+ if (Unqualified)
+ QX = Qualifiers::removeCommonQualifiers(QX, QY);
else
- return (*AddrSpaceMap)[(unsigned)AS];
+ assert(QX == QY);
+
+ // Even though the remaining sugar nodes in Xs and Ys differ, some may be
+ // related. Walk up these nodes, unifying them and adding the result.
+ while (!Xs.empty() && !Ys.empty()) {
+ auto Underlying = SplitQualType(
+ SX.Ty, Qualifiers::removeCommonQualifiers(SX.Quals, SY.Quals));
+ SX = Xs.pop_back_val();
+ SY = Ys.pop_back_val();
+ SX.Ty = ::getCommonSugarTypeNode(*this, SX.Ty, SY.Ty, Underlying)
+ .getTypePtrOrNull();
+ // Stop at the first pair which is unrelated.
+ if (!SX.Ty) {
+ SX.Ty = Underlying.Ty;
+ break;
+ }
+ QX -= Underlying.Quals;
+ };
+
+ // Add back the missing accumulated qualifiers, which were stripped off
+ // with the sugar nodes we could not unify.
+ QualType R = getQualifiedType(SX.Ty, QX);
+ assert(Unqualified ? hasSameUnqualifiedType(R, X) : hasSameType(R, X));
+ return R;
}
QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const {
@@ -11620,18 +13461,27 @@ QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const {
}
}
+std::vector<std::string> ASTContext::filterFunctionTargetVersionAttrs(
+ const TargetVersionAttr *TV) const {
+ assert(TV != nullptr);
+ llvm::SmallVector<StringRef, 8> Feats;
+ std::vector<std::string> ResFeats;
+ TV->getFeatures(Feats);
+ for (auto &Feature : Feats)
+ if (Target->validateCpuSupports(Feature.str()))
+ // Use '?' to mark features that came from TargetVersion.
+ ResFeats.push_back("?" + Feature.str());
+ return ResFeats;
+}
+
ParsedTargetAttr
ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const {
assert(TD != nullptr);
- ParsedTargetAttr ParsedAttr = TD->parse();
-
- ParsedAttr.Features.erase(
- llvm::remove_if(ParsedAttr.Features,
- [&](const std::string &Feat) {
- return !Target->isValidFeatureName(
- StringRef{Feat}.substr(1));
- }),
- ParsedAttr.Features.end());
+ ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(TD->getFeaturesStr());
+
+ llvm::erase_if(ParsedAttr.Features, [&](const std::string &Feat) {
+ return !Target->isValidFeatureName(StringRef{Feat}.substr(1));
+ });
return ParsedAttr;
}
@@ -11661,9 +13511,8 @@ void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
Target->getTargetOpts().FeaturesAsWritten.begin(),
Target->getTargetOpts().FeaturesAsWritten.end());
- if (ParsedAttr.Architecture != "" &&
- Target->isValidCPUName(ParsedAttr.Architecture))
- TargetCPU = ParsedAttr.Architecture;
+ if (ParsedAttr.CPU != "" && Target->isValidCPUName(ParsedAttr.CPU))
+ TargetCPU = ParsedAttr.CPU;
// Now populate the feature map, first with the TargetCPU which is either
// the default or a new one from the target attribute string. Then we'll use
@@ -11676,7 +13525,40 @@ void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
Target->getCPUSpecificCPUDispatchFeatures(
SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp);
std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
+ Features.insert(Features.begin(),
+ Target->getTargetOpts().FeaturesAsWritten.begin(),
+ Target->getTargetOpts().FeaturesAsWritten.end());
Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
+ } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) {
+ std::vector<std::string> Features;
+ StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex());
+ if (Target->getTriple().isAArch64()) {
+ // TargetClones for AArch64
+ if (VersionStr != "default") {
+ SmallVector<StringRef, 1> VersionFeatures;
+ VersionStr.split(VersionFeatures, "+");
+ for (auto &VFeature : VersionFeatures) {
+ VFeature = VFeature.trim();
+ // Use '?' to mark features that came from AArch64 TargetClones.
+ Features.push_back((StringRef{"?"} + VFeature).str());
+ }
+ }
+ Features.insert(Features.begin(),
+ Target->getTargetOpts().FeaturesAsWritten.begin(),
+ Target->getTargetOpts().FeaturesAsWritten.end());
+ } else {
+ if (VersionStr.starts_with("arch="))
+ TargetCPU = VersionStr.drop_front(sizeof("arch=") - 1);
+ else if (VersionStr != "default")
+ Features.push_back((StringRef{"+"} + VersionStr).str());
+ }
+ Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
+ } else if (const auto *TV = FD->getAttr<TargetVersionAttr>()) {
+ std::vector<std::string> Feats = filterFunctionTargetVersionAttrs(TV);
+ Feats.insert(Feats.begin(),
+ Target->getTargetOpts().FeaturesAsWritten.begin(),
+ Target->getTargetOpts().FeaturesAsWritten.end());
+ Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Feats);
} else {
FeatureMap = Target->getTargetOpts().FeatureMap;
}
@@ -11695,22 +13577,27 @@ operator<<(const StreamingDiagnostic &DB,
return DB << "a prior #pragma section";
}
-bool ASTContext::mayExternalizeStaticVar(const Decl *D) const {
- bool IsStaticVar =
- isa<VarDecl>(D) && cast<VarDecl>(D)->getStorageClass() == SC_Static;
+bool ASTContext::mayExternalize(const Decl *D) const {
+ bool IsInternalVar =
+ isa<VarDecl>(D) &&
+ basicGVALinkageForVariable(*this, cast<VarDecl>(D)) == GVA_Internal;
bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
!D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
(D->hasAttr<CUDAConstantAttr>() &&
!D->getAttr<CUDAConstantAttr>()->isImplicit());
- // CUDA/HIP: static managed variables need to be externalized since it is
- // a declaration in IR, therefore cannot have internal linkage.
- return IsStaticVar &&
- (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar);
-}
-
-bool ASTContext::shouldExternalizeStaticVar(const Decl *D) const {
- return mayExternalizeStaticVar(D) &&
- (D->hasAttr<HIPManagedAttr>() ||
+ // CUDA/HIP: managed variables need to be externalized since it is
+ // a declaration in IR, therefore cannot have internal linkage. Kernels in
+ // anonymous name space needs to be externalized to avoid duplicate symbols.
+ return (IsInternalVar &&
+ (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) ||
+ (D->hasAttr<CUDAGlobalAttr>() &&
+ basicGVALinkageForFunction(*this, cast<FunctionDecl>(D)) ==
+ GVA_Internal);
+}
+
+bool ASTContext::shouldExternalize(const Decl *D) const {
+ return mayExternalize(D) &&
+ (D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() ||
CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D)));
}
@@ -11722,86 +13609,3 @@ StringRef ASTContext::getCUIDHash() const {
CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true);
return CUIDHash;
}
-
-// Get the closest named parent, so we can order the sycl naming decls somewhere
-// that mangling is meaningful.
-static const DeclContext *GetNamedParent(const CXXRecordDecl *RD) {
- const DeclContext *DC = RD->getDeclContext();
-
- while (!isa<NamedDecl, TranslationUnitDecl>(DC))
- DC = DC->getParent();
- return DC;
-}
-
-void ASTContext::AddSYCLKernelNamingDecl(const CXXRecordDecl *RD) {
- assert(getLangOpts().isSYCL() && "Only valid for SYCL programs");
- RD = RD->getCanonicalDecl();
- const DeclContext *DC = GetNamedParent(RD);
-
- assert(RD->getLocation().isValid() &&
- "Invalid location on kernel naming decl");
-
- (void)SYCLKernelNamingTypes[DC].insert(RD);
-}
-
-bool ASTContext::IsSYCLKernelNamingDecl(const NamedDecl *ND) const {
- assert(getLangOpts().isSYCL() && "Only valid for SYCL programs");
- const auto *RD = dyn_cast<CXXRecordDecl>(ND);
- if (!RD)
- return false;
- RD = RD->getCanonicalDecl();
- const DeclContext *DC = GetNamedParent(RD);
-
- auto Itr = SYCLKernelNamingTypes.find(DC);
-
- if (Itr == SYCLKernelNamingTypes.end())
- return false;
-
- return Itr->getSecond().count(RD);
-}
-
-// Filters the Decls list to those that share the lambda mangling with the
-// passed RD.
-void ASTContext::FilterSYCLKernelNamingDecls(
- const CXXRecordDecl *RD,
- llvm::SmallVectorImpl<const CXXRecordDecl *> &Decls) {
-
- if (!SYCLKernelFilterContext)
- SYCLKernelFilterContext.reset(
- ItaniumMangleContext::create(*this, getDiagnostics()));
-
- llvm::SmallString<128> LambdaSig;
- llvm::raw_svector_ostream Out(LambdaSig);
- SYCLKernelFilterContext->mangleLambdaSig(RD, Out);
-
- llvm::erase_if(Decls, [this, &LambdaSig](const CXXRecordDecl *LocalRD) {
- llvm::SmallString<128> LocalLambdaSig;
- llvm::raw_svector_ostream LocalOut(LocalLambdaSig);
- SYCLKernelFilterContext->mangleLambdaSig(LocalRD, LocalOut);
- return LambdaSig != LocalLambdaSig;
- });
-}
-
-unsigned ASTContext::GetSYCLKernelNamingIndex(const NamedDecl *ND) {
- assert(getLangOpts().isSYCL() && "Only valid for SYCL programs");
- assert(IsSYCLKernelNamingDecl(ND) &&
- "Lambda not involved in mangling asked for a naming index?");
-
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(ND)->getCanonicalDecl();
- const DeclContext *DC = GetNamedParent(RD);
-
- auto Itr = SYCLKernelNamingTypes.find(DC);
- assert(Itr != SYCLKernelNamingTypes.end() && "Not a valid DeclContext?");
-
- const llvm::SmallPtrSet<const CXXRecordDecl *, 4> &Set = Itr->getSecond();
-
- llvm::SmallVector<const CXXRecordDecl *> Decls{Set.begin(), Set.end()};
-
- FilterSYCLKernelNamingDecls(RD, Decls);
-
- llvm::sort(Decls, [](const CXXRecordDecl *LHS, const CXXRecordDecl *RHS) {
- return LHS->getLambdaManglingNumber() < RHS->getLambdaManglingNumber();
- });
-
- return llvm::find(Decls, RD) - Decls.begin();
-}
diff --git a/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp b/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp
index dc22481d0a84..7b0d5f9cc1a9 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp
@@ -25,8 +25,10 @@
using namespace clang;
// Returns a desugared version of the QualType, and marks ShouldAKA as true
-// whenever we remove significant sugar from the type.
-static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) {
+// whenever we remove significant sugar from the type. Make sure ShouldAKA
+// is initialized before passing it in.
+QualType clang::desugarForDiagnostic(ASTContext &Context, QualType QT,
+ bool &ShouldAKA) {
QualifierCollector QC;
while (true) {
@@ -37,6 +39,11 @@ static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) {
QT = ET->desugar();
continue;
}
+ // ... or a using type ...
+ if (const UsingType *UT = dyn_cast<UsingType>(Ty)) {
+ QT = UT->desugar();
+ continue;
+ }
// ... or a paren type ...
if (const ParenType *PT = dyn_cast<ParenType>(Ty)) {
QT = PT->desugar();
@@ -76,7 +83,7 @@ static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) {
if (const FunctionType *FT = dyn_cast<FunctionType>(Ty)) {
bool DesugarReturn = false;
QualType SugarRT = FT->getReturnType();
- QualType RT = Desugar(Context, SugarRT, DesugarReturn);
+ QualType RT = desugarForDiagnostic(Context, SugarRT, DesugarReturn);
if (auto nullability = AttributedType::stripOuterNullability(SugarRT)) {
RT = Context.getAttributedType(
AttributedType::getNullabilityAttrKind(*nullability), RT, RT);
@@ -87,7 +94,7 @@ static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) {
const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT);
if (FPT) {
for (QualType SugarPT : FPT->param_types()) {
- QualType PT = Desugar(Context, SugarPT, DesugarArgument);
+ QualType PT = desugarForDiagnostic(Context, SugarPT, DesugarArgument);
if (auto nullability =
AttributedType::stripOuterNullability(SugarPT)) {
PT = Context.getAttributedType(
@@ -112,10 +119,10 @@ static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) {
if (!TST->isTypeAlias()) {
bool DesugarArgument = false;
SmallVector<TemplateArgument, 4> Args;
- for (unsigned I = 0, N = TST->getNumArgs(); I != N; ++I) {
- const TemplateArgument &Arg = TST->getArg(I);
+ for (const TemplateArgument &Arg : TST->template_arguments()) {
if (Arg.getKind() == TemplateArgument::Type)
- Args.push_back(Desugar(Context, Arg.getAsType(), DesugarArgument));
+ Args.push_back(desugarForDiagnostic(Context, Arg.getAsType(),
+ DesugarArgument));
else
Args.push_back(Arg);
}
@@ -129,6 +136,29 @@ static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) {
}
}
+ if (const auto *AT = dyn_cast<ArrayType>(Ty)) {
+ QualType ElementTy =
+ desugarForDiagnostic(Context, AT->getElementType(), ShouldAKA);
+ if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
+ QT = Context.getConstantArrayType(
+ ElementTy, CAT->getSize(), CAT->getSizeExpr(),
+ CAT->getSizeModifier(), CAT->getIndexTypeCVRQualifiers());
+ else if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
+ QT = Context.getVariableArrayType(
+ ElementTy, VAT->getSizeExpr(), VAT->getSizeModifier(),
+ VAT->getIndexTypeCVRQualifiers(), VAT->getBracketsRange());
+ else if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(AT))
+ QT = Context.getDependentSizedArrayType(
+ ElementTy, DSAT->getSizeExpr(), DSAT->getSizeModifier(),
+ DSAT->getIndexTypeCVRQualifiers(), DSAT->getBracketsRange());
+ else if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT))
+ QT = Context.getIncompleteArrayType(ElementTy, IAT->getSizeModifier(),
+ IAT->getIndexTypeCVRQualifiers());
+ else
+ llvm_unreachable("Unhandled array type");
+ break;
+ }
+
// Don't desugar magic Objective-C types.
if (QualType(Ty,0) == Context.getObjCIdType() ||
QualType(Ty,0) == Context.getObjCClassType() ||
@@ -181,24 +211,25 @@ break; \
// If we have a pointer-like type, desugar the pointee as well.
// FIXME: Handle other pointer-like types.
if (const PointerType *Ty = QT->getAs<PointerType>()) {
- QT = Context.getPointerType(Desugar(Context, Ty->getPointeeType(),
- ShouldAKA));
+ QT = Context.getPointerType(
+ desugarForDiagnostic(Context, Ty->getPointeeType(), ShouldAKA));
} else if (const auto *Ty = QT->getAs<ObjCObjectPointerType>()) {
- QT = Context.getObjCObjectPointerType(Desugar(Context, Ty->getPointeeType(),
- ShouldAKA));
+ QT = Context.getObjCObjectPointerType(
+ desugarForDiagnostic(Context, Ty->getPointeeType(), ShouldAKA));
} else if (const LValueReferenceType *Ty = QT->getAs<LValueReferenceType>()) {
- QT = Context.getLValueReferenceType(Desugar(Context, Ty->getPointeeType(),
- ShouldAKA));
+ QT = Context.getLValueReferenceType(
+ desugarForDiagnostic(Context, Ty->getPointeeType(), ShouldAKA));
} else if (const RValueReferenceType *Ty = QT->getAs<RValueReferenceType>()) {
- QT = Context.getRValueReferenceType(Desugar(Context, Ty->getPointeeType(),
- ShouldAKA));
+ QT = Context.getRValueReferenceType(
+ desugarForDiagnostic(Context, Ty->getPointeeType(), ShouldAKA));
} else if (const auto *Ty = QT->getAs<ObjCObjectType>()) {
if (Ty->getBaseType().getTypePtr() != Ty && !ShouldAKA) {
- QualType BaseType = Desugar(Context, Ty->getBaseType(), ShouldAKA);
- QT = Context.getObjCObjectType(BaseType, Ty->getTypeArgsAsWritten(),
- llvm::makeArrayRef(Ty->qual_begin(),
- Ty->getNumProtocols()),
- Ty->isKindOfTypeAsWritten());
+ QualType BaseType =
+ desugarForDiagnostic(Context, Ty->getBaseType(), ShouldAKA);
+ QT = Context.getObjCObjectType(
+ BaseType, Ty->getTypeArgsAsWritten(),
+ llvm::ArrayRef(Ty->qual_begin(), Ty->getNumProtocols()),
+ Ty->isKindOfTypeAsWritten());
}
}
@@ -239,9 +270,9 @@ ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
std::string S = Ty.getAsString(Context.getPrintingPolicy());
std::string CanS = CanTy.getAsString(Context.getPrintingPolicy());
- for (unsigned I = 0, E = QualTypeVals.size(); I != E; ++I) {
+ for (const intptr_t &QualTypeVal : QualTypeVals) {
QualType CompareTy =
- QualType::getFromOpaquePtr(reinterpret_cast<void*>(QualTypeVals[I]));
+ QualType::getFromOpaquePtr(reinterpret_cast<void *>(QualTypeVal));
if (CompareTy.isNull())
continue;
if (CompareTy == Ty)
@@ -251,7 +282,8 @@ ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
continue; // Same canonical types
std::string CompareS = CompareTy.getAsString(Context.getPrintingPolicy());
bool ShouldAKA = false;
- QualType CompareDesugar = Desugar(Context, CompareTy, ShouldAKA);
+ QualType CompareDesugar =
+ desugarForDiagnostic(Context, CompareTy, ShouldAKA);
std::string CompareDesugarStr =
CompareDesugar.getAsString(Context.getPrintingPolicy());
if (CompareS != S && CompareDesugarStr != S)
@@ -270,11 +302,11 @@ ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
// Check to see if we already desugared this type in this
// diagnostic. If so, don't do it again.
bool Repeated = false;
- for (unsigned i = 0, e = PrevArgs.size(); i != e; ++i) {
+ for (const auto &PrevArg : PrevArgs) {
// TODO: Handle ak_declcontext case.
- if (PrevArgs[i].first == DiagnosticsEngine::ak_qualtype) {
- void *Ptr = (void*)PrevArgs[i].second;
- QualType PrevTy(QualType::getFromOpaquePtr(Ptr));
+ if (PrevArg.first == DiagnosticsEngine::ak_qualtype) {
+ QualType PrevTy(
+ QualType::getFromOpaquePtr(reinterpret_cast<void *>(PrevArg.second)));
if (PrevTy == Ty) {
Repeated = true;
break;
@@ -286,7 +318,7 @@ ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
// sugar gives us something "significantly different".
if (!Repeated) {
bool ShouldAKA = false;
- QualType DesugaredTy = Desugar(Context, Ty, ShouldAKA);
+ QualType DesugaredTy = desugarForDiagnostic(Context, Ty, ShouldAKA);
if (ShouldAKA || ForceAKA) {
if (DesugaredTy == Ty) {
DesugaredTy = Ty.getCanonicalType();
@@ -308,7 +340,7 @@ ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
OS << "'" << S << "' (vector of " << VTy->getNumElements() << " '"
<< VTy->getElementType().getAsString(Context.getPrintingPolicy())
<< "' " << Values << ")";
- return OS.str();
+ return DecoratedString;
}
}
@@ -340,7 +372,7 @@ void clang::FormatASTNodeDiagnosticArgument(
default: llvm_unreachable("unknown ArgumentKind");
case DiagnosticsEngine::ak_addrspace: {
assert(Modifier.empty() && Argument.empty() &&
- "Invalid modifier for Qualfiers argument");
+ "Invalid modifier for Qualifiers argument");
auto S = Qualifiers::getAddrSpaceAsString(static_cast<LangAS>(Val));
if (S.empty()) {
@@ -355,7 +387,7 @@ void clang::FormatASTNodeDiagnosticArgument(
}
case DiagnosticsEngine::ak_qual: {
assert(Modifier.empty() && Argument.empty() &&
- "Invalid modifier for Qualfiers argument");
+ "Invalid modifier for Qualifiers argument");
Qualifiers Q(Qualifiers::fromOpaqueValue(Val));
auto S = Q.getAsString();
@@ -393,7 +425,7 @@ void clang::FormatASTNodeDiagnosticArgument(
Modifier = StringRef();
Argument = StringRef();
// Fall through
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case DiagnosticsEngine::ak_qualtype: {
assert(Modifier.empty() && Argument.empty() &&
@@ -507,7 +539,7 @@ class TemplateDiff {
bool ShowColor;
/// FromTemplateType - When single type printing is selected, this is the
- /// type to be be printed. When tree printing is selected, this type will
+ /// type to be printed. When tree printing is selected, this type will
/// show up first in the tree.
QualType FromTemplateType;
@@ -954,7 +986,7 @@ class TemplateDiff {
if (isEnd()) return;
// Set to first template argument. If not a parameter pack, done.
- TemplateArgument TA = TST->getArg(0);
+ TemplateArgument TA = TST->template_arguments()[0];
if (TA.getKind() != TemplateArgument::Pack) return;
// Start looking into the parameter pack.
@@ -975,7 +1007,7 @@ class TemplateDiff {
/// isEnd - Returns true if the iterator is one past the end.
bool isEnd() const {
assert(TST && "InternalIterator is invalid with a null TST.");
- return Index >= TST->getNumArgs();
+ return Index >= TST->template_arguments().size();
}
/// &operator++ - Increment the iterator to the next template argument.
@@ -995,11 +1027,11 @@ class TemplateDiff {
// Loop until a template argument is found, or the end is reached.
while (true) {
// Advance to the next template argument. Break if reached the end.
- if (++Index == TST->getNumArgs())
+ if (++Index == TST->template_arguments().size())
break;
// If the TemplateArgument is not a parameter pack, done.
- TemplateArgument TA = TST->getArg(Index);
+ TemplateArgument TA = TST->template_arguments()[Index];
if (TA.getKind() != TemplateArgument::Pack)
break;
@@ -1019,7 +1051,7 @@ class TemplateDiff {
assert(TST && "InternalIterator is invalid with a null TST.");
assert(!isEnd() && "Index exceeds number of arguments.");
if (CurrentTA == EndTA)
- return TST->getArg(Index);
+ return TST->template_arguments()[Index];
else
return *CurrentTA;
}
@@ -1088,6 +1120,9 @@ class TemplateDiff {
Ty->getAs<TemplateSpecializationType>())
return TST;
+ if (const auto* SubstType = Ty->getAs<SubstTemplateTypeParmType>())
+ Ty = SubstType->getReplacementType();
+
const RecordType *RT = Ty->getAs<RecordType>();
if (!RT)
@@ -1649,9 +1684,24 @@ class TemplateDiff {
: FromType.getAsString(Policy);
std::string ToTypeStr = ToType.isNull() ? "(no argument)"
: ToType.getAsString(Policy);
- // Switch to canonical typename if it is better.
+ // Print without ElaboratedType sugar if it is better.
// TODO: merge this with other aka printing above.
if (FromTypeStr == ToTypeStr) {
+ const auto *FromElTy = dyn_cast<ElaboratedType>(FromType),
+ *ToElTy = dyn_cast<ElaboratedType>(ToType);
+ if (FromElTy || ToElTy) {
+ std::string FromNamedTypeStr =
+ FromElTy ? FromElTy->getNamedType().getAsString(Policy)
+ : FromTypeStr;
+ std::string ToNamedTypeStr =
+ ToElTy ? ToElTy->getNamedType().getAsString(Policy) : ToTypeStr;
+ if (FromNamedTypeStr != ToNamedTypeStr) {
+ FromTypeStr = FromNamedTypeStr;
+ ToTypeStr = ToNamedTypeStr;
+ goto PrintTypes;
+ }
+ }
+ // Switch to canonical typename if it is better.
std::string FromCanTypeStr =
FromType.getCanonicalType().getAsString(Policy);
std::string ToCanTypeStr = ToType.getCanonicalType().getAsString(Policy);
@@ -1661,6 +1711,7 @@ class TemplateDiff {
}
}
+ PrintTypes:
if (PrintTree) OS << '[';
OS << (FromDefault ? "(default) " : "");
Bold();
@@ -1839,10 +1890,11 @@ class TemplateDiff {
// FIXME: Diffing the APValue would be neat.
// FIXME: Suppress this and use the full name of the declaration if the
// parameter is a pointer or reference.
- TPO->printAsInit(OS);
+ TPO->getType().getUnqualifiedType().print(OS, Policy);
+ TPO->printAsInit(OS, Policy);
return;
}
- VD->printName(OS);
+ VD->printName(OS, Policy);
return;
}
diff --git a/contrib/llvm-project/clang/lib/AST/ASTDumper.cpp b/contrib/llvm-project/clang/lib/AST/ASTDumper.cpp
index 3d368a0a7b63..cc9a84eecaad 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTDumper.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTDumper.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTDumper.h"
+#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclLookups.h"
#include "clang/AST/JSONNodeDumper.h"
@@ -19,9 +20,37 @@
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/Support/raw_ostream.h"
+
using namespace clang;
using namespace clang::comments;
+void ASTDumper::dumpInvalidDeclContext(const DeclContext *DC) {
+ NodeDumper.AddChild([=] {
+ if (!DC) {
+ ColorScope Color(OS, ShowColors, NullColor);
+ OS << "<<<NULL>>>";
+ return;
+ }
+ // An invalid DeclContext is one for which a dyn_cast() from a DeclContext
+ // pointer to a Decl pointer would fail an assertion or otherwise fall prey
+ // to undefined behavior as a result of an invalid associated DeclKind.
+ // Such invalidity is not supposed to happen of course, but, when it does,
+ // the information provided below is intended to provide some hints about
+ // what might have gone awry.
+ {
+ ColorScope Color(OS, ShowColors, DeclKindNameColor);
+ OS << "DeclContext";
+ }
+ NodeDumper.dumpPointer(DC);
+ OS << " <";
+ {
+ ColorScope Color(OS, ShowColors, DeclNameColor);
+ OS << "unrecognized Decl kind " << (unsigned)DC->getDeclKind();
+ }
+ OS << ">";
+ });
+}
+
void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) {
NodeDumper.AddChild([=] {
OS << "StoredDeclsMap ";
@@ -90,21 +119,13 @@ void ASTDumper::dumpTemplateDeclSpecialization(const SpecializationDecl *D,
// FIXME: The redecls() range sometimes has elements of a less-specific
// type. (In particular, ClassTemplateSpecializationDecl::redecls() gives
// us TagDecls, and should give CXXRecordDecls).
- auto *Redecl = dyn_cast<SpecializationDecl>(RedeclWithBadType);
- if (!Redecl) {
- // Found the injected-class-name for a class template. This will be dumped
- // as part of its surrounding class so we don't need to dump it here.
- assert(isa<CXXRecordDecl>(RedeclWithBadType) &&
- "expected an injected-class-name");
- continue;
- }
-
+ auto *Redecl = cast<SpecializationDecl>(RedeclWithBadType);
switch (Redecl->getTemplateSpecializationKind()) {
case TSK_ExplicitInstantiationDeclaration:
case TSK_ExplicitInstantiationDefinition:
if (!DumpExplicitInst)
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case TSK_Undeclared:
case TSK_ImplicitInstantiation:
if (DumpRefOnly)
@@ -208,6 +229,31 @@ LLVM_DUMP_METHOD void Decl::dumpColor() const {
P.Visit(this);
}
+LLVM_DUMP_METHOD void DeclContext::dumpAsDecl() const {
+ dumpAsDecl(nullptr);
+}
+
+LLVM_DUMP_METHOD void DeclContext::dumpAsDecl(const ASTContext *Ctx) const {
+ // By design, DeclContext is required to be a base class of some class that
+ // derives from Decl. Thus, it should always be possible to dyn_cast() from
+ // a DeclContext pointer to a Decl pointer and Decl::castFromDeclContext()
+ // asserts that to be the case. Since this function is intended for use in a
+ // debugger, it performs an additional check in order to prevent a failed
+ // cast and assertion. If that check fails, then the (invalid) DeclContext
+ // is dumped with an indication of its invalidity.
+ if (hasValidDeclKind()) {
+ const auto *D = cast<Decl>(this);
+ D->dump();
+ } else {
+ // If an ASTContext is not available, a less capable ASTDumper is
+ // constructed for which color diagnostics are, regrettably, disabled.
+ ASTDumper P = Ctx ? ASTDumper(llvm::errs(), *Ctx,
+ Ctx->getDiagnostics().getShowColors())
+ : ASTDumper(llvm::errs(), /*ShowColors*/ false);
+ P.dumpInvalidDeclContext(this);
+ }
+}
+
LLVM_DUMP_METHOD void DeclContext::dumpLookups() const {
dumpLookups(llvm::errs());
}
@@ -288,3 +334,17 @@ LLVM_DUMP_METHOD void APValue::dump(raw_ostream &OS,
Context.getDiagnostics().getShowColors());
Dumper.Visit(*this, /*Ty=*/Context.getPointerType(Context.CharTy));
}
+
+//===----------------------------------------------------------------------===//
+// ConceptReference method implementations
+//===----------------------------------------------------------------------===//
+
+LLVM_DUMP_METHOD void ConceptReference::dump() const {
+ dump(llvm::errs());
+}
+
+LLVM_DUMP_METHOD void ConceptReference::dump(raw_ostream &OS) const {
+ auto &Ctx = getNamedConcept()->getASTContext();
+ ASTDumper P(OS, Ctx, Ctx.getDiagnostics().getShowColors());
+ P.Visit(this);
+}
diff --git a/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp b/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
index 787e02029dae..12734d62ed9f 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
@@ -12,9 +12,9 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTImporter.h"
-#include "clang/AST/ASTImporterSharedState.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/ASTImporterSharedState.h"
#include "clang/AST/ASTStructuralEquivalence.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
@@ -56,10 +56,8 @@
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
-#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
@@ -68,6 +66,7 @@
#include <cassert>
#include <cstddef>
#include <memory>
+#include <optional>
#include <type_traits>
#include <utility>
@@ -76,6 +75,7 @@ namespace clang {
using llvm::make_error;
using llvm::Error;
using llvm::Expected;
+ using ExpectedTypePtr = llvm::Expected<const Type *>;
using ExpectedType = llvm::Expected<QualType>;
using ExpectedStmt = llvm::Expected<Stmt *>;
using ExpectedExpr = llvm::Expected<Expr *>;
@@ -83,7 +83,7 @@ namespace clang {
using ExpectedSLoc = llvm::Expected<SourceLocation>;
using ExpectedName = llvm::Expected<DeclarationName>;
- std::string ImportError::toString() const {
+ std::string ASTImportError::toString() const {
// FIXME: Improve error texts.
switch (Error) {
case NameConflict:
@@ -97,15 +97,13 @@ namespace clang {
return "Invalid error code.";
}
- void ImportError::log(raw_ostream &OS) const {
- OS << toString();
- }
+ void ASTImportError::log(raw_ostream &OS) const { OS << toString(); }
- std::error_code ImportError::convertToErrorCode() const {
+ std::error_code ASTImportError::convertToErrorCode() const {
llvm_unreachable("Function not implemented.");
}
- char ImportError::ID;
+ char ASTImportError::ID;
template <class T>
SmallVector<Decl *, 2>
@@ -137,6 +135,46 @@ namespace clang {
To->setIsUsed();
}
+ /// How to handle import errors that occur when import of a child declaration
+ /// of a DeclContext fails.
+ class ChildErrorHandlingStrategy {
+ /// This context is imported (in the 'from' domain).
+ /// It is nullptr if a non-DeclContext is imported.
+ const DeclContext *const FromDC;
+ /// Ignore import errors of the children.
+ /// If true, the context can be imported successfully if a child
+ /// of it failed to import. Otherwise the import errors of the child nodes
+ /// are accumulated (joined) into the import error object of the parent.
+ /// (Import of a parent can fail in other ways.)
+ bool const IgnoreChildErrors;
+
+ public:
+ ChildErrorHandlingStrategy(const DeclContext *FromDC)
+ : FromDC(FromDC), IgnoreChildErrors(!isa<TagDecl>(FromDC)) {}
+ ChildErrorHandlingStrategy(const Decl *FromD)
+ : FromDC(dyn_cast<DeclContext>(FromD)),
+ IgnoreChildErrors(!isa<TagDecl>(FromD)) {}
+
+ /// Process the import result of a child (of the current declaration).
+ /// \param ResultErr The import error that can be used as result of
+ /// importing the parent. This may be changed by the function.
+ /// \param ChildErr Result of importing a child. Can be success or error.
+ void handleChildImportResult(Error &ResultErr, Error &&ChildErr) {
+ if (ChildErr && !IgnoreChildErrors)
+ ResultErr = joinErrors(std::move(ResultErr), std::move(ChildErr));
+ else
+ consumeError(std::move(ChildErr));
+ }
+
+ /// Determine if import failure of a child does not cause import failure of
+ /// its parent.
+ bool ignoreChildErrorOnParent(Decl *FromChildD) const {
+ if (!IgnoreChildErrors || !FromDC)
+ return false;
+ return FromDC->containsDecl(FromChildD);
+ }
+ };
+
class ASTNodeImporter : public TypeVisitor<ASTNodeImporter, ExpectedType>,
public DeclVisitor<ASTNodeImporter, ExpectedDecl>,
public StmtVisitor<ASTNodeImporter, ExpectedStmt> {
@@ -144,13 +182,13 @@ namespace clang {
// Use this instead of Importer.importInto .
template <typename ImportT>
- LLVM_NODISCARD Error importInto(ImportT &To, const ImportT &From) {
+ [[nodiscard]] Error importInto(ImportT &To, const ImportT &From) {
return Importer.importInto(To, From);
}
// Use this to import pointers of specific type.
template <typename ImportT>
- LLVM_NODISCARD Error importInto(ImportT *&To, ImportT *From) {
+ [[nodiscard]] Error importInto(ImportT *&To, ImportT *From) {
auto ToOrErr = Importer.Import(From);
if (ToOrErr)
To = cast_or_null<ImportT>(*ToOrErr);
@@ -160,7 +198,9 @@ namespace clang {
// Call the import function of ASTImporter for a baseclass of type `T` and
// cast the return value to `T`.
template <typename T>
- Expected<T *> import(T *From) {
+ auto import(T *From)
+ -> std::conditional_t<std::is_base_of_v<Type, T>, Expected<const T *>,
+ Expected<T *>> {
auto ToOrErr = Importer.Import(From);
if (!ToOrErr)
return ToOrErr.takeError();
@@ -168,7 +208,7 @@ namespace clang {
}
template <typename T>
- Expected<T *> import(const T *From) {
+ auto import(const T *From) {
return import(const_cast<T *>(From));
}
@@ -178,30 +218,14 @@ namespace clang {
return Importer.Import(From);
}
- // Import an Optional<T> by importing the contained T, if any.
- template<typename T>
- Expected<Optional<T>> import(Optional<T> From) {
+ // Import an std::optional<T> by importing the contained T, if any.
+ template <typename T>
+ Expected<std::optional<T>> import(std::optional<T> From) {
if (!From)
- return Optional<T>();
+ return std::nullopt;
return import(*From);
}
- // Helper for chaining together multiple imports. If an error is detected,
- // subsequent imports will return default constructed nodes, so that failure
- // can be detected with a single conditional branch after a sequence of
- // imports.
- template <typename T> T importChecked(Error &Err, const T &From) {
- // Don't attempt to import nodes if we hit an error earlier.
- if (Err)
- return T{};
- Expected<T> MaybeVal = import(From);
- if (!MaybeVal) {
- Err = MaybeVal.takeError();
- return T{};
- }
- return *MaybeVal;
- }
-
ExplicitSpecifier importExplicitSpecifier(Error &Err,
ExplicitSpecifier ESpec);
@@ -219,8 +243,8 @@ namespace clang {
// then to the already imported Decl. Returns a bool value set to true if
// the `FromD` had been imported before.
template <typename ToDeclT, typename FromDeclT, typename... Args>
- LLVM_NODISCARD bool GetImportedOrCreateDecl(ToDeclT *&ToD, FromDeclT *FromD,
- Args &&... args) {
+ [[nodiscard]] bool GetImportedOrCreateDecl(ToDeclT *&ToD, FromDeclT *FromD,
+ Args &&...args) {
// There may be several overloads of ToDeclT::Create. We must make sure
// to call the one which would be chosen by the arguments, thus we use a
// wrapper for the overload set.
@@ -235,8 +259,8 @@ namespace clang {
// GetImportedOrCreateDecl<TypeAliasDecl>(ToTypedef, FromD, ...);
template <typename NewDeclT, typename ToDeclT, typename FromDeclT,
typename... Args>
- LLVM_NODISCARD bool GetImportedOrCreateDecl(ToDeclT *&ToD, FromDeclT *FromD,
- Args &&... args) {
+ [[nodiscard]] bool GetImportedOrCreateDecl(ToDeclT *&ToD, FromDeclT *FromD,
+ Args &&...args) {
CallOverloadedCreateFun<NewDeclT> OC;
return GetImportedOrCreateSpecialDecl(ToD, OC, FromD,
std::forward<Args>(args)...);
@@ -245,9 +269,9 @@ namespace clang {
// used, e.g. CXXRecordDecl::CreateLambda .
template <typename ToDeclT, typename CreateFunT, typename FromDeclT,
typename... Args>
- LLVM_NODISCARD bool
+ [[nodiscard]] bool
GetImportedOrCreateSpecialDecl(ToDeclT *&ToD, CreateFunT CreateFun,
- FromDeclT *FromD, Args &&... args) {
+ FromDeclT *FromD, Args &&...args) {
if (Importer.getImportDeclErrorIfAny(FromD)) {
ToD = nullptr;
return true; // Already imported but with error.
@@ -258,6 +282,7 @@ namespace clang {
ToD = CreateFun(std::forward<Args>(args)...);
// Keep track of imported Decls.
Importer.RegisterImportedDecl(FromD, ToD);
+ Importer.SharedState->markAsNewDecl(ToD);
InitializeImportedDecl(FromD, ToD);
return false; // A new Decl is created.
}
@@ -313,11 +338,8 @@ namespace clang {
auto *ToNamed = cast<NamedDecl>(ToD);
DeclContextLookupResult FromLookup =
FromDC->lookup(FromNamed->getDeclName());
- for (NamedDecl *ND : FromLookup)
- if (ND == FromNamed) {
- ToDC->makeDeclVisibleInContext(ToNamed);
- break;
- }
+ if (llvm::is_contained(FromLookup, FromNamed))
+ ToDC->makeDeclVisibleInContext(ToNamed);
}
}
}
@@ -346,53 +368,9 @@ namespace clang {
// Importing types
ExpectedType VisitType(const Type *T);
- ExpectedType VisitAtomicType(const AtomicType *T);
- ExpectedType VisitBuiltinType(const BuiltinType *T);
- ExpectedType VisitDecayedType(const DecayedType *T);
- ExpectedType VisitComplexType(const ComplexType *T);
- ExpectedType VisitPointerType(const PointerType *T);
- ExpectedType VisitBlockPointerType(const BlockPointerType *T);
- ExpectedType VisitLValueReferenceType(const LValueReferenceType *T);
- ExpectedType VisitRValueReferenceType(const RValueReferenceType *T);
- ExpectedType VisitMemberPointerType(const MemberPointerType *T);
- ExpectedType VisitConstantArrayType(const ConstantArrayType *T);
- ExpectedType VisitIncompleteArrayType(const IncompleteArrayType *T);
- ExpectedType VisitVariableArrayType(const VariableArrayType *T);
- ExpectedType VisitDependentSizedArrayType(const DependentSizedArrayType *T);
- // FIXME: DependentSizedExtVectorType
- ExpectedType VisitVectorType(const VectorType *T);
- ExpectedType VisitExtVectorType(const ExtVectorType *T);
- ExpectedType VisitFunctionNoProtoType(const FunctionNoProtoType *T);
- ExpectedType VisitFunctionProtoType(const FunctionProtoType *T);
- ExpectedType VisitUnresolvedUsingType(const UnresolvedUsingType *T);
- ExpectedType VisitParenType(const ParenType *T);
- ExpectedType VisitTypedefType(const TypedefType *T);
- ExpectedType VisitTypeOfExprType(const TypeOfExprType *T);
- // FIXME: DependentTypeOfExprType
- ExpectedType VisitTypeOfType(const TypeOfType *T);
- ExpectedType VisitDecltypeType(const DecltypeType *T);
- ExpectedType VisitUnaryTransformType(const UnaryTransformType *T);
- ExpectedType VisitAutoType(const AutoType *T);
- ExpectedType VisitDeducedTemplateSpecializationType(
- const DeducedTemplateSpecializationType *T);
- ExpectedType VisitInjectedClassNameType(const InjectedClassNameType *T);
- // FIXME: DependentDecltypeType
- ExpectedType VisitRecordType(const RecordType *T);
- ExpectedType VisitEnumType(const EnumType *T);
- ExpectedType VisitAttributedType(const AttributedType *T);
- ExpectedType VisitTemplateTypeParmType(const TemplateTypeParmType *T);
- ExpectedType VisitSubstTemplateTypeParmType(
- const SubstTemplateTypeParmType *T);
- ExpectedType VisitTemplateSpecializationType(
- const TemplateSpecializationType *T);
- ExpectedType VisitElaboratedType(const ElaboratedType *T);
- ExpectedType VisitDependentNameType(const DependentNameType *T);
- ExpectedType VisitPackExpansionType(const PackExpansionType *T);
- ExpectedType VisitDependentTemplateSpecializationType(
- const DependentTemplateSpecializationType *T);
- ExpectedType VisitObjCInterfaceType(const ObjCInterfaceType *T);
- ExpectedType VisitObjCObjectType(const ObjCObjectType *T);
- ExpectedType VisitObjCObjectPointerType(const ObjCObjectPointerType *T);
+#define TYPE(Class, Base) \
+ ExpectedType Visit##Class##Type(const Class##Type *T);
+#include "clang/AST/TypeNodes.inc"
// Importing declarations
Error ImportDeclParts(NamedDecl *D, DeclarationName &Name, NamedDecl *&ToD,
@@ -408,6 +386,7 @@ namespace clang {
Decl *From, DeclContext *&ToDC, DeclContext *&ToLexicalDC);
Error ImportImplicitMethods(const CXXRecordDecl *From, CXXRecordDecl *To);
+ Error ImportFieldDeclDefinition(const FieldDecl *From, const FieldDecl *To);
Expected<CXXCastPath> ImportCastPath(CastExpr *E);
Expected<APValue> ImportAPValue(const APValue &FromValue);
@@ -444,9 +423,8 @@ namespace clang {
Error ImportDefinition(
ObjCProtocolDecl *From, ObjCProtocolDecl *To,
ImportDefinitionKind Kind = IDK_Default);
- Error ImportTemplateArguments(
- const TemplateArgument *FromArgs, unsigned NumFromArgs,
- SmallVectorImpl<TemplateArgument> &ToArgs);
+ Error ImportTemplateArguments(ArrayRef<TemplateArgument> FromArgs,
+ SmallVectorImpl<TemplateArgument> &ToArgs);
Expected<TemplateArgument>
ImportTemplateArgument(const TemplateArgument &From);
@@ -475,21 +453,14 @@ namespace clang {
Error ImportDefaultArgOfParmVarDecl(const ParmVarDecl *FromParam,
ParmVarDecl *ToParam);
+ Expected<InheritedConstructor>
+ ImportInheritedConstructor(const InheritedConstructor &From);
+
template <typename T>
bool hasSameVisibilityContextAndLinkage(T *Found, T *From);
- bool IsStructuralMatch(Decl *From, Decl *To, bool Complain);
- bool IsStructuralMatch(RecordDecl *FromRecord, RecordDecl *ToRecord,
- bool Complain = true);
- bool IsStructuralMatch(VarDecl *FromVar, VarDecl *ToVar,
- bool Complain = true);
- bool IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToRecord);
- bool IsStructuralMatch(EnumConstantDecl *FromEC, EnumConstantDecl *ToEC);
- bool IsStructuralMatch(FunctionTemplateDecl *From,
- FunctionTemplateDecl *To);
- bool IsStructuralMatch(FunctionDecl *From, FunctionDecl *To);
- bool IsStructuralMatch(ClassTemplateDecl *From, ClassTemplateDecl *To);
- bool IsStructuralMatch(VarTemplateDecl *From, VarTemplateDecl *To);
+ bool IsStructuralMatch(Decl *From, Decl *To, bool Complain = true,
+ bool IgnoreTemplateParmDepth = false);
ExpectedDecl VisitDecl(Decl *D);
ExpectedDecl VisitImportDecl(ImportDecl *D);
ExpectedDecl VisitEmptyDecl(EmptyDecl *D);
@@ -528,6 +499,7 @@ namespace clang {
ExpectedDecl VisitUsingDecl(UsingDecl *D);
ExpectedDecl VisitUsingShadowDecl(UsingShadowDecl *D);
ExpectedDecl VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
+ ExpectedDecl VisitUsingPackDecl(UsingPackDecl *D);
ExpectedDecl ImportUsingShadowDecls(BaseUsingDecl *D, BaseUsingDecl *ToSI);
ExpectedDecl VisitUsingEnumDecl(UsingEnumDecl *D);
ExpectedDecl VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D);
@@ -597,6 +569,8 @@ namespace clang {
ExpectedStmt VisitSourceLocExpr(SourceLocExpr *E);
ExpectedStmt VisitVAArgExpr(VAArgExpr *E);
ExpectedStmt VisitChooseExpr(ChooseExpr *E);
+ ExpectedStmt VisitConvertVectorExpr(ConvertVectorExpr *E);
+ ExpectedStmt VisitShuffleVectorExpr(ShuffleVectorExpr *E);
ExpectedStmt VisitGNUNullExpr(GNUNullExpr *E);
ExpectedStmt VisitGenericSelectionExpr(GenericSelectionExpr *E);
ExpectedStmt VisitPredefinedExpr(PredefinedExpr *E);
@@ -622,6 +596,7 @@ namespace clang {
ExpectedStmt VisitBinaryOperator(BinaryOperator *E);
ExpectedStmt VisitConditionalOperator(ConditionalOperator *E);
ExpectedStmt VisitBinaryConditionalOperator(BinaryConditionalOperator *E);
+ ExpectedStmt VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E);
ExpectedStmt VisitOpaqueValueExpr(OpaqueValueExpr *E);
ExpectedStmt VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E);
ExpectedStmt VisitExpressionTraitExpr(ExpressionTraitExpr *E);
@@ -667,6 +642,22 @@ namespace clang {
ExpectedStmt VisitCXXTypeidExpr(CXXTypeidExpr *E);
ExpectedStmt VisitCXXFoldExpr(CXXFoldExpr *E);
+ // Helper for chaining together multiple imports. If an error is detected,
+ // subsequent imports will return default constructed nodes, so that failure
+ // can be detected with a single conditional branch after a sequence of
+ // imports.
+ template <typename T> T importChecked(Error &Err, const T &From) {
+ // Don't attempt to import nodes if we hit an error earlier.
+ if (Err)
+ return T{};
+ Expected<T> MaybeVal = import(From);
+ if (!MaybeVal) {
+ Err = MaybeVal.takeError();
+ return T{};
+ }
+ return *MaybeVal;
+ }
+
template<typename IIter, typename OIter>
Error ImportArrayChecked(IIter Ibegin, IIter Iend, OIter Obegin) {
using ItemT = std::remove_reference_t<decltype(*Obegin)>;
@@ -754,9 +745,8 @@ ASTNodeImporter::ImportFunctionTemplateWithTemplateArgsFromSpecialization(
return std::move(Err);
// Import template arguments.
- auto TemplArgs = FTSInfo->TemplateArguments->asArray();
- if (Error Err = ImportTemplateArguments(TemplArgs.data(), TemplArgs.size(),
- std::get<1>(Result)))
+ if (Error Err = ImportTemplateArguments(FTSInfo->TemplateArguments->asArray(),
+ std::get<1>(Result)))
return std::move(Err);
return Result;
@@ -803,7 +793,8 @@ ASTNodeImporter::import(const TemplateArgument &From) {
ExpectedType ToTypeOrErr = import(From.getAsType());
if (!ToTypeOrErr)
return ToTypeOrErr.takeError();
- return TemplateArgument(*ToTypeOrErr);
+ return TemplateArgument(*ToTypeOrErr, /*isNullPtr*/ false,
+ From.getIsDefaulted());
}
case TemplateArgument::Integral: {
@@ -820,14 +811,27 @@ ASTNodeImporter::import(const TemplateArgument &From) {
ExpectedType ToTypeOrErr = import(From.getParamTypeForDecl());
if (!ToTypeOrErr)
return ToTypeOrErr.takeError();
- return TemplateArgument(*ToOrErr, *ToTypeOrErr);
+ return TemplateArgument(dyn_cast<ValueDecl>((*ToOrErr)->getCanonicalDecl()),
+ *ToTypeOrErr, From.getIsDefaulted());
}
case TemplateArgument::NullPtr: {
ExpectedType ToTypeOrErr = import(From.getNullPtrType());
if (!ToTypeOrErr)
return ToTypeOrErr.takeError();
- return TemplateArgument(*ToTypeOrErr, /*isNullPtr*/true);
+ return TemplateArgument(*ToTypeOrErr, /*isNullPtr*/ true,
+ From.getIsDefaulted());
+ }
+
+ case TemplateArgument::StructuralValue: {
+ ExpectedType ToTypeOrErr = import(From.getStructuralValueType());
+ if (!ToTypeOrErr)
+ return ToTypeOrErr.takeError();
+ Expected<APValue> ToValueOrErr = import(From.getAsStructuralValue());
+ if (!ToValueOrErr)
+ return ToValueOrErr.takeError();
+ return TemplateArgument(Importer.getToContext(), *ToTypeOrErr,
+ *ToValueOrErr);
}
case TemplateArgument::Template: {
@@ -835,7 +839,7 @@ ASTNodeImporter::import(const TemplateArgument &From) {
if (!ToTemplateOrErr)
return ToTemplateOrErr.takeError();
- return TemplateArgument(*ToTemplateOrErr);
+ return TemplateArgument(*ToTemplateOrErr, From.getIsDefaulted());
}
case TemplateArgument::TemplateExpansion: {
@@ -844,25 +848,24 @@ ASTNodeImporter::import(const TemplateArgument &From) {
if (!ToTemplateOrErr)
return ToTemplateOrErr.takeError();
- return TemplateArgument(
- *ToTemplateOrErr, From.getNumTemplateExpansions());
+ return TemplateArgument(*ToTemplateOrErr, From.getNumTemplateExpansions(),
+ From.getIsDefaulted());
}
case TemplateArgument::Expression:
if (ExpectedExpr ToExpr = import(From.getAsExpr()))
- return TemplateArgument(*ToExpr);
+ return TemplateArgument(*ToExpr, From.getIsDefaulted());
else
return ToExpr.takeError();
case TemplateArgument::Pack: {
SmallVector<TemplateArgument, 2> ToPack;
ToPack.reserve(From.pack_size());
- if (Error Err = ImportTemplateArguments(
- From.pack_begin(), From.pack_size(), ToPack))
+ if (Error Err = ImportTemplateArguments(From.pack_elements(), ToPack))
return std::move(Err);
return TemplateArgument(
- llvm::makeArrayRef(ToPack).copy(Importer.getToContext()));
+ llvm::ArrayRef(ToPack).copy(Importer.getToContext()));
}
}
@@ -942,7 +945,8 @@ ASTNodeImporter::import(const Designator &D) {
if (!ToFieldLocOrErr)
return ToFieldLocOrErr.takeError();
- return Designator(ToFieldName, *ToDotLocOrErr, *ToFieldLocOrErr);
+ return DesignatedInitExpr::Designator::CreateFieldDesignator(
+ ToFieldName, *ToDotLocOrErr, *ToFieldLocOrErr);
}
ExpectedSLoc ToLBracketLocOrErr = import(D.getLBracketLoc());
@@ -954,22 +958,50 @@ ASTNodeImporter::import(const Designator &D) {
return ToRBracketLocOrErr.takeError();
if (D.isArrayDesignator())
- return Designator(D.getFirstExprIndex(),
- *ToLBracketLocOrErr, *ToRBracketLocOrErr);
+ return Designator::CreateArrayDesignator(D.getArrayIndex(),
+ *ToLBracketLocOrErr,
+ *ToRBracketLocOrErr);
ExpectedSLoc ToEllipsisLocOrErr = import(D.getEllipsisLoc());
if (!ToEllipsisLocOrErr)
return ToEllipsisLocOrErr.takeError();
assert(D.isArrayRangeDesignator());
- return Designator(
- D.getFirstExprIndex(), *ToLBracketLocOrErr, *ToEllipsisLocOrErr,
+ return Designator::CreateArrayRangeDesignator(
+ D.getArrayIndex(), *ToLBracketLocOrErr, *ToEllipsisLocOrErr,
*ToRBracketLocOrErr);
}
template <>
+Expected<ConceptReference *> ASTNodeImporter::import(ConceptReference *From) {
+ Error Err = Error::success();
+ auto ToNNS = importChecked(Err, From->getNestedNameSpecifierLoc());
+ auto ToTemplateKWLoc = importChecked(Err, From->getTemplateKWLoc());
+ auto ToConceptNameLoc =
+ importChecked(Err, From->getConceptNameInfo().getLoc());
+ auto ToConceptName = importChecked(Err, From->getConceptNameInfo().getName());
+ auto ToFoundDecl = importChecked(Err, From->getFoundDecl());
+ auto ToNamedConcept = importChecked(Err, From->getNamedConcept());
+ if (Err)
+ return std::move(Err);
+ TemplateArgumentListInfo ToTAInfo;
+ const auto *ASTTemplateArgs = From->getTemplateArgsAsWritten();
+ if (ASTTemplateArgs)
+ if (Error Err = ImportTemplateArgumentListInfo(*ASTTemplateArgs, ToTAInfo))
+ return std::move(Err);
+ auto *ConceptRef = ConceptReference::Create(
+ Importer.getToContext(), ToNNS, ToTemplateKWLoc,
+ DeclarationNameInfo(ToConceptName, ToConceptNameLoc), ToFoundDecl,
+ ToNamedConcept,
+ ASTTemplateArgs ? ASTTemplateArgumentListInfo::Create(
+ Importer.getToContext(), ToTAInfo)
+ : nullptr);
+ return ConceptRef;
+}
+
+template <>
Expected<LambdaCapture> ASTNodeImporter::import(const LambdaCapture &From) {
- VarDecl *Var = nullptr;
+ ValueDecl *Var = nullptr;
if (From.capturesVariable()) {
if (auto VarOrErr = import(From.getCapturedVar()))
Var = *VarOrErr;
@@ -1029,7 +1061,7 @@ using namespace clang;
ExpectedType ASTNodeImporter::VisitType(const Type *T) {
Importer.FromDiag(SourceLocation(), diag::err_unsupported_ast_node)
<< T->getTypeClassName();
- return make_error<ImportError>(ImportError::UnsupportedConstruct);
+ return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct);
}
ExpectedType ASTNodeImporter::VisitAtomicType(const AtomicType *T){
@@ -1062,6 +1094,10 @@ ExpectedType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) {
case BuiltinType::Id: \
return Importer.getToContext().SingletonId;
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) \
+ case BuiltinType::Id: \
+ return Importer.getToContext().SingletonId;
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define SHARED_SINGLETON_TYPE(Expansion)
#define BUILTIN_TYPE(Id, SingletonId) \
case BuiltinType::Id: return Importer.getToContext().SingletonId;
@@ -1161,12 +1197,12 @@ ASTNodeImporter::VisitMemberPointerType(const MemberPointerType *T) {
if (!ToPointeeTypeOrErr)
return ToPointeeTypeOrErr.takeError();
- ExpectedType ClassTypeOrErr = import(QualType(T->getClass(), 0));
+ ExpectedTypePtr ClassTypeOrErr = import(T->getClass());
if (!ClassTypeOrErr)
return ClassTypeOrErr.takeError();
- return Importer.getToContext().getMemberPointerType(
- *ToPointeeTypeOrErr, (*ClassTypeOrErr).getTypePtr());
+ return Importer.getToContext().getMemberPointerType(*ToPointeeTypeOrErr,
+ *ClassTypeOrErr);
}
ExpectedType
@@ -1222,6 +1258,18 @@ ExpectedType ASTNodeImporter::VisitDependentSizedArrayType(
T->getIndexTypeCVRQualifiers(), ToBracketsRange);
}
+ExpectedType ASTNodeImporter::VisitDependentSizedExtVectorType(
+ const DependentSizedExtVectorType *T) {
+ Error Err = Error::success();
+ QualType ToElementType = importChecked(Err, T->getElementType());
+ Expr *ToSizeExpr = importChecked(Err, T->getSizeExpr());
+ SourceLocation ToAttrLoc = importChecked(Err, T->getAttributeLoc());
+ if (Err)
+ return std::move(Err);
+ return Importer.getToContext().getDependentSizedExtVectorType(
+ ToElementType, ToSizeExpr, ToAttrLoc);
+}
+
ExpectedType ASTNodeImporter::VisitVectorType(const VectorType *T) {
ExpectedType ToElementTypeOrErr = import(T->getElementType());
if (!ToElementTypeOrErr)
@@ -1326,23 +1374,41 @@ ExpectedType ASTNodeImporter::VisitTypedefType(const TypedefType *T) {
if (!ToDeclOrErr)
return ToDeclOrErr.takeError();
- return Importer.getToContext().getTypeDeclType(*ToDeclOrErr);
+ TypedefNameDecl *ToDecl = *ToDeclOrErr;
+ if (ToDecl->getTypeForDecl())
+ return QualType(ToDecl->getTypeForDecl(), 0);
+
+ ExpectedType ToUnderlyingTypeOrErr = import(T->desugar());
+ if (!ToUnderlyingTypeOrErr)
+ return ToUnderlyingTypeOrErr.takeError();
+
+ return Importer.getToContext().getTypedefType(ToDecl, *ToUnderlyingTypeOrErr);
}
ExpectedType ASTNodeImporter::VisitTypeOfExprType(const TypeOfExprType *T) {
ExpectedExpr ToExprOrErr = import(T->getUnderlyingExpr());
if (!ToExprOrErr)
return ToExprOrErr.takeError();
-
- return Importer.getToContext().getTypeOfExprType(*ToExprOrErr);
+ return Importer.getToContext().getTypeOfExprType(*ToExprOrErr, T->getKind());
}
ExpectedType ASTNodeImporter::VisitTypeOfType(const TypeOfType *T) {
- ExpectedType ToUnderlyingTypeOrErr = import(T->getUnderlyingType());
+ ExpectedType ToUnderlyingTypeOrErr = import(T->getUnmodifiedType());
if (!ToUnderlyingTypeOrErr)
return ToUnderlyingTypeOrErr.takeError();
+ return Importer.getToContext().getTypeOfType(*ToUnderlyingTypeOrErr,
+ T->getKind());
+}
- return Importer.getToContext().getTypeOfType(*ToUnderlyingTypeOrErr);
+ExpectedType ASTNodeImporter::VisitUsingType(const UsingType *T) {
+ Expected<UsingShadowDecl *> FoundOrErr = import(T->getFoundDecl());
+ if (!FoundOrErr)
+ return FoundOrErr.takeError();
+ Expected<QualType> UnderlyingOrErr = import(T->getUnderlyingType());
+ if (!UnderlyingOrErr)
+ return UnderlyingOrErr.takeError();
+
+ return Importer.getToContext().getUsingType(*FoundOrErr, *UnderlyingOrErr);
}
ExpectedType ASTNodeImporter::VisitDecltypeType(const DecltypeType *T) {
@@ -1384,9 +1450,7 @@ ExpectedType ASTNodeImporter::VisitAutoType(const AutoType *T) {
return ToTypeConstraintConcept.takeError();
SmallVector<TemplateArgument, 2> ToTemplateArgs;
- ArrayRef<TemplateArgument> FromTemplateArgs = T->getTypeConstraintArguments();
- if (Error Err = ImportTemplateArguments(FromTemplateArgs.data(),
- FromTemplateArgs.size(),
+ if (Error Err = ImportTemplateArguments(T->getTypeConstraintArguments(),
ToTemplateArgs))
return std::move(Err);
@@ -1416,20 +1480,11 @@ ExpectedType ASTNodeImporter::VisitInjectedClassNameType(
if (!ToDeclOrErr)
return ToDeclOrErr.takeError();
- ExpectedType ToInjTypeOrErr = import(T->getInjectedSpecializationType());
- if (!ToInjTypeOrErr)
- return ToInjTypeOrErr.takeError();
-
- // FIXME: ASTContext::getInjectedClassNameType is not suitable for AST reading
- // See comments in InjectedClassNameType definition for details
- // return Importer.getToContext().getInjectedClassNameType(D, InjType);
- enum {
- TypeAlignmentInBits = 4,
- TypeAlignment = 1 << TypeAlignmentInBits
- };
-
- return QualType(new (Importer.getToContext(), TypeAlignment)
- InjectedClassNameType(*ToDeclOrErr, *ToInjTypeOrErr), 0);
+ // The InjectedClassNameType is created in VisitRecordDecl when the
+ // T->getDecl() is imported. Here we can return the existing type.
+ const Type *Ty = (*ToDeclOrErr)->getTypeForDecl();
+ assert(Ty && isa<InjectedClassNameType>(Ty));
+ return QualType(Ty, 0);
}
ExpectedType ASTNodeImporter::VisitRecordType(const RecordType *T) {
@@ -1472,18 +1527,31 @@ ExpectedType ASTNodeImporter::VisitTemplateTypeParmType(
ExpectedType ASTNodeImporter::VisitSubstTemplateTypeParmType(
const SubstTemplateTypeParmType *T) {
- ExpectedType ReplacedOrErr = import(QualType(T->getReplacedParameter(), 0));
+ Expected<Decl *> ReplacedOrErr = import(T->getAssociatedDecl());
if (!ReplacedOrErr)
return ReplacedOrErr.takeError();
- const TemplateTypeParmType *Replaced =
- cast<TemplateTypeParmType>((*ReplacedOrErr).getTypePtr());
ExpectedType ToReplacementTypeOrErr = import(T->getReplacementType());
if (!ToReplacementTypeOrErr)
return ToReplacementTypeOrErr.takeError();
return Importer.getToContext().getSubstTemplateTypeParmType(
- Replaced, (*ToReplacementTypeOrErr).getCanonicalType());
+ *ToReplacementTypeOrErr, *ReplacedOrErr, T->getIndex(),
+ T->getPackIndex());
+}
+
+ExpectedType ASTNodeImporter::VisitSubstTemplateTypeParmPackType(
+ const SubstTemplateTypeParmPackType *T) {
+ Expected<Decl *> ReplacedOrErr = import(T->getAssociatedDecl());
+ if (!ReplacedOrErr)
+ return ReplacedOrErr.takeError();
+
+ Expected<TemplateArgument> ToArgumentPack = import(T->getArgumentPack());
+ if (!ToArgumentPack)
+ return ToArgumentPack.takeError();
+
+ return Importer.getToContext().getSubstTemplateTypeParmPackType(
+ *ReplacedOrErr, T->getIndex(), T->getFinal(), *ToArgumentPack);
}
ExpectedType ASTNodeImporter::VisitTemplateSpecializationType(
@@ -1493,12 +1561,12 @@ ExpectedType ASTNodeImporter::VisitTemplateSpecializationType(
return ToTemplateOrErr.takeError();
SmallVector<TemplateArgument, 2> ToTemplateArgs;
- if (Error Err = ImportTemplateArguments(
- T->getArgs(), T->getNumArgs(), ToTemplateArgs))
+ if (Error Err =
+ ImportTemplateArguments(T->template_arguments(), ToTemplateArgs))
return std::move(Err);
QualType ToCanonType;
- if (!QualType(T, 0).isCanonical()) {
+ if (!T->isCanonicalUnqualified()) {
QualType FromCanonType
= Importer.getFromContext().getCanonicalType(QualType(T, 0));
if (ExpectedType TyOrErr = import(FromCanonType))
@@ -1551,9 +1619,8 @@ ExpectedType ASTNodeImporter::VisitDependentTemplateSpecializationType(
IdentifierInfo *ToName = Importer.Import(T->getIdentifier());
SmallVector<TemplateArgument, 2> ToPack;
- ToPack.reserve(T->getNumArgs());
- if (Error Err = ImportTemplateArguments(
- T->getArgs(), T->getNumArgs(), ToPack))
+ ToPack.reserve(T->template_arguments().size());
+ if (Error Err = ImportTemplateArguments(T->template_arguments(), ToPack))
return std::move(Err);
return Importer.getToContext().getDependentTemplateSpecializationType(
@@ -1626,6 +1693,134 @@ ASTNodeImporter::VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
return Importer.getToContext().getObjCObjectPointerType(*ToPointeeTypeOrErr);
}
+ExpectedType
+ASTNodeImporter::VisitMacroQualifiedType(const MacroQualifiedType *T) {
+ ExpectedType ToUnderlyingTypeOrErr = import(T->getUnderlyingType());
+ if (!ToUnderlyingTypeOrErr)
+ return ToUnderlyingTypeOrErr.takeError();
+
+ IdentifierInfo *ToIdentifier = Importer.Import(T->getMacroIdentifier());
+ return Importer.getToContext().getMacroQualifiedType(*ToUnderlyingTypeOrErr,
+ ToIdentifier);
+}
+
+ExpectedType clang::ASTNodeImporter::VisitAdjustedType(const AdjustedType *T) {
+ Error Err = Error::success();
+ QualType ToOriginalType = importChecked(Err, T->getOriginalType());
+ QualType ToAdjustedType = importChecked(Err, T->getAdjustedType());
+ if (Err)
+ return std::move(Err);
+
+ return Importer.getToContext().getAdjustedType(ToOriginalType,
+ ToAdjustedType);
+}
+
+ExpectedType clang::ASTNodeImporter::VisitBitIntType(const BitIntType *T) {
+ return Importer.getToContext().getBitIntType(T->isUnsigned(),
+ T->getNumBits());
+}
+
+ExpectedType clang::ASTNodeImporter::VisitBTFTagAttributedType(
+ const clang::BTFTagAttributedType *T) {
+ Error Err = Error::success();
+ const BTFTypeTagAttr *ToBTFAttr = importChecked(Err, T->getAttr());
+ QualType ToWrappedType = importChecked(Err, T->getWrappedType());
+ if (Err)
+ return std::move(Err);
+
+ return Importer.getToContext().getBTFTagAttributedType(ToBTFAttr,
+ ToWrappedType);
+}
+
+ExpectedType clang::ASTNodeImporter::VisitConstantMatrixType(
+ const clang::ConstantMatrixType *T) {
+ ExpectedType ToElementTypeOrErr = import(T->getElementType());
+ if (!ToElementTypeOrErr)
+ return ToElementTypeOrErr.takeError();
+
+ return Importer.getToContext().getConstantMatrixType(
+ *ToElementTypeOrErr, T->getNumRows(), T->getNumColumns());
+}
+
+ExpectedType clang::ASTNodeImporter::VisitDependentAddressSpaceType(
+ const clang::DependentAddressSpaceType *T) {
+ Error Err = Error::success();
+ QualType ToPointeeType = importChecked(Err, T->getPointeeType());
+ Expr *ToAddrSpaceExpr = importChecked(Err, T->getAddrSpaceExpr());
+ SourceLocation ToAttrLoc = importChecked(Err, T->getAttributeLoc());
+ if (Err)
+ return std::move(Err);
+
+ return Importer.getToContext().getDependentAddressSpaceType(
+ ToPointeeType, ToAddrSpaceExpr, ToAttrLoc);
+}
+
+ExpectedType clang::ASTNodeImporter::VisitDependentBitIntType(
+ const clang::DependentBitIntType *T) {
+ ExpectedExpr ToNumBitsExprOrErr = import(T->getNumBitsExpr());
+ if (!ToNumBitsExprOrErr)
+ return ToNumBitsExprOrErr.takeError();
+ return Importer.getToContext().getDependentBitIntType(T->isUnsigned(),
+ *ToNumBitsExprOrErr);
+}
+
+ExpectedType clang::ASTNodeImporter::VisitDependentSizedMatrixType(
+ const clang::DependentSizedMatrixType *T) {
+ Error Err = Error::success();
+ QualType ToElementType = importChecked(Err, T->getElementType());
+ Expr *ToRowExpr = importChecked(Err, T->getRowExpr());
+ Expr *ToColumnExpr = importChecked(Err, T->getColumnExpr());
+ SourceLocation ToAttrLoc = importChecked(Err, T->getAttributeLoc());
+ if (Err)
+ return std::move(Err);
+
+ return Importer.getToContext().getDependentSizedMatrixType(
+ ToElementType, ToRowExpr, ToColumnExpr, ToAttrLoc);
+}
+
+ExpectedType clang::ASTNodeImporter::VisitDependentVectorType(
+ const clang::DependentVectorType *T) {
+ Error Err = Error::success();
+ QualType ToElementType = importChecked(Err, T->getElementType());
+ Expr *ToSizeExpr = importChecked(Err, T->getSizeExpr());
+ SourceLocation ToAttrLoc = importChecked(Err, T->getAttributeLoc());
+ if (Err)
+ return std::move(Err);
+
+ return Importer.getToContext().getDependentVectorType(
+ ToElementType, ToSizeExpr, ToAttrLoc, T->getVectorKind());
+}
+
+ExpectedType clang::ASTNodeImporter::VisitObjCTypeParamType(
+ const clang::ObjCTypeParamType *T) {
+ Expected<ObjCTypeParamDecl *> ToDeclOrErr = import(T->getDecl());
+ if (!ToDeclOrErr)
+ return ToDeclOrErr.takeError();
+
+ SmallVector<ObjCProtocolDecl *, 4> ToProtocols;
+ for (ObjCProtocolDecl *FromProtocol : T->getProtocols()) {
+ Expected<ObjCProtocolDecl *> ToProtocolOrErr = import(FromProtocol);
+ if (!ToProtocolOrErr)
+ return ToProtocolOrErr.takeError();
+ ToProtocols.push_back(*ToProtocolOrErr);
+ }
+
+ return Importer.getToContext().getObjCTypeParamType(*ToDeclOrErr,
+ ToProtocols);
+}
+
+ExpectedType clang::ASTNodeImporter::VisitPipeType(const clang::PipeType *T) {
+ ExpectedType ToElementTypeOrErr = import(T->getElementType());
+ if (!ToElementTypeOrErr)
+ return ToElementTypeOrErr.takeError();
+
+ ASTContext &ToCtx = Importer.getToContext();
+ if (T->isReadOnly())
+ return ToCtx.getReadPipeType(*ToElementTypeOrErr);
+ else
+ return ToCtx.getWritePipeType(*ToElementTypeOrErr);
+}
+
//----------------------------------------------------------------------------
// Import Declarations
//----------------------------------------------------------------------------
@@ -1655,7 +1850,7 @@ Error ASTNodeImporter::ImportDeclParts(
if (RT && RT->getDecl() == D) {
Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node)
<< D->getDeclKindName();
- return make_error<ImportError>(ImportError::UnsupportedConstruct);
+ return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct);
}
}
}
@@ -1788,60 +1983,35 @@ ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
// because there is an ODR error with two typedefs. As another example,
// the client may allow EnumConstantDecls with same names but with
// different values in two distinct translation units.
- bool AccumulateChildErrors = isa<TagDecl>(FromDC);
+ ChildErrorHandlingStrategy HandleChildErrors(FromDC);
+
+ auto MightNeedReordering = [](const Decl *D) {
+ return isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D) || isa<FriendDecl>(D);
+ };
+ // Import everything that might need reordering first.
Error ChildErrors = Error::success();
for (auto *From : FromDC->decls()) {
+ if (!MightNeedReordering(From))
+ continue;
+
ExpectedDecl ImportedOrErr = import(From);
// If we are in the process of ImportDefinition(...) for a RecordDecl we
// want to make sure that we are also completing each FieldDecl. There
// are currently cases where this does not happen and this is correctness
// fix since operations such as code generation will expect this to be so.
- if (ImportedOrErr) {
- FieldDecl *FieldFrom = dyn_cast_or_null<FieldDecl>(From);
- Decl *ImportedDecl = *ImportedOrErr;
- FieldDecl *FieldTo = dyn_cast_or_null<FieldDecl>(ImportedDecl);
- if (FieldFrom && FieldTo) {
- RecordDecl *FromRecordDecl = nullptr;
- RecordDecl *ToRecordDecl = nullptr;
- // If we have a field that is an ArrayType we need to check if the array
- // element is a RecordDecl and if so we need to import the defintion.
- if (FieldFrom->getType()->isArrayType()) {
- // getBaseElementTypeUnsafe(...) handles multi-dimensonal arrays for us.
- FromRecordDecl = FieldFrom->getType()->getBaseElementTypeUnsafe()->getAsRecordDecl();
- ToRecordDecl = FieldTo->getType()->getBaseElementTypeUnsafe()->getAsRecordDecl();
- }
-
- if (!FromRecordDecl || !ToRecordDecl) {
- const RecordType *RecordFrom =
- FieldFrom->getType()->getAs<RecordType>();
- const RecordType *RecordTo = FieldTo->getType()->getAs<RecordType>();
-
- if (RecordFrom && RecordTo) {
- FromRecordDecl = RecordFrom->getDecl();
- ToRecordDecl = RecordTo->getDecl();
- }
- }
-
- if (FromRecordDecl && ToRecordDecl) {
- if (FromRecordDecl->isCompleteDefinition() &&
- !ToRecordDecl->isCompleteDefinition()) {
- Error Err = ImportDefinition(FromRecordDecl, ToRecordDecl);
-
- if (Err && AccumulateChildErrors)
- ChildErrors = joinErrors(std::move(ChildErrors), std::move(Err));
- else
- consumeError(std::move(Err));
- }
- }
- }
- } else {
- if (AccumulateChildErrors)
- ChildErrors =
- joinErrors(std::move(ChildErrors), ImportedOrErr.takeError());
- else
- consumeError(ImportedOrErr.takeError());
+ if (!ImportedOrErr) {
+ HandleChildErrors.handleChildImportResult(ChildErrors,
+ ImportedOrErr.takeError());
+ continue;
+ }
+ FieldDecl *FieldFrom = dyn_cast_or_null<FieldDecl>(From);
+ Decl *ImportedDecl = *ImportedOrErr;
+ FieldDecl *FieldTo = dyn_cast_or_null<FieldDecl>(ImportedDecl);
+ if (FieldFrom && FieldTo) {
+ Error Err = ImportFieldDeclDefinition(FieldFrom, FieldTo);
+ HandleChildErrors.handleChildImportResult(ChildErrors, std::move(Err));
}
}
@@ -1856,7 +2026,7 @@ ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
// During the import of `a` we import first the dependencies in sequence,
// thus the order would be `c`, `b`, `a`. We will get the normal order by
// first removing the already imported members and then adding them in the
- // order as they apper in the "from" context.
+ // order as they appear in the "from" context.
//
// Keeping field order is vital because it determines structure layout.
//
@@ -1868,9 +2038,6 @@ ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
// interface in LLDB is implemented by the means of the ASTImporter. However,
// calling an import at this point would result in an uncontrolled import, we
// must avoid that.
- const auto *FromRD = dyn_cast<RecordDecl>(FromDC);
- if (!FromRD)
- return ChildErrors;
auto ToDCOrErr = Importer.ImportContext(FromDC);
if (!ToDCOrErr) {
@@ -1878,15 +2045,17 @@ ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
return ToDCOrErr.takeError();
}
- DeclContext *ToDC = *ToDCOrErr;
- // Remove all declarations, which may be in wrong order in the
- // lexical DeclContext and then add them in the proper order.
- for (auto *D : FromRD->decls()) {
- if (isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D) || isa<FriendDecl>(D)) {
+ if (const auto *FromRD = dyn_cast<RecordDecl>(FromDC)) {
+ DeclContext *ToDC = *ToDCOrErr;
+ // Remove all declarations, which may be in wrong order in the
+ // lexical DeclContext and then add them in the proper order.
+ for (auto *D : FromRD->decls()) {
+ if (!MightNeedReordering(D))
+ continue;
+
assert(D && "DC contains a null decl");
- Decl *ToD = Importer.GetAlreadyImportedOrNull(D);
- // Remove only the decls which we successfully imported.
- if (ToD) {
+ if (Decl *ToD = Importer.GetAlreadyImportedOrNull(D)) {
+ // Remove only the decls which we successfully imported.
assert(ToDC == ToD->getLexicalDeclContext() && ToDC->containsDecl(ToD));
// Remove the decl from its wrong place in the linked list.
ToDC->removeDecl(ToD);
@@ -1898,9 +2067,53 @@ ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
}
}
+ // Import everything else.
+ for (auto *From : FromDC->decls()) {
+ if (MightNeedReordering(From))
+ continue;
+
+ ExpectedDecl ImportedOrErr = import(From);
+ if (!ImportedOrErr)
+ HandleChildErrors.handleChildImportResult(ChildErrors,
+ ImportedOrErr.takeError());
+ }
+
return ChildErrors;
}
+Error ASTNodeImporter::ImportFieldDeclDefinition(const FieldDecl *From,
+ const FieldDecl *To) {
+ RecordDecl *FromRecordDecl = nullptr;
+ RecordDecl *ToRecordDecl = nullptr;
+ // If we have a field that is an ArrayType we need to check if the array
+ // element is a RecordDecl and if so we need to import the definition.
+ QualType FromType = From->getType();
+ QualType ToType = To->getType();
+ if (FromType->isArrayType()) {
+ // getBaseElementTypeUnsafe(...) handles multi-dimensonal arrays for us.
+ FromRecordDecl = FromType->getBaseElementTypeUnsafe()->getAsRecordDecl();
+ ToRecordDecl = ToType->getBaseElementTypeUnsafe()->getAsRecordDecl();
+ }
+
+ if (!FromRecordDecl || !ToRecordDecl) {
+ const RecordType *RecordFrom = FromType->getAs<RecordType>();
+ const RecordType *RecordTo = ToType->getAs<RecordType>();
+
+ if (RecordFrom && RecordTo) {
+ FromRecordDecl = RecordFrom->getDecl();
+ ToRecordDecl = RecordTo->getDecl();
+ }
+ }
+
+ if (FromRecordDecl && ToRecordDecl) {
+ if (FromRecordDecl->isCompleteDefinition() &&
+ !ToRecordDecl->isCompleteDefinition())
+ return ImportDefinition(FromRecordDecl, ToRecordDecl);
+ }
+
+ return Error::success();
+}
+
Error ASTNodeImporter::ImportDeclContext(
Decl *FromD, DeclContext *&ToDC, DeclContext *&ToLexicalDC) {
auto ToDCOrErr = Importer.ImportContext(FromD->getDeclContext());
@@ -1991,6 +2204,14 @@ Error ASTNodeImporter::ImportDefinition(
}
To->startDefinition();
+ // Set the definition to complete even if it is really not complete during
+ // import. Some AST constructs (expressions) require the record layout
+ // to be calculated (see 'clang::computeDependence') at the time they are
+ // constructed. Import of such AST node is possible during import of the
+ // same record, there is no way to have a completely defined record (all
+ // fields imported) at that time without multiple AST import passes.
+ if (!Importer.isMinimalImport())
+ To->setCompleteDefinition(true);
// Complete the definition even if error is returned.
// The RecordDecl may be already part of the AST so it is better to
// have it in complete state even if something is wrong with it.
@@ -2055,9 +2276,10 @@ Error ASTNodeImporter::ImportDefinition(
ToCXX->setBases(Bases.data(), Bases.size());
}
- if (shouldForceImportDeclContext(Kind))
+ if (shouldForceImportDeclContext(Kind)) {
if (Error Err = ImportDeclContext(From, /*ForceImport=*/true))
return Err;
+ }
return Error::success();
}
@@ -2121,10 +2343,10 @@ Error ASTNodeImporter::ImportDefinition(
}
Error ASTNodeImporter::ImportTemplateArguments(
- const TemplateArgument *FromArgs, unsigned NumFromArgs,
+ ArrayRef<TemplateArgument> FromArgs,
SmallVectorImpl<TemplateArgument> &ToArgs) {
- for (unsigned I = 0; I != NumFromArgs; ++I) {
- if (auto ToOrErr = import(FromArgs[I]))
+ for (const auto &Arg : FromArgs) {
+ if (auto ToOrErr = import(Arg))
ToArgs.push_back(*ToOrErr);
else
return ToOrErr.takeError();
@@ -2157,110 +2379,33 @@ getStructuralEquivalenceKind(const ASTImporter &Importer) {
: StructuralEquivalenceKind::Default;
}
-bool ASTNodeImporter::IsStructuralMatch(Decl *From, Decl *To, bool Complain) {
- StructuralEquivalenceContext Ctx(
- Importer.getFromContext(), Importer.getToContext(),
- Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer),
- false, Complain);
- return Ctx.IsEquivalent(From, To);
-}
-
-bool ASTNodeImporter::IsStructuralMatch(RecordDecl *FromRecord,
- RecordDecl *ToRecord, bool Complain) {
+bool ASTNodeImporter::IsStructuralMatch(Decl *From, Decl *To, bool Complain,
+ bool IgnoreTemplateParmDepth) {
// Eliminate a potential failure point where we attempt to re-import
// something we're trying to import while completing ToRecord.
- Decl *ToOrigin = Importer.GetOriginalDecl(ToRecord);
+ Decl *ToOrigin = Importer.GetOriginalDecl(To);
if (ToOrigin) {
- auto *ToOriginRecord = dyn_cast<RecordDecl>(ToOrigin);
- if (ToOriginRecord)
- ToRecord = ToOriginRecord;
+ To = ToOrigin;
}
- StructuralEquivalenceContext Ctx(Importer.getFromContext(),
- ToRecord->getASTContext(),
- Importer.getNonEquivalentDecls(),
- getStructuralEquivalenceKind(Importer),
- false, Complain);
- return Ctx.IsEquivalent(FromRecord, ToRecord);
-}
-
-bool ASTNodeImporter::IsStructuralMatch(VarDecl *FromVar, VarDecl *ToVar,
- bool Complain) {
- StructuralEquivalenceContext Ctx(
- Importer.getFromContext(), Importer.getToContext(),
- Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer),
- false, Complain);
- return Ctx.IsEquivalent(FromVar, ToVar);
-}
-
-bool ASTNodeImporter::IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToEnum) {
- // Eliminate a potential failure point where we attempt to re-import
- // something we're trying to import while completing ToEnum.
- if (Decl *ToOrigin = Importer.GetOriginalDecl(ToEnum))
- if (auto *ToOriginEnum = dyn_cast<EnumDecl>(ToOrigin))
- ToEnum = ToOriginEnum;
-
- StructuralEquivalenceContext Ctx(
- Importer.getFromContext(), Importer.getToContext(),
- Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer));
- return Ctx.IsEquivalent(FromEnum, ToEnum);
-}
-
-bool ASTNodeImporter::IsStructuralMatch(FunctionTemplateDecl *From,
- FunctionTemplateDecl *To) {
- StructuralEquivalenceContext Ctx(
- Importer.getFromContext(), Importer.getToContext(),
- Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer),
- false, false);
- return Ctx.IsEquivalent(From, To);
-}
-
-bool ASTNodeImporter::IsStructuralMatch(FunctionDecl *From, FunctionDecl *To) {
StructuralEquivalenceContext Ctx(
Importer.getFromContext(), Importer.getToContext(),
Importer.getNonEquivalentDecls(), getStructuralEquivalenceKind(Importer),
- false, false);
- return Ctx.IsEquivalent(From, To);
-}
-
-bool ASTNodeImporter::IsStructuralMatch(EnumConstantDecl *FromEC,
- EnumConstantDecl *ToEC) {
- const llvm::APSInt &FromVal = FromEC->getInitVal();
- const llvm::APSInt &ToVal = ToEC->getInitVal();
-
- return FromVal.isSigned() == ToVal.isSigned() &&
- FromVal.getBitWidth() == ToVal.getBitWidth() &&
- FromVal == ToVal;
-}
-
-bool ASTNodeImporter::IsStructuralMatch(ClassTemplateDecl *From,
- ClassTemplateDecl *To) {
- StructuralEquivalenceContext Ctx(Importer.getFromContext(),
- Importer.getToContext(),
- Importer.getNonEquivalentDecls(),
- getStructuralEquivalenceKind(Importer));
- return Ctx.IsEquivalent(From, To);
-}
-
-bool ASTNodeImporter::IsStructuralMatch(VarTemplateDecl *From,
- VarTemplateDecl *To) {
- StructuralEquivalenceContext Ctx(Importer.getFromContext(),
- Importer.getToContext(),
- Importer.getNonEquivalentDecls(),
- getStructuralEquivalenceKind(Importer));
+ /*StrictTypeSpelling=*/false, Complain, /*ErrorOnTagTypeMismatch=*/false,
+ IgnoreTemplateParmDepth);
return Ctx.IsEquivalent(From, To);
}
ExpectedDecl ASTNodeImporter::VisitDecl(Decl *D) {
Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node)
<< D->getDeclKindName();
- return make_error<ImportError>(ImportError::UnsupportedConstruct);
+ return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct);
}
ExpectedDecl ASTNodeImporter::VisitImportDecl(ImportDecl *D) {
Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node)
<< D->getDeclKindName();
- return make_error<ImportError>(ImportError::UnsupportedConstruct);
+ return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct);
}
ExpectedDecl ASTNodeImporter::VisitEmptyDecl(EmptyDecl *D) {
@@ -2431,10 +2576,10 @@ ExpectedDecl ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
// Create the "to" namespace, if needed.
NamespaceDecl *ToNamespace = MergeWithNamespace;
if (!ToNamespace) {
- if (GetImportedOrCreateDecl(
- ToNamespace, D, Importer.getToContext(), DC, D->isInline(),
- *BeginLocOrErr, Loc, Name.getAsIdentifierInfo(),
- /*PrevDecl=*/nullptr))
+ if (GetImportedOrCreateDecl(ToNamespace, D, Importer.getToContext(), DC,
+ D->isInline(), *BeginLocOrErr, Loc,
+ Name.getAsIdentifierInfo(),
+ /*PrevDecl=*/nullptr, D->isNested()))
return ToNamespace;
ToNamespace->setRBraceLoc(*RBraceLocOrErr);
ToNamespace->setLexicalDeclContext(LexicalDC);
@@ -2532,6 +2677,22 @@ ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
QualType FromUT = D->getUnderlyingType();
QualType FoundUT = FoundTypedef->getUnderlyingType();
if (Importer.IsStructurallyEquivalent(FromUT, FoundUT)) {
+ // If the underlying declarations are unnamed records these can be
+ // imported as different types. We should create a distinct typedef
+ // node in this case.
+ // If we found an existing underlying type with a record in a
+ // different context (than the imported), this is already reason for
+ // having distinct typedef nodes for these.
+ // Again this can create situation like
+ // 'typedef int T; typedef int T;' but this is hard to avoid without
+ // a rename strategy at import.
+ if (!FromUT.isNull() && !FoundUT.isNull()) {
+ RecordDecl *FromR = FromUT->getAsRecordDecl();
+ RecordDecl *FoundR = FoundUT->getAsRecordDecl();
+ if (FromR && FoundR &&
+ !hasSameVisibilityContextAndLinkage(FoundR, FromR))
+ continue;
+ }
// If the "From" context has a complete underlying type but we
// already have a complete underlying type then return with that.
if (!FromUT->isIncompleteType() && !FoundUT->isIncompleteType())
@@ -2623,9 +2784,11 @@ ASTNodeImporter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
for (auto *FoundDecl : FoundDecls) {
if (!FoundDecl->isInIdentifierNamespace(IDNS))
continue;
- if (auto *FoundAlias = dyn_cast<TypeAliasTemplateDecl>(FoundDecl))
- return Importer.MapImported(D, FoundAlias);
- ConflictingDecls.push_back(FoundDecl);
+ if (auto *FoundAlias = dyn_cast<TypeAliasTemplateDecl>(FoundDecl)) {
+ if (IsStructuralMatch(D, FoundAlias))
+ return Importer.MapImported(D, FoundAlias);
+ ConflictingDecls.push_back(FoundDecl);
+ }
}
if (!ConflictingDecls.empty()) {
@@ -2834,9 +2997,13 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
} else if (Importer.getToContext().getLangOpts().CPlusPlus)
IDNS |= Decl::IDNS_Ordinary | Decl::IDNS_TagFriend;
+ bool IsDependentContext = DC != LexicalDC ? LexicalDC->isDependentContext()
+ : DC->isDependentContext();
+ bool DependentFriend = IsFriendTemplate && IsDependentContext;
+
// We may already have a record of the same name; try to find and match it.
RecordDecl *PrevDecl = nullptr;
- if (!DC->isFunctionOrMethod() && !D->isLambda()) {
+ if (!DependentFriend && !DC->isFunctionOrMethod() && !D->isLambda()) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
auto FoundDecls =
Importer.findDeclsInToCtx(DC, SearchName);
@@ -2923,16 +3090,15 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
return TInfoOrErr.takeError();
if (GetImportedOrCreateSpecialDecl(
D2CXX, CXXRecordDecl::CreateLambda, D, Importer.getToContext(),
- DC, *TInfoOrErr, Loc, DCXX->isDependentLambda(),
+ DC, *TInfoOrErr, Loc, DCXX->getLambdaDependencyKind(),
DCXX->isGenericLambda(), DCXX->getLambdaCaptureDefault()))
return D2CXX;
- ExpectedDecl CDeclOrErr = import(DCXX->getLambdaContextDecl());
+ CXXRecordDecl::LambdaNumbering Numbering = DCXX->getLambdaNumbering();
+ ExpectedDecl CDeclOrErr = import(Numbering.ContextDecl);
if (!CDeclOrErr)
return CDeclOrErr.takeError();
- D2CXX->setLambdaMangling(DCXX->getLambdaManglingNumber(), *CDeclOrErr,
- DCXX->hasKnownLambdaInternalLinkage());
- D2CXX->setDeviceLambdaManglingNumber(
- DCXX->getDeviceLambdaManglingNumber());
+ Numbering.ContextDecl = *CDeclOrErr;
+ D2CXX->setLambdaNumbering(Numbering);
} else if (DCXX->isInjectedClassName()) {
// We have to be careful to do a similar dance to the one in
// Sema::ActOnStartCXXMemberDeclarations
@@ -2968,8 +3134,6 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
// InjectedClassNameType (see Sema::CheckClassTemplate). Update the
// previously set type to the correct value here (ToDescribed is not
// available at record create).
- // FIXME: The previous type is cleared but not removed from
- // ASTContext's internal storage.
CXXRecordDecl *Injected = nullptr;
for (NamedDecl *Found : D2CXX->noload_lookup(Name)) {
auto *Record = dyn_cast<CXXRecordDecl>(Found);
@@ -2979,20 +3143,34 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
}
}
// Create an injected type for the whole redecl chain.
+ // The chain may contain an already existing injected type at the start,
+ // if yes this should be reused. We must ensure that only one type
+ // object exists for the injected type (including the injected record
+ // declaration), ASTContext does not check it.
SmallVector<Decl *, 2> Redecls =
getCanonicalForwardRedeclChain(D2CXX);
+ const Type *FrontTy =
+ cast<CXXRecordDecl>(Redecls.front())->getTypeForDecl();
+ QualType InjSpec;
+ if (auto *InjTy = FrontTy->getAs<InjectedClassNameType>())
+ InjSpec = InjTy->getInjectedSpecializationType();
+ else
+ InjSpec = ToDescribed->getInjectedClassNameSpecialization();
for (auto *R : Redecls) {
auto *RI = cast<CXXRecordDecl>(R);
- RI->setTypeForDecl(nullptr);
- // Below we create a new injected type and assign that to the
- // canonical decl, subsequent declarations in the chain will reuse
- // that type.
- Importer.getToContext().getInjectedClassNameType(
- RI, ToDescribed->getInjectedClassNameSpecialization());
+ if (R != Redecls.front() ||
+ !isa<InjectedClassNameType>(RI->getTypeForDecl()))
+ RI->setTypeForDecl(nullptr);
+ // This function tries to get the injected type from getTypeForDecl,
+ // then from the previous declaration if possible. If not, it creates
+ // a new type.
+ Importer.getToContext().getInjectedClassNameType(RI, InjSpec);
}
- // Set the new type for the previous injected decl too.
+ // Set the new type for the injected decl too.
if (Injected) {
Injected->setTypeForDecl(nullptr);
+ // This function will copy the injected type from D2CXX into Injected.
+ // The injected decl does not have a previous decl to copy from.
Importer.getToContext().getTypeDeclType(Injected, D2CXX);
}
}
@@ -3124,6 +3302,11 @@ Error ASTNodeImporter::ImportTemplateInformation(
case FunctionDecl::TK_FunctionTemplate:
return Error::success();
+ case FunctionDecl::TK_DependentNonTemplate:
+ if (Expected<FunctionDecl *> InstFDOrErr =
+ import(FromFD->getInstantiatedFromDecl()))
+ ToFD->setInstantiatedFromDecl(*InstFDOrErr);
+ return Error::success();
case FunctionDecl::TK_MemberSpecialization: {
TemplateSpecializationKind TSK = FromFD->getTemplateSpecializationKind();
@@ -3175,27 +3358,25 @@ Error ASTNodeImporter::ImportTemplateInformation(
case FunctionDecl::TK_DependentFunctionTemplateSpecialization: {
auto *FromInfo = FromFD->getDependentSpecializationInfo();
- UnresolvedSet<8> TemplDecls;
- unsigned NumTemplates = FromInfo->getNumTemplates();
- for (unsigned I = 0; I < NumTemplates; I++) {
- if (Expected<FunctionTemplateDecl *> ToFTDOrErr =
- import(FromInfo->getTemplate(I)))
- TemplDecls.addDecl(*ToFTDOrErr);
+ UnresolvedSet<8> Candidates;
+ for (FunctionTemplateDecl *FTD : FromInfo->getCandidates()) {
+ if (Expected<FunctionTemplateDecl *> ToFTDOrErr = import(FTD))
+ Candidates.addDecl(*ToFTDOrErr);
else
return ToFTDOrErr.takeError();
}
// Import TemplateArgumentListInfo.
TemplateArgumentListInfo ToTAInfo;
- if (Error Err = ImportTemplateArgumentListInfo(
- FromInfo->getLAngleLoc(), FromInfo->getRAngleLoc(),
- llvm::makeArrayRef(
- FromInfo->getTemplateArgs(), FromInfo->getNumTemplateArgs()),
- ToTAInfo))
- return Err;
+ const auto *FromTAArgsAsWritten = FromInfo->TemplateArgumentsAsWritten;
+ if (FromTAArgsAsWritten)
+ if (Error Err =
+ ImportTemplateArgumentListInfo(*FromTAArgsAsWritten, ToTAInfo))
+ return Err;
- ToFD->setDependentTemplateSpecialization(Importer.getToContext(),
- TemplDecls, ToTAInfo);
+ ToFD->setDependentTemplateSpecialization(
+ Importer.getToContext(), Candidates,
+ FromTAArgsAsWritten ? &ToTAInfo : nullptr);
return Error::success();
}
}
@@ -3229,9 +3410,12 @@ Error ASTNodeImporter::ImportFunctionDeclBody(FunctionDecl *FromFD,
}
// Returns true if the given D has a DeclContext up to the TranslationUnitDecl
-// which is equal to the given DC.
+// which is equal to the given DC, or D is equal to DC.
static bool isAncestorDeclContextOf(const DeclContext *DC, const Decl *D) {
- const DeclContext *DCi = D->getDeclContext();
+ const DeclContext *DCi = dyn_cast<DeclContext>(D);
+ if (!DCi)
+ DCi = D->getDeclContext();
+ assert(DCi && "Declaration should have a context");
while (DCi != D->getTranslationUnitDecl()) {
if (DCi == DC)
return true;
@@ -3240,31 +3424,200 @@ static bool isAncestorDeclContextOf(const DeclContext *DC, const Decl *D) {
return false;
}
-bool ASTNodeImporter::hasAutoReturnTypeDeclaredInside(FunctionDecl *D) {
- QualType FromTy = D->getType();
- const FunctionProtoType *FromFPT = FromTy->getAs<FunctionProtoType>();
- assert(FromFPT && "Must be called on FunctionProtoType");
- if (AutoType *AutoT = FromFPT->getReturnType()->getContainedAutoType()) {
- QualType DeducedT = AutoT->getDeducedType();
- if (const RecordType *RecordT =
- DeducedT.isNull() ? nullptr : dyn_cast<RecordType>(DeducedT)) {
- RecordDecl *RD = RecordT->getDecl();
- assert(RD);
- if (isAncestorDeclContextOf(D, RD)) {
- assert(RD->getLexicalDeclContext() == RD->getDeclContext());
- return true;
- }
+// Check if there is a declaration that has 'DC' as parent context and is
+// referenced from statement 'S' or one of its children. The search is done in
+// BFS order through children of 'S'.
+static bool isAncestorDeclContextOf(const DeclContext *DC, const Stmt *S) {
+ SmallVector<const Stmt *> ToProcess;
+ ToProcess.push_back(S);
+ while (!ToProcess.empty()) {
+ const Stmt *CurrentS = ToProcess.pop_back_val();
+ ToProcess.append(CurrentS->child_begin(), CurrentS->child_end());
+ if (const auto *DeclRef = dyn_cast<DeclRefExpr>(CurrentS)) {
+ if (const Decl *D = DeclRef->getDecl())
+ if (isAncestorDeclContextOf(DC, D))
+ return true;
+ } else if (const auto *E =
+ dyn_cast_or_null<SubstNonTypeTemplateParmExpr>(CurrentS)) {
+ if (const Decl *D = E->getAssociatedDecl())
+ if (isAncestorDeclContextOf(DC, D))
+ return true;
}
}
- if (const TypedefType *TypedefT =
- dyn_cast<TypedefType>(FromFPT->getReturnType())) {
- TypedefNameDecl *TD = TypedefT->getDecl();
+ return false;
+}
+
+namespace {
+/// Check if a type has any reference to a declaration that is inside the body
+/// of a function.
+/// The \c CheckType(QualType) function should be used to determine
+/// this property.
+///
+/// The type visitor visits one type object only (not recursive).
+/// To find all referenced declarations we must discover all type objects until
+/// the canonical type is reached (walk over typedef and similar objects). This
+/// is done by loop over all "sugar" type objects. For every such type we must
+/// check all declarations that are referenced from it. For this check the
+/// visitor is used. In the visit functions all referenced declarations except
+/// the one that follows in the sugar chain (if any) must be checked. For this
+/// check the same visitor is re-used (it has no state-dependent data).
+///
+/// The visit functions have 3 possible return values:
+/// - True, found a declaration inside \c ParentDC.
+/// - False, found declarations only outside \c ParentDC and it is not possible
+/// to find more declarations (the "sugar" chain does not continue).
+/// - Empty optional value, found no declarations or only outside \c ParentDC,
+/// but it is possible to find more declarations in the type "sugar" chain.
+/// The loop over the "sugar" types can be implemented by using type visit
+/// functions only (call \c CheckType with the desugared type). With the current
+/// solution no visit function is needed if the type has only a desugared type
+/// as data.
+class IsTypeDeclaredInsideVisitor
+ : public TypeVisitor<IsTypeDeclaredInsideVisitor, std::optional<bool>> {
+public:
+ IsTypeDeclaredInsideVisitor(const FunctionDecl *ParentDC)
+ : ParentDC(ParentDC) {}
+
+ bool CheckType(QualType T) {
+ // Check the chain of "sugar" types.
+ // The "sugar" types are typedef or similar types that have the same
+ // canonical type.
+ if (std::optional<bool> Res = Visit(T.getTypePtr()))
+ return *Res;
+ QualType DsT =
+ T.getSingleStepDesugaredType(ParentDC->getParentASTContext());
+ while (DsT != T) {
+ if (std::optional<bool> Res = Visit(DsT.getTypePtr()))
+ return *Res;
+ T = DsT;
+ DsT = T.getSingleStepDesugaredType(ParentDC->getParentASTContext());
+ }
+ return false;
+ }
+
+ std::optional<bool> VisitTagType(const TagType *T) {
+ if (auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(T->getDecl()))
+ for (const auto &Arg : Spec->getTemplateArgs().asArray())
+ if (checkTemplateArgument(Arg))
+ return true;
+ return isAncestorDeclContextOf(ParentDC, T->getDecl());
+ }
+
+ std::optional<bool> VisitPointerType(const PointerType *T) {
+ return CheckType(T->getPointeeType());
+ }
+
+ std::optional<bool> VisitReferenceType(const ReferenceType *T) {
+ return CheckType(T->getPointeeTypeAsWritten());
+ }
+
+ std::optional<bool> VisitTypedefType(const TypedefType *T) {
+ const TypedefNameDecl *TD = T->getDecl();
assert(TD);
- if (isAncestorDeclContextOf(D, TD)) {
- assert(TD->getLexicalDeclContext() == TD->getDeclContext());
+ return isAncestorDeclContextOf(ParentDC, TD);
+ }
+
+ std::optional<bool> VisitUsingType(const UsingType *T) {
+ if (T->getFoundDecl() &&
+ isAncestorDeclContextOf(ParentDC, T->getFoundDecl()))
return true;
+
+ return {};
+ }
+
+ std::optional<bool>
+ VisitTemplateSpecializationType(const TemplateSpecializationType *T) {
+ for (const auto &Arg : T->template_arguments())
+ if (checkTemplateArgument(Arg))
+ return true;
+ // This type is a "sugar" to a record type, it can have a desugared type.
+ return {};
+ }
+
+ std::optional<bool>
+ VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) {
+ // The "associated declaration" can be the same as ParentDC.
+ if (isAncestorDeclContextOf(ParentDC, T->getAssociatedDecl()))
+ return true;
+ return {};
+ }
+
+ std::optional<bool> VisitConstantArrayType(const ConstantArrayType *T) {
+ if (T->getSizeExpr() && isAncestorDeclContextOf(ParentDC, T->getSizeExpr()))
+ return true;
+
+ return CheckType(T->getElementType());
+ }
+
+ std::optional<bool> VisitVariableArrayType(const VariableArrayType *T) {
+ llvm_unreachable(
+ "Variable array should not occur in deduced return type of a function");
+ }
+
+ std::optional<bool> VisitIncompleteArrayType(const IncompleteArrayType *T) {
+ llvm_unreachable("Incomplete array should not occur in deduced return type "
+ "of a function");
+ }
+
+ std::optional<bool> VisitDependentArrayType(const IncompleteArrayType *T) {
+ llvm_unreachable("Dependent array should not occur in deduced return type "
+ "of a function");
+ }
+
+private:
+ const DeclContext *const ParentDC;
+
+ bool checkTemplateArgument(const TemplateArgument &Arg) {
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ return false;
+ case TemplateArgument::Integral:
+ return CheckType(Arg.getIntegralType());
+ case TemplateArgument::Type:
+ return CheckType(Arg.getAsType());
+ case TemplateArgument::Expression:
+ return isAncestorDeclContextOf(ParentDC, Arg.getAsExpr());
+ case TemplateArgument::Declaration:
+ // FIXME: The declaration in this case is not allowed to be in a function?
+ return isAncestorDeclContextOf(ParentDC, Arg.getAsDecl());
+ case TemplateArgument::NullPtr:
+ // FIXME: The type is not allowed to be in the function?
+ return CheckType(Arg.getNullPtrType());
+ case TemplateArgument::StructuralValue:
+ return CheckType(Arg.getStructuralValueType());
+ case TemplateArgument::Pack:
+ for (const auto &PackArg : Arg.getPackAsArray())
+ if (checkTemplateArgument(PackArg))
+ return true;
+ return false;
+ case TemplateArgument::Template:
+ // Templates can not be defined locally in functions.
+ // A template passed as argument can be not in ParentDC.
+ return false;
+ case TemplateArgument::TemplateExpansion:
+ // Templates can not be defined locally in functions.
+ // A template passed as argument can be not in ParentDC.
+ return false;
}
+ llvm_unreachable("Unknown TemplateArgument::ArgKind enum");
+ };
+};
+} // namespace
+
+/// This function checks if the function has 'auto' return type that contains
+/// a reference (in any way) to a declaration inside the same function.
+bool ASTNodeImporter::hasAutoReturnTypeDeclaredInside(FunctionDecl *D) {
+ QualType FromTy = D->getType();
+ const auto *FromFPT = FromTy->getAs<FunctionProtoType>();
+ assert(FromFPT && "Must be called on FunctionProtoType");
+
+ QualType RetT = FromFPT->getReturnType();
+ if (isa<AutoType>(RetT.getTypePtr())) {
+ FunctionDecl *Def = D->getDefinition();
+ IsTypeDeclaredInsideVisitor Visitor(Def ? Def : D);
+ return Visitor.CheckType(RetT);
}
+
return false;
}
@@ -3399,11 +3752,14 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
return std::move(Err);
QualType FromTy = D->getType();
+ TypeSourceInfo *FromTSI = D->getTypeSourceInfo();
// Set to true if we do not import the type of the function as is. There are
// cases when the original type would result in an infinite recursion during
// the import. To avoid an infinite recursion when importing, we create the
// FunctionDecl with a simplified function type and update it only after the
// relevant AST nodes are already imported.
+ // The type is related to TypeSourceInfo (it references the type), so we must
+ // do the same with TypeSourceInfo.
bool UsedDifferentProtoType = false;
if (const auto *FromFPT = FromTy->getAs<FunctionProtoType>()) {
QualType FromReturnTy = FromFPT->getReturnType();
@@ -3430,13 +3786,16 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
}
FromTy = Importer.getFromContext().getFunctionType(
FromReturnTy, FromFPT->getParamTypes(), FromEPI);
+ FromTSI = Importer.getFromContext().getTrivialTypeSourceInfo(
+ FromTy, D->getBeginLoc());
}
Error Err = Error::success();
auto T = importChecked(Err, FromTy);
- auto TInfo = importChecked(Err, D->getTypeSourceInfo());
+ auto TInfo = importChecked(Err, FromTSI);
auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart());
auto ToEndLoc = importChecked(Err, D->getEndLoc());
+ auto ToDefaultLoc = importChecked(Err, D->getDefaultLoc());
auto ToQualifierLoc = importChecked(Err, D->getQualifierLoc());
auto TrailingRequiresClause =
importChecked(Err, D->getTrailingRequiresClause());
@@ -3445,7 +3804,7 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
// Import the function parameters.
SmallVector<ParmVarDecl *, 8> Parameters;
- for (auto P : D->parameters()) {
+ for (auto *P : D->parameters()) {
if (Expected<ParmVarDecl *> ToPOrErr = import(P))
Parameters.push_back(*ToPOrErr);
else
@@ -3459,13 +3818,19 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
importExplicitSpecifier(Err, FromConstructor->getExplicitSpecifier());
if (Err)
return std::move(Err);
+ auto ToInheritedConstructor = InheritedConstructor();
+ if (FromConstructor->isInheritingConstructor()) {
+ Expected<InheritedConstructor> ImportedInheritedCtor =
+ import(FromConstructor->getInheritedConstructor());
+ if (!ImportedInheritedCtor)
+ return ImportedInheritedCtor.takeError();
+ ToInheritedConstructor = *ImportedInheritedCtor;
+ }
if (GetImportedOrCreateDecl<CXXConstructorDecl>(
ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
- ToInnerLocStart, NameInfo, T, TInfo, ESpec, D->isInlineSpecified(),
- D->isImplicit(), D->getConstexprKind(),
- InheritedConstructor(), // FIXME: Properly import inherited
- // constructor info
- TrailingRequiresClause))
+ ToInnerLocStart, NameInfo, T, TInfo, ESpec, D->UsesFPIntrin(),
+ D->isInlineSpecified(), D->isImplicit(), D->getConstexprKind(),
+ ToInheritedConstructor, TrailingRequiresClause))
return ToFunction;
} else if (CXXDestructorDecl *FromDtor = dyn_cast<CXXDestructorDecl>(D)) {
@@ -3477,9 +3842,10 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
return std::move(Err);
if (GetImportedOrCreateDecl<CXXDestructorDecl>(
- ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
- ToInnerLocStart, NameInfo, T, TInfo, D->isInlineSpecified(),
- D->isImplicit(), D->getConstexprKind(), TrailingRequiresClause))
+ ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
+ ToInnerLocStart, NameInfo, T, TInfo, D->UsesFPIntrin(),
+ D->isInlineSpecified(), D->isImplicit(), D->getConstexprKind(),
+ TrailingRequiresClause))
return ToFunction;
CXXDestructorDecl *ToDtor = cast<CXXDestructorDecl>(ToFunction);
@@ -3493,15 +3859,16 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
return std::move(Err);
if (GetImportedOrCreateDecl<CXXConversionDecl>(
ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
- ToInnerLocStart, NameInfo, T, TInfo, D->isInlineSpecified(), ESpec,
- D->getConstexprKind(), SourceLocation(), TrailingRequiresClause))
+ ToInnerLocStart, NameInfo, T, TInfo, D->UsesFPIntrin(),
+ D->isInlineSpecified(), ESpec, D->getConstexprKind(),
+ SourceLocation(), TrailingRequiresClause))
return ToFunction;
} else if (auto *Method = dyn_cast<CXXMethodDecl>(D)) {
if (GetImportedOrCreateDecl<CXXMethodDecl>(
ToFunction, D, Importer.getToContext(), cast<CXXRecordDecl>(DC),
ToInnerLocStart, NameInfo, T, TInfo, Method->getStorageClass(),
- Method->isInlineSpecified(), D->getConstexprKind(),
- SourceLocation(), TrailingRequiresClause))
+ Method->UsesFPIntrin(), Method->isInlineSpecified(),
+ D->getConstexprKind(), SourceLocation(), TrailingRequiresClause))
return ToFunction;
} else if (auto *Guide = dyn_cast<CXXDeductionGuideDecl>(D)) {
ExplicitSpecifier ESpec =
@@ -3515,13 +3882,13 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
NameInfo, T, TInfo, ToEndLoc, Ctor))
return ToFunction;
cast<CXXDeductionGuideDecl>(ToFunction)
- ->setIsCopyDeductionCandidate(Guide->isCopyDeductionCandidate());
+ ->setDeductionCandidateKind(Guide->getDeductionCandidateKind());
} else {
if (GetImportedOrCreateDecl(
ToFunction, D, Importer.getToContext(), DC, ToInnerLocStart,
- NameInfo, T, TInfo, D->getStorageClass(), D->isInlineSpecified(),
- D->hasWrittenPrototype(), D->getConstexprKind(),
- TrailingRequiresClause))
+ NameInfo, T, TInfo, D->getStorageClass(), D->UsesFPIntrin(),
+ D->isInlineSpecified(), D->hasWrittenPrototype(),
+ D->getConstexprKind(), TrailingRequiresClause))
return ToFunction;
}
@@ -3542,11 +3909,14 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
ToFunction->setLexicalDeclContext(LexicalDC);
ToFunction->setVirtualAsWritten(D->isVirtualAsWritten());
ToFunction->setTrivial(D->isTrivial());
- ToFunction->setPure(D->isPure());
+ ToFunction->setIsPureVirtual(D->isPureVirtual());
ToFunction->setDefaulted(D->isDefaulted());
ToFunction->setExplicitlyDefaulted(D->isExplicitlyDefaulted());
ToFunction->setDeletedAsWritten(D->isDeletedAsWritten());
+ ToFunction->setFriendConstraintRefersToEnclosingTemplate(
+ D->FriendConstraintRefersToEnclosingTemplate());
ToFunction->setRangeEnd(ToEndLoc);
+ ToFunction->setDefaultLoc(ToDefaultLoc);
// Set the parameters.
for (auto *Param : Parameters) {
@@ -3591,6 +3961,15 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
}
}
+ // If it is a template, import all related things.
+ if (Error Err = ImportTemplateInformation(D, ToFunction))
+ return std::move(Err);
+
+ if (auto *FromCXXMethod = dyn_cast<CXXMethodDecl>(D))
+ if (Error Err = ImportOverriddenMethods(cast<CXXMethodDecl>(ToFunction),
+ FromCXXMethod))
+ return std::move(Err);
+
if (D->doesThisDeclarationHaveABody()) {
Error Err = ImportFunctionDeclBody(D, ToFunction);
@@ -3604,21 +3983,16 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
ToFunction->setType(*TyOrErr);
else
return TyOrErr.takeError();
+ if (Expected<TypeSourceInfo *> TSIOrErr = import(D->getTypeSourceInfo()))
+ ToFunction->setTypeSourceInfo(*TSIOrErr);
+ else
+ return TSIOrErr.takeError();
}
// FIXME: Other bits to merge?
- // If it is a template, import all related things.
- if (Error Err = ImportTemplateInformation(D, ToFunction))
- return std::move(Err);
-
addDeclToContexts(D, ToFunction);
- if (auto *FromCXXMethod = dyn_cast<CXXMethodDecl>(D))
- if (Error Err = ImportOverriddenMethods(cast<CXXMethodDecl>(ToFunction),
- FromCXXMethod))
- return std::move(Err);
-
// Import the rest of the chain. I.e. import all subsequent declarations.
for (++RedeclIt; RedeclIt != Redecls.end(); ++RedeclIt) {
ExpectedDecl ToRedeclOrErr = import(*RedeclIt);
@@ -3678,19 +4052,19 @@ ExpectedDecl ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
// initializer of a FieldDecl might not had been instantiated in the
// "To" context. However, the "From" context might instantiated that,
// thus we have to merge that.
+ // Note: `hasInClassInitializer()` is not the same as non-null
+ // `getInClassInitializer()` value.
if (Expr *FromInitializer = D->getInClassInitializer()) {
- // We don't have yet the initializer set.
- if (FoundField->hasInClassInitializer() &&
- !FoundField->getInClassInitializer()) {
- if (ExpectedExpr ToInitializerOrErr = import(FromInitializer))
+ if (ExpectedExpr ToInitializerOrErr = import(FromInitializer)) {
+ // Import of the FromInitializer may result in the setting of
+ // InClassInitializer. If not, set it here.
+ assert(FoundField->hasInClassInitializer() &&
+ "Field should have an in-class initializer if it has an "
+ "expression for it.");
+ if (!FoundField->getInClassInitializer())
FoundField->setInClassInitializer(*ToInitializerOrErr);
- else {
- // We can't return error here,
- // since we already mapped D as imported.
- // FIXME: warning message?
- consumeError(ToInitializerOrErr.takeError());
- return FoundField;
- }
+ } else {
+ return ToInitializerOrErr.takeError();
}
}
return FoundField;
@@ -3702,7 +4076,7 @@ ExpectedDecl ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here)
<< FoundField->getType();
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
}
@@ -3711,7 +4085,6 @@ ExpectedDecl ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
auto ToTInfo = importChecked(Err, D->getTypeSourceInfo());
auto ToBitWidth = importChecked(Err, D->getBitWidth());
auto ToInnerLocStart = importChecked(Err, D->getInnerLocStart());
- auto ToInitializer = importChecked(Err, D->getInClassInitializer());
if (Err)
return std::move(Err);
const Type *ToCapturedVLAType = nullptr;
@@ -3726,14 +4099,32 @@ ExpectedDecl ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
D->getInClassInitStyle()))
return ToField;
+ // We need [[no_unqiue_address]] attributes to be added to FieldDecl, before
+ // we add fields in CXXRecordDecl::addedMember, otherwise record will be
+ // marked as having non-zero size.
+ Err = Importer.ImportAttrs(ToField, D);
+ if (Err)
+ return std::move(Err);
ToField->setAccess(D->getAccess());
ToField->setLexicalDeclContext(LexicalDC);
- if (ToInitializer)
- ToField->setInClassInitializer(ToInitializer);
ToField->setImplicit(D->isImplicit());
if (ToCapturedVLAType)
ToField->setCapturedVLAType(cast<VariableArrayType>(ToCapturedVLAType));
LexicalDC->addDeclInternal(ToField);
+ // Import initializer only after the field was created, it may have recursive
+ // reference to the field.
+ auto ToInitializer = importChecked(Err, D->getInClassInitializer());
+ if (Err)
+ return std::move(Err);
+ if (ToInitializer) {
+ auto *AlreadyImported = ToField->getInClassInitializer();
+ if (AlreadyImported)
+ assert(ToInitializer == AlreadyImported &&
+ "Duplicate import of in-class initializer.");
+ else
+ ToField->setInClassInitializer(ToInitializer);
+ }
+
return ToField;
}
@@ -3775,7 +4166,7 @@ ExpectedDecl ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here)
<< FoundField->getType();
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
}
@@ -3815,22 +4206,34 @@ struct FriendCountAndPosition {
unsigned int IndexOfDecl;
};
-template <class T>
-static FriendCountAndPosition getFriendCountAndPosition(
- const FriendDecl *FD,
- llvm::function_ref<T(const FriendDecl *)> GetCanTypeOrDecl) {
+static bool IsEquivalentFriend(ASTImporter &Importer, FriendDecl *FD1,
+ FriendDecl *FD2) {
+ if ((!FD1->getFriendType()) != (!FD2->getFriendType()))
+ return false;
+
+ if (const TypeSourceInfo *TSI = FD1->getFriendType())
+ return Importer.IsStructurallyEquivalent(
+ TSI->getType(), FD2->getFriendType()->getType(), /*Complain=*/false);
+
+ ASTImporter::NonEquivalentDeclSet NonEquivalentDecls;
+ StructuralEquivalenceContext Ctx(
+ FD1->getASTContext(), FD2->getASTContext(), NonEquivalentDecls,
+ StructuralEquivalenceKind::Default,
+ /* StrictTypeSpelling = */ false, /* Complain = */ false);
+ return Ctx.IsEquivalent(FD1, FD2);
+}
+
+static FriendCountAndPosition getFriendCountAndPosition(ASTImporter &Importer,
+ FriendDecl *FD) {
unsigned int FriendCount = 0;
- llvm::Optional<unsigned int> FriendPosition;
+ std::optional<unsigned int> FriendPosition;
const auto *RD = cast<CXXRecordDecl>(FD->getLexicalDeclContext());
- T TypeOrDecl = GetCanTypeOrDecl(FD);
-
- for (const FriendDecl *FoundFriend : RD->friends()) {
+ for (FriendDecl *FoundFriend : RD->friends()) {
if (FoundFriend == FD) {
FriendPosition = FriendCount;
++FriendCount;
- } else if (!FoundFriend->getFriendDecl() == !FD->getFriendDecl() &&
- GetCanTypeOrDecl(FoundFriend) == TypeOrDecl) {
+ } else if (IsEquivalentFriend(Importer, FD, FoundFriend)) {
++FriendCount;
}
}
@@ -3840,21 +4243,6 @@ static FriendCountAndPosition getFriendCountAndPosition(
return {FriendCount, *FriendPosition};
}
-static FriendCountAndPosition getFriendCountAndPosition(const FriendDecl *FD) {
- if (FD->getFriendType())
- return getFriendCountAndPosition<QualType>(FD, [](const FriendDecl *F) {
- if (TypeSourceInfo *TSI = F->getFriendType())
- return TSI->getType().getCanonicalType();
- llvm_unreachable("Wrong friend object type.");
- });
- else
- return getFriendCountAndPosition<Decl *>(FD, [](const FriendDecl *F) {
- if (Decl *D = F->getFriendDecl())
- return D->getCanonicalDecl();
- llvm_unreachable("Wrong friend object type.");
- });
-}
-
ExpectedDecl ASTNodeImporter::VisitFriendDecl(FriendDecl *D) {
// Import the major distinguishing characteristics of a declaration.
DeclContext *DC, *LexicalDC;
@@ -3865,26 +4253,13 @@ ExpectedDecl ASTNodeImporter::VisitFriendDecl(FriendDecl *D) {
// FriendDecl is not a NamedDecl so we cannot use lookup.
// We try to maintain order and count of redundant friend declarations.
const auto *RD = cast<CXXRecordDecl>(DC);
- FriendDecl *ImportedFriend = RD->getFirstFriend();
SmallVector<FriendDecl *, 2> ImportedEquivalentFriends;
-
- while (ImportedFriend) {
- bool Match = false;
- if (D->getFriendDecl() && ImportedFriend->getFriendDecl()) {
- Match =
- IsStructuralMatch(D->getFriendDecl(), ImportedFriend->getFriendDecl(),
- /*Complain=*/false);
- } else if (D->getFriendType() && ImportedFriend->getFriendType()) {
- Match = Importer.IsStructurallyEquivalent(
- D->getFriendType()->getType(),
- ImportedFriend->getFriendType()->getType(), /*Complain=*/false);
- }
- if (Match)
+ for (FriendDecl *ImportedFriend : RD->friends())
+ if (IsEquivalentFriend(Importer, D, ImportedFriend))
ImportedEquivalentFriends.push_back(ImportedFriend);
- ImportedFriend = ImportedFriend->getNextFriend();
- }
- FriendCountAndPosition CountAndPosition = getFriendCountAndPosition(D);
+ FriendCountAndPosition CountAndPosition =
+ getFriendCountAndPosition(Importer, D);
assert(ImportedEquivalentFriends.size() <= CountAndPosition.TotalCount &&
"Class with non-matching friends is imported, ODR check wrong?");
@@ -3966,7 +4341,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
Importer.ToDiag(FoundIvar->getLocation(), diag::note_odr_value_here)
<< FoundIvar->getType();
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
}
@@ -4134,6 +4509,17 @@ ExpectedDecl ASTNodeImporter::VisitVarDecl(VarDecl *D) {
auto ToVTOrErr = import(D->getDescribedVarTemplate());
if (!ToVTOrErr)
return ToVTOrErr.takeError();
+ } else if (MemberSpecializationInfo *MSI = D->getMemberSpecializationInfo()) {
+ TemplateSpecializationKind SK = MSI->getTemplateSpecializationKind();
+ VarDecl *FromInst = D->getInstantiatedFromStaticDataMember();
+ if (Expected<VarDecl *> ToInstOrErr = import(FromInst))
+ ToVar->setInstantiationOfStaticDataMember(*ToInstOrErr, SK);
+ else
+ return ToInstOrErr.takeError();
+ if (ExpectedSLoc POIOrErr = import(MSI->getPointOfInstantiation()))
+ ToVar->getMemberSpecializationInfo()->setPointOfInstantiation(*POIOrErr);
+ else
+ return POIOrErr.takeError();
}
if (Error Err = ImportInitializer(D, ToVar))
@@ -4178,6 +4564,8 @@ ExpectedDecl ASTNodeImporter::VisitImplicitParamDecl(ImplicitParamDecl *D) {
Error ASTNodeImporter::ImportDefaultArgOfParmVarDecl(
const ParmVarDecl *FromParam, ParmVarDecl *ToParam) {
ToParam->setHasInheritedDefaultArg(FromParam->hasInheritedDefaultArg());
+ ToParam->setExplicitObjectParameterLoc(
+ FromParam->getExplicitObjectParamThisLoc());
ToParam->setKNRPromoted(FromParam->isKNRPromoted());
if (FromParam->hasUninstantiatedDefaultArg()) {
@@ -4197,6 +4585,17 @@ Error ASTNodeImporter::ImportDefaultArgOfParmVarDecl(
return Error::success();
}
+Expected<InheritedConstructor>
+ASTNodeImporter::ImportInheritedConstructor(const InheritedConstructor &From) {
+ Error Err = Error::success();
+ CXXConstructorDecl *ToBaseCtor = importChecked(Err, From.getConstructor());
+ ConstructorUsingShadowDecl *ToShadow =
+ importChecked(Err, From.getShadowDecl());
+ if (Err)
+ return std::move(Err);
+ return InheritedConstructor(ToShadow, ToBaseCtor);
+}
+
ExpectedDecl ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) {
// Parameters are created in the translation unit's context, then moved
// into the function declaration's context afterward.
@@ -4263,7 +4662,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
diag::note_odr_objc_method_here)
<< D->isInstanceMethod() << Name;
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
// Check the number of parameters.
@@ -4275,7 +4674,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
diag::note_odr_objc_method_here)
<< D->isInstanceMethod() << Name;
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
// Check parameter types.
@@ -4291,7 +4690,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
Importer.ToDiag((*FoundP)->getLocation(), diag::note_odr_value_here)
<< (*FoundP)->getType();
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
}
@@ -4304,7 +4703,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
diag::note_odr_objc_method_here)
<< D->isInstanceMethod() << Name;
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
// FIXME: Any other bits we need to merge?
@@ -4395,6 +4794,11 @@ ExpectedDecl ASTNodeImporter::VisitObjCTypeParamDecl(ObjCTypeParamDecl *D) {
ToColonLoc, ToTypeSourceInfo))
return Result;
+ // Only import 'ObjCTypeParamType' after the decl is created.
+ auto ToTypeForDecl = importChecked(Err, D->getTypeForDecl());
+ if (Err)
+ return std::move(Err);
+ Result->setTypeForDecl(ToTypeForDecl);
Result->setLexicalDeclContext(LexicalDC);
return Result;
}
@@ -4693,13 +5097,14 @@ ExpectedDecl ASTNodeImporter::VisitUsingEnumDecl(UsingEnumDecl *D) {
Error Err = Error::success();
auto ToUsingLoc = importChecked(Err, D->getUsingLoc());
auto ToEnumLoc = importChecked(Err, D->getEnumLoc());
- auto ToEnumDecl = importChecked(Err, D->getEnumDecl());
+ auto ToNameLoc = importChecked(Err, D->getLocation());
+ auto *ToEnumType = importChecked(Err, D->getEnumType());
if (Err)
return std::move(Err);
UsingEnumDecl *ToUsingEnum;
if (GetImportedOrCreateDecl(ToUsingEnum, D, Importer.getToContext(), DC,
- ToUsingLoc, ToEnumLoc, Loc, ToEnumDecl))
+ ToUsingLoc, ToEnumLoc, ToNameLoc, ToEnumType))
return ToUsingEnum;
ToUsingEnum->setLexicalDeclContext(LexicalDC);
@@ -4736,9 +5141,29 @@ ExpectedDecl ASTNodeImporter::VisitUsingShadowDecl(UsingShadowDecl *D) {
return ToTargetOrErr.takeError();
UsingShadowDecl *ToShadow;
- if (GetImportedOrCreateDecl(ToShadow, D, Importer.getToContext(), DC, Loc,
- Name, *ToIntroducerOrErr, *ToTargetOrErr))
- return ToShadow;
+ if (auto *FromConstructorUsingShadow =
+ dyn_cast<ConstructorUsingShadowDecl>(D)) {
+ Error Err = Error::success();
+ ConstructorUsingShadowDecl *Nominated = importChecked(
+ Err, FromConstructorUsingShadow->getNominatedBaseClassShadowDecl());
+ if (Err)
+ return std::move(Err);
+ // The 'Target' parameter of ConstructorUsingShadowDecl constructor
+ // is really the "NominatedBaseClassShadowDecl" value if it exists
+ // (see code of ConstructorUsingShadowDecl::ConstructorUsingShadowDecl).
+ // We should pass the NominatedBaseClassShadowDecl to it (if non-null) to
+ // get the correct values.
+ if (GetImportedOrCreateDecl<ConstructorUsingShadowDecl>(
+ ToShadow, D, Importer.getToContext(), DC, Loc,
+ cast<UsingDecl>(*ToIntroducerOrErr),
+ Nominated ? Nominated : *ToTargetOrErr,
+ FromConstructorUsingShadow->constructsVirtualBase()))
+ return ToShadow;
+ } else {
+ if (GetImportedOrCreateDecl(ToShadow, D, Importer.getToContext(), DC, Loc,
+ Name, *ToIntroducerOrErr, *ToTargetOrErr))
+ return ToShadow;
+ }
ToShadow->setLexicalDeclContext(LexicalDC);
ToShadow->setAccess(D->getAccess());
@@ -4798,6 +5223,35 @@ ExpectedDecl ASTNodeImporter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
return ToUsingDir;
}
+ExpectedDecl ASTNodeImporter::VisitUsingPackDecl(UsingPackDecl *D) {
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ NamedDecl *ToD = nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
+ if (ToD)
+ return ToD;
+
+ auto ToInstantiatedFromUsingOrErr =
+ Importer.Import(D->getInstantiatedFromUsingDecl());
+ if (!ToInstantiatedFromUsingOrErr)
+ return ToInstantiatedFromUsingOrErr.takeError();
+ SmallVector<NamedDecl *, 4> Expansions(D->expansions().size());
+ if (Error Err = ImportArrayChecked(D->expansions(), Expansions.begin()))
+ return std::move(Err);
+
+ UsingPackDecl *ToUsingPack;
+ if (GetImportedOrCreateDecl(ToUsingPack, D, Importer.getToContext(), DC,
+ cast<NamedDecl>(*ToInstantiatedFromUsingOrErr),
+ Expansions))
+ return ToUsingPack;
+
+ addDeclToContexts(D, ToUsingPack);
+
+ return ToUsingPack;
+}
+
ExpectedDecl ASTNodeImporter::VisitUnresolvedUsingValueDecl(
UnresolvedUsingValueDecl *D) {
DeclContext *DC, *LexicalDC;
@@ -5182,7 +5636,7 @@ ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
Importer.FromDiag(D->getLocation(),
diag::note_odr_objc_missing_superclass);
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
}
@@ -5221,7 +5675,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
Importer.ToDiag(FoundProp->getLocation(), diag::note_odr_value_here)
<< FoundProp->getType();
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
// FIXME: Check property attributes, getters, setters, etc.?
@@ -5326,7 +5780,7 @@ ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
<< D->getPropertyDecl()->getDeclName()
<< (D->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic);
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
// For @synthesize, check that we have the same
@@ -5341,7 +5795,7 @@ ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
diag::note_odr_objc_synthesize_ivar_here)
<< D->getPropertyIvarDecl()->getDeclName();
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
// Merge the existing implementation with the new implementation.
@@ -5379,28 +5833,12 @@ ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
if (const TypeConstraint *TC = D->getTypeConstraint()) {
Error Err = Error::success();
- auto ToNNS = importChecked(Err, TC->getNestedNameSpecifierLoc());
- auto ToName = importChecked(Err, TC->getConceptNameInfo().getName());
- auto ToNameLoc = importChecked(Err, TC->getConceptNameInfo().getLoc());
- auto ToFoundDecl = importChecked(Err, TC->getFoundDecl());
- auto ToNamedConcept = importChecked(Err, TC->getNamedConcept());
+ auto ToConceptRef = importChecked(Err, TC->getConceptReference());
auto ToIDC = importChecked(Err, TC->getImmediatelyDeclaredConstraint());
if (Err)
return std::move(Err);
- TemplateArgumentListInfo ToTAInfo;
- const auto *ASTTemplateArgs = TC->getTemplateArgsAsWritten();
- if (ASTTemplateArgs)
- if (Error Err = ImportTemplateArgumentListInfo(*ASTTemplateArgs,
- ToTAInfo))
- return std::move(Err);
-
- ToD->setTypeConstraint(ToNNS, DeclarationNameInfo(ToName, ToNameLoc),
- ToFoundDecl, ToNamedConcept,
- ASTTemplateArgs ?
- ASTTemplateArgumentListInfo::Create(Importer.getToContext(),
- ToTAInfo) : nullptr,
- ToIDC);
+ ToD->setTypeConstraint(ToConceptRef, ToIDC);
}
if (D->hasDefaultArgument()) {
@@ -5504,6 +5942,18 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
if (ToD)
return ToD;
+ // Should check if a declaration is friend in a dependent context.
+ // Such templates are not linked together in a declaration chain.
+ // The ASTImporter strategy is to map existing forward declarations to
+ // imported ones only if strictly necessary, otherwise import these as new
+ // forward declarations. In case of the "dependent friend" declarations, new
+ // declarations are created, but not linked in a declaration chain.
+ auto IsDependentFriend = [](ClassTemplateDecl *TD) {
+ return TD->getFriendObjectKind() != Decl::FOK_None &&
+ TD->getLexicalDeclContext()->isDependentContext();
+ };
+ bool DependentFriend = IsDependentFriend(D);
+
ClassTemplateDecl *FoundByLookup = nullptr;
// We may already have a template of the same name; try to find and match it.
@@ -5521,7 +5971,15 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
if (!hasSameVisibilityContextAndLinkage(FoundTemplate, D))
continue;
- if (IsStructuralMatch(D, FoundTemplate)) {
+ // FIXME: sufficient conditon for 'IgnoreTemplateParmDepth'?
+ bool IgnoreTemplateParmDepth =
+ (FoundTemplate->getFriendObjectKind() != Decl::FOK_None) !=
+ (D->getFriendObjectKind() != Decl::FOK_None);
+ if (IsStructuralMatch(D, FoundTemplate, /*Complain=*/true,
+ IgnoreTemplateParmDepth)) {
+ if (DependentFriend || IsDependentFriend(FoundTemplate))
+ continue;
+
ClassTemplateDecl *TemplateWithDef =
getTemplateDefinition(FoundTemplate);
if (D->isThisDeclarationADefinition() && TemplateWithDef)
@@ -5596,11 +6054,6 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
D2->setPreviousDecl(Recent);
}
- if (FromTemplated->isCompleteDefinition() &&
- !ToTemplated->isCompleteDefinition()) {
- // FIXME: Import definition!
- }
-
return D2;
}
@@ -5617,8 +6070,8 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl(
// Import template arguments.
SmallVector<TemplateArgument, 2> TemplateArgs;
- if (Error Err = ImportTemplateArguments(
- D->getTemplateArgs().data(), D->getTemplateArgs().size(), TemplateArgs))
+ if (Error Err =
+ ImportTemplateArguments(D->getTemplateArgs().asArray(), TemplateArgs))
return std::move(Err);
// Try to find an existing specialization with these template arguments and
// template parameter list.
@@ -5671,7 +6124,7 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl(
}
} else { // ODR violation.
// FIXME HandleNameConflict
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
}
@@ -5699,10 +6152,10 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl(
CanonInjType = CanonInjType.getCanonicalType();
if (GetImportedOrCreateDecl<ClassTemplatePartialSpecializationDecl>(
- D2, D, Importer.getToContext(), D->getTagKind(), DC,
- *BeginLocOrErr, *IdLocOrErr, ToTPList, ClassTemplate,
- llvm::makeArrayRef(TemplateArgs.data(), TemplateArgs.size()),
- ToTAInfo, CanonInjType,
+ D2, D, Importer.getToContext(), D->getTagKind(), DC, *BeginLocOrErr,
+ *IdLocOrErr, ToTPList, ClassTemplate,
+ llvm::ArrayRef(TemplateArgs.data(), TemplateArgs.size()), ToTAInfo,
+ CanonInjType,
cast_or_null<ClassTemplatePartialSpecializationDecl>(PrevDecl)))
return D2;
@@ -5713,6 +6166,11 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl(
InsertPos))
// Add this partial specialization to the class template.
ClassTemplate->AddPartialSpecialization(PartSpec2, InsertPos);
+ if (Expected<ClassTemplatePartialSpecializationDecl *> ToInstOrErr =
+ import(PartialSpec->getInstantiatedFromMember()))
+ PartSpec2->setInstantiatedFromMember(*ToInstOrErr);
+ else
+ return ToInstOrErr.takeError();
updateLookupTableForTemplateParameters(*ToTPList);
} else { // Not a partial specialization.
@@ -5776,6 +6234,30 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl(
D2->setTemplateSpecializationKind(D->getTemplateSpecializationKind());
+ if (auto P = D->getInstantiatedFrom()) {
+ if (auto *CTD = P.dyn_cast<ClassTemplateDecl *>()) {
+ if (auto CTDorErr = import(CTD))
+ D2->setInstantiationOf(*CTDorErr);
+ } else {
+ auto *CTPSD = cast<ClassTemplatePartialSpecializationDecl *>(P);
+ auto CTPSDOrErr = import(CTPSD);
+ if (!CTPSDOrErr)
+ return CTPSDOrErr.takeError();
+ const TemplateArgumentList &DArgs = D->getTemplateInstantiationArgs();
+ SmallVector<TemplateArgument, 2> D2ArgsVec(DArgs.size());
+ for (unsigned I = 0; I < DArgs.size(); ++I) {
+ const TemplateArgument &DArg = DArgs[I];
+ if (auto ArgOrErr = import(DArg))
+ D2ArgsVec[I] = *ArgOrErr;
+ else
+ return ArgOrErr.takeError();
+ }
+ D2->setInstantiationOf(
+ *CTPSDOrErr,
+ TemplateArgumentList::CreateCopy(Importer.getToContext(), D2ArgsVec));
+ }
+ }
+
if (D->isCompleteDefinition())
if (Error Err = ImportDefinition(D, D2))
return std::move(Err);
@@ -5811,14 +6293,21 @@ ExpectedDecl ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
D->getTemplatedDecl()))
continue;
if (IsStructuralMatch(D, FoundTemplate)) {
- // The Decl in the "From" context has a definition, but in the
- // "To" context we already have a definition.
+ // FIXME Check for ODR error if the two definitions have
+ // different initializers?
VarTemplateDecl *FoundDef = getTemplateDefinition(FoundTemplate);
- if (D->isThisDeclarationADefinition() && FoundDef)
- // FIXME Check for ODR error if the two definitions have
- // different initializers?
- return Importer.MapImported(D, FoundDef);
-
+ if (D->getDeclContext()->isRecord()) {
+ assert(FoundTemplate->getDeclContext()->isRecord() &&
+ "Member variable template imported as non-member, "
+ "inconsistent imported AST?");
+ if (FoundDef)
+ return Importer.MapImported(D, FoundDef);
+ if (!D->isThisDeclarationADefinition())
+ return Importer.MapImported(D, FoundTemplate);
+ } else {
+ if (FoundDef && D->isThisDeclarationADefinition())
+ return Importer.MapImported(D, FoundDef);
+ }
FoundByLookup = FoundTemplate;
break;
}
@@ -5879,11 +6368,6 @@ ExpectedDecl ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
ToVarTD->setPreviousDecl(Recent);
}
- if (DTemplated->isThisDeclarationADefinition() &&
- !ToTemplated->isThisDeclarationADefinition()) {
- // FIXME: Import definition!
- }
-
return ToVarTD;
}
@@ -5920,8 +6404,8 @@ ExpectedDecl ASTNodeImporter::VisitVarTemplateSpecializationDecl(
// Import template arguments.
SmallVector<TemplateArgument, 2> TemplateArgs;
- if (Error Err = ImportTemplateArguments(
- D->getTemplateArgs().data(), D->getTemplateArgs().size(), TemplateArgs))
+ if (Error Err =
+ ImportTemplateArguments(D->getTemplateArgs().asArray(), TemplateArgs))
return std::move(Err);
// Try to find an existing specialization with these template arguments.
@@ -5944,19 +6428,11 @@ ExpectedDecl ASTNodeImporter::VisitVarTemplateSpecializationDecl(
}
}
} else {
- // Import the type.
- QualType T;
- if (Error Err = importInto(T, D->getType()))
- return std::move(Err);
-
- auto TInfoOrErr = import(D->getTypeSourceInfo());
- if (!TInfoOrErr)
- return TInfoOrErr.takeError();
-
TemplateArgumentListInfo ToTAInfo;
- if (Error Err = ImportTemplateArgumentListInfo(
- D->getTemplateArgsInfo(), ToTAInfo))
- return std::move(Err);
+ if (const ASTTemplateArgumentListInfo *Args = D->getTemplateArgsInfo()) {
+ if (Error Err = ImportTemplateArgumentListInfo(*Args, ToTAInfo))
+ return std::move(Err);
+ }
using PartVarSpecDecl = VarTemplatePartialSpecializationDecl;
// Create a new specialization.
@@ -5976,7 +6452,7 @@ ExpectedDecl ASTNodeImporter::VisitVarTemplateSpecializationDecl(
PartVarSpecDecl *ToPartial;
if (GetImportedOrCreateDecl(ToPartial, D, Importer.getToContext(), DC,
*BeginLocOrErr, *IdLocOrErr, *ToTPListOrErr,
- VarTemplate, T, *TInfoOrErr,
+ VarTemplate, QualType(), nullptr,
D->getStorageClass(), TemplateArgs, ArgInfos))
return ToPartial;
@@ -5997,11 +6473,21 @@ ExpectedDecl ASTNodeImporter::VisitVarTemplateSpecializationDecl(
} else { // Full specialization
if (GetImportedOrCreateDecl(D2, D, Importer.getToContext(), DC,
*BeginLocOrErr, *IdLocOrErr, VarTemplate,
- T, *TInfoOrErr,
- D->getStorageClass(), TemplateArgs))
+ QualType(), nullptr, D->getStorageClass(),
+ TemplateArgs))
return D2;
}
+ QualType T;
+ if (Error Err = importInto(T, D->getType()))
+ return std::move(Err);
+ D2->setType(T);
+
+ auto TInfoOrErr = import(D->getTypeSourceInfo());
+ if (!TInfoOrErr)
+ return TInfoOrErr.takeError();
+ D2->setTypeSourceInfo(*TInfoOrErr);
+
if (D->getPointOfInstantiation().isValid()) {
if (ExpectedSLoc POIOrErr = import(D->getPointOfInstantiation()))
D2->setPointOfInstantiation(*POIOrErr);
@@ -6088,20 +6574,24 @@ ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
if (Error Err = importInto(TemplatedFD, D->getTemplatedDecl()))
return std::move(Err);
- // Template parameters of the ClassTemplateDecl and FunctionTemplateDecl are
- // shared, if the FunctionTemplateDecl is a deduction guide for the class.
- // At import the ClassTemplateDecl object is always created first (FIXME: is
- // this really true?) because the dependency, then the FunctionTemplateDecl.
- // The DeclContext of the template parameters is changed when the
- // FunctionTemplateDecl is created, but was set already when the class
- // template was created. So here it is not the TU (default value) any more.
- // FIXME: The DeclContext of the parameters is now set finally to the
- // CXXDeductionGuideDecl object that was imported later. This may not be the
- // same that is in the original AST, specially if there are multiple deduction
- // guides.
- DeclContext *OldParamDC = nullptr;
- if (Params->size() > 0)
- OldParamDC = Params->getParam(0)->getDeclContext();
+ // At creation of the template the template parameters are "adopted"
+ // (DeclContext is changed). After this possible change the lookup table
+ // must be updated.
+ // At deduction guides the DeclContext of the template parameters may be
+ // different from what we would expect, it may be the class template, or a
+ // probably different CXXDeductionGuideDecl. This may come from the fact that
+ // the template parameter objects may be shared between deduction guides or
+ // the class template, and at creation of multiple FunctionTemplateDecl
+ // objects (for deduction guides) the same parameters are re-used. The
+ // "adoption" happens multiple times with different parent, even recursively
+ // for TemplateTemplateParmDecl. The same happens at import when the
+ // FunctionTemplateDecl objects are created, but in different order.
+ // In this way the DeclContext of these template parameters is not necessarily
+ // the same as in the "from" context.
+ SmallVector<DeclContext *, 2> OldParamDC;
+ OldParamDC.reserve(Params->size());
+ llvm::transform(*Params, std::back_inserter(OldParamDC),
+ [](NamedDecl *ND) { return ND->getDeclContext(); });
FunctionTemplateDecl *ToFunc;
if (GetImportedOrCreateDecl(ToFunc, D, Importer.getToContext(), DC, Loc, Name,
@@ -6112,8 +6602,13 @@ ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
ToFunc->setAccess(D->getAccess());
ToFunc->setLexicalDeclContext(LexicalDC);
- LexicalDC->addDeclInternal(ToFunc);
- updateLookupTableForTemplateParameters(*Params, OldParamDC);
+ addDeclToContexts(D, ToFunc);
+
+ ASTImporterLookupTable *LT = Importer.SharedState->getLookupTable();
+ if (LT && !OldParamDC.empty()) {
+ for (unsigned int I = 0; I < OldParamDC.size(); ++I)
+ LT->updateForced(Params->getParam(I), OldParamDC[I]);
+ }
if (FoundByLookup) {
auto *Recent =
@@ -6139,13 +6634,13 @@ ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
ExpectedStmt ASTNodeImporter::VisitStmt(Stmt *S) {
Importer.FromDiag(S->getBeginLoc(), diag::err_unsupported_ast_node)
<< S->getStmtClassName();
- return make_error<ImportError>(ImportError::UnsupportedConstruct);
+ return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct);
}
ExpectedStmt ASTNodeImporter::VisitGCCAsmStmt(GCCAsmStmt *S) {
if (Importer.returnWithErrorInTest())
- return make_error<ImportError>(ImportError::UnsupportedConstruct);
+ return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct);
SmallVector<IdentifierInfo *, 4> Names;
for (unsigned I = 0, E = S->getNumOutputs(); I != E; I++) {
IdentifierInfo *ToII = Importer.Import(S->getOutputIdentifier(I));
@@ -6258,9 +6753,10 @@ ExpectedStmt ASTNodeImporter::VisitCompoundStmt(CompoundStmt *S) {
if (!ToRBracLocOrErr)
return ToRBracLocOrErr.takeError();
- return CompoundStmt::Create(
- Importer.getToContext(), ToStmts,
- *ToLBracLocOrErr, *ToRBracLocOrErr);
+ FPOptionsOverride FPO =
+ S->hasStoredFPFeatures() ? S->getStoredFPFeatures() : FPOptionsOverride();
+ return CompoundStmt::Create(Importer.getToContext(), ToStmts, FPO,
+ *ToLBracLocOrErr, *ToRBracLocOrErr);
}
ExpectedStmt ASTNodeImporter::VisitCaseStmt(CaseStmt *S) {
@@ -6339,7 +6835,7 @@ ExpectedStmt ASTNodeImporter::VisitIfStmt(IfStmt *S) {
if (Err)
return std::move(Err);
- return IfStmt::Create(Importer.getToContext(), ToIfLoc, S->isConstexpr(),
+ return IfStmt::Create(Importer.getToContext(), ToIfLoc, S->getStatementKind(),
ToInit, ToConditionVariable, ToCond, ToLParenLoc,
ToRParenLoc, ToThen, ToElseLoc, ToElse);
}
@@ -6515,8 +7011,8 @@ ExpectedStmt ASTNodeImporter::VisitCXXTryStmt(CXXTryStmt *S) {
return ToHandlerOrErr.takeError();
}
- return CXXTryStmt::Create(
- Importer.getToContext(), *ToTryLocOrErr,*ToTryBlockOrErr, ToHandlers);
+ return CXXTryStmt::Create(Importer.getToContext(), *ToTryLocOrErr,
+ cast<CompoundStmt>(*ToTryBlockOrErr), ToHandlers);
}
ExpectedStmt ASTNodeImporter::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
@@ -6652,11 +7148,12 @@ ExpectedStmt ASTNodeImporter::VisitObjCAutoreleasePoolStmt(
ExpectedStmt ASTNodeImporter::VisitExpr(Expr *E) {
Importer.FromDiag(E->getBeginLoc(), diag::err_unsupported_ast_node)
<< E->getStmtClassName();
- return make_error<ImportError>(ImportError::UnsupportedConstruct);
+ return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct);
}
ExpectedStmt ASTNodeImporter::VisitSourceLocExpr(SourceLocExpr *E) {
Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
auto BLoc = importChecked(Err, E->getBeginLoc());
auto RParenLoc = importChecked(Err, E->getEndLoc());
if (Err)
@@ -6666,8 +7163,8 @@ ExpectedStmt ASTNodeImporter::VisitSourceLocExpr(SourceLocExpr *E) {
return ParentContextOrErr.takeError();
return new (Importer.getToContext())
- SourceLocExpr(Importer.getToContext(), E->getIdentKind(), BLoc, RParenLoc,
- *ParentContextOrErr);
+ SourceLocExpr(Importer.getToContext(), E->getIdentKind(), ToType, BLoc,
+ RParenLoc, *ParentContextOrErr);
}
ExpectedStmt ASTNodeImporter::VisitVAArgExpr(VAArgExpr *E) {
@@ -6710,6 +7207,39 @@ ExpectedStmt ASTNodeImporter::VisitChooseExpr(ChooseExpr *E) {
ToRParenLoc, CondIsTrue);
}
+ExpectedStmt ASTNodeImporter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
+ Error Err = Error::success();
+ auto *ToSrcExpr = importChecked(Err, E->getSrcExpr());
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ auto ToBuiltinLoc = importChecked(Err, E->getBuiltinLoc());
+ auto ToType = importChecked(Err, E->getType());
+ auto *ToTSI = importChecked(Err, E->getTypeSourceInfo());
+ if (Err)
+ return std::move(Err);
+
+ return new (Importer.getToContext())
+ ConvertVectorExpr(ToSrcExpr, ToTSI, ToType, E->getValueKind(),
+ E->getObjectKind(), ToBuiltinLoc, ToRParenLoc);
+}
+
+ExpectedStmt ASTNodeImporter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
+ Error Err = Error::success();
+ auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
+ auto ToBeginLoc = importChecked(Err, E->getBeginLoc());
+ auto ToType = importChecked(Err, E->getType());
+ const unsigned NumSubExprs = E->getNumSubExprs();
+
+ llvm::SmallVector<Expr *, 8> ToSubExprs;
+ llvm::ArrayRef<Expr *> FromSubExprs(E->getSubExprs(), NumSubExprs);
+ ToSubExprs.resize(NumSubExprs);
+
+ if ((Err = ImportContainerChecked(FromSubExprs, ToSubExprs)))
+ return std::move(Err);
+
+ return new (Importer.getToContext()) ShuffleVectorExpr(
+ Importer.getToContext(), ToSubExprs, ToType, ToBeginLoc, ToRParenLoc);
+}
+
ExpectedStmt ASTNodeImporter::VisitGNUNullExpr(GNUNullExpr *E) {
ExpectedType TypeOrErr = import(E->getType());
if (!TypeOrErr)
@@ -6726,7 +7256,14 @@ ExpectedStmt
ASTNodeImporter::VisitGenericSelectionExpr(GenericSelectionExpr *E) {
Error Err = Error::success();
auto ToGenericLoc = importChecked(Err, E->getGenericLoc());
- auto *ToControllingExpr = importChecked(Err, E->getControllingExpr());
+ Expr *ToControllingExpr = nullptr;
+ TypeSourceInfo *ToControllingType = nullptr;
+ if (E->isExprPredicate())
+ ToControllingExpr = importChecked(Err, E->getControllingExpr());
+ else
+ ToControllingType = importChecked(Err, E->getControllingType());
+ assert((ToControllingExpr || ToControllingType) &&
+ "Either the controlling expr or type must be nonnull");
auto ToDefaultLoc = importChecked(Err, E->getDefaultLoc());
auto ToRParenLoc = importChecked(Err, E->getRParenLoc());
if (Err)
@@ -6744,15 +7281,27 @@ ASTNodeImporter::VisitGenericSelectionExpr(GenericSelectionExpr *E) {
const ASTContext &ToCtx = Importer.getToContext();
if (E->isResultDependent()) {
+ if (ToControllingExpr) {
+ return GenericSelectionExpr::Create(
+ ToCtx, ToGenericLoc, ToControllingExpr, llvm::ArrayRef(ToAssocTypes),
+ llvm::ArrayRef(ToAssocExprs), ToDefaultLoc, ToRParenLoc,
+ E->containsUnexpandedParameterPack());
+ }
return GenericSelectionExpr::Create(
- ToCtx, ToGenericLoc, ToControllingExpr,
- llvm::makeArrayRef(ToAssocTypes), llvm::makeArrayRef(ToAssocExprs),
- ToDefaultLoc, ToRParenLoc, E->containsUnexpandedParameterPack());
+ ToCtx, ToGenericLoc, ToControllingType, llvm::ArrayRef(ToAssocTypes),
+ llvm::ArrayRef(ToAssocExprs), ToDefaultLoc, ToRParenLoc,
+ E->containsUnexpandedParameterPack());
}
+ if (ToControllingExpr) {
+ return GenericSelectionExpr::Create(
+ ToCtx, ToGenericLoc, ToControllingExpr, llvm::ArrayRef(ToAssocTypes),
+ llvm::ArrayRef(ToAssocExprs), ToDefaultLoc, ToRParenLoc,
+ E->containsUnexpandedParameterPack(), E->getResultIndex());
+ }
return GenericSelectionExpr::Create(
- ToCtx, ToGenericLoc, ToControllingExpr, llvm::makeArrayRef(ToAssocTypes),
- llvm::makeArrayRef(ToAssocExprs), ToDefaultLoc, ToRParenLoc,
+ ToCtx, ToGenericLoc, ToControllingType, llvm::ArrayRef(ToAssocTypes),
+ llvm::ArrayRef(ToAssocExprs), ToDefaultLoc, ToRParenLoc,
E->containsUnexpandedParameterPack(), E->getResultIndex());
}
@@ -6766,7 +7315,8 @@ ExpectedStmt ASTNodeImporter::VisitPredefinedExpr(PredefinedExpr *E) {
return std::move(Err);
return PredefinedExpr::Create(Importer.getToContext(), ToBeginLoc, ToType,
- E->getIdentKind(), ToFunctionName);
+ E->getIdentKind(), E->isTransparent(),
+ ToFunctionName);
}
ExpectedStmt ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
@@ -6804,6 +7354,7 @@ ExpectedStmt ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
E->getValueKind(), ToFoundD, ToResInfo, E->isNonOdrUse());
if (E->hadMultipleCandidates())
ToE->setHadMultipleCandidates(true);
+ ToE->setIsImmediateEscalating(E->isImmediateEscalating());
return ToE;
}
@@ -7047,10 +7598,17 @@ ExpectedStmt ASTNodeImporter::VisitUnaryOperator(UnaryOperator *E) {
if (Err)
return std::move(Err);
- return UnaryOperator::Create(
- Importer.getToContext(), ToSubExpr, E->getOpcode(), ToType,
- E->getValueKind(), E->getObjectKind(), ToOperatorLoc, E->canOverflow(),
- E->getFPOptionsOverride());
+ auto *UO = UnaryOperator::CreateEmpty(Importer.getToContext(),
+ E->hasStoredFPFeatures());
+ UO->setType(ToType);
+ UO->setSubExpr(ToSubExpr);
+ UO->setOpcode(E->getOpcode());
+ UO->setOperatorLoc(ToOperatorLoc);
+ UO->setCanOverflow(E->canOverflow());
+ if (E->hasStoredFPFeatures())
+ UO->setStoredFPFeatures(E->getStoredFPFeatures());
+
+ return UO;
}
ExpectedStmt
@@ -7094,7 +7652,7 @@ ExpectedStmt ASTNodeImporter::VisitBinaryOperator(BinaryOperator *E) {
return BinaryOperator::Create(
Importer.getToContext(), ToLHS, ToRHS, E->getOpcode(), ToType,
E->getValueKind(), E->getObjectKind(), ToOperatorLoc,
- E->getFPFeatures(Importer.getFromContext().getLangOpts()));
+ E->getFPFeatures());
}
ExpectedStmt ASTNodeImporter::VisitConditionalOperator(ConditionalOperator *E) {
@@ -7133,6 +7691,17 @@ ASTNodeImporter::VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
E->getObjectKind());
}
+ExpectedStmt ASTNodeImporter::VisitCXXRewrittenBinaryOperator(
+ CXXRewrittenBinaryOperator *E) {
+ Error Err = Error::success();
+ auto ToSemanticForm = importChecked(Err, E->getSemanticForm());
+ if (Err)
+ return std::move(Err);
+
+ return new (Importer.getToContext())
+ CXXRewrittenBinaryOperator(ToSemanticForm, E->isReversed());
+}
+
ExpectedStmt ASTNodeImporter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
Error Err = Error::success();
auto ToBeginLoc = importChecked(Err, E->getBeginLoc());
@@ -7205,7 +7774,7 @@ ASTNodeImporter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
return CompoundAssignOperator::Create(
Importer.getToContext(), ToLHS, ToRHS, E->getOpcode(), ToType,
E->getValueKind(), E->getObjectKind(), ToOperatorLoc,
- E->getFPFeatures(Importer.getFromContext().getLangOpts()),
+ E->getFPFeatures(),
ToComputationLHSType, ToComputationResultType);
}
@@ -7293,9 +7862,21 @@ ExpectedStmt ASTNodeImporter::VisitExplicitCastExpr(ExplicitCastExpr *E) {
*ToLParenLocOrErr, OCE->getBridgeKind(), E->getCastKind(),
*ToBridgeKeywordLocOrErr, ToTypeInfoAsWritten, ToSubExpr);
}
+ case Stmt::BuiltinBitCastExprClass: {
+ auto *BBC = cast<BuiltinBitCastExpr>(E);
+ ExpectedSLoc ToKWLocOrErr = import(BBC->getBeginLoc());
+ if (!ToKWLocOrErr)
+ return ToKWLocOrErr.takeError();
+ ExpectedSLoc ToRParenLocOrErr = import(BBC->getEndLoc());
+ if (!ToRParenLocOrErr)
+ return ToRParenLocOrErr.takeError();
+ return new (Importer.getToContext()) BuiltinBitCastExpr(
+ ToType, E->getValueKind(), E->getCastKind(), ToSubExpr,
+ ToTypeInfoAsWritten, *ToKWLocOrErr, *ToRParenLocOrErr);
+ }
default:
llvm_unreachable("Cast expression of unsupported type!");
- return make_error<ImportError>(ImportError::UnsupportedConstruct);
+ return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct);
}
}
@@ -7414,15 +7995,23 @@ ExpectedStmt ASTNodeImporter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
// see VisitParmVarDecl).
ParmVarDecl *ToParam = *ToParamOrErr;
if (!ToParam->getDefaultArg()) {
- Optional<ParmVarDecl *> FromParam = Importer.getImportedFromDecl(ToParam);
+ std::optional<ParmVarDecl *> FromParam =
+ Importer.getImportedFromDecl(ToParam);
assert(FromParam && "ParmVarDecl was not imported?");
if (Error Err = ImportDefaultArgOfParmVarDecl(*FromParam, ToParam))
return std::move(Err);
}
-
+ Expr *RewrittenInit = nullptr;
+ if (E->hasRewrittenInit()) {
+ ExpectedExpr ExprOrErr = import(E->getRewrittenExpr());
+ if (!ExprOrErr)
+ return ExprOrErr.takeError();
+ RewrittenInit = ExprOrErr.get();
+ }
return CXXDefaultArgExpr::Create(Importer.getToContext(), *ToUsedLocOrErr,
- *ToParamOrErr, *UsedContextOrErr);
+ *ToParamOrErr, RewrittenInit,
+ *UsedContextOrErr);
}
ExpectedStmt
@@ -7540,16 +8129,14 @@ ExpectedStmt ASTNodeImporter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
if (Err)
return std::move(Err);
- Optional<unsigned> Length;
+ std::optional<unsigned> Length;
if (!E->isValueDependent())
Length = E->getPackLength();
SmallVector<TemplateArgument, 8> ToPartialArguments;
if (E->isPartiallySubstituted()) {
- if (Error Err = ImportTemplateArguments(
- E->getPartialArguments().data(),
- E->getPartialArguments().size(),
- ToPartialArguments))
+ if (Error Err = ImportTemplateArguments(E->getPartialArguments(),
+ ToPartialArguments))
return std::move(Err);
}
@@ -7615,12 +8202,14 @@ ExpectedStmt ASTNodeImporter::VisitCXXConstructExpr(CXXConstructExpr *E) {
if (Error Err = ImportContainerChecked(E->arguments(), ToArgs))
return std::move(Err);
- return CXXConstructExpr::Create(
+ CXXConstructExpr *ToE = CXXConstructExpr::Create(
Importer.getToContext(), ToType, ToLocation, ToConstructor,
E->isElidable(), ToArgs, E->hadMultipleCandidates(),
E->isListInitialization(), E->isStdInitListInitialization(),
E->requiresZeroInitialization(), E->getConstructionKind(),
ToParenOrBraceRange);
+ ToE->setIsImmediateEscalating(E->isImmediateEscalating());
+ return ToE;
}
ExpectedStmt ASTNodeImporter::VisitExprWithCleanups(ExprWithCleanups *E) {
@@ -7663,8 +8252,8 @@ ExpectedStmt ASTNodeImporter::VisitCXXThisExpr(CXXThisExpr *E) {
if (!ToLocationOrErr)
return ToLocationOrErr.takeError();
- return new (Importer.getToContext()) CXXThisExpr(
- *ToLocationOrErr, *ToTypeOrErr, E->isImplicit());
+ return CXXThisExpr::Create(Importer.getToContext(), *ToLocationOrErr,
+ *ToTypeOrErr, E->isImplicit());
}
ExpectedStmt ASTNodeImporter::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) {
@@ -7676,8 +8265,8 @@ ExpectedStmt ASTNodeImporter::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) {
if (!ToLocationOrErr)
return ToLocationOrErr.takeError();
- return new (Importer.getToContext()) CXXBoolLiteralExpr(
- E->getValue(), *ToTypeOrErr, *ToLocationOrErr);
+ return CXXBoolLiteralExpr::Create(Importer.getToContext(), E->getValue(),
+ *ToTypeOrErr, *ToLocationOrErr);
}
ExpectedStmt ASTNodeImporter::VisitMemberExpr(MemberExpr *E) {
@@ -7839,7 +8428,7 @@ ExpectedStmt ASTNodeImporter::VisitCXXUnresolvedConstructExpr(
return CXXUnresolvedConstructExpr::Create(
Importer.getToContext(), ToType, ToTypeSourceInfo, ToLParenLoc,
- llvm::makeArrayRef(ToArgs), ToRParenLoc);
+ llvm::ArrayRef(ToArgs), ToRParenLoc, E->isListInitialization());
}
ExpectedStmt
@@ -7881,10 +8470,13 @@ ASTNodeImporter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
if (!ToTemplateKeywordLocOrErr)
return ToTemplateKeywordLocOrErr.takeError();
+ const bool KnownDependent =
+ (E->getDependence() & ExprDependence::TypeValue) ==
+ ExprDependence::TypeValue;
return UnresolvedLookupExpr::Create(
Importer.getToContext(), *ToNamingClassOrErr, *ToQualifierLocOrErr,
*ToTemplateKeywordLocOrErr, ToNameInfo, E->requiresADL(), &ToTAInfo,
- ToDecls.begin(), ToDecls.end());
+ ToDecls.begin(), ToDecls.end(), KnownDependent);
}
return UnresolvedLookupExpr::Create(
@@ -8101,8 +8693,31 @@ ExpectedStmt ASTNodeImporter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E) {
if (!UsedContextOrErr)
return UsedContextOrErr.takeError();
- return CXXDefaultInitExpr::Create(
- Importer.getToContext(), *ToBeginLocOrErr, *ToFieldOrErr, *UsedContextOrErr);
+ FieldDecl *ToField = *ToFieldOrErr;
+ assert(ToField->hasInClassInitializer() &&
+ "Field should have in-class initializer if there is a default init "
+ "expression that uses it.");
+ if (!ToField->getInClassInitializer()) {
+ // The in-class initializer may be not yet set in "To" AST even if the
+ // field is already there. This must be set here to make construction of
+ // CXXDefaultInitExpr work.
+ auto ToInClassInitializerOrErr =
+ import(E->getField()->getInClassInitializer());
+ if (!ToInClassInitializerOrErr)
+ return ToInClassInitializerOrErr.takeError();
+ ToField->setInClassInitializer(*ToInClassInitializerOrErr);
+ }
+
+ Expr *RewrittenInit = nullptr;
+ if (E->hasRewrittenInit()) {
+ ExpectedExpr ExprOrErr = import(E->getRewrittenExpr());
+ if (!ExprOrErr)
+ return ExprOrErr.takeError();
+ RewrittenInit = ExprOrErr.get();
+ }
+
+ return CXXDefaultInitExpr::Create(Importer.getToContext(), *ToBeginLocOrErr,
+ ToField, *UsedContextOrErr, RewrittenInit);
}
ExpectedStmt ASTNodeImporter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
@@ -8141,7 +8756,7 @@ ExpectedStmt ASTNodeImporter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
ToOperatorLoc, ToRParenLoc, ToAngleBrackets);
} else {
llvm_unreachable("Unknown cast type");
- return make_error<ImportError>();
+ return make_error<ASTImportError>();
}
}
@@ -8150,14 +8765,14 @@ ExpectedStmt ASTNodeImporter::VisitSubstNonTypeTemplateParmExpr(
Error Err = Error::success();
auto ToType = importChecked(Err, E->getType());
auto ToExprLoc = importChecked(Err, E->getExprLoc());
- auto ToParameter = importChecked(Err, E->getParameter());
+ auto ToAssociatedDecl = importChecked(Err, E->getAssociatedDecl());
auto ToReplacement = importChecked(Err, E->getReplacement());
if (Err)
return std::move(Err);
return new (Importer.getToContext()) SubstNonTypeTemplateParmExpr(
- ToType, E->getValueKind(), ToExprLoc, ToParameter,
- E->isReferenceParameter(), ToReplacement);
+ ToType, E->getValueKind(), ToExprLoc, ToReplacement, ToAssociatedDecl,
+ E->getIndex(), E->getPackIndex(), E->isReferenceParameter());
}
ExpectedStmt ASTNodeImporter::VisitTypeTraitExpr(TypeTraitExpr *E) {
@@ -8258,13 +8873,13 @@ ASTImporter::ASTImporter(ASTContext &ToContext, FileManager &ToFileManager,
ASTImporter::~ASTImporter() = default;
-Optional<unsigned> ASTImporter::getFieldIndex(Decl *F) {
+std::optional<unsigned> ASTImporter::getFieldIndex(Decl *F) {
assert(F && (isa<FieldDecl>(*F) || isa<IndirectFieldDecl>(*F)) &&
"Try to get field index for non-field.");
auto *Owner = dyn_cast<RecordDecl>(F->getDeclContext());
if (!Owner)
- return None;
+ return std::nullopt;
unsigned Index = 0;
for (const auto *D : Owner->decls()) {
@@ -8277,7 +8892,7 @@ Optional<unsigned> ASTImporter::getFieldIndex(Decl *F) {
llvm_unreachable("Field was not found in its parent context.");
- return None;
+ return std::nullopt;
}
ASTImporter::FoundDeclsTy
@@ -8336,10 +8951,10 @@ ASTImporter::Import(ExprWithCleanups::CleanupObject From) {
// FIXME: Handle BlockDecl when we implement importing BlockExpr in
// ASTNodeImporter.
- return make_error<ImportError>(ImportError::UnsupportedConstruct);
+ return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct);
}
-Expected<const Type *> ASTImporter::Import(const Type *FromT) {
+ExpectedTypePtr ASTImporter::Import(const Type *FromT) {
if (!FromT)
return FromT;
@@ -8349,7 +8964,7 @@ Expected<const Type *> ASTImporter::Import(const Type *FromT) {
if (Pos != ImportedTypes.end())
return Pos->second;
- // Import the type
+ // Import the type.
ASTNodeImporter Importer(*this);
ExpectedType ToTOrErr = Importer.Visit(FromT);
if (!ToTOrErr)
@@ -8365,7 +8980,7 @@ Expected<QualType> ASTImporter::Import(QualType FromT) {
if (FromT.isNull())
return QualType{};
- Expected<const Type *> ToTyOrErr = Import(FromT.getTypePtr());
+ ExpectedTypePtr ToTyOrErr = Import(FromT.getTypePtr());
if (!ToTyOrErr)
return ToTyOrErr.takeError();
@@ -8388,67 +9003,301 @@ Expected<TypeSourceInfo *> ASTImporter::Import(TypeSourceInfo *FromTSI) {
return ToContext.getTrivialTypeSourceInfo(*TOrErr, *BeginLocOrErr);
}
-Expected<Attr *> ASTImporter::Import(const Attr *FromAttr) {
+namespace {
+// To use this object, it should be created before the new attribute is created,
+// and destructed after it is created. The construction already performs the
+// import of the data.
+template <typename T> struct AttrArgImporter {
+ AttrArgImporter(const AttrArgImporter<T> &) = delete;
+ AttrArgImporter(AttrArgImporter<T> &&) = default;
+ AttrArgImporter<T> &operator=(const AttrArgImporter<T> &) = delete;
+ AttrArgImporter<T> &operator=(AttrArgImporter<T> &&) = default;
+
+ AttrArgImporter(ASTNodeImporter &I, Error &Err, const T &From)
+ : To(I.importChecked(Err, From)) {}
+
+ const T &value() { return To; }
+
+private:
+ T To;
+};
+
+// To use this object, it should be created before the new attribute is created,
+// and destructed after it is created. The construction already performs the
+// import of the data. The array data is accessible in a pointer form, this form
+// is used by the attribute classes. This object should be created once for the
+// array data to be imported (the array size is not imported, just copied).
+template <typename T> struct AttrArgArrayImporter {
+ AttrArgArrayImporter(const AttrArgArrayImporter<T> &) = delete;
+ AttrArgArrayImporter(AttrArgArrayImporter<T> &&) = default;
+ AttrArgArrayImporter<T> &operator=(const AttrArgArrayImporter<T> &) = delete;
+ AttrArgArrayImporter<T> &operator=(AttrArgArrayImporter<T> &&) = default;
+
+ AttrArgArrayImporter(ASTNodeImporter &I, Error &Err,
+ const llvm::iterator_range<T *> &From,
+ unsigned ArraySize) {
+ if (Err)
+ return;
+ To.reserve(ArraySize);
+ Err = I.ImportContainerChecked(From, To);
+ }
+
+ T *value() { return To.data(); }
+
+private:
+ llvm::SmallVector<T, 2> To;
+};
+
+class AttrImporter {
+ Error Err{Error::success()};
Attr *ToAttr = nullptr;
- SourceRange ToRange;
- if (Error Err = importInto(ToRange, FromAttr->getRange()))
- return std::move(Err);
+ ASTImporter &Importer;
+ ASTNodeImporter NImporter;
+
+public:
+ AttrImporter(ASTImporter &I) : Importer(I), NImporter(I) {}
+
+ // Useful for accessing the imported attribute.
+ template <typename T> T *castAttrAs() { return cast<T>(ToAttr); }
+ template <typename T> const T *castAttrAs() const { return cast<T>(ToAttr); }
+
+ // Create an "importer" for an attribute parameter.
+ // Result of the 'value()' of that object is to be passed to the function
+ // 'importAttr', in the order that is expected by the attribute class.
+ template <class T> AttrArgImporter<T> importArg(const T &From) {
+ return AttrArgImporter<T>(NImporter, Err, From);
+ }
+
+ // Create an "importer" for an attribute parameter that has array type.
+ // Result of the 'value()' of that object is to be passed to the function
+ // 'importAttr', then the size of the array as next argument.
+ template <typename T>
+ AttrArgArrayImporter<T> importArrayArg(const llvm::iterator_range<T *> &From,
+ unsigned ArraySize) {
+ return AttrArgArrayImporter<T>(NImporter, Err, From, ArraySize);
+ }
+
+ // Create an attribute object with the specified arguments.
+ // The 'FromAttr' is the original (not imported) attribute, the 'ImportedArg'
+ // should be values that are passed to the 'Create' function of the attribute.
+ // (The 'Create' with 'ASTContext' first and 'AttributeCommonInfo' last is
+ // used here.) As much data is copied or imported from the old attribute
+ // as possible. The passed arguments should be already imported.
+ // If an import error happens, the internal error is set to it, and any
+ // further import attempt is ignored.
+ template <typename T, typename... Arg>
+ void importAttr(const T *FromAttr, Arg &&...ImportedArg) {
+ static_assert(std::is_base_of<Attr, T>::value,
+ "T should be subclass of Attr.");
+ assert(!ToAttr && "Use one AttrImporter to import one Attribute object.");
+
+ const IdentifierInfo *ToAttrName = Importer.Import(FromAttr->getAttrName());
+ const IdentifierInfo *ToScopeName =
+ Importer.Import(FromAttr->getScopeName());
+ SourceRange ToAttrRange =
+ NImporter.importChecked(Err, FromAttr->getRange());
+ SourceLocation ToScopeLoc =
+ NImporter.importChecked(Err, FromAttr->getScopeLoc());
+
+ if (Err)
+ return;
+
+ AttributeCommonInfo ToI(ToAttrName, ToScopeName, ToAttrRange, ToScopeLoc,
+ FromAttr->getParsedKind(), FromAttr->getForm());
+ // The "SemanticSpelling" is not needed to be passed to the constructor.
+ // That value is recalculated from the SpellingListIndex if needed.
+ ToAttr = T::Create(Importer.getToContext(),
+ std::forward<Arg>(ImportedArg)..., ToI);
+
+ ToAttr->setImplicit(FromAttr->isImplicit());
+ ToAttr->setPackExpansion(FromAttr->isPackExpansion());
+ if (auto *ToInheritableAttr = dyn_cast<InheritableAttr>(ToAttr))
+ ToInheritableAttr->setInherited(FromAttr->isInherited());
+ }
+
+ // Create a clone of the 'FromAttr' and import its source range only.
+ // This causes objects with invalid references to be created if the 'FromAttr'
+ // contains other data that should be imported.
+ void cloneAttr(const Attr *FromAttr) {
+ assert(!ToAttr && "Use one AttrImporter to import one Attribute object.");
+
+ SourceRange ToRange = NImporter.importChecked(Err, FromAttr->getRange());
+ if (Err)
+ return;
+
+ ToAttr = FromAttr->clone(Importer.getToContext());
+ ToAttr->setRange(ToRange);
+ ToAttr->setAttrName(Importer.Import(FromAttr->getAttrName()));
+ }
+
+ // Get the result of the previous import attempt (can be used only once).
+ llvm::Expected<Attr *> getResult() && {
+ if (Err)
+ return std::move(Err);
+ assert(ToAttr && "Attribute should be created.");
+ return ToAttr;
+ }
+};
+} // namespace
+
+Expected<Attr *> ASTImporter::Import(const Attr *FromAttr) {
+ AttrImporter AI(*this);
// FIXME: Is there some kind of AttrVisitor to use here?
switch (FromAttr->getKind()) {
case attr::Aligned: {
auto *From = cast<AlignedAttr>(FromAttr);
- AlignedAttr *To;
- auto CreateAlign = [&](bool IsAlignmentExpr, void *Alignment) {
- return AlignedAttr::Create(ToContext, IsAlignmentExpr, Alignment, ToRange,
- From->getSyntax(),
- From->getSemanticSpelling());
- };
- if (From->isAlignmentExpr()) {
- if (auto ToEOrErr = Import(From->getAlignmentExpr()))
- To = CreateAlign(true, *ToEOrErr);
- else
- return ToEOrErr.takeError();
- } else {
- if (auto ToTOrErr = Import(From->getAlignmentType()))
- To = CreateAlign(false, *ToTOrErr);
- else
- return ToTOrErr.takeError();
- }
- To->setInherited(From->isInherited());
- To->setPackExpansion(From->isPackExpansion());
- To->setImplicit(From->isImplicit());
- ToAttr = To;
+ if (From->isAlignmentExpr())
+ AI.importAttr(From, true, AI.importArg(From->getAlignmentExpr()).value());
+ else
+ AI.importAttr(From, false,
+ AI.importArg(From->getAlignmentType()).value());
+ break;
+ }
+
+ case attr::AlignValue: {
+ auto *From = cast<AlignValueAttr>(FromAttr);
+ AI.importAttr(From, AI.importArg(From->getAlignment()).value());
break;
}
+
case attr::Format: {
const auto *From = cast<FormatAttr>(FromAttr);
- FormatAttr *To;
- IdentifierInfo *ToAttrType = Import(From->getType());
- To = FormatAttr::Create(ToContext, ToAttrType, From->getFormatIdx(),
- From->getFirstArg(), ToRange, From->getSyntax());
- To->setInherited(From->isInherited());
- ToAttr = To;
+ AI.importAttr(From, Import(From->getType()), From->getFormatIdx(),
+ From->getFirstArg());
break;
}
- default:
- // FIXME: 'clone' copies every member but some of them should be imported.
- // Handle other Attrs that have parameters that should be imported.
- ToAttr = FromAttr->clone(ToContext);
- ToAttr->setRange(ToRange);
+
+ case attr::EnableIf: {
+ const auto *From = cast<EnableIfAttr>(FromAttr);
+ AI.importAttr(From, AI.importArg(From->getCond()).value(),
+ From->getMessage());
+ break;
+ }
+
+ case attr::AssertCapability: {
+ const auto *From = cast<AssertCapabilityAttr>(FromAttr);
+ AI.importAttr(From,
+ AI.importArrayArg(From->args(), From->args_size()).value(),
+ From->args_size());
+ break;
+ }
+ case attr::AcquireCapability: {
+ const auto *From = cast<AcquireCapabilityAttr>(FromAttr);
+ AI.importAttr(From,
+ AI.importArrayArg(From->args(), From->args_size()).value(),
+ From->args_size());
+ break;
+ }
+ case attr::TryAcquireCapability: {
+ const auto *From = cast<TryAcquireCapabilityAttr>(FromAttr);
+ AI.importAttr(From, AI.importArg(From->getSuccessValue()).value(),
+ AI.importArrayArg(From->args(), From->args_size()).value(),
+ From->args_size());
+ break;
+ }
+ case attr::ReleaseCapability: {
+ const auto *From = cast<ReleaseCapabilityAttr>(FromAttr);
+ AI.importAttr(From,
+ AI.importArrayArg(From->args(), From->args_size()).value(),
+ From->args_size());
+ break;
+ }
+ case attr::RequiresCapability: {
+ const auto *From = cast<RequiresCapabilityAttr>(FromAttr);
+ AI.importAttr(From,
+ AI.importArrayArg(From->args(), From->args_size()).value(),
+ From->args_size());
+ break;
+ }
+ case attr::GuardedBy: {
+ const auto *From = cast<GuardedByAttr>(FromAttr);
+ AI.importAttr(From, AI.importArg(From->getArg()).value());
+ break;
+ }
+ case attr::PtGuardedBy: {
+ const auto *From = cast<PtGuardedByAttr>(FromAttr);
+ AI.importAttr(From, AI.importArg(From->getArg()).value());
+ break;
+ }
+ case attr::AcquiredAfter: {
+ const auto *From = cast<AcquiredAfterAttr>(FromAttr);
+ AI.importAttr(From,
+ AI.importArrayArg(From->args(), From->args_size()).value(),
+ From->args_size());
+ break;
+ }
+ case attr::AcquiredBefore: {
+ const auto *From = cast<AcquiredBeforeAttr>(FromAttr);
+ AI.importAttr(From,
+ AI.importArrayArg(From->args(), From->args_size()).value(),
+ From->args_size());
+ break;
+ }
+ case attr::AssertExclusiveLock: {
+ const auto *From = cast<AssertExclusiveLockAttr>(FromAttr);
+ AI.importAttr(From,
+ AI.importArrayArg(From->args(), From->args_size()).value(),
+ From->args_size());
+ break;
+ }
+ case attr::AssertSharedLock: {
+ const auto *From = cast<AssertSharedLockAttr>(FromAttr);
+ AI.importAttr(From,
+ AI.importArrayArg(From->args(), From->args_size()).value(),
+ From->args_size());
break;
}
- assert(ToAttr && "Attribute should be created.");
-
- return ToAttr;
+ case attr::ExclusiveTrylockFunction: {
+ const auto *From = cast<ExclusiveTrylockFunctionAttr>(FromAttr);
+ AI.importAttr(From, AI.importArg(From->getSuccessValue()).value(),
+ AI.importArrayArg(From->args(), From->args_size()).value(),
+ From->args_size());
+ break;
+ }
+ case attr::SharedTrylockFunction: {
+ const auto *From = cast<SharedTrylockFunctionAttr>(FromAttr);
+ AI.importAttr(From, AI.importArg(From->getSuccessValue()).value(),
+ AI.importArrayArg(From->args(), From->args_size()).value(),
+ From->args_size());
+ break;
+ }
+ case attr::LockReturned: {
+ const auto *From = cast<LockReturnedAttr>(FromAttr);
+ AI.importAttr(From, AI.importArg(From->getArg()).value());
+ break;
+ }
+ case attr::LocksExcluded: {
+ const auto *From = cast<LocksExcludedAttr>(FromAttr);
+ AI.importAttr(From,
+ AI.importArrayArg(From->args(), From->args_size()).value(),
+ From->args_size());
+ break;
+ }
+ case attr::CountedBy: {
+ AI.cloneAttr(FromAttr);
+ const auto *CBA = cast<CountedByAttr>(FromAttr);
+ Expected<SourceRange> SR = Import(CBA->getCountedByFieldLoc()).get();
+ if (!SR)
+ return SR.takeError();
+ AI.castAttrAs<CountedByAttr>()->setCountedByFieldLoc(SR.get());
+ break;
+ }
+
+ default: {
+ // The default branch works for attributes that have no arguments to import.
+ // FIXME: Handle every attribute type that has arguments of type to import
+ // (most often Expr* or Decl* or type) in the switch above.
+ AI.cloneAttr(FromAttr);
+ break;
+ }
+ }
+
+ return std::move(AI).getResult();
}
Decl *ASTImporter::GetAlreadyImportedOrNull(const Decl *FromD) const {
- auto Pos = ImportedDecls.find(FromD);
- if (Pos != ImportedDecls.end())
- return Pos->second;
- else
- return nullptr;
+ return ImportedDecls.lookup(FromD);
}
TranslationUnitDecl *ASTImporter::GetFromTU(Decl *ToD) {
@@ -8458,6 +9307,19 @@ TranslationUnitDecl *ASTImporter::GetFromTU(Decl *ToD) {
return FromDPos->second->getTranslationUnitDecl();
}
+Error ASTImporter::ImportAttrs(Decl *ToD, Decl *FromD) {
+ if (!FromD->hasAttrs() || ToD->hasAttrs())
+ return Error::success();
+ for (const Attr *FromAttr : FromD->getAttrs()) {
+ auto ToAttrOrErr = Import(FromAttr);
+ if (ToAttrOrErr)
+ ToD->addAttr(*ToAttrOrErr);
+ else
+ return ToAttrOrErr.takeError();
+ }
+ return Error::success();
+}
+
Expected<Decl *> ASTImporter::Import(Decl *FromD) {
if (!FromD)
return nullptr;
@@ -8470,7 +9332,7 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
// Check whether there was a previous failed import.
// If yes return the existing error.
if (auto Error = getImportDeclErrorIfAny(FromD))
- return make_error<ImportError>(*Error);
+ return make_error<ASTImportError>(*Error);
// Check whether we've already imported this declaration.
Decl *ToD = GetAlreadyImportedOrNull(FromD);
@@ -8478,7 +9340,7 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
// Already imported (possibly from another TU) and with an error.
if (auto Error = SharedState->getImportDeclErrorIfAny(ToD)) {
setImportDeclError(FromD, *Error);
- return make_error<ImportError>(*Error);
+ return make_error<ASTImportError>(*Error);
}
// If FromD has some updated flags after last import, apply it.
@@ -8530,9 +9392,9 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
// Error encountered for the first time.
// After takeError the error is not usable any more in ToDOrErr.
// Get a copy of the error object (any more simple solution for this?).
- ImportError ErrOut;
+ ASTImportError ErrOut;
handleAllErrors(ToDOrErr.takeError(),
- [&ErrOut](const ImportError &E) { ErrOut = E; });
+ [&ErrOut](const ASTImportError &E) { ErrOut = E; });
setImportDeclError(FromD, ErrOut);
// Set the error for the mapped to Decl, which is in the "to" context.
if (Pos != ImportedDecls.end())
@@ -8540,8 +9402,20 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
// Set the error for all nodes which have been created before we
// recognized the error.
- for (const auto &Path : SavedImportPaths[FromD])
+ for (const auto &Path : SavedImportPaths[FromD]) {
+ // The import path contains import-dependency nodes first.
+ // Save the node that was imported as dependency of the current node.
+ Decl *PrevFromDi = FromD;
for (Decl *FromDi : Path) {
+ // Begin and end of the path equals 'FromD', skip it.
+ if (FromDi == FromD)
+ continue;
+ // We should not set import error on a node and all following nodes in
+ // the path if child import errors are ignored.
+ if (ChildErrorHandlingStrategy(FromDi).ignoreChildErrorOnParent(
+ PrevFromDi))
+ break;
+ PrevFromDi = FromDi;
setImportDeclError(FromDi, ErrOut);
//FIXME Should we remove these Decls from ImportedDecls?
// Set the error for the mapped to Decl, which is in the "to" context.
@@ -8551,10 +9425,11 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
// FIXME Should we remove these Decls from the LookupTable,
// and from ImportedFromDecls?
}
+ }
SavedImportPaths.erase(FromD);
// Do not return ToDOrErr, error was taken out of it.
- return make_error<ImportError>(ErrOut);
+ return make_error<ASTImportError>(ErrOut);
}
ToD = *ToDOrErr;
@@ -8566,7 +9441,7 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
if (!ToD) {
auto Err = getImportDeclErrorIfAny(FromD);
assert(Err);
- return make_error<ImportError>(*Err);
+ return make_error<ASTImportError>(*Err);
}
// We could import from the current TU without error. But previously we
@@ -8574,20 +9449,12 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
// ASTImporter object) and with an error.
if (auto Error = SharedState->getImportDeclErrorIfAny(ToD)) {
setImportDeclError(FromD, *Error);
- return make_error<ImportError>(*Error);
+ return make_error<ASTImportError>(*Error);
}
-
// Make sure that ImportImpl registered the imported decl.
assert(ImportedDecls.count(FromD) != 0 && "Missing call to MapImported?");
-
- if (FromD->hasAttrs())
- for (const Attr *FromAttr : FromD->getAttrs()) {
- auto ToAttrOrErr = Import(FromAttr);
- if (ToAttrOrErr)
- ToD->addAttr(*ToAttrOrErr);
- else
- return ToAttrOrErr.takeError();
- }
+ if (auto Error = ImportAttrs(ToD, FromD))
+ return std::move(Error);
// Notify subclasses.
Imported(FromD, ToD);
@@ -8597,6 +9464,11 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
return ToDOrErr;
}
+llvm::Expected<InheritedConstructor>
+ASTImporter::Import(const InheritedConstructor &From) {
+ return ASTNodeImporter(*this).ImportInheritedConstructor(From);
+}
+
Expected<DeclContext *> ASTImporter::ImportContext(DeclContext *FromDC) {
if (!FromDC)
return FromDC;
@@ -8741,12 +9613,11 @@ ASTImporter::Import(NestedNameSpecifier *FromNNS) {
case NestedNameSpecifier::TypeSpec:
case NestedNameSpecifier::TypeSpecWithTemplate:
- if (Expected<QualType> TyOrErr =
- Import(QualType(FromNNS->getAsType(), 0u))) {
+ if (ExpectedTypePtr TyOrErr = Import(FromNNS->getAsType())) {
bool TSTemplate =
FromNNS->getKind() == NestedNameSpecifier::TypeSpecWithTemplate;
return NestedNameSpecifier::Create(ToContext, Prefix, TSTemplate,
- TyOrErr->getTypePtr());
+ *TyOrErr);
} else {
return TyOrErr.takeError();
}
@@ -8845,7 +9716,7 @@ Expected<TemplateName> ASTImporter::Import(TemplateName From) {
switch (From.getKind()) {
case TemplateName::Template:
if (ExpectedDecl ToTemplateOrErr = Import(From.getAsTemplateDecl()))
- return TemplateName(cast<TemplateDecl>(*ToTemplateOrErr));
+ return TemplateName(cast<TemplateDecl>((*ToTemplateOrErr)->getCanonicalDecl()));
else
return ToTemplateOrErr.takeError();
@@ -8875,13 +9746,11 @@ Expected<TemplateName> ASTImporter::Import(TemplateName From) {
auto QualifierOrErr = Import(QTN->getQualifier());
if (!QualifierOrErr)
return QualifierOrErr.takeError();
-
- if (ExpectedDecl ToTemplateOrErr = Import(From.getAsTemplateDecl()))
- return ToContext.getQualifiedTemplateName(
- *QualifierOrErr, QTN->hasTemplateKeyword(),
- cast<TemplateDecl>(*ToTemplateOrErr));
- else
- return ToTemplateOrErr.takeError();
+ auto TNOrErr = Import(QTN->getUnderlyingTemplate());
+ if (!TNOrErr)
+ return TNOrErr.takeError();
+ return ToContext.getQualifiedTemplateName(
+ *QualifierOrErr, QTN->hasTemplateKeyword(), *TNOrErr);
}
case TemplateName::DependentTemplate: {
@@ -8902,33 +9771,41 @@ Expected<TemplateName> ASTImporter::Import(TemplateName From) {
case TemplateName::SubstTemplateTemplateParm: {
SubstTemplateTemplateParmStorage *Subst =
From.getAsSubstTemplateTemplateParm();
- ExpectedDecl ParamOrErr = Import(Subst->getParameter());
- if (!ParamOrErr)
- return ParamOrErr.takeError();
-
auto ReplacementOrErr = Import(Subst->getReplacement());
if (!ReplacementOrErr)
return ReplacementOrErr.takeError();
+ auto AssociatedDeclOrErr = Import(Subst->getAssociatedDecl());
+ if (!AssociatedDeclOrErr)
+ return AssociatedDeclOrErr.takeError();
+
return ToContext.getSubstTemplateTemplateParm(
- cast<TemplateTemplateParmDecl>(*ParamOrErr), *ReplacementOrErr);
+ *ReplacementOrErr, *AssociatedDeclOrErr, Subst->getIndex(),
+ Subst->getPackIndex());
}
case TemplateName::SubstTemplateTemplateParmPack: {
- SubstTemplateTemplateParmPackStorage *SubstPack
- = From.getAsSubstTemplateTemplateParmPack();
- ExpectedDecl ParamOrErr = Import(SubstPack->getParameterPack());
- if (!ParamOrErr)
- return ParamOrErr.takeError();
-
+ SubstTemplateTemplateParmPackStorage *SubstPack =
+ From.getAsSubstTemplateTemplateParmPack();
ASTNodeImporter Importer(*this);
auto ArgPackOrErr =
Importer.ImportTemplateArgument(SubstPack->getArgumentPack());
if (!ArgPackOrErr)
return ArgPackOrErr.takeError();
+ auto AssociatedDeclOrErr = Import(SubstPack->getAssociatedDecl());
+ if (!AssociatedDeclOrErr)
+ return AssociatedDeclOrErr.takeError();
+
return ToContext.getSubstTemplateTemplateParmPack(
- cast<TemplateTemplateParmDecl>(*ParamOrErr), *ArgPackOrErr);
+ *ArgPackOrErr, *AssociatedDeclOrErr, SubstPack->getIndex(),
+ SubstPack->getFinal());
+ }
+ case TemplateName::UsingTemplate: {
+ auto UsingOrError = Import(From.getAsUsingShadowDecl());
+ if (!UsingOrError)
+ return UsingOrError.takeError();
+ return TemplateName(cast<UsingShadowDecl>(*UsingOrError));
}
}
@@ -8979,13 +9856,13 @@ Expected<FileID> ASTImporter::Import(FileID FromID, bool IsBuiltin) {
ExpectedSLoc ToExLocS = Import(FromEx.getExpansionLocStart());
if (!ToExLocS)
return ToExLocS.takeError();
- unsigned TokenLen = FromSM.getFileIDSize(FromID);
+ unsigned ExLength = FromSM.getFileIDSize(FromID);
SourceLocation MLoc;
if (FromEx.isMacroArgExpansion()) {
- MLoc = ToSM.createMacroArgExpansionLoc(*ToSpLoc, *ToExLocS, TokenLen);
+ MLoc = ToSM.createMacroArgExpansionLoc(*ToSpLoc, *ToExLocS, ExLength);
} else {
if (ExpectedSLoc ToExLocE = Import(FromEx.getExpansionLocEnd()))
- MLoc = ToSM.createExpansionLoc(*ToSpLoc, *ToExLocS, *ToExLocE, TokenLen,
+ MLoc = ToSM.createExpansionLoc(*ToSpLoc, *ToExLocS, *ToExLocE, ExLength,
FromEx.isExpansionTokenRange());
else
return ToExLocE.takeError();
@@ -9027,11 +9904,11 @@ Expected<FileID> ASTImporter::Import(FileID FromID, bool IsBuiltin) {
if (ToID.isInvalid() || IsBuiltin) {
// FIXME: We want to re-use the existing MemoryBuffer!
- llvm::Optional<llvm::MemoryBufferRef> FromBuf =
+ std::optional<llvm::MemoryBufferRef> FromBuf =
Cache->getBufferOrNone(FromContext.getDiagnostics(),
FromSM.getFileManager(), SourceLocation{});
if (!FromBuf)
- return llvm::make_error<ImportError>(ImportError::Unknown);
+ return llvm::make_error<ASTImportError>(ASTImportError::Unknown);
std::unique_ptr<llvm::MemoryBuffer> ToBuf =
llvm::MemoryBuffer::getMemBufferCopy(FromBuf->getBuffer(),
@@ -9107,7 +9984,7 @@ Expected<CXXCtorInitializer *> ASTImporter::Import(CXXCtorInitializer *From) {
*ToExprOrErr, *RParenLocOrErr);
} else {
// FIXME: assert?
- return make_error<ImportError>();
+ return make_error<ASTImportError>();
}
}
@@ -9381,16 +10258,14 @@ ASTNodeImporter::ImportAPValue(const APValue &FromValue) {
}
} else {
FromElemTy = FromValue.getLValueBase().getTypeInfoType();
- QualType ImpTypeInfo = importChecked(
- Err,
- QualType(FromValue.getLValueBase().get<TypeInfoLValue>().getType(),
- 0));
+ const Type *ImpTypeInfo = importChecked(
+ Err, FromValue.getLValueBase().get<TypeInfoLValue>().getType());
QualType ImpType =
importChecked(Err, FromValue.getLValueBase().getTypeInfoType());
if (Err)
return std::move(Err);
- Base = APValue::LValueBase::getTypeInfo(
- TypeInfoLValue(ImpTypeInfo.getTypePtr()), ImpType);
+ Base = APValue::LValueBase::getTypeInfo(TypeInfoLValue(ImpTypeInfo),
+ ImpType);
}
}
CharUnits Offset = FromValue.getLValueOffset();
@@ -9438,7 +10313,7 @@ Expected<DeclarationName> ASTImporter::HandleNameConflict(DeclarationName Name,
unsigned NumDecls) {
if (ODRHandling == ODRHandlingType::Conservative)
// Report error at any name conflict.
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
else
// Allow to create the new Decl with the same name.
return Name;
@@ -9498,16 +10373,16 @@ Decl *ASTImporter::MapImported(Decl *From, Decl *To) {
return To;
}
-llvm::Optional<ImportError>
+std::optional<ASTImportError>
ASTImporter::getImportDeclErrorIfAny(Decl *FromD) const {
auto Pos = ImportDeclErrors.find(FromD);
if (Pos != ImportDeclErrors.end())
return Pos->second;
else
- return Optional<ImportError>();
+ return std::nullopt;
}
-void ASTImporter::setImportDeclError(Decl *From, ImportError Error) {
+void ASTImporter::setImportDeclError(Decl *From, ASTImportError Error) {
auto InsertRes = ImportDeclErrors.insert({From, Error});
(void)InsertRes;
// Either we set the error for the first time, or we already had set one and
diff --git a/contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp b/contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp
index b78cc0c053f6..07d39dcee258 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTImporterLookupTable.cpp
@@ -14,6 +14,7 @@
#include "clang/AST/ASTImporterLookupTable.h"
#include "clang/AST/Decl.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "llvm/Support/FormatVariadic.h"
namespace clang {
@@ -66,6 +67,8 @@ struct Builder : RecursiveASTVisitor<Builder> {
} else if (isa<TypedefType>(Ty)) {
// We do not put friend typedefs to the lookup table because
// ASTImporter does not organize typedefs into redecl chains.
+ } else if (isa<UsingType>(Ty)) {
+ // Similar to TypedefType, not putting into lookup table.
} else {
llvm_unreachable("Unhandled type of friend class");
}
@@ -84,6 +87,18 @@ struct Builder : RecursiveASTVisitor<Builder> {
ASTImporterLookupTable::ASTImporterLookupTable(TranslationUnitDecl &TU) {
Builder B(*this);
B.TraverseDecl(&TU);
+ // The VaList declaration may be created on demand only or not traversed.
+ // To ensure it is present and found during import, add it to the table now.
+ if (auto *D =
+ dyn_cast_or_null<NamedDecl>(TU.getASTContext().getVaListTagDecl())) {
+ // On some platforms (AArch64) the VaList declaration can be inside a 'std'
+ // namespace. This is handled specially and not visible by AST traversal.
+ // ASTImporter must be able to find this namespace to import the VaList
+ // declaration (and the namespace) correctly.
+ if (auto *Ns = dyn_cast<NamespaceDecl>(D->getDeclContext()))
+ add(&TU, Ns);
+ add(D->getDeclContext(), D);
+ }
}
void ASTImporterLookupTable::add(DeclContext *DC, NamedDecl *ND) {
@@ -93,10 +108,19 @@ void ASTImporterLookupTable::add(DeclContext *DC, NamedDecl *ND) {
}
void ASTImporterLookupTable::remove(DeclContext *DC, NamedDecl *ND) {
- DeclList &Decls = LookupTable[DC][ND->getDeclName()];
+ const DeclarationName Name = ND->getDeclName();
+ DeclList &Decls = LookupTable[DC][Name];
bool EraseResult = Decls.remove(ND);
(void)EraseResult;
- assert(EraseResult == true && "Trying to remove not contained Decl");
+#ifndef NDEBUG
+ if (!EraseResult) {
+ std::string Message =
+ llvm::formatv("Trying to remove not contained Decl '{0}' of type {1}",
+ Name.getAsString(), DC->getDeclKindName())
+ .str();
+ llvm_unreachable(Message.c_str());
+ }
+#endif
}
void ASTImporterLookupTable::add(NamedDecl *ND) {
@@ -130,6 +154,11 @@ void ASTImporterLookupTable::update(NamedDecl *ND, DeclContext *OldDC) {
add(ND);
}
+void ASTImporterLookupTable::updateForced(NamedDecl *ND, DeclContext *OldDC) {
+ LookupTable[OldDC][ND->getDeclName()].remove(ND);
+ add(ND);
+}
+
ASTImporterLookupTable::LookupResult
ASTImporterLookupTable::lookup(DeclContext *DC, DeclarationName Name) const {
auto DCI = LookupTable.find(DC->getPrimaryContext());
@@ -145,7 +174,7 @@ ASTImporterLookupTable::lookup(DeclContext *DC, DeclarationName Name) const {
}
bool ASTImporterLookupTable::contains(DeclContext *DC, NamedDecl *ND) const {
- return 0 < lookup(DC, ND->getDeclName()).count(ND);
+ return lookup(DC, ND->getDeclName()).contains(ND);
}
void ASTImporterLookupTable::dump(DeclContext *DC) const {
diff --git a/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp b/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp
index c4ff05ba9325..be7a850a2982 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp
@@ -84,13 +84,12 @@
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
+#include <optional>
#include <utility>
using namespace clang;
@@ -100,9 +99,14 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Decl *D1, Decl *D2);
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ const Stmt *S1, const Stmt *S2);
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
const TemplateArgument &Arg1,
const TemplateArgument &Arg2);
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ const TemplateArgumentLoc &Arg1,
+ const TemplateArgumentLoc &Arg2);
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
NestedNameSpecifier *NNS1,
NestedNameSpecifier *NNS2);
static bool IsStructurallyEquivalent(const IdentifierInfo *Name1,
@@ -212,6 +216,15 @@ class StmtComparer {
return E1->size() == E2->size();
}
+ bool IsStmtEquivalent(const DeclRefExpr *DRE1, const DeclRefExpr *DRE2) {
+ const ValueDecl *Decl1 = DRE1->getDecl();
+ const ValueDecl *Decl2 = DRE2->getDecl();
+ if (!Decl1 || !Decl2)
+ return false;
+ return IsStructurallyEquivalent(Context, const_cast<ValueDecl *>(Decl1),
+ const_cast<ValueDecl *>(Decl2));
+ }
+
bool IsStmtEquivalent(const DependentScopeDeclRefExpr *DE1,
const DependentScopeDeclRefExpr *DE2) {
if (!IsStructurallyEquivalent(Context, DE1->getDeclName(),
@@ -238,8 +251,8 @@ class StmtComparer {
const GenericSelectionExpr *E2) {
for (auto Pair : zip_longest(E1->getAssocTypeSourceInfos(),
E2->getAssocTypeSourceInfos())) {
- Optional<TypeSourceInfo *> Child1 = std::get<0>(Pair);
- Optional<TypeSourceInfo *> Child2 = std::get<1>(Pair);
+ std::optional<TypeSourceInfo *> Child1 = std::get<0>(Pair);
+ std::optional<TypeSourceInfo *> Child2 = std::get<1>(Pair);
// Skip this case if there are a different number of associated types.
if (!Child1 || !Child2)
return false;
@@ -275,6 +288,17 @@ class StmtComparer {
bool IsStmtEquivalent(const Stmt *S1, const Stmt *S2) { return true; }
+ bool IsStmtEquivalent(const GotoStmt *S1, const GotoStmt *S2) {
+ LabelDecl *L1 = S1->getLabel();
+ LabelDecl *L2 = S2->getLabel();
+ if (!L1 || !L2)
+ return L1 == L2;
+
+ IdentifierInfo *Name1 = L1->getIdentifier();
+ IdentifierInfo *Name2 = L2->getIdentifier();
+ return ::IsStructurallyEquivalent(Name1, Name2);
+ }
+
bool IsStmtEquivalent(const SourceLocExpr *E1, const SourceLocExpr *E2) {
return E1->getIdentKind() == E2->getIdentKind();
}
@@ -289,8 +313,14 @@ class StmtComparer {
bool IsStmtEquivalent(const SubstNonTypeTemplateParmExpr *E1,
const SubstNonTypeTemplateParmExpr *E2) {
- return IsStructurallyEquivalent(Context, E1->getParameter(),
- E2->getParameter());
+ if (!IsStructurallyEquivalent(Context, E1->getAssociatedDecl(),
+ E2->getAssociatedDecl()))
+ return false;
+ if (E1->getIndex() != E2->getIndex())
+ return false;
+ if (E1->getPackIndex() != E2->getPackIndex())
+ return false;
+ return true;
}
bool IsStmtEquivalent(const SubstNonTypeTemplateParmPackExpr *E1,
@@ -304,8 +334,8 @@ class StmtComparer {
return false;
for (auto Pair : zip_longest(E1->getArgs(), E2->getArgs())) {
- Optional<TypeSourceInfo *> Child1 = std::get<0>(Pair);
- Optional<TypeSourceInfo *> Child2 = std::get<1>(Pair);
+ std::optional<TypeSourceInfo *> Child1 = std::get<0>(Pair);
+ std::optional<TypeSourceInfo *> Child2 = std::get<1>(Pair);
// Different number of args.
if (!Child1 || !Child2)
return false;
@@ -334,6 +364,34 @@ class StmtComparer {
return true;
}
+ bool IsStmtEquivalent(const OverloadExpr *E1, const OverloadExpr *E2) {
+ if (!IsStructurallyEquivalent(Context, E1->getName(), E2->getName()))
+ return false;
+
+ if (static_cast<bool>(E1->getQualifier()) !=
+ static_cast<bool>(E2->getQualifier()))
+ return false;
+ if (E1->getQualifier() &&
+ !IsStructurallyEquivalent(Context, E1->getQualifier(),
+ E2->getQualifier()))
+ return false;
+
+ if (E1->getNumTemplateArgs() != E2->getNumTemplateArgs())
+ return false;
+ const TemplateArgumentLoc *Args1 = E1->getTemplateArgs();
+ const TemplateArgumentLoc *Args2 = E2->getTemplateArgs();
+ for (unsigned int ArgI = 0, ArgN = E1->getNumTemplateArgs(); ArgI < ArgN;
+ ++ArgI)
+ if (!IsStructurallyEquivalent(Context, Args1[ArgI], Args2[ArgI]))
+ return false;
+
+ return true;
+ }
+
+ bool IsStmtEquivalent(const CXXBoolLiteralExpr *E1, const CXXBoolLiteralExpr *E2) {
+ return E1->getValue() == E2->getValue();
+ }
+
/// End point of the traversal chain.
bool TraverseStmt(const Stmt *S1, const Stmt *S2) { return true; }
@@ -381,12 +439,67 @@ public:
};
} // namespace
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ const UnaryOperator *E1,
+ const CXXOperatorCallExpr *E2) {
+ return UnaryOperator::getOverloadedOperator(E1->getOpcode()) ==
+ E2->getOperator() &&
+ IsStructurallyEquivalent(Context, E1->getSubExpr(), E2->getArg(0));
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ const CXXOperatorCallExpr *E1,
+ const UnaryOperator *E2) {
+ return E1->getOperator() ==
+ UnaryOperator::getOverloadedOperator(E2->getOpcode()) &&
+ IsStructurallyEquivalent(Context, E1->getArg(0), E2->getSubExpr());
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ const BinaryOperator *E1,
+ const CXXOperatorCallExpr *E2) {
+ return BinaryOperator::getOverloadedOperator(E1->getOpcode()) ==
+ E2->getOperator() &&
+ IsStructurallyEquivalent(Context, E1->getLHS(), E2->getArg(0)) &&
+ IsStructurallyEquivalent(Context, E1->getRHS(), E2->getArg(1));
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ const CXXOperatorCallExpr *E1,
+ const BinaryOperator *E2) {
+ return E1->getOperator() ==
+ BinaryOperator::getOverloadedOperator(E2->getOpcode()) &&
+ IsStructurallyEquivalent(Context, E1->getArg(0), E2->getLHS()) &&
+ IsStructurallyEquivalent(Context, E1->getArg(1), E2->getRHS());
+}
+
/// Determine structural equivalence of two statements.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
const Stmt *S1, const Stmt *S2) {
if (!S1 || !S2)
return S1 == S2;
+ // Check for statements with similar syntax but different AST.
+ // A UnaryOperator node is more lightweight than a CXXOperatorCallExpr node.
+ // The more heavyweight node is only created if the definition-time name
+ // lookup had any results. The lookup results are stored CXXOperatorCallExpr
+ // only. The lookup results can be different in a "From" and "To" AST even if
+ // the compared structure is otherwise equivalent. For this reason we must
+ // treat a similar unary/binary operator node and CXXOperatorCall node as
+ // equivalent.
+ if (const auto *E2CXXOperatorCall = dyn_cast<CXXOperatorCallExpr>(S2)) {
+ if (const auto *E1Unary = dyn_cast<UnaryOperator>(S1))
+ return IsStructurallyEquivalent(Context, E1Unary, E2CXXOperatorCall);
+ if (const auto *E1Binary = dyn_cast<BinaryOperator>(S1))
+ return IsStructurallyEquivalent(Context, E1Binary, E2CXXOperatorCall);
+ }
+ if (const auto *E1CXXOperatorCall = dyn_cast<CXXOperatorCallExpr>(S1)) {
+ if (const auto *E2Unary = dyn_cast<UnaryOperator>(S2))
+ return IsStructurallyEquivalent(Context, E1CXXOperatorCall, E2Unary);
+ if (const auto *E2Binary = dyn_cast<BinaryOperator>(S2))
+ return IsStructurallyEquivalent(Context, E1CXXOperatorCall, E2Binary);
+ }
+
// Compare the statements itself.
StmtComparer Comparer(Context);
if (!Comparer.IsEquivalent(S1, S2))
@@ -394,8 +507,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
// Iterate over the children of both statements and also compare them.
for (auto Pair : zip_longest(S1->children(), S2->children())) {
- Optional<const Stmt *> Child1 = std::get<0>(Pair);
- Optional<const Stmt *> Child2 = std::get<1>(Pair);
+ std::optional<const Stmt *> Child1 = std::get<0>(Pair);
+ std::optional<const Stmt *> Child2 = std::get<1>(Pair);
// One of the statements has a different amount of children than the other,
// so the statements can't be equivalent.
if (!Child1 || !Child2)
@@ -510,13 +623,15 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
*P2 = N2.getAsSubstTemplateTemplateParmPack();
return IsStructurallyEquivalent(Context, P1->getArgumentPack(),
P2->getArgumentPack()) &&
- IsStructurallyEquivalent(Context, P1->getParameterPack(),
- P2->getParameterPack());
+ IsStructurallyEquivalent(Context, P1->getAssociatedDecl(),
+ P2->getAssociatedDecl()) &&
+ P1->getIndex() == P2->getIndex();
}
case TemplateName::Template:
case TemplateName::QualifiedTemplate:
case TemplateName::SubstTemplateTemplateParm:
+ case TemplateName::UsingTemplate:
// It is sufficient to check value of getAsTemplateDecl.
break;
@@ -525,6 +640,10 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return true;
}
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ ArrayRef<TemplateArgument> Args1,
+ ArrayRef<TemplateArgument> Args2);
+
/// Determine whether two template arguments are equivalent.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
const TemplateArgument &Arg1,
@@ -566,19 +685,36 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return IsStructurallyEquivalent(Context, Arg1.getAsExpr(),
Arg2.getAsExpr());
+ case TemplateArgument::StructuralValue:
+ return Arg1.structurallyEquals(Arg2);
+
case TemplateArgument::Pack:
- if (Arg1.pack_size() != Arg2.pack_size())
- return false;
+ return IsStructurallyEquivalent(Context, Arg1.pack_elements(),
+ Arg2.pack_elements());
+ }
- for (unsigned I = 0, N = Arg1.pack_size(); I != N; ++I)
- if (!IsStructurallyEquivalent(Context, Arg1.pack_begin()[I],
- Arg2.pack_begin()[I]))
- return false;
+ llvm_unreachable("Invalid template argument kind");
+}
- return true;
+/// Determine structural equivalence of two template argument lists.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ ArrayRef<TemplateArgument> Args1,
+ ArrayRef<TemplateArgument> Args2) {
+ if (Args1.size() != Args2.size())
+ return false;
+ for (unsigned I = 0, N = Args1.size(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context, Args1[I], Args2[I]))
+ return false;
}
+ return true;
+}
- llvm_unreachable("Invalid template argument kind");
+/// Determine whether two template argument locations are equivalent.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ const TemplateArgumentLoc &Arg1,
+ const TemplateArgumentLoc &Arg2) {
+ return IsStructurallyEquivalent(Context, Arg1.getArgument(),
+ Arg2.getArgument());
}
/// Determine structural equivalence for the common part of array
@@ -899,7 +1035,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
// Fall through to check the bits common with FunctionNoProtoType.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Type::FunctionNoProto: {
@@ -932,6 +1068,13 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
break;
+ case Type::BTFTagAttributed:
+ if (!IsStructurallyEquivalent(
+ Context, cast<BTFTagAttributedType>(T1)->getWrappedType(),
+ cast<BTFTagAttributedType>(T2)->getWrappedType()))
+ return false;
+ break;
+
case Type::Paren:
if (!IsStructurallyEquivalent(Context, cast<ParenType>(T1)->getInnerType(),
cast<ParenType>(T2)->getInnerType()))
@@ -945,9 +1088,21 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
break;
+ case Type::Using:
+ if (!IsStructurallyEquivalent(Context, cast<UsingType>(T1)->getFoundDecl(),
+ cast<UsingType>(T2)->getFoundDecl()))
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ cast<UsingType>(T1)->getUnderlyingType(),
+ cast<UsingType>(T2)->getUnderlyingType()))
+ return false;
+ break;
+
case Type::Typedef:
if (!IsStructurallyEquivalent(Context, cast<TypedefType>(T1)->getDecl(),
- cast<TypedefType>(T2)->getDecl()))
+ cast<TypedefType>(T2)->getDecl()) ||
+ !IsStructurallyEquivalent(Context, cast<TypedefType>(T1)->desugar(),
+ cast<TypedefType>(T2)->desugar()))
return false;
break;
@@ -960,8 +1115,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
case Type::TypeOf:
if (!IsStructurallyEquivalent(Context,
- cast<TypeOfType>(T1)->getUnderlyingType(),
- cast<TypeOfType>(T2)->getUnderlyingType()))
+ cast<TypeOfType>(T1)->getUnmodifiedType(),
+ cast<TypeOfType>(T2)->getUnmodifiedType()))
return false;
break;
@@ -991,16 +1146,10 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (Auto1->getTypeConstraintConcept() !=
Auto2->getTypeConstraintConcept())
return false;
- ArrayRef<TemplateArgument> Auto1Args =
- Auto1->getTypeConstraintArguments();
- ArrayRef<TemplateArgument> Auto2Args =
- Auto2->getTypeConstraintArguments();
- if (Auto1Args.size() != Auto2Args.size())
+ if (!IsStructurallyEquivalent(Context,
+ Auto1->getTypeConstraintArguments(),
+ Auto2->getTypeConstraintArguments()))
return false;
- for (unsigned I = 0, N = Auto1Args.size(); I != N; ++I) {
- if (!IsStructurallyEquivalent(Context, Auto1Args[I], Auto2Args[I]))
- return false;
- }
}
break;
}
@@ -1027,7 +1176,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
case Type::TemplateTypeParm: {
const auto *Parm1 = cast<TemplateTypeParmType>(T1);
const auto *Parm2 = cast<TemplateTypeParmType>(T2);
- if (Parm1->getDepth() != Parm2->getDepth())
+ if (!Context.IgnoreTemplateParmDepth &&
+ Parm1->getDepth() != Parm2->getDepth())
return false;
if (Parm1->getIndex() != Parm2->getIndex())
return false;
@@ -1041,22 +1191,26 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
case Type::SubstTemplateTypeParm: {
const auto *Subst1 = cast<SubstTemplateTypeParmType>(T1);
const auto *Subst2 = cast<SubstTemplateTypeParmType>(T2);
- if (!IsStructurallyEquivalent(Context,
- QualType(Subst1->getReplacedParameter(), 0),
- QualType(Subst2->getReplacedParameter(), 0)))
- return false;
if (!IsStructurallyEquivalent(Context, Subst1->getReplacementType(),
Subst2->getReplacementType()))
return false;
+ if (!IsStructurallyEquivalent(Context, Subst1->getAssociatedDecl(),
+ Subst2->getAssociatedDecl()))
+ return false;
+ if (Subst1->getIndex() != Subst2->getIndex())
+ return false;
+ if (Subst1->getPackIndex() != Subst2->getPackIndex())
+ return false;
break;
}
case Type::SubstTemplateTypeParmPack: {
const auto *Subst1 = cast<SubstTemplateTypeParmPackType>(T1);
const auto *Subst2 = cast<SubstTemplateTypeParmPackType>(T2);
- if (!IsStructurallyEquivalent(Context,
- QualType(Subst1->getReplacedParameter(), 0),
- QualType(Subst2->getReplacedParameter(), 0)))
+ if (!IsStructurallyEquivalent(Context, Subst1->getAssociatedDecl(),
+ Subst2->getAssociatedDecl()))
+ return false;
+ if (Subst1->getIndex() != Subst2->getIndex())
return false;
if (!IsStructurallyEquivalent(Context, Subst1->getArgumentPack(),
Subst2->getArgumentPack()))
@@ -1070,20 +1224,18 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (!IsStructurallyEquivalent(Context, Spec1->getTemplateName(),
Spec2->getTemplateName()))
return false;
- if (Spec1->getNumArgs() != Spec2->getNumArgs())
+ if (!IsStructurallyEquivalent(Context, Spec1->template_arguments(),
+ Spec2->template_arguments()))
return false;
- for (unsigned I = 0, N = Spec1->getNumArgs(); I != N; ++I) {
- if (!IsStructurallyEquivalent(Context, Spec1->getArg(I),
- Spec2->getArg(I)))
- return false;
- }
break;
}
case Type::Elaborated: {
const auto *Elab1 = cast<ElaboratedType>(T1);
const auto *Elab2 = cast<ElaboratedType>(T2);
- // CHECKME: what if a keyword is ETK_None or ETK_typename ?
+ // CHECKME: what if a keyword is ElaboratedTypeKeyword::None or
+ // ElaboratedTypeKeyword::Typename
+ // ?
if (Elab1->getKeyword() != Elab2->getKeyword())
return false;
if (!IsStructurallyEquivalent(Context, Elab1->getQualifier(),
@@ -1127,13 +1279,9 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (!IsStructurallyEquivalent(Spec1->getIdentifier(),
Spec2->getIdentifier()))
return false;
- if (Spec1->getNumArgs() != Spec2->getNumArgs())
+ if (!IsStructurallyEquivalent(Context, Spec1->template_arguments(),
+ Spec2->template_arguments()))
return false;
- for (unsigned I = 0, N = Spec1->getNumArgs(); I != N; ++I) {
- if (!IsStructurallyEquivalent(Context, Spec1->getArg(I),
- Spec2->getArg(I)))
- return false;
- }
break;
}
@@ -1205,33 +1353,50 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
cast<PipeType>(T2)->getElementType()))
return false;
break;
- case Type::ExtInt: {
- const auto *Int1 = cast<ExtIntType>(T1);
- const auto *Int2 = cast<ExtIntType>(T2);
+ case Type::BitInt: {
+ const auto *Int1 = cast<BitIntType>(T1);
+ const auto *Int2 = cast<BitIntType>(T2);
if (Int1->isUnsigned() != Int2->isUnsigned() ||
Int1->getNumBits() != Int2->getNumBits())
return false;
break;
}
- case Type::DependentExtInt: {
- const auto *Int1 = cast<DependentExtIntType>(T1);
- const auto *Int2 = cast<DependentExtIntType>(T2);
+ case Type::DependentBitInt: {
+ const auto *Int1 = cast<DependentBitIntType>(T1);
+ const auto *Int2 = cast<DependentBitIntType>(T2);
if (Int1->isUnsigned() != Int2->isUnsigned() ||
!IsStructurallyEquivalent(Context, Int1->getNumBitsExpr(),
Int2->getNumBitsExpr()))
return false;
+ break;
}
} // end switch
return true;
}
-/// Determine structural equivalence of two fields.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
- FieldDecl *Field1, FieldDecl *Field2) {
- const auto *Owner2 = cast<RecordDecl>(Field2->getDeclContext());
+ VarDecl *D1, VarDecl *D2) {
+ if (D1->getStorageClass() != D2->getStorageClass())
+ return false;
+
+ IdentifierInfo *Name1 = D1->getIdentifier();
+ IdentifierInfo *Name2 = D2->getIdentifier();
+ if (!::IsStructurallyEquivalent(Name1, Name2))
+ return false;
+
+ if (!IsStructurallyEquivalent(Context, D1->getType(), D2->getType()))
+ return false;
+
+ return IsStructurallyEquivalent(Context, D1->getInit(), D2->getInit());
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ FieldDecl *Field1, FieldDecl *Field2,
+ QualType Owner2Type) {
+ const auto *Owner2 = cast<Decl>(Field2->getDeclContext());
// For anonymous structs/unions, match up the anonymous struct/union type
// declarations directly, so that we don't go off searching for anonymous
@@ -1251,7 +1416,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(
Owner2->getLocation(),
Context.getApplicableDiagnostic(diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(Owner2);
+ << Owner2Type;
Context.Diag2(Field2->getLocation(), diag::note_odr_field_name)
<< Field2->getDeclName();
Context.Diag1(Field1->getLocation(), diag::note_odr_field_name)
@@ -1266,7 +1431,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(
Owner2->getLocation(),
Context.getApplicableDiagnostic(diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(Owner2);
+ << Owner2Type;
Context.Diag2(Field2->getLocation(), diag::note_odr_field)
<< Field2->getDeclName() << Field2->getType();
Context.Diag1(Field1->getLocation(), diag::note_odr_field)
@@ -1282,6 +1447,14 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return true;
}
+/// Determine structural equivalence of two fields.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ FieldDecl *Field1, FieldDecl *Field2) {
+ const auto *Owner2 = cast<RecordDecl>(Field2->getDeclContext());
+ return IsStructurallyEquivalent(Context, Field1, Field2,
+ Context.ToCtx.getTypeDeclType(Owner2));
+}
+
/// Determine structural equivalence of two methods.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
CXXMethodDecl *Method1,
@@ -1292,10 +1465,12 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Method1->getAccess() == Method2->getAccess() &&
Method1->getOverloadedOperator() == Method2->getOverloadedOperator() &&
Method1->isStatic() == Method2->isStatic() &&
+ Method1->isImplicitObjectMemberFunction() ==
+ Method2->isImplicitObjectMemberFunction() &&
Method1->isConst() == Method2->isConst() &&
Method1->isVolatile() == Method2->isVolatile() &&
Method1->isVirtual() == Method2->isVirtual() &&
- Method1->isPure() == Method2->isPure() &&
+ Method1->isPureVirtual() == Method2->isPureVirtual() &&
Method1->isDefaulted() == Method2->isDefaulted() &&
Method1->isDeleted() == Method2->isDeleted();
if (!PropertiesEqual)
@@ -1347,19 +1522,66 @@ IsStructurallyEquivalentLambdas(StructuralEquivalenceContext &Context,
return true;
}
+/// Determine if context of a class is equivalent.
+static bool
+IsRecordContextStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ RecordDecl *D1, RecordDecl *D2) {
+ // The context should be completely equal, including anonymous and inline
+ // namespaces.
+ // We compare objects as part of full translation units, not subtrees of
+ // translation units.
+ DeclContext *DC1 = D1->getDeclContext()->getNonTransparentContext();
+ DeclContext *DC2 = D2->getDeclContext()->getNonTransparentContext();
+ while (true) {
+ // Special case: We allow a struct defined in a function to be equivalent
+ // with a similar struct defined outside of a function.
+ if ((DC1->isFunctionOrMethod() && DC2->isTranslationUnit()) ||
+ (DC2->isFunctionOrMethod() && DC1->isTranslationUnit()))
+ return true;
+
+ if (DC1->getDeclKind() != DC2->getDeclKind())
+ return false;
+ if (DC1->isTranslationUnit())
+ break;
+ if (DC1->isInlineNamespace() != DC2->isInlineNamespace())
+ return false;
+ if (const auto *ND1 = dyn_cast<NamedDecl>(DC1)) {
+ const auto *ND2 = cast<NamedDecl>(DC2);
+ if (!DC1->isInlineNamespace() &&
+ !IsStructurallyEquivalent(ND1->getIdentifier(), ND2->getIdentifier()))
+ return false;
+ }
+
+ if (auto *D1Spec = dyn_cast<ClassTemplateSpecializationDecl>(DC1)) {
+ auto *D2Spec = dyn_cast<ClassTemplateSpecializationDecl>(DC2);
+ if (!IsStructurallyEquivalent(Context, D1Spec, D2Spec))
+ return false;
+ }
+
+ DC1 = DC1->getParent()->getNonTransparentContext();
+ DC2 = DC2->getParent()->getNonTransparentContext();
+ }
+
+ return true;
+}
+
+static bool NameIsStructurallyEquivalent(const TagDecl &D1, const TagDecl &D2) {
+ auto GetName = [](const TagDecl &D) -> const IdentifierInfo * {
+ if (const IdentifierInfo *Name = D.getIdentifier())
+ return Name;
+ if (const TypedefNameDecl *TypedefName = D.getTypedefNameForAnonDecl())
+ return TypedefName->getIdentifier();
+ return nullptr;
+ };
+ return IsStructurallyEquivalent(GetName(D1), GetName(D2));
+}
+
/// Determine structural equivalence of two records.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
RecordDecl *D1, RecordDecl *D2) {
-
- // Check for equivalent structure names.
- IdentifierInfo *Name1 = D1->getIdentifier();
- if (!Name1 && D1->getTypedefNameForAnonDecl())
- Name1 = D1->getTypedefNameForAnonDecl()->getIdentifier();
- IdentifierInfo *Name2 = D2->getIdentifier();
- if (!Name2 && D2->getTypedefNameForAnonDecl())
- Name2 = D2->getTypedefNameForAnonDecl()->getIdentifier();
- if (!IsStructurallyEquivalent(Name1, Name2))
+ if (!NameIsStructurallyEquivalent(*D1, *D2)) {
return false;
+ }
if (D1->isUnion() != D2->isUnion()) {
if (Context.Complain) {
@@ -1375,9 +1597,9 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (!D1->getDeclName() && !D2->getDeclName()) {
// If both anonymous structs/unions are in a record context, make sure
// they occur in the same location in the context records.
- if (Optional<unsigned> Index1 =
+ if (std::optional<unsigned> Index1 =
StructuralEquivalenceContext::findUntaggedStructOrUnionIndex(D1)) {
- if (Optional<unsigned> Index2 =
+ if (std::optional<unsigned> Index2 =
StructuralEquivalenceContext::findUntaggedStructOrUnionIndex(
D2)) {
if (*Index1 != *Index2)
@@ -1386,6 +1608,12 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
}
+ // If the records occur in different context (namespace), these should be
+ // different. This is specially important if the definition of one or both
+ // records is missing.
+ if (!IsRecordContextStructurallyEquivalent(Context, D1, D2))
+ return false;
+
// If both declarations are class template specializations, we know
// the ODR applies, so check the template and template arguments.
const auto *Spec1 = dyn_cast<ClassTemplateSpecializationDecl>(D1);
@@ -1554,6 +1782,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
// Check the fields for consistency.
+ QualType D2Type = Context.ToCtx.getTypeDeclType(D2);
RecordDecl::field_iterator Field2 = D2->field_begin(),
Field2End = D2->field_end();
for (RecordDecl::field_iterator Field1 = D1->field_begin(),
@@ -1572,7 +1801,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
}
- if (!IsStructurallyEquivalent(Context, *Field1, *Field2))
+ if (!IsStructurallyEquivalent(Context, *Field1, *Field2, D2Type))
return false;
}
@@ -1591,19 +1820,32 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return true;
}
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ EnumConstantDecl *D1,
+ EnumConstantDecl *D2) {
+ const llvm::APSInt &FromVal = D1->getInitVal();
+ const llvm::APSInt &ToVal = D2->getInitVal();
+ if (FromVal.isSigned() != ToVal.isSigned())
+ return false;
+ if (FromVal.getBitWidth() != ToVal.getBitWidth())
+ return false;
+ if (FromVal != ToVal)
+ return false;
+
+ if (!IsStructurallyEquivalent(D1->getIdentifier(), D2->getIdentifier()))
+ return false;
+
+ // Init expressions are the most expensive check, so do them last.
+ return IsStructurallyEquivalent(Context, D1->getInitExpr(),
+ D2->getInitExpr());
+}
+
/// Determine structural equivalence of two enums.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
EnumDecl *D1, EnumDecl *D2) {
-
- // Check for equivalent enum names.
- IdentifierInfo *Name1 = D1->getIdentifier();
- if (!Name1 && D1->getTypedefNameForAnonDecl())
- Name1 = D1->getTypedefNameForAnonDecl()->getIdentifier();
- IdentifierInfo *Name2 = D2->getIdentifier();
- if (!Name2 && D2->getTypedefNameForAnonDecl())
- Name2 = D2->getTypedefNameForAnonDecl()->getIdentifier();
- if (!IsStructurallyEquivalent(Name1, Name2))
+ if (!NameIsStructurallyEquivalent(*D1, *D2)) {
return false;
+ }
// Compare the definitions of these two enums. If either or both are
// incomplete (i.e. forward declared), we assume that they are equivalent.
@@ -1803,6 +2045,18 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ TypeAliasTemplateDecl *D1,
+ TypeAliasTemplateDecl *D2) {
+ // Check template parameters.
+ if (!IsTemplateDeclCommonStructurallyEquivalent(Context, D1, D2))
+ return false;
+
+ // Check the templated declaration.
+ return IsStructurallyEquivalent(Context, D1->getTemplatedDecl(),
+ D2->getTemplatedDecl());
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
ConceptDecl *D1,
ConceptDecl *D2) {
// Check template parameters.
@@ -1858,6 +2112,132 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return true;
}
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ ObjCIvarDecl *D1, ObjCIvarDecl *D2,
+ QualType Owner2Type) {
+ if (D1->getAccessControl() != D2->getAccessControl())
+ return false;
+
+ return IsStructurallyEquivalent(Context, cast<FieldDecl>(D1),
+ cast<FieldDecl>(D2), Owner2Type);
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ ObjCIvarDecl *D1, ObjCIvarDecl *D2) {
+ QualType Owner2Type =
+ Context.ToCtx.getObjCInterfaceType(D2->getContainingInterface());
+ return IsStructurallyEquivalent(Context, D1, D2, Owner2Type);
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ ObjCMethodDecl *Method1,
+ ObjCMethodDecl *Method2) {
+ bool PropertiesEqual =
+ Method1->isInstanceMethod() == Method2->isInstanceMethod() &&
+ Method1->isVariadic() == Method2->isVariadic() &&
+ Method1->isDirectMethod() == Method2->isDirectMethod();
+ if (!PropertiesEqual)
+ return false;
+
+ // Compare selector slot names.
+ Selector Selector1 = Method1->getSelector(),
+ Selector2 = Method2->getSelector();
+ unsigned NumArgs = Selector1.getNumArgs();
+ if (NumArgs != Selector2.getNumArgs())
+ return false;
+ // Compare all selector slots. For selectors with arguments it means all arg
+ // slots. And if there are no arguments, compare the first-and-only slot.
+ unsigned SlotsToCheck = NumArgs > 0 ? NumArgs : 1;
+ for (unsigned I = 0; I < SlotsToCheck; ++I) {
+ if (!IsStructurallyEquivalent(Selector1.getIdentifierInfoForSlot(I),
+ Selector2.getIdentifierInfoForSlot(I)))
+ return false;
+ }
+
+ // Compare types.
+ if (!IsStructurallyEquivalent(Context, Method1->getReturnType(),
+ Method2->getReturnType()))
+ return false;
+ assert(
+ Method1->param_size() == Method2->param_size() &&
+ "Same number of arguments should be already enforced in Selector checks");
+ for (ObjCMethodDecl::param_type_iterator
+ ParamT1 = Method1->param_type_begin(),
+ ParamT1End = Method1->param_type_end(),
+ ParamT2 = Method2->param_type_begin(),
+ ParamT2End = Method2->param_type_end();
+ (ParamT1 != ParamT1End) && (ParamT2 != ParamT2End);
+ ++ParamT1, ++ParamT2) {
+ if (!IsStructurallyEquivalent(Context, *ParamT1, *ParamT2))
+ return false;
+ }
+
+ return true;
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ ObjCCategoryDecl *D1,
+ ObjCCategoryDecl *D2) {
+ if (!IsStructurallyEquivalent(D1->getIdentifier(), D2->getIdentifier()))
+ return false;
+
+ const ObjCInterfaceDecl *Intf1 = D1->getClassInterface(),
+ *Intf2 = D2->getClassInterface();
+ if ((!Intf1 || !Intf2) && (Intf1 != Intf2))
+ return false;
+
+ if (Intf1 &&
+ !IsStructurallyEquivalent(Intf1->getIdentifier(), Intf2->getIdentifier()))
+ return false;
+
+ // Compare protocols.
+ ObjCCategoryDecl::protocol_iterator Protocol2 = D2->protocol_begin(),
+ Protocol2End = D2->protocol_end();
+ for (ObjCCategoryDecl::protocol_iterator Protocol1 = D1->protocol_begin(),
+ Protocol1End = D1->protocol_end();
+ Protocol1 != Protocol1End; ++Protocol1, ++Protocol2) {
+ if (Protocol2 == Protocol2End)
+ return false;
+ if (!IsStructurallyEquivalent((*Protocol1)->getIdentifier(),
+ (*Protocol2)->getIdentifier()))
+ return false;
+ }
+ if (Protocol2 != Protocol2End)
+ return false;
+
+ // Compare ivars.
+ QualType D2Type =
+ Intf2 ? Context.ToCtx.getObjCInterfaceType(Intf2) : QualType();
+ ObjCCategoryDecl::ivar_iterator Ivar2 = D2->ivar_begin(),
+ Ivar2End = D2->ivar_end();
+ for (ObjCCategoryDecl::ivar_iterator Ivar1 = D1->ivar_begin(),
+ Ivar1End = D1->ivar_end();
+ Ivar1 != Ivar1End; ++Ivar1, ++Ivar2) {
+ if (Ivar2 == Ivar2End)
+ return false;
+ if (!IsStructurallyEquivalent(Context, *Ivar1, *Ivar2, D2Type))
+ return false;
+ }
+ if (Ivar2 != Ivar2End)
+ return false;
+
+ // Compare methods.
+ ObjCCategoryDecl::method_iterator Method2 = D2->meth_begin(),
+ Method2End = D2->meth_end();
+ for (ObjCCategoryDecl::method_iterator Method1 = D1->meth_begin(),
+ Method1End = D1->meth_end();
+ Method1 != Method1End; ++Method1, ++Method2) {
+ if (Method2 == Method2End)
+ return false;
+ if (!IsStructurallyEquivalent(Context, *Method1, *Method2))
+ return false;
+ }
+ if (Method2 != Method2End)
+ return false;
+
+ return true;
+}
+
/// Determine structural equivalence of two declarations.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Decl *D1, Decl *D2) {
@@ -1902,14 +2282,14 @@ DiagnosticBuilder StructuralEquivalenceContext::Diag2(SourceLocation Loc,
return ToCtx.getDiagnostics().Report(Loc, DiagID);
}
-Optional<unsigned>
+std::optional<unsigned>
StructuralEquivalenceContext::findUntaggedStructOrUnionIndex(RecordDecl *Anon) {
ASTContext &Context = Anon->getASTContext();
QualType AnonTy = Context.getRecordType(Anon);
const auto *Owner = dyn_cast<RecordDecl>(Anon->getDeclContext());
if (!Owner)
- return None;
+ return std::nullopt;
unsigned Index = 0;
for (const auto *D : Owner->noload_decls()) {
diff --git a/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp b/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp
index 4a033bf50bd4..4c7496c699be 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp
@@ -13,10 +13,14 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTTypeTraits.h"
+#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OpenMPClause.h"
+#include "clang/AST/TypeLoc.h"
using namespace clang;
@@ -24,9 +28,12 @@ const ASTNodeKind::KindInfo ASTNodeKind::AllKindInfo[] = {
{NKI_None, "<None>"},
{NKI_None, "TemplateArgument"},
{NKI_None, "TemplateArgumentLoc"},
+ {NKI_None, "LambdaCapture"},
{NKI_None, "TemplateName"},
{NKI_None, "NestedNameSpecifierLoc"},
{NKI_None, "QualType"},
+#define TYPELOC(CLASS, PARENT) {NKI_##PARENT, #CLASS "TypeLoc"},
+#include "clang/AST/TypeLocNodes.def"
{NKI_None, "TypeLoc"},
{NKI_None, "CXXBaseSpecifier"},
{NKI_None, "CXXCtorInitializer"},
@@ -44,12 +51,30 @@ const ASTNodeKind::KindInfo ASTNodeKind::AllKindInfo[] = {
#define GEN_CLANG_CLAUSE_CLASS
#define CLAUSE_CLASS(Enum, Str, Class) {NKI_OMPClause, #Class},
#include "llvm/Frontend/OpenMP/OMP.inc"
+ {NKI_None, "Attr"},
+#define ATTR(A) {NKI_Attr, #A "Attr"},
+#include "clang/Basic/AttrList.inc"
+ {NKI_None, "ObjCProtocolLoc"},
+ {NKI_None, "ConceptReference"},
};
+bool ASTNodeKind::isBaseOf(ASTNodeKind Other) const {
+ return isBaseOf(KindId, Other.KindId);
+}
+
bool ASTNodeKind::isBaseOf(ASTNodeKind Other, unsigned *Distance) const {
return isBaseOf(KindId, Other.KindId, Distance);
}
+bool ASTNodeKind::isBaseOf(NodeKindId Base, NodeKindId Derived) {
+ if (Base == NKI_None || Derived == NKI_None)
+ return false;
+ while (Derived != Base && Derived != NKI_None) {
+ Derived = AllKindInfo[Derived].ParentId;
+ }
+ return Derived == Base;
+}
+
bool ASTNodeKind::isBaseOf(NodeKindId Base, NodeKindId Derived,
unsigned *Distance) {
if (Base == NKI_None || Derived == NKI_None) return false;
@@ -86,7 +111,7 @@ ASTNodeKind ASTNodeKind::getMostDerivedType(ASTNodeKind Kind1,
ASTNodeKind ASTNodeKind::getMostDerivedCommonAncestor(ASTNodeKind Kind1,
ASTNodeKind Kind2) {
NodeKindId Parent = Kind1.KindId;
- while (!isBaseOf(Parent, Kind2.KindId, nullptr) && Parent != NKI_None) {
+ while (!isBaseOf(Parent, Kind2.KindId) && Parent != NKI_None) {
Parent = AllKindInfo[Parent].ParentId;
}
return ASTNodeKind(Parent);
@@ -123,6 +148,17 @@ ASTNodeKind ASTNodeKind::getFromNode(const Type &T) {
llvm_unreachable("invalid type kind");
}
+ ASTNodeKind ASTNodeKind::getFromNode(const TypeLoc &T) {
+ switch (T.getTypeLocClass()) {
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ case TypeLoc::CLASS: \
+ return ASTNodeKind(NKI_##CLASS##TypeLoc);
+#include "clang/AST/TypeLocNodes.def"
+ }
+ llvm_unreachable("invalid typeloc kind");
+ }
+
ASTNodeKind ASTNodeKind::getFromNode(const OMPClause &C) {
switch (C.getClauseKind()) {
#define GEN_CLANG_CLAUSE_CLASS
@@ -134,7 +170,17 @@ ASTNodeKind ASTNodeKind::getFromNode(const OMPClause &C) {
llvm_unreachable("unexpected OpenMP clause kind");
#include "llvm/Frontend/OpenMP/OMP.inc"
}
- llvm_unreachable("invalid stmt kind");
+ llvm_unreachable("invalid omp clause kind");
+}
+
+ASTNodeKind ASTNodeKind::getFromNode(const Attr &A) {
+ switch (A.getKind()) {
+#define ATTR(A) \
+ case attr::A: \
+ return ASTNodeKind(NKI_##A##Attr);
+#include "clang/Basic/AttrList.inc"
+ }
+ llvm_unreachable("invalid attr kind");
}
void DynTypedNode::print(llvm::raw_ostream &OS,
@@ -162,6 +208,12 @@ void DynTypedNode::print(llvm::raw_ostream &OS,
S->printPretty(OS, nullptr, PP);
else if (const Type *T = get<Type>())
QualType(T, 0).print(OS, PP);
+ else if (const Attr *A = get<Attr>())
+ A->printPretty(OS, PP);
+ else if (const ObjCProtocolLoc *P = get<ObjCProtocolLoc>())
+ P->getProtocol()->print(OS, PP);
+ else if (const ConceptReference *C = get<ConceptReference>())
+ C->print(OS, PP);
else
OS << "Unable to print values of type " << NodeKind.asStringRef() << "\n";
}
@@ -174,6 +226,8 @@ void DynTypedNode::dump(llvm::raw_ostream &OS,
S->dump(OS, Context);
else if (const Type *T = get<Type>())
T->dump(OS, Context);
+ else if (const ConceptReference *C = get<ConceptReference>())
+ C->dump(OS);
else
OS << "Unable to dump values of type " << NodeKind.asStringRef() << "\n";
}
@@ -195,5 +249,11 @@ SourceRange DynTypedNode::getSourceRange() const {
return SourceRange(C->getBeginLoc(), C->getEndLoc());
if (const auto *CBS = get<CXXBaseSpecifier>())
return CBS->getSourceRange();
+ if (const auto *A = get<Attr>())
+ return A->getRange();
+ if (const ObjCProtocolLoc *P = get<ObjCProtocolLoc>())
+ return P->getSourceRange();
+ if (const ConceptReference *C = get<ConceptReference>())
+ return C->getSourceRange();
return SourceRange();
}
diff --git a/contrib/llvm-project/clang/lib/AST/AttrDocTable.cpp b/contrib/llvm-project/clang/lib/AST/AttrDocTable.cpp
new file mode 100644
index 000000000000..df7e3d63a6c3
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/AttrDocTable.cpp
@@ -0,0 +1,27 @@
+//===--- AttrDocTable.cpp - implements Attr::getDocumentation() -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains out-of-line methods for Attr classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Attr.h"
+#include "llvm/ADT/StringRef.h"
+
+#include "AttrDocTable.inc"
+
+static const llvm::StringRef AttrDoc[] = {
+#define ATTR(NAME) AttrDoc_##NAME,
+#include "clang/Basic/AttrList.inc"
+};
+
+llvm::StringRef clang::Attr::getDocumentation(clang::attr::Kind K) {
+ if (K < std::size(AttrDoc))
+ return AttrDoc[K];
+ return "";
+}
diff --git a/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp b/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp
index 662f86722fa3..f198a9acf848 100644
--- a/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp
+++ b/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp
@@ -14,6 +14,7 @@
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Type.h"
+#include <optional>
using namespace clang;
void LoopHintAttr::printPrettyPragma(raw_ostream &OS,
@@ -60,7 +61,7 @@ std::string LoopHintAttr::getValueString(const PrintingPolicy &Policy) const {
else
OS << "disable";
OS << ")";
- return OS.str();
+ return ValueName;
}
// Return a string suitable for identifying this attribute in diagnostics.
@@ -137,49 +138,58 @@ void OMPDeclareTargetDeclAttr::printPrettyPragma(
// Use fake syntax because it is for testing and debugging purpose only.
if (getDevType() != DT_Any)
OS << " device_type(" << ConvertDevTypeTyToStr(getDevType()) << ")";
- if (getMapType() != MT_To)
+ if (getMapType() != MT_To && getMapType() != MT_Enter)
OS << ' ' << ConvertMapTypeTyToStr(getMapType());
+ if (Expr *E = getIndirectExpr()) {
+ OS << " indirect(";
+ E->printPretty(OS, nullptr, Policy);
+ OS << ")";
+ } else if (getIndirect()) {
+ OS << " indirect";
+ }
}
-llvm::Optional<OMPDeclareTargetDeclAttr *>
+std::optional<OMPDeclareTargetDeclAttr *>
OMPDeclareTargetDeclAttr::getActiveAttr(const ValueDecl *VD) {
- if (!VD->hasAttrs())
- return llvm::None;
+ if (llvm::all_of(VD->redecls(), [](const Decl *D) { return !D->hasAttrs(); }))
+ return std::nullopt;
unsigned Level = 0;
OMPDeclareTargetDeclAttr *FoundAttr = nullptr;
- for (auto *Attr : VD->specific_attrs<OMPDeclareTargetDeclAttr>()) {
- if (Level <= Attr->getLevel()) {
- Level = Attr->getLevel();
- FoundAttr = Attr;
+ for (const Decl *D : VD->redecls()) {
+ for (auto *Attr : D->specific_attrs<OMPDeclareTargetDeclAttr>()) {
+ if (Level <= Attr->getLevel()) {
+ Level = Attr->getLevel();
+ FoundAttr = Attr;
+ }
}
}
if (FoundAttr)
return FoundAttr;
- return llvm::None;
+ return std::nullopt;
}
-llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy>
+std::optional<OMPDeclareTargetDeclAttr::MapTypeTy>
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(const ValueDecl *VD) {
- llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr = getActiveAttr(VD);
- if (ActiveAttr.hasValue())
- return ActiveAttr.getValue()->getMapType();
- return llvm::None;
+ std::optional<OMPDeclareTargetDeclAttr *> ActiveAttr = getActiveAttr(VD);
+ if (ActiveAttr)
+ return (*ActiveAttr)->getMapType();
+ return std::nullopt;
}
-llvm::Optional<OMPDeclareTargetDeclAttr::DevTypeTy>
+std::optional<OMPDeclareTargetDeclAttr::DevTypeTy>
OMPDeclareTargetDeclAttr::getDeviceType(const ValueDecl *VD) {
- llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr = getActiveAttr(VD);
- if (ActiveAttr.hasValue())
- return ActiveAttr.getValue()->getDevType();
- return llvm::None;
+ std::optional<OMPDeclareTargetDeclAttr *> ActiveAttr = getActiveAttr(VD);
+ if (ActiveAttr)
+ return (*ActiveAttr)->getDevType();
+ return std::nullopt;
}
-llvm::Optional<SourceLocation>
+std::optional<SourceLocation>
OMPDeclareTargetDeclAttr::getLocation(const ValueDecl *VD) {
- llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr = getActiveAttr(VD);
- if (ActiveAttr.hasValue())
- return ActiveAttr.getValue()->getRange().getBegin();
- return llvm::None;
+ std::optional<OMPDeclareTargetDeclAttr *> ActiveAttr = getActiveAttr(VD);
+ if (ActiveAttr)
+ return (*ActiveAttr)->getRange().getBegin();
+ return std::nullopt;
}
namespace clang {
@@ -195,6 +205,69 @@ void OMPDeclareVariantAttr::printPrettyPragma(
OS << ")";
}
OS << " match(" << traitInfos << ")";
+
+ auto PrintExprs = [&OS, &Policy](Expr **Begin, Expr **End) {
+ for (Expr **I = Begin; I != End; ++I) {
+ assert(*I && "Expected non-null Stmt");
+ if (I != Begin)
+ OS << ",";
+ (*I)->printPretty(OS, nullptr, Policy);
+ }
+ };
+ if (adjustArgsNothing_size()) {
+ OS << " adjust_args(nothing:";
+ PrintExprs(adjustArgsNothing_begin(), adjustArgsNothing_end());
+ OS << ")";
+ }
+ if (adjustArgsNeedDevicePtr_size()) {
+ OS << " adjust_args(need_device_ptr:";
+ PrintExprs(adjustArgsNeedDevicePtr_begin(), adjustArgsNeedDevicePtr_end());
+ OS << ")";
+ }
+
+ auto PrintInteropInfo = [&OS](OMPInteropInfo *Begin, OMPInteropInfo *End) {
+ for (OMPInteropInfo *I = Begin; I != End; ++I) {
+ if (I != Begin)
+ OS << ", ";
+ OS << "interop(";
+ OS << getInteropTypeString(I);
+ OS << ")";
+ }
+ };
+ if (appendArgs_size()) {
+ OS << " append_args(";
+ PrintInteropInfo(appendArgs_begin(), appendArgs_end());
+ OS << ")";
+ }
+}
+
+unsigned AlignedAttr::getAlignment(ASTContext &Ctx) const {
+ assert(!isAlignmentDependent());
+ if (getCachedAlignmentValue())
+ return *getCachedAlignmentValue();
+
+ // Handle alignmentType case.
+ if (!isAlignmentExpr()) {
+ QualType T = getAlignmentType()->getType();
+
+ // C++ [expr.alignof]p3:
+ // When alignof is applied to a reference type, the result is the
+ // alignment of the referenced type.
+ T = T.getNonReferenceType();
+
+ if (T.getQualifiers().hasUnaligned())
+ return Ctx.getCharWidth();
+
+ return Ctx.getTypeAlignInChars(T.getTypePtr()).getQuantity() *
+ Ctx.getCharWidth();
+ }
+
+ // Handle alignmentExpr case.
+ if (alignmentExpr)
+ return alignmentExpr->EvaluateKnownConstInt(Ctx).getZExtValue() *
+ Ctx.getCharWidth();
+
+ return Ctx.getTargetDefaultAlignForAttributeAligned();
}
#include "clang/AST/AttrImpl.inc"
diff --git a/contrib/llvm-project/clang/lib/AST/CXXABI.h b/contrib/llvm-project/clang/lib/AST/CXXABI.h
index ca9424bcb7a4..9258a53fefeb 100644
--- a/contrib/llvm-project/clang/lib/AST/CXXABI.h
+++ b/contrib/llvm-project/clang/lib/AST/CXXABI.h
@@ -21,7 +21,6 @@ namespace clang {
class ASTContext;
class CXXConstructorDecl;
class DeclaratorDecl;
-class Expr;
class MangleContext;
class MangleNumberingContext;
class MemberPointerType;
diff --git a/contrib/llvm-project/clang/lib/AST/CXXInheritance.cpp b/contrib/llvm-project/clang/lib/AST/CXXInheritance.cpp
index 9027fa7a7515..25de2a20a7f3 100644
--- a/contrib/llvm-project/clang/lib/AST/CXXInheritance.cpp
+++ b/contrib/llvm-project/clang/lib/AST/CXXInheritance.cpp
@@ -22,7 +22,6 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
@@ -81,7 +80,8 @@ bool CXXRecordDecl::isDerivedFrom(const CXXRecordDecl *Base,
const CXXRecordDecl *BaseDecl = Base->getCanonicalDecl();
return lookupInBases(
[BaseDecl](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
- return FindBaseClass(Specifier, Path, BaseDecl);
+ return Specifier->getType()->getAsRecordDecl() &&
+ FindBaseClass(Specifier, Path, BaseDecl);
},
Paths);
}
@@ -465,7 +465,7 @@ void OverridingMethods::add(unsigned OverriddenSubobject,
UniqueVirtualMethod Overriding) {
SmallVectorImpl<UniqueVirtualMethod> &SubobjectOverrides
= Overrides[OverriddenSubobject];
- if (llvm::find(SubobjectOverrides, Overriding) == SubobjectOverrides.end())
+ if (!llvm::is_contained(SubobjectOverrides, Overriding))
SubobjectOverrides.push_back(Overriding);
}
@@ -671,9 +671,7 @@ CXXRecordDecl::getFinalOverriders(CXXFinalOverriderMap &FinalOverriders) const {
// FIXME: IsHidden reads from Overriding from the middle of a remove_if
// over the same sequence! Is this guaranteed to work?
- Overriding.erase(
- std::remove_if(Overriding.begin(), Overriding.end(), IsHidden),
- Overriding.end());
+ llvm::erase_if(Overriding, IsHidden);
}
}
}
diff --git a/contrib/llvm-project/clang/lib/AST/Comment.cpp b/contrib/llvm-project/clang/lib/AST/Comment.cpp
index a02cc9d119fe..cce8b12170f2 100644
--- a/contrib/llvm-project/clang/lib/AST/Comment.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Comment.cpp
@@ -29,15 +29,16 @@ namespace comments {
#undef ABSTRACT_COMMENT
// DeclInfo is also allocated with a BumpPtrAllocator.
-static_assert(std::is_trivially_destructible<DeclInfo>::value,
+static_assert(std::is_trivially_destructible_v<DeclInfo>,
"DeclInfo should be trivially destructible!");
const char *Comment::getCommentKindName() const {
switch (getCommentKind()) {
- case NoCommentKind: return "NoCommentKind";
+ case CommentKind::None:
+ return "None";
#define ABSTRACT_COMMENT(COMMENT)
-#define COMMENT(CLASS, PARENT) \
- case CLASS##Kind: \
+#define COMMENT(CLASS, PARENT) \
+ case CommentKind::CLASS: \
return #CLASS;
#include "clang/AST/CommentNodes.inc"
#undef COMMENT
@@ -81,10 +82,11 @@ static inline void CheckCommentASTNodes() {
Comment::child_iterator Comment::child_begin() const {
switch (getCommentKind()) {
- case NoCommentKind: llvm_unreachable("comment without a kind");
+ case CommentKind::None:
+ llvm_unreachable("comment without a kind");
#define ABSTRACT_COMMENT(COMMENT)
-#define COMMENT(CLASS, PARENT) \
- case CLASS##Kind: \
+#define COMMENT(CLASS, PARENT) \
+ case CommentKind::CLASS: \
return static_cast<const CLASS *>(this)->child_begin();
#include "clang/AST/CommentNodes.inc"
#undef COMMENT
@@ -95,10 +97,11 @@ Comment::child_iterator Comment::child_begin() const {
Comment::child_iterator Comment::child_end() const {
switch (getCommentKind()) {
- case NoCommentKind: llvm_unreachable("comment without a kind");
+ case CommentKind::None:
+ llvm_unreachable("comment without a kind");
#define ABSTRACT_COMMENT(COMMENT)
-#define COMMENT(CLASS, PARENT) \
- case CLASS##Kind: \
+#define COMMENT(CLASS, PARENT) \
+ case CommentKind::CLASS: \
return static_cast<const CLASS *>(this)->child_end();
#include "clang/AST/CommentNodes.inc"
#undef COMMENT
@@ -108,12 +111,7 @@ Comment::child_iterator Comment::child_end() const {
}
bool TextComment::isWhitespaceNoCache() const {
- for (StringRef::const_iterator I = Text.begin(), E = Text.end();
- I != E; ++I) {
- if (!clang::isWhitespace(*I))
- return false;
- }
- return true;
+ return llvm::all_of(Text, clang::isWhitespace);
}
bool ParagraphComment::isWhitespaceNoCache() const {
@@ -189,13 +187,14 @@ static bool getFunctionTypeLoc(TypeLoc TL, FunctionTypeLoc &ResFTL) {
return false;
}
-const char *ParamCommandComment::getDirectionAsString(PassDirection D) {
+const char *
+ParamCommandComment::getDirectionAsString(ParamCommandPassDirection D) {
switch (D) {
- case ParamCommandComment::In:
+ case ParamCommandPassDirection::In:
return "[in]";
- case ParamCommandComment::Out:
+ case ParamCommandPassDirection::Out:
return "[out]";
- case ParamCommandComment::InOut:
+ case ParamCommandPassDirection::InOut:
return "[in,out]";
}
llvm_unreachable("unknown PassDirection");
@@ -210,7 +209,8 @@ void DeclInfo::fill() {
IsObjCMethod = false;
IsInstanceMethod = false;
IsClassMethod = false;
- ParamVars = None;
+ IsVariadic = false;
+ ParamVars = std::nullopt;
TemplateParameters = nullptr;
if (!CommentDecl) {
@@ -221,6 +221,7 @@ void DeclInfo::fill() {
CurrentDecl = CommentDecl;
Decl::Kind K = CommentDecl->getKind();
+ const TypeSourceInfo *TSI = nullptr;
switch (K) {
default:
// Defaults are should be good for declarations we don't handle explicitly.
@@ -247,6 +248,8 @@ void DeclInfo::fill() {
IsInstanceMethod = MD->isInstance();
IsClassMethod = !IsInstanceMethod;
}
+ IsVariadic = FD->isVariadic();
+ assert(involvesFunctionType());
break;
}
case Decl::ObjCMethod: {
@@ -257,6 +260,8 @@ void DeclInfo::fill() {
IsObjCMethod = true;
IsInstanceMethod = MD->isInstanceMethod();
IsClassMethod = !IsInstanceMethod;
+ IsVariadic = MD->isVariadic();
+ assert(involvesFunctionType());
break;
}
case Decl::FunctionTemplate: {
@@ -267,6 +272,8 @@ void DeclInfo::fill() {
ParamVars = FD->parameters();
ReturnType = FD->getReturnType();
TemplateParameters = FTD->getTemplateParameters();
+ IsVariadic = FD->isVariadic();
+ assert(involvesFunctionType());
break;
}
case Decl::ClassTemplate: {
@@ -293,76 +300,66 @@ void DeclInfo::fill() {
Kind = ClassKind;
break;
case Decl::Var:
+ if (const VarTemplateDecl *VTD =
+ cast<VarDecl>(CommentDecl)->getDescribedVarTemplate()) {
+ TemplateKind = TemplateSpecialization;
+ TemplateParameters = VTD->getTemplateParameters();
+ }
+ [[fallthrough]];
case Decl::Field:
case Decl::EnumConstant:
case Decl::ObjCIvar:
case Decl::ObjCAtDefsField:
- case Decl::ObjCProperty: {
- const TypeSourceInfo *TSI;
+ case Decl::ObjCProperty:
if (const auto *VD = dyn_cast<DeclaratorDecl>(CommentDecl))
TSI = VD->getTypeSourceInfo();
else if (const auto *PD = dyn_cast<ObjCPropertyDecl>(CommentDecl))
TSI = PD->getTypeSourceInfo();
- else
- TSI = nullptr;
- if (TSI) {
- TypeLoc TL = TSI->getTypeLoc().getUnqualifiedLoc();
- FunctionTypeLoc FTL;
- if (getFunctionTypeLoc(TL, FTL)) {
- ParamVars = FTL.getParams();
- ReturnType = FTL.getReturnLoc().getType();
- }
- }
Kind = VariableKind;
break;
+ case Decl::VarTemplate: {
+ const VarTemplateDecl *VTD = cast<VarTemplateDecl>(CommentDecl);
+ Kind = VariableKind;
+ TemplateKind = Template;
+ TemplateParameters = VTD->getTemplateParameters();
+ if (const VarDecl *VD = VTD->getTemplatedDecl())
+ TSI = VD->getTypeSourceInfo();
+ break;
}
case Decl::Namespace:
Kind = NamespaceKind;
break;
case Decl::TypeAlias:
- case Decl::Typedef: {
+ case Decl::Typedef:
Kind = TypedefKind;
- // If this is a typedef / using to something we consider a function, extract
- // arguments and return type.
- const TypeSourceInfo *TSI =
- K == Decl::Typedef
- ? cast<TypedefDecl>(CommentDecl)->getTypeSourceInfo()
- : cast<TypeAliasDecl>(CommentDecl)->getTypeSourceInfo();
- if (!TSI)
- break;
- TypeLoc TL = TSI->getTypeLoc().getUnqualifiedLoc();
- FunctionTypeLoc FTL;
- if (getFunctionTypeLoc(TL, FTL)) {
- Kind = FunctionKind;
- ParamVars = FTL.getParams();
- ReturnType = FTL.getReturnLoc().getType();
- }
+ TSI = cast<TypedefNameDecl>(CommentDecl)->getTypeSourceInfo();
break;
- }
case Decl::TypeAliasTemplate: {
const TypeAliasTemplateDecl *TAT = cast<TypeAliasTemplateDecl>(CommentDecl);
Kind = TypedefKind;
TemplateKind = Template;
TemplateParameters = TAT->getTemplateParameters();
- TypeAliasDecl *TAD = TAT->getTemplatedDecl();
- if (!TAD)
- break;
+ if (TypeAliasDecl *TAD = TAT->getTemplatedDecl())
+ TSI = TAD->getTypeSourceInfo();
+ break;
+ }
+ case Decl::Enum:
+ Kind = EnumKind;
+ break;
+ }
- const TypeSourceInfo *TSI = TAD->getTypeSourceInfo();
- if (!TSI)
- break;
+ // If the type is a typedef / using to something we consider a function,
+ // extract arguments and return type.
+ if (TSI) {
TypeLoc TL = TSI->getTypeLoc().getUnqualifiedLoc();
FunctionTypeLoc FTL;
if (getFunctionTypeLoc(TL, FTL)) {
- Kind = FunctionKind;
ParamVars = FTL.getParams();
ReturnType = FTL.getReturnLoc().getType();
+ if (const auto *FPT = dyn_cast<FunctionProtoType>(FTL.getTypePtr()))
+ IsVariadic = FPT->isVariadic();
+ assert(involvesFunctionType());
}
- break;
- }
- case Decl::Enum:
- Kind = EnumKind;
- break;
}
IsFilled = true;
diff --git a/contrib/llvm-project/clang/lib/AST/CommentBriefParser.cpp b/contrib/llvm-project/clang/lib/AST/CommentBriefParser.cpp
index 2b648cbb1d4b..bf9e17993497 100644
--- a/contrib/llvm-project/clang/lib/AST/CommentBriefParser.cpp
+++ b/contrib/llvm-project/clang/lib/AST/CommentBriefParser.cpp
@@ -8,15 +8,12 @@
#include "clang/AST/CommentBriefParser.h"
#include "clang/AST/CommentCommandTraits.h"
+#include "clang/Basic/CharInfo.h"
namespace clang {
namespace comments {
namespace {
-inline bool isWhitespace(char C) {
- return C == ' ' || C == '\n' || C == '\r' ||
- C == '\t' || C == '\f' || C == '\v';
-}
/// Convert all whitespace into spaces, remove leading and trailing spaces,
/// compress multiple spaces into one.
@@ -26,12 +23,11 @@ void cleanupBrief(std::string &S) {
for (std::string::iterator I = S.begin(), E = S.end();
I != E; ++I) {
const char C = *I;
- if (isWhitespace(C)) {
+ if (clang::isWhitespace(C)) {
if (!PrevWasSpace) {
*O++ = ' ';
PrevWasSpace = true;
}
- continue;
} else {
*O++ = C;
PrevWasSpace = false;
@@ -44,12 +40,7 @@ void cleanupBrief(std::string &S) {
}
bool isWhitespace(StringRef Text) {
- for (StringRef::const_iterator I = Text.begin(), E = Text.end();
- I != E; ++I) {
- if (!isWhitespace(*I))
- return false;
- }
- return true;
+ return llvm::all_of(Text, clang::isWhitespace);
}
} // unnamed namespace
@@ -123,7 +114,7 @@ std::string BriefParser::Parse() {
// We found a paragraph end. This ends the brief description if
// \command or its equivalent was explicitly used.
// Stop scanning text because an explicit \paragraph is the
- // preffered one.
+ // preferred one.
if (InBrief)
break;
// End first paragraph if we found some non-whitespace text.
diff --git a/contrib/llvm-project/clang/lib/AST/CommentCommandTraits.cpp b/contrib/llvm-project/clang/lib/AST/CommentCommandTraits.cpp
index bdc0dd47fb7d..a37a0e18432c 100644
--- a/contrib/llvm-project/clang/lib/AST/CommentCommandTraits.cpp
+++ b/contrib/llvm-project/clang/lib/AST/CommentCommandTraits.cpp
@@ -16,8 +16,8 @@ namespace comments {
#include "clang/AST/CommentCommandInfo.inc"
CommandTraits::CommandTraits(llvm::BumpPtrAllocator &Allocator,
- const CommentOptions &CommentOptions) :
- NextID(llvm::array_lengthof(Commands)), Allocator(Allocator) {
+ const CommentOptions &CommentOptions)
+ : NextID(std::size(Commands)), Allocator(Allocator) {
registerCommentOptions(CommentOptions);
}
@@ -115,7 +115,7 @@ const CommandInfo *CommandTraits::registerBlockCommand(StringRef CommandName) {
const CommandInfo *CommandTraits::getBuiltinCommandInfo(
unsigned CommandID) {
- if (CommandID < llvm::array_lengthof(Commands))
+ if (CommandID < std::size(Commands))
return &Commands[CommandID];
return nullptr;
}
@@ -131,7 +131,7 @@ const CommandInfo *CommandTraits::getRegisteredCommandInfo(
const CommandInfo *CommandTraits::getRegisteredCommandInfo(
unsigned CommandID) const {
- return RegisteredCommands[CommandID - llvm::array_lengthof(Commands)];
+ return RegisteredCommands[CommandID - std::size(Commands)];
}
} // end namespace comments
diff --git a/contrib/llvm-project/clang/lib/AST/CommentLexer.cpp b/contrib/llvm-project/clang/lib/AST/CommentLexer.cpp
index 4bebd41e15ee..f0250fc9fd55 100644
--- a/contrib/llvm-project/clang/lib/AST/CommentLexer.cpp
+++ b/contrib/llvm-project/clang/lib/AST/CommentLexer.cpp
@@ -94,31 +94,12 @@ void Lexer::skipLineStartingDecorations() {
if (BufferPtr == CommentEnd)
return;
- switch (*BufferPtr) {
- case ' ':
- case '\t':
- case '\f':
- case '\v': {
- const char *NewBufferPtr = BufferPtr;
- NewBufferPtr++;
- if (NewBufferPtr == CommentEnd)
+ const char *NewBufferPtr = BufferPtr;
+ while (isHorizontalWhitespace(*NewBufferPtr))
+ if (++NewBufferPtr == CommentEnd)
return;
-
- char C = *NewBufferPtr;
- while (isHorizontalWhitespace(C)) {
- NewBufferPtr++;
- if (NewBufferPtr == CommentEnd)
- return;
- C = *NewBufferPtr;
- }
- if (C == '*')
- BufferPtr = NewBufferPtr + 1;
- break;
- }
- case '*':
- BufferPtr++;
- break;
- }
+ if (*NewBufferPtr == '*')
+ BufferPtr = NewBufferPtr + 1;
}
namespace {
@@ -289,6 +270,29 @@ void Lexer::formTokenWithChars(Token &Result, const char *TokEnd,
BufferPtr = TokEnd;
}
+const char *Lexer::skipTextToken() {
+ const char *TokenPtr = BufferPtr;
+ assert(TokenPtr < CommentEnd);
+ StringRef TokStartSymbols = ParseCommands ? "\n\r\\@\"&<" : "\n\r";
+
+again:
+ size_t End =
+ StringRef(TokenPtr, CommentEnd - TokenPtr).find_first_of(TokStartSymbols);
+ if (End == StringRef::npos)
+ return CommentEnd;
+
+ // Doxygen doesn't recognize any commands in a one-line double quotation.
+ // If we don't find an ending quotation mark, we pretend it never began.
+ if (*(TokenPtr + End) == '\"') {
+ TokenPtr += End + 1;
+ End = StringRef(TokenPtr, CommentEnd - TokenPtr).find_first_of("\n\r\"");
+ if (End != StringRef::npos && *(TokenPtr + End) == '\"')
+ TokenPtr += End + 1;
+ goto again;
+ }
+ return TokenPtr + End;
+}
+
void Lexer::lexCommentText(Token &T) {
assert(CommentState == LCS_InsideBCPLComment ||
CommentState == LCS_InsideCComment);
@@ -309,17 +313,8 @@ void Lexer::lexCommentText(Token &T) {
skipLineStartingDecorations();
return;
- default: {
- StringRef TokStartSymbols = ParseCommands ? "\n\r\\@&<" : "\n\r";
- size_t End = StringRef(TokenPtr, CommentEnd - TokenPtr)
- .find_first_of(TokStartSymbols);
- if (End != StringRef::npos)
- TokenPtr += End;
- else
- TokenPtr = CommentEnd;
- formTextToken(T, TokenPtr);
- return;
- }
+ default:
+ return formTextToken(T, skipTextToken());
}
};
@@ -392,10 +387,11 @@ void Lexer::lexCommentText(Token &T) {
unsigned Length = TokenPtr - (BufferPtr + 1);
// Hardcoded support for lexing LaTeX formula commands
- // \f$ \f[ \f] \f{ \f} as a single command.
+ // \f$ \f( \f) \f[ \f] \f{ \f} as a single command.
if (Length == 1 && TokenPtr[-1] == 'f' && TokenPtr != CommentEnd) {
C = *TokenPtr;
- if (C == '$' || C == '[' || C == ']' || C == '{' || C == '}') {
+ if (C == '$' || C == '(' || C == ')' || C == '[' || C == ']' ||
+ C == '{' || C == '}') {
TokenPtr++;
Length++;
}
@@ -705,7 +701,7 @@ void Lexer::lexHTMLStartTag(Token &T) {
C = *BufferPtr;
if (!isHTMLIdentifierStartingCharacter(C) &&
- C != '=' && C != '\"' && C != '\'' && C != '>') {
+ C != '=' && C != '\"' && C != '\'' && C != '>' && C != '/') {
State = LS_Normal;
return;
}
diff --git a/contrib/llvm-project/clang/lib/AST/CommentParser.cpp b/contrib/llvm-project/clang/lib/AST/CommentParser.cpp
index 29983b0a16c3..8adfd85d0160 100644
--- a/contrib/llvm-project/clang/lib/AST/CommentParser.cpp
+++ b/contrib/llvm-project/clang/lib/AST/CommentParser.cpp
@@ -245,7 +245,7 @@ public:
Pos.CurToken++;
}
- P.putBack(llvm::makeArrayRef(Toks.begin() + Pos.CurToken, Toks.end()));
+ P.putBack(llvm::ArrayRef(Toks.begin() + Pos.CurToken, Toks.end()));
Pos.CurToken = Toks.size();
if (HavePartialTok)
@@ -289,22 +289,19 @@ void Parser::parseTParamCommandArgs(TParamCommandComment *TPC,
Arg.getText());
}
-void Parser::parseBlockCommandArgs(BlockCommandComment *BC,
- TextTokenRetokenizer &Retokenizer,
- unsigned NumArgs) {
- typedef BlockCommandComment::Argument Argument;
- Argument *Args =
- new (Allocator.Allocate<Argument>(NumArgs)) Argument[NumArgs];
+ArrayRef<Comment::Argument>
+Parser::parseCommandArgs(TextTokenRetokenizer &Retokenizer, unsigned NumArgs) {
+ auto *Args = new (Allocator.Allocate<Comment::Argument>(NumArgs))
+ Comment::Argument[NumArgs];
unsigned ParsedArgs = 0;
Token Arg;
while (ParsedArgs < NumArgs && Retokenizer.lexWord(Arg)) {
- Args[ParsedArgs] = Argument(SourceRange(Arg.getLocation(),
- Arg.getEndLocation()),
- Arg.getText());
+ Args[ParsedArgs] = Comment::Argument{
+ SourceRange(Arg.getLocation(), Arg.getEndLocation()), Arg.getText()};
ParsedArgs++;
}
- S.actOnBlockCommandArgs(BC, llvm::makeArrayRef(Args, ParsedArgs));
+ return llvm::ArrayRef(Args, ParsedArgs);
}
BlockCommandComment *Parser::parseBlockCommand() {
@@ -337,7 +334,7 @@ BlockCommandComment *Parser::parseBlockCommand() {
if (isTokBlockCommand()) {
// Block command ahead. We can't nest block commands, so pretend that this
// command has an empty argument.
- ParagraphComment *Paragraph = S.actOnParagraphComment(None);
+ ParagraphComment *Paragraph = S.actOnParagraphComment(std::nullopt);
if (PC) {
S.actOnParamCommandFinish(PC, Paragraph);
return PC;
@@ -360,7 +357,7 @@ BlockCommandComment *Parser::parseBlockCommand() {
else if (TPC)
parseTParamCommandArgs(TPC, Retokenizer);
else
- parseBlockCommandArgs(BC, Retokenizer, Info->NumArgs);
+ S.actOnBlockCommandArgs(BC, parseCommandArgs(Retokenizer, Info->NumArgs));
Retokenizer.putBackLeftoverTokens();
}
@@ -379,7 +376,7 @@ BlockCommandComment *Parser::parseBlockCommand() {
ParagraphComment *Paragraph;
if (EmptyParagraph)
- Paragraph = S.actOnParagraphComment(None);
+ Paragraph = S.actOnParagraphComment(std::nullopt);
else {
BlockContentComment *Block = parseParagraphOrBlockCommand();
// Since we have checked for a block command, we should have parsed a
@@ -401,32 +398,24 @@ BlockCommandComment *Parser::parseBlockCommand() {
InlineCommandComment *Parser::parseInlineCommand() {
assert(Tok.is(tok::backslash_command) || Tok.is(tok::at_command));
+ const CommandInfo *Info = Traits.getCommandInfo(Tok.getCommandID());
const Token CommandTok = Tok;
consumeToken();
TextTokenRetokenizer Retokenizer(Allocator, *this);
+ ArrayRef<Comment::Argument> Args =
+ parseCommandArgs(Retokenizer, Info->NumArgs);
- Token ArgTok;
- bool ArgTokValid = Retokenizer.lexWord(ArgTok);
-
- InlineCommandComment *IC;
- if (ArgTokValid) {
- IC = S.actOnInlineCommand(CommandTok.getLocation(),
- CommandTok.getEndLocation(),
- CommandTok.getCommandID(),
- ArgTok.getLocation(),
- ArgTok.getEndLocation(),
- ArgTok.getText());
- } else {
- IC = S.actOnInlineCommand(CommandTok.getLocation(),
- CommandTok.getEndLocation(),
- CommandTok.getCommandID());
+ InlineCommandComment *IC = S.actOnInlineCommand(
+ CommandTok.getLocation(), CommandTok.getEndLocation(),
+ CommandTok.getCommandID(), Args);
+ if (Args.size() < Info->NumArgs) {
Diag(CommandTok.getEndLocation().getLocWithOffset(1),
- diag::warn_doc_inline_contents_no_argument)
- << CommandTok.is(tok::at_command)
- << Traits.getCommandInfo(CommandTok.getCommandID())->Name
+ diag::warn_doc_inline_command_not_enough_arguments)
+ << CommandTok.is(tok::at_command) << Info->Name << Args.size()
+ << Info->NumArgs
<< SourceRange(CommandTok.getLocation(), CommandTok.getEndLocation());
}
@@ -478,16 +467,14 @@ HTMLStartTagComment *Parser::parseHTMLStartTag() {
}
case tok::html_greater:
- S.actOnHTMLStartTagFinish(HST,
- S.copyArray(llvm::makeArrayRef(Attrs)),
+ S.actOnHTMLStartTagFinish(HST, S.copyArray(llvm::ArrayRef(Attrs)),
Tok.getLocation(),
/* IsSelfClosing = */ false);
consumeToken();
return HST;
case tok::html_slash_greater:
- S.actOnHTMLStartTagFinish(HST,
- S.copyArray(llvm::makeArrayRef(Attrs)),
+ S.actOnHTMLStartTagFinish(HST, S.copyArray(llvm::ArrayRef(Attrs)),
Tok.getLocation(),
/* IsSelfClosing = */ true);
consumeToken();
@@ -505,16 +492,14 @@ HTMLStartTagComment *Parser::parseHTMLStartTag() {
Tok.is(tok::html_slash_greater))
continue;
- S.actOnHTMLStartTagFinish(HST,
- S.copyArray(llvm::makeArrayRef(Attrs)),
+ S.actOnHTMLStartTagFinish(HST, S.copyArray(llvm::ArrayRef(Attrs)),
SourceLocation(),
/* IsSelfClosing = */ false);
return HST;
default:
// Not a token from an HTML start tag. Thus HTML tag prematurely ended.
- S.actOnHTMLStartTagFinish(HST,
- S.copyArray(llvm::makeArrayRef(Attrs)),
+ S.actOnHTMLStartTagFinish(HST, S.copyArray(llvm::ArrayRef(Attrs)),
SourceLocation(),
/* IsSelfClosing = */ false);
bool StartLineInvalid;
@@ -653,7 +638,7 @@ BlockContentComment *Parser::parseParagraphOrBlockCommand() {
break;
}
- return S.actOnParagraphComment(S.copyArray(llvm::makeArrayRef(Content)));
+ return S.actOnParagraphComment(S.copyArray(llvm::ArrayRef(Content)));
}
VerbatimBlockComment *Parser::parseVerbatimBlock() {
@@ -690,14 +675,13 @@ VerbatimBlockComment *Parser::parseVerbatimBlock() {
if (Tok.is(tok::verbatim_block_end)) {
const CommandInfo *Info = Traits.getCommandInfo(Tok.getVerbatimBlockID());
- S.actOnVerbatimBlockFinish(VB, Tok.getLocation(),
- Info->Name,
- S.copyArray(llvm::makeArrayRef(Lines)));
+ S.actOnVerbatimBlockFinish(VB, Tok.getLocation(), Info->Name,
+ S.copyArray(llvm::ArrayRef(Lines)));
consumeToken();
} else {
// Unterminated \\verbatim block
S.actOnVerbatimBlockFinish(VB, SourceLocation(), "",
- S.copyArray(llvm::makeArrayRef(Lines)));
+ S.copyArray(llvm::ArrayRef(Lines)));
}
return VB;
@@ -773,7 +757,7 @@ FullComment *Parser::parseFullComment() {
while (Tok.is(tok::newline))
consumeToken();
}
- return S.actOnFullComment(S.copyArray(llvm::makeArrayRef(Blocks)));
+ return S.actOnFullComment(S.copyArray(llvm::ArrayRef(Blocks)));
}
} // end namespace comments
diff --git a/contrib/llvm-project/clang/lib/AST/CommentSema.cpp b/contrib/llvm-project/clang/lib/AST/CommentSema.cpp
index 7642e73fa171..bc01baa1d917 100644
--- a/contrib/llvm-project/clang/lib/AST/CommentSema.cpp
+++ b/contrib/llvm-project/clang/lib/AST/CommentSema.cpp
@@ -86,7 +86,7 @@ ParamCommandComment *Sema::actOnParamCommandStart(
new (Allocator) ParamCommandComment(LocBegin, LocEnd, CommandID,
CommandMarker);
- if (!isFunctionDecl() && !isFunctionOrBlockPointerVarLikeDecl())
+ if (!involvesFunctionType())
Diag(Command->getLocation(),
diag::warn_doc_param_not_attached_to_a_function_decl)
<< CommandMarker
@@ -219,12 +219,12 @@ void Sema::checkContainerDecl(const BlockCommandComment *Comment) {
/// Turn a string into the corresponding PassDirection or -1 if it's not
/// valid.
-static int getParamPassDirection(StringRef Arg) {
- return llvm::StringSwitch<int>(Arg)
- .Case("[in]", ParamCommandComment::In)
- .Case("[out]", ParamCommandComment::Out)
- .Cases("[in,out]", "[out,in]", ParamCommandComment::InOut)
- .Default(-1);
+static ParamCommandPassDirection getParamPassDirection(StringRef Arg) {
+ return llvm::StringSwitch<ParamCommandPassDirection>(Arg)
+ .Case("[in]", ParamCommandPassDirection::In)
+ .Case("[out]", ParamCommandPassDirection::Out)
+ .Cases("[in,out]", "[out,in]", ParamCommandPassDirection::InOut)
+ .Default(static_cast<ParamCommandPassDirection>(-1));
}
void Sema::actOnParamCommandDirectionArg(ParamCommandComment *Command,
@@ -232,27 +232,25 @@ void Sema::actOnParamCommandDirectionArg(ParamCommandComment *Command,
SourceLocation ArgLocEnd,
StringRef Arg) {
std::string ArgLower = Arg.lower();
- int Direction = getParamPassDirection(ArgLower);
+ ParamCommandPassDirection Direction = getParamPassDirection(ArgLower);
- if (Direction == -1) {
+ if (Direction == static_cast<ParamCommandPassDirection>(-1)) {
// Try again with whitespace removed.
- ArgLower.erase(
- std::remove_if(ArgLower.begin(), ArgLower.end(), clang::isWhitespace),
- ArgLower.end());
+ llvm::erase_if(ArgLower, clang::isWhitespace);
Direction = getParamPassDirection(ArgLower);
SourceRange ArgRange(ArgLocBegin, ArgLocEnd);
- if (Direction != -1) {
- const char *FixedName = ParamCommandComment::getDirectionAsString(
- (ParamCommandComment::PassDirection)Direction);
+ if (Direction != static_cast<ParamCommandPassDirection>(-1)) {
+ const char *FixedName =
+ ParamCommandComment::getDirectionAsString(Direction);
Diag(ArgLocBegin, diag::warn_doc_param_spaces_in_direction)
<< ArgRange << FixItHint::CreateReplacement(ArgRange, FixedName);
} else {
Diag(ArgLocBegin, diag::warn_doc_param_invalid_direction) << ArgRange;
- Direction = ParamCommandComment::In; // Sane fall back.
+ Direction = ParamCommandPassDirection::In; // Sane fall back.
}
}
- Command->setDirection((ParamCommandComment::PassDirection)Direction,
+ Command->setDirection(Direction,
/*Explicit=*/true);
}
@@ -265,13 +263,12 @@ void Sema::actOnParamCommandParamNameArg(ParamCommandComment *Command,
if (!Command->isDirectionExplicit()) {
// User didn't provide a direction argument.
- Command->setDirection(ParamCommandComment::In, /* Explicit = */ false);
+ Command->setDirection(ParamCommandPassDirection::In,
+ /* Explicit = */ false);
}
- typedef BlockCommandComment::Argument Argument;
- Argument *A = new (Allocator) Argument(SourceRange(ArgLocBegin,
- ArgLocEnd),
- Arg);
- Command->setArgs(llvm::makeArrayRef(A, 1));
+ auto *A = new (Allocator)
+ Comment::Argument{SourceRange(ArgLocBegin, ArgLocEnd), Arg};
+ Command->setArgs(llvm::ArrayRef(A, 1));
}
void Sema::actOnParamCommandFinish(ParamCommandComment *Command,
@@ -305,11 +302,9 @@ void Sema::actOnTParamCommandParamNameArg(TParamCommandComment *Command,
// Parser will not feed us more arguments than needed.
assert(Command->getNumArgs() == 0);
- typedef BlockCommandComment::Argument Argument;
- Argument *A = new (Allocator) Argument(SourceRange(ArgLocBegin,
- ArgLocEnd),
- Arg);
- Command->setArgs(llvm::makeArrayRef(A, 1));
+ auto *A = new (Allocator)
+ Comment::Argument{SourceRange(ArgLocBegin, ArgLocEnd), Arg};
+ Command->setArgs(llvm::ArrayRef(A, 1));
if (!isTemplateOrSpecialization()) {
// We already warned that this \\tparam is not attached to a template decl.
@@ -320,7 +315,7 @@ void Sema::actOnTParamCommandParamNameArg(TParamCommandComment *Command,
ThisDeclInfo->TemplateParameters;
SmallVector<unsigned, 2> Position;
if (resolveTParamReference(Arg, TemplateParameters, &Position)) {
- Command->setPosition(copyArray(llvm::makeArrayRef(Position)));
+ Command->setPosition(copyArray(llvm::ArrayRef(Position)));
TParamCommandComment *&PrevCommand = TemplateParameterDocs[Arg];
if (PrevCommand) {
SourceRange ArgRange(ArgLocBegin, ArgLocEnd);
@@ -363,37 +358,15 @@ void Sema::actOnTParamCommandFinish(TParamCommandComment *Command,
checkBlockCommandEmptyParagraph(Command);
}
-InlineCommandComment *Sema::actOnInlineCommand(SourceLocation CommandLocBegin,
- SourceLocation CommandLocEnd,
- unsigned CommandID) {
- ArrayRef<InlineCommandComment::Argument> Args;
+InlineCommandComment *
+Sema::actOnInlineCommand(SourceLocation CommandLocBegin,
+ SourceLocation CommandLocEnd, unsigned CommandID,
+ ArrayRef<Comment::Argument> Args) {
StringRef CommandName = Traits.getCommandInfo(CommandID)->Name;
- return new (Allocator) InlineCommandComment(
- CommandLocBegin,
- CommandLocEnd,
- CommandID,
- getInlineCommandRenderKind(CommandName),
- Args);
-}
-InlineCommandComment *Sema::actOnInlineCommand(SourceLocation CommandLocBegin,
- SourceLocation CommandLocEnd,
- unsigned CommandID,
- SourceLocation ArgLocBegin,
- SourceLocation ArgLocEnd,
- StringRef Arg) {
- typedef InlineCommandComment::Argument Argument;
- Argument *A = new (Allocator) Argument(SourceRange(ArgLocBegin,
- ArgLocEnd),
- Arg);
- StringRef CommandName = Traits.getCommandInfo(CommandID)->Name;
-
- return new (Allocator) InlineCommandComment(
- CommandLocBegin,
- CommandLocEnd,
- CommandID,
- getInlineCommandRenderKind(CommandName),
- llvm::makeArrayRef(A, 1));
+ return new (Allocator)
+ InlineCommandComment(CommandLocBegin, CommandLocEnd, CommandID,
+ getInlineCommandRenderKind(CommandName), Args);
}
InlineContentComment *Sema::actOnUnknownCommand(SourceLocation LocBegin,
@@ -408,9 +381,7 @@ InlineContentComment *Sema::actOnUnknownCommand(SourceLocation LocBegin,
unsigned CommandID) {
ArrayRef<InlineCommandComment::Argument> Args;
return new (Allocator) InlineCommandComment(
- LocBegin, LocEnd, CommandID,
- InlineCommandComment::RenderNormal,
- Args);
+ LocBegin, LocEnd, CommandID, InlineCommandRenderKind::Normal, Args);
}
TextComment *Sema::actOnText(SourceLocation LocBegin,
@@ -590,7 +561,7 @@ void Sema::checkReturnsCommand(const BlockCommandComment *Command) {
// to document the value that the property getter returns.
if (isObjCPropertyDecl())
return;
- if (isFunctionDecl() || isFunctionOrBlockPointerVarLikeDecl()) {
+ if (involvesFunctionType()) {
assert(!ThisDeclInfo->ReturnType.isNull() &&
"should have a valid return type");
if (ThisDeclInfo->ReturnType->isVoidType()) {
@@ -692,12 +663,12 @@ void Sema::checkDeprecatedCommand(const BlockCommandComment *Command) {
return;
const LangOptions &LO = FD->getLangOpts();
- const bool DoubleSquareBracket = LO.CPlusPlus14 || LO.C2x;
+ const bool DoubleSquareBracket = LO.CPlusPlus14 || LO.C23;
StringRef AttributeSpelling =
DoubleSquareBracket ? "[[deprecated]]" : "__attribute__((deprecated))";
if (PP) {
// Try to find a replacement macro:
- // - In C2x/C++14 we prefer [[deprecated]].
+ // - In C23/C++14 we prefer [[deprecated]].
// - If not found or an older C/C++ look for __attribute__((deprecated)).
StringRef MacroName;
if (DoubleSquareBracket) {
@@ -730,7 +701,7 @@ void Sema::checkDeprecatedCommand(const BlockCommandComment *Command) {
}
void Sema::resolveParamCommandIndexes(const FullComment *FC) {
- if (!isFunctionDecl()) {
+ if (!involvesFunctionType()) {
// We already warned that \\param commands are not attached to a function
// decl.
return;
@@ -818,6 +789,14 @@ void Sema::resolveParamCommandIndexes(const FullComment *FC) {
}
}
+bool Sema::involvesFunctionType() {
+ if (!ThisDeclInfo)
+ return false;
+ if (!ThisDeclInfo->IsFilled)
+ inspectThisDecl();
+ return ThisDeclInfo->involvesFunctionType();
+}
+
bool Sema::isFunctionDecl() {
if (!ThisDeclInfo)
return false;
@@ -832,26 +811,11 @@ bool Sema::isAnyFunctionDecl() {
}
bool Sema::isFunctionOrMethodVariadic() {
- if (!isFunctionDecl() || !ThisDeclInfo->CurrentDecl)
+ if (!ThisDeclInfo)
return false;
- if (const FunctionDecl *FD =
- dyn_cast<FunctionDecl>(ThisDeclInfo->CurrentDecl))
- return FD->isVariadic();
- if (const FunctionTemplateDecl *FTD =
- dyn_cast<FunctionTemplateDecl>(ThisDeclInfo->CurrentDecl))
- return FTD->getTemplatedDecl()->isVariadic();
- if (const ObjCMethodDecl *MD =
- dyn_cast<ObjCMethodDecl>(ThisDeclInfo->CurrentDecl))
- return MD->isVariadic();
- if (const TypedefNameDecl *TD =
- dyn_cast<TypedefNameDecl>(ThisDeclInfo->CurrentDecl)) {
- QualType Type = TD->getUnderlyingType();
- if (Type->isFunctionPointerType() || Type->isBlockPointerType())
- Type = Type->getPointeeType();
- if (const auto *FT = Type->getAs<FunctionProtoType>())
- return FT->isVariadic();
- }
- return false;
+ if (!ThisDeclInfo->IsFilled)
+ inspectThisDecl();
+ return ThisDeclInfo->IsVariadic;
}
bool Sema::isObjCMethodDecl() {
@@ -873,36 +837,6 @@ bool Sema::isFunctionPointerVarDecl() {
return false;
}
-bool Sema::isFunctionOrBlockPointerVarLikeDecl() {
- if (!ThisDeclInfo)
- return false;
- if (!ThisDeclInfo->IsFilled)
- inspectThisDecl();
- if (ThisDeclInfo->getKind() != DeclInfo::VariableKind ||
- !ThisDeclInfo->CurrentDecl)
- return false;
- QualType QT;
- if (const auto *VD = dyn_cast<DeclaratorDecl>(ThisDeclInfo->CurrentDecl))
- QT = VD->getType();
- else if (const auto *PD =
- dyn_cast<ObjCPropertyDecl>(ThisDeclInfo->CurrentDecl))
- QT = PD->getType();
- else
- return false;
- // We would like to warn about the 'returns'/'param' commands for
- // variables that don't directly specify the function type, so type aliases
- // can be ignored.
- if (QT->getAs<TypedefType>())
- return false;
- if (const auto *P = QT->getAs<PointerType>())
- if (P->getPointeeType()->getAs<TypedefType>())
- return false;
- if (const auto *P = QT->getAs<BlockPointerType>())
- if (P->getPointeeType()->getAs<TypedefType>())
- return false;
- return QT->isFunctionPointerType() || QT->isBlockPointerType();
-}
-
bool Sema::isObjCPropertyDecl() {
if (!ThisDeclInfo)
return false;
@@ -1173,16 +1107,15 @@ StringRef Sema::correctTypoInTParamReference(
return StringRef();
}
-InlineCommandComment::RenderKind
-Sema::getInlineCommandRenderKind(StringRef Name) const {
+InlineCommandRenderKind Sema::getInlineCommandRenderKind(StringRef Name) const {
assert(Traits.getCommandInfo(Name)->IsInlineCommand);
- return llvm::StringSwitch<InlineCommandComment::RenderKind>(Name)
- .Case("b", InlineCommandComment::RenderBold)
- .Cases("c", "p", InlineCommandComment::RenderMonospaced)
- .Cases("a", "e", "em", InlineCommandComment::RenderEmphasized)
- .Case("anchor", InlineCommandComment::RenderAnchor)
- .Default(InlineCommandComment::RenderNormal);
+ return llvm::StringSwitch<InlineCommandRenderKind>(Name)
+ .Case("b", InlineCommandRenderKind::Bold)
+ .Cases("c", "p", InlineCommandRenderKind::Monospaced)
+ .Cases("a", "e", "em", InlineCommandRenderKind::Emphasized)
+ .Case("anchor", InlineCommandRenderKind::Anchor)
+ .Default(InlineCommandRenderKind::Normal);
}
} // end namespace comments
diff --git a/contrib/llvm-project/clang/lib/AST/ComparisonCategories.cpp b/contrib/llvm-project/clang/lib/AST/ComparisonCategories.cpp
index 896050482644..58411201c3b0 100644
--- a/contrib/llvm-project/clang/lib/AST/ComparisonCategories.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ComparisonCategories.cpp
@@ -17,10 +17,11 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/SmallVector.h"
+#include <optional>
using namespace clang;
-Optional<ComparisonCategoryType>
+std::optional<ComparisonCategoryType>
clang::getComparisonCategoryForBuiltinCmp(QualType T) {
using CCT = ComparisonCategoryType;
@@ -37,7 +38,7 @@ clang::getComparisonCategoryForBuiltinCmp(QualType T) {
return CCT::StrongOrdering;
// TODO: Extend support for operator<=> to ObjC types.
- return llvm::None;
+ return std::nullopt;
}
bool ComparisonCategoryInfo::ValueInfo::hasValidIntValue() const {
@@ -57,7 +58,7 @@ bool ComparisonCategoryInfo::ValueInfo::hasValidIntValue() const {
/// Attempt to determine the integer value used to represent the comparison
/// category result by evaluating the initializer for the specified VarDecl as
-/// a constant expression and retreiving the value of the class's first
+/// a constant expression and retrieving the value of the class's first
/// (and only) field.
///
/// Note: The STL types are expected to have the form:
diff --git a/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp b/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp
index 5648cf2103d6..584b58473294 100644
--- a/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp
@@ -26,7 +26,7 @@ ExprDependence clang::computeDependence(FullExpr *E) {
}
ExprDependence clang::computeDependence(OpaqueValueExpr *E) {
- auto D = toExprDependence(E->getType()->getDependence());
+ auto D = toExprDependenceForImpliedType(E->getType()->getDependence());
if (auto *S = E->getSourceExpr())
D |= S->getDependence();
assert(!(D & ExprDependence::UnexpandedPack));
@@ -39,8 +39,10 @@ ExprDependence clang::computeDependence(ParenExpr *E) {
ExprDependence clang::computeDependence(UnaryOperator *E,
const ASTContext &Ctx) {
- ExprDependence Dep = toExprDependence(E->getType()->getDependence()) |
- E->getSubExpr()->getDependence();
+ ExprDependence Dep =
+ // FIXME: Do we need to look at the type?
+ toExprDependenceForImpliedType(E->getType()->getDependence()) |
+ E->getSubExpr()->getDependence();
// C++ [temp.dep.constexpr]p5:
// An expression of the form & qualified-id where the qualified-id names a
@@ -77,7 +79,7 @@ ExprDependence clang::computeDependence(UnaryExprOrTypeTraitExpr *E) {
// Value-dependent if the argument is type-dependent.
if (E->isArgumentType())
return turnTypeToValueDependence(
- toExprDependence(E->getArgumentType()->getDependence()));
+ toExprDependenceAsWritten(E->getArgumentType()->getDependence()));
auto ArgDeps = E->getArgumentExpr()->getDependence();
auto Deps = ArgDeps & ~ExprDependence::TypeValue;
@@ -120,21 +122,36 @@ ExprDependence clang::computeDependence(MatrixSubscriptExpr *E) {
}
ExprDependence clang::computeDependence(CompoundLiteralExpr *E) {
- return toExprDependence(E->getTypeSourceInfo()->getType()->getDependence()) |
+ return toExprDependenceAsWritten(
+ E->getTypeSourceInfo()->getType()->getDependence()) |
+ toExprDependenceForImpliedType(E->getType()->getDependence()) |
turnTypeToValueDependence(E->getInitializer()->getDependence());
}
-ExprDependence clang::computeDependence(CastExpr *E) {
+ExprDependence clang::computeDependence(ImplicitCastExpr *E) {
+ // We model implicit conversions as combining the dependence of their
+ // subexpression, apart from its type, with the semantic portion of the
+ // target type.
+ ExprDependence D =
+ toExprDependenceForImpliedType(E->getType()->getDependence());
+ if (auto *S = E->getSubExpr())
+ D |= S->getDependence() & ~ExprDependence::Type;
+ return D;
+}
+
+ExprDependence clang::computeDependence(ExplicitCastExpr *E) {
// Cast expressions are type-dependent if the type is
// dependent (C++ [temp.dep.expr]p3).
// Cast expressions are value-dependent if the type is
// dependent or if the subexpression is value-dependent.
- auto D = toExprDependence(E->getType()->getDependence());
- if (E->getStmtClass() == Stmt::ImplicitCastExprClass) {
- // An implicit cast expression doesn't (lexically) contain an
- // unexpanded pack, even if its target type does.
- D &= ~ExprDependence::UnexpandedPack;
- }
+ //
+ // Note that we also need to consider the dependence of the actual type here,
+ // because when the type as written is a deduced type, that type is not
+ // dependent, but it may be deduced as a dependent type.
+ ExprDependence D =
+ toExprDependenceAsWritten(
+ cast<ExplicitCastExpr>(E)->getTypeAsWritten()->getDependence()) |
+ toExprDependenceForImpliedType(E->getType()->getDependence());
if (auto *S = E->getSubExpr())
D |= S->getDependence() & ~ExprDependence::Type;
return D;
@@ -158,7 +175,7 @@ ExprDependence clang::computeDependence(BinaryConditionalOperator *E) {
}
ExprDependence clang::computeDependence(StmtExpr *E, unsigned TemplateDepth) {
- auto D = toExprDependence(E->getType()->getDependence());
+ auto D = toExprDependenceForImpliedType(E->getType()->getDependence());
// Propagate dependence of the result.
if (const auto *CompoundExprResult =
dyn_cast_or_null<ValueStmt>(E->getSubStmt()->getStmtExprResult()))
@@ -174,7 +191,8 @@ ExprDependence clang::computeDependence(StmtExpr *E, unsigned TemplateDepth) {
}
ExprDependence clang::computeDependence(ConvertVectorExpr *E) {
- auto D = toExprDependence(E->getType()->getDependence()) |
+ auto D = toExprDependenceAsWritten(
+ E->getTypeSourceInfo()->getType()->getDependence()) |
E->getSrcExpr()->getDependence();
if (!E->getType()->isDependentType())
D &= ~ExprDependence::Type;
@@ -206,14 +224,14 @@ ExprDependence clang::computeDependence(ParenListExpr *P) {
}
ExprDependence clang::computeDependence(VAArgExpr *E) {
- auto D =
- toExprDependence(E->getWrittenTypeInfo()->getType()->getDependence()) |
- (E->getSubExpr()->getDependence() & ~ExprDependence::Type);
- return D & ~ExprDependence::Value;
+ auto D = toExprDependenceAsWritten(
+ E->getWrittenTypeInfo()->getType()->getDependence()) |
+ (E->getSubExpr()->getDependence() & ~ExprDependence::Type);
+ return D;
}
ExprDependence clang::computeDependence(NoInitExpr *E) {
- return toExprDependence(E->getType()->getDependence()) &
+ return toExprDependenceForImpliedType(E->getType()->getDependence()) &
(ExprDependence::Instantiation | ExprDependence::Error);
}
@@ -226,7 +244,7 @@ ExprDependence clang::computeDependence(ArrayInitLoopExpr *E) {
}
ExprDependence clang::computeDependence(ImplicitValueInitExpr *E) {
- return toExprDependence(E->getType()->getDependence()) &
+ return toExprDependenceForImpliedType(E->getType()->getDependence()) &
ExprDependence::Instantiation;
}
@@ -235,14 +253,16 @@ ExprDependence clang::computeDependence(ExtVectorElementExpr *E) {
}
ExprDependence clang::computeDependence(BlockExpr *E) {
- auto D = toExprDependence(E->getType()->getDependence());
+ auto D = toExprDependenceForImpliedType(E->getType()->getDependence());
if (E->getBlockDecl()->isDependentContext())
D |= ExprDependence::Instantiation;
- return D & ~ExprDependence::UnexpandedPack;
+ return D;
}
ExprDependence clang::computeDependence(AsTypeExpr *E) {
- auto D = toExprDependence(E->getType()->getDependence()) |
+ // FIXME: AsTypeExpr doesn't store the type as written. Assume the expression
+ // type has identical sugar for now, so is a type-as-written.
+ auto D = toExprDependenceAsWritten(E->getType()->getDependence()) |
E->getSrcExpr()->getDependence();
if (!E->getType()->isDependentType())
D &= ~ExprDependence::Type;
@@ -255,15 +275,14 @@ ExprDependence clang::computeDependence(CXXRewrittenBinaryOperator *E) {
ExprDependence clang::computeDependence(CXXStdInitializerListExpr *E) {
auto D = turnTypeToValueDependence(E->getSubExpr()->getDependence());
- D |= toExprDependence(E->getType()->getDependence()) &
- (ExprDependence::Type | ExprDependence::Error);
+ D |= toExprDependenceForImpliedType(E->getType()->getDependence());
return D;
}
ExprDependence clang::computeDependence(CXXTypeidExpr *E) {
auto D = ExprDependence::None;
if (E->isTypeOperand())
- D = toExprDependence(
+ D = toExprDependenceAsWritten(
E->getTypeOperandSourceInfo()->getType()->getDependence());
else
D = turnTypeToValueDependence(E->getExprOperand()->getDependence());
@@ -281,7 +300,7 @@ ExprDependence clang::computeDependence(MSPropertySubscriptExpr *E) {
ExprDependence clang::computeDependence(CXXUuidofExpr *E) {
if (E->isTypeOperand())
- return turnTypeToValueDependence(toExprDependence(
+ return turnTypeToValueDependence(toExprDependenceAsWritten(
E->getTypeOperandSourceInfo()->getType()->getDependence()));
return turnTypeToValueDependence(E->getExprOperand()->getDependence());
@@ -290,7 +309,7 @@ ExprDependence clang::computeDependence(CXXUuidofExpr *E) {
ExprDependence clang::computeDependence(CXXThisExpr *E) {
// 'this' is type-dependent if the class type of the enclosing
// member function is dependent (C++ [temp.dep.expr]p2)
- auto D = toExprDependence(E->getType()->getDependence());
+ auto D = toExprDependenceForImpliedType(E->getType()->getDependence());
assert(!(D & ExprDependence::UnexpandedPack));
return D;
}
@@ -307,8 +326,10 @@ ExprDependence clang::computeDependence(CXXBindTemporaryExpr *E) {
}
ExprDependence clang::computeDependence(CXXScalarValueInitExpr *E) {
- return toExprDependence(E->getType()->getDependence()) &
- ~ExprDependence::TypeValue;
+ auto D = toExprDependenceForImpliedType(E->getType()->getDependence());
+ if (auto *TSI = E->getTypeSourceInfo())
+ D |= toExprDependenceAsWritten(TSI->getType()->getDependence());
+ return D;
}
ExprDependence clang::computeDependence(CXXDeleteExpr *E) {
@@ -316,7 +337,7 @@ ExprDependence clang::computeDependence(CXXDeleteExpr *E) {
}
ExprDependence clang::computeDependence(ArrayTypeTraitExpr *E) {
- auto D = toExprDependence(E->getQueriedType()->getDependence());
+ auto D = toExprDependenceAsWritten(E->getQueriedType()->getDependence());
if (auto *Dim = E->getDimensionExpression())
D |= Dim->getDependence();
return turnTypeToValueDependence(D);
@@ -366,7 +387,7 @@ ExprDependence clang::computeDependence(ObjCBoxedExpr *E) {
}
ExprDependence clang::computeDependence(ObjCEncodeExpr *E) {
- return toExprDependence(E->getEncodedType()->getDependence());
+ return toExprDependenceAsWritten(E->getEncodedType()->getDependence());
}
ExprDependence clang::computeDependence(ObjCIvarRefExpr *E) {
@@ -377,7 +398,8 @@ ExprDependence clang::computeDependence(ObjCPropertyRefExpr *E) {
if (E->isObjectReceiver())
return E->getBase()->getDependence() & ~ExprDependence::Type;
if (E->isSuperReceiver())
- return toExprDependence(E->getSuperReceiverType()->getDependence()) &
+ return toExprDependenceForImpliedType(
+ E->getSuperReceiverType()->getDependence()) &
~ExprDependence::TypeValue;
assert(E->isClassReceiver());
return ExprDependence::None;
@@ -406,19 +428,22 @@ ExprDependence clang::computeDependence(OMPArraySectionExpr *E) {
}
ExprDependence clang::computeDependence(OMPArrayShapingExpr *E) {
- auto D = E->getBase()->getDependence() |
- toExprDependence(E->getType()->getDependence());
+ auto D = E->getBase()->getDependence();
for (Expr *Dim: E->getDimensions())
if (Dim)
- D |= Dim->getDependence();
+ D |= turnValueToTypeDependence(Dim->getDependence());
return D;
}
ExprDependence clang::computeDependence(OMPIteratorExpr *E) {
- auto D = toExprDependence(E->getType()->getDependence());
+ auto D = toExprDependenceForImpliedType(E->getType()->getDependence());
for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
- if (auto *VD = cast_or_null<ValueDecl>(E->getIteratorDecl(I)))
- D |= toExprDependence(VD->getType()->getDependence());
+ if (auto *DD = cast_or_null<DeclaratorDecl>(E->getIteratorDecl(I))) {
+ // If the type is omitted, it's 'int', and is not dependent in any way.
+ if (auto *TSI = DD->getTypeSourceInfo()) {
+ D |= toExprDependenceAsWritten(TSI->getType()->getDependence());
+ }
+ }
OMPIteratorExpr::IteratorRange IR = E->getIteratorRange(I);
if (Expr *BE = IR.Begin)
D |= BE->getDependence();
@@ -451,22 +476,32 @@ ExprDependence clang::computeDependence(DeclRefExpr *E, const ASTContext &Ctx) {
if (Decl->isParameterPack())
Deps |= ExprDependence::UnexpandedPack;
- Deps |= toExprDependence(Type->getDependence()) & ExprDependence::Error;
+ Deps |= toExprDependenceForImpliedType(Type->getDependence()) &
+ ExprDependence::Error;
// C++ [temp.dep.expr]p3:
// An id-expression is type-dependent if it contains:
// - an identifier associated by name lookup with one or more declarations
// declared with a dependent type
+ // - an identifier associated by name lookup with an entity captured by
+ // copy ([expr.prim.lambda.capture])
+ // in a lambda-expression that has an explicit object parameter whose
+ // type is dependent ([dcl.fct]),
//
// [The "or more" case is not modeled as a DeclRefExpr. There are a bunch
// more bullets here that we handle by treating the declaration as having a
// dependent type if they involve a placeholder type that can't be deduced.]
if (Type->isDependentType())
- return Deps | ExprDependence::TypeValueInstantiation;
+ Deps |= ExprDependence::TypeValueInstantiation;
else if (Type->isInstantiationDependentType())
Deps |= ExprDependence::Instantiation;
+ // - an identifier associated by name lookup with an entity captured by
+ // copy ([expr.prim.lambda.capture])
+ if (E->isCapturedByCopyInLambdaWithExplicitObjectParameter())
+ Deps |= ExprDependence::Type;
+
// - a conversion-function-id that specifies a dependent type
if (Decl->getDeclName().getNameKind() ==
DeclarationName::CXXConversionFunctionName) {
@@ -499,13 +534,13 @@ ExprDependence clang::computeDependence(DeclRefExpr *E, const ASTContext &Ctx) {
// - it names a potentially-constant variable that is initialized with an
// expression that is value-dependent
if (const auto *Var = dyn_cast<VarDecl>(Decl)) {
- if (Var->mightBeUsableInConstantExpressions(Ctx)) {
- if (const Expr *Init = Var->getAnyInitializer()) {
- if (Init->isValueDependent())
- Deps |= ExprDependence::ValueInstantiation;
- if (Init->containsErrors())
- Deps |= ExprDependence::Error;
- }
+ if (const Expr *Init = Var->getAnyInitializer()) {
+ if (Init->containsErrors())
+ Deps |= ExprDependence::Error;
+
+ if (Var->mightBeUsableInConstantExpressions(Ctx) &&
+ Init->isValueDependent())
+ Deps |= ExprDependence::ValueInstantiation;
}
// - it names a static data member that is a dependent member of the
@@ -547,7 +582,7 @@ ExprDependence clang::computeDependence(RecoveryExpr *E) {
// - type-dependent if we don't know the type (fallback to an opaque
// dependent type), or the type is known and dependent, or it has
// type-dependent subexpressions.
- auto D = toExprDependence(E->getType()->getDependence()) |
+ auto D = toExprDependenceAsWritten(E->getType()->getDependence()) |
ExprDependence::ErrorDependent;
// FIXME: remove the type-dependent bit from subexpressions, if the
// RecoveryExpr has a non-dependent type.
@@ -557,18 +592,20 @@ ExprDependence clang::computeDependence(RecoveryExpr *E) {
}
ExprDependence clang::computeDependence(SYCLUniqueStableNameExpr *E) {
- return toExprDependence(E->getTypeSourceInfo()->getType()->getDependence());
+ return toExprDependenceAsWritten(
+ E->getTypeSourceInfo()->getType()->getDependence());
}
ExprDependence clang::computeDependence(PredefinedExpr *E) {
- return toExprDependence(E->getType()->getDependence()) &
- ~ExprDependence::UnexpandedPack;
+ return toExprDependenceForImpliedType(E->getType()->getDependence());
}
ExprDependence clang::computeDependence(CallExpr *E,
llvm::ArrayRef<Expr *> PreArgs) {
auto D = E->getCallee()->getDependence();
- for (auto *A : llvm::makeArrayRef(E->getArgs(), E->getNumArgs())) {
+ if (E->getType()->isDependentType())
+ D |= ExprDependence::Type;
+ for (auto *A : llvm::ArrayRef(E->getArgs(), E->getNumArgs())) {
if (A)
D |= A->getDependence();
}
@@ -578,16 +615,31 @@ ExprDependence clang::computeDependence(CallExpr *E,
}
ExprDependence clang::computeDependence(OffsetOfExpr *E) {
- auto D = turnTypeToValueDependence(
- toExprDependence(E->getTypeSourceInfo()->getType()->getDependence()));
+ auto D = turnTypeToValueDependence(toExprDependenceAsWritten(
+ E->getTypeSourceInfo()->getType()->getDependence()));
for (unsigned I = 0, N = E->getNumExpressions(); I < N; ++I)
D |= turnTypeToValueDependence(E->getIndexExpr(I)->getDependence());
return D;
}
+static inline ExprDependence getDependenceInExpr(DeclarationNameInfo Name) {
+ auto D = ExprDependence::None;
+ if (Name.isInstantiationDependent())
+ D |= ExprDependence::Instantiation;
+ if (Name.containsUnexpandedParameterPack())
+ D |= ExprDependence::UnexpandedPack;
+ return D;
+}
+
ExprDependence clang::computeDependence(MemberExpr *E) {
- auto *MemberDecl = E->getMemberDecl();
auto D = E->getBase()->getDependence();
+ D |= getDependenceInExpr(E->getMemberNameInfo());
+
+ if (auto *NNS = E->getQualifier())
+ D |= toExprDependence(NNS->getDependence() &
+ ~NestedNameSpecifierDependence::Dependent);
+
+ auto *MemberDecl = E->getMemberDecl();
if (FieldDecl *FD = dyn_cast<FieldDecl>(MemberDecl)) {
DeclContext *DC = MemberDecl->getDeclContext();
// dyn_cast_or_null is used to handle objC variables which do not
@@ -615,8 +667,8 @@ ExprDependence clang::computeDependence(InitListExpr *E) {
}
ExprDependence clang::computeDependence(ShuffleVectorExpr *E) {
- auto D = toExprDependence(E->getType()->getDependence());
- for (auto *C : llvm::makeArrayRef(E->getSubExprs(), E->getNumSubExprs()))
+ auto D = toExprDependenceForImpliedType(E->getType()->getDependence());
+ for (auto *C : llvm::ArrayRef(E->getSubExprs(), E->getNumSubExprs()))
D |= C->getDependence();
return D;
}
@@ -627,7 +679,12 @@ ExprDependence clang::computeDependence(GenericSelectionExpr *E,
: ExprDependence::None;
for (auto *AE : E->getAssocExprs())
D |= AE->getDependence() & ExprDependence::Error;
- D |= E->getControllingExpr()->getDependence() & ExprDependence::Error;
+
+ if (E->isExprPredicate())
+ D |= E->getControllingExpr()->getDependence() & ExprDependence::Error;
+ else
+ D |= toExprDependenceAsWritten(
+ E->getControllingType()->getType()->getDependence());
if (E->isResultDependent())
return D | ExprDependence::TypeValueInstantiation;
@@ -637,7 +694,7 @@ ExprDependence clang::computeDependence(GenericSelectionExpr *E,
ExprDependence clang::computeDependence(DesignatedInitExpr *E) {
auto Deps = E->getInit()->getDependence();
- for (auto D : E->designators()) {
+ for (const auto &D : E->designators()) {
auto DesignatorDeps = ExprDependence::None;
if (D.isArrayDesignator())
DesignatorDeps |= E->getArrayIndex(D)->getDependence();
@@ -660,15 +717,17 @@ ExprDependence clang::computeDependence(PseudoObjectExpr *O) {
ExprDependence clang::computeDependence(AtomicExpr *A) {
auto D = ExprDependence::None;
- for (auto *E : llvm::makeArrayRef(A->getSubExprs(), A->getNumSubExprs()))
+ for (auto *E : llvm::ArrayRef(A->getSubExprs(), A->getNumSubExprs()))
D |= E->getDependence();
return D;
}
ExprDependence clang::computeDependence(CXXNewExpr *E) {
- auto D = toExprDependence(E->getType()->getDependence());
+ auto D = toExprDependenceAsWritten(
+ E->getAllocatedTypeSourceInfo()->getType()->getDependence());
+ D |= toExprDependenceForImpliedType(E->getAllocatedType()->getDependence());
auto Size = E->getArraySize();
- if (Size.hasValue() && *Size)
+ if (Size && *Size)
D |= turnTypeToValueDependence((*Size)->getDependence());
if (auto *I = E->getInitializer())
D |= turnTypeToValueDependence(I->getDependence());
@@ -679,26 +738,17 @@ ExprDependence clang::computeDependence(CXXNewExpr *E) {
ExprDependence clang::computeDependence(CXXPseudoDestructorExpr *E) {
auto D = E->getBase()->getDependence();
- if (!E->getDestroyedType().isNull())
- D |= toExprDependence(E->getDestroyedType()->getDependence());
+ if (auto *TSI = E->getDestroyedTypeInfo())
+ D |= toExprDependenceAsWritten(TSI->getType()->getDependence());
if (auto *ST = E->getScopeTypeInfo())
D |= turnTypeToValueDependence(
- toExprDependence(ST->getType()->getDependence()));
+ toExprDependenceAsWritten(ST->getType()->getDependence()));
if (auto *Q = E->getQualifier())
D |= toExprDependence(Q->getDependence() &
~NestedNameSpecifierDependence::Dependent);
return D;
}
-static inline ExprDependence getDependenceInExpr(DeclarationNameInfo Name) {
- auto D = ExprDependence::None;
- if (Name.isInstantiationDependent())
- D |= ExprDependence::Instantiation;
- if (Name.containsUnexpandedParameterPack())
- D |= ExprDependence::UnexpandedPack;
- return D;
-}
-
ExprDependence
clang::computeDependence(OverloadExpr *E, bool KnownDependent,
bool KnownInstantiationDependent,
@@ -722,7 +772,7 @@ clang::computeDependence(OverloadExpr *E, bool KnownDependent,
// If we have explicit template arguments, check for dependent
// template arguments and whether they contain any unexpanded pack
// expansions.
- for (auto A : E->template_arguments())
+ for (const auto &A : E->template_arguments())
Deps |= toExprDependence(A.getArgument().getDependence());
return Deps;
}
@@ -732,18 +782,26 @@ ExprDependence clang::computeDependence(DependentScopeDeclRefExpr *E) {
D |= getDependenceInExpr(E->getNameInfo());
if (auto *Q = E->getQualifier())
D |= toExprDependence(Q->getDependence());
- for (auto A : E->template_arguments())
+ for (const auto &A : E->template_arguments())
D |= toExprDependence(A.getArgument().getDependence());
return D;
}
ExprDependence clang::computeDependence(CXXConstructExpr *E) {
- auto D = toExprDependence(E->getType()->getDependence());
+ ExprDependence D =
+ toExprDependenceForImpliedType(E->getType()->getDependence());
for (auto *A : E->arguments())
D |= A->getDependence() & ~ExprDependence::Type;
return D;
}
+ExprDependence clang::computeDependence(CXXTemporaryObjectExpr *E) {
+ CXXConstructExpr *BaseE = E;
+ return toExprDependenceAsWritten(
+ E->getTypeSourceInfo()->getType()->getDependence()) |
+ computeDependence(BaseE);
+}
+
ExprDependence clang::computeDependence(CXXDefaultInitExpr *E) {
return E->getExpr()->getDependence();
}
@@ -754,7 +812,7 @@ ExprDependence clang::computeDependence(CXXDefaultArgExpr *E) {
ExprDependence clang::computeDependence(LambdaExpr *E,
bool ContainsUnexpandedParameterPack) {
- auto D = toExprDependence(E->getType()->getDependence());
+ auto D = toExprDependenceForImpliedType(E->getType()->getDependence());
if (ContainsUnexpandedParameterPack)
D |= ExprDependence::UnexpandedPack;
return D;
@@ -762,7 +820,8 @@ ExprDependence clang::computeDependence(LambdaExpr *E,
ExprDependence clang::computeDependence(CXXUnresolvedConstructExpr *E) {
auto D = ExprDependence::ValueInstantiation;
- D |= toExprDependence(E->getType()->getDependence());
+ D |= toExprDependenceAsWritten(E->getTypeAsWritten()->getDependence());
+ D |= toExprDependenceForImpliedType(E->getType()->getDependence());
for (auto *A : E->arguments())
D |= A->getDependence() &
(ExprDependence::UnexpandedPack | ExprDependence::Error);
@@ -776,7 +835,7 @@ ExprDependence clang::computeDependence(CXXDependentScopeMemberExpr *E) {
if (auto *Q = E->getQualifier())
D |= toExprDependence(Q->getDependence());
D |= getDependenceInExpr(E->getMemberNameInfo());
- for (auto A : E->template_arguments())
+ for (const auto &A : E->template_arguments())
D |= toExprDependence(A.getArgument().getDependence());
return D;
}
@@ -794,11 +853,18 @@ ExprDependence clang::computeDependence(CXXFoldExpr *E) {
return D;
}
+ExprDependence clang::computeDependence(CXXParenListInitExpr *E) {
+ auto D = ExprDependence::None;
+ for (const auto *A : E->getInitExprs())
+ D |= A->getDependence();
+ return D;
+}
+
ExprDependence clang::computeDependence(TypeTraitExpr *E) {
auto D = ExprDependence::None;
for (const auto *A : E->getArgs())
- D |=
- toExprDependence(A->getType()->getDependence()) & ~ExprDependence::Type;
+ D |= toExprDependenceAsWritten(A->getType()->getDependence()) &
+ ~ExprDependence::Type;
return D;
}
@@ -816,7 +882,10 @@ ExprDependence clang::computeDependence(ConceptSpecializationExpr *E,
ExprDependence D =
ValueDependent ? ExprDependence::Value : ExprDependence::None;
- return D | toExprDependence(TA);
+ auto Res = D | toExprDependence(TA);
+ if(!ValueDependent && E->getSatisfaction().ContainsErrors)
+ Res |= ExprDependence::Error;
+ return Res;
}
ExprDependence clang::computeDependence(ObjCArrayLiteral *E) {
@@ -845,7 +914,7 @@ ExprDependence clang::computeDependence(ObjCMessageExpr *E) {
if (auto *R = E->getInstanceReceiver())
D |= R->getDependence();
else
- D |= toExprDependence(E->getType()->getDependence());
+ D |= toExprDependenceForImpliedType(E->getType()->getDependence());
for (auto *A : E->arguments())
D |= A->getDependence();
return D;
diff --git a/contrib/llvm-project/clang/lib/AST/Decl.cpp b/contrib/llvm-project/clang/lib/AST/Decl.cpp
index 959a7c415c58..1ee33fd7576d 100644
--- a/contrib/llvm-project/clang/lib/AST/Decl.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Decl.cpp
@@ -30,6 +30,8 @@
#include "clang/AST/ODRHash.h"
#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/Randstruct.h"
+#include "clang/AST/RecordLayout.h"
#include "clang/AST/Redeclarable.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
@@ -52,21 +54,20 @@
#include "clang/Basic/Visibility.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstring>
#include <memory>
+#include <optional>
#include <string>
#include <tuple>
#include <type_traits>
@@ -86,7 +87,7 @@ void PrettyDeclStackTraceEntry::print(raw_ostream &OS) const {
}
OS << Message;
- if (auto *ND = dyn_cast_or_null<NamedDecl>(TheDecl)) {
+ if (auto *ND = dyn_cast_if_present<NamedDecl>(TheDecl)) {
OS << " '";
ND->getNameForDiagnostic(OS, Context.getPrintingPolicy(), true);
OS << "'";
@@ -168,8 +169,8 @@ withExplicitVisibilityAlready(LVComputationKind Kind) {
return Kind;
}
-static Optional<Visibility> getExplicitVisibility(const NamedDecl *D,
- LVComputationKind kind) {
+static std::optional<Visibility> getExplicitVisibility(const NamedDecl *D,
+ LVComputationKind kind) {
assert(!kind.IgnoreExplicitVisibility &&
"asking for explicit visibility when we shouldn't be");
return D->getExplicitVisibility(kind.getExplicitVisibilityKind());
@@ -185,8 +186,8 @@ static bool usesTypeVisibility(const NamedDecl *D) {
/// Does the given declaration have member specialization information,
/// and if so, is it an explicit specialization?
-template <class T> static typename
-std::enable_if<!std::is_base_of<RedeclarableTemplateDecl, T>::value, bool>::type
+template <class T>
+static std::enable_if_t<!std::is_base_of_v<RedeclarableTemplateDecl, T>, bool>
isExplicitMemberSpecialization(const T *D) {
if (const MemberSpecializationInfo *member =
D->getMemberSpecializationInfo()) {
@@ -218,8 +219,8 @@ static Visibility getVisibilityFromAttr(const T *attr) {
}
/// Return the explicit visibility of the given declaration.
-static Optional<Visibility> getVisibilityOf(const NamedDecl *D,
- NamedDecl::ExplicitVisibilityKind kind) {
+static std::optional<Visibility>
+getVisibilityOf(const NamedDecl *D, NamedDecl::ExplicitVisibilityKind kind) {
// If we're ultimately computing the visibility of a type, look for
// a 'type_visibility' attribute before looking for 'visibility'.
if (kind == NamedDecl::VisibilityForType) {
@@ -233,7 +234,7 @@ static Optional<Visibility> getVisibilityOf(const NamedDecl *D,
return getVisibilityFromAttr(A);
}
- return None;
+ return std::nullopt;
}
LinkageInfo LinkageComputer::getLVForType(const Type &T,
@@ -342,6 +343,10 @@ LinkageComputer::getLVForTemplateArgumentList(ArrayRef<TemplateArgument> Args,
LV.merge(getTypeLinkageAndVisibility(Arg.getNullPtrType()));
continue;
+ case TemplateArgument::StructuralValue:
+ LV.merge(getLVForValue(Arg.getAsStructuralValue(), computation));
+ continue;
+
case TemplateArgument::Template:
case TemplateArgument::TemplateExpansion:
if (TemplateDecl *Template =
@@ -391,11 +396,17 @@ void LinkageComputer::mergeTemplateLV(
bool considerVisibility =
shouldConsiderTemplateVisibility(fn, specInfo);
- // Merge information from the template parameters.
FunctionTemplateDecl *temp = specInfo->getTemplate();
- LinkageInfo tempLV =
- getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
- LV.mergeMaybeWithVisibility(tempLV, considerVisibility);
+ // Merge information from the template declaration.
+ LinkageInfo tempLV = getLVForDecl(temp, computation);
+ // The linkage of the specialization should be consistent with the
+ // template declaration.
+ LV.setLinkage(tempLV.getLinkage());
+
+ // Merge information from the template parameters.
+ LinkageInfo paramsLV =
+ getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
+ LV.mergeMaybeWithVisibility(paramsLV, considerVisibility);
// Merge information from the template arguments.
const TemplateArgumentList &templateArgs = *specInfo->TemplateArguments;
@@ -459,11 +470,16 @@ void LinkageComputer::mergeTemplateLV(
// Merge information from the template parameters, but ignore
// visibility if we're only considering template arguments.
-
ClassTemplateDecl *temp = spec->getSpecializedTemplate();
- LinkageInfo tempLV =
+ // Merge information from the template declaration.
+ LinkageInfo tempLV = getLVForDecl(temp, computation);
+ // The linkage of the specialization should be consistent with the
+ // template declaration.
+ LV.setLinkage(tempLV.getLinkage());
+
+ LinkageInfo paramsLV =
getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
- LV.mergeMaybeWithVisibility(tempLV,
+ LV.mergeMaybeWithVisibility(paramsLV,
considerVisibility && !hasExplicitVisibilityAlready(computation));
// Merge information from the template arguments. We ignore
@@ -511,7 +527,6 @@ void LinkageComputer::mergeTemplateLV(LinkageInfo &LV,
// Merge information from the template parameters, but ignore
// visibility if we're only considering template arguments.
-
VarTemplateDecl *temp = spec->getSpecializedTemplate();
LinkageInfo tempLV =
getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
@@ -568,46 +583,13 @@ static bool isSingleLineLanguageLinkage(const Decl &D) {
return false;
}
-/// Determine whether D is declared in the purview of a named module.
-static bool isInModulePurview(const NamedDecl *D) {
+static bool isDeclaredInModuleInterfaceOrPartition(const NamedDecl *D) {
if (auto *M = D->getOwningModule())
- return M->isModulePurview();
+ return M->isInterfaceOrPartition();
return false;
}
-static bool isExportedFromModuleInterfaceUnit(const NamedDecl *D) {
- // FIXME: Handle isModulePrivate.
- switch (D->getModuleOwnershipKind()) {
- case Decl::ModuleOwnershipKind::Unowned:
- case Decl::ModuleOwnershipKind::ModulePrivate:
- return false;
- case Decl::ModuleOwnershipKind::Visible:
- case Decl::ModuleOwnershipKind::VisibleWhenImported:
- return isInModulePurview(D);
- }
- llvm_unreachable("unexpected module ownership kind");
-}
-
-static LinkageInfo getInternalLinkageFor(const NamedDecl *D) {
- // Internal linkage declarations within a module interface unit are modeled
- // as "module-internal linkage", which means that they have internal linkage
- // formally but can be indirectly accessed from outside the module via inline
- // functions and templates defined within the module.
- if (isInModulePurview(D))
- return LinkageInfo(ModuleInternalLinkage, DefaultVisibility, false);
-
- return LinkageInfo::internal();
-}
-
static LinkageInfo getExternalLinkageFor(const NamedDecl *D) {
- // C++ Modules TS [basic.link]/6.8:
- // - A name declared at namespace scope that does not have internal linkage
- // by the previous rules and that is introduced by a non-exported
- // declaration has module linkage.
- if (isInModulePurview(D) && !isExportedFromModuleInterfaceUnit(
- cast<NamedDecl>(D->getCanonicalDecl())))
- return LinkageInfo(ModuleLinkage, DefaultVisibility, false);
-
return LinkageInfo::external();
}
@@ -639,21 +621,21 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
// - a variable, variable template, function, or function template
// that is explicitly declared static; or
// (This bullet corresponds to C99 6.2.2p3.)
- return getInternalLinkageFor(D);
+ return LinkageInfo::internal();
}
if (const auto *Var = dyn_cast<VarDecl>(D)) {
// - a non-template variable of non-volatile const-qualified type, unless
// - it is explicitly declared extern, or
- // - it is inline or exported, or
+ // - it is declared in the purview of a module interface unit
+ // (outside the private-module-fragment, if any) or module partition, or
+ // - it is inline, or
// - it was previously declared and the prior declaration did not have
// internal linkage
// (There is no equivalent in C99.)
- if (Context.getLangOpts().CPlusPlus &&
- Var->getType().isConstQualified() &&
- !Var->getType().isVolatileQualified() &&
- !Var->isInline() &&
- !isExportedFromModuleInterfaceUnit(Var) &&
+ if (Context.getLangOpts().CPlusPlus && Var->getType().isConstQualified() &&
+ !Var->getType().isVolatileQualified() && !Var->isInline() &&
+ !isDeclaredInModuleInterfaceOrPartition(Var) &&
!isa<VarTemplateSpecializationDecl>(Var) &&
!Var->getDescribedVarTemplate()) {
const VarDecl *PrevVar = Var->getPreviousDecl();
@@ -663,7 +645,7 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
if (Var->getStorageClass() != SC_Extern &&
Var->getStorageClass() != SC_PrivateExtern &&
!isSingleLineLanguageLinkage(*Var))
- return getInternalLinkageFor(Var);
+ return LinkageInfo::internal();
}
for (const VarDecl *PrevVar = Var->getPreviousDecl(); PrevVar;
@@ -673,7 +655,7 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
return getDeclLinkageAndVisibility(PrevVar);
// Explicitly declared static.
if (PrevVar->getStorageClass() == SC_Static)
- return getInternalLinkageFor(Var);
+ return LinkageInfo::internal();
}
} else if (const auto *IFD = dyn_cast<IndirectFieldDecl>(D)) {
// - a data member of an anonymous union.
@@ -697,7 +679,7 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
// within an unnamed namespace has internal linkage.
if ((!Var || !isFirstInExternCContext(Var)) &&
(!Func || !isFirstInExternCContext(Func)))
- return getInternalLinkageFor(D);
+ return LinkageInfo::internal();
}
// Set up the defaults.
@@ -709,7 +691,7 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
LinkageInfo LV = getExternalLinkageFor(D);
if (!hasExplicitVisibilityAlready(computation)) {
- if (Optional<Visibility> Vis = getExplicitVisibility(D, computation)) {
+ if (std::optional<Visibility> Vis = getExplicitVisibility(D, computation)) {
LV.mergeVisibility(*Vis, true);
} else {
// If we're declared in a namespace with a visibility attribute,
@@ -719,7 +701,8 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
DC = DC->getParent()) {
const auto *ND = dyn_cast<NamespaceDecl>(DC);
if (!ND) continue;
- if (Optional<Visibility> Vis = getExplicitVisibility(ND, computation)) {
+ if (std::optional<Visibility> Vis =
+ getExplicitVisibility(ND, computation)) {
LV.mergeVisibility(*Vis, true);
break;
}
@@ -780,6 +763,7 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
//
// Note that we don't want to make the variable non-external
// because of this, but unique-external linkage suits us.
+
if (Context.getLangOpts().CPlusPlus && !isFirstInExternCContext(Var) &&
!IgnoreVarTypeLinkage) {
LinkageInfo TypeLV = getLVForType(*Var->getType(), computation);
@@ -813,6 +797,16 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
if (Function->getStorageClass() == SC_PrivateExtern)
LV.mergeVisibility(HiddenVisibility, true);
+ // OpenMP target declare device functions are not callable from the host so
+ // they should not be exported from the device image. This applies to all
+ // functions as the host-callable kernel functions are emitted at codegen.
+ if (Context.getLangOpts().OpenMP &&
+ Context.getLangOpts().OpenMPIsTargetDevice &&
+ ((Context.getTargetInfo().getTriple().isAMDGPU() ||
+ Context.getTargetInfo().getTriple().isNVPTX()) ||
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(Function)))
+ LV.mergeVisibility(HiddenVisibility, /*newExplicit=*/false);
+
// Note that Sema::MergeCompatibleFunctionDecls already takes care of
// merging storage classes and visibility attributes, so we don't have to
// look at previous decls in here.
@@ -906,10 +900,6 @@ LinkageComputer::getLVForNamespaceScopeDecl(const NamedDecl *D,
if (!isExternallyVisible(LV.getLinkage()))
return LinkageInfo(LV.getLinkage(), DefaultVisibility, false);
- // Mark the symbols as hidden when compiling for the device.
- if (Context.getLangOpts().OpenMP && Context.getLangOpts().OpenMPIsDevice)
- LV.mergeVisibility(HiddenVisibility, /*newExplicit=*/false);
-
return LV;
}
@@ -939,7 +929,7 @@ LinkageComputer::getLVForClassMember(const NamedDecl *D,
// If we have an explicit visibility attribute, merge that in.
if (!hasExplicitVisibilityAlready(computation)) {
- if (Optional<Visibility> Vis = getExplicitVisibility(D, computation))
+ if (std::optional<Visibility> Vis = getExplicitVisibility(D, computation))
LV.mergeVisibility(*Vis, true);
// If we're paying attention to global visibility, apply
// -finline-visibility-hidden if this is an inline method.
@@ -995,6 +985,17 @@ LinkageComputer::getLVForClassMember(const NamedDecl *D,
explicitSpecSuppressor = MD;
}
+ // OpenMP target declare device functions are not callable from the host so
+ // they should not be exported from the device image. This applies to all
+ // functions as the host-callable kernel functions are emitted at codegen.
+ ASTContext &Context = D->getASTContext();
+ if (Context.getLangOpts().OpenMP &&
+ Context.getLangOpts().OpenMPIsTargetDevice &&
+ ((Context.getTargetInfo().getTriple().isAMDGPU() ||
+ Context.getTargetInfo().getTriple().isNVPTX()) ||
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(MD)))
+ LV.mergeVisibility(HiddenVisibility, /*newExplicit=*/false);
+
} else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
if (const auto *spec = dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
mergeTemplateLV(LV, spec, computation);
@@ -1078,6 +1079,42 @@ bool NamedDecl::isLinkageValid() const {
return L == getCachedLinkage();
}
+bool NamedDecl::isPlaceholderVar(const LangOptions &LangOpts) const {
+ // [C++2c] [basic.scope.scope]/p5
+ // A declaration is name-independent if its name is _ and it declares
+ // - a variable with automatic storage duration,
+ // - a structured binding not inhabiting a namespace scope,
+ // - the variable introduced by an init-capture
+ // - or a non-static data member.
+
+ if (!LangOpts.CPlusPlus || !getIdentifier() ||
+ !getIdentifier()->isPlaceholder())
+ return false;
+ if (isa<FieldDecl>(this))
+ return true;
+ if (const auto *IFD = dyn_cast<IndirectFieldDecl>(this)) {
+ if (!getDeclContext()->isFunctionOrMethod() &&
+ !getDeclContext()->isRecord())
+ return false;
+ const VarDecl *VD = IFD->getVarDecl();
+ return !VD || VD->getStorageDuration() == SD_Automatic;
+ }
+ // and it declares a variable with automatic storage duration
+ if (const auto *VD = dyn_cast<VarDecl>(this)) {
+ if (isa<ParmVarDecl>(VD))
+ return false;
+ if (VD->isInitCapture())
+ return true;
+ return VD->getStorageDuration() == StorageDuration::SD_Automatic;
+ }
+ if (const auto *BD = dyn_cast<BindingDecl>(this);
+ BD && getDeclContext()->isFunctionOrMethod()) {
+ const VarDecl *VD = BD->getHoldingVar();
+ return !VD || VD->getStorageDuration() == StorageDuration::SD_Automatic;
+ }
+ return false;
+}
+
ReservedIdentifierStatus
NamedDecl::isReserved(const LangOptions &LangOpts) const {
const IdentifierInfo *II = getIdentifier();
@@ -1088,13 +1125,29 @@ NamedDecl::isReserved(const LangOptions &LangOpts) const {
return ReservedIdentifierStatus::NotReserved;
ReservedIdentifierStatus Status = II->isReserved(LangOpts);
- if (Status == ReservedIdentifierStatus::StartsWithUnderscoreAtGlobalScope) {
- // Check if we're at TU level or not.
+ if (isReservedAtGlobalScope(Status) && !isReservedInAllContexts(Status)) {
+ // This name is only reserved at global scope. Check if this declaration
+ // conflicts with a global scope declaration.
if (isa<ParmVarDecl>(this) || isTemplateParameter())
return ReservedIdentifierStatus::NotReserved;
+
+ // C++ [dcl.link]/7:
+ // Two declarations [conflict] if [...] one declares a function or
+ // variable with C language linkage, and the other declares [...] a
+ // variable that belongs to the global scope.
+ //
+ // Therefore names that are reserved at global scope are also reserved as
+ // names of variables and functions with C language linkage.
const DeclContext *DC = getDeclContext()->getRedeclContext();
- if (!DC->isTranslationUnit())
- return ReservedIdentifierStatus::NotReserved;
+ if (DC->isTranslationUnit())
+ return Status;
+ if (auto *VD = dyn_cast<VarDecl>(this))
+ if (VD->isExternC())
+ return ReservedIdentifierStatus::StartsWithUnderscoreAndIsExternC;
+ if (auto *FD = dyn_cast<FunctionDecl>(this))
+ if (FD->isExternC())
+ return ReservedIdentifierStatus::StartsWithUnderscoreAndIsExternC;
+ return ReservedIdentifierStatus::NotReserved;
}
return Status;
@@ -1121,18 +1174,61 @@ Linkage NamedDecl::getLinkageInternal() const {
.getLinkage();
}
+/// Determine whether D is attached to a named module.
+static bool isInNamedModule(const NamedDecl *D) {
+ if (auto *M = D->getOwningModule())
+ return M->isNamedModule();
+ return false;
+}
+
+static bool isExportedFromModuleInterfaceUnit(const NamedDecl *D) {
+ // FIXME: Handle isModulePrivate.
+ switch (D->getModuleOwnershipKind()) {
+ case Decl::ModuleOwnershipKind::Unowned:
+ case Decl::ModuleOwnershipKind::ReachableWhenImported:
+ case Decl::ModuleOwnershipKind::ModulePrivate:
+ return false;
+ case Decl::ModuleOwnershipKind::Visible:
+ case Decl::ModuleOwnershipKind::VisibleWhenImported:
+ return isInNamedModule(D);
+ }
+ llvm_unreachable("unexpected module ownership kind");
+}
+
+/// Get the linkage from a semantic point of view. Entities in
+/// anonymous namespaces are external (in c++98).
+Linkage NamedDecl::getFormalLinkage() const {
+ Linkage InternalLinkage = getLinkageInternal();
+
+ // C++ [basic.link]p4.8:
+ // - if the declaration of the name is attached to a named module and is not
+ // exported
+ // the name has module linkage;
+ //
+ // [basic.namespace.general]/p2
+ // A namespace is never attached to a named module and never has a name with
+ // module linkage.
+ if (isInNamedModule(this) && InternalLinkage == Linkage::External &&
+ !isExportedFromModuleInterfaceUnit(
+ cast<NamedDecl>(this->getCanonicalDecl())) &&
+ !isa<NamespaceDecl>(this))
+ InternalLinkage = Linkage::Module;
+
+ return clang::getFormalLinkage(InternalLinkage);
+}
+
LinkageInfo NamedDecl::getLinkageAndVisibility() const {
return LinkageComputer{}.getDeclLinkageAndVisibility(this);
}
-static Optional<Visibility>
+static std::optional<Visibility>
getExplicitVisibilityAux(const NamedDecl *ND,
NamedDecl::ExplicitVisibilityKind kind,
bool IsMostRecent) {
assert(!IsMostRecent || ND == ND->getMostRecentDecl());
// Check the declaration itself first.
- if (Optional<Visibility> V = getVisibilityOf(ND, kind))
+ if (std::optional<Visibility> V = getVisibilityOf(ND, kind))
return V;
// If this is a member class of a specialization of a class template
@@ -1152,11 +1248,11 @@ getExplicitVisibilityAux(const NamedDecl *ND,
const auto *TD = spec->getSpecializedTemplate()->getTemplatedDecl();
while (TD != nullptr) {
auto Vis = getVisibilityOf(TD, kind);
- if (Vis != None)
+ if (Vis != std::nullopt)
return Vis;
TD = TD->getPreviousDecl();
}
- return None;
+ return std::nullopt;
}
// Use the most recent declaration.
@@ -1177,7 +1273,7 @@ getExplicitVisibilityAux(const NamedDecl *ND,
return getVisibilityOf(VTSD->getSpecializedTemplate()->getTemplatedDecl(),
kind);
- return None;
+ return std::nullopt;
}
// Also handle function template specializations.
if (const auto *fn = dyn_cast<FunctionDecl>(ND)) {
@@ -1194,17 +1290,17 @@ getExplicitVisibilityAux(const NamedDecl *ND,
if (InstantiatedFrom)
return getVisibilityOf(InstantiatedFrom, kind);
- return None;
+ return std::nullopt;
}
// The visibility of a template is stored in the templated decl.
if (const auto *TD = dyn_cast<TemplateDecl>(ND))
return getVisibilityOf(TD->getTemplatedDecl(), kind);
- return None;
+ return std::nullopt;
}
-Optional<Visibility>
+std::optional<Visibility>
NamedDecl::getExplicitVisibility(ExplicitVisibilityKind kind) const {
return getExplicitVisibilityAux(this, kind, false);
}
@@ -1219,8 +1315,13 @@ LinkageInfo LinkageComputer::getLVForClosure(const DeclContext *DC,
else if (isa<ParmVarDecl>(ContextDecl))
Owner =
dyn_cast<NamedDecl>(ContextDecl->getDeclContext()->getRedeclContext());
- else
+ else if (isa<ImplicitConceptSpecializationDecl>(ContextDecl)) {
+ // Replace with the concept's owning decl, which is either a namespace or a
+ // TU, so this needs a dyn_cast.
+ Owner = dyn_cast<NamedDecl>(ContextDecl->getDeclContext());
+ } else {
Owner = cast<NamedDecl>(ContextDecl);
+ }
if (!Owner)
return LinkageInfo::none();
@@ -1239,7 +1340,7 @@ LinkageInfo LinkageComputer::getLVForClosure(const DeclContext *DC,
// visible, then the lambda is too. We apply the same rules to blocks.
if (!isExternallyVisible(OwnerLV.getLinkage()))
return LinkageInfo::none();
- return LinkageInfo(VisibleNoLinkage, OwnerLV.getVisibility(),
+ return LinkageInfo(Linkage::VisibleNone, OwnerLV.getVisibility(),
OwnerLV.isVisibilityExplicit());
}
@@ -1248,15 +1349,15 @@ LinkageInfo LinkageComputer::getLVForLocalDecl(const NamedDecl *D,
if (const auto *Function = dyn_cast<FunctionDecl>(D)) {
if (Function->isInAnonymousNamespace() &&
!isFirstInExternCContext(Function))
- return getInternalLinkageFor(Function);
+ return LinkageInfo::internal();
// This is a "void f();" which got merged with a file static.
if (Function->getCanonicalDecl()->getStorageClass() == SC_Static)
- return getInternalLinkageFor(Function);
+ return LinkageInfo::internal();
LinkageInfo LV;
if (!hasExplicitVisibilityAlready(computation)) {
- if (Optional<Visibility> Vis =
+ if (std::optional<Visibility> Vis =
getExplicitVisibility(Function, computation))
LV.mergeVisibility(*Vis, true);
}
@@ -1271,19 +1372,20 @@ LinkageInfo LinkageComputer::getLVForLocalDecl(const NamedDecl *D,
if (const auto *Var = dyn_cast<VarDecl>(D)) {
if (Var->hasExternalStorage()) {
if (Var->isInAnonymousNamespace() && !isFirstInExternCContext(Var))
- return getInternalLinkageFor(Var);
+ return LinkageInfo::internal();
LinkageInfo LV;
if (Var->getStorageClass() == SC_PrivateExtern)
LV.mergeVisibility(HiddenVisibility, true);
else if (!hasExplicitVisibilityAlready(computation)) {
- if (Optional<Visibility> Vis = getExplicitVisibility(Var, computation))
+ if (std::optional<Visibility> Vis =
+ getExplicitVisibility(Var, computation))
LV.mergeVisibility(*Vis, true);
}
if (const VarDecl *Prev = Var->getPreviousDecl()) {
LinkageInfo PrevLV = getLVForDecl(Prev, computation);
- if (PrevLV.getLinkage())
+ if (PrevLV.getLinkage() != Linkage::Invalid)
LV.setLinkage(PrevLV.getLinkage());
LV.mergeVisibility(PrevLV);
}
@@ -1334,14 +1436,14 @@ LinkageInfo LinkageComputer::getLVForLocalDecl(const NamedDecl *D,
computation.isValueVisibility()
? Context.getLangOpts().getValueVisibilityMode()
: Context.getLangOpts().getTypeVisibilityMode();
- return LinkageInfo(VisibleNoLinkage, globalVisibility,
+ return LinkageInfo(Linkage::VisibleNone, globalVisibility,
/*visibilityExplicit=*/false);
}
}
}
if (!isExternallyVisible(LV.getLinkage()))
return LinkageInfo::none();
- return LinkageInfo(VisibleNoLinkage, LV.getVisibility(),
+ return LinkageInfo(Linkage::VisibleNone, LV.getVisibility(),
LV.isVisibilityExplicit());
}
@@ -1350,7 +1452,7 @@ LinkageInfo LinkageComputer::computeLVForDecl(const NamedDecl *D,
bool IgnoreVarTypeLinkage) {
// Internal_linkage attribute overrides other considerations.
if (D->hasAttr<InternalLinkageAttr>())
- return getInternalLinkageFor(D);
+ return LinkageInfo::internal();
// Objective-C: treat all Objective-C declarations as having external
// linkage.
@@ -1408,7 +1510,7 @@ LinkageInfo LinkageComputer::computeLVForDecl(const NamedDecl *D,
if (Record->hasKnownLambdaInternalLinkage() ||
!Record->getLambdaManglingNumber()) {
// This lambda has no mangling number, so it's internal.
- return getInternalLinkageFor(D);
+ return LinkageInfo::internal();
}
return getLVForClosure(
@@ -1467,12 +1569,12 @@ LinkageInfo LinkageComputer::getLVForDecl(const NamedDecl *D,
LVComputationKind computation) {
// Internal_linkage attribute overrides other considerations.
if (D->hasAttr<InternalLinkageAttr>())
- return getInternalLinkageFor(D);
+ return LinkageInfo::internal();
if (computation.IgnoreAllVisibility && D->hasCachedLinkage())
return LinkageInfo(D->getCachedLinkage(), DefaultVisibility, false);
- if (llvm::Optional<LinkageInfo> LI = lookup(D, computation))
+ if (std::optional<LinkageInfo> LI = lookup(D, computation))
return *LI;
LinkageInfo LV = computeLVForDecl(D, computation);
@@ -1494,7 +1596,7 @@ LinkageInfo LinkageComputer::getLVForDecl(const NamedDecl *D,
// that all other computed linkages match, check that the one we just
// computed also does.
NamedDecl *Old = nullptr;
- for (auto I : D->redecls()) {
+ for (auto *I : D->redecls()) {
auto *T = cast<NamedDecl>(I);
if (T == D)
continue;
@@ -1520,6 +1622,11 @@ LinkageInfo LinkageComputer::getDeclLinkageAndVisibility(const NamedDecl *D) {
}
Module *Decl::getOwningModuleForLinkage(bool IgnoreLinkage) const {
+ if (isa<NamespaceDecl>(this))
+ // Namespaces never have module linkage. It is the entities within them
+ // that [may] do.
+ return nullptr;
+
Module *M = getOwningModule();
if (!M)
return nullptr;
@@ -1530,24 +1637,30 @@ Module *Decl::getOwningModuleForLinkage(bool IgnoreLinkage) const {
return nullptr;
case Module::ModuleInterfaceUnit:
+ case Module::ModuleImplementationUnit:
+ case Module::ModulePartitionInterface:
+ case Module::ModulePartitionImplementation:
return M;
- case Module::GlobalModuleFragment: {
+ case Module::ModuleHeaderUnit:
+ case Module::ExplicitGlobalModuleFragment:
+ case Module::ImplicitGlobalModuleFragment: {
// External linkage declarations in the global module have no owning module
// for linkage purposes. But internal linkage declarations in the global
// module fragment of a particular module are owned by that module for
// linkage purposes.
+ // FIXME: p1815 removes the need for this distinction -- there are no
+ // internal linkage declarations that need to be referred to from outside
+ // this TU.
if (IgnoreLinkage)
return nullptr;
bool InternalLinkage;
if (auto *ND = dyn_cast<NamedDecl>(this))
InternalLinkage = !ND->hasExternalFormalLinkage();
- else {
- auto *NSD = dyn_cast<NamespaceDecl>(this);
- InternalLinkage = (NSD && NSD->isAnonymousNamespace()) ||
- isInAnonymousNamespace();
- }
- return InternalLinkage ? M->Parent : nullptr;
+ else
+ InternalLinkage = isInAnonymousNamespace();
+ return InternalLinkage ? M->Kind == Module::ModuleHeaderUnit ? M : M->Parent
+ : nullptr;
}
case Module::PrivateModuleFragment:
@@ -1559,15 +1672,19 @@ Module *Decl::getOwningModuleForLinkage(bool IgnoreLinkage) const {
llvm_unreachable("unknown module kind");
}
-void NamedDecl::printName(raw_ostream &os) const {
- os << Name;
+void NamedDecl::printName(raw_ostream &OS, const PrintingPolicy &Policy) const {
+ Name.print(OS, Policy);
+}
+
+void NamedDecl::printName(raw_ostream &OS) const {
+ printName(OS, getASTContext().getPrintingPolicy());
}
std::string NamedDecl::getQualifiedNameAsString() const {
std::string QualName;
llvm::raw_string_ostream OS(QualName);
printQualifiedName(OS, getASTContext().getPrintingPolicy());
- return OS.str();
+ return QualName;
}
void NamedDecl::printQualifiedName(raw_ostream &OS) const {
@@ -1578,7 +1695,7 @@ void NamedDecl::printQualifiedName(raw_ostream &OS,
const PrintingPolicy &P) const {
if (getDeclContext()->isFunctionOrMethod()) {
// We do not print '(anonymous)' for function parameters without name.
- printName(OS);
+ printName(OS, P);
return;
}
printNestedNameSpecifier(OS, P);
@@ -1589,7 +1706,7 @@ void NamedDecl::printQualifiedName(raw_ostream &OS,
// fall back to "(anonymous)".
SmallString<64> NameBuffer;
llvm::raw_svector_ostream NameOS(NameBuffer);
- printName(NameOS);
+ printName(NameOS, P);
if (NameBuffer.empty())
OS << "(anonymous)";
else
@@ -1647,8 +1764,7 @@ void NamedDecl::printNestedNameSpecifier(raw_ostream &OS,
NameInScope = ND->getDeclName();
}
- for (unsigned I = Contexts.size(); I != 0; --I) {
- const DeclContext *DC = Contexts[I - 1];
+ for (const DeclContext *DC : llvm::reverse(Contexts)) {
if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(DC)) {
OS << Spec->getName();
const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
@@ -1713,7 +1829,7 @@ void NamedDecl::getNameForDiagnostic(raw_ostream &OS,
if (Qualified)
printQualifiedName(OS, Policy);
else
- printName(OS);
+ printName(OS, Policy);
}
template<typename T> static bool isRedeclarableImpl(Redeclarable<T> *) {
@@ -1731,7 +1847,8 @@ static bool isRedeclarable(Decl::Kind K) {
llvm_unreachable("unknown decl kind");
}
-bool NamedDecl::declarationReplaces(NamedDecl *OldD, bool IsKnownNewer) const {
+bool NamedDecl::declarationReplaces(const NamedDecl *OldD,
+ bool IsKnownNewer) const {
assert(getDeclName() == OldD->getDeclName() && "Declaration name mismatch");
// Never replace one imported declaration with another; we need both results
@@ -1761,13 +1878,13 @@ bool NamedDecl::declarationReplaces(NamedDecl *OldD, bool IsKnownNewer) const {
// Using declarations can be replaced if they import the same name from the
// same context.
- if (auto *UD = dyn_cast<UsingDecl>(this)) {
+ if (const auto *UD = dyn_cast<UsingDecl>(this)) {
ASTContext &Context = getASTContext();
return Context.getCanonicalNestedNameSpecifier(UD->getQualifier()) ==
Context.getCanonicalNestedNameSpecifier(
cast<UsingDecl>(OldD)->getQualifier());
}
- if (auto *UUVD = dyn_cast<UnresolvedUsingValueDecl>(this)) {
+ if (const auto *UUVD = dyn_cast<UnresolvedUsingValueDecl>(this)) {
ASTContext &Context = getASTContext();
return Context.getCanonicalNestedNameSpecifier(UUVD->getQualifier()) ==
Context.getCanonicalNestedNameSpecifier(
@@ -1784,7 +1901,7 @@ bool NamedDecl::declarationReplaces(NamedDecl *OldD, bool IsKnownNewer) const {
// Check whether this is actually newer than OldD. We want to keep the
// newer declaration. This loop will usually only iterate once, because
// OldD is usually the previous declaration.
- for (auto D : redecls()) {
+ for (const auto *D : redecls()) {
if (D == OldD)
break;
@@ -1808,12 +1925,26 @@ bool NamedDecl::declarationReplaces(NamedDecl *OldD, bool IsKnownNewer) const {
}
bool NamedDecl::hasLinkage() const {
- return getFormalLinkage() != NoLinkage;
+ switch (getFormalLinkage()) {
+ case Linkage::Invalid:
+ llvm_unreachable("Linkage hasn't been computed!");
+ case Linkage::None:
+ return false;
+ case Linkage::Internal:
+ return true;
+ case Linkage::UniqueExternal:
+ case Linkage::VisibleNone:
+ llvm_unreachable("Non-formal linkage is not allowed here!");
+ case Linkage::Module:
+ case Linkage::External:
+ return true;
+ }
+ llvm_unreachable("Unhandled Linkage enum");
}
NamedDecl *NamedDecl::getUnderlyingDeclImpl() {
NamedDecl *ND = this;
- while (auto *UD = dyn_cast<UsingShadowDecl>(ND))
+ if (auto *UD = dyn_cast<UsingShadowDecl>(ND))
ND = UD->getTargetDecl();
if (auto *AD = dyn_cast<ObjCCompatibleAliasDecl>(ND))
@@ -1835,7 +1966,7 @@ bool NamedDecl::isCXXInstanceMember() const {
if (isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D) || isa<MSPropertyDecl>(D))
return true;
- if (const auto *MD = dyn_cast_or_null<CXXMethodDecl>(D->getAsFunction()))
+ if (const auto *MD = dyn_cast_if_present<CXXMethodDecl>(D->getAsFunction()))
return MD->isInstance();
return false;
}
@@ -1999,7 +2130,7 @@ const char *VarDecl::getStorageClassSpecifierString(StorageClass SC) {
VarDecl::VarDecl(Kind DK, ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
- IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
+ const IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
StorageClass SC)
: DeclaratorDecl(DK, DC, IdLoc, Id, T, TInfo, StartLoc),
redeclarable_base(C) {
@@ -2014,10 +2145,9 @@ VarDecl::VarDecl(Kind DK, ASTContext &C, DeclContext *DC,
// Everything else is implicitly initialized to false.
}
-VarDecl *VarDecl::Create(ASTContext &C, DeclContext *DC,
- SourceLocation StartL, SourceLocation IdL,
- IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
- StorageClass S) {
+VarDecl *VarDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation StartL,
+ SourceLocation IdL, const IdentifierInfo *Id,
+ QualType T, TypeSourceInfo *TInfo, StorageClass S) {
return new (C, DC) VarDecl(Var, C, DC, StartL, IdL, Id, T, TInfo, S);
}
@@ -2074,8 +2204,7 @@ static LanguageLinkage getDeclLanguageLinkage(const T &D) {
// Language linkage is a C++ concept, but saying that everything else in C has
// C language linkage fits the implementation nicely.
- ASTContext &Context = D.getASTContext();
- if (!Context.getLangOpts().CPlusPlus)
+ if (!D.getASTContext().getLangOpts().CPlusPlus)
return CLanguageLinkage;
// C++ [dcl.link]p4: A C language linkage is ignored in determining the
@@ -2216,20 +2345,24 @@ VarDecl *VarDecl::getActingDefinition() {
return nullptr;
VarDecl *LastTentative = nullptr;
- VarDecl *First = getFirstDecl();
- for (auto I : First->redecls()) {
- Kind = I->isThisDeclarationADefinition();
+
+ // Loop through the declaration chain, starting with the most recent.
+ for (VarDecl *Decl = getMostRecentDecl(); Decl;
+ Decl = Decl->getPreviousDecl()) {
+ Kind = Decl->isThisDeclarationADefinition();
if (Kind == Definition)
return nullptr;
- if (Kind == TentativeDefinition)
- LastTentative = I;
+ // Record the first (most recent) TentativeDefinition that is encountered.
+ if (Kind == TentativeDefinition && !LastTentative)
+ LastTentative = Decl;
}
+
return LastTentative;
}
VarDecl *VarDecl::getDefinition(ASTContext &C) {
VarDecl *First = getFirstDecl();
- for (auto I : First->redecls()) {
+ for (auto *I : First->redecls()) {
if (I->isThisDeclarationADefinition(C) == Definition)
return I;
}
@@ -2240,7 +2373,7 @@ VarDecl::DefinitionKind VarDecl::hasDefinition(ASTContext &C) const {
DefinitionKind Kind = DeclarationOnly;
const VarDecl *First = getFirstDecl();
- for (auto I : First->redecls()) {
+ for (auto *I : First->redecls()) {
Kind = std::max(Kind, I->isThisDeclarationADefinition(C));
if (Kind == Definition)
break;
@@ -2250,7 +2383,7 @@ VarDecl::DefinitionKind VarDecl::hasDefinition(ASTContext &C) const {
}
const Expr *VarDecl::getAnyInitializer(const VarDecl *&D) const {
- for (auto I : redecls()) {
+ for (auto *I : redecls()) {
if (auto Expr = I->getInit()) {
D = I;
return Expr;
@@ -2274,19 +2407,22 @@ Expr *VarDecl::getInit() {
if (auto *S = Init.dyn_cast<Stmt *>())
return cast<Expr>(S);
- return cast_or_null<Expr>(Init.get<EvaluatedStmt *>()->Value);
+ auto *Eval = getEvaluatedStmt();
+ return cast<Expr>(Eval->Value.isOffset()
+ ? Eval->Value.get(getASTContext().getExternalSource())
+ : Eval->Value.get(nullptr));
}
Stmt **VarDecl::getInitAddress() {
if (auto *ES = Init.dyn_cast<EvaluatedStmt *>())
- return &ES->Value;
+ return ES->Value.getAddressOfPointer(getASTContext().getExternalSource());
return Init.getAddrOfPtr1();
}
VarDecl *VarDecl::getInitializingDeclaration() {
VarDecl *Def = nullptr;
- for (auto I : redecls()) {
+ for (auto *I : redecls()) {
if (I->hasInit())
return I;
@@ -2409,14 +2545,14 @@ EvaluatedStmt *VarDecl::getEvaluatedStmt() const {
APValue *VarDecl::evaluateValue() const {
SmallVector<PartialDiagnosticAt, 8> Notes;
- return evaluateValue(Notes);
+ return evaluateValueImpl(Notes, hasConstantInitialization());
}
-APValue *VarDecl::evaluateValue(
- SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
+APValue *VarDecl::evaluateValueImpl(SmallVectorImpl<PartialDiagnosticAt> &Notes,
+ bool IsConstantInitialization) const {
EvaluatedStmt *Eval = ensureEvaluatedStmt();
- const auto *Init = cast<Expr>(Eval->Value);
+ const auto *Init = getInit();
assert(!Init->isValueDependent());
// We only produce notes indicating why an initializer is non-constant the
@@ -2432,8 +2568,16 @@ APValue *VarDecl::evaluateValue(
Eval->IsEvaluating = true;
- bool Result = Init->EvaluateAsInitializer(Eval->Evaluated, getASTContext(),
- this, Notes);
+ ASTContext &Ctx = getASTContext();
+ bool Result = Init->EvaluateAsInitializer(Eval->Evaluated, Ctx, this, Notes,
+ IsConstantInitialization);
+
+ // In C++, this isn't a constant initializer if we produced notes. In that
+ // case, we can't keep the result, because it may only be correct under the
+ // assumption that the initializer is a constant context.
+ if (IsConstantInitialization && Ctx.getLangOpts().CPlusPlus &&
+ !Notes.empty())
+ Result = false;
// Ensure the computed APValue is cleaned up later if evaluation succeeded,
// or that it's empty (so that there's nothing to clean up) if evaluation
@@ -2441,7 +2585,7 @@ APValue *VarDecl::evaluateValue(
if (!Result)
Eval->Evaluated = APValue();
else if (Eval->Evaluated.needsCleanup())
- getASTContext().addDestruction(&Eval->Evaluated);
+ Ctx.addDestruction(&Eval->Evaluated);
Eval->IsEvaluating = false;
Eval->WasEvaluated = true;
@@ -2492,10 +2636,17 @@ bool VarDecl::checkForConstantInitialization(
"already evaluated var value before checking for constant init");
assert(getASTContext().getLangOpts().CPlusPlus && "only meaningful in C++");
- assert(!cast<Expr>(Eval->Value)->isValueDependent());
+ assert(!getInit()->isValueDependent());
// Evaluate the initializer to check whether it's a constant expression.
- Eval->HasConstantInitialization = evaluateValue(Notes) && Notes.empty();
+ Eval->HasConstantInitialization =
+ evaluateValueImpl(Notes, true) && Notes.empty();
+
+ // If evaluation as a constant initializer failed, allow re-evaluation as a
+ // non-constant initializer if we later find we want the value.
+ if (!Eval->HasConstantInitialization)
+ Eval->WasEvaluated = false;
+
return Eval->HasConstantInitialization;
}
@@ -2521,7 +2672,7 @@ bool VarDecl::isNonEscapingByref() const {
bool VarDecl::hasDependentAlignment() const {
QualType T = getType();
- return T->isDependentType() || T->isUndeducedAutoType() ||
+ return T->isDependentType() || T->isUndeducedType() ||
llvm::any_of(specific_attrs<AlignedAttr>(), [](const AlignedAttr *AA) {
return AA->isAlignmentDependent();
});
@@ -2667,6 +2818,42 @@ VarDecl::needsDestruction(const ASTContext &Ctx) const {
return getType().isDestructedType();
}
+bool VarDecl::hasFlexibleArrayInit(const ASTContext &Ctx) const {
+ assert(hasInit() && "Expect initializer to check for flexible array init");
+ auto *Ty = getType()->getAs<RecordType>();
+ if (!Ty || !Ty->getDecl()->hasFlexibleArrayMember())
+ return false;
+ auto *List = dyn_cast<InitListExpr>(getInit()->IgnoreParens());
+ if (!List)
+ return false;
+ const Expr *FlexibleInit = List->getInit(List->getNumInits() - 1);
+ auto InitTy = Ctx.getAsConstantArrayType(FlexibleInit->getType());
+ if (!InitTy)
+ return false;
+ return InitTy->getSize() != 0;
+}
+
+CharUnits VarDecl::getFlexibleArrayInitChars(const ASTContext &Ctx) const {
+ assert(hasInit() && "Expect initializer to check for flexible array init");
+ auto *Ty = getType()->getAs<RecordType>();
+ if (!Ty || !Ty->getDecl()->hasFlexibleArrayMember())
+ return CharUnits::Zero();
+ auto *List = dyn_cast<InitListExpr>(getInit()->IgnoreParens());
+ if (!List || List->getNumInits() == 0)
+ return CharUnits::Zero();
+ const Expr *FlexibleInit = List->getInit(List->getNumInits() - 1);
+ auto InitTy = Ctx.getAsConstantArrayType(FlexibleInit->getType());
+ if (!InitTy)
+ return CharUnits::Zero();
+ CharUnits FlexibleArraySize = Ctx.getTypeSizeInChars(InitTy);
+ const ASTRecordLayout &RL = Ctx.getASTRecordLayout(Ty->getDecl());
+ CharUnits FlexibleArrayOffset =
+ Ctx.toCharUnitsFromBits(RL.getFieldOffset(RL.getFieldCount() - 1));
+ if (FlexibleArrayOffset + FlexibleArraySize < RL.getSize())
+ return CharUnits::Zero();
+ return FlexibleArrayOffset + FlexibleArraySize - RL.getSize();
+}
+
MemberSpecializationInfo *VarDecl::getMemberSpecializationInfo() const {
if (isStaticDataMember())
// FIXME: Remove ?
@@ -2754,11 +2941,15 @@ SourceRange ParmVarDecl::getSourceRange() const {
}
bool ParmVarDecl::isDestroyedInCallee() const {
+ // ns_consumed only affects code generation in ARC
if (hasAttr<NSConsumedAttr>())
- return true;
+ return getASTContext().getLangOpts().ObjCAutoRefCount;
- auto *RT = getType()->getAs<RecordType>();
- if (RT && RT->getDecl()->isParamDestroyedInCallee())
+ // FIXME: isParamDestroyedInCallee() should probably imply
+ // isDestructedType()
+ const auto *RT = getType()->getAs<RecordType>();
+ if (RT && RT->getDecl()->isParamDestroyedInCallee() &&
+ getType().isDestructedType())
return true;
return false;
@@ -2770,7 +2961,7 @@ Expr *ParmVarDecl::getDefaultArg() {
"Default argument is not yet instantiated!");
Expr *Arg = getInit();
- if (auto *E = dyn_cast_or_null<FullExpr>(Arg))
+ if (auto *E = dyn_cast_if_present<FullExpr>(Arg))
return E->getSubExpr();
return Arg;
@@ -2809,7 +3000,7 @@ void ParmVarDecl::setUninstantiatedDefaultArg(Expr *arg) {
Expr *ParmVarDecl::getUninstantiatedDefaultArg() {
assert(hasUninstantiatedDefaultArg() &&
"Wrong kind of initialization expression!");
- return cast_or_null<Expr>(Init.get<Stmt *>());
+ return cast_if_present<Expr>(Init.get<Stmt *>());
}
bool ParmVarDecl::hasDefaultArg() const {
@@ -2837,7 +3028,7 @@ FunctionDecl::FunctionDecl(Kind DK, ASTContext &C, DeclContext *DC,
SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo, QualType T,
TypeSourceInfo *TInfo, StorageClass S,
- bool isInlineSpecified,
+ bool UsesFPIntrin, bool isInlineSpecified,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause)
: DeclaratorDecl(DK, DC, NameInfo.getLoc(), NameInfo.getName(), T, TInfo,
@@ -2849,7 +3040,7 @@ FunctionDecl::FunctionDecl(Kind DK, ASTContext &C, DeclContext *DC,
FunctionDeclBits.IsInline = isInlineSpecified;
FunctionDeclBits.IsInlineSpecified = isInlineSpecified;
FunctionDeclBits.IsVirtualAsWritten = false;
- FunctionDeclBits.IsPure = false;
+ FunctionDeclBits.IsPureVirtual = false;
FunctionDeclBits.HasInheritedPrototype = false;
FunctionDeclBits.HasWrittenPrototype = true;
FunctionDeclBits.IsDeleted = false;
@@ -2858,17 +3049,21 @@ FunctionDecl::FunctionDecl(Kind DK, ASTContext &C, DeclContext *DC,
FunctionDeclBits.IsDefaulted = false;
FunctionDeclBits.IsExplicitlyDefaulted = false;
FunctionDeclBits.HasDefaultedFunctionInfo = false;
+ FunctionDeclBits.IsIneligibleOrNotSelected = false;
FunctionDeclBits.HasImplicitReturnZero = false;
FunctionDeclBits.IsLateTemplateParsed = false;
FunctionDeclBits.ConstexprKind = static_cast<uint64_t>(ConstexprKind);
+ FunctionDeclBits.BodyContainsImmediateEscalatingExpression = false;
FunctionDeclBits.InstantiationIsPending = false;
FunctionDeclBits.UsesSEHTry = false;
- FunctionDeclBits.UsesFPIntrin = false;
+ FunctionDeclBits.UsesFPIntrin = UsesFPIntrin;
FunctionDeclBits.HasSkippedBody = false;
FunctionDeclBits.WillHaveBody = false;
FunctionDeclBits.IsMultiVersion = false;
- FunctionDeclBits.IsCopyDeductionCandidate = false;
+ FunctionDeclBits.DeductionCandidateKind =
+ static_cast<unsigned char>(DeductionCandidate::Normal);
FunctionDeclBits.HasODRHash = false;
+ FunctionDeclBits.FriendConstraintRefersToEnclosingTemplate = false;
if (TrailingRequiresClause)
setTrailingRequiresClause(TrailingRequiresClause);
}
@@ -2914,7 +3109,7 @@ FunctionDecl::getDefaultedFunctionInfo() const {
}
bool FunctionDecl::hasBody(const FunctionDecl *&Definition) const {
- for (auto I : redecls()) {
+ for (const auto *I : redecls()) {
if (I->doesThisDeclarationHaveABody()) {
Definition = I;
return true;
@@ -2925,7 +3120,7 @@ bool FunctionDecl::hasBody(const FunctionDecl *&Definition) const {
}
bool FunctionDecl::hasTrivialBody() const {
- Stmt *S = getBody();
+ const Stmt *S = getBody();
if (!S) {
// Since we don't have a body for this function, we don't know if it's
// trivial or not.
@@ -3012,8 +3207,8 @@ void FunctionDecl::setBody(Stmt *B) {
EndRangeLoc = B->getEndLoc();
}
-void FunctionDecl::setPure(bool P) {
- FunctionDeclBits.IsPure = P;
+void FunctionDecl::setIsPureVirtual(bool P) {
+ FunctionDeclBits.IsPureVirtual = P;
if (P)
if (auto *Parent = dyn_cast<CXXRecordDecl>(getDeclContext()))
Parent->markedVirtualFunctionPure();
@@ -3021,10 +3216,48 @@ void FunctionDecl::setPure(bool P) {
template<std::size_t Len>
static bool isNamed(const NamedDecl *ND, const char (&Str)[Len]) {
- IdentifierInfo *II = ND->getIdentifier();
+ const IdentifierInfo *II = ND->getIdentifier();
return II && II->isStr(Str);
}
+bool FunctionDecl::isImmediateEscalating() const {
+ // C++23 [expr.const]/p17
+ // An immediate-escalating function is
+ // - the call operator of a lambda that is not declared with the consteval
+ // specifier,
+ if (isLambdaCallOperator(this) && !isConsteval())
+ return true;
+ // - a defaulted special member function that is not declared with the
+ // consteval specifier,
+ if (isDefaulted() && !isConsteval())
+ return true;
+ // - a function that results from the instantiation of a templated entity
+ // defined with the constexpr specifier.
+ TemplatedKind TK = getTemplatedKind();
+ if (TK != TK_NonTemplate && TK != TK_DependentNonTemplate &&
+ isConstexprSpecified())
+ return true;
+ return false;
+}
+
+bool FunctionDecl::isImmediateFunction() const {
+ // C++23 [expr.const]/p18
+ // An immediate function is a function or constructor that is
+ // - declared with the consteval specifier
+ if (isConsteval())
+ return true;
+ // - an immediate-escalating function F whose function body contains an
+ // immediate-escalating expression
+ if (isImmediateEscalating() && BodyContainsImmediateEscalatingExpressions())
+ return true;
+
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(this);
+ MD && MD->isLambdaStaticInvoker())
+ return MD->getParent()->getLambdaCallOperator()->isImmediateFunction();
+
+ return false;
+}
+
bool FunctionDecl::isMain() const {
const TranslationUnitDecl *tunit =
dyn_cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext());
@@ -3061,11 +3294,13 @@ bool FunctionDecl::isMSVCRTEntryPoint() const {
}
bool FunctionDecl::isReservedGlobalPlacementOperator() const {
- assert(getDeclName().getNameKind() == DeclarationName::CXXOperatorName);
- assert(getDeclName().getCXXOverloadedOperator() == OO_New ||
- getDeclName().getCXXOverloadedOperator() == OO_Delete ||
- getDeclName().getCXXOverloadedOperator() == OO_Array_New ||
- getDeclName().getCXXOverloadedOperator() == OO_Array_Delete);
+ if (getDeclName().getNameKind() != DeclarationName::CXXOperatorName)
+ return false;
+ if (getDeclName().getCXXOverloadedOperator() != OO_New &&
+ getDeclName().getCXXOverloadedOperator() != OO_Delete &&
+ getDeclName().getCXXOverloadedOperator() != OO_Array_New &&
+ getDeclName().getCXXOverloadedOperator() != OO_Array_Delete)
+ return false;
if (!getDeclContext()->getRedeclContext()->isTranslationUnit())
return false;
@@ -3074,9 +3309,9 @@ bool FunctionDecl::isReservedGlobalPlacementOperator() const {
if (proto->getNumParams() != 2 || proto->isVariadic())
return false;
- ASTContext &Context =
- cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext())
- ->getASTContext();
+ const ASTContext &Context =
+ cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext())
+ ->getASTContext();
// The result type and first argument type are constant across all
// these operators. The second argument must be exactly void*.
@@ -3084,7 +3319,7 @@ bool FunctionDecl::isReservedGlobalPlacementOperator() const {
}
bool FunctionDecl::isReplaceableGlobalAllocationFunction(
- Optional<unsigned> *AlignmentParam, bool *IsNothrow) const {
+ std::optional<unsigned> *AlignmentParam, bool *IsNothrow) const {
if (getDeclName().getNameKind() != DeclarationName::CXXOperatorName)
return false;
if (getDeclName().getCXXOverloadedOperator() != OO_New &&
@@ -3101,7 +3336,7 @@ bool FunctionDecl::isReplaceableGlobalAllocationFunction(
return false;
const auto *FPT = getType()->castAs<FunctionProtoType>();
- if (FPT->getNumParams() == 0 || FPT->getNumParams() > 3 || FPT->isVariadic())
+ if (FPT->getNumParams() == 0 || FPT->getNumParams() > 4 || FPT->isVariadic())
return false;
// If this is a single-parameter function, it must be a replaceable global
@@ -3111,7 +3346,7 @@ bool FunctionDecl::isReplaceableGlobalAllocationFunction(
unsigned Params = 1;
QualType Ty = FPT->getParamType(Params);
- ASTContext &Ctx = getASTContext();
+ const ASTContext &Ctx = getASTContext();
auto Consume = [&] {
++Params;
@@ -3136,8 +3371,8 @@ bool FunctionDecl::isReplaceableGlobalAllocationFunction(
*AlignmentParam = Params;
}
- // Finally, if this is not a sized delete, the final parameter can
- // be a 'const std::nothrow_t&'.
+ // If this is not a sized delete, the next parameter can be a
+ // 'const std::nothrow_t&'.
if (!IsSizedDelete && !Ty.isNull() && Ty->isReferenceType()) {
Ty = Ty->getPointeeType();
if (Ty.getCVRQualifiers() != Qualifiers::Const)
@@ -3149,6 +3384,20 @@ bool FunctionDecl::isReplaceableGlobalAllocationFunction(
}
}
+ // Finally, recognize the not yet standard versions of new that take a
+ // hot/cold allocation hint (__hot_cold_t). These are currently supported by
+ // tcmalloc (see
+ // https://github.com/google/tcmalloc/blob/220043886d4e2efff7a5702d5172cb8065253664/tcmalloc/malloc_extension.h#L53).
+ if (!IsSizedDelete && !Ty.isNull() && Ty->isEnumeralType()) {
+ QualType T = Ty;
+ while (const auto *TD = T->getAs<TypedefType>())
+ T = TD->getDecl()->getUnderlyingType();
+ const IdentifierInfo *II =
+ T->castAs<EnumType>()->getDecl()->getIdentifier();
+ if (II && II->isStr("__hot_cold_t"))
+ Consume();
+ }
+
return Params == FPT->getNumParams();
}
@@ -3157,7 +3406,24 @@ bool FunctionDecl::isInlineBuiltinDeclaration() const {
return false;
const FunctionDecl *Definition;
- return hasBody(Definition) && Definition->isInlineSpecified();
+ if (!hasBody(Definition))
+ return false;
+
+ if (!Definition->isInlineSpecified() ||
+ !Definition->hasAttr<AlwaysInlineAttr>())
+ return false;
+
+ ASTContext &Context = getASTContext();
+ switch (Context.GetGVALinkageForFunction(Definition)) {
+ case GVA_Internal:
+ case GVA_DiscardableODR:
+ case GVA_StrongODR:
+ return false;
+ case GVA_AvailableExternally:
+ case GVA_StrongExternal:
+ return true;
+ }
+ llvm_unreachable("Unknown GVALinkage");
}
bool FunctionDecl::isDestroyingOperatorDelete() const {
@@ -3205,7 +3471,6 @@ bool FunctionDecl::isGlobal() const {
if (const auto *Namespace = cast<NamespaceDecl>(DC)) {
if (!Namespace->getDeclName())
return false;
- break;
}
}
@@ -3223,14 +3488,39 @@ bool FunctionDecl::isNoReturn() const {
return false;
}
+bool FunctionDecl::isMemberLikeConstrainedFriend() const {
+ // C++20 [temp.friend]p9:
+ // A non-template friend declaration with a requires-clause [or]
+ // a friend function template with a constraint that depends on a template
+ // parameter from an enclosing template [...] does not declare the same
+ // function or function template as a declaration in any other scope.
+
+ // If this isn't a friend then it's not a member-like constrained friend.
+ if (!getFriendObjectKind()) {
+ return false;
+ }
+
+ if (!getDescribedFunctionTemplate()) {
+ // If these friends don't have constraints, they aren't constrained, and
+ // thus don't fall under temp.friend p9. Else the simple presence of a
+ // constraint makes them unique.
+ return getTrailingRequiresClause();
+ }
+
+ return FriendConstraintRefersToEnclosingTemplate();
+}
MultiVersionKind FunctionDecl::getMultiVersionKind() const {
if (hasAttr<TargetAttr>())
return MultiVersionKind::Target;
+ if (hasAttr<TargetVersionAttr>())
+ return MultiVersionKind::TargetVersion;
if (hasAttr<CPUDispatchAttr>())
return MultiVersionKind::CPUDispatch;
if (hasAttr<CPUSpecificAttr>())
return MultiVersionKind::CPUSpecific;
+ if (hasAttr<TargetClonesAttr>())
+ return MultiVersionKind::TargetClones;
return MultiVersionKind::None;
}
@@ -3243,7 +3533,12 @@ bool FunctionDecl::isCPUSpecificMultiVersion() const {
}
bool FunctionDecl::isTargetMultiVersion() const {
- return isMultiVersion() && hasAttr<TargetAttr>();
+ return isMultiVersion() &&
+ (hasAttr<TargetAttr>() || hasAttr<TargetVersionAttr>());
+}
+
+bool FunctionDecl::isTargetClonesMultiVersion() const {
+ return isMultiVersion() && hasAttr<TargetClonesAttr>();
}
void
@@ -3296,7 +3591,7 @@ unsigned FunctionDecl::getBuiltinID(bool ConsiderWrapperFunctions) const {
(!hasAttr<ArmBuiltinAliasAttr>() && !hasAttr<BuiltinAliasAttr>()))
return 0;
- ASTContext &Context = getASTContext();
+ const ASTContext &Context = getASTContext();
if (!Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
return BuiltinID;
@@ -3325,7 +3620,7 @@ unsigned FunctionDecl::getBuiltinID(bool ConsiderWrapperFunctions) const {
// library, none of the predefined library functions except printf and malloc
// should be treated as a builtin i.e. 0 should be returned for them.
if (Context.getTargetInfo().getTriple().isAMDGCN() &&
- Context.getLangOpts().OpenMPIsDevice &&
+ Context.getLangOpts().OpenMPIsTargetDevice &&
Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID) &&
!(BuiltinID == Builtin::BIprintf || BuiltinID == Builtin::BImalloc))
return 0;
@@ -3375,11 +3670,25 @@ unsigned FunctionDecl::getMinRequiredArguments() const {
return NumRequiredArgs;
}
+bool FunctionDecl::hasCXXExplicitFunctionObjectParameter() const {
+ return getNumParams() != 0 && getParamDecl(0)->isExplicitObjectParameter();
+}
+
+unsigned FunctionDecl::getNumNonObjectParams() const {
+ return getNumParams() -
+ static_cast<unsigned>(hasCXXExplicitFunctionObjectParameter());
+}
+
+unsigned FunctionDecl::getMinRequiredExplicitArguments() const {
+ return getMinRequiredArguments() -
+ static_cast<unsigned>(hasCXXExplicitFunctionObjectParameter());
+}
+
bool FunctionDecl::hasOneParamOrDefaultArgs() const {
return getNumParams() == 1 ||
(getNumParams() > 1 &&
- std::all_of(param_begin() + 1, param_end(),
- [](ParmVarDecl *P) { return P->hasDefaultArg(); }));
+ llvm::all_of(llvm::drop_begin(parameters()),
+ [](ParmVarDecl *P) { return P->hasDefaultArg(); }));
}
/// The combination of the extern and inline keywords under MSVC forces
@@ -3441,7 +3750,7 @@ bool FunctionDecl::doesDeclarationForceExternallyVisibleDefinition() const {
assert(!doesThisDeclarationHaveABody() &&
"Must have a declaration without a body.");
- ASTContext &Context = getASTContext();
+ const ASTContext &Context = getASTContext();
if (Context.getLangOpts().MSVCCompat) {
const FunctionDecl *Definition;
@@ -3578,7 +3887,7 @@ bool FunctionDecl::isInlineDefinitionExternallyVisible() const {
// If any declaration is 'inline' but not 'extern', then this definition
// is externally visible.
- for (auto Redecl : redecls()) {
+ for (auto *Redecl : redecls()) {
if (Redecl->isInlineSpecified() &&
Redecl->getStorageClass() != SC_Extern)
return true;
@@ -3595,7 +3904,7 @@ bool FunctionDecl::isInlineDefinitionExternallyVisible() const {
// [...] If all of the file scope declarations for a function in a
// translation unit include the inline function specifier without extern,
// then the definition in that translation unit is an inline definition.
- for (auto Redecl : redecls()) {
+ for (auto *Redecl : redecls()) {
if (RedeclForcesDefC99(Redecl))
return true;
}
@@ -3626,8 +3935,13 @@ const IdentifierInfo *FunctionDecl::getLiteralIdentifier() const {
FunctionDecl::TemplatedKind FunctionDecl::getTemplatedKind() const {
if (TemplateOrSpecialization.isNull())
return TK_NonTemplate;
- if (TemplateOrSpecialization.is<FunctionTemplateDecl *>())
+ if (const auto *ND = TemplateOrSpecialization.dyn_cast<NamedDecl *>()) {
+ if (isa<FunctionDecl>(ND))
+ return TK_DependentNonTemplate;
+ assert(isa<FunctionTemplateDecl>(ND) &&
+ "No other valid types in NamedDecl");
return TK_FunctionTemplate;
+ }
if (TemplateOrSpecialization.is<MemberSpecializationInfo *>())
return TK_MemberSpecialization;
if (TemplateOrSpecialization.is<FunctionTemplateSpecializationInfo *>())
@@ -3668,15 +3982,34 @@ FunctionDecl::setInstantiationOfMemberFunction(ASTContext &C,
}
FunctionTemplateDecl *FunctionDecl::getDescribedFunctionTemplate() const {
- return TemplateOrSpecialization.dyn_cast<FunctionTemplateDecl *>();
+ return dyn_cast_if_present<FunctionTemplateDecl>(
+ TemplateOrSpecialization.dyn_cast<NamedDecl *>());
}
-void FunctionDecl::setDescribedFunctionTemplate(FunctionTemplateDecl *Template) {
+void FunctionDecl::setDescribedFunctionTemplate(
+ FunctionTemplateDecl *Template) {
assert(TemplateOrSpecialization.isNull() &&
"Member function is already a specialization");
TemplateOrSpecialization = Template;
}
+bool FunctionDecl::isFunctionTemplateSpecialization() const {
+ return TemplateOrSpecialization.is<FunctionTemplateSpecializationInfo *>() ||
+ TemplateOrSpecialization
+ .is<DependentFunctionTemplateSpecializationInfo *>();
+}
+
+void FunctionDecl::setInstantiatedFromDecl(FunctionDecl *FD) {
+ assert(TemplateOrSpecialization.isNull() &&
+ "Function is already a specialization");
+ TemplateOrSpecialization = FD;
+}
+
+FunctionDecl *FunctionDecl::getInstantiatedFromDecl() const {
+ return dyn_cast_if_present<FunctionDecl>(
+ TemplateOrSpecialization.dyn_cast<NamedDecl *>());
+}
+
bool FunctionDecl::isImplicitlyInstantiable() const {
// If the function is invalid, it can't be implicitly instantiated.
if (isInvalidDecl())
@@ -3800,6 +4133,11 @@ FunctionDecl::getTemplateSpecializationArgsAsWritten() const {
.dyn_cast<FunctionTemplateSpecializationInfo*>()) {
return Info->TemplateArgumentsAsWritten;
}
+ if (DependentFunctionTemplateSpecializationInfo *Info =
+ TemplateOrSpecialization
+ .dyn_cast<DependentFunctionTemplateSpecializationInfo *>()) {
+ return Info->TemplateArgumentsAsWritten;
+ }
return nullptr;
}
@@ -3817,6 +4155,7 @@ FunctionDecl::setFunctionTemplateSpecialization(ASTContext &C,
assert(TSK != TSK_Undeclared &&
"Must specify the type of function template specialization");
assert((TemplateOrSpecialization.isNull() ||
+ getFriendObjectKind() != FOK_None ||
TSK == TSK_ExplicitSpecialization) &&
"Member specialization must be an explicit specialization");
FunctionTemplateSpecializationInfo *Info =
@@ -3828,10 +4167,9 @@ FunctionDecl::setFunctionTemplateSpecialization(ASTContext &C,
Template->addSpecialization(Info, InsertPos);
}
-void
-FunctionDecl::setDependentTemplateSpecialization(ASTContext &Context,
- const UnresolvedSetImpl &Templates,
- const TemplateArgumentListInfo &TemplateArgs) {
+void FunctionDecl::setDependentTemplateSpecialization(
+ ASTContext &Context, const UnresolvedSetImpl &Templates,
+ const TemplateArgumentListInfo *TemplateArgs) {
assert(TemplateOrSpecialization.isNull());
DependentFunctionTemplateSpecializationInfo *Info =
DependentFunctionTemplateSpecializationInfo::Create(Context, Templates,
@@ -3847,28 +4185,26 @@ FunctionDecl::getDependentSpecializationInfo() const {
DependentFunctionTemplateSpecializationInfo *
DependentFunctionTemplateSpecializationInfo::Create(
- ASTContext &Context, const UnresolvedSetImpl &Ts,
- const TemplateArgumentListInfo &TArgs) {
- void *Buffer = Context.Allocate(
- totalSizeToAlloc<TemplateArgumentLoc, FunctionTemplateDecl *>(
- TArgs.size(), Ts.size()));
- return new (Buffer) DependentFunctionTemplateSpecializationInfo(Ts, TArgs);
+ ASTContext &Context, const UnresolvedSetImpl &Candidates,
+ const TemplateArgumentListInfo *TArgs) {
+ const auto *TArgsWritten =
+ TArgs ? ASTTemplateArgumentListInfo::Create(Context, *TArgs) : nullptr;
+ return new (Context.Allocate(
+ totalSizeToAlloc<FunctionTemplateDecl *>(Candidates.size())))
+ DependentFunctionTemplateSpecializationInfo(Candidates, TArgsWritten);
}
DependentFunctionTemplateSpecializationInfo::
-DependentFunctionTemplateSpecializationInfo(const UnresolvedSetImpl &Ts,
- const TemplateArgumentListInfo &TArgs)
- : AngleLocs(TArgs.getLAngleLoc(), TArgs.getRAngleLoc()) {
- NumTemplates = Ts.size();
- NumArgs = TArgs.size();
-
- FunctionTemplateDecl **TsArray = getTrailingObjects<FunctionTemplateDecl *>();
- for (unsigned I = 0, E = Ts.size(); I != E; ++I)
- TsArray[I] = cast<FunctionTemplateDecl>(Ts[I]->getUnderlyingDecl());
-
- TemplateArgumentLoc *ArgsArray = getTrailingObjects<TemplateArgumentLoc>();
- for (unsigned I = 0, E = TArgs.size(); I != E; ++I)
- new (&ArgsArray[I]) TemplateArgumentLoc(TArgs[I]);
+ DependentFunctionTemplateSpecializationInfo(
+ const UnresolvedSetImpl &Candidates,
+ const ASTTemplateArgumentListInfo *TemplateArgsWritten)
+ : NumCandidates(Candidates.size()),
+ TemplateArgumentsAsWritten(TemplateArgsWritten) {
+ std::transform(Candidates.begin(), Candidates.end(),
+ getTrailingObjects<FunctionTemplateDecl *>(),
+ [](NamedDecl *ND) {
+ return cast<FunctionTemplateDecl>(ND->getUnderlyingDecl());
+ });
}
TemplateSpecializationKind FunctionDecl::getTemplateSpecializationKind() const {
@@ -3883,6 +4219,13 @@ TemplateSpecializationKind FunctionDecl::getTemplateSpecializationKind() const {
TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo *>())
return MSInfo->getTemplateSpecializationKind();
+ // A dependent function template specialization is an explicit specialization,
+ // except when it's a friend declaration.
+ if (TemplateOrSpecialization
+ .is<DependentFunctionTemplateSpecializationInfo *>() &&
+ getFriendObjectKind() == FOK_None)
+ return TSK_ExplicitSpecialization;
+
return TSK_Undeclared;
}
@@ -3897,6 +4240,11 @@ FunctionDecl::getTemplateSpecializationKindForInstantiation() const {
// template<> void f<int>() {}
// };
//
+ // Within the templated CXXRecordDecl, A<T>::f<int> is a dependent function
+ // template specialization; both getTemplateSpecializationKind() and
+ // getTemplateSpecializationKindForInstantiation() will return
+ // TSK_ExplicitSpecialization.
+ //
// For A<int>::f<int>():
// * getTemplateSpecializationKind() will return TSK_ExplicitSpecialization
// * getTemplateSpecializationKindForInstantiation() will return
@@ -3917,6 +4265,11 @@ FunctionDecl::getTemplateSpecializationKindForInstantiation() const {
TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo *>())
return MSInfo->getTemplateSpecializationKind();
+ if (TemplateOrSpecialization
+ .is<DependentFunctionTemplateSpecializationInfo *>() &&
+ getFriendObjectKind() == FOK_None)
+ return TSK_ExplicitSpecialization;
+
return TSK_Undeclared;
}
@@ -4061,6 +4414,10 @@ unsigned FunctionDecl::getMemoryFunctionKind() const {
case Builtin::BIbzero:
return Builtin::BIbzero;
+ case Builtin::BI__builtin_bcopy:
+ case Builtin::BIbcopy:
+ return Builtin::BIbcopy;
+
case Builtin::BIfree:
return Builtin::BIfree;
@@ -4092,6 +4449,8 @@ unsigned FunctionDecl::getMemoryFunctionKind() const {
return Builtin::BIstrlen;
if (FnInfo->isStr("bzero"))
return Builtin::BIbzero;
+ if (FnInfo->isStr("bcopy"))
+ return Builtin::BIbcopy;
} else if (isInStdNamespace()) {
if (FnInfo->isStr("free"))
return Builtin::BIfree;
@@ -4117,7 +4476,7 @@ unsigned FunctionDecl::getODRHash() {
}
class ODRHash Hash;
- Hash.AddFunctionDecl(this);
+ Hash.AddFunctionDecl(this, /*SkipBody=*/shouldSkipCheckingODR());
setHasODRHash(true);
ODRHash = Hash.CalculateHash();
return ODRHash;
@@ -4152,6 +4511,28 @@ bool FieldDecl::isAnonymousStructOrUnion() const {
return false;
}
+Expr *FieldDecl::getInClassInitializer() const {
+ if (!hasInClassInitializer())
+ return nullptr;
+
+ LazyDeclStmtPtr InitPtr = BitField ? InitAndBitWidth->Init : Init;
+ return cast_if_present<Expr>(
+ InitPtr.isOffset() ? InitPtr.get(getASTContext().getExternalSource())
+ : InitPtr.get(nullptr));
+}
+
+void FieldDecl::setInClassInitializer(Expr *NewInit) {
+ setLazyInClassInitializer(LazyDeclStmtPtr(NewInit));
+}
+
+void FieldDecl::setLazyInClassInitializer(LazyDeclStmtPtr NewInit) {
+ assert(hasInClassInitializer() && !getInClassInitializer());
+ if (BitField)
+ InitAndBitWidth->Init = NewInit;
+ else
+ Init = NewInit;
+}
+
unsigned FieldDecl::getBitWidthValue(const ASTContext &Ctx) const {
assert(isBitField() && "not a bitfield");
return getBitWidth()->EvaluateKnownConstInt(Ctx).getZExtValue();
@@ -4190,9 +4571,18 @@ bool FieldDecl::isZeroSize(const ASTContext &Ctx) const {
// Otherwise, [...] the circumstances under which the object has zero size
// are implementation-defined.
- // FIXME: This might be Itanium ABI specific; we don't yet know what the MS
- // ABI will do.
- return true;
+ if (!Ctx.getTargetInfo().getCXXABI().isMicrosoft())
+ return true;
+
+ // MS ABI: has nonzero size if it is a class type with class type fields,
+ // whether or not they have nonzero size
+ return !llvm::any_of(CXXRD->fields(), [](const FieldDecl *Field) {
+ return Field->getType()->getAs<RecordType>();
+ });
+}
+
+bool FieldDecl::isPotentiallyOverlapping() const {
+ return hasAttr<NoUniqueAddressAttr>() && getType()->getAsCXXRecordDecl();
}
unsigned FieldDecl::getFieldIndex() const {
@@ -4208,6 +4598,8 @@ unsigned FieldDecl::getFieldIndex() const {
for (auto *Field : RD->fields()) {
Field->getCanonicalDecl()->CachedFieldIndex = Index + 1;
+ assert(Field->getCanonicalDecl()->CachedFieldIndex == Index + 1 &&
+ "overflow in field numbering");
++Index;
}
@@ -4227,11 +4619,21 @@ SourceRange FieldDecl::getSourceRange() const {
void FieldDecl::setCapturedVLAType(const VariableArrayType *VLAType) {
assert((getParent()->isLambda() || getParent()->isCapturedRecord()) &&
"capturing type in non-lambda or captured record.");
- assert(InitStorage.getInt() == ISK_NoInit &&
- InitStorage.getPointer() == nullptr &&
- "bit width, initializer or captured type already set");
- InitStorage.setPointerAndInt(const_cast<VariableArrayType *>(VLAType),
- ISK_CapturedVLAType);
+ assert(StorageKind == ISK_NoInit && !BitField &&
+ "bit-field or field with default member initializer cannot capture "
+ "VLA type");
+ StorageKind = ISK_CapturedVLAType;
+ CapturedVLAType = VLAType;
+}
+
+void FieldDecl::printName(raw_ostream &OS, const PrintingPolicy &Policy) const {
+ // Print unnamed members using name of their type.
+ if (isAnonymousStructOrUnion()) {
+ this->getType().print(OS, Policy);
+ return;
+ }
+ // Otherwise, do the normal printing.
+ DeclaratorDecl::printName(OS, Policy);
}
//===----------------------------------------------------------------------===//
@@ -4243,8 +4645,8 @@ TagDecl::TagDecl(Kind DK, TagKind TK, const ASTContext &C, DeclContext *DC,
SourceLocation StartL)
: TypeDecl(DK, DC, L, Id, StartL), DeclContext(DK), redeclarable_base(C),
TypedefNameDeclOrQualifier((TypedefNameDecl *)nullptr) {
- assert((DK != Enum || TK == TTK_Enum) &&
- "EnumDecl not matched with TTK_Enum");
+ assert((DK != Enum || TK == TagTypeKind::Enum) &&
+ "EnumDecl not matched with TagTypeKind::Enum");
setPreviousDecl(PrevDecl);
setTagKind(TK);
setCompleteDefinition(false);
@@ -4252,6 +4654,7 @@ TagDecl::TagDecl(Kind DK, TagKind TK, const ASTContext &C, DeclContext *DC,
setEmbeddedInDeclarator(false);
setFreeStanding(false);
setCompleteDefinitionRequired(false);
+ TagDeclBits.IsThisDeclarationADemotedDefinition = false;
}
SourceLocation TagDecl::getOuterLocStart() const {
@@ -4281,7 +4684,7 @@ void TagDecl::startDefinition() {
if (auto *D = dyn_cast<CXXRecordDecl>(this)) {
struct CXXRecordDecl::DefinitionData *Data =
new (getASTContext()) struct CXXRecordDecl::DefinitionData(D);
- for (auto I : redecls())
+ for (auto *I : redecls())
cast<CXXRecordDecl>(I)->DefinitionData = Data;
}
}
@@ -4314,7 +4717,7 @@ TagDecl *TagDecl::getDefinition() const {
if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(this))
return CXXRD->getDefinition();
- for (auto R : redecls())
+ for (auto *R : redecls())
if (R->isCompleteDefinition())
return R;
@@ -4341,6 +4744,23 @@ void TagDecl::setQualifierInfo(NestedNameSpecifierLoc QualifierLoc) {
}
}
+void TagDecl::printName(raw_ostream &OS, const PrintingPolicy &Policy) const {
+ DeclarationName Name = getDeclName();
+ // If the name is supposed to have an identifier but does not have one, then
+ // the tag is anonymous and we should print it differently.
+ if (Name.isIdentifier() && !Name.getAsIdentifierInfo()) {
+ // If the caller wanted to print a qualified name, they've already printed
+ // the scope. And if the caller doesn't want that, the scope information
+ // is already printed as part of the type.
+ PrintingPolicy Copy(Policy);
+ Copy.SuppressScope = true;
+ getASTContext().getTagDeclType(this).print(OS, Copy);
+ return;
+ }
+ // Otherwise, do the normal printing.
+ Name.print(OS, Policy);
+}
+
void TagDecl::setTemplateParameterListsInfo(
ASTContext &Context, ArrayRef<TemplateParameterList *> TPLists) {
assert(!TPLists.empty());
@@ -4359,7 +4779,7 @@ void TagDecl::setTemplateParameterListsInfo(
EnumDecl::EnumDecl(ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id, EnumDecl *PrevDecl,
bool Scoped, bool ScopedUsingClassTag, bool Fixed)
- : TagDecl(Enum, TTK_Enum, C, DC, IdLoc, Id, PrevDecl, StartLoc) {
+ : TagDecl(Enum, TagTypeKind::Enum, C, DC, IdLoc, Id, PrevDecl, StartLoc) {
assert(Scoped || !ScopedUsingClassTag);
IntegerType = nullptr;
setNumPositiveBits(0);
@@ -4483,6 +4903,32 @@ unsigned EnumDecl::getODRHash() {
return ODRHash;
}
+SourceRange EnumDecl::getSourceRange() const {
+ auto Res = TagDecl::getSourceRange();
+ // Set end-point to enum-base, e.g. enum foo : ^bar
+ if (auto *TSI = getIntegerTypeSourceInfo()) {
+ // TagDecl doesn't know about the enum base.
+ if (!getBraceRange().getEnd().isValid())
+ Res.setEnd(TSI->getTypeLoc().getEndLoc());
+ }
+ return Res;
+}
+
+void EnumDecl::getValueRange(llvm::APInt &Max, llvm::APInt &Min) const {
+ unsigned Bitwidth = getASTContext().getIntWidth(getIntegerType());
+ unsigned NumNegativeBits = getNumNegativeBits();
+ unsigned NumPositiveBits = getNumPositiveBits();
+
+ if (NumNegativeBits) {
+ unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
+ Max = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
+ Min = -Max;
+ } else {
+ Max = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
+ Min = llvm::APInt::getZero(Bitwidth);
+ }
+}
+
//===----------------------------------------------------------------------===//
// RecordDecl Implementation
//===----------------------------------------------------------------------===//
@@ -4505,7 +4951,9 @@ RecordDecl::RecordDecl(Kind DK, TagKind TK, const ASTContext &C,
setHasNonTrivialToPrimitiveDestructCUnion(false);
setHasNonTrivialToPrimitiveCopyCUnion(false);
setParamDestroyedInCallee(false);
- setArgPassingRestrictions(APK_CanPassInRegs);
+ setArgPassingRestrictions(RecordArgPassingKind::CanPassInRegs);
+ setIsRandomized(false);
+ setODRHash(0);
}
RecordDecl *RecordDecl::Create(const ASTContext &C, TagKind TK, DeclContext *DC,
@@ -4520,9 +4968,9 @@ RecordDecl *RecordDecl::Create(const ASTContext &C, TagKind TK, DeclContext *DC,
}
RecordDecl *RecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
- RecordDecl *R =
- new (C, ID) RecordDecl(Record, TTK_Struct, C, nullptr, SourceLocation(),
- SourceLocation(), nullptr, nullptr);
+ RecordDecl *R = new (C, ID)
+ RecordDecl(Record, TagTypeKind::Struct, C, nullptr, SourceLocation(),
+ SourceLocation(), nullptr, nullptr);
R->setMayHaveOutOfDateDef(C.getLangOpts().Modules);
return R;
}
@@ -4564,7 +5012,10 @@ bool RecordDecl::isOrContainsUnion() const {
RecordDecl::field_iterator RecordDecl::field_begin() const {
if (hasExternalLexicalStorage() && !hasLoadedFieldsFromExternalStorage())
LoadFieldsFromExternalStorage();
-
+ // This is necessary for correctness for C++ with modules.
+ // FIXME: Come up with a test case that breaks without definition.
+ if (RecordDecl *D = getDefinition(); D && D != this)
+ return D->field_begin();
return field_iterator(decl_iterator(FirstDecl));
}
@@ -4589,6 +5040,12 @@ bool RecordDecl::isMsStruct(const ASTContext &C) const {
return hasAttr<MSStructAttr>() || C.getLangOpts().MSBitfields == 1;
}
+void RecordDecl::reorderDecls(const SmallVectorImpl<Decl *> &Decls) {
+ std::tie(FirstDecl, LastDecl) = DeclContext::BuildDeclChain(Decls, false);
+ LastDecl->NextInContextAndBits.setPointer(nullptr);
+ setIsRandomized(true);
+}
+
void RecordDecl::LoadFieldsFromExternalStorage() const {
ExternalASTSource *Source = getASTContext().getExternalSource();
assert(hasExternalLexicalStorage() && Source && "No external storage?");
@@ -4611,8 +5068,13 @@ void RecordDecl::LoadFieldsFromExternalStorage() const {
if (Decls.empty())
return;
- std::tie(FirstDecl, LastDecl) = BuildDeclChain(Decls,
- /*FieldsAlreadyLoaded=*/false);
+ auto [ExternalFirst, ExternalLast] =
+ BuildDeclChain(Decls,
+ /*FieldsAlreadyLoaded=*/false);
+ ExternalLast->NextInContextAndBits.setPointer(FirstDecl);
+ FirstDecl = ExternalFirst;
+ if (!LastDecl)
+ LastDecl = ExternalLast;
}
bool RecordDecl::mayInsertExtraPadding(bool EmitRemark) const {
@@ -4674,6 +5136,19 @@ const FieldDecl *RecordDecl::findFirstNamedDataMember() const {
return nullptr;
}
+unsigned RecordDecl::getODRHash() {
+ if (hasODRHash())
+ return RecordDeclBits.ODRHash;
+
+ // Only calculate hash on first call of getODRHash per record.
+ ODRHash Hash;
+ Hash.AddRecordDecl(this);
+ // For RecordDecl the ODRHash is stored in the remaining 26
+ // bit of RecordDeclBits, adjust the hash to accomodate.
+ setODRHash(Hash.CalculateHash() >> 6);
+ return RecordDeclBits.ODRHash;
+}
+
//===----------------------------------------------------------------------===//
// BlockDecl Implementation
//===----------------------------------------------------------------------===//
@@ -4823,6 +5298,12 @@ bool ValueDecl::isWeak() const {
MostRecent->hasAttr<WeakRefAttr>() || isWeakImported();
}
+bool ValueDecl::isInitCapture() const {
+ if (auto *Var = llvm::dyn_cast<VarDecl>(this))
+ return Var->isInitCapture();
+ return false;
+}
+
void ImplicitParamDecl::anchor() {}
ImplicitParamDecl *ImplicitParamDecl::Create(ASTContext &C, DeclContext *DC,
@@ -4842,18 +5323,16 @@ ImplicitParamDecl *ImplicitParamDecl::CreateDeserialized(ASTContext &C,
return new (C, ID) ImplicitParamDecl(C, QualType(), ImplicitParamKind::Other);
}
-FunctionDecl *FunctionDecl::Create(ASTContext &C, DeclContext *DC,
- SourceLocation StartLoc,
- const DeclarationNameInfo &NameInfo,
- QualType T, TypeSourceInfo *TInfo,
- StorageClass SC, bool isInlineSpecified,
- bool hasWrittenPrototype,
- ConstexprSpecKind ConstexprKind,
- Expr *TrailingRequiresClause) {
- FunctionDecl *New =
- new (C, DC) FunctionDecl(Function, C, DC, StartLoc, NameInfo, T, TInfo,
- SC, isInlineSpecified, ConstexprKind,
- TrailingRequiresClause);
+FunctionDecl *
+FunctionDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo, QualType T,
+ TypeSourceInfo *TInfo, StorageClass SC, bool UsesFPIntrin,
+ bool isInlineSpecified, bool hasWrittenPrototype,
+ ConstexprSpecKind ConstexprKind,
+ Expr *TrailingRequiresClause) {
+ FunctionDecl *New = new (C, DC) FunctionDecl(
+ Function, C, DC, StartLoc, NameInfo, T, TInfo, SC, UsesFPIntrin,
+ isInlineSpecified, ConstexprKind, TrailingRequiresClause);
New->setHasWrittenPrototype(hasWrittenPrototype);
return New;
}
@@ -4861,7 +5340,7 @@ FunctionDecl *FunctionDecl::Create(ASTContext &C, DeclContext *DC,
FunctionDecl *FunctionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) FunctionDecl(
Function, C, nullptr, SourceLocation(), DeclarationNameInfo(), QualType(),
- nullptr, SC_None, false, ConstexprSpecKind::Unspecified, nullptr);
+ nullptr, SC_None, false, false, ConstexprSpecKind::Unspecified, nullptr);
}
BlockDecl *BlockDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) {
@@ -4894,16 +5373,23 @@ void CapturedDecl::setBody(Stmt *B) { BodyAndNothrow.setPointer(B); }
bool CapturedDecl::isNothrow() const { return BodyAndNothrow.getInt(); }
void CapturedDecl::setNothrow(bool Nothrow) { BodyAndNothrow.setInt(Nothrow); }
+EnumConstantDecl::EnumConstantDecl(const ASTContext &C, DeclContext *DC,
+ SourceLocation L, IdentifierInfo *Id,
+ QualType T, Expr *E, const llvm::APSInt &V)
+ : ValueDecl(EnumConstant, DC, L, Id, T), Init((Stmt *)E) {
+ setInitVal(C, V);
+}
+
EnumConstantDecl *EnumConstantDecl::Create(ASTContext &C, EnumDecl *CD,
SourceLocation L,
IdentifierInfo *Id, QualType T,
Expr *E, const llvm::APSInt &V) {
- return new (C, CD) EnumConstantDecl(CD, L, Id, T, E, V);
+ return new (C, CD) EnumConstantDecl(C, CD, L, Id, T, E, V);
}
EnumConstantDecl *
EnumConstantDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- return new (C, ID) EnumConstantDecl(nullptr, SourceLocation(), nullptr,
+ return new (C, ID) EnumConstantDecl(C, nullptr, SourceLocation(), nullptr,
QualType(), nullptr, llvm::APSInt());
}
@@ -4930,8 +5416,9 @@ IndirectFieldDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
IndirectFieldDecl *IndirectFieldDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- return new (C, ID) IndirectFieldDecl(C, nullptr, SourceLocation(),
- DeclarationName(), QualType(), None);
+ return new (C, ID)
+ IndirectFieldDecl(C, nullptr, SourceLocation(), DeclarationName(),
+ QualType(), std::nullopt);
}
SourceRange EnumConstantDecl::getSourceRange() const {
@@ -5036,6 +5523,29 @@ FileScopeAsmDecl *FileScopeAsmDecl::CreateDeserialized(ASTContext &C,
SourceLocation());
}
+void TopLevelStmtDecl::anchor() {}
+
+TopLevelStmtDecl *TopLevelStmtDecl::Create(ASTContext &C, Stmt *Statement) {
+ assert(Statement);
+ assert(C.getLangOpts().IncrementalExtensions &&
+ "Must be used only in incremental mode");
+
+ SourceLocation BeginLoc = Statement->getBeginLoc();
+ DeclContext *DC = C.getTranslationUnitDecl();
+
+ return new (C, DC) TopLevelStmtDecl(DC, BeginLoc, Statement);
+}
+
+TopLevelStmtDecl *TopLevelStmtDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID)
+ TopLevelStmtDecl(/*DC=*/nullptr, SourceLocation(), /*S=*/nullptr);
+}
+
+SourceRange TopLevelStmtDecl::getSourceRange() const {
+ return SourceRange(getLocation(), Statement->getEndLoc());
+}
+
void EmptyDecl::anchor() {}
EmptyDecl *EmptyDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) {
@@ -5046,6 +5556,40 @@ EmptyDecl *EmptyDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) EmptyDecl(nullptr, SourceLocation());
}
+HLSLBufferDecl::HLSLBufferDecl(DeclContext *DC, bool CBuffer,
+ SourceLocation KwLoc, IdentifierInfo *ID,
+ SourceLocation IDLoc, SourceLocation LBrace)
+ : NamedDecl(Decl::Kind::HLSLBuffer, DC, IDLoc, DeclarationName(ID)),
+ DeclContext(Decl::Kind::HLSLBuffer), LBraceLoc(LBrace), KwLoc(KwLoc),
+ IsCBuffer(CBuffer) {}
+
+HLSLBufferDecl *HLSLBufferDecl::Create(ASTContext &C,
+ DeclContext *LexicalParent, bool CBuffer,
+ SourceLocation KwLoc, IdentifierInfo *ID,
+ SourceLocation IDLoc,
+ SourceLocation LBrace) {
+ // For hlsl like this
+ // cbuffer A {
+ // cbuffer B {
+ // }
+ // }
+ // compiler should treat it as
+ // cbuffer A {
+ // }
+ // cbuffer B {
+ // }
+ // FIXME: support nested buffers if required for back-compat.
+ DeclContext *DC = LexicalParent;
+ HLSLBufferDecl *Result =
+ new (C, DC) HLSLBufferDecl(DC, CBuffer, KwLoc, ID, IDLoc, LBrace);
+ return Result;
+}
+
+HLSLBufferDecl *HLSLBufferDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) HLSLBufferDecl(nullptr, false, SourceLocation(), nullptr,
+ SourceLocation(), SourceLocation());
+}
+
//===----------------------------------------------------------------------===//
// ImportDecl Implementation
//===----------------------------------------------------------------------===//
@@ -5105,11 +5649,11 @@ ImportDecl *ImportDecl::CreateDeserialized(ASTContext &C, unsigned ID,
ArrayRef<SourceLocation> ImportDecl::getIdentifierLocs() const {
if (!isImportComplete())
- return None;
+ return std::nullopt;
const auto *StoredLocs = getTrailingObjects<SourceLocation>();
- return llvm::makeArrayRef(StoredLocs,
- getNumModuleIdentifiers(getImportedModule()));
+ return llvm::ArrayRef(StoredLocs,
+ getNumModuleIdentifiers(getImportedModule()));
}
SourceRange ImportDecl::getSourceRange() const {
diff --git a/contrib/llvm-project/clang/lib/AST/DeclBase.cpp b/contrib/llvm-project/clang/lib/AST/DeclBase.cpp
index 3467da2b549e..6b3c13ff206d 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclBase.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclBase.cpp
@@ -29,7 +29,7 @@
#include "clang/AST/Type.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
-#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/Module.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
@@ -152,6 +152,15 @@ void Decl::setInvalidDecl(bool Invalid) {
}
}
+bool DeclContext::hasValidDeclKind() const {
+ switch (getDeclKind()) {
+#define DECL(DERIVED, BASE) case Decl::DERIVED: return true;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+ }
+ return false;
+}
+
const char *DeclContext::getDeclKindName() const {
switch (getDeclKind()) {
#define DECL(DERIVED, BASE) case Decl::DERIVED: return #DERIVED;
@@ -252,12 +261,12 @@ const TemplateParameterList *Decl::getDescribedTemplateParams() const {
bool Decl::isTemplated() const {
// A declaration is templated if it is a template or a template pattern, or
- // is within (lexcially for a friend, semantically otherwise) a dependent
- // context.
- // FIXME: Should local extern declarations be treated like friends?
+ // is within (lexcially for a friend or local function declaration,
+ // semantically otherwise) a dependent context.
if (auto *AsDC = dyn_cast<DeclContext>(this))
return AsDC->isDependentContext();
- auto *DC = getFriendObjectKind() ? getLexicalDeclContext() : getDeclContext();
+ auto *DC = getFriendObjectKind() || isLocalExternDecl()
+ ? getLexicalDeclContext() : getDeclContext();
return DC->isDependentContext() || isTemplateDecl() ||
getDescribedTemplateParams();
}
@@ -283,10 +292,10 @@ unsigned Decl::getTemplateDepth() const {
return cast<Decl>(DC)->getTemplateDepth();
}
-const DeclContext *Decl::getParentFunctionOrMethod() const {
- for (const DeclContext *DC = getDeclContext();
- DC && !DC->isTranslationUnit() && !DC->isNamespace();
- DC = DC->getParent())
+const DeclContext *Decl::getParentFunctionOrMethod(bool LexicalParent) const {
+ for (const DeclContext *DC = LexicalParent ? getLexicalDeclContext()
+ : getDeclContext();
+ DC && !DC->isFileContext(); DC = DC->getParent())
if (DC->isFunctionOrMethod())
return DC;
@@ -396,6 +405,84 @@ bool Decl::isInStdNamespace() const {
return DC && DC->isStdNamespace();
}
+bool Decl::isFileContextDecl() const {
+ const auto *DC = dyn_cast<DeclContext>(this);
+ return DC && DC->isFileContext();
+}
+
+bool Decl::isFlexibleArrayMemberLike(
+ ASTContext &Ctx, const Decl *D, QualType Ty,
+ LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel,
+ bool IgnoreTemplateOrMacroSubstitution) {
+ // For compatibility with existing code, we treat arrays of length 0 or
+ // 1 as flexible array members.
+ const auto *CAT = Ctx.getAsConstantArrayType(Ty);
+ if (CAT) {
+ using FAMKind = LangOptions::StrictFlexArraysLevelKind;
+
+ llvm::APInt Size = CAT->getSize();
+ if (StrictFlexArraysLevel == FAMKind::IncompleteOnly)
+ return false;
+
+ // GCC extension, only allowed to represent a FAM.
+ if (Size.isZero())
+ return true;
+
+ if (StrictFlexArraysLevel == FAMKind::ZeroOrIncomplete && Size.uge(1))
+ return false;
+
+ if (StrictFlexArraysLevel == FAMKind::OneZeroOrIncomplete && Size.uge(2))
+ return false;
+ } else if (!Ctx.getAsIncompleteArrayType(Ty)) {
+ return false;
+ }
+
+ if (const auto *OID = dyn_cast_if_present<ObjCIvarDecl>(D))
+ return OID->getNextIvar() == nullptr;
+
+ const auto *FD = dyn_cast_if_present<FieldDecl>(D);
+ if (!FD)
+ return false;
+
+ if (CAT) {
+ // GCC treats an array memeber of a union as an FAM if the size is one or
+ // zero.
+ llvm::APInt Size = CAT->getSize();
+ if (FD->getParent()->isUnion() && (Size.isZero() || Size.isOne()))
+ return true;
+ }
+
+ // Don't consider sizes resulting from macro expansions or template argument
+ // substitution to form C89 tail-padded arrays.
+ if (IgnoreTemplateOrMacroSubstitution) {
+ TypeSourceInfo *TInfo = FD->getTypeSourceInfo();
+ while (TInfo) {
+ TypeLoc TL = TInfo->getTypeLoc();
+
+ // Look through typedefs.
+ if (TypedefTypeLoc TTL = TL.getAsAdjusted<TypedefTypeLoc>()) {
+ const TypedefNameDecl *TDL = TTL.getTypedefNameDecl();
+ TInfo = TDL->getTypeSourceInfo();
+ continue;
+ }
+
+ if (auto CTL = TL.getAs<ConstantArrayTypeLoc>()) {
+ if (const Expr *SizeExpr =
+ dyn_cast_if_present<IntegerLiteral>(CTL.getSizeExpr());
+ !SizeExpr || SizeExpr->getExprLoc().isMacroID())
+ return false;
+ }
+
+ break;
+ }
+ }
+
+ // Test that the field is the last in the structure.
+ RecordDecl::field_iterator FI(
+ DeclContext::decl_iterator(const_cast<FieldDecl *>(FD)));
+ return ++FI == FD->getParent()->field_end();
+}
+
TranslationUnitDecl *Decl::getTranslationUnitDecl() {
if (auto *TUD = dyn_cast<TranslationUnitDecl>(this))
return TUD;
@@ -749,6 +836,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case ObjCMethod:
case ObjCProperty:
case MSProperty:
+ case HLSLBuffer:
return IDNS_Ordinary;
case Label:
return IDNS_Label;
@@ -828,6 +916,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case LinkageSpec:
case Export:
case FileScopeAsm:
+ case TopLevelStmt:
case StaticAssert:
case ObjCPropertyImpl:
case PragmaComment:
@@ -838,13 +927,13 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case ExternCContext:
case Decomposition:
case MSGuid:
+ case UnnamedGlobalConstant:
case TemplateParamObject:
case UsingDirective:
case BuiltinTemplate:
case ClassTemplateSpecialization:
case ClassTemplatePartialSpecialization:
- case ClassScopeFunctionSpecialization:
case VarTemplateSpecialization:
case VarTemplatePartialSpecialization:
case ObjCImplementation:
@@ -858,6 +947,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case Empty:
case LifetimeExtendedTemporary:
case RequiresExprBody:
+ case ImplicitConceptSpecialization:
// Never looked up by name.
return 0;
}
@@ -912,20 +1002,14 @@ const AttrVec &Decl::getAttrs() const {
Decl *Decl::castFromDeclContext (const DeclContext *D) {
Decl::Kind DK = D->getDeclKind();
- switch(DK) {
+ switch (DK) {
#define DECL(NAME, BASE)
-#define DECL_CONTEXT(NAME) \
- case Decl::NAME: \
- return static_cast<NAME##Decl *>(const_cast<DeclContext *>(D));
-#define DECL_CONTEXT_BASE(NAME)
+#define DECL_CONTEXT(NAME) \
+ case Decl::NAME: \
+ return static_cast<NAME##Decl *>(const_cast<DeclContext *>(D));
#include "clang/AST/DeclNodes.inc"
- default:
-#define DECL(NAME, BASE)
-#define DECL_CONTEXT_BASE(NAME) \
- if (DK >= first##NAME && DK <= last##NAME) \
- return static_cast<NAME##Decl *>(const_cast<DeclContext *>(D));
-#include "clang/AST/DeclNodes.inc"
- llvm_unreachable("a decl that inherits DeclContext isn't handled");
+ default:
+ llvm_unreachable("a decl that inherits DeclContext isn't handled");
}
}
@@ -933,18 +1017,12 @@ DeclContext *Decl::castToDeclContext(const Decl *D) {
Decl::Kind DK = D->getKind();
switch(DK) {
#define DECL(NAME, BASE)
-#define DECL_CONTEXT(NAME) \
- case Decl::NAME: \
- return static_cast<NAME##Decl *>(const_cast<Decl *>(D));
-#define DECL_CONTEXT_BASE(NAME)
+#define DECL_CONTEXT(NAME) \
+ case Decl::NAME: \
+ return static_cast<NAME##Decl *>(const_cast<Decl *>(D));
#include "clang/AST/DeclNodes.inc"
- default:
-#define DECL(NAME, BASE)
-#define DECL_CONTEXT_BASE(NAME) \
- if (DK >= first##NAME && DK <= last##NAME) \
- return static_cast<NAME##Decl *>(const_cast<Decl *>(D));
-#include "clang/AST/DeclNodes.inc"
- llvm_unreachable("a decl that inherits DeclContext isn't handled");
+ default:
+ llvm_unreachable("a decl that inherits DeclContext isn't handled");
}
}
@@ -964,7 +1042,7 @@ SourceLocation Decl::getBodyRBrace() const {
return {};
}
-bool Decl::AccessDeclContextSanity() const {
+bool Decl::AccessDeclContextCheck() const {
#ifndef NDEBUG
// Suppress this check if any of the following hold:
// 1. this is the translation unit (and thus has no parent)
@@ -984,9 +1062,7 @@ bool Decl::AccessDeclContextSanity() const {
isa<ParmVarDecl>(this) ||
// FIXME: a ClassTemplateSpecialization or CXXRecordDecl can have
// AS_none as access specifier.
- isa<CXXRecordDecl>(this) ||
- isa<ClassScopeFunctionSpecializationDecl>(this) ||
- isa<LifetimeExtendedTemporaryDecl>(this))
+ isa<CXXRecordDecl>(this) || isa<LifetimeExtendedTemporaryDecl>(this))
return true;
assert(Access != AS_none &&
@@ -995,6 +1071,42 @@ bool Decl::AccessDeclContextSanity() const {
return true;
}
+bool Decl::isInExportDeclContext() const {
+ const DeclContext *DC = getLexicalDeclContext();
+
+ while (DC && !isa<ExportDecl>(DC))
+ DC = DC->getLexicalParent();
+
+ return DC && isa<ExportDecl>(DC);
+}
+
+bool Decl::isInAnotherModuleUnit() const {
+ auto *M = getOwningModule();
+
+ if (!M)
+ return false;
+
+ M = M->getTopLevelModule();
+ // FIXME: It is problematic if the header module lives in another module
+ // unit. Consider to fix this by techniques like
+ // ExternalASTSource::hasExternalDefinitions.
+ if (M->isHeaderLikeModule())
+ return false;
+
+ // A global module without parent implies that we're parsing the global
+ // module. So it can't be in another module unit.
+ if (M->isGlobalModule())
+ return false;
+
+ assert(M->isNamedModule() && "New module kind?");
+ return M != getASTContext().getCurrentNamedModule();
+}
+
+bool Decl::shouldSkipCheckingODR() const {
+ return getASTContext().getLangOpts().SkipODRCheckInGMF && getOwningModule() &&
+ getOwningModule()->isExplicitGlobalModule();
+}
+
static Decl::Kind getKind(const Decl *D) { return D->getKind(); }
static Decl::Kind getKind(const DeclContext *DC) { return DC->getDeclKind(); }
@@ -1021,6 +1133,23 @@ const FunctionType *Decl::getFunctionType(bool BlocksToo) const {
return Ty->getAs<FunctionType>();
}
+bool Decl::isFunctionPointerType() const {
+ QualType Ty;
+ if (const auto *D = dyn_cast<ValueDecl>(this))
+ Ty = D->getType();
+ else if (const auto *D = dyn_cast<TypedefNameDecl>(this))
+ Ty = D->getUnderlyingType();
+ else
+ return false;
+
+ return Ty.getCanonicalType()->isFunctionPointerType();
+}
+
+DeclContext *Decl::getNonTransparentDeclContext() {
+ assert(getDeclContext());
+ return getDeclContext()->getNonTransparentContext();
+}
+
/// Starting at a given context (a Decl or DeclContext), look for a
/// code context that is not a closure (a lambda, block, etc.).
template <class T> static Decl *getNonClosureContext(T *D) {
@@ -1065,20 +1194,14 @@ DeclContext::DeclContext(Decl::Kind K) {
}
bool DeclContext::classof(const Decl *D) {
- switch (D->getKind()) {
+ Decl::Kind DK = D->getKind();
+ switch (DK) {
#define DECL(NAME, BASE)
#define DECL_CONTEXT(NAME) case Decl::NAME:
-#define DECL_CONTEXT_BASE(NAME)
-#include "clang/AST/DeclNodes.inc"
- return true;
- default:
-#define DECL(NAME, BASE)
-#define DECL_CONTEXT_BASE(NAME) \
- if (D->getKind() >= Decl::first##NAME && \
- D->getKind() <= Decl::last##NAME) \
- return true;
#include "clang/AST/DeclNodes.inc"
- return false;
+ return true;
+ default:
+ return false;
}
}
@@ -1152,6 +1275,8 @@ bool DeclContext::isDependentContext() const {
if (Record->isDependentLambda())
return true;
+ if (Record->isNeverDependentLambda())
+ return false;
}
if (const auto *Function = dyn_cast<FunctionDecl>(this)) {
@@ -1175,11 +1300,11 @@ bool DeclContext::isTransparentContext() const {
if (getDeclKind() == Decl::Enum)
return !cast<EnumDecl>(this)->isScoped();
- return getDeclKind() == Decl::LinkageSpec || getDeclKind() == Decl::Export;
+ return isa<LinkageSpecDecl, ExportDecl, HLSLBufferDecl>(this);
}
static bool isLinkageSpecContext(const DeclContext *DC,
- LinkageSpecDecl::LanguageIDs ID) {
+ LinkageSpecLanguageIDs ID) {
while (DC->getDeclKind() != Decl::TranslationUnit) {
if (DC->getDeclKind() == Decl::LinkageSpec)
return cast<LinkageSpecDecl>(DC)->getLanguage() == ID;
@@ -1189,14 +1314,14 @@ static bool isLinkageSpecContext(const DeclContext *DC,
}
bool DeclContext::isExternCContext() const {
- return isLinkageSpecContext(this, LinkageSpecDecl::lang_c);
+ return isLinkageSpecContext(this, LinkageSpecLanguageIDs::C);
}
const LinkageSpecDecl *DeclContext::getExternCContext() const {
const DeclContext *DC = this;
while (DC->getDeclKind() != Decl::TranslationUnit) {
if (DC->getDeclKind() == Decl::LinkageSpec &&
- cast<LinkageSpecDecl>(DC)->getLanguage() == LinkageSpecDecl::lang_c)
+ cast<LinkageSpecDecl>(DC)->getLanguage() == LinkageSpecLanguageIDs::C)
return cast<LinkageSpecDecl>(DC);
DC = DC->getLexicalParent();
}
@@ -1204,7 +1329,7 @@ const LinkageSpecDecl *DeclContext::getExternCContext() const {
}
bool DeclContext::isExternCXXContext() const {
- return isLinkageSpecContext(this, LinkageSpecDecl::lang_cxx);
+ return isLinkageSpecContext(this, LinkageSpecLanguageIDs::CXX);
}
bool DeclContext::Encloses(const DeclContext *DC) const {
@@ -1212,11 +1337,21 @@ bool DeclContext::Encloses(const DeclContext *DC) const {
return getPrimaryContext()->Encloses(DC);
for (; DC; DC = DC->getParent())
- if (DC->getPrimaryContext() == this)
+ if (!isa<LinkageSpecDecl>(DC) && !isa<ExportDecl>(DC) &&
+ DC->getPrimaryContext() == this)
return true;
return false;
}
+DeclContext *DeclContext::getNonTransparentContext() {
+ DeclContext *DC = this;
+ while (DC->isTransparentContext()) {
+ DC = DC->getParent();
+ assert(DC && "All transparent contexts should have a parent!");
+ }
+ return DC;
+}
+
DeclContext *DeclContext::getPrimaryContext() {
switch (getDeclKind()) {
case Decl::ExternCContext:
@@ -1230,6 +1365,15 @@ DeclContext *DeclContext::getPrimaryContext() {
// There is only one DeclContext for these entities.
return this;
+ case Decl::HLSLBuffer:
+ // Each buffer, even with the same name, is a distinct construct.
+ // Multiple buffers with the same name are allowed for backward
+ // compatibility.
+ // As long as buffers have unique resource bindings the names don't matter.
+ // The names get exposed via the CPU-side reflection API which
+ // supports querying bindings, so we cannot remove them.
+ return this;
+
case Decl::TranslationUnit:
return static_cast<TranslationUnitDecl *>(this)->getFirstDecl();
case Decl::Namespace:
@@ -1515,7 +1659,11 @@ void DeclContext::removeDecl(Decl *D) {
if (Map) {
StoredDeclsMap::iterator Pos = Map->find(ND->getDeclName());
assert(Pos != Map->end() && "no lookup entry for decl");
- Pos->second.remove(ND);
+ StoredDeclsList &List = Pos->second;
+ List.remove(ND);
+ // Clean up the entry if there are no more decls.
+ if (List.isNull())
+ Map->erase(Pos);
}
} while (DC->isTransparentContext() && (DC = DC->getParent()));
}
@@ -1634,9 +1782,9 @@ void DeclContext::buildLookupImpl(DeclContext *DCtx, bool Internal) {
DeclContext::lookup_result
DeclContext::lookup(DeclarationName Name) const {
- assert(getDeclKind() != Decl::LinkageSpec &&
- getDeclKind() != Decl::Export &&
- "should not perform lookups into transparent contexts");
+ // For transparent DeclContext, we should lookup in their enclosing context.
+ if (getDeclKind() == Decl::LinkageSpec || getDeclKind() == Decl::Export)
+ return getParent()->lookup(Name);
const DeclContext *PrimaryContext = getPrimaryContext();
if (PrimaryContext != this)
@@ -1739,7 +1887,8 @@ void DeclContext::localUncachedLookup(DeclarationName Name,
if (!hasExternalVisibleStorage() && !hasExternalLexicalStorage() && Name) {
lookup_result LookupResults = lookup(Name);
Results.insert(Results.end(), LookupResults.begin(), LookupResults.end());
- return;
+ if (!Results.empty())
+ return;
}
// If we have a lookup table, check there first. Maybe we'll get lucky.
@@ -1953,6 +2102,7 @@ void ASTContext::ReleaseDeclContextMaps() {
// pointer because the subclass doesn't add anything that needs to
// be deleted.
StoredDeclsMap::DestroyAll(LastSDM.getPointer(), LastSDM.getInt());
+ LastSDM.setPointer(nullptr);
}
void StoredDeclsMap::DestroyAll(StoredDeclsMap *Map, bool Dependent) {
diff --git a/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp b/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp
index aeee35d9c74f..117e802dae2d 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp
@@ -36,7 +36,7 @@
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
-#include "llvm/ADT/None.h"
+#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
@@ -79,10 +79,9 @@ CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D)
HasBasesWithFields(false), HasBasesWithNonStaticDataMembers(false),
HasPrivateFields(false), HasProtectedFields(false),
HasPublicFields(false), HasMutableFields(false), HasVariantMembers(false),
- HasOnlyCMembers(true), HasInClassInitializer(false),
+ HasOnlyCMembers(true), HasInitMethod(false), HasInClassInitializer(false),
HasUninitializedReferenceMember(false), HasUninitializedFields(false),
- HasInheritedConstructor(false),
- HasInheritedDefaultConstructor(false),
+ HasInheritedConstructor(false), HasInheritedDefaultConstructor(false),
HasInheritedAssignment(false),
NeedOverloadResolutionForCopyConstructor(false),
NeedOverloadResolutionForMoveConstructor(false),
@@ -147,25 +146,25 @@ CXXRecordDecl *CXXRecordDecl::Create(const ASTContext &C, TagKind TK,
CXXRecordDecl *
CXXRecordDecl::CreateLambda(const ASTContext &C, DeclContext *DC,
TypeSourceInfo *Info, SourceLocation Loc,
- bool Dependent, bool IsGeneric,
+ unsigned DependencyKind, bool IsGeneric,
LambdaCaptureDefault CaptureDefault) {
- auto *R = new (C, DC) CXXRecordDecl(CXXRecord, TTK_Class, C, DC, Loc, Loc,
- nullptr, nullptr);
+ auto *R = new (C, DC) CXXRecordDecl(CXXRecord, TagTypeKind::Class, C, DC, Loc,
+ Loc, nullptr, nullptr);
R->setBeingDefined(true);
- R->DefinitionData =
- new (C) struct LambdaDefinitionData(R, Info, Dependent, IsGeneric,
- CaptureDefault);
+ R->DefinitionData = new (C) struct LambdaDefinitionData(
+ R, Info, DependencyKind, IsGeneric, CaptureDefault);
R->setMayHaveOutOfDateDef(false);
R->setImplicit(true);
+
C.getTypeDeclType(R, /*PrevDecl=*/nullptr);
return R;
}
CXXRecordDecl *
CXXRecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
- auto *R = new (C, ID) CXXRecordDecl(
- CXXRecord, TTK_Struct, C, nullptr, SourceLocation(), SourceLocation(),
- nullptr, nullptr);
+ auto *R = new (C, ID)
+ CXXRecordDecl(CXXRecord, TagTypeKind::Struct, C, nullptr,
+ SourceLocation(), SourceLocation(), nullptr, nullptr);
R->setMayHaveOutOfDateDef(false);
return R;
}
@@ -178,6 +177,8 @@ static bool hasRepeatedBaseClass(const CXXRecordDecl *StartRD) {
SmallVector<const CXXRecordDecl*, 8> WorkList = {StartRD};
while (!WorkList.empty()) {
const CXXRecordDecl *RD = WorkList.pop_back_val();
+ if (RD->getTypeForDecl()->isDependentType())
+ continue;
for (const CXXBaseSpecifier &BaseSpec : RD->bases()) {
if (const CXXRecordDecl *B = BaseSpec.getType()->getAsCXXRecordDecl()) {
if (!SeenBaseTypes.insert(B).second)
@@ -445,8 +446,8 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
setHasVolatileMember(true);
if (BaseClassDecl->getArgPassingRestrictions() ==
- RecordDecl::APK_CanNeverPassInRegs)
- setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs);
+ RecordArgPassingKind::CanNeverPassInRegs)
+ setArgPassingRestrictions(RecordArgPassingKind::CanNeverPassInRegs);
// Keep track of the presence of mutable fields.
if (BaseClassDecl->hasMutableFields())
@@ -586,6 +587,19 @@ bool CXXRecordDecl::isTriviallyCopyable() const {
return true;
}
+bool CXXRecordDecl::isTriviallyCopyConstructible() const {
+
+ // A trivially copy constructible class is a class that:
+ // -- has no non-trivial copy constructors,
+ if (hasNonTrivialCopyConstructor())
+ return false;
+ // -- has a trivial destructor.
+ if (!hasTrivialDestructor())
+ return false;
+
+ return true;
+}
+
void CXXRecordDecl::markedVirtualFunctionPure() {
// C++ [class.abstract]p2:
// A class is abstract if it has at least one pure virtual function.
@@ -685,17 +699,16 @@ bool CXXRecordDecl::lambdaIsDefaultConstructibleAndAssignable() const {
// C++17 [expr.prim.lambda]p21:
// The closure type associated with a lambda-expression has no default
// constructor and a deleted copy assignment operator.
- if (getLambdaCaptureDefault() != LCD_None || capture_size() != 0)
+ if (!isCapturelessLambda())
return false;
return getASTContext().getLangOpts().CPlusPlus20;
}
void CXXRecordDecl::addedMember(Decl *D) {
- if (!D->isImplicit() &&
- !isa<FieldDecl>(D) &&
- !isa<IndirectFieldDecl>(D) &&
- (!isa<TagDecl>(D) || cast<TagDecl>(D)->getTagKind() == TTK_Class ||
- cast<TagDecl>(D)->getTagKind() == TTK_Interface))
+ if (!D->isImplicit() && !isa<FieldDecl>(D) && !isa<IndirectFieldDecl>(D) &&
+ (!isa<TagDecl>(D) ||
+ cast<TagDecl>(D)->getTagKind() == TagTypeKind::Class ||
+ cast<TagDecl>(D)->getTagKind() == TagTypeKind::Interface))
data().HasOnlyCMembers = false;
// Ignore friends and invalid declarations.
@@ -767,12 +780,16 @@ void CXXRecordDecl::addedMember(Decl *D) {
// Note that we have a user-declared constructor.
data().UserDeclaredConstructor = true;
- // C++ [class]p4:
- // A POD-struct is an aggregate class [...]
- // Since the POD bit is meant to be C++03 POD-ness, clear it even if
- // the type is technically an aggregate in C++0x since it wouldn't be
- // in 03.
- data().PlainOldData = false;
+ const TargetInfo &TI = getASTContext().getTargetInfo();
+ if ((!Constructor->isDeleted() && !Constructor->isDefaulted()) ||
+ !TI.areDefaultedSMFStillPOD(getLangOpts())) {
+ // C++ [class]p4:
+ // A POD-struct is an aggregate class [...]
+ // Since the POD bit is meant to be C++03 POD-ness, clear it even if
+ // the type is technically an aggregate in C++0x since it wouldn't be
+ // in 03.
+ data().PlainOldData = false;
+ }
}
if (Constructor->isDefaultConstructor()) {
@@ -824,34 +841,16 @@ void CXXRecordDecl::addedMember(Decl *D) {
data().HasInheritedDefaultConstructor = true;
}
- // Handle destructors.
- if (const auto *DD = dyn_cast<CXXDestructorDecl>(D)) {
- SMKind |= SMF_Destructor;
-
- if (DD->isUserProvided())
- data().HasIrrelevantDestructor = false;
- // If the destructor is explicitly defaulted and not trivial or not public
- // or if the destructor is deleted, we clear HasIrrelevantDestructor in
- // finishedDefaultedOrDeletedMember.
-
- // C++11 [class.dtor]p5:
- // A destructor is trivial if [...] the destructor is not virtual.
- if (DD->isVirtual()) {
- data().HasTrivialSpecialMembers &= ~SMF_Destructor;
- data().HasTrivialSpecialMembersForCall &= ~SMF_Destructor;
- }
-
- if (DD->isNoReturn())
- data().IsAnyDestructorNoReturn = true;
- }
-
// Handle member functions.
if (const auto *Method = dyn_cast<CXXMethodDecl>(D)) {
+ if (isa<CXXDestructorDecl>(D))
+ SMKind |= SMF_Destructor;
+
if (Method->isCopyAssignmentOperator()) {
SMKind |= SMF_CopyAssignment;
const auto *ParamTy =
- Method->getParamDecl(0)->getType()->getAs<ReferenceType>();
+ Method->getNonObjectParameter(0)->getType()->getAs<ReferenceType>();
if (!ParamTy || ParamTy->getPointeeType().isConstQualified())
data().HasDeclaredCopyAssignmentWithConstParam = true;
}
@@ -892,46 +891,38 @@ void CXXRecordDecl::addedMember(Decl *D) {
data().HasTrivialSpecialMembersForCall &=
data().DeclaredSpecialMembers | ~SMKind;
- if (!Method->isImplicit() && !Method->isUserProvided()) {
- // This method is user-declared but not user-provided. We can't work out
- // whether it's trivial yet (not until we get to the end of the class).
- // We'll handle this method in finishedDefaultedOrDeletedMember.
- } else if (Method->isTrivial()) {
- data().HasTrivialSpecialMembers |= SMKind;
- data().HasTrivialSpecialMembersForCall |= SMKind;
- } else if (Method->isTrivialForCall()) {
- data().HasTrivialSpecialMembersForCall |= SMKind;
- data().DeclaredNonTrivialSpecialMembers |= SMKind;
- } else {
- data().DeclaredNonTrivialSpecialMembers |= SMKind;
- // If this is a user-provided function, do not set
- // DeclaredNonTrivialSpecialMembersForCall here since we don't know
- // yet whether the method would be considered non-trivial for the
- // purpose of calls (attribute "trivial_abi" can be dropped from the
- // class later, which can change the special method's triviality).
- if (!Method->isUserProvided())
- data().DeclaredNonTrivialSpecialMembersForCall |= SMKind;
- }
-
// Note when we have declared a declared special member, and suppress the
// implicit declaration of this special member.
data().DeclaredSpecialMembers |= SMKind;
-
if (!Method->isImplicit()) {
data().UserDeclaredSpecialMembers |= SMKind;
- // C++03 [class]p4:
- // A POD-struct is an aggregate class that has [...] no user-defined
- // copy assignment operator and no user-defined destructor.
- //
- // Since the POD bit is meant to be C++03 POD-ness, and in C++03,
- // aggregates could not have any constructors, clear it even for an
- // explicitly defaulted or deleted constructor.
- // type is technically an aggregate in C++0x since it wouldn't be in 03.
- //
- // Also, a user-declared move assignment operator makes a class non-POD.
- // This is an extension in C++03.
- data().PlainOldData = false;
+ const TargetInfo &TI = getASTContext().getTargetInfo();
+ if ((!Method->isDeleted() && !Method->isDefaulted() &&
+ SMKind != SMF_MoveAssignment) ||
+ !TI.areDefaultedSMFStillPOD(getLangOpts())) {
+ // C++03 [class]p4:
+ // A POD-struct is an aggregate class that has [...] no user-defined
+ // copy assignment operator and no user-defined destructor.
+ //
+ // Since the POD bit is meant to be C++03 POD-ness, and in C++03,
+ // aggregates could not have any constructors, clear it even for an
+ // explicitly defaulted or deleted constructor.
+ // type is technically an aggregate in C++0x since it wouldn't be in
+ // 03.
+ //
+ // Also, a user-declared move assignment operator makes a class
+ // non-POD. This is an extension in C++03.
+ data().PlainOldData = false;
+ }
+ }
+ // When instantiating a class, we delay updating the destructor and
+ // triviality properties of the class until selecting a destructor and
+ // computing the eligibility of its special member functions. This is
+ // because there might be function constraints that we need to evaluate
+ // and compare later in the instantiation.
+ if (!Method->isIneligibleOrNotSelected()) {
+ addedEligibleSpecialMemberFunction(Method, SMKind);
}
}
@@ -1053,7 +1044,7 @@ void CXXRecordDecl::addedMember(Decl *D) {
// Structs with __weak fields should never be passed directly.
if (LT == Qualifiers::OCL_Weak)
- setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs);
+ setArgPassingRestrictions(RecordArgPassingKind::CanNeverPassInRegs);
Data.HasIrrelevantDestructor = false;
@@ -1247,8 +1238,8 @@ void CXXRecordDecl::addedMember(Decl *D) {
if (FieldRec->hasVolatileMember())
setHasVolatileMember(true);
if (FieldRec->getArgPassingRestrictions() ==
- RecordDecl::APK_CanNeverPassInRegs)
- setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs);
+ RecordArgPassingKind::CanNeverPassInRegs)
+ setArgPassingRestrictions(RecordArgPassingKind::CanNeverPassInRegs);
// C++0x [class]p7:
// A standard-layout class is a class that:
@@ -1392,6 +1383,83 @@ void CXXRecordDecl::addedMember(Decl *D) {
}
}
+bool CXXRecordDecl::isLiteral() const {
+ const LangOptions &LangOpts = getLangOpts();
+ if (!(LangOpts.CPlusPlus20 ? hasConstexprDestructor()
+ : hasTrivialDestructor()))
+ return false;
+
+ if (hasNonLiteralTypeFieldsOrBases()) {
+ // CWG2598
+ // is an aggregate union type that has either no variant
+ // members or at least one variant member of non-volatile literal type,
+ if (!isUnion())
+ return false;
+ bool HasAtLeastOneLiteralMember =
+ fields().empty() || any_of(fields(), [this](const FieldDecl *D) {
+ return !D->getType().isVolatileQualified() &&
+ D->getType()->isLiteralType(getASTContext());
+ });
+ if (!HasAtLeastOneLiteralMember)
+ return false;
+ }
+
+ return isAggregate() || (isLambda() && LangOpts.CPlusPlus17) ||
+ hasConstexprNonCopyMoveConstructor() || hasTrivialDefaultConstructor();
+}
+
+void CXXRecordDecl::addedSelectedDestructor(CXXDestructorDecl *DD) {
+ DD->setIneligibleOrNotSelected(false);
+ addedEligibleSpecialMemberFunction(DD, SMF_Destructor);
+}
+
+void CXXRecordDecl::addedEligibleSpecialMemberFunction(const CXXMethodDecl *MD,
+ unsigned SMKind) {
+ // FIXME: We shouldn't change DeclaredNonTrivialSpecialMembers if `MD` is
+ // a function template, but this needs CWG attention before we break ABI.
+ // See https://github.com/llvm/llvm-project/issues/59206
+
+ if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ if (DD->isUserProvided())
+ data().HasIrrelevantDestructor = false;
+ // If the destructor is explicitly defaulted and not trivial or not public
+ // or if the destructor is deleted, we clear HasIrrelevantDestructor in
+ // finishedDefaultedOrDeletedMember.
+
+ // C++11 [class.dtor]p5:
+ // A destructor is trivial if [...] the destructor is not virtual.
+ if (DD->isVirtual()) {
+ data().HasTrivialSpecialMembers &= ~SMF_Destructor;
+ data().HasTrivialSpecialMembersForCall &= ~SMF_Destructor;
+ }
+
+ if (DD->isNoReturn())
+ data().IsAnyDestructorNoReturn = true;
+ }
+
+ if (!MD->isImplicit() && !MD->isUserProvided()) {
+ // This method is user-declared but not user-provided. We can't work
+ // out whether it's trivial yet (not until we get to the end of the
+ // class). We'll handle this method in
+ // finishedDefaultedOrDeletedMember.
+ } else if (MD->isTrivial()) {
+ data().HasTrivialSpecialMembers |= SMKind;
+ data().HasTrivialSpecialMembersForCall |= SMKind;
+ } else if (MD->isTrivialForCall()) {
+ data().HasTrivialSpecialMembersForCall |= SMKind;
+ data().DeclaredNonTrivialSpecialMembers |= SMKind;
+ } else {
+ data().DeclaredNonTrivialSpecialMembers |= SMKind;
+ // If this is a user-provided function, do not set
+ // DeclaredNonTrivialSpecialMembersForCall here since we don't know
+ // yet whether the method would be considered non-trivial for the
+ // purpose of calls (attribute "trivial_abi" can be dropped from the
+ // class later, which can change the special method's triviality).
+ if (!MD->isUserProvided())
+ data().DeclaredNonTrivialSpecialMembersForCall |= SMKind;
+ }
+}
+
void CXXRecordDecl::finishedDefaultedOrDeletedMember(CXXMethodDecl *D) {
assert(!D->isImplicit() && !D->isUserProvided());
@@ -1422,10 +1490,21 @@ void CXXRecordDecl::finishedDefaultedOrDeletedMember(CXXMethodDecl *D) {
// Update which trivial / non-trivial special members we have.
// addedMember will have skipped this step for this member.
- if (D->isTrivial())
- data().HasTrivialSpecialMembers |= SMKind;
- else
- data().DeclaredNonTrivialSpecialMembers |= SMKind;
+ if (!D->isIneligibleOrNotSelected()) {
+ if (D->isTrivial())
+ data().HasTrivialSpecialMembers |= SMKind;
+ else
+ data().DeclaredNonTrivialSpecialMembers |= SMKind;
+ }
+}
+
+void CXXRecordDecl::LambdaDefinitionData::AddCaptureList(ASTContext &Ctx,
+ Capture *CaptureList) {
+ Captures.push_back(CaptureList);
+ if (Captures.size() == 2) {
+ // The TinyPtrVector member now needs destruction.
+ Ctx.addDestruction(&Captures);
+ }
}
void CXXRecordDecl::setCaptures(ASTContext &Context,
@@ -1435,14 +1514,15 @@ void CXXRecordDecl::setCaptures(ASTContext &Context,
// Copy captures.
Data.NumCaptures = Captures.size();
Data.NumExplicitCaptures = 0;
- Data.Captures = (LambdaCapture *)Context.Allocate(sizeof(LambdaCapture) *
- Captures.size());
- LambdaCapture *ToCapture = Data.Captures;
+ auto *ToCapture = (LambdaCapture *)Context.Allocate(sizeof(LambdaCapture) *
+ Captures.size());
+ Data.AddCaptureList(Context, ToCapture);
for (unsigned I = 0, N = Captures.size(); I != N; ++I) {
if (Captures[I].isExplicit())
++Data.NumExplicitCaptures;
- *ToCapture++ = Captures[I];
+ new (ToCapture) LambdaCapture(Captures[I]);
+ ToCapture++;
}
if (!lambdaIsDefaultConstructibleAndAssignable())
@@ -1467,7 +1547,8 @@ void CXXRecordDecl::setTrivialForCallFlags(CXXMethodDecl *D) {
}
bool CXXRecordDecl::isCLike() const {
- if (getTagKind() == TTK_Class || getTagKind() == TTK_Interface ||
+ if (getTagKind() == TagTypeKind::Class ||
+ getTagKind() == TagTypeKind::Interface ||
!TemplateOrInstantiation.isNull())
return false;
if (!hasDefinition())
@@ -1555,21 +1636,23 @@ CXXMethodDecl *CXXRecordDecl::getLambdaStaticInvoker(CallingConv CC) const {
}
void CXXRecordDecl::getCaptureFields(
- llvm::DenseMap<const VarDecl *, FieldDecl *> &Captures,
- FieldDecl *&ThisCapture) const {
+ llvm::DenseMap<const ValueDecl *, FieldDecl *> &Captures,
+ FieldDecl *&ThisCapture) const {
Captures.clear();
ThisCapture = nullptr;
LambdaDefinitionData &Lambda = getLambdaData();
- RecordDecl::field_iterator Field = field_begin();
- for (const LambdaCapture *C = Lambda.Captures, *CEnd = C + Lambda.NumCaptures;
- C != CEnd; ++C, ++Field) {
- if (C->capturesThis())
- ThisCapture = *Field;
- else if (C->capturesVariable())
- Captures[C->getCapturedVar()] = *Field;
+ for (const LambdaCapture *List : Lambda.Captures) {
+ RecordDecl::field_iterator Field = field_begin();
+ for (const LambdaCapture *C = List, *CEnd = C + Lambda.NumCaptures;
+ C != CEnd; ++C, ++Field) {
+ if (C->capturesThis())
+ ThisCapture = *Field;
+ else if (C->capturesVariable())
+ Captures[C->getCapturedVar()] = *Field;
+ }
+ assert(Field == field_end());
}
- assert(Field == field_end());
}
TemplateParameterList *
@@ -1593,7 +1676,7 @@ CXXRecordDecl::getLambdaExplicitTemplateParameters() const {
const auto ExplicitEnd = llvm::partition_point(
*List, [](const NamedDecl *D) { return !D->isImplicit(); });
- return llvm::makeArrayRef(List->begin(), ExplicitEnd);
+ return llvm::ArrayRef(List->begin(), ExplicitEnd);
}
Decl *CXXRecordDecl::getLambdaContextDecl() const {
@@ -1602,18 +1685,20 @@ Decl *CXXRecordDecl::getLambdaContextDecl() const {
return getLambdaData().ContextDecl.get(Source);
}
-void CXXRecordDecl::setDeviceLambdaManglingNumber(unsigned Num) const {
+void CXXRecordDecl::setLambdaNumbering(LambdaNumbering Numbering) {
assert(isLambda() && "Not a lambda closure type!");
- if (Num)
- getASTContext().DeviceLambdaManglingNumbers[this] = Num;
+ getLambdaData().ManglingNumber = Numbering.ManglingNumber;
+ if (Numbering.DeviceManglingNumber)
+ getASTContext().DeviceLambdaManglingNumbers[this] =
+ Numbering.DeviceManglingNumber;
+ getLambdaData().IndexInContext = Numbering.IndexInContext;
+ getLambdaData().ContextDecl = Numbering.ContextDecl;
+ getLambdaData().HasKnownInternalLinkage = Numbering.HasKnownInternalLinkage;
}
unsigned CXXRecordDecl::getDeviceLambdaManglingNumber() const {
assert(isLambda() && "Not a lambda closure type!");
- auto I = getASTContext().DeviceLambdaManglingNumbers.find(this);
- if (I != getASTContext().DeviceLambdaManglingNumbers.end())
- return I->second;
- return 0;
+ return getASTContext().DeviceLambdaManglingNumbers.lookup(this);
}
static CanQualType GetConversionType(ASTContext &Context, NamedDecl *Conv) {
@@ -1776,7 +1861,7 @@ void CXXRecordDecl::removeConversion(const NamedDecl *ConvDecl) {
for (unsigned I = 0, E = Convs.size(); I != E; ++I) {
if (Convs[I].getDecl() == ConvDecl) {
Convs.erase(I);
- assert(llvm::find(Convs, ConvDecl) == Convs.end() &&
+ assert(!llvm::is_contained(Convs, ConvDecl) &&
"conversion was found multiple times in unresolved set");
return;
}
@@ -1894,7 +1979,14 @@ CXXDestructorDecl *CXXRecordDecl::getDestructor() const {
DeclContext::lookup_result R = lookup(Name);
- return R.empty() ? nullptr : dyn_cast<CXXDestructorDecl>(R.front());
+ // If a destructor was marked as not selected, we skip it. We don't always
+ // have a selected destructor: dependent types, unnamed structs.
+ for (auto *Decl : R) {
+ auto* DD = dyn_cast<CXXDestructorDecl>(Decl);
+ if (DD && !DD->isIneligibleOrNotSelected())
+ return DD;
+ }
+ return nullptr;
}
static bool isDeclContextInNamespace(const DeclContext *DC) {
@@ -1987,7 +2079,7 @@ void CXXRecordDecl::completeDefinition(CXXFinalOverriderMap *FinalOverriders) {
// A class is abstract if it contains or inherits at least one
// pure virtual function for which the final overrider is pure
// virtual.
- if (SO->second.front().Method->isPure()) {
+ if (SO->second.front().Method->isPureVirtual()) {
data().Abstract = true;
Done = true;
break;
@@ -2059,21 +2151,21 @@ ExplicitSpecifier ExplicitSpecifier::getFromDecl(FunctionDecl *Function) {
}
}
-CXXDeductionGuideDecl *
-CXXDeductionGuideDecl::Create(ASTContext &C, DeclContext *DC,
- SourceLocation StartLoc, ExplicitSpecifier ES,
- const DeclarationNameInfo &NameInfo, QualType T,
- TypeSourceInfo *TInfo, SourceLocation EndLocation,
- CXXConstructorDecl *Ctor) {
+CXXDeductionGuideDecl *CXXDeductionGuideDecl::Create(
+ ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
+ ExplicitSpecifier ES, const DeclarationNameInfo &NameInfo, QualType T,
+ TypeSourceInfo *TInfo, SourceLocation EndLocation, CXXConstructorDecl *Ctor,
+ DeductionCandidate Kind) {
return new (C, DC) CXXDeductionGuideDecl(C, DC, StartLoc, ES, NameInfo, T,
- TInfo, EndLocation, Ctor);
+ TInfo, EndLocation, Ctor, Kind);
}
CXXDeductionGuideDecl *CXXDeductionGuideDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
return new (C, ID) CXXDeductionGuideDecl(
C, nullptr, SourceLocation(), ExplicitSpecifier(), DeclarationNameInfo(),
- QualType(), nullptr, SourceLocation(), nullptr);
+ QualType(), nullptr, SourceLocation(), nullptr,
+ DeductionCandidate::Normal);
}
RequiresExprBodyDecl *RequiresExprBodyDecl::Create(
@@ -2156,12 +2248,9 @@ CXXMethodDecl::getCorrespondingMethodInClass(const CXXRecordDecl *RD,
}
// Other candidate final overriders might be overridden by this function.
- FinalOverriders.erase(
- std::remove_if(FinalOverriders.begin(), FinalOverriders.end(),
- [&](CXXMethodDecl *OtherD) {
- return recursivelyOverrides(D, OtherD);
- }),
- FinalOverriders.end());
+ llvm::erase_if(FinalOverriders, [&](CXXMethodDecl *OtherD) {
+ return recursivelyOverrides(D, OtherD);
+ });
FinalOverriders.push_back(D);
};
@@ -2178,25 +2267,23 @@ CXXMethodDecl::getCorrespondingMethodInClass(const CXXRecordDecl *RD,
return FinalOverriders.size() == 1 ? FinalOverriders.front() : nullptr;
}
-CXXMethodDecl *CXXMethodDecl::Create(ASTContext &C, CXXRecordDecl *RD,
- SourceLocation StartLoc,
- const DeclarationNameInfo &NameInfo,
- QualType T, TypeSourceInfo *TInfo,
- StorageClass SC, bool isInline,
- ConstexprSpecKind ConstexprKind,
- SourceLocation EndLocation,
- Expr *TrailingRequiresClause) {
- return new (C, RD)
- CXXMethodDecl(CXXMethod, C, RD, StartLoc, NameInfo, T, TInfo, SC,
- isInline, ConstexprKind, EndLocation,
- TrailingRequiresClause);
+CXXMethodDecl *
+CXXMethodDecl::Create(ASTContext &C, CXXRecordDecl *RD, SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo, QualType T,
+ TypeSourceInfo *TInfo, StorageClass SC, bool UsesFPIntrin,
+ bool isInline, ConstexprSpecKind ConstexprKind,
+ SourceLocation EndLocation,
+ Expr *TrailingRequiresClause) {
+ return new (C, RD) CXXMethodDecl(
+ CXXMethod, C, RD, StartLoc, NameInfo, T, TInfo, SC, UsesFPIntrin,
+ isInline, ConstexprKind, EndLocation, TrailingRequiresClause);
}
CXXMethodDecl *CXXMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- return new (C, ID)
- CXXMethodDecl(CXXMethod, C, nullptr, SourceLocation(),
- DeclarationNameInfo(), QualType(), nullptr, SC_None, false,
- ConstexprSpecKind::Unspecified, SourceLocation(), nullptr);
+ return new (C, ID) CXXMethodDecl(
+ CXXMethod, C, nullptr, SourceLocation(), DeclarationNameInfo(),
+ QualType(), nullptr, SC_None, false, false,
+ ConstexprSpecKind::Unspecified, SourceLocation(), nullptr);
}
CXXMethodDecl *CXXMethodDecl::getDevirtualizedMethod(const Expr *Base,
@@ -2211,7 +2298,7 @@ CXXMethodDecl *CXXMethodDecl::getDevirtualizedMethod(const Expr *Base,
// If the member function is marked 'final', we know that it can't be
// overridden and can therefore devirtualize it unless it's pure virtual.
if (hasAttr<FinalAttr>())
- return isPure() ? nullptr : this;
+ return isPureVirtual() ? nullptr : this;
// If Base is unknown, we cannot devirtualize.
if (!Base)
@@ -2240,7 +2327,7 @@ CXXMethodDecl *CXXMethodDecl::getDevirtualizedMethod(const Expr *Base,
// If that method is pure virtual, we can't devirtualize. If this code is
// reached, the result would be UB, not a direct call to the derived class
// function, and we can't assume the derived class function is defined.
- if (DevirtualizedMethod->isPure())
+ if (DevirtualizedMethod->isPureVirtual())
return nullptr;
// If that method is marked final, we can devirtualize it.
@@ -2339,7 +2426,7 @@ bool CXXMethodDecl::isUsualDeallocationFunction(
// In C++17 onwards, all potential usual deallocation functions are actual
// usual deallocation functions. Honor this behavior when post-C++14
// deallocation functions are offered as extensions too.
- // FIXME(EricWF): Destrying Delete should be a language option. How do we
+ // FIXME(EricWF): Destroying Delete should be a language option. How do we
// handle when destroying delete is used prior to C++17?
if (Context.getLangOpts().CPlusPlus17 ||
Context.getLangOpts().AlignedAllocation ||
@@ -2361,6 +2448,17 @@ bool CXXMethodDecl::isUsualDeallocationFunction(
return Result;
}
+bool CXXMethodDecl::isExplicitObjectMemberFunction() const {
+ // C++2b [dcl.fct]p6:
+ // An explicit object member function is a non-static member
+ // function with an explicit object parameter
+ return !isStatic() && hasCXXExplicitFunctionObjectParameter();
+}
+
+bool CXXMethodDecl::isImplicitObjectMemberFunction() const {
+ return !isStatic() && !hasCXXExplicitFunctionObjectParameter();
+}
+
bool CXXMethodDecl::isCopyAssignmentOperator() const {
// C++0x [class.copy]p17:
// A user-declared copy assignment operator X::operator= is a non-static
@@ -2368,11 +2466,12 @@ bool CXXMethodDecl::isCopyAssignmentOperator() const {
// type X, X&, const X&, volatile X& or const volatile X&.
if (/*operator=*/getOverloadedOperator() != OO_Equal ||
/*non-static*/ isStatic() ||
- /*non-template*/getPrimaryTemplate() || getDescribedFunctionTemplate() ||
- getNumParams() != 1)
+
+ /*non-template*/ getPrimaryTemplate() || getDescribedFunctionTemplate() ||
+ getNumExplicitParams() != 1)
return false;
- QualType ParamType = getParamDecl(0)->getType();
+ QualType ParamType = getNonObjectParameter(0)->getType();
if (const auto *Ref = ParamType->getAs<LValueReferenceType>())
ParamType = Ref->getPointeeType();
@@ -2389,11 +2488,11 @@ bool CXXMethodDecl::isMoveAssignmentOperator() const {
// X&&, const X&&, volatile X&&, or const volatile X&&.
if (getOverloadedOperator() != OO_Equal || isStatic() ||
getPrimaryTemplate() || getDescribedFunctionTemplate() ||
- getNumParams() != 1)
+ getNumExplicitParams() != 1)
return false;
- QualType ParamType = getParamDecl(0)->getType();
- if (!isa<RValueReferenceType>(ParamType))
+ QualType ParamType = getNonObjectParameter(0)->getType();
+ if (!ParamType->isRValueReferenceType())
return false;
ParamType = ParamType->getPointeeType();
@@ -2444,13 +2543,8 @@ QualType CXXMethodDecl::getThisType(const FunctionProtoType *FPT,
const CXXRecordDecl *Decl) {
ASTContext &C = Decl->getASTContext();
QualType ObjectTy = ::getThisObjectType(C, FPT, Decl);
- return C.getPointerType(ObjectTy);
-}
-
-QualType CXXMethodDecl::getThisObjectType(const FunctionProtoType *FPT,
- const CXXRecordDecl *Decl) {
- ASTContext &C = Decl->getASTContext();
- return ::getThisObjectType(C, FPT, Decl);
+ return C.getLangOpts().HLSL ? C.getLValueReferenceType(ObjectTy)
+ : C.getPointerType(ObjectTy);
}
QualType CXXMethodDecl::getThisType() const {
@@ -2464,11 +2558,17 @@ QualType CXXMethodDecl::getThisType() const {
getParent());
}
-QualType CXXMethodDecl::getThisObjectType() const {
- // Ditto getThisType.
- assert(isInstance() && "No 'this' for static methods!");
- return CXXMethodDecl::getThisObjectType(
- getType()->castAs<FunctionProtoType>(), getParent());
+QualType CXXMethodDecl::getFunctionObjectParameterReferenceType() const {
+ if (isExplicitObjectMemberFunction())
+ return parameters()[0]->getType();
+
+ ASTContext &C = getParentASTContext();
+ const FunctionProtoType *FPT = getType()->castAs<FunctionProtoType>();
+ QualType Type = ::getThisObjectType(C, FPT, getParent());
+ RefQualifierKind RK = FPT->getRefQualifier();
+ if (RK == RefQualifierKind::RQ_RValue)
+ return C.getRValueReferenceType(Type);
+ return C.getLValueReferenceType(Type);
}
bool CXXMethodDecl::hasInlineBody() const {
@@ -2549,7 +2649,7 @@ SourceLocation CXXCtorInitializer::getSourceLocation() const {
return getMemberLocation();
if (const auto *TSInfo = Initializee.get<TypeSourceInfo *>())
- return TSInfo->getTypeLoc().getLocalSourceRange().getBegin();
+ return TSInfo->getTypeLoc().getBeginLoc();
return {};
}
@@ -2568,12 +2668,12 @@ SourceRange CXXCtorInitializer::getSourceRange() const {
CXXConstructorDecl::CXXConstructorDecl(
ASTContext &C, CXXRecordDecl *RD, SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo, QualType T, TypeSourceInfo *TInfo,
- ExplicitSpecifier ES, bool isInline, bool isImplicitlyDeclared,
- ConstexprSpecKind ConstexprKind, InheritedConstructor Inherited,
- Expr *TrailingRequiresClause)
+ ExplicitSpecifier ES, bool UsesFPIntrin, bool isInline,
+ bool isImplicitlyDeclared, ConstexprSpecKind ConstexprKind,
+ InheritedConstructor Inherited, Expr *TrailingRequiresClause)
: CXXMethodDecl(CXXConstructor, C, RD, StartLoc, NameInfo, T, TInfo,
- SC_None, isInline, ConstexprKind, SourceLocation(),
- TrailingRequiresClause) {
+ SC_None, UsesFPIntrin, isInline, ConstexprKind,
+ SourceLocation(), TrailingRequiresClause) {
setNumCtorInitializers(0);
setInheritingConstructor(static_cast<bool>(Inherited));
setImplicit(isImplicitlyDeclared);
@@ -2596,7 +2696,7 @@ CXXConstructorDecl *CXXConstructorDecl::CreateDeserialized(ASTContext &C,
isInheritingConstructor, hasTrailingExplicit);
auto *Result = new (C, ID, Extra) CXXConstructorDecl(
C, nullptr, SourceLocation(), DeclarationNameInfo(), QualType(), nullptr,
- ExplicitSpecifier(), false, false, ConstexprSpecKind::Unspecified,
+ ExplicitSpecifier(), false, false, false, ConstexprSpecKind::Unspecified,
InheritedConstructor(), nullptr);
Result->setInheritingConstructor(isInheritingConstructor);
Result->CXXConstructorDeclBits.HasTrailingExplicitSpecifier =
@@ -2608,19 +2708,18 @@ CXXConstructorDecl *CXXConstructorDecl::CreateDeserialized(ASTContext &C,
CXXConstructorDecl *CXXConstructorDecl::Create(
ASTContext &C, CXXRecordDecl *RD, SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo, QualType T, TypeSourceInfo *TInfo,
- ExplicitSpecifier ES, bool isInline, bool isImplicitlyDeclared,
- ConstexprSpecKind ConstexprKind, InheritedConstructor Inherited,
- Expr *TrailingRequiresClause) {
+ ExplicitSpecifier ES, bool UsesFPIntrin, bool isInline,
+ bool isImplicitlyDeclared, ConstexprSpecKind ConstexprKind,
+ InheritedConstructor Inherited, Expr *TrailingRequiresClause) {
assert(NameInfo.getName().getNameKind()
== DeclarationName::CXXConstructorName &&
"Name must refer to a constructor");
unsigned Extra =
additionalSizeToAlloc<InheritedConstructor, ExplicitSpecifier>(
Inherited ? 1 : 0, ES.getExpr() ? 1 : 0);
- return new (C, RD, Extra)
- CXXConstructorDecl(C, RD, StartLoc, NameInfo, T, TInfo, ES, isInline,
- isImplicitlyDeclared, ConstexprKind, Inherited,
- TrailingRequiresClause);
+ return new (C, RD, Extra) CXXConstructorDecl(
+ C, RD, StartLoc, NameInfo, T, TInfo, ES, UsesFPIntrin, isInline,
+ isImplicitlyDeclared, ConstexprKind, Inherited, TrailingRequiresClause);
}
CXXConstructorDecl::init_const_iterator CXXConstructorDecl::init_begin() const {
@@ -2737,21 +2836,20 @@ CXXDestructorDecl *
CXXDestructorDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) CXXDestructorDecl(
C, nullptr, SourceLocation(), DeclarationNameInfo(), QualType(), nullptr,
- false, false, ConstexprSpecKind::Unspecified, nullptr);
+ false, false, false, ConstexprSpecKind::Unspecified, nullptr);
}
CXXDestructorDecl *CXXDestructorDecl::Create(
ASTContext &C, CXXRecordDecl *RD, SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo, QualType T, TypeSourceInfo *TInfo,
- bool isInline, bool isImplicitlyDeclared, ConstexprSpecKind ConstexprKind,
- Expr *TrailingRequiresClause) {
+ bool UsesFPIntrin, bool isInline, bool isImplicitlyDeclared,
+ ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause) {
assert(NameInfo.getName().getNameKind()
== DeclarationName::CXXDestructorName &&
"Name must refer to a destructor");
- return new (C, RD)
- CXXDestructorDecl(C, RD, StartLoc, NameInfo, T, TInfo, isInline,
- isImplicitlyDeclared, ConstexprKind,
- TrailingRequiresClause);
+ return new (C, RD) CXXDestructorDecl(
+ C, RD, StartLoc, NameInfo, T, TInfo, UsesFPIntrin, isInline,
+ isImplicitlyDeclared, ConstexprKind, TrailingRequiresClause);
}
void CXXDestructorDecl::setOperatorDelete(FunctionDecl *OD, Expr *ThisArg) {
@@ -2770,21 +2868,22 @@ CXXConversionDecl *
CXXConversionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) CXXConversionDecl(
C, nullptr, SourceLocation(), DeclarationNameInfo(), QualType(), nullptr,
- false, ExplicitSpecifier(), ConstexprSpecKind::Unspecified,
+ false, false, ExplicitSpecifier(), ConstexprSpecKind::Unspecified,
SourceLocation(), nullptr);
}
CXXConversionDecl *CXXConversionDecl::Create(
ASTContext &C, CXXRecordDecl *RD, SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo, QualType T, TypeSourceInfo *TInfo,
- bool isInline, ExplicitSpecifier ES, ConstexprSpecKind ConstexprKind,
- SourceLocation EndLocation, Expr *TrailingRequiresClause) {
+ bool UsesFPIntrin, bool isInline, ExplicitSpecifier ES,
+ ConstexprSpecKind ConstexprKind, SourceLocation EndLocation,
+ Expr *TrailingRequiresClause) {
assert(NameInfo.getName().getNameKind()
== DeclarationName::CXXConversionFunctionName &&
"Name must refer to a conversion function");
- return new (C, RD)
- CXXConversionDecl(C, RD, StartLoc, NameInfo, T, TInfo, isInline, ES,
- ConstexprKind, EndLocation, TrailingRequiresClause);
+ return new (C, RD) CXXConversionDecl(
+ C, RD, StartLoc, NameInfo, T, TInfo, UsesFPIntrin, isInline, ES,
+ ConstexprKind, EndLocation, TrailingRequiresClause);
}
bool CXXConversionDecl::isLambdaToBlockPointerConversion() const {
@@ -2793,8 +2892,8 @@ bool CXXConversionDecl::isLambdaToBlockPointerConversion() const {
}
LinkageSpecDecl::LinkageSpecDecl(DeclContext *DC, SourceLocation ExternLoc,
- SourceLocation LangLoc, LanguageIDs lang,
- bool HasBraces)
+ SourceLocation LangLoc,
+ LinkageSpecLanguageIDs lang, bool HasBraces)
: Decl(LinkageSpec, DC, LangLoc), DeclContext(LinkageSpec),
ExternLoc(ExternLoc), RBraceLoc(SourceLocation()) {
setLanguage(lang);
@@ -2803,19 +2902,19 @@ LinkageSpecDecl::LinkageSpecDecl(DeclContext *DC, SourceLocation ExternLoc,
void LinkageSpecDecl::anchor() {}
-LinkageSpecDecl *LinkageSpecDecl::Create(ASTContext &C,
- DeclContext *DC,
+LinkageSpecDecl *LinkageSpecDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation ExternLoc,
SourceLocation LangLoc,
- LanguageIDs Lang,
+ LinkageSpecLanguageIDs Lang,
bool HasBraces) {
return new (C, DC) LinkageSpecDecl(DC, ExternLoc, LangLoc, Lang, HasBraces);
}
LinkageSpecDecl *LinkageSpecDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- return new (C, ID) LinkageSpecDecl(nullptr, SourceLocation(),
- SourceLocation(), lang_c, false);
+ return new (C, ID)
+ LinkageSpecDecl(nullptr, SourceLocation(), SourceLocation(),
+ LinkageSpecLanguageIDs::C, false);
}
void UsingDirectiveDecl::anchor() {}
@@ -2849,41 +2948,47 @@ NamespaceDecl *UsingDirectiveDecl::getNominatedNamespace() {
NamespaceDecl::NamespaceDecl(ASTContext &C, DeclContext *DC, bool Inline,
SourceLocation StartLoc, SourceLocation IdLoc,
- IdentifierInfo *Id, NamespaceDecl *PrevDecl)
+ IdentifierInfo *Id, NamespaceDecl *PrevDecl,
+ bool Nested)
: NamedDecl(Namespace, DC, IdLoc, Id), DeclContext(Namespace),
- redeclarable_base(C), LocStart(StartLoc),
- AnonOrFirstNamespaceAndInline(nullptr, Inline) {
+ redeclarable_base(C), LocStart(StartLoc) {
+ unsigned Flags = 0;
+ if (Inline)
+ Flags |= F_Inline;
+ if (Nested)
+ Flags |= F_Nested;
+ AnonOrFirstNamespaceAndFlags = {nullptr, Flags};
setPreviousDecl(PrevDecl);
if (PrevDecl)
- AnonOrFirstNamespaceAndInline.setPointer(PrevDecl->getOriginalNamespace());
+ AnonOrFirstNamespaceAndFlags.setPointer(PrevDecl->getOriginalNamespace());
}
NamespaceDecl *NamespaceDecl::Create(ASTContext &C, DeclContext *DC,
bool Inline, SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
- NamespaceDecl *PrevDecl) {
- return new (C, DC) NamespaceDecl(C, DC, Inline, StartLoc, IdLoc, Id,
- PrevDecl);
+ NamespaceDecl *PrevDecl, bool Nested) {
+ return new (C, DC)
+ NamespaceDecl(C, DC, Inline, StartLoc, IdLoc, Id, PrevDecl, Nested);
}
NamespaceDecl *NamespaceDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) NamespaceDecl(C, nullptr, false, SourceLocation(),
- SourceLocation(), nullptr, nullptr);
+ SourceLocation(), nullptr, nullptr, false);
}
NamespaceDecl *NamespaceDecl::getOriginalNamespace() {
if (isFirstDecl())
return this;
- return AnonOrFirstNamespaceAndInline.getPointer();
+ return AnonOrFirstNamespaceAndFlags.getPointer();
}
const NamespaceDecl *NamespaceDecl::getOriginalNamespace() const {
if (isFirstDecl())
return this;
- return AnonOrFirstNamespaceAndInline.getPointer();
+ return AnonOrFirstNamespaceAndFlags.getPointer();
}
bool NamespaceDecl::isOriginalNamespace() const { return isFirstDecl(); }
@@ -2973,8 +3078,10 @@ UsingShadowDecl::UsingShadowDecl(Kind K, ASTContext &C, DeclContext *DC,
BaseUsingDecl *Introducer, NamedDecl *Target)
: NamedDecl(K, DC, Loc, Name), redeclarable_base(C),
UsingOrNextShadow(Introducer) {
- if (Target)
+ if (Target) {
+ assert(!isa<UsingShadowDecl>(Target));
setTargetDecl(Target);
+ }
setImplicit();
}
@@ -3017,8 +3124,7 @@ CXXRecordDecl *ConstructorUsingShadowDecl::getNominatedBaseClass() const {
void BaseUsingDecl::anchor() {}
void BaseUsingDecl::addShadowDecl(UsingShadowDecl *S) {
- assert(std::find(shadow_begin(), shadow_end(), S) == shadow_end() &&
- "declaration already in set");
+ assert(!llvm::is_contained(shadows(), S) && "declaration already in set");
assert(S->getIntroducer() == this);
if (FirstUsingShadow.getPointer())
@@ -3027,8 +3133,7 @@ void BaseUsingDecl::addShadowDecl(UsingShadowDecl *S) {
}
void BaseUsingDecl::removeShadowDecl(UsingShadowDecl *S) {
- assert(std::find(shadow_begin(), shadow_end(), S) != shadow_end() &&
- "declaration not in set");
+ assert(llvm::is_contained(shadows(), S) && "declaration not in set");
assert(S->getIntroducer() == this);
// Remove S from the shadow decl chain. This is O(n) but hopefully rare.
@@ -3071,18 +3176,23 @@ SourceRange UsingDecl::getSourceRange() const {
void UsingEnumDecl::anchor() {}
UsingEnumDecl *UsingEnumDecl::Create(ASTContext &C, DeclContext *DC,
- SourceLocation UL, SourceLocation EL,
- SourceLocation NL, EnumDecl *Enum) {
- return new (C, DC) UsingEnumDecl(DC, Enum->getDeclName(), UL, EL, NL, Enum);
+ SourceLocation UL,
+ SourceLocation EL,
+ SourceLocation NL,
+ TypeSourceInfo *EnumType) {
+ assert(isa<EnumDecl>(EnumType->getType()->getAsTagDecl()));
+ return new (C, DC)
+ UsingEnumDecl(DC, EnumType->getType()->getAsTagDecl()->getDeclName(), UL, EL, NL, EnumType);
}
UsingEnumDecl *UsingEnumDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
- return new (C, ID) UsingEnumDecl(nullptr, DeclarationName(), SourceLocation(),
- SourceLocation(), SourceLocation(), nullptr);
+ return new (C, ID)
+ UsingEnumDecl(nullptr, DeclarationName(), SourceLocation(),
+ SourceLocation(), SourceLocation(), nullptr);
}
SourceRange UsingEnumDecl::getSourceRange() const {
- return SourceRange(EnumLocation, getLocation());
+ return SourceRange(UsingLocation, EnumType->getTypeLoc().getEndLoc());
}
void UsingPackDecl::anchor() {}
@@ -3097,7 +3207,8 @@ UsingPackDecl *UsingPackDecl::Create(ASTContext &C, DeclContext *DC,
UsingPackDecl *UsingPackDecl::CreateDeserialized(ASTContext &C, unsigned ID,
unsigned NumExpansions) {
size_t Extra = additionalSizeToAlloc<NamedDecl *>(NumExpansions);
- auto *Result = new (C, ID, Extra) UsingPackDecl(nullptr, nullptr, None);
+ auto *Result =
+ new (C, ID, Extra) UsingPackDecl(nullptr, nullptr, std::nullopt);
Result->NumExpansions = NumExpansions;
auto *Trail = Result->getTrailingObjects<NamedDecl *>();
for (unsigned I = 0; I != NumExpansions; ++I)
@@ -3178,8 +3289,7 @@ void StaticAssertDecl::anchor() {}
StaticAssertDecl *StaticAssertDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation StaticAssertLoc,
- Expr *AssertExpr,
- StringLiteral *Message,
+ Expr *AssertExpr, Expr *Message,
SourceLocation RParenLoc,
bool Failed) {
return new (C, DC) StaticAssertDecl(DC, StaticAssertLoc, AssertExpr, Message,
@@ -3192,6 +3302,16 @@ StaticAssertDecl *StaticAssertDecl::CreateDeserialized(ASTContext &C,
nullptr, SourceLocation(), false);
}
+VarDecl *ValueDecl::getPotentiallyDecomposedVarDecl() {
+ assert((isa<VarDecl, BindingDecl>(this)) &&
+ "expected a VarDecl or a BindingDecl");
+ if (auto *Var = llvm::dyn_cast<VarDecl>(this))
+ return Var;
+ if (auto *BD = llvm::dyn_cast<BindingDecl>(this))
+ return llvm::dyn_cast<VarDecl>(BD->getDecomposedDecl());
+ return nullptr;
+}
+
void BindingDecl::anchor() {}
BindingDecl *BindingDecl::Create(ASTContext &C, DeclContext *DC,
@@ -3235,7 +3355,7 @@ DecompositionDecl *DecompositionDecl::CreateDeserialized(ASTContext &C,
size_t Extra = additionalSizeToAlloc<BindingDecl *>(NumBindings);
auto *Result = new (C, ID, Extra)
DecompositionDecl(C, nullptr, SourceLocation(), SourceLocation(),
- QualType(), nullptr, StorageClass(), None);
+ QualType(), nullptr, StorageClass(), std::nullopt);
// Set up and clean out the bindings array.
Result->NumBindings = NumBindings;
auto *Trail = Result->getTrailingObjects<BindingDecl *>();
@@ -3244,16 +3364,17 @@ DecompositionDecl *DecompositionDecl::CreateDeserialized(ASTContext &C,
return Result;
}
-void DecompositionDecl::printName(llvm::raw_ostream &os) const {
- os << '[';
+void DecompositionDecl::printName(llvm::raw_ostream &OS,
+ const PrintingPolicy &Policy) const {
+ OS << '[';
bool Comma = false;
for (const auto *B : bindings()) {
if (Comma)
- os << ", ";
- B->printName(os);
+ OS << ", ";
+ B->printName(OS, Policy);
Comma = true;
}
- os << ']';
+ OS << ']';
}
void MSPropertyDecl::anchor() {}
@@ -3278,7 +3399,7 @@ void MSGuidDecl::anchor() {}
MSGuidDecl::MSGuidDecl(DeclContext *DC, QualType T, Parts P)
: ValueDecl(Decl::MSGuid, DC, SourceLocation(), DeclarationName(), T),
- PartVal(P), APVal() {}
+ PartVal(P) {}
MSGuidDecl *MSGuidDecl::Create(const ASTContext &C, QualType T, Parts P) {
DeclContext *DC = C.getTranslationUnitDecl();
@@ -3289,7 +3410,8 @@ MSGuidDecl *MSGuidDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) MSGuidDecl(nullptr, QualType(), Parts());
}
-void MSGuidDecl::printName(llvm::raw_ostream &OS) const {
+void MSGuidDecl::printName(llvm::raw_ostream &OS,
+ const PrintingPolicy &) const {
OS << llvm::format("GUID{%08" PRIx32 "-%04" PRIx16 "-%04" PRIx16 "-",
PartVal.Part1, PartVal.Part2, PartVal.Part3);
unsigned I = 0;
@@ -3370,6 +3492,39 @@ APValue &MSGuidDecl::getAsAPValue() const {
return APVal;
}
+void UnnamedGlobalConstantDecl::anchor() {}
+
+UnnamedGlobalConstantDecl::UnnamedGlobalConstantDecl(const ASTContext &C,
+ DeclContext *DC,
+ QualType Ty,
+ const APValue &Val)
+ : ValueDecl(Decl::UnnamedGlobalConstant, DC, SourceLocation(),
+ DeclarationName(), Ty),
+ Value(Val) {
+ // Cleanup the embedded APValue if required (note that our destructor is never
+ // run)
+ if (Value.needsCleanup())
+ C.addDestruction(&Value);
+}
+
+UnnamedGlobalConstantDecl *
+UnnamedGlobalConstantDecl::Create(const ASTContext &C, QualType T,
+ const APValue &Value) {
+ DeclContext *DC = C.getTranslationUnitDecl();
+ return new (C, DC) UnnamedGlobalConstantDecl(C, DC, T, Value);
+}
+
+UnnamedGlobalConstantDecl *
+UnnamedGlobalConstantDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID)
+ UnnamedGlobalConstantDecl(C, nullptr, QualType(), APValue());
+}
+
+void UnnamedGlobalConstantDecl::printName(llvm::raw_ostream &OS,
+ const PrintingPolicy &) const {
+ OS << "unnamed-global-constant";
+}
+
static const char *getAccessName(AccessSpecifier AS) {
switch (AS) {
case AS_none:
diff --git a/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp b/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp
index 6e790f03b027..962f503306a0 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
+#include "clang/AST/ODRHash.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
@@ -23,7 +24,6 @@
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
-#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
@@ -232,6 +232,18 @@ ObjCPropertyDecl::getDefaultSynthIvarName(ASTContext &Ctx) const {
return &Ctx.Idents.get(ivarName.str());
}
+ObjCPropertyDecl *ObjCContainerDecl::getProperty(const IdentifierInfo *Id,
+ bool IsInstance) const {
+ for (auto *LookupResult : lookup(Id)) {
+ if (auto *Prop = dyn_cast<ObjCPropertyDecl>(LookupResult)) {
+ if (Prop->isInstanceProperty() == IsInstance) {
+ return Prop;
+ }
+ }
+ }
+ return nullptr;
+}
+
/// FindPropertyDeclaration - Finds declaration of the property given its name
/// in 'PropertyId' and returns it. It returns 0, if not found.
ObjCPropertyDecl *ObjCContainerDecl::FindPropertyDeclaration(
@@ -391,21 +403,18 @@ ObjCInterfaceDecl::FindPropertyVisibleInPrimaryClass(
return nullptr;
}
-void ObjCInterfaceDecl::collectPropertiesToImplement(PropertyMap &PM,
- PropertyDeclOrder &PO) const {
+void ObjCInterfaceDecl::collectPropertiesToImplement(PropertyMap &PM) const {
for (auto *Prop : properties()) {
PM[std::make_pair(Prop->getIdentifier(), Prop->isClassProperty())] = Prop;
- PO.push_back(Prop);
}
for (const auto *Ext : known_extensions()) {
const ObjCCategoryDecl *ClassExt = Ext;
for (auto *Prop : ClassExt->properties()) {
PM[std::make_pair(Prop->getIdentifier(), Prop->isClassProperty())] = Prop;
- PO.push_back(Prop);
}
}
for (const auto *PI : all_referenced_protocols())
- PI->collectPropertiesToImplement(PM, PO);
+ PI->collectPropertiesToImplement(PM);
// Note, the properties declared only in class extensions are still copied
// into the main @interface's property list, and therefore we don't
// explicitly, have to search class extension properties.
@@ -603,10 +612,6 @@ void ObjCInterfaceDecl::allocateDefinitionData() {
assert(!hasDefinition() && "ObjC class already has a definition");
Data.setPointer(new (getASTContext()) DefinitionData());
Data.getPointer()->Definition = this;
-
- // Make the type point at the definition, now that we have one.
- if (TypeForDecl)
- cast<ObjCInterfaceType>(TypeForDecl)->Decl = this;
}
void ObjCInterfaceDecl::startDefinition() {
@@ -619,6 +624,17 @@ void ObjCInterfaceDecl::startDefinition() {
}
}
+void ObjCInterfaceDecl::startDuplicateDefinitionForComparison() {
+ Data.setPointer(nullptr);
+ allocateDefinitionData();
+ // Don't propagate data to other redeclarations.
+}
+
+void ObjCInterfaceDecl::mergeDuplicateDefinitionWithCommon(
+ const ObjCInterfaceDecl *Definition) {
+ Data = Definition->Data;
+}
+
ObjCIvarDecl *ObjCInterfaceDecl::lookupInstanceVariable(IdentifierInfo *ID,
ObjCInterfaceDecl *&clsDeclared) {
// FIXME: Should make sure no callers ever do this.
@@ -773,6 +789,33 @@ ObjCMethodDecl *ObjCInterfaceDecl::lookupPrivateMethod(
return Method;
}
+unsigned ObjCInterfaceDecl::getODRHash() {
+ assert(hasDefinition() && "ODRHash only for records with definitions");
+
+ // Previously calculated hash is stored in DefinitionData.
+ if (hasODRHash())
+ return data().ODRHash;
+
+ // Only calculate hash on first call of getODRHash per record.
+ ODRHash Hasher;
+ Hasher.AddObjCInterfaceDecl(getDefinition());
+ data().ODRHash = Hasher.CalculateHash();
+ setHasODRHash(true);
+
+ return data().ODRHash;
+}
+
+bool ObjCInterfaceDecl::hasODRHash() const {
+ if (!hasDefinition())
+ return false;
+ return data().HasODRHash;
+}
+
+void ObjCInterfaceDecl::setHasODRHash(bool HasHash) {
+ assert(hasDefinition() && "Cannot set ODRHash without definition");
+ data().HasODRHash = HasHash;
+}
+
//===----------------------------------------------------------------------===//
// ObjCMethodDecl
//===----------------------------------------------------------------------===//
@@ -782,7 +825,7 @@ ObjCMethodDecl::ObjCMethodDecl(
QualType T, TypeSourceInfo *ReturnTInfo, DeclContext *contextDecl,
bool isInstance, bool isVariadic, bool isPropertyAccessor,
bool isSynthesizedAccessorStub, bool isImplicitlyDeclared, bool isDefined,
- ImplementationControl impControl, bool HasRelatedResultType)
+ ObjCImplementationControl impControl, bool HasRelatedResultType)
: NamedDecl(ObjCMethod, contextDecl, beginLoc, SelInfo),
DeclContext(ObjCMethod), MethodDeclType(T), ReturnTInfo(ReturnTInfo),
DeclEndLoc(endLoc) {
@@ -812,8 +855,8 @@ ObjCMethodDecl *ObjCMethodDecl::Create(
Selector SelInfo, QualType T, TypeSourceInfo *ReturnTInfo,
DeclContext *contextDecl, bool isInstance, bool isVariadic,
bool isPropertyAccessor, bool isSynthesizedAccessorStub,
- bool isImplicitlyDeclared, bool isDefined, ImplementationControl impControl,
- bool HasRelatedResultType) {
+ bool isImplicitlyDeclared, bool isDefined,
+ ObjCImplementationControl impControl, bool HasRelatedResultType) {
return new (C, contextDecl) ObjCMethodDecl(
beginLoc, endLoc, SelInfo, T, ReturnTInfo, contextDecl, isInstance,
isVariadic, isPropertyAccessor, isSynthesizedAccessorStub,
@@ -855,6 +898,14 @@ bool ObjCMethodDecl::isDesignatedInitializerForTheInterface(
return false;
}
+bool ObjCMethodDecl::hasParamDestroyedInCallee() const {
+ for (auto *param : parameters()) {
+ if (param->isDestroyedInCallee())
+ return true;
+ }
+ return false;
+}
+
Stmt *ObjCMethodDecl::getBody() const {
return Body.get(getASTContext().getExternalSource());
}
@@ -880,8 +931,8 @@ void ObjCMethodDecl::setParamsAndSelLocs(ASTContext &C,
unsigned Size = sizeof(ParmVarDecl *) * NumParams +
sizeof(SourceLocation) * SelLocs.size();
ParamsAndSelLocs = C.Allocate(Size);
- std::copy(Params.begin(), Params.end(), getParams());
- std::copy(SelLocs.begin(), SelLocs.end(), getStoredSelLocs());
+ std::uninitialized_copy(Params.begin(), Params.end(), getParams());
+ std::uninitialized_copy(SelLocs.begin(), SelLocs.end(), getStoredSelLocs());
}
void ObjCMethodDecl::getSelectorLocs(
@@ -896,12 +947,12 @@ void ObjCMethodDecl::setMethodParams(ASTContext &C,
assert((!SelLocs.empty() || isImplicit()) &&
"No selector locs for non-implicit method");
if (isImplicit())
- return setParamsAndSelLocs(C, Params, llvm::None);
+ return setParamsAndSelLocs(C, Params, std::nullopt);
setSelLocsKind(hasStandardSelectorLocs(getSelector(), SelLocs, Params,
DeclEndLoc));
if (getSelLocsKind() != SelLoc_NonStandard)
- return setParamsAndSelLocs(C, Params, llvm::None);
+ return setParamsAndSelLocs(C, Params, std::nullopt);
setParamsAndSelLocs(C, Params, SelLocs);
}
@@ -1143,7 +1194,7 @@ void ObjCMethodDecl::createImplicitParams(ASTContext &Context,
getSelfType(Context, OID, selfIsPseudoStrong, selfIsConsumed);
auto *Self = ImplicitParamDecl::Create(Context, this, SourceLocation(),
&Context.Idents.get("self"), selfTy,
- ImplicitParamDecl::ObjCSelf);
+ ImplicitParamKind::ObjCSelf);
setSelfDecl(Self);
if (selfIsConsumed)
@@ -1154,7 +1205,7 @@ void ObjCMethodDecl::createImplicitParams(ASTContext &Context,
setCmdDecl(ImplicitParamDecl::Create(
Context, this, SourceLocation(), &Context.Idents.get("_cmd"),
- Context.getObjCSelType(), ImplicitParamDecl::ObjCCmd));
+ Context.getObjCSelType(), ImplicitParamKind::ObjCCmd));
}
ObjCInterfaceDecl *ObjCMethodDecl::getClassInterface() {
@@ -1480,7 +1531,7 @@ ObjCTypeParamList *ObjCTypeParamList::create(
void ObjCTypeParamList::gatherDefaultTypeArgs(
SmallVectorImpl<QualType> &typeArgs) const {
typeArgs.reserve(size());
- for (auto typeParam : *this)
+ for (auto *typeParam : *this)
typeArgs.push_back(typeParam->getUnderlyingType());
}
@@ -1631,6 +1682,11 @@ ObjCIvarDecl *ObjCInterfaceDecl::all_declared_ivar_begin() {
ObjCIvarDecl *curIvar = nullptr;
if (!data().IvarList) {
+ // Force ivar deserialization upfront, before building IvarList.
+ (void)ivar_empty();
+ for (const auto *Ext : known_extensions()) {
+ (void)Ext->ivar_empty();
+ }
if (!ivar_empty()) {
ObjCInterfaceDecl::ivar_iterator I = ivar_begin(), E = ivar_end();
data().IvarList = *I; ++I;
@@ -1822,8 +1878,8 @@ ObjCIvarDecl *ObjCIvarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
ObjCIvarDecl::None, nullptr, false);
}
-const ObjCInterfaceDecl *ObjCIvarDecl::getContainingInterface() const {
- const auto *DC = cast<ObjCContainerDecl>(getDeclContext());
+ObjCInterfaceDecl *ObjCIvarDecl::getContainingInterface() {
+ auto *DC = cast<ObjCContainerDecl>(getDeclContext());
switch (DC->getKind()) {
default:
@@ -1833,7 +1889,7 @@ const ObjCInterfaceDecl *ObjCIvarDecl::getContainingInterface() const {
// Ivars can only appear in class extension categories.
case ObjCCategory: {
- const auto *CD = cast<ObjCCategoryDecl>(DC);
+ auto *CD = cast<ObjCCategoryDecl>(DC);
assert(CD->IsClassExtension() && "invalid container for ivar!");
return CD->getClassInterface();
}
@@ -1967,6 +2023,7 @@ void ObjCProtocolDecl::allocateDefinitionData() {
assert(!Data.getPointer() && "Protocol already has a definition!");
Data.setPointer(new (getASTContext()) DefinitionData);
Data.getPointer()->Definition = this;
+ Data.getPointer()->HasODRHash = false;
}
void ObjCProtocolDecl::startDefinition() {
@@ -1977,19 +2034,28 @@ void ObjCProtocolDecl::startDefinition() {
RD->Data = this->Data;
}
-void ObjCProtocolDecl::collectPropertiesToImplement(PropertyMap &PM,
- PropertyDeclOrder &PO) const {
+void ObjCProtocolDecl::startDuplicateDefinitionForComparison() {
+ Data.setPointer(nullptr);
+ allocateDefinitionData();
+ // Don't propagate data to other redeclarations.
+}
+
+void ObjCProtocolDecl::mergeDuplicateDefinitionWithCommon(
+ const ObjCProtocolDecl *Definition) {
+ Data = Definition->Data;
+}
+
+void ObjCProtocolDecl::collectPropertiesToImplement(PropertyMap &PM) const {
if (const ObjCProtocolDecl *PDecl = getDefinition()) {
for (auto *Prop : PDecl->properties()) {
// Insert into PM if not there already.
PM.insert(std::make_pair(
std::make_pair(Prop->getIdentifier(), Prop->isClassProperty()),
Prop));
- PO.push_back(Prop);
}
// Scan through protocol's protocols.
for (const auto *PI : PDecl->protocols())
- PI->collectPropertiesToImplement(PM, PO);
+ PI->collectPropertiesToImplement(PM);
}
}
@@ -2021,6 +2087,33 @@ ObjCProtocolDecl::getObjCRuntimeNameAsString() const {
return getName();
}
+unsigned ObjCProtocolDecl::getODRHash() {
+ assert(hasDefinition() && "ODRHash only for records with definitions");
+
+ // Previously calculated hash is stored in DefinitionData.
+ if (hasODRHash())
+ return data().ODRHash;
+
+ // Only calculate hash on first call of getODRHash per record.
+ ODRHash Hasher;
+ Hasher.AddObjCProtocolDecl(getDefinition());
+ data().ODRHash = Hasher.CalculateHash();
+ setHasODRHash(true);
+
+ return data().ODRHash;
+}
+
+bool ObjCProtocolDecl::hasODRHash() const {
+ if (!hasDefinition())
+ return false;
+ return data().HasODRHash;
+}
+
+void ObjCProtocolDecl::setHasODRHash(bool HasHash) {
+ assert(hasDefinition() && "Cannot set ODRHash without definition");
+ data().HasODRHash = HasHash;
+}
+
//===----------------------------------------------------------------------===//
// ObjCCategoryDecl
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/clang/lib/AST/DeclOpenMP.cpp b/contrib/llvm-project/clang/lib/AST/DeclOpenMP.cpp
index 867ef31656f7..ac5780f82dbb 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclOpenMP.cpp
@@ -30,7 +30,7 @@ OMPThreadPrivateDecl *OMPThreadPrivateDecl::Create(ASTContext &C,
SourceLocation L,
ArrayRef<Expr *> VL) {
auto *D = OMPDeclarativeDirective::createDirective<OMPThreadPrivateDecl>(
- C, DC, llvm::None, VL.size(), L);
+ C, DC, std::nullopt, VL.size(), L);
D->setVars(VL);
return D;
}
@@ -104,7 +104,7 @@ OMPDeclareReductionDecl::OMPDeclareReductionDecl(
QualType Ty, OMPDeclareReductionDecl *PrevDeclInScope)
: ValueDecl(DK, DC, L, Name, Ty), DeclContext(DK), Combiner(nullptr),
PrevDeclInScope(PrevDeclInScope) {
- setInitializer(nullptr, CallInit);
+ setInitializer(nullptr, OMPDeclareReductionInitKind::Call);
}
void OMPDeclareReductionDecl::anchor() {}
diff --git a/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp b/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp
index 4dcf3d0e6ab1..822ac12c4c7d 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp
@@ -49,6 +49,18 @@ namespace {
void PrintObjCTypeParams(ObjCTypeParamList *Params);
+ enum class AttrPrintLoc {
+ None = 0,
+ Left = 1,
+ Right = 2,
+ Any = Left | Right,
+
+ LLVM_MARK_AS_BITMASK_ENUM(/*DefaultValue=*/Any)
+ };
+
+ void prettyPrintAttributes(Decl *D, raw_ostream &out,
+ AttrPrintLoc loc = AttrPrintLoc::Any);
+
public:
DeclPrinter(raw_ostream &Out, const PrintingPolicy &Policy,
const ASTContext &Context, unsigned Indentation = 0,
@@ -72,6 +84,7 @@ namespace {
void VisitLabelDecl(LabelDecl *D);
void VisitParmVarDecl(ParmVarDecl *D);
void VisitFileScopeAsmDecl(FileScopeAsmDecl *D);
+ void VisitTopLevelStmtDecl(TopLevelStmtDecl *D);
void VisitImportDecl(ImportDecl *D);
void VisitStaticAssertDecl(StaticAssertDecl *D);
void VisitNamespaceDecl(NamespaceDecl *D);
@@ -108,16 +121,19 @@ namespace {
void VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D);
void VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *TTP);
void VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *NTTP);
+ void VisitHLSLBufferDecl(HLSLBufferDecl *D);
void printTemplateParameters(const TemplateParameterList *Params,
bool OmitTemplateKW = false);
void printTemplateArguments(llvm::ArrayRef<TemplateArgument> Args,
- const TemplateParameterList *Params,
- bool TemplOverloaded);
+ const TemplateParameterList *Params);
void printTemplateArguments(llvm::ArrayRef<TemplateArgumentLoc> Args,
- const TemplateParameterList *Params,
- bool TemplOverloaded);
- void prettyPrintAttributes(Decl *D);
+ const TemplateParameterList *Params);
+
+ inline void prettyPrintAttributes(Decl *D) {
+ prettyPrintAttributes(D, Out);
+ }
+
void prettyPrintPragmas(Decl *D);
void printDeclType(QualType T, StringRef DeclName, bool Pack = false);
};
@@ -153,11 +169,14 @@ static QualType GetBaseType(QualType T) {
while (!BaseType->isSpecifierType()) {
if (const PointerType *PTy = BaseType->getAs<PointerType>())
BaseType = PTy->getPointeeType();
+ else if (const ObjCObjectPointerType *OPT =
+ BaseType->getAs<ObjCObjectPointerType>())
+ BaseType = OPT->getPointeeType();
else if (const BlockPointerType *BPy = BaseType->getAs<BlockPointerType>())
BaseType = BPy->getPointeeType();
- else if (const ArrayType* ATy = dyn_cast<ArrayType>(BaseType))
+ else if (const ArrayType *ATy = dyn_cast<ArrayType>(BaseType))
BaseType = ATy->getElementType();
- else if (const FunctionType* FTy = BaseType->getAs<FunctionType>())
+ else if (const FunctionType *FTy = BaseType->getAs<FunctionType>())
BaseType = FTy->getReturnType();
else if (const VectorType *VTy = BaseType->getAs<VectorType>())
BaseType = VTy->getElementType();
@@ -231,7 +250,54 @@ raw_ostream& DeclPrinter::Indent(unsigned Indentation) {
return Out;
}
-void DeclPrinter::prettyPrintAttributes(Decl *D) {
+// For CLANG_ATTR_LIST_CanPrintOnLeft macro.
+#include "clang/Basic/AttrLeftSideCanPrintList.inc"
+
+// For CLANG_ATTR_LIST_PrintOnLeft macro.
+#include "clang/Basic/AttrLeftSideMustPrintList.inc"
+
+static bool canPrintOnLeftSide(attr::Kind kind) {
+#ifdef CLANG_ATTR_LIST_CanPrintOnLeft
+ switch (kind) {
+ CLANG_ATTR_LIST_CanPrintOnLeft
+ return true;
+ default:
+ return false;
+ }
+#else
+ return false;
+#endif
+}
+
+static bool canPrintOnLeftSide(const Attr *A) {
+ if (A->isStandardAttributeSyntax())
+ return false;
+
+ return canPrintOnLeftSide(A->getKind());
+}
+
+static bool mustPrintOnLeftSide(attr::Kind kind) {
+#ifdef CLANG_ATTR_LIST_PrintOnLeft
+ switch (kind) {
+ CLANG_ATTR_LIST_PrintOnLeft
+ return true;
+ default:
+ return false;
+ }
+#else
+ return false;
+#endif
+}
+
+static bool mustPrintOnLeftSide(const Attr *A) {
+ if (A->isDeclspecAttribute())
+ return true;
+
+ return mustPrintOnLeftSide(A->getKind());
+}
+
+void DeclPrinter::prettyPrintAttributes(Decl *D, llvm::raw_ostream &Out,
+ AttrPrintLoc Loc) {
if (Policy.PolishForDeclaration)
return;
@@ -240,15 +306,31 @@ void DeclPrinter::prettyPrintAttributes(Decl *D) {
for (auto *A : Attrs) {
if (A->isInherited() || A->isImplicit())
continue;
- switch (A->getKind()) {
-#define ATTR(X)
-#define PRAGMA_SPELLING_ATTR(X) case attr::X:
-#include "clang/Basic/AttrList.inc"
- break;
- default:
- A->printPretty(Out, Policy);
- break;
+
+ AttrPrintLoc AttrLoc = AttrPrintLoc::Right;
+ if (mustPrintOnLeftSide(A)) {
+ // If we must always print on left side (e.g. declspec), then mark as
+ // so.
+ AttrLoc = AttrPrintLoc::Left;
+ } else if (canPrintOnLeftSide(A)) {
+ // For functions with body defined we print the attributes on the left
+ // side so that GCC accept our dumps as well.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ FD && FD->isThisDeclarationADefinition())
+ // In case Decl is a function with a body, then attrs should be print
+ // on the left side.
+ AttrLoc = AttrPrintLoc::Left;
+
+ // In case it is a variable declaration with a ctor, then allow
+ // printing on the left side for readbility.
+ else if (const VarDecl *VD = dyn_cast<VarDecl>(D);
+ VD && VD->getInit() &&
+ VD->getInitStyle() == VarDecl::CallInit)
+ AttrLoc = AttrPrintLoc::Left;
}
+ // Only print the side matches the user requested.
+ if ((Loc & AttrLoc) != AttrPrintLoc::None)
+ A->printPretty(Out, Policy);
}
}
}
@@ -306,6 +388,8 @@ void DeclPrinter::PrintConstructorInitializers(CXXConstructorDecl *CDecl,
for (const auto *BMInitializer : CDecl->inits()) {
if (BMInitializer->isInClassMemberInitializer())
continue;
+ if (!BMInitializer->isWritten())
+ continue;
if (!HasInitializerList) {
Proto += " : ";
@@ -318,15 +402,18 @@ void DeclPrinter::PrintConstructorInitializers(CXXConstructorDecl *CDecl,
if (BMInitializer->isAnyMemberInitializer()) {
FieldDecl *FD = BMInitializer->getAnyMember();
Out << *FD;
+ } else if (BMInitializer->isDelegatingInitializer()) {
+ Out << CDecl->getNameAsString();
} else {
Out << QualType(BMInitializer->getBaseClass(), 0).getAsString(Policy);
}
- Out << "(";
- if (!BMInitializer->getInit()) {
- // Nothing to print
- } else {
- Expr *Init = BMInitializer->getInit();
+ if (Expr *Init = BMInitializer->getInit()) {
+ bool OutParens = !isa<InitListExpr>(Init);
+
+ if (OutParens)
+ Out << "(";
+
if (ExprWithCleanups *Tmp = dyn_cast<ExprWithCleanups>(Init))
Init = Tmp->getSubExpr();
@@ -360,8 +447,13 @@ void DeclPrinter::PrintConstructorInitializers(CXXConstructorDecl *CDecl,
&Context);
}
}
+
+ if (OutParens)
+ Out << ")";
+ } else {
+ Out << "()";
}
- Out << ")";
+
if (BMInitializer->isPackExpansion())
Out << "...";
}
@@ -452,21 +544,18 @@ void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
else if (isa<ObjCMethodDecl>(*D) && cast<ObjCMethodDecl>(*D)->hasBody())
Terminator = nullptr;
else if (auto FD = dyn_cast<FunctionDecl>(*D)) {
- if (FD->isThisDeclarationADefinition())
+ if (FD->doesThisDeclarationHaveABody() && !FD->isDefaulted())
Terminator = nullptr;
else
Terminator = ";";
} else if (auto TD = dyn_cast<FunctionTemplateDecl>(*D)) {
- if (TD->getTemplatedDecl()->isThisDeclarationADefinition())
+ if (TD->getTemplatedDecl()->doesThisDeclarationHaveABody())
Terminator = nullptr;
else
Terminator = ";";
- } else if (isa<NamespaceDecl>(*D) || isa<LinkageSpecDecl>(*D) ||
- isa<ObjCImplementationDecl>(*D) ||
- isa<ObjCInterfaceDecl>(*D) ||
- isa<ObjCProtocolDecl>(*D) ||
- isa<ObjCCategoryImplDecl>(*D) ||
- isa<ObjCCategoryDecl>(*D))
+ } else if (isa<NamespaceDecl, LinkageSpecDecl, ObjCImplementationDecl,
+ ObjCInterfaceDecl, ObjCProtocolDecl, ObjCCategoryImplDecl,
+ ObjCCategoryDecl, HLSLBufferDecl>(*D))
Terminator = nullptr;
else if (isa<EnumConstantDecl>(*D)) {
DeclContext::decl_iterator Next = D;
@@ -587,7 +676,7 @@ static void printExplicitSpecifier(ExplicitSpecifier ES, llvm::raw_ostream &Out,
}
EOut << " ";
EOut.flush();
- Out << EOut.str();
+ Out << Proto;
}
void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
@@ -603,6 +692,22 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
printTemplateParameters(D->getTemplateParameterList(I));
}
+ std::string LeftsideAttrs;
+ llvm::raw_string_ostream LSAS(LeftsideAttrs);
+
+ prettyPrintAttributes(D, LSAS, AttrPrintLoc::Left);
+
+ // prettyPrintAttributes print a space on left side of the attribute.
+ if (LeftsideAttrs[0] == ' ') {
+ // Skip the space prettyPrintAttributes generated.
+ LeftsideAttrs.erase(0, LeftsideAttrs.find_first_not_of(' '));
+
+ // Add a single space between the attribute and the Decl name.
+ LSAS << ' ';
+ }
+
+ Out << LeftsideAttrs;
+
CXXConstructorDecl *CDecl = dyn_cast<CXXConstructorDecl>(D);
CXXConversionDecl *ConversionDecl = dyn_cast<CXXConversionDecl>(D);
CXXDeductionGuideDecl *GuideDecl = dyn_cast<CXXDeductionGuideDecl>(D);
@@ -622,6 +727,8 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
if (D->isConstexprSpecified() && !D->isExplicitlyDefaulted())
Out << "constexpr ";
if (D->isConsteval()) Out << "consteval ";
+ else if (D->isImmediateFunction())
+ Out << "immediate ";
ExplicitSpecifier ExplicitSpec = ExplicitSpecifier::getFromDecl(D);
if (ExplicitSpec.isSpecified())
printExplicitSpecifier(ExplicitSpec, Out, Policy, Indentation, Context);
@@ -649,16 +756,11 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
llvm::raw_string_ostream POut(Proto);
DeclPrinter TArgPrinter(POut, SubPolicy, Context, Indentation);
const auto *TArgAsWritten = D->getTemplateSpecializationArgsAsWritten();
- const TemplateParameterList *TPL = D->getTemplateSpecializationInfo()
- ->getTemplate()
- ->getTemplateParameters();
if (TArgAsWritten && !Policy.PrintCanonicalTypes)
- TArgPrinter.printTemplateArguments(TArgAsWritten->arguments(), TPL,
- /*TemplOverloaded*/ true);
+ TArgPrinter.printTemplateArguments(TArgAsWritten->arguments(), nullptr);
else if (const TemplateArgumentList *TArgs =
D->getTemplateSpecializationArgs())
- TArgPrinter.printTemplateArguments(TArgs->asArray(), TPL,
- /*TemplOverloaded*/ true);
+ TArgPrinter.printTemplateArguments(TArgs->asArray(), nullptr);
}
QualType Ty = D->getType();
@@ -684,6 +786,10 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
if (FT->isVariadic()) {
if (D->getNumParams()) POut << ", ";
POut << "...";
+ } else if (!D->getNumParams() && !Context.getLangOpts().CPlusPlus) {
+ // The function has a prototype, so it needs to retain the prototype
+ // in C.
+ POut << "void";
}
} else if (D->doesThisDeclarationHaveABody() && !D->hasPrototype()) {
for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) {
@@ -735,7 +841,6 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
FT->getNoexceptExpr()->printPretty(EOut, nullptr, SubPolicy,
Indentation, "\n", &Context);
EOut.flush();
- Proto += EOut.str();
Proto += ")";
}
}
@@ -764,9 +869,9 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
Ty.print(Out, Policy, Proto);
}
- prettyPrintAttributes(D);
+ prettyPrintAttributes(D, Out, AttrPrintLoc::Right);
- if (D->isPure())
+ if (D->isPureVirtual())
Out << " = 0";
else if (D->isDeletedAsWritten())
Out << " = delete";
@@ -786,11 +891,10 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
Out << ";\n";
}
Indentation -= Policy.Indentation;
- } else
- Out << ' ';
+ }
if (D->getBody())
- D->getBody()->printPretty(Out, nullptr, SubPolicy, Indentation, "\n",
+ D->getBody()->printPrettyControlled(Out, nullptr, SubPolicy, Indentation, "\n",
&Context);
} else {
if (!Policy.TerseOutput && isa<CXXConstructorDecl>(*D))
@@ -858,6 +962,27 @@ void DeclPrinter::VisitLabelDecl(LabelDecl *D) {
void DeclPrinter::VisitVarDecl(VarDecl *D) {
prettyPrintPragmas(D);
+ if (const auto *Param = dyn_cast<ParmVarDecl>(D);
+ Param && Param->isExplicitObjectParameter())
+ Out << "this ";
+
+ std::string LeftSide;
+ llvm::raw_string_ostream LeftSideStream(LeftSide);
+
+ // Print attributes that should be placed on the left, such as __declspec.
+ prettyPrintAttributes(D, LeftSideStream, AttrPrintLoc::Left);
+
+ // prettyPrintAttributes print a space on left side of the attribute.
+ if (LeftSide[0] == ' ') {
+ // Skip the space prettyPrintAttributes generated.
+ LeftSide.erase(0, LeftSide.find_first_not_of(' '));
+
+ // Add a single space between the attribute and the Decl name.
+ LeftSideStream << ' ';
+ }
+
+ Out << LeftSide;
+
QualType T = D->getTypeSourceInfo()
? D->getTypeSourceInfo()->getType()
: D->getASTContext().getUnqualifiedObjCPointerType(D->getType());
@@ -890,16 +1015,31 @@ void DeclPrinter::VisitVarDecl(VarDecl *D) {
}
}
- printDeclType(T, D->getName());
+ StringRef Name;
+
+ Name = (isa<ParmVarDecl>(D) && Policy.CleanUglifiedParameters &&
+ D->getIdentifier())
+ ? D->getIdentifier()->deuglifiedName()
+ : D->getName();
+
+ printDeclType(T, Name);
+
+ // Print the attributes that should be placed right before the end of the
+ // decl.
+ prettyPrintAttributes(D, Out, AttrPrintLoc::Right);
+
Expr *Init = D->getInit();
if (!Policy.SuppressInitializers && Init) {
bool ImplicitInit = false;
- if (CXXConstructExpr *Construct =
- dyn_cast<CXXConstructExpr>(Init->IgnoreImplicit())) {
+ if (D->isCXXForRangeDecl()) {
+ // FIXME: We should print the range expression instead.
+ ImplicitInit = true;
+ } else if (CXXConstructExpr *Construct =
+ dyn_cast<CXXConstructExpr>(Init->IgnoreImplicit())) {
if (D->getInitStyle() == VarDecl::CallInit &&
!Construct->isListInitialization()) {
ImplicitInit = Construct->getNumArgs() == 0 ||
- Construct->getArg(0)->isDefaultArgument();
+ Construct->getArg(0)->isDefaultArgument();
}
}
if (!ImplicitInit) {
@@ -916,7 +1056,6 @@ void DeclPrinter::VisitVarDecl(VarDecl *D) {
Out << ")";
}
}
- prettyPrintAttributes(D);
}
void DeclPrinter::VisitParmVarDecl(ParmVarDecl *D) {
@@ -930,6 +1069,11 @@ void DeclPrinter::VisitFileScopeAsmDecl(FileScopeAsmDecl *D) {
Out << ")";
}
+void DeclPrinter::VisitTopLevelStmtDecl(TopLevelStmtDecl *D) {
+ assert(D->getStmt());
+ D->getStmt()->printPretty(Out, nullptr, Policy, Indentation, "\n", &Context);
+}
+
void DeclPrinter::VisitImportDecl(ImportDecl *D) {
Out << "@import " << D->getImportedModule()->getFullModuleName()
<< ";\n";
@@ -939,9 +1083,9 @@ void DeclPrinter::VisitStaticAssertDecl(StaticAssertDecl *D) {
Out << "static_assert(";
D->getAssertExpr()->printPretty(Out, nullptr, Policy, Indentation, "\n",
&Context);
- if (StringLiteral *SL = D->getMessage()) {
+ if (Expr *E = D->getMessage()) {
Out << ", ";
- SL->printPretty(Out, nullptr, Policy, Indentation, "\n", &Context);
+ E->printPretty(Out, nullptr, Policy, Indentation, "\n", &Context);
}
Out << ")";
}
@@ -989,7 +1133,10 @@ void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) {
prettyPrintAttributes(D);
if (D->getIdentifier()) {
- Out << ' ' << *D;
+ Out << ' ';
+ if (auto *NNS = D->getQualifier())
+ NNS->print(Out, Policy);
+ Out << *D;
if (auto S = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
ArrayRef<TemplateArgument> Args = S->getTemplateArgs().asArray();
@@ -999,8 +1146,13 @@ void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) {
dyn_cast<TemplateSpecializationType>(TSI->getType()))
Args = TST->template_arguments();
printTemplateArguments(
- Args, S->getSpecializedTemplate()->getTemplateParameters(),
- /*TemplOverloaded*/ false);
+ Args, S->getSpecializedTemplate()->getTemplateParameters());
+ }
+ }
+
+ if (D->hasDefinition()) {
+ if (D->hasAttr<FinalAttr>()) {
+ Out << " final";
}
}
@@ -1042,10 +1194,10 @@ void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) {
void DeclPrinter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
const char *l;
- if (D->getLanguage() == LinkageSpecDecl::lang_c)
+ if (D->getLanguage() == LinkageSpecLanguageIDs::C)
l = "C";
else {
- assert(D->getLanguage() == LinkageSpecDecl::lang_cxx &&
+ assert(D->getLanguage() == LinkageSpecLanguageIDs::CXX &&
"unknown language in linkage specification");
l = "C++";
}
@@ -1093,35 +1245,34 @@ void DeclPrinter::printTemplateParameters(const TemplateParameterList *Params,
}
void DeclPrinter::printTemplateArguments(ArrayRef<TemplateArgument> Args,
- const TemplateParameterList *Params,
- bool TemplOverloaded) {
+ const TemplateParameterList *Params) {
Out << "<";
for (size_t I = 0, E = Args.size(); I < E; ++I) {
if (I)
Out << ", ";
- if (TemplOverloaded || !Params)
+ if (!Params)
Args[I].print(Policy, Out, /*IncludeType*/ true);
else
- Args[I].print(
- Policy, Out,
- TemplateParameterList::shouldIncludeTypeForArgument(Params, I));
+ Args[I].print(Policy, Out,
+ TemplateParameterList::shouldIncludeTypeForArgument(
+ Policy, Params, I));
}
Out << ">";
}
void DeclPrinter::printTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
- const TemplateParameterList *Params,
- bool TemplOverloaded) {
+ const TemplateParameterList *Params) {
Out << "<";
for (size_t I = 0, E = Args.size(); I < E; ++I) {
if (I)
Out << ", ";
- if (TemplOverloaded)
+ if (!Params)
Args[I].getArgument().print(Policy, Out, /*IncludeType*/ true);
else
Args[I].getArgument().print(
Policy, Out,
- TemplateParameterList::shouldIncludeTypeForArgument(Params, I));
+ TemplateParameterList::shouldIncludeTypeForArgument(Policy, Params,
+ I));
}
Out << ">";
}
@@ -1138,8 +1289,12 @@ void DeclPrinter::VisitTemplateDecl(const TemplateDecl *D) {
else if (TTP->getDeclName())
Out << ' ';
- if (TTP->getDeclName())
- Out << TTP->getDeclName();
+ if (TTP->getDeclName()) {
+ if (Policy.CleanUglifiedParameters && TTP->getIdentifier())
+ Out << TTP->getIdentifier()->deuglifiedName();
+ else
+ Out << TTP->getDeclName();
+ }
} else if (auto *TD = D->getTemplatedDecl())
Visit(TD);
else if (const auto *Concept = dyn_cast<ConceptDecl>(D)) {
@@ -1191,6 +1346,7 @@ void DeclPrinter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
if (D->isThisDeclarationADefinition())
Out << ";";
Out << "\n";
+ Indent();
Visit(I);
}
}
@@ -1572,7 +1728,7 @@ void DeclPrinter::VisitObjCPropertyDecl(ObjCPropertyDecl *PDecl) {
std::string TypeStr = PDecl->getASTContext().getUnqualifiedObjCPointerType(T).
getAsString(Policy);
Out << ' ' << TypeStr;
- if (!StringRef(TypeStr).endswith("*"))
+ if (!StringRef(TypeStr).ends_with("*"))
Out << ' ';
Out << *PDecl;
if (Policy.PolishForDeclaration)
@@ -1645,6 +1801,21 @@ void DeclPrinter::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) {
}
}
+void DeclPrinter::VisitHLSLBufferDecl(HLSLBufferDecl *D) {
+ if (D->isCBuffer())
+ Out << "cbuffer ";
+ else
+ Out << "tbuffer ";
+
+ Out << *D;
+
+ prettyPrintAttributes(D);
+
+ Out << " {\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+}
+
void DeclPrinter::VisitOMPAllocateDecl(OMPAllocateDecl *D) {
Out << "#pragma omp allocate";
if (!D->varlist_empty()) {
@@ -1658,10 +1829,11 @@ void DeclPrinter::VisitOMPAllocateDecl(OMPAllocateDecl *D) {
Out << ")";
}
if (!D->clauselist_empty()) {
- Out << " ";
OMPClausePrinter Printer(Out, Policy);
- for (OMPClause *C : D->clauselists())
+ for (OMPClause *C : D->clauselists()) {
+ Out << " ";
Printer.Visit(C);
+ }
}
}
@@ -1684,7 +1856,7 @@ void DeclPrinter::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) {
Out << OpName;
} else {
assert(D->getDeclName().isIdentifier());
- D->printName(Out);
+ D->printName(Out, Policy);
}
Out << " : ";
D->getType().print(Out, Policy);
@@ -1694,17 +1866,17 @@ void DeclPrinter::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) {
if (auto *Init = D->getInitializer()) {
Out << " initializer(";
switch (D->getInitializerKind()) {
- case OMPDeclareReductionDecl::DirectInit:
+ case OMPDeclareReductionInitKind::Direct:
Out << "omp_priv(";
break;
- case OMPDeclareReductionDecl::CopyInit:
+ case OMPDeclareReductionInitKind::Copy:
Out << "omp_priv = ";
break;
- case OMPDeclareReductionDecl::CallInit:
+ case OMPDeclareReductionInitKind::Call:
break;
}
Init->printPretty(Out, nullptr, Policy, 0, "\n", &Context);
- if (D->getInitializerKind() == OMPDeclareReductionDecl::DirectInit)
+ if (D->getInitializerKind() == OMPDeclareReductionInitKind::Direct)
Out << ")";
Out << ")";
}
@@ -1714,7 +1886,7 @@ void DeclPrinter::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) {
void DeclPrinter::VisitOMPDeclareMapperDecl(OMPDeclareMapperDecl *D) {
if (!D->isInvalidDecl()) {
Out << "#pragma omp declare mapper (";
- D->printName(Out);
+ D->printName(Out, Policy);
Out << " : ";
D->getType().print(Out, Policy);
Out << " ";
@@ -1747,8 +1919,12 @@ void DeclPrinter::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *TTP) {
else if (TTP->getDeclName())
Out << ' ';
- if (TTP->getDeclName())
- Out << TTP->getDeclName();
+ if (TTP->getDeclName()) {
+ if (Policy.CleanUglifiedParameters && TTP->getIdentifier())
+ Out << TTP->getIdentifier()->deuglifiedName();
+ else
+ Out << TTP->getDeclName();
+ }
if (TTP->hasDefaultArgument()) {
Out << " = ";
@@ -1760,7 +1936,8 @@ void DeclPrinter::VisitNonTypeTemplateParmDecl(
const NonTypeTemplateParmDecl *NTTP) {
StringRef Name;
if (IdentifierInfo *II = NTTP->getIdentifier())
- Name = II->getName();
+ Name =
+ Policy.CleanUglifiedParameters ? II->deuglifiedName() : II->getName();
printDeclType(NTTP->getType(), Name, NTTP->isParameterPack());
if (NTTP->hasDefaultArgument()) {
diff --git a/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp b/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp
index ec8b00a9eb7d..7d7556e670f9 100755
--- a/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp
@@ -26,8 +26,8 @@
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/None.h"
#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
@@ -35,6 +35,7 @@
#include <cassert>
#include <cstdint>
#include <memory>
+#include <optional>
#include <utility>
using namespace clang;
@@ -77,7 +78,7 @@ TemplateParameterList::TemplateParameterList(const ASTContext& C,
if (TTP->hasTypeConstraint())
HasConstrainedParameters = true;
} else {
- llvm_unreachable("unexpcted template parameter type");
+ llvm_unreachable("unexpected template parameter type");
}
// FIXME: If a default argument contains an unexpanded parameter pack, the
// template parameter list does too.
@@ -126,11 +127,44 @@ TemplateParameterList::Create(const ASTContext &C, SourceLocation TemplateLoc,
RAngleLoc, RequiresClause);
}
+void TemplateParameterList::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &C) const {
+ const Expr *RC = getRequiresClause();
+ ID.AddBoolean(RC != nullptr);
+ if (RC)
+ RC->Profile(ID, C, /*Canonical=*/true);
+ ID.AddInteger(size());
+ for (NamedDecl *D : *this) {
+ if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(D)) {
+ ID.AddInteger(0);
+ ID.AddBoolean(NTTP->isParameterPack());
+ NTTP->getType().getCanonicalType().Profile(ID);
+ ID.AddBoolean(NTTP->hasPlaceholderTypeConstraint());
+ if (const Expr *E = NTTP->getPlaceholderTypeConstraint())
+ E->Profile(ID, C, /*Canonical=*/true);
+ continue;
+ }
+ if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(D)) {
+ ID.AddInteger(1);
+ ID.AddBoolean(TTP->isParameterPack());
+ ID.AddBoolean(TTP->hasTypeConstraint());
+ if (const TypeConstraint *TC = TTP->getTypeConstraint())
+ TC->getImmediatelyDeclaredConstraint()->Profile(ID, C,
+ /*Canonical=*/true);
+ continue;
+ }
+ const auto *TTP = cast<TemplateTemplateParmDecl>(D);
+ ID.AddInteger(2);
+ ID.AddBoolean(TTP->isParameterPack());
+ TTP->getTemplateParameters()->Profile(ID, C);
+ }
+}
+
unsigned TemplateParameterList::getMinRequiredArguments() const {
unsigned NumRequiredArgs = 0;
for (const NamedDecl *P : asArray()) {
if (P->isTemplateParameterPack()) {
- if (Optional<unsigned> Expansions = getExpandedPackSize(P)) {
+ if (std::optional<unsigned> Expansions = getExpandedPackSize(P)) {
NumRequiredArgs += *Expansions;
continue;
}
@@ -165,14 +199,20 @@ unsigned TemplateParameterList::getDepth() const {
return cast<TemplateTemplateParmDecl>(FirstParm)->getDepth();
}
-static void AdoptTemplateParameterList(TemplateParameterList *Params,
+static bool AdoptTemplateParameterList(TemplateParameterList *Params,
DeclContext *Owner) {
+ bool Invalid = false;
for (NamedDecl *P : *Params) {
P->setDeclContext(Owner);
if (const auto *TTP = dyn_cast<TemplateTemplateParmDecl>(P))
- AdoptTemplateParameterList(TTP->getTemplateParameters(), Owner);
+ if (AdoptTemplateParameterList(TTP->getTemplateParameters(), Owner))
+ Invalid = true;
+
+ if (P->isInvalidDecl())
+ Invalid = true;
}
+ return Invalid;
}
void TemplateParameterList::
@@ -196,8 +236,9 @@ bool TemplateParameterList::hasAssociatedConstraints() const {
}
bool TemplateParameterList::shouldIncludeTypeForArgument(
- const TemplateParameterList *TPL, unsigned Idx) {
- if (!TPL || Idx >= TPL->size())
+ const PrintingPolicy &Policy, const TemplateParameterList *TPL,
+ unsigned Idx) {
+ if (!TPL || Idx >= TPL->size() || Policy.AlwaysIncludeTypeForTemplateArgument)
return true;
const NamedDecl *TemplParam = TPL->getParam(Idx);
if (const auto *ParamValueDecl =
@@ -242,6 +283,16 @@ bool TemplateDecl::hasAssociatedConstraints() const {
return false;
}
+bool TemplateDecl::isTypeAlias() const {
+ switch (getKind()) {
+ case TemplateDecl::TypeAliasTemplate:
+ case TemplateDecl::BuiltinTemplate:
+ return true;
+ default:
+ return false;
+ };
+}
+
//===----------------------------------------------------------------------===//
// RedeclarableTemplateDecl Implementation
//===----------------------------------------------------------------------===//
@@ -335,18 +386,35 @@ void RedeclarableTemplateDecl::addSpecializationImpl(
SETraits::getDecl(Entry));
}
+ArrayRef<TemplateArgument> RedeclarableTemplateDecl::getInjectedTemplateArgs() {
+ TemplateParameterList *Params = getTemplateParameters();
+ auto *CommonPtr = getCommonPtr();
+ if (!CommonPtr->InjectedArgs) {
+ auto &Context = getASTContext();
+ SmallVector<TemplateArgument, 16> TemplateArgs;
+ Context.getInjectedTemplateArgs(Params, TemplateArgs);
+ CommonPtr->InjectedArgs =
+ new (Context) TemplateArgument[TemplateArgs.size()];
+ std::copy(TemplateArgs.begin(), TemplateArgs.end(),
+ CommonPtr->InjectedArgs);
+ }
+
+ return llvm::ArrayRef(CommonPtr->InjectedArgs, Params->size());
+}
+
//===----------------------------------------------------------------------===//
// FunctionTemplateDecl Implementation
//===----------------------------------------------------------------------===//
-FunctionTemplateDecl *FunctionTemplateDecl::Create(ASTContext &C,
- DeclContext *DC,
- SourceLocation L,
- DeclarationName Name,
- TemplateParameterList *Params,
- NamedDecl *Decl) {
- AdoptTemplateParameterList(Params, cast<DeclContext>(Decl));
- return new (C, DC) FunctionTemplateDecl(C, DC, L, Name, Params, Decl);
+FunctionTemplateDecl *
+FunctionTemplateDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
+ DeclarationName Name,
+ TemplateParameterList *Params, NamedDecl *Decl) {
+ bool Invalid = AdoptTemplateParameterList(Params, cast<DeclContext>(Decl));
+ auto *TD = new (C, DC) FunctionTemplateDecl(C, DC, L, Name, Params, Decl);
+ if (Invalid)
+ TD->setInvalidDecl();
+ return TD;
}
FunctionTemplateDecl *FunctionTemplateDecl::CreateDeserialized(ASTContext &C,
@@ -384,22 +452,6 @@ void FunctionTemplateDecl::addSpecialization(
InsertPos);
}
-ArrayRef<TemplateArgument> FunctionTemplateDecl::getInjectedTemplateArgs() {
- TemplateParameterList *Params = getTemplateParameters();
- Common *CommonPtr = getCommonPtr();
- if (!CommonPtr->InjectedArgs) {
- auto &Context = getASTContext();
- SmallVector<TemplateArgument, 16> TemplateArgs;
- Context.getInjectedTemplateArgs(Params, TemplateArgs);
- CommonPtr->InjectedArgs =
- new (Context) TemplateArgument[TemplateArgs.size()];
- std::copy(TemplateArgs.begin(), TemplateArgs.end(),
- CommonPtr->InjectedArgs);
- }
-
- return llvm::makeArrayRef(CommonPtr->InjectedArgs, Params->size());
-}
-
void FunctionTemplateDecl::mergePrevDecl(FunctionTemplateDecl *Prev) {
using Base = RedeclarableTemplateDecl;
@@ -438,15 +490,16 @@ void FunctionTemplateDecl::mergePrevDecl(FunctionTemplateDecl *Prev) {
// ClassTemplateDecl Implementation
//===----------------------------------------------------------------------===//
-ClassTemplateDecl *ClassTemplateDecl::Create(ASTContext &C,
- DeclContext *DC,
+ClassTemplateDecl *ClassTemplateDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation L,
DeclarationName Name,
TemplateParameterList *Params,
NamedDecl *Decl) {
- AdoptTemplateParameterList(Params, cast<DeclContext>(Decl));
-
- return new (C, DC) ClassTemplateDecl(C, DC, L, Name, Params, Decl);
+ bool Invalid = AdoptTemplateParameterList(Params, cast<DeclContext>(Decl));
+ auto *TD = new (C, DC) ClassTemplateDecl(C, DC, L, Name, Params, Decl);
+ if (Invalid)
+ TD->setInvalidDecl();
+ return TD;
}
ClassTemplateDecl *ClassTemplateDecl::CreateDeserialized(ASTContext &C,
@@ -497,44 +550,13 @@ ClassTemplateDecl::findPartialSpecialization(
TPL);
}
-static void ProfileTemplateParameterList(ASTContext &C,
- llvm::FoldingSetNodeID &ID, const TemplateParameterList *TPL) {
- const Expr *RC = TPL->getRequiresClause();
- ID.AddBoolean(RC != nullptr);
- if (RC)
- RC->Profile(ID, C, /*Canonical=*/true);
- ID.AddInteger(TPL->size());
- for (NamedDecl *D : *TPL) {
- if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(D)) {
- ID.AddInteger(0);
- ID.AddBoolean(NTTP->isParameterPack());
- NTTP->getType().getCanonicalType().Profile(ID);
- continue;
- }
- if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(D)) {
- ID.AddInteger(1);
- ID.AddBoolean(TTP->isParameterPack());
- ID.AddBoolean(TTP->hasTypeConstraint());
- if (const TypeConstraint *TC = TTP->getTypeConstraint())
- TC->getImmediatelyDeclaredConstraint()->Profile(ID, C,
- /*Canonical=*/true);
- continue;
- }
- const auto *TTP = cast<TemplateTemplateParmDecl>(D);
- ID.AddInteger(2);
- ID.AddBoolean(TTP->isParameterPack());
- ProfileTemplateParameterList(C, ID, TTP->getTemplateParameters());
- }
-}
-
-void
-ClassTemplatePartialSpecializationDecl::Profile(llvm::FoldingSetNodeID &ID,
- ArrayRef<TemplateArgument> TemplateArgs, TemplateParameterList *TPL,
- ASTContext &Context) {
+void ClassTemplatePartialSpecializationDecl::Profile(
+ llvm::FoldingSetNodeID &ID, ArrayRef<TemplateArgument> TemplateArgs,
+ TemplateParameterList *TPL, const ASTContext &Context) {
ID.AddInteger(TemplateArgs.size());
for (const TemplateArgument &TemplateArg : TemplateArgs)
TemplateArg.Profile(ID, Context);
- ProfileTemplateParameterList(Context, ID, TPL);
+ TPL->Profile(ID, Context);
}
void ClassTemplateDecl::AddPartialSpecialization(
@@ -614,13 +636,11 @@ ClassTemplateDecl::getInjectedClassNameSpecialization() {
// TemplateTypeParm Allocation/Deallocation Method Implementations
//===----------------------------------------------------------------------===//
-TemplateTypeParmDecl *
-TemplateTypeParmDecl::Create(const ASTContext &C, DeclContext *DC,
- SourceLocation KeyLoc, SourceLocation NameLoc,
- unsigned D, unsigned P, IdentifierInfo *Id,
- bool Typename, bool ParameterPack,
- bool HasTypeConstraint,
- Optional<unsigned> NumExpanded) {
+TemplateTypeParmDecl *TemplateTypeParmDecl::Create(
+ const ASTContext &C, DeclContext *DC, SourceLocation KeyLoc,
+ SourceLocation NameLoc, unsigned D, unsigned P, IdentifierInfo *Id,
+ bool Typename, bool ParameterPack, bool HasTypeConstraint,
+ std::optional<unsigned> NumExpanded) {
auto *TTPDecl =
new (C, DC,
additionalSizeToAlloc<TypeConstraint>(HasTypeConstraint ? 1 : 0))
@@ -633,9 +653,9 @@ TemplateTypeParmDecl::Create(const ASTContext &C, DeclContext *DC,
TemplateTypeParmDecl *
TemplateTypeParmDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
- return new (C, ID) TemplateTypeParmDecl(nullptr, SourceLocation(),
- SourceLocation(), nullptr, false,
- false, None);
+ return new (C, ID)
+ TemplateTypeParmDecl(nullptr, SourceLocation(), SourceLocation(), nullptr,
+ false, false, std::nullopt);
}
TemplateTypeParmDecl *
@@ -643,8 +663,8 @@ TemplateTypeParmDecl::CreateDeserialized(const ASTContext &C, unsigned ID,
bool HasTypeConstraint) {
return new (C, ID,
additionalSizeToAlloc<TypeConstraint>(HasTypeConstraint ? 1 : 0))
- TemplateTypeParmDecl(nullptr, SourceLocation(), SourceLocation(),
- nullptr, false, HasTypeConstraint, None);
+ TemplateTypeParmDecl(nullptr, SourceLocation(), SourceLocation(), nullptr,
+ false, HasTypeConstraint, std::nullopt);
}
SourceLocation TemplateTypeParmDecl::getDefaultArgumentLoc() const {
@@ -677,17 +697,15 @@ bool TemplateTypeParmDecl::isParameterPack() const {
return getTypeForDecl()->castAs<TemplateTypeParmType>()->isParameterPack();
}
-void TemplateTypeParmDecl::setTypeConstraint(NestedNameSpecifierLoc NNS,
- DeclarationNameInfo NameInfo, NamedDecl *FoundDecl, ConceptDecl *CD,
- const ASTTemplateArgumentListInfo *ArgsAsWritten,
- Expr *ImmediatelyDeclaredConstraint) {
+void TemplateTypeParmDecl::setTypeConstraint(
+ ConceptReference *Loc, Expr *ImmediatelyDeclaredConstraint) {
assert(HasTypeConstraint &&
"HasTypeConstraint=true must be passed at construction in order to "
"call setTypeConstraint");
assert(!TypeConstraintInitialized &&
"TypeConstraint was already initialized!");
- new (getTrailingObjects<TypeConstraint>()) TypeConstraint(NNS, NameInfo,
- FoundDecl, CD, ArgsAsWritten, ImmediatelyDeclaredConstraint);
+ new (getTrailingObjects<TypeConstraint>())
+ TypeConstraint(Loc, ImmediatelyDeclaredConstraint);
TypeConstraintInitialized = true;
}
@@ -758,12 +776,12 @@ NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
unsigned NumExpandedTypes,
bool HasTypeConstraint) {
auto *NTTP =
- new (C, ID, additionalSizeToAlloc<std::pair<QualType, TypeSourceInfo *>,
- Expr *>(
- NumExpandedTypes, HasTypeConstraint ? 1 : 0))
+ new (C, ID,
+ additionalSizeToAlloc<std::pair<QualType, TypeSourceInfo *>, Expr *>(
+ NumExpandedTypes, HasTypeConstraint ? 1 : 0))
NonTypeTemplateParmDecl(nullptr, SourceLocation(), SourceLocation(),
- 0, 0, nullptr, QualType(), nullptr, None,
- None);
+ 0, 0, nullptr, QualType(), nullptr,
+ std::nullopt, std::nullopt);
NTTP->NumExpandedTypes = NumExpandedTypes;
return NTTP;
}
@@ -831,7 +849,7 @@ TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
auto *TTP =
new (C, ID, additionalSizeToAlloc<TemplateParameterList *>(NumExpansions))
TemplateTemplateParmDecl(nullptr, SourceLocation(), 0, 0, nullptr,
- nullptr, None);
+ nullptr, std::nullopt);
TTP->NumExpandedParams = NumExpansions;
return TTP;
}
@@ -902,7 +920,7 @@ ClassTemplateSpecializationDecl(ASTContext &Context, Kind DK, TagKind TK,
ClassTemplateSpecializationDecl::ClassTemplateSpecializationDecl(ASTContext &C,
Kind DK)
- : CXXRecordDecl(DK, TTK_Struct, C, nullptr, SourceLocation(),
+ : CXXRecordDecl(DK, TagTypeKind::Struct, C, nullptr, SourceLocation(),
SourceLocation(), nullptr, nullptr),
SpecializationKind(TSK_Undeclared) {}
@@ -920,6 +938,14 @@ ClassTemplateSpecializationDecl::Create(ASTContext &Context, TagKind TK,
SpecializedTemplate, Args, PrevDecl);
Result->setMayHaveOutOfDateDef(false);
+ // If the template decl is incomplete, copy the external lexical storage from
+ // the base template. This allows instantiations of incomplete types to
+ // complete using the external AST if the template's declaration came from an
+ // external AST.
+ if (!SpecializedTemplate->getTemplatedDecl()->isCompleteDefinition())
+ Result->setHasExternalLexicalStorage(
+ SpecializedTemplate->getTemplatedDecl()->hasExternalLexicalStorage());
+
Context.getTypeDeclType(Result, PrevDecl);
return Result;
}
@@ -1005,8 +1031,11 @@ ConceptDecl *ConceptDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation L, DeclarationName Name,
TemplateParameterList *Params,
Expr *ConstraintExpr) {
- AdoptTemplateParameterList(Params, DC);
- return new (C, DC) ConceptDecl(DC, L, Name, Params, ConstraintExpr);
+ bool Invalid = AdoptTemplateParameterList(Params, DC);
+ auto *TD = new (C, DC) ConceptDecl(DC, L, Name, Params, ConstraintExpr);
+ if (Invalid)
+ TD->setInvalidDecl();
+ return TD;
}
ConceptDecl *ConceptDecl::CreateDeserialized(ASTContext &C,
@@ -1019,6 +1048,44 @@ ConceptDecl *ConceptDecl::CreateDeserialized(ASTContext &C,
}
//===----------------------------------------------------------------------===//
+// ImplicitConceptSpecializationDecl Implementation
+//===----------------------------------------------------------------------===//
+ImplicitConceptSpecializationDecl::ImplicitConceptSpecializationDecl(
+ DeclContext *DC, SourceLocation SL,
+ ArrayRef<TemplateArgument> ConvertedArgs)
+ : Decl(ImplicitConceptSpecialization, DC, SL),
+ NumTemplateArgs(ConvertedArgs.size()) {
+ setTemplateArguments(ConvertedArgs);
+}
+
+ImplicitConceptSpecializationDecl::ImplicitConceptSpecializationDecl(
+ EmptyShell Empty, unsigned NumTemplateArgs)
+ : Decl(ImplicitConceptSpecialization, Empty),
+ NumTemplateArgs(NumTemplateArgs) {}
+
+ImplicitConceptSpecializationDecl *ImplicitConceptSpecializationDecl::Create(
+ const ASTContext &C, DeclContext *DC, SourceLocation SL,
+ ArrayRef<TemplateArgument> ConvertedArgs) {
+ return new (C, DC,
+ additionalSizeToAlloc<TemplateArgument>(ConvertedArgs.size()))
+ ImplicitConceptSpecializationDecl(DC, SL, ConvertedArgs);
+}
+
+ImplicitConceptSpecializationDecl *
+ImplicitConceptSpecializationDecl::CreateDeserialized(
+ const ASTContext &C, unsigned ID, unsigned NumTemplateArgs) {
+ return new (C, ID, additionalSizeToAlloc<TemplateArgument>(NumTemplateArgs))
+ ImplicitConceptSpecializationDecl(EmptyShell{}, NumTemplateArgs);
+}
+
+void ImplicitConceptSpecializationDecl::setTemplateArguments(
+ ArrayRef<TemplateArgument> Converted) {
+ assert(Converted.size() == NumTemplateArgs);
+ std::uninitialized_copy(Converted.begin(), Converted.end(),
+ getTrailingObjects<TemplateArgument>());
+}
+
+//===----------------------------------------------------------------------===//
// ClassTemplatePartialSpecializationDecl Implementation
//===----------------------------------------------------------------------===//
void ClassTemplatePartialSpecializationDecl::anchor() {}
@@ -1039,7 +1106,8 @@ ClassTemplatePartialSpecializationDecl(ASTContext &Context, TagKind TK,
SpecializedTemplate, Args, PrevDecl),
TemplateParams(Params), ArgsAsWritten(ArgInfos),
InstantiatedFromMember(nullptr, false) {
- AdoptTemplateParameterList(Params, this);
+ if (AdoptTemplateParameterList(Params, this))
+ setInvalidDecl();
}
ClassTemplatePartialSpecializationDecl *
@@ -1085,7 +1153,13 @@ FriendTemplateDecl::Create(ASTContext &Context, DeclContext *DC,
SourceLocation L,
MutableArrayRef<TemplateParameterList *> Params,
FriendUnion Friend, SourceLocation FLoc) {
- return new (Context, DC) FriendTemplateDecl(DC, L, Params, Friend, FLoc);
+ TemplateParameterList **TPL = nullptr;
+ if (!Params.empty()) {
+ TPL = new (Context) TemplateParameterList *[Params.size()];
+ llvm::copy(Params, TPL);
+ }
+ return new (Context, DC)
+ FriendTemplateDecl(DC, L, TPL, Params.size(), Friend, FLoc);
}
FriendTemplateDecl *FriendTemplateDecl::CreateDeserialized(ASTContext &C,
@@ -1097,14 +1171,15 @@ FriendTemplateDecl *FriendTemplateDecl::CreateDeserialized(ASTContext &C,
// TypeAliasTemplateDecl Implementation
//===----------------------------------------------------------------------===//
-TypeAliasTemplateDecl *TypeAliasTemplateDecl::Create(ASTContext &C,
- DeclContext *DC,
- SourceLocation L,
- DeclarationName Name,
- TemplateParameterList *Params,
- NamedDecl *Decl) {
- AdoptTemplateParameterList(Params, DC);
- return new (C, DC) TypeAliasTemplateDecl(C, DC, L, Name, Params, Decl);
+TypeAliasTemplateDecl *
+TypeAliasTemplateDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
+ DeclarationName Name,
+ TemplateParameterList *Params, NamedDecl *Decl) {
+ bool Invalid = AdoptTemplateParameterList(Params, DC);
+ auto *TD = new (C, DC) TypeAliasTemplateDecl(C, DC, L, Name, Params, Decl);
+ if (Invalid)
+ TD->setInvalidDecl();
+ return TD;
}
TypeAliasTemplateDecl *TypeAliasTemplateDecl::CreateDeserialized(ASTContext &C,
@@ -1121,19 +1196,6 @@ TypeAliasTemplateDecl::newCommon(ASTContext &C) const {
}
//===----------------------------------------------------------------------===//
-// ClassScopeFunctionSpecializationDecl Implementation
-//===----------------------------------------------------------------------===//
-
-void ClassScopeFunctionSpecializationDecl::anchor() {}
-
-ClassScopeFunctionSpecializationDecl *
-ClassScopeFunctionSpecializationDecl::CreateDeserialized(ASTContext &C,
- unsigned ID) {
- return new (C, ID) ClassScopeFunctionSpecializationDecl(
- nullptr, SourceLocation(), nullptr, nullptr);
-}
-
-//===----------------------------------------------------------------------===//
// VarTemplateDecl Implementation
//===----------------------------------------------------------------------===//
@@ -1151,8 +1213,11 @@ VarTemplateDecl *VarTemplateDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation L, DeclarationName Name,
TemplateParameterList *Params,
VarDecl *Decl) {
- AdoptTemplateParameterList(Params, DC);
- return new (C, DC) VarTemplateDecl(C, DC, L, Name, Params, Decl);
+ bool Invalid = AdoptTemplateParameterList(Params, DC);
+ auto *TD = new (C, DC) VarTemplateDecl(C, DC, L, Name, Params, Decl);
+ if (Invalid)
+ TD->setInvalidDecl();
+ return TD;
}
VarTemplateDecl *VarTemplateDecl::CreateDeserialized(ASTContext &C,
@@ -1202,14 +1267,13 @@ VarTemplateDecl::findPartialSpecialization(ArrayRef<TemplateArgument> Args,
TPL);
}
-void
-VarTemplatePartialSpecializationDecl::Profile(llvm::FoldingSetNodeID &ID,
- ArrayRef<TemplateArgument> TemplateArgs, TemplateParameterList *TPL,
- ASTContext &Context) {
+void VarTemplatePartialSpecializationDecl::Profile(
+ llvm::FoldingSetNodeID &ID, ArrayRef<TemplateArgument> TemplateArgs,
+ TemplateParameterList *TPL, const ASTContext &Context) {
ID.AddInteger(TemplateArgs.size());
for (const TemplateArgument &TemplateArg : TemplateArgs)
TemplateArg.Profile(ID, Context);
- ProfileTemplateParameterList(Context, ID, TPL);
+ TPL->Profile(ID, Context);
}
void VarTemplateDecl::AddPartialSpecialization(
@@ -1311,12 +1375,25 @@ VarTemplateDecl *VarTemplateSpecializationDecl::getSpecializedTemplate() const {
void VarTemplateSpecializationDecl::setTemplateArgsInfo(
const TemplateArgumentListInfo &ArgsInfo) {
- TemplateArgsInfo.setLAngleLoc(ArgsInfo.getLAngleLoc());
- TemplateArgsInfo.setRAngleLoc(ArgsInfo.getRAngleLoc());
- for (const TemplateArgumentLoc &Loc : ArgsInfo.arguments())
- TemplateArgsInfo.addArgument(Loc);
+ TemplateArgsInfo =
+ ASTTemplateArgumentListInfo::Create(getASTContext(), ArgsInfo);
+}
+
+void VarTemplateSpecializationDecl::setTemplateArgsInfo(
+ const ASTTemplateArgumentListInfo *ArgsInfo) {
+ TemplateArgsInfo =
+ ASTTemplateArgumentListInfo::Create(getASTContext(), ArgsInfo);
}
+SourceRange VarTemplateSpecializationDecl::getSourceRange() const {
+ if (isExplicitSpecialization() && !hasInit()) {
+ if (const ASTTemplateArgumentListInfo *Info = getTemplateArgsInfo())
+ return SourceRange(getOuterLocStart(), Info->getRAngleLoc());
+ }
+ return VarDecl::getSourceRange();
+}
+
+
//===----------------------------------------------------------------------===//
// VarTemplatePartialSpecializationDecl Implementation
//===----------------------------------------------------------------------===//
@@ -1334,8 +1411,8 @@ VarTemplatePartialSpecializationDecl::VarTemplatePartialSpecializationDecl(
TInfo, S, Args),
TemplateParams(Params), ArgsAsWritten(ArgInfos),
InstantiatedFromMember(nullptr, false) {
- // TODO: The template parameters should be in DC by now. Verify.
- // AdoptTemplateParameterList(Params, DC);
+ if (AdoptTemplateParameterList(Params, DC))
+ setInvalidDecl();
}
VarTemplatePartialSpecializationDecl *
@@ -1362,6 +1439,14 @@ VarTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C,
return new (C, ID) VarTemplatePartialSpecializationDecl(C);
}
+SourceRange VarTemplatePartialSpecializationDecl::getSourceRange() const {
+ if (isExplicitSpecialization() && !hasInit()) {
+ if (const ASTTemplateArgumentListInfo *Info = getTemplateArgsAsWritten())
+ return SourceRange(getOuterLocStart(), Info->getRAngleLoc());
+ }
+ return VarDecl::getSourceRange();
+}
+
static TemplateParameterList *
createMakeIntegerSeqParameterList(const ASTContext &C, DeclContext *DC) {
// typename T
@@ -1429,8 +1514,8 @@ createTypePackElementParameterList(const ASTContext &C, DeclContext *DC) {
// template <std::size_t Index, typename ...T>
NamedDecl *Params[] = {Index, Ts};
return TemplateParameterList::Create(C, SourceLocation(), SourceLocation(),
- llvm::makeArrayRef(Params),
- SourceLocation(), nullptr);
+ llvm::ArrayRef(Params), SourceLocation(),
+ nullptr);
}
static TemplateParameterList *createBuiltinTemplateParameterList(
@@ -1454,19 +1539,6 @@ BuiltinTemplateDecl::BuiltinTemplateDecl(const ASTContext &C, DeclContext *DC,
createBuiltinTemplateParameterList(C, DC, BTK)),
BTK(BTK) {}
-void TypeConstraint::print(llvm::raw_ostream &OS, PrintingPolicy Policy) const {
- if (NestedNameSpec)
- NestedNameSpec.getNestedNameSpecifier()->print(OS, Policy);
- ConceptName.printName(OS, Policy);
- if (hasExplicitTemplateArgs()) {
- OS << "<";
- // FIXME: Find corresponding parameter for argument
- for (auto &ArgLoc : ArgsAsWritten->arguments())
- ArgLoc.getArgument().print(Policy, OS, /*IncludeType*/ false);
- OS << ">";
- }
-}
-
TemplateParamObjectDecl *TemplateParamObjectDecl::Create(const ASTContext &C,
QualType T,
const APValue &V) {
@@ -1483,19 +1555,81 @@ TemplateParamObjectDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return TPOD;
}
-void TemplateParamObjectDecl::printName(llvm::raw_ostream &OS) const {
+void TemplateParamObjectDecl::printName(llvm::raw_ostream &OS,
+ const PrintingPolicy &Policy) const {
OS << "<template param ";
- printAsExpr(OS);
+ printAsExpr(OS, Policy);
OS << ">";
}
void TemplateParamObjectDecl::printAsExpr(llvm::raw_ostream &OS) const {
- const ASTContext &Ctx = getASTContext();
- getType().getUnqualifiedType().print(OS, Ctx.getPrintingPolicy());
- printAsInit(OS);
+ printAsExpr(OS, getASTContext().getPrintingPolicy());
+}
+
+void TemplateParamObjectDecl::printAsExpr(llvm::raw_ostream &OS,
+ const PrintingPolicy &Policy) const {
+ getType().getUnqualifiedType().print(OS, Policy);
+ printAsInit(OS, Policy);
}
void TemplateParamObjectDecl::printAsInit(llvm::raw_ostream &OS) const {
- const ASTContext &Ctx = getASTContext();
- getValue().printPretty(OS, Ctx, getType());
+ printAsInit(OS, getASTContext().getPrintingPolicy());
+}
+
+void TemplateParamObjectDecl::printAsInit(llvm::raw_ostream &OS,
+ const PrintingPolicy &Policy) const {
+ getValue().printPretty(OS, Policy, getType(), &getASTContext());
+}
+
+TemplateParameterList *clang::getReplacedTemplateParameterList(Decl *D) {
+ switch (D->getKind()) {
+ case Decl::Kind::ClassTemplate:
+ return cast<ClassTemplateDecl>(D)->getTemplateParameters();
+ case Decl::Kind::ClassTemplateSpecialization: {
+ const auto *CTSD = cast<ClassTemplateSpecializationDecl>(D);
+ auto P = CTSD->getSpecializedTemplateOrPartial();
+ if (const auto *CTPSD =
+ P.dyn_cast<ClassTemplatePartialSpecializationDecl *>())
+ return CTPSD->getTemplateParameters();
+ return cast<ClassTemplateDecl *>(P)->getTemplateParameters();
+ }
+ case Decl::Kind::ClassTemplatePartialSpecialization:
+ return cast<ClassTemplatePartialSpecializationDecl>(D)
+ ->getTemplateParameters();
+ case Decl::Kind::TypeAliasTemplate:
+ return cast<TypeAliasTemplateDecl>(D)->getTemplateParameters();
+ case Decl::Kind::BuiltinTemplate:
+ return cast<BuiltinTemplateDecl>(D)->getTemplateParameters();
+ case Decl::Kind::CXXDeductionGuide:
+ case Decl::Kind::CXXConversion:
+ case Decl::Kind::CXXConstructor:
+ case Decl::Kind::CXXDestructor:
+ case Decl::Kind::CXXMethod:
+ case Decl::Kind::Function:
+ return cast<FunctionDecl>(D)
+ ->getTemplateSpecializationInfo()
+ ->getTemplate()
+ ->getTemplateParameters();
+ case Decl::Kind::FunctionTemplate:
+ return cast<FunctionTemplateDecl>(D)->getTemplateParameters();
+ case Decl::Kind::VarTemplate:
+ return cast<VarTemplateDecl>(D)->getTemplateParameters();
+ case Decl::Kind::VarTemplateSpecialization: {
+ const auto *VTSD = cast<VarTemplateSpecializationDecl>(D);
+ auto P = VTSD->getSpecializedTemplateOrPartial();
+ if (const auto *VTPSD =
+ P.dyn_cast<VarTemplatePartialSpecializationDecl *>())
+ return VTPSD->getTemplateParameters();
+ return cast<VarTemplateDecl *>(P)->getTemplateParameters();
+ }
+ case Decl::Kind::VarTemplatePartialSpecialization:
+ return cast<VarTemplatePartialSpecializationDecl>(D)
+ ->getTemplateParameters();
+ case Decl::Kind::TemplateTemplateParm:
+ return cast<TemplateTemplateParmDecl>(D)->getTemplateParameters();
+ case Decl::Kind::Concept:
+ return cast<ConceptDecl>(D)->getTemplateParameters();
+ default:
+ llvm_unreachable("Unhandled templated declaration kind");
+ }
}
diff --git a/contrib/llvm-project/clang/lib/AST/DeclarationName.cpp b/contrib/llvm-project/clang/lib/AST/DeclarationName.cpp
index 56cf4b457a48..a3ac5551e0cc 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclarationName.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclarationName.cpp
@@ -72,15 +72,9 @@ int DeclarationName::compare(DeclarationName LHS, DeclarationName RHS) {
}
unsigned LN = LHSSelector.getNumArgs(), RN = RHSSelector.getNumArgs();
for (unsigned I = 0, N = std::min(LN, RN); I != N; ++I) {
- switch (LHSSelector.getNameForSlot(I).compare(
- RHSSelector.getNameForSlot(I))) {
- case -1:
- return -1;
- case 1:
- return 1;
- default:
- break;
- }
+ if (int Compare = LHSSelector.getNameForSlot(I).compare(
+ RHSSelector.getNameForSlot(I)))
+ return Compare;
}
return compareInt(LN, RN);
@@ -123,12 +117,12 @@ static void printCXXConstructorDestructorName(QualType ClassType,
Policy.adjustForCPlusPlus();
if (const RecordType *ClassRec = ClassType->getAs<RecordType>()) {
- OS << *ClassRec->getDecl();
+ ClassRec->getDecl()->printName(OS, Policy);
return;
}
if (Policy.SuppressTemplateArgsInCXXConstructors) {
if (auto *InjTy = ClassType->getAs<InjectedClassNameType>()) {
- OS << *InjTy->getDecl();
+ InjTy->getDecl()->printName(OS, Policy);
return;
}
}
@@ -236,7 +230,7 @@ std::string DeclarationName::getAsString() const {
std::string Result;
llvm::raw_string_ostream OS(Result);
OS << *this;
- return OS.str();
+ return Result;
}
void *DeclarationName::getFETokenInfoSlow() const {
@@ -371,7 +365,7 @@ DeclarationNameTable::getCXXSpecialName(DeclarationName::NameKind Kind,
}
DeclarationName
-DeclarationNameTable::getCXXLiteralOperatorName(IdentifierInfo *II) {
+DeclarationNameTable::getCXXLiteralOperatorName(const IdentifierInfo *II) {
llvm::FoldingSetNodeID ID;
ID.AddPointer(II);
@@ -460,7 +454,7 @@ std::string DeclarationNameInfo::getAsString() const {
std::string Result;
llvm::raw_string_ostream OS(Result);
OS << *this;
- return OS.str();
+ return Result;
}
raw_ostream &clang::operator<<(raw_ostream &OS, DeclarationNameInfo DNInfo) {
diff --git a/contrib/llvm-project/clang/lib/AST/Expr.cpp b/contrib/llvm-project/clang/lib/AST/Expr.cpp
index 11f10d4695fc..f1efa98e175e 100644
--- a/contrib/llvm-project/clang/lib/AST/Expr.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Expr.cpp
@@ -31,11 +31,13 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/Preprocessor.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cstring>
+#include <optional>
using namespace clang;
const Expr *Expr::getBestDynamicClassTypeExpr() const {
@@ -202,6 +204,42 @@ bool Expr::isKnownToHaveBooleanValue(bool Semantic) const {
return false;
}
+bool Expr::isFlexibleArrayMemberLike(
+ ASTContext &Ctx,
+ LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel,
+ bool IgnoreTemplateOrMacroSubstitution) const {
+ const Expr *E = IgnoreParens();
+ const Decl *D = nullptr;
+
+ if (const auto *ME = dyn_cast<MemberExpr>(E))
+ D = ME->getMemberDecl();
+ else if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
+ D = DRE->getDecl();
+ else if (const auto *IRE = dyn_cast<ObjCIvarRefExpr>(E))
+ D = IRE->getDecl();
+
+ return Decl::isFlexibleArrayMemberLike(Ctx, D, E->getType(),
+ StrictFlexArraysLevel,
+ IgnoreTemplateOrMacroSubstitution);
+}
+
+const ValueDecl *
+Expr::getAsBuiltinConstantDeclRef(const ASTContext &Context) const {
+ Expr::EvalResult Eval;
+
+ if (EvaluateAsConstantExpr(Eval, Context)) {
+ APValue &Value = Eval.Val;
+
+ if (Value.isMemberPointer())
+ return Value.getMemberPointerDecl();
+
+ if (Value.isLValue() && Value.getLValueOffset().isZero())
+ return Value.getLValueBase().dyn_cast<const ValueDecl *>();
+ }
+
+ return nullptr;
+}
+
// Amusing macro metaprogramming hack: check whether a class provides
// a more specific implementation of getExprLoc().
//
@@ -243,85 +281,86 @@ SourceLocation Expr::getExprLoc() const {
// Primary Expressions.
//===----------------------------------------------------------------------===//
-static void AssertResultStorageKind(ConstantExpr::ResultStorageKind Kind) {
- assert((Kind == ConstantExpr::RSK_APValue ||
- Kind == ConstantExpr::RSK_Int64 || Kind == ConstantExpr::RSK_None) &&
+static void AssertResultStorageKind(ConstantResultStorageKind Kind) {
+ assert((Kind == ConstantResultStorageKind::APValue ||
+ Kind == ConstantResultStorageKind::Int64 ||
+ Kind == ConstantResultStorageKind::None) &&
"Invalid StorageKind Value");
(void)Kind;
}
-ConstantExpr::ResultStorageKind
-ConstantExpr::getStorageKind(const APValue &Value) {
+ConstantResultStorageKind ConstantExpr::getStorageKind(const APValue &Value) {
switch (Value.getKind()) {
case APValue::None:
case APValue::Indeterminate:
- return ConstantExpr::RSK_None;
+ return ConstantResultStorageKind::None;
case APValue::Int:
if (!Value.getInt().needsCleanup())
- return ConstantExpr::RSK_Int64;
- LLVM_FALLTHROUGH;
+ return ConstantResultStorageKind::Int64;
+ [[fallthrough]];
default:
- return ConstantExpr::RSK_APValue;
+ return ConstantResultStorageKind::APValue;
}
}
-ConstantExpr::ResultStorageKind
+ConstantResultStorageKind
ConstantExpr::getStorageKind(const Type *T, const ASTContext &Context) {
if (T->isIntegralOrEnumerationType() && Context.getTypeInfo(T).Width <= 64)
- return ConstantExpr::RSK_Int64;
- return ConstantExpr::RSK_APValue;
+ return ConstantResultStorageKind::Int64;
+ return ConstantResultStorageKind::APValue;
}
-ConstantExpr::ConstantExpr(Expr *SubExpr, ResultStorageKind StorageKind,
+ConstantExpr::ConstantExpr(Expr *SubExpr, ConstantResultStorageKind StorageKind,
bool IsImmediateInvocation)
: FullExpr(ConstantExprClass, SubExpr) {
- ConstantExprBits.ResultKind = StorageKind;
+ ConstantExprBits.ResultKind = llvm::to_underlying(StorageKind);
ConstantExprBits.APValueKind = APValue::None;
ConstantExprBits.IsUnsigned = false;
ConstantExprBits.BitWidth = 0;
ConstantExprBits.HasCleanup = false;
ConstantExprBits.IsImmediateInvocation = IsImmediateInvocation;
- if (StorageKind == ConstantExpr::RSK_APValue)
+ if (StorageKind == ConstantResultStorageKind::APValue)
::new (getTrailingObjects<APValue>()) APValue();
}
ConstantExpr *ConstantExpr::Create(const ASTContext &Context, Expr *E,
- ResultStorageKind StorageKind,
+ ConstantResultStorageKind StorageKind,
bool IsImmediateInvocation) {
assert(!isa<ConstantExpr>(E));
AssertResultStorageKind(StorageKind);
unsigned Size = totalSizeToAlloc<APValue, uint64_t>(
- StorageKind == ConstantExpr::RSK_APValue,
- StorageKind == ConstantExpr::RSK_Int64);
+ StorageKind == ConstantResultStorageKind::APValue,
+ StorageKind == ConstantResultStorageKind::Int64);
void *Mem = Context.Allocate(Size, alignof(ConstantExpr));
return new (Mem) ConstantExpr(E, StorageKind, IsImmediateInvocation);
}
ConstantExpr *ConstantExpr::Create(const ASTContext &Context, Expr *E,
const APValue &Result) {
- ResultStorageKind StorageKind = getStorageKind(Result);
+ ConstantResultStorageKind StorageKind = getStorageKind(Result);
ConstantExpr *Self = Create(Context, E, StorageKind);
Self->SetResult(Result, Context);
return Self;
}
-ConstantExpr::ConstantExpr(EmptyShell Empty, ResultStorageKind StorageKind)
+ConstantExpr::ConstantExpr(EmptyShell Empty,
+ ConstantResultStorageKind StorageKind)
: FullExpr(ConstantExprClass, Empty) {
- ConstantExprBits.ResultKind = StorageKind;
+ ConstantExprBits.ResultKind = llvm::to_underlying(StorageKind);
- if (StorageKind == ConstantExpr::RSK_APValue)
+ if (StorageKind == ConstantResultStorageKind::APValue)
::new (getTrailingObjects<APValue>()) APValue();
}
ConstantExpr *ConstantExpr::CreateEmpty(const ASTContext &Context,
- ResultStorageKind StorageKind) {
+ ConstantResultStorageKind StorageKind) {
AssertResultStorageKind(StorageKind);
unsigned Size = totalSizeToAlloc<APValue, uint64_t>(
- StorageKind == ConstantExpr::RSK_APValue,
- StorageKind == ConstantExpr::RSK_Int64);
+ StorageKind == ConstantResultStorageKind::APValue,
+ StorageKind == ConstantResultStorageKind::Int64);
void *Mem = Context.Allocate(Size, alignof(ConstantExpr));
return new (Mem) ConstantExpr(EmptyShell(), StorageKind);
}
@@ -330,15 +369,15 @@ void ConstantExpr::MoveIntoResult(APValue &Value, const ASTContext &Context) {
assert((unsigned)getStorageKind(Value) <= ConstantExprBits.ResultKind &&
"Invalid storage for this value kind");
ConstantExprBits.APValueKind = Value.getKind();
- switch (ConstantExprBits.ResultKind) {
- case RSK_None:
+ switch (getResultStorageKind()) {
+ case ConstantResultStorageKind::None:
return;
- case RSK_Int64:
+ case ConstantResultStorageKind::Int64:
Int64Result() = *Value.getInt().getRawData();
ConstantExprBits.BitWidth = Value.getInt().getBitWidth();
ConstantExprBits.IsUnsigned = Value.getInt().isUnsigned();
return;
- case RSK_APValue:
+ case ConstantResultStorageKind::APValue:
if (!ConstantExprBits.HasCleanup && Value.needsCleanup()) {
ConstantExprBits.HasCleanup = true;
Context.addDestruction(&APValueResult());
@@ -350,10 +389,10 @@ void ConstantExpr::MoveIntoResult(APValue &Value, const ASTContext &Context) {
}
llvm::APSInt ConstantExpr::getResultAsAPSInt() const {
- switch (ConstantExprBits.ResultKind) {
- case ConstantExpr::RSK_APValue:
+ switch (getResultStorageKind()) {
+ case ConstantResultStorageKind::APValue:
return APValueResult().getInt();
- case ConstantExpr::RSK_Int64:
+ case ConstantResultStorageKind::Int64:
return llvm::APSInt(llvm::APInt(ConstantExprBits.BitWidth, Int64Result()),
ConstantExprBits.IsUnsigned);
default:
@@ -363,14 +402,14 @@ llvm::APSInt ConstantExpr::getResultAsAPSInt() const {
APValue ConstantExpr::getAPValueResult() const {
- switch (ConstantExprBits.ResultKind) {
- case ConstantExpr::RSK_APValue:
+ switch (getResultStorageKind()) {
+ case ConstantResultStorageKind::APValue:
return APValueResult();
- case ConstantExpr::RSK_Int64:
+ case ConstantResultStorageKind::Int64:
return APValue(
llvm::APSInt(llvm::APInt(ConstantExprBits.BitWidth, Int64Result()),
ConstantExprBits.IsUnsigned));
- case ConstantExpr::RSK_None:
+ case ConstantResultStorageKind::None:
if (ConstantExprBits.APValueKind == APValue::Indeterminate)
return APValue::IndeterminateValue();
return APValue();
@@ -390,7 +429,9 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx, ValueDecl *D,
DeclRefExprBits.HadMultipleCandidates = false;
DeclRefExprBits.RefersToEnclosingVariableOrCapture =
RefersToEnclosingVariableOrCapture;
+ DeclRefExprBits.CapturedByCopyInLambdaWithExplicitObjectParameter = false;
DeclRefExprBits.NonOdrUseReason = NOUR;
+ DeclRefExprBits.IsImmediateEscalating = false;
DeclRefExprBits.Loc = L;
setDependence(computeDependence(this, Ctx));
}
@@ -416,6 +457,7 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx,
= (TemplateArgs || TemplateKWLoc.isValid()) ? 1 : 0;
DeclRefExprBits.RefersToEnclosingVariableOrCapture =
RefersToEnclosingVariableOrCapture;
+ DeclRefExprBits.CapturedByCopyInLambdaWithExplicitObjectParameter = false;
DeclRefExprBits.NonOdrUseReason = NOUR;
if (TemplateArgs) {
auto Deps = TemplateArgumentDependence::None;
@@ -428,6 +470,7 @@ DeclRefExpr::DeclRefExpr(const ASTContext &Ctx,
getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
TemplateKWLoc);
}
+ DeclRefExprBits.IsImmediateEscalating = false;
DeclRefExprBits.HadMultipleCandidates = 0;
setDependence(computeDependence(this, Ctx));
}
@@ -544,40 +587,33 @@ std::string SYCLUniqueStableNameExpr::ComputeName(ASTContext &Context) const {
std::string SYCLUniqueStableNameExpr::ComputeName(ASTContext &Context,
QualType Ty) {
auto MangleCallback = [](ASTContext &Ctx,
- const NamedDecl *ND) -> llvm::Optional<unsigned> {
- // This replaces the 'lambda number' in the mangling with a unique number
- // based on its order in the declaration. To provide some level of visual
- // notability (actual uniqueness from normal lambdas isn't necessary, as
- // these are used differently), we add 10,000 to the number.
- // For example:
- // _ZTSZ3foovEUlvE10005_
- // Demangles to: typeinfo name for foo()::'lambda10005'()
- // Note that the mangler subtracts 2, since with normal lambdas the lambda
- // mangling number '0' is an anonymous struct mangle, and '1' is omitted.
- // So 10,002 results in the first number being 10,000.
- if (Ctx.IsSYCLKernelNamingDecl(ND))
- return 10'002 + Ctx.GetSYCLKernelNamingIndex(ND);
- return llvm::None;
+ const NamedDecl *ND) -> std::optional<unsigned> {
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(ND))
+ return RD->getDeviceLambdaManglingNumber();
+ return std::nullopt;
};
+
std::unique_ptr<MangleContext> Ctx{ItaniumMangleContext::create(
Context, Context.getDiagnostics(), MangleCallback)};
std::string Buffer;
Buffer.reserve(128);
llvm::raw_string_ostream Out(Buffer);
- Ctx->mangleTypeName(Ty, Out);
+ Ctx->mangleCanonicalTypeName(Ty, Out);
return Out.str();
}
-PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FNTy, IdentKind IK,
+PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FNTy,
+ PredefinedIdentKind IK, bool IsTransparent,
StringLiteral *SL)
: Expr(PredefinedExprClass, FNTy, VK_LValue, OK_Ordinary) {
- PredefinedExprBits.Kind = IK;
+ PredefinedExprBits.Kind = llvm::to_underlying(IK);
assert((getIdentKind() == IK) &&
"IdentKind do not fit in PredefinedExprBitfields!");
bool HasFunctionName = SL != nullptr;
PredefinedExprBits.HasFunctionName = HasFunctionName;
+ PredefinedExprBits.IsTransparent = IsTransparent;
PredefinedExprBits.Loc = L;
if (HasFunctionName)
setFunctionName(SL);
@@ -590,12 +626,12 @@ PredefinedExpr::PredefinedExpr(EmptyShell Empty, bool HasFunctionName)
}
PredefinedExpr *PredefinedExpr::Create(const ASTContext &Ctx, SourceLocation L,
- QualType FNTy, IdentKind IK,
- StringLiteral *SL) {
+ QualType FNTy, PredefinedIdentKind IK,
+ bool IsTransparent, StringLiteral *SL) {
bool HasFunctionName = SL != nullptr;
void *Mem = Ctx.Allocate(totalSizeToAlloc<Stmt *>(HasFunctionName),
alignof(PredefinedExpr));
- return new (Mem) PredefinedExpr(L, FNTy, IK, SL);
+ return new (Mem) PredefinedExpr(L, FNTy, IK, IsTransparent, SL);
}
PredefinedExpr *PredefinedExpr::CreateEmpty(const ASTContext &Ctx,
@@ -605,23 +641,23 @@ PredefinedExpr *PredefinedExpr::CreateEmpty(const ASTContext &Ctx,
return new (Mem) PredefinedExpr(EmptyShell(), HasFunctionName);
}
-StringRef PredefinedExpr::getIdentKindName(PredefinedExpr::IdentKind IK) {
+StringRef PredefinedExpr::getIdentKindName(PredefinedIdentKind IK) {
switch (IK) {
- case Func:
+ case PredefinedIdentKind::Func:
return "__func__";
- case Function:
+ case PredefinedIdentKind::Function:
return "__FUNCTION__";
- case FuncDName:
+ case PredefinedIdentKind::FuncDName:
return "__FUNCDNAME__";
- case LFunction:
+ case PredefinedIdentKind::LFunction:
return "L__FUNCTION__";
- case PrettyFunction:
+ case PredefinedIdentKind::PrettyFunction:
return "__PRETTY_FUNCTION__";
- case FuncSig:
+ case PredefinedIdentKind::FuncSig:
return "__FUNCSIG__";
- case LFuncSig:
+ case PredefinedIdentKind::LFuncSig:
return "L__FUNCSIG__";
- case PrettyFunctionNoVirtual:
+ case PredefinedIdentKind::PrettyFunctionNoVirtual:
break;
}
llvm_unreachable("Unknown ident kind for PredefinedExpr");
@@ -629,10 +665,11 @@ StringRef PredefinedExpr::getIdentKindName(PredefinedExpr::IdentKind IK) {
// FIXME: Maybe this should use DeclPrinter with a special "print predefined
// expr" policy instead.
-std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
+std::string PredefinedExpr::ComputeName(PredefinedIdentKind IK,
+ const Decl *CurrentDecl) {
ASTContext &Context = CurrentDecl->getASTContext();
- if (IK == PredefinedExpr::FuncDName) {
+ if (IK == PredefinedIdentKind::FuncDName) {
if (const NamedDecl *ND = dyn_cast<NamedDecl>(CurrentDecl)) {
std::unique_ptr<MangleContext> MC;
MC.reset(Context.createMangleContext());
@@ -653,7 +690,7 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
if (!Buffer.empty() && Buffer.front() == '\01')
return std::string(Buffer.substr(1));
- return std::string(Buffer.str());
+ return std::string(Buffer);
}
return std::string(ND->getIdentifier()->getName());
}
@@ -677,21 +714,37 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
return std::string(Out.str());
}
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CurrentDecl)) {
- if (IK != PrettyFunction && IK != PrettyFunctionNoVirtual &&
- IK != FuncSig && IK != LFuncSig)
+ if (IK != PredefinedIdentKind::PrettyFunction &&
+ IK != PredefinedIdentKind::PrettyFunctionNoVirtual &&
+ IK != PredefinedIdentKind::FuncSig &&
+ IK != PredefinedIdentKind::LFuncSig)
return FD->getNameAsString();
SmallString<256> Name;
llvm::raw_svector_ostream Out(Name);
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
- if (MD->isVirtual() && IK != PrettyFunctionNoVirtual)
+ if (MD->isVirtual() && IK != PredefinedIdentKind::PrettyFunctionNoVirtual)
Out << "virtual ";
if (MD->isStatic())
Out << "static ";
}
+ class PrettyCallbacks final : public PrintingCallbacks {
+ public:
+ PrettyCallbacks(const LangOptions &LO) : LO(LO) {}
+ std::string remapPath(StringRef Path) const override {
+ SmallString<128> p(Path);
+ LO.remapPathPrefix(p);
+ return std::string(p);
+ }
+
+ private:
+ const LangOptions &LO;
+ };
PrintingPolicy Policy(Context.getLangOpts());
+ PrettyCallbacks PrettyCB(Context.getLangOpts());
+ Policy.Callbacks = &PrettyCB;
std::string Proto;
llvm::raw_string_ostream POut(Proto);
@@ -703,7 +756,8 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
if (FD->hasWrittenPrototype())
FT = dyn_cast<FunctionProtoType>(AFT);
- if (IK == FuncSig || IK == LFuncSig) {
+ if (IK == PredefinedIdentKind::FuncSig ||
+ IK == PredefinedIdentKind::LFuncSig) {
switch (AFT->getCallConv()) {
case CC_C: POut << "__cdecl "; break;
case CC_X86StdCall: POut << "__stdcall "; break;
@@ -728,7 +782,8 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
if (FT->isVariadic()) {
if (FD->getNumParams()) POut << ", ";
POut << "...";
- } else if ((IK == FuncSig || IK == LFuncSig ||
+ } else if ((IK == PredefinedIdentKind::FuncSig ||
+ IK == PredefinedIdentKind::LFuncSig ||
!Context.getLangOpts().CPlusPlus) &&
!Decl->getNumParams()) {
POut << "void";
@@ -762,19 +817,18 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
std::string TemplateParams;
llvm::raw_string_ostream TOut(TemplateParams);
- for (SpecsTy::reverse_iterator I = Specs.rbegin(), E = Specs.rend();
- I != E; ++I) {
- const TemplateParameterList *Params
- = (*I)->getSpecializedTemplate()->getTemplateParameters();
- const TemplateArgumentList &Args = (*I)->getTemplateArgs();
+ for (const ClassTemplateSpecializationDecl *D : llvm::reverse(Specs)) {
+ const TemplateParameterList *Params =
+ D->getSpecializedTemplate()->getTemplateParameters();
+ const TemplateArgumentList &Args = D->getTemplateArgs();
assert(Params->size() == Args.size());
for (unsigned i = 0, numParams = Params->size(); i != numParams; ++i) {
StringRef Param = Params->getParam(i)->getName();
if (Param.empty()) continue;
TOut << Param << " = ";
- Args.get(i).print(
- Policy, TOut,
- TemplateParameterList::shouldIncludeTypeForArgument(Params, i));
+ Args.get(i).print(Policy, TOut,
+ TemplateParameterList::shouldIncludeTypeForArgument(
+ Policy, Params, i));
TOut << ", ";
}
}
@@ -854,7 +908,8 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
return std::string(Name);
}
- if (isa<TranslationUnitDecl>(CurrentDecl) && IK == PrettyFunction) {
+ if (isa<TranslationUnitDecl>(CurrentDecl) &&
+ IK == PredefinedIdentKind::PrettyFunction) {
// __PRETTY_FUNCTION__ -> "top level", the others produce an empty string.
return "top level";
}
@@ -931,67 +986,37 @@ std::string FixedPointLiteral::getValueAsString(unsigned Radix) const {
SmallString<64> S;
FixedPointValueToString(
S, llvm::APSInt::getUnsigned(getValue().getZExtValue()), Scale);
- return std::string(S.str());
+ return std::string(S);
}
-void CharacterLiteral::print(unsigned Val, CharacterKind Kind,
+void CharacterLiteral::print(unsigned Val, CharacterLiteralKind Kind,
raw_ostream &OS) {
switch (Kind) {
- case CharacterLiteral::Ascii:
+ case CharacterLiteralKind::Ascii:
break; // no prefix.
- case CharacterLiteral::Wide:
+ case CharacterLiteralKind::Wide:
OS << 'L';
break;
- case CharacterLiteral::UTF8:
+ case CharacterLiteralKind::UTF8:
OS << "u8";
break;
- case CharacterLiteral::UTF16:
+ case CharacterLiteralKind::UTF16:
OS << 'u';
break;
- case CharacterLiteral::UTF32:
+ case CharacterLiteralKind::UTF32:
OS << 'U';
break;
}
- switch (Val) {
- case '\\':
- OS << "'\\\\'";
- break;
- case '\'':
- OS << "'\\''";
- break;
- case '\a':
- // TODO: K&R: the meaning of '\\a' is different in traditional C
- OS << "'\\a'";
- break;
- case '\b':
- OS << "'\\b'";
- break;
- // Nonstandard escape sequence.
- /*case '\e':
- OS << "'\\e'";
- break;*/
- case '\f':
- OS << "'\\f'";
- break;
- case '\n':
- OS << "'\\n'";
- break;
- case '\r':
- OS << "'\\r'";
- break;
- case '\t':
- OS << "'\\t'";
- break;
- case '\v':
- OS << "'\\v'";
- break;
- default:
+ StringRef Escaped = escapeCStyle<EscapeChar::Single>(Val);
+ if (!Escaped.empty()) {
+ OS << "'" << Escaped << "'";
+ } else {
// A character literal might be sign-extended, which
// would result in an invalid \U escape sequence.
// FIXME: multicharacter literals such as '\xFF\xFF\xFF\xFF'
// are not correctly handled.
- if ((Val & ~0xFFu) == ~0xFFu && Kind == CharacterLiteral::Ascii)
+ if ((Val & ~0xFFu) == ~0xFFu && Kind == CharacterLiteralKind::Ascii)
Val &= 0xFFu;
if (Val < 256 && isPrintable((unsigned char)Val))
OS << "'" << (char)Val << "'";
@@ -1042,22 +1067,24 @@ double FloatingLiteral::getValueAsApproximateDouble() const {
}
unsigned StringLiteral::mapCharByteWidth(TargetInfo const &Target,
- StringKind SK) {
+ StringLiteralKind SK) {
unsigned CharByteWidth = 0;
switch (SK) {
- case Ascii:
- case UTF8:
+ case StringLiteralKind::Ordinary:
+ case StringLiteralKind::UTF8:
CharByteWidth = Target.getCharWidth();
break;
- case Wide:
+ case StringLiteralKind::Wide:
CharByteWidth = Target.getWCharWidth();
break;
- case UTF16:
+ case StringLiteralKind::UTF16:
CharByteWidth = Target.getChar16Width();
break;
- case UTF32:
+ case StringLiteralKind::UTF32:
CharByteWidth = Target.getChar32Width();
break;
+ case StringLiteralKind::Unevaluated:
+ return sizeof(char); // Host;
}
assert((CharByteWidth & 7) == 0 && "Assumes character size is byte multiple");
CharByteWidth /= 8;
@@ -1067,39 +1094,49 @@ unsigned StringLiteral::mapCharByteWidth(TargetInfo const &Target,
}
StringLiteral::StringLiteral(const ASTContext &Ctx, StringRef Str,
- StringKind Kind, bool Pascal, QualType Ty,
+ StringLiteralKind Kind, bool Pascal, QualType Ty,
const SourceLocation *Loc,
unsigned NumConcatenated)
: Expr(StringLiteralClass, Ty, VK_LValue, OK_Ordinary) {
- assert(Ctx.getAsConstantArrayType(Ty) &&
- "StringLiteral must be of constant array type!");
- unsigned CharByteWidth = mapCharByteWidth(Ctx.getTargetInfo(), Kind);
- unsigned ByteLength = Str.size();
- assert((ByteLength % CharByteWidth == 0) &&
- "The size of the data must be a multiple of CharByteWidth!");
-
- // Avoid the expensive division. The compiler should be able to figure it
- // out by itself. However as of clang 7, even with the appropriate
- // llvm_unreachable added just here, it is not able to do so.
- unsigned Length;
- switch (CharByteWidth) {
- case 1:
- Length = ByteLength;
- break;
- case 2:
- Length = ByteLength / 2;
- break;
- case 4:
- Length = ByteLength / 4;
- break;
- default:
- llvm_unreachable("Unsupported character width!");
- }
- StringLiteralBits.Kind = Kind;
- StringLiteralBits.CharByteWidth = CharByteWidth;
- StringLiteralBits.IsPascal = Pascal;
+ unsigned Length = Str.size();
+
+ StringLiteralBits.Kind = llvm::to_underlying(Kind);
StringLiteralBits.NumConcatenated = NumConcatenated;
+
+ if (Kind != StringLiteralKind::Unevaluated) {
+ assert(Ctx.getAsConstantArrayType(Ty) &&
+ "StringLiteral must be of constant array type!");
+ unsigned CharByteWidth = mapCharByteWidth(Ctx.getTargetInfo(), Kind);
+ unsigned ByteLength = Str.size();
+ assert((ByteLength % CharByteWidth == 0) &&
+ "The size of the data must be a multiple of CharByteWidth!");
+
+ // Avoid the expensive division. The compiler should be able to figure it
+ // out by itself. However as of clang 7, even with the appropriate
+ // llvm_unreachable added just here, it is not able to do so.
+ switch (CharByteWidth) {
+ case 1:
+ Length = ByteLength;
+ break;
+ case 2:
+ Length = ByteLength / 2;
+ break;
+ case 4:
+ Length = ByteLength / 4;
+ break;
+ default:
+ llvm_unreachable("Unsupported character width!");
+ }
+
+ StringLiteralBits.CharByteWidth = CharByteWidth;
+ StringLiteralBits.IsPascal = Pascal;
+ } else {
+ assert(!Pascal && "Can't make an unevaluated Pascal string");
+ StringLiteralBits.CharByteWidth = 1;
+ StringLiteralBits.IsPascal = false;
+ }
+
*getTrailingObjects<unsigned>() = Length;
// Initialize the trailing array of SourceLocation.
@@ -1108,7 +1145,7 @@ StringLiteral::StringLiteral(const ASTContext &Ctx, StringRef Str,
NumConcatenated * sizeof(SourceLocation));
// Initialize the trailing array of char holding the string data.
- std::memcpy(getTrailingObjects<char>(), Str.data(), ByteLength);
+ std::memcpy(getTrailingObjects<char>(), Str.data(), Str.size());
setDependence(ExprDependence::None);
}
@@ -1122,8 +1159,8 @@ StringLiteral::StringLiteral(EmptyShell Empty, unsigned NumConcatenated,
}
StringLiteral *StringLiteral::Create(const ASTContext &Ctx, StringRef Str,
- StringKind Kind, bool Pascal, QualType Ty,
- const SourceLocation *Loc,
+ StringLiteralKind Kind, bool Pascal,
+ QualType Ty, const SourceLocation *Loc,
unsigned NumConcatenated) {
void *Mem = Ctx.Allocate(totalSizeToAlloc<unsigned, SourceLocation, char>(
1, NumConcatenated, Str.size()),
@@ -1145,25 +1182,36 @@ StringLiteral *StringLiteral::CreateEmpty(const ASTContext &Ctx,
void StringLiteral::outputString(raw_ostream &OS) const {
switch (getKind()) {
- case Ascii: break; // no prefix.
- case Wide: OS << 'L'; break;
- case UTF8: OS << "u8"; break;
- case UTF16: OS << 'u'; break;
- case UTF32: OS << 'U'; break;
+ case StringLiteralKind::Unevaluated:
+ case StringLiteralKind::Ordinary:
+ break; // no prefix.
+ case StringLiteralKind::Wide:
+ OS << 'L';
+ break;
+ case StringLiteralKind::UTF8:
+ OS << "u8";
+ break;
+ case StringLiteralKind::UTF16:
+ OS << 'u';
+ break;
+ case StringLiteralKind::UTF32:
+ OS << 'U';
+ break;
}
OS << '"';
static const char Hex[] = "0123456789ABCDEF";
unsigned LastSlashX = getLength();
for (unsigned I = 0, N = getLength(); I != N; ++I) {
- switch (uint32_t Char = getCodeUnit(I)) {
- default:
+ uint32_t Char = getCodeUnit(I);
+ StringRef Escaped = escapeCStyle<EscapeChar::Double>(Char);
+ if (Escaped.empty()) {
// FIXME: Convert UTF-8 back to codepoints before rendering.
// Convert UTF-16 surrogate pairs back to codepoints before rendering.
// Leave invalid surrogates alone; we'll use \x for those.
- if (getKind() == UTF16 && I != N - 1 && Char >= 0xd800 &&
- Char <= 0xdbff) {
+ if (getKind() == StringLiteralKind::UTF16 && I != N - 1 &&
+ Char >= 0xd800 && Char <= 0xdbff) {
uint32_t Trail = getCodeUnit(I + 1);
if (Trail >= 0xdc00 && Trail <= 0xdfff) {
Char = 0x10000 + ((Char - 0xd800) << 10) + (Trail - 0xdc00);
@@ -1175,7 +1223,7 @@ void StringLiteral::outputString(raw_ostream &OS) const {
// If this is a wide string, output characters over 0xff using \x
// escapes. Otherwise, this is a UTF-16 or UTF-32 string, and Char is a
// codepoint: use \x escapes for invalid codepoints.
- if (getKind() == Wide ||
+ if (getKind() == StringLiteralKind::Wide ||
(Char >= 0xd800 && Char <= 0xdfff) || Char >= 0x110000) {
// FIXME: Is this the best way to print wchar_t?
OS << "\\x";
@@ -1185,7 +1233,7 @@ void StringLiteral::outputString(raw_ostream &OS) const {
for (/**/; Shift >= 0; Shift -= 4)
OS << Hex[(Char >> Shift) & 15];
LastSlashX = I;
- break;
+ continue;
}
if (Char > 0xffff)
@@ -1198,7 +1246,7 @@ void StringLiteral::outputString(raw_ostream &OS) const {
<< Hex[(Char >> 8) & 15]
<< Hex[(Char >> 4) & 15]
<< Hex[(Char >> 0) & 15];
- break;
+ continue;
}
// If we used \x... for the previous character, and this character is a
@@ -1223,17 +1271,9 @@ void StringLiteral::outputString(raw_ostream &OS) const {
<< (char)('0' + ((Char >> 6) & 7))
<< (char)('0' + ((Char >> 3) & 7))
<< (char)('0' + ((Char >> 0) & 7));
- break;
- // Handle some common non-printable cases to make dumps prettier.
- case '\\': OS << "\\\\"; break;
- case '"': OS << "\\\""; break;
- case '\a': OS << "\\a"; break;
- case '\b': OS << "\\b"; break;
- case '\f': OS << "\\f"; break;
- case '\n': OS << "\\n"; break;
- case '\r': OS << "\\r"; break;
- case '\t': OS << "\\t"; break;
- case '\v': OS << "\\v"; break;
+ } else {
+ // Handle some common non-printable cases to make dumps prettier.
+ OS << Escaped;
}
}
OS << '"';
@@ -1260,8 +1300,9 @@ StringLiteral::getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
const LangOptions &Features,
const TargetInfo &Target, unsigned *StartToken,
unsigned *StartTokenByteOffset) const {
- assert((getKind() == StringLiteral::Ascii ||
- getKind() == StringLiteral::UTF8) &&
+ assert((getKind() == StringLiteralKind::Ordinary ||
+ getKind() == StringLiteralKind::UTF8 ||
+ getKind() == StringLiteralKind::Unevaluated) &&
"Only narrow string literals are currently supported");
// Loop over all of the tokens in this string until we find the one that
@@ -1274,7 +1315,7 @@ StringLiteral::getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
StringOffset = *StartTokenByteOffset;
ByteNo -= StringOffset;
}
- while (1) {
+ while (true) {
assert(TokNo < getNumConcatenated() && "Invalid byte number!");
SourceLocation StrTokLoc = getStrTokenLoc(TokNo);
@@ -1474,19 +1515,17 @@ unsigned CallExpr::offsetToTrailingObjects(StmtClass SC) {
Decl *Expr::getReferencedDeclOfCallee() {
Expr *CEE = IgnoreParenImpCasts();
- while (SubstNonTypeTemplateParmExpr *NTTP =
- dyn_cast<SubstNonTypeTemplateParmExpr>(CEE)) {
+ while (auto *NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(CEE))
CEE = NTTP->getReplacement()->IgnoreParenImpCasts();
- }
// If we're calling a dereference, look at the pointer instead.
while (true) {
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CEE)) {
+ if (auto *BO = dyn_cast<BinaryOperator>(CEE)) {
if (BO->isPtrMemOp()) {
CEE = BO->getRHS()->IgnoreParenImpCasts();
continue;
}
- } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(CEE)) {
+ } else if (auto *UO = dyn_cast<UnaryOperator>(CEE)) {
if (UO->getOpcode() == UO_Deref || UO->getOpcode() == UO_AddrOf ||
UO->getOpcode() == UO_Plus) {
CEE = UO->getSubExpr()->IgnoreParenImpCasts();
@@ -1496,9 +1535,9 @@ Decl *Expr::getReferencedDeclOfCallee() {
break;
}
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE))
+ if (auto *DRE = dyn_cast<DeclRefExpr>(CEE))
return DRE->getDecl();
- if (MemberExpr *ME = dyn_cast<MemberExpr>(CEE))
+ if (auto *ME = dyn_cast<MemberExpr>(CEE))
return ME->getMemberDecl();
if (auto *BE = dyn_cast<BlockExpr>(CEE))
return BE->getBlockDecl();
@@ -1508,8 +1547,7 @@ Decl *Expr::getReferencedDeclOfCallee() {
/// If this is a call to a builtin, return the builtin ID. If not, return 0.
unsigned CallExpr::getBuiltinCallee() const {
- auto *FDecl =
- dyn_cast_or_null<FunctionDecl>(getCallee()->getReferencedDeclOfCallee());
+ const auto *FDecl = getDirectCallee();
return FDecl ? FDecl->getBuiltinID() : 0;
}
@@ -1536,6 +1574,10 @@ QualType CallExpr::getCallReturnType(const ASTContext &Ctx) const {
// This should never be overloaded and so should never return null.
CalleeType = Expr::findBoundMemberType(Callee);
assert(!CalleeType.isNull());
+ } else if (CalleeType->isRecordType()) {
+ // If the Callee is a record type, then it is a not-yet-resolved
+ // dependent call to the call operator of that type.
+ return Ctx.DependentTy;
} else if (CalleeType->isDependentType() ||
CalleeType->isSpecificPlaceholderType(BuiltinType::Overload)) {
return Ctx.DependentTy;
@@ -1552,6 +1594,11 @@ const Attr *CallExpr::getUnusedResultAttr(const ASTContext &Ctx) const {
if (const auto *A = TD->getAttr<WarnUnusedResultAttr>())
return A;
+ for (const auto *TD = getCallReturnType(Ctx)->getAs<TypedefType>(); TD;
+ TD = TD->desugar()->getAs<TypedefType>())
+ if (const auto *A = TD->getDecl()->getAttr<WarnUnusedResultAttr>())
+ return A;
+
// Otherwise, see if the callee is marked nodiscard and return that attribute
// instead.
const Decl *D = getCalleeDecl();
@@ -1559,8 +1606,8 @@ const Attr *CallExpr::getUnusedResultAttr(const ASTContext &Ctx) const {
}
SourceLocation CallExpr::getBeginLoc() const {
- if (isa<CXXOperatorCallExpr>(this))
- return cast<CXXOperatorCallExpr>(this)->getBeginLoc();
+ if (const auto *OCE = dyn_cast<CXXOperatorCallExpr>(this))
+ return OCE->getBeginLoc();
SourceLocation begin = getCallee()->getBeginLoc();
if (begin.isInvalid() && getNumArgs() > 0 && getArg(0))
@@ -1568,8 +1615,8 @@ SourceLocation CallExpr::getBeginLoc() const {
return begin;
}
SourceLocation CallExpr::getEndLoc() const {
- if (isa<CXXOperatorCallExpr>(this))
- return cast<CXXOperatorCallExpr>(this)->getEndLoc();
+ if (const auto *OCE = dyn_cast<CXXOperatorCallExpr>(this))
+ return OCE->getEndLoc();
SourceLocation end = getRParenLoc();
if (end.isInvalid() && getNumArgs() > 0 && getArg(getNumArgs() - 1))
@@ -1671,16 +1718,7 @@ MemberExpr *MemberExpr::Create(
MemberExpr *E = new (Mem) MemberExpr(Base, IsArrow, OperatorLoc, MemberDecl,
NameInfo, T, VK, OK, NOUR);
- // FIXME: remove remaining dependence computation to computeDependence().
- auto Deps = E->getDependence();
if (HasQualOrFound) {
- // FIXME: Wrong. We should be looking at the member declaration we found.
- if (QualifierLoc && QualifierLoc.getNestedNameSpecifier()->isDependent())
- Deps |= ExprDependence::TypeValueInstantiation;
- else if (QualifierLoc &&
- QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())
- Deps |= ExprDependence::Instantiation;
-
E->MemberExprBits.HasQualifierOrFoundDecl = true;
MemberExprNameQualifier *NQ =
@@ -1692,13 +1730,16 @@ MemberExpr *MemberExpr::Create(
E->MemberExprBits.HasTemplateKWAndArgsInfo =
TemplateArgs || TemplateKWLoc.isValid();
+ // FIXME: remove remaining dependence computation to computeDependence().
+ auto Deps = E->getDependence();
if (TemplateArgs) {
auto TemplateArgDeps = TemplateArgumentDependence::None;
E->getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
TemplateKWLoc, *TemplateArgs,
E->getTrailingObjects<TemplateArgumentLoc>(), TemplateArgDeps);
- if (TemplateArgDeps & TemplateArgumentDependence::Instantiation)
- Deps |= ExprDependence::Instantiation;
+ for (const TemplateArgumentLoc &ArgLoc : TemplateArgs->arguments()) {
+ Deps |= toExprDependence(ArgLoc.getArgument().getDependence());
+ }
} else if (TemplateKWLoc.isValid()) {
E->getTrailingObjects<ASTTemplateKWAndArgsInfo>()->initializeFrom(
TemplateKWLoc);
@@ -1892,51 +1933,53 @@ const char *CastExpr::getCastKindName(CastKind CK) {
}
namespace {
- const Expr *skipImplicitTemporary(const Expr *E) {
- // Skip through reference binding to temporary.
- if (auto *Materialize = dyn_cast<MaterializeTemporaryExpr>(E))
- E = Materialize->getSubExpr();
+// Skip over implicit nodes produced as part of semantic analysis.
+// Designed for use with IgnoreExprNodes.
+static Expr *ignoreImplicitSemaNodes(Expr *E) {
+ if (auto *Materialize = dyn_cast<MaterializeTemporaryExpr>(E))
+ return Materialize->getSubExpr();
- // Skip any temporary bindings; they're implicit.
- if (auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
- E = Binder->getSubExpr();
+ if (auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
+ return Binder->getSubExpr();
- return E;
- }
+ if (auto *Full = dyn_cast<FullExpr>(E))
+ return Full->getSubExpr();
+
+ if (auto *CPLIE = dyn_cast<CXXParenListInitExpr>(E);
+ CPLIE && CPLIE->getInitExprs().size() == 1)
+ return CPLIE->getInitExprs()[0];
+
+ return E;
}
+} // namespace
Expr *CastExpr::getSubExprAsWritten() {
const Expr *SubExpr = nullptr;
- const CastExpr *E = this;
- do {
- SubExpr = skipImplicitTemporary(E->getSubExpr());
+
+ for (const CastExpr *E = this; E; E = dyn_cast<ImplicitCastExpr>(SubExpr)) {
+ SubExpr = IgnoreExprNodes(E->getSubExpr(), ignoreImplicitSemaNodes);
// Conversions by constructor and conversion functions have a
// subexpression describing the call; strip it off.
- if (E->getCastKind() == CK_ConstructorConversion)
- SubExpr =
- skipImplicitTemporary(cast<CXXConstructExpr>(SubExpr->IgnoreImplicit())->getArg(0));
- else if (E->getCastKind() == CK_UserDefinedConversion) {
- SubExpr = SubExpr->IgnoreImplicit();
- assert((isa<CXXMemberCallExpr>(SubExpr) ||
- isa<BlockExpr>(SubExpr)) &&
+ if (E->getCastKind() == CK_ConstructorConversion) {
+ SubExpr = IgnoreExprNodes(cast<CXXConstructExpr>(SubExpr)->getArg(0),
+ ignoreImplicitSemaNodes);
+ } else if (E->getCastKind() == CK_UserDefinedConversion) {
+ assert((isa<CXXMemberCallExpr>(SubExpr) || isa<BlockExpr>(SubExpr)) &&
"Unexpected SubExpr for CK_UserDefinedConversion.");
if (auto *MCE = dyn_cast<CXXMemberCallExpr>(SubExpr))
SubExpr = MCE->getImplicitObjectArgument();
}
+ }
- // If the subexpression we're left with is an implicit cast, look
- // through that, too.
- } while ((E = dyn_cast<ImplicitCastExpr>(SubExpr)));
-
- return const_cast<Expr*>(SubExpr);
+ return const_cast<Expr *>(SubExpr);
}
NamedDecl *CastExpr::getConversionFunction() const {
const Expr *SubExpr = nullptr;
for (const CastExpr *E = this; E; E = dyn_cast<ImplicitCastExpr>(SubExpr)) {
- SubExpr = skipImplicitTemporary(E->getSubExpr());
+ SubExpr = IgnoreExprNodes(E->getSubExpr(), ignoreImplicitSemaNodes);
if (E->getCastKind() == CK_ConstructorConversion)
return cast<CXXConstructExpr>(SubExpr)->getConstructor();
@@ -2137,12 +2180,13 @@ OverloadedOperatorKind BinaryOperator::getOverloadedOperator(Opcode Opc) {
bool BinaryOperator::isNullPointerArithmeticExtension(ASTContext &Ctx,
Opcode Opc,
- Expr *LHS, Expr *RHS) {
+ const Expr *LHS,
+ const Expr *RHS) {
if (Opc != BO_Add)
return false;
// Check that we have one pointer and one integer operand.
- Expr *PExp;
+ const Expr *PExp;
if (LHS->getType()->isPointerType()) {
if (!RHS->getType()->isIntegerType())
return false;
@@ -2168,41 +2212,35 @@ bool BinaryOperator::isNullPointerArithmeticExtension(ASTContext &Ctx,
return true;
}
-static QualType getDecayedSourceLocExprType(const ASTContext &Ctx,
- SourceLocExpr::IdentKind Kind) {
- switch (Kind) {
- case SourceLocExpr::File:
- case SourceLocExpr::Function: {
- QualType ArrTy = Ctx.getStringLiteralArrayType(Ctx.CharTy, 0);
- return Ctx.getPointerType(ArrTy->getAsArrayTypeUnsafe()->getElementType());
- }
- case SourceLocExpr::Line:
- case SourceLocExpr::Column:
- return Ctx.UnsignedIntTy;
- }
- llvm_unreachable("unhandled case");
-}
-
-SourceLocExpr::SourceLocExpr(const ASTContext &Ctx, IdentKind Kind,
- SourceLocation BLoc, SourceLocation RParenLoc,
+SourceLocExpr::SourceLocExpr(const ASTContext &Ctx, SourceLocIdentKind Kind,
+ QualType ResultTy, SourceLocation BLoc,
+ SourceLocation RParenLoc,
DeclContext *ParentContext)
- : Expr(SourceLocExprClass, getDecayedSourceLocExprType(Ctx, Kind),
- VK_PRValue, OK_Ordinary),
+ : Expr(SourceLocExprClass, ResultTy, VK_PRValue, OK_Ordinary),
BuiltinLoc(BLoc), RParenLoc(RParenLoc), ParentContext(ParentContext) {
- SourceLocExprBits.Kind = Kind;
- setDependence(ExprDependence::None);
+ SourceLocExprBits.Kind = llvm::to_underlying(Kind);
+ // In dependent contexts, function names may change.
+ setDependence(MayBeDependent(Kind) && ParentContext->isDependentContext()
+ ? ExprDependence::Value
+ : ExprDependence::None);
}
StringRef SourceLocExpr::getBuiltinStr() const {
switch (getIdentKind()) {
- case File:
+ case SourceLocIdentKind::File:
return "__builtin_FILE";
- case Function:
+ case SourceLocIdentKind::FileName:
+ return "__builtin_FILE_NAME";
+ case SourceLocIdentKind::Function:
return "__builtin_FUNCTION";
- case Line:
+ case SourceLocIdentKind::FuncSig:
+ return "__builtin_FUNCSIG";
+ case SourceLocIdentKind::Line:
return "__builtin_LINE";
- case Column:
+ case SourceLocIdentKind::Column:
return "__builtin_COLUMN";
+ case SourceLocIdentKind::SourceLocStruct:
+ return "__builtin_source_location";
}
llvm_unreachable("unexpected IdentKind!");
}
@@ -2212,14 +2250,17 @@ APValue SourceLocExpr::EvaluateInContext(const ASTContext &Ctx,
SourceLocation Loc;
const DeclContext *Context;
- std::tie(Loc,
- Context) = [&]() -> std::pair<SourceLocation, const DeclContext *> {
- if (auto *DIE = dyn_cast_or_null<CXXDefaultInitExpr>(DefaultExpr))
- return {DIE->getUsedLocation(), DIE->getUsedContext()};
- if (auto *DAE = dyn_cast_or_null<CXXDefaultArgExpr>(DefaultExpr))
- return {DAE->getUsedLocation(), DAE->getUsedContext()};
- return {this->getLocation(), this->getParentContext()};
- }();
+ if (const auto *DIE = dyn_cast_if_present<CXXDefaultInitExpr>(DefaultExpr)) {
+ Loc = DIE->getUsedLocation();
+ Context = DIE->getUsedContext();
+ } else if (const auto *DAE =
+ dyn_cast_if_present<CXXDefaultArgExpr>(DefaultExpr)) {
+ Loc = DAE->getUsedLocation();
+ Context = DAE->getUsedContext();
+ } else {
+ Loc = getLocation();
+ Context = getParentContext();
+ }
PresumedLoc PLoc = Ctx.getSourceManager().getPresumedLoc(
Ctx.getSourceManager().getExpansionRange(Loc).getEnd());
@@ -2233,24 +2274,75 @@ APValue SourceLocExpr::EvaluateInContext(const ASTContext &Ctx,
};
switch (getIdentKind()) {
- case SourceLocExpr::File: {
+ case SourceLocIdentKind::FileName: {
+ // __builtin_FILE_NAME() is a Clang-specific extension that expands to the
+ // the last part of __builtin_FILE().
+ SmallString<256> FileName;
+ clang::Preprocessor::processPathToFileName(
+ FileName, PLoc, Ctx.getLangOpts(), Ctx.getTargetInfo());
+ return MakeStringLiteral(FileName);
+ }
+ case SourceLocIdentKind::File: {
SmallString<256> Path(PLoc.getFilename());
- Ctx.getLangOpts().remapPathPrefix(Path);
+ clang::Preprocessor::processPathForFileMacro(Path, Ctx.getLangOpts(),
+ Ctx.getTargetInfo());
return MakeStringLiteral(Path);
}
- case SourceLocExpr::Function: {
- const Decl *CurDecl = dyn_cast_or_null<Decl>(Context);
+ case SourceLocIdentKind::Function:
+ case SourceLocIdentKind::FuncSig: {
+ const auto *CurDecl = dyn_cast<Decl>(Context);
+ const auto Kind = getIdentKind() == SourceLocIdentKind::Function
+ ? PredefinedIdentKind::Function
+ : PredefinedIdentKind::FuncSig;
return MakeStringLiteral(
- CurDecl ? PredefinedExpr::ComputeName(PredefinedExpr::Function, CurDecl)
- : std::string(""));
+ CurDecl ? PredefinedExpr::ComputeName(Kind, CurDecl) : std::string(""));
}
- case SourceLocExpr::Line:
- case SourceLocExpr::Column: {
- llvm::APSInt IntVal(Ctx.getIntWidth(Ctx.UnsignedIntTy),
- /*isUnsigned=*/true);
- IntVal = getIdentKind() == SourceLocExpr::Line ? PLoc.getLine()
- : PLoc.getColumn();
- return APValue(IntVal);
+ case SourceLocIdentKind::Line:
+ return APValue(Ctx.MakeIntValue(PLoc.getLine(), Ctx.UnsignedIntTy));
+ case SourceLocIdentKind::Column:
+ return APValue(Ctx.MakeIntValue(PLoc.getColumn(), Ctx.UnsignedIntTy));
+ case SourceLocIdentKind::SourceLocStruct: {
+ // Fill in a std::source_location::__impl structure, by creating an
+ // artificial file-scoped CompoundLiteralExpr, and returning a pointer to
+ // that.
+ const CXXRecordDecl *ImplDecl = getType()->getPointeeCXXRecordDecl();
+ assert(ImplDecl);
+
+ // Construct an APValue for the __impl struct, and get or create a Decl
+ // corresponding to that. Note that we've already verified that the shape of
+ // the ImplDecl type is as expected.
+
+ APValue Value(APValue::UninitStruct(), 0, 4);
+ for (const FieldDecl *F : ImplDecl->fields()) {
+ StringRef Name = F->getName();
+ if (Name == "_M_file_name") {
+ SmallString<256> Path(PLoc.getFilename());
+ clang::Preprocessor::processPathForFileMacro(Path, Ctx.getLangOpts(),
+ Ctx.getTargetInfo());
+ Value.getStructField(F->getFieldIndex()) = MakeStringLiteral(Path);
+ } else if (Name == "_M_function_name") {
+ // Note: this emits the PrettyFunction name -- different than what
+ // __builtin_FUNCTION() above returns!
+ const auto *CurDecl = dyn_cast<Decl>(Context);
+ Value.getStructField(F->getFieldIndex()) = MakeStringLiteral(
+ CurDecl && !isa<TranslationUnitDecl>(CurDecl)
+ ? StringRef(PredefinedExpr::ComputeName(
+ PredefinedIdentKind::PrettyFunction, CurDecl))
+ : "");
+ } else if (Name == "_M_line") {
+ llvm::APSInt IntVal = Ctx.MakeIntValue(PLoc.getLine(), F->getType());
+ Value.getStructField(F->getFieldIndex()) = APValue(IntVal);
+ } else if (Name == "_M_column") {
+ llvm::APSInt IntVal = Ctx.MakeIntValue(PLoc.getColumn(), F->getType());
+ Value.getStructField(F->getFieldIndex()) = APValue(IntVal);
+ }
+ }
+
+ UnnamedGlobalConstantDecl *GV =
+ Ctx.getUnnamedGlobalConstantDecl(getType()->getPointeeType(), Value);
+
+ return APValue(GV, CharUnits::Zero(), ArrayRef<APValue::LValuePathEntry>{},
+ false);
}
}
llvm_unreachable("unhandled case");
@@ -2308,7 +2400,7 @@ bool InitListExpr::isStringLiteralInit() const {
const Expr *Init = getInit(0);
if (!Init)
return false;
- Init = Init->IgnoreParens();
+ Init = Init->IgnoreParenImpCasts();
return isa<StringLiteral>(Init) || isa<ObjCEncodeExpr>(Init);
}
@@ -2370,10 +2462,8 @@ SourceLocation InitListExpr::getEndLoc() const {
SourceLocation End = RBraceLoc;
if (End.isInvalid()) {
// Find the first non-null initializer from the end.
- for (InitExprsTy::const_reverse_iterator I = InitExprs.rbegin(),
- E = InitExprs.rend();
- I != E; ++I) {
- if (Stmt *S = *I) {
+ for (Stmt *S : llvm::reverse(InitExprs)) {
+ if (S) {
End = S->getEndLoc();
break;
}
@@ -2457,8 +2547,12 @@ bool Expr::isReadIfDiscardedInCPlusPlus11() const {
}
// Objective-C++ extensions to the rule.
- if (isa<PseudoObjectExpr>(E) || isa<ObjCIvarRefExpr>(E))
+ if (isa<ObjCIvarRefExpr>(E))
return true;
+ if (const auto *POE = dyn_cast<PseudoObjectExpr>(E)) {
+ if (isa<ObjCPropertyRefExpr, ObjCSubscriptRefExpr>(POE->getSyntacticForm()))
+ return true;
+ }
return false;
}
@@ -2620,7 +2714,7 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
}
// Fallthrough for generic call handling.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case CallExprClass:
case CXXMemberCallExprClass:
@@ -2708,23 +2802,35 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
}
case ObjCPropertyRefExprClass:
+ case ObjCSubscriptRefExprClass:
WarnE = this;
Loc = getExprLoc();
R1 = getSourceRange();
return true;
case PseudoObjectExprClass: {
- const PseudoObjectExpr *PO = cast<PseudoObjectExpr>(this);
+ const auto *POE = cast<PseudoObjectExpr>(this);
- // Only complain about things that have the form of a getter.
- if (isa<UnaryOperator>(PO->getSyntacticForm()) ||
- isa<BinaryOperator>(PO->getSyntacticForm()))
- return false;
+ // For some syntactic forms, we should always warn.
+ if (isa<ObjCPropertyRefExpr, ObjCSubscriptRefExpr>(
+ POE->getSyntacticForm())) {
+ WarnE = this;
+ Loc = getExprLoc();
+ R1 = getSourceRange();
+ return true;
+ }
- WarnE = this;
- Loc = getExprLoc();
- R1 = getSourceRange();
- return true;
+ // For others, we should never warn.
+ if (auto *BO = dyn_cast<BinaryOperator>(POE->getSyntacticForm()))
+ if (BO->isAssignmentOp())
+ return false;
+ if (auto *UO = dyn_cast<UnaryOperator>(POE->getSyntacticForm()))
+ if (UO->isIncrementDecrementOp())
+ return false;
+
+ // Otherwise, warn if the result expression would warn.
+ const Expr *Result = POE->getResultExpr();
+ return Result && Result->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
}
case StmtExprClass: {
@@ -3168,6 +3274,10 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
// kill the second parameter.
if (IsForRef) {
+ if (auto *EWC = dyn_cast<ExprWithCleanups>(this))
+ return EWC->getSubExpr()->isConstantInitializer(Ctx, true, Culprit);
+ if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(this))
+ return MTE->getSubExpr()->isConstantInitializer(Ctx, false, Culprit);
EvalResult Result;
if (EvaluateAsLValue(Result, Ctx) && !Result.HasSideEffects)
return true;
@@ -3305,6 +3415,7 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
CE->getCastKind() == CK_ConstructorConversion ||
CE->getCastKind() == CK_NonAtomicToAtomic ||
CE->getCastKind() == CK_AtomicToNonAtomic ||
+ CE->getCastKind() == CK_NullToPointer ||
CE->getCastKind() == CK_IntToOCLSampler)
return CE->getSubExpr()->isConstantInitializer(Ctx, false, Culprit);
@@ -3336,9 +3447,9 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
}
bool CallExpr::isBuiltinAssumeFalse(const ASTContext &Ctx) const {
- const FunctionDecl* FD = getDirectCallee();
- if (!FD || (FD->getBuiltinID() != Builtin::BI__assume &&
- FD->getBuiltinID() != Builtin::BI__builtin_assume))
+ unsigned BuiltinID = getBuiltinCallee();
+ if (BuiltinID != Builtin::BI__assume &&
+ BuiltinID != Builtin::BI__builtin_assume)
return false;
const Expr* Arg = getArg(0);
@@ -3347,6 +3458,10 @@ bool CallExpr::isBuiltinAssumeFalse(const ASTContext &Ctx) const {
Arg->EvaluateAsBooleanCondition(ArgVal, Ctx) && !ArgVal;
}
+bool CallExpr::isCallToStdMove() const {
+ return getBuiltinCallee() == Builtin::BImove;
+}
+
namespace {
/// Look for any side effects within a Stmt.
class SideEffectFinder : public ConstEvaluatedExprVisitor<SideEffectFinder> {
@@ -3533,6 +3648,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case ShuffleVectorExprClass:
case ConvertVectorExprClass:
case AsTypeExprClass:
+ case CXXParenListInitExprClass:
// These have a side-effect if any subexpression does.
break;
@@ -3580,7 +3696,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
DCE->getCastKind() == CK_Dynamic)
return true;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ImplicitCastExprClass:
case CStyleCastExprClass:
case CXXStaticCastExprClass:
@@ -3779,11 +3895,8 @@ Expr::isNullPointerConstant(ASTContext &Ctx,
// has non-default address space it is not treated as nullptr.
// (__generic void*)0 in OpenCL 2.0 should not be treated as nullptr
// since it cannot be assigned to a pointer to constant address space.
- if ((Ctx.getLangOpts().OpenCLVersion >= 200 &&
- Pointee.getAddressSpace() == LangAS::opencl_generic) ||
- (Ctx.getLangOpts().OpenCL &&
- Ctx.getLangOpts().OpenCLVersion < 200 &&
- Pointee.getAddressSpace() == LangAS::opencl_private))
+ if (Ctx.getLangOpts().OpenCL &&
+ Pointee.getAddressSpace() == Ctx.getDefaultOpenCLPointeeAddrSpace())
Qs.removeAddressSpace();
if (Pointee->isVoidType() && Qs.empty() && // to void*
@@ -3831,7 +3944,7 @@ Expr::isNullPointerConstant(ASTContext &Ctx,
if (getType().isNull())
return NPCK_NotNull;
- // C++11 nullptr_t is always a null pointer constant.
+ // C++11/C23 nullptr_t is always a null pointer constant.
if (getType()->isNullPtrType())
return NPCK_CXX11_nullptr;
@@ -4128,7 +4241,7 @@ bool ExtVectorElementExpr::containsDuplicateElements() const {
Comp = Comp.substr(1);
for (unsigned i = 0, e = Comp.size(); i != e; ++i)
- if (Comp.substr(i + 1).find(Comp[i]) != StringRef::npos)
+ if (Comp.substr(i + 1).contains(Comp[i]))
return true;
return false;
@@ -4196,18 +4309,48 @@ GenericSelectionExpr::GenericSelectionExpr(
AssocExprs[ResultIndex]->getValueKind(),
AssocExprs[ResultIndex]->getObjectKind()),
NumAssocs(AssocExprs.size()), ResultIndex(ResultIndex),
- DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
+ IsExprPredicate(true), DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
assert(AssocTypes.size() == AssocExprs.size() &&
"Must have the same number of association expressions"
" and TypeSourceInfo!");
assert(ResultIndex < NumAssocs && "ResultIndex is out-of-bounds!");
GenericSelectionExprBits.GenericLoc = GenericLoc;
- getTrailingObjects<Stmt *>()[ControllingIndex] = ControllingExpr;
+ getTrailingObjects<Stmt *>()[getIndexOfControllingExpression()] =
+ ControllingExpr;
std::copy(AssocExprs.begin(), AssocExprs.end(),
- getTrailingObjects<Stmt *>() + AssocExprStartIndex);
+ getTrailingObjects<Stmt *>() + getIndexOfStartOfAssociatedExprs());
std::copy(AssocTypes.begin(), AssocTypes.end(),
- getTrailingObjects<TypeSourceInfo *>());
+ getTrailingObjects<TypeSourceInfo *>() +
+ getIndexOfStartOfAssociatedTypes());
+
+ setDependence(computeDependence(this, ContainsUnexpandedParameterPack));
+}
+
+GenericSelectionExpr::GenericSelectionExpr(
+ const ASTContext &, SourceLocation GenericLoc,
+ TypeSourceInfo *ControllingType, ArrayRef<TypeSourceInfo *> AssocTypes,
+ ArrayRef<Expr *> AssocExprs, SourceLocation DefaultLoc,
+ SourceLocation RParenLoc, bool ContainsUnexpandedParameterPack,
+ unsigned ResultIndex)
+ : Expr(GenericSelectionExprClass, AssocExprs[ResultIndex]->getType(),
+ AssocExprs[ResultIndex]->getValueKind(),
+ AssocExprs[ResultIndex]->getObjectKind()),
+ NumAssocs(AssocExprs.size()), ResultIndex(ResultIndex),
+ IsExprPredicate(false), DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
+ assert(AssocTypes.size() == AssocExprs.size() &&
+ "Must have the same number of association expressions"
+ " and TypeSourceInfo!");
+ assert(ResultIndex < NumAssocs && "ResultIndex is out-of-bounds!");
+
+ GenericSelectionExprBits.GenericLoc = GenericLoc;
+ getTrailingObjects<TypeSourceInfo *>()[getIndexOfControllingType()] =
+ ControllingType;
+ std::copy(AssocExprs.begin(), AssocExprs.end(),
+ getTrailingObjects<Stmt *>() + getIndexOfStartOfAssociatedExprs());
+ std::copy(AssocTypes.begin(), AssocTypes.end(),
+ getTrailingObjects<TypeSourceInfo *>() +
+ getIndexOfStartOfAssociatedTypes());
setDependence(computeDependence(this, ContainsUnexpandedParameterPack));
}
@@ -4220,17 +4363,44 @@ GenericSelectionExpr::GenericSelectionExpr(
: Expr(GenericSelectionExprClass, Context.DependentTy, VK_PRValue,
OK_Ordinary),
NumAssocs(AssocExprs.size()), ResultIndex(ResultDependentIndex),
- DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
+ IsExprPredicate(true), DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
assert(AssocTypes.size() == AssocExprs.size() &&
"Must have the same number of association expressions"
" and TypeSourceInfo!");
GenericSelectionExprBits.GenericLoc = GenericLoc;
- getTrailingObjects<Stmt *>()[ControllingIndex] = ControllingExpr;
+ getTrailingObjects<Stmt *>()[getIndexOfControllingExpression()] =
+ ControllingExpr;
std::copy(AssocExprs.begin(), AssocExprs.end(),
- getTrailingObjects<Stmt *>() + AssocExprStartIndex);
+ getTrailingObjects<Stmt *>() + getIndexOfStartOfAssociatedExprs());
std::copy(AssocTypes.begin(), AssocTypes.end(),
- getTrailingObjects<TypeSourceInfo *>());
+ getTrailingObjects<TypeSourceInfo *>() +
+ getIndexOfStartOfAssociatedTypes());
+
+ setDependence(computeDependence(this, ContainsUnexpandedParameterPack));
+}
+
+GenericSelectionExpr::GenericSelectionExpr(
+ const ASTContext &Context, SourceLocation GenericLoc,
+ TypeSourceInfo *ControllingType, ArrayRef<TypeSourceInfo *> AssocTypes,
+ ArrayRef<Expr *> AssocExprs, SourceLocation DefaultLoc,
+ SourceLocation RParenLoc, bool ContainsUnexpandedParameterPack)
+ : Expr(GenericSelectionExprClass, Context.DependentTy, VK_PRValue,
+ OK_Ordinary),
+ NumAssocs(AssocExprs.size()), ResultIndex(ResultDependentIndex),
+ IsExprPredicate(false), DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
+ assert(AssocTypes.size() == AssocExprs.size() &&
+ "Must have the same number of association expressions"
+ " and TypeSourceInfo!");
+
+ GenericSelectionExprBits.GenericLoc = GenericLoc;
+ getTrailingObjects<TypeSourceInfo *>()[getIndexOfControllingType()] =
+ ControllingType;
+ std::copy(AssocExprs.begin(), AssocExprs.end(),
+ getTrailingObjects<Stmt *>() + getIndexOfStartOfAssociatedExprs());
+ std::copy(AssocTypes.begin(), AssocTypes.end(),
+ getTrailingObjects<TypeSourceInfo *>() +
+ getIndexOfStartOfAssociatedTypes());
setDependence(computeDependence(this, ContainsUnexpandedParameterPack));
}
@@ -4266,6 +4436,35 @@ GenericSelectionExpr *GenericSelectionExpr::Create(
RParenLoc, ContainsUnexpandedParameterPack);
}
+GenericSelectionExpr *GenericSelectionExpr::Create(
+ const ASTContext &Context, SourceLocation GenericLoc,
+ TypeSourceInfo *ControllingType, ArrayRef<TypeSourceInfo *> AssocTypes,
+ ArrayRef<Expr *> AssocExprs, SourceLocation DefaultLoc,
+ SourceLocation RParenLoc, bool ContainsUnexpandedParameterPack,
+ unsigned ResultIndex) {
+ unsigned NumAssocs = AssocExprs.size();
+ void *Mem = Context.Allocate(
+ totalSizeToAlloc<Stmt *, TypeSourceInfo *>(1 + NumAssocs, NumAssocs),
+ alignof(GenericSelectionExpr));
+ return new (Mem) GenericSelectionExpr(
+ Context, GenericLoc, ControllingType, AssocTypes, AssocExprs, DefaultLoc,
+ RParenLoc, ContainsUnexpandedParameterPack, ResultIndex);
+}
+
+GenericSelectionExpr *GenericSelectionExpr::Create(
+ const ASTContext &Context, SourceLocation GenericLoc,
+ TypeSourceInfo *ControllingType, ArrayRef<TypeSourceInfo *> AssocTypes,
+ ArrayRef<Expr *> AssocExprs, SourceLocation DefaultLoc,
+ SourceLocation RParenLoc, bool ContainsUnexpandedParameterPack) {
+ unsigned NumAssocs = AssocExprs.size();
+ void *Mem = Context.Allocate(
+ totalSizeToAlloc<Stmt *, TypeSourceInfo *>(1 + NumAssocs, NumAssocs),
+ alignof(GenericSelectionExpr));
+ return new (Mem) GenericSelectionExpr(
+ Context, GenericLoc, ControllingType, AssocTypes, AssocExprs, DefaultLoc,
+ RParenLoc, ContainsUnexpandedParameterPack);
+}
+
GenericSelectionExpr *
GenericSelectionExpr::CreateEmpty(const ASTContext &Context,
unsigned NumAssocs) {
@@ -4279,11 +4478,11 @@ GenericSelectionExpr::CreateEmpty(const ASTContext &Context,
// DesignatedInitExpr
//===----------------------------------------------------------------------===//
-IdentifierInfo *DesignatedInitExpr::Designator::getFieldName() const {
- assert(Kind == FieldDesignator && "Only valid on a field designator");
- if (Field.NameOrField & 0x01)
- return reinterpret_cast<IdentifierInfo *>(Field.NameOrField & ~0x01);
- return getField()->getIdentifier();
+const IdentifierInfo *DesignatedInitExpr::Designator::getFieldName() const {
+ assert(isFieldDesignator() && "Only valid on a field designator");
+ if (FieldInfo.NameOrField & 0x01)
+ return reinterpret_cast<IdentifierInfo *>(FieldInfo.NameOrField & ~0x01);
+ return getFieldDecl()->getIdentifier();
}
DesignatedInitExpr::DesignatedInitExpr(const ASTContext &C, QualType Ty,
@@ -4358,14 +4557,11 @@ SourceRange DesignatedInitExpr::getDesignatorsSourceRange() const {
}
SourceLocation DesignatedInitExpr::getBeginLoc() const {
- SourceLocation StartLoc;
auto *DIE = const_cast<DesignatedInitExpr *>(this);
Designator &First = *DIE->getDesignator(0);
if (First.isFieldDesignator())
- StartLoc = GNUSyntax ? First.Field.FieldLoc : First.Field.DotLoc;
- else
- StartLoc = First.ArrayOrRange.LBracketLoc;
- return StartLoc;
+ return GNUSyntax ? First.getFieldLoc() : First.getDotLoc();
+ return First.getLBracketLoc();
}
SourceLocation DesignatedInitExpr::getEndLoc() const {
@@ -4373,20 +4569,18 @@ SourceLocation DesignatedInitExpr::getEndLoc() const {
}
Expr *DesignatedInitExpr::getArrayIndex(const Designator& D) const {
- assert(D.Kind == Designator::ArrayDesignator && "Requires array designator");
- return getSubExpr(D.ArrayOrRange.Index + 1);
+ assert(D.isArrayDesignator() && "Requires array designator");
+ return getSubExpr(D.getArrayIndex() + 1);
}
Expr *DesignatedInitExpr::getArrayRangeStart(const Designator &D) const {
- assert(D.Kind == Designator::ArrayRangeDesignator &&
- "Requires array range designator");
- return getSubExpr(D.ArrayOrRange.Index + 1);
+ assert(D.isArrayRangeDesignator() && "Requires array range designator");
+ return getSubExpr(D.getArrayIndex() + 1);
}
Expr *DesignatedInitExpr::getArrayRangeEnd(const Designator &D) const {
- assert(D.Kind == Designator::ArrayRangeDesignator &&
- "Requires array range designator");
- return getSubExpr(D.ArrayOrRange.Index + 2);
+ assert(D.isArrayRangeDesignator() && "Requires array range designator");
+ return getSubExpr(D.getArrayIndex() + 2);
}
/// Replaces the designator at index @p Idx with the series
@@ -4425,7 +4619,8 @@ DesignatedInitUpdateExpr::DesignatedInitUpdateExpr(const ASTContext &C,
OK_Ordinary) {
BaseAndUpdaterExprs[0] = baseExpr;
- InitListExpr *ILE = new (C) InitListExpr(C, lBraceLoc, None, rBraceLoc);
+ InitListExpr *ILE =
+ new (C) InitListExpr(C, lBraceLoc, std::nullopt, rBraceLoc);
ILE->setType(baseExpr->getType());
BaseAndUpdaterExprs[1] = ILE;
@@ -4695,7 +4890,9 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
case AO__atomic_load_n:
return 2;
+ case AO__scoped_atomic_load_n:
case AO__opencl_atomic_load:
+ case AO__hip_atomic_load:
case AO__c11_atomic_store:
case AO__c11_atomic_exchange:
case AO__atomic_load:
@@ -4707,6 +4904,7 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
case AO__c11_atomic_fetch_and:
case AO__c11_atomic_fetch_or:
case AO__c11_atomic_fetch_xor:
+ case AO__c11_atomic_fetch_nand:
case AO__c11_atomic_fetch_max:
case AO__c11_atomic_fetch_min:
case AO__atomic_fetch_add:
@@ -4727,7 +4925,36 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
case AO__atomic_fetch_max:
return 3;
+ case AO__scoped_atomic_load:
+ case AO__scoped_atomic_store:
+ case AO__scoped_atomic_store_n:
+ case AO__scoped_atomic_fetch_add:
+ case AO__scoped_atomic_fetch_sub:
+ case AO__scoped_atomic_fetch_and:
+ case AO__scoped_atomic_fetch_or:
+ case AO__scoped_atomic_fetch_xor:
+ case AO__scoped_atomic_fetch_nand:
+ case AO__scoped_atomic_add_fetch:
+ case AO__scoped_atomic_sub_fetch:
+ case AO__scoped_atomic_and_fetch:
+ case AO__scoped_atomic_or_fetch:
+ case AO__scoped_atomic_xor_fetch:
+ case AO__scoped_atomic_nand_fetch:
+ case AO__scoped_atomic_min_fetch:
+ case AO__scoped_atomic_max_fetch:
+ case AO__scoped_atomic_fetch_min:
+ case AO__scoped_atomic_fetch_max:
+ case AO__scoped_atomic_exchange_n:
+ case AO__hip_atomic_exchange:
+ case AO__hip_atomic_fetch_add:
+ case AO__hip_atomic_fetch_sub:
+ case AO__hip_atomic_fetch_and:
+ case AO__hip_atomic_fetch_or:
+ case AO__hip_atomic_fetch_xor:
+ case AO__hip_atomic_fetch_min:
+ case AO__hip_atomic_fetch_max:
case AO__opencl_atomic_store:
+ case AO__hip_atomic_store:
case AO__opencl_atomic_exchange:
case AO__opencl_atomic_fetch_add:
case AO__opencl_atomic_fetch_sub:
@@ -4739,15 +4966,21 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
case AO__atomic_exchange:
return 4;
+ case AO__scoped_atomic_exchange:
case AO__c11_atomic_compare_exchange_strong:
case AO__c11_atomic_compare_exchange_weak:
return 5;
-
+ case AO__hip_atomic_compare_exchange_strong:
case AO__opencl_atomic_compare_exchange_strong:
case AO__opencl_atomic_compare_exchange_weak:
+ case AO__hip_atomic_compare_exchange_weak:
case AO__atomic_compare_exchange:
case AO__atomic_compare_exchange_n:
return 6;
+
+ case AO__scoped_atomic_compare_exchange:
+ case AO__scoped_atomic_compare_exchange_n:
+ return 7;
}
llvm_unreachable("unknown atomic op");
}
@@ -4779,10 +5012,10 @@ QualType OMPArraySectionExpr::getBaseOriginalType(const Expr *Base) {
for (unsigned Cnt = 0; Cnt < ArraySectionCount; ++Cnt) {
if (OriginalTy->isAnyPointerType())
OriginalTy = OriginalTy->getPointeeType();
- else {
- assert (OriginalTy->isArrayType());
+ else if (OriginalTy->isArrayType())
OriginalTy = OriginalTy->castAsArrayTypeUnsafe()->getElementType();
- }
+ else
+ return {};
}
return OriginalTy;
}
@@ -4794,7 +5027,7 @@ RecoveryExpr::RecoveryExpr(ASTContext &Ctx, QualType T, SourceLocation BeginLoc,
OK_Ordinary),
BeginLoc(BeginLoc), EndLoc(EndLoc), NumExprs(SubExprs.size()) {
assert(!T.isNull());
- assert(llvm::all_of(SubExprs, [](Expr* E) { return E != nullptr; }));
+ assert(!llvm::is_contained(SubExprs, nullptr));
llvm::copy(SubExprs, getTrailingObjects<Expr *>());
setDependence(computeDependence(this));
diff --git a/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp b/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp
index c98cfd74dab0..e61c11dffd88 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp
@@ -38,6 +38,7 @@
#include <cstddef>
#include <cstring>
#include <memory>
+#include <optional>
using namespace clang;
@@ -110,7 +111,7 @@ CXXRewrittenBinaryOperator::getDecomposedForm() const {
return Result;
// Otherwise, we expect a <=> to now be on the LHS.
- E = Result.LHS->IgnoreImplicitAsWritten();
+ E = Result.LHS->IgnoreUnlessSpelledInSource();
if (auto *BO = dyn_cast<BinaryOperator>(E)) {
assert(BO->getOpcode() == BO_Cmp);
Result.LHS = BO->getLHS();
@@ -182,8 +183,8 @@ CXXNewExpr::CXXNewExpr(bool IsGlobalNew, FunctionDecl *OperatorNew,
FunctionDecl *OperatorDelete, bool ShouldPassAlignment,
bool UsualArrayDeleteWantsSize,
ArrayRef<Expr *> PlacementArgs, SourceRange TypeIdParens,
- Optional<Expr *> ArraySize,
- InitializationStyle InitializationStyle,
+ std::optional<Expr *> ArraySize,
+ CXXNewInitializationStyle InitializationStyle,
Expr *Initializer, QualType Ty,
TypeSourceInfo *AllocatedTypeInfo, SourceRange Range,
SourceRange DirectInitRange)
@@ -192,15 +193,17 @@ CXXNewExpr::CXXNewExpr(bool IsGlobalNew, FunctionDecl *OperatorNew,
AllocatedTypeInfo(AllocatedTypeInfo), Range(Range),
DirectInitRange(DirectInitRange) {
- assert((Initializer != nullptr || InitializationStyle == NoInit) &&
- "Only NoInit can have no initializer!");
+ assert((Initializer != nullptr ||
+ InitializationStyle == CXXNewInitializationStyle::None) &&
+ "Only CXXNewInitializationStyle::None can have no initializer!");
CXXNewExprBits.IsGlobalNew = IsGlobalNew;
- CXXNewExprBits.IsArray = ArraySize.hasValue();
+ CXXNewExprBits.IsArray = ArraySize.has_value();
CXXNewExprBits.ShouldPassAlignment = ShouldPassAlignment;
CXXNewExprBits.UsualArrayDeleteWantsSize = UsualArrayDeleteWantsSize;
+ CXXNewExprBits.HasInitializer = Initializer != nullptr;
CXXNewExprBits.StoredInitializationStyle =
- Initializer ? InitializationStyle + 1 : 0;
+ llvm::to_underlying(InitializationStyle);
bool IsParenTypeId = TypeIdParens.isValid();
CXXNewExprBits.IsParenTypeId = IsParenTypeId;
CXXNewExprBits.NumPlacementArgs = PlacementArgs.size();
@@ -216,10 +219,10 @@ CXXNewExpr::CXXNewExpr(bool IsGlobalNew, FunctionDecl *OperatorNew,
getTrailingObjects<SourceRange>()[0] = TypeIdParens;
switch (getInitializationStyle()) {
- case CallInit:
+ case CXXNewInitializationStyle::Parens:
this->Range.setEnd(DirectInitRange.getEnd());
break;
- case ListInit:
+ case CXXNewInitializationStyle::Braces:
this->Range.setEnd(getInitializer()->getSourceRange().getEnd());
break;
default:
@@ -239,16 +242,15 @@ CXXNewExpr::CXXNewExpr(EmptyShell Empty, bool IsArray,
CXXNewExprBits.IsParenTypeId = IsParenTypeId;
}
-CXXNewExpr *
-CXXNewExpr::Create(const ASTContext &Ctx, bool IsGlobalNew,
- FunctionDecl *OperatorNew, FunctionDecl *OperatorDelete,
- bool ShouldPassAlignment, bool UsualArrayDeleteWantsSize,
- ArrayRef<Expr *> PlacementArgs, SourceRange TypeIdParens,
- Optional<Expr *> ArraySize,
- InitializationStyle InitializationStyle, Expr *Initializer,
- QualType Ty, TypeSourceInfo *AllocatedTypeInfo,
- SourceRange Range, SourceRange DirectInitRange) {
- bool IsArray = ArraySize.hasValue();
+CXXNewExpr *CXXNewExpr::Create(
+ const ASTContext &Ctx, bool IsGlobalNew, FunctionDecl *OperatorNew,
+ FunctionDecl *OperatorDelete, bool ShouldPassAlignment,
+ bool UsualArrayDeleteWantsSize, ArrayRef<Expr *> PlacementArgs,
+ SourceRange TypeIdParens, std::optional<Expr *> ArraySize,
+ CXXNewInitializationStyle InitializationStyle, Expr *Initializer,
+ QualType Ty, TypeSourceInfo *AllocatedTypeInfo, SourceRange Range,
+ SourceRange DirectInitRange) {
+ bool IsArray = ArraySize.has_value();
bool HasInit = Initializer != nullptr;
unsigned NumPlacementArgs = PlacementArgs.size();
bool IsParenTypeId = TypeIdParens.isValid();
@@ -275,6 +277,8 @@ CXXNewExpr *CXXNewExpr::CreateEmpty(const ASTContext &Ctx, bool IsArray,
}
bool CXXNewExpr::shouldNullCheckAllocation() const {
+ if (getOperatorNew()->getLangOpts().CheckNew)
+ return true;
return !getOperatorNew()->hasAttr<ReturnsNonNullAttr>() &&
getOperatorNew()
->getType()
@@ -314,7 +318,7 @@ QualType CXXDeleteExpr::getDestroyedType() const {
// CXXPseudoDestructorExpr
PseudoDestructorTypeStorage::PseudoDestructorTypeStorage(TypeSourceInfo *Info)
: Type(Info) {
- Location = Info->getTypeLoc().getLocalSourceRange().getBegin();
+ Location = Info->getTypeLoc().getBeginLoc();
}
CXXPseudoDestructorExpr::CXXPseudoDestructorExpr(
@@ -341,7 +345,7 @@ QualType CXXPseudoDestructorExpr::getDestroyedType() const {
SourceLocation CXXPseudoDestructorExpr::getEndLoc() const {
SourceLocation End = DestroyedType.getLocation();
if (TypeSourceInfo *TInfo = DestroyedType.getTypeSourceInfo())
- End = TInfo->getTypeLoc().getLocalSourceRange().getEnd();
+ End = TInfo->getTypeLoc().getSourceRange().getEnd();
return End;
}
@@ -351,10 +355,10 @@ UnresolvedLookupExpr::UnresolvedLookupExpr(
NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo, bool RequiresADL, bool Overloaded,
const TemplateArgumentListInfo *TemplateArgs, UnresolvedSetIterator Begin,
- UnresolvedSetIterator End)
+ UnresolvedSetIterator End, bool KnownDependent)
: OverloadExpr(UnresolvedLookupExprClass, Context, QualifierLoc,
- TemplateKWLoc, NameInfo, TemplateArgs, Begin, End, false,
- false, false),
+ TemplateKWLoc, NameInfo, TemplateArgs, Begin, End,
+ KnownDependent, false, false),
NamingClass(NamingClass) {
UnresolvedLookupExprBits.RequiresADL = RequiresADL;
UnresolvedLookupExprBits.Overloaded = Overloaded;
@@ -377,7 +381,7 @@ UnresolvedLookupExpr *UnresolvedLookupExpr::Create(
void *Mem = Context.Allocate(Size, alignof(UnresolvedLookupExpr));
return new (Mem) UnresolvedLookupExpr(Context, NamingClass, QualifierLoc,
SourceLocation(), NameInfo, RequiresADL,
- Overloaded, nullptr, Begin, End);
+ Overloaded, nullptr, Begin, End, false);
}
UnresolvedLookupExpr *UnresolvedLookupExpr::Create(
@@ -385,7 +389,7 @@ UnresolvedLookupExpr *UnresolvedLookupExpr::Create(
NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo, bool RequiresADL,
const TemplateArgumentListInfo *Args, UnresolvedSetIterator Begin,
- UnresolvedSetIterator End) {
+ UnresolvedSetIterator End, bool KnownDependent) {
assert(Args || TemplateKWLoc.isValid());
unsigned NumResults = End - Begin;
unsigned NumTemplateArgs = Args ? Args->size() : 0;
@@ -393,9 +397,9 @@ UnresolvedLookupExpr *UnresolvedLookupExpr::Create(
totalSizeToAlloc<DeclAccessPair, ASTTemplateKWAndArgsInfo,
TemplateArgumentLoc>(NumResults, 1, NumTemplateArgs);
void *Mem = Context.Allocate(Size, alignof(UnresolvedLookupExpr));
- return new (Mem) UnresolvedLookupExpr(Context, NamingClass, QualifierLoc,
- TemplateKWLoc, NameInfo, RequiresADL,
- /*Overloaded*/ true, Args, Begin, End);
+ return new (Mem) UnresolvedLookupExpr(
+ Context, NamingClass, QualifierLoc, TemplateKWLoc, NameInfo, RequiresADL,
+ /*Overloaded=*/true, Args, Begin, End, KnownDependent);
}
UnresolvedLookupExpr *UnresolvedLookupExpr::CreateEmpty(
@@ -764,29 +768,35 @@ CXXDynamicCastExpr *CXXDynamicCastExpr::CreateEmpty(const ASTContext &C,
/// struct C { };
///
/// C *f(B* b) { return dynamic_cast<C*>(b); }
-bool CXXDynamicCastExpr::isAlwaysNull() const
-{
+bool CXXDynamicCastExpr::isAlwaysNull() const {
+ if (isValueDependent() || getCastKind() != CK_Dynamic)
+ return false;
+
QualType SrcType = getSubExpr()->getType();
QualType DestType = getType();
- if (const auto *SrcPTy = SrcType->getAs<PointerType>()) {
- SrcType = SrcPTy->getPointeeType();
- DestType = DestType->castAs<PointerType>()->getPointeeType();
- }
-
- if (DestType->isVoidType())
+ if (DestType->isVoidPointerType())
return false;
- const auto *SrcRD =
- cast<CXXRecordDecl>(SrcType->castAs<RecordType>()->getDecl());
+ if (DestType->isPointerType()) {
+ SrcType = SrcType->getPointeeType();
+ DestType = DestType->getPointeeType();
+ }
- if (!SrcRD->hasAttr<FinalAttr>())
- return false;
+ const auto *SrcRD = SrcType->getAsCXXRecordDecl();
+ const auto *DestRD = DestType->getAsCXXRecordDecl();
+ assert(SrcRD && DestRD);
- const auto *DestRD =
- cast<CXXRecordDecl>(DestType->castAs<RecordType>()->getDecl());
+ if (SrcRD->isEffectivelyFinal()) {
+ assert(!SrcRD->isDerivedFrom(DestRD) &&
+ "upcasts should not use CK_Dynamic");
+ return true;
+ }
- return !DestRD->isDerivedFrom(SrcRD);
+ if (DestRD->isEffectivelyFinal() && !DestRD->isDerivedFrom(SrcRD))
+ return true;
+
+ return false;
}
CXXReinterpretCastExpr *
@@ -949,9 +959,43 @@ const IdentifierInfo *UserDefinedLiteral::getUDSuffix() const {
return cast<FunctionDecl>(getCalleeDecl())->getLiteralIdentifier();
}
+CXXDefaultArgExpr *CXXDefaultArgExpr::CreateEmpty(const ASTContext &C,
+ bool HasRewrittenInit) {
+ size_t Size = totalSizeToAlloc<Expr *>(HasRewrittenInit);
+ auto *Mem = C.Allocate(Size, alignof(CXXDefaultArgExpr));
+ return new (Mem) CXXDefaultArgExpr(EmptyShell(), HasRewrittenInit);
+}
+
+CXXDefaultArgExpr *CXXDefaultArgExpr::Create(const ASTContext &C,
+ SourceLocation Loc,
+ ParmVarDecl *Param,
+ Expr *RewrittenExpr,
+ DeclContext *UsedContext) {
+ size_t Size = totalSizeToAlloc<Expr *>(RewrittenExpr != nullptr);
+ auto *Mem = C.Allocate(Size, alignof(CXXDefaultArgExpr));
+ return new (Mem) CXXDefaultArgExpr(CXXDefaultArgExprClass, Loc, Param,
+ RewrittenExpr, UsedContext);
+}
+
+Expr *CXXDefaultArgExpr::getExpr() {
+ return CXXDefaultArgExprBits.HasRewrittenInit ? getAdjustedRewrittenExpr()
+ : getParam()->getDefaultArg();
+}
+
+Expr *CXXDefaultArgExpr::getAdjustedRewrittenExpr() {
+ assert(hasRewrittenInit() &&
+ "expected this CXXDefaultArgExpr to have a rewritten init.");
+ Expr *Init = getRewrittenExpr();
+ if (auto *E = dyn_cast_if_present<FullExpr>(Init))
+ if (!isa<ConstantExpr>(E))
+ return E->getSubExpr();
+ return Init;
+}
+
CXXDefaultInitExpr::CXXDefaultInitExpr(const ASTContext &Ctx,
SourceLocation Loc, FieldDecl *Field,
- QualType Ty, DeclContext *UsedContext)
+ QualType Ty, DeclContext *UsedContext,
+ Expr *RewrittenInitExpr)
: Expr(CXXDefaultInitExprClass, Ty.getNonLValueExprType(Ctx),
Ty->isLValueReferenceType() ? VK_LValue
: Ty->isRValueReferenceType() ? VK_XValue
@@ -959,11 +1003,43 @@ CXXDefaultInitExpr::CXXDefaultInitExpr(const ASTContext &Ctx,
/*FIXME*/ OK_Ordinary),
Field(Field), UsedContext(UsedContext) {
CXXDefaultInitExprBits.Loc = Loc;
+ CXXDefaultInitExprBits.HasRewrittenInit = RewrittenInitExpr != nullptr;
+
+ if (CXXDefaultInitExprBits.HasRewrittenInit)
+ *getTrailingObjects<Expr *>() = RewrittenInitExpr;
+
assert(Field->hasInClassInitializer());
setDependence(computeDependence(this));
}
+CXXDefaultInitExpr *CXXDefaultInitExpr::CreateEmpty(const ASTContext &C,
+ bool HasRewrittenInit) {
+ size_t Size = totalSizeToAlloc<Expr *>(HasRewrittenInit);
+ auto *Mem = C.Allocate(Size, alignof(CXXDefaultInitExpr));
+ return new (Mem) CXXDefaultInitExpr(EmptyShell(), HasRewrittenInit);
+}
+
+CXXDefaultInitExpr *CXXDefaultInitExpr::Create(const ASTContext &Ctx,
+ SourceLocation Loc,
+ FieldDecl *Field,
+ DeclContext *UsedContext,
+ Expr *RewrittenInitExpr) {
+
+ size_t Size = totalSizeToAlloc<Expr *>(RewrittenInitExpr != nullptr);
+ auto *Mem = Ctx.Allocate(Size, alignof(CXXDefaultInitExpr));
+ return new (Mem) CXXDefaultInitExpr(Ctx, Loc, Field, Field->getType(),
+ UsedContext, RewrittenInitExpr);
+}
+
+Expr *CXXDefaultInitExpr::getExpr() {
+ assert(Field->getInClassInitializer() && "initializer hasn't been parsed");
+ if (hasRewrittenInit())
+ return getRewrittenExpr();
+
+ return Field->getInClassInitializer();
+}
+
CXXTemporary *CXXTemporary::Create(const ASTContext &C,
const CXXDestructorDecl *Destructor) {
return new (C) CXXTemporary(Destructor);
@@ -988,8 +1064,10 @@ CXXTemporaryObjectExpr::CXXTemporaryObjectExpr(
CXXTemporaryObjectExprClass, Ty, TSI->getTypeLoc().getBeginLoc(),
Cons, /* Elidable=*/false, Args, HadMultipleCandidates,
ListInitialization, StdInitListInitialization, ZeroInitialization,
- CXXConstructExpr::CK_Complete, ParenOrBraceRange),
- TSI(TSI) {}
+ CXXConstructionKind::Complete, ParenOrBraceRange),
+ TSI(TSI) {
+ setDependence(computeDependence(this));
+}
CXXTemporaryObjectExpr::CXXTemporaryObjectExpr(EmptyShell Empty,
unsigned NumArgs)
@@ -1034,7 +1112,7 @@ CXXConstructExpr *CXXConstructExpr::Create(
CXXConstructorDecl *Ctor, bool Elidable, ArrayRef<Expr *> Args,
bool HadMultipleCandidates, bool ListInitialization,
bool StdInitListInitialization, bool ZeroInitialization,
- ConstructionKind ConstructKind, SourceRange ParenOrBraceRange) {
+ CXXConstructionKind ConstructKind, SourceRange ParenOrBraceRange) {
unsigned SizeOfTrailingObjects = sizeOfTrailingObjects(Args.size());
void *Mem = Ctx.Allocate(sizeof(CXXConstructExpr) + SizeOfTrailingObjects,
alignof(CXXConstructExpr));
@@ -1057,7 +1135,7 @@ CXXConstructExpr::CXXConstructExpr(
StmtClass SC, QualType Ty, SourceLocation Loc, CXXConstructorDecl *Ctor,
bool Elidable, ArrayRef<Expr *> Args, bool HadMultipleCandidates,
bool ListInitialization, bool StdInitListInitialization,
- bool ZeroInitialization, ConstructionKind ConstructKind,
+ bool ZeroInitialization, CXXConstructionKind ConstructKind,
SourceRange ParenOrBraceRange)
: Expr(SC, Ty, VK_PRValue, OK_Ordinary), Constructor(Ctor),
ParenOrBraceRange(ParenOrBraceRange), NumArgs(Args.size()) {
@@ -1066,7 +1144,8 @@ CXXConstructExpr::CXXConstructExpr(
CXXConstructExprBits.ListInitialization = ListInitialization;
CXXConstructExprBits.StdInitListInitialization = StdInitListInitialization;
CXXConstructExprBits.ZeroInitialization = ZeroInitialization;
- CXXConstructExprBits.ConstructionKind = ConstructKind;
+ CXXConstructExprBits.ConstructionKind = llvm::to_underlying(ConstructKind);
+ CXXConstructExprBits.IsImmediateEscalating = false;
CXXConstructExprBits.Loc = Loc;
Stmt **TrailingArgs = getTrailingArgs();
@@ -1075,7 +1154,9 @@ CXXConstructExpr::CXXConstructExpr(
TrailingArgs[I] = Args[I];
}
- setDependence(computeDependence(this));
+ // CXXTemporaryObjectExpr does this itself after setting its TypeSourceInfo.
+ if (SC == CXXConstructExprClass)
+ setDependence(computeDependence(this));
}
CXXConstructExpr::CXXConstructExpr(StmtClass SC, EmptyShell Empty,
@@ -1083,7 +1164,7 @@ CXXConstructExpr::CXXConstructExpr(StmtClass SC, EmptyShell Empty,
: Expr(SC, Empty), NumArgs(NumArgs) {}
LambdaCapture::LambdaCapture(SourceLocation Loc, bool Implicit,
- LambdaCaptureKind Kind, VarDecl *Var,
+ LambdaCaptureKind Kind, ValueDecl *Var,
SourceLocation EllipsisLoc)
: DeclAndBits(Var, 0), Loc(Loc), EllipsisLoc(EllipsisLoc) {
unsigned Bits = 0;
@@ -1093,7 +1174,7 @@ LambdaCapture::LambdaCapture(SourceLocation Loc, bool Implicit,
switch (Kind) {
case LCK_StarThis:
Bits |= Capture_ByCopy;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LCK_This:
assert(!Var && "'this' capture cannot have a variable!");
Bits |= Capture_This;
@@ -1101,7 +1182,7 @@ LambdaCapture::LambdaCapture(SourceLocation Loc, bool Implicit,
case LCK_ByCopy:
Bits |= Capture_ByCopy;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LCK_ByRef:
assert(Var && "capture must have a variable!");
break;
@@ -1207,16 +1288,16 @@ const CompoundStmt *LambdaExpr::getCompoundStmtBody() const {
}
bool LambdaExpr::isInitCapture(const LambdaCapture *C) const {
- return (C->capturesVariable() && C->getCapturedVar()->isInitCapture() &&
- (getCallOperator() == C->getCapturedVar()->getDeclContext()));
+ return C->capturesVariable() && C->getCapturedVar()->isInitCapture() &&
+ getCallOperator() == C->getCapturedVar()->getDeclContext();
}
LambdaExpr::capture_iterator LambdaExpr::capture_begin() const {
- return getLambdaClass()->getLambdaData().Captures;
+ return getLambdaClass()->captures_begin();
}
LambdaExpr::capture_iterator LambdaExpr::capture_end() const {
- return capture_begin() + capture_size();
+ return getLambdaClass()->captures_end();
}
LambdaExpr::capture_range LambdaExpr::captures() const {
@@ -1228,9 +1309,8 @@ LambdaExpr::capture_iterator LambdaExpr::explicit_capture_begin() const {
}
LambdaExpr::capture_iterator LambdaExpr::explicit_capture_end() const {
- struct CXXRecordDecl::LambdaDefinitionData &Data
- = getLambdaClass()->getLambdaData();
- return Data.Captures + Data.NumExplicitCaptures;
+ return capture_begin() +
+ getLambdaClass()->getLambdaData().NumExplicitCaptures;
}
LambdaExpr::capture_range LambdaExpr::explicit_captures() const {
@@ -1322,17 +1402,16 @@ ExprWithCleanups *ExprWithCleanups::Create(const ASTContext &C,
return new (buffer) ExprWithCleanups(empty, numObjects);
}
-CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(QualType T,
- TypeSourceInfo *TSI,
- SourceLocation LParenLoc,
- ArrayRef<Expr *> Args,
- SourceLocation RParenLoc)
+CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(
+ QualType T, TypeSourceInfo *TSI, SourceLocation LParenLoc,
+ ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool IsListInit)
: Expr(CXXUnresolvedConstructExprClass, T,
(TSI->getType()->isLValueReferenceType() ? VK_LValue
: TSI->getType()->isRValueReferenceType() ? VK_XValue
: VK_PRValue),
OK_Ordinary),
- TSI(TSI), LParenLoc(LParenLoc), RParenLoc(RParenLoc) {
+ TypeAndInitForm(TSI, IsListInit), LParenLoc(LParenLoc),
+ RParenLoc(RParenLoc) {
CXXUnresolvedConstructExprBits.NumArgs = Args.size();
auto **StoredArgs = getTrailingObjects<Expr *>();
for (unsigned I = 0; I != Args.size(); ++I)
@@ -1341,11 +1420,12 @@ CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(QualType T,
}
CXXUnresolvedConstructExpr *CXXUnresolvedConstructExpr::Create(
- const ASTContext &Context, QualType T, TypeSourceInfo *TSI, SourceLocation LParenLoc,
- ArrayRef<Expr *> Args, SourceLocation RParenLoc) {
+ const ASTContext &Context, QualType T, TypeSourceInfo *TSI,
+ SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc,
+ bool IsListInit) {
void *Mem = Context.Allocate(totalSizeToAlloc<Expr *>(Args.size()));
- return new (Mem)
- CXXUnresolvedConstructExpr(T, TSI, LParenLoc, Args, RParenLoc);
+ return new (Mem) CXXUnresolvedConstructExpr(T, TSI, LParenLoc, Args,
+ RParenLoc, IsListInit);
}
CXXUnresolvedConstructExpr *
@@ -1356,7 +1436,7 @@ CXXUnresolvedConstructExpr::CreateEmpty(const ASTContext &Context,
}
SourceLocation CXXUnresolvedConstructExpr::getBeginLoc() const {
- return TSI->getTypeLoc().getBeginLoc();
+ return TypeAndInitForm.getPointer()->getTypeLoc().getBeginLoc();
}
CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(
@@ -1436,6 +1516,16 @@ CXXDependentScopeMemberExpr *CXXDependentScopeMemberExpr::CreateEmpty(
EmptyShell(), HasTemplateKWAndArgsInfo, HasFirstQualifierFoundInScope);
}
+CXXThisExpr *CXXThisExpr::Create(const ASTContext &Ctx, SourceLocation L,
+ QualType Ty, bool IsImplicit) {
+ return new (Ctx) CXXThisExpr(L, Ty, IsImplicit,
+ Ctx.getLangOpts().HLSL ? VK_LValue : VK_PRValue);
+}
+
+CXXThisExpr *CXXThisExpr::CreateEmpty(const ASTContext &Ctx) {
+ return new (Ctx) CXXThisExpr(EmptyShell());
+}
+
static bool hasOnlyNonStaticMemberFunctions(UnresolvedSetIterator begin,
UnresolvedSetIterator end) {
do {
@@ -1551,12 +1641,12 @@ CXXRecordDecl *UnresolvedMemberExpr::getNamingClass() {
return Record;
}
-SizeOfPackExpr *
-SizeOfPackExpr::Create(ASTContext &Context, SourceLocation OperatorLoc,
- NamedDecl *Pack, SourceLocation PackLoc,
- SourceLocation RParenLoc,
- Optional<unsigned> Length,
- ArrayRef<TemplateArgument> PartialArgs) {
+SizeOfPackExpr *SizeOfPackExpr::Create(ASTContext &Context,
+ SourceLocation OperatorLoc,
+ NamedDecl *Pack, SourceLocation PackLoc,
+ SourceLocation RParenLoc,
+ std::optional<unsigned> Length,
+ ArrayRef<TemplateArgument> PartialArgs) {
void *Storage =
Context.Allocate(totalSizeToAlloc<TemplateArgument>(PartialArgs.size()));
return new (Storage) SizeOfPackExpr(Context.getSizeType(), OperatorLoc, Pack,
@@ -1570,6 +1660,11 @@ SizeOfPackExpr *SizeOfPackExpr::CreateDeserialized(ASTContext &Context,
return new (Storage) SizeOfPackExpr(EmptyShell(), NumPartialArgs);
}
+NonTypeTemplateParmDecl *SubstNonTypeTemplateParmExpr::getParameter() const {
+ return cast<NonTypeTemplateParmDecl>(
+ getReplacedTemplateParameterList(getAssociatedDecl())->asArray()[Index]);
+}
+
QualType SubstNonTypeTemplateParmExpr::getParameterType(
const ASTContext &Context) const {
// Note that, for a class type NTTP, we will have an lvalue of type 'const
@@ -1580,17 +1675,24 @@ QualType SubstNonTypeTemplateParmExpr::getParameterType(
}
SubstNonTypeTemplateParmPackExpr::SubstNonTypeTemplateParmPackExpr(
- QualType T, ExprValueKind ValueKind, NonTypeTemplateParmDecl *Param,
- SourceLocation NameLoc, const TemplateArgument &ArgPack)
+ QualType T, ExprValueKind ValueKind, SourceLocation NameLoc,
+ const TemplateArgument &ArgPack, Decl *AssociatedDecl, unsigned Index)
: Expr(SubstNonTypeTemplateParmPackExprClass, T, ValueKind, OK_Ordinary),
- Param(Param), Arguments(ArgPack.pack_begin()),
- NumArguments(ArgPack.pack_size()), NameLoc(NameLoc) {
+ AssociatedDecl(AssociatedDecl), Arguments(ArgPack.pack_begin()),
+ NumArguments(ArgPack.pack_size()), Index(Index), NameLoc(NameLoc) {
+ assert(AssociatedDecl != nullptr);
setDependence(ExprDependence::TypeValueInstantiation |
ExprDependence::UnexpandedPack);
}
+NonTypeTemplateParmDecl *
+SubstNonTypeTemplateParmPackExpr::getParameterPack() const {
+ return cast<NonTypeTemplateParmDecl>(
+ getReplacedTemplateParameterList(getAssociatedDecl())->asArray()[Index]);
+}
+
TemplateArgument SubstNonTypeTemplateParmPackExpr::getArgumentPack() const {
- return TemplateArgument(llvm::makeArrayRef(Arguments, NumArguments));
+ return TemplateArgument(llvm::ArrayRef(Arguments, NumArguments));
}
FunctionParmPackExpr::FunctionParmPackExpr(QualType T, VarDecl *ParamPack,
@@ -1742,3 +1844,21 @@ CUDAKernelCallExpr *CUDAKernelCallExpr::CreateEmpty(const ASTContext &Ctx,
alignof(CUDAKernelCallExpr));
return new (Mem) CUDAKernelCallExpr(NumArgs, HasFPFeatures, Empty);
}
+
+CXXParenListInitExpr *
+CXXParenListInitExpr::Create(ASTContext &C, ArrayRef<Expr *> Args, QualType T,
+ unsigned NumUserSpecifiedExprs,
+ SourceLocation InitLoc, SourceLocation LParenLoc,
+ SourceLocation RParenLoc) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(Args.size()));
+ return new (Mem) CXXParenListInitExpr(Args, T, NumUserSpecifiedExprs, InitLoc,
+ LParenLoc, RParenLoc);
+}
+
+CXXParenListInitExpr *CXXParenListInitExpr::CreateEmpty(ASTContext &C,
+ unsigned NumExprs,
+ EmptyShell Empty) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(NumExprs),
+ alignof(CXXParenListInitExpr));
+ return new (Mem) CXXParenListInitExpr(Empty, NumExprs);
+}
diff --git a/contrib/llvm-project/clang/lib/AST/ExprClassification.cpp b/contrib/llvm-project/clang/lib/AST/ExprClassification.cpp
index 6998e28fd2ea..ffa7c6802ea6 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprClassification.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprClassification.cpp
@@ -160,7 +160,6 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::CXXPseudoDestructorExprClass:
case Expr::UnaryExprOrTypeTraitExprClass:
case Expr::CXXNewExprClass:
- case Expr::CXXThisExprClass:
case Expr::CXXNullPtrLiteralExprClass:
case Expr::ImaginaryLiteralClass:
case Expr::GNUNullExprClass:
@@ -205,6 +204,10 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::RequiresExprClass:
return Cl::CL_PRValue;
+ // Make HLSL this reference-like
+ case Expr::CXXThisExprClass:
+ return Lang.HLSL ? Cl::CL_LValue : Cl::CL_PRValue;
+
case Expr::ConstantExprClass:
return ClassifyInternal(Ctx, cast<ConstantExpr>(E)->getSubExpr());
@@ -442,6 +445,11 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::SYCLUniqueStableNameExprClass:
return Cl::CL_PRValue;
break;
+
+ case Expr::CXXParenListInitExprClass:
+ if (isa<ArrayType>(E->getType()))
+ return Cl::CL_ArrayTemporary;
+ return Cl::CL_ClassTemporary;
}
llvm_unreachable("unhandled expression kind in classification");
@@ -457,22 +465,24 @@ static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D) {
// lvalue unless it's a reference type (C++ [temp.param]p6), so we need to
// special-case this.
- if (isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance())
- return Cl::CL_MemberFunction;
+ if (const auto *M = dyn_cast<CXXMethodDecl>(D)) {
+ if (M->isImplicitObjectMemberFunction())
+ return Cl::CL_MemberFunction;
+ if (M->isStatic())
+ return Cl::CL_LValue;
+ return Cl::CL_PRValue;
+ }
bool islvalue;
if (const auto *NTTParm = dyn_cast<NonTypeTemplateParmDecl>(D))
islvalue = NTTParm->getType()->isReferenceType() ||
NTTParm->getType()->isRecordType();
else
- islvalue = isa<VarDecl>(D) || isa<FieldDecl>(D) ||
- isa<IndirectFieldDecl>(D) ||
- isa<BindingDecl>(D) ||
- isa<MSGuidDecl>(D) ||
- isa<TemplateParamObjectDecl>(D) ||
- (Ctx.getLangOpts().CPlusPlus &&
- (isa<FunctionDecl>(D) || isa<MSPropertyDecl>(D) ||
- isa<FunctionTemplateDecl>(D)));
+ islvalue =
+ isa<VarDecl, FieldDecl, IndirectFieldDecl, BindingDecl, MSGuidDecl,
+ UnnamedGlobalConstantDecl, TemplateParamObjectDecl>(D) ||
+ (Ctx.getLangOpts().CPlusPlus &&
+ (isa<FunctionDecl, MSPropertyDecl, FunctionTemplateDecl>(D)));
return islvalue ? Cl::CL_LValue : Cl::CL_PRValue;
}
@@ -546,8 +556,13 @@ static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E) {
// -- If it refers to a static member function [...], then E1.E2 is an
// lvalue; [...]
// -- Otherwise [...] E1.E2 is a prvalue.
- if (const auto *Method = dyn_cast<CXXMethodDecl>(Member))
- return Method->isStatic() ? Cl::CL_LValue : Cl::CL_MemberFunction;
+ if (const auto *Method = dyn_cast<CXXMethodDecl>(Member)) {
+ if (Method->isStatic())
+ return Cl::CL_LValue;
+ if (Method->isImplicitObjectMemberFunction())
+ return Cl::CL_MemberFunction;
+ return Cl::CL_PRValue;
+ }
// -- If E2 is a member enumerator [...], the expression E1.E2 is a prvalue.
// So is everything else we haven't handled yet.
diff --git a/contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp b/contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp
index 8cb8625e2a1a..0704630c0fc2 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprConcepts.cpp
@@ -31,75 +31,48 @@
using namespace clang;
ConceptSpecializationExpr::ConceptSpecializationExpr(
- const ASTContext &C, NestedNameSpecifierLoc NNS,
- SourceLocation TemplateKWLoc, DeclarationNameInfo ConceptNameInfo,
- NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
- const ASTTemplateArgumentListInfo *ArgsAsWritten,
- ArrayRef<TemplateArgument> ConvertedArgs,
+ const ASTContext &C, ConceptReference *Loc,
+ ImplicitConceptSpecializationDecl *SpecDecl,
const ConstraintSatisfaction *Satisfaction)
: Expr(ConceptSpecializationExprClass, C.BoolTy, VK_PRValue, OK_Ordinary),
- ConceptReference(NNS, TemplateKWLoc, ConceptNameInfo, FoundDecl,
- NamedConcept, ArgsAsWritten),
- NumTemplateArgs(ConvertedArgs.size()),
+ ConceptRef(Loc), SpecDecl(SpecDecl),
Satisfaction(Satisfaction
? ASTConstraintSatisfaction::Create(C, *Satisfaction)
: nullptr) {
- setTemplateArguments(ConvertedArgs);
setDependence(computeDependence(this, /*ValueDependent=*/!Satisfaction));
// Currently guaranteed by the fact concepts can only be at namespace-scope.
- assert(!NestedNameSpec ||
- (!NestedNameSpec.getNestedNameSpecifier()->isInstantiationDependent() &&
- !NestedNameSpec.getNestedNameSpecifier()
- ->containsUnexpandedParameterPack()));
+ assert(!Loc->getNestedNameSpecifierLoc() ||
+ (!Loc->getNestedNameSpecifierLoc()
+ .getNestedNameSpecifier()
+ ->isInstantiationDependent() &&
+ !Loc->getNestedNameSpecifierLoc()
+ .getNestedNameSpecifier()
+ ->containsUnexpandedParameterPack()));
assert((!isValueDependent() || isInstantiationDependent()) &&
"should not be value-dependent");
}
-ConceptSpecializationExpr::ConceptSpecializationExpr(EmptyShell Empty,
- unsigned NumTemplateArgs)
- : Expr(ConceptSpecializationExprClass, Empty), ConceptReference(),
- NumTemplateArgs(NumTemplateArgs) { }
-
-void ConceptSpecializationExpr::setTemplateArguments(
- ArrayRef<TemplateArgument> Converted) {
- assert(Converted.size() == NumTemplateArgs);
- std::uninitialized_copy(Converted.begin(), Converted.end(),
- getTrailingObjects<TemplateArgument>());
-}
+ConceptSpecializationExpr::ConceptSpecializationExpr(EmptyShell Empty)
+ : Expr(ConceptSpecializationExprClass, Empty) {}
ConceptSpecializationExpr *
-ConceptSpecializationExpr::Create(const ASTContext &C,
- NestedNameSpecifierLoc NNS,
- SourceLocation TemplateKWLoc,
- DeclarationNameInfo ConceptNameInfo,
- NamedDecl *FoundDecl,
- ConceptDecl *NamedConcept,
- const ASTTemplateArgumentListInfo *ArgsAsWritten,
- ArrayRef<TemplateArgument> ConvertedArgs,
+ConceptSpecializationExpr::Create(const ASTContext &C, ConceptReference *Loc,
+ ImplicitConceptSpecializationDecl *SpecDecl,
const ConstraintSatisfaction *Satisfaction) {
- void *Buffer = C.Allocate(totalSizeToAlloc<TemplateArgument>(
- ConvertedArgs.size()));
- return new (Buffer) ConceptSpecializationExpr(C, NNS, TemplateKWLoc,
- ConceptNameInfo, FoundDecl,
- NamedConcept, ArgsAsWritten,
- ConvertedArgs, Satisfaction);
+ return new (C) ConceptSpecializationExpr(C, Loc, SpecDecl, Satisfaction);
}
ConceptSpecializationExpr::ConceptSpecializationExpr(
- const ASTContext &C, ConceptDecl *NamedConcept,
- ArrayRef<TemplateArgument> ConvertedArgs,
+ const ASTContext &C, ConceptReference *Loc,
+ ImplicitConceptSpecializationDecl *SpecDecl,
const ConstraintSatisfaction *Satisfaction, bool Dependent,
bool ContainsUnexpandedParameterPack)
: Expr(ConceptSpecializationExprClass, C.BoolTy, VK_PRValue, OK_Ordinary),
- ConceptReference(NestedNameSpecifierLoc(), SourceLocation(),
- DeclarationNameInfo(), NamedConcept, NamedConcept,
- nullptr),
- NumTemplateArgs(ConvertedArgs.size()),
+ ConceptRef(Loc), SpecDecl(SpecDecl),
Satisfaction(Satisfaction
? ASTConstraintSatisfaction::Create(C, *Satisfaction)
: nullptr) {
- setTemplateArguments(ConvertedArgs);
ExprDependence D = ExprDependence::None;
if (!Satisfaction)
D |= ExprDependence::Value;
@@ -111,25 +84,14 @@ ConceptSpecializationExpr::ConceptSpecializationExpr(
}
ConceptSpecializationExpr *
-ConceptSpecializationExpr::Create(const ASTContext &C,
- ConceptDecl *NamedConcept,
- ArrayRef<TemplateArgument> ConvertedArgs,
+ConceptSpecializationExpr::Create(const ASTContext &C, ConceptReference *Loc,
+ ImplicitConceptSpecializationDecl *SpecDecl,
const ConstraintSatisfaction *Satisfaction,
bool Dependent,
bool ContainsUnexpandedParameterPack) {
- void *Buffer = C.Allocate(totalSizeToAlloc<TemplateArgument>(
- ConvertedArgs.size()));
- return new (Buffer) ConceptSpecializationExpr(
- C, NamedConcept, ConvertedArgs, Satisfaction, Dependent,
- ContainsUnexpandedParameterPack);
-}
-
-ConceptSpecializationExpr *
-ConceptSpecializationExpr::Create(ASTContext &C, EmptyShell Empty,
- unsigned NumTemplateArgs) {
- void *Buffer = C.Allocate(totalSizeToAlloc<TemplateArgument>(
- NumTemplateArgs));
- return new (Buffer) ConceptSpecializationExpr(Empty, NumTemplateArgs);
+ return new (C)
+ ConceptSpecializationExpr(C, Loc, SpecDecl, Satisfaction, Dependent,
+ ContainsUnexpandedParameterPack);
}
const TypeConstraint *
@@ -141,14 +103,29 @@ concepts::ExprRequirement::ReturnTypeRequirement::getTypeConstraint() const {
->getTypeConstraint();
}
+// Search through the requirements, and see if any have a RecoveryExpr in it,
+// which means this RequiresExpr ALSO needs to be invalid.
+static bool RequirementContainsError(concepts::Requirement *R) {
+ if (auto *ExprReq = dyn_cast<concepts::ExprRequirement>(R))
+ return ExprReq->getExpr() && ExprReq->getExpr()->containsErrors();
+
+ if (auto *NestedReq = dyn_cast<concepts::NestedRequirement>(R))
+ return !NestedReq->hasInvalidConstraint() &&
+ NestedReq->getConstraintExpr() &&
+ NestedReq->getConstraintExpr()->containsErrors();
+ return false;
+}
+
RequiresExpr::RequiresExpr(ASTContext &C, SourceLocation RequiresKWLoc,
- RequiresExprBodyDecl *Body,
+ RequiresExprBodyDecl *Body, SourceLocation LParenLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
+ SourceLocation RParenLoc,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation RBraceLoc)
: Expr(RequiresExprClass, C.BoolTy, VK_PRValue, OK_Ordinary),
NumLocalParameters(LocalParameters.size()),
- NumRequirements(Requirements.size()), Body(Body), RBraceLoc(RBraceLoc) {
+ NumRequirements(Requirements.size()), Body(Body), LParenLoc(LParenLoc),
+ RParenLoc(RParenLoc), RBraceLoc(RBraceLoc) {
RequiresExprBits.IsSatisfied = false;
RequiresExprBits.RequiresKWLoc = RequiresKWLoc;
bool Dependent = false;
@@ -167,6 +144,9 @@ RequiresExpr::RequiresExpr(ASTContext &C, SourceLocation RequiresKWLoc,
if (!RequiresExprBits.IsSatisfied)
break;
}
+
+ if (RequirementContainsError(R))
+ setDependence(getDependence() | ExprDependence::Error);
}
std::copy(LocalParameters.begin(), LocalParameters.end(),
getTrailingObjects<ParmVarDecl *>());
@@ -190,18 +170,18 @@ RequiresExpr::RequiresExpr(ASTContext &C, EmptyShell Empty,
: Expr(RequiresExprClass, Empty), NumLocalParameters(NumLocalParameters),
NumRequirements(NumRequirements) { }
-RequiresExpr *
-RequiresExpr::Create(ASTContext &C, SourceLocation RequiresKWLoc,
- RequiresExprBodyDecl *Body,
- ArrayRef<ParmVarDecl *> LocalParameters,
- ArrayRef<concepts::Requirement *> Requirements,
- SourceLocation RBraceLoc) {
+RequiresExpr *RequiresExpr::Create(
+ ASTContext &C, SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body,
+ SourceLocation LParenLoc, ArrayRef<ParmVarDecl *> LocalParameters,
+ SourceLocation RParenLoc, ArrayRef<concepts::Requirement *> Requirements,
+ SourceLocation RBraceLoc) {
void *Mem =
C.Allocate(totalSizeToAlloc<ParmVarDecl *, concepts::Requirement *>(
LocalParameters.size(), Requirements.size()),
alignof(RequiresExpr));
- return new (Mem) RequiresExpr(C, RequiresKWLoc, Body, LocalParameters,
- Requirements, RBraceLoc);
+ return new (Mem)
+ RequiresExpr(C, RequiresKWLoc, Body, LParenLoc, LocalParameters,
+ RParenLoc, Requirements, RBraceLoc);
}
RequiresExpr *
diff --git a/contrib/llvm-project/clang/lib/AST/ExprConstShared.h b/contrib/llvm-project/clang/lib/AST/ExprConstShared.h
new file mode 100644
index 000000000000..a97eac85abc6
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/ExprConstShared.h
@@ -0,0 +1,59 @@
+//===--- ExprConstShared.h - Shared consetxpr functionality ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Shared functionality between the new constant expression
+// interpreter (AST/Interp/) and the current one (ExprConstant.cpp).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_AST_EXPRCONSTSHARED_H
+#define LLVM_CLANG_LIB_AST_EXPRCONSTSHARED_H
+
+namespace clang {
+class QualType;
+class LangOptions;
+} // namespace clang
+using namespace clang;
+/// Values returned by __builtin_classify_type, chosen to match the values
+/// produced by GCC's builtin.
+enum class GCCTypeClass {
+ None = -1,
+ Void = 0,
+ Integer = 1,
+ // GCC reserves 2 for character types, but instead classifies them as
+ // integers.
+ Enum = 3,
+ Bool = 4,
+ Pointer = 5,
+ // GCC reserves 6 for references, but appears to never use it (because
+ // expressions never have reference type, presumably).
+ PointerToDataMember = 7,
+ RealFloat = 8,
+ Complex = 9,
+ // GCC reserves 10 for functions, but does not use it since GCC version 6 due
+ // to decay to pointer. (Prior to version 6 it was only used in C++ mode).
+ // GCC claims to reserve 11 for pointers to member functions, but *actually*
+ // uses 12 for that purpose, same as for a class or struct. Maybe it
+ // internally implements a pointer to member as a struct? Who knows.
+ PointerToMemberFunction = 12, // Not a bug, see above.
+ ClassOrStruct = 12,
+ Union = 13,
+ // GCC reserves 14 for arrays, but does not use it since GCC version 6 due to
+ // decay to pointer. (Prior to version 6 it was only used in C++ mode).
+ // GCC reserves 15 for strings, but actually uses 5 (pointer) for string
+ // literals.
+ // Lang = 16,
+ // OpaqueType = 17,
+ BitInt = 18,
+ Vector = 19
+};
+
+GCCTypeClass EvaluateBuiltinClassifyType(QualType T,
+ const LangOptions &LangOpts);
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp b/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
index ba2865d66e0a..edf9b5e2d52b 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
@@ -32,6 +32,7 @@
//
//===----------------------------------------------------------------------===//
+#include "ExprConstShared.h"
#include "Interp/Context.h"
#include "Interp/Frame.h"
#include "Interp/State.h"
@@ -50,15 +51,18 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Builtins.h"
+#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/SaveAndRestore.h"
+#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/raw_ostream.h"
#include <cstring>
#include <functional>
+#include <optional>
#define DEBUG_TYPE "exprconstant"
@@ -68,7 +72,6 @@ using llvm::APInt;
using llvm::APSInt;
using llvm::APFloat;
using llvm::FixedPointSemantics;
-using llvm::Optional;
namespace {
struct LValue;
@@ -530,6 +533,9 @@ namespace {
/// This - The binding for the this pointer in this call, if any.
const LValue *This;
+ /// CallExpr - The syntactical structure of member function calls
+ const Expr *CallExpr;
+
/// Information on how to find the arguments to this call. Our arguments
/// are stored in our parent's CallStackFrame, using the ParmVarDecl* as a
/// key and this value as the version.
@@ -546,8 +552,8 @@ namespace {
/// Temporaries - Temporary lvalues materialized within this stack frame.
MapTy Temporaries;
- /// CallLoc - The location of the call expression for this call.
- SourceLocation CallLoc;
+ /// CallRange - The source range of the call expression for this call.
+ SourceRange CallRange;
/// Index - The call index of this call.
unsigned Index;
@@ -578,12 +584,12 @@ namespace {
/// LambdaCaptureFields - Mapping from captured variables/this to
/// corresponding data members in the closure class.
- llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
- FieldDecl *LambdaThisCaptureField;
+ llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
+ FieldDecl *LambdaThisCaptureField = nullptr;
- CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
+ CallStackFrame(EvalInfo &Info, SourceRange CallRange,
const FunctionDecl *Callee, const LValue *This,
- CallRef Arguments);
+ const Expr *CallExpr, CallRef Arguments);
~CallStackFrame();
// Return the temporary for Key whose version number is Version.
@@ -592,11 +598,6 @@ namespace {
auto LB = Temporaries.lower_bound(KV);
if (LB != Temporaries.end() && LB->first == KV)
return &LB->second;
- // Pair (Key,Version) wasn't found in the map. Check that no elements
- // in the map have 'Key' as their key.
- assert((LB == Temporaries.end() || LB->first.first != Key) &&
- (LB == Temporaries.begin() || std::prev(LB)->first.first != Key) &&
- "Element with key 'Key' found in map");
return nullptr;
}
@@ -627,10 +628,10 @@ namespace {
/// Allocate storage for a parameter of a function call made in this frame.
APValue &createParam(CallRef Args, const ParmVarDecl *PVD, LValue &LV);
- void describe(llvm::raw_ostream &OS) override;
+ void describe(llvm::raw_ostream &OS) const override;
Frame *getCaller() const override { return Caller; }
- SourceLocation getCallLocation() const override { return CallLoc; }
+ SourceRange getCallRange() const override { return CallRange; }
const FunctionDecl *getCallee() const override { return Callee; }
bool isStdFunction() const {
@@ -640,6 +641,10 @@ namespace {
return false;
}
+ /// Whether we're in a context where [[msvc::constexpr]] evaluation is
+ /// permitted. See MSConstexprDocs for description of permitted contexts.
+ bool CanEvalMSConstexpr = false;
+
private:
APValue &createLocal(APValue::LValueBase Base, const void *Key, QualType T,
ScopeKind Scope);
@@ -660,6 +665,32 @@ namespace {
CallStackFrame &Frame;
const LValue *OldThis;
};
+
+ // A shorthand time trace scope struct, prints source range, for example
+ // {"name":"EvaluateAsRValue","args":{"detail":"<test.cc:8:21, col:25>"}}}
+ class ExprTimeTraceScope {
+ public:
+ ExprTimeTraceScope(const Expr *E, const ASTContext &Ctx, StringRef Name)
+ : TimeScope(Name, [E, &Ctx] {
+ return E->getSourceRange().printToString(Ctx.getSourceManager());
+ }) {}
+
+ private:
+ llvm::TimeTraceScope TimeScope;
+ };
+
+ /// RAII object used to change the current ability of
+ /// [[msvc::constexpr]] evaulation.
+ struct MSConstexprContextRAII {
+ CallStackFrame &Frame;
+ bool OldValue;
+ explicit MSConstexprContextRAII(CallStackFrame &Frame, bool Value)
+ : Frame(Frame), OldValue(Frame.CanEvalMSConstexpr) {
+ Frame.CanEvalMSConstexpr = Value;
+ }
+
+ ~MSConstexprContextRAII() { Frame.CanEvalMSConstexpr = OldValue; }
+ };
}
static bool HandleDestruction(EvalInfo &Info, const Expr *E,
@@ -917,10 +948,6 @@ namespace {
/// fold (not just why it's not strictly a constant expression)?
bool HasFoldFailureDiagnostic;
- /// Whether or not we're in a context where the front end requires a
- /// constant value.
- bool InConstantContext;
-
/// Whether we're checking that an expression is a potential constant
/// expression. If so, do not fail on constructs that could become constant
/// later on (such as a use of an undefined global).
@@ -973,16 +1000,19 @@ namespace {
CallStackDepth(0), NextCallIndex(1),
StepsLeft(C.getLangOpts().ConstexprStepLimit),
EnableNewConstInterp(C.getLangOpts().EnableNewConstInterp),
- BottomFrame(*this, SourceLocation(), nullptr, nullptr, CallRef()),
+ BottomFrame(*this, SourceLocation(), /*Callee=*/nullptr,
+ /*This=*/nullptr,
+ /*CallExpr=*/nullptr, CallRef()),
EvaluatingDecl((const ValueDecl *)nullptr),
EvaluatingDeclValue(nullptr), HasActiveDiagnostic(false),
- HasFoldFailureDiagnostic(false), InConstantContext(false),
- EvalMode(Mode) {}
+ HasFoldFailureDiagnostic(false), EvalMode(Mode) {}
~EvalInfo() {
discardCleanups();
}
+ ASTContext &getCtx() const override { return Ctx; }
+
void setEvaluatingDecl(APValue::LValueBase Base, APValue &Value,
EvaluatingDeclKind EDK = EvaluatingDeclKind::Ctor) {
EvaluatingDecl = Base;
@@ -1007,6 +1037,34 @@ namespace {
return false;
}
+ bool CheckArraySize(SourceLocation Loc, unsigned BitWidth,
+ uint64_t ElemCount, bool Diag) {
+ // FIXME: GH63562
+ // APValue stores array extents as unsigned,
+ // so anything that is greater that unsigned would overflow when
+ // constructing the array, we catch this here.
+ if (BitWidth > ConstantArrayType::getMaxSizeBits(Ctx) ||
+ ElemCount > uint64_t(std::numeric_limits<unsigned>::max())) {
+ if (Diag)
+ FFDiag(Loc, diag::note_constexpr_new_too_large) << ElemCount;
+ return false;
+ }
+
+ // FIXME: GH63562
+ // Arrays allocate an APValue per element.
+ // We use the number of constexpr steps as a proxy for the maximum size
+ // of arrays to avoid exhausting the system resources, as initialization
+ // of each element is likely to take some number of steps anyway.
+ uint64_t Limit = Ctx.getLangOpts().ConstexprStepLimit;
+ if (ElemCount > Limit) {
+ if (Diag)
+ FFDiag(Loc, diag::note_constexpr_new_exceeds_limits)
+ << ElemCount << Limit;
+ return false;
+ }
+ return true;
+ }
+
std::pair<CallStackFrame *, unsigned>
getCallFrameAndDepth(unsigned CallIndex) {
assert(CallIndex && "no call index in getCallFrameAndDepth");
@@ -1034,8 +1092,8 @@ namespace {
APValue *createHeapAlloc(const Expr *E, QualType T, LValue &LV);
- Optional<DynAlloc*> lookupDynamicAlloc(DynamicAllocLValue DA) {
- Optional<DynAlloc*> Result;
+ std::optional<DynAlloc *> lookupDynamicAlloc(DynamicAllocLValue DA) {
+ std::optional<DynAlloc *> Result;
auto It = HeapAllocs.find(DA);
if (It != HeapAllocs.end())
Result = &It->second;
@@ -1084,14 +1142,10 @@ namespace {
void performLifetimeExtension() {
// Disable the cleanups for lifetime-extended temporaries.
- CleanupStack.erase(std::remove_if(CleanupStack.begin(),
- CleanupStack.end(),
- [](Cleanup &C) {
- return !C.isDestroyedAtEndOf(
- ScopeKind::FullExpression);
- }),
- CleanupStack.end());
- }
+ llvm::erase_if(CleanupStack, [](Cleanup &C) {
+ return !C.isDestroyedAtEndOf(ScopeKind::FullExpression);
+ });
+ }
/// Throw away any remaining cleanups at the end of evaluation. If any
/// cleanups would have had a side-effect, note that as an unmodeled
@@ -1120,8 +1174,6 @@ namespace {
Expr::EvalStatus &getEvalStatus() const override { return EvalStatus; }
- ASTContext &getCtx() const override { return Ctx; }
-
// If we have a prior diagnostic, it will be noting that the expression
// isn't a constant expression. This diagnostic is more important,
// unless we require this evaluation to produce a constant expression.
@@ -1136,7 +1188,7 @@ namespace {
if (!HasFoldFailureDiagnostic)
break;
// We've already failed to fold something. Keep that diagnostic.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case EM_ConstantExpression:
case EM_ConstantExpressionUnevaluated:
setActiveDiagnostic(false);
@@ -1223,7 +1275,7 @@ namespace {
/// (Foo(), 1) // use noteSideEffect
/// (Foo() || true) // use noteSideEffect
/// Foo() + 1 // use noteFailure
- LLVM_NODISCARD bool noteFailure() {
+ [[nodiscard]] bool noteFailure() {
// Failure when evaluating some expression often means there is some
// subexpression whose evaluation was skipped. Therefore, (because we
// don't track whether we skipped an expression when unwinding after an
@@ -1295,7 +1347,7 @@ namespace {
class SpeculativeEvaluationRAII {
EvalInfo *Info = nullptr;
Expr::EvalStatus OldStatus;
- unsigned OldSpeculativeEvaluationDepth;
+ unsigned OldSpeculativeEvaluationDepth = 0;
void moveFromAndCancel(SpeculativeEvaluationRAII &&Other) {
Info = Other.Info;
@@ -1434,11 +1486,12 @@ void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info,
setInvalid();
}
-CallStackFrame::CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
+CallStackFrame::CallStackFrame(EvalInfo &Info, SourceRange CallRange,
const FunctionDecl *Callee, const LValue *This,
- CallRef Call)
+ const Expr *CallExpr, CallRef Call)
: Info(Info), Caller(Info.CurrentCall), Callee(Callee), This(This),
- Arguments(Call), CallLoc(CallLoc), Index(Info.NextCallIndex++) {
+ CallExpr(CallExpr), Arguments(Call), CallRange(CallRange),
+ Index(Info.NextCallIndex++) {
Info.CurrentCall = this;
++Info.CallStackDepth;
}
@@ -1710,8 +1763,8 @@ namespace {
struct MemberPtr {
MemberPtr() {}
- explicit MemberPtr(const ValueDecl *Decl) :
- DeclAndIsDerivedMember(Decl, false), Path() {}
+ explicit MemberPtr(const ValueDecl *Decl)
+ : DeclAndIsDerivedMember(Decl, false) {}
/// The member or (direct or indirect) field referred to by this member
/// pointer, or 0 if this is a null member pointer.
@@ -1826,6 +1879,8 @@ static bool EvaluateComplex(const Expr *E, ComplexValue &Res, EvalInfo &Info);
static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result,
EvalInfo &Info);
static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result);
+static bool EvaluateBuiltinStrLen(const Expr *E, uint64_t &Result,
+ EvalInfo &Info);
/// Evaluate an integer or fixed point expression into an APResult.
static bool EvaluateFixedPointOrInteger(const Expr *E, APFixedPoint &Result,
@@ -1906,25 +1961,46 @@ APValue *EvalInfo::createHeapAlloc(const Expr *E, QualType T, LValue &LV) {
}
/// Produce a string describing the given constexpr call.
-void CallStackFrame::describe(raw_ostream &Out) {
+void CallStackFrame::describe(raw_ostream &Out) const {
unsigned ArgIndex = 0;
- bool IsMemberCall = isa<CXXMethodDecl>(Callee) &&
- !isa<CXXConstructorDecl>(Callee) &&
- cast<CXXMethodDecl>(Callee)->isInstance();
+ bool IsMemberCall =
+ isa<CXXMethodDecl>(Callee) && !isa<CXXConstructorDecl>(Callee) &&
+ cast<CXXMethodDecl>(Callee)->isImplicitObjectMemberFunction();
if (!IsMemberCall)
- Out << *Callee << '(';
+ Callee->getNameForDiagnostic(Out, Info.Ctx.getPrintingPolicy(),
+ /*Qualified=*/false);
if (This && IsMemberCall) {
- APValue Val;
- This->moveInto(Val);
- Val.printPretty(Out, Info.Ctx,
- This->Designator.MostDerivedType);
- // FIXME: Add parens around Val if needed.
- Out << "->" << *Callee << '(';
+ if (const auto *MCE = dyn_cast_if_present<CXXMemberCallExpr>(CallExpr)) {
+ const Expr *Object = MCE->getImplicitObjectArgument();
+ Object->printPretty(Out, /*Helper=*/nullptr, Info.Ctx.getPrintingPolicy(),
+ /*Indentation=*/0);
+ if (Object->getType()->isPointerType())
+ Out << "->";
+ else
+ Out << ".";
+ } else if (const auto *OCE =
+ dyn_cast_if_present<CXXOperatorCallExpr>(CallExpr)) {
+ OCE->getArg(0)->printPretty(Out, /*Helper=*/nullptr,
+ Info.Ctx.getPrintingPolicy(),
+ /*Indentation=*/0);
+ Out << ".";
+ } else {
+ APValue Val;
+ This->moveInto(Val);
+ Val.printPretty(
+ Out, Info.Ctx,
+ Info.Ctx.getLValueReferenceType(This->Designator.MostDerivedType));
+ Out << ".";
+ }
+ Callee->getNameForDiagnostic(Out, Info.Ctx.getPrintingPolicy(),
+ /*Qualified=*/false);
IsMemberCall = false;
}
+ Out << '(';
+
for (FunctionDecl::param_const_iterator I = Callee->param_begin(),
E = Callee->param_end(); I != E; ++I, ++ArgIndex) {
if (ArgIndex > (unsigned)IsMemberCall)
@@ -1956,11 +2032,12 @@ static bool EvaluateIgnoredValue(EvalInfo &Info, const Expr *E) {
return true;
}
-/// Should this call expression be treated as a string literal?
-static bool IsStringLiteralCall(const CallExpr *E) {
+/// Should this call expression be treated as a no-op?
+static bool IsNoOpCall(const CallExpr *E) {
unsigned Builtin = E->getBuiltinCallee();
return (Builtin == Builtin::BI__builtin___CFStringMakeConstantString ||
- Builtin == Builtin::BI__builtin___NSStringMakeConstantString);
+ Builtin == Builtin::BI__builtin___NSStringMakeConstantString ||
+ Builtin == Builtin::BI__builtin_function_start);
}
static bool IsGlobalLValue(APValue::LValueBase B) {
@@ -1969,7 +2046,8 @@ static bool IsGlobalLValue(APValue::LValueBase B) {
// ... a null pointer value, or a prvalue core constant expression of type
// std::nullptr_t.
- if (!B) return true;
+ if (!B)
+ return true;
if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
// ... the address of an object with static storage duration,
@@ -1979,7 +2057,8 @@ static bool IsGlobalLValue(APValue::LValueBase B) {
return true;
// ... the address of a function,
// ... the address of a GUID [MS extension],
- return isa<FunctionDecl>(D) || isa<MSGuidDecl>(D);
+ // ... the address of an unnamed global constant
+ return isa<FunctionDecl, MSGuidDecl, UnnamedGlobalConstantDecl>(D);
}
if (B.is<TypeInfoLValue>() || B.is<DynamicAllocLValue>())
@@ -2006,7 +2085,7 @@ static bool IsGlobalLValue(APValue::LValueBase B) {
case Expr::ObjCBoxedExprClass:
return cast<ObjCBoxedExpr>(E)->isExpressibleAsConstantInitializer();
case Expr::CallExprClass:
- return IsStringLiteralCall(cast<CallExpr>(E));
+ return IsNoOpCall(cast<CallExpr>(E));
// For GCC compatibility, &&label has static storage duration.
case Expr::AddrLabelExprClass:
return true;
@@ -2014,6 +2093,10 @@ static bool IsGlobalLValue(APValue::LValueBase B) {
// Block variables at global or local static scope.
case Expr::BlockExprClass:
return !cast<BlockExpr>(E)->getBlockDecl()->hasCaptures();
+ // The APValue generated from a __builtin_source_location will be emitted as a
+ // literal.
+ case Expr::SourceLocExprClass:
+ return true;
case Expr::ImplicitValueInitExprClass:
// FIXME:
// We can never form an lvalue with an implicit value initialization as its
@@ -2091,10 +2174,11 @@ static void NoteLValueLocation(EvalInfo &Info, APValue::LValueBase Base) {
Info.Note(E->getExprLoc(), diag::note_constexpr_temporary_here);
else if (DynamicAllocLValue DA = Base.dyn_cast<DynamicAllocLValue>()) {
// FIXME: Produce a note for dangling pointers too.
- if (Optional<DynAlloc*> Alloc = Info.lookupDynamicAlloc(DA))
+ if (std::optional<DynAlloc *> Alloc = Info.lookupDynamicAlloc(DA))
Info.Note((*Alloc)->AllocExpr->getExprLoc(),
diag::note_constexpr_dynamic_alloc_here);
}
+
// We have no information to show for a typeid(T) object.
}
@@ -2112,7 +2196,7 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
EvalInfo &Info, SourceLocation DiagLoc,
QualType Type, const APValue &Value,
ConstantExprKind Kind,
- SourceLocation SubobjectLoc,
+ const FieldDecl *SubobjectDecl,
CheckedTemporaries &CheckedTemps);
/// Check that this reference or pointer core constant expression is a valid
@@ -2156,13 +2240,12 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
}
}
- if (auto *FD = dyn_cast_or_null<FunctionDecl>(BaseVD)) {
- if (FD->isConsteval()) {
- Info.FFDiag(Loc, diag::note_consteval_address_accessible)
- << !Type->isAnyPointerType();
- Info.Note(FD->getLocation(), diag::note_declared_at);
- return false;
- }
+ if (auto *FD = dyn_cast_or_null<FunctionDecl>(BaseVD);
+ FD && FD->isImmediateFunction()) {
+ Info.FFDiag(Loc, diag::note_consteval_address_accessible)
+ << !Type->isAnyPointerType();
+ Info.Note(FD->getLocation(), diag::note_declared_at);
+ return false;
}
// Check that the object is a global. Note that the fake 'this' object we
@@ -2170,12 +2253,10 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
// assumed to be global here.
if (!IsGlobalLValue(Base)) {
if (Info.getLangOpts().CPlusPlus11) {
- const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
Info.FFDiag(Loc, diag::note_constexpr_non_global, 1)
- << IsReferenceType << !Designator.Entries.empty()
- << !!VD << VD;
-
- auto *VarD = dyn_cast_or_null<VarDecl>(VD);
+ << IsReferenceType << !Designator.Entries.empty() << !!BaseVD
+ << BaseVD;
+ auto *VarD = dyn_cast_or_null<VarDecl>(BaseVD);
if (VarD && VarD->isConstexpr()) {
// Non-static local constexpr variables have unintuitive semantics:
// constexpr int a = 1;
@@ -2217,6 +2298,19 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
if (!isForManglingOnly(Kind) && Var->hasAttr<DLLImportAttr>())
// FIXME: Diagnostic!
return false;
+
+ // In CUDA/HIP device compilation, only device side variables have
+ // constant addresses.
+ if (Info.getCtx().getLangOpts().CUDA &&
+ Info.getCtx().getLangOpts().CUDAIsDevice &&
+ Info.getCtx().CUDAConstantEvalCtx.NoWrongSidedVars) {
+ if ((!Var->hasAttr<CUDADeviceAttr>() &&
+ !Var->hasAttr<CUDAConstantAttr>() &&
+ !Var->getType()->isCUDADeviceBuiltinSurfaceType() &&
+ !Var->getType()->isCUDADeviceBuiltinTextureType()) ||
+ Var->hasAttr<HIPManagedAttr>())
+ return false;
+ }
}
if (const auto *FD = dyn_cast<const FunctionDecl>(BaseVD)) {
// __declspec(dllimport) must be handled very carefully:
@@ -2248,8 +2342,8 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
APValue *V = MTE->getOrCreateValue(false);
assert(V && "evasluation result refers to uninitialised temporary");
if (!CheckEvaluationResult(CheckEvaluationResultKind::ConstantExpression,
- Info, MTE->getExprLoc(), TempType, *V,
- Kind, SourceLocation(), CheckedTemps))
+ Info, MTE->getExprLoc(), TempType, *V, Kind,
+ /*SubobjectDecl=*/nullptr, CheckedTemps))
return false;
}
}
@@ -2287,7 +2381,7 @@ static bool CheckMemberPointerConstantExpression(EvalInfo &Info,
const auto *FD = dyn_cast_or_null<CXXMethodDecl>(Member);
if (!FD)
return true;
- if (FD->isConsteval()) {
+ if (FD->isImmediateFunction()) {
Info.FFDiag(Loc, diag::note_consteval_address_accessible) << /*pointer*/ 0;
Info.Note(FD->getLocation(), diag::note_declared_at);
return false;
@@ -2332,13 +2426,18 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
EvalInfo &Info, SourceLocation DiagLoc,
QualType Type, const APValue &Value,
ConstantExprKind Kind,
- SourceLocation SubobjectLoc,
+ const FieldDecl *SubobjectDecl,
CheckedTemporaries &CheckedTemps) {
if (!Value.hasValue()) {
- Info.FFDiag(DiagLoc, diag::note_constexpr_uninitialized)
- << true << Type;
- if (SubobjectLoc.isValid())
- Info.Note(SubobjectLoc, diag::note_constexpr_subobject_declared_here);
+ if (SubobjectDecl) {
+ Info.FFDiag(DiagLoc, diag::note_constexpr_uninitialized)
+ << /*(name)*/ 1 << SubobjectDecl;
+ Info.Note(SubobjectDecl->getLocation(),
+ diag::note_constexpr_subobject_declared_here);
+ } else {
+ Info.FFDiag(DiagLoc, diag::note_constexpr_uninitialized)
+ << /*of type*/ 0 << Type;
+ }
return false;
}
@@ -2355,29 +2454,35 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
for (unsigned I = 0, N = Value.getArrayInitializedElts(); I != N; ++I) {
if (!CheckEvaluationResult(CERK, Info, DiagLoc, EltTy,
Value.getArrayInitializedElt(I), Kind,
- SubobjectLoc, CheckedTemps))
+ SubobjectDecl, CheckedTemps))
return false;
}
if (!Value.hasArrayFiller())
return true;
return CheckEvaluationResult(CERK, Info, DiagLoc, EltTy,
- Value.getArrayFiller(), Kind, SubobjectLoc,
+ Value.getArrayFiller(), Kind, SubobjectDecl,
CheckedTemps);
}
if (Value.isUnion() && Value.getUnionField()) {
return CheckEvaluationResult(
CERK, Info, DiagLoc, Value.getUnionField()->getType(),
- Value.getUnionValue(), Kind, Value.getUnionField()->getLocation(),
- CheckedTemps);
+ Value.getUnionValue(), Kind, Value.getUnionField(), CheckedTemps);
}
if (Value.isStruct()) {
RecordDecl *RD = Type->castAs<RecordType>()->getDecl();
if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
unsigned BaseIndex = 0;
for (const CXXBaseSpecifier &BS : CD->bases()) {
- if (!CheckEvaluationResult(CERK, Info, DiagLoc, BS.getType(),
- Value.getStructBase(BaseIndex), Kind,
- BS.getBeginLoc(), CheckedTemps))
+ const APValue &BaseValue = Value.getStructBase(BaseIndex);
+ if (!BaseValue.hasValue()) {
+ SourceLocation TypeBeginLoc = BS.getBaseTypeLoc();
+ Info.FFDiag(TypeBeginLoc, diag::note_constexpr_uninitialized_base)
+ << BS.getType() << SourceRange(TypeBeginLoc, BS.getEndLoc());
+ return false;
+ }
+ if (!CheckEvaluationResult(CERK, Info, DiagLoc, BS.getType(), BaseValue,
+ Kind, /*SubobjectDecl=*/nullptr,
+ CheckedTemps))
return false;
++BaseIndex;
}
@@ -2387,8 +2492,8 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
continue;
if (!CheckEvaluationResult(CERK, Info, DiagLoc, I->getType(),
- Value.getStructField(I->getFieldIndex()),
- Kind, I->getLocation(), CheckedTemps))
+ Value.getStructField(I->getFieldIndex()), Kind,
+ I, CheckedTemps))
return false;
}
}
@@ -2422,7 +2527,7 @@ static bool CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc,
CheckedTemporaries CheckedTemps;
return CheckEvaluationResult(CheckEvaluationResultKind::ConstantExpression,
Info, DiagLoc, Type, Value, Kind,
- SourceLocation(), CheckedTemps);
+ /*SubobjectDecl=*/nullptr, CheckedTemps);
}
/// Check that this evaluated value is fully-initialized and can be loaded by
@@ -2432,7 +2537,7 @@ static bool CheckFullyInitialized(EvalInfo &Info, SourceLocation DiagLoc,
CheckedTemporaries CheckedTemps;
return CheckEvaluationResult(
CheckEvaluationResultKind::FullyInitialized, Info, DiagLoc, Type, Value,
- ConstantExprKind::Normal, SourceLocation(), CheckedTemps);
+ ConstantExprKind::Normal, /*SubobjectDecl=*/nullptr, CheckedTemps);
}
/// Enforce C++2a [expr.const]/4.17, which disallows new-expressions unless
@@ -2453,6 +2558,7 @@ static bool EvalPointerValueAsBool(const APValue &Value, bool &Result) {
// A null base expression indicates a null pointer. These are always
// evaluatable, and they are false unless the offset is zero.
if (!Value.getLValueBase()) {
+ // TODO: Should a non-null pointer with an offset of zero evaluate to true?
Result = !Value.getLValueOffset().isZero();
return true;
}
@@ -2465,6 +2571,7 @@ static bool EvalPointerValueAsBool(const APValue &Value, bool &Result) {
}
static bool HandleConversionToBool(const APValue &Val, bool &Result) {
+ // TODO: This function should produce notes if it fails.
switch (Val.getKind()) {
case APValue::None:
case APValue::Indeterminate:
@@ -2489,6 +2596,9 @@ static bool HandleConversionToBool(const APValue &Val, bool &Result) {
case APValue::LValue:
return EvalPointerValueAsBool(Val, Result);
case APValue::MemberPointer:
+ if (Val.getMemberPointerDecl() && Val.getMemberPointerDecl()->isWeak()) {
+ return false;
+ }
Result = Val.getMemberPointerDecl();
return true;
case APValue::Vector:
@@ -2535,18 +2645,15 @@ static bool HandleFloatToIntCast(EvalInfo &Info, const Expr *E,
return true;
}
-/// Get rounding mode used for evaluation of the specified expression.
-/// \param[out] DynamicRM Is set to true is the requested rounding mode is
-/// dynamic.
+/// Get rounding mode to use in evaluation of the specified expression.
+///
/// If rounding mode is unknown at compile time, still try to evaluate the
/// expression. If the result is exact, it does not depend on rounding mode.
/// So return "tonearest" mode instead of "dynamic".
-static llvm::RoundingMode getActiveRoundingMode(EvalInfo &Info, const Expr *E,
- bool &DynamicRM) {
+static llvm::RoundingMode getActiveRoundingMode(EvalInfo &Info, const Expr *E) {
llvm::RoundingMode RM =
E->getFPFeaturesInEffect(Info.Ctx.getLangOpts()).getRoundingMode();
- DynamicRM = (RM == llvm::RoundingMode::Dynamic);
- if (DynamicRM)
+ if (RM == llvm::RoundingMode::Dynamic)
RM = llvm::RoundingMode::NearestTiesToEven;
return RM;
}
@@ -2570,14 +2677,14 @@ static bool checkFloatingPointResult(EvalInfo &Info, const Expr *E,
if ((St != APFloat::opOK) &&
(FPO.getRoundingMode() == llvm::RoundingMode::Dynamic ||
- FPO.getFPExceptionMode() != LangOptions::FPE_Ignore ||
+ FPO.getExceptionMode() != LangOptions::FPE_Ignore ||
FPO.getAllowFEnvAccess())) {
Info.FFDiag(E, diag::note_constexpr_float_arithmetic_strict);
return false;
}
if ((St & APFloat::opStatus::opInvalidOp) &&
- FPO.getFPExceptionMode() != LangOptions::FPE_Ignore) {
+ FPO.getExceptionMode() != LangOptions::FPE_Ignore) {
// There is no usefully definable result.
Info.FFDiag(E);
return false;
@@ -2596,8 +2703,7 @@ static bool HandleFloatToFloatCast(EvalInfo &Info, const Expr *E,
QualType SrcType, QualType DestType,
APFloat &Result) {
assert(isa<CastExpr>(E) || isa<CompoundAssignOperator>(E));
- bool DynamicRM;
- llvm::RoundingMode RM = getActiveRoundingMode(Info, E, DynamicRM);
+ llvm::RoundingMode RM = getActiveRoundingMode(Info, E);
APFloat::opStatus St;
APFloat Value = Result;
bool ignored;
@@ -2623,14 +2729,9 @@ static bool HandleIntToFloatCast(EvalInfo &Info, const Expr *E,
QualType SrcType, const APSInt &Value,
QualType DestType, APFloat &Result) {
Result = APFloat(Info.Ctx.getFloatTypeSemantics(DestType), 1);
- APFloat::opStatus St = Result.convertFromAPInt(Value, Value.isSigned(),
- APFloat::rmNearestTiesToEven);
- if (!Info.InConstantContext && St != llvm::APFloatBase::opOK &&
- FPO.isFPConstrained()) {
- Info.FFDiag(E, diag::note_constexpr_float_arithmetic_strict);
- return false;
- }
- return true;
+ llvm::RoundingMode RM = getActiveRoundingMode(Info, E);
+ APFloat::opStatus St = Result.convertFromAPInt(Value, Value.isSigned(), RM);
+ return checkFloatingPointResult(Info, E, St);
}
static bool truncateBitfieldValue(EvalInfo &Info, const Expr *E,
@@ -2654,53 +2755,6 @@ static bool truncateBitfieldValue(EvalInfo &Info, const Expr *E,
return true;
}
-static bool EvalAndBitcastToAPInt(EvalInfo &Info, const Expr *E,
- llvm::APInt &Res) {
- APValue SVal;
- if (!Evaluate(SVal, Info, E))
- return false;
- if (SVal.isInt()) {
- Res = SVal.getInt();
- return true;
- }
- if (SVal.isFloat()) {
- Res = SVal.getFloat().bitcastToAPInt();
- return true;
- }
- if (SVal.isVector()) {
- QualType VecTy = E->getType();
- unsigned VecSize = Info.Ctx.getTypeSize(VecTy);
- QualType EltTy = VecTy->castAs<VectorType>()->getElementType();
- unsigned EltSize = Info.Ctx.getTypeSize(EltTy);
- bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian();
- Res = llvm::APInt::getNullValue(VecSize);
- for (unsigned i = 0; i < SVal.getVectorLength(); i++) {
- APValue &Elt = SVal.getVectorElt(i);
- llvm::APInt EltAsInt;
- if (Elt.isInt()) {
- EltAsInt = Elt.getInt();
- } else if (Elt.isFloat()) {
- EltAsInt = Elt.getFloat().bitcastToAPInt();
- } else {
- // Don't try to handle vectors of anything other than int or float
- // (not sure if it's possible to hit this case).
- Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
- return false;
- }
- unsigned BaseEltSize = EltAsInt.getBitWidth();
- if (BigEndian)
- Res |= EltAsInt.zextOrTrunc(VecSize).rotr(i*EltSize+BaseEltSize);
- else
- Res |= EltAsInt.zextOrTrunc(VecSize).rotl(i*EltSize);
- }
- return true;
- }
- // Give up if the input isn't an int, float, or vector. For example, we
- // reject "(v4i16)(intptr_t)&a".
- Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
- return false;
-}
-
/// Perform the given integer operation, which is known to need at most BitWidth
/// bits, and check for overflow in the original type (if that type was not an
/// unsigned type).
@@ -2720,16 +2774,17 @@ static bool CheckedIntArithmetic(EvalInfo &Info, const Expr *E,
if (Info.checkingForUndefinedBehavior())
Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
diag::warn_integer_constant_overflow)
- << toString(Result, 10) << E->getType();
+ << toString(Result, 10) << E->getType() << E->getSourceRange();
return HandleOverflow(Info, E, Value, E->getType());
}
return true;
}
/// Perform the given binary integer operation.
-static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS,
- BinaryOperatorKind Opcode, APSInt RHS,
- APSInt &Result) {
+static bool handleIntIntBinOp(EvalInfo &Info, const BinaryOperator *E,
+ const APSInt &LHS, BinaryOperatorKind Opcode,
+ APSInt RHS, APSInt &Result) {
+ bool HandleOverflowResult = true;
switch (Opcode) {
default:
Info.FFDiag(E);
@@ -2749,17 +2804,18 @@ static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS,
case BO_Div:
case BO_Rem:
if (RHS == 0) {
- Info.FFDiag(E, diag::note_expr_divide_by_zero);
+ Info.FFDiag(E, diag::note_expr_divide_by_zero)
+ << E->getRHS()->getSourceRange();
return false;
}
- Result = (Opcode == BO_Rem ? LHS % RHS : LHS / RHS);
// Check for overflow case: INT_MIN / -1 or INT_MIN % -1. APSInt supports
// this operation and gives the two's complement result.
- if (RHS.isNegative() && RHS.isAllOnesValue() &&
- LHS.isSigned() && LHS.isMinSignedValue())
- return HandleOverflow(Info, E, -LHS.extend(LHS.getBitWidth() + 1),
- E->getType());
- return true;
+ if (RHS.isNegative() && RHS.isAllOnes() && LHS.isSigned() &&
+ LHS.isMinSignedValue())
+ HandleOverflowResult = HandleOverflow(
+ Info, E, -LHS.extend(LHS.getBitWidth() + 1), E->getType());
+ Result = (Opcode == BO_Rem ? LHS % RHS : LHS / RHS);
+ return HandleOverflowResult;
case BO_Shl: {
if (Info.getLangOpts().OpenCL)
// OpenCL 6.3j: shift values are effectively % word size of LHS.
@@ -2787,7 +2843,7 @@ static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS,
// E1 x 2^E2 module 2^N.
if (LHS.isNegative())
Info.CCEDiag(E, diag::note_constexpr_lshift_of_negative) << LHS;
- else if (LHS.countLeadingZeros() < SA)
+ else if (LHS.countl_zero() < SA)
Info.CCEDiag(E, diag::note_constexpr_lshift_discards);
}
Result = LHS << SA;
@@ -2832,8 +2888,7 @@ static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS,
static bool handleFloatFloatBinOp(EvalInfo &Info, const BinaryOperator *E,
APFloat &LHS, BinaryOperatorKind Opcode,
const APFloat &RHS) {
- bool DynamicRM;
- llvm::RoundingMode RM = getActiveRoundingMode(Info, E, DynamicRM);
+ llvm::RoundingMode RM = getActiveRoundingMode(Info, E);
APFloat::opStatus St;
switch (Opcode) {
default:
@@ -2933,6 +2988,11 @@ handleCompareOpForVectorHelper(const APTy &LHSValue, BinaryOperatorKind Opcode,
break;
}
+ // The boolean operations on these vector types use an instruction that
+ // results in a mask of '-1' for the 'truth' value. Ensure that we negate 1
+ // to -1 to make sure that we produce the correct value.
+ Result.negate();
+
return true;
}
@@ -3142,9 +3202,14 @@ static bool HandleLValueIndirectMember(EvalInfo &Info, const Expr *E,
return true;
}
+enum class SizeOfType {
+ SizeOf,
+ DataSizeOf,
+};
+
/// Get the size of the given type in char units.
-static bool HandleSizeof(EvalInfo &Info, SourceLocation Loc,
- QualType Type, CharUnits &Size) {
+static bool HandleSizeof(EvalInfo &Info, SourceLocation Loc, QualType Type,
+ CharUnits &Size, SizeOfType SOT = SizeOfType::SizeOf) {
// sizeof(void), __alignof__(void), sizeof(function) = 1 as a gcc
// extension.
if (Type->isVoidType() || Type->isFunctionType()) {
@@ -3164,7 +3229,10 @@ static bool HandleSizeof(EvalInfo &Info, SourceLocation Loc,
return false;
}
- Size = Info.Ctx.getTypeSizeInChars(Type);
+ if (SOT == SizeOfType::SizeOf)
+ Size = Info.Ctx.getTypeSizeInChars(Type);
+ else
+ Size = Info.Ctx.getTypeInfoDataSizeInChars(Type).Width;
return true;
}
@@ -3273,6 +3341,9 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
return false;
}
+ if (E->isValueDependent())
+ return false;
+
// Dig out the initializer, and use the declaration which it's attached to.
// FIXME: We should eventually check whether the variable has a reachable
// initializing declaration.
@@ -3307,12 +3378,9 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
// Check that we can fold the initializer. In C++, we will have already done
// this in the cases where it matters for conformance.
- SmallVector<PartialDiagnosticAt, 8> Notes;
- if (!VD->evaluateValue(Notes)) {
- Info.FFDiag(E, diag::note_constexpr_var_init_non_constant,
- Notes.size() + 1) << VD;
+ if (!VD->evaluateValue()) {
+ Info.FFDiag(E, diag::note_constexpr_var_init_non_constant, 1) << VD;
NoteLValueLocation(Info, Base);
- Info.addNotes(Notes);
return false;
}
@@ -3382,8 +3450,7 @@ static APSInt extractStringLiteralCharacter(EvalInfo &Info, const Expr *Lit,
assert(CAT && "string literal isn't an array");
QualType CharType = CAT->getElementType();
assert(CharType->isIntegerType() && "unexpected character type");
-
- APSInt Value(S->getCharByteWidth() * Info.Ctx.getCharWidth(),
+ APSInt Value(Info.Ctx.getTypeSize(CharType),
CharType->isUnsignedIntegerType());
if (Index < S->getLength())
Value = S->getCodeUnit(Index);
@@ -3406,7 +3473,7 @@ static void expandStringLiteral(EvalInfo &Info, const StringLiteral *S,
unsigned Elts = CAT->getSize().getZExtValue();
Result = APValue(APValue::UninitArray(),
std::min(S->getLength(), Elts), Elts);
- APSInt Value(S->getCharByteWidth() * Info.Ctx.getCharWidth(),
+ APSInt Value(Info.Ctx.getTypeSize(CharType),
CharType->isUnsignedIntegerType());
if (Result.hasArrayFiller())
Result.getArrayFiller() = APValue(Value);
@@ -3542,6 +3609,14 @@ static bool lifetimeStartedInEvaluation(EvalInfo &Info,
llvm_unreachable("unknown evaluating decl kind");
}
+static bool CheckArraySize(EvalInfo &Info, const ConstantArrayType *CAT,
+ SourceLocation CallLoc = {}) {
+ return Info.CheckArraySize(
+ CAT->getSizeExpr() ? CAT->getSizeExpr()->getBeginLoc() : CallLoc,
+ CAT->getNumAddressingBits(Info.Ctx), CAT->getSize().getZExtValue(),
+ /*Diag=*/true);
+}
+
namespace {
/// A handle to a complete object (an object that is not a subobject of
/// another object).
@@ -3623,7 +3698,8 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
!isValidIndeterminateAccess(handler.AccessKind))) {
if (!Info.checkingPotentialConstantExpression())
Info.FFDiag(E, diag::note_constexpr_access_uninit)
- << handler.AccessKind << O->isIndeterminate();
+ << handler.AccessKind << O->isIndeterminate()
+ << E->getSourceRange();
return handler.failed();
}
@@ -3633,9 +3709,9 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
if ((ObjType.isConstQualified() || ObjType.isVolatileQualified()) &&
ObjType->isRecordType() &&
Info.isEvaluatingCtorDtor(
- Obj.Base, llvm::makeArrayRef(Sub.Entries.begin(),
- Sub.Entries.begin() + I)) !=
- ConstructionPhase::None) {
+ Obj.Base,
+ llvm::ArrayRef(Sub.Entries.begin(), Sub.Entries.begin() + I)) !=
+ ConstructionPhase::None) {
ObjType = Info.Ctx.getCanonicalType(ObjType);
ObjType.removeLocalConst();
ObjType.removeLocalVolatile();
@@ -3716,6 +3792,9 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
if (O->getArrayInitializedElts() > Index)
O = &O->getArrayInitializedElt(Index);
else if (!isRead(handler.AccessKind)) {
+ if (!CheckArraySize(Info, CAT, E->getExprLoc()))
+ return handler.failed();
+
expandArray(*O, Index);
O = &O->getArrayInitializedElt(Index);
} else
@@ -4010,6 +4089,16 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
return CompleteObject(LVal.Base, &V, GD->getType());
}
+ // Allow reading the APValue from an UnnamedGlobalConstantDecl.
+ if (auto *GCD = dyn_cast<UnnamedGlobalConstantDecl>(D)) {
+ if (isModification(AK)) {
+ Info.FFDiag(E, diag::note_constexpr_modify_global);
+ return CompleteObject();
+ }
+ return CompleteObject(LVal.Base, const_cast<APValue *>(&GCD->getValue()),
+ GCD->getType());
+ }
+
// Allow reading from template parameter objects.
if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(D)) {
if (isModification(AK)) {
@@ -4105,7 +4194,7 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
if (!evaluateVarDeclInit(Info, E, VD, Frame, LVal.getLValueVersion(), BaseVal))
return CompleteObject();
} else if (DynamicAllocLValue DA = LVal.Base.dyn_cast<DynamicAllocLValue>()) {
- Optional<DynAlloc*> Alloc = Info.lookupDynamicAlloc(DA);
+ std::optional<DynAlloc *> Alloc = Info.lookupDynamicAlloc(DA);
if (!Alloc) {
Info.FFDiag(E, diag::note_constexpr_access_deleted_object) << AK;
return CompleteObject();
@@ -4230,9 +4319,33 @@ handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv, QualType Type,
Info.FFDiag(Conv);
return false;
}
+
APValue Lit;
if (!Evaluate(Lit, Info, CLE->getInitializer()))
return false;
+
+ // According to GCC info page:
+ //
+ // 6.28 Compound Literals
+ //
+ // As an optimization, G++ sometimes gives array compound literals longer
+ // lifetimes: when the array either appears outside a function or has a
+ // const-qualified type. If foo and its initializer had elements of type
+ // char *const rather than char *, or if foo were a global variable, the
+ // array would have static storage duration. But it is probably safest
+ // just to avoid the use of array compound literals in C++ code.
+ //
+ // Obey that rule by checking constness for converted array types.
+
+ QualType CLETy = CLE->getType();
+ if (CLETy->isArrayType() && !Type->isArrayType()) {
+ if (!CLETy.isConstant(Info.Ctx)) {
+ Info.FFDiag(Conv);
+ Info.Note(CLE->getExprLoc(), diag::note_declared_at);
+ return false;
+ }
+ }
+
CompleteObject LitObj(LVal.Base, &Lit, Base->getType());
return extractSubobject(Info, Conv, LitObj, LVal.Designator, RVal, AK);
} else if (isa<StringLiteral>(Base) || isa<PredefinedExpr>(Base)) {
@@ -4316,6 +4429,11 @@ struct CompoundAssignSubobjectHandler {
return foundPointer(Subobj, SubobjType);
case APValue::Vector:
return foundVector(Subobj, SubobjType);
+ case APValue::Indeterminate:
+ Info.FFDiag(E, diag::note_constexpr_access_uninit)
+ << /*read of=*/0 << /*uninitialized object=*/1
+ << E->getLHS()->getSourceRange();
+ return false;
default:
// FIXME: can this happen?
Info.FFDiag(E);
@@ -4522,11 +4640,13 @@ struct IncDecSubobjectHandler {
if (Old) *Old = APValue(Value);
APFloat One(Value.getSemantics(), 1);
+ llvm::RoundingMode RM = getActiveRoundingMode(Info, E);
+ APFloat::opStatus St;
if (AccessKind == AK_Increment)
- Value.add(One, APFloat::rmNearestTiesToEven);
+ St = Value.add(One, RM);
else
- Value.subtract(One, APFloat::rmNearestTiesToEven);
- return true;
+ St = Value.subtract(One, RM);
+ return checkFloatingPointResult(Info, E, St);
}
bool foundPointer(APValue &Subobj, QualType SubobjType) {
if (!checkConst(SubobjType))
@@ -4580,6 +4700,9 @@ static bool EvaluateObjectArgument(EvalInfo &Info, const Expr *Object,
if (Object->getType()->isLiteralType(Info.Ctx))
return EvaluateTemporary(Object, This, Info);
+ if (Object->getType()->isRecordType() && Object->isPRValue())
+ return EvaluateTemporary(Object, This, Info);
+
Info.FFDiag(Object, diag::note_constexpr_nonliteral) << Object->getType();
return false;
}
@@ -4735,8 +4858,13 @@ static bool HandleBaseToDerivedCast(EvalInfo &Info, const CastExpr *E,
/// Get the value to use for a default-initialized object of type T.
/// Return false if it encounters something invalid.
-static bool getDefaultInitValue(QualType T, APValue &Result) {
+static bool handleDefaultInitValue(QualType T, APValue &Result) {
bool Success = true;
+
+ // If there is already a value present don't overwrite it.
+ if (!Result.isAbsent())
+ return true;
+
if (auto *RD = T->getAsCXXRecordDecl()) {
if (RD->isInvalidDecl()) {
Result = APValue();
@@ -4753,13 +4881,14 @@ static bool getDefaultInitValue(QualType T, APValue &Result) {
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
End = RD->bases_end();
I != End; ++I, ++Index)
- Success &= getDefaultInitValue(I->getType(), Result.getStructBase(Index));
+ Success &=
+ handleDefaultInitValue(I->getType(), Result.getStructBase(Index));
for (const auto *I : RD->fields()) {
if (I->isUnnamedBitfield())
continue;
- Success &= getDefaultInitValue(I->getType(),
- Result.getStructField(I->getFieldIndex()));
+ Success &= handleDefaultInitValue(
+ I->getType(), Result.getStructField(I->getFieldIndex()));
}
return Success;
}
@@ -4769,7 +4898,7 @@ static bool getDefaultInitValue(QualType T, APValue &Result) {
Result = APValue(APValue::UninitArray(), 0, AT->getSize().getZExtValue());
if (Result.hasArrayFiller())
Success &=
- getDefaultInitValue(AT->getElementType(), Result.getArrayFiller());
+ handleDefaultInitValue(AT->getElementType(), Result.getArrayFiller());
return Success;
}
@@ -4796,6 +4925,8 @@ enum EvalStmtResult {
}
static bool EvaluateVarDecl(EvalInfo &Info, const VarDecl *VD) {
+ if (VD->isInvalidDecl())
+ return false;
// We don't need to evaluate the initializer for a static local.
if (!VD->hasLocalStorage())
return true;
@@ -4808,7 +4939,7 @@ static bool EvaluateVarDecl(EvalInfo &Info, const VarDecl *VD) {
if (!InitE) {
if (VD->getType()->isDependentType())
return Info.noteSideEffect();
- return getDefaultInitValue(VD->getType(), Val);
+ return handleDefaultInitValue(VD->getType(), Val);
}
if (InitE->isValueDependent())
return false;
@@ -4932,8 +5063,14 @@ static EvalStmtResult EvaluateSwitch(StmtResult &Result, EvalInfo &Info,
if (SS->getConditionVariable() &&
!EvaluateDecl(Info, SS->getConditionVariable()))
return ESR_Failed;
+ if (SS->getCond()->isValueDependent()) {
+ // We don't know what the value is, and which branch should jump to.
+ EvaluateDependentExpr(SS->getCond(), Info);
+ return ESR_Failed;
+ }
if (!EvaluateInteger(SS->getCond(), Value, Info))
return ESR_Failed;
+
if (!CondScope.destroy())
return ESR_Failed;
}
@@ -4984,6 +5121,20 @@ static EvalStmtResult EvaluateSwitch(StmtResult &Result, EvalInfo &Info,
llvm_unreachable("Invalid EvalStmtResult!");
}
+static bool CheckLocalVariableDeclaration(EvalInfo &Info, const VarDecl *VD) {
+ // An expression E is a core constant expression unless the evaluation of E
+ // would evaluate one of the following: [C++23] - a control flow that passes
+ // through a declaration of a variable with static or thread storage duration
+ // unless that variable is usable in constant expressions.
+ if (VD->isLocalVarDecl() && VD->isStaticLocal() &&
+ !VD->isUsableInConstantExpressions(Info.Ctx)) {
+ Info.CCEDiag(VD->getLocation(), diag::note_constexpr_static_local)
+ << (VD->getTSCSpec() == TSCS_unspecified ? 0 : 1) << VD;
+ return false;
+ }
+ return true;
+}
+
// Evaluate a statement.
static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
const Stmt *S, const SwitchCase *Case) {
@@ -5094,6 +5245,8 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
const DeclStmt *DS = cast<DeclStmt>(S);
for (const auto *D : DS->decls()) {
if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ if (!CheckLocalVariableDeclaration(Info, VD))
+ return ESR_Failed;
if (VD->hasLocalStorage() && !VD->getInit())
if (!EvaluateVarDecl(Info, VD))
return ESR_Failed;
@@ -5128,7 +5281,7 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
return ESR_Succeeded;
}
- Info.FFDiag(S->getBeginLoc());
+ Info.FFDiag(S->getBeginLoc()) << S->getSourceRange();
return ESR_Failed;
case Stmt::NullStmtClass:
@@ -5137,6 +5290,9 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
case Stmt::DeclStmtClass: {
const DeclStmt *DS = cast<DeclStmt>(S);
for (const auto *D : DS->decls()) {
+ const VarDecl *VD = dyn_cast_or_null<VarDecl>(D);
+ if (VD && !CheckLocalVariableDeclaration(Info, VD))
+ return ESR_Failed;
// Each declaration initialization is its own full-expression.
FullExpressionRAII Scope(Info);
if (!EvaluateDecl(Info, D) && !Info.noteFailure())
@@ -5196,7 +5352,14 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
}
}
bool Cond;
- if (!EvaluateCond(Info, IS->getConditionVariable(), IS->getCond(), Cond))
+ if (IS->isConsteval()) {
+ Cond = IS->isNonNegatedConsteval();
+ // If we are not in a constant context, if consteval should not evaluate
+ // to true.
+ if (!Info.InConstantContext)
+ Cond = !Cond;
+ } else if (!EvaluateCond(Info, IS->getConditionVariable(), IS->getCond(),
+ Cond))
return ESR_Failed;
if (const Stmt *SubStmt = Cond ? IS->getThen() : IS->getElse()) {
@@ -5321,6 +5484,11 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
return ESR;
}
+ // In error-recovery cases it's possible to get here even if we failed to
+ // synthesize the __begin and __end variables.
+ if (!FS->getBeginStmt() || !FS->getEndStmt() || !FS->getCond())
+ return ESR_Failed;
+
// Create the __begin and __end iterators.
ESR = EvaluateStmt(Result, Info, FS->getBeginStmt());
if (ESR != ESR_Succeeded) {
@@ -5395,11 +5563,14 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
case Stmt::LabelStmtClass:
return EvaluateStmt(Result, Info, cast<LabelStmt>(S)->getSubStmt(), Case);
- case Stmt::AttributedStmtClass:
- // As a general principle, C++11 attributes can be ignored without
- // any semantic impact.
- return EvaluateStmt(Result, Info, cast<AttributedStmt>(S)->getSubStmt(),
- Case);
+ case Stmt::AttributedStmtClass: {
+ const auto *AS = cast<AttributedStmt>(S);
+ const auto *SS = AS->getSubStmt();
+ MSConstexprContextRAII ConstexprContext(
+ *Info.CurrentCall, hasSpecificAttr<MSConstexprAttr>(AS->getAttrs()) &&
+ isa<ReturnStmt>(SS));
+ return EvaluateStmt(Result, Info, SS, Case);
+ }
case Stmt::CaseStmtClass:
case Stmt::DefaultStmtClass:
@@ -5470,7 +5641,9 @@ static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc,
}
// Can we evaluate this function call?
- if (Definition && Definition->isConstexpr() && Body)
+ if (Definition && Body &&
+ (Definition->isConstexpr() || (Info.CurrentCall->CanEvalMSConstexpr &&
+ Definition->hasAttr<MSConstexprAttr>())))
return true;
if (Info.getLangOpts().CPlusPlus11) {
@@ -5581,13 +5754,15 @@ static const CXXRecordDecl *getBaseClassType(SubobjectDesignator &Designator,
}
/// Determine the dynamic type of an object.
-static Optional<DynamicType> ComputeDynamicType(EvalInfo &Info, const Expr *E,
- LValue &This, AccessKinds AK) {
+static std::optional<DynamicType> ComputeDynamicType(EvalInfo &Info,
+ const Expr *E,
+ LValue &This,
+ AccessKinds AK) {
// If we don't have an lvalue denoting an object of class type, there is no
// meaningful dynamic type. (We consider objects of non-class type to have no
// dynamic type.)
if (!checkDynamicType(Info, E, This, AK, true))
- return None;
+ return std::nullopt;
// Refuse to compute a dynamic type in the presence of virtual bases. This
// shouldn't happen other than in constant-folding situations, since literal
@@ -5599,7 +5774,7 @@ static Optional<DynamicType> ComputeDynamicType(EvalInfo &Info, const Expr *E,
This.Designator.MostDerivedType->getAsCXXRecordDecl();
if (!Class || Class->getNumVBases()) {
Info.FFDiag(E);
- return None;
+ return std::nullopt;
}
// FIXME: For very deep class hierarchies, it might be beneficial to use a
@@ -5632,14 +5807,14 @@ static Optional<DynamicType> ComputeDynamicType(EvalInfo &Info, const Expr *E,
// 'This', so that object has not yet begun its period of construction and
// any polymorphic operation on it results in undefined behavior.
Info.FFDiag(E);
- return None;
+ return std::nullopt;
}
/// Perform virtual dispatch.
static const CXXMethodDecl *HandleVirtualDispatch(
EvalInfo &Info, const Expr *E, LValue &This, const CXXMethodDecl *Found,
llvm::SmallVectorImpl<QualType> &CovariantAdjustmentPath) {
- Optional<DynamicType> DynType = ComputeDynamicType(
+ std::optional<DynamicType> DynType = ComputeDynamicType(
Info, E, This,
isa<CXXDestructorDecl>(Found) ? AK_Destroy : AK_MemberCall);
if (!DynType)
@@ -5664,7 +5839,7 @@ static const CXXMethodDecl *HandleVirtualDispatch(
// C++2a [class.abstract]p6:
// the effect of making a virtual call to a pure virtual function [...] is
// undefined
- if (Callee->isPure()) {
+ if (Callee->isPureVirtual()) {
Info.FFDiag(E, diag::note_constexpr_pure_virtual_call, 1) << Callee;
Info.Note(Callee->getLocation(), diag::note_declared_at);
return nullptr;
@@ -5757,7 +5932,7 @@ static bool HandleDynamicCast(EvalInfo &Info, const ExplicitCastExpr *E,
// For all the other cases, we need the pointer to point to an object within
// its lifetime / period of construction / destruction, and we need to know
// its dynamic type.
- Optional<DynamicType> DynType =
+ std::optional<DynamicType> DynType =
ComputeDynamicType(Info, E, Ptr, AK_DynamicCast);
if (!DynType)
return false;
@@ -5871,7 +6046,7 @@ struct StartLifetimeOfUnionMemberHandler {
return false;
}
APValue Result;
- Failed = !getDefaultInitValue(Field->getType(), Result);
+ Failed = !handleDefaultInitValue(Field->getType(), Result);
Subobj.setUnion(Field, Result);
return true;
}
@@ -5890,8 +6065,9 @@ const AccessKinds StartLifetimeOfUnionMemberHandler::AccessKind;
/// operator whose left-hand side might involve a union member access. If it
/// does, implicitly start the lifetime of any accessed union elements per
/// C++20 [class.union]5.
-static bool HandleUnionActiveMemberChange(EvalInfo &Info, const Expr *LHSExpr,
- const LValue &LHS) {
+static bool MaybeHandleUnionActiveMemberChange(EvalInfo &Info,
+ const Expr *LHSExpr,
+ const LValue &LHS) {
if (LHS.InvalidBase || LHS.Designator.Invalid)
return false;
@@ -5946,8 +6122,14 @@ static bool HandleUnionActiveMemberChange(EvalInfo &Info, const Expr *LHSExpr,
break;
// Walk path backwards as we walk up from the base to the derived class.
for (const CXXBaseSpecifier *Elt : llvm::reverse(ICE->path())) {
+ if (Elt->isVirtual()) {
+ // A class with virtual base classes never has a trivial default
+ // constructor, so S(E) is empty in this case.
+ E = nullptr;
+ break;
+ }
+
--PathLength;
- (void)Elt;
assert(declaresSameEntity(Elt->getType()->getAsCXXRecordDecl(),
LHS.Designator.Entries[PathLength]
.getAsBaseOrMember().getPointer()));
@@ -6028,7 +6210,7 @@ static bool EvaluateArgs(ArrayRef<const Expr *> Args, CallRef Call,
unsigned ASTIdx = Idx.getASTIndex();
if (ASTIdx >= Args.size())
continue;
- ForbiddenNullArgs[ASTIdx] = 1;
+ ForbiddenNullArgs[ASTIdx] = true;
}
}
}
@@ -6072,13 +6254,13 @@ static bool handleTrivialCopy(EvalInfo &Info, const ParmVarDecl *Param,
/// Evaluate a function call.
static bool HandleFunctionCall(SourceLocation CallLoc,
const FunctionDecl *Callee, const LValue *This,
- ArrayRef<const Expr *> Args, CallRef Call,
- const Stmt *Body, EvalInfo &Info,
+ const Expr *E, ArrayRef<const Expr *> Args,
+ CallRef Call, const Stmt *Body, EvalInfo &Info,
APValue &Result, const LValue *ResultSlot) {
if (!Info.CheckCallLimit(CallLoc))
return false;
- CallStackFrame Frame(Info, CallLoc, Callee, This, Call);
+ CallStackFrame Frame(Info, E->getSourceRange(), Callee, This, E, Call);
// For a trivial copy or move assignment, perform an APValue copy. This is
// essential for unions, where the operations performed by the assignment
@@ -6097,9 +6279,6 @@ static bool HandleFunctionCall(SourceLocation CallLoc,
if (!handleTrivialCopy(Info, MD->getParamDecl(0), Args[0], RHSValue,
MD->getParent()->isUnion()))
return false;
- if (Info.getLangOpts().CPlusPlus20 && MD->isTrivial() &&
- !HandleUnionActiveMemberChange(Info, Args[0], *This))
- return false;
if (!handleAssignment(Info, Args[0], *This, MD->getThisType(),
RHSValue))
return false;
@@ -6146,7 +6325,7 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
Info,
ObjectUnderConstruction{This.getLValueBase(), This.Designator.Entries},
RD->getNumBases());
- CallStackFrame Frame(Info, CallLoc, Definition, &This, Call);
+ CallStackFrame Frame(Info, E->getSourceRange(), Definition, &This, E, Call);
// FIXME: Creating an APValue just to hold a nonexistent return value is
// wasteful.
@@ -6219,7 +6398,7 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
for (; !declaresSameEntity(*FieldIt, FD); ++FieldIt) {
assert(FieldIt != RD->field_end() && "missing field?");
if (!FieldIt->isUnnamedBitfield())
- Success &= getDefaultInitValue(
+ Success &= handleDefaultInitValue(
FieldIt->getType(),
Result.getStructField(FieldIt->getFieldIndex()));
}
@@ -6276,7 +6455,8 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
// FIXME: This immediately starts the lifetime of all members of
// an anonymous struct. It would be preferable to strictly start
// member lifetime in initialization order.
- Success &= getDefaultInitValue(Info.Ctx.getRecordType(CD), *Value);
+ Success &=
+ handleDefaultInitValue(Info.Ctx.getRecordType(CD), *Value);
}
// Store Subobject as its parent before updating it for the last element
// in the chain.
@@ -6328,7 +6508,7 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
if (!RD->isUnion()) {
for (; FieldIt != RD->field_end(); ++FieldIt) {
if (!FieldIt->isUnnamedBitfield())
- Success &= getDefaultInitValue(
+ Success &= handleDefaultInitValue(
FieldIt->getType(),
Result.getStructField(FieldIt->getFieldIndex()));
}
@@ -6354,7 +6534,7 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
CallScope.destroy();
}
-static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc,
+static bool HandleDestructionImpl(EvalInfo &Info, SourceRange CallRange,
const LValue &This, APValue &Value,
QualType T) {
// Objects can only be destroyed while they're within their lifetimes.
@@ -6364,20 +6544,24 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc,
if (Value.isAbsent() && !T->isNullPtrType()) {
APValue Printable;
This.moveInto(Printable);
- Info.FFDiag(CallLoc, diag::note_constexpr_destroy_out_of_lifetime)
- << Printable.getAsString(Info.Ctx, Info.Ctx.getLValueReferenceType(T));
+ Info.FFDiag(CallRange.getBegin(),
+ diag::note_constexpr_destroy_out_of_lifetime)
+ << Printable.getAsString(Info.Ctx, Info.Ctx.getLValueReferenceType(T));
return false;
}
// Invent an expression for location purposes.
// FIXME: We shouldn't need to do this.
- OpaqueValueExpr LocE(CallLoc, Info.Ctx.IntTy, VK_PRValue);
+ OpaqueValueExpr LocE(CallRange.getBegin(), Info.Ctx.IntTy, VK_PRValue);
// For arrays, destroy elements right-to-left.
if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(T)) {
uint64_t Size = CAT->getSize().getZExtValue();
QualType ElemT = CAT->getElementType();
+ if (!CheckArraySize(Info, CAT, CallRange.getBegin()))
+ return false;
+
LValue ElemLV = This;
ElemLV.addArray(Info, &LocE, CAT);
if (!HandleLValueArrayAdjustment(Info, &LocE, ElemLV, ElemT, Size))
@@ -6392,7 +6576,7 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc,
for (; Size != 0; --Size) {
APValue &Elem = Value.getArrayInitializedElt(Size - 1);
if (!HandleLValueArrayAdjustment(Info, &LocE, ElemLV, ElemT, -1) ||
- !HandleDestructionImpl(Info, CallLoc, ElemLV, Elem, ElemT))
+ !HandleDestructionImpl(Info, CallRange, ElemLV, Elem, ElemT))
return false;
}
@@ -6404,7 +6588,9 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc,
const CXXRecordDecl *RD = T->getAsCXXRecordDecl();
if (!RD) {
if (T.isDestructedType()) {
- Info.FFDiag(CallLoc, diag::note_constexpr_unsupported_destruction) << T;
+ Info.FFDiag(CallRange.getBegin(),
+ diag::note_constexpr_unsupported_destruction)
+ << T;
return false;
}
@@ -6413,13 +6599,13 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc,
}
if (RD->getNumVBases()) {
- Info.FFDiag(CallLoc, diag::note_constexpr_virtual_base) << RD;
+ Info.FFDiag(CallRange.getBegin(), diag::note_constexpr_virtual_base) << RD;
return false;
}
const CXXDestructorDecl *DD = RD->getDestructor();
if (!DD && !RD->hasTrivialDestructor()) {
- Info.FFDiag(CallLoc);
+ Info.FFDiag(CallRange.getBegin());
return false;
}
@@ -6438,16 +6624,17 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc,
return true;
}
- if (!Info.CheckCallLimit(CallLoc))
+ if (!Info.CheckCallLimit(CallRange.getBegin()))
return false;
const FunctionDecl *Definition = nullptr;
const Stmt *Body = DD->getBody(Definition);
- if (!CheckConstexprFunction(Info, CallLoc, DD, Definition, Body))
+ if (!CheckConstexprFunction(Info, CallRange.getBegin(), DD, Definition, Body))
return false;
- CallStackFrame Frame(Info, CallLoc, Definition, &This, CallRef());
+ CallStackFrame Frame(Info, CallRange, Definition, &This, /*CallExpr=*/nullptr,
+ CallRef());
// We're now in the period of destruction of this object.
unsigned BasesLeft = RD->getNumBases();
@@ -6461,7 +6648,7 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc,
// (Note that formally the lifetime ends when the period of destruction
// begins, even though certain uses of the object remain valid until the
// period of destruction ends.)
- Info.FFDiag(CallLoc, diag::note_constexpr_double_destroy);
+ Info.FFDiag(CallRange.getBegin(), diag::note_constexpr_double_destroy);
return false;
}
@@ -6480,7 +6667,7 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc,
// We don't have a good way to iterate fields in reverse, so collect all the
// fields first and then walk them backwards.
- SmallVector<FieldDecl*, 16> Fields(RD->field_begin(), RD->field_end());
+ SmallVector<FieldDecl*, 16> Fields(RD->fields());
for (const FieldDecl *FD : llvm::reverse(Fields)) {
if (FD->isUnnamedBitfield())
continue;
@@ -6490,7 +6677,7 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc,
return false;
APValue *SubobjectValue = &Value.getStructField(FD->getFieldIndex());
- if (!HandleDestructionImpl(Info, CallLoc, Subobject, *SubobjectValue,
+ if (!HandleDestructionImpl(Info, CallRange, Subobject, *SubobjectValue,
FD->getType()))
return false;
}
@@ -6509,7 +6696,7 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc,
return false;
APValue *SubobjectValue = &Value.getStructBase(BasesLeft);
- if (!HandleDestructionImpl(Info, CallLoc, Subobject, *SubobjectValue,
+ if (!HandleDestructionImpl(Info, CallRange, Subobject, *SubobjectValue,
BaseType))
return false;
}
@@ -6530,7 +6717,7 @@ struct DestroyObjectHandler {
typedef bool result_type;
bool failed() { return false; }
bool found(APValue &Subobj, QualType SubobjType) {
- return HandleDestructionImpl(Info, E->getExprLoc(), This, Subobj,
+ return HandleDestructionImpl(Info, E->getSourceRange(), This, Subobj,
SubobjType);
}
bool found(APSInt &Value, QualType SubobjType) {
@@ -6567,7 +6754,7 @@ static bool HandleDestruction(EvalInfo &Info, SourceLocation Loc,
return HandleDestructionImpl(Info, Loc, LV, Value, T);
}
-/// Perform a call to 'perator new' or to `__builtin_operator_new'.
+/// Perform a call to 'operator new' or to `__builtin_operator_new'.
static bool HandleOperatorNewCall(EvalInfo &Info, const CallExpr *E,
LValue &Result) {
if (Info.checkingPotentialConstantExpression() ||
@@ -6613,18 +6800,17 @@ static bool HandleOperatorNewCall(EvalInfo &Info, const CallExpr *E,
return false;
}
- if (ByteSize.getActiveBits() > ConstantArrayType::getMaxSizeBits(Info.Ctx)) {
+ if (!Info.CheckArraySize(E->getBeginLoc(), ByteSize.getActiveBits(),
+ Size.getZExtValue(), /*Diag=*/!IsNothrow)) {
if (IsNothrow) {
Result.setNull(Info.Ctx, E->getType());
return true;
}
-
- Info.FFDiag(E, diag::note_constexpr_new_too_large) << APSInt(Size, true);
return false;
}
- QualType AllocType = Info.Ctx.getConstantArrayType(ElemType, Size, nullptr,
- ArrayType::Normal, 0);
+ QualType AllocType = Info.Ctx.getConstantArrayType(
+ ElemType, Size, nullptr, ArraySizeModifier::Normal, 0);
APValue *Val = Info.createHeapAlloc(E, AllocType, Result);
*Val = APValue(APValue::UninitArray(), 0, Size.getZExtValue());
Result.addArray(Info, E, cast<ConstantArrayType>(AllocType));
@@ -6649,10 +6835,10 @@ static const FunctionDecl *getVirtualOperatorDelete(QualType T) {
/// still exists and is of the right kind for the purpose of a deletion.
///
/// On success, returns the heap allocation to deallocate. On failure, produces
-/// a diagnostic and returns None.
-static Optional<DynAlloc *> CheckDeleteKind(EvalInfo &Info, const Expr *E,
- const LValue &Pointer,
- DynAlloc::Kind DeallocKind) {
+/// a diagnostic and returns std::nullopt.
+static std::optional<DynAlloc *> CheckDeleteKind(EvalInfo &Info, const Expr *E,
+ const LValue &Pointer,
+ DynAlloc::Kind DeallocKind) {
auto PointerAsString = [&] {
return Pointer.toString(Info.Ctx, Info.Ctx.VoidPtrTy);
};
@@ -6663,21 +6849,21 @@ static Optional<DynAlloc *> CheckDeleteKind(EvalInfo &Info, const Expr *E,
<< PointerAsString();
if (Pointer.Base)
NoteLValueLocation(Info, Pointer.Base);
- return None;
+ return std::nullopt;
}
- Optional<DynAlloc *> Alloc = Info.lookupDynamicAlloc(DA);
+ std::optional<DynAlloc *> Alloc = Info.lookupDynamicAlloc(DA);
if (!Alloc) {
Info.FFDiag(E, diag::note_constexpr_double_delete);
- return None;
+ return std::nullopt;
}
- QualType AllocType = Pointer.Base.getDynamicAllocType();
if (DeallocKind != (*Alloc)->getKind()) {
+ QualType AllocType = Pointer.Base.getDynamicAllocType();
Info.FFDiag(E, diag::note_constexpr_new_delete_mismatch)
<< DeallocKind << (*Alloc)->getKind() << AllocType;
NoteLValueLocation(Info, Pointer.Base);
- return None;
+ return std::nullopt;
}
bool Subobject = false;
@@ -6691,7 +6877,7 @@ static Optional<DynAlloc *> CheckDeleteKind(EvalInfo &Info, const Expr *E,
if (Subobject) {
Info.FFDiag(E, diag::note_constexpr_delete_subobject)
<< PointerAsString() << Pointer.Designator.isOnePastTheEnd();
- return None;
+ return std::nullopt;
}
return Alloc;
@@ -6743,7 +6929,7 @@ class BitCastBuffer {
// FIXME: Its possible under the C++ standard for 'char' to not be 8 bits, but
// we don't support a host or target where that is the case. Still, we should
// use a more generic type in case we ever do.
- SmallVector<Optional<unsigned char>, 32> Bytes;
+ SmallVector<std::optional<unsigned char>, 32> Bytes;
static_assert(std::numeric_limits<unsigned char>::digits >= 8,
"Need at least 8 bit unsigned char");
@@ -6755,12 +6941,11 @@ public:
: Bytes(Width.getQuantity()),
TargetIsLittleEndian(TargetIsLittleEndian) {}
- LLVM_NODISCARD
- bool readObject(CharUnits Offset, CharUnits Width,
- SmallVectorImpl<unsigned char> &Output) const {
+ [[nodiscard]] bool readObject(CharUnits Offset, CharUnits Width,
+ SmallVectorImpl<unsigned char> &Output) const {
for (CharUnits I = Offset, E = Offset + Width; I != E; ++I) {
// If a byte of an integer is uninitialized, then the whole integer is
- // uninitalized.
+ // uninitialized.
if (!Bytes[I.getQuantity()])
return false;
Output.push_back(*Bytes[I.getQuantity()]);
@@ -6824,10 +7009,11 @@ class APValueToBufferConverter {
return visitArray(Val, Ty, Offset);
case APValue::Struct:
return visitRecord(Val, Ty, Offset);
+ case APValue::Vector:
+ return visitVector(Val, Ty, Offset);
case APValue::ComplexInt:
case APValue::ComplexFloat:
- case APValue::Vector:
case APValue::FixedPoint:
// FIXME: We should support these.
@@ -6914,6 +7100,72 @@ class APValueToBufferConverter {
return true;
}
+ bool visitVector(const APValue &Val, QualType Ty, CharUnits Offset) {
+ const VectorType *VTy = Ty->castAs<VectorType>();
+ QualType EltTy = VTy->getElementType();
+ unsigned NElts = VTy->getNumElements();
+ unsigned EltSize =
+ VTy->isExtVectorBoolType() ? 1 : Info.Ctx.getTypeSize(EltTy);
+
+ if ((NElts * EltSize) % Info.Ctx.getCharWidth() != 0) {
+ // The vector's size in bits is not a multiple of the target's byte size,
+ // so its layout is unspecified. For now, we'll simply treat these cases
+ // as unsupported (this should only be possible with OpenCL bool vectors
+ // whose element count isn't a multiple of the byte size).
+ Info.FFDiag(BCE->getBeginLoc(),
+ diag::note_constexpr_bit_cast_invalid_vector)
+ << Ty.getCanonicalType() << EltSize << NElts
+ << Info.Ctx.getCharWidth();
+ return false;
+ }
+
+ if (EltTy->isRealFloatingType() && &Info.Ctx.getFloatTypeSemantics(EltTy) ==
+ &APFloat::x87DoubleExtended()) {
+ // The layout for x86_fp80 vectors seems to be handled very inconsistently
+ // by both clang and LLVM, so for now we won't allow bit_casts involving
+ // it in a constexpr context.
+ Info.FFDiag(BCE->getBeginLoc(),
+ diag::note_constexpr_bit_cast_unsupported_type)
+ << EltTy;
+ return false;
+ }
+
+ if (VTy->isExtVectorBoolType()) {
+ // Special handling for OpenCL bool vectors:
+ // Since these vectors are stored as packed bits, but we can't write
+ // individual bits to the BitCastBuffer, we'll buffer all of the elements
+ // together into an appropriately sized APInt and write them all out at
+ // once. Because we don't accept vectors where NElts * EltSize isn't a
+ // multiple of the char size, there will be no padding space, so we don't
+ // have to worry about writing data which should have been left
+ // uninitialized.
+ bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian();
+
+ llvm::APInt Res = llvm::APInt::getZero(NElts);
+ for (unsigned I = 0; I < NElts; ++I) {
+ const llvm::APSInt &EltAsInt = Val.getVectorElt(I).getInt();
+ assert(EltAsInt.isUnsigned() && EltAsInt.getBitWidth() == 1 &&
+ "bool vector element must be 1-bit unsigned integer!");
+
+ Res.insertBits(EltAsInt, BigEndian ? (NElts - I - 1) : I);
+ }
+
+ SmallVector<uint8_t, 8> Bytes(NElts / 8);
+ llvm::StoreIntToMemory(Res, &*Bytes.begin(), NElts / 8);
+ Buffer.writeObject(Offset, Bytes);
+ } else {
+ // Iterate over each of the elements and write them out to the buffer at
+ // the appropriate offset.
+ CharUnits EltSizeChars = Info.Ctx.getTypeSizeInChars(EltTy);
+ for (unsigned I = 0; I < NElts; ++I) {
+ if (!visit(Val.getVectorElt(I), EltTy, Offset + I * EltSizeChars))
+ return false;
+ }
+ }
+
+ return true;
+ }
+
bool visitInt(const APSInt &Val, QualType Ty, CharUnits Offset) {
APSInt AdjustedVal = Val;
unsigned Width = AdjustedVal.getBitWidth();
@@ -6922,7 +7174,7 @@ class APValueToBufferConverter {
AdjustedVal = AdjustedVal.extend(Width);
}
- SmallVector<unsigned char, 8> Bytes(Width / 8);
+ SmallVector<uint8_t, 8> Bytes(Width / 8);
llvm::StoreIntToMemory(AdjustedVal, &*Bytes.begin(), Width / 8);
Buffer.writeObject(Offset, Bytes);
return true;
@@ -6934,12 +7186,12 @@ class APValueToBufferConverter {
}
public:
- static Optional<BitCastBuffer> convert(EvalInfo &Info, const APValue &Src,
- const CastExpr *BCE) {
+ static std::optional<BitCastBuffer>
+ convert(EvalInfo &Info, const APValue &Src, const CastExpr *BCE) {
CharUnits DstSize = Info.Ctx.getTypeSizeInChars(BCE->getType());
APValueToBufferConverter Converter(Info, DstSize, BCE);
if (!Converter.visit(Src, BCE->getSubExpr()->getType()))
- return None;
+ return std::nullopt;
return Converter.Buffer;
}
};
@@ -6957,22 +7209,22 @@ class BufferToAPValueConverter {
// Emit an unsupported bit_cast type error. Sema refuses to build a bit_cast
// with an invalid type, so anything left is a deficiency on our part (FIXME).
// Ideally this will be unreachable.
- llvm::NoneType unsupportedType(QualType Ty) {
+ std::nullopt_t unsupportedType(QualType Ty) {
Info.FFDiag(BCE->getBeginLoc(),
diag::note_constexpr_bit_cast_unsupported_type)
<< Ty;
- return None;
+ return std::nullopt;
}
- llvm::NoneType unrepresentableValue(QualType Ty, const APSInt &Val) {
+ std::nullopt_t unrepresentableValue(QualType Ty, const APSInt &Val) {
Info.FFDiag(BCE->getBeginLoc(),
diag::note_constexpr_bit_cast_unrepresentable_value)
<< Ty << toString(Val, /*Radix=*/10);
- return None;
+ return std::nullopt;
}
- Optional<APValue> visit(const BuiltinType *T, CharUnits Offset,
- const EnumType *EnumSugar = nullptr) {
+ std::optional<APValue> visit(const BuiltinType *T, CharUnits Offset,
+ const EnumType *EnumSugar = nullptr) {
if (T->isNullPtrType()) {
uint64_t NullValue = Info.Ctx.getTargetNullPointerValue(QualType(T, 0));
return APValue((Expr *)nullptr,
@@ -7008,7 +7260,7 @@ class BufferToAPValueConverter {
Info.FFDiag(BCE->getExprLoc(),
diag::note_constexpr_bit_cast_indet_dest)
<< DisplayType << Info.Ctx.getLangOpts().CharIsSigned;
- return None;
+ return std::nullopt;
}
return APValue::IndeterminateValue();
@@ -7040,7 +7292,7 @@ class BufferToAPValueConverter {
return unsupportedType(QualType(T, 0));
}
- Optional<APValue> visit(const RecordType *RTy, CharUnits Offset) {
+ std::optional<APValue> visit(const RecordType *RTy, CharUnits Offset) {
const RecordDecl *RD = RTy->getAsRecordDecl();
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
@@ -7060,10 +7312,10 @@ class BufferToAPValueConverter {
Info.Ctx.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero())
continue;
- Optional<APValue> SubObj = visitType(
+ std::optional<APValue> SubObj = visitType(
BS.getType(), Layout.getBaseClassOffset(BaseDecl) + Offset);
if (!SubObj)
- return None;
+ return std::nullopt;
ResultVal.getStructBase(I) = *SubObj;
}
}
@@ -7076,7 +7328,7 @@ class BufferToAPValueConverter {
if (FD->isBitField()) {
Info.FFDiag(BCE->getBeginLoc(),
diag::note_constexpr_bit_cast_unsupported_bitfield);
- return None;
+ return std::nullopt;
}
uint64_t FieldOffsetBits = Layout.getFieldOffset(FieldIdx);
@@ -7086,9 +7338,9 @@ class BufferToAPValueConverter {
CharUnits::fromQuantity(FieldOffsetBits / Info.Ctx.getCharWidth()) +
Offset;
QualType FieldTy = FD->getType();
- Optional<APValue> SubObj = visitType(FieldTy, FieldOffset);
+ std::optional<APValue> SubObj = visitType(FieldTy, FieldOffset);
if (!SubObj)
- return None;
+ return std::nullopt;
ResultVal.getStructField(FieldIdx) = *SubObj;
++FieldIdx;
}
@@ -7096,7 +7348,7 @@ class BufferToAPValueConverter {
return ResultVal;
}
- Optional<APValue> visit(const EnumType *Ty, CharUnits Offset) {
+ std::optional<APValue> visit(const EnumType *Ty, CharUnits Offset) {
QualType RepresentationType = Ty->getDecl()->getIntegerType();
assert(!RepresentationType.isNull() &&
"enum forward decl should be caught by Sema");
@@ -7107,27 +7359,98 @@ class BufferToAPValueConverter {
return visit(AsBuiltin, Offset, /*EnumTy=*/Ty);
}
- Optional<APValue> visit(const ConstantArrayType *Ty, CharUnits Offset) {
+ std::optional<APValue> visit(const ConstantArrayType *Ty, CharUnits Offset) {
size_t Size = Ty->getSize().getLimitedValue();
CharUnits ElementWidth = Info.Ctx.getTypeSizeInChars(Ty->getElementType());
APValue ArrayValue(APValue::UninitArray(), Size, Size);
for (size_t I = 0; I != Size; ++I) {
- Optional<APValue> ElementValue =
+ std::optional<APValue> ElementValue =
visitType(Ty->getElementType(), Offset + I * ElementWidth);
if (!ElementValue)
- return None;
+ return std::nullopt;
ArrayValue.getArrayInitializedElt(I) = std::move(*ElementValue);
}
return ArrayValue;
}
- Optional<APValue> visit(const Type *Ty, CharUnits Offset) {
+ std::optional<APValue> visit(const VectorType *VTy, CharUnits Offset) {
+ QualType EltTy = VTy->getElementType();
+ unsigned NElts = VTy->getNumElements();
+ unsigned EltSize =
+ VTy->isExtVectorBoolType() ? 1 : Info.Ctx.getTypeSize(EltTy);
+
+ if ((NElts * EltSize) % Info.Ctx.getCharWidth() != 0) {
+ // The vector's size in bits is not a multiple of the target's byte size,
+ // so its layout is unspecified. For now, we'll simply treat these cases
+ // as unsupported (this should only be possible with OpenCL bool vectors
+ // whose element count isn't a multiple of the byte size).
+ Info.FFDiag(BCE->getBeginLoc(),
+ diag::note_constexpr_bit_cast_invalid_vector)
+ << QualType(VTy, 0) << EltSize << NElts << Info.Ctx.getCharWidth();
+ return std::nullopt;
+ }
+
+ if (EltTy->isRealFloatingType() && &Info.Ctx.getFloatTypeSemantics(EltTy) ==
+ &APFloat::x87DoubleExtended()) {
+ // The layout for x86_fp80 vectors seems to be handled very inconsistently
+ // by both clang and LLVM, so for now we won't allow bit_casts involving
+ // it in a constexpr context.
+ Info.FFDiag(BCE->getBeginLoc(),
+ diag::note_constexpr_bit_cast_unsupported_type)
+ << EltTy;
+ return std::nullopt;
+ }
+
+ SmallVector<APValue, 4> Elts;
+ Elts.reserve(NElts);
+ if (VTy->isExtVectorBoolType()) {
+ // Special handling for OpenCL bool vectors:
+ // Since these vectors are stored as packed bits, but we can't read
+ // individual bits from the BitCastBuffer, we'll buffer all of the
+ // elements together into an appropriately sized APInt and write them all
+ // out at once. Because we don't accept vectors where NElts * EltSize
+ // isn't a multiple of the char size, there will be no padding space, so
+ // we don't have to worry about reading any padding data which didn't
+ // actually need to be accessed.
+ bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian();
+
+ SmallVector<uint8_t, 8> Bytes;
+ Bytes.reserve(NElts / 8);
+ if (!Buffer.readObject(Offset, CharUnits::fromQuantity(NElts / 8), Bytes))
+ return std::nullopt;
+
+ APSInt SValInt(NElts, true);
+ llvm::LoadIntFromMemory(SValInt, &*Bytes.begin(), Bytes.size());
+
+ for (unsigned I = 0; I < NElts; ++I) {
+ llvm::APInt Elt =
+ SValInt.extractBits(1, (BigEndian ? NElts - I - 1 : I) * EltSize);
+ Elts.emplace_back(
+ APSInt(std::move(Elt), !EltTy->isSignedIntegerType()));
+ }
+ } else {
+ // Iterate over each of the elements and read them from the buffer at
+ // the appropriate offset.
+ CharUnits EltSizeChars = Info.Ctx.getTypeSizeInChars(EltTy);
+ for (unsigned I = 0; I < NElts; ++I) {
+ std::optional<APValue> EltValue =
+ visitType(EltTy, Offset + I * EltSizeChars);
+ if (!EltValue)
+ return std::nullopt;
+ Elts.push_back(std::move(*EltValue));
+ }
+ }
+
+ return APValue(Elts.data(), Elts.size());
+ }
+
+ std::optional<APValue> visit(const Type *Ty, CharUnits Offset) {
return unsupportedType(QualType(Ty, 0));
}
- Optional<APValue> visitType(QualType Ty, CharUnits Offset) {
+ std::optional<APValue> visitType(QualType Ty, CharUnits Offset) {
QualType Can = Ty.getCanonicalType();
switch (Can->getTypeClass()) {
@@ -7152,8 +7475,8 @@ class BufferToAPValueConverter {
public:
// Pull out a full value of type DstType.
- static Optional<APValue> convert(EvalInfo &Info, BitCastBuffer &Buffer,
- const CastExpr *BCE) {
+ static std::optional<APValue> convert(EvalInfo &Info, BitCastBuffer &Buffer,
+ const CastExpr *BCE) {
BufferToAPValueConverter Converter(Info, Buffer, BCE);
return Converter.visitType(BCE->getType(), CharUnits::fromQuantity(0));
}
@@ -7222,33 +7545,23 @@ static bool checkBitCastConstexprEligibility(EvalInfo *Info,
return SourceOK;
}
-static bool handleLValueToRValueBitCast(EvalInfo &Info, APValue &DestValue,
- APValue &SourceValue,
+static bool handleRValueToRValueBitCast(EvalInfo &Info, APValue &DestValue,
+ const APValue &SourceRValue,
const CastExpr *BCE) {
assert(CHAR_BIT == 8 && Info.Ctx.getTargetInfo().getCharWidth() == 8 &&
"no host or target supports non 8-bit chars");
- assert(SourceValue.isLValue() &&
- "LValueToRValueBitcast requires an lvalue operand!");
if (!checkBitCastConstexprEligibility(&Info, Info.Ctx, BCE))
return false;
- LValue SourceLValue;
- APValue SourceRValue;
- SourceLValue.setFrom(Info.Ctx, SourceValue);
- if (!handleLValueToRValueConversion(
- Info, BCE, BCE->getSubExpr()->getType().withConst(), SourceLValue,
- SourceRValue, /*WantObjectRepresentation=*/true))
- return false;
-
// Read out SourceValue into a char buffer.
- Optional<BitCastBuffer> Buffer =
+ std::optional<BitCastBuffer> Buffer =
APValueToBufferConverter::convert(Info, SourceRValue, BCE);
if (!Buffer)
return false;
// Write out the buffer into a new APValue.
- Optional<APValue> MaybeDestValue =
+ std::optional<APValue> MaybeDestValue =
BufferToAPValueConverter::convert(Info, *Buffer, BCE);
if (!MaybeDestValue)
return false;
@@ -7257,6 +7570,25 @@ static bool handleLValueToRValueBitCast(EvalInfo &Info, APValue &DestValue,
return true;
}
+static bool handleLValueToRValueBitCast(EvalInfo &Info, APValue &DestValue,
+ APValue &SourceValue,
+ const CastExpr *BCE) {
+ assert(CHAR_BIT == 8 && Info.Ctx.getTargetInfo().getCharWidth() == 8 &&
+ "no host or target supports non 8-bit chars");
+ assert(SourceValue.isLValue() &&
+ "LValueToRValueBitcast requires an lvalue operand!");
+
+ LValue SourceLValue;
+ APValue SourceRValue;
+ SourceLValue.setFrom(Info.Ctx, SourceValue);
+ if (!handleLValueToRValueConversion(
+ Info, BCE, BCE->getSubExpr()->getType().withConst(), SourceLValue,
+ SourceRValue, /*WantObjectRepresentation=*/true))
+ return false;
+
+ return handleRValueToRValueBitCast(Info, DestValue, SourceRValue, BCE);
+}
+
template <class Derived>
class ExprEvaluatorBase
: public ConstStmtVisitor<Derived, bool> {
@@ -7327,6 +7659,12 @@ protected:
bool ZeroInitialization(const Expr *E) { return Error(E); }
+ bool IsConstantEvaluatedBuiltinCall(const CallExpr *E) {
+ unsigned BuiltinOp = E->getBuiltinCallee();
+ return BuiltinOp != 0 &&
+ Info.Ctx.BuiltinInfo.isConstantEvaluated(BuiltinOp);
+ }
+
public:
ExprEvaluatorBase(EvalInfo &Info) : Info(Info) {}
@@ -7335,7 +7673,7 @@ public:
/// Report an evaluation error. This should only be called when an error is
/// first discovered. When propagating an error, just return false.
bool Error(const Expr *E, diag::kind D) {
- Info.FFDiag(E, D);
+ Info.FFDiag(E, D) << E->getSourceRange();
return false;
}
bool Error(const Expr *E) {
@@ -7349,6 +7687,9 @@ public:
return Error(E);
}
+ bool VisitPredefinedExpr(const PredefinedExpr *E) {
+ return StmtVisitorTy::Visit(E->getFunctionName());
+ }
bool VisitConstantExpr(const ConstantExpr *E) {
if (E->hasAPValueResult())
return DerivedSuccess(E->getAPValueResult(), E);
@@ -7474,13 +7815,14 @@ public:
}
bool VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
- if (APValue *Value = Info.CurrentCall->getCurrentTemporary(E))
+ if (APValue *Value = Info.CurrentCall->getCurrentTemporary(E);
+ Value && !Value->isAbsent())
return DerivedSuccess(*Value, E);
const Expr *Source = E->getSourceExpr();
if (!Source)
return Error(E);
- if (Source == E) { // sanity checking.
+ if (Source == E) {
assert(0 && "OpaqueValueExpr recursively refers to itself");
return Error(E);
}
@@ -7534,7 +7876,7 @@ public:
const FunctionDecl *FD = nullptr;
LValue *This = nullptr, ThisVal;
- auto Args = llvm::makeArrayRef(E->getArgs(), E->getNumArgs());
+ auto Args = llvm::ArrayRef(E->getArgs(), E->getNumArgs());
bool HasQualifier = false;
CallRef Call;
@@ -7576,6 +7918,11 @@ public:
if (!CalleeLV.getLValueOffset().isZero())
return Error(Callee);
+ if (CalleeLV.isNullPointer()) {
+ Info.FFDiag(Callee, diag::note_constexpr_null_callee)
+ << const_cast<Expr *>(Callee);
+ return false;
+ }
FD = dyn_cast_or_null<FunctionDecl>(
CalleeLV.getLValueBase().dyn_cast<const ValueDecl *>());
if (!FD)
@@ -7593,15 +7940,19 @@ public:
if (OCE && OCE->isAssignmentOp()) {
assert(Args.size() == 2 && "wrong number of arguments in assignment");
Call = Info.CurrentCall->createCall(FD);
- if (!EvaluateArgs(isa<CXXMethodDecl>(FD) ? Args.slice(1) : Args, Call,
- Info, FD, /*RightToLeft=*/true))
+ bool HasThis = false;
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
+ HasThis = MD->isImplicitObjectMemberFunction();
+ if (!EvaluateArgs(HasThis ? Args.slice(1) : Args, Call, Info, FD,
+ /*RightToLeft=*/true))
return false;
}
// Overloaded operator calls to member functions are represented as normal
// calls with '*this' as the first argument.
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
- if (MD && !MD->isStatic()) {
+ if (MD &&
+ (MD->isImplicitObjectMemberFunction() || (OCE && MD->isStatic()))) {
// FIXME: When selecting an implicit conversion for an overloaded
// operator delete, we sometimes try to evaluate calls to conversion
// operators without a 'this' parameter!
@@ -7610,7 +7961,20 @@ public:
if (!EvaluateObjectArgument(Info, Args[0], ThisVal))
return false;
- This = &ThisVal;
+
+ // If we are calling a static operator, the 'this' argument needs to be
+ // ignored after being evaluated.
+ if (MD->isInstance())
+ This = &ThisVal;
+
+ // If this is syntactically a simple assignment using a trivial
+ // assignment operator, start the lifetimes of union members as needed,
+ // per C++20 [class.union]5.
+ if (Info.getLangOpts().CPlusPlus20 && OCE &&
+ OCE->getOperator() == OO_Equal && MD->isTrivial() &&
+ !MaybeHandleUnionActiveMemberChange(Info, Args[0], ThisVal))
+ return false;
+
Args = Args.slice(1);
} else if (MD && MD->isLambdaStaticInvoker()) {
// Map the static invoker for the lambda back to the call operator.
@@ -7676,7 +8040,7 @@ public:
CovariantAdjustmentPath);
if (!FD)
return false;
- } else {
+ } else if (NamedMember && NamedMember->isImplicitObjectMemberFunction()) {
// Check that the 'this' pointer points to an object of the right type.
// FIXME: If this is an assignment operator call, we may need to change
// the active union member before we check this.
@@ -7697,7 +8061,7 @@ public:
Stmt *Body = FD->getBody(Definition);
if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition, Body) ||
- !HandleFunctionCall(E->getExprLoc(), Definition, This, Args, Call,
+ !HandleFunctionCall(E->getExprLoc(), Definition, This, E, Args, Call,
Body, Info, Result, ResultSlot))
return false;
@@ -7857,8 +8221,8 @@ public:
bool VisitStmtExpr(const StmtExpr *E) {
// We will have checked the full-expressions inside the statement expression
// when they were completed, and don't need to check them again now.
- llvm::SaveAndRestore<bool> NotCheckingForUB(
- Info.CheckingForUndefinedBehavior, false);
+ llvm::SaveAndRestore NotCheckingForUB(Info.CheckingForUndefinedBehavior,
+ false);
const CompoundStmt *CS = E->getSubStmt();
if (CS->body_empty())
@@ -8062,6 +8426,7 @@ public:
bool VisitVarDecl(const Expr *E, const VarDecl *VD);
bool VisitUnaryPreIncDec(const UnaryOperator *UO);
+ bool VisitCallExpr(const CallExpr *E);
bool VisitDeclRefExpr(const DeclRefExpr *E);
bool VisitPredefinedExpr(const PredefinedExpr *E) { return Success(E); }
bool VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
@@ -8090,7 +8455,8 @@ public:
return LValueExprEvaluatorBaseTy::VisitCastExpr(E);
case CK_LValueBitCast:
- this->CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
+ this->CCEDiag(E, diag::note_constexpr_invalid_cast)
+ << 2 << Info.Ctx.getLangOpts().CPlusPlus;
if (!Visit(E->getSubExpr()))
return false;
Result.Designator.setInvalid();
@@ -8119,13 +8485,14 @@ static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info,
bool InvalidBaseOK) {
assert(!E->isValueDependent());
assert(E->isGLValue() || E->getType()->isFunctionType() ||
- E->getType()->isVoidType() || isa<ObjCSelectorExpr>(E));
+ E->getType()->isVoidType() || isa<ObjCSelectorExpr>(E->IgnoreParens()));
return LValueExprEvaluator(Info, Result, InvalidBaseOK).Visit(E);
}
bool LValueExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) {
const NamedDecl *D = E->getDecl();
- if (isa<FunctionDecl, MSGuidDecl, TemplateParamObjectDecl>(D))
+ if (isa<FunctionDecl, MSGuidDecl, TemplateParamObjectDecl,
+ UnnamedGlobalConstantDecl>(D))
return Success(cast<ValueDecl>(D));
if (const VarDecl *VD = dyn_cast<VarDecl>(D))
return VisitVarDecl(E, VD);
@@ -8152,8 +8519,24 @@ bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) {
return false;
if (auto *FD = Info.CurrentCall->LambdaCaptureFields.lookup(VD)) {
+ const auto *MD = cast<CXXMethodDecl>(Info.CurrentCall->Callee);
+
+ // Static lambda function call operators can't have captures. We already
+ // diagnosed this, so bail out here.
+ if (MD->isStatic()) {
+ assert(Info.CurrentCall->This == nullptr &&
+ "This should not be set for a static call operator");
+ return false;
+ }
+
// Start with 'Result' referring to the complete closure object...
- Result = *Info.CurrentCall->This;
+ if (MD->isExplicitObjectMemberFunction()) {
+ APValue *RefValue =
+ Info.getParamSlot(Info.CurrentCall->Arguments, MD->getParamDecl(0));
+ Result.setFrom(Info.Ctx, *RefValue);
+ } else
+ Result = *Info.CurrentCall->This;
+
// ... then update it to refer to the field of the closure object
// that represents the capture.
if (!HandleLValueMember(Info, E, Result, FD))
@@ -8226,6 +8609,26 @@ bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) {
return Success(*V, E);
}
+bool LValueExprEvaluator::VisitCallExpr(const CallExpr *E) {
+ if (!IsConstantEvaluatedBuiltinCall(E))
+ return ExprEvaluatorBaseTy::VisitCallExpr(E);
+
+ switch (E->getBuiltinCallee()) {
+ default:
+ return false;
+ case Builtin::BIas_const:
+ case Builtin::BIforward:
+ case Builtin::BIforward_like:
+ case Builtin::BImove:
+ case Builtin::BImove_if_noexcept:
+ if (cast<FunctionDecl>(E->getCalleeDecl())->isConstexpr())
+ return Visit(E->getArg(0));
+ break;
+ }
+
+ return ExprEvaluatorBaseTy::VisitCallExpr(E);
+}
+
bool LValueExprEvaluator::VisitMaterializeTemporaryExpr(
const MaterializeTemporaryExpr *E) {
// Walk through the expression to find the materialized temporary itself.
@@ -8235,8 +8638,8 @@ bool LValueExprEvaluator::VisitMaterializeTemporaryExpr(
E->getSubExpr()->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
// If we passed any comma operators, evaluate their LHSs.
- for (unsigned I = 0, N = CommaLHSs.size(); I != N; ++I)
- if (!EvaluateIgnoredValue(Info, CommaLHSs[I]))
+ for (const Expr *E : CommaLHSs)
+ if (!EvaluateIgnoredValue(Info, E))
return false;
// A materialized temporary with static storage duration can appear within the
@@ -8244,13 +8647,15 @@ bool LValueExprEvaluator::VisitMaterializeTemporaryExpr(
// value for use outside this evaluation.
APValue *Value;
if (E->getStorageDuration() == SD_Static) {
+ if (Info.EvalMode == EvalInfo::EM_ConstantFold)
+ return false;
// FIXME: What about SD_Thread?
Value = E->getOrCreateValue(true);
*Value = APValue();
Result.set(E);
} else {
Value = &Info.CurrentCall->createTemporary(
- E, E->getType(),
+ E, Inner->getType(),
E->getStorageDuration() == SD_FullExpression ? ScopeKind::FullExpression
: ScopeKind::Block,
Result);
@@ -8320,7 +8725,7 @@ bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
if (!Visit(E->getExprOperand()))
return false;
- Optional<DynamicType> DynType =
+ std::optional<DynamicType> DynType =
ComputeDynamicType(Info, E, Result, AK_TypeId);
if (!DynType)
return false;
@@ -8357,7 +8762,8 @@ bool LValueExprEvaluator::VisitMemberExpr(const MemberExpr *E) {
bool LValueExprEvaluator::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// FIXME: Deal with vectors as array subscript bases.
- if (E->getBase()->getType()->isVectorType())
+ if (E->getBase()->getType()->isVectorType() ||
+ E->getBase()->getType()->isSveVLSBuiltinType())
return Error(E);
APSInt Index;
@@ -8455,7 +8861,7 @@ bool LValueExprEvaluator::VisitBinAssign(const BinaryOperator *E) {
return false;
if (Info.getLangOpts().CPlusPlus20 &&
- !HandleUnionActiveMemberChange(Info, E->getLHS(), Result))
+ !MaybeHandleUnionActiveMemberChange(Info, E->getLHS(), Result))
return false;
return handleAssignment(this->Info, E, Result, E->getLHS()->getType(),
@@ -8490,7 +8896,7 @@ static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx,
Into = ExprResult.Val.getInt();
if (Into.isNegative() || !Into.isIntN(BitsInSizeT))
return false;
- Into = Into.zextOrSelf(BitsInSizeT);
+ Into = Into.zext(BitsInSizeT);
return true;
};
@@ -8549,7 +8955,7 @@ static bool evaluateLValueAsAllocSize(EvalInfo &Info, APValue::LValueBase Base,
return false;
const Expr *Init = VD->getAnyInitializer();
- if (!Init)
+ if (!Init || Init->getType().isNull())
return false;
const Expr *E = Init->IgnoreParens();
@@ -8633,16 +9039,19 @@ public:
return false;
}
Result = *Info.CurrentCall->This;
- // If we are inside a lambda's call operator, the 'this' expression refers
- // to the enclosing '*this' object (either by value or reference) which is
- // either copied into the closure object's field that represents the '*this'
- // or refers to '*this'.
+
if (isLambdaCallOperator(Info.CurrentCall->Callee)) {
- // Ensure we actually have captured 'this'. (an error will have
- // been previously reported if not).
+ // Ensure we actually have captured 'this'. If something was wrong with
+ // 'this' capture, the error would have been previously reported.
+ // Otherwise we can be inside of a default initialization of an object
+ // declared by lambda's body, so no need to return false.
if (!Info.CurrentCall->LambdaThisCaptureField)
- return false;
+ return true;
+ // If we have captured 'this', the 'this' expression refers
+ // to the enclosing '*this' object (either by value or reference) which is
+ // either copied into the closure object's field that represents the
+ // '*this' or refers to '*this'.
// Update 'Result' to refer to the data member/field of the closure object
// that represents the '*this' capture.
if (!HandleLValueMember(Info, E, Result,
@@ -8665,7 +9074,7 @@ public:
bool VisitCXXNewExpr(const CXXNewExpr *E);
bool VisitSourceLocExpr(const SourceLocExpr *E) {
- assert(E->isStringType() && "SourceLocExpr isn't a pointer type?");
+ assert(!E->isIntType() && "SourceLocExpr isn't a pointer type?");
APValue LValResult = E->EvaluateInContext(
Info.Ctx, Info.CurrentCall->CurSourceLocExprScope.getDefaultExpr());
Result.setFrom(Info.Ctx, LValResult);
@@ -8675,16 +9084,14 @@ public:
bool VisitSYCLUniqueStableNameExpr(const SYCLUniqueStableNameExpr *E) {
std::string ResultStr = E->ComputeName(Info.Ctx);
- Info.Ctx.SYCLUniqueStableNameEvaluatedValues[E] = ResultStr;
-
QualType CharTy = Info.Ctx.CharTy.withConst();
APInt Size(Info.Ctx.getTypeSize(Info.Ctx.getSizeType()),
ResultStr.size() + 1);
- QualType ArrayTy = Info.Ctx.getConstantArrayType(CharTy, Size, nullptr,
- ArrayType::Normal, 0);
+ QualType ArrayTy = Info.Ctx.getConstantArrayType(
+ CharTy, Size, nullptr, ArraySizeModifier::Normal, 0);
StringLiteral *SL =
- StringLiteral::Create(Info.Ctx, ResultStr, StringLiteral::Ascii,
+ StringLiteral::Create(Info.Ctx, ResultStr, StringLiteralKind::Ordinary,
/*Pascal*/ false, ArrayTy, E->getLocation());
evaluateLValue(SL, Result);
@@ -8732,6 +9139,22 @@ bool PointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
return evaluateLValue(E->getSubExpr(), Result);
}
+// Is the provided decl 'std::source_location::current'?
+static bool IsDeclSourceLocationCurrent(const FunctionDecl *FD) {
+ if (!FD)
+ return false;
+ const IdentifierInfo *FnII = FD->getIdentifier();
+ if (!FnII || !FnII->isStr("current"))
+ return false;
+
+ const auto *RD = dyn_cast<RecordDecl>(FD->getParent());
+ if (!RD)
+ return false;
+
+ const IdentifierInfo *ClassII = RD->getIdentifier();
+ return RD->isInStdNamespace() && ClassII && ClassII->isStr("source_location");
+}
+
bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
const Expr *SubExpr = E->getSubExpr();
@@ -8749,21 +9172,40 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
// permitted in constant expressions in C++11. Bitcasts from cv void* are
// also static_casts, but we disallow them as a resolution to DR1312.
if (!E->getType()->isVoidPointerType()) {
- if (!Result.InvalidBase && !Result.Designator.Invalid &&
- !Result.IsNullPtr &&
+ // In some circumstances, we permit casting from void* to cv1 T*, when the
+ // actual pointee object is actually a cv2 T.
+ bool HasValidResult = !Result.InvalidBase && !Result.Designator.Invalid &&
+ !Result.IsNullPtr;
+ bool VoidPtrCastMaybeOK =
+ HasValidResult &&
Info.Ctx.hasSameUnqualifiedType(Result.Designator.getType(Info.Ctx),
- E->getType()->getPointeeType()) &&
- Info.getStdAllocatorCaller("allocate")) {
- // Inside a call to std::allocator::allocate and friends, we permit
- // casting from void* back to cv1 T* for a pointer that points to a
- // cv2 T.
+ E->getType()->getPointeeType());
+ // 1. We'll allow it in std::allocator::allocate, and anything which that
+ // calls.
+ // 2. HACK 2022-03-28: Work around an issue with libstdc++'s
+ // <source_location> header. Fixed in GCC 12 and later (2022-04-??).
+ // We'll allow it in the body of std::source_location::current. GCC's
+ // implementation had a parameter of type `void*`, and casts from
+ // that back to `const __impl*` in its body.
+ if (VoidPtrCastMaybeOK &&
+ (Info.getStdAllocatorCaller("allocate") ||
+ IsDeclSourceLocationCurrent(Info.CurrentCall->Callee) ||
+ Info.getLangOpts().CPlusPlus26)) {
+ // Permitted.
} else {
- Result.Designator.setInvalid();
- if (SubExpr->getType()->isVoidPointerType())
+ if (SubExpr->getType()->isVoidPointerType()) {
+ if (HasValidResult)
+ CCEDiag(E, diag::note_constexpr_invalid_void_star_cast)
+ << SubExpr->getType() << Info.getLangOpts().CPlusPlus26
+ << Result.Designator.getType(Info.Ctx).getCanonicalType()
+ << E->getType()->getPointeeType();
+ else
+ CCEDiag(E, diag::note_constexpr_invalid_cast)
+ << 3 << SubExpr->getType();
+ } else
CCEDiag(E, diag::note_constexpr_invalid_cast)
- << 3 << SubExpr->getType();
- else
- CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
+ << 2 << Info.Ctx.getLangOpts().CPlusPlus;
+ Result.Designator.setInvalid();
}
}
if (E->getCastKind() == CK_AddressSpaceConversion && Result.IsNullPtr)
@@ -8800,7 +9242,8 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
return ZeroInitialization(E);
case CK_IntegralToPointer: {
- CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
+ CCEDiag(E, diag::note_constexpr_invalid_cast)
+ << 2 << Info.Ctx.getLangOpts().CPlusPlus;
APValue Value;
if (!EvaluateIntegerOrLValue(SubExpr, Value, Info))
@@ -8867,8 +9310,7 @@ static CharUnits GetAlignOfType(EvalInfo &Info, QualType T,
// C++ [expr.alignof]p3:
// When alignof is applied to a reference type, the result is the
// alignment of the referenced type.
- if (const ReferenceType *Ref = T->getAs<ReferenceType>())
- T = Ref->getPointeeType();
+ T = T.getNonReferenceType();
if (T.getQualifiers().hasUnaligned())
return CharUnits::One();
@@ -8961,13 +9403,9 @@ bool PointerExprEvaluator::visitNonBuiltinCallExpr(const CallExpr *E) {
}
bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) {
- if (IsStringLiteralCall(E))
- return Success(E);
-
- if (unsigned BuiltinOp = E->getBuiltinCallee())
- return VisitBuiltinCallExpr(E, BuiltinOp);
-
- return visitNonBuiltinCallExpr(E);
+ if (!IsConstantEvaluatedBuiltinCall(E))
+ return visitNonBuiltinCallExpr(E);
+ return VisitBuiltinCallExpr(E, E->getBuiltinCallee());
}
// Determine if T is a character type for which we guarantee that
@@ -8978,7 +9416,12 @@ static bool isOneByteCharacterType(QualType T) {
bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
unsigned BuiltinOp) {
+ if (IsNoOpCall(E))
+ return Success(E);
+
switch (BuiltinOp) {
+ case Builtin::BIaddressof:
+ case Builtin::BI__addressof:
case Builtin::BI__builtin_addressof:
return evaluateLValue(E->getArg(0), Result);
case Builtin::BI__builtin_assume_aligned: {
@@ -9082,11 +9525,11 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
case Builtin::BIwmemchr:
if (Info.getLangOpts().CPlusPlus11)
Info.CCEDiag(E, diag::note_constexpr_invalid_function)
- << /*isConstexpr*/0 << /*isConstructor*/0
- << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'");
+ << /*isConstexpr*/ 0 << /*isConstructor*/ 0
+ << ("'" + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'").str();
else
Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Builtin::BI__builtin_strchr:
case Builtin::BI__builtin_wcschr:
case Builtin::BI__builtin_memchr:
@@ -9105,7 +9548,7 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
APSInt N;
if (!EvaluateInteger(E->getArg(2), N, Info))
return false;
- MaxLength = N.getExtValue();
+ MaxLength = N.getZExtValue();
}
// We cannot find the value if there are no candidates to match against.
if (MaxLength == 0u)
@@ -9128,7 +9571,7 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
// FIXME: We can compare the bytes in the correct order.
if (IsRawByte && !isOneByteCharacterType(CharTy)) {
Info.FFDiag(E, diag::note_constexpr_memchr_unsupported)
- << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'")
+ << ("'" + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'").str()
<< CharTy;
return false;
}
@@ -9147,7 +9590,7 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
Desired))
return ZeroInitialization(E);
StopAtNull = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Builtin::BImemchr:
case Builtin::BI__builtin_memchr:
case Builtin::BI__builtin_char_memchr:
@@ -9160,7 +9603,7 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
case Builtin::BIwcschr:
case Builtin::BI__builtin_wcschr:
StopAtNull = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Builtin::BIwmemchr:
case Builtin::BI__builtin_wmemchr:
// wcschr and wmemchr are given a wchar_t to look for. Just use it.
@@ -9190,11 +9633,11 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
case Builtin::BIwmemmove:
if (Info.getLangOpts().CPlusPlus11)
Info.CCEDiag(E, diag::note_constexpr_invalid_function)
- << /*isConstexpr*/0 << /*isConstructor*/0
- << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'");
+ << /*isConstexpr*/ 0 << /*isConstructor*/ 0
+ << ("'" + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'").str();
else
Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Builtin::BI__builtin_memcpy:
case Builtin::BI__builtin_memmove:
case Builtin::BI__builtin_wmemcpy:
@@ -9262,6 +9705,8 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
// Figure out how many T's we're copying.
uint64_t TSize = Info.Ctx.getTypeSizeInChars(T).getQuantity();
+ if (TSize == 0)
+ return false;
if (!WChar) {
uint64_t Remainder;
llvm::APInt OrigN = N;
@@ -9330,10 +9775,8 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
}
default:
- break;
+ return false;
}
-
- return visitNonBuiltinCallExpr(E);
}
static bool EvaluateArrayNewInitList(EvalInfo &Info, LValue &This,
@@ -9396,7 +9839,7 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
bool ValueInit = false;
QualType AllocType = E->getAllocatedType();
- if (Optional<const Expr*> ArraySize = E->getArraySize()) {
+ if (std::optional<const Expr *> ArraySize = E->getArraySize()) {
const Expr *Stripped = *ArraySize;
for (; auto *ICE = dyn_cast<ImplicitCastExpr>(Stripped);
Stripped = ICE->getSubExpr())
@@ -9423,14 +9866,12 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
// -- its value is such that the size of the allocated object would
// exceed the implementation-defined limit
- if (ConstantArrayType::getNumAddressingBits(Info.Ctx, AllocType,
- ArrayBound) >
- ConstantArrayType::getMaxSizeBits(Info.Ctx)) {
+ if (!Info.CheckArraySize(ArraySize.value()->getExprLoc(),
+ ConstantArrayType::getNumAddressingBits(
+ Info.Ctx, AllocType, ArrayBound),
+ ArrayBound.getZExtValue(), /*Diag=*/!IsNothrow)) {
if (IsNothrow)
return ZeroInitialization(E);
-
- Info.FFDiag(*ArraySize, diag::note_constexpr_new_too_large)
- << ArrayBound << (*ArraySize)->getSourceRange();
return false;
}
@@ -9450,8 +9891,8 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
unsigned Bits =
std::max(CAT->getSize().getBitWidth(), ArrayBound.getBitWidth());
- llvm::APInt InitBound = CAT->getSize().zextOrSelf(Bits);
- llvm::APInt AllocBound = ArrayBound.zextOrSelf(Bits);
+ llvm::APInt InitBound = CAT->getSize().zext(Bits);
+ llvm::APInt AllocBound = ArrayBound.zext(Bits);
if (InitBound.ugt(AllocBound)) {
if (IsNothrow)
return ZeroInitialization(E);
@@ -9470,7 +9911,7 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
}
AllocType = Info.Ctx.getConstantArrayType(AllocType, ArrayBound, nullptr,
- ArrayType::Normal, 0);
+ ArraySizeModifier::Normal, 0);
} else {
assert(!AllocType->isArrayType() &&
"array allocation with non-array new");
@@ -9542,7 +9983,7 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
} else if (Init) {
if (!EvaluateInPlace(*Val, Info, Result, Init))
return false;
- } else if (!getDefaultInitValue(AllocType, *Val)) {
+ } else if (!handleDefaultInitValue(AllocType, *Val)) {
return false;
}
@@ -9678,6 +10119,9 @@ namespace {
bool VisitCXXConstructExpr(const CXXConstructExpr *E, QualType T);
bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E);
bool VisitBinCmp(const BinaryOperator *E);
+ bool VisitCXXParenListInitExpr(const CXXParenListInitExpr *E);
+ bool VisitCXXParenListOrInitListExpr(const Expr *ExprToVisit,
+ ArrayRef<Expr *> Args);
};
}
@@ -9796,8 +10240,13 @@ bool RecordExprEvaluator::VisitCastExpr(const CastExpr *E) {
bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
if (E->isTransparent())
return Visit(E->getInit(0));
+ return VisitCXXParenListOrInitListExpr(E, E->inits());
+}
- const RecordDecl *RD = E->getType()->castAs<RecordType>()->getDecl();
+bool RecordExprEvaluator::VisitCXXParenListOrInitListExpr(
+ const Expr *ExprToVisit, ArrayRef<Expr *> Args) {
+ const RecordDecl *RD =
+ ExprToVisit->getType()->castAs<RecordType>()->getDecl();
if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
@@ -9808,7 +10257,16 @@ bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
CXXRD && CXXRD->getNumBases());
if (RD->isUnion()) {
- const FieldDecl *Field = E->getInitializedFieldInUnion();
+ const FieldDecl *Field;
+ if (auto *ILE = dyn_cast<InitListExpr>(ExprToVisit)) {
+ Field = ILE->getInitializedFieldInUnion();
+ } else if (auto *PLIE = dyn_cast<CXXParenListInitExpr>(ExprToVisit)) {
+ Field = PLIE->getInitializedFieldInUnion();
+ } else {
+ llvm_unreachable(
+ "Expression is neither an init list nor a C++ paren list");
+ }
+
Result = APValue(Field);
if (!Field)
return true;
@@ -9819,7 +10277,7 @@ bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
// Is this difference ever observable for initializer lists which
// we don't build?
ImplicitValueInitExpr VIE(Field->getType());
- const Expr *InitExpr = E->getNumInits() ? E->getInit(0) : &VIE;
+ const Expr *InitExpr = Args.empty() ? &VIE : Args[0];
LValue Subobject = This;
if (!HandleLValueMember(Info, InitExpr, Subobject, Field, &Layout))
@@ -9848,8 +10306,8 @@ bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
// Initialize base classes.
if (CXXRD && CXXRD->getNumBases()) {
for (const auto &Base : CXXRD->bases()) {
- assert(ElementNo < E->getNumInits() && "missing init for base class");
- const Expr *Init = E->getInit(ElementNo);
+ assert(ElementNo < Args.size() && "missing init for base class");
+ const Expr *Init = Args[ElementNo];
LValue Subobject = This;
if (!HandleLValueBase(Info, Init, Subobject, CXXRD, &Base))
@@ -9876,18 +10334,29 @@ bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
LValue Subobject = This;
- bool HaveInit = ElementNo < E->getNumInits();
+ bool HaveInit = ElementNo < Args.size();
// FIXME: Diagnostics here should point to the end of the initializer
// list, not the start.
- if (!HandleLValueMember(Info, HaveInit ? E->getInit(ElementNo) : E,
+ if (!HandleLValueMember(Info, HaveInit ? Args[ElementNo] : ExprToVisit,
Subobject, Field, &Layout))
return false;
// Perform an implicit value-initialization for members beyond the end of
// the initializer list.
ImplicitValueInitExpr VIE(HaveInit ? Info.Ctx.IntTy : Field->getType());
- const Expr *Init = HaveInit ? E->getInit(ElementNo++) : &VIE;
+ const Expr *Init = HaveInit ? Args[ElementNo++] : &VIE;
+
+ if (Field->getType()->isIncompleteArrayType()) {
+ if (auto *CAT = Info.Ctx.getAsConstantArrayType(Init->getType())) {
+ if (!CAT->getSize().isZero()) {
+ // Bail out for now. This might sort of "work", but the rest of the
+ // code isn't really prepared to handle it.
+ Info.FFDiag(Init, diag::note_constexpr_unsupported_flexible_array);
+ return false;
+ }
+ }
+ }
// Temporarily override This, in case there's a CXXDefaultInitExpr in here.
ThisOverrideRAII ThisOverride(*Info.CurrentCall, &This,
@@ -9924,7 +10393,7 @@ bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
if (ZeroInit)
return ZeroInitialization(E, T);
- return getDefaultInitValue(T, Result);
+ return handleDefaultInitValue(T, Result);
}
const FunctionDecl *Definition = nullptr;
@@ -9951,7 +10420,7 @@ bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
if (ZeroInit && !ZeroInitialization(E, T))
return false;
- auto Args = llvm::makeArrayRef(E->getArgs(), E->getNumArgs());
+ auto Args = llvm::ArrayRef(E->getArgs(), E->getNumArgs());
return HandleConstructorCall(E, This, Args,
cast<CXXConstructorDecl>(Definition), Info,
Result);
@@ -9988,6 +10457,8 @@ bool RecordExprEvaluator::VisitCXXStdInitializerListExpr(
if (!EvaluateLValue(E->getSubExpr(), Array, Info))
return false;
+ assert(ArrayType && "unexpected type for array initializer");
+
// Get a pointer to the first element of the array.
Array.addArray(Info, E, ArrayType);
@@ -10054,7 +10525,6 @@ bool RecordExprEvaluator::VisitLambdaExpr(const LambdaExpr *E) {
// Iterate through all the lambda's closure object's fields and initialize
// them.
auto *CaptureInitIt = E->capture_init_begin();
- const LambdaCapture *CaptureIt = ClosureClass->captures_begin();
bool Success = true;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(ClosureClass);
for (const auto *Field : ClosureClass->fields()) {
@@ -10078,7 +10548,6 @@ bool RecordExprEvaluator::VisitLambdaExpr(const LambdaExpr *E) {
return false;
Success = false;
}
- ++CaptureIt;
}
return Success;
}
@@ -10178,7 +10647,8 @@ namespace {
bool VisitInitListExpr(const InitListExpr *E);
bool VisitUnaryImag(const UnaryOperator *E);
bool VisitBinaryOperator(const BinaryOperator *E);
- // FIXME: Missing: unary -, unary ~, conditional operator (for GNU
+ bool VisitUnaryOperator(const UnaryOperator *E);
+ // FIXME: Missing: conditional operator (for GNU
// conditional select), shufflevector, ExtVectorElementExpr
};
} // end anonymous namespace
@@ -10218,41 +10688,22 @@ bool VectorExprEvaluator::VisitCastExpr(const CastExpr *E) {
return Success(Elts, E);
}
case CK_BitCast: {
- // Evaluate the operand into an APInt we can extract from.
- llvm::APInt SValInt;
- if (!EvalAndBitcastToAPInt(Info, SE, SValInt))
+ APValue SVal;
+ if (!Evaluate(SVal, Info, SE))
+ return false;
+
+ if (!SVal.isInt() && !SVal.isFloat() && !SVal.isVector()) {
+ // Give up if the input isn't an int, float, or vector. For example, we
+ // reject "(v4i16)(intptr_t)&a".
+ Info.FFDiag(E, diag::note_constexpr_invalid_cast)
+ << 2 << Info.Ctx.getLangOpts().CPlusPlus;
return false;
- // Extract the elements
- QualType EltTy = VTy->getElementType();
- unsigned EltSize = Info.Ctx.getTypeSize(EltTy);
- bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian();
- SmallVector<APValue, 4> Elts;
- if (EltTy->isRealFloatingType()) {
- const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(EltTy);
- unsigned FloatEltSize = EltSize;
- if (&Sem == &APFloat::x87DoubleExtended())
- FloatEltSize = 80;
- for (unsigned i = 0; i < NElts; i++) {
- llvm::APInt Elt;
- if (BigEndian)
- Elt = SValInt.rotl(i*EltSize+FloatEltSize).trunc(FloatEltSize);
- else
- Elt = SValInt.rotr(i*EltSize).trunc(FloatEltSize);
- Elts.push_back(APValue(APFloat(Sem, Elt)));
- }
- } else if (EltTy->isIntegerType()) {
- for (unsigned i = 0; i < NElts; i++) {
- llvm::APInt Elt;
- if (BigEndian)
- Elt = SValInt.rotl(i*EltSize+EltSize).zextOrTrunc(EltSize);
- else
- Elt = SValInt.rotr(i*EltSize).zextOrTrunc(EltSize);
- Elts.push_back(APValue(APSInt(Elt, !EltTy->isSignedIntegerType())));
- }
- } else {
- return Error(E);
}
- return Success(Elts, E);
+
+ if (!handleRValueToRValueBitCast(Info, Result, SVal, E))
+ return false;
+
+ return true;
}
default:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
@@ -10363,6 +10814,92 @@ bool VectorExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
return Success(LHSValue, E);
}
+static std::optional<APValue> handleVectorUnaryOperator(ASTContext &Ctx,
+ QualType ResultTy,
+ UnaryOperatorKind Op,
+ APValue Elt) {
+ switch (Op) {
+ case UO_Plus:
+ // Nothing to do here.
+ return Elt;
+ case UO_Minus:
+ if (Elt.getKind() == APValue::Int) {
+ Elt.getInt().negate();
+ } else {
+ assert(Elt.getKind() == APValue::Float &&
+ "Vector can only be int or float type");
+ Elt.getFloat().changeSign();
+ }
+ return Elt;
+ case UO_Not:
+ // This is only valid for integral types anyway, so we don't have to handle
+ // float here.
+ assert(Elt.getKind() == APValue::Int &&
+ "Vector operator ~ can only be int");
+ Elt.getInt().flipAllBits();
+ return Elt;
+ case UO_LNot: {
+ if (Elt.getKind() == APValue::Int) {
+ Elt.getInt() = !Elt.getInt();
+ // operator ! on vectors returns -1 for 'truth', so negate it.
+ Elt.getInt().negate();
+ return Elt;
+ }
+ assert(Elt.getKind() == APValue::Float &&
+ "Vector can only be int or float type");
+ // Float types result in an int of the same size, but -1 for true, or 0 for
+ // false.
+ APSInt EltResult{Ctx.getIntWidth(ResultTy),
+ ResultTy->isUnsignedIntegerType()};
+ if (Elt.getFloat().isZero())
+ EltResult.setAllBits();
+ else
+ EltResult.clearAllBits();
+
+ return APValue{EltResult};
+ }
+ default:
+ // FIXME: Implement the rest of the unary operators.
+ return std::nullopt;
+ }
+}
+
+bool VectorExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
+ Expr *SubExpr = E->getSubExpr();
+ const auto *VD = SubExpr->getType()->castAs<VectorType>();
+ // This result element type differs in the case of negating a floating point
+ // vector, since the result type is the a vector of the equivilant sized
+ // integer.
+ const QualType ResultEltTy = VD->getElementType();
+ UnaryOperatorKind Op = E->getOpcode();
+
+ APValue SubExprValue;
+ if (!Evaluate(SubExprValue, Info, SubExpr))
+ return false;
+
+ // FIXME: This vector evaluator someday needs to be changed to be LValue
+ // aware/keep LValue information around, rather than dealing with just vector
+ // types directly. Until then, we cannot handle cases where the operand to
+ // these unary operators is an LValue. The only case I've been able to see
+ // cause this is operator++ assigning to a member expression (only valid in
+ // altivec compilations) in C mode, so this shouldn't limit us too much.
+ if (SubExprValue.isLValue())
+ return false;
+
+ assert(SubExprValue.getVectorLength() == VD->getNumElements() &&
+ "Vector length doesn't match type?");
+
+ SmallVector<APValue, 4> ResultElements;
+ for (unsigned EltNum = 0; EltNum < VD->getNumElements(); ++EltNum) {
+ std::optional<APValue> Elt = handleVectorUnaryOperator(
+ Info.Ctx, ResultEltTy, Op, SubExprValue.getVectorElt(EltNum));
+ if (!Elt)
+ return false;
+ ResultElements.push_back(*Elt);
+ }
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+}
+
//===----------------------------------------------------------------------===//
// Array Evaluation
//===----------------------------------------------------------------------===//
@@ -10425,6 +10962,11 @@ namespace {
expandStringLiteral(Info, E, Result, AllocType);
return true;
}
+ bool VisitCXXParenListInitExpr(const CXXParenListInitExpr *E);
+ bool VisitCXXParenListOrInitListExpr(const Expr *ExprToVisit,
+ ArrayRef<Expr *> Args,
+ const Expr *ArrayFiller,
+ QualType AllocType = QualType());
};
} // end anonymous namespace
@@ -10468,6 +11010,11 @@ static bool MaybeElementDependentArrayFiller(const Expr *FillerExpr) {
if (MaybeElementDependentArrayFiller(ILE->getInit(I)))
return true;
}
+
+ if (ILE->hasArrayFiller() &&
+ MaybeElementDependentArrayFiller(ILE->getArrayFiller()))
+ return true;
+
return false;
}
return true;
@@ -10483,13 +11030,27 @@ bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E,
// C++11 [dcl.init.string]p1: A char array [...] can be initialized by [...]
// an appropriately-typed string literal enclosed in braces.
if (E->isStringLiteralInit()) {
- auto *SL = dyn_cast<StringLiteral>(E->getInit(0)->IgnoreParens());
+ auto *SL = dyn_cast<StringLiteral>(E->getInit(0)->IgnoreParenImpCasts());
// FIXME: Support ObjCEncodeExpr here once we support it in
// ArrayExprEvaluator generally.
if (!SL)
return Error(E);
return VisitStringLiteral(SL, AllocType);
}
+ // Any other transparent list init will need proper handling of the
+ // AllocType; we can't just recurse to the inner initializer.
+ assert(!E->isTransparent() &&
+ "transparent array list initialization is not string literal init?");
+
+ return VisitCXXParenListOrInitListExpr(E, E->inits(), E->getArrayFiller(),
+ AllocType);
+}
+
+bool ArrayExprEvaluator::VisitCXXParenListOrInitListExpr(
+ const Expr *ExprToVisit, ArrayRef<Expr *> Args, const Expr *ArrayFiller,
+ QualType AllocType) {
+ const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(
+ AllocType.isNull() ? ExprToVisit->getType() : AllocType);
bool Success = true;
@@ -10499,13 +11060,12 @@ bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E,
if (Result.isArray() && Result.hasArrayFiller())
Filler = Result.getArrayFiller();
- unsigned NumEltsToInit = E->getNumInits();
+ unsigned NumEltsToInit = Args.size();
unsigned NumElts = CAT->getSize().getZExtValue();
- const Expr *FillerExpr = E->hasArrayFiller() ? E->getArrayFiller() : nullptr;
// If the initializer might depend on the array index, run it for each
// array element.
- if (NumEltsToInit != NumElts && MaybeElementDependentArrayFiller(FillerExpr))
+ if (NumEltsToInit != NumElts && MaybeElementDependentArrayFiller(ArrayFiller))
NumEltsToInit = NumElts;
LLVM_DEBUG(llvm::dbgs() << "The number of elements to initialize: "
@@ -10523,10 +11083,9 @@ bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E,
}
LValue Subobject = This;
- Subobject.addArray(Info, E, CAT);
+ Subobject.addArray(Info, ExprToVisit, CAT);
for (unsigned Index = 0; Index != NumEltsToInit; ++Index) {
- const Expr *Init =
- Index < E->getNumInits() ? E->getInit(Index) : FillerExpr;
+ const Expr *Init = Index < Args.size() ? Args[Index] : ArrayFiller;
if (!EvaluateInPlace(Result.getArrayInitializedElt(Index),
Info, Subobject, Init) ||
!HandleLValueArrayAdjustment(Info, Init, Subobject,
@@ -10542,9 +11101,10 @@ bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E,
// If we get here, we have a trivial filler, which we can just evaluate
// once and splat over the rest of the array elements.
- assert(FillerExpr && "no array filler for incomplete init list");
+ assert(ArrayFiller && "no array filler for incomplete init list");
return EvaluateInPlace(Result.getArrayFiller(), Info, Subobject,
- FillerExpr) && Success;
+ ArrayFiller) &&
+ Success;
}
bool ArrayExprEvaluator::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) {
@@ -10567,6 +11127,16 @@ bool ArrayExprEvaluator::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) {
bool Success = true;
for (EvalInfo::ArrayInitLoopIndex Index(Info); Index != Elements; ++Index) {
+ // C++ [class.temporary]/5
+ // There are four contexts in which temporaries are destroyed at a different
+ // point than the end of the full-expression. [...] The second context is
+ // when a copy constructor is called to copy an element of an array while
+ // the entire array is copied [...]. In either case, if the constructor has
+ // one or more default arguments, the destruction of every temporary created
+ // in a default argument is sequenced before the construction of the next
+ // array element, if any.
+ FullExpressionRAII Scope(Info);
+
if (!EvaluateInPlace(Result.getArrayInitializedElt(Index),
Info, Subobject, E->getSubExpr()) ||
!HandleLValueArrayAdjustment(Info, E, Subobject,
@@ -10575,6 +11145,9 @@ bool ArrayExprEvaluator::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) {
return false;
Success = false;
}
+
+ // Make sure we run the destructors too.
+ Scope.destroy();
}
return Success;
@@ -10591,28 +11164,65 @@ bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
bool HadZeroInit = Value->hasValue();
if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(Type)) {
- unsigned N = CAT->getSize().getZExtValue();
+ unsigned FinalSize = CAT->getSize().getZExtValue();
// Preserve the array filler if we had prior zero-initialization.
APValue Filler =
HadZeroInit && Value->hasArrayFiller() ? Value->getArrayFiller()
: APValue();
- *Value = APValue(APValue::UninitArray(), N, N);
-
- if (HadZeroInit)
- for (unsigned I = 0; I != N; ++I)
- Value->getArrayInitializedElt(I) = Filler;
+ *Value = APValue(APValue::UninitArray(), 0, FinalSize);
+ if (FinalSize == 0)
+ return true;
- // Initialize the elements.
+ bool HasTrivialConstructor = CheckTrivialDefaultConstructor(
+ Info, E->getExprLoc(), E->getConstructor(),
+ E->requiresZeroInitialization());
LValue ArrayElt = Subobject;
ArrayElt.addArray(Info, E, CAT);
- for (unsigned I = 0; I != N; ++I)
- if (!VisitCXXConstructExpr(E, ArrayElt, &Value->getArrayInitializedElt(I),
- CAT->getElementType()) ||
- !HandleLValueArrayAdjustment(Info, E, ArrayElt,
- CAT->getElementType(), 1))
- return false;
+ // We do the whole initialization in two passes, first for just one element,
+ // then for the whole array. It's possible we may find out we can't do const
+ // init in the first pass, in which case we avoid allocating a potentially
+ // large array. We don't do more passes because expanding array requires
+ // copying the data, which is wasteful.
+ for (const unsigned N : {1u, FinalSize}) {
+ unsigned OldElts = Value->getArrayInitializedElts();
+ if (OldElts == N)
+ break;
+
+ // Expand the array to appropriate size.
+ APValue NewValue(APValue::UninitArray(), N, FinalSize);
+ for (unsigned I = 0; I < OldElts; ++I)
+ NewValue.getArrayInitializedElt(I).swap(
+ Value->getArrayInitializedElt(I));
+ Value->swap(NewValue);
+
+ if (HadZeroInit)
+ for (unsigned I = OldElts; I < N; ++I)
+ Value->getArrayInitializedElt(I) = Filler;
+
+ if (HasTrivialConstructor && N == FinalSize && FinalSize != 1) {
+ // If we have a trivial constructor, only evaluate it once and copy
+ // the result into all the array elements.
+ APValue &FirstResult = Value->getArrayInitializedElt(0);
+ for (unsigned I = OldElts; I < FinalSize; ++I)
+ Value->getArrayInitializedElt(I) = FirstResult;
+ } else {
+ for (unsigned I = OldElts; I < N; ++I) {
+ if (!VisitCXXConstructExpr(E, ArrayElt,
+ &Value->getArrayInitializedElt(I),
+ CAT->getElementType()) ||
+ !HandleLValueArrayAdjustment(Info, E, ArrayElt,
+ CAT->getElementType(), 1))
+ return false;
+ // When checking for const initilization any diagnostic is considered
+ // an error.
+ if (Info.EvalStatus.Diag && !Info.EvalStatus.Diag->empty() &&
+ !Info.keepEvaluatingAfterFailure())
+ return false;
+ }
+ }
+ }
return true;
}
@@ -10624,6 +11234,15 @@ bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
.VisitCXXConstructExpr(E, Type);
}
+bool ArrayExprEvaluator::VisitCXXParenListInitExpr(
+ const CXXParenListInitExpr *E) {
+ assert(dyn_cast<ConstantArrayType>(E->getType()) &&
+ "Expression result is not a constant array type");
+
+ return VisitCXXParenListOrInitListExpr(E, E->getInitExprs(),
+ E->getArrayFiller());
+}
+
//===----------------------------------------------------------------------===//
// Integer Evaluation
//
@@ -10913,44 +11532,13 @@ bool IntExprEvaluator::CheckReferencedDecl(const Expr* E, const Decl* D) {
return false;
}
-/// Values returned by __builtin_classify_type, chosen to match the values
-/// produced by GCC's builtin.
-enum class GCCTypeClass {
- None = -1,
- Void = 0,
- Integer = 1,
- // GCC reserves 2 for character types, but instead classifies them as
- // integers.
- Enum = 3,
- Bool = 4,
- Pointer = 5,
- // GCC reserves 6 for references, but appears to never use it (because
- // expressions never have reference type, presumably).
- PointerToDataMember = 7,
- RealFloat = 8,
- Complex = 9,
- // GCC reserves 10 for functions, but does not use it since GCC version 6 due
- // to decay to pointer. (Prior to version 6 it was only used in C++ mode).
- // GCC claims to reserve 11 for pointers to member functions, but *actually*
- // uses 12 for that purpose, same as for a class or struct. Maybe it
- // internally implements a pointer to member as a struct? Who knows.
- PointerToMemberFunction = 12, // Not a bug, see above.
- ClassOrStruct = 12,
- Union = 13,
- // GCC reserves 14 for arrays, but does not use it since GCC version 6 due to
- // decay to pointer. (Prior to version 6 it was only used in C++ mode).
- // GCC reserves 15 for strings, but actually uses 5 (pointer) for string
- // literals.
-};
-
/// EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way
/// as GCC.
-static GCCTypeClass
-EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts) {
+GCCTypeClass EvaluateBuiltinClassifyType(QualType T,
+ const LangOptions &LangOpts) {
assert(!T->isDependentType() && "unexpected dependent type");
QualType CanTy = T.getCanonicalType();
- const BuiltinType *BT = dyn_cast<BuiltinType>(CanTy);
switch (CanTy->getTypeClass()) {
#define TYPE(ID, BASE)
@@ -10963,7 +11551,7 @@ EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts) {
llvm_unreachable("unexpected non-canonical or dependent type");
case Type::Builtin:
- switch (BT->getKind()) {
+ switch (cast<BuiltinType>(CanTy)->getKind()) {
#define BUILTIN_TYPE(ID, SINGLETON_ID)
#define SIGNED_TYPE(ID, SINGLETON_ID) \
case BuiltinType::ID: return GCCTypeClass::Integer;
@@ -11029,6 +11617,8 @@ EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts) {
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
return GCCTypeClass::None;
case BuiltinType::Dependent:
@@ -11064,19 +11654,23 @@ EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts) {
return EvaluateBuiltinClassifyType(
CanTy->castAs<AtomicType>()->getValueType(), LangOpts);
- case Type::BlockPointer:
case Type::Vector:
case Type::ExtVector:
+ return GCCTypeClass::Vector;
+
+ case Type::BlockPointer:
case Type::ConstantMatrix:
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
case Type::Pipe:
- case Type::ExtInt:
- // GCC classifies vectors as None. We follow its lead and classify all
- // other types that don't fit into the regular classification the same way.
+ // Classify all other types that don't fit into the regular
+ // classification the same way.
return GCCTypeClass::None;
+ case Type::BitInt:
+ return GCCTypeClass::BitInt;
+
case Type::LValueReference:
case Type::RValueReference:
llvm_unreachable("invalid type for expression");
@@ -11338,9 +11932,31 @@ static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const LValue &LVal) {
// conservative with the last element in structs (if it's an array), so our
// current behavior is more compatible than an explicit list approach would
// be.
+ auto isFlexibleArrayMember = [&] {
+ using FAMKind = LangOptions::StrictFlexArraysLevelKind;
+ FAMKind StrictFlexArraysLevel =
+ Ctx.getLangOpts().getStrictFlexArraysLevel();
+
+ if (Designator.isMostDerivedAnUnsizedArray())
+ return true;
+
+ if (StrictFlexArraysLevel == FAMKind::Default)
+ return true;
+
+ if (Designator.getMostDerivedArraySize() == 0 &&
+ StrictFlexArraysLevel != FAMKind::IncompleteOnly)
+ return true;
+
+ if (Designator.getMostDerivedArraySize() == 1 &&
+ StrictFlexArraysLevel == FAMKind::OneZeroOrIncomplete)
+ return true;
+
+ return false;
+ };
+
return LVal.InvalidBase &&
Designator.Entries.size() == Designator.MostDerivedPathLength &&
- Designator.MostDerivedIsArrayElement &&
+ Designator.MostDerivedIsArrayElement && isFlexibleArrayMember() &&
isDesignatorAtObjectEnd(Ctx, LVal);
}
@@ -11355,6 +11971,18 @@ static bool convertUnsignedAPIntToCharUnits(const llvm::APInt &Int,
return true;
}
+/// If we're evaluating the object size of an instance of a struct that
+/// contains a flexible array member, add the size of the initializer.
+static void addFlexibleArrayMemberInitSize(EvalInfo &Info, const QualType &T,
+ const LValue &LV, CharUnits &Size) {
+ if (!T.isNull() && T->isStructureType() &&
+ T->getAsStructureType()->getDecl()->hasFlexibleArrayMember())
+ if (const auto *V = LV.getLValueBase().dyn_cast<const ValueDecl *>())
+ if (const auto *VD = dyn_cast<VarDecl>(V))
+ if (VD->hasInit())
+ Size += VD->getFlexibleArrayInitChars(Info.Ctx);
+}
+
/// Helper for tryEvaluateBuiltinObjectSize -- Given an LValue, this will
/// determine how many bytes exist from the beginning of the object to either
/// the end of the current subobject, or the end of the object itself, depending
@@ -11389,7 +12017,9 @@ static bool determineEndOffset(EvalInfo &Info, SourceLocation ExprLoc,
return false;
QualType BaseTy = getObjectType(LVal.getLValueBase());
- return CheckedHandleSizeof(BaseTy, EndOffset);
+ const bool Ret = CheckedHandleSizeof(BaseTy, EndOffset);
+ addFlexibleArrayMemberInitSize(Info, BaseTy, LVal, EndOffset);
+ return Ret;
}
// We want to evaluate the size of a subobject.
@@ -11487,10 +12117,9 @@ static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
}
bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
- if (unsigned BuiltinOp = E->getBuiltinCallee())
- return VisitBuiltinCallExpr(E, BuiltinOp);
-
- return ExprEvaluatorBaseTy::VisitCallExpr(E);
+ if (!IsConstantEvaluatedBuiltinCall(E))
+ return ExprEvaluatorBaseTy::VisitCallExpr(E);
+ return VisitBuiltinCallExpr(E, E->getBuiltinCallee());
}
static bool getBuiltinAlignArguments(const CallExpr *E, EvalInfo &Info,
@@ -11524,7 +12153,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
unsigned BuiltinOp) {
switch (BuiltinOp) {
default:
- return ExprEvaluatorBaseTy::VisitCallExpr(E);
+ return false;
case Builtin::BI__builtin_dynamic_object_size:
case Builtin::BI__builtin_object_size: {
@@ -11653,20 +12282,30 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (!EvaluateInteger(E->getArg(0), Val, Info))
return false;
- return Success(Val.getBitWidth() - Val.getMinSignedBits(), E);
+ return Success(Val.getBitWidth() - Val.getSignificantBits(), E);
}
case Builtin::BI__builtin_clz:
case Builtin::BI__builtin_clzl:
case Builtin::BI__builtin_clzll:
- case Builtin::BI__builtin_clzs: {
+ case Builtin::BI__builtin_clzs:
+ case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
+ case Builtin::BI__lzcnt:
+ case Builtin::BI__lzcnt64: {
APSInt Val;
if (!EvaluateInteger(E->getArg(0), Val, Info))
return false;
- if (!Val)
+
+ // When the argument is 0, the result of GCC builtins is undefined, whereas
+ // for Microsoft intrinsics, the result is the bit-width of the argument.
+ bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
+ BuiltinOp != Builtin::BI__lzcnt &&
+ BuiltinOp != Builtin::BI__lzcnt64;
+
+ if (ZeroIsUndefined && !Val)
return Error(E);
- return Success(Val.countLeadingZeros(), E);
+ return Success(Val.countl_zero(), E);
}
case Builtin::BI__builtin_constant_p: {
@@ -11692,8 +12331,9 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
Callee->getIdentifier()->isStr("is_constant_evaluated")))) {
// FIXME: Find a better way to avoid duplicated diagnostics.
if (Info.EvalStatus.Diag)
- Info.report((Info.CallStackDepth == 1) ? E->getExprLoc()
- : Info.CurrentCall->CallLoc,
+ Info.report((Info.CallStackDepth == 1)
+ ? E->getExprLoc()
+ : Info.CurrentCall->getCallRange().getBegin(),
diag::warn_is_constant_evaluated_always_true_constexpr)
<< (Info.CallStackDepth == 1 ? "__builtin_is_constant_evaluated"
: "std::is_constant_evaluated");
@@ -11712,7 +12352,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (!Val)
return Error(E);
- return Success(Val.countTrailingZeros(), E);
+ return Success(Val.countr_zero(), E);
}
case Builtin::BI__builtin_eh_return_data_regno: {
@@ -11732,7 +12372,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (!EvaluateInteger(E->getArg(0), Val, Info))
return false;
- unsigned N = Val.countTrailingZeros();
+ unsigned N = Val.countr_zero();
return Success(N == Val.getBitWidth() ? 0 : N + 1, E);
}
@@ -11780,6 +12420,34 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
Success(Val.isNormal() ? 1 : 0, E);
}
+ case Builtin::BI__builtin_issubnormal: {
+ APFloat Val(0.0);
+ return EvaluateFloat(E->getArg(0), Val, Info) &&
+ Success(Val.isDenormal() ? 1 : 0, E);
+ }
+
+ case Builtin::BI__builtin_iszero: {
+ APFloat Val(0.0);
+ return EvaluateFloat(E->getArg(0), Val, Info) &&
+ Success(Val.isZero() ? 1 : 0, E);
+ }
+
+ case Builtin::BI__builtin_issignaling: {
+ APFloat Val(0.0);
+ return EvaluateFloat(E->getArg(0), Val, Info) &&
+ Success(Val.isSignaling() ? 1 : 0, E);
+ }
+
+ case Builtin::BI__builtin_isfpclass: {
+ APSInt MaskVal;
+ if (!EvaluateInteger(E->getArg(1), MaskVal, Info))
+ return false;
+ unsigned Test = static_cast<llvm::FPClassTest>(MaskVal.getZExtValue());
+ APFloat Val(0.0);
+ return EvaluateFloat(E->getArg(0), Val, Info) &&
+ Success((Val.classify() & Test) ? 1 : 0, E);
+ }
+
case Builtin::BI__builtin_parity:
case Builtin::BI__builtin_parityl:
case Builtin::BI__builtin_parityll: {
@@ -11787,17 +12455,20 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (!EvaluateInteger(E->getArg(0), Val, Info))
return false;
- return Success(Val.countPopulation() % 2, E);
+ return Success(Val.popcount() % 2, E);
}
case Builtin::BI__builtin_popcount:
case Builtin::BI__builtin_popcountl:
- case Builtin::BI__builtin_popcountll: {
+ case Builtin::BI__builtin_popcountll:
+ case Builtin::BI__popcnt16: // Microsoft variants of popcount
+ case Builtin::BI__popcnt:
+ case Builtin::BI__popcnt64: {
APSInt Val;
if (!EvaluateInteger(E->getArg(0), Val, Info))
return false;
- return Success(Val.countPopulation(), E);
+ return Success(Val.popcount(), E);
}
case Builtin::BI__builtin_rotateleft8:
@@ -11839,55 +12510,19 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
// A call to strlen is not a constant expression.
if (Info.getLangOpts().CPlusPlus11)
Info.CCEDiag(E, diag::note_constexpr_invalid_function)
- << /*isConstexpr*/0 << /*isConstructor*/0
- << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'");
+ << /*isConstexpr*/ 0 << /*isConstructor*/ 0
+ << ("'" + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'").str();
else
Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Builtin::BI__builtin_strlen:
case Builtin::BI__builtin_wcslen: {
// As an extension, we support __builtin_strlen() as a constant expression,
// and support folding strlen() to a constant.
- LValue String;
- if (!EvaluatePointer(E->getArg(0), String, Info))
- return false;
-
- QualType CharTy = E->getArg(0)->getType()->getPointeeType();
-
- // Fast path: if it's a string literal, search the string value.
- if (const StringLiteral *S = dyn_cast_or_null<StringLiteral>(
- String.getLValueBase().dyn_cast<const Expr *>())) {
- // The string literal may have embedded null characters. Find the first
- // one and truncate there.
- StringRef Str = S->getBytes();
- int64_t Off = String.Offset.getQuantity();
- if (Off >= 0 && (uint64_t)Off <= (uint64_t)Str.size() &&
- S->getCharByteWidth() == 1 &&
- // FIXME: Add fast-path for wchar_t too.
- Info.Ctx.hasSameUnqualifiedType(CharTy, Info.Ctx.CharTy)) {
- Str = Str.substr(Off);
-
- StringRef::size_type Pos = Str.find(0);
- if (Pos != StringRef::npos)
- Str = Str.substr(0, Pos);
-
- return Success(Str.size(), E);
- }
-
- // Fall through to slow path to issue appropriate diagnostic.
- }
-
- // Slow path: scan the bytes of the string looking for the terminating 0.
- for (uint64_t Strlen = 0; /**/; ++Strlen) {
- APValue Char;
- if (!handleLValueToRValueConversion(Info, E, CharTy, String, Char) ||
- !Char.isInt())
- return false;
- if (!Char.getInt())
- return Success(Strlen, E);
- if (!HandleLValueArrayAdjustment(Info, E, String, CharTy, 1))
- return false;
- }
+ uint64_t StrLen;
+ if (EvaluateBuiltinStrLen(E->getArg(0), StrLen, Info))
+ return Success(StrLen, E);
+ return false;
}
case Builtin::BIstrcmp:
@@ -11900,11 +12535,11 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
// A call to strlen is not a constant expression.
if (Info.getLangOpts().CPlusPlus11)
Info.CCEDiag(E, diag::note_constexpr_invalid_function)
- << /*isConstexpr*/0 << /*isConstructor*/0
- << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'");
+ << /*isConstexpr*/ 0 << /*isConstructor*/ 0
+ << ("'" + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'").str();
else
Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Builtin::BI__builtin_strcmp:
case Builtin::BI__builtin_wcscmp:
case Builtin::BI__builtin_strncmp:
@@ -11925,7 +12560,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
APSInt N;
if (!EvaluateInteger(E->getArg(2), N, Info))
return false;
- MaxLength = N.getExtValue();
+ MaxLength = N.getZExtValue();
}
// Empty substrings compare equal by definition.
@@ -11956,7 +12591,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
!(isOneByteCharacterType(CharTy1) && isOneByteCharacterType(CharTy2))) {
// FIXME: Consider using our bit_cast implementation to support this.
Info.FFDiag(E, diag::note_constexpr_memcmp_unsupported)
- << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'")
+ << ("'" + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'").str()
<< CharTy1 << CharTy2;
return false;
}
@@ -12205,9 +12840,9 @@ namespace {
class DataRecursiveIntBinOpEvaluator {
struct EvalResult {
APValue Val;
- bool Failed;
+ bool Failed = false;
- EvalResult() : Failed(false) { }
+ EvalResult() = default;
void swap(EvalResult &RHS) {
Val.swap(RHS.Val);
@@ -12658,41 +13293,55 @@ EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E,
// Reject differing bases from the normal codepath; we special-case
// comparisons to null.
if (!HasSameBase(LHSValue, RHSValue)) {
+ auto DiagComparison = [&] (unsigned DiagID, bool Reversed = false) {
+ std::string LHS = LHSValue.toString(Info.Ctx, E->getLHS()->getType());
+ std::string RHS = RHSValue.toString(Info.Ctx, E->getRHS()->getType());
+ Info.FFDiag(E, DiagID)
+ << (Reversed ? RHS : LHS) << (Reversed ? LHS : RHS);
+ return false;
+ };
// Inequalities and subtractions between unrelated pointers have
// unspecified or undefined behavior.
- if (!IsEquality) {
- Info.FFDiag(E, diag::note_constexpr_pointer_comparison_unspecified);
- return false;
- }
+ if (!IsEquality)
+ return DiagComparison(
+ diag::note_constexpr_pointer_comparison_unspecified);
// A constant address may compare equal to the address of a symbol.
// The one exception is that address of an object cannot compare equal
// to a null pointer constant.
+ // TODO: Should we restrict this to actual null pointers, and exclude the
+ // case of zero cast to pointer type?
if ((!LHSValue.Base && !LHSValue.Offset.isZero()) ||
(!RHSValue.Base && !RHSValue.Offset.isZero()))
- return Error(E);
+ return DiagComparison(diag::note_constexpr_pointer_constant_comparison,
+ !RHSValue.Base);
// It's implementation-defined whether distinct literals will have
// distinct addresses. In clang, the result of such a comparison is
// unspecified, so it is not a constant expression. However, we do know
// that the address of a literal will be non-null.
if ((IsLiteralLValue(LHSValue) || IsLiteralLValue(RHSValue)) &&
LHSValue.Base && RHSValue.Base)
- return Error(E);
+ return DiagComparison(diag::note_constexpr_literal_comparison);
// We can't tell whether weak symbols will end up pointing to the same
// object.
if (IsWeakLValue(LHSValue) || IsWeakLValue(RHSValue))
- return Error(E);
+ return DiagComparison(diag::note_constexpr_pointer_weak_comparison,
+ !IsWeakLValue(LHSValue));
// We can't compare the address of the start of one object with the
// past-the-end address of another object, per C++ DR1652.
- if ((LHSValue.Base && LHSValue.Offset.isZero() &&
- isOnePastTheEndOfCompleteObject(Info.Ctx, RHSValue)) ||
- (RHSValue.Base && RHSValue.Offset.isZero() &&
- isOnePastTheEndOfCompleteObject(Info.Ctx, LHSValue)))
- return Error(E);
+ if (LHSValue.Base && LHSValue.Offset.isZero() &&
+ isOnePastTheEndOfCompleteObject(Info.Ctx, RHSValue))
+ return DiagComparison(diag::note_constexpr_pointer_comparison_past_end,
+ true);
+ if (RHSValue.Base && RHSValue.Offset.isZero() &&
+ isOnePastTheEndOfCompleteObject(Info.Ctx, LHSValue))
+ return DiagComparison(diag::note_constexpr_pointer_comparison_past_end,
+ false);
// We can't tell whether an object is at the same address as another
// zero sized object.
if ((RHSValue.Base && isZeroSized(LHSValue)) ||
(LHSValue.Base && isZeroSized(RHSValue)))
- return Error(E);
+ return DiagComparison(
+ diag::note_constexpr_pointer_comparison_zero_sized);
return Success(CmpResult::Unequal, E);
}
@@ -12796,6 +13445,19 @@ EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E,
if (!EvaluateMemberPointer(E->getRHS(), RHSValue, Info) || !LHSOK)
return false;
+ // If either operand is a pointer to a weak function, the comparison is not
+ // constant.
+ if (LHSValue.getDecl() && LHSValue.getDecl()->isWeak()) {
+ Info.FFDiag(E, diag::note_constexpr_mem_pointer_weak_comparison)
+ << LHSValue.getDecl();
+ return false;
+ }
+ if (RHSValue.getDecl() && RHSValue.getDecl()->isWeak()) {
+ Info.FFDiag(E, diag::note_constexpr_mem_pointer_weak_comparison)
+ << RHSValue.getDecl();
+ return false;
+ }
+
// C++11 [expr.eq]p2:
// If both operands are null, they compare equal. Otherwise if only one is
// null, they compare unequal.
@@ -12827,6 +13489,10 @@ EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E,
// C++11 [expr.rel]p4, [expr.eq]p3: If two operands of type std::nullptr_t
// are compared, the result is true of the operator is <=, >= or ==, and
// false otherwise.
+ LValue Res;
+ if (!EvaluatePointer(E->getLHS(), Res, Info) ||
+ !EvaluatePointer(E->getRHS(), Res, Info))
+ return false;
return Success(CmpResult::Equal, E);
}
@@ -12873,6 +13539,11 @@ bool RecordExprEvaluator::VisitBinCmp(const BinaryOperator *E) {
});
}
+bool RecordExprEvaluator::VisitCXXParenListInitExpr(
+ const CXXParenListInitExpr *E) {
+ return VisitCXXParenListOrInitListExpr(E, E->getInitExprs());
+}
+
bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
// We don't support assignment in C. C++ assignments don't get here because
// assignment is an lvalue in C++.
@@ -13040,6 +13711,7 @@ bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr(
return Success(1, E);
}
+ case UETT_DataSizeOf:
case UETT_SizeOf: {
QualType SrcTy = E->getTypeOfArgument();
// C++ [expr.sizeof]p2: "When applied to a reference or a reference type,
@@ -13048,8 +13720,11 @@ bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr(
SrcTy = Ref->getPointeeType();
CharUnits Sizeof;
- if (!HandleSizeof(Info, E->getExprLoc(), SrcTy, Sizeof))
+ if (!HandleSizeof(Info, E->getExprLoc(), SrcTy, Sizeof,
+ E->getKind() == UETT_DataSizeOf ? SizeOfType::DataSizeOf
+ : SizeOfType::SizeOf)) {
return false;
+ }
return Success(Sizeof, E);
}
case UETT_OpenMPRequiredSimdAlign:
@@ -13059,6 +13734,20 @@ bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr(
Info.Ctx.getOpenMPDefaultSimdAlign(E->getArgumentType()))
.getQuantity(),
E);
+ case UETT_VectorElements: {
+ QualType Ty = E->getTypeOfArgument();
+ // If the vector has a fixed size, we can determine the number of elements
+ // at compile time.
+ if (Ty->isVectorType())
+ return Success(Ty->castAs<VectorType>()->getNumElements(), E);
+
+ assert(Ty->isSizelessVectorType());
+ if (Info.InConstantContext)
+ Info.CCEDiag(E, diag::note_constexpr_non_const_vectorelements)
+ << E->getSourceRange();
+
+ return false;
+ }
}
llvm_unreachable("unknown expr/type trait");
@@ -13151,10 +13840,16 @@ bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
return false;
if (!Result.isInt()) return Error(E);
const APSInt &Value = Result.getInt();
- if (Value.isSigned() && Value.isMinSignedValue() && E->canOverflow() &&
- !HandleOverflow(Info, E, -Value.extend(Value.getBitWidth() + 1),
- E->getType()))
- return false;
+ if (Value.isSigned() && Value.isMinSignedValue() && E->canOverflow()) {
+ if (Info.checkingForUndefinedBehavior())
+ Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
+ diag::warn_integer_constant_overflow)
+ << toString(Value, 10) << E->getType() << E->getSourceRange();
+
+ if (!HandleOverflow(Info, E, -Value.extend(Value.getBitWidth() + 1),
+ E->getType()))
+ return false;
+ }
return Success(-Value, E);
}
case UO_Not: {
@@ -13291,12 +13986,63 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
return Info.Ctx.getTypeSize(DestType) == Info.Ctx.getTypeSize(SrcType);
}
+ if (Info.Ctx.getLangOpts().CPlusPlus && Info.InConstantContext &&
+ Info.EvalMode == EvalInfo::EM_ConstantExpression &&
+ DestType->isEnumeralType()) {
+
+ bool ConstexprVar = true;
+
+ // We know if we are here that we are in a context that we might require
+ // a constant expression or a context that requires a constant
+ // value. But if we are initializing a value we don't know if it is a
+ // constexpr variable or not. We can check the EvaluatingDecl to determine
+ // if it constexpr or not. If not then we don't want to emit a diagnostic.
+ if (const auto *VD = dyn_cast_or_null<VarDecl>(
+ Info.EvaluatingDecl.dyn_cast<const ValueDecl *>()))
+ ConstexprVar = VD->isConstexpr();
+
+ const EnumType *ET = dyn_cast<EnumType>(DestType.getCanonicalType());
+ const EnumDecl *ED = ET->getDecl();
+ // Check that the value is within the range of the enumeration values.
+ //
+ // This corressponds to [expr.static.cast]p10 which says:
+ // A value of integral or enumeration type can be explicitly converted
+ // to a complete enumeration type ... If the enumeration type does not
+ // have a fixed underlying type, the value is unchanged if the original
+ // value is within the range of the enumeration values ([dcl.enum]), and
+ // otherwise, the behavior is undefined.
+ //
+ // This was resolved as part of DR2338 which has CD5 status.
+ if (!ED->isFixed()) {
+ llvm::APInt Min;
+ llvm::APInt Max;
+
+ ED->getValueRange(Max, Min);
+ --Max;
+
+ if (ED->getNumNegativeBits() && ConstexprVar &&
+ (Max.slt(Result.getInt().getSExtValue()) ||
+ Min.sgt(Result.getInt().getSExtValue())))
+ Info.Ctx.getDiagnostics().Report(
+ E->getExprLoc(), diag::warn_constexpr_unscoped_enum_out_of_range)
+ << llvm::toString(Result.getInt(), 10) << Min.getSExtValue()
+ << Max.getSExtValue() << ED;
+ else if (!ED->getNumNegativeBits() && ConstexprVar &&
+ Max.ult(Result.getInt().getZExtValue()))
+ Info.Ctx.getDiagnostics().Report(
+ E->getExprLoc(), diag::warn_constexpr_unscoped_enum_out_of_range)
+ << llvm::toString(Result.getInt(), 10) << Min.getZExtValue()
+ << Max.getZExtValue() << ED;
+ }
+ }
+
return Success(HandleIntToIntCast(Info, E, DestType, SrcType,
Result.getInt()), E);
}
case CK_PointerToIntegral: {
- CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
+ CCEDiag(E, diag::note_constexpr_invalid_cast)
+ << 2 << Info.Ctx.getLangOpts().CPlusPlus << E->getSourceRange();
LValue LV;
if (!EvaluatePointer(SubExpr, LV, Info))
@@ -13650,17 +14396,22 @@ static bool TryEvaluateBuiltinNaN(const ASTContext &Context,
}
bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
+ if (!IsConstantEvaluatedBuiltinCall(E))
+ return ExprEvaluatorBaseTy::VisitCallExpr(E);
+
switch (E->getBuiltinCallee()) {
default:
- return ExprEvaluatorBaseTy::VisitCallExpr(E);
+ return false;
case Builtin::BI__builtin_huge_val:
case Builtin::BI__builtin_huge_valf:
case Builtin::BI__builtin_huge_vall:
+ case Builtin::BI__builtin_huge_valf16:
case Builtin::BI__builtin_huge_valf128:
case Builtin::BI__builtin_inf:
case Builtin::BI__builtin_inff:
case Builtin::BI__builtin_infl:
+ case Builtin::BI__builtin_inff16:
case Builtin::BI__builtin_inff128: {
const llvm::fltSemantics &Sem =
Info.Ctx.getFloatTypeSemantics(E->getType());
@@ -13671,6 +14422,7 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
case Builtin::BI__builtin_nans:
case Builtin::BI__builtin_nansf:
case Builtin::BI__builtin_nansl:
+ case Builtin::BI__builtin_nansf16:
case Builtin::BI__builtin_nansf128:
if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
true, Result))
@@ -13680,6 +14432,7 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
case Builtin::BI__builtin_nan:
case Builtin::BI__builtin_nanf:
case Builtin::BI__builtin_nanl:
+ case Builtin::BI__builtin_nanf16:
case Builtin::BI__builtin_nanf128:
// If this is __builtin_nan() turn this into a nan, otherwise we
// can't constant fold it.
@@ -13722,6 +14475,42 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
Result.copySign(RHS);
return true;
}
+
+ case Builtin::BI__builtin_fmax:
+ case Builtin::BI__builtin_fmaxf:
+ case Builtin::BI__builtin_fmaxl:
+ case Builtin::BI__builtin_fmaxf16:
+ case Builtin::BI__builtin_fmaxf128: {
+ // TODO: Handle sNaN.
+ APFloat RHS(0.);
+ if (!EvaluateFloat(E->getArg(0), Result, Info) ||
+ !EvaluateFloat(E->getArg(1), RHS, Info))
+ return false;
+ // When comparing zeroes, return +0.0 if one of the zeroes is positive.
+ if (Result.isZero() && RHS.isZero() && Result.isNegative())
+ Result = RHS;
+ else if (Result.isNaN() || RHS > Result)
+ Result = RHS;
+ return true;
+ }
+
+ case Builtin::BI__builtin_fmin:
+ case Builtin::BI__builtin_fminf:
+ case Builtin::BI__builtin_fminl:
+ case Builtin::BI__builtin_fminf16:
+ case Builtin::BI__builtin_fminf128: {
+ // TODO: Handle sNaN.
+ APFloat RHS(0.);
+ if (!EvaluateFloat(E->getArg(0), Result, Info) ||
+ !EvaluateFloat(E->getArg(1), RHS, Info))
+ return false;
+ // When comparing zeroes, return -0.0 if one of the zeroes is negative.
+ if (Result.isZero() && RHS.isZero() && RHS.isNegative())
+ Result = RHS;
+ else if (Result.isNaN() || RHS < Result)
+ Result = RHS;
+ return true;
+ }
}
}
@@ -14332,6 +15121,9 @@ bool ComplexExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
}
bool ComplexExprEvaluator::VisitCallExpr(const CallExpr *E) {
+ if (!IsConstantEvaluatedBuiltinCall(E))
+ return ExprEvaluatorBaseTy::VisitCallExpr(E);
+
switch (E->getBuiltinCallee()) {
case Builtin::BI__builtin_complex:
Result.makeComplexFloat();
@@ -14342,10 +15134,8 @@ bool ComplexExprEvaluator::VisitCallExpr(const CallExpr *E) {
return true;
default:
- break;
+ return false;
}
-
- return ExprEvaluatorBaseTy::VisitCallExpr(E);
}
//===----------------------------------------------------------------------===//
@@ -14380,6 +15170,9 @@ public:
switch (E->getCastKind()) {
default:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
+ case CK_NullToPointer:
+ VisitIgnoredValue(E->getSubExpr());
+ return ZeroInitialization(E);
case CK_NonAtomicToAtomic:
return This ? EvaluateInPlace(Result, Info, *This, E->getSubExpr())
: Evaluate(Result, Info, E->getSubExpr());
@@ -14421,6 +15214,9 @@ public:
}
bool VisitCallExpr(const CallExpr *E) {
+ if (!IsConstantEvaluatedBuiltinCall(E))
+ return ExprEvaluatorBaseTy::VisitCallExpr(E);
+
switch (E->getBuiltinCallee()) {
case Builtin::BI__assume:
case Builtin::BI__builtin_assume:
@@ -14431,10 +15227,8 @@ public:
return HandleOperatorDeleteCall(Info, E);
default:
- break;
+ return false;
}
-
- return ExprEvaluatorBaseTy::VisitCallExpr(E);
}
bool VisitCXXDeleteExpr(const CXXDeleteExpr *E);
@@ -14471,7 +15265,7 @@ bool VoidExprEvaluator::VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
return true;
}
- Optional<DynAlloc *> Alloc = CheckDeleteKind(
+ std::optional<DynAlloc *> Alloc = CheckDeleteKind(
Info, E, Pointer, E->isArrayForm() ? DynAlloc::ArrayNew : DynAlloc::New);
if (!Alloc)
return false;
@@ -14591,6 +15385,7 @@ static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) {
E, Unqual, ScopeKind::FullExpression, LV);
if (!EvaluateAtomic(E, &LV, Value, Info))
return false;
+ Result = Value;
} else {
if (!EvaluateAtomic(E, nullptr, Result, Info))
return false;
@@ -14639,25 +15434,29 @@ static bool EvaluateInPlace(APValue &Result, EvalInfo &Info, const LValue &This,
/// lvalue-to-rvalue cast if it is an lvalue.
static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result) {
assert(!E->isValueDependent());
+
+ if (E->getType().isNull())
+ return false;
+
+ if (!CheckLiteralType(Info, E))
+ return false;
+
if (Info.EnableNewConstInterp) {
if (!Info.Ctx.getInterpContext().evaluateAsRValue(Info, E, Result))
return false;
- } else {
- if (E->getType().isNull())
- return false;
+ return CheckConstantExpression(Info, E->getExprLoc(), E->getType(), Result,
+ ConstantExprKind::Normal);
+ }
- if (!CheckLiteralType(Info, E))
- return false;
+ if (!::Evaluate(Result, Info, E))
+ return false;
- if (!::Evaluate(Result, Info, E))
+ // Implicit lvalue-to-rvalue cast.
+ if (E->isGLValue()) {
+ LValue LV;
+ LV.setFrom(Info.Ctx, Result);
+ if (!handleLValueToRValueConversion(Info, E, E->getType(), LV, Result))
return false;
-
- if (E->isGLValue()) {
- LValue LV;
- LV.setFrom(Info.Ctx, Result);
- if (!handleLValueToRValueConversion(Info, E, E->getType(), LV, Result))
- return false;
- }
}
// Check this core constant expression is a constant expression.
@@ -14677,6 +15476,23 @@ static bool FastEvaluateAsRValue(const Expr *Exp, Expr::EvalResult &Result,
return true;
}
+ if (const auto *L = dyn_cast<CXXBoolLiteralExpr>(Exp)) {
+ Result.Val = APValue(APSInt(APInt(1, L->getValue())));
+ IsConst = true;
+ return true;
+ }
+
+ if (const auto *CE = dyn_cast<ConstantExpr>(Exp)) {
+ if (CE->hasAPValueResult()) {
+ Result.Val = CE->getAPValueResult();
+ IsConst = true;
+ return true;
+ }
+
+ // The SubExpr is usually just an IntegerLiteral.
+ return FastEvaluateAsRValue(CE->getSubExpr(), Result, Ctx, IsConst);
+ }
+
// This case should be rare, but we need to check it before we check on
// the type below.
if (Exp->getType().isNull()) {
@@ -14684,14 +15500,6 @@ static bool FastEvaluateAsRValue(const Expr *Exp, Expr::EvalResult &Result,
return true;
}
- // FIXME: Evaluating values of large array and record types can cause
- // performance problems. Only do so in C++11 for now.
- if (Exp->isPRValue() &&
- (Exp->getType()->isArrayType() || Exp->getType()->isRecordType()) &&
- !Ctx.getLangOpts().CPlusPlus11) {
- IsConst = false;
- return true;
- }
return false;
}
@@ -14754,6 +15562,7 @@ bool Expr::EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx,
bool InConstantContext) const {
assert(!isValueDependent() &&
"Expression evaluator can't be called on a dependent expression.");
+ ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsRValue");
EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects);
Info.InConstantContext = InConstantContext;
return ::EvaluateAsRValue(this, Result, Ctx, Info);
@@ -14763,6 +15572,7 @@ bool Expr::EvaluateAsBooleanCondition(bool &Result, const ASTContext &Ctx,
bool InConstantContext) const {
assert(!isValueDependent() &&
"Expression evaluator can't be called on a dependent expression.");
+ ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsBooleanCondition");
EvalResult Scratch;
return EvaluateAsRValue(Scratch, Ctx, InConstantContext) &&
HandleConversionToBool(Scratch.Val, Result);
@@ -14773,6 +15583,7 @@ bool Expr::EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx,
bool InConstantContext) const {
assert(!isValueDependent() &&
"Expression evaluator can't be called on a dependent expression.");
+ ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsInt");
EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects);
Info.InConstantContext = InConstantContext;
return ::EvaluateAsInt(this, Result, Ctx, AllowSideEffects, Info);
@@ -14783,6 +15594,7 @@ bool Expr::EvaluateAsFixedPoint(EvalResult &Result, const ASTContext &Ctx,
bool InConstantContext) const {
assert(!isValueDependent() &&
"Expression evaluator can't be called on a dependent expression.");
+ ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsFixedPoint");
EvalInfo Info(Ctx, Result, EvalInfo::EM_IgnoreSideEffects);
Info.InConstantContext = InConstantContext;
return ::EvaluateAsFixedPoint(this, Result, Ctx, AllowSideEffects, Info);
@@ -14797,6 +15609,7 @@ bool Expr::EvaluateAsFloat(APFloat &Result, const ASTContext &Ctx,
if (!getType()->isRealFloatingType())
return false;
+ ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsFloat");
EvalResult ExprResult;
if (!EvaluateAsRValue(ExprResult, Ctx, InConstantContext) ||
!ExprResult.Val.isFloat() ||
@@ -14812,6 +15625,7 @@ bool Expr::EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx,
assert(!isValueDependent() &&
"Expression evaluator can't be called on a dependent expression.");
+ ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsLValue");
EvalInfo Info(Ctx, Result, EvalInfo::EM_ConstantFold);
Info.InConstantContext = InConstantContext;
LValue LV;
@@ -14855,11 +15669,22 @@ bool Expr::EvaluateAsConstantExpr(EvalResult &Result, const ASTContext &Ctx,
ConstantExprKind Kind) const {
assert(!isValueDependent() &&
"Expression evaluator can't be called on a dependent expression.");
+ bool IsConst;
+ if (FastEvaluateAsRValue(this, Result, Ctx, IsConst) && Result.Val.hasValue())
+ return true;
+ ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateAsConstantExpr");
EvalInfo::EvaluationMode EM = EvalInfo::EM_ConstantExpression;
EvalInfo Info(Ctx, Result, EM);
Info.InConstantContext = true;
+ if (Info.EnableNewConstInterp) {
+ if (!Info.Ctx.getInterpContext().evaluate(Info, this, Result.Val))
+ return false;
+ return CheckConstantExpression(Info, getExprLoc(),
+ getStorageType(Ctx, this), Result.Val, Kind);
+ }
+
// The type of the object we're initializing is 'const T' for a class NTTP.
QualType T = getType();
if (Kind == ConstantExprKind::ClassTemplateArgument)
@@ -14870,16 +15695,26 @@ bool Expr::EvaluateAsConstantExpr(EvalResult &Result, const ASTContext &Ctx,
// this doesn't escape.
MaterializeTemporaryExpr BaseMTE(T, const_cast<Expr*>(this), true);
APValue::LValueBase Base(&BaseMTE);
-
Info.setEvaluatingDecl(Base, Result.Val);
- LValue LVal;
- LVal.set(Base);
- if (!::EvaluateInPlace(Result.Val, Info, LVal, this) || Result.HasSideEffects)
- return false;
+ if (Info.EnableNewConstInterp) {
+ if (!Info.Ctx.getInterpContext().evaluateAsRValue(Info, this, Result.Val))
+ return false;
+ } else {
+ LValue LVal;
+ LVal.set(Base);
+ // C++23 [intro.execution]/p5
+ // A full-expression is [...] a constant-expression
+ // So we need to make sure temporary objects are destroyed after having
+ // evaluating the expression (per C++23 [class.temporary]/p4).
+ FullExpressionRAII Scope(Info);
+ if (!::EvaluateInPlace(Result.Val, Info, LVal, this) ||
+ Result.HasSideEffects || !Scope.destroy())
+ return false;
- if (!Info.discardCleanups())
- llvm_unreachable("Unhandled cleanup; missing full expression marker?");
+ if (!Info.discardCleanups())
+ llvm_unreachable("Unhandled cleanup; missing full expression marker?");
+ }
if (!CheckConstantExpression(Info, getExprLoc(), getStorageType(Ctx, this),
Result.Val, Kind))
@@ -14903,24 +15738,27 @@ bool Expr::EvaluateAsConstantExpr(EvalResult &Result, const ASTContext &Ctx,
bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
const VarDecl *VD,
- SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
+ SmallVectorImpl<PartialDiagnosticAt> &Notes,
+ bool IsConstantInitialization) const {
assert(!isValueDependent() &&
"Expression evaluator can't be called on a dependent expression.");
- // FIXME: Evaluating initializers for large array and record types can cause
- // performance problems. Only do so in C++11 for now.
- if (isPRValue() && (getType()->isArrayType() || getType()->isRecordType()) &&
- !Ctx.getLangOpts().CPlusPlus11)
- return false;
+ llvm::TimeTraceScope TimeScope("EvaluateAsInitializer", [&] {
+ std::string Name;
+ llvm::raw_string_ostream OS(Name);
+ VD->printQualifiedName(OS);
+ return Name;
+ });
Expr::EvalStatus EStatus;
EStatus.Diag = &Notes;
- EvalInfo Info(Ctx, EStatus, VD->isConstexpr()
- ? EvalInfo::EM_ConstantExpression
- : EvalInfo::EM_ConstantFold);
+ EvalInfo Info(Ctx, EStatus,
+ (IsConstantInitialization && Ctx.getLangOpts().CPlusPlus)
+ ? EvalInfo::EM_ConstantExpression
+ : EvalInfo::EM_ConstantFold);
Info.setEvaluatingDecl(VD, Value);
- Info.InConstantContext = true;
+ Info.InConstantContext = IsConstantInitialization;
SourceLocation DeclLoc = VD->getLocation();
QualType DeclTy = VD->getType();
@@ -14929,14 +15767,29 @@ bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
auto &InterpCtx = const_cast<ASTContext &>(Ctx).getInterpContext();
if (!InterpCtx.evaluateAsInitializer(Info, VD, Value))
return false;
+
+ return CheckConstantExpression(Info, DeclLoc, DeclTy, Value,
+ ConstantExprKind::Normal);
} else {
LValue LVal;
LVal.set(VD);
- if (!EvaluateInPlace(Value, Info, LVal, this,
- /*AllowNonLiteralTypes=*/true) ||
- EStatus.HasSideEffects)
- return false;
+ {
+ // C++23 [intro.execution]/p5
+ // A full-expression is ... an init-declarator ([dcl.decl]) or a
+ // mem-initializer.
+ // So we need to make sure temporary objects are destroyed after having
+ // evaluated the expression (per C++23 [class.temporary]/p4).
+ //
+ // FIXME: Otherwise this may break test/Modules/pr68702.cpp because the
+ // serialization code calls ParmVarDecl::getDefaultArg() which strips the
+ // outermost FullExpr, such as ExprWithCleanups.
+ FullExpressionRAII Scope(Info);
+ if (!EvaluateInPlace(Value, Info, LVal, this,
+ /*AllowNonLiteralTypes=*/true) ||
+ EStatus.HasSideEffects)
+ return false;
+ }
// At this point, any lifetime-extended temporaries are completely
// initialized.
@@ -14945,6 +15798,7 @@ bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
if (!Info.discardCleanups())
llvm_unreachable("Unhandled cleanup; missing full expression marker?");
}
+
return CheckConstantExpression(Info, DeclLoc, DeclTy, Value,
ConstantExprKind::Normal) &&
CheckMemoryLeaks(Info);
@@ -14965,7 +15819,7 @@ bool VarDecl::evaluateDestruction(
APValue DestroyedValue;
if (getEvaluatedValue() && !getEvaluatedValue()->isAbsent())
DestroyedValue = *getEvaluatedValue();
- else if (!getDefaultInitValue(getType(), DestroyedValue))
+ else if (!handleDefaultInitValue(getType(), DestroyedValue))
return false;
if (!EvaluateDestruction(getASTContext(), this, std::move(DestroyedValue),
@@ -14994,6 +15848,7 @@ APSInt Expr::EvaluateKnownConstInt(const ASTContext &Ctx,
assert(!isValueDependent() &&
"Expression evaluator can't be called on a dependent expression.");
+ ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateKnownConstInt");
EvalResult EVResult;
EVResult.Diag = Diag;
EvalInfo Info(Ctx, EVResult, EvalInfo::EM_IgnoreSideEffects);
@@ -15012,6 +15867,7 @@ APSInt Expr::EvaluateKnownConstIntCheckOverflow(
assert(!isValueDependent() &&
"Expression evaluator can't be called on a dependent expression.");
+ ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateKnownConstIntCheckOverflow");
EvalResult EVResult;
EVResult.Diag = Diag;
EvalInfo Info(Ctx, EVResult, EvalInfo::EM_IgnoreSideEffects);
@@ -15030,6 +15886,7 @@ void Expr::EvaluateForOverflow(const ASTContext &Ctx) const {
assert(!isValueDependent() &&
"Expression evaluator can't be called on a dependent expression.");
+ ExprTimeTraceScope TimeScope(this, Ctx, "EvaluateForOverflow");
bool IsConst;
EvalResult EVResult;
if (!FastEvaluateAsRValue(this, EVResult, Ctx, IsConst)) {
@@ -15191,6 +16048,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::DependentCoawaitExprClass:
case Expr::CoyieldExprClass:
case Expr::SYCLUniqueStableNameExprClass:
+ case Expr::CXXParenListInitExprClass:
return ICEDiag(IK_NotICE, E->getBeginLoc());
case Expr::InitListExprClass: {
@@ -15357,7 +16215,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
llvm::APSInt REval = Exp->getRHS()->EvaluateKnownConstInt(Ctx);
if (REval == 0)
return ICEDiag(IK_ICEIfUnevaluated, E->getBeginLoc());
- if (REval.isSigned() && REval.isAllOnesValue()) {
+ if (REval.isSigned() && REval.isAllOnes()) {
llvm::APSInt LEval = Exp->getLHS()->EvaluateKnownConstInt(Ctx);
if (LEval.isMinSignedValue())
return ICEDiag(IK_ICEIfUnevaluated, E->getBeginLoc());
@@ -15521,6 +16379,8 @@ bool Expr::isIntegerConstantExpr(const ASTContext &Ctx,
assert(!isValueDependent() &&
"Expression evaluator can't be called on a dependent expression.");
+ ExprTimeTraceScope TimeScope(this, Ctx, "isIntegerConstantExpr");
+
if (Ctx.getLangOpts().CPlusPlus11)
return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, nullptr, Loc);
@@ -15532,22 +16392,23 @@ bool Expr::isIntegerConstantExpr(const ASTContext &Ctx,
return true;
}
-Optional<llvm::APSInt> Expr::getIntegerConstantExpr(const ASTContext &Ctx,
- SourceLocation *Loc,
- bool isEvaluated) const {
- assert(!isValueDependent() &&
- "Expression evaluator can't be called on a dependent expression.");
+std::optional<llvm::APSInt>
+Expr::getIntegerConstantExpr(const ASTContext &Ctx, SourceLocation *Loc) const {
+ if (isValueDependent()) {
+ // Expression evaluator can't succeed on a dependent expression.
+ return std::nullopt;
+ }
APSInt Value;
if (Ctx.getLangOpts().CPlusPlus11) {
if (EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, &Value, Loc))
return Value;
- return None;
+ return std::nullopt;
}
if (!isIntegerConstantExpr(Ctx, Loc))
- return None;
+ return std::nullopt;
// The only possible side-effects here are due to UB discovered in the
// evaluation (for instance, INT_MAX + 1). In such a case, we are still
@@ -15611,6 +16472,14 @@ bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx,
assert(!isValueDependent() &&
"Expression evaluator can't be called on a dependent expression.");
+ llvm::TimeTraceScope TimeScope("EvaluateWithSubstitution", [&] {
+ std::string Name;
+ llvm::raw_string_ostream OS(Name);
+ Callee->getNameForDiagnostic(OS, Ctx.getPrintingPolicy(),
+ /*Qualified=*/true);
+ return Name;
+ });
+
Expr::EvalStatus Status;
EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantExpressionUnevaluated);
Info.InConstantContext = true;
@@ -15621,7 +16490,8 @@ bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx,
#ifndef NDEBUG
auto *MD = dyn_cast<CXXMethodDecl>(Callee);
assert(MD && "Don't provide `this` for non-methods.");
- assert(!MD->isStatic() && "Don't provide `this` for static methods.");
+ assert(MD->isImplicitObjectMemberFunction() &&
+ "Don't provide `this` for methods without an implicit object.");
#endif
if (!This->isValueDependent() &&
EvaluateObjectArgument(Info, This, ThisVal) &&
@@ -15659,7 +16529,8 @@ bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx,
Info.EvalStatus.HasSideEffects = false;
// Build fake call to Callee.
- CallStackFrame Frame(Info, Callee->getLocation(), Callee, ThisPtr, Call);
+ CallStackFrame Frame(Info, Callee->getLocation(), Callee, ThisPtr, This,
+ Call);
// FIXME: Missing ExprWithCleanups in enable_if conditions?
FullExpressionRAII Scope(Info);
return Evaluate(Value, Info, this) && Scope.destroy() &&
@@ -15675,6 +16546,14 @@ bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
if (FD->isDependentContext())
return true;
+ llvm::TimeTraceScope TimeScope("isPotentialConstantExpr", [&] {
+ std::string Name;
+ llvm::raw_string_ostream OS(Name);
+ FD->getNameForDiagnostic(OS, FD->getASTContext().getPrintingPolicy(),
+ /*Qualified=*/true);
+ return Name;
+ });
+
Expr::EvalStatus Status;
Status.Diag = &Diags;
@@ -15707,8 +16586,10 @@ bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
HandleConstructorCall(&VIE, This, Args, CD, Info, Scratch);
} else {
SourceLocation Loc = FD->getLocation();
- HandleFunctionCall(Loc, FD, (MD && MD->isInstance()) ? &This : nullptr,
- Args, CallRef(), FD->getBody(), Info, Scratch, nullptr);
+ HandleFunctionCall(
+ Loc, FD, (MD && MD->isImplicitObjectMemberFunction()) ? &This : nullptr,
+ &VIE, Args, CallRef(), FD->getBody(), Info, Scratch,
+ /*ResultSlot=*/nullptr);
}
return Diags.empty();
@@ -15730,7 +16611,8 @@ bool Expr::isPotentialConstantExprUnevaluated(Expr *E,
Info.CheckingPotentialConstantExpression = true;
// Fabricate a call stack frame to give the arguments a plausible cover story.
- CallStackFrame Frame(Info, SourceLocation(), FD, /*This*/ nullptr, CallRef());
+ CallStackFrame Frame(Info, SourceLocation(), FD, /*This=*/nullptr,
+ /*CallExpr=*/nullptr, CallRef());
APValue ResultScratch;
Evaluate(ResultScratch, Info, E);
@@ -15746,3 +16628,97 @@ bool Expr::tryEvaluateObjectSize(uint64_t &Result, ASTContext &Ctx,
EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantFold);
return tryEvaluateBuiltinObjectSize(this, Type, Info, Result);
}
+
+static bool EvaluateBuiltinStrLen(const Expr *E, uint64_t &Result,
+ EvalInfo &Info) {
+ if (!E->getType()->hasPointerRepresentation() || !E->isPRValue())
+ return false;
+
+ LValue String;
+
+ if (!EvaluatePointer(E, String, Info))
+ return false;
+
+ QualType CharTy = E->getType()->getPointeeType();
+
+ // Fast path: if it's a string literal, search the string value.
+ if (const StringLiteral *S = dyn_cast_or_null<StringLiteral>(
+ String.getLValueBase().dyn_cast<const Expr *>())) {
+ StringRef Str = S->getBytes();
+ int64_t Off = String.Offset.getQuantity();
+ if (Off >= 0 && (uint64_t)Off <= (uint64_t)Str.size() &&
+ S->getCharByteWidth() == 1 &&
+ // FIXME: Add fast-path for wchar_t too.
+ Info.Ctx.hasSameUnqualifiedType(CharTy, Info.Ctx.CharTy)) {
+ Str = Str.substr(Off);
+
+ StringRef::size_type Pos = Str.find(0);
+ if (Pos != StringRef::npos)
+ Str = Str.substr(0, Pos);
+
+ Result = Str.size();
+ return true;
+ }
+
+ // Fall through to slow path.
+ }
+
+ // Slow path: scan the bytes of the string looking for the terminating 0.
+ for (uint64_t Strlen = 0; /**/; ++Strlen) {
+ APValue Char;
+ if (!handleLValueToRValueConversion(Info, E, CharTy, String, Char) ||
+ !Char.isInt())
+ return false;
+ if (!Char.getInt()) {
+ Result = Strlen;
+ return true;
+ }
+ if (!HandleLValueArrayAdjustment(Info, E, String, CharTy, 1))
+ return false;
+ }
+}
+
+bool Expr::EvaluateCharRangeAsString(std::string &Result,
+ const Expr *SizeExpression,
+ const Expr *PtrExpression, ASTContext &Ctx,
+ EvalResult &Status) const {
+ LValue String;
+ EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantExpression);
+ Info.InConstantContext = true;
+
+ FullExpressionRAII Scope(Info);
+ APSInt SizeValue;
+ if (!::EvaluateInteger(SizeExpression, SizeValue, Info))
+ return false;
+
+ int64_t Size = SizeValue.getExtValue();
+
+ if (!::EvaluatePointer(PtrExpression, String, Info))
+ return false;
+
+ QualType CharTy = PtrExpression->getType()->getPointeeType();
+ for (int64_t I = 0; I < Size; ++I) {
+ APValue Char;
+ if (!handleLValueToRValueConversion(Info, PtrExpression, CharTy, String,
+ Char))
+ return false;
+
+ APSInt C = Char.getInt();
+ Result.push_back(static_cast<char>(C.getExtValue()));
+ if (!HandleLValueArrayAdjustment(Info, PtrExpression, String, CharTy, 1))
+ return false;
+ }
+ if (!Scope.destroy())
+ return false;
+
+ if (!CheckMemoryLeaks(Info))
+ return false;
+
+ return true;
+}
+
+bool Expr::tryEvaluateStrLen(uint64_t &Result, ASTContext &Ctx) const {
+ Expr::EvalStatus Status;
+ EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantFold);
+ return EvaluateBuiltinStrLen(this, Result, Info);
+}
diff --git a/contrib/llvm-project/clang/lib/AST/ExprObjC.cpp b/contrib/llvm-project/clang/lib/AST/ExprObjC.cpp
index 7d932c8b059d..a3222c2da24f 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprObjC.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprObjC.cpp
@@ -271,20 +271,7 @@ QualType ObjCMessageExpr::getCallReturnType(ASTContext &Ctx) const {
}
return QT;
}
-
- // Expression type might be different from an expected call return type,
- // as expression type would never be a reference even if call returns a
- // reference. Reconstruct the original expression type.
- QualType QT = getType();
- switch (getValueKind()) {
- case VK_LValue:
- return Ctx.getLValueReferenceType(QT);
- case VK_XValue:
- return Ctx.getRValueReferenceType(QT);
- case VK_PRValue:
- return QT;
- }
- llvm_unreachable("Unsupported ExprValueKind");
+ return Ctx.getReferenceQualifiedType(this);
}
SourceRange ObjCMessageExpr::getReceiverRange() const {
diff --git a/contrib/llvm-project/clang/lib/AST/ExternalASTMerger.cpp b/contrib/llvm-project/clang/lib/AST/ExternalASTMerger.cpp
index c7789b707b21..8bad3b36244e 100644
--- a/contrib/llvm-project/clang/lib/AST/ExternalASTMerger.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExternalASTMerger.cpp
@@ -187,10 +187,7 @@ public:
/// Implements the ASTImporter interface for tracking back a declaration
/// to its original declaration it came from.
Decl *GetOriginalDecl(Decl *To) override {
- auto It = ToOrigin.find(To);
- if (It != ToOrigin.end())
- return It->second;
- return nullptr;
+ return ToOrigin.lookup(To);
}
/// Whenever a DeclContext is imported, ensure that ExternalASTSource's origin
@@ -425,16 +422,14 @@ void ExternalASTMerger::RemoveSources(llvm::ArrayRef<ImporterSource> Sources) {
logs() << "(ExternalASTMerger*)" << (void *)this
<< " removing source (ASTContext*)" << (void *)&S.getASTContext()
<< "\n";
- Importers.erase(
- std::remove_if(Importers.begin(), Importers.end(),
- [&Sources](std::unique_ptr<ASTImporter> &Importer) -> bool {
- for (const ImporterSource &S : Sources) {
- if (&Importer->getFromContext() == &S.getASTContext())
- return true;
- }
- return false;
- }),
- Importers.end());
+ llvm::erase_if(Importers,
+ [&Sources](std::unique_ptr<ASTImporter> &Importer) -> bool {
+ for (const ImporterSource &S : Sources) {
+ if (&Importer->getFromContext() == &S.getASTContext())
+ return true;
+ }
+ return false;
+ });
for (OriginMap::iterator OI = Origins.begin(), OE = Origins.end(); OI != OE; ) {
std::pair<const DeclContext *, DCOrigin> Origin = *OI;
bool Erase = false;
@@ -543,4 +538,3 @@ void ExternalASTMerger::FindExternalLexicalDecls(
return false;
});
}
-
diff --git a/contrib/llvm-project/clang/lib/AST/ExternalASTSource.cpp b/contrib/llvm-project/clang/lib/AST/ExternalASTSource.cpp
index 257833182621..090ef02aa422 100644
--- a/contrib/llvm-project/clang/lib/AST/ExternalASTSource.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExternalASTSource.cpp
@@ -20,9 +20,9 @@
#include "clang/Basic/LLVM.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
-#include "llvm/ADT/None.h"
#include "llvm/Support/ErrorHandling.h"
#include <cstdint>
+#include <optional>
using namespace clang;
@@ -30,9 +30,9 @@ char ExternalASTSource::ID;
ExternalASTSource::~ExternalASTSource() = default;
-llvm::Optional<ASTSourceDescriptor>
+std::optional<ASTSourceDescriptor>
ExternalASTSource::getSourceDescriptor(unsigned ID) {
- return None;
+ return std::nullopt;
}
ExternalASTSource::ExtKind
diff --git a/contrib/llvm-project/clang/lib/AST/FormatString.cpp b/contrib/llvm-project/clang/lib/AST/FormatString.cpp
index 83b952116a5e..c5d14b4af7ff 100644
--- a/contrib/llvm-project/clang/lib/AST/FormatString.cpp
+++ b/contrib/llvm-project/clang/lib/AST/FormatString.cpp
@@ -15,13 +15,13 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/Support/ConvertUTF.h"
+#include <optional>
using clang::analyze_format_string::ArgType;
using clang::analyze_format_string::FormatStringHandler;
using clang::analyze_format_string::FormatSpecifier;
using clang::analyze_format_string::LengthModifier;
using clang::analyze_format_string::OptionalAmount;
-using clang::analyze_format_string::PositionContext;
using clang::analyze_format_string::ConversionSpecifier;
using namespace clang;
@@ -322,6 +322,12 @@ bool clang::analyze_format_string::ParseUTF8InvalidSpecifier(
clang::analyze_format_string::ArgType::MatchKind
ArgType::matchesType(ASTContext &C, QualType argTy) const {
+ // When using the format attribute in C++, you can receive a function or an
+ // array that will necessarily decay to a pointer when passed to the final
+ // format consumer. Apply decay before type comparison.
+ if (argTy->canDecayToPointerType())
+ argTy = C.getDecayedType(argTy);
+
if (Ptr) {
// It has to be a pointer.
const PointerType *PT = argTy->getAs<PointerType>();
@@ -343,72 +349,161 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const {
return Match;
case AnyCharTy: {
- if (const EnumType *ETy = argTy->getAs<EnumType>()) {
+ if (const auto *ETy = argTy->getAs<EnumType>()) {
// If the enum is incomplete we know nothing about the underlying type.
- // Assume that it's 'int'.
+ // Assume that it's 'int'. Do not use the underlying type for a scoped
+ // enumeration.
if (!ETy->getDecl()->isComplete())
return NoMatch;
- argTy = ETy->getDecl()->getIntegerType();
+ if (ETy->isUnscopedEnumerationType())
+ argTy = ETy->getDecl()->getIntegerType();
}
- if (const BuiltinType *BT = argTy->getAs<BuiltinType>())
+ if (const auto *BT = argTy->getAs<BuiltinType>()) {
+ // The types are perfectly matched?
switch (BT->getKind()) {
+ default:
+ break;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Char_U:
+ return Match;
+ case BuiltinType::Bool:
+ if (!Ptr)
+ return Match;
+ break;
+ }
+ // "Partially matched" because of promotions?
+ if (!Ptr) {
+ switch (BT->getKind()) {
default:
break;
- case BuiltinType::Char_S:
- case BuiltinType::SChar:
- case BuiltinType::UChar:
- case BuiltinType::Char_U:
- case BuiltinType::Bool:
- return Match;
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ return MatchPromotion;
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ return NoMatchPromotionTypeConfusion;
+ }
}
+ }
return NoMatch;
}
case SpecificTy: {
if (const EnumType *ETy = argTy->getAs<EnumType>()) {
// If the enum is incomplete we know nothing about the underlying type.
- // Assume that it's 'int'.
+ // Assume that it's 'int'. Do not use the underlying type for a scoped
+ // enumeration as that needs an exact match.
if (!ETy->getDecl()->isComplete())
argTy = C.IntTy;
- else
+ else if (ETy->isUnscopedEnumerationType())
argTy = ETy->getDecl()->getIntegerType();
}
argTy = C.getCanonicalType(argTy).getUnqualifiedType();
if (T == argTy)
return Match;
- // Check for "compatible types".
- if (const BuiltinType *BT = argTy->getAs<BuiltinType>())
+ if (const auto *BT = argTy->getAs<BuiltinType>()) {
+ // Check if the only difference between them is signed vs unsigned
+ // if true, we consider they are compatible.
switch (BT->getKind()) {
default:
break;
+ case BuiltinType::Bool:
+ if (Ptr && (T == C.UnsignedCharTy || T == C.SignedCharTy))
+ return NoMatch;
+ [[fallthrough]];
case BuiltinType::Char_S:
case BuiltinType::SChar:
case BuiltinType::Char_U:
case BuiltinType::UChar:
- case BuiltinType::Bool:
if (T == C.UnsignedShortTy || T == C.ShortTy)
return NoMatchTypeConfusion;
- return T == C.UnsignedCharTy || T == C.SignedCharTy ? Match
- : NoMatch;
+ if (T == C.UnsignedCharTy || T == C.SignedCharTy)
+ return Match;
+ break;
case BuiltinType::Short:
- return T == C.UnsignedShortTy ? Match : NoMatch;
+ if (T == C.UnsignedShortTy)
+ return Match;
+ break;
case BuiltinType::UShort:
- return T == C.ShortTy ? Match : NoMatch;
+ if (T == C.ShortTy)
+ return Match;
+ break;
case BuiltinType::Int:
- return T == C.UnsignedIntTy ? Match : NoMatch;
+ if (T == C.UnsignedIntTy)
+ return Match;
+ break;
case BuiltinType::UInt:
- return T == C.IntTy ? Match : NoMatch;
+ if (T == C.IntTy)
+ return Match;
+ break;
case BuiltinType::Long:
- return T == C.UnsignedLongTy ? Match : NoMatch;
+ if (T == C.UnsignedLongTy)
+ return Match;
+ break;
case BuiltinType::ULong:
- return T == C.LongTy ? Match : NoMatch;
+ if (T == C.LongTy)
+ return Match;
+ break;
case BuiltinType::LongLong:
- return T == C.UnsignedLongLongTy ? Match : NoMatch;
+ if (T == C.UnsignedLongLongTy)
+ return Match;
+ break;
case BuiltinType::ULongLong:
- return T == C.LongLongTy ? Match : NoMatch;
- }
+ if (T == C.LongLongTy)
+ return Match;
+ break;
+ }
+ // "Partially matched" because of promotions?
+ if (!Ptr) {
+ switch (BT->getKind()) {
+ default:
+ break;
+ case BuiltinType::Bool:
+ if (T == C.IntTy || T == C.UnsignedIntTy)
+ return MatchPromotion;
+ break;
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ if (T == C.SignedCharTy || T == C.UnsignedCharTy ||
+ T == C.ShortTy || T == C.UnsignedShortTy || T == C.WCharTy ||
+ T == C.WideCharTy)
+ return MatchPromotion;
+ break;
+ case BuiltinType::Char_U:
+ if (T == C.UnsignedIntTy)
+ return MatchPromotion;
+ if (T == C.UnsignedShortTy)
+ return NoMatchPromotionTypeConfusion;
+ break;
+ case BuiltinType::Char_S:
+ if (T == C.IntTy)
+ return MatchPromotion;
+ if (T == C.ShortTy)
+ return NoMatchPromotionTypeConfusion;
+ break;
+ case BuiltinType::Half:
+ case BuiltinType::Float:
+ if (T == C.DoubleTy)
+ return MatchPromotion;
+ break;
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ if (T == C.SignedCharTy || T == C.UnsignedCharTy)
+ return NoMatchPromotionTypeConfusion;
+ break;
+ case BuiltinType::WChar_U:
+ case BuiltinType::WChar_S:
+ if (T != C.WCharTy && T != C.WideCharTy)
+ return NoMatchPromotionTypeConfusion;
+ }
+ }
+ }
return NoMatch;
}
@@ -446,7 +541,7 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const {
if (C.getCanonicalType(argTy).getUnqualifiedType() == WInt)
return Match;
- QualType PromoArg = argTy->isPromotableIntegerType()
+ QualType PromoArg = C.isPromotableIntegerType(argTy)
? C.getPromotedIntegerType(argTy)
: argTy;
PromoArg = C.getCanonicalType(PromoArg).getUnqualifiedType();
@@ -619,6 +714,8 @@ analyze_format_string::LengthModifier::toString() const {
const char *ConversionSpecifier::toString() const {
switch (kind) {
+ case bArg: return "b";
+ case BArg: return "B";
case dArg: return "d";
case DArg: return "D";
case iArg: return "i";
@@ -668,13 +765,13 @@ const char *ConversionSpecifier::toString() const {
return nullptr;
}
-Optional<ConversionSpecifier>
+std::optional<ConversionSpecifier>
ConversionSpecifier::getStandardSpecifier() const {
ConversionSpecifier::Kind NewKind;
switch (getKind()) {
default:
- return None;
+ return std::nullopt;
case DArg:
NewKind = dArg;
break;
@@ -740,7 +837,7 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target,
break;
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LengthModifier::AsChar:
case LengthModifier::AsLongLong:
case LengthModifier::AsQuad:
@@ -748,6 +845,8 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target,
case LengthModifier::AsSizeT:
case LengthModifier::AsPtrDiff:
switch (CS.getKind()) {
+ case ConversionSpecifier::bArg:
+ case ConversionSpecifier::BArg:
case ConversionSpecifier::dArg:
case ConversionSpecifier::DArg:
case ConversionSpecifier::iArg:
@@ -761,7 +860,7 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target,
return true;
case ConversionSpecifier::FreeBSDrArg:
case ConversionSpecifier::FreeBSDyArg:
- return Target.getTriple().isOSFreeBSD() || Target.getTriple().isPS4();
+ return Target.getTriple().isOSFreeBSD() || Target.getTriple().isPS();
default:
return false;
}
@@ -779,6 +878,8 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target,
}
switch (CS.getKind()) {
+ case ConversionSpecifier::bArg:
+ case ConversionSpecifier::BArg:
case ConversionSpecifier::dArg:
case ConversionSpecifier::DArg:
case ConversionSpecifier::iArg:
@@ -796,7 +897,7 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target,
return true;
case ConversionSpecifier::FreeBSDrArg:
case ConversionSpecifier::FreeBSDyArg:
- return Target.getTriple().isOSFreeBSD() || Target.getTriple().isPS4();
+ return Target.getTriple().isOSFreeBSD() || Target.getTriple().isPS();
default:
return false;
}
@@ -903,6 +1004,8 @@ bool FormatSpecifier::hasStandardLengthModifier() const {
bool FormatSpecifier::hasStandardConversionSpecifier(
const LangOptions &LangOpt) const {
switch (CS.getKind()) {
+ case ConversionSpecifier::bArg:
+ case ConversionSpecifier::BArg:
case ConversionSpecifier::cArg:
case ConversionSpecifier::dArg:
case ConversionSpecifier::iArg:
@@ -961,7 +1064,8 @@ bool FormatSpecifier::hasStandardLengthConversionCombination() const {
return true;
}
-Optional<LengthModifier> FormatSpecifier::getCorrectedLengthModifier() const {
+std::optional<LengthModifier>
+FormatSpecifier::getCorrectedLengthModifier() const {
if (CS.isAnyIntArg() || CS.getKind() == ConversionSpecifier::nArg) {
if (LM.getKind() == LengthModifier::AsLongDouble ||
LM.getKind() == LengthModifier::AsQuad) {
@@ -971,15 +1075,14 @@ Optional<LengthModifier> FormatSpecifier::getCorrectedLengthModifier() const {
}
}
- return None;
+ return std::nullopt;
}
bool FormatSpecifier::namedTypeToLengthModifier(QualType QT,
LengthModifier &LM) {
- assert(isa<TypedefType>(QT) && "Expected a TypedefType");
- const TypedefNameDecl *Typedef = cast<TypedefType>(QT)->getDecl();
-
- for (;;) {
+ for (/**/; const auto *TT = QT->getAs<TypedefType>();
+ QT = TT->getDecl()->getUnderlyingType()) {
+ const TypedefNameDecl *Typedef = TT->getDecl();
const IdentifierInfo *Identifier = Typedef->getIdentifier();
if (Identifier->getName() == "size_t") {
LM.setKind(LengthModifier::AsSizeT);
@@ -998,12 +1101,6 @@ bool FormatSpecifier::namedTypeToLengthModifier(QualType QT,
LM.setKind(LengthModifier::AsPtrDiff);
return true;
}
-
- QualType T = Typedef->getUnderlyingType();
- if (!isa<TypedefType>(T))
- break;
-
- Typedef = cast<TypedefType>(T)->getDecl();
}
return false;
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Boolean.h b/contrib/llvm-project/clang/lib/AST/Interp/Boolean.h
index 2baa717311bc..336f7941dfc4 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Boolean.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Boolean.h
@@ -22,17 +22,15 @@ namespace clang {
namespace interp {
/// Wrapper around boolean types.
-class Boolean {
+class Boolean final {
private:
/// Underlying boolean.
bool V;
- /// Construct a wrapper from a boolean.
- explicit Boolean(bool V) : V(V) {}
-
public:
/// Zero-initializes a boolean.
Boolean() : V(false) {}
+ explicit Boolean(bool V) : V(V) {}
bool operator<(Boolean RHS) const { return V < RHS.V; }
bool operator>(Boolean RHS) const { return V > RHS.V; }
@@ -44,11 +42,18 @@ class Boolean {
bool operator>(unsigned RHS) const { return static_cast<unsigned>(V) > RHS; }
Boolean operator-() const { return Boolean(V); }
+ Boolean operator-(const Boolean &Other) const { return Boolean(V - Other.V); }
Boolean operator~() const { return Boolean(true); }
- explicit operator unsigned() const { return V; }
+ explicit operator int8_t() const { return V; }
+ explicit operator uint8_t() const { return V; }
+ explicit operator int16_t() const { return V; }
+ explicit operator uint16_t() const { return V; }
+ explicit operator int32_t() const { return V; }
+ explicit operator uint32_t() const { return V; }
explicit operator int64_t() const { return V; }
explicit operator uint64_t() const { return V; }
+ explicit operator bool() const { return V; }
APSInt toAPSInt() const {
return APSInt(APInt(1, static_cast<uint64_t>(V), false), true);
@@ -60,7 +65,7 @@ class Boolean {
Boolean toUnsigned() const { return *this; }
- constexpr static unsigned bitWidth() { return true; }
+ constexpr static unsigned bitWidth() { return 1; }
bool isZero() const { return !V; }
bool isMin() const { return isZero(); }
@@ -80,13 +85,20 @@ class Boolean {
Boolean truncate(unsigned TruncBits) const { return *this; }
void print(llvm::raw_ostream &OS) const { OS << (V ? "true" : "false"); }
+ std::string toDiagnosticString(const ASTContext &Ctx) const {
+ std::string NameStr;
+ llvm::raw_string_ostream OS(NameStr);
+ print(OS);
+ return NameStr;
+ }
static Boolean min(unsigned NumBits) { return Boolean(false); }
static Boolean max(unsigned NumBits) { return Boolean(true); }
- template <typename T>
- static std::enable_if_t<std::is_integral<T>::value, Boolean> from(T Value) {
- return Boolean(Value != 0);
+ template <typename T> static Boolean from(T Value) {
+ if constexpr (std::is_integral<T>::value)
+ return Boolean(Value != 0);
+ return Boolean(static_cast<decltype(Boolean::V)>(Value) != 0);
}
template <unsigned SrcBits, bool SrcSign>
@@ -95,11 +107,6 @@ class Boolean {
return Boolean(!Value.isZero());
}
- template <bool SrcSign>
- static Boolean from(Integral<0, SrcSign> Value) {
- return Boolean(!Value.isZero());
- }
-
static Boolean zero() { return from(false); }
template <typename T>
@@ -134,6 +141,16 @@ class Boolean {
*R = Boolean(A.V && B.V);
return false;
}
+
+ static bool inv(Boolean A, Boolean *R) {
+ *R = Boolean(!A.V);
+ return false;
+ }
+
+ static bool neg(Boolean A, Boolean *R) {
+ *R = Boolean(A.V);
+ return false;
+ }
};
inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Boolean &B) {
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.cpp b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.cpp
index 7a4569820a1d..fd2a92d9d3f9 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.cpp
@@ -7,74 +7,133 @@
//===----------------------------------------------------------------------===//
#include "ByteCodeEmitter.h"
+#include "ByteCodeGenError.h"
#include "Context.h"
+#include "Floating.h"
#include "Opcode.h"
#include "Program.h"
+#include "clang/AST/ASTLambda.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/Basic/Builtins.h"
+#include <type_traits>
using namespace clang;
using namespace clang::interp;
-using APSInt = llvm::APSInt;
-using Error = llvm::Error;
-
-Expected<Function *> ByteCodeEmitter::compileFunc(const FunctionDecl *F) {
- // Do not try to compile undefined functions.
- if (!F->isDefined(F) || (!F->hasBody() && F->willHaveBody()))
- return nullptr;
-
+Function *ByteCodeEmitter::compileFunc(const FunctionDecl *FuncDecl) {
// Set up argument indices.
unsigned ParamOffset = 0;
SmallVector<PrimType, 8> ParamTypes;
+ SmallVector<unsigned, 8> ParamOffsets;
llvm::DenseMap<unsigned, Function::ParamDescriptor> ParamDescriptors;
- // If the return is not a primitive, a pointer to the storage where the value
- // is initialized in is passed as the first argument.
- QualType Ty = F->getReturnType();
+ // If the return is not a primitive, a pointer to the storage where the
+ // value is initialized in is passed as the first argument. See 'RVO'
+ // elsewhere in the code.
+ QualType Ty = FuncDecl->getReturnType();
+ bool HasRVO = false;
if (!Ty->isVoidType() && !Ctx.classify(Ty)) {
+ HasRVO = true;
ParamTypes.push_back(PT_Ptr);
+ ParamOffsets.push_back(ParamOffset);
ParamOffset += align(primSize(PT_Ptr));
}
- // Assign descriptors to all parameters.
- // Composite objects are lowered to pointers.
- for (const ParmVarDecl *PD : F->parameters()) {
- PrimType Ty;
- if (llvm::Optional<PrimType> T = Ctx.classify(PD->getType())) {
- Ty = *T;
- } else {
- Ty = PT_Ptr;
+ // If the function decl is a member decl, the next parameter is
+ // the 'this' pointer. This parameter is pop()ed from the
+ // InterpStack when calling the function.
+ bool HasThisPointer = false;
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FuncDecl)) {
+ if (MD->isImplicitObjectMemberFunction()) {
+ HasThisPointer = true;
+ ParamTypes.push_back(PT_Ptr);
+ ParamOffsets.push_back(ParamOffset);
+ ParamOffset += align(primSize(PT_Ptr));
}
- Descriptor *Desc = P.createDescriptor(PD, Ty);
- ParamDescriptors.insert({ParamOffset, {Ty, Desc}});
- Params.insert({PD, ParamOffset});
- ParamOffset += align(primSize(Ty));
- ParamTypes.push_back(Ty);
+ // Set up lambda capture to closure record field mapping.
+ if (isLambdaCallOperator(MD)) {
+ const Record *R = P.getOrCreateRecord(MD->getParent());
+ llvm::DenseMap<const ValueDecl *, FieldDecl *> LC;
+ FieldDecl *LTC;
+
+ MD->getParent()->getCaptureFields(LC, LTC);
+
+ for (auto Cap : LC) {
+ // Static lambdas cannot have any captures. If this one does,
+ // it has already been diagnosed and we can only ignore it.
+ if (MD->isStatic())
+ return nullptr;
+
+ unsigned Offset = R->getField(Cap.second)->Offset;
+ this->LambdaCaptures[Cap.first] = {
+ Offset, Cap.second->getType()->isReferenceType()};
+ }
+ if (LTC)
+ this->LambdaThisCapture = R->getField(LTC)->Offset;
+ }
+ }
+
+ // Assign descriptors to all parameters.
+ // Composite objects are lowered to pointers.
+ for (const ParmVarDecl *PD : FuncDecl->parameters()) {
+ std::optional<PrimType> T = Ctx.classify(PD->getType());
+ PrimType PT = T.value_or(PT_Ptr);
+ Descriptor *Desc = P.createDescriptor(PD, PT);
+ ParamDescriptors.insert({ParamOffset, {PT, Desc}});
+ Params.insert({PD, {ParamOffset, T != std::nullopt}});
+ ParamOffsets.push_back(ParamOffset);
+ ParamOffset += align(primSize(PT));
+ ParamTypes.push_back(PT);
}
// Create a handle over the emitted code.
- Function *Func = P.createFunction(F, ParamOffset, std::move(ParamTypes),
- std::move(ParamDescriptors));
- // Compile the function body.
- if (!F->isConstexpr() || !visitFunc(F)) {
- // Return a dummy function if compilation failed.
- if (BailLocation)
- return llvm::make_error<ByteCodeGenError>(*BailLocation);
- else
- return Func;
- } else {
- // Create scopes from descriptors.
- llvm::SmallVector<Scope, 2> Scopes;
- for (auto &DS : Descriptors) {
- Scopes.emplace_back(std::move(DS));
- }
+ Function *Func = P.getFunction(FuncDecl);
+ if (!Func) {
+ bool IsUnevaluatedBuiltin = false;
+ if (unsigned BI = FuncDecl->getBuiltinID())
+ IsUnevaluatedBuiltin = Ctx.getASTContext().BuiltinInfo.isUnevaluated(BI);
+
+ Func =
+ P.createFunction(FuncDecl, ParamOffset, std::move(ParamTypes),
+ std::move(ParamDescriptors), std::move(ParamOffsets),
+ HasThisPointer, HasRVO, IsUnevaluatedBuiltin);
+ }
- // Set the function's code.
- Func->setCode(NextLocalOffset, std::move(Code), std::move(SrcMap),
- std::move(Scopes));
+ assert(Func);
+ // For not-yet-defined functions, we only create a Function instance and
+ // compile their body later.
+ if (!FuncDecl->isDefined()) {
+ Func->setDefined(false);
return Func;
}
+
+ Func->setDefined(true);
+
+ // Lambda static invokers are a special case that we emit custom code for.
+ bool IsEligibleForCompilation = false;
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FuncDecl))
+ IsEligibleForCompilation = MD->isLambdaStaticInvoker();
+ if (!IsEligibleForCompilation)
+ IsEligibleForCompilation = FuncDecl->isConstexpr();
+
+ // Compile the function body.
+ if (!IsEligibleForCompilation || !visitFunc(FuncDecl)) {
+ Func->setIsFullyCompiled(true);
+ return Func;
+ }
+
+ // Create scopes from descriptors.
+ llvm::SmallVector<Scope, 2> Scopes;
+ for (auto &DS : Descriptors) {
+ Scopes.emplace_back(std::move(DS));
+ }
+
+ // Set the function's code.
+ Func->setCode(NextLocalOffset, std::move(Code), std::move(SrcMap),
+ std::move(Scopes), FuncDecl->hasBody());
+ Func->setIsFullyCompiled(true);
+ return Func;
}
Scope::Local ByteCodeEmitter::createLocal(Descriptor *D) {
@@ -87,15 +146,17 @@ Scope::Local ByteCodeEmitter::createLocal(Descriptor *D) {
void ByteCodeEmitter::emitLabel(LabelTy Label) {
const size_t Target = Code.size();
LabelOffsets.insert({Label, Target});
- auto It = LabelRelocs.find(Label);
- if (It != LabelRelocs.end()) {
+
+ if (auto It = LabelRelocs.find(Label);
+ It != LabelRelocs.end()) {
for (unsigned Reloc : It->second) {
using namespace llvm::support;
- /// Rewrite the operand of all jumps to this label.
- void *Location = Code.data() + Reloc - sizeof(int32_t);
+ // Rewrite the operand of all jumps to this label.
+ void *Location = Code.data() + Reloc - align(sizeof(int32_t));
+ assert(aligned(Location));
const int32_t Offset = Target - static_cast<int64_t>(Reloc);
- endian::write<int32_t, endianness::native, 1>(Location, Offset);
+ endian::write<int32_t, llvm::endianness::native>(Location, Offset);
}
LabelRelocs.erase(It);
}
@@ -103,48 +164,83 @@ void ByteCodeEmitter::emitLabel(LabelTy Label) {
int32_t ByteCodeEmitter::getOffset(LabelTy Label) {
// Compute the PC offset which the jump is relative to.
- const int64_t Position = Code.size() + sizeof(Opcode) + sizeof(int32_t);
+ const int64_t Position =
+ Code.size() + align(sizeof(Opcode)) + align(sizeof(int32_t));
+ assert(aligned(Position));
// If target is known, compute jump offset.
- auto It = LabelOffsets.find(Label);
- if (It != LabelOffsets.end()) {
+ if (auto It = LabelOffsets.find(Label);
+ It != LabelOffsets.end())
return It->second - Position;
- }
// Otherwise, record relocation and return dummy offset.
LabelRelocs[Label].push_back(Position);
return 0ull;
}
-bool ByteCodeEmitter::bail(const SourceLocation &Loc) {
- if (!BailLocation)
- BailLocation = Loc;
- return false;
+/// Helper to write bytecode and bail out if 32-bit offsets become invalid.
+/// Pointers will be automatically marshalled as 32-bit IDs.
+template <typename T>
+static void emit(Program &P, std::vector<std::byte> &Code, const T &Val,
+ bool &Success) {
+ size_t Size;
+
+ if constexpr (std::is_pointer_v<T>)
+ Size = sizeof(uint32_t);
+ else
+ Size = sizeof(T);
+
+ if (Code.size() + Size > std::numeric_limits<unsigned>::max()) {
+ Success = false;
+ return;
+ }
+
+ // Access must be aligned!
+ size_t ValPos = align(Code.size());
+ Size = align(Size);
+ assert(aligned(ValPos + Size));
+ Code.resize(ValPos + Size);
+
+ if constexpr (!std::is_pointer_v<T>) {
+ new (Code.data() + ValPos) T(Val);
+ } else {
+ uint32_t ID = P.getOrCreateNativePointer(Val);
+ new (Code.data() + ValPos) uint32_t(ID);
+ }
+}
+
+template <>
+void emit(Program &P, std::vector<std::byte> &Code, const Floating &Val,
+ bool &Success) {
+ size_t Size = Val.bytesToSerialize();
+
+ if (Code.size() + Size > std::numeric_limits<unsigned>::max()) {
+ Success = false;
+ return;
+ }
+
+ // Access must be aligned!
+ size_t ValPos = align(Code.size());
+ Size = align(Size);
+ assert(aligned(ValPos + Size));
+ Code.resize(ValPos + Size);
+
+ Val.serialize(Code.data() + ValPos);
}
template <typename... Tys>
bool ByteCodeEmitter::emitOp(Opcode Op, const Tys &... Args, const SourceInfo &SI) {
bool Success = true;
- /// Helper to write bytecode and bail out if 32-bit offsets become invalid.
- auto emit = [this, &Success](const char *Data, size_t Size) {
- if (Code.size() + Size > std::numeric_limits<unsigned>::max()) {
- Success = false;
- return;
- }
- Code.insert(Code.end(), Data, Data + Size);
- };
-
- /// The opcode is followed by arguments. The source info is
- /// attached to the address after the opcode.
- emit(reinterpret_cast<const char *>(&Op), sizeof(Opcode));
+ // The opcode is followed by arguments. The source info is
+ // attached to the address after the opcode.
+ emit(P, Code, Op, Success);
if (SI)
SrcMap.emplace_back(Code.size(), SI);
- /// The initializer list forces the expression to be evaluated
- /// for each argument in the variadic template, in order.
- (void)std::initializer_list<int>{
- (emit(reinterpret_cast<const char *>(&Args), sizeof(Args)), 0)...};
+ // The initializer list forces the expression to be evaluated
+ // for each argument in the variadic template, in order.
+ (void)std::initializer_list<int>{(emit(P, Code, Args, Success), 0)...};
return Success;
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.h b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.h
index 03452a350c96..03de286582c9 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeEmitter.h
@@ -13,19 +13,13 @@
#ifndef LLVM_CLANG_AST_INTERP_LINKEMITTER_H
#define LLVM_CLANG_AST_INTERP_LINKEMITTER_H
-#include "ByteCodeGenError.h"
#include "Context.h"
-#include "InterpStack.h"
-#include "InterpState.h"
#include "PrimType.h"
#include "Program.h"
#include "Source.h"
-#include "llvm/Support/Error.h"
namespace clang {
namespace interp {
-class Context;
-class SourceInfo;
enum Opcode : uint32_t;
/// An emitter which links the program to bytecode for later use.
@@ -37,7 +31,7 @@ protected:
public:
/// Compiles the function into the module.
- llvm::Expected<Function *> compileFunc(const FunctionDecl *F);
+ Function *compileFunc(const FunctionDecl *FuncDecl);
protected:
ByteCodeEmitter(Context &Ctx, Program &P) : Ctx(Ctx), P(P) {}
@@ -54,11 +48,6 @@ protected:
virtual bool visitExpr(const Expr *E) = 0;
virtual bool visitDecl(const VarDecl *E) = 0;
- /// Bails out if a given node cannot be compiled.
- bool bail(const Stmt *S) { return bail(S->getBeginLoc()); }
- bool bail(const Decl *D) { return bail(D->getBeginLoc()); }
- bool bail(const SourceLocation &Loc);
-
/// Emits jumps.
bool jumpTrue(const LabelTy &Label);
bool jumpFalse(const LabelTy &Label);
@@ -69,7 +58,11 @@ protected:
Local createLocal(Descriptor *D);
/// Parameter indices.
- llvm::DenseMap<const ParmVarDecl *, unsigned> Params;
+ llvm::DenseMap<const ParmVarDecl *, ParamOffset> Params;
+ /// Lambda captures.
+ llvm::DenseMap<const ValueDecl *, ParamOffset> LambdaCaptures;
+ /// Offset of the This parameter in a lambda record.
+ unsigned LambdaThisCapture = 0;
/// Local descriptors.
llvm::SmallVector<SmallVector<Local, 8>, 2> Descriptors;
@@ -82,14 +75,12 @@ private:
LabelTy NextLabel = 0;
/// Offset of the next local variable.
unsigned NextLocalOffset = 0;
- /// Location of a failure.
- llvm::Optional<SourceLocation> BailLocation;
/// Label information for linker.
llvm::DenseMap<LabelTy, unsigned> LabelOffsets;
/// Location of label relocations.
llvm::DenseMap<LabelTy, llvm::SmallVector<unsigned, 5>> LabelRelocs;
/// Program code.
- std::vector<char> Code;
+ std::vector<std::byte> Code;
/// Opcode to expression mapping.
SourceMap SrcMap;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.cpp b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.cpp
index 5c8cb4274260..cfcef067b92b 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.cpp
@@ -9,69 +9,56 @@
#include "ByteCodeExprGen.h"
#include "ByteCodeEmitter.h"
#include "ByteCodeGenError.h"
+#include "ByteCodeStmtGen.h"
#include "Context.h"
+#include "Floating.h"
#include "Function.h"
#include "PrimType.h"
#include "Program.h"
-#include "State.h"
using namespace clang;
using namespace clang::interp;
using APSInt = llvm::APSInt;
-template <typename T> using Expected = llvm::Expected<T>;
-template <typename T> using Optional = llvm::Optional<T>;
namespace clang {
namespace interp {
/// Scope used to handle temporaries in toplevel variable declarations.
-template <class Emitter> class DeclScope final : public LocalScope<Emitter> {
+template <class Emitter> class DeclScope final : public VariableScope<Emitter> {
public:
- DeclScope(ByteCodeExprGen<Emitter> *Ctx, const VarDecl *VD)
- : LocalScope<Emitter>(Ctx), Scope(Ctx->P, VD) {}
+ DeclScope(ByteCodeExprGen<Emitter> *Ctx, const ValueDecl *VD)
+ : VariableScope<Emitter>(Ctx), Scope(Ctx->P, VD),
+ OldGlobalDecl(Ctx->GlobalDecl) {
+ Ctx->GlobalDecl = Context::shouldBeGloballyIndexed(VD);
+ }
void addExtended(const Scope::Local &Local) override {
return this->addLocal(Local);
}
+ ~DeclScope() { this->Ctx->GlobalDecl = OldGlobalDecl; }
+
private:
Program::DeclScope Scope;
+ bool OldGlobalDecl;
};
/// Scope used to handle initialization methods.
-template <class Emitter> class OptionScope {
+template <class Emitter> class OptionScope final {
public:
- using InitFnRef = typename ByteCodeExprGen<Emitter>::InitFnRef;
- using ChainedInitFnRef = std::function<bool(InitFnRef)>;
-
/// Root constructor, compiling or discarding primitives.
- OptionScope(ByteCodeExprGen<Emitter> *Ctx, bool NewDiscardResult)
+ OptionScope(ByteCodeExprGen<Emitter> *Ctx, bool NewDiscardResult,
+ bool NewInitializing)
: Ctx(Ctx), OldDiscardResult(Ctx->DiscardResult),
- OldInitFn(std::move(Ctx->InitFn)) {
+ OldInitializing(Ctx->Initializing) {
Ctx->DiscardResult = NewDiscardResult;
- Ctx->InitFn = llvm::Optional<InitFnRef>{};
- }
-
- /// Root constructor, setting up compilation state.
- OptionScope(ByteCodeExprGen<Emitter> *Ctx, InitFnRef NewInitFn)
- : Ctx(Ctx), OldDiscardResult(Ctx->DiscardResult),
- OldInitFn(std::move(Ctx->InitFn)) {
- Ctx->DiscardResult = true;
- Ctx->InitFn = NewInitFn;
- }
-
- /// Extends the chain of initialisation pointers.
- OptionScope(ByteCodeExprGen<Emitter> *Ctx, ChainedInitFnRef NewInitFn)
- : Ctx(Ctx), OldDiscardResult(Ctx->DiscardResult),
- OldInitFn(std::move(Ctx->InitFn)) {
- assert(OldInitFn && "missing initializer");
- Ctx->InitFn = [this, NewInitFn] { return NewInitFn(*OldInitFn); };
+ Ctx->Initializing = NewInitializing;
}
~OptionScope() {
Ctx->DiscardResult = OldDiscardResult;
- Ctx->InitFn = std::move(OldInitFn);
+ Ctx->Initializing = OldInitializing;
}
private:
@@ -79,8 +66,7 @@ private:
ByteCodeExprGen<Emitter> *Ctx;
/// Old discard flag to restore.
bool OldDiscardResult;
- /// Old pointer emitter to restore.
- llvm::Optional<InitFnRef> OldInitFn;
+ bool OldInitializing;
};
} // namespace interp
@@ -88,12 +74,12 @@ private:
template <class Emitter>
bool ByteCodeExprGen<Emitter>::VisitCastExpr(const CastExpr *CE) {
- auto *SubExpr = CE->getSubExpr();
+ const Expr *SubExpr = CE->getSubExpr();
switch (CE->getCastKind()) {
case CK_LValueToRValue: {
return dereference(
- CE->getSubExpr(), DerefKind::Read,
+ SubExpr, DerefKind::Read,
[](PrimType) {
// Value loaded - nothing to do here.
return true;
@@ -106,23 +92,235 @@ bool ByteCodeExprGen<Emitter>::VisitCastExpr(const CastExpr *CE) {
});
}
- case CK_ArrayToPointerDecay:
+ case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBase: {
+ if (!this->visit(SubExpr))
+ return false;
+
+ unsigned DerivedOffset = collectBaseOffset(getRecordTy(CE->getType()),
+ getRecordTy(SubExpr->getType()));
+
+ return this->emitGetPtrBasePop(DerivedOffset, CE);
+ }
+
+ case CK_BaseToDerived: {
+ if (!this->visit(SubExpr))
+ return false;
+
+ unsigned DerivedOffset = collectBaseOffset(getRecordTy(SubExpr->getType()),
+ getRecordTy(CE->getType()));
+
+ return this->emitGetPtrDerivedPop(DerivedOffset, CE);
+ }
+
+ case CK_FloatingCast: {
+ if (DiscardResult)
+ return this->discard(SubExpr);
+ if (!this->visit(SubExpr))
+ return false;
+ const auto *TargetSemantics = &Ctx.getFloatSemantics(CE->getType());
+ return this->emitCastFP(TargetSemantics, getRoundingMode(CE), CE);
+ }
+
+ case CK_IntegralToFloating: {
+ if (DiscardResult)
+ return this->discard(SubExpr);
+ std::optional<PrimType> FromT = classify(SubExpr->getType());
+ if (!FromT)
+ return false;
+
+ if (!this->visit(SubExpr))
+ return false;
+
+ const auto *TargetSemantics = &Ctx.getFloatSemantics(CE->getType());
+ llvm::RoundingMode RM = getRoundingMode(CE);
+ return this->emitCastIntegralFloating(*FromT, TargetSemantics, RM, CE);
+ }
+
+ case CK_FloatingToBoolean:
+ case CK_FloatingToIntegral: {
+ if (DiscardResult)
+ return this->discard(SubExpr);
+
+ std::optional<PrimType> ToT = classify(CE->getType());
+
+ if (!ToT)
+ return false;
+
+ if (!this->visit(SubExpr))
+ return false;
+
+ if (ToT == PT_IntAP)
+ return this->emitCastFloatingIntegralAP(Ctx.getBitWidth(CE->getType()),
+ CE);
+ if (ToT == PT_IntAPS)
+ return this->emitCastFloatingIntegralAPS(Ctx.getBitWidth(CE->getType()),
+ CE);
+
+ return this->emitCastFloatingIntegral(*ToT, CE);
+ }
+
+ case CK_NullToPointer:
+ if (DiscardResult)
+ return true;
+ return this->emitNull(classifyPrim(CE->getType()), CE);
+
+ case CK_PointerToIntegral: {
+ // TODO: Discard handling.
+ if (!this->visit(SubExpr))
+ return false;
+
+ PrimType T = classifyPrim(CE->getType());
+ return this->emitCastPointerIntegral(T, CE);
+ }
+
+ case CK_ArrayToPointerDecay: {
+ if (!this->visit(SubExpr))
+ return false;
+ if (!this->emitArrayDecay(CE))
+ return false;
+ if (DiscardResult)
+ return this->emitPopPtr(CE);
+ return true;
+ }
+
case CK_AtomicToNonAtomic:
case CK_ConstructorConversion:
case CK_FunctionToPointerDecay:
case CK_NonAtomicToAtomic:
case CK_NoOp:
case CK_UserDefinedConversion:
- return this->Visit(SubExpr);
+ case CK_BitCast:
+ return this->delegate(SubExpr);
+
+ case CK_IntegralToBoolean:
+ case CK_IntegralCast: {
+ if (DiscardResult)
+ return this->discard(SubExpr);
+ std::optional<PrimType> FromT = classify(SubExpr->getType());
+ std::optional<PrimType> ToT = classify(CE->getType());
+
+ if (!FromT || !ToT)
+ return false;
+
+ if (!this->visit(SubExpr))
+ return false;
+
+ if (ToT == PT_IntAP)
+ return this->emitCastAP(*FromT, Ctx.getBitWidth(CE->getType()), CE);
+ if (ToT == PT_IntAPS)
+ return this->emitCastAPS(*FromT, Ctx.getBitWidth(CE->getType()), CE);
+
+ if (FromT == ToT)
+ return true;
+ return this->emitCast(*FromT, *ToT, CE);
+ }
+
+ case CK_PointerToBoolean: {
+ PrimType PtrT = classifyPrim(SubExpr->getType());
+
+ // Just emit p != nullptr for this.
+ if (!this->visit(SubExpr))
+ return false;
+
+ if (!this->emitNull(PtrT, CE))
+ return false;
+
+ return this->emitNE(PtrT, CE);
+ }
+
+ case CK_IntegralComplexToBoolean:
+ case CK_FloatingComplexToBoolean: {
+ std::optional<PrimType> ElemT =
+ classifyComplexElementType(SubExpr->getType());
+ if (!ElemT)
+ return false;
+ // We emit the expression (__real(E) != 0 || __imag(E) != 0)
+ // for us, that means (bool)E[0] || (bool)E[1]
+ if (!this->visit(SubExpr))
+ return false;
+ if (!this->emitConstUint8(0, CE))
+ return false;
+ if (!this->emitArrayElemPtrUint8(CE))
+ return false;
+ if (!this->emitLoadPop(*ElemT, CE))
+ return false;
+ if (*ElemT == PT_Float) {
+ if (!this->emitCastFloatingIntegral(PT_Bool, CE))
+ return false;
+ } else {
+ if (!this->emitCast(*ElemT, PT_Bool, CE))
+ return false;
+ }
+
+ // We now have the bool value of E[0] on the stack.
+ LabelTy LabelTrue = this->getLabel();
+ if (!this->jumpTrue(LabelTrue))
+ return false;
+
+ if (!this->emitConstUint8(1, CE))
+ return false;
+ if (!this->emitArrayElemPtrPopUint8(CE))
+ return false;
+ if (!this->emitLoadPop(*ElemT, CE))
+ return false;
+ if (*ElemT == PT_Float) {
+ if (!this->emitCastFloatingIntegral(PT_Bool, CE))
+ return false;
+ } else {
+ if (!this->emitCast(*ElemT, PT_Bool, CE))
+ return false;
+ }
+ // Leave the boolean value of E[1] on the stack.
+ LabelTy EndLabel = this->getLabel();
+ this->jump(EndLabel);
+
+ this->emitLabel(LabelTrue);
+ if (!this->emitPopPtr(CE))
+ return false;
+ if (!this->emitConstBool(true, CE))
+ return false;
+
+ this->fallthrough(EndLabel);
+ this->emitLabel(EndLabel);
+
+ return true;
+ }
+
+ case CK_IntegralComplexToReal:
+ case CK_FloatingComplexToReal:
+ return this->emitComplexReal(SubExpr);
+
+ case CK_IntegralRealToComplex:
+ case CK_FloatingRealToComplex: {
+ // We're creating a complex value here, so we need to
+ // allocate storage for it.
+ if (!Initializing) {
+ std::optional<unsigned> LocalIndex =
+ allocateLocal(CE, /*IsExtended=*/true);
+ if (!LocalIndex)
+ return false;
+ if (!this->emitGetPtrLocal(*LocalIndex, CE))
+ return false;
+ }
+
+ // Init the complex value to {SubExpr, 0}.
+ if (!this->visitArrayElemInit(0, SubExpr))
+ return false;
+ // Zero-init the second element.
+ PrimType T = classifyPrim(SubExpr->getType());
+ if (!this->visitZeroInitializer(T, SubExpr->getType(), SubExpr))
+ return false;
+ return this->emitInitElem(T, 1, SubExpr);
+ }
case CK_ToVoid:
return discard(SubExpr);
- default: {
- // TODO: implement other casts.
- return this->bail(CE);
- }
+ default:
+ assert(false && "Cast not implemented");
}
+ llvm_unreachable("Unhandled clang::CastKind enum");
}
template <class Emitter>
@@ -130,104 +328,1611 @@ bool ByteCodeExprGen<Emitter>::VisitIntegerLiteral(const IntegerLiteral *LE) {
if (DiscardResult)
return true;
- auto Val = LE->getValue();
- QualType LitTy = LE->getType();
- if (Optional<PrimType> T = classify(LitTy))
- return emitConst(*T, getIntWidth(LitTy), LE->getValue(), LE);
- return this->bail(LE);
+ return this->emitConst(LE->getValue(), LE);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitFloatingLiteral(const FloatingLiteral *E) {
+ if (DiscardResult)
+ return true;
+
+ return this->emitConstFloat(E->getValue(), E);
}
template <class Emitter>
-bool ByteCodeExprGen<Emitter>::VisitParenExpr(const ParenExpr *PE) {
- return this->Visit(PE->getSubExpr());
+bool ByteCodeExprGen<Emitter>::VisitParenExpr(const ParenExpr *E) {
+ return this->delegate(E->getSubExpr());
}
template <class Emitter>
bool ByteCodeExprGen<Emitter>::VisitBinaryOperator(const BinaryOperator *BO) {
+ // Need short-circuiting for these.
+ if (BO->isLogicalOp())
+ return this->VisitLogicalBinOp(BO);
+
+ if (BO->getType()->isAnyComplexType())
+ return this->VisitComplexBinOp(BO);
+
const Expr *LHS = BO->getLHS();
const Expr *RHS = BO->getRHS();
+ if (BO->isPtrMemOp())
+ return this->visit(RHS);
+
+ // Typecheck the args.
+ std::optional<PrimType> LT = classify(LHS->getType());
+ std::optional<PrimType> RT = classify(RHS->getType());
+ std::optional<PrimType> T = classify(BO->getType());
+
// Deal with operations which have composite or void types.
- switch (BO->getOpcode()) {
- case BO_Comma:
- if (!discard(LHS))
+ if (BO->isCommaOp()) {
+ if (!this->discard(LHS))
return false;
- if (!this->Visit(RHS))
+ if (RHS->getType()->isVoidType())
+ return this->discard(RHS);
+
+ return this->delegate(RHS);
+ }
+
+ // Special case for C++'s three-way/spaceship operator <=>, which
+ // returns a std::{strong,weak,partial}_ordering (which is a class, so doesn't
+ // have a PrimType).
+ if (!T) {
+ if (DiscardResult)
+ return true;
+ const ComparisonCategoryInfo *CmpInfo =
+ Ctx.getASTContext().CompCategories.lookupInfoForType(BO->getType());
+ assert(CmpInfo);
+
+ // We need a temporary variable holding our return value.
+ if (!Initializing) {
+ std::optional<unsigned> ResultIndex = this->allocateLocal(BO, false);
+ if (!this->emitGetPtrLocal(*ResultIndex, BO))
+ return false;
+ }
+
+ if (!visit(LHS) || !visit(RHS))
+ return false;
+
+ return this->emitCMP3(*LT, CmpInfo, BO);
+ }
+
+ if (!LT || !RT || !T)
+ return false;
+
+ // Pointer arithmetic special case.
+ if (BO->getOpcode() == BO_Add || BO->getOpcode() == BO_Sub) {
+ if (T == PT_Ptr || (LT == PT_Ptr && RT == PT_Ptr))
+ return this->VisitPointerArithBinOp(BO);
+ }
+
+ if (!visit(LHS) || !visit(RHS))
+ return false;
+
+ // For languages such as C, cast the result of one
+ // of our comparision opcodes to T (which is usually int).
+ auto MaybeCastToBool = [this, T, BO](bool Result) {
+ if (!Result)
return false;
+ if (DiscardResult)
+ return this->emitPop(*T, BO);
+ if (T != PT_Bool)
+ return this->emitCast(PT_Bool, *T, BO);
return true;
+ };
+
+ auto Discard = [this, T, BO](bool Result) {
+ if (!Result)
+ return false;
+ return DiscardResult ? this->emitPop(*T, BO) : true;
+ };
+
+ switch (BO->getOpcode()) {
+ case BO_EQ:
+ return MaybeCastToBool(this->emitEQ(*LT, BO));
+ case BO_NE:
+ return MaybeCastToBool(this->emitNE(*LT, BO));
+ case BO_LT:
+ return MaybeCastToBool(this->emitLT(*LT, BO));
+ case BO_LE:
+ return MaybeCastToBool(this->emitLE(*LT, BO));
+ case BO_GT:
+ return MaybeCastToBool(this->emitGT(*LT, BO));
+ case BO_GE:
+ return MaybeCastToBool(this->emitGE(*LT, BO));
+ case BO_Sub:
+ if (BO->getType()->isFloatingType())
+ return Discard(this->emitSubf(getRoundingMode(BO), BO));
+ return Discard(this->emitSub(*T, BO));
+ case BO_Add:
+ if (BO->getType()->isFloatingType())
+ return Discard(this->emitAddf(getRoundingMode(BO), BO));
+ return Discard(this->emitAdd(*T, BO));
+ case BO_Mul:
+ if (BO->getType()->isFloatingType())
+ return Discard(this->emitMulf(getRoundingMode(BO), BO));
+ return Discard(this->emitMul(*T, BO));
+ case BO_Rem:
+ return Discard(this->emitRem(*T, BO));
+ case BO_Div:
+ if (BO->getType()->isFloatingType())
+ return Discard(this->emitDivf(getRoundingMode(BO), BO));
+ return Discard(this->emitDiv(*T, BO));
+ case BO_Assign:
+ if (DiscardResult)
+ return LHS->refersToBitField() ? this->emitStoreBitFieldPop(*T, BO)
+ : this->emitStorePop(*T, BO);
+ return LHS->refersToBitField() ? this->emitStoreBitField(*T, BO)
+ : this->emitStore(*T, BO);
+ case BO_And:
+ return Discard(this->emitBitAnd(*T, BO));
+ case BO_Or:
+ return Discard(this->emitBitOr(*T, BO));
+ case BO_Shl:
+ return Discard(this->emitShl(*LT, *RT, BO));
+ case BO_Shr:
+ return Discard(this->emitShr(*LT, *RT, BO));
+ case BO_Xor:
+ return Discard(this->emitBitXor(*T, BO));
+ case BO_LOr:
+ case BO_LAnd:
+ llvm_unreachable("Already handled earlier");
default:
- break;
+ return false;
}
- // Typecheck the args.
- Optional<PrimType> LT = classify(LHS->getType());
- Optional<PrimType> RT = classify(RHS->getType());
- if (!LT || !RT) {
- return this->bail(BO);
+ llvm_unreachable("Unhandled binary op");
+}
+
+/// Perform addition/subtraction of a pointer and an integer or
+/// subtraction of two pointers.
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitPointerArithBinOp(const BinaryOperator *E) {
+ BinaryOperatorKind Op = E->getOpcode();
+ const Expr *LHS = E->getLHS();
+ const Expr *RHS = E->getRHS();
+
+ if ((Op != BO_Add && Op != BO_Sub) ||
+ (!LHS->getType()->isPointerType() && !RHS->getType()->isPointerType()))
+ return false;
+
+ std::optional<PrimType> LT = classify(LHS);
+ std::optional<PrimType> RT = classify(RHS);
+
+ if (!LT || !RT)
+ return false;
+
+ if (LHS->getType()->isPointerType() && RHS->getType()->isPointerType()) {
+ if (Op != BO_Sub)
+ return false;
+
+ assert(E->getType()->isIntegerType());
+ if (!visit(RHS) || !visit(LHS))
+ return false;
+
+ return this->emitSubPtr(classifyPrim(E->getType()), E);
+ }
+
+ PrimType OffsetType;
+ if (LHS->getType()->isIntegerType()) {
+ if (!visit(RHS) || !visit(LHS))
+ return false;
+ OffsetType = *LT;
+ } else if (RHS->getType()->isIntegerType()) {
+ if (!visit(LHS) || !visit(RHS))
+ return false;
+ OffsetType = *RT;
+ } else {
+ return false;
+ }
+
+ if (Op == BO_Add)
+ return this->emitAddOffset(OffsetType, E);
+ else if (Op == BO_Sub)
+ return this->emitSubOffset(OffsetType, E);
+
+ return false;
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitLogicalBinOp(const BinaryOperator *E) {
+ assert(E->isLogicalOp());
+ BinaryOperatorKind Op = E->getOpcode();
+ const Expr *LHS = E->getLHS();
+ const Expr *RHS = E->getRHS();
+ std::optional<PrimType> T = classify(E->getType());
+
+ if (Op == BO_LOr) {
+ // Logical OR. Visit LHS and only evaluate RHS if LHS was FALSE.
+ LabelTy LabelTrue = this->getLabel();
+ LabelTy LabelEnd = this->getLabel();
+
+ if (!this->visitBool(LHS))
+ return false;
+ if (!this->jumpTrue(LabelTrue))
+ return false;
+
+ if (!this->visitBool(RHS))
+ return false;
+ if (!this->jump(LabelEnd))
+ return false;
+
+ this->emitLabel(LabelTrue);
+ this->emitConstBool(true, E);
+ this->fallthrough(LabelEnd);
+ this->emitLabel(LabelEnd);
+
+ } else {
+ assert(Op == BO_LAnd);
+ // Logical AND.
+ // Visit LHS. Only visit RHS if LHS was TRUE.
+ LabelTy LabelFalse = this->getLabel();
+ LabelTy LabelEnd = this->getLabel();
+
+ if (!this->visitBool(LHS))
+ return false;
+ if (!this->jumpFalse(LabelFalse))
+ return false;
+
+ if (!this->visitBool(RHS))
+ return false;
+ if (!this->jump(LabelEnd))
+ return false;
+
+ this->emitLabel(LabelFalse);
+ this->emitConstBool(false, E);
+ this->fallthrough(LabelEnd);
+ this->emitLabel(LabelEnd);
}
- if (Optional<PrimType> T = classify(BO->getType())) {
- if (!visit(LHS))
+ if (DiscardResult)
+ return this->emitPopBool(E);
+
+ // For C, cast back to integer type.
+ assert(T);
+ if (T != PT_Bool)
+ return this->emitCast(PT_Bool, *T, E);
+ return true;
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitComplexBinOp(const BinaryOperator *E) {
+ assert(Initializing);
+
+ const Expr *LHS = E->getLHS();
+ const Expr *RHS = E->getRHS();
+ PrimType LHSElemT = *this->classifyComplexElementType(LHS->getType());
+ PrimType RHSElemT = *this->classifyComplexElementType(RHS->getType());
+
+ unsigned LHSOffset = this->allocateLocalPrimitive(LHS, PT_Ptr, true, false);
+ unsigned RHSOffset = this->allocateLocalPrimitive(RHS, PT_Ptr, true, false);
+ unsigned ResultOffset = ~0u;
+ if (!this->DiscardResult)
+ ResultOffset = this->allocateLocalPrimitive(E, PT_Ptr, true, false);
+
+ assert(LHSElemT == RHSElemT);
+
+ // Save result pointer in ResultOffset
+ if (!this->DiscardResult) {
+ if (!this->emitDupPtr(E))
return false;
- if (!visit(RHS))
+ if (!this->emitSetLocal(PT_Ptr, ResultOffset, E))
return false;
+ }
+
+ // Evaluate LHS and save value to LHSOffset.
+ if (!this->visit(LHS))
+ return false;
+ if (!this->emitSetLocal(PT_Ptr, LHSOffset, E))
+ return false;
- auto Discard = [this, T, BO](bool Result) {
- if (!Result)
+ // Same with RHS.
+ if (!this->visit(RHS))
+ return false;
+ if (!this->emitSetLocal(PT_Ptr, RHSOffset, E))
+ return false;
+
+ // Now we can get pointers to the LHS and RHS from the offsets above.
+ BinaryOperatorKind Op = E->getOpcode();
+ for (unsigned ElemIndex = 0; ElemIndex != 2; ++ElemIndex) {
+ // Result pointer for the store later.
+ if (!this->DiscardResult) {
+ if (!this->emitGetLocal(PT_Ptr, ResultOffset, E))
return false;
- return DiscardResult ? this->emitPop(*T, BO) : true;
- };
+ }
- switch (BO->getOpcode()) {
- case BO_EQ:
- return Discard(this->emitEQ(*LT, BO));
- case BO_NE:
- return Discard(this->emitNE(*LT, BO));
- case BO_LT:
- return Discard(this->emitLT(*LT, BO));
- case BO_LE:
- return Discard(this->emitLE(*LT, BO));
- case BO_GT:
- return Discard(this->emitGT(*LT, BO));
- case BO_GE:
- return Discard(this->emitGE(*LT, BO));
- case BO_Sub:
- return Discard(this->emitSub(*T, BO));
+ if (!this->emitGetLocal(PT_Ptr, LHSOffset, E))
+ return false;
+ if (!this->emitConstUint8(ElemIndex, E))
+ return false;
+ if (!this->emitArrayElemPtrPopUint8(E))
+ return false;
+ if (!this->emitLoadPop(LHSElemT, E))
+ return false;
+
+ if (!this->emitGetLocal(PT_Ptr, RHSOffset, E))
+ return false;
+ if (!this->emitConstUint8(ElemIndex, E))
+ return false;
+ if (!this->emitArrayElemPtrPopUint8(E))
+ return false;
+ if (!this->emitLoadPop(RHSElemT, E))
+ return false;
+
+ // The actual operation.
+ switch (Op) {
case BO_Add:
- return Discard(this->emitAdd(*T, BO));
- case BO_Mul:
- return Discard(this->emitMul(*T, BO));
+ if (LHSElemT == PT_Float) {
+ if (!this->emitAddf(getRoundingMode(E), E))
+ return false;
+ } else {
+ if (!this->emitAdd(LHSElemT, E))
+ return false;
+ }
+ break;
+ case BO_Sub:
+ if (LHSElemT == PT_Float) {
+ if (!this->emitSubf(getRoundingMode(E), E))
+ return false;
+ } else {
+ if (!this->emitSub(LHSElemT, E))
+ return false;
+ }
+ break;
+
default:
- return this->bail(BO);
+ return false;
+ }
+
+ if (!this->DiscardResult) {
+ // Initialize array element with the value we just computed.
+ if (!this->emitInitElemPop(LHSElemT, ElemIndex, E))
+ return false;
+ } else {
+ if (!this->emitPop(LHSElemT, E))
+ return false;
+ }
+ }
+ return true;
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
+ QualType QT = E->getType();
+
+ if (std::optional<PrimType> T = classify(QT))
+ return this->visitZeroInitializer(*T, QT, E);
+
+ if (QT->isRecordType())
+ return false;
+
+ if (QT->isIncompleteArrayType())
+ return true;
+
+ if (QT->isArrayType()) {
+ const ArrayType *AT = QT->getAsArrayTypeUnsafe();
+ assert(AT);
+ const auto *CAT = cast<ConstantArrayType>(AT);
+ size_t NumElems = CAT->getSize().getZExtValue();
+ PrimType ElemT = classifyPrim(CAT->getElementType());
+
+ for (size_t I = 0; I != NumElems; ++I) {
+ if (!this->visitZeroInitializer(ElemT, CAT->getElementType(), E))
+ return false;
+ if (!this->emitInitElem(ElemT, I, E))
+ return false;
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitArraySubscriptExpr(
+ const ArraySubscriptExpr *E) {
+ const Expr *Base = E->getBase();
+ const Expr *Index = E->getIdx();
+
+ if (DiscardResult)
+ return this->discard(Base) && this->discard(Index);
+
+ // Take pointer of LHS, add offset from RHS.
+ // What's left on the stack after this is a pointer.
+ if (!this->visit(Base))
+ return false;
+
+ if (!this->visit(Index))
+ return false;
+
+ PrimType IndexT = classifyPrim(Index->getType());
+ return this->emitArrayElemPtrPop(IndexT, E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::visitInitList(ArrayRef<const Expr *> Inits,
+ const Expr *E) {
+ assert(E->getType()->isRecordType());
+ const Record *R = getRecord(E->getType());
+
+ unsigned InitIndex = 0;
+ for (const Expr *Init : Inits) {
+ if (!this->emitDupPtr(E))
+ return false;
+
+ if (std::optional<PrimType> T = classify(Init)) {
+ const Record::Field *FieldToInit = R->getField(InitIndex);
+ if (!this->visit(Init))
+ return false;
+
+ if (FieldToInit->isBitField()) {
+ if (!this->emitInitBitField(*T, FieldToInit, E))
+ return false;
+ } else {
+ if (!this->emitInitField(*T, FieldToInit->Offset, E))
+ return false;
+ }
+
+ if (!this->emitPopPtr(E))
+ return false;
+ ++InitIndex;
+ } else {
+ // Initializer for a direct base class.
+ if (const Record::Base *B = R->getBase(Init->getType())) {
+ if (!this->emitGetPtrBasePop(B->Offset, Init))
+ return false;
+
+ if (!this->visitInitializer(Init))
+ return false;
+
+ if (!this->emitInitPtrPop(E))
+ return false;
+ // Base initializers don't increase InitIndex, since they don't count
+ // into the Record's fields.
+ } else {
+ const Record::Field *FieldToInit = R->getField(InitIndex);
+ // Non-primitive case. Get a pointer to the field-to-initialize
+ // on the stack and recurse into visitInitializer().
+ if (!this->emitGetPtrField(FieldToInit->Offset, Init))
+ return false;
+
+ if (!this->visitInitializer(Init))
+ return false;
+
+ if (!this->emitPopPtr(E))
+ return false;
+ ++InitIndex;
+ }
+ }
+ }
+ return true;
+}
+
+/// Pointer to the array(not the element!) must be on the stack when calling
+/// this.
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::visitArrayElemInit(unsigned ElemIndex,
+ const Expr *Init) {
+ if (std::optional<PrimType> T = classify(Init->getType())) {
+ // Visit the primitive element like normal.
+ if (!this->visit(Init))
+ return false;
+ return this->emitInitElem(*T, ElemIndex, Init);
+ }
+
+ // Advance the pointer currently on the stack to the given
+ // dimension.
+ if (!this->emitConstUint32(ElemIndex, Init))
+ return false;
+ if (!this->emitArrayElemPtrUint32(Init))
+ return false;
+ if (!this->visitInitializer(Init))
+ return false;
+ return this->emitPopPtr(Init);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitInitListExpr(const InitListExpr *E) {
+ // Handle discarding first.
+ if (DiscardResult) {
+ for (const Expr *Init : E->inits()) {
+ if (!this->discard(Init))
+ return false;
+ }
+ return true;
+ }
+
+ // Primitive values.
+ if (std::optional<PrimType> T = classify(E->getType())) {
+ assert(!DiscardResult);
+ if (E->getNumInits() == 0)
+ return this->visitZeroInitializer(*T, E->getType(), E);
+ assert(E->getNumInits() == 1);
+ return this->delegate(E->inits()[0]);
+ }
+
+ QualType T = E->getType();
+ if (T->isRecordType())
+ return this->visitInitList(E->inits(), E);
+
+ if (T->isArrayType()) {
+ // FIXME: Array fillers.
+ unsigned ElementIndex = 0;
+ for (const Expr *Init : E->inits()) {
+ if (!this->visitArrayElemInit(ElementIndex, Init))
+ return false;
+ ++ElementIndex;
+ }
+ return true;
+ }
+
+ if (T->isAnyComplexType()) {
+ unsigned NumInits = E->getNumInits();
+
+ if (NumInits == 1)
+ return this->delegate(E->inits()[0]);
+
+ QualType ElemQT = E->getType()->getAs<ComplexType>()->getElementType();
+ PrimType ElemT = classifyPrim(ElemQT);
+ if (NumInits == 0) {
+ // Zero-initialize both elements.
+ for (unsigned I = 0; I < 2; ++I) {
+ if (!this->visitZeroInitializer(ElemT, ElemQT, E))
+ return false;
+ if (!this->emitInitElem(ElemT, I, E))
+ return false;
+ }
+ } else if (NumInits == 2) {
+ unsigned InitIndex = 0;
+ for (const Expr *Init : E->inits()) {
+ if (!this->visit(Init))
+ return false;
+
+ if (!this->emitInitElem(ElemT, InitIndex, E))
+ return false;
+ ++InitIndex;
+ }
+ }
+ return true;
+ }
+
+ return false;
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitCXXParenListInitExpr(
+ const CXXParenListInitExpr *E) {
+ if (DiscardResult) {
+ for (const Expr *Init : E->getInitExprs()) {
+ if (!this->discard(Init))
+ return false;
+ }
+ return true;
+ }
+
+ assert(E->getType()->isRecordType());
+ return this->visitInitList(E->getInitExprs(), E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitSubstNonTypeTemplateParmExpr(
+ const SubstNonTypeTemplateParmExpr *E) {
+ return this->delegate(E->getReplacement());
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitConstantExpr(const ConstantExpr *E) {
+ // Try to emit the APValue directly, without visiting the subexpr.
+ // This will only fail if we can't emit the APValue, so won't emit any
+ // diagnostics or any double values.
+ std::optional<PrimType> T = classify(E->getType());
+ if (T && E->hasAPValueResult() &&
+ this->visitAPValue(E->getAPValueResult(), *T, E))
+ return true;
+
+ return this->delegate(E->getSubExpr());
+}
+
+static CharUnits AlignOfType(QualType T, const ASTContext &ASTCtx,
+ UnaryExprOrTypeTrait Kind) {
+ bool AlignOfReturnsPreferred =
+ ASTCtx.getLangOpts().getClangABICompat() <= LangOptions::ClangABI::Ver7;
+
+ // C++ [expr.alignof]p3:
+ // When alignof is applied to a reference type, the result is the
+ // alignment of the referenced type.
+ if (const auto *Ref = T->getAs<ReferenceType>())
+ T = Ref->getPointeeType();
+
+ // __alignof is defined to return the preferred alignment.
+ // Before 8, clang returned the preferred alignment for alignof and
+ // _Alignof as well.
+ if (Kind == UETT_PreferredAlignOf || AlignOfReturnsPreferred)
+ return ASTCtx.toCharUnitsFromBits(ASTCtx.getPreferredTypeAlign(T));
+
+ return ASTCtx.getTypeAlignInChars(T);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitUnaryExprOrTypeTraitExpr(
+ const UnaryExprOrTypeTraitExpr *E) {
+ UnaryExprOrTypeTrait Kind = E->getKind();
+ ASTContext &ASTCtx = Ctx.getASTContext();
+
+ if (Kind == UETT_SizeOf) {
+ QualType ArgType = E->getTypeOfArgument();
+ CharUnits Size;
+ if (ArgType->isVoidType() || ArgType->isFunctionType())
+ Size = CharUnits::One();
+ else {
+ if (ArgType->isDependentType() || !ArgType->isConstantSizeType())
+ return false;
+
+ Size = ASTCtx.getTypeSizeInChars(ArgType);
+ }
+
+ if (DiscardResult)
+ return true;
+
+ return this->emitConst(Size.getQuantity(), E);
+ }
+
+ if (Kind == UETT_AlignOf || Kind == UETT_PreferredAlignOf) {
+ CharUnits Size;
+
+ if (E->isArgumentType()) {
+ QualType ArgType = E->getTypeOfArgument();
+
+ Size = AlignOfType(ArgType, ASTCtx, Kind);
+ } else {
+ // Argument is an expression, not a type.
+ const Expr *Arg = E->getArgumentExpr()->IgnoreParens();
+
+ // The kinds of expressions that we have special-case logic here for
+ // should be kept up to date with the special checks for those
+ // expressions in Sema.
+
+ // alignof decl is always accepted, even if it doesn't make sense: we
+ // default to 1 in those cases.
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(Arg))
+ Size = ASTCtx.getDeclAlign(DRE->getDecl(),
+ /*RefAsPointee*/ true);
+ else if (const auto *ME = dyn_cast<MemberExpr>(Arg))
+ Size = ASTCtx.getDeclAlign(ME->getMemberDecl(),
+ /*RefAsPointee*/ true);
+ else
+ Size = AlignOfType(Arg->getType(), ASTCtx, Kind);
+ }
+
+ if (DiscardResult)
+ return true;
+
+ return this->emitConst(Size.getQuantity(), E);
+ }
+
+ return false;
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitMemberExpr(const MemberExpr *E) {
+ // 'Base.Member'
+ const Expr *Base = E->getBase();
+
+ if (DiscardResult)
+ return this->discard(Base);
+
+ if (!this->visit(Base))
+ return false;
+
+ // Base above gives us a pointer on the stack.
+ // TODO: Implement non-FieldDecl members.
+ const ValueDecl *Member = E->getMemberDecl();
+ if (const auto *FD = dyn_cast<FieldDecl>(Member)) {
+ const RecordDecl *RD = FD->getParent();
+ const Record *R = getRecord(RD);
+ const Record::Field *F = R->getField(FD);
+ // Leave a pointer to the field on the stack.
+ if (F->Decl->getType()->isReferenceType())
+ return this->emitGetFieldPop(PT_Ptr, F->Offset, E);
+ return this->emitGetPtrField(F->Offset, E);
+ }
+
+ return false;
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitArrayInitIndexExpr(
+ const ArrayInitIndexExpr *E) {
+ // ArrayIndex might not be set if a ArrayInitIndexExpr is being evaluated
+ // stand-alone, e.g. via EvaluateAsInt().
+ if (!ArrayIndex)
+ return false;
+ return this->emitConst(*ArrayIndex, E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitArrayInitLoopExpr(
+ const ArrayInitLoopExpr *E) {
+ assert(Initializing);
+ assert(!DiscardResult);
+ // TODO: This compiles to quite a lot of bytecode if the array is larger.
+ // Investigate compiling this to a loop.
+
+ const Expr *SubExpr = E->getSubExpr();
+ const Expr *CommonExpr = E->getCommonExpr();
+ size_t Size = E->getArraySize().getZExtValue();
+
+ // If the common expression is an opaque expression, we visit it
+ // here once so we have its value cached.
+ // FIXME: This might be necessary (or useful) for all expressions.
+ if (isa<OpaqueValueExpr>(CommonExpr)) {
+ if (!this->discard(CommonExpr))
+ return false;
+ }
+
+ // So, every iteration, we execute an assignment here
+ // where the LHS is on the stack (the target array)
+ // and the RHS is our SubExpr.
+ for (size_t I = 0; I != Size; ++I) {
+ ArrayIndexScope<Emitter> IndexScope(this, I);
+ BlockScope<Emitter> BS(this);
+
+ if (!this->visitArrayElemInit(I, SubExpr))
+ return false;
+ }
+ return true;
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
+ if (Initializing)
+ return this->visitInitializer(E->getSourceExpr());
+
+ PrimType SubExprT = classify(E->getSourceExpr()).value_or(PT_Ptr);
+ if (auto It = OpaqueExprs.find(E); It != OpaqueExprs.end())
+ return this->emitGetLocal(SubExprT, It->second, E);
+
+ if (!this->visit(E->getSourceExpr()))
+ return false;
+
+ // At this point we either have the evaluated source expression or a pointer
+ // to an object on the stack. We want to create a local variable that stores
+ // this value.
+ std::optional<unsigned> LocalIndex =
+ allocateLocalPrimitive(E, SubExprT, /*IsConst=*/true);
+ if (!LocalIndex)
+ return false;
+ if (!this->emitSetLocal(SubExprT, *LocalIndex, E))
+ return false;
+
+ // Here the local variable is created but the value is removed from the stack,
+ // so we put it back, because the caller might need it.
+ if (!DiscardResult) {
+ if (!this->emitGetLocal(SubExprT, *LocalIndex, E))
+ return false;
+ }
+
+ // FIXME: Ideally the cached value should be cleaned up later.
+ OpaqueExprs.insert({E, *LocalIndex});
+
+ return true;
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitAbstractConditionalOperator(
+ const AbstractConditionalOperator *E) {
+ const Expr *Condition = E->getCond();
+ const Expr *TrueExpr = E->getTrueExpr();
+ const Expr *FalseExpr = E->getFalseExpr();
+
+ LabelTy LabelEnd = this->getLabel(); // Label after the operator.
+ LabelTy LabelFalse = this->getLabel(); // Label for the false expr.
+
+ if (!this->visitBool(Condition))
+ return false;
+
+ if (!this->jumpFalse(LabelFalse))
+ return false;
+
+ if (!this->delegate(TrueExpr))
+ return false;
+ if (!this->jump(LabelEnd))
+ return false;
+
+ this->emitLabel(LabelFalse);
+
+ if (!this->delegate(FalseExpr))
+ return false;
+
+ this->fallthrough(LabelEnd);
+ this->emitLabel(LabelEnd);
+
+ return true;
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitStringLiteral(const StringLiteral *E) {
+ if (DiscardResult)
+ return true;
+
+ if (!Initializing) {
+ unsigned StringIndex = P.createGlobalString(E);
+ return this->emitGetPtrGlobal(StringIndex, E);
+ }
+
+ // We are initializing an array on the stack.
+ const ConstantArrayType *CAT =
+ Ctx.getASTContext().getAsConstantArrayType(E->getType());
+ assert(CAT && "a string literal that's not a constant array?");
+
+ // If the initializer string is too long, a diagnostic has already been
+ // emitted. Read only the array length from the string literal.
+ unsigned ArraySize = CAT->getSize().getZExtValue();
+ unsigned N = std::min(ArraySize, E->getLength());
+ size_t CharWidth = E->getCharByteWidth();
+
+ for (unsigned I = 0; I != N; ++I) {
+ uint32_t CodeUnit = E->getCodeUnit(I);
+
+ if (CharWidth == 1) {
+ this->emitConstSint8(CodeUnit, E);
+ this->emitInitElemSint8(I, E);
+ } else if (CharWidth == 2) {
+ this->emitConstUint16(CodeUnit, E);
+ this->emitInitElemUint16(I, E);
+ } else if (CharWidth == 4) {
+ this->emitConstUint32(CodeUnit, E);
+ this->emitInitElemUint32(I, E);
+ } else {
+ llvm_unreachable("unsupported character width");
+ }
+ }
+
+ // Fill up the rest of the char array with NUL bytes.
+ for (unsigned I = N; I != ArraySize; ++I) {
+ if (CharWidth == 1) {
+ this->emitConstSint8(0, E);
+ this->emitInitElemSint8(I, E);
+ } else if (CharWidth == 2) {
+ this->emitConstUint16(0, E);
+ this->emitInitElemUint16(I, E);
+ } else if (CharWidth == 4) {
+ this->emitConstUint32(0, E);
+ this->emitInitElemUint32(I, E);
+ } else {
+ llvm_unreachable("unsupported character width");
+ }
+ }
+
+ return true;
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitCharacterLiteral(
+ const CharacterLiteral *E) {
+ if (DiscardResult)
+ return true;
+ return this->emitConst(E->getValue(), E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitFloatCompoundAssignOperator(
+ const CompoundAssignOperator *E) {
+
+ const Expr *LHS = E->getLHS();
+ const Expr *RHS = E->getRHS();
+ QualType LHSType = LHS->getType();
+ QualType LHSComputationType = E->getComputationLHSType();
+ QualType ResultType = E->getComputationResultType();
+ std::optional<PrimType> LT = classify(LHSComputationType);
+ std::optional<PrimType> RT = classify(ResultType);
+
+ assert(ResultType->isFloatingType());
+
+ if (!LT || !RT)
+ return false;
+
+ PrimType LHST = classifyPrim(LHSType);
+
+ // C++17 onwards require that we evaluate the RHS first.
+ // Compute RHS and save it in a temporary variable so we can
+ // load it again later.
+ if (!visit(RHS))
+ return false;
+
+ unsigned TempOffset = this->allocateLocalPrimitive(E, *RT, /*IsConst=*/true);
+ if (!this->emitSetLocal(*RT, TempOffset, E))
+ return false;
+
+ // First, visit LHS.
+ if (!visit(LHS))
+ return false;
+ if (!this->emitLoad(LHST, E))
+ return false;
+
+ // If necessary, convert LHS to its computation type.
+ if (!this->emitPrimCast(LHST, classifyPrim(LHSComputationType),
+ LHSComputationType, E))
+ return false;
+
+ // Now load RHS.
+ if (!this->emitGetLocal(*RT, TempOffset, E))
+ return false;
+
+ llvm::RoundingMode RM = getRoundingMode(E);
+ switch (E->getOpcode()) {
+ case BO_AddAssign:
+ if (!this->emitAddf(RM, E))
+ return false;
+ break;
+ case BO_SubAssign:
+ if (!this->emitSubf(RM, E))
+ return false;
+ break;
+ case BO_MulAssign:
+ if (!this->emitMulf(RM, E))
+ return false;
+ break;
+ case BO_DivAssign:
+ if (!this->emitDivf(RM, E))
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ if (!this->emitPrimCast(classifyPrim(ResultType), LHST, LHS->getType(), E))
+ return false;
+
+ if (DiscardResult)
+ return this->emitStorePop(LHST, E);
+ return this->emitStore(LHST, E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitPointerCompoundAssignOperator(
+ const CompoundAssignOperator *E) {
+ BinaryOperatorKind Op = E->getOpcode();
+ const Expr *LHS = E->getLHS();
+ const Expr *RHS = E->getRHS();
+ std::optional<PrimType> LT = classify(LHS->getType());
+ std::optional<PrimType> RT = classify(RHS->getType());
+
+ if (Op != BO_AddAssign && Op != BO_SubAssign)
+ return false;
+
+ if (!LT || !RT)
+ return false;
+ assert(*LT == PT_Ptr);
+
+ if (!visit(LHS))
+ return false;
+
+ if (!this->emitLoadPtr(LHS))
+ return false;
+
+ if (!visit(RHS))
+ return false;
+
+ if (Op == BO_AddAssign)
+ this->emitAddOffset(*RT, E);
+ else
+ this->emitSubOffset(*RT, E);
+
+ if (DiscardResult)
+ return this->emitStorePopPtr(E);
+ return this->emitStorePtr(E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitCompoundAssignOperator(
+ const CompoundAssignOperator *E) {
+
+ const Expr *LHS = E->getLHS();
+ const Expr *RHS = E->getRHS();
+ std::optional<PrimType> LHSComputationT =
+ classify(E->getComputationLHSType());
+ std::optional<PrimType> LT = classify(LHS->getType());
+ std::optional<PrimType> RT = classify(E->getComputationResultType());
+ std::optional<PrimType> ResultT = classify(E->getType());
+
+ if (!LT || !RT || !ResultT || !LHSComputationT)
+ return false;
+
+ // Handle floating point operations separately here, since they
+ // require special care.
+
+ if (ResultT == PT_Float || RT == PT_Float)
+ return VisitFloatCompoundAssignOperator(E);
+
+ if (E->getType()->isPointerType())
+ return VisitPointerCompoundAssignOperator(E);
+
+ assert(!E->getType()->isPointerType() && "Handled above");
+ assert(!E->getType()->isFloatingType() && "Handled above");
+
+ // C++17 onwards require that we evaluate the RHS first.
+ // Compute RHS and save it in a temporary variable so we can
+ // load it again later.
+ // FIXME: Compound assignments are unsequenced in C, so we might
+ // have to figure out how to reject them.
+ if (!visit(RHS))
+ return false;
+
+ unsigned TempOffset = this->allocateLocalPrimitive(E, *RT, /*IsConst=*/true);
+
+ if (!this->emitSetLocal(*RT, TempOffset, E))
+ return false;
+
+ // Get LHS pointer, load its value and cast it to the
+ // computation type if necessary.
+ if (!visit(LHS))
+ return false;
+ if (!this->emitLoad(*LT, E))
+ return false;
+ if (*LT != *LHSComputationT) {
+ if (!this->emitCast(*LT, *LHSComputationT, E))
+ return false;
+ }
+
+ // Get the RHS value on the stack.
+ if (!this->emitGetLocal(*RT, TempOffset, E))
+ return false;
+
+ // Perform operation.
+ switch (E->getOpcode()) {
+ case BO_AddAssign:
+ if (!this->emitAdd(*LHSComputationT, E))
+ return false;
+ break;
+ case BO_SubAssign:
+ if (!this->emitSub(*LHSComputationT, E))
+ return false;
+ break;
+ case BO_MulAssign:
+ if (!this->emitMul(*LHSComputationT, E))
+ return false;
+ break;
+ case BO_DivAssign:
+ if (!this->emitDiv(*LHSComputationT, E))
+ return false;
+ break;
+ case BO_RemAssign:
+ if (!this->emitRem(*LHSComputationT, E))
+ return false;
+ break;
+ case BO_ShlAssign:
+ if (!this->emitShl(*LHSComputationT, *RT, E))
+ return false;
+ break;
+ case BO_ShrAssign:
+ if (!this->emitShr(*LHSComputationT, *RT, E))
+ return false;
+ break;
+ case BO_AndAssign:
+ if (!this->emitBitAnd(*LHSComputationT, E))
+ return false;
+ break;
+ case BO_XorAssign:
+ if (!this->emitBitXor(*LHSComputationT, E))
+ return false;
+ break;
+ case BO_OrAssign:
+ if (!this->emitBitOr(*LHSComputationT, E))
+ return false;
+ break;
+ default:
+ llvm_unreachable("Unimplemented compound assign operator");
+ }
+
+ // And now cast from LHSComputationT to ResultT.
+ if (*ResultT != *LHSComputationT) {
+ if (!this->emitCast(*LHSComputationT, *ResultT, E))
+ return false;
+ }
+
+ // And store the result in LHS.
+ if (DiscardResult) {
+ if (LHS->refersToBitField())
+ return this->emitStoreBitFieldPop(*ResultT, E);
+ return this->emitStorePop(*ResultT, E);
+ }
+ if (LHS->refersToBitField())
+ return this->emitStoreBitField(*ResultT, E);
+ return this->emitStore(*ResultT, E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitExprWithCleanups(
+ const ExprWithCleanups *E) {
+ const Expr *SubExpr = E->getSubExpr();
+
+ assert(E->getNumObjects() == 0 && "TODO: Implement cleanups");
+
+ return this->delegate(SubExpr);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitMaterializeTemporaryExpr(
+ const MaterializeTemporaryExpr *E) {
+ const Expr *SubExpr = E->getSubExpr();
+
+ if (Initializing) {
+ // We already have a value, just initialize that.
+ return this->visitInitializer(SubExpr);
+ }
+ // If we don't end up using the materialized temporary anyway, don't
+ // bother creating it.
+ if (DiscardResult)
+ return this->discard(SubExpr);
+
+ // When we're initializing a global variable *or* the storage duration of
+ // the temporary is explicitly static, create a global variable.
+ std::optional<PrimType> SubExprT = classify(SubExpr);
+ bool IsStatic = E->getStorageDuration() == SD_Static;
+ if (GlobalDecl || IsStatic) {
+ std::optional<unsigned> GlobalIndex = P.createGlobal(E);
+ if (!GlobalIndex)
+ return false;
+
+ const LifetimeExtendedTemporaryDecl *TempDecl =
+ E->getLifetimeExtendedTemporaryDecl();
+ if (IsStatic)
+ assert(TempDecl);
+
+ if (SubExprT) {
+ if (!this->visit(SubExpr))
+ return false;
+ if (IsStatic) {
+ if (!this->emitInitGlobalTemp(*SubExprT, *GlobalIndex, TempDecl, E))
+ return false;
+ } else {
+ if (!this->emitInitGlobal(*SubExprT, *GlobalIndex, E))
+ return false;
+ }
+ return this->emitGetPtrGlobal(*GlobalIndex, E);
+ }
+
+ // Non-primitive values.
+ if (!this->emitGetPtrGlobal(*GlobalIndex, E))
+ return false;
+ if (!this->visitInitializer(SubExpr))
+ return false;
+ if (IsStatic)
+ return this->emitInitGlobalTempComp(TempDecl, E);
+ return true;
+ }
+
+ // For everyhing else, use local variables.
+ if (SubExprT) {
+ if (std::optional<unsigned> LocalIndex = allocateLocalPrimitive(
+ SubExpr, *SubExprT, /*IsConst=*/true, /*IsExtended=*/true)) {
+ if (!this->visit(SubExpr))
+ return false;
+ this->emitSetLocal(*SubExprT, *LocalIndex, E);
+ return this->emitGetPtrLocal(*LocalIndex, E);
+ }
+ } else {
+ if (std::optional<unsigned> LocalIndex =
+ allocateLocal(SubExpr, /*IsExtended=*/true)) {
+ if (!this->emitGetPtrLocal(*LocalIndex, E))
+ return false;
+ return this->visitInitializer(SubExpr);
+ }
+ }
+ return false;
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitCXXBindTemporaryExpr(
+ const CXXBindTemporaryExpr *E) {
+ return this->delegate(E->getSubExpr());
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitCompoundLiteralExpr(
+ const CompoundLiteralExpr *E) {
+ const Expr *Init = E->getInitializer();
+ if (Initializing) {
+ // We already have a value, just initialize that.
+ return this->visitInitializer(Init);
+ }
+
+ std::optional<PrimType> T = classify(E->getType());
+ if (E->isFileScope()) {
+ if (std::optional<unsigned> GlobalIndex = P.createGlobal(E)) {
+ if (classify(E->getType()))
+ return this->visit(Init);
+ if (!this->emitGetPtrGlobal(*GlobalIndex, E))
+ return false;
+ return this->visitInitializer(Init);
+ }
+ }
+
+ // Otherwise, use a local variable.
+ if (T) {
+ // For primitive types, we just visit the initializer.
+ return this->delegate(Init);
+ } else {
+ if (std::optional<unsigned> LocalIndex = allocateLocal(Init)) {
+ if (!this->emitGetPtrLocal(*LocalIndex, E))
+ return false;
+ if (!this->visitInitializer(Init))
+ return false;
+ if (DiscardResult)
+ return this->emitPopPtr(E);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitTypeTraitExpr(const TypeTraitExpr *E) {
+ if (DiscardResult)
+ return true;
+ return this->emitConstBool(E->getValue(), E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitLambdaExpr(const LambdaExpr *E) {
+ assert(Initializing);
+ const Record *R = P.getOrCreateRecord(E->getLambdaClass());
+
+ auto *CaptureInitIt = E->capture_init_begin();
+ // Initialize all fields (which represent lambda captures) of the
+ // record with their initializers.
+ for (const Record::Field &F : R->fields()) {
+ const Expr *Init = *CaptureInitIt;
+ ++CaptureInitIt;
+
+ if (std::optional<PrimType> T = classify(Init)) {
+ if (!this->visit(Init))
+ return false;
+
+ if (!this->emitSetField(*T, F.Offset, E))
+ return false;
+ } else {
+ if (!this->emitDupPtr(E))
+ return false;
+
+ if (!this->emitGetPtrField(F.Offset, E))
+ return false;
+
+ if (!this->visitInitializer(Init))
+ return false;
+
+ if (!this->emitPopPtr(E))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitPredefinedExpr(const PredefinedExpr *E) {
+ if (DiscardResult)
+ return true;
+
+ assert(!Initializing);
+ return this->visit(E->getFunctionName());
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitCXXThrowExpr(const CXXThrowExpr *E) {
+ if (E->getSubExpr() && !this->discard(E->getSubExpr()))
+ return false;
+
+ return this->emitInvalid(E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitCXXReinterpretCastExpr(
+ const CXXReinterpretCastExpr *E) {
+ if (!this->discard(E->getSubExpr()))
+ return false;
+
+ return this->emitInvalidCast(CastKind::Reinterpret, E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
+ assert(E->getType()->isBooleanType());
+
+ if (DiscardResult)
+ return true;
+ return this->emitConstBool(E->getValue(), E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitCXXConstructExpr(
+ const CXXConstructExpr *E) {
+ QualType T = E->getType();
+ assert(!classify(T));
+
+ if (T->isRecordType()) {
+ const CXXConstructorDecl *Ctor = E->getConstructor();
+
+ // Trivial zero initialization.
+ if (E->requiresZeroInitialization() && Ctor->isTrivial()) {
+ const Record *R = getRecord(E->getType());
+ return this->visitZeroRecordInitializer(R, E);
+ }
+
+ const Function *Func = getFunction(Ctor);
+
+ if (!Func)
+ return false;
+
+ assert(Func->hasThisPointer());
+ assert(!Func->hasRVO());
+
+ // If we're discarding a construct expression, we still need
+ // to allocate a variable and call the constructor and destructor.
+ if (DiscardResult) {
+ assert(!Initializing);
+ std::optional<unsigned> LocalIndex =
+ allocateLocal(E, /*IsExtended=*/true);
+
+ if (!LocalIndex)
+ return false;
+
+ if (!this->emitGetPtrLocal(*LocalIndex, E))
+ return false;
+ }
+
+ // The This pointer is already on the stack because this is an initializer,
+ // but we need to dup() so the call() below has its own copy.
+ if (!this->emitDupPtr(E))
+ return false;
+
+ // Constructor arguments.
+ for (const auto *Arg : E->arguments()) {
+ if (!this->visit(Arg))
+ return false;
+ }
+
+ if (!this->emitCall(Func, E))
+ return false;
+
+ // Immediately call the destructor if we have to.
+ if (DiscardResult) {
+ if (!this->emitPopPtr(E))
+ return false;
}
+ return true;
+ }
+
+ if (T->isArrayType()) {
+ const ConstantArrayType *CAT =
+ Ctx.getASTContext().getAsConstantArrayType(E->getType());
+ assert(CAT);
+ size_t NumElems = CAT->getSize().getZExtValue();
+ const Function *Func = getFunction(E->getConstructor());
+ if (!Func || !Func->isConstexpr())
+ return false;
+
+ // FIXME(perf): We're calling the constructor once per array element here,
+ // in the old intepreter we had a special-case for trivial constructors.
+ for (size_t I = 0; I != NumElems; ++I) {
+ if (!this->emitConstUint64(I, E))
+ return false;
+ if (!this->emitArrayElemPtrUint64(E))
+ return false;
+
+ // Constructor arguments.
+ for (const auto *Arg : E->arguments()) {
+ if (!this->visit(Arg))
+ return false;
+ }
+
+ if (!this->emitCall(Func, E))
+ return false;
+ }
+ return true;
+ }
+
+ return false;
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitSourceLocExpr(const SourceLocExpr *E) {
+ if (DiscardResult)
+ return true;
+
+ const APValue Val =
+ E->EvaluateInContext(Ctx.getASTContext(), SourceLocDefaultExpr);
+
+ // Things like __builtin_LINE().
+ if (E->getType()->isIntegerType()) {
+ assert(Val.isInt());
+ const APSInt &I = Val.getInt();
+ return this->emitConst(I, E);
}
+ // Otherwise, the APValue is an LValue, with only one element.
+ // Theoretically, we don't need the APValue at all of course.
+ assert(E->getType()->isPointerType());
+ assert(Val.isLValue());
+ const APValue::LValueBase &Base = Val.getLValueBase();
+ if (const Expr *LValueExpr = Base.dyn_cast<const Expr *>())
+ return this->visit(LValueExpr);
+
+ // Otherwise, we have a decl (which is the case for
+ // __builtin_source_location).
+ assert(Base.is<const ValueDecl *>());
+ assert(Val.getLValuePath().size() == 0);
+ const auto *BaseDecl = Base.dyn_cast<const ValueDecl *>();
+ assert(BaseDecl);
+
+ auto *UGCD = cast<UnnamedGlobalConstantDecl>(BaseDecl);
+
+ std::optional<unsigned> GlobalIndex = P.getOrCreateGlobal(UGCD);
+ if (!GlobalIndex)
+ return false;
- return this->bail(BO);
+ if (!this->emitGetPtrGlobal(*GlobalIndex, E))
+ return false;
+
+ const Record *R = getRecord(E->getType());
+ const APValue &V = UGCD->getValue();
+ for (unsigned I = 0, N = R->getNumFields(); I != N; ++I) {
+ const Record::Field *F = R->getField(I);
+ const APValue &FieldValue = V.getStructField(I);
+
+ PrimType FieldT = classifyPrim(F->Decl->getType());
+
+ if (!this->visitAPValue(FieldValue, FieldT, E))
+ return false;
+ if (!this->emitInitField(FieldT, F->Offset, E))
+ return false;
+ }
+
+ // Leave the pointer to the global on the stack.
+ return true;
}
template <class Emitter>
-bool ByteCodeExprGen<Emitter>::discard(const Expr *E) {
- OptionScope<Emitter> Scope(this, /*discardResult=*/true);
+bool ByteCodeExprGen<Emitter>::VisitOffsetOfExpr(const OffsetOfExpr *E) {
+ unsigned N = E->getNumComponents();
+ if (N == 0)
+ return false;
+
+ for (unsigned I = 0; I != N; ++I) {
+ const OffsetOfNode &Node = E->getComponent(I);
+ if (Node.getKind() == OffsetOfNode::Array) {
+ const Expr *ArrayIndexExpr = E->getIndexExpr(Node.getArrayExprIndex());
+ PrimType IndexT = classifyPrim(ArrayIndexExpr->getType());
+
+ if (DiscardResult) {
+ if (!this->discard(ArrayIndexExpr))
+ return false;
+ continue;
+ }
+
+ if (!this->visit(ArrayIndexExpr))
+ return false;
+ // Cast to Sint64.
+ if (IndexT != PT_Sint64) {
+ if (!this->emitCast(IndexT, PT_Sint64, E))
+ return false;
+ }
+ }
+ }
+
+ if (DiscardResult)
+ return true;
+
+ PrimType T = classifyPrim(E->getType());
+ return this->emitOffsetOf(T, E, E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitCXXScalarValueInitExpr(
+ const CXXScalarValueInitExpr *E) {
+ QualType Ty = E->getType();
+
+ if (Ty->isVoidType())
+ return true;
+
+ return this->visitZeroInitializer(classifyPrim(Ty), Ty, E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitSizeOfPackExpr(const SizeOfPackExpr *E) {
+ return this->emitConst(E->getPackLength(), E);
+}
+
+template <class Emitter> bool ByteCodeExprGen<Emitter>::discard(const Expr *E) {
+ if (E->containsErrors())
+ return false;
+
+ OptionScope<Emitter> Scope(this, /*NewDiscardResult=*/true,
+ /*NewInitializing=*/false);
+ return this->Visit(E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::delegate(const Expr *E) {
+ if (E->containsErrors())
+ return false;
+
+ // We're basically doing:
+ // OptionScope<Emitter> Scope(this, DicardResult, Initializing);
+ // but that's unnecessary of course.
+ return this->Visit(E);
+}
+
+template <class Emitter> bool ByteCodeExprGen<Emitter>::visit(const Expr *E) {
+ if (E->containsErrors())
+ return false;
+
+ if (E->getType()->isVoidType())
+ return this->discard(E);
+
+ // Create local variable to hold the return value.
+ if (!E->isGLValue() && !E->getType()->isAnyComplexType() &&
+ !classify(E->getType())) {
+ std::optional<unsigned> LocalIndex = allocateLocal(E, /*IsExtended=*/true);
+ if (!LocalIndex)
+ return false;
+
+ if (!this->emitGetPtrLocal(*LocalIndex, E))
+ return false;
+ return this->visitInitializer(E);
+ }
+
+ // Otherwise,we have a primitive return value, produce the value directly
+ // and push it on the stack.
+ OptionScope<Emitter> Scope(this, /*NewDiscardResult=*/false,
+ /*NewInitializing=*/false);
return this->Visit(E);
}
template <class Emitter>
-bool ByteCodeExprGen<Emitter>::visit(const Expr *E) {
- OptionScope<Emitter> Scope(this, /*discardResult=*/false);
+bool ByteCodeExprGen<Emitter>::visitInitializer(const Expr *E) {
+ assert(!classify(E->getType()));
+
+ if (E->containsErrors())
+ return false;
+
+ OptionScope<Emitter> Scope(this, /*NewDiscardResult=*/false,
+ /*NewInitializing=*/true);
return this->Visit(E);
}
template <class Emitter>
bool ByteCodeExprGen<Emitter>::visitBool(const Expr *E) {
- if (Optional<PrimType> T = classify(E->getType())) {
- return visit(E);
- } else {
- return this->bail(E);
+ std::optional<PrimType> T = classify(E->getType());
+ if (!T)
+ return false;
+
+ if (!this->visit(E))
+ return false;
+
+ if (T == PT_Bool)
+ return true;
+
+ // Convert pointers to bool.
+ if (T == PT_Ptr || T == PT_FnPtr) {
+ if (!this->emitNull(*T, E))
+ return false;
+ return this->emitNE(*T, E);
}
+
+ // Or Floats.
+ if (T == PT_Float)
+ return this->emitCastFloatingIntegralBool(E);
+
+ // Or anything else we can.
+ return this->emitCast(*T, PT_Bool, E);
}
template <class Emitter>
-bool ByteCodeExprGen<Emitter>::visitZeroInitializer(PrimType T, const Expr *E) {
+bool ByteCodeExprGen<Emitter>::visitZeroInitializer(PrimType T, QualType QT,
+ const Expr *E) {
switch (T) {
case PT_Bool:
return this->emitZeroBool(E);
@@ -247,24 +1952,104 @@ bool ByteCodeExprGen<Emitter>::visitZeroInitializer(PrimType T, const Expr *E) {
return this->emitZeroSint64(E);
case PT_Uint64:
return this->emitZeroUint64(E);
+ case PT_IntAP:
+ return this->emitZeroIntAP(Ctx.getBitWidth(QT), E);
+ case PT_IntAPS:
+ return this->emitZeroIntAPS(Ctx.getBitWidth(QT), E);
case PT_Ptr:
return this->emitNullPtr(E);
+ case PT_FnPtr:
+ return this->emitNullFnPtr(E);
+ case PT_Float: {
+ return this->emitConstFloat(APFloat::getZero(Ctx.getFloatSemantics(QT)), E);
+ }
}
llvm_unreachable("unknown primitive type");
}
template <class Emitter>
+bool ByteCodeExprGen<Emitter>::visitZeroRecordInitializer(const Record *R,
+ const Expr *E) {
+ assert(E);
+ assert(R);
+ // Fields
+ for (const Record::Field &Field : R->fields()) {
+ const Descriptor *D = Field.Desc;
+ if (D->isPrimitive()) {
+ QualType QT = D->getType();
+ PrimType T = classifyPrim(D->getType());
+ if (!this->visitZeroInitializer(T, QT, E))
+ return false;
+ if (!this->emitInitField(T, Field.Offset, E))
+ return false;
+ continue;
+ }
+
+ // TODO: Add GetPtrFieldPop and get rid of this dup.
+ if (!this->emitDupPtr(E))
+ return false;
+ if (!this->emitGetPtrField(Field.Offset, E))
+ return false;
+
+ if (D->isPrimitiveArray()) {
+ QualType ET = D->getElemQualType();
+ PrimType T = classifyPrim(ET);
+ for (uint32_t I = 0, N = D->getNumElems(); I != N; ++I) {
+ if (!this->visitZeroInitializer(T, ET, E))
+ return false;
+ if (!this->emitInitElem(T, I, E))
+ return false;
+ }
+ } else if (D->isCompositeArray()) {
+ const Record *ElemRecord = D->ElemDesc->ElemRecord;
+ assert(D->ElemDesc->ElemRecord);
+ for (uint32_t I = 0, N = D->getNumElems(); I != N; ++I) {
+ if (!this->emitConstUint32(I, E))
+ return false;
+ if (!this->emitArrayElemPtr(PT_Uint32, E))
+ return false;
+ if (!this->visitZeroRecordInitializer(ElemRecord, E))
+ return false;
+ if (!this->emitPopPtr(E))
+ return false;
+ }
+ } else if (D->isRecord()) {
+ if (!this->visitZeroRecordInitializer(D->ElemRecord, E))
+ return false;
+ } else {
+ assert(false);
+ }
+
+ if (!this->emitPopPtr(E))
+ return false;
+ }
+
+ for (const Record::Base &B : R->bases()) {
+ if (!this->emitGetPtrBase(B.Offset, E))
+ return false;
+ if (!this->visitZeroRecordInitializer(B.R, E))
+ return false;
+ if (!this->emitInitPtrPop(E))
+ return false;
+ }
+
+ // FIXME: Virtual bases.
+
+ return true;
+}
+
+template <class Emitter>
bool ByteCodeExprGen<Emitter>::dereference(
const Expr *LV, DerefKind AK, llvm::function_ref<bool(PrimType)> Direct,
llvm::function_ref<bool(PrimType)> Indirect) {
- if (Optional<PrimType> T = classify(LV->getType())) {
+ if (std::optional<PrimType> T = classify(LV->getType())) {
if (!LV->refersToBitField()) {
// Only primitive, non bit-field types can be dereferenced directly.
- if (auto *DE = dyn_cast<DeclRefExpr>(LV)) {
+ if (const auto *DE = dyn_cast<DeclRefExpr>(LV)) {
if (!DE->getDecl()->getType()->isReferenceType()) {
- if (auto *PD = dyn_cast<ParmVarDecl>(DE->getDecl()))
+ if (const auto *PD = dyn_cast<ParmVarDecl>(DE->getDecl()))
return dereferenceParam(LV, *T, PD, AK, Direct, Indirect);
- if (auto *VD = dyn_cast<VarDecl>(DE->getDecl()))
+ if (const auto *VD = dyn_cast<VarDecl>(DE->getDecl()))
return dereferenceVar(LV, *T, VD, AK, Direct, Indirect);
}
}
@@ -275,6 +2060,9 @@ bool ByteCodeExprGen<Emitter>::dereference(
return Indirect(*T);
}
+ if (LV->getType()->isAnyComplexType())
+ return this->delegate(LV);
+
return false;
}
@@ -285,7 +2073,7 @@ bool ByteCodeExprGen<Emitter>::dereferenceParam(
llvm::function_ref<bool(PrimType)> Indirect) {
auto It = this->Params.find(PD);
if (It != this->Params.end()) {
- unsigned Idx = It->second;
+ unsigned Idx = It->second.Offset;
switch (AK) {
case DerefKind::Read:
return DiscardResult ? true : this->emitGetParam(T, Idx, LV);
@@ -350,7 +2138,7 @@ bool ByteCodeExprGen<Emitter>::dereferenceVar(
return false;
return DiscardResult ? true : this->emitGetPtrLocal(L.Offset, LV);
}
- } else if (auto Idx = getGlobalIdx(VD)) {
+ } else if (auto Idx = P.getGlobal(VD)) {
switch (AK) {
case DerefKind::Read:
if (!this->emitGetGlobal(T, *Idx, LV))
@@ -382,7 +2170,7 @@ bool ByteCodeExprGen<Emitter>::dereferenceVar(
if (VD->hasLocalStorage() && VD->hasInit() && !VD->isConstexpr()) {
QualType VT = VD->getType();
if (VT.isConstQualified() && VT->isFundamentalType())
- return this->Visit(VD->getInit());
+ return this->visit(VD->getInit());
}
}
@@ -391,28 +2179,34 @@ bool ByteCodeExprGen<Emitter>::dereferenceVar(
}
template <class Emitter>
-bool ByteCodeExprGen<Emitter>::emitConst(PrimType T, unsigned NumBits,
- const APInt &Value, const Expr *E) {
- switch (T) {
+template <typename T>
+bool ByteCodeExprGen<Emitter>::emitConst(T Value, PrimType Ty, const Expr *E) {
+ switch (Ty) {
case PT_Sint8:
- return this->emitConstSint8(Value.getSExtValue(), E);
+ return this->emitConstSint8(Value, E);
case PT_Uint8:
- return this->emitConstUint8(Value.getZExtValue(), E);
+ return this->emitConstUint8(Value, E);
case PT_Sint16:
- return this->emitConstSint16(Value.getSExtValue(), E);
+ return this->emitConstSint16(Value, E);
case PT_Uint16:
- return this->emitConstUint16(Value.getZExtValue(), E);
+ return this->emitConstUint16(Value, E);
case PT_Sint32:
- return this->emitConstSint32(Value.getSExtValue(), E);
+ return this->emitConstSint32(Value, E);
case PT_Uint32:
- return this->emitConstUint32(Value.getZExtValue(), E);
+ return this->emitConstUint32(Value, E);
case PT_Sint64:
- return this->emitConstSint64(Value.getSExtValue(), E);
+ return this->emitConstSint64(Value, E);
case PT_Uint64:
- return this->emitConstUint64(Value.getZExtValue(), E);
+ return this->emitConstUint64(Value, E);
+ case PT_IntAP:
+ case PT_IntAPS:
+ assert(false);
+ return false;
case PT_Bool:
- return this->emitConstBool(Value.getBoolValue(), E);
+ return this->emitConstBool(Value, E);
case PT_Ptr:
+ case PT_FnPtr:
+ case PT_Float:
llvm_unreachable("Invalid integral type");
break;
}
@@ -420,36 +2214,77 @@ bool ByteCodeExprGen<Emitter>::emitConst(PrimType T, unsigned NumBits,
}
template <class Emitter>
+template <typename T>
+bool ByteCodeExprGen<Emitter>::emitConst(T Value, const Expr *E) {
+ return this->emitConst(Value, classifyPrim(E->getType()), E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::emitConst(const APSInt &Value, PrimType Ty,
+ const Expr *E) {
+ if (Value.isSigned())
+ return this->emitConst(Value.getSExtValue(), Ty, E);
+ return this->emitConst(Value.getZExtValue(), Ty, E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::emitConst(const APSInt &Value, const Expr *E) {
+ return this->emitConst(Value, classifyPrim(E->getType()), E);
+}
+
+template <class Emitter>
unsigned ByteCodeExprGen<Emitter>::allocateLocalPrimitive(DeclTy &&Src,
PrimType Ty,
bool IsConst,
bool IsExtended) {
- Descriptor *D = P.createDescriptor(Src, Ty, IsConst, Src.is<const Expr *>());
+ // Make sure we don't accidentally register the same decl twice.
+ if (const auto *VD =
+ dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>())) {
+ assert(!P.getGlobal(VD));
+ assert(!Locals.contains(VD));
+ }
+
+ // FIXME: There are cases where Src.is<Expr*>() is wrong, e.g.
+ // (int){12} in C. Consider using Expr::isTemporaryObject() instead
+ // or isa<MaterializeTemporaryExpr>().
+ Descriptor *D = P.createDescriptor(Src, Ty, Descriptor::InlineDescMD, IsConst,
+ Src.is<const Expr *>());
Scope::Local Local = this->createLocal(D);
- if (auto *VD = dyn_cast_or_null<ValueDecl>(Src.dyn_cast<const Decl *>()))
+ if (auto *VD = dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>()))
Locals.insert({VD, Local});
VarScope->add(Local, IsExtended);
return Local.Offset;
}
template <class Emitter>
-llvm::Optional<unsigned>
+std::optional<unsigned>
ByteCodeExprGen<Emitter>::allocateLocal(DeclTy &&Src, bool IsExtended) {
- QualType Ty;
+ // Make sure we don't accidentally register the same decl twice.
+ if ([[maybe_unused]] const auto *VD =
+ dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>())) {
+ assert(!P.getGlobal(VD));
+ assert(!Locals.contains(VD));
+ }
+ QualType Ty;
const ValueDecl *Key = nullptr;
+ const Expr *Init = nullptr;
bool IsTemporary = false;
- if (auto *VD = dyn_cast_or_null<ValueDecl>(Src.dyn_cast<const Decl *>())) {
+ if (auto *VD = dyn_cast_if_present<ValueDecl>(Src.dyn_cast<const Decl *>())) {
Key = VD;
Ty = VD->getType();
+
+ if (const auto *VarD = dyn_cast<VarDecl>(VD))
+ Init = VarD->getInit();
}
if (auto *E = Src.dyn_cast<const Expr *>()) {
IsTemporary = true;
Ty = E->getType();
}
- Descriptor *D = P.createDescriptor(Src, Ty.getTypePtr(),
- Ty.isConstQualified(), IsTemporary);
+ Descriptor *D = P.createDescriptor(
+ Src, Ty.getTypePtr(), Descriptor::InlineDescMD, Ty.isConstQualified(),
+ IsTemporary, /*IsMutable=*/false, Init);
if (!D)
return {};
@@ -461,107 +2296,612 @@ ByteCodeExprGen<Emitter>::allocateLocal(DeclTy &&Src, bool IsExtended) {
}
template <class Emitter>
-bool ByteCodeExprGen<Emitter>::visitInitializer(
- const Expr *Init, InitFnRef InitFn) {
- OptionScope<Emitter> Scope(this, InitFn);
- return this->Visit(Init);
+const RecordType *ByteCodeExprGen<Emitter>::getRecordTy(QualType Ty) {
+ if (const PointerType *PT = dyn_cast<PointerType>(Ty))
+ return PT->getPointeeType()->getAs<RecordType>();
+ return Ty->getAs<RecordType>();
}
template <class Emitter>
-bool ByteCodeExprGen<Emitter>::getPtrVarDecl(const VarDecl *VD, const Expr *E) {
- // Generate a pointer to the local, loading refs.
- if (Optional<unsigned> Idx = getGlobalIdx(VD)) {
- if (VD->getType()->isReferenceType())
- return this->emitGetGlobalPtr(*Idx, E);
- else
- return this->emitGetPtrGlobal(*Idx, E);
- }
- return this->bail(VD);
+Record *ByteCodeExprGen<Emitter>::getRecord(QualType Ty) {
+ if (const auto *RecordTy = getRecordTy(Ty))
+ return getRecord(RecordTy->getDecl());
+ return nullptr;
+}
+
+template <class Emitter>
+Record *ByteCodeExprGen<Emitter>::getRecord(const RecordDecl *RD) {
+ return P.getOrCreateRecord(RD);
}
template <class Emitter>
-llvm::Optional<unsigned>
-ByteCodeExprGen<Emitter>::getGlobalIdx(const VarDecl *VD) {
- if (VD->isConstexpr()) {
- // Constexpr decl - it must have already been defined.
- return P.getGlobal(VD);
+const Function *ByteCodeExprGen<Emitter>::getFunction(const FunctionDecl *FD) {
+ return Ctx.getOrCreateFunction(FD);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::visitExpr(const Expr *E) {
+ ExprScope<Emitter> RootScope(this);
+ // Void expressions.
+ if (E->getType()->isVoidType()) {
+ if (!visit(E))
+ return false;
+ return this->emitRetVoid(E);
}
- if (!VD->hasLocalStorage()) {
- // Not constexpr, but a global var - can have pointer taken.
- Program::DeclScope Scope(P, VD);
- return P.getOrCreateGlobal(VD);
+
+ // Expressions with a primitive return type.
+ if (std::optional<PrimType> T = classify(E)) {
+ if (!visit(E))
+ return false;
+ return this->emitRet(*T, E);
+ }
+
+ // Expressions with a composite return type.
+ // For us, that means everything we don't
+ // have a PrimType for.
+ if (std::optional<unsigned> LocalOffset = this->allocateLocal(E)) {
+ if (!this->visitLocalInitializer(E, *LocalOffset))
+ return false;
+
+ if (!this->emitGetPtrLocal(*LocalOffset, E))
+ return false;
+ return this->emitRetValue(E);
}
- return {};
+
+ return false;
}
+/// Toplevel visitDecl().
+/// We get here from evaluateAsInitializer().
+/// We need to evaluate the initializer and return its value.
template <class Emitter>
-const RecordType *ByteCodeExprGen<Emitter>::getRecordTy(QualType Ty) {
- if (auto *PT = dyn_cast<PointerType>(Ty))
- return PT->getPointeeType()->getAs<RecordType>();
- else
- return Ty->getAs<RecordType>();
+bool ByteCodeExprGen<Emitter>::visitDecl(const VarDecl *VD) {
+ assert(!VD->isInvalidDecl() && "Trying to constant evaluate an invalid decl");
+
+ // Create and initialize the variable.
+ if (!this->visitVarDecl(VD))
+ return false;
+
+ std::optional<PrimType> VarT = classify(VD->getType());
+ // Get a pointer to the variable
+ if (Context::shouldBeGloballyIndexed(VD)) {
+ auto GlobalIndex = P.getGlobal(VD);
+ assert(GlobalIndex); // visitVarDecl() didn't return false.
+ if (VarT) {
+ if (!this->emitGetGlobalUnchecked(*VarT, *GlobalIndex, VD))
+ return false;
+ } else {
+ if (!this->emitGetPtrGlobal(*GlobalIndex, VD))
+ return false;
+ }
+ } else {
+ auto Local = Locals.find(VD);
+ assert(Local != Locals.end()); // Same here.
+ if (VarT) {
+ if (!this->emitGetLocal(*VarT, Local->second.Offset, VD))
+ return false;
+ } else {
+ if (!this->emitGetPtrLocal(Local->second.Offset, VD))
+ return false;
+ }
+ }
+
+ // Return the value
+ if (VarT)
+ return this->emitRet(*VarT, VD);
+
+ // Return non-primitive values as pointers here.
+ return this->emitRet(PT_Ptr, VD);
}
template <class Emitter>
-Record *ByteCodeExprGen<Emitter>::getRecord(QualType Ty) {
- if (auto *RecordTy = getRecordTy(Ty)) {
- return getRecord(RecordTy->getDecl());
+bool ByteCodeExprGen<Emitter>::visitVarDecl(const VarDecl *VD) {
+ // We don't know what to do with these, so just return false.
+ if (VD->getType().isNull())
+ return false;
+
+ const Expr *Init = VD->getInit();
+ std::optional<PrimType> VarT = classify(VD->getType());
+
+ if (Context::shouldBeGloballyIndexed(VD)) {
+ // We've already seen and initialized this global.
+ if (P.getGlobal(VD))
+ return true;
+
+ std::optional<unsigned> GlobalIndex = P.createGlobal(VD, Init);
+
+ if (!GlobalIndex)
+ return false;
+
+ assert(Init);
+ {
+ DeclScope<Emitter> LocalScope(this, VD);
+
+ if (VarT) {
+ if (!this->visit(Init))
+ return false;
+ return this->emitInitGlobal(*VarT, *GlobalIndex, VD);
+ }
+ return this->visitGlobalInitializer(Init, *GlobalIndex);
+ }
+ } else {
+ VariableScope<Emitter> LocalScope(this);
+ if (VarT) {
+ unsigned Offset = this->allocateLocalPrimitive(
+ VD, *VarT, VD->getType().isConstQualified());
+ if (Init) {
+ // Compile the initializer in its own scope.
+ ExprScope<Emitter> Scope(this);
+ if (!this->visit(Init))
+ return false;
+
+ return this->emitSetLocal(*VarT, Offset, VD);
+ }
+ } else {
+ if (std::optional<unsigned> Offset = this->allocateLocal(VD)) {
+ if (Init)
+ return this->visitLocalInitializer(Init, *Offset);
+ }
+ }
+ return true;
}
- return nullptr;
+
+ return false;
}
template <class Emitter>
-Record *ByteCodeExprGen<Emitter>::getRecord(const RecordDecl *RD) {
- return P.getOrCreateRecord(RD);
+bool ByteCodeExprGen<Emitter>::visitAPValue(const APValue &Val,
+ PrimType ValType, const Expr *E) {
+ assert(!DiscardResult);
+ if (Val.isInt())
+ return this->emitConst(Val.getInt(), ValType, E);
+
+ if (Val.isLValue()) {
+ APValue::LValueBase Base = Val.getLValueBase();
+ if (const Expr *BaseExpr = Base.dyn_cast<const Expr *>())
+ return this->visit(BaseExpr);
+ }
+
+ return false;
}
template <class Emitter>
-bool ByteCodeExprGen<Emitter>::visitExpr(const Expr *Exp) {
- ExprScope<Emitter> RootScope(this);
- if (!visit(Exp))
+bool ByteCodeExprGen<Emitter>::VisitBuiltinCallExpr(const CallExpr *E) {
+ const Function *Func = getFunction(E->getDirectCallee());
+ if (!Func)
return false;
- if (Optional<PrimType> T = classify(Exp))
- return this->emitRet(*T, Exp);
- else
- return this->emitRetValue(Exp);
+ if (!Func->isUnevaluatedBuiltin()) {
+ // Put arguments on the stack.
+ for (const auto *Arg : E->arguments()) {
+ if (!this->visit(Arg))
+ return false;
+ }
+ }
+
+ if (!this->emitCallBI(Func, E, E))
+ return false;
+
+ QualType ReturnType = E->getCallReturnType(Ctx.getASTContext());
+ if (DiscardResult && !ReturnType->isVoidType()) {
+ PrimType T = classifyPrim(ReturnType);
+ return this->emitPop(T, E);
+ }
+
+ return true;
}
template <class Emitter>
-bool ByteCodeExprGen<Emitter>::visitDecl(const VarDecl *VD) {
- const Expr *Init = VD->getInit();
-
- if (Optional<unsigned> I = P.createGlobal(VD)) {
- if (Optional<PrimType> T = classify(VD->getType())) {
- {
- // Primitive declarations - compute the value and set it.
- DeclScope<Emitter> LocalScope(this, VD);
- if (!visit(Init))
+bool ByteCodeExprGen<Emitter>::VisitCallExpr(const CallExpr *E) {
+ if (E->getBuiltinCallee())
+ return VisitBuiltinCallExpr(E);
+
+ QualType ReturnType = E->getCallReturnType(Ctx.getASTContext());
+ std::optional<PrimType> T = classify(ReturnType);
+ bool HasRVO = !ReturnType->isVoidType() && !T;
+
+ if (HasRVO) {
+ if (DiscardResult) {
+ // If we need to discard the return value but the function returns its
+ // value via an RVO pointer, we need to create one such pointer just
+ // for this call.
+ if (std::optional<unsigned> LocalIndex = allocateLocal(E)) {
+ if (!this->emitGetPtrLocal(*LocalIndex, E))
return false;
}
-
- // If the declaration is global, save the value for later use.
- if (!this->emitDup(*T, VD))
+ } else {
+ assert(Initializing);
+ if (!this->emitDupPtr(E))
return false;
- if (!this->emitInitGlobal(*T, *I, VD))
+ }
+ }
+
+ // Add the (optional, implicit) This pointer.
+ if (const auto *MC = dyn_cast<CXXMemberCallExpr>(E)) {
+ if (!this->visit(MC->getImplicitObjectArgument()))
+ return false;
+ }
+
+ // Put arguments on the stack.
+ for (const auto *Arg : E->arguments()) {
+ if (!this->visit(Arg))
+ return false;
+ }
+
+ if (const FunctionDecl *FuncDecl = E->getDirectCallee()) {
+ const Function *Func = getFunction(FuncDecl);
+ if (!Func)
+ return false;
+ // If the function is being compiled right now, this is a recursive call.
+ // In that case, the function can't be valid yet, even though it will be
+ // later.
+ // If the function is already fully compiled but not constexpr, it was
+ // found to be faulty earlier on, so bail out.
+ if (Func->isFullyCompiled() && !Func->isConstexpr())
+ return false;
+
+ assert(HasRVO == Func->hasRVO());
+
+ bool HasQualifier = false;
+ if (const auto *ME = dyn_cast<MemberExpr>(E->getCallee()))
+ HasQualifier = ME->hasQualifier();
+
+ bool IsVirtual = false;
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FuncDecl))
+ IsVirtual = MD->isVirtual();
+
+ // In any case call the function. The return value will end up on the stack
+ // and if the function has RVO, we already have the pointer on the stack to
+ // write the result into.
+ if (IsVirtual && !HasQualifier) {
+ if (!this->emitCallVirt(Func, E))
return false;
- return this->emitRet(*T, VD);
} else {
- {
- // Composite declarations - allocate storage and initialize it.
- DeclScope<Emitter> LocalScope(this, VD);
- if (!visitGlobalInitializer(Init, *I))
+ if (!this->emitCall(Func, E))
+ return false;
+ }
+ } else {
+ // Indirect call. Visit the callee, which will leave a FunctionPointer on
+ // the stack. Cleanup of the returned value if necessary will be done after
+ // the function call completed.
+ if (!this->visit(E->getCallee()))
+ return false;
+
+ if (!this->emitCallPtr(E))
+ return false;
+ }
+
+ // Cleanup for discarded return values.
+ if (DiscardResult && !ReturnType->isVoidType() && T)
+ return this->emitPop(*T, E);
+
+ return true;
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitCXXDefaultInitExpr(
+ const CXXDefaultInitExpr *E) {
+ SourceLocScope<Emitter> SLS(this, E);
+ if (Initializing)
+ return this->visitInitializer(E->getExpr());
+
+ assert(classify(E->getType()));
+ return this->visit(E->getExpr());
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitCXXDefaultArgExpr(
+ const CXXDefaultArgExpr *E) {
+ SourceLocScope<Emitter> SLS(this, E);
+
+ const Expr *SubExpr = E->getExpr();
+ if (std::optional<PrimType> T = classify(E->getExpr()))
+ return this->visit(SubExpr);
+
+ assert(Initializing);
+ return this->visitInitializer(SubExpr);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitCXXBoolLiteralExpr(
+ const CXXBoolLiteralExpr *E) {
+ if (DiscardResult)
+ return true;
+
+ return this->emitConstBool(E->getValue(), E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitCXXNullPtrLiteralExpr(
+ const CXXNullPtrLiteralExpr *E) {
+ if (DiscardResult)
+ return true;
+
+ return this->emitNullPtr(E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitGNUNullExpr(const GNUNullExpr *E) {
+ if (DiscardResult)
+ return true;
+
+ assert(E->getType()->isIntegerType());
+
+ PrimType T = classifyPrim(E->getType());
+ return this->emitZero(T, E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitCXXThisExpr(const CXXThisExpr *E) {
+ if (DiscardResult)
+ return true;
+
+ if (this->LambdaThisCapture > 0)
+ return this->emitGetThisFieldPtr(this->LambdaThisCapture, E);
+
+ return this->emitThis(E);
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
+ const Expr *SubExpr = E->getSubExpr();
+ std::optional<PrimType> T = classify(SubExpr->getType());
+
+ switch (E->getOpcode()) {
+ case UO_PostInc: { // x++
+ if (!this->visit(SubExpr))
+ return false;
+
+ if (T == PT_Ptr) {
+ if (!this->emitIncPtr(E))
+ return false;
+
+ return DiscardResult ? this->emitPopPtr(E) : true;
+ }
+
+ if (T == PT_Float) {
+ return DiscardResult ? this->emitIncfPop(getRoundingMode(E), E)
+ : this->emitIncf(getRoundingMode(E), E);
+ }
+
+ return DiscardResult ? this->emitIncPop(*T, E) : this->emitInc(*T, E);
+ }
+ case UO_PostDec: { // x--
+ if (!this->visit(SubExpr))
+ return false;
+
+ if (T == PT_Ptr) {
+ if (!this->emitDecPtr(E))
+ return false;
+
+ return DiscardResult ? this->emitPopPtr(E) : true;
+ }
+
+ if (T == PT_Float) {
+ return DiscardResult ? this->emitDecfPop(getRoundingMode(E), E)
+ : this->emitDecf(getRoundingMode(E), E);
+ }
+
+ return DiscardResult ? this->emitDecPop(*T, E) : this->emitDec(*T, E);
+ }
+ case UO_PreInc: { // ++x
+ if (!this->visit(SubExpr))
+ return false;
+
+ if (T == PT_Ptr) {
+ if (!this->emitLoadPtr(E))
+ return false;
+ if (!this->emitConstUint8(1, E))
+ return false;
+ if (!this->emitAddOffsetUint8(E))
+ return false;
+ return DiscardResult ? this->emitStorePopPtr(E) : this->emitStorePtr(E);
+ }
+
+ // Post-inc and pre-inc are the same if the value is to be discarded.
+ if (DiscardResult) {
+ if (T == PT_Float)
+ return this->emitIncfPop(getRoundingMode(E), E);
+ return this->emitIncPop(*T, E);
+ }
+
+ if (T == PT_Float) {
+ const auto &TargetSemantics = Ctx.getFloatSemantics(E->getType());
+ if (!this->emitLoadFloat(E))
+ return false;
+ if (!this->emitConstFloat(llvm::APFloat(TargetSemantics, 1), E))
+ return false;
+ if (!this->emitAddf(getRoundingMode(E), E))
+ return false;
+ return this->emitStoreFloat(E);
+ }
+ if (!this->emitLoad(*T, E))
+ return false;
+ if (!this->emitConst(1, E))
+ return false;
+ if (!this->emitAdd(*T, E))
+ return false;
+ return this->emitStore(*T, E);
+ }
+ case UO_PreDec: { // --x
+ if (!this->visit(SubExpr))
+ return false;
+
+ if (T == PT_Ptr) {
+ if (!this->emitLoadPtr(E))
+ return false;
+ if (!this->emitConstUint8(1, E))
+ return false;
+ if (!this->emitSubOffsetUint8(E))
+ return false;
+ return DiscardResult ? this->emitStorePopPtr(E) : this->emitStorePtr(E);
+ }
+
+ // Post-dec and pre-dec are the same if the value is to be discarded.
+ if (DiscardResult) {
+ if (T == PT_Float)
+ return this->emitDecfPop(getRoundingMode(E), E);
+ return this->emitDecPop(*T, E);
+ }
+
+ if (T == PT_Float) {
+ const auto &TargetSemantics = Ctx.getFloatSemantics(E->getType());
+ if (!this->emitLoadFloat(E))
+ return false;
+ if (!this->emitConstFloat(llvm::APFloat(TargetSemantics, 1), E))
+ return false;
+ if (!this->emitSubf(getRoundingMode(E), E))
+ return false;
+ return this->emitStoreFloat(E);
+ }
+ if (!this->emitLoad(*T, E))
+ return false;
+ if (!this->emitConst(1, E))
+ return false;
+ if (!this->emitSub(*T, E))
+ return false;
+ return this->emitStore(*T, E);
+ }
+ case UO_LNot: // !x
+ if (DiscardResult)
+ return this->discard(SubExpr);
+
+ if (!this->visitBool(SubExpr))
+ return false;
+
+ if (!this->emitInvBool(E))
+ return false;
+
+ if (PrimType ET = classifyPrim(E->getType()); ET != PT_Bool)
+ return this->emitCast(PT_Bool, ET, E);
+ return true;
+ case UO_Minus: // -x
+ if (!this->visit(SubExpr))
+ return false;
+ return DiscardResult ? this->emitPop(*T, E) : this->emitNeg(*T, E);
+ case UO_Plus: // +x
+ if (!this->visit(SubExpr)) // noop
+ return false;
+ return DiscardResult ? this->emitPop(*T, E) : true;
+ case UO_AddrOf: // &x
+ // We should already have a pointer when we get here.
+ return this->delegate(SubExpr);
+ case UO_Deref: // *x
+ return dereference(
+ SubExpr, DerefKind::Read,
+ [](PrimType) {
+ llvm_unreachable("Dereferencing requires a pointer");
return false;
- }
+ },
+ [this, E](PrimType T) {
+ return DiscardResult ? this->emitPop(T, E) : true;
+ });
+ case UO_Not: // ~x
+ if (!this->visit(SubExpr))
+ return false;
+ return DiscardResult ? this->emitPop(*T, E) : this->emitComp(*T, E);
+ case UO_Real: // __real x
+ if (T)
+ return this->delegate(SubExpr);
+ return this->emitComplexReal(SubExpr);
+ case UO_Imag: { // __imag x
+ if (T) {
+ if (!this->discard(SubExpr))
+ return false;
+ return this->visitZeroInitializer(*T, SubExpr->getType(), SubExpr);
+ }
+ if (!this->visit(SubExpr))
+ return false;
+ if (!this->emitConstUint8(1, E))
+ return false;
+ if (!this->emitArrayElemPtrPopUint8(E))
+ return false;
- // Return a pointer to the global.
- if (!this->emitGetPtrGlobal(*I, VD))
+ // Since our _Complex implementation does not map to a primitive type,
+ // we sometimes have to do the lvalue-to-rvalue conversion here manually.
+ if (!SubExpr->isLValue())
+ return this->emitLoadPop(classifyPrim(E->getType()), E);
+ return true;
+ }
+ case UO_Extension:
+ return this->delegate(SubExpr);
+ case UO_Coawait:
+ assert(false && "Unhandled opcode");
+ }
+
+ return false;
+}
+
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitDeclRefExpr(const DeclRefExpr *E) {
+ if (DiscardResult)
+ return true;
+
+ const auto *D = E->getDecl();
+
+ if (const auto *ECD = dyn_cast<EnumConstantDecl>(D)) {
+ return this->emitConst(ECD->getInitVal(), E);
+ } else if (const auto *BD = dyn_cast<BindingDecl>(D)) {
+ return this->visit(BD->getBinding());
+ } else if (const auto *FuncDecl = dyn_cast<FunctionDecl>(D)) {
+ const Function *F = getFunction(FuncDecl);
+ return F && this->emitGetFnPtr(F, E);
+ }
+
+ // References are implemented via pointers, so when we see a DeclRefExpr
+ // pointing to a reference, we need to get its value directly (i.e. the
+ // pointer to the actual value) instead of a pointer to the pointer to the
+ // value.
+ bool IsReference = D->getType()->isReferenceType();
+
+ // Check for local/global variables and parameters.
+ if (auto It = Locals.find(D); It != Locals.end()) {
+ const unsigned Offset = It->second.Offset;
+
+ if (IsReference)
+ return this->emitGetLocal(PT_Ptr, Offset, E);
+ return this->emitGetPtrLocal(Offset, E);
+ } else if (auto GlobalIndex = P.getGlobal(D)) {
+ if (IsReference)
+ return this->emitGetGlobalPtr(*GlobalIndex, E);
+
+ return this->emitGetPtrGlobal(*GlobalIndex, E);
+ } else if (const auto *PVD = dyn_cast<ParmVarDecl>(D)) {
+ if (auto It = this->Params.find(PVD); It != this->Params.end()) {
+ if (IsReference || !It->second.IsPtr)
+ return this->emitGetParamPtr(It->second.Offset, E);
+
+ return this->emitGetPtrParam(It->second.Offset, E);
+ }
+ }
+
+ // Handle lambda captures.
+ if (auto It = this->LambdaCaptures.find(D);
+ It != this->LambdaCaptures.end()) {
+ auto [Offset, IsPtr] = It->second;
+
+ if (IsPtr)
+ return this->emitGetThisFieldPtr(Offset, E);
+ return this->emitGetPtrThisField(Offset, E);
+ }
+
+ // Lazily visit global declarations we haven't seen yet.
+ // This happens in C.
+ if (!Ctx.getLangOpts().CPlusPlus) {
+ if (const auto *VD = dyn_cast<VarDecl>(D);
+ VD && VD->hasGlobalStorage() && VD->getAnyInitializer() &&
+ VD->getType().isConstQualified()) {
+ if (!this->visitVarDecl(VD))
return false;
- return this->emitRetValue(VD);
+ // Retry.
+ return this->VisitDeclRefExpr(E);
}
+
+ if (std::optional<unsigned> I = P.getOrCreateDummy(D))
+ return this->emitGetPtrGlobal(*I, E);
}
- return this->bail(VD);
+ return this->emitInvalidDeclRef(E, E);
}
template <class Emitter>
@@ -570,6 +2910,174 @@ void ByteCodeExprGen<Emitter>::emitCleanup() {
C->emitDestruction();
}
+template <class Emitter>
+unsigned
+ByteCodeExprGen<Emitter>::collectBaseOffset(const RecordType *BaseType,
+ const RecordType *DerivedType) {
+ const auto *FinalDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ const RecordDecl *CurDecl = DerivedType->getDecl();
+ const Record *CurRecord = getRecord(CurDecl);
+ assert(CurDecl && FinalDecl);
+
+ unsigned OffsetSum = 0;
+ for (;;) {
+ assert(CurRecord->getNumBases() > 0);
+ // One level up
+ for (const Record::Base &B : CurRecord->bases()) {
+ const auto *BaseDecl = cast<CXXRecordDecl>(B.Decl);
+
+ if (BaseDecl == FinalDecl || BaseDecl->isDerivedFrom(FinalDecl)) {
+ OffsetSum += B.Offset;
+ CurRecord = B.R;
+ CurDecl = BaseDecl;
+ break;
+ }
+ }
+ if (CurDecl == FinalDecl)
+ break;
+ }
+
+ assert(OffsetSum > 0);
+ return OffsetSum;
+}
+
+/// Emit casts from a PrimType to another PrimType.
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::emitPrimCast(PrimType FromT, PrimType ToT,
+ QualType ToQT, const Expr *E) {
+
+ if (FromT == PT_Float) {
+ // Floating to floating.
+ if (ToT == PT_Float) {
+ const llvm::fltSemantics *ToSem = &Ctx.getFloatSemantics(ToQT);
+ return this->emitCastFP(ToSem, getRoundingMode(E), E);
+ }
+
+ // Float to integral.
+ if (isIntegralType(ToT) || ToT == PT_Bool)
+ return this->emitCastFloatingIntegral(ToT, E);
+ }
+
+ if (isIntegralType(FromT) || FromT == PT_Bool) {
+ // Integral to integral.
+ if (isIntegralType(ToT) || ToT == PT_Bool)
+ return FromT != ToT ? this->emitCast(FromT, ToT, E) : true;
+
+ if (ToT == PT_Float) {
+ // Integral to floating.
+ const llvm::fltSemantics *ToSem = &Ctx.getFloatSemantics(ToQT);
+ return this->emitCastIntegralFloating(FromT, ToSem, getRoundingMode(E),
+ E);
+ }
+ }
+
+ return false;
+}
+
+/// Emits __real(SubExpr)
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::emitComplexReal(const Expr *SubExpr) {
+ assert(SubExpr->getType()->isAnyComplexType());
+
+ if (DiscardResult)
+ return this->discard(SubExpr);
+
+ if (!this->visit(SubExpr))
+ return false;
+ if (!this->emitConstUint8(0, SubExpr))
+ return false;
+ if (!this->emitArrayElemPtrPopUint8(SubExpr))
+ return false;
+
+ // Since our _Complex implementation does not map to a primitive type,
+ // we sometimes have to do the lvalue-to-rvalue conversion here manually.
+ if (!SubExpr->isLValue())
+ return this->emitLoadPop(*classifyComplexElementType(SubExpr->getType()),
+ SubExpr);
+ return true;
+}
+
+/// When calling this, we have a pointer of the local-to-destroy
+/// on the stack.
+/// Emit destruction of record types (or arrays of record types).
+template <class Emitter>
+bool ByteCodeExprGen<Emitter>::emitRecordDestruction(const Descriptor *Desc) {
+ assert(Desc);
+ assert(!Desc->isPrimitive());
+ assert(!Desc->isPrimitiveArray());
+
+ // Arrays.
+ if (Desc->isArray()) {
+ const Descriptor *ElemDesc = Desc->ElemDesc;
+ assert(ElemDesc);
+
+ // Don't need to do anything for these.
+ if (ElemDesc->isPrimitiveArray())
+ return this->emitPopPtr(SourceInfo{});
+
+ // If this is an array of record types, check if we need
+ // to call the element destructors at all. If not, try
+ // to save the work.
+ if (const Record *ElemRecord = ElemDesc->ElemRecord) {
+ if (const CXXDestructorDecl *Dtor = ElemRecord->getDestructor();
+ !Dtor || Dtor->isTrivial())
+ return this->emitPopPtr(SourceInfo{});
+ }
+
+ for (ssize_t I = Desc->getNumElems() - 1; I >= 0; --I) {
+ if (!this->emitConstUint64(I, SourceInfo{}))
+ return false;
+ if (!this->emitArrayElemPtrUint64(SourceInfo{}))
+ return false;
+ if (!this->emitRecordDestruction(ElemDesc))
+ return false;
+ }
+ return this->emitPopPtr(SourceInfo{});
+ }
+
+ const Record *R = Desc->ElemRecord;
+ assert(R);
+ // First, destroy all fields.
+ for (const Record::Field &Field : llvm::reverse(R->fields())) {
+ const Descriptor *D = Field.Desc;
+ if (!D->isPrimitive() && !D->isPrimitiveArray()) {
+ if (!this->emitDupPtr(SourceInfo{}))
+ return false;
+ if (!this->emitGetPtrField(Field.Offset, SourceInfo{}))
+ return false;
+ if (!this->emitRecordDestruction(D))
+ return false;
+ }
+ }
+
+ // FIXME: Unions need to be handled differently here. We don't want to
+ // call the destructor of its members.
+
+ // Now emit the destructor and recurse into base classes.
+ if (const CXXDestructorDecl *Dtor = R->getDestructor();
+ Dtor && !Dtor->isTrivial()) {
+ if (const Function *DtorFunc = getFunction(Dtor)) {
+ assert(DtorFunc->hasThisPointer());
+ assert(DtorFunc->getNumParams() == 1);
+ if (!this->emitDupPtr(SourceInfo{}))
+ return false;
+ if (!this->emitCall(DtorFunc, SourceInfo{}))
+ return false;
+ }
+ }
+
+ for (const Record::Base &Base : llvm::reverse(R->bases())) {
+ if (!this->emitGetPtrBase(Base.Offset, SourceInfo{}))
+ return false;
+ if (!this->emitRecordDestruction(Base.Desc))
+ return false;
+ }
+ // FIXME: Virtual bases.
+
+ // Remove the instance pointer.
+ return this->emitPopPtr(SourceInfo{});
+}
+
namespace clang {
namespace interp {
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h
index 716f28551e58..df4cb736299c 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h
@@ -22,39 +22,30 @@
#include "clang/AST/Expr.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/TargetInfo.h"
-#include "llvm/ADT/Optional.h"
namespace clang {
class QualType;
namespace interp {
-class Function;
-class State;
template <class Emitter> class LocalScope;
+template <class Emitter> class DestructorScope;
template <class Emitter> class RecordScope;
template <class Emitter> class VariableScope;
template <class Emitter> class DeclScope;
template <class Emitter> class OptionScope;
+template <class Emitter> class ArrayIndexScope;
+template <class Emitter> class SourceLocScope;
/// Compilation context for expressions.
template <class Emitter>
class ByteCodeExprGen : public ConstStmtVisitor<ByteCodeExprGen<Emitter>, bool>,
public Emitter {
protected:
- // Emitters for opcodes of various arities.
- using NullaryFn = bool (ByteCodeExprGen::*)(const SourceInfo &);
- using UnaryFn = bool (ByteCodeExprGen::*)(PrimType, const SourceInfo &);
- using BinaryFn = bool (ByteCodeExprGen::*)(PrimType, PrimType,
- const SourceInfo &);
-
// Aliases for types defined in the emitter.
using LabelTy = typename Emitter::LabelTy;
using AddrTy = typename Emitter::AddrTy;
- // Reference to a function generating the pointer of an initialized object.s
- using InitFnRef = std::function<bool()>;
-
/// Current compilation context.
Context &Ctx;
/// Program to link to.
@@ -66,11 +57,57 @@ public:
ByteCodeExprGen(Context &Ctx, Program &P, Tys &&... Args)
: Emitter(Ctx, P, Args...), Ctx(Ctx), P(P) {}
- // Expression visitors - result returned on stack.
+ // Expression visitors - result returned on interp stack.
bool VisitCastExpr(const CastExpr *E);
bool VisitIntegerLiteral(const IntegerLiteral *E);
+ bool VisitFloatingLiteral(const FloatingLiteral *E);
bool VisitParenExpr(const ParenExpr *E);
bool VisitBinaryOperator(const BinaryOperator *E);
+ bool VisitLogicalBinOp(const BinaryOperator *E);
+ bool VisitPointerArithBinOp(const BinaryOperator *E);
+ bool VisitComplexBinOp(const BinaryOperator *E);
+ bool VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E);
+ bool VisitCallExpr(const CallExpr *E);
+ bool VisitBuiltinCallExpr(const CallExpr *E);
+ bool VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *E);
+ bool VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E);
+ bool VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E);
+ bool VisitGNUNullExpr(const GNUNullExpr *E);
+ bool VisitCXXThisExpr(const CXXThisExpr *E);
+ bool VisitUnaryOperator(const UnaryOperator *E);
+ bool VisitDeclRefExpr(const DeclRefExpr *E);
+ bool VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E);
+ bool VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E);
+ bool VisitArraySubscriptExpr(const ArraySubscriptExpr *E);
+ bool VisitInitListExpr(const InitListExpr *E);
+ bool VisitCXXParenListInitExpr(const CXXParenListInitExpr *E);
+ bool VisitConstantExpr(const ConstantExpr *E);
+ bool VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
+ bool VisitMemberExpr(const MemberExpr *E);
+ bool VisitArrayInitIndexExpr(const ArrayInitIndexExpr *E);
+ bool VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E);
+ bool VisitOpaqueValueExpr(const OpaqueValueExpr *E);
+ bool VisitAbstractConditionalOperator(const AbstractConditionalOperator *E);
+ bool VisitStringLiteral(const StringLiteral *E);
+ bool VisitCharacterLiteral(const CharacterLiteral *E);
+ bool VisitCompoundAssignOperator(const CompoundAssignOperator *E);
+ bool VisitFloatCompoundAssignOperator(const CompoundAssignOperator *E);
+ bool VisitPointerCompoundAssignOperator(const CompoundAssignOperator *E);
+ bool VisitExprWithCleanups(const ExprWithCleanups *E);
+ bool VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
+ bool VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *E);
+ bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E);
+ bool VisitTypeTraitExpr(const TypeTraitExpr *E);
+ bool VisitLambdaExpr(const LambdaExpr *E);
+ bool VisitPredefinedExpr(const PredefinedExpr *E);
+ bool VisitCXXThrowExpr(const CXXThrowExpr *E);
+ bool VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *E);
+ bool VisitCXXNoexceptExpr(const CXXNoexceptExpr *E);
+ bool VisitCXXConstructExpr(const CXXConstructExpr *E);
+ bool VisitSourceLocExpr(const SourceLocExpr *E);
+ bool VisitOffsetOfExpr(const OffsetOfExpr *E);
+ bool VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E);
+ bool VisitSizeOfPackExpr(const SizeOfPackExpr *E);
protected:
bool visitExpr(const Expr *E) override;
@@ -87,31 +124,24 @@ protected:
Record *getRecord(QualType Ty);
Record *getRecord(const RecordDecl *RD);
- /// Returns the size int bits of an integer.
- unsigned getIntWidth(QualType Ty) {
- auto &ASTContext = Ctx.getASTContext();
- return ASTContext.getIntWidth(Ty);
- }
-
- /// Returns the value of CHAR_BIT.
- unsigned getCharBit() const {
- auto &ASTContext = Ctx.getASTContext();
- return ASTContext.getTargetInfo().getCharWidth();
- }
+ // Returns a function for the given FunctionDecl.
+ // If the function does not exist yet, it is compiled.
+ const Function *getFunction(const FunctionDecl *FD);
/// Classifies a type.
- llvm::Optional<PrimType> classify(const Expr *E) const {
- return E->isGLValue() ? PT_Ptr : classify(E->getType());
+ std::optional<PrimType> classify(const Expr *E) const {
+ if (E->isGLValue()) {
+ if (E->getType()->isFunctionType())
+ return PT_FnPtr;
+ return PT_Ptr;
+ }
+
+ return classify(E->getType());
}
- llvm::Optional<PrimType> classify(QualType Ty) const {
+ std::optional<PrimType> classify(QualType Ty) const {
return Ctx.classify(Ty);
}
- /// Checks if a pointer needs adjustment.
- bool needsAdjust(QualType Ty) const {
- return true;
- }
-
/// Classifies a known primitive type
PrimType classifyPrim(QualType Ty) const {
if (auto T = classify(Ty)) {
@@ -119,53 +149,85 @@ protected:
}
llvm_unreachable("not a primitive type");
}
-
+ /// Evaluates an expression and places the result on the stack. If the
+ /// expression is of composite type, a local variable will be created
+ /// and a pointer to said variable will be placed on the stack.
+ bool visit(const Expr *E);
+ /// Compiles an initializer. This is like visit() but it will never
+ /// create a variable and instead rely on a variable already having
+ /// been created. visitInitializer() then relies on a pointer to this
+ /// variable being on top of the stack.
+ bool visitInitializer(const Expr *E);
/// Evaluates an expression for side effects and discards the result.
bool discard(const Expr *E);
- /// Evaluates an expression and places result on stack.
- bool visit(const Expr *E);
- /// Compiles an initializer for a local.
- bool visitInitializer(const Expr *E, InitFnRef GenPtr);
+ /// Just pass evaluation on to \p E. This leaves all the parsing flags
+ /// intact.
+ bool delegate(const Expr *E);
+
+ /// Creates and initializes a variable from the given decl.
+ bool visitVarDecl(const VarDecl *VD);
+ /// Visit an APValue.
+ bool visitAPValue(const APValue &Val, PrimType ValType, const Expr *E);
/// Visits an expression and converts it to a boolean.
bool visitBool(const Expr *E);
/// Visits an initializer for a local.
bool visitLocalInitializer(const Expr *Init, unsigned I) {
- return visitInitializer(Init, [this, I, Init] {
- return this->emitGetPtrLocal(I, Init);
- });
+ if (!this->emitGetPtrLocal(I, Init))
+ return false;
+
+ if (!visitInitializer(Init))
+ return false;
+
+ return this->emitPopPtr(Init);
}
/// Visits an initializer for a global.
bool visitGlobalInitializer(const Expr *Init, unsigned I) {
- return visitInitializer(Init, [this, I, Init] {
- return this->emitGetPtrGlobal(I, Init);
- });
+ if (!this->emitGetPtrGlobal(I, Init))
+ return false;
+
+ if (!visitInitializer(Init))
+ return false;
+
+ return this->emitPopPtr(Init);
}
/// Visits a delegated initializer.
bool visitThisInitializer(const Expr *I) {
- return visitInitializer(I, [this, I] { return this->emitThis(I); });
+ if (!this->emitThis(I))
+ return false;
+
+ if (!visitInitializer(I))
+ return false;
+
+ return this->emitPopPtr(I);
}
+ bool visitInitList(ArrayRef<const Expr *> Inits, const Expr *E);
+ bool visitArrayElemInit(unsigned ElemIndex, const Expr *Init);
+
/// Creates a local primitive value.
- unsigned allocateLocalPrimitive(DeclTy &&Decl, PrimType Ty, bool IsMutable,
+ unsigned allocateLocalPrimitive(DeclTy &&Decl, PrimType Ty, bool IsConst,
bool IsExtended = false);
/// Allocates a space storing a local given its type.
- llvm::Optional<unsigned> allocateLocal(DeclTy &&Decl,
- bool IsExtended = false);
+ std::optional<unsigned> allocateLocal(DeclTy &&Decl, bool IsExtended = false);
private:
friend class VariableScope<Emitter>;
friend class LocalScope<Emitter>;
+ friend class DestructorScope<Emitter>;
friend class RecordScope<Emitter>;
friend class DeclScope<Emitter>;
friend class OptionScope<Emitter>;
+ friend class ArrayIndexScope<Emitter>;
+ friend class SourceLocScope<Emitter>;
/// Emits a zero initializer.
- bool visitZeroInitializer(PrimType T, const Expr *E);
+ bool visitZeroInitializer(PrimType T, QualType QT, const Expr *E);
+ bool visitZeroRecordInitializer(const Record *R, const Expr *E);
enum class DerefKind {
/// Value is read and pushed to stack.
@@ -190,30 +252,50 @@ private:
DerefKind AK, llvm::function_ref<bool(PrimType)> Direct,
llvm::function_ref<bool(PrimType)> Indirect);
- /// Emits an APInt constant.
- bool emitConst(PrimType T, unsigned NumBits, const llvm::APInt &Value,
- const Expr *E);
+ /// Emits an APSInt constant.
+ bool emitConst(const llvm::APSInt &Value, PrimType Ty, const Expr *E);
+ bool emitConst(const llvm::APSInt &Value, const Expr *E);
+ bool emitConst(const llvm::APInt &Value, const Expr *E) {
+ return emitConst(static_cast<llvm::APSInt>(Value), E);
+ }
/// Emits an integer constant.
- template <typename T> bool emitConst(const Expr *E, T Value) {
- QualType Ty = E->getType();
- unsigned NumBits = getIntWidth(Ty);
- APInt WrappedValue(NumBits, Value, std::is_signed<T>::value);
- return emitConst(*Ctx.classify(Ty), NumBits, WrappedValue, E);
+ template <typename T> bool emitConst(T Value, PrimType Ty, const Expr *E);
+ template <typename T> bool emitConst(T Value, const Expr *E);
+
+ /// Returns the CXXRecordDecl for the type of the given expression,
+ /// or nullptr if no such decl exists.
+ const CXXRecordDecl *getRecordDecl(const Expr *E) const {
+ QualType T = E->getType();
+ if (const auto *RD = T->getPointeeCXXRecordDecl())
+ return RD;
+ return T->getAsCXXRecordDecl();
}
- /// Returns a pointer to a variable declaration.
- bool getPtrVarDecl(const VarDecl *VD, const Expr *E);
+ llvm::RoundingMode getRoundingMode(const Expr *E) const {
+ FPOptions FPO = E->getFPFeaturesInEffect(Ctx.getLangOpts());
- /// Returns the index of a global.
- llvm::Optional<unsigned> getGlobalIdx(const VarDecl *VD);
+ if (FPO.getRoundingMode() == llvm::RoundingMode::Dynamic)
+ return llvm::RoundingMode::NearestTiesToEven;
- /// Emits the initialized pointer.
- bool emitInitFn() {
- assert(InitFn && "missing initializer");
- return (*InitFn)();
+ return FPO.getRoundingMode();
}
+ bool emitPrimCast(PrimType FromT, PrimType ToT, QualType ToQT, const Expr *E);
+ std::optional<PrimType> classifyComplexElementType(QualType T) const {
+ assert(T->isAnyComplexType());
+
+ QualType ElemType = T->getAs<ComplexType>()->getElementType();
+
+ return this->classify(ElemType);
+ }
+
+ bool emitComplexReal(const Expr *SubExpr);
+
+ bool emitRecordDestruction(const Descriptor *Desc);
+ unsigned collectBaseOffset(const RecordType *BaseType,
+ const RecordType *DerivedType);
+
protected:
/// Variable to storage mapping.
llvm::DenseMap<const ValueDecl *, Scope::Local> Locals;
@@ -224,14 +306,21 @@ protected:
/// Current scope.
VariableScope<Emitter> *VarScope = nullptr;
- /// Current argument index.
- llvm::Optional<uint64_t> ArrayIndex;
+ /// Current argument index. Needed to emit ArrayInitIndexExpr.
+ std::optional<uint64_t> ArrayIndex;
+
+ /// DefaultInit- or DefaultArgExpr, needed for SourceLocExpr.
+ const Expr *SourceLocDefaultExpr = nullptr;
/// Flag indicating if return value is to be discarded.
bool DiscardResult = false;
- /// Expression being initialized.
- llvm::Optional<InitFnRef> InitFn = {};
+ /// Flag inidicating if we're initializing an already created
+ /// variable. This is set in visitInitializer().
+ bool Initializing = false;
+
+ /// Flag indicating if we're initializing a global variable.
+ bool GlobalDecl = false;
};
extern template class ByteCodeExprGen<ByteCodeEmitter>;
@@ -240,6 +329,11 @@ extern template class ByteCodeExprGen<EvalEmitter>;
/// Scope chain managing the variable lifetimes.
template <class Emitter> class VariableScope {
public:
+ VariableScope(ByteCodeExprGen<Emitter> *Ctx)
+ : Ctx(Ctx), Parent(Ctx->VarScope) {
+ Ctx->VarScope = this;
+ }
+
virtual ~VariableScope() { Ctx->VarScope = this->Parent; }
void add(const Scope::Local &Local, bool IsExtended) {
@@ -260,33 +354,39 @@ public:
}
virtual void emitDestruction() {}
-
- VariableScope *getParent() { return Parent; }
+ virtual void emitDestructors() {}
+ VariableScope *getParent() const { return Parent; }
protected:
- VariableScope(ByteCodeExprGen<Emitter> *Ctx)
- : Ctx(Ctx), Parent(Ctx->VarScope) {
- Ctx->VarScope = this;
- }
-
/// ByteCodeExprGen instance.
ByteCodeExprGen<Emitter> *Ctx;
/// Link to the parent scope.
VariableScope *Parent;
};
-/// Scope for local variables.
-///
-/// When the scope is destroyed, instructions are emitted to tear down
-/// all variables declared in this scope.
+/// Generic scope for local variables.
template <class Emitter> class LocalScope : public VariableScope<Emitter> {
public:
LocalScope(ByteCodeExprGen<Emitter> *Ctx) : VariableScope<Emitter>(Ctx) {}
- ~LocalScope() override { this->emitDestruction(); }
+ /// Emit a Destroy op for this scope.
+ ~LocalScope() override {
+ if (!Idx)
+ return;
+ this->Ctx->emitDestroy(*Idx, SourceInfo{});
+ }
+
+ /// Overriden to support explicit destruction.
+ void emitDestruction() override {
+ if (!Idx)
+ return;
+ this->emitDestructors();
+ this->Ctx->emitDestroy(*Idx, SourceInfo{});
+ this->Idx = std::nullopt;
+ }
void addLocal(const Scope::Local &Local) override {
- if (!Idx.hasValue()) {
+ if (!Idx) {
Idx = this->Ctx->Descriptors.size();
this->Ctx->Descriptors.emplace_back();
}
@@ -294,36 +394,106 @@ public:
this->Ctx->Descriptors[*Idx].emplace_back(Local);
}
- void emitDestruction() override {
- if (!Idx.hasValue())
+ void emitDestructors() override {
+ if (!Idx)
return;
- this->Ctx->emitDestroy(*Idx, SourceInfo{});
+ // Emit destructor calls for local variables of record
+ // type with a destructor.
+ for (Scope::Local &Local : this->Ctx->Descriptors[*Idx]) {
+ if (!Local.Desc->isPrimitive() && !Local.Desc->isPrimitiveArray()) {
+ this->Ctx->emitGetPtrLocal(Local.Offset, SourceInfo{});
+ this->Ctx->emitRecordDestruction(Local.Desc);
+ }
+ }
}
-protected:
/// Index of the scope in the chain.
- Optional<unsigned> Idx;
+ std::optional<unsigned> Idx;
+};
+
+/// Emits the destructors of the variables of \param OtherScope
+/// when this scope is destroyed. Does not create a Scope in the bytecode at
+/// all, this is just a RAII object to emit destructors.
+template <class Emitter> class DestructorScope final {
+public:
+ DestructorScope(LocalScope<Emitter> &OtherScope) : OtherScope(OtherScope) {}
+
+ ~DestructorScope() { OtherScope.emitDestructors(); }
+
+private:
+ LocalScope<Emitter> &OtherScope;
+};
+
+/// Like a regular LocalScope, except that the destructors of all local
+/// variables are automatically emitted when the AutoScope is destroyed.
+template <class Emitter> class AutoScope : public LocalScope<Emitter> {
+public:
+ AutoScope(ByteCodeExprGen<Emitter> *Ctx)
+ : LocalScope<Emitter>(Ctx), DS(*this) {}
+
+private:
+ DestructorScope<Emitter> DS;
};
/// Scope for storage declared in a compound statement.
-template <class Emitter> class BlockScope final : public LocalScope<Emitter> {
+template <class Emitter> class BlockScope final : public AutoScope<Emitter> {
public:
- BlockScope(ByteCodeExprGen<Emitter> *Ctx) : LocalScope<Emitter>(Ctx) {}
+ BlockScope(ByteCodeExprGen<Emitter> *Ctx) : AutoScope<Emitter>(Ctx) {}
void addExtended(const Scope::Local &Local) override {
- llvm_unreachable("Cannot create temporaries in full scopes");
+ // If we to this point, just add the variable as a normal local
+ // variable. It will be destroyed at the end of the block just
+ // like all others.
+ this->addLocal(Local);
}
};
/// Expression scope which tracks potentially lifetime extended
/// temporaries which are hoisted to the parent scope on exit.
-template <class Emitter> class ExprScope final : public LocalScope<Emitter> {
+template <class Emitter> class ExprScope final : public AutoScope<Emitter> {
public:
- ExprScope(ByteCodeExprGen<Emitter> *Ctx) : LocalScope<Emitter>(Ctx) {}
+ ExprScope(ByteCodeExprGen<Emitter> *Ctx) : AutoScope<Emitter>(Ctx) {}
void addExtended(const Scope::Local &Local) override {
- this->Parent->addLocal(Local);
+ if (this->Parent)
+ this->Parent->addLocal(Local);
+ }
+};
+
+template <class Emitter> class ArrayIndexScope final {
+public:
+ ArrayIndexScope(ByteCodeExprGen<Emitter> *Ctx, uint64_t Index) : Ctx(Ctx) {
+ OldArrayIndex = Ctx->ArrayIndex;
+ Ctx->ArrayIndex = Index;
+ }
+
+ ~ArrayIndexScope() { Ctx->ArrayIndex = OldArrayIndex; }
+
+private:
+ ByteCodeExprGen<Emitter> *Ctx;
+ std::optional<uint64_t> OldArrayIndex;
+};
+
+template <class Emitter> class SourceLocScope final {
+public:
+ SourceLocScope(ByteCodeExprGen<Emitter> *Ctx, const Expr *DefaultExpr)
+ : Ctx(Ctx) {
+ assert(DefaultExpr);
+ // We only switch if the current SourceLocDefaultExpr is null.
+ if (!Ctx->SourceLocDefaultExpr) {
+ Enabled = true;
+ Ctx->SourceLocDefaultExpr = DefaultExpr;
+ }
}
+
+ ~SourceLocScope() {
+ if (Enabled)
+ Ctx->SourceLocDefaultExpr = nullptr;
+ }
+
+private:
+ ByteCodeExprGen<Emitter> *Ctx;
+ bool Enabled = false;
};
} // namespace interp
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeGenError.h b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeGenError.h
index a4fa4917705d..af464b5ed4ab 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeGenError.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeGenError.h
@@ -20,19 +20,19 @@ namespace interp {
/// Error thrown by the compiler.
struct ByteCodeGenError : public llvm::ErrorInfo<ByteCodeGenError> {
public:
- ByteCodeGenError(SourceLocation Loc) : Loc(Loc) {}
- ByteCodeGenError(const Stmt *S) : ByteCodeGenError(S->getBeginLoc()) {}
- ByteCodeGenError(const Decl *D) : ByteCodeGenError(D->getBeginLoc()) {}
+ ByteCodeGenError(SourceRange Range) : Range(Range) {}
+ ByteCodeGenError(const Stmt *S) : ByteCodeGenError(S->getSourceRange()) {}
+ ByteCodeGenError(const Decl *D) : ByteCodeGenError(D->getSourceRange()) {}
void log(raw_ostream &OS) const override { OS << "unimplemented feature"; }
- const SourceLocation &getLoc() const { return Loc; }
+ const SourceRange &getRange() const { return Range; }
static char ID;
private:
- // Start of the item where the error occurred.
- SourceLocation Loc;
+ // Range of the item where the error occurred.
+ SourceRange Range;
// Users are not expected to use error_code.
std::error_code convertToErrorCode() const override {
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.cpp b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.cpp
index 5b47489e65e0..a2d8c4e13010 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.cpp
@@ -12,9 +12,6 @@
#include "Context.h"
#include "Function.h"
#include "PrimType.h"
-#include "Program.h"
-#include "State.h"
-#include "clang/Basic/LLVM.h"
using namespace clang;
using namespace clang::interp;
@@ -90,15 +87,150 @@ private:
} // namespace clang
template <class Emitter>
+bool ByteCodeStmtGen<Emitter>::emitLambdaStaticInvokerBody(
+ const CXXMethodDecl *MD) {
+ assert(MD->isLambdaStaticInvoker());
+ assert(MD->hasBody());
+ assert(cast<CompoundStmt>(MD->getBody())->body_empty());
+
+ const CXXRecordDecl *ClosureClass = MD->getParent();
+ const CXXMethodDecl *LambdaCallOp = ClosureClass->getLambdaCallOperator();
+ assert(ClosureClass->captures_begin() == ClosureClass->captures_end());
+ const Function *Func = this->getFunction(LambdaCallOp);
+ if (!Func)
+ return false;
+ assert(Func->hasThisPointer());
+ assert(Func->getNumParams() == (MD->getNumParams() + 1 + Func->hasRVO()));
+
+ if (Func->hasRVO()) {
+ if (!this->emitRVOPtr(MD))
+ return false;
+ }
+
+ // The lambda call operator needs an instance pointer, but we don't have
+ // one here, and we don't need one either because the lambda cannot have
+ // any captures, as verified above. Emit a null pointer. This is then
+ // special-cased when interpreting to not emit any misleading diagnostics.
+ if (!this->emitNullPtr(MD))
+ return false;
+
+ // Forward all arguments from the static invoker to the lambda call operator.
+ for (const ParmVarDecl *PVD : MD->parameters()) {
+ auto It = this->Params.find(PVD);
+ assert(It != this->Params.end());
+
+ // We do the lvalue-to-rvalue conversion manually here, so no need
+ // to care about references.
+ PrimType ParamType = this->classify(PVD->getType()).value_or(PT_Ptr);
+ if (!this->emitGetParam(ParamType, It->second.Offset, MD))
+ return false;
+ }
+
+ if (!this->emitCall(Func, LambdaCallOp))
+ return false;
+
+ this->emitCleanup();
+ if (ReturnType)
+ return this->emitRet(*ReturnType, MD);
+
+ // Nothing to do, since we emitted the RVO pointer above.
+ return this->emitRetVoid(MD);
+}
+
+template <class Emitter>
bool ByteCodeStmtGen<Emitter>::visitFunc(const FunctionDecl *F) {
// Classify the return type.
ReturnType = this->classify(F->getReturnType());
- // Set up fields and context if a constructor.
- if (auto *MD = dyn_cast<CXXMethodDecl>(F))
- return this->bail(MD);
+ auto emitFieldInitializer = [&](const Record::Field *F, unsigned FieldOffset,
+ const Expr *InitExpr) -> bool {
+ if (std::optional<PrimType> T = this->classify(InitExpr)) {
+ if (!this->visit(InitExpr))
+ return false;
+
+ if (F->isBitField())
+ return this->emitInitThisBitField(*T, F, FieldOffset, InitExpr);
+ return this->emitInitThisField(*T, FieldOffset, InitExpr);
+ }
+ // Non-primitive case. Get a pointer to the field-to-initialize
+ // on the stack and call visitInitialzer() for it.
+ if (!this->emitGetPtrThisField(FieldOffset, InitExpr))
+ return false;
- if (auto *Body = F->getBody())
+ if (!this->visitInitializer(InitExpr))
+ return false;
+
+ return this->emitPopPtr(InitExpr);
+ };
+
+ // Emit custom code if this is a lambda static invoker.
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(F);
+ MD && MD->isLambdaStaticInvoker())
+ return this->emitLambdaStaticInvokerBody(MD);
+
+ // Constructor. Set up field initializers.
+ if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(F)) {
+ const RecordDecl *RD = Ctor->getParent();
+ const Record *R = this->getRecord(RD);
+ if (!R)
+ return false;
+
+ for (const auto *Init : Ctor->inits()) {
+ // Scope needed for the initializers.
+ BlockScope<Emitter> Scope(this);
+
+ const Expr *InitExpr = Init->getInit();
+ if (const FieldDecl *Member = Init->getMember()) {
+ const Record::Field *F = R->getField(Member);
+
+ if (!emitFieldInitializer(F, F->Offset, InitExpr))
+ return false;
+ } else if (const Type *Base = Init->getBaseClass()) {
+ // Base class initializer.
+ // Get This Base and call initializer on it.
+ const auto *BaseDecl = Base->getAsCXXRecordDecl();
+ assert(BaseDecl);
+ const Record::Base *B = R->getBase(BaseDecl);
+ assert(B);
+ if (!this->emitGetPtrThisBase(B->Offset, InitExpr))
+ return false;
+ if (!this->visitInitializer(InitExpr))
+ return false;
+ if (!this->emitInitPtrPop(InitExpr))
+ return false;
+ } else if (const IndirectFieldDecl *IFD = Init->getIndirectMember()) {
+ assert(IFD->getChainingSize() >= 2);
+
+ unsigned NestedFieldOffset = 0;
+ const Record::Field *NestedField = nullptr;
+ for (const NamedDecl *ND : IFD->chain()) {
+ const auto *FD = cast<FieldDecl>(ND);
+ const Record *FieldRecord =
+ this->P.getOrCreateRecord(FD->getParent());
+ assert(FieldRecord);
+
+ NestedField = FieldRecord->getField(FD);
+ assert(NestedField);
+
+ NestedFieldOffset += NestedField->Offset;
+ }
+ assert(NestedField);
+
+ if (!emitFieldInitializer(NestedField, NestedFieldOffset, InitExpr))
+ return false;
+ } else {
+ assert(Init->isDelegatingInitializer());
+ if (!this->emitThis(InitExpr))
+ return false;
+ if (!this->visitInitializer(Init->getInit()))
+ return false;
+ if (!this->emitPopPtr(InitExpr))
+ return false;
+ }
+ }
+ }
+
+ if (const auto *Body = F->getBody())
if (!visitStmt(Body))
return false;
@@ -120,16 +252,58 @@ bool ByteCodeStmtGen<Emitter>::visitStmt(const Stmt *S) {
return visitReturnStmt(cast<ReturnStmt>(S));
case Stmt::IfStmtClass:
return visitIfStmt(cast<IfStmt>(S));
+ case Stmt::WhileStmtClass:
+ return visitWhileStmt(cast<WhileStmt>(S));
+ case Stmt::DoStmtClass:
+ return visitDoStmt(cast<DoStmt>(S));
+ case Stmt::ForStmtClass:
+ return visitForStmt(cast<ForStmt>(S));
+ case Stmt::CXXForRangeStmtClass:
+ return visitCXXForRangeStmt(cast<CXXForRangeStmt>(S));
+ case Stmt::BreakStmtClass:
+ return visitBreakStmt(cast<BreakStmt>(S));
+ case Stmt::ContinueStmtClass:
+ return visitContinueStmt(cast<ContinueStmt>(S));
+ case Stmt::SwitchStmtClass:
+ return visitSwitchStmt(cast<SwitchStmt>(S));
+ case Stmt::CaseStmtClass:
+ return visitCaseStmt(cast<CaseStmt>(S));
+ case Stmt::DefaultStmtClass:
+ return visitDefaultStmt(cast<DefaultStmt>(S));
+ case Stmt::GCCAsmStmtClass:
+ case Stmt::MSAsmStmtClass:
+ return visitAsmStmt(cast<AsmStmt>(S));
+ case Stmt::AttributedStmtClass:
+ return visitAttributedStmt(cast<AttributedStmt>(S));
+ case Stmt::CXXTryStmtClass:
+ return visitCXXTryStmt(cast<CXXTryStmt>(S));
case Stmt::NullStmtClass:
return true;
default: {
if (auto *Exp = dyn_cast<Expr>(S))
return this->discard(Exp);
- return this->bail(S);
+ return false;
}
}
}
+/// Visits the given statment without creating a variable
+/// scope for it in case it is a compound statement.
+template <class Emitter>
+bool ByteCodeStmtGen<Emitter>::visitLoopBody(const Stmt *S) {
+ if (isa<NullStmt>(S))
+ return true;
+
+ if (const auto *CS = dyn_cast<CompoundStmt>(S)) {
+ for (auto *InnerStmt : CS->body())
+ if (!visitStmt(InnerStmt))
+ return false;
+ return true;
+ }
+
+ return this->visitStmt(S);
+}
+
template <class Emitter>
bool ByteCodeStmtGen<Emitter>::visitCompoundStmt(
const CompoundStmt *CompoundStmt) {
@@ -143,17 +317,14 @@ bool ByteCodeStmtGen<Emitter>::visitCompoundStmt(
template <class Emitter>
bool ByteCodeStmtGen<Emitter>::visitDeclStmt(const DeclStmt *DS) {
for (auto *D : DS->decls()) {
- // Variable declarator.
- if (auto *VD = dyn_cast<VarDecl>(D)) {
- if (!visitVarDecl(VD))
- return false;
+ if (isa<StaticAssertDecl, TagDecl, TypedefNameDecl>(D))
continue;
- }
- // Decomposition declarator.
- if (auto *DD = dyn_cast<DecompositionDecl>(D)) {
- return this->bail(DD);
- }
+ const auto *VD = dyn_cast<VarDecl>(D);
+ if (!VD)
+ return false;
+ if (!this->visitVarDecl(VD))
+ return false;
}
return true;
@@ -169,27 +340,39 @@ bool ByteCodeStmtGen<Emitter>::visitReturnStmt(const ReturnStmt *RS) {
return false;
this->emitCleanup();
return this->emitRet(*ReturnType, RS);
+ } else if (RE->getType()->isVoidType()) {
+ if (!this->visit(RE))
+ return false;
} else {
// RVO - construct the value in the return location.
- auto ReturnLocation = [this, RE] { return this->emitGetParamPtr(0, RE); };
- if (!this->visitInitializer(RE, ReturnLocation))
+ if (!this->emitRVOPtr(RE))
return false;
+ if (!this->visitInitializer(RE))
+ return false;
+ if (!this->emitPopPtr(RE))
+ return false;
+
this->emitCleanup();
return this->emitRetVoid(RS);
}
- } else {
- this->emitCleanup();
- if (!this->emitRetVoid(RS))
- return false;
- return true;
}
+
+ // Void return.
+ this->emitCleanup();
+ return this->emitRetVoid(RS);
}
template <class Emitter>
bool ByteCodeStmtGen<Emitter>::visitIfStmt(const IfStmt *IS) {
BlockScope<Emitter> IfScope(this);
+
+ if (IS->isNonNegatedConsteval())
+ return visitStmt(IS->getThen());
+ if (IS->isNegatedConsteval())
+ return IS->getElse() ? visitStmt(IS->getElse()) : true;
+
if (auto *CondInit = IS->getInit())
- if (!visitStmt(IS->getInit()))
+ if (!visitStmt(CondInit))
return false;
if (const DeclStmt *CondDecl = IS->getConditionVariableDeclStmt())
@@ -225,33 +408,267 @@ bool ByteCodeStmtGen<Emitter>::visitIfStmt(const IfStmt *IS) {
}
template <class Emitter>
-bool ByteCodeStmtGen<Emitter>::visitVarDecl(const VarDecl *VD) {
- auto DT = VD->getType();
+bool ByteCodeStmtGen<Emitter>::visitWhileStmt(const WhileStmt *S) {
+ const Expr *Cond = S->getCond();
+ const Stmt *Body = S->getBody();
- if (!VD->hasLocalStorage()) {
- // No code generation required.
- return true;
+ LabelTy CondLabel = this->getLabel(); // Label before the condition.
+ LabelTy EndLabel = this->getLabel(); // Label after the loop.
+ LoopScope<Emitter> LS(this, EndLabel, CondLabel);
+
+ this->emitLabel(CondLabel);
+ if (!this->visitBool(Cond))
+ return false;
+ if (!this->jumpFalse(EndLabel))
+ return false;
+
+ LocalScope<Emitter> Scope(this);
+ {
+ DestructorScope<Emitter> DS(Scope);
+ if (!this->visitLoopBody(Body))
+ return false;
+ }
+
+ if (!this->jump(CondLabel))
+ return false;
+ this->emitLabel(EndLabel);
+
+ return true;
+}
+
+template <class Emitter>
+bool ByteCodeStmtGen<Emitter>::visitDoStmt(const DoStmt *S) {
+ const Expr *Cond = S->getCond();
+ const Stmt *Body = S->getBody();
+
+ LabelTy StartLabel = this->getLabel();
+ LabelTy EndLabel = this->getLabel();
+ LabelTy CondLabel = this->getLabel();
+ LoopScope<Emitter> LS(this, EndLabel, CondLabel);
+ LocalScope<Emitter> Scope(this);
+
+ this->emitLabel(StartLabel);
+ {
+ DestructorScope<Emitter> DS(Scope);
+
+ if (!this->visitLoopBody(Body))
+ return false;
+ this->emitLabel(CondLabel);
+ if (!this->visitBool(Cond))
+ return false;
+ }
+ if (!this->jumpTrue(StartLabel))
+ return false;
+
+ this->emitLabel(EndLabel);
+ return true;
+}
+
+template <class Emitter>
+bool ByteCodeStmtGen<Emitter>::visitForStmt(const ForStmt *S) {
+ // for (Init; Cond; Inc) { Body }
+ const Stmt *Init = S->getInit();
+ const Expr *Cond = S->getCond();
+ const Expr *Inc = S->getInc();
+ const Stmt *Body = S->getBody();
+
+ LabelTy EndLabel = this->getLabel();
+ LabelTy CondLabel = this->getLabel();
+ LabelTy IncLabel = this->getLabel();
+ LoopScope<Emitter> LS(this, EndLabel, IncLabel);
+ LocalScope<Emitter> Scope(this);
+
+ if (Init && !this->visitStmt(Init))
+ return false;
+ this->emitLabel(CondLabel);
+ if (Cond) {
+ if (!this->visitBool(Cond))
+ return false;
+ if (!this->jumpFalse(EndLabel))
+ return false;
}
- // Integers, pointers, primitives.
- if (Optional<PrimType> T = this->classify(DT)) {
- auto Off = this->allocateLocalPrimitive(VD, *T, DT.isConstQualified());
- // Compile the initialiser in its own scope.
- {
- ExprScope<Emitter> Scope(this);
- if (!this->visit(VD->getInit()))
+ {
+ DestructorScope<Emitter> DS(Scope);
+
+ if (Body && !this->visitLoopBody(Body))
+ return false;
+ this->emitLabel(IncLabel);
+ if (Inc && !this->discard(Inc))
+ return false;
+ }
+
+ if (!this->jump(CondLabel))
+ return false;
+ this->emitLabel(EndLabel);
+ return true;
+}
+
+template <class Emitter>
+bool ByteCodeStmtGen<Emitter>::visitCXXForRangeStmt(const CXXForRangeStmt *S) {
+ const Stmt *Init = S->getInit();
+ const Expr *Cond = S->getCond();
+ const Expr *Inc = S->getInc();
+ const Stmt *Body = S->getBody();
+ const Stmt *BeginStmt = S->getBeginStmt();
+ const Stmt *RangeStmt = S->getRangeStmt();
+ const Stmt *EndStmt = S->getEndStmt();
+ const VarDecl *LoopVar = S->getLoopVariable();
+
+ LabelTy EndLabel = this->getLabel();
+ LabelTy CondLabel = this->getLabel();
+ LabelTy IncLabel = this->getLabel();
+ LoopScope<Emitter> LS(this, EndLabel, IncLabel);
+
+ // Emit declarations needed in the loop.
+ if (Init && !this->visitStmt(Init))
+ return false;
+ if (!this->visitStmt(RangeStmt))
+ return false;
+ if (!this->visitStmt(BeginStmt))
+ return false;
+ if (!this->visitStmt(EndStmt))
+ return false;
+
+ // Now the condition as well as the loop variable assignment.
+ this->emitLabel(CondLabel);
+ if (!this->visitBool(Cond))
+ return false;
+ if (!this->jumpFalse(EndLabel))
+ return false;
+
+ if (!this->visitVarDecl(LoopVar))
+ return false;
+
+ // Body.
+ LocalScope<Emitter> Scope(this);
+ {
+ DestructorScope<Emitter> DS(Scope);
+
+ if (!this->visitLoopBody(Body))
+ return false;
+ this->emitLabel(IncLabel);
+ if (!this->discard(Inc))
+ return false;
+ }
+ if (!this->jump(CondLabel))
+ return false;
+
+ this->emitLabel(EndLabel);
+ return true;
+}
+
+template <class Emitter>
+bool ByteCodeStmtGen<Emitter>::visitBreakStmt(const BreakStmt *S) {
+ if (!BreakLabel)
+ return false;
+
+ this->VarScope->emitDestructors();
+ return this->jump(*BreakLabel);
+}
+
+template <class Emitter>
+bool ByteCodeStmtGen<Emitter>::visitContinueStmt(const ContinueStmt *S) {
+ if (!ContinueLabel)
+ return false;
+
+ this->VarScope->emitDestructors();
+ return this->jump(*ContinueLabel);
+}
+
+template <class Emitter>
+bool ByteCodeStmtGen<Emitter>::visitSwitchStmt(const SwitchStmt *S) {
+ const Expr *Cond = S->getCond();
+ PrimType CondT = this->classifyPrim(Cond->getType());
+
+ LabelTy EndLabel = this->getLabel();
+ OptLabelTy DefaultLabel = std::nullopt;
+ unsigned CondVar = this->allocateLocalPrimitive(Cond, CondT, true, false);
+
+ if (const auto *CondInit = S->getInit())
+ if (!visitStmt(CondInit))
+ return false;
+
+ // Initialize condition variable.
+ if (!this->visit(Cond))
+ return false;
+ if (!this->emitSetLocal(CondT, CondVar, S))
+ return false;
+
+ CaseMap CaseLabels;
+ // Create labels and comparison ops for all case statements.
+ for (const SwitchCase *SC = S->getSwitchCaseList(); SC;
+ SC = SC->getNextSwitchCase()) {
+ if (const auto *CS = dyn_cast<CaseStmt>(SC)) {
+ // FIXME: Implement ranges.
+ if (CS->caseStmtIsGNURange())
+ return false;
+ CaseLabels[SC] = this->getLabel();
+
+ const Expr *Value = CS->getLHS();
+ PrimType ValueT = this->classifyPrim(Value->getType());
+
+ // Compare the case statement's value to the switch condition.
+ if (!this->emitGetLocal(CondT, CondVar, CS))
+ return false;
+ if (!this->visit(Value))
+ return false;
+
+ // Compare and jump to the case label.
+ if (!this->emitEQ(ValueT, S))
+ return false;
+ if (!this->jumpTrue(CaseLabels[CS]))
return false;
- }
- // Set the value.
- return this->emitSetLocal(*T, Off, VD);
- } else {
- // Composite types - allocate storage and initialize it.
- if (auto Off = this->allocateLocal(VD)) {
- return this->visitLocalInitializer(VD->getInit(), *Off);
} else {
- return this->bail(VD);
+ assert(!DefaultLabel);
+ DefaultLabel = this->getLabel();
}
}
+
+ // If none of the conditions above were true, fall through to the default
+ // statement or jump after the switch statement.
+ if (DefaultLabel) {
+ if (!this->jump(*DefaultLabel))
+ return false;
+ } else {
+ if (!this->jump(EndLabel))
+ return false;
+ }
+
+ SwitchScope<Emitter> SS(this, std::move(CaseLabels), EndLabel, DefaultLabel);
+ if (!this->visitStmt(S->getBody()))
+ return false;
+ this->emitLabel(EndLabel);
+ return true;
+}
+
+template <class Emitter>
+bool ByteCodeStmtGen<Emitter>::visitCaseStmt(const CaseStmt *S) {
+ this->emitLabel(CaseLabels[S]);
+ return this->visitStmt(S->getSubStmt());
+}
+
+template <class Emitter>
+bool ByteCodeStmtGen<Emitter>::visitDefaultStmt(const DefaultStmt *S) {
+ this->emitLabel(*DefaultLabel);
+ return this->visitStmt(S->getSubStmt());
+}
+
+template <class Emitter>
+bool ByteCodeStmtGen<Emitter>::visitAsmStmt(const AsmStmt *S) {
+ return this->emitInvalid(S);
+}
+
+template <class Emitter>
+bool ByteCodeStmtGen<Emitter>::visitAttributedStmt(const AttributedStmt *S) {
+ // Ignore all attributes.
+ return this->visitStmt(S->getSubStmt());
+}
+
+template <class Emitter>
+bool ByteCodeStmtGen<Emitter>::visitCXXTryStmt(const CXXTryStmt *S) {
+ // Ignore all handlers.
+ return this->visitStmt(S->getTryBlock());
}
namespace clang {
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.h b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.h
index d9c0b64ed4b8..64e03587ab21 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeStmtGen.h
@@ -16,20 +16,11 @@
#include "ByteCodeEmitter.h"
#include "ByteCodeExprGen.h"
#include "EvalEmitter.h"
-#include "Pointer.h"
#include "PrimType.h"
-#include "Record.h"
-#include "clang/AST/Decl.h"
-#include "clang/AST/Expr.h"
#include "clang/AST/StmtVisitor.h"
-#include "llvm/ADT/Optional.h"
namespace clang {
-class QualType;
-
namespace interp {
-class Function;
-class State;
template <class Emitter> class LoopScope;
template <class Emitter> class SwitchScope;
@@ -37,10 +28,10 @@ template <class Emitter> class LabelScope;
/// Compilation context for statements.
template <class Emitter>
-class ByteCodeStmtGen : public ByteCodeExprGen<Emitter> {
+class ByteCodeStmtGen final : public ByteCodeExprGen<Emitter> {
using LabelTy = typename Emitter::LabelTy;
using AddrTy = typename Emitter::AddrTy;
- using OptLabelTy = llvm::Optional<LabelTy>;
+ using OptLabelTy = std::optional<LabelTy>;
using CaseMap = llvm::DenseMap<const SwitchCase *, LabelTy>;
public:
@@ -59,16 +50,27 @@ private:
// Statement visitors.
bool visitStmt(const Stmt *S);
bool visitCompoundStmt(const CompoundStmt *S);
+ bool visitLoopBody(const Stmt *S);
bool visitDeclStmt(const DeclStmt *DS);
bool visitReturnStmt(const ReturnStmt *RS);
bool visitIfStmt(const IfStmt *IS);
+ bool visitWhileStmt(const WhileStmt *S);
+ bool visitDoStmt(const DoStmt *S);
+ bool visitForStmt(const ForStmt *S);
+ bool visitCXXForRangeStmt(const CXXForRangeStmt *S);
+ bool visitBreakStmt(const BreakStmt *S);
+ bool visitContinueStmt(const ContinueStmt *S);
+ bool visitSwitchStmt(const SwitchStmt *S);
+ bool visitCaseStmt(const CaseStmt *S);
+ bool visitDefaultStmt(const DefaultStmt *S);
+ bool visitAsmStmt(const AsmStmt *S);
+ bool visitAttributedStmt(const AttributedStmt *S);
+ bool visitCXXTryStmt(const CXXTryStmt *S);
+
+ bool emitLambdaStaticInvokerBody(const CXXMethodDecl *MD);
- /// Compiles a variable declaration.
- bool visitVarDecl(const VarDecl *VD);
-
-private:
/// Type of the expression returned by the function.
- llvm::Optional<PrimType> ReturnType;
+ std::optional<PrimType> ReturnType;
/// Switch case mapping.
CaseMap CaseLabels;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Context.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Context.cpp
index 3bfcdfcd4c58..75a300bcbace 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Context.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Context.cpp
@@ -9,6 +9,7 @@
#include "Context.h"
#include "ByteCodeEmitter.h"
#include "ByteCodeExprGen.h"
+#include "ByteCodeGenError.h"
#include "ByteCodeStmtGen.h"
#include "EvalEmitter.h"
#include "Interp.h"
@@ -27,46 +28,117 @@ Context::Context(ASTContext &Ctx) : Ctx(Ctx), P(new Program(*this)) {}
Context::~Context() {}
bool Context::isPotentialConstantExpr(State &Parent, const FunctionDecl *FD) {
+ assert(Stk.empty());
Function *Func = P->getFunction(FD);
- if (!Func) {
- if (auto R = ByteCodeStmtGen<ByteCodeEmitter>(*this, *P).compileFunc(FD)) {
- Func = *R;
- } else {
- handleAllErrors(R.takeError(), [&Parent](ByteCodeGenError &Err) {
- Parent.FFDiag(Err.getLoc(), diag::err_experimental_clang_interp_failed);
- });
- return false;
- }
- }
+ if (!Func || !Func->hasBody())
+ Func = ByteCodeStmtGen<ByteCodeEmitter>(*this, *P).compileFunc(FD);
- if (!Func->isConstexpr())
+ APValue DummyResult;
+ if (!Run(Parent, Func, DummyResult)) {
return false;
+ }
- APValue Dummy;
- return Run(Parent, Func, Dummy);
+ return Func->isConstexpr();
}
bool Context::evaluateAsRValue(State &Parent, const Expr *E, APValue &Result) {
+ assert(Stk.empty());
ByteCodeExprGen<EvalEmitter> C(*this, *P, Parent, Stk, Result);
- return Check(Parent, C.interpretExpr(E));
+
+ auto Res = C.interpretExpr(E);
+
+ if (Res.isInvalid()) {
+ Stk.clear();
+ return false;
+ }
+
+ assert(Stk.empty());
+#ifndef NDEBUG
+ // Make sure we don't rely on some value being still alive in
+ // InterpStack memory.
+ Stk.clear();
+#endif
+
+ // Implicit lvalue-to-rvalue conversion.
+ if (E->isGLValue()) {
+ std::optional<APValue> RValueResult = Res.toRValue();
+ if (!RValueResult) {
+ return false;
+ }
+ Result = *RValueResult;
+ } else {
+ Result = Res.toAPValue();
+ }
+
+ return true;
+}
+
+bool Context::evaluate(State &Parent, const Expr *E, APValue &Result) {
+ assert(Stk.empty());
+ ByteCodeExprGen<EvalEmitter> C(*this, *P, Parent, Stk, Result);
+
+ auto Res = C.interpretExpr(E);
+ if (Res.isInvalid()) {
+ Stk.clear();
+ return false;
+ }
+
+ assert(Stk.empty());
+#ifndef NDEBUG
+ // Make sure we don't rely on some value being still alive in
+ // InterpStack memory.
+ Stk.clear();
+#endif
+ Result = Res.toAPValue();
+ return true;
}
bool Context::evaluateAsInitializer(State &Parent, const VarDecl *VD,
APValue &Result) {
+ assert(Stk.empty());
ByteCodeExprGen<EvalEmitter> C(*this, *P, Parent, Stk, Result);
- return Check(Parent, C.interpretDecl(VD));
+
+ auto Res = C.interpretDecl(VD);
+ if (Res.isInvalid()) {
+ Stk.clear();
+ return false;
+ }
+
+ assert(Stk.empty());
+#ifndef NDEBUG
+ // Make sure we don't rely on some value being still alive in
+ // InterpStack memory.
+ Stk.clear();
+#endif
+
+ // Ensure global variables are fully initialized.
+ if (shouldBeGloballyIndexed(VD) && !Res.isInvalid() &&
+ (VD->getType()->isRecordType() || VD->getType()->isArrayType())) {
+ assert(Res.isLValue());
+
+ if (!Res.checkFullyInitialized(C.getState()))
+ return false;
+
+ // lvalue-to-rvalue conversion.
+ std::optional<APValue> RValueResult = Res.toRValue();
+ if (!RValueResult)
+ return false;
+ Result = *RValueResult;
+
+ } else
+ Result = Res.toAPValue();
+ return true;
}
const LangOptions &Context::getLangOpts() const { return Ctx.getLangOpts(); }
-llvm::Optional<PrimType> Context::classify(QualType T) {
- if (T->isReferenceType() || T->isPointerType()) {
- return PT_Ptr;
- }
-
+std::optional<PrimType> Context::classify(QualType T) const {
if (T->isBooleanType())
return PT_Bool;
+ if (T->isAnyComplexType())
+ return std::nullopt;
+
if (T->isSignedIntegerOrEnumerationType()) {
switch (Ctx.getIntWidth(T)) {
case 64:
@@ -78,7 +150,7 @@ llvm::Optional<PrimType> Context::classify(QualType T) {
case 8:
return PT_Sint8;
default:
- return {};
+ return PT_IntAPS;
}
}
@@ -93,28 +165,59 @@ llvm::Optional<PrimType> Context::classify(QualType T) {
case 8:
return PT_Uint8;
default:
- return {};
+ return PT_IntAP;
}
}
if (T->isNullPtrType())
return PT_Ptr;
- if (auto *AT = dyn_cast<AtomicType>(T))
+ if (T->isFloatingType())
+ return PT_Float;
+
+ if (T->isFunctionPointerType() || T->isFunctionReferenceType() ||
+ T->isFunctionType() || T->isSpecificBuiltinType(BuiltinType::BoundMember))
+ return PT_FnPtr;
+
+ if (T->isReferenceType() || T->isPointerType())
+ return PT_Ptr;
+
+ if (const auto *AT = dyn_cast<AtomicType>(T))
return classify(AT->getValueType());
- return {};
+ if (const auto *DT = dyn_cast<DecltypeType>(T))
+ return classify(DT->getUnderlyingType());
+
+ if (const auto *DT = dyn_cast<MemberPointerType>(T))
+ return classify(DT->getPointeeType());
+
+ return std::nullopt;
}
unsigned Context::getCharBit() const {
return Ctx.getTargetInfo().getCharWidth();
}
-bool Context::Run(State &Parent, Function *Func, APValue &Result) {
- InterpState State(Parent, *P, Stk, *this);
- State.Current = new InterpFrame(State, Func, nullptr, {}, {});
- if (Interpret(State, Result))
- return true;
+/// Simple wrapper around getFloatTypeSemantics() to make code a
+/// little shorter.
+const llvm::fltSemantics &Context::getFloatSemantics(QualType T) const {
+ return Ctx.getFloatTypeSemantics(T);
+}
+
+bool Context::Run(State &Parent, const Function *Func, APValue &Result) {
+
+ {
+ InterpState State(Parent, *P, Stk, *this);
+ State.Current = new InterpFrame(State, Func, /*Caller=*/nullptr, {});
+ if (Interpret(State, Result)) {
+ assert(Stk.empty());
+ return true;
+ }
+
+ // State gets destroyed here, so the Stk.clear() below doesn't accidentally
+ // remove values the State's destructor might access.
+ }
+
Stk.clear();
return false;
}
@@ -123,7 +226,61 @@ bool Context::Check(State &Parent, llvm::Expected<bool> &&Flag) {
if (Flag)
return *Flag;
handleAllErrors(Flag.takeError(), [&Parent](ByteCodeGenError &Err) {
- Parent.FFDiag(Err.getLoc(), diag::err_experimental_clang_interp_failed);
+ Parent.FFDiag(Err.getRange().getBegin(),
+ diag::err_experimental_clang_interp_failed)
+ << Err.getRange();
});
return false;
}
+
+// TODO: Virtual bases?
+const CXXMethodDecl *
+Context::getOverridingFunction(const CXXRecordDecl *DynamicDecl,
+ const CXXRecordDecl *StaticDecl,
+ const CXXMethodDecl *InitialFunction) const {
+
+ const CXXRecordDecl *CurRecord = DynamicDecl;
+ const CXXMethodDecl *FoundFunction = InitialFunction;
+ for (;;) {
+ const CXXMethodDecl *Overrider =
+ FoundFunction->getCorrespondingMethodDeclaredInClass(CurRecord, false);
+ if (Overrider)
+ return Overrider;
+
+ // Common case of only one base class.
+ if (CurRecord->getNumBases() == 1) {
+ CurRecord = CurRecord->bases_begin()->getType()->getAsCXXRecordDecl();
+ continue;
+ }
+
+ // Otherwise, go to the base class that will lead to the StaticDecl.
+ for (const CXXBaseSpecifier &Spec : CurRecord->bases()) {
+ const CXXRecordDecl *Base = Spec.getType()->getAsCXXRecordDecl();
+ if (Base == StaticDecl || Base->isDerivedFrom(StaticDecl)) {
+ CurRecord = Base;
+ break;
+ }
+ }
+ }
+
+ llvm_unreachable(
+ "Couldn't find an overriding function in the class hierarchy?");
+ return nullptr;
+}
+
+const Function *Context::getOrCreateFunction(const FunctionDecl *FD) {
+ assert(FD);
+ const Function *Func = P->getFunction(FD);
+ bool IsBeingCompiled = Func && Func->isDefined() && !Func->isFullyCompiled();
+ bool WasNotDefined = Func && !Func->isConstexpr() && !Func->isDefined();
+
+ if (IsBeingCompiled)
+ return Func;
+
+ if (!Func || WasNotDefined) {
+ if (auto F = ByteCodeStmtGen<ByteCodeEmitter>(*this, *P).compileFunc(FD))
+ Func = F;
+ }
+
+ return Func;
+}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Context.h b/contrib/llvm-project/clang/lib/AST/Interp/Context.h
index e8238eea716a..ab83a8d13224 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Context.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Context.h
@@ -17,15 +17,13 @@
#define LLVM_CLANG_AST_INTERP_CONTEXT_H
#include "InterpStack.h"
-#include "clang/AST/APValue.h"
-#include "llvm/ADT/PointerIntPair.h"
namespace clang {
class ASTContext;
class LangOptions;
-class Stmt;
class FunctionDecl;
class VarDecl;
+class APValue;
namespace interp {
class Function;
@@ -33,8 +31,13 @@ class Program;
class State;
enum PrimType : unsigned;
+struct ParamOffset {
+ unsigned Offset;
+ bool IsPtr;
+};
+
/// Holds all information required to evaluate constexpr code in a module.
-class Context {
+class Context final {
public:
/// Initialises the constexpr VM.
Context(ASTContext &Ctx);
@@ -48,6 +51,9 @@ public:
/// Evaluates a toplevel expression as an rvalue.
bool evaluateAsRValue(State &Parent, const Expr *E, APValue &Result);
+ /// Like evaluateAsRvalue(), but does no implicit lvalue-to-rvalue conversion.
+ bool evaluate(State &Parent, const Expr *E, APValue &Result);
+
/// Evaluates a toplevel initializer.
bool evaluateAsInitializer(State &Parent, const VarDecl *VD, APValue &Result);
@@ -59,18 +65,40 @@ public:
InterpStack &getStack() { return Stk; }
/// Returns CHAR_BIT.
unsigned getCharBit() const;
+ /// Return the floating-point semantics for T.
+ const llvm::fltSemantics &getFloatSemantics(QualType T) const;
+ /// Return the size of T in bits.
+ uint32_t getBitWidth(QualType T) const { return Ctx.getIntWidth(T); }
/// Classifies an expression.
- llvm::Optional<PrimType> classify(QualType T);
+ std::optional<PrimType> classify(QualType T) const;
+
+ const CXXMethodDecl *
+ getOverridingFunction(const CXXRecordDecl *DynamicDecl,
+ const CXXRecordDecl *StaticDecl,
+ const CXXMethodDecl *InitialFunction) const;
+
+ const Function *getOrCreateFunction(const FunctionDecl *FD);
+
+ /// Returns whether we should create a global variable for the
+ /// given ValueDecl.
+ static bool shouldBeGloballyIndexed(const ValueDecl *VD) {
+ if (const auto *V = dyn_cast<VarDecl>(VD))
+ return V->hasGlobalStorage() || V->isConstexpr();
+
+ return false;
+ }
+
+ /// Returns the program. This is only needed for unittests.
+ Program &getProgram() const { return *P.get(); }
private:
/// Runs a function.
- bool Run(State &Parent, Function *Func, APValue &Result);
+ bool Run(State &Parent, const Function *Func, APValue &Result);
- /// Checks a result fromt the interpreter.
+ /// Checks a result from the interpreter.
bool Check(State &Parent, llvm::Expected<bool> &&R);
-private:
/// Current compilation context.
ASTContext &Ctx;
/// Interpreter stack, shared across invocations.
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.cpp
index 5c1a8a9cf306..b330e54baf33 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.cpp
@@ -7,6 +7,10 @@
//===----------------------------------------------------------------------===//
#include "Descriptor.h"
+#include "Boolean.h"
+#include "Floating.h"
+#include "FunctionPointer.h"
+#include "IntegralAP.h"
#include "Pointer.h"
#include "PrimType.h"
#include "Record.h"
@@ -15,46 +19,59 @@ using namespace clang;
using namespace clang::interp;
template <typename T>
-static void ctorTy(Block *, char *Ptr, bool, bool, bool, Descriptor *) {
+static void ctorTy(Block *, std::byte *Ptr, bool, bool, bool,
+ const Descriptor *) {
new (Ptr) T();
}
-template <typename T> static void dtorTy(Block *, char *Ptr, Descriptor *) {
+template <typename T>
+static void dtorTy(Block *, std::byte *Ptr, const Descriptor *) {
reinterpret_cast<T *>(Ptr)->~T();
}
template <typename T>
-static void moveTy(Block *, char *Src, char *Dst, Descriptor *) {
- auto *SrcPtr = reinterpret_cast<T *>(Src);
+static void moveTy(Block *, const std::byte *Src, std::byte *Dst,
+ const Descriptor *) {
+ const auto *SrcPtr = reinterpret_cast<const T *>(Src);
auto *DstPtr = reinterpret_cast<T *>(Dst);
new (DstPtr) T(std::move(*SrcPtr));
}
template <typename T>
-static void ctorArrayTy(Block *, char *Ptr, bool, bool, bool, Descriptor *D) {
+static void ctorArrayTy(Block *, std::byte *Ptr, bool, bool, bool,
+ const Descriptor *D) {
+ new (Ptr) InitMapPtr(std::nullopt);
+
+ Ptr += sizeof(InitMapPtr);
for (unsigned I = 0, NE = D->getNumElems(); I < NE; ++I) {
new (&reinterpret_cast<T *>(Ptr)[I]) T();
}
}
template <typename T>
-static void dtorArrayTy(Block *, char *Ptr, Descriptor *D) {
+static void dtorArrayTy(Block *, std::byte *Ptr, const Descriptor *D) {
+ InitMapPtr &IMP = *reinterpret_cast<InitMapPtr *>(Ptr);
+
+ if (IMP)
+ IMP = std::nullopt;
+ Ptr += sizeof(InitMapPtr);
for (unsigned I = 0, NE = D->getNumElems(); I < NE; ++I) {
reinterpret_cast<T *>(Ptr)[I].~T();
}
}
template <typename T>
-static void moveArrayTy(Block *, char *Src, char *Dst, Descriptor *D) {
+static void moveArrayTy(Block *, const std::byte *Src, std::byte *Dst,
+ const Descriptor *D) {
for (unsigned I = 0, NE = D->getNumElems(); I < NE; ++I) {
- auto *SrcPtr = &reinterpret_cast<T *>(Src)[I];
+ const auto *SrcPtr = &reinterpret_cast<const T *>(Src)[I];
auto *DstPtr = &reinterpret_cast<T *>(Dst)[I];
new (DstPtr) T(std::move(*SrcPtr));
}
}
-static void ctorArrayDesc(Block *B, char *Ptr, bool IsConst, bool IsMutable,
- bool IsActive, Descriptor *D) {
+static void ctorArrayDesc(Block *B, std::byte *Ptr, bool IsConst,
+ bool IsMutable, bool IsActive, const Descriptor *D) {
const unsigned NumElems = D->getNumElems();
const unsigned ElemSize =
D->ElemDesc->getAllocSize() + sizeof(InlineDescriptor);
@@ -63,7 +80,7 @@ static void ctorArrayDesc(Block *B, char *Ptr, bool IsConst, bool IsMutable,
for (unsigned I = 0; I < NumElems; ++I, ElemOffset += ElemSize) {
auto *ElemPtr = Ptr + ElemOffset;
auto *Desc = reinterpret_cast<InlineDescriptor *>(ElemPtr);
- auto *ElemLoc = reinterpret_cast<char *>(Desc + 1);
+ auto *ElemLoc = reinterpret_cast<std::byte *>(Desc + 1);
auto *SD = D->ElemDesc;
Desc->Offset = ElemOffset + sizeof(InlineDescriptor);
@@ -72,13 +89,14 @@ static void ctorArrayDesc(Block *B, char *Ptr, bool IsConst, bool IsMutable,
Desc->IsBase = false;
Desc->IsActive = IsActive;
Desc->IsConst = IsConst || D->IsConst;
- Desc->IsMutable = IsMutable || D->IsMutable;
+ Desc->IsFieldMutable = IsMutable || D->IsMutable;
if (auto Fn = D->ElemDesc->CtorFn)
- Fn(B, ElemLoc, Desc->IsConst, Desc->IsMutable, IsActive, D->ElemDesc);
+ Fn(B, ElemLoc, Desc->IsConst, Desc->IsFieldMutable, IsActive,
+ D->ElemDesc);
}
}
-static void dtorArrayDesc(Block *B, char *Ptr, Descriptor *D) {
+static void dtorArrayDesc(Block *B, std::byte *Ptr, const Descriptor *D) {
const unsigned NumElems = D->getNumElems();
const unsigned ElemSize =
D->ElemDesc->getAllocSize() + sizeof(InlineDescriptor);
@@ -87,26 +105,27 @@ static void dtorArrayDesc(Block *B, char *Ptr, Descriptor *D) {
for (unsigned I = 0; I < NumElems; ++I, ElemOffset += ElemSize) {
auto *ElemPtr = Ptr + ElemOffset;
auto *Desc = reinterpret_cast<InlineDescriptor *>(ElemPtr);
- auto *ElemLoc = reinterpret_cast<char *>(Desc + 1);
+ auto *ElemLoc = reinterpret_cast<std::byte *>(Desc + 1);
if (auto Fn = D->ElemDesc->DtorFn)
Fn(B, ElemLoc, D->ElemDesc);
}
}
-static void moveArrayDesc(Block *B, char *Src, char *Dst, Descriptor *D) {
+static void moveArrayDesc(Block *B, const std::byte *Src, std::byte *Dst,
+ const Descriptor *D) {
const unsigned NumElems = D->getNumElems();
const unsigned ElemSize =
D->ElemDesc->getAllocSize() + sizeof(InlineDescriptor);
unsigned ElemOffset = 0;
for (unsigned I = 0; I < NumElems; ++I, ElemOffset += ElemSize) {
- auto *SrcPtr = Src + ElemOffset;
+ const auto *SrcPtr = Src + ElemOffset;
auto *DstPtr = Dst + ElemOffset;
- auto *SrcDesc = reinterpret_cast<InlineDescriptor *>(SrcPtr);
- auto *SrcElemLoc = reinterpret_cast<char *>(SrcDesc + 1);
+ const auto *SrcDesc = reinterpret_cast<const InlineDescriptor *>(SrcPtr);
+ const auto *SrcElemLoc = reinterpret_cast<const std::byte *>(SrcDesc + 1);
auto *DstDesc = reinterpret_cast<InlineDescriptor *>(DstPtr);
- auto *DstElemLoc = reinterpret_cast<char *>(DstDesc + 1);
+ auto *DstElemLoc = reinterpret_cast<std::byte *>(DstDesc + 1);
*DstDesc = *SrcDesc;
if (auto Fn = D->ElemDesc->MoveFn)
@@ -114,20 +133,21 @@ static void moveArrayDesc(Block *B, char *Src, char *Dst, Descriptor *D) {
}
}
-static void ctorRecord(Block *B, char *Ptr, bool IsConst, bool IsMutable,
- bool IsActive, Descriptor *D) {
+static void ctorRecord(Block *B, std::byte *Ptr, bool IsConst, bool IsMutable,
+ bool IsActive, const Descriptor *D) {
const bool IsUnion = D->ElemRecord->isUnion();
auto CtorSub = [=](unsigned SubOff, Descriptor *F, bool IsBase) {
auto *Desc = reinterpret_cast<InlineDescriptor *>(Ptr + SubOff) - 1;
Desc->Offset = SubOff;
Desc->Desc = F;
- Desc->IsInitialized = (B->isStatic() || F->IsArray) && !IsBase;
+ Desc->IsInitialized = F->IsArray && !IsBase;
Desc->IsBase = IsBase;
Desc->IsActive = IsActive && !IsUnion;
Desc->IsConst = IsConst || F->IsConst;
- Desc->IsMutable = IsMutable || F->IsMutable;
+ Desc->IsFieldMutable = IsMutable || F->IsMutable;
if (auto Fn = F->CtorFn)
- Fn(B, Ptr + SubOff, Desc->IsConst, Desc->IsMutable, Desc->IsActive, F);
+ Fn(B, Ptr + SubOff, Desc->IsConst, Desc->IsFieldMutable, Desc->IsActive,
+ F);
};
for (const auto &B : D->ElemRecord->bases())
CtorSub(B.Offset, B.Desc, /*isBase=*/true);
@@ -137,7 +157,7 @@ static void ctorRecord(Block *B, char *Ptr, bool IsConst, bool IsMutable,
CtorSub(V.Offset, V.Desc, /*isBase=*/true);
}
-static void dtorRecord(Block *B, char *Ptr, Descriptor *D) {
+static void dtorRecord(Block *B, std::byte *Ptr, const Descriptor *D) {
auto DtorSub = [=](unsigned SubOff, Descriptor *F) {
if (auto Fn = F->DtorFn)
Fn(B, Ptr + SubOff, F);
@@ -150,22 +170,40 @@ static void dtorRecord(Block *B, char *Ptr, Descriptor *D) {
DtorSub(F.Offset, F.Desc);
}
-static void moveRecord(Block *B, char *Src, char *Dst, Descriptor *D) {
+static void moveRecord(Block *B, const std::byte *Src, std::byte *Dst,
+ const Descriptor *D) {
for (const auto &F : D->ElemRecord->fields()) {
auto FieldOff = F.Offset;
- auto FieldDesc = F.Desc;
+ auto *FieldDesc = F.Desc;
- *(reinterpret_cast<Descriptor **>(Dst + FieldOff) - 1) = FieldDesc;
if (auto Fn = FieldDesc->MoveFn)
Fn(B, Src + FieldOff, Dst + FieldOff, FieldDesc);
}
}
static BlockCtorFn getCtorPrim(PrimType Type) {
+ // Floating types are special. They are primitives, but need their
+ // constructor called.
+ if (Type == PT_Float)
+ return ctorTy<PrimConv<PT_Float>::T>;
+ if (Type == PT_IntAP)
+ return ctorTy<PrimConv<PT_IntAP>::T>;
+ if (Type == PT_IntAPS)
+ return ctorTy<PrimConv<PT_IntAPS>::T>;
+
COMPOSITE_TYPE_SWITCH(Type, return ctorTy<T>, return nullptr);
}
static BlockDtorFn getDtorPrim(PrimType Type) {
+ // Floating types are special. They are primitives, but need their
+ // destructor called, since they might allocate memory.
+ if (Type == PT_Float)
+ return dtorTy<PrimConv<PT_Float>::T>;
+ if (Type == PT_IntAP)
+ return dtorTy<PrimConv<PT_IntAP>::T>;
+ if (Type == PT_IntAPS)
+ return dtorTy<PrimConv<PT_IntAPS>::T>;
+
COMPOSITE_TYPE_SWITCH(Type, return dtorTy<T>, return nullptr);
}
@@ -174,71 +212,94 @@ static BlockMoveFn getMovePrim(PrimType Type) {
}
static BlockCtorFn getCtorArrayPrim(PrimType Type) {
- COMPOSITE_TYPE_SWITCH(Type, return ctorArrayTy<T>, return nullptr);
+ TYPE_SWITCH(Type, return ctorArrayTy<T>);
+ llvm_unreachable("unknown Expr");
}
static BlockDtorFn getDtorArrayPrim(PrimType Type) {
- COMPOSITE_TYPE_SWITCH(Type, return dtorArrayTy<T>, return nullptr);
+ TYPE_SWITCH(Type, return dtorArrayTy<T>);
+ llvm_unreachable("unknown Expr");
}
static BlockMoveFn getMoveArrayPrim(PrimType Type) {
- COMPOSITE_TYPE_SWITCH(Type, return moveArrayTy<T>, return nullptr);
+ TYPE_SWITCH(Type, return moveArrayTy<T>);
+ llvm_unreachable("unknown Expr");
}
-Descriptor::Descriptor(const DeclTy &D, PrimType Type, bool IsConst,
- bool IsTemporary, bool IsMutable)
- : Source(D), ElemSize(primSize(Type)), Size(ElemSize), AllocSize(Size),
- IsConst(IsConst), IsMutable(IsMutable), IsTemporary(IsTemporary),
- CtorFn(getCtorPrim(Type)), DtorFn(getDtorPrim(Type)),
- MoveFn(getMovePrim(Type)) {
+/// Primitives.
+Descriptor::Descriptor(const DeclTy &D, PrimType Type, MetadataSize MD,
+ bool IsConst, bool IsTemporary, bool IsMutable)
+ : Source(D), ElemSize(primSize(Type)), Size(ElemSize),
+ MDSize(MD.value_or(0)), AllocSize(align(Size + MDSize)), IsConst(IsConst),
+ IsMutable(IsMutable), IsTemporary(IsTemporary), CtorFn(getCtorPrim(Type)),
+ DtorFn(getDtorPrim(Type)), MoveFn(getMovePrim(Type)) {
+ assert(AllocSize >= Size);
assert(Source && "Missing source");
}
-Descriptor::Descriptor(const DeclTy &D, PrimType Type, size_t NumElems,
- bool IsConst, bool IsTemporary, bool IsMutable)
+/// Primitive arrays.
+Descriptor::Descriptor(const DeclTy &D, PrimType Type, MetadataSize MD,
+ size_t NumElems, bool IsConst, bool IsTemporary,
+ bool IsMutable)
: Source(D), ElemSize(primSize(Type)), Size(ElemSize * NumElems),
- AllocSize(align(Size) + sizeof(InitMap *)), IsConst(IsConst),
+ MDSize(MD.value_or(0)),
+ AllocSize(align(Size) + sizeof(InitMapPtr) + MDSize), IsConst(IsConst),
IsMutable(IsMutable), IsTemporary(IsTemporary), IsArray(true),
CtorFn(getCtorArrayPrim(Type)), DtorFn(getDtorArrayPrim(Type)),
MoveFn(getMoveArrayPrim(Type)) {
assert(Source && "Missing source");
}
+/// Primitive unknown-size arrays.
Descriptor::Descriptor(const DeclTy &D, PrimType Type, bool IsTemporary,
UnknownSize)
- : Source(D), ElemSize(primSize(Type)), Size(UnknownSizeMark),
- AllocSize(alignof(void *)), IsConst(true), IsMutable(false),
- IsTemporary(IsTemporary), IsArray(true), CtorFn(getCtorArrayPrim(Type)),
- DtorFn(getDtorArrayPrim(Type)), MoveFn(getMoveArrayPrim(Type)) {
+ : Source(D), ElemSize(primSize(Type)), Size(UnknownSizeMark), MDSize(0),
+ AllocSize(alignof(void *) + sizeof(InitMapPtr)), IsConst(true),
+ IsMutable(false), IsTemporary(IsTemporary), IsArray(true),
+ CtorFn(getCtorArrayPrim(Type)), DtorFn(getDtorArrayPrim(Type)),
+ MoveFn(getMoveArrayPrim(Type)) {
assert(Source && "Missing source");
}
-Descriptor::Descriptor(const DeclTy &D, Descriptor *Elem, unsigned NumElems,
- bool IsConst, bool IsTemporary, bool IsMutable)
+/// Arrays of composite elements.
+Descriptor::Descriptor(const DeclTy &D, const Descriptor *Elem, MetadataSize MD,
+ unsigned NumElems, bool IsConst, bool IsTemporary,
+ bool IsMutable)
: Source(D), ElemSize(Elem->getAllocSize() + sizeof(InlineDescriptor)),
- Size(ElemSize * NumElems),
- AllocSize(std::max<size_t>(alignof(void *), Size)), ElemDesc(Elem),
- IsConst(IsConst), IsMutable(IsMutable), IsTemporary(IsTemporary),
- IsArray(true), CtorFn(ctorArrayDesc), DtorFn(dtorArrayDesc),
- MoveFn(moveArrayDesc) {
+ Size(ElemSize * NumElems), MDSize(MD.value_or(0)),
+ AllocSize(std::max<size_t>(alignof(void *), Size) + MDSize),
+ ElemDesc(Elem), IsConst(IsConst), IsMutable(IsMutable),
+ IsTemporary(IsTemporary), IsArray(true), CtorFn(ctorArrayDesc),
+ DtorFn(dtorArrayDesc), MoveFn(moveArrayDesc) {
assert(Source && "Missing source");
}
-Descriptor::Descriptor(const DeclTy &D, Descriptor *Elem, bool IsTemporary,
- UnknownSize)
+/// Unknown-size arrays of composite elements.
+Descriptor::Descriptor(const DeclTy &D, const Descriptor *Elem,
+ bool IsTemporary, UnknownSize)
: Source(D), ElemSize(Elem->getAllocSize() + sizeof(InlineDescriptor)),
- Size(UnknownSizeMark), AllocSize(alignof(void *)), ElemDesc(Elem),
+ Size(UnknownSizeMark), MDSize(0),
+ AllocSize(alignof(void *) + sizeof(InitMapPtr)), ElemDesc(Elem),
IsConst(true), IsMutable(false), IsTemporary(IsTemporary), IsArray(true),
CtorFn(ctorArrayDesc), DtorFn(dtorArrayDesc), MoveFn(moveArrayDesc) {
assert(Source && "Missing source");
}
-Descriptor::Descriptor(const DeclTy &D, Record *R, bool IsConst,
- bool IsTemporary, bool IsMutable)
+/// Composite records.
+Descriptor::Descriptor(const DeclTy &D, const Record *R, MetadataSize MD,
+ bool IsConst, bool IsTemporary, bool IsMutable)
: Source(D), ElemSize(std::max<size_t>(alignof(void *), R->getFullSize())),
- Size(ElemSize), AllocSize(Size), ElemRecord(R), IsConst(IsConst),
- IsMutable(IsMutable), IsTemporary(IsTemporary), CtorFn(ctorRecord),
- DtorFn(dtorRecord), MoveFn(moveRecord) {
+ Size(ElemSize), MDSize(MD.value_or(0)), AllocSize(Size + MDSize),
+ ElemRecord(R), IsConst(IsConst), IsMutable(IsMutable),
+ IsTemporary(IsTemporary), CtorFn(ctorRecord), DtorFn(dtorRecord),
+ MoveFn(moveRecord) {
+ assert(Source && "Missing source");
+}
+
+Descriptor::Descriptor(const DeclTy &D, MetadataSize MD)
+ : Source(D), ElemSize(1), Size(ElemSize), MDSize(MD.value_or(0)),
+ AllocSize(Size + MDSize), ElemRecord(nullptr), IsConst(true),
+ IsMutable(false), IsTemporary(false), IsDummy(true) {
assert(Source && "Missing source");
}
@@ -247,9 +308,17 @@ QualType Descriptor::getType() const {
return E->getType();
if (auto *D = asValueDecl())
return D->getType();
+ if (auto *T = dyn_cast<TypeDecl>(asDecl()))
+ return QualType(T->getTypeForDecl(), 0);
llvm_unreachable("Invalid descriptor type");
}
+QualType Descriptor::getElemQualType() const {
+ assert(isArray());
+ const auto *AT = cast<ArrayType>(getType());
+ return AT->getElementType();
+}
+
SourceLocation Descriptor::getLocation() const {
if (auto *D = Source.dyn_cast<const Decl *>())
return D->getLocation();
@@ -258,20 +327,14 @@ SourceLocation Descriptor::getLocation() const {
llvm_unreachable("Invalid descriptor type");
}
-InitMap::InitMap(unsigned N) : UninitFields(N) {
- for (unsigned I = 0; I < N / PER_FIELD; ++I) {
- data()[I] = 0;
- }
-}
-
-InitMap::T *InitMap::data() {
- auto *Start = reinterpret_cast<char *>(this) + align(sizeof(InitMap));
- return reinterpret_cast<T *>(Start);
+InitMap::InitMap(unsigned N)
+ : UninitFields(N), Data(std::make_unique<T[]>(numFields(N))) {
+ std::fill_n(data(), numFields(N), 0);
}
-bool InitMap::initialize(unsigned I) {
+bool InitMap::initializeElement(unsigned I) {
unsigned Bucket = I / PER_FIELD;
- unsigned Mask = 1ull << static_cast<uint64_t>(I % PER_FIELD);
+ T Mask = T(1) << (I % PER_FIELD);
if (!(data()[Bucket] & Mask)) {
data()[Bucket] |= Mask;
UninitFields -= 1;
@@ -279,14 +342,7 @@ bool InitMap::initialize(unsigned I) {
return UninitFields == 0;
}
-bool InitMap::isInitialized(unsigned I) {
+bool InitMap::isElementInitialized(unsigned I) const {
unsigned Bucket = I / PER_FIELD;
- unsigned Mask = 1ull << static_cast<uint64_t>(I % PER_FIELD);
- return data()[Bucket] & Mask;
-}
-
-InitMap *InitMap::allocate(unsigned N) {
- const size_t NumFields = ((N + PER_FIELD - 1) / PER_FIELD);
- const size_t Size = align(sizeof(InitMap)) + NumFields * PER_FIELD;
- return new (malloc(Size)) InitMap(N);
+ return data()[Bucket] & (T(1) << (I % PER_FIELD));
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.h b/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.h
index b260b7600974..580c200f9095 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Descriptor.h
@@ -20,44 +20,74 @@ namespace clang {
namespace interp {
class Block;
class Record;
+struct InitMap;
struct Descriptor;
enum PrimType : unsigned;
using DeclTy = llvm::PointerUnion<const Decl *, const Expr *>;
+using InitMapPtr = std::optional<std::pair<bool, std::shared_ptr<InitMap>>>;
/// Invoked whenever a block is created. The constructor method fills in the
/// inline descriptors of all fields and array elements. It also initializes
/// all the fields which contain non-trivial types.
-using BlockCtorFn = void (*)(Block *Storage, char *FieldPtr, bool IsConst,
+using BlockCtorFn = void (*)(Block *Storage, std::byte *FieldPtr, bool IsConst,
bool IsMutable, bool IsActive,
- Descriptor *FieldDesc);
+ const Descriptor *FieldDesc);
/// Invoked when a block is destroyed. Invokes the destructors of all
/// non-trivial nested fields of arrays and records.
-using BlockDtorFn = void (*)(Block *Storage, char *FieldPtr,
- Descriptor *FieldDesc);
+using BlockDtorFn = void (*)(Block *Storage, std::byte *FieldPtr,
+ const Descriptor *FieldDesc);
/// Invoked when a block with pointers referencing it goes out of scope. Such
/// blocks are persisted: the move function copies all inline descriptors and
/// non-trivial fields, as existing pointers might need to reference those
/// descriptors. Data is not copied since it cannot be legally read.
-using BlockMoveFn = void (*)(Block *Storage, char *SrcFieldPtr,
- char *DstFieldPtr, Descriptor *FieldDesc);
+using BlockMoveFn = void (*)(Block *Storage, const std::byte *SrcFieldPtr,
+ std::byte *DstFieldPtr,
+ const Descriptor *FieldDesc);
-/// Object size as used by the interpreter.
-using InterpSize = unsigned;
+/// Inline descriptor embedded in structures and arrays.
+///
+/// Such descriptors precede all composite array elements and structure fields.
+/// If the base of a pointer is not zero, the base points to the end of this
+/// structure. The offset field is used to traverse the pointer chain up
+/// to the root structure which allocated the object.
+struct InlineDescriptor {
+ /// Offset inside the structure/array.
+ unsigned Offset;
+
+ /// Flag indicating if the storage is constant or not.
+ /// Relevant for primitive fields.
+ unsigned IsConst : 1;
+ /// For primitive fields, it indicates if the field was initialized.
+ /// Primitive fields in static storage are always initialized.
+ /// Arrays are always initialized, even though their elements might not be.
+ /// Base classes are initialized after the constructor is invoked.
+ unsigned IsInitialized : 1;
+ /// Flag indicating if the field is an embedded base class.
+ unsigned IsBase : 1;
+ /// Flag indicating if the field is the active member of a union.
+ unsigned IsActive : 1;
+ /// Flag indicating if the field is mutable (if in a record).
+ unsigned IsFieldMutable : 1;
+
+ const Descriptor *Desc;
+};
/// Describes a memory block created by an allocation site.
-struct Descriptor {
+struct Descriptor final {
private:
/// Original declaration, used to emit the error message.
const DeclTy Source;
/// Size of an element, in host bytes.
- const InterpSize ElemSize;
+ const unsigned ElemSize;
/// Size of the storage, in host bytes.
- const InterpSize Size;
+ const unsigned Size;
+ /// Size of the metadata.
+ const unsigned MDSize;
/// Size of the allocation (storage + metadata), in host bytes.
- const InterpSize AllocSize;
+ const unsigned AllocSize;
/// Value to denote arrays of unknown size.
static constexpr unsigned UnknownSizeMark = (unsigned)-1;
@@ -66,10 +96,13 @@ public:
/// Token to denote structures of unknown size.
struct UnknownSize {};
+ using MetadataSize = std::optional<unsigned>;
+ static constexpr MetadataSize InlineDescMD = sizeof(InlineDescriptor);
+
/// Pointer to the record, if block contains records.
- Record *const ElemRecord = nullptr;
+ const Record *const ElemRecord = nullptr;
/// Descriptor of the array element.
- Descriptor *const ElemDesc = nullptr;
+ const Descriptor *const ElemDesc = nullptr;
/// Flag indicating if the block is mutable.
const bool IsConst = false;
/// Flag indicating if a field is mutable.
@@ -78,6 +111,8 @@ public:
const bool IsTemporary = false;
/// Flag indicating if the block is an array.
const bool IsArray = false;
+ /// Flag indicating if this is a dummy descriptor.
+ const bool IsDummy = false;
/// Storage management methods.
const BlockCtorFn CtorFn = nullptr;
@@ -85,43 +120,47 @@ public:
const BlockMoveFn MoveFn = nullptr;
/// Allocates a descriptor for a primitive.
- Descriptor(const DeclTy &D, PrimType Type, bool IsConst, bool IsTemporary,
- bool IsMutable);
+ Descriptor(const DeclTy &D, PrimType Type, MetadataSize MD, bool IsConst,
+ bool IsTemporary, bool IsMutable);
/// Allocates a descriptor for an array of primitives.
- Descriptor(const DeclTy &D, PrimType Type, size_t NumElems, bool IsConst,
- bool IsTemporary, bool IsMutable);
+ Descriptor(const DeclTy &D, PrimType Type, MetadataSize MD, size_t NumElems,
+ bool IsConst, bool IsTemporary, bool IsMutable);
/// Allocates a descriptor for an array of primitives of unknown size.
Descriptor(const DeclTy &D, PrimType Type, bool IsTemporary, UnknownSize);
/// Allocates a descriptor for an array of composites.
- Descriptor(const DeclTy &D, Descriptor *Elem, unsigned NumElems, bool IsConst,
- bool IsTemporary, bool IsMutable);
+ Descriptor(const DeclTy &D, const Descriptor *Elem, MetadataSize MD,
+ unsigned NumElems, bool IsConst, bool IsTemporary, bool IsMutable);
/// Allocates a descriptor for an array of composites of unknown size.
- Descriptor(const DeclTy &D, Descriptor *Elem, bool IsTemporary, UnknownSize);
+ Descriptor(const DeclTy &D, const Descriptor *Elem, bool IsTemporary,
+ UnknownSize);
/// Allocates a descriptor for a record.
- Descriptor(const DeclTy &D, Record *R, bool IsConst, bool IsTemporary,
- bool IsMutable);
+ Descriptor(const DeclTy &D, const Record *R, MetadataSize MD, bool IsConst,
+ bool IsTemporary, bool IsMutable);
+
+ Descriptor(const DeclTy &D, MetadataSize MD);
QualType getType() const;
+ QualType getElemQualType() const;
SourceLocation getLocation() const;
const Decl *asDecl() const { return Source.dyn_cast<const Decl *>(); }
const Expr *asExpr() const { return Source.dyn_cast<const Expr *>(); }
const ValueDecl *asValueDecl() const {
- return dyn_cast_or_null<ValueDecl>(asDecl());
+ return dyn_cast_if_present<ValueDecl>(asDecl());
}
const FieldDecl *asFieldDecl() const {
- return dyn_cast_or_null<FieldDecl>(asDecl());
+ return dyn_cast_if_present<FieldDecl>(asDecl());
}
const RecordDecl *asRecordDecl() const {
- return dyn_cast_or_null<RecordDecl>(asDecl());
+ return dyn_cast_if_present<RecordDecl>(asDecl());
}
/// Returns the size of the object without metadata.
@@ -134,6 +173,8 @@ public:
unsigned getAllocSize() const { return AllocSize; }
/// returns the size of an element when the structure is viewed as an array.
unsigned getElemSize() const { return ElemSize; }
+ /// Returns the size of the metadata.
+ unsigned getMetadataSize() const { return MDSize; }
/// Returns the number of elements stored in the block.
unsigned getNumElems() const {
@@ -142,6 +183,8 @@ public:
/// Checks if the descriptor is of an array of primitives.
bool isPrimitiveArray() const { return IsArray && !ElemDesc; }
+ /// Checks if the descriptor is of an array of composites.
+ bool isCompositeArray() const { return IsArray && ElemDesc; }
/// Checks if the descriptor is of an array of zero size.
bool isZeroSizeArray() const { return Size == 0; }
/// Checks if the descriptor is of an array of unknown size.
@@ -152,66 +195,43 @@ public:
/// Checks if the descriptor is of an array.
bool isArray() const { return IsArray; }
-};
-
-/// Inline descriptor embedded in structures and arrays.
-///
-/// Such descriptors precede all composite array elements and structure fields.
-/// If the base of a pointer is not zero, the base points to the end of this
-/// structure. The offset field is used to traverse the pointer chain up
-/// to the root structure which allocated the object.
-struct InlineDescriptor {
- /// Offset inside the structure/array.
- unsigned Offset;
-
- /// Flag indicating if the storage is constant or not.
- /// Relevant for primitive fields.
- unsigned IsConst : 1;
- /// For primitive fields, it indicates if the field was initialized.
- /// Primitive fields in static storage are always initialized.
- /// Arrays are always initialized, even though their elements might not be.
- /// Base classes are initialized after the constructor is invoked.
- unsigned IsInitialized : 1;
- /// Flag indicating if the field is an embedded base class.
- unsigned IsBase : 1;
- /// Flag indicating if the field is the active member of a union.
- unsigned IsActive : 1;
- /// Flag indicating if the field is mutable (if in a record).
- unsigned IsMutable : 1;
-
- Descriptor *Desc;
+ /// Checks if the descriptor is of a record.
+ bool isRecord() const { return !IsArray && ElemRecord; }
+ /// Checks if this is a dummy descriptor.
+ bool isDummy() const { return IsDummy; }
};
/// Bitfield tracking the initialisation status of elements of primitive arrays.
-/// A pointer to this is embedded at the end of all primitive arrays.
-/// If the map was not yet created and nothing was initialied, the pointer to
-/// this structure is 0. If the object was fully initialized, the pointer is -1.
-struct InitMap {
+struct InitMap final {
private:
/// Type packing bits.
using T = uint64_t;
/// Bits stored in a single field.
static constexpr uint64_t PER_FIELD = sizeof(T) * CHAR_BIT;
+public:
/// Initializes the map with no fields set.
- InitMap(unsigned N);
+ explicit InitMap(unsigned N);
+
+private:
+ friend class Pointer;
/// Returns a pointer to storage.
- T *data();
+ T *data() { return Data.get(); }
+ const T *data() const { return Data.get(); }
-public:
/// Initializes an element. Returns true when object if fully initialized.
- bool initialize(unsigned I);
+ bool initializeElement(unsigned I);
/// Checks if an element was initialized.
- bool isInitialized(unsigned I);
-
- /// Allocates a map holding N elements.
- static InitMap *allocate(unsigned N);
+ bool isElementInitialized(unsigned I) const;
-private:
- /// Number of fields initialized.
+ static constexpr size_t numFields(unsigned N) {
+ return (N + PER_FIELD - 1) / PER_FIELD;
+ }
+ /// Number of fields not initialized.
unsigned UninitFields;
+ std::unique_ptr<T[]> Data;
};
} // namespace interp
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp
index c1c18f832d4f..d276df8f2926 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Disasm.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+#include "Floating.h"
#include "Function.h"
#include "Opcode.h"
#include "PrimType.h"
@@ -21,29 +22,35 @@
using namespace clang;
using namespace clang::interp;
-LLVM_DUMP_METHOD void Function::dump() const { dump(llvm::errs()); }
-
-LLVM_DUMP_METHOD void Function::dump(llvm::raw_ostream &OS) const {
- if (F) {
- if (auto *Cons = dyn_cast<CXXConstructorDecl>(F)) {
- DeclarationName Name = Cons->getParent()->getDeclName();
- OS << Name << "::" << Name << ":\n";
- } else {
- OS << F->getDeclName() << ":\n";
- }
+template <typename T> inline T ReadArg(Program &P, CodePtr &OpPC) {
+ if constexpr (std::is_pointer_v<T>) {
+ uint32_t ID = OpPC.read<uint32_t>();
+ return reinterpret_cast<T>(P.getNativePointer(ID));
} else {
- OS << "<<expr>>\n";
+ return OpPC.read<T>();
}
+}
+template <> inline Floating ReadArg<Floating>(Program &P, CodePtr &OpPC) {
+ Floating F = Floating::deserialize(*OpPC);
+ OpPC += align(F.bytesToSerialize());
+ return F;
+}
+
+LLVM_DUMP_METHOD void Function::dump() const { dump(llvm::errs()); }
+
+LLVM_DUMP_METHOD void Function::dump(llvm::raw_ostream &OS) const {
+ OS << getName() << " " << (const void *)this << "\n";
OS << "frame size: " << getFrameSize() << "\n";
OS << "arg size: " << getArgSize() << "\n";
OS << "rvo: " << hasRVO() << "\n";
+ OS << "this arg: " << hasThisPointer() << "\n";
auto PrintName = [&OS](const char *Name) {
OS << Name;
- for (long I = 0, N = strlen(Name); I < 30 - N; ++I) {
- OS << ' ';
- }
+ long N = 30 - strlen(Name);
+ if (N > 0)
+ OS.indent(N);
};
for (CodePtr Start = getCodeBegin(), PC = Start; PC != getCodeEnd();) {
@@ -61,6 +68,10 @@ LLVM_DUMP_METHOD void Function::dump(llvm::raw_ostream &OS) const {
LLVM_DUMP_METHOD void Program::dump() const { dump(llvm::errs()); }
LLVM_DUMP_METHOD void Program::dump(llvm::raw_ostream &OS) const {
+ OS << ":: Program\n";
+ OS << "Global Variables: " << Globals.size() << "\n";
+ OS << "Functions: " << Funcs.size() << "\n";
+ OS << "\n";
for (auto &Func : Funcs) {
Func.second->dump();
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.cpp b/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.cpp
index 22e8695b9211..a60f893de8bd 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.cpp
@@ -7,39 +7,48 @@
//===----------------------------------------------------------------------===//
#include "EvalEmitter.h"
+#include "ByteCodeGenError.h"
#include "Context.h"
+#include "IntegralAP.h"
#include "Interp.h"
#include "Opcode.h"
-#include "Program.h"
#include "clang/AST/DeclCXX.h"
using namespace clang;
using namespace clang::interp;
-using APSInt = llvm::APSInt;
-template <typename T> using Expected = llvm::Expected<T>;
-
EvalEmitter::EvalEmitter(Context &Ctx, Program &P, State &Parent,
InterpStack &Stk, APValue &Result)
- : Ctx(Ctx), P(P), S(Parent, P, Stk, Ctx, this), Result(Result) {
+ : Ctx(Ctx), P(P), S(Parent, P, Stk, Ctx, this), EvalResult(&Ctx) {
// Create a dummy frame for the interpreter which does not have locals.
- S.Current = new InterpFrame(S, nullptr, nullptr, CodePtr(), Pointer());
+ S.Current =
+ new InterpFrame(S, /*Func=*/nullptr, /*Caller=*/nullptr, CodePtr());
}
-llvm::Expected<bool> EvalEmitter::interpretExpr(const Expr *E) {
- if (this->visitExpr(E))
- return true;
- if (BailLocation)
- return llvm::make_error<ByteCodeGenError>(*BailLocation);
- return false;
+EvalEmitter::~EvalEmitter() {
+ for (auto &[K, V] : Locals) {
+ Block *B = reinterpret_cast<Block *>(V.get());
+ if (B->isInitialized())
+ B->invokeDtor();
+ }
}
-llvm::Expected<bool> EvalEmitter::interpretDecl(const VarDecl *VD) {
- if (this->visitDecl(VD))
- return true;
- if (BailLocation)
- return llvm::make_error<ByteCodeGenError>(*BailLocation);
- return false;
+EvaluationResult EvalEmitter::interpretExpr(const Expr *E) {
+ EvalResult.setSource(E);
+
+ if (!this->visitExpr(E))
+ EvalResult.setInvalid();
+
+ return std::move(this->EvalResult);
+}
+
+EvaluationResult EvalEmitter::interpretDecl(const VarDecl *VD) {
+ EvalResult.setSource(VD);
+
+ if (!this->visitDecl(VD))
+ EvalResult.setInvalid();
+
+ return std::move(this->EvalResult);
}
void EvalEmitter::emitLabel(LabelTy Label) {
@@ -54,18 +63,22 @@ Scope::Local EvalEmitter::createLocal(Descriptor *D) {
auto *B = new (Memory.get()) Block(D, /*isStatic=*/false);
B->invokeCtor();
+ // Initialize local variable inline descriptor.
+ InlineDescriptor &Desc = *reinterpret_cast<InlineDescriptor *>(B->rawData());
+ Desc.Desc = D;
+ Desc.Offset = sizeof(InlineDescriptor);
+ Desc.IsActive = true;
+ Desc.IsBase = false;
+ Desc.IsFieldMutable = false;
+ Desc.IsConst = false;
+ Desc.IsInitialized = false;
+
// Register the local.
unsigned Off = Locals.size();
Locals.insert({Off, std::move(Memory)});
return {Off, D};
}
-bool EvalEmitter::bail(const SourceLocation &Loc) {
- if (!BailLocation)
- BailLocation = Loc;
- return false;
-}
-
bool EvalEmitter::jumpTrue(const LabelTy &Label) {
if (isActive()) {
if (S.Stk.pop<bool>())
@@ -99,107 +112,45 @@ template <PrimType OpType> bool EvalEmitter::emitRet(const SourceInfo &Info) {
if (!isActive())
return true;
using T = typename PrimConv<OpType>::T;
- return ReturnValue<T>(S.Stk.pop<T>(), Result);
+ EvalResult.setValue(S.Stk.pop<T>().toAPValue());
+ return true;
}
-bool EvalEmitter::emitRetVoid(const SourceInfo &Info) { return true; }
+template <> bool EvalEmitter::emitRet<PT_Ptr>(const SourceInfo &Info) {
+ if (!isActive())
+ return true;
+ EvalResult.setPointer(S.Stk.pop<Pointer>());
+ return true;
+}
+template <> bool EvalEmitter::emitRet<PT_FnPtr>(const SourceInfo &Info) {
+ if (!isActive())
+ return true;
+ EvalResult.setFunctionPointer(S.Stk.pop<FunctionPointer>());
+ return true;
+}
+
+bool EvalEmitter::emitRetVoid(const SourceInfo &Info) {
+ EvalResult.setValid();
+ return true;
+}
bool EvalEmitter::emitRetValue(const SourceInfo &Info) {
- // Method to recursively traverse composites.
- std::function<bool(QualType, const Pointer &, APValue &)> Composite;
- Composite = [this, &Composite](QualType Ty, const Pointer &Ptr, APValue &R) {
- if (auto *AT = Ty->getAs<AtomicType>())
- Ty = AT->getValueType();
-
- if (auto *RT = Ty->getAs<RecordType>()) {
- auto *Record = Ptr.getRecord();
- assert(Record && "Missing record descriptor");
-
- bool Ok = true;
- if (RT->getDecl()->isUnion()) {
- const FieldDecl *ActiveField = nullptr;
- APValue Value;
- for (auto &F : Record->fields()) {
- const Pointer &FP = Ptr.atField(F.Offset);
- QualType FieldTy = F.Decl->getType();
- if (FP.isActive()) {
- if (llvm::Optional<PrimType> T = Ctx.classify(FieldTy)) {
- TYPE_SWITCH(*T, Ok &= ReturnValue<T>(FP.deref<T>(), Value));
- } else {
- Ok &= Composite(FieldTy, FP, Value);
- }
- break;
- }
- }
- R = APValue(ActiveField, Value);
- } else {
- unsigned NF = Record->getNumFields();
- unsigned NB = Record->getNumBases();
- unsigned NV = Ptr.isBaseClass() ? 0 : Record->getNumVirtualBases();
-
- R = APValue(APValue::UninitStruct(), NB, NF);
-
- for (unsigned I = 0; I < NF; ++I) {
- const Record::Field *FD = Record->getField(I);
- QualType FieldTy = FD->Decl->getType();
- const Pointer &FP = Ptr.atField(FD->Offset);
- APValue &Value = R.getStructField(I);
-
- if (llvm::Optional<PrimType> T = Ctx.classify(FieldTy)) {
- TYPE_SWITCH(*T, Ok &= ReturnValue<T>(FP.deref<T>(), Value));
- } else {
- Ok &= Composite(FieldTy, FP, Value);
- }
- }
-
- for (unsigned I = 0; I < NB; ++I) {
- const Record::Base *BD = Record->getBase(I);
- QualType BaseTy = Ctx.getASTContext().getRecordType(BD->Decl);
- const Pointer &BP = Ptr.atField(BD->Offset);
- Ok &= Composite(BaseTy, BP, R.getStructBase(I));
- }
-
- for (unsigned I = 0; I < NV; ++I) {
- const Record::Base *VD = Record->getVirtualBase(I);
- QualType VirtBaseTy = Ctx.getASTContext().getRecordType(VD->Decl);
- const Pointer &VP = Ptr.atField(VD->Offset);
- Ok &= Composite(VirtBaseTy, VP, R.getStructBase(NB + I));
- }
- }
- return Ok;
- }
- if (auto *AT = Ty->getAsArrayTypeUnsafe()) {
- const size_t NumElems = Ptr.getNumElems();
- QualType ElemTy = AT->getElementType();
- R = APValue(APValue::UninitArray{}, NumElems, NumElems);
-
- bool Ok = true;
- for (unsigned I = 0; I < NumElems; ++I) {
- APValue &Slot = R.getArrayInitializedElt(I);
- const Pointer &EP = Ptr.atIndex(I);
- if (llvm::Optional<PrimType> T = Ctx.classify(ElemTy)) {
- TYPE_SWITCH(*T, Ok &= ReturnValue<T>(EP.deref<T>(), Slot));
- } else {
- Ok &= Composite(ElemTy, EP.narrow(), Slot);
- }
- }
- return Ok;
- }
- llvm_unreachable("invalid value to return");
- };
-
- // Return the composite type.
const auto &Ptr = S.Stk.pop<Pointer>();
- return Composite(Ptr.getType(), Ptr, Result);
+ if (std::optional<APValue> APV = Ptr.toRValue(S.getCtx())) {
+ EvalResult.setValue(*APV);
+ return true;
+ }
+
+ EvalResult.setInvalid();
+ return false;
}
bool EvalEmitter::emitGetPtrLocal(uint32_t I, const SourceInfo &Info) {
if (!isActive())
return true;
- auto It = Locals.find(I);
- assert(It != Locals.end() && "Missing local variable");
- S.Stk.push<Pointer>(reinterpret_cast<Block *>(It->second.get()));
+ Block *B = getLocal(I);
+ S.Stk.push<Pointer>(B, sizeof(InlineDescriptor));
return true;
}
@@ -210,10 +161,8 @@ bool EvalEmitter::emitGetLocal(uint32_t I, const SourceInfo &Info) {
using T = typename PrimConv<OpType>::T;
- auto It = Locals.find(I);
- assert(It != Locals.end() && "Missing local variable");
- auto *B = reinterpret_cast<Block *>(It->second.get());
- S.Stk.push<T>(*reinterpret_cast<T *>(B + 1));
+ Block *B = getLocal(I);
+ S.Stk.push<T>(*reinterpret_cast<T *>(B->data()));
return true;
}
@@ -224,10 +173,11 @@ bool EvalEmitter::emitSetLocal(uint32_t I, const SourceInfo &Info) {
using T = typename PrimConv<OpType>::T;
- auto It = Locals.find(I);
- assert(It != Locals.end() && "Missing local variable");
- auto *B = reinterpret_cast<Block *>(It->second.get());
- *reinterpret_cast<T *>(B + 1) = S.Stk.pop<T>();
+ Block *B = getLocal(I);
+ *reinterpret_cast<T *>(B->data()) = S.Stk.pop<T>();
+ InlineDescriptor &Desc = *reinterpret_cast<InlineDescriptor *>(B->rawData());
+ Desc.IsInitialized = true;
+
return true;
}
@@ -236,9 +186,8 @@ bool EvalEmitter::emitDestroy(uint32_t I, const SourceInfo &Info) {
return true;
for (auto &Local : Descriptors[I]) {
- auto It = Locals.find(Local.Offset);
- assert(It != Locals.end() && "Missing local variable");
- S.deallocate(reinterpret_cast<Block *>(It->second.get()));
+ Block *B = getLocal(Local.Offset);
+ S.deallocate(B);
}
return true;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.h b/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.h
index eec2ff8ee753..deb2ebc4e61f 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/EvalEmitter.h
@@ -13,23 +13,18 @@
#ifndef LLVM_CLANG_AST_INTERP_EVALEMITTER_H
#define LLVM_CLANG_AST_INTERP_EVALEMITTER_H
-#include "ByteCodeGenError.h"
-#include "Context.h"
-#include "InterpStack.h"
+#include "EvaluationResult.h"
#include "InterpState.h"
#include "PrimType.h"
-#include "Program.h"
#include "Source.h"
#include "llvm/Support/Error.h"
namespace clang {
-class FunctionDecl;
namespace interp {
class Context;
class Function;
-class InterpState;
+class InterpStack;
class Program;
-class SourceInfo;
enum Opcode : uint32_t;
/// An emitter which evaluates opcodes as they are emitted.
@@ -39,14 +34,16 @@ public:
using AddrTy = uintptr_t;
using Local = Scope::Local;
- llvm::Expected<bool> interpretExpr(const Expr *E);
- llvm::Expected<bool> interpretDecl(const VarDecl *VD);
+ EvaluationResult interpretExpr(const Expr *E);
+ EvaluationResult interpretDecl(const VarDecl *VD);
+
+ InterpState &getState() { return S; }
protected:
EvalEmitter(Context &Ctx, Program &P, State &Parent, InterpStack &Stk,
APValue &Result);
- virtual ~EvalEmitter() {}
+ virtual ~EvalEmitter();
/// Define a label.
void emitLabel(LabelTy Label);
@@ -57,10 +54,6 @@ protected:
virtual bool visitExpr(const Expr *E) = 0;
virtual bool visitDecl(const VarDecl *VD) = 0;
- bool bail(const Stmt *S) { return bail(S->getBeginLoc()); }
- bool bail(const Decl *D) { return bail(D->getBeginLoc()); }
- bool bail(const SourceLocation &Loc);
-
/// Emits jumps.
bool jumpTrue(const LabelTy &Label);
bool jumpFalse(const LabelTy &Label);
@@ -71,12 +64,16 @@ protected:
Local createLocal(Descriptor *D);
/// Returns the source location of the current opcode.
- SourceInfo getSource(Function *F, CodePtr PC) const override {
- return F ? F->getSource(PC) : CurrentSource;
+ SourceInfo getSource(const Function *F, CodePtr PC) const override {
+ return (F && F->hasBody()) ? F->getSource(PC) : CurrentSource;
}
/// Parameter indices.
- llvm::DenseMap<const ParmVarDecl *, unsigned> Params;
+ llvm::DenseMap<const ParmVarDecl *, ParamOffset> Params;
+ /// Lambda captures.
+ llvm::DenseMap<const ValueDecl *, ParamOffset> LambdaCaptures;
+ /// Offset of the This parameter in a lambda record.
+ unsigned LambdaThisCapture = 0;
/// Local descriptors.
llvm::SmallVector<SmallVector<Local, 8>, 2> Descriptors;
@@ -88,16 +85,20 @@ private:
/// Callee evaluation state.
InterpState S;
/// Location to write the result to.
- APValue &Result;
+ EvaluationResult EvalResult;
/// Temporaries which require storage.
llvm::DenseMap<unsigned, std::unique_ptr<char[]>> Locals;
+ Block *getLocal(unsigned Index) const {
+ auto It = Locals.find(Index);
+ assert(It != Locals.end() && "Missing local variable");
+ return reinterpret_cast<Block *>(It->second.get());
+ }
+
// The emitter always tracks the current instruction and sets OpPC to a token
// value which is mapped to the location of the opcode being evaluated.
CodePtr OpPC;
- /// Location of a failure.
- llvm::Optional<SourceLocation> BailLocation;
/// Location of the current instruction.
SourceInfo CurrentSource;
@@ -110,12 +111,7 @@ private:
/// Since expressions can only jump forward, predicated execution is
/// used to deal with if-else statements.
- bool isActive() { return CurrentLabel == ActiveLabel; }
-
- /// Helper to invoke a method.
- bool ExecuteCall(Function *F, Pointer &&This, const SourceInfo &Info);
- /// Helper to emit a diagnostic on a missing method.
- bool ExecuteNoCall(const FunctionDecl *F, const SourceInfo &Info);
+ bool isActive() const { return CurrentLabel == ActiveLabel; }
protected:
#define GET_EVAL_PROTO
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.cpp b/contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.cpp
new file mode 100644
index 000000000000..a14dc87f1dfd
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.cpp
@@ -0,0 +1,196 @@
+//===----- EvaluationResult.cpp - Result class for the VM ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "EvaluationResult.h"
+#include "Context.h"
+#include "InterpState.h"
+#include "Record.h"
+#include "clang/AST/ExprCXX.h"
+
+namespace clang {
+namespace interp {
+
+APValue EvaluationResult::toAPValue() const {
+ assert(!empty());
+ switch (Kind) {
+ case LValue:
+ // Either a pointer or a function pointer.
+ if (const auto *P = std::get_if<Pointer>(&Value))
+ return P->toAPValue();
+ else if (const auto *FP = std::get_if<FunctionPointer>(&Value))
+ return FP->toAPValue();
+ else
+ llvm_unreachable("Unhandled LValue type");
+ break;
+ case RValue:
+ return std::get<APValue>(Value);
+ case Valid:
+ return APValue();
+ default:
+ llvm_unreachable("Unhandled result kind?");
+ }
+}
+
+std::optional<APValue> EvaluationResult::toRValue() const {
+ if (Kind == RValue)
+ return toAPValue();
+
+ assert(Kind == LValue);
+
+ // We have a pointer and want an RValue.
+ if (const auto *P = std::get_if<Pointer>(&Value))
+ return P->toRValue(*Ctx);
+ else if (const auto *FP = std::get_if<FunctionPointer>(&Value)) // Nope
+ return FP->toAPValue();
+ llvm_unreachable("Unhandled lvalue kind");
+}
+
+static void DiagnoseUninitializedSubobject(InterpState &S, SourceLocation Loc,
+ const FieldDecl *SubObjDecl) {
+ assert(SubObjDecl && "Subobject declaration does not exist");
+ S.FFDiag(Loc, diag::note_constexpr_uninitialized)
+ << /*(name)*/ 1 << SubObjDecl;
+ S.Note(SubObjDecl->getLocation(),
+ diag::note_constexpr_subobject_declared_here);
+}
+
+static bool CheckFieldsInitialized(InterpState &S, SourceLocation Loc,
+ const Pointer &BasePtr, const Record *R);
+
+static bool CheckArrayInitialized(InterpState &S, SourceLocation Loc,
+ const Pointer &BasePtr,
+ const ConstantArrayType *CAT) {
+ bool Result = true;
+ size_t NumElems = CAT->getSize().getZExtValue();
+ QualType ElemType = CAT->getElementType();
+
+ if (ElemType->isRecordType()) {
+ const Record *R = BasePtr.getElemRecord();
+ for (size_t I = 0; I != NumElems; ++I) {
+ Pointer ElemPtr = BasePtr.atIndex(I).narrow();
+ Result &= CheckFieldsInitialized(S, Loc, ElemPtr, R);
+ }
+ } else if (const auto *ElemCAT = dyn_cast<ConstantArrayType>(ElemType)) {
+ for (size_t I = 0; I != NumElems; ++I) {
+ Pointer ElemPtr = BasePtr.atIndex(I).narrow();
+ Result &= CheckArrayInitialized(S, Loc, ElemPtr, ElemCAT);
+ }
+ } else {
+ for (size_t I = 0; I != NumElems; ++I) {
+ if (!BasePtr.atIndex(I).isInitialized()) {
+ DiagnoseUninitializedSubobject(S, Loc, BasePtr.getField());
+ Result = false;
+ }
+ }
+ }
+
+ return Result;
+}
+
+static bool CheckFieldsInitialized(InterpState &S, SourceLocation Loc,
+ const Pointer &BasePtr, const Record *R) {
+ assert(R);
+ bool Result = true;
+ // Check all fields of this record are initialized.
+ for (const Record::Field &F : R->fields()) {
+ Pointer FieldPtr = BasePtr.atField(F.Offset);
+ QualType FieldType = F.Decl->getType();
+
+ if (FieldType->isRecordType()) {
+ Result &= CheckFieldsInitialized(S, Loc, FieldPtr, FieldPtr.getRecord());
+ } else if (FieldType->isIncompleteArrayType()) {
+ // Nothing to do here.
+ } else if (FieldType->isArrayType()) {
+ const auto *CAT =
+ cast<ConstantArrayType>(FieldType->getAsArrayTypeUnsafe());
+ Result &= CheckArrayInitialized(S, Loc, FieldPtr, CAT);
+ } else if (!FieldPtr.isInitialized()) {
+ DiagnoseUninitializedSubobject(S, Loc, F.Decl);
+ Result = false;
+ }
+ }
+
+ // Check Fields in all bases
+ for (const Record::Base &B : R->bases()) {
+ Pointer P = BasePtr.atField(B.Offset);
+ if (!P.isInitialized()) {
+ S.FFDiag(BasePtr.getDeclDesc()->asDecl()->getLocation(),
+ diag::note_constexpr_uninitialized_base)
+ << B.Desc->getType();
+ return false;
+ }
+ Result &= CheckFieldsInitialized(S, Loc, P, B.R);
+ }
+
+ // TODO: Virtual bases
+
+ return Result;
+}
+
+bool EvaluationResult::checkFullyInitialized(InterpState &S) const {
+ assert(Source);
+ assert(isLValue());
+
+ // Our Source must be a VarDecl.
+ const Decl *SourceDecl = Source.dyn_cast<const Decl *>();
+ assert(SourceDecl);
+ const auto *VD = cast<VarDecl>(SourceDecl);
+ assert(VD->getType()->isRecordType() || VD->getType()->isArrayType());
+ SourceLocation InitLoc = VD->getAnyInitializer()->getExprLoc();
+
+ const Pointer &Ptr = *std::get_if<Pointer>(&Value);
+ assert(!Ptr.isZero());
+
+ if (const Record *R = Ptr.getRecord())
+ return CheckFieldsInitialized(S, InitLoc, Ptr, R);
+ const auto *CAT =
+ cast<ConstantArrayType>(Ptr.getType()->getAsArrayTypeUnsafe());
+ return CheckArrayInitialized(S, InitLoc, Ptr, CAT);
+
+ return true;
+}
+
+void EvaluationResult::dump() const {
+ assert(Ctx);
+ auto &OS = llvm::errs();
+ const ASTContext &ASTCtx = Ctx->getASTContext();
+
+ switch (Kind) {
+ case Empty:
+ OS << "Empty\n";
+ break;
+ case RValue:
+ OS << "RValue: ";
+ std::get<APValue>(Value).dump(OS, ASTCtx);
+ break;
+ case LValue: {
+ assert(Source);
+ QualType SourceType;
+ if (const auto *D = Source.dyn_cast<const Decl *>()) {
+ if (const auto *VD = dyn_cast<ValueDecl>(D))
+ SourceType = VD->getType();
+ } else if (const auto *E = Source.dyn_cast<const Expr *>()) {
+ SourceType = E->getType();
+ }
+
+ OS << "LValue: ";
+ if (const auto *P = std::get_if<Pointer>(&Value))
+ P->toAPValue().printPretty(OS, ASTCtx, SourceType);
+ else if (const auto *FP = std::get_if<FunctionPointer>(&Value)) // Nope
+ FP->toAPValue().printPretty(OS, ASTCtx, SourceType);
+ OS << "\n";
+ break;
+ }
+
+ default:
+ llvm_unreachable("Can't print that.");
+ }
+}
+
+} // namespace interp
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.h b/contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.h
new file mode 100644
index 000000000000..2b9fc16f1a0a
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Interp/EvaluationResult.h
@@ -0,0 +1,111 @@
+//===------ EvaluationResult.h - Result class for the VM -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_INTERP_EVALUATION_RESULT_H
+#define LLVM_CLANG_AST_INTERP_EVALUATION_RESULT_H
+
+#include "FunctionPointer.h"
+#include "Pointer.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include <optional>
+#include <variant>
+
+namespace clang {
+namespace interp {
+class EvalEmitter;
+class Context;
+
+/// Defines the result of an evaluation.
+///
+/// The result might be in different forms--one of the pointer types,
+/// an APValue, or nothing.
+///
+/// We use this class to inspect and diagnose the result, as well as
+/// convert it to the requested form.
+class EvaluationResult final {
+public:
+ enum ResultKind {
+ Empty, // Initial state.
+ LValue, // Result is an lvalue/pointer.
+ RValue, // Result is an rvalue.
+ Invalid, // Result is invalid.
+ Valid, // Result is valid and empty.
+ };
+
+ using DeclTy = llvm::PointerUnion<const Decl *, const Expr *>;
+
+private:
+ const Context *Ctx = nullptr;
+ std::variant<std::monostate, Pointer, FunctionPointer, APValue> Value;
+ ResultKind Kind = Empty;
+ DeclTy Source = nullptr; // Currently only needed for dump().
+
+ EvaluationResult(ResultKind Kind) : Kind(Kind) {
+ // Leave everything empty. Can be used as an
+ // error marker or for void return values.
+ assert(Kind == Valid || Kind == Invalid);
+ }
+
+ void setSource(DeclTy D) { Source = D; }
+
+ void setValue(const APValue &V) {
+ assert(empty());
+ assert(!V.isLValue());
+ Value = std::move(V);
+ Kind = RValue;
+ }
+ void setPointer(const Pointer P) {
+ assert(empty());
+ Value = P;
+ Kind = LValue;
+ }
+ void setFunctionPointer(const FunctionPointer &P) {
+ assert(empty());
+ Value = P;
+ Kind = LValue;
+ }
+ void setInvalid() {
+ assert(empty());
+ Kind = Invalid;
+ }
+ void setValid() {
+ assert(empty());
+ Kind = Valid;
+ }
+
+public:
+ EvaluationResult(const Context *Ctx) : Ctx(Ctx) {}
+
+ bool empty() const { return Kind == Empty; }
+ bool isInvalid() const { return Kind == Invalid; }
+ bool isLValue() const { return Kind == LValue; }
+ bool isRValue() const { return Kind == RValue; }
+
+ /// Returns an APValue for the evaluation result. The returned
+ /// APValue might be an LValue or RValue.
+ APValue toAPValue() const;
+
+ /// If the result is an LValue, convert that to an RValue
+ /// and return it. This may fail, e.g. if the result is an
+ /// LValue and we can't read from it.
+ std::optional<APValue> toRValue() const;
+
+ bool checkFullyInitialized(InterpState &S) const;
+
+ /// Dump to stderr.
+ void dump() const;
+
+ friend class EvalEmitter;
+};
+
+} // namespace interp
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Floating.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Floating.cpp
new file mode 100644
index 000000000000..922e17ad1450
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Floating.cpp
@@ -0,0 +1,22 @@
+//===---- Floating.cpp - Support for floating point values ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "Floating.h"
+
+namespace clang {
+namespace interp {
+
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, Floating F) {
+ F.print(OS);
+ return OS;
+}
+
+Floating getSwappedBytes(Floating F) { return F; }
+
+} // namespace interp
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Floating.h b/contrib/llvm-project/clang/lib/AST/Interp/Floating.h
new file mode 100644
index 000000000000..e4ac76d8509f
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Floating.h
@@ -0,0 +1,218 @@
+//===--- Floating.h - Types for the constexpr VM ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the VM types and helpers operating on types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_INTERP_FLOATING_H
+#define LLVM_CLANG_AST_INTERP_FLOATING_H
+
+#include "Primitives.h"
+#include "clang/AST/APValue.h"
+#include "llvm/ADT/APFloat.h"
+
+namespace clang {
+namespace interp {
+
+using APFloat = llvm::APFloat;
+using APSInt = llvm::APSInt;
+
+class Floating final {
+private:
+ // The underlying value storage.
+ APFloat F;
+
+public:
+ /// Zero-initializes a Floating.
+ Floating() : F(0.0f) {}
+ Floating(const APFloat &F) : F(F) {}
+
+ // Static constructors for special floating point values.
+ static Floating getInf(const llvm::fltSemantics &Sem) {
+ return Floating(APFloat::getInf(Sem));
+ }
+ const APFloat &getAPFloat() const { return F; }
+
+ bool operator<(Floating RHS) const { return F < RHS.F; }
+ bool operator>(Floating RHS) const { return F > RHS.F; }
+ bool operator<=(Floating RHS) const { return F <= RHS.F; }
+ bool operator>=(Floating RHS) const { return F >= RHS.F; }
+ bool operator==(Floating RHS) const { return F == RHS.F; }
+ bool operator!=(Floating RHS) const { return F != RHS.F; }
+ Floating operator-() const { return Floating(-F); }
+
+ APFloat::opStatus convertToInteger(APSInt &Result) const {
+ bool IsExact;
+ return F.convertToInteger(Result, llvm::APFloat::rmTowardZero, &IsExact);
+ }
+
+ Floating toSemantics(const llvm::fltSemantics *Sem,
+ llvm::RoundingMode RM) const {
+ APFloat Copy = F;
+ bool LosesInfo;
+ Copy.convert(*Sem, RM, &LosesInfo);
+ (void)LosesInfo;
+ return Floating(Copy);
+ }
+
+ /// Convert this Floating to one with the same semantics as \Other.
+ Floating toSemantics(const Floating &Other, llvm::RoundingMode RM) const {
+ return toSemantics(&Other.F.getSemantics(), RM);
+ }
+
+ APSInt toAPSInt(unsigned NumBits = 0) const {
+ return APSInt(F.bitcastToAPInt());
+ }
+ APValue toAPValue() const { return APValue(F); }
+ void print(llvm::raw_ostream &OS) const {
+ // Can't use APFloat::print() since it appends a newline.
+ SmallVector<char, 16> Buffer;
+ F.toString(Buffer);
+ OS << Buffer;
+ }
+ std::string toDiagnosticString(const ASTContext &Ctx) const {
+ std::string NameStr;
+ llvm::raw_string_ostream OS(NameStr);
+ print(OS);
+ return NameStr;
+ }
+
+ unsigned bitWidth() const { return F.semanticsSizeInBits(F.getSemantics()); }
+
+ bool isSigned() const { return true; }
+ bool isNegative() const { return F.isNegative(); }
+ bool isPositive() const { return !F.isNegative(); }
+ bool isZero() const { return F.isZero(); }
+ bool isNonZero() const { return F.isNonZero(); }
+ bool isMin() const { return F.isSmallest(); }
+ bool isMinusOne() const { return F.isExactlyValue(-1.0); }
+ bool isNan() const { return F.isNaN(); }
+ bool isSignaling() const { return F.isSignaling(); }
+ bool isInf() const { return F.isInfinity(); }
+ bool isFinite() const { return F.isFinite(); }
+ bool isNormal() const { return F.isNormal(); }
+ bool isDenormal() const { return F.isDenormal(); }
+ llvm::FPClassTest classify() const { return F.classify(); }
+ APFloat::fltCategory getCategory() const { return F.getCategory(); }
+
+ ComparisonCategoryResult compare(const Floating &RHS) const {
+ llvm::APFloatBase::cmpResult CmpRes = F.compare(RHS.F);
+ switch (CmpRes) {
+ case llvm::APFloatBase::cmpLessThan:
+ return ComparisonCategoryResult::Less;
+ case llvm::APFloatBase::cmpEqual:
+ return ComparisonCategoryResult::Equal;
+ case llvm::APFloatBase::cmpGreaterThan:
+ return ComparisonCategoryResult::Greater;
+ case llvm::APFloatBase::cmpUnordered:
+ return ComparisonCategoryResult::Unordered;
+ }
+ llvm_unreachable("Inavlid cmpResult value");
+ }
+
+ static APFloat::opStatus fromIntegral(APSInt Val,
+ const llvm::fltSemantics &Sem,
+ llvm::RoundingMode RM,
+ Floating &Result) {
+ APFloat F = APFloat(Sem);
+ APFloat::opStatus Status = F.convertFromAPInt(Val, Val.isSigned(), RM);
+ Result = Floating(F);
+ return Status;
+ }
+
+ static Floating bitcastFromMemory(const std::byte *Buff,
+ const llvm::fltSemantics &Sem) {
+ size_t Size = APFloat::semanticsSizeInBits(Sem);
+ llvm::APInt API(Size, true);
+ llvm::LoadIntFromMemory(API, (const uint8_t *)Buff, Size / 8);
+
+ return Floating(APFloat(Sem, API));
+ }
+
+ // === Serialization support ===
+ size_t bytesToSerialize() const {
+ return sizeof(llvm::fltSemantics *) +
+ (APFloat::semanticsSizeInBits(F.getSemantics()) / 8);
+ }
+
+ void serialize(std::byte *Buff) const {
+ // Semantics followed by an APInt.
+ *reinterpret_cast<const llvm::fltSemantics **>(Buff) = &F.getSemantics();
+
+ llvm::APInt API = F.bitcastToAPInt();
+ llvm::StoreIntToMemory(API, (uint8_t *)(Buff + sizeof(void *)),
+ bitWidth() / 8);
+ }
+
+ static Floating deserialize(const std::byte *Buff) {
+ const llvm::fltSemantics *Sem;
+ std::memcpy((void *)&Sem, Buff, sizeof(void *));
+ return bitcastFromMemory(Buff + sizeof(void *), *Sem);
+ }
+
+ static Floating abs(const Floating &F) {
+ APFloat V = F.F;
+ if (V.isNegative())
+ V.changeSign();
+ return Floating(V);
+ }
+
+ // -------
+
+ static APFloat::opStatus add(const Floating &A, const Floating &B,
+ llvm::RoundingMode RM, Floating *R) {
+ *R = Floating(A.F);
+ return R->F.add(B.F, RM);
+ }
+
+ static APFloat::opStatus increment(const Floating &A, llvm::RoundingMode RM,
+ Floating *R) {
+ APFloat One(A.F.getSemantics(), 1);
+ *R = Floating(A.F);
+ return R->F.add(One, RM);
+ }
+
+ static APFloat::opStatus sub(const Floating &A, const Floating &B,
+ llvm::RoundingMode RM, Floating *R) {
+ *R = Floating(A.F);
+ return R->F.subtract(B.F, RM);
+ }
+
+ static APFloat::opStatus decrement(const Floating &A, llvm::RoundingMode RM,
+ Floating *R) {
+ APFloat One(A.F.getSemantics(), 1);
+ *R = Floating(A.F);
+ return R->F.subtract(One, RM);
+ }
+
+ static APFloat::opStatus mul(const Floating &A, const Floating &B,
+ llvm::RoundingMode RM, Floating *R) {
+ *R = Floating(A.F);
+ return R->F.multiply(B.F, RM);
+ }
+
+ static APFloat::opStatus div(const Floating &A, const Floating &B,
+ llvm::RoundingMode RM, Floating *R) {
+ *R = Floating(A.F);
+ return R->F.divide(B.F, RM);
+ }
+
+ static bool neg(const Floating &A, Floating *R) {
+ *R = -A;
+ return false;
+ }
+};
+
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, Floating F);
+Floating getSwappedBytes(Floating F);
+
+} // namespace interp
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Frame.h b/contrib/llvm-project/clang/lib/AST/Interp/Frame.h
index b9a0ea9412f8..079e4259b0ae 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Frame.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Frame.h
@@ -27,13 +27,13 @@ public:
virtual ~Frame();
/// Generates a human-readable description of the call site.
- virtual void describe(llvm::raw_ostream &OS) = 0;
+ virtual void describe(llvm::raw_ostream &OS) const = 0;
/// Returns a pointer to the caller frame.
virtual Frame *getCaller() const = 0;
/// Returns the location of the call site.
- virtual SourceLocation getCallLocation() const = 0;
+ virtual SourceRange getCallRange() const = 0;
/// Returns the called function's declaration.
virtual const FunctionDecl *getCallee() const = 0;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Function.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Function.cpp
index 0ed13a92aa38..1d04998d5dd1 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Function.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Function.cpp
@@ -7,23 +7,25 @@
//===----------------------------------------------------------------------===//
#include "Function.h"
-#include "Program.h"
#include "Opcode.h"
+#include "Program.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/Basic/Builtins.h"
using namespace clang;
using namespace clang::interp;
Function::Function(Program &P, const FunctionDecl *F, unsigned ArgSize,
- llvm::SmallVector<PrimType, 8> &&ParamTypes,
- llvm::DenseMap<unsigned, ParamDescriptor> &&Params)
+ llvm::SmallVectorImpl<PrimType> &&ParamTypes,
+ llvm::DenseMap<unsigned, ParamDescriptor> &&Params,
+ llvm::SmallVectorImpl<unsigned> &&ParamOffsets,
+ bool HasThisPointer, bool HasRVO, bool UnevaluatedBuiltin)
: P(P), Loc(F->getBeginLoc()), F(F), ArgSize(ArgSize),
- ParamTypes(std::move(ParamTypes)), Params(std::move(Params)) {}
-
-CodePtr Function::getCodeBegin() const { return Code.data(); }
-
-CodePtr Function::getCodeEnd() const { return Code.data() + Code.size(); }
+ ParamTypes(std::move(ParamTypes)), Params(std::move(Params)),
+ ParamOffsets(std::move(ParamOffsets)), HasThisPointer(HasThisPointer),
+ HasRVO(HasRVO), Variadic(F->isVariadic()),
+ IsUnevaluatedBuiltin(UnevaluatedBuiltin) {}
Function::ParamDescriptor Function::getParamDescriptor(unsigned Offset) const {
auto It = Params.find(Offset);
@@ -32,17 +34,18 @@ Function::ParamDescriptor Function::getParamDescriptor(unsigned Offset) const {
}
SourceInfo Function::getSource(CodePtr PC) const {
+ assert(PC >= getCodeBegin() && "PC does not belong to this function");
+ assert(PC <= getCodeEnd() && "PC Does not belong to this function");
+ assert(hasBody() && "Function has no body");
unsigned Offset = PC - getCodeBegin();
using Elem = std::pair<unsigned, SourceInfo>;
- auto It = std::lower_bound(SrcMap.begin(), SrcMap.end(), Elem{Offset, {}},
- [](Elem A, Elem B) { return A.first < B.first; });
- if (It == SrcMap.end() || It->first != Offset)
- llvm::report_fatal_error("missing source location");
+ auto It = llvm::lower_bound(SrcMap, Elem{Offset, {}}, llvm::less_first());
+ assert(It != SrcMap.end());
return It->second;
}
bool Function::isVirtual() const {
- if (auto *M = dyn_cast<CXXMethodDecl>(F))
+ if (const auto *M = dyn_cast<CXXMethodDecl>(F))
return M->isVirtual();
return false;
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Function.h b/contrib/llvm-project/clang/lib/AST/Interp/Function.h
index 28531f04b6e9..7c3e0f630249 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Function.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Function.h
@@ -15,8 +15,9 @@
#ifndef LLVM_CLANG_AST_INTERP_FUNCTION_H
#define LLVM_CLANG_AST_INTERP_FUNCTION_H
-#include "Pointer.h"
#include "Source.h"
+#include "Descriptor.h"
+#include "clang/AST/ASTLambda.h"
#include "clang/AST/Decl.h"
#include "llvm/Support/raw_ostream.h"
@@ -24,12 +25,13 @@ namespace clang {
namespace interp {
class Program;
class ByteCodeEmitter;
+class Pointer;
enum PrimType : uint32_t;
/// Describes a scope block.
///
/// The block gathers all the descriptors of the locals defined in this block.
-class Scope {
+class Scope final {
public:
/// Information about a local's storage.
struct Local {
@@ -43,7 +45,7 @@ public:
Scope(LocalVectorTy &&Descriptors) : Descriptors(std::move(Descriptors)) {}
- llvm::iterator_range<LocalVectorTy::iterator> locals() {
+ llvm::iterator_range<LocalVectorTy::const_iterator> locals() const {
return llvm::make_range(Descriptors.begin(), Descriptors.end());
}
@@ -56,74 +58,158 @@ private:
///
/// Contains links to the bytecode of the function, as well as metadata
/// describing all arguments and stack-local variables.
-class Function {
+///
+/// # Calling Convention
+///
+/// When calling a function, all argument values must be on the stack.
+///
+/// If the function has a This pointer (i.e. hasThisPointer() returns true,
+/// the argument values need to be preceeded by a Pointer for the This object.
+///
+/// If the function uses Return Value Optimization, the arguments (and
+/// potentially the This pointer) need to be preceeded by a Pointer pointing
+/// to the location to construct the returned value.
+///
+/// After the function has been called, it will remove all arguments,
+/// including RVO and This pointer, from the stack.
+///
+class Function final {
public:
using ParamDescriptor = std::pair<PrimType, Descriptor *>;
/// Returns the size of the function's local stack.
unsigned getFrameSize() const { return FrameSize; }
- /// Returns the size of the argument stackx
+ /// Returns the size of the argument stack.
unsigned getArgSize() const { return ArgSize; }
/// Returns a pointer to the start of the code.
- CodePtr getCodeBegin() const;
+ CodePtr getCodeBegin() const { return Code.data(); }
/// Returns a pointer to the end of the code.
- CodePtr getCodeEnd() const;
+ CodePtr getCodeEnd() const { return Code.data() + Code.size(); }
/// Returns the original FunctionDecl.
const FunctionDecl *getDecl() const { return F; }
- /// Returns the lcoation.
+ /// Returns the name of the function decl this code
+ /// was generated for.
+ const std::string getName() const {
+ if (!F)
+ return "<<expr>>";
+
+ return F->getQualifiedNameAsString();
+ }
+
+ /// Returns the location.
SourceLocation getLoc() const { return Loc; }
/// Returns a parameter descriptor.
ParamDescriptor getParamDescriptor(unsigned Offset) const;
/// Checks if the first argument is a RVO pointer.
- bool hasRVO() const { return ParamTypes.size() != Params.size(); }
+ bool hasRVO() const { return HasRVO; }
/// Range over the scope blocks.
- llvm::iterator_range<llvm::SmallVector<Scope, 2>::iterator> scopes() {
+ llvm::iterator_range<llvm::SmallVector<Scope, 2>::const_iterator>
+ scopes() const {
return llvm::make_range(Scopes.begin(), Scopes.end());
}
/// Range over argument types.
- using arg_reverse_iterator = SmallVectorImpl<PrimType>::reverse_iterator;
- llvm::iterator_range<arg_reverse_iterator> args_reverse() {
- return llvm::make_range(ParamTypes.rbegin(), ParamTypes.rend());
+ using arg_reverse_iterator =
+ SmallVectorImpl<PrimType>::const_reverse_iterator;
+ llvm::iterator_range<arg_reverse_iterator> args_reverse() const {
+ return llvm::reverse(ParamTypes);
}
/// Returns a specific scope.
Scope &getScope(unsigned Idx) { return Scopes[Idx]; }
+ const Scope &getScope(unsigned Idx) const { return Scopes[Idx]; }
/// Returns the source information at a given PC.
SourceInfo getSource(CodePtr PC) const;
/// Checks if the function is valid to call in constexpr.
- bool isConstexpr() const { return IsValid; }
+ bool isConstexpr() const { return IsValid || isLambdaStaticInvoker(); }
/// Checks if the function is virtual.
bool isVirtual() const;
/// Checks if the function is a constructor.
bool isConstructor() const { return isa<CXXConstructorDecl>(F); }
+ /// Checks if the function is a destructor.
+ bool isDestructor() const { return isa<CXXDestructorDecl>(F); }
+
+ /// Returns the parent record decl, if any.
+ const CXXRecordDecl *getParentDecl() const {
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(F))
+ return MD->getParent();
+ return nullptr;
+ }
+
+ /// Returns whether this function is a lambda static invoker,
+ /// which we generate custom byte code for.
+ bool isLambdaStaticInvoker() const {
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(F))
+ return MD->isLambdaStaticInvoker();
+ return false;
+ }
+
+ /// Returns whether this function is the call operator
+ /// of a lambda record decl.
+ bool isLambdaCallOperator() const {
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(F))
+ return clang::isLambdaCallOperator(MD);
+ return false;
+ }
+
+ /// Checks if the function is fully done compiling.
+ bool isFullyCompiled() const { return IsFullyCompiled; }
+
+ bool hasThisPointer() const { return HasThisPointer; }
+
+ /// Checks if the function already has a body attached.
+ bool hasBody() const { return HasBody; }
+
+ /// Checks if the function is defined.
+ bool isDefined() const { return Defined; }
+
+ bool isVariadic() const { return Variadic; }
+
+ unsigned getBuiltinID() const { return F->getBuiltinID(); }
+
+ bool isBuiltin() const { return F->getBuiltinID() != 0; }
+
+ bool isUnevaluatedBuiltin() const { return IsUnevaluatedBuiltin; }
+
+ unsigned getNumParams() const { return ParamTypes.size(); }
+
+ unsigned getParamOffset(unsigned ParamIndex) const {
+ return ParamOffsets[ParamIndex];
+ }
private:
/// Construct a function representing an actual function.
Function(Program &P, const FunctionDecl *F, unsigned ArgSize,
- llvm::SmallVector<PrimType, 8> &&ParamTypes,
- llvm::DenseMap<unsigned, ParamDescriptor> &&Params);
+ llvm::SmallVectorImpl<PrimType> &&ParamTypes,
+ llvm::DenseMap<unsigned, ParamDescriptor> &&Params,
+ llvm::SmallVectorImpl<unsigned> &&ParamOffsets, bool HasThisPointer,
+ bool HasRVO, bool UnevaluatedBuiltin);
/// Sets the code of a function.
- void setCode(unsigned NewFrameSize, std::vector<char> &&NewCode, SourceMap &&NewSrcMap,
- llvm::SmallVector<Scope, 2> &&NewScopes) {
+ void setCode(unsigned NewFrameSize, std::vector<std::byte> &&NewCode,
+ SourceMap &&NewSrcMap, llvm::SmallVector<Scope, 2> &&NewScopes,
+ bool NewHasBody) {
FrameSize = NewFrameSize;
Code = std::move(NewCode);
SrcMap = std::move(NewSrcMap);
Scopes = std::move(NewScopes);
IsValid = true;
+ HasBody = NewHasBody;
}
+ void setIsFullyCompiled(bool FC) { IsFullyCompiled = FC; }
+ void setDefined(bool D) { Defined = D; }
+
private:
friend class Program;
friend class ByteCodeEmitter;
@@ -135,11 +221,11 @@ private:
/// Declaration this function was compiled from.
const FunctionDecl *F;
/// Local area size: storage + metadata.
- unsigned FrameSize;
+ unsigned FrameSize = 0;
/// Size of the argument stack.
unsigned ArgSize;
/// Program code.
- std::vector<char> Code;
+ std::vector<std::byte> Code;
/// Opcode-to-expression mapping.
SourceMap SrcMap;
/// List of block descriptors.
@@ -148,8 +234,25 @@ private:
llvm::SmallVector<PrimType, 8> ParamTypes;
/// Map from byte offset to parameter descriptor.
llvm::DenseMap<unsigned, ParamDescriptor> Params;
+ /// List of parameter offsets.
+ llvm::SmallVector<unsigned, 8> ParamOffsets;
/// Flag to indicate if the function is valid.
bool IsValid = false;
+ /// Flag to indicate if the function is done being
+ /// compiled to bytecode.
+ bool IsFullyCompiled = false;
+ /// Flag indicating if this function takes the this pointer
+ /// as the first implicit argument
+ bool HasThisPointer = false;
+ /// Whether this function has Return Value Optimization, i.e.
+ /// the return value is constructed in the caller's stack frame.
+ /// This is done for functions that return non-primive values.
+ bool HasRVO = false;
+ /// If we've already compiled the function's body.
+ bool HasBody = false;
+ bool Defined = false;
+ bool Variadic = false;
+ bool IsUnevaluatedBuiltin = false;
public:
/// Dumps the disassembled bytecode to \c llvm::errs().
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/FunctionPointer.h b/contrib/llvm-project/clang/lib/AST/Interp/FunctionPointer.h
new file mode 100644
index 000000000000..4a3f993d4882
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Interp/FunctionPointer.h
@@ -0,0 +1,71 @@
+//===--- FunctionPointer.h - Types for the constexpr VM ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_INTERP_FUNCTION_POINTER_H
+#define LLVM_CLANG_AST_INTERP_FUNCTION_POINTER_H
+
+#include "Function.h"
+#include "Primitives.h"
+#include "clang/AST/APValue.h"
+
+namespace clang {
+class ASTContext;
+namespace interp {
+
+class FunctionPointer final {
+private:
+ const Function *Func;
+
+public:
+ FunctionPointer() : Func(nullptr) {}
+ FunctionPointer(const Function *Func) : Func(Func) { assert(Func); }
+
+ const Function *getFunction() const { return Func; }
+
+ APValue toAPValue() const {
+ if (!Func)
+ return APValue(static_cast<Expr *>(nullptr), CharUnits::Zero(), {},
+ /*OnePastTheEnd=*/false, /*IsNull=*/true);
+
+ return APValue(Func->getDecl(), CharUnits::Zero(), {},
+ /*OnePastTheEnd=*/false, /*IsNull=*/false);
+ }
+
+ void print(llvm::raw_ostream &OS) const {
+ OS << "FnPtr(";
+ if (Func)
+ OS << Func->getName();
+ else
+ OS << "nullptr";
+ OS << ")";
+ }
+
+ std::string toDiagnosticString(const ASTContext &Ctx) const {
+ if (!Func)
+ return "nullptr";
+
+ return toAPValue().getAsString(Ctx, Func->getDecl()->getType());
+ }
+
+ ComparisonCategoryResult compare(const FunctionPointer &RHS) const {
+ if (Func == RHS.Func)
+ return ComparisonCategoryResult::Equal;
+ return ComparisonCategoryResult::Unordered;
+ }
+};
+
+inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
+ FunctionPointer FP) {
+ FP.print(OS);
+ return OS;
+}
+
+} // namespace interp
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Integral.h b/contrib/llvm-project/clang/lib/AST/Interp/Integral.h
index 46cd611ee389..cc1cab8f39fb 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Integral.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Integral.h
@@ -21,21 +21,15 @@
#include <cstddef>
#include <cstdint>
+#include "Primitives.h"
+
namespace clang {
namespace interp {
using APInt = llvm::APInt;
using APSInt = llvm::APSInt;
-/// Helper to compare two comparable types.
-template <typename T>
-ComparisonCategoryResult Compare(const T &X, const T &Y) {
- if (X < Y)
- return ComparisonCategoryResult::Less;
- if (X > Y)
- return ComparisonCategoryResult::Greater;
- return ComparisonCategoryResult::Equal;
-}
+template <bool Signed> class IntegralAP;
// Helper structure to select the representation.
template <unsigned Bits, bool Signed> struct Repr;
@@ -53,22 +47,24 @@ template <> struct Repr<64, true> { using Type = int64_t; };
/// These wrappers are required to shared an interface between APSint and
/// builtin primitive numeral types, while optimising for storage and
/// allowing methods operating on primitive type to compile to fast code.
-template <unsigned Bits, bool Signed> class Integral {
+template <unsigned Bits, bool Signed> class Integral final {
private:
template <unsigned OtherBits, bool OtherSigned> friend class Integral;
// The primitive representing the integral.
- using T = typename Repr<Bits, Signed>::Type;
- T V;
+ using ReprT = typename Repr<Bits, Signed>::Type;
+ ReprT V;
/// Primitive representing limits.
- static const auto Min = std::numeric_limits<T>::min();
- static const auto Max = std::numeric_limits<T>::max();
+ static const auto Min = std::numeric_limits<ReprT>::min();
+ static const auto Max = std::numeric_limits<ReprT>::max();
/// Construct an integral from anything that is convertible to storage.
template <typename T> explicit Integral(T V) : V(V) {}
public:
+ using AsUnsigned = Integral<Bits, false>;
+
/// Zero-initializes an integral.
Integral() : V(0) {}
@@ -92,6 +88,9 @@ public:
}
Integral operator-() const { return Integral(-V); }
+ Integral operator-(const Integral &Other) const {
+ return Integral(V - Other.V);
+ }
Integral operator~() const { return Integral(~V); }
template <unsigned DstBits, bool DstSign>
@@ -102,12 +101,13 @@ public:
explicit operator unsigned() const { return V; }
explicit operator int64_t() const { return V; }
explicit operator uint64_t() const { return V; }
+ explicit operator int32_t() const { return V; }
APSInt toAPSInt() const {
return APSInt(APInt(Bits, static_cast<uint64_t>(V), Signed), !Signed);
}
APSInt toAPSInt(unsigned NumBits) const {
- if (Signed)
+ if constexpr (Signed)
return APSInt(toAPSInt().sextOrTrunc(NumBits), !Signed);
else
return APSInt(toAPSInt().zextOrTrunc(NumBits), !Signed);
@@ -124,25 +124,36 @@ public:
bool isMin() const { return *this == min(bitWidth()); }
- bool isMinusOne() const { return Signed && V == T(-1); }
+ bool isMinusOne() const { return Signed && V == ReprT(-1); }
constexpr static bool isSigned() { return Signed; }
- bool isNegative() const { return V < T(0); }
+ bool isNegative() const { return V < ReprT(0); }
bool isPositive() const { return !isNegative(); }
ComparisonCategoryResult compare(const Integral &RHS) const {
return Compare(V, RHS.V);
}
- unsigned countLeadingZeros() const { return llvm::countLeadingZeros<T>(V); }
+ std::string toDiagnosticString(const ASTContext &Ctx) const {
+ std::string NameStr;
+ llvm::raw_string_ostream OS(NameStr);
+ OS << V;
+ return NameStr;
+ }
+
+ unsigned countLeadingZeros() const {
+ if constexpr (!Signed)
+ return llvm::countl_zero<ReprT>(V);
+ llvm_unreachable("Don't call countLeadingZeros() on signed types.");
+ }
Integral truncate(unsigned TruncBits) const {
if (TruncBits >= Bits)
return *this;
- const T BitMask = (T(1) << T(TruncBits)) - 1;
- const T SignBit = T(1) << (TruncBits - 1);
- const T ExtMask = ~BitMask;
+ const ReprT BitMask = (ReprT(1) << ReprT(TruncBits)) - 1;
+ const ReprT SignBit = ReprT(1) << (TruncBits - 1);
+ const ReprT ExtMask = ~BitMask;
return Integral((V & BitMask) | (Signed && (V & SignBit) ? ExtMask : 0));
}
@@ -155,9 +166,11 @@ public:
return Integral(Max);
}
- template <typename T>
- static std::enable_if_t<std::is_integral<T>::value, Integral> from(T Value) {
- return Integral(Value);
+ template <typename ValT> static Integral from(ValT Value) {
+ if constexpr (std::is_integral<ValT>::value)
+ return Integral(Value);
+ else
+ return Integral::from(static_cast<Integral::ReprT>(Value));
}
template <unsigned SrcBits, bool SrcSign>
@@ -166,13 +179,6 @@ public:
return Integral(Value.V);
}
- template <bool SrcSign> static Integral from(Integral<0, SrcSign> Value) {
- if (SrcSign)
- return Integral(Value.V.getSExtValue());
- else
- return Integral(Value.V.getZExtValue());
- }
-
static Integral zero() { return from(0); }
template <typename T> static Integral from(T Value, unsigned NumBits) {
@@ -180,15 +186,15 @@ public:
}
static bool inRange(int64_t Value, unsigned NumBits) {
- return CheckRange<T, Min, Max>(Value);
+ return CheckRange<ReprT, Min, Max>(Value);
}
static bool increment(Integral A, Integral *R) {
- return add(A, Integral(T(1)), A.bitWidth(), R);
+ return add(A, Integral(ReprT(1)), A.bitWidth(), R);
}
static bool decrement(Integral A, Integral *R) {
- return sub(A, Integral(T(1)), A.bitWidth(), R);
+ return sub(A, Integral(ReprT(1)), A.bitWidth(), R);
}
static bool add(Integral A, Integral B, unsigned OpBits, Integral *R) {
@@ -203,56 +209,89 @@ public:
return CheckMulUB(A.V, B.V, R->V);
}
-private:
- template <typename T>
- static std::enable_if_t<std::is_signed<T>::value, bool> CheckAddUB(T A, T B,
- T &R) {
- return llvm::AddOverflow<T>(A, B, R);
+ static bool rem(Integral A, Integral B, unsigned OpBits, Integral *R) {
+ *R = Integral(A.V % B.V);
+ return false;
}
- template <typename T>
- static std::enable_if_t<std::is_unsigned<T>::value, bool> CheckAddUB(T A, T B,
- T &R) {
- R = A + B;
+ static bool div(Integral A, Integral B, unsigned OpBits, Integral *R) {
+ *R = Integral(A.V / B.V);
return false;
}
- template <typename T>
- static std::enable_if_t<std::is_signed<T>::value, bool> CheckSubUB(T A, T B,
- T &R) {
- return llvm::SubOverflow<T>(A, B, R);
+ static bool bitAnd(Integral A, Integral B, unsigned OpBits, Integral *R) {
+ *R = Integral(A.V & B.V);
+ return false;
}
- template <typename T>
- static std::enable_if_t<std::is_unsigned<T>::value, bool> CheckSubUB(T A, T B,
- T &R) {
- R = A - B;
+ static bool bitOr(Integral A, Integral B, unsigned OpBits, Integral *R) {
+ *R = Integral(A.V | B.V);
return false;
}
- template <typename T>
- static std::enable_if_t<std::is_signed<T>::value, bool> CheckMulUB(T A, T B,
- T &R) {
- return llvm::MulOverflow<T>(A, B, R);
+ static bool bitXor(Integral A, Integral B, unsigned OpBits, Integral *R) {
+ *R = Integral(A.V ^ B.V);
+ return false;
}
- template <typename T>
- static std::enable_if_t<std::is_unsigned<T>::value, bool> CheckMulUB(T A, T B,
- T &R) {
- R = A * B;
+ static bool neg(Integral A, Integral *R) {
+ if (Signed && A.isMin())
+ return true;
+
+ *R = -A;
return false;
}
- template <typename T, T Min, T Max>
- static std::enable_if_t<std::is_signed<T>::value, bool>
- CheckRange(int64_t V) {
- return Min <= V && V <= Max;
+ static bool comp(Integral A, Integral *R) {
+ *R = Integral(~A.V);
+ return false;
+ }
+
+ template <unsigned RHSBits, bool RHSSign>
+ static void shiftLeft(const Integral A, const Integral<RHSBits, RHSSign> B,
+ unsigned OpBits, Integral *R) {
+ *R = Integral::from(A.V << B.V, OpBits);
+ }
+
+ template <unsigned RHSBits, bool RHSSign>
+ static void shiftRight(const Integral A, const Integral<RHSBits, RHSSign> B,
+ unsigned OpBits, Integral *R) {
+ *R = Integral::from(A.V >> B.V, OpBits);
+ }
+
+private:
+ template <typename T> static bool CheckAddUB(T A, T B, T &R) {
+ if constexpr (std::is_signed_v<T>) {
+ return llvm::AddOverflow<T>(A, B, R);
+ } else {
+ R = A + B;
+ return false;
+ }
}
- template <typename T, T Min, T Max>
- static std::enable_if_t<std::is_unsigned<T>::value, bool>
- CheckRange(int64_t V) {
- return V >= 0 && static_cast<uint64_t>(V) <= Max;
+ template <typename T> static bool CheckSubUB(T A, T B, T &R) {
+ if constexpr (std::is_signed_v<T>) {
+ return llvm::SubOverflow<T>(A, B, R);
+ } else {
+ R = A - B;
+ return false;
+ }
+ }
+
+ template <typename T> static bool CheckMulUB(T A, T B, T &R) {
+ if constexpr (std::is_signed_v<T>) {
+ return llvm::MulOverflow<T>(A, B, R);
+ } else {
+ R = A * B;
+ return false;
+ }
+ }
+ template <typename T, T Min, T Max> static bool CheckRange(int64_t V) {
+ if constexpr (std::is_signed_v<T>) {
+ return Min <= V && V <= Max;
+ } else {
+ return V >= 0 && static_cast<uint64_t>(V) <= Max;
+ }
}
};
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/IntegralAP.h b/contrib/llvm-project/clang/lib/AST/Interp/IntegralAP.h
new file mode 100644
index 000000000000..55e29caa1cd7
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Interp/IntegralAP.h
@@ -0,0 +1,295 @@
+//===--- Integral.h - Wrapper for numeric types for the VM ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the VM types and helpers operating on types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_INTERP_INTEGRAL_AP_H
+#define LLVM_CLANG_AST_INTERP_INTEGRAL_AP_H
+
+#include "clang/AST/APValue.h"
+#include "clang/AST/ComparisonCategories.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstddef>
+#include <cstdint>
+
+#include "Primitives.h"
+
+namespace clang {
+namespace interp {
+
+using APInt = llvm::APInt;
+using APSInt = llvm::APSInt;
+template <unsigned Bits, bool Signed> class Integral;
+
+template <bool Signed> class IntegralAP final {
+private:
+ friend IntegralAP<!Signed>;
+ APInt V;
+
+ template <typename T, bool InputSigned>
+ static T truncateCast(const APInt &V) {
+ constexpr unsigned BitSize = sizeof(T) * 8;
+ if (BitSize >= V.getBitWidth()) {
+ APInt Extended;
+ if constexpr (InputSigned)
+ Extended = V.sext(BitSize);
+ else
+ Extended = V.zext(BitSize);
+ return std::is_signed_v<T> ? Extended.getSExtValue()
+ : Extended.getZExtValue();
+ }
+
+ return std::is_signed_v<T> ? V.trunc(BitSize).getSExtValue()
+ : V.trunc(BitSize).getZExtValue();
+ }
+
+public:
+ using AsUnsigned = IntegralAP<false>;
+
+ template <typename T>
+ IntegralAP(T Value, unsigned BitWidth)
+ : V(APInt(BitWidth, static_cast<uint64_t>(Value), Signed)) {}
+
+ IntegralAP(APInt V) : V(V) {}
+ /// Arbitrary value for uninitialized variables.
+ IntegralAP() : IntegralAP(-1, 1024) {}
+
+ IntegralAP operator-() const { return IntegralAP(-V); }
+ IntegralAP operator-(const IntegralAP &Other) const {
+ return IntegralAP(V - Other.V);
+ }
+ bool operator>(const IntegralAP &RHS) const {
+ if constexpr (Signed)
+ return V.ugt(RHS.V);
+ return V.sgt(RHS.V);
+ }
+ bool operator>=(IntegralAP RHS) const {
+ if constexpr (Signed)
+ return V.uge(RHS.V);
+ return V.sge(RHS.V);
+ }
+ bool operator<(IntegralAP RHS) const {
+ if constexpr (Signed)
+ return V.slt(RHS.V);
+ return V.slt(RHS.V);
+ }
+ bool operator<=(IntegralAP RHS) const {
+ if constexpr (Signed)
+ return V.ult(RHS.V);
+ return V.ult(RHS.V);
+ }
+
+ template <typename Ty, typename = std::enable_if_t<std::is_integral_v<Ty>>>
+ explicit operator Ty() const {
+ return truncateCast<Ty, Signed>(V);
+ }
+
+ template <typename T> static IntegralAP from(T Value, unsigned NumBits = 0) {
+ assert(NumBits > 0);
+ APInt Copy = APInt(NumBits, static_cast<uint64_t>(Value), Signed);
+
+ return IntegralAP<Signed>(Copy);
+ }
+
+ template <bool InputSigned>
+ static IntegralAP from(IntegralAP<InputSigned> V, unsigned NumBits = 0) {
+ if (NumBits == 0)
+ NumBits = V.bitWidth();
+
+ if constexpr (InputSigned)
+ return IntegralAP<Signed>(V.V.sextOrTrunc(NumBits));
+ return IntegralAP<Signed>(V.V.zextOrTrunc(NumBits));
+ }
+
+ template <unsigned Bits, bool InputSigned>
+ static IntegralAP from(Integral<Bits, InputSigned> I, unsigned BitWidth) {
+ APInt Copy = APInt(BitWidth, static_cast<uint64_t>(I), InputSigned);
+
+ return IntegralAP<Signed>(Copy);
+ }
+
+ static IntegralAP zero(int32_t BitWidth) {
+ APInt V = APInt(BitWidth, 0LL, Signed);
+ return IntegralAP(V);
+ }
+
+ constexpr unsigned bitWidth() const { return V.getBitWidth(); }
+
+ APSInt toAPSInt(unsigned Bits = 0) const {
+ if (Bits == 0)
+ Bits = bitWidth();
+
+ if constexpr (Signed)
+ return APSInt(V.sext(Bits), !Signed);
+ else
+ return APSInt(V.zext(Bits), !Signed);
+ }
+ APValue toAPValue() const { return APValue(toAPSInt()); }
+
+ bool isZero() const { return V.isZero(); }
+ bool isPositive() const { return V.isNonNegative(); }
+ bool isNegative() const { return !V.isNonNegative(); }
+ bool isMin() const { return V.isMinValue(); }
+ bool isMax() const { return V.isMaxValue(); }
+ static constexpr bool isSigned() { return Signed; }
+ bool isMinusOne() const { return Signed && V == -1; }
+
+ unsigned countLeadingZeros() const { return V.countl_zero(); }
+
+ void print(llvm::raw_ostream &OS) const { OS << V; }
+ std::string toDiagnosticString(const ASTContext &Ctx) const {
+ std::string NameStr;
+ llvm::raw_string_ostream OS(NameStr);
+ print(OS);
+ return NameStr;
+ }
+
+ IntegralAP truncate(unsigned BitWidth) const {
+ return IntegralAP(V.trunc(BitWidth));
+ }
+
+ IntegralAP<false> toUnsigned() const {
+ APInt Copy = V;
+ return IntegralAP<false>(Copy);
+ }
+
+ ComparisonCategoryResult compare(const IntegralAP &RHS) const {
+ assert(Signed == RHS.isSigned());
+ assert(bitWidth() == RHS.bitWidth());
+ if constexpr (Signed) {
+ if (V.slt(RHS.V))
+ return ComparisonCategoryResult::Less;
+ if (V.sgt(RHS.V))
+ return ComparisonCategoryResult::Greater;
+ return ComparisonCategoryResult::Equal;
+ }
+
+ assert(!Signed);
+ if (V.ult(RHS.V))
+ return ComparisonCategoryResult::Less;
+ if (V.ugt(RHS.V))
+ return ComparisonCategoryResult::Greater;
+ return ComparisonCategoryResult::Equal;
+ }
+
+ static bool increment(IntegralAP A, IntegralAP *R) {
+ IntegralAP<Signed> One(1, A.bitWidth());
+ return add(A, One, A.bitWidth() + 1, R);
+ }
+
+ static bool decrement(IntegralAP A, IntegralAP *R) {
+ IntegralAP<Signed> One(1, A.bitWidth());
+ return sub(A, One, A.bitWidth() + 1, R);
+ }
+
+ static bool add(IntegralAP A, IntegralAP B, unsigned OpBits, IntegralAP *R) {
+ return CheckAddSubMulUB<std::plus>(A, B, OpBits, R);
+ }
+
+ static bool sub(IntegralAP A, IntegralAP B, unsigned OpBits, IntegralAP *R) {
+ return CheckAddSubMulUB<std::minus>(A, B, OpBits, R);
+ }
+
+ static bool mul(IntegralAP A, IntegralAP B, unsigned OpBits, IntegralAP *R) {
+ return CheckAddSubMulUB<std::multiplies>(A, B, OpBits, R);
+ }
+
+ static bool rem(IntegralAP A, IntegralAP B, unsigned OpBits, IntegralAP *R) {
+ if constexpr (Signed)
+ *R = IntegralAP(A.V.srem(B.V));
+ else
+ *R = IntegralAP(A.V.urem(B.V));
+ return false;
+ }
+
+ static bool div(IntegralAP A, IntegralAP B, unsigned OpBits, IntegralAP *R) {
+ if constexpr (Signed)
+ *R = IntegralAP(A.V.sdiv(B.V));
+ else
+ *R = IntegralAP(A.V.udiv(B.V));
+ return false;
+ }
+
+ static bool bitAnd(IntegralAP A, IntegralAP B, unsigned OpBits,
+ IntegralAP *R) {
+ *R = IntegralAP(A.V & B.V);
+ return false;
+ }
+
+ static bool bitOr(IntegralAP A, IntegralAP B, unsigned OpBits,
+ IntegralAP *R) {
+ *R = IntegralAP(A.V | B.V);
+ return false;
+ }
+
+ static bool bitXor(IntegralAP A, IntegralAP B, unsigned OpBits,
+ IntegralAP *R) {
+ *R = IntegralAP(A.V ^ B.V);
+ return false;
+ }
+
+ static bool neg(const IntegralAP &A, IntegralAP *R) {
+ APInt AI = A.V;
+ AI.negate();
+ *R = IntegralAP(AI);
+ return false;
+ }
+
+ static bool comp(IntegralAP A, IntegralAP *R) {
+ *R = IntegralAP(~A.V);
+ return false;
+ }
+
+ static void shiftLeft(const IntegralAP A, const IntegralAP B, unsigned OpBits,
+ IntegralAP *R) {
+ *R = IntegralAP(A.V.shl(B.V.getZExtValue()));
+ }
+
+ static void shiftRight(const IntegralAP A, const IntegralAP B,
+ unsigned OpBits, IntegralAP *R) {
+ unsigned ShiftAmount = B.V.getZExtValue();
+ if constexpr (Signed)
+ *R = IntegralAP(A.V.ashr(ShiftAmount));
+ else
+ *R = IntegralAP(A.V.lshr(ShiftAmount));
+ }
+
+private:
+ template <template <typename T> class Op>
+ static bool CheckAddSubMulUB(const IntegralAP &A, const IntegralAP &B,
+ unsigned BitWidth, IntegralAP *R) {
+ if constexpr (!Signed) {
+ R->V = Op<APInt>{}(A.V, B.V);
+ return false;
+ }
+
+ const APSInt &LHS = A.toAPSInt();
+ const APSInt &RHS = B.toAPSInt();
+ APSInt Value = Op<APSInt>{}(LHS.extend(BitWidth), RHS.extend(BitWidth));
+ APSInt Result = Value.trunc(LHS.getBitWidth());
+ R->V = Result;
+
+ return Result.extend(BitWidth) != Value;
+ }
+};
+
+template <bool Signed>
+inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
+ IntegralAP<Signed> I) {
+ I.print(OS);
+ return OS;
+}
+
+} // namespace interp
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp
index cec3f6d6160e..807b860f3565 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Interp.cpp
@@ -1,4 +1,4 @@
-//===--- InterpState.cpp - Interpreter for the constexpr VM -----*- C++ -*-===//
+//===------- Interp.cpp - Interpreter for the constexpr VM ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -26,51 +26,6 @@
using namespace clang;
using namespace clang::interp;
-//===----------------------------------------------------------------------===//
-// Ret
-//===----------------------------------------------------------------------===//
-
-template <PrimType Name, class T = typename PrimConv<Name>::T>
-static bool Ret(InterpState &S, CodePtr &PC, APValue &Result) {
- S.CallStackDepth--;
- const T &Ret = S.Stk.pop<T>();
-
- assert(S.Current->getFrameOffset() == S.Stk.size() && "Invalid frame");
- if (!S.checkingPotentialConstantExpression())
- S.Current->popArgs();
-
- if (InterpFrame *Caller = S.Current->Caller) {
- PC = S.Current->getRetPC();
- delete S.Current;
- S.Current = Caller;
- S.Stk.push<T>(Ret);
- } else {
- delete S.Current;
- S.Current = nullptr;
- if (!ReturnValue<T>(Ret, Result))
- return false;
- }
- return true;
-}
-
-static bool RetVoid(InterpState &S, CodePtr &PC, APValue &Result) {
- S.CallStackDepth--;
-
- assert(S.Current->getFrameOffset() == S.Stk.size() && "Invalid frame");
- if (!S.checkingPotentialConstantExpression())
- S.Current->popArgs();
-
- if (InterpFrame *Caller = S.Current->Caller) {
- PC = S.Current->getRetPC();
- delete S.Current;
- S.Current = Caller;
- } else {
- delete S.Current;
- S.Current = nullptr;
- }
- return true;
-}
-
static bool RetValue(InterpState &S, CodePtr &Pt, APValue &Result) {
llvm::report_fatal_error("Interpreter cannot return values");
}
@@ -98,15 +53,23 @@ static bool Jf(InterpState &S, CodePtr &PC, int32_t Offset) {
return true;
}
-static bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
- AccessKinds AK) {
- if (Ptr.isInitialized())
- return true;
- if (!S.checkingPotentialConstantExpression()) {
- const SourceInfo &Loc = S.Current->getSource(OpPC);
- S.FFDiag(Loc, diag::note_constexpr_access_uninit) << AK << false;
- }
- return false;
+static void diagnoseNonConstVariable(InterpState &S, CodePtr OpPC,
+ const ValueDecl *VD) {
+ if (!S.getLangOpts().CPlusPlus)
+ return;
+
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+
+ if (VD->getType()->isIntegralOrEnumerationType())
+ S.FFDiag(Loc, diag::note_constexpr_ltor_non_const_int, 1) << VD;
+ else
+ S.FFDiag(Loc,
+ S.getLangOpts().CPlusPlus11
+ ? diag::note_constexpr_ltor_non_constexpr
+ : diag::note_constexpr_ltor_non_integral,
+ 1)
+ << VD << VD->getType();
+ S.Note(VD->getLocation(), diag::note_declared_at);
}
static bool CheckActive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
@@ -124,7 +87,7 @@ static bool CheckActive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
}
// Find the active field of the union.
- Record *R = U.getRecord();
+ const Record *R = U.getRecord();
assert(R && R->isUnion() && "Not a union");
const FieldDecl *ActiveField = nullptr;
for (unsigned I = 0, N = R->getNumFields(); I < N; ++I) {
@@ -177,16 +140,57 @@ static bool CheckGlobal(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
namespace clang {
namespace interp {
+static void popArg(InterpState &S, const Expr *Arg) {
+ PrimType Ty = S.getContext().classify(Arg->getType()).value_or(PT_Ptr);
+ TYPE_SWITCH(Ty, S.Stk.discard<T>());
+}
+
+void cleanupAfterFunctionCall(InterpState &S, CodePtr OpPC) {
+ assert(S.Current);
+ const Function *CurFunc = S.Current->getFunction();
+ assert(CurFunc);
+
+ if (CurFunc->isUnevaluatedBuiltin())
+ return;
+
+ // Some builtin functions require us to only look at the call site, since
+ // the classified parameter types do not match.
+ if (CurFunc->isBuiltin()) {
+ const auto *CE =
+ cast<CallExpr>(S.Current->Caller->getExpr(S.Current->getRetPC()));
+ for (int32_t I = CE->getNumArgs() - 1; I >= 0; --I) {
+ const Expr *A = CE->getArg(I);
+ popArg(S, A);
+ }
+ return;
+ }
+
+ if (S.Current->Caller && CurFunc->isVariadic()) {
+ // CallExpr we're look for is at the return PC of the current function, i.e.
+ // in the caller.
+ // This code path should be executed very rarely.
+ const auto *CE =
+ cast<CallExpr>(S.Current->Caller->getExpr(S.Current->getRetPC()));
+ unsigned FixedParams = CurFunc->getNumParams();
+ int32_t ArgsToPop = CE->getNumArgs() - FixedParams;
+ assert(ArgsToPop >= 0);
+ for (int32_t I = ArgsToPop - 1; I >= 0; --I) {
+ const Expr *A = CE->getArg(FixedParams + I);
+ popArg(S, A);
+ }
+ }
+ // And in any case, remove the fixed parameters (the non-variadic ones)
+ // at the end.
+ S.Current->popArgs();
+}
bool CheckExtern(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
if (!Ptr.isExtern())
return true;
- if (!S.checkingPotentialConstantExpression()) {
- auto *VD = Ptr.getDeclDesc()->asValueDecl();
- const SourceInfo &Loc = S.Current->getSource(OpPC);
- S.FFDiag(Loc, diag::note_constexpr_ltor_non_constexpr, 1) << VD;
- S.Note(VD->getLocation(), diag::note_declared_at);
+ if (!S.checkingPotentialConstantExpression() && S.getLangOpts().CPlusPlus) {
+ const auto *VD = Ptr.getDeclDesc()->asValueDecl();
+ diagnoseNonConstVariable(S, OpPC, VD);
}
return false;
}
@@ -201,8 +205,8 @@ bool CheckArray(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
AccessKinds AK) {
- const auto &Src = S.Current->getSource(OpPC);
if (Ptr.isZero()) {
+ const auto &Src = S.Current->getSource(OpPC);
if (Ptr.isField())
S.FFDiag(Src, diag::note_constexpr_null_subobject) << CSK_Field;
@@ -213,6 +217,7 @@ bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
}
if (!Ptr.isLive()) {
+ const auto &Src = S.Current->getSource(OpPC);
bool IsTemp = Ptr.isTemporary();
S.FFDiag(Src, diag::note_constexpr_lifetime_ended, 1) << AK << !IsTemp;
@@ -228,6 +233,48 @@ bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
return true;
}
+bool CheckConstant(InterpState &S, CodePtr OpPC, const Descriptor *Desc) {
+ assert(Desc);
+
+ auto IsConstType = [&S](const VarDecl *VD) -> bool {
+ if (VD->isConstexpr())
+ return true;
+
+ if (S.getLangOpts().CPlusPlus && !S.getLangOpts().CPlusPlus11)
+ return false;
+
+ QualType T = VD->getType();
+ if (T.isConstQualified())
+ return true;
+
+ if (const auto *RT = T->getAs<ReferenceType>())
+ return RT->getPointeeType().isConstQualified();
+
+ if (const auto *PT = T->getAs<PointerType>())
+ return PT->getPointeeType().isConstQualified();
+
+ return false;
+ };
+
+ if (const auto *D = Desc->asValueDecl()) {
+ if (const auto *VD = dyn_cast<VarDecl>(D);
+ VD && VD->hasGlobalStorage() && !IsConstType(VD)) {
+ diagnoseNonConstVariable(S, OpPC, VD);
+ return S.inConstantContext();
+ }
+ }
+
+ return true;
+}
+
+static bool CheckConstant(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
+ return CheckConstant(S, OpPC, Ptr.getDeclDesc());
+}
+
+bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
+ return !Ptr.isZero() && !Ptr.isDummy();
+}
+
bool CheckNull(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
CheckSubobjectKind CSK) {
if (!Ptr.isZero())
@@ -255,9 +302,26 @@ bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
return false;
}
+bool CheckSubobject(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ CheckSubobjectKind CSK) {
+ if (!Ptr.isOnePastEnd())
+ return true;
+
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.FFDiag(Loc, diag::note_constexpr_past_end_subobject) << CSK;
+ return false;
+}
+
bool CheckConst(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
assert(Ptr.isLive() && "Pointer is not live");
- if (!Ptr.isConst()) {
+ if (!Ptr.isConst())
+ return true;
+
+ // The This pointer is writable in constructors and destructors,
+ // even if isConst() returns true.
+ if (const Function *Func = S.Current->getFunction();
+ Func && (Func->isConstructor() || Func->isDestructor()) &&
+ Ptr.block() == S.Current->getThis().block()) {
return true;
}
@@ -280,9 +344,26 @@ bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
return false;
}
+bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK) {
+ if (Ptr.isInitialized())
+ return true;
+
+ if (!S.checkingPotentialConstantExpression()) {
+ S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_uninit)
+ << AK << /*uninitialized=*/true << S.Current->getRange(OpPC);
+ }
+ return false;
+}
+
bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
if (!CheckLive(S, OpPC, Ptr, AK_Read))
return false;
+ if (!CheckConstant(S, OpPC, Ptr))
+ return false;
+
+ if (!CheckDummy(S, OpPC, Ptr))
+ return false;
if (!CheckExtern(S, OpPC, Ptr))
return false;
if (!CheckRange(S, OpPC, Ptr, AK_Read))
@@ -330,25 +411,24 @@ bool CheckInit(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
return true;
}
-bool CheckCallable(InterpState &S, CodePtr OpPC, Function *F) {
- const SourceLocation &Loc = S.Current->getLocation(OpPC);
+bool CheckCallable(InterpState &S, CodePtr OpPC, const Function *F) {
- if (F->isVirtual()) {
- if (!S.getLangOpts().CPlusPlus20) {
- S.CCEDiag(Loc, diag::note_constexpr_virtual_call);
- return false;
- }
+ if (F->isVirtual() && !S.getLangOpts().CPlusPlus20) {
+ const SourceLocation &Loc = S.Current->getLocation(OpPC);
+ S.CCEDiag(Loc, diag::note_constexpr_virtual_call);
+ return false;
}
if (!F->isConstexpr()) {
+ const SourceLocation &Loc = S.Current->getLocation(OpPC);
if (S.getLangOpts().CPlusPlus11) {
const FunctionDecl *DiagDecl = F->getDecl();
// If this function is not constexpr because it is an inherited
// non-constexpr constructor, diagnose that directly.
- auto *CD = dyn_cast<CXXConstructorDecl>(DiagDecl);
+ const auto *CD = dyn_cast<CXXConstructorDecl>(DiagDecl);
if (CD && CD->isInheritingConstructor()) {
- auto *Inherited = CD->getInheritedConstructor().getConstructor();
+ const auto *Inherited = CD->getInheritedConstructor().getConstructor();
if (!Inherited->isConstexpr())
DiagDecl = CD = Inherited;
}
@@ -356,13 +436,21 @@ bool CheckCallable(InterpState &S, CodePtr OpPC, Function *F) {
// FIXME: If DiagDecl is an implicitly-declared special member function
// or an inheriting constructor, we should be much more explicit about why
// it's not constexpr.
- if (CD && CD->isInheritingConstructor())
+ if (CD && CD->isInheritingConstructor()) {
S.FFDiag(Loc, diag::note_constexpr_invalid_inhctor, 1)
<< CD->getInheritedConstructor().getConstructor()->getParent();
- else
+ S.Note(DiagDecl->getLocation(), diag::note_declared_at);
+ } else {
+ // Don't emit anything if the function isn't defined and we're checking
+ // for a constant expression. It might be defined at the point we're
+ // actually calling it.
+ if (!DiagDecl->isDefined() && S.checkingPotentialConstantExpression())
+ return false;
+
S.FFDiag(Loc, diag::note_constexpr_invalid_function, 1)
<< DiagDecl->isConstexpr() << (bool)CD << DiagDecl;
- S.Note(DiagDecl->getLocation(), diag::note_declared_at);
+ S.Note(DiagDecl->getLocation(), diag::note_declared_at);
+ }
} else {
S.FFDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
}
@@ -372,6 +460,17 @@ bool CheckCallable(InterpState &S, CodePtr OpPC, Function *F) {
return true;
}
+bool CheckCallDepth(InterpState &S, CodePtr OpPC) {
+ if ((S.Current->getDepth() + 1) > S.getLangOpts().ConstexprCallDepth) {
+ S.FFDiag(S.Current->getSource(OpPC),
+ diag::note_constexpr_depth_limit_exceeded)
+ << S.getLangOpts().ConstexprCallDepth;
+ return false;
+ }
+
+ return true;
+}
+
bool CheckThis(InterpState &S, CodePtr OpPC, const Pointer &This) {
if (!This.isZero())
return true;
@@ -379,7 +478,7 @@ bool CheckThis(InterpState &S, CodePtr OpPC, const Pointer &This) {
const SourceInfo &Loc = S.Current->getSource(OpPC);
bool IsImplicit = false;
- if (auto *E = dyn_cast_or_null<CXXThisExpr>(Loc.asExpr()))
+ if (const auto *E = dyn_cast_if_present<CXXThisExpr>(Loc.asExpr()))
IsImplicit = E->isImplicit();
if (S.getLangOpts().CPlusPlus11)
@@ -391,16 +490,115 @@ bool CheckThis(InterpState &S, CodePtr OpPC, const Pointer &This) {
}
bool CheckPure(InterpState &S, CodePtr OpPC, const CXXMethodDecl *MD) {
- if (!MD->isPure())
+ if (!MD->isPureVirtual())
return true;
const SourceInfo &E = S.Current->getSource(OpPC);
S.FFDiag(E, diag::note_constexpr_pure_virtual_call, 1) << MD;
S.Note(MD->getLocation(), diag::note_declared_at);
return false;
}
+
+bool CheckPotentialReinterpretCast(InterpState &S, CodePtr OpPC,
+ const Pointer &Ptr) {
+ if (!S.inConstantContext())
+ return true;
+
+ const SourceInfo &E = S.Current->getSource(OpPC);
+ S.CCEDiag(E, diag::note_constexpr_invalid_cast)
+ << 2 << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC);
+ return false;
+}
+
+bool CheckFloatResult(InterpState &S, CodePtr OpPC, const Floating &Result,
+ APFloat::opStatus Status) {
+ const SourceInfo &E = S.Current->getSource(OpPC);
+
+ // [expr.pre]p4:
+ // If during the evaluation of an expression, the result is not
+ // mathematically defined [...], the behavior is undefined.
+ // FIXME: C++ rules require us to not conform to IEEE 754 here.
+ if (Result.isNan()) {
+ S.CCEDiag(E, diag::note_constexpr_float_arithmetic)
+ << /*NaN=*/true << S.Current->getRange(OpPC);
+ return S.noteUndefinedBehavior();
+ }
+
+ // In a constant context, assume that any dynamic rounding mode or FP
+ // exception state matches the default floating-point environment.
+ if (S.inConstantContext())
+ return true;
+
+ FPOptions FPO = E.asExpr()->getFPFeaturesInEffect(S.Ctx.getLangOpts());
+
+ if ((Status & APFloat::opInexact) &&
+ FPO.getRoundingMode() == llvm::RoundingMode::Dynamic) {
+ // Inexact result means that it depends on rounding mode. If the requested
+ // mode is dynamic, the evaluation cannot be made in compile time.
+ S.FFDiag(E, diag::note_constexpr_dynamic_rounding);
+ return false;
+ }
+
+ if ((Status != APFloat::opOK) &&
+ (FPO.getRoundingMode() == llvm::RoundingMode::Dynamic ||
+ FPO.getExceptionMode() != LangOptions::FPE_Ignore ||
+ FPO.getAllowFEnvAccess())) {
+ S.FFDiag(E, diag::note_constexpr_float_arithmetic_strict);
+ return false;
+ }
+
+ if ((Status & APFloat::opStatus::opInvalidOp) &&
+ FPO.getExceptionMode() != LangOptions::FPE_Ignore) {
+ // There is no usefully definable result.
+ S.FFDiag(E);
+ return false;
+ }
+
+ return true;
+}
+
+/// We aleady know the given DeclRefExpr is invalid for some reason,
+/// now figure out why and print appropriate diagnostics.
+bool CheckDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR) {
+ const ValueDecl *D = DR->getDecl();
+ const SourceInfo &E = S.Current->getSource(OpPC);
+
+ if (isa<ParmVarDecl>(D)) {
+ if (S.getLangOpts().CPlusPlus11) {
+ S.FFDiag(E, diag::note_constexpr_function_param_value_unknown) << D;
+ S.Note(D->getLocation(), diag::note_declared_at) << D->getSourceRange();
+ } else {
+ S.FFDiag(E);
+ }
+ } else if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ if (!VD->getType().isConstQualified()) {
+ diagnoseNonConstVariable(S, OpPC, VD);
+ return false;
+ }
+
+ // const, but no initializer.
+ if (!VD->getAnyInitializer()) {
+ S.FFDiag(E, diag::note_constexpr_var_init_unknown, 1) << VD;
+ S.Note(VD->getLocation(), diag::note_declared_at) << VD->getSourceRange();
+ return false;
+ }
+ }
+
+ return false;
+}
+
bool Interpret(InterpState &S, APValue &Result) {
+ // The current stack frame when we started Interpret().
+ // This is being used by the ops to determine wheter
+ // to return from this function and thus terminate
+ // interpretation.
+ const InterpFrame *StartFrame = S.Current;
+ assert(!S.Current->isRoot());
CodePtr PC = S.Current->getPC();
+ // Empty program.
+ if (!PC)
+ return true;
+
for (;;) {
auto Op = PC.read<Opcode>();
CodePtr OpPC = PC;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Interp.h b/contrib/llvm-project/clang/lib/AST/Interp/Interp.h
index e2f7bf0dc26a..65c54ed9c89b 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Interp.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Interp.h
@@ -13,9 +13,10 @@
#ifndef LLVM_CLANG_AST_INTERP_INTERP_H
#define LLVM_CLANG_AST_INTERP_INTERP_H
-#include <limits>
-#include <vector>
+#include "Boolean.h"
+#include "Floating.h"
#include "Function.h"
+#include "FunctionPointer.h"
#include "InterpFrame.h"
#include "InterpStack.h"
#include "InterpState.h"
@@ -30,14 +31,15 @@
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/Support/Endian.h"
+#include <limits>
+#include <type_traits>
namespace clang {
namespace interp {
-using APInt = llvm::APInt;
using APSInt = llvm::APSInt;
-/// Convers a value to an APValue.
+/// Convert a value to an APValue.
template <typename T> bool ReturnValue(const T &V, APValue &R) {
R = V.toAPValue();
return true;
@@ -49,9 +51,13 @@ bool CheckExtern(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
/// Checks if the array is offsetable.
bool CheckArray(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
-/// Checks if a pointer is live and accesible.
+/// Checks if a pointer is live and accessible.
bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
AccessKinds AK);
+
+/// Checks if a pointer is a dummy pointer.
+bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
+
/// Checks if a pointer is null.
bool CheckNull(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
CheckSubobjectKind CSK);
@@ -64,15 +70,25 @@ bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
CheckSubobjectKind CSK);
+/// Checks if Ptr is a one-past-the-end pointer.
+bool CheckSubobject(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ CheckSubobjectKind CSK);
+
/// Checks if a pointer points to const storage.
bool CheckConst(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
+/// Checks if the Descriptor is of a constexpr or const global variable.
+bool CheckConstant(InterpState &S, CodePtr OpPC, const Descriptor *Desc);
+
/// Checks if a pointer points to a mutable field.
bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
/// Checks if a value can be loaded from a block.
bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
+bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ AccessKinds AK);
+
/// Checks if a value can be stored in a block.
bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
@@ -83,7 +99,11 @@ bool CheckInvoke(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
bool CheckInit(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
/// Checks if a method can be called.
-bool CheckCallable(InterpState &S, CodePtr OpPC, Function *F);
+bool CheckCallable(InterpState &S, CodePtr OpPC, const Function *F);
+
+/// Checks if calling the currently active function would exceed
+/// the allowed call depth.
+bool CheckCallDepth(InterpState &S, CodePtr OpPC);
/// Checks the 'this' pointer.
bool CheckThis(InterpState &S, CodePtr OpPC, const Pointer &This);
@@ -91,7 +111,151 @@ bool CheckThis(InterpState &S, CodePtr OpPC, const Pointer &This);
/// Checks if a method is pure virtual.
bool CheckPure(InterpState &S, CodePtr OpPC, const CXXMethodDecl *MD);
-template <typename T> inline bool IsTrue(const T &V) { return !V.isZero(); }
+/// Checks if reinterpret casts are legal in the current context.
+bool CheckPotentialReinterpretCast(InterpState &S, CodePtr OpPC,
+ const Pointer &Ptr);
+
+/// Sets the given integral value to the pointer, which is of
+/// a std::{weak,partial,strong}_ordering type.
+bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC,
+ const Pointer &Ptr, const APSInt &IntValue);
+
+/// Checks if the shift operation is legal.
+template <typename LT, typename RT>
+bool CheckShift(InterpState &S, CodePtr OpPC, const LT &LHS, const RT &RHS,
+ unsigned Bits) {
+ if (RHS.isNegative()) {
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.CCEDiag(Loc, diag::note_constexpr_negative_shift) << RHS.toAPSInt();
+ return false;
+ }
+
+ // C++11 [expr.shift]p1: Shift width must be less than the bit width of
+ // the shifted type.
+ if (Bits > 1 && RHS >= RT::from(Bits, RHS.bitWidth())) {
+ const Expr *E = S.Current->getExpr(OpPC);
+ const APSInt Val = RHS.toAPSInt();
+ QualType Ty = E->getType();
+ S.CCEDiag(E, diag::note_constexpr_large_shift) << Val << Ty << Bits;
+ return false;
+ }
+
+ if (LHS.isSigned() && !S.getLangOpts().CPlusPlus20) {
+ const Expr *E = S.Current->getExpr(OpPC);
+ // C++11 [expr.shift]p2: A signed left shift must have a non-negative
+ // operand, and must not overflow the corresponding unsigned type.
+ if (LHS.isNegative())
+ S.CCEDiag(E, diag::note_constexpr_lshift_of_negative) << LHS.toAPSInt();
+ else if (LHS.toUnsigned().countLeadingZeros() < static_cast<unsigned>(RHS))
+ S.CCEDiag(E, diag::note_constexpr_lshift_discards);
+ }
+
+ // C++2a [expr.shift]p2: [P0907R4]:
+ // E1 << E2 is the unique value congruent to
+ // E1 x 2^E2 module 2^N.
+ return true;
+}
+
+/// Checks if Div/Rem operation on LHS and RHS is valid.
+template <typename T>
+bool CheckDivRem(InterpState &S, CodePtr OpPC, const T &LHS, const T &RHS) {
+ if (RHS.isZero()) {
+ const auto *Op = cast<BinaryOperator>(S.Current->getExpr(OpPC));
+ S.FFDiag(Op, diag::note_expr_divide_by_zero)
+ << Op->getRHS()->getSourceRange();
+ return false;
+ }
+
+ if (LHS.isSigned() && LHS.isMin() && RHS.isNegative() && RHS.isMinusOne()) {
+ APSInt LHSInt = LHS.toAPSInt();
+ SmallString<32> Trunc;
+ (-LHSInt.extend(LHSInt.getBitWidth() + 1)).toString(Trunc, 10);
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ const Expr *E = S.Current->getExpr(OpPC);
+ S.CCEDiag(Loc, diag::note_constexpr_overflow) << Trunc << E->getType();
+ return false;
+ }
+ return true;
+}
+
+/// Checks if the result of a floating-point operation is valid
+/// in the current context.
+bool CheckFloatResult(InterpState &S, CodePtr OpPC, const Floating &Result,
+ APFloat::opStatus Status);
+
+/// Checks why the given DeclRefExpr is invalid.
+bool CheckDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR);
+
+/// Interpreter entry point.
+bool Interpret(InterpState &S, APValue &Result);
+
+/// Interpret a builtin function.
+bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
+ const CallExpr *Call);
+
+/// Interpret an offsetof operation.
+bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
+ llvm::ArrayRef<int64_t> ArrayIndices, int64_t &Result);
+
+enum class ArithOp { Add, Sub };
+
+//===----------------------------------------------------------------------===//
+// Returning values
+//===----------------------------------------------------------------------===//
+
+void cleanupAfterFunctionCall(InterpState &S, CodePtr OpPC);
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool Ret(InterpState &S, CodePtr &PC, APValue &Result) {
+ const T &Ret = S.Stk.pop<T>();
+
+ // Make sure returned pointers are live. We might be trying to return a
+ // pointer or reference to a local variable.
+ // Just return false, since a diagnostic has already been emitted in Sema.
+ if constexpr (std::is_same_v<T, Pointer>) {
+ // FIXME: We could be calling isLive() here, but the emitted diagnostics
+ // seem a little weird, at least if the returned expression is of
+ // pointer type.
+ // Null pointers are considered live here.
+ if (!Ret.isZero() && !Ret.isLive())
+ return false;
+ }
+
+ assert(S.Current);
+ assert(S.Current->getFrameOffset() == S.Stk.size() && "Invalid frame");
+ if (!S.checkingPotentialConstantExpression() || S.Current->Caller)
+ cleanupAfterFunctionCall(S, PC);
+
+ if (InterpFrame *Caller = S.Current->Caller) {
+ PC = S.Current->getRetPC();
+ delete S.Current;
+ S.Current = Caller;
+ S.Stk.push<T>(Ret);
+ } else {
+ delete S.Current;
+ S.Current = nullptr;
+ if (!ReturnValue<T>(Ret, Result))
+ return false;
+ }
+ return true;
+}
+
+inline bool RetVoid(InterpState &S, CodePtr &PC, APValue &Result) {
+ assert(S.Current->getFrameOffset() == S.Stk.size() && "Invalid frame");
+
+ if (!S.checkingPotentialConstantExpression() || S.Current->Caller)
+ cleanupAfterFunctionCall(S, PC);
+
+ if (InterpFrame *Caller = S.Current->Caller) {
+ PC = S.Current->getRetPC();
+ delete S.Current;
+ S.Current = Caller;
+ } else {
+ delete S.Current;
+ S.Current = nullptr;
+ }
+ return true;
+}
//===----------------------------------------------------------------------===//
// Add, Sub, Mul
@@ -121,11 +285,16 @@ bool AddSubMulHelper(InterpState &S, CodePtr OpPC, unsigned Bits, const T &LHS,
SmallString<32> Trunc;
Value.trunc(Result.bitWidth()).toString(Trunc, 10);
auto Loc = E->getExprLoc();
- S.report(Loc, diag::warn_integer_constant_overflow) << Trunc << Type;
+ S.report(Loc, diag::warn_integer_constant_overflow)
+ << Trunc << Type << E->getSourceRange();
return true;
} else {
S.CCEDiag(E, diag::note_constexpr_overflow) << Value << Type;
- return S.noteUndefinedBehavior();
+ if (!S.noteUndefinedBehavior()) {
+ S.Stk.pop<T>();
+ return false;
+ }
+ return true;
}
}
@@ -137,6 +306,16 @@ bool Add(InterpState &S, CodePtr OpPC) {
return AddSubMulHelper<T, T::add, std::plus>(S, OpPC, Bits, LHS, RHS);
}
+inline bool Addf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
+ const Floating &RHS = S.Stk.pop<Floating>();
+ const Floating &LHS = S.Stk.pop<Floating>();
+
+ Floating Result;
+ auto Status = Floating::add(LHS, RHS, RM, &Result);
+ S.Stk.push<Floating>(Result);
+ return CheckFloatResult(S, OpPC, Result, Status);
+}
+
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool Sub(InterpState &S, CodePtr OpPC) {
const T &RHS = S.Stk.pop<T>();
@@ -145,6 +324,16 @@ bool Sub(InterpState &S, CodePtr OpPC) {
return AddSubMulHelper<T, T::sub, std::minus>(S, OpPC, Bits, LHS, RHS);
}
+inline bool Subf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
+ const Floating &RHS = S.Stk.pop<Floating>();
+ const Floating &LHS = S.Stk.pop<Floating>();
+
+ Floating Result;
+ auto Status = Floating::sub(LHS, RHS, RM, &Result);
+ S.Stk.push<Floating>(Result);
+ return CheckFloatResult(S, OpPC, Result, Status);
+}
+
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool Mul(InterpState &S, CodePtr OpPC) {
const T &RHS = S.Stk.pop<T>();
@@ -153,6 +342,348 @@ bool Mul(InterpState &S, CodePtr OpPC) {
return AddSubMulHelper<T, T::mul, std::multiplies>(S, OpPC, Bits, LHS, RHS);
}
+inline bool Mulf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
+ const Floating &RHS = S.Stk.pop<Floating>();
+ const Floating &LHS = S.Stk.pop<Floating>();
+
+ Floating Result;
+ auto Status = Floating::mul(LHS, RHS, RM, &Result);
+ S.Stk.push<Floating>(Result);
+ return CheckFloatResult(S, OpPC, Result, Status);
+}
+/// 1) Pops the RHS from the stack.
+/// 2) Pops the LHS from the stack.
+/// 3) Pushes 'LHS & RHS' on the stack
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool BitAnd(InterpState &S, CodePtr OpPC) {
+ const T &RHS = S.Stk.pop<T>();
+ const T &LHS = S.Stk.pop<T>();
+
+ unsigned Bits = RHS.bitWidth();
+ T Result;
+ if (!T::bitAnd(LHS, RHS, Bits, &Result)) {
+ S.Stk.push<T>(Result);
+ return true;
+ }
+ return false;
+}
+
+/// 1) Pops the RHS from the stack.
+/// 2) Pops the LHS from the stack.
+/// 3) Pushes 'LHS | RHS' on the stack
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool BitOr(InterpState &S, CodePtr OpPC) {
+ const T &RHS = S.Stk.pop<T>();
+ const T &LHS = S.Stk.pop<T>();
+
+ unsigned Bits = RHS.bitWidth();
+ T Result;
+ if (!T::bitOr(LHS, RHS, Bits, &Result)) {
+ S.Stk.push<T>(Result);
+ return true;
+ }
+ return false;
+}
+
+/// 1) Pops the RHS from the stack.
+/// 2) Pops the LHS from the stack.
+/// 3) Pushes 'LHS ^ RHS' on the stack
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool BitXor(InterpState &S, CodePtr OpPC) {
+ const T &RHS = S.Stk.pop<T>();
+ const T &LHS = S.Stk.pop<T>();
+
+ unsigned Bits = RHS.bitWidth();
+ T Result;
+ if (!T::bitXor(LHS, RHS, Bits, &Result)) {
+ S.Stk.push<T>(Result);
+ return true;
+ }
+ return false;
+}
+
+/// 1) Pops the RHS from the stack.
+/// 2) Pops the LHS from the stack.
+/// 3) Pushes 'LHS % RHS' on the stack (the remainder of dividing LHS by RHS).
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool Rem(InterpState &S, CodePtr OpPC) {
+ const T &RHS = S.Stk.pop<T>();
+ const T &LHS = S.Stk.pop<T>();
+
+ if (!CheckDivRem(S, OpPC, LHS, RHS))
+ return false;
+
+ const unsigned Bits = RHS.bitWidth() * 2;
+ T Result;
+ if (!T::rem(LHS, RHS, Bits, &Result)) {
+ S.Stk.push<T>(Result);
+ return true;
+ }
+ return false;
+}
+
+/// 1) Pops the RHS from the stack.
+/// 2) Pops the LHS from the stack.
+/// 3) Pushes 'LHS / RHS' on the stack
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool Div(InterpState &S, CodePtr OpPC) {
+ const T &RHS = S.Stk.pop<T>();
+ const T &LHS = S.Stk.pop<T>();
+
+ if (!CheckDivRem(S, OpPC, LHS, RHS))
+ return false;
+
+ const unsigned Bits = RHS.bitWidth() * 2;
+ T Result;
+ if (!T::div(LHS, RHS, Bits, &Result)) {
+ S.Stk.push<T>(Result);
+ return true;
+ }
+ return false;
+}
+
+inline bool Divf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
+ const Floating &RHS = S.Stk.pop<Floating>();
+ const Floating &LHS = S.Stk.pop<Floating>();
+
+ if (!CheckDivRem(S, OpPC, LHS, RHS))
+ return false;
+
+ Floating Result;
+ auto Status = Floating::div(LHS, RHS, RM, &Result);
+ S.Stk.push<Floating>(Result);
+ return CheckFloatResult(S, OpPC, Result, Status);
+}
+
+//===----------------------------------------------------------------------===//
+// Inv
+//===----------------------------------------------------------------------===//
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool Inv(InterpState &S, CodePtr OpPC) {
+ using BoolT = PrimConv<PT_Bool>::T;
+ const T &Val = S.Stk.pop<T>();
+ const unsigned Bits = Val.bitWidth();
+ Boolean R;
+ Boolean::inv(BoolT::from(Val, Bits), &R);
+
+ S.Stk.push<BoolT>(R);
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// Neg
+//===----------------------------------------------------------------------===//
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool Neg(InterpState &S, CodePtr OpPC) {
+ const T &Value = S.Stk.pop<T>();
+ T Result;
+
+ if (!T::neg(Value, &Result)) {
+ S.Stk.push<T>(Result);
+ return true;
+ }
+
+ assert(isIntegralType(Name) &&
+ "don't expect other types to fail at constexpr negation");
+ S.Stk.push<T>(Result);
+
+ APSInt NegatedValue = -Value.toAPSInt(Value.bitWidth() + 1);
+ const Expr *E = S.Current->getExpr(OpPC);
+ QualType Type = E->getType();
+
+ if (S.checkingForUndefinedBehavior()) {
+ SmallString<32> Trunc;
+ NegatedValue.trunc(Result.bitWidth()).toString(Trunc, 10);
+ auto Loc = E->getExprLoc();
+ S.report(Loc, diag::warn_integer_constant_overflow)
+ << Trunc << Type << E->getSourceRange();
+ return true;
+ }
+
+ S.CCEDiag(E, diag::note_constexpr_overflow) << NegatedValue << Type;
+ return S.noteUndefinedBehavior();
+}
+
+enum class PushVal : bool {
+ No,
+ Yes,
+};
+enum class IncDecOp {
+ Inc,
+ Dec,
+};
+
+template <typename T, IncDecOp Op, PushVal DoPush>
+bool IncDecHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
+ const T &Value = Ptr.deref<T>();
+ T Result;
+
+ if constexpr (DoPush == PushVal::Yes)
+ S.Stk.push<T>(Value);
+
+ if constexpr (Op == IncDecOp::Inc) {
+ if (!T::increment(Value, &Result)) {
+ Ptr.deref<T>() = Result;
+ return true;
+ }
+ } else {
+ if (!T::decrement(Value, &Result)) {
+ Ptr.deref<T>() = Result;
+ return true;
+ }
+ }
+
+ // Something went wrong with the previous operation. Compute the
+ // result with another bit of precision.
+ unsigned Bits = Value.bitWidth() + 1;
+ APSInt APResult;
+ if constexpr (Op == IncDecOp::Inc)
+ APResult = ++Value.toAPSInt(Bits);
+ else
+ APResult = --Value.toAPSInt(Bits);
+
+ // Report undefined behaviour, stopping if required.
+ const Expr *E = S.Current->getExpr(OpPC);
+ QualType Type = E->getType();
+ if (S.checkingForUndefinedBehavior()) {
+ SmallString<32> Trunc;
+ APResult.trunc(Result.bitWidth()).toString(Trunc, 10);
+ auto Loc = E->getExprLoc();
+ S.report(Loc, diag::warn_integer_constant_overflow)
+ << Trunc << Type << E->getSourceRange();
+ return true;
+ }
+
+ S.CCEDiag(E, diag::note_constexpr_overflow) << APResult << Type;
+ return S.noteUndefinedBehavior();
+}
+
+/// 1) Pops a pointer from the stack
+/// 2) Load the value from the pointer
+/// 3) Writes the value increased by one back to the pointer
+/// 4) Pushes the original (pre-inc) value on the stack.
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool Inc(InterpState &S, CodePtr OpPC) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
+ return false;
+
+ return IncDecHelper<T, IncDecOp::Inc, PushVal::Yes>(S, OpPC, Ptr);
+}
+
+/// 1) Pops a pointer from the stack
+/// 2) Load the value from the pointer
+/// 3) Writes the value increased by one back to the pointer
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool IncPop(InterpState &S, CodePtr OpPC) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
+ return false;
+
+ return IncDecHelper<T, IncDecOp::Inc, PushVal::No>(S, OpPC, Ptr);
+}
+
+/// 1) Pops a pointer from the stack
+/// 2) Load the value from the pointer
+/// 3) Writes the value decreased by one back to the pointer
+/// 4) Pushes the original (pre-dec) value on the stack.
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool Dec(InterpState &S, CodePtr OpPC) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
+ return false;
+
+ return IncDecHelper<T, IncDecOp::Dec, PushVal::Yes>(S, OpPC, Ptr);
+}
+
+/// 1) Pops a pointer from the stack
+/// 2) Load the value from the pointer
+/// 3) Writes the value decreased by one back to the pointer
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool DecPop(InterpState &S, CodePtr OpPC) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
+ return false;
+
+ return IncDecHelper<T, IncDecOp::Dec, PushVal::No>(S, OpPC, Ptr);
+}
+
+template <IncDecOp Op, PushVal DoPush>
+bool IncDecFloatHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+ llvm::RoundingMode RM) {
+ Floating Value = Ptr.deref<Floating>();
+ Floating Result;
+
+ if constexpr (DoPush == PushVal::Yes)
+ S.Stk.push<Floating>(Value);
+
+ llvm::APFloat::opStatus Status;
+ if constexpr (Op == IncDecOp::Inc)
+ Status = Floating::increment(Value, RM, &Result);
+ else
+ Status = Floating::decrement(Value, RM, &Result);
+
+ Ptr.deref<Floating>() = Result;
+
+ return CheckFloatResult(S, OpPC, Result, Status);
+}
+
+inline bool Incf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
+ return false;
+
+ return IncDecFloatHelper<IncDecOp::Inc, PushVal::Yes>(S, OpPC, Ptr, RM);
+}
+
+inline bool IncfPop(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
+ return false;
+
+ return IncDecFloatHelper<IncDecOp::Inc, PushVal::No>(S, OpPC, Ptr, RM);
+}
+
+inline bool Decf(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
+ return false;
+
+ return IncDecFloatHelper<IncDecOp::Dec, PushVal::Yes>(S, OpPC, Ptr, RM);
+}
+
+inline bool DecfPop(InterpState &S, CodePtr OpPC, llvm::RoundingMode RM) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
+ return false;
+
+ return IncDecFloatHelper<IncDecOp::Dec, PushVal::No>(S, OpPC, Ptr, RM);
+}
+
+/// 1) Pops the value from the stack.
+/// 2) Pushes the bitwise complemented value on the stack (~V).
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool Comp(InterpState &S, CodePtr OpPC) {
+ const T &Val = S.Stk.pop<T>();
+ T Result;
+ if (!T::comp(Val, &Result)) {
+ S.Stk.push<T>(Result);
+ return true;
+ }
+
+ return false;
+}
+
//===----------------------------------------------------------------------===//
// EQ, NE, GT, GE, LT, LE
//===----------------------------------------------------------------------===//
@@ -173,6 +704,29 @@ bool CmpHelperEQ(InterpState &S, CodePtr OpPC, CompareFn Fn) {
return CmpHelper<T>(S, OpPC, Fn);
}
+/// Function pointers cannot be compared in an ordered way.
+template <>
+inline bool CmpHelper<FunctionPointer>(InterpState &S, CodePtr OpPC,
+ CompareFn Fn) {
+ const auto &RHS = S.Stk.pop<FunctionPointer>();
+ const auto &LHS = S.Stk.pop<FunctionPointer>();
+
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.FFDiag(Loc, diag::note_constexpr_pointer_comparison_unspecified)
+ << LHS.toDiagnosticString(S.getCtx())
+ << RHS.toDiagnosticString(S.getCtx());
+ return false;
+}
+
+template <>
+inline bool CmpHelperEQ<FunctionPointer>(InterpState &S, CodePtr OpPC,
+ CompareFn Fn) {
+ const auto &RHS = S.Stk.pop<FunctionPointer>();
+ const auto &LHS = S.Stk.pop<FunctionPointer>();
+ S.Stk.push<Boolean>(Boolean::from(Fn(LHS.compare(RHS))));
+ return true;
+}
+
template <>
inline bool CmpHelper<Pointer>(InterpState &S, CodePtr OpPC, CompareFn Fn) {
using BoolT = PrimConv<PT_Bool>::T;
@@ -181,7 +735,9 @@ inline bool CmpHelper<Pointer>(InterpState &S, CodePtr OpPC, CompareFn Fn) {
if (!Pointer::hasSameBase(LHS, RHS)) {
const SourceInfo &Loc = S.Current->getSource(OpPC);
- S.FFDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
+ S.FFDiag(Loc, diag::note_constexpr_pointer_comparison_unspecified)
+ << LHS.toDiagnosticString(S.getCtx())
+ << RHS.toDiagnosticString(S.getCtx());
return false;
} else {
unsigned VL = LHS.getByteOffset();
@@ -208,6 +764,16 @@ inline bool CmpHelperEQ<Pointer>(InterpState &S, CodePtr OpPC, CompareFn Fn) {
} else {
unsigned VL = LHS.getByteOffset();
unsigned VR = RHS.getByteOffset();
+
+ // In our Pointer class, a pointer to an array and a pointer to the first
+ // element in the same array are NOT equal. They have the same Base value,
+ // but a different Offset. This is a pretty rare case, so we fix this here
+ // by comparing pointers to the first elements.
+ if (LHS.isArrayRoot())
+ VL = LHS.atIndex(0).getByteOffset();
+ if (RHS.isArrayRoot())
+ VR = RHS.atIndex(0).getByteOffset();
+
S.Stk.push<BoolT>(BoolT::from(Fn(Compare(VL, VR))));
return true;
}
@@ -221,6 +787,30 @@ bool EQ(InterpState &S, CodePtr OpPC) {
}
template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool CMP3(InterpState &S, CodePtr OpPC, const ComparisonCategoryInfo *CmpInfo) {
+ const T &RHS = S.Stk.pop<T>();
+ const T &LHS = S.Stk.pop<T>();
+ const Pointer &P = S.Stk.peek<Pointer>();
+
+ ComparisonCategoryResult CmpResult = LHS.compare(RHS);
+ if (CmpResult == ComparisonCategoryResult::Unordered) {
+ // This should only happen with pointers.
+ const SourceInfo &Loc = S.Current->getSource(OpPC);
+ S.FFDiag(Loc, diag::note_constexpr_pointer_comparison_unspecified)
+ << LHS.toDiagnosticString(S.getCtx())
+ << RHS.toDiagnosticString(S.getCtx());
+ return false;
+ }
+
+ assert(CmpInfo);
+ const auto *CmpValueInfo = CmpInfo->getValueInfo(CmpResult);
+ assert(CmpValueInfo);
+ assert(CmpValueInfo->hasValidIntValue());
+ APSInt IntValue = CmpValueInfo->getIntValue();
+ return SetThreeWayComparisonField(S, OpPC, P, IntValue);
+}
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
bool NE(InterpState &S, CodePtr OpPC) {
return CmpHelperEQ<T>(S, OpPC, [](ComparisonCategoryResult R) {
return R != ComparisonCategoryResult::Equal;
@@ -303,10 +893,16 @@ bool Const(InterpState &S, CodePtr OpPC, const T &Arg) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool GetLocal(InterpState &S, CodePtr OpPC, uint32_t I) {
- S.Stk.push<T>(S.Current->getLocal<T>(I));
+ const Pointer &Ptr = S.Current->getLocalPointer(I);
+ if (!CheckLoad(S, OpPC, Ptr))
+ return false;
+ S.Stk.push<T>(Ptr.deref<T>());
return true;
}
+/// 1) Pops the value from the stack.
+/// 2) Writes the value to the local variable with the
+/// given offset.
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool SetLocal(InterpState &S, CodePtr OpPC, uint32_t I) {
S.Current->setLocal<T>(I, S.Stk.pop<T>());
@@ -328,6 +924,8 @@ bool SetParam(InterpState &S, CodePtr OpPC, uint32_t I) {
return true;
}
+/// 1) Peeks a pointer on the stack
+/// 2) Pushes the value of the pointer's field on the stack
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool GetField(InterpState &S, CodePtr OpPC, uint32_t I) {
const Pointer &Obj = S.Stk.peek<Pointer>();
@@ -353,10 +951,13 @@ bool SetField(InterpState &S, CodePtr OpPC, uint32_t I) {
const Pointer &Field = Obj.atField(I);
if (!CheckStore(S, OpPC, Field))
return false;
+ Field.initialize();
Field.deref<T>() = Value;
return true;
}
+/// 1) Pops a pointer from the stack
+/// 2) Pushes the value of the pointer's field on the stack
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool GetFieldPop(InterpState &S, CodePtr OpPC, uint32_t I) {
const Pointer &Obj = S.Stk.pop<Pointer>();
@@ -402,13 +1003,24 @@ bool SetThisField(InterpState &S, CodePtr OpPC, uint32_t I) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool GetGlobal(InterpState &S, CodePtr OpPC, uint32_t I) {
- auto *B = S.P.getGlobal(I);
+ const Block *B = S.P.getGlobal(I);
+
+ if (!CheckConstant(S, OpPC, B->getDescriptor()))
+ return false;
if (B->isExtern())
return false;
S.Stk.push<T>(B->deref<T>());
return true;
}
+/// Same as GetGlobal, but without the checks.
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool GetGlobalUnchecked(InterpState &S, CodePtr OpPC, uint32_t I) {
+ auto *B = S.P.getGlobal(I);
+ S.Stk.push<T>(B->deref<T>());
+ return true;
+}
+
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool SetGlobal(InterpState &S, CodePtr OpPC, uint32_t I) {
// TODO: emit warning.
@@ -421,6 +1033,39 @@ bool InitGlobal(InterpState &S, CodePtr OpPC, uint32_t I) {
return true;
}
+/// 1) Converts the value on top of the stack to an APValue
+/// 2) Sets that APValue on \Temp
+/// 3) Initialized global with index \I with that
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool InitGlobalTemp(InterpState &S, CodePtr OpPC, uint32_t I,
+ const LifetimeExtendedTemporaryDecl *Temp) {
+ assert(Temp);
+ const T Value = S.Stk.peek<T>();
+ APValue APV = Value.toAPValue();
+ APValue *Cached = Temp->getOrCreateValue(true);
+ *Cached = APV;
+
+ S.P.getGlobal(I)->deref<T>() = S.Stk.pop<T>();
+ return true;
+}
+
+/// 1) Converts the value on top of the stack to an APValue
+/// 2) Sets that APValue on \Temp
+/// 3) Initialized global with index \I with that
+inline bool InitGlobalTempComp(InterpState &S, CodePtr OpPC,
+ const LifetimeExtendedTemporaryDecl *Temp) {
+ assert(Temp);
+ const Pointer &P = S.Stk.peek<Pointer>();
+ APValue *Cached = Temp->getOrCreateValue(true);
+
+ if (std::optional<APValue> APV = P.toRValue(S.getCtx())) {
+ *Cached = *APV;
+ return true;
+ }
+
+ return false;
+}
+
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool InitThisField(InterpState &S, CodePtr OpPC, uint32_t I) {
if (S.checkingPotentialConstantExpression())
@@ -434,14 +1079,18 @@ bool InitThisField(InterpState &S, CodePtr OpPC, uint32_t I) {
return true;
}
+// FIXME: The Field pointer here is too much IMO and we could instead just
+// pass an Offset + BitWidth pair.
template <PrimType Name, class T = typename PrimConv<Name>::T>
-bool InitThisBitField(InterpState &S, CodePtr OpPC, const Record::Field *F) {
+bool InitThisBitField(InterpState &S, CodePtr OpPC, const Record::Field *F,
+ uint32_t FieldOffset) {
+ assert(F->isBitField());
if (S.checkingPotentialConstantExpression())
return false;
const Pointer &This = S.Current->getThis();
if (!CheckThis(S, OpPC, This))
return false;
- const Pointer &Field = This.atField(F->Offset);
+ const Pointer &Field = This.atField(FieldOffset);
const auto &Value = S.Stk.pop<T>();
Field.deref<T>() = Value.truncate(F->Decl->getBitWidthValue(S.getCtx()));
Field.initialize();
@@ -462,10 +1111,13 @@ bool InitThisFieldActive(InterpState &S, CodePtr OpPC, uint32_t I) {
return true;
}
+/// 1) Pops the value from the stack
+/// 2) Peeks a pointer from the stack
+/// 3) Pushes the value to field I of the pointer on the stack
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool InitField(InterpState &S, CodePtr OpPC, uint32_t I) {
const T &Value = S.Stk.pop<T>();
- const Pointer &Field = S.Stk.pop<Pointer>().atField(I);
+ const Pointer &Field = S.Stk.peek<Pointer>().atField(I);
Field.deref<T>() = Value;
Field.activate();
Field.initialize();
@@ -474,8 +1126,9 @@ bool InitField(InterpState &S, CodePtr OpPC, uint32_t I) {
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool InitBitField(InterpState &S, CodePtr OpPC, const Record::Field *F) {
+ assert(F->isBitField());
const T &Value = S.Stk.pop<T>();
- const Pointer &Field = S.Stk.pop<Pointer>().atField(F->Offset);
+ const Pointer &Field = S.Stk.peek<Pointer>().atField(F->Offset);
Field.deref<T>() = Value.truncate(F->Decl->getBitWidthValue(S.getCtx()));
Field.activate();
Field.initialize();
@@ -515,14 +1168,19 @@ inline bool GetPtrGlobal(InterpState &S, CodePtr OpPC, uint32_t I) {
return true;
}
+/// 1) Pops a Pointer from the stack
+/// 2) Pushes Pointer.atField(Off) on the stack
inline bool GetPtrField(InterpState &S, CodePtr OpPC, uint32_t Off) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
- if (!CheckNull(S, OpPC, Ptr, CSK_Field))
+ if (S.inConstantContext() && !CheckNull(S, OpPC, Ptr, CSK_Field))
return false;
if (!CheckExtern(S, OpPC, Ptr))
return false;
if (!CheckRange(S, OpPC, Ptr, CSK_Field))
return false;
+ if (!CheckSubobject(S, OpPC, Ptr, CSK_Field))
+ return false;
+
S.Stk.push<Pointer>(Ptr.atField(Off));
return true;
}
@@ -563,10 +1221,32 @@ inline bool GetPtrActiveThisField(InterpState &S, CodePtr OpPC, uint32_t Off) {
return true;
}
+inline bool GetPtrDerivedPop(InterpState &S, CodePtr OpPC, uint32_t Off) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+ if (!CheckNull(S, OpPC, Ptr, CSK_Derived))
+ return false;
+ if (!CheckSubobject(S, OpPC, Ptr, CSK_Derived))
+ return false;
+ S.Stk.push<Pointer>(Ptr.atFieldSub(Off));
+ return true;
+}
+
inline bool GetPtrBase(InterpState &S, CodePtr OpPC, uint32_t Off) {
+ const Pointer &Ptr = S.Stk.peek<Pointer>();
+ if (!CheckNull(S, OpPC, Ptr, CSK_Base))
+ return false;
+ if (!CheckSubobject(S, OpPC, Ptr, CSK_Base))
+ return false;
+ S.Stk.push<Pointer>(Ptr.atField(Off));
+ return true;
+}
+
+inline bool GetPtrBasePop(InterpState &S, CodePtr OpPC, uint32_t Off) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
if (!CheckNull(S, OpPC, Ptr, CSK_Base))
return false;
+ if (!CheckSubobject(S, OpPC, Ptr, CSK_Base))
+ return false;
S.Stk.push<Pointer>(Ptr.atField(Off));
return true;
}
@@ -581,6 +1261,12 @@ inline bool GetPtrThisBase(InterpState &S, CodePtr OpPC, uint32_t Off) {
return true;
}
+inline bool InitPtrPop(InterpState &S, CodePtr OpPC) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+ Ptr.initialize();
+ return true;
+}
+
inline bool VirtBaseHelper(InterpState &S, CodePtr OpPC, const RecordDecl *Decl,
const Pointer &Ptr) {
Pointer Base = Ptr;
@@ -637,6 +1323,8 @@ bool Store(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.peek<Pointer>();
if (!CheckStore(S, OpPC, Ptr))
return false;
+ if (!Ptr.isRoot())
+ Ptr.initialize();
Ptr.deref<T>() = Value;
return true;
}
@@ -647,6 +1335,8 @@ bool StorePop(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
if (!CheckStore(S, OpPC, Ptr))
return false;
+ if (!Ptr.isRoot())
+ Ptr.initialize();
Ptr.deref<T>() = Value;
return true;
}
@@ -657,11 +1347,12 @@ bool StoreBitField(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.peek<Pointer>();
if (!CheckStore(S, OpPC, Ptr))
return false;
- if (auto *FD = Ptr.getField()) {
+ if (!Ptr.isRoot())
+ Ptr.initialize();
+ if (const auto *FD = Ptr.getField())
Ptr.deref<T>() = Value.truncate(FD->getBitWidthValue(S.getCtx()));
- } else {
+ else
Ptr.deref<T>() = Value;
- }
return true;
}
@@ -671,11 +1362,12 @@ bool StoreBitFieldPop(InterpState &S, CodePtr OpPC) {
const Pointer &Ptr = S.Stk.pop<Pointer>();
if (!CheckStore(S, OpPC, Ptr))
return false;
- if (auto *FD = Ptr.getField()) {
+ if (!Ptr.isRoot())
+ Ptr.initialize();
+ if (const auto *FD = Ptr.getField())
Ptr.deref<T>() = Value.truncate(FD->getBitWidthValue(S.getCtx()));
- } else {
+ else
Ptr.deref<T>() = Value;
- }
return true;
}
@@ -690,6 +1382,9 @@ bool InitPop(InterpState &S, CodePtr OpPC) {
return true;
}
+/// 1) Pops the value from the stack
+/// 2) Peeks a pointer and gets its index \Idx
+/// 3) Sets the value on the pointer, leaving the pointer on the stack.
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool InitElem(InterpState &S, CodePtr OpPC, uint32_t Idx) {
const T &Value = S.Stk.pop<T>();
@@ -701,6 +1396,7 @@ bool InitElem(InterpState &S, CodePtr OpPC, uint32_t Idx) {
return true;
}
+/// The same as InitElem, but pops the pointer as well.
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool InitElemPop(InterpState &S, CodePtr OpPC, uint32_t Idx) {
const T &Value = S.Stk.pop<T>();
@@ -716,74 +1412,151 @@ bool InitElemPop(InterpState &S, CodePtr OpPC, uint32_t Idx) {
// AddOffset, SubOffset
//===----------------------------------------------------------------------===//
-template <class T, bool Add> bool OffsetHelper(InterpState &S, CodePtr OpPC) {
- // Fetch the pointer and the offset.
- const T &Offset = S.Stk.pop<T>();
- const Pointer &Ptr = S.Stk.pop<Pointer>();
- if (!CheckNull(S, OpPC, Ptr, CSK_ArrayIndex))
- return false;
+template <class T, ArithOp Op>
+bool OffsetHelper(InterpState &S, CodePtr OpPC, const T &Offset,
+ const Pointer &Ptr) {
if (!CheckRange(S, OpPC, Ptr, CSK_ArrayToPointer))
return false;
- // Get a version of the index comparable to the type.
- T Index = T::from(Ptr.getIndex(), Offset.bitWidth());
- // A zero offset does not change the pointer, but in the case of an array
- // it has to be adjusted to point to the first element instead of the array.
+ // A zero offset does not change the pointer.
if (Offset.isZero()) {
- S.Stk.push<Pointer>(Index.isZero() ? Ptr.atIndex(0) : Ptr);
+ S.Stk.push<Pointer>(Ptr);
return true;
}
+
+ if (!CheckNull(S, OpPC, Ptr, CSK_ArrayIndex))
+ return false;
+
// Arrays of unknown bounds cannot have pointers into them.
if (!CheckArray(S, OpPC, Ptr))
return false;
+ // Get a version of the index comparable to the type.
+ T Index = T::from(Ptr.getIndex(), Offset.bitWidth());
// Compute the largest index into the array.
- unsigned MaxIndex = Ptr.getNumElems();
+ T MaxIndex = T::from(Ptr.getNumElems(), Offset.bitWidth());
+ bool Invalid = false;
// Helper to report an invalid offset, computed as APSInt.
- auto InvalidOffset = [&]() {
+ auto DiagInvalidOffset = [&]() -> void {
const unsigned Bits = Offset.bitWidth();
APSInt APOffset(Offset.toAPSInt().extend(Bits + 2), false);
APSInt APIndex(Index.toAPSInt().extend(Bits + 2), false);
- APSInt NewIndex = Add ? (APIndex + APOffset) : (APIndex - APOffset);
+ APSInt NewIndex =
+ (Op == ArithOp::Add) ? (APIndex + APOffset) : (APIndex - APOffset);
S.CCEDiag(S.Current->getSource(OpPC), diag::note_constexpr_array_index)
<< NewIndex
<< /*array*/ static_cast<int>(!Ptr.inArray())
<< static_cast<unsigned>(MaxIndex);
- return false;
+ Invalid = true;
};
- // If the new offset would be negative, bail out.
- if (Add && Offset.isNegative() && (Offset.isMin() || -Offset > Index))
- return InvalidOffset();
- if (!Add && Offset.isPositive() && Index < Offset)
- return InvalidOffset();
+ T MaxOffset = T::from(MaxIndex - Index, Offset.bitWidth());
+ if constexpr (Op == ArithOp::Add) {
+ // If the new offset would be negative, bail out.
+ if (Offset.isNegative() && (Offset.isMin() || -Offset > Index))
+ DiagInvalidOffset();
- // If the new offset would be out of bounds, bail out.
- unsigned MaxOffset = MaxIndex - Ptr.getIndex();
- if (Add && Offset.isPositive() && Offset > MaxOffset)
- return InvalidOffset();
- if (!Add && Offset.isNegative() && (Offset.isMin() || -Offset > MaxOffset))
- return InvalidOffset();
+ // If the new offset would be out of bounds, bail out.
+ if (Offset.isPositive() && Offset > MaxOffset)
+ DiagInvalidOffset();
+ } else {
+ // If the new offset would be negative, bail out.
+ if (Offset.isPositive() && Index < Offset)
+ DiagInvalidOffset();
+
+ // If the new offset would be out of bounds, bail out.
+ if (Offset.isNegative() && (Offset.isMin() || -Offset > MaxOffset))
+ DiagInvalidOffset();
+ }
+
+ if (Invalid && !Ptr.isDummy() && S.getLangOpts().CPlusPlus)
+ return false;
// Offset is valid - compute it on unsigned.
int64_t WideIndex = static_cast<int64_t>(Index);
int64_t WideOffset = static_cast<int64_t>(Offset);
- int64_t Result = Add ? (WideIndex + WideOffset) : (WideIndex - WideOffset);
+ int64_t Result;
+ if constexpr (Op == ArithOp::Add)
+ Result = WideIndex + WideOffset;
+ else
+ Result = WideIndex - WideOffset;
+
S.Stk.push<Pointer>(Ptr.atIndex(static_cast<unsigned>(Result)));
return true;
}
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool AddOffset(InterpState &S, CodePtr OpPC) {
- return OffsetHelper<T, true>(S, OpPC);
+ const T &Offset = S.Stk.pop<T>();
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+ return OffsetHelper<T, ArithOp::Add>(S, OpPC, Offset, Ptr);
}
template <PrimType Name, class T = typename PrimConv<Name>::T>
bool SubOffset(InterpState &S, CodePtr OpPC) {
- return OffsetHelper<T, false>(S, OpPC);
+ const T &Offset = S.Stk.pop<T>();
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+ return OffsetHelper<T, ArithOp::Sub>(S, OpPC, Offset, Ptr);
}
+template <ArithOp Op>
+static inline bool IncDecPtrHelper(InterpState &S, CodePtr OpPC,
+ const Pointer &Ptr) {
+ using OneT = Integral<8, false>;
+
+ const Pointer &P = Ptr.deref<Pointer>();
+ if (!CheckNull(S, OpPC, P, CSK_ArrayIndex))
+ return false;
+
+ // Get the current value on the stack.
+ S.Stk.push<Pointer>(P);
+
+ // Now the current Ptr again and a constant 1.
+ OneT One = OneT::from(1);
+ if (!OffsetHelper<OneT, Op>(S, OpPC, One, P))
+ return false;
+
+ // Store the new value.
+ Ptr.deref<Pointer>() = S.Stk.pop<Pointer>();
+ return true;
+}
+
+static inline bool IncPtr(InterpState &S, CodePtr OpPC) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
+ return false;
+
+ return IncDecPtrHelper<ArithOp::Add>(S, OpPC, Ptr);
+}
+
+static inline bool DecPtr(InterpState &S, CodePtr OpPC) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
+ return false;
+
+ return IncDecPtrHelper<ArithOp::Sub>(S, OpPC, Ptr);
+}
+
+/// 1) Pops a Pointer from the stack.
+/// 2) Pops another Pointer from the stack.
+/// 3) Pushes the different of the indices of the two pointers on the stack.
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+inline bool SubPtr(InterpState &S, CodePtr OpPC) {
+ const Pointer &LHS = S.Stk.pop<Pointer>();
+ const Pointer &RHS = S.Stk.pop<Pointer>();
+
+ if (!Pointer::hasSameBase(LHS, RHS) && S.getLangOpts().CPlusPlus) {
+ // TODO: Diagnose.
+ return false;
+ }
+
+ T A = T::from(LHS.getIndex());
+ T B = T::from(RHS.getIndex());
+ return AddSubMulHelper<T, T::sub, std::minus>(S, OpPC, A.bitWidth(), A, B);
+}
//===----------------------------------------------------------------------===//
// Destroy
@@ -805,6 +1578,127 @@ template <PrimType TIn, PrimType TOut> bool Cast(InterpState &S, CodePtr OpPC) {
return true;
}
+/// 1) Pops a Floating from the stack.
+/// 2) Pushes a new floating on the stack that uses the given semantics.
+inline bool CastFP(InterpState &S, CodePtr OpPC, const llvm::fltSemantics *Sem,
+ llvm::RoundingMode RM) {
+ Floating F = S.Stk.pop<Floating>();
+ Floating Result = F.toSemantics(Sem, RM);
+ S.Stk.push<Floating>(Result);
+ return true;
+}
+
+/// Like Cast(), but we cast to an arbitrary-bitwidth integral, so we need
+/// to know what bitwidth the result should be.
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool CastAP(InterpState &S, CodePtr OpPC, uint32_t BitWidth) {
+ S.Stk.push<IntegralAP<false>>(
+ IntegralAP<false>::from(S.Stk.pop<T>(), BitWidth));
+ return true;
+}
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool CastAPS(InterpState &S, CodePtr OpPC, uint32_t BitWidth) {
+ S.Stk.push<IntegralAP<true>>(
+ IntegralAP<true>::from(S.Stk.pop<T>(), BitWidth));
+ return true;
+}
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool CastIntegralFloating(InterpState &S, CodePtr OpPC,
+ const llvm::fltSemantics *Sem,
+ llvm::RoundingMode RM) {
+ const T &From = S.Stk.pop<T>();
+ APSInt FromAP = From.toAPSInt();
+ Floating Result;
+
+ auto Status = Floating::fromIntegral(FromAP, *Sem, RM, Result);
+ S.Stk.push<Floating>(Result);
+
+ return CheckFloatResult(S, OpPC, Result, Status);
+}
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool CastFloatingIntegral(InterpState &S, CodePtr OpPC) {
+ const Floating &F = S.Stk.pop<Floating>();
+
+ if constexpr (std::is_same_v<T, Boolean>) {
+ S.Stk.push<T>(T(F.isNonZero()));
+ return true;
+ } else {
+ APSInt Result(std::max(8u, T::bitWidth()),
+ /*IsUnsigned=*/!T::isSigned());
+ auto Status = F.convertToInteger(Result);
+
+ // Float-to-Integral overflow check.
+ if ((Status & APFloat::opStatus::opInvalidOp) && F.isFinite()) {
+ const Expr *E = S.Current->getExpr(OpPC);
+ QualType Type = E->getType();
+
+ S.CCEDiag(E, diag::note_constexpr_overflow) << F.getAPFloat() << Type;
+ if (S.noteUndefinedBehavior()) {
+ S.Stk.push<T>(T(Result));
+ return true;
+ }
+ return false;
+ }
+
+ S.Stk.push<T>(T(Result));
+ return CheckFloatResult(S, OpPC, F, Status);
+ }
+}
+
+static inline bool CastFloatingIntegralAP(InterpState &S, CodePtr OpPC,
+ uint32_t BitWidth) {
+ const Floating &F = S.Stk.pop<Floating>();
+
+ APSInt Result(BitWidth, /*IsUnsigned=*/true);
+ auto Status = F.convertToInteger(Result);
+
+ // Float-to-Integral overflow check.
+ if ((Status & APFloat::opStatus::opInvalidOp) && F.isFinite()) {
+ const Expr *E = S.Current->getExpr(OpPC);
+ QualType Type = E->getType();
+
+ S.CCEDiag(E, diag::note_constexpr_overflow) << F.getAPFloat() << Type;
+ return S.noteUndefinedBehavior();
+ }
+
+ S.Stk.push<IntegralAP<true>>(IntegralAP<true>(Result));
+ return CheckFloatResult(S, OpPC, F, Status);
+}
+
+static inline bool CastFloatingIntegralAPS(InterpState &S, CodePtr OpPC,
+ uint32_t BitWidth) {
+ const Floating &F = S.Stk.pop<Floating>();
+
+ APSInt Result(BitWidth, /*IsUnsigned=*/false);
+ auto Status = F.convertToInteger(Result);
+
+ // Float-to-Integral overflow check.
+ if ((Status & APFloat::opStatus::opInvalidOp) && F.isFinite()) {
+ const Expr *E = S.Current->getExpr(OpPC);
+ QualType Type = E->getType();
+
+ S.CCEDiag(E, diag::note_constexpr_overflow) << F.getAPFloat() << Type;
+ return S.noteUndefinedBehavior();
+ }
+
+ S.Stk.push<IntegralAP<true>>(IntegralAP<true>(Result));
+ return CheckFloatResult(S, OpPC, F, Status);
+}
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+bool CastPointerIntegral(InterpState &S, CodePtr OpPC) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (!CheckPotentialReinterpretCast(S, OpPC, Ptr))
+ return false;
+
+ S.Stk.push<T>(T::from(Ptr.getIntegerRepresentation()));
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// Zero, Nullptr
//===----------------------------------------------------------------------===//
@@ -815,6 +1709,16 @@ bool Zero(InterpState &S, CodePtr OpPC) {
return true;
}
+static inline bool ZeroIntAP(InterpState &S, CodePtr OpPC, uint32_t BitWidth) {
+ S.Stk.push<IntegralAP<false>>(IntegralAP<false>::zero(BitWidth));
+ return true;
+}
+
+static inline bool ZeroIntAPS(InterpState &S, CodePtr OpPC, uint32_t BitWidth) {
+ S.Stk.push<IntegralAP<true>>(IntegralAP<true>::zero(BitWidth));
+ return true;
+}
+
template <PrimType Name, class T = typename PrimConv<Name>::T>
inline bool Null(InterpState &S, CodePtr OpPC) {
S.Stk.push<T>();
@@ -839,88 +1743,53 @@ inline bool This(InterpState &S, CodePtr OpPC) {
return true;
}
+inline bool RVOPtr(InterpState &S, CodePtr OpPC) {
+ assert(S.Current->getFunction()->hasRVO());
+ if (S.checkingPotentialConstantExpression())
+ return false;
+ S.Stk.push<Pointer>(S.Current->getRVOPtr());
+ return true;
+}
+
//===----------------------------------------------------------------------===//
// Shr, Shl
//===----------------------------------------------------------------------===//
-template <PrimType TR, PrimType TL, class T = typename PrimConv<TR>::T>
-unsigned Trunc(InterpState &S, CodePtr OpPC, unsigned Bits, const T &V) {
- // C++11 [expr.shift]p1: Shift width must be less than the bit width of
- // the shifted type.
- if (Bits > 1 && V >= T::from(Bits, V.bitWidth())) {
- const Expr *E = S.Current->getExpr(OpPC);
- const APSInt Val = V.toAPSInt();
- QualType Ty = E->getType();
- S.CCEDiag(E, diag::note_constexpr_large_shift) << Val << Ty << Bits;
- return Bits;
- } else {
- return static_cast<unsigned>(V);
- }
-}
+template <PrimType NameL, PrimType NameR>
+inline bool Shr(InterpState &S, CodePtr OpPC) {
+ using LT = typename PrimConv<NameL>::T;
+ using RT = typename PrimConv<NameR>::T;
+ const auto &RHS = S.Stk.pop<RT>();
+ const auto &LHS = S.Stk.pop<LT>();
+ const unsigned Bits = LHS.bitWidth();
-template <PrimType TL, PrimType TR, typename T = typename PrimConv<TL>::T>
-inline bool ShiftRight(InterpState &S, CodePtr OpPC, const T &V, unsigned RHS) {
- if (RHS >= V.bitWidth()) {
- S.Stk.push<T>(T::from(0, V.bitWidth()));
- } else {
- S.Stk.push<T>(T::from(V >> RHS, V.bitWidth()));
- }
- return true;
-}
+ if (!CheckShift(S, OpPC, LHS, RHS, Bits))
+ return false;
-template <PrimType TL, PrimType TR, typename T = typename PrimConv<TL>::T>
-inline bool ShiftLeft(InterpState &S, CodePtr OpPC, const T &V, unsigned RHS) {
- if (V.isSigned() && !S.getLangOpts().CPlusPlus20) {
- // C++11 [expr.shift]p2: A signed left shift must have a non-negative
- // operand, and must not overflow the corresponding unsigned type.
- // C++2a [expr.shift]p2: E1 << E2 is the unique value congruent to
- // E1 x 2^E2 module 2^N.
- if (V.isNegative()) {
- const Expr *E = S.Current->getExpr(OpPC);
- S.CCEDiag(E, diag::note_constexpr_lshift_of_negative) << V.toAPSInt();
- } else if (V.countLeadingZeros() < RHS) {
- S.CCEDiag(S.Current->getExpr(OpPC), diag::note_constexpr_lshift_discards);
- }
- }
+ typename LT::AsUnsigned R;
+ LT::AsUnsigned::shiftRight(LT::AsUnsigned::from(LHS),
+ LT::AsUnsigned::from(RHS), Bits, &R);
+ S.Stk.push<LT>(LT::from(R));
- if (V.bitWidth() == 1) {
- S.Stk.push<T>(V);
- } else if (RHS >= V.bitWidth()) {
- S.Stk.push<T>(T::from(0, V.bitWidth()));
- } else {
- S.Stk.push<T>(T::from(V.toUnsigned() << RHS, V.bitWidth()));
- }
return true;
}
-template <PrimType TL, PrimType TR>
-inline bool Shr(InterpState &S, CodePtr OpPC) {
- const auto &RHS = S.Stk.pop<typename PrimConv<TR>::T>();
- const auto &LHS = S.Stk.pop<typename PrimConv<TL>::T>();
- const unsigned Bits = LHS.bitWidth();
-
- if (RHS.isSigned() && RHS.isNegative()) {
- const SourceInfo &Loc = S.Current->getSource(OpPC);
- S.CCEDiag(Loc, diag::note_constexpr_negative_shift) << RHS.toAPSInt();
- return ShiftLeft<TL, TR>(S, OpPC, LHS, Trunc<TR, TL>(S, OpPC, Bits, -RHS));
- } else {
- return ShiftRight<TL, TR>(S, OpPC, LHS, Trunc<TR, TL>(S, OpPC, Bits, RHS));
- }
-}
-
-template <PrimType TL, PrimType TR>
+template <PrimType NameL, PrimType NameR>
inline bool Shl(InterpState &S, CodePtr OpPC) {
- const auto &RHS = S.Stk.pop<typename PrimConv<TR>::T>();
- const auto &LHS = S.Stk.pop<typename PrimConv<TL>::T>();
+ using LT = typename PrimConv<NameL>::T;
+ using RT = typename PrimConv<NameR>::T;
+ const auto &RHS = S.Stk.pop<RT>();
+ const auto &LHS = S.Stk.pop<LT>();
const unsigned Bits = LHS.bitWidth();
- if (RHS.isSigned() && RHS.isNegative()) {
- const SourceInfo &Loc = S.Current->getSource(OpPC);
- S.CCEDiag(Loc, diag::note_constexpr_negative_shift) << RHS.toAPSInt();
- return ShiftRight<TL, TR>(S, OpPC, LHS, Trunc<TR, TL>(S, OpPC, Bits, -RHS));
- } else {
- return ShiftLeft<TL, TR>(S, OpPC, LHS, Trunc<TR, TL>(S, OpPC, Bits, RHS));
- }
+ if (!CheckShift(S, OpPC, LHS, RHS, Bits))
+ return false;
+
+ typename LT::AsUnsigned R;
+ LT::AsUnsigned::shiftLeft(LT::AsUnsigned::from(LHS),
+ LT::AsUnsigned::from(RHS, Bits), Bits, &R);
+ S.Stk.push<LT>(LT::from(R));
+ return true;
}
//===----------------------------------------------------------------------===//
@@ -949,8 +1818,228 @@ inline bool ExpandPtr(InterpState &S, CodePtr OpPC) {
return true;
}
-/// Interpreter entry point.
-bool Interpret(InterpState &S, APValue &Result);
+// 1) Pops an integral value from the stack
+// 2) Peeks a pointer
+// 3) Pushes a new pointer that's a narrowed array
+// element of the peeked pointer with the value
+// from 1) added as offset.
+//
+// This leaves the original pointer on the stack and pushes a new one
+// with the offset applied and narrowed.
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+inline bool ArrayElemPtr(InterpState &S, CodePtr OpPC) {
+ const T &Offset = S.Stk.pop<T>();
+ const Pointer &Ptr = S.Stk.peek<Pointer>();
+
+ if (!OffsetHelper<T, ArithOp::Add>(S, OpPC, Offset, Ptr))
+ return false;
+
+ return NarrowPtr(S, OpPC);
+}
+
+/// Just takes a pointer and checks if its' an incomplete
+/// array type.
+inline bool ArrayDecay(InterpState &S, CodePtr OpPC) {
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (!Ptr.isUnknownSizeArray()) {
+ S.Stk.push<Pointer>(Ptr.atIndex(0));
+ return true;
+ }
+
+ const SourceInfo &E = S.Current->getSource(OpPC);
+ S.FFDiag(E, diag::note_constexpr_unsupported_unsized_array);
+
+ return false;
+}
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+inline bool ArrayElemPtrPop(InterpState &S, CodePtr OpPC) {
+ const T &Offset = S.Stk.pop<T>();
+ const Pointer &Ptr = S.Stk.pop<Pointer>();
+
+ if (!OffsetHelper<T, ArithOp::Add>(S, OpPC, Offset, Ptr))
+ return false;
+
+ return NarrowPtr(S, OpPC);
+}
+
+inline bool Call(InterpState &S, CodePtr OpPC, const Function *Func) {
+ if (Func->hasThisPointer()) {
+ size_t ThisOffset =
+ Func->getArgSize() - (Func->hasRVO() ? primSize(PT_Ptr) : 0);
+
+ const Pointer &ThisPtr = S.Stk.peek<Pointer>(ThisOffset);
+
+ // If the current function is a lambda static invoker and
+ // the function we're about to call is a lambda call operator,
+ // skip the CheckInvoke, since the ThisPtr is a null pointer
+ // anyway.
+ if (!(S.Current->getFunction() &&
+ S.Current->getFunction()->isLambdaStaticInvoker() &&
+ Func->isLambdaCallOperator())) {
+ if (!CheckInvoke(S, OpPC, ThisPtr))
+ return false;
+ }
+
+ if (S.checkingPotentialConstantExpression())
+ return false;
+ }
+
+ if (!CheckCallable(S, OpPC, Func))
+ return false;
+
+ if (!CheckCallDepth(S, OpPC))
+ return false;
+
+ auto NewFrame = std::make_unique<InterpFrame>(S, Func, OpPC);
+ InterpFrame *FrameBefore = S.Current;
+ S.Current = NewFrame.get();
+
+ APValue CallResult;
+ // Note that we cannot assert(CallResult.hasValue()) here since
+ // Ret() above only sets the APValue if the curent frame doesn't
+ // have a caller set.
+ if (Interpret(S, CallResult)) {
+ NewFrame.release(); // Frame was delete'd already.
+ assert(S.Current == FrameBefore);
+ return true;
+ }
+
+ // Interpreting the function failed somehow. Reset to
+ // previous state.
+ S.Current = FrameBefore;
+ return false;
+}
+
+inline bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func) {
+ assert(Func->hasThisPointer());
+ assert(Func->isVirtual());
+ size_t ThisOffset =
+ Func->getArgSize() - (Func->hasRVO() ? primSize(PT_Ptr) : 0);
+ Pointer &ThisPtr = S.Stk.peek<Pointer>(ThisOffset);
+
+ const CXXRecordDecl *DynamicDecl =
+ ThisPtr.getDeclDesc()->getType()->getAsCXXRecordDecl();
+ const auto *StaticDecl = cast<CXXRecordDecl>(Func->getParentDecl());
+ const auto *InitialFunction = cast<CXXMethodDecl>(Func->getDecl());
+ const CXXMethodDecl *Overrider = S.getContext().getOverridingFunction(
+ DynamicDecl, StaticDecl, InitialFunction);
+
+ if (Overrider != InitialFunction) {
+ // DR1872: An instantiated virtual constexpr function can't be called in a
+ // constant expression (prior to C++20). We can still constant-fold such a
+ // call.
+ if (!S.getLangOpts().CPlusPlus20 && Overrider->isVirtual()) {
+ const Expr *E = S.Current->getExpr(OpPC);
+ S.CCEDiag(E, diag::note_constexpr_virtual_call) << E->getSourceRange();
+ }
+
+ Func = S.getContext().getOrCreateFunction(Overrider);
+
+ const CXXRecordDecl *ThisFieldDecl =
+ ThisPtr.getFieldDesc()->getType()->getAsCXXRecordDecl();
+ if (Func->getParentDecl()->isDerivedFrom(ThisFieldDecl)) {
+ // If the function we call is further DOWN the hierarchy than the
+ // FieldDesc of our pointer, just get the DeclDesc instead, which
+ // is the furthest we might go up in the hierarchy.
+ ThisPtr = ThisPtr.getDeclPtr();
+ }
+ }
+
+ return Call(S, OpPC, Func);
+}
+
+inline bool CallBI(InterpState &S, CodePtr &PC, const Function *Func,
+ const CallExpr *CE) {
+ auto NewFrame = std::make_unique<InterpFrame>(S, Func, PC);
+
+ InterpFrame *FrameBefore = S.Current;
+ S.Current = NewFrame.get();
+
+ if (InterpretBuiltin(S, PC, Func, CE)) {
+ NewFrame.release();
+ return true;
+ }
+ S.Current = FrameBefore;
+ return false;
+}
+
+inline bool CallPtr(InterpState &S, CodePtr OpPC) {
+ const FunctionPointer &FuncPtr = S.Stk.pop<FunctionPointer>();
+
+ const Function *F = FuncPtr.getFunction();
+ if (!F || !F->isConstexpr())
+ return false;
+
+ if (F->isVirtual())
+ return CallVirt(S, OpPC, F);
+
+ return Call(S, OpPC, F);
+}
+
+inline bool GetFnPtr(InterpState &S, CodePtr OpPC, const Function *Func) {
+ assert(Func);
+ S.Stk.push<FunctionPointer>(Func);
+ return true;
+}
+
+/// Just emit a diagnostic. The expression that caused emission of this
+/// op is not valid in a constant context.
+inline bool Invalid(InterpState &S, CodePtr OpPC) {
+ const SourceLocation &Loc = S.Current->getLocation(OpPC);
+ S.FFDiag(Loc, diag::note_invalid_subexpr_in_const_expr)
+ << S.Current->getRange(OpPC);
+ return false;
+}
+
+/// Same here, but only for casts.
+inline bool InvalidCast(InterpState &S, CodePtr OpPC, CastKind Kind) {
+ const SourceLocation &Loc = S.Current->getLocation(OpPC);
+ S.FFDiag(Loc, diag::note_constexpr_invalid_cast)
+ << static_cast<unsigned>(Kind) << S.Current->getRange(OpPC);
+ return false;
+}
+
+inline bool InvalidDeclRef(InterpState &S, CodePtr OpPC,
+ const DeclRefExpr *DR) {
+ assert(DR);
+ return CheckDeclRef(S, OpPC, DR);
+}
+
+template <PrimType Name, class T = typename PrimConv<Name>::T>
+inline bool OffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E) {
+ llvm::SmallVector<int64_t> ArrayIndices;
+ for (size_t I = 0; I != E->getNumExpressions(); ++I)
+ ArrayIndices.emplace_back(S.Stk.pop<int64_t>());
+
+ int64_t Result;
+ if (!InterpretOffsetOf(S, OpPC, E, ArrayIndices, Result))
+ return false;
+
+ S.Stk.push<T>(T::from(Result));
+
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// Read opcode arguments
+//===----------------------------------------------------------------------===//
+
+template <typename T> inline T ReadArg(InterpState &S, CodePtr &OpPC) {
+ if constexpr (std::is_pointer<T>::value) {
+ uint32_t ID = OpPC.read<uint32_t>();
+ return reinterpret_cast<T>(S.P.getNativePointer(ID));
+ } else {
+ return OpPC.read<T>();
+ }
+}
+
+template <> inline Floating ReadArg<Floating>(InterpState &S, CodePtr &OpPC) {
+ Floating F = Floating::deserialize(*OpPC);
+ OpPC += align(F.bytesToSerialize());
+ return F;
+}
} // namespace interp
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.cpp
index ed6e8910194d..a62128d9cfae 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.cpp
@@ -16,11 +16,16 @@
using namespace clang;
using namespace clang::interp;
-
-
void Block::addPointer(Pointer *P) {
- if (IsStatic)
+ assert(P);
+ if (IsStatic) {
+ assert(!Pointers);
return;
+ }
+
+#ifndef NDEBUG
+ assert(!hasPointer(P));
+#endif
if (Pointers)
Pointers->Prev = P;
P->Next = Pointers;
@@ -29,10 +34,19 @@ void Block::addPointer(Pointer *P) {
}
void Block::removePointer(Pointer *P) {
- if (IsStatic)
+ assert(P);
+ if (IsStatic) {
+ assert(!Pointers);
return;
+ }
+
+#ifndef NDEBUG
+ assert(hasPointer(P));
+#endif
+
if (Pointers == P)
Pointers = P->Next;
+
if (P->Prev)
P->Prev->Next = P->Next;
if (P->Next)
@@ -44,21 +58,38 @@ void Block::cleanup() {
(reinterpret_cast<DeadBlock *>(this + 1) - 1)->free();
}
-void Block::movePointer(Pointer *From, Pointer *To) {
- if (IsStatic)
+void Block::replacePointer(Pointer *Old, Pointer *New) {
+ assert(Old);
+ assert(New);
+ if (IsStatic) {
+ assert(!Pointers);
return;
- To->Prev = From->Prev;
- if (To->Prev)
- To->Prev->Next = To;
- To->Next = From->Next;
- if (To->Next)
- To->Next->Prev = To;
- if (Pointers == From)
- Pointers = To;
-
- From->Prev = nullptr;
- From->Next = nullptr;
+ }
+
+#ifndef NDEBUG
+ assert(hasPointer(Old));
+#endif
+
+ removePointer(Old);
+ addPointer(New);
+
+ Old->Pointee = nullptr;
+
+#ifndef NDEBUG
+ assert(!hasPointer(Old));
+ assert(hasPointer(New));
+#endif
+}
+
+#ifndef NDEBUG
+bool Block::hasPointer(const Pointer *P) const {
+ for (const Pointer *C = Pointers; C; C = C->Next) {
+ if (C == P)
+ return true;
+ }
+ return false;
}
+#endif
DeadBlock::DeadBlock(DeadBlock *&Root, Block *Blk)
: Root(Root), B(Blk->Desc, Blk->IsStatic, Blk->IsExtern, /*isDead=*/true) {
@@ -83,5 +114,5 @@ void DeadBlock::free() {
Next->Prev = Prev;
if (Root == this)
Root = Next;
- ::free(this);
+ std::free(this);
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h b/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h
index 0ccdef221c83..9db82567d2d5 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpBlock.h
@@ -25,28 +25,40 @@ namespace clang {
namespace interp {
class Block;
class DeadBlock;
-class Context;
class InterpState;
class Pointer;
-class Function;
enum PrimType : unsigned;
/// A memory block, either on the stack or in the heap.
///
-/// The storage described by the block immediately follows it in memory.
-class Block {
+/// The storage described by the block is immediately followed by
+/// optional metadata, which is followed by the actual data.
+///
+/// Block* rawData() data()
+/// │ │ │
+/// │ │ │
+/// ▼ ▼ ▼
+/// ┌───────────────┬─────────────────────────┬─────────────────┐
+/// │ Block │ Metadata │ Data │
+/// │ sizeof(Block) │ Desc->getMetadataSize() │ Desc->getSize() │
+/// └───────────────┴─────────────────────────┴─────────────────┘
+///
+/// Desc->getAllocSize() describes the size after the Block, i.e.
+/// the data size and the metadata size.
+///
+class Block final {
public:
- // Creates a new block.
- Block(const llvm::Optional<unsigned> &DeclID, Descriptor *Desc,
+ /// Creates a new block.
+ Block(const std::optional<unsigned> &DeclID, const Descriptor *Desc,
bool IsStatic = false, bool IsExtern = false)
: DeclID(DeclID), IsStatic(IsStatic), IsExtern(IsExtern), Desc(Desc) {}
- Block(Descriptor *Desc, bool IsStatic = false, bool IsExtern = false)
+ Block(const Descriptor *Desc, bool IsStatic = false, bool IsExtern = false)
: DeclID((unsigned)-1), IsStatic(IsStatic), IsExtern(IsExtern),
Desc(Desc) {}
/// Returns the block's descriptor.
- Descriptor *getDescriptor() const { return Desc; }
+ const Descriptor *getDescriptor() const { return Desc; }
/// Checks if the block has any live pointers.
bool hasPointers() const { return Pointers; }
/// Checks if the block is extern.
@@ -56,23 +68,54 @@ public:
/// Checks if the block is temporary.
bool isTemporary() const { return Desc->IsTemporary; }
/// Returns the size of the block.
- InterpSize getSize() const { return Desc->getAllocSize(); }
+ unsigned getSize() const { return Desc->getAllocSize(); }
/// Returns the declaration ID.
- llvm::Optional<unsigned> getDeclID() const { return DeclID; }
+ std::optional<unsigned> getDeclID() const { return DeclID; }
+ bool isInitialized() const { return IsInitialized; }
/// Returns a pointer to the stored data.
- char *data() { return reinterpret_cast<char *>(this + 1); }
+ /// You are allowed to read Desc->getSize() bytes from this address.
+ std::byte *data() {
+ // rawData might contain metadata as well.
+ size_t DataOffset = Desc->getMetadataSize();
+ return rawData() + DataOffset;
+ }
+ const std::byte *data() const {
+ // rawData might contain metadata as well.
+ size_t DataOffset = Desc->getMetadataSize();
+ return rawData() + DataOffset;
+ }
+
+ /// Returns a pointer to the raw data, including metadata.
+ /// You are allowed to read Desc->getAllocSize() bytes from this address.
+ std::byte *rawData() {
+ return reinterpret_cast<std::byte *>(this) + sizeof(Block);
+ }
+ const std::byte *rawData() const {
+ return reinterpret_cast<const std::byte *>(this) + sizeof(Block);
+ }
/// Returns a view over the data.
template <typename T>
T &deref() { return *reinterpret_cast<T *>(data()); }
+ template <typename T> const T &deref() const {
+ return *reinterpret_cast<const T *>(data());
+ }
/// Invokes the constructor.
void invokeCtor() {
- std::memset(data(), 0, getSize());
+ std::memset(rawData(), 0, Desc->getAllocSize());
if (Desc->CtorFn)
Desc->CtorFn(this, data(), Desc->IsConst, Desc->IsMutable,
/*isActive=*/true, Desc);
+ IsInitialized = true;
+ }
+
+ /// Invokes the Destructor.
+ void invokeDtor() {
+ if (Desc->DtorFn)
+ Desc->DtorFn(this, data(), Desc);
+ IsInitialized = false;
}
protected:
@@ -80,42 +123,50 @@ protected:
friend class DeadBlock;
friend class InterpState;
- Block(Descriptor *Desc, bool IsExtern, bool IsStatic, bool IsDead)
- : IsStatic(IsStatic), IsExtern(IsExtern), IsDead(true), Desc(Desc) {}
+ Block(const Descriptor *Desc, bool IsExtern, bool IsStatic, bool IsDead)
+ : IsStatic(IsStatic), IsExtern(IsExtern), IsDead(true), Desc(Desc) {}
- // Deletes a dead block at the end of its lifetime.
+ /// Deletes a dead block at the end of its lifetime.
void cleanup();
- // Pointer chain management.
+ /// Pointer chain management.
void addPointer(Pointer *P);
void removePointer(Pointer *P);
- void movePointer(Pointer *From, Pointer *To);
+ void replacePointer(Pointer *Old, Pointer *New);
+#ifndef NDEBUG
+ bool hasPointer(const Pointer *P) const;
+#endif
/// Start of the chain of pointers.
Pointer *Pointers = nullptr;
/// Unique identifier of the declaration.
- llvm::Optional<unsigned> DeclID;
+ std::optional<unsigned> DeclID;
/// Flag indicating if the block has static storage duration.
bool IsStatic = false;
/// Flag indicating if the block is an extern.
bool IsExtern = false;
- /// Flag indicating if the pointer is dead.
+ /// Flag indicating if the pointer is dead. This is only ever
+ /// set once, when converting the Block to a DeadBlock.
bool IsDead = false;
+ /// Flag indicating if the block contents have been initialized
+ /// via invokeCtor.
+ bool IsInitialized = false;
/// Pointer to the stack slot descriptor.
- Descriptor *Desc;
+ const Descriptor *Desc;
};
/// Descriptor for a dead block.
///
/// Dead blocks are chained in a double-linked list to deallocate them
/// whenever pointers become dead.
-class DeadBlock {
+class DeadBlock final {
public:
/// Copies the block.
DeadBlock(DeadBlock *&Root, Block *Blk);
/// Returns a pointer to the stored data.
- char *data() { return B.data(); }
+ std::byte *data() { return B.data(); }
+ std::byte *rawData() { return B.rawData(); }
private:
friend class Block;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpBuiltin.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpBuiltin.cpp
new file mode 100644
index 000000000000..754ca96b0c64
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpBuiltin.cpp
@@ -0,0 +1,950 @@
+//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "../ExprConstShared.h"
+#include "Boolean.h"
+#include "Interp.h"
+#include "PrimType.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/TargetInfo.h"
+
+namespace clang {
+namespace interp {
+
+template <typename T>
+static T getParam(const InterpFrame *Frame, unsigned Index) {
+ assert(Frame->getFunction()->getNumParams() > Index);
+ unsigned Offset = Frame->getFunction()->getParamOffset(Index);
+ return Frame->getParam<T>(Offset);
+}
+
+PrimType getIntPrimType(const InterpState &S) {
+ const TargetInfo &TI = S.getCtx().getTargetInfo();
+ unsigned IntWidth = TI.getIntWidth();
+
+ if (IntWidth == 32)
+ return PT_Sint32;
+ else if (IntWidth == 16)
+ return PT_Sint16;
+ llvm_unreachable("Int isn't 16 or 32 bit?");
+}
+
+PrimType getLongPrimType(const InterpState &S) {
+ const TargetInfo &TI = S.getCtx().getTargetInfo();
+ unsigned LongWidth = TI.getLongWidth();
+
+ if (LongWidth == 64)
+ return PT_Sint64;
+ else if (LongWidth == 32)
+ return PT_Sint32;
+ else if (LongWidth == 16)
+ return PT_Sint16;
+ llvm_unreachable("long isn't 16, 32 or 64 bit?");
+}
+
+/// Peek an integer value from the stack into an APSInt.
+static APSInt peekToAPSInt(InterpStack &Stk, PrimType T, size_t Offset = 0) {
+ if (Offset == 0)
+ Offset = align(primSize(T));
+
+ APSInt R;
+ INT_TYPE_SWITCH(T, {
+ T Val = Stk.peek<T>(Offset);
+ R = APSInt(
+ APInt(Val.bitWidth(), static_cast<uint64_t>(Val), T::isSigned()));
+ });
+
+ return R;
+}
+
+/// Pushes \p Val to the stack, as a target-dependent 'int'.
+static void pushInt(InterpState &S, int32_t Val) {
+ PrimType IntType = getIntPrimType(S);
+ if (IntType == PT_Sint32)
+ S.Stk.push<Integral<32, true>>(Integral<32, true>::from(Val));
+ else if (IntType == PT_Sint16)
+ S.Stk.push<Integral<16, true>>(Integral<16, true>::from(Val));
+ else
+ llvm_unreachable("Int isn't 16 or 32 bit?");
+}
+
+static void pushAPSInt(InterpState &S, const APSInt &Val) {
+ bool Signed = Val.isSigned();
+
+ if (Signed) {
+ switch (Val.getBitWidth()) {
+ case 64:
+ S.Stk.push<Integral<64, true>>(
+ Integral<64, true>::from(Val.getSExtValue()));
+ break;
+ case 32:
+ S.Stk.push<Integral<32, true>>(
+ Integral<32, true>::from(Val.getSExtValue()));
+ break;
+ case 16:
+ S.Stk.push<Integral<16, true>>(
+ Integral<16, true>::from(Val.getSExtValue()));
+ break;
+ case 8:
+ S.Stk.push<Integral<8, true>>(
+ Integral<8, true>::from(Val.getSExtValue()));
+ break;
+ default:
+ llvm_unreachable("Invalid integer bitwidth");
+ }
+ return;
+ }
+
+ // Unsigned.
+ switch (Val.getBitWidth()) {
+ case 64:
+ S.Stk.push<Integral<64, false>>(
+ Integral<64, false>::from(Val.getZExtValue()));
+ break;
+ case 32:
+ S.Stk.push<Integral<32, false>>(
+ Integral<32, false>::from(Val.getZExtValue()));
+ break;
+ case 16:
+ S.Stk.push<Integral<16, false>>(
+ Integral<16, false>::from(Val.getZExtValue()));
+ break;
+ case 8:
+ S.Stk.push<Integral<8, false>>(
+ Integral<8, false>::from(Val.getZExtValue()));
+ break;
+ default:
+ llvm_unreachable("Invalid integer bitwidth");
+ }
+}
+
+/// Pushes \p Val to the stack, as a target-dependent 'long'.
+static void pushLong(InterpState &S, int64_t Val) {
+ PrimType LongType = getLongPrimType(S);
+ if (LongType == PT_Sint64)
+ S.Stk.push<Integral<64, true>>(Integral<64, true>::from(Val));
+ else if (LongType == PT_Sint32)
+ S.Stk.push<Integral<32, true>>(Integral<32, true>::from(Val));
+ else if (LongType == PT_Sint16)
+ S.Stk.push<Integral<16, true>>(Integral<16, true>::from(Val));
+ else
+ llvm_unreachable("Long isn't 16, 32 or 64 bit?");
+}
+
+static void pushSizeT(InterpState &S, uint64_t Val) {
+ const TargetInfo &TI = S.getCtx().getTargetInfo();
+ unsigned SizeTWidth = TI.getTypeWidth(TI.getSizeType());
+
+ switch (SizeTWidth) {
+ case 64:
+ S.Stk.push<Integral<64, false>>(Integral<64, false>::from(Val));
+ break;
+ case 32:
+ S.Stk.push<Integral<32, false>>(Integral<32, false>::from(Val));
+ break;
+ case 16:
+ S.Stk.push<Integral<16, false>>(Integral<16, false>::from(Val));
+ break;
+ default:
+ llvm_unreachable("We don't handle this size_t size.");
+ }
+}
+
+static bool retPrimValue(InterpState &S, CodePtr OpPC, APValue &Result,
+ std::optional<PrimType> &T) {
+ if (!T)
+ return RetVoid(S, OpPC, Result);
+
+#define RET_CASE(X) \
+ case X: \
+ return Ret<X>(S, OpPC, Result);
+ switch (*T) {
+ RET_CASE(PT_Ptr);
+ RET_CASE(PT_FnPtr);
+ RET_CASE(PT_Float);
+ RET_CASE(PT_Bool);
+ RET_CASE(PT_Sint8);
+ RET_CASE(PT_Uint8);
+ RET_CASE(PT_Sint16);
+ RET_CASE(PT_Uint16);
+ RET_CASE(PT_Sint32);
+ RET_CASE(PT_Uint32);
+ RET_CASE(PT_Sint64);
+ RET_CASE(PT_Uint64);
+ default:
+ llvm_unreachable("Unsupported return type for builtin function");
+ }
+#undef RET_CASE
+}
+
+static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame) {
+ const Pointer &A = getParam<Pointer>(Frame, 0);
+ const Pointer &B = getParam<Pointer>(Frame, 1);
+
+ if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
+ return false;
+
+ assert(A.getFieldDesc()->isPrimitiveArray());
+ assert(B.getFieldDesc()->isPrimitiveArray());
+
+ unsigned IndexA = A.getIndex();
+ unsigned IndexB = B.getIndex();
+ int32_t Result = 0;
+ for (;; ++IndexA, ++IndexB) {
+ const Pointer &PA = A.atIndex(IndexA);
+ const Pointer &PB = B.atIndex(IndexB);
+ if (!CheckRange(S, OpPC, PA, AK_Read) ||
+ !CheckRange(S, OpPC, PB, AK_Read)) {
+ return false;
+ }
+ uint8_t CA = PA.deref<uint8_t>();
+ uint8_t CB = PB.deref<uint8_t>();
+
+ if (CA > CB) {
+ Result = 1;
+ break;
+ } else if (CA < CB) {
+ Result = -1;
+ break;
+ }
+ if (CA == 0 || CB == 0)
+ break;
+ }
+
+ pushInt(S, Result);
+ return true;
+}
+
+static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame) {
+ const Pointer &StrPtr = getParam<Pointer>(Frame, 0);
+
+ if (!CheckArray(S, OpPC, StrPtr))
+ return false;
+
+ if (!CheckLive(S, OpPC, StrPtr, AK_Read))
+ return false;
+
+ if (!CheckDummy(S, OpPC, StrPtr))
+ return false;
+
+ assert(StrPtr.getFieldDesc()->isPrimitiveArray());
+
+ size_t Len = 0;
+ for (size_t I = StrPtr.getIndex();; ++I, ++Len) {
+ const Pointer &ElemPtr = StrPtr.atIndex(I);
+
+ if (!CheckRange(S, OpPC, ElemPtr, AK_Read))
+ return false;
+
+ uint8_t Val = ElemPtr.deref<uint8_t>();
+ if (Val == 0)
+ break;
+ }
+
+ pushSizeT(S, Len);
+ return true;
+}
+
+static bool interp__builtin_nan(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame, const Function *F,
+ bool Signaling) {
+ const Pointer &Arg = getParam<Pointer>(Frame, 0);
+
+ if (!CheckLoad(S, OpPC, Arg))
+ return false;
+
+ assert(Arg.getFieldDesc()->isPrimitiveArray());
+
+ // Convert the given string to an integer using StringRef's API.
+ llvm::APInt Fill;
+ std::string Str;
+ assert(Arg.getNumElems() >= 1);
+ for (unsigned I = 0;; ++I) {
+ const Pointer &Elem = Arg.atIndex(I);
+
+ if (!CheckLoad(S, OpPC, Elem))
+ return false;
+
+ if (Elem.deref<int8_t>() == 0)
+ break;
+
+ Str += Elem.deref<char>();
+ }
+
+ // Treat empty strings as if they were zero.
+ if (Str.empty())
+ Fill = llvm::APInt(32, 0);
+ else if (StringRef(Str).getAsInteger(0, Fill))
+ return false;
+
+ const llvm::fltSemantics &TargetSemantics =
+ S.getCtx().getFloatTypeSemantics(F->getDecl()->getReturnType());
+
+ Floating Result;
+ if (S.getCtx().getTargetInfo().isNan2008()) {
+ if (Signaling)
+ Result = Floating(
+ llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
+ else
+ Result = Floating(
+ llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
+ } else {
+ // Prior to IEEE 754-2008, architectures were allowed to choose whether
+ // the first bit of their significand was set for qNaN or sNaN. MIPS chose
+ // a different encoding to what became a standard in 2008, and for pre-
+ // 2008 revisions, MIPS interpreted sNaN-2008 as qNan and qNaN-2008 as
+ // sNaN. This is now known as "legacy NaN" encoding.
+ if (Signaling)
+ Result = Floating(
+ llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
+ else
+ Result = Floating(
+ llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
+ }
+
+ S.Stk.push<Floating>(Result);
+ return true;
+}
+
+static bool interp__builtin_inf(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame, const Function *F) {
+ const llvm::fltSemantics &TargetSemantics =
+ S.getCtx().getFloatTypeSemantics(F->getDecl()->getReturnType());
+
+ S.Stk.push<Floating>(Floating::getInf(TargetSemantics));
+ return true;
+}
+
+static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *F) {
+ const Floating &Arg1 = getParam<Floating>(Frame, 0);
+ const Floating &Arg2 = getParam<Floating>(Frame, 1);
+
+ APFloat Copy = Arg1.getAPFloat();
+ Copy.copySign(Arg2.getAPFloat());
+ S.Stk.push<Floating>(Floating(Copy));
+
+ return true;
+}
+
+static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame, const Function *F) {
+ const Floating &LHS = getParam<Floating>(Frame, 0);
+ const Floating &RHS = getParam<Floating>(Frame, 1);
+
+ Floating Result;
+
+ // When comparing zeroes, return -0.0 if one of the zeroes is negative.
+ if (LHS.isZero() && RHS.isZero() && RHS.isNegative())
+ Result = RHS;
+ else if (LHS.isNan() || RHS < LHS)
+ Result = RHS;
+ else
+ Result = LHS;
+
+ S.Stk.push<Floating>(Result);
+ return true;
+}
+
+static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *Func) {
+ const Floating &LHS = getParam<Floating>(Frame, 0);
+ const Floating &RHS = getParam<Floating>(Frame, 1);
+
+ Floating Result;
+
+ // When comparing zeroes, return +0.0 if one of the zeroes is positive.
+ if (LHS.isZero() && RHS.isZero() && LHS.isNegative())
+ Result = RHS;
+ else if (LHS.isNan() || RHS > LHS)
+ Result = RHS;
+ else
+ Result = LHS;
+
+ S.Stk.push<Floating>(Result);
+ return true;
+}
+
+/// Defined as __builtin_isnan(...), to accommodate the fact that it can
+/// take a float, double, long double, etc.
+/// But for us, that's all a Floating anyway.
+static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame, const Function *F) {
+ const Floating &Arg = S.Stk.peek<Floating>();
+
+ pushInt(S, Arg.isNan());
+ return true;
+}
+
+static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *F) {
+ const Floating &Arg = S.Stk.peek<Floating>();
+
+ pushInt(S, Arg.isSignaling());
+ return true;
+}
+
+static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame, const Function *F,
+ bool CheckSign) {
+ const Floating &Arg = S.Stk.peek<Floating>();
+ bool IsInf = Arg.isInf();
+
+ if (CheckSign)
+ pushInt(S, IsInf ? (Arg.isNegative() ? -1 : 1) : 0);
+ else
+ pushInt(S, Arg.isInf());
+ return true;
+}
+
+static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *F) {
+ const Floating &Arg = S.Stk.peek<Floating>();
+
+ pushInt(S, Arg.isFinite());
+ return true;
+}
+
+static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *F) {
+ const Floating &Arg = S.Stk.peek<Floating>();
+
+ pushInt(S, Arg.isNormal());
+ return true;
+}
+
+static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *F) {
+ const Floating &Arg = S.Stk.peek<Floating>();
+
+ pushInt(S, Arg.isDenormal());
+ return true;
+}
+
+static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *F) {
+ const Floating &Arg = S.Stk.peek<Floating>();
+
+ pushInt(S, Arg.isZero());
+ return true;
+}
+
+/// First parameter to __builtin_isfpclass is the floating value, the
+/// second one is an integral value.
+static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *Func,
+ const CallExpr *Call) {
+ PrimType FPClassArgT = *S.getContext().classify(Call->getArg(1)->getType());
+ APSInt FPClassArg = peekToAPSInt(S.Stk, FPClassArgT);
+ const Floating &F =
+ S.Stk.peek<Floating>(align(primSize(FPClassArgT) + primSize(PT_Float)));
+
+ int32_t Result =
+ static_cast<int32_t>((F.classify() & FPClassArg).getZExtValue());
+ pushInt(S, Result);
+
+ return true;
+}
+
+/// Five int values followed by one floating value.
+static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *Func) {
+ const Floating &Val = S.Stk.peek<Floating>();
+
+ unsigned Index;
+ switch (Val.getCategory()) {
+ case APFloat::fcNaN:
+ Index = 0;
+ break;
+ case APFloat::fcInfinity:
+ Index = 1;
+ break;
+ case APFloat::fcNormal:
+ Index = Val.isDenormal() ? 3 : 2;
+ break;
+ case APFloat::fcZero:
+ Index = 4;
+ break;
+ }
+
+ // The last argument is first on the stack.
+ assert(Index <= 4);
+ unsigned IntSize = primSize(getIntPrimType(S));
+ unsigned Offset =
+ align(primSize(PT_Float)) + ((1 + (4 - Index)) * align(IntSize));
+
+ APSInt I = peekToAPSInt(S.Stk, getIntPrimType(S), Offset);
+ pushInt(S, I.getZExtValue());
+ return true;
+}
+
+// The C standard says "fabs raises no floating-point exceptions,
+// even if x is a signaling NaN. The returned value is independent of
+// the current rounding direction mode." Therefore constant folding can
+// proceed without regard to the floating point settings.
+// Reference, WG14 N2478 F.10.4.3
+static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *Func) {
+ const Floating &Val = getParam<Floating>(Frame, 0);
+
+ S.Stk.push<Floating>(Floating::abs(Val));
+ return true;
+}
+
+static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *Func,
+ const CallExpr *Call) {
+ PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+ APSInt Val = peekToAPSInt(S.Stk, ArgT);
+ pushInt(S, Val.popcount());
+ return true;
+}
+
+static bool interp__builtin_parity(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *Func, const CallExpr *Call) {
+ PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+ APSInt Val = peekToAPSInt(S.Stk, ArgT);
+ pushInt(S, Val.popcount() % 2);
+ return true;
+}
+
+static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *Func, const CallExpr *Call) {
+ PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+ APSInt Val = peekToAPSInt(S.Stk, ArgT);
+ pushInt(S, Val.getBitWidth() - Val.getSignificantBits());
+ return true;
+}
+
+static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *Func,
+ const CallExpr *Call) {
+ PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+ APSInt Val = peekToAPSInt(S.Stk, ArgT);
+ pushAPSInt(S, APSInt(Val.reverseBits(), /*IsUnsigned=*/true));
+ return true;
+}
+
+static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *Func,
+ const CallExpr *Call) {
+ // This is an unevaluated call, so there are no arguments on the stack.
+ assert(Call->getNumArgs() == 1);
+ const Expr *Arg = Call->getArg(0);
+
+ GCCTypeClass ResultClass =
+ EvaluateBuiltinClassifyType(Arg->getType(), S.getLangOpts());
+ int32_t ReturnVal = static_cast<int32_t>(ResultClass);
+ pushInt(S, ReturnVal);
+ return true;
+}
+
+// __builtin_expect(long, long)
+// __builtin_expect_with_probability(long, long, double)
+static bool interp__builtin_expect(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *Func, const CallExpr *Call) {
+ // The return value is simply the value of the first parameter.
+ // We ignore the probability.
+ unsigned NumArgs = Call->getNumArgs();
+ assert(NumArgs == 2 || NumArgs == 3);
+
+ PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+ unsigned Offset = align(primSize(getLongPrimType(S))) * 2;
+ if (NumArgs == 3)
+ Offset += align(primSize(PT_Float));
+
+ APSInt Val = peekToAPSInt(S.Stk, ArgT, Offset);
+ pushLong(S, Val.getSExtValue());
+ return true;
+}
+
+/// rotateleft(value, amount)
+static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *Func, const CallExpr *Call,
+ bool Right) {
+ PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+ assert(ArgT == *S.getContext().classify(Call->getArg(1)->getType()));
+
+ APSInt Amount = peekToAPSInt(S.Stk, ArgT);
+ APSInt Value = peekToAPSInt(S.Stk, ArgT, align(primSize(ArgT)) * 2);
+
+ APSInt Result;
+ if (Right)
+ Result = APSInt(Value.rotr(Amount.urem(Value.getBitWidth())),
+ /*IsUnsigned=*/true);
+ else // Left.
+ Result = APSInt(Value.rotl(Amount.urem(Value.getBitWidth())),
+ /*IsUnsigned=*/true);
+
+ pushAPSInt(S, Result);
+ return true;
+}
+
+static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame, const Function *Func,
+ const CallExpr *Call) {
+ PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
+ APSInt Value = peekToAPSInt(S.Stk, ArgT);
+
+ uint64_t N = Value.countr_zero();
+ pushInt(S, N == Value.getBitWidth() ? 0 : N + 1);
+ return true;
+}
+
+static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC,
+ const InterpFrame *Frame,
+ const Function *Func,
+ const CallExpr *Call) {
+ PrimType PtrT =
+ S.getContext().classify(Call->getArg(0)->getType()).value_or(PT_Ptr);
+
+ if (PtrT == PT_FnPtr) {
+ const FunctionPointer &Arg = S.Stk.peek<FunctionPointer>();
+ S.Stk.push<FunctionPointer>(Arg);
+ } else if (PtrT == PT_Ptr) {
+ const Pointer &Arg = S.Stk.peek<Pointer>();
+ S.Stk.push<Pointer>(Arg);
+ } else {
+ assert(false && "Unsupported pointer type passed to __builtin_addressof()");
+ }
+ return true;
+}
+
+bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
+ const CallExpr *Call) {
+ InterpFrame *Frame = S.Current;
+ APValue Dummy;
+
+ std::optional<PrimType> ReturnT = S.getContext().classify(Call->getType());
+
+ // If classify failed, we assume void.
+ assert(ReturnT || Call->getType()->isVoidType());
+
+ switch (F->getBuiltinID()) {
+ case Builtin::BI__builtin_is_constant_evaluated:
+ S.Stk.push<Boolean>(Boolean::from(S.inConstantContext()));
+ break;
+ case Builtin::BI__builtin_assume:
+ break;
+ case Builtin::BI__builtin_strcmp:
+ if (!interp__builtin_strcmp(S, OpPC, Frame))
+ return false;
+ break;
+ case Builtin::BI__builtin_strlen:
+ if (!interp__builtin_strlen(S, OpPC, Frame))
+ return false;
+ break;
+ case Builtin::BI__builtin_nan:
+ case Builtin::BI__builtin_nanf:
+ case Builtin::BI__builtin_nanl:
+ case Builtin::BI__builtin_nanf16:
+ case Builtin::BI__builtin_nanf128:
+ if (!interp__builtin_nan(S, OpPC, Frame, F, /*Signaling=*/false))
+ return false;
+ break;
+ case Builtin::BI__builtin_nans:
+ case Builtin::BI__builtin_nansf:
+ case Builtin::BI__builtin_nansl:
+ case Builtin::BI__builtin_nansf16:
+ case Builtin::BI__builtin_nansf128:
+ if (!interp__builtin_nan(S, OpPC, Frame, F, /*Signaling=*/true))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_huge_val:
+ case Builtin::BI__builtin_huge_valf:
+ case Builtin::BI__builtin_huge_vall:
+ case Builtin::BI__builtin_huge_valf16:
+ case Builtin::BI__builtin_huge_valf128:
+ case Builtin::BI__builtin_inf:
+ case Builtin::BI__builtin_inff:
+ case Builtin::BI__builtin_infl:
+ case Builtin::BI__builtin_inff16:
+ case Builtin::BI__builtin_inff128:
+ if (!interp__builtin_inf(S, OpPC, Frame, F))
+ return false;
+ break;
+ case Builtin::BI__builtin_copysign:
+ case Builtin::BI__builtin_copysignf:
+ case Builtin::BI__builtin_copysignl:
+ case Builtin::BI__builtin_copysignf128:
+ if (!interp__builtin_copysign(S, OpPC, Frame, F))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_fmin:
+ case Builtin::BI__builtin_fminf:
+ case Builtin::BI__builtin_fminl:
+ case Builtin::BI__builtin_fminf16:
+ case Builtin::BI__builtin_fminf128:
+ if (!interp__builtin_fmin(S, OpPC, Frame, F))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_fmax:
+ case Builtin::BI__builtin_fmaxf:
+ case Builtin::BI__builtin_fmaxl:
+ case Builtin::BI__builtin_fmaxf16:
+ case Builtin::BI__builtin_fmaxf128:
+ if (!interp__builtin_fmax(S, OpPC, Frame, F))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_isnan:
+ if (!interp__builtin_isnan(S, OpPC, Frame, F))
+ return false;
+ break;
+ case Builtin::BI__builtin_issignaling:
+ if (!interp__builtin_issignaling(S, OpPC, Frame, F))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_isinf:
+ if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/false))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_isinf_sign:
+ if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/true))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_isfinite:
+ if (!interp__builtin_isfinite(S, OpPC, Frame, F))
+ return false;
+ break;
+ case Builtin::BI__builtin_isnormal:
+ if (!interp__builtin_isnormal(S, OpPC, Frame, F))
+ return false;
+ break;
+ case Builtin::BI__builtin_issubnormal:
+ if (!interp__builtin_issubnormal(S, OpPC, Frame, F))
+ return false;
+ break;
+ case Builtin::BI__builtin_iszero:
+ if (!interp__builtin_iszero(S, OpPC, Frame, F))
+ return false;
+ break;
+ case Builtin::BI__builtin_isfpclass:
+ if (!interp__builtin_isfpclass(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+ case Builtin::BI__builtin_fpclassify:
+ if (!interp__builtin_fpclassify(S, OpPC, Frame, F))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_fabs:
+ case Builtin::BI__builtin_fabsf:
+ case Builtin::BI__builtin_fabsl:
+ case Builtin::BI__builtin_fabsf128:
+ if (!interp__builtin_fabs(S, OpPC, Frame, F))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_popcount:
+ case Builtin::BI__builtin_popcountl:
+ case Builtin::BI__builtin_popcountll:
+ case Builtin::BI__popcnt16: // Microsoft variants of popcount
+ case Builtin::BI__popcnt:
+ case Builtin::BI__popcnt64:
+ if (!interp__builtin_popcount(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_parity:
+ case Builtin::BI__builtin_parityl:
+ case Builtin::BI__builtin_parityll:
+ if (!interp__builtin_parity(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_clrsb:
+ case Builtin::BI__builtin_clrsbl:
+ case Builtin::BI__builtin_clrsbll:
+ if (!interp__builtin_clrsb(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_bitreverse8:
+ case Builtin::BI__builtin_bitreverse16:
+ case Builtin::BI__builtin_bitreverse32:
+ case Builtin::BI__builtin_bitreverse64:
+ if (!interp__builtin_bitreverse(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_classify_type:
+ if (!interp__builtin_classify_type(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_expect:
+ case Builtin::BI__builtin_expect_with_probability:
+ if (!interp__builtin_expect(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_rotateleft8:
+ case Builtin::BI__builtin_rotateleft16:
+ case Builtin::BI__builtin_rotateleft32:
+ case Builtin::BI__builtin_rotateleft64:
+ case Builtin::BI_rotl8: // Microsoft variants of rotate left
+ case Builtin::BI_rotl16:
+ case Builtin::BI_rotl:
+ case Builtin::BI_lrotl:
+ case Builtin::BI_rotl64:
+ if (!interp__builtin_rotate(S, OpPC, Frame, F, Call, /*Right=*/false))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_rotateright8:
+ case Builtin::BI__builtin_rotateright16:
+ case Builtin::BI__builtin_rotateright32:
+ case Builtin::BI__builtin_rotateright64:
+ case Builtin::BI_rotr8: // Microsoft variants of rotate right
+ case Builtin::BI_rotr16:
+ case Builtin::BI_rotr:
+ case Builtin::BI_lrotr:
+ case Builtin::BI_rotr64:
+ if (!interp__builtin_rotate(S, OpPC, Frame, F, Call, /*Right=*/true))
+ return false;
+ break;
+
+ case Builtin::BI__builtin_ffs:
+ case Builtin::BI__builtin_ffsl:
+ case Builtin::BI__builtin_ffsll:
+ if (!interp__builtin_ffs(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+ case Builtin::BIaddressof:
+ case Builtin::BI__addressof:
+ case Builtin::BI__builtin_addressof:
+ if (!interp__builtin_addressof(S, OpPC, Frame, F, Call))
+ return false;
+ break;
+
+ default:
+ return false;
+ }
+
+ return retPrimValue(S, OpPC, Dummy, ReturnT);
+}
+
+bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
+ llvm::ArrayRef<int64_t> ArrayIndices,
+ int64_t &IntResult) {
+ CharUnits Result;
+ unsigned N = E->getNumComponents();
+ assert(N > 0);
+
+ unsigned ArrayIndex = 0;
+ QualType CurrentType = E->getTypeSourceInfo()->getType();
+ for (unsigned I = 0; I != N; ++I) {
+ const OffsetOfNode &Node = E->getComponent(I);
+ switch (Node.getKind()) {
+ case OffsetOfNode::Field: {
+ const FieldDecl *MemberDecl = Node.getField();
+ const RecordType *RT = CurrentType->getAs<RecordType>();
+ if (!RT)
+ return false;
+ RecordDecl *RD = RT->getDecl();
+ if (RD->isInvalidDecl())
+ return false;
+ const ASTRecordLayout &RL = S.getCtx().getASTRecordLayout(RD);
+ unsigned FieldIndex = MemberDecl->getFieldIndex();
+ assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type");
+ Result += S.getCtx().toCharUnitsFromBits(RL.getFieldOffset(FieldIndex));
+ CurrentType = MemberDecl->getType().getNonReferenceType();
+ break;
+ }
+ case OffsetOfNode::Array: {
+ // When generating bytecode, we put all the index expressions as Sint64 on
+ // the stack.
+ int64_t Index = ArrayIndices[ArrayIndex];
+ const ArrayType *AT = S.getCtx().getAsArrayType(CurrentType);
+ if (!AT)
+ return false;
+ CurrentType = AT->getElementType();
+ CharUnits ElementSize = S.getCtx().getTypeSizeInChars(CurrentType);
+ Result += Index * ElementSize;
+ ++ArrayIndex;
+ break;
+ }
+ case OffsetOfNode::Base: {
+ const CXXBaseSpecifier *BaseSpec = Node.getBase();
+ if (BaseSpec->isVirtual())
+ return false;
+
+ // Find the layout of the class whose base we are looking into.
+ const RecordType *RT = CurrentType->getAs<RecordType>();
+ if (!RT)
+ return false;
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->isInvalidDecl())
+ return false;
+ const ASTRecordLayout &RL = S.getCtx().getASTRecordLayout(RD);
+
+ // Find the base class itself.
+ CurrentType = BaseSpec->getType();
+ const RecordType *BaseRT = CurrentType->getAs<RecordType>();
+ if (!BaseRT)
+ return false;
+
+ // Add the offset to the base.
+ Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
+ break;
+ }
+ case OffsetOfNode::Identifier:
+ llvm_unreachable("Dependent OffsetOfExpr?");
+ }
+ }
+
+ IntResult = Result.getQuantity();
+
+ return true;
+}
+
+bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC,
+ const Pointer &Ptr, const APSInt &IntValue) {
+
+ const Record *R = Ptr.getRecord();
+ assert(R);
+ assert(R->getNumFields() == 1);
+
+ unsigned FieldOffset = R->getField(0u)->Offset;
+ const Pointer &FieldPtr = Ptr.atField(FieldOffset);
+ PrimType FieldT = *S.getContext().classify(FieldPtr.getType());
+
+ INT_TYPE_SWITCH(FieldT,
+ FieldPtr.deref<T>() = T::from(IntValue.getSExtValue()));
+ FieldPtr.initialize();
+ return true;
+}
+
+} // namespace interp
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.cpp
index 9d01bf0333fe..d460d7ea3710 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.cpp
@@ -7,44 +7,89 @@
//===----------------------------------------------------------------------===//
#include "InterpFrame.h"
+#include "Boolean.h"
+#include "Floating.h"
#include "Function.h"
-#include "Interp.h"
#include "InterpStack.h"
+#include "InterpState.h"
+#include "Pointer.h"
#include "PrimType.h"
#include "Program.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
using namespace clang;
using namespace clang::interp;
-InterpFrame::InterpFrame(InterpState &S, Function *Func, InterpFrame *Caller,
- CodePtr RetPC, Pointer &&This)
- : Caller(Caller), S(S), Func(Func), This(std::move(This)), RetPC(RetPC),
- ArgSize(Func ? Func->getArgSize() : 0),
+InterpFrame::InterpFrame(InterpState &S, const Function *Func,
+ InterpFrame *Caller, CodePtr RetPC)
+ : Caller(Caller), S(S), Depth(Caller ? Caller->Depth + 1 : 0), Func(Func),
+ RetPC(RetPC), ArgSize(Func ? Func->getArgSize() : 0),
Args(static_cast<char *>(S.Stk.top())), FrameOffset(S.Stk.size()) {
- if (Func) {
- if (unsigned FrameSize = Func->getFrameSize()) {
- Locals = std::make_unique<char[]>(FrameSize);
- for (auto &Scope : Func->scopes()) {
- for (auto &Local : Scope.locals()) {
- Block *B = new (localBlock(Local.Offset)) Block(Local.Desc);
- B->invokeCtor();
- }
- }
+ if (!Func)
+ return;
+
+ unsigned FrameSize = Func->getFrameSize();
+ if (FrameSize == 0)
+ return;
+
+ Locals = std::make_unique<char[]>(FrameSize);
+ for (auto &Scope : Func->scopes()) {
+ for (auto &Local : Scope.locals()) {
+ Block *B = new (localBlock(Local.Offset)) Block(Local.Desc);
+ B->invokeCtor();
+ InlineDescriptor *ID = localInlineDesc(Local.Offset);
+ ID->Desc = Local.Desc;
+ ID->IsActive = true;
+ ID->Offset = sizeof(InlineDescriptor);
+ ID->IsBase = false;
+ ID->IsFieldMutable = false;
+ ID->IsConst = false;
+ ID->IsInitialized = false;
}
}
}
+InterpFrame::InterpFrame(InterpState &S, const Function *Func, CodePtr RetPC)
+ : InterpFrame(S, Func, S.Current, RetPC) {
+ // As per our calling convention, the this pointer is
+ // part of the ArgSize.
+ // If the function has RVO, the RVO pointer is first.
+ // If the fuction has a This pointer, that one is next.
+ // Then follow the actual arguments (but those are handled
+ // in getParamPointer()).
+ if (Func->hasRVO())
+ RVOPtr = stackRef<Pointer>(0);
+
+ if (Func->hasThisPointer()) {
+ if (Func->hasRVO())
+ This = stackRef<Pointer>(sizeof(Pointer));
+ else
+ This = stackRef<Pointer>(0);
+ }
+}
+
InterpFrame::~InterpFrame() {
- if (Func && Func->isConstructor() && This.isBaseClass())
- This.initialize();
for (auto &Param : Params)
S.deallocate(reinterpret_cast<Block *>(Param.second.get()));
+
+ // When destroying the InterpFrame, call the Dtor for all block
+ // that haven't been destroyed via a destroy() op yet.
+ // This happens when the execution is interruped midway-through.
+ if (Func) {
+ for (auto &Scope : Func->scopes()) {
+ for (auto &Local : Scope.locals()) {
+ Block *B = localBlock(Local.Offset);
+ if (B->isInitialized())
+ B->invokeDtor();
+ }
+ }
+ }
}
void InterpFrame::destroy(unsigned Idx) {
for (auto &Local : Func->getScope(Idx).locals()) {
- S.deallocate(reinterpret_cast<Block *>(localBlock(Local.Offset)));
+ S.deallocate(localBlock(Local.Offset));
}
}
@@ -66,20 +111,19 @@ void print(llvm::raw_ostream &OS, const Pointer &P, ASTContext &Ctx,
return;
}
- auto printDesc = [&OS, &Ctx](Descriptor *Desc) {
- if (auto *D = Desc->asDecl()) {
+ auto printDesc = [&OS, &Ctx](const Descriptor *Desc) {
+ if (const auto *D = Desc->asDecl()) {
// Subfields or named values.
- if (auto *VD = dyn_cast<ValueDecl>(D)) {
+ if (const auto *VD = dyn_cast<ValueDecl>(D)) {
OS << *VD;
return;
}
// Base classes.
- if (isa<RecordDecl>(D)) {
+ if (isa<RecordDecl>(D))
return;
- }
}
// Temporary expression.
- if (auto *E = Desc->asExpr()) {
+ if (const auto *E = Desc->asExpr()) {
E->printPretty(OS, nullptr, Ctx.getPrintingPolicy());
return;
}
@@ -94,39 +138,42 @@ void print(llvm::raw_ostream &OS, const Pointer &P, ASTContext &Ctx,
F = F.isArrayElement() ? F.getArray().expand() : F.getBase();
}
+ // Drop the first pointer since we print it unconditionally anyway.
+ if (!Levels.empty())
+ Levels.erase(Levels.begin());
+
printDesc(P.getDeclDesc());
- for (auto It = Levels.rbegin(); It != Levels.rend(); ++It) {
- if (It->inArray()) {
- OS << "[" << It->expand().getIndex() << "]";
+ for (const auto &It : Levels) {
+ if (It.inArray()) {
+ OS << "[" << It.expand().getIndex() << "]";
continue;
}
- if (auto Index = It->getIndex()) {
+ if (auto Index = It.getIndex()) {
OS << " + " << Index;
continue;
}
OS << ".";
- printDesc(It->getFieldDesc());
+ printDesc(It.getFieldDesc());
}
}
-void InterpFrame::describe(llvm::raw_ostream &OS) {
+void InterpFrame::describe(llvm::raw_ostream &OS) const {
const FunctionDecl *F = getCallee();
- auto *M = dyn_cast<CXXMethodDecl>(F);
- if (M && M->isInstance() && !isa<CXXConstructorDecl>(F)) {
+ if (const auto *M = dyn_cast<CXXMethodDecl>(F);
+ M && M->isInstance() && !isa<CXXConstructorDecl>(F)) {
print(OS, This, S.getCtx(), S.getCtx().getRecordType(M->getParent()));
OS << "->";
}
OS << *F << "(";
- unsigned Off = Func->hasRVO() ? primSize(PT_Ptr) : 0;
+ unsigned Off = 0;
+
+ Off += Func->hasRVO() ? primSize(PT_Ptr) : 0;
+ Off += Func->hasThisPointer() ? primSize(PT_Ptr) : 0;
+
for (unsigned I = 0, N = F->getNumParams(); I < N; ++I) {
QualType Ty = F->getParamDecl(I)->getType();
- PrimType PrimTy;
- if (llvm::Optional<PrimType> T = S.Ctx.classify(Ty)) {
- PrimTy = *T;
- } else {
- PrimTy = PT_Ptr;
- }
+ PrimType PrimTy = S.Ctx.classify(Ty).value_or(PT_Ptr);
TYPE_SWITCH(PrimTy, print(OS, stackRef<T>(Off), S.getCtx(), Ty));
Off += align(primSize(PrimTy));
@@ -142,20 +189,19 @@ Frame *InterpFrame::getCaller() const {
return S.getSplitFrame();
}
-SourceLocation InterpFrame::getCallLocation() const {
+SourceRange InterpFrame::getCallRange() const {
if (!Caller->Func)
- return S.getLocation(nullptr, {});
- return S.getLocation(Caller->Func, RetPC - sizeof(uintptr_t));
+ return S.getRange(nullptr, {});
+ return S.getRange(Caller->Func, RetPC - sizeof(uintptr_t));
}
const FunctionDecl *InterpFrame::getCallee() const {
return Func->getDecl();
}
-Pointer InterpFrame::getLocalPointer(unsigned Offset) {
+Pointer InterpFrame::getLocalPointer(unsigned Offset) const {
assert(Offset < Func->getFrameSize() && "Invalid local offset.");
- return Pointer(
- reinterpret_cast<Block *>(Locals.get() + Offset - sizeof(Block)));
+ return Pointer(localBlock(Offset), sizeof(InlineDescriptor));
}
Pointer InterpFrame::getParamPointer(unsigned Off) {
@@ -180,6 +226,11 @@ Pointer InterpFrame::getParamPointer(unsigned Off) {
}
SourceInfo InterpFrame::getSource(CodePtr PC) const {
+ // Implicitly created functions don't have any code we could point at,
+ // so return the call site.
+ if (Func && (!Func->hasBody() || Func->getDecl()->isImplicit()) && Caller)
+ return Caller->getSource(RetPC);
+
return S.getSource(Func, PC);
}
@@ -191,3 +242,9 @@ SourceLocation InterpFrame::getLocation(CodePtr PC) const {
return S.getLocation(Func, PC);
}
+SourceRange InterpFrame::getRange(CodePtr PC) const {
+ if (Func && (!Func->hasBody() || Func->getDecl()->isImplicit()) && Caller)
+ return Caller->getRange(RetPC);
+
+ return S.getRange(Func, PC);
+}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h b/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h
index 304e2ad66537..cba4f9560bf5 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpFrame.h
@@ -14,9 +14,7 @@
#define LLVM_CLANG_AST_INTERP_INTERPFRAME_H
#include "Frame.h"
-#include "Pointer.h"
#include "Program.h"
-#include "State.h"
#include <cstdint>
#include <vector>
@@ -24,6 +22,7 @@ namespace clang {
namespace interp {
class Function;
class InterpState;
+class Pointer;
/// Frame storing local variables.
class InterpFrame final : public Frame {
@@ -32,8 +31,14 @@ public:
InterpFrame *Caller;
/// Creates a new frame for a method call.
- InterpFrame(InterpState &S, Function *Func, InterpFrame *Caller,
- CodePtr RetPC, Pointer &&This);
+ InterpFrame(InterpState &S, const Function *Func, InterpFrame *Caller,
+ CodePtr RetPC);
+
+ /// Creates a new frame with the values that make sense.
+ /// I.e., the caller is the current frame of S,
+ /// the This() pointer is the current Pointer on the top of S's stack,
+ /// and the RVO pointer is before that.
+ InterpFrame(InterpState &S, const Function *Func, CodePtr RetPC);
/// Destroys the frame, killing all live pointers to stack slots.
~InterpFrame();
@@ -45,38 +50,39 @@ public:
void popArgs();
/// Describes the frame with arguments for diagnostic purposes.
- void describe(llvm::raw_ostream &OS) override;
+ void describe(llvm::raw_ostream &OS) const override;
/// Returns the parent frame object.
Frame *getCaller() const override;
/// Returns the location of the call to the frame.
- SourceLocation getCallLocation() const override;
+ SourceRange getCallRange() const override;
/// Returns the caller.
const FunctionDecl *getCallee() const override;
/// Returns the current function.
- Function *getFunction() const { return Func; }
+ const Function *getFunction() const { return Func; }
/// Returns the offset on the stack at which the frame starts.
size_t getFrameOffset() const { return FrameOffset; }
/// Returns the value of a local variable.
- template <typename T> const T &getLocal(unsigned Offset) {
+ template <typename T> const T &getLocal(unsigned Offset) const {
return localRef<T>(Offset);
}
/// Mutates a local variable.
template <typename T> void setLocal(unsigned Offset, const T &Value) {
localRef<T>(Offset) = Value;
+ localInlineDesc(Offset)->IsInitialized = true;
}
/// Returns a pointer to a local variables.
- Pointer getLocalPointer(unsigned Offset);
+ Pointer getLocalPointer(unsigned Offset) const;
/// Returns the value of an argument.
- template <typename T> const T &getParam(unsigned Offset) {
+ template <typename T> const T &getParam(unsigned Offset) const {
auto Pt = Params.find(Offset);
if (Pt == Params.end()) {
return stackRef<T>(Offset);
@@ -96,6 +102,9 @@ public:
/// Returns the 'this' pointer.
const Pointer &getThis() const { return This; }
+ /// Returns the RVO pointer, if the Function has one.
+ const Pointer &getRVOPtr() const { return RVOPtr; }
+
/// Checks if the frame is a root frame - return should quit the interpreter.
bool isRoot() const { return !Func; }
@@ -109,30 +118,43 @@ public:
virtual SourceInfo getSource(CodePtr PC) const;
const Expr *getExpr(CodePtr PC) const;
SourceLocation getLocation(CodePtr PC) const;
+ SourceRange getRange(CodePtr PC) const;
+
+ unsigned getDepth() const { return Depth; }
private:
/// Returns an original argument from the stack.
- template <typename T> const T &stackRef(unsigned Offset) {
+ template <typename T> const T &stackRef(unsigned Offset) const {
+ assert(Args);
return *reinterpret_cast<const T *>(Args - ArgSize + Offset);
}
/// Returns an offset to a local.
- template <typename T> T &localRef(unsigned Offset) {
- return *reinterpret_cast<T *>(Locals.get() + Offset);
+ template <typename T> T &localRef(unsigned Offset) const {
+ return getLocalPointer(Offset).deref<T>();
}
/// Returns a pointer to a local's block.
- void *localBlock(unsigned Offset) {
- return Locals.get() + Offset - sizeof(Block);
+ Block *localBlock(unsigned Offset) const {
+ return reinterpret_cast<Block *>(Locals.get() + Offset - sizeof(Block));
+ }
+
+ /// Returns the inline descriptor of the local.
+ InlineDescriptor *localInlineDesc(unsigned Offset) const {
+ return reinterpret_cast<InlineDescriptor *>(Locals.get() + Offset);
}
private:
/// Reference to the interpreter state.
InterpState &S;
+ /// Depth of this frame.
+ unsigned Depth;
/// Reference to the function being executed.
- Function *Func;
+ const Function *Func;
/// Current object pointer for methods.
Pointer This;
+ /// Pointer the non-primitive return value gets constructed in.
+ Pointer RVOPtr;
/// Return address.
CodePtr RetPC;
/// The size of all the arguments.
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.cpp
index 5c803f3d9424..91fe40feb767 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.cpp
@@ -6,9 +6,13 @@
//
//===----------------------------------------------------------------------===//
+#include "InterpStack.h"
+#include "Boolean.h"
+#include "Floating.h"
+#include "Integral.h"
+#include "Pointer.h"
#include <cassert>
#include <cstdlib>
-#include "InterpStack.h"
using namespace clang;
using namespace clang::interp;
@@ -19,11 +23,14 @@ InterpStack::~InterpStack() {
void InterpStack::clear() {
if (Chunk && Chunk->Next)
- free(Chunk->Next);
+ std::free(Chunk->Next);
if (Chunk)
- free(Chunk);
+ std::free(Chunk);
Chunk = nullptr;
StackSize = 0;
+#ifndef NDEBUG
+ ItemTypes.clear();
+#endif
}
void *InterpStack::grow(size_t Size) {
@@ -33,7 +40,7 @@ void *InterpStack::grow(size_t Size) {
if (Chunk && Chunk->Next) {
Chunk = Chunk->Next;
} else {
- StackChunk *Next = new (malloc(ChunkSize)) StackChunk(Chunk);
+ StackChunk *Next = new (std::malloc(ChunkSize)) StackChunk(Chunk);
if (Chunk)
Chunk->Next = Next;
Chunk = Next;
@@ -46,7 +53,7 @@ void *InterpStack::grow(size_t Size) {
return Object;
}
-void *InterpStack::peek(size_t Size) {
+void *InterpStack::peekData(size_t Size) const {
assert(Chunk && "Stack is empty!");
StackChunk *Ptr = Chunk;
@@ -65,7 +72,7 @@ void InterpStack::shrink(size_t Size) {
while (Size > Chunk->size()) {
Size -= Chunk->size();
if (Chunk->Next) {
- free(Chunk->Next);
+ std::free(Chunk->Next);
Chunk->Next = nullptr;
}
Chunk->End = Chunk->start();
@@ -76,3 +83,29 @@ void InterpStack::shrink(size_t Size) {
Chunk->End -= Size;
StackSize -= Size;
}
+
+void InterpStack::dump() const {
+#ifndef NDEBUG
+ llvm::errs() << "Items: " << ItemTypes.size() << ". Size: " << size() << '\n';
+ if (ItemTypes.empty())
+ return;
+
+ size_t Index = 0;
+ size_t Offset = 0;
+
+ // The type of the item on the top of the stack is inserted to the back
+ // of the vector, so the iteration has to happen backwards.
+ for (auto TyIt = ItemTypes.rbegin(); TyIt != ItemTypes.rend(); ++TyIt) {
+ Offset += align(primSize(*TyIt));
+
+ llvm::errs() << Index << '/' << Offset << ": ";
+ TYPE_SWITCH(*TyIt, {
+ const T &V = peek<T>(Offset);
+ llvm::errs() << V;
+ });
+ llvm::errs() << '\n';
+
+ ++Index;
+ }
+#endif
+}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.h b/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.h
index 127adb6b8eba..3fd0f63c781f 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpStack.h
@@ -13,7 +13,11 @@
#ifndef LLVM_CLANG_AST_INTERP_INTERPSTACK_H
#define LLVM_CLANG_AST_INTERP_INTERPSTACK_H
+#include "FunctionPointer.h"
+#include "IntegralAP.h"
+#include "PrimType.h"
#include <memory>
+#include <vector>
namespace clang {
namespace interp {
@@ -29,12 +33,20 @@ public:
/// Constructs a value in place on the top of the stack.
template <typename T, typename... Tys> void push(Tys &&... Args) {
new (grow(aligned_size<T>())) T(std::forward<Tys>(Args)...);
+#ifndef NDEBUG
+ ItemTypes.push_back(toPrimType<T>());
+#endif
}
/// Returns the value from the top of the stack and removes it.
template <typename T> T pop() {
- auto *Ptr = &peek<T>();
- auto Value = std::move(*Ptr);
+#ifndef NDEBUG
+ assert(!ItemTypes.empty());
+ assert(ItemTypes.back() == toPrimType<T>());
+ ItemTypes.pop_back();
+#endif
+ T *Ptr = &peekInternal<T>();
+ T Value = std::move(*Ptr);
Ptr->~T();
shrink(aligned_size<T>());
return Value;
@@ -42,18 +54,32 @@ public:
/// Discards the top value from the stack.
template <typename T> void discard() {
- auto *Ptr = &peek<T>();
+#ifndef NDEBUG
+ assert(!ItemTypes.empty());
+ assert(ItemTypes.back() == toPrimType<T>());
+ ItemTypes.pop_back();
+#endif
+ T *Ptr = &peekInternal<T>();
Ptr->~T();
shrink(aligned_size<T>());
}
/// Returns a reference to the value on the top of the stack.
- template <typename T> T &peek() {
- return *reinterpret_cast<T *>(peek(aligned_size<T>()));
+ template <typename T> T &peek() const {
+#ifndef NDEBUG
+ assert(!ItemTypes.empty());
+ assert(ItemTypes.back() == toPrimType<T>());
+#endif
+ return peekInternal<T>();
+ }
+
+ template <typename T> T &peek(size_t Offset) const {
+ assert(aligned(Offset));
+ return *reinterpret_cast<T *>(peekData(Offset));
}
/// Returns a pointer to the top object.
- void *top() { return Chunk ? peek(0) : nullptr; }
+ void *top() const { return Chunk ? peekData(0) : nullptr; }
/// Returns the size of the stack in bytes.
size_t size() const { return StackSize; }
@@ -61,6 +87,12 @@ public:
/// Clears the stack without calling any destructors.
void clear();
+ /// Returns whether the stack is empty.
+ bool empty() const { return StackSize == 0; }
+
+ /// dump the stack contents to stderr.
+ void dump() const;
+
private:
/// All stack slots are aligned to the native pointer alignment for storage.
/// The size of an object is rounded up to a pointer alignment multiple.
@@ -69,10 +101,15 @@ private:
return ((sizeof(T) + PtrAlign - 1) / PtrAlign) * PtrAlign;
}
- /// Grows the stack to accomodate a value and returns a pointer to it.
+ /// Like the public peek(), but without the debug type checks.
+ template <typename T> T &peekInternal() const {
+ return *reinterpret_cast<T *>(peekData(aligned_size<T>()));
+ }
+
+ /// Grows the stack to accommodate a value and returns a pointer to it.
void *grow(size_t Size);
/// Returns a pointer from the top of the stack.
- void *peek(size_t Size);
+ void *peekData(size_t Size) const;
/// Shrinks the stack.
void shrink(size_t Size);
@@ -94,10 +131,13 @@ private:
: Next(nullptr), Prev(Prev), End(reinterpret_cast<char *>(this + 1)) {}
/// Returns the size of the chunk, minus the header.
- size_t size() { return End - start(); }
+ size_t size() const { return End - start(); }
/// Returns a pointer to the start of the data region.
char *start() { return reinterpret_cast<char *>(this + 1); }
+ const char *start() const {
+ return reinterpret_cast<const char *>(this + 1);
+ }
};
static_assert(sizeof(StackChunk) < ChunkSize, "Invalid chunk size");
@@ -105,6 +145,53 @@ private:
StackChunk *Chunk = nullptr;
/// Total size of the stack.
size_t StackSize = 0;
+
+#ifndef NDEBUG
+ /// vector recording the type of data we pushed into the stack.
+ std::vector<PrimType> ItemTypes;
+
+ template <typename T> static constexpr PrimType toPrimType() {
+ if constexpr (std::is_same_v<T, Pointer>)
+ return PT_Ptr;
+ else if constexpr (std::is_same_v<T, bool> ||
+ std::is_same_v<T, Boolean>)
+ return PT_Bool;
+ else if constexpr (std::is_same_v<T, int8_t> ||
+ std::is_same_v<T, Integral<8, true>>)
+ return PT_Sint8;
+ else if constexpr (std::is_same_v<T, uint8_t> ||
+ std::is_same_v<T, Integral<8, false>>)
+ return PT_Uint8;
+ else if constexpr (std::is_same_v<T, int16_t> ||
+ std::is_same_v<T, Integral<16, true>>)
+ return PT_Sint16;
+ else if constexpr (std::is_same_v<T, uint16_t> ||
+ std::is_same_v<T, Integral<16, false>>)
+ return PT_Uint16;
+ else if constexpr (std::is_same_v<T, int32_t> ||
+ std::is_same_v<T, Integral<32, true>>)
+ return PT_Sint32;
+ else if constexpr (std::is_same_v<T, uint32_t> ||
+ std::is_same_v<T, Integral<32, false>>)
+ return PT_Uint32;
+ else if constexpr (std::is_same_v<T, int64_t> ||
+ std::is_same_v<T, Integral<64, true>>)
+ return PT_Sint64;
+ else if constexpr (std::is_same_v<T, uint64_t> ||
+ std::is_same_v<T, Integral<64, false>>)
+ return PT_Uint64;
+ else if constexpr (std::is_same_v<T, Floating>)
+ return PT_Float;
+ else if constexpr (std::is_same_v<T, FunctionPointer>)
+ return PT_FnPtr;
+ else if constexpr (std::is_same_v<T, IntegralAP<true>>)
+ return PT_IntAP;
+ else if constexpr (std::is_same_v<T, IntegralAP<false>>)
+ return PT_IntAP;
+
+ llvm_unreachable("unknown type push()'ed into InterpStack");
+ }
+#endif
};
} // namespace interp
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpState.cpp b/contrib/llvm-project/clang/lib/AST/Interp/InterpState.cpp
index 25684f3c0939..2cb87ef07fe5 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpState.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpState.cpp
@@ -7,24 +7,17 @@
//===----------------------------------------------------------------------===//
#include "InterpState.h"
-#include <limits>
-#include "Function.h"
#include "InterpFrame.h"
#include "InterpStack.h"
-#include "Opcode.h"
-#include "PrimType.h"
#include "Program.h"
#include "State.h"
using namespace clang;
using namespace clang::interp;
-using APSInt = llvm::APSInt;
-
InterpState::InterpState(State &Parent, Program &P, InterpStack &Stk,
Context &Ctx, SourceMapper *M)
- : Parent(Parent), M(M), P(P), Stk(Stk), Ctx(Ctx), Current(nullptr),
- CallStackDepth(Parent.getCallStackDepth() + 1) {}
+ : Parent(Parent), M(M), P(P), Stk(Stk), Ctx(Ctx), Current(nullptr) {}
InterpState::~InterpState() {
while (Current) {
@@ -35,17 +28,15 @@ InterpState::~InterpState() {
while (DeadBlocks) {
DeadBlock *Next = DeadBlocks->Next;
- free(DeadBlocks);
+ std::free(DeadBlocks);
DeadBlocks = Next;
}
}
Frame *InterpState::getCurrentFrame() {
- if (Current && Current->Caller) {
+ if (Current && Current->Caller)
return Current;
- } else {
- return Parent.getCurrentFrame();
- }
+ return Parent.getCurrentFrame();
}
bool InterpState::reportOverflow(const Expr *E, const llvm::APSInt &Value) {
@@ -55,20 +46,28 @@ bool InterpState::reportOverflow(const Expr *E, const llvm::APSInt &Value) {
}
void InterpState::deallocate(Block *B) {
- Descriptor *Desc = B->getDescriptor();
+ assert(B);
+ const Descriptor *Desc = B->getDescriptor();
+ assert(Desc);
+
if (B->hasPointers()) {
size_t Size = B->getSize();
// Allocate a new block, transferring over pointers.
- char *Memory = reinterpret_cast<char *>(malloc(sizeof(DeadBlock) + Size));
+ char *Memory =
+ reinterpret_cast<char *>(std::malloc(sizeof(DeadBlock) + Size));
auto *D = new (Memory) DeadBlock(DeadBlocks, B);
- // Move data from one block to another.
- if (Desc->MoveFn)
+ // Move data and metadata from the old block to the new (dead)block.
+ if (Desc->MoveFn) {
Desc->MoveFn(B, B->data(), D->data(), Desc);
+ if (Desc->getMetadataSize() > 0)
+ std::memcpy(D->rawData(), B->rawData(), Desc->getMetadataSize());
+ }
+
+ // We moved the contents over to the DeadBlock.
+ B->IsInitialized = false;
} else {
- // Free storage, if necessary.
- if (Desc->DtorFn)
- Desc->DtorFn(B, B->data(), Desc);
+ B->invokeDtor();
}
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/InterpState.h b/contrib/llvm-project/clang/lib/AST/Interp/InterpState.h
index c2209bbcbb92..8f84bf6ed2ea 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/InterpState.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/InterpState.h
@@ -15,6 +15,7 @@
#include "Context.h"
#include "Function.h"
+#include "InterpFrame.h"
#include "InterpStack.h"
#include "State.h"
#include "clang/AST/APValue.h"
@@ -38,15 +39,20 @@ public:
~InterpState();
+ InterpState(const InterpState &) = delete;
+ InterpState &operator=(const InterpState &) = delete;
+
// Stack frame accessors.
Frame *getSplitFrame() { return Parent.getCurrentFrame(); }
Frame *getCurrentFrame() override;
- unsigned getCallStackDepth() override { return CallStackDepth; }
+ unsigned getCallStackDepth() override {
+ return Current ? (Current->getDepth() + 1) : 1;
+ }
const Frame *getBottomFrame() const override {
return Parent.getBottomFrame();
}
- // Acces objects from the walker context.
+ // Access objects from the walker context.
Expr::EvalStatus &getEvalStatus() const override {
return Parent.getEvalStatus();
}
@@ -65,6 +71,7 @@ public:
bool noteUndefinedBehavior() override {
return Parent.noteUndefinedBehavior();
}
+ bool inConstantContext() const { return Parent.InConstantContext; }
bool hasActiveDiagnostic() override { return Parent.hasActiveDiagnostic(); }
void setActiveDiagnostic(bool Flag) override {
Parent.setActiveDiagnostic(Flag);
@@ -81,10 +88,12 @@ public:
void deallocate(Block *B);
/// Delegates source mapping to the mapper.
- SourceInfo getSource(Function *F, CodePtr PC) const override {
+ SourceInfo getSource(const Function *F, CodePtr PC) const override {
return M ? M->getSource(F, PC) : F->getSource(PC);
}
+ Context &getContext() const { return Ctx; }
+
private:
/// AST Walker state.
State &Parent;
@@ -102,8 +111,6 @@ public:
Context &Ctx;
/// The current frame.
InterpFrame *Current = nullptr;
- /// Call stack depth.
- unsigned CallStackDepth;
};
} // namespace interp
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Opcodes.td b/contrib/llvm-project/clang/lib/AST/Interp/Opcodes.td
index 4aba5f5cd83c..24747b6b98c1 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Opcodes.td
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Opcodes.td
@@ -25,7 +25,11 @@ def Sint32 : Type;
def Uint32 : Type;
def Sint64 : Type;
def Uint64 : Type;
+def IntAP : Type;
+def IntAPS : Type;
+def Float : Type;
def Ptr : Type;
+def FnPtr : Type;
//===----------------------------------------------------------------------===//
// Types transferred to the interpreter.
@@ -40,45 +44,69 @@ def ArgSint32 : ArgType { let Name = "int32_t"; }
def ArgUint32 : ArgType { let Name = "uint32_t"; }
def ArgSint64 : ArgType { let Name = "int64_t"; }
def ArgUint64 : ArgType { let Name = "uint64_t"; }
+def ArgFloat : ArgType { let Name = "Floating"; }
def ArgBool : ArgType { let Name = "bool"; }
-def ArgFunction : ArgType { let Name = "Function *"; }
-def ArgRecord : ArgType { let Name = "Record *"; }
-
-def ArgSema : ArgType { let Name = "const fltSemantics *"; }
-
-def ArgExpr : ArgType { let Name = "const Expr *"; }
-def ArgFloatingLiteral : ArgType { let Name = "const FloatingLiteral *"; }
-def ArgCXXMethodDecl : ArgType { let Name = "const CXXMethodDecl *"; }
-def ArgFunctionDecl : ArgType { let Name = "const FunctionDecl *"; }
+def ArgFunction : ArgType { let Name = "const Function *"; }
def ArgRecordDecl : ArgType { let Name = "const RecordDecl *"; }
-def ArgCXXRecordDecl : ArgType { let Name = "const CXXRecordDecl *"; }
-def ArgValueDecl : ArgType { let Name = "const ValueDecl *"; }
def ArgRecordField : ArgType { let Name = "const Record::Field *"; }
+def ArgFltSemantics : ArgType { let Name = "const llvm::fltSemantics *"; }
+def ArgRoundingMode : ArgType { let Name = "llvm::RoundingMode"; }
+def ArgLETD: ArgType { let Name = "const LifetimeExtendedTemporaryDecl *"; }
+def ArgCastKind : ArgType { let Name = "CastKind"; }
+def ArgCallExpr : ArgType { let Name = "const CallExpr *"; }
+def ArgOffsetOfExpr : ArgType { let Name = "const OffsetOfExpr *"; }
+def ArgDeclRef : ArgType { let Name = "const DeclRefExpr *"; }
+def ArgCCI : ArgType { let Name = "const ComparisonCategoryInfo *"; }
//===----------------------------------------------------------------------===//
-// Classes of types intructions operate on.
+// Classes of types instructions operate on.
//===----------------------------------------------------------------------===//
class TypeClass {
list<Type> Types;
}
-def AluTypeClass : TypeClass {
+def IntegerTypeClass : TypeClass {
+ let Types = [Sint8, Uint8, Sint16, Uint16, Sint32,
+ Uint32, Sint64, Uint64, IntAP, IntAPS];
+}
+
+def FixedSizeIntegralTypeClass : TypeClass {
let Types = [Sint8, Uint8, Sint16, Uint16, Sint32,
Uint32, Sint64, Uint64, Bool];
}
+def NumberTypeClass : TypeClass {
+ let Types = !listconcat(IntegerTypeClass.Types, [Float]);
+}
+
+def FloatTypeClass : TypeClass {
+ let Types = [Float];
+}
+
+def AluTypeClass : TypeClass {
+ let Types = !listconcat(IntegerTypeClass.Types, [Bool]);
+}
+
def PtrTypeClass : TypeClass {
- let Types = [Ptr];
+ let Types = [Ptr, FnPtr];
+}
+
+def BoolTypeClass : TypeClass {
+ let Types = [Bool];
+}
+
+def NonPtrTypeClass : TypeClass {
+ let Types = !listconcat(IntegerTypeClass.Types, [Bool], [Float]);
}
def AllTypeClass : TypeClass {
- let Types = !listconcat(AluTypeClass.Types, PtrTypeClass.Types);
+ let Types = !listconcat(AluTypeClass.Types, PtrTypeClass.Types, FloatTypeClass.Types);
}
def ComparableTypeClass : TypeClass {
- let Types = !listconcat(AluTypeClass.Types, [Ptr]);
+ let Types = !listconcat(AluTypeClass.Types, [Ptr], [Float], [FnPtr]);
}
class SingletonTypeClass<Type Ty> : TypeClass {
@@ -105,6 +133,16 @@ class AluOpcode : Opcode {
let HasGroup = 1;
}
+class FloatOpcode : Opcode {
+ let Types = [];
+ let Args = [ArgRoundingMode];
+}
+
+class IntegerOpcode : Opcode {
+ let Types = [IntegerTypeClass];
+ let HasGroup = 1;
+}
+
//===----------------------------------------------------------------------===//
// Jump opcodes
//===----------------------------------------------------------------------===//
@@ -149,6 +187,33 @@ def RetValue : Opcode {
// [] -> EXIT
def NoRet : Opcode {}
+
+def Call : Opcode {
+ let Args = [ArgFunction];
+ let Types = [];
+}
+
+def CallVirt : Opcode {
+ let Args = [ArgFunction];
+ let Types = [];
+}
+
+def CallBI : Opcode {
+ let Args = [ArgFunction, ArgCallExpr];
+ let Types = [];
+}
+
+def CallPtr : Opcode {
+ let Args = [];
+ let Types = [];
+}
+
+def OffsetOf : Opcode {
+ let Types = [IntegerTypeClass];
+ let Args = [ArgOffsetOfExpr];
+ let HasGroup = 1;
+}
+
//===----------------------------------------------------------------------===//
// Frame management
//===----------------------------------------------------------------------===//
@@ -178,16 +243,27 @@ def ConstSint32 : ConstOpcode<Sint32, ArgSint32>;
def ConstUint32 : ConstOpcode<Uint32, ArgUint32>;
def ConstSint64 : ConstOpcode<Sint64, ArgSint64>;
def ConstUint64 : ConstOpcode<Uint64, ArgUint64>;
+def ConstFloat : ConstOpcode<Float, ArgFloat>;
def ConstBool : ConstOpcode<Bool, ArgBool>;
// [] -> [Integer]
def Zero : Opcode {
- let Types = [AluTypeClass];
+ let Types = [FixedSizeIntegralTypeClass];
+ let HasGroup = 1;
+}
+
+def ZeroIntAP : Opcode {
+ let Args = [ArgUint32];
+}
+
+def ZeroIntAPS : Opcode {
+ let Args = [ArgUint32];
}
// [] -> [Pointer]
def Null : Opcode {
let Types = [PtrTypeClass];
+ let HasGroup = 1;
}
//===----------------------------------------------------------------------===//
@@ -236,6 +312,20 @@ def GetPtrBase : Opcode {
let Args = [ArgUint32];
}
// [Pointer] -> [Pointer]
+def GetPtrBasePop : Opcode {
+ // Offset of field, which is a base.
+ let Args = [ArgUint32];
+}
+
+def InitPtrPop : Opcode {
+ let Args = [];
+}
+
+def GetPtrDerivedPop : Opcode {
+ let Args = [ArgUint32];
+}
+
+// [Pointer] -> [Pointer]
def GetPtrVirtBase : Opcode {
// RecordDecl of base class.
let Args = [ArgRecordDecl];
@@ -253,10 +343,16 @@ def GetPtrThisVirtBase : Opcode {
// [] -> [Pointer]
def This : Opcode;
+// [] -> [Pointer]
+def RVOPtr : Opcode;
+
// [Pointer] -> [Pointer]
def NarrowPtr : Opcode;
// [Pointer] -> [Pointer]
def ExpandPtr : Opcode;
+// [Pointer, Offset] -> [Pointer]
+def ArrayElemPtr : AluOpcode;
+def ArrayElemPtrPop : AluOpcode;
//===----------------------------------------------------------------------===//
// Direct field accessors
@@ -281,9 +377,20 @@ def SetLocal : AccessOpcode { let HasCustomEval = 1; }
// [] -> [Value]
def GetGlobal : AccessOpcode;
+def GetGlobalUnchecked : AccessOpcode;
// [Value] -> []
def InitGlobal : AccessOpcode;
// [Value] -> []
+def InitGlobalTemp : AccessOpcode {
+ let Args = [ArgUint32, ArgLETD];
+}
+// [Pointer] -> [Pointer]
+def InitGlobalTempComp : Opcode {
+ let Args = [ArgLETD];
+ let Types = [];
+ let HasGroup = 0;
+}
+// [Value] -> []
def SetGlobal : AccessOpcode;
// [] -> [Value]
@@ -308,7 +415,11 @@ def InitThisField : AccessOpcode;
// [Value] -> []
def InitThisFieldActive : AccessOpcode;
// [Value] -> []
-def InitThisBitField : BitFieldOpcode;
+def InitThisBitField : Opcode {
+ let Types = [AluTypeClass];
+ let Args = [ArgRecordField, ArgUint32];
+ let HasGroup = 1;
+}
// [Pointer, Value] -> []
def InitField : AccessOpcode;
// [Pointer, Value] -> []
@@ -374,14 +485,159 @@ def AddOffset : AluOpcode;
// [Pointer, Integral] -> [Pointer]
def SubOffset : AluOpcode;
+// [Pointer, Pointer] -> [Integral]
+def SubPtr : Opcode {
+ let Types = [IntegerTypeClass];
+ let HasGroup = 1;
+}
+
+// [Pointer] -> [Pointer]
+def IncPtr : Opcode {
+ let HasGroup = 0;
+}
+// [Pointer] -> [Pointer]
+def DecPtr : Opcode {
+ let HasGroup = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// Function pointers.
+//===----------------------------------------------------------------------===//
+def GetFnPtr : Opcode {
+ let Args = [ArgFunction];
+}
+
+
//===----------------------------------------------------------------------===//
// Binary operators.
//===----------------------------------------------------------------------===//
// [Real, Real] -> [Real]
-def Sub : AluOpcode;
-def Add : AluOpcode;
-def Mul : AluOpcode;
+def Add : AluOpcode;
+def Addf : FloatOpcode;
+def Sub : AluOpcode;
+def Subf : FloatOpcode;
+def Mul : AluOpcode;
+def Mulf : FloatOpcode;
+def Rem : IntegerOpcode;
+def Div : IntegerOpcode;
+def Divf : FloatOpcode;
+
+def BitAnd : IntegerOpcode;
+def BitOr : IntegerOpcode;
+def BitXor : IntegerOpcode;
+
+def Shl : Opcode {
+ let Types = [IntegerTypeClass, IntegerTypeClass];
+ let HasGroup = 1;
+}
+
+def Shr : Opcode {
+ let Types = [IntegerTypeClass, IntegerTypeClass];
+ let HasGroup = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// Unary operators.
+//===----------------------------------------------------------------------===//
+
+// [Real] -> [Real]
+def Inv: Opcode {
+ let Types = [BoolTypeClass];
+ let HasGroup = 1;
+}
+
+// Increment and decrement.
+def Inc: IntegerOpcode;
+def IncPop : IntegerOpcode;
+def Dec: IntegerOpcode;
+def DecPop: IntegerOpcode;
+
+// Float increment and decrement.
+def Incf: FloatOpcode;
+def IncfPop : FloatOpcode;
+def Decf: FloatOpcode;
+def DecfPop : FloatOpcode;
+
+// [Real] -> [Real]
+def Neg: Opcode {
+ let Types = [NonPtrTypeClass];
+ let HasGroup = 1;
+}
+
+// [Real] -> [Real]
+def Comp: Opcode {
+ let Types = [IntegerTypeClass];
+ let HasGroup = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// Cast, CastFP.
+//===----------------------------------------------------------------------===//
+
+def FromCastTypeClass : TypeClass {
+ let Types = [Uint8, Sint8, Uint16, Sint16, Uint32, Sint32, Uint64, Sint64, Bool, IntAP, IntAPS];
+}
+
+def ToCastTypeClass : TypeClass {
+ let Types = [Uint8, Sint8, Uint16, Sint16, Uint32, Sint32, Uint64, Sint64, Bool];
+}
+
+def Cast: Opcode {
+ let Types = [FromCastTypeClass, ToCastTypeClass];
+ let HasGroup = 1;
+}
+
+def CastFP : Opcode {
+ let Types = [];
+ let Args = [ArgFltSemantics, ArgRoundingMode];
+}
+
+def FixedSizeIntegralTypes : TypeClass {
+ let Types = [Uint8, Sint8, Uint16, Sint16, Uint32, Sint32, Uint64, Sint64, Bool];
+}
+
+def CastAP : Opcode {
+ let Types = [AluTypeClass];
+ let Args = [ArgUint32];
+ let HasGroup = 1;
+}
+
+def CastAPS : Opcode {
+ let Types = [AluTypeClass];
+ let Args = [ArgUint32];
+ let HasGroup = 1;
+}
+
+// Cast an integer to a floating type
+def CastIntegralFloating : Opcode {
+ let Types = [AluTypeClass];
+ let Args = [ArgFltSemantics, ArgRoundingMode];
+ let HasGroup = 1;
+}
+
+// Cast a floating to an integer type
+def CastFloatingIntegral : Opcode {
+ let Types = [FixedSizeIntegralTypes];
+ let Args = [];
+ let HasGroup = 1;
+}
+
+def CastFloatingIntegralAP : Opcode {
+ let Types = [];
+ let Args = [ArgUint32];
+}
+
+def CastFloatingIntegralAPS : Opcode {
+ let Types = [];
+ let Args = [ArgUint32];
+}
+
+def CastPointerIntegral : Opcode {
+ let Types = [AluTypeClass];
+ let Args = [];
+ let HasGroup = 1;
+}
//===----------------------------------------------------------------------===//
// Comparison opcodes.
@@ -400,6 +656,10 @@ class ComparisonOpcode : Opcode {
let HasGroup = 1;
}
+def CMP3 : ComparisonOpcode {
+ let Args = [ArgCCI];
+}
+
def LT : ComparisonOpcode;
def LE : ComparisonOpcode;
def GT : ComparisonOpcode;
@@ -420,3 +680,15 @@ def Dup : Opcode {
let Types = [AllTypeClass];
let HasGroup = 1;
}
+
+// [] -> []
+def Invalid : Opcode {}
+def InvalidCast : Opcode {
+ let Args = [ArgCastKind];
+}
+
+def InvalidDeclRef : Opcode {
+ let Args = [ArgDeclRef];
+}
+
+def ArrayDecay : Opcode;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp
index ef2638e2a36b..5af1d6d52e93 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Pointer.cpp
@@ -7,21 +7,29 @@
//===----------------------------------------------------------------------===//
#include "Pointer.h"
+#include "Boolean.h"
+#include "Context.h"
+#include "Floating.h"
#include "Function.h"
+#include "Integral.h"
#include "InterpBlock.h"
#include "PrimType.h"
+#include "Record.h"
using namespace clang;
using namespace clang::interp;
Pointer::Pointer(Block *Pointee) : Pointer(Pointee, 0, 0) {}
+Pointer::Pointer(Block *Pointee, unsigned BaseAndOffset)
+ : Pointer(Pointee, BaseAndOffset, BaseAndOffset) {}
+
Pointer::Pointer(const Pointer &P) : Pointer(P.Pointee, P.Base, P.Offset) {}
Pointer::Pointer(Pointer &&P)
: Pointee(P.Pointee), Base(P.Base), Offset(P.Offset) {
if (Pointee)
- Pointee->movePointer(&P, this);
+ Pointee->replacePointer(&P, this);
}
Pointer::Pointer(Block *Pointee, unsigned Base, unsigned Offset)
@@ -66,7 +74,7 @@ void Pointer::operator=(Pointer &&P) {
Pointee = P.Pointee;
if (Pointee)
- Pointee->movePointer(&P, this);
+ Pointee->replacePointer(&P, this);
if (Old)
Old->cleanup();
@@ -86,7 +94,7 @@ APValue Pointer::toAPValue() const {
Offset = CharUnits::Zero();
} else {
// Build the lvalue base from the block.
- Descriptor *Desc = getDeclDesc();
+ const Descriptor *Desc = getDeclDesc();
if (auto *VD = Desc->asValueDecl())
Base = VD;
else if (auto *E = Desc->asExpr())
@@ -100,13 +108,17 @@ APValue Pointer::toAPValue() const {
if (isUnknownSizeArray()) {
IsOnePastEnd = false;
Offset = CharUnits::Zero();
+ } else if (Desc->asExpr()) {
+ // Pointer pointing to a an expression.
+ IsOnePastEnd = false;
+ Offset = CharUnits::Zero();
} else {
// TODO: compute the offset into the object.
Offset = CharUnits::Zero();
// Build the path into the object.
Pointer Ptr = *this;
- while (Ptr.isField()) {
+ while (Ptr.isField() || Ptr.isArrayElement()) {
if (Ptr.isArrayElement()) {
Path.push_back(APValue::LValuePathEntry::ArrayIndex(Ptr.getIndex()));
Ptr = Ptr.getArray();
@@ -115,8 +127,8 @@ APValue Pointer::toAPValue() const {
bool IsVirtual = false;
// Create a path entry for the field.
- Descriptor *Desc = Ptr.getFieldDesc();
- if (auto *BaseOrMember = Desc->asDecl()) {
+ const Descriptor *Desc = Ptr.getFieldDesc();
+ if (const auto *BaseOrMember = Desc->asDecl()) {
Path.push_back(APValue::LValuePathEntry({BaseOrMember, IsVirtual}));
Ptr = Ptr.getBase();
continue;
@@ -129,49 +141,76 @@ APValue Pointer::toAPValue() const {
}
}
+ // We assemble the LValuePath starting from the innermost pointer to the
+ // outermost one. SO in a.b.c, the first element in Path will refer to
+ // the field 'c', while later code expects it to refer to 'a'.
+ // Just invert the order of the elements.
+ std::reverse(Path.begin(), Path.end());
+
return APValue(Base, Offset, Path, IsOnePastEnd, IsNullPtr);
}
+std::string Pointer::toDiagnosticString(const ASTContext &Ctx) const {
+ if (!Pointee)
+ return "nullptr";
+
+ return toAPValue().getAsString(Ctx, getType());
+}
+
bool Pointer::isInitialized() const {
assert(Pointee && "Cannot check if null pointer was initialized");
- Descriptor *Desc = getFieldDesc();
+ const Descriptor *Desc = getFieldDesc();
+ assert(Desc);
if (Desc->isPrimitiveArray()) {
- if (Pointee->IsStatic)
+ if (isStatic() && Base == 0)
return true;
- // Primitive array field are stored in a bitset.
- InitMap *Map = getInitMap();
- if (!Map)
+
+ InitMapPtr &IM = getInitMap();
+
+ if (!IM)
return false;
- if (Map == (InitMap *)-1)
+
+ if (IM->first)
return true;
- return Map->isInitialized(getIndex());
- } else {
- // Field has its bit in an inline descriptor.
- return Base == 0 || getInlineDesc()->IsInitialized;
+
+ return IM->second->isElementInitialized(getIndex());
}
+
+ // Field has its bit in an inline descriptor.
+ return Base == 0 || getInlineDesc()->IsInitialized;
}
void Pointer::initialize() const {
assert(Pointee && "Cannot initialize null pointer");
- Descriptor *Desc = getFieldDesc();
+ const Descriptor *Desc = getFieldDesc();
+
+ assert(Desc);
if (Desc->isPrimitiveArray()) {
- if (!Pointee->IsStatic) {
- // Primitive array initializer.
- InitMap *&Map = getInitMap();
- if (Map == (InitMap *)-1)
- return;
- if (Map == nullptr)
- Map = InitMap::allocate(Desc->getNumElems());
- if (Map->initialize(getIndex())) {
- free(Map);
- Map = (InitMap *)-1;
- }
+ // Primitive global arrays don't have an initmap.
+ if (isStatic() && Base == 0)
+ return;
+
+ InitMapPtr &IM = getInitMap();
+ if (!IM)
+ IM =
+ std::make_pair(false, std::make_shared<InitMap>(Desc->getNumElems()));
+
+ assert(IM);
+
+ // All initialized.
+ if (IM->first)
+ return;
+
+ if (IM->second->initializeElement(getIndex())) {
+ IM->first = true;
+ IM->second.reset();
}
- } else {
- // Field has its bit in an inline descriptor.
- assert(Base != 0 && "Only composite fields can be initialised");
- getInlineDesc()->IsInitialized = true;
+ return;
}
+
+ // Field has its bit in an inline descriptor.
+ assert(Base != 0 && "Only composite fields can be initialised");
+ getInlineDesc()->IsInitialized = true;
}
void Pointer::activate() const {
@@ -189,5 +228,146 @@ bool Pointer::hasSameBase(const Pointer &A, const Pointer &B) {
}
bool Pointer::hasSameArray(const Pointer &A, const Pointer &B) {
- return A.Base == B.Base && A.getFieldDesc()->IsArray;
+ return hasSameBase(A, B) && A.Base == B.Base && A.getFieldDesc()->IsArray;
+}
+
+std::optional<APValue> Pointer::toRValue(const Context &Ctx) const {
+ // Method to recursively traverse composites.
+ std::function<bool(QualType, const Pointer &, APValue &)> Composite;
+ Composite = [&Composite, &Ctx](QualType Ty, const Pointer &Ptr, APValue &R) {
+ if (const auto *AT = Ty->getAs<AtomicType>())
+ Ty = AT->getValueType();
+
+ // Invalid pointers.
+ if (Ptr.isDummy() || !Ptr.isLive() ||
+ (!Ptr.isUnknownSizeArray() && Ptr.isOnePastEnd()))
+ return false;
+
+ // Primitive values.
+ if (std::optional<PrimType> T = Ctx.classify(Ty)) {
+ if (T == PT_Ptr || T == PT_FnPtr) {
+ R = Ptr.toAPValue();
+ } else {
+ TYPE_SWITCH(*T, R = Ptr.deref<T>().toAPValue());
+ }
+ return true;
+ }
+
+ if (const auto *RT = Ty->getAs<RecordType>()) {
+ const auto *Record = Ptr.getRecord();
+ assert(Record && "Missing record descriptor");
+
+ bool Ok = true;
+ if (RT->getDecl()->isUnion()) {
+ const FieldDecl *ActiveField = nullptr;
+ APValue Value;
+ for (const auto &F : Record->fields()) {
+ const Pointer &FP = Ptr.atField(F.Offset);
+ QualType FieldTy = F.Decl->getType();
+ if (FP.isActive()) {
+ if (std::optional<PrimType> T = Ctx.classify(FieldTy)) {
+ TYPE_SWITCH(*T, Value = FP.deref<T>().toAPValue());
+ } else {
+ Ok &= Composite(FieldTy, FP, Value);
+ }
+ break;
+ }
+ }
+ R = APValue(ActiveField, Value);
+ } else {
+ unsigned NF = Record->getNumFields();
+ unsigned NB = Record->getNumBases();
+ unsigned NV = Ptr.isBaseClass() ? 0 : Record->getNumVirtualBases();
+
+ R = APValue(APValue::UninitStruct(), NB, NF);
+
+ for (unsigned I = 0; I < NF; ++I) {
+ const Record::Field *FD = Record->getField(I);
+ QualType FieldTy = FD->Decl->getType();
+ const Pointer &FP = Ptr.atField(FD->Offset);
+ APValue &Value = R.getStructField(I);
+
+ if (std::optional<PrimType> T = Ctx.classify(FieldTy)) {
+ TYPE_SWITCH(*T, Value = FP.deref<T>().toAPValue());
+ } else {
+ Ok &= Composite(FieldTy, FP, Value);
+ }
+ }
+
+ for (unsigned I = 0; I < NB; ++I) {
+ const Record::Base *BD = Record->getBase(I);
+ QualType BaseTy = Ctx.getASTContext().getRecordType(BD->Decl);
+ const Pointer &BP = Ptr.atField(BD->Offset);
+ Ok &= Composite(BaseTy, BP, R.getStructBase(I));
+ }
+
+ for (unsigned I = 0; I < NV; ++I) {
+ const Record::Base *VD = Record->getVirtualBase(I);
+ QualType VirtBaseTy = Ctx.getASTContext().getRecordType(VD->Decl);
+ const Pointer &VP = Ptr.atField(VD->Offset);
+ Ok &= Composite(VirtBaseTy, VP, R.getStructBase(NB + I));
+ }
+ }
+ return Ok;
+ }
+
+ if (Ty->isIncompleteArrayType()) {
+ R = APValue(APValue::UninitArray(), 0, 0);
+ return true;
+ }
+
+ if (const auto *AT = Ty->getAsArrayTypeUnsafe()) {
+ const size_t NumElems = Ptr.getNumElems();
+ QualType ElemTy = AT->getElementType();
+ R = APValue(APValue::UninitArray{}, NumElems, NumElems);
+
+ bool Ok = true;
+ for (unsigned I = 0; I < NumElems; ++I) {
+ APValue &Slot = R.getArrayInitializedElt(I);
+ const Pointer &EP = Ptr.atIndex(I);
+ if (std::optional<PrimType> T = Ctx.classify(ElemTy)) {
+ TYPE_SWITCH(*T, Slot = EP.deref<T>().toAPValue());
+ } else {
+ Ok &= Composite(ElemTy, EP.narrow(), Slot);
+ }
+ }
+ return Ok;
+ }
+
+ // Complex types.
+ if (const auto *CT = Ty->getAs<ComplexType>()) {
+ QualType ElemTy = CT->getElementType();
+ std::optional<PrimType> ElemT = Ctx.classify(ElemTy);
+ assert(ElemT);
+
+ if (ElemTy->isIntegerType()) {
+ INT_TYPE_SWITCH(*ElemT, {
+ auto V1 = Ptr.atIndex(0).deref<T>();
+ auto V2 = Ptr.atIndex(1).deref<T>();
+ R = APValue(V1.toAPSInt(), V2.toAPSInt());
+ return true;
+ });
+ } else if (ElemTy->isFloatingType()) {
+ R = APValue(Ptr.atIndex(0).deref<Floating>().getAPFloat(),
+ Ptr.atIndex(1).deref<Floating>().getAPFloat());
+ return true;
+ }
+ return false;
+ }
+
+ llvm_unreachable("invalid value to return");
+ };
+
+ if (isZero())
+ return APValue(static_cast<Expr *>(nullptr), CharUnits::Zero(), {}, false,
+ true);
+
+ if (isDummy() || !isLive())
+ return std::nullopt;
+
+ // Return the composite type.
+ APValue Result;
+ if (!Composite(getType(), *this, Result))
+ return std::nullopt;
+ return Result;
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h b/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h
index f2f6e0e76018..8ccaff41ded8 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Pointer.h
@@ -26,24 +26,51 @@ namespace clang {
namespace interp {
class Block;
class DeadBlock;
-class Context;
-class InterpState;
class Pointer;
-class Function;
+class Context;
enum PrimType : unsigned;
+class Pointer;
+inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Pointer &P);
+
/// A pointer to a memory block, live or dead.
///
/// This object can be allocated into interpreter stack frames. If pointing to
/// a live block, it is a link in the chain of pointers pointing to the block.
+///
+/// In the simplest form, a Pointer has a Block* (the pointee) and both Base
+/// and Offset are 0, which means it will point to raw data.
+///
+/// The Base field is used to access metadata about the data. For primitive
+/// arrays, the Base is followed by an InitMap. In a variety of cases, the
+/// Base is preceded by an InlineDescriptor, which is used to track the
+/// initialization state, among other things.
+///
+/// The Offset field is used to access the actual data. In other words, the
+/// data the pointer decribes can be found at
+/// Pointee->rawData() + Pointer.Offset.
+///
+///
+/// Pointee Offset
+/// │ │
+/// │ │
+/// ▼ ▼
+/// ┌───────┬────────────┬─────────┬────────────────────────────┐
+/// │ Block │ InlineDesc │ InitMap │ Actual Data │
+/// └───────┴────────────┴─────────┴────────────────────────────┘
+/// ▲
+/// │
+/// │
+/// Base
class Pointer {
private:
- static constexpr unsigned PastEndMark = (unsigned)-1;
- static constexpr unsigned RootPtrMark = (unsigned)-1;
+ static constexpr unsigned PastEndMark = ~0u;
+ static constexpr unsigned RootPtrMark = ~0u;
public:
Pointer() {}
Pointer(Block *B);
+ Pointer(Block *B, unsigned BaseAndOffset);
Pointer(const Pointer &P);
Pointer(Pointer &&P);
~Pointer();
@@ -51,29 +78,56 @@ public:
void operator=(const Pointer &P);
void operator=(Pointer &&P);
+ /// Equality operators are just for tests.
+ bool operator==(const Pointer &P) const {
+ return Pointee == P.Pointee && Base == P.Base && Offset == P.Offset;
+ }
+
+ bool operator!=(const Pointer &P) const {
+ return Pointee != P.Pointee || Base != P.Base || Offset != P.Offset;
+ }
+
/// Converts the pointer to an APValue.
APValue toAPValue() const;
+ /// Converts the pointer to a string usable in diagnostics.
+ std::string toDiagnosticString(const ASTContext &Ctx) const;
+
+ unsigned getIntegerRepresentation() const {
+ return reinterpret_cast<uintptr_t>(Pointee) + Offset;
+ }
+
+ /// Converts the pointer to an APValue that is an rvalue.
+ std::optional<APValue> toRValue(const Context &Ctx) const;
+
/// Offsets a pointer inside an array.
- Pointer atIndex(unsigned Idx) const {
+ [[nodiscard]] Pointer atIndex(unsigned Idx) const {
if (Base == RootPtrMark)
return Pointer(Pointee, RootPtrMark, getDeclDesc()->getSize());
unsigned Off = Idx * elemSize();
if (getFieldDesc()->ElemDesc)
Off += sizeof(InlineDescriptor);
else
- Off += sizeof(InitMap *);
+ Off += sizeof(InitMapPtr);
return Pointer(Pointee, Base, Base + Off);
}
/// Creates a pointer to a field.
- Pointer atField(unsigned Off) const {
+ [[nodiscard]] Pointer atField(unsigned Off) const {
unsigned Field = Offset + Off;
return Pointer(Pointee, Field, Field);
}
+ /// Subtract the given offset from the current Base and Offset
+ /// of the pointer.
+ [[nodiscard]] Pointer atFieldSub(unsigned Off) const {
+ assert(Offset >= Off);
+ unsigned O = Offset - Off;
+ return Pointer(Pointee, O, O);
+ }
+
/// Restricts the scope of an array element pointer.
- Pointer narrow() const {
+ [[nodiscard]] Pointer narrow() const {
// Null pointers cannot be narrowed.
if (isZero() || isUnknownSizeArray())
return *this;
@@ -93,7 +147,7 @@ public:
if (inPrimitiveArray()) {
if (Offset != Base)
return *this;
- return Pointer(Pointee, Base, Offset + sizeof(InitMap *));
+ return Pointer(Pointee, Base, Offset + sizeof(InitMapPtr));
}
// Pointer is to a field or array element - enter it.
@@ -109,12 +163,12 @@ public:
}
/// Expands a pointer to the containing array, undoing narrowing.
- Pointer expand() const {
+ [[nodiscard]] Pointer expand() const {
if (isElementPastEnd()) {
// Revert to an outer one-past-end pointer.
unsigned Adjust;
if (inPrimitiveArray())
- Adjust = sizeof(InitMap *);
+ Adjust = sizeof(InitMapPtr);
else
Adjust = sizeof(InlineDescriptor);
return Pointer(Pointee, Base, Base + getSize() + Adjust);
@@ -130,7 +184,8 @@ public:
// Step into the containing array, if inside one.
unsigned Next = Base - getInlineDesc()->Offset;
- Descriptor *Desc = Next == 0 ? getDeclDesc() : getDescriptor(Next)->Desc;
+ const Descriptor *Desc =
+ Next == 0 ? getDeclDesc() : getDescriptor(Next)->Desc;
if (!Desc->IsArray)
return *this;
return Pointer(Pointee, Next, Offset);
@@ -144,11 +199,14 @@ public:
bool isField() const { return Base != 0 && Base != RootPtrMark; }
/// Accessor for information about the declaration site.
- Descriptor *getDeclDesc() const { return Pointee->Desc; }
+ const Descriptor *getDeclDesc() const {
+ assert(Pointee);
+ return Pointee->Desc;
+ }
SourceLocation getDeclLoc() const { return getDeclDesc()->getLocation(); }
/// Returns a pointer to the object of which this pointer is a field.
- Pointer getBase() const {
+ [[nodiscard]] Pointer getBase() const {
if (Base == RootPtrMark) {
assert(Offset == PastEndMark && "cannot get base of a block");
return Pointer(Pointee, Base, 0);
@@ -158,7 +216,7 @@ public:
return Pointer(Pointee, NewBase, NewBase);
}
/// Returns the parent array.
- Pointer getArray() const {
+ [[nodiscard]] Pointer getArray() const {
if (Base == RootPtrMark) {
assert(Offset != 0 && Offset != PastEndMark && "not an array element");
return Pointer(Pointee, Base, 0);
@@ -168,14 +226,20 @@ public:
}
/// Accessors for information about the innermost field.
- Descriptor *getFieldDesc() const {
+ const Descriptor *getFieldDesc() const {
if (Base == 0 || Base == RootPtrMark)
return getDeclDesc();
return getInlineDesc()->Desc;
}
/// Returns the type of the innermost field.
- QualType getType() const { return getFieldDesc()->getType(); }
+ QualType getType() const {
+ if (inPrimitiveArray() && Offset != Base)
+ return getFieldDesc()->getType()->getAsArrayTypeUnsafe()->getElementType();
+ return getFieldDesc()->getType();
+ }
+
+ [[nodiscard]] Pointer getDeclPtr() const { return Pointer(Pointee); }
/// Returns the element size of the innermost field.
size_t elemSize() const {
@@ -197,11 +261,15 @@ public:
if (getFieldDesc()->ElemDesc)
Adjust = sizeof(InlineDescriptor);
else
- Adjust = sizeof(InitMap *);
+ Adjust = sizeof(InitMapPtr);
}
return Offset - Base - Adjust;
}
+ /// Whether this array refers to an array, but not
+ /// to the first element.
+ bool isArrayRoot() const { return inArray() && Offset == Base; }
+
/// Checks if the innermost field is an array.
bool inArray() const { return getFieldDesc()->IsArray; }
/// Checks if the structure is a primitive array.
@@ -211,14 +279,19 @@ public:
return getFieldDesc()->isUnknownSizeArray();
}
/// Checks if the pointer points to an array.
- bool isArrayElement() const { return Base != Offset; }
+ bool isArrayElement() const { return inArray() && Base != Offset; }
/// Pointer points directly to a block.
bool isRoot() const {
return (Base == 0 || Base == RootPtrMark) && Offset == 0;
}
/// Returns the record descriptor of a class.
- Record *getRecord() const { return getFieldDesc()->ElemRecord; }
+ const Record *getRecord() const { return getFieldDesc()->ElemRecord; }
+ /// Returns the element record type, if this is a non-primive array.
+ const Record *getElemRecord() const {
+ const Descriptor *ElemDesc = getFieldDesc()->ElemDesc;
+ return ElemDesc ? ElemDesc->ElemRecord : nullptr;
+ }
/// Returns the field information.
const FieldDecl *getField() const { return getFieldDesc()->asFieldDecl(); }
@@ -226,22 +299,32 @@ public:
bool isUnion() const;
/// Checks if the storage is extern.
- bool isExtern() const { return Pointee->isExtern(); }
+ bool isExtern() const { return Pointee && Pointee->isExtern(); }
/// Checks if the storage is static.
- bool isStatic() const { return Pointee->isStatic(); }
+ bool isStatic() const {
+ assert(Pointee);
+ return Pointee->isStatic();
+ }
/// Checks if the storage is temporary.
- bool isTemporary() const { return Pointee->isTemporary(); }
+ bool isTemporary() const {
+ assert(Pointee);
+ return Pointee->isTemporary();
+ }
/// Checks if the storage is a static temporary.
bool isStaticTemporary() const { return isStatic() && isTemporary(); }
/// Checks if the field is mutable.
- bool isMutable() const { return Base != 0 && getInlineDesc()->IsMutable; }
+ bool isMutable() const {
+ return Base != 0 && getInlineDesc()->IsFieldMutable;
+ }
/// Checks if an object was initialized.
bool isInitialized() const;
/// Checks if the object is active.
bool isActive() const { return Base == 0 || getInlineDesc()->IsActive; }
/// Checks if a structure is a base class.
bool isBaseClass() const { return isField() && getInlineDesc()->IsBase; }
+ /// Checks if the pointer pointers to a dummy value.
+ bool isDummy() const { return getDeclDesc()->isDummy(); }
/// Checks if an object or a subfield is mutable.
bool isConst() const {
@@ -249,7 +332,10 @@ public:
}
/// Returns the declaration ID.
- llvm::Optional<unsigned> getDeclID() const { return Pointee->getDeclID(); }
+ std::optional<unsigned> getDeclID() const {
+ assert(Pointee);
+ return Pointee->getDeclID();
+ }
/// Returns the byte offset from the start.
unsigned getByteOffset() const {
@@ -259,10 +345,17 @@ public:
/// Returns the number of elements.
unsigned getNumElems() const { return getSize() / elemSize(); }
+ const Block *block() const { return Pointee; }
+
/// Returns the index into an array.
int64_t getIndex() const {
if (isElementPastEnd())
return 1;
+
+ // narrow()ed element in a composite array.
+ if (Base > 0 && Base == Offset)
+ return 0;
+
if (auto ElemSize = elemSize())
return getOffset() / ElemSize;
return 0;
@@ -270,6 +363,8 @@ public:
/// Checks if the index is one past end.
bool isOnePastEnd() const {
+ if (!Pointee)
+ return false;
return isElementPastEnd() || getSize() == getOffset();
}
@@ -279,12 +374,20 @@ public:
/// Dereferences the pointer, if it's live.
template <typename T> T &deref() const {
assert(isLive() && "Invalid pointer");
- return *reinterpret_cast<T *>(Pointee->data() + Offset);
+ assert(Pointee);
+ if (isArrayRoot())
+ return *reinterpret_cast<T *>(Pointee->rawData() + Base +
+ sizeof(InitMapPtr));
+
+ assert(Offset + sizeof(T) <= Pointee->getDescriptor()->getAllocSize());
+ return *reinterpret_cast<T *>(Pointee->rawData() + Offset);
}
/// Dereferences a primitive element.
template <typename T> T &elem(unsigned I) const {
- return reinterpret_cast<T *>(Pointee->data())[I];
+ assert(I < getNumElems());
+ assert(Pointee);
+ return reinterpret_cast<T *>(Pointee->data() + sizeof(InitMapPtr))[I];
}
/// Initializes a field.
@@ -294,6 +397,19 @@ public:
/// Deactivates an entire strurcutre.
void deactivate() const;
+ /// Compare two pointers.
+ ComparisonCategoryResult compare(const Pointer &Other) const {
+ if (!hasSameBase(*this, Other))
+ return ComparisonCategoryResult::Unordered;
+
+ if (Offset < Other.Offset)
+ return ComparisonCategoryResult::Less;
+ else if (Offset > Other.Offset)
+ return ComparisonCategoryResult::Greater;
+
+ return ComparisonCategoryResult::Equal;
+ }
+
/// Checks if two pointers are comparable.
static bool hasSameBase(const Pointer &A, const Pointer &B);
/// Checks if two pointers can be subtracted.
@@ -301,7 +417,17 @@ public:
/// Prints the pointer.
void print(llvm::raw_ostream &OS) const {
- OS << "{" << Base << ", " << Offset << ", ";
+ OS << Pointee << " {";
+ if (Base == RootPtrMark)
+ OS << "rootptr, ";
+ else
+ OS << Base << ", ";
+
+ if (Offset == PastEndMark)
+ OS << "pastend, ";
+ else
+ OS << Offset << ", ";
+
if (Pointee)
OS << Pointee->getSize();
else
@@ -312,6 +438,7 @@ public:
private:
friend class Block;
friend class DeadBlock;
+ friend struct InitMap;
Pointer(Block *Pointee, unsigned Base, unsigned Offset);
@@ -321,12 +448,15 @@ private:
/// Returns a descriptor at a given offset.
InlineDescriptor *getDescriptor(unsigned Offset) const {
assert(Offset != 0 && "Not a nested pointer");
- return reinterpret_cast<InlineDescriptor *>(Pointee->data() + Offset) - 1;
+ assert(Pointee);
+ return reinterpret_cast<InlineDescriptor *>(Pointee->rawData() + Offset) -
+ 1;
}
- /// Returns a reference to the pointer which stores the initialization map.
- InitMap *&getInitMap() const {
- return *reinterpret_cast<InitMap **>(Pointee->data() + Base);
+ /// Returns a reference to the InitMapPtr which stores the initialization map.
+ InitMapPtr &getInitMap() const {
+ assert(Pointee);
+ return *reinterpret_cast<InitMapPtr *>(Pointee->rawData() + Base);
}
/// The block the pointer is pointing to.
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/PrimType.cpp b/contrib/llvm-project/clang/lib/AST/Interp/PrimType.cpp
index 082bfaf3c207..9b96dcfe6a27 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/PrimType.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/PrimType.cpp
@@ -1,4 +1,4 @@
-//===--- Type.cpp - Types for the constexpr VM ------------------*- C++ -*-===//
+//===--- PrimType.cpp - Types for the constexpr VM --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -7,6 +7,11 @@
//===----------------------------------------------------------------------===//
#include "PrimType.h"
+#include "Boolean.h"
+#include "Floating.h"
+#include "FunctionPointer.h"
+#include "IntegralAP.h"
+#include "Pointer.h"
using namespace clang;
using namespace clang::interp;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/PrimType.h b/contrib/llvm-project/clang/lib/AST/Interp/PrimType.h
index f5f4f8e5c32d..8c5e87f37be1 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/PrimType.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/PrimType.h
@@ -13,16 +13,21 @@
#ifndef LLVM_CLANG_AST_INTERP_TYPE_H
#define LLVM_CLANG_AST_INTERP_TYPE_H
+#include "llvm/Support/raw_ostream.h"
#include <climits>
#include <cstddef>
#include <cstdint>
-#include "Boolean.h"
-#include "Integral.h"
-#include "Pointer.h"
namespace clang {
namespace interp {
+class Pointer;
+class Boolean;
+class Floating;
+class FunctionPointer;
+template <bool Signed> class IntegralAP;
+template <unsigned Bits, bool Signed> class Integral;
+
/// Enumeration of the primitive types of the VM.
enum PrimType : unsigned {
PT_Sint8,
@@ -33,9 +38,28 @@ enum PrimType : unsigned {
PT_Uint32,
PT_Sint64,
PT_Uint64,
+ PT_IntAP,
+ PT_IntAPS,
PT_Bool,
+ PT_Float,
PT_Ptr,
+ PT_FnPtr,
+};
+
+enum class CastKind : uint8_t {
+ Reinterpret,
};
+inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
+ interp::CastKind CK) {
+ switch (CK) {
+ case interp::CastKind::Reinterpret:
+ OS << "reinterpret_cast";
+ break;
+ }
+ return OS;
+}
+
+constexpr bool isIntegralType(PrimType T) { return T <= PT_Bool; }
/// Mapping from primitive types to their representation.
template <PrimType T> struct PrimConv;
@@ -47,8 +71,18 @@ template <> struct PrimConv<PT_Sint32> { using T = Integral<32, true>; };
template <> struct PrimConv<PT_Uint32> { using T = Integral<32, false>; };
template <> struct PrimConv<PT_Sint64> { using T = Integral<64, true>; };
template <> struct PrimConv<PT_Uint64> { using T = Integral<64, false>; };
+template <> struct PrimConv<PT_IntAP> {
+ using T = IntegralAP<false>;
+};
+template <> struct PrimConv<PT_IntAPS> {
+ using T = IntegralAP<true>;
+};
+template <> struct PrimConv<PT_Float> { using T = Floating; };
template <> struct PrimConv<PT_Bool> { using T = Boolean; };
template <> struct PrimConv<PT_Ptr> { using T = Pointer; };
+template <> struct PrimConv<PT_FnPtr> {
+ using T = FunctionPointer;
+};
/// Returns the size of a primitive type in bytes.
size_t primSize(PrimType Type);
@@ -58,21 +92,11 @@ constexpr size_t align(size_t Size) {
return ((Size + alignof(void *) - 1) / alignof(void *)) * alignof(void *);
}
-inline bool isPrimitiveIntegral(PrimType Type) {
- switch (Type) {
- case PT_Bool:
- case PT_Sint8:
- case PT_Uint8:
- case PT_Sint16:
- case PT_Uint16:
- case PT_Sint32:
- case PT_Uint32:
- case PT_Sint64:
- case PT_Uint64:
- return true;
- default:
- return false;
- }
+constexpr bool aligned(uintptr_t Value) { return Value == align(Value); }
+static_assert(aligned(sizeof(void *)));
+
+static inline bool aligned(const void *P) {
+ return aligned(reinterpret_cast<uintptr_t>(P));
}
} // namespace interp
@@ -81,35 +105,51 @@ inline bool isPrimitiveIntegral(PrimType Type) {
/// Helper macro to simplify type switches.
/// The macro implicitly exposes a type T in the scope of the inner block.
#define TYPE_SWITCH_CASE(Name, B) \
- case Name: { using T = PrimConv<Name>::T; do {B;} while(0); break; }
+ case Name: { using T = PrimConv<Name>::T; B; break; }
#define TYPE_SWITCH(Expr, B) \
- switch (Expr) { \
- TYPE_SWITCH_CASE(PT_Sint8, B) \
- TYPE_SWITCH_CASE(PT_Uint8, B) \
- TYPE_SWITCH_CASE(PT_Sint16, B) \
- TYPE_SWITCH_CASE(PT_Uint16, B) \
- TYPE_SWITCH_CASE(PT_Sint32, B) \
- TYPE_SWITCH_CASE(PT_Uint32, B) \
- TYPE_SWITCH_CASE(PT_Sint64, B) \
- TYPE_SWITCH_CASE(PT_Uint64, B) \
- TYPE_SWITCH_CASE(PT_Bool, B) \
- TYPE_SWITCH_CASE(PT_Ptr, B) \
- }
-#define COMPOSITE_TYPE_SWITCH(Expr, B, D) \
- switch (Expr) { \
- TYPE_SWITCH_CASE(PT_Ptr, B) \
- default: do { D; } while(0); break; \
- }
+ do { \
+ switch (Expr) { \
+ TYPE_SWITCH_CASE(PT_Sint8, B) \
+ TYPE_SWITCH_CASE(PT_Uint8, B) \
+ TYPE_SWITCH_CASE(PT_Sint16, B) \
+ TYPE_SWITCH_CASE(PT_Uint16, B) \
+ TYPE_SWITCH_CASE(PT_Sint32, B) \
+ TYPE_SWITCH_CASE(PT_Uint32, B) \
+ TYPE_SWITCH_CASE(PT_Sint64, B) \
+ TYPE_SWITCH_CASE(PT_Uint64, B) \
+ TYPE_SWITCH_CASE(PT_IntAP, B) \
+ TYPE_SWITCH_CASE(PT_IntAPS, B) \
+ TYPE_SWITCH_CASE(PT_Float, B) \
+ TYPE_SWITCH_CASE(PT_Bool, B) \
+ TYPE_SWITCH_CASE(PT_Ptr, B) \
+ TYPE_SWITCH_CASE(PT_FnPtr, B) \
+ } \
+ } while (0)
+
#define INT_TYPE_SWITCH(Expr, B) \
- switch (Expr) { \
- TYPE_SWITCH_CASE(PT_Sint8, B) \
- TYPE_SWITCH_CASE(PT_Uint8, B) \
- TYPE_SWITCH_CASE(PT_Sint16, B) \
- TYPE_SWITCH_CASE(PT_Uint16, B) \
- TYPE_SWITCH_CASE(PT_Sint32, B) \
- TYPE_SWITCH_CASE(PT_Uint32, B) \
- TYPE_SWITCH_CASE(PT_Sint64, B) \
- TYPE_SWITCH_CASE(PT_Uint64, B) \
- default: llvm_unreachable("not an integer"); \
- }
+ do { \
+ switch (Expr) { \
+ TYPE_SWITCH_CASE(PT_Sint8, B) \
+ TYPE_SWITCH_CASE(PT_Uint8, B) \
+ TYPE_SWITCH_CASE(PT_Sint16, B) \
+ TYPE_SWITCH_CASE(PT_Uint16, B) \
+ TYPE_SWITCH_CASE(PT_Sint32, B) \
+ TYPE_SWITCH_CASE(PT_Uint32, B) \
+ TYPE_SWITCH_CASE(PT_Sint64, B) \
+ TYPE_SWITCH_CASE(PT_Uint64, B) \
+ TYPE_SWITCH_CASE(PT_IntAP, B) \
+ TYPE_SWITCH_CASE(PT_IntAPS, B) \
+ TYPE_SWITCH_CASE(PT_Bool, B) \
+ default: \
+ llvm_unreachable("Not an integer value"); \
+ } \
+ } while (0)
+
+#define COMPOSITE_TYPE_SWITCH(Expr, B, D) \
+ do { \
+ switch (Expr) { \
+ TYPE_SWITCH_CASE(PT_Ptr, B) \
+ default: { D; break; } \
+ } \
+ } while (0)
#endif
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Primitives.h b/contrib/llvm-project/clang/lib/AST/Interp/Primitives.h
new file mode 100644
index 000000000000..e935dbfd3691
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Primitives.h
@@ -0,0 +1,36 @@
+//===------ Primitives.h - Types for the constexpr VM -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Utilities and helper functions for all primitive types:
+// - Integral
+// - Floating
+// - Boolean
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_INTERP_PRIMITIVES_H
+#define LLVM_CLANG_AST_INTERP_PRIMITIVES_H
+
+#include "clang/AST/ComparisonCategories.h"
+
+namespace clang {
+namespace interp {
+
+/// Helper to compare two comparable types.
+template <typename T> ComparisonCategoryResult Compare(const T &X, const T &Y) {
+ if (X < Y)
+ return ComparisonCategoryResult::Less;
+ if (X > Y)
+ return ComparisonCategoryResult::Greater;
+ return ComparisonCategoryResult::Equal;
+}
+
+} // namespace interp
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Program.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Program.cpp
index fcbab0ea8172..1daefab4dcda 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Program.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Program.cpp
@@ -10,6 +10,7 @@
#include "ByteCodeStmtGen.h"
#include "Context.h"
#include "Function.h"
+#include "Integral.h"
#include "Opcode.h"
#include "PrimType.h"
#include "clang/AST/Decl.h"
@@ -18,6 +19,21 @@
using namespace clang;
using namespace clang::interp;
+unsigned Program::getOrCreateNativePointer(const void *Ptr) {
+ auto It = NativePointerIndices.find(Ptr);
+ if (It != NativePointerIndices.end())
+ return It->second;
+
+ unsigned Idx = NativePointers.size();
+ NativePointers.push_back(Ptr);
+ NativePointerIndices[Ptr] = Idx;
+ return Idx;
+}
+
+const void *Program::getNativePointer(unsigned Idx) {
+ return NativePointers[Idx];
+}
+
unsigned Program::createGlobalString(const StringLiteral *S) {
const size_t CharWidth = S->getCharByteWidth();
const size_t BitWidth = CharWidth * Ctx.getCharBit();
@@ -38,10 +54,11 @@ unsigned Program::createGlobalString(const StringLiteral *S) {
}
// Create a descriptor for the string.
- Descriptor *Desc = allocateDescriptor(S, CharType, S->getLength() + 1,
- /*isConst=*/true,
- /*isTemporary=*/false,
- /*isMutable=*/false);
+ Descriptor *Desc =
+ allocateDescriptor(S, CharType, std::nullopt, S->getLength() + 1,
+ /*isConst=*/true,
+ /*isTemporary=*/false,
+ /*isMutable=*/false);
// Allocate storage for the string.
// The byte length does not include the null terminator.
@@ -49,6 +66,7 @@ unsigned Program::createGlobalString(const StringLiteral *S) {
unsigned Sz = Desc->getAllocSize();
auto *G = new (Allocator, Sz) Global(Desc, /*isStatic=*/true,
/*isExtern=*/false);
+ G->block()->invokeCtor();
Globals.push_back(G);
// Construct the string in storage.
@@ -84,13 +102,13 @@ Pointer Program::getPtrGlobal(unsigned Idx) {
return Pointer(Globals[Idx]->block());
}
-llvm::Optional<unsigned> Program::getGlobal(const ValueDecl *VD) {
+std::optional<unsigned> Program::getGlobal(const ValueDecl *VD) {
auto It = GlobalIndices.find(VD);
if (It != GlobalIndices.end())
return It->second;
- // Find any previous declarations which were aleady evaluated.
- llvm::Optional<unsigned> Index;
+ // Find any previous declarations which were already evaluated.
+ std::optional<unsigned> Index;
for (const Decl *P = VD; P; P = P->getPreviousDecl()) {
auto It = GlobalIndices.find(P);
if (It != GlobalIndices.end()) {
@@ -102,76 +120,84 @@ llvm::Optional<unsigned> Program::getGlobal(const ValueDecl *VD) {
// Map the decl to the existing index.
if (Index) {
GlobalIndices[VD] = *Index;
- return {};
+ return std::nullopt;
}
return Index;
}
-llvm::Optional<unsigned> Program::getOrCreateGlobal(const ValueDecl *VD) {
+std::optional<unsigned> Program::getOrCreateGlobal(const ValueDecl *VD,
+ const Expr *Init) {
if (auto Idx = getGlobal(VD))
return Idx;
- if (auto Idx = createGlobal(VD)) {
+ if (auto Idx = createGlobal(VD, Init)) {
GlobalIndices[VD] = *Idx;
return Idx;
}
- return {};
+ return std::nullopt;
}
-llvm::Optional<unsigned> Program::getOrCreateDummy(const ParmVarDecl *PD) {
- auto &ASTCtx = Ctx.getASTContext();
-
- // Create a pointer to an incomplete array of the specified elements.
- QualType ElemTy = PD->getType()->castAs<PointerType>()->getPointeeType();
- QualType Ty = ASTCtx.getIncompleteArrayType(ElemTy, ArrayType::Normal, 0);
-
+std::optional<unsigned> Program::getOrCreateDummy(const ValueDecl *VD) {
// Dedup blocks since they are immutable and pointers cannot be compared.
- auto It = DummyParams.find(PD);
- if (It != DummyParams.end())
+ if (auto It = DummyParams.find(VD); It != DummyParams.end())
return It->second;
- if (auto Idx = createGlobal(PD, Ty, /*isStatic=*/true, /*isExtern=*/true)) {
- DummyParams[PD] = *Idx;
- return Idx;
- }
- return {};
+ // Create dummy descriptor.
+ Descriptor *Desc = allocateDescriptor(VD, std::nullopt);
+ // Allocate a block for storage.
+ unsigned I = Globals.size();
+
+ auto *G = new (Allocator, Desc->getAllocSize())
+ Global(getCurrentDecl(), Desc, /*IsStatic=*/true, /*IsExtern=*/false);
+ G->block()->invokeCtor();
+
+ Globals.push_back(G);
+ DummyParams[VD] = I;
+ return I;
}
-llvm::Optional<unsigned> Program::createGlobal(const ValueDecl *VD) {
+std::optional<unsigned> Program::createGlobal(const ValueDecl *VD,
+ const Expr *Init) {
+ assert(!getGlobal(VD));
bool IsStatic, IsExtern;
- if (auto *Var = dyn_cast<VarDecl>(VD)) {
- IsStatic = !Var->hasLocalStorage();
+ if (const auto *Var = dyn_cast<VarDecl>(VD)) {
+ IsStatic = Context::shouldBeGloballyIndexed(VD);
IsExtern = !Var->getAnyInitializer();
+ } else if (isa<UnnamedGlobalConstantDecl>(VD)) {
+ IsStatic = true;
+ IsExtern = false;
} else {
IsStatic = false;
IsExtern = true;
}
- if (auto Idx = createGlobal(VD, VD->getType(), IsStatic, IsExtern)) {
+ if (auto Idx = createGlobal(VD, VD->getType(), IsStatic, IsExtern, Init)) {
for (const Decl *P = VD; P; P = P->getPreviousDecl())
GlobalIndices[P] = *Idx;
return *Idx;
}
- return {};
+ return std::nullopt;
}
-llvm::Optional<unsigned> Program::createGlobal(const Expr *E) {
+std::optional<unsigned> Program::createGlobal(const Expr *E) {
return createGlobal(E, E->getType(), /*isStatic=*/true, /*isExtern=*/false);
}
-llvm::Optional<unsigned> Program::createGlobal(const DeclTy &D, QualType Ty,
- bool IsStatic, bool IsExtern) {
+std::optional<unsigned> Program::createGlobal(const DeclTy &D, QualType Ty,
+ bool IsStatic, bool IsExtern,
+ const Expr *Init) {
// Create a descriptor for the global.
Descriptor *Desc;
const bool IsConst = Ty.isConstQualified();
const bool IsTemporary = D.dyn_cast<const Expr *>();
if (auto T = Ctx.classify(Ty)) {
- Desc = createDescriptor(D, *T, IsConst, IsTemporary);
+ Desc = createDescriptor(D, *T, std::nullopt, IsConst, IsTemporary);
} else {
- Desc = createDescriptor(D, Ty.getTypePtr(), IsConst, IsTemporary);
+ Desc = createDescriptor(D, Ty.getTypePtr(), std::nullopt, IsConst,
+ IsTemporary);
}
if (!Desc)
- return {};
+ return std::nullopt;
// Allocate a block for storage.
unsigned I = Globals.size();
@@ -186,24 +212,12 @@ llvm::Optional<unsigned> Program::createGlobal(const DeclTy &D, QualType Ty,
}
Function *Program::getFunction(const FunctionDecl *F) {
- F = F->getDefinition();
+ F = F->getCanonicalDecl();
+ assert(F);
auto It = Funcs.find(F);
return It == Funcs.end() ? nullptr : It->second.get();
}
-llvm::Expected<Function *> Program::getOrCreateFunction(const FunctionDecl *F) {
- if (Function *Func = getFunction(F)) {
- return Func;
- }
-
- // Try to compile the function if it wasn't compiled yet.
- if (const FunctionDecl *FD = F->getDefinition())
- return ByteCodeStmtGen<ByteCodeEmitter>(Ctx, *this).compileFunc(FD);
-
- // A relocation which traps if not resolved.
- return nullptr;
-}
-
Record *Program::getOrCreateRecord(const RecordDecl *RD) {
// Use the actual definition as a key.
RD = RD->getDefinition();
@@ -211,13 +225,16 @@ Record *Program::getOrCreateRecord(const RecordDecl *RD) {
return nullptr;
// Deduplicate records.
- auto It = Records.find(RD);
- if (It != Records.end()) {
+ if (auto It = Records.find(RD); It != Records.end())
return It->second;
- }
+
+ // We insert nullptr now and replace that later, so recursive calls
+ // to this function with the same RecordDecl don't run into
+ // infinite recursion.
+ Records.insert({RD, nullptr});
// Number of bytes required by fields and base classes.
- unsigned Size = 0;
+ unsigned BaseSize = 0;
// Number of bytes required by virtual base.
unsigned VirtSize = 0;
@@ -225,7 +242,7 @@ Record *Program::getOrCreateRecord(const RecordDecl *RD) {
auto GetBaseDesc = [this](const RecordDecl *BD, Record *BR) -> Descriptor * {
if (!BR)
return nullptr;
- return allocateDescriptor(BD, BR, /*isConst=*/false,
+ return allocateDescriptor(BD, BR, std::nullopt, /*isConst=*/false,
/*isTemporary=*/false,
/*isMutable=*/false);
};
@@ -241,9 +258,9 @@ Record *Program::getOrCreateRecord(const RecordDecl *RD) {
const RecordDecl *BD = Spec.getType()->castAs<RecordType>()->getDecl();
Record *BR = getOrCreateRecord(BD);
if (Descriptor *Desc = GetBaseDesc(BD, BR)) {
- Size += align(sizeof(InlineDescriptor));
- Bases.push_back({BD, Size, Desc, BR});
- Size += align(BR->getSize());
+ BaseSize += align(sizeof(InlineDescriptor));
+ Bases.push_back({BD, BaseSize, Desc, BR});
+ BaseSize += align(BR->getSize());
continue;
}
return nullptr;
@@ -267,79 +284,82 @@ Record *Program::getOrCreateRecord(const RecordDecl *RD) {
Record::FieldList Fields;
for (const FieldDecl *FD : RD->fields()) {
// Reserve space for the field's descriptor and the offset.
- Size += align(sizeof(InlineDescriptor));
+ BaseSize += align(sizeof(InlineDescriptor));
// Classify the field and add its metadata.
QualType FT = FD->getType();
const bool IsConst = FT.isConstQualified();
const bool IsMutable = FD->isMutable();
Descriptor *Desc;
- if (llvm::Optional<PrimType> T = Ctx.classify(FT)) {
- Desc = createDescriptor(FD, *T, IsConst, /*isTemporary=*/false,
- IsMutable);
+ if (std::optional<PrimType> T = Ctx.classify(FT)) {
+ Desc = createDescriptor(FD, *T, std::nullopt, IsConst,
+ /*isTemporary=*/false, IsMutable);
} else {
- Desc = createDescriptor(FD, FT.getTypePtr(), IsConst,
+ Desc = createDescriptor(FD, FT.getTypePtr(), std::nullopt, IsConst,
/*isTemporary=*/false, IsMutable);
}
if (!Desc)
return nullptr;
- Fields.push_back({FD, Size, Desc});
- Size += align(Desc->getAllocSize());
+ Fields.push_back({FD, BaseSize, Desc});
+ BaseSize += align(Desc->getAllocSize());
}
Record *R = new (Allocator) Record(RD, std::move(Bases), std::move(Fields),
- std::move(VirtBases), VirtSize, Size);
- Records.insert({RD, R});
+ std::move(VirtBases), VirtSize, BaseSize);
+ Records[RD] = R;
return R;
}
Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty,
+ Descriptor::MetadataSize MDSize,
bool IsConst, bool IsTemporary,
- bool IsMutable) {
+ bool IsMutable, const Expr *Init) {
// Classes and structures.
- if (auto *RT = Ty->getAs<RecordType>()) {
- if (auto *Record = getOrCreateRecord(RT->getDecl()))
- return allocateDescriptor(D, Record, IsConst, IsTemporary, IsMutable);
+ if (const auto *RT = Ty->getAs<RecordType>()) {
+ if (const auto *Record = getOrCreateRecord(RT->getDecl()))
+ return allocateDescriptor(D, Record, MDSize, IsConst, IsTemporary,
+ IsMutable);
}
// Arrays.
- if (auto ArrayType = Ty->getAsArrayTypeUnsafe()) {
+ if (const auto ArrayType = Ty->getAsArrayTypeUnsafe()) {
QualType ElemTy = ArrayType->getElementType();
// Array of well-known bounds.
if (auto CAT = dyn_cast<ConstantArrayType>(ArrayType)) {
size_t NumElems = CAT->getSize().getZExtValue();
- if (llvm::Optional<PrimType> T = Ctx.classify(ElemTy)) {
+ if (std::optional<PrimType> T = Ctx.classify(ElemTy)) {
// Arrays of primitives.
unsigned ElemSize = primSize(*T);
if (std::numeric_limits<unsigned>::max() / ElemSize <= NumElems) {
return {};
}
- return allocateDescriptor(D, *T, NumElems, IsConst, IsTemporary,
+ return allocateDescriptor(D, *T, MDSize, NumElems, IsConst, IsTemporary,
IsMutable);
} else {
// Arrays of composites. In this case, the array is a list of pointers,
// followed by the actual elements.
- Descriptor *Desc =
- createDescriptor(D, ElemTy.getTypePtr(), IsConst, IsTemporary);
- if (!Desc)
+ const Descriptor *ElemDesc = createDescriptor(
+ D, ElemTy.getTypePtr(), std::nullopt, IsConst, IsTemporary);
+ if (!ElemDesc)
return nullptr;
- InterpSize ElemSize = Desc->getAllocSize() + sizeof(InlineDescriptor);
+ unsigned ElemSize =
+ ElemDesc->getAllocSize() + sizeof(InlineDescriptor);
if (std::numeric_limits<unsigned>::max() / ElemSize <= NumElems)
return {};
- return allocateDescriptor(D, Desc, NumElems, IsConst, IsTemporary,
- IsMutable);
+ return allocateDescriptor(D, ElemDesc, MDSize, NumElems, IsConst,
+ IsTemporary, IsMutable);
}
}
// Array of unknown bounds - cannot be accessed and pointer arithmetic
// is forbidden on pointers to such objects.
if (isa<IncompleteArrayType>(ArrayType)) {
- if (llvm::Optional<PrimType> T = Ctx.classify(ElemTy)) {
+ if (std::optional<PrimType> T = Ctx.classify(ElemTy)) {
return allocateDescriptor(D, *T, IsTemporary,
Descriptor::UnknownSize{});
} else {
- Descriptor *Desc =
- createDescriptor(D, ElemTy.getTypePtr(), IsConst, IsTemporary);
+ const Descriptor *Desc = createDescriptor(D, ElemTy.getTypePtr(),
+ MDSize, IsConst, IsTemporary);
if (!Desc)
return nullptr;
return allocateDescriptor(D, Desc, IsTemporary,
@@ -349,15 +369,17 @@ Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty,
}
// Atomic types.
- if (auto *AT = Ty->getAs<AtomicType>()) {
+ if (const auto *AT = Ty->getAs<AtomicType>()) {
const Type *InnerTy = AT->getValueType().getTypePtr();
- return createDescriptor(D, InnerTy, IsConst, IsTemporary, IsMutable);
+ return createDescriptor(D, InnerTy, MDSize, IsConst, IsTemporary,
+ IsMutable);
}
// Complex types - represented as arrays of elements.
- if (auto *CT = Ty->getAs<ComplexType>()) {
+ if (const auto *CT = Ty->getAs<ComplexType>()) {
PrimType ElemTy = *Ctx.classify(CT->getElementType());
- return allocateDescriptor(D, ElemTy, 2, IsConst, IsTemporary, IsMutable);
+ return allocateDescriptor(D, ElemTy, MDSize, 2, IsConst, IsTemporary,
+ IsMutable);
}
return nullptr;
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Program.h b/contrib/llvm-project/clang/lib/AST/Interp/Program.h
index 5f0012db9b3f..17342680102c 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Program.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Program.h
@@ -29,21 +29,40 @@ namespace clang {
class RecordDecl;
class Expr;
class FunctionDecl;
-class Stmt;
class StringLiteral;
class VarDecl;
namespace interp {
class Context;
-class State;
class Record;
-class Scope;
/// The program contains and links the bytecode for all functions.
-class Program {
+class Program final {
public:
Program(Context &Ctx) : Ctx(Ctx) {}
+ ~Program() {
+ // Manually destroy all the blocks. They are almost all harmless,
+ // but primitive arrays might have an InitMap* heap allocated and
+ // that needs to be freed.
+ for (Global *G : Globals)
+ G->block()->invokeDtor();
+
+ // Records might actually allocate memory themselves, but they
+ // are allocated using a BumpPtrAllocator. Call their desctructors
+ // here manually so they are properly freeing their resources.
+ for (auto RecordPair : Records) {
+ if (Record *R = RecordPair.second)
+ R->~Record();
+ }
+ }
+
+ /// Marshals a native pointer to an ID for embedding in bytecode.
+ unsigned getOrCreateNativePointer(const void *Ptr);
+
+ /// Returns the value of a marshalled native pointer.
+ const void *getNativePointer(unsigned Idx);
+
/// Emits a string literal among global data.
unsigned createGlobalString(const StringLiteral *S);
@@ -57,23 +76,25 @@ public:
}
/// Finds a global's index.
- llvm::Optional<unsigned> getGlobal(const ValueDecl *VD);
+ std::optional<unsigned> getGlobal(const ValueDecl *VD);
/// Returns or creates a global an creates an index to it.
- llvm::Optional<unsigned> getOrCreateGlobal(const ValueDecl *VD);
+ std::optional<unsigned> getOrCreateGlobal(const ValueDecl *VD,
+ const Expr *Init = nullptr);
- /// Returns or creates a dummy value for parameters.
- llvm::Optional<unsigned> getOrCreateDummy(const ParmVarDecl *PD);
+ /// Returns or creates a dummy value for unknown declarations.
+ std::optional<unsigned> getOrCreateDummy(const ValueDecl *VD);
/// Creates a global and returns its index.
- llvm::Optional<unsigned> createGlobal(const ValueDecl *VD);
+ std::optional<unsigned> createGlobal(const ValueDecl *VD, const Expr *E);
/// Creates a global from a lifetime-extended temporary.
- llvm::Optional<unsigned> createGlobal(const Expr *E);
+ std::optional<unsigned> createGlobal(const Expr *E);
/// Creates a new function from a code range.
template <typename... Ts>
Function *createFunction(const FunctionDecl *Def, Ts &&... Args) {
+ Def = Def->getCanonicalDecl();
auto *Func = new Function(*this, Def, std::forward<Ts>(Args)...);
Funcs.insert({Def, std::unique_ptr<Function>(Func)});
return Func;
@@ -89,31 +110,30 @@ public:
/// Returns a function.
Function *getFunction(const FunctionDecl *F);
- /// Returns a pointer to a function if it exists and can be compiled.
- /// If a function couldn't be compiled, an error is returned.
- /// If a function was not yet defined, a null pointer is returned.
- llvm::Expected<Function *> getOrCreateFunction(const FunctionDecl *F);
-
/// Returns a record or creates one if it does not exist.
Record *getOrCreateRecord(const RecordDecl *RD);
/// Creates a descriptor for a primitive type.
Descriptor *createDescriptor(const DeclTy &D, PrimType Type,
- bool IsConst = false,
- bool IsTemporary = false,
+ Descriptor::MetadataSize MDSize = std::nullopt,
+ bool IsConst = false, bool IsTemporary = false,
bool IsMutable = false) {
- return allocateDescriptor(D, Type, IsConst, IsTemporary, IsMutable);
+ return allocateDescriptor(D, Type, MDSize, IsConst, IsTemporary, IsMutable);
}
/// Creates a descriptor for a composite type.
Descriptor *createDescriptor(const DeclTy &D, const Type *Ty,
+ Descriptor::MetadataSize MDSize = std::nullopt,
bool IsConst = false, bool IsTemporary = false,
- bool IsMutable = false);
+ bool IsMutable = false,
+ const Expr *Init = nullptr);
/// Context to manage declaration lifetimes.
class DeclScope {
public:
- DeclScope(Program &P, const VarDecl *VD) : P(P) { P.startDeclaration(VD); }
+ DeclScope(Program &P, const ValueDecl *VD) : P(P) {
+ P.startDeclaration(VD);
+ }
~DeclScope() { P.endDeclaration(); }
private:
@@ -121,17 +141,18 @@ public:
};
/// Returns the current declaration ID.
- llvm::Optional<unsigned> getCurrentDecl() const {
+ std::optional<unsigned> getCurrentDecl() const {
if (CurrentDeclaration == NoDeclaration)
- return llvm::Optional<unsigned>{};
+ return std::optional<unsigned>{};
return LastDeclaration;
}
private:
friend class DeclScope;
- llvm::Optional<unsigned> createGlobal(const DeclTy &D, QualType Ty,
- bool IsStatic, bool IsExtern);
+ std::optional<unsigned> createGlobal(const DeclTy &D, QualType Ty,
+ bool IsStatic, bool IsExtern,
+ const Expr *Init = nullptr);
/// Reference to the VM context.
Context &Ctx;
@@ -143,6 +164,11 @@ private:
/// Function relocation locations.
llvm::DenseMap<const FunctionDecl *, std::vector<unsigned>> Relocs;
+ /// Native pointers referenced by bytecode.
+ std::vector<const void *> NativePointers;
+ /// Cached native pointer indices.
+ llvm::DenseMap<const void *, unsigned> NativePointerIndices;
+
/// Custom allocator for global storage.
using PoolAllocTy = llvm::BumpPtrAllocatorImpl<llvm::MallocAllocator>;
@@ -161,7 +187,7 @@ private:
}
/// Return a pointer to the data.
- char *data() { return B.data(); }
+ std::byte *data() { return B.data(); }
/// Return a pointer to the block.
Block *block() { return &B; }
@@ -182,7 +208,7 @@ private:
llvm::DenseMap<const RecordDecl *, Record *> Records;
/// Dummy parameter to generate pointers from.
- llvm::DenseMap<const ParmVarDecl *, unsigned> DummyParams;
+ llvm::DenseMap<const ValueDecl *, unsigned> DummyParams;
/// Creates a new descriptor.
template <typename... Ts>
@@ -198,7 +224,7 @@ private:
unsigned CurrentDeclaration = NoDeclaration;
/// Starts evaluating a declaration.
- void startDeclaration(const VarDecl *Decl) {
+ void startDeclaration(const ValueDecl *Decl) {
LastDeclaration += 1;
CurrentDeclaration = LastDeclaration;
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Record.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Record.cpp
index f440c4705051..909416e6e1a1 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Record.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Record.cpp
@@ -39,6 +39,14 @@ const Record::Base *Record::getBase(const RecordDecl *FD) const {
return It->second;
}
+const Record::Base *Record::getBase(QualType T) const {
+ if (!T->isRecordType())
+ return nullptr;
+
+ const RecordDecl *RD = T->getAs<RecordType>()->getDecl();
+ return BaseMap.lookup(RD);
+}
+
const Record::Base *Record::getVirtualBase(const RecordDecl *FD) const {
auto It = VirtualBaseMap.find(FD);
assert(It != VirtualBaseMap.end() && "Missing virtual base");
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Record.h b/contrib/llvm-project/clang/lib/AST/Interp/Record.h
index 9cdee9003752..b0952af2d1ac 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Record.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Record.h
@@ -13,20 +13,23 @@
#ifndef LLVM_CLANG_AST_INTERP_RECORD_H
#define LLVM_CLANG_AST_INTERP_RECORD_H
-#include "Pointer.h"
+#include "Descriptor.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
namespace clang {
namespace interp {
class Program;
/// Structure/Class descriptor.
-class Record {
+class Record final {
public:
/// Describes a record field.
struct Field {
const FieldDecl *Decl;
unsigned Offset;
Descriptor *Desc;
+ bool isBitField() const { return Decl->isBitField(); }
};
/// Describes a base class.
@@ -47,6 +50,8 @@ public:
public:
/// Returns the underlying declaration.
const RecordDecl *getDecl() const { return Decl; }
+ /// Returns the name of the underlying declaration.
+ const std::string getName() const { return Decl->getNameAsString(); }
/// Checks if the record is a union.
bool isUnion() const { return getDecl()->isUnion(); }
/// Returns the size of the record.
@@ -57,15 +62,24 @@ public:
const Field *getField(const FieldDecl *FD) const;
/// Returns a base descriptor.
const Base *getBase(const RecordDecl *FD) const;
+ /// Returns a base descriptor.
+ const Base *getBase(QualType T) const;
/// Returns a virtual base descriptor.
const Base *getVirtualBase(const RecordDecl *RD) const;
+ /// Returns the destructor of the record, if any.
+ const CXXDestructorDecl *getDestructor() const {
+ if (const auto *CXXDecl = dyn_cast<CXXRecordDecl>(Decl))
+ return CXXDecl->getDestructor();
+ return nullptr;
+ }
using const_field_iter = FieldList::const_iterator;
llvm::iterator_range<const_field_iter> fields() const {
return llvm::make_range(Fields.begin(), Fields.end());
}
- unsigned getNumFields() { return Fields.size(); }
+ unsigned getNumFields() const { return Fields.size(); }
+ const Field *getField(unsigned I) const { return &Fields[I]; }
Field *getField(unsigned I) { return &Fields[I]; }
using const_base_iter = BaseList::const_iterator;
@@ -73,16 +87,19 @@ public:
return llvm::make_range(Bases.begin(), Bases.end());
}
- unsigned getNumBases() { return Bases.size(); }
- Base *getBase(unsigned I) { return &Bases[I]; }
+ unsigned getNumBases() const { return Bases.size(); }
+ const Base *getBase(unsigned I) const {
+ assert(I < getNumBases());
+ return &Bases[I];
+ }
using const_virtual_iter = VirtualBaseList::const_iterator;
llvm::iterator_range<const_virtual_iter> virtual_bases() const {
return llvm::make_range(VirtualBases.begin(), VirtualBases.end());
}
- unsigned getNumVirtualBases() { return VirtualBases.size(); }
- Base *getVirtualBase(unsigned I) { return &VirtualBases[I]; }
+ unsigned getNumVirtualBases() const { return VirtualBases.size(); }
+ const Base *getVirtualBase(unsigned I) const { return &VirtualBases[I]; }
private:
/// Constructor used by Program to create record descriptors.
@@ -108,7 +125,6 @@ private:
llvm::DenseMap<const FieldDecl *, Field *> FieldMap;
/// Mapping from declarations to virtual bases.
llvm::DenseMap<const RecordDecl *, Base *> VirtualBaseMap;
- /// Mapping from
/// Size of the structure.
unsigned BaseSize;
/// Size of all virtual bases.
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Source.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Source.cpp
index 4bec87812638..4e032c92d26d 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Source.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Source.cpp
@@ -22,18 +22,32 @@ SourceLocation SourceInfo::getLoc() const {
return SourceLocation();
}
+SourceRange SourceInfo::getRange() const {
+ if (const Expr *E = asExpr())
+ return E->getSourceRange();
+ if (const Stmt *S = asStmt())
+ return S->getSourceRange();
+ if (const Decl *D = asDecl())
+ return D->getSourceRange();
+ return SourceRange();
+}
+
const Expr *SourceInfo::asExpr() const {
if (auto *S = Source.dyn_cast<const Stmt *>())
return dyn_cast<Expr>(S);
return nullptr;
}
-const Expr *SourceMapper::getExpr(Function *F, CodePtr PC) const {
+const Expr *SourceMapper::getExpr(const Function *F, CodePtr PC) const {
if (const Expr *E = getSource(F, PC).asExpr())
return E;
llvm::report_fatal_error("missing source expression");
}
-SourceLocation SourceMapper::getLocation(Function *F, CodePtr PC) const {
+SourceLocation SourceMapper::getLocation(const Function *F, CodePtr PC) const {
return getSource(F, PC).getLoc();
}
+
+SourceRange SourceMapper::getRange(const Function *F, CodePtr PC) const {
+ return getSource(F, PC).getRange();
+}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Source.h b/contrib/llvm-project/clang/lib/AST/Interp/Source.h
index 19c652b7331a..c28b488ff554 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Source.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Source.h
@@ -13,16 +13,21 @@
#ifndef LLVM_CLANG_AST_INTERP_SOURCE_H
#define LLVM_CLANG_AST_INTERP_SOURCE_H
-#include "clang/AST/Decl.h"
+#include "PrimType.h"
+#include "clang/AST/DeclBase.h"
#include "clang/AST/Stmt.h"
+#include "llvm/ADT/PointerUnion.h"
#include "llvm/Support/Endian.h"
namespace clang {
+class Expr;
+class SourceLocation;
+class SourceRange;
namespace interp {
class Function;
/// Pointer into the code segment.
-class CodePtr {
+class CodePtr final {
public:
CodePtr() : Ptr(nullptr) {}
@@ -42,49 +47,36 @@ public:
}
bool operator!=(const CodePtr &RHS) const { return Ptr != RHS.Ptr; }
+ const std::byte *operator*() const { return Ptr; }
- /// Reads data and advances the pointer.
- template <typename T> T read() {
- T Value = ReadHelper<T>(Ptr);
- Ptr += sizeof(T);
- return Value;
- }
+ operator bool() const { return Ptr; }
-private:
- /// Constructor used by Function to generate pointers.
- CodePtr(const char *Ptr) : Ptr(Ptr) {}
-
- /// Helper to decode a value or a pointer.
- template <typename T>
- static std::enable_if_t<!std::is_pointer<T>::value, T>
- ReadHelper(const char *Ptr) {
- using namespace llvm::support;
- return endian::read<T, endianness::native, 1>(Ptr);
- }
-
- template <typename T>
- static std::enable_if_t<std::is_pointer<T>::value, T>
- ReadHelper(const char *Ptr) {
+ /// Reads data and advances the pointer.
+ template <typename T> std::enable_if_t<!std::is_pointer<T>::value, T> read() {
+ assert(aligned(Ptr));
using namespace llvm::support;
- auto Punned = endian::read<uintptr_t, endianness::native, 1>(Ptr);
- return reinterpret_cast<T>(Punned);
+ T Value = endian::read<T, llvm::endianness::native>(Ptr);
+ Ptr += align(sizeof(T));
+ return Value;
}
private:
friend class Function;
-
+ /// Constructor used by Function to generate pointers.
+ CodePtr(const std::byte *Ptr) : Ptr(Ptr) {}
/// Pointer into the code owned by a function.
- const char *Ptr;
+ const std::byte *Ptr;
};
/// Describes the statement/declaration an opcode was generated from.
-class SourceInfo {
+class SourceInfo final {
public:
SourceInfo() {}
SourceInfo(const Stmt *E) : Source(E) {}
SourceInfo(const Decl *D) : Source(D) {}
SourceLocation getLoc() const;
+ SourceRange getRange() const;
const Stmt *asStmt() const { return Source.dyn_cast<const Stmt *>(); }
const Decl *asDecl() const { return Source.dyn_cast<const Decl *>(); }
@@ -104,12 +96,13 @@ public:
virtual ~SourceMapper() {}
/// Returns source information for a given PC in a function.
- virtual SourceInfo getSource(Function *F, CodePtr PC) const = 0;
+ virtual SourceInfo getSource(const Function *F, CodePtr PC) const = 0;
/// Returns the expression if an opcode belongs to one, null otherwise.
- const Expr *getExpr(Function *F, CodePtr PC) const;
+ const Expr *getExpr(const Function *F, CodePtr PC) const;
/// Returns the location from which an opcode originates.
- SourceLocation getLocation(Function *F, CodePtr PC) const;
+ SourceLocation getLocation(const Function *F, CodePtr PC) const;
+ SourceRange getRange(const Function *F, CodePtr PC) const;
};
} // namespace interp
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/State.cpp b/contrib/llvm-project/clang/lib/AST/Interp/State.cpp
index 56774f88fb45..47fbf5145cd4 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/State.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/State.cpp
@@ -11,6 +11,7 @@
#include "Program.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/OptionalDiagnostic.h"
using namespace clang;
using namespace clang::interp;
@@ -125,16 +126,16 @@ void State::addCallStack(unsigned Limit) {
// Walk the call stack and add the diagnostics.
unsigned CallIdx = 0;
- Frame *Top = getCurrentFrame();
+ const Frame *Top = getCurrentFrame();
const Frame *Bottom = getBottomFrame();
- for (Frame *F = Top; F != Bottom; F = F->getCaller(), ++CallIdx) {
- SourceLocation CallLocation = F->getCallLocation();
+ for (const Frame *F = Top; F != Bottom; F = F->getCaller(), ++CallIdx) {
+ SourceRange CallRange = F->getCallRange();
// Skip this call?
if (CallIdx >= SkipStart && CallIdx < SkipEnd) {
if (CallIdx == SkipStart) {
// Note that we're skipping calls.
- addDiag(CallLocation, diag::note_constexpr_calls_suppressed)
+ addDiag(CallRange.getBegin(), diag::note_constexpr_calls_suppressed)
<< unsigned(ActiveCalls - Limit);
}
continue;
@@ -142,17 +143,19 @@ void State::addCallStack(unsigned Limit) {
// Use a different note for an inheriting constructor, because from the
// user's perspective it's not really a function at all.
- if (auto *CD = dyn_cast_or_null<CXXConstructorDecl>(F->getCallee())) {
- if (CD->isInheritingConstructor()) {
- addDiag(CallLocation, diag::note_constexpr_inherited_ctor_call_here)
- << CD->getParent();
- continue;
- }
+ if (const auto *CD =
+ dyn_cast_if_present<CXXConstructorDecl>(F->getCallee());
+ CD && CD->isInheritingConstructor()) {
+ addDiag(CallRange.getBegin(),
+ diag::note_constexpr_inherited_ctor_call_here)
+ << CD->getParent();
+ continue;
}
SmallString<128> Buffer;
llvm::raw_svector_ostream Out(Buffer);
F->describe(Out);
- addDiag(CallLocation, diag::note_constexpr_call_here) << Out.str();
+ addDiag(CallRange.getBegin(), diag::note_constexpr_call_here)
+ << Out.str() << CallRange;
}
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/State.h b/contrib/llvm-project/clang/lib/AST/Interp/State.h
index d9a645a3eb3e..f1e8e3618f34 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/State.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/State.h
@@ -15,9 +15,9 @@
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/Expr.h"
-#include "clang/AST/OptionalDiagnostic.h"
namespace clang {
+class OptionalDiagnostic;
/// Kinds of access we can perform on an object, for diagnostics. Note that
/// we consider a member function call to be a kind of access, even though
@@ -36,7 +36,7 @@ enum AccessKinds {
AK_Destroy,
};
-// The order of this enum is important for diagnostics.
+/// The order of this enum is important for diagnostics.
enum CheckSubobjectKind {
CSK_Base,
CSK_Derived,
@@ -71,7 +71,8 @@ public:
virtual unsigned getCallStackDepth() = 0;
public:
- // Diagnose that the evaluation could not be folded (FF => FoldFailure)
+ State() = default;
+ /// Diagnose that the evaluation could not be folded (FF => FoldFailure)
OptionalDiagnostic
FFDiag(SourceLocation Loc,
diag::kind DiagId = diag::note_invalid_subexpr_in_const_expr,
@@ -118,6 +119,10 @@ public:
const LangOptions &getLangOpts() const;
+ /// Whether or not we're in a context where the front end requires a
+ /// constant value.
+ bool InConstantContext = false;
+
private:
void addCallStack(unsigned Limit);
diff --git a/contrib/llvm-project/clang/lib/AST/ItaniumCXXABI.cpp b/contrib/llvm-project/clang/lib/AST/ItaniumCXXABI.cpp
index be10258a2d77..c9aadce73141 100644
--- a/contrib/llvm-project/clang/lib/AST/ItaniumCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ItaniumCXXABI.cpp
@@ -26,6 +26,7 @@
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/iterator.h"
+#include <optional>
using namespace clang;
@@ -84,8 +85,8 @@ template<typename T> bool isDenseMapKeyTombstone(T V) {
V, llvm::DenseMapInfo<T>::getTombstoneKey());
}
-template<typename T>
-Optional<bool> areDenseMapKeysEqualSpecialValues(T LHS, T RHS) {
+template <typename T>
+std::optional<bool> areDenseMapKeysEqualSpecialValues(T LHS, T RHS) {
bool LHSEmpty = isDenseMapKeyEmpty(LHS);
bool RHSEmpty = isDenseMapKeyEmpty(RHS);
if (LHSEmpty || RHSEmpty)
@@ -96,7 +97,7 @@ Optional<bool> areDenseMapKeysEqualSpecialValues(T LHS, T RHS) {
if (LHSTombstone || RHSTombstone)
return LHSTombstone && RHSTombstone;
- return None;
+ return std::nullopt;
}
template<>
@@ -113,8 +114,8 @@ struct DenseMapInfo<DecompositionDeclName> {
return llvm::hash_combine_range(Key.begin(), Key.end());
}
static bool isEqual(DecompositionDeclName LHS, DecompositionDeclName RHS) {
- if (Optional<bool> Result = areDenseMapKeysEqualSpecialValues(
- LHS.Bindings, RHS.Bindings))
+ if (std::optional<bool> Result =
+ areDenseMapKeysEqualSpecialValues(LHS.Bindings, RHS.Bindings))
return *Result;
return LHS.Bindings.size() == RHS.Bindings.size() &&
@@ -181,6 +182,37 @@ public:
}
};
+// A version of this for SYCL that makes sure that 'device' mangling context
+// matches the lambda mangling number, so that __builtin_sycl_unique_stable_name
+// can be consistently generated between a MS and Itanium host by just referring
+// to the device mangling number.
+class ItaniumSYCLNumberingContext : public ItaniumNumberingContext {
+ llvm::DenseMap<const CXXMethodDecl *, unsigned> ManglingNumbers;
+ using ManglingItr = decltype(ManglingNumbers)::iterator;
+
+public:
+ ItaniumSYCLNumberingContext(ItaniumMangleContext *Mangler)
+ : ItaniumNumberingContext(Mangler) {}
+
+ unsigned getManglingNumber(const CXXMethodDecl *CallOperator) override {
+ unsigned Number = ItaniumNumberingContext::getManglingNumber(CallOperator);
+ std::pair<ManglingItr, bool> emplace_result =
+ ManglingNumbers.try_emplace(CallOperator, Number);
+ (void)emplace_result;
+ assert(emplace_result.second && "Lambda number set multiple times?");
+ return Number;
+ }
+
+ using ItaniumNumberingContext::getManglingNumber;
+
+ unsigned getDeviceManglingNumber(const CXXMethodDecl *CallOperator) override {
+ ManglingItr Itr = ManglingNumbers.find(CallOperator);
+ assert(Itr != ManglingNumbers.end() && "Lambda not yet mangled?");
+
+ return Itr->second;
+ }
+};
+
class ItaniumCXXABI : public CXXABI {
private:
std::unique_ptr<MangleContext> Mangler;
@@ -193,7 +225,7 @@ public:
MemberPointerInfo
getMemberPointerInfo(const MemberPointerType *MPT) const override {
const TargetInfo &Target = Context.getTargetInfo();
- TargetInfo::IntType PtrDiff = Target.getPtrDiffType(0);
+ TargetInfo::IntType PtrDiff = Target.getPtrDiffType(LangAS::Default);
MemberPointerInfo MPI;
MPI.Width = Target.getTypeWidth(PtrDiff);
MPI.Align = Target.getTypeAlign(PtrDiff);
@@ -220,8 +252,8 @@ public:
return false;
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- CharUnits PointerSize =
- Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+ CharUnits PointerSize = Context.toCharUnitsFromBits(
+ Context.getTargetInfo().getPointerWidth(LangAS::Default));
return Layout.getNonVirtualSize() == PointerSize;
}
@@ -249,6 +281,9 @@ public:
std::unique_ptr<MangleNumberingContext>
createMangleNumberingContext() const override {
+ if (Context.getLangOpts().isSYCL())
+ return std::make_unique<ItaniumSYCLNumberingContext>(
+ cast<ItaniumMangleContext>(Mangler.get()));
return std::make_unique<ItaniumNumberingContext>(
cast<ItaniumMangleContext>(Mangler.get()));
}
diff --git a/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp b/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp
index 8cbac66fcf00..688141b30441 100644
--- a/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp
@@ -28,6 +28,7 @@
#include "clang/AST/Mangle.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/ABI.h"
+#include "clang/Basic/DiagnosticAST.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
@@ -35,70 +36,17 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/RISCVTargetParser.h"
+#include <optional>
using namespace clang;
namespace {
-/// Retrieve the declaration context that should be used when mangling the given
-/// declaration.
-static const DeclContext *getEffectiveDeclContext(const Decl *D) {
- // The ABI assumes that lambda closure types that occur within
- // default arguments live in the context of the function. However, due to
- // the way in which Clang parses and creates function declarations, this is
- // not the case: the lambda closure type ends up living in the context
- // where the function itself resides, because the function declaration itself
- // had not yet been created. Fix the context here.
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
- if (RD->isLambda())
- if (ParmVarDecl *ContextParam
- = dyn_cast_or_null<ParmVarDecl>(RD->getLambdaContextDecl()))
- return ContextParam->getDeclContext();
- }
-
- // Perform the same check for block literals.
- if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
- if (ParmVarDecl *ContextParam
- = dyn_cast_or_null<ParmVarDecl>(BD->getBlockManglingContextDecl()))
- return ContextParam->getDeclContext();
- }
-
- const DeclContext *DC = D->getDeclContext();
- if (isa<CapturedDecl>(DC) || isa<OMPDeclareReductionDecl>(DC) ||
- isa<OMPDeclareMapperDecl>(DC)) {
- return getEffectiveDeclContext(cast<Decl>(DC));
- }
-
- if (const auto *VD = dyn_cast<VarDecl>(D))
- if (VD->isExternC())
- return VD->getASTContext().getTranslationUnitDecl();
-
- if (const auto *FD = dyn_cast<FunctionDecl>(D))
- if (FD->isExternC())
- return FD->getASTContext().getTranslationUnitDecl();
-
- return DC->getRedeclContext();
-}
-
-static const DeclContext *getEffectiveParentContext(const DeclContext *DC) {
- return getEffectiveDeclContext(cast<Decl>(DC));
-}
-
static bool isLocalContainerContext(const DeclContext *DC) {
return isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC) || isa<BlockDecl>(DC);
}
-static const RecordDecl *GetLocalClassDecl(const Decl *D) {
- const DeclContext *DC = getEffectiveDeclContext(D);
- while (!DC->isNamespace() && !DC->isTranslationUnit()) {
- if (isLocalContainerContext(DC))
- return dyn_cast<RecordDecl>(D);
- D = cast<Decl>(DC);
- DC = getEffectiveDeclContext(D);
- }
- return nullptr;
-}
-
static const FunctionDecl *getStructor(const FunctionDecl *fn) {
if (const FunctionTemplateDecl *ftd = fn->getPrimaryTemplate())
return ftd->getTemplatedDecl();
@@ -126,14 +74,15 @@ class ItaniumMangleContextImpl : public ItaniumMangleContext {
llvm::DenseMap<DiscriminatorKeyTy, unsigned> Discriminator;
llvm::DenseMap<const NamedDecl*, unsigned> Uniquifier;
const DiscriminatorOverrideTy DiscriminatorOverride = nullptr;
+ NamespaceDecl *StdNamespace = nullptr;
bool NeedsUniqueInternalLinkageNames = false;
public:
explicit ItaniumMangleContextImpl(
ASTContext &Context, DiagnosticsEngine &Diags,
- DiscriminatorOverrideTy DiscriminatorOverride)
- : ItaniumMangleContext(Context, Diags),
+ DiscriminatorOverrideTy DiscriminatorOverride, bool IsAux = false)
+ : ItaniumMangleContext(Context, Diags, IsAux),
DiscriminatorOverride(DiscriminatorOverride) {}
/// @name Mangler Entry Points
@@ -162,8 +111,10 @@ public:
void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
const CXXRecordDecl *Type, raw_ostream &) override;
void mangleCXXRTTI(QualType T, raw_ostream &) override;
- void mangleCXXRTTIName(QualType T, raw_ostream &) override;
- void mangleTypeName(QualType T, raw_ostream &) override;
+ void mangleCXXRTTIName(QualType T, raw_ostream &,
+ bool NormalizeIntegers) override;
+ void mangleCanonicalTypeName(QualType T, raw_ostream &,
+ bool NormalizeIntegers) override;
void mangleCXXCtorComdat(const CXXConstructorDecl *D, raw_ostream &) override;
void mangleCXXDtorComdat(const CXXDestructorDecl *D, raw_ostream &) override;
@@ -172,9 +123,9 @@ public:
void mangleDynamicAtExitDestructor(const VarDecl *D,
raw_ostream &Out) override;
void mangleDynamicStermFinalizer(const VarDecl *D, raw_ostream &Out) override;
- void mangleSEHFilterExpression(const NamedDecl *EnclosingDecl,
+ void mangleSEHFilterExpression(GlobalDecl EnclosingDecl,
raw_ostream &Out) override;
- void mangleSEHFinallyBlock(const NamedDecl *EnclosingDecl,
+ void mangleSEHFinallyBlock(GlobalDecl EnclosingDecl,
raw_ostream &Out) override;
void mangleItaniumThreadLocalInit(const VarDecl *D, raw_ostream &) override;
void mangleItaniumThreadLocalWrapper(const VarDecl *D,
@@ -184,6 +135,8 @@ public:
void mangleLambdaSig(const CXXRecordDecl *Lambda, raw_ostream &) override;
+ void mangleModuleInitializer(const Module *Module, raw_ostream &) override;
+
bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) {
// Lambda closure types are already numbered.
if (isLambda(ND))
@@ -197,7 +150,7 @@ public:
// Use the canonical number for externally visible decls.
if (ND->isExternallyVisible()) {
- unsigned discriminator = getASTContext().getManglingNumber(ND);
+ unsigned discriminator = getASTContext().getManglingNumber(ND, isAux());
if (discriminator == 1)
return false;
disc = discriminator - 2;
@@ -249,6 +202,15 @@ public:
return DiscriminatorOverride;
}
+ NamespaceDecl *getStdNamespace();
+
+ const DeclContext *getEffectiveDeclContext(const Decl *D);
+ const DeclContext *getEffectiveParentContext(const DeclContext *DC) {
+ return getEffectiveDeclContext(cast<Decl>(DC));
+ }
+
+ bool isInternalLinkageDecl(const NamedDecl *ND);
+
/// @}
};
@@ -256,6 +218,10 @@ public:
class CXXNameMangler {
ItaniumMangleContextImpl &Context;
raw_ostream &Out;
+ /// Normalize integer types for cross-language CFI support with other
+ /// languages that can't represent and encode C/C++ integer types.
+ bool NormalizeIntegers = false;
+
bool NullOut = false;
/// In the "DisableDerivedAbiTags" mode derived ABI tags are not calculated.
/// This mode is used when mangler creates another mangler recursively to
@@ -267,18 +233,23 @@ class CXXNameMangler {
/// that's not a template specialization; otherwise it's the pattern
/// for that specialization.
const NamedDecl *Structor;
- unsigned StructorType;
+ unsigned StructorType = 0;
+
+ // An offset to add to all template parameter depths while mangling. Used
+ // when mangling a template parameter list to see if it matches a template
+ // template parameter exactly.
+ unsigned TemplateDepthOffset = 0;
/// The next substitution sequence number.
- unsigned SeqID;
+ unsigned SeqID = 0;
class FunctionTypeDepthState {
- unsigned Bits;
+ unsigned Bits = 0;
enum { InResultTypeMask = 1 };
public:
- FunctionTypeDepthState() : Bits(0) {}
+ FunctionTypeDepthState() = default;
/// The number of function types we're inside.
unsigned getDepth() const {
@@ -427,35 +398,58 @@ class CXXNameMangler {
ASTContext &getASTContext() const { return Context.getASTContext(); }
+ bool isCompatibleWith(LangOptions::ClangABI Ver) {
+ return Context.getASTContext().getLangOpts().getClangABICompat() <= Ver;
+ }
+
+ bool isStd(const NamespaceDecl *NS);
+ bool isStdNamespace(const DeclContext *DC);
+
+ const RecordDecl *GetLocalClassDecl(const Decl *D);
+ bool isSpecializedAs(QualType S, llvm::StringRef Name, QualType A);
+ bool isStdCharSpecialization(const ClassTemplateSpecializationDecl *SD,
+ llvm::StringRef Name, bool HasAllocator);
+
public:
CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out_,
const NamedDecl *D = nullptr, bool NullOut_ = false)
- : Context(C), Out(Out_), NullOut(NullOut_), Structor(getStructor(D)),
- StructorType(0), SeqID(0), AbiTagsRoot(AbiTags) {
+ : Context(C), Out(Out_), NullOut(NullOut_), Structor(getStructor(D)),
+ AbiTagsRoot(AbiTags) {
// These can't be mangled without a ctor type or dtor type.
assert(!D || (!isa<CXXDestructorDecl>(D) &&
!isa<CXXConstructorDecl>(D)));
}
CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out_,
const CXXConstructorDecl *D, CXXCtorType Type)
- : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
- SeqID(0), AbiTagsRoot(AbiTags) { }
+ : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
+ AbiTagsRoot(AbiTags) {}
CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out_,
const CXXDestructorDecl *D, CXXDtorType Type)
- : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
- SeqID(0), AbiTagsRoot(AbiTags) { }
+ : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
+ AbiTagsRoot(AbiTags) {}
+ CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out_,
+ bool NormalizeIntegers_)
+ : Context(C), Out(Out_), NormalizeIntegers(NormalizeIntegers_),
+ NullOut(false), Structor(nullptr), AbiTagsRoot(AbiTags) {}
CXXNameMangler(CXXNameMangler &Outer, raw_ostream &Out_)
- : Context(Outer.Context), Out(Out_), NullOut(false),
- Structor(Outer.Structor), StructorType(Outer.StructorType),
- SeqID(Outer.SeqID), FunctionTypeDepth(Outer.FunctionTypeDepth),
- AbiTagsRoot(AbiTags), Substitutions(Outer.Substitutions) {}
+ : Context(Outer.Context), Out(Out_), Structor(Outer.Structor),
+ StructorType(Outer.StructorType), SeqID(Outer.SeqID),
+ FunctionTypeDepth(Outer.FunctionTypeDepth), AbiTagsRoot(AbiTags),
+ Substitutions(Outer.Substitutions),
+ ModuleSubstitutions(Outer.ModuleSubstitutions) {}
CXXNameMangler(CXXNameMangler &Outer, llvm::raw_null_ostream &Out_)
- : Context(Outer.Context), Out(Out_), NullOut(true),
- Structor(Outer.Structor), StructorType(Outer.StructorType),
- SeqID(Outer.SeqID), FunctionTypeDepth(Outer.FunctionTypeDepth),
- AbiTagsRoot(AbiTags), Substitutions(Outer.Substitutions) {}
+ : CXXNameMangler(Outer, (raw_ostream &)Out_) {
+ NullOut = true;
+ }
+
+ struct WithTemplateDepthOffset { unsigned Offset; };
+ CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out,
+ WithTemplateDepthOffset Offset)
+ : CXXNameMangler(C, Out) {
+ TemplateDepthOffset = Offset.Offset;
+ }
raw_ostream &getStream() { return Out; }
@@ -473,10 +467,12 @@ public:
void mangleType(QualType T);
void mangleNameOrStandardSubstitution(const NamedDecl *ND);
void mangleLambdaSig(const CXXRecordDecl *Lambda);
+ void mangleModuleNamePrefix(StringRef Name, bool IsPartition = false);
private:
bool mangleSubstitution(const NamedDecl *ND);
+ bool mangleSubstitution(NestedNameSpecifier *NNS);
bool mangleSubstitution(QualType T);
bool mangleSubstitution(TemplateName Template);
bool mangleSubstitution(uintptr_t Ptr);
@@ -490,6 +486,11 @@ private:
addSubstitution(reinterpret_cast<uintptr_t>(ND));
}
+ void addSubstitution(NestedNameSpecifier *NNS) {
+ NNS = Context.getASTContext().getCanonicalNestedNameSpecifier(NNS);
+
+ addSubstitution(reinterpret_cast<uintptr_t>(NNS));
+ }
void addSubstitution(QualType T);
void addSubstitution(TemplateName Template);
void addSubstitution(uintptr_t Ptr);
@@ -508,22 +509,20 @@ private:
void mangleNameWithAbiTags(GlobalDecl GD,
const AbiTagList *AdditionalAbiTags);
- void mangleModuleName(const Module *M);
- void mangleModuleNamePrefix(StringRef Name);
+ void mangleModuleName(const NamedDecl *ND);
void mangleTemplateName(const TemplateDecl *TD,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs);
- void mangleUnqualifiedName(GlobalDecl GD,
+ ArrayRef<TemplateArgument> Args);
+ void mangleUnqualifiedName(GlobalDecl GD, const DeclContext *DC,
const AbiTagList *AdditionalAbiTags) {
- mangleUnqualifiedName(GD, cast<NamedDecl>(GD.getDecl())->getDeclName(), UnknownArity,
- AdditionalAbiTags);
+ mangleUnqualifiedName(GD, cast<NamedDecl>(GD.getDecl())->getDeclName(), DC,
+ UnknownArity, AdditionalAbiTags);
}
void mangleUnqualifiedName(GlobalDecl GD, DeclarationName Name,
- unsigned KnownArity,
+ const DeclContext *DC, unsigned KnownArity,
const AbiTagList *AdditionalAbiTags);
- void mangleUnscopedName(GlobalDecl GD,
+ void mangleUnscopedName(GlobalDecl GD, const DeclContext *DC,
const AbiTagList *AdditionalAbiTags);
- void mangleUnscopedTemplateName(GlobalDecl GD,
+ void mangleUnscopedTemplateName(GlobalDecl GD, const DeclContext *DC,
const AbiTagList *AdditionalAbiTags);
void mangleSourceName(const IdentifierInfo *II);
void mangleRegCallName(const IdentifierInfo *II);
@@ -535,13 +534,17 @@ private:
void mangleBlockForPrefix(const BlockDecl *Block);
void mangleUnqualifiedBlock(const BlockDecl *Block);
void mangleTemplateParamDecl(const NamedDecl *Decl);
+ void mangleTemplateParameterList(const TemplateParameterList *Params);
+ void mangleTypeConstraint(const ConceptDecl *Concept,
+ ArrayRef<TemplateArgument> Arguments);
+ void mangleTypeConstraint(const TypeConstraint *Constraint);
+ void mangleRequiresClause(const Expr *RequiresClause);
void mangleLambda(const CXXRecordDecl *Lambda);
void mangleNestedName(GlobalDecl GD, const DeclContext *DC,
const AbiTagList *AdditionalAbiTags,
bool NoFunction=false);
void mangleNestedName(const TemplateDecl *TD,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs);
+ ArrayRef<TemplateArgument> Args);
void mangleNestedNameWithClosurePrefix(GlobalDecl GD,
const NamedDecl *PrefixND,
const AbiTagList *AdditionalAbiTags);
@@ -581,6 +584,8 @@ private:
void mangleAArch64NeonVectorType(const DependentVectorType *T);
void mangleAArch64FixedSveVectorType(const VectorType *T);
void mangleAArch64FixedSveVectorType(const DependentVectorType *T);
+ void mangleRISCVFixedRVVVectorType(const VectorType *T);
+ void mangleRISCVFixedRVVVectorType(const DependentVectorType *T);
void mangleIntegerLiteral(QualType T, const llvm::APSInt &Value);
void mangleFloatLiteral(QualType T, const llvm::APFloat &V);
@@ -597,17 +602,21 @@ private:
unsigned knownArity);
void mangleCastExpression(const Expr *E, StringRef CastEncoding);
void mangleInitListElements(const InitListExpr *InitList);
+ void mangleRequirement(SourceLocation RequiresExprLoc,
+ const concepts::Requirement *Req);
void mangleExpression(const Expr *E, unsigned Arity = UnknownArity,
bool AsTemplateArg = false);
void mangleCXXCtorType(CXXCtorType T, const CXXRecordDecl *InheritedFrom);
void mangleCXXDtorType(CXXDtorType T);
+ struct TemplateArgManglingInfo;
void mangleTemplateArgs(TemplateName TN,
const TemplateArgumentLoc *TemplateArgs,
unsigned NumTemplateArgs);
- void mangleTemplateArgs(TemplateName TN, const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs);
+ void mangleTemplateArgs(TemplateName TN, ArrayRef<TemplateArgument> Args);
void mangleTemplateArgs(TemplateName TN, const TemplateArgumentList &AL);
+ void mangleTemplateArg(TemplateArgManglingInfo &Info, unsigned Index,
+ TemplateArgument A);
void mangleTemplateArg(TemplateArgument A, bool NeedExactType);
void mangleTemplateArgExpr(const Expr *E);
void mangleValueInTemplateArg(QualType T, const APValue &V, bool TopLevel,
@@ -628,8 +637,79 @@ private:
}
-static bool isInternalLinkageDecl(const NamedDecl *ND) {
- if (ND && ND->getFormalLinkage() == InternalLinkage &&
+NamespaceDecl *ItaniumMangleContextImpl::getStdNamespace() {
+ if (!StdNamespace) {
+ StdNamespace = NamespaceDecl::Create(
+ getASTContext(), getASTContext().getTranslationUnitDecl(),
+ /*Inline=*/false, SourceLocation(), SourceLocation(),
+ &getASTContext().Idents.get("std"),
+ /*PrevDecl=*/nullptr, /*Nested=*/false);
+ StdNamespace->setImplicit();
+ }
+ return StdNamespace;
+}
+
+/// Retrieve the declaration context that should be used when mangling the given
+/// declaration.
+const DeclContext *
+ItaniumMangleContextImpl::getEffectiveDeclContext(const Decl *D) {
+ // The ABI assumes that lambda closure types that occur within
+ // default arguments live in the context of the function. However, due to
+ // the way in which Clang parses and creates function declarations, this is
+ // not the case: the lambda closure type ends up living in the context
+ // where the function itself resides, because the function declaration itself
+ // had not yet been created. Fix the context here.
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ if (RD->isLambda())
+ if (ParmVarDecl *ContextParam =
+ dyn_cast_or_null<ParmVarDecl>(RD->getLambdaContextDecl()))
+ return ContextParam->getDeclContext();
+ }
+
+ // Perform the same check for block literals.
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ if (ParmVarDecl *ContextParam =
+ dyn_cast_or_null<ParmVarDecl>(BD->getBlockManglingContextDecl()))
+ return ContextParam->getDeclContext();
+ }
+
+ // On ARM and AArch64, the va_list tag is always mangled as if in the std
+ // namespace. We do not represent va_list as actually being in the std
+ // namespace in C because this would result in incorrect debug info in C,
+ // among other things. It is important for both languages to have the same
+ // mangling in order for -fsanitize=cfi-icall to work.
+ if (D == getASTContext().getVaListTagDecl()) {
+ const llvm::Triple &T = getASTContext().getTargetInfo().getTriple();
+ if (T.isARM() || T.isThumb() || T.isAArch64())
+ return getStdNamespace();
+ }
+
+ const DeclContext *DC = D->getDeclContext();
+ if (isa<CapturedDecl>(DC) || isa<OMPDeclareReductionDecl>(DC) ||
+ isa<OMPDeclareMapperDecl>(DC)) {
+ return getEffectiveDeclContext(cast<Decl>(DC));
+ }
+
+ if (const auto *VD = dyn_cast<VarDecl>(D))
+ if (VD->isExternC())
+ return getASTContext().getTranslationUnitDecl();
+
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->isExternC())
+ return getASTContext().getTranslationUnitDecl();
+ // Member-like constrained friends are mangled as if they were members of
+ // the enclosing class.
+ if (FD->isMemberLikeConstrainedFriend() &&
+ getASTContext().getLangOpts().getClangABICompat() >
+ LangOptions::ClangABI::Ver17)
+ return D->getLexicalDeclContext()->getRedeclContext();
+ }
+
+ return DC->getRedeclContext();
+}
+
+bool ItaniumMangleContextImpl::isInternalLinkageDecl(const NamedDecl *ND) {
+ if (ND && ND->getFormalLinkage() == Linkage::Internal &&
!ND->isExternallyVisible() &&
getEffectiveDeclContext(ND)->isFileContext() &&
!ND->isInAnonymousNamespace())
@@ -659,8 +739,7 @@ bool ItaniumMangleContextImpl::isUniqueInternalLinkageDecl(
}
bool ItaniumMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
- const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
- if (FD) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
LanguageLinkage L = FD->getLanguageLinkage();
// Overloadable functions need mangling.
if (FD->hasAttr<OverloadableAttr>())
@@ -696,21 +775,26 @@ bool ItaniumMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
if (!getASTContext().getLangOpts().CPlusPlus)
return false;
- const VarDecl *VD = dyn_cast<VarDecl>(D);
- if (VD && !isa<DecompositionDecl>(D)) {
+ if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ // Decompositions are mangled.
+ if (isa<DecompositionDecl>(VD))
+ return true;
+
// C variables are not mangled.
if (VD->isExternC())
return false;
- // Variables at global scope with non-internal linkage are not mangled
+ // Variables at global scope are not mangled unless they have internal
+ // linkage or are specializations or are attached to a named module.
const DeclContext *DC = getEffectiveDeclContext(D);
// Check for extern variable declared locally.
if (DC->isFunctionOrMethod() && D->hasLinkage())
- while (!DC->isNamespace() && !DC->isTranslationUnit())
+ while (!DC->isFileContext())
DC = getEffectiveParentContext(DC);
- if (DC->isTranslationUnit() && D->getFormalLinkage() != InternalLinkage &&
+ if (DC->isTranslationUnit() && D->getFormalLinkage() != Linkage::Internal &&
!CXXNameMangler::shouldHaveAbiTags(*this, VD) &&
- !isa<VarTemplateSpecializationDecl>(D))
+ !isa<VarTemplateSpecializationDecl>(VD) &&
+ !VD->getOwningModuleForLinkage())
return false;
}
@@ -758,8 +842,17 @@ void CXXNameMangler::mangleFunctionEncoding(GlobalDecl GD) {
AbiTagList ReturnTypeAbiTags = makeFunctionReturnTypeTags(FD);
if (ReturnTypeAbiTags.empty()) {
- // There are no tags for return type, the simplest case.
+ // There are no tags for return type, the simplest case. Enter the function
+ // parameter scope before mangling the name, because a template using
+ // constrained `auto` can have references to its parameters within its
+ // template argument list:
+ //
+ // template<typename T> void f(T x, C<decltype(x)> auto)
+ // ... is mangled as ...
+ // template<typename T, C<decltype(param 1)> U> void f(T, U)
+ FunctionTypeDepthState Saved = FunctionTypeDepth.push();
mangleName(GD);
+ FunctionTypeDepth.pop(Saved);
mangleFunctionEncodingBareType(FD);
return;
}
@@ -772,7 +865,10 @@ void CXXNameMangler::mangleFunctionEncoding(GlobalDecl GD) {
CXXNameMangler FunctionEncodingMangler(*this, FunctionEncodingStream);
// Output name of the function.
FunctionEncodingMangler.disableDerivedAbiTags();
+
+ FunctionTypeDepthState Saved = FunctionTypeDepth.push();
FunctionEncodingMangler.mangleNameWithAbiTags(FD, nullptr);
+ FunctionTypeDepth.pop(Saved);
// Remember length of the function name in the buffer.
size_t EncodingPositionStart = FunctionEncodingStream.str().size();
@@ -790,7 +886,9 @@ void CXXNameMangler::mangleFunctionEncoding(GlobalDecl GD) {
AdditionalAbiTags.end());
// Output name with implicit tags and function encoding from temporary buffer.
+ Saved = FunctionTypeDepth.push();
mangleNameWithAbiTags(FD, &AdditionalAbiTags);
+ FunctionTypeDepth.pop(Saved);
Out << FunctionEncodingStream.str().substr(EncodingPositionStart);
// Function encoding could create new substitutions so we have to add
@@ -808,16 +906,15 @@ void CXXNameMangler::mangleFunctionEncodingBareType(const FunctionDecl *FD) {
EnableIfAttr *EIA = dyn_cast<EnableIfAttr>(*I);
if (!EIA)
continue;
- if (Context.getASTContext().getLangOpts().getClangABICompat() >
- LangOptions::ClangABI::Ver11) {
- mangleTemplateArgExpr(EIA->getCond());
- } else {
+ if (isCompatibleWith(LangOptions::ClangABI::Ver11)) {
// Prior to Clang 12, we hardcoded the X/E around enable-if's argument,
// even though <template-arg> should not include an X/E around
// <expr-primary>.
Out << 'X';
mangleExpression(EIA->getCond());
Out << 'E';
+ } else {
+ mangleTemplateArgExpr(EIA->getCond());
}
}
Out << 'E';
@@ -860,18 +957,9 @@ void CXXNameMangler::mangleFunctionEncodingBareType(const FunctionDecl *FD) {
MangleReturnType, FD);
}
-static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) {
- while (isa<LinkageSpecDecl>(DC)) {
- DC = getEffectiveParentContext(DC);
- }
-
- return DC;
-}
-
/// Return whether a given namespace is the 'std' namespace.
-static bool isStd(const NamespaceDecl *NS) {
- if (!IgnoreLinkageSpecDecls(getEffectiveParentContext(NS))
- ->isTranslationUnit())
+bool CXXNameMangler::isStd(const NamespaceDecl *NS) {
+ if (!Context.getEffectiveParentContext(NS)->isTranslationUnit())
return false;
const IdentifierInfo *II = NS->getOriginalNamespace()->getIdentifier();
@@ -880,7 +968,7 @@ static bool isStd(const NamespaceDecl *NS) {
// isStdNamespace - Return whether a given decl context is a toplevel 'std'
// namespace.
-static bool isStdNamespace(const DeclContext *DC) {
+bool CXXNameMangler::isStdNamespace(const DeclContext *DC) {
if (!DC->isNamespace())
return false;
@@ -954,6 +1042,17 @@ void CXXNameMangler::mangleName(GlobalDecl GD) {
}
}
+const RecordDecl *CXXNameMangler::GetLocalClassDecl(const Decl *D) {
+ const DeclContext *DC = Context.getEffectiveDeclContext(D);
+ while (!DC->isNamespace() && !DC->isTranslationUnit()) {
+ if (isLocalContainerContext(DC))
+ return dyn_cast<RecordDecl>(D);
+ D = cast<Decl>(DC);
+ DC = Context.getEffectiveDeclContext(D);
+ }
+ return nullptr;
+}
+
void CXXNameMangler::mangleNameWithAbiTags(GlobalDecl GD,
const AbiTagList *AdditionalAbiTags) {
const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
@@ -962,7 +1061,7 @@ void CXXNameMangler::mangleNameWithAbiTags(GlobalDecl GD,
// ::= [<module-name>] <unscoped-template-name> <template-args>
// ::= <local-name>
//
- const DeclContext *DC = getEffectiveDeclContext(ND);
+ const DeclContext *DC = Context.getEffectiveDeclContext(ND);
// If this is an extern variable declared locally, the relevant DeclContext
// is that of the containing namespace, or the translation unit.
@@ -970,27 +1069,19 @@ void CXXNameMangler::mangleNameWithAbiTags(GlobalDecl GD,
// a proper semantic declaration context!
if (isLocalContainerContext(DC) && ND->hasLinkage() && !isLambda(ND))
while (!DC->isNamespace() && !DC->isTranslationUnit())
- DC = getEffectiveParentContext(DC);
+ DC = Context.getEffectiveParentContext(DC);
else if (GetLocalClassDecl(ND)) {
mangleLocalName(GD, AdditionalAbiTags);
return;
}
- DC = IgnoreLinkageSpecDecls(DC);
+ assert(!isa<LinkageSpecDecl>(DC) && "context cannot be LinkageSpecDecl");
if (isLocalContainerContext(DC)) {
mangleLocalName(GD, AdditionalAbiTags);
return;
}
- // Do not mangle the owning module for an external linkage declaration.
- // This enables backwards-compatibility with non-modular code, and is
- // a valid choice since conflicts are not permitted by C++ Modules TS
- // [basic.def.odr]/6.2.
- if (!ND->hasExternalFormalLinkage())
- if (Module *M = ND->getOwningModuleForLinkage())
- mangleModuleName(M);
-
// Closures can require a nested-name mangling even if they're semantically
// in the global namespace.
if (const NamedDecl *PrefixND = getClosurePrefix(ND)) {
@@ -1002,38 +1093,35 @@ void CXXNameMangler::mangleNameWithAbiTags(GlobalDecl GD,
// Check if we have a template.
const TemplateArgumentList *TemplateArgs = nullptr;
if (GlobalDecl TD = isTemplate(GD, TemplateArgs)) {
- mangleUnscopedTemplateName(TD, AdditionalAbiTags);
+ mangleUnscopedTemplateName(TD, DC, AdditionalAbiTags);
mangleTemplateArgs(asTemplateName(TD), *TemplateArgs);
return;
}
- mangleUnscopedName(GD, AdditionalAbiTags);
+ mangleUnscopedName(GD, DC, AdditionalAbiTags);
return;
}
mangleNestedName(GD, DC, AdditionalAbiTags);
}
-void CXXNameMangler::mangleModuleName(const Module *M) {
- // Implement the C++ Modules TS name mangling proposal; see
- // https://gcc.gnu.org/wiki/cxx-modules?action=AttachFile
- //
- // <module-name> ::= W <unscoped-name>+ E
- // ::= W <module-subst> <unscoped-name>* E
- Out << 'W';
- mangleModuleNamePrefix(M->Name);
- Out << 'E';
+void CXXNameMangler::mangleModuleName(const NamedDecl *ND) {
+ if (ND->isExternallyVisible())
+ if (Module *M = ND->getOwningModuleForLinkage())
+ mangleModuleNamePrefix(M->getPrimaryModuleInterfaceName());
}
-void CXXNameMangler::mangleModuleNamePrefix(StringRef Name) {
- // <module-subst> ::= _ <seq-id> # 0 < seq-id < 10
- // ::= W <seq-id - 10> _ # otherwise
+// <module-name> ::= <module-subname>
+// ::= <module-name> <module-subname>
+// ::= <substitution>
+// <module-subname> ::= W <source-name>
+// ::= W P <source-name>
+void CXXNameMangler::mangleModuleNamePrefix(StringRef Name, bool IsPartition) {
+ // <substitution> ::= S <seq-id> _
auto It = ModuleSubstitutions.find(Name);
if (It != ModuleSubstitutions.end()) {
- if (It->second < 10)
- Out << '_' << static_cast<char>('0' + It->second);
- else
- Out << 'W' << (It->second - 10) << '_';
+ Out << 'S';
+ mangleSeqID(It->second);
return;
}
@@ -1042,40 +1130,44 @@ void CXXNameMangler::mangleModuleNamePrefix(StringRef Name) {
auto Parts = Name.rsplit('.');
if (Parts.second.empty())
Parts.second = Parts.first;
- else
- mangleModuleNamePrefix(Parts.first);
+ else {
+ mangleModuleNamePrefix(Parts.first, IsPartition);
+ IsPartition = false;
+ }
+ Out << 'W';
+ if (IsPartition)
+ Out << 'P';
Out << Parts.second.size() << Parts.second;
- ModuleSubstitutions.insert({Name, ModuleSubstitutions.size()});
+ ModuleSubstitutions.insert({Name, SeqID++});
}
void CXXNameMangler::mangleTemplateName(const TemplateDecl *TD,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs) {
- const DeclContext *DC = IgnoreLinkageSpecDecls(getEffectiveDeclContext(TD));
+ ArrayRef<TemplateArgument> Args) {
+ const DeclContext *DC = Context.getEffectiveDeclContext(TD);
if (DC->isTranslationUnit() || isStdNamespace(DC)) {
- mangleUnscopedTemplateName(TD, nullptr);
- mangleTemplateArgs(asTemplateName(TD), TemplateArgs, NumTemplateArgs);
+ mangleUnscopedTemplateName(TD, DC, nullptr);
+ mangleTemplateArgs(asTemplateName(TD), Args);
} else {
- mangleNestedName(TD, TemplateArgs, NumTemplateArgs);
+ mangleNestedName(TD, Args);
}
}
-void CXXNameMangler::mangleUnscopedName(GlobalDecl GD,
+void CXXNameMangler::mangleUnscopedName(GlobalDecl GD, const DeclContext *DC,
const AbiTagList *AdditionalAbiTags) {
- const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
// <unscoped-name> ::= <unqualified-name>
// ::= St <unqualified-name> # ::std::
- if (isStdNamespace(IgnoreLinkageSpecDecls(getEffectiveDeclContext(ND))))
+ assert(!isa<LinkageSpecDecl>(DC) && "unskipped LinkageSpecDecl");
+ if (isStdNamespace(DC))
Out << "St";
- mangleUnqualifiedName(GD, AdditionalAbiTags);
+ mangleUnqualifiedName(GD, DC, AdditionalAbiTags);
}
void CXXNameMangler::mangleUnscopedTemplateName(
- GlobalDecl GD, const AbiTagList *AdditionalAbiTags) {
+ GlobalDecl GD, const DeclContext *DC, const AbiTagList *AdditionalAbiTags) {
const TemplateDecl *ND = cast<TemplateDecl>(GD.getDecl());
// <unscoped-template-name> ::= <unscoped-name>
// ::= <substitution>
@@ -1088,9 +1180,10 @@ void CXXNameMangler::mangleUnscopedTemplateName(
"template template param cannot have abi tags");
mangleTemplateParameter(TTP->getDepth(), TTP->getIndex());
} else if (isa<BuiltinTemplateDecl>(ND) || isa<ConceptDecl>(ND)) {
- mangleUnscopedName(GD, AdditionalAbiTags);
+ mangleUnscopedName(GD, DC, AdditionalAbiTags);
} else {
- mangleUnscopedName(GD.getWithDecl(ND->getTemplatedDecl()), AdditionalAbiTags);
+ mangleUnscopedName(GD.getWithDecl(ND->getTemplatedDecl()), DC,
+ AdditionalAbiTags);
}
addSubstitution(ND);
@@ -1206,8 +1299,7 @@ void CXXNameMangler::manglePrefix(QualType type) {
// FIXME: GCC does not appear to mangle the template arguments when
// the template in question is a dependent template name. Should we
// emulate that badness?
- mangleTemplateArgs(TST->getTemplateName(), TST->getArgs(),
- TST->getNumArgs());
+ mangleTemplateArgs(TST->getTemplateName(), TST->template_arguments());
addSubstitution(QualType(TST, 0));
}
} else if (const auto *DTST =
@@ -1220,7 +1312,7 @@ void CXXNameMangler::manglePrefix(QualType type) {
// FIXME: GCC does not appear to mangle the template arguments when
// the template in question is a dependent template name. Should we
// emulate that badness?
- mangleTemplateArgs(Template, DTST->getArgs(), DTST->getNumArgs());
+ mangleTemplateArgs(Template, DTST->template_arguments());
addSubstitution(QualType(DTST, 0));
}
} else {
@@ -1366,15 +1458,29 @@ void CXXNameMangler::mangleUnresolvedName(
mangleTemplateArgs(TemplateName(), TemplateArgs, NumTemplateArgs);
}
-void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
- DeclarationName Name,
- unsigned KnownArity,
- const AbiTagList *AdditionalAbiTags) {
+void CXXNameMangler::mangleUnqualifiedName(
+ GlobalDecl GD, DeclarationName Name, const DeclContext *DC,
+ unsigned KnownArity, const AbiTagList *AdditionalAbiTags) {
const NamedDecl *ND = cast_or_null<NamedDecl>(GD.getDecl());
- unsigned Arity = KnownArity;
- // <unqualified-name> ::= <operator-name>
+ // <unqualified-name> ::= [<module-name>] [F] <operator-name>
// ::= <ctor-dtor-name>
- // ::= <source-name>
+ // ::= [<module-name>] [F] <source-name>
+ // ::= [<module-name>] DC <source-name>* E
+
+ if (ND && DC && DC->isFileContext())
+ mangleModuleName(ND);
+
+ // A member-like constrained friend is mangled with a leading 'F'.
+ // Proposed on https://github.com/itanium-cxx-abi/cxx-abi/issues/24.
+ auto *FD = dyn_cast<FunctionDecl>(ND);
+ auto *FTD = dyn_cast<FunctionTemplateDecl>(ND);
+ if ((FD && FD->isMemberLikeConstrainedFriend()) ||
+ (FTD && FTD->getTemplatedDecl()->isMemberLikeConstrainedFriend())) {
+ if (!isCompatibleWith(LangOptions::ClangABI::Ver17))
+ Out << 'F';
+ }
+
+ unsigned Arity = KnownArity;
switch (Name.getNameKind()) {
case DeclarationName::Identifier: {
const IdentifierInfo *II = Name.getAsIdentifierInfo();
@@ -1385,8 +1491,6 @@ void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
//
// <unqualified-name> ::= DC <source-name>* E
//
- // These can never be referenced across translation units, so we do
- // not need a cross-vendor mangling for anything other than demanglers.
// Proposed on cxx-abi-dev on 2016-08-12
Out << "DC";
for (auto *BD : DD->bindings())
@@ -1428,10 +1532,9 @@ void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
// 12_GLOBAL__N_1 mangling is quite sufficient there, and this better
// matches GCC anyway, because GCC does not treat anonymous namespaces as
// implying internal linkage.
- if (isInternalLinkageDecl(ND))
+ if (Context.isInternalLinkageDecl(ND))
Out << 'L';
- auto *FD = dyn_cast<FunctionDecl>(ND);
bool IsRegCall = FD &&
FD->getType()->castAs<FunctionType>()->getCallConv() ==
clang::CC_X86RegCall;
@@ -1518,9 +1621,16 @@ void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
// <lambda-sig> ::= <template-param-decl>* <parameter-type>+
// # Parameter types or 'v' for 'void'.
if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(TD)) {
- if (Record->isLambda() && (Record->getLambdaManglingNumber() ||
- Context.getDiscriminatorOverride()(
- Context.getASTContext(), Record))) {
+ std::optional<unsigned> DeviceNumber =
+ Context.getDiscriminatorOverride()(Context.getASTContext(), Record);
+
+ // If we have a device-number via the discriminator, use that to mangle
+ // the lambda, otherwise use the typical lambda-mangling-number. In either
+ // case, a '0' should be mangled as a normal unnamed class instead of as a
+ // lambda.
+ if (Record->isLambda() &&
+ ((DeviceNumber && *DeviceNumber > 0) ||
+ (!DeviceNumber && Record->getLambdaManglingNumber() > 0))) {
assert(!AdditionalAbiTags &&
"Lambda type cannot have additional abi tags");
mangleLambda(Record);
@@ -1529,7 +1639,8 @@ void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
}
if (TD->isExternallyVisible()) {
- unsigned UnnamedMangle = getASTContext().getManglingNumber(TD);
+ unsigned UnnamedMangle =
+ getASTContext().getManglingNumber(TD, Context.isAux());
Out << "Ut";
if (UnnamedMangle > 1)
Out << UnnamedMangle - 2;
@@ -1540,7 +1651,9 @@ void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
// Get a unique id for the anonymous struct. If it is not a real output
// ID doesn't matter so use fake one.
- unsigned AnonStructId = NullOut ? 0 : Context.getAnonymousStructId(TD);
+ unsigned AnonStructId =
+ NullOut ? 0
+ : Context.getAnonymousStructId(TD, dyn_cast<FunctionDecl>(DC));
// Mangle it as a source name in the form
// [n] $_<id>
@@ -1599,6 +1712,7 @@ void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
// Otherwise, use the complete destructor name. This is relevant if a
// class with a destructor is declared within a destructor.
mangleCXXDtorType(Dtor_Complete);
+ assert(ND);
writeAbiTags(ND, AdditionalAbiTags);
break;
@@ -1608,10 +1722,10 @@ void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
// If we have a member function, we need to include the 'this' pointer.
if (const auto *MD = dyn_cast<CXXMethodDecl>(ND))
- if (!MD->isStatic())
+ if (MD->isImplicitObjectMemberFunction())
Arity++;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case DeclarationName::CXXConversionFunctionName:
case DeclarationName::CXXLiteralOperatorName:
mangleOperatorName(Name, Arity);
@@ -1630,8 +1744,12 @@ void CXXNameMangler::mangleRegCallName(const IdentifierInfo *II) {
// <source-name> ::= <positive length number> __regcall3__ <identifier>
// <number> ::= [n] <non-negative decimal integer>
// <identifier> ::= <unqualified source code identifier>
- Out << II->getLength() + sizeof("__regcall3__") - 1 << "__regcall3__"
- << II->getName();
+ if (getASTContext().getLangOpts().RegCall4)
+ Out << II->getLength() + sizeof("__regcall4__") - 1 << "__regcall4__"
+ << II->getName();
+ else
+ Out << II->getLength() + sizeof("__regcall3__") - 1 << "__regcall3__"
+ << II->getName();
}
void CXXNameMangler::mangleDeviceStubName(const IdentifierInfo *II) {
@@ -1664,6 +1782,8 @@ void CXXNameMangler::mangleNestedName(GlobalDecl GD,
Qualifiers MethodQuals = Method->getMethodQualifiers();
// We do not consider restrict a distinguishing attribute for overloading
// purposes so we must not mangle it.
+ if (Method->isExplicitObjectMemberFunction())
+ Out << 'H';
MethodQuals.removeRestrict();
mangleQualifiers(MethodQuals);
mangleRefQualifier(Method->getRefQualifier());
@@ -1676,20 +1796,19 @@ void CXXNameMangler::mangleNestedName(GlobalDecl GD,
mangleTemplateArgs(asTemplateName(TD), *TemplateArgs);
} else {
manglePrefix(DC, NoFunction);
- mangleUnqualifiedName(GD, AdditionalAbiTags);
+ mangleUnqualifiedName(GD, DC, AdditionalAbiTags);
}
Out << 'E';
}
void CXXNameMangler::mangleNestedName(const TemplateDecl *TD,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs) {
+ ArrayRef<TemplateArgument> Args) {
// <nested-name> ::= N [<CV-qualifiers>] <template-prefix> <template-args> E
Out << 'N';
mangleTemplatePrefix(TD);
- mangleTemplateArgs(asTemplateName(TD), TemplateArgs, NumTemplateArgs);
+ mangleTemplateArgs(asTemplateName(TD), Args);
Out << 'E';
}
@@ -1706,7 +1825,7 @@ void CXXNameMangler::mangleNestedNameWithClosurePrefix(
Out << 'N';
mangleClosurePrefix(PrefixND);
- mangleUnqualifiedName(GD, AdditionalAbiTags);
+ mangleUnqualifiedName(GD, nullptr, AdditionalAbiTags);
Out << 'E';
}
@@ -1736,7 +1855,7 @@ void CXXNameMangler::mangleLocalName(GlobalDecl GD,
// <discriminator> := _ <non-negative number>
assert(isa<NamedDecl>(D) || isa<BlockDecl>(D));
const RecordDecl *RD = GetLocalClassDecl(D);
- const DeclContext *DC = getEffectiveDeclContext(RD ? RD : D);
+ const DeclContext *DC = Context.getEffectiveDeclContext(RD ? RD : D);
Out << 'Z';
@@ -1784,18 +1903,18 @@ void CXXNameMangler::mangleLocalName(GlobalDecl GD,
// Mangle the name relative to the closest enclosing function.
// equality ok because RD derived from ND above
if (D == RD) {
- mangleUnqualifiedName(RD, AdditionalAbiTags);
+ mangleUnqualifiedName(RD, DC, AdditionalAbiTags);
} else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
if (const NamedDecl *PrefixND = getClosurePrefix(BD))
mangleClosurePrefix(PrefixND, true /*NoFunction*/);
else
- manglePrefix(getEffectiveDeclContext(BD), true /*NoFunction*/);
+ manglePrefix(Context.getEffectiveDeclContext(BD), true /*NoFunction*/);
assert(!AdditionalAbiTags && "Block cannot have additional abi tags");
mangleUnqualifiedBlock(BD);
} else {
const NamedDecl *ND = cast<NamedDecl>(D);
- mangleNestedName(GD, getEffectiveDeclContext(ND), AdditionalAbiTags,
- true /*NoFunction*/);
+ mangleNestedName(GD, Context.getEffectiveDeclContext(ND),
+ AdditionalAbiTags, true /*NoFunction*/);
}
} else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
// Mangle a block in a default parameter; see above explanation for
@@ -1815,7 +1934,7 @@ void CXXNameMangler::mangleLocalName(GlobalDecl GD,
assert(!AdditionalAbiTags && "Block cannot have additional abi tags");
mangleUnqualifiedBlock(BD);
} else {
- mangleUnqualifiedName(GD, AdditionalAbiTags);
+ mangleUnqualifiedName(GD, DC, AdditionalAbiTags);
}
if (const NamedDecl *ND = dyn_cast<NamedDecl>(RD ? RD : D)) {
@@ -1834,7 +1953,7 @@ void CXXNameMangler::mangleBlockForPrefix(const BlockDecl *Block) {
mangleLocalName(Block, /* AdditionalAbiTags */ nullptr);
return;
}
- const DeclContext *DC = getEffectiveDeclContext(Block);
+ const DeclContext *DC = Context.getEffectiveDeclContext(Block);
if (isLocalContainerContext(DC)) {
mangleLocalName(Block, /* AdditionalAbiTags */ nullptr);
return;
@@ -1850,8 +1969,7 @@ void CXXNameMangler::mangleUnqualifiedBlock(const BlockDecl *Block) {
// When trying to be ABI-compatibility with clang 12 and before, mangle a
// <data-member-prefix> now, with no substitutions and no <template-args>.
if (Decl *Context = Block->getBlockManglingContextDecl()) {
- if (getASTContext().getLangOpts().getClangABICompat() <=
- LangOptions::ClangABI::Ver12 &&
+ if (isCompatibleWith(LangOptions::ClangABI::Ver12) &&
(isa<VarDecl>(Context) || isa<FieldDecl>(Context)) &&
Context->getDeclContext()->isRecord()) {
const auto *ND = cast<NamedDecl>(Context);
@@ -1879,15 +1997,25 @@ void CXXNameMangler::mangleUnqualifiedBlock(const BlockDecl *Block) {
}
// <template-param-decl>
-// ::= Ty # template type parameter
-// ::= Tn <type> # template non-type parameter
-// ::= Tt <template-param-decl>* E # template template parameter
-// ::= Tp <template-param-decl> # template parameter pack
+// ::= Ty # template type parameter
+// ::= Tk <concept name> [<template-args>] # constrained type parameter
+// ::= Tn <type> # template non-type parameter
+// ::= Tt <template-param-decl>* E [Q <requires-clause expr>]
+// # template template parameter
+// ::= Tp <template-param-decl> # template parameter pack
void CXXNameMangler::mangleTemplateParamDecl(const NamedDecl *Decl) {
+ // Proposed on https://github.com/itanium-cxx-abi/cxx-abi/issues/47.
if (auto *Ty = dyn_cast<TemplateTypeParmDecl>(Decl)) {
if (Ty->isParameterPack())
Out << "Tp";
- Out << "Ty";
+ const TypeConstraint *Constraint = Ty->getTypeConstraint();
+ if (Constraint && !isCompatibleWith(LangOptions::ClangABI::Ver17)) {
+ // Proposed on https://github.com/itanium-cxx-abi/cxx-abi/issues/24.
+ Out << "Tk";
+ mangleTypeConstraint(Constraint);
+ } else {
+ Out << "Ty";
+ }
} else if (auto *Tn = dyn_cast<NonTypeTemplateParmDecl>(Decl)) {
if (Tn->isExpandedParameterPack()) {
for (unsigned I = 0, N = Tn->getNumExpansionTypes(); I != N; ++I) {
@@ -1907,29 +2035,59 @@ void CXXNameMangler::mangleTemplateParamDecl(const NamedDecl *Decl) {
} else if (auto *Tt = dyn_cast<TemplateTemplateParmDecl>(Decl)) {
if (Tt->isExpandedParameterPack()) {
for (unsigned I = 0, N = Tt->getNumExpansionTemplateParameters(); I != N;
- ++I) {
- Out << "Tt";
- for (auto *Param : *Tt->getExpansionTemplateParameters(I))
- mangleTemplateParamDecl(Param);
- Out << "E";
- }
+ ++I)
+ mangleTemplateParameterList(Tt->getExpansionTemplateParameters(I));
} else {
if (Tt->isParameterPack())
Out << "Tp";
- Out << "Tt";
- for (auto *Param : *Tt->getTemplateParameters())
- mangleTemplateParamDecl(Param);
- Out << "E";
+ mangleTemplateParameterList(Tt->getTemplateParameters());
}
}
}
+void CXXNameMangler::mangleTemplateParameterList(
+ const TemplateParameterList *Params) {
+ Out << "Tt";
+ for (auto *Param : *Params)
+ mangleTemplateParamDecl(Param);
+ mangleRequiresClause(Params->getRequiresClause());
+ Out << "E";
+}
+
+void CXXNameMangler::mangleTypeConstraint(
+ const ConceptDecl *Concept, ArrayRef<TemplateArgument> Arguments) {
+ const DeclContext *DC = Context.getEffectiveDeclContext(Concept);
+ if (!Arguments.empty())
+ mangleTemplateName(Concept, Arguments);
+ else if (DC->isTranslationUnit() || isStdNamespace(DC))
+ mangleUnscopedName(Concept, DC, nullptr);
+ else
+ mangleNestedName(Concept, DC, nullptr);
+}
+
+void CXXNameMangler::mangleTypeConstraint(const TypeConstraint *Constraint) {
+ llvm::SmallVector<TemplateArgument, 8> Args;
+ if (Constraint->getTemplateArgsAsWritten()) {
+ for (const TemplateArgumentLoc &ArgLoc :
+ Constraint->getTemplateArgsAsWritten()->arguments())
+ Args.push_back(ArgLoc.getArgument());
+ }
+ return mangleTypeConstraint(Constraint->getNamedConcept(), Args);
+}
+
+void CXXNameMangler::mangleRequiresClause(const Expr *RequiresClause) {
+ // Proposed on https://github.com/itanium-cxx-abi/cxx-abi/issues/24.
+ if (RequiresClause && !isCompatibleWith(LangOptions::ClangABI::Ver17)) {
+ Out << 'Q';
+ mangleExpression(RequiresClause);
+ }
+}
+
void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
// When trying to be ABI-compatibility with clang 12 and before, mangle a
// <data-member-prefix> now, with no substitutions.
if (Decl *Context = Lambda->getLambdaContextDecl()) {
- if (getASTContext().getLangOpts().getClangABICompat() <=
- LangOptions::ClangABI::Ver12 &&
+ if (isCompatibleWith(LangOptions::ClangABI::Ver12) &&
(isa<VarDecl>(Context) || isa<FieldDecl>(Context)) &&
!isa<ParmVarDecl>(Context)) {
if (const IdentifierInfo *Name
@@ -1958,10 +2116,10 @@ void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
// if the host-side CXX ABI has different numbering for lambda. In such case,
// if the mangle context is that device-side one, use the device-side lambda
// mangling number for this lambda.
- llvm::Optional<unsigned> DeviceNumber =
+ std::optional<unsigned> DeviceNumber =
Context.getDiscriminatorOverride()(Context.getASTContext(), Lambda);
- unsigned Number = DeviceNumber.hasValue() ? *DeviceNumber
- : Lambda->getLambdaManglingNumber();
+ unsigned Number =
+ DeviceNumber ? *DeviceNumber : Lambda->getLambdaManglingNumber();
assert(Number > 0 && "Lambda should be mangled as an unnamed class");
if (Number > 1)
@@ -1970,8 +2128,14 @@ void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
}
void CXXNameMangler::mangleLambdaSig(const CXXRecordDecl *Lambda) {
+ // Proposed on https://github.com/itanium-cxx-abi/cxx-abi/issues/31.
for (auto *D : Lambda->getLambdaExplicitTemplateParameters())
mangleTemplateParamDecl(D);
+
+ // Proposed on https://github.com/itanium-cxx-abi/cxx-abi/issues/24.
+ if (auto *TPL = Lambda->getGenericLambdaTemplateParameterList())
+ mangleRequiresClause(TPL->getRequiresClause());
+
auto *Proto =
Lambda->getLambdaTypeInfo()->getType()->castAs<FunctionProtoType>();
mangleBareFunctionType(Proto, /*MangleReturnType=*/false,
@@ -2001,12 +2165,20 @@ void CXXNameMangler::manglePrefix(NestedNameSpecifier *qualifier) {
return;
case NestedNameSpecifier::Identifier:
+ // Clang 14 and before did not consider this substitutable.
+ bool Clang14Compat = isCompatibleWith(LangOptions::ClangABI::Ver14);
+ if (!Clang14Compat && mangleSubstitution(qualifier))
+ return;
+
// Member expressions can have these without prefixes, but that
// should end up in mangleUnresolvedPrefix instead.
assert(qualifier->getPrefix());
manglePrefix(qualifier->getPrefix());
mangleSourceName(qualifier->getAsIdentifier());
+
+ if (!Clang14Compat)
+ addSubstitution(qualifier);
return;
}
@@ -2021,7 +2193,7 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
// ::= # empty
// ::= <substitution>
- DC = IgnoreLinkageSpecDecls(DC);
+ assert(!isa<LinkageSpecDecl>(DC) && "prefix cannot be LinkageSpecDecl");
if (DC->isTranslationUnit())
return;
@@ -2042,10 +2214,11 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
mangleTemplateArgs(asTemplateName(TD), *TemplateArgs);
} else if (const NamedDecl *PrefixND = getClosurePrefix(ND)) {
mangleClosurePrefix(PrefixND, NoFunction);
- mangleUnqualifiedName(ND, nullptr);
+ mangleUnqualifiedName(ND, nullptr, nullptr);
} else {
- manglePrefix(getEffectiveDeclContext(ND), NoFunction);
- mangleUnqualifiedName(ND, nullptr);
+ const DeclContext *DC = Context.getEffectiveDeclContext(ND);
+ manglePrefix(DC, NoFunction);
+ mangleUnqualifiedName(ND, DC, nullptr);
}
addSubstitution(ND);
@@ -2063,8 +2236,7 @@ void CXXNameMangler::mangleTemplatePrefix(TemplateName Template) {
// Clang 11 and before mangled the substitution for a dependent template name
// after already having emitted (a substitution for) the prefix.
- bool Clang11Compat = getASTContext().getLangOpts().getClangABICompat() <=
- LangOptions::ClangABI::Ver11;
+ bool Clang11Compat = isCompatibleWith(LangOptions::ClangABI::Ver11);
if (!Clang11Compat && mangleSubstitution(Template))
return;
@@ -2098,19 +2270,20 @@ void CXXNameMangler::mangleTemplatePrefix(GlobalDecl GD,
if (const auto *TTP = dyn_cast<TemplateTemplateParmDecl>(ND)) {
mangleTemplateParameter(TTP->getDepth(), TTP->getIndex());
} else {
- manglePrefix(getEffectiveDeclContext(ND), NoFunction);
+ const DeclContext *DC = Context.getEffectiveDeclContext(ND);
+ manglePrefix(DC, NoFunction);
if (isa<BuiltinTemplateDecl>(ND) || isa<ConceptDecl>(ND))
- mangleUnqualifiedName(GD, nullptr);
+ mangleUnqualifiedName(GD, DC, nullptr);
else
- mangleUnqualifiedName(GD.getWithDecl(ND->getTemplatedDecl()), nullptr);
+ mangleUnqualifiedName(GD.getWithDecl(ND->getTemplatedDecl()), DC,
+ nullptr);
}
addSubstitution(ND);
}
const NamedDecl *CXXNameMangler::getClosurePrefix(const Decl *ND) {
- if (getASTContext().getLangOpts().getClangABICompat() <=
- LangOptions::ClangABI::Ver12)
+ if (isCompatibleWith(LangOptions::ClangABI::Ver12))
return nullptr;
const NamedDecl *Context = nullptr;
@@ -2143,8 +2316,9 @@ void CXXNameMangler::mangleClosurePrefix(const NamedDecl *ND, bool NoFunction) {
mangleTemplatePrefix(TD, NoFunction);
mangleTemplateArgs(asTemplateName(TD), *TemplateArgs);
} else {
- manglePrefix(getEffectiveDeclContext(ND), NoFunction);
- mangleUnqualifiedName(ND, nullptr);
+ const auto *DC = Context.getEffectiveDeclContext(ND);
+ manglePrefix(DC, NoFunction);
+ mangleUnqualifiedName(ND, DC, nullptr);
}
Out << 'M';
@@ -2165,9 +2339,7 @@ void CXXNameMangler::mangleType(TemplateName TN) {
switch (TN.getKind()) {
case TemplateName::QualifiedTemplate:
- TD = TN.getAsQualifiedTemplateName()->getTemplateDecl();
- goto HaveDecl;
-
+ case TemplateName::UsingTemplate:
case TemplateName::Template:
TD = TN.getAsTemplateDecl();
goto HaveDecl;
@@ -2246,6 +2418,7 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
case Type::FunctionNoProto:
case Type::Paren:
case Type::Attributed:
+ case Type::BTFTagAttributed:
case Type::Auto:
case Type::DeducedTemplateSpecialization:
case Type::PackExpansion:
@@ -2256,8 +2429,8 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
case Type::Atomic:
case Type::Pipe:
case Type::MacroQualified:
- case Type::ExtInt:
- case Type::DependentExtInt:
+ case Type::BitInt:
+ case Type::DependentBitInt:
llvm_unreachable("type is illegal as a nested name specifier");
case Type::SubstTemplateTypeParmPack:
@@ -2343,6 +2516,12 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
Out << "_SUBSTPACK_";
break;
}
+ case TemplateName::UsingTemplate: {
+ TemplateDecl *TD = TN.getAsTemplateDecl();
+ assert(TD && !isa<TemplateTemplateParmDecl>(TD));
+ mangleSourceNameWithAbiTags(TD);
+ break;
+ }
}
// Note: we don't pass in the template name here. We are mangling the
@@ -2350,7 +2529,7 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
// conversions to the corresponding template parameter.
// FIXME: Other compilers mangle partially-resolved template arguments in
// unresolved-qualifier-levels.
- mangleTemplateArgs(TemplateName(), TST->getArgs(), TST->getNumArgs());
+ mangleTemplateArgs(TemplateName(), TST->template_arguments());
break;
}
@@ -2369,10 +2548,13 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
TemplateName Template = getASTContext().getDependentTemplateName(
DTST->getQualifier(), DTST->getIdentifier());
mangleSourceName(DTST->getIdentifier());
- mangleTemplateArgs(Template, DTST->getArgs(), DTST->getNumArgs());
+ mangleTemplateArgs(Template, DTST->template_arguments());
break;
}
+ case Type::Using:
+ return mangleUnresolvedTypeOrSimpleId(cast<UsingType>(Ty)->desugar(),
+ Prefix);
case Type::Elaborated:
return mangleUnresolvedTypeOrSimpleId(
cast<ElaboratedType>(Ty)->getNamedType(), Prefix);
@@ -2708,6 +2890,10 @@ static bool isTypeSubstitutable(Qualifiers Quals, const Type *Ty,
return true;
if (Ty->isOpenCLSpecificType())
return true;
+ // From Clang 18.0 we correctly treat SVE types as substitution candidates.
+ if (Ty->isSVESizelessBuiltinType() &&
+ Ctx.getLangOpts().getClangABICompat() > LangOptions::ClangABI::Ver17)
+ return true;
if (Ty->isBuiltinType())
return false;
// Through to Clang 6.0, we accidentally treated undeduced auto types as
@@ -2860,6 +3046,7 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
// ::= d # double
// ::= e # long double, __float80
// ::= g # __float128
+ // ::= g # __ibm128
// UNSUPPORTED: ::= Dd # IEEE 754r decimal floating point (64 bits)
// UNSUPPORTED: ::= De # IEEE 754r decimal floating point (128 bits)
// UNSUPPORTED: ::= Df # IEEE 754r decimal floating point (32 bits)
@@ -2868,8 +3055,97 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
// ::= Di # char32_t
// ::= Ds # char16_t
// ::= Dn # std::nullptr_t (i.e., decltype(nullptr))
+ // ::= [DS] DA # N1169 fixed-point [_Sat] T _Accum
+ // ::= [DS] DR # N1169 fixed-point [_Sat] T _Fract
// ::= u <source-name> # vendor extended type
+ //
+ // <fixed-point-size>
+ // ::= s # short
+ // ::= t # unsigned short
+ // ::= i # plain
+ // ::= j # unsigned
+ // ::= l # long
+ // ::= m # unsigned long
std::string type_name;
+ // Normalize integer types as vendor extended types:
+ // u<length>i<type size>
+ // u<length>u<type size>
+ if (NormalizeIntegers && T->isInteger()) {
+ if (T->isSignedInteger()) {
+ switch (getASTContext().getTypeSize(T)) {
+ case 8:
+ // Pick a representative for each integer size in the substitution
+ // dictionary. (Its actual defined size is not relevant.)
+ if (mangleSubstitution(BuiltinType::SChar))
+ break;
+ Out << "u2i8";
+ addSubstitution(BuiltinType::SChar);
+ break;
+ case 16:
+ if (mangleSubstitution(BuiltinType::Short))
+ break;
+ Out << "u3i16";
+ addSubstitution(BuiltinType::Short);
+ break;
+ case 32:
+ if (mangleSubstitution(BuiltinType::Int))
+ break;
+ Out << "u3i32";
+ addSubstitution(BuiltinType::Int);
+ break;
+ case 64:
+ if (mangleSubstitution(BuiltinType::Long))
+ break;
+ Out << "u3i64";
+ addSubstitution(BuiltinType::Long);
+ break;
+ case 128:
+ if (mangleSubstitution(BuiltinType::Int128))
+ break;
+ Out << "u4i128";
+ addSubstitution(BuiltinType::Int128);
+ break;
+ default:
+ llvm_unreachable("Unknown integer size for normalization");
+ }
+ } else {
+ switch (getASTContext().getTypeSize(T)) {
+ case 8:
+ if (mangleSubstitution(BuiltinType::UChar))
+ break;
+ Out << "u2u8";
+ addSubstitution(BuiltinType::UChar);
+ break;
+ case 16:
+ if (mangleSubstitution(BuiltinType::UShort))
+ break;
+ Out << "u3u16";
+ addSubstitution(BuiltinType::UShort);
+ break;
+ case 32:
+ if (mangleSubstitution(BuiltinType::UInt))
+ break;
+ Out << "u3u32";
+ addSubstitution(BuiltinType::UInt);
+ break;
+ case 64:
+ if (mangleSubstitution(BuiltinType::ULong))
+ break;
+ Out << "u3u64";
+ addSubstitution(BuiltinType::ULong);
+ break;
+ case 128:
+ if (mangleSubstitution(BuiltinType::UInt128))
+ break;
+ Out << "u4u128";
+ addSubstitution(BuiltinType::UInt128);
+ break;
+ default:
+ llvm_unreachable("Unknown integer size for normalization");
+ }
+ }
+ return;
+ }
switch (T->getKind()) {
case BuiltinType::Void:
Out << 'v';
@@ -2934,30 +3210,77 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
Out << "DF16_";
break;
case BuiltinType::ShortAccum:
+ Out << "DAs";
+ break;
case BuiltinType::Accum:
+ Out << "DAi";
+ break;
case BuiltinType::LongAccum:
+ Out << "DAl";
+ break;
case BuiltinType::UShortAccum:
+ Out << "DAt";
+ break;
case BuiltinType::UAccum:
+ Out << "DAj";
+ break;
case BuiltinType::ULongAccum:
+ Out << "DAm";
+ break;
case BuiltinType::ShortFract:
+ Out << "DRs";
+ break;
case BuiltinType::Fract:
+ Out << "DRi";
+ break;
case BuiltinType::LongFract:
+ Out << "DRl";
+ break;
case BuiltinType::UShortFract:
+ Out << "DRt";
+ break;
case BuiltinType::UFract:
+ Out << "DRj";
+ break;
case BuiltinType::ULongFract:
+ Out << "DRm";
+ break;
case BuiltinType::SatShortAccum:
+ Out << "DSDAs";
+ break;
case BuiltinType::SatAccum:
+ Out << "DSDAi";
+ break;
case BuiltinType::SatLongAccum:
+ Out << "DSDAl";
+ break;
case BuiltinType::SatUShortAccum:
+ Out << "DSDAt";
+ break;
case BuiltinType::SatUAccum:
+ Out << "DSDAj";
+ break;
case BuiltinType::SatULongAccum:
+ Out << "DSDAm";
+ break;
case BuiltinType::SatShortFract:
+ Out << "DSDRs";
+ break;
case BuiltinType::SatFract:
+ Out << "DSDRi";
+ break;
case BuiltinType::SatLongFract:
+ Out << "DSDRl";
+ break;
case BuiltinType::SatUShortFract:
+ Out << "DSDRt";
+ break;
case BuiltinType::SatUFract:
+ Out << "DSDRj";
+ break;
case BuiltinType::SatULongFract:
- llvm_unreachable("Fixed point types are disabled for c++");
+ Out << "DSDRm";
+ break;
case BuiltinType::Half:
Out << "Dh";
break;
@@ -2968,26 +3291,38 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
Out << 'd';
break;
case BuiltinType::LongDouble: {
- const TargetInfo *TI = getASTContext().getLangOpts().OpenMP &&
- getASTContext().getLangOpts().OpenMPIsDevice
- ? getASTContext().getAuxTargetInfo()
- : &getASTContext().getTargetInfo();
+ const TargetInfo *TI =
+ getASTContext().getLangOpts().OpenMP &&
+ getASTContext().getLangOpts().OpenMPIsTargetDevice
+ ? getASTContext().getAuxTargetInfo()
+ : &getASTContext().getTargetInfo();
Out << TI->getLongDoubleMangling();
break;
}
case BuiltinType::Float128: {
- const TargetInfo *TI = getASTContext().getLangOpts().OpenMP &&
- getASTContext().getLangOpts().OpenMPIsDevice
- ? getASTContext().getAuxTargetInfo()
- : &getASTContext().getTargetInfo();
+ const TargetInfo *TI =
+ getASTContext().getLangOpts().OpenMP &&
+ getASTContext().getLangOpts().OpenMPIsTargetDevice
+ ? getASTContext().getAuxTargetInfo()
+ : &getASTContext().getTargetInfo();
Out << TI->getFloat128Mangling();
break;
}
case BuiltinType::BFloat16: {
- const TargetInfo *TI = &getASTContext().getTargetInfo();
+ const TargetInfo *TI =
+ ((getASTContext().getLangOpts().OpenMP &&
+ getASTContext().getLangOpts().OpenMPIsTargetDevice) ||
+ getASTContext().getLangOpts().SYCLIsDevice)
+ ? getASTContext().getAuxTargetInfo()
+ : &getASTContext().getTargetInfo();
Out << TI->getBFloat16Mangling();
break;
}
+ case BuiltinType::Ibm128: {
+ const TargetInfo *TI = &getASTContext().getTargetInfo();
+ Out << TI->getIbm128Mangling();
+ break;
+ }
case BuiltinType::NullPtr:
Out << "Dn";
break;
@@ -3042,11 +3377,24 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
#define SVE_VECTOR_TYPE(InternalName, MangledName, Id, SingletonId, NumEls, \
ElBits, IsSigned, IsFP, IsBF) \
case BuiltinType::Id: \
+ if (T->getKind() == BuiltinType::SveBFloat16 && \
+ isCompatibleWith(LangOptions::ClangABI::Ver17)) { \
+ /* Prior to Clang 18.0 we used this incorrect mangled name */ \
+ type_name = "__SVBFloat16_t"; \
+ Out << "u" << type_name.size() << type_name; \
+ } else { \
+ type_name = MangledName; \
+ Out << (type_name == InternalName ? "u" : "") << type_name.size() \
+ << type_name; \
+ } \
+ break;
+#define SVE_PREDICATE_TYPE(InternalName, MangledName, Id, SingletonId, NumEls) \
+ case BuiltinType::Id: \
type_name = MangledName; \
Out << (type_name == InternalName ? "u" : "") << type_name.size() \
<< type_name; \
break;
-#define SVE_PREDICATE_TYPE(InternalName, MangledName, Id, SingletonId, NumEls) \
+#define SVE_OPAQUE_TYPE(InternalName, MangledName, Id, SingletonId) \
case BuiltinType::Id: \
type_name = MangledName; \
Out << (type_name == InternalName ? "u" : "") << type_name.size() \
@@ -3066,6 +3414,12 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
Out << 'u' << type_name.size() << type_name; \
break;
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_REF_TYPE(InternalName, MangledName, Id, SingletonId, AS) \
+ case BuiltinType::Id: \
+ type_name = MangledName; \
+ Out << 'u' << type_name.size() << type_name; \
+ break;
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
}
}
@@ -3080,11 +3434,14 @@ StringRef CXXNameMangler::getCallingConvQualifierName(CallingConv CC) {
case CC_AAPCS:
case CC_AAPCS_VFP:
case CC_AArch64VectorCall:
+ case CC_AArch64SVEPCS:
+ case CC_AMDGPUKernelCall:
case CC_IntelOclBicc:
case CC_SpirFunction:
case CC_OpenCLKernel:
case CC_PreserveMost:
case CC_PreserveAll:
+ case CC_M68kRTD:
// FIXME: we should be mangling all of the above.
return "";
@@ -3242,39 +3599,42 @@ void CXXNameMangler::mangleBareFunctionType(const FunctionProtoType *Proto,
if (Proto->getNumParams() == 0 && !Proto->isVariadic()) {
// <builtin-type> ::= v # void
Out << 'v';
+ } else {
+ assert(!FD || FD->getNumParams() == Proto->getNumParams());
+ for (unsigned I = 0, E = Proto->getNumParams(); I != E; ++I) {
+ // Mangle extended parameter info as order-sensitive qualifiers here.
+ if (Proto->hasExtParameterInfos() && FD == nullptr) {
+ mangleExtParameterInfo(Proto->getExtParameterInfo(I));
+ }
- FunctionTypeDepth.pop(saved);
- return;
- }
-
- assert(!FD || FD->getNumParams() == Proto->getNumParams());
- for (unsigned I = 0, E = Proto->getNumParams(); I != E; ++I) {
- // Mangle extended parameter info as order-sensitive qualifiers here.
- if (Proto->hasExtParameterInfos() && FD == nullptr) {
- mangleExtParameterInfo(Proto->getExtParameterInfo(I));
+ // Mangle the type.
+ QualType ParamTy = Proto->getParamType(I);
+ mangleType(Context.getASTContext().getSignatureParameterType(ParamTy));
+
+ if (FD) {
+ if (auto *Attr = FD->getParamDecl(I)->getAttr<PassObjectSizeAttr>()) {
+ // Attr can only take 1 character, so we can hardcode the length
+ // below.
+ assert(Attr->getType() <= 9 && Attr->getType() >= 0);
+ if (Attr->isDynamic())
+ Out << "U25pass_dynamic_object_size" << Attr->getType();
+ else
+ Out << "U17pass_object_size" << Attr->getType();
+ }
+ }
}
- // Mangle the type.
- QualType ParamTy = Proto->getParamType(I);
- mangleType(Context.getASTContext().getSignatureParameterType(ParamTy));
+ // <builtin-type> ::= z # ellipsis
+ if (Proto->isVariadic())
+ Out << 'z';
+ }
- if (FD) {
- if (auto *Attr = FD->getParamDecl(I)->getAttr<PassObjectSizeAttr>()) {
- // Attr can only take 1 character, so we can hardcode the length below.
- assert(Attr->getType() <= 9 && Attr->getType() >= 0);
- if (Attr->isDynamic())
- Out << "U25pass_dynamic_object_size" << Attr->getType();
- else
- Out << "U17pass_object_size" << Attr->getType();
- }
- }
+ if (FD) {
+ FunctionTypeDepth.enterResultType();
+ mangleRequiresClause(FD->getTrailingRequiresClause());
}
FunctionTypeDepth.pop(saved);
-
- // <builtin-type> ::= z # ellipsis
- if (Proto->isVariadic())
- Out << 'z';
}
// <type> ::= <class-enum-type>
@@ -3404,7 +3764,7 @@ void CXXNameMangler::mangleNeonVectorType(const VectorType *T) {
QualType EltType = T->getElementType();
assert(EltType->isBuiltinType() && "Neon vector element not a BuiltinType");
const char *EltName = nullptr;
- if (T->getVectorKind() == VectorType::NeonPolyVector) {
+ if (T->getVectorKind() == VectorKind::NeonPoly) {
switch (cast<BuiltinType>(EltType)->getKind()) {
case BuiltinType::SChar:
case BuiltinType::UChar:
@@ -3506,7 +3866,7 @@ void CXXNameMangler::mangleAArch64NeonVectorType(const VectorType *T) {
"Neon vector type not 64 or 128 bits");
StringRef EltName;
- if (T->getVectorKind() == VectorType::NeonPolyVector) {
+ if (T->getVectorKind() == VectorKind::NeonPoly) {
switch (cast<BuiltinType>(EltType)->getKind()) {
case BuiltinType::UChar:
EltName = "Poly8";
@@ -3559,10 +3919,10 @@ void CXXNameMangler::mangleAArch64NeonVectorType(const DependentVectorType *T) {
// mangling scheme, it will be specified in the next revision. The mangling
// scheme is otherwise defined in the appendices to the Procedure Call Standard
// for the Arm Architecture, see
-// https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst#appendix-c-mangling
+// https://github.com/ARM-software/abi-aa/blob/main/aapcs64/aapcs64.rst#appendix-c-mangling
void CXXNameMangler::mangleAArch64FixedSveVectorType(const VectorType *T) {
- assert((T->getVectorKind() == VectorType::SveFixedLengthDataVector ||
- T->getVectorKind() == VectorType::SveFixedLengthPredicateVector) &&
+ assert((T->getVectorKind() == VectorKind::SveFixedLengthData ||
+ T->getVectorKind() == VectorKind::SveFixedLengthPredicate) &&
"expected fixed-length SVE vector!");
QualType EltType = T->getElementType();
@@ -3575,7 +3935,7 @@ void CXXNameMangler::mangleAArch64FixedSveVectorType(const VectorType *T) {
TypeName = "__SVInt8_t";
break;
case BuiltinType::UChar: {
- if (T->getVectorKind() == VectorType::SveFixedLengthDataVector)
+ if (T->getVectorKind() == VectorKind::SveFixedLengthData)
TypeName = "__SVUint8_t";
else
TypeName = "__SVBool_t";
@@ -3617,7 +3977,7 @@ void CXXNameMangler::mangleAArch64FixedSveVectorType(const VectorType *T) {
unsigned VecSizeInBits = getASTContext().getTypeInfo(T).Width;
- if (T->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
+ if (T->getVectorKind() == VectorKind::SveFixedLengthPredicate)
VecSizeInBits *= 8;
Out << "9__SVE_VLSI" << 'u' << TypeName.size() << TypeName << "Lj"
@@ -3633,6 +3993,90 @@ void CXXNameMangler::mangleAArch64FixedSveVectorType(
Diags.Report(T->getAttributeLoc(), DiagID);
}
+void CXXNameMangler::mangleRISCVFixedRVVVectorType(const VectorType *T) {
+ assert((T->getVectorKind() == VectorKind::RVVFixedLengthData ||
+ T->getVectorKind() == VectorKind::RVVFixedLengthMask) &&
+ "expected fixed-length RVV vector!");
+
+ QualType EltType = T->getElementType();
+ assert(EltType->isBuiltinType() &&
+ "expected builtin type for fixed-length RVV vector!");
+
+ SmallString<20> TypeNameStr;
+ llvm::raw_svector_ostream TypeNameOS(TypeNameStr);
+ TypeNameOS << "__rvv_";
+ switch (cast<BuiltinType>(EltType)->getKind()) {
+ case BuiltinType::SChar:
+ TypeNameOS << "int8";
+ break;
+ case BuiltinType::UChar:
+ if (T->getVectorKind() == VectorKind::RVVFixedLengthData)
+ TypeNameOS << "uint8";
+ else
+ TypeNameOS << "bool";
+ break;
+ case BuiltinType::Short:
+ TypeNameOS << "int16";
+ break;
+ case BuiltinType::UShort:
+ TypeNameOS << "uint16";
+ break;
+ case BuiltinType::Int:
+ TypeNameOS << "int32";
+ break;
+ case BuiltinType::UInt:
+ TypeNameOS << "uint32";
+ break;
+ case BuiltinType::Long:
+ TypeNameOS << "int64";
+ break;
+ case BuiltinType::ULong:
+ TypeNameOS << "uint64";
+ break;
+ case BuiltinType::Float16:
+ TypeNameOS << "float16";
+ break;
+ case BuiltinType::Float:
+ TypeNameOS << "float32";
+ break;
+ case BuiltinType::Double:
+ TypeNameOS << "float64";
+ break;
+ default:
+ llvm_unreachable("unexpected element type for fixed-length RVV vector!");
+ }
+
+ unsigned VecSizeInBits = getASTContext().getTypeInfo(T).Width;
+
+ // Apend the LMUL suffix.
+ auto VScale = getASTContext().getTargetInfo().getVScaleRange(
+ getASTContext().getLangOpts());
+ unsigned VLen = VScale->first * llvm::RISCV::RVVBitsPerBlock;
+
+ if (T->getVectorKind() == VectorKind::RVVFixedLengthData) {
+ TypeNameOS << 'm';
+ if (VecSizeInBits >= VLen)
+ TypeNameOS << (VecSizeInBits / VLen);
+ else
+ TypeNameOS << 'f' << (VLen / VecSizeInBits);
+ } else {
+ TypeNameOS << (VLen / VecSizeInBits);
+ }
+ TypeNameOS << "_t";
+
+ Out << "9__RVV_VLSI" << 'u' << TypeNameStr.size() << TypeNameStr << "Lj"
+ << VecSizeInBits << "EE";
+}
+
+void CXXNameMangler::mangleRISCVFixedRVVVectorType(
+ const DependentVectorType *T) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "cannot mangle this dependent fixed-length RVV vector type yet");
+ Diags.Report(T->getAttributeLoc(), DiagID);
+}
+
// GNU extension: vector types
// <type> ::= <vector-type>
// <vector-type> ::= Dv <positive dimension number> _
@@ -3642,8 +4086,8 @@ void CXXNameMangler::mangleAArch64FixedSveVectorType(
// ::= p # AltiVec vector pixel
// ::= b # Altivec vector bool
void CXXNameMangler::mangleType(const VectorType *T) {
- if ((T->getVectorKind() == VectorType::NeonVector ||
- T->getVectorKind() == VectorType::NeonPolyVector)) {
+ if ((T->getVectorKind() == VectorKind::Neon ||
+ T->getVectorKind() == VectorKind::NeonPoly)) {
llvm::Triple Target = getASTContext().getTargetInfo().getTriple();
llvm::Triple::ArchType Arch =
getASTContext().getTargetInfo().getTriple().getArch();
@@ -3653,23 +4097,27 @@ void CXXNameMangler::mangleType(const VectorType *T) {
else
mangleNeonVectorType(T);
return;
- } else if (T->getVectorKind() == VectorType::SveFixedLengthDataVector ||
- T->getVectorKind() == VectorType::SveFixedLengthPredicateVector) {
+ } else if (T->getVectorKind() == VectorKind::SveFixedLengthData ||
+ T->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
mangleAArch64FixedSveVectorType(T);
return;
+ } else if (T->getVectorKind() == VectorKind::RVVFixedLengthData ||
+ T->getVectorKind() == VectorKind::RVVFixedLengthMask) {
+ mangleRISCVFixedRVVVectorType(T);
+ return;
}
Out << "Dv" << T->getNumElements() << '_';
- if (T->getVectorKind() == VectorType::AltiVecPixel)
+ if (T->getVectorKind() == VectorKind::AltiVecPixel)
Out << 'p';
- else if (T->getVectorKind() == VectorType::AltiVecBool)
+ else if (T->getVectorKind() == VectorKind::AltiVecBool)
Out << 'b';
else
mangleType(T->getElementType());
}
void CXXNameMangler::mangleType(const DependentVectorType *T) {
- if ((T->getVectorKind() == VectorType::NeonVector ||
- T->getVectorKind() == VectorType::NeonPolyVector)) {
+ if ((T->getVectorKind() == VectorKind::Neon ||
+ T->getVectorKind() == VectorKind::NeonPoly)) {
llvm::Triple Target = getASTContext().getTargetInfo().getTriple();
llvm::Triple::ArchType Arch =
getASTContext().getTargetInfo().getTriple().getArch();
@@ -3679,18 +4127,21 @@ void CXXNameMangler::mangleType(const DependentVectorType *T) {
else
mangleNeonVectorType(T);
return;
- } else if (T->getVectorKind() == VectorType::SveFixedLengthDataVector ||
- T->getVectorKind() == VectorType::SveFixedLengthPredicateVector) {
+ } else if (T->getVectorKind() == VectorKind::SveFixedLengthData ||
+ T->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
mangleAArch64FixedSveVectorType(T);
return;
+ } else if (T->getVectorKind() == VectorKind::RVVFixedLengthData) {
+ mangleRISCVFixedRVVVectorType(T);
+ return;
}
Out << "Dv";
mangleExpression(T->getSizeExpr());
Out << '_';
- if (T->getVectorKind() == VectorType::AltiVecPixel)
+ if (T->getVectorKind() == VectorKind::AltiVecPixel)
Out << 'p';
- else if (T->getVectorKind() == VectorType::AltiVecBool)
+ else if (T->getVectorKind() == VectorKind::AltiVecBool)
Out << 'b';
else
mangleType(T->getElementType());
@@ -3797,7 +4248,7 @@ void CXXNameMangler::mangleType(const InjectedClassNameType *T) {
void CXXNameMangler::mangleType(const TemplateSpecializationType *T) {
if (TemplateDecl *TD = T->getTemplateName().getAsTemplateDecl()) {
- mangleTemplateName(TD, T->getArgs(), T->getNumArgs());
+ mangleTemplateName(TD, T->template_arguments());
} else {
if (mangleSubstitution(QualType(T, 0)))
return;
@@ -3807,7 +4258,7 @@ void CXXNameMangler::mangleType(const TemplateSpecializationType *T) {
// FIXME: GCC does not appear to mangle the template arguments when
// the template in question is a dependent template name. Should we
// emulate that badness?
- mangleTemplateArgs(T->getTemplateName(), T->getArgs(), T->getNumArgs());
+ mangleTemplateArgs(T->getTemplateName(), T->template_arguments());
addSubstitution(QualType(T, 0));
}
}
@@ -3824,20 +4275,20 @@ void CXXNameMangler::mangleType(const DependentNameType *T) {
// ::= Te <name> # dependent elaborated type specifier using
// # 'enum'
switch (T->getKeyword()) {
- case ETK_None:
- case ETK_Typename:
- break;
- case ETK_Struct:
- case ETK_Class:
- case ETK_Interface:
- Out << "Ts";
- break;
- case ETK_Union:
- Out << "Tu";
- break;
- case ETK_Enum:
- Out << "Te";
- break;
+ case ElaboratedTypeKeyword::None:
+ case ElaboratedTypeKeyword::Typename:
+ break;
+ case ElaboratedTypeKeyword::Struct:
+ case ElaboratedTypeKeyword::Class:
+ case ElaboratedTypeKeyword::Interface:
+ Out << "Ts";
+ break;
+ case ElaboratedTypeKeyword::Union:
+ Out << "Tu";
+ break;
+ case ElaboratedTypeKeyword::Enum:
+ Out << "Te";
+ break;
}
// Typename types are always nested
Out << 'N';
@@ -3859,7 +4310,7 @@ void CXXNameMangler::mangleType(const DependentTemplateSpecializationType *T) {
// FIXME: GCC does not appear to mangle the template arguments when
// the template in question is a dependent template name. Should we
// emulate that badness?
- mangleTemplateArgs(Prefix, T->getArgs(), T->getNumArgs());
+ mangleTemplateArgs(Prefix, T->template_arguments());
Out << 'E';
}
@@ -3903,16 +4354,22 @@ void CXXNameMangler::mangleType(const UnaryTransformType *T) {
// If this is dependent, we need to record that. If not, we simply
// mangle it as the underlying type since they are equivalent.
if (T->isDependentType()) {
- Out << 'U';
+ Out << "u";
+ StringRef BuiltinName;
switch (T->getUTTKind()) {
- case UnaryTransformType::EnumUnderlyingType:
- Out << "3eut";
- break;
+#define TRANSFORM_TYPE_TRAIT_DEF(Enum, Trait) \
+ case UnaryTransformType::Enum: \
+ BuiltinName = "__" #Trait; \
+ break;
+#include "clang/Basic/TransformTypeTraits.def"
}
+ Out << BuiltinName.size() << BuiltinName;
}
+ Out << "I";
mangleType(T->getBaseType());
+ Out << "E";
}
void CXXNameMangler::mangleType(const AutoType *T) {
@@ -3922,7 +4379,15 @@ void CXXNameMangler::mangleType(const AutoType *T) {
"shouldn't need to mangle __auto_type!");
// <builtin-type> ::= Da # auto
// ::= Dc # decltype(auto)
- Out << (T->isDecltypeAuto() ? "Dc" : "Da");
+ // ::= Dk # constrained auto
+ // ::= DK # constrained decltype(auto)
+ if (T->isConstrained() && !isCompatibleWith(LangOptions::ClangABI::Ver17)) {
+ Out << (T->isDecltypeAuto() ? "DK" : "Dk");
+ mangleTypeConstraint(T->getTypeConstraintConcept(),
+ T->getTypeConstraintArguments());
+ } else {
+ Out << (T->isDecltypeAuto() ? "Dc" : "Da");
+ }
}
void CXXNameMangler::mangleType(const DeducedTemplateSpecializationType *T) {
@@ -3954,26 +4419,20 @@ void CXXNameMangler::mangleType(const PipeType *T) {
Out << "8ocl_pipe";
}
-void CXXNameMangler::mangleType(const ExtIntType *T) {
- Out << "U7_ExtInt";
- llvm::APSInt BW(32, true);
- BW = T->getNumBits();
- TemplateArgument TA(Context.getASTContext(), BW, getASTContext().IntTy);
- mangleTemplateArgs(TemplateName(), &TA, 1);
- if (T->isUnsigned())
- Out << "j";
- else
- Out << "i";
+void CXXNameMangler::mangleType(const BitIntType *T) {
+ // 5.1.5.2 Builtin types
+ // <type> ::= DB <number | instantiation-dependent expression> _
+ // ::= DU <number | instantiation-dependent expression> _
+ Out << "D" << (T->isUnsigned() ? "U" : "B") << T->getNumBits() << "_";
}
-void CXXNameMangler::mangleType(const DependentExtIntType *T) {
- Out << "U7_ExtInt";
- TemplateArgument TA(T->getNumBitsExpr());
- mangleTemplateArgs(TemplateName(), &TA, 1);
- if (T->isUnsigned())
- Out << "j";
- else
- Out << "i";
+void CXXNameMangler::mangleType(const DependentBitIntType *T) {
+ // 5.1.5.2 Builtin types
+ // <type> ::= DB <number | instantiation-dependent expression> _
+ // ::= DU <number | instantiation-dependent expression> _
+ Out << "D" << (T->isUnsigned() ? "U" : "B");
+ mangleExpression(T->getNumBitsExpr());
+ Out << "_";
}
void CXXNameMangler::mangleIntegerLiteral(QualType T,
@@ -4076,6 +4535,74 @@ void CXXNameMangler::mangleInitListElements(const InitListExpr *InitList) {
mangleExpression(InitList->getInit(i));
}
+void CXXNameMangler::mangleRequirement(SourceLocation RequiresExprLoc,
+ const concepts::Requirement *Req) {
+ using concepts::Requirement;
+
+ // TODO: We can't mangle the result of a failed substitution. It's not clear
+ // whether we should be mangling the original form prior to any substitution
+ // instead. See https://lists.isocpp.org/core/2023/04/14118.php
+ auto HandleSubstitutionFailure =
+ [&](SourceLocation Loc) {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error, "cannot mangle this requires-expression "
+ "containing a substitution failure");
+ Diags.Report(Loc, DiagID);
+ Out << 'F';
+ };
+
+ switch (Req->getKind()) {
+ case Requirement::RK_Type: {
+ const auto *TR = cast<concepts::TypeRequirement>(Req);
+ if (TR->isSubstitutionFailure())
+ return HandleSubstitutionFailure(
+ TR->getSubstitutionDiagnostic()->DiagLoc);
+
+ Out << 'T';
+ mangleType(TR->getType()->getType());
+ break;
+ }
+
+ case Requirement::RK_Simple:
+ case Requirement::RK_Compound: {
+ const auto *ER = cast<concepts::ExprRequirement>(Req);
+ if (ER->isExprSubstitutionFailure())
+ return HandleSubstitutionFailure(
+ ER->getExprSubstitutionDiagnostic()->DiagLoc);
+
+ Out << 'X';
+ mangleExpression(ER->getExpr());
+
+ if (ER->hasNoexceptRequirement())
+ Out << 'N';
+
+ if (!ER->getReturnTypeRequirement().isEmpty()) {
+ if (ER->getReturnTypeRequirement().isSubstitutionFailure())
+ return HandleSubstitutionFailure(ER->getReturnTypeRequirement()
+ .getSubstitutionDiagnostic()
+ ->DiagLoc);
+
+ Out << 'R';
+ mangleTypeConstraint(ER->getReturnTypeRequirement().getTypeConstraint());
+ }
+ break;
+ }
+
+ case Requirement::RK_Nested:
+ const auto *NR = cast<concepts::NestedRequirement>(Req);
+ if (NR->hasInvalidConstraint()) {
+ // FIXME: NestedRequirement should track the location of its requires
+ // keyword.
+ return HandleSubstitutionFailure(RequiresExprLoc);
+ }
+
+ Out << 'Q';
+ mangleExpression(NR->getConstraintExpr());
+ break;
+ }
+}
+
void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity,
bool AsTemplateArg) {
// <expression> ::= <unary operator-name> <expression>
@@ -4168,7 +4695,6 @@ recurse:
case Expr::ArrayInitIndexExprClass:
case Expr::NoInitExprClass:
case Expr::ParenListExprClass:
- case Expr::LambdaExprClass:
case Expr::MSPropertyRefExprClass:
case Expr::MSPropertySubscriptExprClass:
case Expr::TypoExprClass: // This should no longer exist in the AST by now.
@@ -4177,6 +4703,7 @@ recurse:
case Expr::OMPArrayShapingExprClass:
case Expr::OMPIteratorExprClass:
case Expr::CXXInheritedCtorInitExprClass:
+ case Expr::CXXParenListInitExprClass:
llvm_unreachable("unexpected statement kind");
case Expr::ConstantExprClass:
@@ -4208,8 +4735,6 @@ recurse:
case Expr::ShuffleVectorExprClass:
case Expr::ConvertVectorExprClass:
case Expr::StmtExprClass:
- case Expr::TypeTraitExprClass:
- case Expr::RequiresExprClass:
case Expr::ArrayTypeTraitExprClass:
case Expr::ExpressionTraitExprClass:
case Expr::VAArgExprClass:
@@ -4238,8 +4763,7 @@ recurse:
const CXXUuidofExpr *UE = cast<CXXUuidofExpr>(E);
// As of clang 12, uuidof uses the vendor extended expression
// mangling. Previously, it used a special-cased nonstandard extension.
- if (Context.getASTContext().getLangOpts().getClangABICompat() >
- LangOptions::ClangABI::Ver11) {
+ if (!isCompatibleWith(LangOptions::ClangABI::Ver11)) {
Out << "u8__uuidof";
if (UE->isTypeOperand())
mangleType(UE->getTypeOperand(Context.getASTContext()));
@@ -4318,9 +4842,23 @@ recurse:
E = cast<CXXStdInitializerListExpr>(E)->getSubExpr();
goto recurse;
- case Expr::SubstNonTypeTemplateParmExprClass:
+ case Expr::SubstNonTypeTemplateParmExprClass: {
+ // Mangle a substituted parameter the same way we mangle the template
+ // argument.
+ auto *SNTTPE = cast<SubstNonTypeTemplateParmExpr>(E);
+ if (auto *CE = dyn_cast<ConstantExpr>(SNTTPE->getReplacement())) {
+ // Pull out the constant value and mangle it as a template argument.
+ QualType ParamType = SNTTPE->getParameterType(Context.getASTContext());
+ assert(CE->hasAPValueResult() && "expected the NTTP to have an APValue");
+ mangleValueInTemplateArg(ParamType, CE->getAPValueResult(), false,
+ /*NeedExactType=*/true);
+ break;
+ }
+ // The remaining cases all happen to be substituted with expressions that
+ // mangle the same as a corresponding template argument anyway.
E = cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement();
goto recurse;
+ }
case Expr::UserDefinedLiteralClass:
// We follow g++'s approach of mangling a UDL as a call to the literal
@@ -4368,7 +4906,7 @@ recurse:
Out << '_';
mangleType(New->getAllocatedType());
if (New->hasInitializer()) {
- if (New->getInitializationStyle() == CXXNewExpr::ListInit)
+ if (New->getInitializationStyle() == CXXNewInitializationStyle::Braces)
Out << "il";
else
Out << "pi";
@@ -4382,7 +4920,8 @@ recurse:
} else if (const ParenListExpr *PLE = dyn_cast<ParenListExpr>(Init)) {
for (unsigned i = 0, e = PLE->getNumExprs(); i != e; ++i)
mangleExpression(PLE->getExpr(i));
- } else if (New->getInitializationStyle() == CXXNewExpr::ListInit &&
+ } else if (New->getInitializationStyle() ==
+ CXXNewInitializationStyle::Braces &&
isa<InitListExpr>(Init)) {
// Only take InitListExprs apart for list-initialization.
mangleInitListElements(cast<InitListExpr>(Init));
@@ -4562,6 +5101,10 @@ recurse:
// If the result of the operator is implicitly converted to a known
// integer type, that type is used for the literal; otherwise, the type
// of std::size_t or std::ptrdiff_t is used.
+ //
+ // FIXME: We still include the operand in the profile in this case. This
+ // can lead to mangling collisions between function templates that we
+ // consider to be different.
QualType T = (ImplicitlyConvertedToType.isNull() ||
!ImplicitlyConvertedToType->isIntegerType())? SAE->getType()
: ImplicitlyConvertedToType;
@@ -4591,8 +5134,7 @@ recurse:
// As of clang 12, we mangle __alignof__ differently than alignof. (They
// have acted differently since Clang 8, but were previously mangled the
// same.)
- if (Context.getASTContext().getLangOpts().getClangABICompat() >
- LangOptions::ClangABI::Ver11) {
+ if (!isCompatibleWith(LangOptions::ClangABI::Ver11)) {
Out << "u11__alignof__";
if (SAE->isArgumentType())
mangleType(SAE->getArgumentType());
@@ -4601,11 +5143,19 @@ recurse:
Out << 'E';
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case UETT_AlignOf:
Out << 'a';
MangleAlignofSizeofArg();
break;
+ case UETT_DataSizeOf: {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID =
+ Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot yet mangle __datasizeof expression");
+ Diags.Report(DiagID);
+ return;
+ }
case UETT_VecStep: {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
@@ -4621,7 +5171,29 @@ recurse:
Diags.Report(DiagID);
return;
}
+ case UETT_VectorElements: {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "cannot yet mangle __builtin_vectorelements expression");
+ Diags.Report(DiagID);
+ return;
+ }
+ }
+ break;
+ }
+
+ case Expr::TypeTraitExprClass: {
+ // <expression> ::= u <source-name> <template-arg>* E # vendor extension
+ const TypeTraitExpr *TTE = cast<TypeTraitExpr>(E);
+ NotPrimaryExpr();
+ Out << 'u';
+ llvm::StringRef Spelling = getTraitSpelling(TTE->getTrait());
+ Out << Spelling.size() << Spelling;
+ for (TypeSourceInfo *TSI : TTE->getArgs()) {
+ mangleType(TSI->getType());
}
+ Out << 'E';
break;
}
@@ -4813,13 +5385,57 @@ recurse:
goto recurse;
case Expr::ConceptSpecializationExprClass: {
- // <expr-primary> ::= L <mangled-name> E # external name
- Out << "L_Z";
auto *CSE = cast<ConceptSpecializationExpr>(E);
- mangleTemplateName(CSE->getNamedConcept(),
- CSE->getTemplateArguments().data(),
- CSE->getTemplateArguments().size());
- Out << 'E';
+ if (isCompatibleWith(LangOptions::ClangABI::Ver17)) {
+ // Clang 17 and before mangled concept-ids as if they resolved to an
+ // entity, meaning that references to enclosing template arguments don't
+ // work.
+ Out << "L_Z";
+ mangleTemplateName(CSE->getNamedConcept(), CSE->getTemplateArguments());
+ Out << 'E';
+ break;
+ }
+ // Proposed on https://github.com/itanium-cxx-abi/cxx-abi/issues/24.
+ NotPrimaryExpr();
+ mangleUnresolvedName(
+ CSE->getNestedNameSpecifierLoc().getNestedNameSpecifier(),
+ CSE->getConceptNameInfo().getName(),
+ CSE->getTemplateArgsAsWritten()->getTemplateArgs(),
+ CSE->getTemplateArgsAsWritten()->getNumTemplateArgs());
+ break;
+ }
+
+ case Expr::RequiresExprClass: {
+ // Proposed on https://github.com/itanium-cxx-abi/cxx-abi/issues/24.
+ auto *RE = cast<RequiresExpr>(E);
+ // This is a primary-expression in the C++ grammar, but does not have an
+ // <expr-primary> mangling (starting with 'L').
+ NotPrimaryExpr();
+ if (RE->getLParenLoc().isValid()) {
+ Out << "rQ";
+ FunctionTypeDepthState saved = FunctionTypeDepth.push();
+ if (RE->getLocalParameters().empty()) {
+ Out << 'v';
+ } else {
+ for (ParmVarDecl *Param : RE->getLocalParameters()) {
+ mangleType(Context.getASTContext().getSignatureParameterType(
+ Param->getType()));
+ }
+ }
+ Out << '_';
+
+ // The rest of the mangling is in the immediate scope of the parameters.
+ FunctionTypeDepth.enterResultType();
+ for (const concepts::Requirement *Req : RE->getRequirements())
+ mangleRequirement(RE->getExprLoc(), Req);
+ FunctionTypeDepth.pop(saved);
+ Out << 'E';
+ } else {
+ Out << "rq";
+ for (const concepts::Requirement *Req : RE->getRequirements())
+ mangleRequirement(RE->getExprLoc(), Req);
+ Out << 'E';
+ }
break;
}
@@ -4953,6 +5569,16 @@ recurse:
break;
}
+ case Expr::LambdaExprClass: {
+ // A lambda-expression can't appear in the signature of an
+ // externally-visible declaration, so there's no standard mangling for
+ // this, but mangling as a literal of the closure type seems reasonable.
+ Out << "L";
+ mangleType(Context.getASTContext().getRecordType(cast<LambdaExpr>(E)->getLambdaClass()));
+ Out << "E";
+ break;
+ }
+
case Expr::PackExpansionExprClass:
NotPrimaryExpr();
Out << "sp";
@@ -5170,28 +5796,116 @@ void CXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
}
}
-namespace {
// Helper to provide ancillary information on a template used to mangle its
// arguments.
-struct TemplateArgManglingInfo {
+struct CXXNameMangler::TemplateArgManglingInfo {
+ const CXXNameMangler &Mangler;
TemplateDecl *ResolvedTemplate = nullptr;
bool SeenPackExpansionIntoNonPack = false;
const NamedDecl *UnresolvedExpandedPack = nullptr;
- TemplateArgManglingInfo(TemplateName TN) {
+ TemplateArgManglingInfo(const CXXNameMangler &Mangler, TemplateName TN)
+ : Mangler(Mangler) {
if (TemplateDecl *TD = TN.getAsTemplateDecl())
ResolvedTemplate = TD;
}
- /// Do we need to mangle template arguments with exactly correct types?
- ///
+ /// Information about how to mangle a template argument.
+ struct Info {
+ /// Do we need to mangle the template argument with an exactly correct type?
+ bool NeedExactType;
+ /// If we need to prefix the mangling with a mangling of the template
+ /// parameter, the corresponding parameter.
+ const NamedDecl *TemplateParameterToMangle;
+ };
+
+ /// Determine whether the resolved template might be overloaded on its
+ /// template parameter list. If so, the mangling needs to include enough
+ /// information to reconstruct the template parameter list.
+ bool isOverloadable() {
+ // Function templates are generally overloadable. As a special case, a
+ // member function template of a generic lambda is not overloadable.
+ if (auto *FTD = dyn_cast_or_null<FunctionTemplateDecl>(ResolvedTemplate)) {
+ auto *RD = dyn_cast<CXXRecordDecl>(FTD->getDeclContext());
+ if (!RD || !RD->isGenericLambda())
+ return true;
+ }
+
+ // All other templates are not overloadable. Partial specializations would
+ // be, but we never mangle them.
+ return false;
+ }
+
+ /// Determine whether we need to prefix this <template-arg> mangling with a
+ /// <template-param-decl>. This happens if the natural template parameter for
+ /// the argument mangling is not the same as the actual template parameter.
+ bool needToMangleTemplateParam(const NamedDecl *Param,
+ const TemplateArgument &Arg) {
+ // For a template type parameter, the natural parameter is 'typename T'.
+ // The actual parameter might be constrained.
+ if (auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param))
+ return TTP->hasTypeConstraint();
+
+ if (Arg.getKind() == TemplateArgument::Pack) {
+ // For an empty pack, the natural parameter is `typename...`.
+ if (Arg.pack_size() == 0)
+ return true;
+
+ // For any other pack, we use the first argument to determine the natural
+ // template parameter.
+ return needToMangleTemplateParam(Param, *Arg.pack_begin());
+ }
+
+ // For a non-type template parameter, the natural parameter is `T V` (for a
+ // prvalue argument) or `T &V` (for a glvalue argument), where `T` is the
+ // type of the argument, which we require to exactly match. If the actual
+ // parameter has a deduced or instantiation-dependent type, it is not
+ // equivalent to the natural parameter.
+ if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param))
+ return NTTP->getType()->isInstantiationDependentType() ||
+ NTTP->getType()->getContainedDeducedType();
+
+ // For a template template parameter, the template-head might differ from
+ // that of the template.
+ auto *TTP = cast<TemplateTemplateParmDecl>(Param);
+ TemplateName ArgTemplateName = Arg.getAsTemplateOrTemplatePattern();
+ const TemplateDecl *ArgTemplate = ArgTemplateName.getAsTemplateDecl();
+ if (!ArgTemplate)
+ return true;
+
+ // Mangle the template parameter list of the parameter and argument to see
+ // if they are the same. We can't use Profile for this, because it can't
+ // model the depth difference between parameter and argument and might not
+ // necessarily have the same definition of "identical" that we use here --
+ // that is, same mangling.
+ auto MangleTemplateParamListToString =
+ [&](SmallVectorImpl<char> &Buffer, const TemplateParameterList *Params,
+ unsigned DepthOffset) {
+ llvm::raw_svector_ostream Stream(Buffer);
+ CXXNameMangler(Mangler.Context, Stream,
+ WithTemplateDepthOffset{DepthOffset})
+ .mangleTemplateParameterList(Params);
+ };
+ llvm::SmallString<128> ParamTemplateHead, ArgTemplateHead;
+ MangleTemplateParamListToString(ParamTemplateHead,
+ TTP->getTemplateParameters(), 0);
+ // Add the depth of the parameter's template parameter list to all
+ // parameters appearing in the argument to make the indexes line up
+ // properly.
+ MangleTemplateParamListToString(ArgTemplateHead,
+ ArgTemplate->getTemplateParameters(),
+ TTP->getTemplateParameters()->getDepth());
+ return ParamTemplateHead != ArgTemplateHead;
+ }
+
+ /// Determine information about how this template argument should be mangled.
/// This should be called exactly once for each parameter / argument pair, in
/// order.
- bool needExactType(unsigned ParamIdx, const TemplateArgument &Arg) {
+ Info getArgInfo(unsigned ParamIdx, const TemplateArgument &Arg) {
// We need correct types when the template-name is unresolved or when it
// names a template that is able to be overloaded.
if (!ResolvedTemplate || SeenPackExpansionIntoNonPack)
- return true;
+ return {true, nullptr};
// Move to the next parameter.
const NamedDecl *Param = UnresolvedExpandedPack;
@@ -5200,13 +5914,14 @@ struct TemplateArgManglingInfo {
"no parameter for argument");
Param = ResolvedTemplate->getTemplateParameters()->getParam(ParamIdx);
- // If we reach an expanded parameter pack whose argument isn't in pack
- // form, that means Sema couldn't figure out which arguments belonged to
- // it, because it contains a pack expansion. Track the expanded pack for
- // all further template arguments until we hit that pack expansion.
+ // If we reach a parameter pack whose argument isn't in pack form, that
+ // means Sema couldn't or didn't figure out which arguments belonged to
+ // it, because it contains a pack expansion or because Sema bailed out of
+ // computing parameter / argument correspondence before this point. Track
+ // the pack as the corresponding parameter for all further template
+ // arguments until we hit a pack expansion, at which point we don't know
+ // the correspondence between parameters and arguments at all.
if (Param->isParameterPack() && Arg.getKind() != TemplateArgument::Pack) {
- assert(getExpandedPackSize(Param) &&
- "failed to form pack argument for parameter pack");
UnresolvedExpandedPack = Param;
}
}
@@ -5217,17 +5932,13 @@ struct TemplateArgManglingInfo {
if (Arg.isPackExpansion() &&
(!Param->isParameterPack() || UnresolvedExpandedPack)) {
SeenPackExpansionIntoNonPack = true;
- return true;
+ return {true, nullptr};
}
- // We need exact types for function template arguments because they might be
- // overloaded on template parameter type. As a special case, a member
- // function template of a generic lambda is not overloadable.
- if (auto *FTD = dyn_cast<FunctionTemplateDecl>(ResolvedTemplate)) {
- auto *RD = dyn_cast<CXXRecordDecl>(FTD->getDeclContext());
- if (!RD || !RD->isGenericLambda())
- return true;
- }
+ // We need exact types for arguments of a template that might be overloaded
+ // on template parameter type.
+ if (isOverloadable())
+ return {true, needToMangleTemplateParam(Param, Arg) ? Param : nullptr};
// Otherwise, we only need a correct type if the parameter has a deduced
// type.
@@ -5237,44 +5948,75 @@ struct TemplateArgManglingInfo {
// but it doesn't matter because substitution and expansion don't affect
// whether a deduced type appears in the type.
auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param);
- return NTTP && NTTP->getType()->getContainedDeducedType();
+ bool NeedExactType = NTTP && NTTP->getType()->getContainedDeducedType();
+ return {NeedExactType, nullptr};
+ }
+
+ /// Determine if we should mangle a requires-clause after the template
+ /// argument list. If so, returns the expression to mangle.
+ const Expr *getTrailingRequiresClauseToMangle() {
+ if (!isOverloadable())
+ return nullptr;
+ return ResolvedTemplate->getTemplateParameters()->getRequiresClause();
}
};
-}
void CXXNameMangler::mangleTemplateArgs(TemplateName TN,
const TemplateArgumentLoc *TemplateArgs,
unsigned NumTemplateArgs) {
- // <template-args> ::= I <template-arg>+ E
+ // <template-args> ::= I <template-arg>+ [Q <requires-clause expr>] E
Out << 'I';
- TemplateArgManglingInfo Info(TN);
- for (unsigned i = 0; i != NumTemplateArgs; ++i)
- mangleTemplateArg(TemplateArgs[i].getArgument(),
- Info.needExactType(i, TemplateArgs[i].getArgument()));
+ TemplateArgManglingInfo Info(*this, TN);
+ for (unsigned i = 0; i != NumTemplateArgs; ++i) {
+ mangleTemplateArg(Info, i, TemplateArgs[i].getArgument());
+ }
+ mangleRequiresClause(Info.getTrailingRequiresClauseToMangle());
Out << 'E';
}
void CXXNameMangler::mangleTemplateArgs(TemplateName TN,
const TemplateArgumentList &AL) {
- // <template-args> ::= I <template-arg>+ E
+ // <template-args> ::= I <template-arg>+ [Q <requires-clause expr>] E
Out << 'I';
- TemplateArgManglingInfo Info(TN);
- for (unsigned i = 0, e = AL.size(); i != e; ++i)
- mangleTemplateArg(AL[i], Info.needExactType(i, AL[i]));
+ TemplateArgManglingInfo Info(*this, TN);
+ for (unsigned i = 0, e = AL.size(); i != e; ++i) {
+ mangleTemplateArg(Info, i, AL[i]);
+ }
+ mangleRequiresClause(Info.getTrailingRequiresClauseToMangle());
Out << 'E';
}
void CXXNameMangler::mangleTemplateArgs(TemplateName TN,
- const TemplateArgument *TemplateArgs,
- unsigned NumTemplateArgs) {
- // <template-args> ::= I <template-arg>+ E
+ ArrayRef<TemplateArgument> Args) {
+ // <template-args> ::= I <template-arg>+ [Q <requires-clause expr>] E
Out << 'I';
- TemplateArgManglingInfo Info(TN);
- for (unsigned i = 0; i != NumTemplateArgs; ++i)
- mangleTemplateArg(TemplateArgs[i], Info.needExactType(i, TemplateArgs[i]));
+ TemplateArgManglingInfo Info(*this, TN);
+ for (unsigned i = 0; i != Args.size(); ++i) {
+ mangleTemplateArg(Info, i, Args[i]);
+ }
+ mangleRequiresClause(Info.getTrailingRequiresClauseToMangle());
Out << 'E';
}
+void CXXNameMangler::mangleTemplateArg(TemplateArgManglingInfo &Info,
+ unsigned Index, TemplateArgument A) {
+ TemplateArgManglingInfo::Info ArgInfo = Info.getArgInfo(Index, A);
+
+ // Proposed on https://github.com/itanium-cxx-abi/cxx-abi/issues/47.
+ if (ArgInfo.TemplateParameterToMangle &&
+ !isCompatibleWith(LangOptions::ClangABI::Ver17)) {
+ // The template parameter is mangled if the mangling would otherwise be
+ // ambiguous.
+ //
+ // <template-arg> ::= <template-param-decl> <template-arg>
+ //
+ // Clang 17 and before did not do this.
+ mangleTemplateParamDecl(ArgInfo.TemplateParameterToMangle);
+ }
+
+ mangleTemplateArg(A, ArgInfo.NeedExactType);
+}
+
void CXXNameMangler::mangleTemplateArg(TemplateArgument A, bool NeedExactType) {
// <template-arg> ::= <type> # type or template
// ::= X <expression> E # expression
@@ -5327,8 +6069,7 @@ void CXXNameMangler::mangleTemplateArg(TemplateArgument A, bool NeedExactType) {
else if (D->getType()->isArrayType() &&
Ctx.hasSimilarType(Ctx.getDecayedType(D->getType()),
A.getParamTypeForDecl()) &&
- Ctx.getLangOpts().getClangABICompat() >
- LangOptions::ClangABI::Ver11)
+ !isCompatibleWith(LangOptions::ClangABI::Ver11))
// Build a value corresponding to this implicit array-to-pointer decay.
Value = APValue(APValue::LValueBase(D), CharUnits::Zero(),
{APValue::LValuePathEntry::ArrayIndex(0)},
@@ -5346,6 +6087,11 @@ void CXXNameMangler::mangleTemplateArg(TemplateArgument A, bool NeedExactType) {
mangleNullPointer(A.getNullPtrType());
break;
}
+ case TemplateArgument::StructuralValue:
+ mangleValueInTemplateArg(A.getStructuralValueType(),
+ A.getAsStructuralValue(),
+ /*TopLevel=*/true, NeedExactType);
+ break;
case TemplateArgument::Pack: {
// <template-arg> ::= J <template-arg>* E
Out << 'J';
@@ -5357,8 +6103,7 @@ void CXXNameMangler::mangleTemplateArg(TemplateArgument A, bool NeedExactType) {
}
void CXXNameMangler::mangleTemplateArgExpr(const Expr *E) {
- ASTContext &Ctx = Context.getASTContext();
- if (Ctx.getLangOpts().getClangABICompat() > LangOptions::ClangABI::Ver11) {
+ if (!isCompatibleWith(LangOptions::ClangABI::Ver11)) {
mangleExpression(E, UnknownArity, /*AsTemplateArg=*/true);
return;
}
@@ -5493,6 +6238,47 @@ static QualType getLValueType(ASTContext &Ctx, const APValue &LV) {
return T;
}
+static IdentifierInfo *getUnionInitName(SourceLocation UnionLoc,
+ DiagnosticsEngine &Diags,
+ const FieldDecl *FD) {
+ // According to:
+ // http://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling.anonymous
+ // For the purposes of mangling, the name of an anonymous union is considered
+ // to be the name of the first named data member found by a pre-order,
+ // depth-first, declaration-order walk of the data members of the anonymous
+ // union.
+
+ if (FD->getIdentifier())
+ return FD->getIdentifier();
+
+ // The only cases where the identifer of a FieldDecl would be blank is if the
+ // field represents an anonymous record type or if it is an unnamed bitfield.
+ // There is no type to descend into in the case of a bitfield, so we can just
+ // return nullptr in that case.
+ if (FD->isBitField())
+ return nullptr;
+ const CXXRecordDecl *RD = FD->getType()->getAsCXXRecordDecl();
+
+ // Consider only the fields in declaration order, searched depth-first. We
+ // don't care about the active member of the union, as all we are doing is
+ // looking for a valid name. We also don't check bases, due to guidance from
+ // the Itanium ABI folks.
+ for (const FieldDecl *RDField : RD->fields()) {
+ if (IdentifierInfo *II = getUnionInitName(UnionLoc, Diags, RDField))
+ return II;
+ }
+
+ // According to the Itanium ABI: If there is no such data member (i.e., if all
+ // of the data members in the union are unnamed), then there is no way for a
+ // program to refer to the anonymous union, and there is therefore no need to
+ // mangle its name. However, we should diagnose this anyway.
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error, "cannot mangle this unnamed union NTTP yet");
+ Diags.Report(UnionLoc, DiagID);
+
+ return nullptr;
+}
+
void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V,
bool TopLevel,
bool NeedExactType) {
@@ -5525,8 +6311,7 @@ void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V,
assert(RD && "unexpected type for record value");
// Drop trailing zero-initialized elements.
- llvm::SmallVector<const FieldDecl *, 16> Fields(RD->field_begin(),
- RD->field_end());
+ llvm::SmallVector<const FieldDecl *, 16> Fields(RD->fields());
while (
!Fields.empty() &&
(Fields.back()->isUnnamedBitfield() ||
@@ -5576,7 +6361,10 @@ void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V,
mangleType(T);
if (!isZeroInitialized(T, V)) {
Out << "di";
- mangleSourceName(FD->getIdentifier());
+ IdentifierInfo *II = (getUnionInitName(
+ T->getAsCXXRecordDecl()->getLocation(), Context.getDiags(), FD));
+ if (II)
+ mangleSourceName(II);
mangleValueInTemplateArg(FD->getType(), V.getUnionValue(), false);
}
Out << 'E';
@@ -5712,7 +6500,20 @@ void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V,
Out << "plcvPcad";
Kind = Offset;
} else {
- if (!V.getLValuePath().empty() || V.isLValueOnePastTheEnd()) {
+ // Clang 11 and before mangled an array subject to array-to-pointer decay
+ // as if it were the declaration itself.
+ bool IsArrayToPointerDecayMangledAsDecl = false;
+ if (TopLevel && Ctx.getLangOpts().getClangABICompat() <=
+ LangOptions::ClangABI::Ver11) {
+ QualType BType = B.getType();
+ IsArrayToPointerDecayMangledAsDecl =
+ BType->isArrayType() && V.getLValuePath().size() == 1 &&
+ V.getLValuePath()[0].getAsArrayIndex() == 0 &&
+ Ctx.hasSimilarType(T, Ctx.getDecayedType(BType));
+ }
+
+ if ((!V.getLValuePath().empty() || V.isLValueOnePastTheEnd()) &&
+ !IsArrayToPointerDecayMangledAsDecl) {
NotPrimaryExpr();
// A final conversion to the template parameter's type is usually
// folded into the 'so' mangling, but we can't do that for 'void*'
@@ -5731,8 +6532,7 @@ void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V,
} else {
if (NeedExactType &&
!Ctx.hasSameType(T->getPointeeType(), getLValueType(Ctx, V)) &&
- Ctx.getLangOpts().getClangABICompat() >
- LangOptions::ClangABI::Ver11) {
+ !isCompatibleWith(LangOptions::ClangABI::Ver11)) {
NotPrimaryExpr();
Out << "cv";
mangleType(T);
@@ -5830,8 +6630,7 @@ void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V,
!Ctx.hasSameType(
T->castAs<MemberPointerType>()->getPointeeType(),
V.getMemberPointerDecl()->getType()) &&
- Ctx.getLangOpts().getClangABICompat() >
- LangOptions::ClangABI::Ver11) {
+ !isCompatibleWith(LangOptions::ClangABI::Ver11)) {
Out << "cv";
mangleType(T);
}
@@ -5862,6 +6661,7 @@ void CXXNameMangler::mangleTemplateParameter(unsigned Depth, unsigned Index) {
// The latter two manglings are from a proposal here:
// https://github.com/itanium-cxx-abi/cxx-abi/issues/31#issuecomment-528122117
Out << 'T';
+ Depth += TemplateDepthOffset;
if (Depth != 0)
Out << 'L' << (Depth - 1) << '_';
if (Index != 0)
@@ -5870,9 +6670,11 @@ void CXXNameMangler::mangleTemplateParameter(unsigned Depth, unsigned Index) {
}
void CXXNameMangler::mangleSeqID(unsigned SeqID) {
- if (SeqID == 1)
+ if (SeqID == 0) {
+ // Nothing.
+ } else if (SeqID == 1) {
Out << '0';
- else if (SeqID > 1) {
+ } else {
SeqID--;
// <seq-id> is encoded in base-36, using digits and upper case letters.
@@ -5907,6 +6709,14 @@ bool CXXNameMangler::mangleSubstitution(const NamedDecl *ND) {
return mangleSubstitution(reinterpret_cast<uintptr_t>(ND));
}
+bool CXXNameMangler::mangleSubstitution(NestedNameSpecifier *NNS) {
+ assert(NNS->getKind() == NestedNameSpecifier::Identifier &&
+ "mangleSubstitution(NestedNameSpecifier *) is only used for "
+ "identifier nested name specifiers.");
+ NNS = Context.getASTContext().getCanonicalNestedNameSpecifier(NNS);
+ return mangleSubstitution(reinterpret_cast<uintptr_t>(NNS));
+}
+
/// Determine whether the given type has any qualifiers that are relevant for
/// substitutions.
static bool hasMangledSubstitutionQualifiers(QualType T) {
@@ -5946,56 +6756,67 @@ bool CXXNameMangler::mangleSubstitution(uintptr_t Ptr) {
return true;
}
-static bool isCharType(QualType T) {
- if (T.isNull())
+/// Returns whether S is a template specialization of std::Name with a single
+/// argument of type A.
+bool CXXNameMangler::isSpecializedAs(QualType S, llvm::StringRef Name,
+ QualType A) {
+ if (S.isNull())
return false;
- return T->isSpecificBuiltinType(BuiltinType::Char_S) ||
- T->isSpecificBuiltinType(BuiltinType::Char_U);
-}
-
-/// Returns whether a given type is a template specialization of a given name
-/// with a single argument of type char.
-static bool isCharSpecialization(QualType T, const char *Name) {
- if (T.isNull())
- return false;
-
- const RecordType *RT = T->getAs<RecordType>();
+ const RecordType *RT = S->getAs<RecordType>();
if (!RT)
return false;
const ClassTemplateSpecializationDecl *SD =
dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
- if (!SD)
+ if (!SD || !SD->getIdentifier()->isStr(Name))
return false;
- if (!isStdNamespace(getEffectiveDeclContext(SD)))
+ if (!isStdNamespace(Context.getEffectiveDeclContext(SD)))
return false;
const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
if (TemplateArgs.size() != 1)
return false;
- if (!isCharType(TemplateArgs[0].getAsType()))
+ if (TemplateArgs[0].getAsType() != A)
+ return false;
+
+ if (SD->getSpecializedTemplate()->getOwningModuleForLinkage())
return false;
- return SD->getIdentifier()->getName() == Name;
+ return true;
}
-template <std::size_t StrLen>
-static bool isStreamCharSpecialization(const ClassTemplateSpecializationDecl*SD,
- const char (&Str)[StrLen]) {
- if (!SD->getIdentifier()->isStr(Str))
+/// Returns whether SD is a template specialization std::Name<char,
+/// std::char_traits<char> [, std::allocator<char>]>
+/// HasAllocator controls whether the 3rd template argument is needed.
+bool CXXNameMangler::isStdCharSpecialization(
+ const ClassTemplateSpecializationDecl *SD, llvm::StringRef Name,
+ bool HasAllocator) {
+ if (!SD->getIdentifier()->isStr(Name))
return false;
const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
- if (TemplateArgs.size() != 2)
+ if (TemplateArgs.size() != (HasAllocator ? 3 : 2))
+ return false;
+
+ QualType A = TemplateArgs[0].getAsType();
+ if (A.isNull())
+ return false;
+ // Plain 'char' is named Char_S or Char_U depending on the target ABI.
+ if (!A->isSpecificBuiltinType(BuiltinType::Char_S) &&
+ !A->isSpecificBuiltinType(BuiltinType::Char_U))
return false;
- if (!isCharType(TemplateArgs[0].getAsType()))
+ if (!isSpecializedAs(TemplateArgs[1].getAsType(), "char_traits", A))
return false;
- if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits"))
+ if (HasAllocator &&
+ !isSpecializedAs(TemplateArgs[2].getAsType(), "allocator", A))
+ return false;
+
+ if (SD->getSpecializedTemplate()->getOwningModuleForLinkage())
return false;
return true;
@@ -6008,10 +6829,14 @@ bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) {
Out << "St";
return true;
}
+ return false;
}
if (const ClassTemplateDecl *TD = dyn_cast<ClassTemplateDecl>(ND)) {
- if (!isStdNamespace(getEffectiveDeclContext(TD)))
+ if (!isStdNamespace(Context.getEffectiveDeclContext(TD)))
+ return false;
+
+ if (TD->getOwningModuleForLinkage())
return false;
// <substitution> ::= Sa # ::std::allocator
@@ -6025,56 +6850,48 @@ bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) {
Out << "Sb";
return true;
}
+ return false;
}
if (const ClassTemplateSpecializationDecl *SD =
dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
- if (!isStdNamespace(getEffectiveDeclContext(SD)))
+ if (!isStdNamespace(Context.getEffectiveDeclContext(SD)))
+ return false;
+
+ if (SD->getSpecializedTemplate()->getOwningModuleForLinkage())
return false;
// <substitution> ::= Ss # ::std::basic_string<char,
// ::std::char_traits<char>,
// ::std::allocator<char> >
- if (SD->getIdentifier()->isStr("basic_string")) {
- const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
-
- if (TemplateArgs.size() != 3)
- return false;
-
- if (!isCharType(TemplateArgs[0].getAsType()))
- return false;
-
- if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits"))
- return false;
-
- if (!isCharSpecialization(TemplateArgs[2].getAsType(), "allocator"))
- return false;
-
+ if (isStdCharSpecialization(SD, "basic_string", /*HasAllocator=*/true)) {
Out << "Ss";
return true;
}
// <substitution> ::= Si # ::std::basic_istream<char,
// ::std::char_traits<char> >
- if (isStreamCharSpecialization(SD, "basic_istream")) {
+ if (isStdCharSpecialization(SD, "basic_istream", /*HasAllocator=*/false)) {
Out << "Si";
return true;
}
// <substitution> ::= So # ::std::basic_ostream<char,
// ::std::char_traits<char> >
- if (isStreamCharSpecialization(SD, "basic_ostream")) {
+ if (isStdCharSpecialization(SD, "basic_ostream", /*HasAllocator=*/false)) {
Out << "So";
return true;
}
// <substitution> ::= Sd # ::std::basic_iostream<char,
// ::std::char_traits<char> >
- if (isStreamCharSpecialization(SD, "basic_iostream")) {
+ if (isStdCharSpecialization(SD, "basic_iostream", /*HasAllocator=*/false)) {
Out << "Sd";
return true;
}
+ return false;
}
+
return false;
}
@@ -6291,23 +7108,25 @@ void ItaniumMangleContextImpl::mangleDynamicStermFinalizer(const VarDecl *D,
}
void ItaniumMangleContextImpl::mangleSEHFilterExpression(
- const NamedDecl *EnclosingDecl, raw_ostream &Out) {
+ GlobalDecl EnclosingDecl, raw_ostream &Out) {
CXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "__filt_";
- if (shouldMangleDeclName(EnclosingDecl))
+ auto *EnclosingFD = cast<FunctionDecl>(EnclosingDecl.getDecl());
+ if (shouldMangleDeclName(EnclosingFD))
Mangler.mangle(EnclosingDecl);
else
- Mangler.getStream() << EnclosingDecl->getName();
+ Mangler.getStream() << EnclosingFD->getName();
}
void ItaniumMangleContextImpl::mangleSEHFinallyBlock(
- const NamedDecl *EnclosingDecl, raw_ostream &Out) {
+ GlobalDecl EnclosingDecl, raw_ostream &Out) {
CXXNameMangler Mangler(*this, Out);
Mangler.getStream() << "__fin_";
- if (shouldMangleDeclName(EnclosingDecl))
+ auto *EnclosingFD = cast<FunctionDecl>(EnclosingDecl.getDecl());
+ if (shouldMangleDeclName(EnclosingFD))
Mangler.mangle(EnclosingDecl);
else
- Mangler.getStream() << EnclosingDecl->getName();
+ Mangler.getStream() << EnclosingFD->getName();
}
void ItaniumMangleContextImpl::mangleItaniumThreadLocalInit(const VarDecl *D,
@@ -6376,16 +7195,17 @@ void ItaniumMangleContextImpl::mangleCXXRTTI(QualType Ty, raw_ostream &Out) {
Mangler.mangleType(Ty);
}
-void ItaniumMangleContextImpl::mangleCXXRTTIName(QualType Ty,
- raw_ostream &Out) {
+void ItaniumMangleContextImpl::mangleCXXRTTIName(
+ QualType Ty, raw_ostream &Out, bool NormalizeIntegers = false) {
// <special-name> ::= TS <type> # typeinfo name (null terminated byte string)
- CXXNameMangler Mangler(*this, Out);
+ CXXNameMangler Mangler(*this, Out, NormalizeIntegers);
Mangler.getStream() << "_ZTS";
Mangler.mangleType(Ty);
}
-void ItaniumMangleContextImpl::mangleTypeName(QualType Ty, raw_ostream &Out) {
- mangleCXXRTTIName(Ty, Out);
+void ItaniumMangleContextImpl::mangleCanonicalTypeName(
+ QualType Ty, raw_ostream &Out, bool NormalizeIntegers = false) {
+ mangleCXXRTTIName(Ty, Out, NormalizeIntegers);
}
void ItaniumMangleContextImpl::mangleStringLiteral(const StringLiteral *, raw_ostream &) {
@@ -6398,17 +7218,36 @@ void ItaniumMangleContextImpl::mangleLambdaSig(const CXXRecordDecl *Lambda,
Mangler.mangleLambdaSig(Lambda);
}
+void ItaniumMangleContextImpl::mangleModuleInitializer(const Module *M,
+ raw_ostream &Out) {
+ // <special-name> ::= GI <module-name> # module initializer function
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZGI";
+ Mangler.mangleModuleNamePrefix(M->getPrimaryModuleInterfaceName());
+ if (M->isModulePartition()) {
+ // The partition needs including, as partitions can have them too.
+ auto Partition = M->Name.find(':');
+ Mangler.mangleModuleNamePrefix(
+ StringRef(&M->Name[Partition + 1], M->Name.size() - Partition - 1),
+ /*IsPartition*/ true);
+ }
+}
+
ItaniumMangleContext *ItaniumMangleContext::create(ASTContext &Context,
- DiagnosticsEngine &Diags) {
+ DiagnosticsEngine &Diags,
+ bool IsAux) {
return new ItaniumMangleContextImpl(
Context, Diags,
- [](ASTContext &, const NamedDecl *) -> llvm::Optional<unsigned> {
- return llvm::None;
- });
+ [](ASTContext &, const NamedDecl *) -> std::optional<unsigned> {
+ return std::nullopt;
+ },
+ IsAux);
}
ItaniumMangleContext *
ItaniumMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags,
- DiscriminatorOverrideTy DiscriminatorOverride) {
- return new ItaniumMangleContextImpl(Context, Diags, DiscriminatorOverride);
+ DiscriminatorOverrideTy DiscriminatorOverride,
+ bool IsAux) {
+ return new ItaniumMangleContextImpl(Context, Diags, DiscriminatorOverride,
+ IsAux);
}
diff --git a/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp b/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp
index f09f9d38759f..3c11b75d7472 100644
--- a/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp
+++ b/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp
@@ -1,8 +1,10 @@
#include "clang/AST/JSONNodeDumper.h"
+#include "clang/AST/Type.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/Lexer.h"
-#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/StringExtras.h"
+#include <optional>
using namespace clang;
@@ -313,12 +315,16 @@ std::string JSONNodeDumper::createPointerRepresentation(const void *Ptr) {
llvm::json::Object JSONNodeDumper::createQualType(QualType QT, bool Desugar) {
SplitQualType SQT = QT.split();
- llvm::json::Object Ret{{"qualType", QualType::getAsString(SQT, PrintPolicy)}};
+ std::string SQTS = QualType::getAsString(SQT, PrintPolicy);
+ llvm::json::Object Ret{{"qualType", SQTS}};
if (Desugar && !QT.isNull()) {
SplitQualType DSQT = QT.getSplitDesugaredType();
- if (DSQT != SQT)
- Ret["desugaredQualType"] = QualType::getAsString(DSQT, PrintPolicy);
+ if (DSQT != SQT) {
+ std::string DSQTS = QualType::getAsString(DSQT, PrintPolicy);
+ if (DSQTS != SQTS)
+ Ret["desugaredQualType"] = DSQTS;
+ }
if (const auto *TT = QT->getAs<TypedefType>())
Ret["typeAliasDeclId"] = createPointerRepresentation(TT->getDecl());
}
@@ -528,8 +534,49 @@ JSONNodeDumper::createCXXBaseSpecifier(const CXXBaseSpecifier &BS) {
return Ret;
}
+void JSONNodeDumper::VisitAliasAttr(const AliasAttr *AA) {
+ JOS.attribute("aliasee", AA->getAliasee());
+}
+
+void JSONNodeDumper::VisitCleanupAttr(const CleanupAttr *CA) {
+ JOS.attribute("cleanup_function", createBareDeclRef(CA->getFunctionDecl()));
+}
+
+void JSONNodeDumper::VisitDeprecatedAttr(const DeprecatedAttr *DA) {
+ if (!DA->getMessage().empty())
+ JOS.attribute("message", DA->getMessage());
+ if (!DA->getReplacement().empty())
+ JOS.attribute("replacement", DA->getReplacement());
+}
+
+void JSONNodeDumper::VisitUnavailableAttr(const UnavailableAttr *UA) {
+ if (!UA->getMessage().empty())
+ JOS.attribute("message", UA->getMessage());
+}
+
+void JSONNodeDumper::VisitSectionAttr(const SectionAttr *SA) {
+ JOS.attribute("section_name", SA->getName());
+}
+
+void JSONNodeDumper::VisitVisibilityAttr(const VisibilityAttr *VA) {
+ JOS.attribute("visibility", VisibilityAttr::ConvertVisibilityTypeToStr(
+ VA->getVisibility()));
+}
+
+void JSONNodeDumper::VisitTLSModelAttr(const TLSModelAttr *TA) {
+ JOS.attribute("tls_model", TA->getModel());
+}
+
void JSONNodeDumper::VisitTypedefType(const TypedefType *TT) {
JOS.attribute("decl", createBareDeclRef(TT->getDecl()));
+ if (!TT->typeMatchesDecl())
+ JOS.attribute("type", createQualType(TT->desugar()));
+}
+
+void JSONNodeDumper::VisitUsingType(const UsingType *TT) {
+ JOS.attribute("decl", createBareDeclRef(TT->getFoundDecl()));
+ if (!TT->typeMatchesDecl())
+ JOS.attribute("type", createQualType(TT->desugar()));
}
void JSONNodeDumper::VisitFunctionType(const FunctionType *T) {
@@ -599,13 +646,13 @@ void JSONNodeDumper::VisitRValueReferenceType(const ReferenceType *RT) {
void JSONNodeDumper::VisitArrayType(const ArrayType *AT) {
switch (AT->getSizeModifier()) {
- case ArrayType::Star:
+ case ArraySizeModifier::Star:
JOS.attribute("sizeModifier", "*");
break;
- case ArrayType::Static:
+ case ArraySizeModifier::Static:
JOS.attribute("sizeModifier", "static");
break;
- case ArrayType::Normal:
+ case ArraySizeModifier::Normal:
break;
}
@@ -630,29 +677,35 @@ void JSONNodeDumper::VisitDependentSizedExtVectorType(
void JSONNodeDumper::VisitVectorType(const VectorType *VT) {
JOS.attribute("numElements", VT->getNumElements());
switch (VT->getVectorKind()) {
- case VectorType::GenericVector:
+ case VectorKind::Generic:
break;
- case VectorType::AltiVecVector:
+ case VectorKind::AltiVecVector:
JOS.attribute("vectorKind", "altivec");
break;
- case VectorType::AltiVecPixel:
+ case VectorKind::AltiVecPixel:
JOS.attribute("vectorKind", "altivec pixel");
break;
- case VectorType::AltiVecBool:
+ case VectorKind::AltiVecBool:
JOS.attribute("vectorKind", "altivec bool");
break;
- case VectorType::NeonVector:
+ case VectorKind::Neon:
JOS.attribute("vectorKind", "neon");
break;
- case VectorType::NeonPolyVector:
+ case VectorKind::NeonPoly:
JOS.attribute("vectorKind", "neon poly");
break;
- case VectorType::SveFixedLengthDataVector:
+ case VectorKind::SveFixedLengthData:
JOS.attribute("vectorKind", "fixed-length sve data vector");
break;
- case VectorType::SveFixedLengthPredicateVector:
+ case VectorKind::SveFixedLengthPredicate:
JOS.attribute("vectorKind", "fixed-length sve predicate vector");
break;
+ case VectorKind::RVVFixedLengthData:
+ JOS.attribute("vectorKind", "fixed-length rvv data vector");
+ break;
+ case VectorKind::RVVFixedLengthMask:
+ JOS.attribute("vectorKind", "fixed-length rvv mask vector");
+ break;
}
}
@@ -662,9 +715,11 @@ void JSONNodeDumper::VisitUnresolvedUsingType(const UnresolvedUsingType *UUT) {
void JSONNodeDumper::VisitUnaryTransformType(const UnaryTransformType *UTT) {
switch (UTT->getUTTKind()) {
- case UnaryTransformType::EnumUnderlyingType:
- JOS.attribute("transformKind", "underlying_type");
+#define TRANSFORM_TYPE_TRAIT_DEF(Enum, Trait) \
+ case UnaryTransformType::Enum: \
+ JOS.attribute("transformKind", #Trait); \
break;
+#include "clang/Basic/TransformTypeTraits.def"
}
}
@@ -680,6 +735,18 @@ void JSONNodeDumper::VisitTemplateTypeParmType(
JOS.attribute("decl", createBareDeclRef(TTPT->getDecl()));
}
+void JSONNodeDumper::VisitSubstTemplateTypeParmType(
+ const SubstTemplateTypeParmType *STTPT) {
+ JOS.attribute("index", STTPT->getIndex());
+ if (auto PackIndex = STTPT->getPackIndex())
+ JOS.attribute("pack_index", *PackIndex);
+}
+
+void JSONNodeDumper::VisitSubstTemplateTypeParmPackType(
+ const SubstTemplateTypeParmPackType *T) {
+ JOS.attribute("index", T->getIndex());
+}
+
void JSONNodeDumper::VisitAutoType(const AutoType *AT) {
JOS.attribute("undeduced", !AT->isDeduced());
switch (AT->getKeyword()) {
@@ -715,7 +782,7 @@ void JSONNodeDumper::VisitObjCInterfaceType(const ObjCInterfaceType *OIT) {
}
void JSONNodeDumper::VisitPackExpansionType(const PackExpansionType *PET) {
- if (llvm::Optional<unsigned> N = PET->getNumExpansions())
+ if (std::optional<unsigned> N = PET->getNumExpansions())
JOS.attribute("numExpansions", *N);
}
@@ -744,11 +811,28 @@ void JSONNodeDumper::VisitNamedDecl(const NamedDecl *ND) {
JOS.attribute("name", ND->getNameAsString());
// FIXME: There are likely other contexts in which it makes no sense to ask
// for a mangled name.
- if (!isa<RequiresExprBodyDecl>(ND->getDeclContext())) {
- std::string MangledName = ASTNameGen.getName(ND);
- if (!MangledName.empty())
- JOS.attribute("mangledName", MangledName);
- }
+ if (isa<RequiresExprBodyDecl>(ND->getDeclContext()))
+ return;
+
+ // If the declaration is dependent or is in a dependent context, then the
+ // mangling is unlikely to be meaningful (and in some cases may cause
+ // "don't know how to mangle this" assertion failures.
+ if (ND->isTemplated())
+ return;
+
+ // Mangled names are not meaningful for locals, and may not be well-defined
+ // in the case of VLAs.
+ auto *VD = dyn_cast<VarDecl>(ND);
+ if (VD && VD->hasLocalStorage())
+ return;
+
+ // Do not mangle template deduction guides.
+ if (isa<CXXDeductionGuideDecl>(ND))
+ return;
+
+ std::string MangledName = ASTNameGen.getName(ND);
+ if (!MangledName.empty())
+ JOS.attribute("mangledName", MangledName);
}
}
@@ -765,6 +849,7 @@ void JSONNodeDumper::VisitTypeAliasDecl(const TypeAliasDecl *TAD) {
void JSONNodeDumper::VisitNamespaceDecl(const NamespaceDecl *ND) {
VisitNamedDecl(ND);
attributeOnlyIfTrue("isInline", ND->isInline());
+ attributeOnlyIfTrue("isNested", ND->isNested());
if (!ND->isOriginalNamespace())
JOS.attribute("originalNamespace",
createBareDeclRef(ND->getOriginalNamespace()));
@@ -802,6 +887,9 @@ void JSONNodeDumper::VisitUsingShadowDecl(const UsingShadowDecl *USD) {
void JSONNodeDumper::VisitVarDecl(const VarDecl *VD) {
VisitNamedDecl(VD);
JOS.attribute("type", createQualType(VD->getType()));
+ if (const auto *P = dyn_cast<ParmVarDecl>(VD))
+ attributeOnlyIfTrue("explicitObjectParameter",
+ P->isExplicitObjectParameter());
StorageClass SC = VD->getStorageClass();
if (SC != SC_None)
@@ -820,6 +908,9 @@ void JSONNodeDumper::VisitVarDecl(const VarDecl *VD) {
case VarDecl::CInit: JOS.attribute("init", "c"); break;
case VarDecl::CallInit: JOS.attribute("init", "call"); break;
case VarDecl::ListInit: JOS.attribute("init", "list"); break;
+ case VarDecl::ParenListInit:
+ JOS.attribute("init", "paren-list");
+ break;
}
}
attributeOnlyIfTrue("isParameterPack", VD->isParameterPack());
@@ -842,10 +933,11 @@ void JSONNodeDumper::VisitFunctionDecl(const FunctionDecl *FD) {
JOS.attribute("storageClass", VarDecl::getStorageClassSpecifierString(SC));
attributeOnlyIfTrue("inline", FD->isInlineSpecified());
attributeOnlyIfTrue("virtual", FD->isVirtualAsWritten());
- attributeOnlyIfTrue("pure", FD->isPure());
+ attributeOnlyIfTrue("pure", FD->isPureVirtual());
attributeOnlyIfTrue("explicitlyDeleted", FD->isDeletedAsWritten());
attributeOnlyIfTrue("constexpr", FD->isConstexpr());
attributeOnlyIfTrue("variadic", FD->isVariadic());
+ attributeOnlyIfTrue("immediate", FD->isImmediateFunction());
if (FD->isDefaulted())
JOS.attribute("explicitlyDefaulted",
@@ -886,6 +978,11 @@ void JSONNodeDumper::VisitCXXRecordDecl(const CXXRecordDecl *RD) {
}
}
+void JSONNodeDumper::VisitHLSLBufferDecl(const HLSLBufferDecl *D) {
+ VisitNamedDecl(D);
+ JOS.attribute("bufferKind", D->isCBuffer() ? "cbuffer" : "tbuffer");
+}
+
void JSONNodeDumper::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) {
VisitNamedDecl(D);
JOS.attribute("tagUsed", D->wasDeclaredWithTypename() ? "typename" : "class");
@@ -937,8 +1034,12 @@ void JSONNodeDumper::VisitTemplateTemplateParmDecl(
void JSONNodeDumper::VisitLinkageSpecDecl(const LinkageSpecDecl *LSD) {
StringRef Lang;
switch (LSD->getLanguage()) {
- case LinkageSpecDecl::lang_c: Lang = "C"; break;
- case LinkageSpecDecl::lang_cxx: Lang = "C++"; break;
+ case LinkageSpecLanguageIDs::C:
+ Lang = "C";
+ break;
+ case LinkageSpecLanguageIDs::CXX:
+ Lang = "C++";
+ break;
}
JOS.attribute("language", Lang);
attributeOnlyIfTrue("hasBraces", LSD->hasBraces());
@@ -1096,6 +1197,10 @@ void JSONNodeDumper::VisitBlockDecl(const BlockDecl *D) {
attributeOnlyIfTrue("capturesThis", D->capturesCXXThis());
}
+void JSONNodeDumper::VisitAtomicExpr(const AtomicExpr *AE) {
+ JOS.attribute("name", AE->getOpAsString());
+}
+
void JSONNodeDumper::VisitObjCEncodeExpr(const ObjCEncodeExpr *OEE) {
JOS.attribute("encodedType", createQualType(OEE->getEncodedType()));
}
@@ -1201,6 +1306,7 @@ void JSONNodeDumper::VisitDeclRefExpr(const DeclRefExpr *DRE) {
case NOUR_Constant: JOS.attribute("nonOdrUseReason", "constant"); break;
case NOUR_Discarded: JOS.attribute("nonOdrUseReason", "discarded"); break;
}
+ attributeOnlyIfTrue("isImmediateEscalating", DRE->isImmediateEscalating());
}
void JSONNodeDumper::VisitSYCLUniqueStableNameExpr(
@@ -1252,9 +1358,14 @@ void JSONNodeDumper::VisitCXXNewExpr(const CXXNewExpr *NE) {
attributeOnlyIfTrue("isArray", NE->isArray());
attributeOnlyIfTrue("isPlacement", NE->getNumPlacementArgs() != 0);
switch (NE->getInitializationStyle()) {
- case CXXNewExpr::NoInit: break;
- case CXXNewExpr::CallInit: JOS.attribute("initStyle", "call"); break;
- case CXXNewExpr::ListInit: JOS.attribute("initStyle", "list"); break;
+ case CXXNewInitializationStyle::None:
+ break;
+ case CXXNewInitializationStyle::Parens:
+ JOS.attribute("initStyle", "call");
+ break;
+ case CXXNewInitializationStyle::Braces:
+ JOS.attribute("initStyle", "list");
+ break;
}
if (const FunctionDecl *FD = NE->getOperatorNew())
JOS.attribute("operatorNewDecl", createBareDeclRef(FD));
@@ -1360,18 +1471,19 @@ void JSONNodeDumper::VisitCXXConstructExpr(const CXXConstructExpr *CE) {
attributeOnlyIfTrue("initializer_list", CE->isStdInitListInitialization());
attributeOnlyIfTrue("zeroing", CE->requiresZeroInitialization());
attributeOnlyIfTrue("hadMultipleCandidates", CE->hadMultipleCandidates());
+ attributeOnlyIfTrue("isImmediateEscalating", CE->isImmediateEscalating());
switch (CE->getConstructionKind()) {
- case CXXConstructExpr::CK_Complete:
+ case CXXConstructionKind::Complete:
JOS.attribute("constructionKind", "complete");
break;
- case CXXConstructExpr::CK_Delegating:
+ case CXXConstructionKind::Delegating:
JOS.attribute("constructionKind", "delegating");
break;
- case CXXConstructExpr::CK_NonVirtualBase:
+ case CXXConstructionKind::NonVirtualBase:
JOS.attribute("constructionKind", "non-virtual base");
break;
- case CXXConstructExpr::CK_VirtualBase:
+ case CXXConstructionKind::VirtualBase:
JOS.attribute("constructionKind", "virtual base");
break;
}
@@ -1489,6 +1601,8 @@ void JSONNodeDumper::VisitIfStmt(const IfStmt *IS) {
attributeOnlyIfTrue("hasVar", IS->hasVarStorage());
attributeOnlyIfTrue("hasElse", IS->hasElseStorage());
attributeOnlyIfTrue("isConstexpr", IS->isConstexpr());
+ attributeOnlyIfTrue("isConsteval", IS->isConsteval());
+ attributeOnlyIfTrue("constevalIsNegated", IS->isNegatedConsteval());
}
void JSONNodeDumper::VisitSwitchStmt(const SwitchStmt *SS) {
@@ -1572,19 +1686,19 @@ void JSONNodeDumper::visitInlineCommandComment(
JOS.attribute("name", getCommentCommandName(C->getCommandID()));
switch (C->getRenderKind()) {
- case comments::InlineCommandComment::RenderNormal:
+ case comments::InlineCommandRenderKind::Normal:
JOS.attribute("renderKind", "normal");
break;
- case comments::InlineCommandComment::RenderBold:
+ case comments::InlineCommandRenderKind::Bold:
JOS.attribute("renderKind", "bold");
break;
- case comments::InlineCommandComment::RenderEmphasized:
+ case comments::InlineCommandRenderKind::Emphasized:
JOS.attribute("renderKind", "emphasized");
break;
- case comments::InlineCommandComment::RenderMonospaced:
+ case comments::InlineCommandRenderKind::Monospaced:
JOS.attribute("renderKind", "monospaced");
break;
- case comments::InlineCommandComment::RenderAnchor:
+ case comments::InlineCommandRenderKind::Anchor:
JOS.attribute("renderKind", "anchor");
break;
}
@@ -1632,13 +1746,13 @@ void JSONNodeDumper::visitBlockCommandComment(
void JSONNodeDumper::visitParamCommandComment(
const comments::ParamCommandComment *C, const comments::FullComment *FC) {
switch (C->getDirection()) {
- case comments::ParamCommandComment::In:
+ case comments::ParamCommandPassDirection::In:
JOS.attribute("direction", "in");
break;
- case comments::ParamCommandComment::Out:
+ case comments::ParamCommandPassDirection::Out:
JOS.attribute("direction", "out");
break;
- case comments::ParamCommandComment::InOut:
+ case comments::ParamCommandPassDirection::InOut:
JOS.attribute("direction", "in,out");
break;
}
@@ -1683,3 +1797,18 @@ void JSONNodeDumper::visitVerbatimLineComment(
const comments::VerbatimLineComment *C, const comments::FullComment *) {
JOS.attribute("text", C->getText());
}
+
+llvm::json::Object JSONNodeDumper::createFPOptions(FPOptionsOverride FPO) {
+ llvm::json::Object Ret;
+#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) \
+ if (FPO.has##NAME##Override()) \
+ Ret.try_emplace(#NAME, static_cast<unsigned>(FPO.get##NAME##Override()));
+#include "clang/Basic/FPOptions.def"
+ return Ret;
+}
+
+void JSONNodeDumper::VisitCompoundStmt(const CompoundStmt *S) {
+ VisitStmt(S);
+ if (S->hasStoredFPFeatures())
+ JOS.attribute("fpoptions", createFPOptions(S->getStoredFPFeatures()));
+}
diff --git a/contrib/llvm-project/clang/lib/AST/Linkage.h b/contrib/llvm-project/clang/lib/AST/Linkage.h
index cd50d138790a..31f384eb75d0 100644
--- a/contrib/llvm-project/clang/lib/AST/Linkage.h
+++ b/contrib/llvm-project/clang/lib/AST/Linkage.h
@@ -19,8 +19,8 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerIntPair.h"
+#include <optional>
namespace clang {
/// Kinds of LV computation. The linkage side of the computation is
@@ -91,11 +91,11 @@ class LinkageComputer {
return QueryType(ND, Kind.toBits());
}
- llvm::Optional<LinkageInfo> lookup(const NamedDecl *ND,
- LVComputationKind Kind) const {
+ std::optional<LinkageInfo> lookup(const NamedDecl *ND,
+ LVComputationKind Kind) const {
auto Iter = CachedLinkageInfo.find(makeCacheKey(ND, Kind));
if (Iter == CachedLinkageInfo.end())
- return None;
+ return std::nullopt;
return Iter->second;
}
diff --git a/contrib/llvm-project/clang/lib/AST/Mangle.cpp b/contrib/llvm-project/clang/lib/AST/Mangle.cpp
index 54dbf484f377..30cff1ba2e6f 100644
--- a/contrib/llvm-project/clang/lib/AST/Mangle.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Mangle.cpp
@@ -70,11 +70,9 @@ static CCMangling getCallingConvMangling(const ASTContext &Context,
// On wasm, the argc/argv form of "main" is renamed so that the startup code
// can call it with the correct function signature.
- // On Emscripten, users may be exporting "main" and expecting to call it
- // themselves, so we can't mangle it.
- if (Triple.isWasm() && !Triple.isOSEmscripten())
+ if (Triple.isWasm())
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
- if (FD->isMain() && FD->hasPrototype() && FD->param_size() == 2)
+ if (FD->isMain() && FD->getNumParams() == 2)
return CCM_WasmMainArgcArgv;
if (!Triple.isOSWindows() || !Triple.isX86())
@@ -149,7 +147,7 @@ void MangleContext::mangleName(GlobalDecl GD, raw_ostream &Out) {
// If the label isn't literal, or if this is an alias for an LLVM intrinsic,
// do not add a "\01" prefix.
- if (!ALA->getIsLiteralLabel() || ALA->getLabel().startswith("llvm.")) {
+ if (!ALA->getIsLiteralLabel() || ALA->getLabel().starts_with("llvm.")) {
Out << ALA->getLabel();
return;
}
@@ -200,8 +198,12 @@ void MangleContext::mangleName(GlobalDecl GD, raw_ostream &Out) {
Out << '_';
else if (CC == CCM_Fast)
Out << '@';
- else if (CC == CCM_RegCall)
- Out << "__regcall3__";
+ else if (CC == CCM_RegCall) {
+ if (getASTContext().getLangOpts().RegCall4)
+ Out << "__regcall4__";
+ else
+ Out << "__regcall3__";
+ }
if (!MCXX)
Out << D->getIdentifier()->getName();
@@ -223,14 +225,20 @@ void MangleContext::mangleName(GlobalDecl GD, raw_ostream &Out) {
assert(!Proto->isVariadic());
unsigned ArgWords = 0;
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
- if (!MD->isStatic())
+ if (MD->isImplicitObjectMemberFunction())
++ArgWords;
- for (const auto &AT : Proto->param_types())
+ uint64_t DefaultPtrWidth = TI.getPointerWidth(LangAS::Default);
+ for (const auto &AT : Proto->param_types()) {
+ // If an argument type is incomplete there is no way to get its size to
+ // correctly encode into the mangling scheme.
+ // Follow GCCs behaviour by simply breaking out of the loop.
+ if (AT->isIncompleteType())
+ break;
// Size should be aligned to pointer size.
- ArgWords +=
- llvm::alignTo(ASTContext.getTypeSize(AT), TI.getPointerWidth(0)) /
- TI.getPointerWidth(0);
- Out << ((TI.getPointerWidth(0) / 8) * ArgWords);
+ ArgWords += llvm::alignTo(ASTContext.getTypeSize(AT), DefaultPtrWidth) /
+ DefaultPtrWidth;
+ }
+ Out << ((DefaultPtrWidth / 8) * ArgWords);
}
void MangleContext::mangleMSGuidDecl(const MSGuidDecl *GD, raw_ostream &Out) {
@@ -456,7 +464,7 @@ public:
SmallString<40> Mangled;
auto Prefix = getClassSymbolPrefix(Kind, OCD->getASTContext());
llvm::Mangler::getNameWithPrefix(Mangled, Prefix + ClassName, DL);
- return std::string(Mangled.str());
+ return std::string(Mangled);
};
return {
diff --git a/contrib/llvm-project/clang/lib/AST/MicrosoftCXXABI.cpp b/contrib/llvm-project/clang/lib/AST/MicrosoftCXXABI.cpp
index 166aa3b3bd60..1c020c3ad4ad 100644
--- a/contrib/llvm-project/clang/lib/AST/MicrosoftCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/AST/MicrosoftCXXABI.cpp
@@ -30,14 +30,12 @@ namespace {
/// Typically these are things like static locals, lambdas, or blocks.
class MicrosoftNumberingContext : public MangleNumberingContext {
llvm::DenseMap<const Type *, unsigned> ManglingNumbers;
- unsigned LambdaManglingNumber;
- unsigned StaticLocalNumber;
- unsigned StaticThreadlocalNumber;
+ unsigned LambdaManglingNumber = 0;
+ unsigned StaticLocalNumber = 0;
+ unsigned StaticThreadlocalNumber = 0;
public:
- MicrosoftNumberingContext()
- : MangleNumberingContext(), LambdaManglingNumber(0),
- StaticLocalNumber(0), StaticThreadlocalNumber(0) {}
+ MicrosoftNumberingContext() = default;
unsigned getManglingNumber(const CXXMethodDecl *CallOperator) override {
return ++LambdaManglingNumber;
@@ -69,6 +67,7 @@ class MSHIPNumberingContext : public MicrosoftNumberingContext {
std::unique_ptr<MangleNumberingContext> DeviceCtx;
public:
+ using MicrosoftNumberingContext::getManglingNumber;
MSHIPNumberingContext(MangleContext *DeviceMangler) {
DeviceCtx = createItaniumNumberingContext(DeviceMangler);
}
@@ -76,6 +75,33 @@ public:
unsigned getDeviceManglingNumber(const CXXMethodDecl *CallOperator) override {
return DeviceCtx->getManglingNumber(CallOperator);
}
+
+ unsigned getManglingNumber(const TagDecl *TD,
+ unsigned MSLocalManglingNumber) override {
+ unsigned DeviceN = DeviceCtx->getManglingNumber(TD, MSLocalManglingNumber);
+ unsigned HostN =
+ MicrosoftNumberingContext::getManglingNumber(TD, MSLocalManglingNumber);
+ if (DeviceN > 0xFFFF || HostN > 0xFFFF) {
+ DiagnosticsEngine &Diags = TD->getASTContext().getDiagnostics();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error, "Mangling number exceeds limit (65535)");
+ Diags.Report(TD->getLocation(), DiagID);
+ }
+ return (DeviceN << 16) | HostN;
+ }
+};
+
+class MSSYCLNumberingContext : public MicrosoftNumberingContext {
+ std::unique_ptr<MangleNumberingContext> DeviceCtx;
+
+public:
+ MSSYCLNumberingContext(MangleContext *DeviceMangler) {
+ DeviceCtx = createItaniumNumberingContext(DeviceMangler);
+ }
+
+ unsigned getDeviceManglingNumber(const CXXMethodDecl *CallOperator) override {
+ return DeviceCtx->getManglingNumber(CallOperator);
+ }
};
class MicrosoftCXXABI : public CXXABI {
@@ -100,6 +126,10 @@ public:
DeviceMangler.reset(
Context.createMangleContext(Context.getAuxTargetInfo()));
}
+ else if (Context.getLangOpts().isSYCL()) {
+ DeviceMangler.reset(
+ ItaniumMangleContext::create(Context, Context.getDiagnostics()));
+ }
}
MemberPointerInfo
@@ -162,7 +192,11 @@ public:
if (Context.getLangOpts().CUDA && Context.getAuxTargetInfo()) {
assert(DeviceMangler && "Missing device mangler");
return std::make_unique<MSHIPNumberingContext>(DeviceMangler.get());
+ } else if (Context.getLangOpts().isSYCL()) {
+ assert(DeviceMangler && "Missing device mangler");
+ return std::make_unique<MSSYCLNumberingContext>(DeviceMangler.get());
}
+
return std::make_unique<MicrosoftNumberingContext>();
}
};
@@ -267,7 +301,7 @@ CXXABI::MemberPointerInfo MicrosoftCXXABI::getMemberPointerInfo(
// The nominal struct is laid out with pointers followed by ints and aligned
// to a pointer width if any are present and an int width otherwise.
const TargetInfo &Target = Context.getTargetInfo();
- unsigned PtrSize = Target.getPointerWidth(0);
+ unsigned PtrSize = Target.getPointerWidth(LangAS::Default);
unsigned IntSize = Target.getIntWidth();
unsigned Ptrs, Ints;
@@ -282,7 +316,7 @@ CXXABI::MemberPointerInfo MicrosoftCXXABI::getMemberPointerInfo(
if (Ptrs + Ints > 1 && Target.getTriple().isArch32Bit())
MPI.Align = 64;
else if (Ptrs)
- MPI.Align = Target.getPointerAlign(0);
+ MPI.Align = Target.getPointerAlign(LangAS::Default);
else
MPI.Align = Target.getIntAlign();
diff --git a/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp b/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp
index d89cddd2adda..36b5bf64f675 100644
--- a/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp
+++ b/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp
@@ -21,6 +21,7 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/ABI.h"
@@ -28,17 +29,32 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CRC.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/StringSaver.h"
#include "llvm/Support/xxhash.h"
+#include <functional>
+#include <optional>
using namespace clang;
namespace {
+// Get GlobalDecl of DeclContext of local entities.
+static GlobalDecl getGlobalDeclAsDeclContext(const DeclContext *DC) {
+ GlobalDecl GD;
+ if (auto *CD = dyn_cast<CXXConstructorDecl>(DC))
+ GD = GlobalDecl(CD, Ctor_Complete);
+ else if (auto *DD = dyn_cast<CXXDestructorDecl>(DC))
+ GD = GlobalDecl(DD, Dtor_Complete);
+ else
+ GD = GlobalDecl(cast<FunctionDecl>(DC));
+ return GD;
+}
+
struct msvc_hashing_ostream : public llvm::raw_svector_ostream {
raw_ostream &OS;
llvm::SmallString<64> Buffer;
@@ -47,7 +63,7 @@ struct msvc_hashing_ostream : public llvm::raw_svector_ostream {
: llvm::raw_svector_ostream(Buffer), OS(OS) {}
~msvc_hashing_ostream() override {
StringRef MangledName = str();
- bool StartsWithEscape = MangledName.startswith("\01");
+ bool StartsWithEscape = MangledName.starts_with("\01");
if (StartsWithEscape)
MangledName = MangledName.drop_front(1);
if (MangledName.size() < 4096) {
@@ -129,12 +145,13 @@ class MicrosoftMangleContextImpl : public MicrosoftMangleContext {
llvm::DenseMap<DiscriminatorKeyTy, unsigned> Discriminator;
llvm::DenseMap<const NamedDecl *, unsigned> Uniquifier;
llvm::DenseMap<const CXXRecordDecl *, unsigned> LambdaIds;
- llvm::DenseMap<const NamedDecl *, unsigned> SEHFilterIds;
- llvm::DenseMap<const NamedDecl *, unsigned> SEHFinallyIds;
+ llvm::DenseMap<GlobalDecl, unsigned> SEHFilterIds;
+ llvm::DenseMap<GlobalDecl, unsigned> SEHFinallyIds;
SmallString<16> AnonymousNamespaceHash;
public:
- MicrosoftMangleContextImpl(ASTContext &Context, DiagnosticsEngine &Diags);
+ MicrosoftMangleContextImpl(ASTContext &Context, DiagnosticsEngine &Diags,
+ bool IsAux = false);
bool shouldMangleCXXName(const NamedDecl *D) override;
bool shouldMangleStringLiteral(const StringLiteral *SL) override;
void mangleCXXName(GlobalDecl GD, raw_ostream &Out) override;
@@ -165,7 +182,8 @@ public:
int32_t VBPtrOffset, uint32_t VBIndex,
raw_ostream &Out) override;
void mangleCXXRTTI(QualType T, raw_ostream &Out) override;
- void mangleCXXRTTIName(QualType T, raw_ostream &Out) override;
+ void mangleCXXRTTIName(QualType T, raw_ostream &Out,
+ bool NormalizeIntegers) override;
void mangleCXXRTTIBaseClassDescriptor(const CXXRecordDecl *Derived,
uint32_t NVOffset, int32_t VBPtrOffset,
uint32_t VBTableOffset, uint32_t Flags,
@@ -178,7 +196,8 @@ public:
mangleCXXRTTICompleteObjectLocator(const CXXRecordDecl *Derived,
ArrayRef<const CXXRecordDecl *> BasePath,
raw_ostream &Out) override;
- void mangleTypeName(QualType T, raw_ostream &) override;
+ void mangleCanonicalTypeName(QualType T, raw_ostream &,
+ bool NormalizeIntegers) override;
void mangleReferenceTemporary(const VarDecl *, unsigned ManglingNumber,
raw_ostream &) override;
void mangleStaticGuardVariable(const VarDecl *D, raw_ostream &Out) override;
@@ -187,9 +206,9 @@ public:
void mangleDynamicInitializer(const VarDecl *D, raw_ostream &Out) override;
void mangleDynamicAtExitDestructor(const VarDecl *D,
raw_ostream &Out) override;
- void mangleSEHFilterExpression(const NamedDecl *EnclosingDecl,
+ void mangleSEHFilterExpression(GlobalDecl EnclosingDecl,
raw_ostream &Out) override;
- void mangleSEHFinallyBlock(const NamedDecl *EnclosingDecl,
+ void mangleSEHFinallyBlock(GlobalDecl EnclosingDecl,
raw_ostream &Out) override;
void mangleStringLiteral(const StringLiteral *SL, raw_ostream &Out) override;
bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) {
@@ -208,7 +227,7 @@ public:
// Use the canonical number for externally visible decls.
if (ND->isExternallyVisible()) {
- disc = getASTContext().getManglingNumber(ND);
+ disc = getASTContext().getManglingNumber(ND, isAux());
return true;
}
@@ -271,12 +290,8 @@ public:
assert(!RD->isExternallyVisible() && "RD must not be visible!");
assert(RD->getLambdaManglingNumber() == 0 &&
"RD must not have a mangling number!");
- llvm::DenseMap<const CXXRecordDecl *, unsigned>::iterator Result =
- LambdaIds.find(RD);
// The lambda should exist, but return 0 in case it doesn't.
- if (Result == LambdaIds.end())
- return 0;
- return Result->second;
+ return LambdaIds.lookup(RD);
}
/// Return a character sequence that is (somewhat) unique to the TU suitable
@@ -310,8 +325,8 @@ class MicrosoftCXXNameMangler {
typedef llvm::DenseMap<const void *, StringRef> TemplateArgStringMap;
TemplateArgStringMap TemplateArgStrings;
- llvm::StringSaver TemplateArgStringStorage;
llvm::BumpPtrAllocator TemplateArgStringStorageAlloc;
+ llvm::StringSaver TemplateArgStringStorage;
typedef std::set<std::pair<int, bool>> PassObjectSizeArgsSet;
PassObjectSizeArgsSet PassObjectSizeArgs;
@@ -322,38 +337,43 @@ class MicrosoftCXXNameMangler {
public:
enum QualifierMangleMode { QMM_Drop, QMM_Mangle, QMM_Escape, QMM_Result };
+ enum class TplArgKind { ClassNTTP, StructuralValue };
MicrosoftCXXNameMangler(MicrosoftMangleContextImpl &C, raw_ostream &Out_)
: Context(C), Out(Out_), Structor(nullptr), StructorType(-1),
TemplateArgStringStorage(TemplateArgStringStorageAlloc),
- PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(0) ==
- 64) {}
+ PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(
+ LangAS::Default) == 64) {}
MicrosoftCXXNameMangler(MicrosoftMangleContextImpl &C, raw_ostream &Out_,
const CXXConstructorDecl *D, CXXCtorType Type)
: Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
TemplateArgStringStorage(TemplateArgStringStorageAlloc),
- PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(0) ==
- 64) {}
+ PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(
+ LangAS::Default) == 64) {}
MicrosoftCXXNameMangler(MicrosoftMangleContextImpl &C, raw_ostream &Out_,
const CXXDestructorDecl *D, CXXDtorType Type)
: Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
TemplateArgStringStorage(TemplateArgStringStorageAlloc),
- PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(0) ==
- 64) {}
+ PointersAre64Bit(C.getASTContext().getTargetInfo().getPointerWidth(
+ LangAS::Default) == 64) {}
raw_ostream &getStream() const { return Out; }
- void mangle(const NamedDecl *D, StringRef Prefix = "?");
- void mangleName(const NamedDecl *ND);
- void mangleFunctionEncoding(const FunctionDecl *FD, bool ShouldMangle);
+ void mangle(GlobalDecl GD, StringRef Prefix = "?");
+ void mangleName(GlobalDecl GD);
+ void mangleFunctionEncoding(GlobalDecl GD, bool ShouldMangle);
void mangleVariableEncoding(const VarDecl *VD);
void mangleMemberDataPointer(const CXXRecordDecl *RD, const ValueDecl *VD,
StringRef Prefix = "$");
+ void mangleMemberDataPointerInClassNTTP(const CXXRecordDecl *,
+ const ValueDecl *);
void mangleMemberFunctionPointer(const CXXRecordDecl *RD,
const CXXMethodDecl *MD,
StringRef Prefix = "$");
+ void mangleMemberFunctionPointerInClassNTTP(const CXXRecordDecl *RD,
+ const CXXMethodDecl *MD);
void mangleVirtualMemPtrThunk(const CXXMethodDecl *MD,
const MethodVFTableLocation &ML);
void mangleNumber(int64_t Number);
@@ -362,7 +382,7 @@ public:
void mangleBits(llvm::APInt Number);
void mangleTagTypeKind(TagTypeKind TK);
void mangleArtificialTagType(TagTypeKind TK, StringRef UnqualifiedName,
- ArrayRef<StringRef> NestedNames = None);
+ ArrayRef<StringRef> NestedNames = std::nullopt);
void mangleAddressSpaceType(QualType T, Qualifiers Quals, SourceRange Range);
void mangleType(QualType T, SourceRange Range,
QualifierMangleMode QMM = QMM_Mangle);
@@ -370,7 +390,7 @@ public:
const FunctionDecl *D = nullptr,
bool ForceThisQuals = false,
bool MangleExceptionSpec = true);
- void mangleNestedName(const NamedDecl *ND);
+ void mangleNestedName(GlobalDecl GD);
private:
bool isStructorDecl(const NamedDecl *ND) const {
@@ -384,10 +404,10 @@ private:
AddrSpace == LangAS::ptr32_uptr));
}
- void mangleUnqualifiedName(const NamedDecl *ND) {
- mangleUnqualifiedName(ND, ND->getDeclName());
+ void mangleUnqualifiedName(GlobalDecl GD) {
+ mangleUnqualifiedName(GD, cast<NamedDecl>(GD.getDecl())->getDeclName());
}
- void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name);
+ void mangleUnqualifiedName(GlobalDecl GD, DeclarationName Name);
void mangleSourceName(StringRef Name);
void mangleOperatorName(OverloadedOperatorKind OO, SourceLocation Loc);
void mangleCXXDtorType(CXXDtorType T);
@@ -396,9 +416,9 @@ private:
void manglePointerCVQualifiers(Qualifiers Quals);
void manglePointerExtQualifiers(Qualifiers Quals, QualType PointeeType);
- void mangleUnscopedTemplateName(const TemplateDecl *ND);
+ void mangleUnscopedTemplateName(GlobalDecl GD);
void
- mangleTemplateInstantiationName(const TemplateDecl *TD,
+ mangleTemplateInstantiationName(GlobalDecl GD,
const TemplateArgumentList &TemplateArgs);
void mangleObjCMethodName(const ObjCMethodDecl *MD);
@@ -434,7 +454,7 @@ private:
const TemplateArgumentList &TemplateArgs);
void mangleTemplateArg(const TemplateDecl *TD, const TemplateArgument &TA,
const NamedDecl *Parm);
- void mangleTemplateArgValue(QualType T, const APValue &V,
+ void mangleTemplateArgValue(QualType T, const APValue &V, TplArgKind,
bool WithScalarType = false);
void mangleObjCProtocol(const ObjCProtocolDecl *PD);
@@ -446,8 +466,9 @@ private:
}
MicrosoftMangleContextImpl::MicrosoftMangleContextImpl(ASTContext &Context,
- DiagnosticsEngine &Diags)
- : MicrosoftMangleContext(Context, Diags) {
+ DiagnosticsEngine &Diags,
+ bool IsAux)
+ : MicrosoftMangleContext(Context, Diags, IsAux) {
// To mangle anonymous namespaces, hash the path to the main source file. The
// path should be whatever (probably relative) path was passed on the command
// line. The goal is for the compiler to produce the same output regardless of
@@ -463,9 +484,9 @@ MicrosoftMangleContextImpl::MicrosoftMangleContextImpl(ASTContext &Context,
// The generated names are intended to look similar to what MSVC generates,
// which are something like "?A0x01234567@".
SourceManager &SM = Context.getSourceManager();
- if (const FileEntry *FE = SM.getFileEntryForID(SM.getMainFileID())) {
+ if (OptionalFileEntryRef FE = SM.getFileEntryRefForID(SM.getMainFileID())) {
// Truncate the hash so we get 8 characters of hexadecimal.
- uint32_t TruncatedHash = uint32_t(xxHash64(FE->getName()));
+ uint32_t TruncatedHash = uint32_t(xxh3_64bits(FE->getName()));
AnonymousNamespaceHash = llvm::utohexstr(TruncatedHash);
} else {
// If we don't have a path to the main file, we'll just use 0.
@@ -519,9 +540,8 @@ bool MicrosoftMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
while (!DC->isNamespace() && !DC->isTranslationUnit())
DC = getEffectiveParentContext(DC);
- if (DC->isTranslationUnit() && D->getFormalLinkage() == InternalLinkage &&
- !isa<VarTemplateSpecializationDecl>(D) &&
- D->getIdentifier() != nullptr)
+ if (DC->isTranslationUnit() && D->getFormalLinkage() == Linkage::Internal &&
+ !isa<VarTemplateSpecializationDecl>(D) && D->getIdentifier() != nullptr)
return false;
}
@@ -533,7 +553,8 @@ MicrosoftMangleContextImpl::shouldMangleStringLiteral(const StringLiteral *SL) {
return true;
}
-void MicrosoftCXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) {
+void MicrosoftCXXNameMangler::mangle(GlobalDecl GD, StringRef Prefix) {
+ const NamedDecl *D = cast<NamedDecl>(GD.getDecl());
// MSVC doesn't mangle C++ names the same way it mangles extern "C" names.
// Therefore it's really important that we don't decorate the
// name with leading underscores or leading/trailing at signs. So, by
@@ -542,9 +563,9 @@ void MicrosoftCXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) {
// <mangled-name> ::= ? <name> <type-encoding>
Out << Prefix;
- mangleName(D);
+ mangleName(GD);
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
- mangleFunctionEncoding(FD, Context.shouldMangleDeclName(FD));
+ mangleFunctionEncoding(GD, Context.shouldMangleDeclName(FD));
else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
mangleVariableEncoding(VD);
else if (isa<MSGuidDecl>(D))
@@ -558,8 +579,9 @@ void MicrosoftCXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) {
llvm_unreachable("Tried to mangle unexpected NamedDecl!");
}
-void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD,
+void MicrosoftCXXNameMangler::mangleFunctionEncoding(GlobalDecl GD,
bool ShouldMangle) {
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
// <type-encoding> ::= <function-class> <function-type>
// Since MSVC operates on the type as written and not the canonical type, it
@@ -691,6 +713,28 @@ void MicrosoftCXXNameMangler::mangleMemberDataPointer(const CXXRecordDecl *RD,
mangleNumber(VBTableOffset);
}
+void MicrosoftCXXNameMangler::mangleMemberDataPointerInClassNTTP(
+ const CXXRecordDecl *RD, const ValueDecl *VD) {
+ MSInheritanceModel IM = RD->getMSInheritanceModel();
+ // <nttp-class-member-data-pointer> ::= <member-data-pointer>
+ // ::= N
+ // ::= 8 <postfix> @ <unqualified-name> @
+
+ if (IM != MSInheritanceModel::Single && IM != MSInheritanceModel::Multiple)
+ return mangleMemberDataPointer(RD, VD, "");
+
+ if (!VD) {
+ Out << 'N';
+ return;
+ }
+
+ Out << '8';
+ mangleNestedName(VD);
+ Out << '@';
+ mangleUnqualifiedName(VD);
+ Out << '@';
+}
+
void
MicrosoftCXXNameMangler::mangleMemberFunctionPointer(const CXXRecordDecl *RD,
const CXXMethodDecl *MD,
@@ -755,11 +799,39 @@ MicrosoftCXXNameMangler::mangleMemberFunctionPointer(const CXXRecordDecl *RD,
mangleNumber(VBTableOffset);
}
+void MicrosoftCXXNameMangler::mangleMemberFunctionPointerInClassNTTP(
+ const CXXRecordDecl *RD, const CXXMethodDecl *MD) {
+ // <nttp-class-member-function-pointer> ::= <member-function-pointer>
+ // ::= N
+ // ::= E? <virtual-mem-ptr-thunk>
+ // ::= E? <mangled-name> <type-encoding>
+
+ if (!MD) {
+ if (RD->getMSInheritanceModel() != MSInheritanceModel::Single)
+ return mangleMemberFunctionPointer(RD, MD, "");
+
+ Out << 'N';
+ return;
+ }
+
+ Out << "E?";
+ if (MD->isVirtual()) {
+ MicrosoftVTableContext *VTContext =
+ cast<MicrosoftVTableContext>(getASTContext().getVTableContext());
+ MethodVFTableLocation ML =
+ VTContext->getMethodVFTableLocation(GlobalDecl(MD));
+ mangleVirtualMemPtrThunk(MD, ML);
+ } else {
+ mangleName(MD);
+ mangleFunctionEncoding(MD, /*ShouldMangle=*/true);
+ }
+}
+
void MicrosoftCXXNameMangler::mangleVirtualMemPtrThunk(
const CXXMethodDecl *MD, const MethodVFTableLocation &ML) {
// Get the vftable offset.
CharUnits PointerWidth = getASTContext().toCharUnitsFromBits(
- getASTContext().getTargetInfo().getPointerWidth(0));
+ getASTContext().getTargetInfo().getPointerWidth(LangAS::Default));
uint64_t OffsetInVFTable = ML.Index * PointerWidth.getQuantity();
Out << "?_9";
@@ -770,13 +842,13 @@ void MicrosoftCXXNameMangler::mangleVirtualMemPtrThunk(
mangleCallingConvention(MD->getType()->castAs<FunctionProtoType>());
}
-void MicrosoftCXXNameMangler::mangleName(const NamedDecl *ND) {
+void MicrosoftCXXNameMangler::mangleName(GlobalDecl GD) {
// <name> ::= <unscoped-name> {[<named-scope>]+ | [<nested-name>]}? @
// Always start with the unqualified name.
- mangleUnqualifiedName(ND);
+ mangleUnqualifiedName(GD);
- mangleNestedName(ND);
+ mangleNestedName(GD);
// Terminate the whole name with an '@'.
Out << '@';
@@ -791,8 +863,8 @@ void MicrosoftCXXNameMangler::mangleNumber(llvm::APSInt Number) {
// to convert every integer to signed 64 bit before mangling (including
// unsigned 64 bit values). Do the same, but preserve bits beyond the bottom
// 64.
- llvm::APInt Value =
- Number.isSigned() ? Number.sextOrSelf(64) : Number.zextOrSelf(64);
+ unsigned Width = std::max(Number.getBitWidth(), 64U);
+ llvm::APInt Value = Number.extend(Width);
// <non-negative integer> ::= A@ # when Number == 0
// ::= <decimal digit> # when 1 <= Number <= 10
@@ -821,6 +893,13 @@ void MicrosoftCXXNameMangler::mangleFloat(llvm::APFloat Number) {
case APFloat::S_x87DoubleExtended: Out << 'X'; break;
case APFloat::S_IEEEquad: Out << 'Y'; break;
case APFloat::S_PPCDoubleDouble: Out << 'Z'; break;
+ case APFloat::S_Float8E5M2:
+ case APFloat::S_Float8E4M3FN:
+ case APFloat::S_Float8E5M2FNUZ:
+ case APFloat::S_Float8E4M3FNUZ:
+ case APFloat::S_Float8E4M3B11FNUZ:
+ case APFloat::S_FloatTF32:
+ llvm_unreachable("Tried to mangle unexpected APFloat semantics");
}
mangleBits(Number.bitcastToAPInt());
@@ -844,13 +923,14 @@ void MicrosoftCXXNameMangler::mangleBits(llvm::APInt Value) {
}
}
-static const TemplateDecl *
-isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) {
+static GlobalDecl isTemplate(GlobalDecl GD,
+ const TemplateArgumentList *&TemplateArgs) {
+ const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
// Check if we have a function template.
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
if (const TemplateDecl *TD = FD->getPrimaryTemplate()) {
TemplateArgs = FD->getTemplateSpecializationArgs();
- return TD;
+ return GD.getWithDecl(TD);
}
}
@@ -858,21 +938,22 @@ isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) {
if (const ClassTemplateSpecializationDecl *Spec =
dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
TemplateArgs = &Spec->getTemplateArgs();
- return Spec->getSpecializedTemplate();
+ return GD.getWithDecl(Spec->getSpecializedTemplate());
}
// Check if we have a variable template.
if (const VarTemplateSpecializationDecl *Spec =
dyn_cast<VarTemplateSpecializationDecl>(ND)) {
TemplateArgs = &Spec->getTemplateArgs();
- return Spec->getSpecializedTemplate();
+ return GD.getWithDecl(Spec->getSpecializedTemplate());
}
- return nullptr;
+ return GlobalDecl();
}
-void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
+void MicrosoftCXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
DeclarationName Name) {
+ const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
// <unqualified-name> ::= <operator-name>
// ::= <ctor-dtor-name>
// ::= <source-name>
@@ -880,11 +961,11 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
// Check if we have a template.
const TemplateArgumentList *TemplateArgs = nullptr;
- if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+ if (GlobalDecl TD = isTemplate(GD, TemplateArgs)) {
// Function templates aren't considered for name back referencing. This
// makes sense since function templates aren't likely to occur multiple
// times in a symbol.
- if (isa<FunctionTemplateDecl>(TD)) {
+ if (isa<FunctionTemplateDecl>(TD.getDecl())) {
mangleTemplateInstantiationName(TD, *TemplateArgs);
Out << '@';
return;
@@ -945,7 +1026,19 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
switch (Name.getNameKind()) {
case DeclarationName::Identifier: {
if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
- mangleSourceName(II->getName());
+ bool IsDeviceStub =
+ ND &&
+ ((isa<FunctionDecl>(ND) && ND->hasAttr<CUDAGlobalAttr>()) ||
+ (isa<FunctionTemplateDecl>(ND) &&
+ cast<FunctionTemplateDecl>(ND)
+ ->getTemplatedDecl()
+ ->hasAttr<CUDAGlobalAttr>())) &&
+ GD.getKernelReferenceKind() == KernelReferenceKind::Stub;
+ if (IsDeviceStub)
+ mangleSourceName(
+ (llvm::Twine("__device_stub__") + II->getName()).str());
+ else
+ mangleSourceName(II->getName());
break;
}
@@ -996,7 +1089,7 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
Out << "?__N";
mangleTemplateArgValue(TPO->getType().getUnqualifiedType(),
- TPO->getValue());
+ TPO->getValue(), TplArgKind::ClassNTTP);
break;
}
@@ -1146,7 +1239,13 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
// <postfix> ::= <unqualified-name> [<postfix>]
// ::= <substitution> [<postfix>]
-void MicrosoftCXXNameMangler::mangleNestedName(const NamedDecl *ND) {
+void MicrosoftCXXNameMangler::mangleNestedName(GlobalDecl GD) {
+ const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
+
+ if (const auto *ID = dyn_cast<IndirectFieldDecl>(ND))
+ for (unsigned I = 1, IE = ID->getChainingSize(); I < IE; ++I)
+ mangleSourceName("<unnamed-tag>");
+
const DeclContext *DC = getEffectiveDeclContext(ND);
while (!DC->isTranslationUnit()) {
if (isa<TagDecl>(ND) || isa<VarDecl>(ND)) {
@@ -1214,9 +1313,9 @@ void MicrosoftCXXNameMangler::mangleNestedName(const NamedDecl *ND) {
if (PointersAre64Bit)
Out << 'E';
Out << 'A';
- mangleArtificialTagType(TTK_Struct,
- Discriminate("__block_literal", Discriminator,
- ParameterDiscriminator));
+ mangleArtificialTagType(TagTypeKind::Struct,
+ Discriminate("__block_literal", Discriminator,
+ ParameterDiscriminator));
Out << "@Z";
// If the effective context was a Record, we have fully mangled the
@@ -1229,7 +1328,7 @@ void MicrosoftCXXNameMangler::mangleNestedName(const NamedDecl *ND) {
} else if (isa<NamedDecl>(DC)) {
ND = cast<NamedDecl>(DC);
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
- mangle(FD, "?");
+ mangle(getGlobalDeclAsDeclContext(FD), "?");
break;
} else {
mangleUnqualifiedName(ND);
@@ -1418,7 +1517,7 @@ void MicrosoftCXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
}
void MicrosoftCXXNameMangler::mangleTemplateInstantiationName(
- const TemplateDecl *TD, const TemplateArgumentList &TemplateArgs) {
+ GlobalDecl GD, const TemplateArgumentList &TemplateArgs) {
// <template-name> ::= <unscoped-template-name> <template-args>
// ::= <substitution>
// Always start with the unqualified name.
@@ -1433,8 +1532,8 @@ void MicrosoftCXXNameMangler::mangleTemplateInstantiationName(
TemplateArgBackReferences.swap(OuterTemplateArgsContext);
PassObjectSizeArgs.swap(OuterPassObjectSizeArgs);
- mangleUnscopedTemplateName(TD);
- mangleTemplateArgs(TD, TemplateArgs);
+ mangleUnscopedTemplateName(GD);
+ mangleTemplateArgs(cast<TemplateDecl>(GD.getDecl()), TemplateArgs);
// Restore the previous back reference contexts.
NameBackReferences.swap(OuterTemplateContext);
@@ -1443,11 +1542,10 @@ void MicrosoftCXXNameMangler::mangleTemplateInstantiationName(
PassObjectSizeArgs.swap(OuterPassObjectSizeArgs);
}
-void
-MicrosoftCXXNameMangler::mangleUnscopedTemplateName(const TemplateDecl *TD) {
+void MicrosoftCXXNameMangler::mangleUnscopedTemplateName(GlobalDecl GD) {
// <unscoped-template-name> ::= ?$ <unqualified-name>
Out << "?$";
- mangleUnqualifiedName(TD);
+ mangleUnqualifiedName(GD);
}
void MicrosoftCXXNameMangler::mangleIntegerLiteral(
@@ -1474,7 +1572,7 @@ void MicrosoftCXXNameMangler::mangleIntegerLiteral(
void MicrosoftCXXNameMangler::mangleExpression(
const Expr *E, const NonTypeTemplateParmDecl *PD) {
// See if this is a constant expression.
- if (Optional<llvm::APSInt> Value =
+ if (std::optional<llvm::APSInt> Value =
E->getIntegerConstantExpr(Context.getASTContext())) {
mangleIntegerLiteral(*Value, PD, E->getType());
return;
@@ -1507,6 +1605,22 @@ void MicrosoftCXXNameMangler::mangleTemplateArgs(
}
}
+/// If value V (with type T) represents a decayed pointer to the first element
+/// of an array, return that array.
+static ValueDecl *getAsArrayToPointerDecayedDecl(QualType T, const APValue &V) {
+ // Must be a pointer...
+ if (!T->isPointerType() || !V.isLValue() || !V.hasLValuePath() ||
+ !V.getLValueBase())
+ return nullptr;
+ // ... to element 0 of an array.
+ QualType BaseT = V.getLValueBase().getType();
+ if (!BaseT->isArrayType() || V.getLValuePath().size() != 1 ||
+ V.getLValuePath()[0].getAsArrayIndex() != 0)
+ return nullptr;
+ return const_cast<ValueDecl *>(
+ V.getLValueBase().dyn_cast<const ValueDecl *>());
+}
+
void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
const TemplateArgument &TA,
const NamedDecl *Parm) {
@@ -1530,7 +1644,6 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
// ::= 8 <class> <unqualified-name> @
// ::= A <type> <non-negative integer> # float
// ::= B <type> <non-negative integer> # double
- // ::= E <mangled-name> # reference to D
// # pointer to member, by component value
// ::= F <number> <number>
// ::= G <number> <number> <number>
@@ -1573,9 +1686,9 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
Out << "$";
auto *TPO = cast<TemplateParamObjectDecl>(ND);
mangleTemplateArgValue(TPO->getType().getUnqualifiedType(),
- TPO->getValue());
+ TPO->getValue(), TplArgKind::ClassNTTP);
} else {
- mangle(ND, TA.getParamTypeForDecl()->isReferenceType() ? "$E?" : "$1?");
+ mangle(ND, "$1?");
}
break;
}
@@ -1616,6 +1729,27 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
cast<NonTypeTemplateParmDecl>(Parm), T);
break;
}
+ case TemplateArgument::StructuralValue:
+ if (ValueDecl *D = getAsArrayToPointerDecayedDecl(
+ TA.getStructuralValueType(), TA.getAsStructuralValue())) {
+ // Mangle the result of array-to-pointer decay as if it were a reference
+ // to the original declaration, to match MSVC's behavior. This can result
+ // in mangling collisions in some cases!
+ return mangleTemplateArg(
+ TD, TemplateArgument(D, TA.getStructuralValueType()), Parm);
+ }
+ Out << "$";
+ if (cast<NonTypeTemplateParmDecl>(Parm)
+ ->getType()
+ ->getContainedDeducedType()) {
+ Out << "M";
+ mangleType(TA.getNonTypeTemplateArgumentType(), SourceRange(), QMM_Drop);
+ }
+ mangleTemplateArgValue(TA.getStructuralValueType(),
+ TA.getAsStructuralValue(),
+ TplArgKind::StructuralValue,
+ /*WithScalarType=*/false);
+ break;
case TemplateArgument::Expression:
mangleExpression(TA.getAsExpr(), cast<NonTypeTemplateParmDecl>(Parm));
break;
@@ -1658,6 +1792,7 @@ void MicrosoftCXXNameMangler::mangleTemplateArg(const TemplateDecl *TD,
void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T,
const APValue &V,
+ TplArgKind TAK,
bool WithScalarType) {
switch (V.getKind()) {
case APValue::None:
@@ -1704,46 +1839,62 @@ void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T,
// FIXME: This can only happen as an extension. Invent a mangling.
break;
} else if (auto *VD = Base.dyn_cast<const ValueDecl*>()) {
- Out << (T->isReferenceType() ? "E" : "1");
+ Out << "E";
mangle(VD);
} else {
break;
}
} else {
- unsigned NumAts = 0;
- if (T->isPointerType()) {
+ if (TAK == TplArgKind::ClassNTTP && T->isPointerType())
Out << "5";
- ++NumAts;
- }
- QualType T = Base.getType();
+ SmallVector<char, 2> EntryTypes;
+ SmallVector<std::function<void()>, 2> EntryManglers;
+ QualType ET = Base.getType();
for (APValue::LValuePathEntry E : V.getLValuePath()) {
- // We don't know how to mangle array subscripting yet.
- if (T->isArrayType())
- goto mangling_unknown;
+ if (auto *AT = ET->getAsArrayTypeUnsafe()) {
+ EntryTypes.push_back('C');
+ EntryManglers.push_back([this, I = E.getAsArrayIndex()] {
+ Out << '0';
+ mangleNumber(I);
+ Out << '@';
+ });
+ ET = AT->getElementType();
+ continue;
+ }
const Decl *D = E.getAsBaseOrMember().getPointer();
- auto *FD = dyn_cast<FieldDecl>(D);
- // We don't know how to mangle derived-to-base conversions yet.
- if (!FD)
- goto mangling_unknown;
-
- Out << "6";
- ++NumAts;
- T = FD->getType();
+ if (auto *FD = dyn_cast<FieldDecl>(D)) {
+ ET = FD->getType();
+ if (const auto *RD = ET->getAsRecordDecl())
+ if (RD->isAnonymousStructOrUnion())
+ continue;
+ } else {
+ ET = getASTContext().getRecordType(cast<CXXRecordDecl>(D));
+ // Bug in MSVC: fully qualified name of base class should be used for
+ // mangling to prevent collisions e.g. on base classes with same names
+ // in different namespaces.
+ }
+
+ EntryTypes.push_back('6');
+ EntryManglers.push_back([this, D] {
+ mangleUnqualifiedName(cast<NamedDecl>(D));
+ Out << '@';
+ });
}
+ for (auto I = EntryTypes.rbegin(), E = EntryTypes.rend(); I != E; ++I)
+ Out << *I;
+
auto *VD = Base.dyn_cast<const ValueDecl*>();
if (!VD)
break;
- Out << "E";
+ Out << (TAK == TplArgKind::ClassNTTP ? 'E' : '1');
mangle(VD);
- for (APValue::LValuePathEntry E : V.getLValuePath()) {
- const Decl *D = E.getAsBaseOrMember().getPointer();
- mangleUnqualifiedName(cast<FieldDecl>(D));
- }
- for (unsigned I = 0; I != NumAts; ++I)
+ for (const std::function<void()> &Mangler : EntryManglers)
+ Mangler();
+ if (TAK == TplArgKind::ClassNTTP && T->isPointerType())
Out << '@';
}
@@ -1754,20 +1905,21 @@ void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T,
if (WithScalarType)
mangleType(T, SourceRange(), QMM_Escape);
- // FIXME: The below manglings don't include a conversion, so bail if there
- // would be one. MSVC mangles the (possibly converted) value of the
- // pointer-to-member object as if it were a struct, leading to collisions
- // in some cases.
- if (!V.getMemberPointerPath().empty())
- break;
-
const CXXRecordDecl *RD =
T->castAs<MemberPointerType>()->getMostRecentCXXRecordDecl();
const ValueDecl *D = V.getMemberPointerDecl();
- if (T->isMemberDataPointerType())
- mangleMemberDataPointer(RD, D, "");
- else
- mangleMemberFunctionPointer(RD, cast_or_null<CXXMethodDecl>(D), "");
+ if (TAK == TplArgKind::ClassNTTP) {
+ if (T->isMemberDataPointerType())
+ mangleMemberDataPointerInClassNTTP(RD, D);
+ else
+ mangleMemberFunctionPointerInClassNTTP(RD,
+ cast_or_null<CXXMethodDecl>(D));
+ } else {
+ if (T->isMemberDataPointerType())
+ mangleMemberDataPointer(RD, D, "");
+ else
+ mangleMemberFunctionPointer(RD, cast_or_null<CXXMethodDecl>(D), "");
+ }
return;
}
@@ -1779,11 +1931,11 @@ void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T,
unsigned BaseIndex = 0;
for (const CXXBaseSpecifier &B : RD->bases())
- mangleTemplateArgValue(B.getType(), V.getStructBase(BaseIndex++));
+ mangleTemplateArgValue(B.getType(), V.getStructBase(BaseIndex++), TAK);
for (const FieldDecl *FD : RD->fields())
if (!FD->isUnnamedBitfield())
mangleTemplateArgValue(FD->getType(),
- V.getStructField(FD->getFieldIndex()),
+ V.getStructField(FD->getFieldIndex()), TAK,
/*WithScalarType*/ true);
Out << '@';
return;
@@ -1794,7 +1946,7 @@ void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T,
mangleType(T, SourceRange(), QMM_Escape);
if (const FieldDecl *FD = V.getUnionField()) {
mangleUnqualifiedName(FD);
- mangleTemplateArgValue(FD->getType(), V.getUnionValue());
+ mangleTemplateArgValue(FD->getType(), V.getUnionValue(), TAK);
}
Out << '@';
return;
@@ -1826,7 +1978,7 @@ void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T,
const APValue &ElemV = I < V.getArrayInitializedElts()
? V.getArrayInitializedElt(I)
: V.getArrayFiller();
- mangleTemplateArgValue(ElemT, ElemV);
+ mangleTemplateArgValue(ElemT, ElemV, TAK);
Out << '@';
}
Out << '@';
@@ -1843,7 +1995,7 @@ void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T,
mangleType(ElemT, SourceRange(), QMM_Escape);
for (unsigned I = 0, N = V.getVectorLength(); I != N; ++I) {
const APValue &ElemV = V.getVectorElt(I);
- mangleTemplateArgValue(ElemT, ElemV);
+ mangleTemplateArgValue(ElemT, ElemV, TAK);
Out << '@';
}
Out << "@@";
@@ -1855,7 +2007,6 @@ void MicrosoftCXXNameMangler::mangleTemplateArgValue(QualType T,
break;
}
-mangling_unknown:
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "cannot mangle this template argument yet");
@@ -1869,9 +2020,9 @@ void MicrosoftCXXNameMangler::mangleObjCProtocol(const ObjCProtocolDecl *PD) {
Stream << "?$";
Extra.mangleSourceName("Protocol");
- Extra.mangleArtificialTagType(TTK_Struct, PD->getName());
+ Extra.mangleArtificialTagType(TagTypeKind::Struct, PD->getName());
- mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__ObjC"});
+ mangleArtificialTagType(TagTypeKind::Struct, TemplateMangling, {"__ObjC"});
}
void MicrosoftCXXNameMangler::mangleObjCLifetime(const QualType Type,
@@ -1900,7 +2051,7 @@ void MicrosoftCXXNameMangler::mangleObjCLifetime(const QualType Type,
Extra.manglePointerExtQualifiers(Quals, Type);
Extra.mangleType(Type, Range);
- mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__ObjC"});
+ mangleArtificialTagType(TagTypeKind::Struct, TemplateMangling, {"__ObjC"});
}
void MicrosoftCXXNameMangler::mangleObjCKindOfType(const ObjCObjectType *T,
@@ -1914,10 +2065,10 @@ void MicrosoftCXXNameMangler::mangleObjCKindOfType(const ObjCObjectType *T,
Extra.mangleSourceName("KindOf");
Extra.mangleType(QualType(T, 0)
.stripObjCKindOfType(getASTContext())
- ->getAs<ObjCObjectType>(),
+ ->castAs<ObjCObjectType>(),
Quals, Range);
- mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__ObjC"});
+ mangleArtificialTagType(TagTypeKind::Struct, TemplateMangling, {"__ObjC"});
}
void MicrosoftCXXNameMangler::mangleQualifiers(Qualifiers Quals,
@@ -2118,7 +2269,8 @@ void MicrosoftCXXNameMangler::manglePassObjectSizeArg(
if (Found == FunArgBackReferences.end()) {
std::string Name =
Dynamic ? "__pass_dynamic_object_size" : "__pass_object_size";
- mangleArtificialTagType(TTK_Enum, Name + llvm::utostr(Type), {"__clang"});
+ mangleArtificialTagType(TagTypeKind::Enum, Name + llvm::utostr(Type),
+ {"__clang"});
if (FunArgBackReferences.size() < 10) {
size_t Size = FunArgBackReferences.size();
@@ -2199,7 +2351,7 @@ void MicrosoftCXXNameMangler::mangleAddressSpaceType(QualType T,
Extra.mangleType(T, Range, QMM_Escape);
mangleQualifiers(Qualifiers(), false);
- mangleArtificialTagType(TTK_Struct, ASMangling, {"__clang"});
+ mangleArtificialTagType(TagTypeKind::Struct, ASMangling, {"__clang"});
}
void MicrosoftCXXNameMangler::mangleType(QualType T, SourceRange Range,
@@ -2381,13 +2533,13 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
llvm_unreachable("placeholder types shouldn't get to name mangling");
case BuiltinType::ObjCId:
- mangleArtificialTagType(TTK_Struct, "objc_object");
+ mangleArtificialTagType(TagTypeKind::Struct, "objc_object");
break;
case BuiltinType::ObjCClass:
- mangleArtificialTagType(TTK_Struct, "objc_class");
+ mangleArtificialTagType(TagTypeKind::Struct, "objc_class");
break;
case BuiltinType::ObjCSel:
- mangleArtificialTagType(TTK_Struct, "objc_selector");
+ mangleArtificialTagType(TagTypeKind::Struct, "objc_selector");
break;
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
@@ -2397,27 +2549,27 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
#include "clang/Basic/OpenCLImageTypes.def"
case BuiltinType::OCLSampler:
Out << "PA";
- mangleArtificialTagType(TTK_Struct, "ocl_sampler");
+ mangleArtificialTagType(TagTypeKind::Struct, "ocl_sampler");
break;
case BuiltinType::OCLEvent:
Out << "PA";
- mangleArtificialTagType(TTK_Struct, "ocl_event");
+ mangleArtificialTagType(TagTypeKind::Struct, "ocl_event");
break;
case BuiltinType::OCLClkEvent:
Out << "PA";
- mangleArtificialTagType(TTK_Struct, "ocl_clkevent");
+ mangleArtificialTagType(TagTypeKind::Struct, "ocl_clkevent");
break;
case BuiltinType::OCLQueue:
Out << "PA";
- mangleArtificialTagType(TTK_Struct, "ocl_queue");
+ mangleArtificialTagType(TagTypeKind::Struct, "ocl_queue");
break;
case BuiltinType::OCLReserveID:
Out << "PA";
- mangleArtificialTagType(TTK_Struct, "ocl_reserveid");
+ mangleArtificialTagType(TagTypeKind::Struct, "ocl_reserveid");
break;
-#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
- case BuiltinType::Id: \
- mangleArtificialTagType(TTK_Struct, "ocl_" #ExtType); \
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id: \
+ mangleArtificialTagType(TagTypeKind::Struct, "ocl_" #ExtType); \
break;
#include "clang/Basic/OpenCLExtensionTypes.def"
@@ -2426,13 +2578,29 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
break;
case BuiltinType::Float16:
- mangleArtificialTagType(TTK_Struct, "_Float16", {"__clang"});
+ mangleArtificialTagType(TagTypeKind::Struct, "_Float16", {"__clang"});
break;
case BuiltinType::Half:
- mangleArtificialTagType(TTK_Struct, "_Half", {"__clang"});
+ if (!getASTContext().getLangOpts().HLSL)
+ mangleArtificialTagType(TagTypeKind::Struct, "_Half", {"__clang"});
+ else if (getASTContext().getLangOpts().NativeHalfType)
+ Out << "$f16@";
+ else
+ Out << "$halff@";
break;
+ case BuiltinType::BFloat16:
+ mangleArtificialTagType(TagTypeKind::Struct, "__bf16", {"__clang"});
+ break;
+
+#define WASM_REF_TYPE(InternalName, MangledName, Id, SingletonId, AS) \
+ case BuiltinType::Id: \
+ mangleArtificialTagType(TagTypeKind::Struct, MangledName); \
+ mangleArtificialTagType(TagTypeKind::Struct, MangledName, {"__clang"}); \
+ break;
+
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define SVE_TYPE(Name, Id, SingletonId) \
case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
@@ -2465,7 +2633,7 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
case BuiltinType::SatUShortFract:
case BuiltinType::SatUFract:
case BuiltinType::SatULongFract:
- case BuiltinType::BFloat16:
+ case BuiltinType::Ibm128:
case BuiltinType::Float128: {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
@@ -2514,7 +2682,7 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(D)) {
if (MD->getParent()->isLambda())
IsInLambda = true;
- if (MD->isInstance())
+ if (MD->isImplicitObjectMemberFunction())
HasThisQuals = true;
if (isa<CXXDestructorDecl>(MD)) {
IsStructor = true;
@@ -2568,7 +2736,7 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
// Copy constructor closure always takes an unqualified reference.
mangleFunctionArgumentType(getASTContext().getLValueReferenceType(
Proto->getParamType(0)
- ->getAs<LValueReferenceType>()
+ ->castAs<LValueReferenceType>()
->getPointeeType(),
/*SpelledAsLValue=*/true),
Range);
@@ -2623,6 +2791,10 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
} else {
// Happens for function pointer type arguments for example.
for (unsigned I = 0, E = Proto->getNumParams(); I != E; ++I) {
+ // Explicit object parameters are prefixed by "_V".
+ if (I == 0 && D && D->getParamDecl(I)->isExplicitObjectParameter())
+ Out << "_V";
+
mangleFunctionArgumentType(Proto->getParamType(I), Range);
// Mangle each pass_object_size parameter as if it's a parameter of enum
// type passed directly after the parameter with the pass_object_size
@@ -2688,7 +2860,7 @@ void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) {
case AS_none:
llvm_unreachable("Unsupported access specifier");
case AS_private:
- if (MD->isStatic())
+ if (!MD->isImplicitObjectMemberFunction())
Out << 'C';
else if (IsVirtual)
Out << 'E';
@@ -2696,7 +2868,7 @@ void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) {
Out << 'A';
break;
case AS_protected:
- if (MD->isStatic())
+ if (!MD->isImplicitObjectMemberFunction())
Out << 'K';
else if (IsVirtual)
Out << 'M';
@@ -2704,7 +2876,7 @@ void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) {
Out << 'I';
break;
case AS_public:
- if (MD->isStatic())
+ if (!MD->isImplicitObjectMemberFunction())
Out << 'S';
else if (IsVirtual)
Out << 'U';
@@ -2731,6 +2903,7 @@ void MicrosoftCXXNameMangler::mangleCallingConvention(CallingConv CC) {
// ::= T # __attribute__((__swiftasynccall__))
// // Clang-only
// ::= w # __regcall
+ // ::= x # __regcall4
// The 'export' calling conventions are from a bygone era
// (*cough*Win16*cough*) when functions were declared for export with
// that keyword. (It didn't actually export them, it just made them so
@@ -2751,7 +2924,12 @@ void MicrosoftCXXNameMangler::mangleCallingConvention(CallingConv CC) {
case CC_Swift: Out << 'S'; break;
case CC_SwiftAsync: Out << 'W'; break;
case CC_PreserveMost: Out << 'U'; break;
- case CC_X86RegCall: Out << 'w'; break;
+ case CC_X86RegCall:
+ if (getASTContext().getLangOpts().RegCall4)
+ Out << "x";
+ else
+ Out << "w";
+ break;
}
}
void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T) {
@@ -2786,19 +2964,19 @@ void MicrosoftCXXNameMangler::mangleType(const UnresolvedUsingType *T,
// <enum-type> ::= W4 <name>
void MicrosoftCXXNameMangler::mangleTagTypeKind(TagTypeKind TTK) {
switch (TTK) {
- case TTK_Union:
- Out << 'T';
- break;
- case TTK_Struct:
- case TTK_Interface:
- Out << 'U';
- break;
- case TTK_Class:
- Out << 'V';
- break;
- case TTK_Enum:
- Out << "W4";
- break;
+ case TagTypeKind::Union:
+ Out << 'T';
+ break;
+ case TagTypeKind::Struct:
+ case TagTypeKind::Interface:
+ Out << 'U';
+ break;
+ case TagTypeKind::Class:
+ Out << 'V';
+ break;
+ case TagTypeKind::Enum:
+ Out << "W4";
+ break;
}
}
void MicrosoftCXXNameMangler::mangleType(const EnumType *T, Qualifiers,
@@ -2824,8 +3002,8 @@ void MicrosoftCXXNameMangler::mangleArtificialTagType(
// Always start with the unqualified name.
mangleSourceName(UnqualifiedName);
- for (auto I = NestedNames.rbegin(), E = NestedNames.rend(); I != E; ++I)
- mangleSourceName(*I);
+ for (StringRef N : llvm::reverse(NestedNames))
+ mangleSourceName(N);
// Terminate the whole name with an '@'.
Out << '@';
@@ -3008,11 +3186,11 @@ void MicrosoftCXXNameMangler::mangleType(const ComplexType *T, Qualifiers,
Extra.mangleSourceName("_Complex");
Extra.mangleType(ElementType, Range, QMM_Escape);
- mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__clang"});
+ mangleArtificialTagType(TagTypeKind::Struct, TemplateMangling, {"__clang"});
}
// Returns true for types that mangleArtificialTagType() gets called for with
-// TTK_Union, TTK_Struct, TTK_Class and where compatibility with MSVC's
+// TagTypeKind Union, Struct, Class and where compatibility with MSVC's
// mangling matters.
// (It doesn't matter for Objective-C types and the like that cl.exe doesn't
// support.)
@@ -3033,23 +3211,29 @@ bool MicrosoftCXXNameMangler::isArtificialTagType(QualType T) const {
void MicrosoftCXXNameMangler::mangleType(const VectorType *T, Qualifiers Quals,
SourceRange Range) {
- const BuiltinType *ET = T->getElementType()->getAs<BuiltinType>();
- assert(ET && "vectors with non-builtin elements are unsupported");
+ QualType EltTy = T->getElementType();
+ const BuiltinType *ET = EltTy->getAs<BuiltinType>();
+ const BitIntType *BitIntTy = EltTy->getAs<BitIntType>();
+ assert((ET || BitIntTy) &&
+ "vectors with non-builtin/_BitInt elements are unsupported");
uint64_t Width = getASTContext().getTypeSize(T);
// Pattern match exactly the typedefs in our intrinsic headers. Anything that
// doesn't match the Intel types uses a custom mangling below.
size_t OutSizeBefore = Out.tell();
if (!isa<ExtVectorType>(T)) {
- if (getASTContext().getTargetInfo().getTriple().isX86()) {
+ if (getASTContext().getTargetInfo().getTriple().isX86() && ET) {
if (Width == 64 && ET->getKind() == BuiltinType::LongLong) {
- mangleArtificialTagType(TTK_Union, "__m64");
+ mangleArtificialTagType(TagTypeKind::Union, "__m64");
} else if (Width >= 128) {
if (ET->getKind() == BuiltinType::Float)
- mangleArtificialTagType(TTK_Union, "__m" + llvm::utostr(Width));
+ mangleArtificialTagType(TagTypeKind::Union,
+ "__m" + llvm::utostr(Width));
else if (ET->getKind() == BuiltinType::LongLong)
- mangleArtificialTagType(TTK_Union, "__m" + llvm::utostr(Width) + 'i');
+ mangleArtificialTagType(TagTypeKind::Union,
+ "__m" + llvm::utostr(Width) + 'i');
else if (ET->getKind() == BuiltinType::Double)
- mangleArtificialTagType(TTK_Struct, "__m" + llvm::utostr(Width) + 'd');
+ mangleArtificialTagType(TagTypeKind::Struct,
+ "__m" + llvm::utostr(Width) + 'd');
}
}
}
@@ -3065,10 +3249,11 @@ void MicrosoftCXXNameMangler::mangleType(const VectorType *T, Qualifiers Quals,
MicrosoftCXXNameMangler Extra(Context, Stream);
Stream << "?$";
Extra.mangleSourceName("__vector");
- Extra.mangleType(QualType(ET, 0), Range, QMM_Escape);
+ Extra.mangleType(QualType(ET ? static_cast<const Type *>(ET) : BitIntTy, 0),
+ Range, QMM_Escape);
Extra.mangleIntegerLiteral(llvm::APSInt::getUnsigned(T->getNumElements()));
- mangleArtificialTagType(TTK_Union, TemplateMangling, {"__clang"});
+ mangleArtificialTagType(TagTypeKind::Union, TemplateMangling, {"__clang"});
}
}
@@ -3124,7 +3309,7 @@ void MicrosoftCXXNameMangler::mangleType(const DependentAddressSpaceType *T,
void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T, Qualifiers,
SourceRange) {
// ObjC interfaces have structs underlying them.
- mangleTagTypeKind(TTK_Struct);
+ mangleTagTypeKind(TagTypeKind::Struct);
mangleName(T->getDecl());
}
@@ -3144,7 +3329,7 @@ void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T,
TemplateArgBackReferences.swap(OuterTemplateArgsContext);
NameBackReferences.swap(OuterTemplateContext);
- mangleTagTypeKind(TTK_Struct);
+ mangleTagTypeKind(TagTypeKind::Struct);
Out << "?$";
if (T->isObjCId())
@@ -3292,7 +3477,7 @@ void MicrosoftCXXNameMangler::mangleType(const AtomicType *T, Qualifiers,
Extra.mangleSourceName("_Atomic");
Extra.mangleType(ValueType, Range, QMM_Escape);
- mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__clang"});
+ mangleArtificialTagType(TagTypeKind::Struct, TemplateMangling, {"__clang"});
}
void MicrosoftCXXNameMangler::mangleType(const PipeType *T, Qualifiers,
@@ -3307,7 +3492,7 @@ void MicrosoftCXXNameMangler::mangleType(const PipeType *T, Qualifiers,
Extra.mangleType(ElementType, Range, QMM_Escape);
Extra.mangleIntegerLiteral(llvm::APSInt::get(T->isReadOnly()));
- mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__clang"});
+ mangleArtificialTagType(TagTypeKind::Struct, TemplateMangling, {"__clang"});
}
void MicrosoftMangleContextImpl::mangleCXXName(GlobalDecl GD,
@@ -3322,39 +3507,39 @@ void MicrosoftMangleContextImpl::mangleCXXName(GlobalDecl GD,
if (auto *CD = dyn_cast<CXXConstructorDecl>(D)) {
auto Type = GD.getCtorType();
MicrosoftCXXNameMangler mangler(*this, MHO, CD, Type);
- return mangler.mangle(D);
+ return mangler.mangle(GD);
}
if (auto *DD = dyn_cast<CXXDestructorDecl>(D)) {
auto Type = GD.getDtorType();
MicrosoftCXXNameMangler mangler(*this, MHO, DD, Type);
- return mangler.mangle(D);
+ return mangler.mangle(GD);
}
MicrosoftCXXNameMangler Mangler(*this, MHO);
- return Mangler.mangle(D);
+ return Mangler.mangle(GD);
}
-void MicrosoftCXXNameMangler::mangleType(const ExtIntType *T, Qualifiers,
+void MicrosoftCXXNameMangler::mangleType(const BitIntType *T, Qualifiers,
SourceRange Range) {
llvm::SmallString<64> TemplateMangling;
llvm::raw_svector_ostream Stream(TemplateMangling);
MicrosoftCXXNameMangler Extra(Context, Stream);
Stream << "?$";
if (T->isUnsigned())
- Extra.mangleSourceName("_UExtInt");
+ Extra.mangleSourceName("_UBitInt");
else
- Extra.mangleSourceName("_ExtInt");
+ Extra.mangleSourceName("_BitInt");
Extra.mangleIntegerLiteral(llvm::APSInt::getUnsigned(T->getNumBits()));
- mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__clang"});
+ mangleArtificialTagType(TagTypeKind::Struct, TemplateMangling, {"__clang"});
}
-void MicrosoftCXXNameMangler::mangleType(const DependentExtIntType *T,
+void MicrosoftCXXNameMangler::mangleType(const DependentBitIntType *T,
Qualifiers, SourceRange Range) {
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID = Diags.getCustomDiagID(
- DiagnosticsEngine::Error, "cannot mangle this DependentExtInt type yet");
+ DiagnosticsEngine::Error, "cannot mangle this DependentBitInt type yet");
Diags.Report(Range.getBegin(), DiagID) << Range;
}
@@ -3538,8 +3723,8 @@ void MicrosoftMangleContextImpl::mangleCXXRTTI(QualType T, raw_ostream &Out) {
Mangler.getStream() << "@8";
}
-void MicrosoftMangleContextImpl::mangleCXXRTTIName(QualType T,
- raw_ostream &Out) {
+void MicrosoftMangleContextImpl::mangleCXXRTTIName(
+ QualType T, raw_ostream &Out, bool NormalizeIntegers = false) {
MicrosoftCXXNameMangler Mangler(*this, Out);
Mangler.getStream() << '.';
Mangler.mangleType(T, SourceRange(), MicrosoftCXXNameMangler::QMM_Result);
@@ -3602,7 +3787,7 @@ void MicrosoftMangleContextImpl::mangleCXXCatchableType(
// FIXME: It is known that the Ctor is present in 2013, and in 2017.7
// (_MSC_VER 1914) and newer, and that it's omitted in 2015 and 2017.4
// (_MSC_VER 1911), but it's unknown when exactly it reappeared (1914?
- // Or 1912, 1913 aleady?).
+ // Or 1912, 1913 already?).
bool OmitCopyCtor = getASTContext().getLangOpts().isCompatibleWithMSVC(
LangOptions::MSVC2015) &&
!getASTContext().getLangOpts().isCompatibleWithMSVC(
@@ -3670,20 +3855,20 @@ void MicrosoftMangleContextImpl::mangleCXXRTTICompleteObjectLocator(
llvm::raw_svector_ostream Stream(VFTableMangling);
mangleCXXVFTable(Derived, BasePath, Stream);
- if (VFTableMangling.startswith("??@")) {
- assert(VFTableMangling.endswith("@"));
+ if (VFTableMangling.starts_with("??@")) {
+ assert(VFTableMangling.ends_with("@"));
Out << VFTableMangling << "??_R4@";
return;
}
- assert(VFTableMangling.startswith("??_7") ||
- VFTableMangling.startswith("??_S"));
+ assert(VFTableMangling.starts_with("??_7") ||
+ VFTableMangling.starts_with("??_S"));
Out << "??_R4" << VFTableMangling.str().drop_front(4);
}
void MicrosoftMangleContextImpl::mangleSEHFilterExpression(
- const NamedDecl *EnclosingDecl, raw_ostream &Out) {
+ GlobalDecl EnclosingDecl, raw_ostream &Out) {
msvc_hashing_ostream MHO(Out);
MicrosoftCXXNameMangler Mangler(*this, MHO);
// The function body is in the same comdat as the function with the handler,
@@ -3695,7 +3880,7 @@ void MicrosoftMangleContextImpl::mangleSEHFilterExpression(
}
void MicrosoftMangleContextImpl::mangleSEHFinallyBlock(
- const NamedDecl *EnclosingDecl, raw_ostream &Out) {
+ GlobalDecl EnclosingDecl, raw_ostream &Out) {
msvc_hashing_ostream MHO(Out);
MicrosoftCXXNameMangler Mangler(*this, MHO);
// The function body is in the same comdat as the function with the handler,
@@ -3706,12 +3891,13 @@ void MicrosoftMangleContextImpl::mangleSEHFinallyBlock(
Mangler.mangleName(EnclosingDecl);
}
-void MicrosoftMangleContextImpl::mangleTypeName(QualType T, raw_ostream &Out) {
+void MicrosoftMangleContextImpl::mangleCanonicalTypeName(
+ QualType T, raw_ostream &Out, bool NormalizeIntegers = false) {
// This is just a made up unique string for the purposes of tbaa. undname
// does *not* know how to demangle it.
MicrosoftCXXNameMangler Mangler(*this, Out);
Mangler.getStream() << '?';
- Mangler.mangleType(T, SourceRange());
+ Mangler.mangleType(T.getCanonicalType(), SourceRange());
}
void MicrosoftMangleContextImpl::mangleReferenceTemporary(
@@ -3883,7 +4069,7 @@ void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
// - ?[A-Z]: The range from \xc1 to \xda.
// - ?[0-9]: The set of [,/\:. \n\t'-].
// - ?$XX: A fallback which maps nibbles.
- if (isIdentifierBody(Byte, /*AllowDollar=*/true)) {
+ if (isAsciiIdentifierContinue(Byte, /*AllowDollar=*/true)) {
Mangler.getStream() << Byte;
} else if (isLetter(Byte & 0x7f)) {
Mangler.getStream() << '?' << static_cast<char>(Byte & 0x7f);
@@ -3914,7 +4100,8 @@ void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
Mangler.getStream() << '@';
}
-MicrosoftMangleContext *
-MicrosoftMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags) {
- return new MicrosoftMangleContextImpl(Context, Diags);
+MicrosoftMangleContext *MicrosoftMangleContext::create(ASTContext &Context,
+ DiagnosticsEngine &Diags,
+ bool IsAux) {
+ return new MicrosoftMangleContextImpl(Context, Diags, IsAux);
}
diff --git a/contrib/llvm-project/clang/lib/AST/NSAPI.cpp b/contrib/llvm-project/clang/lib/AST/NSAPI.cpp
index 861060d7c875..86dee540e9e2 100644
--- a/contrib/llvm-project/clang/lib/AST/NSAPI.cpp
+++ b/contrib/llvm-project/clang/lib/AST/NSAPI.cpp
@@ -11,6 +11,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "llvm/ADT/StringSwitch.h"
+#include <optional>
using namespace clang;
@@ -142,14 +143,15 @@ Selector NSAPI::getNSArraySelector(NSArrayMethodKind MK) const {
return NSArraySelectors[MK];
}
-Optional<NSAPI::NSArrayMethodKind> NSAPI::getNSArrayMethodKind(Selector Sel) {
+std::optional<NSAPI::NSArrayMethodKind>
+NSAPI::getNSArrayMethodKind(Selector Sel) {
for (unsigned i = 0; i != NumNSArrayMethods; ++i) {
NSArrayMethodKind MK = NSArrayMethodKind(i);
if (Sel == getNSArraySelector(MK))
return MK;
}
- return None;
+ return std::nullopt;
}
Selector NSAPI::getNSDictionarySelector(
@@ -243,7 +245,7 @@ Selector NSAPI::getNSDictionarySelector(
return NSDictionarySelectors[MK];
}
-Optional<NSAPI::NSDictionaryMethodKind>
+std::optional<NSAPI::NSDictionaryMethodKind>
NSAPI::getNSDictionaryMethodKind(Selector Sel) {
for (unsigned i = 0; i != NumNSDictionaryMethods; ++i) {
NSDictionaryMethodKind MK = NSDictionaryMethodKind(i);
@@ -251,7 +253,7 @@ NSAPI::getNSDictionaryMethodKind(Selector Sel) {
return MK;
}
- return None;
+ return std::nullopt;
}
Selector NSAPI::getNSSetSelector(NSSetMethodKind MK) const {
@@ -300,15 +302,14 @@ Selector NSAPI::getNSSetSelector(NSSetMethodKind MK) const {
return NSSetSelectors[MK];
}
-Optional<NSAPI::NSSetMethodKind>
-NSAPI::getNSSetMethodKind(Selector Sel) {
+std::optional<NSAPI::NSSetMethodKind> NSAPI::getNSSetMethodKind(Selector Sel) {
for (unsigned i = 0; i != NumNSSetMethods; ++i) {
NSSetMethodKind MK = NSSetMethodKind(i);
if (Sel == getNSSetSelector(MK))
return MK;
}
- return None;
+ return std::nullopt;
}
Selector NSAPI::getNSNumberLiteralSelector(NSNumberLiteralMethodKind MK,
@@ -363,7 +364,7 @@ Selector NSAPI::getNSNumberLiteralSelector(NSNumberLiteralMethodKind MK,
return Sels[MK];
}
-Optional<NSAPI::NSNumberLiteralMethodKind>
+std::optional<NSAPI::NSNumberLiteralMethodKind>
NSAPI::getNSNumberLiteralMethodKind(Selector Sel) const {
for (unsigned i = 0; i != NumNSNumberLiteralMethods; ++i) {
NSNumberLiteralMethodKind MK = NSNumberLiteralMethodKind(i);
@@ -371,14 +372,14 @@ NSAPI::getNSNumberLiteralMethodKind(Selector Sel) const {
return MK;
}
- return None;
+ return std::nullopt;
}
-Optional<NSAPI::NSNumberLiteralMethodKind>
+std::optional<NSAPI::NSNumberLiteralMethodKind>
NSAPI::getNSNumberFactoryMethodKind(QualType T) const {
const BuiltinType *BT = T->getAs<BuiltinType>();
if (!BT)
- return None;
+ return std::nullopt;
const TypedefType *TDT = T->getAs<TypedefType>();
if (TDT) {
@@ -456,6 +457,7 @@ NSAPI::getNSNumberFactoryMethodKind(QualType T) const {
case BuiltinType::UInt128:
case BuiltinType::Float16:
case BuiltinType::Float128:
+ case BuiltinType::Ibm128:
case BuiltinType::NullPtr:
case BuiltinType::ObjCClass:
case BuiltinType::ObjCId:
@@ -479,6 +481,8 @@ NSAPI::getNSNumberFactoryMethodKind(QualType T) const {
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
case BuiltinType::BoundMember:
case BuiltinType::Dependent:
case BuiltinType::Overload:
@@ -495,7 +499,7 @@ NSAPI::getNSNumberFactoryMethodKind(QualType T) const {
break;
}
- return None;
+ return std::nullopt;
}
/// Returns true if \param T is a typedef of "BOOL" in objective-c.
diff --git a/contrib/llvm-project/clang/lib/AST/NestedNameSpecifier.cpp b/contrib/llvm-project/clang/lib/AST/NestedNameSpecifier.cpp
index 21afdd1570f4..36f2c47b3000 100644
--- a/contrib/llvm-project/clang/lib/AST/NestedNameSpecifier.cpp
+++ b/contrib/llvm-project/clang/lib/AST/NestedNameSpecifier.cpp
@@ -280,14 +280,14 @@ void NestedNameSpecifier::print(raw_ostream &OS, const PrintingPolicy &Policy,
case TypeSpecWithTemplate:
OS << "template ";
// Fall through to print the type.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case TypeSpec: {
const auto *Record =
dyn_cast_or_null<ClassTemplateSpecializationDecl>(getAsRecordDecl());
if (ResolveTemplateArguments && Record) {
// Print the type trait with resolved template parameters.
- Record->printName(OS);
+ Record->printName(OS, Policy);
printTemplateArgumentList(
OS, Record->getTemplateArgs().asArray(), Policy,
Record->getSpecializedTemplate()->getTemplateParameters());
@@ -311,7 +311,8 @@ void NestedNameSpecifier::print(raw_ostream &OS, const PrintingPolicy &Policy,
= dyn_cast<TemplateSpecializationType>(T)) {
// Print the template name without its corresponding
// nested-name-specifier.
- SpecType->getTemplateName().print(OS, InnerPolicy, true);
+ SpecType->getTemplateName().print(OS, InnerPolicy,
+ TemplateName::Qualified::None);
// Print the template argument list.
printTemplateArgumentList(OS, SpecType->template_arguments(),
diff --git a/contrib/llvm-project/clang/lib/AST/ODRDiagsEmitter.cpp b/contrib/llvm-project/clang/lib/AST/ODRDiagsEmitter.cpp
new file mode 100644
index 000000000000..5b1cdc16e2ea
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/ODRDiagsEmitter.cpp
@@ -0,0 +1,2213 @@
+//===-- ODRDiagsEmitter.cpp - Diagnostics for ODR mismatches ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ODRDiagsEmitter.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ODRHash.h"
+#include "clang/Basic/DiagnosticAST.h"
+#include "clang/Basic/Module.h"
+
+using namespace clang;
+
+static unsigned computeODRHash(QualType Ty) {
+ ODRHash Hasher;
+ Hasher.AddQualType(Ty);
+ return Hasher.CalculateHash();
+}
+
+static unsigned computeODRHash(const Stmt *S) {
+ ODRHash Hasher;
+ Hasher.AddStmt(S);
+ return Hasher.CalculateHash();
+}
+
+static unsigned computeODRHash(const Decl *D) {
+ assert(D);
+ ODRHash Hasher;
+ Hasher.AddSubDecl(D);
+ return Hasher.CalculateHash();
+}
+
+static unsigned computeODRHash(const TemplateArgument &TA) {
+ ODRHash Hasher;
+ Hasher.AddTemplateArgument(TA);
+ return Hasher.CalculateHash();
+}
+
+std::string ODRDiagsEmitter::getOwningModuleNameForDiagnostic(const Decl *D) {
+ // If we know the owning module, use it.
+ if (Module *M = D->getImportedOwningModule())
+ return M->getFullModuleName();
+
+ // Not from a module.
+ return {};
+}
+
+template <typename MethodT>
+static bool diagnoseSubMismatchMethodParameters(DiagnosticsEngine &Diags,
+ const NamedDecl *FirstContainer,
+ StringRef FirstModule,
+ StringRef SecondModule,
+ const MethodT *FirstMethod,
+ const MethodT *SecondMethod) {
+ enum DiagMethodType {
+ DiagMethod,
+ DiagConstructor,
+ DiagDestructor,
+ };
+ auto GetDiagMethodType = [](const NamedDecl *D) {
+ if (isa<CXXConstructorDecl>(D))
+ return DiagConstructor;
+ if (isa<CXXDestructorDecl>(D))
+ return DiagDestructor;
+ return DiagMethod;
+ };
+
+ enum ODRMethodParametersDifference {
+ NumberParameters,
+ ParameterType,
+ ParameterName,
+ };
+ auto DiagError = [&Diags, &GetDiagMethodType, FirstContainer, FirstModule,
+ FirstMethod](ODRMethodParametersDifference DiffType) {
+ DeclarationName FirstName = FirstMethod->getDeclName();
+ DiagMethodType FirstMethodType = GetDiagMethodType(FirstMethod);
+ return Diags.Report(FirstMethod->getLocation(),
+ diag::err_module_odr_violation_method_params)
+ << FirstContainer << FirstModule.empty() << FirstModule
+ << FirstMethod->getSourceRange() << DiffType << FirstMethodType
+ << FirstName;
+ };
+ auto DiagNote = [&Diags, &GetDiagMethodType, SecondModule,
+ SecondMethod](ODRMethodParametersDifference DiffType) {
+ DeclarationName SecondName = SecondMethod->getDeclName();
+ DiagMethodType SecondMethodType = GetDiagMethodType(SecondMethod);
+ return Diags.Report(SecondMethod->getLocation(),
+ diag::note_module_odr_violation_method_params)
+ << SecondModule.empty() << SecondModule
+ << SecondMethod->getSourceRange() << DiffType << SecondMethodType
+ << SecondName;
+ };
+
+ const unsigned FirstNumParameters = FirstMethod->param_size();
+ const unsigned SecondNumParameters = SecondMethod->param_size();
+ if (FirstNumParameters != SecondNumParameters) {
+ DiagError(NumberParameters) << FirstNumParameters;
+ DiagNote(NumberParameters) << SecondNumParameters;
+ return true;
+ }
+
+ for (unsigned I = 0; I < FirstNumParameters; ++I) {
+ const ParmVarDecl *FirstParam = FirstMethod->getParamDecl(I);
+ const ParmVarDecl *SecondParam = SecondMethod->getParamDecl(I);
+
+ QualType FirstParamType = FirstParam->getType();
+ QualType SecondParamType = SecondParam->getType();
+ if (FirstParamType != SecondParamType &&
+ computeODRHash(FirstParamType) != computeODRHash(SecondParamType)) {
+ if (const DecayedType *ParamDecayedType =
+ FirstParamType->getAs<DecayedType>()) {
+ DiagError(ParameterType) << (I + 1) << FirstParamType << true
+ << ParamDecayedType->getOriginalType();
+ } else {
+ DiagError(ParameterType) << (I + 1) << FirstParamType << false;
+ }
+
+ if (const DecayedType *ParamDecayedType =
+ SecondParamType->getAs<DecayedType>()) {
+ DiagNote(ParameterType) << (I + 1) << SecondParamType << true
+ << ParamDecayedType->getOriginalType();
+ } else {
+ DiagNote(ParameterType) << (I + 1) << SecondParamType << false;
+ }
+ return true;
+ }
+
+ DeclarationName FirstParamName = FirstParam->getDeclName();
+ DeclarationName SecondParamName = SecondParam->getDeclName();
+ if (FirstParamName != SecondParamName) {
+ DiagError(ParameterName) << (I + 1) << FirstParamName;
+ DiagNote(ParameterName) << (I + 1) << SecondParamName;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool ODRDiagsEmitter::diagnoseSubMismatchField(
+ const NamedDecl *FirstRecord, StringRef FirstModule, StringRef SecondModule,
+ const FieldDecl *FirstField, const FieldDecl *SecondField) const {
+ enum ODRFieldDifference {
+ FieldName,
+ FieldTypeName,
+ FieldSingleBitField,
+ FieldDifferentWidthBitField,
+ FieldSingleMutable,
+ FieldSingleInitializer,
+ FieldDifferentInitializers,
+ };
+
+ auto DiagError = [FirstRecord, FirstField, FirstModule,
+ this](ODRFieldDifference DiffType) {
+ return Diag(FirstField->getLocation(), diag::err_module_odr_violation_field)
+ << FirstRecord << FirstModule.empty() << FirstModule
+ << FirstField->getSourceRange() << DiffType;
+ };
+ auto DiagNote = [SecondField, SecondModule,
+ this](ODRFieldDifference DiffType) {
+ return Diag(SecondField->getLocation(),
+ diag::note_module_odr_violation_field)
+ << SecondModule.empty() << SecondModule << SecondField->getSourceRange() << DiffType;
+ };
+
+ IdentifierInfo *FirstII = FirstField->getIdentifier();
+ IdentifierInfo *SecondII = SecondField->getIdentifier();
+ if (FirstII->getName() != SecondII->getName()) {
+ DiagError(FieldName) << FirstII;
+ DiagNote(FieldName) << SecondII;
+ return true;
+ }
+
+ QualType FirstType = FirstField->getType();
+ QualType SecondType = SecondField->getType();
+ if (computeODRHash(FirstType) != computeODRHash(SecondType)) {
+ DiagError(FieldTypeName) << FirstII << FirstType;
+ DiagNote(FieldTypeName) << SecondII << SecondType;
+ return true;
+ }
+
+ assert(Context.hasSameType(FirstField->getType(), SecondField->getType()));
+ (void)Context;
+
+ const bool IsFirstBitField = FirstField->isBitField();
+ const bool IsSecondBitField = SecondField->isBitField();
+ if (IsFirstBitField != IsSecondBitField) {
+ DiagError(FieldSingleBitField) << FirstII << IsFirstBitField;
+ DiagNote(FieldSingleBitField) << SecondII << IsSecondBitField;
+ return true;
+ }
+
+ if (IsFirstBitField && IsSecondBitField) {
+ unsigned FirstBitWidthHash = computeODRHash(FirstField->getBitWidth());
+ unsigned SecondBitWidthHash = computeODRHash(SecondField->getBitWidth());
+ if (FirstBitWidthHash != SecondBitWidthHash) {
+ DiagError(FieldDifferentWidthBitField)
+ << FirstII << FirstField->getBitWidth()->getSourceRange();
+ DiagNote(FieldDifferentWidthBitField)
+ << SecondII << SecondField->getBitWidth()->getSourceRange();
+ return true;
+ }
+ }
+
+ if (!LangOpts.CPlusPlus)
+ return false;
+
+ const bool IsFirstMutable = FirstField->isMutable();
+ const bool IsSecondMutable = SecondField->isMutable();
+ if (IsFirstMutable != IsSecondMutable) {
+ DiagError(FieldSingleMutable) << FirstII << IsFirstMutable;
+ DiagNote(FieldSingleMutable) << SecondII << IsSecondMutable;
+ return true;
+ }
+
+ const Expr *FirstInitializer = FirstField->getInClassInitializer();
+ const Expr *SecondInitializer = SecondField->getInClassInitializer();
+ if ((!FirstInitializer && SecondInitializer) ||
+ (FirstInitializer && !SecondInitializer)) {
+ DiagError(FieldSingleInitializer)
+ << FirstII << (FirstInitializer != nullptr);
+ DiagNote(FieldSingleInitializer)
+ << SecondII << (SecondInitializer != nullptr);
+ return true;
+ }
+
+ if (FirstInitializer && SecondInitializer) {
+ unsigned FirstInitHash = computeODRHash(FirstInitializer);
+ unsigned SecondInitHash = computeODRHash(SecondInitializer);
+ if (FirstInitHash != SecondInitHash) {
+ DiagError(FieldDifferentInitializers)
+ << FirstII << FirstInitializer->getSourceRange();
+ DiagNote(FieldDifferentInitializers)
+ << SecondII << SecondInitializer->getSourceRange();
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool ODRDiagsEmitter::diagnoseSubMismatchTypedef(
+ const NamedDecl *FirstRecord, StringRef FirstModule, StringRef SecondModule,
+ const TypedefNameDecl *FirstTD, const TypedefNameDecl *SecondTD,
+ bool IsTypeAlias) const {
+ enum ODRTypedefDifference {
+ TypedefName,
+ TypedefType,
+ };
+
+ auto DiagError = [FirstRecord, FirstTD, FirstModule,
+ this](ODRTypedefDifference DiffType) {
+ return Diag(FirstTD->getLocation(), diag::err_module_odr_violation_typedef)
+ << FirstRecord << FirstModule.empty() << FirstModule
+ << FirstTD->getSourceRange() << DiffType;
+ };
+ auto DiagNote = [SecondTD, SecondModule,
+ this](ODRTypedefDifference DiffType) {
+ return Diag(SecondTD->getLocation(),
+ diag::note_module_odr_violation_typedef)
+ << SecondModule << SecondTD->getSourceRange() << DiffType;
+ };
+
+ DeclarationName FirstName = FirstTD->getDeclName();
+ DeclarationName SecondName = SecondTD->getDeclName();
+ if (FirstName != SecondName) {
+ DiagError(TypedefName) << IsTypeAlias << FirstName;
+ DiagNote(TypedefName) << IsTypeAlias << SecondName;
+ return true;
+ }
+
+ QualType FirstType = FirstTD->getUnderlyingType();
+ QualType SecondType = SecondTD->getUnderlyingType();
+ if (computeODRHash(FirstType) != computeODRHash(SecondType)) {
+ DiagError(TypedefType) << IsTypeAlias << FirstName << FirstType;
+ DiagNote(TypedefType) << IsTypeAlias << SecondName << SecondType;
+ return true;
+ }
+ return false;
+}
+
+bool ODRDiagsEmitter::diagnoseSubMismatchVar(const NamedDecl *FirstRecord,
+ StringRef FirstModule,
+ StringRef SecondModule,
+ const VarDecl *FirstVD,
+ const VarDecl *SecondVD) const {
+ enum ODRVarDifference {
+ VarName,
+ VarType,
+ VarSingleInitializer,
+ VarDifferentInitializer,
+ VarConstexpr,
+ };
+
+ auto DiagError = [FirstRecord, FirstVD, FirstModule,
+ this](ODRVarDifference DiffType) {
+ return Diag(FirstVD->getLocation(), diag::err_module_odr_violation_variable)
+ << FirstRecord << FirstModule.empty() << FirstModule
+ << FirstVD->getSourceRange() << DiffType;
+ };
+ auto DiagNote = [SecondVD, SecondModule, this](ODRVarDifference DiffType) {
+ return Diag(SecondVD->getLocation(),
+ diag::note_module_odr_violation_variable)
+ << SecondModule << SecondVD->getSourceRange() << DiffType;
+ };
+
+ DeclarationName FirstName = FirstVD->getDeclName();
+ DeclarationName SecondName = SecondVD->getDeclName();
+ if (FirstName != SecondName) {
+ DiagError(VarName) << FirstName;
+ DiagNote(VarName) << SecondName;
+ return true;
+ }
+
+ QualType FirstType = FirstVD->getType();
+ QualType SecondType = SecondVD->getType();
+ if (computeODRHash(FirstType) != computeODRHash(SecondType)) {
+ DiagError(VarType) << FirstName << FirstType;
+ DiagNote(VarType) << SecondName << SecondType;
+ return true;
+ }
+
+ if (!LangOpts.CPlusPlus)
+ return false;
+
+ const Expr *FirstInit = FirstVD->getInit();
+ const Expr *SecondInit = SecondVD->getInit();
+ if ((FirstInit == nullptr) != (SecondInit == nullptr)) {
+ DiagError(VarSingleInitializer)
+ << FirstName << (FirstInit == nullptr)
+ << (FirstInit ? FirstInit->getSourceRange() : SourceRange());
+ DiagNote(VarSingleInitializer)
+ << SecondName << (SecondInit == nullptr)
+ << (SecondInit ? SecondInit->getSourceRange() : SourceRange());
+ return true;
+ }
+
+ if (FirstInit && SecondInit &&
+ computeODRHash(FirstInit) != computeODRHash(SecondInit)) {
+ DiagError(VarDifferentInitializer)
+ << FirstName << FirstInit->getSourceRange();
+ DiagNote(VarDifferentInitializer)
+ << SecondName << SecondInit->getSourceRange();
+ return true;
+ }
+
+ const bool FirstIsConstexpr = FirstVD->isConstexpr();
+ const bool SecondIsConstexpr = SecondVD->isConstexpr();
+ if (FirstIsConstexpr != SecondIsConstexpr) {
+ DiagError(VarConstexpr) << FirstName << FirstIsConstexpr;
+ DiagNote(VarConstexpr) << SecondName << SecondIsConstexpr;
+ return true;
+ }
+ return false;
+}
+
+bool ODRDiagsEmitter::diagnoseSubMismatchProtocols(
+ const ObjCProtocolList &FirstProtocols,
+ const ObjCContainerDecl *FirstContainer, StringRef FirstModule,
+ const ObjCProtocolList &SecondProtocols,
+ const ObjCContainerDecl *SecondContainer, StringRef SecondModule) const {
+ // Keep in sync with err_module_odr_violation_referenced_protocols.
+ enum ODRReferencedProtocolDifference {
+ NumProtocols,
+ ProtocolType,
+ };
+ auto DiagRefProtocolError = [FirstContainer, FirstModule,
+ this](SourceLocation Loc, SourceRange Range,
+ ODRReferencedProtocolDifference DiffType) {
+ return Diag(Loc, diag::err_module_odr_violation_referenced_protocols)
+ << FirstContainer << FirstModule.empty() << FirstModule << Range
+ << DiffType;
+ };
+ auto DiagRefProtocolNote = [SecondModule,
+ this](SourceLocation Loc, SourceRange Range,
+ ODRReferencedProtocolDifference DiffType) {
+ return Diag(Loc, diag::note_module_odr_violation_referenced_protocols)
+ << SecondModule.empty() << SecondModule << Range << DiffType;
+ };
+ auto GetProtoListSourceRange = [](const ObjCProtocolList &PL) {
+ if (PL.empty())
+ return SourceRange();
+ return SourceRange(*PL.loc_begin(), *std::prev(PL.loc_end()));
+ };
+
+ if (FirstProtocols.size() != SecondProtocols.size()) {
+ DiagRefProtocolError(FirstContainer->getLocation(),
+ GetProtoListSourceRange(FirstProtocols), NumProtocols)
+ << FirstProtocols.size();
+ DiagRefProtocolNote(SecondContainer->getLocation(),
+ GetProtoListSourceRange(SecondProtocols), NumProtocols)
+ << SecondProtocols.size();
+ return true;
+ }
+
+ for (unsigned I = 0, E = FirstProtocols.size(); I != E; ++I) {
+ const ObjCProtocolDecl *FirstProtocol = FirstProtocols[I];
+ const ObjCProtocolDecl *SecondProtocol = SecondProtocols[I];
+ DeclarationName FirstProtocolName = FirstProtocol->getDeclName();
+ DeclarationName SecondProtocolName = SecondProtocol->getDeclName();
+ if (FirstProtocolName != SecondProtocolName) {
+ SourceLocation FirstLoc = *(FirstProtocols.loc_begin() + I);
+ SourceLocation SecondLoc = *(SecondProtocols.loc_begin() + I);
+ SourceRange EmptyRange;
+ DiagRefProtocolError(FirstLoc, EmptyRange, ProtocolType)
+ << (I + 1) << FirstProtocolName;
+ DiagRefProtocolNote(SecondLoc, EmptyRange, ProtocolType)
+ << (I + 1) << SecondProtocolName;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool ODRDiagsEmitter::diagnoseSubMismatchObjCMethod(
+ const NamedDecl *FirstObjCContainer, StringRef FirstModule,
+ StringRef SecondModule, const ObjCMethodDecl *FirstMethod,
+ const ObjCMethodDecl *SecondMethod) const {
+ enum ODRMethodDifference {
+ ReturnType,
+ InstanceOrClass,
+ ControlLevel, // optional/required
+ DesignatedInitializer,
+ Directness,
+ Name,
+ };
+
+ auto DiagError = [FirstObjCContainer, FirstModule, FirstMethod,
+ this](ODRMethodDifference DiffType) {
+ return Diag(FirstMethod->getLocation(),
+ diag::err_module_odr_violation_objc_method)
+ << FirstObjCContainer << FirstModule.empty() << FirstModule
+ << FirstMethod->getSourceRange() << DiffType;
+ };
+ auto DiagNote = [SecondModule, SecondMethod,
+ this](ODRMethodDifference DiffType) {
+ return Diag(SecondMethod->getLocation(),
+ diag::note_module_odr_violation_objc_method)
+ << SecondModule.empty() << SecondModule
+ << SecondMethod->getSourceRange() << DiffType;
+ };
+
+ if (computeODRHash(FirstMethod->getReturnType()) !=
+ computeODRHash(SecondMethod->getReturnType())) {
+ DiagError(ReturnType) << FirstMethod << FirstMethod->getReturnType();
+ DiagNote(ReturnType) << SecondMethod << SecondMethod->getReturnType();
+ return true;
+ }
+
+ if (FirstMethod->isInstanceMethod() != SecondMethod->isInstanceMethod()) {
+ DiagError(InstanceOrClass)
+ << FirstMethod << FirstMethod->isInstanceMethod();
+ DiagNote(InstanceOrClass)
+ << SecondMethod << SecondMethod->isInstanceMethod();
+ return true;
+ }
+ if (FirstMethod->getImplementationControl() !=
+ SecondMethod->getImplementationControl()) {
+ DiagError(ControlLevel)
+ << llvm::to_underlying(FirstMethod->getImplementationControl());
+ DiagNote(ControlLevel) << llvm::to_underlying(
+ SecondMethod->getImplementationControl());
+ return true;
+ }
+ if (FirstMethod->isThisDeclarationADesignatedInitializer() !=
+ SecondMethod->isThisDeclarationADesignatedInitializer()) {
+ DiagError(DesignatedInitializer)
+ << FirstMethod
+ << FirstMethod->isThisDeclarationADesignatedInitializer();
+ DiagNote(DesignatedInitializer)
+ << SecondMethod
+ << SecondMethod->isThisDeclarationADesignatedInitializer();
+ return true;
+ }
+ if (FirstMethod->isDirectMethod() != SecondMethod->isDirectMethod()) {
+ DiagError(Directness) << FirstMethod << FirstMethod->isDirectMethod();
+ DiagNote(Directness) << SecondMethod << SecondMethod->isDirectMethod();
+ return true;
+ }
+ if (diagnoseSubMismatchMethodParameters(Diags, FirstObjCContainer,
+ FirstModule, SecondModule,
+ FirstMethod, SecondMethod))
+ return true;
+
+ // Check method name *after* looking at the parameters otherwise we get a
+ // less ideal diagnostics: a ObjCMethodName mismatch given that selectors
+ // for different parameters are likely to be different.
+ DeclarationName FirstName = FirstMethod->getDeclName();
+ DeclarationName SecondName = SecondMethod->getDeclName();
+ if (FirstName != SecondName) {
+ DiagError(Name) << FirstName;
+ DiagNote(Name) << SecondName;
+ return true;
+ }
+
+ return false;
+}
+
+bool ODRDiagsEmitter::diagnoseSubMismatchObjCProperty(
+ const NamedDecl *FirstObjCContainer, StringRef FirstModule,
+ StringRef SecondModule, const ObjCPropertyDecl *FirstProp,
+ const ObjCPropertyDecl *SecondProp) const {
+ enum ODRPropertyDifference {
+ Name,
+ Type,
+ ControlLevel, // optional/required
+ Attribute,
+ };
+
+ auto DiagError = [FirstObjCContainer, FirstModule, FirstProp,
+ this](SourceLocation Loc, ODRPropertyDifference DiffType) {
+ return Diag(Loc, diag::err_module_odr_violation_objc_property)
+ << FirstObjCContainer << FirstModule.empty() << FirstModule
+ << FirstProp->getSourceRange() << DiffType;
+ };
+ auto DiagNote = [SecondModule, SecondProp,
+ this](SourceLocation Loc, ODRPropertyDifference DiffType) {
+ return Diag(Loc, diag::note_module_odr_violation_objc_property)
+ << SecondModule.empty() << SecondModule
+ << SecondProp->getSourceRange() << DiffType;
+ };
+
+ IdentifierInfo *FirstII = FirstProp->getIdentifier();
+ IdentifierInfo *SecondII = SecondProp->getIdentifier();
+ if (FirstII->getName() != SecondII->getName()) {
+ DiagError(FirstProp->getLocation(), Name) << FirstII;
+ DiagNote(SecondProp->getLocation(), Name) << SecondII;
+ return true;
+ }
+ if (computeODRHash(FirstProp->getType()) !=
+ computeODRHash(SecondProp->getType())) {
+ DiagError(FirstProp->getLocation(), Type)
+ << FirstII << FirstProp->getType();
+ DiagNote(SecondProp->getLocation(), Type)
+ << SecondII << SecondProp->getType();
+ return true;
+ }
+ if (FirstProp->getPropertyImplementation() !=
+ SecondProp->getPropertyImplementation()) {
+ DiagError(FirstProp->getLocation(), ControlLevel)
+ << FirstProp->getPropertyImplementation();
+ DiagNote(SecondProp->getLocation(), ControlLevel)
+ << SecondProp->getPropertyImplementation();
+ return true;
+ }
+
+ // Go over the property attributes and stop at the first mismatch.
+ unsigned FirstAttrs = (unsigned)FirstProp->getPropertyAttributes();
+ unsigned SecondAttrs = (unsigned)SecondProp->getPropertyAttributes();
+ if (FirstAttrs != SecondAttrs) {
+ for (unsigned I = 0; I < NumObjCPropertyAttrsBits; ++I) {
+ unsigned CheckedAttr = (1 << I);
+ if ((FirstAttrs & CheckedAttr) == (SecondAttrs & CheckedAttr))
+ continue;
+
+ bool IsFirstWritten =
+ (unsigned)FirstProp->getPropertyAttributesAsWritten() & CheckedAttr;
+ bool IsSecondWritten =
+ (unsigned)SecondProp->getPropertyAttributesAsWritten() & CheckedAttr;
+ DiagError(IsFirstWritten ? FirstProp->getLParenLoc()
+ : FirstProp->getLocation(),
+ Attribute)
+ << FirstII << (I + 1) << IsFirstWritten;
+ DiagNote(IsSecondWritten ? SecondProp->getLParenLoc()
+ : SecondProp->getLocation(),
+ Attribute)
+ << SecondII << (I + 1);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+ODRDiagsEmitter::DiffResult
+ODRDiagsEmitter::FindTypeDiffs(DeclHashes &FirstHashes,
+ DeclHashes &SecondHashes) {
+ auto DifferenceSelector = [](const Decl *D) {
+ assert(D && "valid Decl required");
+ switch (D->getKind()) {
+ default:
+ return Other;
+ case Decl::AccessSpec:
+ switch (D->getAccess()) {
+ case AS_public:
+ return PublicSpecifer;
+ case AS_private:
+ return PrivateSpecifer;
+ case AS_protected:
+ return ProtectedSpecifer;
+ case AS_none:
+ break;
+ }
+ llvm_unreachable("Invalid access specifier");
+ case Decl::StaticAssert:
+ return StaticAssert;
+ case Decl::Field:
+ return Field;
+ case Decl::CXXMethod:
+ case Decl::CXXConstructor:
+ case Decl::CXXDestructor:
+ return CXXMethod;
+ case Decl::TypeAlias:
+ return TypeAlias;
+ case Decl::Typedef:
+ return TypeDef;
+ case Decl::Var:
+ return Var;
+ case Decl::Friend:
+ return Friend;
+ case Decl::FunctionTemplate:
+ return FunctionTemplate;
+ case Decl::ObjCMethod:
+ return ObjCMethod;
+ case Decl::ObjCIvar:
+ return ObjCIvar;
+ case Decl::ObjCProperty:
+ return ObjCProperty;
+ }
+ };
+
+ DiffResult DR;
+ auto FirstIt = FirstHashes.begin();
+ auto SecondIt = SecondHashes.begin();
+ while (FirstIt != FirstHashes.end() || SecondIt != SecondHashes.end()) {
+ if (FirstIt != FirstHashes.end() && SecondIt != SecondHashes.end() &&
+ FirstIt->second == SecondIt->second) {
+ ++FirstIt;
+ ++SecondIt;
+ continue;
+ }
+
+ DR.FirstDecl = FirstIt == FirstHashes.end() ? nullptr : FirstIt->first;
+ DR.SecondDecl = SecondIt == SecondHashes.end() ? nullptr : SecondIt->first;
+
+ DR.FirstDiffType =
+ DR.FirstDecl ? DifferenceSelector(DR.FirstDecl) : EndOfClass;
+ DR.SecondDiffType =
+ DR.SecondDecl ? DifferenceSelector(DR.SecondDecl) : EndOfClass;
+ return DR;
+ }
+ return DR;
+}
+
+void ODRDiagsEmitter::diagnoseSubMismatchUnexpected(
+ DiffResult &DR, const NamedDecl *FirstRecord, StringRef FirstModule,
+ const NamedDecl *SecondRecord, StringRef SecondModule) const {
+ Diag(FirstRecord->getLocation(),
+ diag::err_module_odr_violation_different_definitions)
+ << FirstRecord << FirstModule.empty() << FirstModule;
+
+ if (DR.FirstDecl) {
+ Diag(DR.FirstDecl->getLocation(), diag::note_first_module_difference)
+ << FirstRecord << DR.FirstDecl->getSourceRange();
+ }
+
+ Diag(SecondRecord->getLocation(),
+ diag::note_module_odr_violation_different_definitions)
+ << SecondModule;
+
+ if (DR.SecondDecl) {
+ Diag(DR.SecondDecl->getLocation(), diag::note_second_module_difference)
+ << DR.SecondDecl->getSourceRange();
+ }
+}
+
+void ODRDiagsEmitter::diagnoseSubMismatchDifferentDeclKinds(
+ DiffResult &DR, const NamedDecl *FirstRecord, StringRef FirstModule,
+ const NamedDecl *SecondRecord, StringRef SecondModule) const {
+ auto GetMismatchedDeclLoc = [](const NamedDecl *Container,
+ ODRMismatchDecl DiffType, const Decl *D) {
+ SourceLocation Loc;
+ SourceRange Range;
+ if (DiffType == EndOfClass) {
+ if (auto *Tag = dyn_cast<TagDecl>(Container))
+ Loc = Tag->getBraceRange().getEnd();
+ else if (auto *IF = dyn_cast<ObjCInterfaceDecl>(Container))
+ Loc = IF->getAtEndRange().getBegin();
+ else
+ Loc = Container->getEndLoc();
+ } else {
+ Loc = D->getLocation();
+ Range = D->getSourceRange();
+ }
+ return std::make_pair(Loc, Range);
+ };
+
+ auto FirstDiagInfo =
+ GetMismatchedDeclLoc(FirstRecord, DR.FirstDiffType, DR.FirstDecl);
+ Diag(FirstDiagInfo.first, diag::err_module_odr_violation_mismatch_decl)
+ << FirstRecord << FirstModule.empty() << FirstModule
+ << FirstDiagInfo.second << DR.FirstDiffType;
+
+ auto SecondDiagInfo =
+ GetMismatchedDeclLoc(SecondRecord, DR.SecondDiffType, DR.SecondDecl);
+ Diag(SecondDiagInfo.first, diag::note_module_odr_violation_mismatch_decl)
+ << SecondModule.empty() << SecondModule << SecondDiagInfo.second
+ << DR.SecondDiffType;
+}
+
+bool ODRDiagsEmitter::diagnoseMismatch(
+ const CXXRecordDecl *FirstRecord, const CXXRecordDecl *SecondRecord,
+ const struct CXXRecordDecl::DefinitionData *SecondDD) const {
+ // Multiple different declarations got merged together; tell the user
+ // where they came from.
+ if (FirstRecord == SecondRecord)
+ return false;
+
+ std::string FirstModule = getOwningModuleNameForDiagnostic(FirstRecord);
+ std::string SecondModule = getOwningModuleNameForDiagnostic(SecondRecord);
+
+ const struct CXXRecordDecl::DefinitionData *FirstDD =
+ FirstRecord->DefinitionData;
+ assert(FirstDD && SecondDD && "Definitions without DefinitionData");
+
+ // Diagnostics from DefinitionData are emitted here.
+ if (FirstDD != SecondDD) {
+ // Keep in sync with err_module_odr_violation_definition_data.
+ enum ODRDefinitionDataDifference {
+ NumBases,
+ NumVBases,
+ BaseType,
+ BaseVirtual,
+ BaseAccess,
+ };
+ auto DiagBaseError = [FirstRecord, &FirstModule,
+ this](SourceLocation Loc, SourceRange Range,
+ ODRDefinitionDataDifference DiffType) {
+ return Diag(Loc, diag::err_module_odr_violation_definition_data)
+ << FirstRecord << FirstModule.empty() << FirstModule << Range
+ << DiffType;
+ };
+ auto DiagBaseNote = [&SecondModule,
+ this](SourceLocation Loc, SourceRange Range,
+ ODRDefinitionDataDifference DiffType) {
+ return Diag(Loc, diag::note_module_odr_violation_definition_data)
+ << SecondModule << Range << DiffType;
+ };
+ auto GetSourceRange = [](const struct CXXRecordDecl::DefinitionData *DD) {
+ unsigned NumBases = DD->NumBases;
+ if (NumBases == 0)
+ return SourceRange();
+ ArrayRef<CXXBaseSpecifier> bases = DD->bases();
+ return SourceRange(bases[0].getBeginLoc(),
+ bases[NumBases - 1].getEndLoc());
+ };
+
+ unsigned FirstNumBases = FirstDD->NumBases;
+ unsigned FirstNumVBases = FirstDD->NumVBases;
+ unsigned SecondNumBases = SecondDD->NumBases;
+ unsigned SecondNumVBases = SecondDD->NumVBases;
+ if (FirstNumBases != SecondNumBases) {
+ DiagBaseError(FirstRecord->getLocation(), GetSourceRange(FirstDD),
+ NumBases)
+ << FirstNumBases;
+ DiagBaseNote(SecondRecord->getLocation(), GetSourceRange(SecondDD),
+ NumBases)
+ << SecondNumBases;
+ return true;
+ }
+
+ if (FirstNumVBases != SecondNumVBases) {
+ DiagBaseError(FirstRecord->getLocation(), GetSourceRange(FirstDD),
+ NumVBases)
+ << FirstNumVBases;
+ DiagBaseNote(SecondRecord->getLocation(), GetSourceRange(SecondDD),
+ NumVBases)
+ << SecondNumVBases;
+ return true;
+ }
+
+ ArrayRef<CXXBaseSpecifier> FirstBases = FirstDD->bases();
+ ArrayRef<CXXBaseSpecifier> SecondBases = SecondDD->bases();
+ for (unsigned I = 0; I < FirstNumBases; ++I) {
+ const CXXBaseSpecifier FirstBase = FirstBases[I];
+ const CXXBaseSpecifier SecondBase = SecondBases[I];
+ if (computeODRHash(FirstBase.getType()) !=
+ computeODRHash(SecondBase.getType())) {
+ DiagBaseError(FirstRecord->getLocation(), FirstBase.getSourceRange(),
+ BaseType)
+ << (I + 1) << FirstBase.getType();
+ DiagBaseNote(SecondRecord->getLocation(), SecondBase.getSourceRange(),
+ BaseType)
+ << (I + 1) << SecondBase.getType();
+ return true;
+ }
+
+ if (FirstBase.isVirtual() != SecondBase.isVirtual()) {
+ DiagBaseError(FirstRecord->getLocation(), FirstBase.getSourceRange(),
+ BaseVirtual)
+ << (I + 1) << FirstBase.isVirtual() << FirstBase.getType();
+ DiagBaseNote(SecondRecord->getLocation(), SecondBase.getSourceRange(),
+ BaseVirtual)
+ << (I + 1) << SecondBase.isVirtual() << SecondBase.getType();
+ return true;
+ }
+
+ if (FirstBase.getAccessSpecifierAsWritten() !=
+ SecondBase.getAccessSpecifierAsWritten()) {
+ DiagBaseError(FirstRecord->getLocation(), FirstBase.getSourceRange(),
+ BaseAccess)
+ << (I + 1) << FirstBase.getType()
+ << (int)FirstBase.getAccessSpecifierAsWritten();
+ DiagBaseNote(SecondRecord->getLocation(), SecondBase.getSourceRange(),
+ BaseAccess)
+ << (I + 1) << SecondBase.getType()
+ << (int)SecondBase.getAccessSpecifierAsWritten();
+ return true;
+ }
+ }
+ }
+
+ const ClassTemplateDecl *FirstTemplate =
+ FirstRecord->getDescribedClassTemplate();
+ const ClassTemplateDecl *SecondTemplate =
+ SecondRecord->getDescribedClassTemplate();
+
+ assert(!FirstTemplate == !SecondTemplate &&
+ "Both pointers should be null or non-null");
+
+ if (FirstTemplate && SecondTemplate) {
+ ArrayRef<const NamedDecl *> FirstTemplateParams =
+ FirstTemplate->getTemplateParameters()->asArray();
+ ArrayRef<const NamedDecl *> SecondTemplateParams =
+ SecondTemplate->getTemplateParameters()->asArray();
+ assert(FirstTemplateParams.size() == SecondTemplateParams.size() &&
+ "Number of template parameters should be equal.");
+ for (auto Pair : llvm::zip(FirstTemplateParams, SecondTemplateParams)) {
+ const NamedDecl *FirstDecl = std::get<0>(Pair);
+ const NamedDecl *SecondDecl = std::get<1>(Pair);
+ if (computeODRHash(FirstDecl) == computeODRHash(SecondDecl))
+ continue;
+
+ assert(FirstDecl->getKind() == SecondDecl->getKind() &&
+ "Parameter Decl's should be the same kind.");
+
+ enum ODRTemplateDifference {
+ ParamEmptyName,
+ ParamName,
+ ParamSingleDefaultArgument,
+ ParamDifferentDefaultArgument,
+ };
+
+ auto hasDefaultArg = [](const NamedDecl *D) {
+ if (auto *TTP = dyn_cast<TemplateTypeParmDecl>(D))
+ return TTP->hasDefaultArgument() &&
+ !TTP->defaultArgumentWasInherited();
+ if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(D))
+ return NTTP->hasDefaultArgument() &&
+ !NTTP->defaultArgumentWasInherited();
+ auto *TTP = cast<TemplateTemplateParmDecl>(D);
+ return TTP->hasDefaultArgument() && !TTP->defaultArgumentWasInherited();
+ };
+ bool hasFirstArg = hasDefaultArg(FirstDecl);
+ bool hasSecondArg = hasDefaultArg(SecondDecl);
+
+ ODRTemplateDifference ErrDiffType;
+ ODRTemplateDifference NoteDiffType;
+
+ DeclarationName FirstName = FirstDecl->getDeclName();
+ DeclarationName SecondName = SecondDecl->getDeclName();
+
+ if (FirstName != SecondName) {
+ bool FirstNameEmpty =
+ FirstName.isIdentifier() && !FirstName.getAsIdentifierInfo();
+ bool SecondNameEmpty =
+ SecondName.isIdentifier() && !SecondName.getAsIdentifierInfo();
+ ErrDiffType = FirstNameEmpty ? ParamEmptyName : ParamName;
+ NoteDiffType = SecondNameEmpty ? ParamEmptyName : ParamName;
+ } else if (hasFirstArg == hasSecondArg)
+ ErrDiffType = NoteDiffType = ParamDifferentDefaultArgument;
+ else
+ ErrDiffType = NoteDiffType = ParamSingleDefaultArgument;
+
+ Diag(FirstDecl->getLocation(),
+ diag::err_module_odr_violation_template_parameter)
+ << FirstRecord << FirstModule.empty() << FirstModule
+ << FirstDecl->getSourceRange() << ErrDiffType << hasFirstArg
+ << FirstName;
+ Diag(SecondDecl->getLocation(),
+ diag::note_module_odr_violation_template_parameter)
+ << SecondModule << SecondDecl->getSourceRange() << NoteDiffType
+ << hasSecondArg << SecondName;
+ return true;
+ }
+ }
+
+ auto PopulateHashes = [](DeclHashes &Hashes, const RecordDecl *Record,
+ const DeclContext *DC) {
+ for (const Decl *D : Record->decls()) {
+ if (!ODRHash::isSubDeclToBeProcessed(D, DC))
+ continue;
+ Hashes.emplace_back(D, computeODRHash(D));
+ }
+ };
+
+ DeclHashes FirstHashes;
+ DeclHashes SecondHashes;
+ const DeclContext *DC = FirstRecord;
+ PopulateHashes(FirstHashes, FirstRecord, DC);
+ PopulateHashes(SecondHashes, SecondRecord, DC);
+
+ DiffResult DR = FindTypeDiffs(FirstHashes, SecondHashes);
+ ODRMismatchDecl FirstDiffType = DR.FirstDiffType;
+ ODRMismatchDecl SecondDiffType = DR.SecondDiffType;
+ const Decl *FirstDecl = DR.FirstDecl;
+ const Decl *SecondDecl = DR.SecondDecl;
+
+ if (FirstDiffType == Other || SecondDiffType == Other) {
+ diagnoseSubMismatchUnexpected(DR, FirstRecord, FirstModule, SecondRecord,
+ SecondModule);
+ return true;
+ }
+
+ if (FirstDiffType != SecondDiffType) {
+ diagnoseSubMismatchDifferentDeclKinds(DR, FirstRecord, FirstModule,
+ SecondRecord, SecondModule);
+ return true;
+ }
+
+ // Used with err_module_odr_violation_record and
+ // note_module_odr_violation_record
+ enum ODRCXXRecordDifference {
+ StaticAssertCondition,
+ StaticAssertMessage,
+ StaticAssertOnlyMessage,
+ MethodName,
+ MethodDeleted,
+ MethodDefaulted,
+ MethodVirtual,
+ MethodStatic,
+ MethodVolatile,
+ MethodConst,
+ MethodInline,
+ MethodParameterSingleDefaultArgument,
+ MethodParameterDifferentDefaultArgument,
+ MethodNoTemplateArguments,
+ MethodDifferentNumberTemplateArguments,
+ MethodDifferentTemplateArgument,
+ MethodSingleBody,
+ MethodDifferentBody,
+ FriendTypeFunction,
+ FriendType,
+ FriendFunction,
+ FunctionTemplateDifferentNumberParameters,
+ FunctionTemplateParameterDifferentKind,
+ FunctionTemplateParameterName,
+ FunctionTemplateParameterSingleDefaultArgument,
+ FunctionTemplateParameterDifferentDefaultArgument,
+ FunctionTemplateParameterDifferentType,
+ FunctionTemplatePackParameter,
+ };
+ auto DiagError = [FirstRecord, &FirstModule,
+ this](SourceLocation Loc, SourceRange Range,
+ ODRCXXRecordDifference DiffType) {
+ return Diag(Loc, diag::err_module_odr_violation_record)
+ << FirstRecord << FirstModule.empty() << FirstModule << Range
+ << DiffType;
+ };
+ auto DiagNote = [&SecondModule, this](SourceLocation Loc, SourceRange Range,
+ ODRCXXRecordDifference DiffType) {
+ return Diag(Loc, diag::note_module_odr_violation_record)
+ << SecondModule << Range << DiffType;
+ };
+
+ assert(FirstDiffType == SecondDiffType);
+ switch (FirstDiffType) {
+ case Other:
+ case EndOfClass:
+ case PublicSpecifer:
+ case PrivateSpecifer:
+ case ProtectedSpecifer:
+ case ObjCMethod:
+ case ObjCIvar:
+ case ObjCProperty:
+ llvm_unreachable("Invalid diff type");
+
+ case StaticAssert: {
+ const StaticAssertDecl *FirstSA = cast<StaticAssertDecl>(FirstDecl);
+ const StaticAssertDecl *SecondSA = cast<StaticAssertDecl>(SecondDecl);
+
+ const Expr *FirstExpr = FirstSA->getAssertExpr();
+ const Expr *SecondExpr = SecondSA->getAssertExpr();
+ unsigned FirstODRHash = computeODRHash(FirstExpr);
+ unsigned SecondODRHash = computeODRHash(SecondExpr);
+ if (FirstODRHash != SecondODRHash) {
+ DiagError(FirstExpr->getBeginLoc(), FirstExpr->getSourceRange(),
+ StaticAssertCondition);
+ DiagNote(SecondExpr->getBeginLoc(), SecondExpr->getSourceRange(),
+ StaticAssertCondition);
+ return true;
+ }
+
+ const Expr *FirstMessage = FirstSA->getMessage();
+ const Expr *SecondMessage = SecondSA->getMessage();
+ assert((FirstMessage || SecondMessage) && "Both messages cannot be empty");
+ if ((FirstMessage && !SecondMessage) || (!FirstMessage && SecondMessage)) {
+ SourceLocation FirstLoc, SecondLoc;
+ SourceRange FirstRange, SecondRange;
+ if (FirstMessage) {
+ FirstLoc = FirstMessage->getBeginLoc();
+ FirstRange = FirstMessage->getSourceRange();
+ } else {
+ FirstLoc = FirstSA->getBeginLoc();
+ FirstRange = FirstSA->getSourceRange();
+ }
+ if (SecondMessage) {
+ SecondLoc = SecondMessage->getBeginLoc();
+ SecondRange = SecondMessage->getSourceRange();
+ } else {
+ SecondLoc = SecondSA->getBeginLoc();
+ SecondRange = SecondSA->getSourceRange();
+ }
+ DiagError(FirstLoc, FirstRange, StaticAssertOnlyMessage)
+ << (FirstMessage == nullptr);
+ DiagNote(SecondLoc, SecondRange, StaticAssertOnlyMessage)
+ << (SecondMessage == nullptr);
+ return true;
+ }
+
+ if (FirstMessage && SecondMessage) {
+ unsigned FirstMessageODRHash = computeODRHash(FirstMessage);
+ unsigned SecondMessageODRHash = computeODRHash(SecondMessage);
+ if (FirstMessageODRHash != SecondMessageODRHash) {
+ DiagError(FirstMessage->getBeginLoc(), FirstMessage->getSourceRange(),
+ StaticAssertMessage);
+ DiagNote(SecondMessage->getBeginLoc(), SecondMessage->getSourceRange(),
+ StaticAssertMessage);
+ return true;
+ }
+ }
+ break;
+ }
+
+ case Field: {
+ if (diagnoseSubMismatchField(FirstRecord, FirstModule, SecondModule,
+ cast<FieldDecl>(FirstDecl),
+ cast<FieldDecl>(SecondDecl)))
+ return true;
+ break;
+ }
+
+ case CXXMethod: {
+ enum {
+ DiagMethod,
+ DiagConstructor,
+ DiagDestructor,
+ } FirstMethodType,
+ SecondMethodType;
+ auto GetMethodTypeForDiagnostics = [](const CXXMethodDecl *D) {
+ if (isa<CXXConstructorDecl>(D))
+ return DiagConstructor;
+ if (isa<CXXDestructorDecl>(D))
+ return DiagDestructor;
+ return DiagMethod;
+ };
+ const CXXMethodDecl *FirstMethod = cast<CXXMethodDecl>(FirstDecl);
+ const CXXMethodDecl *SecondMethod = cast<CXXMethodDecl>(SecondDecl);
+ FirstMethodType = GetMethodTypeForDiagnostics(FirstMethod);
+ SecondMethodType = GetMethodTypeForDiagnostics(SecondMethod);
+ DeclarationName FirstName = FirstMethod->getDeclName();
+ DeclarationName SecondName = SecondMethod->getDeclName();
+ auto DiagMethodError = [&DiagError, FirstMethod, FirstMethodType,
+ FirstName](ODRCXXRecordDifference DiffType) {
+ return DiagError(FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), DiffType)
+ << FirstMethodType << FirstName;
+ };
+ auto DiagMethodNote = [&DiagNote, SecondMethod, SecondMethodType,
+ SecondName](ODRCXXRecordDifference DiffType) {
+ return DiagNote(SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), DiffType)
+ << SecondMethodType << SecondName;
+ };
+
+ if (FirstMethodType != SecondMethodType || FirstName != SecondName) {
+ DiagMethodError(MethodName);
+ DiagMethodNote(MethodName);
+ return true;
+ }
+
+ const bool FirstDeleted = FirstMethod->isDeletedAsWritten();
+ const bool SecondDeleted = SecondMethod->isDeletedAsWritten();
+ if (FirstDeleted != SecondDeleted) {
+ DiagMethodError(MethodDeleted) << FirstDeleted;
+ DiagMethodNote(MethodDeleted) << SecondDeleted;
+ return true;
+ }
+
+ const bool FirstDefaulted = FirstMethod->isExplicitlyDefaulted();
+ const bool SecondDefaulted = SecondMethod->isExplicitlyDefaulted();
+ if (FirstDefaulted != SecondDefaulted) {
+ DiagMethodError(MethodDefaulted) << FirstDefaulted;
+ DiagMethodNote(MethodDefaulted) << SecondDefaulted;
+ return true;
+ }
+
+ const bool FirstVirtual = FirstMethod->isVirtualAsWritten();
+ const bool SecondVirtual = SecondMethod->isVirtualAsWritten();
+ const bool FirstPure = FirstMethod->isPureVirtual();
+ const bool SecondPure = SecondMethod->isPureVirtual();
+ if ((FirstVirtual || SecondVirtual) &&
+ (FirstVirtual != SecondVirtual || FirstPure != SecondPure)) {
+ DiagMethodError(MethodVirtual) << FirstPure << FirstVirtual;
+ DiagMethodNote(MethodVirtual) << SecondPure << SecondVirtual;
+ return true;
+ }
+
+ // CXXMethodDecl::isStatic uses the canonical Decl. With Decl merging,
+ // FirstDecl is the canonical Decl of SecondDecl, so the storage
+ // class needs to be checked instead.
+ StorageClass FirstStorage = FirstMethod->getStorageClass();
+ StorageClass SecondStorage = SecondMethod->getStorageClass();
+ const bool FirstStatic = FirstStorage == SC_Static;
+ const bool SecondStatic = SecondStorage == SC_Static;
+ if (FirstStatic != SecondStatic) {
+ DiagMethodError(MethodStatic) << FirstStatic;
+ DiagMethodNote(MethodStatic) << SecondStatic;
+ return true;
+ }
+
+ const bool FirstVolatile = FirstMethod->isVolatile();
+ const bool SecondVolatile = SecondMethod->isVolatile();
+ if (FirstVolatile != SecondVolatile) {
+ DiagMethodError(MethodVolatile) << FirstVolatile;
+ DiagMethodNote(MethodVolatile) << SecondVolatile;
+ return true;
+ }
+
+ const bool FirstConst = FirstMethod->isConst();
+ const bool SecondConst = SecondMethod->isConst();
+ if (FirstConst != SecondConst) {
+ DiagMethodError(MethodConst) << FirstConst;
+ DiagMethodNote(MethodConst) << SecondConst;
+ return true;
+ }
+
+ const bool FirstInline = FirstMethod->isInlineSpecified();
+ const bool SecondInline = SecondMethod->isInlineSpecified();
+ if (FirstInline != SecondInline) {
+ DiagMethodError(MethodInline) << FirstInline;
+ DiagMethodNote(MethodInline) << SecondInline;
+ return true;
+ }
+
+ if (diagnoseSubMismatchMethodParameters(Diags, FirstRecord,
+ FirstModule, SecondModule,
+ FirstMethod, SecondMethod))
+ return true;
+
+ for (unsigned I = 0, N = FirstMethod->param_size(); I < N; ++I) {
+ const ParmVarDecl *FirstParam = FirstMethod->getParamDecl(I);
+ const ParmVarDecl *SecondParam = SecondMethod->getParamDecl(I);
+
+ const Expr *FirstInit = FirstParam->getInit();
+ const Expr *SecondInit = SecondParam->getInit();
+ if ((FirstInit == nullptr) != (SecondInit == nullptr)) {
+ DiagMethodError(MethodParameterSingleDefaultArgument)
+ << (I + 1) << (FirstInit == nullptr)
+ << (FirstInit ? FirstInit->getSourceRange() : SourceRange());
+ DiagMethodNote(MethodParameterSingleDefaultArgument)
+ << (I + 1) << (SecondInit == nullptr)
+ << (SecondInit ? SecondInit->getSourceRange() : SourceRange());
+ return true;
+ }
+
+ if (FirstInit && SecondInit &&
+ computeODRHash(FirstInit) != computeODRHash(SecondInit)) {
+ DiagMethodError(MethodParameterDifferentDefaultArgument)
+ << (I + 1) << FirstInit->getSourceRange();
+ DiagMethodNote(MethodParameterDifferentDefaultArgument)
+ << (I + 1) << SecondInit->getSourceRange();
+ return true;
+ }
+ }
+
+ const TemplateArgumentList *FirstTemplateArgs =
+ FirstMethod->getTemplateSpecializationArgs();
+ const TemplateArgumentList *SecondTemplateArgs =
+ SecondMethod->getTemplateSpecializationArgs();
+
+ if ((FirstTemplateArgs && !SecondTemplateArgs) ||
+ (!FirstTemplateArgs && SecondTemplateArgs)) {
+ DiagMethodError(MethodNoTemplateArguments)
+ << (FirstTemplateArgs != nullptr);
+ DiagMethodNote(MethodNoTemplateArguments)
+ << (SecondTemplateArgs != nullptr);
+ return true;
+ }
+
+ if (FirstTemplateArgs && SecondTemplateArgs) {
+ // Remove pack expansions from argument list.
+ auto ExpandTemplateArgumentList = [](const TemplateArgumentList *TAL) {
+ llvm::SmallVector<const TemplateArgument *, 8> ExpandedList;
+ for (const TemplateArgument &TA : TAL->asArray()) {
+ if (TA.getKind() != TemplateArgument::Pack) {
+ ExpandedList.push_back(&TA);
+ continue;
+ }
+ llvm::append_range(ExpandedList,
+ llvm::make_pointer_range(TA.getPackAsArray()));
+ }
+ return ExpandedList;
+ };
+ llvm::SmallVector<const TemplateArgument *, 8> FirstExpandedList =
+ ExpandTemplateArgumentList(FirstTemplateArgs);
+ llvm::SmallVector<const TemplateArgument *, 8> SecondExpandedList =
+ ExpandTemplateArgumentList(SecondTemplateArgs);
+
+ if (FirstExpandedList.size() != SecondExpandedList.size()) {
+ DiagMethodError(MethodDifferentNumberTemplateArguments)
+ << (unsigned)FirstExpandedList.size();
+ DiagMethodNote(MethodDifferentNumberTemplateArguments)
+ << (unsigned)SecondExpandedList.size();
+ return true;
+ }
+
+ for (unsigned i = 0, e = FirstExpandedList.size(); i != e; ++i) {
+ const TemplateArgument &FirstTA = *FirstExpandedList[i],
+ &SecondTA = *SecondExpandedList[i];
+ if (computeODRHash(FirstTA) == computeODRHash(SecondTA))
+ continue;
+
+ DiagMethodError(MethodDifferentTemplateArgument) << FirstTA << i + 1;
+ DiagMethodNote(MethodDifferentTemplateArgument) << SecondTA << i + 1;
+ return true;
+ }
+ }
+
+ // Compute the hash of the method as if it has no body.
+ auto ComputeCXXMethodODRHash = [](const CXXMethodDecl *D) {
+ ODRHash Hasher;
+ Hasher.AddFunctionDecl(D, true /*SkipBody*/);
+ return Hasher.CalculateHash();
+ };
+
+ // Compare the hash generated to the hash stored. A difference means
+ // that a body was present in the original source. Due to merging,
+ // the standard way of detecting a body will not work.
+ const bool HasFirstBody =
+ ComputeCXXMethodODRHash(FirstMethod) != FirstMethod->getODRHash();
+ const bool HasSecondBody =
+ ComputeCXXMethodODRHash(SecondMethod) != SecondMethod->getODRHash();
+
+ if (HasFirstBody != HasSecondBody) {
+ DiagMethodError(MethodSingleBody) << HasFirstBody;
+ DiagMethodNote(MethodSingleBody) << HasSecondBody;
+ return true;
+ }
+
+ if (HasFirstBody && HasSecondBody) {
+ DiagMethodError(MethodDifferentBody);
+ DiagMethodNote(MethodDifferentBody);
+ return true;
+ }
+
+ break;
+ }
+
+ case TypeAlias:
+ case TypeDef: {
+ if (diagnoseSubMismatchTypedef(FirstRecord, FirstModule, SecondModule,
+ cast<TypedefNameDecl>(FirstDecl),
+ cast<TypedefNameDecl>(SecondDecl),
+ FirstDiffType == TypeAlias))
+ return true;
+ break;
+ }
+ case Var: {
+ if (diagnoseSubMismatchVar(FirstRecord, FirstModule, SecondModule,
+ cast<VarDecl>(FirstDecl),
+ cast<VarDecl>(SecondDecl)))
+ return true;
+ break;
+ }
+ case Friend: {
+ const FriendDecl *FirstFriend = cast<FriendDecl>(FirstDecl);
+ const FriendDecl *SecondFriend = cast<FriendDecl>(SecondDecl);
+
+ const NamedDecl *FirstND = FirstFriend->getFriendDecl();
+ const NamedDecl *SecondND = SecondFriend->getFriendDecl();
+
+ TypeSourceInfo *FirstTSI = FirstFriend->getFriendType();
+ TypeSourceInfo *SecondTSI = SecondFriend->getFriendType();
+
+ if (FirstND && SecondND) {
+ DiagError(FirstFriend->getFriendLoc(), FirstFriend->getSourceRange(),
+ FriendFunction)
+ << FirstND;
+ DiagNote(SecondFriend->getFriendLoc(), SecondFriend->getSourceRange(),
+ FriendFunction)
+ << SecondND;
+ return true;
+ }
+
+ if (FirstTSI && SecondTSI) {
+ QualType FirstFriendType = FirstTSI->getType();
+ QualType SecondFriendType = SecondTSI->getType();
+ assert(computeODRHash(FirstFriendType) !=
+ computeODRHash(SecondFriendType));
+ DiagError(FirstFriend->getFriendLoc(), FirstFriend->getSourceRange(),
+ FriendType)
+ << FirstFriendType;
+ DiagNote(SecondFriend->getFriendLoc(), SecondFriend->getSourceRange(),
+ FriendType)
+ << SecondFriendType;
+ return true;
+ }
+
+ DiagError(FirstFriend->getFriendLoc(), FirstFriend->getSourceRange(),
+ FriendTypeFunction)
+ << (FirstTSI == nullptr);
+ DiagNote(SecondFriend->getFriendLoc(), SecondFriend->getSourceRange(),
+ FriendTypeFunction)
+ << (SecondTSI == nullptr);
+ return true;
+ }
+ case FunctionTemplate: {
+ const FunctionTemplateDecl *FirstTemplate =
+ cast<FunctionTemplateDecl>(FirstDecl);
+ const FunctionTemplateDecl *SecondTemplate =
+ cast<FunctionTemplateDecl>(SecondDecl);
+
+ TemplateParameterList *FirstTPL = FirstTemplate->getTemplateParameters();
+ TemplateParameterList *SecondTPL = SecondTemplate->getTemplateParameters();
+
+ auto DiagTemplateError = [&DiagError,
+ FirstTemplate](ODRCXXRecordDifference DiffType) {
+ return DiagError(FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(), DiffType)
+ << FirstTemplate;
+ };
+ auto DiagTemplateNote = [&DiagNote,
+ SecondTemplate](ODRCXXRecordDifference DiffType) {
+ return DiagNote(SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(), DiffType)
+ << SecondTemplate;
+ };
+
+ if (FirstTPL->size() != SecondTPL->size()) {
+ DiagTemplateError(FunctionTemplateDifferentNumberParameters)
+ << FirstTPL->size();
+ DiagTemplateNote(FunctionTemplateDifferentNumberParameters)
+ << SecondTPL->size();
+ return true;
+ }
+
+ for (unsigned i = 0, e = FirstTPL->size(); i != e; ++i) {
+ NamedDecl *FirstParam = FirstTPL->getParam(i);
+ NamedDecl *SecondParam = SecondTPL->getParam(i);
+
+ if (FirstParam->getKind() != SecondParam->getKind()) {
+ enum {
+ TemplateTypeParameter,
+ NonTypeTemplateParameter,
+ TemplateTemplateParameter,
+ };
+ auto GetParamType = [](NamedDecl *D) {
+ switch (D->getKind()) {
+ default:
+ llvm_unreachable("Unexpected template parameter type");
+ case Decl::TemplateTypeParm:
+ return TemplateTypeParameter;
+ case Decl::NonTypeTemplateParm:
+ return NonTypeTemplateParameter;
+ case Decl::TemplateTemplateParm:
+ return TemplateTemplateParameter;
+ }
+ };
+
+ DiagTemplateError(FunctionTemplateParameterDifferentKind)
+ << (i + 1) << GetParamType(FirstParam);
+ DiagTemplateNote(FunctionTemplateParameterDifferentKind)
+ << (i + 1) << GetParamType(SecondParam);
+ return true;
+ }
+
+ if (FirstParam->getName() != SecondParam->getName()) {
+ DiagTemplateError(FunctionTemplateParameterName)
+ << (i + 1) << (bool)FirstParam->getIdentifier() << FirstParam;
+ DiagTemplateNote(FunctionTemplateParameterName)
+ << (i + 1) << (bool)SecondParam->getIdentifier() << SecondParam;
+ return true;
+ }
+
+ if (isa<TemplateTypeParmDecl>(FirstParam) &&
+ isa<TemplateTypeParmDecl>(SecondParam)) {
+ TemplateTypeParmDecl *FirstTTPD =
+ cast<TemplateTypeParmDecl>(FirstParam);
+ TemplateTypeParmDecl *SecondTTPD =
+ cast<TemplateTypeParmDecl>(SecondParam);
+ bool HasFirstDefaultArgument =
+ FirstTTPD->hasDefaultArgument() &&
+ !FirstTTPD->defaultArgumentWasInherited();
+ bool HasSecondDefaultArgument =
+ SecondTTPD->hasDefaultArgument() &&
+ !SecondTTPD->defaultArgumentWasInherited();
+ if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
+ DiagTemplateError(FunctionTemplateParameterSingleDefaultArgument)
+ << (i + 1) << HasFirstDefaultArgument;
+ DiagTemplateNote(FunctionTemplateParameterSingleDefaultArgument)
+ << (i + 1) << HasSecondDefaultArgument;
+ return true;
+ }
+
+ if (HasFirstDefaultArgument && HasSecondDefaultArgument) {
+ QualType FirstType = FirstTTPD->getDefaultArgument();
+ QualType SecondType = SecondTTPD->getDefaultArgument();
+ if (computeODRHash(FirstType) != computeODRHash(SecondType)) {
+ DiagTemplateError(FunctionTemplateParameterDifferentDefaultArgument)
+ << (i + 1) << FirstType;
+ DiagTemplateNote(FunctionTemplateParameterDifferentDefaultArgument)
+ << (i + 1) << SecondType;
+ return true;
+ }
+ }
+
+ if (FirstTTPD->isParameterPack() != SecondTTPD->isParameterPack()) {
+ DiagTemplateError(FunctionTemplatePackParameter)
+ << (i + 1) << FirstTTPD->isParameterPack();
+ DiagTemplateNote(FunctionTemplatePackParameter)
+ << (i + 1) << SecondTTPD->isParameterPack();
+ return true;
+ }
+ }
+
+ if (isa<TemplateTemplateParmDecl>(FirstParam) &&
+ isa<TemplateTemplateParmDecl>(SecondParam)) {
+ TemplateTemplateParmDecl *FirstTTPD =
+ cast<TemplateTemplateParmDecl>(FirstParam);
+ TemplateTemplateParmDecl *SecondTTPD =
+ cast<TemplateTemplateParmDecl>(SecondParam);
+
+ TemplateParameterList *FirstTPL = FirstTTPD->getTemplateParameters();
+ TemplateParameterList *SecondTPL = SecondTTPD->getTemplateParameters();
+
+ auto ComputeTemplateParameterListODRHash =
+ [](const TemplateParameterList *TPL) {
+ assert(TPL);
+ ODRHash Hasher;
+ Hasher.AddTemplateParameterList(TPL);
+ return Hasher.CalculateHash();
+ };
+
+ if (ComputeTemplateParameterListODRHash(FirstTPL) !=
+ ComputeTemplateParameterListODRHash(SecondTPL)) {
+ DiagTemplateError(FunctionTemplateParameterDifferentType) << (i + 1);
+ DiagTemplateNote(FunctionTemplateParameterDifferentType) << (i + 1);
+ return true;
+ }
+
+ bool HasFirstDefaultArgument =
+ FirstTTPD->hasDefaultArgument() &&
+ !FirstTTPD->defaultArgumentWasInherited();
+ bool HasSecondDefaultArgument =
+ SecondTTPD->hasDefaultArgument() &&
+ !SecondTTPD->defaultArgumentWasInherited();
+ if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
+ DiagTemplateError(FunctionTemplateParameterSingleDefaultArgument)
+ << (i + 1) << HasFirstDefaultArgument;
+ DiagTemplateNote(FunctionTemplateParameterSingleDefaultArgument)
+ << (i + 1) << HasSecondDefaultArgument;
+ return true;
+ }
+
+ if (HasFirstDefaultArgument && HasSecondDefaultArgument) {
+ TemplateArgument FirstTA =
+ FirstTTPD->getDefaultArgument().getArgument();
+ TemplateArgument SecondTA =
+ SecondTTPD->getDefaultArgument().getArgument();
+ if (computeODRHash(FirstTA) != computeODRHash(SecondTA)) {
+ DiagTemplateError(FunctionTemplateParameterDifferentDefaultArgument)
+ << (i + 1) << FirstTA;
+ DiagTemplateNote(FunctionTemplateParameterDifferentDefaultArgument)
+ << (i + 1) << SecondTA;
+ return true;
+ }
+ }
+
+ if (FirstTTPD->isParameterPack() != SecondTTPD->isParameterPack()) {
+ DiagTemplateError(FunctionTemplatePackParameter)
+ << (i + 1) << FirstTTPD->isParameterPack();
+ DiagTemplateNote(FunctionTemplatePackParameter)
+ << (i + 1) << SecondTTPD->isParameterPack();
+ return true;
+ }
+ }
+
+ if (isa<NonTypeTemplateParmDecl>(FirstParam) &&
+ isa<NonTypeTemplateParmDecl>(SecondParam)) {
+ NonTypeTemplateParmDecl *FirstNTTPD =
+ cast<NonTypeTemplateParmDecl>(FirstParam);
+ NonTypeTemplateParmDecl *SecondNTTPD =
+ cast<NonTypeTemplateParmDecl>(SecondParam);
+
+ QualType FirstType = FirstNTTPD->getType();
+ QualType SecondType = SecondNTTPD->getType();
+ if (computeODRHash(FirstType) != computeODRHash(SecondType)) {
+ DiagTemplateError(FunctionTemplateParameterDifferentType) << (i + 1);
+ DiagTemplateNote(FunctionTemplateParameterDifferentType) << (i + 1);
+ return true;
+ }
+
+ bool HasFirstDefaultArgument =
+ FirstNTTPD->hasDefaultArgument() &&
+ !FirstNTTPD->defaultArgumentWasInherited();
+ bool HasSecondDefaultArgument =
+ SecondNTTPD->hasDefaultArgument() &&
+ !SecondNTTPD->defaultArgumentWasInherited();
+ if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
+ DiagTemplateError(FunctionTemplateParameterSingleDefaultArgument)
+ << (i + 1) << HasFirstDefaultArgument;
+ DiagTemplateNote(FunctionTemplateParameterSingleDefaultArgument)
+ << (i + 1) << HasSecondDefaultArgument;
+ return true;
+ }
+
+ if (HasFirstDefaultArgument && HasSecondDefaultArgument) {
+ Expr *FirstDefaultArgument = FirstNTTPD->getDefaultArgument();
+ Expr *SecondDefaultArgument = SecondNTTPD->getDefaultArgument();
+ if (computeODRHash(FirstDefaultArgument) !=
+ computeODRHash(SecondDefaultArgument)) {
+ DiagTemplateError(FunctionTemplateParameterDifferentDefaultArgument)
+ << (i + 1) << FirstDefaultArgument;
+ DiagTemplateNote(FunctionTemplateParameterDifferentDefaultArgument)
+ << (i + 1) << SecondDefaultArgument;
+ return true;
+ }
+ }
+
+ if (FirstNTTPD->isParameterPack() != SecondNTTPD->isParameterPack()) {
+ DiagTemplateError(FunctionTemplatePackParameter)
+ << (i + 1) << FirstNTTPD->isParameterPack();
+ DiagTemplateNote(FunctionTemplatePackParameter)
+ << (i + 1) << SecondNTTPD->isParameterPack();
+ return true;
+ }
+ }
+ }
+ break;
+ }
+ }
+
+ Diag(FirstDecl->getLocation(),
+ diag::err_module_odr_violation_mismatch_decl_unknown)
+ << FirstRecord << FirstModule.empty() << FirstModule << FirstDiffType
+ << FirstDecl->getSourceRange();
+ Diag(SecondDecl->getLocation(),
+ diag::note_module_odr_violation_mismatch_decl_unknown)
+ << SecondModule.empty() << SecondModule << FirstDiffType
+ << SecondDecl->getSourceRange();
+ return true;
+}
+
+bool ODRDiagsEmitter::diagnoseMismatch(const RecordDecl *FirstRecord,
+ const RecordDecl *SecondRecord) const {
+ if (FirstRecord == SecondRecord)
+ return false;
+
+ std::string FirstModule = getOwningModuleNameForDiagnostic(FirstRecord);
+ std::string SecondModule = getOwningModuleNameForDiagnostic(SecondRecord);
+
+ auto PopulateHashes = [](DeclHashes &Hashes, const RecordDecl *Record,
+ const DeclContext *DC) {
+ for (const Decl *D : Record->decls()) {
+ if (!ODRHash::isSubDeclToBeProcessed(D, DC))
+ continue;
+ Hashes.emplace_back(D, computeODRHash(D));
+ }
+ };
+
+ DeclHashes FirstHashes;
+ DeclHashes SecondHashes;
+ const DeclContext *DC = FirstRecord;
+ PopulateHashes(FirstHashes, FirstRecord, DC);
+ PopulateHashes(SecondHashes, SecondRecord, DC);
+
+ DiffResult DR = FindTypeDiffs(FirstHashes, SecondHashes);
+ ODRMismatchDecl FirstDiffType = DR.FirstDiffType;
+ ODRMismatchDecl SecondDiffType = DR.SecondDiffType;
+ const Decl *FirstDecl = DR.FirstDecl;
+ const Decl *SecondDecl = DR.SecondDecl;
+
+ if (FirstDiffType == Other || SecondDiffType == Other) {
+ diagnoseSubMismatchUnexpected(DR, FirstRecord, FirstModule, SecondRecord,
+ SecondModule);
+ return true;
+ }
+
+ if (FirstDiffType != SecondDiffType) {
+ diagnoseSubMismatchDifferentDeclKinds(DR, FirstRecord, FirstModule,
+ SecondRecord, SecondModule);
+ return true;
+ }
+
+ assert(FirstDiffType == SecondDiffType);
+ switch (FirstDiffType) {
+ // Already handled.
+ case EndOfClass:
+ case Other:
+ // C++ only, invalid in this context.
+ case PublicSpecifer:
+ case PrivateSpecifer:
+ case ProtectedSpecifer:
+ case StaticAssert:
+ case CXXMethod:
+ case TypeAlias:
+ case Friend:
+ case FunctionTemplate:
+ // Cannot be contained by RecordDecl, invalid in this context.
+ case ObjCMethod:
+ case ObjCIvar:
+ case ObjCProperty:
+ llvm_unreachable("Invalid diff type");
+
+ case Field: {
+ if (diagnoseSubMismatchField(FirstRecord, FirstModule, SecondModule,
+ cast<FieldDecl>(FirstDecl),
+ cast<FieldDecl>(SecondDecl)))
+ return true;
+ break;
+ }
+ case TypeDef: {
+ if (diagnoseSubMismatchTypedef(FirstRecord, FirstModule, SecondModule,
+ cast<TypedefNameDecl>(FirstDecl),
+ cast<TypedefNameDecl>(SecondDecl),
+ /*IsTypeAlias=*/false))
+ return true;
+ break;
+ }
+ case Var: {
+ if (diagnoseSubMismatchVar(FirstRecord, FirstModule, SecondModule,
+ cast<VarDecl>(FirstDecl),
+ cast<VarDecl>(SecondDecl)))
+ return true;
+ break;
+ }
+ }
+
+ Diag(FirstDecl->getLocation(),
+ diag::err_module_odr_violation_mismatch_decl_unknown)
+ << FirstRecord << FirstModule.empty() << FirstModule << FirstDiffType
+ << FirstDecl->getSourceRange();
+ Diag(SecondDecl->getLocation(),
+ diag::note_module_odr_violation_mismatch_decl_unknown)
+ << SecondModule.empty() << SecondModule << FirstDiffType
+ << SecondDecl->getSourceRange();
+ return true;
+}
+
+bool ODRDiagsEmitter::diagnoseMismatch(
+ const FunctionDecl *FirstFunction,
+ const FunctionDecl *SecondFunction) const {
+ if (FirstFunction == SecondFunction)
+ return false;
+
+ // Keep in sync with select options in err_module_odr_violation_function.
+ enum ODRFunctionDifference {
+ ReturnType,
+ ParameterName,
+ ParameterType,
+ ParameterSingleDefaultArgument,
+ ParameterDifferentDefaultArgument,
+ FunctionBody,
+ };
+
+ std::string FirstModule = getOwningModuleNameForDiagnostic(FirstFunction);
+ std::string SecondModule = getOwningModuleNameForDiagnostic(SecondFunction);
+
+ auto DiagError = [FirstFunction, &FirstModule,
+ this](SourceLocation Loc, SourceRange Range,
+ ODRFunctionDifference DiffType) {
+ return Diag(Loc, diag::err_module_odr_violation_function)
+ << FirstFunction << FirstModule.empty() << FirstModule << Range
+ << DiffType;
+ };
+ auto DiagNote = [&SecondModule, this](SourceLocation Loc, SourceRange Range,
+ ODRFunctionDifference DiffType) {
+ return Diag(Loc, diag::note_module_odr_violation_function)
+ << SecondModule << Range << DiffType;
+ };
+
+ if (computeODRHash(FirstFunction->getReturnType()) !=
+ computeODRHash(SecondFunction->getReturnType())) {
+ DiagError(FirstFunction->getReturnTypeSourceRange().getBegin(),
+ FirstFunction->getReturnTypeSourceRange(), ReturnType)
+ << FirstFunction->getReturnType();
+ DiagNote(SecondFunction->getReturnTypeSourceRange().getBegin(),
+ SecondFunction->getReturnTypeSourceRange(), ReturnType)
+ << SecondFunction->getReturnType();
+ return true;
+ }
+
+ assert(FirstFunction->param_size() == SecondFunction->param_size() &&
+ "Merged functions with different number of parameters");
+
+ size_t ParamSize = FirstFunction->param_size();
+ for (unsigned I = 0; I < ParamSize; ++I) {
+ const ParmVarDecl *FirstParam = FirstFunction->getParamDecl(I);
+ const ParmVarDecl *SecondParam = SecondFunction->getParamDecl(I);
+
+ assert(Context.hasSameType(FirstParam->getType(), SecondParam->getType()) &&
+ "Merged function has different parameter types.");
+
+ if (FirstParam->getDeclName() != SecondParam->getDeclName()) {
+ DiagError(FirstParam->getLocation(), FirstParam->getSourceRange(),
+ ParameterName)
+ << I + 1 << FirstParam->getDeclName();
+ DiagNote(SecondParam->getLocation(), SecondParam->getSourceRange(),
+ ParameterName)
+ << I + 1 << SecondParam->getDeclName();
+ return true;
+ };
+
+ QualType FirstParamType = FirstParam->getType();
+ QualType SecondParamType = SecondParam->getType();
+ if (FirstParamType != SecondParamType &&
+ computeODRHash(FirstParamType) != computeODRHash(SecondParamType)) {
+ if (const DecayedType *ParamDecayedType =
+ FirstParamType->getAs<DecayedType>()) {
+ DiagError(FirstParam->getLocation(), FirstParam->getSourceRange(),
+ ParameterType)
+ << (I + 1) << FirstParamType << true
+ << ParamDecayedType->getOriginalType();
+ } else {
+ DiagError(FirstParam->getLocation(), FirstParam->getSourceRange(),
+ ParameterType)
+ << (I + 1) << FirstParamType << false;
+ }
+
+ if (const DecayedType *ParamDecayedType =
+ SecondParamType->getAs<DecayedType>()) {
+ DiagNote(SecondParam->getLocation(), SecondParam->getSourceRange(),
+ ParameterType)
+ << (I + 1) << SecondParamType << true
+ << ParamDecayedType->getOriginalType();
+ } else {
+ DiagNote(SecondParam->getLocation(), SecondParam->getSourceRange(),
+ ParameterType)
+ << (I + 1) << SecondParamType << false;
+ }
+ return true;
+ }
+
+ // Note, these calls can trigger deserialization.
+ const Expr *FirstInit = FirstParam->getInit();
+ const Expr *SecondInit = SecondParam->getInit();
+ if ((FirstInit == nullptr) != (SecondInit == nullptr)) {
+ DiagError(FirstParam->getLocation(), FirstParam->getSourceRange(),
+ ParameterSingleDefaultArgument)
+ << (I + 1) << (FirstInit == nullptr)
+ << (FirstInit ? FirstInit->getSourceRange() : SourceRange());
+ DiagNote(SecondParam->getLocation(), SecondParam->getSourceRange(),
+ ParameterSingleDefaultArgument)
+ << (I + 1) << (SecondInit == nullptr)
+ << (SecondInit ? SecondInit->getSourceRange() : SourceRange());
+ return true;
+ }
+
+ if (FirstInit && SecondInit &&
+ computeODRHash(FirstInit) != computeODRHash(SecondInit)) {
+ DiagError(FirstParam->getLocation(), FirstParam->getSourceRange(),
+ ParameterDifferentDefaultArgument)
+ << (I + 1) << FirstInit->getSourceRange();
+ DiagNote(SecondParam->getLocation(), SecondParam->getSourceRange(),
+ ParameterDifferentDefaultArgument)
+ << (I + 1) << SecondInit->getSourceRange();
+ return true;
+ }
+
+ assert(computeODRHash(FirstParam) == computeODRHash(SecondParam) &&
+ "Undiagnosed parameter difference.");
+ }
+
+ // If no error has been generated before now, assume the problem is in
+ // the body and generate a message.
+ DiagError(FirstFunction->getLocation(), FirstFunction->getSourceRange(),
+ FunctionBody);
+ DiagNote(SecondFunction->getLocation(), SecondFunction->getSourceRange(),
+ FunctionBody);
+ return true;
+}
+
+bool ODRDiagsEmitter::diagnoseMismatch(const EnumDecl *FirstEnum,
+ const EnumDecl *SecondEnum) const {
+ if (FirstEnum == SecondEnum)
+ return false;
+
+ // Keep in sync with select options in err_module_odr_violation_enum.
+ enum ODREnumDifference {
+ SingleScopedEnum,
+ EnumTagKeywordMismatch,
+ SingleSpecifiedType,
+ DifferentSpecifiedTypes,
+ DifferentNumberEnumConstants,
+ EnumConstantName,
+ EnumConstantSingleInitializer,
+ EnumConstantDifferentInitializer,
+ };
+
+ std::string FirstModule = getOwningModuleNameForDiagnostic(FirstEnum);
+ std::string SecondModule = getOwningModuleNameForDiagnostic(SecondEnum);
+
+ auto DiagError = [FirstEnum, &FirstModule, this](const auto *DiagAnchor,
+ ODREnumDifference DiffType) {
+ return Diag(DiagAnchor->getLocation(), diag::err_module_odr_violation_enum)
+ << FirstEnum << FirstModule.empty() << FirstModule
+ << DiagAnchor->getSourceRange() << DiffType;
+ };
+ auto DiagNote = [&SecondModule, this](const auto *DiagAnchor,
+ ODREnumDifference DiffType) {
+ return Diag(DiagAnchor->getLocation(), diag::note_module_odr_violation_enum)
+ << SecondModule << DiagAnchor->getSourceRange() << DiffType;
+ };
+
+ if (FirstEnum->isScoped() != SecondEnum->isScoped()) {
+ DiagError(FirstEnum, SingleScopedEnum) << FirstEnum->isScoped();
+ DiagNote(SecondEnum, SingleScopedEnum) << SecondEnum->isScoped();
+ return true;
+ }
+
+ if (FirstEnum->isScoped() && SecondEnum->isScoped()) {
+ if (FirstEnum->isScopedUsingClassTag() !=
+ SecondEnum->isScopedUsingClassTag()) {
+ DiagError(FirstEnum, EnumTagKeywordMismatch)
+ << FirstEnum->isScopedUsingClassTag();
+ DiagNote(SecondEnum, EnumTagKeywordMismatch)
+ << SecondEnum->isScopedUsingClassTag();
+ return true;
+ }
+ }
+
+ QualType FirstUnderlyingType =
+ FirstEnum->getIntegerTypeSourceInfo()
+ ? FirstEnum->getIntegerTypeSourceInfo()->getType()
+ : QualType();
+ QualType SecondUnderlyingType =
+ SecondEnum->getIntegerTypeSourceInfo()
+ ? SecondEnum->getIntegerTypeSourceInfo()->getType()
+ : QualType();
+ if (FirstUnderlyingType.isNull() != SecondUnderlyingType.isNull()) {
+ DiagError(FirstEnum, SingleSpecifiedType) << !FirstUnderlyingType.isNull();
+ DiagNote(SecondEnum, SingleSpecifiedType) << !SecondUnderlyingType.isNull();
+ return true;
+ }
+
+ if (!FirstUnderlyingType.isNull() && !SecondUnderlyingType.isNull()) {
+ if (computeODRHash(FirstUnderlyingType) !=
+ computeODRHash(SecondUnderlyingType)) {
+ DiagError(FirstEnum, DifferentSpecifiedTypes) << FirstUnderlyingType;
+ DiagNote(SecondEnum, DifferentSpecifiedTypes) << SecondUnderlyingType;
+ return true;
+ }
+ }
+
+ // Compare enum constants.
+ using DeclHashes =
+ llvm::SmallVector<std::pair<const EnumConstantDecl *, unsigned>, 4>;
+ auto PopulateHashes = [FirstEnum](DeclHashes &Hashes, const EnumDecl *Enum) {
+ for (const Decl *D : Enum->decls()) {
+ // Due to decl merging, the first EnumDecl is the parent of
+ // Decls in both records.
+ if (!ODRHash::isSubDeclToBeProcessed(D, FirstEnum))
+ continue;
+ assert(isa<EnumConstantDecl>(D) && "Unexpected Decl kind");
+ Hashes.emplace_back(cast<EnumConstantDecl>(D), computeODRHash(D));
+ }
+ };
+ DeclHashes FirstHashes;
+ PopulateHashes(FirstHashes, FirstEnum);
+ DeclHashes SecondHashes;
+ PopulateHashes(SecondHashes, SecondEnum);
+
+ if (FirstHashes.size() != SecondHashes.size()) {
+ DiagError(FirstEnum, DifferentNumberEnumConstants)
+ << (int)FirstHashes.size();
+ DiagNote(SecondEnum, DifferentNumberEnumConstants)
+ << (int)SecondHashes.size();
+ return true;
+ }
+
+ for (unsigned I = 0, N = FirstHashes.size(); I < N; ++I) {
+ if (FirstHashes[I].second == SecondHashes[I].second)
+ continue;
+ const EnumConstantDecl *FirstConstant = FirstHashes[I].first;
+ const EnumConstantDecl *SecondConstant = SecondHashes[I].first;
+
+ if (FirstConstant->getDeclName() != SecondConstant->getDeclName()) {
+ DiagError(FirstConstant, EnumConstantName) << I + 1 << FirstConstant;
+ DiagNote(SecondConstant, EnumConstantName) << I + 1 << SecondConstant;
+ return true;
+ }
+
+ const Expr *FirstInit = FirstConstant->getInitExpr();
+ const Expr *SecondInit = SecondConstant->getInitExpr();
+ if (!FirstInit && !SecondInit)
+ continue;
+
+ if (!FirstInit || !SecondInit) {
+ DiagError(FirstConstant, EnumConstantSingleInitializer)
+ << I + 1 << FirstConstant << (FirstInit != nullptr);
+ DiagNote(SecondConstant, EnumConstantSingleInitializer)
+ << I + 1 << SecondConstant << (SecondInit != nullptr);
+ return true;
+ }
+
+ if (computeODRHash(FirstInit) != computeODRHash(SecondInit)) {
+ DiagError(FirstConstant, EnumConstantDifferentInitializer)
+ << I + 1 << FirstConstant;
+ DiagNote(SecondConstant, EnumConstantDifferentInitializer)
+ << I + 1 << SecondConstant;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool ODRDiagsEmitter::diagnoseMismatch(
+ const ObjCInterfaceDecl *FirstID, const ObjCInterfaceDecl *SecondID,
+ const struct ObjCInterfaceDecl::DefinitionData *SecondDD) const {
+ // Multiple different declarations got merged together; tell the user
+ // where they came from.
+ if (FirstID == SecondID)
+ return false;
+
+ std::string FirstModule = getOwningModuleNameForDiagnostic(FirstID);
+ std::string SecondModule = getOwningModuleNameForDiagnostic(SecondID);
+
+ // Keep in sync with err_module_odr_violation_objc_interface.
+ enum ODRInterfaceDifference {
+ SuperClassType,
+ IVarAccess,
+ };
+
+ auto DiagError = [FirstID, &FirstModule,
+ this](SourceLocation Loc, SourceRange Range,
+ ODRInterfaceDifference DiffType) {
+ return Diag(Loc, diag::err_module_odr_violation_objc_interface)
+ << FirstID << FirstModule.empty() << FirstModule << Range
+ << DiffType;
+ };
+ auto DiagNote = [&SecondModule, this](SourceLocation Loc, SourceRange Range,
+ ODRInterfaceDifference DiffType) {
+ return Diag(Loc, diag::note_module_odr_violation_objc_interface)
+ << SecondModule.empty() << SecondModule << Range << DiffType;
+ };
+
+ const struct ObjCInterfaceDecl::DefinitionData *FirstDD = &FirstID->data();
+ assert(FirstDD && SecondDD && "Definitions without DefinitionData");
+ if (FirstDD != SecondDD) {
+ // Check for matching super class.
+ auto GetSuperClassSourceRange = [](const TypeSourceInfo *SuperInfo,
+ const ObjCInterfaceDecl *ID) {
+ if (!SuperInfo)
+ return ID->getSourceRange();
+ TypeLoc Loc = SuperInfo->getTypeLoc();
+ return SourceRange(Loc.getBeginLoc(), Loc.getEndLoc());
+ };
+
+ ObjCInterfaceDecl *FirstSuperClass = FirstID->getSuperClass();
+ ObjCInterfaceDecl *SecondSuperClass = nullptr;
+ const TypeSourceInfo *FirstSuperInfo = FirstID->getSuperClassTInfo();
+ const TypeSourceInfo *SecondSuperInfo = SecondDD->SuperClassTInfo;
+ if (SecondSuperInfo)
+ SecondSuperClass =
+ SecondSuperInfo->getType()->castAs<ObjCObjectType>()->getInterface();
+
+ if ((FirstSuperClass && SecondSuperClass &&
+ FirstSuperClass->getODRHash() != SecondSuperClass->getODRHash()) ||
+ (FirstSuperClass && !SecondSuperClass) ||
+ (!FirstSuperClass && SecondSuperClass)) {
+ QualType FirstType;
+ if (FirstSuperInfo)
+ FirstType = FirstSuperInfo->getType();
+
+ DiagError(FirstID->getLocation(),
+ GetSuperClassSourceRange(FirstSuperInfo, FirstID),
+ SuperClassType)
+ << (bool)FirstSuperInfo << FirstType;
+
+ QualType SecondType;
+ if (SecondSuperInfo)
+ SecondType = SecondSuperInfo->getType();
+
+ DiagNote(SecondID->getLocation(),
+ GetSuperClassSourceRange(SecondSuperInfo, SecondID),
+ SuperClassType)
+ << (bool)SecondSuperInfo << SecondType;
+ return true;
+ }
+
+ // Check both interfaces reference the same protocols.
+ auto &FirstProtos = FirstID->getReferencedProtocols();
+ auto &SecondProtos = SecondDD->ReferencedProtocols;
+ if (diagnoseSubMismatchProtocols(FirstProtos, FirstID, FirstModule,
+ SecondProtos, SecondID, SecondModule))
+ return true;
+ }
+
+ auto PopulateHashes = [](DeclHashes &Hashes, const ObjCInterfaceDecl *ID,
+ const DeclContext *DC) {
+ for (auto *D : ID->decls()) {
+ if (!ODRHash::isSubDeclToBeProcessed(D, DC))
+ continue;
+ Hashes.emplace_back(D, computeODRHash(D));
+ }
+ };
+
+ DeclHashes FirstHashes;
+ DeclHashes SecondHashes;
+ // Use definition as DeclContext because definitions are merged when
+ // DeclContexts are merged and separate when DeclContexts are separate.
+ PopulateHashes(FirstHashes, FirstID, FirstID->getDefinition());
+ PopulateHashes(SecondHashes, SecondID, SecondID->getDefinition());
+
+ DiffResult DR = FindTypeDiffs(FirstHashes, SecondHashes);
+ ODRMismatchDecl FirstDiffType = DR.FirstDiffType;
+ ODRMismatchDecl SecondDiffType = DR.SecondDiffType;
+ const Decl *FirstDecl = DR.FirstDecl;
+ const Decl *SecondDecl = DR.SecondDecl;
+
+ if (FirstDiffType == Other || SecondDiffType == Other) {
+ diagnoseSubMismatchUnexpected(DR, FirstID, FirstModule, SecondID,
+ SecondModule);
+ return true;
+ }
+
+ if (FirstDiffType != SecondDiffType) {
+ diagnoseSubMismatchDifferentDeclKinds(DR, FirstID, FirstModule, SecondID,
+ SecondModule);
+ return true;
+ }
+
+ assert(FirstDiffType == SecondDiffType);
+ switch (FirstDiffType) {
+ // Already handled.
+ case EndOfClass:
+ case Other:
+ // Cannot be contained by ObjCInterfaceDecl, invalid in this context.
+ case Field:
+ case TypeDef:
+ case Var:
+ // C++ only, invalid in this context.
+ case PublicSpecifer:
+ case PrivateSpecifer:
+ case ProtectedSpecifer:
+ case StaticAssert:
+ case CXXMethod:
+ case TypeAlias:
+ case Friend:
+ case FunctionTemplate:
+ llvm_unreachable("Invalid diff type");
+
+ case ObjCMethod: {
+ if (diagnoseSubMismatchObjCMethod(FirstID, FirstModule, SecondModule,
+ cast<ObjCMethodDecl>(FirstDecl),
+ cast<ObjCMethodDecl>(SecondDecl)))
+ return true;
+ break;
+ }
+ case ObjCIvar: {
+ if (diagnoseSubMismatchField(FirstID, FirstModule, SecondModule,
+ cast<FieldDecl>(FirstDecl),
+ cast<FieldDecl>(SecondDecl)))
+ return true;
+
+ // Check if the access match.
+ const ObjCIvarDecl *FirstIvar = cast<ObjCIvarDecl>(FirstDecl);
+ const ObjCIvarDecl *SecondIvar = cast<ObjCIvarDecl>(SecondDecl);
+ if (FirstIvar->getCanonicalAccessControl() !=
+ SecondIvar->getCanonicalAccessControl()) {
+ DiagError(FirstIvar->getLocation(), FirstIvar->getSourceRange(),
+ IVarAccess)
+ << FirstIvar->getName()
+ << (int)FirstIvar->getCanonicalAccessControl();
+ DiagNote(SecondIvar->getLocation(), SecondIvar->getSourceRange(),
+ IVarAccess)
+ << SecondIvar->getName()
+ << (int)SecondIvar->getCanonicalAccessControl();
+ return true;
+ }
+ break;
+ }
+ case ObjCProperty: {
+ if (diagnoseSubMismatchObjCProperty(FirstID, FirstModule, SecondModule,
+ cast<ObjCPropertyDecl>(FirstDecl),
+ cast<ObjCPropertyDecl>(SecondDecl)))
+ return true;
+ break;
+ }
+ }
+
+ Diag(FirstDecl->getLocation(),
+ diag::err_module_odr_violation_mismatch_decl_unknown)
+ << FirstID << FirstModule.empty() << FirstModule << FirstDiffType
+ << FirstDecl->getSourceRange();
+ Diag(SecondDecl->getLocation(),
+ diag::note_module_odr_violation_mismatch_decl_unknown)
+ << SecondModule.empty() << SecondModule << FirstDiffType
+ << SecondDecl->getSourceRange();
+ return true;
+}
+
+bool ODRDiagsEmitter::diagnoseMismatch(
+ const ObjCProtocolDecl *FirstProtocol,
+ const ObjCProtocolDecl *SecondProtocol,
+ const struct ObjCProtocolDecl::DefinitionData *SecondDD) const {
+ if (FirstProtocol == SecondProtocol)
+ return false;
+
+ std::string FirstModule = getOwningModuleNameForDiagnostic(FirstProtocol);
+ std::string SecondModule = getOwningModuleNameForDiagnostic(SecondProtocol);
+
+ const ObjCProtocolDecl::DefinitionData *FirstDD = &FirstProtocol->data();
+ assert(FirstDD && SecondDD && "Definitions without DefinitionData");
+ // Diagnostics from ObjCProtocol DefinitionData are emitted here.
+ if (FirstDD != SecondDD) {
+ // Check both protocols reference the same protocols.
+ const ObjCProtocolList &FirstProtocols =
+ FirstProtocol->getReferencedProtocols();
+ const ObjCProtocolList &SecondProtocols = SecondDD->ReferencedProtocols;
+ if (diagnoseSubMismatchProtocols(FirstProtocols, FirstProtocol, FirstModule,
+ SecondProtocols, SecondProtocol,
+ SecondModule))
+ return true;
+ }
+
+ auto PopulateHashes = [](DeclHashes &Hashes, const ObjCProtocolDecl *ID,
+ const DeclContext *DC) {
+ for (const Decl *D : ID->decls()) {
+ if (!ODRHash::isSubDeclToBeProcessed(D, DC))
+ continue;
+ Hashes.emplace_back(D, computeODRHash(D));
+ }
+ };
+
+ DeclHashes FirstHashes;
+ DeclHashes SecondHashes;
+ // Use definition as DeclContext because definitions are merged when
+ // DeclContexts are merged and separate when DeclContexts are separate.
+ PopulateHashes(FirstHashes, FirstProtocol, FirstProtocol->getDefinition());
+ PopulateHashes(SecondHashes, SecondProtocol, SecondProtocol->getDefinition());
+
+ DiffResult DR = FindTypeDiffs(FirstHashes, SecondHashes);
+ ODRMismatchDecl FirstDiffType = DR.FirstDiffType;
+ ODRMismatchDecl SecondDiffType = DR.SecondDiffType;
+ const Decl *FirstDecl = DR.FirstDecl;
+ const Decl *SecondDecl = DR.SecondDecl;
+
+ if (FirstDiffType == Other || SecondDiffType == Other) {
+ diagnoseSubMismatchUnexpected(DR, FirstProtocol, FirstModule,
+ SecondProtocol, SecondModule);
+ return true;
+ }
+
+ if (FirstDiffType != SecondDiffType) {
+ diagnoseSubMismatchDifferentDeclKinds(DR, FirstProtocol, FirstModule,
+ SecondProtocol, SecondModule);
+ return true;
+ }
+
+ assert(FirstDiffType == SecondDiffType);
+ switch (FirstDiffType) {
+ // Already handled.
+ case EndOfClass:
+ case Other:
+ // Cannot be contained by ObjCProtocolDecl, invalid in this context.
+ case Field:
+ case TypeDef:
+ case Var:
+ case ObjCIvar:
+ // C++ only, invalid in this context.
+ case PublicSpecifer:
+ case PrivateSpecifer:
+ case ProtectedSpecifer:
+ case StaticAssert:
+ case CXXMethod:
+ case TypeAlias:
+ case Friend:
+ case FunctionTemplate:
+ llvm_unreachable("Invalid diff type");
+ case ObjCMethod: {
+ if (diagnoseSubMismatchObjCMethod(FirstProtocol, FirstModule, SecondModule,
+ cast<ObjCMethodDecl>(FirstDecl),
+ cast<ObjCMethodDecl>(SecondDecl)))
+ return true;
+ break;
+ }
+ case ObjCProperty: {
+ if (diagnoseSubMismatchObjCProperty(FirstProtocol, FirstModule,
+ SecondModule,
+ cast<ObjCPropertyDecl>(FirstDecl),
+ cast<ObjCPropertyDecl>(SecondDecl)))
+ return true;
+ break;
+ }
+ }
+
+ Diag(FirstDecl->getLocation(),
+ diag::err_module_odr_violation_mismatch_decl_unknown)
+ << FirstProtocol << FirstModule.empty() << FirstModule << FirstDiffType
+ << FirstDecl->getSourceRange();
+ Diag(SecondDecl->getLocation(),
+ diag::note_module_odr_violation_mismatch_decl_unknown)
+ << SecondModule.empty() << SecondModule << FirstDiffType
+ << SecondDecl->getSourceRange();
+ return true;
+}
diff --git a/contrib/llvm-project/clang/lib/AST/ODRHash.cpp b/contrib/llvm-project/clang/lib/AST/ODRHash.cpp
index 735bcff8f113..2dbc259138a8 100644
--- a/contrib/llvm-project/clang/lib/AST/ODRHash.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ODRHash.cpp
@@ -72,7 +72,10 @@ void ODRHash::AddDeclarationNameImpl(DeclarationName Name) {
AddBoolean(S.isUnarySelector());
unsigned NumArgs = S.getNumArgs();
ID.AddInteger(NumArgs);
- for (unsigned i = 0; i < NumArgs; ++i) {
+ // Compare all selector slots. For selectors with arguments it means all arg
+ // slots. And if there are no arguments, compare the first-and-only slot.
+ unsigned SlotsToCheck = NumArgs > 0 ? NumArgs : 1;
+ for (unsigned i = 0; i < SlotsToCheck; ++i) {
const IdentifierInfo *II = S.getIdentifierInfoForSlot(i);
AddBoolean(II);
if (II) {
@@ -150,6 +153,7 @@ void ODRHash::AddTemplateName(TemplateName Name) {
case TemplateName::DependentTemplate:
case TemplateName::SubstTemplateTemplateParm:
case TemplateName::SubstTemplateTemplateParmPack:
+ case TemplateName::UsingTemplate:
break;
}
}
@@ -168,7 +172,17 @@ void ODRHash::AddTemplateArgument(TemplateArgument TA) {
AddDecl(TA.getAsDecl());
break;
case TemplateArgument::NullPtr:
- case TemplateArgument::Integral:
+ ID.AddPointer(nullptr);
+ break;
+ case TemplateArgument::Integral: {
+ // There are integrals (e.g.: _BitInt(128)) that cannot be represented as
+ // any builtin integral type, so we use the hash of APSInt instead.
+ TA.getAsIntegral().Profile(ID);
+ break;
+ }
+ case TemplateArgument::StructuralValue:
+ AddQualType(TA.getStructuralValueType());
+ AddStructuralValue(TA.getAsStructuralValue());
break;
case TemplateArgument::Template:
case TemplateArgument::TemplateExpansion:
@@ -285,9 +299,9 @@ public:
}
void VisitValueDecl(const ValueDecl *D) {
- if (!isa<FunctionDecl>(D)) {
- AddQualType(D->getType());
- }
+ if (auto *DD = dyn_cast<DeclaratorDecl>(D); DD && DD->getTypeSourceInfo())
+ AddQualType(DD->getTypeSourceInfo()->getType());
+
Inherited::VisitValueDecl(D);
}
@@ -333,6 +347,20 @@ public:
Inherited::VisitFieldDecl(D);
}
+ void VisitObjCIvarDecl(const ObjCIvarDecl *D) {
+ ID.AddInteger(D->getCanonicalAccessControl());
+ Inherited::VisitObjCIvarDecl(D);
+ }
+
+ void VisitObjCPropertyDecl(const ObjCPropertyDecl *D) {
+ ID.AddInteger(D->getPropertyAttributes());
+ ID.AddInteger(D->getPropertyImplementation());
+ AddQualType(D->getTypeSourceInfo()->getType());
+ AddDecl(D);
+
+ Inherited::VisitObjCPropertyDecl(D);
+ }
+
void VisitFunctionDecl(const FunctionDecl *D) {
// Handled by the ODRHash for FunctionDecl
ID.AddInteger(D->getODRHash());
@@ -346,6 +374,64 @@ public:
Inherited::VisitCXXMethodDecl(D);
}
+ void VisitObjCMethodDecl(const ObjCMethodDecl *Method) {
+ ID.AddInteger(Method->getDeclKind());
+ Hash.AddBoolean(Method->isInstanceMethod()); // false if class method
+ Hash.AddBoolean(Method->isVariadic());
+ Hash.AddBoolean(Method->isSynthesizedAccessorStub());
+ Hash.AddBoolean(Method->isDefined());
+ Hash.AddBoolean(Method->isDirectMethod());
+ Hash.AddBoolean(Method->isThisDeclarationADesignatedInitializer());
+ Hash.AddBoolean(Method->hasSkippedBody());
+
+ ID.AddInteger(llvm::to_underlying(Method->getImplementationControl()));
+ ID.AddInteger(Method->getMethodFamily());
+ ImplicitParamDecl *Cmd = Method->getCmdDecl();
+ Hash.AddBoolean(Cmd);
+ if (Cmd)
+ ID.AddInteger(llvm::to_underlying(Cmd->getParameterKind()));
+
+ ImplicitParamDecl *Self = Method->getSelfDecl();
+ Hash.AddBoolean(Self);
+ if (Self)
+ ID.AddInteger(llvm::to_underlying(Self->getParameterKind()));
+
+ AddDecl(Method);
+
+ if (Method->getReturnTypeSourceInfo())
+ AddQualType(Method->getReturnTypeSourceInfo()->getType());
+
+ ID.AddInteger(Method->param_size());
+ for (auto Param : Method->parameters())
+ Hash.AddSubDecl(Param);
+
+ if (Method->hasBody()) {
+ const bool IsDefinition = Method->isThisDeclarationADefinition();
+ Hash.AddBoolean(IsDefinition);
+ if (IsDefinition) {
+ Stmt *Body = Method->getBody();
+ Hash.AddBoolean(Body);
+ if (Body)
+ AddStmt(Body);
+
+ // Filter out sub-Decls which will not be processed in order to get an
+ // accurate count of Decl's.
+ llvm::SmallVector<const Decl *, 16> Decls;
+ for (Decl *SubDecl : Method->decls())
+ if (ODRHash::isSubDeclToBeProcessed(SubDecl, Method))
+ Decls.push_back(SubDecl);
+
+ ID.AddInteger(Decls.size());
+ for (auto SubDecl : Decls)
+ Hash.AddSubDecl(SubDecl);
+ }
+ } else {
+ Hash.AddBoolean(false);
+ }
+
+ Inherited::VisitObjCMethodDecl(Method);
+ }
+
void VisitTypedefNameDecl(const TypedefNameDecl *D) {
AddQualType(D->getUnderlyingType());
@@ -440,7 +526,7 @@ public:
// Only allow a small portion of Decl's to be processed. Remove this once
// all Decl's can be handled.
-bool ODRHash::isDeclToBeProcessed(const Decl *D, const DeclContext *Parent) {
+bool ODRHash::isSubDeclToBeProcessed(const Decl *D, const DeclContext *Parent) {
if (D->isImplicit()) return false;
if (D->getDeclContext() != Parent) return false;
@@ -459,6 +545,9 @@ bool ODRHash::isDeclToBeProcessed(const Decl *D, const DeclContext *Parent) {
case Decl::TypeAlias:
case Decl::Typedef:
case Decl::Var:
+ case Decl::ObjCMethod:
+ case Decl::ObjCIvar:
+ case Decl::ObjCProperty:
return true;
}
}
@@ -487,7 +576,7 @@ void ODRHash::AddCXXRecordDecl(const CXXRecordDecl *Record) {
// accurate count of Decl's.
llvm::SmallVector<const Decl *, 16> Decls;
for (Decl *SubDecl : Record->decls()) {
- if (isDeclToBeProcessed(SubDecl, Record)) {
+ if (isSubDeclToBeProcessed(SubDecl, Record)) {
Decls.push_back(SubDecl);
if (auto *Function = dyn_cast<FunctionDecl>(SubDecl)) {
// Compute/Preload ODRHash into FunctionDecl.
@@ -509,13 +598,58 @@ void ODRHash::AddCXXRecordDecl(const CXXRecordDecl *Record) {
ID.AddInteger(Record->getNumBases());
auto Bases = Record->bases();
- for (auto Base : Bases) {
- AddQualType(Base.getType());
+ for (const auto &Base : Bases) {
+ AddQualType(Base.getTypeSourceInfo()->getType());
ID.AddInteger(Base.isVirtual());
ID.AddInteger(Base.getAccessSpecifierAsWritten());
}
}
+void ODRHash::AddRecordDecl(const RecordDecl *Record) {
+ assert(!isa<CXXRecordDecl>(Record) &&
+ "For CXXRecordDecl should call AddCXXRecordDecl.");
+ AddDecl(Record);
+
+ // Filter out sub-Decls which will not be processed in order to get an
+ // accurate count of Decl's.
+ llvm::SmallVector<const Decl *, 16> Decls;
+ for (Decl *SubDecl : Record->decls()) {
+ if (isSubDeclToBeProcessed(SubDecl, Record))
+ Decls.push_back(SubDecl);
+ }
+
+ ID.AddInteger(Decls.size());
+ for (const Decl *SubDecl : Decls)
+ AddSubDecl(SubDecl);
+}
+
+void ODRHash::AddObjCInterfaceDecl(const ObjCInterfaceDecl *IF) {
+ AddDecl(IF);
+
+ auto *SuperClass = IF->getSuperClass();
+ AddBoolean(SuperClass);
+ if (SuperClass)
+ ID.AddInteger(SuperClass->getODRHash());
+
+ // Hash referenced protocols.
+ ID.AddInteger(IF->getReferencedProtocols().size());
+ for (const ObjCProtocolDecl *RefP : IF->protocols()) {
+ // Hash the name only as a referenced protocol can be a forward declaration.
+ AddDeclarationName(RefP->getDeclName());
+ }
+
+ // Filter out sub-Decls which will not be processed in order to get an
+ // accurate count of Decl's.
+ llvm::SmallVector<const Decl *, 16> Decls;
+ for (Decl *SubDecl : IF->decls())
+ if (isSubDeclToBeProcessed(SubDecl, IF))
+ Decls.push_back(SubDecl);
+
+ ID.AddInteger(Decls.size());
+ for (auto *SubDecl : Decls)
+ AddSubDecl(SubDecl);
+}
+
void ODRHash::AddFunctionDecl(const FunctionDecl *Function,
bool SkipBody) {
assert(Function && "Expecting non-null pointer.");
@@ -528,6 +662,10 @@ void ODRHash::AddFunctionDecl(const FunctionDecl *Function,
if (F->isFunctionTemplateSpecialization()) {
if (!isa<CXXMethodDecl>(DC)) return;
if (DC->getLexicalParent()->isFileContext()) return;
+ // Skip class scope explicit function template specializations,
+ // as they have not yet been instantiated.
+ if (F->getDependentSpecializationInfo())
+ return;
// Inline method specializations are the only supported
// specialization for now.
}
@@ -554,7 +692,7 @@ void ODRHash::AddFunctionDecl(const FunctionDecl *Function,
ID.AddInteger(Function->getStorageClass());
AddBoolean(Function->isInlineSpecified());
AddBoolean(Function->isVirtualAsWritten());
- AddBoolean(Function->isPure());
+ AddBoolean(Function->isPureVirtual());
AddBoolean(Function->isDeletedAsWritten());
AddBoolean(Function->isExplicitlyDefaulted());
@@ -563,7 +701,7 @@ void ODRHash::AddFunctionDecl(const FunctionDecl *Function,
AddQualType(Function->getReturnType());
ID.AddInteger(Function->param_size());
- for (auto Param : Function->parameters())
+ for (auto *Param : Function->parameters())
AddSubDecl(Param);
if (SkipBody) {
@@ -588,7 +726,7 @@ void ODRHash::AddFunctionDecl(const FunctionDecl *Function,
// accurate count of Decl's.
llvm::SmallVector<const Decl *, 16> Decls;
for (Decl *SubDecl : Function->decls()) {
- if (isDeclToBeProcessed(SubDecl, Function)) {
+ if (isSubDeclToBeProcessed(SubDecl, Function)) {
Decls.push_back(SubDecl);
}
}
@@ -608,13 +746,13 @@ void ODRHash::AddEnumDecl(const EnumDecl *Enum) {
AddBoolean(Enum->isScopedUsingClassTag());
if (Enum->getIntegerTypeSourceInfo())
- AddQualType(Enum->getIntegerType());
+ AddQualType(Enum->getIntegerType().getCanonicalType());
// Filter out sub-Decls which will not be processed in order to get an
// accurate count of Decl's.
llvm::SmallVector<const Decl *, 16> Decls;
for (Decl *SubDecl : Enum->decls()) {
- if (isDeclToBeProcessed(SubDecl, Enum)) {
+ if (isSubDeclToBeProcessed(SubDecl, Enum)) {
assert(isa<EnumConstantDecl>(SubDecl) && "Unexpected Decl");
Decls.push_back(SubDecl);
}
@@ -627,6 +765,31 @@ void ODRHash::AddEnumDecl(const EnumDecl *Enum) {
}
+void ODRHash::AddObjCProtocolDecl(const ObjCProtocolDecl *P) {
+ AddDecl(P);
+
+ // Hash referenced protocols.
+ ID.AddInteger(P->getReferencedProtocols().size());
+ for (const ObjCProtocolDecl *RefP : P->protocols()) {
+ // Hash the name only as a referenced protocol can be a forward declaration.
+ AddDeclarationName(RefP->getDeclName());
+ }
+
+ // Filter out sub-Decls which will not be processed in order to get an
+ // accurate count of Decl's.
+ llvm::SmallVector<const Decl *, 16> Decls;
+ for (Decl *SubDecl : P->decls()) {
+ if (isSubDeclToBeProcessed(SubDecl, P)) {
+ Decls.push_back(SubDecl);
+ }
+ }
+
+ ID.AddInteger(Decls.size());
+ for (auto *SubDecl : Decls) {
+ AddSubDecl(SubDecl);
+ }
+}
+
void ODRHash::AddDecl(const Decl *D) {
assert(D && "Expecting non-null pointer.");
D = D->getCanonicalDecl();
@@ -670,7 +833,7 @@ public:
}
}
- void AddDecl(Decl *D) {
+ void AddDecl(const Decl *D) {
Hash.AddBoolean(D);
if (D) {
Hash.AddDecl(D);
@@ -759,29 +922,7 @@ public:
void VisitType(const Type *T) {}
void VisitAdjustedType(const AdjustedType *T) {
- QualType Original = T->getOriginalType();
- QualType Adjusted = T->getAdjustedType();
-
- // The original type and pointee type can be the same, as in the case of
- // function pointers decaying to themselves. Set a bool and only process
- // the type once, to prevent doubling the work.
- SplitQualType split = Adjusted.split();
- if (auto Pointer = dyn_cast<PointerType>(split.Ty)) {
- if (Pointer->getPointeeType() == Original) {
- Hash.AddBoolean(true);
- ID.AddInteger(split.Quals.getAsOpaqueValue());
- AddQualType(Original);
- VisitType(T);
- return;
- }
- }
-
- // The original type and pointee type are different, such as in the case
- // of a array decaying to an element pointer. Set a bool to false and
- // process both types.
- Hash.AddBoolean(false);
- AddQualType(Original);
- AddQualType(Adjusted);
+ AddQualType(T->getOriginalType());
VisitType(T);
}
@@ -794,7 +935,7 @@ public:
void VisitArrayType(const ArrayType *T) {
AddQualType(T->getElementType());
- ID.AddInteger(T->getSizeModifier());
+ ID.AddInteger(llvm::to_underlying(T->getSizeModifier()));
VisitQualifiers(T->getIndexTypeQualifiers());
VisitType(T);
}
@@ -820,7 +961,6 @@ public:
void VisitAttributedType(const AttributedType *T) {
ID.AddInteger(T->getAttrKind());
AddQualType(T->getModifiedType());
- AddQualType(T->getEquivalentType());
VisitType(T);
}
@@ -842,7 +982,6 @@ public:
void VisitDecltypeType(const DecltypeType *T) {
AddStmt(T->getUnderlyingExpr());
- AddQualType(T->getUnderlyingType());
VisitType(T);
}
@@ -860,7 +999,7 @@ public:
ID.AddInteger(T->isConstrained());
if (T->isConstrained()) {
AddDecl(T->getTypeConstraintConcept());
- ID.AddInteger(T->getNumArgs());
+ ID.AddInteger(T->getTypeConstraintArguments().size());
for (const auto &TA : T->getTypeConstraintArguments())
Hash.AddTemplateArgument(TA);
}
@@ -933,7 +1072,7 @@ public:
auto Protocols = T->getProtocols();
ID.AddInteger(Protocols.size());
- for (auto Protocol : Protocols) {
+ for (auto *Protocol : Protocols) {
AddDecl(Protocol);
}
@@ -951,7 +1090,7 @@ public:
AddDecl(T->getDecl());
auto Protocols = T->getProtocols();
ID.AddInteger(Protocols.size());
- for (auto Protocol : Protocols) {
+ for (auto *Protocol : Protocols) {
AddDecl(Protocol);
}
@@ -994,13 +1133,13 @@ public:
void
VisitSubstTemplateTypeParmPackType(const SubstTemplateTypeParmPackType *T) {
- AddType(T->getReplacedParameter());
+ AddDecl(T->getAssociatedDecl());
Hash.AddTemplateArgument(T->getArgumentPack());
VisitType(T);
}
void VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) {
- AddType(T->getReplacedParameter());
+ AddDecl(T->getAssociatedDecl());
AddQualType(T->getReplacementType());
VisitType(T);
}
@@ -1014,7 +1153,7 @@ public:
void VisitEnumType(const EnumType *T) { VisitTagType(T); }
void VisitTemplateSpecializationType(const TemplateSpecializationType *T) {
- ID.AddInteger(T->getNumArgs());
+ ID.AddInteger(T->template_arguments().size());
for (const auto &TA : T->template_arguments()) {
Hash.AddTemplateArgument(TA);
}
@@ -1031,41 +1170,22 @@ public:
void VisitTypedefType(const TypedefType *T) {
AddDecl(T->getDecl());
- QualType UnderlyingType = T->getDecl()->getUnderlyingType();
- VisitQualifiers(UnderlyingType.getQualifiers());
- while (true) {
- if (const TypedefType *Underlying =
- dyn_cast<TypedefType>(UnderlyingType.getTypePtr())) {
- UnderlyingType = Underlying->getDecl()->getUnderlyingType();
- continue;
- }
- if (const ElaboratedType *Underlying =
- dyn_cast<ElaboratedType>(UnderlyingType.getTypePtr())) {
- UnderlyingType = Underlying->getNamedType();
- continue;
- }
-
- break;
- }
- AddType(UnderlyingType.getTypePtr());
VisitType(T);
}
void VisitTypeOfExprType(const TypeOfExprType *T) {
AddStmt(T->getUnderlyingExpr());
Hash.AddBoolean(T->isSugared());
- if (T->isSugared())
- AddQualType(T->desugar());
VisitType(T);
}
void VisitTypeOfType(const TypeOfType *T) {
- AddQualType(T->getUnderlyingType());
+ AddQualType(T->getUnmodifiedType());
VisitType(T);
}
void VisitTypeWithKeyword(const TypeWithKeyword *T) {
- ID.AddInteger(T->getKeyword());
+ ID.AddInteger(llvm::to_underlying(T->getKeyword()));
VisitType(T);
};
@@ -1079,7 +1199,7 @@ public:
const DependentTemplateSpecializationType *T) {
AddIdentifierInfo(T->getIdentifier());
AddNestedNameSpecifier(T->getQualifier());
- ID.AddInteger(T->getNumArgs());
+ ID.AddInteger(T->template_arguments().size());
for (const auto &TA : T->template_arguments()) {
Hash.AddTemplateArgument(TA);
}
@@ -1106,7 +1226,7 @@ public:
void VisitVectorType(const VectorType *T) {
AddQualType(T->getElementType());
ID.AddInteger(T->getNumElements());
- ID.AddInteger(T->getVectorKind());
+ ID.AddInteger(llvm::to_underlying(T->getVectorKind()));
VisitType(T);
}
@@ -1133,3 +1253,66 @@ void ODRHash::AddQualType(QualType T) {
void ODRHash::AddBoolean(bool Value) {
Bools.push_back(Value);
}
+
+void ODRHash::AddStructuralValue(const APValue &Value) {
+ ID.AddInteger(Value.getKind());
+
+ // 'APValue::Profile' uses pointer values to make hash for LValue and
+ // MemberPointer, but they differ from one compiler invocation to another.
+ // So, handle them explicitly here.
+
+ switch (Value.getKind()) {
+ case APValue::LValue: {
+ const APValue::LValueBase &Base = Value.getLValueBase();
+ if (!Base) {
+ ID.AddInteger(Value.getLValueOffset().getQuantity());
+ break;
+ }
+
+ assert(Base.is<const ValueDecl *>());
+ AddDecl(Base.get<const ValueDecl *>());
+ ID.AddInteger(Value.getLValueOffset().getQuantity());
+
+ bool OnePastTheEnd = Value.isLValueOnePastTheEnd();
+ if (Value.hasLValuePath()) {
+ QualType TypeSoFar = Base.getType();
+ for (APValue::LValuePathEntry E : Value.getLValuePath()) {
+ if (const auto *AT = TypeSoFar->getAsArrayTypeUnsafe()) {
+ if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
+ OnePastTheEnd |= CAT->getSize() == E.getAsArrayIndex();
+ TypeSoFar = AT->getElementType();
+ } else {
+ const Decl *D = E.getAsBaseOrMember().getPointer();
+ if (const auto *FD = dyn_cast<FieldDecl>(D)) {
+ if (FD->getParent()->isUnion())
+ ID.AddInteger(FD->getFieldIndex());
+ TypeSoFar = FD->getType();
+ } else {
+ TypeSoFar =
+ D->getASTContext().getRecordType(cast<CXXRecordDecl>(D));
+ }
+ }
+ }
+ }
+ unsigned Val = 0;
+ if (Value.isNullPointer())
+ Val |= 1 << 0;
+ if (OnePastTheEnd)
+ Val |= 1 << 1;
+ if (Value.hasLValuePath())
+ Val |= 1 << 2;
+ ID.AddInteger(Val);
+ break;
+ }
+ case APValue::MemberPointer: {
+ const ValueDecl *D = Value.getMemberPointerDecl();
+ assert(D);
+ AddDecl(D);
+ ID.AddInteger(
+ D->getASTContext().getMemberPointerPathAdjustment(Value).getQuantity());
+ break;
+ }
+ default:
+ Value.Profile(ID);
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/AST/OSLog.cpp b/contrib/llvm-project/clang/lib/AST/OSLog.cpp
index 094c0102854b..5e320416b30d 100644
--- a/contrib/llvm-project/clang/lib/AST/OSLog.cpp
+++ b/contrib/llvm-project/clang/lib/AST/OSLog.cpp
@@ -8,6 +8,7 @@
#include "clang/AST/FormatString.h"
#include "clang/Basic/Builtins.h"
#include "llvm/ADT/SmallBitVector.h"
+#include <optional>
using namespace clang;
@@ -20,11 +21,11 @@ class OSLogFormatStringHandler
private:
struct ArgData {
const Expr *E = nullptr;
- Optional<OSLogBufferItem::Kind> Kind;
- Optional<unsigned> Size;
- Optional<const Expr *> Count;
- Optional<const Expr *> Precision;
- Optional<const Expr *> FieldWidth;
+ std::optional<OSLogBufferItem::Kind> Kind;
+ std::optional<unsigned> Size;
+ std::optional<const Expr *> Count;
+ std::optional<const Expr *> Precision;
+ std::optional<const Expr *> FieldWidth;
unsigned char Flags = 0;
StringRef MaskType;
};
@@ -56,8 +57,8 @@ public:
}
bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
- const char *StartSpecifier,
- unsigned SpecifierLen) override {
+ const char *StartSpecifier, unsigned SpecifierLen,
+ const TargetInfo &) override {
if (!FS.consumesDataArgument() &&
FS.getConversionSpecifier().getKind() !=
clang::analyze_format_string::ConversionSpecifier::PrintErrno)
@@ -201,7 +202,7 @@ bool clang::analyze_os_log::computeOSLogBufferLayout(
}
const StringLiteral *Lit = cast<StringLiteral>(StringArg->IgnoreParenCasts());
- assert(Lit && (Lit->isAscii() || Lit->isUTF8()));
+ assert(Lit && (Lit->isOrdinary() || Lit->isUTF8()));
StringRef Data = Lit->getString();
OSLogFormatStringHandler H(VarArgs);
ParsePrintfString(H, Data.begin(), Data.end(), Ctx.getLangOpts(),
diff --git a/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp b/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp
index 50f40395a197..04f680a8f5c9 100644
--- a/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp
+++ b/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp
@@ -23,6 +23,7 @@
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
+#include <optional>
using namespace clang;
using namespace llvm;
@@ -102,6 +103,8 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
return static_cast<const OMPNocontextClause *>(C);
case OMPC_filter:
return static_cast<const OMPFilterClause *>(C);
+ case OMPC_ompx_dyn_cgroup_mem:
+ return static_cast<const OMPXDynCGroupMemClause *>(C);
case OMPC_default:
case OMPC_proc_bind:
case OMPC_safelen:
@@ -126,6 +129,8 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
case OMPC_write:
case OMPC_update:
case OMPC_capture:
+ case OMPC_compare:
+ case OMPC_fail:
case OMPC_seq_cst:
case OMPC_acq_rel:
case OMPC_acquire:
@@ -145,11 +150,15 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
case OMPC_use_device_ptr:
case OMPC_use_device_addr:
case OMPC_is_device_ptr:
+ case OMPC_has_device_addr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
case OMPC_reverse_offload:
case OMPC_dynamic_allocators:
case OMPC_atomic_default_mem_order:
+ case OMPC_at:
+ case OMPC_severity:
+ case OMPC_message:
case OMPC_device_type:
case OMPC_match:
case OMPC_nontemporal:
@@ -160,6 +169,9 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
case OMPC_exclusive:
case OMPC_uses_allocators:
case OMPC_affinity:
+ case OMPC_when:
+ case OMPC_bind:
+ case OMPC_ompx_bare:
break;
default:
break;
@@ -215,6 +227,8 @@ const OMPClauseWithPostUpdate *OMPClauseWithPostUpdate::get(const OMPClause *C)
case OMPC_write:
case OMPC_update:
case OMPC_capture:
+ case OMPC_compare:
+ case OMPC_fail:
case OMPC_seq_cst:
case OMPC_acq_rel:
case OMPC_acquire:
@@ -240,11 +254,15 @@ const OMPClauseWithPostUpdate *OMPClauseWithPostUpdate::get(const OMPClause *C)
case OMPC_use_device_ptr:
case OMPC_use_device_addr:
case OMPC_is_device_ptr:
+ case OMPC_has_device_addr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
case OMPC_reverse_offload:
case OMPC_dynamic_allocators:
case OMPC_atomic_default_mem_order:
+ case OMPC_at:
+ case OMPC_severity:
+ case OMPC_message:
case OMPC_device_type:
case OMPC_match:
case OMPC_nontemporal:
@@ -257,6 +275,8 @@ const OMPClauseWithPostUpdate *OMPClauseWithPostUpdate::get(const OMPClause *C)
case OMPC_exclusive:
case OMPC_uses_allocators:
case OMPC_affinity:
+ case OMPC_when:
+ case OMPC_bind:
break;
default:
break;
@@ -299,7 +319,7 @@ OMPClause::child_range OMPNumTasksClause::used_children() {
OMPClause::child_range OMPFinalClause::used_children() {
if (Stmt **C = getAddrOfExprAsWritten(getPreInitStmt()))
return child_range(C, C + 1);
- return child_range(&Condition, &Condition + 1);
+ return children();
}
OMPClause::child_range OMPPriorityClause::used_children() {
@@ -311,13 +331,13 @@ OMPClause::child_range OMPPriorityClause::used_children() {
OMPClause::child_range OMPNovariantsClause::used_children() {
if (Stmt **C = getAddrOfExprAsWritten(getPreInitStmt()))
return child_range(C, C + 1);
- return child_range(&Condition, &Condition + 1);
+ return children();
}
OMPClause::child_range OMPNocontextClause::used_children() {
if (Stmt **C = getAddrOfExprAsWritten(getPreInitStmt()))
return child_range(C, C + 1);
- return child_range(&Condition, &Condition + 1);
+ return children();
}
OMPOrderedClause *OMPOrderedClause::Create(const ASTContext &C, Expr *Num,
@@ -353,7 +373,7 @@ void OMPOrderedClause::setLoopNumIterations(unsigned NumLoop,
}
ArrayRef<Expr *> OMPOrderedClause::getLoopNumIterations() const {
- return llvm::makeArrayRef(getTrailingObjects<Expr *>(), NumberOfLoops);
+ return llvm::ArrayRef(getTrailingObjects<Expr *>(), NumberOfLoops);
}
void OMPOrderedClause::setLoopCounter(unsigned NumLoop, Expr *Counter) {
@@ -565,15 +585,17 @@ void OMPLinearClause::setUsedExprs(ArrayRef<Expr *> UE) {
OMPLinearClause *OMPLinearClause::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
- SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
- ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep,
- Stmt *PreInit, Expr *PostUpdate) {
+ SourceLocation ColonLoc, SourceLocation StepModifierLoc,
+ SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PL,
+ ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep, Stmt *PreInit,
+ Expr *PostUpdate) {
// Allocate space for 5 lists (Vars, Inits, Updates, Finals), 2 expressions
// (Step and CalcStep), list of used expression + step.
void *Mem =
C.Allocate(totalSizeToAlloc<Expr *>(5 * VL.size() + 2 + VL.size() + 1));
- OMPLinearClause *Clause = new (Mem) OMPLinearClause(
- StartLoc, LParenLoc, Modifier, ModifierLoc, ColonLoc, EndLoc, VL.size());
+ OMPLinearClause *Clause =
+ new (Mem) OMPLinearClause(StartLoc, LParenLoc, Modifier, ModifierLoc,
+ ColonLoc, StepModifierLoc, EndLoc, VL.size());
Clause->setVarRefs(VL);
Clause->setPrivates(PL);
Clause->setInits(IL);
@@ -625,6 +647,13 @@ OMPAlignedClause *OMPAlignedClause::CreateEmpty(const ASTContext &C,
return new (Mem) OMPAlignedClause(NumVars);
}
+OMPAlignClause *OMPAlignClause::Create(const ASTContext &C, Expr *A,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return new (C) OMPAlignClause(A, StartLoc, LParenLoc, EndLoc);
+}
+
void OMPCopyinClause::setSourceExprs(ArrayRef<Expr *> SrcExprs) {
assert(SrcExprs.size() == varlist_size() && "Number of source expressions is "
"not the same as the "
@@ -1025,19 +1054,19 @@ OMPDepobjClause *OMPDepobjClause::CreateEmpty(const ASTContext &C) {
OMPDependClause *
OMPDependClause::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
- Expr *DepModifier, OpenMPDependClauseKind DepKind,
- SourceLocation DepLoc, SourceLocation ColonLoc,
+ DependDataTy Data, Expr *DepModifier,
ArrayRef<Expr *> VL, unsigned NumLoops) {
void *Mem = C.Allocate(
totalSizeToAlloc<Expr *>(VL.size() + /*depend-modifier*/ 1 + NumLoops),
alignof(OMPDependClause));
OMPDependClause *Clause = new (Mem)
OMPDependClause(StartLoc, LParenLoc, EndLoc, VL.size(), NumLoops);
- Clause->setVarRefs(VL);
- Clause->setDependencyKind(DepKind);
- Clause->setDependencyLoc(DepLoc);
- Clause->setColonLoc(ColonLoc);
+ Clause->setDependencyKind(Data.DepKind);
+ Clause->setDependencyLoc(Data.DepLoc);
+ Clause->setColonLoc(Data.ColonLoc);
+ Clause->setOmpAllMemoryLoc(Data.OmpAllMemoryLoc);
Clause->setModifier(DepModifier);
+ Clause->setVarRefs(VL);
for (unsigned I = 0 ; I < NumLoops; ++I)
Clause->setLoopData(I, nullptr);
return Clause;
@@ -1112,7 +1141,7 @@ OMPMapClause *OMPMapClause::Create(
const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars,
ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists, ArrayRef<Expr *> UDMapperRefs,
- ArrayRef<OpenMPMapModifierKind> MapModifiers,
+ Expr *IteratorModifier, ArrayRef<OpenMPMapModifierKind> MapModifiers,
ArrayRef<SourceLocation> MapModifiersLoc,
NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId,
OpenMPMapClauseKind Type, bool TypeIsImplicit, SourceLocation TypeLoc) {
@@ -1135,7 +1164,7 @@ OMPMapClause *OMPMapClause::Create(
void *Mem = C.Allocate(
totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent>(
- 2 * Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ 2 * Sizes.NumVars + 1, Sizes.NumUniqueDeclarations,
Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
Sizes.NumComponents));
OMPMapClause *Clause = new (Mem)
@@ -1144,6 +1173,7 @@ OMPMapClause *OMPMapClause::Create(
Clause->setVarRefs(Vars);
Clause->setUDMapperRefs(UDMapperRefs);
+ Clause->setIteratorModifier(IteratorModifier);
Clause->setClauseInfo(Declarations, ComponentLists);
Clause->setMapType(Type);
Clause->setMapLoc(TypeLoc);
@@ -1156,10 +1186,12 @@ OMPMapClause::CreateEmpty(const ASTContext &C,
void *Mem = C.Allocate(
totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent>(
- 2 * Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ 2 * Sizes.NumVars + 1, Sizes.NumUniqueDeclarations,
Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
Sizes.NumComponents));
- return new (Mem) OMPMapClause(Sizes);
+ OMPMapClause *Clause = new (Mem) OMPMapClause(Sizes);
+ Clause->setIteratorModifier(nullptr);
+ return Clause;
}
OMPToClause *OMPToClause::Create(
@@ -1419,6 +1451,53 @@ OMPIsDevicePtrClause::CreateEmpty(const ASTContext &C,
return new (Mem) OMPIsDevicePtrClause(Sizes);
}
+OMPHasDeviceAddrClause *
+OMPHasDeviceAddrClause::Create(const ASTContext &C, const OMPVarListLocTy &Locs,
+ ArrayRef<Expr *> Vars,
+ ArrayRef<ValueDecl *> Declarations,
+ MappableExprComponentListsRef ComponentLists) {
+ OMPMappableExprListSizeTy Sizes;
+ Sizes.NumVars = Vars.size();
+ Sizes.NumUniqueDeclarations = getUniqueDeclarationsTotalNumber(Declarations);
+ Sizes.NumComponentLists = ComponentLists.size();
+ Sizes.NumComponents = getComponentsTotalNumber(ComponentLists);
+
+ // We need to allocate:
+ // NumVars x Expr* - we have an original list expression for each clause list
+ // entry.
+ // NumUniqueDeclarations x ValueDecl* - unique base declarations associated
+ // with each component list.
+ // (NumUniqueDeclarations + NumComponentLists) x unsigned - we specify the
+ // number of lists for each unique declaration and the size of each component
+ // list.
+ // NumComponents x MappableComponent - the total of all the components in all
+ // the lists.
+ void *Mem = C.Allocate(
+ totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
+ OMPClauseMappableExprCommon::MappableComponent>(
+ Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
+ Sizes.NumComponents));
+
+ auto *Clause = new (Mem) OMPHasDeviceAddrClause(Locs, Sizes);
+
+ Clause->setVarRefs(Vars);
+ Clause->setClauseInfo(Declarations, ComponentLists);
+ return Clause;
+}
+
+OMPHasDeviceAddrClause *
+OMPHasDeviceAddrClause::CreateEmpty(const ASTContext &C,
+ const OMPMappableExprListSizeTy &Sizes) {
+ void *Mem = C.Allocate(
+ totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
+ OMPClauseMappableExprCommon::MappableComponent>(
+ Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
+ Sizes.NumComponents));
+ return new (Mem) OMPHasDeviceAddrClause(Sizes);
+}
+
OMPNontemporalClause *OMPNontemporalClause::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation LParenLoc,
@@ -1564,18 +1643,19 @@ OMPAffinityClause *OMPAffinityClause::CreateEmpty(const ASTContext &C,
}
OMPInitClause *OMPInitClause::Create(const ASTContext &C, Expr *InteropVar,
- ArrayRef<Expr *> PrefExprs, bool IsTarget,
- bool IsTargetSync, SourceLocation StartLoc,
+ OMPInteropInfo &InteropInfo,
+ SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc,
SourceLocation EndLoc) {
- void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(PrefExprs.size() + 1));
- auto *Clause =
- new (Mem) OMPInitClause(IsTarget, IsTargetSync, StartLoc, LParenLoc,
- VarLoc, EndLoc, PrefExprs.size() + 1);
+ void *Mem =
+ C.Allocate(totalSizeToAlloc<Expr *>(InteropInfo.PreferTypes.size() + 1));
+ auto *Clause = new (Mem) OMPInitClause(
+ InteropInfo.IsTarget, InteropInfo.IsTargetSync, StartLoc, LParenLoc,
+ VarLoc, EndLoc, InteropInfo.PreferTypes.size() + 1);
Clause->setInteropVar(InteropVar);
- llvm::copy(PrefExprs, Clause->getTrailingObjects<Expr *>() + 1);
+ llvm::copy(InteropInfo.PreferTypes, Clause->getTrailingObjects<Expr *>() + 1);
return Clause;
}
@@ -1584,6 +1664,62 @@ OMPInitClause *OMPInitClause::CreateEmpty(const ASTContext &C, unsigned N) {
return new (Mem) OMPInitClause(N);
}
+OMPBindClause *
+OMPBindClause::Create(const ASTContext &C, OpenMPBindClauseKind K,
+ SourceLocation KLoc, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc) {
+ return new (C) OMPBindClause(K, KLoc, StartLoc, LParenLoc, EndLoc);
+}
+
+OMPBindClause *OMPBindClause::CreateEmpty(const ASTContext &C) {
+ return new (C) OMPBindClause();
+}
+
+OMPDoacrossClause *
+OMPDoacrossClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc,
+ OpenMPDoacrossClauseModifier DepType,
+ SourceLocation DepLoc, SourceLocation ColonLoc,
+ ArrayRef<Expr *> VL, unsigned NumLoops) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size() + NumLoops),
+ alignof(OMPDoacrossClause));
+ OMPDoacrossClause *Clause = new (Mem)
+ OMPDoacrossClause(StartLoc, LParenLoc, EndLoc, VL.size(), NumLoops);
+ Clause->setDependenceType(DepType);
+ Clause->setDependenceLoc(DepLoc);
+ Clause->setColonLoc(ColonLoc);
+ Clause->setVarRefs(VL);
+ for (unsigned I = 0; I < NumLoops; ++I)
+ Clause->setLoopData(I, nullptr);
+ return Clause;
+}
+
+OMPDoacrossClause *OMPDoacrossClause::CreateEmpty(const ASTContext &C,
+ unsigned N,
+ unsigned NumLoops) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N + NumLoops),
+ alignof(OMPDoacrossClause));
+ return new (Mem) OMPDoacrossClause(N, NumLoops);
+}
+
+void OMPDoacrossClause::setLoopData(unsigned NumLoop, Expr *Cnt) {
+ assert(NumLoop < NumLoops && "Loop index must be less number of loops.");
+ auto *It = std::next(getVarRefs().end(), NumLoop);
+ *It = Cnt;
+}
+
+Expr *OMPDoacrossClause::getLoopData(unsigned NumLoop) {
+ assert(NumLoop < NumLoops && "Loop index must be less number of loops.");
+ auto *It = std::next(getVarRefs().end(), NumLoop);
+ return *It;
+}
+
+const Expr *OMPDoacrossClause::getLoopData(unsigned NumLoop) const {
+ assert(NumLoop < NumLoops && "Loop index must be less number of loops.");
+ const auto *It = std::next(getVarRefs().end(), NumLoop);
+ return *It;
+}
+
//===----------------------------------------------------------------------===//
// OpenMP clauses printing methods
//===----------------------------------------------------------------------===//
@@ -1608,6 +1744,12 @@ void OMPClausePrinter::VisitOMPNumThreadsClause(OMPNumThreadsClause *Node) {
OS << ")";
}
+void OMPClausePrinter::VisitOMPAlignClause(OMPAlignClause *Node) {
+ OS << "align(";
+ Node->getAlignment()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
void OMPClausePrinter::VisitOMPSafelenClause(OMPSafelenClause *Node) {
OS << "safelen(";
Node->getSafelen()->printPretty(OS, nullptr, Policy, 0);
@@ -1623,7 +1765,7 @@ void OMPClausePrinter::VisitOMPSimdlenClause(OMPSimdlenClause *Node) {
void OMPClausePrinter::VisitOMPSizesClause(OMPSizesClause *Node) {
OS << "sizes(";
bool First = true;
- for (auto Size : Node->getSizesRefs()) {
+ for (auto *Size : Node->getSizesRefs()) {
if (!First)
OS << ", ";
Size->printPretty(OS, nullptr, Policy, 0);
@@ -1702,6 +1844,22 @@ void OMPClausePrinter::VisitOMPAtomicDefaultMemOrderClause(
<< ")";
}
+void OMPClausePrinter::VisitOMPAtClause(OMPAtClause *Node) {
+ OS << "at(" << getOpenMPSimpleClauseTypeName(OMPC_at, Node->getAtKind())
+ << ")";
+}
+
+void OMPClausePrinter::VisitOMPSeverityClause(OMPSeverityClause *Node) {
+ OS << "severity("
+ << getOpenMPSimpleClauseTypeName(OMPC_severity, Node->getSeverityKind())
+ << ")";
+}
+
+void OMPClausePrinter::VisitOMPMessageClause(OMPMessageClause *Node) {
+ OS << "message(\""
+ << cast<StringLiteral>(Node->getMessageString())->getString() << "\")";
+}
+
void OMPClausePrinter::VisitOMPScheduleClause(OMPScheduleClause *Node) {
OS << "schedule(";
if (Node->getFirstScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown) {
@@ -1765,6 +1923,20 @@ void OMPClausePrinter::VisitOMPCaptureClause(OMPCaptureClause *) {
OS << "capture";
}
+void OMPClausePrinter::VisitOMPCompareClause(OMPCompareClause *) {
+ OS << "compare";
+}
+
+void OMPClausePrinter::VisitOMPFailClause(OMPFailClause *Node) {
+ OS << "fail";
+ if (Node) {
+ OS << "(";
+ OS << getOpenMPSimpleClauseTypeName(
+ Node->getClauseKind(), static_cast<int>(Node->getFailParameter()));
+ OS << ")";
+ }
+}
+
void OMPClausePrinter::VisitOMPSeqCstClause(OMPSeqCstClause *) {
OS << "seq_cst";
}
@@ -1822,12 +1994,22 @@ void OMPClausePrinter::VisitOMPPriorityClause(OMPPriorityClause *Node) {
void OMPClausePrinter::VisitOMPGrainsizeClause(OMPGrainsizeClause *Node) {
OS << "grainsize(";
+ OpenMPGrainsizeClauseModifier Modifier = Node->getModifier();
+ if (Modifier != OMPC_GRAINSIZE_unknown) {
+ OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(), Modifier)
+ << ": ";
+ }
Node->getGrainsize()->printPretty(OS, nullptr, Policy, 0);
OS << ")";
}
void OMPClausePrinter::VisitOMPNumTasksClause(OMPNumTasksClause *Node) {
OS << "num_tasks(";
+ OpenMPNumTasksClauseModifier Modifier = Node->getModifier();
+ if (Modifier != OMPC_NUMTASKS_unknown) {
+ OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(), Modifier)
+ << ": ";
+ }
Node->getNumTasks()->printPretty(OS, nullptr, Policy, 0);
OS << ")";
}
@@ -2039,16 +2221,20 @@ void OMPClausePrinter::VisitOMPInReductionClause(OMPInReductionClause *Node) {
void OMPClausePrinter::VisitOMPLinearClause(OMPLinearClause *Node) {
if (!Node->varlist_empty()) {
OS << "linear";
+ VisitOMPClauseList(Node, '(');
+ if (Node->getModifierLoc().isValid() || Node->getStep() != nullptr) {
+ OS << ": ";
+ }
if (Node->getModifierLoc().isValid()) {
- OS << '('
- << getOpenMPSimpleClauseTypeName(OMPC_linear, Node->getModifier());
+ OS << getOpenMPSimpleClauseTypeName(OMPC_linear, Node->getModifier());
}
- VisitOMPClauseList(Node, '(');
- if (Node->getModifierLoc().isValid())
- OS << ')';
if (Node->getStep() != nullptr) {
- OS << ": ";
+ if (Node->getModifierLoc().isValid()) {
+ OS << ", ";
+ }
+ OS << "step(";
Node->getStep()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
}
OS << ")";
}
@@ -2101,11 +2287,23 @@ void OMPClausePrinter::VisitOMPDependClause(OMPDependClause *Node) {
DepModifier->printPretty(OS, nullptr, Policy);
OS << ", ";
}
- OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(),
- Node->getDependencyKind());
- if (!Node->varlist_empty()) {
+ OpenMPDependClauseKind DepKind = Node->getDependencyKind();
+ OpenMPDependClauseKind PrintKind = DepKind;
+ bool IsOmpAllMemory = false;
+ if (PrintKind == OMPC_DEPEND_outallmemory) {
+ PrintKind = OMPC_DEPEND_out;
+ IsOmpAllMemory = true;
+ } else if (PrintKind == OMPC_DEPEND_inoutallmemory) {
+ PrintKind = OMPC_DEPEND_inout;
+ IsOmpAllMemory = true;
+ }
+ OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(), PrintKind);
+ if (!Node->varlist_empty() || IsOmpAllMemory)
OS << " :";
- VisitOMPClauseList(Node, ' ');
+ VisitOMPClauseList(Node, ' ');
+ if (IsOmpAllMemory) {
+ OS << (Node->varlist_empty() ? " " : ",");
+ OS << "omp_all_memory";
}
OS << ")";
}
@@ -2121,16 +2319,27 @@ static void PrintMapper(raw_ostream &OS, T *Node,
OS << Node->getMapperIdInfo() << ')';
}
+template <typename T>
+static void PrintIterator(raw_ostream &OS, T *Node,
+ const PrintingPolicy &Policy) {
+ if (Expr *IteratorModifier = Node->getIteratorModifier())
+ IteratorModifier->printPretty(OS, nullptr, Policy);
+}
+
void OMPClausePrinter::VisitOMPMapClause(OMPMapClause *Node) {
if (!Node->varlist_empty()) {
OS << "map(";
if (Node->getMapType() != OMPC_MAP_unknown) {
for (unsigned I = 0; I < NumberOfOMPMapClauseModifiers; ++I) {
if (Node->getMapTypeModifier(I) != OMPC_MAP_MODIFIER_unknown) {
- OS << getOpenMPSimpleClauseTypeName(OMPC_map,
- Node->getMapTypeModifier(I));
- if (Node->getMapTypeModifier(I) == OMPC_MAP_MODIFIER_mapper)
- PrintMapper(OS, Node, Policy);
+ if (Node->getMapTypeModifier(I) == OMPC_MAP_MODIFIER_iterator) {
+ PrintIterator(OS, Node, Policy);
+ } else {
+ OS << getOpenMPSimpleClauseTypeName(OMPC_map,
+ Node->getMapTypeModifier(I));
+ if (Node->getMapTypeModifier(I) == OMPC_MAP_MODIFIER_mapper)
+ PrintMapper(OS, Node, Policy);
+ }
OS << ',';
}
}
@@ -2226,6 +2435,14 @@ void OMPClausePrinter::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *Node) {
}
}
+void OMPClausePrinter::VisitOMPHasDeviceAddrClause(OMPHasDeviceAddrClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "has_device_addr";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
void OMPClausePrinter::VisitOMPNontemporalClause(OMPNontemporalClause *Node) {
if (!Node->varlist_empty()) {
OS << "nontemporal";
@@ -2235,8 +2452,12 @@ void OMPClausePrinter::VisitOMPNontemporalClause(OMPNontemporalClause *Node) {
}
void OMPClausePrinter::VisitOMPOrderClause(OMPOrderClause *Node) {
- OS << "order(" << getOpenMPSimpleClauseTypeName(OMPC_order, Node->getKind())
- << ")";
+ OS << "order(";
+ if (Node->getModifier() != OMPC_ORDER_MODIFIER_unknown) {
+ OS << getOpenMPSimpleClauseTypeName(OMPC_order, Node->getModifier());
+ OS << ": ";
+ }
+ OS << getOpenMPSimpleClauseTypeName(OMPC_order, Node->getKind()) << ")";
}
void OMPClausePrinter::VisitOMPInclusiveClause(OMPInclusiveClause *Node) {
@@ -2295,6 +2516,59 @@ void OMPClausePrinter::VisitOMPFilterClause(OMPFilterClause *Node) {
OS << ")";
}
+void OMPClausePrinter::VisitOMPBindClause(OMPBindClause *Node) {
+ OS << "bind("
+ << getOpenMPSimpleClauseTypeName(OMPC_bind, unsigned(Node->getBindKind()))
+ << ")";
+}
+
+void OMPClausePrinter::VisitOMPXDynCGroupMemClause(
+ OMPXDynCGroupMemClause *Node) {
+ OS << "ompx_dyn_cgroup_mem(";
+ Node->getSize()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPDoacrossClause(OMPDoacrossClause *Node) {
+ OS << "doacross(";
+ OpenMPDoacrossClauseModifier DepType = Node->getDependenceType();
+
+ switch (DepType) {
+ case OMPC_DOACROSS_source:
+ OS << "source:";
+ break;
+ case OMPC_DOACROSS_sink:
+ OS << "sink:";
+ break;
+ case OMPC_DOACROSS_source_omp_cur_iteration:
+ OS << "source: omp_cur_iteration";
+ break;
+ case OMPC_DOACROSS_sink_omp_cur_iteration:
+ OS << "sink: omp_cur_iteration - 1";
+ break;
+ default:
+ llvm_unreachable("unknown docaross modifier");
+ }
+ VisitOMPClauseList(Node, ' ');
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPXAttributeClause(OMPXAttributeClause *Node) {
+ OS << "ompx_attribute(";
+ bool IsFirst = true;
+ for (auto &Attr : Node->getAttrs()) {
+ if (!IsFirst)
+ OS << ", ";
+ Attr->printPretty(OS, Policy);
+ IsFirst = false;
+ }
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPXBareClause(OMPXBareClause *Node) {
+ OS << "ompx_bare";
+}
+
void OMPTraitInfo::getAsVariantMatchInfo(ASTContext &ASTCtx,
VariantMatchInfo &VMI) const {
for (const OMPTraitSet &Set : Sets) {
@@ -2309,18 +2583,17 @@ void OMPTraitInfo::getAsVariantMatchInfo(ASTContext &ASTCtx,
TraitProperty::user_condition_unknown &&
"Ill-formed user condition, expected unknown trait property!");
- if (Optional<APSInt> CondVal =
+ if (std::optional<APSInt> CondVal =
Selector.ScoreOrCondition->getIntegerConstantExpr(ASTCtx))
- VMI.addTrait(CondVal->isNullValue()
- ? TraitProperty::user_condition_false
- : TraitProperty::user_condition_true,
+ VMI.addTrait(CondVal->isZero() ? TraitProperty::user_condition_false
+ : TraitProperty::user_condition_true,
"<condition>");
else
VMI.addTrait(TraitProperty::user_condition_false, "<condition>");
continue;
}
- Optional<llvm::APSInt> Score;
+ std::optional<llvm::APSInt> Score;
llvm::APInt *ScorePtr = nullptr;
if (Selector.ScoreOrCondition) {
if ((Score = Selector.ScoreOrCondition->getIntegerConstantExpr(ASTCtx)))
@@ -2342,8 +2615,6 @@ void OMPTraitInfo::getAsVariantMatchInfo(ASTContext &ASTCtx,
getOpenMPContextTraitPropertyForSelector(
Selector.Kind) &&
"Ill-formed construct selector!");
-
- VMI.ConstructTraits.push_back(Selector.Properties.front().Kind);
}
}
}
@@ -2424,7 +2695,7 @@ std::string OMPTraitInfo::getMangledName() const {
Property.RawString);
}
}
- return OS.str();
+ return MangledName;
}
OMPTraitInfo::OMPTraitInfo(StringRef MangledName) {
@@ -2474,14 +2745,18 @@ llvm::raw_ostream &clang::operator<<(llvm::raw_ostream &OS,
TargetOMPContext::TargetOMPContext(
ASTContext &ASTCtx, std::function<void(StringRef)> &&DiagUnknownTrait,
- const FunctionDecl *CurrentFunctionDecl)
- : OMPContext(ASTCtx.getLangOpts().OpenMPIsDevice,
+ const FunctionDecl *CurrentFunctionDecl,
+ ArrayRef<llvm::omp::TraitProperty> ConstructTraits)
+ : OMPContext(ASTCtx.getLangOpts().OpenMPIsTargetDevice,
ASTCtx.getTargetInfo().getTriple()),
FeatureValidityCheck([&](StringRef FeatureName) {
return ASTCtx.getTargetInfo().isValidFeatureName(FeatureName);
}),
DiagUnknownTrait(std::move(DiagUnknownTrait)) {
ASTCtx.getFunctionFeatureMap(FeatureMap, CurrentFunctionDecl);
+
+ for (llvm::omp::TraitProperty Property : ConstructTraits)
+ addTrait(Property);
}
bool TargetOMPContext::matchesISATrait(StringRef RawString) const {
diff --git a/contrib/llvm-project/clang/lib/AST/ParentMap.cpp b/contrib/llvm-project/clang/lib/AST/ParentMap.cpp
index 2ff5c9d8aeb5..3d6a1cc84c7b 100644
--- a/contrib/llvm-project/clang/lib/AST/ParentMap.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ParentMap.cpp
@@ -33,9 +33,11 @@ static void BuildParentMap(MapTy& M, Stmt* S,
switch (S->getStmtClass()) {
case Stmt::PseudoObjectExprClass: {
- assert(OVMode == OV_Transparent && "Should not appear alongside OVEs");
PseudoObjectExpr *POE = cast<PseudoObjectExpr>(S);
+ if (OVMode == OV_Opaque && M[POE->getSyntacticForm()])
+ break;
+
// If we are rebuilding the map, clear out any existing state.
if (M[POE->getSyntacticForm()])
for (Stmt *SubStmt : S->children())
@@ -133,8 +135,7 @@ void ParentMap::setParent(const Stmt *S, const Stmt *Parent) {
Stmt* ParentMap::getParent(Stmt* S) const {
MapTy* M = (MapTy*) Impl;
- MapTy::iterator I = M->find(S);
- return I == M->end() ? nullptr : I->second;
+ return M->lookup(S);
}
Stmt *ParentMap::getParentIgnoreParens(Stmt *S) const {
diff --git a/contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp b/contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp
index 4a3e0a99c8a6..21cfd5b1de6e 100644
--- a/contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp
@@ -99,7 +99,7 @@ class ParentMapContext::ParentMap {
return llvm::ArrayRef<DynTypedNode>();
}
if (const auto *V = I->second.template dyn_cast<ParentVector *>()) {
- return llvm::makeArrayRef(*V);
+ return llvm::ArrayRef(*V);
}
return getSingleDynTypedNodeFromParentMap(I->second);
}
@@ -252,7 +252,7 @@ public:
const auto *S = It->second.dyn_cast<const Stmt *>();
if (!S) {
if (auto *Vec = It->second.dyn_cast<ParentVector *>())
- return llvm::makeArrayRef(*Vec);
+ return llvm::ArrayRef(*Vec);
return getSingleDynTypedNodeFromParentMap(It->second);
}
const auto *P = dyn_cast<Expr>(S);
@@ -265,16 +265,6 @@ public:
}
};
-template <typename Tuple, std::size_t... Is>
-auto tuple_pop_front_impl(const Tuple &tuple, std::index_sequence<Is...>) {
- return std::make_tuple(std::get<1 + Is>(tuple)...);
-}
-
-template <typename Tuple> auto tuple_pop_front(const Tuple &tuple) {
- return tuple_pop_front_impl(
- tuple, std::make_index_sequence<std::tuple_size<Tuple>::value - 1>());
-}
-
template <typename T, typename... U> struct MatchParents {
static std::tuple<bool, DynTypedNodeList, const T *, const U *...>
match(const DynTypedNodeList &NodeList,
@@ -285,10 +275,11 @@ template <typename T, typename... U> struct MatchParents {
if (NextParentList.size() == 1) {
auto TailTuple = MatchParents<U...>::match(NextParentList, ParentMap);
if (std::get<bool>(TailTuple)) {
- return std::tuple_cat(
- std::make_tuple(true, std::get<DynTypedNodeList>(TailTuple),
- TypedNode),
- tuple_pop_front(tuple_pop_front(TailTuple)));
+ return std::apply(
+ [TypedNode](bool, DynTypedNodeList NodeList, auto... TupleTail) {
+ return std::make_tuple(true, NodeList, TypedNode, TupleTail...);
+ },
+ TailTuple);
}
}
}
@@ -330,6 +321,9 @@ template <>
DynTypedNode createDynTypedNode(const NestedNameSpecifierLoc &Node) {
return DynTypedNode::create(Node);
}
+template <> DynTypedNode createDynTypedNode(const ObjCProtocolLoc &Node) {
+ return DynTypedNode::create(Node);
+}
/// @}
/// A \c RecursiveASTVisitor that builds a map from nodes to their
@@ -389,21 +383,23 @@ private:
auto *Vector = NodeOrVector.template get<ParentVector *>();
// Skip duplicates for types that have memoization data.
// We must check that the type has memoization data before calling
- // std::find() because DynTypedNode::operator== can't compare all
+ // llvm::is_contained() because DynTypedNode::operator== can't compare all
// types.
bool Found = ParentStack.back().getMemoizationData() &&
- std::find(Vector->begin(), Vector->end(),
- ParentStack.back()) != Vector->end();
+ llvm::is_contained(*Vector, ParentStack.back());
if (!Found)
Vector->push_back(ParentStack.back());
}
}
+ template <typename T> static bool isNull(T Node) { return !Node; }
+ static bool isNull(ObjCProtocolLoc Node) { return false; }
+
template <typename T, typename MapNodeTy, typename BaseTraverseFn,
typename MapTy>
bool TraverseNode(T Node, MapNodeTy MapNode, BaseTraverseFn BaseTraverse,
MapTy *Parents) {
- if (!Node)
+ if (isNull(Node))
return true;
addParent(MapNode, Parents);
ParentStack.push_back(createDynTypedNode(Node));
@@ -429,6 +425,17 @@ private:
[&] { return VisitorBase::TraverseNestedNameSpecifierLoc(NNSLocNode); },
&Map.OtherParents);
}
+ bool TraverseAttr(Attr *AttrNode) {
+ return TraverseNode(
+ AttrNode, AttrNode, [&] { return VisitorBase::TraverseAttr(AttrNode); },
+ &Map.PointerParents);
+ }
+ bool TraverseObjCProtocolLoc(ObjCProtocolLoc ProtocolLocNode) {
+ return TraverseNode(
+ ProtocolLocNode, DynTypedNode::create(ProtocolLocNode),
+ [&] { return VisitorBase::TraverseObjCProtocolLoc(ProtocolLocNode); },
+ &Map.OtherParents);
+ }
// Using generic TraverseNode for Stmt would prevent data-recursion.
bool dataTraverseStmtPre(Stmt *StmtNode) {
diff --git a/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp b/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp
index 150dcbec5187..a4bb0d999d99 100644
--- a/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp
+++ b/contrib/llvm-project/clang/lib/AST/PrintfFormatString.cpp
@@ -140,7 +140,7 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
// Set the privacy flag if the privacy annotation in the
// comma-delimited segment is at least as strict as the privacy
// annotations in previous comma-delimited segments.
- if (MatchedStr.startswith("mask")) {
+ if (MatchedStr.starts_with("mask")) {
StringRef MaskType = MatchedStr.substr(sizeof("mask.") - 1);
unsigned Size = MaskType.size();
if (Warn && (Size == 0 || Size > 8))
@@ -326,6 +326,14 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
case 's': k = ConversionSpecifier::sArg; break;
case 'u': k = ConversionSpecifier::uArg; break;
case 'x': k = ConversionSpecifier::xArg; break;
+ // C23.
+ case 'b':
+ if (isFreeBSDKPrintf)
+ k = ConversionSpecifier::FreeBSDbArg; // int followed by char *
+ else
+ k = ConversionSpecifier::bArg;
+ break;
+ case 'B': k = ConversionSpecifier::BArg; break;
// POSIX specific.
case 'C': k = ConversionSpecifier::CArg; break;
case 'S': k = ConversionSpecifier::SArg; break;
@@ -337,11 +345,6 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
case '@': k = ConversionSpecifier::ObjCObjArg; break;
// Glibc specific.
case 'm': k = ConversionSpecifier::PrintErrno; break;
- // FreeBSD kernel specific.
- case 'b':
- if (isFreeBSDKPrintf)
- k = ConversionSpecifier::FreeBSDbArg; // int followed by char *
- break;
case 'r':
if (isFreeBSDKPrintf)
k = ConversionSpecifier::FreeBSDrArg; // int
@@ -428,7 +431,7 @@ bool clang::analyze_format_string::ParsePrintfString(FormatStringHandler &H,
continue;
// We have a format specifier. Pass it to the callback.
if (!H.HandlePrintfSpecifier(FSR.getValue(), FSR.getStart(),
- I - FSR.getStart()))
+ I - FSR.getStart(), Target))
return true;
}
assert(I == E && "Format string not exhausted");
@@ -497,7 +500,7 @@ ArgType PrintfSpecifier::getScalarArgType(ASTContext &Ctx,
case LengthModifier::AsShort:
if (Ctx.getTargetInfo().getTriple().isOSMSVCRT())
return Ctx.IntTy;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
return ArgType::Invalid();
}
@@ -711,8 +714,8 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
CS.setKind(ConversionSpecifier::sArg);
// Disable irrelevant flags
- HasAlternativeForm = 0;
- HasLeadingZeroes = 0;
+ HasAlternativeForm = false;
+ HasLeadingZeroes = false;
// Set the long length modifier for wide characters
if (QT->getPointeeType()->isWideCharType())
@@ -755,6 +758,7 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
case BuiltinType::BFloat16:
case BuiltinType::Float16:
case BuiltinType::Float128:
+ case BuiltinType::Ibm128:
case BuiltinType::ShortAccum:
case BuiltinType::Accum:
case BuiltinType::LongAccum:
@@ -796,6 +800,8 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define SIGNED_TYPE(Id, SingletonId)
#define UNSIGNED_TYPE(Id, SingletonId)
#define FLOATING_TYPE(Id, SingletonId)
@@ -843,7 +849,7 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
}
// Handle size_t, ptrdiff_t, etc. that have dedicated length modifiers in C99.
- if (isa<TypedefType>(QT) && (LangOpt.C99 || LangOpt.CPlusPlus11))
+ if (LangOpt.C99 || LangOpt.CPlusPlus11)
namedTypeToLengthModifier(QT, LM);
// If fixing the length modifier was enough, we might be done.
@@ -873,26 +879,24 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
// Set conversion specifier and disable any flags which do not apply to it.
// Let typedefs to char fall through to int, as %c is silly for uint8_t.
- if (!isa<TypedefType>(QT) && QT->isCharType()) {
+ if (!QT->getAs<TypedefType>() && QT->isCharType()) {
CS.setKind(ConversionSpecifier::cArg);
LM.setKind(LengthModifier::None);
Precision.setHowSpecified(OptionalAmount::NotSpecified);
- HasAlternativeForm = 0;
- HasLeadingZeroes = 0;
- HasPlusPrefix = 0;
+ HasAlternativeForm = false;
+ HasLeadingZeroes = false;
+ HasPlusPrefix = false;
}
// Test for Floating type first as LongDouble can pass isUnsignedIntegerType
else if (QT->isRealFloatingType()) {
CS.setKind(ConversionSpecifier::fArg);
- }
- else if (QT->isSignedIntegerType()) {
+ } else if (QT->isSignedIntegerType()) {
CS.setKind(ConversionSpecifier::dArg);
- HasAlternativeForm = 0;
- }
- else if (QT->isUnsignedIntegerType()) {
+ HasAlternativeForm = false;
+ } else if (QT->isUnsignedIntegerType()) {
CS.setKind(ConversionSpecifier::uArg);
- HasAlternativeForm = 0;
- HasPlusPrefix = 0;
+ HasAlternativeForm = false;
+ HasPlusPrefix = false;
} else {
llvm_unreachable("Unexpected type");
}
@@ -962,8 +966,10 @@ bool PrintfSpecifier::hasValidAlternativeForm() const {
if (!HasAlternativeForm)
return true;
- // Alternate form flag only valid with the oxXaAeEfFgG conversions
+ // Alternate form flag only valid with the bBoxXaAeEfFgG conversions
switch (CS.getKind()) {
+ case ConversionSpecifier::bArg:
+ case ConversionSpecifier::BArg:
case ConversionSpecifier::oArg:
case ConversionSpecifier::OArg:
case ConversionSpecifier::xArg:
@@ -989,8 +995,10 @@ bool PrintfSpecifier::hasValidLeadingZeros() const {
if (!HasLeadingZeroes)
return true;
- // Leading zeroes flag only valid with the diouxXaAeEfFgG conversions
+ // Leading zeroes flag only valid with the bBdiouxXaAeEfFgG conversions
switch (CS.getKind()) {
+ case ConversionSpecifier::bArg:
+ case ConversionSpecifier::BArg:
case ConversionSpecifier::dArg:
case ConversionSpecifier::DArg:
case ConversionSpecifier::iArg:
@@ -1081,8 +1089,10 @@ bool PrintfSpecifier::hasValidPrecision() const {
if (Precision.getHowSpecified() == OptionalAmount::NotSpecified)
return true;
- // Precision is only valid with the diouxXaAeEfFgGsP conversions
+ // Precision is only valid with the bBdiouxXaAeEfFgGsP conversions
switch (CS.getKind()) {
+ case ConversionSpecifier::bArg:
+ case ConversionSpecifier::BArg:
case ConversionSpecifier::dArg:
case ConversionSpecifier::DArg:
case ConversionSpecifier::iArg:
diff --git a/contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp b/contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp
index 9a1b418f5ac1..066377423df7 100644
--- a/contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp
+++ b/contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp
@@ -80,8 +80,12 @@ static bool getFullyQualifiedTemplateName(const ASTContext &Ctx,
Ctx, ArgTDecl, true, WithGlobalNsPrefix);
}
if (NNS) {
- TName = Ctx.getQualifiedTemplateName(NNS,
- /*TemplateKeyword=*/false, ArgTDecl);
+ TemplateName UnderlyingTN(ArgTDecl);
+ if (UsingShadowDecl *USD = TName.getAsUsingShadowDecl())
+ UnderlyingTN = TemplateName(USD);
+ TName =
+ Ctx.getQualifiedTemplateName(NNS,
+ /*TemplateKeyword=*/false, UnderlyingTN);
Changed = true;
}
return Changed;
@@ -125,11 +129,9 @@ static const Type *getFullyQualifiedTemplateType(const ASTContext &Ctx,
if (const auto *TST = dyn_cast<const TemplateSpecializationType>(TypePtr)) {
bool MightHaveChanged = false;
SmallVector<TemplateArgument, 4> FQArgs;
- for (TemplateSpecializationType::iterator I = TST->begin(), E = TST->end();
- I != E; ++I) {
- // Cheap to copy and potentially modified by
- // getFullyQualifedTemplateArgument.
- TemplateArgument Arg(*I);
+ // Cheap to copy and potentially modified by
+ // getFullyQualifedTemplateArgument.
+ for (TemplateArgument Arg : TST->template_arguments()) {
MightHaveChanged |= getFullyQualifiedTemplateArgument(
Ctx, Arg, WithGlobalNsPrefix);
FQArgs.push_back(Arg);
@@ -296,7 +298,7 @@ static NestedNameSpecifier *createNestedNameSpecifierForScopeOf(
} else if (const auto *TD = dyn_cast<TagDecl>(Outer)) {
return createNestedNameSpecifier(
Ctx, TD, FullyQualified, WithGlobalNsPrefix);
- } else if (dyn_cast<TranslationUnitDecl>(Outer)) {
+ } else if (isa<TranslationUnitDecl>(Outer)) {
// Context is the TU. Nothing needs to be done.
return nullptr;
} else {
@@ -438,12 +440,20 @@ QualType getFullyQualifiedType(QualType QT, const ASTContext &Ctx,
// elaborated type.
Qualifiers PrefixQualifiers = QT.getLocalQualifiers();
QT = QualType(QT.getTypePtr(), 0);
- ElaboratedTypeKeyword Keyword = ETK_None;
+ ElaboratedTypeKeyword Keyword = ElaboratedTypeKeyword::None;
if (const auto *ETypeInput = dyn_cast<ElaboratedType>(QT.getTypePtr())) {
QT = ETypeInput->getNamedType();
assert(!QT.hasLocalQualifiers());
Keyword = ETypeInput->getKeyword();
}
+
+ // We don't consider the alias introduced by `using a::X` as a new type.
+ // The qualified name is still a::X.
+ if (const auto *UT = QT->getAs<UsingType>()) {
+ QT = Ctx.getQualifiedType(UT->getUnderlyingType(), PrefixQualifiers);
+ return getFullyQualifiedType(QT, Ctx, WithGlobalNsPrefix);
+ }
+
// Create a nested name specifier if needed.
Prefix = createNestedNameSpecifierForScopeOf(Ctx, QT.getTypePtr(),
true /*FullyQualified*/,
@@ -461,7 +471,7 @@ QualType getFullyQualifiedType(QualType QT, const ASTContext &Ctx,
Ctx, QT.getTypePtr(), WithGlobalNsPrefix);
QT = QualType(TypePtr, 0);
}
- if (Prefix || Keyword != ETK_None) {
+ if (Prefix || Keyword != ElaboratedTypeKeyword::None) {
QT = Ctx.getElaboratedType(Keyword, Prefix, QT);
}
QT = Ctx.getQualifiedType(QT, PrefixQualifiers);
diff --git a/contrib/llvm-project/clang/lib/AST/Randstruct.cpp b/contrib/llvm-project/clang/lib/AST/Randstruct.cpp
new file mode 100644
index 000000000000..99c665f420e6
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Randstruct.cpp
@@ -0,0 +1,231 @@
+//===--- Randstruct.cpp ---------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the implementation for Clang's structure field layout
+// randomization.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Randstruct.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h" // For StaticAssertDecl
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/SmallVector.h"
+
+#include <algorithm>
+#include <random>
+#include <set>
+#include <sstream>
+#include <string>
+
+using clang::ASTContext;
+using clang::FieldDecl;
+using llvm::SmallVector;
+
+namespace {
+
+// FIXME: Replace this with some discovery once that mechanism exists.
+enum { CACHE_LINE = 64 };
+
+// The Bucket class holds the struct fields we're trying to fill to a
+// cache-line.
+class Bucket {
+ SmallVector<FieldDecl *, 64> Fields;
+ int Size = 0;
+
+public:
+ virtual ~Bucket() = default;
+
+ SmallVector<FieldDecl *, 64> &fields() { return Fields; }
+ void addField(FieldDecl *Field, int FieldSize);
+ virtual bool canFit(int FieldSize) const {
+ return Size + FieldSize <= CACHE_LINE;
+ }
+ virtual bool isBitfieldRun() const { return false; }
+ bool full() const { return Size >= CACHE_LINE; }
+};
+
+void Bucket::addField(FieldDecl *Field, int FieldSize) {
+ Size += FieldSize;
+ Fields.push_back(Field);
+}
+
+struct BitfieldRunBucket : public Bucket {
+ bool canFit(int FieldSize) const override { return true; }
+ bool isBitfieldRun() const override { return true; }
+};
+
+void randomizeStructureLayoutImpl(const ASTContext &Context,
+ llvm::SmallVectorImpl<FieldDecl *> &FieldsOut,
+ std::mt19937 &RNG) {
+ // All of the Buckets produced by best-effort cache-line algorithm.
+ SmallVector<std::unique_ptr<Bucket>, 16> Buckets;
+
+ // The current bucket of fields that we are trying to fill to a cache-line.
+ std::unique_ptr<Bucket> CurrentBucket;
+
+ // The current bucket containing the run of adjacent bitfields to ensure they
+ // remain adjacent.
+ std::unique_ptr<BitfieldRunBucket> CurrentBitfieldRun;
+
+ // Tracks the number of fields that we failed to fit to the current bucket,
+ // and thus still need to be added later.
+ size_t Skipped = 0;
+
+ while (!FieldsOut.empty()) {
+ // If we've Skipped more fields than we have remaining to place, that means
+ // that they can't fit in our current bucket, and we need to start a new
+ // one.
+ if (Skipped >= FieldsOut.size()) {
+ Skipped = 0;
+ Buckets.push_back(std::move(CurrentBucket));
+ }
+
+ // Take the first field that needs to be put in a bucket.
+ auto FieldIter = FieldsOut.begin();
+ FieldDecl *FD = *FieldIter;
+
+ if (FD->isBitField() && !FD->isZeroLengthBitField(Context)) {
+ // Start a bitfield run if this is the first bitfield we have found.
+ if (!CurrentBitfieldRun)
+ CurrentBitfieldRun = std::make_unique<BitfieldRunBucket>();
+
+ // We've placed the field, and can remove it from the "awaiting Buckets"
+ // vector called "Fields."
+ CurrentBitfieldRun->addField(FD, /*FieldSize is irrelevant here*/ 1);
+ FieldsOut.erase(FieldIter);
+ continue;
+ }
+
+ // Else, current field is not a bitfield. If we were previously in a
+ // bitfield run, end it.
+ if (CurrentBitfieldRun)
+ Buckets.push_back(std::move(CurrentBitfieldRun));
+
+ // If we don't have a bucket, make one.
+ if (!CurrentBucket)
+ CurrentBucket = std::make_unique<Bucket>();
+
+ uint64_t Width = Context.getTypeInfo(FD->getType()).Width;
+ if (Width >= CACHE_LINE) {
+ std::unique_ptr<Bucket> OverSized = std::make_unique<Bucket>();
+ OverSized->addField(FD, Width);
+ FieldsOut.erase(FieldIter);
+ Buckets.push_back(std::move(OverSized));
+ continue;
+ }
+
+ // If it fits, add it.
+ if (CurrentBucket->canFit(Width)) {
+ CurrentBucket->addField(FD, Width);
+ FieldsOut.erase(FieldIter);
+
+ // If it's now full, tie off the bucket.
+ if (CurrentBucket->full()) {
+ Skipped = 0;
+ Buckets.push_back(std::move(CurrentBucket));
+ }
+ } else {
+ // We can't fit it in our current bucket. Move to the end for processing
+ // later.
+ ++Skipped; // Mark it skipped.
+ FieldsOut.push_back(FD);
+ FieldsOut.erase(FieldIter);
+ }
+ }
+
+ // Done processing the fields awaiting a bucket.
+
+ // If we were filling a bucket, tie it off.
+ if (CurrentBucket)
+ Buckets.push_back(std::move(CurrentBucket));
+
+ // If we were processing a bitfield run bucket, tie it off.
+ if (CurrentBitfieldRun)
+ Buckets.push_back(std::move(CurrentBitfieldRun));
+
+ std::shuffle(std::begin(Buckets), std::end(Buckets), RNG);
+
+ // Produce the new ordering of the elements from the Buckets.
+ SmallVector<FieldDecl *, 16> FinalOrder;
+ for (const std::unique_ptr<Bucket> &B : Buckets) {
+ llvm::SmallVectorImpl<FieldDecl *> &RandFields = B->fields();
+ if (!B->isBitfieldRun())
+ std::shuffle(std::begin(RandFields), std::end(RandFields), RNG);
+
+ FinalOrder.insert(FinalOrder.end(), RandFields.begin(), RandFields.end());
+ }
+
+ FieldsOut = FinalOrder;
+}
+
+} // anonymous namespace
+
+namespace clang {
+namespace randstruct {
+
+bool randomizeStructureLayout(const ASTContext &Context, RecordDecl *RD,
+ SmallVectorImpl<Decl *> &FinalOrdering) {
+ SmallVector<FieldDecl *, 64> RandomizedFields;
+ SmallVector<Decl *, 8> PostRandomizedFields;
+
+ unsigned TotalNumFields = 0;
+ for (Decl *D : RD->decls()) {
+ ++TotalNumFields;
+ if (auto *FD = dyn_cast<FieldDecl>(D))
+ RandomizedFields.push_back(FD);
+ else if (isa<StaticAssertDecl>(D) || isa<IndirectFieldDecl>(D))
+ PostRandomizedFields.push_back(D);
+ else
+ FinalOrdering.push_back(D);
+ }
+
+ if (RandomizedFields.empty())
+ return false;
+
+ // Struct might end with a flexible array or an array of size 0 or 1,
+ // in which case we don't want to randomize it.
+ FieldDecl *FlexibleArray =
+ RD->hasFlexibleArrayMember() ? RandomizedFields.pop_back_val() : nullptr;
+ if (!FlexibleArray) {
+ if (const auto *CA =
+ dyn_cast<ConstantArrayType>(RandomizedFields.back()->getType()))
+ if (CA->getSize().sle(2))
+ FlexibleArray = RandomizedFields.pop_back_val();
+ }
+
+ std::string Seed =
+ Context.getLangOpts().RandstructSeed + RD->getNameAsString();
+ std::seed_seq SeedSeq(Seed.begin(), Seed.end());
+ std::mt19937 RNG(SeedSeq);
+
+ randomizeStructureLayoutImpl(Context, RandomizedFields, RNG);
+
+ // Plorp the randomized decls into the final ordering.
+ FinalOrdering.insert(FinalOrdering.end(), RandomizedFields.begin(),
+ RandomizedFields.end());
+
+ // Add fields that belong towards the end of the RecordDecl.
+ FinalOrdering.insert(FinalOrdering.end(), PostRandomizedFields.begin(),
+ PostRandomizedFields.end());
+
+ // Add back the flexible array.
+ if (FlexibleArray)
+ FinalOrdering.push_back(FlexibleArray);
+
+ assert(TotalNumFields == FinalOrdering.size() &&
+ "Decl count has been altered after Randstruct randomization!");
+ (void)TotalNumFields;
+ return true;
+}
+
+} // end namespace randstruct
+} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/AST/RawCommentList.cpp b/contrib/llvm-project/clang/lib/AST/RawCommentList.cpp
index a8d15036cab9..dffa007b6588 100644
--- a/contrib/llvm-project/clang/lib/AST/RawCommentList.cpp
+++ b/contrib/llvm-project/clang/lib/AST/RawCommentList.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/CommentSema.h"
#include "clang/Basic/CharInfo.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Allocator.h"
using namespace clang;
@@ -140,8 +141,8 @@ RawComment::RawComment(const SourceManager &SourceMgr, SourceRange SR,
Kind = K.first;
IsTrailingComment |= K.second;
- IsAlmostTrailingComment = RawText.startswith("//<") ||
- RawText.startswith("/*<");
+ IsAlmostTrailingComment =
+ RawText.starts_with("//<") || RawText.starts_with("/*<");
} else {
Kind = RCK_Merged;
IsTrailingComment =
@@ -362,6 +363,24 @@ std::string RawComment::getFormattedText(const SourceManager &SourceMgr,
if (CommentText.empty())
return "";
+ std::string Result;
+ for (const RawComment::CommentLine &Line :
+ getFormattedLines(SourceMgr, Diags))
+ Result += Line.Text + "\n";
+
+ auto LastChar = Result.find_last_not_of('\n');
+ Result.erase(LastChar + 1, Result.size());
+
+ return Result;
+}
+
+std::vector<RawComment::CommentLine>
+RawComment::getFormattedLines(const SourceManager &SourceMgr,
+ DiagnosticsEngine &Diags) const {
+ llvm::StringRef CommentText = getRawText(SourceMgr);
+ if (CommentText.empty())
+ return {};
+
llvm::BumpPtrAllocator Allocator;
// We do not parse any commands, so CommentOptions are ignored by
// comments::Lexer. Therefore, we just use default-constructed options.
@@ -371,13 +390,23 @@ std::string RawComment::getFormattedText(const SourceManager &SourceMgr,
CommentText.begin(), CommentText.end(),
/*ParseCommands=*/false);
- std::string Result;
+ std::vector<RawComment::CommentLine> Result;
// A column number of the first non-whitespace token in the comment text.
// We skip whitespace up to this column, but keep the whitespace after this
// column. IndentColumn is calculated when lexing the first line and reused
// for the rest of lines.
unsigned IndentColumn = 0;
+ // Record the line number of the last processed comment line.
+ // For block-style comments, an extra newline token will be produced after
+ // the end-comment marker, e.g.:
+ // /** This is a multi-line comment block.
+ // The lexer will produce two newline tokens here > */
+ // previousLine will record the line number when we previously saw a newline
+ // token and recorded a comment line. If we see another newline token on the
+ // same line, don't record anything in between.
+ unsigned PreviousLine = 0;
+
// Processes one line of the comment and adds it to the result.
// Handles skipping the indent at the start of the line.
// Returns false when eof is reached and true otherwise.
@@ -389,9 +418,14 @@ std::string RawComment::getFormattedText(const SourceManager &SourceMgr,
if (Tok.is(comments::tok::eof))
return false;
if (Tok.is(comments::tok::newline)) {
- Result += "\n";
+ PresumedLoc Loc = SourceMgr.getPresumedLoc(Tok.getLocation());
+ if (Loc.getLine() != PreviousLine) {
+ Result.emplace_back("", Loc, Loc);
+ PreviousLine = Loc.getLine();
+ }
return true;
}
+ SmallString<124> Line;
llvm::StringRef TokText = L.getSpelling(Tok, SourceMgr);
bool LocInvalid = false;
unsigned TokColumn =
@@ -417,32 +451,35 @@ std::string RawComment::getFormattedText(const SourceManager &SourceMgr,
WhitespaceLen,
std::max<int>(static_cast<int>(IndentColumn) - TokColumn, 0));
llvm::StringRef Trimmed = TokText.drop_front(SkipLen);
- Result += Trimmed;
+ Line += Trimmed;
+ // Get the beginning location of the adjusted comment line.
+ PresumedLoc Begin =
+ SourceMgr.getPresumedLoc(Tok.getLocation().getLocWithOffset(SkipLen));
+
// Lex all tokens in the rest of the line.
for (L.lex(Tok); Tok.isNot(comments::tok::eof); L.lex(Tok)) {
if (Tok.is(comments::tok::newline)) {
- Result += "\n";
+ // Get the ending location of the comment line.
+ PresumedLoc End = SourceMgr.getPresumedLoc(Tok.getLocation());
+ if (End.getLine() != PreviousLine) {
+ Result.emplace_back(Line, Begin, End);
+ PreviousLine = End.getLine();
+ }
return true;
}
- Result += L.getSpelling(Tok, SourceMgr);
+ Line += L.getSpelling(Tok, SourceMgr);
}
+ PresumedLoc End = SourceMgr.getPresumedLoc(Tok.getLocation());
+ Result.emplace_back(Line, Begin, End);
// We've reached the end of file token.
return false;
};
- auto DropTrailingNewLines = [](std::string &Str) {
- while (!Str.empty() && Str.back() == '\n')
- Str.pop_back();
- };
-
// Process first line separately to remember indent for the following lines.
- if (!LexLine(/*IsFirstLine=*/true)) {
- DropTrailingNewLines(Result);
+ if (!LexLine(/*IsFirstLine=*/true))
return Result;
- }
// Process the rest of the lines.
while (LexLine(/*IsFirstLine=*/false))
;
- DropTrailingNewLines(Result);
return Result;
}
diff --git a/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp b/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp
index 972690becf9e..6dfaadd92e79 100644
--- a/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp
@@ -58,13 +58,13 @@ struct BaseSubobjectInfo {
/// as DWARF, lacks all the information that was available at compile time, such
/// as alignment attributes on fields and pragmas in effect.
struct ExternalLayout {
- ExternalLayout() : Size(0), Align(0) {}
+ ExternalLayout() = default;
/// Overall record size in bits.
- uint64_t Size;
+ uint64_t Size = 0;
/// Overall record alignment in bits.
- uint64_t Align;
+ uint64_t Align = 0;
/// Record field offsets in bits.
llvm::DenseMap<const FieldDecl *, uint64_t> FieldOffsets;
@@ -240,7 +240,7 @@ EmptySubobjectMap::CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD,
return true;
const ClassVectorTy &Classes = I->second;
- if (llvm::find(Classes, RD) == Classes.end())
+ if (!llvm::is_contained(Classes, RD))
return true;
// There is already an empty class of the same type at this offset.
@@ -1059,10 +1059,10 @@ void ItaniumRecordLayoutBuilder::LayoutNonVirtualBases(
// primary base, add it in now.
} else if (RD->isDynamicClass()) {
assert(DataSize == 0 && "Vtable pointer must be at offset zero!");
- CharUnits PtrWidth =
- Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
- CharUnits PtrAlign =
- Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(0));
+ CharUnits PtrWidth = Context.toCharUnitsFromBits(
+ Context.getTargetInfo().getPointerWidth(LangAS::Default));
+ CharUnits PtrAlign = Context.toCharUnitsFromBits(
+ Context.getTargetInfo().getPointerAlign(LangAS::Default));
EnsureVTablePointerAlignment(PtrAlign);
HasOwnVFPtr = true;
@@ -1223,7 +1223,7 @@ ItaniumRecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
// Per GCC's documentation, it only applies to non-static data members.
return (Packed && ((Context.getLangOpts().getClangABICompat() <=
LangOptions::ClangABI::Ver6) ||
- Context.getTargetInfo().getTriple().isPS4() ||
+ Context.getTargetInfo().getTriple().isPS() ||
Context.getTargetInfo().getTriple().isOSAIX()))
? CharUnits::One()
: UnpackedAlign;
@@ -1261,7 +1261,9 @@ ItaniumRecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
(!HasExternalLayout || Offset == CharUnits::Zero()) &&
EmptySubobjects->CanPlaceBaseAtOffset(Base, CharUnits::Zero())) {
setSize(std::max(getSize(), Layout.getSize()));
- UpdateAlignment(BaseAlign, UnpackedAlignTo, PreferredBaseAlign);
+ // On PS4/PS5, don't update the alignment, to preserve compatibility.
+ if (!Context.getTargetInfo().getTriple().isPS())
+ UpdateAlignment(BaseAlign, UnpackedAlignTo, PreferredBaseAlign);
return CharUnits::Zero();
}
@@ -1538,7 +1540,7 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
TypeInfo FieldInfo = Context.getTypeInfo(D->getType());
uint64_t StorageUnitSize = FieldInfo.Width;
unsigned FieldAlign = FieldInfo.Align;
- bool AlignIsRequired = FieldInfo.AlignIsRequired;
+ bool AlignIsRequired = FieldInfo.isAlignRequired();
// UnfilledBitsInLastUnit is the difference between the end of the
// last allocated bitfield (i.e. the first bit offset available for
@@ -1775,11 +1777,18 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
!D->getIdentifier())
FieldAlign = UnpackedFieldAlign = 1;
- // On AIX, zero-width bitfields pad out to the alignment boundary, but then
- // do not affect overall record alignment if there is a pragma pack or
- // pragma align(packed).
- if (isAIXLayout(Context) && !MaxFieldAlignment.isZero() && !FieldSize)
- FieldAlign = std::min(FieldAlign, MaxFieldAlignmentInBits);
+ // On AIX, zero-width bitfields pad out to the natural alignment boundary,
+ // but do not increase the alignment greater than the MaxFieldAlignment, or 1
+ // if packed.
+ if (isAIXLayout(Context) && !FieldSize) {
+ if (FieldPacked)
+ FieldAlign = 1;
+ if (!MaxFieldAlignment.isZero()) {
+ UnpackedFieldAlign =
+ std::min(UnpackedFieldAlign, MaxFieldAlignmentInBits);
+ FieldAlign = std::min(FieldAlign, MaxFieldAlignmentInBits);
+ }
+ }
// Diagnose differences in layout due to padding or packing.
if (!UseExternalLayout)
@@ -1844,9 +1853,8 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
bool InsertExtraPadding) {
auto *FieldClass = D->getType()->getAsCXXRecordDecl();
- bool PotentiallyOverlapping = D->hasAttr<NoUniqueAddressAttr>() && FieldClass;
bool IsOverlappingEmptyField =
- PotentiallyOverlapping && FieldClass->isEmpty();
+ D->isPotentiallyOverlapping() && FieldClass->isEmpty();
CharUnits FieldOffset =
(IsUnion || IsOverlappingEmptyField) ? CharUnits::Zero() : getDataSize();
@@ -1880,9 +1888,9 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
UnfilledBitsInLastUnit = 0;
LastBitfieldStorageUnitSize = 0;
- bool FieldPacked = Packed || D->hasAttr<PackedAttr>();
+ llvm::Triple Target = Context.getTargetInfo().getTriple();
- bool AlignIsRequired = false;
+ AlignRequirementKind AlignRequirement = AlignRequirementKind::None;
CharUnits FieldSize;
CharUnits FieldAlign;
// The amount of this class's dsize occupied by the field.
@@ -1897,23 +1905,17 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
// aligned appropriately for their element type.
EffectiveFieldSize = FieldSize =
IsIncompleteArrayType ? CharUnits::Zero() : TI.Width;
- AlignIsRequired = TI.AlignIsRequired;
+ AlignRequirement = TI.AlignRequirement;
};
if (D->getType()->isIncompleteArrayType()) {
setDeclInfo(true /* IsIncompleteArrayType */);
- } else if (const ReferenceType *RT = D->getType()->getAs<ReferenceType>()) {
- unsigned AS = Context.getTargetAddressSpace(RT->getPointeeType());
- EffectiveFieldSize = FieldSize = Context.toCharUnitsFromBits(
- Context.getTargetInfo().getPointerWidth(AS));
- FieldAlign = Context.toCharUnitsFromBits(
- Context.getTargetInfo().getPointerAlign(AS));
} else {
setDeclInfo(false /* IsIncompleteArrayType */);
// A potentially-overlapping field occupies its dsize or nvsize, whichever
// is larger.
- if (PotentiallyOverlapping) {
+ if (D->isPotentiallyOverlapping()) {
const ASTRecordLayout &Layout = Context.getASTRecordLayout(FieldClass);
EffectiveFieldSize =
std::max(Layout.getNonVirtualSize(), Layout.getDataSize());
@@ -1947,7 +1949,7 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
// Since the combination of -mms-bitfields together with structs
// like max_align_t (which contains a long double) for mingw is
- // quite comon (and GCC handles it silently), just handle it
+ // quite common (and GCC handles it silently), just handle it
// silently there. For other targets that have ms_struct enabled
// (most probably via a pragma or attribute), trigger a diagnostic
// that defaults to an error.
@@ -1961,6 +1963,27 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
}
}
+ bool FieldPacked = (Packed && (!FieldClass || FieldClass->isPOD() ||
+ FieldClass->hasAttr<PackedAttr>() ||
+ Context.getLangOpts().getClangABICompat() <=
+ LangOptions::ClangABI::Ver15 ||
+ Target.isPS() || Target.isOSDarwin() ||
+ Target.isOSAIX())) ||
+ D->hasAttr<PackedAttr>();
+
+ // When used as part of a typedef, or together with a 'packed' attribute, the
+ // 'aligned' attribute can be used to decrease alignment. In that case, it
+ // overrides any computed alignment we have, and there is no need to upgrade
+ // the alignment.
+ auto alignedAttrCanDecreaseAIXAlignment = [AlignRequirement, FieldPacked] {
+ // Enum alignment sources can be safely ignored here, because this only
+ // helps decide whether we need the AIX alignment upgrade, which only
+ // applies to floating-point types.
+ return AlignRequirement == AlignRequirementKind::RequiredByTypedef ||
+ (AlignRequirement == AlignRequirementKind::RequiredByRecord &&
+ FieldPacked);
+ };
+
// The AIX `power` alignment rules apply the natural alignment of the
// "first member" if it is of a floating-point data type (or is an aggregate
// whose recursively "first" member or element is such a type). The alignment
@@ -1971,7 +1994,7 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
// and zero-width bit-fields count as prior members; members of empty class
// types marked `no_unique_address` are not considered to be prior members.
CharUnits PreferredAlign = FieldAlign;
- if (DefaultsToAIXPowerAlignment && !AlignIsRequired &&
+ if (DefaultsToAIXPowerAlignment && !alignedAttrCanDecreaseAIXAlignment() &&
(FoundFirstNonOverlappingEmptyFieldForAIX || IsNaturalAlign)) {
auto performBuiltinTypeAlignmentUpgrade = [&](const BuiltinType *BTy) {
if (BTy->getKind() == BuiltinType::Double ||
@@ -1982,12 +2005,13 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
}
};
- const Type *Ty = D->getType()->getBaseElementTypeUnsafe();
- if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
- performBuiltinTypeAlignmentUpgrade(CTy->getElementType()->castAs<BuiltinType>());
- } else if (const BuiltinType *BTy = Ty->getAs<BuiltinType>()) {
+ const Type *BaseTy = D->getType()->getBaseElementTypeUnsafe();
+ if (const ComplexType *CTy = BaseTy->getAs<ComplexType>()) {
+ performBuiltinTypeAlignmentUpgrade(
+ CTy->getElementType()->castAs<BuiltinType>());
+ } else if (const BuiltinType *BTy = BaseTy->getAs<BuiltinType>()) {
performBuiltinTypeAlignmentUpgrade(BTy);
- } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ } else if (const RecordType *RT = BaseTy->getAs<RecordType>()) {
const RecordDecl *RD = RT->getDecl();
assert(RD && "Expected non-null RecordDecl.");
const ASTRecordLayout &FieldRecord = Context.getASTRecordLayout(RD);
@@ -1997,27 +2021,34 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
// The align if the field is not packed. This is to check if the attribute
// was unnecessary (-Wpacked).
- CharUnits UnpackedFieldAlign =
- !DefaultsToAIXPowerAlignment ? FieldAlign : PreferredAlign;
+ CharUnits UnpackedFieldAlign = FieldAlign;
+ CharUnits PackedFieldAlign = CharUnits::One();
CharUnits UnpackedFieldOffset = FieldOffset;
+ CharUnits OriginalFieldAlign = UnpackedFieldAlign;
- if (FieldPacked) {
- FieldAlign = CharUnits::One();
- PreferredAlign = CharUnits::One();
- }
CharUnits MaxAlignmentInChars =
Context.toCharUnitsFromBits(D->getMaxAlignment());
- FieldAlign = std::max(FieldAlign, MaxAlignmentInChars);
+ PackedFieldAlign = std::max(PackedFieldAlign, MaxAlignmentInChars);
PreferredAlign = std::max(PreferredAlign, MaxAlignmentInChars);
UnpackedFieldAlign = std::max(UnpackedFieldAlign, MaxAlignmentInChars);
// The maximum field alignment overrides the aligned attribute.
if (!MaxFieldAlignment.isZero()) {
- FieldAlign = std::min(FieldAlign, MaxFieldAlignment);
+ PackedFieldAlign = std::min(PackedFieldAlign, MaxFieldAlignment);
PreferredAlign = std::min(PreferredAlign, MaxFieldAlignment);
UnpackedFieldAlign = std::min(UnpackedFieldAlign, MaxFieldAlignment);
}
+
+ if (!FieldPacked)
+ FieldAlign = UnpackedFieldAlign;
+ if (DefaultsToAIXPowerAlignment)
+ UnpackedFieldAlign = PreferredAlign;
+ if (FieldPacked) {
+ PreferredAlign = PackedFieldAlign;
+ FieldAlign = PackedFieldAlign;
+ }
+
CharUnits AlignTo =
!DefaultsToAIXPowerAlignment ? FieldAlign : PreferredAlign;
// Round up the current record size to the field's alignment boundary.
@@ -2084,6 +2115,25 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
// Remember max struct/class ABI-specified alignment.
UnadjustedAlignment = std::max(UnadjustedAlignment, FieldAlign);
UpdateAlignment(FieldAlign, UnpackedFieldAlign, PreferredAlign);
+
+ // For checking the alignment of inner fields against
+ // the alignment of its parent record.
+ if (const RecordDecl *RD = D->getParent()) {
+ // Check if packed attribute or pragma pack is present.
+ if (RD->hasAttr<PackedAttr>() || !MaxFieldAlignment.isZero())
+ if (FieldAlign < OriginalFieldAlign)
+ if (D->getType()->isRecordType()) {
+ // If the offset is a multiple of the alignment of
+ // the type, raise the warning.
+ // TODO: Takes no account the alignment of the outer struct
+ if (FieldOffset % OriginalFieldAlign != 0)
+ Diag(D->getLocation(), diag::warn_unaligned_access)
+ << Context.getTypeDeclType(RD) << D->getName() << D->getType();
+ }
+ }
+
+ if (Packed && !FieldPacked && PackedFieldAlign < FieldAlign)
+ Diag(D->getLocation(), diag::warn_unpacked_field) << D;
}
void ItaniumRecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
@@ -2148,11 +2198,19 @@ void ItaniumRecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
<< (InBits ? 1 : 0); // (byte|bit)
}
+ const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
+
// Warn if we packed it unnecessarily, when the unpacked alignment is not
// greater than the one after packing, the size in bits doesn't change and
// the offset of each field is identical.
+ // Unless the type is non-POD (for Clang ABI > 15), where the packed
+ // attribute on such a type does allow the type to be packed into other
+ // structures that use the packed attribute.
if (Packed && UnpackedAlignment <= Alignment &&
- UnpackedSizeInBits == getSizeInBits() && !HasPackedField)
+ UnpackedSizeInBits == getSizeInBits() && !HasPackedField &&
+ (!CXXRD || CXXRD->isPOD() ||
+ Context.getLangOpts().getClangABICompat() <=
+ LangOptions::ClangABI::Ver15))
Diag(D->getLocation(), diag::warn_unnecessary_packed)
<< Context.getTypeDeclType(RD);
}
@@ -2209,9 +2267,12 @@ ItaniumRecordLayoutBuilder::updateExternalFieldOffset(const FieldDecl *Field,
/// \returns diagnostic %select index.
static unsigned getPaddingDiagFromTagKind(TagTypeKind Tag) {
switch (Tag) {
- case TTK_Struct: return 0;
- case TTK_Interface: return 1;
- case TTK_Class: return 2;
+ case TagTypeKind::Struct:
+ return 0;
+ case TagTypeKind::Interface:
+ return 1;
+ case TagTypeKind::Class:
+ return 2;
default: llvm_unreachable("Invalid tag kind for field padding diagnostic!");
}
}
@@ -2239,19 +2300,22 @@ void ItaniumRecordLayoutBuilder::CheckFieldPadding(
PadSize = PadSize / CharBitNum;
InBits = false;
}
- if (D->getIdentifier())
- Diag(D->getLocation(), diag::warn_padded_struct_field)
+ if (D->getIdentifier()) {
+ auto Diagnostic = D->isBitField() ? diag::warn_padded_struct_bitfield
+ : diag::warn_padded_struct_field;
+ Diag(D->getLocation(), Diagnostic)
<< getPaddingDiagFromTagKind(D->getParent()->getTagKind())
- << Context.getTypeDeclType(D->getParent())
- << PadSize
+ << Context.getTypeDeclType(D->getParent()) << PadSize
<< (InBits ? 1 : 0) // (byte|bit)
<< D->getIdentifier();
- else
- Diag(D->getLocation(), diag::warn_padded_struct_anon_field)
+ } else {
+ auto Diagnostic = D->isBitField() ? diag::warn_padded_struct_anon_bitfield
+ : diag::warn_padded_struct_anon_field;
+ Diag(D->getLocation(), Diagnostic)
<< getPaddingDiagFromTagKind(D->getParent()->getTagKind())
- << Context.getTypeDeclType(D->getParent())
- << PadSize
+ << Context.getTypeDeclType(D->getParent()) << PadSize
<< (InBits ? 1 : 0); // (byte|bit)
+ }
}
if (isPacked && Offset != UnpackedOffset) {
HasPackedField = true;
@@ -2285,7 +2349,7 @@ static const CXXMethodDecl *computeKeyFunction(ASTContext &Context,
if (!MD->isVirtual())
continue;
- if (MD->isPure())
+ if (MD->isPureVirtual())
continue;
// Ignore implicit member functions, they are always marked as inline, but
@@ -2487,7 +2551,10 @@ struct MicrosoftRecordLayoutBuilder {
CharUnits Alignment;
};
typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsetsMapTy;
- MicrosoftRecordLayoutBuilder(const ASTContext &Context) : Context(Context) {}
+ MicrosoftRecordLayoutBuilder(const ASTContext &Context,
+ EmptySubobjectMap *EmptySubobjects)
+ : Context(Context), EmptySubobjects(EmptySubobjects) {}
+
private:
MicrosoftRecordLayoutBuilder(const MicrosoftRecordLayoutBuilder &) = delete;
void operator=(const MicrosoftRecordLayoutBuilder &) = delete;
@@ -2537,6 +2604,8 @@ public:
llvm::SmallPtrSetImpl<const CXXRecordDecl *> &HasVtorDispSet,
const CXXRecordDecl *RD) const;
const ASTContext &Context;
+ EmptySubobjectMap *EmptySubobjects;
+
/// The size of the record being laid out.
CharUnits Size;
/// The non-virtual size of the record layout.
@@ -2610,7 +2679,7 @@ MicrosoftRecordLayoutBuilder::getAdjustedElementInfo(
// Track zero-sized subobjects here where it's already available.
EndsWithZeroSizedObject = Layout.endsWithZeroSizedObject();
// Respect required alignment, this is necessary because we may have adjusted
- // the alignment in the case of pragam pack. Note that the required alignment
+ // the alignment in the case of pragma pack. Note that the required alignment
// doesn't actually apply to the struct alignment at this point.
Alignment = std::max(Alignment, Info.Alignment);
RequiredAlignment = std::max(RequiredAlignment, Layout.getRequiredAlignment());
@@ -2712,7 +2781,8 @@ void MicrosoftRecordLayoutBuilder::initializeLayout(const RecordDecl *RD) {
// than the pointer size.
if (const MaxFieldAlignmentAttr *MFAA = RD->getAttr<MaxFieldAlignmentAttr>()){
unsigned PackedAlignment = MFAA->getAlignment();
- if (PackedAlignment <= Context.getTargetInfo().getPointerWidth(0))
+ if (PackedAlignment <=
+ Context.getTargetInfo().getPointerWidth(LangAS::Default))
MaxFieldAlignment = Context.toCharUnitsFromBits(PackedAlignment);
}
// Packed attribute forces max field alignment to be 1.
@@ -2737,10 +2807,10 @@ MicrosoftRecordLayoutBuilder::initializeCXXLayout(const CXXRecordDecl *RD) {
SharedVBPtrBase = nullptr;
// Calculate pointer size and alignment. These are used for vfptr and vbprt
// injection.
- PointerInfo.Size =
- Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
- PointerInfo.Alignment =
- Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(0));
+ PointerInfo.Size = Context.toCharUnitsFromBits(
+ Context.getTargetInfo().getPointerWidth(LangAS::Default));
+ PointerInfo.Alignment = Context.toCharUnitsFromBits(
+ Context.getTargetInfo().getPointerAlign(LangAS::Default));
// Respect pragma pack.
if (!MaxFieldAlignment.isZero())
PointerInfo.Alignment = std::min(PointerInfo.Alignment, MaxFieldAlignment);
@@ -2849,8 +2919,7 @@ static bool recordUsesEBO(const RecordDecl *RD) {
}
void MicrosoftRecordLayoutBuilder::layoutNonVirtualBase(
- const CXXRecordDecl *RD,
- const CXXRecordDecl *BaseDecl,
+ const CXXRecordDecl *RD, const CXXRecordDecl *BaseDecl,
const ASTRecordLayout &BaseLayout,
const ASTRecordLayout *&PreviousBaseLayout) {
// Insert padding between two bases if the left first one is zero sized or
@@ -2867,15 +2936,14 @@ void MicrosoftRecordLayoutBuilder::layoutNonVirtualBase(
bool FoundBase = false;
if (UseExternalLayout) {
FoundBase = External.getExternalNVBaseOffset(BaseDecl, BaseOffset);
- if (FoundBase) {
- assert(BaseOffset >= Size && "base offset already allocated");
+ if (BaseOffset > Size) {
Size = BaseOffset;
}
}
if (!FoundBase) {
- if (MDCUsesEBO && BaseDecl->isEmpty()) {
- assert(BaseLayout.getNonVirtualSize() == CharUnits::Zero());
+ if (MDCUsesEBO && BaseDecl->isEmpty() &&
+ (BaseLayout.getNonVirtualSize() == CharUnits::Zero())) {
BaseOffset = CharUnits::Zero();
} else {
// Otherwise, lay the base out at the end of the MDC.
@@ -2884,6 +2952,7 @@ void MicrosoftRecordLayoutBuilder::layoutNonVirtualBase(
}
Bases.insert(std::make_pair(BaseDecl, BaseOffset));
Size += BaseLayout.getNonVirtualSize();
+ DataSize = Size;
PreviousBaseLayout = &BaseLayout;
}
@@ -2901,15 +2970,43 @@ void MicrosoftRecordLayoutBuilder::layoutField(const FieldDecl *FD) {
LastFieldIsNonZeroWidthBitfield = false;
ElementInfo Info = getAdjustedElementInfo(FD);
Alignment = std::max(Alignment, Info.Alignment);
- CharUnits FieldOffset;
- if (UseExternalLayout)
+
+ const CXXRecordDecl *FieldClass = FD->getType()->getAsCXXRecordDecl();
+ bool IsOverlappingEmptyField = FD->isPotentiallyOverlapping() &&
+ FieldClass->isEmpty() &&
+ FieldClass->fields().empty();
+ CharUnits FieldOffset = CharUnits::Zero();
+
+ if (UseExternalLayout) {
FieldOffset =
Context.toCharUnitsFromBits(External.getExternalFieldOffset(FD));
- else if (IsUnion)
+ } else if (IsUnion) {
FieldOffset = CharUnits::Zero();
- else
+ } else if (EmptySubobjects) {
+ if (!IsOverlappingEmptyField)
+ FieldOffset = DataSize.alignTo(Info.Alignment);
+
+ while (!EmptySubobjects->CanPlaceFieldAtOffset(FD, FieldOffset)) {
+ const CXXRecordDecl *ParentClass = cast<CXXRecordDecl>(FD->getParent());
+ bool HasBases = ParentClass && (!ParentClass->bases().empty() ||
+ !ParentClass->vbases().empty());
+ if (FieldOffset == CharUnits::Zero() && DataSize != CharUnits::Zero() &&
+ HasBases) {
+ // MSVC appears to only do this when there are base classes;
+ // otherwise it overlaps no_unique_address fields in non-zero offsets.
+ FieldOffset = DataSize.alignTo(Info.Alignment);
+ } else {
+ FieldOffset += Info.Alignment;
+ }
+ }
+ } else {
FieldOffset = Size.alignTo(Info.Alignment);
+ }
placeFieldAtOffset(FieldOffset);
+
+ if (!IsOverlappingEmptyField)
+ DataSize = std::max(DataSize, FieldOffset + Info.Size);
+
Size = std::max(Size, FieldOffset + Info.Size);
}
@@ -2955,6 +3052,7 @@ void MicrosoftRecordLayoutBuilder::layoutBitField(const FieldDecl *FD) {
Alignment = std::max(Alignment, Info.Alignment);
RemainingBitsInField = Context.toBits(Info.Size) - Width;
}
+ DataSize = Size;
}
void
@@ -2980,6 +3078,7 @@ MicrosoftRecordLayoutBuilder::layoutZeroWidthBitField(const FieldDecl *FD) {
Size = FieldOffset;
Alignment = std::max(Alignment, Info.Alignment);
}
+ DataSize = Size;
}
void MicrosoftRecordLayoutBuilder::injectVBPtr(const CXXRecordDecl *RD) {
@@ -3025,10 +3124,9 @@ void MicrosoftRecordLayoutBuilder::injectVFPtr(const CXXRecordDecl *RD) {
VBPtrOffset += Offset;
if (UseExternalLayout) {
- // The class may have no bases or fields, but still have a vfptr
- // (e.g. it's an interface class). The size was not correctly set before
- // in this case.
- if (FieldOffsets.empty() && Bases.empty())
+ // The class may have size 0 and a vfptr (e.g. it's an interface class). The
+ // size was not correctly set before in this case.
+ if (Size.isZero())
Size += Offset;
return;
}
@@ -3070,7 +3168,7 @@ void MicrosoftRecordLayoutBuilder::layoutVirtualBases(const CXXRecordDecl *RD) {
for (const CXXBaseSpecifier &VBase : RD->vbases()) {
const CXXRecordDecl *BaseDecl = VBase.getType()->getAsCXXRecordDecl();
const ASTRecordLayout &BaseLayout = Context.getASTRecordLayout(BaseDecl);
- bool HasVtordisp = HasVtorDispSet.count(BaseDecl) > 0;
+ bool HasVtordisp = HasVtorDispSet.contains(BaseDecl);
// Insert padding between two bases if the left first one is zero sized or
// contains a zero sized subobject and the right is zero sized or one leads
// with a zero sized base. The padding between virtual bases is 4
@@ -3195,7 +3293,7 @@ void MicrosoftRecordLayoutBuilder::computeVtorDispSet(
// Seed the working set with our non-destructor, non-pure virtual methods.
for (const CXXMethodDecl *MD : RD->methods())
if (MicrosoftVTableContext::hasVtableSlot(MD) &&
- !isa<CXXDestructorDecl>(MD) && !MD->isPure())
+ !isa<CXXDestructorDecl>(MD) && !MD->isPureVirtual())
Work.insert(MD);
while (!Work.empty()) {
const CXXMethodDecl *MD = *Work.begin();
@@ -3230,6 +3328,8 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const {
if (D->hasExternalLexicalStorage() && !D->getDefinition())
getExternalSource()->CompleteType(const_cast<RecordDecl*>(D));
+ // Complete the redecl chain (if necessary).
+ (void)D->getMostRecentDecl();
D = D->getDefinition();
assert(D && "Cannot get layout of forward declarations!");
@@ -3245,8 +3345,9 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const {
const ASTRecordLayout *NewEntry = nullptr;
if (isMsLayout(*this)) {
- MicrosoftRecordLayoutBuilder Builder(*this);
if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
+ EmptySubobjectMap EmptySubobjects(*this, RD);
+ MicrosoftRecordLayoutBuilder Builder(*this, &EmptySubobjects);
Builder.cxxLayout(RD);
NewEntry = new (*this) ASTRecordLayout(
*this, Builder.Size, Builder.Alignment, Builder.Alignment,
@@ -3258,6 +3359,7 @@ ASTContext::getASTRecordLayout(const RecordDecl *D) const {
Builder.EndsWithZeroSizedObject, Builder.LeadsWithZeroSizedBase,
Builder.Bases, Builder.VBases);
} else {
+ MicrosoftRecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/nullptr);
Builder.layout(D);
NewEntry = new (*this) ASTRecordLayout(
*this, Builder.Size, Builder.Alignment, Builder.Alignment,
@@ -3383,6 +3485,7 @@ uint64_t ASTContext::getFieldOffset(const ValueDecl *VD) const {
uint64_t ASTContext::lookupFieldBitOffset(const ObjCInterfaceDecl *OID,
const ObjCImplementationDecl *ID,
const ObjCIvarDecl *Ivar) const {
+ Ivar = Ivar->getCanonicalDecl();
const ObjCInterfaceDecl *Container = Ivar->getContainingInterface();
// FIXME: We should eliminate the need to have ObjCImplementationDecl passed
@@ -3501,7 +3604,7 @@ static void DumpRecordLayout(raw_ostream &OS, const RecordDecl *RD,
auto CXXRD = dyn_cast<CXXRecordDecl>(RD);
PrintOffset(OS, Offset, IndentLevel);
- OS << C.getTypeDeclType(const_cast<RecordDecl*>(RD)).getAsString();
+ OS << C.getTypeDeclType(const_cast<RecordDecl *>(RD));
if (Description)
OS << ' ' << Description;
if (CXXRD && CXXRD->isEmpty())
@@ -3586,7 +3689,7 @@ static void DumpRecordLayout(raw_ostream &OS, const RecordDecl *RD,
const QualType &FieldType = C.getLangOpts().DumpRecordLayoutsCanonical
? Field.getType().getCanonicalType()
: Field.getType();
- OS << FieldType.getAsString() << ' ' << Field << '\n';
+ OS << FieldType << ' ' << Field << '\n';
}
// Dump virtual bases.
@@ -3652,7 +3755,7 @@ void ASTContext::DumpRecordLayout(const RecordDecl *RD, raw_ostream &OS,
// in libFrontend.
const ASTRecordLayout &Info = getASTRecordLayout(RD);
- OS << "Type: " << getTypeDeclType(RD).getAsString() << "\n";
+ OS << "Type: " << getTypeDeclType(RD) << "\n";
OS << "\nLayout: ";
OS << "<ASTRecordLayout\n";
OS << " Size:" << toBits(Info.getSize()) << "\n";
@@ -3662,6 +3765,28 @@ void ASTContext::DumpRecordLayout(const RecordDecl *RD, raw_ostream &OS,
if (Target->defaultsToAIXPowerAlignment())
OS << " PreferredAlignment:" << toBits(Info.getPreferredAlignment())
<< "\n";
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ OS << " BaseOffsets: [";
+ const CXXRecordDecl *Base = nullptr;
+ for (auto I : CXXRD->bases()) {
+ if (I.isVirtual())
+ continue;
+ if (Base)
+ OS << ", ";
+ Base = I.getType()->getAsCXXRecordDecl();
+ OS << Info.CXXInfo->BaseOffsets[Base].getQuantity();
+ }
+ OS << "]>\n";
+ OS << " VBaseOffsets: [";
+ const CXXRecordDecl *VBase = nullptr;
+ for (auto I : CXXRD->vbases()) {
+ if (VBase)
+ OS << ", ";
+ VBase = I.getType()->getAsCXXRecordDecl();
+ OS << Info.CXXInfo->VBaseOffsets[VBase].VBaseOffset.getQuantity();
+ }
+ OS << "]>\n";
+ }
OS << " FieldOffsets: [";
for (unsigned i = 0, e = Info.getFieldCount(); i != e; ++i) {
if (i)
diff --git a/contrib/llvm-project/clang/lib/AST/ScanfFormatString.cpp b/contrib/llvm-project/clang/lib/AST/ScanfFormatString.cpp
index 8d763f28e57f..64c430e623b5 100644
--- a/contrib/llvm-project/clang/lib/AST/ScanfFormatString.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ScanfFormatString.cpp
@@ -161,6 +161,7 @@ static ScanfSpecifierResult ParseScanfSpecifier(FormatStringHandler &H,
default:
break;
case '%': k = ConversionSpecifier::PercentArg; break;
+ case 'b': k = ConversionSpecifier::bArg; break;
case 'A': k = ConversionSpecifier::AArg; break;
case 'E': k = ConversionSpecifier::EArg; break;
case 'F': k = ConversionSpecifier::FArg; break;
@@ -267,6 +268,7 @@ ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const {
llvm_unreachable("Unsupported LengthModifier Type");
// Unsigned int.
+ case ConversionSpecifier::bArg:
case ConversionSpecifier::oArg:
case ConversionSpecifier::OArg:
case ConversionSpecifier::uArg:
@@ -343,7 +345,7 @@ ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const {
case LengthModifier::AsShort:
if (Ctx.getTargetInfo().getTriple().isOSMSVCRT())
return ArgType::PtrTo(ArgType::AnyCharTy);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
return ArgType::Invalid();
}
@@ -360,7 +362,7 @@ ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const {
case LengthModifier::AsShort:
if (Ctx.getTargetInfo().getTriple().isOSMSVCRT())
return ArgType::PtrTo(ArgType::AnyCharTy);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
return ArgType::Invalid();
}
@@ -444,7 +446,7 @@ bool ScanfSpecifier::fixType(QualType QT, QualType RawQT,
// If we know the target array length, we can use it as a field width.
if (const ConstantArrayType *CAT = Ctx.getAsConstantArrayType(RawQT)) {
- if (CAT->getSizeModifier() == ArrayType::Normal)
+ if (CAT->getSizeModifier() == ArraySizeModifier::Normal)
FieldWidth = OptionalAmount(OptionalAmount::Constant,
CAT->getSize().getZExtValue() - 1,
"", 0, false);
@@ -500,7 +502,7 @@ bool ScanfSpecifier::fixType(QualType QT, QualType RawQT,
}
// Handle size_t, ptrdiff_t, etc. that have dedicated length modifiers in C99.
- if (isa<TypedefType>(PT) && (LangOpt.C99 || LangOpt.CPlusPlus11))
+ if (LangOpt.C99 || LangOpt.CPlusPlus11)
namedTypeToLengthModifier(PT, LM);
// If fixing the length modifier was enough, we are done.
diff --git a/contrib/llvm-project/clang/lib/AST/Stmt.cpp b/contrib/llvm-project/clang/lib/AST/Stmt.cpp
index 47693ef9fee3..afd05881cb16 100644
--- a/contrib/llvm-project/clang/lib/AST/Stmt.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Stmt.cpp
@@ -41,6 +41,7 @@
#include <algorithm>
#include <cassert>
#include <cstring>
+#include <optional>
#include <string>
#include <type_traits>
#include <utility>
@@ -361,12 +362,14 @@ int64_t Stmt::getID(const ASTContext &Context) const {
return Context.getAllocator().identifyKnownAlignedObject<Stmt>(this);
}
-CompoundStmt::CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB,
- SourceLocation RB)
- : Stmt(CompoundStmtClass), RBraceLoc(RB) {
+CompoundStmt::CompoundStmt(ArrayRef<Stmt *> Stmts, FPOptionsOverride FPFeatures,
+ SourceLocation LB, SourceLocation RB)
+ : Stmt(CompoundStmtClass), LBraceLoc(LB), RBraceLoc(RB) {
CompoundStmtBits.NumStmts = Stmts.size();
+ CompoundStmtBits.HasFPFeatures = FPFeatures.requiresTrailingStorage();
setStmts(Stmts);
- CompoundStmtBits.LBraceLoc = LB;
+ if (hasStoredFPFeatures())
+ setStoredFPFeatures(FPFeatures);
}
void CompoundStmt::setStmts(ArrayRef<Stmt *> Stmts) {
@@ -377,18 +380,23 @@ void CompoundStmt::setStmts(ArrayRef<Stmt *> Stmts) {
}
CompoundStmt *CompoundStmt::Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
+ FPOptionsOverride FPFeatures,
SourceLocation LB, SourceLocation RB) {
void *Mem =
- C.Allocate(totalSizeToAlloc<Stmt *>(Stmts.size()), alignof(CompoundStmt));
- return new (Mem) CompoundStmt(Stmts, LB, RB);
+ C.Allocate(totalSizeToAlloc<Stmt *, FPOptionsOverride>(
+ Stmts.size(), FPFeatures.requiresTrailingStorage()),
+ alignof(CompoundStmt));
+ return new (Mem) CompoundStmt(Stmts, FPFeatures, LB, RB);
}
-CompoundStmt *CompoundStmt::CreateEmpty(const ASTContext &C,
- unsigned NumStmts) {
- void *Mem =
- C.Allocate(totalSizeToAlloc<Stmt *>(NumStmts), alignof(CompoundStmt));
+CompoundStmt *CompoundStmt::CreateEmpty(const ASTContext &C, unsigned NumStmts,
+ bool HasFPFeatures) {
+ void *Mem = C.Allocate(
+ totalSizeToAlloc<Stmt *, FPOptionsOverride>(NumStmts, HasFPFeatures),
+ alignof(CompoundStmt));
CompoundStmt *New = new (Mem) CompoundStmt(EmptyShell());
New->CompoundStmtBits.NumStmts = NumStmts;
+ New->CompoundStmtBits.HasFPFeatures = HasFPFeatures;
return New;
}
@@ -568,21 +576,20 @@ void GCCAsmStmt::setOutputsAndInputsAndClobbers(const ASTContext &C,
/// translate this into a numeric value needed to reference the same operand.
/// This returns -1 if the operand name is invalid.
int GCCAsmStmt::getNamedOperand(StringRef SymbolicName) const {
- unsigned NumPlusOperands = 0;
-
// Check if this is an output operand.
- for (unsigned i = 0, e = getNumOutputs(); i != e; ++i) {
+ unsigned NumOutputs = getNumOutputs();
+ for (unsigned i = 0; i != NumOutputs; ++i)
if (getOutputName(i) == SymbolicName)
return i;
- }
- for (unsigned i = 0, e = getNumInputs(); i != e; ++i)
+ unsigned NumInputs = getNumInputs();
+ for (unsigned i = 0; i != NumInputs; ++i)
if (getInputName(i) == SymbolicName)
- return getNumOutputs() + NumPlusOperands + i;
+ return NumOutputs + i;
for (unsigned i = 0, e = getNumLabels(); i != e; ++i)
if (getLabelName(i) == SymbolicName)
- return i + getNumOutputs() + getNumInputs();
+ return NumOutputs + NumInputs + getNumPlusOperands() + i;
// Not found.
return -1;
@@ -804,11 +811,12 @@ std::string MSAsmStmt::generateAsmString(const ASTContext &C) const {
StringRef Instruction = Pieces[I];
// For vex/vex2/vex3/evex masm style prefix, convert it to att style
// since we don't support masm style prefix in backend.
- if (Instruction.startswith("vex "))
+ if (Instruction.starts_with("vex "))
MSAsmString += '{' + Instruction.substr(0, 3).str() + '}' +
Instruction.substr(3).str();
- else if (Instruction.startswith("vex2 ") ||
- Instruction.startswith("vex3 ") || Instruction.startswith("evex "))
+ else if (Instruction.starts_with("vex2 ") ||
+ Instruction.starts_with("vex3 ") ||
+ Instruction.starts_with("evex "))
MSAsmString += '{' + Instruction.substr(0, 4).str() + '}' +
Instruction.substr(4).str();
else
@@ -912,7 +920,7 @@ void MSAsmStmt::initialize(const ASTContext &C, StringRef asmstr,
});
}
-IfStmt::IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr,
+IfStmt::IfStmt(const ASTContext &Ctx, SourceLocation IL, IfStatementKind Kind,
Stmt *Init, VarDecl *Var, Expr *Cond, SourceLocation LPL,
SourceLocation RPL, Stmt *Then, SourceLocation EL, Stmt *Else)
: Stmt(IfStmtClass), LParenLoc(LPL), RParenLoc(RPL) {
@@ -923,7 +931,7 @@ IfStmt::IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr,
IfStmtBits.HasVar = HasVar;
IfStmtBits.HasInit = HasInit;
- setConstexpr(IsConstexpr);
+ setStatementKind(Kind);
setCond(Cond);
setThen(Then);
@@ -947,9 +955,9 @@ IfStmt::IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit)
}
IfStmt *IfStmt::Create(const ASTContext &Ctx, SourceLocation IL,
- bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
- SourceLocation LPL, SourceLocation RPL, Stmt *Then,
- SourceLocation EL, Stmt *Else) {
+ IfStatementKind Kind, Stmt *Init, VarDecl *Var,
+ Expr *Cond, SourceLocation LPL, SourceLocation RPL,
+ Stmt *Then, SourceLocation EL, Stmt *Else) {
bool HasElse = Else != nullptr;
bool HasVar = Var != nullptr;
bool HasInit = Init != nullptr;
@@ -958,7 +966,7 @@ IfStmt *IfStmt::Create(const ASTContext &Ctx, SourceLocation IL,
NumMandatoryStmtPtr + HasElse + HasVar + HasInit, HasElse),
alignof(IfStmt));
return new (Mem)
- IfStmt(Ctx, IL, IsConstexpr, Init, Var, Cond, LPL, RPL, Then, EL, Else);
+ IfStmt(Ctx, IL, Kind, Init, Var, Cond, LPL, RPL, Then, EL, Else);
}
IfStmt *IfStmt::CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
@@ -995,18 +1003,18 @@ bool IfStmt::isObjCAvailabilityCheck() const {
return isa<ObjCAvailabilityCheckExpr>(getCond());
}
-Optional<Stmt *> IfStmt::getNondiscardedCase(const ASTContext &Ctx) {
+std::optional<Stmt *> IfStmt::getNondiscardedCase(const ASTContext &Ctx) {
if (!isConstexpr() || getCond()->isValueDependent())
- return None;
+ return std::nullopt;
return !getCond()->EvaluateKnownConstInt(Ctx) ? getElse() : getThen();
}
-Optional<const Stmt *>
+std::optional<const Stmt *>
IfStmt::getNondiscardedCase(const ASTContext &Ctx) const {
- if (Optional<Stmt *> Result =
+ if (std::optional<Stmt *> Result =
const_cast<IfStmt *>(this)->getNondiscardedCase(Ctx))
return *Result;
- return None;
+ return std::nullopt;
}
ForStmt::ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
@@ -1338,6 +1346,11 @@ CapturedStmt::CapturedStmt(EmptyShell Empty, unsigned NumCaptures)
: Stmt(CapturedStmtClass, Empty), NumCaptures(NumCaptures),
CapDeclAndKind(nullptr, CR_Default) {
getStoredStmts()[NumCaptures] = nullptr;
+
+ // Construct default capture objects.
+ Capture *Buffer = getStoredCaptures();
+ for (unsigned I = 0, N = NumCaptures; I != N; ++I)
+ new (Buffer++) Capture();
}
CapturedStmt *CapturedStmt::Create(const ASTContext &Context, Stmt *S,
diff --git a/contrib/llvm-project/clang/lib/AST/StmtCXX.cpp b/contrib/llvm-project/clang/lib/AST/StmtCXX.cpp
index 060d090fc06a..0d6fc848f739 100644
--- a/contrib/llvm-project/clang/lib/AST/StmtCXX.cpp
+++ b/contrib/llvm-project/clang/lib/AST/StmtCXX.cpp
@@ -23,7 +23,8 @@ QualType CXXCatchStmt::getCaughtType() const {
}
CXXTryStmt *CXXTryStmt::Create(const ASTContext &C, SourceLocation tryLoc,
- Stmt *tryBlock, ArrayRef<Stmt *> handlers) {
+ CompoundStmt *tryBlock,
+ ArrayRef<Stmt *> handlers) {
const size_t Size = totalSizeToAlloc<Stmt *>(handlers.size() + 1);
void *Mem = C.Allocate(Size, alignof(CXXTryStmt));
return new (Mem) CXXTryStmt(tryLoc, tryBlock, handlers);
@@ -36,7 +37,7 @@ CXXTryStmt *CXXTryStmt::Create(const ASTContext &C, EmptyShell Empty,
return new (Mem) CXXTryStmt(Empty, numHandlers);
}
-CXXTryStmt::CXXTryStmt(SourceLocation tryLoc, Stmt *tryBlock,
+CXXTryStmt::CXXTryStmt(SourceLocation tryLoc, CompoundStmt *tryBlock,
ArrayRef<Stmt *> handlers)
: Stmt(CXXTryStmtClass), TryLoc(tryLoc), NumHandlers(handlers.size()) {
Stmt **Stmts = getStmts();
@@ -117,8 +118,8 @@ CoroutineBodyStmt::CoroutineBodyStmt(CoroutineBodyStmt::CtorArgs const &Args)
SubStmts[CoroutineBodyStmt::OnFallthrough] = Args.OnFallthrough;
SubStmts[CoroutineBodyStmt::Allocate] = Args.Allocate;
SubStmts[CoroutineBodyStmt::Deallocate] = Args.Deallocate;
- SubStmts[CoroutineBodyStmt::ReturnValue] = Args.ReturnValue;
SubStmts[CoroutineBodyStmt::ResultDecl] = Args.ResultDecl;
+ SubStmts[CoroutineBodyStmt::ReturnValue] = Args.ReturnValue;
SubStmts[CoroutineBodyStmt::ReturnStmt] = Args.ReturnStmt;
SubStmts[CoroutineBodyStmt::ReturnStmtOnAllocFailure] =
Args.ReturnStmtOnAllocFailure;
diff --git a/contrib/llvm-project/clang/lib/AST/StmtObjC.cpp b/contrib/llvm-project/clang/lib/AST/StmtObjC.cpp
index 3d586795517c..12d8a9e7dac8 100644
--- a/contrib/llvm-project/clang/lib/AST/StmtObjC.cpp
+++ b/contrib/llvm-project/clang/lib/AST/StmtObjC.cpp
@@ -46,9 +46,8 @@ ObjCAtTryStmt *ObjCAtTryStmt::Create(const ASTContext &Context,
SourceLocation atTryLoc, Stmt *atTryStmt,
Stmt **CatchStmts, unsigned NumCatchStmts,
Stmt *atFinallyStmt) {
- unsigned Size =
- sizeof(ObjCAtTryStmt) +
- (1 + NumCatchStmts + (atFinallyStmt != nullptr)) * sizeof(Stmt *);
+ size_t Size =
+ totalSizeToAlloc<Stmt *>(1 + NumCatchStmts + (atFinallyStmt != nullptr));
void *Mem = Context.Allocate(Size, alignof(ObjCAtTryStmt));
return new (Mem) ObjCAtTryStmt(atTryLoc, atTryStmt, CatchStmts, NumCatchStmts,
atFinallyStmt);
@@ -57,8 +56,7 @@ ObjCAtTryStmt *ObjCAtTryStmt::Create(const ASTContext &Context,
ObjCAtTryStmt *ObjCAtTryStmt::CreateEmpty(const ASTContext &Context,
unsigned NumCatchStmts,
bool HasFinally) {
- unsigned Size =
- sizeof(ObjCAtTryStmt) + (1 + NumCatchStmts + HasFinally) * sizeof(Stmt *);
+ size_t Size = totalSizeToAlloc<Stmt *>(1 + NumCatchStmts + HasFinally);
void *Mem = Context.Allocate(Size, alignof(ObjCAtTryStmt));
return new (Mem) ObjCAtTryStmt(EmptyShell(), NumCatchStmts, HasFinally);
}
diff --git a/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp b/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp
index b0ef2f49ba04..426b35848cb5 100644
--- a/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp
@@ -31,7 +31,7 @@ void OMPChildren::setClauses(ArrayRef<OMPClause *> Clauses) {
}
MutableArrayRef<Stmt *> OMPChildren::getChildren() {
- return llvm::makeMutableArrayRef(getTrailingObjects<Stmt *>(), NumChildren);
+ return llvm::MutableArrayRef(getTrailingObjects<Stmt *>(), NumChildren);
}
OMPChildren *OMPChildren::Create(void *Mem, ArrayRef<OMPClause *> Clauses) {
@@ -125,28 +125,34 @@ OMPLoopBasedDirective::tryToFindNextInnerLoop(Stmt *CurStmt,
bool OMPLoopBasedDirective::doForAllLoops(
Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
llvm::function_ref<bool(unsigned, Stmt *)> Callback,
- llvm::function_ref<void(OMPLoopBasedDirective *)>
+ llvm::function_ref<void(OMPLoopTransformationDirective *)>
OnTransformationCallback) {
CurStmt = CurStmt->IgnoreContainers();
for (unsigned Cnt = 0; Cnt < NumLoops; ++Cnt) {
while (true) {
- auto *OrigStmt = CurStmt;
- if (auto *Dir = dyn_cast<OMPTileDirective>(OrigStmt)) {
- OnTransformationCallback(Dir);
- CurStmt = Dir->getTransformedStmt();
- } else if (auto *Dir = dyn_cast<OMPUnrollDirective>(OrigStmt)) {
- OnTransformationCallback(Dir);
- CurStmt = Dir->getTransformedStmt();
- } else {
+ auto *Dir = dyn_cast<OMPLoopTransformationDirective>(CurStmt);
+ if (!Dir)
break;
- }
- if (!CurStmt) {
- // May happen if the loop transformation does not result in a generated
- // loop (such as full unrolling).
- CurStmt = OrigStmt;
- break;
+ OnTransformationCallback(Dir);
+
+ Stmt *TransformedStmt = Dir->getTransformedStmt();
+ if (!TransformedStmt) {
+ unsigned NumGeneratedLoops = Dir->getNumGeneratedLoops();
+ if (NumGeneratedLoops == 0) {
+ // May happen if the loop transformation does not result in a
+ // generated loop (such as full unrolling).
+ break;
+ }
+ if (NumGeneratedLoops > 0) {
+ // The loop transformation construct has generated loops, but these
+ // may not have been generated yet due to being in a dependent
+ // context.
+ return true;
+ }
}
+
+ CurStmt = TransformedStmt;
}
if (auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(CurStmt))
CurStmt = CanonLoop->getLoopStmt();
@@ -253,6 +259,25 @@ void OMPLoopDirective::setFinalsConditions(ArrayRef<Expr *> A) {
llvm::copy(A, getFinalsConditions().begin());
}
+OMPMetaDirective *OMPMetaDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt, Stmt *IfStmt) {
+ auto *Dir = createDirective<OMPMetaDirective>(
+ C, Clauses, AssociatedStmt, /*NumChildren=*/1, StartLoc, EndLoc);
+ Dir->setIfStmt(IfStmt);
+ return Dir;
+}
+
+OMPMetaDirective *OMPMetaDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ return createEmptyDirective<OMPMetaDirective>(C, NumClauses,
+ /*HasAssociatedStmt=*/true,
+ /*NumChildren=*/1);
+}
+
OMPParallelDirective *OMPParallelDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
@@ -272,11 +297,10 @@ OMPParallelDirective *OMPParallelDirective::CreateEmpty(const ASTContext &C,
/*NumChildren=*/1);
}
-OMPSimdDirective *
-OMPSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
- SourceLocation EndLoc, unsigned CollapsedNum,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs) {
+OMPSimdDirective *OMPSimdDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs, OpenMPDirectiveKind ParamPrevMappedDirective) {
auto *Dir = createDirective<OMPSimdDirective>(
C, Clauses, AssociatedStmt, numLoopChildren(CollapsedNum, OMPD_simd),
StartLoc, EndLoc, CollapsedNum);
@@ -296,6 +320,7 @@ OMPSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
Dir->setDependentInits(Exprs.DependentInits);
Dir->setFinalsConditions(Exprs.FinalsConditions);
Dir->setPreInits(Exprs.PreInits);
+ Dir->setMappedDirective(ParamPrevMappedDirective);
return Dir;
}
@@ -311,7 +336,8 @@ OMPSimdDirective *OMPSimdDirective::CreateEmpty(const ASTContext &C,
OMPForDirective *OMPForDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
- const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel) {
+ const HelperExprs &Exprs, Expr *TaskRedRef, bool HasCancel,
+ OpenMPDirectiveKind ParamPrevMappedDirective) {
auto *Dir = createDirective<OMPForDirective>(
C, Clauses, AssociatedStmt, numLoopChildren(CollapsedNum, OMPD_for) + 1,
StartLoc, EndLoc, CollapsedNum);
@@ -341,9 +367,36 @@ OMPForDirective *OMPForDirective::Create(
Dir->setPreInits(Exprs.PreInits);
Dir->setTaskReductionRefExpr(TaskRedRef);
Dir->setHasCancel(HasCancel);
+ Dir->setMappedDirective(ParamPrevMappedDirective);
return Dir;
}
+Stmt *OMPLoopTransformationDirective::getTransformedStmt() const {
+ switch (getStmtClass()) {
+#define STMT(CLASS, PARENT)
+#define ABSTRACT_STMT(CLASS)
+#define OMPLOOPTRANSFORMATIONDIRECTIVE(CLASS, PARENT) \
+ case Stmt::CLASS##Class: \
+ return static_cast<const CLASS *>(this)->getTransformedStmt();
+#include "clang/AST/StmtNodes.inc"
+ default:
+ llvm_unreachable("Not a loop transformation");
+ }
+}
+
+Stmt *OMPLoopTransformationDirective::getPreInits() const {
+ switch (getStmtClass()) {
+#define STMT(CLASS, PARENT)
+#define ABSTRACT_STMT(CLASS)
+#define OMPLOOPTRANSFORMATIONDIRECTIVE(CLASS, PARENT) \
+ case Stmt::CLASS##Class: \
+ return static_cast<const CLASS *>(this)->getPreInits();
+#include "clang/AST/StmtNodes.inc"
+ default:
+ llvm_unreachable("Not a loop transformation");
+ }
+}
+
OMPForDirective *OMPForDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
@@ -377,10 +430,13 @@ OMPTileDirective *OMPTileDirective::CreateEmpty(const ASTContext &C,
OMPUnrollDirective *
OMPUnrollDirective::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses,
- Stmt *AssociatedStmt, Stmt *TransformedStmt,
- Stmt *PreInits) {
+ Stmt *AssociatedStmt, unsigned NumGeneratedLoops,
+ Stmt *TransformedStmt, Stmt *PreInits) {
+ assert(NumGeneratedLoops <= 1 && "Unrolling generates at most one loop");
+
auto *Dir = createDirective<OMPUnrollDirective>(
C, Clauses, AssociatedStmt, TransformedStmtOffset + 1, StartLoc, EndLoc);
+ Dir->setNumGeneratedLoops(NumGeneratedLoops);
Dir->setTransformedStmt(TransformedStmt);
Dir->setPreInits(PreInits);
return Dir;
@@ -463,8 +519,8 @@ OMPSectionDirective *OMPSectionDirective::Create(const ASTContext &C,
Stmt *AssociatedStmt,
bool HasCancel) {
auto *Dir =
- createDirective<OMPSectionDirective>(C, llvm::None, AssociatedStmt,
- /*NumChildre=*/0, StartLoc, EndLoc);
+ createDirective<OMPSectionDirective>(C, std::nullopt, AssociatedStmt,
+ /*NumChildren=*/0, StartLoc, EndLoc);
Dir->setHasCancel(HasCancel);
return Dir;
}
@@ -475,6 +531,23 @@ OMPSectionDirective *OMPSectionDirective::CreateEmpty(const ASTContext &C,
/*HasAssociatedStmt=*/true);
}
+OMPScopeDirective *OMPScopeDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt) {
+ return createDirective<OMPScopeDirective>(C, Clauses, AssociatedStmt,
+ /*NumChildren=*/0, StartLoc,
+ EndLoc);
+}
+
+OMPScopeDirective *OMPScopeDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ return createEmptyDirective<OMPScopeDirective>(C, NumClauses,
+ /*HasAssociatedStmt=*/true);
+}
+
OMPSingleDirective *OMPSingleDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
@@ -496,7 +569,7 @@ OMPMasterDirective *OMPMasterDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt) {
- return createDirective<OMPMasterDirective>(C, llvm::None, AssociatedStmt,
+ return createDirective<OMPMasterDirective>(C, std::nullopt, AssociatedStmt,
/*NumChildren=*/0, StartLoc,
EndLoc);
}
@@ -628,6 +701,22 @@ OMPParallelMasterDirective::CreateEmpty(const ASTContext &C,
C, NumClauses, /*HasAssociatedStmt=*/true, /*NumChildren=*/1);
}
+OMPParallelMaskedDirective *OMPParallelMaskedDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef) {
+ auto *Dir = createDirective<OMPParallelMaskedDirective>(
+ C, Clauses, AssociatedStmt, /*NumChildren=*/1, StartLoc, EndLoc);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
+ return Dir;
+}
+
+OMPParallelMaskedDirective *
+OMPParallelMaskedDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses, EmptyShell) {
+ return createEmptyDirective<OMPParallelMaskedDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true, /*NumChildren=*/1);
+}
+
OMPParallelSectionsDirective *OMPParallelSectionsDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
@@ -674,6 +763,21 @@ OMPTaskyieldDirective *OMPTaskyieldDirective::CreateEmpty(const ASTContext &C,
return new (C) OMPTaskyieldDirective();
}
+OMPErrorDirective *OMPErrorDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses) {
+ return createDirective<OMPErrorDirective>(
+ C, Clauses, /*AssociatedStmt=*/nullptr, /*NumChildren=*/0, StartLoc,
+ EndLoc);
+}
+
+OMPErrorDirective *OMPErrorDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ return createEmptyDirective<OMPErrorDirective>(C, NumClauses);
+}
+
OMPBarrierDirective *OMPBarrierDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc) {
@@ -685,15 +789,19 @@ OMPBarrierDirective *OMPBarrierDirective::CreateEmpty(const ASTContext &C,
return new (C) OMPBarrierDirective();
}
-OMPTaskwaitDirective *OMPTaskwaitDirective::Create(const ASTContext &C,
- SourceLocation StartLoc,
- SourceLocation EndLoc) {
- return new (C) OMPTaskwaitDirective(StartLoc, EndLoc);
+OMPTaskwaitDirective *
+OMPTaskwaitDirective::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses) {
+ return createDirective<OMPTaskwaitDirective>(
+ C, Clauses, /*AssociatedStmt=*/nullptr, /*NumChildren=*/0, StartLoc,
+ EndLoc);
}
OMPTaskwaitDirective *OMPTaskwaitDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
EmptyShell) {
- return new (C) OMPTaskwaitDirective();
+ return createEmptyDirective<OMPTaskwaitDirective>(C, NumClauses);
}
OMPTaskgroupDirective *OMPTaskgroupDirective::Create(
@@ -805,18 +913,22 @@ OMPOrderedDirective *OMPOrderedDirective::CreateEmpty(const ASTContext &C,
!IsStandalone);
}
-OMPAtomicDirective *OMPAtomicDirective::Create(
- const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
- Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate) {
+OMPAtomicDirective *
+OMPAtomicDirective::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt, Expressions Exprs) {
auto *Dir = createDirective<OMPAtomicDirective>(
- C, Clauses, AssociatedStmt, /*NumChildren=*/4, StartLoc, EndLoc);
- Dir->setX(X);
- Dir->setV(V);
- Dir->setExpr(E);
- Dir->setUpdateExpr(UE);
- Dir->IsXLHSInRHSPart = IsXLHSInRHSPart;
- Dir->IsPostfixUpdate = IsPostfixUpdate;
+ C, Clauses, AssociatedStmt, /*NumChildren=*/7, StartLoc, EndLoc);
+ Dir->setX(Exprs.X);
+ Dir->setV(Exprs.V);
+ Dir->setR(Exprs.R);
+ Dir->setExpr(Exprs.E);
+ Dir->setUpdateExpr(Exprs.UE);
+ Dir->setD(Exprs.D);
+ Dir->setCond(Exprs.Cond);
+ Dir->Flags.IsXLHSInRHSPart = Exprs.IsXLHSInRHSPart ? 1 : 0;
+ Dir->Flags.IsPostfixUpdate = Exprs.IsPostfixUpdate ? 1 : 0;
+ Dir->Flags.IsFailOnly = Exprs.IsFailOnly ? 1 : 0;
return Dir;
}
@@ -824,7 +936,7 @@ OMPAtomicDirective *OMPAtomicDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
return createEmptyDirective<OMPAtomicDirective>(
- C, NumClauses, /*HasAssociatedStmt=*/true, /*NumChildren=*/4);
+ C, NumClauses, /*HasAssociatedStmt=*/true, /*NumChildren=*/7);
}
OMPTargetDirective *OMPTargetDirective::Create(const ASTContext &C,
@@ -1098,6 +1210,51 @@ OMPMasterTaskLoopDirective::CreateEmpty(const ASTContext &C,
numLoopChildren(CollapsedNum, OMPD_master_taskloop), CollapsedNum);
}
+OMPMaskedTaskLoopDirective *OMPMaskedTaskLoopDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs, bool HasCancel) {
+ auto *Dir = createDirective<OMPMaskedTaskLoopDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_masked_taskloop), StartLoc, EndLoc,
+ CollapsedNum);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setDependentCounters(Exprs.DependentCounters);
+ Dir->setDependentInits(Exprs.DependentInits);
+ Dir->setFinalsConditions(Exprs.FinalsConditions);
+ Dir->setPreInits(Exprs.PreInits);
+ Dir->setHasCancel(HasCancel);
+ return Dir;
+}
+
+OMPMaskedTaskLoopDirective *
+OMPMaskedTaskLoopDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum, EmptyShell) {
+ return createEmptyDirective<OMPMaskedTaskLoopDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_masked_taskloop), CollapsedNum);
+}
+
OMPMasterTaskLoopSimdDirective *OMPMasterTaskLoopSimdDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
@@ -1142,6 +1299,50 @@ OMPMasterTaskLoopSimdDirective::CreateEmpty(const ASTContext &C,
numLoopChildren(CollapsedNum, OMPD_master_taskloop_simd), CollapsedNum);
}
+OMPMaskedTaskLoopSimdDirective *OMPMaskedTaskLoopSimdDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ auto *Dir = createDirective<OMPMaskedTaskLoopSimdDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_masked_taskloop_simd), StartLoc,
+ EndLoc, CollapsedNum);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setDependentCounters(Exprs.DependentCounters);
+ Dir->setDependentInits(Exprs.DependentInits);
+ Dir->setFinalsConditions(Exprs.FinalsConditions);
+ Dir->setPreInits(Exprs.PreInits);
+ return Dir;
+}
+
+OMPMaskedTaskLoopSimdDirective *
+OMPMaskedTaskLoopSimdDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum, EmptyShell) {
+ return createEmptyDirective<OMPMaskedTaskLoopSimdDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_masked_taskloop_simd), CollapsedNum);
+}
+
OMPParallelMasterTaskLoopDirective *OMPParallelMasterTaskLoopDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
@@ -1189,6 +1390,53 @@ OMPParallelMasterTaskLoopDirective::CreateEmpty(const ASTContext &C,
CollapsedNum);
}
+OMPParallelMaskedTaskLoopDirective *OMPParallelMaskedTaskLoopDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs, bool HasCancel) {
+ auto *Dir = createDirective<OMPParallelMaskedTaskLoopDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_parallel_masked_taskloop), StartLoc,
+ EndLoc, CollapsedNum);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setDependentCounters(Exprs.DependentCounters);
+ Dir->setDependentInits(Exprs.DependentInits);
+ Dir->setFinalsConditions(Exprs.FinalsConditions);
+ Dir->setPreInits(Exprs.PreInits);
+ Dir->setHasCancel(HasCancel);
+ return Dir;
+}
+
+OMPParallelMaskedTaskLoopDirective *
+OMPParallelMaskedTaskLoopDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell) {
+ return createEmptyDirective<OMPParallelMaskedTaskLoopDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_parallel_masked_taskloop),
+ CollapsedNum);
+}
+
OMPParallelMasterTaskLoopSimdDirective *
OMPParallelMasterTaskLoopSimdDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
@@ -1236,10 +1484,57 @@ OMPParallelMasterTaskLoopSimdDirective::CreateEmpty(const ASTContext &C,
CollapsedNum);
}
-OMPDistributeDirective *OMPDistributeDirective::Create(
+OMPParallelMaskedTaskLoopSimdDirective *
+OMPParallelMaskedTaskLoopSimdDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
+ auto *Dir = createDirective<OMPParallelMaskedTaskLoopSimdDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_parallel_masked_taskloop_simd),
+ StartLoc, EndLoc, CollapsedNum);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setDependentCounters(Exprs.DependentCounters);
+ Dir->setDependentInits(Exprs.DependentInits);
+ Dir->setFinalsConditions(Exprs.FinalsConditions);
+ Dir->setPreInits(Exprs.PreInits);
+ return Dir;
+}
+
+OMPParallelMaskedTaskLoopSimdDirective *
+OMPParallelMaskedTaskLoopSimdDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell) {
+ return createEmptyDirective<OMPParallelMaskedTaskLoopSimdDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_parallel_masked_taskloop_simd),
+ CollapsedNum);
+}
+
+OMPDistributeDirective *OMPDistributeDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs, OpenMPDirectiveKind ParamPrevMappedDirective) {
auto *Dir = createDirective<OMPDistributeDirective>(
C, Clauses, AssociatedStmt,
numLoopChildren(CollapsedNum, OMPD_distribute), StartLoc, EndLoc,
@@ -1268,6 +1563,7 @@ OMPDistributeDirective *OMPDistributeDirective::Create(
Dir->setDependentInits(Exprs.DependentInits);
Dir->setFinalsConditions(Exprs.FinalsConditions);
Dir->setPreInits(Exprs.PreInits);
+ Dir->setMappedDirective(ParamPrevMappedDirective);
return Dir;
}
@@ -2032,3 +2328,249 @@ OMPMaskedDirective *OMPMaskedDirective::CreateEmpty(const ASTContext &C,
return createEmptyDirective<OMPMaskedDirective>(C, NumClauses,
/*HasAssociatedStmt=*/true);
}
+
+OMPGenericLoopDirective *OMPGenericLoopDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ auto *Dir = createDirective<OMPGenericLoopDirective>(
+ C, Clauses, AssociatedStmt, numLoopChildren(CollapsedNum, OMPD_loop),
+ StartLoc, EndLoc, CollapsedNum);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setDependentCounters(Exprs.DependentCounters);
+ Dir->setDependentInits(Exprs.DependentInits);
+ Dir->setFinalsConditions(Exprs.FinalsConditions);
+ Dir->setPreInits(Exprs.PreInits);
+ return Dir;
+}
+
+OMPGenericLoopDirective *
+OMPGenericLoopDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
+ unsigned CollapsedNum, EmptyShell) {
+ return createEmptyDirective<OMPGenericLoopDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_loop), CollapsedNum);
+}
+
+OMPTeamsGenericLoopDirective *OMPTeamsGenericLoopDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ auto *Dir = createDirective<OMPTeamsGenericLoopDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_teams_loop), StartLoc, EndLoc,
+ CollapsedNum);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
+ Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setDependentCounters(Exprs.DependentCounters);
+ Dir->setDependentInits(Exprs.DependentInits);
+ Dir->setFinalsConditions(Exprs.FinalsConditions);
+ Dir->setPreInits(Exprs.PreInits);
+ Dir->setCombinedLowerBoundVariable(Exprs.DistCombinedFields.LB);
+ Dir->setCombinedUpperBoundVariable(Exprs.DistCombinedFields.UB);
+ Dir->setCombinedEnsureUpperBound(Exprs.DistCombinedFields.EUB);
+ Dir->setCombinedInit(Exprs.DistCombinedFields.Init);
+ Dir->setCombinedCond(Exprs.DistCombinedFields.Cond);
+ Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB);
+ Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
+ Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
+ Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
+ return Dir;
+}
+
+OMPTeamsGenericLoopDirective *
+OMPTeamsGenericLoopDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum, EmptyShell) {
+ return createEmptyDirective<OMPTeamsGenericLoopDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_teams_loop), CollapsedNum);
+}
+
+OMPTargetTeamsGenericLoopDirective *OMPTargetTeamsGenericLoopDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ auto *Dir = createDirective<OMPTargetTeamsGenericLoopDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_target_teams_loop), StartLoc, EndLoc,
+ CollapsedNum);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
+ Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setDependentCounters(Exprs.DependentCounters);
+ Dir->setDependentInits(Exprs.DependentInits);
+ Dir->setFinalsConditions(Exprs.FinalsConditions);
+ Dir->setPreInits(Exprs.PreInits);
+ Dir->setCombinedLowerBoundVariable(Exprs.DistCombinedFields.LB);
+ Dir->setCombinedUpperBoundVariable(Exprs.DistCombinedFields.UB);
+ Dir->setCombinedEnsureUpperBound(Exprs.DistCombinedFields.EUB);
+ Dir->setCombinedInit(Exprs.DistCombinedFields.Init);
+ Dir->setCombinedCond(Exprs.DistCombinedFields.Cond);
+ Dir->setCombinedNextLowerBound(Exprs.DistCombinedFields.NLB);
+ Dir->setCombinedNextUpperBound(Exprs.DistCombinedFields.NUB);
+ Dir->setCombinedDistCond(Exprs.DistCombinedFields.DistCond);
+ Dir->setCombinedParForInDistCond(Exprs.DistCombinedFields.ParForInDistCond);
+ return Dir;
+}
+
+OMPTargetTeamsGenericLoopDirective *
+OMPTargetTeamsGenericLoopDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell) {
+ return createEmptyDirective<OMPTargetTeamsGenericLoopDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_target_teams_loop), CollapsedNum);
+}
+
+OMPParallelGenericLoopDirective *OMPParallelGenericLoopDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ auto *Dir = createDirective<OMPParallelGenericLoopDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_parallel_loop), StartLoc, EndLoc,
+ CollapsedNum);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setDependentCounters(Exprs.DependentCounters);
+ Dir->setDependentInits(Exprs.DependentInits);
+ Dir->setFinalsConditions(Exprs.FinalsConditions);
+ Dir->setPreInits(Exprs.PreInits);
+ return Dir;
+}
+
+OMPParallelGenericLoopDirective *OMPParallelGenericLoopDirective::CreateEmpty(
+ const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
+ EmptyShell) {
+ return createEmptyDirective<OMPParallelGenericLoopDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_parallel_loop), CollapsedNum);
+}
+
+OMPTargetParallelGenericLoopDirective *
+OMPTargetParallelGenericLoopDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ auto *Dir = createDirective<OMPTargetParallelGenericLoopDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_target_parallel_loop), StartLoc,
+ EndLoc, CollapsedNum);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setDependentCounters(Exprs.DependentCounters);
+ Dir->setDependentInits(Exprs.DependentInits);
+ Dir->setFinalsConditions(Exprs.FinalsConditions);
+ Dir->setPreInits(Exprs.PreInits);
+ return Dir;
+}
+
+OMPTargetParallelGenericLoopDirective *
+OMPTargetParallelGenericLoopDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell) {
+ return createEmptyDirective<OMPTargetParallelGenericLoopDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_target_parallel_loop), CollapsedNum);
+}
diff --git a/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp b/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp
index 45b15171aa97..9d4aa07ec4da 100644
--- a/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp
@@ -54,6 +54,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
+#include <optional>
#include <string>
using namespace clang;
@@ -128,6 +129,7 @@ namespace {
void PrintRawSEHFinallyStmt(SEHFinallyStmt *S);
void PrintOMPExecutableDirective(OMPExecutableDirective *S,
bool ForceNoStmt = false);
+ void PrintFPPragmas(CompoundStmt *S);
void PrintExpr(Expr *E) {
if (E)
@@ -173,13 +175,75 @@ namespace {
/// PrintRawCompoundStmt - Print a compound stmt without indenting the {, and
/// with no newline after the }.
void StmtPrinter::PrintRawCompoundStmt(CompoundStmt *Node) {
+ assert(Node && "Compound statement cannot be null");
OS << "{" << NL;
+ PrintFPPragmas(Node);
for (auto *I : Node->body())
PrintStmt(I);
Indent() << "}";
}
+void StmtPrinter::PrintFPPragmas(CompoundStmt *S) {
+ if (!S->hasStoredFPFeatures())
+ return;
+ FPOptionsOverride FPO = S->getStoredFPFeatures();
+ bool FEnvAccess = false;
+ if (FPO.hasAllowFEnvAccessOverride()) {
+ FEnvAccess = FPO.getAllowFEnvAccessOverride();
+ Indent() << "#pragma STDC FENV_ACCESS " << (FEnvAccess ? "ON" : "OFF")
+ << NL;
+ }
+ if (FPO.hasSpecifiedExceptionModeOverride()) {
+ LangOptions::FPExceptionModeKind EM =
+ FPO.getSpecifiedExceptionModeOverride();
+ if (!FEnvAccess || EM != LangOptions::FPE_Strict) {
+ Indent() << "#pragma clang fp exceptions(";
+ switch (FPO.getSpecifiedExceptionModeOverride()) {
+ default:
+ break;
+ case LangOptions::FPE_Ignore:
+ OS << "ignore";
+ break;
+ case LangOptions::FPE_MayTrap:
+ OS << "maytrap";
+ break;
+ case LangOptions::FPE_Strict:
+ OS << "strict";
+ break;
+ }
+ OS << ")\n";
+ }
+ }
+ if (FPO.hasConstRoundingModeOverride()) {
+ LangOptions::RoundingMode RM = FPO.getConstRoundingModeOverride();
+ Indent() << "#pragma STDC FENV_ROUND ";
+ switch (RM) {
+ case llvm::RoundingMode::TowardZero:
+ OS << "FE_TOWARDZERO";
+ break;
+ case llvm::RoundingMode::NearestTiesToEven:
+ OS << "FE_TONEAREST";
+ break;
+ case llvm::RoundingMode::TowardPositive:
+ OS << "FE_UPWARD";
+ break;
+ case llvm::RoundingMode::TowardNegative:
+ OS << "FE_DOWNWARD";
+ break;
+ case llvm::RoundingMode::NearestTiesToAway:
+ OS << "FE_TONEARESTFROMZERO";
+ break;
+ case llvm::RoundingMode::Dynamic:
+ OS << "FE_DYNAMIC";
+ break;
+ default:
+ llvm_unreachable("Invalid rounding mode");
+ }
+ OS << NL;
+ }
+}
+
void StmtPrinter::PrintRawDecl(Decl *D) {
D->print(OS, Policy, IndentLevel);
}
@@ -236,6 +300,22 @@ void StmtPrinter::VisitAttributedStmt(AttributedStmt *Node) {
}
void StmtPrinter::PrintRawIfStmt(IfStmt *If) {
+ if (If->isConsteval()) {
+ OS << "if ";
+ if (If->isNegatedConsteval())
+ OS << "!";
+ OS << "consteval";
+ OS << NL;
+ PrintStmt(If->getThen());
+ if (Stmt *Else = If->getElse()) {
+ Indent();
+ OS << "else";
+ PrintStmt(Else);
+ OS << NL;
+ }
+ return;
+ }
+
OS << "if (";
if (If->getInit())
PrintInitStmt(If->getInit(), 4);
@@ -321,7 +401,9 @@ void StmtPrinter::VisitForStmt(ForStmt *Node) {
PrintInitStmt(Node->getInit(), 5);
else
OS << (Node->getCond() ? "; " : ";");
- if (Node->getCond())
+ if (const DeclStmt *DS = Node->getConditionVariableDeclStmt())
+ PrintRawDeclStmt(DS);
+ else if (Node->getCond())
PrintExpr(Node->getCond());
OS << ";";
if (Node->getInc()) {
@@ -505,13 +587,10 @@ void StmtPrinter::VisitObjCAtTryStmt(ObjCAtTryStmt *Node) {
OS << NL;
}
- for (unsigned I = 0, N = Node->getNumCatchStmts(); I != N; ++I) {
- ObjCAtCatchStmt *catchStmt = Node->getCatchStmt(I);
+ for (ObjCAtCatchStmt *catchStmt : Node->catch_stmts()) {
Indent() << "@catch(";
- if (catchStmt->getCatchParamDecl()) {
- if (Decl *DS = catchStmt->getCatchParamDecl())
- PrintRawDecl(DS);
- }
+ if (Decl *DS = catchStmt->getCatchParamDecl())
+ PrintRawDecl(DS);
OS << ")";
if (auto *CS = dyn_cast<CompoundStmt>(catchStmt->getCatchBody())) {
PrintRawCompoundStmt(CS);
@@ -521,8 +600,10 @@ void StmtPrinter::VisitObjCAtTryStmt(ObjCAtTryStmt *Node) {
if (auto *FS = static_cast<ObjCAtFinallyStmt *>(Node->getFinallyStmt())) {
Indent() << "@finally";
- PrintRawCompoundStmt(dyn_cast<CompoundStmt>(FS->getFinallyBody()));
- OS << NL;
+ if (auto *CS = dyn_cast<CompoundStmt>(FS->getFinallyBody())) {
+ PrintRawCompoundStmt(CS);
+ OS << NL;
+ }
}
}
@@ -557,7 +638,7 @@ void StmtPrinter::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *Node) {
void StmtPrinter::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *Node) {
Indent() << "@autoreleasepool";
- PrintRawCompoundStmt(dyn_cast<CompoundStmt>(Node->getSubStmt()));
+ PrintRawCompoundStmt(cast<CompoundStmt>(Node->getSubStmt()));
OS << NL;
}
@@ -654,6 +735,11 @@ void StmtPrinter::PrintOMPExecutableDirective(OMPExecutableDirective *S,
PrintStmt(S->getRawStmt());
}
+void StmtPrinter::VisitOMPMetaDirective(OMPMetaDirective *Node) {
+ Indent() << "#pragma omp metadirective";
+ PrintOMPExecutableDirective(Node);
+}
+
void StmtPrinter::VisitOMPParallelDirective(OMPParallelDirective *Node) {
Indent() << "#pragma omp parallel";
PrintOMPExecutableDirective(Node);
@@ -694,6 +780,11 @@ void StmtPrinter::VisitOMPSectionDirective(OMPSectionDirective *Node) {
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPScopeDirective(OMPScopeDirective *Node) {
+ Indent() << "#pragma omp scope";
+ PrintOMPExecutableDirective(Node);
+}
+
void StmtPrinter::VisitOMPSingleDirective(OMPSingleDirective *Node) {
Indent() << "#pragma omp single";
PrintOMPExecutableDirective(Node);
@@ -731,6 +822,12 @@ void StmtPrinter::VisitOMPParallelMasterDirective(
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPParallelMaskedDirective(
+ OMPParallelMaskedDirective *Node) {
+ Indent() << "#pragma omp parallel masked";
+ PrintOMPExecutableDirective(Node);
+}
+
void StmtPrinter::VisitOMPParallelSectionsDirective(
OMPParallelSectionsDirective *Node) {
Indent() << "#pragma omp parallel sections";
@@ -757,6 +854,11 @@ void StmtPrinter::VisitOMPTaskwaitDirective(OMPTaskwaitDirective *Node) {
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPErrorDirective(OMPErrorDirective *Node) {
+ Indent() << "#pragma omp error";
+ PrintOMPExecutableDirective(Node);
+}
+
void StmtPrinter::VisitOMPTaskgroupDirective(OMPTaskgroupDirective *Node) {
Indent() << "#pragma omp taskgroup";
PrintOMPExecutableDirective(Node);
@@ -856,24 +958,48 @@ void StmtPrinter::VisitOMPMasterTaskLoopDirective(
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPMaskedTaskLoopDirective(
+ OMPMaskedTaskLoopDirective *Node) {
+ Indent() << "#pragma omp masked taskloop";
+ PrintOMPExecutableDirective(Node);
+}
+
void StmtPrinter::VisitOMPMasterTaskLoopSimdDirective(
OMPMasterTaskLoopSimdDirective *Node) {
Indent() << "#pragma omp master taskloop simd";
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPMaskedTaskLoopSimdDirective(
+ OMPMaskedTaskLoopSimdDirective *Node) {
+ Indent() << "#pragma omp masked taskloop simd";
+ PrintOMPExecutableDirective(Node);
+}
+
void StmtPrinter::VisitOMPParallelMasterTaskLoopDirective(
OMPParallelMasterTaskLoopDirective *Node) {
Indent() << "#pragma omp parallel master taskloop";
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPParallelMaskedTaskLoopDirective(
+ OMPParallelMaskedTaskLoopDirective *Node) {
+ Indent() << "#pragma omp parallel masked taskloop";
+ PrintOMPExecutableDirective(Node);
+}
+
void StmtPrinter::VisitOMPParallelMasterTaskLoopSimdDirective(
OMPParallelMasterTaskLoopSimdDirective *Node) {
Indent() << "#pragma omp parallel master taskloop simd";
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPParallelMaskedTaskLoopSimdDirective(
+ OMPParallelMaskedTaskLoopSimdDirective *Node) {
+ Indent() << "#pragma omp parallel masked taskloop simd";
+ PrintOMPExecutableDirective(Node);
+}
+
void StmtPrinter::VisitOMPDistributeDirective(OMPDistributeDirective *Node) {
Indent() << "#pragma omp distribute";
PrintOMPExecutableDirective(Node);
@@ -982,6 +1108,35 @@ void StmtPrinter::VisitOMPMaskedDirective(OMPMaskedDirective *Node) {
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPGenericLoopDirective(OMPGenericLoopDirective *Node) {
+ Indent() << "#pragma omp loop";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTeamsGenericLoopDirective(
+ OMPTeamsGenericLoopDirective *Node) {
+ Indent() << "#pragma omp teams loop";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTargetTeamsGenericLoopDirective(
+ OMPTargetTeamsGenericLoopDirective *Node) {
+ Indent() << "#pragma omp target teams loop";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPParallelGenericLoopDirective(
+ OMPParallelGenericLoopDirective *Node) {
+ Indent() << "#pragma omp parallel loop";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTargetParallelGenericLoopDirective(
+ OMPTargetParallelGenericLoopDirective *Node) {
+ Indent() << "#pragma omp target parallel loop";
+ PrintOMPExecutableDirective(Node);
+}
+
//===----------------------------------------------------------------------===//
// Expr printing methods.
//===----------------------------------------------------------------------===//
@@ -1000,14 +1155,19 @@ void StmtPrinter::VisitDeclRefExpr(DeclRefExpr *Node) {
return;
}
if (const auto *TPOD = dyn_cast<TemplateParamObjectDecl>(Node->getDecl())) {
- TPOD->printAsExpr(OS);
+ TPOD->printAsExpr(OS, Policy);
return;
}
if (NestedNameSpecifier *Qualifier = Node->getQualifier())
Qualifier->print(OS, Policy);
if (Node->hasTemplateKeyword())
OS << "template ";
- OS << Node->getNameInfo();
+ if (Policy.CleanUglifiedParameters &&
+ isa<ParmVarDecl, NonTypeTemplateParmDecl>(Node->getDecl()) &&
+ Node->getDecl()->getIdentifier())
+ OS << Node->getDecl()->getIdentifier()->deuglifiedName();
+ else
+ Node->getNameInfo().printName(OS, Policy);
if (Node->hasExplicitTemplateArgs()) {
const TemplateParameterList *TPL = nullptr;
if (!Node->hadMultipleCandidates())
@@ -1041,7 +1201,7 @@ void StmtPrinter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *Node) {
static bool isImplicitSelf(const Expr *E) {
if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
if (const auto *PD = dyn_cast<ImplicitParamDecl>(DRE->getDecl())) {
- if (PD->getParameterKind() == ImplicitParamDecl::ObjCSelf &&
+ if (PD->getParameterKind() == ImplicitParamKind::ObjCSelf &&
DRE->getBeginLoc().isInvalid())
return true;
}
@@ -1125,12 +1285,18 @@ void StmtPrinter::VisitIntegerLiteral(IntegerLiteral *Node) {
bool isSigned = Node->getType()->isSignedIntegerType();
OS << toString(Node->getValue(), 10, isSigned);
+ if (isa<BitIntType>(Node->getType())) {
+ OS << (isSigned ? "wb" : "uwb");
+ return;
+ }
+
// Emit suffixes. Integer literals are always a builtin integer type.
switch (Node->getType()->castAs<BuiltinType>()->getKind()) {
default: llvm_unreachable("Unexpected type for integer literal!");
case BuiltinType::Char_S:
case BuiltinType::Char_U: OS << "i8"; break;
case BuiltinType::UChar: OS << "Ui8"; break;
+ case BuiltinType::SChar: OS << "i8"; break;
case BuiltinType::Short: OS << "i16"; break;
case BuiltinType::UShort: OS << "Ui16"; break;
case BuiltinType::Int: break; // no suffix.
@@ -1143,6 +1309,9 @@ void StmtPrinter::VisitIntegerLiteral(IntegerLiteral *Node) {
break; // no suffix.
case BuiltinType::UInt128:
break; // no suffix.
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ break; // no suffix
}
}
@@ -1183,6 +1352,7 @@ static void PrintFloatingLiteral(raw_ostream &OS, FloatingLiteral *Node,
switch (Node->getType()->castAs<BuiltinType>()->getKind()) {
default: llvm_unreachable("Unexpected type for float literal!");
case BuiltinType::Half: break; // FIXME: suffix?
+ case BuiltinType::Ibm128: break; // FIXME: No suffix for ibm128 literal
case BuiltinType::Double: break; // no suffix.
case BuiltinType::Float16: OS << "F16"; break;
case BuiltinType::Float: OS << 'F'; break;
@@ -1298,8 +1468,12 @@ void StmtPrinter::VisitUnaryExprOrTypeTraitExpr(
void StmtPrinter::VisitGenericSelectionExpr(GenericSelectionExpr *Node) {
OS << "_Generic(";
- PrintExpr(Node->getControllingExpr());
- for (const GenericSelectionExpr::Association Assoc : Node->associations()) {
+ if (Node->isExprPredicate())
+ PrintExpr(Node->getControllingExpr());
+ else
+ Node->getControllingType()->getType().print(OS, Policy);
+
+ for (const GenericSelectionExpr::Association &Assoc : Node->associations()) {
OS << ", ";
QualType T = Assoc.getType();
if (T.isNull())
@@ -1580,7 +1754,7 @@ void StmtPrinter::VisitDesignatedInitExpr(DesignatedInitExpr *Node) {
for (const DesignatedInitExpr::Designator &D : Node->designators()) {
if (D.isFieldDesignator()) {
if (D.getDotLoc().isInvalid()) {
- if (IdentifierInfo *II = D.getFieldName()) {
+ if (const IdentifierInfo *II = D.getFieldName()) {
OS << II->getName() << ":";
NeedsEquals = false;
}
@@ -1667,7 +1841,9 @@ void StmtPrinter::VisitAtomicExpr(AtomicExpr *Node) {
PrintExpr(Node->getPtr());
if (Node->getOp() != AtomicExpr::AO__c11_atomic_load &&
Node->getOp() != AtomicExpr::AO__atomic_load_n &&
- Node->getOp() != AtomicExpr::AO__opencl_atomic_load) {
+ Node->getOp() != AtomicExpr::AO__scoped_atomic_load_n &&
+ Node->getOp() != AtomicExpr::AO__opencl_atomic_load &&
+ Node->getOp() != AtomicExpr::AO__hip_atomic_load) {
OS << ", ";
PrintExpr(Node->getVal1());
}
@@ -1706,21 +1882,16 @@ void StmtPrinter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *Node) {
}
} else if (Kind == OO_Arrow) {
PrintExpr(Node->getArg(0));
- } else if (Kind == OO_Call) {
+ } else if (Kind == OO_Call || Kind == OO_Subscript) {
PrintExpr(Node->getArg(0));
- OS << '(';
+ OS << (Kind == OO_Call ? '(' : '[');
for (unsigned ArgIdx = 1; ArgIdx < Node->getNumArgs(); ++ArgIdx) {
if (ArgIdx > 1)
OS << ", ";
if (!isa<CXXDefaultArgExpr>(Node->getArg(ArgIdx)))
PrintExpr(Node->getArg(ArgIdx));
}
- OS << ')';
- } else if (Kind == OO_Subscript) {
- PrintExpr(Node->getArg(0));
- OS << '[';
- PrintExpr(Node->getArg(1));
- OS << ']';
+ OS << (Kind == OO_Call ? ')' : ']');
} else if (Node->getNumArgs() == 1) {
OS << getOperatorSpelling(Kind) << ' ';
PrintExpr(Node->getArg(0));
@@ -1847,7 +2018,7 @@ void StmtPrinter::VisitUserDefinedLiteral(UserDefinedLiteral *Node) {
cast<FunctionDecl>(DRE->getDecl())->getTemplateSpecializationArgs();
assert(Args);
- if (Args->size() != 1) {
+ if (Args->size() != 1 || Args->get(0).getKind() != TemplateArgument::Pack) {
const TemplateParameterList *TPL = nullptr;
if (!DRE->hadMultipleCandidates())
if (const auto *TD = dyn_cast<TemplateDecl>(DRE->getDecl()))
@@ -1915,14 +2086,23 @@ void StmtPrinter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *Node) {
}
void StmtPrinter::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *Node) {
- Node->getType().print(OS, Policy);
- // If there are no parens, this is list-initialization, and the braces are
- // part of the syntax of the inner construct.
- if (Node->getLParenLoc().isValid())
- OS << "(";
+ auto TargetType = Node->getType();
+ auto *Auto = TargetType->getContainedDeducedType();
+ bool Bare = Auto && Auto->isDeduced();
+
+ // Parenthesize deduced casts.
+ if (Bare)
+ OS << '(';
+ TargetType.print(OS, Policy);
+ if (Bare)
+ OS << ')';
+
+ // No extra braces surrounding the inner construct.
+ if (!Node->isListInitialization())
+ OS << '(';
PrintExpr(Node->getSubExpr());
- if (Node->getLParenLoc().isValid())
- OS << ")";
+ if (!Node->isListInitialization())
+ OS << ')';
}
void StmtPrinter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *Node) {
@@ -2009,7 +2189,8 @@ void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) {
OS << "...";
if (Node->isInitCapture(C)) {
- VarDecl *D = C->getCapturedVar();
+ // Init captures are always VarDecl.
+ auto *D = cast<VarDecl>(C->getCapturedVar());
llvm::StringRef Pre;
llvm::StringRef Post;
@@ -2044,7 +2225,10 @@ void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) {
} else {
NeedComma = true;
}
- std::string ParamStr = P->getNameAsString();
+ std::string ParamStr =
+ (Policy.CleanUglifiedParameters && P->getIdentifier())
+ ? P->getIdentifier()->deuglifiedName().str()
+ : P->getNameAsString();
P->getOriginalType().print(OS, Policy, ParamStr);
}
if (Method->isVariadic()) {
@@ -2104,10 +2288,10 @@ void StmtPrinter::VisitCXXNewExpr(CXXNewExpr *E) {
if (E->isParenTypeId())
OS << "(";
std::string TypeS;
- if (Optional<Expr *> Size = E->getArraySize()) {
+ if (E->isArray()) {
llvm::raw_string_ostream s(TypeS);
s << '[';
- if (*Size)
+ if (std::optional<Expr *> Size = E->getArraySize())
(*Size)->printPretty(s, Helper, Policy);
s << ']';
}
@@ -2115,12 +2299,14 @@ void StmtPrinter::VisitCXXNewExpr(CXXNewExpr *E) {
if (E->isParenTypeId())
OS << ")";
- CXXNewExpr::InitializationStyle InitStyle = E->getInitializationStyle();
- if (InitStyle) {
- if (InitStyle == CXXNewExpr::CallInit)
+ CXXNewInitializationStyle InitStyle = E->getInitializationStyle();
+ if (InitStyle != CXXNewInitializationStyle::None) {
+ bool Bare = InitStyle == CXXNewInitializationStyle::Parens &&
+ !isa<ParenListExpr>(E->getInitializer());
+ if (Bare)
OS << "(";
PrintExpr(E->getInitializer());
- if (InitStyle == CXXNewExpr::CallInit)
+ if (Bare)
OS << ")";
}
}
@@ -2182,19 +2368,19 @@ void StmtPrinter::VisitExprWithCleanups(ExprWithCleanups *E) {
PrintExpr(E->getSubExpr());
}
-void
-StmtPrinter::VisitCXXUnresolvedConstructExpr(
- CXXUnresolvedConstructExpr *Node) {
+void StmtPrinter::VisitCXXUnresolvedConstructExpr(
+ CXXUnresolvedConstructExpr *Node) {
Node->getTypeAsWritten().print(OS, Policy);
- OS << "(";
- for (CXXUnresolvedConstructExpr::arg_iterator Arg = Node->arg_begin(),
- ArgEnd = Node->arg_end();
- Arg != ArgEnd; ++Arg) {
+ if (!Node->isListInitialization())
+ OS << '(';
+ for (auto Arg = Node->arg_begin(), ArgEnd = Node->arg_end(); Arg != ArgEnd;
+ ++Arg) {
if (Arg != Node->arg_begin())
OS << ", ";
PrintExpr(*Arg);
}
- OS << ")";
+ if (!Node->isListInitialization())
+ OS << ')';
}
void StmtPrinter::VisitCXXDependentScopeMemberExpr(
@@ -2295,6 +2481,13 @@ void StmtPrinter::VisitCXXFoldExpr(CXXFoldExpr *E) {
OS << ")";
}
+void StmtPrinter::VisitCXXParenListInitExpr(CXXParenListInitExpr *Node) {
+ OS << "(";
+ llvm::interleaveComma(Node->getInitExprs(), OS,
+ [&](Expr *E) { PrintExpr(E); });
+ OS << ")";
+}
+
void StmtPrinter::VisitConceptSpecializationExpr(ConceptSpecializationExpr *E) {
NestedNameSpecifierLoc NNS = E->getNestedNameSpecifierLoc();
if (NNS)
@@ -2351,7 +2544,7 @@ void StmtPrinter::VisitRequiresExpr(RequiresExpr *E) {
} else {
auto *NestedReq = cast<concepts::NestedRequirement>(Req);
OS << "requires ";
- if (NestedReq->isSubstitutionFailure())
+ if (NestedReq->hasInvalidConstraint())
OS << "<<error-expression>>";
else
PrintExpr(NestedReq->getConstraintExpr());
@@ -2361,7 +2554,7 @@ void StmtPrinter::VisitRequiresExpr(RequiresExpr *E) {
OS << "}";
}
-// C++ Coroutines TS
+// C++ Coroutines
void StmtPrinter::VisitCoroutineBodyStmt(CoroutineBodyStmt *S) {
Visit(S->getBody());
@@ -2571,6 +2764,14 @@ void Stmt::printPretty(raw_ostream &Out, PrinterHelper *Helper,
P.Visit(const_cast<Stmt *>(this));
}
+void Stmt::printPrettyControlled(raw_ostream &Out, PrinterHelper *Helper,
+ const PrintingPolicy &Policy,
+ unsigned Indentation, StringRef NL,
+ const ASTContext *Context) const {
+ StmtPrinter P(Out, Helper, Policy, Indentation, NL, Context);
+ P.PrintControlledStmt(const_cast<Stmt *>(this));
+}
+
void Stmt::printJson(raw_ostream &Out, PrinterHelper *Helper,
const PrintingPolicy &Policy, bool AddQuotes) const {
std::string Buf;
diff --git a/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp b/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp
index ed000c2467fa..dd0838edab7b 100644
--- a/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp
+++ b/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp
@@ -29,15 +29,21 @@ namespace {
protected:
llvm::FoldingSetNodeID &ID;
bool Canonical;
+ bool ProfileLambdaExpr;
public:
- StmtProfiler(llvm::FoldingSetNodeID &ID, bool Canonical)
- : ID(ID), Canonical(Canonical) {}
+ StmtProfiler(llvm::FoldingSetNodeID &ID, bool Canonical,
+ bool ProfileLambdaExpr)
+ : ID(ID), Canonical(Canonical), ProfileLambdaExpr(ProfileLambdaExpr) {}
virtual ~StmtProfiler() {}
void VisitStmt(const Stmt *S);
+ void VisitStmtNoChildren(const Stmt *S) {
+ HandleStmtClass(S->getStmtClass());
+ }
+
virtual void HandleStmtClass(Stmt::StmtClass SC) = 0;
#define STMT(Node, Base) void Visit##Node(const Node *S);
@@ -79,8 +85,10 @@ namespace {
public:
StmtProfilerWithPointers(llvm::FoldingSetNodeID &ID,
- const ASTContext &Context, bool Canonical)
- : StmtProfiler(ID, Canonical), Context(Context) {}
+ const ASTContext &Context, bool Canonical,
+ bool ProfileLambdaExpr)
+ : StmtProfiler(ID, Canonical, ProfileLambdaExpr), Context(Context) {}
+
private:
void HandleStmtClass(Stmt::StmtClass SC) override {
ID.AddInteger(SC);
@@ -95,7 +103,15 @@ namespace {
ID.AddInteger(NTTP->getDepth());
ID.AddInteger(NTTP->getIndex());
ID.AddBoolean(NTTP->isParameterPack());
- VisitType(NTTP->getType());
+ // C++20 [temp.over.link]p6:
+ // Two template-parameters are equivalent under the following
+ // conditions: [...] if they declare non-type template parameters,
+ // they have equivalent types ignoring the use of type-constraints
+ // for placeholder types
+ //
+ // TODO: Why do we need to include the type in the profile? It's not
+ // part of the mangling.
+ VisitType(Context.getUnconstrainedType(NTTP->getType()));
return;
}
@@ -107,6 +123,9 @@ namespace {
// definition of "equivalent" (per C++ [temp.over.link]) is at
// least as strong as the definition of "equivalent" used for
// name mangling.
+ //
+ // TODO: The Itanium C++ ABI only uses the top-level cv-qualifiers,
+ // not the entirety of the type.
VisitType(Parm->getType());
ID.AddInteger(Parm->getFunctionScopeDepth());
ID.AddInteger(Parm->getFunctionScopeIndex());
@@ -166,7 +185,8 @@ namespace {
ODRHash &Hash;
public:
StmtProfilerWithoutPointers(llvm::FoldingSetNodeID &ID, ODRHash &Hash)
- : StmtProfiler(ID, false), Hash(Hash) {}
+ : StmtProfiler(ID, /*Canonical=*/false, /*ProfileLambdaExpr=*/false),
+ Hash(Hash) {}
private:
void HandleStmtClass(Stmt::StmtClass SC) override {
@@ -218,7 +238,7 @@ namespace {
void StmtProfiler::VisitStmt(const Stmt *S) {
assert(S && "Requires non-null Stmt pointer");
- HandleStmtClass(S->getStmtClass());
+ VisitStmtNoChildren(S);
for (const Stmt *SubStmt : S->children()) {
if (SubStmt)
@@ -452,6 +472,11 @@ void OMPClauseProfiler::VisitOMPNumThreadsClause(const OMPNumThreadsClause *C) {
Profiler->VisitStmt(C->getNumThreads());
}
+void OMPClauseProfiler::VisitOMPAlignClause(const OMPAlignClause *C) {
+ if (C->getAlignment())
+ Profiler->VisitStmt(C->getAlignment());
+}
+
void OMPClauseProfiler::VisitOMPSafelenClause(const OMPSafelenClause *C) {
if (C->getSafelen())
Profiler->VisitStmt(C->getSafelen());
@@ -463,7 +488,7 @@ void OMPClauseProfiler::VisitOMPSimdlenClause(const OMPSimdlenClause *C) {
}
void OMPClauseProfiler::VisitOMPSizesClause(const OMPSizesClause *C) {
- for (auto E : C->getSizesRefs())
+ for (auto *E : C->getSizesRefs())
if (E)
Profiler->VisitExpr(E);
}
@@ -521,6 +546,15 @@ void OMPClauseProfiler::VisitOMPDynamicAllocatorsClause(
void OMPClauseProfiler::VisitOMPAtomicDefaultMemOrderClause(
const OMPAtomicDefaultMemOrderClause *C) {}
+void OMPClauseProfiler::VisitOMPAtClause(const OMPAtClause *C) {}
+
+void OMPClauseProfiler::VisitOMPSeverityClause(const OMPSeverityClause *C) {}
+
+void OMPClauseProfiler::VisitOMPMessageClause(const OMPMessageClause *C) {
+ if (C->getMessageString())
+ Profiler->VisitStmt(C->getMessageString());
+}
+
void OMPClauseProfiler::VisitOMPScheduleClause(const OMPScheduleClause *C) {
VistOMPClauseWithPreInit(C);
if (auto *S = C->getChunkSize())
@@ -546,6 +580,10 @@ void OMPClauseProfiler::VisitOMPUpdateClause(const OMPUpdateClause *) {}
void OMPClauseProfiler::VisitOMPCaptureClause(const OMPCaptureClause *) {}
+void OMPClauseProfiler::VisitOMPCompareClause(const OMPCompareClause *) {}
+
+void OMPClauseProfiler::VisitOMPFailClause(const OMPFailClause *) {}
+
void OMPClauseProfiler::VisitOMPSeqCstClause(const OMPSeqCstClause *) {}
void OMPClauseProfiler::VisitOMPAcqRelClause(const OMPAcqRelClause *) {}
@@ -850,6 +888,10 @@ void OMPClauseProfiler::VisitOMPIsDevicePtrClause(
const OMPIsDevicePtrClause *C) {
VisitOMPClauseList(C);
}
+void OMPClauseProfiler::VisitOMPHasDeviceAddrClause(
+ const OMPHasDeviceAddrClause *C) {
+ VisitOMPClauseList(C);
+}
void OMPClauseProfiler::VisitOMPNontemporalClause(
const OMPNontemporalClause *C) {
VisitOMPClauseList(C);
@@ -878,6 +920,19 @@ void OMPClauseProfiler::VisitOMPAffinityClause(const OMPAffinityClause *C) {
Profiler->VisitStmt(E);
}
void OMPClauseProfiler::VisitOMPOrderClause(const OMPOrderClause *C) {}
+void OMPClauseProfiler::VisitOMPBindClause(const OMPBindClause *C) {}
+void OMPClauseProfiler::VisitOMPXDynCGroupMemClause(
+ const OMPXDynCGroupMemClause *C) {
+ VistOMPClauseWithPreInit(C);
+ if (Expr *Size = C->getSize())
+ Profiler->VisitStmt(Size);
+}
+void OMPClauseProfiler::VisitOMPDoacrossClause(const OMPDoacrossClause *C) {
+ VisitOMPClauseList(C);
+}
+void OMPClauseProfiler::VisitOMPXAttributeClause(const OMPXAttributeClause *C) {
+}
+void OMPClauseProfiler::VisitOMPXBareClause(const OMPXBareClause *C) {}
} // namespace
void
@@ -903,6 +958,10 @@ void StmtProfiler::VisitOMPLoopDirective(const OMPLoopDirective *S) {
VisitOMPLoopBasedDirective(S);
}
+void StmtProfiler::VisitOMPMetaDirective(const OMPMetaDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
void StmtProfiler::VisitOMPParallelDirective(const OMPParallelDirective *S) {
VisitOMPExecutableDirective(S);
}
@@ -911,12 +970,17 @@ void StmtProfiler::VisitOMPSimdDirective(const OMPSimdDirective *S) {
VisitOMPLoopDirective(S);
}
-void StmtProfiler::VisitOMPTileDirective(const OMPTileDirective *S) {
+void StmtProfiler::VisitOMPLoopTransformationDirective(
+ const OMPLoopTransformationDirective *S) {
VisitOMPLoopBasedDirective(S);
}
+void StmtProfiler::VisitOMPTileDirective(const OMPTileDirective *S) {
+ VisitOMPLoopTransformationDirective(S);
+}
+
void StmtProfiler::VisitOMPUnrollDirective(const OMPUnrollDirective *S) {
- VisitOMPLoopBasedDirective(S);
+ VisitOMPLoopTransformationDirective(S);
}
void StmtProfiler::VisitOMPForDirective(const OMPForDirective *S) {
@@ -935,6 +999,10 @@ void StmtProfiler::VisitOMPSectionDirective(const OMPSectionDirective *S) {
VisitOMPExecutableDirective(S);
}
+void StmtProfiler::VisitOMPScopeDirective(const OMPScopeDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
void StmtProfiler::VisitOMPSingleDirective(const OMPSingleDirective *S) {
VisitOMPExecutableDirective(S);
}
@@ -963,6 +1031,11 @@ void StmtProfiler::VisitOMPParallelMasterDirective(
VisitOMPExecutableDirective(S);
}
+void StmtProfiler::VisitOMPParallelMaskedDirective(
+ const OMPParallelMaskedDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
void StmtProfiler::VisitOMPParallelSectionsDirective(
const OMPParallelSectionsDirective *S) {
VisitOMPExecutableDirective(S);
@@ -984,6 +1057,9 @@ void StmtProfiler::VisitOMPTaskwaitDirective(const OMPTaskwaitDirective *S) {
VisitOMPExecutableDirective(S);
}
+void StmtProfiler::VisitOMPErrorDirective(const OMPErrorDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
void StmtProfiler::VisitOMPTaskgroupDirective(const OMPTaskgroupDirective *S) {
VisitOMPExecutableDirective(S);
if (const Expr *E = S->getReductionRef())
@@ -1065,21 +1141,41 @@ void StmtProfiler::VisitOMPMasterTaskLoopDirective(
VisitOMPLoopDirective(S);
}
+void StmtProfiler::VisitOMPMaskedTaskLoopDirective(
+ const OMPMaskedTaskLoopDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
void StmtProfiler::VisitOMPMasterTaskLoopSimdDirective(
const OMPMasterTaskLoopSimdDirective *S) {
VisitOMPLoopDirective(S);
}
+void StmtProfiler::VisitOMPMaskedTaskLoopSimdDirective(
+ const OMPMaskedTaskLoopSimdDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
void StmtProfiler::VisitOMPParallelMasterTaskLoopDirective(
const OMPParallelMasterTaskLoopDirective *S) {
VisitOMPLoopDirective(S);
}
+void StmtProfiler::VisitOMPParallelMaskedTaskLoopDirective(
+ const OMPParallelMaskedTaskLoopDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
void StmtProfiler::VisitOMPParallelMasterTaskLoopSimdDirective(
const OMPParallelMasterTaskLoopSimdDirective *S) {
VisitOMPLoopDirective(S);
}
+void StmtProfiler::VisitOMPParallelMaskedTaskLoopSimdDirective(
+ const OMPParallelMaskedTaskLoopSimdDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
void StmtProfiler::VisitOMPDistributeDirective(
const OMPDistributeDirective *S) {
VisitOMPLoopDirective(S);
@@ -1181,6 +1277,31 @@ void StmtProfiler::VisitOMPMaskedDirective(const OMPMaskedDirective *S) {
VisitOMPExecutableDirective(S);
}
+void StmtProfiler::VisitOMPGenericLoopDirective(
+ const OMPGenericLoopDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPTeamsGenericLoopDirective(
+ const OMPTeamsGenericLoopDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPTargetTeamsGenericLoopDirective(
+ const OMPTargetTeamsGenericLoopDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPParallelGenericLoopDirective(
+ const OMPParallelGenericLoopDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPTargetParallelGenericLoopDirective(
+ const OMPTargetParallelGenericLoopDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
void StmtProfiler::VisitExpr(const Expr *S) {
VisitStmt(S);
}
@@ -1209,13 +1330,21 @@ void StmtProfiler::VisitSYCLUniqueStableNameExpr(
void StmtProfiler::VisitPredefinedExpr(const PredefinedExpr *S) {
VisitExpr(S);
- ID.AddInteger(S->getIdentKind());
+ ID.AddInteger(llvm::to_underlying(S->getIdentKind()));
}
void StmtProfiler::VisitIntegerLiteral(const IntegerLiteral *S) {
VisitExpr(S);
S->getValue().Profile(ID);
- ID.AddInteger(S->getType()->castAs<BuiltinType>()->getKind());
+
+ QualType T = S->getType();
+ if (Canonical)
+ T = T.getCanonicalType();
+ ID.AddInteger(T->getTypeClass());
+ if (auto BitIntT = T->getAs<BitIntType>())
+ BitIntT->Profile(ID);
+ else
+ ID.AddInteger(T->castAs<BuiltinType>()->getKind());
}
void StmtProfiler::VisitFixedPointLiteral(const FixedPointLiteral *S) {
@@ -1226,7 +1355,7 @@ void StmtProfiler::VisitFixedPointLiteral(const FixedPointLiteral *S) {
void StmtProfiler::VisitCharacterLiteral(const CharacterLiteral *S) {
VisitExpr(S);
- ID.AddInteger(S->getKind());
+ ID.AddInteger(llvm::to_underlying(S->getKind()));
ID.AddInteger(S->getValue());
}
@@ -1244,7 +1373,7 @@ void StmtProfiler::VisitImaginaryLiteral(const ImaginaryLiteral *S) {
void StmtProfiler::VisitStringLiteral(const StringLiteral *S) {
VisitExpr(S);
ID.AddString(S->getBytes());
- ID.AddInteger(S->getKind());
+ ID.AddInteger(llvm::to_underlying(S->getKind()));
}
void StmtProfiler::VisitParenExpr(const ParenExpr *S) {
@@ -1426,7 +1555,7 @@ void StmtProfiler::VisitDesignatedInitExpr(const DesignatedInitExpr *S) {
assert(D.isArrayRangeDesignator());
ID.AddInteger(2);
}
- ID.AddInteger(D.getFirstExprIndex());
+ ID.AddInteger(D.getArrayIndex());
}
}
@@ -1535,8 +1664,8 @@ void StmtProfiler::VisitRequiresExpr(const RequiresExpr *S) {
} else {
ID.AddInteger(concepts::Requirement::RK_Nested);
auto *NestedReq = cast<concepts::NestedRequirement>(Req);
- ID.AddBoolean(NestedReq->isSubstitutionFailure());
- if (!NestedReq->isSubstitutionFailure())
+ ID.AddBoolean(NestedReq->hasInvalidConstraint());
+ if (!NestedReq->hasInvalidConstraint())
Visit(NestedReq->getConstraintExpr());
}
}
@@ -1544,7 +1673,8 @@ void StmtProfiler::VisitRequiresExpr(const RequiresExpr *S) {
static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
UnaryOperatorKind &UnaryOp,
- BinaryOperatorKind &BinaryOp) {
+ BinaryOperatorKind &BinaryOp,
+ unsigned &NumArgs) {
switch (S->getOperator()) {
case OO_None:
case OO_New:
@@ -1557,7 +1687,7 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
llvm_unreachable("Invalid operator call kind");
case OO_Plus:
- if (S->getNumArgs() == 1) {
+ if (NumArgs == 1) {
UnaryOp = UO_Plus;
return Stmt::UnaryOperatorClass;
}
@@ -1566,7 +1696,7 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
return Stmt::BinaryOperatorClass;
case OO_Minus:
- if (S->getNumArgs() == 1) {
+ if (NumArgs == 1) {
UnaryOp = UO_Minus;
return Stmt::UnaryOperatorClass;
}
@@ -1575,7 +1705,7 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
return Stmt::BinaryOperatorClass;
case OO_Star:
- if (S->getNumArgs() == 1) {
+ if (NumArgs == 1) {
UnaryOp = UO_Deref;
return Stmt::UnaryOperatorClass;
}
@@ -1596,7 +1726,7 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
return Stmt::BinaryOperatorClass;
case OO_Amp:
- if (S->getNumArgs() == 1) {
+ if (NumArgs == 1) {
UnaryOp = UO_AddrOf;
return Stmt::UnaryOperatorClass;
}
@@ -1705,13 +1835,13 @@ static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
return Stmt::BinaryOperatorClass;
case OO_PlusPlus:
- UnaryOp = S->getNumArgs() == 1? UO_PreInc
- : UO_PostInc;
+ UnaryOp = NumArgs == 1 ? UO_PreInc : UO_PostInc;
+ NumArgs = 1;
return Stmt::UnaryOperatorClass;
case OO_MinusMinus:
- UnaryOp = S->getNumArgs() == 1? UO_PreDec
- : UO_PostDec;
+ UnaryOp = NumArgs == 1 ? UO_PreDec : UO_PostDec;
+ NumArgs = 1;
return Stmt::UnaryOperatorClass;
case OO_Comma:
@@ -1757,10 +1887,11 @@ void StmtProfiler::VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *S) {
UnaryOperatorKind UnaryOp = UO_Extension;
BinaryOperatorKind BinaryOp = BO_Comma;
- Stmt::StmtClass SC = DecodeOperatorCall(S, UnaryOp, BinaryOp);
+ unsigned NumArgs = S->getNumArgs();
+ Stmt::StmtClass SC = DecodeOperatorCall(S, UnaryOp, BinaryOp, NumArgs);
ID.AddInteger(SC);
- for (unsigned I = 0, N = S->getNumArgs(); I != N; ++I)
+ for (unsigned I = 0; I != NumArgs; ++I)
Visit(S->getArg(I));
if (SC == Stmt::UnaryOperatorClass)
ID.AddInteger(UnaryOp);
@@ -1923,31 +2054,27 @@ StmtProfiler::VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *S) {
void
StmtProfiler::VisitLambdaExpr(const LambdaExpr *S) {
- VisitExpr(S);
- for (LambdaExpr::capture_iterator C = S->explicit_capture_begin(),
- CEnd = S->explicit_capture_end();
- C != CEnd; ++C) {
- if (C->capturesVLAType())
- continue;
+ if (!ProfileLambdaExpr) {
+ // Do not recursively visit the children of this expression. Profiling the
+ // body would result in unnecessary work, and is not safe to do during
+ // deserialization.
+ VisitStmtNoChildren(S);
- ID.AddInteger(C->getCaptureKind());
- switch (C->getCaptureKind()) {
- case LCK_StarThis:
- case LCK_This:
- break;
- case LCK_ByRef:
- case LCK_ByCopy:
- VisitDecl(C->getCapturedVar());
- ID.AddBoolean(C->isPackExpansion());
- break;
- case LCK_VLAType:
- llvm_unreachable("VLA type in explicit captures.");
- }
+ // C++20 [temp.over.link]p5:
+ // Two lambda-expressions are never considered equivalent.
+ VisitDecl(S->getLambdaClass());
+
+ return;
+ }
+
+ CXXRecordDecl *Lambda = S->getLambdaClass();
+ ID.AddInteger(Lambda->getODRHash());
+
+ for (const auto &Capture : Lambda->captures()) {
+ ID.AddInteger(Capture.getCaptureKind());
+ if (Capture.capturesVariable())
+ VisitDecl(Capture.getCapturedVar());
}
- // Note: If we actually needed to be able to match lambda
- // expressions, we would have to consider parameters and return type
- // here, among other things.
- VisitStmt(S->getBody());
}
void
@@ -1971,7 +2098,7 @@ void StmtProfiler::VisitCXXNewExpr(const CXXNewExpr *S) {
ID.AddInteger(S->getNumPlacementArgs());
ID.AddBoolean(S->isGlobalNew());
ID.AddBoolean(S->isParenTypeId());
- ID.AddInteger(S->getInitializationStyle());
+ ID.AddInteger(llvm::to_underlying(S->getInitializationStyle()));
}
void
@@ -2123,6 +2250,10 @@ void StmtProfiler::VisitCXXFoldExpr(const CXXFoldExpr *S) {
ID.AddInteger(S->getOperator());
}
+void StmtProfiler::VisitCXXParenListInitExpr(const CXXParenListInitExpr *S) {
+ VisitExpr(S);
+}
+
void StmtProfiler::VisitCoroutineBodyStmt(const CoroutineBodyStmt *S) {
VisitStmt(S);
}
@@ -2285,6 +2416,12 @@ void StmtProfiler::VisitTemplateArgument(const TemplateArgument &Arg) {
Arg.getAsIntegral().Profile(ID);
break;
+ case TemplateArgument::StructuralValue:
+ VisitType(Arg.getStructuralValueType());
+ // FIXME: Do we need to recursively decompose this ourselves?
+ Arg.getAsStructuralValue().Profile(ID);
+ break;
+
case TemplateArgument::Expression:
Visit(Arg.getAsExpr());
break;
@@ -2297,8 +2434,8 @@ void StmtProfiler::VisitTemplateArgument(const TemplateArgument &Arg) {
}
void Stmt::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
- bool Canonical) const {
- StmtProfilerWithPointers Profiler(ID, Context, Canonical);
+ bool Canonical, bool ProfileLambdaExpr) const {
+ StmtProfilerWithPointers Profiler(ID, Context, Canonical, ProfileLambdaExpr);
Profiler.Visit(this);
}
diff --git a/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp b/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp
index f44230d1bd03..3310d7dc24c5 100644
--- a/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp
@@ -29,7 +29,6 @@
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
@@ -41,6 +40,7 @@
#include <cstddef>
#include <cstdint>
#include <cstring>
+#include <optional>
using namespace clang;
@@ -59,15 +59,17 @@ static void printIntegral(const TemplateArgument &TemplArg, raw_ostream &Out,
const Type *T = TemplArg.getIntegralType().getTypePtr();
const llvm::APSInt &Val = TemplArg.getAsIntegral();
- if (const EnumType *ET = T->getAs<EnumType>()) {
- for (const EnumConstantDecl* ECD : ET->getDecl()->enumerators()) {
- // In Sema::CheckTemplateArugment, enum template arguments value are
- // extended to the size of the integer underlying the enum type. This
- // may create a size difference between the enum value and template
- // argument value, requiring isSameValue here instead of operator==.
- if (llvm::APSInt::isSameValue(ECD->getInitVal(), Val)) {
- ECD->printQualifiedName(Out, Policy);
- return;
+ if (Policy.UseEnumerators) {
+ if (const EnumType *ET = T->getAs<EnumType>()) {
+ for (const EnumConstantDecl *ECD : ET->getDecl()->enumerators()) {
+ // In Sema::CheckTemplateArugment, enum template arguments value are
+ // extended to the size of the integer underlying the enum type. This
+ // may create a size difference between the enum value and template
+ // argument value, requiring isSameValue here instead of operator==.
+ if (llvm::APSInt::isSameValue(ECD->getInitVal(), Val)) {
+ ECD->printQualifiedName(Out, Policy);
+ return;
+ }
}
}
}
@@ -87,19 +89,20 @@ static void printIntegral(const TemplateArgument &TemplArg, raw_ostream &Out,
else if (T->isSpecificBuiltinType(BuiltinType::UChar))
Out << "(unsigned char)";
}
- CharacterLiteral::print(Val.getZExtValue(), CharacterLiteral::Ascii, Out);
+ CharacterLiteral::print(Val.getZExtValue(), CharacterLiteralKind::Ascii,
+ Out);
} else if (T->isAnyCharacterType() && !Policy.MSVCFormatting) {
- CharacterLiteral::CharacterKind Kind;
+ CharacterLiteralKind Kind;
if (T->isWideCharType())
- Kind = CharacterLiteral::Wide;
+ Kind = CharacterLiteralKind::Wide;
else if (T->isChar8Type())
- Kind = CharacterLiteral::UTF8;
+ Kind = CharacterLiteralKind::UTF8;
else if (T->isChar16Type())
- Kind = CharacterLiteral::UTF16;
+ Kind = CharacterLiteralKind::UTF16;
else if (T->isChar32Type())
- Kind = CharacterLiteral::UTF32;
+ Kind = CharacterLiteralKind::UTF32;
else
- Kind = CharacterLiteral::Ascii;
+ Kind = CharacterLiteralKind::Ascii;
CharacterLiteral::print(Val.getExtValue(), Kind, Out);
} else if (IncludeType) {
if (const auto *BT = T->getAs<BuiltinType>()) {
@@ -158,9 +161,27 @@ static bool needsAmpersandOnTemplateArg(QualType paramType, QualType argType) {
// TemplateArgument Implementation
//===----------------------------------------------------------------------===//
-TemplateArgument::TemplateArgument(ASTContext &Ctx, const llvm::APSInt &Value,
- QualType Type) {
+void TemplateArgument::initFromType(QualType T, bool IsNullPtr,
+ bool IsDefaulted) {
+ TypeOrValue.Kind = IsNullPtr ? NullPtr : Type;
+ TypeOrValue.IsDefaulted = IsDefaulted;
+ TypeOrValue.V = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
+}
+
+void TemplateArgument::initFromDeclaration(ValueDecl *D, QualType QT,
+ bool IsDefaulted) {
+ assert(D && "Expected decl");
+ DeclArg.Kind = Declaration;
+ DeclArg.IsDefaulted = IsDefaulted;
+ DeclArg.QT = QT.getAsOpaquePtr();
+ DeclArg.D = D;
+}
+
+void TemplateArgument::initFromIntegral(const ASTContext &Ctx,
+ const llvm::APSInt &Value,
+ QualType Type, bool IsDefaulted) {
Integer.Kind = Integral;
+ Integer.IsDefaulted = IsDefaulted;
// Copy the APSInt value into our decomposed form.
Integer.BitWidth = Value.getBitWidth();
Integer.IsUnsigned = Value.isUnsigned();
@@ -177,6 +198,56 @@ TemplateArgument::TemplateArgument(ASTContext &Ctx, const llvm::APSInt &Value,
Integer.Type = Type.getAsOpaquePtr();
}
+void TemplateArgument::initFromStructural(const ASTContext &Ctx, QualType Type,
+ const APValue &V, bool IsDefaulted) {
+ Value.Kind = StructuralValue;
+ Value.IsDefaulted = IsDefaulted;
+ Value.Value = new (Ctx) APValue(V);
+ Ctx.addDestruction(Value.Value);
+ Value.Type = Type.getAsOpaquePtr();
+}
+
+TemplateArgument::TemplateArgument(const ASTContext &Ctx,
+ const llvm::APSInt &Value, QualType Type,
+ bool IsDefaulted) {
+ initFromIntegral(Ctx, Value, Type, IsDefaulted);
+}
+
+static const ValueDecl *getAsSimpleValueDeclRef(const ASTContext &Ctx,
+ QualType T, const APValue &V) {
+ // Pointers to members are relatively easy.
+ if (V.isMemberPointer() && V.getMemberPointerPath().empty())
+ return V.getMemberPointerDecl();
+
+ // We model class non-type template parameters as their template parameter
+ // object declaration.
+ if (V.isStruct() || V.isUnion())
+ return Ctx.getTemplateParamObjectDecl(T, V);
+
+ // Pointers and references with an empty path use the special 'Declaration'
+ // representation.
+ if (V.isLValue() && V.hasLValuePath() && V.getLValuePath().empty() &&
+ !V.isLValueOnePastTheEnd())
+ return V.getLValueBase().dyn_cast<const ValueDecl *>();
+
+ // Everything else uses the 'structural' representation.
+ return nullptr;
+}
+
+TemplateArgument::TemplateArgument(const ASTContext &Ctx, QualType Type,
+ const APValue &V, bool IsDefaulted) {
+ if (Type->isIntegralOrEnumerationType() && V.isInt())
+ initFromIntegral(Ctx, V.getInt(), Type, IsDefaulted);
+ else if ((V.isLValue() && V.isNullPointer()) ||
+ (V.isMemberPointer() && !V.getMemberPointerDecl()))
+ initFromType(Type, /*isNullPtr=*/true, IsDefaulted);
+ else if (const ValueDecl *VD = getAsSimpleValueDeclRef(Ctx, Type, V))
+ // FIXME: The Declaration form should expose a const ValueDecl*.
+ initFromDeclaration(const_cast<ValueDecl *>(VD), Type, IsDefaulted);
+ else
+ initFromStructural(Ctx, Type, V, IsDefaulted);
+}
+
TemplateArgument
TemplateArgument::CreatePackCopy(ASTContext &Context,
ArrayRef<TemplateArgument> Args) {
@@ -217,6 +288,7 @@ TemplateArgumentDependence TemplateArgument::getDependence() const {
case NullPtr:
case Integral:
+ case StructuralValue:
return TemplateArgumentDependence::None;
case Expression:
@@ -247,6 +319,7 @@ bool TemplateArgument::isPackExpansion() const {
case Null:
case Declaration:
case Integral:
+ case StructuralValue:
case Pack:
case Template:
case NullPtr:
@@ -269,12 +342,12 @@ bool TemplateArgument::containsUnexpandedParameterPack() const {
return getDependence() & TemplateArgumentDependence::UnexpandedPack;
}
-Optional<unsigned> TemplateArgument::getNumTemplateExpansions() const {
+std::optional<unsigned> TemplateArgument::getNumTemplateExpansions() const {
assert(getKind() == TemplateExpansion);
if (TemplateArg.NumExpansions)
return TemplateArg.NumExpansions - 1;
- return None;
+ return std::nullopt;
}
QualType TemplateArgument::getNonTypeTemplateArgumentType() const {
@@ -297,6 +370,9 @@ QualType TemplateArgument::getNonTypeTemplateArgumentType() const {
case TemplateArgument::NullPtr:
return getNullPtrType();
+
+ case TemplateArgument::StructuralValue:
+ return getStructuralValueType();
}
llvm_unreachable("Invalid TemplateArgument Kind!");
@@ -319,30 +395,24 @@ void TemplateArgument::Profile(llvm::FoldingSetNodeID &ID,
case Declaration:
getParamTypeForDecl().Profile(ID);
- ID.AddPointer(getAsDecl()? getAsDecl()->getCanonicalDecl() : nullptr);
+ ID.AddPointer(getAsDecl());
break;
+ case TemplateExpansion:
+ ID.AddInteger(TemplateArg.NumExpansions);
+ [[fallthrough]];
case Template:
- case TemplateExpansion: {
- TemplateName Template = getAsTemplateOrTemplatePattern();
- if (TemplateTemplateParmDecl *TTP
- = dyn_cast_or_null<TemplateTemplateParmDecl>(
- Template.getAsTemplateDecl())) {
- ID.AddBoolean(true);
- ID.AddInteger(TTP->getDepth());
- ID.AddInteger(TTP->getPosition());
- ID.AddBoolean(TTP->isParameterPack());
- } else {
- ID.AddBoolean(false);
- ID.AddPointer(Context.getCanonicalTemplateName(Template)
- .getAsVoidPointer());
- }
+ ID.AddPointer(TemplateArg.Name);
break;
- }
case Integral:
- getAsIntegral().Profile(ID);
getIntegralType().Profile(ID);
+ getAsIntegral().Profile(ID);
+ break;
+
+ case StructuralValue:
+ getStructuralValueType().Profile(ID);
+ getAsStructuralValue().Profile(ID);
break;
case Expression:
@@ -372,12 +442,24 @@ bool TemplateArgument::structurallyEquals(const TemplateArgument &Other) const {
TemplateArg.NumExpansions == Other.TemplateArg.NumExpansions;
case Declaration:
- return getAsDecl() == Other.getAsDecl();
+ return getAsDecl() == Other.getAsDecl() &&
+ getParamTypeForDecl() == Other.getParamTypeForDecl();
case Integral:
return getIntegralType() == Other.getIntegralType() &&
getAsIntegral() == Other.getAsIntegral();
+ case StructuralValue: {
+ if (getStructuralValueType().getCanonicalType() !=
+ Other.getStructuralValueType().getCanonicalType())
+ return false;
+
+ llvm::FoldingSetNodeID A, B;
+ getAsStructuralValue().Profile(A);
+ Other.getAsStructuralValue().Profile(B);
+ return A == B;
+ }
+
case Pack:
if (Args.NumArgs != Other.Args.NumArgs) return false;
for (unsigned I = 0, E = Args.NumArgs; I != E; ++I)
@@ -404,6 +486,7 @@ TemplateArgument TemplateArgument::getPackExpansionPattern() const {
case Declaration:
case Integral:
+ case StructuralValue:
case Pack:
case Null:
case Template:
@@ -430,11 +513,11 @@ void TemplateArgument::print(const PrintingPolicy &Policy, raw_ostream &Out,
}
case Declaration: {
- // FIXME: Include the type if it's not obvious from the context.
NamedDecl *ND = getAsDecl();
if (getParamTypeForDecl()->isRecordType()) {
if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
- TPO->printAsInit(Out);
+ TPO->getType().getUnqualifiedType().print(Out, Policy);
+ TPO->printAsInit(Out, Policy);
break;
}
}
@@ -446,13 +529,17 @@ void TemplateArgument::print(const PrintingPolicy &Policy, raw_ostream &Out,
break;
}
+ case StructuralValue:
+ getAsStructuralValue().printPretty(Out, Policy, getStructuralValueType());
+ break;
+
case NullPtr:
// FIXME: Include the type if it's not obvious from the context.
Out << "nullptr";
break;
case Template:
- getAsTemplate().print(Out, Policy);
+ getAsTemplate().print(Out, Policy, TemplateName::Qualified::Fully);
break;
case TemplateExpansion:
@@ -529,6 +616,9 @@ SourceRange TemplateArgumentLoc::getSourceRange() const {
case TemplateArgument::Integral:
return getSourceIntegralExpression()->getSourceRange();
+ case TemplateArgument::StructuralValue:
+ return getSourceStructuralValueExpression()->getSourceRange();
+
case TemplateArgument::Pack:
case TemplateArgument::Null:
return SourceRange();
@@ -557,6 +647,18 @@ static const T &DiagTemplateArg(const T &DB, const TemplateArgument &Arg) {
case TemplateArgument::Integral:
return DB << toString(Arg.getAsIntegral(), 10);
+ case TemplateArgument::StructuralValue: {
+ // FIXME: We're guessing at LangOptions!
+ SmallString<32> Str;
+ llvm::raw_svector_ostream OS(Str);
+ LangOptions LangOpts;
+ LangOpts.CPlusPlus = true;
+ PrintingPolicy Policy(LangOpts);
+ Arg.getAsStructuralValue().printPretty(OS, Policy,
+ Arg.getStructuralValueType());
+ return DB << OS.str();
+ }
+
case TemplateArgument::Template:
return DB << Arg.getAsTemplate();
@@ -615,6 +717,17 @@ ASTTemplateArgumentListInfo::Create(const ASTContext &C,
return new (Mem) ASTTemplateArgumentListInfo(List);
}
+const ASTTemplateArgumentListInfo *
+ASTTemplateArgumentListInfo::Create(const ASTContext &C,
+ const ASTTemplateArgumentListInfo *List) {
+ if (!List)
+ return nullptr;
+ std::size_t size =
+ totalSizeToAlloc<TemplateArgumentLoc>(List->getNumTemplateArgs());
+ void *Mem = C.Allocate(size, alignof(ASTTemplateArgumentListInfo));
+ return new (Mem) ASTTemplateArgumentListInfo(List);
+}
+
ASTTemplateArgumentListInfo::ASTTemplateArgumentListInfo(
const TemplateArgumentListInfo &Info) {
LAngleLoc = Info.getLAngleLoc();
@@ -626,6 +739,17 @@ ASTTemplateArgumentListInfo::ASTTemplateArgumentListInfo(
new (&ArgBuffer[i]) TemplateArgumentLoc(Info[i]);
}
+ASTTemplateArgumentListInfo::ASTTemplateArgumentListInfo(
+ const ASTTemplateArgumentListInfo *Info) {
+ LAngleLoc = Info->getLAngleLoc();
+ RAngleLoc = Info->getRAngleLoc();
+ NumTemplateArgs = Info->getNumTemplateArgs();
+
+ TemplateArgumentLoc *ArgBuffer = getTrailingObjects<TemplateArgumentLoc>();
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ new (&ArgBuffer[i]) TemplateArgumentLoc((*Info)[i]);
+}
+
void ASTTemplateKWAndArgsInfo::initializeFrom(
SourceLocation TemplateKWLoc, const TemplateArgumentListInfo &Info,
TemplateArgumentLoc *OutArgArray) {
diff --git a/contrib/llvm-project/clang/lib/AST/TemplateName.cpp b/contrib/llvm-project/clang/lib/AST/TemplateName.cpp
index 22cfa9acbe1b..2f0e4181e940 100644
--- a/contrib/llvm-project/clang/lib/AST/TemplateName.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TemplateName.cpp
@@ -13,6 +13,7 @@
#include "clang/AST/TemplateName.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/NestedNameSpecifier.h"
@@ -28,37 +29,74 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
+#include <optional>
#include <string>
using namespace clang;
TemplateArgument
SubstTemplateTemplateParmPackStorage::getArgumentPack() const {
- return TemplateArgument(llvm::makeArrayRef(Arguments, size()));
+ return TemplateArgument(llvm::ArrayRef(Arguments, Bits.Data));
+}
+
+TemplateTemplateParmDecl *
+SubstTemplateTemplateParmPackStorage::getParameterPack() const {
+ return cast<TemplateTemplateParmDecl>(
+ getReplacedTemplateParameterList(getAssociatedDecl())
+ ->asArray()[Bits.Index]);
+}
+
+TemplateTemplateParmDecl *
+SubstTemplateTemplateParmStorage::getParameter() const {
+ return cast<TemplateTemplateParmDecl>(
+ getReplacedTemplateParameterList(getAssociatedDecl())
+ ->asArray()[Bits.Index]);
}
void SubstTemplateTemplateParmStorage::Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, Parameter, Replacement);
+ Profile(ID, Replacement, getAssociatedDecl(), getIndex(), getPackIndex());
}
-void SubstTemplateTemplateParmStorage::Profile(llvm::FoldingSetNodeID &ID,
- TemplateTemplateParmDecl *parameter,
- TemplateName replacement) {
- ID.AddPointer(parameter);
- ID.AddPointer(replacement.getAsVoidPointer());
+void SubstTemplateTemplateParmStorage::Profile(
+ llvm::FoldingSetNodeID &ID, TemplateName Replacement, Decl *AssociatedDecl,
+ unsigned Index, std::optional<unsigned> PackIndex) {
+ Replacement.Profile(ID);
+ ID.AddPointer(AssociatedDecl);
+ ID.AddInteger(Index);
+ ID.AddInteger(PackIndex ? *PackIndex + 1 : 0);
+}
+
+SubstTemplateTemplateParmPackStorage::SubstTemplateTemplateParmPackStorage(
+ ArrayRef<TemplateArgument> ArgPack, Decl *AssociatedDecl, unsigned Index,
+ bool Final)
+ : UncommonTemplateNameStorage(SubstTemplateTemplateParmPack, Index,
+ ArgPack.size()),
+ Arguments(ArgPack.data()), AssociatedDeclAndFinal(AssociatedDecl, Final) {
+ assert(AssociatedDecl != nullptr);
}
void SubstTemplateTemplateParmPackStorage::Profile(llvm::FoldingSetNodeID &ID,
ASTContext &Context) {
- Profile(ID, Context, Parameter, getArgumentPack());
+ Profile(ID, Context, getArgumentPack(), getAssociatedDecl(), getIndex(),
+ getFinal());
}
-void SubstTemplateTemplateParmPackStorage::Profile(llvm::FoldingSetNodeID &ID,
- ASTContext &Context,
- TemplateTemplateParmDecl *Parameter,
- const TemplateArgument &ArgPack) {
- ID.AddPointer(Parameter);
+Decl *SubstTemplateTemplateParmPackStorage::getAssociatedDecl() const {
+ return AssociatedDeclAndFinal.getPointer();
+}
+
+bool SubstTemplateTemplateParmPackStorage::getFinal() const {
+ return AssociatedDeclAndFinal.getInt();
+}
+
+void SubstTemplateTemplateParmPackStorage::Profile(
+ llvm::FoldingSetNodeID &ID, ASTContext &Context,
+ const TemplateArgument &ArgPack, Decl *AssociatedDecl, unsigned Index,
+ bool Final) {
ArgPack.Profile(ID, Context);
+ ID.AddPointer(AssociatedDecl);
+ ID.AddInteger(Index);
+ ID.AddBoolean(Final);
}
TemplateName::TemplateName(void *Ptr) {
@@ -76,12 +114,18 @@ TemplateName::TemplateName(SubstTemplateTemplateParmPackStorage *Storage)
: Storage(Storage) {}
TemplateName::TemplateName(QualifiedTemplateName *Qual) : Storage(Qual) {}
TemplateName::TemplateName(DependentTemplateName *Dep) : Storage(Dep) {}
+TemplateName::TemplateName(UsingShadowDecl *Using) : Storage(Using) {}
bool TemplateName::isNull() const { return Storage.isNull(); }
TemplateName::NameKind TemplateName::getKind() const {
- if (Storage.is<TemplateDecl *>())
+ if (auto *ND = Storage.dyn_cast<Decl *>()) {
+ if (isa<UsingShadowDecl>(ND))
+ return UsingTemplate;
+ assert(isa<TemplateDecl>(ND));
return Template;
+ }
+
if (Storage.is<DependentTemplateName *>())
return DependentTemplate;
if (Storage.is<QualifiedTemplateName *>())
@@ -99,15 +143,23 @@ TemplateName::NameKind TemplateName::getKind() const {
}
TemplateDecl *TemplateName::getAsTemplateDecl() const {
- if (TemplateDecl *Template = Storage.dyn_cast<TemplateDecl *>())
- return Template;
+ if (Decl *TemplateOrUsing = Storage.dyn_cast<Decl *>()) {
+ if (UsingShadowDecl *USD = dyn_cast<UsingShadowDecl>(TemplateOrUsing))
+ return cast<TemplateDecl>(USD->getTargetDecl());
+
+ assert(isa<TemplateDecl>(TemplateOrUsing));
+ return cast<TemplateDecl>(TemplateOrUsing);
+ }
if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName())
- return QTN->getTemplateDecl();
+ return QTN->getUnderlyingTemplate().getAsTemplateDecl();
if (SubstTemplateTemplateParmStorage *sub = getAsSubstTemplateTemplateParm())
return sub->getReplacement().getAsTemplateDecl();
+ if (UsingShadowDecl *USD = getAsUsingShadowDecl())
+ return cast<TemplateDecl>(USD->getTargetDecl());
+
return nullptr;
}
@@ -153,6 +205,15 @@ DependentTemplateName *TemplateName::getAsDependentTemplateName() const {
return Storage.dyn_cast<DependentTemplateName *>();
}
+UsingShadowDecl *TemplateName::getAsUsingShadowDecl() const {
+ if (Decl *D = Storage.dyn_cast<Decl *>())
+ if (UsingShadowDecl *USD = dyn_cast<UsingShadowDecl>(D))
+ return USD;
+ if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName())
+ return QTN->getUnderlyingTemplate().getAsUsingShadowDecl();
+ return nullptr;
+}
+
TemplateName TemplateName::getNameToSubstitute() const {
TemplateDecl *Decl = getAsTemplateDecl();
@@ -220,19 +281,57 @@ bool TemplateName::containsUnexpandedParameterPack() const {
return getDependence() & TemplateNameDependence::UnexpandedPack;
}
-void
-TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy,
- bool SuppressNNS) const {
- if (TemplateDecl *Template = Storage.dyn_cast<TemplateDecl *>())
- OS << *Template;
+void TemplateName::Profile(llvm::FoldingSetNodeID &ID) {
+ if (const auto* USD = getAsUsingShadowDecl())
+ ID.AddPointer(USD->getCanonicalDecl());
+ else if (const auto *TD = getAsTemplateDecl())
+ ID.AddPointer(TD->getCanonicalDecl());
+ else
+ ID.AddPointer(Storage.getOpaqueValue());
+}
+
+void TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy,
+ Qualified Qual) const {
+ auto Kind = getKind();
+ TemplateDecl *Template = nullptr;
+ if (Kind == TemplateName::Template || Kind == TemplateName::UsingTemplate) {
+ // After `namespace ns { using std::vector }`, what is the fully-qualified
+ // name of the UsingTemplateName `vector` within ns?
+ //
+ // - ns::vector (the qualified name of the using-shadow decl)
+ // - std::vector (the qualified name of the underlying template decl)
+ //
+ // Similar to the UsingType behavior, using declarations are used to import
+ // names more often than to export them, thus using the original name is
+ // most useful in this case.
+ Template = getAsTemplateDecl();
+ }
+
+ if (Template)
+ if (Policy.CleanUglifiedParameters &&
+ isa<TemplateTemplateParmDecl>(Template) && Template->getIdentifier())
+ OS << Template->getIdentifier()->deuglifiedName();
+ else if (Qual == Qualified::Fully &&
+ getDependence() !=
+ TemplateNameDependenceScope::DependentInstantiation)
+ Template->printQualifiedName(OS, Policy);
+ else
+ OS << *Template;
else if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) {
- if (!SuppressNNS)
+ if (Qual == Qualified::Fully &&
+ getDependence() !=
+ TemplateNameDependenceScope::DependentInstantiation) {
+ QTN->getUnderlyingTemplate().getAsTemplateDecl()->printQualifiedName(
+ OS, Policy);
+ return;
+ }
+ if (Qual == Qualified::AsWritten)
QTN->getQualifier()->print(OS, Policy);
if (QTN->hasTemplateKeyword())
OS << "template ";
- OS << *QTN->getDecl();
+ OS << *QTN->getUnderlyingTemplate().getAsTemplateDecl();
} else if (DependentTemplateName *DTN = getAsDependentTemplateName()) {
- if (!SuppressNNS && DTN->getQualifier())
+ if (Qual == Qualified::AsWritten && DTN->getQualifier())
DTN->getQualifier()->print(OS, Policy);
OS << "template ";
@@ -242,15 +341,16 @@ TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy,
OS << "operator " << getOperatorSpelling(DTN->getOperator());
} else if (SubstTemplateTemplateParmStorage *subst
= getAsSubstTemplateTemplateParm()) {
- subst->getReplacement().print(OS, Policy, SuppressNNS);
+ subst->getReplacement().print(OS, Policy, Qual);
} else if (SubstTemplateTemplateParmPackStorage *SubstPack
= getAsSubstTemplateTemplateParmPack())
OS << *SubstPack->getParameterPack();
else if (AssumedTemplateStorage *Assumed = getAsAssumedTemplateName()) {
Assumed->getDeclName().print(OS, Policy);
} else {
+ assert(getKind() == TemplateName::OverloadedTemplate);
OverloadedTemplateStorage *OTS = getAsOverloadedTemplate();
- (*OTS->begin())->printName(OS);
+ (*OTS->begin())->printName(OS, Policy);
}
}
diff --git a/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp b/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp
index 33f914f9f886..ecf5de0be543 100644
--- a/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/LocInfoType.h"
+#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/Type.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
@@ -283,6 +284,10 @@ void TextNodeDumper::Visit(const Decl *D) {
OS << " constexpr";
if (FD->isConsteval())
OS << " consteval";
+ else if (FD->isImmediateFunction())
+ OS << " immediate";
+ if (FD->isMultiVersion())
+ OS << " multiversion";
}
if (!isa<FunctionDecl>(*D)) {
@@ -296,6 +301,17 @@ void TextNodeDumper::Visit(const Decl *D) {
}
}
+ switch (D->getFriendObjectKind()) {
+ case Decl::FOK_None:
+ break;
+ case Decl::FOK_Declared:
+ OS << " friend";
+ break;
+ case Decl::FOK_Undeclared:
+ OS << " friend_undeclared";
+ break;
+ }
+
ConstDeclVisitor<TextNodeDumper>::Visit(D);
}
@@ -356,6 +372,20 @@ void TextNodeDumper::Visit(const GenericSelectionExpr::ConstAssociation &A) {
OS << " selected";
}
+void TextNodeDumper::Visit(const ConceptReference *R) {
+ if (!R) {
+ ColorScope Color(OS, ShowColors, NullColor);
+ OS << "<<<NULL>>> ConceptReference";
+ return;
+ }
+
+ OS << "ConceptReference";
+ dumpPointer(R);
+ dumpSourceRange(R->getSourceRange());
+ OS << ' ';
+ dumpBareDeclRef(R->getNamedConcept());
+}
+
void TextNodeDumper::Visit(const concepts::Requirement *R) {
if (!R) {
ColorScope Color(OS, ShowColors, NullColor);
@@ -662,13 +692,18 @@ void TextNodeDumper::dumpBareType(QualType T, bool Desugar) {
ColorScope Color(OS, ShowColors, TypeColor);
SplitQualType T_split = T.split();
- OS << "'" << QualType::getAsString(T_split, PrintPolicy) << "'";
+ std::string T_str = QualType::getAsString(T_split, PrintPolicy);
+ OS << "'" << T_str << "'";
if (Desugar && !T.isNull()) {
- // If the type is sugared, also dump a (shallow) desugared type.
+ // If the type is sugared, also dump a (shallow) desugared type when
+ // it is visibly different.
SplitQualType D_split = T.getSplitDesugaredType();
- if (T_split != D_split)
- OS << ":'" << QualType::getAsString(D_split, PrintPolicy) << "'";
+ if (T_split != D_split) {
+ std::string D_str = QualType::getAsString(D_split, PrintPolicy);
+ if (T_str != D_str)
+ OS << ":'" << QualType::getAsString(D_split, PrintPolicy) << "'";
+ }
}
}
@@ -730,6 +765,66 @@ void TextNodeDumper::dumpCleanupObject(
llvm_unreachable("unexpected cleanup type");
}
+void clang::TextNodeDumper::dumpTemplateSpecializationKind(
+ TemplateSpecializationKind TSK) {
+ switch (TSK) {
+ case TSK_Undeclared:
+ break;
+ case TSK_ImplicitInstantiation:
+ OS << " implicit_instantiation";
+ break;
+ case TSK_ExplicitSpecialization:
+ OS << " explicit_specialization";
+ break;
+ case TSK_ExplicitInstantiationDeclaration:
+ OS << " explicit_instantiation_declaration";
+ break;
+ case TSK_ExplicitInstantiationDefinition:
+ OS << " explicit_instantiation_definition";
+ break;
+ }
+}
+
+void clang::TextNodeDumper::dumpNestedNameSpecifier(const NestedNameSpecifier *NNS) {
+ if (!NNS)
+ return;
+
+ AddChild([=] {
+ OS << "NestedNameSpecifier";
+
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ OS << " Identifier";
+ OS << " '" << NNS->getAsIdentifier()->getName() << "'";
+ break;
+ case NestedNameSpecifier::Namespace:
+ OS << " "; // "Namespace" is printed as the decl kind.
+ dumpBareDeclRef(NNS->getAsNamespace());
+ break;
+ case NestedNameSpecifier::NamespaceAlias:
+ OS << " "; // "NamespaceAlias" is printed as the decl kind.
+ dumpBareDeclRef(NNS->getAsNamespaceAlias());
+ break;
+ case NestedNameSpecifier::TypeSpec:
+ OS << " TypeSpec";
+ dumpType(QualType(NNS->getAsType(), 0));
+ break;
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ OS << " TypeSpecWithTemplate";
+ dumpType(QualType(NNS->getAsType(), 0));
+ break;
+ case NestedNameSpecifier::Global:
+ OS << " Global";
+ break;
+ case NestedNameSpecifier::Super:
+ OS << " Super";
+ break;
+ }
+
+ dumpNestedNameSpecifier(NNS->getPrefix());
+ });
+}
+
void TextNodeDumper::dumpDeclRef(const Decl *D, StringRef Label) {
if (!D)
return;
@@ -767,19 +862,19 @@ void TextNodeDumper::visitInlineCommandComment(
const comments::InlineCommandComment *C, const comments::FullComment *) {
OS << " Name=\"" << getCommandName(C->getCommandID()) << "\"";
switch (C->getRenderKind()) {
- case comments::InlineCommandComment::RenderNormal:
+ case comments::InlineCommandRenderKind::Normal:
OS << " RenderNormal";
break;
- case comments::InlineCommandComment::RenderBold:
+ case comments::InlineCommandRenderKind::Bold:
OS << " RenderBold";
break;
- case comments::InlineCommandComment::RenderMonospaced:
+ case comments::InlineCommandRenderKind::Monospaced:
OS << " RenderMonospaced";
break;
- case comments::InlineCommandComment::RenderEmphasized:
+ case comments::InlineCommandRenderKind::Emphasized:
OS << " RenderEmphasized";
break;
- case comments::InlineCommandComment::RenderAnchor:
+ case comments::InlineCommandRenderKind::Anchor:
OS << " RenderAnchor";
break;
}
@@ -898,12 +993,17 @@ void TextNodeDumper::VisitIntegralTemplateArgument(const TemplateArgument &TA) {
}
void TextNodeDumper::VisitTemplateTemplateArgument(const TemplateArgument &TA) {
+ if (TA.getAsTemplate().getKind() == TemplateName::UsingTemplate)
+ OS << " using";
OS << " template ";
TA.getAsTemplate().dump(OS);
}
void TextNodeDumper::VisitTemplateExpansionTemplateArgument(
const TemplateArgument &TA) {
+ if (TA.getAsTemplateOrTemplatePattern().getKind() ==
+ TemplateName::UsingTemplate)
+ OS << " using";
OS << " template expansion ";
TA.getAsTemplateOrTemplatePattern().dump(OS);
}
@@ -948,6 +1048,14 @@ void TextNodeDumper::VisitIfStmt(const IfStmt *Node) {
OS << " has_var";
if (Node->hasElseStorage())
OS << " has_else";
+ if (Node->isConstexpr())
+ OS << " constexpr";
+ if (Node->isConsteval()) {
+ OS << " ";
+ if (Node->isNegatedConsteval())
+ OS << "!";
+ OS << "consteval";
+ }
}
void TextNodeDumper::VisitSwitchStmt(const SwitchStmt *Node) {
@@ -978,6 +1086,24 @@ void TextNodeDumper::VisitCaseStmt(const CaseStmt *Node) {
OS << " gnu_range";
}
+void clang::TextNodeDumper::VisitReturnStmt(const ReturnStmt *Node) {
+ if (const VarDecl *Cand = Node->getNRVOCandidate()) {
+ OS << " nrvo_candidate(";
+ dumpBareDeclRef(Cand);
+ OS << ")";
+ }
+}
+
+void clang::TextNodeDumper::VisitCoawaitExpr(const CoawaitExpr *Node) {
+ if (Node->isImplicit())
+ OS << " implicit";
+}
+
+void clang::TextNodeDumper::VisitCoreturnStmt(const CoreturnStmt *Node) {
+ if (Node->isImplicit())
+ OS << " implicit";
+}
+
void TextNodeDumper::VisitConstantExpr(const ConstantExpr *Node) {
if (Node->hasAPValueResult())
AddChild("value",
@@ -1020,6 +1146,7 @@ void TextNodeDumper::VisitImplicitCastExpr(const ImplicitCastExpr *Node) {
void TextNodeDumper::VisitDeclRefExpr(const DeclRefExpr *Node) {
OS << " ";
dumpBareDeclRef(Node->getDecl());
+ dumpNestedNameSpecifier(Node->getQualifier());
if (Node->getDecl() != Node->getFoundDecl()) {
OS << " (";
dumpBareDeclRef(Node->getFoundDecl());
@@ -1031,6 +1158,16 @@ void TextNodeDumper::VisitDeclRefExpr(const DeclRefExpr *Node) {
case NOUR_Constant: OS << " non_odr_use_constant"; break;
case NOUR_Discarded: OS << " non_odr_use_discarded"; break;
}
+ if (Node->refersToEnclosingVariableOrCapture())
+ OS << " refers_to_enclosing_variable_or_capture";
+ if (Node->isImmediateEscalating())
+ OS << " immediate-escalating";
+}
+
+void clang::TextNodeDumper::VisitDependentScopeDeclRefExpr(
+ const DependentScopeDeclRefExpr *Node) {
+
+ dumpNestedNameSpecifier(Node->getQualifier());
}
void TextNodeDumper::VisitUnresolvedLookupExpr(
@@ -1127,6 +1264,7 @@ void TextNodeDumper::VisitUnaryExprOrTypeTraitExpr(
void TextNodeDumper::VisitMemberExpr(const MemberExpr *Node) {
OS << " " << (Node->isArrow() ? "->" : ".") << *Node->getMemberDecl();
dumpPointer(Node->getMemberDecl());
+ dumpNestedNameSpecifier(Node->getQualifier());
switch (Node->isNonOdrUse()) {
case NOUR_None: break;
case NOUR_Unevaluated: OS << " non_odr_use_unevaluated"; break;
@@ -1212,6 +1350,8 @@ void TextNodeDumper::VisitCXXConstructExpr(const CXXConstructExpr *Node) {
OS << " std::initializer_list";
if (Node->requiresZeroInitialization())
OS << " zeroing";
+ if (Node->isImmediateEscalating())
+ OS << " immediate-escalating";
}
void TextNodeDumper::VisitCXXBindTemporaryExpr(
@@ -1419,12 +1559,12 @@ void TextNodeDumper::VisitRValueReferenceType(const ReferenceType *T) {
void TextNodeDumper::VisitArrayType(const ArrayType *T) {
switch (T->getSizeModifier()) {
- case ArrayType::Normal:
+ case ArraySizeModifier::Normal:
break;
- case ArrayType::Static:
+ case ArraySizeModifier::Static:
OS << " static";
break;
- case ArrayType::Star:
+ case ArraySizeModifier::Star:
OS << " *";
break;
}
@@ -1457,29 +1597,35 @@ void TextNodeDumper::VisitDependentSizedExtVectorType(
void TextNodeDumper::VisitVectorType(const VectorType *T) {
switch (T->getVectorKind()) {
- case VectorType::GenericVector:
+ case VectorKind::Generic:
break;
- case VectorType::AltiVecVector:
+ case VectorKind::AltiVecVector:
OS << " altivec";
break;
- case VectorType::AltiVecPixel:
+ case VectorKind::AltiVecPixel:
OS << " altivec pixel";
break;
- case VectorType::AltiVecBool:
+ case VectorKind::AltiVecBool:
OS << " altivec bool";
break;
- case VectorType::NeonVector:
+ case VectorKind::Neon:
OS << " neon";
break;
- case VectorType::NeonPolyVector:
+ case VectorKind::NeonPoly:
OS << " neon poly";
break;
- case VectorType::SveFixedLengthDataVector:
+ case VectorKind::SveFixedLengthData:
OS << " fixed-length sve data vector";
break;
- case VectorType::SveFixedLengthPredicateVector:
+ case VectorKind::SveFixedLengthPredicate:
OS << " fixed-length sve predicate vector";
break;
+ case VectorKind::RVVFixedLengthData:
+ OS << " fixed-length rvv data vector";
+ break;
+ case VectorKind::RVVFixedLengthMask:
+ OS << " fixed-length rvv mask vector";
+ break;
}
OS << " " << T->getNumElements();
}
@@ -1517,7 +1663,64 @@ void TextNodeDumper::VisitFunctionProtoType(const FunctionProtoType *T) {
OS << " &&";
break;
}
- // FIXME: Exception specification.
+
+ switch (EPI.ExceptionSpec.Type) {
+ case EST_None:
+ break;
+ case EST_DynamicNone:
+ OS << " exceptionspec_dynamic_none";
+ break;
+ case EST_Dynamic:
+ OS << " exceptionspec_dynamic";
+ break;
+ case EST_MSAny:
+ OS << " exceptionspec_ms_any";
+ break;
+ case EST_NoThrow:
+ OS << " exceptionspec_nothrow";
+ break;
+ case EST_BasicNoexcept:
+ OS << " exceptionspec_basic_noexcept";
+ break;
+ case EST_DependentNoexcept:
+ OS << " exceptionspec_dependent_noexcept";
+ break;
+ case EST_NoexceptFalse:
+ OS << " exceptionspec_noexcept_false";
+ break;
+ case EST_NoexceptTrue:
+ OS << " exceptionspec_noexcept_true";
+ break;
+ case EST_Unevaluated:
+ OS << " exceptionspec_unevaluated";
+ break;
+ case EST_Uninstantiated:
+ OS << " exceptionspec_uninstantiated";
+ break;
+ case EST_Unparsed:
+ OS << " exceptionspec_unparsed";
+ break;
+ }
+ if (!EPI.ExceptionSpec.Exceptions.empty()) {
+ AddChild([=] {
+ OS << "Exceptions:";
+ for (unsigned I = 0, N = EPI.ExceptionSpec.Exceptions.size(); I != N;
+ ++I) {
+ if (I)
+ OS << ",";
+ dumpType(EPI.ExceptionSpec.Exceptions[I]);
+ }
+ });
+ }
+ if (EPI.ExceptionSpec.NoexceptExpr) {
+ AddChild([=] {
+ OS << "NoexceptExpr: ";
+ Visit(EPI.ExceptionSpec.NoexceptExpr);
+ });
+ }
+ dumpDeclRef(EPI.ExceptionSpec.SourceDecl, "ExceptionSourceDecl");
+ dumpDeclRef(EPI.ExceptionSpec.SourceTemplate, "ExceptionSourceTemplate");
+
// FIXME: Consumed parameters.
VisitFunctionType(T);
}
@@ -1526,15 +1729,25 @@ void TextNodeDumper::VisitUnresolvedUsingType(const UnresolvedUsingType *T) {
dumpDeclRef(T->getDecl());
}
+void TextNodeDumper::VisitUsingType(const UsingType *T) {
+ dumpDeclRef(T->getFoundDecl());
+ if (!T->typeMatchesDecl())
+ OS << " divergent";
+}
+
void TextNodeDumper::VisitTypedefType(const TypedefType *T) {
dumpDeclRef(T->getDecl());
+ if (!T->typeMatchesDecl())
+ OS << " divergent";
}
void TextNodeDumper::VisitUnaryTransformType(const UnaryTransformType *T) {
switch (T->getUTTKind()) {
- case UnaryTransformType::EnumUnderlyingType:
- OS << " underlying_type";
+#define TRANSFORM_TYPE_TRAIT_DEF(Enum, Trait) \
+ case UnaryTransformType::Enum: \
+ OS << " " #Trait; \
break;
+#include "clang/Basic/TransformTypeTraits.def"
}
}
@@ -1549,6 +1762,20 @@ void TextNodeDumper::VisitTemplateTypeParmType(const TemplateTypeParmType *T) {
dumpDeclRef(T->getDecl());
}
+void TextNodeDumper::VisitSubstTemplateTypeParmType(
+ const SubstTemplateTypeParmType *T) {
+ dumpDeclRef(T->getAssociatedDecl());
+ VisitTemplateTypeParmDecl(T->getReplacedParameter());
+ if (auto PackIndex = T->getPackIndex())
+ OS << " pack_index " << *PackIndex;
+}
+
+void TextNodeDumper::VisitSubstTemplateTypeParmPackType(
+ const SubstTemplateTypeParmPackType *T) {
+ dumpDeclRef(T->getAssociatedDecl());
+ VisitTemplateTypeParmDecl(T->getReplacedParameter());
+}
+
void TextNodeDumper::VisitAutoType(const AutoType *T) {
if (T->isDecltypeAuto())
OS << " decltype(auto)";
@@ -1561,10 +1788,18 @@ void TextNodeDumper::VisitAutoType(const AutoType *T) {
}
}
+void TextNodeDumper::VisitDeducedTemplateSpecializationType(
+ const DeducedTemplateSpecializationType *T) {
+ if (T->getTemplateName().getKind() == TemplateName::UsingTemplate)
+ OS << " using";
+}
+
void TextNodeDumper::VisitTemplateSpecializationType(
const TemplateSpecializationType *T) {
if (T->isTypeAlias())
OS << " alias";
+ if (T->getTemplateName().getKind() == TemplateName::UsingTemplate)
+ OS << " using";
OS << " ";
T->getTemplateName().dump(OS);
}
@@ -1631,6 +1866,7 @@ void TextNodeDumper::VisitIndirectFieldDecl(const IndirectFieldDecl *D) {
void TextNodeDumper::VisitFunctionDecl(const FunctionDecl *D) {
dumpName(D);
dumpType(D->getType());
+ dumpTemplateSpecializationKind(D->getTemplateSpecializationKind());
StorageClass SC = D->getStorageClass();
if (SC != SC_None)
@@ -1642,7 +1878,7 @@ void TextNodeDumper::VisitFunctionDecl(const FunctionDecl *D) {
if (D->isModulePrivate())
OS << " __module_private__";
- if (D->isPure())
+ if (D->isPureVirtual())
OS << " pure";
if (D->isDefaulted()) {
OS << " default";
@@ -1654,6 +1890,9 @@ void TextNodeDumper::VisitFunctionDecl(const FunctionDecl *D) {
if (D->isTrivial())
OS << " trivial";
+ if (D->isIneligibleOrNotSelected())
+ OS << (isa<CXXDestructorDecl>(D) ? " not_selected" : " ineligible");
+
if (const auto *FPT = D->getType()->getAs<FunctionProtoType>()) {
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
switch (EPI.ExceptionSpec.Type) {
@@ -1680,8 +1919,7 @@ void TextNodeDumper::VisitFunctionDecl(const FunctionDecl *D) {
auto Overrides = MD->overridden_methods();
OS << "Overrides: [ ";
dumpOverride(*Overrides.begin());
- for (const auto *Override :
- llvm::make_range(Overrides.begin() + 1, Overrides.end())) {
+ for (const auto *Override : llvm::drop_begin(Overrides)) {
OS << ", ";
dumpOverride(Override);
}
@@ -1690,12 +1928,20 @@ void TextNodeDumper::VisitFunctionDecl(const FunctionDecl *D) {
}
}
+ if (!D->isInlineSpecified() && D->isInlined()) {
+ OS << " implicit-inline";
+ }
// Since NumParams comes from the FunctionProtoType of the FunctionDecl and
// the Params are set later, it is possible for a dump during debugging to
// encounter a FunctionDecl that has been created but hasn't been assigned
// ParmVarDecls yet.
if (!D->param_empty() && !D->param_begin())
OS << " <<<NULL params x " << D->getNumParams() << ">>>";
+
+ if (const auto *Instance = D->getInstantiatedFromMemberFunction()) {
+ OS << " instantiated_from";
+ dumpPointer(Instance);
+ }
}
void TextNodeDumper::VisitLifetimeExtendedTemporaryDecl(
@@ -1719,8 +1965,14 @@ void TextNodeDumper::VisitFieldDecl(const FieldDecl *D) {
}
void TextNodeDumper::VisitVarDecl(const VarDecl *D) {
+ dumpNestedNameSpecifier(D->getQualifier());
dumpName(D);
+ if (const auto *P = dyn_cast<ParmVarDecl>(D);
+ P && P->isExplicitObjectParameter())
+ OS << " this";
+
dumpType(D->getType());
+ dumpTemplateSpecializationKind(D->getTemplateSpecializationKind());
StorageClass SC = D->getStorageClass();
if (SC != SC_None)
OS << ' ' << VarDecl::getStorageClassSpecifierString(SC);
@@ -1753,6 +2005,8 @@ void TextNodeDumper::VisitVarDecl(const VarDecl *D) {
case VarDecl::ListInit:
OS << " listinit";
break;
+ case VarDecl::ParenListInit:
+ OS << " parenlistinit";
}
}
if (D->needsDestruction(D->getASTContext()))
@@ -1763,7 +2017,8 @@ void TextNodeDumper::VisitVarDecl(const VarDecl *D) {
if (D->hasInit()) {
const Expr *E = D->getInit();
// Only dump the value of constexpr VarDecls for now.
- if (E && !E->isValueDependent() && D->isConstexpr()) {
+ if (E && !E->isValueDependent() && D->isConstexpr() &&
+ !D->getType()->isDependentType()) {
const APValue *Value = D->evaluateValue();
if (Value)
AddChild("value", [=] { Visit(*Value, E->getType()); });
@@ -1836,13 +2091,13 @@ void TextNodeDumper::VisitOMPDeclareReductionDecl(
OS << " initializer";
dumpPointer(Initializer);
switch (D->getInitializerKind()) {
- case OMPDeclareReductionDecl::DirectInit:
+ case OMPDeclareReductionInitKind::Direct:
OS << " omp_priv = ";
break;
- case OMPDeclareReductionDecl::CopyInit:
+ case OMPDeclareReductionInitKind::Copy:
OS << " omp_priv ()";
break;
- case OMPDeclareReductionDecl::CallInit:
+ case OMPDeclareReductionInitKind::Call:
break;
}
}
@@ -1878,6 +2133,8 @@ void TextNodeDumper::VisitNamespaceDecl(const NamespaceDecl *D) {
dumpName(D);
if (D->isInline())
OS << " inline";
+ if (D->isNested())
+ OS << " nested";
if (!D->isOriginalNamespace())
dumpDeclRef(D->getOriginalNamespace(), "original");
}
@@ -1904,6 +2161,15 @@ void TextNodeDumper::VisitTypeAliasTemplateDecl(
void TextNodeDumper::VisitCXXRecordDecl(const CXXRecordDecl *D) {
VisitRecordDecl(D);
+ if (const auto *Instance = D->getInstantiatedFromMemberClass()) {
+ OS << " instantiated_from";
+ dumpPointer(Instance);
+ }
+ if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D))
+ dumpTemplateSpecializationKind(CTSD->getSpecializationKind());
+
+ dumpNestedNameSpecifier(D->getQualifier());
+
if (!D->isCompleteDefinition())
return;
@@ -2103,6 +2369,7 @@ void TextNodeDumper::VisitUsingDecl(const UsingDecl *D) {
if (D->getQualifier())
D->getQualifier()->print(OS, D->getASTContext().getPrintingPolicy());
OS << D->getDeclName();
+ dumpNestedNameSpecifier(D->getQualifier());
}
void TextNodeDumper::VisitUsingEnumDecl(const UsingEnumDecl *D) {
@@ -2159,10 +2426,10 @@ void TextNodeDumper::VisitConstructorUsingShadowDecl(
void TextNodeDumper::VisitLinkageSpecDecl(const LinkageSpecDecl *D) {
switch (D->getLanguage()) {
- case LinkageSpecDecl::lang_c:
+ case LinkageSpecLanguageIDs::C:
OS << " C";
break;
- case LinkageSpecDecl::lang_cxx:
+ case LinkageSpecLanguageIDs::CXX:
OS << " C++";
break;
}
@@ -2341,3 +2608,17 @@ void TextNodeDumper::VisitBlockDecl(const BlockDecl *D) {
void TextNodeDumper::VisitConceptDecl(const ConceptDecl *D) {
dumpName(D);
}
+
+void TextNodeDumper::VisitCompoundStmt(const CompoundStmt *S) {
+ VisitStmt(S);
+ if (S->hasStoredFPFeatures())
+ printFPOptions(S->getStoredFPFeatures());
+}
+
+void TextNodeDumper::VisitHLSLBufferDecl(const HLSLBufferDecl *D) {
+ if (D->isCBuffer())
+ OS << " cbuffer";
+ else
+ OS << " tbuffer";
+ dumpName(D);
+}
diff --git a/contrib/llvm-project/clang/lib/AST/Type.cpp b/contrib/llvm-project/clang/lib/AST/Type.cpp
index 4a2fc5219ef0..d4103025591e 100644
--- a/contrib/llvm-project/clang/lib/AST/Type.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Type.cpp
@@ -18,6 +18,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DependenceFlags.h"
@@ -42,15 +43,16 @@
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/TargetParser/RISCVTargetParser.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
+#include <optional>
#include <type_traits>
using namespace clang;
@@ -110,6 +112,25 @@ bool QualType::isConstant(QualType T, const ASTContext &Ctx) {
return T.getAddressSpace() == LangAS::opencl_constant;
}
+std::optional<QualType::NonConstantStorageReason>
+QualType::isNonConstantStorage(const ASTContext &Ctx, bool ExcludeCtor,
+ bool ExcludeDtor) {
+ if (!isConstant(Ctx) && !(*this)->isReferenceType())
+ return NonConstantStorageReason::NonConstNonReferenceType;
+ if (!Ctx.getLangOpts().CPlusPlus)
+ return std::nullopt;
+ if (const CXXRecordDecl *Record =
+ Ctx.getBaseElementType(*this)->getAsCXXRecordDecl()) {
+ if (!ExcludeCtor)
+ return NonConstantStorageReason::NonTrivialCtor;
+ if (Record->hasMutableFields())
+ return NonConstantStorageReason::MutableField;
+ if (!Record->hasTrivialDestructor() && !ExcludeDtor)
+ return NonConstantStorageReason::NonTrivialDtor;
+ }
+ return std::nullopt;
+}
+
// C++ [temp.dep.type]p1:
// A type is dependent if it is...
// - an array type constructed from any dependent type or whose
@@ -135,7 +156,7 @@ ArrayType::ArrayType(TypeClass tc, QualType et, QualType can,
: TypeDependence::None)),
ElementType(et) {
ArrayTypeBits.IndexTypeQuals = tq;
- ArrayTypeBits.SizeModifier = sm;
+ ArrayTypeBits.SizeModifier = llvm::to_underlying(sm);
}
unsigned ConstantArrayType::getNumAddressingBits(const ASTContext &Context,
@@ -158,7 +179,7 @@ unsigned ConstantArrayType::getNumAddressingBits(const ASTContext &Context,
if ((ElementSize >> 32) == 0 && NumElements.getBitWidth() <= 64 &&
(NumElements.getZExtValue() >> 32) == 0) {
uint64_t TotalSize = NumElements.getZExtValue() * ElementSize;
- return 64 - llvm::countLeadingZeros(TotalSize);
+ return llvm::bit_width(TotalSize);
}
// Otherwise, use APSInt to handle arbitrary sized values.
@@ -173,6 +194,11 @@ unsigned ConstantArrayType::getNumAddressingBits(const ASTContext &Context,
return TotalSize.getActiveBits();
}
+unsigned
+ConstantArrayType::getNumAddressingBits(const ASTContext &Context) const {
+ return getNumAddressingBits(Context, getElementType(), getSize());
+}
+
unsigned ConstantArrayType::getMaxSizeBits(const ASTContext &Context) {
unsigned Bits = Context.getTypeSize(Context.getSizeType());
@@ -192,20 +218,19 @@ void ConstantArrayType::Profile(llvm::FoldingSetNodeID &ID,
unsigned TypeQuals) {
ID.AddPointer(ET.getAsOpaquePtr());
ID.AddInteger(ArraySize.getZExtValue());
- ID.AddInteger(SizeMod);
+ ID.AddInteger(llvm::to_underlying(SizeMod));
ID.AddInteger(TypeQuals);
- ID.AddBoolean(SizeExpr != 0);
+ ID.AddBoolean(SizeExpr != nullptr);
if (SizeExpr)
SizeExpr->Profile(ID, Context, true);
}
-DependentSizedArrayType::DependentSizedArrayType(const ASTContext &Context,
- QualType et, QualType can,
+DependentSizedArrayType::DependentSizedArrayType(QualType et, QualType can,
Expr *e, ArraySizeModifier sm,
unsigned tq,
SourceRange brackets)
- : ArrayType(DependentSizedArray, et, can, sm, tq, e),
- Context(Context), SizeExpr((Stmt*) e), Brackets(brackets) {}
+ : ArrayType(DependentSizedArray, et, can, sm, tq, e), SizeExpr((Stmt *)e),
+ Brackets(brackets) {}
void DependentSizedArrayType::Profile(llvm::FoldingSetNodeID &ID,
const ASTContext &Context,
@@ -214,44 +239,42 @@ void DependentSizedArrayType::Profile(llvm::FoldingSetNodeID &ID,
unsigned TypeQuals,
Expr *E) {
ID.AddPointer(ET.getAsOpaquePtr());
- ID.AddInteger(SizeMod);
+ ID.AddInteger(llvm::to_underlying(SizeMod));
ID.AddInteger(TypeQuals);
E->Profile(ID, Context, true);
}
-DependentVectorType::DependentVectorType(const ASTContext &Context,
- QualType ElementType,
+DependentVectorType::DependentVectorType(QualType ElementType,
QualType CanonType, Expr *SizeExpr,
- SourceLocation Loc,
- VectorType::VectorKind VecKind)
+ SourceLocation Loc, VectorKind VecKind)
: Type(DependentVector, CanonType,
TypeDependence::DependentInstantiation |
ElementType->getDependence() |
(SizeExpr ? toTypeDependence(SizeExpr->getDependence())
: TypeDependence::None)),
- Context(Context), ElementType(ElementType), SizeExpr(SizeExpr), Loc(Loc) {
- VectorTypeBits.VecKind = VecKind;
+ ElementType(ElementType), SizeExpr(SizeExpr), Loc(Loc) {
+ VectorTypeBits.VecKind = llvm::to_underlying(VecKind);
}
void DependentVectorType::Profile(llvm::FoldingSetNodeID &ID,
const ASTContext &Context,
QualType ElementType, const Expr *SizeExpr,
- VectorType::VectorKind VecKind) {
+ VectorKind VecKind) {
ID.AddPointer(ElementType.getAsOpaquePtr());
- ID.AddInteger(VecKind);
+ ID.AddInteger(llvm::to_underlying(VecKind));
SizeExpr->Profile(ID, Context, true);
}
-DependentSizedExtVectorType::DependentSizedExtVectorType(
- const ASTContext &Context, QualType ElementType, QualType can,
- Expr *SizeExpr, SourceLocation loc)
+DependentSizedExtVectorType::DependentSizedExtVectorType(QualType ElementType,
+ QualType can,
+ Expr *SizeExpr,
+ SourceLocation loc)
: Type(DependentSizedExtVector, can,
TypeDependence::DependentInstantiation |
ElementType->getDependence() |
(SizeExpr ? toTypeDependence(SizeExpr->getDependence())
: TypeDependence::None)),
- Context(Context), SizeExpr(SizeExpr), ElementType(ElementType), loc(loc) {
-}
+ SizeExpr(SizeExpr), ElementType(ElementType), loc(loc) {}
void
DependentSizedExtVectorType::Profile(llvm::FoldingSetNodeID &ID,
@@ -261,8 +284,7 @@ DependentSizedExtVectorType::Profile(llvm::FoldingSetNodeID &ID,
SizeExpr->Profile(ID, Context, true);
}
-DependentAddressSpaceType::DependentAddressSpaceType(const ASTContext &Context,
- QualType PointeeType,
+DependentAddressSpaceType::DependentAddressSpaceType(QualType PointeeType,
QualType can,
Expr *AddrSpaceExpr,
SourceLocation loc)
@@ -271,8 +293,7 @@ DependentAddressSpaceType::DependentAddressSpaceType(const ASTContext &Context,
PointeeType->getDependence() |
(AddrSpaceExpr ? toTypeDependence(AddrSpaceExpr->getDependence())
: TypeDependence::None)),
- Context(Context), AddrSpaceExpr(AddrSpaceExpr), PointeeType(PointeeType),
- loc(loc) {}
+ AddrSpaceExpr(AddrSpaceExpr), PointeeType(PointeeType), loc(loc) {}
void DependentAddressSpaceType::Profile(llvm::FoldingSetNodeID &ID,
const ASTContext &Context,
@@ -311,12 +332,14 @@ ConstantMatrixType::ConstantMatrixType(TypeClass tc, QualType matrixType,
: MatrixType(tc, matrixType, canonType), NumRows(nRows),
NumColumns(nColumns) {}
-DependentSizedMatrixType::DependentSizedMatrixType(
- const ASTContext &CTX, QualType ElementType, QualType CanonicalType,
- Expr *RowExpr, Expr *ColumnExpr, SourceLocation loc)
+DependentSizedMatrixType::DependentSizedMatrixType(QualType ElementType,
+ QualType CanonicalType,
+ Expr *RowExpr,
+ Expr *ColumnExpr,
+ SourceLocation loc)
: MatrixType(DependentSizedMatrix, ElementType, CanonicalType, RowExpr,
ColumnExpr),
- Context(CTX), RowExpr(RowExpr), ColumnExpr(ColumnExpr), loc(loc) {}
+ RowExpr(RowExpr), ColumnExpr(ColumnExpr), loc(loc) {}
void DependentSizedMatrixType::Profile(llvm::FoldingSetNodeID &ID,
const ASTContext &CTX,
@@ -334,29 +357,28 @@ VectorType::VectorType(QualType vecType, unsigned nElements, QualType canonType,
VectorType::VectorType(TypeClass tc, QualType vecType, unsigned nElements,
QualType canonType, VectorKind vecKind)
: Type(tc, canonType, vecType->getDependence()), ElementType(vecType) {
- VectorTypeBits.VecKind = vecKind;
+ VectorTypeBits.VecKind = llvm::to_underlying(vecKind);
VectorTypeBits.NumElements = nElements;
}
-ExtIntType::ExtIntType(bool IsUnsigned, unsigned NumBits)
- : Type(ExtInt, QualType{}, TypeDependence::None), IsUnsigned(IsUnsigned),
+BitIntType::BitIntType(bool IsUnsigned, unsigned NumBits)
+ : Type(BitInt, QualType{}, TypeDependence::None), IsUnsigned(IsUnsigned),
NumBits(NumBits) {}
-DependentExtIntType::DependentExtIntType(const ASTContext &Context,
- bool IsUnsigned, Expr *NumBitsExpr)
- : Type(DependentExtInt, QualType{},
+DependentBitIntType::DependentBitIntType(bool IsUnsigned, Expr *NumBitsExpr)
+ : Type(DependentBitInt, QualType{},
toTypeDependence(NumBitsExpr->getDependence())),
- Context(Context), ExprAndUnsigned(NumBitsExpr, IsUnsigned) {}
+ ExprAndUnsigned(NumBitsExpr, IsUnsigned) {}
-bool DependentExtIntType::isUnsigned() const {
+bool DependentBitIntType::isUnsigned() const {
return ExprAndUnsigned.getInt();
}
-clang::Expr *DependentExtIntType::getNumBitsExpr() const {
+clang::Expr *DependentBitIntType::getNumBitsExpr() const {
return ExprAndUnsigned.getPointer();
}
-void DependentExtIntType::Profile(llvm::FoldingSetNodeID &ID,
+void DependentBitIntType::Profile(llvm::FoldingSetNodeID &ID,
const ASTContext &Context, bool IsUnsigned,
Expr *NumBitsExpr) {
ID.AddBoolean(IsUnsigned);
@@ -525,6 +547,10 @@ template <> const TypedefType *Type::getAs() const {
return getAsSugar<TypedefType>(this);
}
+template <> const UsingType *Type::getAs() const {
+ return getAsSugar<UsingType>(this);
+}
+
template <> const TemplateSpecializationType *Type::getAs() const {
return getAsSugar<TemplateSpecializationType>(this);
}
@@ -722,8 +748,7 @@ bool Type::isObjCClassOrClassKindOfType() const {
ObjCTypeParamType::ObjCTypeParamType(const ObjCTypeParamDecl *D, QualType can,
ArrayRef<ObjCProtocolDecl *> protocols)
- : Type(ObjCTypeParam, can,
- can->getDependence() & ~TypeDependence::UnexpandedPack),
+ : Type(ObjCTypeParam, can, toSemanticDependence(can->getDependence())),
OTPDecl(const_cast<ObjCTypeParamDecl *>(D)) {
initialize(protocols);
}
@@ -821,6 +846,13 @@ QualType ObjCObjectType::stripObjCKindOfTypeAndQuals(
/*isKindOf=*/false);
}
+ObjCInterfaceDecl *ObjCInterfaceType::getDecl() const {
+ ObjCInterfaceDecl *Canon = Decl->getCanonicalDecl();
+ if (ObjCInterfaceDecl *Def = Canon->getDefinition())
+ return Def;
+ return Canon;
+}
+
const ObjCObjectPointerType *ObjCObjectPointerType::stripObjCKindOfTypeAndQuals(
const ASTContext &ctx) const {
if (!isKindOfType() && qual_empty())
@@ -1067,7 +1099,7 @@ public:
if (exceptionChanged) {
info.ExceptionSpec.Exceptions =
- llvm::makeArrayRef(exceptionTypes).copy(Ctx);
+ llvm::ArrayRef(exceptionTypes).copy(Ctx);
}
}
@@ -1160,8 +1192,9 @@ public:
== T->getReplacementType().getAsOpaquePtr())
return QualType(T, 0);
- return Ctx.getSubstTemplateTypeParmType(T->getReplacedParameter(),
- replacementType);
+ return Ctx.getSubstTemplateTypeParmType(replacementType,
+ T->getAssociatedDecl(),
+ T->getIndex(), T->getPackIndex());
}
// FIXME: Non-trivial to implement, but important for C++
@@ -1208,10 +1241,10 @@ public:
!typeArgChanged)
return QualType(T, 0);
- return Ctx.getObjCObjectType(baseType, typeArgs,
- llvm::makeArrayRef(T->qual_begin(),
- T->getNumProtocols()),
- T->isKindOfTypeAsWritten());
+ return Ctx.getObjCObjectType(
+ baseType, typeArgs,
+ llvm::ArrayRef(T->qual_begin(), T->getNumProtocols()),
+ T->isKindOfTypeAsWritten());
}
TRIVIAL_TYPE_CLASS(ObjCInterface)
@@ -1363,7 +1396,7 @@ struct SubstObjCTypeArgsVisitor
if (exceptionChanged) {
info.ExceptionSpec.Exceptions =
- llvm::makeArrayRef(exceptionTypes).copy(Ctx);
+ llvm::ArrayRef(exceptionTypes).copy(Ctx);
}
}
@@ -1473,6 +1506,39 @@ struct StripObjCKindOfTypeVisitor
} // namespace
+bool QualType::UseExcessPrecision(const ASTContext &Ctx) {
+ const BuiltinType *BT = getTypePtr()->getAs<BuiltinType>();
+ if (!BT) {
+ const VectorType *VT = getTypePtr()->getAs<VectorType>();
+ if (VT) {
+ QualType ElementType = VT->getElementType();
+ return ElementType.UseExcessPrecision(Ctx);
+ }
+ } else {
+ switch (BT->getKind()) {
+ case BuiltinType::Kind::Float16: {
+ const TargetInfo &TI = Ctx.getTargetInfo();
+ if (TI.hasFloat16Type() && !TI.hasLegalHalfType() &&
+ Ctx.getLangOpts().getFloat16ExcessPrecision() !=
+ Ctx.getLangOpts().ExcessPrecisionKind::FPP_None)
+ return true;
+ break;
+ }
+ case BuiltinType::Kind::BFloat16: {
+ const TargetInfo &TI = Ctx.getTargetInfo();
+ if (TI.hasBFloat16Type() && !TI.hasFullBFloat16Type() &&
+ Ctx.getLangOpts().getBFloat16ExcessPrecision() !=
+ Ctx.getLangOpts().ExcessPrecisionKind::FPP_None)
+ return true;
+ break;
+ }
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
/// Substitute the given type arguments for Objective-C type
/// parameters within the given type, recursively.
QualType QualType::substObjCTypeArgs(ASTContext &ctx,
@@ -1504,8 +1570,8 @@ QualType QualType::getAtomicUnqualifiedType() const {
return getUnqualifiedType();
}
-Optional<ArrayRef<QualType>> Type::getObjCSubstitutions(
- const DeclContext *dc) const {
+std::optional<ArrayRef<QualType>>
+Type::getObjCSubstitutions(const DeclContext *dc) const {
// Look through method scopes.
if (const auto method = dyn_cast<ObjCMethodDecl>(dc))
dc = method->getDeclContext();
@@ -1520,23 +1586,23 @@ Optional<ArrayRef<QualType>> Type::getObjCSubstitutions(
// substitution to do.
dcTypeParams = dcClassDecl->getTypeParamList();
if (!dcTypeParams)
- return None;
+ return std::nullopt;
} else {
// If we are in neither a class nor a category, there's no
// substitution to perform.
dcCategoryDecl = dyn_cast<ObjCCategoryDecl>(dc);
if (!dcCategoryDecl)
- return None;
+ return std::nullopt;
// If the category does not have any type parameters, there's no
// substitution to do.
dcTypeParams = dcCategoryDecl->getTypeParamList();
if (!dcTypeParams)
- return None;
+ return std::nullopt;
dcClassDecl = dcCategoryDecl->getClassInterface();
if (!dcClassDecl)
- return None;
+ return std::nullopt;
}
assert(dcTypeParams && "No substitutions to perform");
assert(dcClassDecl && "No class context");
@@ -1885,15 +1951,26 @@ DeducedType *Type::getContainedDeducedType() const {
}
bool Type::hasAutoForTrailingReturnType() const {
- return dyn_cast_or_null<FunctionType>(
+ return isa_and_nonnull<FunctionType>(
GetContainedDeducedTypeVisitor(true).Visit(this));
}
bool Type::hasIntegerRepresentation() const {
if (const auto *VT = dyn_cast<VectorType>(CanonicalType))
return VT->getElementType()->isIntegerType();
- else
- return isIntegerType();
+ if (CanonicalType->isSveVLSBuiltinType()) {
+ const auto *VT = cast<BuiltinType>(CanonicalType);
+ return VT->getKind() == BuiltinType::SveBool ||
+ (VT->getKind() >= BuiltinType::SveInt8 &&
+ VT->getKind() <= BuiltinType::SveUint64);
+ }
+ if (CanonicalType->isRVVVLSBuiltinType()) {
+ const auto *VT = cast<BuiltinType>(CanonicalType);
+ return (VT->getKind() >= BuiltinType::RvvInt8mf8 &&
+ VT->getKind() <= BuiltinType::RvvUint64m8);
+ }
+
+ return isIntegerType();
}
/// Determine whether this type is an integral type.
@@ -1925,7 +2002,7 @@ bool Type::isIntegralType(const ASTContext &Ctx) const {
if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
return ET->getDecl()->isComplete();
- return isExtIntType();
+ return isBitIntType();
}
bool Type::isIntegralOrUnscopedEnumerationType() const {
@@ -1933,7 +2010,7 @@ bool Type::isIntegralOrUnscopedEnumerationType() const {
return BT->getKind() >= BuiltinType::Bool &&
BT->getKind() <= BuiltinType::Int128;
- if (isExtIntType())
+ if (isBitIntType())
return true;
return isUnscopedEnumerationType();
@@ -2016,7 +2093,9 @@ bool Type::isSignedIntegerType() const {
return ET->getDecl()->getIntegerType()->isSignedIntegerType();
}
- if (const ExtIntType *IT = dyn_cast<ExtIntType>(CanonicalType))
+ if (const auto *IT = dyn_cast<BitIntType>(CanonicalType))
+ return IT->isSigned();
+ if (const auto *IT = dyn_cast<DependentBitIntType>(CanonicalType))
return IT->isSigned();
return false;
@@ -2033,9 +2112,10 @@ bool Type::isSignedIntegerOrEnumerationType() const {
return ET->getDecl()->getIntegerType()->isSignedIntegerType();
}
- if (const ExtIntType *IT = dyn_cast<ExtIntType>(CanonicalType))
+ if (const auto *IT = dyn_cast<BitIntType>(CanonicalType))
+ return IT->isSigned();
+ if (const auto *IT = dyn_cast<DependentBitIntType>(CanonicalType))
return IT->isSigned();
-
return false;
}
@@ -2063,7 +2143,9 @@ bool Type::isUnsignedIntegerType() const {
return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
}
- if (const ExtIntType *IT = dyn_cast<ExtIntType>(CanonicalType))
+ if (const auto *IT = dyn_cast<BitIntType>(CanonicalType))
+ return IT->isUnsigned();
+ if (const auto *IT = dyn_cast<DependentBitIntType>(CanonicalType))
return IT->isUnsigned();
return false;
@@ -2080,7 +2162,9 @@ bool Type::isUnsignedIntegerOrEnumerationType() const {
return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
}
- if (const ExtIntType *IT = dyn_cast<ExtIntType>(CanonicalType))
+ if (const auto *IT = dyn_cast<BitIntType>(CanonicalType))
+ return IT->isUnsigned();
+ if (const auto *IT = dyn_cast<DependentBitIntType>(CanonicalType))
return IT->isUnsigned();
return false;
@@ -2091,13 +2175,18 @@ bool Type::hasUnsignedIntegerRepresentation() const {
return VT->getElementType()->isUnsignedIntegerOrEnumerationType();
if (const auto *VT = dyn_cast<MatrixType>(CanonicalType))
return VT->getElementType()->isUnsignedIntegerOrEnumerationType();
+ if (CanonicalType->isSveVLSBuiltinType()) {
+ const auto *VT = cast<BuiltinType>(CanonicalType);
+ return VT->getKind() >= BuiltinType::SveUint8 &&
+ VT->getKind() <= BuiltinType::SveUint64;
+ }
return isUnsignedIntegerOrEnumerationType();
}
bool Type::isFloatingType() const {
if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() >= BuiltinType::Half &&
- BT->getKind() <= BuiltinType::Float128;
+ BT->getKind() <= BuiltinType::Ibm128;
if (const auto *CT = dyn_cast<ComplexType>(CanonicalType))
return CT->getElementType()->isFloatingType();
return false;
@@ -2106,8 +2195,9 @@ bool Type::isFloatingType() const {
bool Type::hasFloatingRepresentation() const {
if (const auto *VT = dyn_cast<VectorType>(CanonicalType))
return VT->getElementType()->isFloatingType();
- else
- return isFloatingType();
+ if (const auto *MT = dyn_cast<MatrixType>(CanonicalType))
+ return MT->getElementType()->isFloatingType();
+ return isFloatingType();
}
bool Type::isRealFloatingType() const {
@@ -2119,17 +2209,16 @@ bool Type::isRealFloatingType() const {
bool Type::isRealType() const {
if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() >= BuiltinType::Bool &&
- BT->getKind() <= BuiltinType::Float128;
+ BT->getKind() <= BuiltinType::Ibm128;
if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped();
- return isExtIntType();
+ return isBitIntType();
}
bool Type::isArithmeticType() const {
if (const auto *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() >= BuiltinType::Bool &&
- BT->getKind() <= BuiltinType::Float128 &&
- BT->getKind() != BuiltinType::BFloat16;
+ BT->getKind() <= BuiltinType::Ibm128;
if (const auto *ET = dyn_cast<EnumType>(CanonicalType))
// GCC allows forward declaration of enum types (forbid by C99 6.7.2.3p2).
// If a body isn't seen by the time we get here, return false.
@@ -2138,7 +2227,7 @@ bool Type::isArithmeticType() const {
// false for scoped enumerations since that will disable any
// unwanted implicit conversions.
return !ET->getDecl()->isScoped() && ET->getDecl()->isComplete();
- return isa<ComplexType>(CanonicalType) || isExtIntType();
+ return isa<ComplexType>(CanonicalType) || isBitIntType();
}
Type::ScalarTypeKind Type::getScalarTypeKind() const {
@@ -2167,7 +2256,7 @@ Type::ScalarTypeKind Type::getScalarTypeKind() const {
if (CT->getElementType()->isRealFloatingType())
return STK_FloatingComplex;
return STK_IntegralComplex;
- } else if (isExtIntType()) {
+ } else if (isBitIntType()) {
return STK_Integral;
}
@@ -2279,11 +2368,61 @@ bool Type::isIncompleteType(NamedDecl **Def) const {
}
bool Type::isSizelessBuiltinType() const {
+ if (isSizelessVectorType())
+ return true;
+
+ if (const BuiltinType *BT = getAs<BuiltinType>()) {
+ switch (BT->getKind()) {
+ // WebAssembly reference types
+#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
+bool Type::isWebAssemblyExternrefType() const {
+ if (const auto *BT = getAs<BuiltinType>())
+ return BT->getKind() == BuiltinType::WasmExternRef;
+ return false;
+}
+
+bool Type::isWebAssemblyTableType() const {
+ if (const auto *ATy = dyn_cast<ArrayType>(this))
+ return ATy->getElementType().isWebAssemblyReferenceType();
+
+ if (const auto *PTy = dyn_cast<PointerType>(this))
+ return PTy->getPointeeType().isWebAssemblyReferenceType();
+
+ return false;
+}
+
+bool Type::isSizelessType() const { return isSizelessBuiltinType(); }
+
+bool Type::isSizelessVectorType() const {
+ return isSVESizelessBuiltinType() || isRVVSizelessBuiltinType();
+}
+
+bool Type::isSVESizelessBuiltinType() const {
if (const BuiltinType *BT = getAs<BuiltinType>()) {
switch (BT->getKind()) {
// SVE Types
#define SVE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
+bool Type::isRVVSizelessBuiltinType() const {
+ if (const BuiltinType *BT = getAs<BuiltinType>()) {
+ switch (BT->getKind()) {
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
return true;
@@ -2294,9 +2433,7 @@ bool Type::isSizelessBuiltinType() const {
return false;
}
-bool Type::isSizelessType() const { return isSizelessBuiltinType(); }
-
-bool Type::isVLSTBuiltinType() const {
+bool Type::isSveVLSBuiltinType() const {
if (const BuiltinType *BT = getAs<BuiltinType>()) {
switch (BT->getKind()) {
case BuiltinType::SveInt8:
@@ -2312,6 +2449,8 @@ bool Type::isVLSTBuiltinType() const {
case BuiltinType::SveFloat64:
case BuiltinType::SveBFloat16:
case BuiltinType::SveBool:
+ case BuiltinType::SveBoolx2:
+ case BuiltinType::SveBoolx4:
return true;
default:
return false;
@@ -2321,9 +2460,9 @@ bool Type::isVLSTBuiltinType() const {
}
QualType Type::getSveEltType(const ASTContext &Ctx) const {
- assert(isVLSTBuiltinType() && "unsupported type!");
+ assert(isSveVLSBuiltinType() && "unsupported type!");
- const BuiltinType *BTy = getAs<BuiltinType>();
+ const BuiltinType *BTy = castAs<BuiltinType>();
if (BTy->getKind() == BuiltinType::SveBool)
// Represent predicates as i8 rather than i1 to avoid any layout issues.
// The type is bitcasted to a scalable predicate type when casting between
@@ -2333,6 +2472,41 @@ QualType Type::getSveEltType(const ASTContext &Ctx) const {
return Ctx.getBuiltinVectorTypeInfo(BTy).ElementType;
}
+bool Type::isRVVVLSBuiltinType() const {
+ if (const BuiltinType *BT = getAs<BuiltinType>()) {
+ switch (BT->getKind()) {
+#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \
+ IsFP, IsBF) \
+ case BuiltinType::Id: \
+ return NF == 1;
+#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
+ case BuiltinType::Id: \
+ return true;
+#include "clang/Basic/RISCVVTypes.def"
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
+QualType Type::getRVVEltType(const ASTContext &Ctx) const {
+ assert(isRVVVLSBuiltinType() && "unsupported type!");
+
+ const BuiltinType *BTy = castAs<BuiltinType>();
+
+ switch (BTy->getKind()) {
+#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
+ case BuiltinType::Id: \
+ return Ctx.UnsignedCharTy;
+ default:
+ return Ctx.getBuiltinVectorTypeInfo(BTy).ElementType;
+#include "clang/Basic/RISCVVTypes.def"
+ }
+
+ llvm_unreachable("Unhandled type");
+}
+
bool QualType::isPODType(const ASTContext &Context) const {
// C++11 has a more relaxed definition of POD.
if (Context.getLangOpts().CPlusPlus11)
@@ -2374,7 +2548,7 @@ bool QualType::isCXX98PODType(const ASTContext &Context) const {
case Type::MemberPointer:
case Type::Vector:
case Type::ExtVector:
- case Type::ExtInt:
+ case Type::BitInt:
return true;
case Type::Enum:
@@ -2425,11 +2599,13 @@ bool QualType::isTrivialType(const ASTContext &Context) const {
return true;
if (const auto *RT = CanonicalType->getAs<RecordType>()) {
if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
- // C++11 [class]p6:
- // A trivial class is a class that has a default constructor,
- // has no non-trivial default constructors, and is trivially
- // copyable.
- return ClassDecl->hasDefaultConstructor() &&
+ // C++20 [class]p6:
+ // A trivial class is a class that is trivially copyable, and
+ // has one or more eligible default constructors such that each is
+ // trivial.
+ // FIXME: We should merge this definition of triviality into
+ // CXXRecordDecl::isTrivial. Currently it computes the wrong thing.
+ return ClassDecl->hasTrivialDefaultConstructor() &&
!ClassDecl->hasNonTrivialDefaultConstructor() &&
ClassDecl->isTriviallyCopyable();
}
@@ -2441,19 +2617,22 @@ bool QualType::isTrivialType(const ASTContext &Context) const {
return false;
}
-bool QualType::isTriviallyCopyableType(const ASTContext &Context) const {
- if ((*this)->isArrayType())
- return Context.getBaseElementType(*this).isTriviallyCopyableType(Context);
+static bool isTriviallyCopyableTypeImpl(const QualType &type,
+ const ASTContext &Context,
+ bool IsCopyConstructible) {
+ if (type->isArrayType())
+ return isTriviallyCopyableTypeImpl(Context.getBaseElementType(type),
+ Context, IsCopyConstructible);
- if (hasNonTrivialObjCLifetime())
+ if (type.hasNonTrivialObjCLifetime())
return false;
// C++11 [basic.types]p9 - See Core 2094
// Scalar types, trivially copyable class types, arrays of such types, and
// cv-qualified versions of these types are collectively
- // called trivially copyable types.
+ // called trivially copy constructible types.
- QualType CanonicalType = getCanonicalType();
+ QualType CanonicalType = type.getCanonicalType();
if (CanonicalType->isDependentType())
return false;
@@ -2471,16 +2650,110 @@ bool QualType::isTriviallyCopyableType(const ASTContext &Context) const {
if (const auto *RT = CanonicalType->getAs<RecordType>()) {
if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
- if (!ClassDecl->isTriviallyCopyable()) return false;
+ if (IsCopyConstructible) {
+ return ClassDecl->isTriviallyCopyConstructible();
+ } else {
+ return ClassDecl->isTriviallyCopyable();
+ }
}
-
return true;
}
-
// No other types can match.
return false;
}
+bool QualType::isTriviallyCopyableType(const ASTContext &Context) const {
+ return isTriviallyCopyableTypeImpl(*this, Context,
+ /*IsCopyConstructible=*/false);
+}
+
+bool QualType::isTriviallyCopyConstructibleType(
+ const ASTContext &Context) const {
+ return isTriviallyCopyableTypeImpl(*this, Context,
+ /*IsCopyConstructible=*/true);
+}
+
+bool QualType::isTriviallyRelocatableType(const ASTContext &Context) const {
+ QualType BaseElementType = Context.getBaseElementType(*this);
+
+ if (BaseElementType->isIncompleteType()) {
+ return false;
+ } else if (!BaseElementType->isObjectType()) {
+ return false;
+ } else if (const auto *RD = BaseElementType->getAsRecordDecl()) {
+ return RD->canPassInRegisters();
+ } else {
+ switch (isNonTrivialToPrimitiveDestructiveMove()) {
+ case PCK_Trivial:
+ return !isDestructedType();
+ case PCK_ARCStrong:
+ return true;
+ default:
+ return false;
+ }
+ }
+}
+
+static bool
+HasNonDeletedDefaultedEqualityComparison(const CXXRecordDecl *Decl) {
+ if (Decl->isUnion())
+ return false;
+ if (Decl->isLambda())
+ return Decl->isCapturelessLambda();
+
+ auto IsDefaultedOperatorEqualEqual = [&](const FunctionDecl *Function) {
+ return Function->getOverloadedOperator() ==
+ OverloadedOperatorKind::OO_EqualEqual &&
+ Function->isDefaulted() && Function->getNumParams() > 0 &&
+ (Function->getParamDecl(0)->getType()->isReferenceType() ||
+ Decl->isTriviallyCopyable());
+ };
+
+ if (llvm::none_of(Decl->methods(), IsDefaultedOperatorEqualEqual) &&
+ llvm::none_of(Decl->friends(), [&](const FriendDecl *Friend) {
+ if (NamedDecl *ND = Friend->getFriendDecl()) {
+ return ND->isFunctionOrFunctionTemplate() &&
+ IsDefaultedOperatorEqualEqual(ND->getAsFunction());
+ }
+ return false;
+ }))
+ return false;
+
+ return llvm::all_of(Decl->bases(),
+ [](const CXXBaseSpecifier &BS) {
+ if (const auto *RD = BS.getType()->getAsCXXRecordDecl())
+ return HasNonDeletedDefaultedEqualityComparison(RD);
+ return true;
+ }) &&
+ llvm::all_of(Decl->fields(), [](const FieldDecl *FD) {
+ auto Type = FD->getType();
+ if (Type->isArrayType())
+ Type = Type->getBaseElementTypeUnsafe()->getCanonicalTypeUnqualified();
+
+ if (Type->isReferenceType() || Type->isEnumeralType())
+ return false;
+ if (const auto *RD = Type->getAsCXXRecordDecl())
+ return HasNonDeletedDefaultedEqualityComparison(RD);
+ return true;
+ });
+}
+
+bool QualType::isTriviallyEqualityComparableType(
+ const ASTContext &Context) const {
+ QualType CanonicalType = getCanonicalType();
+ if (CanonicalType->isIncompleteType() || CanonicalType->isDependentType() ||
+ CanonicalType->isEnumeralType() || CanonicalType->isArrayType())
+ return false;
+
+ if (const auto *RD = CanonicalType->getAsCXXRecordDecl()) {
+ if (!HasNonDeletedDefaultedEqualityComparison(RD))
+ return false;
+ }
+
+ return Context.hasUniqueObjectRepresentations(
+ CanonicalType, /*CheckIfTriviallyCopyable=*/false);
+}
+
bool QualType::isNonWeakInMRRWithObjCWeak(const ASTContext &Context) const {
return !Context.getLangOpts().ObjCAutoRefCount &&
Context.getLangOpts().ObjCWeak &&
@@ -2499,6 +2772,19 @@ bool QualType::hasNonTrivialToPrimitiveCopyCUnion(const RecordDecl *RD) {
return RD->hasNonTrivialToPrimitiveCopyCUnion();
}
+bool QualType::isWebAssemblyReferenceType() const {
+ return isWebAssemblyExternrefType() || isWebAssemblyFuncrefType();
+}
+
+bool QualType::isWebAssemblyExternrefType() const {
+ return getTypePtr()->isWebAssemblyExternrefType();
+}
+
+bool QualType::isWebAssemblyFuncrefType() const {
+ return getTypePtr()->isFunctionPointerType() &&
+ getAddressSpace() == LangAS::wasm_funcref;
+}
+
QualType::PrimitiveDefaultInitializeKind
QualType::isNonTrivialToPrimitiveDefaultInitialize() const {
if (const auto *RT =
@@ -2734,39 +3020,6 @@ bool Type::isStdByteType() const {
return false;
}
-bool Type::isPromotableIntegerType() const {
- if (const auto *BT = getAs<BuiltinType>())
- switch (BT->getKind()) {
- case BuiltinType::Bool:
- case BuiltinType::Char_S:
- case BuiltinType::Char_U:
- case BuiltinType::SChar:
- case BuiltinType::UChar:
- case BuiltinType::Short:
- case BuiltinType::UShort:
- case BuiltinType::WChar_S:
- case BuiltinType::WChar_U:
- case BuiltinType::Char8:
- case BuiltinType::Char16:
- case BuiltinType::Char32:
- return true;
- default:
- return false;
- }
-
- // Enumerated types are promotable to their compatible integer types
- // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2).
- if (const auto *ET = getAs<EnumType>()){
- if (this->isDependentType() || ET->getDecl()->getPromotionType().isNull()
- || ET->getDecl()->isScoped())
- return false;
-
- return true;
- }
-
- return false;
-}
-
bool Type::isSpecifierType() const {
// Note that this intentionally does not use the canonical type.
switch (getTypeClass()) {
@@ -2785,7 +3038,6 @@ bool Type::isSpecifierType() const {
case DependentTemplateSpecialization:
case ObjCInterface:
case ObjCObject:
- case ObjCObjectPointer: // FIXME: object pointers aren't really specifiers
return true;
default:
return false;
@@ -2795,24 +3047,36 @@ bool Type::isSpecifierType() const {
ElaboratedTypeKeyword
TypeWithKeyword::getKeywordForTypeSpec(unsigned TypeSpec) {
switch (TypeSpec) {
- default: return ETK_None;
- case TST_typename: return ETK_Typename;
- case TST_class: return ETK_Class;
- case TST_struct: return ETK_Struct;
- case TST_interface: return ETK_Interface;
- case TST_union: return ETK_Union;
- case TST_enum: return ETK_Enum;
+ default:
+ return ElaboratedTypeKeyword::None;
+ case TST_typename:
+ return ElaboratedTypeKeyword::Typename;
+ case TST_class:
+ return ElaboratedTypeKeyword::Class;
+ case TST_struct:
+ return ElaboratedTypeKeyword::Struct;
+ case TST_interface:
+ return ElaboratedTypeKeyword::Interface;
+ case TST_union:
+ return ElaboratedTypeKeyword::Union;
+ case TST_enum:
+ return ElaboratedTypeKeyword::Enum;
}
}
TagTypeKind
TypeWithKeyword::getTagTypeKindForTypeSpec(unsigned TypeSpec) {
switch(TypeSpec) {
- case TST_class: return TTK_Class;
- case TST_struct: return TTK_Struct;
- case TST_interface: return TTK_Interface;
- case TST_union: return TTK_Union;
- case TST_enum: return TTK_Enum;
+ case TST_class:
+ return TagTypeKind::Class;
+ case TST_struct:
+ return TagTypeKind::Struct;
+ case TST_interface:
+ return TagTypeKind::Interface;
+ case TST_union:
+ return TagTypeKind::Union;
+ case TST_enum:
+ return TagTypeKind::Enum;
}
llvm_unreachable("Type specifier is not a tag type kind.");
@@ -2821,11 +3085,16 @@ TypeWithKeyword::getTagTypeKindForTypeSpec(unsigned TypeSpec) {
ElaboratedTypeKeyword
TypeWithKeyword::getKeywordForTagTypeKind(TagTypeKind Kind) {
switch (Kind) {
- case TTK_Class: return ETK_Class;
- case TTK_Struct: return ETK_Struct;
- case TTK_Interface: return ETK_Interface;
- case TTK_Union: return ETK_Union;
- case TTK_Enum: return ETK_Enum;
+ case TagTypeKind::Class:
+ return ElaboratedTypeKeyword::Class;
+ case TagTypeKind::Struct:
+ return ElaboratedTypeKeyword::Struct;
+ case TagTypeKind::Interface:
+ return ElaboratedTypeKeyword::Interface;
+ case TagTypeKind::Union:
+ return ElaboratedTypeKeyword::Union;
+ case TagTypeKind::Enum:
+ return ElaboratedTypeKeyword::Enum;
}
llvm_unreachable("Unknown tag type kind.");
}
@@ -2833,13 +3102,18 @@ TypeWithKeyword::getKeywordForTagTypeKind(TagTypeKind Kind) {
TagTypeKind
TypeWithKeyword::getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword) {
switch (Keyword) {
- case ETK_Class: return TTK_Class;
- case ETK_Struct: return TTK_Struct;
- case ETK_Interface: return TTK_Interface;
- case ETK_Union: return TTK_Union;
- case ETK_Enum: return TTK_Enum;
- case ETK_None: // Fall through.
- case ETK_Typename:
+ case ElaboratedTypeKeyword::Class:
+ return TagTypeKind::Class;
+ case ElaboratedTypeKeyword::Struct:
+ return TagTypeKind::Struct;
+ case ElaboratedTypeKeyword::Interface:
+ return TagTypeKind::Interface;
+ case ElaboratedTypeKeyword::Union:
+ return TagTypeKind::Union;
+ case ElaboratedTypeKeyword::Enum:
+ return TagTypeKind::Enum;
+ case ElaboratedTypeKeyword::None: // Fall through.
+ case ElaboratedTypeKeyword::Typename:
llvm_unreachable("Elaborated type keyword is not a tag type kind.");
}
llvm_unreachable("Unknown elaborated type keyword.");
@@ -2848,14 +3122,14 @@ TypeWithKeyword::getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword) {
bool
TypeWithKeyword::KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword) {
switch (Keyword) {
- case ETK_None:
- case ETK_Typename:
+ case ElaboratedTypeKeyword::None:
+ case ElaboratedTypeKeyword::Typename:
return false;
- case ETK_Class:
- case ETK_Struct:
- case ETK_Interface:
- case ETK_Union:
- case ETK_Enum:
+ case ElaboratedTypeKeyword::Class:
+ case ElaboratedTypeKeyword::Struct:
+ case ElaboratedTypeKeyword::Interface:
+ case ElaboratedTypeKeyword::Union:
+ case ElaboratedTypeKeyword::Enum:
return true;
}
llvm_unreachable("Unknown elaborated type keyword.");
@@ -2863,13 +3137,20 @@ TypeWithKeyword::KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword) {
StringRef TypeWithKeyword::getKeywordName(ElaboratedTypeKeyword Keyword) {
switch (Keyword) {
- case ETK_None: return {};
- case ETK_Typename: return "typename";
- case ETK_Class: return "class";
- case ETK_Struct: return "struct";
- case ETK_Interface: return "__interface";
- case ETK_Union: return "union";
- case ETK_Enum: return "enum";
+ case ElaboratedTypeKeyword::None:
+ return {};
+ case ElaboratedTypeKeyword::Typename:
+ return "typename";
+ case ElaboratedTypeKeyword::Class:
+ return "class";
+ case ElaboratedTypeKeyword::Struct:
+ return "struct";
+ case ElaboratedTypeKeyword::Interface:
+ return "__interface";
+ case ElaboratedTypeKeyword::Union:
+ return "union";
+ case ElaboratedTypeKeyword::Enum:
+ return "enum";
}
llvm_unreachable("Unknown elaborated type keyword.");
@@ -2886,7 +3167,7 @@ DependentTemplateSpecializationType::DependentTemplateSpecializationType(
DependentTemplateSpecializationTypeBits.NumArgs = Args.size();
assert((!NNS || NNS->isDependent()) &&
"DependentTemplateSpecializatonType requires dependent qualifier");
- TemplateArgument *ArgBuffer = getArgBuffer();
+ auto *ArgBuffer = const_cast<TemplateArgument *>(template_arguments().data());
for (const TemplateArgument &Arg : Args) {
addDependence(toTypeDependence(Arg.getDependence() &
TemplateArgumentDependence::UnexpandedPack));
@@ -2902,7 +3183,7 @@ DependentTemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID,
NestedNameSpecifier *Qualifier,
const IdentifierInfo *Name,
ArrayRef<TemplateArgument> Args) {
- ID.AddInteger(Keyword);
+ ID.AddInteger(llvm::to_underlying(Keyword));
ID.AddPointer(Qualifier);
ID.AddPointer(Name);
for (const TemplateArgument &Arg : Args)
@@ -3030,6 +3311,8 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
return "_Float16";
case Float128:
return "__float128";
+ case Ibm128:
+ return "__ibm128";
case WChar_S:
case WChar_U:
return Policy.MSWChar ? "__wchar_t" : "wchar_t";
@@ -3040,7 +3323,7 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
case Char32:
return "char32_t";
case NullPtr:
- return "nullptr_t";
+ return Policy.NullptrTypeInNamespace ? "std::nullptr_t" : "nullptr_t";
case Overload:
return "<overloaded function type>";
case BoundMember:
@@ -3099,6 +3382,10 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
case Id: \
return Name;
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) \
+ case Id: \
+ return Name;
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
}
llvm_unreachable("Invalid builtin type.");
@@ -3141,6 +3428,8 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) {
case CC_AAPCS: return "aapcs";
case CC_AAPCS_VFP: return "aapcs-vfp";
case CC_AArch64VectorCall: return "aarch64_vector_pcs";
+ case CC_AArch64SVEPCS: return "aarch64_sve_pcs";
+ case CC_AMDGPUKernelCall: return "amdgpu_kernel";
case CC_IntelOclBicc: return "intel_ocl_bicc";
case CC_SpirFunction: return "spir_function";
case CC_OpenCLKernel: return "opencl_kernel";
@@ -3148,11 +3437,19 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) {
case CC_SwiftAsync: return "swiftasynccall";
case CC_PreserveMost: return "preserve_most";
case CC_PreserveAll: return "preserve_all";
+ case CC_M68kRTD: return "m68k_rtd";
}
llvm_unreachable("Invalid calling convention.");
}
+void FunctionProtoType::ExceptionSpecInfo::instantiate() {
+ assert(Type == EST_Uninstantiated);
+ NoexceptExpr =
+ cast<FunctionProtoType>(SourceTemplate->getType())->getNoexceptExpr();
+ Type = EST_DependentNoexcept;
+}
+
FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
QualType canonical,
const ExtProtoInfo &epi)
@@ -3167,10 +3464,21 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
FunctionTypeBits.Variadic = epi.Variadic;
FunctionTypeBits.HasTrailingReturn = epi.HasTrailingReturn;
- // Fill in the extra trailing bitfields if present.
- if (hasExtraBitfields(epi.ExceptionSpec.Type)) {
+ if (epi.requiresFunctionProtoTypeExtraBitfields()) {
+ FunctionTypeBits.HasExtraBitfields = true;
auto &ExtraBits = *getTrailingObjects<FunctionTypeExtraBitfields>();
- ExtraBits.NumExceptionType = epi.ExceptionSpec.Exceptions.size();
+ ExtraBits = FunctionTypeExtraBitfields();
+ } else {
+ FunctionTypeBits.HasExtraBitfields = false;
+ }
+
+ if (epi.requiresFunctionProtoTypeArmAttributes()) {
+ auto &ArmTypeAttrs = *getTrailingObjects<FunctionTypeArmAttributes>();
+ ArmTypeAttrs = FunctionTypeArmAttributes();
+
+ // Also set the bit in FunctionTypeExtraBitfields
+ auto &ExtraBits = *getTrailingObjects<FunctionTypeExtraBitfields>();
+ ExtraBits.HasArmTypeAttributes = true;
}
// Fill in the trailing argument array.
@@ -3181,8 +3489,21 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
argSlot[i] = params[i];
}
+ // Propagate the SME ACLE attributes.
+ if (epi.AArch64SMEAttributes != SME_NormalFunction) {
+ auto &ArmTypeAttrs = *getTrailingObjects<FunctionTypeArmAttributes>();
+ assert(epi.AArch64SMEAttributes <= SME_AttributeMask &&
+ "Not enough bits to encode SME attributes");
+ ArmTypeAttrs.AArch64SMEAttributes = epi.AArch64SMEAttributes;
+ }
+
// Fill in the exception type array if present.
if (getExceptionSpecType() == EST_Dynamic) {
+ auto &ExtraBits = *getTrailingObjects<FunctionTypeExtraBitfields>();
+ size_t NumExceptions = epi.ExceptionSpec.Exceptions.size();
+ assert(NumExceptions <= 1023 && "Not enough bits to encode exceptions");
+ ExtraBits.NumExceptionType = NumExceptions;
+
assert(hasExtraBitfields() && "missing trailing extra bitfields!");
auto *exnSlot =
reinterpret_cast<QualType *>(getTrailingObjects<ExceptionType>());
@@ -3286,7 +3607,6 @@ CanThrowResult FunctionProtoType::canThrow() const {
switch (getExceptionSpecType()) {
case EST_Unparsed:
case EST_Unevaluated:
- case EST_Uninstantiated:
llvm_unreachable("should not call this with unresolved exception specs");
case EST_DynamicNone:
@@ -3308,6 +3628,7 @@ CanThrowResult FunctionProtoType::canThrow() const {
return CT_Can;
return CT_Dependent;
+ case EST_Uninstantiated:
case EST_DependentNoexcept:
return CT_Dependent;
}
@@ -3337,8 +3658,11 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
// This is followed by an optional "consumed argument" section of the
// same length as the first type sequence:
// bool*
- // Finally, we have the ext info and trailing return type flag:
- // int bool
+ // This is followed by the ext info:
+ // int
+ // Finally we have a trailing return type flag (bool)
+ // combined with AArch64 SME Attributes, to save space:
+ // int
//
// There is no ambiguity between the consumed arguments and an empty EH
// spec because of the leading 'bool' which unambiguously indicates
@@ -3371,8 +3695,9 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
for (unsigned i = 0; i != NumParams; ++i)
ID.AddInteger(epi.ExtParameterInfos[i].getOpaqueValue());
}
+
epi.ExtInfo.Profile(ID);
- ID.AddBoolean(epi.HasTrailingReturn);
+ ID.AddInteger((epi.AArch64SMEAttributes << 1) | epi.HasTrailingReturn);
}
void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID,
@@ -3382,14 +3707,34 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID,
}
TypedefType::TypedefType(TypeClass tc, const TypedefNameDecl *D,
- QualType underlying, QualType can)
- : Type(tc, can, underlying->getDependence()),
+ QualType Underlying, QualType can)
+ : Type(tc, can, toSemanticDependence(can->getDependence())),
Decl(const_cast<TypedefNameDecl *>(D)) {
assert(!isa<TypedefType>(can) && "Invalid canonical type");
+ TypedefBits.hasTypeDifferentFromDecl = !Underlying.isNull();
+ if (!typeMatchesDecl())
+ *getTrailingObjects<QualType>() = Underlying;
}
QualType TypedefType::desugar() const {
- return getDecl()->getUnderlyingType();
+ return typeMatchesDecl() ? Decl->getUnderlyingType()
+ : *getTrailingObjects<QualType>();
+}
+
+UsingType::UsingType(const UsingShadowDecl *Found, QualType Underlying,
+ QualType Canon)
+ : Type(Using, Canon, toSemanticDependence(Canon->getDependence())),
+ Found(const_cast<UsingShadowDecl *>(Found)) {
+ UsingBits.hasTypeDifferentFromDecl = !Underlying.isNull();
+ if (!typeMatchesDecl())
+ *getTrailingObjects<QualType>() = Underlying;
+}
+
+QualType UsingType::getUnderlyingType() const {
+ return typeMatchesDecl()
+ ? QualType(
+ cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl(), 0)
+ : *getTrailingObjects<QualType>();
}
QualType MacroQualifiedType::desugar() const { return getUnderlyingType(); }
@@ -3406,27 +3751,37 @@ QualType MacroQualifiedType::getModifiedType() const {
return Inner;
}
-TypeOfExprType::TypeOfExprType(Expr *E, QualType can)
- : Type(TypeOfExpr, can,
+TypeOfExprType::TypeOfExprType(Expr *E, TypeOfKind Kind, QualType Can)
+ : Type(TypeOfExpr,
+ // We have to protect against 'Can' being invalid through its
+ // default argument.
+ Kind == TypeOfKind::Unqualified && !Can.isNull()
+ ? Can.getAtomicUnqualifiedType()
+ : Can,
toTypeDependence(E->getDependence()) |
(E->getType()->getDependence() &
TypeDependence::VariablyModified)),
- TOExpr(E) {}
+ TOExpr(E) {
+ TypeOfBits.IsUnqual = Kind == TypeOfKind::Unqualified;
+}
bool TypeOfExprType::isSugared() const {
return !TOExpr->isTypeDependent();
}
QualType TypeOfExprType::desugar() const {
- if (isSugared())
- return getUnderlyingExpr()->getType();
-
+ if (isSugared()) {
+ QualType QT = getUnderlyingExpr()->getType();
+ return TypeOfBits.IsUnqual ? QT.getAtomicUnqualifiedType() : QT;
+ }
return QualType(this, 0);
}
void DependentTypeOfExprType::Profile(llvm::FoldingSetNodeID &ID,
- const ASTContext &Context, Expr *E) {
+ const ASTContext &Context, Expr *E,
+ bool IsUnqual) {
E->Profile(ID, Context, true);
+ ID.AddBoolean(IsUnqual);
}
DecltypeType::DecltypeType(Expr *E, QualType underlyingType, QualType can)
@@ -3450,8 +3805,8 @@ QualType DecltypeType::desugar() const {
return QualType(this, 0);
}
-DependentDecltypeType::DependentDecltypeType(const ASTContext &Context, Expr *E)
- : DecltypeType(E, Context.DependentTy), Context(Context) {}
+DependentDecltypeType::DependentDecltypeType(Expr *E, QualType UnderlyingType)
+ : DecltypeType(E, UnderlyingType) {}
void DependentDecltypeType::Profile(llvm::FoldingSetNodeID &ID,
const ASTContext &Context, Expr *E) {
@@ -3476,7 +3831,7 @@ TagType::TagType(TypeClass TC, const TagDecl *D, QualType can)
decl(const_cast<TagDecl *>(D)) {}
static TagDecl *getInterestingTagDecl(TagDecl *decl) {
- for (auto I : decl->redecls()) {
+ for (auto *I : decl->redecls()) {
if (I->isCompleteDefinition() || I->isBeingDefined())
return I;
}
@@ -3505,7 +3860,7 @@ bool RecordType::hasConstFields() const {
return true;
FieldTy = FieldTy.getCanonicalType();
if (const auto *FieldRecTy = FieldTy->getAs<RecordType>()) {
- if (llvm::find(RecordTypeList, FieldRecTy) == RecordTypeList.end())
+ if (!llvm::is_contained(RecordTypeList, FieldRecTy))
RecordTypeList.push_back(FieldRecTy);
}
}
@@ -3551,6 +3906,10 @@ bool AttributedType::isMSTypeSpec() const {
llvm_unreachable("invalid attr kind");
}
+bool AttributedType::isWebAssemblyFuncrefSpec() const {
+ return getAttrKind() == attr::WebAssemblyFuncref;
+}
+
bool AttributedType::isCallingConv() const {
// FIXME: Generate this with TableGen.
switch (getAttrKind()) {
@@ -3565,12 +3924,15 @@ bool AttributedType::isCallingConv() const {
case attr::SwiftAsyncCall:
case attr::VectorCall:
case attr::AArch64VectorPcs:
+ case attr::AArch64SVEPcs:
+ case attr::AMDGPUKernelCall:
case attr::Pascal:
case attr::MSABI:
case attr::SysVABI:
case attr::IntelOclBicc:
case attr::PreserveMost:
case attr::PreserveAll:
+ case attr::M68kRTD:
return true;
}
llvm_unreachable("invalid attr kind");
@@ -3584,28 +3946,80 @@ IdentifierInfo *TemplateTypeParmType::getIdentifier() const {
return isCanonicalUnqualified() ? nullptr : getDecl()->getIdentifier();
}
+static const TemplateTypeParmDecl *getReplacedParameter(Decl *D,
+ unsigned Index) {
+ if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(D))
+ return TTP;
+ return cast<TemplateTypeParmDecl>(
+ getReplacedTemplateParameterList(D)->getParam(Index));
+}
+
+SubstTemplateTypeParmType::SubstTemplateTypeParmType(
+ QualType Replacement, Decl *AssociatedDecl, unsigned Index,
+ std::optional<unsigned> PackIndex)
+ : Type(SubstTemplateTypeParm, Replacement.getCanonicalType(),
+ Replacement->getDependence()),
+ AssociatedDecl(AssociatedDecl) {
+ SubstTemplateTypeParmTypeBits.HasNonCanonicalUnderlyingType =
+ Replacement != getCanonicalTypeInternal();
+ if (SubstTemplateTypeParmTypeBits.HasNonCanonicalUnderlyingType)
+ *getTrailingObjects<QualType>() = Replacement;
+
+ SubstTemplateTypeParmTypeBits.Index = Index;
+ SubstTemplateTypeParmTypeBits.PackIndex = PackIndex ? *PackIndex + 1 : 0;
+ assert(AssociatedDecl != nullptr);
+}
+
+const TemplateTypeParmDecl *
+SubstTemplateTypeParmType::getReplacedParameter() const {
+ return ::getReplacedParameter(getAssociatedDecl(), getIndex());
+}
+
SubstTemplateTypeParmPackType::SubstTemplateTypeParmPackType(
- const TemplateTypeParmType *Param, QualType Canon,
+ QualType Canon, Decl *AssociatedDecl, unsigned Index, bool Final,
const TemplateArgument &ArgPack)
: Type(SubstTemplateTypeParmPack, Canon,
TypeDependence::DependentInstantiation |
TypeDependence::UnexpandedPack),
- Replaced(Param), Arguments(ArgPack.pack_begin()) {
+ Arguments(ArgPack.pack_begin()),
+ AssociatedDeclAndFinal(AssociatedDecl, Final) {
+ SubstTemplateTypeParmPackTypeBits.Index = Index;
SubstTemplateTypeParmPackTypeBits.NumArgs = ArgPack.pack_size();
+ assert(AssociatedDecl != nullptr);
+}
+
+Decl *SubstTemplateTypeParmPackType::getAssociatedDecl() const {
+ return AssociatedDeclAndFinal.getPointer();
+}
+
+bool SubstTemplateTypeParmPackType::getFinal() const {
+ return AssociatedDeclAndFinal.getInt();
+}
+
+const TemplateTypeParmDecl *
+SubstTemplateTypeParmPackType::getReplacedParameter() const {
+ return ::getReplacedParameter(getAssociatedDecl(), getIndex());
+}
+
+IdentifierInfo *SubstTemplateTypeParmPackType::getIdentifier() const {
+ return getReplacedParameter()->getIdentifier();
}
TemplateArgument SubstTemplateTypeParmPackType::getArgumentPack() const {
- return TemplateArgument(llvm::makeArrayRef(Arguments, getNumArgs()));
+ return TemplateArgument(llvm::ArrayRef(Arguments, getNumArgs()));
}
void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getReplacedParameter(), getArgumentPack());
+ Profile(ID, getAssociatedDecl(), getIndex(), getFinal(), getArgumentPack());
}
void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID,
- const TemplateTypeParmType *Replaced,
+ const Decl *AssociatedDecl,
+ unsigned Index, bool Final,
const TemplateArgument &ArgPack) {
- ID.AddPointer(Replaced);
+ ID.AddPointer(AssociatedDecl);
+ ID.AddInteger(Index);
+ ID.AddBoolean(Final);
ID.AddInteger(ArgPack.pack_size());
for (const auto &P : ArgPack.pack_elements())
ID.AddPointer(P.getAsType().getAsOpaquePtr());
@@ -3639,8 +4053,7 @@ TemplateSpecializationType::TemplateSpecializationType(
: Type(TemplateSpecialization, Canon.isNull() ? QualType(this, 0) : Canon,
(Canon.isNull()
? TypeDependence::DependentInstantiation
- : Canon->getDependence() & ~(TypeDependence::VariablyModified |
- TypeDependence::UnexpandedPack)) |
+ : toSemanticDependence(Canon->getDependence())) |
(toTypeDependence(T.getDependence()) &
TypeDependence::UnexpandedPack)),
Template(T) {
@@ -3651,7 +4064,8 @@ TemplateSpecializationType::TemplateSpecializationType(
"Use DependentTemplateSpecializationType for dependent template-name");
assert((T.getKind() == TemplateName::Template ||
T.getKind() == TemplateName::SubstTemplateTemplateParm ||
- T.getKind() == TemplateName::SubstTemplateTemplateParmPack) &&
+ T.getKind() == TemplateName::SubstTemplateTemplateParmPack ||
+ T.getKind() == TemplateName::UsingTemplate) &&
"Unexpected template name for TemplateSpecializationType");
auto *TemplateArgs = reinterpret_cast<TemplateArgument *>(this + 1);
@@ -3675,10 +4089,22 @@ TemplateSpecializationType::TemplateSpecializationType(
// Store the aliased type if this is a type alias template specialization.
if (isTypeAlias()) {
auto *Begin = reinterpret_cast<TemplateArgument *>(this + 1);
- *reinterpret_cast<QualType*>(Begin + getNumArgs()) = AliasedType;
+ *reinterpret_cast<QualType *>(Begin + Args.size()) = AliasedType;
}
}
+QualType TemplateSpecializationType::getAliasedType() const {
+ assert(isTypeAlias() && "not a type alias template specialization");
+ return *reinterpret_cast<const QualType *>(template_arguments().end());
+}
+
+void TemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Ctx) {
+ Profile(ID, Template, template_arguments(), Ctx);
+ if (isTypeAlias())
+ getAliasedType().Profile(ID);
+}
+
void
TemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID,
TemplateName T,
@@ -3715,14 +4141,14 @@ void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID,
for (auto typeArg : typeArgs)
ID.AddPointer(typeArg.getAsOpaquePtr());
ID.AddInteger(protocols.size());
- for (auto proto : protocols)
+ for (auto *proto : protocols)
ID.AddPointer(proto);
ID.AddBoolean(isKindOf);
}
void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID) {
Profile(ID, getBaseType(), getTypeArgsAsWritten(),
- llvm::makeArrayRef(qual_begin(), getNumProtocols()),
+ llvm::ArrayRef(qual_begin(), getNumProtocols()),
isKindOfTypeAsWritten());
}
@@ -3733,13 +4159,13 @@ void ObjCTypeParamType::Profile(llvm::FoldingSetNodeID &ID,
ID.AddPointer(OTPDecl);
ID.AddPointer(CanonicalType.getAsOpaquePtr());
ID.AddInteger(protocols.size());
- for (auto proto : protocols)
+ for (auto *proto : protocols)
ID.AddPointer(proto);
}
void ObjCTypeParamType::Profile(llvm::FoldingSetNodeID &ID) {
Profile(ID, getDecl(), getCanonicalTypeInternal(),
- llvm::makeArrayRef(qual_begin(), getNumProtocols()));
+ llvm::ArrayRef(qual_begin(), getNumProtocols()));
}
namespace {
@@ -3757,8 +4183,8 @@ public:
friend CachedProperties merge(CachedProperties L, CachedProperties R) {
Linkage MergedLinkage = minLinkage(L.L, R.L);
- return CachedProperties(MergedLinkage,
- L.hasLocalOrUnnamedType() | R.hasLocalOrUnnamedType());
+ return CachedProperties(MergedLinkage, L.hasLocalOrUnnamedType() ||
+ R.hasLocalOrUnnamedType());
}
};
@@ -3801,7 +4227,7 @@ public:
// Compute the cached properties and then set the cache.
CachedProperties Result = computeCachedProperties(T);
T->TypeBits.CacheValid = true;
- T->TypeBits.CachedLinkage = Result.getLinkage();
+ T->TypeBits.CachedLinkage = llvm::to_underlying(Result.getLinkage());
T->TypeBits.CachedLocalOrUnnamed = Result.hasLocalOrUnnamedType();
}
};
@@ -3833,20 +4259,20 @@ static CachedProperties computeCachedProperties(const Type *T) {
// Treat instantiation-dependent types as external.
if (!T->isInstantiationDependentType()) T->dump();
assert(T->isInstantiationDependentType());
- return CachedProperties(ExternalLinkage, false);
+ return CachedProperties(Linkage::External, false);
case Type::Auto:
case Type::DeducedTemplateSpecialization:
// Give non-deduced 'auto' types external linkage. We should only see them
// here in error recovery.
- return CachedProperties(ExternalLinkage, false);
+ return CachedProperties(Linkage::External, false);
- case Type::ExtInt:
+ case Type::BitInt:
case Type::Builtin:
// C++ [basic.link]p8:
// A type is said to have linkage if and only if:
// - it is a fundamental type (3.9.1); or
- return CachedProperties(ExternalLinkage, false);
+ return CachedProperties(Linkage::External, false);
case Type::Record:
case Type::Enum: {
@@ -3941,7 +4367,7 @@ LinkageInfo LinkageComputer::computeTypeLinkageInfo(const Type *T) {
assert(T->isInstantiationDependentType());
return LinkageInfo::external();
- case Type::ExtInt:
+ case Type::BitInt:
case Type::Builtin:
return LinkageInfo::external();
@@ -4026,8 +4452,7 @@ LinkageInfo Type::getLinkageAndVisibility() const {
return LinkageComputer{}.getTypeLinkageAndVisibility(this);
}
-Optional<NullabilityKind>
-Type::getNullability(const ASTContext &Context) const {
+std::optional<NullabilityKind> Type::getNullability() const {
QualType Type(this, 0);
while (const auto *AT = Type->getAs<AttributedType>()) {
// Check whether this is an attributed type with nullability
@@ -4037,7 +4462,7 @@ Type::getNullability(const ASTContext &Context) const {
Type = AT->getEquivalentType();
}
- return None;
+ return std::nullopt;
}
bool Type::canHaveNullability(bool ResultIfUnknown) const {
@@ -4125,6 +4550,8 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
case BuiltinType::BuiltinFn:
case BuiltinType::NullPtr:
case BuiltinType::IncompleteMatrixIdx:
@@ -4161,15 +4588,14 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
case Type::ObjCInterface:
case Type::Atomic:
case Type::Pipe:
- case Type::ExtInt:
- case Type::DependentExtInt:
+ case Type::BitInt:
+ case Type::DependentBitInt:
return false;
}
llvm_unreachable("bad type kind!");
}
-llvm::Optional<NullabilityKind>
-AttributedType::getImmediateNullability() const {
+std::optional<NullabilityKind> AttributedType::getImmediateNullability() const {
if (getAttrKind() == attr::TypeNonNull)
return NullabilityKind::NonNull;
if (getAttrKind() == attr::TypeNullable)
@@ -4178,10 +4604,11 @@ AttributedType::getImmediateNullability() const {
return NullabilityKind::Unspecified;
if (getAttrKind() == attr::TypeNullableResult)
return NullabilityKind::NullableResult;
- return None;
+ return std::nullopt;
}
-Optional<NullabilityKind> AttributedType::stripOuterNullability(QualType &T) {
+std::optional<NullabilityKind>
+AttributedType::stripOuterNullability(QualType &T) {
QualType AttrTy = T;
if (auto MacroTy = dyn_cast<MacroQualifiedType>(T))
AttrTy = MacroTy->getUnderlyingType();
@@ -4193,7 +4620,7 @@ Optional<NullabilityKind> AttributedType::stripOuterNullability(QualType &T) {
}
}
- return None;
+ return std::nullopt;
}
bool Type::isBlockCompatibleObjCPointerType(ASTContext &ctx) const {
@@ -4255,20 +4682,13 @@ bool Type::isObjCARCImplicitlyUnretainedType() const {
}
bool Type::isObjCNSObjectType() const {
- const Type *cur = this;
- while (true) {
- if (const auto *typedefType = dyn_cast<TypedefType>(cur))
- return typedefType->getDecl()->hasAttr<ObjCNSObjectAttr>();
-
- // Single-step desugar until we run out of sugar.
- QualType next = cur->getLocallyUnqualifiedSingleStepDesugaredType();
- if (next.getTypePtr() == cur) return false;
- cur = next.getTypePtr();
- }
+ if (const auto *typedefType = getAs<TypedefType>())
+ return typedefType->getDecl()->hasAttr<ObjCNSObjectAttr>();
+ return false;
}
bool Type::isObjCIndependentClassType() const {
- if (const auto *typedefType = dyn_cast<TypedefType>(this))
+ if (const auto *typedefType = getAs<TypedefType>())
return typedefType->getDecl()->hasAttr<ObjCIndependentClassAttr>();
return false;
}
@@ -4392,16 +4812,20 @@ void clang::FixedPointValueToString(SmallVectorImpl<char> &Str,
}
AutoType::AutoType(QualType DeducedAsType, AutoTypeKeyword Keyword,
- TypeDependence ExtraDependence,
+ TypeDependence ExtraDependence, QualType Canon,
ConceptDecl *TypeConstraintConcept,
ArrayRef<TemplateArgument> TypeConstraintArgs)
- : DeducedType(Auto, DeducedAsType, ExtraDependence) {
- AutoTypeBits.Keyword = (unsigned)Keyword;
+ : DeducedType(Auto, DeducedAsType, ExtraDependence, Canon) {
+ AutoTypeBits.Keyword = llvm::to_underlying(Keyword);
AutoTypeBits.NumArgs = TypeConstraintArgs.size();
this->TypeConstraintConcept = TypeConstraintConcept;
+ assert(TypeConstraintConcept || AutoTypeBits.NumArgs == 0);
if (TypeConstraintConcept) {
- TemplateArgument *ArgBuffer = getArgBuffer();
+ auto *ArgBuffer =
+ const_cast<TemplateArgument *>(getTypeConstraintArguments().data());
for (const TemplateArgument &Arg : TypeConstraintArgs) {
+ // We only syntactically depend on the constraint arguments. They don't
+ // affect the deduced type, only its validity.
addDependence(
toSyntacticDependence(toTypeDependence(Arg.getDependence())));
@@ -4421,3 +4845,8 @@ void AutoType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
for (const TemplateArgument &Arg : Arguments)
Arg.Profile(ID, Context);
}
+
+void AutoType::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
+ Profile(ID, Context, getDeducedType(), getKeyword(), isDependentType(),
+ getTypeConstraintConcept(), getTypeConstraintArguments());
+}
diff --git a/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp b/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp
index 16d953b4bece..66732bba18e2 100644
--- a/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp
@@ -11,9 +11,10 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/TypeLoc.h"
-#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/TemplateBase.h"
@@ -194,15 +195,21 @@ SourceLocation TypeLoc::getBeginLoc() const {
while (true) {
switch (Cur.getTypeLocClass()) {
case Elaborated:
- LeftMost = Cur;
- break;
+ if (Cur.getLocalSourceRange().getBegin().isValid()) {
+ LeftMost = Cur;
+ break;
+ }
+ Cur = Cur.getNextTypeLoc();
+ if (Cur.isNull())
+ break;
+ continue;
case FunctionProto:
if (Cur.castAs<FunctionProtoTypeLoc>().getTypePtr()
->hasTrailingReturn()) {
LeftMost = Cur;
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case FunctionNoProto:
case ConstantArray:
case DependentSizedArray:
@@ -240,6 +247,8 @@ SourceLocation TypeLoc::getEndLoc() const {
case IncompleteArray:
case VariableArray:
case FunctionNoProto:
+ // The innermost type with suffix syntax always determines the end of the
+ // type.
Last = Cur;
break;
case FunctionProto:
@@ -248,12 +257,19 @@ SourceLocation TypeLoc::getEndLoc() const {
else
Last = Cur;
break;
+ case ObjCObjectPointer:
+ // `id` and `id<...>` have no star location.
+ if (Cur.castAs<ObjCObjectPointerTypeLoc>().getStarLoc().isInvalid())
+ break;
+ [[fallthrough]];
case Pointer:
case BlockPointer:
case MemberPointer:
case LValueReference:
case RValueReference:
case PackExpansion:
+ // Types with prefix syntax only determine the end of the type if there
+ // is no suffix type.
if (!Last)
Last = Cur;
break;
@@ -351,6 +367,7 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
case BuiltinType::LongDouble:
case BuiltinType::Float16:
case BuiltinType::Float128:
+ case BuiltinType::Ibm128:
case BuiltinType::ShortAccum:
case BuiltinType::Accum:
case BuiltinType::LongAccum:
@@ -408,6 +425,8 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
case BuiltinType::BuiltinFn:
case BuiltinType::IncompleteMatrixIdx:
case BuiltinType::OMPArraySection:
@@ -497,12 +516,16 @@ SourceRange AttributedTypeLoc::getLocalSourceRange() const {
return getAttr() ? getAttr()->getRange() : SourceRange();
}
+SourceRange BTFTagAttributedTypeLoc::getLocalSourceRange() const {
+ return getAttr() ? getAttr()->getRange() : SourceRange();
+}
+
void TypeOfTypeLoc::initializeLocal(ASTContext &Context,
SourceLocation Loc) {
TypeofLikeTypeLoc<TypeOfTypeLoc, TypeOfType, TypeOfTypeLocInfo>
::initializeLocal(Context, Loc);
- this->getLocalData()->UnderlyingTInfo = Context.getTrivialTypeSourceInfo(
- getUnderlyingType(), Loc);
+ this->getLocalData()->UnmodifiedTInfo =
+ Context.getTrivialTypeSourceInfo(getUnmodifiedType(), Loc);
}
void UnaryTransformTypeLoc::initializeLocal(ASTContext &Context,
@@ -516,6 +539,8 @@ void UnaryTransformTypeLoc::initializeLocal(ASTContext &Context,
void ElaboratedTypeLoc::initializeLocal(ASTContext &Context,
SourceLocation Loc) {
+ if (isEmpty())
+ return;
setElaboratedKeywordLoc(Loc);
NestedNameSpecifierLocBuilder Builder;
Builder.MakeTrivial(Context, getTypePtr()->getQualifier(), Loc);
@@ -546,17 +571,14 @@ DependentTemplateSpecializationTypeLoc::initializeLocal(ASTContext &Context,
setTemplateNameLoc(Loc);
setLAngleLoc(Loc);
setRAngleLoc(Loc);
- TemplateSpecializationTypeLoc::initializeArgLocs(Context, getNumArgs(),
- getTypePtr()->getArgs(),
- getArgInfos(), Loc);
+ TemplateSpecializationTypeLoc::initializeArgLocs(
+ Context, getTypePtr()->template_arguments(), getArgInfos(), Loc);
}
-void TemplateSpecializationTypeLoc::initializeArgLocs(ASTContext &Context,
- unsigned NumArgs,
- const TemplateArgument *Args,
- TemplateArgumentLocInfo *ArgInfos,
- SourceLocation Loc) {
- for (unsigned i = 0, e = NumArgs; i != e; ++i) {
+void TemplateSpecializationTypeLoc::initializeArgLocs(
+ ASTContext &Context, ArrayRef<TemplateArgument> Args,
+ TemplateArgumentLocInfo *ArgInfos, SourceLocation Loc) {
+ for (unsigned i = 0, e = Args.size(); i != e; ++i) {
switch (Args[i].getKind()) {
case TemplateArgument::Null:
llvm_unreachable("Impossible TemplateArgument");
@@ -564,6 +586,7 @@ void TemplateSpecializationTypeLoc::initializeArgLocs(ASTContext &Context,
case TemplateArgument::Integral:
case TemplateArgument::Declaration:
case TemplateArgument::NullPtr:
+ case TemplateArgument::StructuralValue:
ArgInfos[i] = TemplateArgumentLocInfo();
break;
@@ -600,25 +623,43 @@ void TemplateSpecializationTypeLoc::initializeArgLocs(ASTContext &Context,
}
}
-DeclarationNameInfo AutoTypeLoc::getConceptNameInfo() const {
- return DeclarationNameInfo(getNamedConcept()->getDeclName(),
- getLocalData()->ConceptNameLoc);
+// Builds a ConceptReference where all locations point at the same token,
+// for use in trivial TypeSourceInfo for constrained AutoType
+static ConceptReference *createTrivialConceptReference(ASTContext &Context,
+ SourceLocation Loc,
+ const AutoType *AT) {
+ DeclarationNameInfo DNI =
+ DeclarationNameInfo(AT->getTypeConstraintConcept()->getDeclName(), Loc,
+ AT->getTypeConstraintConcept()->getDeclName());
+ unsigned size = AT->getTypeConstraintArguments().size();
+ TemplateArgumentLocInfo *TALI = new TemplateArgumentLocInfo[size];
+ TemplateSpecializationTypeLoc::initializeArgLocs(
+ Context, AT->getTypeConstraintArguments(), TALI, Loc);
+ TemplateArgumentListInfo TAListI;
+ for (unsigned i = 0; i < size; ++i) {
+ TAListI.addArgument(
+ TemplateArgumentLoc(AT->getTypeConstraintArguments()[i],
+ TALI[i])); // TemplateArgumentLocInfo()
+ }
+
+ auto *ConceptRef = ConceptReference::Create(
+ Context, NestedNameSpecifierLoc{}, Loc, DNI, nullptr,
+ AT->getTypeConstraintConcept(),
+ ASTTemplateArgumentListInfo::Create(Context, TAListI));
+ delete[] TALI;
+ return ConceptRef;
}
void AutoTypeLoc::initializeLocal(ASTContext &Context, SourceLocation Loc) {
- setNestedNameSpecifierLoc(NestedNameSpecifierLoc());
- setTemplateKWLoc(Loc);
- setConceptNameLoc(Loc);
- setFoundDecl(nullptr);
- setRAngleLoc(Loc);
- setLAngleLoc(Loc);
- TemplateSpecializationTypeLoc::initializeArgLocs(Context, getNumArgs(),
- getTypePtr()->getArgs(),
- getArgInfos(), Loc);
+ setRParenLoc(Loc);
setNameLoc(Loc);
+ setConceptReference(nullptr);
+ if (getTypePtr()->isConstrained()) {
+ setConceptReference(
+ createTrivialConceptReference(Context, Loc, getTypePtr()));
+ }
}
-
namespace {
class GetContainedAutoTypeLocVisitor :
@@ -672,6 +713,10 @@ namespace {
return Visit(T.getModifiedLoc());
}
+ TypeLoc VisitBTFTagAttributedTypeLoc(BTFTagAttributedTypeLoc T) {
+ return Visit(T.getWrappedLoc());
+ }
+
TypeLoc VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc T) {
return Visit(T.getInnerLoc());
}
diff --git a/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp b/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp
index 5de22f76f458..e9b6e810b02e 100644
--- a/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp
@@ -22,6 +22,7 @@
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
+#include "clang/AST/TextNodeDumper.h"
#include "clang/AST/Type.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/ExceptionSpecificationType.h"
@@ -32,6 +33,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
@@ -47,94 +49,103 @@ using namespace clang;
namespace {
- /// RAII object that enables printing of the ARC __strong lifetime
- /// qualifier.
- class IncludeStrongLifetimeRAII {
- PrintingPolicy &Policy;
- bool Old;
-
- public:
- explicit IncludeStrongLifetimeRAII(PrintingPolicy &Policy)
- : Policy(Policy), Old(Policy.SuppressStrongLifetime) {
- if (!Policy.SuppressLifetimeQualifiers)
- Policy.SuppressStrongLifetime = false;
- }
+/// RAII object that enables printing of the ARC __strong lifetime
+/// qualifier.
+class IncludeStrongLifetimeRAII {
+ PrintingPolicy &Policy;
+ bool Old;
+
+public:
+ explicit IncludeStrongLifetimeRAII(PrintingPolicy &Policy)
+ : Policy(Policy), Old(Policy.SuppressStrongLifetime) {
+ if (!Policy.SuppressLifetimeQualifiers)
+ Policy.SuppressStrongLifetime = false;
+ }
- ~IncludeStrongLifetimeRAII() {
- Policy.SuppressStrongLifetime = Old;
- }
- };
+ ~IncludeStrongLifetimeRAII() { Policy.SuppressStrongLifetime = Old; }
+};
- class ParamPolicyRAII {
- PrintingPolicy &Policy;
- bool Old;
+class ParamPolicyRAII {
+ PrintingPolicy &Policy;
+ bool Old;
- public:
- explicit ParamPolicyRAII(PrintingPolicy &Policy)
- : Policy(Policy), Old(Policy.SuppressSpecifiers) {
- Policy.SuppressSpecifiers = false;
- }
+public:
+ explicit ParamPolicyRAII(PrintingPolicy &Policy)
+ : Policy(Policy), Old(Policy.SuppressSpecifiers) {
+ Policy.SuppressSpecifiers = false;
+ }
- ~ParamPolicyRAII() {
- Policy.SuppressSpecifiers = Old;
- }
- };
-
- class ElaboratedTypePolicyRAII {
- PrintingPolicy &Policy;
- bool SuppressTagKeyword;
- bool SuppressScope;
-
- public:
- explicit ElaboratedTypePolicyRAII(PrintingPolicy &Policy) : Policy(Policy) {
- SuppressTagKeyword = Policy.SuppressTagKeyword;
- SuppressScope = Policy.SuppressScope;
- Policy.SuppressTagKeyword = true;
- Policy.SuppressScope = true;
- }
+ ~ParamPolicyRAII() { Policy.SuppressSpecifiers = Old; }
+};
- ~ElaboratedTypePolicyRAII() {
- Policy.SuppressTagKeyword = SuppressTagKeyword;
- Policy.SuppressScope = SuppressScope;
- }
- };
-
- class TypePrinter {
- PrintingPolicy Policy;
- unsigned Indentation;
- bool HasEmptyPlaceHolder = false;
- bool InsideCCAttribute = false;
-
- public:
- explicit TypePrinter(const PrintingPolicy &Policy, unsigned Indentation = 0)
- : Policy(Policy), Indentation(Indentation) {}
-
- void print(const Type *ty, Qualifiers qs, raw_ostream &OS,
- StringRef PlaceHolder);
- void print(QualType T, raw_ostream &OS, StringRef PlaceHolder);
-
- static bool canPrefixQualifiers(const Type *T, bool &NeedARCStrongQualifier);
- void spaceBeforePlaceHolder(raw_ostream &OS);
- void printTypeSpec(NamedDecl *D, raw_ostream &OS);
- void printTemplateId(const TemplateSpecializationType *T, raw_ostream &OS,
- bool FullyQualify);
-
- void printBefore(QualType T, raw_ostream &OS);
- void printAfter(QualType T, raw_ostream &OS);
- void AppendScope(DeclContext *DC, raw_ostream &OS,
- DeclarationName NameInScope);
- void printTag(TagDecl *T, raw_ostream &OS);
- void printFunctionAfter(const FunctionType::ExtInfo &Info, raw_ostream &OS);
+class DefaultTemplateArgsPolicyRAII {
+ PrintingPolicy &Policy;
+ bool Old;
+
+public:
+ explicit DefaultTemplateArgsPolicyRAII(PrintingPolicy &Policy)
+ : Policy(Policy), Old(Policy.SuppressDefaultTemplateArgs) {
+ Policy.SuppressDefaultTemplateArgs = false;
+ }
+
+ ~DefaultTemplateArgsPolicyRAII() { Policy.SuppressDefaultTemplateArgs = Old; }
+};
+
+class ElaboratedTypePolicyRAII {
+ PrintingPolicy &Policy;
+ bool SuppressTagKeyword;
+ bool SuppressScope;
+
+public:
+ explicit ElaboratedTypePolicyRAII(PrintingPolicy &Policy) : Policy(Policy) {
+ SuppressTagKeyword = Policy.SuppressTagKeyword;
+ SuppressScope = Policy.SuppressScope;
+ Policy.SuppressTagKeyword = true;
+ Policy.SuppressScope = true;
+ }
+
+ ~ElaboratedTypePolicyRAII() {
+ Policy.SuppressTagKeyword = SuppressTagKeyword;
+ Policy.SuppressScope = SuppressScope;
+ }
+};
+
+class TypePrinter {
+ PrintingPolicy Policy;
+ unsigned Indentation;
+ bool HasEmptyPlaceHolder = false;
+ bool InsideCCAttribute = false;
+
+public:
+ explicit TypePrinter(const PrintingPolicy &Policy, unsigned Indentation = 0)
+ : Policy(Policy), Indentation(Indentation) {}
+
+ void print(const Type *ty, Qualifiers qs, raw_ostream &OS,
+ StringRef PlaceHolder);
+ void print(QualType T, raw_ostream &OS, StringRef PlaceHolder);
+
+ static bool canPrefixQualifiers(const Type *T, bool &NeedARCStrongQualifier);
+ void spaceBeforePlaceHolder(raw_ostream &OS);
+ void printTypeSpec(NamedDecl *D, raw_ostream &OS);
+ void printTemplateId(const TemplateSpecializationType *T, raw_ostream &OS,
+ bool FullyQualify);
+
+ void printBefore(QualType T, raw_ostream &OS);
+ void printAfter(QualType T, raw_ostream &OS);
+ void AppendScope(DeclContext *DC, raw_ostream &OS,
+ DeclarationName NameInScope);
+ void printTag(TagDecl *T, raw_ostream &OS);
+ void printFunctionAfter(const FunctionType::ExtInfo &Info, raw_ostream &OS);
#define ABSTRACT_TYPE(CLASS, PARENT)
-#define TYPE(CLASS, PARENT) \
- void print##CLASS##Before(const CLASS##Type *T, raw_ostream &OS); \
- void print##CLASS##After(const CLASS##Type *T, raw_ostream &OS);
+#define TYPE(CLASS, PARENT) \
+ void print##CLASS##Before(const CLASS##Type *T, raw_ostream &OS); \
+ void print##CLASS##After(const CLASS##Type *T, raw_ostream &OS);
#include "clang/AST/TypeNodes.inc"
- private:
- void printBefore(const Type *ty, Qualifiers qs, raw_ostream &OS);
- void printAfter(const Type *ty, Qualifiers qs, raw_ostream &OS);
- };
+private:
+ void printBefore(const Type *ty, Qualifiers qs, raw_ostream &OS);
+ void printAfter(const Type *ty, Qualifiers qs, raw_ostream &OS);
+};
} // namespace
@@ -184,7 +195,7 @@ void TypePrinter::print(const Type *T, Qualifiers Quals, raw_ostream &OS,
return;
}
- SaveAndRestore<bool> PHVal(HasEmptyPlaceHolder, PlaceHolder.empty());
+ SaveAndRestore PHVal(HasEmptyPlaceHolder, PlaceHolder.empty());
printBefore(T, Quals, OS);
OS << PlaceHolder;
@@ -200,17 +211,19 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
// type expands to a simple string.
bool CanPrefixQualifiers = false;
NeedARCStrongQualifier = false;
- Type::TypeClass TC = T->getTypeClass();
+ const Type *UnderlyingType = T;
if (const auto *AT = dyn_cast<AutoType>(T))
- TC = AT->desugar()->getTypeClass();
+ UnderlyingType = AT->desugar().getTypePtr();
if (const auto *Subst = dyn_cast<SubstTemplateTypeParmType>(T))
- TC = Subst->getReplacementType()->getTypeClass();
+ UnderlyingType = Subst->getReplacementType().getTypePtr();
+ Type::TypeClass TC = UnderlyingType->getTypeClass();
switch (TC) {
case Type::Auto:
case Type::Builtin:
case Type::Complex:
case Type::UnresolvedUsing:
+ case Type::Using:
case Type::Typedef:
case Type::TypeOfExpr:
case Type::TypeOf:
@@ -231,8 +244,9 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
case Type::ObjCInterface:
case Type::Atomic:
case Type::Pipe:
- case Type::ExtInt:
- case Type::DependentExtInt:
+ case Type::BitInt:
+ case Type::DependentBitInt:
+ case Type::BTFTagAttributed:
CanPrefixQualifiers = true;
break;
@@ -241,12 +255,16 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
T->isObjCQualifiedIdType() || T->isObjCQualifiedClassType();
break;
- case Type::ConstantArray:
- case Type::IncompleteArray:
case Type::VariableArray:
case Type::DependentSizedArray:
NeedARCStrongQualifier = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ return canPrefixQualifiers(
+ cast<ArrayType>(UnderlyingType)->getElementType().getTypePtr(),
+ NeedARCStrongQualifier);
case Type::Adjusted:
case Type::Decayed:
@@ -274,8 +292,9 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
case Type::Attributed: {
// We still want to print the address_space before the type if it is an
// address_space attribute.
- const auto *AttrTy = cast<AttributedType>(T);
+ const auto *AttrTy = cast<AttributedType>(UnderlyingType);
CanPrefixQualifiers = AttrTy->getAttrKind() == attr::AddressSpace;
+ break;
}
}
@@ -300,7 +319,7 @@ void TypePrinter::printBefore(const Type *T,Qualifiers Quals, raw_ostream &OS) {
if (Policy.SuppressSpecifiers && T->isSpecifierType())
return;
- SaveAndRestore<bool> PrevPHIsEmpty(HasEmptyPlaceHolder);
+ SaveAndRestore PrevPHIsEmpty(HasEmptyPlaceHolder);
// Print qualifiers as appropriate.
@@ -377,7 +396,7 @@ void TypePrinter::printComplexAfter(const ComplexType *T, raw_ostream &OS) {
void TypePrinter::printPointerBefore(const PointerType *T, raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false);
printBefore(T->getPointeeType(), OS);
// Handle things like 'int (*A)[4];' correctly.
// FIXME: this should include vectors, but vectors use attributes I guess.
@@ -388,7 +407,7 @@ void TypePrinter::printPointerBefore(const PointerType *T, raw_ostream &OS) {
void TypePrinter::printPointerAfter(const PointerType *T, raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false);
// Handle things like 'int (*A)[4];' correctly.
// FIXME: this should include vectors, but vectors use attributes I guess.
if (isa<ArrayType>(T->getPointeeType()))
@@ -398,14 +417,14 @@ void TypePrinter::printPointerAfter(const PointerType *T, raw_ostream &OS) {
void TypePrinter::printBlockPointerBefore(const BlockPointerType *T,
raw_ostream &OS) {
- SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false);
printBefore(T->getPointeeType(), OS);
OS << '^';
}
void TypePrinter::printBlockPointerAfter(const BlockPointerType *T,
raw_ostream &OS) {
- SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false);
printAfter(T->getPointeeType(), OS);
}
@@ -420,7 +439,7 @@ static QualType skipTopLevelReferences(QualType T) {
void TypePrinter::printLValueReferenceBefore(const LValueReferenceType *T,
raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false);
QualType Inner = skipTopLevelReferences(T->getPointeeTypeAsWritten());
printBefore(Inner, OS);
// Handle things like 'int (&A)[4];' correctly.
@@ -433,7 +452,7 @@ void TypePrinter::printLValueReferenceBefore(const LValueReferenceType *T,
void TypePrinter::printLValueReferenceAfter(const LValueReferenceType *T,
raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false);
QualType Inner = skipTopLevelReferences(T->getPointeeTypeAsWritten());
// Handle things like 'int (&A)[4];' correctly.
// FIXME: this should include vectors, but vectors use attributes I guess.
@@ -445,7 +464,7 @@ void TypePrinter::printLValueReferenceAfter(const LValueReferenceType *T,
void TypePrinter::printRValueReferenceBefore(const RValueReferenceType *T,
raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false);
QualType Inner = skipTopLevelReferences(T->getPointeeTypeAsWritten());
printBefore(Inner, OS);
// Handle things like 'int (&&A)[4];' correctly.
@@ -458,7 +477,7 @@ void TypePrinter::printRValueReferenceBefore(const RValueReferenceType *T,
void TypePrinter::printRValueReferenceAfter(const RValueReferenceType *T,
raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false);
QualType Inner = skipTopLevelReferences(T->getPointeeTypeAsWritten());
// Handle things like 'int (&&A)[4];' correctly.
// FIXME: this should include vectors, but vectors use attributes I guess.
@@ -470,7 +489,7 @@ void TypePrinter::printRValueReferenceAfter(const RValueReferenceType *T,
void TypePrinter::printMemberPointerBefore(const MemberPointerType *T,
raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false);
printBefore(T->getPointeeType(), OS);
// Handle things like 'int (Cls::*A)[4];' correctly.
// FIXME: this should include vectors, but vectors use attributes I guess.
@@ -487,7 +506,7 @@ void TypePrinter::printMemberPointerBefore(const MemberPointerType *T,
void TypePrinter::printMemberPointerAfter(const MemberPointerType *T,
raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false);
// Handle things like 'int (Cls::*A)[4];' correctly.
// FIXME: this should include vectors, but vectors use attributes I guess.
if (isa<ArrayType>(T->getPointeeType()))
@@ -498,7 +517,6 @@ void TypePrinter::printMemberPointerAfter(const MemberPointerType *T,
void TypePrinter::printConstantArrayBefore(const ConstantArrayType *T,
raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
printBefore(T->getElementType(), OS);
}
@@ -511,7 +529,7 @@ void TypePrinter::printConstantArrayAfter(const ConstantArrayType *T,
OS << ' ';
}
- if (T->getSizeModifier() == ArrayType::Static)
+ if (T->getSizeModifier() == ArraySizeModifier::Static)
OS << "static ";
OS << T->getSize().getZExtValue() << ']';
@@ -521,7 +539,6 @@ void TypePrinter::printConstantArrayAfter(const ConstantArrayType *T,
void TypePrinter::printIncompleteArrayBefore(const IncompleteArrayType *T,
raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
printBefore(T->getElementType(), OS);
}
@@ -534,7 +551,6 @@ void TypePrinter::printIncompleteArrayAfter(const IncompleteArrayType *T,
void TypePrinter::printVariableArrayBefore(const VariableArrayType *T,
raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
printBefore(T->getElementType(), OS);
}
@@ -546,9 +562,9 @@ void TypePrinter::printVariableArrayAfter(const VariableArrayType *T,
OS << ' ';
}
- if (T->getSizeModifier() == VariableArrayType::Static)
+ if (T->getSizeModifier() == ArraySizeModifier::Static)
OS << "static ";
- else if (T->getSizeModifier() == VariableArrayType::Star)
+ else if (T->getSizeModifier() == ArraySizeModifier::Star)
OS << '*';
if (T->getSizeExpr())
@@ -581,7 +597,6 @@ void TypePrinter::printDependentSizedArrayBefore(
const DependentSizedArrayType *T,
raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
printBefore(T->getElementType(), OS);
}
@@ -627,28 +642,28 @@ void TypePrinter::printDependentSizedExtVectorAfter(
void TypePrinter::printVectorBefore(const VectorType *T, raw_ostream &OS) {
switch (T->getVectorKind()) {
- case VectorType::AltiVecPixel:
+ case VectorKind::AltiVecPixel:
OS << "__vector __pixel ";
break;
- case VectorType::AltiVecBool:
+ case VectorKind::AltiVecBool:
OS << "__vector __bool ";
printBefore(T->getElementType(), OS);
break;
- case VectorType::AltiVecVector:
+ case VectorKind::AltiVecVector:
OS << "__vector ";
printBefore(T->getElementType(), OS);
break;
- case VectorType::NeonVector:
+ case VectorKind::Neon:
OS << "__attribute__((neon_vector_type("
<< T->getNumElements() << "))) ";
printBefore(T->getElementType(), OS);
break;
- case VectorType::NeonPolyVector:
+ case VectorKind::NeonPoly:
OS << "__attribute__((neon_polyvector_type(" <<
T->getNumElements() << "))) ";
printBefore(T->getElementType(), OS);
break;
- case VectorType::GenericVector: {
+ case VectorKind::Generic: {
// FIXME: We prefer to print the size directly here, but have no way
// to get the size of the type.
OS << "__attribute__((__vector_size__("
@@ -659,13 +674,13 @@ void TypePrinter::printVectorBefore(const VectorType *T, raw_ostream &OS) {
printBefore(T->getElementType(), OS);
break;
}
- case VectorType::SveFixedLengthDataVector:
- case VectorType::SveFixedLengthPredicateVector:
+ case VectorKind::SveFixedLengthData:
+ case VectorKind::SveFixedLengthPredicate:
// FIXME: We prefer to print the size directly here, but have no way
// to get the size of the type.
OS << "__attribute__((__arm_sve_vector_bits__(";
- if (T->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
+ if (T->getVectorKind() == VectorKind::SveFixedLengthPredicate)
// Predicates take a bit per byte of the vector size, multiply by 8 to
// get the number of bits passed to the attribute.
OS << T->getNumElements() * 8;
@@ -677,6 +692,21 @@ void TypePrinter::printVectorBefore(const VectorType *T, raw_ostream &OS) {
// Multiply by 8 for the number of bits.
OS << ") * 8))) ";
printBefore(T->getElementType(), OS);
+ break;
+ case VectorKind::RVVFixedLengthData:
+ case VectorKind::RVVFixedLengthMask:
+ // FIXME: We prefer to print the size directly here, but have no way
+ // to get the size of the type.
+ OS << "__attribute__((__riscv_rvv_vector_bits__(";
+
+ OS << T->getNumElements();
+
+ OS << " * sizeof(";
+ print(T->getElementType(), OS, StringRef());
+ // Multiply by 8 for the number of bits.
+ OS << ") * 8))) ";
+ printBefore(T->getElementType(), OS);
+ break;
}
}
@@ -687,32 +717,32 @@ void TypePrinter::printVectorAfter(const VectorType *T, raw_ostream &OS) {
void TypePrinter::printDependentVectorBefore(
const DependentVectorType *T, raw_ostream &OS) {
switch (T->getVectorKind()) {
- case VectorType::AltiVecPixel:
+ case VectorKind::AltiVecPixel:
OS << "__vector __pixel ";
break;
- case VectorType::AltiVecBool:
+ case VectorKind::AltiVecBool:
OS << "__vector __bool ";
printBefore(T->getElementType(), OS);
break;
- case VectorType::AltiVecVector:
+ case VectorKind::AltiVecVector:
OS << "__vector ";
printBefore(T->getElementType(), OS);
break;
- case VectorType::NeonVector:
+ case VectorKind::Neon:
OS << "__attribute__((neon_vector_type(";
if (T->getSizeExpr())
T->getSizeExpr()->printPretty(OS, nullptr, Policy);
OS << "))) ";
printBefore(T->getElementType(), OS);
break;
- case VectorType::NeonPolyVector:
+ case VectorKind::NeonPoly:
OS << "__attribute__((neon_polyvector_type(";
if (T->getSizeExpr())
T->getSizeExpr()->printPretty(OS, nullptr, Policy);
OS << "))) ";
printBefore(T->getElementType(), OS);
break;
- case VectorType::GenericVector: {
+ case VectorKind::Generic: {
// FIXME: We prefer to print the size directly here, but have no way
// to get the size of the type.
OS << "__attribute__((__vector_size__(";
@@ -724,14 +754,14 @@ void TypePrinter::printDependentVectorBefore(
printBefore(T->getElementType(), OS);
break;
}
- case VectorType::SveFixedLengthDataVector:
- case VectorType::SveFixedLengthPredicateVector:
+ case VectorKind::SveFixedLengthData:
+ case VectorKind::SveFixedLengthPredicate:
// FIXME: We prefer to print the size directly here, but have no way
// to get the size of the type.
OS << "__attribute__((__arm_sve_vector_bits__(";
if (T->getSizeExpr()) {
T->getSizeExpr()->printPretty(OS, nullptr, Policy);
- if (T->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
+ if (T->getVectorKind() == VectorKind::SveFixedLengthPredicate)
// Predicates take a bit per byte of the vector size, multiply by 8 to
// get the number of bits passed to the attribute.
OS << " * 8";
@@ -742,6 +772,22 @@ void TypePrinter::printDependentVectorBefore(
}
OS << "))) ";
printBefore(T->getElementType(), OS);
+ break;
+ case VectorKind::RVVFixedLengthData:
+ case VectorKind::RVVFixedLengthMask:
+ // FIXME: We prefer to print the size directly here, but have no way
+ // to get the size of the type.
+ OS << "__attribute__((__riscv_rvv_vector_bits__(";
+ if (T->getSizeExpr()) {
+ T->getSizeExpr()->printPretty(OS, nullptr, Policy);
+ OS << " * sizeof(";
+ print(T->getElementType(), OS, StringRef());
+ // Multiply by 8 for the number of bits.
+ OS << ") * 8";
+ }
+ OS << "))) ";
+ printBefore(T->getElementType(), OS);
+ break;
}
}
@@ -833,7 +879,7 @@ void TypePrinter::printFunctionProtoBefore(const FunctionProtoType *T,
OS << '(';
} else {
// If needed for precedence reasons, wrap the inner part in grouping parens.
- SaveAndRestore<bool> PrevPHIsEmpty(HasEmptyPlaceHolder, false);
+ SaveAndRestore PrevPHIsEmpty(HasEmptyPlaceHolder, false);
printBefore(T->getReturnType(), OS);
if (!PrevPHIsEmpty.get())
OS << '(';
@@ -861,7 +907,7 @@ void TypePrinter::printFunctionProtoAfter(const FunctionProtoType *T,
// If needed for precedence reasons, wrap the inner part in grouping parens.
if (!HasEmptyPlaceHolder)
OS << ')';
- SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false);
OS << '(';
{
@@ -893,6 +939,28 @@ void TypePrinter::printFunctionProtoAfter(const FunctionProtoType *T,
OS << ')';
FunctionType::ExtInfo Info = T->getExtInfo();
+ unsigned SMEBits = T->getAArch64SMEAttributes();
+
+ if (SMEBits & FunctionType::SME_PStateSMCompatibleMask)
+ OS << " __arm_streaming_compatible";
+ if (SMEBits & FunctionType::SME_PStateSMEnabledMask)
+ OS << " __arm_streaming";
+ if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_Preserves)
+ OS << " __arm_preserves(\"za\")";
+ if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_In)
+ OS << " __arm_in(\"za\")";
+ if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_Out)
+ OS << " __arm_out(\"za\")";
+ if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_InOut)
+ OS << " __arm_inout(\"za\")";
+ if (FunctionType::getArmZT0State(SMEBits) == FunctionType::ARM_Preserves)
+ OS << " __arm_preserves(\"zt0\")";
+ if (FunctionType::getArmZT0State(SMEBits) == FunctionType::ARM_In)
+ OS << " __arm_in(\"zt0\")";
+ if (FunctionType::getArmZT0State(SMEBits) == FunctionType::ARM_Out)
+ OS << " __arm_out(\"zt0\")";
+ if (FunctionType::getArmZT0State(SMEBits) == FunctionType::ARM_InOut)
+ OS << " __arm_inout(\"zt0\")";
printFunctionAfter(Info, OS);
@@ -957,6 +1025,12 @@ void TypePrinter::printFunctionAfter(const FunctionType::ExtInfo &Info,
case CC_AArch64VectorCall:
OS << "__attribute__((aarch64_vector_pcs))";
break;
+ case CC_AArch64SVEPCS:
+ OS << "__attribute__((aarch64_sve_pcs))";
+ break;
+ case CC_AMDGPUKernelCall:
+ OS << "__attribute__((amdgpu_kernel))";
+ break;
case CC_IntelOclBicc:
OS << " __attribute__((intel_ocl_bicc))";
break;
@@ -985,6 +1059,9 @@ void TypePrinter::printFunctionAfter(const FunctionType::ExtInfo &Info,
case CC_PreserveAll:
OS << " __attribute__((preserve_all))";
break;
+ case CC_M68kRTD:
+ OS << " __attribute__((m68k_rtd))";
+ break;
}
}
@@ -1006,7 +1083,7 @@ void TypePrinter::printFunctionAfter(const FunctionType::ExtInfo &Info,
void TypePrinter::printFunctionNoProtoBefore(const FunctionNoProtoType *T,
raw_ostream &OS) {
// If needed for precedence reasons, wrap the inner part in grouping parens.
- SaveAndRestore<bool> PrevPHIsEmpty(HasEmptyPlaceHolder, false);
+ SaveAndRestore PrevPHIsEmpty(HasEmptyPlaceHolder, false);
printBefore(T->getReturnType(), OS);
if (!PrevPHIsEmpty.get())
OS << '(';
@@ -1017,7 +1094,7 @@ void TypePrinter::printFunctionNoProtoAfter(const FunctionNoProtoType *T,
// If needed for precedence reasons, wrap the inner part in grouping parens.
if (!HasEmptyPlaceHolder)
OS << ')';
- SaveAndRestore<bool> NonEmptyPH(HasEmptyPlaceHolder, false);
+ SaveAndRestore NonEmptyPH(HasEmptyPlaceHolder, false);
OS << "()";
printFunctionAfter(T->getExtInfo(), OS);
@@ -1045,6 +1122,21 @@ void TypePrinter::printUnresolvedUsingBefore(const UnresolvedUsingType *T,
void TypePrinter::printUnresolvedUsingAfter(const UnresolvedUsingType *T,
raw_ostream &OS) {}
+void TypePrinter::printUsingBefore(const UsingType *T, raw_ostream &OS) {
+ // After `namespace b { using a::X }`, is the type X within B a::X or b::X?
+ //
+ // - b::X is more formally correct given the UsingType model
+ // - b::X makes sense if "re-exporting" a symbol in a new namespace
+ // - a::X makes sense if "importing" a symbol for convenience
+ //
+ // The "importing" use seems much more common, so we print a::X.
+ // This could be a policy option, but the right choice seems to rest more
+ // with the intent of the code than the caller.
+ printTypeSpec(T->getFoundDecl()->getUnderlyingDecl(), OS);
+}
+
+void TypePrinter::printUsingAfter(const UsingType *T, raw_ostream &OS) {}
+
void TypePrinter::printTypedefBefore(const TypedefType *T, raw_ostream &OS) {
printTypeSpec(T->getDecl(), OS);
}
@@ -1068,7 +1160,8 @@ void TypePrinter::printTypedefAfter(const TypedefType *T, raw_ostream &OS) {}
void TypePrinter::printTypeOfExprBefore(const TypeOfExprType *T,
raw_ostream &OS) {
- OS << "typeof ";
+ OS << (T->getKind() == TypeOfKind::Unqualified ? "typeof_unqual "
+ : "typeof ");
if (T->getUnderlyingExpr())
T->getUnderlyingExpr()->printPretty(OS, nullptr, Policy);
spaceBeforePlaceHolder(OS);
@@ -1078,8 +1171,9 @@ void TypePrinter::printTypeOfExprAfter(const TypeOfExprType *T,
raw_ostream &OS) {}
void TypePrinter::printTypeOfBefore(const TypeOfType *T, raw_ostream &OS) {
- OS << "typeof(";
- print(T->getUnderlyingType(), OS, StringRef());
+ OS << (T->getKind() == TypeOfKind::Unqualified ? "typeof_unqual("
+ : "typeof(");
+ print(T->getUnmodifiedType(), OS, StringRef());
OS << ')';
spaceBeforePlaceHolder(OS);
}
@@ -1100,29 +1194,19 @@ void TypePrinter::printUnaryTransformBefore(const UnaryTransformType *T,
raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- switch (T->getUTTKind()) {
- case UnaryTransformType::EnumUnderlyingType:
- OS << "__underlying_type(";
- print(T->getBaseType(), OS, StringRef());
- OS << ')';
- spaceBeforePlaceHolder(OS);
- return;
- }
-
- printBefore(T->getBaseType(), OS);
+ static llvm::DenseMap<int, const char *> Transformation = {{
+#define TRANSFORM_TYPE_TRAIT_DEF(Enum, Trait) \
+ {UnaryTransformType::Enum, "__" #Trait},
+#include "clang/Basic/TransformTypeTraits.def"
+ }};
+ OS << Transformation[T->getUTTKind()] << '(';
+ print(T->getBaseType(), OS, StringRef());
+ OS << ')';
+ spaceBeforePlaceHolder(OS);
}
void TypePrinter::printUnaryTransformAfter(const UnaryTransformType *T,
- raw_ostream &OS) {
- IncludeStrongLifetimeRAII Strong(Policy);
-
- switch (T->getUTTKind()) {
- case UnaryTransformType::EnumUnderlyingType:
- return;
- }
-
- printAfter(T->getBaseType(), OS);
-}
+ raw_ostream &OS) {}
void TypePrinter::printAutoBefore(const AutoType *T, raw_ostream &OS) {
// If the type has been deduced, do not print 'auto'.
@@ -1199,26 +1283,26 @@ void TypePrinter::printPipeBefore(const PipeType *T, raw_ostream &OS) {
void TypePrinter::printPipeAfter(const PipeType *T, raw_ostream &OS) {}
-void TypePrinter::printExtIntBefore(const ExtIntType *T, raw_ostream &OS) {
+void TypePrinter::printBitIntBefore(const BitIntType *T, raw_ostream &OS) {
if (T->isUnsigned())
OS << "unsigned ";
- OS << "_ExtInt(" << T->getNumBits() << ")";
+ OS << "_BitInt(" << T->getNumBits() << ")";
spaceBeforePlaceHolder(OS);
}
-void TypePrinter::printExtIntAfter(const ExtIntType *T, raw_ostream &OS) {}
+void TypePrinter::printBitIntAfter(const BitIntType *T, raw_ostream &OS) {}
-void TypePrinter::printDependentExtIntBefore(const DependentExtIntType *T,
+void TypePrinter::printDependentBitIntBefore(const DependentBitIntType *T,
raw_ostream &OS) {
if (T->isUnsigned())
OS << "unsigned ";
- OS << "_ExtInt(";
+ OS << "_BitInt(";
T->getNumBitsExpr()->printPretty(OS, nullptr, Policy);
OS << ")";
spaceBeforePlaceHolder(OS);
}
-void TypePrinter::printDependentExtIntAfter(const DependentExtIntType *T,
+void TypePrinter::printDependentBitIntAfter(const DependentBitIntType *T,
raw_ostream &OS) {}
/// Appends the given scope to the end of a string.
@@ -1328,11 +1412,20 @@ void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) {
if (PLoc.isValid()) {
OS << " at ";
StringRef File = PLoc.getFilename();
+ llvm::SmallString<1024> WrittenFile(File);
if (auto *Callbacks = Policy.Callbacks)
- OS << Callbacks->remapPath(File);
- else
- OS << File;
- OS << ':' << PLoc.getLine() << ':' << PLoc.getColumn();
+ WrittenFile = Callbacks->remapPath(File);
+ // Fix inconsistent path separator created by
+ // clang::DirectoryLookup::LookupFile when the file path is relative
+ // path.
+ llvm::sys::path::Style Style =
+ llvm::sys::path::is_absolute(WrittenFile)
+ ? llvm::sys::path::Style::native
+ : (Policy.MSVCFormatting
+ ? llvm::sys::path::Style::windows_backslash
+ : llvm::sys::path::Style::posix);
+ llvm::sys::path::native(WrittenFile, Style);
+ OS << WrittenFile << ':' << PLoc.getLine() << ':' << PLoc.getColumn();
}
}
@@ -1363,9 +1456,11 @@ void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) {
void TypePrinter::printRecordBefore(const RecordType *T, raw_ostream &OS) {
// Print the preferred name if we have one for this type.
- for (const auto *PNA : T->getDecl()->specific_attrs<PreferredNameAttr>()) {
- if (declaresSameEntity(PNA->getTypedefType()->getAsCXXRecordDecl(),
- T->getDecl())) {
+ if (Policy.UsePreferredNames) {
+ for (const auto *PNA : T->getDecl()->specific_attrs<PreferredNameAttr>()) {
+ if (!declaresSameEntity(PNA->getTypedefType()->getAsCXXRecordDecl(),
+ T->getDecl()))
+ continue;
// Find the outermost typedef or alias template.
QualType T = PNA->getTypedefType();
while (true) {
@@ -1399,7 +1494,8 @@ void TypePrinter::printTemplateTypeParmBefore(const TemplateTypeParmType *T,
}
OS << "auto";
} else if (IdentifierInfo *Id = T->getIdentifier())
- OS << Id->getName();
+ OS << (Policy.CleanUglifiedParameters ? Id->deuglifiedName()
+ : Id->getName());
else
OS << "type-parameter-" << T->getDepth() << '-' << T->getIndex();
@@ -1427,14 +1523,27 @@ void TypePrinter::printSubstTemplateTypeParmPackBefore(
const SubstTemplateTypeParmPackType *T,
raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- printTemplateTypeParmBefore(T->getReplacedParameter(), OS);
+ if (const TemplateTypeParmDecl *D = T->getReplacedParameter()) {
+ if (D && D->isImplicit()) {
+ if (auto *TC = D->getTypeConstraint()) {
+ TC->print(OS, Policy);
+ OS << ' ';
+ }
+ OS << "auto";
+ } else if (IdentifierInfo *Id = D->getIdentifier())
+ OS << (Policy.CleanUglifiedParameters ? Id->deuglifiedName()
+ : Id->getName());
+ else
+ OS << "type-parameter-" << D->getDepth() << '-' << D->getIndex();
+
+ spaceBeforePlaceHolder(OS);
+ }
}
void TypePrinter::printSubstTemplateTypeParmPackAfter(
const SubstTemplateTypeParmPackType *T,
raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
- printTemplateTypeParmAfter(T->getReplacedParameter(), OS);
}
void TypePrinter::printTemplateId(const TemplateSpecializationType *T,
@@ -1442,17 +1551,19 @@ void TypePrinter::printTemplateId(const TemplateSpecializationType *T,
IncludeStrongLifetimeRAII Strong(Policy);
TemplateDecl *TD = T->getTemplateName().getAsTemplateDecl();
+ // FIXME: Null TD never excercised in test suite.
if (FullyQualify && TD) {
if (!Policy.SuppressScope)
AppendScope(TD->getDeclContext(), OS, TD->getDeclName());
- IdentifierInfo *II = TD->getIdentifier();
- OS << II->getName();
+ OS << TD->getName();
} else {
T->getTemplateName().print(OS, Policy);
}
- printTemplateArgumentList(OS, T->template_arguments(), Policy);
+ DefaultTemplateArgsPolicyRAII TemplateArgs(Policy);
+ const TemplateParameterList *TPL = TD ? TD->getTemplateParameters() : nullptr;
+ printTemplateArgumentList(OS, T->template_arguments(), Policy, TPL);
spaceBeforePlaceHolder(OS);
}
@@ -1492,11 +1603,16 @@ void TypePrinter::printElaboratedBefore(const ElaboratedType *T,
return;
}
+ if (Policy.SuppressElaboration) {
+ printBefore(T->getNamedType(), OS);
+ return;
+ }
+
// The tag definition will take care of these.
if (!Policy.IncludeTagDefinition)
{
OS << TypeWithKeyword::getKeywordName(T->getKeyword());
- if (T->getKeyword() != ETK_None)
+ if (T->getKeyword() != ElaboratedTypeKeyword::None)
OS << " ";
NestedNameSpecifier *Qualifier = T->getQualifier();
if (Qualifier)
@@ -1511,6 +1627,12 @@ void TypePrinter::printElaboratedAfter(const ElaboratedType *T,
raw_ostream &OS) {
if (Policy.IncludeTagDefinition && T->getOwnedTagDecl())
return;
+
+ if (Policy.SuppressElaboration) {
+ printAfter(T->getNamedType(), OS);
+ return;
+ }
+
ElaboratedTypePolicyRAII PolicyRAII(Policy);
printAfter(T->getNamedType(), OS);
}
@@ -1534,7 +1656,7 @@ void TypePrinter::printParenAfter(const ParenType *T, raw_ostream &OS) {
void TypePrinter::printDependentNameBefore(const DependentNameType *T,
raw_ostream &OS) {
OS << TypeWithKeyword::getKeywordName(T->getKeyword());
- if (T->getKeyword() != ETK_None)
+ if (T->getKeyword() != ElaboratedTypeKeyword::None)
OS << " ";
T->getQualifier()->print(OS, Policy);
@@ -1551,7 +1673,7 @@ void TypePrinter::printDependentTemplateSpecializationBefore(
IncludeStrongLifetimeRAII Strong(Policy);
OS << TypeWithKeyword::getKeywordName(T->getKeyword());
- if (T->getKeyword() != ETK_None)
+ if (T->getKeyword() != ElaboratedTypeKeyword::None)
OS << " ";
if (T->getQualifier())
@@ -1603,6 +1725,9 @@ void TypePrinter::printAttributedBefore(const AttributedType *T,
spaceBeforePlaceHolder(OS);
}
+ if (T->isWebAssemblyFuncrefSpec())
+ OS << "__funcref";
+
// Print nullability type specifiers.
if (T->getImmediateNullability()) {
if (T->getAttrKind() == attr::TypeNonNull)
@@ -1630,14 +1755,14 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
// If this is a calling convention attribute, don't print the implicit CC from
// the modified type.
- SaveAndRestore<bool> MaybeSuppressCC(InsideCCAttribute, T->isCallingConv());
+ SaveAndRestore MaybeSuppressCC(InsideCCAttribute, T->isCallingConv());
printAfter(T->getModifiedType(), OS);
// Some attributes are printed as qualifiers before the type, so we have
// nothing left to do.
- if (T->getAttrKind() == attr::ObjCKindOf ||
- T->isMSTypeSpec() || T->getImmediateNullability())
+ if (T->getAttrKind() == attr::ObjCKindOf || T->isMSTypeSpec() ||
+ T->getImmediateNullability() || T->isWebAssemblyFuncrefSpec())
return;
// Don't print the inert __unsafe_unretained attribute at all.
@@ -1661,6 +1786,24 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
if (T->getAttrKind() == attr::AddressSpace)
return;
+ if (T->getAttrKind() == attr::AnnotateType) {
+ // FIXME: Print the attribute arguments once we have a way to retrieve these
+ // here. For the meantime, we just print `[[clang::annotate_type(...)]]`
+ // without the arguments so that we know at least that we had _some_
+ // annotation on the type.
+ OS << " [[clang::annotate_type(...)]]";
+ return;
+ }
+
+ if (T->getAttrKind() == attr::ArmStreaming) {
+ OS << "__arm_streaming";
+ return;
+ }
+ if (T->getAttrKind() == attr::ArmStreamingCompatible) {
+ OS << "__arm_streaming_compatible";
+ return;
+ }
+
OS << " __attribute__((";
switch (T->getAttrKind()) {
#define TYPE_ATTR(NAME)
@@ -1669,6 +1812,9 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
#include "clang/Basic/AttrList.inc"
llvm_unreachable("non-type attribute attached to type");
+ case attr::BTFTypeTag:
+ llvm_unreachable("BTFTypeTag attribute handled separately");
+
case attr::OpenCLPrivateAddressSpace:
case attr::OpenCLGlobalAddressSpace:
case attr::OpenCLGlobalDeviceAddressSpace:
@@ -1676,6 +1822,7 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case attr::OpenCLLocalAddressSpace:
case attr::OpenCLConstantAddressSpace:
case attr::OpenCLGenericAddressSpace:
+ case attr::HLSLGroupSharedAddressSpace:
// FIXME: Update printAttributedBefore to print these once we generate
// AttributedType nodes for them.
break;
@@ -1695,6 +1842,14 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case attr::UPtr:
case attr::AddressSpace:
case attr::CmseNSCall:
+ case attr::AnnotateType:
+ case attr::WebAssemblyFuncref:
+ case attr::ArmStreaming:
+ case attr::ArmStreamingCompatible:
+ case attr::ArmIn:
+ case attr::ArmOut:
+ case attr::ArmInOut:
+ case attr::ArmPreserves:
llvm_unreachable("This attribute should have been handled already");
case attr::NSReturnsRetained:
@@ -1726,6 +1881,8 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
break;
}
case attr::AArch64VectorPcs: OS << "aarch64_vector_pcs"; break;
+ case attr::AArch64SVEPcs: OS << "aarch64_sve_pcs"; break;
+ case attr::AMDGPUKernelCall: OS << "amdgpu_kernel"; break;
case attr::IntelOclBicc: OS << "inteloclbicc"; break;
case attr::PreserveMost:
OS << "preserve_most";
@@ -1734,6 +1891,9 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case attr::PreserveAll:
OS << "preserve_all";
break;
+ case attr::M68kRTD:
+ OS << "m68k_rtd";
+ break;
case attr::NoDeref:
OS << "noderef";
break;
@@ -1743,10 +1903,25 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case attr::ArmMveStrictPolymorphism:
OS << "__clang_arm_mve_strict_polymorphism";
break;
+
+ // Nothing to print for this attribute.
+ case attr::HLSLParamModifier:
+ break;
}
OS << "))";
}
+void TypePrinter::printBTFTagAttributedBefore(const BTFTagAttributedType *T,
+ raw_ostream &OS) {
+ printBefore(T->getWrappedType(), OS);
+ OS << " __attribute__((btf_type_tag(\"" << T->getAttr()->getBTFTypeTag() << "\")))";
+}
+
+void TypePrinter::printBTFTagAttributedAfter(const BTFTagAttributedType *T,
+ raw_ostream &OS) {
+ printAfter(T->getWrappedType(), OS);
+}
+
void TypePrinter::printObjCInterfaceBefore(const ObjCInterfaceType *T,
raw_ostream &OS) {
OS << T->getDecl()->getName();
@@ -1921,11 +2096,11 @@ static bool isSubstitutedType(ASTContext &Ctx, QualType T, QualType Pattern,
if (!isSubstitutedTemplateArgument(Ctx, Template, PTST->getTemplateName(),
Args, Depth))
return false;
- if (TemplateArgs.size() != PTST->getNumArgs())
+ if (TemplateArgs.size() != PTST->template_arguments().size())
return false;
for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
- if (!isSubstitutedTemplateArgument(Ctx, TemplateArgs[I], PTST->getArg(I),
- Args, Depth))
+ if (!isSubstitutedTemplateArgument(
+ Ctx, TemplateArgs[I], PTST->template_arguments()[I], Args, Depth))
return false;
return true;
}
@@ -1934,6 +2109,36 @@ static bool isSubstitutedType(ASTContext &Ctx, QualType T, QualType Pattern,
return false;
}
+/// Evaluates the expression template argument 'Pattern' and returns true
+/// if 'Arg' evaluates to the same result.
+static bool templateArgumentExpressionsEqual(ASTContext const &Ctx,
+ TemplateArgument const &Pattern,
+ TemplateArgument const &Arg) {
+ if (Pattern.getKind() != TemplateArgument::Expression)
+ return false;
+
+ // Can't evaluate value-dependent expressions so bail early
+ Expr const *pattern_expr = Pattern.getAsExpr();
+ if (pattern_expr->isValueDependent() ||
+ !pattern_expr->isIntegerConstantExpr(Ctx))
+ return false;
+
+ if (Arg.getKind() == TemplateArgument::Integral)
+ return llvm::APSInt::isSameValue(pattern_expr->EvaluateKnownConstInt(Ctx),
+ Arg.getAsIntegral());
+
+ if (Arg.getKind() == TemplateArgument::Expression) {
+ Expr const *args_expr = Arg.getAsExpr();
+ if (args_expr->isValueDependent() || !args_expr->isIntegerConstantExpr(Ctx))
+ return false;
+
+ return llvm::APSInt::isSameValue(args_expr->EvaluateKnownConstInt(Ctx),
+ pattern_expr->EvaluateKnownConstInt(Ctx));
+ }
+
+ return false;
+}
+
static bool isSubstitutedTemplateArgument(ASTContext &Ctx, TemplateArgument Arg,
TemplateArgument Pattern,
ArrayRef<TemplateArgument> Args,
@@ -1952,6 +2157,9 @@ static bool isSubstitutedTemplateArgument(ASTContext &Ctx, TemplateArgument Arg,
}
}
+ if (templateArgumentExpressionsEqual(Ctx, Pattern, Arg))
+ return true;
+
if (Arg.getKind() != Pattern.getKind())
return false;
@@ -1971,9 +2179,7 @@ static bool isSubstitutedTemplateArgument(ASTContext &Ctx, TemplateArgument Arg,
return false;
}
-/// Make a best-effort determination of whether the type T can be produced by
-/// substituting Args into the default argument of Param.
-static bool isSubstitutedDefaultArgument(ASTContext &Ctx, TemplateArgument Arg,
+bool clang::isSubstitutedDefaultArgument(ASTContext &Ctx, TemplateArgument Arg,
const NamedDecl *Param,
ArrayRef<TemplateArgument> Args,
unsigned Depth) {
@@ -1998,27 +2204,22 @@ static bool isSubstitutedDefaultArgument(ASTContext &Ctx, TemplateArgument Arg,
}
template <typename TA>
-static void printTo(raw_ostream &OS, ArrayRef<TA> Args,
- const PrintingPolicy &Policy, bool SkipBrackets,
- const TemplateParameterList *TPL, bool IsPack,
- unsigned ParmIndex) {
+static void
+printTo(raw_ostream &OS, ArrayRef<TA> Args, const PrintingPolicy &Policy,
+ const TemplateParameterList *TPL, bool IsPack, unsigned ParmIndex) {
// Drop trailing template arguments that match default arguments.
if (TPL && Policy.SuppressDefaultTemplateArgs &&
!Policy.PrintCanonicalTypes && !Args.empty() && !IsPack &&
Args.size() <= TPL->size()) {
- ASTContext &Ctx = TPL->getParam(0)->getASTContext();
llvm::SmallVector<TemplateArgument, 8> OrigArgs;
for (const TA &A : Args)
OrigArgs.push_back(getArgument(A));
- while (!Args.empty() &&
- isSubstitutedDefaultArgument(Ctx, getArgument(Args.back()),
- TPL->getParam(Args.size() - 1),
- OrigArgs, TPL->getDepth()))
+ while (!Args.empty() && getArgument(Args.back()).getIsDefaulted())
Args = Args.drop_back();
}
const char *Comma = Policy.MSVCFormatting ? "," : ", ";
- if (!SkipBrackets)
+ if (!IsPack)
OS << '<';
bool NeedSpace = false;
@@ -2031,15 +2232,15 @@ static void printTo(raw_ostream &OS, ArrayRef<TA> Args,
if (Argument.getKind() == TemplateArgument::Pack) {
if (Argument.pack_size() && !FirstArg)
OS << Comma;
- printTo(ArgOS, Argument.getPackAsArray(), Policy, true, TPL,
+ printTo(ArgOS, Argument.getPackAsArray(), Policy, TPL,
/*IsPack*/ true, ParmIndex);
} else {
if (!FirstArg)
OS << Comma;
// Tries to print the argument with location info if exists.
- printArgument(
- Arg, Policy, ArgOS,
- TemplateParameterList::shouldIncludeTypeForArgument(TPL, ParmIndex));
+ printArgument(Arg, Policy, ArgOS,
+ TemplateParameterList::shouldIncludeTypeForArgument(
+ Policy, TPL, ParmIndex));
}
StringRef ArgString = ArgOS.str();
@@ -2053,20 +2254,21 @@ static void printTo(raw_ostream &OS, ArrayRef<TA> Args,
// If the last character of our string is '>', add another space to
// keep the two '>''s separate tokens.
- NeedSpace = Policy.SplitTemplateClosers && !ArgString.empty() &&
- ArgString.back() == '>';
- FirstArg = false;
+ if (!ArgString.empty()) {
+ NeedSpace = Policy.SplitTemplateClosers && ArgString.back() == '>';
+ FirstArg = false;
+ }
// Use same template parameter for all elements of Pack
if (!IsPack)
ParmIndex++;
}
- if (NeedSpace)
- OS << ' ';
-
- if (!SkipBrackets)
+ if (!IsPack) {
+ if (NeedSpace)
+ OS << ' ';
OS << '>';
+ }
}
void clang::printTemplateArgumentList(raw_ostream &OS,
@@ -2080,14 +2282,14 @@ void clang::printTemplateArgumentList(raw_ostream &OS,
ArrayRef<TemplateArgument> Args,
const PrintingPolicy &Policy,
const TemplateParameterList *TPL) {
- printTo(OS, Args, Policy, false, TPL, /*isPack*/ false, /*parmIndex*/ 0);
+ printTo(OS, Args, Policy, TPL, /*isPack*/ false, /*parmIndex*/ 0);
}
void clang::printTemplateArgumentList(raw_ostream &OS,
ArrayRef<TemplateArgumentLoc> Args,
const PrintingPolicy &Policy,
const TemplateParameterList *TPL) {
- printTo(OS, Args, Policy, false, TPL, /*isPack*/ false, /*parmIndex*/ 0);
+ printTo(OS, Args, Policy, TPL, /*isPack*/ false, /*parmIndex*/ 0);
}
std::string Qualifiers::getAsString() const {
@@ -2157,6 +2359,10 @@ std::string Qualifiers::getAddrSpaceAsString(LangAS AS) {
return "__uptr __ptr32";
case LangAS::ptr64:
return "__ptr64";
+ case LangAS::wasm_funcref:
+ return "__funcref";
+ case LangAS::hlsl_groupshared:
+ return "groupshared";
default:
return std::to_string(toTargetAddressSpace(AS));
}
@@ -2272,3 +2478,9 @@ void QualType::getAsStringInternal(const Type *ty, Qualifiers qs,
std::string str = std::string(StrOS.str());
buffer.swap(str);
}
+
+raw_ostream &clang::operator<<(raw_ostream &OS, QualType QT) {
+ SplitQualType S = QT.split();
+ TypePrinter(LangOptions()).print(S.Ty, S.Quals, OS, /*PlaceHolder=*/"");
+ return OS;
+}
diff --git a/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp b/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp
index 38d6fc28e098..a956ca5b37ac 100644
--- a/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
@@ -421,7 +422,7 @@ void FinalOverriders::dump(raw_ostream &Out, BaseSubobject Base,
Out << ", " << Overrider.Offset.getQuantity() << ')';
BaseOffset Offset;
- if (!Overrider.Method->isPure())
+ if (!Overrider.Method->isPureVirtual())
Offset = ComputeReturnAdjustmentBaseOffset(Context, Overrider.Method, MD);
if (!Offset.isEmpty()) {
@@ -664,13 +665,18 @@ CharUnits VCallAndVBaseOffsetBuilder::getCurrentOffsetOffset() const {
// vtable address point. (We subtract 3 to account for the information just
// above the address point, the RTTI info, the offset to top, and the
// vcall offset itself).
- int64_t OffsetIndex = -(int64_t)(3 + Components.size());
+ size_t NumComponentsAboveAddrPoint = 3;
+ if (Context.getLangOpts().OmitVTableRTTI)
+ NumComponentsAboveAddrPoint--;
+ int64_t OffsetIndex =
+ -(int64_t)(NumComponentsAboveAddrPoint + Components.size());
// Under the relative ABI, the offset widths are 32-bit ints instead of
// pointer widths.
CharUnits OffsetWidth = Context.toCharUnitsFromBits(
- VTables.isRelativeLayout() ? 32
- : Context.getTargetInfo().getPointerWidth(0));
+ VTables.isRelativeLayout()
+ ? 32
+ : Context.getTargetInfo().getPointerWidth(LangAS::Default));
CharUnits OffsetOffset = OffsetWidth * OffsetIndex;
return OffsetOffset;
@@ -1070,7 +1076,7 @@ void ItaniumVTableBuilder::AddThunk(const CXXMethodDecl *MD,
SmallVectorImpl<ThunkInfo> &ThunksVector = Thunks[MD];
// Check if we have this thunk already.
- if (llvm::find(ThunksVector, Thunk) != ThunksVector.end())
+ if (llvm::is_contained(ThunksVector, Thunk))
return;
ThunksVector.push_back(Thunk);
@@ -1255,7 +1261,7 @@ ThisAdjustment ItaniumVTableBuilder::ComputeThisAdjustment(
const CXXMethodDecl *MD, CharUnits BaseOffsetInLayoutClass,
FinalOverriders::OverriderInfo Overrider) {
// Ignore adjustments for pure virtual member functions.
- if (Overrider.Method->isPure())
+ if (Overrider.Method->isPureVirtual())
return ThisAdjustment();
BaseSubobject OverriddenBaseSubobject(MD->getParent(),
@@ -1418,8 +1424,7 @@ FindNearestOverriddenMethod(const CXXMethodDecl *MD,
OverriddenMethodsSetTy OverriddenMethods;
ComputeAllOverriddenMethods(MD, OverriddenMethods);
- for (const CXXRecordDecl *PrimaryBase :
- llvm::make_range(Bases.rbegin(), Bases.rend())) {
+ for (const CXXRecordDecl *PrimaryBase : llvm::reverse(Bases)) {
// Now check the overridden methods.
for (const CXXMethodDecl *OverriddenMD : OverriddenMethods) {
// We found our overridden method.
@@ -1559,6 +1564,8 @@ void ItaniumVTableBuilder::AddMethods(
std::stable_sort(
NewImplicitVirtualFunctions.begin(), NewImplicitVirtualFunctions.end(),
[](const CXXMethodDecl *A, const CXXMethodDecl *B) {
+ if (A == B)
+ return false;
if (A->isCopyAssignmentOperator() != B->isCopyAssignmentOperator())
return A->isCopyAssignmentOperator();
if (A->isMoveAssignmentOperator() != B->isMoveAssignmentOperator())
@@ -1600,7 +1607,7 @@ void ItaniumVTableBuilder::AddMethods(
// Check if this overrider needs a return adjustment.
// We don't want to do this for pure virtual member functions.
BaseOffset ReturnAdjustmentOffset;
- if (!OverriderMD->isPure()) {
+ if (!OverriderMD->isPureVirtual()) {
ReturnAdjustmentOffset =
ComputeReturnAdjustmentBaseOffset(Context, OverriderMD, MD);
}
@@ -1666,7 +1673,8 @@ void ItaniumVTableBuilder::LayoutPrimaryAndSecondaryVTables(
Components.push_back(VTableComponent::MakeOffsetToTop(OffsetToTop));
// Next, add the RTTI.
- Components.push_back(VTableComponent::MakeRTTI(MostDerivedClass));
+ if (!Context.getLangOpts().OmitVTableRTTI)
+ Components.push_back(VTableComponent::MakeRTTI(MostDerivedClass));
uint64_t AddressPoint = Components.size();
@@ -1948,11 +1956,10 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
case VTableComponent::CK_FunctionPointer: {
const CXXMethodDecl *MD = Component.getFunctionDecl();
- std::string Str =
- PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
- MD);
+ std::string Str = PredefinedExpr::ComputeName(
+ PredefinedIdentKind::PrettyFunctionNoVirtual, MD);
Out << Str;
- if (MD->isPure())
+ if (MD->isPureVirtual())
Out << " [pure]";
if (MD->isDeleted())
@@ -2003,7 +2010,7 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
else
Out << "() [deleting]";
- if (DD->isPure())
+ if (DD->isPureVirtual())
Out << " [pure]";
ThunkInfo Thunk = VTableThunks.lookup(I);
@@ -2028,11 +2035,10 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
case VTableComponent::CK_UnusedFunctionPointer: {
const CXXMethodDecl *MD = Component.getUnusedFunctionDecl();
- std::string Str =
- PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
- MD);
+ std::string Str = PredefinedExpr::ComputeName(
+ PredefinedIdentKind::PrettyFunctionNoVirtual, MD);
Out << "[unused] " << Str;
- if (MD->isPure())
+ if (MD->isPureVirtual())
Out << " [pure]";
}
@@ -2107,9 +2113,8 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
for (const auto &I : Thunks) {
const CXXMethodDecl *MD = I.first;
- std::string MethodName =
- PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
- MD);
+ std::string MethodName = PredefinedExpr::ComputeName(
+ PredefinedIdentKind::PrettyFunctionNoVirtual, MD);
MethodNamesAndDecls.insert(std::make_pair(MethodName, MD));
}
@@ -2173,9 +2178,8 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
continue;
MD = MD->getCanonicalDecl();
- std::string MethodName =
- PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
- MD);
+ std::string MethodName = PredefinedExpr::ComputeName(
+ PredefinedIdentKind::PrettyFunctionNoVirtual, MD);
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
GlobalDecl GD(DD, Dtor_Complete);
@@ -2258,7 +2262,7 @@ VTableLayout::VTableLayout(ArrayRef<size_t> VTableIndices,
VTableLayout::~VTableLayout() { }
bool VTableContextBase::hasVtableSlot(const CXXMethodDecl *MD) {
- return MD->isVirtual() && !MD->isConsteval();
+ return MD->isVirtual() && !MD->isImmediateFunction();
}
ItaniumVTableContext::ItaniumVTableContext(
@@ -2329,7 +2333,7 @@ ItaniumVTableContext::computeVTableRelatedInformation(const CXXRecordDecl *RD) {
return;
ItaniumVTableBuilder Builder(*this, RD, CharUnits::Zero(),
- /*MostDerivedClassIsVirtual=*/0, RD);
+ /*MostDerivedClassIsVirtual=*/false, RD);
Entry = CreateVTableLayout(Builder);
MethodVTableIndices.insert(Builder.vtable_indices_begin(),
@@ -2498,7 +2502,7 @@ private:
SmallVector<ThunkInfo, 1> &ThunksVector = Thunks[MD];
// Check if we have this thunk already.
- if (llvm::find(ThunksVector, Thunk) != ThunksVector.end())
+ if (llvm::is_contained(ThunksVector, Thunk))
return;
ThunksVector.push_back(Thunk);
@@ -3072,7 +3076,7 @@ void VFTableBuilder::AddMethods(BaseSubobject Base, unsigned BaseDepth,
// We don't want to do this for pure virtual member functions.
BaseOffset ReturnAdjustmentOffset;
ReturnAdjustment ReturnAdjustment;
- if (!FinalOverriderMD->isPure()) {
+ if (!FinalOverriderMD->isPureVirtual()) {
ReturnAdjustmentOffset =
ComputeReturnAdjustmentBaseOffset(Context, FinalOverriderMD, MD);
}
@@ -3098,8 +3102,7 @@ void VFTableBuilder::AddMethods(BaseSubobject Base, unsigned BaseDepth,
}
static void PrintBasePath(const VPtrInfo::BasePath &Path, raw_ostream &Out) {
- for (const CXXRecordDecl *Elem :
- llvm::make_range(Path.rbegin(), Path.rend())) {
+ for (const CXXRecordDecl *Elem : llvm::reverse(Path)) {
Out << "'";
Elem->printQualifiedName(Out);
Out << "' in ";
@@ -3115,8 +3118,7 @@ static void dumpMicrosoftThunkAdjustment(const ThunkInfo &TI, raw_ostream &Out,
if (!ContinueFirstLine)
Out << LinePrefix;
Out << "[return adjustment (to type '"
- << TI.Method->getReturnType().getCanonicalType().getAsString()
- << "'): ";
+ << TI.Method->getReturnType().getCanonicalType() << "'): ";
if (R.Virtual.Microsoft.VBPtrOffset)
Out << "vbptr at offset " << R.Virtual.Microsoft.VBPtrOffset << ", ";
if (R.Virtual.Microsoft.VBIndex)
@@ -3171,9 +3173,9 @@ void VFTableBuilder::dumpLayout(raw_ostream &Out) {
// FIXME: Figure out how to print the real thunk type, since they can
// differ in the return type.
std::string Str = PredefinedExpr::ComputeName(
- PredefinedExpr::PrettyFunctionNoVirtual, MD);
+ PredefinedIdentKind::PrettyFunctionNoVirtual, MD);
Out << Str;
- if (MD->isPure())
+ if (MD->isPureVirtual())
Out << " [pure]";
if (MD->isDeleted())
@@ -3192,7 +3194,7 @@ void VFTableBuilder::dumpLayout(raw_ostream &Out) {
DD->printQualifiedName(Out);
Out << "() [scalar deleting]";
- if (DD->isPure())
+ if (DD->isPureVirtual())
Out << " [pure]";
ThunkInfo Thunk = VTableThunks.lookup(I);
@@ -3226,7 +3228,7 @@ void VFTableBuilder::dumpLayout(raw_ostream &Out) {
for (const auto &I : Thunks) {
const CXXMethodDecl *MD = I.first;
std::string MethodName = PredefinedExpr::ComputeName(
- PredefinedExpr::PrettyFunctionNoVirtual, MD);
+ PredefinedIdentKind::PrettyFunctionNoVirtual, MD);
MethodNamesAndDecls.insert(std::make_pair(MethodName, MD));
}
@@ -3386,10 +3388,8 @@ static bool rebucketPaths(VPtrInfoVector &Paths) {
// sorted vector to implement a multiset to form the buckets. Note that the
// ordering is based on pointers, but it doesn't change our output order. The
// current algorithm is designed to match MSVC 2012's names.
- llvm::SmallVector<std::reference_wrapper<VPtrInfo>, 2> PathsSorted;
- PathsSorted.reserve(Paths.size());
- for (auto& P : Paths)
- PathsSorted.push_back(*P);
+ llvm::SmallVector<std::reference_wrapper<VPtrInfo>, 2> PathsSorted(
+ llvm::make_pointee_range(Paths));
llvm::sort(PathsSorted, [](const VPtrInfo &LHS, const VPtrInfo &RHS) {
return LHS.MangledPath < RHS.MangledPath;
});
@@ -3454,7 +3454,7 @@ static void removeRedundantPaths(std::list<FullPathTy> &FullPaths) {
if (&SpecificPath == &OtherPath)
continue;
if (llvm::all_of(SpecificPath, [&](const BaseSubobject &BSO) {
- return OtherPath.count(BSO) != 0;
+ return OtherPath.contains(BSO);
})) {
return true;
}
@@ -3660,7 +3660,7 @@ void MicrosoftVTableContext::dumpMethodLocations(
assert(hasVtableSlot(MD));
std::string MethodName = PredefinedExpr::ComputeName(
- PredefinedExpr::PrettyFunctionNoVirtual, MD);
+ PredefinedIdentKind::PrettyFunctionNoVirtual, MD);
if (isa<CXXDestructorDecl>(MD)) {
IndicesMap[I.second] = MethodName + " [scalar deleting]";
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchFinder.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchFinder.cpp
index 5d6cea54b8ec..0bac2ed63a92 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchFinder.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchFinder.cpp
@@ -18,9 +18,12 @@
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/Timer.h"
#include <deque>
#include <memory>
@@ -133,6 +136,8 @@ public:
else if (const TemplateArgumentLoc *TALoc =
DynNode.get<TemplateArgumentLoc>())
traverse(*TALoc);
+ else if (const Attr *A = DynNode.get<Attr>())
+ traverse(*A);
// FIXME: Add other base types after adding tests.
// It's OK to always overwrite the bound nodes, as if there was
@@ -263,6 +268,15 @@ public:
return match(*Node->getLHS()) && match(*Node->getRHS());
}
+ bool TraverseAttr(Attr *A) {
+ if (A == nullptr ||
+ (A->isImplicit() &&
+ Finder->getASTContext().getParentMapContext().getTraversalKind() ==
+ TK_IgnoreUnlessSpelledInSource))
+ return true;
+ ScopedIncrement ScopedDepth(&CurrentDepth);
+ return traverse(*A);
+ }
bool TraverseLambdaExpr(LambdaExpr *Node) {
if (!Finder->isTraversalIgnoringImplicitNodes())
return VisitorBase::TraverseLambdaExpr(Node);
@@ -345,6 +359,9 @@ private:
bool baseTraverse(TemplateArgumentLoc TAL) {
return VisitorBase::TraverseTemplateArgumentLoc(TAL);
}
+ bool baseTraverse(const Attr &AttrNode) {
+ return VisitorBase::TraverseAttr(const_cast<Attr *>(&AttrNode));
+ }
// Sets 'Matched' to true if 'Matcher' matches 'Node' and:
// 0 < CurrentDepth <= MaxDepth.
@@ -414,7 +431,7 @@ public:
}
void onStartOfTranslationUnit() {
- const bool EnableCheckProfiling = Options.CheckProfiling.hasValue();
+ const bool EnableCheckProfiling = Options.CheckProfiling.has_value();
TimeBucketRegion Timer;
for (MatchCallback *MC : Matchers->AllCallbacks) {
if (EnableCheckProfiling)
@@ -424,7 +441,7 @@ public:
}
void onEndOfTranslationUnit() {
- const bool EnableCheckProfiling = Options.CheckProfiling.hasValue();
+ const bool EnableCheckProfiling = Options.CheckProfiling.has_value();
TimeBucketRegion Timer;
for (MatchCallback *MC : Matchers->AllCallbacks) {
if (EnableCheckProfiling)
@@ -489,6 +506,7 @@ public:
bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS);
bool TraverseConstructorInitializer(CXXCtorInitializer *CtorInit);
bool TraverseTemplateArgumentLoc(TemplateArgumentLoc TAL);
+ bool TraverseAttr(Attr *AttrNode);
bool dataTraverseNode(Stmt *S, DataRecursionQueue *Queue) {
if (auto *RF = dyn_cast<CXXForRangeStmt>(S)) {
@@ -635,11 +653,20 @@ public:
BoundNodesTreeBuilder *Builder,
bool Directly) override;
+private:
+ bool
+ classIsDerivedFromImpl(const CXXRecordDecl *Declaration,
+ const Matcher<NamedDecl> &Base,
+ BoundNodesTreeBuilder *Builder, bool Directly,
+ llvm::SmallPtrSetImpl<const CXXRecordDecl *> &Visited);
+
+public:
bool objcClassIsDerivedFrom(const ObjCInterfaceDecl *Declaration,
const Matcher<NamedDecl> &Base,
BoundNodesTreeBuilder *Builder,
bool Directly) override;
+public:
// Implements ASTMatchFinder::matchesChildOf.
bool matchesChildOf(const DynTypedNode &Node, ASTContext &Ctx,
const DynTypedMatcher &Matcher,
@@ -694,6 +721,8 @@ public:
match(*N);
} else if (auto *N = Node.get<TemplateArgumentLoc>()) {
match(*N);
+ } else if (auto *N = Node.get<Attr>()) {
+ match(*N);
}
}
@@ -748,6 +777,191 @@ private:
bool TraversingASTNodeNotAsIs = false;
bool TraversingASTChildrenNotSpelledInSource = false;
+ class CurMatchData {
+// We don't have enough free low bits in 32bit builds to discriminate 8 pointer
+// types in PointerUnion. so split the union in 2 using a free bit from the
+// callback pointer.
+#define CMD_TYPES_0 \
+ const QualType *, const TypeLoc *, const NestedNameSpecifier *, \
+ const NestedNameSpecifierLoc *
+#define CMD_TYPES_1 \
+ const CXXCtorInitializer *, const TemplateArgumentLoc *, const Attr *, \
+ const DynTypedNode *
+
+#define IMPL(Index) \
+ template <typename NodeType> \
+ std::enable_if_t< \
+ llvm::is_one_of<const NodeType *, CMD_TYPES_##Index>::value> \
+ SetCallbackAndRawNode(const MatchCallback *CB, const NodeType &N) { \
+ assertEmpty(); \
+ Callback.setPointerAndInt(CB, Index); \
+ Node##Index = &N; \
+ } \
+ \
+ template <typename T> \
+ std::enable_if_t<llvm::is_one_of<const T *, CMD_TYPES_##Index>::value, \
+ const T *> \
+ getNode() const { \
+ assertHoldsState(); \
+ return Callback.getInt() == (Index) ? Node##Index.dyn_cast<const T *>() \
+ : nullptr; \
+ }
+
+ public:
+ CurMatchData() : Node0(nullptr) {}
+
+ IMPL(0)
+ IMPL(1)
+
+ const MatchCallback *getCallback() const { return Callback.getPointer(); }
+
+ void SetBoundNodes(const BoundNodes &BN) {
+ assertHoldsState();
+ BNodes = &BN;
+ }
+
+ void clearBoundNodes() {
+ assertHoldsState();
+ BNodes = nullptr;
+ }
+
+ const BoundNodes *getBoundNodes() const {
+ assertHoldsState();
+ return BNodes;
+ }
+
+ void reset() {
+ assertHoldsState();
+ Callback.setPointerAndInt(nullptr, 0);
+ Node0 = nullptr;
+ }
+
+ private:
+ void assertHoldsState() const {
+ assert(Callback.getPointer() != nullptr && !Node0.isNull());
+ }
+
+ void assertEmpty() const {
+ assert(Callback.getPointer() == nullptr && Node0.isNull() &&
+ BNodes == nullptr);
+ }
+
+ llvm::PointerIntPair<const MatchCallback *, 1> Callback;
+ union {
+ llvm::PointerUnion<CMD_TYPES_0> Node0;
+ llvm::PointerUnion<CMD_TYPES_1> Node1;
+ };
+ const BoundNodes *BNodes = nullptr;
+
+#undef CMD_TYPES_0
+#undef CMD_TYPES_1
+#undef IMPL
+ } CurMatchState;
+
+ struct CurMatchRAII {
+ template <typename NodeType>
+ CurMatchRAII(MatchASTVisitor &MV, const MatchCallback *CB,
+ const NodeType &NT)
+ : MV(MV) {
+ MV.CurMatchState.SetCallbackAndRawNode(CB, NT);
+ }
+
+ ~CurMatchRAII() { MV.CurMatchState.reset(); }
+
+ private:
+ MatchASTVisitor &MV;
+ };
+
+public:
+ class TraceReporter : llvm::PrettyStackTraceEntry {
+ static void dumpNode(const ASTContext &Ctx, const DynTypedNode &Node,
+ raw_ostream &OS) {
+ if (const auto *D = Node.get<Decl>()) {
+ OS << D->getDeclKindName() << "Decl ";
+ if (const auto *ND = dyn_cast<NamedDecl>(D)) {
+ ND->printQualifiedName(OS);
+ OS << " : ";
+ } else
+ OS << ": ";
+ D->getSourceRange().print(OS, Ctx.getSourceManager());
+ } else if (const auto *S = Node.get<Stmt>()) {
+ OS << S->getStmtClassName() << " : ";
+ S->getSourceRange().print(OS, Ctx.getSourceManager());
+ } else if (const auto *T = Node.get<Type>()) {
+ OS << T->getTypeClassName() << "Type : ";
+ QualType(T, 0).print(OS, Ctx.getPrintingPolicy());
+ } else if (const auto *QT = Node.get<QualType>()) {
+ OS << "QualType : ";
+ QT->print(OS, Ctx.getPrintingPolicy());
+ } else {
+ OS << Node.getNodeKind().asStringRef() << " : ";
+ Node.getSourceRange().print(OS, Ctx.getSourceManager());
+ }
+ }
+
+ static void dumpNodeFromState(const ASTContext &Ctx,
+ const CurMatchData &State, raw_ostream &OS) {
+ if (const DynTypedNode *MatchNode = State.getNode<DynTypedNode>()) {
+ dumpNode(Ctx, *MatchNode, OS);
+ } else if (const auto *QT = State.getNode<QualType>()) {
+ dumpNode(Ctx, DynTypedNode::create(*QT), OS);
+ } else if (const auto *TL = State.getNode<TypeLoc>()) {
+ dumpNode(Ctx, DynTypedNode::create(*TL), OS);
+ } else if (const auto *NNS = State.getNode<NestedNameSpecifier>()) {
+ dumpNode(Ctx, DynTypedNode::create(*NNS), OS);
+ } else if (const auto *NNSL = State.getNode<NestedNameSpecifierLoc>()) {
+ dumpNode(Ctx, DynTypedNode::create(*NNSL), OS);
+ } else if (const auto *CtorInit = State.getNode<CXXCtorInitializer>()) {
+ dumpNode(Ctx, DynTypedNode::create(*CtorInit), OS);
+ } else if (const auto *TAL = State.getNode<TemplateArgumentLoc>()) {
+ dumpNode(Ctx, DynTypedNode::create(*TAL), OS);
+ } else if (const auto *At = State.getNode<Attr>()) {
+ dumpNode(Ctx, DynTypedNode::create(*At), OS);
+ }
+ }
+
+ public:
+ TraceReporter(const MatchASTVisitor &MV) : MV(MV) {}
+ void print(raw_ostream &OS) const override {
+ const CurMatchData &State = MV.CurMatchState;
+ const MatchCallback *CB = State.getCallback();
+ if (!CB) {
+ OS << "ASTMatcher: Not currently matching\n";
+ return;
+ }
+
+ assert(MV.ActiveASTContext &&
+ "ActiveASTContext should be set if there is a matched callback");
+
+ ASTContext &Ctx = MV.getASTContext();
+
+ if (const BoundNodes *Nodes = State.getBoundNodes()) {
+ OS << "ASTMatcher: Processing '" << CB->getID() << "' against:\n\t";
+ dumpNodeFromState(Ctx, State, OS);
+ const BoundNodes::IDToNodeMap &Map = Nodes->getMap();
+ if (Map.empty()) {
+ OS << "\nNo bound nodes\n";
+ return;
+ }
+ OS << "\n--- Bound Nodes Begin ---\n";
+ for (const auto &Item : Map) {
+ OS << " " << Item.first << " - { ";
+ dumpNode(Ctx, Item.second, OS);
+ OS << " }\n";
+ }
+ OS << "--- Bound Nodes End ---\n";
+ } else {
+ OS << "ASTMatcher: Matching '" << CB->getID() << "' against:\n\t";
+ dumpNodeFromState(Ctx, State, OS);
+ OS << '\n';
+ }
+ }
+
+ private:
+ const MatchASTVisitor &MV;
+ };
+
+private:
struct ASTNodeNotSpelledInSourceScope {
ASTNodeNotSpelledInSourceScope(MatchASTVisitor *V, bool B)
: MV(V), MB(V->TraversingASTNodeNotSpelledInSource) {
@@ -776,7 +990,7 @@ private:
class TimeBucketRegion {
public:
- TimeBucketRegion() : Bucket(nullptr) {}
+ TimeBucketRegion() = default;
~TimeBucketRegion() { setBucket(nullptr); }
/// Start timing for \p NewBucket.
@@ -799,7 +1013,7 @@ private:
}
private:
- llvm::TimeRecord *Bucket;
+ llvm::TimeRecord *Bucket = nullptr;
};
/// Runs all the \p Matchers on \p Node.
@@ -807,14 +1021,15 @@ private:
/// Used by \c matchDispatch() below.
template <typename T, typename MC>
void matchWithoutFilter(const T &Node, const MC &Matchers) {
- const bool EnableCheckProfiling = Options.CheckProfiling.hasValue();
+ const bool EnableCheckProfiling = Options.CheckProfiling.has_value();
TimeBucketRegion Timer;
for (const auto &MP : Matchers) {
if (EnableCheckProfiling)
Timer.setBucket(&TimeByBucket[MP.second->getID()]);
BoundNodesTreeBuilder Builder;
+ CurMatchRAII RAII(*this, MP.second, Node);
if (MP.first.matches(Node, this, &Builder)) {
- MatchVisitor Visitor(ActiveASTContext, MP.second);
+ MatchVisitor Visitor(*this, ActiveASTContext, MP.second);
Builder.visitMatches(&Visitor);
}
}
@@ -829,7 +1044,7 @@ private:
if (Filter.empty())
return;
- const bool EnableCheckProfiling = Options.CheckProfiling.hasValue();
+ const bool EnableCheckProfiling = Options.CheckProfiling.has_value();
TimeBucketRegion Timer;
auto &Matchers = this->Matchers->DeclOrStmt;
for (unsigned short I : Filter) {
@@ -845,8 +1060,9 @@ private:
continue;
}
+ CurMatchRAII RAII(*this, MP.second, DynNode);
if (MP.first.matches(DynNode, this, &Builder)) {
- MatchVisitor Visitor(ActiveASTContext, MP.second);
+ MatchVisitor Visitor(*this, ActiveASTContext, MP.second);
Builder.visitMatches(&Visitor);
}
}
@@ -894,6 +1110,9 @@ private:
void matchDispatch(const TemplateArgumentLoc *Node) {
matchWithoutFilter(*Node, Matchers->TemplateArgumentLoc);
}
+ void matchDispatch(const Attr *Node) {
+ matchWithoutFilter(*Node, Matchers->Attr);
+ }
void matchDispatch(const void *) { /* Do nothing. */ }
/// @}
@@ -1029,18 +1248,31 @@ private:
// Implements a BoundNodesTree::Visitor that calls a MatchCallback with
// the aggregated bound nodes for each match.
class MatchVisitor : public BoundNodesTreeBuilder::Visitor {
+ struct CurBoundScope {
+ CurBoundScope(MatchASTVisitor::CurMatchData &State, const BoundNodes &BN)
+ : State(State) {
+ State.SetBoundNodes(BN);
+ }
+
+ ~CurBoundScope() { State.clearBoundNodes(); }
+
+ private:
+ MatchASTVisitor::CurMatchData &State;
+ };
+
public:
- MatchVisitor(ASTContext* Context,
- MatchFinder::MatchCallback* Callback)
- : Context(Context),
- Callback(Callback) {}
+ MatchVisitor(MatchASTVisitor &MV, ASTContext *Context,
+ MatchFinder::MatchCallback *Callback)
+ : State(MV.CurMatchState), Context(Context), Callback(Callback) {}
void visitMatch(const BoundNodes& BoundNodesView) override {
TraversalKindScope RAII(*Context, Callback->getCheckTraversalKind());
+ CurBoundScope RAII2(State, BoundNodesView);
Callback->run(MatchFinder::MatchResult(BoundNodesView, Context));
}
private:
+ MatchASTVisitor::CurMatchData &State;
ASTContext* Context;
MatchFinder::MatchCallback* Callback;
};
@@ -1140,8 +1372,18 @@ bool MatchASTVisitor::classIsDerivedFrom(const CXXRecordDecl *Declaration,
const Matcher<NamedDecl> &Base,
BoundNodesTreeBuilder *Builder,
bool Directly) {
+ llvm::SmallPtrSet<const CXXRecordDecl *, 8> Visited;
+ return classIsDerivedFromImpl(Declaration, Base, Builder, Directly, Visited);
+}
+
+bool MatchASTVisitor::classIsDerivedFromImpl(
+ const CXXRecordDecl *Declaration, const Matcher<NamedDecl> &Base,
+ BoundNodesTreeBuilder *Builder, bool Directly,
+ llvm::SmallPtrSetImpl<const CXXRecordDecl *> &Visited) {
if (!Declaration->hasDefinition())
return false;
+ if (!Visited.insert(Declaration).second)
+ return false;
for (const auto &It : Declaration->bases()) {
const Type *TypeNode = It.getType().getTypePtr();
@@ -1163,7 +1405,8 @@ bool MatchASTVisitor::classIsDerivedFrom(const CXXRecordDecl *Declaration,
*Builder = std::move(Result);
return true;
}
- if (!Directly && classIsDerivedFrom(ClassDecl, Base, Builder, Directly))
+ if (!Directly &&
+ classIsDerivedFromImpl(ClassDecl, Base, Builder, Directly, Visited))
return true;
}
return false;
@@ -1300,6 +1543,11 @@ bool MatchASTVisitor::TraverseTemplateArgumentLoc(TemplateArgumentLoc Loc) {
return RecursiveASTVisitor<MatchASTVisitor>::TraverseTemplateArgumentLoc(Loc);
}
+bool MatchASTVisitor::TraverseAttr(Attr *AttrNode) {
+ match(*AttrNode);
+ return RecursiveASTVisitor<MatchASTVisitor>::TraverseAttr(AttrNode);
+}
+
class MatchASTConsumer : public ASTConsumer {
public:
MatchASTConsumer(MatchFinder *Finder,
@@ -1336,7 +1584,7 @@ MatchFinder::~MatchFinder() {}
void MatchFinder::addMatcher(const DeclarationMatcher &NodeMatch,
MatchCallback *Action) {
- llvm::Optional<TraversalKind> TK;
+ std::optional<TraversalKind> TK;
if (Action)
TK = Action->getCheckTraversalKind();
if (TK)
@@ -1354,7 +1602,7 @@ void MatchFinder::addMatcher(const TypeMatcher &NodeMatch,
void MatchFinder::addMatcher(const StatementMatcher &NodeMatch,
MatchCallback *Action) {
- llvm::Optional<TraversalKind> TK;
+ std::optional<TraversalKind> TK;
if (Action)
TK = Action->getCheckTraversalKind();
if (TK)
@@ -1394,6 +1642,12 @@ void MatchFinder::addMatcher(const TemplateArgumentLocMatcher &NodeMatch,
Matchers.AllCallbacks.insert(Action);
}
+void MatchFinder::addMatcher(const AttrMatcher &AttrMatch,
+ MatchCallback *Action) {
+ Matchers.Attr.emplace_back(AttrMatch, Action);
+ Matchers.AllCallbacks.insert(Action);
+}
+
bool MatchFinder::addDynamicMatcher(const internal::DynTypedMatcher &NodeMatch,
MatchCallback *Action) {
if (NodeMatch.canConvertTo<Decl>()) {
@@ -1420,6 +1674,9 @@ bool MatchFinder::addDynamicMatcher(const internal::DynTypedMatcher &NodeMatch,
} else if (NodeMatch.canConvertTo<TemplateArgumentLoc>()) {
addMatcher(NodeMatch.convertTo<TemplateArgumentLoc>(), Action);
return true;
+ } else if (NodeMatch.canConvertTo<Attr>()) {
+ addMatcher(NodeMatch.convertTo<Attr>(), Action);
+ return true;
}
return false;
}
@@ -1436,6 +1693,7 @@ void MatchFinder::match(const clang::DynTypedNode &Node, ASTContext &Context) {
void MatchFinder::matchAST(ASTContext &Context) {
internal::MatchASTVisitor Visitor(&Matchers, Options);
+ internal::MatchASTVisitor::TraceReporter StackTrace(Visitor);
Visitor.set_active_ast_context(&Context);
Visitor.onStartOfTranslationUnit();
Visitor.TraverseAST(Context);
@@ -1449,9 +1707,9 @@ void MatchFinder::registerTestCallbackAfterParsing(
StringRef MatchFinder::MatchCallback::getID() const { return "<unknown>"; }
-llvm::Optional<TraversalKind>
+std::optional<TraversalKind>
MatchFinder::MatchCallback::getCheckTraversalKind() const {
- return llvm::None;
+ return std::nullopt;
}
} // end namespace ast_matchers
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
index 169ce3b83980..bf87b1aa0992 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
@@ -22,7 +22,6 @@
#include "clang/Lex/Lexer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
-#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
@@ -35,6 +34,7 @@
#include <algorithm>
#include <cassert>
#include <cstddef>
+#include <optional>
#include <string>
#include <utility>
#include <vector>
@@ -87,7 +87,7 @@ bool matchesAnyBase(const CXXRecordDecl &Node,
[Finder, Builder, &BaseSpecMatcher](const CXXBaseSpecifier *BaseSpec,
CXXBasePath &IgnoredParam) {
BoundNodesTreeBuilder Result(*Builder);
- if (BaseSpecMatcher.matches(*BaseSpec, Finder, Builder)) {
+ if (BaseSpecMatcher.matches(*BaseSpec, Finder, &Result)) {
*Builder = std::move(Result);
return true;
}
@@ -140,7 +140,7 @@ public:
return Result;
}
- llvm::Optional<clang::TraversalKind> TraversalKind() const override {
+ std::optional<clang::TraversalKind> TraversalKind() const override {
return InnerMatcher->TraversalKind();
}
@@ -176,7 +176,7 @@ public:
return this->InnerMatcher->dynMatches(DynNode, Finder, Builder);
}
- llvm::Optional<clang::TraversalKind> TraversalKind() const override {
+ std::optional<clang::TraversalKind> TraversalKind() const override {
return TK;
}
@@ -339,8 +339,9 @@ bool DynTypedMatcher::matchesNoKindCheck(const DynTypedNode &DynNode,
return false;
}
-llvm::Optional<DynTypedMatcher> DynTypedMatcher::tryBind(StringRef ID) const {
- if (!AllowBind) return llvm::None;
+std::optional<DynTypedMatcher> DynTypedMatcher::tryBind(StringRef ID) const {
+ if (!AllowBind)
+ return std::nullopt;
auto Result = *this;
Result.Implementation =
new IdDynMatcher(ID, std::move(Result.Implementation));
@@ -468,8 +469,8 @@ hasAnyOverloadedOperatorNameFunc(ArrayRef<const StringRef *> NameRefs) {
}
HasNameMatcher::HasNameMatcher(std::vector<std::string> N)
- : UseUnqualifiedMatch(llvm::all_of(
- N, [](StringRef Name) { return Name.find("::") == Name.npos; })),
+ : UseUnqualifiedMatch(
+ llvm::all_of(N, [](StringRef Name) { return !Name.contains("::"); })),
Names(std::move(N)) {
#ifndef NDEBUG
for (StringRef Name : Names)
@@ -479,11 +480,11 @@ HasNameMatcher::HasNameMatcher(std::vector<std::string> N)
static bool consumeNameSuffix(StringRef &FullName, StringRef Suffix) {
StringRef Name = FullName;
- if (!Name.endswith(Suffix))
+ if (!Name.ends_with(Suffix))
return false;
Name = Name.drop_back(Suffix.size());
if (!Name.empty()) {
- if (!Name.endswith("::"))
+ if (!Name.ends_with("::"))
return false;
Name = Name.drop_back(2);
}
@@ -529,7 +530,7 @@ public:
PatternSet(ArrayRef<std::string> Names) {
Patterns.reserve(Names.size());
for (StringRef Name : Names)
- Patterns.push_back({Name, Name.startswith("::")});
+ Patterns.push_back({Name, Name.starts_with("::")});
}
/// Consumes the name suffix from each pattern in the set and removes the ones
@@ -651,11 +652,11 @@ bool HasNameMatcher::matchesNodeFullSlow(const NamedDecl &Node) const {
const StringRef FullName = OS.str();
for (const StringRef Pattern : Names) {
- if (Pattern.startswith("::")) {
+ if (Pattern.starts_with("::")) {
if (FullName == Pattern)
return true;
- } else if (FullName.endswith(Pattern) &&
- FullName.drop_back(Pattern.size()).endswith("::")) {
+ } else if (FullName.ends_with(Pattern) &&
+ FullName.drop_back(Pattern.size()).ends_with("::")) {
return true;
}
}
@@ -685,7 +686,7 @@ static bool isTokenAtLoc(const SourceManager &SM, const LangOptions &LangOpts,
return !Invalid && Text == TokenText;
}
-llvm::Optional<SourceLocation>
+std::optional<SourceLocation>
getExpansionLocOfMacro(StringRef MacroName, SourceLocation Loc,
const ASTContext &Context) {
auto &SM = Context.getSourceManager();
@@ -696,14 +697,14 @@ getExpansionLocOfMacro(StringRef MacroName, SourceLocation Loc,
if (Expansion.isMacroArgExpansion())
// Check macro argument for an expansion of the given macro. For example,
// `F(G(3))`, where `MacroName` is `G`.
- if (llvm::Optional<SourceLocation> ArgLoc = getExpansionLocOfMacro(
+ if (std::optional<SourceLocation> ArgLoc = getExpansionLocOfMacro(
MacroName, Expansion.getSpellingLoc(), Context))
return ArgLoc;
Loc = Expansion.getExpansionLocStart();
if (isTokenAtLoc(SM, LangOpts, MacroName, Loc))
return Loc;
}
- return llvm::None;
+ return std::nullopt;
}
std::shared_ptr<llvm::Regex> createAndVerifyRegex(StringRef Regex,
@@ -768,9 +769,23 @@ const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTemplateParmDecl>
templateTemplateParmDecl;
+const internal::VariadicAllOfMatcher<LambdaCapture> lambdaCapture;
const internal::VariadicAllOfMatcher<QualType> qualType;
const internal::VariadicAllOfMatcher<Type> type;
const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;
+
+const internal::VariadicDynCastAllOfMatcher<TypeLoc, QualifiedTypeLoc>
+ qualifiedTypeLoc;
+const internal::VariadicDynCastAllOfMatcher<TypeLoc, PointerTypeLoc>
+ pointerTypeLoc;
+const internal::VariadicDynCastAllOfMatcher<TypeLoc, ReferenceTypeLoc>
+ referenceTypeLoc;
+const internal::VariadicDynCastAllOfMatcher<TypeLoc,
+ TemplateSpecializationTypeLoc>
+ templateSpecializationTypeLoc;
+const internal::VariadicDynCastAllOfMatcher<TypeLoc, ElaboratedTypeLoc>
+ elaboratedTypeLoc;
+
const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryExprOrTypeTraitExpr>
unaryExprOrTypeTraitExpr;
const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl;
@@ -785,6 +800,7 @@ const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl> cxxMethodDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
cxxConversionDecl;
+const internal::VariadicDynCastAllOfMatcher<Decl, ConceptDecl> conceptDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
@@ -867,12 +883,17 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
cxxNoexceptExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr>
arraySubscriptExpr;
+const internal::VariadicDynCastAllOfMatcher<Stmt, ArrayInitIndexExpr>
+ arrayInitIndexExpr;
+const internal::VariadicDynCastAllOfMatcher<Stmt, ArrayInitLoopExpr>
+ arrayInitLoopExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
cxxDefaultArgExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
cxxOperatorCallExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXRewrittenBinaryOperator>
cxxRewrittenBinaryOperator;
+const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFoldExpr> cxxFoldExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr> declRefExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr> objcIvarRefExpr;
@@ -895,6 +916,8 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase;
const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt> defaultStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt> compoundStmt;
+const internal::VariadicDynCastAllOfMatcher<Stmt, CoroutineBodyStmt>
+ coroutineBodyStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt> cxxCatchStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr> cxxThrowExpr;
@@ -903,6 +926,7 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr>
cxxBoolLiteral;
const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral> stringLiteral;
+const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCStringLiteral> objcStringLiteral;
const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
characterLiteral;
const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
@@ -918,6 +942,8 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr> chooseExpr;
+const internal::VariadicDynCastAllOfMatcher<Stmt, ConvertVectorExpr>
+ convertVectorExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, CoawaitExpr>
coawaitExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, DependentCoawaitExpr>
@@ -1000,19 +1026,20 @@ const internal::ArgumentAdaptingMatcherFunc<internal::ForEachDescendantMatcher>
forEachDescendant = {};
const internal::ArgumentAdaptingMatcherFunc<
internal::HasParentMatcher,
- internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
- internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
+ internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc, Attr>,
+ internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc, Attr>>
hasParent = {};
const internal::ArgumentAdaptingMatcherFunc<
internal::HasAncestorMatcher,
- internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
- internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
+ internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc, Attr>,
+ internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc, Attr>>
hasAncestor = {};
const internal::VariadicOperatorMatcherFunc<1, 1> unless = {
internal::DynTypedMatcher::VO_UnaryNot};
const internal::VariadicAllOfMatcher<NestedNameSpecifier> nestedNameSpecifier;
const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc>
nestedNameSpecifierLoc;
+const internal::VariadicAllOfMatcher<Attr> attr;
const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr>
cudaKernelCallExpr;
const AstTypeMatcher<BuiltinType> builtinType;
@@ -1022,6 +1049,7 @@ const AstTypeMatcher<ConstantArrayType> constantArrayType;
const AstTypeMatcher<DeducedTemplateSpecializationType>
deducedTemplateSpecializationType;
const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType;
+const AstTypeMatcher<DependentSizedExtVectorType> dependentSizedExtVectorType;
const AstTypeMatcher<IncompleteArrayType> incompleteArrayType;
const AstTypeMatcher<VariableArrayType> variableArrayType;
const AstTypeMatcher<AtomicType> atomicType;
@@ -1031,6 +1059,7 @@ const AstTypeMatcher<FunctionType> functionType;
const AstTypeMatcher<FunctionProtoType> functionProtoType;
const AstTypeMatcher<ParenType> parenType;
const AstTypeMatcher<BlockPointerType> blockPointerType;
+const AstTypeMatcher<MacroQualifiedType> macroQualifiedType;
const AstTypeMatcher<MemberPointerType> memberPointerType;
const AstTypeMatcher<PointerType> pointerType;
const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType;
@@ -1044,6 +1073,7 @@ const AstTypeMatcher<UnaryTransformType> unaryTransformType;
const AstTypeMatcher<RecordType> recordType;
const AstTypeMatcher<TagType> tagType;
const AstTypeMatcher<ElaboratedType> elaboratedType;
+const AstTypeMatcher<UsingType> usingType;
const AstTypeMatcher<SubstTemplateTypeParmType> substTemplateTypeParmType;
const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;
const AstTypeMatcher<InjectedClassNameType> injectedClassNameType;
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp
index ba2f49e6b623..41ab0ed70fda 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp
@@ -204,7 +204,7 @@ std::string Diagnostics::toString() const {
std::string S;
llvm::raw_string_ostream OS(S);
printToStream(OS);
- return OS.str();
+ return S;
}
void Diagnostics::printToStreamFull(llvm::raw_ostream &OS) const {
@@ -223,7 +223,7 @@ std::string Diagnostics::toStringFull() const {
std::string S;
llvm::raw_string_ostream OS(S);
printToStreamFull(OS);
- return OS.str();
+ return S;
}
} // namespace dynamic
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp
index 40db70e6f4a5..cf9ae7c974a6 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp
@@ -8,12 +8,12 @@
#include "Marshallers.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Regex.h"
+#include <optional>
#include <string>
-static llvm::Optional<std::string>
+static std::optional<std::string>
getBestGuess(llvm::StringRef Search, llvm::ArrayRef<llvm::StringRef> Allowed,
llvm::StringRef DropPrefix = "", unsigned MaxEditDistance = 3) {
if (MaxEditDistance != ~0U)
@@ -56,10 +56,10 @@ getBestGuess(llvm::StringRef Search, llvm::ArrayRef<llvm::StringRef> Allowed,
if (!Res.empty())
return Res.str();
}
- return llvm::None;
+ return std::nullopt;
}
-llvm::Optional<std::string>
+std::optional<std::string>
clang::ast_matchers::dynamic::internal::ArgTypeTraits<
clang::attr::Kind>::getBestGuess(const VariantValue &Value) {
static constexpr llvm::StringRef Allowed[] = {
@@ -67,12 +67,11 @@ clang::ast_matchers::dynamic::internal::ArgTypeTraits<
#include "clang/Basic/AttrList.inc"
};
if (Value.isString())
- return ::getBestGuess(Value.getString(), llvm::makeArrayRef(Allowed),
- "attr::");
- return llvm::None;
+ return ::getBestGuess(Value.getString(), llvm::ArrayRef(Allowed), "attr::");
+ return std::nullopt;
}
-llvm::Optional<std::string>
+std::optional<std::string>
clang::ast_matchers::dynamic::internal::ArgTypeTraits<
clang::CastKind>::getBestGuess(const VariantValue &Value) {
static constexpr llvm::StringRef Allowed[] = {
@@ -80,12 +79,11 @@ clang::ast_matchers::dynamic::internal::ArgTypeTraits<
#include "clang/AST/OperationKinds.def"
};
if (Value.isString())
- return ::getBestGuess(Value.getString(), llvm::makeArrayRef(Allowed),
- "CK_");
- return llvm::None;
+ return ::getBestGuess(Value.getString(), llvm::ArrayRef(Allowed), "CK_");
+ return std::nullopt;
}
-llvm::Optional<std::string>
+std::optional<std::string>
clang::ast_matchers::dynamic::internal::ArgTypeTraits<
clang::OpenMPClauseKind>::getBestGuess(const VariantValue &Value) {
static constexpr llvm::StringRef Allowed[] = {
@@ -94,12 +92,11 @@ clang::ast_matchers::dynamic::internal::ArgTypeTraits<
#include "llvm/Frontend/OpenMP/OMP.inc"
};
if (Value.isString())
- return ::getBestGuess(Value.getString(), llvm::makeArrayRef(Allowed),
- "OMPC_");
- return llvm::None;
+ return ::getBestGuess(Value.getString(), llvm::ArrayRef(Allowed), "OMPC_");
+ return std::nullopt;
}
-llvm::Optional<std::string>
+std::optional<std::string>
clang::ast_matchers::dynamic::internal::ArgTypeTraits<
clang::UnaryExprOrTypeTrait>::getBestGuess(const VariantValue &Value) {
static constexpr llvm::StringRef Allowed[] = {
@@ -108,9 +105,8 @@ clang::ast_matchers::dynamic::internal::ArgTypeTraits<
#include "clang/Basic/TokenKinds.def"
};
if (Value.isString())
- return ::getBestGuess(Value.getString(), llvm::makeArrayRef(Allowed),
- "UETT_");
- return llvm::None;
+ return ::getBestGuess(Value.getString(), llvm::ArrayRef(Allowed), "UETT_");
+ return std::nullopt;
}
static constexpr std::pair<llvm::StringRef, llvm::Regex::RegexFlags>
@@ -121,55 +117,54 @@ static constexpr std::pair<llvm::StringRef, llvm::Regex::RegexFlags>
{"BasicRegex", llvm::Regex::RegexFlags::BasicRegex},
};
-static llvm::Optional<llvm::Regex::RegexFlags>
+static std::optional<llvm::Regex::RegexFlags>
getRegexFlag(llvm::StringRef Flag) {
for (const auto &StringFlag : RegexMap) {
if (Flag == StringFlag.first)
return StringFlag.second;
}
- return llvm::None;
+ return std::nullopt;
}
-static llvm::Optional<llvm::StringRef>
-getCloseRegexMatch(llvm::StringRef Flag) {
+static std::optional<llvm::StringRef> getCloseRegexMatch(llvm::StringRef Flag) {
for (const auto &StringFlag : RegexMap) {
if (Flag.edit_distance(StringFlag.first) < 3)
return StringFlag.first;
}
- return llvm::None;
+ return std::nullopt;
}
-llvm::Optional<llvm::Regex::RegexFlags>
+std::optional<llvm::Regex::RegexFlags>
clang::ast_matchers::dynamic::internal::ArgTypeTraits<
llvm::Regex::RegexFlags>::getFlags(llvm::StringRef Flags) {
- llvm::Optional<llvm::Regex::RegexFlags> Flag;
+ std::optional<llvm::Regex::RegexFlags> Flag;
SmallVector<StringRef, 4> Split;
Flags.split(Split, '|', -1, false);
for (StringRef OrFlag : Split) {
- if (llvm::Optional<llvm::Regex::RegexFlags> NextFlag =
+ if (std::optional<llvm::Regex::RegexFlags> NextFlag =
getRegexFlag(OrFlag.trim()))
- Flag = Flag.getValueOr(llvm::Regex::NoFlags) | *NextFlag;
+ Flag = Flag.value_or(llvm::Regex::NoFlags) | *NextFlag;
else
- return None;
+ return std::nullopt;
}
return Flag;
}
-llvm::Optional<std::string>
+std::optional<std::string>
clang::ast_matchers::dynamic::internal::ArgTypeTraits<
llvm::Regex::RegexFlags>::getBestGuess(const VariantValue &Value) {
if (!Value.isString())
- return llvm::None;
+ return std::nullopt;
SmallVector<StringRef, 4> Split;
llvm::StringRef(Value.getString()).split(Split, '|', -1, false);
for (llvm::StringRef &Flag : Split) {
- if (llvm::Optional<llvm::StringRef> BestGuess =
+ if (std::optional<llvm::StringRef> BestGuess =
getCloseRegexMatch(Flag.trim()))
Flag = *BestGuess;
else
- return None;
+ return std::nullopt;
}
if (Split.empty())
- return None;
+ return std::nullopt;
return llvm::join(Split, " | ");
}
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h
index 783fb203c408..c76ddf17b719 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h
@@ -29,8 +29,6 @@
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
@@ -41,6 +39,7 @@
#include <iterator>
#include <limits>
#include <memory>
+#include <optional>
#include <string>
#include <utility>
#include <vector>
@@ -71,8 +70,8 @@ template <> struct ArgTypeTraits<std::string> {
return ArgKind(ArgKind::AK_String);
}
- static llvm::Optional<std::string> getBestGuess(const VariantValue &) {
- return llvm::None;
+ static std::optional<std::string> getBestGuess(const VariantValue &) {
+ return std::nullopt;
}
};
@@ -96,8 +95,8 @@ template <class T> struct ArgTypeTraits<ast_matchers::internal::Matcher<T>> {
return ArgKind::MakeMatcherArg(ASTNodeKind::getFromNodeKind<T>());
}
- static llvm::Optional<std::string> getBestGuess(const VariantValue &) {
- return llvm::None;
+ static std::optional<std::string> getBestGuess(const VariantValue &) {
+ return std::nullopt;
}
};
@@ -115,8 +114,8 @@ template <> struct ArgTypeTraits<bool> {
return ArgKind(ArgKind::AK_Boolean);
}
- static llvm::Optional<std::string> getBestGuess(const VariantValue &) {
- return llvm::None;
+ static std::optional<std::string> getBestGuess(const VariantValue &) {
+ return std::nullopt;
}
};
@@ -134,8 +133,8 @@ template <> struct ArgTypeTraits<double> {
return ArgKind(ArgKind::AK_Double);
}
- static llvm::Optional<std::string> getBestGuess(const VariantValue &) {
- return llvm::None;
+ static std::optional<std::string> getBestGuess(const VariantValue &) {
+ return std::nullopt;
}
};
@@ -153,20 +152,20 @@ template <> struct ArgTypeTraits<unsigned> {
return ArgKind(ArgKind::AK_Unsigned);
}
- static llvm::Optional<std::string> getBestGuess(const VariantValue &) {
- return llvm::None;
+ static std::optional<std::string> getBestGuess(const VariantValue &) {
+ return std::nullopt;
}
};
template <> struct ArgTypeTraits<attr::Kind> {
private:
- static Optional<attr::Kind> getAttrKind(llvm::StringRef AttrKind) {
+ static std::optional<attr::Kind> getAttrKind(llvm::StringRef AttrKind) {
if (!AttrKind.consume_front("attr::"))
- return llvm::None;
- return llvm::StringSwitch<Optional<attr::Kind>>(AttrKind)
+ return std::nullopt;
+ return llvm::StringSwitch<std::optional<attr::Kind>>(AttrKind)
#define ATTR(X) .Case(#X, attr::X)
#include "clang/Basic/AttrList.inc"
- .Default(llvm::None);
+ .Default(std::nullopt);
}
public:
@@ -174,7 +173,7 @@ public:
return Value.isString();
}
static bool hasCorrectValue(const VariantValue& Value) {
- return getAttrKind(Value.getString()).hasValue();
+ return getAttrKind(Value.getString()).has_value();
}
static attr::Kind get(const VariantValue &Value) {
@@ -185,18 +184,18 @@ public:
return ArgKind(ArgKind::AK_String);
}
- static llvm::Optional<std::string> getBestGuess(const VariantValue &Value);
+ static std::optional<std::string> getBestGuess(const VariantValue &Value);
};
template <> struct ArgTypeTraits<CastKind> {
private:
- static Optional<CastKind> getCastKind(llvm::StringRef AttrKind) {
+ static std::optional<CastKind> getCastKind(llvm::StringRef AttrKind) {
if (!AttrKind.consume_front("CK_"))
- return llvm::None;
- return llvm::StringSwitch<Optional<CastKind>>(AttrKind)
+ return std::nullopt;
+ return llvm::StringSwitch<std::optional<CastKind>>(AttrKind)
#define CAST_OPERATION(Name) .Case(#Name, CK_##Name)
#include "clang/AST/OperationKinds.def"
- .Default(llvm::None);
+ .Default(std::nullopt);
}
public:
@@ -204,7 +203,7 @@ public:
return Value.isString();
}
static bool hasCorrectValue(const VariantValue& Value) {
- return getCastKind(Value.getString()).hasValue();
+ return getCastKind(Value.getString()).has_value();
}
static CastKind get(const VariantValue &Value) {
@@ -215,19 +214,19 @@ public:
return ArgKind(ArgKind::AK_String);
}
- static llvm::Optional<std::string> getBestGuess(const VariantValue &Value);
+ static std::optional<std::string> getBestGuess(const VariantValue &Value);
};
template <> struct ArgTypeTraits<llvm::Regex::RegexFlags> {
private:
- static Optional<llvm::Regex::RegexFlags> getFlags(llvm::StringRef Flags);
+ static std::optional<llvm::Regex::RegexFlags> getFlags(llvm::StringRef Flags);
public:
static bool hasCorrectType(const VariantValue &Value) {
return Value.isString();
}
static bool hasCorrectValue(const VariantValue& Value) {
- return getFlags(Value.getString()).hasValue();
+ return getFlags(Value.getString()).has_value();
}
static llvm::Regex::RegexFlags get(const VariantValue &Value) {
@@ -236,17 +235,18 @@ public:
static ArgKind getKind() { return ArgKind(ArgKind::AK_String); }
- static llvm::Optional<std::string> getBestGuess(const VariantValue &Value);
+ static std::optional<std::string> getBestGuess(const VariantValue &Value);
};
template <> struct ArgTypeTraits<OpenMPClauseKind> {
private:
- static Optional<OpenMPClauseKind> getClauseKind(llvm::StringRef ClauseKind) {
- return llvm::StringSwitch<Optional<OpenMPClauseKind>>(ClauseKind)
+ static std::optional<OpenMPClauseKind>
+ getClauseKind(llvm::StringRef ClauseKind) {
+ return llvm::StringSwitch<std::optional<OpenMPClauseKind>>(ClauseKind)
#define GEN_CLANG_CLAUSE_CLASS
#define CLAUSE_CLASS(Enum, Str, Class) .Case(#Enum, llvm::omp::Clause::Enum)
#include "llvm/Frontend/OpenMP/OMP.inc"
- .Default(llvm::None);
+ .Default(std::nullopt);
}
public:
@@ -254,7 +254,7 @@ public:
return Value.isString();
}
static bool hasCorrectValue(const VariantValue& Value) {
- return getClauseKind(Value.getString()).hasValue();
+ return getClauseKind(Value.getString()).has_value();
}
static OpenMPClauseKind get(const VariantValue &Value) {
@@ -263,21 +263,21 @@ public:
static ArgKind getKind() { return ArgKind(ArgKind::AK_String); }
- static llvm::Optional<std::string> getBestGuess(const VariantValue &Value);
+ static std::optional<std::string> getBestGuess(const VariantValue &Value);
};
template <> struct ArgTypeTraits<UnaryExprOrTypeTrait> {
private:
- static Optional<UnaryExprOrTypeTrait>
+ static std::optional<UnaryExprOrTypeTrait>
getUnaryOrTypeTraitKind(llvm::StringRef ClauseKind) {
if (!ClauseKind.consume_front("UETT_"))
- return llvm::None;
- return llvm::StringSwitch<Optional<UnaryExprOrTypeTrait>>(ClauseKind)
+ return std::nullopt;
+ return llvm::StringSwitch<std::optional<UnaryExprOrTypeTrait>>(ClauseKind)
#define UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) .Case(#Name, UETT_##Name)
#define CXX11_UNARY_EXPR_OR_TYPE_TRAIT(Spelling, Name, Key) \
.Case(#Name, UETT_##Name)
#include "clang/Basic/TokenKinds.def"
- .Default(llvm::None);
+ .Default(std::nullopt);
}
public:
@@ -285,7 +285,7 @@ public:
return Value.isString();
}
static bool hasCorrectValue(const VariantValue& Value) {
- return getUnaryOrTypeTraitKind(Value.getString()).hasValue();
+ return getUnaryOrTypeTraitKind(Value.getString()).has_value();
}
static UnaryExprOrTypeTrait get(const VariantValue &Value) {
@@ -294,7 +294,7 @@ public:
static ArgKind getKind() { return ArgKind(ArgKind::AK_String); }
- static llvm::Optional<std::string> getBestGuess(const VariantValue &Value);
+ static std::optional<std::string> getBestGuess(const VariantValue &Value);
};
/// Matcher descriptor interface.
@@ -508,7 +508,7 @@ variadicMatcherDescriptor(StringRef MatcherName, SourceRange NameRange,
return {};
}
if (!ArgTraits::hasCorrectValue(Value)) {
- if (llvm::Optional<std::string> BestGuess =
+ if (std::optional<std::string> BestGuess =
ArgTraits::getBestGuess(Value)) {
Error->addError(Arg.Range, Error->ET_RegistryUnknownEnumWithReplace)
<< i + 1 << Value.getString() << *BestGuess;
@@ -524,8 +524,9 @@ variadicMatcherDescriptor(StringRef MatcherName, SourceRange NameRange,
}
return {};
}
- InnerArgs.set_size(i + 1);
- InnerArgsPtr[i] = new (&InnerArgs[i]) ArgT(ArgTraits::get(Value));
+ assert(InnerArgs.size() < InnerArgs.capacity());
+ InnerArgs.emplace_back(ArgTraits::get(Value));
+ InnerArgsPtr[i] = &InnerArgs[i];
}
return outvalueToVariantMatcher(Func(InnerArgsPtr));
}
@@ -634,7 +635,7 @@ private:
return VariantMatcher(); \
} \
if (!ArgTypeTraits<type>::hasCorrectValue(Args[index].Value)) { \
- if (llvm::Optional<std::string> BestGuess = \
+ if (std::optional<std::string> BestGuess = \
ArgTypeTraits<type>::getBestGuess(Args[index].Value)) { \
Error->addError(Args[index].Range, \
Error->ET_RegistryUnknownEnumWithReplace) \
@@ -844,7 +845,7 @@ public:
}
if (!ArgTypeTraits<llvm::Regex::RegexFlags>::hasCorrectValue(
Args[1].Value)) {
- if (llvm::Optional<std::string> BestGuess =
+ if (std::optional<std::string> BestGuess =
ArgTypeTraits<llvm::Regex::RegexFlags>::getBestGuess(
Args[1].Value)) {
Error->addError(Args[1].Range, Error->ET_RegistryUnknownEnumWithReplace)
@@ -1007,7 +1008,7 @@ public:
Diagnostics *) const override {
std::vector<ASTNodeKind> NodeKinds;
- for (auto Arg : Args) {
+ for (const auto &Arg : Args) {
if (!Arg.Value.isNodeKind())
return {};
NodeKinds.push_back(Arg.Value.getNodeKind());
@@ -1035,7 +1036,6 @@ public:
void getArgKinds(ASTNodeKind ThisKind, unsigned,
std::vector<ArgKind> &ArgKinds) const override {
ArgKinds.push_back(ArgKind::MakeNodeArg(ThisKind));
- return;
}
bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity = nullptr,
ASTNodeKind *LeastDerivedKind = nullptr) const override {
@@ -1060,7 +1060,7 @@ makeMatcherAutoMarshall(ReturnType (*Func)(), StringRef MatcherName) {
BuildReturnTypeVector<ReturnType>::build(RetTypes);
return std::make_unique<FixedArgCountMatcherDescriptor>(
matcherMarshall0<ReturnType>, reinterpret_cast<void (*)()>(Func),
- MatcherName, RetTypes, None);
+ MatcherName, RetTypes, std::nullopt);
}
/// 1-arg overload
@@ -1167,4 +1167,4 @@ std::unique_ptr<MatcherDescriptor> makeMatcherAutoMarshall(
} // namespace ast_matchers
} // namespace clang
-#endif // LLVM_CLANG_AST_MATCHERS_DYNAMIC_MARSHALLERS_H
+#endif // LLVM_CLANG_LIB_ASTMATCHERS_DYNAMIC_MARSHALLERS_H
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Parser.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Parser.cpp
index c6a77bb6c2e0..6a16c2184fcf 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Parser.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Parser.cpp
@@ -16,7 +16,6 @@
#include "clang/ASTMatchers/Dynamic/Diagnostics.h"
#include "clang/ASTMatchers/Dynamic/Registry.h"
#include "clang/Basic/CharInfo.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
@@ -25,6 +24,7 @@
#include <cerrno>
#include <cstddef>
#include <cstdlib>
+#include <optional>
#include <string>
#include <utility>
#include <vector>
@@ -187,10 +187,10 @@ private:
break;
++TokenLength;
}
- if (TokenLength == 4 && Code.startswith("true")) {
+ if (TokenLength == 4 && Code.starts_with("true")) {
Result.Kind = TokenInfo::TK_Literal;
Result.Value = true;
- } else if (TokenLength == 5 && Code.startswith("false")) {
+ } else if (TokenLength == 5 && Code.starts_with("false")) {
Result.Kind = TokenInfo::TK_Literal;
Result.Value = false;
} else {
@@ -299,10 +299,8 @@ private:
/// Consume all leading whitespace from \c Code.
void consumeWhitespace() {
- Code = Code.drop_while([](char c) {
- // Don't trim newlines.
- return StringRef(" \t\v\f\r").contains(c);
- });
+ // Don't trim newlines.
+ Code = Code.ltrim(" \t\v\f\r");
}
SourceLocation currentLocation() {
@@ -395,11 +393,11 @@ bool Parser::parseIdentifierPrefixImpl(VariantValue *Value) {
return false;
assert(NamedValue.isMatcher());
- llvm::Optional<DynTypedMatcher> Result =
+ std::optional<DynTypedMatcher> Result =
NamedValue.getMatcher().getSingleMatcher();
- if (Result.hasValue()) {
- llvm::Optional<DynTypedMatcher> Bound = Result->tryBind(BindID);
- if (Bound.hasValue()) {
+ if (Result) {
+ std::optional<DynTypedMatcher> Bound = Result->tryBind(BindID);
+ if (Bound) {
*Value = VariantMatcher::SingleMatcher(*Bound);
return true;
}
@@ -438,7 +436,7 @@ bool Parser::parseIdentifierPrefixImpl(VariantValue *Value) {
return false;
}
- llvm::Optional<MatcherCtor> Ctor = S->lookupMatcherCtor(NameToken.Text);
+ std::optional<MatcherCtor> Ctor = S->lookupMatcherCtor(NameToken.Text);
// Parse as a matcher expression.
return parseMatcherExpressionImpl(NameToken, OpenToken, Ctor, Value);
@@ -517,7 +515,7 @@ bool Parser::parseMatcherBuilder(MatcherCtor Ctor, const TokenInfo &NameToken,
ArgValue.Text = NodeMatcherToken.Text;
ArgValue.Range = NodeMatcherToken.Range;
- llvm::Optional<MatcherCtor> MappedMatcher =
+ std::optional<MatcherCtor> MappedMatcher =
S->lookupMatcherCtor(ArgValue.Text);
if (!MappedMatcher) {
@@ -628,7 +626,7 @@ bool Parser::parseMatcherBuilder(MatcherCtor Ctor, const TokenInfo &NameToken,
/// returns \c false.
bool Parser::parseMatcherExpressionImpl(const TokenInfo &NameToken,
const TokenInfo &OpenToken,
- llvm::Optional<MatcherCtor> Ctor,
+ std::optional<MatcherCtor> Ctor,
VariantValue *Value) {
if (!Ctor) {
Error->addError(NameToken.Range, Error->ET_RegistryMatcherNotFound)
@@ -645,7 +643,7 @@ bool Parser::parseMatcherExpressionImpl(const TokenInfo &NameToken,
Tokenizer->SkipNewlines();
{
- ScopedContextEntry SCE(this, Ctor ? *Ctor : nullptr);
+ ScopedContextEntry SCE(this, Ctor.value_or(nullptr));
while (Tokenizer->nextTokenKind() != TokenInfo::TK_Eof) {
if (Tokenizer->nextTokenKind() == TokenInfo::TK_CloseParen) {
@@ -737,7 +735,7 @@ bool Parser::parseMatcherExpressionImpl(const TokenInfo &NameToken,
// Completions minus the prefix.
void Parser::addCompletion(const TokenInfo &CompToken,
const MatcherCompletion& Completion) {
- if (StringRef(Completion.TypedText).startswith(CompToken.Text) &&
+ if (StringRef(Completion.TypedText).starts_with(CompToken.Text) &&
Completion.Specificity > 0) {
Completions.emplace_back(Completion.TypedText.substr(CompToken.Text.size()),
Completion.MatcherDecl, Completion.Specificity);
@@ -828,7 +826,7 @@ Parser::Parser(CodeTokenizer *Tokenizer, Sema *S,
Parser::RegistrySema::~RegistrySema() = default;
-llvm::Optional<MatcherCtor>
+std::optional<MatcherCtor>
Parser::RegistrySema::lookupMatcherCtor(StringRef MatcherName) {
return Registry::lookupMatcherCtor(MatcherName);
}
@@ -904,20 +902,19 @@ Parser::completeExpression(StringRef &Code, unsigned CompletionOffset, Sema *S,
return P.Completions;
}
-llvm::Optional<DynTypedMatcher>
+std::optional<DynTypedMatcher>
Parser::parseMatcherExpression(StringRef &Code, Sema *S,
const NamedValueMap *NamedValues,
Diagnostics *Error) {
VariantValue Value;
if (!parseExpression(Code, S, NamedValues, &Value, Error))
- return llvm::Optional<DynTypedMatcher>();
+ return std::nullopt;
if (!Value.isMatcher()) {
Error->addError(SourceRange(), Error->ET_ParserNotAMatcher);
- return llvm::Optional<DynTypedMatcher>();
+ return std::nullopt;
}
- llvm::Optional<DynTypedMatcher> Result =
- Value.getMatcher().getSingleMatcher();
- if (!Result.hasValue()) {
+ std::optional<DynTypedMatcher> Result = Value.getMatcher().getSingleMatcher();
+ if (!Result) {
Error->addError(SourceRange(), Error->ET_ParserOverloadedType)
<< Value.getTypeAsString();
}
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp
index 0048f1133ca2..15dad022df5f 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -17,7 +17,6 @@
#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/ASTMatchers/Dynamic/Diagnostics.h"
#include "clang/ASTMatchers/Dynamic/VariantValue.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
@@ -26,6 +25,7 @@
#include <cassert>
#include <iterator>
#include <memory>
+#include <optional>
#include <set>
#include <string>
#include <utility>
@@ -60,7 +60,7 @@ private:
void RegistryMaps::registerMatcher(
StringRef MatcherName, std::unique_ptr<MatcherDescriptor> Callback) {
- assert(Constructors.find(MatcherName) == Constructors.end());
+ assert(!Constructors.contains(MatcherName));
Constructors[MatcherName] = std::move(Callback);
}
@@ -106,7 +106,6 @@ RegistryMaps::RegistryMaps() {
std::make_unique<internal::MapAnyOfBuilderDescriptor>());
REGISTER_OVERLOADED_2(callee);
- REGISTER_OVERLOADED_2(hasAnyCapture);
REGISTER_OVERLOADED_2(hasPrefix);
REGISTER_OVERLOADED_2(hasType);
REGISTER_OVERLOADED_2(ignoringParens);
@@ -135,13 +134,17 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(allOf);
REGISTER_MATCHER(anyOf);
REGISTER_MATCHER(anything);
+ REGISTER_MATCHER(arrayInitIndexExpr);
+ REGISTER_MATCHER(arrayInitLoopExpr);
REGISTER_MATCHER(argumentCountIs);
+ REGISTER_MATCHER(argumentCountAtLeast);
REGISTER_MATCHER(arraySubscriptExpr);
REGISTER_MATCHER(arrayType);
REGISTER_MATCHER(asString);
REGISTER_MATCHER(asmStmt);
REGISTER_MATCHER(atomicExpr);
REGISTER_MATCHER(atomicType);
+ REGISTER_MATCHER(attr);
REGISTER_MATCHER(autoType);
REGISTER_MATCHER(autoreleasePoolStmt)
REGISTER_MATCHER(binaryConditionalOperator);
@@ -156,6 +159,8 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(builtinType);
REGISTER_MATCHER(cStyleCastExpr);
REGISTER_MATCHER(callExpr);
+ REGISTER_MATCHER(capturesThis);
+ REGISTER_MATCHER(capturesVar);
REGISTER_MATCHER(caseStmt);
REGISTER_MATCHER(castExpr);
REGISTER_MATCHER(characterLiteral);
@@ -167,12 +172,15 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(compoundLiteralExpr);
REGISTER_MATCHER(compoundStmt);
REGISTER_MATCHER(coawaitExpr);
+ REGISTER_MATCHER(conceptDecl);
REGISTER_MATCHER(conditionalOperator);
REGISTER_MATCHER(constantArrayType);
REGISTER_MATCHER(constantExpr);
REGISTER_MATCHER(containsDeclaration);
REGISTER_MATCHER(continueStmt);
+ REGISTER_MATCHER(convertVectorExpr);
REGISTER_MATCHER(coreturnStmt);
+ REGISTER_MATCHER(coroutineBodyStmt);
REGISTER_MATCHER(coyieldExpr);
REGISTER_MATCHER(cudaKernelCallExpr);
REGISTER_MATCHER(cxxBaseSpecifier);
@@ -190,6 +198,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(cxxDependentScopeMemberExpr);
REGISTER_MATCHER(cxxDestructorDecl);
REGISTER_MATCHER(cxxDynamicCastExpr);
+ REGISTER_MATCHER(cxxFoldExpr);
REGISTER_MATCHER(cxxForRangeStmt);
REGISTER_MATCHER(cxxFunctionalCastExpr);
REGISTER_MATCHER(cxxMemberCallExpr);
@@ -220,11 +229,14 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(defaultStmt);
REGISTER_MATCHER(dependentCoawaitExpr);
REGISTER_MATCHER(dependentSizedArrayType);
+ REGISTER_MATCHER(dependentSizedExtVectorType);
REGISTER_MATCHER(designatedInitExpr);
REGISTER_MATCHER(designatorCountIs);
REGISTER_MATCHER(doStmt);
REGISTER_MATCHER(eachOf);
REGISTER_MATCHER(elaboratedType);
+ REGISTER_MATCHER(elaboratedTypeLoc);
+ REGISTER_MATCHER(usingType);
REGISTER_MATCHER(enumConstantDecl);
REGISTER_MATCHER(enumDecl);
REGISTER_MATCHER(enumType);
@@ -243,8 +255,10 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(forEachArgumentWithParamType);
REGISTER_MATCHER(forEachConstructorInitializer);
REGISTER_MATCHER(forEachDescendant);
+ REGISTER_MATCHER(forEachLambdaCapture);
REGISTER_MATCHER(forEachOverridden);
REGISTER_MATCHER(forEachSwitchCase);
+ REGISTER_MATCHER(forEachTemplateArgument);
REGISTER_MATCHER(forField);
REGISTER_MATCHER(forFunction);
REGISTER_MATCHER(forStmt);
@@ -262,6 +276,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasAnyBase);
REGISTER_MATCHER(hasAnyBinding);
REGISTER_MATCHER(hasAnyBody);
+ REGISTER_MATCHER(hasAnyCapture);
REGISTER_MATCHER(hasAnyClause);
REGISTER_MATCHER(hasAnyConstructorInitializer);
REGISTER_MATCHER(hasAnyDeclaration);
@@ -273,6 +288,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasAnySelector);
REGISTER_MATCHER(hasAnySubstatement);
REGISTER_MATCHER(hasAnyTemplateArgument);
+ REGISTER_MATCHER(hasAnyTemplateArgumentLoc);
REGISTER_MATCHER(hasAnyUsingShadowDecl);
REGISTER_MATCHER(hasArgument);
REGISTER_MATCHER(hasArgumentOfType);
@@ -304,6 +320,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasExplicitSpecifier);
REGISTER_MATCHER(hasExternalFormalLinkage);
REGISTER_MATCHER(hasFalseExpression);
+ REGISTER_MATCHER(hasFoldInit);
REGISTER_MATCHER(hasGlobalStorage);
REGISTER_MATCHER(hasImplicitDestinationType);
REGISTER_MATCHER(hasInClassInitializer);
@@ -321,6 +338,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasMemberName);
REGISTER_MATCHER(hasMethod);
REGISTER_MATCHER(hasName);
+ REGISTER_MATCHER(hasNamedTypeLoc);
REGISTER_MATCHER(hasNullSelector);
REGISTER_MATCHER(hasObjectExpression);
REGISTER_MATCHER(hasOperands);
@@ -328,12 +346,16 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasOverloadedOperatorName);
REGISTER_MATCHER(hasParameter);
REGISTER_MATCHER(hasParent);
+ REGISTER_MATCHER(hasPattern);
+ REGISTER_MATCHER(hasPointeeLoc);
REGISTER_MATCHER(hasQualifier);
REGISTER_MATCHER(hasRHS);
REGISTER_MATCHER(hasRangeInit);
REGISTER_MATCHER(hasReceiver);
REGISTER_MATCHER(hasReceiverType);
+ REGISTER_MATCHER(hasReferentLoc);
REGISTER_MATCHER(hasReplacementType);
+ REGISTER_MATCHER(hasReturnTypeLoc);
REGISTER_MATCHER(hasReturnValue);
REGISTER_MATCHER(hasPlacementArg);
REGISTER_MATCHER(hasSelector);
@@ -347,6 +369,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasSyntacticForm);
REGISTER_MATCHER(hasTargetDecl);
REGISTER_MATCHER(hasTemplateArgument);
+ REGISTER_MATCHER(hasTemplateArgumentLoc);
REGISTER_MATCHER(hasThen);
REGISTER_MATCHER(hasThreadStorageDuration);
REGISTER_MATCHER(hasTrailingReturn);
@@ -357,6 +380,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasUnderlyingDecl);
REGISTER_MATCHER(hasUnderlyingType);
REGISTER_MATCHER(hasUnqualifiedDesugaredType);
+ REGISTER_MATCHER(hasUnqualifiedLoc);
REGISTER_MATCHER(hasValueType);
REGISTER_MATCHER(ifStmt);
REGISTER_MATCHER(ignoringElidableConstructorCall);
@@ -383,6 +407,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isAssignmentOperator);
REGISTER_MATCHER(isAtPosition);
REGISTER_MATCHER(isBaseInitializer);
+ REGISTER_MATCHER(isBinaryFold);
REGISTER_MATCHER(isBitField);
REGISTER_MATCHER(isCatchAll);
REGISTER_MATCHER(isClass);
@@ -391,7 +416,9 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isComparisonOperator);
REGISTER_MATCHER(isConst);
REGISTER_MATCHER(isConstQualified);
+ REGISTER_MATCHER(isConsteval);
REGISTER_MATCHER(isConstexpr);
+ REGISTER_MATCHER(isConstinit);
REGISTER_MATCHER(isCopyAssignmentOperator);
REGISTER_MATCHER(isCopyConstructor);
REGISTER_MATCHER(isDefaultConstructor);
@@ -409,10 +436,13 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isExpr);
REGISTER_MATCHER(isExternC);
REGISTER_MATCHER(isFinal);
+ REGISTER_MATCHER(isPrivateKind);
REGISTER_MATCHER(isFirstPrivateKind);
REGISTER_MATCHER(isImplicit);
+ REGISTER_MATCHER(isInAnonymousNamespace);
REGISTER_MATCHER(isInStdNamespace);
REGISTER_MATCHER(isInTemplateInstantiation);
+ REGISTER_MATCHER(isInitCapture);
REGISTER_MATCHER(isInline);
REGISTER_MATCHER(isInstanceMessage);
REGISTER_MATCHER(isInstanceMethod);
@@ -421,6 +451,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isInteger);
REGISTER_MATCHER(isIntegral);
REGISTER_MATCHER(isLambda);
+ REGISTER_MATCHER(isLeftFold);
REGISTER_MATCHER(isListInitialization);
REGISTER_MATCHER(isMain);
REGISTER_MATCHER(isMemberInitializer);
@@ -434,6 +465,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isProtected);
REGISTER_MATCHER(isPublic);
REGISTER_MATCHER(isPure);
+ REGISTER_MATCHER(isRightFold);
REGISTER_MATCHER(isScoped);
REGISTER_MATCHER(isSharedKind);
REGISTER_MATCHER(isSignedInteger);
@@ -443,6 +475,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isStruct);
REGISTER_MATCHER(isTemplateInstantiation);
REGISTER_MATCHER(isTypeDependent);
+ REGISTER_MATCHER(isUnaryFold);
REGISTER_MATCHER(isUnion);
REGISTER_MATCHER(isUnsignedInteger);
REGISTER_MATCHER(isUserProvided);
@@ -456,8 +489,10 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(lValueReferenceType);
REGISTER_MATCHER(labelDecl);
REGISTER_MATCHER(labelStmt);
+ REGISTER_MATCHER(lambdaCapture);
REGISTER_MATCHER(lambdaExpr);
REGISTER_MATCHER(linkageSpecDecl);
+ REGISTER_MATCHER(macroQualifiedType);
REGISTER_MATCHER(materializeTemporaryExpr);
REGISTER_MATCHER(member);
REGISTER_MATCHER(memberExpr);
@@ -486,6 +521,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(objcObjectPointerType);
REGISTER_MATCHER(objcPropertyDecl);
REGISTER_MATCHER(objcProtocolDecl);
+ REGISTER_MATCHER(objcStringLiteral);
REGISTER_MATCHER(objcThrowStmt);
REGISTER_MATCHER(objcTryStmt);
REGISTER_MATCHER(ofClass);
@@ -503,13 +539,16 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(parmVarDecl);
REGISTER_MATCHER(pointee);
REGISTER_MATCHER(pointerType);
+ REGISTER_MATCHER(pointerTypeLoc);
REGISTER_MATCHER(predefinedExpr);
REGISTER_MATCHER(qualType);
+ REGISTER_MATCHER(qualifiedTypeLoc);
REGISTER_MATCHER(rValueReferenceType);
REGISTER_MATCHER(realFloatingPointType);
REGISTER_MATCHER(recordDecl);
REGISTER_MATCHER(recordType);
REGISTER_MATCHER(referenceType);
+ REGISTER_MATCHER(referenceTypeLoc);
REGISTER_MATCHER(refersToDeclaration);
REGISTER_MATCHER(refersToIntegralType);
REGISTER_MATCHER(refersToTemplate);
@@ -537,6 +576,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(templateArgumentLoc);
REGISTER_MATCHER(templateName);
REGISTER_MATCHER(templateSpecializationType);
+ REGISTER_MATCHER(templateSpecializationTypeLoc);
REGISTER_MATCHER(templateTemplateParmDecl);
REGISTER_MATCHER(templateTypeParmDecl);
REGISTER_MATCHER(templateTypeParmType);
@@ -596,11 +636,10 @@ Registry::buildMatcherCtor(MatcherCtor Ctor, SourceRange NameRange,
}
// static
-llvm::Optional<MatcherCtor> Registry::lookupMatcherCtor(StringRef MatcherName) {
+std::optional<MatcherCtor> Registry::lookupMatcherCtor(StringRef MatcherName) {
auto it = RegistryData->constructors().find(MatcherName);
- return it == RegistryData->constructors().end()
- ? llvm::Optional<MatcherCtor>()
- : it->second.get();
+ return it == RegistryData->constructors().end() ? std::optional<MatcherCtor>()
+ : it->second.get();
}
static llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
@@ -773,10 +812,10 @@ VariantMatcher Registry::constructBoundMatcher(MatcherCtor Ctor,
VariantMatcher Out = constructMatcher(Ctor, NameRange, Args, Error);
if (Out.isNull()) return Out;
- llvm::Optional<DynTypedMatcher> Result = Out.getSingleMatcher();
- if (Result.hasValue()) {
- llvm::Optional<DynTypedMatcher> Bound = Result->tryBind(BindID);
- if (Bound.hasValue()) {
+ std::optional<DynTypedMatcher> Result = Out.getSingleMatcher();
+ if (Result) {
+ std::optional<DynTypedMatcher> Bound = Result->tryBind(BindID);
+ if (Bound) {
return VariantMatcher::SingleMatcher(*Bound);
}
}
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp
index 813eb1597756..4f6b021b26f0 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp
@@ -14,6 +14,7 @@
#include "clang/ASTMatchers/Dynamic/VariantValue.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/STLExtras.h"
+#include <optional>
namespace clang {
namespace ast_matchers {
@@ -66,7 +67,7 @@ DynTypedMatcher VariantMatcher::MatcherOps::convertMatcher(
return Matcher.dynCastTo(NodeKind);
}
-llvm::Optional<DynTypedMatcher>
+std::optional<DynTypedMatcher>
VariantMatcher::MatcherOps::constructVariadicOperator(
DynTypedMatcher::VariadicOperator Op,
ArrayRef<VariantMatcher> InnerMatchers) const {
@@ -75,11 +76,11 @@ VariantMatcher::MatcherOps::constructVariadicOperator(
// Abort if any of the inner matchers can't be converted to
// Matcher<T>.
if (!InnerMatcher.Value)
- return llvm::None;
- llvm::Optional<DynTypedMatcher> Inner =
+ return std::nullopt;
+ std::optional<DynTypedMatcher> Inner =
InnerMatcher.Value->getTypedMatcher(*this);
if (!Inner)
- return llvm::None;
+ return std::nullopt;
DynMatchers.push_back(*Inner);
}
return DynTypedMatcher::constructVariadic(Op, NodeKind, DynMatchers);
@@ -91,7 +92,7 @@ class VariantMatcher::SinglePayload : public VariantMatcher::Payload {
public:
SinglePayload(const DynTypedMatcher &Matcher) : Matcher(Matcher) {}
- llvm::Optional<DynTypedMatcher> getSingleMatcher() const override {
+ std::optional<DynTypedMatcher> getSingleMatcher() const override {
return Matcher;
}
@@ -100,12 +101,12 @@ public:
.str();
}
- llvm::Optional<DynTypedMatcher>
+ std::optional<DynTypedMatcher>
getTypedMatcher(const MatcherOps &Ops) const override {
bool Ignore;
if (Ops.canConstructFrom(Matcher, Ignore))
return Matcher;
- return llvm::None;
+ return std::nullopt;
}
bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity) const override {
@@ -124,9 +125,9 @@ public:
~PolymorphicPayload() override {}
- llvm::Optional<DynTypedMatcher> getSingleMatcher() const override {
+ std::optional<DynTypedMatcher> getSingleMatcher() const override {
if (Matchers.size() != 1)
- return llvm::Optional<DynTypedMatcher>();
+ return std::nullopt;
return Matchers[0];
}
@@ -140,7 +141,7 @@ public:
return (Twine("Matcher<") + Inner + ">").str();
}
- llvm::Optional<DynTypedMatcher>
+ std::optional<DynTypedMatcher>
getTypedMatcher(const MatcherOps &Ops) const override {
bool FoundIsExact = false;
const DynTypedMatcher *Found = nullptr;
@@ -162,7 +163,7 @@ public:
// We only succeed if we found exactly one, or if we found an exact match.
if (Found && (FoundIsExact || NumFound == 1))
return *Found;
- return llvm::None;
+ return std::nullopt;
}
bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity) const override {
@@ -189,8 +190,8 @@ public:
std::vector<VariantMatcher> Args)
: Op(Op), Args(std::move(Args)) {}
- llvm::Optional<DynTypedMatcher> getSingleMatcher() const override {
- return llvm::Optional<DynTypedMatcher>();
+ std::optional<DynTypedMatcher> getSingleMatcher() const override {
+ return std::nullopt;
}
std::string getTypeAsString() const override {
@@ -203,7 +204,7 @@ public:
return Inner;
}
- llvm::Optional<DynTypedMatcher>
+ std::optional<DynTypedMatcher>
getTypedMatcher(const MatcherOps &Ops) const override {
return Ops.constructVariadicOperator(Op, Args);
}
@@ -240,8 +241,8 @@ VariantMatcher VariantMatcher::VariadicOperatorMatcher(
std::make_shared<VariadicOpPayload>(Op, std::move(Args)));
}
-llvm::Optional<DynTypedMatcher> VariantMatcher::getSingleMatcher() const {
- return Value ? Value->getSingleMatcher() : llvm::Optional<DynTypedMatcher>();
+std::optional<DynTypedMatcher> VariantMatcher::getSingleMatcher() const {
+ return Value ? Value->getSingleMatcher() : std::optional<DynTypedMatcher>();
}
void VariantMatcher::reset() { Value.reset(); }
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/GtestMatchers.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/GtestMatchers.cpp
index 6e4c12f31969..a556d8ef2da0 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/GtestMatchers.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/GtestMatchers.cpp
@@ -21,7 +21,6 @@
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
namespace clang {
diff --git a/contrib/llvm-project/clang/lib/Analysis/AnalysisDeclContext.cpp b/contrib/llvm-project/clang/lib/Analysis/AnalysisDeclContext.cpp
index d8466ac34a3d..d3a1a993711f 100644
--- a/contrib/llvm-project/clang/lib/Analysis/AnalysisDeclContext.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/AnalysisDeclContext.cpp
@@ -142,7 +142,7 @@ bool AnalysisDeclContext::isBodyAutosynthesizedFromModelFile() const {
/// Returns true if \param VD is an Objective-C implicit 'self' parameter.
static bool isSelfDecl(const VarDecl *VD) {
- return isa<ImplicitParamDecl>(VD) && VD->getName() == "self";
+ return isa_and_nonnull<ImplicitParamDecl>(VD) && VD->getName() == "self";
}
const ImplicitParamDecl *AnalysisDeclContext::getSelfDecl() const {
@@ -169,8 +169,8 @@ const ImplicitParamDecl *AnalysisDeclContext::getSelfDecl() const {
if (!LC.capturesVariable())
continue;
- VarDecl *VD = LC.getCapturedVar();
- if (isSelfDecl(VD))
+ ValueDecl *VD = LC.getCapturedVar();
+ if (isSelfDecl(dyn_cast<VarDecl>(VD)))
return dyn_cast<ImplicitParamDecl>(VD);
}
@@ -231,8 +231,7 @@ CFG *AnalysisDeclContext::getCFG() {
CFG *AnalysisDeclContext::getUnoptimizedCFG() {
if (!builtCompleteCFG) {
- SaveAndRestore<bool> NotPrune(cfgBuildOptions.PruneTriviallyFalseEdges,
- false);
+ SaveAndRestore NotPrune(cfgBuildOptions.PruneTriviallyFalseEdges, false);
completeCFG =
CFG::buildCFG(D, getBody(), &D->getASTContext(), cfgBuildOptions);
// Even when the cfg is not successfully built, we don't
@@ -352,7 +351,7 @@ std::string AnalysisDeclContext::getFunctionName(const Decl *D) {
for (const auto &P : FD->parameters()) {
if (P != *FD->param_begin())
OS << ", ";
- OS << P->getType().getAsString();
+ OS << P->getType();
}
OS << ')';
}
@@ -387,7 +386,7 @@ std::string AnalysisDeclContext::getFunctionName(const Decl *D) {
OS << ' ' << OMD->getSelector().getAsString() << ']';
}
- return OS.str();
+ return Str;
}
LocationContextManager &AnalysisDeclContext::getLocationContextManager() {
diff --git a/contrib/llvm-project/clang/lib/Analysis/BodyFarm.cpp b/contrib/llvm-project/clang/lib/Analysis/BodyFarm.cpp
index e357bfb29b82..127e843d4ead 100644
--- a/contrib/llvm-project/clang/lib/Analysis/BodyFarm.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/BodyFarm.cpp
@@ -20,9 +20,11 @@
#include "clang/AST/ExprObjC.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/Analysis/CodeInjector.h"
+#include "clang/Basic/Builtins.h"
#include "clang/Basic/OperatorKinds.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Debug.h"
+#include <optional>
#define DEBUG_TYPE "body-farm"
@@ -86,6 +88,9 @@ public:
ImplicitCastExpr *makeImplicitCast(const Expr *Arg, QualType Ty,
CastKind CK = CK_LValueToRValue);
+ /// Create a cast to reference type.
+ CastExpr *makeReferenceCast(const Expr *Arg, QualType Ty);
+
/// Create an Objective-C bool literal.
ObjCBoolLiteralExpr *makeObjCBool(bool Val);
@@ -130,7 +135,8 @@ BinaryOperator *ASTMaker::makeComparison(const Expr *LHS, const Expr *RHS,
}
CompoundStmt *ASTMaker::makeCompound(ArrayRef<Stmt *> Stmts) {
- return CompoundStmt::Create(C, Stmts, SourceLocation(), SourceLocation());
+ return CompoundStmt::Create(C, Stmts, FPOptionsOverride(), SourceLocation(),
+ SourceLocation());
}
DeclRefExpr *ASTMaker::makeDeclRefExpr(
@@ -173,6 +179,16 @@ ImplicitCastExpr *ASTMaker::makeImplicitCast(const Expr *Arg, QualType Ty,
/* FPFeatures */ FPOptionsOverride());
}
+CastExpr *ASTMaker::makeReferenceCast(const Expr *Arg, QualType Ty) {
+ assert(Ty->isReferenceType());
+ return CXXStaticCastExpr::Create(
+ C, Ty.getNonReferenceType(),
+ Ty->isLValueReferenceType() ? VK_LValue : VK_XValue, CK_NoOp,
+ const_cast<Expr *>(Arg), /*CXXCastPath=*/nullptr,
+ /*Written=*/C.getTrivialTypeSourceInfo(Ty), FPOptionsOverride(),
+ SourceLocation(), SourceLocation(), SourceRange());
+}
+
Expr *ASTMaker::makeIntegralCast(const Expr *Arg, QualType Ty) {
if (Arg->getType() == Ty)
return const_cast<Expr*>(Arg);
@@ -296,6 +312,22 @@ static CallExpr *create_call_once_lambda_call(ASTContext &C, ASTMaker M,
/*FPFeatures=*/FPOptionsOverride());
}
+/// Create a fake body for 'std::move' or 'std::forward'. This is just:
+///
+/// \code
+/// return static_cast<return_type>(param);
+/// \endcode
+static Stmt *create_std_move_forward(ASTContext &C, const FunctionDecl *D) {
+ LLVM_DEBUG(llvm::dbgs() << "Generating body for std::move / std::forward\n");
+
+ ASTMaker M(C);
+
+ QualType ReturnType = D->getType()->castAs<FunctionType>()->getReturnType();
+ Expr *Param = M.makeDeclRefExpr(D->getParamDecl(0));
+ Expr *Cast = M.makeReferenceCast(Param, ReturnType);
+ return M.makeReturn(Cast);
+}
+
/// Create a fake body for std::call_once.
/// Emulates the following function body:
///
@@ -461,8 +493,7 @@ static Stmt *create_call_once(ASTContext &C, const FunctionDecl *D) {
DerefType);
auto *Out =
- IfStmt::Create(C, SourceLocation(),
- /* IsConstexpr=*/false,
+ IfStmt::Create(C, SourceLocation(), IfStatementKind::Ordinary,
/* Init=*/nullptr,
/* Var=*/nullptr,
/* Cond=*/FlagCheck,
@@ -511,7 +542,7 @@ static Stmt *create_dispatch_once(ASTContext &C, const FunctionDecl *D) {
CallExpr *CE = CallExpr::Create(
/*ASTContext=*/C,
/*StmtClass=*/M.makeLvalueToRvalue(/*Expr=*/Block),
- /*Args=*/None,
+ /*Args=*/std::nullopt,
/*QualType=*/C.VoidTy,
/*ExprValueType=*/VK_PRValue,
/*SourceLocation=*/SourceLocation(), FPOptionsOverride());
@@ -547,8 +578,7 @@ static Stmt *create_dispatch_once(ASTContext &C, const FunctionDecl *D) {
Expr *GuardCondition = M.makeComparison(LValToRval, DoneValue, BO_NE);
// (5) Create the 'if' statement.
- auto *If = IfStmt::Create(C, SourceLocation(),
- /* IsConstexpr=*/false,
+ auto *If = IfStmt::Create(C, SourceLocation(), IfStatementKind::Ordinary,
/* Init=*/nullptr,
/* Var=*/nullptr,
/* Cond=*/GuardCondition,
@@ -580,7 +610,7 @@ static Stmt *create_dispatch_sync(ASTContext &C, const FunctionDecl *D) {
ASTMaker M(C);
DeclRefExpr *DR = M.makeDeclRefExpr(PV);
ImplicitCastExpr *ICE = M.makeLvalueToRvalue(DR, Ty);
- CallExpr *CE = CallExpr::Create(C, ICE, None, C.VoidTy, VK_PRValue,
+ CallExpr *CE = CallExpr::Create(C, ICE, std::nullopt, C.VoidTy, VK_PRValue,
SourceLocation(), FPOptionsOverride());
return CE;
}
@@ -658,8 +688,7 @@ static Stmt *create_OSAtomicCompareAndSwap(ASTContext &C, const FunctionDecl *D)
/// Construct the If.
auto *If =
- IfStmt::Create(C, SourceLocation(),
- /* IsConstexpr=*/false,
+ IfStmt::Create(C, SourceLocation(), IfStatementKind::Ordinary,
/* Init=*/nullptr,
/* Var=*/nullptr, Comparison,
/* LPL=*/SourceLocation(),
@@ -669,9 +698,9 @@ static Stmt *create_OSAtomicCompareAndSwap(ASTContext &C, const FunctionDecl *D)
}
Stmt *BodyFarm::getBody(const FunctionDecl *D) {
- Optional<Stmt *> &Val = Bodies[D];
- if (Val.hasValue())
- return Val.getValue();
+ std::optional<Stmt *> &Val = Bodies[D];
+ if (Val)
+ return *Val;
Val = nullptr;
@@ -684,8 +713,21 @@ Stmt *BodyFarm::getBody(const FunctionDecl *D) {
FunctionFarmer FF;
- if (Name.startswith("OSAtomicCompareAndSwap") ||
- Name.startswith("objc_atomicCompareAndSwap")) {
+ if (unsigned BuiltinID = D->getBuiltinID()) {
+ switch (BuiltinID) {
+ case Builtin::BIas_const:
+ case Builtin::BIforward:
+ case Builtin::BIforward_like:
+ case Builtin::BImove:
+ case Builtin::BImove_if_noexcept:
+ FF = create_std_move_forward;
+ break;
+ default:
+ FF = nullptr;
+ break;
+ }
+ } else if (Name.starts_with("OSAtomicCompareAndSwap") ||
+ Name.starts_with("objc_atomicCompareAndSwap")) {
FF = create_OSAtomicCompareAndSwap;
} else if (Name == "call_once" && D->getDeclContext()->isStdNamespace()) {
FF = create_call_once;
@@ -698,7 +740,7 @@ Stmt *BodyFarm::getBody(const FunctionDecl *D) {
if (FF) { Val = FF(C, D); }
else if (Injector) { Val = Injector->getBody(D); }
- return Val.getValue();
+ return *Val;
}
static const ObjCIvarDecl *findBackingIvar(const ObjCPropertyDecl *Prop) {
@@ -764,7 +806,7 @@ static Stmt *createObjCPropertyGetter(ASTContext &Ctx,
if (!IVar) {
Prop = MD->findPropertyDecl();
- IVar = findBackingIvar(Prop);
+ IVar = Prop ? findBackingIvar(Prop) : nullptr;
}
if (!IVar || !Prop)
@@ -793,9 +835,8 @@ static Stmt *createObjCPropertyGetter(ASTContext &Ctx,
}
}
- // Sanity check that the property is the same type as the ivar, or a
- // reference to it, and that it is either an object pointer or trivially
- // copyable.
+ // We expect that the property is the same type as the ivar, or a reference to
+ // it, and that it is either an object pointer or trivially copyable.
if (!Ctx.hasSameUnqualifiedType(IVar->getType(),
Prop->getType().getNonReferenceType()))
return nullptr;
@@ -833,9 +874,9 @@ Stmt *BodyFarm::getBody(const ObjCMethodDecl *D) {
if (!D->isImplicit())
return nullptr;
- Optional<Stmt *> &Val = Bodies[D];
- if (Val.hasValue())
- return Val.getValue();
+ std::optional<Stmt *> &Val = Bodies[D];
+ if (Val)
+ return *Val;
Val = nullptr;
// For now, we only synthesize getters.
@@ -862,5 +903,5 @@ Stmt *BodyFarm::getBody(const ObjCMethodDecl *D) {
Val = createObjCPropertyGetter(C, D);
- return Val.getValue();
+ return *Val;
}
diff --git a/contrib/llvm-project/clang/lib/Analysis/CFG.cpp b/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
index ba5eceda24b5..03ab4c6fdf29 100644
--- a/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
@@ -40,7 +40,6 @@
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -56,6 +55,7 @@
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <memory>
+#include <optional>
#include <string>
#include <tuple>
#include <utility>
@@ -72,6 +72,10 @@ static SourceLocation GetEndLoc(Decl *D) {
/// Returns true on constant values based around a single IntegerLiteral.
/// Allow for use of parentheses, integer casts, and negative signs.
+/// FIXME: it would be good to unify this function with
+/// getIntegerLiteralSubexpressionValue at some point given the similarity
+/// between the functions.
+
static bool IsIntegerLiteralConstantExpr(const Expr *E) {
// Allow parentheses
E = E->IgnoreParens();
@@ -296,6 +300,7 @@ public:
int distance(const_iterator L);
const_iterator shared_parent(const_iterator L);
bool pointsToFirstDeclaredVar() { return VarIter == 1; }
+ bool inSameLocalScope(const_iterator rhs) { return Scope == rhs.Scope; }
};
private:
@@ -345,18 +350,33 @@ int LocalScope::const_iterator::distance(LocalScope::const_iterator L) {
/// between this and shared_parent(L) end.
LocalScope::const_iterator
LocalScope::const_iterator::shared_parent(LocalScope::const_iterator L) {
- llvm::SmallPtrSet<const LocalScope *, 4> ScopesOfL;
+ // one of iterators is not valid (we are not in scope), so common
+ // parent is const_iterator() (i.e. sentinel).
+ if ((*this == const_iterator()) || (L == const_iterator())) {
+ return const_iterator();
+ }
+
+ const_iterator F = *this;
+ if (F.inSameLocalScope(L)) {
+ // Iterators are in the same scope, get common subset of variables.
+ F.VarIter = std::min(F.VarIter, L.VarIter);
+ return F;
+ }
+
+ llvm::SmallDenseMap<const LocalScope *, unsigned, 4> ScopesOfL;
while (true) {
- ScopesOfL.insert(L.Scope);
+ ScopesOfL.try_emplace(L.Scope, L.VarIter);
if (L == const_iterator())
break;
L = L.Scope->Prev;
}
- const_iterator F = *this;
while (true) {
- if (ScopesOfL.count(F.Scope))
+ if (auto LIt = ScopesOfL.find(F.Scope); LIt != ScopesOfL.end()) {
+ // Get common subset of variables in given scope
+ F.VarIter = std::min(F.VarIter, LIt->getSecond());
return F;
+ }
assert(F != const_iterator() &&
"L iterator is not reachable from F iterator.");
F = F.Scope->Prev;
@@ -432,8 +452,8 @@ reverse_children::reverse_children(Stmt *S) {
// Note: Fill in this switch with more cases we want to optimize.
case Stmt::InitListExprClass: {
InitListExpr *IE = cast<InitListExpr>(S);
- children = llvm::makeArrayRef(reinterpret_cast<Stmt**>(IE->getInits()),
- IE->getNumInits());
+ children = llvm::ArrayRef(reinterpret_cast<Stmt **>(IE->getInits()),
+ IE->getNumInits());
return;
}
default:
@@ -441,8 +461,7 @@ reverse_children::reverse_children(Stmt *S) {
}
// Default case for all other statements.
- for (Stmt *SubStmt : S->children())
- childrenBuf.push_back(SubStmt);
+ llvm::append_range(childrenBuf, S->children());
// This needs to be done *after* childrenBuf has been populated.
children = childrenBuf;
@@ -482,8 +501,10 @@ class CFGBuilder {
CFGBlock *SwitchTerminatedBlock = nullptr;
CFGBlock *DefaultCaseBlock = nullptr;
- // This can point either to a try or a __try block. The frontend forbids
- // mixing both kinds in one function, so having one for both is enough.
+ // This can point to either a C++ try, an Objective-C @try, or an SEH __try.
+ // try and @try can be mixed and generally work the same.
+ // The frontend forbids mixing SEH __try with either try or @try.
+ // So having one for all three is enough.
CFGBlock *TryTerminatedBlock = nullptr;
// Current position in local scope.
@@ -508,9 +529,6 @@ class CFGBuilder {
llvm::DenseMap<Expr *, const ConstructionContextLayer *>
ConstructionContextMap;
- using DeclsWithEndedScopeSetTy = llvm::SmallSetVector<VarDecl *, 16>;
- DeclsWithEndedScopeSetTy DeclsWithEndedScope;
-
bool badCFG = false;
const CFG::BuildOptions &BuildOpts;
@@ -529,9 +547,7 @@ class CFGBuilder {
public:
explicit CFGBuilder(ASTContext *astContext,
const CFG::BuildOptions &buildOpts)
- : Context(astContext), cfg(new CFG()), // crew a new CFG
- ConstructionContextMap(), BuildOpts(buildOpts) {}
-
+ : Context(astContext), cfg(new CFG()), BuildOpts(buildOpts) {}
// buildCFG - Used by external clients to construct the CFG.
std::unique_ptr<CFG> buildCFG(const Decl *D, Stmt *Statement);
@@ -542,6 +558,7 @@ private:
// Visitors to walk an AST and construct the CFG.
CFGBlock *VisitInitListExpr(InitListExpr *ILE, AddStmtChoice asc);
CFGBlock *VisitAddrLabelExpr(AddrLabelExpr *A, AddStmtChoice asc);
+ CFGBlock *VisitAttributedStmt(AttributedStmt *A, AddStmtChoice asc);
CFGBlock *VisitBinaryOperator(BinaryOperator *B, AddStmtChoice asc);
CFGBlock *VisitBreakStmt(BreakStmt *B);
CFGBlock *VisitCallExpr(CallExpr *C, AddStmtChoice asc);
@@ -564,6 +581,7 @@ private:
AddStmtChoice asc);
CFGBlock *VisitCXXThrowExpr(CXXThrowExpr *T);
CFGBlock *VisitCXXTryStmt(CXXTryStmt *S);
+ CFGBlock *VisitCXXTypeidExpr(CXXTypeidExpr *S, AddStmtChoice asc);
CFGBlock *VisitDeclStmt(DeclStmt *DS);
CFGBlock *VisitDeclSubExpr(DeclStmt *DS);
CFGBlock *VisitDefaultStmt(DefaultStmt *D);
@@ -597,6 +615,8 @@ private:
CFGBlock *VisitObjCMessageExpr(ObjCMessageExpr *E, AddStmtChoice asc);
CFGBlock *VisitPseudoObjectExpr(PseudoObjectExpr *E);
CFGBlock *VisitReturnStmt(Stmt *S);
+ CFGBlock *VisitCoroutineSuspendExpr(CoroutineSuspendExpr *S,
+ AddStmtChoice asc);
CFGBlock *VisitSEHExceptStmt(SEHExceptStmt *S);
CFGBlock *VisitSEHFinallyStmt(SEHFinallyStmt *S);
CFGBlock *VisitSEHLeaveStmt(SEHLeaveStmt *S);
@@ -607,6 +627,7 @@ private:
AddStmtChoice asc);
CFGBlock *VisitUnaryOperator(UnaryOperator *U, AddStmtChoice asc);
CFGBlock *VisitWhileStmt(WhileStmt *W);
+ CFGBlock *VisitArrayInitLoopExpr(ArrayInitLoopExpr *A, AddStmtChoice asc);
CFGBlock *Visit(Stmt *S, AddStmtChoice asc = AddStmtChoice::NotAlwaysAdd,
bool ExternallyDestructed = false);
@@ -719,9 +740,9 @@ private:
// hence strict duck-typing.
template <typename CallLikeExpr,
typename = std::enable_if_t<
- std::is_base_of<CallExpr, CallLikeExpr>::value ||
- std::is_base_of<CXXConstructExpr, CallLikeExpr>::value ||
- std::is_base_of<ObjCMessageExpr, CallLikeExpr>::value>>
+ std::is_base_of_v<CallExpr, CallLikeExpr> ||
+ std::is_base_of_v<CXXConstructExpr, CallLikeExpr> ||
+ std::is_base_of_v<ObjCMessageExpr, CallLikeExpr>>>
void findConstructionContextsForArguments(CallLikeExpr *E) {
for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
Expr *Arg = E->getArg(i);
@@ -748,18 +769,20 @@ private:
CFGBlock *addInitializer(CXXCtorInitializer *I);
void addLoopExit(const Stmt *LoopStmt);
- void addAutomaticObjDtors(LocalScope::const_iterator B,
- LocalScope::const_iterator E, Stmt *S);
- void addLifetimeEnds(LocalScope::const_iterator B,
- LocalScope::const_iterator E, Stmt *S);
void addAutomaticObjHandling(LocalScope::const_iterator B,
LocalScope::const_iterator E, Stmt *S);
+ void addAutomaticObjDestruction(LocalScope::const_iterator B,
+ LocalScope::const_iterator E, Stmt *S);
+ void addScopeExitHandling(LocalScope::const_iterator B,
+ LocalScope::const_iterator E, Stmt *S);
void addImplicitDtorsForDestructor(const CXXDestructorDecl *DD);
- void addScopesEnd(LocalScope::const_iterator B, LocalScope::const_iterator E,
- Stmt *S);
-
- void getDeclsWithEndedScope(LocalScope::const_iterator B,
- LocalScope::const_iterator E, Stmt *S);
+ void addScopeChangesHandling(LocalScope::const_iterator SrcPos,
+ LocalScope::const_iterator DstPos,
+ Stmt *S);
+ CFGBlock *createScopeChangesHandlingBlock(LocalScope::const_iterator SrcPos,
+ CFGBlock *SrcBlk,
+ LocalScope::const_iterator DstPost,
+ CFGBlock *DstBlk);
// Local scopes creation.
LocalScope* createOrReuseLocalScope(LocalScope* Scope);
@@ -858,6 +881,10 @@ private:
B->appendAutomaticObjDtor(VD, S, cfg->getBumpVectorContext());
}
+ void appendCleanupFunction(CFGBlock *B, VarDecl *VD) {
+ B->appendCleanupFunction(VD, cfg->getBumpVectorContext());
+ }
+
void appendLifetimeEnds(CFGBlock *B, VarDecl *VD, Stmt *S) {
B->appendLifetimeEnds(VD, S, cfg->getBumpVectorContext());
}
@@ -870,18 +897,6 @@ private:
B->appendDeleteDtor(RD, DE, cfg->getBumpVectorContext());
}
- void prependAutomaticObjDtorsWithTerminator(CFGBlock *Blk,
- LocalScope::const_iterator B, LocalScope::const_iterator E);
-
- void prependAutomaticObjLifetimeWithTerminator(CFGBlock *Blk,
- LocalScope::const_iterator B,
- LocalScope::const_iterator E);
-
- const VarDecl *
- prependAutomaticObjScopeEndWithTerminator(CFGBlock *Blk,
- LocalScope::const_iterator B,
- LocalScope::const_iterator E);
-
void addSuccessor(CFGBlock *B, CFGBlock *S, bool IsReachable = true) {
B->addSuccessor(CFGBlock::AdjacentBlock(S, IsReachable),
cfg->getBumpVectorContext());
@@ -899,21 +914,11 @@ private:
B->appendScopeBegin(VD, S, cfg->getBumpVectorContext());
}
- void prependScopeBegin(CFGBlock *B, const VarDecl *VD, const Stmt *S) {
- if (BuildOpts.AddScopes)
- B->prependScopeBegin(VD, S, cfg->getBumpVectorContext());
- }
-
void appendScopeEnd(CFGBlock *B, const VarDecl *VD, const Stmt *S) {
if (BuildOpts.AddScopes)
B->appendScopeEnd(VD, S, cfg->getBumpVectorContext());
}
- void prependScopeEnd(CFGBlock *B, const VarDecl *VD, const Stmt *S) {
- if (BuildOpts.AddScopes)
- B->prependScopeEnd(VD, S, cfg->getBumpVectorContext());
- }
-
/// Find a relational comparison with an expression evaluating to a
/// boolean and a constant other than 0 and 1.
/// e.g. if ((x < y) == 10)
@@ -960,15 +965,16 @@ private:
const Expr *LHSExpr = B->getLHS()->IgnoreParens();
const Expr *RHSExpr = B->getRHS()->IgnoreParens();
- const IntegerLiteral *IntLiteral = dyn_cast<IntegerLiteral>(LHSExpr);
+ std::optional<llvm::APInt> IntLiteral1 =
+ getIntegerLiteralSubexpressionValue(LHSExpr);
const Expr *BoolExpr = RHSExpr;
- if (!IntLiteral) {
- IntLiteral = dyn_cast<IntegerLiteral>(RHSExpr);
+ if (!IntLiteral1) {
+ IntLiteral1 = getIntegerLiteralSubexpressionValue(RHSExpr);
BoolExpr = LHSExpr;
}
- if (!IntLiteral)
+ if (!IntLiteral1)
return TryResult();
const BinaryOperator *BitOp = dyn_cast<BinaryOperator>(BoolExpr);
@@ -977,26 +983,26 @@ private:
const Expr *LHSExpr2 = BitOp->getLHS()->IgnoreParens();
const Expr *RHSExpr2 = BitOp->getRHS()->IgnoreParens();
- const IntegerLiteral *IntLiteral2 = dyn_cast<IntegerLiteral>(LHSExpr2);
+ std::optional<llvm::APInt> IntLiteral2 =
+ getIntegerLiteralSubexpressionValue(LHSExpr2);
if (!IntLiteral2)
- IntLiteral2 = dyn_cast<IntegerLiteral>(RHSExpr2);
+ IntLiteral2 = getIntegerLiteralSubexpressionValue(RHSExpr2);
if (!IntLiteral2)
return TryResult();
- llvm::APInt L1 = IntLiteral->getValue();
- llvm::APInt L2 = IntLiteral2->getValue();
- if ((BitOp->getOpcode() == BO_And && (L2 & L1) != L1) ||
- (BitOp->getOpcode() == BO_Or && (L2 | L1) != L1)) {
+ if ((BitOp->getOpcode() == BO_And &&
+ (*IntLiteral2 & *IntLiteral1) != *IntLiteral1) ||
+ (BitOp->getOpcode() == BO_Or &&
+ (*IntLiteral2 | *IntLiteral1) != *IntLiteral1)) {
if (BuildOpts.Observer)
BuildOpts.Observer->compareBitwiseEquality(B,
B->getOpcode() != BO_EQ);
- TryResult(B->getOpcode() != BO_EQ);
+ return TryResult(B->getOpcode() != BO_EQ);
}
} else if (BoolExpr->isKnownToHaveBooleanValue()) {
- llvm::APInt IntValue = IntLiteral->getValue();
- if ((IntValue == 1) || (IntValue == 0)) {
+ if ((*IntLiteral1 == 1) || (*IntLiteral1 == 0)) {
return TryResult();
}
return TryResult(B->getOpcode() != BO_EQ);
@@ -1005,6 +1011,47 @@ private:
return TryResult();
}
+ // Helper function to get an APInt from an expression. Supports expressions
+ // which are an IntegerLiteral or a UnaryOperator and returns the value with
+ // all operations performed on it.
+ // FIXME: it would be good to unify this function with
+ // IsIntegerLiteralConstantExpr at some point given the similarity between the
+ // functions.
+ std::optional<llvm::APInt>
+ getIntegerLiteralSubexpressionValue(const Expr *E) {
+
+ // If unary.
+ if (const auto *UnOp = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
+ // Get the sub expression of the unary expression and get the Integer
+ // Literal.
+ const Expr *SubExpr = UnOp->getSubExpr()->IgnoreParens();
+
+ if (const auto *IntLiteral = dyn_cast<IntegerLiteral>(SubExpr)) {
+
+ llvm::APInt Value = IntLiteral->getValue();
+
+ // Perform the operation manually.
+ switch (UnOp->getOpcode()) {
+ case UO_Plus:
+ return Value;
+ case UO_Minus:
+ return -Value;
+ case UO_Not:
+ return ~Value;
+ case UO_LNot:
+ return llvm::APInt(Context->getTypeSize(Context->IntTy), !Value);
+ default:
+ assert(false && "Unexpected unary operator!");
+ return std::nullopt;
+ }
+ }
+ } else if (const auto *IntLiteral =
+ dyn_cast<IntegerLiteral>(E->IgnoreParens()))
+ return IntLiteral->getValue();
+
+ return std::nullopt;
+ }
+
TryResult analyzeLogicOperatorCondition(BinaryOperatorKind Relation,
const llvm::APSInt &Value1,
const llvm::APSInt &Value2) {
@@ -1027,16 +1074,41 @@ private:
}
}
- /// Find a pair of comparison expressions with or without parentheses
+ /// There are two checks handled by this function:
+ /// 1. Find a law-of-excluded-middle or law-of-noncontradiction expression
+ /// e.g. if (x || !x), if (x && !x)
+ /// 2. Find a pair of comparison expressions with or without parentheses
/// with a shared variable and constants and a logical operator between them
/// that always evaluates to either true or false.
/// e.g. if (x != 3 || x != 4)
TryResult checkIncorrectLogicOperator(const BinaryOperator *B) {
assert(B->isLogicalOp());
- const BinaryOperator *LHS =
- dyn_cast<BinaryOperator>(B->getLHS()->IgnoreParens());
- const BinaryOperator *RHS =
- dyn_cast<BinaryOperator>(B->getRHS()->IgnoreParens());
+ const Expr *LHSExpr = B->getLHS()->IgnoreParens();
+ const Expr *RHSExpr = B->getRHS()->IgnoreParens();
+
+ auto CheckLogicalOpWithNegatedVariable = [this, B](const Expr *E1,
+ const Expr *E2) {
+ if (const auto *Negate = dyn_cast<UnaryOperator>(E1)) {
+ if (Negate->getOpcode() == UO_LNot &&
+ Expr::isSameComparisonOperand(Negate->getSubExpr(), E2)) {
+ bool AlwaysTrue = B->getOpcode() == BO_LOr;
+ if (BuildOpts.Observer)
+ BuildOpts.Observer->logicAlwaysTrue(B, AlwaysTrue);
+ return TryResult(AlwaysTrue);
+ }
+ }
+ return TryResult();
+ };
+
+ TryResult Result = CheckLogicalOpWithNegatedVariable(LHSExpr, RHSExpr);
+ if (Result.isKnown())
+ return Result;
+ Result = CheckLogicalOpWithNegatedVariable(RHSExpr, LHSExpr);
+ if (Result.isKnown())
+ return Result;
+
+ const auto *LHS = dyn_cast<BinaryOperator>(LHSExpr);
+ const auto *RHS = dyn_cast<BinaryOperator>(RHSExpr);
if (!LHS || !RHS)
return {};
@@ -1278,11 +1350,24 @@ private:
return {};
}
- bool hasTrivialDestructor(VarDecl *VD);
+ bool hasTrivialDestructor(const VarDecl *VD) const;
+ bool needsAutomaticDestruction(const VarDecl *VD) const;
};
} // namespace
+Expr *
+clang::extractElementInitializerFromNestedAILE(const ArrayInitLoopExpr *AILE) {
+ if (!AILE)
+ return nullptr;
+
+ Expr *AILEInit = AILE->getSubExpr();
+ while (const auto *E = dyn_cast<ArrayInitLoopExpr>(AILEInit))
+ AILEInit = E->getSubExpr();
+
+ return AILEInit;
+}
+
inline bool AddStmtChoice::alwaysAdd(CFGBuilder &builder,
const Stmt *stmt) const {
return builder.alwaysAdd(stmt) || kind == AlwaysAdd;
@@ -1476,7 +1561,6 @@ void CFGBuilder::cleanupConstructionContext(Expr *E) {
ConstructionContextMap.erase(E);
}
-
/// BuildCFG - Constructs a CFG from an AST (a Stmt*). The AST can represent an
/// arbitrary statement. Examples include a single expression or a function
/// body (compound statement). The ownership of the returned CFG is
@@ -1494,9 +1578,6 @@ std::unique_ptr<CFG> CFGBuilder::buildCFG(const Decl *D, Stmt *Statement) {
assert(Succ == &cfg->getExit());
Block = nullptr; // the EXIT block is empty. Create all other blocks lazily.
- assert(!(BuildOpts.AddImplicitDtors && BuildOpts.AddLifetime) &&
- "AddImplicitDtors and AddLifetime cannot be used at the same time");
-
if (BuildOpts.AddImplicitDtors)
if (const CXXDestructorDecl *DD = dyn_cast_or_null<CXXDestructorDecl>(D))
addImplicitDtorsForDestructor(DD);
@@ -1560,16 +1641,11 @@ std::unique_ptr<CFG> CFGBuilder::buildCFG(const Decl *D, Stmt *Statement) {
if (LI == LabelMap.end())
continue;
JumpTarget JT = LI->second;
- prependAutomaticObjLifetimeWithTerminator(B, I->scopePosition,
- JT.scopePosition);
- prependAutomaticObjDtorsWithTerminator(B, I->scopePosition,
- JT.scopePosition);
- const VarDecl *VD = prependAutomaticObjScopeEndWithTerminator(
- B, I->scopePosition, JT.scopePosition);
- appendScopeBegin(JT.block, VD, G);
- addSuccessor(B, JT.block);
- };
- if (auto *G = dyn_cast<GCCAsmStmt>(B->getTerminator())) {
+
+ CFGBlock *SuccBlk = createScopeChangesHandlingBlock(
+ I->scopePosition, B, JT.scopePosition, JT.block);
+ addSuccessor(B, SuccBlk);
+ } else if (auto *G = dyn_cast<GCCAsmStmt>(B->getTerminator())) {
CFGBlock *Successor = (I+1)->block;
for (auto *L : G->labels()) {
LabelMapTy::iterator LI = LabelMap.find(L->getLabel());
@@ -1612,7 +1688,7 @@ std::unique_ptr<CFG> CFGBuilder::buildCFG(const Decl *D, Stmt *Statement) {
}
/// createBlock - Used to lazily create blocks that are connected
-/// to the current (global) succcessor.
+/// to the current (global) successor.
CFGBlock *CFGBuilder::createBlock(bool add_successor) {
CFGBlock *B = cfg->createBlock();
if (add_successor && Succ)
@@ -1655,9 +1731,14 @@ CFGBlock *CFGBuilder::addInitializer(CXXCtorInitializer *I) {
appendInitializer(Block, I);
if (Init) {
+ // If the initializer is an ArrayInitLoopExpr, we want to extract the
+ // initializer, that's used for each element.
+ auto *AILEInit = extractElementInitializerFromNestedAILE(
+ dyn_cast<ArrayInitLoopExpr>(Init));
+
findConstructionContexts(
ConstructionContextLayer::create(cfg->getBumpVectorContext(), I),
- Init);
+ AILEInit ? AILEInit : Init);
if (HasTemporaries) {
// For expression with temporaries go directly to subexpression to omit
@@ -1731,153 +1812,198 @@ void CFGBuilder::addLoopExit(const Stmt *LoopStmt){
appendLoopExit(Block, LoopStmt);
}
-void CFGBuilder::getDeclsWithEndedScope(LocalScope::const_iterator B,
- LocalScope::const_iterator E, Stmt *S) {
- if (!BuildOpts.AddScopes)
+/// Adds the CFG elements for leaving the scope of automatic objects in
+/// range [B, E). This include following:
+/// * AutomaticObjectDtor for variables with non-trivial destructor
+/// * LifetimeEnds for all variables
+/// * ScopeEnd for each scope left
+void CFGBuilder::addAutomaticObjHandling(LocalScope::const_iterator B,
+ LocalScope::const_iterator E,
+ Stmt *S) {
+ if (!BuildOpts.AddScopes && !BuildOpts.AddImplicitDtors &&
+ !BuildOpts.AddLifetime)
return;
if (B == E)
return;
- // To go from B to E, one first goes up the scopes from B to P
- // then sideways in one scope from P to P' and then down
- // the scopes from P' to E.
- // The lifetime of all objects between B and P end.
- LocalScope::const_iterator P = B.shared_parent(E);
- int Dist = B.distance(P);
- if (Dist <= 0)
+ // Not leaving the scope, only need to handle destruction and lifetime
+ if (B.inSameLocalScope(E)) {
+ addAutomaticObjDestruction(B, E, S);
return;
+ }
- for (LocalScope::const_iterator I = B; I != P; ++I)
- if (I.pointsToFirstDeclaredVar())
- DeclsWithEndedScope.insert(*I);
-}
+ // Extract information about all local scopes that are left
+ SmallVector<LocalScope::const_iterator, 10> LocalScopeEndMarkers;
+ LocalScopeEndMarkers.push_back(B);
+ for (LocalScope::const_iterator I = B; I != E; ++I) {
+ if (!I.inSameLocalScope(LocalScopeEndMarkers.back()))
+ LocalScopeEndMarkers.push_back(I);
+ }
+ LocalScopeEndMarkers.push_back(E);
+
+ // We need to leave the scope in reverse order, so we reverse the end
+ // markers
+ std::reverse(LocalScopeEndMarkers.begin(), LocalScopeEndMarkers.end());
+ auto Pairwise =
+ llvm::zip(LocalScopeEndMarkers, llvm::drop_begin(LocalScopeEndMarkers));
+ for (auto [E, B] : Pairwise) {
+ if (!B.inSameLocalScope(E))
+ addScopeExitHandling(B, E, S);
+ addAutomaticObjDestruction(B, E, S);
+ }
+}
+
+/// Add CFG elements corresponding to call destructor and end of lifetime
+/// of all automatic variables with non-trivial destructor in range [B, E).
+/// This include AutomaticObjectDtor and LifetimeEnds elements.
+void CFGBuilder::addAutomaticObjDestruction(LocalScope::const_iterator B,
+ LocalScope::const_iterator E,
+ Stmt *S) {
+ if (!BuildOpts.AddImplicitDtors && !BuildOpts.AddLifetime)
+ return;
-void CFGBuilder::addAutomaticObjHandling(LocalScope::const_iterator B,
- LocalScope::const_iterator E,
- Stmt *S) {
- getDeclsWithEndedScope(B, E, S);
- if (BuildOpts.AddScopes)
- addScopesEnd(B, E, S);
- if (BuildOpts.AddImplicitDtors)
- addAutomaticObjDtors(B, E, S);
- if (BuildOpts.AddLifetime)
- addLifetimeEnds(B, E, S);
+ if (B == E)
+ return;
+
+ SmallVector<VarDecl *, 10> DeclsNeedDestruction;
+ DeclsNeedDestruction.reserve(B.distance(E));
+
+ for (VarDecl* D : llvm::make_range(B, E))
+ if (needsAutomaticDestruction(D))
+ DeclsNeedDestruction.push_back(D);
+
+ for (VarDecl *VD : llvm::reverse(DeclsNeedDestruction)) {
+ if (BuildOpts.AddImplicitDtors) {
+ // If this destructor is marked as a no-return destructor, we need to
+ // create a new block for the destructor which does not have as a
+ // successor anything built thus far: control won't flow out of this
+ // block.
+ QualType Ty = VD->getType();
+ if (Ty->isReferenceType())
+ Ty = getReferenceInitTemporaryType(VD->getInit());
+ Ty = Context->getBaseElementType(Ty);
+
+ const CXXRecordDecl *CRD = Ty->getAsCXXRecordDecl();
+ if (CRD && CRD->isAnyDestructorNoReturn())
+ Block = createNoReturnBlock();
+ }
+
+ autoCreateBlock();
+
+ // Add LifetimeEnd after automatic obj with non-trivial destructors,
+ // as they end their lifetime when the destructor returns. For trivial
+ // objects, we end lifetime with scope end.
+ if (BuildOpts.AddLifetime)
+ appendLifetimeEnds(Block, VD, S);
+ if (BuildOpts.AddImplicitDtors && !hasTrivialDestructor(VD))
+ appendAutomaticObjDtor(Block, VD, S);
+ if (VD->hasAttr<CleanupAttr>())
+ appendCleanupFunction(Block, VD);
+ }
}
-/// Add to current block automatic objects that leave the scope.
-void CFGBuilder::addLifetimeEnds(LocalScope::const_iterator B,
- LocalScope::const_iterator E, Stmt *S) {
- if (!BuildOpts.AddLifetime)
+/// Add CFG elements corresponding to leaving a scope.
+/// Assumes that range [B, E) corresponds to single scope.
+/// This add following elements:
+/// * LifetimeEnds for all variables with non-trivial destructor
+/// * ScopeEnd for each scope left
+void CFGBuilder::addScopeExitHandling(LocalScope::const_iterator B,
+ LocalScope::const_iterator E, Stmt *S) {
+ assert(!B.inSameLocalScope(E));
+ if (!BuildOpts.AddLifetime && !BuildOpts.AddScopes)
return;
- if (B == E)
- return;
+ if (BuildOpts.AddScopes) {
+ autoCreateBlock();
+ appendScopeEnd(Block, B.getFirstVarInScope(), S);
+ }
- // To go from B to E, one first goes up the scopes from B to P
- // then sideways in one scope from P to P' and then down
- // the scopes from P' to E.
- // The lifetime of all objects between B and P end.
- LocalScope::const_iterator P = B.shared_parent(E);
- int dist = B.distance(P);
- if (dist <= 0)
+ if (!BuildOpts.AddLifetime)
return;
// We need to perform the scope leaving in reverse order
SmallVector<VarDecl *, 10> DeclsTrivial;
- SmallVector<VarDecl *, 10> DeclsNonTrivial;
- DeclsTrivial.reserve(dist);
- DeclsNonTrivial.reserve(dist);
+ DeclsTrivial.reserve(B.distance(E));
- for (LocalScope::const_iterator I = B; I != P; ++I)
- if (hasTrivialDestructor(*I))
- DeclsTrivial.push_back(*I);
- else
- DeclsNonTrivial.push_back(*I);
+ // Objects with trivial destructor ends their lifetime when their storage
+ // is destroyed, for automatic variables, this happens when the end of the
+ // scope is added.
+ for (VarDecl* D : llvm::make_range(B, E))
+ if (!needsAutomaticDestruction(D))
+ DeclsTrivial.push_back(D);
- autoCreateBlock();
- // object with trivial destructor end their lifetime last (when storage
- // duration ends)
- for (SmallVectorImpl<VarDecl *>::reverse_iterator I = DeclsTrivial.rbegin(),
- E = DeclsTrivial.rend();
- I != E; ++I)
- appendLifetimeEnds(Block, *I, S);
-
- for (SmallVectorImpl<VarDecl *>::reverse_iterator
- I = DeclsNonTrivial.rbegin(),
- E = DeclsNonTrivial.rend();
- I != E; ++I)
- appendLifetimeEnds(Block, *I, S);
-}
-
-/// Add to current block markers for ending scopes.
-void CFGBuilder::addScopesEnd(LocalScope::const_iterator B,
- LocalScope::const_iterator E, Stmt *S) {
- // If implicit destructors are enabled, we'll add scope ends in
- // addAutomaticObjDtors.
- if (BuildOpts.AddImplicitDtors)
+ if (DeclsTrivial.empty())
return;
autoCreateBlock();
-
- for (auto I = DeclsWithEndedScope.rbegin(), E = DeclsWithEndedScope.rend();
- I != E; ++I)
- appendScopeEnd(Block, *I, S);
-
- return;
+ for (VarDecl *VD : llvm::reverse(DeclsTrivial))
+ appendLifetimeEnds(Block, VD, S);
}
-/// addAutomaticObjDtors - Add to current block automatic objects destructors
-/// for objects in range of local scope positions. Use S as trigger statement
-/// for destructors.
-void CFGBuilder::addAutomaticObjDtors(LocalScope::const_iterator B,
- LocalScope::const_iterator E, Stmt *S) {
- if (!BuildOpts.AddImplicitDtors)
+/// addScopeChangesHandling - appends information about destruction, lifetime
+/// and cfgScopeEnd for variables in the scope that was left by the jump, and
+/// appends cfgScopeBegin for all scopes that where entered.
+/// We insert the cfgScopeBegin at the end of the jump node, as depending on
+/// the sourceBlock, each goto, may enter different amount of scopes.
+void CFGBuilder::addScopeChangesHandling(LocalScope::const_iterator SrcPos,
+ LocalScope::const_iterator DstPos,
+ Stmt *S) {
+ assert(Block && "Source block should be always crated");
+ if (!BuildOpts.AddImplicitDtors && !BuildOpts.AddLifetime &&
+ !BuildOpts.AddScopes) {
return;
+ }
- if (B == E)
+ if (SrcPos == DstPos)
return;
- // We need to append the destructors in reverse order, but any one of them
- // may be a no-return destructor which changes the CFG. As a result, buffer
- // this sequence up and replay them in reverse order when appending onto the
- // CFGBlock(s).
- SmallVector<VarDecl*, 10> Decls;
- Decls.reserve(B.distance(E));
- for (LocalScope::const_iterator I = B; I != E; ++I)
- Decls.push_back(*I);
-
- for (SmallVectorImpl<VarDecl*>::reverse_iterator I = Decls.rbegin(),
- E = Decls.rend();
- I != E; ++I) {
- if (hasTrivialDestructor(*I)) {
- // If AddScopes is enabled and *I is a first variable in a scope, add a
- // ScopeEnd marker in a Block.
- if (BuildOpts.AddScopes && DeclsWithEndedScope.count(*I)) {
- autoCreateBlock();
- appendScopeEnd(Block, *I, S);
- }
- continue;
- }
- // If this destructor is marked as a no-return destructor, we need to
- // create a new block for the destructor which does not have as a successor
- // anything built thus far: control won't flow out of this block.
- QualType Ty = (*I)->getType();
- if (Ty->isReferenceType()) {
- Ty = getReferenceInitTemporaryType((*I)->getInit());
- }
- Ty = Context->getBaseElementType(Ty);
-
- if (Ty->getAsCXXRecordDecl()->isAnyDestructorNoReturn())
- Block = createNoReturnBlock();
- else
- autoCreateBlock();
+ // Get common scope, the jump leaves all scopes [SrcPos, BasePos), and
+ // enter all scopes between [DstPos, BasePos)
+ LocalScope::const_iterator BasePos = SrcPos.shared_parent(DstPos);
- // Add ScopeEnd just after automatic obj destructor.
- if (BuildOpts.AddScopes && DeclsWithEndedScope.count(*I))
- appendScopeEnd(Block, *I, S);
- appendAutomaticObjDtor(Block, *I, S);
+ // Append scope begins for scopes entered by goto
+ if (BuildOpts.AddScopes && !DstPos.inSameLocalScope(BasePos)) {
+ for (LocalScope::const_iterator I = DstPos; I != BasePos; ++I)
+ if (I.pointsToFirstDeclaredVar())
+ appendScopeBegin(Block, *I, S);
}
+
+ // Append scopeEnds, destructor and lifetime with the terminator for
+ // block left by goto.
+ addAutomaticObjHandling(SrcPos, BasePos, S);
+}
+
+/// createScopeChangesHandlingBlock - Creates a block with cfgElements
+/// corresponding to changing the scope from the source scope of the GotoStmt,
+/// to destination scope. Add destructor, lifetime and cfgScopeEnd
+/// CFGElements to newly created CFGBlock, that will have the CFG terminator
+/// transferred.
+CFGBlock *CFGBuilder::createScopeChangesHandlingBlock(
+ LocalScope::const_iterator SrcPos, CFGBlock *SrcBlk,
+ LocalScope::const_iterator DstPos, CFGBlock *DstBlk) {
+ if (SrcPos == DstPos)
+ return DstBlk;
+
+ if (!BuildOpts.AddImplicitDtors && !BuildOpts.AddLifetime &&
+ (!BuildOpts.AddScopes || SrcPos.inSameLocalScope(DstPos)))
+ return DstBlk;
+
+ // We will update CFBBuilder when creating new block, restore the
+ // previous state at exit.
+ SaveAndRestore save_Block(Block), save_Succ(Succ);
+
+ // Create a new block, and transfer terminator
+ Block = createBlock(false);
+ Block->setTerminator(SrcBlk->getTerminator());
+ SrcBlk->setTerminator(CFGTerminator());
+ addSuccessor(Block, DstBlk);
+
+ // Fill the created Block with the required elements.
+ addScopeChangesHandling(SrcPos, DstPos, Block->getTerminatorStmt());
+
+ assert(Block && "There should be at least one scope changing Block");
+ return Block;
}
/// addImplicitDtorsForDestructor - Add implicit destructors generated for
@@ -1893,7 +2019,7 @@ void CFGBuilder::addImplicitDtorsForDestructor(const CXXDestructorDecl *DD) {
// (which is different from the current class) is responsible for
// destroying them.
const CXXRecordDecl *CD = VI.getType()->getAsCXXRecordDecl();
- if (!CD->hasTrivialDestructor()) {
+ if (CD && !CD->hasTrivialDestructor()) {
autoCreateBlock();
appendBaseDtor(Block, &VI);
}
@@ -1903,7 +2029,7 @@ void CFGBuilder::addImplicitDtorsForDestructor(const CXXDestructorDecl *DD) {
for (const auto &BI : RD->bases()) {
if (!BI.isVirtual()) {
const CXXRecordDecl *CD = BI.getType()->getAsCXXRecordDecl();
- if (!CD->hasTrivialDestructor()) {
+ if (CD && !CD->hasTrivialDestructor()) {
autoCreateBlock();
appendBaseDtor(Block, &BI);
}
@@ -1914,9 +2040,10 @@ void CFGBuilder::addImplicitDtorsForDestructor(const CXXDestructorDecl *DD) {
for (auto *FI : RD->fields()) {
// Check for constant size array. Set type to array element type.
QualType QT = FI->getType();
- if (const ConstantArrayType *AT = Context->getAsConstantArrayType(QT)) {
+ // It may be a multidimensional array.
+ while (const ConstantArrayType *AT = Context->getAsConstantArrayType(QT)) {
if (AT->getSize() == 0)
- continue;
+ break;
QT = AT->getElementType();
}
@@ -1934,8 +2061,7 @@ LocalScope* CFGBuilder::createOrReuseLocalScope(LocalScope* Scope) {
if (Scope)
return Scope;
llvm::BumpPtrAllocator &alloc = cfg->getAllocator();
- return new (alloc.Allocate<LocalScope>())
- LocalScope(BumpVectorContext(alloc), ScopePos);
+ return new (alloc) LocalScope(BumpVectorContext(alloc), ScopePos);
}
/// addLocalScopeForStmt - Add LocalScope to local scopes tree for statement
@@ -1977,7 +2103,11 @@ LocalScope* CFGBuilder::addLocalScopeForDeclStmt(DeclStmt *DS,
return Scope;
}
-bool CFGBuilder::hasTrivialDestructor(VarDecl *VD) {
+bool CFGBuilder::needsAutomaticDestruction(const VarDecl *VD) const {
+ return !hasTrivialDestructor(VD) || VD->hasAttr<CleanupAttr>();
+}
+
+bool CFGBuilder::hasTrivialDestructor(const VarDecl *VD) const {
// Check for const references bound to temporary. Set type to pointee.
QualType QT = VD->getType();
if (QT->isReferenceType()) {
@@ -2022,32 +2152,20 @@ bool CFGBuilder::hasTrivialDestructor(VarDecl *VD) {
/// const reference. Will reuse Scope if not NULL.
LocalScope* CFGBuilder::addLocalScopeForVarDecl(VarDecl *VD,
LocalScope* Scope) {
- assert(!(BuildOpts.AddImplicitDtors && BuildOpts.AddLifetime) &&
- "AddImplicitDtors and AddLifetime cannot be used at the same time");
if (!BuildOpts.AddImplicitDtors && !BuildOpts.AddLifetime &&
!BuildOpts.AddScopes)
return Scope;
// Check if variable is local.
- switch (VD->getStorageClass()) {
- case SC_None:
- case SC_Auto:
- case SC_Register:
- break;
- default: return Scope;
- }
+ if (!VD->hasLocalStorage())
+ return Scope;
- if (BuildOpts.AddImplicitDtors) {
- if (!hasTrivialDestructor(VD) || BuildOpts.AddScopes) {
- // Add the variable to scope
- Scope = createOrReuseLocalScope(Scope);
- Scope->addVar(VD);
- ScopePos = Scope->begin();
- }
+ if (!BuildOpts.AddLifetime && !BuildOpts.AddScopes &&
+ !needsAutomaticDestruction(VD)) {
+ assert(BuildOpts.AddImplicitDtors);
return Scope;
}
- assert(BuildOpts.AddLifetime);
// Add the variable to scope
Scope = createOrReuseLocalScope(Scope);
Scope->addVar(VD);
@@ -2063,63 +2181,6 @@ void CFGBuilder::addLocalScopeAndDtors(Stmt *S) {
addAutomaticObjHandling(ScopePos, scopeBeginPos, S);
}
-/// prependAutomaticObjDtorsWithTerminator - Prepend destructor CFGElements for
-/// variables with automatic storage duration to CFGBlock's elements vector.
-/// Elements will be prepended to physical beginning of the vector which
-/// happens to be logical end. Use blocks terminator as statement that specifies
-/// destructors call site.
-/// FIXME: This mechanism for adding automatic destructors doesn't handle
-/// no-return destructors properly.
-void CFGBuilder::prependAutomaticObjDtorsWithTerminator(CFGBlock *Blk,
- LocalScope::const_iterator B, LocalScope::const_iterator E) {
- if (!BuildOpts.AddImplicitDtors)
- return;
- BumpVectorContext &C = cfg->getBumpVectorContext();
- CFGBlock::iterator InsertPos
- = Blk->beginAutomaticObjDtorsInsert(Blk->end(), B.distance(E), C);
- for (LocalScope::const_iterator I = B; I != E; ++I)
- InsertPos = Blk->insertAutomaticObjDtor(InsertPos, *I,
- Blk->getTerminatorStmt());
-}
-
-/// prependAutomaticObjLifetimeWithTerminator - Prepend lifetime CFGElements for
-/// variables with automatic storage duration to CFGBlock's elements vector.
-/// Elements will be prepended to physical beginning of the vector which
-/// happens to be logical end. Use blocks terminator as statement that specifies
-/// where lifetime ends.
-void CFGBuilder::prependAutomaticObjLifetimeWithTerminator(
- CFGBlock *Blk, LocalScope::const_iterator B, LocalScope::const_iterator E) {
- if (!BuildOpts.AddLifetime)
- return;
- BumpVectorContext &C = cfg->getBumpVectorContext();
- CFGBlock::iterator InsertPos =
- Blk->beginLifetimeEndsInsert(Blk->end(), B.distance(E), C);
- for (LocalScope::const_iterator I = B; I != E; ++I) {
- InsertPos =
- Blk->insertLifetimeEnds(InsertPos, *I, Blk->getTerminatorStmt());
- }
-}
-
-/// prependAutomaticObjScopeEndWithTerminator - Prepend scope end CFGElements for
-/// variables with automatic storage duration to CFGBlock's elements vector.
-/// Elements will be prepended to physical beginning of the vector which
-/// happens to be logical end. Use blocks terminator as statement that specifies
-/// where scope ends.
-const VarDecl *
-CFGBuilder::prependAutomaticObjScopeEndWithTerminator(
- CFGBlock *Blk, LocalScope::const_iterator B, LocalScope::const_iterator E) {
- if (!BuildOpts.AddScopes)
- return nullptr;
- BumpVectorContext &C = cfg->getBumpVectorContext();
- CFGBlock::iterator InsertPos =
- Blk->beginScopeEndInsert(Blk->end(), 1, C);
- LocalScope::const_iterator PlaceToInsert = B;
- for (LocalScope::const_iterator I = B; I != E; ++I)
- PlaceToInsert = I;
- Blk->insertScopeEnd(InsertPos, *PlaceToInsert, Blk->getTerminatorStmt());
- return *PlaceToInsert;
-}
-
/// Visit - Walk the subtree of a statement and add extra
/// blocks for ternary operators, &&, and ||. We also process "," and
/// DeclStmts (which may contain nested control-flow).
@@ -2149,6 +2210,9 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc,
case Stmt::InitListExprClass:
return VisitInitListExpr(cast<InitListExpr>(S), asc);
+ case Stmt::AttributedStmtClass:
+ return VisitAttributedStmt(cast<AttributedStmt>(S), asc);
+
case Stmt::AddrLabelExprClass:
return VisitAddrLabelExpr(cast<AddrLabelExpr>(S), asc);
@@ -2197,8 +2261,7 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc,
// FIXME: The expression inside a CXXDefaultArgExpr is owned by the
// called function's declaration, not by the caller. If we simply add
// this expression to the CFG, we could end up with the same Expr
- // appearing multiple times.
- // PR13385 / <rdar://problem/12156507>
+ // appearing multiple times (PR13385).
//
// It's likewise possible for multiple CXXDefaultInitExprs for the same
// expression to be used in the same function (through aggregate
@@ -2229,6 +2292,9 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc,
case Stmt::CXXTryStmtClass:
return VisitCXXTryStmt(cast<CXXTryStmt>(S));
+ case Stmt::CXXTypeidExprClass:
+ return VisitCXXTypeidExpr(cast<CXXTypeidExpr>(S), asc);
+
case Stmt::CXXForRangeStmtClass:
return VisitCXXForRangeStmt(cast<CXXForRangeStmt>(S));
@@ -2282,7 +2348,7 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc,
return VisitObjCAtCatchStmt(cast<ObjCAtCatchStmt>(S));
case Stmt::ObjCAutoreleasePoolStmtClass:
- return VisitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(S));
+ return VisitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(S));
case Stmt::ObjCAtSynchronizedStmtClass:
return VisitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(S));
@@ -2309,6 +2375,10 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc,
case Stmt::CoreturnStmtClass:
return VisitReturnStmt(S);
+ case Stmt::CoyieldExprClass:
+ case Stmt::CoawaitExprClass:
+ return VisitCoroutineSuspendExpr(cast<CoroutineSuspendExpr>(S), asc);
+
case Stmt::SEHExceptStmtClass:
return VisitSEHExceptStmt(cast<SEHExceptStmt>(S));
@@ -2336,6 +2406,9 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc,
case Stmt::WhileStmtClass:
return VisitWhileStmt(cast<WhileStmt>(S));
+
+ case Stmt::ArrayInitLoopExprClass:
+ return VisitArrayInitLoopExpr(cast<ArrayInitLoopExpr>(S), asc);
}
}
@@ -2398,8 +2471,32 @@ CFGBlock *CFGBuilder::VisitAddrLabelExpr(AddrLabelExpr *A,
return Block;
}
-CFGBlock *CFGBuilder::VisitUnaryOperator(UnaryOperator *U,
- AddStmtChoice asc) {
+static bool isFallthroughStatement(const AttributedStmt *A) {
+ bool isFallthrough = hasSpecificAttr<FallThroughAttr>(A->getAttrs());
+ assert((!isFallthrough || isa<NullStmt>(A->getSubStmt())) &&
+ "expected fallthrough not to have children");
+ return isFallthrough;
+}
+
+CFGBlock *CFGBuilder::VisitAttributedStmt(AttributedStmt *A,
+ AddStmtChoice asc) {
+ // AttributedStmts for [[likely]] can have arbitrary statements as children,
+ // and the current visitation order here would add the AttributedStmts
+ // for [[likely]] after the child nodes, which is undesirable: For example,
+ // if the child contains an unconditional return, the [[likely]] would be
+ // considered unreachable.
+ // So only add the AttributedStmt for FallThrough, which has CFG effects and
+ // also no children, and omit the others. None of the other current StmtAttrs
+ // have semantic meaning for the CFG.
+ if (isFallthroughStatement(A) && asc.alwaysAdd(*this, A)) {
+ autoCreateBlock();
+ appendStmt(Block, A);
+ }
+
+ return VisitChildren(A);
+}
+
+CFGBlock *CFGBuilder::VisitUnaryOperator(UnaryOperator *U, AddStmtChoice asc) {
if (asc.alwaysAdd(*this, U)) {
autoCreateBlock();
appendStmt(Block, U);
@@ -2711,7 +2808,8 @@ CFGBlock *CFGBuilder::VisitChooseExpr(ChooseExpr *C,
return addStmt(C->getCond());
}
-CFGBlock *CFGBuilder::VisitCompoundStmt(CompoundStmt *C, bool ExternallyDestructed) {
+CFGBlock *CFGBuilder::VisitCompoundStmt(CompoundStmt *C,
+ bool ExternallyDestructed) {
LocalScope::const_iterator scopeBeginPos = ScopePos;
addLocalScopeForStmt(C);
@@ -2723,11 +2821,10 @@ CFGBlock *CFGBuilder::VisitCompoundStmt(CompoundStmt *C, bool ExternallyDestruct
CFGBlock *LastBlock = Block;
- for (CompoundStmt::reverse_body_iterator I=C->body_rbegin(), E=C->body_rend();
- I != E; ++I ) {
+ for (Stmt *S : llvm::reverse(C->body())) {
// If we hit a segment of code just containing ';' (NullStmts), we can
// get a null block back. In such cases, just use the LastBlock
- CFGBlock *newBlock = Visit(*I, AddStmtChoice::AlwaysAdd,
+ CFGBlock *newBlock = Visit(S, AddStmtChoice::AlwaysAdd,
ExternallyDestructed);
if (newBlock)
@@ -2902,12 +2999,30 @@ CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt *DS) {
}
}
+ // If we bind to a tuple-like type, we iterate over the HoldingVars, and
+ // create a DeclStmt for each of them.
+ if (const auto *DD = dyn_cast<DecompositionDecl>(VD)) {
+ for (auto *BD : llvm::reverse(DD->bindings())) {
+ if (auto *VD = BD->getHoldingVar()) {
+ DeclGroupRef DG(VD);
+ DeclStmt *DSNew =
+ new (Context) DeclStmt(DG, VD->getLocation(), GetEndLoc(VD));
+ cfg->addSyntheticDeclStmt(DSNew, DS);
+ Block = VisitDeclSubExpr(DSNew);
+ }
+ }
+ }
+
autoCreateBlock();
appendStmt(Block, DS);
+ // If the initializer is an ArrayInitLoopExpr, we want to extract the
+ // initializer, that's used for each element.
+ const auto *AILE = dyn_cast_or_null<ArrayInitLoopExpr>(Init);
+
findConstructionContexts(
ConstructionContextLayer::create(cfg->getBumpVectorContext(), DS),
- Init);
+ AILE ? AILE->getSubExpr() : Init);
// Keep track of the last non-null block, as 'Block' can be nulled out
// if the initializer expression is something like a 'while' in a
@@ -2966,7 +3081,7 @@ CFGBlock *CFGBuilder::VisitIfStmt(IfStmt *I) {
// Save local scope position because in case of condition variable ScopePos
// won't be restored when traversing AST.
- SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+ SaveAndRestore save_scope_pos(ScopePos);
// Create local scope for C++17 if init-stmt if one exists.
if (Stmt *Init = I->getInit())
@@ -2991,7 +3106,7 @@ CFGBlock *CFGBuilder::VisitIfStmt(IfStmt *I) {
CFGBlock *ElseBlock = Succ;
if (Stmt *Else = I->getElse()) {
- SaveAndRestore<CFGBlock*> sv(Succ);
+ SaveAndRestore sv(Succ);
// NULL out Block so that the recursive call to Visit will
// create a new basic block.
@@ -3017,7 +3132,7 @@ CFGBlock *CFGBuilder::VisitIfStmt(IfStmt *I) {
{
Stmt *Then = I->getThen();
assert(Then);
- SaveAndRestore<CFGBlock*> sv(Succ);
+ SaveAndRestore sv(Succ);
Block = nullptr;
// If branch is not a compound statement create implicit scope
@@ -3047,7 +3162,7 @@ CFGBlock *CFGBuilder::VisitIfStmt(IfStmt *I) {
// control-flow transfer of '&&' or '||' go directly into the then/else
// blocks directly.
BinaryOperator *Cond =
- I->getConditionVariable()
+ (I->isConsteval() || I->getConditionVariable())
? nullptr
: dyn_cast<BinaryOperator>(I->getCond()->IgnoreParens());
CFGBlock *LastBlock;
@@ -3061,7 +3176,9 @@ CFGBlock *CFGBuilder::VisitIfStmt(IfStmt *I) {
Block->setTerminator(I);
// See if this is a known constant.
- const TryResult &KnownVal = tryEvaluateBool(I->getCond());
+ TryResult KnownVal;
+ if (!I->isConsteval())
+ KnownVal = tryEvaluateBool(I->getCond());
// Add the successors. If we know that specific branches are
// unreachable, inform addSuccessor() of that knowledge.
@@ -3122,9 +3239,41 @@ CFGBlock *CFGBuilder::VisitReturnStmt(Stmt *S) {
if (Expr *O = RS->getRetValue())
return Visit(O, AddStmtChoice::AlwaysAdd, /*ExternallyDestructed=*/true);
return Block;
- } else { // co_return
- return VisitChildren(S);
}
+
+ CoreturnStmt *CRS = cast<CoreturnStmt>(S);
+ auto *B = Block;
+ if (CFGBlock *R = Visit(CRS->getPromiseCall()))
+ B = R;
+
+ if (Expr *RV = CRS->getOperand())
+ if (RV->getType()->isVoidType() && !isa<InitListExpr>(RV))
+ // A non-initlist void expression.
+ if (CFGBlock *R = Visit(RV))
+ B = R;
+
+ return B;
+}
+
+CFGBlock *CFGBuilder::VisitCoroutineSuspendExpr(CoroutineSuspendExpr *E,
+ AddStmtChoice asc) {
+ // We're modelling the pre-coro-xform CFG. Thus just evalate the various
+ // active components of the co_await or co_yield. Note we do not model the
+ // edge from the builtin_suspend to the exit node.
+ if (asc.alwaysAdd(*this, E)) {
+ autoCreateBlock();
+ appendStmt(Block, E);
+ }
+ CFGBlock *B = Block;
+ if (auto *R = Visit(E->getResumeExpr()))
+ B = R;
+ if (auto *R = Visit(E->getSuspendExpr()))
+ B = R;
+ if (auto *R = Visit(E->getReadyExpr()))
+ B = R;
+ if (auto *R = Visit(E->getCommonExpr()))
+ B = R;
+ return B;
}
CFGBlock *CFGBuilder::VisitSEHExceptStmt(SEHExceptStmt *ES) {
@@ -3133,7 +3282,7 @@ CFGBlock *CFGBuilder::VisitSEHExceptStmt(SEHExceptStmt *ES) {
// Save local scope position because in case of exception variable ScopePos
// won't be restored when traversing AST.
- SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+ SaveAndRestore save_scope_pos(ScopePos);
addStmt(ES->getBlock());
CFGBlock *SEHExceptBlock = Block;
@@ -3223,14 +3372,13 @@ CFGBlock *CFGBuilder::VisitSEHTryStmt(SEHTryStmt *Terminator) {
Succ = SEHTrySuccessor;
// Save the current "__try" context.
- SaveAndRestore<CFGBlock *> save_try(TryTerminatedBlock,
- NewTryTerminatedBlock);
+ SaveAndRestore SaveTry(TryTerminatedBlock, NewTryTerminatedBlock);
cfg->addTryDispatchBlock(TryTerminatedBlock);
// Save the current value for the __leave target.
// All __leaves should go to the code following the __try
// (FIXME: or if the __try has a __finally, to the __finally.)
- SaveAndRestore<JumpTarget> save_break(SEHLeaveJumpTarget);
+ SaveAndRestore save_break(SEHLeaveJumpTarget);
SEHLeaveJumpTarget = JumpTarget(SEHTrySuccessor, ScopePos);
assert(Terminator->getTryBlock() && "__try must contain a non-NULL body");
@@ -3246,8 +3394,7 @@ CFGBlock *CFGBuilder::VisitLabelStmt(LabelStmt *L) {
if (!LabelBlock) // This can happen when the body is empty, i.e.
LabelBlock = createBlock(); // scopes that only contains NullStmts.
- assert(LabelMap.find(L->getDecl()) == LabelMap.end() &&
- "label already in map");
+ assert(!LabelMap.contains(L->getDecl()) && "label already in map");
LabelMap[L->getDecl()] = JumpTarget(LabelBlock, ScopePos);
// Labels partition blocks, so this is the end of the basic block we were
@@ -3258,7 +3405,7 @@ CFGBlock *CFGBuilder::VisitLabelStmt(LabelStmt *L) {
if (badCFG)
return nullptr;
- // We set Block to NULL to allow lazy creation of a new block (if necessary);
+ // We set Block to NULL to allow lazy creation of a new block (if necessary).
Block = nullptr;
// This block is now the implicit successor of other blocks.
@@ -3281,9 +3428,21 @@ CFGBlock *CFGBuilder::VisitBlockExpr(BlockExpr *E, AddStmtChoice asc) {
CFGBlock *CFGBuilder::VisitLambdaExpr(LambdaExpr *E, AddStmtChoice asc) {
CFGBlock *LastBlock = VisitNoRecurse(E, asc);
+
+ unsigned Idx = 0;
for (LambdaExpr::capture_init_iterator it = E->capture_init_begin(),
- et = E->capture_init_end(); it != et; ++it) {
+ et = E->capture_init_end();
+ it != et; ++it, ++Idx) {
if (Expr *Init = *it) {
+ // If the initializer is an ArrayInitLoopExpr, we want to extract the
+ // initializer, that's used for each element.
+ auto *AILEInit = extractElementInitializerFromNestedAILE(
+ dyn_cast<ArrayInitLoopExpr>(Init));
+
+ findConstructionContexts(ConstructionContextLayer::create(
+ cfg->getBumpVectorContext(), {E, Idx}),
+ AILEInit ? AILEInit : Init);
+
CFGBlock *Tmp = Visit(Init);
if (Tmp)
LastBlock = Tmp;
@@ -3307,8 +3466,8 @@ CFGBlock *CFGBuilder::VisitGotoStmt(GotoStmt *G) {
BackpatchBlocks.push_back(JumpSource(Block, ScopePos));
else {
JumpTarget JT = I->second;
- addAutomaticObjHandling(ScopePos, JT.scopePosition, G);
addSuccessor(Block, JT.block);
+ addScopeChangesHandling(ScopePos, JT.scopePosition, G);
}
return Block;
@@ -3333,7 +3492,7 @@ CFGBlock *CFGBuilder::VisitGCCAsmStmt(GCCAsmStmt *G, AddStmtChoice asc) {
// Save "Succ" in BackpatchBlocks. In the backpatch processing, "Succ" is
// used to avoid adding "Succ" again.
BackpatchBlocks.push_back(JumpSource(Succ, ScopePos));
- return Block;
+ return VisitChildren(G);
}
CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
@@ -3341,7 +3500,7 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
// Save local scope position because in case of condition variable ScopePos
// won't be restored when traversing AST.
- SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+ SaveAndRestore save_scope_pos(ScopePos);
// Create local scope for init statement and possible condition variable.
// Add destructor for init statement and condition variable.
@@ -3369,7 +3528,7 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
// Save the current value for the break targets.
// All breaks should go to the code following the loop.
- SaveAndRestore<JumpTarget> save_break(BreakJumpTarget);
+ SaveAndRestore save_break(BreakJumpTarget);
BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
CFGBlock *BodyBlock = nullptr, *TransitionBlock = nullptr;
@@ -3379,8 +3538,8 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
assert(F->getBody());
// Save the current values for Block, Succ, continue and break targets.
- SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
- SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget);
+ SaveAndRestore save_Block(Block), save_Succ(Succ);
+ SaveAndRestore save_continue(ContinueJumpTarget);
// Create an empty block to represent the transition block for looping back
// to the head of the loop. If we have increment code, it will
@@ -3388,6 +3547,11 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
Block = Succ = TransitionBlock = createBlock(false);
TransitionBlock->setLoopTarget(F);
+
+ // Loop iteration (after increment) should end with destructor of Condition
+ // variable (if any).
+ addAutomaticObjHandling(ScopePos, LoopBeginScopePos, F);
+
if (Stmt *I = F->getInc()) {
// Generate increment code in its own basic block. This is the target of
// continue statements.
@@ -3407,8 +3571,6 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
ContinueJumpTarget = JumpTarget(Succ, ContinueScopePos);
ContinueJumpTarget.block->setLoopTarget(F);
- // Loop body should end with destructor of Condition variable (if any).
- addAutomaticObjHandling(ScopePos, LoopBeginScopePos, F);
// If body is not a compound statement create implicit scope
// and add destructors.
@@ -3435,7 +3597,7 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
do {
Expr *C = F->getCond();
- SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+ SaveAndRestore save_scope_pos(ScopePos);
// Specially handle logical operators, which have a slightly
// more optimal CFG representation.
@@ -3501,7 +3663,7 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
// If the loop contains initialization, create a new block for those
// statements. This block can also contain statements that precede the loop.
if (Stmt *I = F->getInit()) {
- SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+ SaveAndRestore save_scope_pos(ScopePos);
ScopePos = LoopBeginScopePos;
Block = createBlock();
return addStmt(I);
@@ -3604,9 +3766,9 @@ CFGBlock *CFGBuilder::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
// Now create the true branch.
{
// Save the current values for Succ, continue and break targets.
- SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
- SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget),
- save_break(BreakJumpTarget);
+ SaveAndRestore save_Block(Block), save_Succ(Succ);
+ SaveAndRestore save_continue(ContinueJumpTarget),
+ save_break(BreakJumpTarget);
// Add an intermediate block between the BodyBlock and the
// EntryConditionBlock to represent the "loop back" transition, for looping
@@ -3670,11 +3832,6 @@ CFGBlock *CFGBuilder::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
return addStmt(S->getSynchExpr());
}
-CFGBlock *CFGBuilder::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
- // FIXME
- return NYS();
-}
-
CFGBlock *CFGBuilder::VisitPseudoObjectExpr(PseudoObjectExpr *E) {
autoCreateBlock();
@@ -3705,7 +3862,7 @@ CFGBlock *CFGBuilder::VisitWhileStmt(WhileStmt *W) {
// Save local scope position because in case of condition variable ScopePos
// won't be restored when traversing AST.
- SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+ SaveAndRestore save_scope_pos(ScopePos);
// Create local scope for possible condition variable.
// Store scope position for continue statement.
@@ -3734,9 +3891,9 @@ CFGBlock *CFGBuilder::VisitWhileStmt(WhileStmt *W) {
assert(W->getBody());
// Save the current values for Block, Succ, continue and break targets.
- SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
- SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget),
- save_break(BreakJumpTarget);
+ SaveAndRestore save_Block(Block), save_Succ(Succ);
+ SaveAndRestore save_continue(ContinueJumpTarget),
+ save_break(BreakJumpTarget);
// Create an empty block to represent the transition block for looping back
// to the head of the loop.
@@ -3835,16 +3992,58 @@ CFGBlock *CFGBuilder::VisitWhileStmt(WhileStmt *W) {
return EntryConditionBlock;
}
-CFGBlock *CFGBuilder::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
- // FIXME: For now we pretend that @catch and the code it contains does not
- // exit.
- return Block;
+CFGBlock *CFGBuilder::VisitArrayInitLoopExpr(ArrayInitLoopExpr *A,
+ AddStmtChoice asc) {
+ if (asc.alwaysAdd(*this, A)) {
+ autoCreateBlock();
+ appendStmt(Block, A);
+ }
+
+ CFGBlock *B = Block;
+
+ if (CFGBlock *R = Visit(A->getSubExpr()))
+ B = R;
+
+ auto *OVE = dyn_cast<OpaqueValueExpr>(A->getCommonExpr());
+ assert(OVE && "ArrayInitLoopExpr->getCommonExpr() should be wrapped in an "
+ "OpaqueValueExpr!");
+ if (CFGBlock *R = Visit(OVE->getSourceExpr()))
+ B = R;
+
+ return B;
}
-CFGBlock *CFGBuilder::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
- // FIXME: This isn't complete. We basically treat @throw like a return
- // statement.
+CFGBlock *CFGBuilder::VisitObjCAtCatchStmt(ObjCAtCatchStmt *CS) {
+ // ObjCAtCatchStmt are treated like labels, so they are the first statement
+ // in a block.
+
+ // Save local scope position because in case of exception variable ScopePos
+ // won't be restored when traversing AST.
+ SaveAndRestore save_scope_pos(ScopePos);
+
+ if (CS->getCatchBody())
+ addStmt(CS->getCatchBody());
+
+ CFGBlock *CatchBlock = Block;
+ if (!CatchBlock)
+ CatchBlock = createBlock();
+ appendStmt(CatchBlock, CS);
+
+ // Also add the ObjCAtCatchStmt as a label, like with regular labels.
+ CatchBlock->setLabel(CS);
+
+ // Bail out if the CFG is bad.
+ if (badCFG)
+ return nullptr;
+
+ // We set Block to NULL to allow lazy creation of a new block (if necessary).
+ Block = nullptr;
+
+ return CatchBlock;
+}
+
+CFGBlock *CFGBuilder::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
// If we were in the middle of a block we stop processing that block.
if (badCFG)
return nullptr;
@@ -3852,14 +4051,77 @@ CFGBlock *CFGBuilder::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
// Create the new block.
Block = createBlock(false);
- // The Exit block is the only successor.
- addSuccessor(Block, &cfg->getExit());
+ if (TryTerminatedBlock)
+ // The current try statement is the only successor.
+ addSuccessor(Block, TryTerminatedBlock);
+ else
+ // otherwise the Exit block is the only successor.
+ addSuccessor(Block, &cfg->getExit());
// Add the statement to the block. This may create new blocks if S contains
// control-flow (short-circuit operations).
return VisitStmt(S, AddStmtChoice::AlwaysAdd);
}
+CFGBlock *CFGBuilder::VisitObjCAtTryStmt(ObjCAtTryStmt *Terminator) {
+ // "@try"/"@catch" is a control-flow statement. Thus we stop processing the
+ // current block.
+ CFGBlock *TrySuccessor = nullptr;
+
+ if (Block) {
+ if (badCFG)
+ return nullptr;
+ TrySuccessor = Block;
+ } else
+ TrySuccessor = Succ;
+
+ // FIXME: Implement @finally support.
+ if (Terminator->getFinallyStmt())
+ return NYS();
+
+ CFGBlock *PrevTryTerminatedBlock = TryTerminatedBlock;
+
+ // Create a new block that will contain the try statement.
+ CFGBlock *NewTryTerminatedBlock = createBlock(false);
+ // Add the terminator in the try block.
+ NewTryTerminatedBlock->setTerminator(Terminator);
+
+ bool HasCatchAll = false;
+ for (ObjCAtCatchStmt *CS : Terminator->catch_stmts()) {
+ // The code after the try is the implicit successor.
+ Succ = TrySuccessor;
+ if (CS->hasEllipsis()) {
+ HasCatchAll = true;
+ }
+ Block = nullptr;
+ CFGBlock *CatchBlock = VisitObjCAtCatchStmt(CS);
+ if (!CatchBlock)
+ return nullptr;
+ // Add this block to the list of successors for the block with the try
+ // statement.
+ addSuccessor(NewTryTerminatedBlock, CatchBlock);
+ }
+
+ // FIXME: This needs updating when @finally support is added.
+ if (!HasCatchAll) {
+ if (PrevTryTerminatedBlock)
+ addSuccessor(NewTryTerminatedBlock, PrevTryTerminatedBlock);
+ else
+ addSuccessor(NewTryTerminatedBlock, &cfg->getExit());
+ }
+
+ // The code after the try is the implicit successor.
+ Succ = TrySuccessor;
+
+ // Save the current "try" context.
+ SaveAndRestore SaveTry(TryTerminatedBlock, NewTryTerminatedBlock);
+ cfg->addTryDispatchBlock(TryTerminatedBlock);
+
+ assert(Terminator->getTryBody() && "try must contain a non-NULL body");
+ Block = nullptr;
+ return addStmt(Terminator->getTryBody());
+}
+
CFGBlock *CFGBuilder::VisitObjCMessageExpr(ObjCMessageExpr *ME,
AddStmtChoice asc) {
findConstructionContextsForArguments(ME);
@@ -3890,6 +4152,25 @@ CFGBlock *CFGBuilder::VisitCXXThrowExpr(CXXThrowExpr *T) {
return VisitStmt(T, AddStmtChoice::AlwaysAdd);
}
+CFGBlock *CFGBuilder::VisitCXXTypeidExpr(CXXTypeidExpr *S, AddStmtChoice asc) {
+ if (asc.alwaysAdd(*this, S)) {
+ autoCreateBlock();
+ appendStmt(Block, S);
+ }
+
+ // C++ [expr.typeid]p3:
+ // When typeid is applied to an expression other than an glvalue of a
+ // polymorphic class type [...] [the] expression is an unevaluated
+ // operand. [...]
+ // We add only potentially evaluated statements to the block to avoid
+ // CFG generation for unevaluated operands.
+ if (!S->isTypeDependent() && S->isPotentiallyEvaluated())
+ return VisitChildren(S);
+
+ // Return block without CFG for unevaluated operands.
+ return Block;
+}
+
CFGBlock *CFGBuilder::VisitDoStmt(DoStmt *D) {
CFGBlock *LoopSuccessor = nullptr;
@@ -3936,8 +4217,8 @@ CFGBlock *CFGBuilder::VisitDoStmt(DoStmt *D) {
assert(D->getBody());
// Save the current values for Block, Succ, and continue and break targets
- SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
- SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget),
+ SaveAndRestore save_Block(Block), save_Succ(Succ);
+ SaveAndRestore save_continue(ContinueJumpTarget),
save_break(BreakJumpTarget);
// All continues within this loop should go to the condition block
@@ -4055,7 +4336,7 @@ CFGBlock *CFGBuilder::VisitSwitchStmt(SwitchStmt *Terminator) {
// Save local scope position because in case of condition variable ScopePos
// won't be restored when traversing AST.
- SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+ SaveAndRestore save_scope_pos(ScopePos);
// Create local scope for C++17 switch init-stmt if one exists.
if (Stmt *Init = Terminator->getInit())
@@ -4075,9 +4356,9 @@ CFGBlock *CFGBuilder::VisitSwitchStmt(SwitchStmt *Terminator) {
} else SwitchSuccessor = Succ;
// Save the current "switch" context.
- SaveAndRestore<CFGBlock*> save_switch(SwitchTerminatedBlock),
- save_default(DefaultCaseBlock);
- SaveAndRestore<JumpTarget> save_break(BreakJumpTarget);
+ SaveAndRestore save_switch(SwitchTerminatedBlock),
+ save_default(DefaultCaseBlock);
+ SaveAndRestore save_break(BreakJumpTarget);
// Set the "default" case to be the block after the switch statement. If the
// switch statement contains a "default:", this value will be overwritten with
@@ -4100,15 +4381,13 @@ CFGBlock *CFGBuilder::VisitSwitchStmt(SwitchStmt *Terminator) {
// For pruning unreachable case statements, save the current state
// for tracking the condition value.
- SaveAndRestore<bool> save_switchExclusivelyCovered(switchExclusivelyCovered,
- false);
+ SaveAndRestore save_switchExclusivelyCovered(switchExclusivelyCovered, false);
// Determine if the switch condition can be explicitly evaluated.
assert(Terminator->getCond() && "switch condition must be non-NULL");
Expr::EvalResult result;
bool b = tryEvaluate(Terminator->getCond(), result);
- SaveAndRestore<Expr::EvalResult*> save_switchCond(switchCond,
- b ? &result : nullptr);
+ SaveAndRestore save_switchCond(switchCond, b ? &result : nullptr);
// If body is not a compound statement create implicit scope
// and add destructors.
@@ -4244,7 +4523,7 @@ CFGBlock *CFGBuilder::VisitCaseStmt(CaseStmt *CS) {
shouldAddCase(switchExclusivelyCovered, switchCond,
CS, *Context));
- // We set Block to NULL to allow lazy creation of a new block (if necessary)
+ // We set Block to NULL to allow lazy creation of a new block (if necessary).
Block = nullptr;
if (TopBlock) {
@@ -4280,7 +4559,7 @@ CFGBlock *CFGBuilder::VisitDefaultStmt(DefaultStmt *Terminator) {
// (including a fall-through to the code after the switch statement) to always
// be the last successor of a switch-terminated block.
- // We set Block to NULL to allow lazy creation of a new block (if necessary)
+ // We set Block to NULL to allow lazy creation of a new block (if necessary).
Block = nullptr;
// This block is now the implicit successor of other blocks.
@@ -4298,7 +4577,8 @@ CFGBlock *CFGBuilder::VisitCXXTryStmt(CXXTryStmt *Terminator) {
if (badCFG)
return nullptr;
TrySuccessor = Block;
- } else TrySuccessor = Succ;
+ } else
+ TrySuccessor = Succ;
CFGBlock *PrevTryTerminatedBlock = TryTerminatedBlock;
@@ -4308,10 +4588,10 @@ CFGBlock *CFGBuilder::VisitCXXTryStmt(CXXTryStmt *Terminator) {
NewTryTerminatedBlock->setTerminator(Terminator);
bool HasCatchAll = false;
- for (unsigned h = 0; h <Terminator->getNumHandlers(); ++h) {
+ for (unsigned I = 0, E = Terminator->getNumHandlers(); I != E; ++I) {
// The code after the try is the implicit successor.
Succ = TrySuccessor;
- CXXCatchStmt *CS = Terminator->getHandler(h);
+ CXXCatchStmt *CS = Terminator->getHandler(I);
if (CS->getExceptionDecl() == nullptr) {
HasCatchAll = true;
}
@@ -4334,7 +4614,7 @@ CFGBlock *CFGBuilder::VisitCXXTryStmt(CXXTryStmt *Terminator) {
Succ = TrySuccessor;
// Save the current "try" context.
- SaveAndRestore<CFGBlock*> save_try(TryTerminatedBlock, NewTryTerminatedBlock);
+ SaveAndRestore SaveTry(TryTerminatedBlock, NewTryTerminatedBlock);
cfg->addTryDispatchBlock(TryTerminatedBlock);
assert(Terminator->getTryBlock() && "try must contain a non-NULL body");
@@ -4348,7 +4628,7 @@ CFGBlock *CFGBuilder::VisitCXXCatchStmt(CXXCatchStmt *CS) {
// Save local scope position because in case of exception variable ScopePos
// won't be restored when traversing AST.
- SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+ SaveAndRestore save_scope_pos(ScopePos);
// Create local scope for possible exception variable.
// Store scope position. Add implicit destructor.
@@ -4379,7 +4659,7 @@ CFGBlock *CFGBuilder::VisitCXXCatchStmt(CXXCatchStmt *CS) {
if (badCFG)
return nullptr;
- // We set Block to NULL to allow lazy creation of a new block (if necessary)
+ // We set Block to NULL to allow lazy creation of a new block (if necessary).
Block = nullptr;
return CatchBlock;
@@ -4400,7 +4680,7 @@ CFGBlock *CFGBuilder::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
// }
// Save local scope position before the addition of the implicit variables.
- SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+ SaveAndRestore save_scope_pos(ScopePos);
// Create local scopes and destructors for range, begin and end variables.
if (Stmt *Range = S->getRangeStmt())
@@ -4425,7 +4705,7 @@ CFGBlock *CFGBuilder::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
// Save the current value for the break targets.
// All breaks should go to the code following the loop.
- SaveAndRestore<JumpTarget> save_break(BreakJumpTarget);
+ SaveAndRestore save_break(BreakJumpTarget);
BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
// The block for the __begin != __end expression.
@@ -4458,8 +4738,8 @@ CFGBlock *CFGBuilder::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
assert(S->getBody());
// Save the current values for Block, Succ, and continue targets.
- SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
- SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget);
+ SaveAndRestore save_Block(Block), save_Succ(Succ);
+ SaveAndRestore save_continue(ContinueJumpTarget);
// Generate increment code in its own basic block. This is the target of
// continue statements.
@@ -4944,8 +5224,7 @@ CFGBlock *CFG::createBlock() {
bool first_block = begin() == end();
// Create the block.
- CFGBlock *Mem = getAllocator().Allocate<CFGBlock>();
- new (Mem) CFGBlock(NumBlockIDs++, BlkBVC, this);
+ CFGBlock *Mem = new (getAllocator()) CFGBlock(NumBlockIDs++, BlkBVC, this);
Blocks.push_back(Mem, BlkBVC);
// If this is the first block, set it as the Entry and Exit.
@@ -5020,6 +5299,7 @@ CFGImplicitDtor::getDestructorDecl(ASTContext &astContext) const {
case CFGElement::CXXRecordTypedCall:
case CFGElement::ScopeBegin:
case CFGElement::ScopeEnd:
+ case CFGElement::CleanupFunction:
llvm_unreachable("getDestructorDecl should only be used with "
"ImplicitDtors");
case CFGElement::AutomaticObjectDtor: {
@@ -5062,8 +5342,19 @@ CFGImplicitDtor::getDestructorDecl(ASTContext &astContext) const {
const CXXTemporary *temp = bindExpr->getTemporary();
return temp->getDestructor();
}
+ case CFGElement::MemberDtor: {
+ const FieldDecl *field = castAs<CFGMemberDtor>().getFieldDecl();
+ QualType ty = field->getType();
+
+ while (const ArrayType *arrayType = astContext.getAsArrayType(ty)) {
+ ty = arrayType->getElementType();
+ }
+
+ const CXXRecordDecl *classDecl = ty->getAsCXXRecordDecl();
+ assert(classDecl);
+ return classDecl->getDestructor();
+ }
case CFGElement::BaseDtor:
- case CFGElement::MemberDtor:
// Not yet supported.
return nullptr;
}
@@ -5141,7 +5432,7 @@ public:
unsigned j = 1;
for (CFGBlock::const_iterator BI = (*I)->begin(), BEnd = (*I)->end() ;
BI != BEnd; ++BI, ++j ) {
- if (Optional<CFGStmt> SE = BI->getAs<CFGStmt>()) {
+ if (std::optional<CFGStmt> SE = BI->getAs<CFGStmt>()) {
const Stmt *stmt= SE->getStmt();
std::pair<unsigned, unsigned> P((*I)->getBlockID(), j);
StmtMap[stmt] = P;
@@ -5287,13 +5578,11 @@ public:
Terminator->getCond()->printPretty(OS, Helper, Policy);
}
- void VisitCXXTryStmt(CXXTryStmt *CS) {
- OS << "try ...";
- }
+ void VisitCXXTryStmt(CXXTryStmt *) { OS << "try ..."; }
- void VisitSEHTryStmt(SEHTryStmt *CS) {
- OS << "__try ...";
- }
+ void VisitObjCAtTryStmt(ObjCAtTryStmt *) { OS << "@try ..."; }
+
+ void VisitSEHTryStmt(SEHTryStmt *CS) { OS << "__try ..."; }
void VisitAbstractConditionalOperator(AbstractConditionalOperator* C) {
if (Stmt *Cond = C->getCond())
@@ -5439,6 +5728,12 @@ static void print_construction_context(raw_ostream &OS,
Stmts.push_back(TOCC->getConstructorAfterElision());
break;
}
+ case ConstructionContext::LambdaCaptureKind: {
+ const auto *LCC = cast<LambdaCaptureConstructionContext>(CC);
+ Helper.handledStmt(const_cast<LambdaExpr *>(LCC->getLambdaExpr()), OS);
+ OS << "+" << LCC->getIndex();
+ return;
+ }
case ConstructionContext::ArgumentKind: {
const auto *ACC = cast<ArgumentConstructionContext>(CC);
if (const Stmt *BTE = ACC->getCXXBindTemporaryExpr()) {
@@ -5462,7 +5757,8 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
const CFGElement &E);
void CFGElement::dumpToStream(llvm::raw_ostream &OS) const {
- StmtPrinterHelper Helper(nullptr, {});
+ LangOptions LangOpts;
+ StmtPrinterHelper Helper(nullptr, LangOpts);
print_elem(OS, Helper, *this);
}
@@ -5511,15 +5807,13 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
OS << " (BindTemporary)";
} else if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(S)) {
OS << " (CXXConstructExpr";
- if (Optional<CFGConstructor> CE = E.getAs<CFGConstructor>()) {
+ if (std::optional<CFGConstructor> CE = E.getAs<CFGConstructor>()) {
print_construction_context(OS, Helper, CE->getConstructionContext());
}
- OS << ", " << CCE->getType().getAsString() << ")";
+ OS << ", " << CCE->getType() << ")";
} else if (const CastExpr *CE = dyn_cast<CastExpr>(S)) {
- OS << " (" << CE->getStmtClassName() << ", "
- << CE->getCastKindName()
- << ", " << CE->getType().getAsString()
- << ")";
+ OS << " (" << CE->getStmtClassName() << ", " << CE->getCastKindName()
+ << ", " << CE->getType() << ")";
}
// Expressions need a newline.
@@ -5549,6 +5843,11 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
break;
}
+ case CFGElement::Kind::CleanupFunction:
+ OS << "CleanupFunction ("
+ << E.castAs<CFGCleanupFunction>().getFunctionDecl()->getName() << ")\n";
+ break;
+
case CFGElement::Kind::LifetimeEnds:
Helper.handleDecl(E.castAs<CFGLifetimeEnds>().getVarDecl(), OS);
OS << " (Lifetime ends)\n";
@@ -5609,7 +5908,8 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
}
case CFGElement::Kind::TemporaryDtor: {
- const CXXBindTemporaryExpr *BT = E.castAs<CFGTemporaryDtor>().getBindTemporaryExpr();
+ const CXXBindTemporaryExpr *BT =
+ E.castAs<CFGTemporaryDtor>().getBindTemporaryExpr();
OS << "~";
BT->getType().print(OS, PrintingPolicy(Helper.getLangOpts()));
OS << "() (Temporary object destructor)\n";
@@ -5653,21 +5953,25 @@ static void print_block(raw_ostream &OS, const CFG* cfg,
OS << L->getName();
else if (CaseStmt *C = dyn_cast<CaseStmt>(Label)) {
OS << "case ";
- if (C->getLHS())
- C->getLHS()->printPretty(OS, &Helper,
- PrintingPolicy(Helper.getLangOpts()));
- if (C->getRHS()) {
+ if (const Expr *LHS = C->getLHS())
+ LHS->printPretty(OS, &Helper, PrintingPolicy(Helper.getLangOpts()));
+ if (const Expr *RHS = C->getRHS()) {
OS << " ... ";
- C->getRHS()->printPretty(OS, &Helper,
- PrintingPolicy(Helper.getLangOpts()));
+ RHS->printPretty(OS, &Helper, PrintingPolicy(Helper.getLangOpts()));
}
} else if (isa<DefaultStmt>(Label))
OS << "default";
else if (CXXCatchStmt *CS = dyn_cast<CXXCatchStmt>(Label)) {
OS << "catch (";
- if (CS->getExceptionDecl())
- CS->getExceptionDecl()->print(OS, PrintingPolicy(Helper.getLangOpts()),
- 0);
+ if (const VarDecl *ED = CS->getExceptionDecl())
+ ED->print(OS, PrintingPolicy(Helper.getLangOpts()), 0);
+ else
+ OS << "...";
+ OS << ")";
+ } else if (ObjCAtCatchStmt *CS = dyn_cast<ObjCAtCatchStmt>(Label)) {
+ OS << "@catch (";
+ if (const VarDecl *PD = CS->getCatchParamDecl())
+ PD->print(OS, PrintingPolicy(Helper.getLangOpts()), 0);
else
OS << "...";
OS << ")";
@@ -5882,8 +6186,8 @@ static bool isImmediateSinkBlock(const CFGBlock *Blk) {
// at least for now, but once we have better support for exceptions,
// we'd need to carefully handle the case when the throw is being
// immediately caught.
- if (std::any_of(Blk->begin(), Blk->end(), [](const CFGElement &Elm) {
- if (Optional<CFGStmt> StmtElm = Elm.getAs<CFGStmt>())
+ if (llvm::any_of(*Blk, [](const CFGElement &Elm) {
+ if (std::optional<CFGStmt> StmtElm = Elm.getAs<CFGStmt>())
if (isa<CXXThrowExpr>(StmtElm->getStmt()))
return true;
return false;
@@ -6028,17 +6332,13 @@ Stmt *CFGBlock::getTerminatorCondition(bool StripParens) {
// CFG Graphviz Visualization
//===----------------------------------------------------------------------===//
-#ifndef NDEBUG
-static StmtPrinterHelper* GraphHelper;
-#endif
+static StmtPrinterHelper *GraphHelper;
void CFG::viewCFG(const LangOptions &LO) const {
-#ifndef NDEBUG
StmtPrinterHelper H(this, LO);
GraphHelper = &H;
llvm::ViewGraph(this,"CFG");
GraphHelper = nullptr;
-#endif
}
namespace llvm {
@@ -6047,8 +6347,7 @@ template<>
struct DOTGraphTraits<const CFG*> : public DefaultDOTGraphTraits {
DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
- static std::string getNodeLabel(const CFGBlock *Node, const CFG* Graph) {
-#ifndef NDEBUG
+ static std::string getNodeLabel(const CFGBlock *Node, const CFG *Graph) {
std::string OutSStr;
llvm::raw_string_ostream Out(OutSStr);
print_block(Out,Graph, *Node, *GraphHelper, false, false);
@@ -6064,9 +6363,6 @@ struct DOTGraphTraits<const CFG*> : public DefaultDOTGraphTraits {
}
return OutStr;
-#else
- return {};
-#endif
}
};
diff --git a/contrib/llvm-project/clang/lib/Analysis/CFGStmtMap.cpp b/contrib/llvm-project/clang/lib/Analysis/CFGStmtMap.cpp
index d1c23e3c879b..c3a4581e1fb1 100644
--- a/contrib/llvm-project/clang/lib/Analysis/CFGStmtMap.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/CFGStmtMap.cpp
@@ -15,6 +15,7 @@
#include "clang/AST/ParentMap.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
+#include <optional>
using namespace clang;
@@ -49,7 +50,7 @@ static void Accumulate(SMap &SM, CFGBlock *B) {
// First walk the block-level expressions.
for (CFGBlock::iterator I = B->begin(), E = B->end(); I != E; ++I) {
const CFGElement &CE = *I;
- Optional<CFGStmt> CS = CE.getAs<CFGStmt>();
+ std::optional<CFGStmt> CS = CE.getAs<CFGStmt>();
if (!CS)
continue;
diff --git a/contrib/llvm-project/clang/lib/Analysis/CallGraph.cpp b/contrib/llvm-project/clang/lib/Analysis/CallGraph.cpp
index 59cc939b6fd1..f892980ed313 100644
--- a/contrib/llvm-project/clang/lib/Analysis/CallGraph.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/CallGraph.cpp
@@ -168,7 +168,7 @@ bool CallGraph::includeCalleeInGraph(const Decl *D) {
return false;
IdentifierInfo *II = FD->getIdentifier();
- if (II && II->getName().startswith("__inline"))
+ if (II && II->getName().starts_with("__inline"))
return false;
}
diff --git a/contrib/llvm-project/clang/lib/Analysis/CalledOnceCheck.cpp b/contrib/llvm-project/clang/lib/Analysis/CalledOnceCheck.cpp
index 661f7b999f2b..04c5f6aa9c74 100644
--- a/contrib/llvm-project/clang/lib/Analysis/CalledOnceCheck.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/CalledOnceCheck.cpp
@@ -28,7 +28,6 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/BitmaskEnum.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
@@ -38,6 +37,7 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <memory>
+#include <optional>
using namespace clang;
@@ -319,7 +319,7 @@ public:
// We care about logical not only if we care about comparisons.
if (!ShouldRetrieveFromComparisons)
return nullptr;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
// Function pointer/references can be dereferenced before a call.
// That doesn't make it, however, any different from a regular call.
// For this reason, dereference operation is a "no-op".
@@ -494,7 +494,7 @@ struct Clarification {
/// of basic blocks.
class NotCalledClarifier
: public ConstStmtVisitor<NotCalledClarifier,
- llvm::Optional<Clarification>> {
+ std::optional<Clarification>> {
public:
/// The main entrypoint for the class, the function that tries to find the
/// clarification of how to explain which sub-path starts with a CFG edge
@@ -508,24 +508,24 @@ public:
/// results only for such cases. For this very reason, the parent basic
/// block, Conditional, is named that way, so it is clear what kind of
/// block is expected.
- static llvm::Optional<Clarification>
- clarify(const CFGBlock *Conditional, const CFGBlock *SuccWithoutCall) {
+ static std::optional<Clarification> clarify(const CFGBlock *Conditional,
+ const CFGBlock *SuccWithoutCall) {
if (const Stmt *Terminator = Conditional->getTerminatorStmt()) {
return NotCalledClarifier{Conditional, SuccWithoutCall}.Visit(Terminator);
}
- return llvm::None;
+ return std::nullopt;
}
- llvm::Optional<Clarification> VisitIfStmt(const IfStmt *If) {
+ std::optional<Clarification> VisitIfStmt(const IfStmt *If) {
return VisitBranchingBlock(If, NeverCalledReason::IfThen);
}
- llvm::Optional<Clarification>
+ std::optional<Clarification>
VisitAbstractConditionalOperator(const AbstractConditionalOperator *Ternary) {
return VisitBranchingBlock(Ternary, NeverCalledReason::IfThen);
}
- llvm::Optional<Clarification> VisitSwitchStmt(const SwitchStmt *Switch) {
+ std::optional<Clarification> VisitSwitchStmt(const SwitchStmt *Switch) {
const Stmt *CaseToBlame = SuccInQuestion->getLabel();
if (!CaseToBlame) {
// If interesting basic block is not labeled, it means that this
@@ -543,15 +543,15 @@ public:
llvm_unreachable("Found unexpected switch structure");
}
- llvm::Optional<Clarification> VisitForStmt(const ForStmt *For) {
+ std::optional<Clarification> VisitForStmt(const ForStmt *For) {
return VisitBranchingBlock(For, NeverCalledReason::LoopEntered);
}
- llvm::Optional<Clarification> VisitWhileStmt(const WhileStmt *While) {
+ std::optional<Clarification> VisitWhileStmt(const WhileStmt *While) {
return VisitBranchingBlock(While, NeverCalledReason::LoopEntered);
}
- llvm::Optional<Clarification>
+ std::optional<Clarification>
VisitBranchingBlock(const Stmt *Terminator, NeverCalledReason DefaultReason) {
assert(Parent->succ_size() == 2 &&
"Branching block should have exactly two successors");
@@ -561,12 +561,12 @@ public:
return Clarification{ActualReason, Terminator};
}
- llvm::Optional<Clarification> VisitBinaryOperator(const BinaryOperator *) {
+ std::optional<Clarification> VisitBinaryOperator(const BinaryOperator *) {
// We don't want to report on short-curcuit logical operations.
- return llvm::None;
+ return std::nullopt;
}
- llvm::Optional<Clarification> VisitStmt(const Stmt *Terminator) {
+ std::optional<Clarification> VisitStmt(const Stmt *Terminator) {
// If we got here, we didn't have a visit function for more derived
// classes of statement that this terminator actually belongs to.
//
@@ -753,7 +753,7 @@ private:
// We use a backward dataflow propagation and for this reason we
// should traverse basic blocks bottom-up.
for (const CFGElement &Element : llvm::reverse(*BB)) {
- if (Optional<CFGStmt> S = Element.getAs<CFGStmt>()) {
+ if (std::optional<CFGStmt> S = Element.getAs<CFGStmt>()) {
check(S->getStmt());
}
}
@@ -880,8 +880,8 @@ private:
template <class CallLikeExpr>
void checkIndirectCall(const CallLikeExpr *CallOrMessage) {
// CallExpr::arguments does not interact nicely with llvm::enumerate.
- llvm::ArrayRef<const Expr *> Arguments = llvm::makeArrayRef(
- CallOrMessage->getArgs(), CallOrMessage->getNumArgs());
+ llvm::ArrayRef<const Expr *> Arguments =
+ llvm::ArrayRef(CallOrMessage->getArgs(), CallOrMessage->getNumArgs());
// Let's check if any of the call arguments is a point of interest.
for (const auto &Argument : llvm::enumerate(Arguments)) {
@@ -973,7 +973,7 @@ private:
/// Return true if the given name has conventional suffixes.
static bool hasConventionalSuffix(llvm::StringRef Name) {
return llvm::any_of(CONVENTIONAL_SUFFIXES, [Name](llvm::StringRef Suffix) {
- return Name.endswith(Suffix);
+ return Name.ends_with(Suffix);
});
}
@@ -997,10 +997,10 @@ private:
/// Return true/false if 'swift_async' attribute states that the given
/// parameter is conventionally called once.
- /// Return llvm::None if the given declaration doesn't have 'swift_async'
+ /// Return std::nullopt if the given declaration doesn't have 'swift_async'
/// attribute.
- static llvm::Optional<bool> isConventionalSwiftAsync(const Decl *D,
- unsigned ParamIndex) {
+ static std::optional<bool> isConventionalSwiftAsync(const Decl *D,
+ unsigned ParamIndex) {
if (const SwiftAsyncAttr *A = D->getAttr<SwiftAsyncAttr>()) {
if (A->getKind() == SwiftAsyncAttr::None) {
return false;
@@ -1008,7 +1008,7 @@ private:
return A->getCompletionHandlerIndex().getASTIndex() == ParamIndex;
}
- return llvm::None;
+ return std::nullopt;
}
/// Return true if the specified selector represents init method.
@@ -1065,7 +1065,7 @@ private:
// 'swift_async' goes first and overrides anything else.
if (auto ConventionalAsync =
isConventionalSwiftAsync(Function, ParamIndex)) {
- return ConventionalAsync.getValue();
+ return *ConventionalAsync;
}
return shouldBeCalledOnce(Function->getParamDecl(ParamIndex)) ||
@@ -1082,7 +1082,7 @@ private:
// 'swift_async' goes first and overrides anything else.
if (auto ConventionalAsync = isConventionalSwiftAsync(Method, ParamIndex)) {
- return ConventionalAsync.getValue();
+ return *ConventionalAsync;
}
const ParmVarDecl *Parameter = Method->getParamDecl(ParamIndex);
@@ -1157,8 +1157,8 @@ private:
bool shouldBlockArgumentBeCalledOnce(const CallLikeExpr *CallOrMessage,
const Stmt *BlockArgument) const {
// CallExpr::arguments does not interact nicely with llvm::enumerate.
- llvm::ArrayRef<const Expr *> Arguments = llvm::makeArrayRef(
- CallOrMessage->getArgs(), CallOrMessage->getNumArgs());
+ llvm::ArrayRef<const Expr *> Arguments =
+ llvm::ArrayRef(CallOrMessage->getArgs(), CallOrMessage->getNumArgs());
for (const auto &Argument : llvm::enumerate(Arguments)) {
if (Argument.value() == BlockArgument) {
@@ -1265,7 +1265,7 @@ private:
llvm::reverse(*BB), // we should start with return statements, if we
// have any, i.e. from the bottom of the block
[&ReturnChildren](const CFGElement &Element) {
- if (Optional<CFGStmt> S = Element.getAs<CFGStmt>()) {
+ if (std::optional<CFGStmt> S = Element.getAs<CFGStmt>()) {
const Stmt *SuspiciousStmt = S->getStmt();
if (isa<ReturnStmt>(SuspiciousStmt)) {
@@ -1635,19 +1635,19 @@ public:
private:
unsigned size() const { return TrackedParams.size(); }
- llvm::Optional<unsigned> getIndexOfCallee(const CallExpr *Call) const {
+ std::optional<unsigned> getIndexOfCallee(const CallExpr *Call) const {
return getIndexOfExpression(Call->getCallee());
}
- llvm::Optional<unsigned> getIndexOfExpression(const Expr *E) const {
+ std::optional<unsigned> getIndexOfExpression(const Expr *E) const {
if (const ParmVarDecl *Parameter = findReferencedParmVarDecl(E)) {
return getIndex(*Parameter);
}
- return llvm::None;
+ return std::nullopt;
}
- llvm::Optional<unsigned> getIndex(const ParmVarDecl &Parameter) const {
+ std::optional<unsigned> getIndex(const ParmVarDecl &Parameter) const {
// Expected number of parameters that we actually track is 1.
//
// Also, the maximum number of declared parameters could not be on a scale
@@ -1662,7 +1662,7 @@ private:
return It - TrackedParams.begin();
}
- return llvm::None;
+ return std::nullopt;
}
const ParmVarDecl *getParameter(unsigned Index) const {
diff --git a/contrib/llvm-project/clang/lib/Analysis/CloneDetection.cpp b/contrib/llvm-project/clang/lib/Analysis/CloneDetection.cpp
index 0a1122bd5a4a..65ac4ad6a5e5 100644
--- a/contrib/llvm-project/clang/lib/Analysis/CloneDetection.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/CloneDetection.cpp
@@ -147,9 +147,8 @@ void OnlyLargestCloneConstraint::constrain(
// Erasing a list of indexes from the vector should be done with decreasing
// indexes. As IndexesToRemove is constructed with increasing values, we just
// reverse iterate over it to get the desired order.
- for (auto I = IndexesToRemove.rbegin(); I != IndexesToRemove.rend(); ++I) {
- Result.erase(Result.begin() + *I);
- }
+ for (unsigned I : llvm::reverse(IndexesToRemove))
+ Result.erase(Result.begin() + I);
}
bool FilenamePatternConstraint::isAutoGenerated(
diff --git a/contrib/llvm-project/clang/lib/Analysis/CocoaConventions.cpp b/contrib/llvm-project/clang/lib/Analysis/CocoaConventions.cpp
index 571d72e1a841..836859c22345 100644
--- a/contrib/llvm-project/clang/lib/Analysis/CocoaConventions.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/CocoaConventions.cpp
@@ -26,10 +26,10 @@ bool cocoa::isRefType(QualType RetTy, StringRef Prefix,
// Recursively walk the typedef stack, allowing typedefs of reference types.
while (const TypedefType *TD = RetTy->getAs<TypedefType>()) {
StringRef TDName = TD->getDecl()->getIdentifier()->getName();
- if (TDName.startswith(Prefix) && TDName.endswith("Ref"))
+ if (TDName.starts_with(Prefix) && TDName.ends_with("Ref"))
return true;
// XPC unfortunately uses CF-style function names, but aren't CF types.
- if (TDName.startswith("xpc_"))
+ if (TDName.starts_with("xpc_"))
return false;
RetTy = TD->getDecl()->getUnderlyingType();
}
@@ -43,7 +43,7 @@ bool cocoa::isRefType(QualType RetTy, StringRef Prefix,
return false;
// Does the name start with the prefix?
- return Name.startswith(Prefix);
+ return Name.starts_with(Prefix);
}
/// Returns true when the passed-in type is a CF-style reference-counted
@@ -127,10 +127,9 @@ bool coreFoundation::followsCreateRule(const FunctionDecl *fn) {
// Scan for *lowercase* 'reate' or 'opy', followed by no lowercase
// character.
StringRef suffix = functionName.substr(it - start);
- if (suffix.startswith("reate")) {
+ if (suffix.starts_with("reate")) {
it += 5;
- }
- else if (suffix.startswith("opy")) {
+ } else if (suffix.starts_with("opy")) {
it += 3;
} else {
// Keep scanning.
diff --git a/contrib/llvm-project/clang/lib/Analysis/ConstructionContext.cpp b/contrib/llvm-project/clang/lib/Analysis/ConstructionContext.cpp
index 6ba1e2173d2c..8a862c06f13a 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ConstructionContext.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ConstructionContext.cpp
@@ -156,6 +156,12 @@ const ConstructionContext *ConstructionContext::createBoundTemporaryFromLayers(
return create<CXX17ElidedCopyConstructorInitializerConstructionContext>(
C, I, BTE);
}
+ case ConstructionContextItem::LambdaCaptureKind: {
+ assert(ParentLayer->isLast());
+ const auto *E = cast<LambdaExpr>(ParentItem.getStmt());
+ return create<LambdaCaptureConstructionContext>(C, E,
+ ParentItem.getIndex());
+ }
} // switch (ParentItem.getKind())
llvm_unreachable("Unexpected construction context with destructor!");
@@ -200,6 +206,11 @@ const ConstructionContext *ConstructionContext::createFromLayers(
case ConstructionContextItem::ElidableConstructorKind: {
llvm_unreachable("The argument needs to be materialized first!");
}
+ case ConstructionContextItem::LambdaCaptureKind: {
+ assert(TopLayer->isLast());
+ const auto *E = cast<LambdaExpr>(TopItem.getStmt());
+ return create<LambdaCaptureConstructionContext>(C, E, TopItem.getIndex());
+ }
case ConstructionContextItem::InitializerKind: {
assert(TopLayer->isLast());
const CXXCtorInitializer *I = TopItem.getCXXCtorInitializer();
diff --git a/contrib/llvm-project/clang/lib/Analysis/Consumed.cpp b/contrib/llvm-project/clang/lib/Analysis/Consumed.cpp
index 9560248b173f..d01c7f688e8b 100644
--- a/contrib/llvm-project/clang/lib/Analysis/Consumed.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/Consumed.cpp
@@ -27,13 +27,13 @@
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <memory>
+#include <optional>
#include <utility>
// TODO: Adjust states of args to constructors in the same way that arguments to
@@ -62,7 +62,7 @@ static SourceLocation getFirstStmtLoc(const CFGBlock *Block) {
// Find the source location of the first statement in the block, if the block
// is not empty.
for (const auto &B : *Block)
- if (Optional<CFGStmt> CS = B.getAs<CFGStmt>())
+ if (std::optional<CFGStmt> CS = B.getAs<CFGStmt>())
return CS->getStmt()->getBeginLoc();
// Block is empty.
@@ -81,7 +81,7 @@ static SourceLocation getLastStmtLoc(const CFGBlock *Block) {
} else {
for (CFGBlock::const_reverse_iterator BI = Block->rbegin(),
BE = Block->rend(); BI != BE; ++BI) {
- if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>())
+ if (std::optional<CFGStmt> CS = BI->getAs<CFGStmt>())
return CS->getStmt()->getBeginLoc();
}
}
@@ -771,7 +771,7 @@ void ConsumedStmtVisitor::VisitCXXBindTemporaryExpr(
void ConsumedStmtVisitor::VisitCXXConstructExpr(const CXXConstructExpr *Call) {
CXXConstructorDecl *Constructor = Call->getConstructor();
- QualType ThisType = Constructor->getThisType()->getPointeeType();
+ QualType ThisType = Constructor->getFunctionObjectParameterType();
if (!isConsumableType(ThisType))
return;
@@ -1199,7 +1199,7 @@ void ConsumedAnalyzer::determineExpectedReturnState(AnalysisDeclContext &AC,
const FunctionDecl *D) {
QualType ReturnType;
if (const auto *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
- ReturnType = Constructor->getThisType()->getPointeeType();
+ ReturnType = Constructor->getFunctionObjectParameterType();
} else
ReturnType = D->getCallResultType();
diff --git a/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp b/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp
index e9ff5e5e8765..bb042760d297 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp
@@ -15,6 +15,81 @@
namespace clang {
using namespace ast_matchers;
+// Check if result of Source expression could be a Target expression.
+// Checks:
+// - Implicit Casts
+// - Binary Operators
+// - ConditionalOperator
+// - BinaryConditionalOperator
+static bool canExprResolveTo(const Expr *Source, const Expr *Target) {
+
+ const auto IgnoreDerivedToBase = [](const Expr *E, auto Matcher) {
+ if (Matcher(E))
+ return true;
+ if (const auto *Cast = dyn_cast<ImplicitCastExpr>(E)) {
+ if ((Cast->getCastKind() == CK_DerivedToBase ||
+ Cast->getCastKind() == CK_UncheckedDerivedToBase) &&
+ Matcher(Cast->getSubExpr()))
+ return true;
+ }
+ return false;
+ };
+
+ const auto EvalCommaExpr = [](const Expr *E, auto Matcher) {
+ const Expr *Result = E;
+ while (const auto *BOComma =
+ dyn_cast_or_null<BinaryOperator>(Result->IgnoreParens())) {
+ if (!BOComma->isCommaOp())
+ break;
+ Result = BOComma->getRHS();
+ }
+
+ return Result != E && Matcher(Result);
+ };
+
+ // The 'ConditionalOperatorM' matches on `<anything> ? <expr> : <expr>`.
+ // This matching must be recursive because `<expr>` can be anything resolving
+ // to the `InnerMatcher`, for example another conditional operator.
+ // The edge-case `BaseClass &b = <cond> ? DerivedVar1 : DerivedVar2;`
+ // is handled, too. The implicit cast happens outside of the conditional.
+ // This is matched by `IgnoreDerivedToBase(canResolveToExpr(InnerMatcher))`
+ // below.
+ const auto ConditionalOperatorM = [Target](const Expr *E) {
+ if (const auto *OP = dyn_cast<ConditionalOperator>(E)) {
+ if (const auto *TE = OP->getTrueExpr()->IgnoreParens())
+ if (canExprResolveTo(TE, Target))
+ return true;
+ if (const auto *FE = OP->getFalseExpr()->IgnoreParens())
+ if (canExprResolveTo(FE, Target))
+ return true;
+ }
+ return false;
+ };
+
+ const auto ElvisOperator = [Target](const Expr *E) {
+ if (const auto *OP = dyn_cast<BinaryConditionalOperator>(E)) {
+ if (const auto *TE = OP->getTrueExpr()->IgnoreParens())
+ if (canExprResolveTo(TE, Target))
+ return true;
+ if (const auto *FE = OP->getFalseExpr()->IgnoreParens())
+ if (canExprResolveTo(FE, Target))
+ return true;
+ }
+ return false;
+ };
+
+ const Expr *SourceExprP = Source->IgnoreParens();
+ return IgnoreDerivedToBase(SourceExprP,
+ [&](const Expr *E) {
+ return E == Target || ConditionalOperatorM(E) ||
+ ElvisOperator(E);
+ }) ||
+ EvalCommaExpr(SourceExprP, [&](const Expr *E) {
+ return IgnoreDerivedToBase(
+ E->IgnoreParens(), [&](const Expr *EE) { return EE == Target; });
+ });
+}
+
namespace {
AST_MATCHER_P(LambdaExpr, hasCaptureInit, const Expr *, E) {
@@ -27,51 +102,14 @@ AST_MATCHER_P(CXXForRangeStmt, hasRangeStmt,
return InnerMatcher.matches(*Range, Finder, Builder);
}
-AST_MATCHER_P(Expr, maybeEvalCommaExpr, ast_matchers::internal::Matcher<Expr>,
- InnerMatcher) {
- const Expr *Result = &Node;
- while (const auto *BOComma =
- dyn_cast_or_null<BinaryOperator>(Result->IgnoreParens())) {
- if (!BOComma->isCommaOp())
- break;
- Result = BOComma->getRHS();
- }
- return InnerMatcher.matches(*Result, Finder, Builder);
-}
-
-AST_MATCHER_P(Expr, canResolveToExpr, ast_matchers::internal::Matcher<Expr>,
- InnerMatcher) {
- auto DerivedToBase = [](const ast_matchers::internal::Matcher<Expr> &Inner) {
- return implicitCastExpr(anyOf(hasCastKind(CK_DerivedToBase),
- hasCastKind(CK_UncheckedDerivedToBase)),
- hasSourceExpression(Inner));
- };
- auto IgnoreDerivedToBase =
- [&DerivedToBase](const ast_matchers::internal::Matcher<Expr> &Inner) {
- return ignoringParens(expr(anyOf(Inner, DerivedToBase(Inner))));
- };
-
- // The 'ConditionalOperator' matches on `<anything> ? <expr> : <expr>`.
- // This matching must be recursive because `<expr>` can be anything resolving
- // to the `InnerMatcher`, for example another conditional operator.
- // The edge-case `BaseClass &b = <cond> ? DerivedVar1 : DerivedVar2;`
- // is handled, too. The implicit cast happens outside of the conditional.
- // This is matched by `IgnoreDerivedToBase(canResolveToExpr(InnerMatcher))`
- // below.
- auto const ConditionalOperator = conditionalOperator(anyOf(
- hasTrueExpression(ignoringParens(canResolveToExpr(InnerMatcher))),
- hasFalseExpression(ignoringParens(canResolveToExpr(InnerMatcher)))));
- auto const ElvisOperator = binaryConditionalOperator(anyOf(
- hasTrueExpression(ignoringParens(canResolveToExpr(InnerMatcher))),
- hasFalseExpression(ignoringParens(canResolveToExpr(InnerMatcher)))));
-
- auto const ComplexMatcher = ignoringParens(
- expr(anyOf(IgnoreDerivedToBase(InnerMatcher),
- maybeEvalCommaExpr(IgnoreDerivedToBase(InnerMatcher)),
- IgnoreDerivedToBase(ConditionalOperator),
- IgnoreDerivedToBase(ElvisOperator))));
-
- return ComplexMatcher.matches(Node, Finder, Builder);
+AST_MATCHER_P(Stmt, canResolveToExpr, const Stmt *, Inner) {
+ auto *Exp = dyn_cast<Expr>(&Node);
+ if (!Exp)
+ return true;
+ auto *Target = dyn_cast<Expr>(Inner);
+ if (!Target)
+ return false;
+ return canExprResolveTo(Exp, Target);
}
// Similar to 'hasAnyArgument', but does not work because 'InitListExpr' does
@@ -95,11 +133,33 @@ AST_MATCHER(CXXTypeidExpr, isPotentiallyEvaluated) {
return Node.isPotentiallyEvaluated();
}
+AST_MATCHER(CXXMemberCallExpr, isConstCallee) {
+ const Decl *CalleeDecl = Node.getCalleeDecl();
+ const auto *VD = dyn_cast_or_null<ValueDecl>(CalleeDecl);
+ if (!VD)
+ return false;
+ const QualType T = VD->getType().getCanonicalType();
+ const auto *MPT = dyn_cast<MemberPointerType>(T);
+ const auto *FPT = MPT ? cast<FunctionProtoType>(MPT->getPointeeType())
+ : dyn_cast<FunctionProtoType>(T);
+ if (!FPT)
+ return false;
+ return FPT->isConst();
+}
+
AST_MATCHER_P(GenericSelectionExpr, hasControllingExpr,
ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
+ if (Node.isTypePredicate())
+ return false;
return InnerMatcher.matches(*Node.getControllingExpr(), Finder, Builder);
}
+template <typename T>
+ast_matchers::internal::Matcher<T>
+findFirst(const ast_matchers::internal::Matcher<T> &Matcher) {
+ return anyOf(Matcher, hasDescendant(Matcher));
+}
+
const auto nonConstReferenceType = [] {
return hasUnqualifiedDesugaredType(
referenceType(pointee(unless(isConstQualified()))));
@@ -183,9 +243,16 @@ const Stmt *ExprMutationAnalyzer::findMutationMemoized(
const Stmt *ExprMutationAnalyzer::tryEachDeclRef(const Decl *Dec,
MutationFinder Finder) {
- const auto Refs =
- match(findAll(declRefExpr(to(equalsNode(Dec))).bind(NodeID<Expr>::value)),
- Stm, Context);
+ const auto Refs = match(
+ findAll(
+ declRefExpr(to(
+ // `Dec` or a binding if `Dec` is a decomposition.
+ anyOf(equalsNode(Dec),
+ bindingDecl(forDecomposition(equalsNode(Dec))))
+ //
+ ))
+ .bind(NodeID<Expr>::value)),
+ Stm, Context);
for (const auto &RefNodes : Refs) {
const auto *E = RefNodes.getNodeAs<Expr>(NodeID<Expr>::value);
if ((this->*Finder)(E))
@@ -194,12 +261,13 @@ const Stmt *ExprMutationAnalyzer::tryEachDeclRef(const Decl *Dec,
return nullptr;
}
-bool ExprMutationAnalyzer::isUnevaluated(const Expr *Exp) {
- return selectFirst<Expr>(
+bool ExprMutationAnalyzer::isUnevaluated(const Stmt *Exp, const Stmt &Stm,
+ ASTContext &Context) {
+ return selectFirst<Stmt>(
NodeID<Expr>::value,
match(
- findAll(
- expr(canResolveToExpr(equalsNode(Exp)),
+ findFirst(
+ stmt(canResolveToExpr(Exp),
anyOf(
// `Exp` is part of the underlying expression of
// decltype/typeof if it has an ancestor of
@@ -225,6 +293,10 @@ bool ExprMutationAnalyzer::isUnevaluated(const Expr *Exp) {
Stm, Context)) != nullptr;
}
+bool ExprMutationAnalyzer::isUnevaluated(const Expr *Exp) {
+ return isUnevaluated(Exp, Stm, Context);
+}
+
const Stmt *
ExprMutationAnalyzer::findExprMutation(ArrayRef<BoundNodes> Matches) {
return tryEachMatch<Expr>(Matches, this, &ExprMutationAnalyzer::findMutation);
@@ -249,44 +321,45 @@ const Stmt *ExprMutationAnalyzer::findDeclPointeeMutation(
const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
// LHS of any assignment operators.
- const auto AsAssignmentLhs = binaryOperator(
- isAssignmentOperator(), hasLHS(canResolveToExpr(equalsNode(Exp))));
+ const auto AsAssignmentLhs =
+ binaryOperator(isAssignmentOperator(), hasLHS(canResolveToExpr(Exp)));
// Operand of increment/decrement operators.
const auto AsIncDecOperand =
unaryOperator(anyOf(hasOperatorName("++"), hasOperatorName("--")),
- hasUnaryOperand(canResolveToExpr(equalsNode(Exp))));
+ hasUnaryOperand(canResolveToExpr(Exp)));
// Invoking non-const member function.
// A member function is assumed to be non-const when it is unresolved.
const auto NonConstMethod = cxxMethodDecl(unless(isConst()));
const auto AsNonConstThis = expr(anyOf(
- cxxMemberCallExpr(callee(NonConstMethod),
- on(canResolveToExpr(equalsNode(Exp)))),
+ cxxMemberCallExpr(on(canResolveToExpr(Exp)), unless(isConstCallee())),
cxxOperatorCallExpr(callee(NonConstMethod),
- hasArgument(0, canResolveToExpr(equalsNode(Exp)))),
+ hasArgument(0, canResolveToExpr(Exp))),
// In case of a templated type, calling overloaded operators is not
// resolved and modelled as `binaryOperator` on a dependent type.
// Such instances are considered a modification, because they can modify
// in different instantiations of the template.
- binaryOperator(hasEitherOperand(
- allOf(ignoringImpCasts(canResolveToExpr(equalsNode(Exp))),
- isTypeDependent()))),
+ binaryOperator(isTypeDependent(),
+ hasEitherOperand(ignoringImpCasts(canResolveToExpr(Exp)))),
+ // A fold expression may contain `Exp` as it's initializer.
+ // We don't know if the operator modifies `Exp` because the
+ // operator is type dependent due to the parameter pack.
+ cxxFoldExpr(hasFoldInit(ignoringImpCasts(canResolveToExpr(Exp)))),
// Within class templates and member functions the member expression might
// not be resolved. In that case, the `callExpr` is considered to be a
// modification.
- callExpr(
- callee(expr(anyOf(unresolvedMemberExpr(hasObjectExpression(
- canResolveToExpr(equalsNode(Exp)))),
- cxxDependentScopeMemberExpr(hasObjectExpression(
- canResolveToExpr(equalsNode(Exp)))))))),
+ callExpr(callee(expr(anyOf(
+ unresolvedMemberExpr(hasObjectExpression(canResolveToExpr(Exp))),
+ cxxDependentScopeMemberExpr(
+ hasObjectExpression(canResolveToExpr(Exp))))))),
// Match on a call to a known method, but the call itself is type
// dependent (e.g. `vector<T> v; v.push(T{});` in a templated function).
- callExpr(allOf(isTypeDependent(),
- callee(memberExpr(hasDeclaration(NonConstMethod),
- hasObjectExpression(canResolveToExpr(
- equalsNode(Exp)))))))));
+ callExpr(allOf(
+ isTypeDependent(),
+ callee(memberExpr(hasDeclaration(NonConstMethod),
+ hasObjectExpression(canResolveToExpr(Exp))))))));
// Taking address of 'Exp'.
// We're assuming 'Exp' is mutated as soon as its address is taken, though in
@@ -296,11 +369,10 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
unaryOperator(hasOperatorName("&"),
// A NoOp implicit cast is adding const.
unless(hasParent(implicitCastExpr(hasCastKind(CK_NoOp)))),
- hasUnaryOperand(canResolveToExpr(equalsNode(Exp))));
- const auto AsPointerFromArrayDecay =
- castExpr(hasCastKind(CK_ArrayToPointerDecay),
- unless(hasParent(arraySubscriptExpr())),
- has(canResolveToExpr(equalsNode(Exp))));
+ hasUnaryOperand(canResolveToExpr(Exp)));
+ const auto AsPointerFromArrayDecay = castExpr(
+ hasCastKind(CK_ArrayToPointerDecay),
+ unless(hasParent(arraySubscriptExpr())), has(canResolveToExpr(Exp)));
// Treat calling `operator->()` of move-only classes as taking address.
// These are typically smart pointers with unique ownership so we treat
// mutation of pointee as mutation of the smart pointer itself.
@@ -308,7 +380,7 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
hasOverloadedOperatorName("->"),
callee(
cxxMethodDecl(ofClass(isMoveOnly()), returns(nonConstPointerType()))),
- argumentCountIs(1), hasArgument(0, canResolveToExpr(equalsNode(Exp))));
+ argumentCountIs(1), hasArgument(0, canResolveToExpr(Exp)));
// Used as non-const-ref argument when calling a function.
// An argument is assumed to be non-const-ref when the function is unresolved.
@@ -316,8 +388,8 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
// findFunctionArgMutation which has additional smarts for handling forwarding
// references.
const auto NonConstRefParam = forEachArgumentWithParamType(
- anyOf(canResolveToExpr(equalsNode(Exp)),
- memberExpr(hasObjectExpression(canResolveToExpr(equalsNode(Exp))))),
+ anyOf(canResolveToExpr(Exp),
+ memberExpr(hasObjectExpression(canResolveToExpr(Exp)))),
nonConstReferenceType());
const auto NotInstantiated = unless(hasDeclaration(isInstantiated()));
const auto TypeDependentCallee =
@@ -328,19 +400,17 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
const auto AsNonConstRefArg = anyOf(
callExpr(NonConstRefParam, NotInstantiated),
cxxConstructExpr(NonConstRefParam, NotInstantiated),
- callExpr(TypeDependentCallee,
- hasAnyArgument(canResolveToExpr(equalsNode(Exp)))),
- cxxUnresolvedConstructExpr(
- hasAnyArgument(canResolveToExpr(equalsNode(Exp)))),
+ callExpr(TypeDependentCallee, hasAnyArgument(canResolveToExpr(Exp))),
+ cxxUnresolvedConstructExpr(hasAnyArgument(canResolveToExpr(Exp))),
// Previous False Positive in the following Code:
// `template <typename T> void f() { int i = 42; new Type<T>(i); }`
// Where the constructor of `Type` takes its argument as reference.
// The AST does not resolve in a `cxxConstructExpr` because it is
// type-dependent.
- parenListExpr(hasDescendant(expr(canResolveToExpr(equalsNode(Exp))))),
+ parenListExpr(hasDescendant(expr(canResolveToExpr(Exp)))),
// If the initializer is for a reference type, there is no cast for
// the variable. Values are cast to RValue first.
- initListExpr(hasAnyInit(expr(canResolveToExpr(equalsNode(Exp))))));
+ initListExpr(hasAnyInit(expr(canResolveToExpr(Exp)))));
// Captured by a lambda by reference.
// If we're initializing a capture with 'Exp' directly then we're initializing
@@ -354,74 +424,72 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
// For returning by const-ref there will be an ImplicitCastExpr <NoOp> (for
// adding const.)
const auto AsNonConstRefReturn =
- returnStmt(hasReturnValue(canResolveToExpr(equalsNode(Exp))));
+ returnStmt(hasReturnValue(canResolveToExpr(Exp)));
// It is used as a non-const-reference for initalizing a range-for loop.
- const auto AsNonConstRefRangeInit = cxxForRangeStmt(
- hasRangeInit(declRefExpr(allOf(canResolveToExpr(equalsNode(Exp)),
- hasType(nonConstReferenceType())))));
+ const auto AsNonConstRefRangeInit = cxxForRangeStmt(hasRangeInit(declRefExpr(
+ allOf(canResolveToExpr(Exp), hasType(nonConstReferenceType())))));
const auto Matches = match(
- traverse(TK_AsIs,
- findAll(stmt(anyOf(AsAssignmentLhs, AsIncDecOperand,
- AsNonConstThis, AsAmpersandOperand,
- AsPointerFromArrayDecay, AsOperatorArrowThis,
- AsNonConstRefArg, AsLambdaRefCaptureInit,
- AsNonConstRefReturn, AsNonConstRefRangeInit))
- .bind("stmt"))),
+ traverse(
+ TK_AsIs,
+ findFirst(stmt(anyOf(AsAssignmentLhs, AsIncDecOperand, AsNonConstThis,
+ AsAmpersandOperand, AsPointerFromArrayDecay,
+ AsOperatorArrowThis, AsNonConstRefArg,
+ AsLambdaRefCaptureInit, AsNonConstRefReturn,
+ AsNonConstRefRangeInit))
+ .bind("stmt"))),
Stm, Context);
return selectFirst<Stmt>("stmt", Matches);
}
const Stmt *ExprMutationAnalyzer::findMemberMutation(const Expr *Exp) {
// Check whether any member of 'Exp' is mutated.
- const auto MemberExprs =
- match(findAll(expr(anyOf(memberExpr(hasObjectExpression(
- canResolveToExpr(equalsNode(Exp)))),
- cxxDependentScopeMemberExpr(hasObjectExpression(
- canResolveToExpr(equalsNode(Exp))))))
- .bind(NodeID<Expr>::value)),
- Stm, Context);
+ const auto MemberExprs = match(
+ findAll(expr(anyOf(memberExpr(hasObjectExpression(canResolveToExpr(Exp))),
+ cxxDependentScopeMemberExpr(
+ hasObjectExpression(canResolveToExpr(Exp))),
+ binaryOperator(hasOperatorName(".*"),
+ hasLHS(equalsNode(Exp)))))
+ .bind(NodeID<Expr>::value)),
+ Stm, Context);
return findExprMutation(MemberExprs);
}
const Stmt *ExprMutationAnalyzer::findArrayElementMutation(const Expr *Exp) {
// Check whether any element of an array is mutated.
- const auto SubscriptExprs =
- match(findAll(arraySubscriptExpr(
- anyOf(hasBase(canResolveToExpr(equalsNode(Exp))),
- hasBase(implicitCastExpr(
- allOf(hasCastKind(CK_ArrayToPointerDecay),
- hasSourceExpression(canResolveToExpr(
- equalsNode(Exp))))))))
- .bind(NodeID<Expr>::value)),
- Stm, Context);
+ const auto SubscriptExprs = match(
+ findAll(arraySubscriptExpr(
+ anyOf(hasBase(canResolveToExpr(Exp)),
+ hasBase(implicitCastExpr(allOf(
+ hasCastKind(CK_ArrayToPointerDecay),
+ hasSourceExpression(canResolveToExpr(Exp)))))))
+ .bind(NodeID<Expr>::value)),
+ Stm, Context);
return findExprMutation(SubscriptExprs);
}
const Stmt *ExprMutationAnalyzer::findCastMutation(const Expr *Exp) {
// If the 'Exp' is explicitly casted to a non-const reference type the
// 'Exp' is considered to be modified.
- const auto ExplicitCast = match(
- findAll(
- stmt(castExpr(hasSourceExpression(canResolveToExpr(equalsNode(Exp))),
- explicitCastExpr(
- hasDestinationType(nonConstReferenceType()))))
- .bind("stmt")),
- Stm, Context);
+ const auto ExplicitCast =
+ match(findFirst(stmt(castExpr(hasSourceExpression(canResolveToExpr(Exp)),
+ explicitCastExpr(hasDestinationType(
+ nonConstReferenceType()))))
+ .bind("stmt")),
+ Stm, Context);
if (const auto *CastStmt = selectFirst<Stmt>("stmt", ExplicitCast))
return CastStmt;
// If 'Exp' is casted to any non-const reference type, check the castExpr.
const auto Casts = match(
- findAll(
- expr(castExpr(hasSourceExpression(canResolveToExpr(equalsNode(Exp))),
- anyOf(explicitCastExpr(
- hasDestinationType(nonConstReferenceType())),
- implicitCastExpr(hasImplicitDestinationType(
- nonConstReferenceType())))))
- .bind(NodeID<Expr>::value)),
+ findAll(expr(castExpr(hasSourceExpression(canResolveToExpr(Exp)),
+ anyOf(explicitCastExpr(hasDestinationType(
+ nonConstReferenceType())),
+ implicitCastExpr(hasImplicitDestinationType(
+ nonConstReferenceType())))))
+ .bind(NodeID<Expr>::value)),
Stm, Context);
if (const Stmt *S = findExprMutation(Casts))
@@ -430,7 +498,7 @@ const Stmt *ExprMutationAnalyzer::findCastMutation(const Expr *Exp) {
const auto Calls =
match(findAll(callExpr(callee(namedDecl(
hasAnyName("::std::move", "::std::forward"))),
- hasArgument(0, canResolveToExpr(equalsNode(Exp))))
+ hasArgument(0, canResolveToExpr(Exp)))
.bind("expr")),
Stm, Context);
return findExprMutation(Calls);
@@ -446,12 +514,14 @@ const Stmt *ExprMutationAnalyzer::findRangeLoopMutation(const Expr *Exp) {
const auto DeclStmtToNonRefToArray = declStmt(hasSingleDecl(varDecl(hasType(
hasUnqualifiedDesugaredType(referenceType(pointee(arrayType())))))));
const auto RefToArrayRefToElements = match(
- findAll(stmt(cxxForRangeStmt(
- hasLoopVariable(varDecl(hasType(nonConstReferenceType()))
- .bind(NodeID<Decl>::value)),
- hasRangeStmt(DeclStmtToNonRefToArray),
- hasRangeInit(canResolveToExpr(equalsNode(Exp)))))
- .bind("stmt")),
+ findFirst(stmt(cxxForRangeStmt(
+ hasLoopVariable(
+ varDecl(anyOf(hasType(nonConstReferenceType()),
+ hasType(nonConstPointerType())))
+ .bind(NodeID<Decl>::value)),
+ hasRangeStmt(DeclStmtToNonRefToArray),
+ hasRangeInit(canResolveToExpr(Exp))))
+ .bind("stmt")),
Stm, Context);
if (const auto *BadRangeInitFromArray =
@@ -475,12 +545,12 @@ const Stmt *ExprMutationAnalyzer::findRangeLoopMutation(const Expr *Exp) {
hasSingleDecl(varDecl(hasType(hasUnqualifiedDesugaredType(referenceType(
pointee(hasDeclaration(cxxRecordDecl(HasAnyNonConstIterator)))))))));
- const auto RefToContainerBadIterators =
- match(findAll(stmt(cxxForRangeStmt(allOf(
- hasRangeStmt(DeclStmtToNonConstIteratorContainer),
- hasRangeInit(canResolveToExpr(equalsNode(Exp))))))
- .bind("stmt")),
- Stm, Context);
+ const auto RefToContainerBadIterators = match(
+ findFirst(stmt(cxxForRangeStmt(allOf(
+ hasRangeStmt(DeclStmtToNonConstIteratorContainer),
+ hasRangeInit(canResolveToExpr(Exp)))))
+ .bind("stmt")),
+ Stm, Context);
if (const auto *BadIteratorsContainer =
selectFirst<Stmt>("stmt", RefToContainerBadIterators))
@@ -492,7 +562,7 @@ const Stmt *ExprMutationAnalyzer::findRangeLoopMutation(const Expr *Exp) {
match(findAll(cxxForRangeStmt(
hasLoopVariable(varDecl(hasType(nonConstReferenceType()))
.bind(NodeID<Decl>::value)),
- hasRangeInit(canResolveToExpr(equalsNode(Exp))))),
+ hasRangeInit(canResolveToExpr(Exp)))),
Stm, Context);
return findDeclMutation(LoopVars);
}
@@ -501,31 +571,29 @@ const Stmt *ExprMutationAnalyzer::findReferenceMutation(const Expr *Exp) {
// Follow non-const reference returned by `operator*()` of move-only classes.
// These are typically smart pointers with unique ownership so we treat
// mutation of pointee as mutation of the smart pointer itself.
- const auto Ref =
- match(findAll(cxxOperatorCallExpr(
- hasOverloadedOperatorName("*"),
- callee(cxxMethodDecl(ofClass(isMoveOnly()),
- returns(nonConstReferenceType()))),
- argumentCountIs(1),
- hasArgument(0, canResolveToExpr(equalsNode(Exp))))
- .bind(NodeID<Expr>::value)),
- Stm, Context);
+ const auto Ref = match(
+ findAll(cxxOperatorCallExpr(
+ hasOverloadedOperatorName("*"),
+ callee(cxxMethodDecl(ofClass(isMoveOnly()),
+ returns(nonConstReferenceType()))),
+ argumentCountIs(1), hasArgument(0, canResolveToExpr(Exp)))
+ .bind(NodeID<Expr>::value)),
+ Stm, Context);
if (const Stmt *S = findExprMutation(Ref))
return S;
// If 'Exp' is bound to a non-const reference, check all declRefExpr to that.
const auto Refs = match(
stmt(forEachDescendant(
- varDecl(
- hasType(nonConstReferenceType()),
- hasInitializer(anyOf(canResolveToExpr(equalsNode(Exp)),
- memberExpr(hasObjectExpression(
- canResolveToExpr(equalsNode(Exp)))))),
- hasParent(declStmt().bind("stmt")),
- // Don't follow the reference in range statement, we've
- // handled that separately.
- unless(hasParent(declStmt(hasParent(
- cxxForRangeStmt(hasRangeStmt(equalsBoundNode("stmt"))))))))
+ varDecl(hasType(nonConstReferenceType()),
+ hasInitializer(anyOf(
+ canResolveToExpr(Exp),
+ memberExpr(hasObjectExpression(canResolveToExpr(Exp))))),
+ hasParent(declStmt().bind("stmt")),
+ // Don't follow the reference in range statement, we've
+ // handled that separately.
+ unless(hasParent(declStmt(hasParent(cxxForRangeStmt(
+ hasRangeStmt(equalsBoundNode("stmt"))))))))
.bind(NodeID<Decl>::value))),
Stm, Context);
return findDeclMutation(Refs);
@@ -533,7 +601,7 @@ const Stmt *ExprMutationAnalyzer::findReferenceMutation(const Expr *Exp) {
const Stmt *ExprMutationAnalyzer::findFunctionArgMutation(const Expr *Exp) {
const auto NonConstRefParam = forEachArgumentWithParam(
- canResolveToExpr(equalsNode(Exp)),
+ canResolveToExpr(Exp),
parmVarDecl(hasType(nonConstReferenceType())).bind("parm"));
const auto IsInstantiated = hasDeclaration(isInstantiated());
const auto FuncDecl = hasDeclaration(functionDecl().bind("func"));
@@ -593,7 +661,7 @@ FunctionParmMutationAnalyzer::FunctionParmMutationAnalyzer(
for (const CXXCtorInitializer *Init : Ctor->inits()) {
ExprMutationAnalyzer InitAnalyzer(*Init->getInit(), Context);
for (const ParmVarDecl *Parm : Ctor->parameters()) {
- if (Results.find(Parm) != Results.end())
+ if (Results.contains(Parm))
continue;
if (const Stmt *S = InitAnalyzer.findMutation(Parm))
Results[Parm] = S;
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Arena.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Arena.cpp
new file mode 100644
index 000000000000..81137e8088e3
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Arena.cpp
@@ -0,0 +1,213 @@
+//===-- Arena.cpp ---------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/Arena.h"
+#include "clang/Analysis/FlowSensitive/Formula.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "llvm/Support/Error.h"
+#include <string>
+
+namespace clang::dataflow {
+
+static std::pair<const Formula *, const Formula *>
+canonicalFormulaPair(const Formula &LHS, const Formula &RHS) {
+ auto Res = std::make_pair(&LHS, &RHS);
+ if (&RHS < &LHS) // FIXME: use a deterministic order instead
+ std::swap(Res.first, Res.second);
+ return Res;
+}
+
+template <class Key, class ComputeFunc>
+const Formula &cached(llvm::DenseMap<Key, const Formula *> &Cache, Key K,
+ ComputeFunc &&Compute) {
+ auto [It, Inserted] = Cache.try_emplace(std::forward<Key>(K));
+ if (Inserted)
+ It->second = Compute();
+ return *It->second;
+}
+
+const Formula &Arena::makeAtomRef(Atom A) {
+ return cached(AtomRefs, A, [&] {
+ return &Formula::create(Alloc, Formula::AtomRef, {},
+ static_cast<unsigned>(A));
+ });
+}
+
+const Formula &Arena::makeAnd(const Formula &LHS, const Formula &RHS) {
+ return cached(Ands, canonicalFormulaPair(LHS, RHS), [&] {
+ if (&LHS == &RHS)
+ return &LHS;
+ if (LHS.kind() == Formula::Literal)
+ return LHS.literal() ? &RHS : &LHS;
+ if (RHS.kind() == Formula::Literal)
+ return RHS.literal() ? &LHS : &RHS;
+
+ return &Formula::create(Alloc, Formula::And, {&LHS, &RHS});
+ });
+}
+
+const Formula &Arena::makeOr(const Formula &LHS, const Formula &RHS) {
+ return cached(Ors, canonicalFormulaPair(LHS, RHS), [&] {
+ if (&LHS == &RHS)
+ return &LHS;
+ if (LHS.kind() == Formula::Literal)
+ return LHS.literal() ? &LHS : &RHS;
+ if (RHS.kind() == Formula::Literal)
+ return RHS.literal() ? &RHS : &LHS;
+
+ return &Formula::create(Alloc, Formula::Or, {&LHS, &RHS});
+ });
+}
+
+const Formula &Arena::makeNot(const Formula &Val) {
+ return cached(Nots, &Val, [&] {
+ if (Val.kind() == Formula::Not)
+ return Val.operands()[0];
+ if (Val.kind() == Formula::Literal)
+ return &makeLiteral(!Val.literal());
+
+ return &Formula::create(Alloc, Formula::Not, {&Val});
+ });
+}
+
+const Formula &Arena::makeImplies(const Formula &LHS, const Formula &RHS) {
+ return cached(Implies, std::make_pair(&LHS, &RHS), [&] {
+ if (&LHS == &RHS)
+ return &makeLiteral(true);
+ if (LHS.kind() == Formula::Literal)
+ return LHS.literal() ? &RHS : &makeLiteral(true);
+ if (RHS.kind() == Formula::Literal)
+ return RHS.literal() ? &RHS : &makeNot(LHS);
+
+ return &Formula::create(Alloc, Formula::Implies, {&LHS, &RHS});
+ });
+}
+
+const Formula &Arena::makeEquals(const Formula &LHS, const Formula &RHS) {
+ return cached(Equals, canonicalFormulaPair(LHS, RHS), [&] {
+ if (&LHS == &RHS)
+ return &makeLiteral(true);
+ if (LHS.kind() == Formula::Literal)
+ return LHS.literal() ? &RHS : &makeNot(RHS);
+ if (RHS.kind() == Formula::Literal)
+ return RHS.literal() ? &LHS : &makeNot(LHS);
+
+ return &Formula::create(Alloc, Formula::Equal, {&LHS, &RHS});
+ });
+}
+
+IntegerValue &Arena::makeIntLiteral(llvm::APInt Value) {
+ auto [It, Inserted] = IntegerLiterals.try_emplace(Value, nullptr);
+
+ if (Inserted)
+ It->second = &create<IntegerValue>();
+ return *It->second;
+}
+
+BoolValue &Arena::makeBoolValue(const Formula &F) {
+ auto [It, Inserted] = FormulaValues.try_emplace(&F);
+ if (Inserted)
+ It->second = (F.kind() == Formula::AtomRef)
+ ? (BoolValue *)&create<AtomicBoolValue>(F)
+ : &create<FormulaBoolValue>(F);
+ return *It->second;
+}
+
+namespace {
+const Formula *parse(Arena &A, llvm::StringRef &In) {
+ auto EatSpaces = [&] { In = In.ltrim(' '); };
+ EatSpaces();
+
+ if (In.consume_front("!")) {
+ if (auto *Arg = parse(A, In))
+ return &A.makeNot(*Arg);
+ return nullptr;
+ }
+
+ if (In.consume_front("(")) {
+ auto *Arg1 = parse(A, In);
+ if (!Arg1)
+ return nullptr;
+
+ EatSpaces();
+ decltype(&Arena::makeOr) Op;
+ if (In.consume_front("|"))
+ Op = &Arena::makeOr;
+ else if (In.consume_front("&"))
+ Op = &Arena::makeAnd;
+ else if (In.consume_front("=>"))
+ Op = &Arena::makeImplies;
+ else if (In.consume_front("="))
+ Op = &Arena::makeEquals;
+ else
+ return nullptr;
+
+ auto *Arg2 = parse(A, In);
+ if (!Arg2)
+ return nullptr;
+
+ EatSpaces();
+ if (!In.consume_front(")"))
+ return nullptr;
+
+ return &(A.*Op)(*Arg1, *Arg2);
+ }
+
+ // For now, only support unnamed variables V0, V1 etc.
+ // FIXME: parse e.g. "X" by allocating an atom and storing a name somewhere.
+ if (In.consume_front("V")) {
+ std::underlying_type_t<Atom> At;
+ if (In.consumeInteger(10, At))
+ return nullptr;
+ return &A.makeAtomRef(static_cast<Atom>(At));
+ }
+
+ if (In.consume_front("true"))
+ return &A.makeLiteral(true);
+ if (In.consume_front("false"))
+ return &A.makeLiteral(false);
+
+ return nullptr;
+}
+
+class FormulaParseError : public llvm::ErrorInfo<FormulaParseError> {
+ std::string Formula;
+ unsigned Offset;
+
+public:
+ static char ID;
+ FormulaParseError(llvm::StringRef Formula, unsigned Offset)
+ : Formula(Formula), Offset(Offset) {}
+
+ void log(raw_ostream &OS) const override {
+ OS << "bad formula at offset " << Offset << "\n";
+ OS << Formula << "\n";
+ OS.indent(Offset) << "^";
+ }
+
+ std::error_code convertToErrorCode() const override {
+ return std::make_error_code(std::errc::invalid_argument);
+ }
+};
+
+char FormulaParseError::ID = 0;
+
+} // namespace
+
+llvm::Expected<const Formula &> Arena::parseFormula(llvm::StringRef In) {
+ llvm::StringRef Rest = In;
+ auto *Result = parse(*this, Rest);
+ if (!Result) // parse() hit something unparseable
+ return llvm::make_error<FormulaParseError>(In, In.size() - Rest.size());
+ Rest = Rest.ltrim();
+ if (!Rest.empty()) // parse didn't consume all the input
+ return llvm::make_error<FormulaParseError>(In, In.size() - Rest.size());
+ return *Result;
+}
+
+} // namespace clang::dataflow
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp
new file mode 100644
index 000000000000..c9ebffe6f378
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp
@@ -0,0 +1,121 @@
+//===- ControlFlowContext.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a ControlFlowContext class that is used by dataflow
+// analyses that run over Control-Flow Graphs (CFGs).
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Stmt.h"
+#include "clang/Analysis/CFG.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/Error.h"
+#include <utility>
+
+namespace clang {
+namespace dataflow {
+
+/// Returns a map from statements to basic blocks that contain them.
+static llvm::DenseMap<const Stmt *, const CFGBlock *>
+buildStmtToBasicBlockMap(const CFG &Cfg) {
+ llvm::DenseMap<const Stmt *, const CFGBlock *> StmtToBlock;
+ for (const CFGBlock *Block : Cfg) {
+ if (Block == nullptr)
+ continue;
+
+ for (const CFGElement &Element : *Block) {
+ auto Stmt = Element.getAs<CFGStmt>();
+ if (!Stmt)
+ continue;
+
+ StmtToBlock[Stmt->getStmt()] = Block;
+ }
+ if (const Stmt *TerminatorStmt = Block->getTerminatorStmt())
+ StmtToBlock[TerminatorStmt] = Block;
+ }
+ return StmtToBlock;
+}
+
+static llvm::BitVector findReachableBlocks(const CFG &Cfg) {
+ llvm::BitVector BlockReachable(Cfg.getNumBlockIDs(), false);
+
+ llvm::SmallVector<const CFGBlock *> BlocksToVisit;
+ BlocksToVisit.push_back(&Cfg.getEntry());
+ while (!BlocksToVisit.empty()) {
+ const CFGBlock *Block = BlocksToVisit.back();
+ BlocksToVisit.pop_back();
+
+ if (BlockReachable[Block->getBlockID()])
+ continue;
+
+ BlockReachable[Block->getBlockID()] = true;
+
+ for (const CFGBlock *Succ : Block->succs())
+ if (Succ)
+ BlocksToVisit.push_back(Succ);
+ }
+
+ return BlockReachable;
+}
+
+llvm::Expected<ControlFlowContext>
+ControlFlowContext::build(const FunctionDecl &Func) {
+ if (!Func.doesThisDeclarationHaveABody())
+ return llvm::createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Cannot analyze function without a body");
+
+ return build(Func, *Func.getBody(), Func.getASTContext());
+}
+
+llvm::Expected<ControlFlowContext>
+ControlFlowContext::build(const Decl &D, Stmt &S, ASTContext &C) {
+ if (D.isTemplated())
+ return llvm::createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Cannot analyze templated declarations");
+
+ // The shape of certain elements of the AST can vary depending on the
+ // language. We currently only support C++.
+ if (!C.getLangOpts().CPlusPlus)
+ return llvm::createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Can only analyze C++");
+
+ CFG::BuildOptions Options;
+ Options.PruneTriviallyFalseEdges = true;
+ Options.AddImplicitDtors = true;
+ Options.AddTemporaryDtors = true;
+ Options.AddInitializers = true;
+ Options.AddCXXDefaultInitExprInCtors = true;
+ Options.AddLifetime = true;
+
+ // Ensure that all sub-expressions in basic blocks are evaluated.
+ Options.setAllAlwaysAdd();
+
+ auto Cfg = CFG::buildCFG(&D, &S, &C, Options);
+ if (Cfg == nullptr)
+ return llvm::createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "CFG::buildCFG failed");
+
+ llvm::DenseMap<const Stmt *, const CFGBlock *> StmtToBlock =
+ buildStmtToBasicBlockMap(*Cfg);
+
+ llvm::BitVector BlockReachable = findReachableBlocks(*Cfg);
+
+ return ControlFlowContext(D, std::move(Cfg), std::move(StmtToBlock),
+ std::move(BlockReachable));
+}
+
+} // namespace dataflow
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp
new file mode 100644
index 000000000000..f4c4af022f51
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp
@@ -0,0 +1,413 @@
+//===-- DataflowAnalysisContext.cpp -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a DataflowAnalysisContext class that owns objects that
+// encompass the state of a program and stores context that is used during
+// dataflow analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/DataflowAnalysisContext.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Analysis/FlowSensitive/DebugSupport.h"
+#include "clang/Analysis/FlowSensitive/Formula.h"
+#include "clang/Analysis/FlowSensitive/Logger.h"
+#include "clang/Analysis/FlowSensitive/SimplifyConstraints.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+static llvm::cl::opt<std::string> DataflowLog(
+ "dataflow-log", llvm::cl::Hidden, llvm::cl::ValueOptional,
+ llvm::cl::desc("Emit log of dataflow analysis. With no arg, writes textual "
+ "log to stderr. With an arg, writes HTML logs under the "
+ "specified directory (one per analyzed function)."));
+
+namespace clang {
+namespace dataflow {
+
+FieldSet DataflowAnalysisContext::getModeledFields(QualType Type) {
+ // During context-sensitive analysis, a struct may be allocated in one
+ // function, but its field accessed in a function lower in the stack than
+ // the allocation. Since we only collect fields used in the function where
+ // the allocation occurs, we can't apply that filter when performing
+ // context-sensitive analysis. But, this only applies to storage locations,
+ // since field access it not allowed to fail. In contrast, field *values*
+ // don't need this allowance, since the API allows for uninitialized fields.
+ if (Opts.ContextSensitiveOpts)
+ return getObjectFields(Type);
+
+ return llvm::set_intersection(getObjectFields(Type), ModeledFields);
+}
+
+void DataflowAnalysisContext::addModeledFields(const FieldSet &Fields) {
+ ModeledFields.set_union(Fields);
+}
+
+StorageLocation &DataflowAnalysisContext::createStorageLocation(QualType Type) {
+ if (!Type.isNull() && Type->isRecordType()) {
+ llvm::DenseMap<const ValueDecl *, StorageLocation *> FieldLocs;
+ for (const FieldDecl *Field : getModeledFields(Type))
+ if (Field->getType()->isReferenceType())
+ FieldLocs.insert({Field, nullptr});
+ else
+ FieldLocs.insert({Field, &createStorageLocation(
+ Field->getType().getNonReferenceType())});
+
+ RecordStorageLocation::SyntheticFieldMap SyntheticFields;
+ for (const auto &Entry : getSyntheticFields(Type))
+ SyntheticFields.insert(
+ {Entry.getKey(),
+ &createStorageLocation(Entry.getValue().getNonReferenceType())});
+
+ return createRecordStorageLocation(Type, std::move(FieldLocs),
+ std::move(SyntheticFields));
+ }
+ return arena().create<ScalarStorageLocation>(Type);
+}
+
+// Returns the keys for a given `StringMap`.
+// Can't use `StringSet` as the return type as it doesn't support `operator==`.
+template <typename T>
+static llvm::DenseSet<llvm::StringRef> getKeys(const llvm::StringMap<T> &Map) {
+ return llvm::DenseSet<llvm::StringRef>(Map.keys().begin(), Map.keys().end());
+}
+
+RecordStorageLocation &DataflowAnalysisContext::createRecordStorageLocation(
+ QualType Type, RecordStorageLocation::FieldToLoc FieldLocs,
+ RecordStorageLocation::SyntheticFieldMap SyntheticFields) {
+ assert(Type->isRecordType());
+ assert(containsSameFields(getModeledFields(Type), FieldLocs));
+ assert(getKeys(getSyntheticFields(Type)) == getKeys(SyntheticFields));
+
+ RecordStorageLocationCreated = true;
+ return arena().create<RecordStorageLocation>(Type, std::move(FieldLocs),
+ std::move(SyntheticFields));
+}
+
+StorageLocation &
+DataflowAnalysisContext::getStableStorageLocation(const ValueDecl &D) {
+ if (auto *Loc = DeclToLoc.lookup(&D))
+ return *Loc;
+ auto &Loc = createStorageLocation(D.getType().getNonReferenceType());
+ DeclToLoc[&D] = &Loc;
+ return Loc;
+}
+
+StorageLocation &
+DataflowAnalysisContext::getStableStorageLocation(const Expr &E) {
+ const Expr &CanonE = ignoreCFGOmittedNodes(E);
+
+ if (auto *Loc = ExprToLoc.lookup(&CanonE))
+ return *Loc;
+ auto &Loc = createStorageLocation(CanonE.getType());
+ ExprToLoc[&CanonE] = &Loc;
+ return Loc;
+}
+
+PointerValue &
+DataflowAnalysisContext::getOrCreateNullPointerValue(QualType PointeeType) {
+ auto CanonicalPointeeType =
+ PointeeType.isNull() ? PointeeType : PointeeType.getCanonicalType();
+ auto Res = NullPointerVals.try_emplace(CanonicalPointeeType, nullptr);
+ if (Res.second) {
+ auto &PointeeLoc = createStorageLocation(CanonicalPointeeType);
+ Res.first->second = &arena().create<PointerValue>(PointeeLoc);
+ }
+ return *Res.first->second;
+}
+
+void DataflowAnalysisContext::addInvariant(const Formula &Constraint) {
+ if (Invariant == nullptr)
+ Invariant = &Constraint;
+ else
+ Invariant = &arena().makeAnd(*Invariant, Constraint);
+}
+
+void DataflowAnalysisContext::addFlowConditionConstraint(
+ Atom Token, const Formula &Constraint) {
+ auto Res = FlowConditionConstraints.try_emplace(Token, &Constraint);
+ if (!Res.second) {
+ Res.first->second =
+ &arena().makeAnd(*Res.first->second, Constraint);
+ }
+}
+
+Atom DataflowAnalysisContext::forkFlowCondition(Atom Token) {
+ Atom ForkToken = arena().makeFlowConditionToken();
+ FlowConditionDeps[ForkToken].insert(Token);
+ addFlowConditionConstraint(ForkToken, arena().makeAtomRef(Token));
+ return ForkToken;
+}
+
+Atom
+DataflowAnalysisContext::joinFlowConditions(Atom FirstToken,
+ Atom SecondToken) {
+ Atom Token = arena().makeFlowConditionToken();
+ FlowConditionDeps[Token].insert(FirstToken);
+ FlowConditionDeps[Token].insert(SecondToken);
+ addFlowConditionConstraint(Token,
+ arena().makeOr(arena().makeAtomRef(FirstToken),
+ arena().makeAtomRef(SecondToken)));
+ return Token;
+}
+
+Solver::Result DataflowAnalysisContext::querySolver(
+ llvm::SetVector<const Formula *> Constraints) {
+ return S->solve(Constraints.getArrayRef());
+}
+
+bool DataflowAnalysisContext::flowConditionImplies(Atom Token,
+ const Formula &F) {
+ if (F.isLiteral(true))
+ return true;
+
+ // Returns true if and only if truth assignment of the flow condition implies
+ // that `F` is also true. We prove whether or not this property holds by
+ // reducing the problem to satisfiability checking. In other words, we attempt
+ // to show that assuming `F` is false makes the constraints induced by the
+ // flow condition unsatisfiable.
+ llvm::SetVector<const Formula *> Constraints;
+ Constraints.insert(&arena().makeAtomRef(Token));
+ Constraints.insert(&arena().makeNot(F));
+ addTransitiveFlowConditionConstraints(Token, Constraints);
+ return isUnsatisfiable(std::move(Constraints));
+}
+
+bool DataflowAnalysisContext::flowConditionAllows(Atom Token,
+ const Formula &F) {
+ if (F.isLiteral(false))
+ return false;
+
+ llvm::SetVector<const Formula *> Constraints;
+ Constraints.insert(&arena().makeAtomRef(Token));
+ Constraints.insert(&F);
+ addTransitiveFlowConditionConstraints(Token, Constraints);
+ return isSatisfiable(std::move(Constraints));
+}
+
+bool DataflowAnalysisContext::equivalentFormulas(const Formula &Val1,
+ const Formula &Val2) {
+ llvm::SetVector<const Formula *> Constraints;
+ Constraints.insert(&arena().makeNot(arena().makeEquals(Val1, Val2)));
+ return isUnsatisfiable(std::move(Constraints));
+}
+
+void DataflowAnalysisContext::addTransitiveFlowConditionConstraints(
+ Atom Token, llvm::SetVector<const Formula *> &Constraints) {
+ llvm::DenseSet<Atom> AddedTokens;
+ std::vector<Atom> Remaining = {Token};
+
+ if (Invariant)
+ Constraints.insert(Invariant);
+ // Define all the flow conditions that might be referenced in constraints.
+ while (!Remaining.empty()) {
+ auto Token = Remaining.back();
+ Remaining.pop_back();
+ if (!AddedTokens.insert(Token).second)
+ continue;
+
+ auto ConstraintsIt = FlowConditionConstraints.find(Token);
+ if (ConstraintsIt == FlowConditionConstraints.end()) {
+ Constraints.insert(&arena().makeAtomRef(Token));
+ } else {
+ // Bind flow condition token via `iff` to its set of constraints:
+ // FC <=> (C1 ^ C2 ^ ...), where Ci are constraints
+ Constraints.insert(&arena().makeEquals(arena().makeAtomRef(Token),
+ *ConstraintsIt->second));
+ }
+
+ if (auto DepsIt = FlowConditionDeps.find(Token);
+ DepsIt != FlowConditionDeps.end())
+ for (Atom A : DepsIt->second)
+ Remaining.push_back(A);
+ }
+}
+
+static void printAtomList(const llvm::SmallVector<Atom> &Atoms,
+ llvm::raw_ostream &OS) {
+ OS << "(";
+ for (size_t i = 0; i < Atoms.size(); ++i) {
+ OS << Atoms[i];
+ if (i + 1 < Atoms.size())
+ OS << ", ";
+ }
+ OS << ")\n";
+}
+
+void DataflowAnalysisContext::dumpFlowCondition(Atom Token,
+ llvm::raw_ostream &OS) {
+ llvm::SetVector<const Formula *> Constraints;
+ Constraints.insert(&arena().makeAtomRef(Token));
+ addTransitiveFlowConditionConstraints(Token, Constraints);
+
+ OS << "Flow condition token: " << Token << "\n";
+ SimplifyConstraintsInfo Info;
+ llvm::SetVector<const Formula *> OriginalConstraints = Constraints;
+ simplifyConstraints(Constraints, arena(), &Info);
+ if (!Constraints.empty()) {
+ OS << "Constraints:\n";
+ for (const auto *Constraint : Constraints) {
+ Constraint->print(OS);
+ OS << "\n";
+ }
+ }
+ if (!Info.TrueAtoms.empty()) {
+ OS << "True atoms: ";
+ printAtomList(Info.TrueAtoms, OS);
+ }
+ if (!Info.FalseAtoms.empty()) {
+ OS << "False atoms: ";
+ printAtomList(Info.FalseAtoms, OS);
+ }
+ if (!Info.EquivalentAtoms.empty()) {
+ OS << "Equivalent atoms:\n";
+ for (const llvm::SmallVector<Atom> &Class : Info.EquivalentAtoms)
+ printAtomList(Class, OS);
+ }
+
+ OS << "\nFlow condition constraints before simplification:\n";
+ for (const auto *Constraint : OriginalConstraints) {
+ Constraint->print(OS);
+ OS << "\n";
+ }
+}
+
+const ControlFlowContext *
+DataflowAnalysisContext::getControlFlowContext(const FunctionDecl *F) {
+ // Canonicalize the key:
+ F = F->getDefinition();
+ if (F == nullptr)
+ return nullptr;
+ auto It = FunctionContexts.find(F);
+ if (It != FunctionContexts.end())
+ return &It->second;
+
+ if (F->doesThisDeclarationHaveABody()) {
+ auto CFCtx = ControlFlowContext::build(*F);
+ // FIXME: Handle errors.
+ assert(CFCtx);
+ auto Result = FunctionContexts.insert({F, std::move(*CFCtx)});
+ return &Result.first->second;
+ }
+
+ return nullptr;
+}
+
+static std::unique_ptr<Logger> makeLoggerFromCommandLine() {
+ if (DataflowLog.empty())
+ return Logger::textual(llvm::errs());
+
+ llvm::StringRef Dir = DataflowLog;
+ if (auto EC = llvm::sys::fs::create_directories(Dir))
+ llvm::errs() << "Failed to create log dir: " << EC.message() << "\n";
+ // All analysis runs within a process will log to the same directory.
+ // Share a counter so they don't all overwrite each other's 0.html.
+ // (Don't share a logger, it's not threadsafe).
+ static std::atomic<unsigned> Counter = {0};
+ auto StreamFactory =
+ [Dir(Dir.str())]() mutable -> std::unique_ptr<llvm::raw_ostream> {
+ llvm::SmallString<256> File(Dir);
+ llvm::sys::path::append(File,
+ std::to_string(Counter.fetch_add(1)) + ".html");
+ std::error_code EC;
+ auto OS = std::make_unique<llvm::raw_fd_ostream>(File, EC);
+ if (EC) {
+ llvm::errs() << "Failed to create log " << File << ": " << EC.message()
+ << "\n";
+ return std::make_unique<llvm::raw_null_ostream>();
+ }
+ return OS;
+ };
+ return Logger::html(std::move(StreamFactory));
+}
+
+DataflowAnalysisContext::DataflowAnalysisContext(std::unique_ptr<Solver> S,
+ Options Opts)
+ : S(std::move(S)), A(std::make_unique<Arena>()), Opts(Opts) {
+ assert(this->S != nullptr);
+ // If the -dataflow-log command-line flag was set, synthesize a logger.
+ // This is ugly but provides a uniform method for ad-hoc debugging dataflow-
+ // based tools.
+ if (Opts.Log == nullptr) {
+ if (DataflowLog.getNumOccurrences() > 0) {
+ LogOwner = makeLoggerFromCommandLine();
+ this->Opts.Log = LogOwner.get();
+ // FIXME: if the flag is given a value, write an HTML log to a file.
+ } else {
+ this->Opts.Log = &Logger::null();
+ }
+ }
+}
+
+DataflowAnalysisContext::~DataflowAnalysisContext() = default;
+
+} // namespace dataflow
+} // namespace clang
+
+using namespace clang;
+
+const Expr &clang::dataflow::ignoreCFGOmittedNodes(const Expr &E) {
+ const Expr *Current = &E;
+ if (auto *EWC = dyn_cast<ExprWithCleanups>(Current)) {
+ Current = EWC->getSubExpr();
+ assert(Current != nullptr);
+ }
+ Current = Current->IgnoreParens();
+ assert(Current != nullptr);
+ return *Current;
+}
+
+const Stmt &clang::dataflow::ignoreCFGOmittedNodes(const Stmt &S) {
+ if (auto *E = dyn_cast<Expr>(&S))
+ return ignoreCFGOmittedNodes(*E);
+ return S;
+}
+
+// FIXME: Does not precisely handle non-virtual diamond inheritance. A single
+// field decl will be modeled for all instances of the inherited field.
+static void getFieldsFromClassHierarchy(QualType Type,
+ clang::dataflow::FieldSet &Fields) {
+ if (Type->isIncompleteType() || Type->isDependentType() ||
+ !Type->isRecordType())
+ return;
+
+ for (const FieldDecl *Field : Type->getAsRecordDecl()->fields())
+ Fields.insert(Field);
+ if (auto *CXXRecord = Type->getAsCXXRecordDecl())
+ for (const CXXBaseSpecifier &Base : CXXRecord->bases())
+ getFieldsFromClassHierarchy(Base.getType(), Fields);
+}
+
+/// Gets the set of all fields in the type.
+clang::dataflow::FieldSet clang::dataflow::getObjectFields(QualType Type) {
+ FieldSet Fields;
+ getFieldsFromClassHierarchy(Type, Fields);
+ return Fields;
+}
+
+bool clang::dataflow::containsSameFields(
+ const clang::dataflow::FieldSet &Fields,
+ const clang::dataflow::RecordStorageLocation::FieldToLoc &FieldLocs) {
+ if (Fields.size() != FieldLocs.size())
+ return false;
+ for ([[maybe_unused]] auto [Field, Loc] : FieldLocs)
+ if (!Fields.contains(cast_or_null<FieldDecl>(Field)))
+ return false;
+ return true;
+}
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
new file mode 100644
index 000000000000..196a1360a775
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
@@ -0,0 +1,1112 @@
+//===-- DataflowEnvironment.cpp ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an Environment class that is used by dataflow analyses
+// that run over Control-Flow Graphs (CFGs) to keep track of the state of the
+// program at given program points.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Type.h"
+#include "clang/Analysis/FlowSensitive/DataflowLattice.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <utility>
+
+namespace clang {
+namespace dataflow {
+
+// FIXME: convert these to parameters of the analysis or environment. Current
+// settings have been experimentaly validated, but only for a particular
+// analysis.
+static constexpr int MaxCompositeValueDepth = 3;
+static constexpr int MaxCompositeValueSize = 1000;
+
+/// Returns a map consisting of key-value entries that are present in both maps.
+static llvm::DenseMap<const ValueDecl *, StorageLocation *> intersectDeclToLoc(
+ const llvm::DenseMap<const ValueDecl *, StorageLocation *> &DeclToLoc1,
+ const llvm::DenseMap<const ValueDecl *, StorageLocation *> &DeclToLoc2) {
+ llvm::DenseMap<const ValueDecl *, StorageLocation *> Result;
+ for (auto &Entry : DeclToLoc1) {
+ auto It = DeclToLoc2.find(Entry.first);
+ if (It != DeclToLoc2.end() && Entry.second == It->second)
+ Result.insert({Entry.first, Entry.second});
+ }
+ return Result;
+}
+
+// Whether to consider equivalent two values with an unknown relation.
+//
+// FIXME: this function is a hack enabling unsoundness to support
+// convergence. Once we have widening support for the reference/pointer and
+// struct built-in models, this should be unconditionally `false` (and inlined
+// as such at its call sites).
+static bool equateUnknownValues(Value::Kind K) {
+ switch (K) {
+ case Value::Kind::Integer:
+ case Value::Kind::Pointer:
+ case Value::Kind::Record:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool compareDistinctValues(QualType Type, Value &Val1,
+ const Environment &Env1, Value &Val2,
+ const Environment &Env2,
+ Environment::ValueModel &Model) {
+ // Note: Potentially costly, but, for booleans, we could check whether both
+ // can be proven equivalent in their respective environments.
+
+ // FIXME: move the reference/pointers logic from `areEquivalentValues` to here
+ // and implement separate, join/widen specific handling for
+ // reference/pointers.
+ switch (Model.compare(Type, Val1, Env1, Val2, Env2)) {
+ case ComparisonResult::Same:
+ return true;
+ case ComparisonResult::Different:
+ return false;
+ case ComparisonResult::Unknown:
+ return equateUnknownValues(Val1.getKind());
+ }
+ llvm_unreachable("All cases covered in switch");
+}
+
+/// Attempts to merge distinct values `Val1` and `Val2` in `Env1` and `Env2`,
+/// respectively, of the same type `Type`. Merging generally produces a single
+/// value that (soundly) approximates the two inputs, although the actual
+/// meaning depends on `Model`.
+static Value *mergeDistinctValues(QualType Type, Value &Val1,
+ const Environment &Env1, Value &Val2,
+ const Environment &Env2,
+ Environment &MergedEnv,
+ Environment::ValueModel &Model) {
+ // Join distinct boolean values preserving information about the constraints
+ // in the respective path conditions.
+ if (isa<BoolValue>(&Val1) && isa<BoolValue>(&Val2)) {
+ // FIXME: Checking both values should be unnecessary, since they should have
+ // a consistent shape. However, right now we can end up with BoolValue's in
+ // integer-typed variables due to our incorrect handling of
+ // boolean-to-integer casts (we just propagate the BoolValue to the result
+ // of the cast). So, a join can encounter an integer in one branch but a
+ // bool in the other.
+ // For example:
+ // ```
+ // std::optional<bool> o;
+ // int x;
+ // if (o.has_value())
+ // x = o.value();
+ // ```
+ auto &Expr1 = cast<BoolValue>(Val1).formula();
+ auto &Expr2 = cast<BoolValue>(Val2).formula();
+ auto &A = MergedEnv.arena();
+ auto &MergedVal = A.makeAtomRef(A.makeAtom());
+ MergedEnv.assume(
+ A.makeOr(A.makeAnd(A.makeAtomRef(Env1.getFlowConditionToken()),
+ A.makeEquals(MergedVal, Expr1)),
+ A.makeAnd(A.makeAtomRef(Env2.getFlowConditionToken()),
+ A.makeEquals(MergedVal, Expr2))));
+ return &A.makeBoolValue(MergedVal);
+ }
+
+ Value *MergedVal = nullptr;
+ if (auto *RecordVal1 = dyn_cast<RecordValue>(&Val1)) {
+ auto *RecordVal2 = cast<RecordValue>(&Val2);
+
+ if (&RecordVal1->getLoc() == &RecordVal2->getLoc())
+ // `RecordVal1` and `RecordVal2` may have different properties associated
+ // with them. Create a new `RecordValue` with the same location but
+ // without any properties so that we soundly approximate both values. If a
+ // particular analysis needs to merge properties, it should do so in
+ // `DataflowAnalysis::merge()`.
+ MergedVal = &MergedEnv.create<RecordValue>(RecordVal1->getLoc());
+ else
+ // If the locations for the two records are different, need to create a
+ // completely new value.
+ MergedVal = MergedEnv.createValue(Type);
+ } else {
+ MergedVal = MergedEnv.createValue(Type);
+ }
+
+ // FIXME: Consider destroying `MergedValue` immediately if `ValueModel::merge`
+ // returns false to avoid storing unneeded values in `DACtx`.
+ if (MergedVal)
+ if (Model.merge(Type, Val1, Env1, Val2, Env2, *MergedVal, MergedEnv))
+ return MergedVal;
+
+ return nullptr;
+}
+
+// When widening does not change `Current`, return value will equal `&Prev`.
+static Value &widenDistinctValues(QualType Type, Value &Prev,
+ const Environment &PrevEnv, Value &Current,
+ Environment &CurrentEnv,
+ Environment::ValueModel &Model) {
+ // Boolean-model widening.
+ if (auto *PrevBool = dyn_cast<BoolValue>(&Prev)) {
+ // If previous value was already Top, re-use that to (implicitly) indicate
+ // that no change occurred.
+ if (isa<TopBoolValue>(Prev))
+ return Prev;
+
+ // We may need to widen to Top, but before we do so, check whether both
+ // values are implied to be either true or false in the current environment.
+ // In that case, we can simply return a literal instead.
+ auto &CurBool = cast<BoolValue>(Current);
+ bool TruePrev = PrevEnv.proves(PrevBool->formula());
+ bool TrueCur = CurrentEnv.proves(CurBool.formula());
+ if (TruePrev && TrueCur)
+ return CurrentEnv.getBoolLiteralValue(true);
+ if (!TruePrev && !TrueCur &&
+ PrevEnv.proves(PrevEnv.arena().makeNot(PrevBool->formula())) &&
+ CurrentEnv.proves(CurrentEnv.arena().makeNot(CurBool.formula())))
+ return CurrentEnv.getBoolLiteralValue(false);
+
+ return CurrentEnv.makeTopBoolValue();
+ }
+
+ // FIXME: Add other built-in model widening.
+
+ // Custom-model widening.
+ if (auto *W = Model.widen(Type, Prev, PrevEnv, Current, CurrentEnv))
+ return *W;
+
+ return equateUnknownValues(Prev.getKind()) ? Prev : Current;
+}
+
+// Returns whether the values in `Map1` and `Map2` compare equal for those
+// keys that `Map1` and `Map2` have in common.
+template <typename Key>
+bool compareKeyToValueMaps(const llvm::MapVector<Key, Value *> &Map1,
+ const llvm::MapVector<Key, Value *> &Map2,
+ const Environment &Env1, const Environment &Env2,
+ Environment::ValueModel &Model) {
+ for (auto &Entry : Map1) {
+ Key K = Entry.first;
+ assert(K != nullptr);
+
+ Value *Val = Entry.second;
+ assert(Val != nullptr);
+
+ auto It = Map2.find(K);
+ if (It == Map2.end())
+ continue;
+ assert(It->second != nullptr);
+
+ if (!areEquivalentValues(*Val, *It->second) &&
+ !compareDistinctValues(K->getType(), *Val, Env1, *It->second, Env2,
+ Model))
+ return false;
+ }
+
+ return true;
+}
+
+// Perform a join on two `LocToVal` maps.
+static llvm::MapVector<const StorageLocation *, Value *>
+joinLocToVal(const llvm::MapVector<const StorageLocation *, Value *> &LocToVal,
+ const llvm::MapVector<const StorageLocation *, Value *> &LocToVal2,
+ const Environment &Env1, const Environment &Env2,
+ Environment &JoinedEnv, Environment::ValueModel &Model) {
+ llvm::MapVector<const StorageLocation *, Value *> Result;
+ for (auto &Entry : LocToVal) {
+ const StorageLocation *Loc = Entry.first;
+ assert(Loc != nullptr);
+
+ Value *Val = Entry.second;
+ assert(Val != nullptr);
+
+ auto It = LocToVal2.find(Loc);
+ if (It == LocToVal2.end())
+ continue;
+ assert(It->second != nullptr);
+
+ if (areEquivalentValues(*Val, *It->second)) {
+ Result.insert({Loc, Val});
+ continue;
+ }
+
+ if (Value *MergedVal = mergeDistinctValues(
+ Loc->getType(), *Val, Env1, *It->second, Env2, JoinedEnv, Model)) {
+ Result.insert({Loc, MergedVal});
+ }
+ }
+
+ return Result;
+}
+
+// Perform widening on either `LocToVal` or `ExprToVal`. `Key` must be either
+// `const StorageLocation *` or `const Expr *`.
+template <typename Key>
+llvm::MapVector<Key, Value *>
+widenKeyToValueMap(const llvm::MapVector<Key, Value *> &CurMap,
+ const llvm::MapVector<Key, Value *> &PrevMap,
+ Environment &CurEnv, const Environment &PrevEnv,
+ Environment::ValueModel &Model, LatticeJoinEffect &Effect) {
+ llvm::MapVector<Key, Value *> WidenedMap;
+ for (auto &Entry : CurMap) {
+ Key K = Entry.first;
+ assert(K != nullptr);
+
+ Value *Val = Entry.second;
+ assert(Val != nullptr);
+
+ auto PrevIt = PrevMap.find(K);
+ if (PrevIt == PrevMap.end())
+ continue;
+ assert(PrevIt->second != nullptr);
+
+ if (areEquivalentValues(*Val, *PrevIt->second)) {
+ WidenedMap.insert({K, Val});
+ continue;
+ }
+
+ Value &WidenedVal = widenDistinctValues(K->getType(), *PrevIt->second,
+ PrevEnv, *Val, CurEnv, Model);
+ WidenedMap.insert({K, &WidenedVal});
+ if (&WidenedVal != PrevIt->second)
+ Effect = LatticeJoinEffect::Changed;
+ }
+
+ return WidenedMap;
+}
+
+/// Initializes a global storage value.
+static void insertIfGlobal(const Decl &D,
+ llvm::DenseSet<const VarDecl *> &Vars) {
+ if (auto *V = dyn_cast<VarDecl>(&D))
+ if (V->hasGlobalStorage())
+ Vars.insert(V);
+}
+
+static void insertIfFunction(const Decl &D,
+ llvm::DenseSet<const FunctionDecl *> &Funcs) {
+ if (auto *FD = dyn_cast<FunctionDecl>(&D))
+ Funcs.insert(FD);
+}
+
+static MemberExpr *getMemberForAccessor(const CXXMemberCallExpr &C) {
+ // Use getCalleeDecl instead of getMethodDecl in order to handle
+ // pointer-to-member calls.
+ const auto *MethodDecl = dyn_cast_or_null<CXXMethodDecl>(C.getCalleeDecl());
+ if (!MethodDecl)
+ return nullptr;
+ auto *Body = dyn_cast_or_null<CompoundStmt>(MethodDecl->getBody());
+ if (!Body || Body->size() != 1)
+ return nullptr;
+ if (auto *RS = dyn_cast<ReturnStmt>(*Body->body_begin()))
+ if (auto *Return = RS->getRetValue())
+ return dyn_cast<MemberExpr>(Return->IgnoreParenImpCasts());
+ return nullptr;
+}
+
+static void
+getFieldsGlobalsAndFuncs(const Decl &D, FieldSet &Fields,
+ llvm::DenseSet<const VarDecl *> &Vars,
+ llvm::DenseSet<const FunctionDecl *> &Funcs) {
+ insertIfGlobal(D, Vars);
+ insertIfFunction(D, Funcs);
+ if (const auto *Decomp = dyn_cast<DecompositionDecl>(&D))
+ for (const auto *B : Decomp->bindings())
+ if (auto *ME = dyn_cast_or_null<MemberExpr>(B->getBinding()))
+ // FIXME: should we be using `E->getFoundDecl()`?
+ if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()))
+ Fields.insert(FD);
+}
+
+/// Traverses `S` and inserts into `Fields`, `Vars` and `Funcs` any fields,
+/// global variables and functions that are declared in or referenced from
+/// sub-statements.
+static void
+getFieldsGlobalsAndFuncs(const Stmt &S, FieldSet &Fields,
+ llvm::DenseSet<const VarDecl *> &Vars,
+ llvm::DenseSet<const FunctionDecl *> &Funcs) {
+ for (auto *Child : S.children())
+ if (Child != nullptr)
+ getFieldsGlobalsAndFuncs(*Child, Fields, Vars, Funcs);
+ if (const auto *DefaultInit = dyn_cast<CXXDefaultInitExpr>(&S))
+ getFieldsGlobalsAndFuncs(*DefaultInit->getExpr(), Fields, Vars, Funcs);
+
+ if (auto *DS = dyn_cast<DeclStmt>(&S)) {
+ if (DS->isSingleDecl())
+ getFieldsGlobalsAndFuncs(*DS->getSingleDecl(), Fields, Vars, Funcs);
+ else
+ for (auto *D : DS->getDeclGroup())
+ getFieldsGlobalsAndFuncs(*D, Fields, Vars, Funcs);
+ } else if (auto *E = dyn_cast<DeclRefExpr>(&S)) {
+ insertIfGlobal(*E->getDecl(), Vars);
+ insertIfFunction(*E->getDecl(), Funcs);
+ } else if (const auto *C = dyn_cast<CXXMemberCallExpr>(&S)) {
+ // If this is a method that returns a member variable but does nothing else,
+ // model the field of the return value.
+ if (MemberExpr *E = getMemberForAccessor(*C))
+ if (const auto *FD = dyn_cast<FieldDecl>(E->getMemberDecl()))
+ Fields.insert(FD);
+ } else if (auto *E = dyn_cast<MemberExpr>(&S)) {
+ // FIXME: should we be using `E->getFoundDecl()`?
+ const ValueDecl *VD = E->getMemberDecl();
+ insertIfGlobal(*VD, Vars);
+ insertIfFunction(*VD, Funcs);
+ if (const auto *FD = dyn_cast<FieldDecl>(VD))
+ Fields.insert(FD);
+ } else if (auto *InitList = dyn_cast<InitListExpr>(&S)) {
+ if (RecordDecl *RD = InitList->getType()->getAsRecordDecl())
+ for (const auto *FD : getFieldsForInitListExpr(RD))
+ Fields.insert(FD);
+ }
+}
+
+Environment::Environment(DataflowAnalysisContext &DACtx)
+ : DACtx(&DACtx),
+ FlowConditionToken(DACtx.arena().makeFlowConditionToken()) {}
+
+Environment::Environment(DataflowAnalysisContext &DACtx,
+ const DeclContext &DeclCtx)
+ : Environment(DACtx) {
+ CallStack.push_back(&DeclCtx);
+}
+
+void Environment::initialize() {
+ const DeclContext *DeclCtx = getDeclCtx();
+ if (DeclCtx == nullptr)
+ return;
+
+ if (const auto *FuncDecl = dyn_cast<FunctionDecl>(DeclCtx)) {
+ assert(FuncDecl->doesThisDeclarationHaveABody());
+
+ initFieldsGlobalsAndFuncs(FuncDecl);
+
+ for (const auto *ParamDecl : FuncDecl->parameters()) {
+ assert(ParamDecl != nullptr);
+ setStorageLocation(*ParamDecl, createObject(*ParamDecl, nullptr));
+ }
+ }
+
+ if (const auto *MethodDecl = dyn_cast<CXXMethodDecl>(DeclCtx)) {
+ auto *Parent = MethodDecl->getParent();
+ assert(Parent != nullptr);
+
+ if (Parent->isLambda()) {
+ for (auto Capture : Parent->captures()) {
+ if (Capture.capturesVariable()) {
+ const auto *VarDecl = Capture.getCapturedVar();
+ assert(VarDecl != nullptr);
+ setStorageLocation(*VarDecl, createObject(*VarDecl, nullptr));
+ } else if (Capture.capturesThis()) {
+ const auto *SurroundingMethodDecl =
+ cast<CXXMethodDecl>(DeclCtx->getNonClosureAncestor());
+ QualType ThisPointeeType =
+ SurroundingMethodDecl->getFunctionObjectParameterType();
+ setThisPointeeStorageLocation(
+ cast<RecordValue>(createValue(ThisPointeeType))->getLoc());
+ }
+ }
+ } else if (MethodDecl->isImplicitObjectMemberFunction()) {
+ QualType ThisPointeeType = MethodDecl->getFunctionObjectParameterType();
+ setThisPointeeStorageLocation(
+ cast<RecordValue>(createValue(ThisPointeeType))->getLoc());
+ }
+ }
+}
+
+// FIXME: Add support for resetting globals after function calls to enable
+// the implementation of sound analyses.
+void Environment::initFieldsGlobalsAndFuncs(const FunctionDecl *FuncDecl) {
+ assert(FuncDecl->doesThisDeclarationHaveABody());
+
+ FieldSet Fields;
+ llvm::DenseSet<const VarDecl *> Vars;
+ llvm::DenseSet<const FunctionDecl *> Funcs;
+
+ // Look for global variable and field references in the
+ // constructor-initializers.
+ if (const auto *CtorDecl = dyn_cast<CXXConstructorDecl>(FuncDecl)) {
+ for (const auto *Init : CtorDecl->inits()) {
+ if (Init->isMemberInitializer()) {
+ Fields.insert(Init->getMember());
+ } else if (Init->isIndirectMemberInitializer()) {
+ for (const auto *I : Init->getIndirectMember()->chain())
+ Fields.insert(cast<FieldDecl>(I));
+ }
+ const Expr *E = Init->getInit();
+ assert(E != nullptr);
+ getFieldsGlobalsAndFuncs(*E, Fields, Vars, Funcs);
+ }
+ // Add all fields mentioned in default member initializers.
+ for (const FieldDecl *F : CtorDecl->getParent()->fields())
+ if (const auto *I = F->getInClassInitializer())
+ getFieldsGlobalsAndFuncs(*I, Fields, Vars, Funcs);
+ }
+ getFieldsGlobalsAndFuncs(*FuncDecl->getBody(), Fields, Vars, Funcs);
+
+ // These have to be added before the lines that follow to ensure that
+ // `create*` work correctly for structs.
+ DACtx->addModeledFields(Fields);
+
+ for (const VarDecl *D : Vars) {
+ if (getStorageLocation(*D) != nullptr)
+ continue;
+
+ setStorageLocation(*D, createObject(*D));
+ }
+
+ for (const FunctionDecl *FD : Funcs) {
+ if (getStorageLocation(*FD) != nullptr)
+ continue;
+ auto &Loc = createStorageLocation(FD->getType());
+ setStorageLocation(*FD, Loc);
+ }
+}
+
+Environment Environment::fork() const {
+ Environment Copy(*this);
+ Copy.FlowConditionToken = DACtx->forkFlowCondition(FlowConditionToken);
+ return Copy;
+}
+
+bool Environment::canDescend(unsigned MaxDepth,
+ const DeclContext *Callee) const {
+ return CallStack.size() <= MaxDepth && !llvm::is_contained(CallStack, Callee);
+}
+
+Environment Environment::pushCall(const CallExpr *Call) const {
+ Environment Env(*this);
+
+ if (const auto *MethodCall = dyn_cast<CXXMemberCallExpr>(Call)) {
+ if (const Expr *Arg = MethodCall->getImplicitObjectArgument()) {
+ if (!isa<CXXThisExpr>(Arg))
+ Env.ThisPointeeLoc =
+ cast<RecordStorageLocation>(getStorageLocation(*Arg));
+ // Otherwise (when the argument is `this`), retain the current
+ // environment's `ThisPointeeLoc`.
+ }
+ }
+
+ Env.pushCallInternal(Call->getDirectCallee(),
+ llvm::ArrayRef(Call->getArgs(), Call->getNumArgs()));
+
+ return Env;
+}
+
+Environment Environment::pushCall(const CXXConstructExpr *Call) const {
+ Environment Env(*this);
+
+ Env.ThisPointeeLoc = &Env.getResultObjectLocation(*Call);
+
+ Env.pushCallInternal(Call->getConstructor(),
+ llvm::ArrayRef(Call->getArgs(), Call->getNumArgs()));
+
+ return Env;
+}
+
+void Environment::pushCallInternal(const FunctionDecl *FuncDecl,
+ ArrayRef<const Expr *> Args) {
+ // Canonicalize to the definition of the function. This ensures that we're
+ // putting arguments into the same `ParamVarDecl`s` that the callee will later
+ // be retrieving them from.
+ assert(FuncDecl->getDefinition() != nullptr);
+ FuncDecl = FuncDecl->getDefinition();
+
+ CallStack.push_back(FuncDecl);
+
+ initFieldsGlobalsAndFuncs(FuncDecl);
+
+ const auto *ParamIt = FuncDecl->param_begin();
+
+ // FIXME: Parameters don't always map to arguments 1:1; examples include
+ // overloaded operators implemented as member functions, and parameter packs.
+ for (unsigned ArgIndex = 0; ArgIndex < Args.size(); ++ParamIt, ++ArgIndex) {
+ assert(ParamIt != FuncDecl->param_end());
+ const VarDecl *Param = *ParamIt;
+ setStorageLocation(*Param, createObject(*Param, Args[ArgIndex]));
+ }
+}
+
+void Environment::popCall(const CallExpr *Call, const Environment &CalleeEnv) {
+ // We ignore some entries of `CalleeEnv`:
+ // - `DACtx` because is already the same in both
+ // - We don't want the callee's `DeclCtx`, `ReturnVal`, `ReturnLoc` or
+ // `ThisPointeeLoc` because they don't apply to us.
+ // - `DeclToLoc`, `ExprToLoc`, and `ExprToVal` capture information from the
+ // callee's local scope, so when popping that scope, we do not propagate
+ // the maps.
+ this->LocToVal = std::move(CalleeEnv.LocToVal);
+ this->FlowConditionToken = std::move(CalleeEnv.FlowConditionToken);
+
+ if (Call->isGLValue()) {
+ if (CalleeEnv.ReturnLoc != nullptr)
+ setStorageLocation(*Call, *CalleeEnv.ReturnLoc);
+ } else if (!Call->getType()->isVoidType()) {
+ if (CalleeEnv.ReturnVal != nullptr)
+ setValue(*Call, *CalleeEnv.ReturnVal);
+ }
+}
+
+void Environment::popCall(const CXXConstructExpr *Call,
+ const Environment &CalleeEnv) {
+ // See also comment in `popCall(const CallExpr *, const Environment &)` above.
+ this->LocToVal = std::move(CalleeEnv.LocToVal);
+ this->FlowConditionToken = std::move(CalleeEnv.FlowConditionToken);
+
+ if (Value *Val = CalleeEnv.getValue(*CalleeEnv.ThisPointeeLoc)) {
+ setValue(*Call, *Val);
+ }
+}
+
+bool Environment::equivalentTo(const Environment &Other,
+ Environment::ValueModel &Model) const {
+ assert(DACtx == Other.DACtx);
+
+ if (ReturnVal != Other.ReturnVal)
+ return false;
+
+ if (ReturnLoc != Other.ReturnLoc)
+ return false;
+
+ if (ThisPointeeLoc != Other.ThisPointeeLoc)
+ return false;
+
+ if (DeclToLoc != Other.DeclToLoc)
+ return false;
+
+ if (ExprToLoc != Other.ExprToLoc)
+ return false;
+
+ if (!compareKeyToValueMaps(ExprToVal, Other.ExprToVal, *this, Other, Model))
+ return false;
+
+ if (!compareKeyToValueMaps(LocToVal, Other.LocToVal, *this, Other, Model))
+ return false;
+
+ return true;
+}
+
+LatticeJoinEffect Environment::widen(const Environment &PrevEnv,
+ Environment::ValueModel &Model) {
+ assert(DACtx == PrevEnv.DACtx);
+ assert(ReturnVal == PrevEnv.ReturnVal);
+ assert(ReturnLoc == PrevEnv.ReturnLoc);
+ assert(ThisPointeeLoc == PrevEnv.ThisPointeeLoc);
+ assert(CallStack == PrevEnv.CallStack);
+
+ auto Effect = LatticeJoinEffect::Unchanged;
+
+ // By the API, `PrevEnv` is a previous version of the environment for the same
+ // block, so we have some guarantees about its shape. In particular, it will
+ // be the result of a join or widen operation on previous values for this
+ // block. For `DeclToLoc`, `ExprToVal`, and `ExprToLoc`, join guarantees that
+ // these maps are subsets of the maps in `PrevEnv`. So, as long as we maintain
+ // this property here, we don't need change their current values to widen.
+ assert(DeclToLoc.size() <= PrevEnv.DeclToLoc.size());
+ assert(ExprToVal.size() <= PrevEnv.ExprToVal.size());
+ assert(ExprToLoc.size() <= PrevEnv.ExprToLoc.size());
+
+ ExprToVal = widenKeyToValueMap(ExprToVal, PrevEnv.ExprToVal, *this, PrevEnv,
+ Model, Effect);
+
+ LocToVal = widenKeyToValueMap(LocToVal, PrevEnv.LocToVal, *this, PrevEnv,
+ Model, Effect);
+ if (DeclToLoc.size() != PrevEnv.DeclToLoc.size() ||
+ ExprToLoc.size() != PrevEnv.ExprToLoc.size() ||
+ ExprToVal.size() != PrevEnv.ExprToVal.size() ||
+ LocToVal.size() != PrevEnv.LocToVal.size())
+ Effect = LatticeJoinEffect::Changed;
+
+ return Effect;
+}
+
+Environment Environment::join(const Environment &EnvA, const Environment &EnvB,
+ Environment::ValueModel &Model) {
+ assert(EnvA.DACtx == EnvB.DACtx);
+ assert(EnvA.ThisPointeeLoc == EnvB.ThisPointeeLoc);
+ assert(EnvA.CallStack == EnvB.CallStack);
+
+ Environment JoinedEnv(*EnvA.DACtx);
+
+ JoinedEnv.CallStack = EnvA.CallStack;
+ JoinedEnv.ThisPointeeLoc = EnvA.ThisPointeeLoc;
+
+ if (EnvA.ReturnVal == nullptr || EnvB.ReturnVal == nullptr) {
+ // `ReturnVal` might not always get set -- for example if we have a return
+ // statement of the form `return some_other_func()` and we decide not to
+ // analyze `some_other_func()`.
+ // In this case, we can't say anything about the joined return value -- we
+ // don't simply want to propagate the return value that we do have, because
+ // it might not be the correct one.
+ // This occurs for example in the test `ContextSensitiveMutualRecursion`.
+ JoinedEnv.ReturnVal = nullptr;
+ } else if (areEquivalentValues(*EnvA.ReturnVal, *EnvB.ReturnVal)) {
+ JoinedEnv.ReturnVal = EnvA.ReturnVal;
+ } else {
+ assert(!EnvA.CallStack.empty());
+ // FIXME: Make `CallStack` a vector of `FunctionDecl` so we don't need this
+ // cast.
+ auto *Func = dyn_cast<FunctionDecl>(EnvA.CallStack.back());
+ assert(Func != nullptr);
+ if (Value *MergedVal =
+ mergeDistinctValues(Func->getReturnType(), *EnvA.ReturnVal, EnvA,
+ *EnvB.ReturnVal, EnvB, JoinedEnv, Model))
+ JoinedEnv.ReturnVal = MergedVal;
+ }
+
+ if (EnvA.ReturnLoc == EnvB.ReturnLoc)
+ JoinedEnv.ReturnLoc = EnvA.ReturnLoc;
+ else
+ JoinedEnv.ReturnLoc = nullptr;
+
+ JoinedEnv.DeclToLoc = intersectDeclToLoc(EnvA.DeclToLoc, EnvB.DeclToLoc);
+
+ // FIXME: update join to detect backedges and simplify the flow condition
+ // accordingly.
+ JoinedEnv.FlowConditionToken = EnvA.DACtx->joinFlowConditions(
+ EnvA.FlowConditionToken, EnvB.FlowConditionToken);
+
+ JoinedEnv.LocToVal =
+ joinLocToVal(EnvA.LocToVal, EnvB.LocToVal, EnvA, EnvB, JoinedEnv, Model);
+
+ // We intentionally leave `JoinedEnv.ExprToLoc` and `JoinedEnv.ExprToVal`
+ // empty, as we never need to access entries in these maps outside of the
+ // basic block that sets them.
+
+ return JoinedEnv;
+}
+
+StorageLocation &Environment::createStorageLocation(QualType Type) {
+ return DACtx->createStorageLocation(Type);
+}
+
+StorageLocation &Environment::createStorageLocation(const ValueDecl &D) {
+ // Evaluated declarations are always assigned the same storage locations to
+ // ensure that the environment stabilizes across loop iterations. Storage
+ // locations for evaluated declarations are stored in the analysis context.
+ return DACtx->getStableStorageLocation(D);
+}
+
+StorageLocation &Environment::createStorageLocation(const Expr &E) {
+ // Evaluated expressions are always assigned the same storage locations to
+ // ensure that the environment stabilizes across loop iterations. Storage
+ // locations for evaluated expressions are stored in the analysis context.
+ return DACtx->getStableStorageLocation(E);
+}
+
+void Environment::setStorageLocation(const ValueDecl &D, StorageLocation &Loc) {
+ assert(!DeclToLoc.contains(&D));
+ DeclToLoc[&D] = &Loc;
+}
+
+StorageLocation *Environment::getStorageLocation(const ValueDecl &D) const {
+ auto It = DeclToLoc.find(&D);
+ if (It == DeclToLoc.end())
+ return nullptr;
+
+ StorageLocation *Loc = It->second;
+
+ return Loc;
+}
+
+void Environment::removeDecl(const ValueDecl &D) { DeclToLoc.erase(&D); }
+
+void Environment::setStorageLocation(const Expr &E, StorageLocation &Loc) {
+ // `DeclRefExpr`s to builtin function types aren't glvalues, for some reason,
+ // but we still want to be able to associate a `StorageLocation` with them,
+ // so allow these as an exception.
+ assert(E.isGLValue() ||
+ E.getType()->isSpecificBuiltinType(BuiltinType::BuiltinFn));
+ const Expr &CanonE = ignoreCFGOmittedNodes(E);
+ assert(!ExprToLoc.contains(&CanonE));
+ ExprToLoc[&CanonE] = &Loc;
+}
+
+StorageLocation *Environment::getStorageLocation(const Expr &E) const {
+ // See comment in `setStorageLocation()`.
+ assert(E.isGLValue() ||
+ E.getType()->isSpecificBuiltinType(BuiltinType::BuiltinFn));
+ auto It = ExprToLoc.find(&ignoreCFGOmittedNodes(E));
+ return It == ExprToLoc.end() ? nullptr : &*It->second;
+}
+
+// Returns whether a prvalue of record type is the one that originally
+// constructs the object (i.e. it doesn't propagate it from one of its
+// children).
+static bool isOriginalRecordConstructor(const Expr &RecordPRValue) {
+ if (auto *Init = dyn_cast<InitListExpr>(&RecordPRValue))
+ return !Init->isSemanticForm() || !Init->isTransparent();
+ return isa<CXXConstructExpr>(RecordPRValue) || isa<CallExpr>(RecordPRValue) ||
+ isa<LambdaExpr>(RecordPRValue) ||
+ isa<CXXDefaultInitExpr>(RecordPRValue) ||
+ // The framework currently does not propagate the objects created in
+ // the two branches of a `ConditionalOperator` because there is no way
+ // to reconcile their storage locations, which are different. We
+ // therefore claim that the `ConditionalOperator` is the expression
+ // that originally constructs the object.
+ // Ultimately, this will be fixed by propagating locations down from
+ // the result object, rather than up from the original constructor as
+ // we do now (see also the FIXME in the documentation for
+ // `getResultObjectLocation()`).
+ isa<ConditionalOperator>(RecordPRValue);
+}
+
+RecordStorageLocation &
+Environment::getResultObjectLocation(const Expr &RecordPRValue) const {
+ assert(RecordPRValue.getType()->isRecordType());
+ assert(RecordPRValue.isPRValue());
+
+ // Returns a storage location that we can use if assertions fail.
+ auto FallbackForAssertFailure =
+ [this, &RecordPRValue]() -> RecordStorageLocation & {
+ return cast<RecordStorageLocation>(
+ DACtx->getStableStorageLocation(RecordPRValue));
+ };
+
+ if (isOriginalRecordConstructor(RecordPRValue)) {
+ auto *Val = cast_or_null<RecordValue>(getValue(RecordPRValue));
+ // The builtin transfer function should have created a `RecordValue` for all
+ // original record constructors.
+ assert(Val);
+ if (!Val)
+ return FallbackForAssertFailure();
+ return Val->getLoc();
+ }
+
+ if (auto *Op = dyn_cast<BinaryOperator>(&RecordPRValue);
+ Op && Op->isCommaOp()) {
+ return getResultObjectLocation(*Op->getRHS());
+ }
+
+ // All other expression nodes that propagate a record prvalue should have
+ // exactly one child.
+ llvm::SmallVector<const Stmt *> children(RecordPRValue.child_begin(),
+ RecordPRValue.child_end());
+ assert(children.size() == 1);
+ if (children.empty())
+ return FallbackForAssertFailure();
+
+ return getResultObjectLocation(*cast<Expr>(children[0]));
+}
+
+PointerValue &Environment::getOrCreateNullPointerValue(QualType PointeeType) {
+ return DACtx->getOrCreateNullPointerValue(PointeeType);
+}
+
+void Environment::setValue(const StorageLocation &Loc, Value &Val) {
+ assert(!isa<RecordValue>(&Val) || &cast<RecordValue>(&Val)->getLoc() == &Loc);
+
+ LocToVal[&Loc] = &Val;
+}
+
+void Environment::setValue(const Expr &E, Value &Val) {
+ const Expr &CanonE = ignoreCFGOmittedNodes(E);
+
+ if (auto *RecordVal = dyn_cast<RecordValue>(&Val)) {
+ assert(isOriginalRecordConstructor(CanonE) ||
+ &RecordVal->getLoc() == &getResultObjectLocation(CanonE));
+ }
+
+ assert(CanonE.isPRValue());
+ ExprToVal[&CanonE] = &Val;
+}
+
+Value *Environment::getValue(const StorageLocation &Loc) const {
+ return LocToVal.lookup(&Loc);
+}
+
+Value *Environment::getValue(const ValueDecl &D) const {
+ auto *Loc = getStorageLocation(D);
+ if (Loc == nullptr)
+ return nullptr;
+ return getValue(*Loc);
+}
+
+Value *Environment::getValue(const Expr &E) const {
+ if (E.isPRValue()) {
+ auto It = ExprToVal.find(&ignoreCFGOmittedNodes(E));
+ return It == ExprToVal.end() ? nullptr : It->second;
+ }
+
+ auto It = ExprToLoc.find(&ignoreCFGOmittedNodes(E));
+ if (It == ExprToLoc.end())
+ return nullptr;
+ return getValue(*It->second);
+}
+
+Value *Environment::createValue(QualType Type) {
+ llvm::DenseSet<QualType> Visited;
+ int CreatedValuesCount = 0;
+ Value *Val = createValueUnlessSelfReferential(Type, Visited, /*Depth=*/0,
+ CreatedValuesCount);
+ if (CreatedValuesCount > MaxCompositeValueSize) {
+ llvm::errs() << "Attempting to initialize a huge value of type: " << Type
+ << '\n';
+ }
+ return Val;
+}
+
+Value *Environment::createValueUnlessSelfReferential(
+ QualType Type, llvm::DenseSet<QualType> &Visited, int Depth,
+ int &CreatedValuesCount) {
+ assert(!Type.isNull());
+ assert(!Type->isReferenceType());
+
+ // Allow unlimited fields at depth 1; only cap at deeper nesting levels.
+ if ((Depth > 1 && CreatedValuesCount > MaxCompositeValueSize) ||
+ Depth > MaxCompositeValueDepth)
+ return nullptr;
+
+ if (Type->isBooleanType()) {
+ CreatedValuesCount++;
+ return &makeAtomicBoolValue();
+ }
+
+ if (Type->isIntegerType()) {
+ // FIXME: consider instead `return nullptr`, given that we do nothing useful
+ // with integers, and so distinguishing them serves no purpose, but could
+ // prevent convergence.
+ CreatedValuesCount++;
+ return &arena().create<IntegerValue>();
+ }
+
+ if (Type->isPointerType()) {
+ CreatedValuesCount++;
+ QualType PointeeType = Type->getPointeeType();
+ StorageLocation &PointeeLoc =
+ createLocAndMaybeValue(PointeeType, Visited, Depth, CreatedValuesCount);
+
+ return &arena().create<PointerValue>(PointeeLoc);
+ }
+
+ if (Type->isRecordType()) {
+ CreatedValuesCount++;
+ llvm::DenseMap<const ValueDecl *, StorageLocation *> FieldLocs;
+ for (const FieldDecl *Field : DACtx->getModeledFields(Type)) {
+ assert(Field != nullptr);
+
+ QualType FieldType = Field->getType();
+
+ FieldLocs.insert(
+ {Field, &createLocAndMaybeValue(FieldType, Visited, Depth + 1,
+ CreatedValuesCount)});
+ }
+
+ RecordStorageLocation::SyntheticFieldMap SyntheticFieldLocs;
+ for (const auto &Entry : DACtx->getSyntheticFields(Type)) {
+ SyntheticFieldLocs.insert(
+ {Entry.getKey(),
+ &createLocAndMaybeValue(Entry.getValue(), Visited, Depth + 1,
+ CreatedValuesCount)});
+ }
+
+ RecordStorageLocation &Loc = DACtx->createRecordStorageLocation(
+ Type, std::move(FieldLocs), std::move(SyntheticFieldLocs));
+ RecordValue &RecordVal = create<RecordValue>(Loc);
+
+ // As we already have a storage location for the `RecordValue`, we can and
+ // should associate them in the environment.
+ setValue(Loc, RecordVal);
+
+ return &RecordVal;
+ }
+
+ return nullptr;
+}
+
+StorageLocation &
+Environment::createLocAndMaybeValue(QualType Ty,
+ llvm::DenseSet<QualType> &Visited,
+ int Depth, int &CreatedValuesCount) {
+ if (!Visited.insert(Ty.getCanonicalType()).second)
+ return createStorageLocation(Ty.getNonReferenceType());
+ Value *Val = createValueUnlessSelfReferential(
+ Ty.getNonReferenceType(), Visited, Depth, CreatedValuesCount);
+ Visited.erase(Ty.getCanonicalType());
+
+ Ty = Ty.getNonReferenceType();
+
+ if (Val == nullptr)
+ return createStorageLocation(Ty);
+
+ if (Ty->isRecordType())
+ return cast<RecordValue>(Val)->getLoc();
+
+ StorageLocation &Loc = createStorageLocation(Ty);
+ setValue(Loc, *Val);
+ return Loc;
+}
+
+StorageLocation &Environment::createObjectInternal(const ValueDecl *D,
+ QualType Ty,
+ const Expr *InitExpr) {
+ if (Ty->isReferenceType()) {
+ // Although variables of reference type always need to be initialized, it
+ // can happen that we can't see the initializer, so `InitExpr` may still
+ // be null.
+ if (InitExpr) {
+ if (auto *InitExprLoc = getStorageLocation(*InitExpr))
+ return *InitExprLoc;
+ }
+
+ // Even though we have an initializer, we might not get an
+ // InitExprLoc, for example if the InitExpr is a CallExpr for which we
+ // don't have a function body. In this case, we just invent a storage
+ // location and value -- it's the best we can do.
+ return createObjectInternal(D, Ty.getNonReferenceType(), nullptr);
+ }
+
+ Value *Val = nullptr;
+ if (InitExpr)
+ // In the (few) cases where an expression is intentionally
+ // "uninterpreted", `InitExpr` is not associated with a value. There are
+ // two ways to handle this situation: propagate the status, so that
+ // uninterpreted initializers result in uninterpreted variables, or
+ // provide a default value. We choose the latter so that later refinements
+ // of the variable can be used for reasoning about the surrounding code.
+ // For this reason, we let this case be handled by the `createValue()`
+ // call below.
+ //
+ // FIXME. If and when we interpret all language cases, change this to
+ // assert that `InitExpr` is interpreted, rather than supplying a
+ // default value (assuming we don't update the environment API to return
+ // references).
+ Val = getValue(*InitExpr);
+ if (!Val)
+ Val = createValue(Ty);
+
+ if (Ty->isRecordType())
+ return cast<RecordValue>(Val)->getLoc();
+
+ StorageLocation &Loc =
+ D ? createStorageLocation(*D) : createStorageLocation(Ty);
+
+ if (Val)
+ setValue(Loc, *Val);
+
+ return Loc;
+}
+
+void Environment::assume(const Formula &F) {
+ DACtx->addFlowConditionConstraint(FlowConditionToken, F);
+}
+
+bool Environment::proves(const Formula &F) const {
+ return DACtx->flowConditionImplies(FlowConditionToken, F);
+}
+
+bool Environment::allows(const Formula &F) const {
+ return DACtx->flowConditionAllows(FlowConditionToken, F);
+}
+
+void Environment::dump(raw_ostream &OS) const {
+ // FIXME: add printing for remaining fields and allow caller to decide what
+ // fields are printed.
+ OS << "DeclToLoc:\n";
+ for (auto [D, L] : DeclToLoc)
+ OS << " [" << D->getNameAsString() << ", " << L << "]\n";
+
+ OS << "ExprToLoc:\n";
+ for (auto [E, L] : ExprToLoc)
+ OS << " [" << E << ", " << L << "]\n";
+
+ OS << "ExprToVal:\n";
+ for (auto [E, V] : ExprToVal)
+ OS << " [" << E << ", " << V << ": " << *V << "]\n";
+
+ OS << "LocToVal:\n";
+ for (auto [L, V] : LocToVal) {
+ OS << " [" << L << ", " << V << ": " << *V << "]\n";
+ }
+
+ OS << "\n";
+ DACtx->dumpFlowCondition(FlowConditionToken, OS);
+}
+
+void Environment::dump() const {
+ dump(llvm::dbgs());
+}
+
+RecordStorageLocation *getImplicitObjectLocation(const CXXMemberCallExpr &MCE,
+ const Environment &Env) {
+ Expr *ImplicitObject = MCE.getImplicitObjectArgument();
+ if (ImplicitObject == nullptr)
+ return nullptr;
+ if (ImplicitObject->getType()->isPointerType()) {
+ if (auto *Val = Env.get<PointerValue>(*ImplicitObject))
+ return &cast<RecordStorageLocation>(Val->getPointeeLoc());
+ return nullptr;
+ }
+ return cast_or_null<RecordStorageLocation>(
+ Env.getStorageLocation(*ImplicitObject));
+}
+
+RecordStorageLocation *getBaseObjectLocation(const MemberExpr &ME,
+ const Environment &Env) {
+ Expr *Base = ME.getBase();
+ if (Base == nullptr)
+ return nullptr;
+ if (ME.isArrow()) {
+ if (auto *Val = Env.get<PointerValue>(*Base))
+ return &cast<RecordStorageLocation>(Val->getPointeeLoc());
+ return nullptr;
+ }
+ return Env.get<RecordStorageLocation>(*Base);
+}
+
+std::vector<FieldDecl *> getFieldsForInitListExpr(const RecordDecl *RD) {
+ // Unnamed bitfields are only used for padding and do not appear in
+ // `InitListExpr`'s inits. However, those fields do appear in `RecordDecl`'s
+ // field list, and we thus need to remove them before mapping inits to
+ // fields to avoid mapping inits to the wrongs fields.
+ std::vector<FieldDecl *> Fields;
+ llvm::copy_if(
+ RD->fields(), std::back_inserter(Fields),
+ [](const FieldDecl *Field) { return !Field->isUnnamedBitfield(); });
+ return Fields;
+}
+
+RecordValue &refreshRecordValue(RecordStorageLocation &Loc, Environment &Env) {
+ auto &NewVal = Env.create<RecordValue>(Loc);
+ Env.setValue(Loc, NewVal);
+ return NewVal;
+}
+
+RecordValue &refreshRecordValue(const Expr &Expr, Environment &Env) {
+ assert(Expr.getType()->isRecordType());
+
+ if (Expr.isPRValue()) {
+ if (auto *ExistingVal = Env.get<RecordValue>(Expr)) {
+ auto &NewVal = Env.create<RecordValue>(ExistingVal->getLoc());
+ Env.setValue(Expr, NewVal);
+ Env.setValue(NewVal.getLoc(), NewVal);
+ return NewVal;
+ }
+
+ auto &NewVal = *cast<RecordValue>(Env.createValue(Expr.getType()));
+ Env.setValue(Expr, NewVal);
+ return NewVal;
+ }
+
+ if (auto *Loc = Env.get<RecordStorageLocation>(Expr)) {
+ auto &NewVal = Env.create<RecordValue>(*Loc);
+ Env.setValue(*Loc, NewVal);
+ return NewVal;
+ }
+
+ auto &NewVal = *cast<RecordValue>(Env.createValue(Expr.getType()));
+ Env.setStorageLocation(Expr, NewVal.getLoc());
+ return NewVal;
+}
+
+} // namespace dataflow
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DebugSupport.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DebugSupport.cpp
new file mode 100644
index 000000000000..573c4b1d474b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DebugSupport.cpp
@@ -0,0 +1,79 @@
+//===- DebugSupport.cpp -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functions which generate more readable forms of data
+// structures used in the dataflow analyses, for debugging purposes.
+//
+//===----------------------------------------------------------------------===//
+
+#include <utility>
+
+#include "clang/Analysis/FlowSensitive/DebugSupport.h"
+#include "clang/Analysis/FlowSensitive/Solver.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace clang {
+namespace dataflow {
+
+llvm::StringRef debugString(Value::Kind Kind) {
+ switch (Kind) {
+ case Value::Kind::Integer:
+ return "Integer";
+ case Value::Kind::Pointer:
+ return "Pointer";
+ case Value::Kind::Record:
+ return "Record";
+ case Value::Kind::AtomicBool:
+ return "AtomicBool";
+ case Value::Kind::TopBool:
+ return "TopBool";
+ case Value::Kind::FormulaBool:
+ return "FormulaBool";
+ }
+ llvm_unreachable("Unhandled value kind");
+}
+
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
+ Solver::Result::Assignment Assignment) {
+ switch (Assignment) {
+ case Solver::Result::Assignment::AssignedFalse:
+ return OS << "False";
+ case Solver::Result::Assignment::AssignedTrue:
+ return OS << "True";
+ }
+ llvm_unreachable("Booleans can only be assigned true/false");
+}
+
+llvm::StringRef debugString(Solver::Result::Status Status) {
+ switch (Status) {
+ case Solver::Result::Status::Satisfiable:
+ return "Satisfiable";
+ case Solver::Result::Status::Unsatisfiable:
+ return "Unsatisfiable";
+ case Solver::Result::Status::TimedOut:
+ return "TimedOut";
+ }
+ llvm_unreachable("Unhandled SAT check result status");
+}
+
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const Solver::Result &R) {
+ OS << debugString(R.getStatus()) << "\n";
+ if (auto Solution = R.getSolution()) {
+ std::vector<std::pair<Atom, Solver::Result::Assignment>> Sorted = {
+ Solution->begin(), Solution->end()};
+ llvm::sort(Sorted);
+ for (const auto &Entry : Sorted)
+ OS << Entry.first << " = " << Entry.second << "\n";
+ }
+ return OS;
+}
+
+} // namespace dataflow
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Formula.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Formula.cpp
new file mode 100644
index 000000000000..ef7d23ff6c56
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Formula.cpp
@@ -0,0 +1,94 @@
+//===- Formula.cpp ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/Formula.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <type_traits>
+
+namespace clang::dataflow {
+
+const Formula &Formula::create(llvm::BumpPtrAllocator &Alloc, Kind K,
+ ArrayRef<const Formula *> Operands,
+ unsigned Value) {
+ assert(Operands.size() == numOperands(K));
+ if (Value != 0) // Currently, formulas have values or operands, not both.
+ assert(numOperands(K) == 0);
+ void *Mem = Alloc.Allocate(sizeof(Formula) +
+ Operands.size() * sizeof(Operands.front()),
+ alignof(Formula));
+ Formula *Result = new (Mem) Formula();
+ Result->FormulaKind = K;
+ Result->Value = Value;
+ // Operands are stored as `const Formula *`s after the formula itself.
+ // We don't need to construct an object as pointers are trivial types.
+ // Formula is alignas(const Formula *), so alignment is satisfied.
+ llvm::copy(Operands, reinterpret_cast<const Formula **>(Result + 1));
+ return *Result;
+}
+
+static llvm::StringLiteral sigil(Formula::Kind K) {
+ switch (K) {
+ case Formula::AtomRef:
+ case Formula::Literal:
+ return "";
+ case Formula::Not:
+ return "!";
+ case Formula::And:
+ return " & ";
+ case Formula::Or:
+ return " | ";
+ case Formula::Implies:
+ return " => ";
+ case Formula::Equal:
+ return " = ";
+ }
+ llvm_unreachable("unhandled formula kind");
+}
+
+void Formula::print(llvm::raw_ostream &OS, const AtomNames *Names) const {
+ if (Names && kind() == AtomRef)
+ if (auto It = Names->find(getAtom()); It != Names->end()) {
+ OS << It->second;
+ return;
+ }
+
+ switch (numOperands(kind())) {
+ case 0:
+ switch (kind()) {
+ case AtomRef:
+ OS << getAtom();
+ break;
+ case Literal:
+ OS << (literal() ? "true" : "false");
+ break;
+ default:
+ llvm_unreachable("unhandled formula kind");
+ }
+ break;
+ case 1:
+ OS << sigil(kind());
+ operands()[0]->print(OS, Names);
+ break;
+ case 2:
+ OS << '(';
+ operands()[0]->print(OS, Names);
+ OS << sigil(kind());
+ operands()[1]->print(OS, Names);
+ OS << ')';
+ break;
+ default:
+ llvm_unreachable("unhandled formula arity");
+ }
+}
+
+} // namespace clang::dataflow \ No newline at end of file
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.cpp
new file mode 100644
index 000000000000..2a7bfce53501
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.cpp
@@ -0,0 +1,565 @@
+//===-- HTMLLogger.cpp ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the HTML logger. Given a directory dir/, we write
+// dir/0.html for the first analysis, etc.
+// These files contain a visualization that allows inspecting the CFG and the
+// state of the analysis at each point.
+// Static assets (HTMLLogger.js, HTMLLogger.css) and SVG graphs etc are embedded
+// so each output file is self-contained.
+//
+// VIEWS
+//
+// The timeline and function view are always shown. These allow selecting basic
+// blocks, statements within them, and processing iterations (BBs are visited
+// multiple times when e.g. loops are involved).
+// These are written directly into the HTML body.
+//
+// There are also listings of particular basic blocks, and dumps of the state
+// at particular analysis points (i.e. BB2 iteration 3 statement 2).
+// These are only shown when the relevant BB/analysis point is *selected*.
+//
+// DATA AND TEMPLATES
+//
+// The HTML proper is mostly static.
+// The analysis data is in a JSON object HTMLLoggerData which is embedded as
+// a <script> in the <head>.
+// This gets rendered into DOM by a simple template processor which substitutes
+// the data into <template> tags embedded in the HTML. (see inflate() in JS).
+//
+// SELECTION
+//
+// This is the only real interactive mechanism.
+//
+// At any given time, there are several named selections, e.g.:
+// bb: B2 (basic block 0 is selected)
+// elt: B2.4 (statement 4 is selected)
+// iter: B2:1 (iteration 1 of the basic block is selected)
+// hover: B3 (hovering over basic block 3)
+//
+// The selection is updated by mouse events: hover by moving the mouse and
+// others by clicking. Elements that are click targets generally have attributes
+// (id or data-foo) that define what they should select.
+// See watchSelection() in JS for the exact logic.
+//
+// When the "bb" selection is set to "B2":
+// - sections <section data-selection="bb"> get shown
+// - templates under such sections get re-rendered
+// - elements with class/id "B2" get class "bb-select"
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
+#include "clang/Analysis/FlowSensitive/DebugSupport.h"
+#include "clang/Analysis/FlowSensitive/Logger.h"
+#include "clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/ScopeExit.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/JSON.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/ScopedPrinter.h"
+#include "llvm/Support/raw_ostream.h"
+// Defines assets: HTMLLogger_{html_js,css}
+#include "HTMLLogger.inc"
+
+namespace clang::dataflow {
+namespace {
+
+// Render a graphviz graph specification to SVG using the `dot` tool.
+llvm::Expected<std::string> renderSVG(llvm::StringRef DotGraph);
+
+using StreamFactory = std::function<std::unique_ptr<llvm::raw_ostream>()>;
+
+// Recursively dumps Values/StorageLocations as JSON
+class ModelDumper {
+public:
+ ModelDumper(llvm::json::OStream &JOS, const Environment &Env)
+ : JOS(JOS), Env(Env) {}
+
+ void dump(Value &V) {
+ JOS.attribute("value_id", llvm::to_string(&V));
+ if (!Visited.insert(&V).second)
+ return;
+
+ JOS.attribute("kind", debugString(V.getKind()));
+
+ switch (V.getKind()) {
+ case Value::Kind::Integer:
+ case Value::Kind::Record:
+ case Value::Kind::TopBool:
+ case Value::Kind::AtomicBool:
+ case Value::Kind::FormulaBool:
+ break;
+ case Value::Kind::Pointer:
+ JOS.attributeObject(
+ "pointee", [&] { dump(cast<PointerValue>(V).getPointeeLoc()); });
+ break;
+ }
+
+ for (const auto& Prop : V.properties())
+ JOS.attributeObject(("p:" + Prop.first()).str(),
+ [&] { dump(*Prop.second); });
+
+ // Running the SAT solver is expensive, but knowing which booleans are
+ // guaranteed true/false here is valuable and hard to determine by hand.
+ if (auto *B = llvm::dyn_cast<BoolValue>(&V)) {
+ JOS.attribute("formula", llvm::to_string(B->formula()));
+ JOS.attribute("truth", Env.proves(B->formula()) ? "true"
+ : Env.proves(Env.arena().makeNot(B->formula()))
+ ? "false"
+ : "unknown");
+ }
+ }
+ void dump(const StorageLocation &L) {
+ JOS.attribute("location", llvm::to_string(&L));
+ if (!Visited.insert(&L).second)
+ return;
+
+ JOS.attribute("type", L.getType().getAsString());
+ if (auto *V = Env.getValue(L))
+ dump(*V);
+
+ if (auto *RLoc = dyn_cast<RecordStorageLocation>(&L)) {
+ for (const auto &Child : RLoc->children())
+ JOS.attributeObject("f:" + Child.first->getNameAsString(), [&] {
+ if (Child.second)
+ if (Value *Val = Env.getValue(*Child.second))
+ dump(*Val);
+ });
+
+ for (const auto &SyntheticField : RLoc->synthetic_fields())
+ JOS.attributeObject(("sf:" + SyntheticField.first()).str(),
+ [&] { dump(*SyntheticField.second); });
+ }
+ }
+
+ llvm::DenseSet<const void*> Visited;
+ llvm::json::OStream &JOS;
+ const Environment &Env;
+};
+
+class HTMLLogger : public Logger {
+ struct Iteration {
+ const CFGBlock *Block;
+ unsigned Iter;
+ bool PostVisit;
+ bool Converged;
+ };
+
+ StreamFactory Streams;
+ std::unique_ptr<llvm::raw_ostream> OS;
+ std::optional<llvm::json::OStream> JOS;
+
+ const ControlFlowContext *CFG;
+ // Timeline of iterations of CFG block visitation.
+ std::vector<Iteration> Iters;
+ // Indexes in `Iters` of the iterations for each block.
+ llvm::DenseMap<const CFGBlock *, llvm::SmallVector<size_t>> BlockIters;
+ // The messages logged in the current context but not yet written.
+ std::string ContextLogs;
+ // The number of elements we have visited within the current CFG block.
+ unsigned ElementIndex;
+
+public:
+ explicit HTMLLogger(StreamFactory Streams) : Streams(std::move(Streams)) {}
+ void beginAnalysis(const ControlFlowContext &CFG,
+ TypeErasedDataflowAnalysis &A) override {
+ OS = Streams();
+ this->CFG = &CFG;
+ *OS << llvm::StringRef(HTMLLogger_html).split("<?INJECT?>").first;
+
+ const auto &D = CFG.getDecl();
+ const auto &SM = A.getASTContext().getSourceManager();
+ *OS << "<title>";
+ if (const auto *ND = dyn_cast<NamedDecl>(&D))
+ *OS << ND->getNameAsString() << " at ";
+ *OS << SM.getFilename(D.getLocation()) << ":"
+ << SM.getSpellingLineNumber(D.getLocation());
+ *OS << "</title>\n";
+
+ *OS << "<style>" << HTMLLogger_css << "</style>\n";
+ *OS << "<script>" << HTMLLogger_js << "</script>\n";
+
+ writeCode();
+ writeCFG();
+
+ *OS << "<script>var HTMLLoggerData = \n";
+ JOS.emplace(*OS, /*Indent=*/2);
+ JOS->objectBegin();
+ JOS->attributeBegin("states");
+ JOS->objectBegin();
+ }
+ // Between beginAnalysis() and endAnalysis() we write all the states for
+ // particular analysis points into the `timeline` array.
+ void endAnalysis() override {
+ JOS->objectEnd();
+ JOS->attributeEnd();
+
+ JOS->attributeArray("timeline", [&] {
+ for (const auto &E : Iters) {
+ JOS->object([&] {
+ JOS->attribute("block", blockID(E.Block->getBlockID()));
+ JOS->attribute("iter", E.Iter);
+ JOS->attribute("post_visit", E.PostVisit);
+ JOS->attribute("converged", E.Converged);
+ });
+ }
+ });
+ JOS->attributeObject("cfg", [&] {
+ for (const auto &E : BlockIters)
+ writeBlock(*E.first, E.second);
+ });
+
+ JOS->objectEnd();
+ JOS.reset();
+ *OS << ";\n</script>\n";
+ *OS << llvm::StringRef(HTMLLogger_html).split("<?INJECT?>").second;
+ }
+
+ void enterBlock(const CFGBlock &B, bool PostVisit) override {
+ llvm::SmallVector<size_t> &BIter = BlockIters[&B];
+ unsigned IterNum = BIter.size() + 1;
+ BIter.push_back(Iters.size());
+ Iters.push_back({&B, IterNum, PostVisit, /*Converged=*/false});
+ ElementIndex = 0;
+ }
+ void enterElement(const CFGElement &E) override {
+ ++ElementIndex;
+ }
+
+ static std::string blockID(unsigned Block) {
+ return llvm::formatv("B{0}", Block);
+ }
+ static std::string eltID(unsigned Block, unsigned Element) {
+ return llvm::formatv("B{0}.{1}", Block, Element);
+ }
+ static std::string iterID(unsigned Block, unsigned Iter) {
+ return llvm::formatv("B{0}:{1}", Block, Iter);
+ }
+ static std::string elementIterID(unsigned Block, unsigned Iter,
+ unsigned Element) {
+ return llvm::formatv("B{0}:{1}_B{0}.{2}", Block, Iter, Element);
+ }
+
+ // Write the analysis state associated with a particular analysis point.
+ // FIXME: this dump is fairly opaque. We should show:
+ // - values associated with the current Stmt
+ // - values associated with its children
+ // - meaningful names for values
+ // - which boolean values are implied true/false by the flow condition
+ void recordState(TypeErasedDataflowAnalysisState &State) override {
+ unsigned Block = Iters.back().Block->getBlockID();
+ unsigned Iter = Iters.back().Iter;
+ bool PostVisit = Iters.back().PostVisit;
+ JOS->attributeObject(elementIterID(Block, Iter, ElementIndex), [&] {
+ JOS->attribute("block", blockID(Block));
+ JOS->attribute("iter", Iter);
+ JOS->attribute("post_visit", PostVisit);
+ JOS->attribute("element", ElementIndex);
+
+ // If this state immediately follows an Expr, show its built-in model.
+ if (ElementIndex > 0) {
+ auto S =
+ Iters.back().Block->Elements[ElementIndex - 1].getAs<CFGStmt>();
+ if (const Expr *E = S ? llvm::dyn_cast<Expr>(S->getStmt()) : nullptr) {
+ if (E->isPRValue()) {
+ if (auto *V = State.Env.getValue(*E))
+ JOS->attributeObject(
+ "value", [&] { ModelDumper(*JOS, State.Env).dump(*V); });
+ } else {
+ if (auto *Loc = State.Env.getStorageLocation(*E))
+ JOS->attributeObject(
+ "value", [&] { ModelDumper(*JOS, State.Env).dump(*Loc); });
+ }
+ }
+ }
+ if (!ContextLogs.empty()) {
+ JOS->attribute("logs", ContextLogs);
+ ContextLogs.clear();
+ }
+ {
+ std::string BuiltinLattice;
+ llvm::raw_string_ostream BuiltinLatticeS(BuiltinLattice);
+ State.Env.dump(BuiltinLatticeS);
+ JOS->attribute("builtinLattice", BuiltinLattice);
+ }
+ });
+ }
+ void blockConverged() override { Iters.back().Converged = true; }
+
+ void logText(llvm::StringRef S) override {
+ ContextLogs.append(S.begin(), S.end());
+ ContextLogs.push_back('\n');
+ }
+
+private:
+ // Write the CFG block details.
+ // Currently this is just the list of elements in execution order.
+ // FIXME: an AST dump would be a useful view, too.
+ void writeBlock(const CFGBlock &B, llvm::ArrayRef<size_t> ItersForB) {
+ JOS->attributeObject(blockID(B.getBlockID()), [&] {
+ JOS->attributeArray("iters", [&] {
+ for (size_t IterIdx : ItersForB) {
+ const Iteration &Iter = Iters[IterIdx];
+ JOS->object([&] {
+ JOS->attribute("iter", Iter.Iter);
+ JOS->attribute("post_visit", Iter.PostVisit);
+ JOS->attribute("converged", Iter.Converged);
+ });
+ }
+ });
+ JOS->attributeArray("elements", [&] {
+ for (const auto &Elt : B.Elements) {
+ std::string Dump;
+ llvm::raw_string_ostream DumpS(Dump);
+ Elt.dumpToStream(DumpS);
+ JOS->value(Dump);
+ }
+ });
+ });
+ }
+
+ // Write the code of function being examined.
+ // We want to overlay the code with <span>s that mark which BB particular
+ // tokens are associated with, and even which BB element (so that clicking
+ // can select the right element).
+ void writeCode() {
+ const auto &AST = CFG->getDecl().getASTContext();
+ bool Invalid = false;
+
+ // Extract the source code from the original file.
+ // Pretty-printing from the AST would probably be nicer (no macros or
+ // indentation to worry about), but we need the boundaries of particular
+ // AST nodes and the printer doesn't provide this.
+ auto Range = clang::Lexer::makeFileCharRange(
+ CharSourceRange::getTokenRange(CFG->getDecl().getSourceRange()),
+ AST.getSourceManager(), AST.getLangOpts());
+ if (Range.isInvalid())
+ return;
+ llvm::StringRef Code = clang::Lexer::getSourceText(
+ Range, AST.getSourceManager(), AST.getLangOpts(), &Invalid);
+ if (Invalid)
+ return;
+
+ // TokenInfo stores the BB and set of elements that a token is part of.
+ struct TokenInfo {
+ enum : unsigned { Missing = static_cast<unsigned>(-1) };
+
+ // The basic block this is part of.
+ // This is the BB of the stmt with the smallest containing range.
+ unsigned BB = Missing;
+ unsigned BBPriority = 0;
+ // The most specific stmt this is part of (smallest range).
+ unsigned Elt = Missing;
+ unsigned EltPriority = 0;
+ // All stmts this is part of.
+ SmallVector<unsigned> Elts;
+
+ // Mark this token as being part of BB.Elt.
+ // RangeLen is the character length of the element's range, used to
+ // distinguish inner vs outer statements.
+ // For example in `a==0`, token "a" is part of the stmts "a" and "a==0".
+ // However "a" has a smaller range, so is more specific. Clicking on the
+ // token "a" should select the stmt "a".
+ void assign(unsigned BB, unsigned Elt, unsigned RangeLen) {
+ // A worse BB (larger range) => ignore.
+ if (this->BB != Missing && BB != this->BB && BBPriority <= RangeLen)
+ return;
+ if (BB != this->BB) {
+ this->BB = BB;
+ Elts.clear();
+ BBPriority = RangeLen;
+ }
+ BBPriority = std::min(BBPriority, RangeLen);
+ Elts.push_back(Elt);
+ if (this->Elt == Missing || EltPriority > RangeLen)
+ this->Elt = Elt;
+ }
+ bool operator==(const TokenInfo &Other) const {
+ return std::tie(BB, Elt, Elts) ==
+ std::tie(Other.BB, Other.Elt, Other.Elts);
+ }
+ // Write the attributes for the <span> on this token.
+ void write(llvm::raw_ostream &OS) const {
+ OS << "class='c";
+ if (BB != Missing)
+ OS << " " << blockID(BB);
+ for (unsigned Elt : Elts)
+ OS << " " << eltID(BB, Elt);
+ OS << "'";
+
+ if (Elt != Missing)
+ OS << " data-elt='" << eltID(BB, Elt) << "'";
+ if (BB != Missing)
+ OS << " data-bb='" << blockID(BB) << "'";
+ }
+ };
+
+ // Construct one TokenInfo per character in a flat array.
+ // This is inefficient (chars in a token all have the same info) but simple.
+ std::vector<TokenInfo> State(Code.size());
+ for (const auto *Block : CFG->getCFG()) {
+ unsigned EltIndex = 0;
+ for (const auto& Elt : *Block) {
+ ++EltIndex;
+ if (const auto S = Elt.getAs<CFGStmt>()) {
+ auto EltRange = clang::Lexer::makeFileCharRange(
+ CharSourceRange::getTokenRange(S->getStmt()->getSourceRange()),
+ AST.getSourceManager(), AST.getLangOpts());
+ if (EltRange.isInvalid())
+ continue;
+ if (EltRange.getBegin() < Range.getBegin() ||
+ EltRange.getEnd() >= Range.getEnd() ||
+ EltRange.getEnd() < Range.getBegin() ||
+ EltRange.getEnd() >= Range.getEnd())
+ continue;
+ unsigned Off = EltRange.getBegin().getRawEncoding() -
+ Range.getBegin().getRawEncoding();
+ unsigned Len = EltRange.getEnd().getRawEncoding() -
+ EltRange.getBegin().getRawEncoding();
+ for (unsigned I = 0; I < Len; ++I)
+ State[Off + I].assign(Block->getBlockID(), EltIndex, Len);
+ }
+ }
+ }
+
+ // Finally, write the code with the correct <span>s.
+ unsigned Line =
+ AST.getSourceManager().getSpellingLineNumber(Range.getBegin());
+ *OS << "<template data-copy='code'>\n";
+ *OS << "<code class='filename'>";
+ llvm::printHTMLEscaped(
+ llvm::sys::path::filename(
+ AST.getSourceManager().getFilename(Range.getBegin())),
+ *OS);
+ *OS << "</code>";
+ *OS << "<code class='line' data-line='" << Line++ << "'>";
+ for (unsigned I = 0; I < Code.size(); ++I) {
+ // Don't actually write a <span> around each character, only break spans
+ // when the TokenInfo changes.
+ bool NeedOpen = I == 0 || !(State[I] == State[I-1]);
+ bool NeedClose = I + 1 == Code.size() || !(State[I] == State[I + 1]);
+ if (NeedOpen) {
+ *OS << "<span ";
+ State[I].write(*OS);
+ *OS << ">";
+ }
+ if (Code[I] == '\n')
+ *OS << "</code>\n<code class='line' data-line='" << Line++ << "'>";
+ else
+ llvm::printHTMLEscaped(Code.substr(I, 1), *OS);
+ if (NeedClose) *OS << "</span>";
+ }
+ *OS << "</code>\n";
+ *OS << "</template>";
+ }
+
+ // Write the CFG diagram, a graph of basic blocks.
+ // Laying out graphs is hard, so we construct a graphviz description and shell
+ // out to `dot` to turn it into an SVG.
+ void writeCFG() {
+ *OS << "<template data-copy='cfg'>\n";
+ if (auto SVG = renderSVG(buildCFGDot(CFG->getCFG())))
+ *OS << *SVG;
+ else
+ *OS << "Can't draw CFG: " << toString(SVG.takeError());
+ *OS << "</template>\n";
+ }
+
+ // Produce a graphviz description of a CFG.
+ static std::string buildCFGDot(const clang::CFG &CFG) {
+ std::string Graph;
+ llvm::raw_string_ostream GraphS(Graph);
+ // Graphviz likes to add unhelpful tooltips everywhere, " " suppresses.
+ GraphS << R"(digraph {
+ tooltip=" "
+ node[class=bb, shape=square, fontname="sans-serif", tooltip=" "]
+ edge[tooltip = " "]
+)";
+ for (unsigned I = 0; I < CFG.getNumBlockIDs(); ++I)
+ GraphS << " " << blockID(I) << " [id=" << blockID(I) << "]\n";
+ for (const auto *Block : CFG) {
+ for (const auto &Succ : Block->succs()) {
+ if (Succ.getReachableBlock())
+ GraphS << " " << blockID(Block->getBlockID()) << " -> "
+ << blockID(Succ.getReachableBlock()->getBlockID()) << "\n";
+ }
+ }
+ GraphS << "}\n";
+ return Graph;
+ }
+};
+
+// Nothing interesting here, just subprocess/temp-file plumbing.
+llvm::Expected<std::string> renderSVG(llvm::StringRef DotGraph) {
+ std::string DotPath;
+ if (const auto *FromEnv = ::getenv("GRAPHVIZ_DOT"))
+ DotPath = FromEnv;
+ else {
+ auto FromPath = llvm::sys::findProgramByName("dot");
+ if (!FromPath)
+ return llvm::createStringError(FromPath.getError(),
+ "'dot' not found on PATH");
+ DotPath = FromPath.get();
+ }
+
+ // Create input and output files for `dot` subprocess.
+ // (We create the output file as empty, to reserve the temp filename).
+ llvm::SmallString<256> Input, Output;
+ int InputFD;
+ if (auto EC = llvm::sys::fs::createTemporaryFile("analysis", ".dot", InputFD,
+ Input))
+ return llvm::createStringError(EC, "failed to create `dot` temp input");
+ llvm::raw_fd_ostream(InputFD, /*shouldClose=*/true) << DotGraph;
+ auto DeleteInput =
+ llvm::make_scope_exit([&] { llvm::sys::fs::remove(Input); });
+ if (auto EC = llvm::sys::fs::createTemporaryFile("analysis", ".svg", Output))
+ return llvm::createStringError(EC, "failed to create `dot` temp output");
+ auto DeleteOutput =
+ llvm::make_scope_exit([&] { llvm::sys::fs::remove(Output); });
+
+ std::vector<std::optional<llvm::StringRef>> Redirects = {
+ Input, Output,
+ /*stderr=*/std::nullopt};
+ std::string ErrMsg;
+ int Code = llvm::sys::ExecuteAndWait(
+ DotPath, {"dot", "-Tsvg"}, /*Env=*/std::nullopt, Redirects,
+ /*SecondsToWait=*/0, /*MemoryLimit=*/0, &ErrMsg);
+ if (!ErrMsg.empty())
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "'dot' failed: " + ErrMsg);
+ if (Code != 0)
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "'dot' failed (" + llvm::Twine(Code) + ")");
+
+ auto Buf = llvm::MemoryBuffer::getFile(Output);
+ if (!Buf)
+ return llvm::createStringError(Buf.getError(), "Can't read `dot` output");
+
+ // Output has <?xml> prefix we don't want. Skip to <svg> tag.
+ llvm::StringRef Result = Buf.get()->getBuffer();
+ auto Pos = Result.find("<svg");
+ if (Pos == llvm::StringRef::npos)
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "Can't find <svg> tag in `dot` output");
+ return Result.substr(Pos).str();
+}
+
+} // namespace
+
+std::unique_ptr<Logger>
+Logger::html(std::function<std::unique_ptr<llvm::raw_ostream>()> Streams) {
+ return std::make_unique<HTMLLogger>(std::move(Streams));
+}
+
+} // namespace clang::dataflow
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.css b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.css
new file mode 100644
index 000000000000..5da8db8fa87b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.css
@@ -0,0 +1,159 @@
+/*===-- HTMLLogger.css ----------------------------------------------------===
+*
+* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+* See https://llvm.org/LICENSE.txt for license information.
+* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+*
+*===----------------------------------------------------------------------===*/
+html { font-family: sans-serif; }
+body { margin: 0; display: flex; justify-content: left; }
+body > * { box-sizing: border-box; }
+body > section {
+ border: 1px solid black;
+ min-width: 20em;
+ overflow: auto;
+ max-height: 100vh;
+}
+section header {
+ background-color: #008;
+ color: white;
+ font-weight: bold;
+ font-size: large;
+ padding-right: 0.5em;
+}
+section h2 {
+ font-size: medium;
+ margin-bottom: 0.5em;
+ padding-top: 0.5em;
+ border-top: 1px solid #aaa;
+}
+#timeline {
+ min-width: max-content;
+}
+#timeline .entry.hover {
+ background-color: #aaa;
+}
+#timeline .entry.iter-select {
+ background-color: #aac;
+}
+
+#bb-elements {
+ font-family: monospace;
+ font-size: x-small;
+ border-collapse: collapse;
+}
+#bb-elements td:nth-child(1) {
+ text-align: right;
+ width: 4em;
+ border-right: 1px solid #008;
+ padding: 0.3em 0.5em;
+
+ font-weight: bold;
+ color: #888;
+};
+#bb-elements tr.hover {
+ background-color: #abc;
+}
+#bb-elements tr.elt-select {
+ background-color: #acf;
+}
+#iterations {
+ display: flex;
+}
+#iterations .chooser {
+ flex-grow: 1;
+ text-align: center;
+ padding-left: 0.2em;
+}
+#iterations .chooser :last-child {
+ padding-right: 0.2em;
+}
+#iterations .chooser:not(.iter-select).hover {
+ background-color: #ddd;
+}
+#iterations .iter-select {
+ font-weight: bold;
+}
+#iterations .chooser:not(.iter-select) {
+ text-decoration: underline;
+ color: blue;
+ cursor: pointer;
+ background-color: #ccc;
+}
+
+code.filename {
+ font-weight: bold;
+ color: black;
+ background-color: #ccc;
+ display: block;
+ text-align: center;
+}
+code.line {
+ display: block;
+ white-space: pre;
+}
+code.line:before { /* line numbers */
+ content: attr(data-line);
+ display: inline-block;
+ width: 2em;
+ text-align: right;
+ padding-right: 2px;
+ background-color: #ccc;
+ border-right: 1px solid #888;
+ margin-right: 8px;
+}
+code.line:has(.bb-select):before {
+ border-right: 4px solid black;
+ margin-right: 5px;
+}
+.c.hover, .bb.hover {
+ filter: saturate(200%) brightness(90%);
+}
+.c.elt-select {
+ box-shadow: inset 0 -4px 2px -2px #a00;
+}
+.bb.bb-select polygon {
+ stroke-width: 4px;
+ filter: brightness(70%) saturate(150%);
+}
+.bb { user-select: none; }
+.bb polygon { fill: white; }
+#cfg {
+ position: relative;
+ margin-left: 0.5em;
+}
+
+.value {
+ border: 1px solid #888;
+ font-size: x-small;
+ flex-grow: 1;
+}
+.value > summary {
+ background-color: #ace;
+ display: flex;
+ cursor: pointer;
+}
+.value > summary::before {
+ content: '\25ba'; /* Black Right-Pointing Pointer */
+ margin-right: 0.5em;
+ font-size: 0.9em;
+}
+.value[open] > summary::before {
+ content: '\25bc'; /* Black Down-Pointing Triangle */
+}
+.value > summary > .location {
+ margin-left: auto;
+}
+.value .address {
+ font-size: xx-small;
+ font-family: monospace;
+ color: #888;
+}
+.value .property {
+ display: flex;
+ margin-top: 0.5em;
+}
+.value .property .key {
+ font-weight: bold;
+ min-width: 5em;
+}
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.html b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.html
new file mode 100644
index 000000000000..b9f76c5074c7
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.html
@@ -0,0 +1,118 @@
+<!doctype html>
+<html>
+<!-- HTMLLogger.cpp ----------------------------------------------------
+
+ Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ See https://llvm.org/LICENSE.txt for license information.
+ SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+//===------------------------------------------------------------------------>
+
+<head>
+<?INJECT?>
+
+<template id="value-template">
+ <details class="value" open>
+ <summary>
+ <span>{{v.kind}}
+ <template data-if="v.value_id"><span class="address">#{{v.value_id}}</span></template>
+ </span>
+ <template data-if="v.location">
+ <span class="location">{{v.type}} <span class="address">@{{v.location}}</span></span>
+ </template>
+ </summary>
+ <template
+ data-for="kv in Object.entries(v)"
+ data-if="['kind', 'value_id', 'type', 'location'].indexOf(kv[0]) < 0">
+ <div class="property"><span class="key">{{kv[0]}}</span>
+ <template data-if="typeof(kv[1]) != 'object'">{{kv[1]}}</template>
+ <template data-if="typeof(kv[1]) == 'object'" data-let="v = kv[1]">
+ <template data-use="value-template"></template>
+ </template>
+ </div>
+ </template>
+ </details>
+</template>
+
+</head>
+
+<body>
+
+<section id="timeline" data-selection="">
+<header>Timeline</header>
+<template data-for="entry in timeline">
+ <div id="{{entry.block}}:{{entry.iter}}" data-bb="{{entry.block}}" class="entry">
+ {{entry.block}}
+ <template data-if="entry.post_visit">(post-visit)</template>
+ <template data-if="!entry.post_visit">({{entry.iter}})</template>
+ <template data-if="entry.converged"> &#x2192;&#x7c;<!--Rightwards arrow, vertical line--></template>
+ </div>
+</template>
+</section>
+
+<section id="function" data-selection="">
+<header>Function</header>
+<div id="code"></div>
+<div id="cfg"></div>
+</section>
+
+<section id="block" data-selection="bb">
+<header><template>Block {{selection.bb}}</template></header>
+<div id="iterations">
+ <template data-for="iter in cfg[selection.bb].iters">
+ <a class="chooser {{selection.bb}}:{{iter.iter}}" data-iter="{{selection.bb}}:{{iter.iter}}">
+ <template data-if="iter.post_visit">Post-visit</template>
+ <template data-if="!iter.post_visit">{{iter.iter}}</template>
+ <template data-if="iter.converged"> &#x2192;&#x7c;<!--Rightwards arrow, vertical line--></template>
+ </a>
+ </template>
+</div>
+<table id="bb-elements">
+<template>
+ <tr id="{{selection.bb}}.0">
+ <td class="{{selection.bb}}">{{selection.bb}}.0</td>
+ <td>(initial state)</td>
+ </tr>
+</template>
+<template data-for="elt in cfg[selection.bb].elements">
+ <tr id="{{selection.bb}}.{{elt_index+1}}">
+ <td class="{{selection.bb}}">{{selection.bb}}.{{elt_index+1}}</td>
+ <td>{{elt}}</td>
+ </tr>
+</template>
+</table>
+</section>
+
+<section id="element" data-selection="iter,elt">
+<template data-let="state = states[selection.iter + '_' + selection.elt]">
+<header>
+ <template data-if="state.element == 0">{{state.block}} initial state</template>
+ <template data-if="state.element != 0">Element {{selection.elt}}</template>
+ <template data-if="state.post_visit"> (post-visit)</template>
+ <template data-if="!state.post_visit"> (iteration {{state.iter}})</template>
+</header>
+<template data-if="state.value" data-let="v = state.value">
+ <h2>Value</h2>
+ <template data-use="value-template"></template>
+</template>
+<template data-if="state.logs">
+ <h2>Logs</h2>
+ <pre>{{state.logs}}</pre>
+</template>
+<h2>Built-in lattice</h2>
+<pre>{{state.builtinLattice}}</pre>
+</template>
+</section>
+
+<script>
+addBBColors(Object.keys(HTMLLoggerData.cfg).length);
+watchSelection(HTMLLoggerData);
+updateSelection({}, HTMLLoggerData);
+// Copy code and cfg from <template>s into the body.
+for (tmpl of document.querySelectorAll('template[data-copy]'))
+ document.getElementById(tmpl.dataset.copy).replaceChildren(
+ ...tmpl.content.cloneNode(/*deep=*/true).childNodes);
+</script>
+
+</body>
+</html>
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.js b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.js
new file mode 100644
index 000000000000..6e04bc00f663
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/HTMLLogger.js
@@ -0,0 +1,219 @@
+//===-- HTMLLogger.js -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Based on selected objects, hide/show sections & populate data from templates.
+//
+// For example, if the selection is {bb="BB4", elt="BB4.6" iter="BB4:2"}:
+// - show the "block" and "element" sections
+// - re-render templates within these sections (if selection changed)
+// - apply "bb-select" to items with class class "BB4", etc
+let selection = {};
+function updateSelection(changes, data) {
+ Object.assign(selection, changes);
+
+ data = Object.create(data);
+ data.selection = selection;
+ for (root of document.querySelectorAll('[data-selection]'))
+ updateSection(root, data);
+
+ for (var k in changes)
+ applyClassIf(k + '-select', classSelector(changes[k]));
+}
+
+// Given <section data-selection="x,y">:
+// - hide section if selections x or y are null
+// - re-render templates if x or y have changed
+function updateSection(root, data) {
+ let changed = root.selection == null;
+ root.selection ||= {};
+ for (key of root.dataset.selection.split(',')) {
+ if (!key) continue;
+ if (data.selection[key] != root.selection[key]) {
+ root.selection[key] = data.selection[key];
+ changed = true;
+ }
+ if (data.selection[key] == null) {
+ root.hidden = true;
+ return;
+ }
+ }
+ if (changed) {
+ root.hidden = false;
+ for (tmpl of root.getElementsByTagName('template'))
+ reinflate(tmpl, data);
+ }
+}
+
+// Expands template `tmpl` based on input `data`:
+// - interpolates {{expressions}} in text and attributes
+// - <template> tags can modify expansion: if, for etc
+// Outputs to `parent` element, inserting before `next`.
+function inflate(tmpl, data, parent, next) {
+ // We use eval() as our expression language in templates!
+ // The templates are static and trusted.
+ let evalExpr = (expr, data) => eval('with (data) { ' + expr + ' }');
+ let interpolate = (str, data) =>
+ str.replace(/\{\{(.*?)\}\}/g, (_, expr) => evalExpr(expr, data))
+ // Anything other than <template> tag: copy, interpolate, recursively inflate.
+ if (tmpl.nodeName != 'TEMPLATE') {
+ let clone = tmpl.cloneNode();
+ clone.inflated = true;
+ if (clone instanceof Text)
+ clone.textContent = interpolate(clone.textContent, data);
+ if (clone instanceof Element) {
+ for (attr of clone.attributes)
+ attr.value = interpolate(attr.value, data);
+ for (c of tmpl.childNodes)
+ inflate(c, data, clone, /*next=*/null);
+ }
+ return parent.insertBefore(clone, next);
+ }
+ // data-use="xyz": use <template id="xyz"> instead. (Allows recursion.)
+ if ('use' in tmpl.dataset)
+ return inflate(document.getElementById(tmpl.dataset.use), data, parent, next);
+ // <template> tag handling. Base case: recursively inflate.
+ function handle(data) {
+ for (c of tmpl.content.childNodes)
+ inflate(c, data, parent, next);
+ }
+ // Directives on <template> tags modify behavior.
+ const directives = {
+ // data-for="x in expr": expr is enumerable, bind x to each in turn
+ 'for': (nameInExpr, data, proceed) => {
+ let [name, expr] = nameInExpr.split(' in ');
+ let newData = Object.create(data);
+ let index = 0;
+ for (val of evalExpr(expr, data) || []) {
+ newData[name] = val;
+ newData[name + '_index'] = index++;
+ proceed(newData);
+ }
+ },
+ // data-if="expr": only include contents if expression is truthy
+ 'if': (expr, data, proceed) => { if (evalExpr(expr, data)) proceed(data); },
+ // data-let="x = expr": bind x to value of expr
+ 'let': (nameEqExpr, data, proceed) => {
+ let [name, expr] = nameEqExpr.split(' = ');
+ let newData = Object.create(data);
+ newData[name] = evalExpr(expr, data);
+ proceed(newData);
+ },
+ }
+ // Compose directive handlers on top of the base handler.
+ for (let [dir, value] of Object.entries(tmpl.dataset).reverse()) {
+ if (dir in directives) {
+ let proceed = handle;
+ handle = (data) => directives[dir](value, data, proceed);
+ }
+ }
+ handle(data);
+}
+// Expand a template, after first removing any prior expansion of it.
+function reinflate(tmpl, data) {
+ // Clear previously rendered template contents.
+ while (tmpl.nextSibling && tmpl.nextSibling.inflated)
+ tmpl.parentNode.removeChild(tmpl.nextSibling);
+ inflate(tmpl, data, tmpl.parentNode, tmpl.nextSibling);
+}
+
+// Handle a mouse event on a region containing selectable items.
+// This might end up changing the hover state or the selection state.
+//
+// targetSelector describes what target HTML element is selectable.
+// targetToID specifies how to determine the selection from it:
+// hover: a function from target to the class name to highlight
+// bb: a function from target to the basic-block name to select (BB4)
+// elt: a function from target to the CFG element name to select (BB4.5)
+// iter: a function from target to the BB iteration to select (BB4:2)
+// If an entry is missing, the selection is unmodified.
+// If an entry is null, the selection is always cleared.
+function mouseEventHandler(event, targetSelector, targetToID, data) {
+ var target = event.type == "mouseout" ? null : event.target.closest(targetSelector);
+ let selTarget = k => (target && targetToID[k]) ? targetToID[k](target) : null;
+ if (event.type == "click") {
+ let newSel = {};
+ for (var k in targetToID) {
+ if (k == 'hover') continue;
+ let t = selTarget(k);
+ newSel[k] = t;
+ }
+ updateSelection(newSel, data);
+ } else if ("hover" in targetToID) {
+ applyClassIf("hover", classSelector(selTarget("hover")));
+ }
+}
+function watch(rootSelector, targetSelector, targetToID, data) {
+ var root = document.querySelector(rootSelector);
+ for (event of ['mouseout', 'mousemove', 'click'])
+ root.addEventListener(event, e => mouseEventHandler(e, targetSelector, targetToID, data));
+}
+function watchSelection(data) {
+ let lastIter = (bb) => `${bb}:${data.cfg[bb].iters}`;
+ watch('#code', '.c', {
+ hover: e => e.dataset.elt,
+ bb: e => e.dataset.bb,
+ elt: e => e.dataset.elt,
+ // If we're already viewing an iteration of this BB, stick with the same.
+ iter: e => (selection.iter && selection.bb == e.dataset.bb) ? selection.iter : lastIter(e.dataset.bb),
+ }, data);
+ watch('#cfg', '.bb', {
+ hover: e => e.id,
+ bb: e => e.id,
+ elt: e => e.id + ".0",
+ iter: e => lastIter(e.id),
+ }, data);
+ watch('#timeline', '.entry', {
+ hover: e => [e.id, e.dataset.bb],
+ bb: e => e.dataset.bb,
+ elt: e => e.dataset.bb + ".0",
+ iter: e => e.id,
+ }, data);
+ watch('#bb-elements', 'tr', {
+ hover: e => e.id,
+ elt: e => e.id,
+ }, data);
+ watch('#iterations', '.chooser', {
+ hover: e => e.dataset.iter,
+ iter: e => e.dataset.iter,
+ }, data);
+ updateSelection({}, data);
+}
+function applyClassIf(cls, query) {
+ document.querySelectorAll('.' + cls).forEach(elt => elt.classList.remove(cls));
+ document.querySelectorAll(query).forEach(elt => elt.classList.add(cls));
+}
+// Turns a class name into a CSS selector matching it, with some wrinkles:
+// - we treat id="foo" just like class="foo" to avoid repetition in the HTML
+// - cls can be an array of strings, we match them all
+function classSelector(cls) {
+ if (cls == null) return null;
+ if (Array.isArray(cls)) return cls.map(classSelector).join(', ');
+ var escaped = cls.replace('.', '\\.').replace(':', '\\:');
+ // don't require id="foo" class="foo"
+ return '.' + escaped + ", #" + escaped;
+}
+
+// Add a stylesheet defining colors for n basic blocks.
+function addBBColors(n) {
+ let sheet = new CSSStyleSheet();
+ // hex values to subtract from fff to get a base color
+ options = [0x001, 0x010, 0x011, 0x100, 0x101, 0x110, 0x111];
+ function color(hex) {
+ return "#" + hex.toString(16).padStart(3, "0");
+ }
+ function add(selector, property, hex) {
+ sheet.insertRule(`${selector} { ${property}: ${color(hex)}; }`)
+ }
+ for (var i = 0; i < n; ++i) {
+ let opt = options[i%options.length];
+ add(`.B${i}`, 'background-color', 0xfff - 2*opt);
+ add(`#B${i} polygon`, 'fill', 0xfff - 2*opt);
+ add(`#B${i} polygon`, 'stroke', 0x888 - 4*opt);
+ }
+ document.adoptedStyleSheets.push(sheet);
+}
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Logger.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Logger.cpp
new file mode 100644
index 000000000000..8c401df62e44
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Logger.cpp
@@ -0,0 +1,111 @@
+//===-- Logger.cpp --------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/Logger.h"
+#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
+#include "clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h"
+#include "llvm/Support/WithColor.h"
+
+namespace clang::dataflow {
+
+Logger &Logger::null() {
+ struct NullLogger final : Logger {};
+ static auto *Instance = new NullLogger();
+ return *Instance;
+}
+
+namespace {
+struct TextualLogger final : Logger {
+ llvm::raw_ostream &OS;
+ const CFG *CurrentCFG;
+ const CFGBlock *CurrentBlock;
+ const CFGElement *CurrentElement;
+ unsigned CurrentElementIndex;
+ bool ShowColors;
+ llvm::DenseMap<const CFGBlock *, unsigned> VisitCount;
+ TypeErasedDataflowAnalysis *CurrentAnalysis;
+
+ TextualLogger(llvm::raw_ostream &OS)
+ : OS(OS), ShowColors(llvm::WithColor::defaultAutoDetectFunction()(OS)) {}
+
+ virtual void beginAnalysis(const ControlFlowContext &CFG,
+ TypeErasedDataflowAnalysis &Analysis) override {
+ {
+ llvm::WithColor Header(OS, llvm::raw_ostream::Colors::RED, /*Bold=*/true);
+ OS << "=== Beginning data flow analysis ===\n";
+ }
+ auto &D = CFG.getDecl();
+ D.print(OS);
+ OS << "\n";
+ D.dump(OS);
+ CurrentCFG = &CFG.getCFG();
+ CurrentCFG->print(OS, Analysis.getASTContext().getLangOpts(), ShowColors);
+ CurrentAnalysis = &Analysis;
+ }
+ virtual void endAnalysis() override {
+ llvm::WithColor Header(OS, llvm::raw_ostream::Colors::RED, /*Bold=*/true);
+ unsigned Blocks = 0, Steps = 0;
+ for (const auto &E : VisitCount) {
+ ++Blocks;
+ Steps += E.second;
+ }
+ llvm::errs() << "=== Finished analysis: " << Blocks << " blocks in "
+ << Steps << " total steps ===\n";
+ }
+ virtual void enterBlock(const CFGBlock &Block, bool PostVisit) override {
+ unsigned Count = ++VisitCount[&Block];
+ {
+ llvm::WithColor Header(OS, llvm::raw_ostream::Colors::RED, /*Bold=*/true);
+ OS << "=== Entering block B" << Block.getBlockID();
+ if (PostVisit)
+ OS << " (post-visit)";
+ else
+ OS << " (iteration " << Count << ")";
+ OS << " ===\n";
+ }
+ Block.print(OS, CurrentCFG, CurrentAnalysis->getASTContext().getLangOpts(),
+ ShowColors);
+ CurrentBlock = &Block;
+ CurrentElement = nullptr;
+ CurrentElementIndex = 0;
+ }
+ virtual void enterElement(const CFGElement &Element) override {
+ ++CurrentElementIndex;
+ CurrentElement = &Element;
+ {
+ llvm::WithColor Subheader(OS, llvm::raw_ostream::Colors::CYAN,
+ /*Bold=*/true);
+ OS << "Processing element B" << CurrentBlock->getBlockID() << "."
+ << CurrentElementIndex << ": ";
+ Element.dumpToStream(OS);
+ }
+ }
+ void recordState(TypeErasedDataflowAnalysisState &State) override {
+ {
+ llvm::WithColor Subheader(OS, llvm::raw_ostream::Colors::CYAN,
+ /*Bold=*/true);
+ OS << "Computed state for B" << CurrentBlock->getBlockID() << "."
+ << CurrentElementIndex << ":\n";
+ }
+ // FIXME: currently the environment dump is verbose and unenlightening.
+ // FIXME: dump the user-defined lattice, too.
+ State.Env.dump(OS);
+ OS << "\n";
+ }
+ void blockConverged() override {
+ OS << "B" << CurrentBlock->getBlockID() << " has converged!\n";
+ }
+ virtual void logText(llvm::StringRef S) override { OS << S << "\n"; }
+};
+} // namespace
+
+std::unique_ptr<Logger> Logger::textual(llvm::raw_ostream &OS) {
+ return std::make_unique<TextualLogger>(OS);
+}
+
+} // namespace clang::dataflow
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/ChromiumCheckModel.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/ChromiumCheckModel.cpp
new file mode 100644
index 000000000000..5ac71e1d6bf6
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/ChromiumCheckModel.cpp
@@ -0,0 +1,71 @@
+//===-- ChromiumCheckModel.cpp ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/Models/ChromiumCheckModel.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "llvm/ADT/DenseSet.h"
+
+namespace clang {
+namespace dataflow {
+
+/// Determines whether `D` is one of the methods used to implement Chromium's
+/// `CHECK` macros. Populates `CheckDecls`, if empty.
+bool isCheckLikeMethod(llvm::SmallDenseSet<const CXXMethodDecl *> &CheckDecls,
+ const CXXMethodDecl &D) {
+ // All of the methods of interest are static, so avoid any lookup for
+ // non-static methods (the common case).
+ if (!D.isStatic())
+ return false;
+
+ if (CheckDecls.empty()) {
+ // Attempt to initialize `CheckDecls` with the methods in class
+ // `CheckError`.
+ const CXXRecordDecl *ParentClass = D.getParent();
+ if (ParentClass == nullptr || !ParentClass->getDeclName().isIdentifier() ||
+ ParentClass->getName() != "CheckError")
+ return false;
+
+ // Check whether namespace is "logging".
+ const auto *N =
+ dyn_cast_or_null<NamespaceDecl>(ParentClass->getDeclContext());
+ if (N == nullptr || !N->getDeclName().isIdentifier() ||
+ N->getName() != "logging")
+ return false;
+
+ // Check whether "logging" is a top-level namespace.
+ if (N->getParent() == nullptr || !N->getParent()->isTranslationUnit())
+ return false;
+
+ for (const CXXMethodDecl *M : ParentClass->methods())
+ if (M->getDeclName().isIdentifier() && M->getName().ends_with("Check"))
+ CheckDecls.insert(M);
+ }
+
+ return CheckDecls.contains(&D);
+}
+
+bool ChromiumCheckModel::transfer(const CFGElement &Element, Environment &Env) {
+ auto CS = Element.getAs<CFGStmt>();
+ if (!CS)
+ return false;
+ auto Stmt = CS->getStmt();
+ if (const auto *Call = dyn_cast<CallExpr>(Stmt)) {
+ if (const auto *M = dyn_cast<CXXMethodDecl>(Call->getDirectCallee())) {
+ if (isCheckLikeMethod(CheckDecls, *M)) {
+ // Mark this branch as unreachable.
+ Env.assume(Env.arena().makeLiteral(false));
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+} // namespace dataflow
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
new file mode 100644
index 000000000000..1d31b22b6d25
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
@@ -0,0 +1,879 @@
+//===-- UncheckedOptionalAccessModel.cpp ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a dataflow analysis that detects unsafe uses of optional
+// values.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Stmt.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/ASTMatchers/ASTMatchersMacros.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/FlowSensitive/CFGMatchSwitch.h"
+#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/Analysis/FlowSensitive/Formula.h"
+#include "clang/Analysis/FlowSensitive/NoopLattice.h"
+#include "clang/Analysis/FlowSensitive/StorageLocation.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <memory>
+#include <optional>
+#include <utility>
+
+namespace clang {
+namespace dataflow {
+
+static bool isTopLevelNamespaceWithName(const NamespaceDecl &NS,
+ llvm::StringRef Name) {
+ return NS.getDeclName().isIdentifier() && NS.getName() == Name &&
+ NS.getParent() != nullptr && NS.getParent()->isTranslationUnit();
+}
+
+static bool hasOptionalClassName(const CXXRecordDecl &RD) {
+ if (!RD.getDeclName().isIdentifier())
+ return false;
+
+ if (RD.getName() == "optional") {
+ if (const auto *N = dyn_cast_or_null<NamespaceDecl>(RD.getDeclContext()))
+ return N->isStdNamespace() || isTopLevelNamespaceWithName(*N, "absl");
+ return false;
+ }
+
+ if (RD.getName() == "Optional") {
+ // Check whether namespace is "::base" or "::folly".
+ const auto *N = dyn_cast_or_null<NamespaceDecl>(RD.getDeclContext());
+ return N != nullptr && (isTopLevelNamespaceWithName(*N, "base") ||
+ isTopLevelNamespaceWithName(*N, "folly"));
+ }
+
+ return false;
+}
+
+namespace {
+
+using namespace ::clang::ast_matchers;
+using LatticeTransferState = TransferState<NoopLattice>;
+
+AST_MATCHER(CXXRecordDecl, hasOptionalClassNameMatcher) {
+ return hasOptionalClassName(Node);
+}
+
+DeclarationMatcher optionalClass() {
+ return classTemplateSpecializationDecl(
+ hasOptionalClassNameMatcher(),
+ hasTemplateArgument(0, refersToType(type().bind("T"))));
+}
+
+auto optionalOrAliasType() {
+ return hasUnqualifiedDesugaredType(
+ recordType(hasDeclaration(optionalClass())));
+}
+
+/// Matches any of the spellings of the optional types and sugar, aliases, etc.
+auto hasOptionalType() { return hasType(optionalOrAliasType()); }
+
+auto isOptionalMemberCallWithNameMatcher(
+ ast_matchers::internal::Matcher<NamedDecl> matcher,
+ const std::optional<StatementMatcher> &Ignorable = std::nullopt) {
+ auto Exception = unless(Ignorable ? expr(anyOf(*Ignorable, cxxThisExpr()))
+ : cxxThisExpr());
+ return cxxMemberCallExpr(
+ on(expr(Exception,
+ anyOf(hasOptionalType(),
+ hasType(pointerType(pointee(optionalOrAliasType())))))),
+ callee(cxxMethodDecl(matcher)));
+}
+
+auto isOptionalOperatorCallWithName(
+ llvm::StringRef operator_name,
+ const std::optional<StatementMatcher> &Ignorable = std::nullopt) {
+ return cxxOperatorCallExpr(
+ hasOverloadedOperatorName(operator_name),
+ callee(cxxMethodDecl(ofClass(optionalClass()))),
+ Ignorable ? callExpr(unless(hasArgument(0, *Ignorable))) : callExpr());
+}
+
+auto isMakeOptionalCall() {
+ return callExpr(callee(functionDecl(hasAnyName(
+ "std::make_optional", "base::make_optional",
+ "absl::make_optional", "folly::make_optional"))),
+ hasOptionalType());
+}
+
+auto nulloptTypeDecl() {
+ return namedDecl(hasAnyName("std::nullopt_t", "absl::nullopt_t",
+ "base::nullopt_t", "folly::None"));
+}
+
+auto hasNulloptType() { return hasType(nulloptTypeDecl()); }
+
+auto inPlaceClass() {
+ return recordDecl(hasAnyName("std::in_place_t", "absl::in_place_t",
+ "base::in_place_t", "folly::in_place_t"));
+}
+
+auto isOptionalNulloptConstructor() {
+ return cxxConstructExpr(
+ hasOptionalType(),
+ hasDeclaration(cxxConstructorDecl(parameterCountIs(1),
+ hasParameter(0, hasNulloptType()))));
+}
+
+auto isOptionalInPlaceConstructor() {
+ return cxxConstructExpr(hasOptionalType(),
+ hasArgument(0, hasType(inPlaceClass())));
+}
+
+auto isOptionalValueOrConversionConstructor() {
+ return cxxConstructExpr(
+ hasOptionalType(),
+ unless(hasDeclaration(
+ cxxConstructorDecl(anyOf(isCopyConstructor(), isMoveConstructor())))),
+ argumentCountIs(1), hasArgument(0, unless(hasNulloptType())));
+}
+
+auto isOptionalValueOrConversionAssignment() {
+ return cxxOperatorCallExpr(
+ hasOverloadedOperatorName("="),
+ callee(cxxMethodDecl(ofClass(optionalClass()))),
+ unless(hasDeclaration(cxxMethodDecl(
+ anyOf(isCopyAssignmentOperator(), isMoveAssignmentOperator())))),
+ argumentCountIs(2), hasArgument(1, unless(hasNulloptType())));
+}
+
+auto isOptionalNulloptAssignment() {
+ return cxxOperatorCallExpr(hasOverloadedOperatorName("="),
+ callee(cxxMethodDecl(ofClass(optionalClass()))),
+ argumentCountIs(2),
+ hasArgument(1, hasNulloptType()));
+}
+
+auto isStdSwapCall() {
+ return callExpr(callee(functionDecl(hasName("std::swap"))),
+ argumentCountIs(2), hasArgument(0, hasOptionalType()),
+ hasArgument(1, hasOptionalType()));
+}
+
+auto isStdForwardCall() {
+ return callExpr(callee(functionDecl(hasName("std::forward"))),
+ argumentCountIs(1), hasArgument(0, hasOptionalType()));
+}
+
+constexpr llvm::StringLiteral ValueOrCallID = "ValueOrCall";
+
+auto isValueOrStringEmptyCall() {
+ // `opt.value_or("").empty()`
+ return cxxMemberCallExpr(
+ callee(cxxMethodDecl(hasName("empty"))),
+ onImplicitObjectArgument(ignoringImplicit(
+ cxxMemberCallExpr(on(expr(unless(cxxThisExpr()))),
+ callee(cxxMethodDecl(hasName("value_or"),
+ ofClass(optionalClass()))),
+ hasArgument(0, stringLiteral(hasSize(0))))
+ .bind(ValueOrCallID))));
+}
+
+auto isValueOrNotEqX() {
+ auto ComparesToSame = [](ast_matchers::internal::Matcher<Stmt> Arg) {
+ return hasOperands(
+ ignoringImplicit(
+ cxxMemberCallExpr(on(expr(unless(cxxThisExpr()))),
+ callee(cxxMethodDecl(hasName("value_or"),
+ ofClass(optionalClass()))),
+ hasArgument(0, Arg))
+ .bind(ValueOrCallID)),
+ ignoringImplicit(Arg));
+ };
+
+ // `opt.value_or(X) != X`, for X is `nullptr`, `""`, or `0`. Ideally, we'd
+ // support this pattern for any expression, but the AST does not have a
+ // generic expression comparison facility, so we specialize to common cases
+ // seen in practice. FIXME: define a matcher that compares values across
+ // nodes, which would let us generalize this to any `X`.
+ return binaryOperation(hasOperatorName("!="),
+ anyOf(ComparesToSame(cxxNullPtrLiteralExpr()),
+ ComparesToSame(stringLiteral(hasSize(0))),
+ ComparesToSame(integerLiteral(equals(0)))));
+}
+
+auto isCallReturningOptional() {
+ return callExpr(hasType(qualType(anyOf(
+ optionalOrAliasType(), referenceType(pointee(optionalOrAliasType()))))));
+}
+
+template <typename L, typename R>
+auto isComparisonOperatorCall(L lhs_arg_matcher, R rhs_arg_matcher) {
+ return cxxOperatorCallExpr(
+ anyOf(hasOverloadedOperatorName("=="), hasOverloadedOperatorName("!=")),
+ argumentCountIs(2), hasArgument(0, lhs_arg_matcher),
+ hasArgument(1, rhs_arg_matcher));
+}
+
+/// Ensures that `Expr` is mapped to a `BoolValue` and returns its formula.
+const Formula &forceBoolValue(Environment &Env, const Expr &Expr) {
+ auto *Value = Env.get<BoolValue>(Expr);
+ if (Value != nullptr)
+ return Value->formula();
+
+ Value = &Env.makeAtomicBoolValue();
+ Env.setValue(Expr, *Value);
+ return Value->formula();
+}
+
+StorageLocation &locForHasValue(const RecordStorageLocation &OptionalLoc) {
+ return OptionalLoc.getSyntheticField("has_value");
+}
+
+StorageLocation &locForValue(const RecordStorageLocation &OptionalLoc) {
+ return OptionalLoc.getSyntheticField("value");
+}
+
+/// Sets `HasValueVal` as the symbolic value that represents the "has_value"
+/// property of the optional at `OptionalLoc`.
+void setHasValue(RecordStorageLocation &OptionalLoc, BoolValue &HasValueVal,
+ Environment &Env) {
+ Env.setValue(locForHasValue(OptionalLoc), HasValueVal);
+}
+
+/// Creates a symbolic value for an `optional` value at an existing storage
+/// location. Uses `HasValueVal` as the symbolic value of the "has_value"
+/// property.
+RecordValue &createOptionalValue(RecordStorageLocation &Loc,
+ BoolValue &HasValueVal, Environment &Env) {
+ auto &OptionalVal = Env.create<RecordValue>(Loc);
+ Env.setValue(Loc, OptionalVal);
+ setHasValue(Loc, HasValueVal, Env);
+ return OptionalVal;
+}
+
+/// Returns the symbolic value that represents the "has_value" property of the
+/// optional at `OptionalLoc`. Returns null if `OptionalLoc` is null.
+BoolValue *getHasValue(Environment &Env, RecordStorageLocation *OptionalLoc) {
+ if (OptionalLoc == nullptr)
+ return nullptr;
+ StorageLocation &HasValueLoc = locForHasValue(*OptionalLoc);
+ auto *HasValueVal = Env.get<BoolValue>(HasValueLoc);
+ if (HasValueVal == nullptr) {
+ HasValueVal = &Env.makeAtomicBoolValue();
+ Env.setValue(HasValueLoc, *HasValueVal);
+ }
+ return HasValueVal;
+}
+
+/// Returns true if and only if `Type` is an optional type.
+bool isOptionalType(QualType Type) {
+ if (!Type->isRecordType())
+ return false;
+ const CXXRecordDecl *D = Type->getAsCXXRecordDecl();
+ return D != nullptr && hasOptionalClassName(*D);
+}
+
+/// Returns the number of optional wrappers in `Type`.
+///
+/// For example, if `Type` is `optional<optional<int>>`, the result of this
+/// function will be 2.
+int countOptionalWrappers(const ASTContext &ASTCtx, QualType Type) {
+ if (!isOptionalType(Type))
+ return 0;
+ return 1 + countOptionalWrappers(
+ ASTCtx,
+ cast<ClassTemplateSpecializationDecl>(Type->getAsRecordDecl())
+ ->getTemplateArgs()
+ .get(0)
+ .getAsType()
+ .getDesugaredType(ASTCtx));
+}
+
+StorageLocation *getLocBehindPossiblePointer(const Expr &E,
+ const Environment &Env) {
+ if (E.isPRValue()) {
+ if (auto *PointerVal = dyn_cast_or_null<PointerValue>(Env.getValue(E)))
+ return &PointerVal->getPointeeLoc();
+ return nullptr;
+ }
+ return Env.getStorageLocation(E);
+}
+
+void transferUnwrapCall(const Expr *UnwrapExpr, const Expr *ObjectExpr,
+ LatticeTransferState &State) {
+ if (auto *OptionalLoc = cast_or_null<RecordStorageLocation>(
+ getLocBehindPossiblePointer(*ObjectExpr, State.Env))) {
+ if (State.Env.getStorageLocation(*UnwrapExpr) == nullptr)
+ State.Env.setStorageLocation(*UnwrapExpr, locForValue(*OptionalLoc));
+ }
+}
+
+void transferArrowOpCall(const Expr *UnwrapExpr, const Expr *ObjectExpr,
+ LatticeTransferState &State) {
+ if (auto *OptionalLoc = cast_or_null<RecordStorageLocation>(
+ getLocBehindPossiblePointer(*ObjectExpr, State.Env)))
+ State.Env.setValue(
+ *UnwrapExpr, State.Env.create<PointerValue>(locForValue(*OptionalLoc)));
+}
+
+void transferMakeOptionalCall(const CallExpr *E,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ State.Env.setValue(
+ *E, createOptionalValue(State.Env.getResultObjectLocation(*E),
+ State.Env.getBoolLiteralValue(true), State.Env));
+}
+
+void transferOptionalHasValueCall(const CXXMemberCallExpr *CallExpr,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ if (auto *HasValueVal = getHasValue(
+ State.Env, getImplicitObjectLocation(*CallExpr, State.Env))) {
+ State.Env.setValue(*CallExpr, *HasValueVal);
+ }
+}
+
+/// `ModelPred` builds a logical formula relating the predicate in
+/// `ValueOrPredExpr` to the optional's `has_value` property.
+void transferValueOrImpl(
+ const clang::Expr *ValueOrPredExpr, const MatchFinder::MatchResult &Result,
+ LatticeTransferState &State,
+ const Formula &(*ModelPred)(Environment &Env, const Formula &ExprVal,
+ const Formula &HasValueVal)) {
+ auto &Env = State.Env;
+
+ const auto *MCE =
+ Result.Nodes.getNodeAs<clang::CXXMemberCallExpr>(ValueOrCallID);
+
+ auto *HasValueVal =
+ getHasValue(State.Env, getImplicitObjectLocation(*MCE, State.Env));
+ if (HasValueVal == nullptr)
+ return;
+
+ Env.assume(ModelPred(Env, forceBoolValue(Env, *ValueOrPredExpr),
+ HasValueVal->formula()));
+}
+
+void transferValueOrStringEmptyCall(const clang::Expr *ComparisonExpr,
+ const MatchFinder::MatchResult &Result,
+ LatticeTransferState &State) {
+ return transferValueOrImpl(ComparisonExpr, Result, State,
+ [](Environment &Env, const Formula &ExprVal,
+ const Formula &HasValueVal) -> const Formula & {
+ auto &A = Env.arena();
+ // If the result is *not* empty, then we know the
+ // optional must have been holding a value. If
+ // `ExprVal` is true, though, we don't learn
+ // anything definite about `has_value`, so we
+ // don't add any corresponding implications to
+ // the flow condition.
+ return A.makeImplies(A.makeNot(ExprVal),
+ HasValueVal);
+ });
+}
+
+void transferValueOrNotEqX(const Expr *ComparisonExpr,
+ const MatchFinder::MatchResult &Result,
+ LatticeTransferState &State) {
+ transferValueOrImpl(ComparisonExpr, Result, State,
+ [](Environment &Env, const Formula &ExprVal,
+ const Formula &HasValueVal) -> const Formula & {
+ auto &A = Env.arena();
+ // We know that if `(opt.value_or(X) != X)` then
+ // `opt.hasValue()`, even without knowing further
+ // details about the contents of `opt`.
+ return A.makeImplies(ExprVal, HasValueVal);
+ });
+}
+
+void transferCallReturningOptional(const CallExpr *E,
+ const MatchFinder::MatchResult &Result,
+ LatticeTransferState &State) {
+ if (State.Env.getValue(*E) != nullptr)
+ return;
+
+ RecordStorageLocation *Loc = nullptr;
+ if (E->isPRValue()) {
+ Loc = &State.Env.getResultObjectLocation(*E);
+ } else {
+ Loc = State.Env.get<RecordStorageLocation>(*E);
+ if (Loc == nullptr) {
+ Loc = &cast<RecordStorageLocation>(State.Env.createStorageLocation(*E));
+ State.Env.setStorageLocation(*E, *Loc);
+ }
+ }
+
+ RecordValue &Val =
+ createOptionalValue(*Loc, State.Env.makeAtomicBoolValue(), State.Env);
+ if (E->isPRValue())
+ State.Env.setValue(*E, Val);
+}
+
+void constructOptionalValue(const Expr &E, Environment &Env,
+ BoolValue &HasValueVal) {
+ RecordStorageLocation &Loc = Env.getResultObjectLocation(E);
+ Env.setValue(E, createOptionalValue(Loc, HasValueVal, Env));
+}
+
+/// Returns a symbolic value for the "has_value" property of an `optional<T>`
+/// value that is constructed/assigned from a value of type `U` or `optional<U>`
+/// where `T` is constructible from `U`.
+BoolValue &valueOrConversionHasValue(const FunctionDecl &F, const Expr &E,
+ const MatchFinder::MatchResult &MatchRes,
+ LatticeTransferState &State) {
+ assert(F.getTemplateSpecializationArgs() != nullptr);
+ assert(F.getTemplateSpecializationArgs()->size() > 0);
+
+ const int TemplateParamOptionalWrappersCount =
+ countOptionalWrappers(*MatchRes.Context, F.getTemplateSpecializationArgs()
+ ->get(0)
+ .getAsType()
+ .getNonReferenceType());
+ const int ArgTypeOptionalWrappersCount = countOptionalWrappers(
+ *MatchRes.Context, E.getType().getNonReferenceType());
+
+ // Check if this is a constructor/assignment call for `optional<T>` with
+ // argument of type `U` such that `T` is constructible from `U`.
+ if (TemplateParamOptionalWrappersCount == ArgTypeOptionalWrappersCount)
+ return State.Env.getBoolLiteralValue(true);
+
+ // This is a constructor/assignment call for `optional<T>` with argument of
+ // type `optional<U>` such that `T` is constructible from `U`.
+ auto *Loc = State.Env.get<RecordStorageLocation>(E);
+ if (auto *HasValueVal = getHasValue(State.Env, Loc))
+ return *HasValueVal;
+ return State.Env.makeAtomicBoolValue();
+}
+
+void transferValueOrConversionConstructor(
+ const CXXConstructExpr *E, const MatchFinder::MatchResult &MatchRes,
+ LatticeTransferState &State) {
+ assert(E->getNumArgs() > 0);
+
+ constructOptionalValue(*E, State.Env,
+ valueOrConversionHasValue(*E->getConstructor(),
+ *E->getArg(0), MatchRes,
+ State));
+}
+
+void transferAssignment(const CXXOperatorCallExpr *E, BoolValue &HasValueVal,
+ LatticeTransferState &State) {
+ assert(E->getNumArgs() > 0);
+
+ if (auto *Loc = State.Env.get<RecordStorageLocation>(*E->getArg(0))) {
+ createOptionalValue(*Loc, HasValueVal, State.Env);
+
+ // Assign a storage location for the whole expression.
+ State.Env.setStorageLocation(*E, *Loc);
+ }
+}
+
+void transferValueOrConversionAssignment(
+ const CXXOperatorCallExpr *E, const MatchFinder::MatchResult &MatchRes,
+ LatticeTransferState &State) {
+ assert(E->getNumArgs() > 1);
+ transferAssignment(E,
+ valueOrConversionHasValue(*E->getDirectCallee(),
+ *E->getArg(1), MatchRes, State),
+ State);
+}
+
+void transferNulloptAssignment(const CXXOperatorCallExpr *E,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ transferAssignment(E, State.Env.getBoolLiteralValue(false), State);
+}
+
+void transferSwap(RecordStorageLocation *Loc1, RecordStorageLocation *Loc2,
+ Environment &Env) {
+ // We account for cases where one or both of the optionals are not modeled,
+ // either lacking associated storage locations, or lacking values associated
+ // to such storage locations.
+
+ if (Loc1 == nullptr) {
+ if (Loc2 != nullptr)
+ createOptionalValue(*Loc2, Env.makeAtomicBoolValue(), Env);
+ return;
+ }
+ if (Loc2 == nullptr) {
+ createOptionalValue(*Loc1, Env.makeAtomicBoolValue(), Env);
+ return;
+ }
+
+ // Both expressions have locations, though they may not have corresponding
+ // values. In that case, we create a fresh value at this point. Note that if
+ // two branches both do this, they will not share the value, but it at least
+ // allows for local reasoning about the value. To avoid the above, we would
+ // need *lazy* value allocation.
+ // FIXME: allocate values lazily, instead of just creating a fresh value.
+ BoolValue *BoolVal1 = getHasValue(Env, Loc1);
+ if (BoolVal1 == nullptr)
+ BoolVal1 = &Env.makeAtomicBoolValue();
+
+ BoolValue *BoolVal2 = getHasValue(Env, Loc2);
+ if (BoolVal2 == nullptr)
+ BoolVal2 = &Env.makeAtomicBoolValue();
+
+ createOptionalValue(*Loc1, *BoolVal2, Env);
+ createOptionalValue(*Loc2, *BoolVal1, Env);
+}
+
+void transferSwapCall(const CXXMemberCallExpr *E,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ assert(E->getNumArgs() == 1);
+ auto *OtherLoc = State.Env.get<RecordStorageLocation>(*E->getArg(0));
+ transferSwap(getImplicitObjectLocation(*E, State.Env), OtherLoc, State.Env);
+}
+
+void transferStdSwapCall(const CallExpr *E, const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ assert(E->getNumArgs() == 2);
+ auto *Arg0Loc = State.Env.get<RecordStorageLocation>(*E->getArg(0));
+ auto *Arg1Loc = State.Env.get<RecordStorageLocation>(*E->getArg(1));
+ transferSwap(Arg0Loc, Arg1Loc, State.Env);
+}
+
+void transferStdForwardCall(const CallExpr *E, const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ assert(E->getNumArgs() == 1);
+
+ if (auto *Loc = State.Env.getStorageLocation(*E->getArg(0)))
+ State.Env.setStorageLocation(*E, *Loc);
+}
+
+const Formula &evaluateEquality(Arena &A, const Formula &EqVal,
+ const Formula &LHS, const Formula &RHS) {
+ // Logically, an optional<T> object is composed of two values - a `has_value`
+ // bit and a value of type T. Equality of optional objects compares both
+ // values. Therefore, merely comparing the `has_value` bits isn't sufficient:
+ // when two optional objects are engaged, the equality of their respective
+ // values of type T matters. Since we only track the `has_value` bits, we
+ // can't make any conclusions about equality when we know that two optional
+ // objects are engaged.
+ //
+ // We express this as two facts about the equality:
+ // a) EqVal => (LHS & RHS) v (!RHS & !LHS)
+ // If they are equal, then either both are set or both are unset.
+ // b) (!LHS & !RHS) => EqVal
+ // If neither is set, then they are equal.
+ // We rewrite b) as !EqVal => (LHS v RHS), for a more compact formula.
+ return A.makeAnd(
+ A.makeImplies(EqVal, A.makeOr(A.makeAnd(LHS, RHS),
+ A.makeAnd(A.makeNot(LHS), A.makeNot(RHS)))),
+ A.makeImplies(A.makeNot(EqVal), A.makeOr(LHS, RHS)));
+}
+
+void transferOptionalAndOptionalCmp(const clang::CXXOperatorCallExpr *CmpExpr,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ Environment &Env = State.Env;
+ auto &A = Env.arena();
+ auto *CmpValue = &forceBoolValue(Env, *CmpExpr);
+ auto *Arg0Loc = Env.get<RecordStorageLocation>(*CmpExpr->getArg(0));
+ if (auto *LHasVal = getHasValue(Env, Arg0Loc)) {
+ auto *Arg1Loc = Env.get<RecordStorageLocation>(*CmpExpr->getArg(1));
+ if (auto *RHasVal = getHasValue(Env, Arg1Loc)) {
+ if (CmpExpr->getOperator() == clang::OO_ExclaimEqual)
+ CmpValue = &A.makeNot(*CmpValue);
+ Env.assume(evaluateEquality(A, *CmpValue, LHasVal->formula(),
+ RHasVal->formula()));
+ }
+ }
+}
+
+void transferOptionalAndValueCmp(const clang::CXXOperatorCallExpr *CmpExpr,
+ const clang::Expr *E, Environment &Env) {
+ auto &A = Env.arena();
+ auto *CmpValue = &forceBoolValue(Env, *CmpExpr);
+ auto *Loc = Env.get<RecordStorageLocation>(*E);
+ if (auto *HasVal = getHasValue(Env, Loc)) {
+ if (CmpExpr->getOperator() == clang::OO_ExclaimEqual)
+ CmpValue = &A.makeNot(*CmpValue);
+ Env.assume(
+ evaluateEquality(A, *CmpValue, HasVal->formula(), A.makeLiteral(true)));
+ }
+}
+
+void transferOptionalAndNulloptCmp(const clang::CXXOperatorCallExpr *CmpExpr,
+ const clang::Expr *E, Environment &Env) {
+ auto &A = Env.arena();
+ auto *CmpValue = &forceBoolValue(Env, *CmpExpr);
+ auto *Loc = Env.get<RecordStorageLocation>(*E);
+ if (auto *HasVal = getHasValue(Env, Loc)) {
+ if (CmpExpr->getOperator() == clang::OO_ExclaimEqual)
+ CmpValue = &A.makeNot(*CmpValue);
+ Env.assume(evaluateEquality(A, *CmpValue, HasVal->formula(),
+ A.makeLiteral(false)));
+ }
+}
+
+std::optional<StatementMatcher>
+ignorableOptional(const UncheckedOptionalAccessModelOptions &Options) {
+ if (Options.IgnoreSmartPointerDereference) {
+ auto SmartPtrUse = expr(ignoringParenImpCasts(cxxOperatorCallExpr(
+ anyOf(hasOverloadedOperatorName("->"), hasOverloadedOperatorName("*")),
+ unless(hasArgument(0, expr(hasOptionalType()))))));
+ return expr(
+ anyOf(SmartPtrUse, memberExpr(hasObjectExpression(SmartPtrUse))));
+ }
+ return std::nullopt;
+}
+
+StatementMatcher
+valueCall(const std::optional<StatementMatcher> &IgnorableOptional) {
+ return isOptionalMemberCallWithNameMatcher(hasName("value"),
+ IgnorableOptional);
+}
+
+StatementMatcher
+valueOperatorCall(const std::optional<StatementMatcher> &IgnorableOptional) {
+ return expr(anyOf(isOptionalOperatorCallWithName("*", IgnorableOptional),
+ isOptionalOperatorCallWithName("->", IgnorableOptional)));
+}
+
+auto buildTransferMatchSwitch() {
+ // FIXME: Evaluate the efficiency of matchers. If using matchers results in a
+ // lot of duplicated work (e.g. string comparisons), consider providing APIs
+ // that avoid it through memoization.
+ return CFGMatchSwitchBuilder<LatticeTransferState>()
+ // make_optional
+ .CaseOfCFGStmt<CallExpr>(isMakeOptionalCall(), transferMakeOptionalCall)
+
+ // optional::optional (in place)
+ .CaseOfCFGStmt<CXXConstructExpr>(
+ isOptionalInPlaceConstructor(),
+ [](const CXXConstructExpr *E, const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ constructOptionalValue(*E, State.Env,
+ State.Env.getBoolLiteralValue(true));
+ })
+ // optional::optional(nullopt_t)
+ .CaseOfCFGStmt<CXXConstructExpr>(
+ isOptionalNulloptConstructor(),
+ [](const CXXConstructExpr *E, const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ constructOptionalValue(*E, State.Env,
+ State.Env.getBoolLiteralValue(false));
+ })
+ // optional::optional (value/conversion)
+ .CaseOfCFGStmt<CXXConstructExpr>(isOptionalValueOrConversionConstructor(),
+ transferValueOrConversionConstructor)
+
+ // optional::operator=
+ .CaseOfCFGStmt<CXXOperatorCallExpr>(
+ isOptionalValueOrConversionAssignment(),
+ transferValueOrConversionAssignment)
+ .CaseOfCFGStmt<CXXOperatorCallExpr>(isOptionalNulloptAssignment(),
+ transferNulloptAssignment)
+
+ // optional::value
+ .CaseOfCFGStmt<CXXMemberCallExpr>(
+ valueCall(std::nullopt),
+ [](const CXXMemberCallExpr *E, const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ transferUnwrapCall(E, E->getImplicitObjectArgument(), State);
+ })
+
+ // optional::operator*
+ .CaseOfCFGStmt<CallExpr>(isOptionalOperatorCallWithName("*"),
+ [](const CallExpr *E,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ transferUnwrapCall(E, E->getArg(0), State);
+ })
+
+ // optional::operator->
+ .CaseOfCFGStmt<CallExpr>(isOptionalOperatorCallWithName("->"),
+ [](const CallExpr *E,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ transferArrowOpCall(E, E->getArg(0), State);
+ })
+
+ // optional::has_value, optional::hasValue
+ // Of the supported optionals only folly::Optional uses hasValue, but this
+ // will also pass for other types
+ .CaseOfCFGStmt<CXXMemberCallExpr>(
+ isOptionalMemberCallWithNameMatcher(
+ hasAnyName("has_value", "hasValue")),
+ transferOptionalHasValueCall)
+
+ // optional::operator bool
+ .CaseOfCFGStmt<CXXMemberCallExpr>(
+ isOptionalMemberCallWithNameMatcher(hasName("operator bool")),
+ transferOptionalHasValueCall)
+
+ // optional::emplace
+ .CaseOfCFGStmt<CXXMemberCallExpr>(
+ isOptionalMemberCallWithNameMatcher(hasName("emplace")),
+ [](const CXXMemberCallExpr *E, const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ if (RecordStorageLocation *Loc =
+ getImplicitObjectLocation(*E, State.Env)) {
+ createOptionalValue(*Loc, State.Env.getBoolLiteralValue(true),
+ State.Env);
+ }
+ })
+
+ // optional::reset
+ .CaseOfCFGStmt<CXXMemberCallExpr>(
+ isOptionalMemberCallWithNameMatcher(hasName("reset")),
+ [](const CXXMemberCallExpr *E, const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ if (RecordStorageLocation *Loc =
+ getImplicitObjectLocation(*E, State.Env)) {
+ createOptionalValue(*Loc, State.Env.getBoolLiteralValue(false),
+ State.Env);
+ }
+ })
+
+ // optional::swap
+ .CaseOfCFGStmt<CXXMemberCallExpr>(
+ isOptionalMemberCallWithNameMatcher(hasName("swap")),
+ transferSwapCall)
+
+ // std::swap
+ .CaseOfCFGStmt<CallExpr>(isStdSwapCall(), transferStdSwapCall)
+
+ // std::forward
+ .CaseOfCFGStmt<CallExpr>(isStdForwardCall(), transferStdForwardCall)
+
+ // opt.value_or("").empty()
+ .CaseOfCFGStmt<Expr>(isValueOrStringEmptyCall(),
+ transferValueOrStringEmptyCall)
+
+ // opt.value_or(X) != X
+ .CaseOfCFGStmt<Expr>(isValueOrNotEqX(), transferValueOrNotEqX)
+
+ // Comparisons (==, !=):
+ .CaseOfCFGStmt<CXXOperatorCallExpr>(
+ isComparisonOperatorCall(hasOptionalType(), hasOptionalType()),
+ transferOptionalAndOptionalCmp)
+ .CaseOfCFGStmt<CXXOperatorCallExpr>(
+ isComparisonOperatorCall(hasOptionalType(), hasNulloptType()),
+ [](const clang::CXXOperatorCallExpr *Cmp,
+ const MatchFinder::MatchResult &, LatticeTransferState &State) {
+ transferOptionalAndNulloptCmp(Cmp, Cmp->getArg(0), State.Env);
+ })
+ .CaseOfCFGStmt<CXXOperatorCallExpr>(
+ isComparisonOperatorCall(hasNulloptType(), hasOptionalType()),
+ [](const clang::CXXOperatorCallExpr *Cmp,
+ const MatchFinder::MatchResult &, LatticeTransferState &State) {
+ transferOptionalAndNulloptCmp(Cmp, Cmp->getArg(1), State.Env);
+ })
+ .CaseOfCFGStmt<CXXOperatorCallExpr>(
+ isComparisonOperatorCall(
+ hasOptionalType(),
+ unless(anyOf(hasOptionalType(), hasNulloptType()))),
+ [](const clang::CXXOperatorCallExpr *Cmp,
+ const MatchFinder::MatchResult &, LatticeTransferState &State) {
+ transferOptionalAndValueCmp(Cmp, Cmp->getArg(0), State.Env);
+ })
+ .CaseOfCFGStmt<CXXOperatorCallExpr>(
+ isComparisonOperatorCall(
+ unless(anyOf(hasOptionalType(), hasNulloptType())),
+ hasOptionalType()),
+ [](const clang::CXXOperatorCallExpr *Cmp,
+ const MatchFinder::MatchResult &, LatticeTransferState &State) {
+ transferOptionalAndValueCmp(Cmp, Cmp->getArg(1), State.Env);
+ })
+
+ // returns optional
+ .CaseOfCFGStmt<CallExpr>(isCallReturningOptional(),
+ transferCallReturningOptional)
+
+ .Build();
+}
+
+llvm::SmallVector<SourceLocation> diagnoseUnwrapCall(const Expr *ObjectExpr,
+ const Environment &Env) {
+ if (auto *OptionalLoc = cast_or_null<RecordStorageLocation>(
+ getLocBehindPossiblePointer(*ObjectExpr, Env))) {
+ auto *Prop = Env.getValue(locForHasValue(*OptionalLoc));
+ if (auto *HasValueVal = cast_or_null<BoolValue>(Prop)) {
+ if (Env.proves(HasValueVal->formula()))
+ return {};
+ }
+ }
+
+ // Record that this unwrap is *not* provably safe.
+ // FIXME: include either the name of the optional (if applicable) or a source
+ // range of the access for easier interpretation of the result.
+ return {ObjectExpr->getBeginLoc()};
+}
+
+auto buildDiagnoseMatchSwitch(
+ const UncheckedOptionalAccessModelOptions &Options) {
+ // FIXME: Evaluate the efficiency of matchers. If using matchers results in a
+ // lot of duplicated work (e.g. string comparisons), consider providing APIs
+ // that avoid it through memoization.
+ auto IgnorableOptional = ignorableOptional(Options);
+ return CFGMatchSwitchBuilder<const Environment,
+ llvm::SmallVector<SourceLocation>>()
+ // optional::value
+ .CaseOfCFGStmt<CXXMemberCallExpr>(
+ valueCall(IgnorableOptional),
+ [](const CXXMemberCallExpr *E, const MatchFinder::MatchResult &,
+ const Environment &Env) {
+ return diagnoseUnwrapCall(E->getImplicitObjectArgument(), Env);
+ })
+
+ // optional::operator*, optional::operator->
+ .CaseOfCFGStmt<CallExpr>(valueOperatorCall(IgnorableOptional),
+ [](const CallExpr *E,
+ const MatchFinder::MatchResult &,
+ const Environment &Env) {
+ return diagnoseUnwrapCall(E->getArg(0), Env);
+ })
+ .Build();
+}
+
+} // namespace
+
+ast_matchers::DeclarationMatcher
+UncheckedOptionalAccessModel::optionalClassDecl() {
+ return optionalClass();
+}
+
+static QualType valueTypeFromOptionalType(QualType OptionalTy) {
+ auto *CTSD =
+ cast<ClassTemplateSpecializationDecl>(OptionalTy->getAsCXXRecordDecl());
+ return CTSD->getTemplateArgs()[0].getAsType();
+}
+
+UncheckedOptionalAccessModel::UncheckedOptionalAccessModel(ASTContext &Ctx,
+ Environment &Env)
+ : DataflowAnalysis<UncheckedOptionalAccessModel, NoopLattice>(Ctx),
+ TransferMatchSwitch(buildTransferMatchSwitch()) {
+ Env.getDataflowAnalysisContext().setSyntheticFieldCallback(
+ [&Ctx](QualType Ty) -> llvm::StringMap<QualType> {
+ if (!isOptionalType(Ty))
+ return {};
+ return {{"value", valueTypeFromOptionalType(Ty)},
+ {"has_value", Ctx.BoolTy}};
+ });
+}
+
+void UncheckedOptionalAccessModel::transfer(const CFGElement &Elt,
+ NoopLattice &L, Environment &Env) {
+ LatticeTransferState State(L, Env);
+ TransferMatchSwitch(Elt, getASTContext(), State);
+}
+
+UncheckedOptionalAccessDiagnoser::UncheckedOptionalAccessDiagnoser(
+ UncheckedOptionalAccessModelOptions Options)
+ : DiagnoseMatchSwitch(buildDiagnoseMatchSwitch(Options)) {}
+
+} // namespace dataflow
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/RecordOps.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/RecordOps.cpp
new file mode 100644
index 000000000000..da4dd6dc0785
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/RecordOps.cpp
@@ -0,0 +1,118 @@
+//===-- RecordOps.cpp -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Operations on records (structs, classes, and unions).
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/RecordOps.h"
+
+#define DEBUG_TYPE "dataflow"
+
+void clang::dataflow::copyRecord(RecordStorageLocation &Src,
+ RecordStorageLocation &Dst, Environment &Env) {
+ auto SrcType = Src.getType().getCanonicalType().getUnqualifiedType();
+ auto DstType = Dst.getType().getCanonicalType().getUnqualifiedType();
+
+ auto SrcDecl = SrcType->getAsCXXRecordDecl();
+ auto DstDecl = DstType->getAsCXXRecordDecl();
+
+ bool compatibleTypes =
+ SrcType == DstType ||
+ (SrcDecl && DstDecl && SrcDecl->isDerivedFrom(DstDecl));
+ (void)compatibleTypes;
+
+ LLVM_DEBUG({
+ if (!compatibleTypes) {
+ llvm::dbgs() << "Source type " << Src.getType() << "\n";
+ llvm::dbgs() << "Destination type " << Dst.getType() << "\n";
+ }
+ });
+ assert(compatibleTypes);
+
+ for (auto [Field, DstFieldLoc] : Dst.children()) {
+ StorageLocation *SrcFieldLoc = Src.getChild(*Field);
+
+ assert(Field->getType()->isReferenceType() ||
+ (SrcFieldLoc != nullptr && DstFieldLoc != nullptr));
+
+ if (Field->getType()->isRecordType()) {
+ copyRecord(cast<RecordStorageLocation>(*SrcFieldLoc),
+ cast<RecordStorageLocation>(*DstFieldLoc), Env);
+ } else if (Field->getType()->isReferenceType()) {
+ Dst.setChild(*Field, SrcFieldLoc);
+ } else {
+ if (Value *Val = Env.getValue(*SrcFieldLoc))
+ Env.setValue(*DstFieldLoc, *Val);
+ else
+ Env.clearValue(*DstFieldLoc);
+ }
+ }
+
+ for (const auto &[Name, SynthFieldLoc] : Src.synthetic_fields()) {
+ if (SynthFieldLoc->getType()->isRecordType()) {
+ copyRecord(*cast<RecordStorageLocation>(SynthFieldLoc),
+ cast<RecordStorageLocation>(Dst.getSyntheticField(Name)), Env);
+ } else {
+ if (Value *Val = Env.getValue(*SynthFieldLoc))
+ Env.setValue(Dst.getSyntheticField(Name), *Val);
+ else
+ Env.clearValue(Dst.getSyntheticField(Name));
+ }
+ }
+
+ RecordValue *DstVal = &Env.create<RecordValue>(Dst);
+ Env.setValue(Dst, *DstVal);
+}
+
+bool clang::dataflow::recordsEqual(const RecordStorageLocation &Loc1,
+ const Environment &Env1,
+ const RecordStorageLocation &Loc2,
+ const Environment &Env2) {
+ LLVM_DEBUG({
+ if (Loc2.getType().getCanonicalType().getUnqualifiedType() !=
+ Loc1.getType().getCanonicalType().getUnqualifiedType()) {
+ llvm::dbgs() << "Loc1 type " << Loc1.getType() << "\n";
+ llvm::dbgs() << "Loc2 type " << Loc2.getType() << "\n";
+ }
+ });
+ assert(Loc2.getType().getCanonicalType().getUnqualifiedType() ==
+ Loc1.getType().getCanonicalType().getUnqualifiedType());
+
+ for (auto [Field, FieldLoc1] : Loc1.children()) {
+ StorageLocation *FieldLoc2 = Loc2.getChild(*Field);
+
+ assert(Field->getType()->isReferenceType() ||
+ (FieldLoc1 != nullptr && FieldLoc2 != nullptr));
+
+ if (Field->getType()->isRecordType()) {
+ if (!recordsEqual(cast<RecordStorageLocation>(*FieldLoc1), Env1,
+ cast<RecordStorageLocation>(*FieldLoc2), Env2))
+ return false;
+ } else if (Field->getType()->isReferenceType()) {
+ if (FieldLoc1 != FieldLoc2)
+ return false;
+ } else if (Env1.getValue(*FieldLoc1) != Env2.getValue(*FieldLoc2)) {
+ return false;
+ }
+ }
+
+ for (const auto &[Name, SynthFieldLoc1] : Loc1.synthetic_fields()) {
+ if (SynthFieldLoc1->getType()->isRecordType()) {
+ if (!recordsEqual(
+ *cast<RecordStorageLocation>(SynthFieldLoc1), Env1,
+ cast<RecordStorageLocation>(Loc2.getSyntheticField(Name)), Env2))
+ return false;
+ } else if (Env1.getValue(*SynthFieldLoc1) !=
+ Env2.getValue(Loc2.getSyntheticField(Name))) {
+ return false;
+ }
+ }
+
+ return true;
+}
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/SimplifyConstraints.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/SimplifyConstraints.cpp
new file mode 100644
index 000000000000..cc20202768b9
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/SimplifyConstraints.cpp
@@ -0,0 +1,180 @@
+//===-- SimplifyConstraints.cpp ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/SimplifyConstraints.h"
+#include "llvm/ADT/EquivalenceClasses.h"
+
+namespace clang {
+namespace dataflow {
+
+// Substitutes all occurrences of a given atom in `F` by a given formula and
+// returns the resulting formula.
+static const Formula &
+substitute(const Formula &F,
+ const llvm::DenseMap<Atom, const Formula *> &Substitutions,
+ Arena &arena) {
+ switch (F.kind()) {
+ case Formula::AtomRef:
+ if (auto iter = Substitutions.find(F.getAtom());
+ iter != Substitutions.end())
+ return *iter->second;
+ return F;
+ case Formula::Literal:
+ return F;
+ case Formula::Not:
+ return arena.makeNot(substitute(*F.operands()[0], Substitutions, arena));
+ case Formula::And:
+ return arena.makeAnd(substitute(*F.operands()[0], Substitutions, arena),
+ substitute(*F.operands()[1], Substitutions, arena));
+ case Formula::Or:
+ return arena.makeOr(substitute(*F.operands()[0], Substitutions, arena),
+ substitute(*F.operands()[1], Substitutions, arena));
+ case Formula::Implies:
+ return arena.makeImplies(
+ substitute(*F.operands()[0], Substitutions, arena),
+ substitute(*F.operands()[1], Substitutions, arena));
+ case Formula::Equal:
+ return arena.makeEquals(substitute(*F.operands()[0], Substitutions, arena),
+ substitute(*F.operands()[1], Substitutions, arena));
+ }
+ llvm_unreachable("Unknown formula kind");
+}
+
+// Returns the result of replacing atoms in `Atoms` with the leader of their
+// equivalence class in `EquivalentAtoms`.
+// Atoms that don't have an equivalence class in `EquivalentAtoms` are inserted
+// into it as single-member equivalence classes.
+static llvm::DenseSet<Atom>
+projectToLeaders(const llvm::DenseSet<Atom> &Atoms,
+ llvm::EquivalenceClasses<Atom> &EquivalentAtoms) {
+ llvm::DenseSet<Atom> Result;
+
+ for (Atom Atom : Atoms)
+ Result.insert(EquivalentAtoms.getOrInsertLeaderValue(Atom));
+
+ return Result;
+}
+
+// Returns the atoms in the equivalence class for the leader identified by
+// `LeaderIt`.
+static llvm::SmallVector<Atom>
+atomsInEquivalenceClass(const llvm::EquivalenceClasses<Atom> &EquivalentAtoms,
+ llvm::EquivalenceClasses<Atom>::iterator LeaderIt) {
+ llvm::SmallVector<Atom> Result;
+ for (auto MemberIt = EquivalentAtoms.member_begin(LeaderIt);
+ MemberIt != EquivalentAtoms.member_end(); ++MemberIt)
+ Result.push_back(*MemberIt);
+ return Result;
+}
+
+void simplifyConstraints(llvm::SetVector<const Formula *> &Constraints,
+ Arena &arena, SimplifyConstraintsInfo *Info) {
+ auto contradiction = [&]() {
+ Constraints.clear();
+ Constraints.insert(&arena.makeLiteral(false));
+ };
+
+ llvm::EquivalenceClasses<Atom> EquivalentAtoms;
+ llvm::DenseSet<Atom> TrueAtoms;
+ llvm::DenseSet<Atom> FalseAtoms;
+
+ while (true) {
+ for (const auto *Constraint : Constraints) {
+ switch (Constraint->kind()) {
+ case Formula::AtomRef:
+ TrueAtoms.insert(Constraint->getAtom());
+ break;
+ case Formula::Not:
+ if (Constraint->operands()[0]->kind() == Formula::AtomRef)
+ FalseAtoms.insert(Constraint->operands()[0]->getAtom());
+ break;
+ case Formula::Equal: {
+ ArrayRef<const Formula *> operands = Constraint->operands();
+ if (operands[0]->kind() == Formula::AtomRef &&
+ operands[1]->kind() == Formula::AtomRef) {
+ EquivalentAtoms.unionSets(operands[0]->getAtom(),
+ operands[1]->getAtom());
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ TrueAtoms = projectToLeaders(TrueAtoms, EquivalentAtoms);
+ FalseAtoms = projectToLeaders(FalseAtoms, EquivalentAtoms);
+
+ llvm::DenseMap<Atom, const Formula *> Substitutions;
+ for (auto It = EquivalentAtoms.begin(); It != EquivalentAtoms.end(); ++It) {
+ Atom TheAtom = It->getData();
+ Atom Leader = EquivalentAtoms.getLeaderValue(TheAtom);
+ if (TrueAtoms.contains(Leader)) {
+ if (FalseAtoms.contains(Leader)) {
+ contradiction();
+ return;
+ }
+ Substitutions.insert({TheAtom, &arena.makeLiteral(true)});
+ } else if (FalseAtoms.contains(Leader)) {
+ Substitutions.insert({TheAtom, &arena.makeLiteral(false)});
+ } else if (TheAtom != Leader) {
+ Substitutions.insert({TheAtom, &arena.makeAtomRef(Leader)});
+ }
+ }
+
+ llvm::SetVector<const Formula *> NewConstraints;
+ for (const auto *Constraint : Constraints) {
+ const Formula &NewConstraint =
+ substitute(*Constraint, Substitutions, arena);
+ if (NewConstraint.isLiteral(true))
+ continue;
+ if (NewConstraint.isLiteral(false)) {
+ contradiction();
+ return;
+ }
+ if (NewConstraint.kind() == Formula::And) {
+ NewConstraints.insert(NewConstraint.operands()[0]);
+ NewConstraints.insert(NewConstraint.operands()[1]);
+ continue;
+ }
+ NewConstraints.insert(&NewConstraint);
+ }
+
+ if (NewConstraints == Constraints)
+ break;
+ Constraints = std::move(NewConstraints);
+ }
+
+ if (Info) {
+ for (auto It = EquivalentAtoms.begin(), End = EquivalentAtoms.end();
+ It != End; ++It) {
+ if (!It->isLeader())
+ continue;
+ Atom At = *EquivalentAtoms.findLeader(It);
+ if (TrueAtoms.contains(At) || FalseAtoms.contains(At))
+ continue;
+ llvm::SmallVector<Atom> Atoms =
+ atomsInEquivalenceClass(EquivalentAtoms, It);
+ if (Atoms.size() == 1)
+ continue;
+ std::sort(Atoms.begin(), Atoms.end());
+ Info->EquivalentAtoms.push_back(std::move(Atoms));
+ }
+ for (Atom At : TrueAtoms)
+ Info->TrueAtoms.append(atomsInEquivalenceClass(
+ EquivalentAtoms, EquivalentAtoms.findValue(At)));
+ std::sort(Info->TrueAtoms.begin(), Info->TrueAtoms.end());
+ for (Atom At : FalseAtoms)
+ Info->FalseAtoms.append(atomsInEquivalenceClass(
+ EquivalentAtoms, EquivalentAtoms.findValue(At)));
+ std::sort(Info->FalseAtoms.begin(), Info->FalseAtoms.end());
+ }
+}
+
+} // namespace dataflow
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp
new file mode 100644
index 000000000000..2271a75fbcaf
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp
@@ -0,0 +1,834 @@
+//===-- Transfer.cpp --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines transfer functions that evaluate program statements and
+// update an environment accordingly.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/Transfer.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Analysis/FlowSensitive/ControlFlowContext.h"
+#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/Analysis/FlowSensitive/NoopAnalysis.h"
+#include "clang/Analysis/FlowSensitive/RecordOps.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/OperatorKinds.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Debug.h"
+#include <assert.h>
+#include <cassert>
+
+#define DEBUG_TYPE "dataflow"
+
+namespace clang {
+namespace dataflow {
+
+const Environment *StmtToEnvMap::getEnvironment(const Stmt &S) const {
+ auto BlockIt = CFCtx.getStmtToBlock().find(&ignoreCFGOmittedNodes(S));
+ assert(BlockIt != CFCtx.getStmtToBlock().end());
+ if (!CFCtx.isBlockReachable(*BlockIt->getSecond()))
+ return nullptr;
+ if (BlockIt->getSecond()->getBlockID() == CurBlockID)
+ return &CurState.Env;
+ const auto &State = BlockToState[BlockIt->getSecond()->getBlockID()];
+ if (!(State))
+ return nullptr;
+ return &State->Env;
+}
+
+static BoolValue &evaluateBooleanEquality(const Expr &LHS, const Expr &RHS,
+ Environment &Env) {
+ Value *LHSValue = Env.getValue(LHS);
+ Value *RHSValue = Env.getValue(RHS);
+
+ if (LHSValue == RHSValue)
+ return Env.getBoolLiteralValue(true);
+
+ if (auto *LHSBool = dyn_cast_or_null<BoolValue>(LHSValue))
+ if (auto *RHSBool = dyn_cast_or_null<BoolValue>(RHSValue))
+ return Env.makeIff(*LHSBool, *RHSBool);
+
+ return Env.makeAtomicBoolValue();
+}
+
+static BoolValue &unpackValue(BoolValue &V, Environment &Env) {
+ if (auto *Top = llvm::dyn_cast<TopBoolValue>(&V)) {
+ auto &A = Env.getDataflowAnalysisContext().arena();
+ return A.makeBoolValue(A.makeAtomRef(Top->getAtom()));
+ }
+ return V;
+}
+
+// Unpacks the value (if any) associated with `E` and updates `E` to the new
+// value, if any unpacking occured. Also, does the lvalue-to-rvalue conversion,
+// by skipping past the reference.
+static Value *maybeUnpackLValueExpr(const Expr &E, Environment &Env) {
+ auto *Loc = Env.getStorageLocation(E);
+ if (Loc == nullptr)
+ return nullptr;
+ auto *Val = Env.getValue(*Loc);
+
+ auto *B = dyn_cast_or_null<BoolValue>(Val);
+ if (B == nullptr)
+ return Val;
+
+ auto &UnpackedVal = unpackValue(*B, Env);
+ if (&UnpackedVal == Val)
+ return Val;
+ Env.setValue(*Loc, UnpackedVal);
+ return &UnpackedVal;
+}
+
+static void propagateValue(const Expr &From, const Expr &To, Environment &Env) {
+ if (auto *Val = Env.getValue(From))
+ Env.setValue(To, *Val);
+}
+
+static void propagateStorageLocation(const Expr &From, const Expr &To,
+ Environment &Env) {
+ if (auto *Loc = Env.getStorageLocation(From))
+ Env.setStorageLocation(To, *Loc);
+}
+
+// Propagates the value or storage location of `From` to `To` in cases where
+// `From` may be either a glvalue or a prvalue. `To` must be a glvalue iff
+// `From` is a glvalue.
+static void propagateValueOrStorageLocation(const Expr &From, const Expr &To,
+ Environment &Env) {
+ assert(From.isGLValue() == To.isGLValue());
+ if (From.isGLValue())
+ propagateStorageLocation(From, To, Env);
+ else
+ propagateValue(From, To, Env);
+}
+
+namespace {
+
+class TransferVisitor : public ConstStmtVisitor<TransferVisitor> {
+public:
+ TransferVisitor(const StmtToEnvMap &StmtToEnv, Environment &Env)
+ : StmtToEnv(StmtToEnv), Env(Env) {}
+
+ void VisitBinaryOperator(const BinaryOperator *S) {
+ const Expr *LHS = S->getLHS();
+ assert(LHS != nullptr);
+
+ const Expr *RHS = S->getRHS();
+ assert(RHS != nullptr);
+
+ switch (S->getOpcode()) {
+ case BO_Assign: {
+ auto *LHSLoc = Env.getStorageLocation(*LHS);
+ if (LHSLoc == nullptr)
+ break;
+
+ auto *RHSVal = Env.getValue(*RHS);
+ if (RHSVal == nullptr)
+ break;
+
+ // Assign a value to the storage location of the left-hand side.
+ Env.setValue(*LHSLoc, *RHSVal);
+
+ // Assign a storage location for the whole expression.
+ Env.setStorageLocation(*S, *LHSLoc);
+ break;
+ }
+ case BO_LAnd:
+ case BO_LOr: {
+ BoolValue &LHSVal = getLogicOperatorSubExprValue(*LHS);
+ BoolValue &RHSVal = getLogicOperatorSubExprValue(*RHS);
+
+ if (S->getOpcode() == BO_LAnd)
+ Env.setValue(*S, Env.makeAnd(LHSVal, RHSVal));
+ else
+ Env.setValue(*S, Env.makeOr(LHSVal, RHSVal));
+ break;
+ }
+ case BO_NE:
+ case BO_EQ: {
+ auto &LHSEqRHSValue = evaluateBooleanEquality(*LHS, *RHS, Env);
+ Env.setValue(*S, S->getOpcode() == BO_EQ ? LHSEqRHSValue
+ : Env.makeNot(LHSEqRHSValue));
+ break;
+ }
+ case BO_Comma: {
+ propagateValueOrStorageLocation(*RHS, *S, Env);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ void VisitDeclRefExpr(const DeclRefExpr *S) {
+ const ValueDecl *VD = S->getDecl();
+ assert(VD != nullptr);
+
+ // Some `DeclRefExpr`s aren't glvalues, so we can't associate them with a
+ // `StorageLocation`, and there's also no sensible `Value` that we can
+ // assign to them. Examples:
+ // - Non-static member variables
+ // - Non static member functions
+ // Note: Member operators are an exception to this, but apparently only
+ // if the `DeclRefExpr` is used within the callee of a
+ // `CXXOperatorCallExpr`. In other cases, for example when applying the
+ // address-of operator, the `DeclRefExpr` is a prvalue.
+ if (!S->isGLValue())
+ return;
+
+ auto *DeclLoc = Env.getStorageLocation(*VD);
+ if (DeclLoc == nullptr)
+ return;
+
+ Env.setStorageLocation(*S, *DeclLoc);
+ }
+
+ void VisitDeclStmt(const DeclStmt *S) {
+ // Group decls are converted into single decls in the CFG so the cast below
+ // is safe.
+ const auto &D = *cast<VarDecl>(S->getSingleDecl());
+
+ ProcessVarDecl(D);
+ }
+
+ void ProcessVarDecl(const VarDecl &D) {
+ // Static local vars are already initialized in `Environment`.
+ if (D.hasGlobalStorage())
+ return;
+
+ // If this is the holding variable for a `BindingDecl`, we may already
+ // have a storage location set up -- so check. (See also explanation below
+ // where we process the `BindingDecl`.)
+ if (D.getType()->isReferenceType() && Env.getStorageLocation(D) != nullptr)
+ return;
+
+ assert(Env.getStorageLocation(D) == nullptr);
+
+ Env.setStorageLocation(D, Env.createObject(D));
+
+ // `DecompositionDecl` must be handled after we've interpreted the loc
+ // itself, because the binding expression refers back to the
+ // `DecompositionDecl` (even though it has no written name).
+ if (const auto *Decomp = dyn_cast<DecompositionDecl>(&D)) {
+ // If VarDecl is a DecompositionDecl, evaluate each of its bindings. This
+ // needs to be evaluated after initializing the values in the storage for
+ // VarDecl, as the bindings refer to them.
+ // FIXME: Add support for ArraySubscriptExpr.
+ // FIXME: Consider adding AST nodes used in BindingDecls to the CFG.
+ for (const auto *B : Decomp->bindings()) {
+ if (auto *ME = dyn_cast_or_null<MemberExpr>(B->getBinding())) {
+ auto *DE = dyn_cast_or_null<DeclRefExpr>(ME->getBase());
+ if (DE == nullptr)
+ continue;
+
+ // ME and its base haven't been visited because they aren't included
+ // in the statements of the CFG basic block.
+ VisitDeclRefExpr(DE);
+ VisitMemberExpr(ME);
+
+ if (auto *Loc = Env.getStorageLocation(*ME))
+ Env.setStorageLocation(*B, *Loc);
+ } else if (auto *VD = B->getHoldingVar()) {
+ // Holding vars are used to back the `BindingDecl`s of tuple-like
+ // types. The holding var declarations appear after the
+ // `DecompositionDecl`, so we have to explicitly process them here
+ // to know their storage location. They will be processed a second
+ // time when we visit their `VarDecl`s, so we have code that protects
+ // against this above.
+ ProcessVarDecl(*VD);
+ auto *VDLoc = Env.getStorageLocation(*VD);
+ assert(VDLoc != nullptr);
+ Env.setStorageLocation(*B, *VDLoc);
+ }
+ }
+ }
+ }
+
+ void VisitImplicitCastExpr(const ImplicitCastExpr *S) {
+ const Expr *SubExpr = S->getSubExpr();
+ assert(SubExpr != nullptr);
+
+ switch (S->getCastKind()) {
+ case CK_IntegralToBoolean: {
+ // This cast creates a new, boolean value from the integral value. We
+ // model that with a fresh value in the environment, unless it's already a
+ // boolean.
+ if (auto *SubExprVal =
+ dyn_cast_or_null<BoolValue>(Env.getValue(*SubExpr)))
+ Env.setValue(*S, *SubExprVal);
+ else
+ // FIXME: If integer modeling is added, then update this code to create
+ // the boolean based on the integer model.
+ Env.setValue(*S, Env.makeAtomicBoolValue());
+ break;
+ }
+
+ case CK_LValueToRValue: {
+ // When an L-value is used as an R-value, it may result in sharing, so we
+ // need to unpack any nested `Top`s.
+ auto *SubExprVal = maybeUnpackLValueExpr(*SubExpr, Env);
+ if (SubExprVal == nullptr)
+ break;
+
+ Env.setValue(*S, *SubExprVal);
+ break;
+ }
+
+ case CK_IntegralCast:
+ // FIXME: This cast creates a new integral value from the
+ // subexpression. But, because we don't model integers, we don't
+ // distinguish between this new value and the underlying one. If integer
+ // modeling is added, then update this code to create a fresh location and
+ // value.
+ case CK_UncheckedDerivedToBase:
+ case CK_ConstructorConversion:
+ case CK_UserDefinedConversion:
+ // FIXME: Add tests that excercise CK_UncheckedDerivedToBase,
+ // CK_ConstructorConversion, and CK_UserDefinedConversion.
+ case CK_NoOp: {
+ // FIXME: Consider making `Environment::getStorageLocation` skip noop
+ // expressions (this and other similar expressions in the file) instead
+ // of assigning them storage locations.
+ propagateValueOrStorageLocation(*SubExpr, *S, Env);
+ break;
+ }
+ case CK_NullToPointer: {
+ auto &NullPointerVal =
+ Env.getOrCreateNullPointerValue(S->getType()->getPointeeType());
+ Env.setValue(*S, NullPointerVal);
+ break;
+ }
+ case CK_NullToMemberPointer:
+ // FIXME: Implement pointers to members. For now, don't associate a value
+ // with this expression.
+ break;
+ case CK_FunctionToPointerDecay: {
+ StorageLocation *PointeeLoc = Env.getStorageLocation(*SubExpr);
+ if (PointeeLoc == nullptr)
+ break;
+
+ Env.setValue(*S, Env.create<PointerValue>(*PointeeLoc));
+ break;
+ }
+ case CK_BuiltinFnToFnPtr:
+ // Despite its name, the result type of `BuiltinFnToFnPtr` is a function,
+ // not a function pointer. In addition, builtin functions can only be
+ // called directly; it is not legal to take their address. We therefore
+ // don't need to create a value or storage location for them.
+ break;
+ default:
+ break;
+ }
+ }
+
+ void VisitUnaryOperator(const UnaryOperator *S) {
+ const Expr *SubExpr = S->getSubExpr();
+ assert(SubExpr != nullptr);
+
+ switch (S->getOpcode()) {
+ case UO_Deref: {
+ const auto *SubExprVal = Env.get<PointerValue>(*SubExpr);
+ if (SubExprVal == nullptr)
+ break;
+
+ Env.setStorageLocation(*S, SubExprVal->getPointeeLoc());
+ break;
+ }
+ case UO_AddrOf: {
+ // FIXME: Model pointers to members.
+ if (S->getType()->isMemberPointerType())
+ break;
+
+ if (StorageLocation *PointeeLoc = Env.getStorageLocation(*SubExpr))
+ Env.setValue(*S, Env.create<PointerValue>(*PointeeLoc));
+ break;
+ }
+ case UO_LNot: {
+ auto *SubExprVal = dyn_cast_or_null<BoolValue>(Env.getValue(*SubExpr));
+ if (SubExprVal == nullptr)
+ break;
+
+ Env.setValue(*S, Env.makeNot(*SubExprVal));
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ void VisitCXXThisExpr(const CXXThisExpr *S) {
+ auto *ThisPointeeLoc = Env.getThisPointeeStorageLocation();
+ if (ThisPointeeLoc == nullptr)
+ // Unions are not supported yet, and will not have a location for the
+ // `this` expression's pointee.
+ return;
+
+ Env.setValue(*S, Env.create<PointerValue>(*ThisPointeeLoc));
+ }
+
+ void VisitCXXNewExpr(const CXXNewExpr *S) {
+ if (Value *Val = Env.createValue(S->getType()))
+ Env.setValue(*S, *Val);
+ }
+
+ void VisitCXXDeleteExpr(const CXXDeleteExpr *S) {
+ // Empty method.
+ // We consciously don't do anything on deletes. Diagnosing double deletes
+ // (for example) should be done by a specific analysis, not by the
+ // framework.
+ }
+
+ void VisitReturnStmt(const ReturnStmt *S) {
+ if (!Env.getDataflowAnalysisContext().getOptions().ContextSensitiveOpts)
+ return;
+
+ auto *Ret = S->getRetValue();
+ if (Ret == nullptr)
+ return;
+
+ if (Ret->isPRValue()) {
+ auto *Val = Env.getValue(*Ret);
+ if (Val == nullptr)
+ return;
+
+ // FIXME: Model NRVO.
+ Env.setReturnValue(Val);
+ } else {
+ auto *Loc = Env.getStorageLocation(*Ret);
+ if (Loc == nullptr)
+ return;
+
+ // FIXME: Model NRVO.
+ Env.setReturnStorageLocation(Loc);
+ }
+ }
+
+ void VisitMemberExpr(const MemberExpr *S) {
+ ValueDecl *Member = S->getMemberDecl();
+ assert(Member != nullptr);
+
+ // FIXME: Consider assigning pointer values to function member expressions.
+ if (Member->isFunctionOrFunctionTemplate())
+ return;
+
+ // FIXME: if/when we add support for modeling enums, use that support here.
+ if (isa<EnumConstantDecl>(Member))
+ return;
+
+ if (auto *D = dyn_cast<VarDecl>(Member)) {
+ if (D->hasGlobalStorage()) {
+ auto *VarDeclLoc = Env.getStorageLocation(*D);
+ if (VarDeclLoc == nullptr)
+ return;
+
+ Env.setStorageLocation(*S, *VarDeclLoc);
+ return;
+ }
+ }
+
+ RecordStorageLocation *BaseLoc = getBaseObjectLocation(*S, Env);
+ if (BaseLoc == nullptr)
+ return;
+
+ auto *MemberLoc = BaseLoc->getChild(*Member);
+ if (MemberLoc == nullptr)
+ return;
+ Env.setStorageLocation(*S, *MemberLoc);
+ }
+
+ void VisitCXXDefaultInitExpr(const CXXDefaultInitExpr *S) {
+ const Expr *InitExpr = S->getExpr();
+ assert(InitExpr != nullptr);
+ propagateValueOrStorageLocation(*InitExpr, *S, Env);
+ }
+
+ void VisitCXXConstructExpr(const CXXConstructExpr *S) {
+ const CXXConstructorDecl *ConstructorDecl = S->getConstructor();
+ assert(ConstructorDecl != nullptr);
+
+ if (ConstructorDecl->isCopyOrMoveConstructor()) {
+ // It is permissible for a copy/move constructor to have additional
+ // parameters as long as they have default arguments defined for them.
+ assert(S->getNumArgs() != 0);
+
+ const Expr *Arg = S->getArg(0);
+ assert(Arg != nullptr);
+
+ auto *ArgLoc = Env.get<RecordStorageLocation>(*Arg);
+ if (ArgLoc == nullptr)
+ return;
+
+ if (S->isElidable()) {
+ if (Value *Val = Env.getValue(*ArgLoc))
+ Env.setValue(*S, *Val);
+ } else {
+ auto &Val = *cast<RecordValue>(Env.createValue(S->getType()));
+ Env.setValue(*S, Val);
+ copyRecord(*ArgLoc, Val.getLoc(), Env);
+ }
+ return;
+ }
+
+ // `CXXConstructExpr` can have array type if default-initializing an array
+ // of records, and we currently can't create values for arrays. So check if
+ // we've got a record type.
+ if (S->getType()->isRecordType()) {
+ auto &InitialVal = *cast<RecordValue>(Env.createValue(S->getType()));
+ Env.setValue(*S, InitialVal);
+ }
+
+ transferInlineCall(S, ConstructorDecl);
+ }
+
+ void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *S) {
+ if (S->getOperator() == OO_Equal) {
+ assert(S->getNumArgs() == 2);
+
+ const Expr *Arg0 = S->getArg(0);
+ assert(Arg0 != nullptr);
+
+ const Expr *Arg1 = S->getArg(1);
+ assert(Arg1 != nullptr);
+
+ // Evaluate only copy and move assignment operators.
+ const auto *Method =
+ dyn_cast_or_null<CXXMethodDecl>(S->getDirectCallee());
+ if (!Method)
+ return;
+ if (!Method->isCopyAssignmentOperator() &&
+ !Method->isMoveAssignmentOperator())
+ return;
+
+ RecordStorageLocation *LocSrc = nullptr;
+ if (Arg1->isPRValue()) {
+ if (auto *Val = Env.get<RecordValue>(*Arg1))
+ LocSrc = &Val->getLoc();
+ } else {
+ LocSrc = Env.get<RecordStorageLocation>(*Arg1);
+ }
+ auto *LocDst = Env.get<RecordStorageLocation>(*Arg0);
+
+ if (LocSrc == nullptr || LocDst == nullptr)
+ return;
+
+ // The assignment operators are different from the type of the destination
+ // in this model (i.e. in one of their base classes). This must be very
+ // rare and we just bail.
+ if (Method->getFunctionObjectParameterType()
+ .getCanonicalType()
+ .getUnqualifiedType() !=
+ LocDst->getType().getCanonicalType().getUnqualifiedType())
+ return;
+
+ copyRecord(*LocSrc, *LocDst, Env);
+ Env.setStorageLocation(*S, *LocDst);
+ }
+ }
+
+ void VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *S) {
+ if (S->getCastKind() == CK_ConstructorConversion) {
+ const Expr *SubExpr = S->getSubExpr();
+ assert(SubExpr != nullptr);
+
+ propagateValue(*SubExpr, *S, Env);
+ }
+ }
+
+ void VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *S) {
+ if (Value *Val = Env.createValue(S->getType()))
+ Env.setValue(*S, *Val);
+ }
+
+ void VisitCallExpr(const CallExpr *S) {
+ // Of clang's builtins, only `__builtin_expect` is handled explicitly, since
+ // others (like trap, debugtrap, and unreachable) are handled by CFG
+ // construction.
+ if (S->isCallToStdMove()) {
+ assert(S->getNumArgs() == 1);
+
+ const Expr *Arg = S->getArg(0);
+ assert(Arg != nullptr);
+
+ auto *ArgLoc = Env.getStorageLocation(*Arg);
+ if (ArgLoc == nullptr)
+ return;
+
+ Env.setStorageLocation(*S, *ArgLoc);
+ } else if (S->getDirectCallee() != nullptr &&
+ S->getDirectCallee()->getBuiltinID() ==
+ Builtin::BI__builtin_expect) {
+ assert(S->getNumArgs() > 0);
+ assert(S->getArg(0) != nullptr);
+ auto *ArgVal = Env.getValue(*S->getArg(0));
+ if (ArgVal == nullptr)
+ return;
+ Env.setValue(*S, *ArgVal);
+ } else if (const FunctionDecl *F = S->getDirectCallee()) {
+ transferInlineCall(S, F);
+
+ // If this call produces a prvalue of record type, make sure that we have
+ // a `RecordValue` for it. This is required so that
+ // `Environment::getResultObjectLocation()` is able to return a location
+ // for this `CallExpr`.
+ if (S->getType()->isRecordType() && S->isPRValue())
+ if (Env.getValue(*S) == nullptr)
+ refreshRecordValue(*S, Env);
+ }
+ }
+
+ void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *S) {
+ const Expr *SubExpr = S->getSubExpr();
+ assert(SubExpr != nullptr);
+
+ Value *SubExprVal = Env.getValue(*SubExpr);
+ if (SubExprVal == nullptr)
+ return;
+
+ if (RecordValue *RecordVal = dyn_cast<RecordValue>(SubExprVal)) {
+ Env.setStorageLocation(*S, RecordVal->getLoc());
+ return;
+ }
+
+ StorageLocation &Loc = Env.createStorageLocation(*S);
+ Env.setValue(Loc, *SubExprVal);
+ Env.setStorageLocation(*S, Loc);
+ }
+
+ void VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *S) {
+ const Expr *SubExpr = S->getSubExpr();
+ assert(SubExpr != nullptr);
+
+ propagateValue(*SubExpr, *S, Env);
+ }
+
+ void VisitCXXStaticCastExpr(const CXXStaticCastExpr *S) {
+ if (S->getCastKind() == CK_NoOp) {
+ const Expr *SubExpr = S->getSubExpr();
+ assert(SubExpr != nullptr);
+
+ propagateValueOrStorageLocation(*SubExpr, *S, Env);
+ }
+ }
+
+ void VisitConditionalOperator(const ConditionalOperator *S) {
+ // FIXME: Revisit this once flow conditions are added to the framework. For
+ // `a = b ? c : d` we can add `b => a == c && !b => a == d` to the flow
+ // condition.
+ // When we do this, we will need to retrieve the values of the operands from
+ // the environments for the basic blocks they are computed in, in a similar
+ // way to how this is done for short-circuited logical operators in
+ // `getLogicOperatorSubExprValue()`.
+ if (S->isGLValue())
+ Env.setStorageLocation(*S, Env.createObject(S->getType()));
+ else if (Value *Val = Env.createValue(S->getType()))
+ Env.setValue(*S, *Val);
+ }
+
+ void VisitInitListExpr(const InitListExpr *S) {
+ QualType Type = S->getType();
+
+ if (!Type->isStructureOrClassType()) {
+ if (auto *Val = Env.createValue(Type))
+ Env.setValue(*S, *Val);
+
+ return;
+ }
+
+ // In case the initializer list is transparent, we just need to propagate
+ // the value that it contains.
+ if (S->isSemanticForm() && S->isTransparent()) {
+ propagateValue(*S->getInit(0), *S, Env);
+ return;
+ }
+
+ llvm::DenseMap<const ValueDecl *, StorageLocation *> FieldLocs;
+
+ // This only contains the direct fields for the given type.
+ std::vector<FieldDecl *> FieldsForInit =
+ getFieldsForInitListExpr(Type->getAsRecordDecl());
+
+ // `S->inits()` contains all the initializer epressions, including the
+ // ones for direct base classes.
+ auto Inits = S->inits();
+ size_t InitIdx = 0;
+
+ // Initialize base classes.
+ if (auto* R = S->getType()->getAsCXXRecordDecl()) {
+ assert(FieldsForInit.size() + R->getNumBases() == Inits.size());
+ for ([[maybe_unused]] const CXXBaseSpecifier &Base : R->bases()) {
+ assert(InitIdx < Inits.size());
+ auto Init = Inits[InitIdx++];
+ assert(Base.getType().getCanonicalType() ==
+ Init->getType().getCanonicalType());
+ auto *BaseVal = Env.get<RecordValue>(*Init);
+ if (!BaseVal)
+ BaseVal = cast<RecordValue>(Env.createValue(Init->getType()));
+ // Take ownership of the fields of the `RecordValue` for the base class
+ // and incorporate them into the "flattened" set of fields for the
+ // derived class.
+ auto Children = BaseVal->getLoc().children();
+ FieldLocs.insert(Children.begin(), Children.end());
+ }
+ }
+
+ assert(FieldsForInit.size() == Inits.size() - InitIdx);
+ for (auto Field : FieldsForInit) {
+ assert(InitIdx < Inits.size());
+ auto Init = Inits[InitIdx++];
+ assert(
+ // The types are same, or
+ Field->getType().getCanonicalType().getUnqualifiedType() ==
+ Init->getType().getCanonicalType().getUnqualifiedType() ||
+ // The field's type is T&, and initializer is T
+ (Field->getType()->isReferenceType() &&
+ Field->getType().getCanonicalType()->getPointeeType() ==
+ Init->getType().getCanonicalType()));
+ auto& Loc = Env.createObject(Field->getType(), Init);
+ FieldLocs.insert({Field, &Loc});
+ }
+
+ // Check that we satisfy the invariant that a `RecordStorageLoation`
+ // contains exactly the set of modeled fields for that type.
+ // `ModeledFields` includes fields from all the bases, but only the
+ // modeled ones. However, if a class type is initialized with an
+ // `InitListExpr`, all fields in the class, including those from base
+ // classes, are included in the set of modeled fields. The code above
+ // should therefore populate exactly the modeled fields.
+ assert(containsSameFields(
+ Env.getDataflowAnalysisContext().getModeledFields(Type), FieldLocs));
+
+ RecordStorageLocation::SyntheticFieldMap SyntheticFieldLocs;
+ for (const auto &Entry :
+ Env.getDataflowAnalysisContext().getSyntheticFields(Type)) {
+ SyntheticFieldLocs.insert(
+ {Entry.getKey(), &Env.createObject(Entry.getValue())});
+ }
+
+ auto &Loc = Env.getDataflowAnalysisContext().createRecordStorageLocation(
+ Type, std::move(FieldLocs), std::move(SyntheticFieldLocs));
+ RecordValue &RecordVal = Env.create<RecordValue>(Loc);
+
+ Env.setValue(Loc, RecordVal);
+
+ Env.setValue(*S, RecordVal);
+
+ // FIXME: Implement array initialization.
+ }
+
+ void VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *S) {
+ Env.setValue(*S, Env.getBoolLiteralValue(S->getValue()));
+ }
+
+ void VisitIntegerLiteral(const IntegerLiteral *S) {
+ Env.setValue(*S, Env.getIntLiteralValue(S->getValue()));
+ }
+
+ void VisitParenExpr(const ParenExpr *S) {
+ // The CFG does not contain `ParenExpr` as top-level statements in basic
+ // blocks, however manual traversal to sub-expressions may encounter them.
+ // Redirect to the sub-expression.
+ auto *SubExpr = S->getSubExpr();
+ assert(SubExpr != nullptr);
+ Visit(SubExpr);
+ }
+
+ void VisitExprWithCleanups(const ExprWithCleanups *S) {
+ // The CFG does not contain `ExprWithCleanups` as top-level statements in
+ // basic blocks, however manual traversal to sub-expressions may encounter
+ // them. Redirect to the sub-expression.
+ auto *SubExpr = S->getSubExpr();
+ assert(SubExpr != nullptr);
+ Visit(SubExpr);
+ }
+
+private:
+ /// Returns the value for the sub-expression `SubExpr` of a logic operator.
+ BoolValue &getLogicOperatorSubExprValue(const Expr &SubExpr) {
+ // `SubExpr` and its parent logic operator might be part of different basic
+ // blocks. We try to access the value that is assigned to `SubExpr` in the
+ // corresponding environment.
+ if (const Environment *SubExprEnv = StmtToEnv.getEnvironment(SubExpr))
+ if (auto *Val =
+ dyn_cast_or_null<BoolValue>(SubExprEnv->getValue(SubExpr)))
+ return *Val;
+
+ // The sub-expression may lie within a basic block that isn't reachable,
+ // even if we need it to evaluate the current (reachable) expression
+ // (see https://discourse.llvm.org/t/70775). In this case, visit `SubExpr`
+ // within the current environment and then try to get the value that gets
+ // assigned to it.
+ if (Env.getValue(SubExpr) == nullptr)
+ Visit(&SubExpr);
+ if (auto *Val = dyn_cast_or_null<BoolValue>(Env.getValue(SubExpr)))
+ return *Val;
+
+ // If the value of `SubExpr` is still unknown, we create a fresh symbolic
+ // boolean value for it.
+ return Env.makeAtomicBoolValue();
+ }
+
+ // If context sensitivity is enabled, try to analyze the body of the callee
+ // `F` of `S`. The type `E` must be either `CallExpr` or `CXXConstructExpr`.
+ template <typename E>
+ void transferInlineCall(const E *S, const FunctionDecl *F) {
+ const auto &Options = Env.getDataflowAnalysisContext().getOptions();
+ if (!(Options.ContextSensitiveOpts &&
+ Env.canDescend(Options.ContextSensitiveOpts->Depth, F)))
+ return;
+
+ const ControlFlowContext *CFCtx =
+ Env.getDataflowAnalysisContext().getControlFlowContext(F);
+ if (!CFCtx)
+ return;
+
+ // FIXME: We don't support context-sensitive analysis of recursion, so
+ // we should return early here if `F` is the same as the `FunctionDecl`
+ // holding `S` itself.
+
+ auto ExitBlock = CFCtx->getCFG().getExit().getBlockID();
+
+ auto CalleeEnv = Env.pushCall(S);
+
+ // FIXME: Use the same analysis as the caller for the callee. Note,
+ // though, that doing so would require support for changing the analysis's
+ // ASTContext.
+ auto Analysis = NoopAnalysis(CFCtx->getDecl().getASTContext(),
+ DataflowAnalysisOptions{Options});
+
+ auto BlockToOutputState =
+ dataflow::runDataflowAnalysis(*CFCtx, Analysis, CalleeEnv);
+ assert(BlockToOutputState);
+ assert(ExitBlock < BlockToOutputState->size());
+
+ auto &ExitState = (*BlockToOutputState)[ExitBlock];
+ assert(ExitState);
+
+ Env.popCall(S, ExitState->Env);
+ }
+
+ const StmtToEnvMap &StmtToEnv;
+ Environment &Env;
+};
+
+} // namespace
+
+void transfer(const StmtToEnvMap &StmtToEnv, const Stmt &S, Environment &Env) {
+ TransferVisitor(StmtToEnv, Env).Visit(&S);
+}
+
+} // namespace dataflow
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
new file mode 100644
index 000000000000..49e425bde66a
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
@@ -0,0 +1,620 @@
+//===- TypeErasedDataflowAnalysis.cpp -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines type-erased base types and functions for building dataflow
+// analyses that run over Control-Flow Graphs (CFGs).
+//
+//===----------------------------------------------------------------------===//
+
+#include <algorithm>
+#include <optional>
+#include <system_error>
+#include <utility>
+#include <vector>
+
+#include "clang/AST/ASTDumper.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Analysis/Analyses/PostOrderCFGView.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/Analysis/FlowSensitive/DataflowLattice.h"
+#include "clang/Analysis/FlowSensitive/DataflowWorklist.h"
+#include "clang/Analysis/FlowSensitive/RecordOps.h"
+#include "clang/Analysis/FlowSensitive/Transfer.h"
+#include "clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Error.h"
+
+#define DEBUG_TYPE "clang-dataflow"
+
+namespace clang {
+namespace dataflow {
+
+/// Returns the index of `Block` in the successors of `Pred`.
+static int blockIndexInPredecessor(const CFGBlock &Pred,
+ const CFGBlock &Block) {
+ auto BlockPos = llvm::find_if(
+ Pred.succs(), [&Block](const CFGBlock::AdjacentBlock &Succ) {
+ return Succ && Succ->getBlockID() == Block.getBlockID();
+ });
+ return BlockPos - Pred.succ_begin();
+}
+
+// A "backedge" node is a block introduced in the CFG exclusively to indicate a
+// loop backedge. They are exactly identified by the presence of a non-null
+// pointer to the entry block of the loop condition. Note that this is not
+// necessarily the block with the loop statement as terminator, because
+// short-circuit operators will result in multiple blocks encoding the loop
+// condition, only one of which will contain the loop statement as terminator.
+static bool isBackedgeNode(const CFGBlock &B) {
+ return B.getLoopTarget() != nullptr;
+}
+
+namespace {
+
+// The return type of the visit functions in TerminatorVisitor. The first
+// element represents the terminator expression (that is the conditional
+// expression in case of a path split in the CFG). The second element
+// represents whether the condition was true or false.
+using TerminatorVisitorRetTy = std::pair<const Expr *, bool>;
+
+/// Extends the flow condition of an environment based on a terminator
+/// statement.
+class TerminatorVisitor
+ : public ConstStmtVisitor<TerminatorVisitor, TerminatorVisitorRetTy> {
+public:
+ TerminatorVisitor(Environment &Env, int BlockSuccIdx)
+ : Env(Env), BlockSuccIdx(BlockSuccIdx) {}
+
+ TerminatorVisitorRetTy VisitIfStmt(const IfStmt *S) {
+ auto *Cond = S->getCond();
+ assert(Cond != nullptr);
+ return extendFlowCondition(*Cond);
+ }
+
+ TerminatorVisitorRetTy VisitWhileStmt(const WhileStmt *S) {
+ auto *Cond = S->getCond();
+ assert(Cond != nullptr);
+ return extendFlowCondition(*Cond);
+ }
+
+ TerminatorVisitorRetTy VisitDoStmt(const DoStmt *S) {
+ auto *Cond = S->getCond();
+ assert(Cond != nullptr);
+ return extendFlowCondition(*Cond);
+ }
+
+ TerminatorVisitorRetTy VisitForStmt(const ForStmt *S) {
+ auto *Cond = S->getCond();
+ if (Cond != nullptr)
+ return extendFlowCondition(*Cond);
+ return {nullptr, false};
+ }
+
+ TerminatorVisitorRetTy VisitCXXForRangeStmt(const CXXForRangeStmt *) {
+ // Don't do anything special for CXXForRangeStmt, because the condition
+ // (being implicitly generated) isn't visible from the loop body.
+ return {nullptr, false};
+ }
+
+ TerminatorVisitorRetTy VisitBinaryOperator(const BinaryOperator *S) {
+ assert(S->getOpcode() == BO_LAnd || S->getOpcode() == BO_LOr);
+ auto *LHS = S->getLHS();
+ assert(LHS != nullptr);
+ return extendFlowCondition(*LHS);
+ }
+
+ TerminatorVisitorRetTy
+ VisitConditionalOperator(const ConditionalOperator *S) {
+ auto *Cond = S->getCond();
+ assert(Cond != nullptr);
+ return extendFlowCondition(*Cond);
+ }
+
+private:
+ TerminatorVisitorRetTy extendFlowCondition(const Expr &Cond) {
+ auto *Val = Env.get<BoolValue>(Cond);
+ // In transferCFGBlock(), we ensure that we always have a `Value` for the
+ // terminator condition, so assert this.
+ // We consciously assert ourselves instead of asserting via `cast()` so
+ // that we get a more meaningful line number if the assertion fails.
+ assert(Val != nullptr);
+
+ bool ConditionValue = true;
+ // The condition must be inverted for the successor that encompasses the
+ // "else" branch, if such exists.
+ if (BlockSuccIdx == 1) {
+ Val = &Env.makeNot(*Val);
+ ConditionValue = false;
+ }
+
+ Env.assume(Val->formula());
+ return {&Cond, ConditionValue};
+ }
+
+ Environment &Env;
+ int BlockSuccIdx;
+};
+
+/// Holds data structures required for running dataflow analysis.
+struct AnalysisContext {
+ AnalysisContext(const ControlFlowContext &CFCtx,
+ TypeErasedDataflowAnalysis &Analysis,
+ const Environment &InitEnv,
+ llvm::ArrayRef<std::optional<TypeErasedDataflowAnalysisState>>
+ BlockStates)
+ : CFCtx(CFCtx), Analysis(Analysis), InitEnv(InitEnv),
+ Log(*InitEnv.getDataflowAnalysisContext().getOptions().Log),
+ BlockStates(BlockStates) {
+ Log.beginAnalysis(CFCtx, Analysis);
+ }
+ ~AnalysisContext() { Log.endAnalysis(); }
+
+ /// Contains the CFG being analyzed.
+ const ControlFlowContext &CFCtx;
+ /// The analysis to be run.
+ TypeErasedDataflowAnalysis &Analysis;
+ /// Initial state to start the analysis.
+ const Environment &InitEnv;
+ Logger &Log;
+ /// Stores the state of a CFG block if it has been evaluated by the analysis.
+ /// The indices correspond to the block IDs.
+ llvm::ArrayRef<std::optional<TypeErasedDataflowAnalysisState>> BlockStates;
+};
+
+class PrettyStackTraceAnalysis : public llvm::PrettyStackTraceEntry {
+public:
+ PrettyStackTraceAnalysis(const ControlFlowContext &CFCtx, const char *Message)
+ : CFCtx(CFCtx), Message(Message) {}
+
+ void print(raw_ostream &OS) const override {
+ OS << Message << "\n";
+ OS << "Decl:\n";
+ CFCtx.getDecl().dump(OS);
+ OS << "CFG:\n";
+ CFCtx.getCFG().print(OS, LangOptions(), false);
+ }
+
+private:
+ const ControlFlowContext &CFCtx;
+ const char *Message;
+};
+
+class PrettyStackTraceCFGElement : public llvm::PrettyStackTraceEntry {
+public:
+ PrettyStackTraceCFGElement(const CFGElement &Element, int BlockIdx,
+ int ElementIdx, const char *Message)
+ : Element(Element), BlockIdx(BlockIdx), ElementIdx(ElementIdx),
+ Message(Message) {}
+
+ void print(raw_ostream &OS) const override {
+ OS << Message << ": Element [B" << BlockIdx << "." << ElementIdx << "]\n";
+ if (auto Stmt = Element.getAs<CFGStmt>()) {
+ OS << "Stmt:\n";
+ ASTDumper Dumper(OS, false);
+ Dumper.Visit(Stmt->getStmt());
+ }
+ }
+
+private:
+ const CFGElement &Element;
+ int BlockIdx;
+ int ElementIdx;
+ const char *Message;
+};
+
+// Builds a joined TypeErasedDataflowAnalysisState from 0 or more sources,
+// each of which may be owned (built as part of the join) or external (a
+// reference to an Environment that will outlive the builder).
+// Avoids unneccesary copies of the environment.
+class JoinedStateBuilder {
+ AnalysisContext &AC;
+ std::vector<const TypeErasedDataflowAnalysisState *> All;
+ std::deque<TypeErasedDataflowAnalysisState> Owned;
+
+ TypeErasedDataflowAnalysisState
+ join(const TypeErasedDataflowAnalysisState &L,
+ const TypeErasedDataflowAnalysisState &R) {
+ return {AC.Analysis.joinTypeErased(L.Lattice, R.Lattice),
+ Environment::join(L.Env, R.Env, AC.Analysis)};
+ }
+
+public:
+ JoinedStateBuilder(AnalysisContext &AC) : AC(AC) {}
+
+ void addOwned(TypeErasedDataflowAnalysisState State) {
+ Owned.push_back(std::move(State));
+ All.push_back(&Owned.back());
+ }
+ void addUnowned(const TypeErasedDataflowAnalysisState &State) {
+ All.push_back(&State);
+ }
+ TypeErasedDataflowAnalysisState take() && {
+ if (All.empty())
+ // FIXME: Consider passing `Block` to Analysis.typeErasedInitialElement
+ // to enable building analyses like computation of dominators that
+ // initialize the state of each basic block differently.
+ return {AC.Analysis.typeErasedInitialElement(), AC.InitEnv.fork()};
+ if (All.size() == 1)
+ // Join the environment with itself so that we discard the entries from
+ // `ExprToLoc` and `ExprToVal`.
+ // FIXME: We could consider writing special-case code for this that only
+ // does the discarding, but it's not clear if this is worth it.
+ return {All[0]->Lattice,
+ Environment::join(All[0]->Env, All[0]->Env, AC.Analysis)};
+
+ auto Result = join(*All[0], *All[1]);
+ for (unsigned I = 2; I < All.size(); ++I)
+ Result = join(Result, *All[I]);
+ return Result;
+ }
+};
+
+} // namespace
+
+/// Computes the input state for a given basic block by joining the output
+/// states of its predecessors.
+///
+/// Requirements:
+///
+/// All predecessors of `Block` except those with loop back edges must have
+/// already been transferred. States in `AC.BlockStates` that are set to
+/// `std::nullopt` represent basic blocks that are not evaluated yet.
+static TypeErasedDataflowAnalysisState
+computeBlockInputState(const CFGBlock &Block, AnalysisContext &AC) {
+ std::vector<const CFGBlock *> Preds(Block.pred_begin(), Block.pred_end());
+ if (Block.getTerminator().isTemporaryDtorsBranch()) {
+ // This handles a special case where the code that produced the CFG includes
+ // a conditional operator with a branch that constructs a temporary and
+ // calls a destructor annotated as noreturn. The CFG models this as follows:
+ //
+ // B1 (contains the condition of the conditional operator) - succs: B2, B3
+ // B2 (contains code that does not call a noreturn destructor) - succs: B4
+ // B3 (contains code that calls a noreturn destructor) - succs: B4
+ // B4 (has temporary destructor terminator) - succs: B5, B6
+ // B5 (noreturn block that is associated with the noreturn destructor call)
+ // B6 (contains code that follows the conditional operator statement)
+ //
+ // The first successor (B5 above) of a basic block with a temporary
+ // destructor terminator (B4 above) is the block that evaluates the
+ // destructor. If that block has a noreturn element then the predecessor
+ // block that constructed the temporary object (B3 above) is effectively a
+ // noreturn block and its state should not be used as input for the state
+ // of the block that has a temporary destructor terminator (B4 above). This
+ // holds regardless of which branch of the ternary operator calls the
+ // noreturn destructor. However, it doesn't cases where a nested ternary
+ // operator includes a branch that contains a noreturn destructor call.
+ //
+ // See `NoreturnDestructorTest` for concrete examples.
+ if (Block.succ_begin()->getReachableBlock() != nullptr &&
+ Block.succ_begin()->getReachableBlock()->hasNoReturnElement()) {
+ auto &StmtToBlock = AC.CFCtx.getStmtToBlock();
+ auto StmtBlock = StmtToBlock.find(Block.getTerminatorStmt());
+ assert(StmtBlock != StmtToBlock.end());
+ llvm::erase(Preds, StmtBlock->getSecond());
+ }
+ }
+
+ JoinedStateBuilder Builder(AC);
+ for (const CFGBlock *Pred : Preds) {
+ // Skip if the `Block` is unreachable or control flow cannot get past it.
+ if (!Pred || Pred->hasNoReturnElement())
+ continue;
+
+ // Skip if `Pred` was not evaluated yet. This could happen if `Pred` has a
+ // loop back edge to `Block`.
+ const std::optional<TypeErasedDataflowAnalysisState> &MaybePredState =
+ AC.BlockStates[Pred->getBlockID()];
+ if (!MaybePredState)
+ continue;
+
+ if (AC.Analysis.builtinOptions()) {
+ if (const Stmt *PredTerminatorStmt = Pred->getTerminatorStmt()) {
+ // We have a terminator: we need to mutate an environment to describe
+ // when the terminator is taken. Copy now.
+ TypeErasedDataflowAnalysisState Copy = MaybePredState->fork();
+
+ auto [Cond, CondValue] =
+ TerminatorVisitor(Copy.Env, blockIndexInPredecessor(*Pred, Block))
+ .Visit(PredTerminatorStmt);
+ if (Cond != nullptr)
+ // FIXME: Call transferBranchTypeErased even if BuiltinTransferOpts
+ // are not set.
+ AC.Analysis.transferBranchTypeErased(CondValue, Cond, Copy.Lattice,
+ Copy.Env);
+ Builder.addOwned(std::move(Copy));
+ continue;
+ }
+ }
+ Builder.addUnowned(*MaybePredState);
+ }
+ return std::move(Builder).take();
+}
+
+/// Built-in transfer function for `CFGStmt`.
+static void
+builtinTransferStatement(unsigned CurBlockID, const CFGStmt &Elt,
+ TypeErasedDataflowAnalysisState &InputState,
+ AnalysisContext &AC) {
+ const Stmt *S = Elt.getStmt();
+ assert(S != nullptr);
+ transfer(StmtToEnvMap(AC.CFCtx, AC.BlockStates, CurBlockID, InputState), *S,
+ InputState.Env);
+}
+
+/// Built-in transfer function for `CFGInitializer`.
+static void
+builtinTransferInitializer(const CFGInitializer &Elt,
+ TypeErasedDataflowAnalysisState &InputState) {
+ const CXXCtorInitializer *Init = Elt.getInitializer();
+ assert(Init != nullptr);
+
+ auto &Env = InputState.Env;
+ auto &ThisLoc = *Env.getThisPointeeStorageLocation();
+
+ if (!Init->isAnyMemberInitializer())
+ // FIXME: Handle base initialization
+ return;
+
+ auto *InitExpr = Init->getInit();
+ assert(InitExpr != nullptr);
+
+ const FieldDecl *Member = nullptr;
+ RecordStorageLocation *ParentLoc = &ThisLoc;
+ StorageLocation *MemberLoc = nullptr;
+ if (Init->isMemberInitializer()) {
+ Member = Init->getMember();
+ MemberLoc = ThisLoc.getChild(*Member);
+ } else {
+ IndirectFieldDecl *IndirectField = Init->getIndirectMember();
+ assert(IndirectField != nullptr);
+ MemberLoc = &ThisLoc;
+ for (const auto *I : IndirectField->chain()) {
+ Member = cast<FieldDecl>(I);
+ ParentLoc = cast<RecordStorageLocation>(MemberLoc);
+ MemberLoc = ParentLoc->getChild(*Member);
+ }
+ }
+ assert(Member != nullptr);
+ assert(MemberLoc != nullptr);
+
+ // FIXME: Instead of these case distinctions, we would ideally want to be able
+ // to simply use `Environment::createObject()` here, the same way that we do
+ // this in `TransferVisitor::VisitInitListExpr()`. However, this would require
+ // us to be able to build a list of fields that we then use to initialize an
+ // `RecordStorageLocation` -- and the problem is that, when we get here,
+ // the `RecordStorageLocation` already exists. We should explore if there's
+ // anything that we can do to change this.
+ if (Member->getType()->isReferenceType()) {
+ auto *InitExprLoc = Env.getStorageLocation(*InitExpr);
+ if (InitExprLoc == nullptr)
+ return;
+
+ ParentLoc->setChild(*Member, InitExprLoc);
+ } else if (auto *InitExprVal = Env.getValue(*InitExpr)) {
+ if (Member->getType()->isRecordType()) {
+ auto *InitValStruct = cast<RecordValue>(InitExprVal);
+ // FIXME: Rather than performing a copy here, we should really be
+ // initializing the field in place. This would require us to propagate the
+ // storage location of the field to the AST node that creates the
+ // `RecordValue`.
+ copyRecord(InitValStruct->getLoc(),
+ *cast<RecordStorageLocation>(MemberLoc), Env);
+ } else {
+ Env.setValue(*MemberLoc, *InitExprVal);
+ }
+ }
+}
+
+static void builtinTransfer(unsigned CurBlockID, const CFGElement &Elt,
+ TypeErasedDataflowAnalysisState &State,
+ AnalysisContext &AC) {
+ switch (Elt.getKind()) {
+ case CFGElement::Statement:
+ builtinTransferStatement(CurBlockID, Elt.castAs<CFGStmt>(), State, AC);
+ break;
+ case CFGElement::Initializer:
+ builtinTransferInitializer(Elt.castAs<CFGInitializer>(), State);
+ break;
+ case CFGElement::LifetimeEnds:
+ // Removing declarations when their lifetime ends serves two purposes:
+ // - Eliminate unnecessary clutter from `Environment::DeclToLoc`
+ // - Allow us to assert that, when joining two `Environment`s, the two
+ // `DeclToLoc` maps never contain entries that map the same declaration to
+ // different storage locations.
+ if (const ValueDecl *VD = Elt.castAs<CFGLifetimeEnds>().getVarDecl())
+ State.Env.removeDecl(*VD);
+ break;
+ default:
+ // FIXME: Evaluate other kinds of `CFGElement`
+ break;
+ }
+}
+
+/// Transfers `State` by evaluating each element in the `Block` based on the
+/// `AC.Analysis` specified.
+///
+/// Built-in transfer functions (if the option for `ApplyBuiltinTransfer` is set
+/// by the analysis) will be applied to the element before evaluation by the
+/// user-specified analysis.
+/// `PostVisitCFG` (if provided) will be applied to the element after evaluation
+/// by the user-specified analysis.
+static TypeErasedDataflowAnalysisState
+transferCFGBlock(const CFGBlock &Block, AnalysisContext &AC,
+ std::function<void(const CFGElement &,
+ const TypeErasedDataflowAnalysisState &)>
+ PostVisitCFG = nullptr) {
+ AC.Log.enterBlock(Block, PostVisitCFG != nullptr);
+ auto State = computeBlockInputState(Block, AC);
+ AC.Log.recordState(State);
+ int ElementIdx = 1;
+ for (const auto &Element : Block) {
+ PrettyStackTraceCFGElement CrashInfo(Element, Block.getBlockID(),
+ ElementIdx++, "transferCFGBlock");
+
+ AC.Log.enterElement(Element);
+ // Built-in analysis
+ if (AC.Analysis.builtinOptions()) {
+ builtinTransfer(Block.getBlockID(), Element, State, AC);
+ }
+
+ // User-provided analysis
+ AC.Analysis.transferTypeErased(Element, State.Lattice, State.Env);
+
+ // Post processing
+ if (PostVisitCFG) {
+ PostVisitCFG(Element, State);
+ }
+ AC.Log.recordState(State);
+ }
+
+ // If we have a terminator, evaluate its condition.
+ // This `Expr` may not appear as a `CFGElement` anywhere else, and it's
+ // important that we evaluate it here (rather than while processing the
+ // terminator) so that we put the corresponding value in the right
+ // environment.
+ if (const Expr *TerminatorCond =
+ dyn_cast_or_null<Expr>(Block.getTerminatorCondition())) {
+ if (State.Env.getValue(*TerminatorCond) == nullptr)
+ // FIXME: This only runs the builtin transfer, not the analysis-specific
+ // transfer. Fixing this isn't trivial, as the analysis-specific transfer
+ // takes a `CFGElement` as input, but some expressions only show up as a
+ // terminator condition, but not as a `CFGElement`. The condition of an if
+ // statement is one such example.
+ transfer(
+ StmtToEnvMap(AC.CFCtx, AC.BlockStates, Block.getBlockID(), State),
+ *TerminatorCond, State.Env);
+
+ // If the transfer function didn't produce a value, create an atom so that
+ // we have *some* value for the condition expression. This ensures that
+ // when we extend the flow condition, it actually changes.
+ if (State.Env.getValue(*TerminatorCond) == nullptr)
+ State.Env.setValue(*TerminatorCond, State.Env.makeAtomicBoolValue());
+ AC.Log.recordState(State);
+ }
+
+ return State;
+}
+
+llvm::Expected<std::vector<std::optional<TypeErasedDataflowAnalysisState>>>
+runTypeErasedDataflowAnalysis(
+ const ControlFlowContext &CFCtx, TypeErasedDataflowAnalysis &Analysis,
+ const Environment &InitEnv,
+ std::function<void(const CFGElement &,
+ const TypeErasedDataflowAnalysisState &)>
+ PostVisitCFG,
+ std::int32_t MaxBlockVisits) {
+ PrettyStackTraceAnalysis CrashInfo(CFCtx, "runTypeErasedDataflowAnalysis");
+
+ std::optional<Environment> MaybeStartingEnv;
+ if (InitEnv.callStackSize() == 1) {
+ MaybeStartingEnv = InitEnv.fork();
+ MaybeStartingEnv->initialize();
+ }
+ const Environment &StartingEnv =
+ MaybeStartingEnv ? *MaybeStartingEnv : InitEnv;
+
+ const clang::CFG &CFG = CFCtx.getCFG();
+ PostOrderCFGView POV(&CFG);
+ ForwardDataflowWorklist Worklist(CFG, &POV);
+
+ std::vector<std::optional<TypeErasedDataflowAnalysisState>> BlockStates(
+ CFG.size());
+
+ // The entry basic block doesn't contain statements so it can be skipped.
+ const CFGBlock &Entry = CFG.getEntry();
+ BlockStates[Entry.getBlockID()] = {Analysis.typeErasedInitialElement(),
+ StartingEnv.fork()};
+ Worklist.enqueueSuccessors(&Entry);
+
+ AnalysisContext AC(CFCtx, Analysis, StartingEnv, BlockStates);
+
+ // FIXME: remove relative cap. There isn't really any good setting for
+ // `MaxAverageVisitsPerBlock`, so it has no clear value over using
+ // `MaxBlockVisits` directly.
+ static constexpr std::int32_t MaxAverageVisitsPerBlock = 4;
+ const std::int32_t RelativeMaxBlockVisits =
+ MaxAverageVisitsPerBlock * BlockStates.size();
+ MaxBlockVisits = std::min(RelativeMaxBlockVisits, MaxBlockVisits);
+ std::int32_t BlockVisits = 0;
+ while (const CFGBlock *Block = Worklist.dequeue()) {
+ LLVM_DEBUG(llvm::dbgs()
+ << "Processing Block " << Block->getBlockID() << "\n");
+ if (++BlockVisits > MaxBlockVisits) {
+ return llvm::createStringError(std::errc::timed_out,
+ "maximum number of blocks processed");
+ }
+
+ const std::optional<TypeErasedDataflowAnalysisState> &OldBlockState =
+ BlockStates[Block->getBlockID()];
+ TypeErasedDataflowAnalysisState NewBlockState =
+ transferCFGBlock(*Block, AC);
+ LLVM_DEBUG({
+ llvm::errs() << "New Env:\n";
+ NewBlockState.Env.dump();
+ });
+
+ if (OldBlockState) {
+ LLVM_DEBUG({
+ llvm::errs() << "Old Env:\n";
+ OldBlockState->Env.dump();
+ });
+ if (isBackedgeNode(*Block)) {
+ LatticeJoinEffect Effect1 = Analysis.widenTypeErased(
+ NewBlockState.Lattice, OldBlockState->Lattice);
+ LatticeJoinEffect Effect2 =
+ NewBlockState.Env.widen(OldBlockState->Env, Analysis);
+ if (Effect1 == LatticeJoinEffect::Unchanged &&
+ Effect2 == LatticeJoinEffect::Unchanged) {
+ // The state of `Block` didn't change from widening so there's no need
+ // to revisit its successors.
+ AC.Log.blockConverged();
+ continue;
+ }
+ } else if (Analysis.isEqualTypeErased(OldBlockState->Lattice,
+ NewBlockState.Lattice) &&
+ OldBlockState->Env.equivalentTo(NewBlockState.Env, Analysis)) {
+ // The state of `Block` didn't change after transfer so there's no need
+ // to revisit its successors.
+ AC.Log.blockConverged();
+ continue;
+ }
+ }
+
+ BlockStates[Block->getBlockID()] = std::move(NewBlockState);
+
+ // Do not add unreachable successor blocks to `Worklist`.
+ if (Block->hasNoReturnElement())
+ continue;
+
+ Worklist.enqueueSuccessors(Block);
+ }
+ // FIXME: Consider evaluating unreachable basic blocks (those that have a
+ // state set to `std::nullopt` at this point) to also analyze dead code.
+
+ if (PostVisitCFG) {
+ for (const CFGBlock *Block : CFCtx.getCFG()) {
+ // Skip blocks that were not evaluated.
+ if (!BlockStates[Block->getBlockID()])
+ continue;
+ transferCFGBlock(*Block, AC, PostVisitCFG);
+ }
+ }
+
+ return std::move(BlockStates);
+}
+
+} // namespace dataflow
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Value.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Value.cpp
new file mode 100644
index 000000000000..7fad6deb0e91
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Value.cpp
@@ -0,0 +1,62 @@
+//===-- Value.cpp -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines support functions for the `Value` type.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "clang/Analysis/FlowSensitive/DebugSupport.h"
+#include "llvm/Support/Casting.h"
+
+namespace clang {
+namespace dataflow {
+
+static bool areEquivalentIndirectionValues(const Value &Val1,
+ const Value &Val2) {
+ if (auto *IndVal1 = dyn_cast<PointerValue>(&Val1)) {
+ auto *IndVal2 = cast<PointerValue>(&Val2);
+ return &IndVal1->getPointeeLoc() == &IndVal2->getPointeeLoc();
+ }
+ return false;
+}
+
+bool areEquivalentValues(const Value &Val1, const Value &Val2) {
+ if (&Val1 == &Val2)
+ return true;
+ if (Val1.getKind() != Val2.getKind())
+ return false;
+ // If values are distinct and have properties, we don't consider them equal,
+ // leaving equality up to the user model.
+ if (!Val1.properties().empty() || !Val2.properties().empty())
+ return false;
+ if (isa<TopBoolValue>(&Val1))
+ return true;
+ return areEquivalentIndirectionValues(Val1, Val2);
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const Value &Val) {
+ switch (Val.getKind()) {
+ case Value::Kind::Integer:
+ return OS << "Integer(@" << &Val << ")";
+ case Value::Kind::Pointer:
+ return OS << "Pointer(" << &cast<PointerValue>(Val).getPointeeLoc() << ")";
+ case Value::Kind::Record:
+ return OS << "Record(" << &cast<RecordValue>(Val).getLoc() << ")";
+ case Value::Kind::TopBool:
+ return OS << "TopBool(" << cast<TopBoolValue>(Val).getAtom() << ")";
+ case Value::Kind::AtomicBool:
+ return OS << "AtomicBool(" << cast<AtomicBoolValue>(Val).getAtom() << ")";
+ case Value::Kind::FormulaBool:
+ return OS << "FormulaBool(" << cast<FormulaBoolValue>(Val).formula() << ")";
+ }
+ llvm_unreachable("Unknown clang::dataflow::Value::Kind enum");
+}
+
+} // namespace dataflow
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp
new file mode 100644
index 000000000000..3ef363753532
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp
@@ -0,0 +1,796 @@
+//===- WatchedLiteralsSolver.cpp --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a SAT solver implementation that can be used by dataflow
+// analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <queue>
+#include <vector>
+
+#include "clang/Analysis/FlowSensitive/Formula.h"
+#include "clang/Analysis/FlowSensitive/Solver.h"
+#include "clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
+
+
+namespace clang {
+namespace dataflow {
+
+// `WatchedLiteralsSolver` is an implementation of Algorithm D from Knuth's
+// The Art of Computer Programming Volume 4: Satisfiability, Fascicle 6. It is
+// based on the backtracking DPLL algorithm [1], keeps references to a single
+// "watched" literal per clause, and uses a set of "active" variables to perform
+// unit propagation.
+//
+// The solver expects that its input is a boolean formula in conjunctive normal
+// form that consists of clauses of at least one literal. A literal is either a
+// boolean variable or its negation. Below we define types, data structures, and
+// utilities that are used to represent boolean formulas in conjunctive normal
+// form.
+//
+// [1] https://en.wikipedia.org/wiki/DPLL_algorithm
+
+/// Boolean variables are represented as positive integers.
+using Variable = uint32_t;
+
+/// A null boolean variable is used as a placeholder in various data structures
+/// and algorithms.
+static constexpr Variable NullVar = 0;
+
+/// Literals are represented as positive integers. Specifically, for a boolean
+/// variable `V` that is represented as the positive integer `I`, the positive
+/// literal `V` is represented as the integer `2*I` and the negative literal
+/// `!V` is represented as the integer `2*I+1`.
+using Literal = uint32_t;
+
+/// A null literal is used as a placeholder in various data structures and
+/// algorithms.
+[[maybe_unused]] static constexpr Literal NullLit = 0;
+
+/// Returns the positive literal `V`.
+static constexpr Literal posLit(Variable V) { return 2 * V; }
+
+static constexpr bool isPosLit(Literal L) { return 0 == (L & 1); }
+
+static constexpr bool isNegLit(Literal L) { return 1 == (L & 1); }
+
+/// Returns the negative literal `!V`.
+static constexpr Literal negLit(Variable V) { return 2 * V + 1; }
+
+/// Returns the negated literal `!L`.
+static constexpr Literal notLit(Literal L) { return L ^ 1; }
+
+/// Returns the variable of `L`.
+static constexpr Variable var(Literal L) { return L >> 1; }
+
+/// Clause identifiers are represented as positive integers.
+using ClauseID = uint32_t;
+
+/// A null clause identifier is used as a placeholder in various data structures
+/// and algorithms.
+static constexpr ClauseID NullClause = 0;
+
+/// A boolean formula in conjunctive normal form.
+struct CNFFormula {
+ /// `LargestVar` is equal to the largest positive integer that represents a
+ /// variable in the formula.
+ const Variable LargestVar;
+
+ /// Literals of all clauses in the formula.
+ ///
+ /// The element at index 0 stands for the literal in the null clause. It is
+ /// set to 0 and isn't used. Literals of clauses in the formula start from the
+ /// element at index 1.
+ ///
+ /// For example, for the formula `(L1 v L2) ^ (L2 v L3 v L4)` the elements of
+ /// `Clauses` will be `[0, L1, L2, L2, L3, L4]`.
+ std::vector<Literal> Clauses;
+
+ /// Start indices of clauses of the formula in `Clauses`.
+ ///
+ /// The element at index 0 stands for the start index of the null clause. It
+ /// is set to 0 and isn't used. Start indices of clauses in the formula start
+ /// from the element at index 1.
+ ///
+ /// For example, for the formula `(L1 v L2) ^ (L2 v L3 v L4)` the elements of
+ /// `ClauseStarts` will be `[0, 1, 3]`. Note that the literals of the first
+ /// clause always start at index 1. The start index for the literals of the
+ /// second clause depends on the size of the first clause and so on.
+ std::vector<size_t> ClauseStarts;
+
+ /// Maps literals (indices of the vector) to clause identifiers (elements of
+ /// the vector) that watch the respective literals.
+ ///
+ /// For a given clause, its watched literal is always its first literal in
+ /// `Clauses`. This invariant is maintained when watched literals change.
+ std::vector<ClauseID> WatchedHead;
+
+ /// Maps clause identifiers (elements of the vector) to identifiers of other
+ /// clauses that watch the same literals, forming a set of linked lists.
+ ///
+ /// The element at index 0 stands for the identifier of the clause that
+ /// follows the null clause. It is set to 0 and isn't used. Identifiers of
+ /// clauses in the formula start from the element at index 1.
+ std::vector<ClauseID> NextWatched;
+
+ /// Stores the variable identifier and Atom for atomic booleans in the
+ /// formula.
+ llvm::DenseMap<Variable, Atom> Atomics;
+
+ /// Indicates that we already know the formula is unsatisfiable.
+ /// During construction, we catch simple cases of conflicting unit-clauses.
+ bool KnownContradictory;
+
+ explicit CNFFormula(Variable LargestVar,
+ llvm::DenseMap<Variable, Atom> Atomics)
+ : LargestVar(LargestVar), Atomics(std::move(Atomics)),
+ KnownContradictory(false) {
+ Clauses.push_back(0);
+ ClauseStarts.push_back(0);
+ NextWatched.push_back(0);
+ const size_t NumLiterals = 2 * LargestVar + 1;
+ WatchedHead.resize(NumLiterals + 1, 0);
+ }
+
+ /// Adds the `L1 v ... v Ln` clause to the formula.
+ /// Requirements:
+ ///
+ /// `Li` must not be `NullLit`.
+ ///
+ /// All literals in the input that are not `NullLit` must be distinct.
+ void addClause(ArrayRef<Literal> lits) {
+ assert(!lits.empty());
+ assert(llvm::all_of(lits, [](Literal L) { return L != NullLit; }));
+
+ const ClauseID C = ClauseStarts.size();
+ const size_t S = Clauses.size();
+ ClauseStarts.push_back(S);
+ Clauses.insert(Clauses.end(), lits.begin(), lits.end());
+
+ // Designate the first literal as the "watched" literal of the clause.
+ NextWatched.push_back(WatchedHead[lits.front()]);
+ WatchedHead[lits.front()] = C;
+ }
+
+ /// Returns the number of literals in clause `C`.
+ size_t clauseSize(ClauseID C) const {
+ return C == ClauseStarts.size() - 1 ? Clauses.size() - ClauseStarts[C]
+ : ClauseStarts[C + 1] - ClauseStarts[C];
+ }
+
+ /// Returns the literals of clause `C`.
+ llvm::ArrayRef<Literal> clauseLiterals(ClauseID C) const {
+ return llvm::ArrayRef<Literal>(&Clauses[ClauseStarts[C]], clauseSize(C));
+ }
+};
+
+/// Applies simplifications while building up a BooleanFormula.
+/// We keep track of unit clauses, which tell us variables that must be
+/// true/false in any model that satisfies the overall formula.
+/// Such variables can be dropped from subsequently-added clauses, which
+/// may in turn yield more unit clauses or even a contradiction.
+/// The total added complexity of this preprocessing is O(N) where we
+/// for every clause, we do a lookup for each unit clauses.
+/// The lookup is O(1) on average. This method won't catch all
+/// contradictory formulas, more passes can in principle catch
+/// more cases but we leave all these and the general case to the
+/// proper SAT solver.
+struct CNFFormulaBuilder {
+ // Formula should outlive CNFFormulaBuilder.
+ explicit CNFFormulaBuilder(CNFFormula &CNF)
+ : Formula(CNF) {}
+
+ /// Adds the `L1 v ... v Ln` clause to the formula. Applies
+ /// simplifications, based on single-literal clauses.
+ ///
+ /// Requirements:
+ ///
+ /// `Li` must not be `NullLit`.
+ ///
+ /// All literals must be distinct.
+ void addClause(ArrayRef<Literal> Literals) {
+ // We generate clauses with up to 3 literals in this file.
+ assert(!Literals.empty() && Literals.size() <= 3);
+ // Contains literals of the simplified clause.
+ llvm::SmallVector<Literal> Simplified;
+ for (auto L : Literals) {
+ assert(L != NullLit &&
+ llvm::all_of(Simplified,
+ [L](Literal S) { return S != L; }));
+ auto X = var(L);
+ if (trueVars.contains(X)) { // X must be true
+ if (isPosLit(L))
+ return; // Omit clause `(... v X v ...)`, it is `true`.
+ else
+ continue; // Omit `!X` from `(... v !X v ...)`.
+ }
+ if (falseVars.contains(X)) { // X must be false
+ if (isNegLit(L))
+ return; // Omit clause `(... v !X v ...)`, it is `true`.
+ else
+ continue; // Omit `X` from `(... v X v ...)`.
+ }
+ Simplified.push_back(L);
+ }
+ if (Simplified.empty()) {
+ // Simplification made the clause empty, which is equivalent to `false`.
+ // We already know that this formula is unsatisfiable.
+ Formula.KnownContradictory = true;
+ // We can add any of the input literals to get an unsatisfiable formula.
+ Formula.addClause(Literals[0]);
+ return;
+ }
+ if (Simplified.size() == 1) {
+ // We have new unit clause.
+ const Literal lit = Simplified.front();
+ const Variable v = var(lit);
+ if (isPosLit(lit))
+ trueVars.insert(v);
+ else
+ falseVars.insert(v);
+ }
+ Formula.addClause(Simplified);
+ }
+
+ /// Returns true if we observed a contradiction while adding clauses.
+ /// In this case then the formula is already known to be unsatisfiable.
+ bool isKnownContradictory() { return Formula.KnownContradictory; }
+
+private:
+ CNFFormula &Formula;
+ llvm::DenseSet<Variable> trueVars;
+ llvm::DenseSet<Variable> falseVars;
+};
+
+/// Converts the conjunction of `Vals` into a formula in conjunctive normal
+/// form where each clause has at least one and at most three literals.
+CNFFormula buildCNF(const llvm::ArrayRef<const Formula *> &Vals) {
+ // The general strategy of the algorithm implemented below is to map each
+ // of the sub-values in `Vals` to a unique variable and use these variables in
+ // the resulting CNF expression to avoid exponential blow up. The number of
+ // literals in the resulting formula is guaranteed to be linear in the number
+ // of sub-formulas in `Vals`.
+
+ // Map each sub-formula in `Vals` to a unique variable.
+ llvm::DenseMap<const Formula *, Variable> SubValsToVar;
+ // Store variable identifiers and Atom of atomic booleans.
+ llvm::DenseMap<Variable, Atom> Atomics;
+ Variable NextVar = 1;
+ {
+ std::queue<const Formula *> UnprocessedSubVals;
+ for (const Formula *Val : Vals)
+ UnprocessedSubVals.push(Val);
+ while (!UnprocessedSubVals.empty()) {
+ Variable Var = NextVar;
+ const Formula *Val = UnprocessedSubVals.front();
+ UnprocessedSubVals.pop();
+
+ if (!SubValsToVar.try_emplace(Val, Var).second)
+ continue;
+ ++NextVar;
+
+ for (const Formula *F : Val->operands())
+ UnprocessedSubVals.push(F);
+ if (Val->kind() == Formula::AtomRef)
+ Atomics[Var] = Val->getAtom();
+ }
+ }
+
+ auto GetVar = [&SubValsToVar](const Formula *Val) {
+ auto ValIt = SubValsToVar.find(Val);
+ assert(ValIt != SubValsToVar.end());
+ return ValIt->second;
+ };
+
+ CNFFormula CNF(NextVar - 1, std::move(Atomics));
+ std::vector<bool> ProcessedSubVals(NextVar, false);
+ CNFFormulaBuilder builder(CNF);
+
+ // Add a conjunct for each variable that represents a top-level conjunction
+ // value in `Vals`.
+ for (const Formula *Val : Vals)
+ builder.addClause(posLit(GetVar(Val)));
+
+ // Add conjuncts that represent the mapping between newly-created variables
+ // and their corresponding sub-formulas.
+ std::queue<const Formula *> UnprocessedSubVals;
+ for (const Formula *Val : Vals)
+ UnprocessedSubVals.push(Val);
+ while (!UnprocessedSubVals.empty()) {
+ const Formula *Val = UnprocessedSubVals.front();
+ UnprocessedSubVals.pop();
+ const Variable Var = GetVar(Val);
+
+ if (ProcessedSubVals[Var])
+ continue;
+ ProcessedSubVals[Var] = true;
+
+ switch (Val->kind()) {
+ case Formula::AtomRef:
+ break;
+ case Formula::Literal:
+ CNF.addClause(Val->literal() ? posLit(Var) : negLit(Var));
+ break;
+ case Formula::And: {
+ const Variable LHS = GetVar(Val->operands()[0]);
+ const Variable RHS = GetVar(Val->operands()[1]);
+
+ if (LHS == RHS) {
+ // `X <=> (A ^ A)` is equivalent to `(!X v A) ^ (X v !A)` which is
+ // already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ builder.addClause({negLit(Var), posLit(LHS)});
+ builder.addClause({posLit(Var), negLit(LHS)});
+ } else {
+ // `X <=> (A ^ B)` is equivalent to `(!X v A) ^ (!X v B) ^ (X v !A v
+ // !B)` which is already in conjunctive normal form. Below we add each
+ // of the conjuncts of the latter expression to the result.
+ builder.addClause({negLit(Var), posLit(LHS)});
+ builder.addClause({negLit(Var), posLit(RHS)});
+ builder.addClause({posLit(Var), negLit(LHS), negLit(RHS)});
+ }
+ break;
+ }
+ case Formula::Or: {
+ const Variable LHS = GetVar(Val->operands()[0]);
+ const Variable RHS = GetVar(Val->operands()[1]);
+
+ if (LHS == RHS) {
+ // `X <=> (A v A)` is equivalent to `(!X v A) ^ (X v !A)` which is
+ // already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ builder.addClause({negLit(Var), posLit(LHS)});
+ builder.addClause({posLit(Var), negLit(LHS)});
+ } else {
+ // `X <=> (A v B)` is equivalent to `(!X v A v B) ^ (X v !A) ^ (X v
+ // !B)` which is already in conjunctive normal form. Below we add each
+ // of the conjuncts of the latter expression to the result.
+ builder.addClause({negLit(Var), posLit(LHS), posLit(RHS)});
+ builder.addClause({posLit(Var), negLit(LHS)});
+ builder.addClause({posLit(Var), negLit(RHS)});
+ }
+ break;
+ }
+ case Formula::Not: {
+ const Variable Operand = GetVar(Val->operands()[0]);
+
+ // `X <=> !Y` is equivalent to `(!X v !Y) ^ (X v Y)` which is
+ // already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ builder.addClause({negLit(Var), negLit(Operand)});
+ builder.addClause({posLit(Var), posLit(Operand)});
+ break;
+ }
+ case Formula::Implies: {
+ const Variable LHS = GetVar(Val->operands()[0]);
+ const Variable RHS = GetVar(Val->operands()[1]);
+
+ // `X <=> (A => B)` is equivalent to
+ // `(X v A) ^ (X v !B) ^ (!X v !A v B)` which is already in
+ // conjunctive normal form. Below we add each of the conjuncts of
+ // the latter expression to the result.
+ builder.addClause({posLit(Var), posLit(LHS)});
+ builder.addClause({posLit(Var), negLit(RHS)});
+ builder.addClause({negLit(Var), negLit(LHS), posLit(RHS)});
+ break;
+ }
+ case Formula::Equal: {
+ const Variable LHS = GetVar(Val->operands()[0]);
+ const Variable RHS = GetVar(Val->operands()[1]);
+
+ if (LHS == RHS) {
+ // `X <=> (A <=> A)` is equivalent to `X` which is already in
+ // conjunctive normal form. Below we add each of the conjuncts of the
+ // latter expression to the result.
+ builder.addClause(posLit(Var));
+
+ // No need to visit the sub-values of `Val`.
+ continue;
+ }
+ // `X <=> (A <=> B)` is equivalent to
+ // `(X v A v B) ^ (X v !A v !B) ^ (!X v A v !B) ^ (!X v !A v B)` which
+ // is already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ builder.addClause({posLit(Var), posLit(LHS), posLit(RHS)});
+ builder.addClause({posLit(Var), negLit(LHS), negLit(RHS)});
+ builder.addClause({negLit(Var), posLit(LHS), negLit(RHS)});
+ builder.addClause({negLit(Var), negLit(LHS), posLit(RHS)});
+ break;
+ }
+ }
+ if (builder.isKnownContradictory()) {
+ return CNF;
+ }
+ for (const Formula *Child : Val->operands())
+ UnprocessedSubVals.push(Child);
+ }
+
+ // Unit clauses that were added later were not
+ // considered for the simplification of earlier clauses. Do a final
+ // pass to find more opportunities for simplification.
+ CNFFormula FinalCNF(NextVar - 1, std::move(CNF.Atomics));
+ CNFFormulaBuilder FinalBuilder(FinalCNF);
+
+ // Collect unit clauses.
+ for (ClauseID C = 1; C < CNF.ClauseStarts.size(); ++C) {
+ if (CNF.clauseSize(C) == 1) {
+ FinalBuilder.addClause(CNF.clauseLiterals(C)[0]);
+ }
+ }
+
+ // Add all clauses that were added previously, preserving the order.
+ for (ClauseID C = 1; C < CNF.ClauseStarts.size(); ++C) {
+ FinalBuilder.addClause(CNF.clauseLiterals(C));
+ if (FinalBuilder.isKnownContradictory()) {
+ break;
+ }
+ }
+ // It is possible there were new unit clauses again, but
+ // we stop here and leave the rest to the solver algorithm.
+ return FinalCNF;
+}
+
+class WatchedLiteralsSolverImpl {
+ /// A boolean formula in conjunctive normal form that the solver will attempt
+ /// to prove satisfiable. The formula will be modified in the process.
+ CNFFormula CNF;
+
+ /// The search for a satisfying assignment of the variables in `Formula` will
+ /// proceed in levels, starting from 1 and going up to `Formula.LargestVar`
+ /// (inclusive). The current level is stored in `Level`. At each level the
+ /// solver will assign a value to an unassigned variable. If this leads to a
+ /// consistent partial assignment, `Level` will be incremented. Otherwise, if
+ /// it results in a conflict, the solver will backtrack by decrementing
+ /// `Level` until it reaches the most recent level where a decision was made.
+ size_t Level = 0;
+
+ /// Maps levels (indices of the vector) to variables (elements of the vector)
+ /// that are assigned values at the respective levels.
+ ///
+ /// The element at index 0 isn't used. Variables start from the element at
+ /// index 1.
+ std::vector<Variable> LevelVars;
+
+ /// State of the solver at a particular level.
+ enum class State : uint8_t {
+ /// Indicates that the solver made a decision.
+ Decision = 0,
+
+ /// Indicates that the solver made a forced move.
+ Forced = 1,
+ };
+
+ /// State of the solver at a particular level. It keeps track of previous
+ /// decisions that the solver can refer to when backtracking.
+ ///
+ /// The element at index 0 isn't used. States start from the element at index
+ /// 1.
+ std::vector<State> LevelStates;
+
+ enum class Assignment : int8_t {
+ Unassigned = -1,
+ AssignedFalse = 0,
+ AssignedTrue = 1
+ };
+
+ /// Maps variables (indices of the vector) to their assignments (elements of
+ /// the vector).
+ ///
+ /// The element at index 0 isn't used. Variable assignments start from the
+ /// element at index 1.
+ std::vector<Assignment> VarAssignments;
+
+ /// A set of unassigned variables that appear in watched literals in
+ /// `Formula`. The vector is guaranteed to contain unique elements.
+ std::vector<Variable> ActiveVars;
+
+public:
+ explicit WatchedLiteralsSolverImpl(
+ const llvm::ArrayRef<const Formula *> &Vals)
+ : CNF(buildCNF(Vals)), LevelVars(CNF.LargestVar + 1),
+ LevelStates(CNF.LargestVar + 1) {
+ assert(!Vals.empty());
+
+ // Initialize the state at the root level to a decision so that in
+ // `reverseForcedMoves` we don't have to check that `Level >= 0` on each
+ // iteration.
+ LevelStates[0] = State::Decision;
+
+ // Initialize all variables as unassigned.
+ VarAssignments.resize(CNF.LargestVar + 1, Assignment::Unassigned);
+
+ // Initialize the active variables.
+ for (Variable Var = CNF.LargestVar; Var != NullVar; --Var) {
+ if (isWatched(posLit(Var)) || isWatched(negLit(Var)))
+ ActiveVars.push_back(Var);
+ }
+ }
+
+ // Returns the `Result` and the number of iterations "remaining" from
+ // `MaxIterations` (that is, `MaxIterations` - iterations in this call).
+ std::pair<Solver::Result, std::int64_t> solve(std::int64_t MaxIterations) && {
+ if (CNF.KnownContradictory) {
+ // Short-cut the solving process. We already found out at CNF
+ // construction time that the formula is unsatisfiable.
+ return std::make_pair(Solver::Result::Unsatisfiable(), MaxIterations);
+ }
+ size_t I = 0;
+ while (I < ActiveVars.size()) {
+ if (MaxIterations == 0)
+ return std::make_pair(Solver::Result::TimedOut(), 0);
+ --MaxIterations;
+
+ // Assert that the following invariants hold:
+ // 1. All active variables are unassigned.
+ // 2. All active variables form watched literals.
+ // 3. Unassigned variables that form watched literals are active.
+ // FIXME: Consider replacing these with test cases that fail if the any
+ // of the invariants is broken. That might not be easy due to the
+ // transformations performed by `buildCNF`.
+ assert(activeVarsAreUnassigned());
+ assert(activeVarsFormWatchedLiterals());
+ assert(unassignedVarsFormingWatchedLiteralsAreActive());
+
+ const Variable ActiveVar = ActiveVars[I];
+
+ // Look for unit clauses that contain the active variable.
+ const bool unitPosLit = watchedByUnitClause(posLit(ActiveVar));
+ const bool unitNegLit = watchedByUnitClause(negLit(ActiveVar));
+ if (unitPosLit && unitNegLit) {
+ // We found a conflict!
+
+ // Backtrack and rewind the `Level` until the most recent non-forced
+ // assignment.
+ reverseForcedMoves();
+
+ // If the root level is reached, then all possible assignments lead to
+ // a conflict.
+ if (Level == 0)
+ return std::make_pair(Solver::Result::Unsatisfiable(), MaxIterations);
+
+ // Otherwise, take the other branch at the most recent level where a
+ // decision was made.
+ LevelStates[Level] = State::Forced;
+ const Variable Var = LevelVars[Level];
+ VarAssignments[Var] = VarAssignments[Var] == Assignment::AssignedTrue
+ ? Assignment::AssignedFalse
+ : Assignment::AssignedTrue;
+
+ updateWatchedLiterals();
+ } else if (unitPosLit || unitNegLit) {
+ // We found a unit clause! The value of its unassigned variable is
+ // forced.
+ ++Level;
+
+ LevelVars[Level] = ActiveVar;
+ LevelStates[Level] = State::Forced;
+ VarAssignments[ActiveVar] =
+ unitPosLit ? Assignment::AssignedTrue : Assignment::AssignedFalse;
+
+ // Remove the variable that was just assigned from the set of active
+ // variables.
+ if (I + 1 < ActiveVars.size()) {
+ // Replace the variable that was just assigned with the last active
+ // variable for efficient removal.
+ ActiveVars[I] = ActiveVars.back();
+ } else {
+ // This was the last active variable. Repeat the process from the
+ // beginning.
+ I = 0;
+ }
+ ActiveVars.pop_back();
+
+ updateWatchedLiterals();
+ } else if (I + 1 == ActiveVars.size()) {
+ // There are no remaining unit clauses in the formula! Make a decision
+ // for one of the active variables at the current level.
+ ++Level;
+
+ LevelVars[Level] = ActiveVar;
+ LevelStates[Level] = State::Decision;
+ VarAssignments[ActiveVar] = decideAssignment(ActiveVar);
+
+ // Remove the variable that was just assigned from the set of active
+ // variables.
+ ActiveVars.pop_back();
+
+ updateWatchedLiterals();
+
+ // This was the last active variable. Repeat the process from the
+ // beginning.
+ I = 0;
+ } else {
+ ++I;
+ }
+ }
+ return std::make_pair(Solver::Result::Satisfiable(buildSolution()),
+ MaxIterations);
+ }
+
+private:
+ /// Returns a satisfying truth assignment to the atoms in the boolean formula.
+ llvm::DenseMap<Atom, Solver::Result::Assignment> buildSolution() {
+ llvm::DenseMap<Atom, Solver::Result::Assignment> Solution;
+ for (auto &Atomic : CNF.Atomics) {
+ // A variable may have a definite true/false assignment, or it may be
+ // unassigned indicating its truth value does not affect the result of
+ // the formula. Unassigned variables are assigned to true as a default.
+ Solution[Atomic.second] =
+ VarAssignments[Atomic.first] == Assignment::AssignedFalse
+ ? Solver::Result::Assignment::AssignedFalse
+ : Solver::Result::Assignment::AssignedTrue;
+ }
+ return Solution;
+ }
+
+ /// Reverses forced moves until the most recent level where a decision was
+ /// made on the assignment of a variable.
+ void reverseForcedMoves() {
+ for (; LevelStates[Level] == State::Forced; --Level) {
+ const Variable Var = LevelVars[Level];
+
+ VarAssignments[Var] = Assignment::Unassigned;
+
+ // If the variable that we pass through is watched then we add it to the
+ // active variables.
+ if (isWatched(posLit(Var)) || isWatched(negLit(Var)))
+ ActiveVars.push_back(Var);
+ }
+ }
+
+ /// Updates watched literals that are affected by a variable assignment.
+ void updateWatchedLiterals() {
+ const Variable Var = LevelVars[Level];
+
+ // Update the watched literals of clauses that currently watch the literal
+ // that falsifies `Var`.
+ const Literal FalseLit = VarAssignments[Var] == Assignment::AssignedTrue
+ ? negLit(Var)
+ : posLit(Var);
+ ClauseID FalseLitWatcher = CNF.WatchedHead[FalseLit];
+ CNF.WatchedHead[FalseLit] = NullClause;
+ while (FalseLitWatcher != NullClause) {
+ const ClauseID NextFalseLitWatcher = CNF.NextWatched[FalseLitWatcher];
+
+ // Pick the first non-false literal as the new watched literal.
+ const size_t FalseLitWatcherStart = CNF.ClauseStarts[FalseLitWatcher];
+ size_t NewWatchedLitIdx = FalseLitWatcherStart + 1;
+ while (isCurrentlyFalse(CNF.Clauses[NewWatchedLitIdx]))
+ ++NewWatchedLitIdx;
+ const Literal NewWatchedLit = CNF.Clauses[NewWatchedLitIdx];
+ const Variable NewWatchedLitVar = var(NewWatchedLit);
+
+ // Swap the old watched literal for the new one in `FalseLitWatcher` to
+ // maintain the invariant that the watched literal is at the beginning of
+ // the clause.
+ CNF.Clauses[NewWatchedLitIdx] = FalseLit;
+ CNF.Clauses[FalseLitWatcherStart] = NewWatchedLit;
+
+ // If the new watched literal isn't watched by any other clause and its
+ // variable isn't assigned we need to add it to the active variables.
+ if (!isWatched(NewWatchedLit) && !isWatched(notLit(NewWatchedLit)) &&
+ VarAssignments[NewWatchedLitVar] == Assignment::Unassigned)
+ ActiveVars.push_back(NewWatchedLitVar);
+
+ CNF.NextWatched[FalseLitWatcher] = CNF.WatchedHead[NewWatchedLit];
+ CNF.WatchedHead[NewWatchedLit] = FalseLitWatcher;
+
+ // Go to the next clause that watches `FalseLit`.
+ FalseLitWatcher = NextFalseLitWatcher;
+ }
+ }
+
+ /// Returns true if and only if one of the clauses that watch `Lit` is a unit
+ /// clause.
+ bool watchedByUnitClause(Literal Lit) const {
+ for (ClauseID LitWatcher = CNF.WatchedHead[Lit]; LitWatcher != NullClause;
+ LitWatcher = CNF.NextWatched[LitWatcher]) {
+ llvm::ArrayRef<Literal> Clause = CNF.clauseLiterals(LitWatcher);
+
+ // Assert the invariant that the watched literal is always the first one
+ // in the clause.
+ // FIXME: Consider replacing this with a test case that fails if the
+ // invariant is broken by `updateWatchedLiterals`. That might not be easy
+ // due to the transformations performed by `buildCNF`.
+ assert(Clause.front() == Lit);
+
+ if (isUnit(Clause))
+ return true;
+ }
+ return false;
+ }
+
+ /// Returns true if and only if `Clause` is a unit clause.
+ bool isUnit(llvm::ArrayRef<Literal> Clause) const {
+ return llvm::all_of(Clause.drop_front(),
+ [this](Literal L) { return isCurrentlyFalse(L); });
+ }
+
+ /// Returns true if and only if `Lit` evaluates to `false` in the current
+ /// partial assignment.
+ bool isCurrentlyFalse(Literal Lit) const {
+ return static_cast<int8_t>(VarAssignments[var(Lit)]) ==
+ static_cast<int8_t>(Lit & 1);
+ }
+
+ /// Returns true if and only if `Lit` is watched by a clause in `Formula`.
+ bool isWatched(Literal Lit) const {
+ return CNF.WatchedHead[Lit] != NullClause;
+ }
+
+ /// Returns an assignment for an unassigned variable.
+ Assignment decideAssignment(Variable Var) const {
+ return !isWatched(posLit(Var)) || isWatched(negLit(Var))
+ ? Assignment::AssignedFalse
+ : Assignment::AssignedTrue;
+ }
+
+ /// Returns a set of all watched literals.
+ llvm::DenseSet<Literal> watchedLiterals() const {
+ llvm::DenseSet<Literal> WatchedLiterals;
+ for (Literal Lit = 2; Lit < CNF.WatchedHead.size(); Lit++) {
+ if (CNF.WatchedHead[Lit] == NullClause)
+ continue;
+ WatchedLiterals.insert(Lit);
+ }
+ return WatchedLiterals;
+ }
+
+ /// Returns true if and only if all active variables are unassigned.
+ bool activeVarsAreUnassigned() const {
+ return llvm::all_of(ActiveVars, [this](Variable Var) {
+ return VarAssignments[Var] == Assignment::Unassigned;
+ });
+ }
+
+ /// Returns true if and only if all active variables form watched literals.
+ bool activeVarsFormWatchedLiterals() const {
+ const llvm::DenseSet<Literal> WatchedLiterals = watchedLiterals();
+ return llvm::all_of(ActiveVars, [&WatchedLiterals](Variable Var) {
+ return WatchedLiterals.contains(posLit(Var)) ||
+ WatchedLiterals.contains(negLit(Var));
+ });
+ }
+
+ /// Returns true if and only if all unassigned variables that are forming
+ /// watched literals are active.
+ bool unassignedVarsFormingWatchedLiteralsAreActive() const {
+ const llvm::DenseSet<Variable> ActiveVarsSet(ActiveVars.begin(),
+ ActiveVars.end());
+ for (Literal Lit : watchedLiterals()) {
+ const Variable Var = var(Lit);
+ if (VarAssignments[Var] != Assignment::Unassigned)
+ continue;
+ if (ActiveVarsSet.contains(Var))
+ continue;
+ return false;
+ }
+ return true;
+ }
+};
+
+Solver::Result
+WatchedLiteralsSolver::solve(llvm::ArrayRef<const Formula *> Vals) {
+ if (Vals.empty())
+ return Solver::Result::Satisfiable({{}});
+ auto [Res, Iterations] = WatchedLiteralsSolverImpl(Vals).solve(MaxIterations);
+ MaxIterations = Iterations;
+ return Res;
+}
+
+} // namespace dataflow
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Analysis/IntervalPartition.cpp b/contrib/llvm-project/clang/lib/Analysis/IntervalPartition.cpp
new file mode 100644
index 000000000000..5f06606ec132
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/IntervalPartition.cpp
@@ -0,0 +1,241 @@
+//===- IntervalPartition.cpp - CFG Partitioning into Intervals --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functionality for partitioning a CFG into intervals.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/IntervalPartition.h"
+#include "clang/Analysis/CFG.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include <optional>
+#include <queue>
+#include <vector>
+
+namespace clang {
+
+// Intermediate data used in constructing a CFGIntervalNode.
+template <typename Node> struct BuildResult {
+ // Use a vector to maintain the insertion order. Given the expected small
+ // number of nodes, vector should be sufficiently efficient. Elements must not
+ // be null.
+ std::vector<const Node *> Nodes;
+ // Elements must not be null.
+ llvm::SmallDenseSet<const Node *> Successors;
+};
+
+namespace internal {
+static unsigned getID(const CFGBlock &B) { return B.getBlockID(); }
+static unsigned getID(const CFGIntervalNode &I) { return I.ID; }
+
+// `Node` must be one of `CFGBlock` or `CFGIntervalNode`.
+template <typename Node>
+BuildResult<Node> buildInterval(llvm::BitVector &Partitioned,
+ const Node *Header) {
+ assert(Header != nullptr);
+ BuildResult<Node> Interval;
+ Interval.Nodes.push_back(Header);
+ Partitioned.set(getID(*Header));
+
+ // FIXME: Compare performance against using RPO to consider nodes, rather than
+ // following successors.
+ //
+ // Elements must not be null. Duplicates are prevented using `Workset`, below.
+ std::queue<const Node *> Worklist;
+ llvm::BitVector Workset(Partitioned.size(), false);
+ for (const Node *S : Header->succs())
+ if (S != nullptr)
+ if (auto SID = getID(*S); !Partitioned.test(SID)) {
+ // Successors are unique, so we don't test against `Workset` before
+ // adding to `Worklist`.
+ Worklist.push(S);
+ Workset.set(SID);
+ }
+
+ // Contains successors of blocks in the interval that couldn't be added to the
+ // interval on their first encounter. This occurs when they have a predecessor
+ // that is either definitively outside the interval or hasn't been considered
+ // yet. In the latter case, we'll revisit the block through some other path
+ // from the interval. At the end of processing the worklist, we filter out any
+ // that ended up in the interval to produce the output set of interval
+ // successors. Elements are never null.
+ std::vector<const Node *> MaybeSuccessors;
+
+ while (!Worklist.empty()) {
+ const auto *B = Worklist.front();
+ auto ID = getID(*B);
+ Worklist.pop();
+ Workset.reset(ID);
+
+ // Check whether all predecessors are in the interval, in which case `B`
+ // is included as well.
+ bool AllInInterval = llvm::all_of(B->preds(), [&](const Node *P) {
+ return llvm::is_contained(Interval.Nodes, P);
+ });
+ if (AllInInterval) {
+ Interval.Nodes.push_back(B);
+ Partitioned.set(ID);
+ for (const Node *S : B->succs())
+ if (S != nullptr)
+ if (auto SID = getID(*S);
+ !Partitioned.test(SID) && !Workset.test(SID)) {
+ Worklist.push(S);
+ Workset.set(SID);
+ }
+ } else {
+ MaybeSuccessors.push_back(B);
+ }
+ }
+
+ // Any block successors not in the current interval are interval successors.
+ for (const Node *B : MaybeSuccessors)
+ if (!llvm::is_contained(Interval.Nodes, B))
+ Interval.Successors.insert(B);
+
+ return Interval;
+}
+
+template <typename Node>
+void fillIntervalNode(CFGIntervalGraph &Graph,
+ std::vector<CFGIntervalNode *> &Index,
+ std::queue<const Node *> &Successors,
+ llvm::BitVector &Partitioned, const Node *Header) {
+ BuildResult<Node> Result = buildInterval(Partitioned, Header);
+ for (const auto *S : Result.Successors)
+ Successors.push(S);
+
+ CFGIntervalNode &Interval = Graph.emplace_back(Graph.size());
+
+ // Index the nodes of the new interval. The index maps nodes from the input
+ // graph (specifically, `Result.Nodes`) to identifiers of nodes in the output
+ // graph. In this case, the new interval has identifier `ID` so all of its
+ // nodes (`Result.Nodes`) map to `ID`.
+ for (const auto *N : Result.Nodes) {
+ assert(N != nullptr);
+ assert(getID(*N) < Index.size());
+ Index[getID(*N)] = &Interval;
+ }
+
+ if constexpr (std::is_same_v<std::decay_t<Node>, CFGBlock>)
+ Interval.Nodes = std::move(Result.Nodes);
+ else {
+ std::vector<const CFGBlock *> Nodes;
+ // Flatten the sub vectors into a single list.
+ size_t Count = 0;
+ for (auto &N : Result.Nodes)
+ Count += N->Nodes.size();
+ Nodes.reserve(Count);
+ for (auto &N : Result.Nodes)
+ Nodes.insert(Nodes.end(), N->Nodes.begin(), N->Nodes.end());
+ Interval.Nodes = std::move(Nodes);
+ }
+}
+
+template <typename Node>
+CFGIntervalGraph partitionIntoIntervalsImpl(unsigned NumBlockIDs,
+ const Node *EntryBlock) {
+ assert(EntryBlock != nullptr);
+ CFGIntervalGraph Graph;
+ // `Index` maps all of the nodes of the input graph to the interval to which
+ // they are assigned in the output graph. The values (interval pointers) are
+ // never null.
+ std::vector<CFGIntervalNode *> Index(NumBlockIDs, nullptr);
+
+ // Lists header nodes (from the input graph) and their associated
+ // interval. Since header nodes can vary in type and are only needed within
+ // this function, we record them separately from `CFGIntervalNode`. This
+ // choice enables to express `CFGIntervalNode` without using a variant.
+ std::vector<std::pair<const Node *, CFGIntervalNode *>> Intervals;
+ llvm::BitVector Partitioned(NumBlockIDs, false);
+ std::queue<const Node *> Successors;
+
+ fillIntervalNode(Graph, Index, Successors, Partitioned, EntryBlock);
+ Intervals.emplace_back(EntryBlock, &Graph.back());
+
+ while (!Successors.empty()) {
+ const auto *B = Successors.front();
+ Successors.pop();
+ assert(B != nullptr);
+ if (Partitioned.test(getID(*B)))
+ continue;
+
+ // B has not been partitioned, but it has a predecessor that has. Create a
+ // new interval from `B`.
+ fillIntervalNode(Graph, Index, Successors, Partitioned, B);
+ Intervals.emplace_back(B, &Graph.back());
+ }
+
+ // Go back and patch up all the Intervals -- the successors and predecessors.
+ for (auto [H, N] : Intervals) {
+ // Map input-graph predecessors to output-graph nodes and mark those as
+ // predecessors of `N`. Then, mark `N` as a successor of said predecessor.
+ for (const Node *P : H->preds()) {
+ if (P == nullptr)
+ continue;
+
+ assert(getID(*P) < NumBlockIDs);
+ CFGIntervalNode *Pred = Index[getID(*P)];
+ if (Pred == nullptr)
+ // Unreachable node.
+ continue;
+ if (Pred != N // Not a backedge.
+ && N->Predecessors.insert(Pred).second)
+ // Note: given the guard above, which guarantees we only ever insert
+ // unique elements, we could use a simple list (like `vector`) for
+ // `Successors`, rather than a set.
+ Pred->Successors.insert(N);
+ }
+ }
+
+ return Graph;
+}
+
+std::vector<const CFGBlock *> buildInterval(const CFGBlock *Header) {
+ llvm::BitVector Partitioned(Header->getParent()->getNumBlockIDs(), false);
+ return buildInterval(Partitioned, Header).Nodes;
+}
+
+CFGIntervalGraph partitionIntoIntervals(const CFG &Cfg) {
+ return partitionIntoIntervalsImpl(Cfg.getNumBlockIDs(), &Cfg.getEntry());
+}
+
+CFGIntervalGraph partitionIntoIntervals(const CFGIntervalGraph &Graph) {
+ return partitionIntoIntervalsImpl(Graph.size(), &Graph[0]);
+}
+} // namespace internal
+
+std::optional<std::vector<const CFGBlock *>> getIntervalWTO(const CFG &Cfg) {
+ // Backing storage for the allocated nodes in each graph.
+ unsigned PrevSize = Cfg.size();
+ if (PrevSize == 0)
+ return {};
+ internal::CFGIntervalGraph Graph = internal::partitionIntoIntervals(Cfg);
+ unsigned Size = Graph.size();
+ while (Size > 1 && Size < PrevSize) {
+ PrevSize = Graph.size();
+ Graph = internal::partitionIntoIntervals(Graph);
+ Size = Graph.size();
+ }
+ if (Size > 1)
+ // Not reducible.
+ return std::nullopt;
+
+ assert(Size != 0);
+ return std::move(Graph[0].Nodes);
+}
+
+WTOCompare::WTOCompare(const WeakTopologicalOrdering &WTO) {
+ if (WTO.empty())
+ return;
+ auto N = WTO[0]->getParent()->getNumBlockIDs();
+ BlockOrder.resize(N, 0);
+ for (unsigned I = 0, S = WTO.size(); I < S; ++I)
+ BlockOrder[WTO[I]->getBlockID()] = I + 1;
+}
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Analysis/IssueHash.cpp b/contrib/llvm-project/clang/lib/Analysis/IssueHash.cpp
index 94816747668d..4d56e774b76a 100644
--- a/contrib/llvm-project/clang/lib/Analysis/IssueHash.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/IssueHash.cpp
@@ -21,6 +21,7 @@
#include "llvm/Support/Path.h"
#include <functional>
+#include <optional>
#include <sstream>
#include <string>
@@ -121,7 +122,7 @@ static std::string GetEnclosingDeclContextSignature(const Decl *D) {
return "";
}
-static StringRef GetNthLineOfFile(llvm::Optional<llvm::MemoryBufferRef> Buffer,
+static StringRef GetNthLineOfFile(std::optional<llvm::MemoryBufferRef> Buffer,
int Line) {
if (!Buffer)
return "";
@@ -146,7 +147,7 @@ static std::string NormalizeLine(const SourceManager &SM, const FullSourceLoc &L
col++;
SourceLocation StartOfLine =
SM.translateLineCol(SM.getFileID(L), L.getExpansionLineNumber(), col);
- Optional<llvm::MemoryBufferRef> Buffer =
+ std::optional<llvm::MemoryBufferRef> Buffer =
SM.getBufferOrNone(SM.getFileID(StartOfLine), StartOfLine);
if (!Buffer)
return {};
diff --git a/contrib/llvm-project/clang/lib/Analysis/LiveVariables.cpp b/contrib/llvm-project/clang/lib/Analysis/LiveVariables.cpp
index 6c601c290c92..6d03dd05ca3d 100644
--- a/contrib/llvm-project/clang/lib/Analysis/LiveVariables.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/LiveVariables.cpp
@@ -19,6 +19,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
+#include <optional>
#include <vector>
using namespace clang;
@@ -72,6 +73,11 @@ bool LiveVariables::LivenessValues::isLive(const VarDecl *D) const {
bool alive = false;
for (const BindingDecl *BD : DD->bindings())
alive |= liveBindings.contains(BD);
+
+ // Note: the only known case this condition is necessary, is when a bindig
+ // to a tuple-like structure is created. The HoldingVar initializers have a
+ // DeclRefExpr to the DecompositionDecl.
+ alive |= liveDecls.contains(DD);
return alive;
}
return liveDecls.contains(D);
@@ -343,8 +349,12 @@ void TransferFunctions::VisitBinaryOperator(BinaryOperator *B) {
if (const BindingDecl* BD = dyn_cast<BindingDecl>(D)) {
Killed = !BD->getType()->isReferenceType();
- if (Killed)
+ if (Killed) {
+ if (const auto *HV = BD->getHoldingVar())
+ val.liveDecls = LV.DSetFact.remove(val.liveDecls, HV);
+
val.liveBindings = LV.BSetFact.remove(val.liveBindings, BD);
+ }
} else if (const auto *VD = dyn_cast<VarDecl>(D)) {
Killed = writeShouldKill(VD);
if (Killed)
@@ -371,8 +381,12 @@ void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *DR) {
const Decl* D = DR->getDecl();
bool InAssignment = LV.inAssignment[DR];
if (const auto *BD = dyn_cast<BindingDecl>(D)) {
- if (!InAssignment)
+ if (!InAssignment) {
+ if (const auto *HV = BD->getHoldingVar())
+ val.liveDecls = LV.DSetFact.add(val.liveDecls, HV);
+
val.liveBindings = LV.BSetFact.add(val.liveBindings, BD);
+ }
} else if (const auto *VD = dyn_cast<VarDecl>(D)) {
if (!InAssignment && !isAlwaysAlive(VD))
val.liveDecls = LV.DSetFact.add(val.liveDecls, VD);
@@ -382,8 +396,16 @@ void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *DR) {
void TransferFunctions::VisitDeclStmt(DeclStmt *DS) {
for (const auto *DI : DS->decls()) {
if (const auto *DD = dyn_cast<DecompositionDecl>(DI)) {
- for (const auto *BD : DD->bindings())
+ for (const auto *BD : DD->bindings()) {
+ if (const auto *HV = BD->getHoldingVar())
+ val.liveDecls = LV.DSetFact.remove(val.liveDecls, HV);
+
val.liveBindings = LV.BSetFact.remove(val.liveBindings, BD);
+ }
+
+ // When a bindig to a tuple-like structure is created, the HoldingVar
+ // initializers have a DeclRefExpr to the DecompositionDecl.
+ val.liveDecls = LV.DSetFact.remove(val.liveDecls, DD);
} else if (const auto *VD = dyn_cast<VarDecl>(DI)) {
if (!isAlwaysAlive(VD))
val.liveDecls = LV.DSetFact.remove(val.liveDecls, VD);
@@ -469,7 +491,7 @@ LiveVariablesImpl::runOnBlock(const CFGBlock *block,
ei = block->rend(); it != ei; ++it) {
const CFGElement &elem = *it;
- if (Optional<CFGAutomaticObjDtor> Dtor =
+ if (std::optional<CFGAutomaticObjDtor> Dtor =
elem.getAs<CFGAutomaticObjDtor>()) {
val.liveDecls = DSetFact.add(val.liveDecls, Dtor->getVarDecl());
continue;
diff --git a/contrib/llvm-project/clang/lib/Analysis/MacroExpansionContext.cpp b/contrib/llvm-project/clang/lib/Analysis/MacroExpansionContext.cpp
index 290510691891..564e359668a5 100644
--- a/contrib/llvm-project/clang/lib/Analysis/MacroExpansionContext.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/MacroExpansionContext.cpp
@@ -8,6 +8,7 @@
#include "clang/Analysis/MacroExpansionContext.h"
#include "llvm/Support/Debug.h"
+#include <optional>
#define DEBUG_TYPE "macro-expansion-context"
@@ -96,14 +97,14 @@ void MacroExpansionContext::registerForPreprocessor(Preprocessor &NewPP) {
PP->setTokenWatcher([this](const Token &Tok) { onTokenLexed(Tok); });
}
-Optional<StringRef>
+std::optional<StringRef>
MacroExpansionContext::getExpandedText(SourceLocation MacroExpansionLoc) const {
if (MacroExpansionLoc.isMacroID())
- return llvm::None;
+ return std::nullopt;
- // If there was no macro expansion at that location, return None.
+ // If there was no macro expansion at that location, return std::nullopt.
if (ExpansionRanges.find_as(MacroExpansionLoc) == ExpansionRanges.end())
- return llvm::None;
+ return std::nullopt;
// There was macro expansion, but resulted in no tokens, return empty string.
const auto It = ExpandedTokens.find_as(MacroExpansionLoc);
@@ -114,14 +115,14 @@ MacroExpansionContext::getExpandedText(SourceLocation MacroExpansionLoc) const {
return It->getSecond().str();
}
-Optional<StringRef>
+std::optional<StringRef>
MacroExpansionContext::getOriginalText(SourceLocation MacroExpansionLoc) const {
if (MacroExpansionLoc.isMacroID())
- return llvm::None;
+ return std::nullopt;
const auto It = ExpansionRanges.find_as(MacroExpansionLoc);
if (It == ExpansionRanges.end())
- return llvm::None;
+ return std::nullopt;
assert(It->getFirst() != It->getSecond() &&
"Every macro expansion must cover a non-empty range.");
diff --git a/contrib/llvm-project/clang/lib/Analysis/ObjCNoReturn.cpp b/contrib/llvm-project/clang/lib/Analysis/ObjCNoReturn.cpp
index fe1edb496859..9d7c365c3b99 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ObjCNoReturn.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ObjCNoReturn.cpp
@@ -54,12 +54,9 @@ bool ObjCNoReturn::isImplicitNoReturn(const ObjCMessageExpr *ME) {
}
if (const ObjCInterfaceDecl *ID = ME->getReceiverInterface()) {
- if (isSubclass(ID, NSExceptionII)) {
- for (unsigned i = 0; i < NUM_RAISE_SELECTORS; ++i) {
- if (S == NSExceptionInstanceRaiseSelectors[i])
- return true;
- }
- }
+ if (isSubclass(ID, NSExceptionII) &&
+ llvm::is_contained(NSExceptionInstanceRaiseSelectors, S))
+ return true;
}
return false;
diff --git a/contrib/llvm-project/clang/lib/Analysis/PathDiagnostic.cpp b/contrib/llvm-project/clang/lib/Analysis/PathDiagnostic.cpp
index ee8185c2147c..79f337a91ec8 100644
--- a/contrib/llvm-project/clang/lib/Analysis/PathDiagnostic.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/PathDiagnostic.cpp
@@ -32,8 +32,6 @@
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
@@ -45,18 +43,14 @@
#include <cassert>
#include <cstring>
#include <memory>
+#include <optional>
#include <utility>
#include <vector>
using namespace clang;
using namespace ento;
-static StringRef StripTrailingDots(StringRef s) {
- for (StringRef::size_type i = s.size(); i != 0; --i)
- if (s[i - 1] != '.')
- return s.substr(0, i);
- return {};
-}
+static StringRef StripTrailingDots(StringRef s) { return s.rtrim('.'); }
PathDiagnosticPiece::PathDiagnosticPiece(StringRef s,
Kind k, DisplayHint hint)
@@ -226,9 +220,10 @@ void PathDiagnosticConsumer::HandlePathDiagnostic(
Diags.InsertNode(D.release());
}
-static Optional<bool> comparePath(const PathPieces &X, const PathPieces &Y);
+static std::optional<bool> comparePath(const PathPieces &X,
+ const PathPieces &Y);
-static Optional<bool>
+static std::optional<bool>
compareControlFlow(const PathDiagnosticControlFlowPiece &X,
const PathDiagnosticControlFlowPiece &Y) {
FullSourceLoc XSL = X.getStartLocation().asLocation();
@@ -239,16 +234,16 @@ compareControlFlow(const PathDiagnosticControlFlowPiece &X,
FullSourceLoc YEL = Y.getEndLocation().asLocation();
if (XEL != YEL)
return XEL.isBeforeInTranslationUnitThan(YEL);
- return None;
+ return std::nullopt;
}
-static Optional<bool> compareMacro(const PathDiagnosticMacroPiece &X,
- const PathDiagnosticMacroPiece &Y) {
+static std::optional<bool> compareMacro(const PathDiagnosticMacroPiece &X,
+ const PathDiagnosticMacroPiece &Y) {
return comparePath(X.subPieces, Y.subPieces);
}
-static Optional<bool> compareCall(const PathDiagnosticCallPiece &X,
- const PathDiagnosticCallPiece &Y) {
+static std::optional<bool> compareCall(const PathDiagnosticCallPiece &X,
+ const PathDiagnosticCallPiece &Y) {
FullSourceLoc X_CEL = X.callEnter.asLocation();
FullSourceLoc Y_CEL = Y.callEnter.asLocation();
if (X_CEL != Y_CEL)
@@ -264,8 +259,8 @@ static Optional<bool> compareCall(const PathDiagnosticCallPiece &X,
return comparePath(X.path, Y.path);
}
-static Optional<bool> comparePiece(const PathDiagnosticPiece &X,
- const PathDiagnosticPiece &Y) {
+static std::optional<bool> comparePiece(const PathDiagnosticPiece &X,
+ const PathDiagnosticPiece &Y) {
if (X.getKind() != Y.getKind())
return X.getKind() < Y.getKind();
@@ -305,25 +300,24 @@ static Optional<bool> comparePiece(const PathDiagnosticPiece &X,
case PathDiagnosticPiece::Event:
case PathDiagnosticPiece::Note:
case PathDiagnosticPiece::PopUp:
- return None;
+ return std::nullopt;
}
llvm_unreachable("all cases handled");
}
-static Optional<bool> comparePath(const PathPieces &X, const PathPieces &Y) {
+static std::optional<bool> comparePath(const PathPieces &X,
+ const PathPieces &Y) {
if (X.size() != Y.size())
return X.size() < Y.size();
PathPieces::const_iterator X_I = X.begin(), X_end = X.end();
PathPieces::const_iterator Y_I = Y.begin(), Y_end = Y.end();
- for ( ; X_I != X_end && Y_I != Y_end; ++X_I, ++Y_I) {
- Optional<bool> b = comparePiece(**X_I, **Y_I);
- if (b.hasValue())
- return b.getValue();
- }
+ for (; X_I != X_end && Y_I != Y_end; ++X_I, ++Y_I)
+ if (std::optional<bool> b = comparePiece(**X_I, **Y_I))
+ return *b;
- return None;
+ return std::nullopt;
}
static bool compareCrossTUSourceLocs(FullSourceLoc XL, FullSourceLoc YL) {
@@ -337,13 +331,15 @@ static bool compareCrossTUSourceLocs(FullSourceLoc XL, FullSourceLoc YL) {
std::pair<bool, bool> InSameTU = SM.isInTheSameTranslationUnit(XOffs, YOffs);
if (InSameTU.first)
return XL.isBeforeInTranslationUnitThan(YL);
- const FileEntry *XFE = SM.getFileEntryForID(XL.getSpellingLoc().getFileID());
- const FileEntry *YFE = SM.getFileEntryForID(YL.getSpellingLoc().getFileID());
+ OptionalFileEntryRef XFE =
+ SM.getFileEntryRefForID(XL.getSpellingLoc().getFileID());
+ OptionalFileEntryRef YFE =
+ SM.getFileEntryRefForID(YL.getSpellingLoc().getFileID());
if (!XFE || !YFE)
return XFE && !YFE;
int NameCmp = XFE->getName().compare(YFE->getName());
if (NameCmp != 0)
- return NameCmp == -1;
+ return NameCmp < 0;
// Last resort: Compare raw file IDs that are possibly expansions.
return XL.getFileID() < YL.getFileID();
}
@@ -365,9 +361,10 @@ static bool compare(const PathDiagnostic &X, const PathDiagnostic &Y) {
return X.getVerboseDescription() < Y.getVerboseDescription();
if (X.getShortDescription() != Y.getShortDescription())
return X.getShortDescription() < Y.getShortDescription();
- auto CompareDecls = [&XL](const Decl *D1, const Decl *D2) -> Optional<bool> {
+ auto CompareDecls = [&XL](const Decl *D1,
+ const Decl *D2) -> std::optional<bool> {
if (D1 == D2)
- return None;
+ return std::nullopt;
if (!D1)
return true;
if (!D2)
@@ -379,7 +376,7 @@ static bool compare(const PathDiagnostic &X, const PathDiagnostic &Y) {
return compareCrossTUSourceLocs(FullSourceLoc(D1L, SM),
FullSourceLoc(D2L, SM));
}
- return None;
+ return std::nullopt;
};
if (auto Result = CompareDecls(X.getDeclWithIssue(), Y.getDeclWithIssue()))
return *Result;
@@ -395,9 +392,7 @@ static bool compare(const PathDiagnostic &X, const PathDiagnostic &Y) {
if (*XI != *YI)
return (*XI) < (*YI);
}
- Optional<bool> b = comparePath(X.path, Y.path);
- assert(b.hasValue());
- return b.getValue();
+ return *comparePath(X.path, Y.path);
}
void PathDiagnosticConsumer::FlushDiagnostics(
@@ -434,8 +429,8 @@ void PathDiagnosticConsumer::FlushDiagnostics(
}
PathDiagnosticConsumer::FilesMade::~FilesMade() {
- for (PDFileEntry &Entry : Set)
- Entry.~PDFileEntry();
+ for (auto It = Set.begin(); It != Set.end();)
+ (It++)->~PDFileEntry();
}
void PathDiagnosticConsumer::FilesMade::addDiagnostic(const PathDiagnostic &PD,
@@ -567,6 +562,7 @@ getLocationForCaller(const StackFrameContext *SFC,
}
case CFGElement::ScopeBegin:
case CFGElement::ScopeEnd:
+ case CFGElement::CleanupFunction:
llvm_unreachable("not yet implemented!");
case CFGElement::LifetimeEnds:
case CFGElement::LoopExit:
@@ -586,6 +582,7 @@ PathDiagnosticLocation
PathDiagnosticLocation::createBegin(const Stmt *S,
const SourceManager &SM,
LocationOrAnalysisDeclContext LAC) {
+ assert(S && "Statement cannot be null");
return PathDiagnosticLocation(getValidSourceLocation(S, LAC),
SM, SingleLocK);
}
@@ -665,7 +662,7 @@ PathDiagnosticLocation
PathDiagnosticLocation::create(const ProgramPoint& P,
const SourceManager &SMng) {
const Stmt* S = nullptr;
- if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
+ if (std::optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
const CFGBlock *BSrc = BE->getSrc();
if (BSrc->getTerminator().isVirtualBaseBranch()) {
// TODO: VirtualBaseBranches should also appear for destructors.
@@ -685,22 +682,23 @@ PathDiagnosticLocation::create(const ProgramPoint& P,
P.getLocationContext()->getDecl(), SMng);
}
}
- } else if (Optional<StmtPoint> SP = P.getAs<StmtPoint>()) {
+ } else if (std::optional<StmtPoint> SP = P.getAs<StmtPoint>()) {
S = SP->getStmt();
if (P.getAs<PostStmtPurgeDeadSymbols>())
return PathDiagnosticLocation::createEnd(S, SMng, P.getLocationContext());
- } else if (Optional<PostInitializer> PIP = P.getAs<PostInitializer>()) {
+ } else if (std::optional<PostInitializer> PIP = P.getAs<PostInitializer>()) {
return PathDiagnosticLocation(PIP->getInitializer()->getSourceLocation(),
SMng);
- } else if (Optional<PreImplicitCall> PIC = P.getAs<PreImplicitCall>()) {
+ } else if (std::optional<PreImplicitCall> PIC = P.getAs<PreImplicitCall>()) {
return PathDiagnosticLocation(PIC->getLocation(), SMng);
- } else if (Optional<PostImplicitCall> PIE = P.getAs<PostImplicitCall>()) {
+ } else if (std::optional<PostImplicitCall> PIE =
+ P.getAs<PostImplicitCall>()) {
return PathDiagnosticLocation(PIE->getLocation(), SMng);
- } else if (Optional<CallEnter> CE = P.getAs<CallEnter>()) {
+ } else if (std::optional<CallEnter> CE = P.getAs<CallEnter>()) {
return getLocationForCaller(CE->getCalleeContext(),
CE->getLocationContext(),
SMng);
- } else if (Optional<CallExitEnd> CEE = P.getAs<CallExitEnd>()) {
+ } else if (std::optional<CallExitEnd> CEE = P.getAs<CallExitEnd>()) {
return getLocationForCaller(CEE->getCalleeContext(),
CEE->getLocationContext(),
SMng);
@@ -710,8 +708,8 @@ PathDiagnosticLocation::create(const ProgramPoint& P,
CEB->getLocationContext());
return PathDiagnosticLocation(
CEB->getLocationContext()->getDecl()->getSourceRange().getEnd(), SMng);
- } else if (Optional<BlockEntrance> BE = P.getAs<BlockEntrance>()) {
- if (Optional<CFGElement> BlockFront = BE->getFirstElement()) {
+ } else if (std::optional<BlockEntrance> BE = P.getAs<BlockEntrance>()) {
+ if (std::optional<CFGElement> BlockFront = BE->getFirstElement()) {
if (auto StmtElt = BlockFront->getAs<CFGStmt>()) {
return PathDiagnosticLocation(StmtElt->getStmt()->getBeginLoc(), SMng);
} else if (auto NewAllocElt = BlockFront->getAs<CFGNewAllocator>()) {
@@ -723,7 +721,8 @@ PathDiagnosticLocation::create(const ProgramPoint& P,
return PathDiagnosticLocation(
BE->getBlock()->getTerminatorStmt()->getBeginLoc(), SMng);
- } else if (Optional<FunctionExitPoint> FE = P.getAs<FunctionExitPoint>()) {
+ } else if (std::optional<FunctionExitPoint> FE =
+ P.getAs<FunctionExitPoint>()) {
return PathDiagnosticLocation(FE->getStmt(), SMng,
FE->getLocationContext());
} else {
diff --git a/contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp b/contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp
index 221d137dadb8..1bf0d9aec862 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/ReachableCode.h"
+#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
@@ -24,6 +25,7 @@
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
+#include <optional>
using namespace clang;
@@ -73,7 +75,7 @@ static bool isBuiltinAssumeFalse(const CFGBlock *B, const Stmt *S,
// (e.g. a CFGBlock containing only a goto).
return false;
}
- if (Optional<CFGStmt> CS = B->back().getAs<CFGStmt>()) {
+ if (std::optional<CFGStmt> CS = B->back().getAs<CFGStmt>()) {
if (const auto *CE = dyn_cast<CallExpr>(CS->getStmt())) {
return CE->getCallee()->IgnoreCasts() == S && CE->isBuiltinAssumeFalse(C);
}
@@ -87,10 +89,8 @@ static bool isDeadReturn(const CFGBlock *B, const Stmt *S) {
// block, or may be in a subsequent block because of destructors.
const CFGBlock *Current = B;
while (true) {
- for (CFGBlock::const_reverse_iterator I = Current->rbegin(),
- E = Current->rend();
- I != E; ++I) {
- if (Optional<CFGStmt> CS = I->getAs<CFGStmt>()) {
+ for (const CFGElement &CE : llvm::reverse(*Current)) {
+ if (std::optional<CFGStmt> CS = CE.getAs<CFGStmt>()) {
if (const ReturnStmt *RS = dyn_cast<ReturnStmt>(CS->getStmt())) {
if (RS == S)
return true;
@@ -220,14 +220,15 @@ static bool isConfigurationValue(const Stmt *S,
return isConfigurationValue(cast<DeclRefExpr>(S)->getDecl(), PP);
case Stmt::ObjCBoolLiteralExprClass:
IgnoreYES_NO = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Stmt::CXXBoolLiteralExprClass:
case Stmt::IntegerLiteralClass: {
const Expr *E = cast<Expr>(S);
if (IncludeIntegers) {
if (SilenceableCondVal && !SilenceableCondVal->getBegin().isValid())
*SilenceableCondVal = E->getSourceRange();
- return WrappedInParens || isExpandedFromConfigurationMacro(E, PP, IgnoreYES_NO);
+ return WrappedInParens ||
+ isExpandedFromConfigurationMacro(E, PP, IgnoreYES_NO);
}
return false;
}
@@ -300,6 +301,12 @@ static bool shouldTreatSuccessorsAsReachable(const CFGBlock *B,
if (isa<BinaryOperator>(Term)) {
return isConfigurationValue(Term, PP);
}
+ // Do not treat constexpr if statement successors as unreachable in warnings
+ // since the point of these statements is to determine branches at compile
+ // time.
+ if (const auto *IS = dyn_cast<IfStmt>(Term);
+ IS != nullptr && IS->isConstexpr())
+ return true;
}
const Stmt *Cond = B->getTerminatorCondition(/* stripParens */ false);
@@ -334,7 +341,7 @@ static unsigned scanFromBlock(const CFGBlock *Start,
// This allows us to potentially uncover some "always unreachable" code
// within the "sometimes unreachable" code.
// Look at the successors and mark then reachable.
- Optional<bool> TreatAllSuccessorsAsReachable;
+ std::optional<bool> TreatAllSuccessorsAsReachable;
if (!IncludeSometimesUnreachableEdges)
TreatAllSuccessorsAsReachable = false;
@@ -346,13 +353,13 @@ static unsigned scanFromBlock(const CFGBlock *Start,
if (!UB)
break;
- if (!TreatAllSuccessorsAsReachable.hasValue()) {
+ if (!TreatAllSuccessorsAsReachable) {
assert(PP);
TreatAllSuccessorsAsReachable =
shouldTreatSuccessorsAsReachable(item, *PP);
}
- if (TreatAllSuccessorsAsReachable.getValue()) {
+ if (*TreatAllSuccessorsAsReachable) {
B = UB;
break;
}
@@ -456,7 +463,7 @@ static bool isValidDeadStmt(const Stmt *S) {
const Stmt *DeadCodeScan::findDeadCode(const clang::CFGBlock *Block) {
for (CFGBlock::const_iterator I = Block->begin(), E = Block->end(); I!=E; ++I)
- if (Optional<CFGStmt> CS = I->getAs<CFGStmt>()) {
+ if (std::optional<CFGStmt> CS = I->getAs<CFGStmt>()) {
const Stmt *S = CS->getStmt();
if (isValidDeadStmt(S))
return S;
@@ -530,12 +537,11 @@ unsigned DeadCodeScan::scanBackwards(const clang::CFGBlock *Start,
// earliest location.
if (!DeferredLocs.empty()) {
llvm::array_pod_sort(DeferredLocs.begin(), DeferredLocs.end(), SrcCmp);
- for (DeferredLocsTy::iterator I = DeferredLocs.begin(),
- E = DeferredLocs.end(); I != E; ++I) {
- const CFGBlock *Block = I->first;
+ for (const auto &I : DeferredLocs) {
+ const CFGBlock *Block = I.first;
if (Reachable[Block->getBlockID()])
continue;
- reportDeadCode(Block, I->second, CB);
+ reportDeadCode(Block, I.second, CB);
count += scanMaybeReachableFromBlock(Block, PP, Reachable);
}
}
@@ -624,6 +630,10 @@ void DeadCodeScan::reportDeadCode(const CFGBlock *B,
UK = reachable_code::UK_Return;
}
+ const auto *AS = dyn_cast<AttributedStmt>(S);
+ bool HasFallThroughAttr =
+ AS && hasSpecificAttr<FallThroughAttr>(AS->getAttrs());
+
SourceRange SilenceableCondVal;
if (UK == reachable_code::UK_Other) {
@@ -640,8 +650,9 @@ void DeadCodeScan::reportDeadCode(const CFGBlock *B,
R2 = Inc->getSourceRange();
}
- CB.HandleUnreachable(reachable_code::UK_Loop_Increment,
- Loc, SourceRange(), SourceRange(Loc, Loc), R2);
+ CB.HandleUnreachable(reachable_code::UK_Loop_Increment, Loc,
+ SourceRange(), SourceRange(Loc, Loc), R2,
+ HasFallThroughAttr);
return;
}
@@ -660,7 +671,7 @@ void DeadCodeScan::reportDeadCode(const CFGBlock *B,
SourceRange R1, R2;
SourceLocation Loc = GetUnreachableLoc(S, R1, R2);
- CB.HandleUnreachable(UK, Loc, SilenceableCondVal, R1, R2);
+ CB.HandleUnreachable(UK, Loc, SilenceableCondVal, R1, R2, HasFallThroughAttr);
}
//===----------------------------------------------------------------------===//
@@ -694,18 +705,15 @@ void FindUnreachableCode(AnalysisDeclContext &AC, Preprocessor &PP,
// If there aren't explicit EH edges, we should include the 'try' dispatch
// blocks as roots.
if (!AC.getCFGBuildOptions().AddEHEdges) {
- for (CFG::try_block_iterator I = cfg->try_blocks_begin(),
- E = cfg->try_blocks_end() ; I != E; ++I) {
- numReachable += scanMaybeReachableFromBlock(*I, PP, reachable);
- }
+ for (const CFGBlock *B : cfg->try_blocks())
+ numReachable += scanMaybeReachableFromBlock(B, PP, reachable);
if (numReachable == cfg->getNumBlockIDs())
return;
}
// There are some unreachable blocks. We need to find the root blocks that
// contain code that should be considered unreachable.
- for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
- const CFGBlock *block = *I;
+ for (const CFGBlock *block : *cfg) {
// A block may have been marked reachable during this loop.
if (reachable[block->getBlockID()])
continue;
diff --git a/contrib/llvm-project/clang/lib/Analysis/RetainSummaryManager.cpp b/contrib/llvm-project/clang/lib/Analysis/RetainSummaryManager.cpp
index 7ed1e40333f4..8d279d969b61 100644
--- a/contrib/llvm-project/clang/lib/Analysis/RetainSummaryManager.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/RetainSummaryManager.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ParentMap.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -32,7 +33,7 @@ constexpr static bool isOneOf() {
/// rest of varargs.
template <class T, class P, class... ToCompare>
constexpr static bool isOneOf() {
- return std::is_same<T, P>::value || isOneOf<T, ToCompare...>();
+ return std::is_same_v<T, P> || isOneOf<T, ToCompare...>();
}
namespace {
@@ -65,13 +66,13 @@ struct GeneralizedConsumedAttr {
}
template <class T>
-Optional<ObjKind> RetainSummaryManager::hasAnyEnabledAttrOf(const Decl *D,
- QualType QT) {
+std::optional<ObjKind> RetainSummaryManager::hasAnyEnabledAttrOf(const Decl *D,
+ QualType QT) {
ObjKind K;
if (isOneOf<T, CFConsumedAttr, CFReturnsRetainedAttr,
CFReturnsNotRetainedAttr>()) {
if (!TrackObjCAndCFObjects)
- return None;
+ return std::nullopt;
K = ObjKind::CF;
} else if (isOneOf<T, NSConsumedAttr, NSConsumesSelfAttr,
@@ -79,19 +80,19 @@ Optional<ObjKind> RetainSummaryManager::hasAnyEnabledAttrOf(const Decl *D,
NSReturnsNotRetainedAttr, NSConsumesSelfAttr>()) {
if (!TrackObjCAndCFObjects)
- return None;
+ return std::nullopt;
if (isOneOf<T, NSReturnsRetainedAttr, NSReturnsAutoreleasedAttr,
NSReturnsNotRetainedAttr>() &&
!cocoa::isCocoaObjectRef(QT))
- return None;
+ return std::nullopt;
K = ObjKind::ObjC;
} else if (isOneOf<T, OSConsumedAttr, OSConsumesThisAttr,
OSReturnsNotRetainedAttr, OSReturnsRetainedAttr,
OSReturnsRetainedOnZeroAttr,
OSReturnsRetainedOnNonZeroAttr>()) {
if (!TrackOSObjects)
- return None;
+ return std::nullopt;
K = ObjKind::OS;
} else if (isOneOf<T, GeneralizedReturnsNotRetainedAttr,
GeneralizedReturnsRetainedAttr,
@@ -102,12 +103,12 @@ Optional<ObjKind> RetainSummaryManager::hasAnyEnabledAttrOf(const Decl *D,
}
if (D->hasAttr<T>())
return K;
- return None;
+ return std::nullopt;
}
template <class T1, class T2, class... Others>
-Optional<ObjKind> RetainSummaryManager::hasAnyEnabledAttrOf(const Decl *D,
- QualType QT) {
+std::optional<ObjKind> RetainSummaryManager::hasAnyEnabledAttrOf(const Decl *D,
+ QualType QT) {
if (auto Out = hasAnyEnabledAttrOf<T1>(D, QT))
return Out;
return hasAnyEnabledAttrOf<T2, Others...>(D, QT);
@@ -173,7 +174,7 @@ static bool isOSObjectPtr(QualType QT) {
}
static bool isISLObjectRef(QualType Ty) {
- return StringRef(Ty.getAsString()).startswith("isl_");
+ return StringRef(Ty.getAsString()).starts_with("isl_");
}
static bool isOSIteratorSubclass(const Decl *D) {
@@ -189,18 +190,18 @@ static bool hasRCAnnotation(const Decl *D, StringRef rcAnnotation) {
}
static bool isRetain(const FunctionDecl *FD, StringRef FName) {
- return FName.startswith_insensitive("retain") ||
- FName.endswith_insensitive("retain");
+ return FName.starts_with_insensitive("retain") ||
+ FName.ends_with_insensitive("retain");
}
static bool isRelease(const FunctionDecl *FD, StringRef FName) {
- return FName.startswith_insensitive("release") ||
- FName.endswith_insensitive("release");
+ return FName.starts_with_insensitive("release") ||
+ FName.ends_with_insensitive("release");
}
static bool isAutorelease(const FunctionDecl *FD, StringRef FName) {
- return FName.startswith_insensitive("autorelease") ||
- FName.endswith_insensitive("autorelease");
+ return FName.starts_with_insensitive("autorelease") ||
+ FName.ends_with_insensitive("autorelease");
}
static bool isMakeCollectable(StringRef FName) {
@@ -254,13 +255,13 @@ RetainSummaryManager::getSummaryForOSObject(const FunctionDecl *FD,
// TODO: Add support for the slightly common *Matching(table) idiom.
// Cf. IOService::nameMatching() etc. - these function have an unusual
// contract of returning at +0 or +1 depending on their last argument.
- if (FName.endswith("Matching")) {
+ if (FName.ends_with("Matching")) {
return getPersistentStopSummary();
}
// All objects returned with functions *not* starting with 'get',
// or iterators, are returned at +1.
- if ((!FName.startswith("get") && !FName.startswith("Get")) ||
+ if ((!FName.starts_with("get") && !FName.starts_with("Get")) ||
isOSIteratorSubclass(PD)) {
return getOSSummaryCreateRule(FD);
} else {
@@ -300,8 +301,9 @@ const RetainSummary *RetainSummaryManager::getSummaryForObjCOrCFObject(
std::string RetTyName = RetTy.getAsString();
if (FName == "pthread_create" || FName == "pthread_setspecific") {
- // Part of: <rdar://problem/7299394> and <rdar://problem/11282706>.
- // This will be addressed better with IPA.
+ // It's not uncommon to pass a tracked object into the thread
+ // as 'void *arg', and then release it inside the thread.
+ // FIXME: We could build a much more precise model for these functions.
return getPersistentStopSummary();
} else if(FName == "NSMakeCollectable") {
// Handle: id NSMakeCollectable(CFTypeRef)
@@ -310,7 +312,8 @@ const RetainSummary *RetainSummaryManager::getSummaryForObjCOrCFObject(
: getPersistentStopSummary();
} else if (FName == "CMBufferQueueDequeueAndRetain" ||
FName == "CMBufferQueueDequeueIfDataReadyAndRetain") {
- // Part of: <rdar://problem/39390714>.
+ // These API functions are known to NOT act as a CFRetain wrapper.
+ // They simply make a new object owned by the caller.
return getPersistentSummary(RetEffect::MakeOwned(ObjKind::CF),
ScratchArgs,
ArgEffect(DoNothing),
@@ -323,40 +326,39 @@ const RetainSummary *RetainSummaryManager::getSummaryForObjCOrCFObject(
FName == "IOServiceNameMatching" ||
FName == "IORegistryEntryIDMatching" ||
FName == "IOOpenFirmwarePathMatching"))) {
- // Part of <rdar://problem/6961230>. (IOKit)
- // This should be addressed using a API table.
+ // Yes, these IOKit functions return CF objects.
+ // They also violate the CF naming convention.
return getPersistentSummary(RetEffect::MakeOwned(ObjKind::CF), ScratchArgs,
ArgEffect(DoNothing), ArgEffect(DoNothing));
} else if (FName == "IOServiceGetMatchingService" ||
FName == "IOServiceGetMatchingServices") {
- // FIXES: <rdar://problem/6326900>
- // This should be addressed using a API table. This strcmp is also
- // a little gross, but there is no need to super optimize here.
+ // These IOKit functions accept CF objects as arguments.
+ // They also consume them without an appropriate annotation.
ScratchArgs = AF.add(ScratchArgs, 1, ArgEffect(DecRef, ObjKind::CF));
return getPersistentSummary(RetEffect::MakeNoRet(),
ScratchArgs,
ArgEffect(DoNothing), ArgEffect(DoNothing));
} else if (FName == "IOServiceAddNotification" ||
FName == "IOServiceAddMatchingNotification") {
- // Part of <rdar://problem/6961230>. (IOKit)
- // This should be addressed using a API table.
+ // More IOKit functions suddenly accepting (and even more suddenly,
+ // consuming) CF objects.
ScratchArgs = AF.add(ScratchArgs, 2, ArgEffect(DecRef, ObjKind::CF));
return getPersistentSummary(RetEffect::MakeNoRet(),
ScratchArgs,
ArgEffect(DoNothing), ArgEffect(DoNothing));
} else if (FName == "CVPixelBufferCreateWithBytes") {
- // FIXES: <rdar://problem/7283567>
// Eventually this can be improved by recognizing that the pixel
// buffer passed to CVPixelBufferCreateWithBytes is released via
// a callback and doing full IPA to make sure this is done correctly.
- // FIXME: This function has an out parameter that returns an
+ // Note that it's passed as a 'void *', so it's hard to annotate.
+ // FIXME: This function also has an out parameter that returns an
// allocated object.
ScratchArgs = AF.add(ScratchArgs, 7, ArgEffect(StopTracking));
return getPersistentSummary(RetEffect::MakeNoRet(),
ScratchArgs,
ArgEffect(DoNothing), ArgEffect(DoNothing));
} else if (FName == "CGBitmapContextCreateWithData") {
- // FIXES: <rdar://problem/7358899>
+ // This is similar to the CVPixelBufferCreateWithBytes situation above.
// Eventually this can be improved by recognizing that 'releaseInfo'
// passed to CGBitmapContextCreateWithData is released via
// a callback and doing full IPA to make sure this is done correctly.
@@ -364,17 +366,14 @@ const RetainSummary *RetainSummaryManager::getSummaryForObjCOrCFObject(
return getPersistentSummary(RetEffect::MakeOwned(ObjKind::CF), ScratchArgs,
ArgEffect(DoNothing), ArgEffect(DoNothing));
} else if (FName == "CVPixelBufferCreateWithPlanarBytes") {
- // FIXES: <rdar://problem/7283567>
- // Eventually this can be improved by recognizing that the pixel
- // buffer passed to CVPixelBufferCreateWithPlanarBytes is released
- // via a callback and doing full IPA to make sure this is done
- // correctly.
+ // Same as CVPixelBufferCreateWithBytes, just more arguments.
ScratchArgs = AF.add(ScratchArgs, 12, ArgEffect(StopTracking));
return getPersistentSummary(RetEffect::MakeNoRet(),
ScratchArgs,
ArgEffect(DoNothing), ArgEffect(DoNothing));
- } else if (FName == "VTCompressionSessionEncodeFrame") {
- // The context argument passed to VTCompressionSessionEncodeFrame()
+ } else if (FName == "VTCompressionSessionEncodeFrame" ||
+ FName == "VTCompressionSessionEncodeMultiImageFrame") {
+ // The context argument passed to VTCompressionSessionEncodeFrame() et.al.
// is passed to the callback specified when creating the session
// (e.g. with VTCompressionSessionCreate()) which can release it.
// To account for this possibility, conservatively stop tracking
@@ -385,22 +384,19 @@ const RetainSummary *RetainSummaryManager::getSummaryForObjCOrCFObject(
ArgEffect(DoNothing), ArgEffect(DoNothing));
} else if (FName == "dispatch_set_context" ||
FName == "xpc_connection_set_context") {
- // <rdar://problem/11059275> - The analyzer currently doesn't have
- // a good way to reason about the finalizer function for libdispatch.
+ // The analyzer currently doesn't have a good way to reason about
+ // dispatch_set_finalizer_f() which typically cleans up the context.
// If we pass a context object that is memory managed, stop tracking it.
- // <rdar://problem/13783514> - Same problem, but for XPC.
- // FIXME: this hack should possibly go away once we can handle
- // libdispatch and XPC finalizers.
+ // Same with xpc_connection_set_finalizer_f().
ScratchArgs = AF.add(ScratchArgs, 1, ArgEffect(StopTracking));
return getPersistentSummary(RetEffect::MakeNoRet(),
ScratchArgs,
ArgEffect(DoNothing), ArgEffect(DoNothing));
- } else if (FName.startswith("NSLog")) {
+ } else if (FName.starts_with("NSLog")) {
return getDoNothingSummary();
- } else if (FName.startswith("NS") &&
- (FName.find("Insert") != StringRef::npos)) {
- // Whitelist NSXXInsertXX, for example NSMapInsertIfAbsent, since they can
- // be deallocated by NSMapRemove. (radar://11152419)
+ } else if (FName.starts_with("NS") && FName.contains("Insert")) {
+ // Allowlist NSXXInsertXX, for example NSMapInsertIfAbsent, since they can
+ // be deallocated by NSMapRemove.
ScratchArgs = AF.add(ScratchArgs, 1, ArgEffect(StopTracking));
ScratchArgs = AF.add(ScratchArgs, 2, ArgEffect(StopTracking));
return getPersistentSummary(RetEffect::MakeNoRet(),
@@ -457,9 +453,9 @@ const RetainSummary *RetainSummaryManager::getSummaryForObjCOrCFObject(
// Check for release functions, the only kind of functions that we care
// about that don't return a pointer type.
- if (FName.startswith("CG") || FName.startswith("CF")) {
+ if (FName.starts_with("CG") || FName.starts_with("CF")) {
// Test for 'CGCF'.
- FName = FName.substr(FName.startswith("CGCF") ? 4 : 2);
+ FName = FName.substr(FName.starts_with("CGCF") ? 4 : 2);
if (isRelease(FD, FName))
return getUnarySummary(FT, DecRef);
@@ -719,13 +715,13 @@ bool RetainSummaryManager::isTrustedReferenceCountImplementation(
return hasRCAnnotation(FD, "rc_ownership_trusted_implementation");
}
-Optional<RetainSummaryManager::BehaviorSummary>
+std::optional<RetainSummaryManager::BehaviorSummary>
RetainSummaryManager::canEval(const CallExpr *CE, const FunctionDecl *FD,
bool &hasTrustedImplementationAnnotation) {
IdentifierInfo *II = FD->getIdentifier();
if (!II)
- return None;
+ return std::nullopt;
StringRef FName = II->getName();
FName = FName.substr(FName.find_first_not_of('_'));
@@ -740,9 +736,9 @@ RetainSummaryManager::canEval(const CallExpr *CE, const FunctionDecl *FD,
// It's okay to be a little sloppy here.
if (FName == "CMBufferQueueDequeueAndRetain" ||
FName == "CMBufferQueueDequeueIfDataReadyAndRetain") {
- // Part of: <rdar://problem/39390714>.
- // These are not retain. They just return something and retain it.
- return None;
+ // These API functions are known to NOT act as a CFRetain wrapper.
+ // They simply make a new object owned by the caller.
+ return std::nullopt;
}
if (CE->getNumArgs() == 1 &&
(cocoa::isRefType(ResultTy, "CF", FName) ||
@@ -782,7 +778,7 @@ RetainSummaryManager::canEval(const CallExpr *CE, const FunctionDecl *FD,
return BehaviorSummary::NoOp;
}
- return None;
+ return std::nullopt;
}
const RetainSummary *
@@ -792,7 +788,7 @@ RetainSummaryManager::getUnarySummary(const FunctionType* FT,
// Unary functions have no arg effects by definition.
ArgEffects ScratchArgs(AF.getEmptyMap());
- // Sanity check that this is *really* a unary function. This can
+ // Verify that this is *really* a unary function. This can
// happen if people do weird things.
const FunctionProtoType* FTP = dyn_cast<FunctionProtoType>(FT);
if (!FTP || FTP->getNumParams() != 1)
@@ -865,7 +861,7 @@ RetainSummaryManager::getCFSummaryGetRule(const FunctionDecl *FD) {
// Summary creation for Selectors.
//===----------------------------------------------------------------------===//
-Optional<RetEffect>
+std::optional<RetEffect>
RetainSummaryManager::getRetEffectFromAnnotations(QualType RetTy,
const Decl *D) {
if (hasAnyEnabledAttrOf<NSReturnsRetainedAttr>(D, RetTy))
@@ -886,14 +882,14 @@ RetainSummaryManager::getRetEffectFromAnnotations(QualType RetTy,
if (auto RE = getRetEffectFromAnnotations(RetTy, PD))
return RE;
- return None;
+ return std::nullopt;
}
/// \return Whether the chain of typedefs starting from @c QT
/// has a typedef with a given name @c Name.
static bool hasTypedefNamed(QualType QT,
StringRef Name) {
- while (auto *T = dyn_cast<TypedefType>(QT)) {
+ while (auto *T = QT->getAs<TypedefType>()) {
const auto &Context = T->getDecl()->getASTContext();
if (T->getDecl()->getIdentifier() == &Context.Idents.get(Name))
return true;
@@ -991,7 +987,7 @@ RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
applyParamAnnotationEffect(*pi, parm_idx, FD, Template);
QualType RetTy = FD->getReturnType();
- if (Optional<RetEffect> RetE = getRetEffectFromAnnotations(RetTy, FD))
+ if (std::optional<RetEffect> RetE = getRetEffectFromAnnotations(RetTy, FD))
Template->setRetEffect(*RetE);
if (hasAnyEnabledAttrOf<OSConsumesThisAttr>(FD, RetTy))
@@ -1018,7 +1014,7 @@ RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
applyParamAnnotationEffect(*pi, parm_idx, MD, Template);
QualType RetTy = MD->getReturnType();
- if (Optional<RetEffect> RetE = getRetEffectFromAnnotations(RetTy, MD))
+ if (std::optional<RetEffect> RetE = getRetEffectFromAnnotations(RetTy, MD))
Template->setRetEffect(*RetE);
}
@@ -1102,7 +1098,7 @@ RetainSummaryManager::getStandardMethodSummary(const ObjCMethodDecl *MD,
if (S.isKeywordSelector()) {
for (unsigned i = 0, e = S.getNumArgs(); i != e; ++i) {
StringRef Slot = S.getNameForSlot(i);
- if (Slot.substr(Slot.size() - 8).equals_insensitive("delegate")) {
+ if (Slot.ends_with_insensitive("delegate")) {
if (ResultEff == ObjCInitRetE)
ResultEff = RetEffect::MakeNoRetHard();
else
@@ -1243,8 +1239,6 @@ void RetainSummaryManager::InitializeMethodSummaries() {
// FIXME: For now we opt for false negatives with NSWindow, as these objects
// self-own themselves. However, they only do this once they are displayed.
// Thus, we need to track an NSWindow's display status.
- // This is tracked in <rdar://problem/6062711>.
- // See also http://llvm.org/bugs/show_bug.cgi?id=3714.
const RetainSummary *NoTrackYet =
getPersistentSummary(RetEffect::MakeNoRet(), ScratchArgs,
ArgEffect(StopTracking), ArgEffect(StopTracking));
@@ -1259,7 +1253,6 @@ void RetainSummaryManager::InitializeMethodSummaries() {
// For NSNull, objects returned by +null are singletons that ignore
// retain/release semantics. Just don't track them.
- // <rdar://problem/12858915>
addClassMethSummary("NSNull", "null", NoTrackYet);
// Don't track allocated autorelease pools, as it is okay to prematurely
diff --git a/contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp b/contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp
index 41a55f9579bd..e25b843c9bf8 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp
@@ -40,8 +40,6 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ImmutableMap.h"
-#include "llvm/ADT/Optional.h"
-#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
@@ -54,6 +52,7 @@
#include <functional>
#include <iterator>
#include <memory>
+#include <optional>
#include <string>
#include <type_traits>
#include <utility>
@@ -75,7 +74,7 @@ static void warnInvalidLock(ThreadSafetyHandler &Handler,
// FIXME: add a note about the attribute location in MutexExp or D
if (Loc.isValid())
- Handler.handleInvalidLockExp(Kind, Loc);
+ Handler.handleInvalidLockExp(Loc);
}
namespace {
@@ -86,11 +85,9 @@ class CapExprSet : public SmallVector<CapabilityExpr, 4> {
public:
/// Push M onto list, but discard duplicates.
void push_back_nodup(const CapabilityExpr &CapE) {
- iterator It = std::find_if(begin(), end(),
- [=](const CapabilityExpr &CapE2) {
- return CapE.equals(CapE2);
- });
- if (It == end())
+ if (llvm::none_of(*this, [=](const CapabilityExpr &CapE2) {
+ return CapE.equals(CapE2);
+ }))
push_back(CapE);
}
};
@@ -142,12 +139,12 @@ public:
SourceLocation JoinLoc, LockErrorKind LEK,
ThreadSafetyHandler &Handler) const = 0;
virtual void handleLock(FactSet &FSet, FactManager &FactMan,
- const FactEntry &entry, ThreadSafetyHandler &Handler,
- StringRef DiagKind) const = 0;
+ const FactEntry &entry,
+ ThreadSafetyHandler &Handler) const = 0;
virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
const CapabilityExpr &Cp, SourceLocation UnlockLoc,
- bool FullyRemove, ThreadSafetyHandler &Handler,
- StringRef DiagKind) const = 0;
+ bool FullyRemove,
+ ThreadSafetyHandler &Handler) const = 0;
// Return true if LKind >= LK, where exclusive > shared
bool isAtLeast(LockKind LK) const {
@@ -405,7 +402,7 @@ public:
// The map with which Exp should be interpreted.
Context Ctx;
- bool isReference() { return !Exp; }
+ bool isReference() const { return !Exp; }
private:
// Create ordinary variable definition
@@ -420,7 +417,6 @@ public:
private:
Context::Factory ContextFactory;
std::vector<VarDefinition> VarDefinitions;
- std::vector<unsigned> CtxIndices;
std::vector<std::pair<const Stmt *, Context>> SavedContexts;
public:
@@ -506,9 +502,8 @@ public:
for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
const NamedDecl *D = I.getKey();
D->printName(llvm::errs());
- const unsigned *i = C.lookup(D);
llvm::errs() << " -> ";
- dumpVarDefinitionName(*i);
+ dumpVarDefinitionName(I.getData());
llvm::errs() << "\n";
}
}
@@ -733,8 +728,6 @@ void LocalVariableMap::traverseCFG(CFG *CFGraph,
std::vector<CFGBlockInfo> &BlockInfo) {
PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
- CtxIndices.resize(CFGraph->getNumBlockIDs());
-
for (const auto *CurrBlock : *SortedGraph) {
unsigned CurrBlockID = CurrBlock->getBlockID();
CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
@@ -826,7 +819,7 @@ static void findBlockLocations(CFG *CFGraph,
for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
BE = CurrBlock->rend(); BI != BE; ++BI) {
// FIXME: Handle other CFGElement kinds.
- if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
+ if (std::optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
CurrBlockInfo->ExitLoc = CS->getStmt()->getBeginLoc();
break;
}
@@ -838,7 +831,7 @@ static void findBlockLocations(CFG *CFGraph,
// of the first statement in the block.
for (const auto &BI : *CurrBlock) {
// FIXME: Handle other CFGElement kinds.
- if (Optional<CFGStmt> CS = BI.getAs<CFGStmt>()) {
+ if (std::optional<CFGStmt> CS = BI.getAs<CFGStmt>()) {
CurrBlockInfo->EntryLoc = CS->getStmt()->getBeginLoc();
break;
}
@@ -849,6 +842,11 @@ static void findBlockLocations(CFG *CFGraph,
// location.
CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
+ } else if (CurrBlock->succ_size() == 1 && *CurrBlock->succ_begin()) {
+ // The block is empty, and has a single successor. Use its entry
+ // location.
+ CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
+ BlockInfo[(*CurrBlock->succ_begin())->getBlockID()].EntryLoc;
}
}
}
@@ -866,21 +864,21 @@ public:
SourceLocation JoinLoc, LockErrorKind LEK,
ThreadSafetyHandler &Handler) const override {
if (!asserted() && !negative() && !isUniversal()) {
- Handler.handleMutexHeldEndOfScope("mutex", toString(), loc(), JoinLoc,
+ Handler.handleMutexHeldEndOfScope(getKind(), toString(), loc(), JoinLoc,
LEK);
}
}
void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
- ThreadSafetyHandler &Handler,
- StringRef DiagKind) const override {
- Handler.handleDoubleLock(DiagKind, entry.toString(), loc(), entry.loc());
+ ThreadSafetyHandler &Handler) const override {
+ Handler.handleDoubleLock(entry.getKind(), entry.toString(), loc(),
+ entry.loc());
}
void handleUnlock(FactSet &FSet, FactManager &FactMan,
const CapabilityExpr &Cp, SourceLocation UnlockLoc,
- bool FullyRemove, ThreadSafetyHandler &Handler,
- StringRef DiagKind) const override {
+ bool FullyRemove,
+ ThreadSafetyHandler &Handler) const override {
FSet.removeLock(FactMan, Cp);
if (!Cp.negative()) {
FSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
@@ -897,25 +895,27 @@ private:
UCK_ReleasedExclusive, ///< Exclusive capability that was released.
};
- using UnderlyingCapability =
- llvm::PointerIntPair<const til::SExpr *, 2, UnderlyingCapabilityKind>;
+ struct UnderlyingCapability {
+ CapabilityExpr Cap;
+ UnderlyingCapabilityKind Kind;
+ };
- SmallVector<UnderlyingCapability, 4> UnderlyingMutexes;
+ SmallVector<UnderlyingCapability, 2> UnderlyingMutexes;
public:
ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc)
: FactEntry(CE, LK_Exclusive, Loc, Acquired) {}
void addLock(const CapabilityExpr &M) {
- UnderlyingMutexes.emplace_back(M.sexpr(), UCK_Acquired);
+ UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_Acquired});
}
void addExclusiveUnlock(const CapabilityExpr &M) {
- UnderlyingMutexes.emplace_back(M.sexpr(), UCK_ReleasedExclusive);
+ UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_ReleasedExclusive});
}
void addSharedUnlock(const CapabilityExpr &M) {
- UnderlyingMutexes.emplace_back(M.sexpr(), UCK_ReleasedShared);
+ UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_ReleasedShared});
}
void
@@ -923,51 +923,45 @@ public:
SourceLocation JoinLoc, LockErrorKind LEK,
ThreadSafetyHandler &Handler) const override {
for (const auto &UnderlyingMutex : UnderlyingMutexes) {
- const auto *Entry = FSet.findLock(
- FactMan, CapabilityExpr(UnderlyingMutex.getPointer(), false));
- if ((UnderlyingMutex.getInt() == UCK_Acquired && Entry) ||
- (UnderlyingMutex.getInt() != UCK_Acquired && !Entry)) {
+ const auto *Entry = FSet.findLock(FactMan, UnderlyingMutex.Cap);
+ if ((UnderlyingMutex.Kind == UCK_Acquired && Entry) ||
+ (UnderlyingMutex.Kind != UCK_Acquired && !Entry)) {
// If this scoped lock manages another mutex, and if the underlying
// mutex is still/not held, then warn about the underlying mutex.
- Handler.handleMutexHeldEndOfScope(
- "mutex", sx::toString(UnderlyingMutex.getPointer()), loc(), JoinLoc,
- LEK);
+ Handler.handleMutexHeldEndOfScope(UnderlyingMutex.Cap.getKind(),
+ UnderlyingMutex.Cap.toString(), loc(),
+ JoinLoc, LEK);
}
}
}
void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
- ThreadSafetyHandler &Handler,
- StringRef DiagKind) const override {
+ ThreadSafetyHandler &Handler) const override {
for (const auto &UnderlyingMutex : UnderlyingMutexes) {
- CapabilityExpr UnderCp(UnderlyingMutex.getPointer(), false);
-
- if (UnderlyingMutex.getInt() == UCK_Acquired)
- lock(FSet, FactMan, UnderCp, entry.kind(), entry.loc(), &Handler,
- DiagKind);
+ if (UnderlyingMutex.Kind == UCK_Acquired)
+ lock(FSet, FactMan, UnderlyingMutex.Cap, entry.kind(), entry.loc(),
+ &Handler);
else
- unlock(FSet, FactMan, UnderCp, entry.loc(), &Handler, DiagKind);
+ unlock(FSet, FactMan, UnderlyingMutex.Cap, entry.loc(), &Handler);
}
}
void handleUnlock(FactSet &FSet, FactManager &FactMan,
const CapabilityExpr &Cp, SourceLocation UnlockLoc,
- bool FullyRemove, ThreadSafetyHandler &Handler,
- StringRef DiagKind) const override {
+ bool FullyRemove,
+ ThreadSafetyHandler &Handler) const override {
assert(!Cp.negative() && "Managing object cannot be negative.");
for (const auto &UnderlyingMutex : UnderlyingMutexes) {
- CapabilityExpr UnderCp(UnderlyingMutex.getPointer(), false);
-
// Remove/lock the underlying mutex if it exists/is still unlocked; warn
// on double unlocking/locking if we're not destroying the scoped object.
ThreadSafetyHandler *TSHandler = FullyRemove ? nullptr : &Handler;
- if (UnderlyingMutex.getInt() == UCK_Acquired) {
- unlock(FSet, FactMan, UnderCp, UnlockLoc, TSHandler, DiagKind);
+ if (UnderlyingMutex.Kind == UCK_Acquired) {
+ unlock(FSet, FactMan, UnderlyingMutex.Cap, UnlockLoc, TSHandler);
} else {
- LockKind kind = UnderlyingMutex.getInt() == UCK_ReleasedShared
+ LockKind kind = UnderlyingMutex.Kind == UCK_ReleasedShared
? LK_Shared
: LK_Exclusive;
- lock(FSet, FactMan, UnderCp, kind, UnlockLoc, TSHandler, DiagKind);
+ lock(FSet, FactMan, UnderlyingMutex.Cap, kind, UnlockLoc, TSHandler);
}
}
if (FullyRemove)
@@ -976,11 +970,12 @@ public:
private:
void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
- LockKind kind, SourceLocation loc, ThreadSafetyHandler *Handler,
- StringRef DiagKind) const {
+ LockKind kind, SourceLocation loc,
+ ThreadSafetyHandler *Handler) const {
if (const FactEntry *Fact = FSet.findLock(FactMan, Cp)) {
if (Handler)
- Handler->handleDoubleLock(DiagKind, Cp.toString(), Fact->loc(), loc);
+ Handler->handleDoubleLock(Cp.getKind(), Cp.toString(), Fact->loc(),
+ loc);
} else {
FSet.removeLock(FactMan, !Cp);
FSet.addLock(FactMan,
@@ -989,8 +984,7 @@ private:
}
void unlock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
- SourceLocation loc, ThreadSafetyHandler *Handler,
- StringRef DiagKind) const {
+ SourceLocation loc, ThreadSafetyHandler *Handler) const {
if (FSet.findLock(FactMan, Cp)) {
FSet.removeLock(FactMan, Cp);
FSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
@@ -999,7 +993,7 @@ private:
SourceLocation PrevLoc;
if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
PrevLoc = Neg->loc();
- Handler->handleUnmatchedUnlock(DiagKind, Cp.toString(), loc, PrevLoc);
+ Handler->handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), loc, PrevLoc);
}
}
};
@@ -1014,8 +1008,10 @@ class ThreadSafetyAnalyzer {
threadSafety::SExprBuilder SxBuilder;
ThreadSafetyHandler &Handler;
- const CXXMethodDecl *CurrentMethod;
+ const FunctionDecl *CurrentFunction;
LocalVariableMap LocalVarMap;
+ // Maps constructed objects to `this` placeholder prior to initialization.
+ llvm::SmallDenseMap<const Expr *, til::LiteralPtr *> ConstructedObjects;
FactManager FactMan;
std::vector<CFGBlockInfo> BlockInfo;
@@ -1028,14 +1024,13 @@ public:
bool inCurrentScope(const CapabilityExpr &CapE);
void addLock(FactSet &FSet, std::unique_ptr<FactEntry> Entry,
- StringRef DiagKind, bool ReqAttr = false);
+ bool ReqAttr = false);
void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
- SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind,
- StringRef DiagKind);
+ SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind);
template <typename AttrType>
void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
- const NamedDecl *D, VarDecl *SelfDecl = nullptr);
+ const NamedDecl *D, til::SExpr *Self = nullptr);
template <class AttrType>
void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
@@ -1062,6 +1057,19 @@ public:
}
void runAnalysis(AnalysisDeclContext &AC);
+
+ void warnIfMutexNotHeld(const FactSet &FSet, const NamedDecl *D,
+ const Expr *Exp, AccessKind AK, Expr *MutexExp,
+ ProtectedOperationKind POK, til::LiteralPtr *Self,
+ SourceLocation Loc);
+ void warnIfMutexHeld(const FactSet &FSet, const NamedDecl *D, const Expr *Exp,
+ Expr *MutexExp, til::LiteralPtr *Self,
+ SourceLocation Loc);
+
+ void checkAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
+ ProtectedOperationKind POK);
+ void checkPtAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
+ ProtectedOperationKind POK);
};
} // namespace
@@ -1169,7 +1177,7 @@ void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd,
}
// Transitively search other before sets, and warn on cycles.
if (traverse(Vdb)) {
- if (CycMap.find(Vd) == CycMap.end()) {
+ if (!CycMap.contains(Vd)) {
CycMap.insert(std::make_pair(Vd, true));
StringRef L1 = Vd->getName();
Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation());
@@ -1219,53 +1227,6 @@ public:
} // namespace
-static StringRef ClassifyDiagnostic(const CapabilityAttr *A) {
- return A->getName();
-}
-
-static StringRef ClassifyDiagnostic(QualType VDT) {
- // We need to look at the declaration of the type of the value to determine
- // which it is. The type should either be a record or a typedef, or a pointer
- // or reference thereof.
- if (const auto *RT = VDT->getAs<RecordType>()) {
- if (const auto *RD = RT->getDecl())
- if (const auto *CA = RD->getAttr<CapabilityAttr>())
- return ClassifyDiagnostic(CA);
- } else if (const auto *TT = VDT->getAs<TypedefType>()) {
- if (const auto *TD = TT->getDecl())
- if (const auto *CA = TD->getAttr<CapabilityAttr>())
- return ClassifyDiagnostic(CA);
- } else if (VDT->isPointerType() || VDT->isReferenceType())
- return ClassifyDiagnostic(VDT->getPointeeType());
-
- return "mutex";
-}
-
-static StringRef ClassifyDiagnostic(const ValueDecl *VD) {
- assert(VD && "No ValueDecl passed");
-
- // The ValueDecl is the declaration of a mutex or role (hopefully).
- return ClassifyDiagnostic(VD->getType());
-}
-
-template <typename AttrTy>
-static std::enable_if_t<!has_arg_iterator_range<AttrTy>::value, StringRef>
-ClassifyDiagnostic(const AttrTy *A) {
- if (const ValueDecl *VD = getValueDecl(A->getArg()))
- return ClassifyDiagnostic(VD);
- return "mutex";
-}
-
-template <typename AttrTy>
-static std::enable_if_t<has_arg_iterator_range<AttrTy>::value, StringRef>
-ClassifyDiagnostic(const AttrTy *A) {
- for (const auto *Arg : A->args()) {
- if (const ValueDecl *VD = getValueDecl(Arg))
- return ClassifyDiagnostic(VD);
- }
- return "mutex";
-}
-
bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
const threadSafety::til::SExpr *SExp = CapE.sexpr();
assert(SExp && "Null expressions should be ignored");
@@ -1273,7 +1234,7 @@ bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
if (const auto *LP = dyn_cast<til::LiteralPtr>(SExp)) {
const ValueDecl *VD = LP->clangDecl();
// Variables defined in a function are always inaccessible.
- if (!VD->isDefinedOutsideFunctionOrMethod())
+ if (!VD || !VD->isDefinedOutsideFunctionOrMethod())
return false;
// For now we consider static class members to be inaccessible.
if (isa<CXXRecordDecl>(VD->getDeclContext()))
@@ -1284,10 +1245,10 @@ bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
// Members are in scope from methods of the same class.
if (const auto *P = dyn_cast<til::Project>(SExp)) {
- if (!CurrentMethod)
+ if (!isa_and_nonnull<CXXMethodDecl>(CurrentFunction))
return false;
const ValueDecl *VD = P->clangDecl();
- return VD->getDeclContext() == CurrentMethod->getDeclContext();
+ return VD->getDeclContext() == CurrentFunction->getDeclContext();
}
return false;
@@ -1297,7 +1258,7 @@ bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
/// \param ReqAttr -- true if this is part of an initial Requires attribute.
void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
std::unique_ptr<FactEntry> Entry,
- StringRef DiagKind, bool ReqAttr) {
+ bool ReqAttr) {
if (Entry->shouldIgnore())
return;
@@ -1310,7 +1271,7 @@ void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
}
else {
if (inCurrentScope(*Entry) && !Entry->asserted())
- Handler.handleNegativeNotHeld(DiagKind, Entry->toString(),
+ Handler.handleNegativeNotHeld(Entry->getKind(), Entry->toString(),
NegC.toString(), Entry->loc());
}
}
@@ -1319,13 +1280,13 @@ void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
if (Handler.issueBetaWarnings() &&
!Entry->asserted() && !Entry->declared()) {
GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this,
- Entry->loc(), DiagKind);
+ Entry->loc(), Entry->getKind());
}
// FIXME: Don't always warn when we have support for reentrant locks.
if (const FactEntry *Cp = FSet.findLock(FactMan, *Entry)) {
if (!Entry->asserted())
- Cp->handleLock(FSet, FactMan, *Entry, Handler, DiagKind);
+ Cp->handleLock(FSet, FactMan, *Entry, Handler);
} else {
FSet.addLock(FactMan, std::move(Entry));
}
@@ -1335,8 +1296,7 @@ void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
SourceLocation UnlockLoc,
- bool FullyRemove, LockKind ReceivedKind,
- StringRef DiagKind) {
+ bool FullyRemove, LockKind ReceivedKind) {
if (Cp.shouldIgnore())
return;
@@ -1345,19 +1305,19 @@ void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
SourceLocation PrevLoc;
if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
PrevLoc = Neg->loc();
- Handler.handleUnmatchedUnlock(DiagKind, Cp.toString(), UnlockLoc, PrevLoc);
+ Handler.handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), UnlockLoc,
+ PrevLoc);
return;
}
// Generic lock removal doesn't care about lock kind mismatches, but
// otherwise diagnose when the lock kinds are mismatched.
if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
- Handler.handleIncorrectUnlockKind(DiagKind, Cp.toString(), LDat->kind(),
+ Handler.handleIncorrectUnlockKind(Cp.getKind(), Cp.toString(), LDat->kind(),
ReceivedKind, LDat->loc(), UnlockLoc);
}
- LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler,
- DiagKind);
+ LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler);
}
/// Extract the list of mutexIDs from the attribute on an expression,
@@ -1365,13 +1325,13 @@ void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
template <typename AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
const Expr *Exp, const NamedDecl *D,
- VarDecl *SelfDecl) {
+ til::SExpr *Self) {
if (Attr->args_size() == 0) {
// The mutex held is the "this" object.
- CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, SelfDecl);
+ CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, Self);
if (Cp.isInvalid()) {
- warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
- return;
+ warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
+ return;
}
//else
if (!Cp.shouldIgnore())
@@ -1380,10 +1340,10 @@ void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
}
for (const auto *Arg : Attr->args()) {
- CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl);
+ CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, Self);
if (Cp.isInvalid()) {
- warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
- continue;
+ warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
+ continue;
}
//else
if (!Cp.shouldIgnore())
@@ -1522,7 +1482,6 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
bool Negate = false;
const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;
- StringRef CapDiagKind = "mutex";
const auto *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
if (!Exp)
@@ -1543,21 +1502,18 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
Exp, FunDecl, PredBlock, CurrBlock, A->getSuccessValue(),
Negate);
- CapDiagKind = ClassifyDiagnostic(A);
break;
};
case attr::ExclusiveTrylockFunction: {
const auto *A = cast<ExclusiveTrylockFunctionAttr>(Attr);
- getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl,
- PredBlock, CurrBlock, A->getSuccessValue(), Negate);
- CapDiagKind = ClassifyDiagnostic(A);
+ getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl, PredBlock, CurrBlock,
+ A->getSuccessValue(), Negate);
break;
}
case attr::SharedTrylockFunction: {
const auto *A = cast<SharedTrylockFunctionAttr>(Attr);
- getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl,
- PredBlock, CurrBlock, A->getSuccessValue(), Negate);
- CapDiagKind = ClassifyDiagnostic(A);
+ getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl, PredBlock, CurrBlock,
+ A->getSuccessValue(), Negate);
break;
}
default:
@@ -1569,12 +1525,10 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
SourceLocation Loc = Exp->getExprLoc();
for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
addLock(Result, std::make_unique<LockableFactEntry>(ExclusiveLockToAdd,
- LK_Exclusive, Loc),
- CapDiagKind);
+ LK_Exclusive, Loc));
for (const auto &SharedLockToAdd : SharedLocksToAdd)
addLock(Result, std::make_unique<LockableFactEntry>(SharedLockToAdd,
- LK_Shared, Loc),
- CapDiagKind);
+ LK_Shared, Loc));
}
namespace {
@@ -1589,31 +1543,36 @@ class BuildLockset : public ConstStmtVisitor<BuildLockset> {
ThreadSafetyAnalyzer *Analyzer;
FactSet FSet;
+ // The fact set for the function on exit.
+ const FactSet &FunctionExitFSet;
LocalVariableMap::Context LVarCtx;
unsigned CtxIndex;
// helper functions
- void warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp, AccessKind AK,
- Expr *MutexExp, ProtectedOperationKind POK,
- StringRef DiagKind, SourceLocation Loc);
- void warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, Expr *MutexExp,
- StringRef DiagKind);
void checkAccess(const Expr *Exp, AccessKind AK,
- ProtectedOperationKind POK = POK_VarAccess);
+ ProtectedOperationKind POK = POK_VarAccess) {
+ Analyzer->checkAccess(FSet, Exp, AK, POK);
+ }
void checkPtAccess(const Expr *Exp, AccessKind AK,
- ProtectedOperationKind POK = POK_VarAccess);
+ ProtectedOperationKind POK = POK_VarAccess) {
+ Analyzer->checkPtAccess(FSet, Exp, AK, POK);
+ }
- void handleCall(const Expr *Exp, const NamedDecl *D, VarDecl *VD = nullptr);
+ void handleCall(const Expr *Exp, const NamedDecl *D,
+ til::LiteralPtr *Self = nullptr,
+ SourceLocation Loc = SourceLocation());
void examineArguments(const FunctionDecl *FD,
CallExpr::const_arg_iterator ArgBegin,
CallExpr::const_arg_iterator ArgEnd,
bool SkipFirstParam = false);
public:
- BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info)
+ BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info,
+ const FactSet &FunctionExitFSet)
: ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
- LVarCtx(Info.EntryContext), CtxIndex(Info.EntryIndex) {}
+ FunctionExitFSet(FunctionExitFSet), LVarCtx(Info.EntryContext),
+ CtxIndex(Info.EntryIndex) {}
void VisitUnaryOperator(const UnaryOperator *UO);
void VisitBinaryOperator(const BinaryOperator *BO);
@@ -1621,21 +1580,22 @@ public:
void VisitCallExpr(const CallExpr *Exp);
void VisitCXXConstructExpr(const CXXConstructExpr *Exp);
void VisitDeclStmt(const DeclStmt *S);
+ void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Exp);
+ void VisitReturnStmt(const ReturnStmt *S);
};
} // namespace
/// Warn if the LSet does not contain a lock sufficient to protect access
/// of at least the passed in AccessKind.
-void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
- AccessKind AK, Expr *MutexExp,
- ProtectedOperationKind POK,
- StringRef DiagKind, SourceLocation Loc) {
+void ThreadSafetyAnalyzer::warnIfMutexNotHeld(
+ const FactSet &FSet, const NamedDecl *D, const Expr *Exp, AccessKind AK,
+ Expr *MutexExp, ProtectedOperationKind POK, til::LiteralPtr *Self,
+ SourceLocation Loc) {
LockKind LK = getLockKindFromAccessKind(AK);
-
- CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
+ CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
if (Cp.isInvalid()) {
- warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind);
+ warnInvalidLock(Handler, MutexExp, D, Exp, Cp.getKind());
return;
} else if (Cp.shouldIgnore()) {
return;
@@ -1643,66 +1603,67 @@ void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
if (Cp.negative()) {
// Negative capabilities act like locks excluded
- const FactEntry *LDat = FSet.findLock(Analyzer->FactMan, !Cp);
+ const FactEntry *LDat = FSet.findLock(FactMan, !Cp);
if (LDat) {
- Analyzer->Handler.handleFunExcludesLock(
- DiagKind, D->getNameAsString(), (!Cp).toString(), Loc);
+ Handler.handleFunExcludesLock(Cp.getKind(), D->getNameAsString(),
+ (!Cp).toString(), Loc);
return;
}
// If this does not refer to a negative capability in the same class,
// then stop here.
- if (!Analyzer->inCurrentScope(Cp))
+ if (!inCurrentScope(Cp))
return;
// Otherwise the negative requirement must be propagated to the caller.
- LDat = FSet.findLock(Analyzer->FactMan, Cp);
+ LDat = FSet.findLock(FactMan, Cp);
if (!LDat) {
- Analyzer->Handler.handleNegativeNotHeld(D, Cp.toString(), Loc);
+ Handler.handleNegativeNotHeld(D, Cp.toString(), Loc);
}
return;
}
- const FactEntry *LDat = FSet.findLockUniv(Analyzer->FactMan, Cp);
+ const FactEntry *LDat = FSet.findLockUniv(FactMan, Cp);
bool NoError = true;
if (!LDat) {
// No exact match found. Look for a partial match.
- LDat = FSet.findPartialMatch(Analyzer->FactMan, Cp);
+ LDat = FSet.findPartialMatch(FactMan, Cp);
if (LDat) {
// Warn that there's no precise match.
std::string PartMatchStr = LDat->toString();
StringRef PartMatchName(PartMatchStr);
- Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
- LK, Loc, &PartMatchName);
+ Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc,
+ &PartMatchName);
} else {
// Warn that there's no match at all.
- Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
- LK, Loc);
+ Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc);
}
NoError = false;
}
// Make sure the mutex we found is the right kind.
if (NoError && LDat && !LDat->isAtLeast(LK)) {
- Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
- LK, Loc);
+ Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc);
}
}
/// Warn if the LSet contains the given lock.
-void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
- Expr *MutexExp, StringRef DiagKind) {
- CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
+void ThreadSafetyAnalyzer::warnIfMutexHeld(const FactSet &FSet,
+ const NamedDecl *D, const Expr *Exp,
+ Expr *MutexExp,
+ til::LiteralPtr *Self,
+ SourceLocation Loc) {
+ CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
if (Cp.isInvalid()) {
- warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind);
+ warnInvalidLock(Handler, MutexExp, D, Exp, Cp.getKind());
return;
} else if (Cp.shouldIgnore()) {
return;
}
- const FactEntry *LDat = FSet.findLock(Analyzer->FactMan, Cp);
+ const FactEntry *LDat = FSet.findLock(FactMan, Cp);
if (LDat) {
- Analyzer->Handler.handleFunExcludesLock(
- DiagKind, D->getNameAsString(), Cp.toString(), Exp->getExprLoc());
+ Handler.handleFunExcludesLock(Cp.getKind(), D->getNameAsString(),
+ Cp.toString(), Loc);
}
}
@@ -1711,8 +1672,9 @@ void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
/// marked with guarded_by, we must ensure the appropriate mutexes are held.
/// Similarly, we check if the access is to an expression that dereferences
/// a pointer marked with pt_guarded_by.
-void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK,
- ProtectedOperationKind POK) {
+void ThreadSafetyAnalyzer::checkAccess(const FactSet &FSet, const Expr *Exp,
+ AccessKind AK,
+ ProtectedOperationKind POK) {
Exp = Exp->IgnoreImplicit()->IgnoreParenCasts();
SourceLocation Loc = Exp->getExprLoc();
@@ -1736,39 +1698,50 @@ void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK,
if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
// For dereferences
if (UO->getOpcode() == UO_Deref)
- checkPtAccess(UO->getSubExpr(), AK, POK);
+ checkPtAccess(FSet, UO->getSubExpr(), AK, POK);
return;
}
+ if (const auto *BO = dyn_cast<BinaryOperator>(Exp)) {
+ switch (BO->getOpcode()) {
+ case BO_PtrMemD: // .*
+ return checkAccess(FSet, BO->getLHS(), AK, POK);
+ case BO_PtrMemI: // ->*
+ return checkPtAccess(FSet, BO->getLHS(), AK, POK);
+ default:
+ return;
+ }
+ }
+
if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
- checkPtAccess(AE->getLHS(), AK, POK);
+ checkPtAccess(FSet, AE->getLHS(), AK, POK);
return;
}
if (const auto *ME = dyn_cast<MemberExpr>(Exp)) {
if (ME->isArrow())
- checkPtAccess(ME->getBase(), AK, POK);
+ checkPtAccess(FSet, ME->getBase(), AK, POK);
else
- checkAccess(ME->getBase(), AK, POK);
+ checkAccess(FSet, ME->getBase(), AK, POK);
}
const ValueDecl *D = getValueDecl(Exp);
if (!D || !D->hasAttrs())
return;
- if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan)) {
- Analyzer->Handler.handleNoMutexHeld("mutex", D, POK, AK, Loc);
+ if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(FactMan)) {
+ Handler.handleNoMutexHeld(D, POK, AK, Loc);
}
for (const auto *I : D->specific_attrs<GuardedByAttr>())
- warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK,
- ClassifyDiagnostic(I), Loc);
+ warnIfMutexNotHeld(FSet, D, Exp, AK, I->getArg(), POK, nullptr, Loc);
}
/// Checks pt_guarded_by and pt_guarded_var attributes.
/// POK is the same operationKind that was passed to checkAccess.
-void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK,
- ProtectedOperationKind POK) {
+void ThreadSafetyAnalyzer::checkPtAccess(const FactSet &FSet, const Expr *Exp,
+ AccessKind AK,
+ ProtectedOperationKind POK) {
while (true) {
if (const auto *PE = dyn_cast<ParenExpr>(Exp)) {
Exp = PE->getSubExpr();
@@ -1778,7 +1751,7 @@ void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK,
if (CE->getCastKind() == CK_ArrayToPointerDecay) {
// If it's an actual array, and not a pointer, then it's elements
// are protected by GUARDED_BY, not PT_GUARDED_BY;
- checkAccess(CE->getSubExpr(), AK, POK);
+ checkAccess(FSet, CE->getSubExpr(), AK, POK);
return;
}
Exp = CE->getSubExpr();
@@ -1790,18 +1763,19 @@ void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK,
// Pass by reference warnings are under a different flag.
ProtectedOperationKind PtPOK = POK_VarDereference;
if (POK == POK_PassByRef) PtPOK = POK_PtPassByRef;
+ if (POK == POK_ReturnByRef)
+ PtPOK = POK_PtReturnByRef;
const ValueDecl *D = getValueDecl(Exp);
if (!D || !D->hasAttrs())
return;
- if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan))
- Analyzer->Handler.handleNoMutexHeld("mutex", D, PtPOK, AK,
- Exp->getExprLoc());
+ if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(FactMan))
+ Handler.handleNoMutexHeld(D, PtPOK, AK, Exp->getExprLoc());
for (auto const *I : D->specific_attrs<PtGuardedByAttr>())
- warnIfMutexNotHeld(D, Exp, AK, I->getArg(), PtPOK,
- ClassifyDiagnostic(I), Exp->getExprLoc());
+ warnIfMutexNotHeld(FSet, D, Exp, AK, I->getArg(), PtPOK, nullptr,
+ Exp->getExprLoc());
}
/// Process a function call, method call, constructor call,
@@ -1814,22 +1788,36 @@ void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK,
/// and check that the appropriate locks are held. Non-const method calls with
/// the same signature as const method calls can be also treated as reads.
///
+/// \param Exp The call expression.
+/// \param D The callee declaration.
+/// \param Self If \p Exp = nullptr, the implicit this argument or the argument
+/// of an implicitly called cleanup function.
+/// \param Loc If \p Exp = nullptr, the location.
void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
- VarDecl *VD) {
- SourceLocation Loc = Exp->getExprLoc();
+ til::LiteralPtr *Self, SourceLocation Loc) {
CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
CapExprSet ScopedReqsAndExcludes;
- StringRef CapDiagKind = "mutex";
// Figure out if we're constructing an object of scoped lockable class
- bool isScopedVar = false;
- if (VD) {
- if (const auto *CD = dyn_cast<const CXXConstructorDecl>(D)) {
- const CXXRecordDecl* PD = CD->getParent();
- if (PD && PD->hasAttr<ScopedLockableAttr>())
- isScopedVar = true;
+ CapabilityExpr Scp;
+ if (Exp) {
+ assert(!Self);
+ const auto *TagT = Exp->getType()->getAs<TagType>();
+ if (TagT && Exp->isPRValue()) {
+ std::pair<til::LiteralPtr *, StringRef> Placeholder =
+ Analyzer->SxBuilder.createThisPlaceholder(Exp);
+ [[maybe_unused]] auto inserted =
+ Analyzer->ConstructedObjects.insert({Exp, Placeholder.first});
+ assert(inserted.second && "Are we visiting the same expression again?");
+ if (isa<CXXConstructExpr>(Exp))
+ Self = Placeholder.first;
+ if (TagT->getDecl()->hasAttr<ScopedLockableAttr>())
+ Scp = CapabilityExpr(Placeholder.first, Placeholder.second, false);
}
+
+ assert(Loc.isInvalid());
+ Loc = Exp->getExprLoc();
}
for(const Attr *At : D->attrs()) {
@@ -1840,9 +1828,7 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
const auto *A = cast<AcquireCapabilityAttr>(At);
Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
: ExclusiveLocksToAdd,
- A, Exp, D, VD);
-
- CapDiagKind = ClassifyDiagnostic(A);
+ A, Exp, D, Self);
break;
}
@@ -1853,40 +1839,34 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
const auto *A = cast<AssertExclusiveLockAttr>(At);
CapExprSet AssertLocks;
- Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
+ Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self);
for (const auto &AssertLock : AssertLocks)
Analyzer->addLock(
- FSet,
- std::make_unique<LockableFactEntry>(AssertLock, LK_Exclusive, Loc,
- FactEntry::Asserted),
- ClassifyDiagnostic(A));
+ FSet, std::make_unique<LockableFactEntry>(
+ AssertLock, LK_Exclusive, Loc, FactEntry::Asserted));
break;
}
case attr::AssertSharedLock: {
const auto *A = cast<AssertSharedLockAttr>(At);
CapExprSet AssertLocks;
- Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
+ Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self);
for (const auto &AssertLock : AssertLocks)
Analyzer->addLock(
- FSet,
- std::make_unique<LockableFactEntry>(AssertLock, LK_Shared, Loc,
- FactEntry::Asserted),
- ClassifyDiagnostic(A));
+ FSet, std::make_unique<LockableFactEntry>(
+ AssertLock, LK_Shared, Loc, FactEntry::Asserted));
break;
}
case attr::AssertCapability: {
const auto *A = cast<AssertCapabilityAttr>(At);
CapExprSet AssertLocks;
- Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
+ Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self);
for (const auto &AssertLock : AssertLocks)
- Analyzer->addLock(FSet,
- std::make_unique<LockableFactEntry>(
- AssertLock,
- A->isShared() ? LK_Shared : LK_Exclusive, Loc,
- FactEntry::Asserted),
- ClassifyDiagnostic(A));
+ Analyzer->addLock(FSet, std::make_unique<LockableFactEntry>(
+ AssertLock,
+ A->isShared() ? LK_Shared : LK_Exclusive,
+ Loc, FactEntry::Asserted));
break;
}
@@ -1895,25 +1875,23 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
case attr::ReleaseCapability: {
const auto *A = cast<ReleaseCapabilityAttr>(At);
if (A->isGeneric())
- Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, VD);
+ Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, Self);
else if (A->isShared())
- Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, VD);
+ Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, Self);
else
- Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, VD);
-
- CapDiagKind = ClassifyDiagnostic(A);
+ Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, Self);
break;
}
case attr::RequiresCapability: {
const auto *A = cast<RequiresCapabilityAttr>(At);
for (auto *Arg : A->args()) {
- warnIfMutexNotHeld(D, Exp, A->isShared() ? AK_Read : AK_Written, Arg,
- POK_FunctionCall, ClassifyDiagnostic(A),
- Exp->getExprLoc());
+ Analyzer->warnIfMutexNotHeld(FSet, D, Exp,
+ A->isShared() ? AK_Read : AK_Written,
+ Arg, POK_FunctionCall, Self, Loc);
// use for adopting a lock
- if (isScopedVar)
- Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, VD);
+ if (!Scp.shouldIgnore())
+ Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self);
}
break;
}
@@ -1921,10 +1899,10 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
case attr::LocksExcluded: {
const auto *A = cast<LocksExcludedAttr>(At);
for (auto *Arg : A->args()) {
- warnIfMutexHeld(D, Exp, Arg, ClassifyDiagnostic(A));
+ Analyzer->warnIfMutexHeld(FSet, D, Exp, Arg, Self, Loc);
// use for deferring a lock
- if (isScopedVar)
- Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, VD);
+ if (!Scp.shouldIgnore())
+ Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self);
}
break;
}
@@ -1939,33 +1917,25 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
// FIXME -- should only fully remove if the attribute refers to 'this'.
bool Dtor = isa<CXXDestructorDecl>(D);
for (const auto &M : ExclusiveLocksToRemove)
- Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive, CapDiagKind);
+ Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive);
for (const auto &M : SharedLocksToRemove)
- Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared, CapDiagKind);
+ Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared);
for (const auto &M : GenericLocksToRemove)
- Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic, CapDiagKind);
+ Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic);
// Add locks.
FactEntry::SourceKind Source =
- isScopedVar ? FactEntry::Managed : FactEntry::Acquired;
+ !Scp.shouldIgnore() ? FactEntry::Managed : FactEntry::Acquired;
for (const auto &M : ExclusiveLocksToAdd)
- Analyzer->addLock(
- FSet, std::make_unique<LockableFactEntry>(M, LK_Exclusive, Loc, Source),
- CapDiagKind);
+ Analyzer->addLock(FSet, std::make_unique<LockableFactEntry>(M, LK_Exclusive,
+ Loc, Source));
for (const auto &M : SharedLocksToAdd)
Analyzer->addLock(
- FSet, std::make_unique<LockableFactEntry>(M, LK_Shared, Loc, Source),
- CapDiagKind);
+ FSet, std::make_unique<LockableFactEntry>(M, LK_Shared, Loc, Source));
- if (isScopedVar) {
+ if (!Scp.shouldIgnore()) {
// Add the managing object as a dummy mutex, mapped to the underlying mutex.
- SourceLocation MLoc = VD->getLocation();
- DeclRefExpr DRE(VD->getASTContext(), VD, false, VD->getType(), VK_LValue,
- VD->getLocation());
- // FIXME: does this store a pointer to DRE?
- CapabilityExpr Scp = Analyzer->SxBuilder.translateAttrExpr(&DRE, nullptr);
-
- auto ScopedEntry = std::make_unique<ScopedLockableFactEntry>(Scp, MLoc);
+ auto ScopedEntry = std::make_unique<ScopedLockableFactEntry>(Scp, Loc);
for (const auto &M : ExclusiveLocksToAdd)
ScopedEntry->addLock(M);
for (const auto &M : SharedLocksToAdd)
@@ -1976,7 +1946,7 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
ScopedEntry->addExclusiveUnlock(M);
for (const auto &M : SharedLocksToRemove)
ScopedEntry->addSharedUnlock(M);
- Analyzer->addLock(FSet, std::move(ScopedEntry), CapDiagKind);
+ Analyzer->addLock(FSet, std::move(ScopedEntry));
}
}
@@ -2066,23 +2036,34 @@ void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
examineArguments(CE->getDirectCallee(), CE->arg_begin(), CE->arg_end());
} else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
- auto OEop = OE->getOperator();
+ OverloadedOperatorKind OEop = OE->getOperator();
switch (OEop) {
- case OO_Equal: {
- const Expr *Target = OE->getArg(0);
- const Expr *Source = OE->getArg(1);
- checkAccess(Target, AK_Written);
- checkAccess(Source, AK_Read);
+ case OO_Equal:
+ case OO_PlusEqual:
+ case OO_MinusEqual:
+ case OO_StarEqual:
+ case OO_SlashEqual:
+ case OO_PercentEqual:
+ case OO_CaretEqual:
+ case OO_AmpEqual:
+ case OO_PipeEqual:
+ case OO_LessLessEqual:
+ case OO_GreaterGreaterEqual:
+ checkAccess(OE->getArg(1), AK_Read);
+ [[fallthrough]];
+ case OO_PlusPlus:
+ case OO_MinusMinus:
+ checkAccess(OE->getArg(0), AK_Written);
break;
- }
case OO_Star:
+ case OO_ArrowStar:
case OO_Arrow:
case OO_Subscript:
if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
// Grrr. operator* can be multiplication...
checkPtAccess(OE->getArg(0), AK_Read);
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default: {
// TODO: get rid of this, and rely on pass-by-ref instead.
const Expr *Obj = OE->getArg(0);
@@ -2114,33 +2095,21 @@ void BuildLockset::VisitCXXConstructExpr(const CXXConstructExpr *Exp) {
} else {
examineArguments(D, Exp->arg_begin(), Exp->arg_end());
}
+ if (D && D->hasAttrs())
+ handleCall(Exp, D);
}
-static CXXConstructorDecl *
-findConstructorForByValueReturn(const CXXRecordDecl *RD) {
- // Prefer a move constructor over a copy constructor. If there's more than
- // one copy constructor or more than one move constructor, we arbitrarily
- // pick the first declared such constructor rather than trying to guess which
- // one is more appropriate.
- CXXConstructorDecl *CopyCtor = nullptr;
- for (auto *Ctor : RD->ctors()) {
- if (Ctor->isDeleted())
- continue;
- if (Ctor->isMoveConstructor())
- return Ctor;
- if (!CopyCtor && Ctor->isCopyConstructor())
- CopyCtor = Ctor;
- }
- return CopyCtor;
-}
-
-static Expr *buildFakeCtorCall(CXXConstructorDecl *CD, ArrayRef<Expr *> Args,
- SourceLocation Loc) {
- ASTContext &Ctx = CD->getASTContext();
- return CXXConstructExpr::Create(Ctx, Ctx.getRecordType(CD->getParent()), Loc,
- CD, true, Args, false, false, false, false,
- CXXConstructExpr::CK_Complete,
- SourceRange(Loc, Loc));
+static const Expr *UnpackConstruction(const Expr *E) {
+ if (auto *CE = dyn_cast<CastExpr>(E))
+ if (CE->getCastKind() == CK_NoOp)
+ E = CE->getSubExpr()->IgnoreParens();
+ if (auto *CE = dyn_cast<CastExpr>(E))
+ if (CE->getCastKind() == CK_ConstructorConversion ||
+ CE->getCastKind() == CK_UserDefinedConversion)
+ E = CE->getSubExpr();
+ if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E))
+ E = BTE->getSubExpr();
+ return E;
}
void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
@@ -2149,7 +2118,7 @@ void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
for (auto *D : S->getDeclGroup()) {
if (auto *VD = dyn_cast_or_null<VarDecl>(D)) {
- Expr *E = VD->getInit();
+ const Expr *E = VD->getInit();
if (!E)
continue;
E = E->IgnoreParens();
@@ -2157,37 +2126,48 @@ void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
// handle constructors that involve temporaries
if (auto *EWC = dyn_cast<ExprWithCleanups>(E))
E = EWC->getSubExpr()->IgnoreParens();
- if (auto *CE = dyn_cast<CastExpr>(E))
- if (CE->getCastKind() == CK_NoOp ||
- CE->getCastKind() == CK_ConstructorConversion ||
- CE->getCastKind() == CK_UserDefinedConversion)
- E = CE->getSubExpr()->IgnoreParens();
- if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E))
- E = BTE->getSubExpr()->IgnoreParens();
-
- if (const auto *CE = dyn_cast<CXXConstructExpr>(E)) {
- const auto *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
- if (!CtorD || !CtorD->hasAttrs())
- continue;
- handleCall(E, CtorD, VD);
- } else if (isa<CallExpr>(E) && E->isPRValue()) {
- // If the object is initialized by a function call that returns a
- // scoped lockable by value, use the attributes on the copy or move
- // constructor to figure out what effect that should have on the
- // lockset.
- // FIXME: Is this really the best way to handle this situation?
- auto *RD = E->getType()->getAsCXXRecordDecl();
- if (!RD || !RD->hasAttr<ScopedLockableAttr>())
- continue;
- CXXConstructorDecl *CtorD = findConstructorForByValueReturn(RD);
- if (!CtorD || !CtorD->hasAttrs())
- continue;
- handleCall(buildFakeCtorCall(CtorD, {E}, E->getBeginLoc()), CtorD, VD);
+ E = UnpackConstruction(E);
+
+ if (auto Object = Analyzer->ConstructedObjects.find(E);
+ Object != Analyzer->ConstructedObjects.end()) {
+ Object->second->setClangDecl(VD);
+ Analyzer->ConstructedObjects.erase(Object);
}
}
}
}
+void BuildLockset::VisitMaterializeTemporaryExpr(
+ const MaterializeTemporaryExpr *Exp) {
+ if (const ValueDecl *ExtD = Exp->getExtendingDecl()) {
+ if (auto Object = Analyzer->ConstructedObjects.find(
+ UnpackConstruction(Exp->getSubExpr()));
+ Object != Analyzer->ConstructedObjects.end()) {
+ Object->second->setClangDecl(ExtD);
+ Analyzer->ConstructedObjects.erase(Object);
+ }
+ }
+}
+
+void BuildLockset::VisitReturnStmt(const ReturnStmt *S) {
+ if (Analyzer->CurrentFunction == nullptr)
+ return;
+ const Expr *RetVal = S->getRetValue();
+ if (!RetVal)
+ return;
+
+ // If returning by reference, check that the function requires the appropriate
+ // capabilities.
+ const QualType ReturnType =
+ Analyzer->CurrentFunction->getReturnType().getCanonicalType();
+ if (ReturnType->isLValueReferenceType()) {
+ Analyzer->checkAccess(
+ FunctionExitFSet, RetVal,
+ ReturnType->getPointeeType().isConstQualified() ? AK_Read : AK_Written,
+ POK_ReturnByRef);
+ }
+}
+
/// Given two facts merging on a join point, possibly warn and decide whether to
/// keep or replace.
///
@@ -2204,7 +2184,8 @@ bool ThreadSafetyAnalyzer::join(const FactEntry &A, const FactEntry &B,
if (CanModify || !ShouldTakeB)
return ShouldTakeB;
}
- Handler.handleExclusiveAndShared("mutex", B.toString(), B.loc(), A.loc());
+ Handler.handleExclusiveAndShared(B.getKind(), B.toString(), B.loc(),
+ A.loc());
// Take the exclusive capability to reduce further warnings.
return CanModify && B.kind() == LK_Exclusive;
} else {
@@ -2272,7 +2253,7 @@ static bool neverReturns(const CFGBlock *B) {
return false;
CFGElement Last = B->back();
- if (Optional<CFGStmt> S = Last.getAs<CFGStmt>()) {
+ if (std::optional<CFGStmt> S = Last.getAs<CFGStmt>()) {
if (isa<CXXThrowExpr>(S->getStmt()))
return true;
}
@@ -2296,8 +2277,7 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
CFG *CFGraph = walker.getGraph();
const NamedDecl *D = walker.getDecl();
- const auto *CurrentFunction = dyn_cast<FunctionDecl>(D);
- CurrentMethod = dyn_cast<CXXMethodDecl>(D);
+ CurrentFunction = dyn_cast<FunctionDecl>(D);
if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
return;
@@ -2322,8 +2302,11 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
const PostOrderCFGView *SortedGraph = walker.getSortedGraph();
PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
+ CFGBlockInfo &Initial = BlockInfo[CFGraph->getEntry().getBlockID()];
+ CFGBlockInfo &Final = BlockInfo[CFGraph->getExit().getBlockID()];
+
// Mark entry block as reachable
- BlockInfo[CFGraph->getEntry().getBlockID()].Reachable = true;
+ Initial.Reachable = true;
// Compute SSA names for local variables
LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);
@@ -2339,12 +2322,11 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
// to initial lockset. Also turn off checking for lock and unlock functions.
// FIXME: is there a more intelligent way to check lock/unlock functions?
if (!SortedGraph->empty() && D->hasAttrs()) {
- const CFGBlock *FirstBlock = *SortedGraph->begin();
- FactSet &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;
+ assert(*SortedGraph->begin() == &CFGraph->getEntry());
+ FactSet &InitialLockset = Initial.EntrySet;
CapExprSet ExclusiveLocksToAdd;
CapExprSet SharedLocksToAdd;
- StringRef CapDiagKind = "mutex";
SourceLocation Loc = D->getLocation();
for (const auto *Attr : D->attrs()) {
@@ -2352,7 +2334,6 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
nullptr, D);
- CapDiagKind = ClassifyDiagnostic(A);
} else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
// UNLOCK_FUNCTION() is used to hide the underlying lock implementation.
// We must ignore such methods.
@@ -2361,14 +2342,12 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
nullptr, D);
getMutexIDs(LocksReleased, A, nullptr, D);
- CapDiagKind = ClassifyDiagnostic(A);
} else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
if (A->args_size() == 0)
return;
getMutexIDs(A->isShared() ? SharedLocksAcquired
: ExclusiveLocksAcquired,
A, nullptr, D);
- CapDiagKind = ClassifyDiagnostic(A);
} else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) {
// Don't try to check trylock functions for now.
return;
@@ -2385,15 +2364,34 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
for (const auto &Mu : ExclusiveLocksToAdd) {
auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc,
FactEntry::Declared);
- addLock(InitialLockset, std::move(Entry), CapDiagKind, true);
+ addLock(InitialLockset, std::move(Entry), true);
}
for (const auto &Mu : SharedLocksToAdd) {
auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc,
FactEntry::Declared);
- addLock(InitialLockset, std::move(Entry), CapDiagKind, true);
+ addLock(InitialLockset, std::move(Entry), true);
}
}
+ // Compute the expected exit set.
+ // By default, we expect all locks held on entry to be held on exit.
+ FactSet ExpectedFunctionExitSet = Initial.EntrySet;
+
+ // Adjust the expected exit set by adding or removing locks, as declared
+ // by *-LOCK_FUNCTION and UNLOCK_FUNCTION. The intersect below will then
+ // issue the appropriate warning.
+ // FIXME: the location here is not quite right.
+ for (const auto &Lock : ExclusiveLocksAcquired)
+ ExpectedFunctionExitSet.addLock(
+ FactMan, std::make_unique<LockableFactEntry>(Lock, LK_Exclusive,
+ D->getLocation()));
+ for (const auto &Lock : SharedLocksAcquired)
+ ExpectedFunctionExitSet.addLock(
+ FactMan,
+ std::make_unique<LockableFactEntry>(Lock, LK_Shared, D->getLocation()));
+ for (const auto &Lock : LocksReleased)
+ ExpectedFunctionExitSet.removeLock(FactMan, Lock);
+
for (const auto *CurrBlock : *SortedGraph) {
unsigned CurrBlockID = CurrBlock->getBlockID();
CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
@@ -2415,7 +2413,6 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
// union because the real error is probably that we forgot to unlock M on
// all code paths.
bool LocksetInitialized = false;
- SmallVector<CFGBlock *, 8> SpecialBlocks;
for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
PE = CurrBlock->pred_end(); PI != PE; ++PI) {
// if *PI -> CurrBlock is a back edge
@@ -2432,17 +2429,6 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
// Okay, we can reach this block from the entry.
CurrBlockInfo->Reachable = true;
- // If the previous block ended in a 'continue' or 'break' statement, then
- // a difference in locksets is probably due to a bug in that block, rather
- // than in some other predecessor. In that case, keep the other
- // predecessor's lockset.
- if (const Stmt *Terminator = (*PI)->getTerminatorStmt()) {
- if (isa<ContinueStmt>(Terminator) || isa<BreakStmt>(Terminator)) {
- SpecialBlocks.push_back(*PI);
- continue;
- }
- }
-
FactSet PrevLockset;
getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);
@@ -2450,9 +2436,14 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
CurrBlockInfo->EntrySet = PrevLockset;
LocksetInitialized = true;
} else {
- intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
- CurrBlockInfo->EntryLoc,
- LEK_LockedSomePredecessors);
+ // Surprisingly 'continue' doesn't always produce back edges, because
+ // the CFG has empty "transition" blocks where they meet with the end
+ // of the regular loop body. We still want to diagnose them as loop.
+ intersectAndWarn(
+ CurrBlockInfo->EntrySet, PrevLockset, CurrBlockInfo->EntryLoc,
+ isa_and_nonnull<ContinueStmt>((*PI)->getTerminatorStmt())
+ ? LEK_LockedSomeLoopIterations
+ : LEK_LockedSomePredecessors);
}
}
@@ -2460,36 +2451,7 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
if (!CurrBlockInfo->Reachable)
continue;
- // Process continue and break blocks. Assume that the lockset for the
- // resulting block is unaffected by any discrepancies in them.
- for (const auto *PrevBlock : SpecialBlocks) {
- unsigned PrevBlockID = PrevBlock->getBlockID();
- CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
-
- if (!LocksetInitialized) {
- CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet;
- LocksetInitialized = true;
- } else {
- // Determine whether this edge is a loop terminator for diagnostic
- // purposes. FIXME: A 'break' statement might be a loop terminator, but
- // it might also be part of a switch. Also, a subsequent destructor
- // might add to the lockset, in which case the real issue might be a
- // double lock on the other path.
- const Stmt *Terminator = PrevBlock->getTerminatorStmt();
- bool IsLoop = Terminator && isa<ContinueStmt>(Terminator);
-
- FactSet PrevLockset;
- getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet,
- PrevBlock, CurrBlock);
-
- // Do not update EntrySet.
- intersectAndWarn(
- CurrBlockInfo->EntrySet, PrevLockset, PrevBlockInfo->ExitLoc,
- IsLoop ? LEK_LockedSomeLoopIterations : LEK_LockedSomePredecessors);
- }
- }
-
- BuildLockset LocksetBuilder(this, *CurrBlockInfo);
+ BuildLockset LocksetBuilder(this, *CurrBlockInfo, ExpectedFunctionExitSet);
// Visit all the statements in the basic block.
for (const auto &BI : *CurrBlock) {
@@ -2499,19 +2461,42 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
LocksetBuilder.Visit(CS.getStmt());
break;
}
- // Ignore BaseDtor, MemberDtor, and TemporaryDtor for now.
+ // Ignore BaseDtor and MemberDtor for now.
case CFGElement::AutomaticObjectDtor: {
CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>();
const auto *DD = AD.getDestructorDecl(AC.getASTContext());
if (!DD->hasAttrs())
break;
- // Create a dummy expression,
- auto *VD = const_cast<VarDecl *>(AD.getVarDecl());
- DeclRefExpr DRE(VD->getASTContext(), VD, false,
- VD->getType().getNonReferenceType(), VK_LValue,
- AD.getTriggerStmt()->getEndLoc());
- LocksetBuilder.handleCall(&DRE, DD);
+ LocksetBuilder.handleCall(nullptr, DD,
+ SxBuilder.createVariable(AD.getVarDecl()),
+ AD.getTriggerStmt()->getEndLoc());
+ break;
+ }
+
+ case CFGElement::CleanupFunction: {
+ const CFGCleanupFunction &CF = BI.castAs<CFGCleanupFunction>();
+ LocksetBuilder.handleCall(/*Exp=*/nullptr, CF.getFunctionDecl(),
+ SxBuilder.createVariable(CF.getVarDecl()),
+ CF.getVarDecl()->getLocation());
+ break;
+ }
+
+ case CFGElement::TemporaryDtor: {
+ auto TD = BI.castAs<CFGTemporaryDtor>();
+
+ // Clean up constructed object even if there are no attributes to
+ // keep the number of objects in limbo as small as possible.
+ if (auto Object = ConstructedObjects.find(
+ TD.getBindTemporaryExpr()->getSubExpr());
+ Object != ConstructedObjects.end()) {
+ const auto *DD = TD.getDestructorDecl(AC.getASTContext());
+ if (DD->hasAttrs())
+ // TODO: the location here isn't quite correct.
+ LocksetBuilder.handleCall(nullptr, DD, Object->second,
+ TD.getBindTemporaryExpr()->getEndLoc());
+ ConstructedObjects.erase(Object);
+ }
break;
}
default:
@@ -2538,31 +2523,12 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
}
}
- CFGBlockInfo *Initial = &BlockInfo[CFGraph->getEntry().getBlockID()];
- CFGBlockInfo *Final = &BlockInfo[CFGraph->getExit().getBlockID()];
-
// Skip the final check if the exit block is unreachable.
- if (!Final->Reachable)
+ if (!Final.Reachable)
return;
- // By default, we expect all locks held on entry to be held on exit.
- FactSet ExpectedExitSet = Initial->EntrySet;
-
- // Adjust the expected exit set by adding or removing locks, as declared
- // by *-LOCK_FUNCTION and UNLOCK_FUNCTION. The intersect below will then
- // issue the appropriate warning.
- // FIXME: the location here is not quite right.
- for (const auto &Lock : ExclusiveLocksAcquired)
- ExpectedExitSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
- Lock, LK_Exclusive, D->getLocation()));
- for (const auto &Lock : SharedLocksAcquired)
- ExpectedExitSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
- Lock, LK_Shared, D->getLocation()));
- for (const auto &Lock : LocksReleased)
- ExpectedExitSet.removeLock(FactMan, Lock);
-
// FIXME: Should we call this function for all blocks which exit the function?
- intersectAndWarn(ExpectedExitSet, Final->ExitSet, Final->ExitLoc,
+ intersectAndWarn(ExpectedFunctionExitSet, Final.ExitSet, Final.ExitLoc,
LEK_LockedAtEndOfFunction, LEK_NotLockedAtEndOfFunction);
Handler.leaveFunction(CurrentFunction);
diff --git a/contrib/llvm-project/clang/lib/Analysis/ThreadSafetyCommon.cpp b/contrib/llvm-project/clang/lib/Analysis/ThreadSafetyCommon.cpp
index e6b4a05501e2..2fe0f85897c3 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ThreadSafetyCommon.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ThreadSafetyCommon.cpp
@@ -69,12 +69,7 @@ static bool isIncompletePhi(const til::SExpr *E) {
using CallingContext = SExprBuilder::CallingContext;
-til::SExpr *SExprBuilder::lookupStmt(const Stmt *S) {
- auto It = SMap.find(S);
- if (It != SMap.end())
- return It->second;
- return nullptr;
-}
+til::SExpr *SExprBuilder::lookupStmt(const Stmt *S) { return SMap.lookup(S); }
til::SCFG *SExprBuilder::buildCFG(CFGWalker &Walker) {
Walker.walk(*this);
@@ -86,6 +81,28 @@ static bool isCalleeArrow(const Expr *E) {
return ME ? ME->isArrow() : false;
}
+static StringRef ClassifyDiagnostic(const CapabilityAttr *A) {
+ return A->getName();
+}
+
+static StringRef ClassifyDiagnostic(QualType VDT) {
+ // We need to look at the declaration of the type of the value to determine
+ // which it is. The type should either be a record or a typedef, or a pointer
+ // or reference thereof.
+ if (const auto *RT = VDT->getAs<RecordType>()) {
+ if (const auto *RD = RT->getDecl())
+ if (const auto *CA = RD->getAttr<CapabilityAttr>())
+ return ClassifyDiagnostic(CA);
+ } else if (const auto *TT = VDT->getAs<TypedefType>()) {
+ if (const auto *TD = TT->getDecl())
+ if (const auto *CA = TD->getAttr<CapabilityAttr>())
+ return ClassifyDiagnostic(CA);
+ } else if (VDT->isPointerType() || VDT->isReferenceType())
+ return ClassifyDiagnostic(VDT->getPointeeType());
+
+ return "mutex";
+}
+
/// Translate a clang expression in an attribute to a til::SExpr.
/// Constructs the context from D, DeclExp, and SelfDecl.
///
@@ -93,19 +110,23 @@ static bool isCalleeArrow(const Expr *E) {
/// \param D The declaration to which the attribute is attached.
/// \param DeclExp An expression involving the Decl to which the attribute
/// is attached. E.g. the call to a function.
+/// \param Self S-expression to substitute for a \ref CXXThisExpr in a call,
+/// or argument to a cleanup function.
CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
const NamedDecl *D,
const Expr *DeclExp,
- VarDecl *SelfDecl) {
+ til::SExpr *Self) {
// If we are processing a raw attribute expression, with no substitutions.
- if (!DeclExp)
+ if (!DeclExp && !Self)
return translateAttrExpr(AttrExp, nullptr);
CallingContext Ctx(nullptr, D);
// Examine DeclExp to find SelfArg and FunArgs, which are used to substitute
// for formal parameters when we call buildMutexID later.
- if (const auto *ME = dyn_cast<MemberExpr>(DeclExp)) {
+ if (!DeclExp)
+ /* We'll use Self. */;
+ else if (const auto *ME = dyn_cast<MemberExpr>(DeclExp)) {
Ctx.SelfArg = ME->getBase();
Ctx.SelfArrow = ME->isArrow();
} else if (const auto *CE = dyn_cast<CXXMemberCallExpr>(DeclExp)) {
@@ -120,29 +141,30 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
Ctx.SelfArg = nullptr; // Will be set below
Ctx.NumArgs = CE->getNumArgs();
Ctx.FunArgs = CE->getArgs();
- } else if (D && isa<CXXDestructorDecl>(D)) {
- // There's no such thing as a "destructor call" in the AST.
- Ctx.SelfArg = DeclExp;
}
- // Hack to handle constructors, where self cannot be recovered from
- // the expression.
- if (SelfDecl && !Ctx.SelfArg) {
- DeclRefExpr SelfDRE(SelfDecl->getASTContext(), SelfDecl, false,
- SelfDecl->getType(), VK_LValue,
- SelfDecl->getLocation());
- Ctx.SelfArg = &SelfDRE;
+ if (Self) {
+ assert(!Ctx.SelfArg && "Ambiguous self argument");
+ assert(isa<FunctionDecl>(D) && "Self argument requires function");
+ if (isa<CXXMethodDecl>(D))
+ Ctx.SelfArg = Self;
+ else
+ Ctx.FunArgs = Self;
// If the attribute has no arguments, then assume the argument is "this".
if (!AttrExp)
- return translateAttrExpr(Ctx.SelfArg, nullptr);
+ return CapabilityExpr(
+ Self,
+ ClassifyDiagnostic(
+ cast<CXXMethodDecl>(D)->getFunctionObjectParameterType()),
+ false);
else // For most attributes.
return translateAttrExpr(AttrExp, &Ctx);
}
// If the attribute has no arguments, then assume the argument is "this".
if (!AttrExp)
- return translateAttrExpr(Ctx.SelfArg, nullptr);
+ return translateAttrExpr(cast<const Expr *>(Ctx.SelfArg), nullptr);
else // For most attributes.
return translateAttrExpr(AttrExp, &Ctx);
}
@@ -152,16 +174,17 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
CallingContext *Ctx) {
if (!AttrExp)
- return CapabilityExpr(nullptr, false);
+ return CapabilityExpr();
if (const auto* SLit = dyn_cast<StringLiteral>(AttrExp)) {
if (SLit->getString() == StringRef("*"))
// The "*" expr is a universal lock, which essentially turns off
// checks until it is removed from the lockset.
- return CapabilityExpr(new (Arena) til::Wildcard(), false);
+ return CapabilityExpr(new (Arena) til::Wildcard(), StringRef("wildcard"),
+ false);
else
// Ignore other string literals for now.
- return CapabilityExpr(nullptr, false);
+ return CapabilityExpr();
}
bool Neg = false;
@@ -183,14 +206,26 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
// Trap mutex expressions like nullptr, or 0.
// Any literal value is nonsense.
if (!E || isa<til::Literal>(E))
- return CapabilityExpr(nullptr, false);
+ return CapabilityExpr();
+
+ StringRef Kind = ClassifyDiagnostic(AttrExp->getType());
// Hack to deal with smart pointers -- strip off top-level pointer casts.
if (const auto *CE = dyn_cast<til::Cast>(E)) {
if (CE->castOpcode() == til::CAST_objToPtr)
- return CapabilityExpr(CE->expr(), Neg);
+ return CapabilityExpr(CE->expr(), Kind, Neg);
}
- return CapabilityExpr(E, Neg);
+ return CapabilityExpr(E, Kind, Neg);
+}
+
+til::LiteralPtr *SExprBuilder::createVariable(const VarDecl *VD) {
+ return new (Arena) til::LiteralPtr(VD);
+}
+
+std::pair<til::LiteralPtr *, StringRef>
+SExprBuilder::createThisPlaceholder(const Expr *Exp) {
+ return {new (Arena) til::LiteralPtr(nullptr),
+ ClassifyDiagnostic(Exp->getType())};
}
// Translate a clang statement or expression to a TIL expression.
@@ -284,8 +319,14 @@ til::SExpr *SExprBuilder::translateDeclRefExpr(const DeclRefExpr *DRE,
? (cast<FunctionDecl>(D)->getCanonicalDecl() == Canonical)
: (cast<ObjCMethodDecl>(D)->getCanonicalDecl() == Canonical)) {
// Substitute call arguments for references to function parameters
- assert(I < Ctx->NumArgs);
- return translate(Ctx->FunArgs[I], Ctx->Prev);
+ if (const Expr *const *FunArgs =
+ Ctx->FunArgs.dyn_cast<const Expr *const *>()) {
+ assert(I < Ctx->NumArgs);
+ return translate(FunArgs[I], Ctx->Prev);
+ }
+
+ assert(I == 0);
+ return Ctx->FunArgs.get<til::SExpr *>();
}
}
// Map the param back to the param of the original function declaration
@@ -302,8 +343,12 @@ til::SExpr *SExprBuilder::translateDeclRefExpr(const DeclRefExpr *DRE,
til::SExpr *SExprBuilder::translateCXXThisExpr(const CXXThisExpr *TE,
CallingContext *Ctx) {
// Substitute for 'this'
- if (Ctx && Ctx->SelfArg)
- return translate(Ctx->SelfArg, Ctx->Prev);
+ if (Ctx && Ctx->SelfArg) {
+ if (const auto *SelfArg = dyn_cast<const Expr *>(Ctx->SelfArg))
+ return translate(SelfArg, Ctx->Prev);
+ else
+ return cast<til::SExpr *>(Ctx->SelfArg);
+ }
assert(SelfVar && "We have no variable for 'this'!");
return SelfVar;
}
@@ -612,7 +657,7 @@ SExprBuilder::translateAbstractConditionalOperator(
til::SExpr *
SExprBuilder::translateDeclStmt(const DeclStmt *S, CallingContext *Ctx) {
DeclGroupRef DGrp = S->getDeclGroup();
- for (auto I : DGrp) {
+ for (auto *I : DGrp) {
if (auto *VD = dyn_cast_or_null<VarDecl>(I)) {
Expr *E = VD->getInit();
til::SExpr* SE = translate(E, Ctx);
diff --git a/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp b/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp
index 67cd39728c35..e9111ded64eb 100644
--- a/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp
@@ -28,25 +28,43 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PackedVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include <algorithm>
#include <cassert>
+#include <optional>
using namespace clang;
#define DEBUG_LOGGING 0
+static bool recordIsNotEmpty(const RecordDecl *RD) {
+ // We consider a record decl to be empty if it contains only unnamed bit-
+ // fields, zero-width fields, and fields of empty record type.
+ for (const auto *FD : RD->fields()) {
+ if (FD->isUnnamedBitfield())
+ continue;
+ if (FD->isZeroSize(FD->getASTContext()))
+ continue;
+ // The only case remaining to check is for a field declaration of record
+ // type and whether that record itself is empty.
+ if (const auto *FieldRD = FD->getType()->getAsRecordDecl();
+ !FieldRD || recordIsNotEmpty(FieldRD))
+ return true;
+ }
+ return false;
+}
+
static bool isTrackedVar(const VarDecl *vd, const DeclContext *dc) {
if (vd->isLocalVarDecl() && !vd->hasGlobalStorage() &&
- !vd->isExceptionVariable() && !vd->isInitCapture() &&
- !vd->isImplicit() && vd->getDeclContext() == dc) {
+ !vd->isExceptionVariable() && !vd->isInitCapture() && !vd->isImplicit() &&
+ vd->getDeclContext() == dc) {
QualType ty = vd->getType();
- return ty->isScalarType() || ty->isVectorType() || ty->isRecordType();
+ if (const auto *RD = ty->getAsRecordDecl())
+ return recordIsNotEmpty(RD);
+ return ty->isScalarType() || ty->isVectorType() || ty->isRVVSizelessBuiltinType();
}
return false;
}
@@ -70,7 +88,7 @@ public:
unsigned size() const { return map.size(); }
/// Returns the bit vector index for a given declaration.
- Optional<unsigned> getValueIndex(const VarDecl *d) const;
+ std::optional<unsigned> getValueIndex(const VarDecl *d) const;
};
} // namespace
@@ -86,10 +104,10 @@ void DeclToIndex::computeMap(const DeclContext &dc) {
}
}
-Optional<unsigned> DeclToIndex::getValueIndex(const VarDecl *d) const {
+std::optional<unsigned> DeclToIndex::getValueIndex(const VarDecl *d) const {
llvm::DenseMap<const VarDecl *, unsigned>::const_iterator I = map.find(d);
if (I == map.end())
- return None;
+ return std::nullopt;
return I->second;
}
@@ -147,9 +165,8 @@ public:
Value getValue(const CFGBlock *block, const CFGBlock *dstBlock,
const VarDecl *vd) {
- const Optional<unsigned> &idx = declToIndex.getValueIndex(vd);
- assert(idx.hasValue());
- return getValueVector(block)[idx.getValue()];
+ std::optional<unsigned> idx = declToIndex.getValueIndex(vd);
+ return getValueVector(block)[*idx];
}
};
@@ -208,9 +225,7 @@ void CFGBlockValues::resetScratch() {
}
ValueVector::reference CFGBlockValues::operator[](const VarDecl *vd) {
- const Optional<unsigned> &idx = declToIndex.getValueIndex(vd);
- assert(idx.hasValue());
- return scratch[idx.getValue()];
+ return scratch[*declToIndex.getValueIndex(vd)];
}
//------------------------------------------------------------------------====//
@@ -589,28 +604,6 @@ public:
continue;
}
- if (AtPredExit == MayUninitialized) {
- // If the predecessor's terminator is an "asm goto" that initializes
- // the variable, then it won't be counted as "initialized" on the
- // non-fallthrough paths.
- CFGTerminator term = Pred->getTerminator();
- if (const auto *as = dyn_cast_or_null<GCCAsmStmt>(term.getStmt())) {
- const CFGBlock *fallthrough = *Pred->succ_begin();
- if (as->isAsmGoto() &&
- llvm::any_of(as->outputs(), [&](const Expr *output) {
- return vd == findVar(output).getDecl() &&
- llvm::any_of(as->labels(),
- [&](const AddrLabelExpr *label) {
- return label->getLabel()->getStmt() == B->Label &&
- B != fallthrough;
- });
- })) {
- Use.setUninitAfterDecl();
- continue;
- }
- }
- }
-
unsigned &SV = SuccsVisited[Pred->getBlockID()];
if (!SV) {
// When visiting the first successor of a block, mark all NULL
@@ -810,13 +803,22 @@ void TransferFunctions::VisitGCCAsmStmt(GCCAsmStmt *as) {
if (!as->isAsmGoto())
return;
- for (const Expr *o : as->outputs())
- if (const VarDecl *VD = findVar(o).getDecl())
+ ASTContext &C = ac.getASTContext();
+ for (const Expr *O : as->outputs()) {
+ const Expr *Ex = stripCasts(C, O);
+
+ // Strip away any unary operators. Invalid l-values are reported by other
+ // semantic analysis passes.
+ while (const auto *UO = dyn_cast<UnaryOperator>(Ex))
+ Ex = stripCasts(C, UO->getSubExpr());
+
+ // Mark the variable as potentially uninitialized for those cases where
+ // it's used on an indirect path, where it's not guaranteed to be
+ // defined.
+ if (const VarDecl *VD = findVar(Ex).getDecl())
if (vals[VD] != Initialized)
- // If the variable isn't initialized by the time we get here, then we
- // mark it as potentially uninitialized for those cases where it's used
- // on an indirect path, where it's not guaranteed to be defined.
vals[VD] = MayUninitialized;
+ }
}
void TransferFunctions::VisitObjCMessageExpr(ObjCMessageExpr *ME) {
@@ -853,7 +855,7 @@ static bool runOnBlock(const CFGBlock *block, const CFG &cfg,
// Apply the transfer function.
TransferFunctions tf(vals, cfg, block, ac, classification, handler);
for (const auto &I : *block) {
- if (Optional<CFGStmt> cs = I.getAs<CFGStmt>())
+ if (std::optional<CFGStmt> cs = I.getAs<CFGStmt>())
tf.Visit(const_cast<Stmt *>(cs->getStmt()));
}
CFGTerminator terminator = block->getTerminator();
@@ -894,7 +896,7 @@ struct PruneBlocksHandler : public UninitVariablesHandler {
hadUse[currentBlock] = true;
hadAnyUse = true;
}
-
+
/// Called when the uninitialized variable analysis detects the
/// idiom 'int x = x'. All other uses of 'x' within the initializer
/// are handled by handleUseOfUninitVariable.
diff --git a/contrib/llvm-project/clang/lib/Analysis/UnsafeBufferUsage.cpp b/contrib/llvm-project/clang/lib/Analysis/UnsafeBufferUsage.cpp
new file mode 100644
index 000000000000..7df706beb226
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/UnsafeBufferUsage.cpp
@@ -0,0 +1,2941 @@
+//===- UnsafeBufferUsage.cpp - Replace pointers with modern C++ -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/UnsafeBufferUsage.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/SmallVector.h"
+#include <memory>
+#include <optional>
+#include <sstream>
+#include <queue>
+
+using namespace llvm;
+using namespace clang;
+using namespace ast_matchers;
+
+#ifndef NDEBUG
+namespace {
+class StmtDebugPrinter
+ : public ConstStmtVisitor<StmtDebugPrinter, std::string> {
+public:
+ std::string VisitStmt(const Stmt *S) { return S->getStmtClassName(); }
+
+ std::string VisitBinaryOperator(const BinaryOperator *BO) {
+ return "BinaryOperator(" + BO->getOpcodeStr().str() + ")";
+ }
+
+ std::string VisitUnaryOperator(const UnaryOperator *UO) {
+ return "UnaryOperator(" + UO->getOpcodeStr(UO->getOpcode()).str() + ")";
+ }
+
+ std::string VisitImplicitCastExpr(const ImplicitCastExpr *ICE) {
+ return "ImplicitCastExpr(" + std::string(ICE->getCastKindName()) + ")";
+ }
+};
+
+// Returns a string of ancestor `Stmt`s of the given `DRE` in such a form:
+// "DRE ==> parent-of-DRE ==> grandparent-of-DRE ==> ...".
+static std::string getDREAncestorString(const DeclRefExpr *DRE,
+ ASTContext &Ctx) {
+ std::stringstream SS;
+ const Stmt *St = DRE;
+ StmtDebugPrinter StmtPriner;
+
+ do {
+ SS << StmtPriner.Visit(St);
+
+ DynTypedNodeList StParents = Ctx.getParents(*St);
+
+ if (StParents.size() > 1)
+ return "unavailable due to multiple parents";
+ if (StParents.size() == 0)
+ break;
+ St = StParents.begin()->get<Stmt>();
+ if (St)
+ SS << " ==> ";
+ } while (St);
+ return SS.str();
+}
+} // namespace
+#endif /* NDEBUG */
+
+namespace clang::ast_matchers {
+// A `RecursiveASTVisitor` that traverses all descendants of a given node "n"
+// except for those belonging to a different callable of "n".
+class MatchDescendantVisitor
+ : public RecursiveASTVisitor<MatchDescendantVisitor> {
+public:
+ typedef RecursiveASTVisitor<MatchDescendantVisitor> VisitorBase;
+
+ // Creates an AST visitor that matches `Matcher` on all
+ // descendants of a given node "n" except for the ones
+ // belonging to a different callable of "n".
+ MatchDescendantVisitor(const internal::DynTypedMatcher *Matcher,
+ internal::ASTMatchFinder *Finder,
+ internal::BoundNodesTreeBuilder *Builder,
+ internal::ASTMatchFinder::BindKind Bind,
+ const bool ignoreUnevaluatedContext)
+ : Matcher(Matcher), Finder(Finder), Builder(Builder), Bind(Bind),
+ Matches(false), ignoreUnevaluatedContext(ignoreUnevaluatedContext) {}
+
+ // Returns true if a match is found in a subtree of `DynNode`, which belongs
+ // to the same callable of `DynNode`.
+ bool findMatch(const DynTypedNode &DynNode) {
+ Matches = false;
+ if (const Stmt *StmtNode = DynNode.get<Stmt>()) {
+ TraverseStmt(const_cast<Stmt *>(StmtNode));
+ *Builder = ResultBindings;
+ return Matches;
+ }
+ return false;
+ }
+
+ // The following are overriding methods from the base visitor class.
+ // They are public only to allow CRTP to work. They are *not *part
+ // of the public API of this class.
+
+ // For the matchers so far used in safe buffers, we only need to match
+ // `Stmt`s. To override more as needed.
+
+ bool TraverseDecl(Decl *Node) {
+ if (!Node)
+ return true;
+ if (!match(*Node))
+ return false;
+ // To skip callables:
+ if (isa<FunctionDecl, BlockDecl, ObjCMethodDecl>(Node))
+ return true;
+ // Traverse descendants
+ return VisitorBase::TraverseDecl(Node);
+ }
+
+ bool TraverseGenericSelectionExpr(GenericSelectionExpr *Node) {
+ // These are unevaluated, except the result expression.
+ if(ignoreUnevaluatedContext)
+ return TraverseStmt(Node->getResultExpr());
+ return VisitorBase::TraverseGenericSelectionExpr(Node);
+ }
+
+ bool TraverseUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *Node) {
+ // Unevaluated context.
+ if(ignoreUnevaluatedContext)
+ return true;
+ return VisitorBase::TraverseUnaryExprOrTypeTraitExpr(Node);
+ }
+
+ bool TraverseTypeOfExprTypeLoc(TypeOfExprTypeLoc Node) {
+ // Unevaluated context.
+ if(ignoreUnevaluatedContext)
+ return true;
+ return VisitorBase::TraverseTypeOfExprTypeLoc(Node);
+ }
+
+ bool TraverseDecltypeTypeLoc(DecltypeTypeLoc Node) {
+ // Unevaluated context.
+ if(ignoreUnevaluatedContext)
+ return true;
+ return VisitorBase::TraverseDecltypeTypeLoc(Node);
+ }
+
+ bool TraverseCXXNoexceptExpr(CXXNoexceptExpr *Node) {
+ // Unevaluated context.
+ if(ignoreUnevaluatedContext)
+ return true;
+ return VisitorBase::TraverseCXXNoexceptExpr(Node);
+ }
+
+ bool TraverseCXXTypeidExpr(CXXTypeidExpr *Node) {
+ // Unevaluated context.
+ if(ignoreUnevaluatedContext)
+ return true;
+ return VisitorBase::TraverseCXXTypeidExpr(Node);
+ }
+
+ bool TraverseStmt(Stmt *Node, DataRecursionQueue *Queue = nullptr) {
+ if (!Node)
+ return true;
+ if (!match(*Node))
+ return false;
+ return VisitorBase::TraverseStmt(Node);
+ }
+
+ bool shouldVisitTemplateInstantiations() const { return true; }
+ bool shouldVisitImplicitCode() const {
+ // TODO: let's ignore implicit code for now
+ return false;
+ }
+
+private:
+ // Sets 'Matched' to true if 'Matcher' matches 'Node'
+ //
+ // Returns 'true' if traversal should continue after this function
+ // returns, i.e. if no match is found or 'Bind' is 'BK_All'.
+ template <typename T> bool match(const T &Node) {
+ internal::BoundNodesTreeBuilder RecursiveBuilder(*Builder);
+
+ if (Matcher->matches(DynTypedNode::create(Node), Finder,
+ &RecursiveBuilder)) {
+ ResultBindings.addMatch(RecursiveBuilder);
+ Matches = true;
+ if (Bind != internal::ASTMatchFinder::BK_All)
+ return false; // Abort as soon as a match is found.
+ }
+ return true;
+ }
+
+ const internal::DynTypedMatcher *const Matcher;
+ internal::ASTMatchFinder *const Finder;
+ internal::BoundNodesTreeBuilder *const Builder;
+ internal::BoundNodesTreeBuilder ResultBindings;
+ const internal::ASTMatchFinder::BindKind Bind;
+ bool Matches;
+ bool ignoreUnevaluatedContext;
+};
+
+// Because we're dealing with raw pointers, let's define what we mean by that.
+static auto hasPointerType() {
+ return hasType(hasCanonicalType(pointerType()));
+}
+
+static auto hasArrayType() {
+ return hasType(hasCanonicalType(arrayType()));
+}
+
+AST_MATCHER_P(Stmt, forEachDescendantEvaluatedStmt, internal::Matcher<Stmt>, innerMatcher) {
+ const DynTypedMatcher &DTM = static_cast<DynTypedMatcher>(innerMatcher);
+
+ MatchDescendantVisitor Visitor(&DTM, Finder, Builder, ASTMatchFinder::BK_All, true);
+ return Visitor.findMatch(DynTypedNode::create(Node));
+}
+
+AST_MATCHER_P(Stmt, forEachDescendantStmt, internal::Matcher<Stmt>, innerMatcher) {
+ const DynTypedMatcher &DTM = static_cast<DynTypedMatcher>(innerMatcher);
+
+ MatchDescendantVisitor Visitor(&DTM, Finder, Builder, ASTMatchFinder::BK_All, false);
+ return Visitor.findMatch(DynTypedNode::create(Node));
+}
+
+// Matches a `Stmt` node iff the node is in a safe-buffer opt-out region
+AST_MATCHER_P(Stmt, notInSafeBufferOptOut, const UnsafeBufferUsageHandler *,
+ Handler) {
+ return !Handler->isSafeBufferOptOut(Node.getBeginLoc());
+}
+
+AST_MATCHER_P(CastExpr, castSubExpr, internal::Matcher<Expr>, innerMatcher) {
+ return innerMatcher.matches(*Node.getSubExpr(), Finder, Builder);
+}
+
+// Matches a `UnaryOperator` whose operator is pre-increment:
+AST_MATCHER(UnaryOperator, isPreInc) {
+ return Node.getOpcode() == UnaryOperator::Opcode::UO_PreInc;
+}
+
+// Returns a matcher that matches any expression 'e' such that `innerMatcher`
+// matches 'e' and 'e' is in an Unspecified Lvalue Context.
+static auto isInUnspecifiedLvalueContext(internal::Matcher<Expr> innerMatcher) {
+ // clang-format off
+ return
+ expr(anyOf(
+ implicitCastExpr(
+ hasCastKind(CastKind::CK_LValueToRValue),
+ castSubExpr(innerMatcher)),
+ binaryOperator(
+ hasAnyOperatorName("="),
+ hasLHS(innerMatcher)
+ )
+ ));
+// clang-format on
+}
+
+
+// Returns a matcher that matches any expression `e` such that `InnerMatcher`
+// matches `e` and `e` is in an Unspecified Pointer Context (UPC).
+static internal::Matcher<Stmt>
+isInUnspecifiedPointerContext(internal::Matcher<Stmt> InnerMatcher) {
+ // A UPC can be
+ // 1. an argument of a function call (except the callee has [[unsafe_...]]
+ // attribute), or
+ // 2. the operand of a pointer-to-(integer or bool) cast operation; or
+ // 3. the operand of a comparator operation; or
+ // 4. the operand of a pointer subtraction operation
+ // (i.e., computing the distance between two pointers); or ...
+
+ auto CallArgMatcher =
+ callExpr(forEachArgumentWithParam(InnerMatcher,
+ hasPointerType() /* array also decays to pointer type*/),
+ unless(callee(functionDecl(hasAttr(attr::UnsafeBufferUsage)))));
+
+ auto CastOperandMatcher =
+ castExpr(anyOf(hasCastKind(CastKind::CK_PointerToIntegral),
+ hasCastKind(CastKind::CK_PointerToBoolean)),
+ castSubExpr(allOf(hasPointerType(), InnerMatcher)));
+
+ auto CompOperandMatcher =
+ binaryOperator(hasAnyOperatorName("!=", "==", "<", "<=", ">", ">="),
+ eachOf(hasLHS(allOf(hasPointerType(), InnerMatcher)),
+ hasRHS(allOf(hasPointerType(), InnerMatcher))));
+
+ // A matcher that matches pointer subtractions:
+ auto PtrSubtractionMatcher =
+ binaryOperator(hasOperatorName("-"),
+ // Note that here we need both LHS and RHS to be
+ // pointer. Then the inner matcher can match any of
+ // them:
+ allOf(hasLHS(hasPointerType()),
+ hasRHS(hasPointerType())),
+ eachOf(hasLHS(InnerMatcher),
+ hasRHS(InnerMatcher)));
+
+ return stmt(anyOf(CallArgMatcher, CastOperandMatcher, CompOperandMatcher,
+ PtrSubtractionMatcher));
+ // FIXME: any more cases? (UPC excludes the RHS of an assignment. For now we
+ // don't have to check that.)
+}
+
+// Returns a matcher that matches any expression 'e' such that `innerMatcher`
+// matches 'e' and 'e' is in an unspecified untyped context (i.e the expression
+// 'e' isn't evaluated to an RValue). For example, consider the following code:
+// int *p = new int[4];
+// int *q = new int[4];
+// if ((p = q)) {}
+// p = q;
+// The expression `p = q` in the conditional of the `if` statement
+// `if ((p = q))` is evaluated as an RValue, whereas the expression `p = q;`
+// in the assignment statement is in an untyped context.
+static internal::Matcher<Stmt>
+isInUnspecifiedUntypedContext(internal::Matcher<Stmt> InnerMatcher) {
+ // An unspecified context can be
+ // 1. A compound statement,
+ // 2. The body of an if statement
+ // 3. Body of a loop
+ auto CompStmt = compoundStmt(forEach(InnerMatcher));
+ auto IfStmtThen = ifStmt(hasThen(InnerMatcher));
+ auto IfStmtElse = ifStmt(hasElse(InnerMatcher));
+ // FIXME: Handle loop bodies.
+ return stmt(anyOf(CompStmt, IfStmtThen, IfStmtElse));
+}
+} // namespace clang::ast_matchers
+
+namespace {
+// Because the analysis revolves around variables and their types, we'll need to
+// track uses of variables (aka DeclRefExprs).
+using DeclUseList = SmallVector<const DeclRefExpr *, 1>;
+
+// Convenience typedef.
+using FixItList = SmallVector<FixItHint, 4>;
+
+// Defined below.
+class Strategy;
+} // namespace
+
+namespace {
+/// Gadget is an individual operation in the code that may be of interest to
+/// this analysis. Each (non-abstract) subclass corresponds to a specific
+/// rigid AST structure that constitutes an operation on a pointer-type object.
+/// Discovery of a gadget in the code corresponds to claiming that we understand
+/// what this part of code is doing well enough to potentially improve it.
+/// Gadgets can be warning (immediately deserving a warning) or fixable (not
+/// always deserving a warning per se, but requires our attention to identify
+/// it warrants a fixit).
+class Gadget {
+public:
+ enum class Kind {
+#define GADGET(x) x,
+#include "clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def"
+ };
+
+ /// Common type of ASTMatchers used for discovering gadgets.
+ /// Useful for implementing the static matcher() methods
+ /// that are expected from all non-abstract subclasses.
+ using Matcher = decltype(stmt());
+
+ Gadget(Kind K) : K(K) {}
+
+ Kind getKind() const { return K; }
+
+#ifndef NDEBUG
+ StringRef getDebugName() const {
+ switch (K) {
+#define GADGET(x) case Kind::x: return #x;
+#include "clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def"
+ }
+ llvm_unreachable("Unhandled Gadget::Kind enum");
+ }
+#endif
+
+ virtual bool isWarningGadget() const = 0;
+ virtual const Stmt *getBaseStmt() const = 0;
+
+ /// Returns the list of pointer-type variables on which this gadget performs
+ /// its operation. Typically, there's only one variable. This isn't a list
+ /// of all DeclRefExprs in the gadget's AST!
+ virtual DeclUseList getClaimedVarUseSites() const = 0;
+
+ virtual ~Gadget() = default;
+
+private:
+ Kind K;
+};
+
+
+/// Warning gadgets correspond to unsafe code patterns that warrants
+/// an immediate warning.
+class WarningGadget : public Gadget {
+public:
+ WarningGadget(Kind K) : Gadget(K) {}
+
+ static bool classof(const Gadget *G) { return G->isWarningGadget(); }
+ bool isWarningGadget() const final { return true; }
+};
+
+/// Fixable gadgets correspond to code patterns that aren't always unsafe but need to be
+/// properly recognized in order to emit fixes. For example, if a raw pointer-type
+/// variable is replaced by a safe C++ container, every use of such variable must be
+/// carefully considered and possibly updated.
+class FixableGadget : public Gadget {
+public:
+ FixableGadget(Kind K) : Gadget(K) {}
+
+ static bool classof(const Gadget *G) { return !G->isWarningGadget(); }
+ bool isWarningGadget() const final { return false; }
+
+ /// Returns a fixit that would fix the current gadget according to
+ /// the current strategy. Returns std::nullopt if the fix cannot be produced;
+ /// returns an empty list if no fixes are necessary.
+ virtual std::optional<FixItList> getFixits(const Strategy &) const {
+ return std::nullopt;
+ }
+
+ /// Returns a list of two elements where the first element is the LHS of a pointer assignment
+ /// statement and the second element is the RHS. This two-element list represents the fact that
+ /// the LHS buffer gets its bounds information from the RHS buffer. This information will be used
+ /// later to group all those variables whose types must be modified together to prevent type
+ /// mismatches.
+ virtual std::optional<std::pair<const VarDecl *, const VarDecl *>>
+ getStrategyImplications() const {
+ return std::nullopt;
+ }
+};
+
+static auto toSupportedVariable() {
+ return to(varDecl());
+}
+
+using FixableGadgetList = std::vector<std::unique_ptr<FixableGadget>>;
+using WarningGadgetList = std::vector<std::unique_ptr<WarningGadget>>;
+
+/// An increment of a pointer-type value is unsafe as it may run the pointer
+/// out of bounds.
+class IncrementGadget : public WarningGadget {
+ static constexpr const char *const OpTag = "op";
+ const UnaryOperator *Op;
+
+public:
+ IncrementGadget(const MatchFinder::MatchResult &Result)
+ : WarningGadget(Kind::Increment),
+ Op(Result.Nodes.getNodeAs<UnaryOperator>(OpTag)) {}
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::Increment;
+ }
+
+ static Matcher matcher() {
+ return stmt(unaryOperator(
+ hasOperatorName("++"),
+ hasUnaryOperand(ignoringParenImpCasts(hasPointerType()))
+ ).bind(OpTag));
+ }
+
+ const UnaryOperator *getBaseStmt() const override { return Op; }
+
+ DeclUseList getClaimedVarUseSites() const override {
+ SmallVector<const DeclRefExpr *, 2> Uses;
+ if (const auto *DRE =
+ dyn_cast<DeclRefExpr>(Op->getSubExpr()->IgnoreParenImpCasts())) {
+ Uses.push_back(DRE);
+ }
+
+ return std::move(Uses);
+ }
+};
+
+/// A decrement of a pointer-type value is unsafe as it may run the pointer
+/// out of bounds.
+class DecrementGadget : public WarningGadget {
+ static constexpr const char *const OpTag = "op";
+ const UnaryOperator *Op;
+
+public:
+ DecrementGadget(const MatchFinder::MatchResult &Result)
+ : WarningGadget(Kind::Decrement),
+ Op(Result.Nodes.getNodeAs<UnaryOperator>(OpTag)) {}
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::Decrement;
+ }
+
+ static Matcher matcher() {
+ return stmt(unaryOperator(
+ hasOperatorName("--"),
+ hasUnaryOperand(ignoringParenImpCasts(hasPointerType()))
+ ).bind(OpTag));
+ }
+
+ const UnaryOperator *getBaseStmt() const override { return Op; }
+
+ DeclUseList getClaimedVarUseSites() const override {
+ if (const auto *DRE =
+ dyn_cast<DeclRefExpr>(Op->getSubExpr()->IgnoreParenImpCasts())) {
+ return {DRE};
+ }
+
+ return {};
+ }
+};
+
+/// Array subscript expressions on raw pointers as if they're arrays. Unsafe as
+/// it doesn't have any bounds checks for the array.
+class ArraySubscriptGadget : public WarningGadget {
+ static constexpr const char *const ArraySubscrTag = "ArraySubscript";
+ const ArraySubscriptExpr *ASE;
+
+public:
+ ArraySubscriptGadget(const MatchFinder::MatchResult &Result)
+ : WarningGadget(Kind::ArraySubscript),
+ ASE(Result.Nodes.getNodeAs<ArraySubscriptExpr>(ArraySubscrTag)) {}
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::ArraySubscript;
+ }
+
+ static Matcher matcher() {
+ // FIXME: What if the index is integer literal 0? Should this be
+ // a safe gadget in this case?
+ // clang-format off
+ return stmt(arraySubscriptExpr(
+ hasBase(ignoringParenImpCasts(
+ anyOf(hasPointerType(), hasArrayType()))),
+ unless(hasIndex(
+ anyOf(integerLiteral(equals(0)), arrayInitIndexExpr())
+ )))
+ .bind(ArraySubscrTag));
+ // clang-format on
+ }
+
+ const ArraySubscriptExpr *getBaseStmt() const override { return ASE; }
+
+ DeclUseList getClaimedVarUseSites() const override {
+ if (const auto *DRE =
+ dyn_cast<DeclRefExpr>(ASE->getBase()->IgnoreParenImpCasts())) {
+ return {DRE};
+ }
+
+ return {};
+ }
+};
+
+/// A pointer arithmetic expression of one of the forms:
+/// \code
+/// ptr + n | n + ptr | ptr - n | ptr += n | ptr -= n
+/// \endcode
+class PointerArithmeticGadget : public WarningGadget {
+ static constexpr const char *const PointerArithmeticTag = "ptrAdd";
+ static constexpr const char *const PointerArithmeticPointerTag = "ptrAddPtr";
+ const BinaryOperator *PA; // pointer arithmetic expression
+ const Expr *Ptr; // the pointer expression in `PA`
+
+public:
+ PointerArithmeticGadget(const MatchFinder::MatchResult &Result)
+ : WarningGadget(Kind::PointerArithmetic),
+ PA(Result.Nodes.getNodeAs<BinaryOperator>(PointerArithmeticTag)),
+ Ptr(Result.Nodes.getNodeAs<Expr>(PointerArithmeticPointerTag)) {}
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::PointerArithmetic;
+ }
+
+ static Matcher matcher() {
+ auto HasIntegerType = anyOf(hasType(isInteger()), hasType(enumType()));
+ auto PtrAtRight =
+ allOf(hasOperatorName("+"),
+ hasRHS(expr(hasPointerType()).bind(PointerArithmeticPointerTag)),
+ hasLHS(HasIntegerType));
+ auto PtrAtLeft =
+ allOf(anyOf(hasOperatorName("+"), hasOperatorName("-"),
+ hasOperatorName("+="), hasOperatorName("-=")),
+ hasLHS(expr(hasPointerType()).bind(PointerArithmeticPointerTag)),
+ hasRHS(HasIntegerType));
+
+ return stmt(binaryOperator(anyOf(PtrAtLeft, PtrAtRight))
+ .bind(PointerArithmeticTag));
+ }
+
+ const Stmt *getBaseStmt() const override { return PA; }
+
+ DeclUseList getClaimedVarUseSites() const override {
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(Ptr->IgnoreParenImpCasts())) {
+ return {DRE};
+ }
+
+ return {};
+ }
+ // FIXME: pointer adding zero should be fine
+ // FIXME: this gadge will need a fix-it
+};
+
+/// A pointer initialization expression of the form:
+/// \code
+/// int *p = q;
+/// \endcode
+class PointerInitGadget : public FixableGadget {
+private:
+ static constexpr const char *const PointerInitLHSTag = "ptrInitLHS";
+ static constexpr const char *const PointerInitRHSTag = "ptrInitRHS";
+ const VarDecl * PtrInitLHS; // the LHS pointer expression in `PI`
+ const DeclRefExpr * PtrInitRHS; // the RHS pointer expression in `PI`
+
+public:
+ PointerInitGadget(const MatchFinder::MatchResult &Result)
+ : FixableGadget(Kind::PointerInit),
+ PtrInitLHS(Result.Nodes.getNodeAs<VarDecl>(PointerInitLHSTag)),
+ PtrInitRHS(Result.Nodes.getNodeAs<DeclRefExpr>(PointerInitRHSTag)) {}
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::PointerInit;
+ }
+
+ static Matcher matcher() {
+ auto PtrInitStmt = declStmt(hasSingleDecl(varDecl(
+ hasInitializer(ignoringImpCasts(declRefExpr(
+ hasPointerType(),
+ toSupportedVariable()).
+ bind(PointerInitRHSTag)))).
+ bind(PointerInitLHSTag)));
+
+ return stmt(PtrInitStmt);
+ }
+
+ virtual std::optional<FixItList> getFixits(const Strategy &S) const override;
+
+ virtual const Stmt *getBaseStmt() const override {
+ // FIXME: This needs to be the entire DeclStmt, assuming that this method
+ // makes sense at all on a FixableGadget.
+ return PtrInitRHS;
+ }
+
+ virtual DeclUseList getClaimedVarUseSites() const override {
+ return DeclUseList{PtrInitRHS};
+ }
+
+ virtual std::optional<std::pair<const VarDecl *, const VarDecl *>>
+ getStrategyImplications() const override {
+ return std::make_pair(PtrInitLHS,
+ cast<VarDecl>(PtrInitRHS->getDecl()));
+ }
+};
+
+/// A pointer assignment expression of the form:
+/// \code
+/// p = q;
+/// \endcode
+class PointerAssignmentGadget : public FixableGadget {
+private:
+ static constexpr const char *const PointerAssignLHSTag = "ptrLHS";
+ static constexpr const char *const PointerAssignRHSTag = "ptrRHS";
+ const DeclRefExpr * PtrLHS; // the LHS pointer expression in `PA`
+ const DeclRefExpr * PtrRHS; // the RHS pointer expression in `PA`
+
+public:
+ PointerAssignmentGadget(const MatchFinder::MatchResult &Result)
+ : FixableGadget(Kind::PointerAssignment),
+ PtrLHS(Result.Nodes.getNodeAs<DeclRefExpr>(PointerAssignLHSTag)),
+ PtrRHS(Result.Nodes.getNodeAs<DeclRefExpr>(PointerAssignRHSTag)) {}
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::PointerAssignment;
+ }
+
+ static Matcher matcher() {
+ auto PtrAssignExpr = binaryOperator(allOf(hasOperatorName("="),
+ hasRHS(ignoringParenImpCasts(declRefExpr(hasPointerType(),
+ toSupportedVariable()).
+ bind(PointerAssignRHSTag))),
+ hasLHS(declRefExpr(hasPointerType(),
+ toSupportedVariable()).
+ bind(PointerAssignLHSTag))));
+
+ return stmt(isInUnspecifiedUntypedContext(PtrAssignExpr));
+ }
+
+ virtual std::optional<FixItList> getFixits(const Strategy &S) const override;
+
+ virtual const Stmt *getBaseStmt() const override {
+ // FIXME: This should be the binary operator, assuming that this method
+ // makes sense at all on a FixableGadget.
+ return PtrLHS;
+ }
+
+ virtual DeclUseList getClaimedVarUseSites() const override {
+ return DeclUseList{PtrLHS, PtrRHS};
+ }
+
+ virtual std::optional<std::pair<const VarDecl *, const VarDecl *>>
+ getStrategyImplications() const override {
+ return std::make_pair(cast<VarDecl>(PtrLHS->getDecl()),
+ cast<VarDecl>(PtrRHS->getDecl()));
+ }
+};
+
+/// A call of a function or method that performs unchecked buffer operations
+/// over one of its pointer parameters.
+class UnsafeBufferUsageAttrGadget : public WarningGadget {
+ constexpr static const char *const OpTag = "call_expr";
+ const CallExpr *Op;
+
+public:
+ UnsafeBufferUsageAttrGadget(const MatchFinder::MatchResult &Result)
+ : WarningGadget(Kind::UnsafeBufferUsageAttr),
+ Op(Result.Nodes.getNodeAs<CallExpr>(OpTag)) {}
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::UnsafeBufferUsageAttr;
+ }
+
+ static Matcher matcher() {
+ return stmt(callExpr(callee(functionDecl(hasAttr(attr::UnsafeBufferUsage))))
+ .bind(OpTag));
+ }
+ const Stmt *getBaseStmt() const override { return Op; }
+
+ DeclUseList getClaimedVarUseSites() const override { return {}; }
+};
+
+// Warning gadget for unsafe invocation of span::data method.
+// Triggers when the pointer returned by the invocation is immediately
+// cast to a larger type.
+
+class DataInvocationGadget : public WarningGadget {
+ constexpr static const char *const OpTag = "data_invocation_expr";
+ const ExplicitCastExpr *Op;
+
+public:
+ DataInvocationGadget(const MatchFinder::MatchResult &Result)
+ : WarningGadget(Kind::DataInvocation),
+ Op(Result.Nodes.getNodeAs<ExplicitCastExpr>(OpTag)) {}
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::DataInvocation;
+ }
+
+ static Matcher matcher() {
+ Matcher callExpr = cxxMemberCallExpr(
+ callee(cxxMethodDecl(hasName("data"), ofClass(hasName("std::span")))));
+ return stmt(
+ explicitCastExpr(anyOf(has(callExpr), has(parenExpr(has(callExpr)))))
+ .bind(OpTag));
+ }
+ const Stmt *getBaseStmt() const override { return Op; }
+
+ DeclUseList getClaimedVarUseSites() const override { return {}; }
+};
+
+// Represents expressions of the form `DRE[*]` in the Unspecified Lvalue
+// Context (see `isInUnspecifiedLvalueContext`).
+// Note here `[]` is the built-in subscript operator.
+class ULCArraySubscriptGadget : public FixableGadget {
+private:
+ static constexpr const char *const ULCArraySubscriptTag =
+ "ArraySubscriptUnderULC";
+ const ArraySubscriptExpr *Node;
+
+public:
+ ULCArraySubscriptGadget(const MatchFinder::MatchResult &Result)
+ : FixableGadget(Kind::ULCArraySubscript),
+ Node(Result.Nodes.getNodeAs<ArraySubscriptExpr>(ULCArraySubscriptTag)) {
+ assert(Node != nullptr && "Expecting a non-null matching result");
+ }
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::ULCArraySubscript;
+ }
+
+ static Matcher matcher() {
+ auto ArrayOrPtr = anyOf(hasPointerType(), hasArrayType());
+ auto BaseIsArrayOrPtrDRE =
+ hasBase(ignoringParenImpCasts(declRefExpr(ArrayOrPtr,
+ toSupportedVariable())));
+ auto Target =
+ arraySubscriptExpr(BaseIsArrayOrPtrDRE).bind(ULCArraySubscriptTag);
+
+ return expr(isInUnspecifiedLvalueContext(Target));
+ }
+
+ virtual std::optional<FixItList> getFixits(const Strategy &S) const override;
+
+ virtual const Stmt *getBaseStmt() const override { return Node; }
+
+ virtual DeclUseList getClaimedVarUseSites() const override {
+ if (const auto *DRE =
+ dyn_cast<DeclRefExpr>(Node->getBase()->IgnoreImpCasts())) {
+ return {DRE};
+ }
+ return {};
+ }
+};
+
+// Fixable gadget to handle stand alone pointers of the form `UPC(DRE)` in the
+// unspecified pointer context (isInUnspecifiedPointerContext). The gadget emits
+// fixit of the form `UPC(DRE.data())`.
+class UPCStandalonePointerGadget : public FixableGadget {
+private:
+ static constexpr const char *const DeclRefExprTag = "StandalonePointer";
+ const DeclRefExpr *Node;
+
+public:
+ UPCStandalonePointerGadget(const MatchFinder::MatchResult &Result)
+ : FixableGadget(Kind::UPCStandalonePointer),
+ Node(Result.Nodes.getNodeAs<DeclRefExpr>(DeclRefExprTag)) {
+ assert(Node != nullptr && "Expecting a non-null matching result");
+ }
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::UPCStandalonePointer;
+ }
+
+ static Matcher matcher() {
+ auto ArrayOrPtr = anyOf(hasPointerType(), hasArrayType());
+ auto target = expr(
+ ignoringParenImpCasts(declRefExpr(allOf(ArrayOrPtr,
+ toSupportedVariable())).bind(DeclRefExprTag)));
+ return stmt(isInUnspecifiedPointerContext(target));
+ }
+
+ virtual std::optional<FixItList> getFixits(const Strategy &S) const override;
+
+ virtual const Stmt *getBaseStmt() const override { return Node; }
+
+ virtual DeclUseList getClaimedVarUseSites() const override {
+ return {Node};
+ }
+};
+
+class PointerDereferenceGadget : public FixableGadget {
+ static constexpr const char *const BaseDeclRefExprTag = "BaseDRE";
+ static constexpr const char *const OperatorTag = "op";
+
+ const DeclRefExpr *BaseDeclRefExpr = nullptr;
+ const UnaryOperator *Op = nullptr;
+
+public:
+ PointerDereferenceGadget(const MatchFinder::MatchResult &Result)
+ : FixableGadget(Kind::PointerDereference),
+ BaseDeclRefExpr(
+ Result.Nodes.getNodeAs<DeclRefExpr>(BaseDeclRefExprTag)),
+ Op(Result.Nodes.getNodeAs<UnaryOperator>(OperatorTag)) {}
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::PointerDereference;
+ }
+
+ static Matcher matcher() {
+ auto Target =
+ unaryOperator(
+ hasOperatorName("*"),
+ has(expr(ignoringParenImpCasts(
+ declRefExpr(toSupportedVariable()).bind(BaseDeclRefExprTag)))))
+ .bind(OperatorTag);
+
+ return expr(isInUnspecifiedLvalueContext(Target));
+ }
+
+ DeclUseList getClaimedVarUseSites() const override {
+ return {BaseDeclRefExpr};
+ }
+
+ virtual const Stmt *getBaseStmt() const final { return Op; }
+
+ virtual std::optional<FixItList> getFixits(const Strategy &S) const override;
+};
+
+// Represents expressions of the form `&DRE[any]` in the Unspecified Pointer
+// Context (see `isInUnspecifiedPointerContext`).
+// Note here `[]` is the built-in subscript operator.
+class UPCAddressofArraySubscriptGadget : public FixableGadget {
+private:
+ static constexpr const char *const UPCAddressofArraySubscriptTag =
+ "AddressofArraySubscriptUnderUPC";
+ const UnaryOperator *Node; // the `&DRE[any]` node
+
+public:
+ UPCAddressofArraySubscriptGadget(const MatchFinder::MatchResult &Result)
+ : FixableGadget(Kind::ULCArraySubscript),
+ Node(Result.Nodes.getNodeAs<UnaryOperator>(
+ UPCAddressofArraySubscriptTag)) {
+ assert(Node != nullptr && "Expecting a non-null matching result");
+ }
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::UPCAddressofArraySubscript;
+ }
+
+ static Matcher matcher() {
+ return expr(isInUnspecifiedPointerContext(expr(ignoringImpCasts(
+ unaryOperator(hasOperatorName("&"),
+ hasUnaryOperand(arraySubscriptExpr(
+ hasBase(ignoringParenImpCasts(declRefExpr(
+ toSupportedVariable()))))))
+ .bind(UPCAddressofArraySubscriptTag)))));
+ }
+
+ virtual std::optional<FixItList> getFixits(const Strategy &) const override;
+
+ virtual const Stmt *getBaseStmt() const override { return Node; }
+
+ virtual DeclUseList getClaimedVarUseSites() const override {
+ const auto *ArraySubst = cast<ArraySubscriptExpr>(Node->getSubExpr());
+ const auto *DRE =
+ cast<DeclRefExpr>(ArraySubst->getBase()->IgnoreImpCasts());
+ return {DRE};
+ }
+};
+} // namespace
+
+namespace {
+// An auxiliary tracking facility for the fixit analysis. It helps connect
+// declarations to its uses and make sure we've covered all uses with our
+// analysis before we try to fix the declaration.
+class DeclUseTracker {
+ using UseSetTy = SmallSet<const DeclRefExpr *, 16>;
+ using DefMapTy = DenseMap<const VarDecl *, const DeclStmt *>;
+
+ // Allocate on the heap for easier move.
+ std::unique_ptr<UseSetTy> Uses{std::make_unique<UseSetTy>()};
+ DefMapTy Defs{};
+
+public:
+ DeclUseTracker() = default;
+ DeclUseTracker(const DeclUseTracker &) = delete; // Let's avoid copies.
+ DeclUseTracker &operator=(const DeclUseTracker &) = delete;
+ DeclUseTracker(DeclUseTracker &&) = default;
+ DeclUseTracker &operator=(DeclUseTracker &&) = default;
+
+ // Start tracking a freshly discovered DRE.
+ void discoverUse(const DeclRefExpr *DRE) { Uses->insert(DRE); }
+
+ // Stop tracking the DRE as it's been fully figured out.
+ void claimUse(const DeclRefExpr *DRE) {
+ assert(Uses->count(DRE) &&
+ "DRE not found or claimed by multiple matchers!");
+ Uses->erase(DRE);
+ }
+
+ // A variable is unclaimed if at least one use is unclaimed.
+ bool hasUnclaimedUses(const VarDecl *VD) const {
+ // FIXME: Can this be less linear? Maybe maintain a map from VDs to DREs?
+ return any_of(*Uses, [VD](const DeclRefExpr *DRE) {
+ return DRE->getDecl()->getCanonicalDecl() == VD->getCanonicalDecl();
+ });
+ }
+
+ UseSetTy getUnclaimedUses(const VarDecl *VD) const {
+ UseSetTy ReturnSet;
+ for (auto use : *Uses) {
+ if (use->getDecl()->getCanonicalDecl() == VD->getCanonicalDecl()) {
+ ReturnSet.insert(use);
+ }
+ }
+ return ReturnSet;
+ }
+
+ void discoverDecl(const DeclStmt *DS) {
+ for (const Decl *D : DS->decls()) {
+ if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ // FIXME: Assertion temporarily disabled due to a bug in
+ // ASTMatcher internal behavior in presence of GNU
+ // statement-expressions. We need to properly investigate this
+ // because it can screw up our algorithm in other ways.
+ // assert(Defs.count(VD) == 0 && "Definition already discovered!");
+ Defs[VD] = DS;
+ }
+ }
+ }
+
+ const DeclStmt *lookupDecl(const VarDecl *VD) const {
+ return Defs.lookup(VD);
+ }
+};
+} // namespace
+
+namespace {
+// Strategy is a map from variables to the way we plan to emit fixes for
+// these variables. It is figured out gradually by trying different fixes
+// for different variables depending on gadgets in which these variables
+// participate.
+class Strategy {
+public:
+ enum class Kind {
+ Wontfix, // We don't plan to emit a fixit for this variable.
+ Span, // We recommend replacing the variable with std::span.
+ Iterator, // We recommend replacing the variable with std::span::iterator.
+ Array, // We recommend replacing the variable with std::array.
+ Vector // We recommend replacing the variable with std::vector.
+ };
+
+private:
+ using MapTy = llvm::DenseMap<const VarDecl *, Kind>;
+
+ MapTy Map;
+
+public:
+ Strategy() = default;
+ Strategy(const Strategy &) = delete; // Let's avoid copies.
+ Strategy &operator=(const Strategy &) = delete;
+ Strategy(Strategy &&) = default;
+ Strategy &operator=(Strategy &&) = default;
+
+ void set(const VarDecl *VD, Kind K) { Map[VD] = K; }
+
+ Kind lookup(const VarDecl *VD) const {
+ auto I = Map.find(VD);
+ if (I == Map.end())
+ return Kind::Wontfix;
+
+ return I->second;
+ }
+};
+} // namespace
+
+
+// Representing a pointer type expression of the form `++Ptr` in an Unspecified
+// Pointer Context (UPC):
+class UPCPreIncrementGadget : public FixableGadget {
+private:
+ static constexpr const char *const UPCPreIncrementTag =
+ "PointerPreIncrementUnderUPC";
+ const UnaryOperator *Node; // the `++Ptr` node
+
+public:
+ UPCPreIncrementGadget(const MatchFinder::MatchResult &Result)
+ : FixableGadget(Kind::UPCPreIncrement),
+ Node(Result.Nodes.getNodeAs<UnaryOperator>(UPCPreIncrementTag)) {
+ assert(Node != nullptr && "Expecting a non-null matching result");
+ }
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::UPCPreIncrement;
+ }
+
+ static Matcher matcher() {
+ // Note here we match `++Ptr` for any expression `Ptr` of pointer type.
+ // Although currently we can only provide fix-its when `Ptr` is a DRE, we
+ // can have the matcher be general, so long as `getClaimedVarUseSites` does
+ // things right.
+ return stmt(isInUnspecifiedPointerContext(expr(ignoringImpCasts(
+ unaryOperator(isPreInc(),
+ hasUnaryOperand(declRefExpr(
+ toSupportedVariable()))
+ ).bind(UPCPreIncrementTag)))));
+ }
+
+ virtual std::optional<FixItList> getFixits(const Strategy &S) const override;
+
+ virtual const Stmt *getBaseStmt() const override { return Node; }
+
+ virtual DeclUseList getClaimedVarUseSites() const override {
+ return {dyn_cast<DeclRefExpr>(Node->getSubExpr())};
+ }
+};
+
+// Representing a pointer type expression of the form `Ptr += n` in an
+// Unspecified Untyped Context (UUC):
+class UUCAddAssignGadget : public FixableGadget {
+private:
+ static constexpr const char *const UUCAddAssignTag =
+ "PointerAddAssignUnderUUC";
+ static constexpr const char *const OffsetTag = "Offset";
+
+ const BinaryOperator *Node; // the `Ptr += n` node
+ const Expr *Offset = nullptr;
+
+public:
+ UUCAddAssignGadget(const MatchFinder::MatchResult &Result)
+ : FixableGadget(Kind::UUCAddAssign),
+ Node(Result.Nodes.getNodeAs<BinaryOperator>(UUCAddAssignTag)),
+ Offset(Result.Nodes.getNodeAs<Expr>(OffsetTag)) {
+ assert(Node != nullptr && "Expecting a non-null matching result");
+ }
+
+ static bool classof(const Gadget *G) {
+ return G->getKind() == Kind::UUCAddAssign;
+ }
+
+ static Matcher matcher() {
+ return stmt(isInUnspecifiedUntypedContext(expr(ignoringImpCasts(
+ binaryOperator(hasOperatorName("+="),
+ hasLHS(declRefExpr(toSupportedVariable())),
+ hasRHS(expr().bind(OffsetTag)))
+ .bind(UUCAddAssignTag)))));
+ }
+
+ virtual std::optional<FixItList> getFixits(const Strategy &S) const override;
+
+ virtual const Stmt *getBaseStmt() const override { return Node; }
+
+ virtual DeclUseList getClaimedVarUseSites() const override {
+ return {dyn_cast<DeclRefExpr>(Node->getLHS())};
+ }
+};
+
+// Representing a fixable expression of the form `*(ptr + 123)` or `*(123 +
+// ptr)`:
+class DerefSimplePtrArithFixableGadget : public FixableGadget {
+ static constexpr const char *const BaseDeclRefExprTag = "BaseDRE";
+ static constexpr const char *const DerefOpTag = "DerefOp";
+ static constexpr const char *const AddOpTag = "AddOp";
+ static constexpr const char *const OffsetTag = "Offset";
+
+ const DeclRefExpr *BaseDeclRefExpr = nullptr;
+ const UnaryOperator *DerefOp = nullptr;
+ const BinaryOperator *AddOp = nullptr;
+ const IntegerLiteral *Offset = nullptr;
+
+public:
+ DerefSimplePtrArithFixableGadget(const MatchFinder::MatchResult &Result)
+ : FixableGadget(Kind::DerefSimplePtrArithFixable),
+ BaseDeclRefExpr(
+ Result.Nodes.getNodeAs<DeclRefExpr>(BaseDeclRefExprTag)),
+ DerefOp(Result.Nodes.getNodeAs<UnaryOperator>(DerefOpTag)),
+ AddOp(Result.Nodes.getNodeAs<BinaryOperator>(AddOpTag)),
+ Offset(Result.Nodes.getNodeAs<IntegerLiteral>(OffsetTag)) {}
+
+ static Matcher matcher() {
+ // clang-format off
+ auto ThePtr = expr(hasPointerType(),
+ ignoringImpCasts(declRefExpr(toSupportedVariable()).
+ bind(BaseDeclRefExprTag)));
+ auto PlusOverPtrAndInteger = expr(anyOf(
+ binaryOperator(hasOperatorName("+"), hasLHS(ThePtr),
+ hasRHS(integerLiteral().bind(OffsetTag)))
+ .bind(AddOpTag),
+ binaryOperator(hasOperatorName("+"), hasRHS(ThePtr),
+ hasLHS(integerLiteral().bind(OffsetTag)))
+ .bind(AddOpTag)));
+ return isInUnspecifiedLvalueContext(unaryOperator(
+ hasOperatorName("*"),
+ hasUnaryOperand(ignoringParens(PlusOverPtrAndInteger)))
+ .bind(DerefOpTag));
+ // clang-format on
+ }
+
+ virtual std::optional<FixItList> getFixits(const Strategy &s) const final;
+
+ // TODO remove this method from FixableGadget interface
+ virtual const Stmt *getBaseStmt() const final { return nullptr; }
+
+ virtual DeclUseList getClaimedVarUseSites() const final {
+ return {BaseDeclRefExpr};
+ }
+};
+
+/// Scan the function and return a list of gadgets found with provided kits.
+static std::tuple<FixableGadgetList, WarningGadgetList, DeclUseTracker>
+findGadgets(const Decl *D, const UnsafeBufferUsageHandler &Handler,
+ bool EmitSuggestions) {
+
+ struct GadgetFinderCallback : MatchFinder::MatchCallback {
+ FixableGadgetList FixableGadgets;
+ WarningGadgetList WarningGadgets;
+ DeclUseTracker Tracker;
+
+ void run(const MatchFinder::MatchResult &Result) override {
+ // In debug mode, assert that we've found exactly one gadget.
+ // This helps us avoid conflicts in .bind() tags.
+#if NDEBUG
+#define NEXT return
+#else
+ [[maybe_unused]] int numFound = 0;
+#define NEXT ++numFound
+#endif
+
+ if (const auto *DRE = Result.Nodes.getNodeAs<DeclRefExpr>("any_dre")) {
+ Tracker.discoverUse(DRE);
+ NEXT;
+ }
+
+ if (const auto *DS = Result.Nodes.getNodeAs<DeclStmt>("any_ds")) {
+ Tracker.discoverDecl(DS);
+ NEXT;
+ }
+
+ // Figure out which matcher we've found, and call the appropriate
+ // subclass constructor.
+ // FIXME: Can we do this more logarithmically?
+#define FIXABLE_GADGET(name) \
+ if (Result.Nodes.getNodeAs<Stmt>(#name)) { \
+ FixableGadgets.push_back(std::make_unique<name##Gadget>(Result)); \
+ NEXT; \
+ }
+#include "clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def"
+#define WARNING_GADGET(name) \
+ if (Result.Nodes.getNodeAs<Stmt>(#name)) { \
+ WarningGadgets.push_back(std::make_unique<name##Gadget>(Result)); \
+ NEXT; \
+ }
+#include "clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def"
+
+ assert(numFound >= 1 && "Gadgets not found in match result!");
+ assert(numFound <= 1 && "Conflicting bind tags in gadgets!");
+ }
+ };
+
+ MatchFinder M;
+ GadgetFinderCallback CB;
+
+ // clang-format off
+ M.addMatcher(
+ stmt(
+ forEachDescendantEvaluatedStmt(stmt(anyOf(
+ // Add Gadget::matcher() for every gadget in the registry.
+#define WARNING_GADGET(x) \
+ allOf(x ## Gadget::matcher().bind(#x), \
+ notInSafeBufferOptOut(&Handler)),
+#include "clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def"
+ // Avoid a hanging comma.
+ unless(stmt())
+ )))
+ ),
+ &CB
+ );
+ // clang-format on
+
+ if (EmitSuggestions) {
+ // clang-format off
+ M.addMatcher(
+ stmt(
+ forEachDescendantStmt(stmt(eachOf(
+#define FIXABLE_GADGET(x) \
+ x ## Gadget::matcher().bind(#x),
+#include "clang/Analysis/Analyses/UnsafeBufferUsageGadgets.def"
+ // In parallel, match all DeclRefExprs so that to find out
+ // whether there are any uncovered by gadgets.
+ declRefExpr(anyOf(hasPointerType(), hasArrayType()),
+ to(anyOf(varDecl(), bindingDecl()))).bind("any_dre"),
+ // Also match DeclStmts because we'll need them when fixing
+ // their underlying VarDecls that otherwise don't have
+ // any backreferences to DeclStmts.
+ declStmt().bind("any_ds")
+ )))
+ ),
+ &CB
+ );
+ // clang-format on
+ }
+
+ M.match(*D->getBody(), D->getASTContext());
+ return {std::move(CB.FixableGadgets), std::move(CB.WarningGadgets),
+ std::move(CB.Tracker)};
+}
+
+// Compares AST nodes by source locations.
+template <typename NodeTy> struct CompareNode {
+ bool operator()(const NodeTy *N1, const NodeTy *N2) const {
+ return N1->getBeginLoc().getRawEncoding() <
+ N2->getBeginLoc().getRawEncoding();
+ }
+};
+
+struct WarningGadgetSets {
+ std::map<const VarDecl *, std::set<const WarningGadget *>,
+ // To keep keys sorted by their locations in the map so that the
+ // order is deterministic:
+ CompareNode<VarDecl>>
+ byVar;
+ // These Gadgets are not related to pointer variables (e. g. temporaries).
+ llvm::SmallVector<const WarningGadget *, 16> noVar;
+};
+
+static WarningGadgetSets
+groupWarningGadgetsByVar(const WarningGadgetList &AllUnsafeOperations) {
+ WarningGadgetSets result;
+ // If some gadgets cover more than one
+ // variable, they'll appear more than once in the map.
+ for (auto &G : AllUnsafeOperations) {
+ DeclUseList ClaimedVarUseSites = G->getClaimedVarUseSites();
+
+ bool AssociatedWithVarDecl = false;
+ for (const DeclRefExpr *DRE : ClaimedVarUseSites) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ result.byVar[VD].insert(G.get());
+ AssociatedWithVarDecl = true;
+ }
+ }
+
+ if (!AssociatedWithVarDecl) {
+ result.noVar.push_back(G.get());
+ continue;
+ }
+ }
+ return result;
+}
+
+struct FixableGadgetSets {
+ std::map<const VarDecl *, std::set<const FixableGadget *>,
+ // To keep keys sorted by their locations in the map so that the
+ // order is deterministic:
+ CompareNode<VarDecl>>
+ byVar;
+};
+
+static FixableGadgetSets
+groupFixablesByVar(FixableGadgetList &&AllFixableOperations) {
+ FixableGadgetSets FixablesForUnsafeVars;
+ for (auto &F : AllFixableOperations) {
+ DeclUseList DREs = F->getClaimedVarUseSites();
+
+ for (const DeclRefExpr *DRE : DREs) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ FixablesForUnsafeVars.byVar[VD].insert(F.get());
+ }
+ }
+ }
+ return FixablesForUnsafeVars;
+}
+
+bool clang::internal::anyConflict(const SmallVectorImpl<FixItHint> &FixIts,
+ const SourceManager &SM) {
+ // A simple interval overlap detection algorithm. Sorts all ranges by their
+ // begin location then finds the first overlap in one pass.
+ std::vector<const FixItHint *> All; // a copy of `FixIts`
+
+ for (const FixItHint &H : FixIts)
+ All.push_back(&H);
+ std::sort(All.begin(), All.end(),
+ [&SM](const FixItHint *H1, const FixItHint *H2) {
+ return SM.isBeforeInTranslationUnit(H1->RemoveRange.getBegin(),
+ H2->RemoveRange.getBegin());
+ });
+
+ const FixItHint *CurrHint = nullptr;
+
+ for (const FixItHint *Hint : All) {
+ if (!CurrHint ||
+ SM.isBeforeInTranslationUnit(CurrHint->RemoveRange.getEnd(),
+ Hint->RemoveRange.getBegin())) {
+ // Either to initialize `CurrHint` or `CurrHint` does not
+ // overlap with `Hint`:
+ CurrHint = Hint;
+ } else
+ // In case `Hint` overlaps the `CurrHint`, we found at least one
+ // conflict:
+ return true;
+ }
+ return false;
+}
+
+std::optional<FixItList>
+PointerAssignmentGadget::getFixits(const Strategy &S) const {
+ const auto *LeftVD = cast<VarDecl>(PtrLHS->getDecl());
+ const auto *RightVD = cast<VarDecl>(PtrRHS->getDecl());
+ switch (S.lookup(LeftVD)) {
+ case Strategy::Kind::Span:
+ if (S.lookup(RightVD) == Strategy::Kind::Span)
+ return FixItList{};
+ return std::nullopt;
+ case Strategy::Kind::Wontfix:
+ return std::nullopt;
+ case Strategy::Kind::Iterator:
+ case Strategy::Kind::Array:
+ case Strategy::Kind::Vector:
+ llvm_unreachable("unsupported strategies for FixableGadgets");
+ }
+ return std::nullopt;
+}
+
+std::optional<FixItList>
+PointerInitGadget::getFixits(const Strategy &S) const {
+ const auto *LeftVD = PtrInitLHS;
+ const auto *RightVD = cast<VarDecl>(PtrInitRHS->getDecl());
+ switch (S.lookup(LeftVD)) {
+ case Strategy::Kind::Span:
+ if (S.lookup(RightVD) == Strategy::Kind::Span)
+ return FixItList{};
+ return std::nullopt;
+ case Strategy::Kind::Wontfix:
+ return std::nullopt;
+ case Strategy::Kind::Iterator:
+ case Strategy::Kind::Array:
+ case Strategy::Kind::Vector:
+ llvm_unreachable("unsupported strategies for FixableGadgets");
+ }
+ return std::nullopt;
+}
+
+static bool isNonNegativeIntegerExpr(const Expr *Expr, const VarDecl *VD,
+ const ASTContext &Ctx) {
+ if (auto ConstVal = Expr->getIntegerConstantExpr(Ctx)) {
+ if (ConstVal->isNegative())
+ return false;
+ } else if (!Expr->getType()->isUnsignedIntegerType())
+ return false;
+ return true;
+}
+
+std::optional<FixItList>
+ULCArraySubscriptGadget::getFixits(const Strategy &S) const {
+ if (const auto *DRE =
+ dyn_cast<DeclRefExpr>(Node->getBase()->IgnoreImpCasts()))
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ switch (S.lookup(VD)) {
+ case Strategy::Kind::Span: {
+
+ // If the index has a negative constant value, we give up as no valid
+ // fix-it can be generated:
+ const ASTContext &Ctx = // FIXME: we need ASTContext to be passed in!
+ VD->getASTContext();
+ if (!isNonNegativeIntegerExpr(Node->getIdx(), VD, Ctx))
+ return std::nullopt;
+ // no-op is a good fix-it, otherwise
+ return FixItList{};
+ }
+ case Strategy::Kind::Wontfix:
+ case Strategy::Kind::Iterator:
+ case Strategy::Kind::Array:
+ case Strategy::Kind::Vector:
+ llvm_unreachable("unsupported strategies for FixableGadgets");
+ }
+ }
+ return std::nullopt;
+}
+
+static std::optional<FixItList> // forward declaration
+fixUPCAddressofArraySubscriptWithSpan(const UnaryOperator *Node);
+
+std::optional<FixItList>
+UPCAddressofArraySubscriptGadget::getFixits(const Strategy &S) const {
+ auto DREs = getClaimedVarUseSites();
+ const auto *VD = cast<VarDecl>(DREs.front()->getDecl());
+
+ switch (S.lookup(VD)) {
+ case Strategy::Kind::Span:
+ return fixUPCAddressofArraySubscriptWithSpan(Node);
+ case Strategy::Kind::Wontfix:
+ case Strategy::Kind::Iterator:
+ case Strategy::Kind::Array:
+ case Strategy::Kind::Vector:
+ llvm_unreachable("unsupported strategies for FixableGadgets");
+ }
+ return std::nullopt; // something went wrong, no fix-it
+}
+
+// FIXME: this function should be customizable through format
+static StringRef getEndOfLine() {
+ static const char *const EOL = "\n";
+ return EOL;
+}
+
+// Returns the text indicating that the user needs to provide input there:
+std::string getUserFillPlaceHolder(StringRef HintTextToUser = "placeholder") {
+ std::string s = std::string("<# ");
+ s += HintTextToUser;
+ s += " #>";
+ return s;
+}
+
+// Return the text representation of the given `APInt Val`:
+static std::string getAPIntText(APInt Val) {
+ SmallVector<char> Txt;
+ Val.toString(Txt, 10, true);
+ // APInt::toString does not add '\0' to the end of the string for us:
+ Txt.push_back('\0');
+ return Txt.data();
+}
+
+// Return the source location of the last character of the AST `Node`.
+template <typename NodeTy>
+static std::optional<SourceLocation>
+getEndCharLoc(const NodeTy *Node, const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ unsigned TkLen = Lexer::MeasureTokenLength(Node->getEndLoc(), SM, LangOpts);
+ SourceLocation Loc = Node->getEndLoc().getLocWithOffset(TkLen - 1);
+
+ if (Loc.isValid())
+ return Loc;
+
+ return std::nullopt;
+}
+
+// Return the source location just past the last character of the AST `Node`.
+template <typename NodeTy>
+static std::optional<SourceLocation> getPastLoc(const NodeTy *Node,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ SourceLocation Loc =
+ Lexer::getLocForEndOfToken(Node->getEndLoc(), 0, SM, LangOpts);
+ if (Loc.isValid())
+ return Loc;
+ return std::nullopt;
+}
+
+// Return text representation of an `Expr`.
+static std::optional<StringRef> getExprText(const Expr *E,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ std::optional<SourceLocation> LastCharLoc = getPastLoc(E, SM, LangOpts);
+
+ if (LastCharLoc)
+ return Lexer::getSourceText(
+ CharSourceRange::getCharRange(E->getBeginLoc(), *LastCharLoc), SM,
+ LangOpts);
+
+ return std::nullopt;
+}
+
+// Returns the literal text in `SourceRange SR`, if `SR` is a valid range.
+static std::optional<StringRef> getRangeText(SourceRange SR,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ bool Invalid = false;
+ CharSourceRange CSR = CharSourceRange::getCharRange(SR);
+ StringRef Text = Lexer::getSourceText(CSR, SM, LangOpts, &Invalid);
+
+ if (!Invalid)
+ return Text;
+ return std::nullopt;
+}
+
+// Returns the begin location of the identifier of the given variable
+// declaration.
+static SourceLocation getVarDeclIdentifierLoc(const VarDecl *VD) {
+ // According to the implementation of `VarDecl`, `VD->getLocation()` actually
+ // returns the begin location of the identifier of the declaration:
+ return VD->getLocation();
+}
+
+// Returns the literal text of the identifier of the given variable declaration.
+static std::optional<StringRef>
+getVarDeclIdentifierText(const VarDecl *VD, const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ SourceLocation ParmIdentBeginLoc = getVarDeclIdentifierLoc(VD);
+ SourceLocation ParmIdentEndLoc =
+ Lexer::getLocForEndOfToken(ParmIdentBeginLoc, 0, SM, LangOpts);
+
+ if (ParmIdentEndLoc.isMacroID() &&
+ !Lexer::isAtEndOfMacroExpansion(ParmIdentEndLoc, SM, LangOpts))
+ return std::nullopt;
+ return getRangeText({ParmIdentBeginLoc, ParmIdentEndLoc}, SM, LangOpts);
+}
+
+// We cannot fix a variable declaration if it has some other specifiers than the
+// type specifier. Because the source ranges of those specifiers could overlap
+// with the source range that is being replaced using fix-its. Especially when
+// we often cannot obtain accurate source ranges of cv-qualified type
+// specifiers.
+// FIXME: also deal with type attributes
+static bool hasUnsupportedSpecifiers(const VarDecl *VD,
+ const SourceManager &SM) {
+ // AttrRangeOverlapping: true if at least one attribute of `VD` overlaps the
+ // source range of `VD`:
+ bool AttrRangeOverlapping = llvm::any_of(VD->attrs(), [&](Attr *At) -> bool {
+ return !(SM.isBeforeInTranslationUnit(At->getRange().getEnd(),
+ VD->getBeginLoc())) &&
+ !(SM.isBeforeInTranslationUnit(VD->getEndLoc(),
+ At->getRange().getBegin()));
+ });
+ return VD->isInlineSpecified() || VD->isConstexpr() ||
+ VD->hasConstantInitialization() || !VD->hasLocalStorage() ||
+ AttrRangeOverlapping;
+}
+
+// Returns the `SourceRange` of `D`. The reason why this function exists is
+// that `D->getSourceRange()` may return a range where the end location is the
+// starting location of the last token. The end location of the source range
+// returned by this function is the last location of the last token.
+static SourceRange getSourceRangeToTokenEnd(const Decl *D,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ SourceLocation Begin = D->getBeginLoc();
+ SourceLocation
+ End = // `D->getEndLoc` should always return the starting location of the
+ // last token, so we should get the end of the token
+ Lexer::getLocForEndOfToken(D->getEndLoc(), 0, SM, LangOpts);
+
+ return SourceRange(Begin, End);
+}
+
+// Returns the text of the pointee type of `T` from a `VarDecl` of a pointer
+// type. The text is obtained through from `TypeLoc`s. Since `TypeLoc` does not
+// have source ranges of qualifiers ( The `QualifiedTypeLoc` looks hacky too me
+// :( ), `Qualifiers` of the pointee type is returned separately through the
+// output parameter `QualifiersToAppend`.
+static std::optional<std::string>
+getPointeeTypeText(const VarDecl *VD, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ std::optional<Qualifiers> *QualifiersToAppend) {
+ QualType Ty = VD->getType();
+ QualType PteTy;
+
+ assert(Ty->isPointerType() && !Ty->isFunctionPointerType() &&
+ "Expecting a VarDecl of type of pointer to object type");
+ PteTy = Ty->getPointeeType();
+
+ TypeLoc TyLoc = VD->getTypeSourceInfo()->getTypeLoc().getUnqualifiedLoc();
+ TypeLoc PteTyLoc;
+
+ // We only deal with the cases that we know `TypeLoc::getNextTypeLoc` returns
+ // the `TypeLoc` of the pointee type:
+ switch (TyLoc.getTypeLocClass()) {
+ case TypeLoc::ConstantArray:
+ case TypeLoc::IncompleteArray:
+ case TypeLoc::VariableArray:
+ case TypeLoc::DependentSizedArray:
+ case TypeLoc::Decayed:
+ assert(isa<ParmVarDecl>(VD) && "An array type shall not be treated as a "
+ "pointer type unless it decays.");
+ PteTyLoc = TyLoc.getNextTypeLoc();
+ break;
+ case TypeLoc::Pointer:
+ PteTyLoc = TyLoc.castAs<PointerTypeLoc>().getPointeeLoc();
+ break;
+ default:
+ return std::nullopt;
+ }
+ if (PteTyLoc.isNull())
+ // Sometimes we cannot get a useful `TypeLoc` for the pointee type, e.g.,
+ // when the pointer type is `auto`.
+ return std::nullopt;
+
+ SourceLocation IdentLoc = getVarDeclIdentifierLoc(VD);
+
+ if (!(IdentLoc.isValid() && PteTyLoc.getSourceRange().isValid())) {
+ // We are expecting these locations to be valid. But in some cases, they are
+ // not all valid. It is a Clang bug to me and we are not responsible for
+ // fixing it. So we will just give up for now when it happens.
+ return std::nullopt;
+ }
+
+ // Note that TypeLoc.getEndLoc() returns the begin location of the last token:
+ SourceLocation PteEndOfTokenLoc =
+ Lexer::getLocForEndOfToken(PteTyLoc.getEndLoc(), 0, SM, LangOpts);
+
+ if (!PteEndOfTokenLoc.isValid())
+ // Sometimes we cannot get the end location of the pointee type, e.g., when
+ // there are macros involved.
+ return std::nullopt;
+ if (!SM.isBeforeInTranslationUnit(PteEndOfTokenLoc, IdentLoc)) {
+ // We only deal with the cases where the source text of the pointee type
+ // appears on the left-hand side of the variable identifier completely,
+ // including the following forms:
+ // `T ident`,
+ // `T ident[]`, where `T` is any type.
+ // Examples of excluded cases are `T (*ident)[]` or `T ident[][n]`.
+ return std::nullopt;
+ }
+ if (PteTy.hasQualifiers()) {
+ // TypeLoc does not provide source ranges for qualifiers (it says it's
+ // intentional but seems fishy to me), so we cannot get the full text
+ // `PteTy` via source ranges.
+ *QualifiersToAppend = PteTy.getQualifiers();
+ }
+ return getRangeText({PteTyLoc.getBeginLoc(), PteEndOfTokenLoc}, SM, LangOpts)
+ ->str();
+}
+
+// Returns the text of the name (with qualifiers) of a `FunctionDecl`.
+static std::optional<StringRef> getFunNameText(const FunctionDecl *FD,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ SourceLocation BeginLoc = FD->getQualifier()
+ ? FD->getQualifierLoc().getBeginLoc()
+ : FD->getNameInfo().getBeginLoc();
+ // Note that `FD->getNameInfo().getEndLoc()` returns the begin location of the
+ // last token:
+ SourceLocation EndLoc = Lexer::getLocForEndOfToken(
+ FD->getNameInfo().getEndLoc(), 0, SM, LangOpts);
+ SourceRange NameRange{BeginLoc, EndLoc};
+
+ return getRangeText(NameRange, SM, LangOpts);
+}
+
+// Returns the text representing a `std::span` type where the element type is
+// represented by `EltTyText`.
+//
+// Note the optional parameter `Qualifiers`: one needs to pass qualifiers
+// explicitly if the element type needs to be qualified.
+static std::string
+getSpanTypeText(StringRef EltTyText,
+ std::optional<Qualifiers> Quals = std::nullopt) {
+ const char *const SpanOpen = "std::span<";
+
+ if (Quals)
+ return SpanOpen + EltTyText.str() + ' ' + Quals->getAsString() + '>';
+ return SpanOpen + EltTyText.str() + '>';
+}
+
+std::optional<FixItList>
+DerefSimplePtrArithFixableGadget::getFixits(const Strategy &s) const {
+ const VarDecl *VD = dyn_cast<VarDecl>(BaseDeclRefExpr->getDecl());
+
+ if (VD && s.lookup(VD) == Strategy::Kind::Span) {
+ ASTContext &Ctx = VD->getASTContext();
+ // std::span can't represent elements before its begin()
+ if (auto ConstVal = Offset->getIntegerConstantExpr(Ctx))
+ if (ConstVal->isNegative())
+ return std::nullopt;
+
+ // note that the expr may (oddly) has multiple layers of parens
+ // example:
+ // *((..(pointer + 123)..))
+ // goal:
+ // pointer[123]
+ // Fix-It:
+ // remove '*('
+ // replace ' + ' with '['
+ // replace ')' with ']'
+
+ // example:
+ // *((..(123 + pointer)..))
+ // goal:
+ // 123[pointer]
+ // Fix-It:
+ // remove '*('
+ // replace ' + ' with '['
+ // replace ')' with ']'
+
+ const Expr *LHS = AddOp->getLHS(), *RHS = AddOp->getRHS();
+ const SourceManager &SM = Ctx.getSourceManager();
+ const LangOptions &LangOpts = Ctx.getLangOpts();
+ CharSourceRange StarWithTrailWhitespace =
+ clang::CharSourceRange::getCharRange(DerefOp->getOperatorLoc(),
+ LHS->getBeginLoc());
+
+ std::optional<SourceLocation> LHSLocation = getPastLoc(LHS, SM, LangOpts);
+ if (!LHSLocation)
+ return std::nullopt;
+
+ CharSourceRange PlusWithSurroundingWhitespace =
+ clang::CharSourceRange::getCharRange(*LHSLocation, RHS->getBeginLoc());
+
+ std::optional<SourceLocation> AddOpLocation =
+ getPastLoc(AddOp, SM, LangOpts);
+ std::optional<SourceLocation> DerefOpLocation =
+ getPastLoc(DerefOp, SM, LangOpts);
+
+ if (!AddOpLocation || !DerefOpLocation)
+ return std::nullopt;
+
+ CharSourceRange ClosingParenWithPrecWhitespace =
+ clang::CharSourceRange::getCharRange(*AddOpLocation, *DerefOpLocation);
+
+ return FixItList{
+ {FixItHint::CreateRemoval(StarWithTrailWhitespace),
+ FixItHint::CreateReplacement(PlusWithSurroundingWhitespace, "["),
+ FixItHint::CreateReplacement(ClosingParenWithPrecWhitespace, "]")}};
+ }
+ return std::nullopt; // something wrong or unsupported, give up
+}
+
+std::optional<FixItList>
+PointerDereferenceGadget::getFixits(const Strategy &S) const {
+ const VarDecl *VD = cast<VarDecl>(BaseDeclRefExpr->getDecl());
+ switch (S.lookup(VD)) {
+ case Strategy::Kind::Span: {
+ ASTContext &Ctx = VD->getASTContext();
+ SourceManager &SM = Ctx.getSourceManager();
+ // Required changes: *(ptr); => (ptr[0]); and *ptr; => ptr[0]
+ // Deletes the *operand
+ CharSourceRange derefRange = clang::CharSourceRange::getCharRange(
+ Op->getBeginLoc(), Op->getBeginLoc().getLocWithOffset(1));
+ // Inserts the [0]
+ if (auto LocPastOperand =
+ getPastLoc(BaseDeclRefExpr, SM, Ctx.getLangOpts())) {
+ return FixItList{{FixItHint::CreateRemoval(derefRange),
+ FixItHint::CreateInsertion(*LocPastOperand, "[0]")}};
+ }
+ break;
+ }
+ case Strategy::Kind::Iterator:
+ case Strategy::Kind::Array:
+ case Strategy::Kind::Vector:
+ llvm_unreachable("Strategy not implemented yet!");
+ case Strategy::Kind::Wontfix:
+ llvm_unreachable("Invalid strategy!");
+ }
+
+ return std::nullopt;
+}
+
+// Generates fix-its replacing an expression of the form UPC(DRE) with
+// `DRE.data()`
+std::optional<FixItList> UPCStandalonePointerGadget::getFixits(const Strategy &S)
+ const {
+ const auto VD = cast<VarDecl>(Node->getDecl());
+ switch (S.lookup(VD)) {
+ case Strategy::Kind::Span: {
+ ASTContext &Ctx = VD->getASTContext();
+ SourceManager &SM = Ctx.getSourceManager();
+ // Inserts the .data() after the DRE
+ std::optional<SourceLocation> EndOfOperand =
+ getPastLoc(Node, SM, Ctx.getLangOpts());
+
+ if (EndOfOperand)
+ return FixItList{{FixItHint::CreateInsertion(
+ *EndOfOperand, ".data()")}};
+ // FIXME: Points inside a macro expansion.
+ break;
+ }
+ case Strategy::Kind::Wontfix:
+ case Strategy::Kind::Iterator:
+ case Strategy::Kind::Array:
+ case Strategy::Kind::Vector:
+ llvm_unreachable("unsupported strategies for FixableGadgets");
+ }
+
+ return std::nullopt;
+}
+
+// Generates fix-its replacing an expression of the form `&DRE[e]` with
+// `&DRE.data()[e]`:
+static std::optional<FixItList>
+fixUPCAddressofArraySubscriptWithSpan(const UnaryOperator *Node) {
+ const auto *ArraySub = cast<ArraySubscriptExpr>(Node->getSubExpr());
+ const auto *DRE = cast<DeclRefExpr>(ArraySub->getBase()->IgnoreImpCasts());
+ // FIXME: this `getASTContext` call is costly, we should pass the
+ // ASTContext in:
+ const ASTContext &Ctx = DRE->getDecl()->getASTContext();
+ const Expr *Idx = ArraySub->getIdx();
+ const SourceManager &SM = Ctx.getSourceManager();
+ const LangOptions &LangOpts = Ctx.getLangOpts();
+ std::stringstream SS;
+ bool IdxIsLitZero = false;
+
+ if (auto ICE = Idx->getIntegerConstantExpr(Ctx))
+ if ((*ICE).isZero())
+ IdxIsLitZero = true;
+ std::optional<StringRef> DreString = getExprText(DRE, SM, LangOpts);
+ if (!DreString)
+ return std::nullopt;
+
+ if (IdxIsLitZero) {
+ // If the index is literal zero, we produce the most concise fix-it:
+ SS << (*DreString).str() << ".data()";
+ } else {
+ std::optional<StringRef> IndexString = getExprText(Idx, SM, LangOpts);
+ if (!IndexString)
+ return std::nullopt;
+
+ SS << "&" << (*DreString).str() << ".data()"
+ << "[" << (*IndexString).str() << "]";
+ }
+ return FixItList{
+ FixItHint::CreateReplacement(Node->getSourceRange(), SS.str())};
+}
+
+std::optional<FixItList>
+UUCAddAssignGadget::getFixits(const Strategy &S) const {
+ DeclUseList DREs = getClaimedVarUseSites();
+
+ if (DREs.size() != 1)
+ return std::nullopt; // In cases of `Ptr += n` where `Ptr` is not a DRE, we
+ // give up
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DREs.front()->getDecl())) {
+ if (S.lookup(VD) == Strategy::Kind::Span) {
+ FixItList Fixes;
+
+ const Stmt *AddAssignNode = getBaseStmt();
+ StringRef varName = VD->getName();
+ const ASTContext &Ctx = VD->getASTContext();
+
+ if (!isNonNegativeIntegerExpr(Offset, VD, Ctx))
+ return std::nullopt;
+
+ // To transform UUC(p += n) to UUC(p = p.subspan(..)):
+ bool NotParenExpr =
+ (Offset->IgnoreParens()->getBeginLoc() == Offset->getBeginLoc());
+ std::string SS = varName.str() + " = " + varName.str() + ".subspan";
+ if (NotParenExpr)
+ SS += "(";
+
+ std::optional<SourceLocation> AddAssignLocation = getEndCharLoc(
+ AddAssignNode, Ctx.getSourceManager(), Ctx.getLangOpts());
+ if (!AddAssignLocation)
+ return std::nullopt;
+
+ Fixes.push_back(FixItHint::CreateReplacement(
+ SourceRange(AddAssignNode->getBeginLoc(), Node->getOperatorLoc()),
+ SS));
+ if (NotParenExpr)
+ Fixes.push_back(FixItHint::CreateInsertion(
+ Offset->getEndLoc().getLocWithOffset(1), ")"));
+ return Fixes;
+ }
+ }
+ return std::nullopt; // Not in the cases that we can handle for now, give up.
+}
+
+std::optional<FixItList> UPCPreIncrementGadget::getFixits(const Strategy &S) const {
+ DeclUseList DREs = getClaimedVarUseSites();
+
+ if (DREs.size() != 1)
+ return std::nullopt; // In cases of `++Ptr` where `Ptr` is not a DRE, we
+ // give up
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DREs.front()->getDecl())) {
+ if (S.lookup(VD) == Strategy::Kind::Span) {
+ FixItList Fixes;
+ std::stringstream SS;
+ const Stmt *PreIncNode = getBaseStmt();
+ StringRef varName = VD->getName();
+ const ASTContext &Ctx = VD->getASTContext();
+
+ // To transform UPC(++p) to UPC((p = p.subspan(1)).data()):
+ SS << "(" << varName.data() << " = " << varName.data()
+ << ".subspan(1)).data()";
+ std::optional<SourceLocation> PreIncLocation =
+ getEndCharLoc(PreIncNode, Ctx.getSourceManager(), Ctx.getLangOpts());
+ if (!PreIncLocation)
+ return std::nullopt;
+
+ Fixes.push_back(FixItHint::CreateReplacement(
+ SourceRange(PreIncNode->getBeginLoc(), *PreIncLocation), SS.str()));
+ return Fixes;
+ }
+ }
+ return std::nullopt; // Not in the cases that we can handle for now, give up.
+}
+
+
+// For a non-null initializer `Init` of `T *` type, this function returns
+// `FixItHint`s producing a list initializer `{Init, S}` as a part of a fix-it
+// to output stream.
+// In many cases, this function cannot figure out the actual extent `S`. It
+// then will use a place holder to replace `S` to ask users to fill `S` in. The
+// initializer shall be used to initialize a variable of type `std::span<T>`.
+//
+// FIXME: Support multi-level pointers
+//
+// Parameters:
+// `Init` a pointer to the initializer expression
+// `Ctx` a reference to the ASTContext
+static FixItList
+FixVarInitializerWithSpan(const Expr *Init, ASTContext &Ctx,
+ const StringRef UserFillPlaceHolder) {
+ const SourceManager &SM = Ctx.getSourceManager();
+ const LangOptions &LangOpts = Ctx.getLangOpts();
+
+ // If `Init` has a constant value that is (or equivalent to) a
+ // NULL pointer, we use the default constructor to initialize the span
+ // object, i.e., a `std:span` variable declaration with no initializer.
+ // So the fix-it is just to remove the initializer.
+ if (Init->isNullPointerConstant(Ctx,
+ // FIXME: Why does this function not ask for `const ASTContext
+ // &`? It should. Maybe worth an NFC patch later.
+ Expr::NullPointerConstantValueDependence::
+ NPC_ValueDependentIsNotNull)) {
+ std::optional<SourceLocation> InitLocation =
+ getEndCharLoc(Init, SM, LangOpts);
+ if (!InitLocation)
+ return {};
+
+ SourceRange SR(Init->getBeginLoc(), *InitLocation);
+
+ return {FixItHint::CreateRemoval(SR)};
+ }
+
+ FixItList FixIts{};
+ std::string ExtentText = UserFillPlaceHolder.data();
+ StringRef One = "1";
+
+ // Insert `{` before `Init`:
+ FixIts.push_back(FixItHint::CreateInsertion(Init->getBeginLoc(), "{"));
+ // Try to get the data extent. Break into different cases:
+ if (auto CxxNew = dyn_cast<CXXNewExpr>(Init->IgnoreImpCasts())) {
+ // In cases `Init` is `new T[n]` and there is no explicit cast over
+ // `Init`, we know that `Init` must evaluates to a pointer to `n` objects
+ // of `T`. So the extent is `n` unless `n` has side effects. Similar but
+ // simpler for the case where `Init` is `new T`.
+ if (const Expr *Ext = CxxNew->getArraySize().value_or(nullptr)) {
+ if (!Ext->HasSideEffects(Ctx)) {
+ std::optional<StringRef> ExtentString = getExprText(Ext, SM, LangOpts);
+ if (!ExtentString)
+ return {};
+ ExtentText = *ExtentString;
+ }
+ } else if (!CxxNew->isArray())
+ // Although the initializer is not allocating a buffer, the pointer
+ // variable could still be used in buffer access operations.
+ ExtentText = One;
+ } else if (const auto *CArrTy = Ctx.getAsConstantArrayType(
+ Init->IgnoreImpCasts()->getType())) {
+ // In cases `Init` is of an array type after stripping off implicit casts,
+ // the extent is the array size. Note that if the array size is not a
+ // constant, we cannot use it as the extent.
+ ExtentText = getAPIntText(CArrTy->getSize());
+ } else {
+ // In cases `Init` is of the form `&Var` after stripping of implicit
+ // casts, where `&` is the built-in operator, the extent is 1.
+ if (auto AddrOfExpr = dyn_cast<UnaryOperator>(Init->IgnoreImpCasts()))
+ if (AddrOfExpr->getOpcode() == UnaryOperatorKind::UO_AddrOf &&
+ isa_and_present<DeclRefExpr>(AddrOfExpr->getSubExpr()))
+ ExtentText = One;
+ // TODO: we can handle more cases, e.g., `&a[0]`, `&a`, `std::addressof`,
+ // and explicit casting, etc. etc.
+ }
+
+ SmallString<32> StrBuffer{};
+ std::optional<SourceLocation> LocPassInit = getPastLoc(Init, SM, LangOpts);
+
+ if (!LocPassInit)
+ return {};
+
+ StrBuffer.append(", ");
+ StrBuffer.append(ExtentText);
+ StrBuffer.append("}");
+ FixIts.push_back(FixItHint::CreateInsertion(*LocPassInit, StrBuffer.str()));
+ return FixIts;
+}
+
+#ifndef NDEBUG
+#define DEBUG_NOTE_DECL_FAIL(D, Msg) \
+Handler.addDebugNoteForVar((D), (D)->getBeginLoc(), "failed to produce fixit for declaration '" + (D)->getNameAsString() + "'" + (Msg))
+#else
+#define DEBUG_NOTE_DECL_FAIL(D, Msg)
+#endif
+
+// For the given variable declaration with a pointer-to-T type, returns the text
+// `std::span<T>`. If it is unable to generate the text, returns
+// `std::nullopt`.
+static std::optional<std::string> createSpanTypeForVarDecl(const VarDecl *VD,
+ const ASTContext &Ctx) {
+ assert(VD->getType()->isPointerType());
+
+ std::optional<Qualifiers> PteTyQualifiers = std::nullopt;
+ std::optional<std::string> PteTyText = getPointeeTypeText(
+ VD, Ctx.getSourceManager(), Ctx.getLangOpts(), &PteTyQualifiers);
+
+ if (!PteTyText)
+ return std::nullopt;
+
+ std::string SpanTyText = "std::span<";
+
+ SpanTyText.append(*PteTyText);
+ // Append qualifiers to span element type if any:
+ if (PteTyQualifiers) {
+ SpanTyText.append(" ");
+ SpanTyText.append(PteTyQualifiers->getAsString());
+ }
+ SpanTyText.append(">");
+ return SpanTyText;
+}
+
+// For a `VarDecl` of the form `T * var (= Init)?`, this
+// function generates fix-its that
+// 1) replace `T * var` with `std::span<T> var`; and
+// 2) change `Init` accordingly to a span constructor, if it exists.
+//
+// FIXME: support Multi-level pointers
+//
+// Parameters:
+// `D` a pointer the variable declaration node
+// `Ctx` a reference to the ASTContext
+// `UserFillPlaceHolder` the user-input placeholder text
+// Returns:
+// the non-empty fix-it list, if fix-its are successfuly generated; empty
+// list otherwise.
+static FixItList fixLocalVarDeclWithSpan(const VarDecl *D, ASTContext &Ctx,
+ const StringRef UserFillPlaceHolder,
+ UnsafeBufferUsageHandler &Handler) {
+ if (hasUnsupportedSpecifiers(D, Ctx.getSourceManager()))
+ return {};
+
+ FixItList FixIts{};
+ std::optional<std::string> SpanTyText = createSpanTypeForVarDecl(D, Ctx);
+
+ if (!SpanTyText) {
+ DEBUG_NOTE_DECL_FAIL(D, " : failed to generate 'std::span' type");
+ return {};
+ }
+
+ // Will hold the text for `std::span<T> Ident`:
+ std::stringstream SS;
+
+ SS << *SpanTyText;
+ // Append qualifiers to the type of `D`, if any:
+ if (D->getType().hasQualifiers())
+ SS << " " << D->getType().getQualifiers().getAsString();
+
+ // The end of the range of the original source that will be replaced
+ // by `std::span<T> ident`:
+ SourceLocation EndLocForReplacement = D->getEndLoc();
+ std::optional<StringRef> IdentText =
+ getVarDeclIdentifierText(D, Ctx.getSourceManager(), Ctx.getLangOpts());
+
+ if (!IdentText) {
+ DEBUG_NOTE_DECL_FAIL(D, " : failed to locate the identifier");
+ return {};
+ }
+ // Fix the initializer if it exists:
+ if (const Expr *Init = D->getInit()) {
+ FixItList InitFixIts =
+ FixVarInitializerWithSpan(Init, Ctx, UserFillPlaceHolder);
+ if (InitFixIts.empty())
+ return {};
+ FixIts.insert(FixIts.end(), std::make_move_iterator(InitFixIts.begin()),
+ std::make_move_iterator(InitFixIts.end()));
+ // If the declaration has the form `T *ident = init`, we want to replace
+ // `T *ident = ` with `std::span<T> ident`:
+ EndLocForReplacement = Init->getBeginLoc().getLocWithOffset(-1);
+ }
+ SS << " " << IdentText->str();
+ if (!EndLocForReplacement.isValid()) {
+ DEBUG_NOTE_DECL_FAIL(D, " : failed to locate the end of the declaration");
+ return {};
+ }
+ FixIts.push_back(FixItHint::CreateReplacement(
+ SourceRange(D->getBeginLoc(), EndLocForReplacement), SS.str()));
+ return FixIts;
+}
+
+static bool hasConflictingOverload(const FunctionDecl *FD) {
+ return !FD->getDeclContext()->lookup(FD->getDeclName()).isSingleResult();
+}
+
+// For a `FunctionDecl`, whose `ParmVarDecl`s are being changed to have new
+// types, this function produces fix-its to make the change self-contained. Let
+// 'F' be the entity defined by the original `FunctionDecl` and "NewF" be the
+// entity defined by the `FunctionDecl` after the change to the parameters.
+// Fix-its produced by this function are
+// 1. Add the `[[clang::unsafe_buffer_usage]]` attribute to each declaration
+// of 'F';
+// 2. Create a declaration of "NewF" next to each declaration of `F`;
+// 3. Create a definition of "F" (as its' original definition is now belongs
+// to "NewF") next to its original definition. The body of the creating
+// definition calls to "NewF".
+//
+// Example:
+//
+// void f(int *p); // original declaration
+// void f(int *p) { // original definition
+// p[5];
+// }
+//
+// To change the parameter `p` to be of `std::span<int>` type, we
+// also add overloads:
+//
+// [[clang::unsafe_buffer_usage]] void f(int *p); // original decl
+// void f(std::span<int> p); // added overload decl
+// void f(std::span<int> p) { // original def where param is changed
+// p[5];
+// }
+// [[clang::unsafe_buffer_usage]] void f(int *p) { // added def
+// return f(std::span(p, <# size #>));
+// }
+//
+static std::optional<FixItList>
+createOverloadsForFixedParams(const Strategy &S, const FunctionDecl *FD,
+ const ASTContext &Ctx,
+ UnsafeBufferUsageHandler &Handler) {
+ // FIXME: need to make this conflict checking better:
+ if (hasConflictingOverload(FD))
+ return std::nullopt;
+
+ const SourceManager &SM = Ctx.getSourceManager();
+ const LangOptions &LangOpts = Ctx.getLangOpts();
+ const unsigned NumParms = FD->getNumParams();
+ std::vector<std::string> NewTysTexts(NumParms);
+ std::vector<bool> ParmsMask(NumParms, false);
+ bool AtLeastOneParmToFix = false;
+
+ for (unsigned i = 0; i < NumParms; i++) {
+ const ParmVarDecl *PVD = FD->getParamDecl(i);
+
+ if (S.lookup(PVD) == Strategy::Kind::Wontfix)
+ continue;
+ if (S.lookup(PVD) != Strategy::Kind::Span)
+ // Not supported, not suppose to happen:
+ return std::nullopt;
+
+ std::optional<Qualifiers> PteTyQuals = std::nullopt;
+ std::optional<std::string> PteTyText =
+ getPointeeTypeText(PVD, SM, LangOpts, &PteTyQuals);
+
+ if (!PteTyText)
+ // something wrong in obtaining the text of the pointee type, give up
+ return std::nullopt;
+ // FIXME: whether we should create std::span type depends on the Strategy.
+ NewTysTexts[i] = getSpanTypeText(*PteTyText, PteTyQuals);
+ ParmsMask[i] = true;
+ AtLeastOneParmToFix = true;
+ }
+ if (!AtLeastOneParmToFix)
+ // No need to create function overloads:
+ return {};
+ // FIXME Respect indentation of the original code.
+
+ // A lambda that creates the text representation of a function declaration
+ // with the new type signatures:
+ const auto NewOverloadSignatureCreator =
+ [&SM, &LangOpts, &NewTysTexts,
+ &ParmsMask](const FunctionDecl *FD) -> std::optional<std::string> {
+ std::stringstream SS;
+
+ SS << ";";
+ SS << getEndOfLine().str();
+ // Append: ret-type func-name "("
+ if (auto Prefix = getRangeText(
+ SourceRange(FD->getBeginLoc(), (*FD->param_begin())->getBeginLoc()),
+ SM, LangOpts))
+ SS << Prefix->str();
+ else
+ return std::nullopt; // give up
+ // Append: parameter-type-list
+ const unsigned NumParms = FD->getNumParams();
+
+ for (unsigned i = 0; i < NumParms; i++) {
+ const ParmVarDecl *Parm = FD->getParamDecl(i);
+
+ if (Parm->isImplicit())
+ continue;
+ if (ParmsMask[i]) {
+ // This `i`-th parameter will be fixed with `NewTysTexts[i]` being its
+ // new type:
+ SS << NewTysTexts[i];
+ // print parameter name if provided:
+ if (IdentifierInfo *II = Parm->getIdentifier())
+ SS << ' ' << II->getName().str();
+ } else if (auto ParmTypeText = getRangeText(
+ getSourceRangeToTokenEnd(Parm, SM, LangOpts),
+ SM, LangOpts)) {
+ // print the whole `Parm` without modification:
+ SS << ParmTypeText->str();
+ } else
+ return std::nullopt; // something wrong, give up
+ if (i != NumParms - 1)
+ SS << ", ";
+ }
+ SS << ")";
+ return SS.str();
+ };
+
+ // A lambda that creates the text representation of a function definition with
+ // the original signature:
+ const auto OldOverloadDefCreator =
+ [&Handler, &SM, &LangOpts, &NewTysTexts,
+ &ParmsMask](const FunctionDecl *FD) -> std::optional<std::string> {
+ std::stringstream SS;
+
+ SS << getEndOfLine().str();
+ // Append: attr-name ret-type func-name "(" param-list ")" "{"
+ if (auto FDPrefix = getRangeText(
+ SourceRange(FD->getBeginLoc(), FD->getBody()->getBeginLoc()), SM,
+ LangOpts))
+ SS << Handler.getUnsafeBufferUsageAttributeTextAt(FD->getBeginLoc(), " ")
+ << FDPrefix->str() << "{";
+ else
+ return std::nullopt;
+ // Append: "return" func-name "("
+ if (auto FunQualName = getFunNameText(FD, SM, LangOpts))
+ SS << "return " << FunQualName->str() << "(";
+ else
+ return std::nullopt;
+
+ // Append: arg-list
+ const unsigned NumParms = FD->getNumParams();
+ for (unsigned i = 0; i < NumParms; i++) {
+ const ParmVarDecl *Parm = FD->getParamDecl(i);
+
+ if (Parm->isImplicit())
+ continue;
+ // FIXME: If a parameter has no name, it is unused in the
+ // definition. So we could just leave it as it is.
+ if (!Parm->getIdentifier())
+ // If a parameter of a function definition has no name:
+ return std::nullopt;
+ if (ParmsMask[i])
+ // This is our spanified paramter!
+ SS << NewTysTexts[i] << "(" << Parm->getIdentifier()->getName().str()
+ << ", " << getUserFillPlaceHolder("size") << ")";
+ else
+ SS << Parm->getIdentifier()->getName().str();
+ if (i != NumParms - 1)
+ SS << ", ";
+ }
+ // finish call and the body
+ SS << ");}" << getEndOfLine().str();
+ // FIXME: 80-char line formatting?
+ return SS.str();
+ };
+
+ FixItList FixIts{};
+ for (FunctionDecl *FReDecl : FD->redecls()) {
+ std::optional<SourceLocation> Loc = getPastLoc(FReDecl, SM, LangOpts);
+
+ if (!Loc)
+ return {};
+ if (FReDecl->isThisDeclarationADefinition()) {
+ assert(FReDecl == FD && "inconsistent function definition");
+ // Inserts a definition with the old signature to the end of
+ // `FReDecl`:
+ if (auto OldOverloadDef = OldOverloadDefCreator(FReDecl))
+ FixIts.emplace_back(FixItHint::CreateInsertion(*Loc, *OldOverloadDef));
+ else
+ return {}; // give up
+ } else {
+ // Adds the unsafe-buffer attribute (if not already there) to `FReDecl`:
+ if (!FReDecl->hasAttr<UnsafeBufferUsageAttr>()) {
+ FixIts.emplace_back(FixItHint::CreateInsertion(
+ FReDecl->getBeginLoc(), Handler.getUnsafeBufferUsageAttributeTextAt(
+ FReDecl->getBeginLoc(), " ")));
+ }
+ // Inserts a declaration with the new signature to the end of `FReDecl`:
+ if (auto NewOverloadDecl = NewOverloadSignatureCreator(FReDecl))
+ FixIts.emplace_back(FixItHint::CreateInsertion(*Loc, *NewOverloadDecl));
+ else
+ return {};
+ }
+ }
+ return FixIts;
+}
+
+// To fix a `ParmVarDecl` to be of `std::span` type.
+static FixItList fixParamWithSpan(const ParmVarDecl *PVD, const ASTContext &Ctx,
+ UnsafeBufferUsageHandler &Handler) {
+ if (hasUnsupportedSpecifiers(PVD, Ctx.getSourceManager())) {
+ DEBUG_NOTE_DECL_FAIL(PVD, " : has unsupport specifier(s)");
+ return {};
+ }
+ if (PVD->hasDefaultArg()) {
+ // FIXME: generate fix-its for default values:
+ DEBUG_NOTE_DECL_FAIL(PVD, " : has default arg");
+ return {};
+ }
+
+ std::optional<Qualifiers> PteTyQualifiers = std::nullopt;
+ std::optional<std::string> PteTyText = getPointeeTypeText(
+ PVD, Ctx.getSourceManager(), Ctx.getLangOpts(), &PteTyQualifiers);
+
+ if (!PteTyText) {
+ DEBUG_NOTE_DECL_FAIL(PVD, " : invalid pointee type");
+ return {};
+ }
+
+ std::optional<StringRef> PVDNameText = PVD->getIdentifier()->getName();
+
+ if (!PVDNameText) {
+ DEBUG_NOTE_DECL_FAIL(PVD, " : invalid identifier name");
+ return {};
+ }
+
+ std::stringstream SS;
+ std::optional<std::string> SpanTyText = createSpanTypeForVarDecl(PVD, Ctx);
+
+ if (PteTyQualifiers)
+ // Append qualifiers if they exist:
+ SS << getSpanTypeText(*PteTyText, PteTyQualifiers);
+ else
+ SS << getSpanTypeText(*PteTyText);
+ // Append qualifiers to the type of the parameter:
+ if (PVD->getType().hasQualifiers())
+ SS << ' ' << PVD->getType().getQualifiers().getAsString();
+ // Append parameter's name:
+ SS << ' ' << PVDNameText->str();
+ // Add replacement fix-it:
+ return {FixItHint::CreateReplacement(PVD->getSourceRange(), SS.str())};
+}
+
+static FixItList fixVariableWithSpan(const VarDecl *VD,
+ const DeclUseTracker &Tracker,
+ ASTContext &Ctx,
+ UnsafeBufferUsageHandler &Handler) {
+ const DeclStmt *DS = Tracker.lookupDecl(VD);
+ if (!DS) {
+ DEBUG_NOTE_DECL_FAIL(VD, " : variables declared this way not implemented yet");
+ return {};
+ }
+ if (!DS->isSingleDecl()) {
+ // FIXME: to support handling multiple `VarDecl`s in a single `DeclStmt`
+ DEBUG_NOTE_DECL_FAIL(VD, " : multiple VarDecls");
+ return {};
+ }
+ // Currently DS is an unused variable but we'll need it when
+ // non-single decls are implemented, where the pointee type name
+ // and the '*' are spread around the place.
+ (void)DS;
+
+ // FIXME: handle cases where DS has multiple declarations
+ return fixLocalVarDeclWithSpan(VD, Ctx, getUserFillPlaceHolder(), Handler);
+}
+
+// TODO: we should be consistent to use `std::nullopt` to represent no-fix due
+// to any unexpected problem.
+static FixItList
+fixVariable(const VarDecl *VD, Strategy::Kind K,
+ /* The function decl under analysis */ const Decl *D,
+ const DeclUseTracker &Tracker, ASTContext &Ctx,
+ UnsafeBufferUsageHandler &Handler) {
+ if (const auto *PVD = dyn_cast<ParmVarDecl>(VD)) {
+ auto *FD = dyn_cast<clang::FunctionDecl>(PVD->getDeclContext());
+ if (!FD || FD != D) {
+ // `FD != D` means that `PVD` belongs to a function that is not being
+ // analyzed currently. Thus `FD` may not be complete.
+ DEBUG_NOTE_DECL_FAIL(VD, " : function not currently analyzed");
+ return {};
+ }
+
+ // TODO If function has a try block we can't change params unless we check
+ // also its catch block for their use.
+ // FIXME We might support static class methods, some select methods,
+ // operators and possibly lamdas.
+ if (FD->isMain() || FD->isConstexpr() ||
+ FD->getTemplatedKind() != FunctionDecl::TemplatedKind::TK_NonTemplate ||
+ FD->isVariadic() ||
+ // also covers call-operator of lamdas
+ isa<CXXMethodDecl>(FD) ||
+ // skip when the function body is a try-block
+ (FD->hasBody() && isa<CXXTryStmt>(FD->getBody())) ||
+ FD->isOverloadedOperator()) {
+ DEBUG_NOTE_DECL_FAIL(VD, " : unsupported function decl");
+ return {}; // TODO test all these cases
+ }
+ }
+
+ switch (K) {
+ case Strategy::Kind::Span: {
+ if (VD->getType()->isPointerType()) {
+ if (const auto *PVD = dyn_cast<ParmVarDecl>(VD))
+ return fixParamWithSpan(PVD, Ctx, Handler);
+
+ if (VD->isLocalVarDecl())
+ return fixVariableWithSpan(VD, Tracker, Ctx, Handler);
+ }
+ DEBUG_NOTE_DECL_FAIL(VD, " : not a pointer");
+ return {};
+ }
+ case Strategy::Kind::Iterator:
+ case Strategy::Kind::Array:
+ case Strategy::Kind::Vector:
+ llvm_unreachable("Strategy not implemented yet!");
+ case Strategy::Kind::Wontfix:
+ llvm_unreachable("Invalid strategy!");
+ }
+ llvm_unreachable("Unknown strategy!");
+}
+
+// Returns true iff there exists a `FixItHint` 'h' in `FixIts` such that the
+// `RemoveRange` of 'h' overlaps with a macro use.
+static bool overlapWithMacro(const FixItList &FixIts) {
+ // FIXME: For now we only check if the range (or the first token) is (part of)
+ // a macro expansion. Ideally, we want to check for all tokens in the range.
+ return llvm::any_of(FixIts, [](const FixItHint &Hint) {
+ auto Range = Hint.RemoveRange;
+ if (Range.getBegin().isMacroID() || Range.getEnd().isMacroID())
+ // If the range (or the first token) is (part of) a macro expansion:
+ return true;
+ return false;
+ });
+}
+
+// Returns true iff `VD` is a parameter of the declaration `D`:
+static bool isParameterOf(const VarDecl *VD, const Decl *D) {
+ return isa<ParmVarDecl>(VD) &&
+ VD->getDeclContext() == dyn_cast<DeclContext>(D);
+}
+
+// Erases variables in `FixItsForVariable`, if such a variable has an unfixable
+// group mate. A variable `v` is unfixable iff `FixItsForVariable` does not
+// contain `v`.
+static void eraseVarsForUnfixableGroupMates(
+ std::map<const VarDecl *, FixItList> &FixItsForVariable,
+ const VariableGroupsManager &VarGrpMgr) {
+ // Variables will be removed from `FixItsForVariable`:
+ SmallVector<const VarDecl *, 8> ToErase;
+
+ for (const auto &[VD, Ignore] : FixItsForVariable) {
+ VarGrpRef Grp = VarGrpMgr.getGroupOfVar(VD);
+ if (llvm::any_of(Grp,
+ [&FixItsForVariable](const VarDecl *GrpMember) -> bool {
+ return !FixItsForVariable.count(GrpMember);
+ })) {
+ // At least one group member cannot be fixed, so we have to erase the
+ // whole group:
+ for (const VarDecl *Member : Grp)
+ ToErase.push_back(Member);
+ }
+ }
+ for (auto *VarToErase : ToErase)
+ FixItsForVariable.erase(VarToErase);
+}
+
+// Returns the fix-its that create bounds-safe function overloads for the
+// function `D`, if `D`'s parameters will be changed to safe-types through
+// fix-its in `FixItsForVariable`.
+//
+// NOTE: In case `D`'s parameters will be changed but bounds-safe function
+// overloads cannot created, the whole group that contains the parameters will
+// be erased from `FixItsForVariable`.
+static FixItList createFunctionOverloadsForParms(
+ std::map<const VarDecl *, FixItList> &FixItsForVariable /* mutable */,
+ const VariableGroupsManager &VarGrpMgr, const FunctionDecl *FD,
+ const Strategy &S, ASTContext &Ctx, UnsafeBufferUsageHandler &Handler) {
+ FixItList FixItsSharedByParms{};
+
+ std::optional<FixItList> OverloadFixes =
+ createOverloadsForFixedParams(S, FD, Ctx, Handler);
+
+ if (OverloadFixes) {
+ FixItsSharedByParms.append(*OverloadFixes);
+ } else {
+ // Something wrong in generating `OverloadFixes`, need to remove the
+ // whole group, where parameters are in, from `FixItsForVariable` (Note
+ // that all parameters should be in the same group):
+ for (auto *Member : VarGrpMgr.getGroupOfParms())
+ FixItsForVariable.erase(Member);
+ }
+ return FixItsSharedByParms;
+}
+
+// Constructs self-contained fix-its for each variable in `FixablesForAllVars`.
+static std::map<const VarDecl *, FixItList>
+getFixIts(FixableGadgetSets &FixablesForAllVars, const Strategy &S,
+ ASTContext &Ctx,
+ /* The function decl under analysis */ const Decl *D,
+ const DeclUseTracker &Tracker, UnsafeBufferUsageHandler &Handler,
+ const VariableGroupsManager &VarGrpMgr) {
+ // `FixItsForVariable` will map each variable to a set of fix-its directly
+ // associated to the variable itself. Fix-its of distinct variables in
+ // `FixItsForVariable` are disjoint.
+ std::map<const VarDecl *, FixItList> FixItsForVariable;
+
+ // Populate `FixItsForVariable` with fix-its directly associated with each
+ // variable. Fix-its directly associated to a variable 'v' are the ones
+ // produced by the `FixableGadget`s whose claimed variable is 'v'.
+ for (const auto &[VD, Fixables] : FixablesForAllVars.byVar) {
+ FixItsForVariable[VD] =
+ fixVariable(VD, S.lookup(VD), D, Tracker, Ctx, Handler);
+ // If we fail to produce Fix-It for the declaration we have to skip the
+ // variable entirely.
+ if (FixItsForVariable[VD].empty()) {
+ FixItsForVariable.erase(VD);
+ continue;
+ }
+ for (const auto &F : Fixables) {
+ std::optional<FixItList> Fixits = F->getFixits(S);
+
+ if (Fixits) {
+ FixItsForVariable[VD].insert(FixItsForVariable[VD].end(),
+ Fixits->begin(), Fixits->end());
+ continue;
+ }
+#ifndef NDEBUG
+ Handler.addDebugNoteForVar(
+ VD, F->getBaseStmt()->getBeginLoc(),
+ ("gadget '" + F->getDebugName() + "' refused to produce a fix")
+ .str());
+#endif
+ FixItsForVariable.erase(VD);
+ break;
+ }
+ }
+
+ // `FixItsForVariable` now contains only variables that can be
+ // fixed. A variable can be fixed if its' declaration and all Fixables
+ // associated to it can all be fixed.
+
+ // To further remove from `FixItsForVariable` variables whose group mates
+ // cannot be fixed...
+ eraseVarsForUnfixableGroupMates(FixItsForVariable, VarGrpMgr);
+ // Now `FixItsForVariable` gets further reduced: a variable is in
+ // `FixItsForVariable` iff it can be fixed and all its group mates can be
+ // fixed.
+
+ // Fix-its of bounds-safe overloads of `D` are shared by parameters of `D`.
+ // That is, when fixing multiple parameters in one step, these fix-its will
+ // be applied only once (instead of being applied per parameter).
+ FixItList FixItsSharedByParms{};
+
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ FixItsSharedByParms = createFunctionOverloadsForParms(
+ FixItsForVariable, VarGrpMgr, FD, S, Ctx, Handler);
+
+ // The map that maps each variable `v` to fix-its for the whole group where
+ // `v` is in:
+ std::map<const VarDecl *, FixItList> FinalFixItsForVariable{
+ FixItsForVariable};
+
+ for (auto &[Var, Ignore] : FixItsForVariable) {
+ bool AnyParm = false;
+ const auto VarGroupForVD = VarGrpMgr.getGroupOfVar(Var, &AnyParm);
+
+ for (const VarDecl *GrpMate : VarGroupForVD) {
+ if (Var == GrpMate)
+ continue;
+ if (FixItsForVariable.count(GrpMate))
+ FinalFixItsForVariable[Var].append(FixItsForVariable[GrpMate]);
+ }
+ if (AnyParm) {
+ // This assertion should never fail. Otherwise we have a bug.
+ assert(!FixItsSharedByParms.empty() &&
+ "Should not try to fix a parameter that does not belong to a "
+ "FunctionDecl");
+ FinalFixItsForVariable[Var].append(FixItsSharedByParms);
+ }
+ }
+ // Fix-its that will be applied in one step shall NOT:
+ // 1. overlap with macros or/and templates; or
+ // 2. conflict with each other.
+ // Otherwise, the fix-its will be dropped.
+ for (auto Iter = FinalFixItsForVariable.begin();
+ Iter != FinalFixItsForVariable.end();)
+ if (overlapWithMacro(Iter->second) ||
+ clang::internal::anyConflict(Iter->second, Ctx.getSourceManager())) {
+ Iter = FinalFixItsForVariable.erase(Iter);
+ } else
+ Iter++;
+ return FinalFixItsForVariable;
+}
+
+template <typename VarDeclIterTy>
+static Strategy
+getNaiveStrategy(llvm::iterator_range<VarDeclIterTy> UnsafeVars) {
+ Strategy S;
+ for (const VarDecl *VD : UnsafeVars) {
+ S.set(VD, Strategy::Kind::Span);
+ }
+ return S;
+}
+
+// Manages variable groups:
+class VariableGroupsManagerImpl : public VariableGroupsManager {
+ const std::vector<VarGrpTy> Groups;
+ const std::map<const VarDecl *, unsigned> &VarGrpMap;
+ const llvm::SetVector<const VarDecl *> &GrpsUnionForParms;
+
+public:
+ VariableGroupsManagerImpl(
+ const std::vector<VarGrpTy> &Groups,
+ const std::map<const VarDecl *, unsigned> &VarGrpMap,
+ const llvm::SetVector<const VarDecl *> &GrpsUnionForParms)
+ : Groups(Groups), VarGrpMap(VarGrpMap),
+ GrpsUnionForParms(GrpsUnionForParms) {}
+
+ VarGrpRef getGroupOfVar(const VarDecl *Var, bool *HasParm) const override {
+ if (GrpsUnionForParms.contains(Var)) {
+ if (HasParm)
+ *HasParm = true;
+ return GrpsUnionForParms.getArrayRef();
+ }
+ if (HasParm)
+ *HasParm = false;
+
+ auto It = VarGrpMap.find(Var);
+
+ if (It == VarGrpMap.end())
+ return std::nullopt;
+ return Groups[It->second];
+ }
+
+ VarGrpRef getGroupOfParms() const override {
+ return GrpsUnionForParms.getArrayRef();
+ }
+};
+
+void clang::checkUnsafeBufferUsage(const Decl *D,
+ UnsafeBufferUsageHandler &Handler,
+ bool EmitSuggestions) {
+#ifndef NDEBUG
+ Handler.clearDebugNotes();
+#endif
+
+ assert(D && D->getBody());
+ // We do not want to visit a Lambda expression defined inside a method independently.
+ // Instead, it should be visited along with the outer method.
+ // FIXME: do we want to do the same thing for `BlockDecl`s?
+ if (const auto *fd = dyn_cast<CXXMethodDecl>(D)) {
+ if (fd->getParent()->isLambda() && fd->getParent()->isLocalClass())
+ return;
+ }
+
+ // Do not emit fixit suggestions for functions declared in an
+ // extern "C" block.
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ for (FunctionDecl *FReDecl : FD->redecls()) {
+ if (FReDecl->isExternC()) {
+ EmitSuggestions = false;
+ break;
+ }
+ }
+ }
+
+ WarningGadgetSets UnsafeOps;
+ FixableGadgetSets FixablesForAllVars;
+
+ auto [FixableGadgets, WarningGadgets, Tracker] =
+ findGadgets(D, Handler, EmitSuggestions);
+
+ if (!EmitSuggestions) {
+ // Our job is very easy without suggestions. Just warn about
+ // every problematic operation and consider it done. No need to deal
+ // with fixable gadgets, no need to group operations by variable.
+ for (const auto &G : WarningGadgets) {
+ Handler.handleUnsafeOperation(G->getBaseStmt(), /*IsRelatedToDecl=*/false,
+ D->getASTContext());
+ }
+
+ // This return guarantees that most of the machine doesn't run when
+ // suggestions aren't requested.
+ assert(FixableGadgets.size() == 0 &&
+ "Fixable gadgets found but suggestions not requested!");
+ return;
+ }
+
+ // If no `WarningGadget`s ever matched, there is no unsafe operations in the
+ // function under the analysis. No need to fix any Fixables.
+ if (!WarningGadgets.empty()) {
+ // Gadgets "claim" variables they're responsible for. Once this loop
+ // finishes, the tracker will only track DREs that weren't claimed by any
+ // gadgets, i.e. not understood by the analysis.
+ for (const auto &G : FixableGadgets) {
+ for (const auto *DRE : G->getClaimedVarUseSites()) {
+ Tracker.claimUse(DRE);
+ }
+ }
+ }
+
+ // If no `WarningGadget`s ever matched, there is no unsafe operations in the
+ // function under the analysis. Thus, it early returns here as there is
+ // nothing needs to be fixed.
+ //
+ // Note this claim is based on the assumption that there is no unsafe
+ // variable whose declaration is invisible from the analyzing function.
+ // Otherwise, we need to consider if the uses of those unsafe varuables needs
+ // fix.
+ // So far, we are not fixing any global variables or class members. And,
+ // lambdas will be analyzed along with the enclosing function. So this early
+ // return is correct for now.
+ if (WarningGadgets.empty())
+ return;
+
+ UnsafeOps = groupWarningGadgetsByVar(std::move(WarningGadgets));
+ FixablesForAllVars = groupFixablesByVar(std::move(FixableGadgets));
+
+ std::map<const VarDecl *, FixItList> FixItsForVariableGroup;
+
+ // Filter out non-local vars and vars with unclaimed DeclRefExpr-s.
+ for (auto it = FixablesForAllVars.byVar.cbegin();
+ it != FixablesForAllVars.byVar.cend();) {
+ // FIXME: need to deal with global variables later
+ if ((!it->first->isLocalVarDecl() && !isa<ParmVarDecl>(it->first))) {
+#ifndef NDEBUG
+ Handler.addDebugNoteForVar(
+ it->first, it->first->getBeginLoc(),
+ ("failed to produce fixit for '" + it->first->getNameAsString() +
+ "' : neither local nor a parameter"));
+#endif
+ it = FixablesForAllVars.byVar.erase(it);
+ } else if (it->first->getType().getCanonicalType()->isReferenceType()) {
+#ifndef NDEBUG
+ Handler.addDebugNoteForVar(it->first, it->first->getBeginLoc(),
+ ("failed to produce fixit for '" +
+ it->first->getNameAsString() +
+ "' : has a reference type"));
+#endif
+ it = FixablesForAllVars.byVar.erase(it);
+ } else if (Tracker.hasUnclaimedUses(it->first)) {
+#ifndef NDEBUG
+ auto AllUnclaimed = Tracker.getUnclaimedUses(it->first);
+ for (auto UnclaimedDRE : AllUnclaimed) {
+ std::string UnclaimedUseTrace =
+ getDREAncestorString(UnclaimedDRE, D->getASTContext());
+
+ Handler.addDebugNoteForVar(
+ it->first, UnclaimedDRE->getBeginLoc(),
+ ("failed to produce fixit for '" + it->first->getNameAsString() +
+ "' : has an unclaimed use\nThe unclaimed DRE trace: " +
+ UnclaimedUseTrace));
+ }
+#endif
+ it = FixablesForAllVars.byVar.erase(it);
+ } else if (it->first->isInitCapture()) {
+#ifndef NDEBUG
+ Handler.addDebugNoteForVar(
+ it->first, it->first->getBeginLoc(),
+ ("failed to produce fixit for '" + it->first->getNameAsString() +
+ "' : init capture"));
+#endif
+ it = FixablesForAllVars.byVar.erase(it);
+ }else {
+ ++it;
+ }
+ }
+
+ // Fixpoint iteration for pointer assignments
+ using DepMapTy = DenseMap<const VarDecl *, llvm::SetVector<const VarDecl *>>;
+ DepMapTy DependenciesMap{};
+ DepMapTy PtrAssignmentGraph{};
+
+ for (auto it : FixablesForAllVars.byVar) {
+ for (const FixableGadget *fixable : it.second) {
+ std::optional<std::pair<const VarDecl *, const VarDecl *>> ImplPair =
+ fixable->getStrategyImplications();
+ if (ImplPair) {
+ std::pair<const VarDecl *, const VarDecl *> Impl = std::move(*ImplPair);
+ PtrAssignmentGraph[Impl.first].insert(Impl.second);
+ }
+ }
+ }
+
+ /*
+ The following code does a BFS traversal of the `PtrAssignmentGraph`
+ considering all unsafe vars as starting nodes and constructs an undirected
+ graph `DependenciesMap`. Constructing the `DependenciesMap` in this manner
+ elimiates all variables that are unreachable from any unsafe var. In other
+ words, this removes all dependencies that don't include any unsafe variable
+ and consequently don't need any fixit generation.
+ Note: A careful reader would observe that the code traverses
+ `PtrAssignmentGraph` using `CurrentVar` but adds edges between `Var` and
+ `Adj` and not between `CurrentVar` and `Adj`. Both approaches would
+ achieve the same result but the one used here dramatically cuts the
+ amount of hoops the second part of the algorithm needs to jump, given that
+ a lot of these connections become "direct". The reader is advised not to
+ imagine how the graph is transformed because of using `Var` instead of
+ `CurrentVar`. The reader can continue reading as if `CurrentVar` was used,
+ and think about why it's equivalent later.
+ */
+ std::set<const VarDecl *> VisitedVarsDirected{};
+ for (const auto &[Var, ignore] : UnsafeOps.byVar) {
+ if (VisitedVarsDirected.find(Var) == VisitedVarsDirected.end()) {
+
+ std::queue<const VarDecl*> QueueDirected{};
+ QueueDirected.push(Var);
+ while(!QueueDirected.empty()) {
+ const VarDecl* CurrentVar = QueueDirected.front();
+ QueueDirected.pop();
+ VisitedVarsDirected.insert(CurrentVar);
+ auto AdjacentNodes = PtrAssignmentGraph[CurrentVar];
+ for (const VarDecl *Adj : AdjacentNodes) {
+ if (VisitedVarsDirected.find(Adj) == VisitedVarsDirected.end()) {
+ QueueDirected.push(Adj);
+ }
+ DependenciesMap[Var].insert(Adj);
+ DependenciesMap[Adj].insert(Var);
+ }
+ }
+ }
+ }
+
+ // `Groups` stores the set of Connected Components in the graph.
+ std::vector<VarGrpTy> Groups;
+ // `VarGrpMap` maps variables that need fix to the groups (indexes) that the
+ // variables belong to. Group indexes refer to the elements in `Groups`.
+ // `VarGrpMap` is complete in that every variable that needs fix is in it.
+ std::map<const VarDecl *, unsigned> VarGrpMap;
+ // The union group over the ones in "Groups" that contain parameters of `D`:
+ llvm::SetVector<const VarDecl *>
+ GrpsUnionForParms; // these variables need to be fixed in one step
+
+ // Group Connected Components for Unsafe Vars
+ // (Dependencies based on pointer assignments)
+ std::set<const VarDecl *> VisitedVars{};
+ for (const auto &[Var, ignore] : UnsafeOps.byVar) {
+ if (VisitedVars.find(Var) == VisitedVars.end()) {
+ VarGrpTy &VarGroup = Groups.emplace_back();
+ std::queue<const VarDecl*> Queue{};
+
+ Queue.push(Var);
+ while(!Queue.empty()) {
+ const VarDecl* CurrentVar = Queue.front();
+ Queue.pop();
+ VisitedVars.insert(CurrentVar);
+ VarGroup.push_back(CurrentVar);
+ auto AdjacentNodes = DependenciesMap[CurrentVar];
+ for (const VarDecl *Adj : AdjacentNodes) {
+ if (VisitedVars.find(Adj) == VisitedVars.end()) {
+ Queue.push(Adj);
+ }
+ }
+ }
+
+ bool HasParm = false;
+ unsigned GrpIdx = Groups.size() - 1;
+
+ for (const VarDecl *V : VarGroup) {
+ VarGrpMap[V] = GrpIdx;
+ if (!HasParm && isParameterOf(V, D))
+ HasParm = true;
+ }
+ if (HasParm)
+ GrpsUnionForParms.insert(VarGroup.begin(), VarGroup.end());
+ }
+ }
+
+ // Remove a `FixableGadget` if the associated variable is not in the graph
+ // computed above. We do not want to generate fix-its for such variables,
+ // since they are neither warned nor reachable from a warned one.
+ //
+ // Note a variable is not warned if it is not directly used in any unsafe
+ // operation. A variable `v` is NOT reachable from an unsafe variable, if it
+ // does not exist another variable `u` such that `u` is warned and fixing `u`
+ // (transitively) implicates fixing `v`.
+ //
+ // For example,
+ // ```
+ // void f(int * p) {
+ // int * a = p; *p = 0;
+ // }
+ // ```
+ // `*p = 0` is a fixable gadget associated with a variable `p` that is neither
+ // warned nor reachable from a warned one. If we add `a[5] = 0` to the end of
+ // the function above, `p` becomes reachable from a warned variable.
+ for (auto I = FixablesForAllVars.byVar.begin();
+ I != FixablesForAllVars.byVar.end();) {
+ // Note `VisitedVars` contain all the variables in the graph:
+ if (!VisitedVars.count((*I).first)) {
+ // no such var in graph:
+ I = FixablesForAllVars.byVar.erase(I);
+ } else
+ ++I;
+ }
+
+ // We assign strategies to variables that are 1) in the graph and 2) can be
+ // fixed. Other variables have the default "Won't fix" strategy.
+ Strategy NaiveStrategy = getNaiveStrategy(llvm::make_filter_range(
+ VisitedVars, [&FixablesForAllVars](const VarDecl *V) {
+ // If a warned variable has no "Fixable", it is considered unfixable:
+ return FixablesForAllVars.byVar.count(V);
+ }));
+ VariableGroupsManagerImpl VarGrpMgr(Groups, VarGrpMap, GrpsUnionForParms);
+
+ if (isa<NamedDecl>(D))
+ // The only case where `D` is not a `NamedDecl` is when `D` is a
+ // `BlockDecl`. Let's not fix variables in blocks for now
+ FixItsForVariableGroup =
+ getFixIts(FixablesForAllVars, NaiveStrategy, D->getASTContext(), D,
+ Tracker, Handler, VarGrpMgr);
+
+ for (const auto &G : UnsafeOps.noVar) {
+ Handler.handleUnsafeOperation(G->getBaseStmt(), /*IsRelatedToDecl=*/false,
+ D->getASTContext());
+ }
+
+ for (const auto &[VD, WarningGadgets] : UnsafeOps.byVar) {
+ auto FixItsIt = FixItsForVariableGroup.find(VD);
+ Handler.handleUnsafeVariableGroup(VD, VarGrpMgr,
+ FixItsIt != FixItsForVariableGroup.end()
+ ? std::move(FixItsIt->second)
+ : FixItList{},
+ D);
+ for (const auto &G : WarningGadgets) {
+ Handler.handleUnsafeOperation(G->getBaseStmt(), /*IsRelatedToDecl=*/true,
+ D->getASTContext());
+ }
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/Attributes.cpp b/contrib/llvm-project/clang/lib/Basic/Attributes.cpp
index 62eea9c59082..44a4f1890d39 100644
--- a/contrib/llvm-project/clang/lib/Basic/Attributes.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Attributes.cpp
@@ -1,16 +1,39 @@
+//===--- Attributes.cpp ---------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the AttributeCommonInfo interface.
+//
+//===----------------------------------------------------------------------===//
+
#include "clang/Basic/Attributes.h"
#include "clang/Basic/AttrSubjectMatchRules.h"
-#include "clang/Basic/AttributeCommonInfo.h"
#include "clang/Basic/IdentifierTable.h"
-#include "llvm/ADT/StringSwitch.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/ParsedAttrInfo.h"
+#include "clang/Basic/TargetInfo.h"
+
using namespace clang;
-int clang::hasAttribute(AttrSyntax Syntax, const IdentifierInfo *Scope,
- const IdentifierInfo *Attr, const TargetInfo &Target,
- const LangOptions &LangOpts) {
+static int hasAttributeImpl(AttributeCommonInfo::Syntax Syntax, StringRef Name,
+ StringRef ScopeName, const TargetInfo &Target,
+ const LangOptions &LangOpts) {
+
+#include "clang/Basic/AttrHasAttributeImpl.inc"
+
+ return 0;
+}
+
+int clang::hasAttribute(AttributeCommonInfo::Syntax Syntax,
+ const IdentifierInfo *Scope, const IdentifierInfo *Attr,
+ const TargetInfo &Target, const LangOptions &LangOpts) {
StringRef Name = Attr->getName();
// Normalize the attribute name, __foo__ becomes foo.
- if (Name.size() >= 4 && Name.startswith("__") && Name.endswith("__"))
+ if (Name.size() >= 4 && Name.starts_with("__") && Name.ends_with("__"))
Name = Name.substr(2, Name.size() - 4);
// Normalize the scope name, but only for gnu and clang attributes.
@@ -24,11 +47,17 @@ int clang::hasAttribute(AttrSyntax Syntax, const IdentifierInfo *Scope,
// attributes. We support those, but not through the typical attribute
// machinery that goes through TableGen. We support this in all OpenMP modes
// so long as double square brackets are enabled.
- if (LangOpts.OpenMP && LangOpts.DoubleSquareBracketAttributes &&
- ScopeName == "omp")
+ if (LangOpts.OpenMP && ScopeName == "omp")
return (Name == "directive" || Name == "sequence") ? 1 : 0;
-#include "clang/Basic/AttrHasAttributeImpl.inc"
+ int res = hasAttributeImpl(Syntax, Name, ScopeName, Target, LangOpts);
+ if (res)
+ return res;
+
+ // Check if any plugin provides this attribute.
+ for (auto &Ptr : getAttributePluginInstances())
+ if (Ptr->hasSpelling(Syntax, Name))
+ return 1;
return 0;
}
@@ -53,7 +82,7 @@ normalizeAttrScopeName(const IdentifierInfo *Scope,
// to be "clang".
StringRef ScopeName = Scope->getName();
if (SyntaxUsed == AttributeCommonInfo::AS_CXX11 ||
- SyntaxUsed == AttributeCommonInfo::AS_C2x) {
+ SyntaxUsed == AttributeCommonInfo::AS_C23) {
if (ScopeName == "__gnu__")
ScopeName = "gnu";
else if (ScopeName == "_Clang")
@@ -70,12 +99,12 @@ static StringRef normalizeAttrName(const IdentifierInfo *Name,
bool ShouldNormalize =
SyntaxUsed == AttributeCommonInfo::AS_GNU ||
((SyntaxUsed == AttributeCommonInfo::AS_CXX11 ||
- SyntaxUsed == AttributeCommonInfo::AS_C2x) &&
+ SyntaxUsed == AttributeCommonInfo::AS_C23) &&
(NormalizedScopeName.empty() || NormalizedScopeName == "gnu" ||
NormalizedScopeName == "clang"));
StringRef AttrName = Name->getName();
- if (ShouldNormalize && AttrName.size() >= 4 && AttrName.startswith("__") &&
- AttrName.endswith("__"))
+ if (ShouldNormalize && AttrName.size() >= 4 && AttrName.starts_with("__") &&
+ AttrName.ends_with("__"))
AttrName = AttrName.slice(2, AttrName.size() - 2);
return AttrName;
@@ -85,6 +114,10 @@ bool AttributeCommonInfo::isGNUScope() const {
return ScopeName && (ScopeName->isStr("gnu") || ScopeName->isStr("__gnu__"));
}
+bool AttributeCommonInfo::isClangScope() const {
+ return ScopeName && (ScopeName->isStr("clang") || ScopeName->isStr("_Clang"));
+}
+
#include "clang/Sema/AttrParsedAttrKinds.inc"
static SmallString<64> normalizeName(const IdentifierInfo *Name,
@@ -96,7 +129,7 @@ static SmallString<64> normalizeName(const IdentifierInfo *Name,
SmallString<64> FullName = ScopeName;
if (!ScopeName.empty()) {
assert(SyntaxUsed == AttributeCommonInfo::AS_CXX11 ||
- SyntaxUsed == AttributeCommonInfo::AS_C2x);
+ SyntaxUsed == AttributeCommonInfo::AS_C23);
FullName += "::";
}
FullName += AttrName;
diff --git a/contrib/llvm-project/clang/lib/Basic/BuiltinTargetFeatures.h b/contrib/llvm-project/clang/lib/Basic/BuiltinTargetFeatures.h
new file mode 100644
index 000000000000..9754acda2a68
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/BuiltinTargetFeatures.h
@@ -0,0 +1,95 @@
+//===-- CodeGenFunction.h - Target features for builtin ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal required target features for builtin.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_BASIC_BUILTINTARGETFEATURES_H
+#define LLVM_CLANG_LIB_BASIC_BUILTINTARGETFEATURES_H
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+
+using llvm::StringRef;
+
+namespace clang {
+namespace Builtin {
+/// TargetFeatures - This class is used to check whether the builtin function
+/// has the required tagert specific features. It is able to support the
+/// combination of ','(and), '|'(or), and '()'. By default, the priority of
+/// ',' is higher than that of '|' .
+/// E.g:
+/// A,B|C means the builtin function requires both A and B, or C.
+/// If we want the builtin function requires both A and B, or both A and C,
+/// there are two ways: A,B|A,C or A,(B|C).
+/// The FeaturesList should not contain spaces, and brackets must appear in
+/// pairs.
+class TargetFeatures {
+ struct FeatureListStatus {
+ bool HasFeatures;
+ StringRef CurFeaturesList;
+ };
+
+ const llvm::StringMap<bool> &CallerFeatureMap;
+
+ FeatureListStatus getAndFeatures(StringRef FeatureList) {
+ int InParentheses = 0;
+ bool HasFeatures = true;
+ size_t SubexpressionStart = 0;
+ for (size_t i = 0, e = FeatureList.size(); i < e; ++i) {
+ char CurrentToken = FeatureList[i];
+ switch (CurrentToken) {
+ default:
+ break;
+ case '(':
+ if (InParentheses == 0)
+ SubexpressionStart = i + 1;
+ ++InParentheses;
+ break;
+ case ')':
+ --InParentheses;
+ assert(InParentheses >= 0 && "Parentheses are not in pair");
+ [[fallthrough]];
+ case '|':
+ case ',':
+ if (InParentheses == 0) {
+ if (HasFeatures && i != SubexpressionStart) {
+ StringRef F = FeatureList.slice(SubexpressionStart, i);
+ HasFeatures = CurrentToken == ')' ? hasRequiredFeatures(F)
+ : CallerFeatureMap.lookup(F);
+ }
+ SubexpressionStart = i + 1;
+ if (CurrentToken == '|') {
+ return {HasFeatures, FeatureList.substr(SubexpressionStart)};
+ }
+ }
+ break;
+ }
+ }
+ assert(InParentheses == 0 && "Parentheses are not in pair");
+ if (HasFeatures && SubexpressionStart != FeatureList.size())
+ HasFeatures =
+ CallerFeatureMap.lookup(FeatureList.substr(SubexpressionStart));
+ return {HasFeatures, StringRef()};
+ }
+
+public:
+ bool hasRequiredFeatures(StringRef FeatureList) {
+ FeatureListStatus FS = {false, FeatureList};
+ while (!FS.HasFeatures && !FS.CurFeaturesList.empty())
+ FS = getAndFeatures(FS.CurFeaturesList);
+ return FS.HasFeatures;
+ }
+
+ TargetFeatures(const llvm::StringMap<bool> &CallerFeatureMap)
+ : CallerFeatureMap(CallerFeatureMap) {}
+};
+
+} // namespace Builtin
+} // namespace clang
+#endif /* CLANG_LIB_BASIC_BUILTINTARGETFEATURES_H */
diff --git a/contrib/llvm-project/clang/lib/Basic/Builtins.cpp b/contrib/llvm-project/clang/lib/Basic/Builtins.cpp
index 7118aa9dc210..d366989bafc5 100644
--- a/contrib/llvm-project/clang/lib/Basic/Builtins.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Builtins.cpp
@@ -11,20 +11,33 @@
//===----------------------------------------------------------------------===//
#include "clang/Basic/Builtins.h"
+#include "BuiltinTargetFeatures.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringRef.h"
using namespace clang;
-static const Builtin::Info BuiltinInfo[] = {
- { "not a builtin function", nullptr, nullptr, nullptr, ALL_LANGUAGES,nullptr},
+const char *HeaderDesc::getName() const {
+ switch (ID) {
+#define HEADER(ID, NAME) \
+ case ID: \
+ return NAME;
+#include "clang/Basic/BuiltinHeaders.def"
+#undef HEADER
+ };
+ llvm_unreachable("Unknown HeaderDesc::HeaderID enum");
+}
+
+static constexpr Builtin::Info BuiltinInfo[] = {
+ {"not a builtin function", nullptr, nullptr, nullptr, HeaderDesc::NO_HEADER,
+ ALL_LANGUAGES},
#define BUILTIN(ID, TYPE, ATTRS) \
- { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANGS) \
- { #ID, TYPE, ATTRS, nullptr, LANGS, nullptr },
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, LANGS},
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER, LANGS) \
- { #ID, TYPE, ATTRS, HEADER, LANGS, nullptr },
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::HEADER, LANGS},
#include "clang/Basic/Builtins.def"
};
@@ -48,43 +61,65 @@ void Builtin::Context::InitializeTarget(const TargetInfo &Target,
}
bool Builtin::Context::isBuiltinFunc(llvm::StringRef FuncName) {
- for (unsigned i = Builtin::NotBuiltin + 1; i != Builtin::FirstTSBuiltin; ++i)
- if (FuncName.equals(BuiltinInfo[i].Name))
+ bool InStdNamespace = FuncName.consume_front("std-");
+ for (unsigned i = Builtin::NotBuiltin + 1; i != Builtin::FirstTSBuiltin;
+ ++i) {
+ if (FuncName.equals(BuiltinInfo[i].Name) &&
+ (bool)strchr(BuiltinInfo[i].Attributes, 'z') == InStdNamespace)
return strchr(BuiltinInfo[i].Attributes, 'f') != nullptr;
+ }
return false;
}
-bool Builtin::Context::builtinIsSupported(const Builtin::Info &BuiltinInfo,
- const LangOptions &LangOpts) {
- bool BuiltinsUnsupported =
- (LangOpts.NoBuiltin || LangOpts.isNoBuiltinFunc(BuiltinInfo.Name)) &&
- strchr(BuiltinInfo.Attributes, 'f');
- bool CorBuiltinsUnsupported =
- !LangOpts.Coroutines && (BuiltinInfo.Langs & COR_LANG);
- bool MathBuiltinsUnsupported =
- LangOpts.NoMathBuiltin && BuiltinInfo.HeaderName &&
- llvm::StringRef(BuiltinInfo.HeaderName).equals("math.h");
- bool GnuModeUnsupported = !LangOpts.GNUMode && (BuiltinInfo.Langs & GNU_LANG);
- bool MSModeUnsupported =
- !LangOpts.MicrosoftExt && (BuiltinInfo.Langs & MS_LANG);
- bool ObjCUnsupported = !LangOpts.ObjC && BuiltinInfo.Langs == OBJC_LANG;
- bool OclC1Unsupported = (LangOpts.OpenCLVersion / 100) != 1 &&
- (BuiltinInfo.Langs & ALL_OCLC_LANGUAGES ) == OCLC1X_LANG;
- bool OclC2Unsupported =
- (LangOpts.OpenCLVersion != 200 && !LangOpts.OpenCLCPlusPlus) &&
- (BuiltinInfo.Langs & ALL_OCLC_LANGUAGES) == OCLC20_LANG;
- bool OclCUnsupported = !LangOpts.OpenCL &&
- (BuiltinInfo.Langs & ALL_OCLC_LANGUAGES);
- bool OpenMPUnsupported = !LangOpts.OpenMP && BuiltinInfo.Langs == OMP_LANG;
- bool CUDAUnsupported = !LangOpts.CUDA && BuiltinInfo.Langs == CUDA_LANG;
- bool CPlusPlusUnsupported =
- !LangOpts.CPlusPlus && BuiltinInfo.Langs == CXX_LANG;
- return !BuiltinsUnsupported && !CorBuiltinsUnsupported &&
- !MathBuiltinsUnsupported && !OclCUnsupported && !OclC1Unsupported &&
- !OclC2Unsupported && !OpenMPUnsupported && !GnuModeUnsupported &&
- !MSModeUnsupported && !ObjCUnsupported && !CPlusPlusUnsupported &&
- !CUDAUnsupported;
+/// Is this builtin supported according to the given language options?
+static bool builtinIsSupported(const Builtin::Info &BuiltinInfo,
+ const LangOptions &LangOpts) {
+ /* Builtins Unsupported */
+ if (LangOpts.NoBuiltin && strchr(BuiltinInfo.Attributes, 'f') != nullptr)
+ return false;
+ /* CorBuiltins Unsupported */
+ if (!LangOpts.Coroutines && (BuiltinInfo.Langs & COR_LANG))
+ return false;
+ /* MathBuiltins Unsupported */
+ if (LangOpts.NoMathBuiltin && BuiltinInfo.Header.ID == HeaderDesc::MATH_H)
+ return false;
+ /* GnuMode Unsupported */
+ if (!LangOpts.GNUMode && (BuiltinInfo.Langs & GNU_LANG))
+ return false;
+ /* MSMode Unsupported */
+ if (!LangOpts.MicrosoftExt && (BuiltinInfo.Langs & MS_LANG))
+ return false;
+ /* ObjC Unsupported */
+ if (!LangOpts.ObjC && BuiltinInfo.Langs == OBJC_LANG)
+ return false;
+ /* OpenCLC Unsupported */
+ if (!LangOpts.OpenCL && (BuiltinInfo.Langs & ALL_OCL_LANGUAGES))
+ return false;
+ /* OopenCL GAS Unsupported */
+ if (!LangOpts.OpenCLGenericAddressSpace && (BuiltinInfo.Langs & OCL_GAS))
+ return false;
+ /* OpenCL Pipe Unsupported */
+ if (!LangOpts.OpenCLPipes && (BuiltinInfo.Langs & OCL_PIPE))
+ return false;
+
+ // Device side enqueue is not supported until OpenCL 2.0. In 2.0 and higher
+ // support is indicated with language option for blocks.
+
+ /* OpenCL DSE Unsupported */
+ if ((LangOpts.getOpenCLCompatibleVersion() < 200 || !LangOpts.Blocks) &&
+ (BuiltinInfo.Langs & OCL_DSE))
+ return false;
+ /* OpenMP Unsupported */
+ if (!LangOpts.OpenMP && BuiltinInfo.Langs == OMP_LANG)
+ return false;
+ /* CUDA Unsupported */
+ if (!LangOpts.CUDA && BuiltinInfo.Langs == CUDA_LANG)
+ return false;
+ /* CPlusPlus Unsupported */
+ if (!LangOpts.CPlusPlus && BuiltinInfo.Langs == CXX_LANG)
+ return false;
+ return true;
}
/// initializeBuiltins - Mark the identifiers for all the builtins with their
@@ -107,6 +142,19 @@ void Builtin::Context::initializeBuiltins(IdentifierTable &Table,
for (unsigned i = 0, e = AuxTSRecords.size(); i != e; ++i)
Table.get(AuxTSRecords[i].Name)
.setBuiltinID(i + Builtin::FirstTSBuiltin + TSRecords.size());
+
+ // Step #4: Unregister any builtins specified by -fno-builtin-foo.
+ for (llvm::StringRef Name : LangOpts.NoBuiltinFuncs) {
+ bool InStdNamespace = Name.consume_front("std-");
+ auto NameIt = Table.find(Name);
+ if (NameIt != Table.end()) {
+ unsigned ID = NameIt->second->getBuiltinID();
+ if (ID != Builtin::NotBuiltin && isPredefinedLibFunction(ID) &&
+ isInStdNamespace(ID) == InStdNamespace) {
+ NameIt->second->clearBuiltinID();
+ }
+ }
+ }
}
unsigned Builtin::Context::getRequiredVectorWidth(unsigned ID) const {
@@ -186,8 +234,19 @@ bool Builtin::Context::performsCallback(unsigned ID,
}
bool Builtin::Context::canBeRedeclared(unsigned ID) const {
- return ID == Builtin::NotBuiltin ||
- ID == Builtin::BI__va_start ||
- (!hasReferenceArgsOrResult(ID) &&
- !hasCustomTypechecking(ID));
+ return ID == Builtin::NotBuiltin || ID == Builtin::BI__va_start ||
+ ID == Builtin::BI__builtin_assume_aligned ||
+ (!hasReferenceArgsOrResult(ID) && !hasCustomTypechecking(ID)) ||
+ isInStdNamespace(ID);
+}
+
+bool Builtin::evaluateRequiredTargetFeatures(
+ StringRef RequiredFeatures, const llvm::StringMap<bool> &TargetFetureMap) {
+ // Return true if the builtin doesn't have any required features.
+ if (RequiredFeatures.empty())
+ return true;
+ assert(!RequiredFeatures.contains(' ') && "Space in feature list");
+
+ TargetFeatures TF(TargetFetureMap);
+ return TF.hasRequiredFeatures(RequiredFeatures);
}
diff --git a/contrib/llvm-project/clang/lib/Basic/CLWarnings.cpp b/contrib/llvm-project/clang/lib/Basic/CLWarnings.cpp
new file mode 100644
index 000000000000..5449d8f59fcf
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/CLWarnings.cpp
@@ -0,0 +1,29 @@
+//===--- CLWarnings.h - Maps some cl.exe warning ids -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Diagnostic-related interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/CLWarnings.h"
+#include "clang/Basic/DiagnosticCategories.h"
+#include <optional>
+
+using namespace clang;
+
+std::optional<diag::Group>
+clang::diagGroupFromCLWarningID(unsigned CLWarningID) {
+ switch (CLWarningID) {
+ case 4005: return diag::Group::MacroRedefined;
+ case 4018: return diag::Group::SignCompare;
+ case 4100: return diag::Group::UnusedParameter;
+ case 4910: return diag::Group::DllexportExplicitInstantiationDecl;
+ case 4996: return diag::Group::DeprecatedDeclarations;
+ }
+ return {};
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/CodeGenOptions.cpp b/contrib/llvm-project/clang/lib/Basic/CodeGenOptions.cpp
index 0c609cfa61de..79d715305ef2 100644
--- a/contrib/llvm-project/clang/lib/Basic/CodeGenOptions.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/CodeGenOptions.cpp
@@ -20,4 +20,41 @@ CodeGenOptions::CodeGenOptions() {
memcpy(CoverageVersion, "408*", 4);
}
+void CodeGenOptions::resetNonModularOptions(StringRef ModuleFormat) {
+ // First reset all CodeGen options only. The Debug options are handled later.
+#define DEBUGOPT(Name, Bits, Default)
+#define VALUE_DEBUGOPT(Name, Bits, Default)
+#define ENUM_DEBUGOPT(Name, Type, Bits, Default)
+#define CODEGENOPT(Name, Bits, Default) Name = Default;
+#define ENUM_CODEGENOPT(Name, Type, Bits, Default) set##Name(Default);
+// Do not reset AST affecting code generation options.
+#define AFFECTING_VALUE_CODEGENOPT(Name, Bits, Default)
+#include "clang/Basic/CodeGenOptions.def"
+
+ // Next reset all debug options that can always be reset, because they never
+ // affect the PCM.
+#define DEBUGOPT(Name, Bits, Default)
+#define VALUE_DEBUGOPT(Name, Bits, Default)
+#define ENUM_DEBUGOPT(Name, Type, Bits, Default)
+#define BENIGN_DEBUGOPT(Name, Bits, Default) Name = Default;
+#define BENIGN_VALUE_DEBUGOPT(Name, Bits, Default) Name = Default;
+#define BENIGN_ENUM_DEBUGOPT(Name, Type, Bits, Default) set##Name(Default);
+#include "clang/Basic/DebugOptions.def"
+
+ // Conditionally reset debug options that only matter when the debug info is
+ // emitted into the PCM (-gmodules).
+ if (ModuleFormat == "raw" && !DebugTypeExtRefs) {
+#define DEBUGOPT(Name, Bits, Default) Name = Default;
+#define VALUE_DEBUGOPT(Name, Bits, Default) Name = Default;
+#define ENUM_DEBUGOPT(Name, Type, Bits, Default) set##Name(Default);
+#define BENIGN_DEBUGOPT(Name, Bits, Default)
+#define BENIGN_VALUE_DEBUGOPT(Name, Bits, Default)
+#define BENIGN_ENUM_DEBUGOPT(Name, Type, Bits, Default)
+#include "clang/Basic/DebugOptions.def"
+ }
+
+ RelocationModel = llvm::Reloc::PIC_;
+ memcpy(CoverageVersion, "408*", 4);
+}
+
} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Cuda.cpp b/contrib/llvm-project/clang/lib/Basic/Cuda.cpp
index 766135bcb376..1b1da6a1356f 100644
--- a/contrib/llvm-project/clang/lib/Basic/Cuda.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Cuda.cpp
@@ -1,60 +1,72 @@
#include "clang/Basic/Cuda.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/VersionTuple.h"
namespace clang {
-const char *CudaVersionToString(CudaVersion V) {
- switch (V) {
- case CudaVersion::UNKNOWN:
- return "unknown";
- case CudaVersion::CUDA_70:
- return "7.0";
- case CudaVersion::CUDA_75:
- return "7.5";
- case CudaVersion::CUDA_80:
- return "8.0";
- case CudaVersion::CUDA_90:
- return "9.0";
- case CudaVersion::CUDA_91:
- return "9.1";
- case CudaVersion::CUDA_92:
- return "9.2";
- case CudaVersion::CUDA_100:
- return "10.0";
- case CudaVersion::CUDA_101:
- return "10.1";
- case CudaVersion::CUDA_102:
- return "10.2";
- case CudaVersion::CUDA_110:
- return "11.0";
- case CudaVersion::CUDA_111:
- return "11.1";
- case CudaVersion::CUDA_112:
- return "11.2";
+struct CudaVersionMapEntry {
+ const char *Name;
+ CudaVersion Version;
+ llvm::VersionTuple TVersion;
+};
+#define CUDA_ENTRY(major, minor) \
+ { \
+#major "." #minor, CudaVersion::CUDA_##major##minor, \
+ llvm::VersionTuple(major, minor) \
}
- llvm_unreachable("invalid enum");
+
+static const CudaVersionMapEntry CudaNameVersionMap[] = {
+ CUDA_ENTRY(7, 0),
+ CUDA_ENTRY(7, 5),
+ CUDA_ENTRY(8, 0),
+ CUDA_ENTRY(9, 0),
+ CUDA_ENTRY(9, 1),
+ CUDA_ENTRY(9, 2),
+ CUDA_ENTRY(10, 0),
+ CUDA_ENTRY(10, 1),
+ CUDA_ENTRY(10, 2),
+ CUDA_ENTRY(11, 0),
+ CUDA_ENTRY(11, 1),
+ CUDA_ENTRY(11, 2),
+ CUDA_ENTRY(11, 3),
+ CUDA_ENTRY(11, 4),
+ CUDA_ENTRY(11, 5),
+ CUDA_ENTRY(11, 6),
+ CUDA_ENTRY(11, 7),
+ CUDA_ENTRY(11, 8),
+ CUDA_ENTRY(12, 0),
+ CUDA_ENTRY(12, 1),
+ CUDA_ENTRY(12, 2),
+ CUDA_ENTRY(12, 3),
+ {"", CudaVersion::NEW, llvm::VersionTuple(std::numeric_limits<int>::max())},
+ {"unknown", CudaVersion::UNKNOWN, {}} // End of list tombstone.
+};
+#undef CUDA_ENTRY
+
+const char *CudaVersionToString(CudaVersion V) {
+ for (auto *I = CudaNameVersionMap; I->Version != CudaVersion::UNKNOWN; ++I)
+ if (I->Version == V)
+ return I->Name;
+
+ return CudaVersionToString(CudaVersion::UNKNOWN);
}
CudaVersion CudaStringToVersion(const llvm::Twine &S) {
- return llvm::StringSwitch<CudaVersion>(S.str())
- .Case("7.0", CudaVersion::CUDA_70)
- .Case("7.5", CudaVersion::CUDA_75)
- .Case("8.0", CudaVersion::CUDA_80)
- .Case("9.0", CudaVersion::CUDA_90)
- .Case("9.1", CudaVersion::CUDA_91)
- .Case("9.2", CudaVersion::CUDA_92)
- .Case("10.0", CudaVersion::CUDA_100)
- .Case("10.1", CudaVersion::CUDA_101)
- .Case("10.2", CudaVersion::CUDA_102)
- .Case("11.0", CudaVersion::CUDA_110)
- .Case("11.1", CudaVersion::CUDA_111)
- .Case("11.2", CudaVersion::CUDA_112)
- .Default(CudaVersion::UNKNOWN);
+ std::string VS = S.str();
+ for (auto *I = CudaNameVersionMap; I->Version != CudaVersion::UNKNOWN; ++I)
+ if (I->Name == VS)
+ return I->Version;
+ return CudaVersion::UNKNOWN;
+}
+
+CudaVersion ToCudaVersion(llvm::VersionTuple Version) {
+ for (auto *I = CudaNameVersionMap; I->Version != CudaVersion::UNKNOWN; ++I)
+ if (I->TVersion == Version)
+ return I->Version;
+ return CudaVersion::UNKNOWN;
}
namespace {
@@ -80,6 +92,10 @@ static const CudaArchToStringMap arch_names[] = {
SM(70), SM(72), // Volta
SM(75), // Turing
SM(80), SM(86), // Ampere
+ SM(87), // Jetson/Drive AGX Orin
+ SM(89), // Ada Lovelace
+ SM(90), // Hopper
+ SM(90a), // Hopper
GFX(600), // gfx600
GFX(601), // gfx601
GFX(602), // gfx602
@@ -102,6 +118,9 @@ static const CudaArchToStringMap arch_names[] = {
GFX(909), // gfx909
GFX(90a), // gfx90a
GFX(90c), // gfx90c
+ GFX(940), // gfx940
+ GFX(941), // gfx941
+ GFX(942), // gfx942
GFX(1010), // gfx1010
GFX(1011), // gfx1011
GFX(1012), // gfx1012
@@ -112,6 +131,16 @@ static const CudaArchToStringMap arch_names[] = {
GFX(1033), // gfx1033
GFX(1034), // gfx1034
GFX(1035), // gfx1035
+ GFX(1036), // gfx1036
+ GFX(1100), // gfx1100
+ GFX(1101), // gfx1101
+ GFX(1102), // gfx1102
+ GFX(1103), // gfx1103
+ GFX(1150), // gfx1150
+ GFX(1151), // gfx1151
+ GFX(1200), // gfx1200
+ GFX(1201), // gfx1201
+ {CudaArch::Generic, "generic", ""},
// clang-format on
};
#undef SM
@@ -178,6 +207,13 @@ CudaVersion MinVersionForCudaArch(CudaArch A) {
return CudaVersion::CUDA_110;
case CudaArch::SM_86:
return CudaVersion::CUDA_111;
+ case CudaArch::SM_87:
+ return CudaVersion::CUDA_114;
+ case CudaArch::SM_89:
+ case CudaArch::SM_90:
+ return CudaVersion::CUDA_118;
+ case CudaArch::SM_90a:
+ return CudaVersion::CUDA_120;
default:
llvm_unreachable("invalid enum");
}
@@ -186,7 +222,7 @@ CudaVersion MinVersionForCudaArch(CudaArch A) {
CudaVersion MaxVersionForCudaArch(CudaArch A) {
// AMD GPUs do not depend on CUDA versions.
if (IsAMDGpuArch(A))
- return CudaVersion::LATEST;
+ return CudaVersion::NEW;
switch (A) {
case CudaArch::UNKNOWN:
@@ -194,41 +230,14 @@ CudaVersion MaxVersionForCudaArch(CudaArch A) {
case CudaArch::SM_20:
case CudaArch::SM_21:
return CudaVersion::CUDA_80;
- default:
- return CudaVersion::LATEST;
- }
-}
-
-CudaVersion ToCudaVersion(llvm::VersionTuple Version) {
- int IVer =
- Version.getMajor() * 10 + Version.getMinor().getValueOr(0);
- switch(IVer) {
- case 70:
- return CudaVersion::CUDA_70;
- case 75:
- return CudaVersion::CUDA_75;
- case 80:
- return CudaVersion::CUDA_80;
- case 90:
- return CudaVersion::CUDA_90;
- case 91:
- return CudaVersion::CUDA_91;
- case 92:
- return CudaVersion::CUDA_92;
- case 100:
- return CudaVersion::CUDA_100;
- case 101:
- return CudaVersion::CUDA_101;
- case 102:
+ case CudaArch::SM_30:
+ case CudaArch::SM_32:
return CudaVersion::CUDA_102;
- case 110:
- return CudaVersion::CUDA_110;
- case 111:
- return CudaVersion::CUDA_111;
- case 112:
- return CudaVersion::CUDA_112;
+ case CudaArch::SM_35:
+ case CudaArch::SM_37:
+ return CudaVersion::CUDA_118;
default:
- return CudaVersion::UNKNOWN;
+ return CudaVersion::NEW;
}
}
diff --git a/contrib/llvm-project/clang/lib/Basic/DarwinSDKInfo.cpp b/contrib/llvm-project/clang/lib/Basic/DarwinSDKInfo.cpp
index fe35f77782c9..00aa5f9e63cd 100644
--- a/contrib/llvm-project/clang/lib/Basic/DarwinSDKInfo.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/DarwinSDKInfo.cpp
@@ -11,12 +11,13 @@
#include "llvm/Support/JSON.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
+#include <optional>
using namespace clang;
-Optional<VersionTuple> DarwinSDKInfo::RelatedTargetVersionMapping::map(
+std::optional<VersionTuple> DarwinSDKInfo::RelatedTargetVersionMapping::map(
const VersionTuple &Key, const VersionTuple &MinimumValue,
- Optional<VersionTuple> MaximumValue) const {
+ std::optional<VersionTuple> MaximumValue) const {
if (Key < MinimumKeyVersion)
return MinimumValue;
if (Key > MaximumKeyVersion)
@@ -29,11 +30,11 @@ Optional<VersionTuple> DarwinSDKInfo::RelatedTargetVersionMapping::map(
// the major-only check.
if (Key.getMinor())
return map(VersionTuple(Key.getMajor()), MinimumValue, MaximumValue);
- // If this a major only key, return None for a missing entry.
- return None;
+ // If this a major only key, return std::nullopt for a missing entry.
+ return std::nullopt;
}
-Optional<DarwinSDKInfo::RelatedTargetVersionMapping>
+std::optional<DarwinSDKInfo::RelatedTargetVersionMapping>
DarwinSDKInfo::RelatedTargetVersionMapping::parseJSON(
const llvm::json::Object &Obj, VersionTuple MaximumDeploymentTarget) {
VersionTuple Min = VersionTuple(std::numeric_limits<unsigned>::max());
@@ -45,7 +46,7 @@ DarwinSDKInfo::RelatedTargetVersionMapping::parseJSON(
llvm::VersionTuple KeyVersion;
llvm::VersionTuple ValueVersion;
if (KeyVersion.tryParse(KV.getFirst()) || ValueVersion.tryParse(*Val))
- return None;
+ return std::nullopt;
Mapping[KeyVersion.normalize()] = ValueVersion;
if (KeyVersion < Min)
Min = KeyVersion;
@@ -56,39 +57,59 @@ DarwinSDKInfo::RelatedTargetVersionMapping::parseJSON(
}
}
if (Mapping.empty())
- return None;
+ return std::nullopt;
return RelatedTargetVersionMapping(
Min, Max, MinValue, MaximumDeploymentTarget, std::move(Mapping));
}
-static Optional<VersionTuple> getVersionKey(const llvm::json::Object &Obj,
- StringRef Key) {
+static std::optional<VersionTuple> getVersionKey(const llvm::json::Object &Obj,
+ StringRef Key) {
auto Value = Obj.getString(Key);
if (!Value)
- return None;
+ return std::nullopt;
VersionTuple Version;
if (Version.tryParse(*Value))
- return None;
+ return std::nullopt;
return Version;
}
-Optional<DarwinSDKInfo>
+std::optional<DarwinSDKInfo>
DarwinSDKInfo::parseDarwinSDKSettingsJSON(const llvm::json::Object *Obj) {
auto Version = getVersionKey(*Obj, "Version");
if (!Version)
- return None;
+ return std::nullopt;
auto MaximumDeploymentVersion =
getVersionKey(*Obj, "MaximumDeploymentTarget");
if (!MaximumDeploymentVersion)
- return None;
- llvm::DenseMap<OSEnvPair::StorageType, Optional<RelatedTargetVersionMapping>>
+ return std::nullopt;
+ llvm::DenseMap<OSEnvPair::StorageType,
+ std::optional<RelatedTargetVersionMapping>>
VersionMappings;
if (const auto *VM = Obj->getObject("VersionMap")) {
+ // FIXME: Generalize this out beyond iOS-deriving targets.
+ // Look for ios_<targetos> version mapping for targets that derive from ios.
+ for (const auto &KV : *VM) {
+ auto Pair = StringRef(KV.getFirst()).split("_");
+ if (Pair.first.compare_insensitive("ios") == 0) {
+ llvm::Triple TT(llvm::Twine("--") + Pair.second.lower());
+ if (TT.getOS() != llvm::Triple::UnknownOS) {
+ auto Mapping = RelatedTargetVersionMapping::parseJSON(
+ *KV.getSecond().getAsObject(), *MaximumDeploymentVersion);
+ if (Mapping)
+ VersionMappings[OSEnvPair(llvm::Triple::IOS,
+ llvm::Triple::UnknownEnvironment,
+ TT.getOS(),
+ llvm::Triple::UnknownEnvironment)
+ .Value] = std::move(Mapping);
+ }
+ }
+ }
+
if (const auto *Mapping = VM->getObject("macOS_iOSMac")) {
auto VersionMap = RelatedTargetVersionMapping::parseJSON(
*Mapping, *MaximumDeploymentVersion);
if (!VersionMap)
- return None;
+ return std::nullopt;
VersionMappings[OSEnvPair::macOStoMacCatalystPair().Value] =
std::move(VersionMap);
}
@@ -96,7 +117,7 @@ DarwinSDKInfo::parseDarwinSDKSettingsJSON(const llvm::json::Object *Obj) {
auto VersionMap = RelatedTargetVersionMapping::parseJSON(
*Mapping, *MaximumDeploymentVersion);
if (!VersionMap)
- return None;
+ return std::nullopt;
VersionMappings[OSEnvPair::macCatalystToMacOSPair().Value] =
std::move(VersionMap);
}
@@ -107,7 +128,7 @@ DarwinSDKInfo::parseDarwinSDKSettingsJSON(const llvm::json::Object *Obj) {
std::move(VersionMappings));
}
-Expected<Optional<DarwinSDKInfo>>
+Expected<std::optional<DarwinSDKInfo>>
clang::parseDarwinSDKInfo(llvm::vfs::FileSystem &VFS, StringRef SDKRootPath) {
llvm::SmallString<256> Filepath = SDKRootPath;
llvm::sys::path::append(Filepath, "SDKSettings.json");
@@ -115,7 +136,7 @@ clang::parseDarwinSDKInfo(llvm::vfs::FileSystem &VFS, StringRef SDKRootPath) {
VFS.getBufferForFile(Filepath);
if (!File) {
// If the file couldn't be read, assume it just doesn't exist.
- return None;
+ return std::nullopt;
}
Expected<llvm::json::Value> Result =
llvm::json::parse(File.get()->getBuffer());
diff --git a/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp b/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp
index d3b2122e9c59..0208ccc31bd7 100644
--- a/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp
@@ -25,8 +25,9 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/CrashRecoveryContext.h"
-#include "llvm/Support/Locale.h"
+#include "llvm/Support/Unicode.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
@@ -42,28 +43,12 @@ using namespace clang;
const StreamingDiagnostic &clang::operator<<(const StreamingDiagnostic &DB,
DiagNullabilityKind nullability) {
- StringRef string;
- switch (nullability.first) {
- case NullabilityKind::NonNull:
- string = nullability.second ? "'nonnull'" : "'_Nonnull'";
- break;
-
- case NullabilityKind::Nullable:
- string = nullability.second ? "'nullable'" : "'_Nullable'";
- break;
-
- case NullabilityKind::Unspecified:
- string = nullability.second ? "'null_unspecified'" : "'_Null_unspecified'";
- break;
-
- case NullabilityKind::NullableResult:
- assert(!nullability.second &&
- "_Nullable_result isn't supported as context-sensitive keyword");
- string = "_Nullable_result";
- break;
- }
-
- DB.AddString(string);
+ DB.AddString(
+ ("'" +
+ getNullabilitySpelling(nullability.first,
+ /*isContextSensitive=*/nullability.second) +
+ "'")
+ .str());
return DB;
}
@@ -130,7 +115,7 @@ bool DiagnosticsEngine::popMappings(SourceLocation Loc) {
return true;
}
-void DiagnosticsEngine::Reset() {
+void DiagnosticsEngine::Reset(bool soft /*=false*/) {
ErrorOccurred = false;
UncompilableErrorOccurred = false;
FatalErrorOccurred = false;
@@ -145,15 +130,17 @@ void DiagnosticsEngine::Reset() {
LastDiagLevel = DiagnosticIDs::Ignored;
DelayedDiagID = 0;
- // Clear state related to #pragma diagnostic.
- DiagStates.clear();
- DiagStatesByLoc.clear();
- DiagStateOnPushStack.clear();
+ if (!soft) {
+ // Clear state related to #pragma diagnostic.
+ DiagStates.clear();
+ DiagStatesByLoc.clear();
+ DiagStateOnPushStack.clear();
- // Create a DiagState and DiagStatePoint representing diagnostic changes
- // through command-line.
- DiagStates.emplace_back();
- DiagStatesByLoc.appendFirst(&DiagStates.back());
+ // Create a DiagState and DiagStatePoint representing diagnostic changes
+ // through command-line.
+ DiagStates.emplace_back();
+ DiagStatesByLoc.appendFirst(&DiagStates.back());
+ }
}
void DiagnosticsEngine::SetDelayedDiagnostic(unsigned DiagID, StringRef Arg1,
@@ -173,6 +160,18 @@ void DiagnosticsEngine::ReportDelayed() {
Report(ID) << DelayedDiagArg1 << DelayedDiagArg2 << DelayedDiagArg3;
}
+DiagnosticMapping &
+DiagnosticsEngine::DiagState::getOrAddMapping(diag::kind Diag) {
+ std::pair<iterator, bool> Result =
+ DiagMap.insert(std::make_pair(Diag, DiagnosticMapping()));
+
+ // Initialize the entry if we added it.
+ if (Result.second)
+ Result.first->second = DiagnosticIDs::getDefaultMapping(Diag);
+
+ return Result.first->second;
+}
+
void DiagnosticsEngine::DiagStateMap::appendFirst(DiagState *State) {
assert(Files.empty() && "not first");
FirstDiagState = CurDiagState = State;
@@ -374,6 +373,12 @@ void DiagnosticsEngine::setSeverity(diag::kind Diag, diag::Severity Map,
DiagnosticMapping Mapping = makeUserMapping(Map, L);
Mapping.setUpgradedFromWarning(WasUpgradedFromWarning);
+ // Make sure we propagate the NoWarningAsError flag from an existing
+ // mapping (which may be the default mapping).
+ DiagnosticMapping &Info = GetCurDiagState()->getOrAddMapping(Diag);
+ Mapping.setNoWarningAsError(Info.hasNoWarningAsError() ||
+ Mapping.hasNoWarningAsError());
+
// Common case; setting all the diagnostics of a group in one place.
if ((L.isInvalid() || L == DiagStatesByLoc.getCurDiagStateLoc()) &&
DiagStatesByLoc.getCurDiagState()) {
@@ -408,6 +413,14 @@ bool DiagnosticsEngine::setSeverityForGroup(diag::Flavor Flavor,
return false;
}
+bool DiagnosticsEngine::setSeverityForGroup(diag::Flavor Flavor,
+ diag::Group Group,
+ diag::Severity Map,
+ SourceLocation Loc) {
+ return setSeverityForGroup(Flavor, Diags->getWarningOptionForGroup(Group),
+ Map, Loc);
+}
+
bool DiagnosticsEngine::setDiagnosticGroupWarningAsError(StringRef Group,
bool Enabled) {
// If we are enabling this feature, just set the diagnostic mappings to map to
@@ -776,8 +789,8 @@ static const char *getTokenDescForDiagnostic(tok::TokenKind Kind) {
/// array.
void Diagnostic::
FormatDiagnostic(SmallVectorImpl<char> &OutStr) const {
- if (!StoredDiagMessage.empty()) {
- OutStr.append(StoredDiagMessage.begin(), StoredDiagMessage.end());
+ if (StoredDiagMessage.has_value()) {
+ OutStr.append(StoredDiagMessage->begin(), StoredDiagMessage->end());
return;
}
@@ -787,6 +800,51 @@ FormatDiagnostic(SmallVectorImpl<char> &OutStr) const {
FormatDiagnostic(Diag.begin(), Diag.end(), OutStr);
}
+/// EscapeStringForDiagnostic - Append Str to the diagnostic buffer,
+/// escaping non-printable characters and ill-formed code unit sequences.
+void clang::EscapeStringForDiagnostic(StringRef Str,
+ SmallVectorImpl<char> &OutStr) {
+ OutStr.reserve(OutStr.size() + Str.size());
+ auto *Begin = reinterpret_cast<const unsigned char *>(Str.data());
+ llvm::raw_svector_ostream OutStream(OutStr);
+ const unsigned char *End = Begin + Str.size();
+ while (Begin != End) {
+ // ASCII case
+ if (isPrintable(*Begin) || isWhitespace(*Begin)) {
+ OutStream << *Begin;
+ ++Begin;
+ continue;
+ }
+ if (llvm::isLegalUTF8Sequence(Begin, End)) {
+ llvm::UTF32 CodepointValue;
+ llvm::UTF32 *CpPtr = &CodepointValue;
+ const unsigned char *CodepointBegin = Begin;
+ const unsigned char *CodepointEnd =
+ Begin + llvm::getNumBytesForUTF8(*Begin);
+ llvm::ConversionResult Res = llvm::ConvertUTF8toUTF32(
+ &Begin, CodepointEnd, &CpPtr, CpPtr + 1, llvm::strictConversion);
+ (void)Res;
+ assert(
+ llvm::conversionOK == Res &&
+ "the sequence is legal UTF-8 but we couldn't convert it to UTF-32");
+ assert(Begin == CodepointEnd &&
+ "we must be further along in the string now");
+ if (llvm::sys::unicode::isPrintable(CodepointValue) ||
+ llvm::sys::unicode::isFormatting(CodepointValue)) {
+ OutStr.append(CodepointBegin, CodepointEnd);
+ continue;
+ }
+ // Unprintable code point.
+ OutStream << "<U+" << llvm::format_hex_no_prefix(CodepointValue, 4, true)
+ << ">";
+ continue;
+ }
+ // Invalid code unit.
+ OutStream << "<" << llvm::format_hex_no_prefix(*Begin, 2, true) << ">";
+ ++Begin;
+ }
+}
+
void Diagnostic::
FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
SmallVectorImpl<char> &OutStr) const {
@@ -797,11 +855,7 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
StringRef(DiagStr, DiagEnd - DiagStr).equals("%0") &&
getArgKind(0) == DiagnosticsEngine::ak_std_string) {
const std::string &S = getArgStdStr(0);
- for (char c : S) {
- if (llvm::sys::locale::isPrint(c) || c == '\t') {
- OutStr.push_back(c);
- }
- }
+ EscapeStringForDiagnostic(S, OutStr);
return;
}
@@ -908,7 +962,7 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
case DiagnosticsEngine::ak_std_string: {
const std::string &S = getArgStdStr(ArgNo);
assert(ModifierLen == 0 && "No modifiers for strings yet");
- OutStr.append(S.begin(), S.end());
+ EscapeStringForDiagnostic(S, OutStr);
break;
}
case DiagnosticsEngine::ak_c_string: {
@@ -918,13 +972,12 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
// Don't crash if get passed a null pointer by accident.
if (!S)
S = "(null)";
-
- OutStr.append(S, S + strlen(S));
+ EscapeStringForDiagnostic(S, OutStr);
break;
}
// ---- INTEGERS ----
case DiagnosticsEngine::ak_sint: {
- int Val = getArgSInt(ArgNo);
+ int64_t Val = getArgSInt(ArgNo);
if (ModifierIs(Modifier, ModifierLen, "select")) {
HandleSelectModifier(*this, (unsigned)Val, Argument, ArgumentLen,
@@ -943,7 +996,7 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
break;
}
case DiagnosticsEngine::ak_uint: {
- unsigned Val = getArgUInt(ArgNo);
+ uint64_t Val = getArgUInt(ArgNo);
if (ModifierIs(Modifier, ModifierLen, "select")) {
HandleSelectModifier(*this, Val, Argument, ArgumentLen, OutStr);
@@ -969,13 +1022,13 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
if (const char *S = tok::getPunctuatorSpelling(Kind))
// Quoted token spelling for punctuators.
Out << '\'' << S << '\'';
- else if (const char *S = tok::getKeywordSpelling(Kind))
+ else if ((S = tok::getKeywordSpelling(Kind)))
// Unquoted token spelling for keywords.
Out << S;
- else if (const char *S = getTokenDescForDiagnostic(Kind))
+ else if ((S = getTokenDescForDiagnostic(Kind)))
// Unquoted translatable token name.
Out << S;
- else if (const char *S = tok::getTokenName(Kind))
+ else if ((S = tok::getTokenName(Kind)))
// Debug name, shouldn't appear in user-facing diagnostics.
Out << '<' << S << '>';
else
@@ -1124,6 +1177,14 @@ StoredDiagnostic::StoredDiagnostic(DiagnosticsEngine::Level Level, unsigned ID,
{
}
+llvm::raw_ostream &clang::operator<<(llvm::raw_ostream &OS,
+ const StoredDiagnostic &SD) {
+ if (SD.getLocation().hasManager())
+ OS << SD.getLocation().printToString(SD.getLocation().getManager()) << ": ";
+ OS << SD.getMessage();
+ return OS;
+}
+
/// IncludeInDiagnosticCounts - This method (whose default implementation
/// returns true) indicates whether the diagnostics handled by this
/// DiagnosticConsumer should be included in the number of diagnostics
diff --git a/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp b/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp
index c333076d2efc..6c7bd50eefb7 100644
--- a/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp
@@ -18,6 +18,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ErrorHandling.h"
#include <map>
+#include <optional>
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -33,7 +34,7 @@ struct StaticDiagInfoRec;
// platforms. See "How To Write Shared Libraries" by Ulrich Drepper.
struct StaticDiagInfoDescriptionStringTable {
#define DIAG(ENUM, CLASS, DEFAULT_SEVERITY, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
char ENUM##_desc[sizeof(DESC)];
// clang-format off
#include "clang/Basic/DiagnosticCommonKinds.inc"
@@ -54,7 +55,7 @@ struct StaticDiagInfoDescriptionStringTable {
const StaticDiagInfoDescriptionStringTable StaticDiagInfoDescriptions = {
#define DIAG(ENUM, CLASS, DEFAULT_SEVERITY, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
DESC,
// clang-format off
#include "clang/Basic/DiagnosticCommonKinds.inc"
@@ -79,7 +80,7 @@ extern const StaticDiagInfoRec StaticDiagInfo[];
// StaticDiagInfoRec would have extra padding on 64-bit platforms.
const uint32_t StaticDiagInfoDescriptionOffsets[] = {
#define DIAG(ENUM, CLASS, DEFAULT_SEVERITY, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
offsetof(StaticDiagInfoDescriptionStringTable, ENUM##_desc),
// clang-format off
#include "clang/Basic/DiagnosticCommonKinds.inc"
@@ -115,6 +116,7 @@ struct StaticDiagInfoRec {
uint8_t Category : 6;
uint8_t WarnNoWerror : 1;
uint8_t WarnShowInSystemHeader : 1;
+ uint8_t WarnShowInSystemMacro : 1;
uint16_t OptionGroupIndex : 15;
uint16_t Deferrable : 1;
@@ -170,7 +172,7 @@ VALIDATE_DIAG_SIZE(REFACTORING)
const StaticDiagInfoRec StaticDiagInfo[] = {
// clang-format off
#define DIAG(ENUM, CLASS, DEFAULT_SEVERITY, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, SHOWINSYSMACRO, DEFERRABLE, CATEGORY) \
{ \
diag::ENUM, \
DEFAULT_SEVERITY, \
@@ -179,6 +181,7 @@ const StaticDiagInfoRec StaticDiagInfo[] = {
CATEGORY, \
NOWERROR, \
SHOWINSYSHEADER, \
+ SHOWINSYSMACRO, \
GROUP, \
DEFERRABLE, \
STR_SIZE(DESC, uint16_t)},
@@ -200,7 +203,7 @@ const StaticDiagInfoRec StaticDiagInfo[] = {
} // namespace
-static const unsigned StaticDiagInfoSize = llvm::array_lengthof(StaticDiagInfo);
+static const unsigned StaticDiagInfoSize = std::size(StaticDiagInfo);
/// GetDiagInfo - Return the StaticDiagInfoRec entry for the specified DiagID,
/// or null if the ID is invalid.
@@ -253,7 +256,7 @@ CATEGORY(REFACTORING, ANALYSIS)
return Found;
}
-static DiagnosticMapping GetDefaultDiagMapping(unsigned DiagID) {
+DiagnosticMapping DiagnosticIDs::getDefaultMapping(unsigned DiagID) {
DiagnosticMapping Info = DiagnosticMapping::Make(
diag::Severity::Fatal, /*IsUser=*/false, /*IsPragma=*/false);
@@ -290,21 +293,6 @@ namespace {
};
}
-// Unfortunately, the split between DiagnosticIDs and Diagnostic is not
-// particularly clean, but for now we just implement this method here so we can
-// access GetDefaultDiagMapping.
-DiagnosticMapping &
-DiagnosticsEngine::DiagState::getOrAddMapping(diag::kind Diag) {
- std::pair<iterator, bool> Result =
- DiagMap.insert(std::make_pair(Diag, DiagnosticMapping()));
-
- // Initialize the entry if we added it.
- if (Result.second)
- Result.first->second = GetDefaultDiagMapping(Diag);
-
- return Result.first->second;
-}
-
static const StaticDiagCategoryRec CategoryNameTable[] = {
#define GET_CATEGORY_TABLE
#define CATEGORY(X, ENUM) { X, STR_SIZE(X, uint8_t) },
@@ -315,7 +303,7 @@ static const StaticDiagCategoryRec CategoryNameTable[] = {
/// getNumberOfCategories - Return the number of categories
unsigned DiagnosticIDs::getNumberOfCategories() {
- return llvm::array_lengthof(CategoryNameTable) - 1;
+ return std::size(CategoryNameTable) - 1;
}
/// getCategoryNameFromID - Given a category ID, return the name of the
@@ -446,7 +434,7 @@ bool DiagnosticIDs::isBuiltinExtensionDiag(unsigned DiagID,
return false;
EnabledByDefault =
- GetDefaultDiagMapping(DiagID).getSeverity() != diag::Severity::Ignored;
+ getDefaultMapping(DiagID).getSeverity() != diag::Severity::Ignored;
return true;
}
@@ -454,7 +442,7 @@ bool DiagnosticIDs::isDefaultMappingAsError(unsigned DiagID) {
if (DiagID >= diag::DIAG_UPPER_LIMIT)
return false;
- return GetDefaultDiagMapping(DiagID).getSeverity() >= diag::Severity::Error;
+ return getDefaultMapping(DiagID).getSeverity() >= diag::Severity::Error;
}
/// getDescription - Given a diagnostic ID, return a description of the
@@ -544,7 +532,7 @@ DiagnosticIDs::getDiagnosticSeverity(unsigned DiagID, SourceLocation Loc,
if (Result == diag::Severity::Ignored)
return Result;
- // Honor -w: this disables all messages which which are not Error/Fatal by
+ // Honor -w: this disables all messages which are not Error/Fatal by
// default (disregarding attempts to upgrade severity from Warning to Error),
// as well as disabling all messages which are currently mapped to Warning
// (whether by default or downgraded from Error via e.g. -Wno-error or #pragma
@@ -586,6 +574,13 @@ DiagnosticIDs::getDiagnosticSeverity(unsigned DiagID, SourceLocation Loc,
Diag.getSourceManager().getExpansionLoc(Loc)))
return diag::Severity::Ignored;
+ // We also ignore warnings due to system macros
+ bool ShowInSystemMacro =
+ !GetDiagInfo(DiagID) || GetDiagInfo(DiagID)->WarnShowInSystemMacro;
+ if (State->SuppressSystemWarnings && !ShowInSystemMacro && Loc.isValid() &&
+ Diag.getSourceManager().isInSystemMacro(Loc))
+ return diag::Severity::Ignored;
+
return Result;
}
@@ -598,6 +593,7 @@ namespace {
uint16_t NameOffset;
uint16_t Members;
uint16_t SubGroups;
+ StringRef Documentation;
// String is stored with a pascal-style length byte.
StringRef getName() const {
@@ -609,22 +605,47 @@ namespace {
// Second the table of options, sorted by name for fast binary lookup.
static const WarningOption OptionTable[] = {
-#define GET_DIAG_TABLE
+#define DIAG_ENTRY(GroupName, FlagNameOffset, Members, SubGroups, Docs) \
+ {FlagNameOffset, Members, SubGroups, Docs},
#include "clang/Basic/DiagnosticGroups.inc"
-#undef GET_DIAG_TABLE
+#undef DIAG_ENTRY
};
+/// Given a diagnostic group ID, return its documentation.
+StringRef DiagnosticIDs::getWarningOptionDocumentation(diag::Group Group) {
+ return OptionTable[static_cast<int>(Group)].Documentation;
+}
+
+StringRef DiagnosticIDs::getWarningOptionForGroup(diag::Group Group) {
+ return OptionTable[static_cast<int>(Group)].getName();
+}
+
+std::optional<diag::Group>
+DiagnosticIDs::getGroupForWarningOption(StringRef Name) {
+ const auto *Found = llvm::partition_point(
+ OptionTable, [=](const WarningOption &O) { return O.getName() < Name; });
+ if (Found == std::end(OptionTable) || Found->getName() != Name)
+ return std::nullopt;
+ return static_cast<diag::Group>(Found - OptionTable);
+}
+
+std::optional<diag::Group> DiagnosticIDs::getGroupForDiag(unsigned DiagID) {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return static_cast<diag::Group>(Info->getOptionGroupIndex());
+ return std::nullopt;
+}
+
/// getWarningOptionForDiag - Return the lowest-level warning option that
/// enables the specified diagnostic. If there is no -Wfoo flag that controls
/// the diagnostic, this returns null.
StringRef DiagnosticIDs::getWarningOptionForDiag(unsigned DiagID) {
- if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
- return OptionTable[Info->getOptionGroupIndex()].getName();
+ if (auto G = getGroupForDiag(DiagID))
+ return getWarningOptionForGroup(*G);
return StringRef();
}
std::vector<std::string> DiagnosticIDs::getDiagnosticFlags() {
- std::vector<std::string> Res;
+ std::vector<std::string> Res{"-W", "-Wno-"};
for (size_t I = 1; DiagGroupNames[I] != '\0';) {
std::string Diag(DiagGroupNames + I + 1, DiagGroupNames[I]);
I += DiagGroupNames[I] + 1;
@@ -668,12 +689,10 @@ static bool getDiagnosticsInGroup(diag::Flavor Flavor,
bool
DiagnosticIDs::getDiagnosticsInGroup(diag::Flavor Flavor, StringRef Group,
SmallVectorImpl<diag::kind> &Diags) const {
- auto Found = llvm::partition_point(
- OptionTable, [=](const WarningOption &O) { return O.getName() < Group; });
- if (Found == std::end(OptionTable) || Found->getName() != Group)
- return true; // Option not found.
-
- return ::getDiagnosticsInGroup(Flavor, Found, Diags);
+ if (std::optional<diag::Group> G = getGroupForWarningOption(Group))
+ return ::getDiagnosticsInGroup(
+ Flavor, &OptionTable[static_cast<unsigned>(*G)], Diags);
+ return true;
}
void DiagnosticIDs::getAllDiagnostics(diag::Flavor Flavor,
@@ -686,7 +705,7 @@ void DiagnosticIDs::getAllDiagnostics(diag::Flavor Flavor,
StringRef DiagnosticIDs::getNearestOption(diag::Flavor Flavor,
StringRef Group) {
StringRef Best;
- unsigned BestDistance = Group.size() + 1; // Sanity threshold.
+ unsigned BestDistance = Group.size() + 1; // Maximum threshold.
for (const WarningOption &O : OptionTable) {
// Don't suggest ignored warning flags.
if (!O.Members && !O.SubGroups)
@@ -834,5 +853,5 @@ bool DiagnosticIDs::isUnrecoverable(unsigned DiagID) const {
bool DiagnosticIDs::isARCDiagnostic(unsigned DiagID) {
unsigned cat = getCategoryNumberForDiag(DiagID);
- return DiagnosticIDs::getCategoryNameFromID(cat).startswith("ARC ");
+ return DiagnosticIDs::getCategoryNameFromID(cat).starts_with("ARC ");
}
diff --git a/contrib/llvm-project/clang/lib/Basic/DiagnosticOptions.cpp b/contrib/llvm-project/clang/lib/Basic/DiagnosticOptions.cpp
index 68571f2cf94f..12e47ea0231e 100644
--- a/contrib/llvm-project/clang/lib/Basic/DiagnosticOptions.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/DiagnosticOptions.cpp
@@ -17,7 +17,7 @@
namespace clang {
raw_ostream &operator<<(raw_ostream &Out, DiagnosticLevelMask M) {
- using UT = std::underlying_type<DiagnosticLevelMask>::type;
+ using UT = std::underlying_type_t<DiagnosticLevelMask>;
return Out << static_cast<UT>(M);
}
diff --git a/contrib/llvm-project/clang/lib/Basic/FileManager.cpp b/contrib/llvm-project/clang/lib/Basic/FileManager.cpp
index 74cd2f295be6..974c8c22598f 100644
--- a/contrib/llvm-project/clang/lib/Basic/FileManager.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/FileManager.cpp
@@ -31,6 +31,7 @@
#include <climits>
#include <cstdint>
#include <cstdlib>
+#include <optional>
#include <string>
#include <utility>
@@ -105,10 +106,10 @@ void FileManager::addAncestorsAsVirtualDirs(StringRef Path) {
return;
// Add the virtual directory to the cache.
- auto UDE = std::make_unique<DirectoryEntry>();
+ auto *UDE = new (DirsAlloc.Allocate()) DirectoryEntry();
UDE->Name = NamedDirEnt.first();
- NamedDirEnt.second = *UDE.get();
- VirtualDirectoryEntries.push_back(std::move(UDE));
+ NamedDirEnt.second = *UDE;
+ VirtualDirectoryEntries.push_back(UDE);
// Recursively add the other ancestors.
addAncestorsAsVirtualDirs(DirName);
@@ -123,16 +124,16 @@ FileManager::getDirectoryRef(StringRef DirName, bool CacheFailure) {
DirName != llvm::sys::path::root_path(DirName) &&
llvm::sys::path::is_separator(DirName.back()))
DirName = DirName.substr(0, DirName.size()-1);
-#ifdef _WIN32
- // Fixing a problem with "clang C:test.c" on Windows.
- // Stat("C:") does not recognize "C:" as a valid directory
- std::string DirNameStr;
- if (DirName.size() > 1 && DirName.back() == ':' &&
- DirName.equals_insensitive(llvm::sys::path::root_name(DirName))) {
- DirNameStr = DirName.str() + '.';
- DirName = DirNameStr;
+ std::optional<std::string> DirNameStr;
+ if (is_style_windows(llvm::sys::path::Style::native)) {
+ // Fixing a problem with "clang C:test.c" on Windows.
+ // Stat("C:") does not recognize "C:" as a valid directory
+ if (DirName.size() > 1 && DirName.back() == ':' &&
+ DirName.equals_insensitive(llvm::sys::path::root_name(DirName))) {
+ DirNameStr = DirName.str() + '.';
+ DirName = *DirNameStr;
+ }
}
-#endif
++NumDirLookups;
@@ -172,14 +173,15 @@ FileManager::getDirectoryRef(StringRef DirName, bool CacheFailure) {
// same inode (this occurs on Unix-like systems when one dir is
// symlinked to another, for example) or the same path (on
// Windows).
- DirectoryEntry &UDE = UniqueRealDirs[Status.getUniqueID()];
+ DirectoryEntry *&UDE = UniqueRealDirs[Status.getUniqueID()];
- NamedDirEnt.second = UDE;
- if (UDE.getName().empty()) {
+ if (!UDE) {
// We don't have this directory yet, add it. We use the string
// key from the SeenDirEntries map as the string.
- UDE.Name = InterndDirName;
+ UDE = new (DirsAlloc.Allocate()) DirectoryEntry();
+ UDE->Name = InterndDirName;
}
+ NamedDirEnt.second = *UDE;
return DirectoryEntryRef(NamedDirEnt);
}
@@ -211,13 +213,7 @@ FileManager::getFileRef(StringRef Filename, bool openFile, bool CacheFailure) {
if (!SeenFileInsertResult.first->second)
return llvm::errorCodeToError(
SeenFileInsertResult.first->second.getError());
- // Construct and return and FileEntryRef, unless it's a redirect to another
- // filename.
- FileEntryRef::MapValue Value = *SeenFileInsertResult.first->second;
- if (LLVM_LIKELY(Value.V.is<FileEntry *>()))
- return FileEntryRef(*SeenFileInsertResult.first);
- return FileEntryRef(*reinterpret_cast<const FileEntryRef::MapEntry *>(
- Value.V.get<const void *>()));
+ return FileEntryRef(*SeenFileInsertResult.first);
}
// We've not seen this before. Fill it in.
@@ -268,42 +264,77 @@ FileManager::getFileRef(StringRef Filename, bool openFile, bool CacheFailure) {
// It exists. See if we have already opened a file with the same inode.
// This occurs when one dir is symlinked to another, for example.
- FileEntry &UFE = UniqueRealFiles[Status.getUniqueID()];
-
- if (Status.getName() == Filename) {
- // The name matches. Set the FileEntry.
- NamedFileEnt->second = FileEntryRef::MapValue(UFE, DirInfo);
+ FileEntry *&UFE = UniqueRealFiles[Status.getUniqueID()];
+ bool ReusingEntry = UFE != nullptr;
+ if (!UFE)
+ UFE = new (FilesAlloc.Allocate()) FileEntry();
+
+ if (!Status.ExposesExternalVFSPath || Status.getName() == Filename) {
+ // Use the requested name. Set the FileEntry.
+ NamedFileEnt->second = FileEntryRef::MapValue(*UFE, DirInfo);
} else {
// Name mismatch. We need a redirect. First grab the actual entry we want
// to return.
+ //
+ // This redirection logic intentionally leaks the external name of a
+ // redirected file that uses 'use-external-name' in \a
+ // vfs::RedirectionFileSystem. This allows clang to report the external
+ // name to users (in diagnostics) and to tools that don't have access to
+ // the VFS (in debug info and dependency '.d' files).
+ //
+ // FIXME: This is pretty complex and has some very complicated interactions
+ // with the rest of clang. It's also inconsistent with how "real"
+ // filesystems behave and confuses parts of clang expect to see the
+ // name-as-accessed on the \a FileEntryRef.
+ //
+ // A potential plan to remove this is as follows -
+ // - Update callers such as `HeaderSearch::findUsableModuleForHeader()`
+ // to explicitly use the `getNameAsRequested()` rather than just using
+ // `getName()`.
+ // - Add a `FileManager::getExternalPath` API for explicitly getting the
+ // remapped external filename when there is one available. Adopt it in
+ // callers like diagnostics/deps reporting instead of calling
+ // `getName()` directly.
+ // - Switch the meaning of `FileEntryRef::getName()` to get the requested
+ // name, not the external name. Once that sticks, revert callers that
+ // want the requested name back to calling `getName()`.
+ // - Update the VFS to always return the requested name. This could also
+ // return the external name, or just have an API to request it
+ // lazily. The latter has the benefit of making accesses of the
+ // external path easily tracked, but may also require extra work than
+ // just returning up front.
+ // - (Optionally) Add an API to VFS to get the external filename lazily
+ // and update `FileManager::getExternalPath()` to use it instead. This
+ // has the benefit of making such accesses easily tracked, though isn't
+ // necessarily required (and could cause extra work than just adding to
+ // eg. `vfs::Status` up front).
auto &Redirection =
*SeenFileEntries
- .insert({Status.getName(), FileEntryRef::MapValue(UFE, DirInfo)})
+ .insert({Status.getName(), FileEntryRef::MapValue(*UFE, DirInfo)})
.first;
assert(Redirection.second->V.is<FileEntry *>() &&
"filename redirected to a non-canonical filename?");
- assert(Redirection.second->V.get<FileEntry *>() == &UFE &&
+ assert(Redirection.second->V.get<FileEntry *>() == UFE &&
"filename from getStatValue() refers to wrong file");
// Cache the redirection in the previously-inserted entry, still available
// in the tentative return value.
- NamedFileEnt->second = FileEntryRef::MapValue(Redirection);
-
- // Fix the tentative return value.
- NamedFileEnt = &Redirection;
+ NamedFileEnt->second = FileEntryRef::MapValue(Redirection, DirInfo);
}
FileEntryRef ReturnedRef(*NamedFileEnt);
- if (UFE.isValid()) { // Already have an entry with this inode, return it.
+ if (ReusingEntry) { // Already have an entry with this inode, return it.
- // FIXME: this hack ensures that if we look up a file by a virtual path in
- // the VFS that the getDir() will have the virtual path, even if we found
- // the file by a 'real' path first. This is required in order to find a
- // module's structure when its headers/module map are mapped in the VFS.
- // We should remove this as soon as we can properly support a file having
- // multiple names.
- if (&DirInfo.getDirEntry() != UFE.Dir && Status.IsVFSMapped)
- UFE.Dir = &DirInfo.getDirEntry();
+ // FIXME: This hack ensures that `getDir()` will use the path that was
+ // used to lookup this file, even if we found a file by different path
+ // first. This is required in order to find a module's structure when its
+ // headers/module map are mapped in the VFS.
+ //
+ // See above for how this will eventually be removed. `IsVFSMapped`
+ // *cannot* be narrowed to `ExposesExternalVFSPath` as crash reproducers
+ // also depend on this logic and they have `use-external-paths: false`.
+ if (&DirInfo.getDirEntry() != UFE->Dir && Status.IsVFSMapped)
+ UFE->Dir = &DirInfo.getDirEntry();
// Always update LastRef to the last name by which a file was accessed.
// FIXME: Neither this nor always using the first reference is correct; we
@@ -312,28 +343,27 @@ FileManager::getFileRef(StringRef Filename, bool openFile, bool CacheFailure) {
// corresponding FileEntry.
// FIXME: LastRef should be removed from FileEntry once all clients adopt
// FileEntryRef.
- UFE.LastRef = ReturnedRef;
+ UFE->LastRef = ReturnedRef;
return ReturnedRef;
}
// Otherwise, we don't have this file yet, add it.
- UFE.LastRef = ReturnedRef;
- UFE.Size = Status.getSize();
- UFE.ModTime = llvm::sys::toTimeT(Status.getLastModificationTime());
- UFE.Dir = &DirInfo.getDirEntry();
- UFE.UID = NextFileUID++;
- UFE.UniqueID = Status.getUniqueID();
- UFE.IsNamedPipe = Status.getType() == llvm::sys::fs::file_type::fifo_file;
- UFE.File = std::move(F);
- UFE.IsValid = true;
-
- if (UFE.File) {
- if (auto PathName = UFE.File->getName())
- fillRealPathName(&UFE, *PathName);
+ UFE->LastRef = ReturnedRef;
+ UFE->Size = Status.getSize();
+ UFE->ModTime = llvm::sys::toTimeT(Status.getLastModificationTime());
+ UFE->Dir = &DirInfo.getDirEntry();
+ UFE->UID = NextFileUID++;
+ UFE->UniqueID = Status.getUniqueID();
+ UFE->IsNamedPipe = Status.getType() == llvm::sys::fs::file_type::fifo_file;
+ UFE->File = std::move(F);
+
+ if (UFE->File) {
+ if (auto PathName = UFE->File->getName())
+ fillRealPathName(UFE, *PathName);
} else if (!openFile) {
// We should still fill the path even if we aren't opening the file.
- fillRealPathName(&UFE, InterndFileName);
+ fillRealPathName(UFE, InterndFileName);
}
return ReturnedRef;
}
@@ -373,8 +403,7 @@ FileEntryRef FileManager::getVirtualFileRef(StringRef Filename, off_t Size,
FileEntryRef::MapValue Value = *NamedFileEnt.second;
if (LLVM_LIKELY(Value.V.is<FileEntry *>()))
return FileEntryRef(NamedFileEnt);
- return FileEntryRef(*reinterpret_cast<const FileEntryRef::MapEntry *>(
- Value.V.get<const void *>()));
+ return FileEntryRef(*Value.V.get<const FileEntryRef::MapEntry *>());
}
// We've not seen this before, or the file is cached as non-existent.
@@ -397,52 +426,55 @@ FileEntryRef FileManager::getVirtualFileRef(StringRef Filename, off_t Size,
llvm::vfs::Status Status;
const char *InterndFileName = NamedFileEnt.first().data();
if (!getStatValue(InterndFileName, Status, true, nullptr)) {
- UFE = &UniqueRealFiles[Status.getUniqueID()];
Status = llvm::vfs::Status(
Status.getName(), Status.getUniqueID(),
llvm::sys::toTimePoint(ModificationTime),
Status.getUser(), Status.getGroup(), Size,
Status.getType(), Status.getPermissions());
- NamedFileEnt.second = FileEntryRef::MapValue(*UFE, *DirInfo);
-
- // If we had already opened this file, close it now so we don't
- // leak the descriptor. We're not going to use the file
- // descriptor anyway, since this is a virtual file.
- if (UFE->File)
- UFE->closeFile();
-
- // If we already have an entry with this inode, return it.
- //
- // FIXME: Surely this should add a reference by the new name, and return
- // it instead...
- if (UFE->isValid())
+ auto &RealFE = UniqueRealFiles[Status.getUniqueID()];
+ if (RealFE) {
+ // If we had already opened this file, close it now so we don't
+ // leak the descriptor. We're not going to use the file
+ // descriptor anyway, since this is a virtual file.
+ if (RealFE->File)
+ RealFE->closeFile();
+ // If we already have an entry with this inode, return it.
+ //
+ // FIXME: Surely this should add a reference by the new name, and return
+ // it instead...
+ NamedFileEnt.second = FileEntryRef::MapValue(*RealFE, *DirInfo);
return FileEntryRef(NamedFileEnt);
-
- UFE->UniqueID = Status.getUniqueID();
- UFE->IsNamedPipe = Status.getType() == llvm::sys::fs::file_type::fifo_file;
- fillRealPathName(UFE, Status.getName());
+ }
+ // File exists, but no entry - create it.
+ RealFE = new (FilesAlloc.Allocate()) FileEntry();
+ RealFE->UniqueID = Status.getUniqueID();
+ RealFE->IsNamedPipe =
+ Status.getType() == llvm::sys::fs::file_type::fifo_file;
+ fillRealPathName(RealFE, Status.getName());
+
+ UFE = RealFE;
} else {
- VirtualFileEntries.push_back(std::make_unique<FileEntry>());
- UFE = VirtualFileEntries.back().get();
- NamedFileEnt.second = FileEntryRef::MapValue(*UFE, *DirInfo);
+ // File does not exist, create a virtual entry.
+ UFE = new (FilesAlloc.Allocate()) FileEntry();
+ VirtualFileEntries.push_back(UFE);
}
+ NamedFileEnt.second = FileEntryRef::MapValue(*UFE, *DirInfo);
UFE->LastRef = FileEntryRef(NamedFileEnt);
UFE->Size = Size;
UFE->ModTime = ModificationTime;
UFE->Dir = &DirInfo->getDirEntry();
UFE->UID = NextFileUID++;
- UFE->IsValid = true;
UFE->File.reset();
return FileEntryRef(NamedFileEnt);
}
-llvm::Optional<FileEntryRef> FileManager::getBypassFile(FileEntryRef VF) {
+OptionalFileEntryRef FileManager::getBypassFile(FileEntryRef VF) {
// Stat of the file and return nullptr if it doesn't exist.
llvm::vfs::Status Status;
if (getStatValue(VF.getName(), Status, /*isFile=*/true, /*F=*/nullptr))
- return None;
+ return std::nullopt;
if (!SeenBypassFileEntries)
SeenBypassFileEntries = std::make_unique<
@@ -455,16 +487,14 @@ llvm::Optional<FileEntryRef> FileManager::getBypassFile(FileEntryRef VF) {
return FileEntryRef(*Insertion.first);
// Fill in the new entry from the stat.
- BypassFileEntries.push_back(std::make_unique<FileEntry>());
- const FileEntry &VFE = VF.getFileEntry();
- FileEntry &BFE = *BypassFileEntries.back();
- Insertion.first->second = FileEntryRef::MapValue(BFE, VF.getDir());
- BFE.LastRef = FileEntryRef(*Insertion.first);
- BFE.Size = Status.getSize();
- BFE.Dir = VFE.Dir;
- BFE.ModTime = llvm::sys::toTimeT(Status.getLastModificationTime());
- BFE.UID = NextFileUID++;
- BFE.IsValid = true;
+ FileEntry *BFE = new (FilesAlloc.Allocate()) FileEntry();
+ BypassFileEntries.push_back(BFE);
+ Insertion.first->second = FileEntryRef::MapValue(*BFE, VF.getDir());
+ BFE->LastRef = FileEntryRef(*Insertion.first);
+ BFE->Size = Status.getSize();
+ BFE->Dir = VF.getFileEntry().Dir;
+ BFE->ModTime = llvm::sys::toTimeT(Status.getLastModificationTime());
+ BFE->UID = NextFileUID++;
// Save the entry in the bypass table and return.
return FileEntryRef(*Insertion.first);
@@ -502,12 +532,13 @@ void FileManager::fillRealPathName(FileEntry *UFE, llvm::StringRef FileName) {
// misleading. We need to clean up the interface here.
makeAbsolutePath(AbsPath);
llvm::sys::path::remove_dots(AbsPath, /*remove_dot_dot=*/true);
- UFE->RealPathName = std::string(AbsPath.str());
+ UFE->RealPathName = std::string(AbsPath);
}
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
-FileManager::getBufferForFile(const FileEntry *Entry, bool isVolatile,
+FileManager::getBufferForFile(FileEntryRef FE, bool isVolatile,
bool RequiresNullTerminator) {
+ const FileEntry *Entry = &FE.getFileEntry();
// If the content is living on the file entry, return a reference to it.
if (Entry->Content)
return llvm::MemoryBuffer::getMemBuffer(Entry->Content->getMemBufferRef());
@@ -518,7 +549,7 @@ FileManager::getBufferForFile(const FileEntry *Entry, bool isVolatile,
if (isVolatile || Entry->isNamedPipe())
FileSize = -1;
- StringRef Filename = Entry->getName();
+ StringRef Filename = FE.getName();
// If the file is already open, use the open file descriptor.
if (Entry->File) {
auto Result = Entry->File->getBuffer(Filename, FileSize,
@@ -581,55 +612,66 @@ FileManager::getNoncachedStatValue(StringRef Path,
}
void FileManager::GetUniqueIDMapping(
- SmallVectorImpl<const FileEntry *> &UIDToFiles) const {
+ SmallVectorImpl<OptionalFileEntryRef> &UIDToFiles) const {
UIDToFiles.clear();
UIDToFiles.resize(NextFileUID);
- // Map file entries
- for (llvm::StringMap<llvm::ErrorOr<FileEntryRef::MapValue>,
- llvm::BumpPtrAllocator>::const_iterator
- FE = SeenFileEntries.begin(),
- FEEnd = SeenFileEntries.end();
- FE != FEEnd; ++FE)
- if (llvm::ErrorOr<FileEntryRef::MapValue> Entry = FE->getValue()) {
- if (const auto *FE = Entry->V.dyn_cast<FileEntry *>())
- UIDToFiles[FE->getUID()] = FE;
- }
-
- // Map virtual file entries
- for (const auto &VFE : VirtualFileEntries)
- UIDToFiles[VFE->getUID()] = VFE.get();
+ for (const auto &Entry : SeenFileEntries) {
+ // Only return files that exist and are not redirected.
+ if (!Entry.getValue() || !Entry.getValue()->V.is<FileEntry *>())
+ continue;
+ FileEntryRef FE(Entry);
+ // Add this file if it's the first one with the UID, or if its name is
+ // better than the existing one.
+ OptionalFileEntryRef &ExistingFE = UIDToFiles[FE.getUID()];
+ if (!ExistingFE || FE.getName() < ExistingFE->getName())
+ ExistingFE = FE;
+ }
}
-StringRef FileManager::getCanonicalName(const DirectoryEntry *Dir) {
- llvm::DenseMap<const void *, llvm::StringRef>::iterator Known
- = CanonicalNames.find(Dir);
- if (Known != CanonicalNames.end())
- return Known->second;
-
- StringRef CanonicalName(Dir->getName());
-
- SmallString<4096> CanonicalNameBuf;
- if (!FS->getRealPath(Dir->getName(), CanonicalNameBuf))
- CanonicalName = CanonicalNameBuf.str().copy(CanonicalNameStorage);
+StringRef FileManager::getCanonicalName(DirectoryEntryRef Dir) {
+ return getCanonicalName(Dir, Dir.getName());
+}
- CanonicalNames.insert({Dir, CanonicalName});
- return CanonicalName;
+StringRef FileManager::getCanonicalName(FileEntryRef File) {
+ return getCanonicalName(File, File.getName());
}
-StringRef FileManager::getCanonicalName(const FileEntry *File) {
- llvm::DenseMap<const void *, llvm::StringRef>::iterator Known
- = CanonicalNames.find(File);
+StringRef FileManager::getCanonicalName(const void *Entry, StringRef Name) {
+ llvm::DenseMap<const void *, llvm::StringRef>::iterator Known =
+ CanonicalNames.find(Entry);
if (Known != CanonicalNames.end())
return Known->second;
- StringRef CanonicalName(File->getName());
-
- SmallString<4096> CanonicalNameBuf;
- if (!FS->getRealPath(File->getName(), CanonicalNameBuf))
- CanonicalName = CanonicalNameBuf.str().copy(CanonicalNameStorage);
+ // Name comes from FileEntry/DirectoryEntry::getName(), so it is safe to
+ // store it in the DenseMap below.
+ StringRef CanonicalName(Name);
+
+ SmallString<256> AbsPathBuf;
+ SmallString<256> RealPathBuf;
+ if (!FS->getRealPath(Name, RealPathBuf)) {
+ if (is_style_windows(llvm::sys::path::Style::native)) {
+ // For Windows paths, only use the real path if it doesn't resolve
+ // a substitute drive, as those are used to avoid MAX_PATH issues.
+ AbsPathBuf = Name;
+ if (!FS->makeAbsolute(AbsPathBuf)) {
+ if (llvm::sys::path::root_name(RealPathBuf) ==
+ llvm::sys::path::root_name(AbsPathBuf)) {
+ CanonicalName = RealPathBuf.str().copy(CanonicalNameStorage);
+ } else {
+ // Fallback to using the absolute path.
+ // Simplifying /../ is semantically valid on Windows even in the
+ // presence of symbolic links.
+ llvm::sys::path::remove_dots(AbsPathBuf, /*remove_dot_dot=*/true);
+ CanonicalName = AbsPathBuf.str().copy(CanonicalNameStorage);
+ }
+ }
+ } else {
+ CanonicalName = RealPathBuf.str().copy(CanonicalNameStorage);
+ }
+ }
- CanonicalNames.insert({File, CanonicalName});
+ CanonicalNames.insert({Entry, CanonicalName});
return CanonicalName;
}
diff --git a/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp b/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp
index d811aeec84a0..d0d8316385b4 100644
--- a/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp
@@ -13,6 +13,7 @@
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/DiagnosticLex.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/Specifiers.h"
@@ -24,7 +25,6 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
-#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdio>
@@ -51,8 +51,7 @@ namespace {
/// A simple identifier lookup iterator that represents an
/// empty sequence of identifiers.
-class EmptyLookupIterator : public IdentifierIterator
-{
+class EmptyLookupIterator : public IdentifierIterator {
public:
StringRef Next() override { return StringRef(); }
};
@@ -82,7 +81,7 @@ IdentifierTable::IdentifierTable(const LangOptions &LangOpts,
// Constants for TokenKinds.def
namespace {
- enum {
+ enum TokenKey : unsigned {
KEYC99 = 0x1,
KEYCXX = 0x2,
KEYCXX11 = 0x4,
@@ -93,72 +92,158 @@ namespace {
KEYNOCXX = 0x80,
KEYBORLAND = 0x100,
KEYOPENCLC = 0x200,
- KEYC11 = 0x400,
+ KEYC23 = 0x400,
KEYNOMS18 = 0x800,
KEYNOOPENCL = 0x1000,
WCHARSUPPORT = 0x2000,
HALFSUPPORT = 0x4000,
CHAR8SUPPORT = 0x8000,
- KEYCONCEPTS = 0x10000,
- KEYOBJC = 0x20000,
- KEYZVECTOR = 0x40000,
- KEYCOROUTINES = 0x80000,
- KEYMODULES = 0x100000,
- KEYCXX20 = 0x200000,
- KEYOPENCLCXX = 0x400000,
- KEYMSCOMPAT = 0x800000,
- KEYSYCL = 0x1000000,
+ KEYOBJC = 0x10000,
+ KEYZVECTOR = 0x20000,
+ KEYCOROUTINES = 0x40000,
+ KEYMODULES = 0x80000,
+ KEYCXX20 = 0x100000,
+ KEYOPENCLCXX = 0x200000,
+ KEYMSCOMPAT = 0x400000,
+ KEYSYCL = 0x800000,
+ KEYCUDA = 0x1000000,
+ KEYHLSL = 0x2000000,
+ KEYFIXEDPOINT = 0x4000000,
+ KEYMAX = KEYFIXEDPOINT, // The maximum key
KEYALLCXX = KEYCXX | KEYCXX11 | KEYCXX20,
- KEYALL = (0x1ffffff & ~KEYNOMS18 &
- ~KEYNOOPENCL) // KEYNOMS18 and KEYNOOPENCL are used to exclude.
+ KEYALL = (KEYMAX | (KEYMAX-1)) & ~KEYNOMS18 &
+ ~KEYNOOPENCL // KEYNOMS18 and KEYNOOPENCL are used to exclude.
};
- /// How a keyword is treated in the selected standard.
+ /// How a keyword is treated in the selected standard. This enum is ordered
+ /// intentionally so that the value that 'wins' is the most 'permissive'.
enum KeywordStatus {
+ KS_Unknown, // Not yet calculated. Used when figuring out the status.
KS_Disabled, // Disabled
+ KS_Future, // Is a keyword in future standard
KS_Extension, // Is an extension
KS_Enabled, // Enabled
- KS_Future // Is a keyword in future standard
};
} // namespace
+// This works on a single TokenKey flag and checks the LangOpts to get the
+// KeywordStatus based exclusively on this flag, so that it can be merged in
+// getKeywordStatus. Most should be enabled/disabled, but some might imply
+// 'future' versions, or extensions. Returns 'unknown' unless this is KNOWN to
+// be disabled, and the calling function makes it 'disabled' if no other flag
+// changes it. This is necessary for the KEYNOCXX and KEYNOOPENCL flags.
+static KeywordStatus getKeywordStatusHelper(const LangOptions &LangOpts,
+ TokenKey Flag) {
+ // Flag is a single bit version of TokenKey (that is, not
+ // KEYALL/KEYALLCXX/etc), so we can check with == throughout this function.
+ assert((Flag & ~(Flag - 1)) == Flag && "Multiple bits set?");
+
+ switch (Flag) {
+ case KEYC99:
+ if (LangOpts.C99)
+ return KS_Enabled;
+ return !LangOpts.CPlusPlus ? KS_Future : KS_Unknown;
+ case KEYC23:
+ if (LangOpts.C23)
+ return KS_Enabled;
+ return !LangOpts.CPlusPlus ? KS_Future : KS_Unknown;
+ case KEYCXX:
+ return LangOpts.CPlusPlus ? KS_Enabled : KS_Unknown;
+ case KEYCXX11:
+ if (LangOpts.CPlusPlus11)
+ return KS_Enabled;
+ return LangOpts.CPlusPlus ? KS_Future : KS_Unknown;
+ case KEYCXX20:
+ if (LangOpts.CPlusPlus20)
+ return KS_Enabled;
+ return LangOpts.CPlusPlus ? KS_Future : KS_Unknown;
+ case KEYGNU:
+ return LangOpts.GNUKeywords ? KS_Extension : KS_Unknown;
+ case KEYMS:
+ return LangOpts.MicrosoftExt ? KS_Extension : KS_Unknown;
+ case BOOLSUPPORT:
+ if (LangOpts.Bool) return KS_Enabled;
+ return !LangOpts.CPlusPlus ? KS_Future : KS_Unknown;
+ case KEYALTIVEC:
+ return LangOpts.AltiVec ? KS_Enabled : KS_Unknown;
+ case KEYBORLAND:
+ return LangOpts.Borland ? KS_Extension : KS_Unknown;
+ case KEYOPENCLC:
+ return LangOpts.OpenCL && !LangOpts.OpenCLCPlusPlus ? KS_Enabled
+ : KS_Unknown;
+ case WCHARSUPPORT:
+ return LangOpts.WChar ? KS_Enabled : KS_Unknown;
+ case HALFSUPPORT:
+ return LangOpts.Half ? KS_Enabled : KS_Unknown;
+ case CHAR8SUPPORT:
+ if (LangOpts.Char8) return KS_Enabled;
+ if (LangOpts.CPlusPlus20) return KS_Unknown;
+ if (LangOpts.CPlusPlus) return KS_Future;
+ return KS_Unknown;
+ case KEYOBJC:
+ // We treat bridge casts as objective-C keywords so we can warn on them
+ // in non-arc mode.
+ return LangOpts.ObjC ? KS_Enabled : KS_Unknown;
+ case KEYZVECTOR:
+ return LangOpts.ZVector ? KS_Enabled : KS_Unknown;
+ case KEYCOROUTINES:
+ return LangOpts.Coroutines ? KS_Enabled : KS_Unknown;
+ case KEYMODULES:
+ return KS_Unknown;
+ case KEYOPENCLCXX:
+ return LangOpts.OpenCLCPlusPlus ? KS_Enabled : KS_Unknown;
+ case KEYMSCOMPAT:
+ return LangOpts.MSVCCompat ? KS_Enabled : KS_Unknown;
+ case KEYSYCL:
+ return LangOpts.isSYCL() ? KS_Enabled : KS_Unknown;
+ case KEYCUDA:
+ return LangOpts.CUDA ? KS_Enabled : KS_Unknown;
+ case KEYHLSL:
+ return LangOpts.HLSL ? KS_Enabled : KS_Unknown;
+ case KEYNOCXX:
+ // This is enabled in all non-C++ modes, but might be enabled for other
+ // reasons as well.
+ return LangOpts.CPlusPlus ? KS_Unknown : KS_Enabled;
+ case KEYNOOPENCL:
+ // The disable behavior for this is handled in getKeywordStatus.
+ return KS_Unknown;
+ case KEYNOMS18:
+ // The disable behavior for this is handled in getKeywordStatus.
+ return KS_Unknown;
+ case KEYFIXEDPOINT:
+ return LangOpts.FixedPoint ? KS_Enabled : KS_Disabled;
+ default:
+ llvm_unreachable("Unknown KeywordStatus flag");
+ }
+}
+
/// Translates flags as specified in TokenKinds.def into keyword status
/// in the given language standard.
static KeywordStatus getKeywordStatus(const LangOptions &LangOpts,
unsigned Flags) {
+ // KEYALL means always enabled, so special case this one.
if (Flags == KEYALL) return KS_Enabled;
- if (LangOpts.CPlusPlus && (Flags & KEYCXX)) return KS_Enabled;
- if (LangOpts.CPlusPlus11 && (Flags & KEYCXX11)) return KS_Enabled;
- if (LangOpts.CPlusPlus20 && (Flags & KEYCXX20)) return KS_Enabled;
- if (LangOpts.C99 && (Flags & KEYC99)) return KS_Enabled;
- if (LangOpts.GNUKeywords && (Flags & KEYGNU)) return KS_Extension;
- if (LangOpts.MicrosoftExt && (Flags & KEYMS)) return KS_Extension;
- if (LangOpts.MSVCCompat && (Flags & KEYMSCOMPAT)) return KS_Enabled;
- if (LangOpts.Borland && (Flags & KEYBORLAND)) return KS_Extension;
- if (LangOpts.Bool && (Flags & BOOLSUPPORT)) return KS_Enabled;
- if (LangOpts.Half && (Flags & HALFSUPPORT)) return KS_Enabled;
- if (LangOpts.WChar && (Flags & WCHARSUPPORT)) return KS_Enabled;
- if (LangOpts.Char8 && (Flags & CHAR8SUPPORT)) return KS_Enabled;
- if (LangOpts.AltiVec && (Flags & KEYALTIVEC)) return KS_Enabled;
- if (LangOpts.ZVector && (Flags & KEYZVECTOR)) return KS_Enabled;
- if (LangOpts.OpenCL && !LangOpts.OpenCLCPlusPlus && (Flags & KEYOPENCLC))
- return KS_Enabled;
- if (LangOpts.OpenCLCPlusPlus && (Flags & KEYOPENCLCXX)) return KS_Enabled;
- if (!LangOpts.CPlusPlus && (Flags & KEYNOCXX)) return KS_Enabled;
- if (LangOpts.C11 && (Flags & KEYC11)) return KS_Enabled;
- // We treat bridge casts as objective-C keywords so we can warn on them
- // in non-arc mode.
- if (LangOpts.ObjC && (Flags & KEYOBJC)) return KS_Enabled;
- if (LangOpts.CPlusPlus20 && (Flags & KEYCONCEPTS)) return KS_Enabled;
- if (LangOpts.Coroutines && (Flags & KEYCOROUTINES)) return KS_Enabled;
- if (LangOpts.ModulesTS && (Flags & KEYMODULES)) return KS_Enabled;
- if (LangOpts.CPlusPlus && (Flags & KEYALLCXX)) return KS_Future;
- if (LangOpts.CPlusPlus && !LangOpts.CPlusPlus20 && (Flags & CHAR8SUPPORT))
- return KS_Future;
- if (LangOpts.isSYCL() && (Flags & KEYSYCL))
- return KS_Enabled;
- return KS_Disabled;
+ // These are tests that need to 'always win', as they are special in that they
+ // disable based on certain conditions.
+ if (LangOpts.OpenCL && (Flags & KEYNOOPENCL)) return KS_Disabled;
+ if (LangOpts.MSVCCompat && (Flags & KEYNOMS18) &&
+ !LangOpts.isCompatibleWithMSVC(LangOptions::MSVC2015))
+ return KS_Disabled;
+
+ KeywordStatus CurStatus = KS_Unknown;
+
+ while (Flags != 0) {
+ unsigned CurFlag = Flags & ~(Flags - 1);
+ Flags = Flags & ~CurFlag;
+ CurStatus = std::max(
+ CurStatus,
+ getKeywordStatusHelper(LangOpts, static_cast<TokenKey>(CurFlag)));
+ }
+
+ if (CurStatus == KS_Unknown)
+ return KS_Disabled;
+ return CurStatus;
}
/// AddKeyword - This method is used to associate a token ID with specific
@@ -169,15 +254,6 @@ static void AddKeyword(StringRef Keyword,
const LangOptions &LangOpts, IdentifierTable &Table) {
KeywordStatus AddResult = getKeywordStatus(LangOpts, Flags);
- // Don't add this keyword under MSVCCompat.
- if (LangOpts.MSVCCompat && (Flags & KEYNOMS18) &&
- !LangOpts.isCompatibleWithMSVC(LangOptions::MSVC2015))
- return;
-
- // Don't add this keyword under OpenCL.
- if (LangOpts.OpenCL && (Flags & KEYNOOPENCL))
- return;
-
// Don't add this keyword if disabled in this language.
if (AddResult == KS_Disabled) return;
@@ -204,6 +280,16 @@ static void AddObjCKeyword(StringRef Name,
Table.get(Name).setObjCKeywordID(ObjCID);
}
+static void AddInterestingIdentifier(StringRef Name,
+ tok::InterestingIdentifierKind BTID,
+ IdentifierTable &Table) {
+ // Don't add 'not_interesting' identifier.
+ if (BTID != tok::not_interesting) {
+ IdentifierInfo &Info = Table.get(Name, tok::identifier);
+ Info.setInterestingIdentifierID(BTID);
+ }
+}
+
/// AddKeywords - Add all keywords to the symbol table.
///
void IdentifierTable::AddKeywords(const LangOptions &LangOpts) {
@@ -220,6 +306,9 @@ void IdentifierTable::AddKeywords(const LangOptions &LangOpts) {
#define OBJC_AT_KEYWORD(NAME) \
if (LangOpts.ObjC) \
AddObjCKeyword(StringRef(#NAME), tok::objc_##NAME, *this);
+#define INTERESTING_IDENTIFIER(NAME) \
+ AddInterestingIdentifier(StringRef(#NAME), tok::NAME, *this);
+
#define TESTING_KEYWORD(NAME, FLAGS)
#include "clang/Basic/TokenKinds.def"
@@ -309,6 +398,27 @@ IdentifierInfo::isReserved(const LangOptions &LangOpts) const {
return ReservedIdentifierStatus::NotReserved;
}
+ReservedLiteralSuffixIdStatus
+IdentifierInfo::isReservedLiteralSuffixId() const {
+ StringRef Name = getName();
+
+ if (Name[0] != '_')
+ return ReservedLiteralSuffixIdStatus::NotStartsWithUnderscore;
+
+ if (Name.contains("__"))
+ return ReservedLiteralSuffixIdStatus::ContainsDoubleUnderscore;
+
+ return ReservedLiteralSuffixIdStatus::NotReserved;
+}
+
+StringRef IdentifierInfo::deuglifiedName() const {
+ StringRef Name = getName();
+ if (Name.size() >= 2 && Name.front() == '_' &&
+ (Name[1] == '_' || (Name[1] >= 'A' && Name[1] <= 'Z')))
+ return Name.ltrim('_');
+ return Name;
+}
+
tok::PPKeywordKind IdentifierInfo::getPPKeywordID() const {
// We use a perfect hash function here involving the length of the keyword,
// the first and third character. For preprocessor ID's there are no
@@ -405,63 +515,6 @@ unsigned llvm::DenseMapInfo<clang::Selector>::getHashValue(clang::Selector S) {
return DenseMapInfo<void*>::getHashValue(S.getAsOpaquePtr());
}
-namespace clang {
-
-/// One of these variable length records is kept for each
-/// selector containing more than one keyword. We use a folding set
-/// to unique aggregate names (keyword selectors in ObjC parlance). Access to
-/// this class is provided strictly through Selector.
-class alignas(IdentifierInfoAlignment) MultiKeywordSelector
- : public detail::DeclarationNameExtra,
- public llvm::FoldingSetNode {
- MultiKeywordSelector(unsigned nKeys) : DeclarationNameExtra(nKeys) {}
-
-public:
- // Constructor for keyword selectors.
- MultiKeywordSelector(unsigned nKeys, IdentifierInfo **IIV)
- : DeclarationNameExtra(nKeys) {
- assert((nKeys > 1) && "not a multi-keyword selector");
-
- // Fill in the trailing keyword array.
- IdentifierInfo **KeyInfo = reinterpret_cast<IdentifierInfo **>(this + 1);
- for (unsigned i = 0; i != nKeys; ++i)
- KeyInfo[i] = IIV[i];
- }
-
- // getName - Derive the full selector name and return it.
- std::string getName() const;
-
- using DeclarationNameExtra::getNumArgs;
-
- using keyword_iterator = IdentifierInfo *const *;
-
- keyword_iterator keyword_begin() const {
- return reinterpret_cast<keyword_iterator>(this + 1);
- }
-
- keyword_iterator keyword_end() const {
- return keyword_begin() + getNumArgs();
- }
-
- IdentifierInfo *getIdentifierInfoForSlot(unsigned i) const {
- assert(i < getNumArgs() && "getIdentifierInfoForSlot(): illegal index");
- return keyword_begin()[i];
- }
-
- static void Profile(llvm::FoldingSetNodeID &ID, keyword_iterator ArgTys,
- unsigned NumArgs) {
- ID.AddInteger(NumArgs);
- for (unsigned i = 0; i != NumArgs; ++i)
- ID.AddPointer(ArgTys[i]);
- }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, keyword_begin(), getNumArgs());
- }
-};
-
-} // namespace clang.
-
bool Selector::isKeywordSelector(ArrayRef<StringRef> Names) const {
assert(!Names.empty() && "must have >= 1 selector slots");
if (getNumArgs() != Names.size())
@@ -517,7 +570,7 @@ std::string MultiKeywordSelector::getName() const {
}
std::string Selector::getAsString() const {
- if (InfoPtr == 0)
+ if (isNull())
return "<null selector>";
if (getIdentifierInfoFlag() < MultiArg) {
@@ -551,7 +604,7 @@ LLVM_DUMP_METHOD void Selector::dump() const { print(llvm::errs()); }
static bool startsWithWord(StringRef name, StringRef word) {
if (name.size() < word.size()) return false;
return ((name.size() == word.size() || !isLowercase(name[word.size()])) &&
- name.startswith(word));
+ name.starts_with(word));
}
ObjCMethodFamily Selector::getMethodFamilyImpl(Selector sel) {
@@ -575,8 +628,7 @@ ObjCMethodFamily Selector::getMethodFamilyImpl(Selector sel) {
return OMF_performSelector;
// The other method families may begin with a prefix of underscores.
- while (!name.empty() && name.front() == '_')
- name = name.substr(1);
+ name = name.ltrim('_');
if (name.empty()) return OMF_None;
switch (name.front()) {
@@ -689,7 +741,7 @@ SelectorTable::constructSetterSelector(IdentifierTable &Idents,
std::string SelectorTable::getPropertyNameFromSetterSelector(Selector Sel) {
StringRef Name = Sel.getNameForSlot(0);
- assert(Name.startswith("set") && "invalid setter name");
+ assert(Name.starts_with("set") && "invalid setter name");
return (Twine(toLowercase(Name[3])) + Name.drop_front(4)).str();
}
@@ -765,3 +817,50 @@ StringRef clang::getNullabilitySpelling(NullabilityKind kind,
}
llvm_unreachable("Unknown nullability kind.");
}
+
+llvm::raw_ostream &clang::operator<<(llvm::raw_ostream &OS,
+ NullabilityKind NK) {
+ switch (NK) {
+ case NullabilityKind::NonNull:
+ return OS << "NonNull";
+ case NullabilityKind::Nullable:
+ return OS << "Nullable";
+ case NullabilityKind::NullableResult:
+ return OS << "NullableResult";
+ case NullabilityKind::Unspecified:
+ return OS << "Unspecified";
+ }
+ llvm_unreachable("Unknown nullability kind.");
+}
+
+diag::kind
+IdentifierTable::getFutureCompatDiagKind(const IdentifierInfo &II,
+ const LangOptions &LangOpts) {
+ assert(II.isFutureCompatKeyword() && "diagnostic should not be needed");
+
+ unsigned Flags = llvm::StringSwitch<unsigned>(II.getName())
+#define KEYWORD(NAME, FLAGS) .Case(#NAME, FLAGS)
+#include "clang/Basic/TokenKinds.def"
+#undef KEYWORD
+ ;
+
+ if (LangOpts.CPlusPlus) {
+ if ((Flags & KEYCXX11) == KEYCXX11)
+ return diag::warn_cxx11_keyword;
+
+ // char8_t is not modeled as a CXX20_KEYWORD because it's not
+ // unconditionally enabled in C++20 mode. (It can be disabled
+ // by -fno-char8_t.)
+ if (((Flags & KEYCXX20) == KEYCXX20) ||
+ ((Flags & CHAR8SUPPORT) == CHAR8SUPPORT))
+ return diag::warn_cxx20_keyword;
+ } else {
+ if ((Flags & KEYC99) == KEYC99)
+ return diag::warn_c99_keyword;
+ if ((Flags & KEYC23) == KEYC23)
+ return diag::warn_c23_keyword;
+ }
+
+ llvm_unreachable(
+ "Keyword not known to come from a newer Standard or proposed Standard");
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/LangOptions.cpp b/contrib/llvm-project/clang/lib/Basic/LangOptions.cpp
index bebf3178426f..a0adfbf61840 100644
--- a/contrib/llvm-project/clang/lib/Basic/LangOptions.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/LangOptions.cpp
@@ -29,6 +29,14 @@ void LangOptions::resetNonModularOptions() {
Name = static_cast<unsigned>(Default);
#include "clang/Basic/LangOptions.def"
+ // Reset "benign" options with implied values (Options.td ImpliedBy relations)
+ // rather than their defaults. This avoids unexpected combinations and
+ // invocations that cannot be round-tripped to arguments.
+ // FIXME: we should derive this automatically from ImpliedBy in tablegen.
+ AllowFPReassoc = UnsafeFPMath;
+ NoHonorNaNs = FiniteMathOnly;
+ NoHonorInfs = FiniteMathOnly;
+
// These options do not affect AST generation.
NoSanitizeFiles.clear();
XRayAlwaysInstrumentFiles.clear();
@@ -47,20 +55,173 @@ bool LangOptions::isNoBuiltinFunc(StringRef FuncName) const {
VersionTuple LangOptions::getOpenCLVersionTuple() const {
const int Ver = OpenCLCPlusPlus ? OpenCLCPlusPlusVersion : OpenCLVersion;
+ if (OpenCLCPlusPlus && Ver != 100)
+ return VersionTuple(Ver / 100);
return VersionTuple(Ver / 100, (Ver % 100) / 10);
}
-void LangOptions::remapPathPrefix(SmallString<256> &Path) const {
+unsigned LangOptions::getOpenCLCompatibleVersion() const {
+ if (!OpenCLCPlusPlus)
+ return OpenCLVersion;
+ if (OpenCLCPlusPlusVersion == 100)
+ return 200;
+ if (OpenCLCPlusPlusVersion == 202100)
+ return 300;
+ llvm_unreachable("Unknown OpenCL version");
+}
+
+void LangOptions::remapPathPrefix(SmallVectorImpl<char> &Path) const {
for (const auto &Entry : MacroPrefixMap)
if (llvm::sys::path::replace_path_prefix(Path, Entry.first, Entry.second))
break;
}
+std::string LangOptions::getOpenCLVersionString() const {
+ std::string Result;
+ {
+ llvm::raw_string_ostream Out(Result);
+ Out << (OpenCLCPlusPlus ? "C++ for OpenCL" : "OpenCL C") << " version "
+ << getOpenCLVersionTuple().getAsString();
+ }
+ return Result;
+}
+
+void LangOptions::setLangDefaults(LangOptions &Opts, Language Lang,
+ const llvm::Triple &T,
+ std::vector<std::string> &Includes,
+ LangStandard::Kind LangStd) {
+ // Set some properties which depend solely on the input kind; it would be nice
+ // to move these to the language standard, and have the driver resolve the
+ // input kind + language standard.
+ //
+ // FIXME: Perhaps a better model would be for a single source file to have
+ // multiple language standards (C / C++ std, ObjC std, OpenCL std, OpenMP std)
+ // simultaneously active?
+ if (Lang == Language::Asm) {
+ Opts.AsmPreprocessor = 1;
+ } else if (Lang == Language::ObjC || Lang == Language::ObjCXX) {
+ Opts.ObjC = 1;
+ }
+
+ if (LangStd == LangStandard::lang_unspecified)
+ LangStd = getDefaultLanguageStandard(Lang, T);
+ const LangStandard &Std = LangStandard::getLangStandardForKind(LangStd);
+ Opts.LangStd = LangStd;
+ Opts.LineComment = Std.hasLineComments();
+ Opts.C99 = Std.isC99();
+ Opts.C11 = Std.isC11();
+ Opts.C17 = Std.isC17();
+ Opts.C23 = Std.isC23();
+ Opts.CPlusPlus = Std.isCPlusPlus();
+ Opts.CPlusPlus11 = Std.isCPlusPlus11();
+ Opts.CPlusPlus14 = Std.isCPlusPlus14();
+ Opts.CPlusPlus17 = Std.isCPlusPlus17();
+ Opts.CPlusPlus20 = Std.isCPlusPlus20();
+ Opts.CPlusPlus23 = Std.isCPlusPlus23();
+ Opts.CPlusPlus26 = Std.isCPlusPlus26();
+ Opts.GNUMode = Std.isGNUMode();
+ Opts.GNUCVersion = 0;
+ Opts.HexFloats = Std.hasHexFloats();
+ Opts.WChar = Std.isCPlusPlus();
+ Opts.Digraphs = Std.hasDigraphs();
+
+ Opts.HLSL = Lang == Language::HLSL;
+ if (Opts.HLSL && Opts.IncludeDefaultHeader)
+ Includes.push_back("hlsl.h");
+
+ // Set OpenCL Version.
+ Opts.OpenCL = Std.isOpenCL();
+ if (LangStd == LangStandard::lang_opencl10)
+ Opts.OpenCLVersion = 100;
+ else if (LangStd == LangStandard::lang_opencl11)
+ Opts.OpenCLVersion = 110;
+ else if (LangStd == LangStandard::lang_opencl12)
+ Opts.OpenCLVersion = 120;
+ else if (LangStd == LangStandard::lang_opencl20)
+ Opts.OpenCLVersion = 200;
+ else if (LangStd == LangStandard::lang_opencl30)
+ Opts.OpenCLVersion = 300;
+ else if (LangStd == LangStandard::lang_openclcpp10)
+ Opts.OpenCLCPlusPlusVersion = 100;
+ else if (LangStd == LangStandard::lang_openclcpp2021)
+ Opts.OpenCLCPlusPlusVersion = 202100;
+ else if (LangStd == LangStandard::lang_hlsl2015)
+ Opts.HLSLVersion = (unsigned)LangOptions::HLSL_2015;
+ else if (LangStd == LangStandard::lang_hlsl2016)
+ Opts.HLSLVersion = (unsigned)LangOptions::HLSL_2016;
+ else if (LangStd == LangStandard::lang_hlsl2017)
+ Opts.HLSLVersion = (unsigned)LangOptions::HLSL_2017;
+ else if (LangStd == LangStandard::lang_hlsl2018)
+ Opts.HLSLVersion = (unsigned)LangOptions::HLSL_2018;
+ else if (LangStd == LangStandard::lang_hlsl2021)
+ Opts.HLSLVersion = (unsigned)LangOptions::HLSL_2021;
+ else if (LangStd == LangStandard::lang_hlsl202x)
+ Opts.HLSLVersion = (unsigned)LangOptions::HLSL_202x;
+
+ // OpenCL has some additional defaults.
+ if (Opts.OpenCL) {
+ Opts.AltiVec = 0;
+ Opts.ZVector = 0;
+ Opts.setDefaultFPContractMode(LangOptions::FPM_On);
+ Opts.OpenCLCPlusPlus = Opts.CPlusPlus;
+ Opts.OpenCLPipes = Opts.getOpenCLCompatibleVersion() == 200;
+ Opts.OpenCLGenericAddressSpace = Opts.getOpenCLCompatibleVersion() == 200;
+
+ // Include default header file for OpenCL.
+ if (Opts.IncludeDefaultHeader) {
+ if (Opts.DeclareOpenCLBuiltins) {
+ // Only include base header file for builtin types and constants.
+ Includes.push_back("opencl-c-base.h");
+ } else {
+ Includes.push_back("opencl-c.h");
+ }
+ }
+ }
+
+ Opts.HIP = Lang == Language::HIP;
+ Opts.CUDA = Lang == Language::CUDA || Opts.HIP;
+ if (Opts.HIP) {
+ // HIP toolchain does not support 'Fast' FPOpFusion in backends since it
+ // fuses multiplication/addition instructions without contract flag from
+ // device library functions in LLVM bitcode, which causes accuracy loss in
+ // certain math functions, e.g. tan(-1e20) becomes -0.933 instead of 0.8446.
+ // For device library functions in bitcode to work, 'Strict' or 'Standard'
+ // FPOpFusion options in backends is needed. Therefore 'fast-honor-pragmas'
+ // FP contract option is used to allow fuse across statements in frontend
+ // whereas respecting contract flag in backend.
+ Opts.setDefaultFPContractMode(LangOptions::FPM_FastHonorPragmas);
+ } else if (Opts.CUDA) {
+ if (T.isSPIRV()) {
+ // Emit OpenCL version metadata in LLVM IR when targeting SPIR-V.
+ Opts.OpenCLVersion = 200;
+ }
+ // Allow fuse across statements disregarding pragmas.
+ Opts.setDefaultFPContractMode(LangOptions::FPM_Fast);
+ }
+
+ Opts.RenderScript = Lang == Language::RenderScript;
+
+ // OpenCL, C++ and C23 have bool, true, false keywords.
+ Opts.Bool = Opts.OpenCL || Opts.CPlusPlus || Opts.C23;
+
+ // OpenCL and HLSL have half keyword
+ Opts.Half = Opts.OpenCL || Opts.HLSL;
+}
+
FPOptions FPOptions::defaultWithoutTrailingStorage(const LangOptions &LO) {
FPOptions result(LO);
return result;
}
+FPOptionsOverride FPOptions::getChangesSlow(const FPOptions &Base) const {
+ FPOptions::storage_type OverrideMask = 0;
+#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) \
+ if (get##NAME() != Base.get##NAME()) \
+ OverrideMask |= NAME##Mask;
+#include "clang/Basic/FPOptions.def"
+ return FPOptionsOverride(*this, OverrideMask);
+}
+
LLVM_DUMP_METHOD void FPOptions::dump() {
#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) \
llvm::errs() << "\n " #NAME " " << get##NAME();
diff --git a/contrib/llvm-project/clang/lib/Basic/LangStandards.cpp b/contrib/llvm-project/clang/lib/Basic/LangStandards.cpp
index ee27bfd12113..ab09c7221dda 100644
--- a/contrib/llvm-project/clang/lib/Basic/LangStandards.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/LangStandards.cpp
@@ -7,10 +7,45 @@
//===----------------------------------------------------------------------===//
#include "clang/Basic/LangStandard.h"
+#include "clang/Config/config.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/TargetParser/Triple.h"
using namespace clang;
+StringRef clang::languageToString(Language L) {
+ switch (L) {
+ case Language::Unknown:
+ return "Unknown";
+ case Language::Asm:
+ return "Asm";
+ case Language::LLVM_IR:
+ return "LLVM IR";
+ case Language::C:
+ return "C";
+ case Language::CXX:
+ return "C++";
+ case Language::ObjC:
+ return "Objective-C";
+ case Language::ObjCXX:
+ return "Objective-C++";
+ case Language::OpenCL:
+ return "OpenCL";
+ case Language::OpenCLCXX:
+ return "OpenCLC++";
+ case Language::CUDA:
+ return "CUDA";
+ case Language::RenderScript:
+ return "RenderScript";
+ case Language::HIP:
+ return "HIP";
+ case Language::HLSL:
+ return "HLSL";
+ }
+
+ llvm_unreachable("unhandled language kind");
+}
+
#define LANGSTANDARD(id, name, lang, desc, features) \
static const LangStandard Lang_##id = {name, desc, features, Language::lang};
#include "clang/Basic/LangStandards.def"
@@ -42,4 +77,33 @@ const LangStandard *LangStandard::getLangStandardForName(StringRef Name) {
return &getLangStandardForKind(K);
}
-
+LangStandard::Kind clang::getDefaultLanguageStandard(clang::Language Lang,
+ const llvm::Triple &T) {
+ switch (Lang) {
+ case Language::Unknown:
+ case Language::LLVM_IR:
+ llvm_unreachable("Invalid input kind!");
+ case Language::OpenCL:
+ return LangStandard::lang_opencl12;
+ case Language::OpenCLCXX:
+ return LangStandard::lang_openclcpp10;
+ case Language::Asm:
+ case Language::C:
+ // The PS4 uses C99 as the default C standard.
+ if (T.isPS4())
+ return LangStandard::lang_gnu99;
+ return LangStandard::lang_gnu17;
+ case Language::ObjC:
+ return LangStandard::lang_gnu11;
+ case Language::CXX:
+ case Language::ObjCXX:
+ case Language::CUDA:
+ case Language::HIP:
+ return LangStandard::lang_gnucxx17;
+ case Language::RenderScript:
+ return LangStandard::lang_c99;
+ case Language::HLSL:
+ return LangStandard::lang_hlsl2021;
+ }
+ llvm_unreachable("unhandled Language kind!");
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/MakeSupport.cpp b/contrib/llvm-project/clang/lib/Basic/MakeSupport.cpp
new file mode 100644
index 000000000000..4ddfcc350410
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/MakeSupport.cpp
@@ -0,0 +1,35 @@
+//===-- MakeSuport.cpp --------------------------------------------------*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/MakeSupport.h"
+
+void clang::quoteMakeTarget(StringRef Target, SmallVectorImpl<char> &Res) {
+ for (unsigned i = 0, e = Target.size(); i != e; ++i) {
+ switch (Target[i]) {
+ case ' ':
+ case '\t':
+ // Escape the preceding backslashes
+ for (int j = i - 1; j >= 0 && Target[j] == '\\'; --j)
+ Res.push_back('\\');
+
+ // Escape the space/tab
+ Res.push_back('\\');
+ break;
+ case '$':
+ Res.push_back('$');
+ break;
+ case '#':
+ Res.push_back('\\');
+ break;
+ default:
+ break;
+ }
+
+ Res.push_back(Target[i]);
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/Module.cpp b/contrib/llvm-project/clang/lib/Basic/Module.cpp
index b6cf1624ef01..0dac8748a98a 100644
--- a/contrib/llvm-project/clang/lib/Basic/Module.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Module.cpp
@@ -44,7 +44,7 @@ Module::Module(StringRef Name, SourceLocation DefinitionLoc, Module *Parent,
InferSubmodules(false), InferExplicitSubmodules(false),
InferExportWildcard(false), ConfigMacrosExhaustive(false),
NoUndeclaredIncludes(false), ModuleMapIsPrivate(false),
- NameVisibility(Hidden) {
+ NamedModuleHasInit(true), NameVisibility(Hidden) {
if (Parent) {
IsAvailable = Parent->isAvailable();
IsUnimportable = Parent->isUnimportable();
@@ -59,9 +59,8 @@ Module::Module(StringRef Name, SourceLocation DefinitionLoc, Module *Parent,
}
Module::~Module() {
- for (submodule_iterator I = submodule_begin(), IEnd = submodule_end();
- I != IEnd; ++I) {
- delete *I;
+ for (auto *Submodule : SubModules) {
+ delete Submodule;
}
}
@@ -90,7 +89,7 @@ static bool isPlatformEnvironment(const TargetInfo &Target, StringRef Feature) {
// where both are valid examples of the same platform+environment but in the
// variant (2) the simulator is hardcoded as part of the platform name. Both
// forms above should match for "iossimulator" requirement.
- if (Target.getTriple().isOSDarwin() && PlatformEnv.endswith("simulator"))
+ if (Target.getTriple().isOSDarwin() && PlatformEnv.ends_with("simulator"))
return PlatformEnv == Feature || CmpPlatformEnv(PlatformEnv, Feature);
return PlatformEnv == Feature;
@@ -108,9 +107,13 @@ static bool hasFeature(StringRef Feature, const LangOptions &LangOpts,
.Case("cplusplus11", LangOpts.CPlusPlus11)
.Case("cplusplus14", LangOpts.CPlusPlus14)
.Case("cplusplus17", LangOpts.CPlusPlus17)
+ .Case("cplusplus20", LangOpts.CPlusPlus20)
+ .Case("cplusplus23", LangOpts.CPlusPlus23)
+ .Case("cplusplus26", LangOpts.CPlusPlus26)
.Case("c99", LangOpts.C99)
.Case("c11", LangOpts.C11)
.Case("c17", LangOpts.C17)
+ .Case("c23", LangOpts.C23)
.Case("freestanding", LangOpts.Freestanding)
.Case("gnuinlineasm", LangOpts.GNUAsm)
.Case("objc", LangOpts.ObjC)
@@ -121,9 +124,7 @@ static bool hasFeature(StringRef Feature, const LangOptions &LangOpts,
.Default(Target.hasFeature(Feature) ||
isPlatformEnvironment(Target, Feature));
if (!HasFeature)
- HasFeature = std::find(LangOpts.ModuleFeatures.begin(),
- LangOpts.ModuleFeatures.end(),
- Feature) != LangOpts.ModuleFeatures.end();
+ HasFeature = llvm::is_contained(LangOpts.ModuleFeatures, Feature);
return HasFeature;
}
@@ -150,6 +151,28 @@ bool Module::isUnimportable(const LangOptions &LangOpts,
llvm_unreachable("could not find a reason why module is unimportable");
}
+// The -fmodule-name option tells the compiler to textually include headers in
+// the specified module, meaning Clang won't build the specified module. This
+// is useful in a number of situations, for instance, when building a library
+// that vends a module map, one might want to avoid hitting intermediate build
+// products containing the module map or avoid finding the system installed
+// modulemap for that library.
+bool Module::isForBuilding(const LangOptions &LangOpts) const {
+ StringRef TopLevelName = getTopLevelModuleName();
+ StringRef CurrentModule = LangOpts.CurrentModule;
+
+ // When building the implementation of framework Foo, we want to make sure
+ // that Foo *and* Foo_Private are textually included and no modules are built
+ // for either.
+ if (!LangOpts.isCompilingModule() && getTopLevelModule()->IsFramework &&
+ CurrentModule == LangOpts.ModuleName &&
+ !CurrentModule.ends_with("_Private") &&
+ TopLevelName.ends_with("_Private"))
+ TopLevelName = TopLevelName.drop_back(8);
+
+ return TopLevelName == CurrentModule;
+}
+
bool Module::isAvailable(const LangOptions &LangOpts, const TargetInfo &Target,
Requirement &Req,
UnresolvedHeaderDirective &MissingHeader,
@@ -203,7 +226,7 @@ static void printModuleId(raw_ostream &OS, InputIter Begin, InputIter End,
OS << ".";
StringRef Name = getModuleNameFromComponent(*It);
- if (!AllowStringLiterals || isValidIdentifier(Name))
+ if (!AllowStringLiterals || isValidAsciiIdentifier(Name))
OS << Name;
else {
OS << '"';
@@ -243,33 +266,31 @@ bool Module::fullModuleNameIs(ArrayRef<StringRef> nameParts) const {
return nameParts.empty();
}
-Module::DirectoryName Module::getUmbrellaDir() const {
- if (Header U = getUmbrellaHeader())
- return {"", "", U.Entry->getDir()};
-
- return {UmbrellaAsWritten, UmbrellaRelativeToRootModuleDirectory,
- Umbrella.dyn_cast<const DirectoryEntry *>()};
+OptionalDirectoryEntryRef Module::getEffectiveUmbrellaDir() const {
+ if (const auto *Hdr = std::get_if<FileEntryRef>(&Umbrella))
+ return Hdr->getDir();
+ if (const auto *Dir = std::get_if<DirectoryEntryRef>(&Umbrella))
+ return *Dir;
+ return std::nullopt;
}
-void Module::addTopHeader(const FileEntry *File) {
+void Module::addTopHeader(FileEntryRef File) {
assert(File);
TopHeaders.insert(File);
}
-ArrayRef<const FileEntry *> Module::getTopHeaders(FileManager &FileMgr) {
+ArrayRef<FileEntryRef> Module::getTopHeaders(FileManager &FileMgr) {
if (!TopHeaderNames.empty()) {
- for (std::vector<std::string>::iterator
- I = TopHeaderNames.begin(), E = TopHeaderNames.end(); I != E; ++I) {
- if (auto FE = FileMgr.getFile(*I))
+ for (StringRef TopHeaderName : TopHeaderNames)
+ if (auto FE = FileMgr.getOptionalFileRef(TopHeaderName))
TopHeaders.insert(*FE);
- }
TopHeaderNames.clear();
}
- return llvm::makeArrayRef(TopHeaders.begin(), TopHeaders.end());
+ return llvm::ArrayRef(TopHeaders.begin(), TopHeaders.end());
}
-bool Module::directlyUses(const Module *Requested) const {
+bool Module::directlyUses(const Module *Requested) {
auto *Top = getTopLevelModule();
// A top-level module implicitly uses itself.
@@ -280,10 +301,14 @@ bool Module::directlyUses(const Module *Requested) const {
if (Requested->isSubModuleOf(Use))
return true;
- // Anyone is allowed to use our builtin stddef.h and its accompanying module.
- if (!Requested->Parent && Requested->Name == "_Builtin_stddef_max_align_t")
+ // Anyone is allowed to use our builtin stddef.h and its accompanying modules.
+ if (Requested->fullModuleNameIs({"_Builtin_stddef", "max_align_t"}) ||
+ Requested->fullModuleNameIs({"_Builtin_stddef_wint_t"}))
return true;
+ if (NoUndeclaredIncludes)
+ UndeclaredUses.insert(Requested);
+
return false;
}
@@ -318,11 +343,9 @@ void Module::markUnavailable(bool Unimportable) {
Current->IsAvailable = false;
Current->IsUnimportable |= Unimportable;
- for (submodule_iterator Sub = Current->submodule_begin(),
- SubEnd = Current->submodule_end();
- Sub != SubEnd; ++Sub) {
- if (needUpdate(*Sub))
- Stack.push_back(*Sub);
+ for (auto *Submodule : Current->submodules()) {
+ if (needUpdate(Submodule))
+ Stack.push_back(Submodule);
}
}
}
@@ -350,6 +373,28 @@ Module *Module::findOrInferSubmodule(StringRef Name) {
return Result;
}
+Module *Module::getGlobalModuleFragment() const {
+ assert(isNamedModuleUnit() && "We should only query the global module "
+ "fragment from the C++ 20 Named modules");
+
+ for (auto *SubModule : SubModules)
+ if (SubModule->isExplicitGlobalModule())
+ return SubModule;
+
+ return nullptr;
+}
+
+Module *Module::getPrivateModuleFragment() const {
+ assert(isNamedModuleUnit() && "We should only query the private module "
+ "fragment from the C++ 20 Named modules");
+
+ for (auto *SubModule : SubModules)
+ if (SubModule->isPrivateModule())
+ return SubModule;
+
+ return nullptr;
+}
+
void Module::getExportedModules(SmallVectorImpl<Module *> &Exported) const {
// All non-explicit submodules are exported.
for (std::vector<Module *>::const_iterator I = SubModules.begin(),
@@ -462,15 +507,15 @@ void Module::print(raw_ostream &OS, unsigned Indent, bool Dump) const {
OS << "\n";
}
- if (Header H = getUmbrellaHeader()) {
+ if (std::optional<Header> H = getUmbrellaHeaderAsWritten()) {
OS.indent(Indent + 2);
OS << "umbrella header \"";
- OS.write_escaped(H.NameAsWritten);
+ OS.write_escaped(H->NameAsWritten);
OS << "\"\n";
- } else if (DirectoryName D = getUmbrellaDir()) {
+ } else if (std::optional<DirectoryName> D = getUmbrellaDirAsWritten()) {
OS.indent(Indent + 2);
OS << "umbrella \"";
- OS.write_escaped(D.NameAsWritten);
+ OS.write_escaped(D->NameAsWritten);
OS << "\"\n";
}
@@ -502,8 +547,8 @@ void Module::print(raw_ostream &OS, unsigned Indent, bool Dump) const {
OS.indent(Indent + 2);
OS << K.Prefix << "header \"";
OS.write_escaped(H.NameAsWritten);
- OS << "\" { size " << H.Entry->getSize()
- << " mtime " << H.Entry->getModificationTime() << " }\n";
+ OS << "\" { size " << H.Entry.getSize()
+ << " mtime " << H.Entry.getModificationTime() << " }\n";
}
}
for (auto *Unresolved : {&UnresolvedHeaders, &MissingHeaders}) {
@@ -529,14 +574,13 @@ void Module::print(raw_ostream &OS, unsigned Indent, bool Dump) const {
OS << "export_as" << ExportAsModule << "\n";
}
- for (submodule_const_iterator MI = submodule_begin(), MIEnd = submodule_end();
- MI != MIEnd; ++MI)
+ for (auto *Submodule : submodules())
// Print inferred subframework modules so that we don't need to re-infer
// them (requires expensive directory iteration + stat calls) when we build
// the module. Regular inferred submodules are OK, as we need to look at all
// those header files anyway.
- if (!(*MI)->IsInferred || (*MI)->IsFramework)
- (*MI)->print(OS, Indent + 2, Dump);
+ if (!Submodule->IsInferred || Submodule->IsFramework)
+ Submodule->print(OS, Indent + 2, Dump);
for (unsigned I = 0, N = Exports.size(); I != N; ++I) {
OS.indent(Indent + 2);
@@ -632,7 +676,9 @@ LLVM_DUMP_METHOD void Module::dump() const {
void VisibleModuleSet::setVisible(Module *M, SourceLocation Loc,
VisibleCallback Vis, ConflictCallback Cb) {
- assert(Loc.isValid() && "setVisible expects a valid import location");
+ // We can't import a global module fragment so the location can be invalid.
+ assert((M->isGlobalModule() || Loc.isValid()) &&
+ "setVisible expects a valid import location");
if (isVisible(M))
return;
@@ -652,7 +698,7 @@ void VisibleModuleSet::setVisible(Module *M, SourceLocation Loc,
return;
ImportLocs[ID] = Loc;
- Vis(M);
+ Vis(V.M);
// Make any exported modules visible.
SmallVector<Module *, 16> Exports;
@@ -675,6 +721,14 @@ void VisibleModuleSet::setVisible(Module *M, SourceLocation Loc,
VisitModule({M, nullptr});
}
+void VisibleModuleSet::makeTransitiveImportsVisible(Module *M,
+ SourceLocation Loc,
+ VisibleCallback Vis,
+ ConflictCallback Cb) {
+ for (auto *I : M->Imports)
+ setVisible(I, Loc, Vis, Cb);
+}
+
ASTSourceDescriptor::ASTSourceDescriptor(Module &M)
: Signature(M.Signature), ClangModule(&M) {
if (M.Directory)
diff --git a/contrib/llvm-project/clang/lib/Basic/NoSanitizeList.cpp b/contrib/llvm-project/clang/lib/Basic/NoSanitizeList.cpp
index 3efd613b0d33..e7e63c1f419e 100644
--- a/contrib/llvm-project/clang/lib/Basic/NoSanitizeList.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/NoSanitizeList.cpp
@@ -47,6 +47,11 @@ bool NoSanitizeList::containsFile(SanitizerMask Mask, StringRef FileName,
return SSCL->inSection(Mask, "src", FileName, Category);
}
+bool NoSanitizeList::containsMainFile(SanitizerMask Mask, StringRef FileName,
+ StringRef Category) const {
+ return SSCL->inSection(Mask, "mainfile", FileName, Category);
+}
+
bool NoSanitizeList::containsLocation(SanitizerMask Mask, SourceLocation Loc,
StringRef Category) const {
return Loc.isValid() &&
diff --git a/contrib/llvm-project/clang/lib/Basic/OpenCLOptions.cpp b/contrib/llvm-project/clang/lib/Basic/OpenCLOptions.cpp
index b7408f39bdab..d39686ea688e 100644
--- a/contrib/llvm-project/clang/lib/Basic/OpenCLOptions.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/OpenCLOptions.cpp
@@ -12,8 +12,21 @@
namespace clang {
+// First feature in a pair requires the second one to be supported.
+static const std::pair<StringRef, StringRef> DependentFeaturesList[] = {
+ {"__opencl_c_read_write_images", "__opencl_c_images"},
+ {"__opencl_c_3d_image_writes", "__opencl_c_images"},
+ {"__opencl_c_pipes", "__opencl_c_generic_address_space"},
+ {"__opencl_c_device_enqueue", "__opencl_c_generic_address_space"},
+ {"__opencl_c_device_enqueue", "__opencl_c_program_scope_global_variables"}};
+
+// Extensions and equivalent feature pairs.
+static const std::pair<StringRef, StringRef> FeatureExtensionMap[] = {
+ {"cl_khr_fp64", "__opencl_c_fp64"},
+ {"cl_khr_3d_image_writes", "__opencl_c_3d_image_writes"}};
+
bool OpenCLOptions::isKnown(llvm::StringRef Ext) const {
- return OptMap.find(Ext) != OptMap.end();
+ return OptMap.contains(Ext);
}
bool OpenCLOptions::isAvailableOption(llvm::StringRef Ext,
@@ -108,42 +121,32 @@ void OpenCLOptions::disableAll() {
bool OpenCLOptions::diagnoseUnsupportedFeatureDependencies(
const TargetInfo &TI, DiagnosticsEngine &Diags) {
- // Feature pairs. First feature in a pair requires the second one to be
- // supported.
- static const llvm::StringMap<llvm::StringRef> DependentFeaturesMap = {
- {"__opencl_c_read_write_images", "__opencl_c_images"},
- {"__opencl_c_3d_image_writes", "__opencl_c_images"},
- {"__opencl_c_pipes", "__opencl_c_generic_address_space"}};
-
auto OpenCLFeaturesMap = TI.getSupportedOpenCLOpts();
bool IsValid = true;
- for (auto &FeaturePair : DependentFeaturesMap)
- if (TI.hasFeatureEnabled(OpenCLFeaturesMap, FeaturePair.getKey()) &&
- !TI.hasFeatureEnabled(OpenCLFeaturesMap, FeaturePair.getValue())) {
+ for (auto &FeaturePair : DependentFeaturesList) {
+ auto Feature = FeaturePair.first;
+ auto Dep = FeaturePair.second;
+ if (TI.hasFeatureEnabled(OpenCLFeaturesMap, Feature) &&
+ !TI.hasFeatureEnabled(OpenCLFeaturesMap, Dep)) {
IsValid = false;
- Diags.Report(diag::err_opencl_feature_requires)
- << FeaturePair.getKey() << FeaturePair.getValue();
+ Diags.Report(diag::err_opencl_feature_requires) << Feature << Dep;
}
+ }
return IsValid;
}
bool OpenCLOptions::diagnoseFeatureExtensionDifferences(
const TargetInfo &TI, DiagnosticsEngine &Diags) {
- // Extensions and equivalent feature pairs.
- static const llvm::StringMap<llvm::StringRef> FeatureExtensionMap = {
- {"cl_khr_fp64", "__opencl_c_fp64"},
- {"cl_khr_3d_image_writes", "__opencl_c_3d_image_writes"}};
-
auto OpenCLFeaturesMap = TI.getSupportedOpenCLOpts();
bool IsValid = true;
for (auto &ExtAndFeat : FeatureExtensionMap)
- if (TI.hasFeatureEnabled(OpenCLFeaturesMap, ExtAndFeat.getKey()) !=
- TI.hasFeatureEnabled(OpenCLFeaturesMap, ExtAndFeat.getValue())) {
+ if (TI.hasFeatureEnabled(OpenCLFeaturesMap, ExtAndFeat.first) !=
+ TI.hasFeatureEnabled(OpenCLFeaturesMap, ExtAndFeat.second)) {
IsValid = false;
Diags.Report(diag::err_opencl_extension_and_feature_differs)
- << ExtAndFeat.getKey() << ExtAndFeat.getValue();
+ << ExtAndFeat.first << ExtAndFeat.second;
}
return IsValid;
}
diff --git a/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp b/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp
index cfdba09eb1ec..6c31b0824eb8 100644
--- a/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp
@@ -21,7 +21,7 @@ using namespace clang;
using namespace llvm::omp;
unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind, StringRef Str,
- unsigned OpenMPVersion) {
+ const LangOptions &LangOpts) {
switch (Kind) {
case OMPC_default:
return llvm::StringSwitch<unsigned>(Str)
@@ -41,11 +41,20 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind, StringRef Str,
.Case(#Name, static_cast<unsigned>(OMPC_SCHEDULE_MODIFIER_##Name))
#include "clang/Basic/OpenMPKinds.def"
.Default(OMPC_SCHEDULE_unknown);
- case OMPC_depend:
- return llvm::StringSwitch<OpenMPDependClauseKind>(Str)
+ case OMPC_depend: {
+ unsigned Type = llvm::StringSwitch<unsigned>(Str)
#define OPENMP_DEPEND_KIND(Name) .Case(#Name, OMPC_DEPEND_##Name)
#include "clang/Basic/OpenMPKinds.def"
- .Default(OMPC_DEPEND_unknown);
+ .Default(OMPC_DEPEND_unknown);
+ if (LangOpts.OpenMP < 51 && Type == OMPC_DEPEND_inoutset)
+ return OMPC_DEPEND_unknown;
+ return Type;
+ }
+ case OMPC_doacross:
+ return llvm::StringSwitch<OpenMPDoacrossClauseModifier>(Str)
+#define OPENMP_DOACROSS_MODIFIER(Name) .Case(#Name, OMPC_DOACROSS_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_DOACROSS_unknown);
case OMPC_linear:
return llvm::StringSwitch<OpenMPLinearClauseKind>(Str)
#define OPENMP_LINEAR_KIND(Name) .Case(#Name, OMPC_LINEAR_##Name)
@@ -59,7 +68,9 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind, StringRef Str,
.Case(#Name, static_cast<unsigned>(OMPC_MAP_MODIFIER_##Name))
#include "clang/Basic/OpenMPKinds.def"
.Default(OMPC_MAP_unknown);
- if (OpenMPVersion < 51 && Type == OMPC_MAP_MODIFIER_present)
+ if (LangOpts.OpenMP < 51 && Type == OMPC_MAP_MODIFIER_present)
+ return OMPC_MAP_MODIFIER_unknown;
+ if (!LangOpts.OpenMPExtensions && Type == OMPC_MAP_MODIFIER_ompx_hold)
return OMPC_MAP_MODIFIER_unknown;
return Type;
}
@@ -70,7 +81,7 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind, StringRef Str,
.Case(#Name, static_cast<unsigned>(OMPC_MOTION_MODIFIER_##Name))
#include "clang/Basic/OpenMPKinds.def"
.Default(OMPC_MOTION_MODIFIER_unknown);
- if (OpenMPVersion < 51 && Type == OMPC_MOTION_MODIFIER_present)
+ if (LangOpts.OpenMP < 51 && Type == OMPC_MOTION_MODIFIER_present)
return OMPC_MOTION_MODIFIER_unknown;
return Type;
}
@@ -93,19 +104,37 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind, StringRef Str,
.Case(#Name, OMPC_ATOMIC_DEFAULT_MEM_ORDER_##Name)
#include "clang/Basic/OpenMPKinds.def"
.Default(OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown);
+ case OMPC_fail:
+ return static_cast<unsigned int>(llvm::StringSwitch<llvm::omp::Clause>(Str)
+#define OPENMP_ATOMIC_FAIL_MODIFIER(Name) .Case(#Name, OMPC_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_unknown));
case OMPC_device_type:
return llvm::StringSwitch<OpenMPDeviceType>(Str)
#define OPENMP_DEVICE_TYPE_KIND(Name) .Case(#Name, OMPC_DEVICE_TYPE_##Name)
#include "clang/Basic/OpenMPKinds.def"
.Default(OMPC_DEVICE_TYPE_unknown);
+ case OMPC_at:
+ return llvm::StringSwitch<OpenMPAtClauseKind>(Str)
+#define OPENMP_AT_KIND(Name) .Case(#Name, OMPC_AT_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_AT_unknown);
+ case OMPC_severity:
+ return llvm::StringSwitch<OpenMPSeverityClauseKind>(Str)
+#define OPENMP_SEVERITY_KIND(Name) .Case(#Name, OMPC_SEVERITY_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_SEVERITY_unknown);
case OMPC_lastprivate:
return llvm::StringSwitch<OpenMPLastprivateModifier>(Str)
#define OPENMP_LASTPRIVATE_KIND(Name) .Case(#Name, OMPC_LASTPRIVATE_##Name)
#include "clang/Basic/OpenMPKinds.def"
.Default(OMPC_LASTPRIVATE_unknown);
case OMPC_order:
- return llvm::StringSwitch<OpenMPOrderClauseKind>(Str)
-#define OPENMP_ORDER_KIND(Name) .Case(#Name, OMPC_ORDER_##Name)
+ return llvm::StringSwitch<unsigned>(Str)
+#define OPENMP_ORDER_KIND(Name) \
+ .Case(#Name, static_cast<unsigned>(OMPC_ORDER_##Name))
+#define OPENMP_ORDER_MODIFIER(Name) \
+ .Case(#Name, static_cast<unsigned>(OMPC_ORDER_MODIFIER_##Name))
#include "clang/Basic/OpenMPKinds.def"
.Default(OMPC_ORDER_unknown);
case OMPC_update:
@@ -123,6 +152,34 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind, StringRef Str,
#define OPENMP_REDUCTION_MODIFIER(Name) .Case(#Name, OMPC_REDUCTION_##Name)
#include "clang/Basic/OpenMPKinds.def"
.Default(OMPC_REDUCTION_unknown);
+ case OMPC_adjust_args:
+ return llvm::StringSwitch<OpenMPAdjustArgsOpKind>(Str)
+#define OPENMP_ADJUST_ARGS_KIND(Name) .Case(#Name, OMPC_ADJUST_ARGS_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_ADJUST_ARGS_unknown);
+ case OMPC_bind:
+ return llvm::StringSwitch<unsigned>(Str)
+#define OPENMP_BIND_KIND(Name) .Case(#Name, OMPC_BIND_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_BIND_unknown);
+ case OMPC_grainsize: {
+ unsigned Type = llvm::StringSwitch<unsigned>(Str)
+#define OPENMP_GRAINSIZE_MODIFIER(Name) .Case(#Name, OMPC_GRAINSIZE_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_GRAINSIZE_unknown);
+ if (LangOpts.OpenMP < 51)
+ return OMPC_GRAINSIZE_unknown;
+ return Type;
+ }
+ case OMPC_num_tasks: {
+ unsigned Type = llvm::StringSwitch<unsigned>(Str)
+#define OPENMP_NUMTASKS_MODIFIER(Name) .Case(#Name, OMPC_NUMTASKS_##Name)
+#include "clang/Basic/OpenMPKinds.def"
+ .Default(OMPC_NUMTASKS_unknown);
+ if (LangOpts.OpenMP < 51)
+ return OMPC_NUMTASKS_unknown;
+ return Type;
+ }
case OMPC_unknown:
case OMPC_threadprivate:
case OMPC_if:
@@ -151,6 +208,7 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind, StringRef Str,
case OMPC_read:
case OMPC_write:
case OMPC_capture:
+ case OMPC_compare:
case OMPC_seq_cst:
case OMPC_acq_rel:
case OMPC_acquire:
@@ -161,14 +219,13 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind, StringRef Str,
case OMPC_num_teams:
case OMPC_thread_limit:
case OMPC_priority:
- case OMPC_grainsize:
case OMPC_nogroup:
- case OMPC_num_tasks:
case OMPC_hint:
case OMPC_uniform:
case OMPC_use_device_ptr:
case OMPC_use_device_addr:
case OMPC_is_device_ptr:
+ case OMPC_has_device_addr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
case OMPC_reverse_offload:
@@ -183,6 +240,8 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind, StringRef Str,
case OMPC_exclusive:
case OMPC_uses_allocators:
case OMPC_affinity:
+ case OMPC_when:
+ case OMPC_append_args:
break;
default:
break;
@@ -233,6 +292,16 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
#include "clang/Basic/OpenMPKinds.def"
}
llvm_unreachable("Invalid OpenMP 'depend' clause type");
+ case OMPC_doacross:
+ switch (Type) {
+ case OMPC_DOACROSS_unknown:
+ return "unknown";
+#define OPENMP_DOACROSS_MODIFIER(Name) \
+ case OMPC_DOACROSS_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'doacross' clause type");
case OMPC_linear:
switch (Type) {
case OMPC_LINEAR_unknown:
@@ -316,6 +385,26 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
#include "clang/Basic/OpenMPKinds.def"
}
llvm_unreachable("Invalid OpenMP 'device_type' clause type");
+ case OMPC_at:
+ switch (Type) {
+ case OMPC_AT_unknown:
+ return "unknown";
+#define OPENMP_AT_KIND(Name) \
+ case OMPC_AT_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'at' clause type");
+ case OMPC_severity:
+ switch (Type) {
+ case OMPC_SEVERITY_unknown:
+ return "unknown";
+#define OPENMP_SEVERITY_KIND(Name) \
+ case OMPC_SEVERITY_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'severity' clause type");
case OMPC_lastprivate:
switch (Type) {
case OMPC_LASTPRIVATE_unknown:
@@ -329,10 +418,14 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_order:
switch (Type) {
case OMPC_ORDER_unknown:
+ case OMPC_ORDER_MODIFIER_last:
return "unknown";
#define OPENMP_ORDER_KIND(Name) \
- case OMPC_ORDER_##Name: \
- return #Name;
+ case OMPC_ORDER_##Name: \
+ return #Name;
+#define OPENMP_ORDER_MODIFIER(Name) \
+ case OMPC_ORDER_MODIFIER_##Name: \
+ return #Name;
#include "clang/Basic/OpenMPKinds.def"
}
llvm_unreachable("Invalid OpenMP 'order' clause type");
@@ -346,6 +439,11 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
#include "clang/Basic/OpenMPKinds.def"
}
llvm_unreachable("Invalid OpenMP 'depend' clause type");
+ case OMPC_fail: {
+ OpenMPClauseKind CK = static_cast<OpenMPClauseKind>(Type);
+ return getOpenMPClauseName(CK).data();
+ llvm_unreachable("Invalid OpenMP 'fail' clause modifier");
+ }
case OMPC_device:
switch (Type) {
case OMPC_DEVICE_unknown:
@@ -366,6 +464,46 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
#include "clang/Basic/OpenMPKinds.def"
}
llvm_unreachable("Invalid OpenMP 'reduction' clause modifier");
+ case OMPC_adjust_args:
+ switch (Type) {
+ case OMPC_ADJUST_ARGS_unknown:
+ return "unknown";
+#define OPENMP_ADJUST_ARGS_KIND(Name) \
+ case OMPC_ADJUST_ARGS_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'adjust_args' clause kind");
+ case OMPC_bind:
+ switch (Type) {
+ case OMPC_BIND_unknown:
+ return "unknown";
+#define OPENMP_BIND_KIND(Name) \
+ case OMPC_BIND_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'bind' clause type");
+ case OMPC_grainsize:
+ switch (Type) {
+ case OMPC_GRAINSIZE_unknown:
+ return "unknown";
+#define OPENMP_GRAINSIZE_MODIFIER(Name) \
+ case OMPC_GRAINSIZE_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'grainsize' clause modifier");
+ case OMPC_num_tasks:
+ switch (Type) {
+ case OMPC_NUMTASKS_unknown:
+ return "unknown";
+#define OPENMP_NUMTASKS_MODIFIER(Name) \
+ case OMPC_NUMTASKS_##Name: \
+ return #Name;
+#include "clang/Basic/OpenMPKinds.def"
+ }
+ llvm_unreachable("Invalid OpenMP 'num_tasks' clause modifier");
case OMPC_unknown:
case OMPC_threadprivate:
case OMPC_if:
@@ -394,6 +532,7 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_read:
case OMPC_write:
case OMPC_capture:
+ case OMPC_compare:
case OMPC_seq_cst:
case OMPC_acq_rel:
case OMPC_acquire:
@@ -404,14 +543,13 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_num_teams:
case OMPC_thread_limit:
case OMPC_priority:
- case OMPC_grainsize:
case OMPC_nogroup:
- case OMPC_num_tasks:
case OMPC_hint:
case OMPC_uniform:
case OMPC_use_device_ptr:
case OMPC_use_device_addr:
case OMPC_is_device_ptr:
+ case OMPC_has_device_addr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
case OMPC_reverse_offload:
@@ -426,6 +564,8 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_exclusive:
case OMPC_uses_allocators:
case OMPC_affinity:
+ case OMPC_when:
+ case OMPC_append_args:
break;
default:
break;
@@ -440,7 +580,10 @@ bool clang::isOpenMPLoopDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_master_taskloop || DKind == OMPD_master_taskloop_simd ||
DKind == OMPD_parallel_master_taskloop ||
DKind == OMPD_parallel_master_taskloop_simd ||
- DKind == OMPD_distribute || DKind == OMPD_target_parallel_for ||
+ DKind == OMPD_masked_taskloop || DKind == OMPD_masked_taskloop_simd ||
+ DKind == OMPD_parallel_masked_taskloop || DKind == OMPD_distribute ||
+ DKind == OMPD_parallel_masked_taskloop_simd ||
+ DKind == OMPD_target_parallel_for ||
DKind == OMPD_distribute_parallel_for ||
DKind == OMPD_distribute_parallel_for_simd ||
DKind == OMPD_distribute_simd ||
@@ -453,7 +596,9 @@ bool clang::isOpenMPLoopDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_target_teams_distribute_parallel_for ||
DKind == OMPD_target_teams_distribute_parallel_for_simd ||
DKind == OMPD_target_teams_distribute_simd || DKind == OMPD_tile ||
- DKind == OMPD_unroll;
+ DKind == OMPD_unroll || DKind == OMPD_loop ||
+ DKind == OMPD_teams_loop || DKind == OMPD_target_teams_loop ||
+ DKind == OMPD_parallel_loop || DKind == OMPD_target_parallel_loop;
}
bool clang::isOpenMPWorksharingDirective(OpenMPDirectiveKind DKind) {
@@ -468,13 +613,18 @@ bool clang::isOpenMPWorksharingDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_teams_distribute_parallel_for_simd ||
DKind == OMPD_teams_distribute_parallel_for ||
DKind == OMPD_target_teams_distribute_parallel_for ||
- DKind == OMPD_target_teams_distribute_parallel_for_simd;
+ DKind == OMPD_target_teams_distribute_parallel_for_simd ||
+ DKind == OMPD_parallel_loop || DKind == OMPD_teams_loop ||
+ DKind == OMPD_target_parallel_loop || DKind == OMPD_target_teams_loop;
}
bool clang::isOpenMPTaskLoopDirective(OpenMPDirectiveKind DKind) {
return DKind == OMPD_taskloop || DKind == OMPD_taskloop_simd ||
DKind == OMPD_master_taskloop || DKind == OMPD_master_taskloop_simd ||
DKind == OMPD_parallel_master_taskloop ||
+ DKind == OMPD_masked_taskloop || DKind == OMPD_masked_taskloop_simd ||
+ DKind == OMPD_parallel_masked_taskloop ||
+ DKind == OMPD_parallel_masked_taskloop_simd ||
DKind == OMPD_parallel_master_taskloop_simd;
}
@@ -489,9 +639,13 @@ bool clang::isOpenMPParallelDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_teams_distribute_parallel_for_simd ||
DKind == OMPD_target_teams_distribute_parallel_for ||
DKind == OMPD_target_teams_distribute_parallel_for_simd ||
- DKind == OMPD_parallel_master ||
+ DKind == OMPD_parallel_master || DKind == OMPD_parallel_masked ||
DKind == OMPD_parallel_master_taskloop ||
- DKind == OMPD_parallel_master_taskloop_simd;
+ DKind == OMPD_parallel_master_taskloop_simd ||
+ DKind == OMPD_parallel_masked_taskloop ||
+ DKind == OMPD_parallel_masked_taskloop_simd ||
+ DKind == OMPD_parallel_loop || DKind == OMPD_target_parallel_loop ||
+ DKind == OMPD_teams_loop;
}
bool clang::isOpenMPTargetExecutionDirective(OpenMPDirectiveKind DKind) {
@@ -501,7 +655,8 @@ bool clang::isOpenMPTargetExecutionDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_target_teams || DKind == OMPD_target_teams_distribute ||
DKind == OMPD_target_teams_distribute_parallel_for ||
DKind == OMPD_target_teams_distribute_parallel_for_simd ||
- DKind == OMPD_target_teams_distribute_simd;
+ DKind == OMPD_target_teams_distribute_simd ||
+ DKind == OMPD_target_teams_loop || DKind == OMPD_target_parallel_loop;
}
bool clang::isOpenMPTargetDataManagementDirective(OpenMPDirectiveKind DKind) {
@@ -513,22 +668,26 @@ bool clang::isOpenMPNestingTeamsDirective(OpenMPDirectiveKind DKind) {
return DKind == OMPD_teams || DKind == OMPD_teams_distribute ||
DKind == OMPD_teams_distribute_simd ||
DKind == OMPD_teams_distribute_parallel_for_simd ||
- DKind == OMPD_teams_distribute_parallel_for;
+ DKind == OMPD_teams_distribute_parallel_for ||
+ DKind == OMPD_teams_loop;
}
bool clang::isOpenMPTeamsDirective(OpenMPDirectiveKind DKind) {
- return isOpenMPNestingTeamsDirective(DKind) ||
- DKind == OMPD_target_teams || DKind == OMPD_target_teams_distribute ||
+ return isOpenMPNestingTeamsDirective(DKind) || DKind == OMPD_target_teams ||
+ DKind == OMPD_target_teams_distribute ||
DKind == OMPD_target_teams_distribute_parallel_for ||
DKind == OMPD_target_teams_distribute_parallel_for_simd ||
- DKind == OMPD_target_teams_distribute_simd;
+ DKind == OMPD_target_teams_distribute_simd ||
+ DKind == OMPD_target_teams_loop;
}
bool clang::isOpenMPSimdDirective(OpenMPDirectiveKind DKind) {
return DKind == OMPD_simd || DKind == OMPD_for_simd ||
DKind == OMPD_parallel_for_simd || DKind == OMPD_taskloop_simd ||
DKind == OMPD_master_taskloop_simd ||
+ DKind == OMPD_masked_taskloop_simd ||
DKind == OMPD_parallel_master_taskloop_simd ||
+ DKind == OMPD_parallel_masked_taskloop_simd ||
DKind == OMPD_distribute_parallel_for_simd ||
DKind == OMPD_distribute_simd || DKind == OMPD_target_simd ||
DKind == OMPD_teams_distribute_simd ||
@@ -556,6 +715,12 @@ bool clang::isOpenMPDistributeDirective(OpenMPDirectiveKind Kind) {
Kind == OMPD_target_teams_distribute_simd;
}
+bool clang::isOpenMPGenericLoopDirective(OpenMPDirectiveKind Kind) {
+ return Kind == OMPD_loop || Kind == OMPD_teams_loop ||
+ Kind == OMPD_target_teams_loop || Kind == OMPD_parallel_loop ||
+ Kind == OMPD_target_parallel_loop;
+}
+
bool clang::isOpenMPPrivate(OpenMPClauseKind Kind) {
return Kind == OMPC_private || Kind == OMPC_firstprivate ||
Kind == OMPC_lastprivate || Kind == OMPC_linear ||
@@ -577,25 +742,46 @@ bool clang::isOpenMPLoopBoundSharingDirective(OpenMPDirectiveKind Kind) {
Kind == OMPD_teams_distribute_parallel_for_simd ||
Kind == OMPD_teams_distribute_parallel_for ||
Kind == OMPD_target_teams_distribute_parallel_for ||
- Kind == OMPD_target_teams_distribute_parallel_for_simd;
+ Kind == OMPD_target_teams_distribute_parallel_for_simd ||
+ Kind == OMPD_teams_loop || Kind == OMPD_target_teams_loop;
}
bool clang::isOpenMPLoopTransformationDirective(OpenMPDirectiveKind DKind) {
return DKind == OMPD_tile || DKind == OMPD_unroll;
}
+bool clang::isOpenMPCombinedParallelADirective(OpenMPDirectiveKind DKind) {
+ return DKind == OMPD_parallel_for || DKind == OMPD_parallel_for_simd ||
+ DKind == OMPD_parallel_master ||
+ DKind == OMPD_parallel_master_taskloop ||
+ DKind == OMPD_parallel_master_taskloop_simd ||
+ DKind == OMPD_parallel_sections;
+}
+
+bool clang::needsTaskBasedThreadLimit(OpenMPDirectiveKind DKind) {
+ return DKind == OMPD_target || DKind == OMPD_target_parallel ||
+ DKind == OMPD_target_parallel_for ||
+ DKind == OMPD_target_parallel_for_simd || DKind == OMPD_target_simd ||
+ DKind == OMPD_target_parallel_loop;
+}
+
void clang::getOpenMPCaptureRegions(
SmallVectorImpl<OpenMPDirectiveKind> &CaptureRegions,
OpenMPDirectiveKind DKind) {
assert(unsigned(DKind) < llvm::omp::Directive_enumSize);
switch (DKind) {
+ case OMPD_metadirective:
+ CaptureRegions.push_back(OMPD_metadirective);
+ break;
case OMPD_parallel:
case OMPD_parallel_for:
case OMPD_parallel_for_simd:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
case OMPD_parallel_sections:
case OMPD_distribute_parallel_for:
case OMPD_distribute_parallel_for_simd:
+ case OMPD_parallel_loop:
CaptureRegions.push_back(OMPD_parallel);
break;
case OMPD_target_teams:
@@ -615,6 +801,7 @@ void clang::getOpenMPCaptureRegions(
CaptureRegions.push_back(OMPD_task);
CaptureRegions.push_back(OMPD_target);
break;
+ case OMPD_teams_loop:
case OMPD_teams_distribute_parallel_for:
case OMPD_teams_distribute_parallel_for_simd:
CaptureRegions.push_back(OMPD_teams);
@@ -623,6 +810,7 @@ void clang::getOpenMPCaptureRegions(
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
+ case OMPD_target_parallel_loop:
CaptureRegions.push_back(OMPD_task);
CaptureRegions.push_back(OMPD_target);
CaptureRegions.push_back(OMPD_parallel);
@@ -637,13 +825,18 @@ void clang::getOpenMPCaptureRegions(
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
case OMPD_master_taskloop_simd:
+ case OMPD_masked_taskloop:
+ case OMPD_masked_taskloop_simd:
CaptureRegions.push_back(OMPD_taskloop);
break;
+ case OMPD_parallel_masked_taskloop:
+ case OMPD_parallel_masked_taskloop_simd:
case OMPD_parallel_master_taskloop:
case OMPD_parallel_master_taskloop_simd:
CaptureRegions.push_back(OMPD_parallel);
CaptureRegions.push_back(OMPD_taskloop);
break;
+ case OMPD_target_teams_loop:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd:
CaptureRegions.push_back(OMPD_task);
@@ -651,6 +844,13 @@ void clang::getOpenMPCaptureRegions(
CaptureRegions.push_back(OMPD_teams);
CaptureRegions.push_back(OMPD_parallel);
break;
+ case OMPD_nothing:
+ CaptureRegions.push_back(OMPD_nothing);
+ break;
+ case OMPD_loop:
+ // TODO: 'loop' may require different capture regions depending on the bind
+ // clause or the parent directive when there is no bind clause. Use
+ // OMPD_unknown for now.
case OMPD_simd:
case OMPD_for:
case OMPD_for_simd:
@@ -665,6 +865,7 @@ void clang::getOpenMPCaptureRegions(
case OMPD_atomic:
case OMPD_target_data:
case OMPD_distribute_simd:
+ case OMPD_scope:
case OMPD_dispatch:
CaptureRegions.push_back(OMPD_unknown);
break;
@@ -676,6 +877,7 @@ void clang::getOpenMPCaptureRegions(
case OMPD_allocate:
case OMPD_taskyield:
case OMPD_barrier:
+ case OMPD_error:
case OMPD_taskwait:
case OMPD_cancellation_point:
case OMPD_cancel:
@@ -697,3 +899,10 @@ void clang::getOpenMPCaptureRegions(
llvm_unreachable("Unknown OpenMP directive");
}
}
+
+bool clang::checkFailClauseParameter(OpenMPClauseKind FailClauseParameter) {
+ return FailClauseParameter == llvm::omp::OMPC_acquire ||
+ FailClauseParameter == llvm::omp::OMPC_relaxed ||
+ FailClauseParameter == llvm::omp::OMPC_seq_cst;
+}
+
diff --git a/contrib/llvm-project/clang/lib/Basic/ParsedAttrInfo.cpp b/contrib/llvm-project/clang/lib/Basic/ParsedAttrInfo.cpp
new file mode 100644
index 000000000000..16fa314b642b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/ParsedAttrInfo.cpp
@@ -0,0 +1,32 @@
+//===- ParsedAttrInfo.cpp - Registry for attribute plugins ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Registry of attributes added by plugins which
+// derive the ParsedAttrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/ParsedAttrInfo.h"
+#include "llvm/Support/ManagedStatic.h"
+#include <list>
+#include <memory>
+
+using namespace clang;
+
+LLVM_INSTANTIATE_REGISTRY(ParsedAttrInfoRegistry)
+
+const std::list<std::unique_ptr<ParsedAttrInfo>> &
+clang::getAttributePluginInstances() {
+ static llvm::ManagedStatic<std::list<std::unique_ptr<ParsedAttrInfo>>>
+ PluginAttrInstances;
+ if (PluginAttrInstances->empty())
+ for (const auto &It : ParsedAttrInfoRegistry::entries())
+ PluginAttrInstances->emplace_back(It.instantiate());
+
+ return *PluginAttrInstances;
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/ProfileList.cpp b/contrib/llvm-project/clang/lib/Basic/ProfileList.cpp
index 2cb05c1c3c07..8fa16e2eb069 100644
--- a/contrib/llvm-project/clang/lib/Basic/ProfileList.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/ProfileList.cpp
@@ -17,6 +17,7 @@
#include "llvm/Support/SpecialCaseList.h"
#include "llvm/Support/raw_ostream.h"
+#include <optional>
using namespace clang;
@@ -35,8 +36,8 @@ public:
bool isEmpty() const { return Sections.empty(); }
bool hasPrefix(StringRef Prefix) const {
- for (auto &SectionIter : Sections)
- if (SectionIter.Entries.count(Prefix) > 0)
+ for (const auto &It : Sections)
+ if (It.second.Entries.count(Prefix) > 0)
return true;
return false;
}
@@ -58,7 +59,7 @@ ProfileSpecialCaseList::createOrDie(const std::vector<std::string> &Paths,
std::string Error;
if (auto PSCL = create(Paths, VFS, Error))
return PSCL;
- llvm::report_fatal_error(Error);
+ llvm::report_fatal_error(llvm::Twine(Error));
}
}
@@ -66,8 +67,7 @@ ProfileSpecialCaseList::createOrDie(const std::vector<std::string> &Paths,
ProfileList::ProfileList(ArrayRef<std::string> Paths, SourceManager &SM)
: SCL(ProfileSpecialCaseList::createOrDie(
Paths, SM.getFileManager().getVirtualFileSystem())),
- Empty(SCL->isEmpty()),
- Default(SCL->hasPrefix("fun") || SCL->hasPrefix("src")), SM(SM) {}
+ Empty(SCL->isEmpty()), SM(SM) {}
ProfileList::~ProfileList() = default;
@@ -85,30 +85,66 @@ static StringRef getSectionName(CodeGenOptions::ProfileInstrKind Kind) {
llvm_unreachable("Unhandled CodeGenOptions::ProfileInstrKind enum");
}
-llvm::Optional<bool>
+ProfileList::ExclusionType
+ProfileList::getDefault(CodeGenOptions::ProfileInstrKind Kind) const {
+ StringRef Section = getSectionName(Kind);
+ // Check for "default:<type>"
+ if (SCL->inSection(Section, "default", "allow"))
+ return Allow;
+ if (SCL->inSection(Section, "default", "skip"))
+ return Skip;
+ if (SCL->inSection(Section, "default", "forbid"))
+ return Forbid;
+ // If any cases use "fun" or "src", set the default to FORBID.
+ if (SCL->hasPrefix("fun") || SCL->hasPrefix("src"))
+ return Forbid;
+ return Allow;
+}
+
+std::optional<ProfileList::ExclusionType>
+ProfileList::inSection(StringRef Section, StringRef Prefix,
+ StringRef Query) const {
+ if (SCL->inSection(Section, Prefix, Query, "allow"))
+ return Allow;
+ if (SCL->inSection(Section, Prefix, Query, "skip"))
+ return Skip;
+ if (SCL->inSection(Section, Prefix, Query, "forbid"))
+ return Forbid;
+ if (SCL->inSection(Section, Prefix, Query))
+ return Allow;
+ return std::nullopt;
+}
+
+std::optional<ProfileList::ExclusionType>
ProfileList::isFunctionExcluded(StringRef FunctionName,
CodeGenOptions::ProfileInstrKind Kind) const {
StringRef Section = getSectionName(Kind);
+ // Check for "function:<regex>=<case>"
+ if (auto V = inSection(Section, "function", FunctionName))
+ return V;
if (SCL->inSection(Section, "!fun", FunctionName))
- return true;
+ return Forbid;
if (SCL->inSection(Section, "fun", FunctionName))
- return false;
- return None;
+ return Allow;
+ return std::nullopt;
}
-llvm::Optional<bool>
+std::optional<ProfileList::ExclusionType>
ProfileList::isLocationExcluded(SourceLocation Loc,
CodeGenOptions::ProfileInstrKind Kind) const {
return isFileExcluded(SM.getFilename(SM.getFileLoc(Loc)), Kind);
}
-llvm::Optional<bool>
+std::optional<ProfileList::ExclusionType>
ProfileList::isFileExcluded(StringRef FileName,
CodeGenOptions::ProfileInstrKind Kind) const {
StringRef Section = getSectionName(Kind);
+ // Check for "source:<regex>=<case>"
+ if (auto V = inSection(Section, "source", FileName))
+ return V;
if (SCL->inSection(Section, "!src", FileName))
- return true;
+ return Forbid;
if (SCL->inSection(Section, "src", FileName))
- return false;
- return None;
+ return Allow;
+ return std::nullopt;
}
diff --git a/contrib/llvm-project/clang/lib/Basic/SanitizerSpecialCaseList.cpp b/contrib/llvm-project/clang/lib/Basic/SanitizerSpecialCaseList.cpp
index 5bf8d39ffd95..b02e868cdaa4 100644
--- a/contrib/llvm-project/clang/lib/Basic/SanitizerSpecialCaseList.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/SanitizerSpecialCaseList.cpp
@@ -33,11 +33,12 @@ SanitizerSpecialCaseList::createOrDie(const std::vector<std::string> &Paths,
std::string Error;
if (auto SSCL = create(Paths, VFS, Error))
return SSCL;
- llvm::report_fatal_error(Error);
+ llvm::report_fatal_error(StringRef(Error));
}
void SanitizerSpecialCaseList::createSanitizerSections() {
- for (auto &S : Sections) {
+ for (auto &It : Sections) {
+ auto &S = It.second;
SanitizerMask Mask;
#define SANITIZER(NAME, ID) \
diff --git a/contrib/llvm-project/clang/lib/Basic/Sanitizers.cpp b/contrib/llvm-project/clang/lib/Basic/Sanitizers.cpp
index 7d903c8fdf5e..62ccdf8e9bbf 100644
--- a/contrib/llvm-project/clang/lib/Basic/Sanitizers.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Sanitizers.cpp
@@ -61,7 +61,7 @@ namespace clang {
unsigned SanitizerMask::countPopulation() const {
unsigned total = 0;
for (const auto &Val : maskLoToHigh)
- total += llvm::countPopulation(Val);
+ total += llvm::popcount(Val);
return total;
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Sarif.cpp b/contrib/llvm-project/clang/lib/Basic/Sarif.cpp
new file mode 100644
index 000000000000..1cae7b937bc6
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/Sarif.cpp
@@ -0,0 +1,425 @@
+//===-- clang/Basic/Sarif.cpp - SarifDocumentWriter class definition ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the declaration of the SARIFDocumentWriter class, and
+/// associated builders such as:
+/// - \ref SarifArtifact
+/// - \ref SarifArtifactLocation
+/// - \ref SarifRule
+/// - \ref SarifResult
+//===----------------------------------------------------------------------===//
+#include "clang/Basic/Sarif.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/JSON.h"
+#include "llvm/Support/Path.h"
+
+#include <optional>
+#include <string>
+#include <utility>
+
+using namespace clang;
+using namespace llvm;
+
+using clang::detail::SarifArtifact;
+using clang::detail::SarifArtifactLocation;
+
+static StringRef getFileName(FileEntryRef FE) {
+ StringRef Filename = FE.getFileEntry().tryGetRealPathName();
+ if (Filename.empty())
+ Filename = FE.getName();
+ return Filename;
+}
+/// \name URI
+/// @{
+
+/// \internal
+/// \brief
+/// Return the RFC3986 encoding of the input character.
+///
+/// \param C Character to encode to RFC3986.
+///
+/// \return The RFC3986 representation of \c C.
+static std::string percentEncodeURICharacter(char C) {
+ // RFC 3986 claims alpha, numeric, and this handful of
+ // characters are not reserved for the path component and
+ // should be written out directly. Otherwise, percent
+ // encode the character and write that out instead of the
+ // reserved character.
+ if (llvm::isAlnum(C) ||
+ StringRef::npos != StringRef("-._~:@!$&'()*+,;=").find(C))
+ return std::string(&C, 1);
+ return "%" + llvm::toHex(StringRef(&C, 1));
+}
+
+/// \internal
+/// \brief Return a URI representing the given file name.
+///
+/// \param Filename The filename to be represented as URI.
+///
+/// \return RFC3986 URI representing the input file name.
+static std::string fileNameToURI(StringRef Filename) {
+ SmallString<32> Ret = StringRef("file://");
+
+ // Get the root name to see if it has a URI authority.
+ StringRef Root = sys::path::root_name(Filename);
+ if (Root.starts_with("//")) {
+ // There is an authority, so add it to the URI.
+ Ret += Root.drop_front(2).str();
+ } else if (!Root.empty()) {
+ // There is no authority, so end the component and add the root to the URI.
+ Ret += Twine("/" + Root).str();
+ }
+
+ auto Iter = sys::path::begin(Filename), End = sys::path::end(Filename);
+ assert(Iter != End && "Expected there to be a non-root path component.");
+ // Add the rest of the path components, encoding any reserved characters;
+ // we skip past the first path component, as it was handled it above.
+ for (StringRef Component : llvm::make_range(++Iter, End)) {
+ // For reasons unknown to me, we may get a backslash with Windows native
+ // paths for the initial backslash following the drive component, which
+ // we need to ignore as a URI path part.
+ if (Component == "\\")
+ continue;
+
+ // Add the separator between the previous path part and the one being
+ // currently processed.
+ Ret += "/";
+
+ // URI encode the part.
+ for (char C : Component) {
+ Ret += percentEncodeURICharacter(C);
+ }
+ }
+
+ return std::string(Ret);
+}
+/// @}
+
+/// \brief Calculate the column position expressed in the number of UTF-8 code
+/// points from column start to the source location
+///
+/// \param Loc The source location whose column needs to be calculated.
+/// \param TokenLen Optional hint for when the token is multiple bytes long.
+///
+/// \return The column number as a UTF-8 aware byte offset from column start to
+/// the effective source location.
+static unsigned int adjustColumnPos(FullSourceLoc Loc,
+ unsigned int TokenLen = 0) {
+ assert(!Loc.isInvalid() && "invalid Loc when adjusting column position");
+
+ std::pair<FileID, unsigned> LocInfo = Loc.getDecomposedExpansionLoc();
+ std::optional<MemoryBufferRef> Buf =
+ Loc.getManager().getBufferOrNone(LocInfo.first);
+ assert(Buf && "got an invalid buffer for the location's file");
+ assert(Buf->getBufferSize() >= (LocInfo.second + TokenLen) &&
+ "token extends past end of buffer?");
+
+ // Adjust the offset to be the start of the line, since we'll be counting
+ // Unicode characters from there until our column offset.
+ unsigned int Off = LocInfo.second - (Loc.getExpansionColumnNumber() - 1);
+ unsigned int Ret = 1;
+ while (Off < (LocInfo.second + TokenLen)) {
+ Off += getNumBytesForUTF8(Buf->getBuffer()[Off]);
+ Ret++;
+ }
+
+ return Ret;
+}
+
+/// \name SARIF Utilities
+/// @{
+
+/// \internal
+json::Object createMessage(StringRef Text) {
+ return json::Object{{"text", Text.str()}};
+}
+
+/// \internal
+/// \pre CharSourceRange must be a token range
+static json::Object createTextRegion(const SourceManager &SM,
+ const CharSourceRange &R) {
+ FullSourceLoc BeginCharLoc{R.getBegin(), SM};
+ FullSourceLoc EndCharLoc{R.getEnd(), SM};
+ json::Object Region{{"startLine", BeginCharLoc.getExpansionLineNumber()},
+ {"startColumn", adjustColumnPos(BeginCharLoc)}};
+
+ if (BeginCharLoc == EndCharLoc) {
+ Region["endColumn"] = adjustColumnPos(BeginCharLoc);
+ } else {
+ Region["endLine"] = EndCharLoc.getExpansionLineNumber();
+ Region["endColumn"] = adjustColumnPos(EndCharLoc);
+ }
+ return Region;
+}
+
+static json::Object createLocation(json::Object &&PhysicalLocation,
+ StringRef Message = "") {
+ json::Object Ret{{"physicalLocation", std::move(PhysicalLocation)}};
+ if (!Message.empty())
+ Ret.insert({"message", createMessage(Message)});
+ return Ret;
+}
+
+static StringRef importanceToStr(ThreadFlowImportance I) {
+ switch (I) {
+ case ThreadFlowImportance::Important:
+ return "important";
+ case ThreadFlowImportance::Essential:
+ return "essential";
+ case ThreadFlowImportance::Unimportant:
+ return "unimportant";
+ }
+ llvm_unreachable("Fully covered switch is not so fully covered");
+}
+
+static StringRef resultLevelToStr(SarifResultLevel R) {
+ switch (R) {
+ case SarifResultLevel::None:
+ return "none";
+ case SarifResultLevel::Note:
+ return "note";
+ case SarifResultLevel::Warning:
+ return "warning";
+ case SarifResultLevel::Error:
+ return "error";
+ }
+ llvm_unreachable("Potentially un-handled SarifResultLevel. "
+ "Is the switch not fully covered?");
+}
+
+static json::Object
+createThreadFlowLocation(json::Object &&Location,
+ const ThreadFlowImportance &Importance) {
+ return json::Object{{"location", std::move(Location)},
+ {"importance", importanceToStr(Importance)}};
+}
+/// @}
+
+json::Object
+SarifDocumentWriter::createPhysicalLocation(const CharSourceRange &R) {
+ assert(R.isValid() &&
+ "Cannot create a physicalLocation from invalid SourceRange!");
+ assert(R.isCharRange() &&
+ "Cannot create a physicalLocation from a token range!");
+ FullSourceLoc Start{R.getBegin(), SourceMgr};
+ OptionalFileEntryRef FE = Start.getExpansionLoc().getFileEntryRef();
+ assert(FE && "Diagnostic does not exist within a valid file!");
+
+ const std::string &FileURI = fileNameToURI(getFileName(*FE));
+ auto I = CurrentArtifacts.find(FileURI);
+
+ if (I == CurrentArtifacts.end()) {
+ uint32_t Idx = static_cast<uint32_t>(CurrentArtifacts.size());
+ const SarifArtifactLocation &Location =
+ SarifArtifactLocation::create(FileURI).setIndex(Idx);
+ const SarifArtifact &Artifact = SarifArtifact::create(Location)
+ .setRoles({"resultFile"})
+ .setLength(FE->getSize())
+ .setMimeType("text/plain");
+ auto StatusIter = CurrentArtifacts.insert({FileURI, Artifact});
+ // If inserted, ensure the original iterator points to the newly inserted
+ // element, so it can be used downstream.
+ if (StatusIter.second)
+ I = StatusIter.first;
+ }
+ assert(I != CurrentArtifacts.end() && "Failed to insert new artifact");
+ const SarifArtifactLocation &Location = I->second.Location;
+ json::Object ArtifactLocationObject{{"uri", Location.URI}};
+ if (Location.Index.has_value())
+ ArtifactLocationObject["index"] = *Location.Index;
+ return json::Object{{{"artifactLocation", std::move(ArtifactLocationObject)},
+ {"region", createTextRegion(SourceMgr, R)}}};
+}
+
+json::Object &SarifDocumentWriter::getCurrentTool() {
+ assert(!Closed && "SARIF Document is closed. "
+ "Need to call createRun() before using getcurrentTool!");
+
+ // Since Closed = false here, expect there to be at least 1 Run, anything
+ // else is an invalid state.
+ assert(!Runs.empty() && "There are no runs associated with the document!");
+
+ return *Runs.back().getAsObject()->get("tool")->getAsObject();
+}
+
+void SarifDocumentWriter::reset() {
+ CurrentRules.clear();
+ CurrentArtifacts.clear();
+}
+
+void SarifDocumentWriter::endRun() {
+ // Exit early if trying to close a closed Document.
+ if (Closed) {
+ reset();
+ return;
+ }
+
+ // Since Closed = false here, expect there to be at least 1 Run, anything
+ // else is an invalid state.
+ assert(!Runs.empty() && "There are no runs associated with the document!");
+
+ // Flush all the rules.
+ json::Object &Tool = getCurrentTool();
+ json::Array Rules;
+ for (const SarifRule &R : CurrentRules) {
+ json::Object Config{
+ {"enabled", R.DefaultConfiguration.Enabled},
+ {"level", resultLevelToStr(R.DefaultConfiguration.Level)},
+ {"rank", R.DefaultConfiguration.Rank}};
+ json::Object Rule{
+ {"name", R.Name},
+ {"id", R.Id},
+ {"fullDescription", json::Object{{"text", R.Description}}},
+ {"defaultConfiguration", std::move(Config)}};
+ if (!R.HelpURI.empty())
+ Rule["helpUri"] = R.HelpURI;
+ Rules.emplace_back(std::move(Rule));
+ }
+ json::Object &Driver = *Tool.getObject("driver");
+ Driver["rules"] = std::move(Rules);
+
+ // Flush all the artifacts.
+ json::Object &Run = getCurrentRun();
+ json::Array *Artifacts = Run.getArray("artifacts");
+ SmallVector<std::pair<StringRef, SarifArtifact>, 0> Vec;
+ for (const auto &[K, V] : CurrentArtifacts)
+ Vec.emplace_back(K, V);
+ llvm::sort(Vec, llvm::less_first());
+ for (const auto &[_, A] : Vec) {
+ json::Object Loc{{"uri", A.Location.URI}};
+ if (A.Location.Index.has_value()) {
+ Loc["index"] = static_cast<int64_t>(*A.Location.Index);
+ }
+ json::Object Artifact;
+ Artifact["location"] = std::move(Loc);
+ if (A.Length.has_value())
+ Artifact["length"] = static_cast<int64_t>(*A.Length);
+ if (!A.Roles.empty())
+ Artifact["roles"] = json::Array(A.Roles);
+ if (!A.MimeType.empty())
+ Artifact["mimeType"] = A.MimeType;
+ if (A.Offset.has_value())
+ Artifact["offset"] = *A.Offset;
+ Artifacts->push_back(json::Value(std::move(Artifact)));
+ }
+
+ // Clear, reset temporaries before next run.
+ reset();
+
+ // Mark the document as closed.
+ Closed = true;
+}
+
+json::Array
+SarifDocumentWriter::createThreadFlows(ArrayRef<ThreadFlow> ThreadFlows) {
+ json::Object Ret{{"locations", json::Array{}}};
+ json::Array Locs;
+ for (const auto &ThreadFlow : ThreadFlows) {
+ json::Object PLoc = createPhysicalLocation(ThreadFlow.Range);
+ json::Object Loc = createLocation(std::move(PLoc), ThreadFlow.Message);
+ Locs.emplace_back(
+ createThreadFlowLocation(std::move(Loc), ThreadFlow.Importance));
+ }
+ Ret["locations"] = std::move(Locs);
+ return json::Array{std::move(Ret)};
+}
+
+json::Object
+SarifDocumentWriter::createCodeFlow(ArrayRef<ThreadFlow> ThreadFlows) {
+ return json::Object{{"threadFlows", createThreadFlows(ThreadFlows)}};
+}
+
+void SarifDocumentWriter::createRun(StringRef ShortToolName,
+ StringRef LongToolName,
+ StringRef ToolVersion) {
+ // Clear resources associated with a previous run.
+ endRun();
+
+ // Signify a new run has begun.
+ Closed = false;
+
+ json::Object Tool{
+ {"driver",
+ json::Object{{"name", ShortToolName},
+ {"fullName", LongToolName},
+ {"language", "en-US"},
+ {"version", ToolVersion},
+ {"informationUri",
+ "https://clang.llvm.org/docs/UsersManual.html"}}}};
+ json::Object TheRun{{"tool", std::move(Tool)},
+ {"results", {}},
+ {"artifacts", {}},
+ {"columnKind", "unicodeCodePoints"}};
+ Runs.emplace_back(std::move(TheRun));
+}
+
+json::Object &SarifDocumentWriter::getCurrentRun() {
+ assert(!Closed &&
+ "SARIF Document is closed. "
+ "Can only getCurrentRun() if document is opened via createRun(), "
+ "create a run first");
+
+ // Since Closed = false here, expect there to be at least 1 Run, anything
+ // else is an invalid state.
+ assert(!Runs.empty() && "There are no runs associated with the document!");
+ return *Runs.back().getAsObject();
+}
+
+size_t SarifDocumentWriter::createRule(const SarifRule &Rule) {
+ size_t Ret = CurrentRules.size();
+ CurrentRules.emplace_back(Rule);
+ return Ret;
+}
+
+void SarifDocumentWriter::appendResult(const SarifResult &Result) {
+ size_t RuleIdx = Result.RuleIdx;
+ assert(RuleIdx < CurrentRules.size() &&
+ "Trying to reference a rule that doesn't exist");
+ const SarifRule &Rule = CurrentRules[RuleIdx];
+ assert(Rule.DefaultConfiguration.Enabled &&
+ "Cannot add a result referencing a disabled Rule");
+ json::Object Ret{{"message", createMessage(Result.DiagnosticMessage)},
+ {"ruleIndex", static_cast<int64_t>(RuleIdx)},
+ {"ruleId", Rule.Id}};
+ if (!Result.Locations.empty()) {
+ json::Array Locs;
+ for (auto &Range : Result.Locations) {
+ Locs.emplace_back(createLocation(createPhysicalLocation(Range)));
+ }
+ Ret["locations"] = std::move(Locs);
+ }
+ if (!Result.ThreadFlows.empty())
+ Ret["codeFlows"] = json::Array{createCodeFlow(Result.ThreadFlows)};
+
+ Ret["level"] = resultLevelToStr(
+ Result.LevelOverride.value_or(Rule.DefaultConfiguration.Level));
+
+ json::Object &Run = getCurrentRun();
+ json::Array *Results = Run.getArray("results");
+ Results->emplace_back(std::move(Ret));
+}
+
+json::Object SarifDocumentWriter::createDocument() {
+ // Flush all temporaries to their destinations if needed.
+ endRun();
+
+ json::Object Doc{
+ {"$schema", SchemaURI},
+ {"version", SchemaVersion},
+ };
+ if (!Runs.empty())
+ Doc["runs"] = json::Array(Runs);
+ return Doc;
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/SourceLocation.cpp b/contrib/llvm-project/clang/lib/Basic/SourceLocation.cpp
index 6986fcd322f2..37baf643a0a9 100644
--- a/contrib/llvm-project/clang/lib/Basic/SourceLocation.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/SourceLocation.cpp
@@ -42,11 +42,11 @@ void PrettyStackTraceLoc::print(raw_ostream &OS) const {
// SourceLocation
//===----------------------------------------------------------------------===//
-static_assert(std::is_trivially_destructible<SourceLocation>::value,
+static_assert(std::is_trivially_destructible_v<SourceLocation>,
"SourceLocation must be trivially destructible because it is "
"used in unions");
-static_assert(std::is_trivially_destructible<SourceRange>::value,
+static_assert(std::is_trivially_destructible_v<SourceRange>,
"SourceRange must be trivially destructible because it is "
"used in unions");
@@ -90,7 +90,7 @@ SourceLocation::printToString(const SourceManager &SM) const {
std::string S;
llvm::raw_string_ostream OS(S);
print(OS, SM);
- return OS.str();
+ return S;
}
LLVM_DUMP_METHOD void SourceLocation::dump(const SourceManager &SM) const {
@@ -149,7 +149,7 @@ SourceRange::printToString(const SourceManager &SM) const {
std::string S;
llvm::raw_string_ostream OS(S);
print(OS, SM);
- return OS.str();
+ return S;
}
//===----------------------------------------------------------------------===//
@@ -166,6 +166,10 @@ FullSourceLoc FullSourceLoc::getExpansionLoc() const {
return FullSourceLoc(SrcMgr->getExpansionLoc(*this), *SrcMgr);
}
+std::pair<FileID, unsigned> FullSourceLoc::getDecomposedExpansionLoc() const {
+ return SrcMgr->getDecomposedExpansionLoc(*this);
+}
+
FullSourceLoc FullSourceLoc::getSpellingLoc() const {
assert(isValid());
return FullSourceLoc(SrcMgr->getSpellingLoc(*this), *SrcMgr);
@@ -223,6 +227,11 @@ const FileEntry *FullSourceLoc::getFileEntry() const {
return SrcMgr->getFileEntryForID(getFileID());
}
+OptionalFileEntryRef FullSourceLoc::getFileEntryRef() const {
+ assert(isValid());
+ return SrcMgr->getFileEntryRefForID(getFileID());
+}
+
unsigned FullSourceLoc::getExpansionLineNumber(bool *Invalid) const {
assert(isValid());
return SrcMgr->getExpansionLineNumber(*this, Invalid);
diff --git a/contrib/llvm-project/clang/lib/Basic/SourceManager.cpp b/contrib/llvm-project/clang/lib/Basic/SourceManager.cpp
index 8cba379aa0f8..37734d3b10e7 100644
--- a/contrib/llvm-project/clang/lib/Basic/SourceManager.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/SourceManager.cpp
@@ -17,8 +17,7 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManagerInternals.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
@@ -38,6 +37,7 @@
#include <cstddef>
#include <cstdint>
#include <memory>
+#include <optional>
#include <tuple>
#include <utility>
#include <vector>
@@ -59,12 +59,10 @@ unsigned ContentCache::getSizeBytesMapped() const {
/// Returns the kind of memory used to back the memory buffer for
/// this content cache. This is used for performance analysis.
llvm::MemoryBuffer::BufferKind ContentCache::getMemoryBufferKind() const {
- assert(Buffer);
-
- // Should be unreachable, but keep for sanity.
- if (!Buffer)
+ if (Buffer == nullptr) {
+ assert(0 && "Buffer should never be null");
return llvm::MemoryBuffer::MemoryBuffer_Malloc;
-
+ }
return Buffer->getBufferKind();
}
@@ -100,23 +98,23 @@ const char *ContentCache::getInvalidBOM(StringRef BufStr) {
return InvalidBOM;
}
-llvm::Optional<llvm::MemoryBufferRef>
+std::optional<llvm::MemoryBufferRef>
ContentCache::getBufferOrNone(DiagnosticsEngine &Diag, FileManager &FM,
SourceLocation Loc) const {
// Lazily create the Buffer for ContentCaches that wrap files. If we already
// computed it, just return what we have.
if (IsBufferInvalid)
- return None;
+ return std::nullopt;
if (Buffer)
return Buffer->getMemBufferRef();
if (!ContentsEntry)
- return None;
+ return std::nullopt;
// Start with the assumption that the buffer is invalid to simplify early
// return paths.
IsBufferInvalid = true;
- auto BufferOrError = FM.getBufferForFile(ContentsEntry, IsFileVolatile);
+ auto BufferOrError = FM.getBufferForFile(*ContentsEntry, IsFileVolatile);
// If we were unable to open the file, then we are in an inconsistent
// situation where the content cache referenced a file which no longer
@@ -132,7 +130,7 @@ ContentCache::getBufferOrNone(DiagnosticsEngine &Diag, FileManager &FM,
Diag.Report(Loc, diag::err_cannot_open_file)
<< ContentsEntry->getName() << BufferOrError.getError().message();
- return None;
+ return std::nullopt;
}
Buffer = std::move(*BufferOrError);
@@ -154,7 +152,7 @@ ContentCache::getBufferOrNone(DiagnosticsEngine &Diag, FileManager &FM,
Diag.Report(Loc, diag::err_file_too_large)
<< ContentsEntry->getName();
- return None;
+ return std::nullopt;
}
// Unless this is a named pipe (in which case we can handle a mismatch),
@@ -169,7 +167,7 @@ ContentCache::getBufferOrNone(DiagnosticsEngine &Diag, FileManager &FM,
Diag.Report(Loc, diag::err_file_modified)
<< ContentsEntry->getName();
- return None;
+ return std::nullopt;
}
// If the buffer is valid, check to see if it has a UTF Byte Order Mark
@@ -181,7 +179,7 @@ ContentCache::getBufferOrNone(DiagnosticsEngine &Diag, FileManager &FM,
if (InvalidBOM) {
Diag.Report(Loc, diag::err_unsupported_bom)
<< InvalidBOM << ContentsEntry->getName();
- return None;
+ return std::nullopt;
}
// Buffer has been validated.
@@ -207,28 +205,30 @@ void LineTableInfo::AddLineNote(FileID FID, unsigned Offset, unsigned LineNo,
SrcMgr::CharacteristicKind FileKind) {
std::vector<LineEntry> &Entries = LineEntries[FID];
- // An unspecified FilenameID means use the last filename if available, or the
- // main source file otherwise.
- if (FilenameID == -1 && !Entries.empty())
- FilenameID = Entries.back().FilenameID;
-
assert((Entries.empty() || Entries.back().FileOffset < Offset) &&
"Adding line entries out of order!");
unsigned IncludeOffset = 0;
- if (EntryExit == 0) { // No #include stack change.
- IncludeOffset = Entries.empty() ? 0 : Entries.back().IncludeOffset;
- } else if (EntryExit == 1) {
+ if (EntryExit == 1) {
+ // Push #include
IncludeOffset = Offset-1;
- } else if (EntryExit == 2) {
- assert(!Entries.empty() && Entries.back().IncludeOffset &&
- "PPDirectives should have caught case when popping empty include stack");
-
- // Get the include loc of the last entries' include loc as our include loc.
- IncludeOffset = 0;
- if (const LineEntry *PrevEntry =
- FindNearestLineEntry(FID, Entries.back().IncludeOffset))
+ } else {
+ const auto *PrevEntry = Entries.empty() ? nullptr : &Entries.back();
+ if (EntryExit == 2) {
+ // Pop #include
+ assert(PrevEntry && PrevEntry->IncludeOffset &&
+ "PPDirectives should have caught case when popping empty include "
+ "stack");
+ PrevEntry = FindNearestLineEntry(FID, PrevEntry->IncludeOffset);
+ }
+ if (PrevEntry) {
IncludeOffset = PrevEntry->IncludeOffset;
+ if (FilenameID == -1) {
+ // An unspecified FilenameID means use the previous (or containing)
+ // filename if available, or the main source file otherwise.
+ FilenameID = PrevEntry->FilenameID;
+ }
+ }
}
Entries.push_back(LineEntry::get(Offset, LineNo, FilenameID, FileKind,
@@ -324,8 +324,7 @@ SourceManager::~SourceManager() {
ContentCacheAlloc.Deallocate(MemBufferInfos[i]);
}
}
- for (llvm::DenseMap<const FileEntry*, SrcMgr::ContentCache*>::iterator
- I = FileInfos.begin(), E = FileInfos.end(); I != E; ++I) {
+ for (auto I = FileInfos.begin(), E = FileInfos.end(); I != E; ++I) {
if (I->second) {
I->second->~ContentCache();
ContentCacheAlloc.Deallocate(I->second);
@@ -338,6 +337,7 @@ void SourceManager::clearIDTables() {
LocalSLocEntryTable.clear();
LoadedSLocEntryTable.clear();
SLocEntryLoaded.clear();
+ SLocEntryOffsetLoaded.clear();
LastLineNoFileIDQuery = FileID();
LastLineNoContentCache = nullptr;
LastFileIDLookup = FileID();
@@ -399,8 +399,7 @@ ContentCache &SourceManager::getOrCreateContentCache(FileEntryRef FileEnt,
if (OverriddenFilesInfo) {
// If the file contents are overridden with contents from another file,
// pass that file to ContentCache.
- llvm::DenseMap<const FileEntry *, const FileEntry *>::iterator
- overI = OverriddenFilesInfo->OverriddenFiles.find(FileEnt);
+ auto overI = OverriddenFilesInfo->OverriddenFiles.find(FileEnt);
if (overI == OverriddenFilesInfo->OverriddenFiles.end())
new (Entry) ContentCache(FileEnt);
else
@@ -455,13 +454,17 @@ SourceManager::AllocateLoadedSLocEntries(unsigned NumSLocEntries,
SourceLocation::UIntTy TotalSize) {
assert(ExternalSLocEntries && "Don't have an external sloc source");
// Make sure we're not about to run out of source locations.
- if (CurrentLoadedOffset - TotalSize < NextLocalOffset)
+ if (CurrentLoadedOffset < TotalSize ||
+ CurrentLoadedOffset - TotalSize < NextLocalOffset) {
return std::make_pair(0, 0);
+ }
LoadedSLocEntryTable.resize(LoadedSLocEntryTable.size() + NumSLocEntries);
SLocEntryLoaded.resize(LoadedSLocEntryTable.size());
+ SLocEntryOffsetLoaded.resize(LoadedSLocEntryTable.size());
CurrentLoadedOffset -= TotalSize;
- int ID = LoadedSLocEntryTable.size();
- return std::make_pair(-ID - 1, CurrentLoadedOffset);
+ int BaseID = -int(LoadedSLocEntryTable.size()) - 1;
+ LoadedSLocEntryAllocBegin.push_back(FileID::get(BaseID));
+ return std::make_pair(BaseID, CurrentLoadedOffset);
}
/// As part of recovering from missing or changed content, produce a
@@ -527,17 +530,6 @@ FileID SourceManager::getNextFileID(FileID FID) const {
/// Create a new FileID that represents the specified file
/// being \#included from the specified IncludePosition.
-///
-/// This translates NULL into standard input.
-FileID SourceManager::createFileID(const FileEntry *SourceFile,
- SourceLocation IncludePos,
- SrcMgr::CharacteristicKind FileCharacter,
- int LoadedID,
- SourceLocation::UIntTy LoadedOffset) {
- return createFileID(SourceFile->getLastRef(), IncludePos, FileCharacter,
- LoadedID, LoadedOffset);
-}
-
FileID SourceManager::createFileID(FileEntryRef SourceFile,
SourceLocation IncludePos,
SrcMgr::CharacteristicKind FileCharacter,
@@ -585,7 +577,7 @@ FileID SourceManager::createFileID(const llvm::MemoryBufferRef &Buffer,
/// Get the FileID for \p SourceFile if it exists. Otherwise, create a
/// new FileID for the \p SourceFile.
FileID
-SourceManager::getOrCreateFileID(const FileEntry *SourceFile,
+SourceManager::getOrCreateFileID(FileEntryRef SourceFile,
SrcMgr::CharacteristicKind FileCharacter) {
FileID ID = translateFile(SourceFile);
return ID.isValid() ? ID : createFileID(SourceFile, SourceLocation(),
@@ -607,13 +599,14 @@ FileID SourceManager::createFileIDImpl(ContentCache &File, StringRef Filename,
assert(!SLocEntryLoaded[Index] && "FileID already loaded");
LoadedSLocEntryTable[Index] = SLocEntry::get(
LoadedOffset, FileInfo::get(IncludePos, File, FileCharacter, Filename));
- SLocEntryLoaded[Index] = true;
+ SLocEntryLoaded[Index] = SLocEntryOffsetLoaded[Index] = true;
return FileID::get(LoadedID);
}
unsigned FileSize = File.getSize();
if (!(NextLocalOffset + FileSize + 1 > NextLocalOffset &&
NextLocalOffset + FileSize + 1 <= CurrentLoadedOffset)) {
- Diag.Report(IncludePos, diag::err_include_too_large);
+ Diag.Report(IncludePos, diag::err_sloc_space_too_large);
+ noteSLocAddressSpaceUsage(Diag);
return FileID();
}
LocalSLocEntryTable.push_back(
@@ -629,23 +622,21 @@ FileID SourceManager::createFileIDImpl(ContentCache &File, StringRef Filename,
return LastFileIDLookup = FID;
}
-SourceLocation
-SourceManager::createMacroArgExpansionLoc(SourceLocation SpellingLoc,
- SourceLocation ExpansionLoc,
- unsigned TokLength) {
+SourceLocation SourceManager::createMacroArgExpansionLoc(
+ SourceLocation SpellingLoc, SourceLocation ExpansionLoc, unsigned Length) {
ExpansionInfo Info = ExpansionInfo::createForMacroArg(SpellingLoc,
ExpansionLoc);
- return createExpansionLocImpl(Info, TokLength);
+ return createExpansionLocImpl(Info, Length);
}
SourceLocation SourceManager::createExpansionLoc(
SourceLocation SpellingLoc, SourceLocation ExpansionLocStart,
- SourceLocation ExpansionLocEnd, unsigned TokLength,
+ SourceLocation ExpansionLocEnd, unsigned Length,
bool ExpansionIsTokenRange, int LoadedID,
SourceLocation::UIntTy LoadedOffset) {
ExpansionInfo Info = ExpansionInfo::create(
SpellingLoc, ExpansionLocStart, ExpansionLocEnd, ExpansionIsTokenRange);
- return createExpansionLocImpl(Info, TokLength, LoadedID, LoadedOffset);
+ return createExpansionLocImpl(Info, Length, LoadedID, LoadedOffset);
}
SourceLocation SourceManager::createTokenSplitLoc(SourceLocation Spelling,
@@ -660,7 +651,7 @@ SourceLocation SourceManager::createTokenSplitLoc(SourceLocation Spelling,
SourceLocation
SourceManager::createExpansionLocImpl(const ExpansionInfo &Info,
- unsigned TokLength, int LoadedID,
+ unsigned Length, int LoadedID,
SourceLocation::UIntTy LoadedOffset) {
if (LoadedID < 0) {
assert(LoadedID != -1 && "Loading sentinel FileID");
@@ -668,27 +659,34 @@ SourceManager::createExpansionLocImpl(const ExpansionInfo &Info,
assert(Index < LoadedSLocEntryTable.size() && "FileID out of range");
assert(!SLocEntryLoaded[Index] && "FileID already loaded");
LoadedSLocEntryTable[Index] = SLocEntry::get(LoadedOffset, Info);
- SLocEntryLoaded[Index] = true;
+ SLocEntryLoaded[Index] = SLocEntryOffsetLoaded[Index] = true;
return SourceLocation::getMacroLoc(LoadedOffset);
}
LocalSLocEntryTable.push_back(SLocEntry::get(NextLocalOffset, Info));
- assert(NextLocalOffset + TokLength + 1 > NextLocalOffset &&
- NextLocalOffset + TokLength + 1 <= CurrentLoadedOffset &&
- "Ran out of source locations!");
+ if (NextLocalOffset + Length + 1 <= NextLocalOffset ||
+ NextLocalOffset + Length + 1 > CurrentLoadedOffset) {
+ Diag.Report(SourceLocation(), diag::err_sloc_space_too_large);
+ // FIXME: call `noteSLocAddressSpaceUsage` to report details to users and
+ // use a source location from `Info` to point at an error.
+ // Currently, both cause Clang to run indefinitely, this needs to be fixed.
+ // FIXME: return an error instead of crashing. Returning invalid source
+ // locations causes compiler to run indefinitely.
+ llvm::report_fatal_error("ran out of source locations");
+ }
// See createFileID for that +1.
- NextLocalOffset += TokLength + 1;
- return SourceLocation::getMacroLoc(NextLocalOffset - (TokLength + 1));
+ NextLocalOffset += Length + 1;
+ return SourceLocation::getMacroLoc(NextLocalOffset - (Length + 1));
}
-llvm::Optional<llvm::MemoryBufferRef>
-SourceManager::getMemoryBufferForFileOrNone(const FileEntry *File) {
- SrcMgr::ContentCache &IR = getOrCreateContentCache(File->getLastRef());
+std::optional<llvm::MemoryBufferRef>
+SourceManager::getMemoryBufferForFileOrNone(FileEntryRef File) {
+ SrcMgr::ContentCache &IR = getOrCreateContentCache(File);
return IR.getBufferOrNone(Diag, getFileManager(), SourceLocation());
}
void SourceManager::overrideFileContents(
- const FileEntry *SourceFile, std::unique_ptr<llvm::MemoryBuffer> Buffer) {
- SrcMgr::ContentCache &IR = getOrCreateContentCache(SourceFile->getLastRef());
+ FileEntryRef SourceFile, std::unique_ptr<llvm::MemoryBuffer> Buffer) {
+ SrcMgr::ContentCache &IR = getOrCreateContentCache(SourceFile);
IR.setBuffer(std::move(Buffer));
IR.BufferOverridden = true;
@@ -697,39 +695,43 @@ void SourceManager::overrideFileContents(
}
void SourceManager::overrideFileContents(const FileEntry *SourceFile,
- const FileEntry *NewFile) {
- assert(SourceFile->getSize() == NewFile->getSize() &&
+ FileEntryRef NewFile) {
+ assert(SourceFile->getSize() == NewFile.getSize() &&
"Different sizes, use the FileManager to create a virtual file with "
"the correct size");
- assert(FileInfos.count(SourceFile) == 0 &&
+ assert(FileInfos.find_as(SourceFile) == FileInfos.end() &&
"This function should be called at the initialization stage, before "
"any parsing occurs.");
- getOverriddenFilesInfo().OverriddenFiles[SourceFile] = NewFile;
+ // FileEntryRef is not default-constructible.
+ auto Pair = getOverriddenFilesInfo().OverriddenFiles.insert(
+ std::make_pair(SourceFile, NewFile));
+ if (!Pair.second)
+ Pair.first->second = NewFile;
}
-Optional<FileEntryRef>
+OptionalFileEntryRef
SourceManager::bypassFileContentsOverride(FileEntryRef File) {
assert(isFileOverridden(&File.getFileEntry()));
- llvm::Optional<FileEntryRef> BypassFile = FileMgr.getBypassFile(File);
+ OptionalFileEntryRef BypassFile = FileMgr.getBypassFile(File);
// If the file can't be found in the FS, give up.
if (!BypassFile)
- return None;
+ return std::nullopt;
(void)getOrCreateContentCache(*BypassFile);
return BypassFile;
}
-void SourceManager::setFileIsTransient(const FileEntry *File) {
- getOrCreateContentCache(File->getLastRef()).IsTransient = true;
+void SourceManager::setFileIsTransient(FileEntryRef File) {
+ getOrCreateContentCache(File).IsTransient = true;
}
-Optional<StringRef>
+std::optional<StringRef>
SourceManager::getNonBuiltinFilenameForID(FileID FID) const {
if (const SrcMgr::SLocEntry *Entry = getSLocEntryForFile(FID))
if (Entry->getFile().getContentCache().OrigEntry)
return Entry->getFile().getName();
- return None;
+ return std::nullopt;
}
StringRef SourceManager::getBufferData(FileID FID, bool *Invalid) const {
@@ -739,19 +741,19 @@ StringRef SourceManager::getBufferData(FileID FID, bool *Invalid) const {
return B ? *B : "<<<<<INVALID SOURCE LOCATION>>>>>";
}
-llvm::Optional<StringRef>
+std::optional<StringRef>
SourceManager::getBufferDataIfLoaded(FileID FID) const {
if (const SrcMgr::SLocEntry *Entry = getSLocEntryForFile(FID))
return Entry->getFile().getContentCache().getBufferDataIfLoaded();
- return None;
+ return std::nullopt;
}
-llvm::Optional<StringRef> SourceManager::getBufferDataOrNone(FileID FID) const {
+std::optional<StringRef> SourceManager::getBufferDataOrNone(FileID FID) const {
if (const SrcMgr::SLocEntry *Entry = getSLocEntryForFile(FID))
if (auto B = Entry->getFile().getContentCache().getBufferOrNone(
Diag, getFileManager(), SourceLocation()))
return B->getBuffer();
- return None;
+ return std::nullopt;
}
//===----------------------------------------------------------------------===//
@@ -792,24 +794,28 @@ FileID SourceManager::getFileIDLocal(SourceLocation::UIntTy SLocOffset) const {
// See if this is near the file point - worst case we start scanning from the
// most newly created FileID.
- const SrcMgr::SLocEntry *I;
- if (LastFileIDLookup.ID < 0 ||
- LocalSLocEntryTable[LastFileIDLookup.ID].getOffset() < SLocOffset) {
- // Neither loc prunes our search.
- I = LocalSLocEntryTable.end();
- } else {
- // Perhaps it is near the file point.
- I = LocalSLocEntryTable.begin()+LastFileIDLookup.ID;
+ // LessIndex - This is the lower bound of the range that we're searching.
+ // We know that the offset corresponding to the FileID is less than
+ // SLocOffset.
+ unsigned LessIndex = 0;
+ // upper bound of the search range.
+ unsigned GreaterIndex = LocalSLocEntryTable.size();
+ if (LastFileIDLookup.ID >= 0) {
+ // Use the LastFileIDLookup to prune the search space.
+ if (LocalSLocEntryTable[LastFileIDLookup.ID].getOffset() < SLocOffset)
+ LessIndex = LastFileIDLookup.ID;
+ else
+ GreaterIndex = LastFileIDLookup.ID;
}
- // Find the FileID that contains this. "I" is an iterator that points to a
- // FileID whose offset is known to be larger than SLocOffset.
+ // Find the FileID that contains this.
unsigned NumProbes = 0;
while (true) {
- --I;
- if (I->getOffset() <= SLocOffset) {
- FileID Res = FileID::get(int(I - LocalSLocEntryTable.begin()));
+ --GreaterIndex;
+ assert(GreaterIndex < LocalSLocEntryTable.size());
+ if (LocalSLocEntryTable[GreaterIndex].getOffset() <= SLocOffset) {
+ FileID Res = FileID::get(int(GreaterIndex));
// Remember it. We have good locality across FileID lookups.
LastFileIDLookup = Res;
NumLinearScans += NumProbes+1;
@@ -819,13 +825,6 @@ FileID SourceManager::getFileIDLocal(SourceLocation::UIntTy SLocOffset) const {
break;
}
- // Convert "I" back into an index. We know that it is an entry whose index is
- // larger than the offset we are looking for.
- unsigned GreaterIndex = I - LocalSLocEntryTable.begin();
- // LessIndex - This is the lower bound of the range that we're searching.
- // We know that the offset corresponding to the FileID is is less than
- // SLocOffset.
- unsigned LessIndex = 0;
NumProbes = 0;
while (true) {
unsigned MiddleIndex = (GreaterIndex-LessIndex)/2+LessIndex;
@@ -862,74 +861,12 @@ FileID SourceManager::getFileIDLocal(SourceLocation::UIntTy SLocOffset) const {
/// This function knows that the SourceLocation is in a loaded buffer, not a
/// local one.
FileID SourceManager::getFileIDLoaded(SourceLocation::UIntTy SLocOffset) const {
- // Sanity checking, otherwise a bug may lead to hanging in release build.
if (SLocOffset < CurrentLoadedOffset) {
assert(0 && "Invalid SLocOffset or bad function choice");
return FileID();
}
- // Essentially the same as the local case, but the loaded array is sorted
- // in the other direction.
-
- // First do a linear scan from the last lookup position, if possible.
- unsigned I;
- int LastID = LastFileIDLookup.ID;
- if (LastID >= 0 || getLoadedSLocEntryByID(LastID).getOffset() < SLocOffset)
- I = 0;
- else
- I = (-LastID - 2) + 1;
-
- unsigned NumProbes;
- for (NumProbes = 0; NumProbes < 8; ++NumProbes, ++I) {
- // Make sure the entry is loaded!
- const SrcMgr::SLocEntry &E = getLoadedSLocEntry(I);
- if (E.getOffset() <= SLocOffset) {
- FileID Res = FileID::get(-int(I) - 2);
- LastFileIDLookup = Res;
- NumLinearScans += NumProbes + 1;
- return Res;
- }
- }
-
- // Linear scan failed. Do the binary search. Note the reverse sorting of the
- // table: GreaterIndex is the one where the offset is greater, which is
- // actually a lower index!
- unsigned GreaterIndex = I;
- unsigned LessIndex = LoadedSLocEntryTable.size();
- NumProbes = 0;
- while (true) {
- ++NumProbes;
- unsigned MiddleIndex = (LessIndex - GreaterIndex) / 2 + GreaterIndex;
- const SrcMgr::SLocEntry &E = getLoadedSLocEntry(MiddleIndex);
- if (E.getOffset() == 0)
- return FileID(); // invalid entry.
-
- ++NumProbes;
-
- if (E.getOffset() > SLocOffset) {
- // Sanity checking, otherwise a bug may lead to hanging in release build.
- if (GreaterIndex == MiddleIndex) {
- assert(0 && "binary search missed the entry");
- return FileID();
- }
- GreaterIndex = MiddleIndex;
- continue;
- }
-
- if (isOffsetInFileID(FileID::get(-int(MiddleIndex) - 2), SLocOffset)) {
- FileID Res = FileID::get(-int(MiddleIndex) - 2);
- LastFileIDLookup = Res;
- NumBinaryProbes += NumProbes;
- return Res;
- }
-
- // Sanity checking, otherwise a bug may lead to hanging in release build.
- if (LessIndex == MiddleIndex) {
- assert(0 && "binary search missed the entry");
- return FileID();
- }
- LessIndex = MiddleIndex;
- }
+ return FileID::get(ExternalSLocEntries->getSLocEntryID(SLocOffset));
}
SourceLocation SourceManager::
@@ -1016,7 +953,7 @@ SourceLocation SourceManager::getImmediateSpellingLoc(SourceLocation Loc) const{
/// Return the filename of the file containing a SourceLocation.
StringRef SourceManager::getFilename(SourceLocation SpellingLoc) const {
- if (const FileEntry *F = getFileEntryForID(getFileID(SpellingLoc)))
+ if (OptionalFileEntryRef F = getFileEntryRefForID(getFileID(SpellingLoc)))
return F->getName();
return StringRef();
}
@@ -1169,7 +1106,7 @@ const char *SourceManager::getCharacterData(SourceLocation SL,
return "<<<<INVALID BUFFER>>>>";
}
- llvm::Optional<llvm::MemoryBufferRef> Buffer =
+ std::optional<llvm::MemoryBufferRef> Buffer =
Entry.getFile().getContentCache().getBufferOrNone(Diag, getFileManager(),
SourceLocation());
if (Invalid)
@@ -1182,7 +1119,7 @@ const char *SourceManager::getCharacterData(SourceLocation SL,
/// this is significantly cheaper to compute than the line number.
unsigned SourceManager::getColumnNumber(FileID FID, unsigned FilePos,
bool *Invalid) const {
- llvm::Optional<llvm::MemoryBufferRef> MemBuf = getBufferOrNone(FID);
+ std::optional<llvm::MemoryBufferRef> MemBuf = getBufferOrNone(FID);
if (Invalid)
*Invalid = !MemBuf;
@@ -1278,22 +1215,21 @@ LineOffsetMapping LineOffsetMapping::get(llvm::MemoryBufferRef Buffer,
// Line #1 starts at char 0.
LineOffsets.push_back(0);
- const unsigned char *Buf = (const unsigned char *)Buffer.getBufferStart();
+ const unsigned char *Start = (const unsigned char *)Buffer.getBufferStart();
const unsigned char *End = (const unsigned char *)Buffer.getBufferEnd();
- const std::size_t BufLen = End - Buf;
+ const unsigned char *Buf = Start;
- unsigned I = 0;
uint64_t Word;
// scan sizeof(Word) bytes at a time for new lines.
// This is much faster than scanning each byte independently.
- if (BufLen > sizeof(Word)) {
+ if ((unsigned long)(End - Start) > sizeof(Word)) {
do {
- Word = llvm::support::endian::read64(Buf + I, llvm::support::little);
+ Word = llvm::support::endian::read64(Buf, llvm::endianness::little);
// no new line => jump over sizeof(Word) bytes.
auto Mask = likelyhasbetween(Word, '\n', '\r');
if (!Mask) {
- I += sizeof(Word);
+ Buf += sizeof(Word);
continue;
}
@@ -1301,33 +1237,35 @@ LineOffsetMapping LineOffsetMapping::get(llvm::MemoryBufferRef Buffer,
// in [\n, \r + 1 [
// Scan for the next newline - it's very likely there's one.
- unsigned N =
- llvm::countTrailingZeros(Mask) - 7; // -7 because 0x80 is the marker
+ unsigned N = llvm::countr_zero(Mask) - 7; // -7 because 0x80 is the marker
Word >>= N;
- I += N / 8 + 1;
+ Buf += N / 8 + 1;
unsigned char Byte = Word;
- if (Byte == '\n') {
- LineOffsets.push_back(I);
- } else if (Byte == '\r') {
+ switch (Byte) {
+ case '\r':
// If this is \r\n, skip both characters.
- if (Buf[I] == '\n')
- ++I;
- LineOffsets.push_back(I);
- }
- } while (I < BufLen - sizeof(Word) - 1);
+ if (*Buf == '\n') {
+ ++Buf;
+ }
+ [[fallthrough]];
+ case '\n':
+ LineOffsets.push_back(Buf - Start);
+ };
+ } while (Buf < End - sizeof(Word) - 1);
}
// Handle tail using a regular check.
- while (I < BufLen) {
- if (Buf[I] == '\n') {
- LineOffsets.push_back(I + 1);
- } else if (Buf[I] == '\r') {
+ while (Buf < End) {
+ if (*Buf == '\n') {
+ LineOffsets.push_back(Buf - Start + 1);
+ } else if (*Buf == '\r') {
// If this is \r\n, skip both characters.
- if (I + 1 < BufLen && Buf[I + 1] == '\n')
- ++I;
- LineOffsets.push_back(I + 1);
+ if (Buf + 1 < End && Buf[1] == '\n') {
+ ++Buf;
+ }
+ LineOffsets.push_back(Buf - Start + 1);
}
- ++I;
+ ++Buf;
}
return LineOffsetMapping(LineOffsets, Alloc);
@@ -1368,9 +1306,9 @@ unsigned SourceManager::getLineNumber(FileID FID, unsigned FilePos,
}
// If this is the first use of line information for this buffer, compute the
- /// SourceLineCache for it on demand.
+ // SourceLineCache for it on demand.
if (!Content->SourceLineCache) {
- llvm::Optional<llvm::MemoryBufferRef> Buffer =
+ std::optional<llvm::MemoryBufferRef> Buffer =
Content->getBufferOrNone(Diag, getFileManager(), SourceLocation());
if (Invalid)
*Invalid = !Buffer;
@@ -1720,7 +1658,7 @@ SourceLocation SourceManager::translateLineCol(FileID FID,
// If this is the first use of line information for this buffer, compute the
// SourceLineCache for it on demand.
- llvm::Optional<llvm::MemoryBufferRef> Buffer =
+ std::optional<llvm::MemoryBufferRef> Buffer =
Content->getBufferOrNone(Diag, getFileManager());
if (!Buffer)
return SourceLocation();
@@ -1797,11 +1735,11 @@ void SourceManager::computeMacroArgsCache(MacroArgsMap &MacroArgsCache,
if (Entry.getFile().NumCreatedFIDs)
ID += Entry.getFile().NumCreatedFIDs - 1 /*because of next ++ID*/;
continue;
- } else if (IncludeLoc.isValid()) {
- // If file was included but not from FID, there is no more files/macros
- // that may be "contained" in this file.
- return;
}
+ // If file was included but not from FID, there is no more files/macros
+ // that may be "contained" in this file.
+ if (IncludeLoc.isValid())
+ return;
continue;
}
@@ -1973,14 +1911,39 @@ SourceManager::getDecomposedIncludedLoc(FileID FID) const {
return DecompLoc;
}
+bool SourceManager::isInTheSameTranslationUnitImpl(
+ const std::pair<FileID, unsigned> &LOffs,
+ const std::pair<FileID, unsigned> &ROffs) const {
+ // If one is local while the other is loaded.
+ if (isLoadedFileID(LOffs.first) != isLoadedFileID(ROffs.first))
+ return false;
+
+ if (isLoadedFileID(LOffs.first) && isLoadedFileID(ROffs.first)) {
+ auto FindSLocEntryAlloc = [this](FileID FID) {
+ // Loaded FileIDs are negative, we store the lowest FileID from each
+ // allocation, later allocations have lower FileIDs.
+ return llvm::lower_bound(LoadedSLocEntryAllocBegin, FID,
+ std::greater<FileID>{});
+ };
+
+ // If both are loaded from different AST files.
+ if (FindSLocEntryAlloc(LOffs.first) != FindSLocEntryAlloc(ROffs.first))
+ return false;
+ }
+
+ return true;
+}
+
/// Given a decomposed source location, move it up the include/expansion stack
-/// to the parent source location. If this is possible, return the decomposed
-/// version of the parent in Loc and return false. If Loc is the top-level
-/// entry, return true and don't modify it.
-static bool MoveUpIncludeHierarchy(std::pair<FileID, unsigned> &Loc,
- const SourceManager &SM) {
+/// to the parent source location within the same translation unit. If this is
+/// possible, return the decomposed version of the parent in Loc and return
+/// false. If Loc is a top-level entry, return true and don't modify it.
+static bool
+MoveUpTranslationUnitIncludeHierarchy(std::pair<FileID, unsigned> &Loc,
+ const SourceManager &SM) {
std::pair<FileID, unsigned> UpperLoc = SM.getDecomposedIncludedLoc(Loc.first);
- if (UpperLoc.first.isInvalid())
+ if (UpperLoc.first.isInvalid() ||
+ !SM.isInTheSameTranslationUnitImpl(UpperLoc, Loc))
return true; // We reached the top.
Loc = UpperLoc;
@@ -1994,6 +1957,7 @@ InBeforeInTUCacheEntry &SourceManager::getInBeforeInTUCache(FileID LFID,
// This is a magic number for limiting the cache size. It was experimentally
// derived from a small Objective-C project (where the cache filled
// out to ~250 items). We can make it larger if necessary.
+ // FIXME: this is almost certainly full these days. Use an LRU cache?
enum { MagicCacheSize = 300 };
IsBeforeInTUCacheKey Key(LFID, RFID);
@@ -2002,7 +1966,7 @@ InBeforeInTUCacheEntry &SourceManager::getInBeforeInTUCache(FileID LFID,
// use. When they update the value, the cache will get automatically
// updated as well.
if (IBTUCache.size() < MagicCacheSize)
- return IBTUCache[Key];
+ return IBTUCache.try_emplace(Key, LFID, RFID).first->second;
// Otherwise, do a lookup that will not construct a new value.
InBeforeInTUCache::iterator I = IBTUCache.find(Key);
@@ -2010,6 +1974,7 @@ InBeforeInTUCacheEntry &SourceManager::getInBeforeInTUCache(FileID LFID,
return I->second;
// Fall back to the overflow value.
+ IBTUCacheOverflow.setQueryFIDs(LFID, RFID);
return IBTUCacheOverflow;
}
@@ -2034,45 +1999,18 @@ bool SourceManager::isBeforeInTranslationUnit(SourceLocation LHS,
std::pair<bool, bool> InSameTU = isInTheSameTranslationUnit(LOffs, ROffs);
if (InSameTU.first)
return InSameTU.second;
-
- // If we arrived here, the location is either in a built-ins buffer or
- // associated with global inline asm. PR5662 and PR22576 are examples.
-
- StringRef LB = getBufferOrFake(LOffs.first).getBufferIdentifier();
- StringRef RB = getBufferOrFake(ROffs.first).getBufferIdentifier();
- bool LIsBuiltins = LB == "<built-in>";
- bool RIsBuiltins = RB == "<built-in>";
- // Sort built-in before non-built-in.
- if (LIsBuiltins || RIsBuiltins) {
- if (LIsBuiltins != RIsBuiltins)
- return LIsBuiltins;
- // Both are in built-in buffers, but from different files. We just claim that
- // lower IDs come first.
- return LOffs.first < ROffs.first;
- }
- bool LIsAsm = LB == "<inline asm>";
- bool RIsAsm = RB == "<inline asm>";
- // Sort assembler after built-ins, but before the rest.
- if (LIsAsm || RIsAsm) {
- if (LIsAsm != RIsAsm)
- return RIsAsm;
- assert(LOffs.first == ROffs.first);
- return false;
- }
- bool LIsScratch = LB == "<scratch space>";
- bool RIsScratch = RB == "<scratch space>";
- // Sort scratch after inline asm, but before the rest.
- if (LIsScratch || RIsScratch) {
- if (LIsScratch != RIsScratch)
- return LIsScratch;
- return LOffs.second < ROffs.second;
- }
- llvm_unreachable("Unsortable locations found");
+ // TODO: This should be unreachable, but some clients are calling this
+ // function before making sure LHS and RHS are in the same TU.
+ return LOffs.first < ROffs.first;
}
std::pair<bool, bool> SourceManager::isInTheSameTranslationUnit(
std::pair<FileID, unsigned> &LOffs,
std::pair<FileID, unsigned> &ROffs) const {
+ // If the source locations are not in the same TU, return early.
+ if (!isInTheSameTranslationUnitImpl(LOffs, ROffs))
+ return std::make_pair(false, false);
+
// If the source locations are in the same file, just compare offsets.
if (LOffs.first == ROffs.first)
return std::make_pair(true, LOffs.second < ROffs.second);
@@ -2084,58 +2022,115 @@ std::pair<bool, bool> SourceManager::isInTheSameTranslationUnit(
// If we are comparing a source location with multiple locations in the same
// file, we get a big win by caching the result.
- if (IsBeforeInTUCache.isCacheValid(LOffs.first, ROffs.first))
+ if (IsBeforeInTUCache.isCacheValid())
return std::make_pair(
true, IsBeforeInTUCache.getCachedResult(LOffs.second, ROffs.second));
- // Okay, we missed in the cache, start updating the cache for this query.
- IsBeforeInTUCache.setQueryFIDs(LOffs.first, ROffs.first,
- /*isLFIDBeforeRFID=*/LOffs.first.ID < ROffs.first.ID);
-
+ // Okay, we missed in the cache, we'll compute the answer and populate it.
// We need to find the common ancestor. The only way of doing this is to
// build the complete include chain for one and then walking up the chain
// of the other looking for a match.
- // We use a map from FileID to Offset to store the chain. Easier than writing
- // a custom set hash info that only depends on the first part of a pair.
- using LocSet = llvm::SmallDenseMap<FileID, unsigned, 16>;
- LocSet LChain;
+
+ // A location within a FileID on the path up from LOffs to the main file.
+ struct Entry {
+ std::pair<FileID, unsigned> DecomposedLoc; // FileID redundant, but clearer.
+ FileID ChildFID; // Used for breaking ties. Invalid for the initial loc.
+ };
+ llvm::SmallDenseMap<FileID, Entry, 16> LChain;
+
+ FileID LChild;
do {
- LChain.insert(LOffs);
+ LChain.try_emplace(LOffs.first, Entry{LOffs, LChild});
// We catch the case where LOffs is in a file included by ROffs and
// quit early. The other way round unfortunately remains suboptimal.
- } while (LOffs.first != ROffs.first && !MoveUpIncludeHierarchy(LOffs, *this));
- LocSet::iterator I;
- while((I = LChain.find(ROffs.first)) == LChain.end()) {
- if (MoveUpIncludeHierarchy(ROffs, *this))
- break; // Met at topmost file.
+ if (LOffs.first == ROffs.first)
+ break;
+ LChild = LOffs.first;
+ } while (!MoveUpTranslationUnitIncludeHierarchy(LOffs, *this));
+
+ FileID RChild;
+ do {
+ auto LIt = LChain.find(ROffs.first);
+ if (LIt != LChain.end()) {
+ // Compare the locations within the common file and cache them.
+ LOffs = LIt->second.DecomposedLoc;
+ LChild = LIt->second.ChildFID;
+ // The relative order of LChild and RChild is a tiebreaker when
+ // - locs expand to the same location (occurs in macro arg expansion)
+ // - one loc is a parent of the other (we consider the parent as "first")
+ // For the parent entry to be first, its invalid child file ID must
+ // compare smaller to the valid child file ID of the other entry.
+ // However loaded FileIDs are <0, so we perform *unsigned* comparison!
+ // This changes the relative order of local vs loaded FileIDs, but it
+ // doesn't matter as these are never mixed in macro expansion.
+ unsigned LChildID = LChild.ID;
+ unsigned RChildID = RChild.ID;
+ assert(((LOffs.second != ROffs.second) ||
+ (LChildID == 0 || RChildID == 0) ||
+ isInSameSLocAddrSpace(getComposedLoc(LChild, 0),
+ getComposedLoc(RChild, 0), nullptr)) &&
+ "Mixed local/loaded FileIDs with same include location?");
+ IsBeforeInTUCache.setCommonLoc(LOffs.first, LOffs.second, ROffs.second,
+ LChildID < RChildID);
+ return std::make_pair(
+ true, IsBeforeInTUCache.getCachedResult(LOffs.second, ROffs.second));
+ }
+ RChild = ROffs.first;
+ } while (!MoveUpTranslationUnitIncludeHierarchy(ROffs, *this));
+
+ // If we found no match, the location is either in a built-ins buffer or
+ // associated with global inline asm. PR5662 and PR22576 are examples.
+
+ StringRef LB = getBufferOrFake(LOffs.first).getBufferIdentifier();
+ StringRef RB = getBufferOrFake(ROffs.first).getBufferIdentifier();
+
+ bool LIsBuiltins = LB == "<built-in>";
+ bool RIsBuiltins = RB == "<built-in>";
+ // Sort built-in before non-built-in.
+ if (LIsBuiltins || RIsBuiltins) {
+ if (LIsBuiltins != RIsBuiltins)
+ return std::make_pair(true, LIsBuiltins);
+ // Both are in built-in buffers, but from different files. We just claim
+ // that lower IDs come first.
+ return std::make_pair(true, LOffs.first < ROffs.first);
}
- if (I != LChain.end())
- LOffs = *I;
- // If we exited because we found a nearest common ancestor, compare the
- // locations within the common file and cache them.
- if (LOffs.first == ROffs.first) {
- IsBeforeInTUCache.setCommonLoc(LOffs.first, LOffs.second, ROffs.second);
- return std::make_pair(
- true, IsBeforeInTUCache.getCachedResult(LOffs.second, ROffs.second));
+ bool LIsAsm = LB == "<inline asm>";
+ bool RIsAsm = RB == "<inline asm>";
+ // Sort assembler after built-ins, but before the rest.
+ if (LIsAsm || RIsAsm) {
+ if (LIsAsm != RIsAsm)
+ return std::make_pair(true, RIsAsm);
+ assert(LOffs.first == ROffs.first);
+ return std::make_pair(true, false);
+ }
+
+ bool LIsScratch = LB == "<scratch space>";
+ bool RIsScratch = RB == "<scratch space>";
+ // Sort scratch after inline asm, but before the rest.
+ if (LIsScratch || RIsScratch) {
+ if (LIsScratch != RIsScratch)
+ return std::make_pair(true, LIsScratch);
+ return std::make_pair(true, LOffs.second < ROffs.second);
}
- // Clear the lookup cache, it depends on a common location.
- IsBeforeInTUCache.clear();
- return std::make_pair(false, false);
+
+ llvm_unreachable("Unsortable locations found");
}
void SourceManager::PrintStats() const {
llvm::errs() << "\n*** Source Manager Stats:\n";
llvm::errs() << FileInfos.size() << " files mapped, " << MemBufferInfos.size()
<< " mem buffers mapped.\n";
- llvm::errs() << LocalSLocEntryTable.size() << " local SLocEntry's allocated ("
+ llvm::errs() << LocalSLocEntryTable.size() << " local SLocEntries allocated ("
<< llvm::capacity_in_bytes(LocalSLocEntryTable)
- << " bytes of capacity), "
- << NextLocalOffset << "B of Sloc address space used.\n";
+ << " bytes of capacity), " << NextLocalOffset
+ << "B of SLoc address space used.\n";
llvm::errs() << LoadedSLocEntryTable.size()
- << " loaded SLocEntries allocated, "
+ << " loaded SLocEntries allocated ("
+ << llvm::capacity_in_bytes(LoadedSLocEntryTable)
+ << " bytes of capacity), "
<< MaxLoadedOffset - CurrentLoadedOffset
- << "B of Sloc address space used.\n";
+ << "B of SLoc address space used.\n";
unsigned NumLineNumsComputed = 0;
unsigned NumFileBytesMapped = 0;
@@ -2156,7 +2151,7 @@ LLVM_DUMP_METHOD void SourceManager::dump() const {
llvm::raw_ostream &out = llvm::errs();
auto DumpSLocEntry = [&](int ID, const SrcMgr::SLocEntry &Entry,
- llvm::Optional<SourceLocation::UIntTy> NextStart) {
+ std::optional<SourceLocation::UIntTy> NextStart) {
out << "SLocEntry <FileID " << ID << "> " << (Entry.isFile() ? "file" : "expansion")
<< " <SourceLocation " << Entry.getOffset() << ":";
if (NextStart)
@@ -2196,15 +2191,103 @@ LLVM_DUMP_METHOD void SourceManager::dump() const {
: LocalSLocEntryTable[ID + 1].getOffset());
}
// Dump loaded SLocEntries.
- llvm::Optional<SourceLocation::UIntTy> NextStart;
+ std::optional<SourceLocation::UIntTy> NextStart;
for (unsigned Index = 0; Index != LoadedSLocEntryTable.size(); ++Index) {
int ID = -(int)Index - 2;
if (SLocEntryLoaded[Index]) {
DumpSLocEntry(ID, LoadedSLocEntryTable[Index], NextStart);
NextStart = LoadedSLocEntryTable[Index].getOffset();
} else {
- NextStart = None;
+ NextStart = std::nullopt;
+ }
+ }
+}
+
+void SourceManager::noteSLocAddressSpaceUsage(
+ DiagnosticsEngine &Diag, std::optional<unsigned> MaxNotes) const {
+ struct Info {
+ // A location where this file was entered.
+ SourceLocation Loc;
+ // Number of times this FileEntry was entered.
+ unsigned Inclusions = 0;
+ // Size usage from the file itself.
+ uint64_t DirectSize = 0;
+ // Total size usage from the file and its macro expansions.
+ uint64_t TotalSize = 0;
+ };
+ using UsageMap = llvm::MapVector<const FileEntry*, Info>;
+
+ UsageMap Usage;
+ uint64_t CountedSize = 0;
+
+ auto AddUsageForFileID = [&](FileID ID) {
+ // The +1 here is because getFileIDSize doesn't include the extra byte for
+ // the one-past-the-end location.
+ unsigned Size = getFileIDSize(ID) + 1;
+
+ // Find the file that used this address space, either directly or by
+ // macro expansion.
+ SourceLocation FileStart = getFileLoc(getComposedLoc(ID, 0));
+ FileID FileLocID = getFileID(FileStart);
+ const FileEntry *Entry = getFileEntryForID(FileLocID);
+
+ Info &EntryInfo = Usage[Entry];
+ if (EntryInfo.Loc.isInvalid())
+ EntryInfo.Loc = FileStart;
+ if (ID == FileLocID) {
+ ++EntryInfo.Inclusions;
+ EntryInfo.DirectSize += Size;
}
+ EntryInfo.TotalSize += Size;
+ CountedSize += Size;
+ };
+
+ // Loaded SLocEntries have indexes counting downwards from -2.
+ for (size_t Index = 0; Index != LoadedSLocEntryTable.size(); ++Index) {
+ AddUsageForFileID(FileID::get(-2 - Index));
+ }
+ // Local SLocEntries have indexes counting upwards from 0.
+ for (size_t Index = 0; Index != LocalSLocEntryTable.size(); ++Index) {
+ AddUsageForFileID(FileID::get(Index));
+ }
+
+ // Sort the usage by size from largest to smallest. Break ties by raw source
+ // location.
+ auto SortedUsage = Usage.takeVector();
+ auto Cmp = [](const UsageMap::value_type &A, const UsageMap::value_type &B) {
+ return A.second.TotalSize > B.second.TotalSize ||
+ (A.second.TotalSize == B.second.TotalSize &&
+ A.second.Loc < B.second.Loc);
+ };
+ auto SortedEnd = SortedUsage.end();
+ if (MaxNotes && SortedUsage.size() > *MaxNotes) {
+ SortedEnd = SortedUsage.begin() + *MaxNotes;
+ std::nth_element(SortedUsage.begin(), SortedEnd, SortedUsage.end(), Cmp);
+ }
+ std::sort(SortedUsage.begin(), SortedEnd, Cmp);
+
+ // Produce note on sloc address space usage total.
+ uint64_t LocalUsage = NextLocalOffset;
+ uint64_t LoadedUsage = MaxLoadedOffset - CurrentLoadedOffset;
+ int UsagePercent = static_cast<int>(100.0 * double(LocalUsage + LoadedUsage) /
+ MaxLoadedOffset);
+ Diag.Report(SourceLocation(), diag::note_total_sloc_usage)
+ << LocalUsage << LoadedUsage << (LocalUsage + LoadedUsage) << UsagePercent;
+
+ // Produce notes on sloc address space usage for each file with a high usage.
+ uint64_t ReportedSize = 0;
+ for (auto &[Entry, FileInfo] :
+ llvm::make_range(SortedUsage.begin(), SortedEnd)) {
+ Diag.Report(FileInfo.Loc, diag::note_file_sloc_usage)
+ << FileInfo.Inclusions << FileInfo.DirectSize
+ << (FileInfo.TotalSize - FileInfo.DirectSize);
+ ReportedSize += FileInfo.TotalSize;
+ }
+
+ // Describe any remaining usage not reported in the per-file usage.
+ if (ReportedSize != CountedSize) {
+ Diag.Report(SourceLocation(), diag::note_file_misc_sloc_usage)
+ << (SortedUsage.end() - SortedEnd) << CountedSize - ReportedSize;
}
}
@@ -2231,11 +2314,11 @@ SourceManager::MemoryBufferSizes SourceManager::getMemoryBufferSizes() const {
}
size_t SourceManager::getDataStructureSizes() const {
- size_t size = llvm::capacity_in_bytes(MemBufferInfos)
- + llvm::capacity_in_bytes(LocalSLocEntryTable)
- + llvm::capacity_in_bytes(LoadedSLocEntryTable)
- + llvm::capacity_in_bytes(SLocEntryLoaded)
- + llvm::capacity_in_bytes(FileInfos);
+ size_t size = llvm::capacity_in_bytes(MemBufferInfos) +
+ llvm::capacity_in_bytes(LocalSLocEntryTable) +
+ llvm::capacity_in_bytes(LoadedSLocEntryTable) +
+ llvm::capacity_in_bytes(SLocEntryLoaded) +
+ llvm::capacity_in_bytes(FileInfos);
if (OverriddenFilesInfo)
size += llvm::capacity_in_bytes(OverriddenFilesInfo->OverriddenFiles);
@@ -2263,8 +2346,9 @@ SourceManagerForFile::SourceManagerForFile(StringRef FileName,
IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs),
new DiagnosticOptions);
SourceMgr = std::make_unique<SourceManager>(*Diagnostics, *FileMgr);
- FileID ID = SourceMgr->createFileID(*FileMgr->getFile(FileName),
- SourceLocation(), clang::SrcMgr::C_User);
+ FileEntryRef FE = llvm::cantFail(FileMgr->getFileRef(FileName));
+ FileID ID =
+ SourceMgr->createFileID(FE, SourceLocation(), clang::SrcMgr::C_User);
assert(ID.isValid());
SourceMgr->setMainFileID(ID);
}
diff --git a/contrib/llvm-project/clang/lib/Basic/SourceMgrAdapter.cpp b/contrib/llvm-project/clang/lib/Basic/SourceMgrAdapter.cpp
new file mode 100644
index 000000000000..e39e4de9d42d
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/SourceMgrAdapter.cpp
@@ -0,0 +1,136 @@
+//=== SourceMgrAdapter.cpp - SourceMgr to SourceManager Adapter -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the adapter that maps diagnostics from llvm::SourceMgr
+// to Clang's SourceManager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/SourceMgrAdapter.h"
+#include "clang/Basic/Diagnostic.h"
+
+using namespace clang;
+
+void SourceMgrAdapter::handleDiag(const llvm::SMDiagnostic &Diag,
+ void *Context) {
+ static_cast<SourceMgrAdapter *>(Context)->handleDiag(Diag);
+}
+
+SourceMgrAdapter::SourceMgrAdapter(SourceManager &SM,
+ DiagnosticsEngine &Diagnostics,
+ unsigned ErrorDiagID, unsigned WarningDiagID,
+ unsigned NoteDiagID,
+ OptionalFileEntryRef DefaultFile)
+ : SrcMgr(SM), Diagnostics(Diagnostics), ErrorDiagID(ErrorDiagID),
+ WarningDiagID(WarningDiagID), NoteDiagID(NoteDiagID),
+ DefaultFile(DefaultFile) {}
+
+SourceMgrAdapter::~SourceMgrAdapter() {}
+
+SourceLocation SourceMgrAdapter::mapLocation(const llvm::SourceMgr &LLVMSrcMgr,
+ llvm::SMLoc Loc) {
+ // Map invalid locations.
+ if (!Loc.isValid())
+ return SourceLocation();
+
+ // Find the buffer containing the location.
+ unsigned BufferID = LLVMSrcMgr.FindBufferContainingLoc(Loc);
+ if (!BufferID)
+ return SourceLocation();
+
+ // If we haven't seen this buffer before, copy it over.
+ auto Buffer = LLVMSrcMgr.getMemoryBuffer(BufferID);
+ auto KnownBuffer = FileIDMapping.find(std::make_pair(&LLVMSrcMgr, BufferID));
+ if (KnownBuffer == FileIDMapping.end()) {
+ FileID FileID;
+ if (DefaultFile) {
+ // Map to the default file.
+ FileID = SrcMgr.getOrCreateFileID(*DefaultFile, SrcMgr::C_User);
+
+ // Only do this once.
+ DefaultFile = std::nullopt;
+ } else {
+ // Make a copy of the memory buffer.
+ StringRef bufferName = Buffer->getBufferIdentifier();
+ auto bufferCopy = std::unique_ptr<llvm::MemoryBuffer>(
+ llvm::MemoryBuffer::getMemBufferCopy(Buffer->getBuffer(),
+ bufferName));
+
+ // Add this memory buffer to the Clang source manager.
+ FileID = SrcMgr.createFileID(std::move(bufferCopy));
+ }
+
+ // Save the mapping.
+ KnownBuffer = FileIDMapping
+ .insert(std::make_pair(
+ std::make_pair(&LLVMSrcMgr, BufferID), FileID))
+ .first;
+ }
+
+ // Translate the offset into the file.
+ unsigned Offset = Loc.getPointer() - Buffer->getBufferStart();
+ return SrcMgr.getLocForStartOfFile(KnownBuffer->second)
+ .getLocWithOffset(Offset);
+}
+
+SourceRange SourceMgrAdapter::mapRange(const llvm::SourceMgr &LLVMSrcMgr,
+ llvm::SMRange Range) {
+ if (!Range.isValid())
+ return SourceRange();
+
+ SourceLocation Start = mapLocation(LLVMSrcMgr, Range.Start);
+ SourceLocation End = mapLocation(LLVMSrcMgr, Range.End);
+ return SourceRange(Start, End);
+}
+
+void SourceMgrAdapter::handleDiag(const llvm::SMDiagnostic &Diag) {
+ // Map the location.
+ SourceLocation Loc;
+ if (auto *LLVMSrcMgr = Diag.getSourceMgr())
+ Loc = mapLocation(*LLVMSrcMgr, Diag.getLoc());
+
+ // Extract the message.
+ StringRef Message = Diag.getMessage();
+
+ // Map the diagnostic kind.
+ unsigned DiagID;
+ switch (Diag.getKind()) {
+ case llvm::SourceMgr::DK_Error:
+ DiagID = ErrorDiagID;
+ break;
+
+ case llvm::SourceMgr::DK_Warning:
+ DiagID = WarningDiagID;
+ break;
+
+ case llvm::SourceMgr::DK_Remark:
+ llvm_unreachable("remarks not implemented");
+
+ case llvm::SourceMgr::DK_Note:
+ DiagID = NoteDiagID;
+ break;
+ }
+
+ // Report the diagnostic.
+ DiagnosticBuilder Builder = Diagnostics.Report(Loc, DiagID) << Message;
+
+ if (auto *LLVMSrcMgr = Diag.getSourceMgr()) {
+ // Translate ranges.
+ SourceLocation StartOfLine = Loc.getLocWithOffset(-Diag.getColumnNo());
+ for (auto Range : Diag.getRanges()) {
+ Builder << SourceRange(StartOfLine.getLocWithOffset(Range.first),
+ StartOfLine.getLocWithOffset(Range.second));
+ }
+
+ // Translate Fix-Its.
+ for (const llvm::SMFixIt &FixIt : Diag.getFixIts()) {
+ CharSourceRange Range(mapRange(*LLVMSrcMgr, FixIt.getRange()), false);
+ Builder << FixItHint::CreateReplacement(Range, FixIt.getText());
+ }
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/Stack.cpp b/contrib/llvm-project/clang/lib/Basic/Stack.cpp
index 5e4750931500..aa15d8e66950 100644
--- a/contrib/llvm-project/clang/lib/Basic/Stack.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Stack.cpp
@@ -12,7 +12,6 @@
//===----------------------------------------------------------------------===//
#include "clang/Basic/Stack.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/Support/CrashRecoveryContext.h"
#ifdef _MSC_VER
diff --git a/contrib/llvm-project/clang/lib/Basic/TargetID.cpp b/contrib/llvm-project/clang/lib/Basic/TargetID.cpp
index 59d416f0e015..3c06d9bad1dc 100644
--- a/contrib/llvm-project/clang/lib/Basic/TargetID.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/TargetID.cpp
@@ -8,14 +8,15 @@
#include "clang/Basic/TargetID.h"
#include "llvm/ADT/SmallSet.h"
-#include "llvm/ADT/Triple.h"
-#include "llvm/Support/TargetParser.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/TargetParser.h"
+#include "llvm/TargetParser/Triple.h"
#include <map>
+#include <optional>
namespace clang {
-static const llvm::SmallVector<llvm::StringRef, 4>
+static llvm::SmallVector<llvm::StringRef, 4>
getAllPossibleAMDGPUTargetIDFeatures(const llvm::Triple &T,
llvm::StringRef Proc) {
// Entries in returned vector should be in alphabetical order.
@@ -33,7 +34,7 @@ getAllPossibleAMDGPUTargetIDFeatures(const llvm::Triple &T,
return Ret;
}
-const llvm::SmallVector<llvm::StringRef, 4>
+llvm::SmallVector<llvm::StringRef, 4>
getAllPossibleTargetIDFeatures(const llvm::Triple &T,
llvm::StringRef Processor) {
llvm::SmallVector<llvm::StringRef, 4> Ret;
@@ -62,7 +63,7 @@ llvm::StringRef getProcessorFromTargetID(const llvm::Triple &T,
// A target ID is a processor name followed by a list of target features
// delimited by colon. Each target feature is a string post-fixed by a plus
// or minus sign, e.g. gfx908:sramecc+:xnack-.
-static llvm::Optional<llvm::StringRef>
+static std::optional<llvm::StringRef>
parseTargetIDWithFormatCheckingOnly(llvm::StringRef TargetID,
llvm::StringMap<bool> *FeatureMap) {
llvm::StringRef Processor;
@@ -73,7 +74,7 @@ parseTargetIDWithFormatCheckingOnly(llvm::StringRef TargetID,
auto Split = TargetID.split(':');
Processor = Split.first;
if (Processor.empty())
- return llvm::None;
+ return std::nullopt;
auto Features = Split.second;
if (Features.empty())
@@ -88,31 +89,30 @@ parseTargetIDWithFormatCheckingOnly(llvm::StringRef TargetID,
auto Sign = Splits.first.back();
auto Feature = Splits.first.drop_back();
if (Sign != '+' && Sign != '-')
- return llvm::None;
+ return std::nullopt;
bool IsOn = Sign == '+';
auto Loc = FeatureMap->find(Feature);
// Each feature can only show up at most once in target ID.
if (Loc != FeatureMap->end())
- return llvm::None;
+ return std::nullopt;
(*FeatureMap)[Feature] = IsOn;
Features = Splits.second;
}
return Processor;
}
-llvm::Optional<llvm::StringRef>
+std::optional<llvm::StringRef>
parseTargetID(const llvm::Triple &T, llvm::StringRef TargetID,
llvm::StringMap<bool> *FeatureMap) {
auto OptionalProcessor =
parseTargetIDWithFormatCheckingOnly(TargetID, FeatureMap);
if (!OptionalProcessor)
- return llvm::None;
+ return std::nullopt;
- llvm::StringRef Processor =
- getCanonicalProcessorName(T, OptionalProcessor.getValue());
+ llvm::StringRef Processor = getCanonicalProcessorName(T, *OptionalProcessor);
if (Processor.empty())
- return llvm::None;
+ return std::nullopt;
llvm::SmallSet<llvm::StringRef, 4> AllFeatures;
for (auto &&F : getAllPossibleTargetIDFeatures(T, Processor))
@@ -120,7 +120,7 @@ parseTargetID(const llvm::Triple &T, llvm::StringRef TargetID,
for (auto &&F : *FeatureMap)
if (!AllFeatures.count(F.first()))
- return llvm::None;
+ return std::nullopt;
return Processor;
}
@@ -133,7 +133,7 @@ std::string getCanonicalTargetID(llvm::StringRef Processor,
std::map<const llvm::StringRef, bool> OrderedMap;
for (const auto &F : Features)
OrderedMap[F.first()] = F.second;
- for (auto F : OrderedMap)
+ for (const auto &F : OrderedMap)
TargetID = TargetID + ':' + F.first.str() + (F.second ? "+" : "-");
return TargetID;
}
@@ -141,7 +141,7 @@ std::string getCanonicalTargetID(llvm::StringRef Processor,
// For a specific processor, a feature either shows up in all target IDs, or
// does not show up in any target IDs. Otherwise the target ID combination
// is invalid.
-llvm::Optional<std::pair<llvm::StringRef, llvm::StringRef>>
+std::optional<std::pair<llvm::StringRef, llvm::StringRef>>
getConflictTargetIDCombination(const std::set<llvm::StringRef> &TargetIDs) {
struct Info {
llvm::StringRef TargetID;
@@ -150,8 +150,7 @@ getConflictTargetIDCombination(const std::set<llvm::StringRef> &TargetIDs) {
llvm::StringMap<Info> FeatureMap;
for (auto &&ID : TargetIDs) {
llvm::StringMap<bool> Features;
- llvm::StringRef Proc =
- parseTargetIDWithFormatCheckingOnly(ID, &Features).getValue();
+ llvm::StringRef Proc = *parseTargetIDWithFormatCheckingOnly(ID, &Features);
auto Loc = FeatureMap.find(Proc);
if (Loc == FeatureMap.end())
FeatureMap[Proc] = Info{ID, Features};
@@ -163,7 +162,28 @@ getConflictTargetIDCombination(const std::set<llvm::StringRef> &TargetIDs) {
return std::make_pair(Loc->second.TargetID, ID);
}
}
- return llvm::None;
+ return std::nullopt;
+}
+
+bool isCompatibleTargetID(llvm::StringRef Provided, llvm::StringRef Requested) {
+ llvm::StringMap<bool> ProvidedFeatures, RequestedFeatures;
+ llvm::StringRef ProvidedProc =
+ *parseTargetIDWithFormatCheckingOnly(Provided, &ProvidedFeatures);
+ llvm::StringRef RequestedProc =
+ *parseTargetIDWithFormatCheckingOnly(Requested, &RequestedFeatures);
+ if (ProvidedProc != RequestedProc)
+ return false;
+ for (const auto &F : ProvidedFeatures) {
+ auto Loc = RequestedFeatures.find(F.first());
+ // The default (unspecified) value of a feature is 'All', which can match
+ // either 'On' or 'Off'.
+ if (Loc == RequestedFeatures.end())
+ return false;
+ // If a feature is specified, it must have exact match.
+ if (Loc->second != F.second)
+ return false;
+ }
+ return true;
}
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp b/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp
index 5f8e04c2bd6c..96b3ad9ba2f2 100644
--- a/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements the TargetInfo and TargetInfoImpl interfaces.
+// This file implements the TargetInfo interface.
//
//===----------------------------------------------------------------------===//
@@ -14,18 +14,44 @@
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticFrontend.h"
#include "clang/Basic/LangOptions.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/TargetParser/TargetParser.h"
#include <cstdlib>
using namespace clang;
static const LangASMap DefaultAddrSpaceMap = {0};
+// The fake address space map must have a distinct entry for each
+// language-specific address space.
+static const LangASMap FakeAddrSpaceMap = {
+ 0, // Default
+ 1, // opencl_global
+ 3, // opencl_local
+ 2, // opencl_constant
+ 0, // opencl_private
+ 4, // opencl_generic
+ 5, // opencl_global_device
+ 6, // opencl_global_host
+ 7, // cuda_device
+ 8, // cuda_constant
+ 9, // cuda_shared
+ 1, // sycl_global
+ 5, // sycl_global_device
+ 6, // sycl_global_host
+ 3, // sycl_local
+ 0, // sycl_private
+ 10, // ptr32_sptr
+ 11, // ptr32_uptr
+ 12, // ptr64
+ 13, // hlsl_groupshared
+ 20, // wasm_funcref
+};
// TargetInfo Constructor.
-TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
+TargetInfo::TargetInfo(const llvm::Triple &T) : Triple(T) {
// Set defaults. Defaults are set for a 32-bit RISC platform, like PPC or
// SPARC. These should be overridden by concrete targets as needed.
BigEndian = !T.isLittleEndian();
@@ -33,15 +59,21 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
VLASupported = true;
NoAsmVariants = false;
HasLegalHalfType = false;
+ HalfArgsAndReturns = false;
HasFloat128 = false;
+ HasIbm128 = false;
HasFloat16 = false;
HasBFloat16 = false;
+ HasFullBFloat16 = false;
+ HasLongDouble = true;
+ HasFPReturn = true;
HasStrictFP = false;
PointerWidth = PointerAlign = 32;
BoolWidth = BoolAlign = 8;
IntWidth = IntAlign = 32;
LongWidth = LongAlign = 32;
LongLongWidth = LongLongAlign = 64;
+ Int128Align = 128;
// Fixed point default bit widths
ShortAccumWidth = ShortAccumAlign = 16;
@@ -66,11 +98,12 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
// From the glibc documentation, on GNU systems, malloc guarantees 16-byte
// alignment on 64-bit systems and 8-byte alignment on 32-bit systems. See
// https://www.gnu.org/software/libc/manual/html_node/Malloc-Examples.html.
- // This alignment guarantee also applies to Windows and Android. On Darwin,
- // the alignment is 16 bytes on both 64-bit and 32-bit systems.
- if (T.isGNUEnvironment() || T.isWindowsMSVCEnvironment() || T.isAndroid())
+ // This alignment guarantee also applies to Windows and Android. On Darwin
+ // and OpenBSD, the alignment is 16 bytes on both 64-bit and 32-bit systems.
+ if (T.isGNUEnvironment() || T.isWindowsMSVCEnvironment() || T.isAndroid() ||
+ T.isOHOSFamily())
NewAlign = Triple.isArch64Bit() ? 128 : Triple.isArch32Bit() ? 64 : 0;
- else if (T.isOSDarwin())
+ else if (T.isOSDarwin() || T.isOSOpenBSD())
NewAlign = 128;
else
NewAlign = 0; // Infer from basic type alignment.
@@ -83,12 +116,12 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
LongDoubleWidth = 64;
LongDoubleAlign = 64;
Float128Align = 128;
+ Ibm128Align = 128;
LargeArrayMinWidth = 0;
LargeArrayAlign = 0;
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 0;
MaxVectorAlign = 0;
MaxTLSAlign = 0;
- SimdDefaultAlign = 0;
SizeType = UnsignedLong;
PtrDiffType = SignedLong;
IntMaxType = SignedLongLong;
@@ -113,6 +146,7 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
DoubleFormat = &llvm::APFloat::IEEEdouble();
LongDoubleFormat = &llvm::APFloat::IEEEdouble();
Float128Format = &llvm::APFloat::IEEEquad();
+ Ibm128Format = &llvm::APFloat::PPCDoubleDouble();
MCountName = "mcount";
UserLabelPrefix = "_";
RegParmMax = 0;
@@ -126,7 +160,7 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
ARMCDECoprocMask = 0;
// Default to no types using fpret.
- RealTypeUsesObjCFPRet = 0;
+ RealTypeUsesObjCFPRetMask = 0;
// Default to not using fp2ret for __Complex long double
ComplexLongDoubleUsesFP2Ret = false;
@@ -145,6 +179,8 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
PlatformMinVersion = VersionTuple();
MaxOpenCLWorkGroupSize = 1024;
+
+ MaxBitIntWidth.reset();
}
// Out of line virtual dtor for TargetInfo.
@@ -198,11 +234,11 @@ const char *TargetInfo::getTypeConstantSuffix(IntType T) const {
case UnsignedChar:
if (getCharWidth() < getIntWidth())
return "";
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case UnsignedShort:
if (getShortWidth() < getIntWidth())
return "";
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case UnsignedInt: return "U";
case UnsignedLong: return "UL";
case UnsignedLongLong: return "ULL";
@@ -276,32 +312,38 @@ TargetInfo::IntType TargetInfo::getLeastIntTypeByWidth(unsigned BitWidth,
return NoInt;
}
-TargetInfo::RealType TargetInfo::getRealTypeByWidth(unsigned BitWidth,
- bool ExplicitIEEE) const {
+FloatModeKind TargetInfo::getRealTypeByWidth(unsigned BitWidth,
+ FloatModeKind ExplicitType) const {
+ if (getHalfWidth() == BitWidth)
+ return FloatModeKind::Half;
if (getFloatWidth() == BitWidth)
- return Float;
+ return FloatModeKind::Float;
if (getDoubleWidth() == BitWidth)
- return Double;
+ return FloatModeKind::Double;
switch (BitWidth) {
case 96:
if (&getLongDoubleFormat() == &llvm::APFloat::x87DoubleExtended())
- return LongDouble;
+ return FloatModeKind::LongDouble;
break;
case 128:
// The caller explicitly asked for an IEEE compliant type but we still
// have to check if the target supports it.
- if (ExplicitIEEE)
- return hasFloat128Type() ? Float128 : NoFloat;
+ if (ExplicitType == FloatModeKind::Float128)
+ return hasFloat128Type() ? FloatModeKind::Float128
+ : FloatModeKind::NoFloat;
+ if (ExplicitType == FloatModeKind::Ibm128)
+ return hasIbm128Type() ? FloatModeKind::Ibm128
+ : FloatModeKind::NoFloat;
if (&getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble() ||
&getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
- return LongDouble;
+ return FloatModeKind::LongDouble;
if (hasFloat128Type())
- return Float128;
+ return FloatModeKind::Float128;
break;
}
- return NoFloat;
+ return FloatModeKind::NoFloat;
}
/// getTypeAlign - Return the alignment (in bits) of the specified integer type
@@ -406,12 +448,14 @@ void TargetInfo::adjust(DiagnosticsEngine &Diags, LangOptions &Opts) {
// for OpenCL C 2.0 but with no access to target capabilities. Target
// should be immutable once created and thus these language options need
// to be defined only once.
- if (Opts.OpenCLVersion == 300) {
+ if (Opts.getOpenCLCompatibleVersion() == 300) {
const auto &OpenCLFeaturesMap = getSupportedOpenCLOpts();
Opts.OpenCLGenericAddressSpace = hasFeatureEnabled(
OpenCLFeaturesMap, "__opencl_c_generic_address_space");
Opts.OpenCLPipes =
hasFeatureEnabled(OpenCLFeaturesMap, "__opencl_c_pipes");
+ Opts.Blocks =
+ hasFeatureEnabled(OpenCLFeaturesMap, "__opencl_c_device_enqueue");
}
}
@@ -437,6 +481,20 @@ void TargetInfo::adjust(DiagnosticsEngine &Diags, LangOptions &Opts) {
} else if (Opts.LongDoubleSize == 128) {
LongDoubleWidth = LongDoubleAlign = 128;
LongDoubleFormat = &llvm::APFloat::IEEEquad();
+ } else if (Opts.LongDoubleSize == 80) {
+ LongDoubleFormat = &llvm::APFloat::x87DoubleExtended();
+ if (getTriple().isWindowsMSVCEnvironment()) {
+ LongDoubleWidth = 128;
+ LongDoubleAlign = 128;
+ } else { // Linux
+ if (getTriple().getArch() == llvm::Triple::x86) {
+ LongDoubleWidth = 96;
+ LongDoubleAlign = 32;
+ } else {
+ LongDoubleWidth = 128;
+ LongDoubleAlign = 128;
+ }
+ }
}
}
@@ -452,6 +510,12 @@ void TargetInfo::adjust(DiagnosticsEngine &Diags, LangOptions &Opts) {
Diags.Report(diag::err_opt_not_valid_on_target) << "-fprotect-parens";
Opts.ProtectParens = false;
}
+
+ if (Opts.MaxBitIntWidth)
+ MaxBitIntWidth = static_cast<unsigned>(Opts.MaxBitIntWidth);
+
+ if (Opts.FakeAddressSpaceMap)
+ AddrSpaceMap = &FakeAddrSpaceMap;
}
bool TargetInfo::initFeatureMap(
@@ -459,21 +523,73 @@ bool TargetInfo::initFeatureMap(
const std::vector<std::string> &FeatureVec) const {
for (const auto &F : FeatureVec) {
StringRef Name = F;
+ if (Name.empty())
+ continue;
// Apply the feature via the target.
- bool Enabled = Name[0] == '+';
- setFeatureEnabled(Features, Name.substr(1), Enabled);
+ if (Name[0] != '+' && Name[0] != '-')
+ Diags.Report(diag::warn_fe_backend_invalid_feature_flag) << Name;
+ else
+ setFeatureEnabled(Features, Name.substr(1), Name[0] == '+');
}
return true;
}
+ParsedTargetAttr TargetInfo::parseTargetAttr(StringRef Features) const {
+ ParsedTargetAttr Ret;
+ if (Features == "default")
+ return Ret;
+ SmallVector<StringRef, 1> AttrFeatures;
+ Features.split(AttrFeatures, ",");
+
+ // Grab the various features and prepend a "+" to turn on the feature to
+ // the backend and add them to our existing set of features.
+ for (auto &Feature : AttrFeatures) {
+ // Go ahead and trim whitespace rather than either erroring or
+ // accepting it weirdly.
+ Feature = Feature.trim();
+
+ // TODO: Support the fpmath option. It will require checking
+ // overall feature validity for the function with the rest of the
+ // attributes on the function.
+ if (Feature.starts_with("fpmath="))
+ continue;
+
+ if (Feature.starts_with("branch-protection=")) {
+ Ret.BranchProtection = Feature.split('=').second.trim();
+ continue;
+ }
+
+ // While we're here iterating check for a different target cpu.
+ if (Feature.starts_with("arch=")) {
+ if (!Ret.CPU.empty())
+ Ret.Duplicate = "arch=";
+ else
+ Ret.CPU = Feature.split("=").second.trim();
+ } else if (Feature.starts_with("tune=")) {
+ if (!Ret.Tune.empty())
+ Ret.Duplicate = "tune=";
+ else
+ Ret.Tune = Feature.split("=").second.trim();
+ } else if (Feature.starts_with("no-"))
+ Ret.Features.push_back("-" + Feature.split("-").second.str());
+ else
+ Ret.Features.push_back("+" + Feature.str());
+ }
+ return Ret;
+}
+
TargetInfo::CallingConvKind
TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
if (getCXXABI() != TargetCXXABI::Microsoft &&
- (ClangABICompat4 || getTriple().getOS() == llvm::Triple::PS4))
+ (ClangABICompat4 || getTriple().isPS4()))
return CCK_ClangABI4OrPS4;
return CCK_Default;
}
+bool TargetInfo::areDefaultedSMFStillPOD(const LangOptions &LangOpts) const {
+ return LangOpts.getClangABICompat() > LangOptions::ClangABI::Ver15;
+}
+
LangAS TargetInfo::getOpenCLTypeAddrSpace(OpenCLTypeKind TK) const {
switch (TK) {
case OCLTK_Image:
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets.cpp b/contrib/llvm-project/clang/lib/Basic/Targets.cpp
index ba91d0439968..e3283510c6aa 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets.cpp
@@ -19,9 +19,12 @@
#include "Targets/ARM.h"
#include "Targets/AVR.h"
#include "Targets/BPF.h"
+#include "Targets/CSKY.h"
+#include "Targets/DirectX.h"
#include "Targets/Hexagon.h"
#include "Targets/Lanai.h"
#include "Targets/Le64.h"
+#include "Targets/LoongArch.h"
#include "Targets/M68k.h"
#include "Targets/MSP430.h"
#include "Targets/Mips.h"
@@ -39,8 +42,9 @@
#include "Targets/X86.h"
#include "Targets/XCore.h"
#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticFrontend.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/TargetParser/Triple.h"
using namespace clang;
@@ -78,9 +82,10 @@ void defineCPUMacros(MacroBuilder &Builder, StringRef CPUName, bool Tuning) {
void addCygMingDefines(const LangOptions &Opts, MacroBuilder &Builder) {
// Mingw and cygwin define __declspec(a) to __attribute__((a)). Clang
- // supports __declspec natively under -fms-extensions, but we define a no-op
- // __declspec macro anyway for pre-processor compatibility.
- if (Opts.MicrosoftExt)
+ // supports __declspec natively under -fdeclspec (also enabled with
+ // -fms-extensions), but we define a no-op __declspec macro anyway for
+ // pre-processor compatibility.
+ if (Opts.DeclSpecKeyword)
Builder.defineMacro("__declspec", "__declspec");
else
Builder.defineMacro("__declspec(a)", "__attribute__((a))");
@@ -104,8 +109,8 @@ void addCygMingDefines(const LangOptions &Opts, MacroBuilder &Builder) {
// Driver code
//===----------------------------------------------------------------------===//
-TargetInfo *AllocateTarget(const llvm::Triple &Triple,
- const TargetOptions &Opts) {
+std::unique_ptr<TargetInfo> AllocateTarget(const llvm::Triple &Triple,
+ const TargetOptions &Opts) {
llvm::Triple::OSType os = Triple.getOS();
switch (Triple.getArch()) {
@@ -113,498 +118,564 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return nullptr;
case llvm::Triple::arc:
- return new ARCTargetInfo(Triple, Opts);
+ return std::make_unique<ARCTargetInfo>(Triple, Opts);
case llvm::Triple::xcore:
- return new XCoreTargetInfo(Triple, Opts);
+ return std::make_unique<XCoreTargetInfo>(Triple, Opts);
case llvm::Triple::hexagon:
if (os == llvm::Triple::Linux &&
Triple.getEnvironment() == llvm::Triple::Musl)
- return new LinuxTargetInfo<HexagonTargetInfo>(Triple, Opts);
- return new HexagonTargetInfo(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<HexagonTargetInfo>>(Triple, Opts);
+ return std::make_unique<HexagonTargetInfo>(Triple, Opts);
case llvm::Triple::lanai:
- return new LanaiTargetInfo(Triple, Opts);
+ return std::make_unique<LanaiTargetInfo>(Triple, Opts);
case llvm::Triple::aarch64_32:
if (Triple.isOSDarwin())
- return new DarwinAArch64TargetInfo(Triple, Opts);
+ return std::make_unique<DarwinAArch64TargetInfo>(Triple, Opts);
return nullptr;
case llvm::Triple::aarch64:
if (Triple.isOSDarwin())
- return new DarwinAArch64TargetInfo(Triple, Opts);
+ return std::make_unique<DarwinAArch64TargetInfo>(Triple, Opts);
switch (os) {
- case llvm::Triple::CloudABI:
- return new CloudABITargetInfo<AArch64leTargetInfo>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<AArch64leTargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<AArch64leTargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Fuchsia:
- return new FuchsiaTargetInfo<AArch64leTargetInfo>(Triple, Opts);
+ return std::make_unique<FuchsiaTargetInfo<AArch64leTargetInfo>>(Triple,
+ Opts);
+ case llvm::Triple::Haiku:
+ return std::make_unique<HaikuTargetInfo<AArch64leTargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Linux:
- return new LinuxTargetInfo<AArch64leTargetInfo>(Triple, Opts);
+ switch (Triple.getEnvironment()) {
+ default:
+ return std::make_unique<LinuxTargetInfo<AArch64leTargetInfo>>(Triple,
+ Opts);
+ case llvm::Triple::OpenHOS:
+ return std::make_unique<OHOSTargetInfo<AArch64leTargetInfo>>(Triple,
+ Opts);
+ }
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<AArch64leTargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<AArch64leTargetInfo>>(Triple,
+ Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDTargetInfo<AArch64leTargetInfo>(Triple, Opts);
+ return std::make_unique<OpenBSDTargetInfo<AArch64leTargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Win32:
switch (Triple.getEnvironment()) {
case llvm::Triple::GNU:
- return new MinGWARM64TargetInfo(Triple, Opts);
+ return std::make_unique<MinGWARM64TargetInfo>(Triple, Opts);
case llvm::Triple::MSVC:
default: // Assume MSVC for unknown environments
- return new MicrosoftARM64TargetInfo(Triple, Opts);
+ return std::make_unique<MicrosoftARM64TargetInfo>(Triple, Opts);
}
default:
- return new AArch64leTargetInfo(Triple, Opts);
+ return std::make_unique<AArch64leTargetInfo>(Triple, Opts);
}
case llvm::Triple::aarch64_be:
switch (os) {
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<AArch64beTargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<AArch64beTargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Fuchsia:
- return new FuchsiaTargetInfo<AArch64beTargetInfo>(Triple, Opts);
+ return std::make_unique<FuchsiaTargetInfo<AArch64beTargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Linux:
- return new LinuxTargetInfo<AArch64beTargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<AArch64beTargetInfo>>(Triple,
+ Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<AArch64beTargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<AArch64beTargetInfo>>(Triple,
+ Opts);
default:
- return new AArch64beTargetInfo(Triple, Opts);
+ return std::make_unique<AArch64beTargetInfo>(Triple, Opts);
}
case llvm::Triple::arm:
case llvm::Triple::thumb:
if (Triple.isOSBinFormatMachO())
- return new DarwinARMTargetInfo(Triple, Opts);
+ return std::make_unique<DarwinARMTargetInfo>(Triple, Opts);
switch (os) {
- case llvm::Triple::CloudABI:
- return new CloudABITargetInfo<ARMleTargetInfo>(Triple, Opts);
case llvm::Triple::Linux:
- return new LinuxTargetInfo<ARMleTargetInfo>(Triple, Opts);
+ switch (Triple.getEnvironment()) {
+ default:
+ return std::make_unique<LinuxTargetInfo<ARMleTargetInfo>>(Triple, Opts);
+ case llvm::Triple::OpenHOS:
+ return std::make_unique<OHOSTargetInfo<ARMleTargetInfo>>(Triple, Opts);
+ }
+ case llvm::Triple::LiteOS:
+ return std::make_unique<OHOSTargetInfo<ARMleTargetInfo>>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<ARMleTargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<ARMleTargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<ARMleTargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<ARMleTargetInfo>>(Triple, Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDTargetInfo<ARMleTargetInfo>(Triple, Opts);
+ return std::make_unique<OpenBSDTargetInfo<ARMleTargetInfo>>(Triple, Opts);
case llvm::Triple::RTEMS:
- return new RTEMSTargetInfo<ARMleTargetInfo>(Triple, Opts);
+ return std::make_unique<RTEMSTargetInfo<ARMleTargetInfo>>(Triple, Opts);
+ case llvm::Triple::Haiku:
+ return std::make_unique<HaikuTargetInfo<ARMleTargetInfo>>(Triple, Opts);
case llvm::Triple::NaCl:
- return new NaClTargetInfo<ARMleTargetInfo>(Triple, Opts);
+ return std::make_unique<NaClTargetInfo<ARMleTargetInfo>>(Triple, Opts);
case llvm::Triple::Win32:
switch (Triple.getEnvironment()) {
case llvm::Triple::Cygnus:
- return new CygwinARMTargetInfo(Triple, Opts);
+ return std::make_unique<CygwinARMTargetInfo>(Triple, Opts);
case llvm::Triple::GNU:
- return new MinGWARMTargetInfo(Triple, Opts);
+ return std::make_unique<MinGWARMTargetInfo>(Triple, Opts);
case llvm::Triple::Itanium:
- return new ItaniumWindowsARMleTargetInfo(Triple, Opts);
+ return std::make_unique<ItaniumWindowsARMleTargetInfo>(Triple, Opts);
case llvm::Triple::MSVC:
default: // Assume MSVC for unknown environments
- return new MicrosoftARMleTargetInfo(Triple, Opts);
+ return std::make_unique<MicrosoftARMleTargetInfo>(Triple, Opts);
}
default:
- return new ARMleTargetInfo(Triple, Opts);
+ return std::make_unique<ARMleTargetInfo>(Triple, Opts);
}
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
if (Triple.isOSDarwin())
- return new DarwinARMTargetInfo(Triple, Opts);
+ return std::make_unique<DarwinARMTargetInfo>(Triple, Opts);
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<ARMbeTargetInfo>(Triple, Opts);
- case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<ARMbeTargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<ARMbeTargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<ARMbeTargetInfo>(Triple, Opts);
- case llvm::Triple::OpenBSD:
- return new OpenBSDTargetInfo<ARMbeTargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<ARMbeTargetInfo>>(Triple, Opts);
case llvm::Triple::RTEMS:
- return new RTEMSTargetInfo<ARMbeTargetInfo>(Triple, Opts);
+ return std::make_unique<RTEMSTargetInfo<ARMbeTargetInfo>>(Triple, Opts);
case llvm::Triple::NaCl:
- return new NaClTargetInfo<ARMbeTargetInfo>(Triple, Opts);
+ return std::make_unique<NaClTargetInfo<ARMbeTargetInfo>>(Triple, Opts);
default:
- return new ARMbeTargetInfo(Triple, Opts);
+ return std::make_unique<ARMbeTargetInfo>(Triple, Opts);
}
case llvm::Triple::avr:
- return new AVRTargetInfo(Triple, Opts);
+ return std::make_unique<AVRTargetInfo>(Triple, Opts);
case llvm::Triple::bpfeb:
case llvm::Triple::bpfel:
- return new BPFTargetInfo(Triple, Opts);
+ return std::make_unique<BPFTargetInfo>(Triple, Opts);
case llvm::Triple::msp430:
- return new MSP430TargetInfo(Triple, Opts);
+ return std::make_unique<MSP430TargetInfo>(Triple, Opts);
case llvm::Triple::mips:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::RTEMS:
- return new RTEMSTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<RTEMSTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<MipsTargetInfo>>(Triple, Opts);
default:
- return new MipsTargetInfo(Triple, Opts);
+ return std::make_unique<MipsTargetInfo>(Triple, Opts);
}
case llvm::Triple::mipsel:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<MipsTargetInfo>(Triple, Opts);
+ switch (Triple.getEnvironment()) {
+ default:
+ return std::make_unique<LinuxTargetInfo<MipsTargetInfo>>(Triple, Opts);
+ case llvm::Triple::OpenHOS:
+ return std::make_unique<OHOSTargetInfo<MipsTargetInfo>>(Triple, Opts);
+ }
case llvm::Triple::RTEMS:
- return new RTEMSTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<RTEMSTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::NaCl:
- return new NaClTargetInfo<NaClMips32TargetInfo>(Triple, Opts);
+ return std::make_unique<NaClTargetInfo<NaClMips32TargetInfo>>(Triple,
+ Opts);
default:
- return new MipsTargetInfo(Triple, Opts);
+ return std::make_unique<MipsTargetInfo>(Triple, Opts);
}
case llvm::Triple::mips64:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::RTEMS:
- return new RTEMSTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<RTEMSTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<OpenBSDTargetInfo<MipsTargetInfo>>(Triple, Opts);
default:
- return new MipsTargetInfo(Triple, Opts);
+ return std::make_unique<MipsTargetInfo>(Triple, Opts);
}
case llvm::Triple::mips64el:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::RTEMS:
- return new RTEMSTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<RTEMSTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<MipsTargetInfo>>(Triple, Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDTargetInfo<MipsTargetInfo>(Triple, Opts);
+ return std::make_unique<OpenBSDTargetInfo<MipsTargetInfo>>(Triple, Opts);
default:
- return new MipsTargetInfo(Triple, Opts);
+ return std::make_unique<MipsTargetInfo>(Triple, Opts);
}
case llvm::Triple::m68k:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<M68kTargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<M68kTargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<M68kTargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<M68kTargetInfo>>(Triple, Opts);
default:
- return new M68kTargetInfo(Triple, Opts);
+ return std::make_unique<M68kTargetInfo>(Triple, Opts);
}
case llvm::Triple::le32:
switch (os) {
case llvm::Triple::NaCl:
- return new NaClTargetInfo<PNaClTargetInfo>(Triple, Opts);
+ return std::make_unique<NaClTargetInfo<PNaClTargetInfo>>(Triple, Opts);
default:
return nullptr;
}
case llvm::Triple::le64:
- return new Le64TargetInfo(Triple, Opts);
+ return std::make_unique<Le64TargetInfo>(Triple, Opts);
case llvm::Triple::ppc:
- if (Triple.isOSDarwin())
- return new DarwinPPC32TargetInfo(Triple, Opts);
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<PPC32TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<PPC32TargetInfo>>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<PPC32TargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<PPC32TargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<PPC32TargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<PPC32TargetInfo>>(Triple, Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDTargetInfo<PPC32TargetInfo>(Triple, Opts);
+ return std::make_unique<OpenBSDTargetInfo<PPC32TargetInfo>>(Triple, Opts);
case llvm::Triple::RTEMS:
- return new RTEMSTargetInfo<PPC32TargetInfo>(Triple, Opts);
+ return std::make_unique<RTEMSTargetInfo<PPC32TargetInfo>>(Triple, Opts);
case llvm::Triple::AIX:
- return new AIXPPC32TargetInfo(Triple, Opts);
+ return std::make_unique<AIXPPC32TargetInfo>(Triple, Opts);
default:
- return new PPC32TargetInfo(Triple, Opts);
+ return std::make_unique<PPC32TargetInfo>(Triple, Opts);
}
case llvm::Triple::ppcle:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<PPC32TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<PPC32TargetInfo>>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<PPC32TargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<PPC32TargetInfo>>(Triple, Opts);
default:
- return new PPC32TargetInfo(Triple, Opts);
+ return std::make_unique<PPC32TargetInfo>(Triple, Opts);
}
case llvm::Triple::ppc64:
- if (Triple.isOSDarwin())
- return new DarwinPPC64TargetInfo(Triple, Opts);
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<PPC64TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<PPC64TargetInfo>>(Triple, Opts);
case llvm::Triple::Lv2:
- return new PS3PPUTargetInfo<PPC64TargetInfo>(Triple, Opts);
+ return std::make_unique<PS3PPUTargetInfo<PPC64TargetInfo>>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<PPC64TargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<PPC64TargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<PPC64TargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<PPC64TargetInfo>>(Triple, Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDTargetInfo<PPC64TargetInfo>(Triple, Opts);
+ return std::make_unique<OpenBSDTargetInfo<PPC64TargetInfo>>(Triple, Opts);
case llvm::Triple::AIX:
- return new AIXPPC64TargetInfo(Triple, Opts);
+ return std::make_unique<AIXPPC64TargetInfo>(Triple, Opts);
default:
- return new PPC64TargetInfo(Triple, Opts);
+ return std::make_unique<PPC64TargetInfo>(Triple, Opts);
}
case llvm::Triple::ppc64le:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<PPC64TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<PPC64TargetInfo>>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<PPC64TargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<PPC64TargetInfo>>(Triple, Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<PPC64TargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<PPC64TargetInfo>>(Triple, Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDTargetInfo<PPC64TargetInfo>(Triple, Opts);
+ return std::make_unique<OpenBSDTargetInfo<PPC64TargetInfo>>(Triple, Opts);
default:
- return new PPC64TargetInfo(Triple, Opts);
+ return std::make_unique<PPC64TargetInfo>(Triple, Opts);
}
case llvm::Triple::nvptx:
- return new NVPTXTargetInfo(Triple, Opts, /*TargetPointerWidth=*/32);
+ return std::make_unique<NVPTXTargetInfo>(Triple, Opts,
+ /*TargetPointerWidth=*/32);
case llvm::Triple::nvptx64:
- return new NVPTXTargetInfo(Triple, Opts, /*TargetPointerWidth=*/64);
+ return std::make_unique<NVPTXTargetInfo>(Triple, Opts,
+ /*TargetPointerWidth=*/64);
case llvm::Triple::amdgcn:
case llvm::Triple::r600:
- return new AMDGPUTargetInfo(Triple, Opts);
+ return std::make_unique<AMDGPUTargetInfo>(Triple, Opts);
case llvm::Triple::riscv32:
- // TODO: add cases for NetBSD, RTEMS once tested.
switch (os) {
- case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<RISCV32TargetInfo>(Triple, Opts);
+ case llvm::Triple::NetBSD:
+ return std::make_unique<NetBSDTargetInfo<RISCV32TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Linux:
- return new LinuxTargetInfo<RISCV32TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<RISCV32TargetInfo>>(Triple, Opts);
default:
- return new RISCV32TargetInfo(Triple, Opts);
+ return std::make_unique<RISCV32TargetInfo>(Triple, Opts);
}
case llvm::Triple::riscv64:
- // TODO: add cases for NetBSD, RTEMS once tested.
switch (os) {
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<RISCV64TargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<RISCV64TargetInfo>>(Triple,
+ Opts);
+ case llvm::Triple::NetBSD:
+ return std::make_unique<NetBSDTargetInfo<RISCV64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDTargetInfo<RISCV64TargetInfo>(Triple, Opts);
+ return std::make_unique<OpenBSDTargetInfo<RISCV64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Fuchsia:
- return new FuchsiaTargetInfo<RISCV64TargetInfo>(Triple, Opts);
+ return std::make_unique<FuchsiaTargetInfo<RISCV64TargetInfo>>(Triple,
+ Opts);
+ case llvm::Triple::Haiku:
+ return std::make_unique<HaikuTargetInfo<RISCV64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Linux:
- return new LinuxTargetInfo<RISCV64TargetInfo>(Triple, Opts);
+ switch (Triple.getEnvironment()) {
+ default:
+ return std::make_unique<LinuxTargetInfo<RISCV64TargetInfo>>(Triple,
+ Opts);
+ case llvm::Triple::OpenHOS:
+ return std::make_unique<OHOSTargetInfo<RISCV64TargetInfo>>(Triple,
+ Opts);
+ }
default:
- return new RISCV64TargetInfo(Triple, Opts);
+ return std::make_unique<RISCV64TargetInfo>(Triple, Opts);
}
case llvm::Triple::sparc:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<SparcV8TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<SparcV8TargetInfo>>(Triple, Opts);
case llvm::Triple::Solaris:
- return new SolarisTargetInfo<SparcV8TargetInfo>(Triple, Opts);
+ return std::make_unique<SolarisTargetInfo<SparcV8TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<SparcV8TargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<SparcV8TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::RTEMS:
- return new RTEMSTargetInfo<SparcV8TargetInfo>(Triple, Opts);
+ return std::make_unique<RTEMSTargetInfo<SparcV8TargetInfo>>(Triple, Opts);
default:
- return new SparcV8TargetInfo(Triple, Opts);
+ return std::make_unique<SparcV8TargetInfo>(Triple, Opts);
}
- // The 'sparcel' architecture copies all the above cases except for Solaris.
case llvm::Triple::sparcel:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<SparcV8elTargetInfo>(Triple, Opts);
- case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<SparcV8elTargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<SparcV8elTargetInfo>>(Triple,
+ Opts);
case llvm::Triple::RTEMS:
- return new RTEMSTargetInfo<SparcV8elTargetInfo>(Triple, Opts);
+ return std::make_unique<RTEMSTargetInfo<SparcV8elTargetInfo>>(Triple,
+ Opts);
default:
- return new SparcV8elTargetInfo(Triple, Opts);
+ return std::make_unique<SparcV8elTargetInfo>(Triple, Opts);
}
case llvm::Triple::sparcv9:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<SparcV9TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<SparcV9TargetInfo>>(Triple, Opts);
case llvm::Triple::Solaris:
- return new SolarisTargetInfo<SparcV9TargetInfo>(Triple, Opts);
+ return std::make_unique<SolarisTargetInfo<SparcV9TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<SparcV9TargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<SparcV9TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDTargetInfo<SparcV9TargetInfo>(Triple, Opts);
+ return std::make_unique<OpenBSDTargetInfo<SparcV9TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<SparcV9TargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<SparcV9TargetInfo>>(Triple,
+ Opts);
default:
- return new SparcV9TargetInfo(Triple, Opts);
+ return std::make_unique<SparcV9TargetInfo>(Triple, Opts);
}
case llvm::Triple::systemz:
switch (os) {
case llvm::Triple::Linux:
- return new LinuxTargetInfo<SystemZTargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<SystemZTargetInfo>>(Triple, Opts);
case llvm::Triple::ZOS:
- return new ZOSTargetInfo<SystemZTargetInfo>(Triple, Opts);
+ return std::make_unique<ZOSTargetInfo<SystemZTargetInfo>>(Triple, Opts);
default:
- return new SystemZTargetInfo(Triple, Opts);
+ return std::make_unique<SystemZTargetInfo>(Triple, Opts);
}
case llvm::Triple::tce:
- return new TCETargetInfo(Triple, Opts);
+ return std::make_unique<TCETargetInfo>(Triple, Opts);
case llvm::Triple::tcele:
- return new TCELETargetInfo(Triple, Opts);
+ return std::make_unique<TCELETargetInfo>(Triple, Opts);
case llvm::Triple::x86:
if (Triple.isOSDarwin())
- return new DarwinI386TargetInfo(Triple, Opts);
+ return std::make_unique<DarwinI386TargetInfo>(Triple, Opts);
switch (os) {
- case llvm::Triple::Ananas:
- return new AnanasTargetInfo<X86_32TargetInfo>(Triple, Opts);
- case llvm::Triple::CloudABI:
- return new CloudABITargetInfo<X86_32TargetInfo>(Triple, Opts);
case llvm::Triple::Linux: {
switch (Triple.getEnvironment()) {
default:
- return new LinuxTargetInfo<X86_32TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<X86_32TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Android:
- return new AndroidX86_32TargetInfo(Triple, Opts);
+ return std::make_unique<AndroidX86_32TargetInfo>(Triple, Opts);
}
}
case llvm::Triple::DragonFly:
- return new DragonFlyBSDTargetInfo<X86_32TargetInfo>(Triple, Opts);
+ return std::make_unique<DragonFlyBSDTargetInfo<X86_32TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::NetBSD:
- return new NetBSDI386TargetInfo(Triple, Opts);
+ return std::make_unique<NetBSDI386TargetInfo>(Triple, Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDI386TargetInfo(Triple, Opts);
+ return std::make_unique<OpenBSDI386TargetInfo>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<X86_32TargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<X86_32TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Fuchsia:
- return new FuchsiaTargetInfo<X86_32TargetInfo>(Triple, Opts);
+ return std::make_unique<FuchsiaTargetInfo<X86_32TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::KFreeBSD:
- return new KFreeBSDTargetInfo<X86_32TargetInfo>(Triple, Opts);
- case llvm::Triple::Minix:
- return new MinixTargetInfo<X86_32TargetInfo>(Triple, Opts);
+ return std::make_unique<KFreeBSDTargetInfo<X86_32TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Solaris:
- return new SolarisTargetInfo<X86_32TargetInfo>(Triple, Opts);
+ return std::make_unique<SolarisTargetInfo<X86_32TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Win32: {
switch (Triple.getEnvironment()) {
case llvm::Triple::Cygnus:
- return new CygwinX86_32TargetInfo(Triple, Opts);
+ return std::make_unique<CygwinX86_32TargetInfo>(Triple, Opts);
case llvm::Triple::GNU:
- return new MinGWX86_32TargetInfo(Triple, Opts);
+ return std::make_unique<MinGWX86_32TargetInfo>(Triple, Opts);
case llvm::Triple::Itanium:
case llvm::Triple::MSVC:
default: // Assume MSVC for unknown environments
- return new MicrosoftX86_32TargetInfo(Triple, Opts);
+ return std::make_unique<MicrosoftX86_32TargetInfo>(Triple, Opts);
}
}
case llvm::Triple::Haiku:
- return new HaikuX86_32TargetInfo(Triple, Opts);
+ return std::make_unique<HaikuX86_32TargetInfo>(Triple, Opts);
case llvm::Triple::RTEMS:
- return new RTEMSX86_32TargetInfo(Triple, Opts);
+ return std::make_unique<RTEMSX86_32TargetInfo>(Triple, Opts);
case llvm::Triple::NaCl:
- return new NaClTargetInfo<X86_32TargetInfo>(Triple, Opts);
+ return std::make_unique<NaClTargetInfo<X86_32TargetInfo>>(Triple, Opts);
case llvm::Triple::ELFIAMCU:
- return new MCUX86_32TargetInfo(Triple, Opts);
+ return std::make_unique<MCUX86_32TargetInfo>(Triple, Opts);
case llvm::Triple::Hurd:
- return new HurdTargetInfo<X86_32TargetInfo>(Triple, Opts);
+ return std::make_unique<HurdTargetInfo<X86_32TargetInfo>>(Triple, Opts);
default:
- return new X86_32TargetInfo(Triple, Opts);
+ return std::make_unique<X86_32TargetInfo>(Triple, Opts);
}
case llvm::Triple::x86_64:
if (Triple.isOSDarwin() || Triple.isOSBinFormatMachO())
- return new DarwinX86_64TargetInfo(Triple, Opts);
+ return std::make_unique<DarwinX86_64TargetInfo>(Triple, Opts);
switch (os) {
- case llvm::Triple::Ananas:
- return new AnanasTargetInfo<X86_64TargetInfo>(Triple, Opts);
- case llvm::Triple::CloudABI:
- return new CloudABITargetInfo<X86_64TargetInfo>(Triple, Opts);
case llvm::Triple::Linux: {
switch (Triple.getEnvironment()) {
default:
- return new LinuxTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<X86_64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Android:
- return new AndroidX86_64TargetInfo(Triple, Opts);
+ return std::make_unique<AndroidX86_64TargetInfo>(Triple, Opts);
+ case llvm::Triple::OpenHOS:
+ return std::make_unique<OHOSX86_64TargetInfo>(Triple, Opts);
}
}
case llvm::Triple::DragonFly:
- return new DragonFlyBSDTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<DragonFlyBSDTargetInfo<X86_64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::NetBSD:
- return new NetBSDTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<NetBSDTargetInfo<X86_64TargetInfo>>(Triple, Opts);
case llvm::Triple::OpenBSD:
- return new OpenBSDX86_64TargetInfo(Triple, Opts);
+ return std::make_unique<OpenBSDX86_64TargetInfo>(Triple, Opts);
case llvm::Triple::FreeBSD:
- return new FreeBSDTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<FreeBSDTargetInfo<X86_64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Fuchsia:
- return new FuchsiaTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<FuchsiaTargetInfo<X86_64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::KFreeBSD:
- return new KFreeBSDTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<KFreeBSDTargetInfo<X86_64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Solaris:
- return new SolarisTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<SolarisTargetInfo<X86_64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Win32: {
switch (Triple.getEnvironment()) {
case llvm::Triple::Cygnus:
- return new CygwinX86_64TargetInfo(Triple, Opts);
+ return std::make_unique<CygwinX86_64TargetInfo>(Triple, Opts);
case llvm::Triple::GNU:
- return new MinGWX86_64TargetInfo(Triple, Opts);
+ return std::make_unique<MinGWX86_64TargetInfo>(Triple, Opts);
case llvm::Triple::MSVC:
default: // Assume MSVC for unknown environments
- return new MicrosoftX86_64TargetInfo(Triple, Opts);
+ return std::make_unique<MicrosoftX86_64TargetInfo>(Triple, Opts);
}
}
case llvm::Triple::Haiku:
- return new HaikuTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<HaikuTargetInfo<X86_64TargetInfo>>(Triple, Opts);
case llvm::Triple::NaCl:
- return new NaClTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<NaClTargetInfo<X86_64TargetInfo>>(Triple, Opts);
case llvm::Triple::PS4:
- return new PS4OSTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ return std::make_unique<PS4OSTargetInfo<X86_64TargetInfo>>(Triple, Opts);
+ case llvm::Triple::PS5:
+ return std::make_unique<PS5OSTargetInfo<X86_64TargetInfo>>(Triple, Opts);
+ case llvm::Triple::Hurd:
+ return std::make_unique<HurdTargetInfo<X86_64TargetInfo>>(Triple, Opts);
default:
- return new X86_64TargetInfo(Triple, Opts);
+ return std::make_unique<X86_64TargetInfo>(Triple, Opts);
}
case llvm::Triple::spir: {
if (os != llvm::Triple::UnknownOS ||
Triple.getEnvironment() != llvm::Triple::UnknownEnvironment)
return nullptr;
- return new SPIR32TargetInfo(Triple, Opts);
+ return std::make_unique<SPIR32TargetInfo>(Triple, Opts);
}
case llvm::Triple::spir64: {
if (os != llvm::Triple::UnknownOS ||
Triple.getEnvironment() != llvm::Triple::UnknownEnvironment)
return nullptr;
- return new SPIR64TargetInfo(Triple, Opts);
+ return std::make_unique<SPIR64TargetInfo>(Triple, Opts);
+ }
+ case llvm::Triple::spirv: {
+ return std::make_unique<SPIRVTargetInfo>(Triple, Opts);
+ }
+ case llvm::Triple::spirv32: {
+ if (os != llvm::Triple::UnknownOS ||
+ Triple.getEnvironment() != llvm::Triple::UnknownEnvironment)
+ return nullptr;
+ return std::make_unique<SPIRV32TargetInfo>(Triple, Opts);
+ }
+ case llvm::Triple::spirv64: {
+ if (os != llvm::Triple::UnknownOS ||
+ Triple.getEnvironment() != llvm::Triple::UnknownEnvironment)
+ return nullptr;
+ return std::make_unique<SPIRV64TargetInfo>(Triple, Opts);
}
case llvm::Triple::wasm32:
if (Triple.getSubArch() != llvm::Triple::NoSubArch ||
@@ -613,11 +684,14 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return nullptr;
switch (os) {
case llvm::Triple::WASI:
- return new WASITargetInfo<WebAssembly32TargetInfo>(Triple, Opts);
+ return std::make_unique<WASITargetInfo<WebAssembly32TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Emscripten:
- return new EmscriptenTargetInfo<WebAssembly32TargetInfo>(Triple, Opts);
+ return std::make_unique<EmscriptenTargetInfo<WebAssembly32TargetInfo>>(
+ Triple, Opts);
case llvm::Triple::UnknownOS:
- return new WebAssemblyOSTargetInfo<WebAssembly32TargetInfo>(Triple, Opts);
+ return std::make_unique<WebAssemblyOSTargetInfo<WebAssembly32TargetInfo>>(
+ Triple, Opts);
default:
return nullptr;
}
@@ -628,22 +702,53 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return nullptr;
switch (os) {
case llvm::Triple::WASI:
- return new WASITargetInfo<WebAssembly64TargetInfo>(Triple, Opts);
+ return std::make_unique<WASITargetInfo<WebAssembly64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::Emscripten:
- return new EmscriptenTargetInfo<WebAssembly64TargetInfo>(Triple, Opts);
+ return std::make_unique<EmscriptenTargetInfo<WebAssembly64TargetInfo>>(
+ Triple, Opts);
case llvm::Triple::UnknownOS:
- return new WebAssemblyOSTargetInfo<WebAssembly64TargetInfo>(Triple, Opts);
+ return std::make_unique<WebAssemblyOSTargetInfo<WebAssembly64TargetInfo>>(
+ Triple, Opts);
default:
return nullptr;
}
+ case llvm::Triple::dxil:
+ return std::make_unique<DirectXTargetInfo>(Triple, Opts);
case llvm::Triple::renderscript32:
- return new LinuxTargetInfo<RenderScript32TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<RenderScript32TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::renderscript64:
- return new LinuxTargetInfo<RenderScript64TargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<RenderScript64TargetInfo>>(Triple,
+ Opts);
case llvm::Triple::ve:
- return new LinuxTargetInfo<VETargetInfo>(Triple, Opts);
+ return std::make_unique<LinuxTargetInfo<VETargetInfo>>(Triple, Opts);
+
+ case llvm::Triple::csky:
+ switch (os) {
+ case llvm::Triple::Linux:
+ return std::make_unique<LinuxTargetInfo<CSKYTargetInfo>>(Triple, Opts);
+ default:
+ return std::make_unique<CSKYTargetInfo>(Triple, Opts);
+ }
+ case llvm::Triple::loongarch32:
+ switch (os) {
+ case llvm::Triple::Linux:
+ return std::make_unique<LinuxTargetInfo<LoongArch32TargetInfo>>(Triple,
+ Opts);
+ default:
+ return std::make_unique<LoongArch32TargetInfo>(Triple, Opts);
+ }
+ case llvm::Triple::loongarch64:
+ switch (os) {
+ case llvm::Triple::Linux:
+ return std::make_unique<LinuxTargetInfo<LoongArch64TargetInfo>>(Triple,
+ Opts);
+ default:
+ return std::make_unique<LoongArch64TargetInfo>(Triple, Opts);
+ }
}
}
} // namespace targets
@@ -658,7 +763,7 @@ TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
llvm::Triple Triple(Opts->Triple);
// Construct the target
- std::unique_ptr<TargetInfo> Target(AllocateTarget(Triple, *Opts));
+ std::unique_ptr<TargetInfo> Target = AllocateTarget(Triple, *Opts);
if (!Target) {
Diags.Report(diag::err_target_unknown_triple) << Triple.str();
return nullptr;
@@ -700,6 +805,13 @@ TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
// Compute the default target features, we need the target to handle this
// because features may have dependencies on one another.
+ llvm::erase_if(Opts->FeaturesAsWritten, [&](StringRef Name) {
+ if (Target->isReadOnlyFeature(Name.substr(1))) {
+ Diags.Report(diag::warn_fe_backend_readonly_feature_flag) << Name;
+ return true;
+ }
+ return false;
+ });
if (!Target->initFeatureMap(Opts->FeatureMap, Diags, Opts->CPU,
Opts->FeaturesAsWritten))
return nullptr;
@@ -719,6 +831,10 @@ TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
Target->setCommandLineOpenCLOpts();
Target->setMaxAtomicWidth();
+ if (!Opts->DarwinTargetVariantTriple.empty())
+ Target->DarwinTargetVariantTriple =
+ llvm::Triple(Opts->DarwinTargetVariantTriple);
+
if (!Target->validateTarget(Diags))
return nullptr;
@@ -745,7 +861,7 @@ bool TargetInfo::validateOpenCLTarget(const LangOptions &Opts,
// Validate that feature macros are set properly for OpenCL C 3.0.
// In other cases assume that target is always valid.
- if (Opts.OpenCLCPlusPlus || Opts.OpenCLVersion < 300)
+ if (Opts.getOpenCLCompatibleVersion() < 300)
return true;
return OpenCLOptions::diagnoseUnsupportedFeatureDependencies(*this, Diags) &&
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets.h b/contrib/llvm-project/clang/lib/Basic/Targets.h
index a063204e69e6..b4d2486b5d2b 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets.h
@@ -24,8 +24,8 @@ namespace clang {
namespace targets {
LLVM_LIBRARY_VISIBILITY
-clang::TargetInfo *AllocateTarget(const llvm::Triple &Triple,
- const clang::TargetOptions &Opts);
+std::unique_ptr<clang::TargetInfo>
+AllocateTarget(const llvm::Triple &Triple, const clang::TargetOptions &Opts);
/// DefineStd - Define a macro name and standard variants. For example if
/// MacroName is "unix", then this will define "__unix", "__unix__", and "unix"
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
index e163ebfa2348..f5a5d689fa09 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
@@ -17,29 +17,116 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/AArch64TargetParser.h"
+#include "llvm/TargetParser/AArch64TargetParser.h"
+#include "llvm/TargetParser/ARMTargetParserCommon.h"
+#include <optional>
using namespace clang;
using namespace clang::targets;
-const Builtin::Info AArch64TargetInfo::BuiltinInfo[] = {
+static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsNEON.def"
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsSVE.def"
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
+#include "clang/Basic/BuiltinsSME.def"
+
+#define BUILTIN(ID, TYPE, ATTRS) \
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG) \
- {#ID, TYPE, ATTRS, nullptr, LANG, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, LANG},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE) \
- {#ID, TYPE, ATTRS, HEADER, LANGS, FEATURE},
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::HEADER, LANGS},
#include "clang/Basic/BuiltinsAArch64.def"
};
+void AArch64TargetInfo::setArchFeatures() {
+ if (*ArchInfo == llvm::AArch64::ARMV8R) {
+ HasDotProd = true;
+ HasDIT = true;
+ HasFlagM = true;
+ HasRCPC = true;
+ FPU |= NeonMode;
+ HasCCPP = true;
+ HasCRC = true;
+ HasLSE = true;
+ HasRDM = true;
+ } else if (ArchInfo->Version.getMajor() == 8) {
+ if (ArchInfo->Version.getMinor() >= 7u) {
+ HasWFxT = true;
+ }
+ if (ArchInfo->Version.getMinor() >= 6u) {
+ HasBFloat16 = true;
+ HasMatMul = true;
+ }
+ if (ArchInfo->Version.getMinor() >= 5u) {
+ HasAlternativeNZCV = true;
+ HasFRInt3264 = true;
+ HasSSBS = true;
+ HasSB = true;
+ HasPredRes = true;
+ HasBTI = true;
+ }
+ if (ArchInfo->Version.getMinor() >= 4u) {
+ HasDotProd = true;
+ HasDIT = true;
+ HasFlagM = true;
+ }
+ if (ArchInfo->Version.getMinor() >= 3u) {
+ HasRCPC = true;
+ FPU |= NeonMode;
+ }
+ if (ArchInfo->Version.getMinor() >= 2u) {
+ HasCCPP = true;
+ }
+ if (ArchInfo->Version.getMinor() >= 1u) {
+ HasCRC = true;
+ HasLSE = true;
+ HasRDM = true;
+ }
+ } else if (ArchInfo->Version.getMajor() == 9) {
+ if (ArchInfo->Version.getMinor() >= 2u) {
+ HasWFxT = true;
+ }
+ if (ArchInfo->Version.getMinor() >= 1u) {
+ HasBFloat16 = true;
+ HasMatMul = true;
+ }
+ FPU |= SveMode;
+ HasSVE2 = true;
+ HasFullFP16 = true;
+ HasAlternativeNZCV = true;
+ HasFRInt3264 = true;
+ HasSSBS = true;
+ HasSB = true;
+ HasPredRes = true;
+ HasBTI = true;
+ HasDotProd = true;
+ HasDIT = true;
+ HasFlagM = true;
+ HasRCPC = true;
+ FPU |= NeonMode;
+ HasCCPP = true;
+ HasCRC = true;
+ HasLSE = true;
+ HasRDM = true;
+ }
+}
+
AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: TargetInfo(Triple), ABI("aapcs") {
@@ -56,7 +143,9 @@ AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
// All AArch64 implementations support ARMv8 FP, which makes half a legal type.
HasLegalHalfType = true;
+ HalfArgsAndReturns = true;
HasFloat16 = true;
+ HasStrictFP = true;
if (Triple.isArch64Bit())
LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
@@ -117,11 +206,11 @@ bool AArch64TargetInfo::setABI(const std::string &Name) {
return true;
}
-bool AArch64TargetInfo::validateBranchProtection(StringRef Spec,
+bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
BranchProtectionInfo &BPI,
StringRef &Err) const {
- llvm::AArch64::ParsedBranchProtection PBP;
- if (!llvm::AArch64::parseBranchProtection(Spec, PBP, Err))
+ llvm::ARM::ParsedBranchProtection PBP;
+ if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
return false;
BPI.SignReturnAddr =
@@ -136,12 +225,13 @@ bool AArch64TargetInfo::validateBranchProtection(StringRef Spec,
BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
+ BPI.BranchProtectionPAuthLR = PBP.BranchProtectionPAuthLR;
+ BPI.GuardedControlStack = PBP.GuardedControlStack;
return true;
}
bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
- return Name == "generic" ||
- llvm::AArch64::parseCPUArch(Name) != llvm::AArch64::ArchKind::INVALID;
+ return Name == "generic" || llvm::AArch64::parseCpu(Name);
}
bool AArch64TargetInfo::setCPU(const std::string &Name) {
@@ -156,8 +246,6 @@ void AArch64TargetInfo::fillValidCPUList(
void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
- Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
- Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
}
void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
@@ -183,6 +271,7 @@ void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
+ Builder.defineMacro("__ARM_FEATURE_BTI", "1");
// Also include the Armv8.4 defines
getTargetDefinesARMV84A(Opts, Builder);
}
@@ -203,21 +292,72 @@ void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
getTargetDefinesARMV86A(Opts, Builder);
}
+void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Also include the Armv8.7 defines
+ getTargetDefinesARMV87A(Opts, Builder);
+}
+
+void AArch64TargetInfo::getTargetDefinesARMV89A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Also include the Armv8.8 defines
+ getTargetDefinesARMV88A(Opts, Builder);
+}
+
+void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Armv9-A maps to Armv8.5-A
+ getTargetDefinesARMV85A(Opts, Builder);
+}
+
+void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Armv9.1-A maps to Armv8.6-A
+ getTargetDefinesARMV86A(Opts, Builder);
+}
+
+void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Armv9.2-A maps to Armv8.7-A
+ getTargetDefinesARMV87A(Opts, Builder);
+}
+
+void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Armv9.3-A maps to Armv8.8-A
+ getTargetDefinesARMV88A(Opts, Builder);
+}
+
+void AArch64TargetInfo::getTargetDefinesARMV94A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Armv9.4-A maps to Armv8.9-A
+ getTargetDefinesARMV89A(Opts, Builder);
+}
+
+void AArch64TargetInfo::getTargetDefinesARMV95A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Armv9.5-A does not have a v8.* equivalent, but is a superset of v9.4-A.
+ getTargetDefinesARMV94A(Opts, Builder);
+}
+
void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Target identification.
- Builder.defineMacro("__aarch64__");
- // For bare-metal.
- if (getTriple().getOS() == llvm::Triple::UnknownOS &&
- getTriple().isOSBinFormatELF())
- Builder.defineMacro("__ELF__");
-
- // Target properties.
- if (!getTriple().isOSWindows() && getTriple().isArch64Bit()) {
- Builder.defineMacro("_LP64");
- Builder.defineMacro("__LP64__");
+ if (getTriple().isWindowsArm64EC()) {
+ // Define the same set of macros as would be defined on x86_64 to ensure that
+ // ARM64EC datatype layouts match those of x86_64 compiled code
+ Builder.defineMacro("__amd64__");
+ Builder.defineMacro("__amd64");
+ Builder.defineMacro("__x86_64");
+ Builder.defineMacro("__x86_64__");
+ Builder.defineMacro("__arm64ec__");
+ } else {
+ Builder.defineMacro("__aarch64__");
}
+ // Inline assembly supports AArch64 flag outputs.
+ Builder.defineMacro("__GCC_ASM_FLAG_OUTPUTS__");
+
std::string CodeModel = getTargetOpts().CodeModel;
if (CodeModel == "default")
CodeModel = "small";
@@ -227,8 +367,10 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
// ACLE predefines. Many can only have one possible value on v8 AArch64.
Builder.defineMacro("__ARM_ACLE", "200");
- Builder.defineMacro("__ARM_ARCH", "8");
- Builder.defineMacro("__ARM_ARCH_PROFILE", "'A'");
+ Builder.defineMacro("__ARM_ARCH",
+ std::to_string(ArchInfo->Version.getMajor()));
+ Builder.defineMacro("__ARM_ARCH_PROFILE",
+ std::string("'") + (char)ArchInfo->Profile + "'");
Builder.defineMacro("__ARM_64BIT_STATE", "1");
Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
@@ -244,8 +386,14 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");
+ // These macros are set when Clang can parse declarations with these
+ // attributes.
+ Builder.defineMacro("__ARM_STATE_ZA", "1");
+ Builder.defineMacro("__ARM_STATE_ZT0", "1");
+
// 0xe implies support for half, single and double precision operations.
- Builder.defineMacro("__ARM_FP", "0xE");
+ if (FPU & FPUMode)
+ Builder.defineMacro("__ARM_FP", "0xE");
// PCS specifies this for SysV variants, which is all we support. Other ABIs
// may choose __ARM_FP16_FORMAT_ALTERNATIVE.
@@ -269,6 +417,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (FPU & SveMode)
Builder.defineMacro("__ARM_FEATURE_SVE", "1");
+ if ((FPU & NeonMode) && (FPU & SveMode))
+ Builder.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");
+
if (HasSVE2)
Builder.defineMacro("__ARM_FEATURE_SVE2", "1");
@@ -284,9 +435,28 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasSVE2 && HasSVE2SM4)
Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");
+ if (HasSME) {
+ Builder.defineMacro("__ARM_FEATURE_SME");
+ Builder.defineMacro("__ARM_FEATURE_LOCALLY_STREAMING", "1");
+ }
+
+ if (HasSME2) {
+ Builder.defineMacro("__ARM_FEATURE_SME");
+ Builder.defineMacro("__ARM_FEATURE_SME2");
+ Builder.defineMacro("__ARM_FEATURE_LOCALLY_STREAMING", "1");
+ }
+
if (HasCRC)
Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
+ if (HasRCPC3)
+ Builder.defineMacro("__ARM_FEATURE_RCPC", "3");
+ else if (HasRCPC)
+ Builder.defineMacro("__ARM_FEATURE_RCPC", "1");
+
+ if (HasFMV)
+ Builder.defineMacro("__HAVE_FUNCTION_MULTI_VERSIONING", "1");
+
// The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
// macros for AES, SHA2, SHA3 and SM4
if (HasAES && HasSHA2)
@@ -308,6 +478,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__ARM_FEATURE_SM4", "1");
}
+ if (HasPAuth)
+ Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");
+
if (HasUnaligned)
Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
@@ -375,178 +548,416 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (Opts.BranchTargetEnforcement)
Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");
+ if (Opts.GuardedControlStack)
+ Builder.defineMacro("__ARM_FEATURE_GCS_DEFAULT", "1");
+
if (HasLS64)
Builder.defineMacro("__ARM_FEATURE_LS64", "1");
if (HasRandGen)
Builder.defineMacro("__ARM_FEATURE_RNG", "1");
- switch (ArchKind) {
- default:
- break;
- case llvm::AArch64::ArchKind::ARMV8_1A:
+ if (HasMOPS)
+ Builder.defineMacro("__ARM_FEATURE_MOPS", "1");
+
+ if (HasD128)
+ Builder.defineMacro("__ARM_FEATURE_SYSREG128", "1");
+
+ if (HasGCS)
+ Builder.defineMacro("__ARM_FEATURE_GCS", "1");
+
+ if (*ArchInfo == llvm::AArch64::ARMV8_1A)
getTargetDefinesARMV81A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV8_2A:
+ else if (*ArchInfo == llvm::AArch64::ARMV8_2A)
getTargetDefinesARMV82A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV8_3A:
+ else if (*ArchInfo == llvm::AArch64::ARMV8_3A)
getTargetDefinesARMV83A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV8_4A:
+ else if (*ArchInfo == llvm::AArch64::ARMV8_4A)
getTargetDefinesARMV84A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV8_5A:
+ else if (*ArchInfo == llvm::AArch64::ARMV8_5A)
getTargetDefinesARMV85A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV8_6A:
+ else if (*ArchInfo == llvm::AArch64::ARMV8_6A)
getTargetDefinesARMV86A(Opts, Builder);
- break;
- case llvm::AArch64::ArchKind::ARMV8_7A:
+ else if (*ArchInfo == llvm::AArch64::ARMV8_7A)
getTargetDefinesARMV87A(Opts, Builder);
- break;
- }
-
- // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
+ else if (*ArchInfo == llvm::AArch64::ARMV8_8A)
+ getTargetDefinesARMV88A(Opts, Builder);
+ else if (*ArchInfo == llvm::AArch64::ARMV8_9A)
+ getTargetDefinesARMV89A(Opts, Builder);
+ else if (*ArchInfo == llvm::AArch64::ARMV9A)
+ getTargetDefinesARMV9A(Opts, Builder);
+ else if (*ArchInfo == llvm::AArch64::ARMV9_1A)
+ getTargetDefinesARMV91A(Opts, Builder);
+ else if (*ArchInfo == llvm::AArch64::ARMV9_2A)
+ getTargetDefinesARMV92A(Opts, Builder);
+ else if (*ArchInfo == llvm::AArch64::ARMV9_3A)
+ getTargetDefinesARMV93A(Opts, Builder);
+ else if (*ArchInfo == llvm::AArch64::ARMV9_4A)
+ getTargetDefinesARMV94A(Opts, Builder);
+ else if (*ArchInfo == llvm::AArch64::ARMV9_5A)
+ getTargetDefinesARMV95A(Opts, Builder);
+
+ // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8|16) builtins work.
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");
+
+ // Allow detection of fast FMA support.
+ Builder.defineMacro("__FP_FAST_FMA", "1");
+ Builder.defineMacro("__FP_FAST_FMAF", "1");
+
+ // C/C++ operators work on both VLS and VLA SVE types
+ if (FPU & SveMode)
+ Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS", "2");
- if (Opts.ArmSveVectorBits) {
- Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.ArmSveVectorBits));
- Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS");
+ if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
+ Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
}
}
ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
- return llvm::makeArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
- Builtin::FirstTSBuiltin);
+ return llvm::ArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
+ Builtin::FirstTSBuiltin);
+}
+
+std::optional<std::pair<unsigned, unsigned>>
+AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
+ if (LangOpts.VScaleMin || LangOpts.VScaleMax)
+ return std::pair<unsigned, unsigned>(
+ LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);
+
+ if (hasFeature("sve"))
+ return std::pair<unsigned, unsigned>(1, 16);
+
+ return std::nullopt;
+}
+
+unsigned AArch64TargetInfo::multiVersionSortPriority(StringRef Name) const {
+ if (Name == "default")
+ return 0;
+ if (auto Ext = llvm::AArch64::parseArchExtension(Name))
+ return Ext->FmvPriority;
+ return 0;
+}
+
+unsigned AArch64TargetInfo::multiVersionFeatureCost() const {
+ // Take the maximum priority as per feature cost, so more features win.
+ return llvm::AArch64::ExtensionInfo::MaxFMVPriority;
+}
+
+bool AArch64TargetInfo::doesFeatureAffectCodeGen(StringRef Name) const {
+ if (auto Ext = llvm::AArch64::parseArchExtension(Name))
+ return !Ext->DependentFeatures.empty();
+ return false;
+}
+
+StringRef AArch64TargetInfo::getFeatureDependencies(StringRef Name) const {
+ if (auto Ext = llvm::AArch64::parseArchExtension(Name))
+ return Ext->DependentFeatures;
+ return StringRef();
+}
+
+bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
+ return llvm::AArch64::parseArchExtension(FeatureStr).has_value();
}
bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
- return Feature == "aarch64" || Feature == "arm64" || Feature == "arm" ||
- (Feature == "neon" && (FPU & NeonMode)) ||
- ((Feature == "sve" || Feature == "sve2" || Feature == "sve2-bitperm" ||
- Feature == "sve2-aes" || Feature == "sve2-sha3" ||
- Feature == "sve2-sm4" || Feature == "f64mm" || Feature == "f32mm" ||
- Feature == "i8mm" || Feature == "bf16") &&
- (FPU & SveMode)) ||
- (Feature == "ls64" && HasLS64);
+ return llvm::StringSwitch<bool>(Feature)
+ .Cases("aarch64", "arm64", "arm", true)
+ .Case("fmv", HasFMV)
+ .Cases("neon", "fp", "simd", FPU & NeonMode)
+ .Case("jscvt", HasJSCVT)
+ .Case("fcma", HasFCMA)
+ .Case("rng", HasRandGen)
+ .Case("flagm", HasFlagM)
+ .Case("flagm2", HasAlternativeNZCV)
+ .Case("fp16fml", HasFP16FML)
+ .Case("dotprod", HasDotProd)
+ .Case("sm4", HasSM4)
+ .Case("rdm", HasRDM)
+ .Case("lse", HasLSE)
+ .Case("crc", HasCRC)
+ .Case("sha2", HasSHA2)
+ .Case("sha3", HasSHA3)
+ .Cases("aes", "pmull", HasAES)
+ .Cases("fp16", "fullfp16", HasFullFP16)
+ .Case("dit", HasDIT)
+ .Case("dpb", HasCCPP)
+ .Case("dpb2", HasCCDP)
+ .Case("rcpc", HasRCPC)
+ .Case("frintts", HasFRInt3264)
+ .Case("i8mm", HasMatMul)
+ .Case("bf16", HasBFloat16)
+ .Case("sve", FPU & SveMode)
+ .Case("sve-bf16", FPU & SveMode && HasBFloat16)
+ .Case("sve-i8mm", FPU & SveMode && HasMatMul)
+ .Case("f32mm", FPU & SveMode && HasMatmulFP32)
+ .Case("f64mm", FPU & SveMode && HasMatmulFP64)
+ .Case("sve2", FPU & SveMode && HasSVE2)
+ .Case("sve2-pmull128", FPU & SveMode && HasSVE2AES)
+ .Case("sve2-bitperm", FPU & SveMode && HasSVE2BitPerm)
+ .Case("sve2-sha3", FPU & SveMode && HasSVE2SHA3)
+ .Case("sve2-sm4", FPU & SveMode && HasSVE2SM4)
+ .Case("sme", HasSME)
+ .Case("sme2", HasSME2)
+ .Case("sme-f64f64", HasSMEF64F64)
+ .Case("sme-i16i64", HasSMEI16I64)
+ .Case("sme-fa64", HasSMEFA64)
+ .Cases("memtag", "memtag2", HasMTE)
+ .Case("sb", HasSB)
+ .Case("predres", HasPredRes)
+ .Cases("ssbs", "ssbs2", HasSSBS)
+ .Case("bti", HasBTI)
+ .Cases("ls64", "ls64_v", "ls64_accdata", HasLS64)
+ .Case("wfxt", HasWFxT)
+ .Case("rcpc3", HasRCPC3)
+ .Default(false);
+}
+
+void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
+ StringRef Name, bool Enabled) const {
+ Features[Name] = Enabled;
+ // If the feature is an architecture feature (like v8.2a), add all previous
+ // architecture versions and any dependant target features.
+ const std::optional<llvm::AArch64::ArchInfo> ArchInfo =
+ llvm::AArch64::ArchInfo::findBySubArch(Name);
+
+ if (!ArchInfo)
+ return; // Not an architecture, nothing more to do.
+
+ // Disabling an architecture feature does not affect dependent features
+ if (!Enabled)
+ return;
+
+ for (const auto *OtherArch : llvm::AArch64::ArchInfos)
+ if (ArchInfo->implies(*OtherArch))
+ Features[OtherArch->getSubArch()] = true;
+
+ // Set any features implied by the architecture
+ std::vector<StringRef> CPUFeats;
+ if (llvm::AArch64::getExtensionFeatures(ArchInfo->DefaultExts, CPUFeats)) {
+ for (auto F : CPUFeats) {
+ assert(F[0] == '+' && "Expected + in target feature!");
+ Features[F.drop_front(1)] = true;
+ }
+ }
}
bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) {
- FPU = FPUMode;
- HasCRC = false;
- HasCrypto = false;
- HasAES = false;
- HasSHA2 = false;
- HasSHA3 = false;
- HasSM4 = false;
- HasUnaligned = true;
- HasFullFP16 = false;
- HasDotProd = false;
- HasFP16FML = false;
- HasMTE = false;
- HasTME = false;
- HasLS64 = false;
- HasRandGen = false;
- HasMatMul = false;
- HasBFloat16 = false;
- HasSVE2 = false;
- HasSVE2AES = false;
- HasSVE2SHA3 = false;
- HasSVE2SM4 = false;
- HasSVE2BitPerm = false;
- HasMatmulFP64 = false;
- HasMatmulFP32 = false;
- HasLSE = false;
-
- ArchKind = llvm::AArch64::ArchKind::ARMV8A;
-
for (const auto &Feature : Features) {
- if (Feature == "+neon")
+ if (Feature == "-fp-armv8")
+ HasNoFP = true;
+ if (Feature == "-neon")
+ HasNoNeon = true;
+ if (Feature == "-sve")
+ HasNoSVE = true;
+
+ if (Feature == "+neon" || Feature == "+fp-armv8")
+ FPU |= NeonMode;
+ if (Feature == "+jscvt") {
+ HasJSCVT = true;
+ FPU |= NeonMode;
+ }
+ if (Feature == "+fcma") {
+ HasFCMA = true;
FPU |= NeonMode;
+ }
+
if (Feature == "+sve") {
+ FPU |= NeonMode;
FPU |= SveMode;
- HasFullFP16 = 1;
+ HasFullFP16 = true;
}
if (Feature == "+sve2") {
+ FPU |= NeonMode;
FPU |= SveMode;
- HasFullFP16 = 1;
- HasSVE2 = 1;
+ HasFullFP16 = true;
+ HasSVE2 = true;
}
if (Feature == "+sve2-aes") {
+ FPU |= NeonMode;
FPU |= SveMode;
- HasFullFP16 = 1;
- HasSVE2 = 1;
- HasSVE2AES = 1;
+ HasFullFP16 = true;
+ HasSVE2 = true;
+ HasSVE2AES = true;
}
if (Feature == "+sve2-sha3") {
+ FPU |= NeonMode;
FPU |= SveMode;
- HasFullFP16 = 1;
- HasSVE2 = 1;
- HasSVE2SHA3 = 1;
+ HasFullFP16 = true;
+ HasSVE2 = true;
+ HasSVE2SHA3 = true;
}
if (Feature == "+sve2-sm4") {
+ FPU |= NeonMode;
FPU |= SveMode;
- HasFullFP16 = 1;
- HasSVE2 = 1;
- HasSVE2SM4 = 1;
+ HasFullFP16 = true;
+ HasSVE2 = true;
+ HasSVE2SM4 = true;
}
if (Feature == "+sve2-bitperm") {
+ FPU |= NeonMode;
FPU |= SveMode;
- HasFullFP16 = 1;
- HasSVE2 = 1;
- HasSVE2BitPerm = 1;
+ HasFullFP16 = true;
+ HasSVE2 = true;
+ HasSVE2BitPerm = true;
}
if (Feature == "+f32mm") {
+ FPU |= NeonMode;
FPU |= SveMode;
+ HasFullFP16 = true;
HasMatmulFP32 = true;
}
if (Feature == "+f64mm") {
+ FPU |= NeonMode;
FPU |= SveMode;
+ HasFullFP16 = true;
HasMatmulFP64 = true;
}
+ if (Feature == "+sme") {
+ HasSME = true;
+ HasBFloat16 = true;
+ HasFullFP16 = true;
+ }
+ if (Feature == "+sme2") {
+ HasSME = true;
+ HasSME2 = true;
+ HasBFloat16 = true;
+ HasFullFP16 = true;
+ }
+ if (Feature == "+sme-f64f64") {
+ HasSME = true;
+ HasSMEF64F64 = true;
+ HasBFloat16 = true;
+ HasFullFP16 = true;
+ }
+ if (Feature == "+sme-i16i64") {
+ HasSME = true;
+ HasSMEI16I64 = true;
+ HasBFloat16 = true;
+ HasFullFP16 = true;
+ }
+ if (Feature == "+sme-fa64") {
+ FPU |= NeonMode;
+ FPU |= SveMode;
+ HasSME = true;
+ HasSVE2 = true;
+ HasSMEFA64 = true;
+ }
+ if (Feature == "+sb")
+ HasSB = true;
+ if (Feature == "+predres")
+ HasPredRes = true;
+ if (Feature == "+ssbs")
+ HasSSBS = true;
+ if (Feature == "+bti")
+ HasBTI = true;
+ if (Feature == "+wfxt")
+ HasWFxT = true;
+ if (Feature == "-fmv")
+ HasFMV = false;
if (Feature == "+crc")
HasCRC = true;
- if (Feature == "+crypto")
- HasCrypto = true;
- if (Feature == "+aes")
+ if (Feature == "+rcpc")
+ HasRCPC = true;
+ if (Feature == "+aes") {
+ FPU |= NeonMode;
HasAES = true;
- if (Feature == "+sha2")
+ }
+ if (Feature == "+sha2") {
+ FPU |= NeonMode;
HasSHA2 = true;
+ }
if (Feature == "+sha3") {
+ FPU |= NeonMode;
HasSHA2 = true;
HasSHA3 = true;
}
- if (Feature == "+sm4")
+ if (Feature == "+rdm") {
+ FPU |= NeonMode;
+ HasRDM = true;
+ }
+ if (Feature == "+dit")
+ HasDIT = true;
+ if (Feature == "+cccp")
+ HasCCPP = true;
+ if (Feature == "+ccdp") {
+ HasCCPP = true;
+ HasCCDP = true;
+ }
+ if (Feature == "+fptoint")
+ HasFRInt3264 = true;
+ if (Feature == "+sm4") {
+ FPU |= NeonMode;
HasSM4 = true;
+ }
if (Feature == "+strict-align")
HasUnaligned = false;
- if (Feature == "+v8.1a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8_1A;
- if (Feature == "+v8.2a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8_2A;
- if (Feature == "+v8.3a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8_3A;
- if (Feature == "+v8.4a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8_4A;
- if (Feature == "+v8.5a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
- if (Feature == "+v8.6a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8_6A;
- if (Feature == "+v8.7a")
- ArchKind = llvm::AArch64::ArchKind::ARMV8_7A;
+ // All predecessor archs are added but select the latest one for ArchKind.
+ if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8A;
+ if (Feature == "+v8.1a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_1A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_1A;
+ if (Feature == "+v8.2a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_2A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_2A;
+ if (Feature == "+v8.3a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_3A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_3A;
+ if (Feature == "+v8.4a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_4A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_4A;
+ if (Feature == "+v8.5a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_5A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_5A;
+ if (Feature == "+v8.6a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_6A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_6A;
+ if (Feature == "+v8.7a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_7A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_7A;
+ if (Feature == "+v8.8a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_8A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_8A;
+ if (Feature == "+v8.9a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV8_9A.Version)
+ ArchInfo = &llvm::AArch64::ARMV8_9A;
+ if (Feature == "+v9a" && ArchInfo->Version < llvm::AArch64::ARMV9A.Version)
+ ArchInfo = &llvm::AArch64::ARMV9A;
+ if (Feature == "+v9.1a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV9_1A.Version)
+ ArchInfo = &llvm::AArch64::ARMV9_1A;
+ if (Feature == "+v9.2a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV9_2A.Version)
+ ArchInfo = &llvm::AArch64::ARMV9_2A;
+ if (Feature == "+v9.3a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV9_3A.Version)
+ ArchInfo = &llvm::AArch64::ARMV9_3A;
+ if (Feature == "+v9.4a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV9_4A.Version)
+ ArchInfo = &llvm::AArch64::ARMV9_4A;
+ if (Feature == "+v9.5a" &&
+ ArchInfo->Version < llvm::AArch64::ARMV9_5A.Version)
+ ArchInfo = &llvm::AArch64::ARMV9_5A;
if (Feature == "+v8r")
- ArchKind = llvm::AArch64::ArchKind::ARMV8R;
- if (Feature == "+fullfp16")
+ ArchInfo = &llvm::AArch64::ARMV8R;
+ if (Feature == "+fullfp16") {
+ FPU |= NeonMode;
HasFullFP16 = true;
- if (Feature == "+dotprod")
+ }
+ if (Feature == "+dotprod") {
+ FPU |= NeonMode;
HasDotProd = true;
- if (Feature == "+fp16fml")
+ }
+ if (Feature == "+fp16fml") {
+ FPU |= NeonMode;
+ HasFullFP16 = true;
HasFP16FML = true;
+ }
if (Feature == "+mte")
HasMTE = true;
if (Feature == "+tme")
@@ -565,10 +976,188 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasRandGen = true;
if (Feature == "+flagm")
HasFlagM = true;
+ if (Feature == "+altnzcv") {
+ HasFlagM = true;
+ HasAlternativeNZCV = true;
+ }
+ if (Feature == "+mops")
+ HasMOPS = true;
+ if (Feature == "+d128")
+ HasD128 = true;
+ if (Feature == "+gcs")
+ HasGCS = true;
+ if (Feature == "+rcpc3")
+ HasRCPC3 = true;
+ }
+
+ // Check features that are manually disabled by command line options.
+ // This needs to be checked after architecture-related features are handled,
+ // making sure they are properly disabled when required.
+ for (const auto &Feature : Features) {
+ if (Feature == "-d128")
+ HasD128 = false;
}
setDataLayout();
+ setArchFeatures();
+
+ if (HasNoFP) {
+ FPU &= ~FPUMode;
+ FPU &= ~NeonMode;
+ FPU &= ~SveMode;
+ }
+ if (HasNoNeon) {
+ FPU &= ~NeonMode;
+ FPU &= ~SveMode;
+ }
+ if (HasNoSVE)
+ FPU &= ~SveMode;
+
+ return true;
+}
+
+bool AArch64TargetInfo::initFeatureMap(
+ llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const {
+ std::vector<std::string> UpdatedFeaturesVec;
+ // Parse the CPU and add any implied features.
+ std::optional<llvm::AArch64::CpuInfo> CpuInfo = llvm::AArch64::parseCpu(CPU);
+ if (CpuInfo) {
+ auto Exts = CpuInfo->getImpliedExtensions();
+ std::vector<StringRef> CPUFeats;
+ llvm::AArch64::getExtensionFeatures(Exts, CPUFeats);
+ for (auto F : CPUFeats) {
+ assert((F[0] == '+' || F[0] == '-') && "Expected +/- in target feature!");
+ UpdatedFeaturesVec.push_back(F.str());
+ }
+ }
+
+ // Process target and dependent features. This is done in two loops collecting
+ // them into UpdatedFeaturesVec: first to add dependent '+'features, second to
+ // add target '+/-'features that can later disable some of features added on
+ // the first loop. Function Multi Versioning features begin with '?'.
+ for (const auto &Feature : FeaturesVec)
+ if (((Feature[0] == '?' || Feature[0] == '+')) &&
+ AArch64TargetInfo::doesFeatureAffectCodeGen(Feature.substr(1))) {
+ StringRef DepFeatures =
+ AArch64TargetInfo::getFeatureDependencies(Feature.substr(1));
+ SmallVector<StringRef, 1> AttrFeatures;
+ DepFeatures.split(AttrFeatures, ",");
+ for (auto F : AttrFeatures)
+ UpdatedFeaturesVec.push_back(F.str());
+ }
+ for (const auto &Feature : FeaturesVec)
+ if (Feature[0] != '?') {
+ std::string UpdatedFeature = Feature;
+ if (Feature[0] == '+') {
+ std::optional<llvm::AArch64::ExtensionInfo> Extension =
+ llvm::AArch64::parseArchExtension(Feature.substr(1));
+ if (Extension)
+ UpdatedFeature = Extension->Feature.str();
+ }
+ UpdatedFeaturesVec.push_back(UpdatedFeature);
+ }
+
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, UpdatedFeaturesVec);
+}
+
+// Parse AArch64 Target attributes, which are a comma separated list of:
+// "arch=<arch>" - parsed to features as per -march=..
+// "cpu=<cpu>" - parsed to features as per -mcpu=.., with CPU set to <cpu>
+// "tune=<cpu>" - TuneCPU set to <cpu>
+// "feature", "no-feature" - Add (or remove) feature.
+// "+feature", "+nofeature" - Add (or remove) feature.
+ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
+ ParsedTargetAttr Ret;
+ if (Features == "default")
+ return Ret;
+ SmallVector<StringRef, 1> AttrFeatures;
+ Features.split(AttrFeatures, ",");
+ bool FoundArch = false;
+
+ auto SplitAndAddFeatures = [](StringRef FeatString,
+ std::vector<std::string> &Features) {
+ SmallVector<StringRef, 8> SplitFeatures;
+ FeatString.split(SplitFeatures, StringRef("+"), -1, false);
+ for (StringRef Feature : SplitFeatures) {
+ StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
+ if (!FeatureName.empty())
+ Features.push_back(FeatureName.str());
+ else
+ // Pushing the original feature string to give a sema error later on
+ // when they get checked.
+ if (Feature.starts_with("no"))
+ Features.push_back("-" + Feature.drop_front(2).str());
+ else
+ Features.push_back("+" + Feature.str());
+ }
+ };
+
+ for (auto &Feature : AttrFeatures) {
+ Feature = Feature.trim();
+ if (Feature.starts_with("fpmath="))
+ continue;
+
+ if (Feature.starts_with("branch-protection=")) {
+ Ret.BranchProtection = Feature.split('=').second.trim();
+ continue;
+ }
+
+ if (Feature.starts_with("arch=")) {
+ if (FoundArch)
+ Ret.Duplicate = "arch=";
+ FoundArch = true;
+ std::pair<StringRef, StringRef> Split =
+ Feature.split("=").second.trim().split("+");
+ const llvm::AArch64::ArchInfo *AI = llvm::AArch64::parseArch(Split.first);
+
+ // Parse the architecture version, adding the required features to
+ // Ret.Features.
+ if (!AI)
+ continue;
+ Ret.Features.push_back(AI->ArchFeature.str());
+ // Add any extra features, after the +
+ SplitAndAddFeatures(Split.second, Ret.Features);
+ } else if (Feature.starts_with("cpu=")) {
+ if (!Ret.CPU.empty())
+ Ret.Duplicate = "cpu=";
+ else {
+ // Split the cpu string into "cpu=", "cortex-a710" and any remaining
+ // "+feat" features.
+ std::pair<StringRef, StringRef> Split =
+ Feature.split("=").second.trim().split("+");
+ Ret.CPU = Split.first;
+ SplitAndAddFeatures(Split.second, Ret.Features);
+ }
+ } else if (Feature.starts_with("tune=")) {
+ if (!Ret.Tune.empty())
+ Ret.Duplicate = "tune=";
+ else
+ Ret.Tune = Feature.split("=").second.trim();
+ } else if (Feature.starts_with("+")) {
+ SplitAndAddFeatures(Feature, Ret.Features);
+ } else if (Feature.starts_with("no-")) {
+ StringRef FeatureName =
+ llvm::AArch64::getArchExtFeature(Feature.split("-").second);
+ if (!FeatureName.empty())
+ Ret.Features.push_back("-" + FeatureName.drop_front(1).str());
+ else
+ Ret.Features.push_back("-" + Feature.split("-").second.str());
+ } else {
+ // Try parsing the string to the internal target feature name. If it is
+ // invalid, add the original string (which could already be an internal
+ // name). These should be checked later by isValidFeatureName.
+ StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
+ if (!FeatureName.empty())
+ Ret.Features.push_back(FeatureName.str());
+ else
+ Ret.Features.push_back("+" + Feature.str());
+ }
+ }
+ return Ret;
+}
+bool AArch64TargetInfo::hasBFloat16Type() const {
return true;
}
@@ -582,6 +1171,7 @@ AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
case CC_PreserveAll:
case CC_OpenCLKernel:
case CC_AArch64VectorCall:
+ case CC_AArch64SVEPCS:
case CC_Win64:
return CCCR_OK;
default:
@@ -596,6 +1186,8 @@ TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
}
const char *const AArch64TargetInfo::GCCRegNames[] = {
+ // clang-format off
+
// 32-bit Integer registers
"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
"w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
@@ -628,11 +1220,20 @@ const char *const AArch64TargetInfo::GCCRegNames[] = {
// SVE predicate registers
"p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10",
- "p11", "p12", "p13", "p14", "p15"
+ "p11", "p12", "p13", "p14", "p15",
+
+ // SVE predicate-as-counter registers
+ "pn0", "pn1", "pn2", "pn3", "pn4", "pn5", "pn6", "pn7", "pn8",
+ "pn9", "pn10", "pn11", "pn12", "pn13", "pn14", "pn15",
+
+ // SME registers
+ "za", "zt0",
+
+ // clang-format on
};
ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
- return llvm::makeArrayRef(GCCRegNames);
+ return llvm::ArrayRef(GCCRegNames);
}
const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
@@ -675,7 +1276,53 @@ const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
};
ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
- return llvm::makeArrayRef(GCCRegAliases);
+ return llvm::ArrayRef(GCCRegAliases);
+}
+
+// Returns the length of cc constraint.
+static unsigned matchAsmCCConstraint(const char *Name) {
+ constexpr unsigned len = 5;
+ auto RV = llvm::StringSwitch<unsigned>(Name)
+ .Case("@cceq", len)
+ .Case("@ccne", len)
+ .Case("@cchs", len)
+ .Case("@cccs", len)
+ .Case("@cccc", len)
+ .Case("@cclo", len)
+ .Case("@ccmi", len)
+ .Case("@ccpl", len)
+ .Case("@ccvs", len)
+ .Case("@ccvc", len)
+ .Case("@cchi", len)
+ .Case("@ccls", len)
+ .Case("@ccge", len)
+ .Case("@cclt", len)
+ .Case("@ccgt", len)
+ .Case("@ccle", len)
+ .Default(0);
+ return RV;
+}
+
+std::string
+AArch64TargetInfo::convertConstraint(const char *&Constraint) const {
+ std::string R;
+ switch (*Constraint) {
+ case 'U': // Three-character constraint; add "@3" hint for later parsing.
+ R = std::string("@3") + std::string(Constraint, 3);
+ Constraint += 2;
+ break;
+ case '@':
+ if (const unsigned Len = matchAsmCCConstraint(Constraint)) {
+ std::string Converted = "{" + std::string(Constraint, Len) + "}";
+ Constraint += Len - 1;
+ return Converted;
+ }
+ return std::string(1, *Constraint);
+ default:
+ R = TargetInfo::convertConstraint(Constraint);
+ break;
+ }
+ return R;
}
bool AArch64TargetInfo::validateAsmConstraint(
@@ -702,8 +1349,15 @@ bool AArch64TargetInfo::validateAsmConstraint(
Info.setAllowsRegister();
return true;
case 'U':
- if (Name[1] == 'p' && (Name[2] == 'l' || Name[2] == 'a')) {
- // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7)
+ if (Name[1] == 'p' &&
+ (Name[2] == 'l' || Name[2] == 'a' || Name[2] == 'h')) {
+ // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7, "Uph"=P8-P15)
+ Info.setAllowsRegister();
+ Name += 2;
+ return true;
+ }
+ if (Name[1] == 'c' && (Name[2] == 'i' || Name[2] == 'j')) {
+ // Gpr registers ("Uci"=w8-11, "Ucj"=w12-15)
Info.setAllowsRegister();
Name += 2;
return true;
@@ -725,6 +1379,13 @@ bool AArch64TargetInfo::validateAsmConstraint(
case 'y': // SVE registers (V0-V7)
Info.setAllowsRegister();
return true;
+ case '@':
+ // CC condition
+ if (const unsigned Len = matchAsmCCConstraint(Name)) {
+ Name += Len - 1;
+ Info.setAllowsRegister();
+ return true;
+ }
}
return false;
}
@@ -733,8 +1394,7 @@ bool AArch64TargetInfo::validateConstraintModifier(
StringRef Constraint, char Modifier, unsigned Size,
std::string &SuggestedModifier) const {
// Strip off constraint modifiers.
- while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
- Constraint = Constraint.substr(1);
+ Constraint = Constraint.ltrim("=+&");
switch (Constraint[0]) {
default:
@@ -763,7 +1423,7 @@ bool AArch64TargetInfo::validateConstraintModifier(
}
}
-const char *AArch64TargetInfo::getClobbers() const { return ""; }
+std::string_view AArch64TargetInfo::getClobbers() const { return ""; }
int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
if (RegNo == 0)
@@ -872,7 +1532,13 @@ MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
- Builder.defineMacro("_M_ARM64", "1");
+ if (getTriple().isWindowsArm64EC()) {
+ Builder.defineMacro("_M_X64", "100");
+ Builder.defineMacro("_M_AMD64", "100");
+ Builder.defineMacro("_M_ARM64EC", "1");
+ } else {
+ Builder.defineMacro("_M_ARM64", "1");
+ }
}
TargetInfo::CallingConvKind
@@ -936,7 +1602,6 @@ void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
else
Builder.defineMacro("__ARM64_ARCH_8__");
Builder.defineMacro("__ARM_NEON__");
- Builder.defineMacro("__LITTLE_ENDIAN__");
Builder.defineMacro("__REGISTER_PREFIX__", "");
Builder.defineMacro("__arm64", "1");
Builder.defineMacro("__arm64__", "1");
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h
index 46882a808336..9699222b0bf7 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h
@@ -15,7 +15,8 @@
#include "OSTargets.h"
#include "clang/Basic/TargetBuiltins.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/TargetParser/AArch64TargetParser.h"
+#include <optional>
namespace clang {
namespace targets {
@@ -25,38 +26,67 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
static const TargetInfo::GCCRegAlias GCCRegAliases[];
static const char *const GCCRegNames[];
- enum FPUModeEnum { FPUMode, NeonMode = (1 << 0), SveMode = (1 << 1) };
-
- unsigned FPU;
- bool HasCRC;
- bool HasCrypto;
- bool HasAES;
- bool HasSHA2;
- bool HasSHA3;
- bool HasSM4;
- bool HasUnaligned;
- bool HasFullFP16;
- bool HasDotProd;
- bool HasFP16FML;
- bool HasMTE;
- bool HasTME;
- bool HasPAuth;
- bool HasLS64;
- bool HasRandGen;
- bool HasMatMul;
- bool HasSVE2;
- bool HasSVE2AES;
- bool HasSVE2SHA3;
- bool HasSVE2SM4;
- bool HasSVE2BitPerm;
- bool HasMatmulFP64;
- bool HasMatmulFP32;
- bool HasLSE;
- bool HasFlagM;
-
- llvm::AArch64::ArchKind ArchKind;
-
- static const Builtin::Info BuiltinInfo[];
+ enum FPUModeEnum {
+ FPUMode = (1 << 0),
+ NeonMode = (1 << 1),
+ SveMode = (1 << 2),
+ };
+
+ unsigned FPU = FPUMode;
+ bool HasCRC = false;
+ bool HasAES = false;
+ bool HasSHA2 = false;
+ bool HasSHA3 = false;
+ bool HasSM4 = false;
+ bool HasUnaligned = true;
+ bool HasFullFP16 = false;
+ bool HasDotProd = false;
+ bool HasFP16FML = false;
+ bool HasMTE = false;
+ bool HasTME = false;
+ bool HasPAuth = false;
+ bool HasLS64 = false;
+ bool HasRandGen = false;
+ bool HasMatMul = false;
+ bool HasBFloat16 = false;
+ bool HasSVE2 = false;
+ bool HasSVE2AES = false;
+ bool HasSVE2SHA3 = false;
+ bool HasSVE2SM4 = false;
+ bool HasSVE2BitPerm = false;
+ bool HasMatmulFP64 = false;
+ bool HasMatmulFP32 = false;
+ bool HasLSE = false;
+ bool HasFlagM = false;
+ bool HasAlternativeNZCV = false;
+ bool HasMOPS = false;
+ bool HasD128 = false;
+ bool HasRCPC = false;
+ bool HasRDM = false;
+ bool HasDIT = false;
+ bool HasCCPP = false;
+ bool HasCCDP = false;
+ bool HasFRInt3264 = false;
+ bool HasSME = false;
+ bool HasSME2 = false;
+ bool HasSMEF64F64 = false;
+ bool HasSMEI16I64 = false;
+ bool HasSB = false;
+ bool HasPredRes = false;
+ bool HasSSBS = false;
+ bool HasBTI = false;
+ bool HasWFxT = false;
+ bool HasJSCVT = false;
+ bool HasFCMA = false;
+ bool HasNoFP = false;
+ bool HasNoNeon = false;
+ bool HasNoSVE = false;
+ bool HasFMV = true;
+ bool HasGCS = false;
+ bool HasRCPC3 = false;
+ bool HasSMEFA64 = false;
+
+ const llvm::AArch64::ArchInfo *ArchInfo = &llvm::AArch64::ARMV8A;
std::string ABI;
@@ -66,17 +96,27 @@ public:
StringRef getABI() const override;
bool setABI(const std::string &Name) override;
- bool validateBranchProtection(StringRef, BranchProtectionInfo &,
- StringRef &) const override;
+ bool validateBranchProtection(StringRef Spec, StringRef Arch,
+ BranchProtectionInfo &BPI,
+ StringRef &Err) const override;
bool isValidCPUName(StringRef Name) const override;
void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
bool setCPU(const std::string &Name) override;
+ unsigned multiVersionSortPriority(StringRef Name) const override;
+ unsigned multiVersionFeatureCost() const override;
+
+ bool
+ initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
+ StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const override;
bool useFP16ConversionIntrinsics() const override {
return false;
}
+ void setArchFeatures();
+
void getTargetDefinesARMV81A(const LangOptions &Opts,
MacroBuilder &Builder) const;
void getTargetDefinesARMV82A(const LangOptions &Opts,
@@ -91,14 +131,43 @@ public:
MacroBuilder &Builder) const;
void getTargetDefinesARMV87A(const LangOptions &Opts,
MacroBuilder &Builder) const;
+ void getTargetDefinesARMV88A(const LangOptions &Opts,
+ MacroBuilder &Builder) const;
+ void getTargetDefinesARMV89A(const LangOptions &Opts,
+ MacroBuilder &Builder) const;
+ void getTargetDefinesARMV9A(const LangOptions &Opts,
+ MacroBuilder &Builder) const;
+ void getTargetDefinesARMV91A(const LangOptions &Opts,
+ MacroBuilder &Builder) const;
+ void getTargetDefinesARMV92A(const LangOptions &Opts,
+ MacroBuilder &Builder) const;
+ void getTargetDefinesARMV93A(const LangOptions &Opts,
+ MacroBuilder &Builder) const;
+ void getTargetDefinesARMV94A(const LangOptions &Opts,
+ MacroBuilder &Builder) const;
+ void getTargetDefinesARMV95A(const LangOptions &Opts,
+ MacroBuilder &Builder) const;
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
ArrayRef<Builtin::Info> getTargetBuiltins() const override;
+ std::optional<std::pair<unsigned, unsigned>>
+ getVScaleRange(const LangOptions &LangOpts) const override;
+ bool doesFeatureAffectCodeGen(StringRef Name) const override;
+ StringRef getFeatureDependencies(StringRef Name) const override;
+ bool validateCpuSupports(StringRef FeatureStr) const override;
bool hasFeature(StringRef Feature) const override;
+ void setFeatureEnabled(llvm::StringMap<bool> &Features, StringRef Name,
+ bool Enabled) const override;
bool handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) override;
+ ParsedTargetAttr parseTargetAttr(StringRef Str) const override;
+ bool supportsTargetAttributeTune() const override { return true; }
+
+ bool checkArithmeticFenceSupported() const override { return true; }
+
+ bool hasBFloat16Type() const override;
CallingConvCheckResult checkCallingConvention(CallingConv CC) const override;
@@ -109,26 +178,14 @@ public:
ArrayRef<const char *> getGCCRegNames() const override;
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
- std::string convertConstraint(const char *&Constraint) const override {
- std::string R;
- switch (*Constraint) {
- case 'U': // Three-character constraint; add "@3" hint for later parsing.
- R = std::string("@3") + std::string(Constraint, 3);
- Constraint += 2;
- break;
- default:
- R = TargetInfo::convertConstraint(Constraint);
- break;
- }
- return R;
- }
+ std::string convertConstraint(const char *&Constraint) const override;
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const override;
bool
validateConstraintModifier(StringRef Constraint, char Modifier, unsigned Size,
std::string &SuggestedModifier) const override;
- const char *getClobbers() const override;
+ std::string_view getClobbers() const override;
StringRef getConstraintRegister(StringRef Constraint,
StringRef Expression) const override {
@@ -140,7 +197,7 @@ public:
const char *getBFloat16Mangling() const override { return "u6__bf16"; };
bool hasInt128Type() const override;
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
};
class LLVM_LIBRARY_VISIBILITY AArch64leTargetInfo : public AArch64TargetInfo {
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp
index fac786dbcf9e..6f3a4908623d 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp
@@ -13,12 +13,10 @@
#include "AMDGPU.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
+#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/TargetBuiltins.h"
-#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Frontend/OpenMP/OMPGridValues.h"
-
using namespace clang;
using namespace clang::targets;
@@ -34,63 +32,66 @@ static const char *const DataLayoutStringR600 =
static const char *const DataLayoutStringAMDGCN =
"e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
- "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
+ "-p7:160:256:256:32-p8:128:128-p9:192:256:256:32-i64:64-v16:16-v24:32-v32:"
+ "32-v48:64-v96:128"
"-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1"
- "-ni:7";
+ "-ni:7:8:9";
const LangASMap AMDGPUTargetInfo::AMDGPUDefIsGenMap = {
- Generic, // Default
- Global, // opencl_global
- Local, // opencl_local
- Constant, // opencl_constant
- Private, // opencl_private
- Generic, // opencl_generic
- Global, // opencl_global_device
- Global, // opencl_global_host
- Global, // cuda_device
- Constant, // cuda_constant
- Local, // cuda_shared
- Global, // sycl_global
- Global, // sycl_global_device
- Global, // sycl_global_host
- Local, // sycl_local
- Private, // sycl_private
- Generic, // ptr32_sptr
- Generic, // ptr32_uptr
- Generic // ptr64
+ llvm::AMDGPUAS::FLAT_ADDRESS, // Default
+ llvm::AMDGPUAS::GLOBAL_ADDRESS, // opencl_global
+ llvm::AMDGPUAS::LOCAL_ADDRESS, // opencl_local
+ llvm::AMDGPUAS::CONSTANT_ADDRESS, // opencl_constant
+ llvm::AMDGPUAS::PRIVATE_ADDRESS, // opencl_private
+ llvm::AMDGPUAS::FLAT_ADDRESS, // opencl_generic
+ llvm::AMDGPUAS::GLOBAL_ADDRESS, // opencl_global_device
+ llvm::AMDGPUAS::GLOBAL_ADDRESS, // opencl_global_host
+ llvm::AMDGPUAS::GLOBAL_ADDRESS, // cuda_device
+ llvm::AMDGPUAS::CONSTANT_ADDRESS, // cuda_constant
+ llvm::AMDGPUAS::LOCAL_ADDRESS, // cuda_shared
+ llvm::AMDGPUAS::GLOBAL_ADDRESS, // sycl_global
+ llvm::AMDGPUAS::GLOBAL_ADDRESS, // sycl_global_device
+ llvm::AMDGPUAS::GLOBAL_ADDRESS, // sycl_global_host
+ llvm::AMDGPUAS::LOCAL_ADDRESS, // sycl_local
+ llvm::AMDGPUAS::PRIVATE_ADDRESS, // sycl_private
+ llvm::AMDGPUAS::FLAT_ADDRESS, // ptr32_sptr
+ llvm::AMDGPUAS::FLAT_ADDRESS, // ptr32_uptr
+ llvm::AMDGPUAS::FLAT_ADDRESS, // ptr64
+ llvm::AMDGPUAS::FLAT_ADDRESS, // hlsl_groupshared
};
const LangASMap AMDGPUTargetInfo::AMDGPUDefIsPrivMap = {
- Private, // Default
- Global, // opencl_global
- Local, // opencl_local
- Constant, // opencl_constant
- Private, // opencl_private
- Generic, // opencl_generic
- Global, // opencl_global_device
- Global, // opencl_global_host
- Global, // cuda_device
- Constant, // cuda_constant
- Local, // cuda_shared
+ llvm::AMDGPUAS::PRIVATE_ADDRESS, // Default
+ llvm::AMDGPUAS::GLOBAL_ADDRESS, // opencl_global
+ llvm::AMDGPUAS::LOCAL_ADDRESS, // opencl_local
+ llvm::AMDGPUAS::CONSTANT_ADDRESS, // opencl_constant
+ llvm::AMDGPUAS::PRIVATE_ADDRESS, // opencl_private
+ llvm::AMDGPUAS::FLAT_ADDRESS, // opencl_generic
+ llvm::AMDGPUAS::GLOBAL_ADDRESS, // opencl_global_device
+ llvm::AMDGPUAS::GLOBAL_ADDRESS, // opencl_global_host
+ llvm::AMDGPUAS::GLOBAL_ADDRESS, // cuda_device
+ llvm::AMDGPUAS::CONSTANT_ADDRESS, // cuda_constant
+ llvm::AMDGPUAS::LOCAL_ADDRESS, // cuda_shared
// SYCL address space values for this map are dummy
- Generic, // sycl_global
- Generic, // sycl_global_device
- Generic, // sycl_global_host
- Generic, // sycl_local
- Generic, // sycl_private
- Generic, // ptr32_sptr
- Generic, // ptr32_uptr
- Generic // ptr64
+ llvm::AMDGPUAS::FLAT_ADDRESS, // sycl_global
+ llvm::AMDGPUAS::FLAT_ADDRESS, // sycl_global_device
+ llvm::AMDGPUAS::FLAT_ADDRESS, // sycl_global_host
+ llvm::AMDGPUAS::FLAT_ADDRESS, // sycl_local
+ llvm::AMDGPUAS::FLAT_ADDRESS, // sycl_private
+ llvm::AMDGPUAS::FLAT_ADDRESS, // ptr32_sptr
+ llvm::AMDGPUAS::FLAT_ADDRESS, // ptr32_uptr
+ llvm::AMDGPUAS::FLAT_ADDRESS, // ptr64
+ llvm::AMDGPUAS::FLAT_ADDRESS, // hlsl_groupshared
};
} // namespace targets
} // namespace clang
-const Builtin::Info AMDGPUTargetInfo::BuiltinInfo[] = {
+static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE},
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsAMDGPU.def"
};
@@ -172,7 +173,7 @@ const char *const AMDGPUTargetInfo::GCCRegNames[] = {
};
ArrayRef<const char *> AMDGPUTargetInfo::getGCCRegNames() const {
- return llvm::makeArrayRef(GCCRegNames);
+ return llvm::ArrayRef(GCCRegNames);
}
bool AMDGPUTargetInfo::initFeatureMap(
@@ -180,136 +181,18 @@ bool AMDGPUTargetInfo::initFeatureMap(
const std::vector<std::string> &FeatureVec) const {
using namespace llvm::AMDGPU;
-
- // XXX - What does the member GPU mean if device name string passed here?
- if (isAMDGCN(getTriple())) {
- switch (llvm::AMDGPU::parseArchAMDGCN(CPU)) {
- case GK_GFX1035:
- case GK_GFX1034:
- case GK_GFX1033:
- case GK_GFX1032:
- case GK_GFX1031:
- case GK_GFX1030:
- Features["ci-insts"] = true;
- Features["dot1-insts"] = true;
- Features["dot2-insts"] = true;
- Features["dot5-insts"] = true;
- Features["dot6-insts"] = true;
- Features["dot7-insts"] = true;
- Features["dl-insts"] = true;
- Features["flat-address-space"] = true;
- Features["16-bit-insts"] = true;
- Features["dpp"] = true;
- Features["gfx8-insts"] = true;
- Features["gfx9-insts"] = true;
- Features["gfx10-insts"] = true;
- Features["gfx10-3-insts"] = true;
- Features["s-memrealtime"] = true;
- Features["s-memtime-inst"] = true;
- break;
- case GK_GFX1012:
- case GK_GFX1011:
- Features["dot1-insts"] = true;
- Features["dot2-insts"] = true;
- Features["dot5-insts"] = true;
- Features["dot6-insts"] = true;
- Features["dot7-insts"] = true;
- LLVM_FALLTHROUGH;
- case GK_GFX1013:
- case GK_GFX1010:
- Features["dl-insts"] = true;
- Features["ci-insts"] = true;
- Features["flat-address-space"] = true;
- Features["16-bit-insts"] = true;
- Features["dpp"] = true;
- Features["gfx8-insts"] = true;
- Features["gfx9-insts"] = true;
- Features["gfx10-insts"] = true;
- Features["s-memrealtime"] = true;
- Features["s-memtime-inst"] = true;
- break;
- case GK_GFX90A:
- Features["gfx90a-insts"] = true;
- LLVM_FALLTHROUGH;
- case GK_GFX908:
- Features["dot3-insts"] = true;
- Features["dot4-insts"] = true;
- Features["dot5-insts"] = true;
- Features["dot6-insts"] = true;
- Features["mai-insts"] = true;
- LLVM_FALLTHROUGH;
- case GK_GFX906:
- Features["dl-insts"] = true;
- Features["dot1-insts"] = true;
- Features["dot2-insts"] = true;
- Features["dot7-insts"] = true;
- LLVM_FALLTHROUGH;
- case GK_GFX90C:
- case GK_GFX909:
- case GK_GFX904:
- case GK_GFX902:
- case GK_GFX900:
- Features["gfx9-insts"] = true;
- LLVM_FALLTHROUGH;
- case GK_GFX810:
- case GK_GFX805:
- case GK_GFX803:
- case GK_GFX802:
- case GK_GFX801:
- Features["gfx8-insts"] = true;
- Features["16-bit-insts"] = true;
- Features["dpp"] = true;
- Features["s-memrealtime"] = true;
- LLVM_FALLTHROUGH;
- case GK_GFX705:
- case GK_GFX704:
- case GK_GFX703:
- case GK_GFX702:
- case GK_GFX701:
- case GK_GFX700:
- Features["ci-insts"] = true;
- Features["flat-address-space"] = true;
- LLVM_FALLTHROUGH;
- case GK_GFX602:
- case GK_GFX601:
- case GK_GFX600:
- Features["s-memtime-inst"] = true;
- break;
- case GK_NONE:
- break;
- default:
- llvm_unreachable("Unhandled GPU!");
- }
- } else {
- if (CPU.empty())
- CPU = "r600";
-
- switch (llvm::AMDGPU::parseArchR600(CPU)) {
- case GK_CAYMAN:
- case GK_CYPRESS:
- case GK_RV770:
- case GK_RV670:
- // TODO: Add fp64 when implemented.
- break;
- case GK_TURKS:
- case GK_CAICOS:
- case GK_BARTS:
- case GK_SUMO:
- case GK_REDWOOD:
- case GK_JUNIPER:
- case GK_CEDAR:
- case GK_RV730:
- case GK_RV710:
- case GK_RS880:
- case GK_R630:
- case GK_R600:
- break;
- default:
- llvm_unreachable("Unhandled GPU!");
- }
+ fillAMDGPUFeatureMap(CPU, getTriple(), Features);
+ if (!TargetInfo::initFeatureMap(Features, Diags, CPU, FeatureVec))
+ return false;
+
+ // TODO: Should move this logic into TargetParser
+ std::string ErrorMsg;
+ if (!insertWaveSizeFeature(CPU, getTriple(), Features, ErrorMsg)) {
+ Diags.Report(diag::err_invalid_feature_combination) << ErrorMsg;
+ return false;
}
- return TargetInfo::initFeatureMap(Features, Diags, CPU, FeatureVec);
+ return true;
}
void AMDGPUTargetInfo::fillValidCPUList(
@@ -335,19 +218,24 @@ AMDGPUTargetInfo::AMDGPUTargetInfo(const llvm::Triple &Triple,
llvm::AMDGPU::getArchAttrR600(GPUKind)) {
resetDataLayout(isAMDGCN(getTriple()) ? DataLayoutStringAMDGCN
: DataLayoutStringR600);
- GridValues = llvm::omp::AMDGPUGpuGridValues;
setAddressSpaceMap(Triple.getOS() == llvm::Triple::Mesa3D ||
!isAMDGCN(Triple));
UseAddrSpaceMapMangling = true;
+ if (isAMDGCN(Triple)) {
+ // __bf16 is always available as a load/store only type on AMDGCN.
+ BFloat16Width = BFloat16Align = 16;
+ BFloat16Format = &llvm::APFloat::BFloat();
+ }
+
HasLegalHalfType = true;
HasFloat16 = true;
WavefrontSize = GPUFeatures & llvm::AMDGPU::FEATURE_WAVE32 ? 32 : 64;
AllowAMDGPUUnsafeFPAtomics = Opts.AllowAMDGPUUnsafeFPAtomics;
- // Set pointer width and alignment for target address space 0.
- PointerWidth = PointerAlign = getPointerWidthV(Generic);
+ // Set pointer width and alignment for the generic address space.
+ PointerWidth = PointerAlign = getPointerWidthV(LangAS::Default);
if (getMaxPointerWidth() == 64) {
LongWidth = LongAlign = 64;
SizeType = UnsignedLong;
@@ -356,6 +244,10 @@ AMDGPUTargetInfo::AMDGPUTargetInfo(const llvm::Triple &Triple,
}
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ CUMode = !(GPUFeatures & llvm::AMDGPU::FEATURE_WGP);
+ for (auto F : {"image-insts", "gws"})
+ ReadOnlyFeatures.insert(F);
+ HalfArgsAndReturns = true;
}
void AMDGPUTargetInfo::adjust(DiagnosticsEngine &Diags, LangOptions &Opts) {
@@ -368,8 +260,8 @@ void AMDGPUTargetInfo::adjust(DiagnosticsEngine &Diags, LangOptions &Opts) {
}
ArrayRef<Builtin::Info> AMDGPUTargetInfo::getTargetBuiltins() const {
- return llvm::makeArrayRef(BuiltinInfo, clang::AMDGPU::LastTSBuiltin -
- Builtin::FirstTSBuiltin);
+ return llvm::ArrayRef(BuiltinInfo,
+ clang::AMDGPU::LastTSBuiltin - Builtin::FirstTSBuiltin);
}
void AMDGPUTargetInfo::getTargetDefines(const LangOptions &Opts,
@@ -386,12 +278,17 @@ void AMDGPUTargetInfo::getTargetDefines(const LangOptions &Opts,
StringRef CanonName = isAMDGCN(getTriple()) ?
getArchNameAMDGCN(GPUKind) : getArchNameR600(GPUKind);
Builder.defineMacro(Twine("__") + Twine(CanonName) + Twine("__"));
+ // Emit macros for gfx family e.g. gfx906 -> __GFX9__, gfx1030 -> __GFX10___
+ if (isAMDGCN(getTriple())) {
+ assert(CanonName.starts_with("gfx") && "Invalid amdgcn canonical name");
+ Builder.defineMacro(Twine("__") + Twine(CanonName.drop_back(2).upper()) +
+ Twine("__"));
+ }
if (isAMDGCN(getTriple())) {
Builder.defineMacro("__amdgcn_processor__",
Twine("\"") + Twine(CanonName) + Twine("\""));
Builder.defineMacro("__amdgcn_target_id__",
- Twine("\"") + Twine(getTargetID().getValue()) +
- Twine("\""));
+ Twine("\"") + Twine(*getTargetID()) + Twine("\""));
for (auto F : getAllPossibleTargetIDFeatures(getTriple(), CanonName)) {
auto Loc = OffloadArchFeatures.find(F);
if (Loc != OffloadArchFeatures.end()) {
@@ -405,6 +302,9 @@ void AMDGPUTargetInfo::getTargetDefines(const LangOptions &Opts,
}
}
+ if (AllowAMDGPUUnsafeFPAtomics)
+ Builder.defineMacro("__AMDGCN_UNSAFE_FP_ATOMICS__");
+
// TODO: __HAS_FMAF__, __HAS_LDEXPF__, __HAS_FP64__ are deprecated and will be
// removed in the near future.
if (hasFMAF())
@@ -418,7 +318,10 @@ void AMDGPUTargetInfo::getTargetDefines(const LangOptions &Opts,
if (hasFastFMA())
Builder.defineMacro("FP_FAST_FMA");
+ Builder.defineMacro("__AMDGCN_WAVEFRONT_SIZE__", Twine(WavefrontSize));
+ // ToDo: deprecate this macro for naming consistency.
Builder.defineMacro("__AMDGCN_WAVEFRONT_SIZE", Twine(WavefrontSize));
+ Builder.defineMacro("__AMDGCN_CUMODE__", Twine(CUMode));
}
void AMDGPUTargetInfo::setAuxTarget(const TargetInfo *Aux) {
@@ -431,9 +334,13 @@ void AMDGPUTargetInfo::setAuxTarget(const TargetInfo *Aux) {
// supported by AMDGPU. Therefore keep its own format for these two types.
auto SaveLongDoubleFormat = LongDoubleFormat;
auto SaveFloat128Format = Float128Format;
+ auto SaveLongDoubleWidth = LongDoubleWidth;
+ auto SaveLongDoubleAlign = LongDoubleAlign;
copyAuxTarget(Aux);
LongDoubleFormat = SaveLongDoubleFormat;
Float128Format = SaveFloat128Format;
+ LongDoubleWidth = SaveLongDoubleWidth;
+ LongDoubleAlign = SaveLongDoubleAlign;
// For certain builtin types support on the host target, claim they are
// support to pass the compilation of the host code during the device-side
// compilation.
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h b/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h
index 2e580ecf2425..90a1516ecdd2 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h
@@ -17,25 +17,19 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "llvm/ADT/StringSet.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/Support/AMDGPUAddrSpace.h"
#include "llvm/Support/Compiler.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/TargetParser/TargetParser.h"
+#include "llvm/TargetParser/Triple.h"
+#include <optional>
namespace clang {
namespace targets {
class LLVM_LIBRARY_VISIBILITY AMDGPUTargetInfo final : public TargetInfo {
- static const Builtin::Info BuiltinInfo[];
static const char *const GCCRegNames[];
- enum AddrSpace {
- Generic = 0,
- Global = 1,
- Local = 3,
- Constant = 4,
- Private = 5
- };
static const LangASMap AMDGPUDefIsGenMap;
static const LangASMap AMDGPUDefIsPrivMap;
@@ -43,6 +37,12 @@ class LLVM_LIBRARY_VISIBILITY AMDGPUTargetInfo final : public TargetInfo {
unsigned GPUFeatures;
unsigned WavefrontSize;
+ /// Whether to use cumode or WGP mode. True for cumode. False for WGP mode.
+ bool CUMode;
+
+ /// Whether having image instructions.
+ bool HasImage = false;
+
/// Target ID is device name followed by optional feature name postfixed
/// by plus or minus sign delimitted by colon, e.g. gfx908:xnack+:sramecc-.
/// If the target ID contains feature+, map it to true.
@@ -95,17 +95,19 @@ public:
void adjust(DiagnosticsEngine &Diags, LangOptions &Opts) override;
- uint64_t getPointerWidthV(unsigned AddrSpace) const override {
+ uint64_t getPointerWidthV(LangAS AS) const override {
if (isR600(getTriple()))
return 32;
+ unsigned TargetAS = getTargetAddressSpace(AS);
- if (AddrSpace == Private || AddrSpace == Local)
+ if (TargetAS == llvm::AMDGPUAS::PRIVATE_ADDRESS ||
+ TargetAS == llvm::AMDGPUAS::LOCAL_ADDRESS)
return 32;
return 64;
}
- uint64_t getPointerAlignV(unsigned AddrSpace) const override {
+ uint64_t getPointerAlignV(LangAS AddrSpace) const override {
return getPointerWidthV(AddrSpace);
}
@@ -113,12 +115,14 @@ public:
return getTriple().getArch() == llvm::Triple::amdgcn ? 64 : 32;
}
- const char *getClobbers() const override { return ""; }
+ bool hasBFloat16Type() const override { return isAMDGCN(getTriple()); }
+
+ std::string_view getClobbers() const override { return ""; }
ArrayRef<const char *> getGCCRegNames() const override;
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
- return None;
+ return std::nullopt;
}
/// Accepted register names: (n, m is unsigned integer, n < m)
@@ -165,10 +169,8 @@ public:
}
bool HasLeftParen = false;
- if (S.front() == '{') {
+ if (S.consume_front("{"))
HasLeftParen = true;
- S = S.drop_front();
- }
if (S.empty())
return false;
if (S.front() != 'v' && S.front() != 's' && S.front() != 'a') {
@@ -195,29 +197,24 @@ public:
return true;
}
bool HasLeftBracket = false;
- if (!S.empty() && S.front() == '[') {
+ if (S.consume_front("["))
HasLeftBracket = true;
- S = S.drop_front();
- }
unsigned long long N;
if (S.empty() || consumeUnsignedInteger(S, 10, N))
return false;
- if (!S.empty() && S.front() == ':') {
+ if (S.consume_front(":")) {
if (!HasLeftBracket)
return false;
- S = S.drop_front();
unsigned long long M;
if (consumeUnsignedInteger(S, 10, M) || N >= M)
return false;
}
if (HasLeftBracket) {
- if (S.empty() || S.front() != ']')
+ if (!S.consume_front("]"))
return false;
- S = S.drop_front();
}
- if (S.empty() || S.front() != '}')
+ if (!S.consume_front("}"))
return false;
- S = S.drop_front();
if (!S.empty())
return false;
// Found {vn}, {sn}, {an}, {v[n]}, {s[n]}, {a[n]}, {v[n:m]}, {s[n:m]}
@@ -352,34 +349,56 @@ public:
}
LangAS getCUDABuiltinAddressSpace(unsigned AS) const override {
- return LangAS::Default;
+ switch (AS) {
+ case 0:
+ return LangAS::Default;
+ case 1:
+ return LangAS::cuda_device;
+ case 3:
+ return LangAS::cuda_shared;
+ case 4:
+ return LangAS::cuda_constant;
+ default:
+ return getLangASFromTargetAS(AS);
+ }
+ }
+
+ std::optional<LangAS> getConstantAddressSpace() const override {
+ return getLangASFromTargetAS(llvm::AMDGPUAS::CONSTANT_ADDRESS);
}
- llvm::Optional<LangAS> getConstantAddressSpace() const override {
- return getLangASFromTargetAS(Constant);
+ const llvm::omp::GV &getGridValue() const override {
+ switch (WavefrontSize) {
+ case 32:
+ return llvm::omp::getAMDGPUGridValues<32>();
+ case 64:
+ return llvm::omp::getAMDGPUGridValues<64>();
+ default:
+ llvm_unreachable("getGridValue not implemented for this wavesize");
+ }
}
/// \returns Target specific vtbl ptr address space.
unsigned getVtblPtrAddressSpace() const override {
- return static_cast<unsigned>(Constant);
+ return static_cast<unsigned>(llvm::AMDGPUAS::CONSTANT_ADDRESS);
}
/// \returns If a target requires an address within a target specific address
/// space \p AddressSpace to be converted in order to be used, then return the
/// corresponding target specific DWARF address space.
///
- /// \returns Otherwise return None and no conversion will be emitted in the
- /// DWARF.
- Optional<unsigned>
+ /// \returns Otherwise return std::nullopt and no conversion will be emitted
+ /// in the DWARF.
+ std::optional<unsigned>
getDWARFAddressSpace(unsigned AddressSpace) const override {
const unsigned DWARF_Private = 1;
const unsigned DWARF_Local = 2;
- if (AddressSpace == Private) {
+ if (AddressSpace == llvm::AMDGPUAS::PRIVATE_ADDRESS) {
return DWARF_Private;
- } else if (AddressSpace == Local) {
+ } else if (AddressSpace == llvm::AMDGPUAS::LOCAL_ADDRESS) {
return DWARF_Local;
} else {
- return None;
+ return std::nullopt;
}
}
@@ -389,6 +408,7 @@ public:
return CCCR_Warning;
case CC_C:
case CC_OpenCLKernel:
+ case CC_AMDGPUKernelCall:
return CCCR_OK;
}
}
@@ -404,7 +424,7 @@ public:
void setAuxTarget(const TargetInfo *Aux) override;
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
// Record offload arch features since they are needed for defining the
// pre-defined macros.
@@ -412,23 +432,29 @@ public:
DiagnosticsEngine &Diags) override {
auto TargetIDFeatures =
getAllPossibleTargetIDFeatures(getTriple(), getArchNameAMDGCN(GPUKind));
- llvm::for_each(Features, [&](const auto &F) {
+ for (const auto &F : Features) {
assert(F.front() == '+' || F.front() == '-');
if (F == "+wavefrontsize64")
WavefrontSize = 64;
+ else if (F == "+cumode")
+ CUMode = true;
+ else if (F == "-cumode")
+ CUMode = false;
+ else if (F == "+image-insts")
+ HasImage = true;
bool IsOn = F.front() == '+';
StringRef Name = StringRef(F).drop_front();
- if (llvm::find(TargetIDFeatures, Name) == TargetIDFeatures.end())
- return;
- assert(OffloadArchFeatures.find(Name) == OffloadArchFeatures.end());
+ if (!llvm::is_contained(TargetIDFeatures, Name))
+ continue;
+ assert(!OffloadArchFeatures.contains(Name));
OffloadArchFeatures[Name] = IsOn;
- });
+ }
return true;
}
- Optional<std::string> getTargetID() const override {
+ std::optional<std::string> getTargetID() const override {
if (!isAMDGCN(getTriple()))
- return llvm::None;
+ return std::nullopt;
// When -target-cpu is not set, we assume generic code that it is valid
// for all GPU and use an empty string as target ID to represent that.
if (GPUKind == llvm::AMDGPU::GK_NONE)
@@ -436,6 +462,8 @@ public:
return getCanonicalTargetID(getArchNameAMDGCN(GPUKind),
OffloadArchFeatures);
}
+
+ bool hasHIPImageSupport() const override { return HasImage; }
};
} // namespace targets
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/ARC.h b/contrib/llvm-project/clang/lib/Basic/Targets/ARC.h
index b314c42be1e9..fcbfdd6eec58 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/ARC.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/ARC.h
@@ -15,8 +15,8 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -40,13 +40,15 @@ public:
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
- ArrayRef<Builtin::Info> getTargetBuiltins() const override { return None; }
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return std::nullopt;
+ }
BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::VoidPtrBuiltinVaList;
}
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
ArrayRef<const char *> getGCCRegNames() const override {
static const char *const GCCRegNames[] = {
@@ -54,11 +56,11 @@ public:
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
"r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
"r24", "r25", "gp", "sp", "fp", "ilink1", "r30", "blink"};
- return llvm::makeArrayRef(GCCRegNames);
+ return llvm::ArrayRef(GCCRegNames);
}
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
- return None;
+ return std::nullopt;
}
bool validateAsmConstraint(const char *&Name,
@@ -66,7 +68,9 @@ public:
return false;
}
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
+
+ bool isCLZForZeroUndef() const override { return false; }
};
} // namespace targets
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
index 0e4048f8d5ff..55b71557452f 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
@@ -17,6 +17,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/TargetParser/ARMTargetParser.h"
using namespace clang;
using namespace clang::targets;
@@ -212,6 +213,22 @@ StringRef ARMTargetInfo::getCPUAttr() const {
return "8_6A";
case llvm::ARM::ArchKind::ARMV8_7A:
return "8_7A";
+ case llvm::ARM::ArchKind::ARMV8_8A:
+ return "8_8A";
+ case llvm::ARM::ArchKind::ARMV8_9A:
+ return "8_9A";
+ case llvm::ARM::ArchKind::ARMV9A:
+ return "9A";
+ case llvm::ARM::ArchKind::ARMV9_1A:
+ return "9_1A";
+ case llvm::ARM::ArchKind::ARMV9_2A:
+ return "9_2A";
+ case llvm::ARM::ArchKind::ARMV9_3A:
+ return "9_3A";
+ case llvm::ARM::ArchKind::ARMV9_4A:
+ return "9_4A";
+ case llvm::ARM::ArchKind::ARMV9_5A:
+ return "9_5A";
case llvm::ARM::ArchKind::ARMV8MBaseline:
return "8M_BASE";
case llvm::ARM::ArchKind::ARMV8MMainline:
@@ -240,8 +257,11 @@ ARMTargetInfo::ARMTargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: TargetInfo(Triple), FPMath(FP_Default), IsAAPCS(true), LDREX(0),
HW_FP(0) {
+ bool IsFreeBSD = Triple.isOSFreeBSD();
bool IsOpenBSD = Triple.isOSOpenBSD();
bool IsNetBSD = Triple.isOSNetBSD();
+ bool IsHaiku = Triple.isOSHaiku();
+ bool IsOHOS = Triple.isOHOSFamily();
// FIXME: the isOSBinFormatMachO is a workaround for identifying a Darwin-like
// environment where size_t is `unsigned long` rather than `unsigned int`
@@ -295,6 +315,7 @@ ARMTargetInfo::ARMTargetInfo(const llvm::Triple &Triple,
case llvm::Triple::GNUEABIHF:
case llvm::Triple::MuslEABI:
case llvm::Triple::MuslEABIHF:
+ case llvm::Triple::OpenHOS:
setABI("aapcs-linux");
break;
case llvm::Triple::EABIHF:
@@ -307,7 +328,7 @@ ARMTargetInfo::ARMTargetInfo(const llvm::Triple &Triple,
default:
if (IsNetBSD)
setABI("apcs-gnu");
- else if (IsOpenBSD)
+ else if (IsFreeBSD || IsOpenBSD || IsHaiku || IsOHOS)
setABI("aapcs-linux");
else
setABI("aapcs");
@@ -361,6 +382,50 @@ bool ARMTargetInfo::setABI(const std::string &Name) {
return false;
}
+bool ARMTargetInfo::isBranchProtectionSupportedArch(StringRef Arch) const {
+ llvm::ARM::ArchKind CPUArch = llvm::ARM::parseCPUArch(Arch);
+ if (CPUArch == llvm::ARM::ArchKind::INVALID)
+ CPUArch = llvm::ARM::parseArch(getTriple().getArchName());
+
+ if (CPUArch == llvm::ARM::ArchKind::INVALID)
+ return false;
+
+ StringRef ArchFeature = llvm::ARM::getArchName(CPUArch);
+ auto a =
+ llvm::Triple(ArchFeature, getTriple().getVendorName(),
+ getTriple().getOSName(), getTriple().getEnvironmentName());
+
+ StringRef SubArch = llvm::ARM::getSubArch(CPUArch);
+ llvm::ARM::ProfileKind Profile = llvm::ARM::parseArchProfile(SubArch);
+ return a.isArmT32() && (Profile == llvm::ARM::ProfileKind::M);
+}
+
+bool ARMTargetInfo::validateBranchProtection(StringRef Spec, StringRef Arch,
+ BranchProtectionInfo &BPI,
+ StringRef &Err) const {
+ llvm::ARM::ParsedBranchProtection PBP;
+ if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
+ return false;
+
+ if (!isBranchProtectionSupportedArch(Arch))
+ return false;
+
+ BPI.SignReturnAddr =
+ llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
+ .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
+ .Case("all", LangOptions::SignReturnAddressScopeKind::All)
+ .Default(LangOptions::SignReturnAddressScopeKind::None);
+
+ // Don't care for the sign key, beyond issuing a warning.
+ if (PBP.Key == "b_key")
+ Err = "b-key";
+ BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
+
+ BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
+ BPI.BranchProtectionPAuthLR = PBP.BranchProtectionPAuthLR;
+ return true;
+}
+
// FIXME: This should be based on Arch attributes, not CPU names.
bool ARMTargetInfo::initFeatureMap(
llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
@@ -378,10 +443,23 @@ bool ARMTargetInfo::initFeatureMap(
if (CPUArch != llvm::ARM::ArchKind::INVALID) {
ArchFeature = ("+" + llvm::ARM::getArchName(CPUArch)).str();
TargetFeatures.push_back(ArchFeature);
+
+ // These features are added to allow arm_neon.h target(..) attributes to
+ // match with both arm and aarch64. We need to add all previous architecture
+ // versions, so that "8.6" also allows "8.1" functions. In case of v9.x the
+ // v8.x counterparts are added too. We only need these for anything > 8.0-A.
+ for (llvm::ARM::ArchKind I = llvm::ARM::convertV9toV8(CPUArch);
+ I != llvm::ARM::ArchKind::INVALID; --I)
+ Features[llvm::ARM::getSubArch(I)] = true;
+ if (CPUArch > llvm::ARM::ArchKind::ARMV8A &&
+ CPUArch <= llvm::ARM::ArchKind::ARMV9_3A)
+ for (llvm::ARM::ArchKind I = CPUArch; I != llvm::ARM::ArchKind::INVALID;
+ --I)
+ Features[llvm::ARM::getSubArch(I)] = true;
}
// get default FPU features
- unsigned FPUKind = llvm::ARM::getDefaultFPU(CPU, Arch);
+ llvm::ARM::FPUKind FPUKind = llvm::ARM::getDefaultFPU(CPU, Arch);
llvm::ARM::getFPUFeatures(FPUKind, TargetFeatures);
// get default Extension features
@@ -437,9 +515,13 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HWDiv = 0;
DotProd = 0;
HasMatMul = 0;
+ HasPAC = 0;
+ HasBTI = 0;
HasFloat16 = true;
ARMCDECoprocMask = 0;
HasBFloat16 = false;
+ HasFullBFloat16 = false;
+ FPRegsDisabled = false;
// This does not diagnose illegal cases like having both
// "+vfpv2" and "+vfpv3" or having "+neon" and "-fp64".
@@ -516,9 +598,18 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
ARMCDECoprocMask |= (1U << Coproc);
} else if (Feature == "+bf16") {
HasBFloat16 = true;
+ } else if (Feature == "-fpregs") {
+ FPRegsDisabled = true;
+ } else if (Feature == "+pacbti") {
+ HasPAC = 1;
+ HasBTI = 1;
+ } else if (Feature == "+fullbf16") {
+ HasFullBFloat16 = true;
}
}
+ HalfArgsAndReturns = true;
+
switch (ArchVersion) {
case 6:
if (ArchProfile == llvm::ARM::ProfileKind::M)
@@ -535,6 +626,7 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
LDREX = LDREX_D | LDREX_W | LDREX_H | LDREX_B;
break;
case 8:
+ case 9:
LDREX = LDREX_D | LDREX_W | LDREX_H | LDREX_B;
}
@@ -566,7 +658,8 @@ bool ARMTargetInfo::hasFeature(StringRef Feature) const {
}
bool ARMTargetInfo::hasBFloat16Type() const {
- return HasBFloat16 && !SoftFloat;
+ // The __bf16 type is generally available so long as we have any fp registers.
+ return HasBFloat16 || (FPU && !SoftFloat);
}
bool ARMTargetInfo::isValidCPUName(StringRef Name) const {
@@ -627,8 +720,10 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
// For bare-metal none-eabi.
if (getTriple().getOS() == llvm::Triple::UnknownOS &&
(getTriple().getEnvironment() == llvm::Triple::EABI ||
- getTriple().getEnvironment() == llvm::Triple::EABIHF))
- Builder.defineMacro("__ELF__");
+ getTriple().getEnvironment() == llvm::Triple::EABIHF) &&
+ Opts.CPlusPlus) {
+ Builder.defineMacro("_GNU_SOURCE");
+ }
// Target properties.
Builder.defineMacro("__REGISTER_PREFIX__", "");
@@ -736,7 +831,7 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
if ((!SoftFloat && !SoftFloatABI) || ABI == "aapcs-vfp" || ABI == "aapcs16")
Builder.defineMacro("__ARM_PCS_VFP", "1");
- if (SoftFloat)
+ if (SoftFloat || (SoftFloatABI && !FPU))
Builder.defineMacro("__SOFTFP__");
// ACLE position independent code macros.
@@ -745,6 +840,70 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
if (Opts.RWPI)
Builder.defineMacro("__ARM_RWPI", "1");
+ // Macros for enabling co-proc intrinsics
+ uint64_t FeatureCoprocBF = 0;
+ switch (ArchKind) {
+ default:
+ break;
+ case llvm::ARM::ArchKind::ARMV4:
+ case llvm::ARM::ArchKind::ARMV4T:
+ // Filter __arm_ldcl and __arm_stcl in acle.h
+ FeatureCoprocBF = isThumb() ? 0 : FEATURE_COPROC_B1;
+ break;
+ case llvm::ARM::ArchKind::ARMV5T:
+ FeatureCoprocBF = isThumb() ? 0 : FEATURE_COPROC_B1 | FEATURE_COPROC_B2;
+ break;
+ case llvm::ARM::ArchKind::ARMV5TE:
+ case llvm::ARM::ArchKind::ARMV5TEJ:
+ if (!isThumb())
+ FeatureCoprocBF =
+ FEATURE_COPROC_B1 | FEATURE_COPROC_B2 | FEATURE_COPROC_B3;
+ break;
+ case llvm::ARM::ArchKind::ARMV6:
+ case llvm::ARM::ArchKind::ARMV6K:
+ case llvm::ARM::ArchKind::ARMV6KZ:
+ case llvm::ARM::ArchKind::ARMV6T2:
+ if (!isThumb() || ArchKind == llvm::ARM::ArchKind::ARMV6T2)
+ FeatureCoprocBF = FEATURE_COPROC_B1 | FEATURE_COPROC_B2 |
+ FEATURE_COPROC_B3 | FEATURE_COPROC_B4;
+ break;
+ case llvm::ARM::ArchKind::ARMV7A:
+ case llvm::ARM::ArchKind::ARMV7R:
+ case llvm::ARM::ArchKind::ARMV7M:
+ case llvm::ARM::ArchKind::ARMV7S:
+ case llvm::ARM::ArchKind::ARMV7EM:
+ FeatureCoprocBF = FEATURE_COPROC_B1 | FEATURE_COPROC_B2 |
+ FEATURE_COPROC_B3 | FEATURE_COPROC_B4;
+ break;
+ case llvm::ARM::ArchKind::ARMV8A:
+ case llvm::ARM::ArchKind::ARMV8R:
+ case llvm::ARM::ArchKind::ARMV8_1A:
+ case llvm::ARM::ArchKind::ARMV8_2A:
+ case llvm::ARM::ArchKind::ARMV8_3A:
+ case llvm::ARM::ArchKind::ARMV8_4A:
+ case llvm::ARM::ArchKind::ARMV8_5A:
+ case llvm::ARM::ArchKind::ARMV8_6A:
+ case llvm::ARM::ArchKind::ARMV8_7A:
+ case llvm::ARM::ArchKind::ARMV8_8A:
+ case llvm::ARM::ArchKind::ARMV8_9A:
+ case llvm::ARM::ArchKind::ARMV9A:
+ case llvm::ARM::ArchKind::ARMV9_1A:
+ case llvm::ARM::ArchKind::ARMV9_2A:
+ case llvm::ARM::ArchKind::ARMV9_3A:
+ case llvm::ARM::ArchKind::ARMV9_4A:
+ case llvm::ARM::ArchKind::ARMV9_5A:
+ // Filter __arm_cdp, __arm_ldcl, __arm_stcl in arm_acle.h
+ FeatureCoprocBF = FEATURE_COPROC_B1 | FEATURE_COPROC_B3;
+ break;
+ case llvm::ARM::ArchKind::ARMV8MMainline:
+ case llvm::ARM::ArchKind::ARMV8_1MMainline:
+ FeatureCoprocBF = FEATURE_COPROC_B1 | FEATURE_COPROC_B2 |
+ FEATURE_COPROC_B3 | FEATURE_COPROC_B4;
+ break;
+ }
+ Builder.defineMacro("__ARM_FEATURE_COPROC",
+ "0x" + Twine::utohexstr(FeatureCoprocBF));
+
if (ArchKind == llvm::ARM::ArchKind::XSCALE)
Builder.defineMacro("__XSCALE__");
@@ -858,12 +1017,28 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasMatMul)
Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");
+ if (HasPAC)
+ Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");
+
+ if (HasBTI)
+ Builder.defineMacro("__ARM_FEATURE_BTI", "1");
+
if (HasBFloat16) {
Builder.defineMacro("__ARM_FEATURE_BF16", "1");
Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
}
+ if (Opts.BranchTargetEnforcement)
+ Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");
+
+ if (Opts.hasSignReturnAddress()) {
+ unsigned Value = 1;
+ if (Opts.isSignReturnAddressScopeAll())
+ Value |= 1 << 2;
+ Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", Twine(Value));
+ }
+
switch (ArchKind) {
default:
break;
@@ -877,32 +1052,45 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
case llvm::ARM::ArchKind::ARMV8_4A:
case llvm::ARM::ArchKind::ARMV8_5A:
case llvm::ARM::ArchKind::ARMV8_6A:
+ case llvm::ARM::ArchKind::ARMV8_7A:
+ case llvm::ARM::ArchKind::ARMV8_8A:
+ case llvm::ARM::ArchKind::ARMV8_9A:
+ case llvm::ARM::ArchKind::ARMV9A:
+ case llvm::ARM::ArchKind::ARMV9_1A:
+ case llvm::ARM::ArchKind::ARMV9_2A:
+ case llvm::ARM::ArchKind::ARMV9_3A:
+ case llvm::ARM::ArchKind::ARMV9_4A:
+ case llvm::ARM::ArchKind::ARMV9_5A:
getTargetDefinesARMV83A(Opts, Builder);
break;
}
}
-const Builtin::Info ARMTargetInfo::BuiltinInfo[] = {
+static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
- {#ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::HEADER, ALL_LANGUAGES},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsNEON.def"
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LANGBUILTIN(ID, TYPE, ATTRS, LANG) \
- {#ID, TYPE, ATTRS, nullptr, LANG, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, LANG},
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
- {#ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::HEADER, ALL_LANGUAGES},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE) \
- {#ID, TYPE, ATTRS, HEADER, LANGS, FEATURE},
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::HEADER, LANGS},
#include "clang/Basic/BuiltinsARM.def"
};
ArrayRef<Builtin::Info> ARMTargetInfo::getTargetBuiltins() const {
- return llvm::makeArrayRef(BuiltinInfo, clang::ARM::LastTSBuiltin -
- Builtin::FirstTSBuiltin);
+ return llvm::ArrayRef(BuiltinInfo,
+ clang::ARM::LastTSBuiltin - Builtin::FirstTSBuiltin);
}
bool ARMTargetInfo::isCLZForZeroUndef() const { return false; }
@@ -933,7 +1121,7 @@ const char *const ARMTargetInfo::GCCRegNames[] = {
"q12", "q13", "q14", "q15"};
ArrayRef<const char *> ARMTargetInfo::getGCCRegNames() const {
- return llvm::makeArrayRef(GCCRegNames);
+ return llvm::ArrayRef(GCCRegNames);
}
const TargetInfo::GCCRegAlias ARMTargetInfo::GCCRegAliases[] = {
@@ -946,7 +1134,7 @@ const TargetInfo::GCCRegAlias ARMTargetInfo::GCCRegAliases[] = {
};
ArrayRef<TargetInfo::GCCRegAlias> ARMTargetInfo::getGCCRegAliases() const {
- return llvm::makeArrayRef(GCCRegAliases);
+ return llvm::ArrayRef(GCCRegAliases);
}
bool ARMTargetInfo::validateAsmConstraint(
@@ -968,6 +1156,8 @@ bool ARMTargetInfo::validateAsmConstraint(
case 't': // s0-s31, d0-d31, or q0-q15
case 'w': // s0-s15, d0-d7, or q0-q3
case 'x': // s0-s31, d0-d15, or q0-q7
+ if (FPRegsDisabled)
+ return false;
Info.setAllowsRegister();
return true;
case 'j': // An immediate integer between 0 and 65535 (valid for MOVW)
@@ -1108,8 +1298,7 @@ bool ARMTargetInfo::validateConstraintModifier(
bool isInOut = (Constraint[0] == '+');
// Strip off constraint modifiers.
- while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
- Constraint = Constraint.substr(1);
+ Constraint = Constraint.ltrim("=+&");
switch (Constraint[0]) {
default:
@@ -1127,7 +1316,7 @@ bool ARMTargetInfo::validateConstraintModifier(
return true;
}
-const char *ARMTargetInfo::getClobbers() const {
+std::string_view ARMTargetInfo::getClobbers() const {
// FIXME: Is this really right?
return "";
}
@@ -1289,11 +1478,6 @@ DarwinARMTargetInfo::DarwinARMTargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: DarwinTargetInfo<ARMleTargetInfo>(Triple, Opts) {
HasAlignMac68kSupport = true;
- // iOS always has 64-bit atomic instructions.
- // FIXME: This should be based off of the target features in
- // ARMleTargetInfo.
- MaxAtomicInlineWidth = 64;
-
if (Triple.isWatchABI()) {
// Darwin on iOS uses a variant of the ARM C++ ABI.
TheCXXABI.set(TargetCXXABI::WatchOS);
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h
index 0910064a033b..9802eb01abf3 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.h
@@ -16,9 +16,10 @@
#include "OSTargets.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/TargetParser/ARMTargetParser.h"
+#include "llvm/TargetParser/ARMTargetParserCommon.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -78,6 +79,9 @@ class LLVM_LIBRARY_VISIBILITY ARMTargetInfo : public TargetInfo {
unsigned Unaligned : 1;
unsigned DotProd : 1;
unsigned HasMatMul : 1;
+ unsigned FPRegsDisabled : 1;
+ unsigned HasPAC : 1;
+ unsigned HasBTI : 1;
enum {
LDREX_B = (1 << 0), /// byte (8-bit)
@@ -96,7 +100,18 @@ class LLVM_LIBRARY_VISIBILITY ARMTargetInfo : public TargetInfo {
};
uint32_t HW_FP;
- static const Builtin::Info BuiltinInfo[];
+ enum {
+ /// __arm_cdp __arm_ldc, __arm_ldcl, __arm_stc,
+ /// __arm_stcl, __arm_mcr and __arm_mrc
+ FEATURE_COPROC_B1 = (1 << 0),
+ /// __arm_cdp2, __arm_ldc2, __arm_stc2, __arm_ldc2l,
+ /// __arm_stc2l, __arm_mcr2 and __arm_mrc2
+ FEATURE_COPROC_B2 = (1 << 1),
+ /// __arm_mcrr, __arm_mrrc
+ FEATURE_COPROC_B3 = (1 << 2),
+ /// __arm_mcrr2, __arm_mrrc2
+ FEATURE_COPROC_B4 = (1 << 3),
+ };
void setABIAAPCS();
void setABIAPCS(bool IsAAPCS16);
@@ -122,6 +137,11 @@ public:
StringRef getABI() const override;
bool setABI(const std::string &Name) override;
+ bool isBranchProtectionSupportedArch(StringRef Arch) const override;
+ bool validateBranchProtection(StringRef Spec, StringRef Arch,
+ BranchProtectionInfo &BPI,
+ StringRef &Err) const override;
+
// FIXME: This should be based on Arch attributes, not CPU names.
bool
initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
@@ -174,7 +194,7 @@ public:
bool
validateConstraintModifier(StringRef Constraint, char Modifier, unsigned Size,
std::string &SuggestedModifier) const override;
- const char *getClobbers() const override;
+ std::string_view getClobbers() const override;
StringRef getConstraintRegister(StringRef Constraint,
StringRef Expression) const override {
@@ -187,8 +207,8 @@ public:
bool hasSjLjLowering() const override;
- bool hasExtIntType() const override { return true; }
-
+ bool hasBitIntType() const override { return true; }
+
const char *getBFloat16Mangling() const override { return "u6__bf16"; };
};
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp
index e87b7338c4d6..85ca4bc30c46 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp
@@ -24,297 +24,475 @@ namespace targets {
struct LLVM_LIBRARY_VISIBILITY MCUInfo {
const char *Name;
const char *DefineName;
+ StringRef Arch; // The __AVR_ARCH__ value.
+ const int NumFlashBanks; // Set to 0 for the devices do not support LPM/ELPM.
};
-// This list should be kept up-to-date with AVRDevices.td in LLVM.
+// NOTE: This list has been synchronized with gcc-avr 5.4.0 and avr-libc 2.0.0.
static MCUInfo AVRMcus[] = {
- {"at90s1200", "__AVR_AT90S1200__"},
- {"attiny11", "__AVR_ATtiny11__"},
- {"attiny12", "__AVR_ATtiny12__"},
- {"attiny15", "__AVR_ATtiny15__"},
- {"attiny28", "__AVR_ATtiny28__"},
- {"at90s2313", "__AVR_AT90S2313__"},
- {"at90s2323", "__AVR_AT90S2323__"},
- {"at90s2333", "__AVR_AT90S2333__"},
- {"at90s2343", "__AVR_AT90S2343__"},
- {"attiny22", "__AVR_ATtiny22__"},
- {"attiny26", "__AVR_ATtiny26__"},
- {"at86rf401", "__AVR_AT86RF401__"},
- {"at90s4414", "__AVR_AT90S4414__"},
- {"at90s4433", "__AVR_AT90S4433__"},
- {"at90s4434", "__AVR_AT90S4434__"},
- {"at90s8515", "__AVR_AT90S8515__"},
- {"at90c8534", "__AVR_AT90c8534__"},
- {"at90s8535", "__AVR_AT90S8535__"},
- {"ata5272", "__AVR_ATA5272__"},
- {"attiny13", "__AVR_ATtiny13__"},
- {"attiny13a", "__AVR_ATtiny13A__"},
- {"attiny2313", "__AVR_ATtiny2313__"},
- {"attiny2313a", "__AVR_ATtiny2313A__"},
- {"attiny24", "__AVR_ATtiny24__"},
- {"attiny24a", "__AVR_ATtiny24A__"},
- {"attiny4313", "__AVR_ATtiny4313__"},
- {"attiny44", "__AVR_ATtiny44__"},
- {"attiny44a", "__AVR_ATtiny44A__"},
- {"attiny84", "__AVR_ATtiny84__"},
- {"attiny84a", "__AVR_ATtiny84A__"},
- {"attiny25", "__AVR_ATtiny25__"},
- {"attiny45", "__AVR_ATtiny45__"},
- {"attiny85", "__AVR_ATtiny85__"},
- {"attiny261", "__AVR_ATtiny261__"},
- {"attiny261a", "__AVR_ATtiny261A__"},
- {"attiny441", "__AVR_ATtiny441__"},
- {"attiny461", "__AVR_ATtiny461__"},
- {"attiny461a", "__AVR_ATtiny461A__"},
- {"attiny841", "__AVR_ATtiny841__"},
- {"attiny861", "__AVR_ATtiny861__"},
- {"attiny861a", "__AVR_ATtiny861A__"},
- {"attiny87", "__AVR_ATtiny87__"},
- {"attiny43u", "__AVR_ATtiny43U__"},
- {"attiny48", "__AVR_ATtiny48__"},
- {"attiny88", "__AVR_ATtiny88__"},
- {"attiny828", "__AVR_ATtiny828__"},
- {"at43usb355", "__AVR_AT43USB355__"},
- {"at76c711", "__AVR_AT76C711__"},
- {"atmega103", "__AVR_ATmega103__"},
- {"at43usb320", "__AVR_AT43USB320__"},
- {"attiny167", "__AVR_ATtiny167__"},
- {"at90usb82", "__AVR_AT90USB82__"},
- {"at90usb162", "__AVR_AT90USB162__"},
- {"ata5505", "__AVR_ATA5505__"},
- {"atmega8u2", "__AVR_ATmega8U2__"},
- {"atmega16u2", "__AVR_ATmega16U2__"},
- {"atmega32u2", "__AVR_ATmega32U2__"},
- {"attiny1634", "__AVR_ATtiny1634__"},
- {"atmega8", "__AVR_ATmega8__"},
- {"ata6289", "__AVR_ATA6289__"},
- {"atmega8a", "__AVR_ATmega8A__"},
- {"ata6285", "__AVR_ATA6285__"},
- {"ata6286", "__AVR_ATA6286__"},
- {"atmega48", "__AVR_ATmega48__"},
- {"atmega48a", "__AVR_ATmega48A__"},
- {"atmega48pa", "__AVR_ATmega48PA__"},
- {"atmega48pb", "__AVR_ATmega48PB__"},
- {"atmega48p", "__AVR_ATmega48P__"},
- {"atmega88", "__AVR_ATmega88__"},
- {"atmega88a", "__AVR_ATmega88A__"},
- {"atmega88p", "__AVR_ATmega88P__"},
- {"atmega88pa", "__AVR_ATmega88PA__"},
- {"atmega88pb", "__AVR_ATmega88PB__"},
- {"atmega8515", "__AVR_ATmega8515__"},
- {"atmega8535", "__AVR_ATmega8535__"},
- {"atmega8hva", "__AVR_ATmega8HVA__"},
- {"at90pwm1", "__AVR_AT90PWM1__"},
- {"at90pwm2", "__AVR_AT90PWM2__"},
- {"at90pwm2b", "__AVR_AT90PWM2B__"},
- {"at90pwm3", "__AVR_AT90PWM3__"},
- {"at90pwm3b", "__AVR_AT90PWM3B__"},
- {"at90pwm81", "__AVR_AT90PWM81__"},
- {"ata5790", "__AVR_ATA5790__"},
- {"ata5795", "__AVR_ATA5795__"},
- {"atmega16", "__AVR_ATmega16__"},
- {"atmega16a", "__AVR_ATmega16A__"},
- {"atmega161", "__AVR_ATmega161__"},
- {"atmega162", "__AVR_ATmega162__"},
- {"atmega163", "__AVR_ATmega163__"},
- {"atmega164a", "__AVR_ATmega164A__"},
- {"atmega164p", "__AVR_ATmega164P__"},
- {"atmega164pa", "__AVR_ATmega164PA__"},
- {"atmega165", "__AVR_ATmega165__"},
- {"atmega165a", "__AVR_ATmega165A__"},
- {"atmega165p", "__AVR_ATmega165P__"},
- {"atmega165pa", "__AVR_ATmega165PA__"},
- {"atmega168", "__AVR_ATmega168__"},
- {"atmega168a", "__AVR_ATmega168A__"},
- {"atmega168p", "__AVR_ATmega168P__"},
- {"atmega168pa", "__AVR_ATmega168PA__"},
- {"atmega168pb", "__AVR_ATmega168PB__"},
- {"atmega169", "__AVR_ATmega169__"},
- {"atmega169a", "__AVR_ATmega169A__"},
- {"atmega169p", "__AVR_ATmega169P__"},
- {"atmega169pa", "__AVR_ATmega169PA__"},
- {"atmega32", "__AVR_ATmega32__"},
- {"atmega32a", "__AVR_ATmega32A__"},
- {"atmega323", "__AVR_ATmega323__"},
- {"atmega324a", "__AVR_ATmega324A__"},
- {"atmega324p", "__AVR_ATmega324P__"},
- {"atmega324pa", "__AVR_ATmega324PA__"},
- {"atmega324pb", "__AVR_ATmega324PB__"},
- {"atmega325", "__AVR_ATmega325__"},
- {"atmega325a", "__AVR_ATmega325A__"},
- {"atmega325p", "__AVR_ATmega325P__"},
- {"atmega325pa", "__AVR_ATmega325PA__"},
- {"atmega3250", "__AVR_ATmega3250__"},
- {"atmega3250a", "__AVR_ATmega3250A__"},
- {"atmega3250p", "__AVR_ATmega3250P__"},
- {"atmega3250pa", "__AVR_ATmega3250PA__"},
- {"atmega328", "__AVR_ATmega328__"},
- {"atmega328p", "__AVR_ATmega328P__"},
- {"atmega328pb", "__AVR_ATmega328PB__"},
- {"atmega329", "__AVR_ATmega329__"},
- {"atmega329a", "__AVR_ATmega329A__"},
- {"atmega329p", "__AVR_ATmega329P__"},
- {"atmega329pa", "__AVR_ATmega329PA__"},
- {"atmega3290", "__AVR_ATmega3290__"},
- {"atmega3290a", "__AVR_ATmega3290A__"},
- {"atmega3290p", "__AVR_ATmega3290P__"},
- {"atmega3290pa", "__AVR_ATmega3290PA__"},
- {"atmega406", "__AVR_ATmega406__"},
- {"atmega64", "__AVR_ATmega64__"},
- {"atmega64a", "__AVR_ATmega64A__"},
- {"atmega640", "__AVR_ATmega640__"},
- {"atmega644", "__AVR_ATmega644__"},
- {"atmega644a", "__AVR_ATmega644A__"},
- {"atmega644p", "__AVR_ATmega644P__"},
- {"atmega644pa", "__AVR_ATmega644PA__"},
- {"atmega645", "__AVR_ATmega645__"},
- {"atmega645a", "__AVR_ATmega645A__"},
- {"atmega645p", "__AVR_ATmega645P__"},
- {"atmega649", "__AVR_ATmega649__"},
- {"atmega649a", "__AVR_ATmega649A__"},
- {"atmega649p", "__AVR_ATmega649P__"},
- {"atmega6450", "__AVR_ATmega6450__"},
- {"atmega6450a", "__AVR_ATmega6450A__"},
- {"atmega6450p", "__AVR_ATmega6450P__"},
- {"atmega6490", "__AVR_ATmega6490__"},
- {"atmega6490a", "__AVR_ATmega6490A__"},
- {"atmega6490p", "__AVR_ATmega6490P__"},
- {"atmega64rfr2", "__AVR_ATmega64RFR2__"},
- {"atmega644rfr2", "__AVR_ATmega644RFR2__"},
- {"atmega16hva", "__AVR_ATmega16HVA__"},
- {"atmega16hva2", "__AVR_ATmega16HVA2__"},
- {"atmega16hvb", "__AVR_ATmega16HVB__"},
- {"atmega16hvbrevb", "__AVR_ATmega16HVBREVB__"},
- {"atmega32hvb", "__AVR_ATmega32HVB__"},
- {"atmega32hvbrevb", "__AVR_ATmega32HVBREVB__"},
- {"atmega64hve", "__AVR_ATmega64HVE__"},
- {"at90can32", "__AVR_AT90CAN32__"},
- {"at90can64", "__AVR_AT90CAN64__"},
- {"at90pwm161", "__AVR_AT90PWM161__"},
- {"at90pwm216", "__AVR_AT90PWM216__"},
- {"at90pwm316", "__AVR_AT90PWM316__"},
- {"atmega32c1", "__AVR_ATmega32C1__"},
- {"atmega64c1", "__AVR_ATmega64C1__"},
- {"atmega16m1", "__AVR_ATmega16M1__"},
- {"atmega32m1", "__AVR_ATmega32M1__"},
- {"atmega64m1", "__AVR_ATmega64M1__"},
- {"atmega16u4", "__AVR_ATmega16U4__"},
- {"atmega32u4", "__AVR_ATmega32U4__"},
- {"atmega32u6", "__AVR_ATmega32U6__"},
- {"at90usb646", "__AVR_AT90USB646__"},
- {"at90usb647", "__AVR_AT90USB647__"},
- {"at90scr100", "__AVR_AT90SCR100__"},
- {"at94k", "__AVR_AT94K__"},
- {"m3000", "__AVR_AT000__"},
- {"atmega128", "__AVR_ATmega128__"},
- {"atmega128a", "__AVR_ATmega128A__"},
- {"atmega1280", "__AVR_ATmega1280__"},
- {"atmega1281", "__AVR_ATmega1281__"},
- {"atmega1284", "__AVR_ATmega1284__"},
- {"atmega1284p", "__AVR_ATmega1284P__"},
- {"atmega128rfa1", "__AVR_ATmega128RFA1__"},
- {"atmega128rfr2", "__AVR_ATmega128RFR2__"},
- {"atmega1284rfr2", "__AVR_ATmega1284RFR2__"},
- {"at90can128", "__AVR_AT90CAN128__"},
- {"at90usb1286", "__AVR_AT90USB1286__"},
- {"at90usb1287", "__AVR_AT90USB1287__"},
- {"atmega2560", "__AVR_ATmega2560__"},
- {"atmega2561", "__AVR_ATmega2561__"},
- {"atmega256rfr2", "__AVR_ATmega256RFR2__"},
- {"atmega2564rfr2", "__AVR_ATmega2564RFR2__"},
- {"atxmega16a4", "__AVR_ATxmega16A4__"},
- {"atxmega16a4u", "__AVR_ATxmega16a4U__"},
- {"atxmega16c4", "__AVR_ATxmega16C4__"},
- {"atxmega16d4", "__AVR_ATxmega16D4__"},
- {"atxmega32a4", "__AVR_ATxmega32A4__"},
- {"atxmega32a4u", "__AVR_ATxmega32A4U__"},
- {"atxmega32c4", "__AVR_ATxmega32C4__"},
- {"atxmega32d4", "__AVR_ATxmega32D4__"},
- {"atxmega32e5", "__AVR_ATxmega32E5__"},
- {"atxmega16e5", "__AVR_ATxmega16E5__"},
- {"atxmega8e5", "__AVR_ATxmega8E5__"},
- {"atxmega32x1", "__AVR_ATxmega32X1__"},
- {"atxmega64a3", "__AVR_ATxmega64A3__"},
- {"atxmega64a3u", "__AVR_ATxmega64A3U__"},
- {"atxmega64a4u", "__AVR_ATxmega64A4U__"},
- {"atxmega64b1", "__AVR_ATxmega64B1__"},
- {"atxmega64b3", "__AVR_ATxmega64B3__"},
- {"atxmega64c3", "__AVR_ATxmega64C3__"},
- {"atxmega64d3", "__AVR_ATxmega64D3__"},
- {"atxmega64d4", "__AVR_ATxmega64D4__"},
- {"atxmega64a1", "__AVR_ATxmega64A1__"},
- {"atxmega64a1u", "__AVR_ATxmega64A1U__"},
- {"atxmega128a3", "__AVR_ATxmega128A3__"},
- {"atxmega128a3u", "__AVR_ATxmega128A3U__"},
- {"atxmega128b1", "__AVR_ATxmega128B1__"},
- {"atxmega128b3", "__AVR_ATxmega128B3__"},
- {"atxmega128c3", "__AVR_ATxmega128C3__"},
- {"atxmega128d3", "__AVR_ATxmega128D3__"},
- {"atxmega128d4", "__AVR_ATxmega128D4__"},
- {"atxmega192a3", "__AVR_ATxmega192A3__"},
- {"atxmega192a3u", "__AVR_ATxmega192A3U__"},
- {"atxmega192c3", "__AVR_ATxmega192C3__"},
- {"atxmega192d3", "__AVR_ATxmega192D3__"},
- {"atxmega256a3", "__AVR_ATxmega256A3__"},
- {"atxmega256a3u", "__AVR_ATxmega256A3U__"},
- {"atxmega256a3b", "__AVR_ATxmega256A3B__"},
- {"atxmega256a3bu", "__AVR_ATxmega256A3BU__"},
- {"atxmega256c3", "__AVR_ATxmega256C3__"},
- {"atxmega256d3", "__AVR_ATxmega256D3__"},
- {"atxmega384c3", "__AVR_ATxmega384C3__"},
- {"atxmega384d3", "__AVR_ATxmega384D3__"},
- {"atxmega128a1", "__AVR_ATxmega128A1__"},
- {"atxmega128a1u", "__AVR_ATxmega128A1U__"},
- {"atxmega128a4u", "__AVR_ATxmega128a4U__"},
- {"attiny4", "__AVR_ATtiny4__"},
- {"attiny5", "__AVR_ATtiny5__"},
- {"attiny9", "__AVR_ATtiny9__"},
- {"attiny10", "__AVR_ATtiny10__"},
- {"attiny20", "__AVR_ATtiny20__"},
- {"attiny40", "__AVR_ATtiny40__"},
- {"attiny102", "__AVR_ATtiny102__"},
- {"attiny104", "__AVR_ATtiny104__"},
+ {"avr1", NULL, "1", 0},
+ {"at90s1200", "__AVR_AT90S1200__", "1", 0},
+ {"attiny11", "__AVR_ATtiny11__", "1", 0},
+ {"attiny12", "__AVR_ATtiny12__", "1", 0},
+ {"attiny15", "__AVR_ATtiny15__", "1", 0},
+ {"attiny28", "__AVR_ATtiny28__", "1", 0},
+ {"avr2", NULL, "2", 1},
+ {"at90s2313", "__AVR_AT90S2313__", "2", 1},
+ {"at90s2323", "__AVR_AT90S2323__", "2", 1},
+ {"at90s2333", "__AVR_AT90S2333__", "2", 1},
+ {"at90s2343", "__AVR_AT90S2343__", "2", 1},
+ {"attiny22", "__AVR_ATtiny22__", "2", 1},
+ {"attiny26", "__AVR_ATtiny26__", "2", 1},
+ {"at86rf401", "__AVR_AT86RF401__", "25", 1},
+ {"at90s4414", "__AVR_AT90S4414__", "2", 1},
+ {"at90s4433", "__AVR_AT90S4433__", "2", 1},
+ {"at90s4434", "__AVR_AT90S4434__", "2", 1},
+ {"at90s8515", "__AVR_AT90S8515__", "2", 1},
+ {"at90c8534", "__AVR_AT90c8534__", "2", 1},
+ {"at90s8535", "__AVR_AT90S8535__", "2", 1},
+ {"avr25", NULL, "25", 1},
+ {"ata5272", "__AVR_ATA5272__", "25", 1},
+ {"ata6616c", "__AVR_ATA6616c__", "25", 1},
+ {"attiny13", "__AVR_ATtiny13__", "25", 1},
+ {"attiny13a", "__AVR_ATtiny13A__", "25", 1},
+ {"attiny2313", "__AVR_ATtiny2313__", "25", 1},
+ {"attiny2313a", "__AVR_ATtiny2313A__", "25", 1},
+ {"attiny24", "__AVR_ATtiny24__", "25", 1},
+ {"attiny24a", "__AVR_ATtiny24A__", "25", 1},
+ {"attiny4313", "__AVR_ATtiny4313__", "25", 1},
+ {"attiny44", "__AVR_ATtiny44__", "25", 1},
+ {"attiny44a", "__AVR_ATtiny44A__", "25", 1},
+ {"attiny84", "__AVR_ATtiny84__", "25", 1},
+ {"attiny84a", "__AVR_ATtiny84A__", "25", 1},
+ {"attiny25", "__AVR_ATtiny25__", "25", 1},
+ {"attiny45", "__AVR_ATtiny45__", "25", 1},
+ {"attiny85", "__AVR_ATtiny85__", "25", 1},
+ {"attiny261", "__AVR_ATtiny261__", "25", 1},
+ {"attiny261a", "__AVR_ATtiny261A__", "25", 1},
+ {"attiny441", "__AVR_ATtiny441__", "25", 1},
+ {"attiny461", "__AVR_ATtiny461__", "25", 1},
+ {"attiny461a", "__AVR_ATtiny461A__", "25", 1},
+ {"attiny841", "__AVR_ATtiny841__", "25", 1},
+ {"attiny861", "__AVR_ATtiny861__", "25", 1},
+ {"attiny861a", "__AVR_ATtiny861A__", "25", 1},
+ {"attiny87", "__AVR_ATtiny87__", "25", 1},
+ {"attiny43u", "__AVR_ATtiny43U__", "25", 1},
+ {"attiny48", "__AVR_ATtiny48__", "25", 1},
+ {"attiny88", "__AVR_ATtiny88__", "25", 1},
+ {"attiny828", "__AVR_ATtiny828__", "25", 1},
+ {"avr3", NULL, "3", 1},
+ {"at43usb355", "__AVR_AT43USB355__", "3", 1},
+ {"at76c711", "__AVR_AT76C711__", "3", 1},
+ {"avr31", NULL, "31", 1},
+ {"atmega103", "__AVR_ATmega103__", "31", 1},
+ {"at43usb320", "__AVR_AT43USB320__", "31", 1},
+ {"avr35", NULL, "35", 1},
+ {"attiny167", "__AVR_ATtiny167__", "35", 1},
+ {"at90usb82", "__AVR_AT90USB82__", "35", 1},
+ {"at90usb162", "__AVR_AT90USB162__", "35", 1},
+ {"ata5505", "__AVR_ATA5505__", "35", 1},
+ {"ata6617c", "__AVR_ATA6617C__", "35", 1},
+ {"ata664251", "__AVR_ATA664251__", "35", 1},
+ {"atmega8u2", "__AVR_ATmega8U2__", "35", 1},
+ {"atmega16u2", "__AVR_ATmega16U2__", "35", 1},
+ {"atmega32u2", "__AVR_ATmega32U2__", "35", 1},
+ {"attiny1634", "__AVR_ATtiny1634__", "35", 1},
+ {"avr4", NULL, "4", 1},
+ {"atmega8", "__AVR_ATmega8__", "4", 1},
+ {"ata6289", "__AVR_ATA6289__", "4", 1},
+ {"atmega8a", "__AVR_ATmega8A__", "4", 1},
+ {"ata6285", "__AVR_ATA6285__", "4", 1},
+ {"ata6286", "__AVR_ATA6286__", "4", 1},
+ {"ata6612c", "__AVR_ATA6612C__", "4", 1},
+ {"atmega48", "__AVR_ATmega48__", "4", 1},
+ {"atmega48a", "__AVR_ATmega48A__", "4", 1},
+ {"atmega48pa", "__AVR_ATmega48PA__", "4", 1},
+ {"atmega48pb", "__AVR_ATmega48PB__", "4", 1},
+ {"atmega48p", "__AVR_ATmega48P__", "4", 1},
+ {"atmega88", "__AVR_ATmega88__", "4", 1},
+ {"atmega88a", "__AVR_ATmega88A__", "4", 1},
+ {"atmega88p", "__AVR_ATmega88P__", "4", 1},
+ {"atmega88pa", "__AVR_ATmega88PA__", "4", 1},
+ {"atmega88pb", "__AVR_ATmega88PB__", "4", 1},
+ {"atmega8515", "__AVR_ATmega8515__", "4", 1},
+ {"atmega8535", "__AVR_ATmega8535__", "4", 1},
+ {"atmega8hva", "__AVR_ATmega8HVA__", "4", 1},
+ {"at90pwm1", "__AVR_AT90PWM1__", "4", 1},
+ {"at90pwm2", "__AVR_AT90PWM2__", "4", 1},
+ {"at90pwm2b", "__AVR_AT90PWM2B__", "4", 1},
+ {"at90pwm3", "__AVR_AT90PWM3__", "4", 1},
+ {"at90pwm3b", "__AVR_AT90PWM3B__", "4", 1},
+ {"at90pwm81", "__AVR_AT90PWM81__", "4", 1},
+ {"avr5", NULL, "5", 1},
+ {"ata5702m322", "__AVR_ATA5702M322__", "5", 1},
+ {"ata5782", "__AVR_ATA5782__", "5", 1},
+ {"ata5790", "__AVR_ATA5790__", "5", 1},
+ {"ata5790n", "__AVR_ATA5790N__", "5", 1},
+ {"ata5791", "__AVR_ATA5791__", "5", 1},
+ {"ata5795", "__AVR_ATA5795__", "5", 1},
+ {"ata5831", "__AVR_ATA5831__", "5", 1},
+ {"ata6613c", "__AVR_ATA6613C__", "5", 1},
+ {"ata6614q", "__AVR_ATA6614Q__", "5", 1},
+ {"ata8210", "__AVR_ATA8210__", "5", 1},
+ {"ata8510", "__AVR_ATA8510__", "5", 1},
+ {"atmega16", "__AVR_ATmega16__", "5", 1},
+ {"atmega16a", "__AVR_ATmega16A__", "5", 1},
+ {"atmega161", "__AVR_ATmega161__", "5", 1},
+ {"atmega162", "__AVR_ATmega162__", "5", 1},
+ {"atmega163", "__AVR_ATmega163__", "5", 1},
+ {"atmega164a", "__AVR_ATmega164A__", "5", 1},
+ {"atmega164p", "__AVR_ATmega164P__", "5", 1},
+ {"atmega164pa", "__AVR_ATmega164PA__", "5", 1},
+ {"atmega165", "__AVR_ATmega165__", "5", 1},
+ {"atmega165a", "__AVR_ATmega165A__", "5", 1},
+ {"atmega165p", "__AVR_ATmega165P__", "5", 1},
+ {"atmega165pa", "__AVR_ATmega165PA__", "5", 1},
+ {"atmega168", "__AVR_ATmega168__", "5", 1},
+ {"atmega168a", "__AVR_ATmega168A__", "5", 1},
+ {"atmega168p", "__AVR_ATmega168P__", "5", 1},
+ {"atmega168pa", "__AVR_ATmega168PA__", "5", 1},
+ {"atmega168pb", "__AVR_ATmega168PB__", "5", 1},
+ {"atmega169", "__AVR_ATmega169__", "5", 1},
+ {"atmega169a", "__AVR_ATmega169A__", "5", 1},
+ {"atmega169p", "__AVR_ATmega169P__", "5", 1},
+ {"atmega169pa", "__AVR_ATmega169PA__", "5", 1},
+ {"atmega32", "__AVR_ATmega32__", "5", 1},
+ {"atmega32a", "__AVR_ATmega32A__", "5", 1},
+ {"atmega323", "__AVR_ATmega323__", "5", 1},
+ {"atmega324a", "__AVR_ATmega324A__", "5", 1},
+ {"atmega324p", "__AVR_ATmega324P__", "5", 1},
+ {"atmega324pa", "__AVR_ATmega324PA__", "5", 1},
+ {"atmega324pb", "__AVR_ATmega324PB__", "5", 1},
+ {"atmega325", "__AVR_ATmega325__", "5", 1},
+ {"atmega325a", "__AVR_ATmega325A__", "5", 1},
+ {"atmega325p", "__AVR_ATmega325P__", "5", 1},
+ {"atmega325pa", "__AVR_ATmega325PA__", "5", 1},
+ {"atmega3250", "__AVR_ATmega3250__", "5", 1},
+ {"atmega3250a", "__AVR_ATmega3250A__", "5", 1},
+ {"atmega3250p", "__AVR_ATmega3250P__", "5", 1},
+ {"atmega3250pa", "__AVR_ATmega3250PA__", "5", 1},
+ {"atmega328", "__AVR_ATmega328__", "5", 1},
+ {"atmega328p", "__AVR_ATmega328P__", "5", 1},
+ {"atmega328pb", "__AVR_ATmega328PB__", "5", 1},
+ {"atmega329", "__AVR_ATmega329__", "5", 1},
+ {"atmega329a", "__AVR_ATmega329A__", "5", 1},
+ {"atmega329p", "__AVR_ATmega329P__", "5", 1},
+ {"atmega329pa", "__AVR_ATmega329PA__", "5", 1},
+ {"atmega3290", "__AVR_ATmega3290__", "5", 1},
+ {"atmega3290a", "__AVR_ATmega3290A__", "5", 1},
+ {"atmega3290p", "__AVR_ATmega3290P__", "5", 1},
+ {"atmega3290pa", "__AVR_ATmega3290PA__", "5", 1},
+ {"atmega406", "__AVR_ATmega406__", "5", 1},
+ {"atmega64", "__AVR_ATmega64__", "5", 1},
+ {"atmega64a", "__AVR_ATmega64A__", "5", 1},
+ {"atmega640", "__AVR_ATmega640__", "5", 1},
+ {"atmega644", "__AVR_ATmega644__", "5", 1},
+ {"atmega644a", "__AVR_ATmega644A__", "5", 1},
+ {"atmega644p", "__AVR_ATmega644P__", "5", 1},
+ {"atmega644pa", "__AVR_ATmega644PA__", "5", 1},
+ {"atmega645", "__AVR_ATmega645__", "5", 1},
+ {"atmega645a", "__AVR_ATmega645A__", "5", 1},
+ {"atmega645p", "__AVR_ATmega645P__", "5", 1},
+ {"atmega649", "__AVR_ATmega649__", "5", 1},
+ {"atmega649a", "__AVR_ATmega649A__", "5", 1},
+ {"atmega649p", "__AVR_ATmega649P__", "5", 1},
+ {"atmega6450", "__AVR_ATmega6450__", "5", 1},
+ {"atmega6450a", "__AVR_ATmega6450A__", "5", 1},
+ {"atmega6450p", "__AVR_ATmega6450P__", "5", 1},
+ {"atmega6490", "__AVR_ATmega6490__", "5", 1},
+ {"atmega6490a", "__AVR_ATmega6490A__", "5", 1},
+ {"atmega6490p", "__AVR_ATmega6490P__", "5", 1},
+ {"atmega64rfr2", "__AVR_ATmega64RFR2__", "5", 1},
+ {"atmega644rfr2", "__AVR_ATmega644RFR2__", "5", 1},
+ {"atmega16hva", "__AVR_ATmega16HVA__", "5", 1},
+ {"atmega16hva2", "__AVR_ATmega16HVA2__", "5", 1},
+ {"atmega16hvb", "__AVR_ATmega16HVB__", "5", 1},
+ {"atmega16hvbrevb", "__AVR_ATmega16HVBREVB__", "5", 1},
+ {"atmega32hvb", "__AVR_ATmega32HVB__", "5", 1},
+ {"atmega32hvbrevb", "__AVR_ATmega32HVBREVB__", "5", 1},
+ {"atmega64hve", "__AVR_ATmega64HVE__", "5", 1},
+ {"atmega64hve2", "__AVR_ATmega64HVE2__", "5", 1},
+ {"at90can32", "__AVR_AT90CAN32__", "5", 1},
+ {"at90can64", "__AVR_AT90CAN64__", "5", 1},
+ {"at90pwm161", "__AVR_AT90PWM161__", "5", 1},
+ {"at90pwm216", "__AVR_AT90PWM216__", "5", 1},
+ {"at90pwm316", "__AVR_AT90PWM316__", "5", 1},
+ {"atmega32c1", "__AVR_ATmega32C1__", "5", 1},
+ {"atmega64c1", "__AVR_ATmega64C1__", "5", 1},
+ {"atmega16m1", "__AVR_ATmega16M1__", "5", 1},
+ {"atmega32m1", "__AVR_ATmega32M1__", "5", 1},
+ {"atmega64m1", "__AVR_ATmega64M1__", "5", 1},
+ {"atmega16u4", "__AVR_ATmega16U4__", "5", 1},
+ {"atmega32u4", "__AVR_ATmega32U4__", "5", 1},
+ {"atmega32u6", "__AVR_ATmega32U6__", "5", 1},
+ {"at90usb646", "__AVR_AT90USB646__", "5", 1},
+ {"at90usb647", "__AVR_AT90USB647__", "5", 1},
+ {"at90scr100", "__AVR_AT90SCR100__", "5", 1},
+ {"at94k", "__AVR_AT94K__", "5", 1},
+ {"m3000", "__AVR_AT000__", "5", 1},
+ {"avr51", NULL, "51", 2},
+ {"atmega128", "__AVR_ATmega128__", "51", 2},
+ {"atmega128a", "__AVR_ATmega128A__", "51", 2},
+ {"atmega1280", "__AVR_ATmega1280__", "51", 2},
+ {"atmega1281", "__AVR_ATmega1281__", "51", 2},
+ {"atmega1284", "__AVR_ATmega1284__", "51", 2},
+ {"atmega1284p", "__AVR_ATmega1284P__", "51", 2},
+ {"atmega128rfa1", "__AVR_ATmega128RFA1__", "51", 2},
+ {"atmega128rfr2", "__AVR_ATmega128RFR2__", "51", 2},
+ {"atmega1284rfr2", "__AVR_ATmega1284RFR2__", "51", 2},
+ {"at90can128", "__AVR_AT90CAN128__", "51", 2},
+ {"at90usb1286", "__AVR_AT90USB1286__", "51", 2},
+ {"at90usb1287", "__AVR_AT90USB1287__", "51", 2},
+ {"avr6", NULL, "6", 4},
+ {"atmega2560", "__AVR_ATmega2560__", "6", 4},
+ {"atmega2561", "__AVR_ATmega2561__", "6", 4},
+ {"atmega256rfr2", "__AVR_ATmega256RFR2__", "6", 4},
+ {"atmega2564rfr2", "__AVR_ATmega2564RFR2__", "6", 4},
+ {"avrxmega2", NULL, "102", 1},
+ {"atxmega16a4", "__AVR_ATxmega16A4__", "102", 1},
+ {"atxmega16a4u", "__AVR_ATxmega16A4U__", "102", 1},
+ {"atxmega16c4", "__AVR_ATxmega16C4__", "102", 1},
+ {"atxmega16d4", "__AVR_ATxmega16D4__", "102", 1},
+ {"atxmega32a4", "__AVR_ATxmega32A4__", "102", 1},
+ {"atxmega32a4u", "__AVR_ATxmega32A4U__", "102", 1},
+ {"atxmega32c3", "__AVR_ATxmega32C3__", "102", 1},
+ {"atxmega32c4", "__AVR_ATxmega32C4__", "102", 1},
+ {"atxmega32d3", "__AVR_ATxmega32D3__", "102", 1},
+ {"atxmega32d4", "__AVR_ATxmega32D4__", "102", 1},
+ {"atxmega32e5", "__AVR_ATxmega32E5__", "102", 1},
+ {"atxmega16e5", "__AVR_ATxmega16E5__", "102", 1},
+ {"atxmega8e5", "__AVR_ATxmega8E5__", "102", 1},
+ {"avrxmega4", NULL, "104", 1},
+ {"atxmega64a3", "__AVR_ATxmega64A3__", "104", 1},
+ {"atxmega64a3u", "__AVR_ATxmega64A3U__", "104", 1},
+ {"atxmega64a4u", "__AVR_ATxmega64A4U__", "104", 1},
+ {"atxmega64b1", "__AVR_ATxmega64B1__", "104", 1},
+ {"atxmega64b3", "__AVR_ATxmega64B3__", "104", 1},
+ {"atxmega64c3", "__AVR_ATxmega64C3__", "104", 1},
+ {"atxmega64d3", "__AVR_ATxmega64D3__", "104", 1},
+ {"atxmega64d4", "__AVR_ATxmega64D4__", "104", 1},
+ {"avrxmega5", NULL, "105", 1},
+ {"atxmega64a1", "__AVR_ATxmega64A1__", "105", 1},
+ {"atxmega64a1u", "__AVR_ATxmega64A1U__", "105", 1},
+ {"avrxmega6", NULL, "106", 6},
+ {"atxmega128a3", "__AVR_ATxmega128A3__", "106", 2},
+ {"atxmega128a3u", "__AVR_ATxmega128A3U__", "106", 2},
+ {"atxmega128b1", "__AVR_ATxmega128B1__", "106", 2},
+ {"atxmega128b3", "__AVR_ATxmega128B3__", "106", 2},
+ {"atxmega128c3", "__AVR_ATxmega128C3__", "106", 2},
+ {"atxmega128d3", "__AVR_ATxmega128D3__", "106", 2},
+ {"atxmega128d4", "__AVR_ATxmega128D4__", "106", 2},
+ {"atxmega192a3", "__AVR_ATxmega192A3__", "106", 3},
+ {"atxmega192a3u", "__AVR_ATxmega192A3U__", "106", 3},
+ {"atxmega192c3", "__AVR_ATxmega192C3__", "106", 3},
+ {"atxmega192d3", "__AVR_ATxmega192D3__", "106", 3},
+ {"atxmega256a3", "__AVR_ATxmega256A3__", "106", 4},
+ {"atxmega256a3u", "__AVR_ATxmega256A3U__", "106", 4},
+ {"atxmega256a3b", "__AVR_ATxmega256A3B__", "106", 4},
+ {"atxmega256a3bu", "__AVR_ATxmega256A3BU__", "106", 4},
+ {"atxmega256c3", "__AVR_ATxmega256C3__", "106", 4},
+ {"atxmega256d3", "__AVR_ATxmega256D3__", "106", 4},
+ {"atxmega384c3", "__AVR_ATxmega384C3__", "106", 6},
+ {"atxmega384d3", "__AVR_ATxmega384D3__", "106", 6},
+ {"avrxmega7", NULL, "107", 2},
+ {"atxmega128a1", "__AVR_ATxmega128A1__", "107", 2},
+ {"atxmega128a1u", "__AVR_ATxmega128A1U__", "107", 2},
+ {"atxmega128a4u", "__AVR_ATxmega128A4U__", "107", 2},
+ {"avrtiny", NULL, "100", 0},
+ {"attiny4", "__AVR_ATtiny4__", "100", 0},
+ {"attiny5", "__AVR_ATtiny5__", "100", 0},
+ {"attiny9", "__AVR_ATtiny9__", "100", 0},
+ {"attiny10", "__AVR_ATtiny10__", "100", 0},
+ {"attiny20", "__AVR_ATtiny20__", "100", 0},
+ {"attiny40", "__AVR_ATtiny40__", "100", 0},
+ {"attiny102", "__AVR_ATtiny102__", "100", 0},
+ {"attiny104", "__AVR_ATtiny104__", "100", 0},
+ {"avrxmega3", NULL, "103", 1},
+ {"attiny202", "__AVR_ATtiny202__", "103", 1},
+ {"attiny402", "__AVR_ATtiny402__", "103", 1},
+ {"attiny204", "__AVR_ATtiny204__", "103", 1},
+ {"attiny404", "__AVR_ATtiny404__", "103", 1},
+ {"attiny804", "__AVR_ATtiny804__", "103", 1},
+ {"attiny1604", "__AVR_ATtiny1604__", "103", 1},
+ {"attiny406", "__AVR_ATtiny406__", "103", 1},
+ {"attiny806", "__AVR_ATtiny806__", "103", 1},
+ {"attiny1606", "__AVR_ATtiny1606__", "103", 1},
+ {"attiny807", "__AVR_ATtiny807__", "103", 1},
+ {"attiny1607", "__AVR_ATtiny1607__", "103", 1},
+ {"attiny212", "__AVR_ATtiny212__", "103", 1},
+ {"attiny412", "__AVR_ATtiny412__", "103", 1},
+ {"attiny214", "__AVR_ATtiny214__", "103", 1},
+ {"attiny414", "__AVR_ATtiny414__", "103", 1},
+ {"attiny814", "__AVR_ATtiny814__", "103", 1},
+ {"attiny1614", "__AVR_ATtiny1614__", "103", 1},
+ {"attiny416", "__AVR_ATtiny416__", "103", 1},
+ {"attiny816", "__AVR_ATtiny816__", "103", 1},
+ {"attiny1616", "__AVR_ATtiny1616__", "103", 1},
+ {"attiny3216", "__AVR_ATtiny3216__", "103", 1},
+ {"attiny417", "__AVR_ATtiny417__", "103", 1},
+ {"attiny817", "__AVR_ATtiny817__", "103", 1},
+ {"attiny1617", "__AVR_ATtiny1617__", "103", 1},
+ {"attiny3217", "__AVR_ATtiny3217__", "103", 1},
+ {"attiny1624", "__AVR_ATtiny1624__", "103", 1},
+ {"attiny1626", "__AVR_ATtiny1626__", "103", 1},
+ {"attiny1627", "__AVR_ATtiny1627__", "103", 1},
+ {"atmega808", "__AVR_ATmega808__", "103", 1},
+ {"atmega809", "__AVR_ATmega809__", "103", 1},
+ {"atmega1608", "__AVR_ATmega1608__", "103", 1},
+ {"atmega1609", "__AVR_ATmega1609__", "103", 1},
+ {"atmega3208", "__AVR_ATmega3208__", "103", 1},
+ {"atmega3209", "__AVR_ATmega3209__", "103", 1},
+ {"atmega4808", "__AVR_ATmega4808__", "103", 1},
+ {"atmega4809", "__AVR_ATmega4809__", "103", 1},
};
} // namespace targets
} // namespace clang
-static constexpr llvm::StringLiteral ValidFamilyNames[] = {
- "avr1", "avr2", "avr25", "avr3", "avr31",
- "avr35", "avr4", "avr5", "avr51", "avr6",
- "avrxmega1", "avrxmega2", "avrxmega3", "avrxmega4", "avrxmega5",
- "avrxmega6", "avrxmega7", "avrtiny"};
+static bool ArchHasELPM(StringRef Arch) {
+ return llvm::StringSwitch<bool>(Arch)
+ .Cases("31", "51", "6", true)
+ .Cases("102", "104", "105", "106", "107", true)
+ .Default(false);
+}
-bool AVRTargetInfo::isValidCPUName(StringRef Name) const {
- bool IsFamily =
- llvm::find(ValidFamilyNames, Name) != std::end(ValidFamilyNames);
+static bool ArchHasELPMX(StringRef Arch) {
+ return llvm::StringSwitch<bool>(Arch)
+ .Cases("51", "6", true)
+ .Cases("102", "104", "105", "106", "107", true)
+ .Default(false);
+}
+
+static bool ArchHasMOVW(StringRef Arch) {
+ return llvm::StringSwitch<bool>(Arch)
+ .Cases("25", "35", "4", "5", "51", "6", true)
+ .Cases("102", "103", "104", "105", "106", "107", true)
+ .Default(false);
+}
+
+static bool ArchHasLPMX(StringRef Arch) {
+ return ArchHasMOVW(Arch); // same architectures
+}
+
+static bool ArchHasMUL(StringRef Arch) {
+ return llvm::StringSwitch<bool>(Arch)
+ .Cases("4", "5", "51", "6", true)
+ .Cases("102", "103", "104", "105", "106", "107", true)
+ .Default(false);
+}
+
+static bool ArchHasJMPCALL(StringRef Arch) {
+ return llvm::StringSwitch<bool>(Arch)
+ .Cases("3", "31", "35", "5", "51", "6", true)
+ .Cases("102", "103", "104", "105", "106", "107", true)
+ .Default(false);
+}
+
+static bool ArchHas3BytePC(StringRef Arch) {
+ // These devices have more than 128kB of program memory.
+ // Note:
+ // - Not fully correct for arch 106: only about half the chips have more
+ // than 128kB program memory and therefore a 3 byte PC.
+ // - Doesn't match GCC entirely: avr-gcc thinks arch 107 goes beyond 128kB
+ // but in fact it doesn't.
+ return llvm::StringSwitch<bool>(Arch)
+ .Case("6", true)
+ .Case("106", true)
+ .Default(false);
+}
- bool IsMCU =
- llvm::find_if(AVRMcus, [&](const MCUInfo &Info) {
- return Info.Name == Name;
- }) != std::end(AVRMcus);
- return IsFamily || IsMCU;
+bool AVRTargetInfo::isValidCPUName(StringRef Name) const {
+ return llvm::any_of(
+ AVRMcus, [&](const MCUInfo &Info) { return Info.Name == Name; });
}
void AVRTargetInfo::fillValidCPUList(SmallVectorImpl<StringRef> &Values) const {
- Values.append(std::begin(ValidFamilyNames), std::end(ValidFamilyNames));
for (const MCUInfo &Info : AVRMcus)
Values.push_back(Info.Name);
}
+bool AVRTargetInfo::setCPU(const std::string &Name) {
+ // Set the ABI field based on the device or family name.
+ auto It = llvm::find_if(
+ AVRMcus, [&](const MCUInfo &Info) { return Info.Name == Name; });
+ if (It != std::end(AVRMcus)) {
+ CPU = Name;
+ ABI = (It->Arch == "100") ? "avrtiny" : "avr";
+ DefineName = It->DefineName;
+ Arch = It->Arch;
+ NumFlashBanks = It->NumFlashBanks;
+ return true;
+ }
+
+ // Parameter Name is neither valid family name nor valid device name.
+ return false;
+}
+
+std::optional<std::string>
+AVRTargetInfo::handleAsmEscapedChar(char EscChar) const {
+ switch (EscChar) {
+ // "%~" represents for 'r' depends on the device has long jump/call.
+ case '~':
+ return ArchHasJMPCALL(Arch) ? std::string("") : std::string(1, 'r');
+
+ // "%!" represents for 'e' depends on the PC register size.
+ case '!':
+ return ArchHas3BytePC(Arch) ? std::string(1, 'e') : std::string("");
+
+ // This is an invalid escape character for AVR.
+ default:
+ return std::nullopt;
+ }
+}
+
void AVRTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("AVR");
Builder.defineMacro("__AVR");
Builder.defineMacro("__AVR__");
- Builder.defineMacro("__ELF__");
- Builder.defineMacro("__flash", "__attribute__((address_space(1)))");
- if (!this->CPU.empty()) {
- auto It = llvm::find_if(
- AVRMcus, [&](const MCUInfo &Info) { return Info.Name == this->CPU; });
+ if (ABI == "avrtiny")
+ Builder.defineMacro("__AVR_TINY__", "1");
- if (It != std::end(AVRMcus))
- Builder.defineMacro(It->DefineName);
+ if (DefineName.size() != 0)
+ Builder.defineMacro(DefineName);
+
+ Builder.defineMacro("__AVR_ARCH__", Arch);
+
+ // TODO: perhaps we should use the information from AVRDevices.td instead?
+ if (ArchHasELPM(Arch))
+ Builder.defineMacro("__AVR_HAVE_ELPM__");
+ if (ArchHasELPMX(Arch))
+ Builder.defineMacro("__AVR_HAVE_ELPMX__");
+ if (ArchHasMOVW(Arch))
+ Builder.defineMacro("__AVR_HAVE_MOVW__");
+ if (ArchHasLPMX(Arch))
+ Builder.defineMacro("__AVR_HAVE_LPMX__");
+ if (ArchHasMUL(Arch))
+ Builder.defineMacro("__AVR_HAVE_MUL__");
+ if (ArchHasJMPCALL(Arch))
+ Builder.defineMacro("__AVR_HAVE_JMP_CALL__");
+ if (ArchHas3BytePC(Arch)) {
+ // Note: some devices do support eijmp/eicall even though this macro isn't
+ // set. This is the case if they have less than 128kB flash and so
+ // eijmp/eicall isn't very useful anyway. (This matches gcc, although it's
+ // debatable whether we should be bug-compatible in this case).
+ Builder.defineMacro("__AVR_HAVE_EIJMP_EICALL__");
+ Builder.defineMacro("__AVR_3_BYTE_PC__");
+ } else {
+ Builder.defineMacro("__AVR_2_BYTE_PC__");
}
+
+ if (NumFlashBanks >= 1)
+ Builder.defineMacro("__flash", "__attribute__((__address_space__(1)))");
+ if (NumFlashBanks >= 2)
+ Builder.defineMacro("__flash1", "__attribute__((__address_space__(2)))");
+ if (NumFlashBanks >= 3)
+ Builder.defineMacro("__flash2", "__attribute__((__address_space__(3)))");
+ if (NumFlashBanks >= 4)
+ Builder.defineMacro("__flash3", "__attribute__((__address_space__(4)))");
+ if (NumFlashBanks >= 5)
+ Builder.defineMacro("__flash4", "__attribute__((__address_space__(5)))");
+ if (NumFlashBanks >= 6)
+ Builder.defineMacro("__flash5", "__attribute__((__address_space__(6)))");
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AVR.h b/contrib/llvm-project/clang/lib/Basic/Targets/AVR.h
index 89a80ca6a39a..9376c46cd98c 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AVR.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AVR.h
@@ -15,8 +15,8 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -61,25 +61,26 @@ public:
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
- ArrayRef<Builtin::Info> getTargetBuiltins() const override { return None; }
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return std::nullopt;
+ }
BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::VoidPtrBuiltinVaList;
}
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
ArrayRef<const char *> getGCCRegNames() const override {
static const char *const GCCRegNames[] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
"r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19",
- "r20", "r21", "r22", "r23", "r24", "r25", "X", "Y", "Z", "SP"
- };
- return llvm::makeArrayRef(GCCRegNames);
+ "r20", "r21", "r22", "r23", "r24", "r25", "X", "Y", "Z", "SP"};
+ return llvm::ArrayRef(GCCRegNames);
}
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
- return None;
+ return std::nullopt;
}
ArrayRef<TargetInfo::AddlRegName> getGCCAddlRegNames() const override {
@@ -89,7 +90,7 @@ public:
{{"r30", "r31"}, 28},
{{"SPL", "SPH"}, 29},
};
- return llvm::makeArrayRef(AddlRegNames);
+ return llvm::ArrayRef(AddlRegNames);
}
bool validateAsmConstraint(const char *&Name,
@@ -145,7 +146,9 @@ public:
case 'R': // Integer constant (Range: -6 to 5)
Info.setRequiresImmediate(-6, 5);
return true;
- case 'G': // Floating point constant
+ case 'G': // Floating point constant 0.0
+ Info.setRequiresImmediate(0);
+ return true;
case 'Q': // A memory address based on Y or Z pointer with displacement.
return true;
}
@@ -168,15 +171,16 @@ public:
bool isValidCPUName(StringRef Name) const override;
void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
- bool setCPU(const std::string &Name) override {
- bool isValid = isValidCPUName(Name);
- if (isValid)
- CPU = Name;
- return isValid;
- }
+ bool setCPU(const std::string &Name) override;
+ std::optional<std::string> handleAsmEscapedChar(char EscChar) const override;
+ StringRef getABI() const override { return ABI; }
protected:
std::string CPU;
+ StringRef ABI;
+ StringRef DefineName;
+ StringRef Arch;
+ int NumFlashBanks = 0;
};
} // namespace targets
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/BPF.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/BPF.cpp
index 0b0298df30a5..e713e0847922 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/BPF.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/BPF.cpp
@@ -19,9 +19,9 @@
using namespace clang;
using namespace clang::targets;
-const Builtin::Info BPFTargetInfo::BuiltinInfo[] = {
+static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsBPF.def"
};
@@ -29,13 +29,44 @@ void BPFTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("__bpf__");
Builder.defineMacro("__BPF__");
+
+ std::string CPU = getTargetOpts().CPU;
+ if (CPU == "probe") {
+ Builder.defineMacro("__BPF_CPU_VERSION__", "0");
+ return;
+ }
+ if (CPU.empty() || CPU == "generic" || CPU == "v1") {
+ Builder.defineMacro("__BPF_CPU_VERSION__", "1");
+ return;
+ }
+
+ std::string CpuVerNumStr = CPU.substr(1);
+ Builder.defineMacro("__BPF_CPU_VERSION__", CpuVerNumStr);
+
+ int CpuVerNum = std::stoi(CpuVerNumStr);
+ if (CpuVerNum >= 2)
+ Builder.defineMacro("__BPF_FEATURE_JMP_EXT");
+
+ if (CpuVerNum >= 3) {
+ Builder.defineMacro("__BPF_FEATURE_JMP32");
+ Builder.defineMacro("__BPF_FEATURE_ALU32");
+ }
+
+ if (CpuVerNum >= 4) {
+ Builder.defineMacro("__BPF_FEATURE_LDSX");
+ Builder.defineMacro("__BPF_FEATURE_MOVSX");
+ Builder.defineMacro("__BPF_FEATURE_BSWAP");
+ Builder.defineMacro("__BPF_FEATURE_SDIV_SMOD");
+ Builder.defineMacro("__BPF_FEATURE_GOTOL");
+ Builder.defineMacro("__BPF_FEATURE_ST");
+ }
}
static constexpr llvm::StringLiteral ValidCPUNames[] = {"generic", "v1", "v2",
- "v3", "probe"};
+ "v3", "v4", "probe"};
bool BPFTargetInfo::isValidCPUName(StringRef Name) const {
- return llvm::find(ValidCPUNames, Name) != std::end(ValidCPUNames);
+ return llvm::is_contained(ValidCPUNames, Name);
}
void BPFTargetInfo::fillValidCPUList(SmallVectorImpl<StringRef> &Values) const {
@@ -43,8 +74,8 @@ void BPFTargetInfo::fillValidCPUList(SmallVectorImpl<StringRef> &Values) const {
}
ArrayRef<Builtin::Info> BPFTargetInfo::getTargetBuiltins() const {
- return llvm::makeArrayRef(BuiltinInfo, clang::BPF::LastTSBuiltin -
- Builtin::FirstTSBuiltin);
+ return llvm::ArrayRef(BuiltinInfo,
+ clang::BPF::LastTSBuiltin - Builtin::FirstTSBuiltin);
}
bool BPFTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/BPF.h b/contrib/llvm-project/clang/lib/Basic/Targets/BPF.h
index 393a91ff53a5..489f29fc4fea 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/BPF.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/BPF.h
@@ -15,14 +15,13 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
class LLVM_LIBRARY_VISIBILITY BPFTargetInfo : public TargetInfo {
- static const Builtin::Info BuiltinInfo[];
bool HasAlu32 = false;
public:
@@ -61,14 +60,16 @@ public:
ArrayRef<Builtin::Info> getTargetBuiltins() const override;
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::VoidPtrBuiltinVaList;
}
bool isValidGCCRegisterName(StringRef Name) const override { return true; }
- ArrayRef<const char *> getGCCRegNames() const override { return None; }
+ ArrayRef<const char *> getGCCRegNames() const override {
+ return std::nullopt;
+ }
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const override {
@@ -85,7 +86,7 @@ public:
}
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
- return None;
+ return std::nullopt;
}
bool allowDebugInfoForExternalRef() const override { return true; }
@@ -105,7 +106,7 @@ public:
void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
bool setCPU(const std::string &Name) override {
- if (Name == "v3") {
+ if (Name == "v3" || Name == "v4") {
HasAlu32 = true;
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.cpp
new file mode 100644
index 000000000000..851f27dbb1e5
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.cpp
@@ -0,0 +1,315 @@
+//===--- CSKY.cpp - Implement CSKY target feature support -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements CSKY TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CSKY.h"
+
+using namespace clang;
+using namespace clang::targets;
+
+bool CSKYTargetInfo::isValidCPUName(StringRef Name) const {
+ return llvm::CSKY::parseCPUArch(Name) != llvm::CSKY::ArchKind::INVALID;
+}
+
+bool CSKYTargetInfo::setCPU(const std::string &Name) {
+ llvm::CSKY::ArchKind archKind = llvm::CSKY::parseCPUArch(Name);
+ bool isValid = (archKind != llvm::CSKY::ArchKind::INVALID);
+
+ if (isValid) {
+ CPU = Name;
+ Arch = archKind;
+ }
+
+ return isValid;
+}
+
+void CSKYTargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ Builder.defineMacro("__csky__", "2");
+ Builder.defineMacro("__CSKY__", "2");
+ Builder.defineMacro("__ckcore__", "2");
+ Builder.defineMacro("__CKCORE__", "2");
+
+ Builder.defineMacro("__CSKYABI__", ABI == "abiv2" ? "2" : "1");
+ Builder.defineMacro("__cskyabi__", ABI == "abiv2" ? "2" : "1");
+
+ StringRef ArchName = "ck810";
+ StringRef CPUName = "ck810";
+
+ if (Arch != llvm::CSKY::ArchKind::INVALID) {
+ ArchName = llvm::CSKY::getArchName(Arch);
+ CPUName = CPU;
+ }
+
+ Builder.defineMacro("__" + ArchName.upper() + "__");
+ Builder.defineMacro("__" + ArchName.lower() + "__");
+ if (ArchName != CPUName) {
+ Builder.defineMacro("__" + CPUName.upper() + "__");
+ Builder.defineMacro("__" + CPUName.lower() + "__");
+ }
+
+ // TODO: Add support for BE if BE was supported later
+ StringRef endian = "__cskyLE__";
+
+ Builder.defineMacro(endian);
+ Builder.defineMacro(endian.upper());
+ Builder.defineMacro(endian.lower());
+
+ if (DSPV2) {
+ StringRef dspv2 = "__CSKY_DSPV2__";
+ Builder.defineMacro(dspv2);
+ Builder.defineMacro(dspv2.lower());
+ }
+
+ if (VDSPV2) {
+ StringRef vdspv2 = "__CSKY_VDSPV2__";
+ Builder.defineMacro(vdspv2);
+ Builder.defineMacro(vdspv2.lower());
+
+ if (HardFloat) {
+ StringRef vdspv2_f = "__CSKY_VDSPV2_F__";
+ Builder.defineMacro(vdspv2_f);
+ Builder.defineMacro(vdspv2_f.lower());
+ }
+ }
+ if (VDSPV1) {
+ StringRef vdspv1_64 = "__CSKY_VDSP64__";
+ StringRef vdspv1_128 = "__CSKY_VDSP128__";
+
+ Builder.defineMacro(vdspv1_64);
+ Builder.defineMacro(vdspv1_64.lower());
+ Builder.defineMacro(vdspv1_128);
+ Builder.defineMacro(vdspv1_128.lower());
+ }
+ if (is3E3R1) {
+ StringRef is3e3r1 = "__CSKY_3E3R1__";
+ Builder.defineMacro(is3e3r1);
+ Builder.defineMacro(is3e3r1.lower());
+ }
+}
+
+bool CSKYTargetInfo::hasFeature(StringRef Feature) const {
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("hard-float", HardFloat)
+ .Case("hard-float-abi", HardFloatABI)
+ .Case("fpuv2_sf", FPUV2_SF)
+ .Case("fpuv2_df", FPUV2_DF)
+ .Case("fpuv3_sf", FPUV3_SF)
+ .Case("fpuv3_df", FPUV3_DF)
+ .Case("vdspv2", VDSPV2)
+ .Case("dspv2", DSPV2)
+ .Case("vdspv1", VDSPV1)
+ .Case("3e3r1", is3E3R1)
+ .Default(false);
+}
+
+bool CSKYTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) {
+ for (const auto &Feature : Features) {
+ if (Feature == "+hard-float")
+ HardFloat = true;
+ if (Feature == "+hard-float-abi")
+ HardFloatABI = true;
+ if (Feature == "+fpuv2_sf")
+ FPUV2_SF = true;
+ if (Feature == "+fpuv2_df")
+ FPUV2_DF = true;
+ if (Feature == "+fpuv3_sf")
+ FPUV3_SF = true;
+ if (Feature == "+fpuv3_df")
+ FPUV3_DF = true;
+ if (Feature == "+vdspv2")
+ VDSPV2 = true;
+ if (Feature == "+dspv2")
+ DSPV2 = true;
+ if (Feature == "+vdspv1")
+ VDSPV1 = true;
+ if (Feature == "+3e3r1")
+ is3E3R1 = true;
+ }
+
+ return true;
+}
+
+ArrayRef<Builtin::Info> CSKYTargetInfo::getTargetBuiltins() const {
+ return ArrayRef<Builtin::Info>();
+}
+
+ArrayRef<const char *> CSKYTargetInfo::getGCCRegNames() const {
+ static const char *const GCCRegNames[] = {
+ // Integer registers
+ "r0",
+ "r1",
+ "r2",
+ "r3",
+ "r4",
+ "r5",
+ "r6",
+ "r7",
+ "r8",
+ "r9",
+ "r10",
+ "r11",
+ "r12",
+ "r13",
+ "r14",
+ "r15",
+ "r16",
+ "r17",
+ "r18",
+ "r19",
+ "r20",
+ "r21",
+ "r22",
+ "r23",
+ "r24",
+ "r25",
+ "r26",
+ "r27",
+ "r28",
+ "r29",
+ "r30",
+ "r31",
+
+ // Floating point registers
+ "fr0",
+ "fr1",
+ "fr2",
+ "fr3",
+ "fr4",
+ "fr5",
+ "fr6",
+ "fr7",
+ "fr8",
+ "fr9",
+ "fr10",
+ "fr11",
+ "fr12",
+ "fr13",
+ "fr14",
+ "fr15",
+ "fr16",
+ "fr17",
+ "fr18",
+ "fr19",
+ "fr20",
+ "fr21",
+ "fr22",
+ "fr23",
+ "fr24",
+ "fr25",
+ "fr26",
+ "fr27",
+ "fr28",
+ "fr29",
+ "fr30",
+ "fr31",
+
+ };
+ return llvm::ArrayRef(GCCRegNames);
+}
+
+ArrayRef<TargetInfo::GCCRegAlias> CSKYTargetInfo::getGCCRegAliases() const {
+ static const TargetInfo::GCCRegAlias GCCRegAliases[] = {
+ {{"a0"}, "r0"},
+ {{"a1"}, "r1"},
+ {{"a2"}, "r2"},
+ {{"a3"}, "r3"},
+ {{"l0"}, "r4"},
+ {{"l1"}, "r5"},
+ {{"l2"}, "r6"},
+ {{"l3"}, "r7"},
+ {{"l4"}, "r8"},
+ {{"l5"}, "r9"},
+ {{"l6"}, "r10"},
+ {{"l7"}, "r11"},
+ {{"t0"}, "r12"},
+ {{"t1"}, "r13"},
+ {{"sp"}, "r14"},
+ {{"lr"}, "r15"},
+ {{"l8"}, "r16"},
+ {{"l9"}, "r17"},
+ {{"t2"}, "r18"},
+ {{"t3"}, "r19"},
+ {{"t4"}, "r20"},
+ {{"t5"}, "r21"},
+ {{"t6"}, "r22"},
+ {{"t7", "fp"}, "r23"},
+ {{"t8", "top"}, "r24"},
+ {{"t9", "bsp"}, "r25"},
+ {{"r26"}, "r26"},
+ {{"r27"}, "r27"},
+ {{"gb", "rgb", "rdb"}, "r28"},
+ {{"tb", "rtb"}, "r29"},
+ {{"svbr"}, "r30"},
+ {{"tls"}, "r31"},
+
+ {{"vr0"}, "fr0"},
+ {{"vr1"}, "fr1"},
+ {{"vr2"}, "fr2"},
+ {{"vr3"}, "fr3"},
+ {{"vr4"}, "fr4"},
+ {{"vr5"}, "fr5"},
+ {{"vr6"}, "fr6"},
+ {{"vr7"}, "fr7"},
+ {{"vr8"}, "fr8"},
+ {{"vr9"}, "fr9"},
+ {{"vr10"}, "fr10"},
+ {{"vr11"}, "fr11"},
+ {{"vr12"}, "fr12"},
+ {{"vr13"}, "fr13"},
+ {{"vr14"}, "fr14"},
+ {{"vr15"}, "fr15"},
+ {{"vr16"}, "fr16"},
+ {{"vr17"}, "fr17"},
+ {{"vr18"}, "fr18"},
+ {{"vr19"}, "fr19"},
+ {{"vr20"}, "fr20"},
+ {{"vr21"}, "fr21"},
+ {{"vr22"}, "fr22"},
+ {{"vr23"}, "fr23"},
+ {{"vr24"}, "fr24"},
+ {{"vr25"}, "fr25"},
+ {{"vr26"}, "fr26"},
+ {{"vr27"}, "fr27"},
+ {{"vr28"}, "fr28"},
+ {{"vr29"}, "fr29"},
+ {{"vr30"}, "fr30"},
+ {{"vr31"}, "fr31"},
+
+ };
+ return llvm::ArrayRef(GCCRegAliases);
+}
+
+bool CSKYTargetInfo::validateAsmConstraint(
+ const char *&Name, TargetInfo::ConstraintInfo &Info) const {
+ switch (*Name) {
+ default:
+ return false;
+ case 'a':
+ case 'b':
+ case 'c':
+ case 'y':
+ case 'l':
+ case 'h':
+ case 'w':
+ case 'v': // A floating-point and vector register.
+ case 'z':
+ Info.setAllowsRegister();
+ return true;
+ }
+}
+
+unsigned CSKYTargetInfo::getMinGlobalAlign(uint64_t Size) const {
+ if (Size >= 32)
+ return 32;
+ return 0;
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h b/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h
new file mode 100644
index 000000000000..11404e37db36
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h
@@ -0,0 +1,107 @@
+//===--- CSKY.h - Declare CSKY target feature support -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares CSKY TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_CSKY_H
+#define LLVM_CLANG_LIB_BASIC_TARGETS_CSKY_H
+
+#include "clang/Basic/MacroBuilder.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/TargetParser/CSKYTargetParser.h"
+
+namespace clang {
+namespace targets {
+
+class LLVM_LIBRARY_VISIBILITY CSKYTargetInfo : public TargetInfo {
+protected:
+ std::string ABI;
+ llvm::CSKY::ArchKind Arch = llvm::CSKY::ArchKind::INVALID;
+ std::string CPU;
+
+ bool HardFloat = false;
+ bool HardFloatABI = false;
+ bool FPUV2_SF = false;
+ bool FPUV2_DF = false;
+ bool FPUV3_SF = false;
+ bool FPUV3_DF = false;
+ bool VDSPV2 = false;
+ bool VDSPV1 = false;
+ bool DSPV2 = false;
+ bool is3E3R1 = false;
+
+public:
+ CSKYTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : TargetInfo(Triple) {
+ NoAsmVariants = true;
+ LongLongAlign = 32;
+ SuitableAlign = 32;
+ DoubleAlign = LongDoubleAlign = 32;
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ IntPtrType = SignedInt;
+ WCharType = SignedInt;
+ WIntType = UnsignedInt;
+
+ UseZeroLengthBitfieldAlignment = true;
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32;
+ resetDataLayout("e-m:e-S32-p:32:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-"
+ "v64:32:32-v128:32:32-a:0:32-Fi32-n32");
+
+ setABI("abiv2");
+ }
+
+ StringRef getABI() const override { return ABI; }
+ bool setABI(const std::string &Name) override {
+ if (Name == "abiv2" || Name == "abiv1") {
+ ABI = Name;
+ return true;
+ }
+ return false;
+ }
+
+ bool setCPU(const std::string &Name) override;
+
+ bool isValidCPUName(StringRef Name) const override;
+
+ unsigned getMinGlobalAlign(uint64_t) const override;
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override;
+
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return VoidPtrBuiltinVaList;
+ }
+
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const override;
+
+ std::string_view getClobbers() const override { return ""; }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override;
+ bool hasFeature(StringRef Feature) const override;
+ bool handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) override;
+
+ /// Whether target allows to overalign ABI-specified preferred alignment
+ bool allowsLargerPreferedTypeAlignment() const override { return false; }
+
+ bool hasBitIntType() const override { return true; }
+
+protected:
+ ArrayRef<const char *> getGCCRegNames() const override;
+
+ ArrayRef<GCCRegAlias> getGCCRegAliases() const override;
+};
+
+} // namespace targets
+} // namespace clang
+
+#endif // LLVM_CLANG_LIB_BASIC_TARGETS_CSKY_H
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/DirectX.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/DirectX.cpp
new file mode 100644
index 000000000000..0dd27e6e93b3
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/DirectX.cpp
@@ -0,0 +1,22 @@
+//===--- DirectX.cpp - Implement DirectX target feature support -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements DirectX TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "DirectX.h"
+#include "Targets.h"
+
+using namespace clang;
+using namespace clang::targets;
+
+void DirectXTargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ DefineStd(Builder, "DIRECTX", Opts);
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/DirectX.h b/contrib/llvm-project/clang/lib/Basic/Targets/DirectX.h
new file mode 100644
index 000000000000..acfcc8c47ba9
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/DirectX.h
@@ -0,0 +1,103 @@
+//===--- DirectX.h - Declare DirectX target feature support -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares DXIL TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_DIRECTX_H
+#define LLVM_CLANG_LIB_BASIC_TARGETS_DIRECTX_H
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
+
+namespace clang {
+namespace targets {
+
+static const unsigned DirectXAddrSpaceMap[] = {
+ 0, // Default
+ 1, // opencl_global
+ 3, // opencl_local
+ 2, // opencl_constant
+ 0, // opencl_private
+ 4, // opencl_generic
+ 5, // opencl_global_device
+ 6, // opencl_global_host
+ 0, // cuda_device
+ 0, // cuda_constant
+ 0, // cuda_shared
+ // SYCL address space values for this map are dummy
+ 0, // sycl_global
+ 0, // sycl_global_device
+ 0, // sycl_global_host
+ 0, // sycl_local
+ 0, // sycl_private
+ 0, // ptr32_sptr
+ 0, // ptr32_uptr
+ 0, // ptr64
+ 3, // hlsl_groupshared
+ // Wasm address space values for this target are dummy values,
+ // as it is only enabled for Wasm targets.
+ 20, // wasm_funcref
+};
+
+class LLVM_LIBRARY_VISIBILITY DirectXTargetInfo : public TargetInfo {
+public:
+ DirectXTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
+ : TargetInfo(Triple) {
+ TLSSupported = false;
+ VLASupported = false;
+ LongWidth = LongAlign = 64;
+ AddrSpaceMap = &DirectXAddrSpaceMap;
+ UseAddrSpaceMapMangling = true;
+ HasLegalHalfType = true;
+ HasFloat16 = true;
+ NoAsmVariants = true;
+ PlatformMinVersion = Triple.getOSVersion();
+ PlatformName = llvm::Triple::getOSTypeName(Triple.getOS());
+ resetDataLayout("e-m:e-p:32:32-i1:32-i8:8-i16:16-i32:32-i64:64-f16:16-f32:"
+ "32-f64:64-n8:16:32:64");
+ TheCXXABI.set(TargetCXXABI::Microsoft);
+ }
+ bool useFP16ConversionIntrinsics() const override { return false; }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override;
+
+ bool hasFeature(StringRef Feature) const override {
+ return Feature == "directx";
+ }
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return std::nullopt;
+ }
+
+ std::string_view getClobbers() const override { return ""; }
+
+ ArrayRef<const char *> getGCCRegNames() const override {
+ return std::nullopt;
+ }
+
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const override {
+ return true;
+ }
+
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ return std::nullopt;
+ }
+
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::VoidPtrBuiltinVaList;
+ }
+};
+
+} // namespace targets
+} // namespace clang
+
+#endif // LLVM_CLANG_LIB_BASIC_TARGETS_DIRECTX_H
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp
index 9c37dee7e89a..ac747e371fb4 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.cpp
@@ -24,8 +24,6 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__qdsp6__", "1");
Builder.defineMacro("__hexagon__", "1");
- Builder.defineMacro("__ELF__");
-
// The macro __HVXDBL__ is deprecated.
bool DefineHvxDbl = false;
@@ -68,6 +66,18 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
} else if (CPU == "hexagonv68") {
Builder.defineMacro("__HEXAGON_V68__");
Builder.defineMacro("__HEXAGON_ARCH__", "68");
+ } else if (CPU == "hexagonv69") {
+ Builder.defineMacro("__HEXAGON_V69__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "69");
+ } else if (CPU == "hexagonv71") {
+ Builder.defineMacro("__HEXAGON_V71__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "71");
+ } else if (CPU == "hexagonv71t") {
+ Builder.defineMacro("__HEXAGON_V71T__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "71");
+ } else if (CPU == "hexagonv73") {
+ Builder.defineMacro("__HEXAGON_V73__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "73");
}
if (hasFeature("hvx-length64b")) {
@@ -90,6 +100,11 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
std::string NumPhySlots = isTinyCore() ? "3" : "4";
Builder.defineMacro("__HEXAGON_PHYSICAL_SLOTS__", NumPhySlots);
+
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
}
bool HexagonTargetInfo::initFeatureMap(
@@ -128,6 +143,10 @@ bool HexagonTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
else if (F == "+audio")
HasAudio = true;
}
+ if (CPU.compare("hexagonv68") >= 0) {
+ HasLegalHalfType = true;
+ HasFloat16 = true;
+ }
return true;
}
@@ -166,7 +185,7 @@ const char *const HexagonTargetInfo::GCCRegNames[] = {
};
ArrayRef<const char *> HexagonTargetInfo::getGCCRegNames() const {
- return llvm::makeArrayRef(GCCRegNames);
+ return llvm::ArrayRef(GCCRegNames);
}
const TargetInfo::GCCRegAlias HexagonTargetInfo::GCCRegAliases[] = {
@@ -176,16 +195,16 @@ const TargetInfo::GCCRegAlias HexagonTargetInfo::GCCRegAliases[] = {
};
ArrayRef<TargetInfo::GCCRegAlias> HexagonTargetInfo::getGCCRegAliases() const {
- return llvm::makeArrayRef(GCCRegAliases);
+ return llvm::ArrayRef(GCCRegAliases);
}
-const Builtin::Info HexagonTargetInfo::BuiltinInfo[] = {
+static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
- {#ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE},
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsHexagon.def"
};
@@ -214,7 +233,9 @@ static constexpr CPUSuffix Suffixes[] = {
{{"hexagonv60"}, {"60"}}, {{"hexagonv62"}, {"62"}},
{{"hexagonv65"}, {"65"}}, {{"hexagonv66"}, {"66"}},
{{"hexagonv67"}, {"67"}}, {{"hexagonv67t"}, {"67t"}},
- {{"hexagonv68"}, {"68"}},
+ {{"hexagonv68"}, {"68"}}, {{"hexagonv69"}, {"69"}},
+ {{"hexagonv71"}, {"71"}}, {{"hexagonv71t"}, {"71t"}},
+ {{"hexagonv73"}, {"73"}},
};
const char *HexagonTargetInfo::getHexagonCPUSuffix(StringRef Name) {
@@ -232,6 +253,6 @@ void HexagonTargetInfo::fillValidCPUList(
}
ArrayRef<Builtin::Info> HexagonTargetInfo::getTargetBuiltins() const {
- return llvm::makeArrayRef(BuiltinInfo, clang::Hexagon::LastTSBuiltin -
- Builtin::FirstTSBuiltin);
+ return llvm::ArrayRef(BuiltinInfo, clang::Hexagon::LastTSBuiltin -
+ Builtin::FirstTSBuiltin);
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h b/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h
index d6c7da5f1e40..cdb47dbae799 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Hexagon.h
@@ -15,8 +15,8 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -24,7 +24,6 @@ namespace targets {
// Hexagon abstract base class
class LLVM_LIBRARY_VISIBILITY HexagonTargetInfo : public TargetInfo {
- static const Builtin::Info BuiltinInfo[];
static const char *const GCCRegNames[];
static const TargetInfo::GCCRegAlias GCCRegAliases[];
std::string CPU;
@@ -113,7 +112,7 @@ public:
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
static const char *getHexagonCPUSuffix(StringRef Name);
@@ -139,7 +138,7 @@ public:
return CPU.find('t') != std::string::npos;
}
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Lanai.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/Lanai.cpp
index bb1872083c09..8722a369ed87 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Lanai.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Lanai.cpp
@@ -24,7 +24,7 @@ const char *const LanaiTargetInfo::GCCRegNames[] = {
};
ArrayRef<const char *> LanaiTargetInfo::getGCCRegNames() const {
- return llvm::makeArrayRef(GCCRegNames);
+ return llvm::ArrayRef(GCCRegNames);
}
const TargetInfo::GCCRegAlias LanaiTargetInfo::GCCRegAliases[] = {
@@ -33,7 +33,7 @@ const TargetInfo::GCCRegAlias LanaiTargetInfo::GCCRegAliases[] = {
};
ArrayRef<TargetInfo::GCCRegAlias> LanaiTargetInfo::getGCCRegAliases() const {
- return llvm::makeArrayRef(GCCRegAliases);
+ return llvm::ArrayRef(GCCRegAliases);
}
bool LanaiTargetInfo::isValidCPUName(StringRef Name) const {
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Lanai.h b/contrib/llvm-project/clang/lib/Basic/Targets/Lanai.h
index 9af5427b81c4..144cbc7de989 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Lanai.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Lanai.h
@@ -15,8 +15,8 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -78,16 +78,18 @@ public:
return TargetInfo::VoidPtrBuiltinVaList;
}
- ArrayRef<Builtin::Info> getTargetBuiltins() const override { return None; }
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return std::nullopt;
+ }
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &info) const override {
return false;
}
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Le64.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/Le64.cpp
index 5c961ff81e05..f7afa0e747d6 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Le64.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Le64.cpp
@@ -27,5 +27,4 @@ void Le64TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
DefineStd(Builder, "unix", Opts);
defineCPUMacros(Builder, "le64", /*Tuning=*/false);
- Builder.defineMacro("__ELF__");
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Le64.h b/contrib/llvm-project/clang/lib/Basic/Targets/Le64.h
index 13a0b04d9f09..45f6a4e9dd75 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Le64.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Le64.h
@@ -15,8 +15,8 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -41,12 +41,14 @@ public:
return TargetInfo::PNaClABIBuiltinVaList;
}
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
- ArrayRef<const char *> getGCCRegNames() const override { return None; }
+ ArrayRef<const char *> getGCCRegNames() const override {
+ return std::nullopt;
+ }
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
- return None;
+ return std::nullopt;
}
bool validateAsmConstraint(const char *&Name,
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.cpp
new file mode 100644
index 000000000000..88537989a051
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.cpp
@@ -0,0 +1,299 @@
+//===--- LoongArch.cpp - Implement LoongArch target feature support -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements LoongArch TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LoongArch.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/MacroBuilder.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/LoongArchTargetParser.h"
+
+using namespace clang;
+using namespace clang::targets;
+
+ArrayRef<const char *> LoongArchTargetInfo::getGCCRegNames() const {
+ static const char *const GCCRegNames[] = {
+ // General purpose registers.
+ "$r0", "$r1", "$r2", "$r3", "$r4", "$r5", "$r6", "$r7", "$r8", "$r9",
+ "$r10", "$r11", "$r12", "$r13", "$r14", "$r15", "$r16", "$r17", "$r18",
+ "$r19", "$r20", "$r21", "$r22", "$r23", "$r24", "$r25", "$r26", "$r27",
+ "$r28", "$r29", "$r30", "$r31",
+ // Floating point registers.
+ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9",
+ "$f10", "$f11", "$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18",
+ "$f19", "$f20", "$f21", "$f22", "$f23", "$f24", "$f25", "$f26", "$f27",
+ "$f28", "$f29", "$f30", "$f31",
+ // Condition flag registers.
+ "$fcc0", "$fcc1", "$fcc2", "$fcc3", "$fcc4", "$fcc5", "$fcc6", "$fcc7",
+ // 128-bit vector registers.
+ "$vr0", "$vr1", "$vr2", "$vr3", "$vr4", "$vr5", "$vr6", "$vr7", "$vr8",
+ "$vr9", "$vr10", "$vr11", "$vr12", "$vr13", "$vr14", "$vr15", "$vr16",
+ "$vr17", "$vr18", "$vr19", "$vr20", "$vr21", "$vr22", "$vr23", "$vr24",
+ "$vr25", "$vr26", "$vr27", "$vr28", "$vr29", "$vr30", "$vr31",
+ // 256-bit vector registers.
+ "$xr0", "$xr1", "$xr2", "$xr3", "$xr4", "$xr5", "$xr6", "$xr7", "$xr8",
+ "$xr9", "$xr10", "$xr11", "$xr12", "$xr13", "$xr14", "$xr15", "$xr16",
+ "$xr17", "$xr18", "$xr19", "$xr20", "$xr21", "$xr22", "$xr23", "$xr24",
+ "$xr25", "$xr26", "$xr27", "$xr28", "$xr29", "$xr30", "$xr31"};
+ return llvm::ArrayRef(GCCRegNames);
+}
+
+ArrayRef<TargetInfo::GCCRegAlias>
+LoongArchTargetInfo::getGCCRegAliases() const {
+ static const TargetInfo::GCCRegAlias GCCRegAliases[] = {
+ {{"zero", "$zero", "r0"}, "$r0"},
+ {{"ra", "$ra", "r1"}, "$r1"},
+ {{"tp", "$tp", "r2"}, "$r2"},
+ {{"sp", "$sp", "r3"}, "$r3"},
+ {{"a0", "$a0", "r4"}, "$r4"},
+ {{"a1", "$a1", "r5"}, "$r5"},
+ {{"a2", "$a2", "r6"}, "$r6"},
+ {{"a3", "$a3", "r7"}, "$r7"},
+ {{"a4", "$a4", "r8"}, "$r8"},
+ {{"a5", "$a5", "r9"}, "$r9"},
+ {{"a6", "$a6", "r10"}, "$r10"},
+ {{"a7", "$a7", "r11"}, "$r11"},
+ {{"t0", "$t0", "r12"}, "$r12"},
+ {{"t1", "$t1", "r13"}, "$r13"},
+ {{"t2", "$t2", "r14"}, "$r14"},
+ {{"t3", "$t3", "r15"}, "$r15"},
+ {{"t4", "$t4", "r16"}, "$r16"},
+ {{"t5", "$t5", "r17"}, "$r17"},
+ {{"t6", "$t6", "r18"}, "$r18"},
+ {{"t7", "$t7", "r19"}, "$r19"},
+ {{"t8", "$t8", "r20"}, "$r20"},
+ {{"r21"}, "$r21"},
+ {{"s9", "$s9", "r22", "fp", "$fp"}, "$r22"},
+ {{"s0", "$s0", "r23"}, "$r23"},
+ {{"s1", "$s1", "r24"}, "$r24"},
+ {{"s2", "$s2", "r25"}, "$r25"},
+ {{"s3", "$s3", "r26"}, "$r26"},
+ {{"s4", "$s4", "r27"}, "$r27"},
+ {{"s5", "$s5", "r28"}, "$r28"},
+ {{"s6", "$s6", "r29"}, "$r29"},
+ {{"s7", "$s7", "r30"}, "$r30"},
+ {{"s8", "$s8", "r31"}, "$r31"},
+ {{"$fa0"}, "$f0"},
+ {{"$fa1"}, "$f1"},
+ {{"$fa2"}, "$f2"},
+ {{"$fa3"}, "$f3"},
+ {{"$fa4"}, "$f4"},
+ {{"$fa5"}, "$f5"},
+ {{"$fa6"}, "$f6"},
+ {{"$fa7"}, "$f7"},
+ {{"$ft0"}, "$f8"},
+ {{"$ft1"}, "$f9"},
+ {{"$ft2"}, "$f10"},
+ {{"$ft3"}, "$f11"},
+ {{"$ft4"}, "$f12"},
+ {{"$ft5"}, "$f13"},
+ {{"$ft6"}, "$f14"},
+ {{"$ft7"}, "$f15"},
+ {{"$ft8"}, "$f16"},
+ {{"$ft9"}, "$f17"},
+ {{"$ft10"}, "$f18"},
+ {{"$ft11"}, "$f19"},
+ {{"$ft12"}, "$f20"},
+ {{"$ft13"}, "$f21"},
+ {{"$ft14"}, "$f22"},
+ {{"$ft15"}, "$f23"},
+ {{"$fs0"}, "$f24"},
+ {{"$fs1"}, "$f25"},
+ {{"$fs2"}, "$f26"},
+ {{"$fs3"}, "$f27"},
+ {{"$fs4"}, "$f28"},
+ {{"$fs5"}, "$f29"},
+ {{"$fs6"}, "$f30"},
+ {{"$fs7"}, "$f31"},
+ };
+ return llvm::ArrayRef(GCCRegAliases);
+}
+
+bool LoongArchTargetInfo::validateAsmConstraint(
+ const char *&Name, TargetInfo::ConstraintInfo &Info) const {
+ // See the GCC definitions here:
+ // https://gcc.gnu.org/onlinedocs/gccint/Machine-Constraints.html
+ // Note that the 'm' constraint is handled in TargetInfo.
+ switch (*Name) {
+ default:
+ return false;
+ case 'f':
+ // A floating-point register (if available).
+ Info.setAllowsRegister();
+ return true;
+ case 'k':
+ // A memory operand whose address is formed by a base register and
+ // (optionally scaled) index register.
+ Info.setAllowsMemory();
+ return true;
+ case 'l':
+ // A signed 16-bit constant.
+ Info.setRequiresImmediate(-32768, 32767);
+ return true;
+ case 'I':
+ // A signed 12-bit constant (for arithmetic instructions).
+ Info.setRequiresImmediate(-2048, 2047);
+ return true;
+ case 'J':
+ // Integer zero.
+ Info.setRequiresImmediate(0);
+ return true;
+ case 'K':
+ // An unsigned 12-bit constant (for logic instructions).
+ Info.setRequiresImmediate(0, 4095);
+ return true;
+ case 'Z':
+ // ZB: An address that is held in a general-purpose register. The offset is
+ // zero.
+ // ZC: A memory operand whose address is formed by a base register
+ // and offset that is suitable for use in instructions with the same
+ // addressing mode as ll.w and sc.w.
+ if (Name[1] == 'C' || Name[1] == 'B') {
+ Info.setAllowsMemory();
+ ++Name; // Skip over 'Z'.
+ return true;
+ }
+ return false;
+ }
+}
+
+std::string
+LoongArchTargetInfo::convertConstraint(const char *&Constraint) const {
+ std::string R;
+ switch (*Constraint) {
+ case 'Z':
+ // "ZC"/"ZB" are two-character constraints; add "^" hint for later
+ // parsing.
+ R = "^" + std::string(Constraint, 2);
+ ++Constraint;
+ break;
+ default:
+ R = TargetInfo::convertConstraint(Constraint);
+ break;
+ }
+ return R;
+}
+
+void LoongArchTargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ Builder.defineMacro("__loongarch__");
+ unsigned GRLen = getRegisterWidth();
+ Builder.defineMacro("__loongarch_grlen", Twine(GRLen));
+ if (GRLen == 64)
+ Builder.defineMacro("__loongarch64");
+
+ if (HasFeatureD)
+ Builder.defineMacro("__loongarch_frlen", "64");
+ else if (HasFeatureF)
+ Builder.defineMacro("__loongarch_frlen", "32");
+ else
+ Builder.defineMacro("__loongarch_frlen", "0");
+
+ // Define __loongarch_arch.
+ StringRef ArchName = getCPU();
+ Builder.defineMacro("__loongarch_arch", Twine('"') + ArchName + Twine('"'));
+
+ // Define __loongarch_tune.
+ StringRef TuneCPU = getTargetOpts().TuneCPU;
+ if (TuneCPU.empty())
+ TuneCPU = ArchName;
+ Builder.defineMacro("__loongarch_tune", Twine('"') + TuneCPU + Twine('"'));
+
+ if (HasFeatureLSX)
+ Builder.defineMacro("__loongarch_sx", Twine(1));
+ if (HasFeatureLASX)
+ Builder.defineMacro("__loongarch_asx", Twine(1));
+
+ StringRef ABI = getABI();
+ if (ABI == "lp64d" || ABI == "lp64f" || ABI == "lp64s")
+ Builder.defineMacro("__loongarch_lp64");
+
+ if (ABI == "lp64d" || ABI == "ilp32d") {
+ Builder.defineMacro("__loongarch_hard_float");
+ Builder.defineMacro("__loongarch_double_float");
+ } else if (ABI == "lp64f" || ABI == "ilp32f") {
+ Builder.defineMacro("__loongarch_hard_float");
+ Builder.defineMacro("__loongarch_single_float");
+ } else if (ABI == "lp64s" || ABI == "ilp32s") {
+ Builder.defineMacro("__loongarch_soft_float");
+ }
+
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
+ if (GRLen == 64)
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
+}
+
+static constexpr Builtin::Info BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) \
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
+#include "clang/Basic/BuiltinsLoongArch.def"
+};
+
+bool LoongArchTargetInfo::initFeatureMap(
+ llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const {
+ if (getTriple().getArch() == llvm::Triple::loongarch64)
+ Features["64bit"] = true;
+ if (getTriple().getArch() == llvm::Triple::loongarch32)
+ Features["32bit"] = true;
+
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
+}
+
+/// Return true if has this feature.
+bool LoongArchTargetInfo::hasFeature(StringRef Feature) const {
+ bool Is64Bit = getTriple().getArch() == llvm::Triple::loongarch64;
+ // TODO: Handle more features.
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("loongarch32", !Is64Bit)
+ .Case("loongarch64", Is64Bit)
+ .Case("32bit", !Is64Bit)
+ .Case("64bit", Is64Bit)
+ .Case("lsx", HasFeatureLSX)
+ .Case("lasx", HasFeatureLASX)
+ .Default(false);
+}
+
+ArrayRef<Builtin::Info> LoongArchTargetInfo::getTargetBuiltins() const {
+ return llvm::ArrayRef(BuiltinInfo, clang::LoongArch::LastTSBuiltin -
+ Builtin::FirstTSBuiltin);
+}
+
+bool LoongArchTargetInfo::handleTargetFeatures(
+ std::vector<std::string> &Features, DiagnosticsEngine &Diags) {
+ for (const auto &Feature : Features) {
+ if (Feature == "+d" || Feature == "+f") {
+ // "d" implies "f".
+ HasFeatureF = true;
+ if (Feature == "+d") {
+ HasFeatureD = true;
+ }
+ } else if (Feature == "+lsx")
+ HasFeatureLSX = true;
+ else if (Feature == "+lasx")
+ HasFeatureLASX = true;
+ }
+ return true;
+}
+
+bool LoongArchTargetInfo::isValidCPUName(StringRef Name) const {
+ return llvm::LoongArch::isValidCPUName(Name);
+}
+
+void LoongArchTargetInfo::fillValidCPUList(
+ SmallVectorImpl<StringRef> &Values) const {
+ llvm::LoongArch::fillValidCPUList(Values);
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.h b/contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.h
new file mode 100644
index 000000000000..3313102492cb
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/LoongArch.h
@@ -0,0 +1,154 @@
+//===-- LoongArch.h - Declare LoongArch target feature support --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares LoongArch TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_LOONGARCH_H
+#define LLVM_CLANG_LIB_BASIC_TARGETS_LOONGARCH_H
+
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
+
+namespace clang {
+namespace targets {
+
+class LLVM_LIBRARY_VISIBILITY LoongArchTargetInfo : public TargetInfo {
+protected:
+ std::string ABI;
+ std::string CPU;
+ bool HasFeatureD;
+ bool HasFeatureF;
+ bool HasFeatureLSX;
+ bool HasFeatureLASX;
+
+public:
+ LoongArchTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
+ : TargetInfo(Triple) {
+ HasFeatureD = false;
+ HasFeatureF = false;
+ HasFeatureLSX = false;
+ HasFeatureLASX = false;
+ LongDoubleWidth = 128;
+ LongDoubleAlign = 128;
+ LongDoubleFormat = &llvm::APFloat::IEEEquad();
+ MCountName = "_mcount";
+ SuitableAlign = 128;
+ WCharType = SignedInt;
+ WIntType = UnsignedInt;
+ }
+
+ bool setCPU(const std::string &Name) override {
+ if (!isValidCPUName(Name))
+ return false;
+ CPU = Name;
+ return true;
+ }
+
+ StringRef getCPU() const { return CPU; }
+
+ StringRef getABI() const override { return ABI; }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override;
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override;
+
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::VoidPtrBuiltinVaList;
+ }
+
+ std::string_view getClobbers() const override { return ""; }
+
+ ArrayRef<const char *> getGCCRegNames() const override;
+
+ int getEHDataRegisterNumber(unsigned RegNo) const override {
+ if (RegNo == 0)
+ return 4;
+ if (RegNo == 1)
+ return 5;
+ return -1;
+ }
+
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
+
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const override;
+ std::string convertConstraint(const char *&Constraint) const override;
+
+ bool hasBitIntType() const override { return true; }
+
+ bool handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) override;
+
+ bool
+ initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
+ StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const override;
+
+ bool hasFeature(StringRef Feature) const override;
+
+ bool isValidCPUName(StringRef Name) const override;
+ void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY LoongArch32TargetInfo
+ : public LoongArchTargetInfo {
+public:
+ LoongArch32TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : LoongArchTargetInfo(Triple, Opts) {
+ IntPtrType = SignedInt;
+ PtrDiffType = SignedInt;
+ SizeType = UnsignedInt;
+ resetDataLayout("e-m:e-p:32:32-i64:64-n32-S128");
+ // TODO: select appropriate ABI.
+ setABI("ilp32d");
+ }
+
+ bool setABI(const std::string &Name) override {
+ if (Name == "ilp32d" || Name == "ilp32f" || Name == "ilp32s") {
+ ABI = Name;
+ return true;
+ }
+ return false;
+ }
+ void setMaxAtomicWidth() override {
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32;
+ }
+};
+
+class LLVM_LIBRARY_VISIBILITY LoongArch64TargetInfo
+ : public LoongArchTargetInfo {
+public:
+ LoongArch64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : LoongArchTargetInfo(Triple, Opts) {
+ LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
+ IntMaxType = Int64Type = SignedLong;
+ resetDataLayout("e-m:e-p:64:64-i64:64-i128:128-n64-S128");
+ // TODO: select appropriate ABI.
+ setABI("lp64d");
+ }
+
+ bool setABI(const std::string &Name) override {
+ if (Name == "lp64d" || Name == "lp64f" || Name == "lp64s") {
+ ABI = Name;
+ return true;
+ }
+ return false;
+ }
+ void setMaxAtomicWidth() override {
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ }
+};
+} // end namespace targets
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_BASIC_TARGETS_LOONGARCH_H
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/M68k.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/M68k.cpp
index c0cd8fa90ed6..1b7e0a7f32c9 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/M68k.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/M68k.cpp
@@ -17,19 +17,20 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/TargetParser/TargetParser.h"
#include <cstdint>
#include <cstring>
#include <limits>
+#include <optional>
namespace clang {
namespace targets {
M68kTargetInfo::M68kTargetInfo(const llvm::Triple &Triple,
- const TargetOptions &)
- : TargetInfo(Triple) {
+ const TargetOptions &Opts)
+ : TargetInfo(Triple), TargetOpts(Opts) {
- std::string Layout = "";
+ std::string Layout;
// M68k is Big Endian
Layout += "E";
@@ -79,45 +80,44 @@ void M68kTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__m68k__");
- Builder.defineMacro("mc68000");
- Builder.defineMacro("__mc68000");
- Builder.defineMacro("__mc68000__");
+ DefineStd(Builder, "mc68000", Opts);
// For sub-architecture
switch (CPU) {
case CK_68010:
- Builder.defineMacro("mc68010");
- Builder.defineMacro("__mc68010");
- Builder.defineMacro("__mc68010__");
+ DefineStd(Builder, "mc68010", Opts);
break;
case CK_68020:
- Builder.defineMacro("mc68020");
- Builder.defineMacro("__mc68020");
- Builder.defineMacro("__mc68020__");
+ DefineStd(Builder, "mc68020", Opts);
break;
case CK_68030:
- Builder.defineMacro("mc68030");
- Builder.defineMacro("__mc68030");
- Builder.defineMacro("__mc68030__");
+ DefineStd(Builder, "mc68030", Opts);
break;
case CK_68040:
- Builder.defineMacro("mc68040");
- Builder.defineMacro("__mc68040");
- Builder.defineMacro("__mc68040__");
+ DefineStd(Builder, "mc68040", Opts);
break;
case CK_68060:
- Builder.defineMacro("mc68060");
- Builder.defineMacro("__mc68060");
- Builder.defineMacro("__mc68060__");
+ DefineStd(Builder, "mc68060", Opts);
break;
default:
break;
}
+
+ if (CPU >= CK_68020) {
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
+ }
+
+ // Floating point
+ if (TargetOpts.FeatureMap.lookup("isa-68881") ||
+ TargetOpts.FeatureMap.lookup("isa-68882"))
+ Builder.defineMacro("__HAVE_68881__");
}
ArrayRef<Builtin::Info> M68kTargetInfo::getTargetBuiltins() const {
// FIXME: Implement.
- return None;
+ return std::nullopt;
}
bool M68kTargetInfo::hasFeature(StringRef Feature) const {
@@ -131,12 +131,12 @@ const char *const M68kTargetInfo::GCCRegNames[] = {
"pc"};
ArrayRef<const char *> M68kTargetInfo::getGCCRegNames() const {
- return llvm::makeArrayRef(GCCRegNames);
+ return llvm::ArrayRef(GCCRegNames);
}
ArrayRef<TargetInfo::GCCRegAlias> M68kTargetInfo::getGCCRegAliases() const {
// No aliases.
- return None;
+ return std::nullopt;
}
bool M68kTargetInfo::validateAsmConstraint(
@@ -185,13 +185,19 @@ bool M68kTargetInfo::validateAsmConstraint(
break;
}
break;
+ case 'Q': // address register indirect addressing
+ case 'U': // address register indirect w/ constant offset addressing
+ // TODO: Handle 'S' (basically 'm' when pc-rel is enforced) when
+ // '-mpcrel' flag is properly handled by the driver.
+ info.setAllowsMemory();
+ return true;
default:
break;
}
return false;
}
-llvm::Optional<std::string>
+std::optional<std::string>
M68kTargetInfo::handleAsmEscapedChar(char EscChar) const {
char C;
switch (EscChar) {
@@ -209,7 +215,7 @@ M68kTargetInfo::handleAsmEscapedChar(char EscChar) const {
C = 'd';
break;
default:
- return llvm::None;
+ return std::nullopt;
}
return std::string(1, C);
@@ -223,7 +229,7 @@ std::string M68kTargetInfo::convertConstraint(const char *&Constraint) const {
return std::string(1, *Constraint);
}
-const char *M68kTargetInfo::getClobbers() const {
+std::string_view M68kTargetInfo::getClobbers() const {
// FIXME: Is this really right?
return "";
}
@@ -232,5 +238,15 @@ TargetInfo::BuiltinVaListKind M68kTargetInfo::getBuiltinVaListKind() const {
return TargetInfo::VoidPtrBuiltinVaList;
}
+TargetInfo::CallingConvCheckResult
+M68kTargetInfo::checkCallingConvention(CallingConv CC) const {
+ switch (CC) {
+ case CC_C:
+ case CC_M68kRTD:
+ return CCCR_OK;
+ default:
+ return TargetInfo::checkCallingConvention(CC);
+ }
+}
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/M68k.h b/contrib/llvm-project/clang/lib/Basic/Targets/M68k.h
index a42ca674ef9c..a9c262e62fba 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/M68k.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/M68k.h
@@ -16,8 +16,9 @@
#include "OSTargets.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
+#include <optional>
namespace clang {
namespace targets {
@@ -35,6 +36,8 @@ class LLVM_LIBRARY_VISIBILITY M68kTargetInfo : public TargetInfo {
CK_68060
} CPU = CK_Unknown;
+ const TargetOptions &TargetOpts;
+
public:
M68kTargetInfo(const llvm::Triple &Triple, const TargetOptions &);
@@ -47,10 +50,11 @@ public:
std::string convertConstraint(const char *&Constraint) const override;
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &info) const override;
- llvm::Optional<std::string> handleAsmEscapedChar(char EscChar) const override;
- const char *getClobbers() const override;
+ std::optional<std::string> handleAsmEscapedChar(char EscChar) const override;
+ std::string_view getClobbers() const override;
BuiltinVaListKind getBuiltinVaListKind() const override;
bool setCPU(const std::string &Name) override;
+ CallingConvCheckResult checkCallingConvention(CallingConv CC) const override;
};
} // namespace targets
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/MSP430.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/MSP430.cpp
index 90890500ae27..844f5d3af703 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/MSP430.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/MSP430.cpp
@@ -22,13 +22,12 @@ const char *const MSP430TargetInfo::GCCRegNames[] = {
};
ArrayRef<const char *> MSP430TargetInfo::getGCCRegNames() const {
- return llvm::makeArrayRef(GCCRegNames);
+ return llvm::ArrayRef(GCCRegNames);
}
void MSP430TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("MSP430");
Builder.defineMacro("__MSP430__");
- Builder.defineMacro("__ELF__");
// FIXME: defines for different 'flavours' of MCU
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/MSP430.h b/contrib/llvm-project/clang/lib/Basic/Targets/MSP430.h
index 9d42e4d4bb18..25639b8c1e0a 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/MSP430.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/MSP430.h
@@ -15,8 +15,8 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -52,7 +52,7 @@ public:
ArrayRef<Builtin::Info> getTargetBuiltins() const override {
// FIXME: Implement.
- return None;
+ return std::nullopt;
}
bool allowsLargerPreferedTypeAlignment() const override { return false; }
@@ -71,7 +71,7 @@ public:
{{"r2"}, "sr"},
{{"r3"}, "cg"},
};
- return llvm::makeArrayRef(GCCRegAliases);
+ return llvm::ArrayRef(GCCRegAliases);
}
bool validateAsmConstraint(const char *&Name,
@@ -87,7 +87,7 @@ public:
return false;
}
- const char *getClobbers() const override {
+ std::string_view getClobbers() const override {
// FIXME: Is this really right?
return "";
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Mips.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/Mips.cpp
index 3a32fd492c6b..3a65f53c5248 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Mips.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Mips.cpp
@@ -20,11 +20,11 @@
using namespace clang;
using namespace clang::targets;
-const Builtin::Info MipsTargetInfo::BuiltinInfo[] = {
+static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
- {#ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsMips.def"
};
@@ -50,7 +50,7 @@ static constexpr llvm::StringLiteral ValidCPUNames[] = {
{"octeon"}, {"octeon+"}, {"p5600"}};
bool MipsTargetInfo::isValidCPUName(StringRef Name) const {
- return llvm::find(ValidCPUNames, Name) != std::end(ValidCPUNames);
+ return llvm::is_contained(ValidCPUNames, Name);
}
void MipsTargetInfo::fillValidCPUList(
@@ -149,6 +149,10 @@ void MipsTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("_MIPS_FPSET", Twine(32));
else
Builder.defineMacro("_MIPS_FPSET", Twine(16));
+ if (NoOddSpreg)
+ Builder.defineMacro("_MIPS_SPFPSET", Twine(16));
+ else
+ Builder.defineMacro("_MIPS_SPFPSET", Twine(32));
if (IsMips16)
Builder.defineMacro("__mips16", Twine(1));
@@ -182,7 +186,7 @@ void MipsTargetInfo::getTargetDefines(const LangOptions &Opts,
if (DisableMadd4)
Builder.defineMacro("__mips_no_madd4", Twine(1));
- Builder.defineMacro("_MIPS_SZPTR", Twine(getPointerWidth(0)));
+ Builder.defineMacro("_MIPS_SZPTR", Twine(getPointerWidth(LangAS::Default)));
Builder.defineMacro("_MIPS_SZINT", Twine(getIntWidth()));
Builder.defineMacro("_MIPS_SZLONG", Twine(getLongWidth()));
@@ -192,14 +196,14 @@ void MipsTargetInfo::getTargetDefines(const LangOptions &Opts,
else
Builder.defineMacro("_MIPS_ARCH_" + StringRef(CPU).upper());
- if (StringRef(CPU).startswith("octeon"))
+ if (StringRef(CPU).starts_with("octeon"))
Builder.defineMacro("__OCTEON__");
- // These shouldn't be defined for MIPS-I but there's no need to check
- // for that since MIPS-I isn't supported.
- Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
- Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
- Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
+ if (CPU != "mips1") {
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
+ }
// 32-bit MIPS processors don't have the necessary lld/scd instructions
// found in 64-bit processors. In the case of O32 on a 64-bit processor,
@@ -220,8 +224,8 @@ bool MipsTargetInfo::hasFeature(StringRef Feature) const {
}
ArrayRef<Builtin::Info> MipsTargetInfo::getTargetBuiltins() const {
- return llvm::makeArrayRef(BuiltinInfo, clang::Mips::LastTSBuiltin -
- Builtin::FirstTSBuiltin);
+ return llvm::ArrayRef(BuiltinInfo,
+ clang::Mips::LastTSBuiltin - Builtin::FirstTSBuiltin);
}
unsigned MipsTargetInfo::getUnwindWordWidth() const {
@@ -229,7 +233,7 @@ unsigned MipsTargetInfo::getUnwindWordWidth() const {
.Case("o32", 32)
.Case("n32", 64)
.Case("n64", 64)
- .Default(getPointerWidth(0));
+ .Default(getPointerWidth(LangAS::Default));
}
bool MipsTargetInfo::validateTarget(DiagnosticsEngine &Diags) const {
@@ -238,12 +242,6 @@ bool MipsTargetInfo::validateTarget(DiagnosticsEngine &Diags) const {
Diags.Report(diag::err_target_unsupported_cpu_for_micromips) << CPU;
return false;
}
- // FIXME: It's valid to use O32 on a 64-bit CPU but the backend can't handle
- // this yet. It's better to fail here than on the backend assertion.
- if (processorSupportsGPR64() && ABI == "o32") {
- Diags.Report(diag::err_target_unsupported_abi) << ABI << CPU;
- return false;
- }
// 64-bit ABI's require 64-bit CPU's.
if (!processorSupportsGPR64() && (ABI == "n32" || ABI == "n64")) {
@@ -251,24 +249,6 @@ bool MipsTargetInfo::validateTarget(DiagnosticsEngine &Diags) const {
return false;
}
- // FIXME: It's valid to use O32 on a mips64/mips64el triple but the backend
- // can't handle this yet. It's better to fail here than on the
- // backend assertion.
- if (getTriple().isMIPS64() && ABI == "o32") {
- Diags.Report(diag::err_target_unsupported_abi_for_triple)
- << ABI << getTriple().str();
- return false;
- }
-
- // FIXME: It's valid to use N32/N64 on a mips/mipsel triple but the backend
- // can't handle this yet. It's better to fail here than on the
- // backend assertion.
- if (getTriple().isMIPS32() && (ABI == "n32" || ABI == "n64")) {
- Diags.Report(diag::err_target_unsupported_abi_for_triple)
- << ABI << getTriple().str();
- return false;
- }
-
// -fpxx is valid only for the o32 ABI
if (FPMode == FPXX && (ABI == "n32" || ABI == "n64")) {
Diags.Report(diag::err_unsupported_abi_for_opt) << "-mfpxx" << "o32";
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Mips.h b/contrib/llvm-project/clang/lib/Basic/Targets/Mips.h
index b475c03889a1..23d4e1b598fa 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Mips.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Mips.h
@@ -15,8 +15,8 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -40,7 +40,6 @@ class LLVM_LIBRARY_VISIBILITY MipsTargetInfo : public TargetInfo {
resetDataLayout(("e-" + Layout).str());
}
- static const Builtin::Info BuiltinInfo[];
std::string CPU;
bool IsMips16;
bool IsMicromips;
@@ -54,6 +53,7 @@ class LLVM_LIBRARY_VISIBILITY MipsTargetInfo : public TargetInfo {
bool HasMSA;
bool DisableMadd4;
bool UseIndirectJumpHazard;
+ bool NoOddSpreg;
protected:
enum FPModeEnum { FPXX, FP32, FP64 } FPMode;
@@ -226,7 +226,7 @@ public:
"$msair", "$msacsr", "$msaaccess", "$msasave", "$msamodify",
"$msarequest", "$msamap", "$msaunmap"
};
- return llvm::makeArrayRef(GCCRegNames);
+ return llvm::ArrayRef(GCCRegNames);
}
bool validateAsmConstraint(const char *&Name,
@@ -237,12 +237,14 @@ public:
case 'r': // CPU registers.
case 'd': // Equivalent to "r" unless generating MIPS16 code.
case 'y': // Equivalent to "r", backward compatibility only.
- case 'f': // floating-point registers.
case 'c': // $25 for indirect jumps
case 'l': // lo register
case 'x': // hilo register pair
Info.setAllowsRegister();
return true;
+ case 'f': // floating-point registers.
+ Info.setAllowsRegister();
+ return FloatABI != SoftFloat;
case 'I': // Signed 16-bit constant
case 'J': // Integer 0
case 'K': // Unsigned 16-bit constant
@@ -279,7 +281,7 @@ public:
return TargetInfo::convertConstraint(Constraint);
}
- const char *getClobbers() const override {
+ std::string_view getClobbers() const override {
// In GCC, $1 is not widely used in generated code (it's used only in a few
// specific situations), so there is no real need for users to add it to
// the clobbers list if they want to use it in their inline assembly code.
@@ -314,6 +316,8 @@ public:
FloatABI = HardFloat;
DspRev = NoDSP;
FPMode = isFP64Default() ? FP64 : FPXX;
+ NoOddSpreg = false;
+ bool OddSpregGiven = false;
for (const auto &Feature : Features) {
if (Feature == "+single-float")
@@ -350,8 +354,18 @@ public:
IsNoABICalls = true;
else if (Feature == "+use-indirect-jump-hazard")
UseIndirectJumpHazard = true;
+ else if (Feature == "+nooddspreg") {
+ NoOddSpreg = true;
+ OddSpregGiven = false;
+ } else if (Feature == "-nooddspreg") {
+ NoOddSpreg = false;
+ OddSpregGiven = true;
+ }
}
+ if (FPMode == FPXX && !OddSpregGiven)
+ NoOddSpreg = true;
+
setDataLayout();
return true;
@@ -395,8 +409,8 @@ public:
{{"ra"}, "$31"}
};
if (ABI == "o32")
- return llvm::makeArrayRef(O32RegAliases);
- return llvm::makeArrayRef(NewABIRegAliases);
+ return llvm::ArrayRef(O32RegAliases);
+ return llvm::ArrayRef(NewABIRegAliases);
}
bool hasInt128Type() const override {
@@ -406,7 +420,7 @@ public:
unsigned getUnwindWordWidth() const override;
bool validateTarget(DiagnosticsEngine &Diags) const override;
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp
index 56f8a179db3c..c0b5db795e27 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp
@@ -16,18 +16,17 @@
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Frontend/OpenMP/OMPGridValues.h"
using namespace clang;
using namespace clang::targets;
-const Builtin::Info NVPTXTargetInfo::BuiltinInfo[] = {
+static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
- {#ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE},
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsNVPTX.def"
};
@@ -42,31 +41,20 @@ NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
PTXVersion = 32;
for (const StringRef Feature : Opts.FeaturesAsWritten) {
- if (!Feature.startswith("+ptx"))
+ int PTXV;
+ if (!Feature.starts_with("+ptx") ||
+ Feature.drop_front(4).getAsInteger(10, PTXV))
continue;
- PTXVersion = llvm::StringSwitch<unsigned>(Feature)
- .Case("+ptx72", 72)
- .Case("+ptx71", 71)
- .Case("+ptx70", 70)
- .Case("+ptx65", 65)
- .Case("+ptx64", 64)
- .Case("+ptx63", 63)
- .Case("+ptx61", 61)
- .Case("+ptx60", 60)
- .Case("+ptx50", 50)
- .Case("+ptx43", 43)
- .Case("+ptx42", 42)
- .Case("+ptx41", 41)
- .Case("+ptx40", 40)
- .Case("+ptx32", 32)
- .Default(32);
+ PTXVersion = PTXV; // TODO: should it be max(PTXVersion, PTXV)?
}
TLSSupported = false;
VLASupported = false;
AddrSpaceMap = &NVPTXAddrSpaceMap;
- GridValues = llvm::omp::NVPTXGpuGridValues;
UseAddrSpaceMapMangling = true;
+ // __bf16 is always available as a load/store only type.
+ BFloat16Width = BFloat16Align = 16;
+ BFloat16Format = &llvm::APFloat::BFloat();
// Define available target features
// These must be defined in sorted order!
@@ -85,7 +73,7 @@ NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
// types.
llvm::Triple HostTriple(Opts.HostTriple);
if (!HostTriple.isNVPTX())
- HostTarget.reset(AllocateTarget(llvm::Triple(Opts.HostTriple), Opts));
+ HostTarget = AllocateTarget(llvm::Triple(Opts.HostTriple), Opts);
// If no host target, make some guesses about the data layout and return.
if (!HostTarget) {
@@ -105,12 +93,14 @@ NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
default:
llvm_unreachable("TargetPointerWidth must be 32 or 64");
}
+
+ MaxAtomicInlineWidth = TargetPointerWidth;
return;
}
// Copy properties from host target.
- PointerWidth = HostTarget->getPointerWidth(/* AddrSpace = */ 0);
- PointerAlign = HostTarget->getPointerAlign(/* AddrSpace = */ 0);
+ PointerWidth = HostTarget->getPointerWidth(LangAS::Default);
+ PointerAlign = HostTarget->getPointerAlign(LangAS::Default);
BoolWidth = HostTarget->getBoolWidth();
BoolAlign = HostTarget->getBoolAlign();
IntWidth = HostTarget->getIntWidth();
@@ -131,7 +121,7 @@ NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
HostTarget->getDefaultAlignForAttributeAligned();
SizeType = HostTarget->getSizeType();
IntMaxType = HostTarget->getIntMaxType();
- PtrDiffType = HostTarget->getPtrDiffType(/* AddrSpace = */ 0);
+ PtrDiffType = HostTarget->getPtrDiffType(LangAS::Default);
IntPtrType = HostTarget->getIntPtrType();
WCharType = HostTarget->getWCharType();
WIntType = HostTarget->getWIntType();
@@ -165,7 +155,7 @@ NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
}
ArrayRef<const char *> NVPTXTargetInfo::getGCCRegNames() const {
- return llvm::makeArrayRef(GCCRegNames);
+ return llvm::ArrayRef(GCCRegNames);
}
bool NVPTXTargetInfo::hasFeature(StringRef Feature) const {
@@ -178,7 +168,7 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("__PTX__");
Builder.defineMacro("__NVPTX__");
- if (Opts.CUDAIsDevice) {
+ if (Opts.CUDAIsDevice || Opts.OpenMPIsTargetDevice || !HostTarget) {
// Set __CUDA_ARCH__ for the GPU specified.
std::string CUDAArchCode = [this] {
switch (GPU) {
@@ -204,6 +194,9 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
case CudaArch::GFX909:
case CudaArch::GFX90a:
case CudaArch::GFX90c:
+ case CudaArch::GFX940:
+ case CudaArch::GFX941:
+ case CudaArch::GFX942:
case CudaArch::GFX1010:
case CudaArch::GFX1011:
case CudaArch::GFX1012:
@@ -214,6 +207,16 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
case CudaArch::GFX1033:
case CudaArch::GFX1034:
case CudaArch::GFX1035:
+ case CudaArch::GFX1036:
+ case CudaArch::GFX1100:
+ case CudaArch::GFX1101:
+ case CudaArch::GFX1102:
+ case CudaArch::GFX1103:
+ case CudaArch::GFX1150:
+ case CudaArch::GFX1151:
+ case CudaArch::GFX1200:
+ case CudaArch::GFX1201:
+ case CudaArch::Generic:
case CudaArch::LAST:
break;
case CudaArch::UNUSED:
@@ -254,14 +257,23 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
return "800";
case CudaArch::SM_86:
return "860";
+ case CudaArch::SM_87:
+ return "870";
+ case CudaArch::SM_89:
+ return "890";
+ case CudaArch::SM_90:
+ case CudaArch::SM_90a:
+ return "900";
}
llvm_unreachable("unhandled CudaArch");
}();
Builder.defineMacro("__CUDA_ARCH__", CUDAArchCode);
+ if (GPU == CudaArch::SM_90a)
+ Builder.defineMacro("__CUDA_ARCH_FEAT_SM90_ALL", "1");
}
}
ArrayRef<Builtin::Info> NVPTXTargetInfo::getTargetBuiltins() const {
- return llvm::makeArrayRef(BuiltinInfo, clang::NVPTX::LastTSBuiltin -
- Builtin::FirstTSBuiltin);
+ return llvm::ArrayRef(BuiltinInfo,
+ clang::NVPTX::LastTSBuiltin - Builtin::FirstTSBuiltin);
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h b/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h
index c7db3cdaaf10..20d76b702a94 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.h
@@ -16,8 +16,9 @@
#include "clang/Basic/Cuda.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
+#include <optional>
namespace clang {
namespace targets {
@@ -42,7 +43,11 @@ static const unsigned NVPTXAddrSpaceMap[] = {
0, // sycl_private
0, // ptr32_sptr
0, // ptr32_uptr
- 0 // ptr64
+ 0, // ptr64
+ 0, // hlsl_groupshared
+ // Wasm address space values for this target are dummy values,
+ // as it is only enabled for Wasm targets.
+ 20, // wasm_funcref
};
/// The DWARF address class. Taken from
@@ -57,7 +62,6 @@ static const int NVPTXDWARFAddrSpaceMap[] = {
class LLVM_LIBRARY_VISIBILITY NVPTXTargetInfo : public TargetInfo {
static const char *const GCCRegNames[];
- static const Builtin::Info BuiltinInfo[];
CudaArch GPU;
uint32_t PTXVersion;
std::unique_ptr<TargetInfo> HostTarget;
@@ -86,7 +90,7 @@ public:
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
// No aliases.
- return None;
+ return std::nullopt;
}
bool validateAsmConstraint(const char *&Name,
@@ -105,7 +109,7 @@ public:
}
}
- const char *getClobbers() const override {
+ std::string_view getClobbers() const override {
// FIXME: Is this really right?
return "";
}
@@ -121,7 +125,7 @@ public:
void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override {
for (int i = static_cast<int>(CudaArch::SM_20);
- i < static_cast<int>(CudaArch::LAST); ++i)
+ i < static_cast<int>(CudaArch::Generic); ++i)
Values.emplace_back(CudaArchToString(static_cast<CudaArch>(i)));
}
@@ -147,17 +151,21 @@ public:
Opts["cl_khr_local_int32_extended_atomics"] = true;
}
+ const llvm::omp::GV &getGridValue() const override {
+ return llvm::omp::NVPTXGridValues;
+ }
+
/// \returns If a target requires an address within a target specific address
/// space \p AddressSpace to be converted in order to be used, then return the
/// corresponding target specific DWARF address space.
///
- /// \returns Otherwise return None and no conversion will be emitted in the
- /// DWARF.
- Optional<unsigned>
+ /// \returns Otherwise return std::nullopt and no conversion will be emitted
+ /// in the DWARF.
+ std::optional<unsigned>
getDWARFAddressSpace(unsigned AddressSpace) const override {
- if (AddressSpace >= llvm::array_lengthof(NVPTXDWARFAddrSpaceMap) ||
+ if (AddressSpace >= std::size(NVPTXDWARFAddrSpaceMap) ||
NVPTXDWARFAddrSpaceMap[AddressSpace] < 0)
- return llvm::None;
+ return std::nullopt;
return NVPTXDWARFAddrSpaceMap[AddressSpace];
}
@@ -171,7 +179,10 @@ public:
return CCCR_Warning;
}
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
+ bool hasBFloat16Type() const override { return true; }
+
+ CudaArch getGPU() const { return GPU; }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp
index 7cd4a5190120..899aefa6173a 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp
@@ -48,12 +48,12 @@ void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
Builder.defineMacro("_REENTRANT");
// Get the platform type and version number from the triple.
- unsigned Maj, Min, Rev;
+ VersionTuple OsVersion;
if (Triple.isMacOSX()) {
- Triple.getMacOSXVersion(Maj, Min, Rev);
+ Triple.getMacOSXVersion(OsVersion);
PlatformName = "macos";
} else {
- Triple.getOSVersion(Maj, Min, Rev);
+ OsVersion = Triple.getOSVersion();
PlatformName = llvm::Triple::getOSTypeName(Triple.getOS());
if (PlatformName == "ios" && Triple.isMacCatalystEnvironment())
PlatformName = "maccatalyst";
@@ -63,78 +63,63 @@ void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
// generating code for Win32 ABI. No need to emit
// __ENVIRONMENT_XX_OS_VERSION_MIN_REQUIRED__.
if (PlatformName == "win32") {
- PlatformMinVersion = VersionTuple(Maj, Min, Rev);
+ PlatformMinVersion = OsVersion;
return;
}
- // Set the appropriate OS version define.
- if (Triple.isiOS()) {
- assert(Maj < 100 && Min < 100 && Rev < 100 && "Invalid version!");
- char Str[7];
- if (Maj < 10) {
- Str[0] = '0' + Maj;
- Str[1] = '0' + (Min / 10);
- Str[2] = '0' + (Min % 10);
- Str[3] = '0' + (Rev / 10);
- Str[4] = '0' + (Rev % 10);
- Str[5] = '\0';
- } else {
- // Handle versions >= 10.
- Str[0] = '0' + (Maj / 10);
- Str[1] = '0' + (Maj % 10);
- Str[2] = '0' + (Min / 10);
- Str[3] = '0' + (Min % 10);
- Str[4] = '0' + (Rev / 10);
- Str[5] = '0' + (Rev % 10);
- Str[6] = '\0';
- }
- if (Triple.isTvOS())
- Builder.defineMacro("__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__", Str);
- else
- Builder.defineMacro("__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__",
- Str);
+ assert(OsVersion < VersionTuple(100) && "Invalid version!");
+ char Str[7];
+ if (Triple.isMacOSX() && OsVersion < VersionTuple(10, 10)) {
+ Str[0] = '0' + (OsVersion.getMajor() / 10);
+ Str[1] = '0' + (OsVersion.getMajor() % 10);
+ Str[2] = '0' + std::min(OsVersion.getMinor().value_or(0), 9U);
+ Str[3] = '0' + std::min(OsVersion.getSubminor().value_or(0), 9U);
+ Str[4] = '\0';
+ } else if (!Triple.isMacOSX() && OsVersion.getMajor() < 10) {
+ Str[0] = '0' + OsVersion.getMajor();
+ Str[1] = '0' + (OsVersion.getMinor().value_or(0) / 10);
+ Str[2] = '0' + (OsVersion.getMinor().value_or(0) % 10);
+ Str[3] = '0' + (OsVersion.getSubminor().value_or(0) / 10);
+ Str[4] = '0' + (OsVersion.getSubminor().value_or(0) % 10);
+ Str[5] = '\0';
+ } else {
+ // Handle versions >= 10.
+ Str[0] = '0' + (OsVersion.getMajor() / 10);
+ Str[1] = '0' + (OsVersion.getMajor() % 10);
+ Str[2] = '0' + (OsVersion.getMinor().value_or(0) / 10);
+ Str[3] = '0' + (OsVersion.getMinor().value_or(0) % 10);
+ Str[4] = '0' + (OsVersion.getSubminor().value_or(0) / 10);
+ Str[5] = '0' + (OsVersion.getSubminor().value_or(0) % 10);
+ Str[6] = '\0';
+ }
+ // Set the appropriate OS version define.
+ if (Triple.isTvOS()) {
+ Builder.defineMacro("__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__", Str);
+ } else if (Triple.isiOS()) {
+ Builder.defineMacro("__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__", Str);
} else if (Triple.isWatchOS()) {
- assert(Maj < 10 && Min < 100 && Rev < 100 && "Invalid version!");
- char Str[6];
- Str[0] = '0' + Maj;
- Str[1] = '0' + (Min / 10);
- Str[2] = '0' + (Min % 10);
- Str[3] = '0' + (Rev / 10);
- Str[4] = '0' + (Rev % 10);
- Str[5] = '\0';
Builder.defineMacro("__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__", Str);
+ } else if (Triple.isDriverKit()) {
+ assert(OsVersion.getMinor().value_or(0) < 100 &&
+ OsVersion.getSubminor().value_or(0) < 100 && "Invalid version!");
+ Builder.defineMacro("__ENVIRONMENT_DRIVERKIT_VERSION_MIN_REQUIRED__", Str);
} else if (Triple.isMacOSX()) {
- // Note that the Driver allows versions which aren't representable in the
- // define (because we only get a single digit for the minor and micro
- // revision numbers). So, we limit them to the maximum representable
- // version.
- assert(Maj < 100 && Min < 100 && Rev < 100 && "Invalid version!");
- char Str[7];
- if (Maj < 10 || (Maj == 10 && Min < 10)) {
- Str[0] = '0' + (Maj / 10);
- Str[1] = '0' + (Maj % 10);
- Str[2] = '0' + std::min(Min, 9U);
- Str[3] = '0' + std::min(Rev, 9U);
- Str[4] = '\0';
- } else {
- // Handle versions > 10.9.
- Str[0] = '0' + (Maj / 10);
- Str[1] = '0' + (Maj % 10);
- Str[2] = '0' + (Min / 10);
- Str[3] = '0' + (Min % 10);
- Str[4] = '0' + (Rev / 10);
- Str[5] = '0' + (Rev % 10);
- Str[6] = '\0';
- }
Builder.defineMacro("__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__", Str);
}
- // Tell users about the kernel if there is one.
- if (Triple.isOSDarwin())
+ if (Triple.isOSDarwin()) {
+ // Any darwin OS defines a general darwin OS version macro in addition
+ // to the other OS specific macros.
+ assert(OsVersion.getMinor().value_or(0) < 100 &&
+ OsVersion.getSubminor().value_or(0) < 100 && "Invalid version!");
+ Builder.defineMacro("__ENVIRONMENT_OS_VERSION_MIN_REQUIRED__", Str);
+
+ // Tell users about the kernel if there is one.
Builder.defineMacro("__MACH__");
+ }
- PlatformMinVersion = VersionTuple(Maj, Min, Rev);
+ PlatformMinVersion = OsVersion;
}
static void addMinGWDefines(const llvm::Triple &Triple, const LangOptions &Opts,
@@ -165,6 +150,54 @@ static void addVisualCDefines(const LangOptions &Opts, MacroBuilder &Builder) {
if (!Opts.CharIsSigned)
Builder.defineMacro("_CHAR_UNSIGNED");
+ // "The /fp:contract option allows the compiler to generate floating-point
+ // contractions [...]"
+ if (Opts.getDefaultFPContractMode() != LangOptions::FPModeKind::FPM_Off)
+ Builder.defineMacro("_M_FP_CONTRACT");
+
+ // "The /fp:except option generates code to ensures that any unmasked
+ // floating-point exceptions are raised at the exact point at which they
+ // occur, and that no other floating-point exceptions are raised."
+ if (Opts.getDefaultExceptionMode() ==
+ LangOptions::FPExceptionModeKind::FPE_Strict)
+ Builder.defineMacro("_M_FP_EXCEPT");
+
+ // "The /fp:fast option allows the compiler to reorder, combine, or simplify
+ // floating-point operations to optimize floating-point code for speed and
+ // space. The compiler may omit rounding at assignment statements,
+ // typecasts, or function calls. It may reorder operations or make algebraic
+ // transforms, for example, by use of associative and distributive laws. It
+ // may reorder code even if such transformations result in observably
+ // different rounding behavior."
+ //
+ // "Under /fp:precise and /fp:strict, the compiler doesn't do any mathematical
+ // transformation unless the transformation is guaranteed to produce a bitwise
+ // identical result."
+ const bool any_imprecise_flags =
+ Opts.FastMath || Opts.FiniteMathOnly || Opts.UnsafeFPMath ||
+ Opts.AllowFPReassoc || Opts.NoHonorNaNs || Opts.NoHonorInfs ||
+ Opts.NoSignedZero || Opts.AllowRecip || Opts.ApproxFunc;
+
+ // "Under both /fp:precise and /fp:fast, the compiler generates code intended
+ // to run in the default floating-point environment."
+ //
+ // "[The] default floating point environment [...] sets the rounding mode
+ // to round to nearest."
+ if (Opts.getDefaultRoundingMode() ==
+ LangOptions::RoundingMode::NearestTiesToEven) {
+ if (any_imprecise_flags) {
+ Builder.defineMacro("_M_FP_FAST");
+ } else {
+ Builder.defineMacro("_M_FP_PRECISE");
+ }
+ } else if (!any_imprecise_flags && Opts.getDefaultRoundingMode() ==
+ LangOptions::RoundingMode::Dynamic) {
+ // "Under /fp:strict, the compiler generates code that allows the
+ // program to safely unmask floating-point exceptions, read or write
+ // floating-point status registers, or change rounding modes."
+ Builder.defineMacro("_M_FP_STRICT");
+ }
+
// FIXME: POSIXThreads isn't exactly the option this should be defined for,
// but it works for now.
if (Opts.POSIXThreads)
@@ -181,13 +214,19 @@ static void addVisualCDefines(const LangOptions &Opts, MacroBuilder &Builder) {
Builder.defineMacro("_HAS_CHAR16_T_LANGUAGE_SUPPORT", Twine(1));
if (Opts.isCompatibleWithMSVC(LangOptions::MSVC2015)) {
- if (Opts.CPlusPlus20)
- Builder.defineMacro("_MSVC_LANG", "201705L");
+ if (Opts.CPlusPlus23)
+ // TODO update to the proper value.
+ Builder.defineMacro("_MSVC_LANG", "202004L");
+ else if (Opts.CPlusPlus20)
+ Builder.defineMacro("_MSVC_LANG", "202002L");
else if (Opts.CPlusPlus17)
Builder.defineMacro("_MSVC_LANG", "201703L");
else if (Opts.CPlusPlus14)
Builder.defineMacro("_MSVC_LANG", "201402L");
}
+
+ if (Opts.isCompatibleWithMSVC(LangOptions::MSVC2022_3))
+ Builder.defineMacro("_MSVC_CONSTEXPR_ATTRIBUTE");
}
if (Opts.MicrosoftExt) {
@@ -200,7 +239,22 @@ static void addVisualCDefines(const LangOptions &Opts, MacroBuilder &Builder) {
}
}
+ if (!Opts.MSVolatile)
+ Builder.defineMacro("_ISO_VOLATILE");
+
+ if (Opts.Kernel)
+ Builder.defineMacro("_KERNEL_MODE");
+
Builder.defineMacro("_INTEGRAL_MAX_BITS", "64");
+ Builder.defineMacro("__STDC_NO_THREADS__");
+
+ // Starting with VS 2022 17.1, MSVC predefines the below macro to inform
+ // users of the execution character set defined at compile time.
+ // The value given is the Windows Code Page Identifier:
+ // https://docs.microsoft.com/en-us/windows/win32/intl/code-page-identifiers
+ //
+ // Clang currently only supports UTF-8, so we'll use 65001
+ Builder.defineMacro("_MSVC_EXECUTION_CHARACTER_SET", "65001");
}
void addWindowsDefines(const llvm::Triple &Triple, const LangOptions &Opts,
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h b/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h
index 3fe39ed64d9c..4366c1149e40 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h
@@ -34,42 +34,6 @@ public:
}
};
-// CloudABI Target
-template <typename Target>
-class LLVM_LIBRARY_VISIBILITY CloudABITargetInfo : public OSTargetInfo<Target> {
-protected:
- void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
- MacroBuilder &Builder) const override {
- Builder.defineMacro("__CloudABI__");
- Builder.defineMacro("__ELF__");
-
- // CloudABI uses ISO/IEC 10646:2012 for wchar_t, char16_t and char32_t.
- Builder.defineMacro("__STDC_ISO_10646__", "201206L");
- Builder.defineMacro("__STDC_UTF_16__");
- Builder.defineMacro("__STDC_UTF_32__");
- }
-
-public:
- CloudABITargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
- : OSTargetInfo<Target>(Triple, Opts) {}
-};
-
-// Ananas target
-template <typename Target>
-class LLVM_LIBRARY_VISIBILITY AnanasTargetInfo : public OSTargetInfo<Target> {
-protected:
- void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
- MacroBuilder &Builder) const override {
- // Ananas defines
- Builder.defineMacro("__Ananas__");
- Builder.defineMacro("__ELF__");
- }
-
-public:
- AnanasTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
- : OSTargetInfo<Target>(Triple, Opts) {}
-};
-
void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
const llvm::Triple &Triple, StringRef &PlatformName,
VersionTuple &PlatformMinVersion);
@@ -108,7 +72,10 @@ public:
this->TLSSupported = !Triple.isOSVersionLT(2);
else
this->TLSSupported = !Triple.isOSVersionLT(3);
- }
+ } else if (Triple.isDriverKit()) {
+ // No TLS on DriverKit.
+ } else if (Triple.isXROS())
+ this->TLSSupported = true;
this->MCountName = "\01mcount";
}
@@ -143,14 +110,15 @@ public:
case llvm::Triple::WatchOS: // Earliest supporting version is 5.0.0.
MinVersion = llvm::VersionTuple(5U);
break;
+ case llvm::Triple::XROS:
+ MinVersion = llvm::VersionTuple(0);
+ break;
default:
// Conservatively return 8 bytes if OS is unknown.
return 64;
}
- unsigned Major, Minor, Micro;
- T.getOSVersion(Major, Minor, Micro);
- if (llvm::VersionTuple(Major, Minor, Micro) < MinVersion)
+ if (T.getOSVersion() < MinVersion)
return 64;
return OSTargetInfo<Target>::getExnObjectAlignment();
}
@@ -163,6 +131,10 @@ public:
: TargetInfo::UnsignedLongLong)
: TargetInfo::getLeastIntTypeByWidth(BitWidth, IsSigned);
}
+
+ bool areDefaultedSMFStillPOD(const LangOptions &) const override {
+ return false;
+ }
};
// DragonFlyBSD Target
@@ -175,10 +147,11 @@ protected:
// DragonFly defines; list based off of gcc output
Builder.defineMacro("__DragonFly__");
Builder.defineMacro("__DragonFly_cc_version", "100001");
- Builder.defineMacro("__ELF__");
Builder.defineMacro("__KPRINTF_ATTRIBUTE__");
Builder.defineMacro("__tune_i386__");
DefineStd(Builder, "unix", Opts);
+ if (this->HasFloat128)
+ Builder.defineMacro("__FLOAT128__");
}
public:
@@ -188,6 +161,7 @@ public:
default:
case llvm::Triple::x86:
case llvm::Triple::x86_64:
+ this->HasFloat128 = true;
this->MCountName = ".mcount";
break;
}
@@ -217,7 +191,8 @@ protected:
Builder.defineMacro("__FreeBSD_cc_version", Twine(CCVersion));
Builder.defineMacro("__KPRINTF_ATTRIBUTE__");
DefineStd(Builder, "unix", Opts);
- Builder.defineMacro("__ELF__");
+ if (this->HasFloat128)
+ Builder.defineMacro("__FLOAT128__");
// On FreeBSD, wchar_t contains the number of the code point as
// used by the character set of the locale. These character sets are
@@ -235,9 +210,11 @@ public:
FreeBSDTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: OSTargetInfo<Target>(Triple, Opts) {
switch (Triple.getArch()) {
- default:
case llvm::Triple::x86:
case llvm::Triple::x86_64:
+ this->HasFloat128 = true;
+ [[fallthrough]];
+ default:
this->MCountName = ".mcount";
break;
case llvm::Triple::mips:
@@ -269,7 +246,6 @@ protected:
DefineStd(Builder, "unix", Opts);
Builder.defineMacro("__FreeBSD_kernel__");
Builder.defineMacro("__GLIBC__");
- Builder.defineMacro("__ELF__");
if (Opts.POSIXThreads)
Builder.defineMacro("_REENTRANT");
if (Opts.CPlusPlus)
@@ -277,8 +253,7 @@ protected:
}
public:
- KFreeBSDTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
- : OSTargetInfo<Target>(Triple, Opts) {}
+ using OSTargetInfo<Target>::OSTargetInfo;
};
// Haiku Target
@@ -289,9 +264,8 @@ protected:
MacroBuilder &Builder) const override {
// Haiku defines; list based off of gcc output
Builder.defineMacro("__HAIKU__");
- Builder.defineMacro("__ELF__");
DefineStd(Builder, "unix", Opts);
- if (this->HasFloat128)
+ if (this->HasFloat128)
Builder.defineMacro("__FLOAT128__");
}
@@ -302,7 +276,6 @@ public:
this->IntPtrType = TargetInfo::SignedLong;
this->PtrDiffType = TargetInfo::SignedLong;
this->ProcessIDType = TargetInfo::SignedLong;
- this->TLSSupported = false;
switch (Triple.getArch()) {
default:
break;
@@ -326,39 +299,13 @@ protected:
Builder.defineMacro("__gnu_hurd__");
Builder.defineMacro("__MACH__");
Builder.defineMacro("__GLIBC__");
- Builder.defineMacro("__ELF__");
if (Opts.POSIXThreads)
Builder.defineMacro("_REENTRANT");
if (Opts.CPlusPlus)
Builder.defineMacro("_GNU_SOURCE");
}
public:
- HurdTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
- : OSTargetInfo<Target>(Triple, Opts) {}
-};
-
-// Minix Target
-template <typename Target>
-class LLVM_LIBRARY_VISIBILITY MinixTargetInfo : public OSTargetInfo<Target> {
-protected:
- void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
- MacroBuilder &Builder) const override {
- // Minix defines
-
- Builder.defineMacro("__minix", "3");
- Builder.defineMacro("_EM_WSIZE", "4");
- Builder.defineMacro("_EM_PSIZE", "4");
- Builder.defineMacro("_EM_SSIZE", "2");
- Builder.defineMacro("_EM_LSIZE", "4");
- Builder.defineMacro("_EM_FSIZE", "4");
- Builder.defineMacro("_EM_DSIZE", "8");
- Builder.defineMacro("__ELF__");
- DefineStd(Builder, "unix", Opts);
- }
-
-public:
- MinixTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
- : OSTargetInfo<Target>(Triple, Opts) {}
+ using OSTargetInfo<Target>::OSTargetInfo;
};
// Linux target
@@ -370,13 +317,11 @@ protected:
// Linux defines; list based off of gcc output
DefineStd(Builder, "unix", Opts);
DefineStd(Builder, "linux", Opts);
- Builder.defineMacro("__ELF__");
if (Triple.isAndroid()) {
Builder.defineMacro("__ANDROID__", "1");
- unsigned Maj, Min, Rev;
- Triple.getEnvironmentVersion(Maj, Min, Rev);
this->PlatformName = "android";
- this->PlatformMinVersion = VersionTuple(Maj, Min, Rev);
+ this->PlatformMinVersion = Triple.getEnvironmentVersion();
+ const unsigned Maj = this->PlatformMinVersion.getMajor();
if (Maj) {
Builder.defineMacro("__ANDROID_MIN_SDK_VERSION__", Twine(Maj));
// This historical but ambiguous name for the minSdkVersion macro. Keep
@@ -433,15 +378,24 @@ protected:
// NetBSD defines; list based off of gcc output
Builder.defineMacro("__NetBSD__");
Builder.defineMacro("__unix__");
- Builder.defineMacro("__ELF__");
if (Opts.POSIXThreads)
Builder.defineMacro("_REENTRANT");
+ if (this->HasFloat128)
+ Builder.defineMacro("__FLOAT128__");
}
public:
NetBSDTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: OSTargetInfo<Target>(Triple, Opts) {
this->MCountName = "__mcount";
+ switch (Triple.getArch()) {
+ default:
+ break;
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ this->HasFloat128 = true;
+ break;
+ }
}
};
@@ -455,16 +409,13 @@ protected:
Builder.defineMacro("__OpenBSD__");
DefineStd(Builder, "unix", Opts);
- Builder.defineMacro("__ELF__");
if (Opts.POSIXThreads)
Builder.defineMacro("_REENTRANT");
if (this->HasFloat128)
Builder.defineMacro("__FLOAT128__");
- if (Opts.C11) {
- Builder.defineMacro("__STDC_NO_ATOMICS__");
+ if (Opts.C11)
Builder.defineMacro("__STDC_NO_THREADS__");
- }
}
public:
@@ -477,7 +428,7 @@ public:
case llvm::Triple::x86:
case llvm::Triple::x86_64:
this->HasFloat128 = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
this->MCountName = "__mcount";
break;
@@ -496,23 +447,6 @@ public:
}
};
-// PSP Target
-template <typename Target>
-class LLVM_LIBRARY_VISIBILITY PSPTargetInfo : public OSTargetInfo<Target> {
-protected:
- void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
- MacroBuilder &Builder) const override {
- // PSP defines; list based on the output of the pspdev gcc toolchain.
- Builder.defineMacro("PSP");
- Builder.defineMacro("_PSP");
- Builder.defineMacro("__psp__");
- Builder.defineMacro("__ELF__");
- }
-
-public:
- PSPTargetInfo(const llvm::Triple &Triple) : OSTargetInfo<Target>(Triple) {}
-};
-
// PS3 PPU Target
template <typename Target>
class LLVM_LIBRARY_VISIBILITY PS3PPUTargetInfo : public OSTargetInfo<Target> {
@@ -520,10 +454,8 @@ protected:
void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
MacroBuilder &Builder) const override {
// PS3 PPU defines.
- Builder.defineMacro("__PPC__");
Builder.defineMacro("__PPU__");
Builder.defineMacro("__CELLOS_LV2__");
- Builder.defineMacro("__ELF__");
Builder.defineMacro("__LP32__");
Builder.defineMacro("_ARCH_PPC64");
Builder.defineMacro("__powerpc64__");
@@ -537,12 +469,13 @@ public:
this->IntMaxType = TargetInfo::SignedLongLong;
this->Int64Type = TargetInfo::SignedLongLong;
this->SizeType = TargetInfo::UnsignedInt;
- this->resetDataLayout("E-m:e-p:32:32-i64:64-n32:64");
+ this->resetDataLayout("E-m:e-p:32:32-Fi64-i64:64-n32:64");
}
};
+// Common base class for PS4/PS5 targets.
template <typename Target>
-class LLVM_LIBRARY_VISIBILITY PS4OSTargetInfo : public OSTargetInfo<Target> {
+class LLVM_LIBRARY_VISIBILITY PSOSTargetInfo : public OSTargetInfo<Target> {
protected:
void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
MacroBuilder &Builder) const override {
@@ -550,35 +483,69 @@ protected:
Builder.defineMacro("__FreeBSD_cc_version", "900001");
Builder.defineMacro("__KPRINTF_ATTRIBUTE__");
DefineStd(Builder, "unix", Opts);
- Builder.defineMacro("__ELF__");
Builder.defineMacro("__SCE__");
- Builder.defineMacro("__ORBIS__");
+ Builder.defineMacro("__STDC_NO_COMPLEX__");
+ Builder.defineMacro("__STDC_NO_THREADS__");
}
public:
- PS4OSTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ PSOSTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: OSTargetInfo<Target>(Triple, Opts) {
this->WCharType = TargetInfo::UnsignedShort;
- // On PS4, TLS variable cannot be aligned to more than 32 bytes (256 bits).
+ // On PS4/PS5, TLS variable cannot be aligned to more than 32 bytes (256
+ // bits).
this->MaxTLSAlign = 256;
- // On PS4, do not honor explicit bit field alignment,
+ // On PS4/PS5, do not honor explicit bit field alignment,
// as in "__attribute__((aligned(2))) int b : 1;".
this->UseExplicitBitFieldAlignment = false;
- switch (Triple.getArch()) {
- default:
- case llvm::Triple::x86_64:
- this->MCountName = ".mcount";
- this->NewAlign = 256;
- break;
- }
+ this->MCountName = ".mcount";
+ this->NewAlign = 256;
+ this->SuitableAlign = 256;
}
+
TargetInfo::CallingConvCheckResult
checkCallingConvention(CallingConv CC) const override {
return (CC == CC_C) ? TargetInfo::CCCR_OK : TargetInfo::CCCR_Error;
}
+
+ bool areDefaultedSMFStillPOD(const LangOptions &) const override {
+ return false;
+ }
+};
+
+// PS4 Target
+template <typename Target>
+class LLVM_LIBRARY_VISIBILITY PS4OSTargetInfo : public PSOSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ // Start with base class defines.
+ PSOSTargetInfo<Target>::getOSDefines(Opts, Triple, Builder);
+
+ Builder.defineMacro("__ORBIS__");
+ }
+
+public:
+ using PSOSTargetInfo<Target>::PSOSTargetInfo;
+};
+
+// PS5 Target
+template <typename Target>
+class LLVM_LIBRARY_VISIBILITY PS5OSTargetInfo : public PSOSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ // Start with base class defines.
+ PSOSTargetInfo<Target>::getOSDefines(Opts, Triple, Builder);
+
+ Builder.defineMacro("__PROSPERO__");
+ }
+
+public:
+ using PSOSTargetInfo<Target>::PSOSTargetInfo;
};
// RTEMS Target
@@ -590,7 +557,6 @@ protected:
// RTEMS defines; list based off of gcc output
Builder.defineMacro("__rtems__");
- Builder.defineMacro("__ELF__");
if (Opts.CPlusPlus)
Builder.defineMacro("_GNU_SOURCE");
}
@@ -625,7 +591,6 @@ protected:
MacroBuilder &Builder) const override {
DefineStd(Builder, "sun", Opts);
DefineStd(Builder, "unix", Opts);
- Builder.defineMacro("__ELF__");
Builder.defineMacro("__svr4__");
Builder.defineMacro("__SVR4");
// Solaris headers require _XOPEN_SOURCE to be set to 600 for C99 and
@@ -678,9 +643,11 @@ protected:
DefineStd(Builder, "unix", Opts);
Builder.defineMacro("_IBMR2");
Builder.defineMacro("_POWER");
+ Builder.defineMacro("__THW_BIG_ENDIAN__");
Builder.defineMacro("_AIX");
Builder.defineMacro("__TOS_AIX__");
+ Builder.defineMacro("__HOS_AIX__");
if (Opts.C11) {
Builder.defineMacro("__STDC_NO_ATOMICS__");
@@ -690,23 +657,32 @@ protected:
if (Opts.EnableAIXExtendedAltivecABI)
Builder.defineMacro("__EXTABI__");
- unsigned Major, Minor, Micro;
- Triple.getOSVersion(Major, Minor, Micro);
+ VersionTuple OsVersion = Triple.getOSVersion();
// Define AIX OS-Version Macros.
// Includes logic for legacy versions of AIX; no specific intent to support.
- std::pair<int, int> OsVersion = {Major, Minor};
- if (OsVersion >= std::make_pair(3, 2)) Builder.defineMacro("_AIX32");
- if (OsVersion >= std::make_pair(4, 1)) Builder.defineMacro("_AIX41");
- if (OsVersion >= std::make_pair(4, 3)) Builder.defineMacro("_AIX43");
- if (OsVersion >= std::make_pair(5, 0)) Builder.defineMacro("_AIX50");
- if (OsVersion >= std::make_pair(5, 1)) Builder.defineMacro("_AIX51");
- if (OsVersion >= std::make_pair(5, 2)) Builder.defineMacro("_AIX52");
- if (OsVersion >= std::make_pair(5, 3)) Builder.defineMacro("_AIX53");
- if (OsVersion >= std::make_pair(6, 1)) Builder.defineMacro("_AIX61");
- if (OsVersion >= std::make_pair(7, 1)) Builder.defineMacro("_AIX71");
- if (OsVersion >= std::make_pair(7, 2)) Builder.defineMacro("_AIX72");
- if (OsVersion >= std::make_pair(7, 3)) Builder.defineMacro("_AIX73");
+ if (OsVersion >= VersionTuple(3, 2))
+ Builder.defineMacro("_AIX32");
+ if (OsVersion >= VersionTuple(4, 1))
+ Builder.defineMacro("_AIX41");
+ if (OsVersion >= VersionTuple(4, 3))
+ Builder.defineMacro("_AIX43");
+ if (OsVersion >= VersionTuple(5, 0))
+ Builder.defineMacro("_AIX50");
+ if (OsVersion >= VersionTuple(5, 1))
+ Builder.defineMacro("_AIX51");
+ if (OsVersion >= VersionTuple(5, 2))
+ Builder.defineMacro("_AIX52");
+ if (OsVersion >= VersionTuple(5, 3))
+ Builder.defineMacro("_AIX53");
+ if (OsVersion >= VersionTuple(6, 1))
+ Builder.defineMacro("_AIX61");
+ if (OsVersion >= VersionTuple(7, 1))
+ Builder.defineMacro("_AIX71");
+ if (OsVersion >= VersionTuple(7, 2))
+ Builder.defineMacro("_AIX72");
+ if (OsVersion >= VersionTuple(7, 3))
+ Builder.defineMacro("_AIX73");
// FIXME: Do not define _LONG_LONG when -fno-long-long is specified.
Builder.defineMacro("_LONG_LONG");
@@ -729,6 +705,7 @@ protected:
public:
AIXTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: OSTargetInfo<Target>(Triple, Opts) {
+ this->MCountName = "__mcount";
this->TheCXXABI.set(TargetCXXABI::XL);
if (this->PointerWidth == 64) {
@@ -740,10 +717,15 @@ public:
}
// AIX sets FLT_EVAL_METHOD to be 1.
- unsigned getFloatEvalMethod() const override { return 1; }
- bool hasInt128Type() const override { return false; }
+ LangOptions::FPEvalMethodKind getFPEvalMethod() const override {
+ return LangOptions::FPEvalMethodKind::FEM_Double;
+ }
bool defaultsToAIXPowerAlignment() const override { return true; }
+
+ bool areDefaultedSMFStillPOD(const LangOptions &) const override {
+ return false;
+ }
};
// z/OS target
@@ -754,13 +736,11 @@ protected:
MacroBuilder &Builder) const override {
// FIXME: _LONG_LONG should not be defined under -std=c89.
Builder.defineMacro("_LONG_LONG");
- Builder.defineMacro("_OPEN_DEFAULT");
- // _UNIX03_WITHDRAWN is required to build libcxx.
- Builder.defineMacro("_UNIX03_WITHDRAWN");
Builder.defineMacro("__370__");
Builder.defineMacro("__BFP__");
// FIXME: __BOOL__ should not be defined under -std=c89.
Builder.defineMacro("__BOOL__");
+ Builder.defineMacro("__COMPILER_VER__", "0x50000000");
Builder.defineMacro("__LONGNAME__");
Builder.defineMacro("__MVS__");
Builder.defineMacro("__THW_370__");
@@ -772,17 +752,6 @@ protected:
if (this->PointerWidth == 64)
Builder.defineMacro("__64BIT__");
- if (Opts.CPlusPlus) {
- Builder.defineMacro("__DLL__");
- // _XOPEN_SOURCE=600 is required to build libcxx.
- Builder.defineMacro("_XOPEN_SOURCE", "600");
- }
-
- if (Opts.GNUMode) {
- Builder.defineMacro("_MI_BUILTIN");
- Builder.defineMacro("_EXT");
- }
-
if (Opts.CPlusPlus && Opts.WChar) {
// Macro __wchar_t is defined so that the wchar_t data
// type is not declared as a typedef in system headers.
@@ -801,7 +770,11 @@ public:
this->UseZeroLengthBitfieldAlignment = true;
this->UseLeadingZeroLengthBitfield = false;
this->ZeroLengthBitfieldBoundary = 32;
- this->DefaultAlignForAttributeAligned = 128;
+ this->TheCXXABI.set(TargetCXXABI::XL);
+ }
+
+ bool areDefaultedSMFStillPOD(const LangOptions &) const override {
+ return false;
}
};
@@ -836,7 +809,6 @@ protected:
Builder.defineMacro("_GNU_SOURCE");
DefineStd(Builder, "unix", Opts);
- Builder.defineMacro("__ELF__");
Builder.defineMacro("__native_client__");
}
@@ -863,10 +835,10 @@ public:
// Handled in ARM's setABI().
} else if (Triple.getArch() == llvm::Triple::x86) {
this->resetDataLayout("e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-"
- "i64:64-n8:16:32-S128");
+ "i64:64-i128:128-n8:16:32-S128");
} else if (Triple.getArch() == llvm::Triple::x86_64) {
this->resetDataLayout("e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-"
- "i64:64-n8:16:32:64-S128");
+ "i64:64-i128:128-n8:16:32:64-S128");
} else if (Triple.getArch() == llvm::Triple::mipsel) {
// Handled on mips' setDataLayout.
} else {
@@ -883,12 +855,14 @@ protected:
void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
MacroBuilder &Builder) const override {
Builder.defineMacro("__Fuchsia__");
- Builder.defineMacro("__ELF__");
if (Opts.POSIXThreads)
Builder.defineMacro("_REENTRANT");
// Required by the libc++ locale support.
if (Opts.CPlusPlus)
Builder.defineMacro("_GNU_SOURCE");
+ Builder.defineMacro("__Fuchsia_API_level__", Twine(Opts.FuchsiaAPILevel));
+ this->PlatformName = "fuchsia";
+ this->PlatformMinVersion = VersionTuple(Opts.FuchsiaAPILevel);
}
public:
@@ -937,8 +911,7 @@ class LLVM_LIBRARY_VISIBILITY WASITargetInfo
}
public:
- explicit WASITargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
- : WebAssemblyOSTargetInfo<Target>(Triple, Opts) {}
+ using WebAssemblyOSTargetInfo<Target>::WebAssemblyOSTargetInfo;
};
// Emscripten target
@@ -948,6 +921,7 @@ class LLVM_LIBRARY_VISIBILITY EmscriptenTargetInfo
void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
MacroBuilder &Builder) const final {
WebAssemblyOSTargetInfo<Target>::getOSDefines(Opts, Triple, Builder);
+ DefineStd(Builder, "unix", Opts);
Builder.defineMacro("__EMSCRIPTEN__");
if (Opts.POSIXThreads)
Builder.defineMacro("__EMSCRIPTEN_PTHREADS__");
@@ -966,6 +940,66 @@ public:
}
};
+// OHOS target
+template <typename Target>
+class LLVM_LIBRARY_VISIBILITY OHOSTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ // Linux defines; list based off of gcc output
+ DefineStd(Builder, "unix", Opts);
+
+ // Generic OHOS target defines
+ if (Triple.isOHOSFamily()) {
+ Builder.defineMacro("__OHOS_FAMILY__", "1");
+
+ auto Version = Triple.getEnvironmentVersion();
+ this->PlatformName = "ohos";
+ this->PlatformMinVersion = Version;
+ Builder.defineMacro("__OHOS_Major__", Twine(Version.getMajor()));
+ if (auto Minor = Version.getMinor())
+ Builder.defineMacro("__OHOS_Minor__", Twine(*Minor));
+ if (auto Subminor = Version.getSubminor())
+ Builder.defineMacro("__OHOS_Micro__", Twine(*Subminor));
+ }
+
+ if (Triple.isOpenHOS())
+ Builder.defineMacro("__OHOS__");
+
+ if (Triple.isOSLinux()) {
+ DefineStd(Builder, "linux", Opts);
+ } else if (Triple.isOSLiteOS()) {
+ Builder.defineMacro("__LITEOS__");
+ }
+
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_REENTRANT");
+ if (Opts.CPlusPlus)
+ Builder.defineMacro("_GNU_SOURCE");
+ if (this->HasFloat128)
+ Builder.defineMacro("__FLOAT128__");
+ }
+
+public:
+ OHOSTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : OSTargetInfo<Target>(Triple, Opts) {
+ this->WIntType = TargetInfo::UnsignedInt;
+
+ switch (Triple.getArch()) {
+ default:
+ break;
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ this->HasFloat128 = true;
+ break;
+ }
+ }
+
+ const char *getStaticInitSectionSpecifier() const override {
+ return ".text.startup";
+ }
+};
+
} // namespace targets
} // namespace clang
#endif // LLVM_CLANG_LIB_BASIC_TARGETS_OSTARGETS_H
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.cpp
index 60e9467193a8..51b6452b0c20 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.cpp
@@ -16,10 +16,12 @@
using namespace clang;
using namespace clang::targets;
-ArrayRef<const char *> PNaClTargetInfo::getGCCRegNames() const { return None; }
+ArrayRef<const char *> PNaClTargetInfo::getGCCRegNames() const {
+ return std::nullopt;
+}
ArrayRef<TargetInfo::GCCRegAlias> PNaClTargetInfo::getGCCRegAliases() const {
- return None;
+ return std::nullopt;
}
void PNaClTargetInfo::getArchDefines(const LangOptions &Opts,
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.h b/contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.h
index d5bfc369583f..595c4d83b1d1 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/PNaCl.h
@@ -16,8 +16,8 @@
#include "Mips.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -52,7 +52,9 @@ public:
return Feature == "pnacl";
}
- ArrayRef<Builtin::Info> getTargetBuiltins() const override { return None; }
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return std::nullopt;
+ }
BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::PNaClABIBuiltinVaList;
@@ -67,9 +69,9 @@ public:
return false;
}
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
};
// We attempt to use PNaCl (le32) frontend and Mips32EL backend.
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
index ecfbe284fb2e..41935abfb65d 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
@@ -18,11 +18,13 @@
using namespace clang;
using namespace clang::targets;
-const Builtin::Info PPCTargetInfo::BuiltinInfo[] = {
+static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
- {#ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsPPC.def"
};
@@ -36,6 +38,8 @@ bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasAltivec = true;
} else if (Feature == "+vsx") {
HasVSX = true;
+ } else if (Feature == "+crbits") {
+ UseCRBits = true;
} else if (Feature == "+bpermd") {
HasBPERMD = true;
} else if (Feature == "+extdiv") {
@@ -49,7 +53,7 @@ bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
} else if (Feature == "+htm") {
HasHTM = true;
} else if (Feature == "+float128") {
- HasFloat128 = true;
+ HasFloat128 = !getTriple().isOSAIX();
} else if (Feature == "+power9-vector") {
HasP9Vector = true;
} else if (Feature == "+power10-vector") {
@@ -73,12 +77,18 @@ bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasROPProtect = true;
} else if (Feature == "+privileged") {
HasPrivileged = true;
+ } else if (Feature == "+aix-small-local-exec-tls") {
+ HasAIXSmallLocalExecTLS = true;
+ } else if (Feature == "+isa-v206-instructions") {
+ IsISA2_06 = true;
} else if (Feature == "+isa-v207-instructions") {
IsISA2_07 = true;
} else if (Feature == "+isa-v30-instructions") {
IsISA3_0 = true;
} else if (Feature == "+isa-v31-instructions") {
IsISA3_1 = true;
+ } else if (Feature == "+quadword-atomics") {
+ HasQuadwordAtomics = true;
}
// TODO: Finish this list and add an assert that we've handled them
// all.
@@ -202,8 +212,10 @@ static void defineXLCompatMacros(MacroBuilder &Builder) {
Builder.defineMacro("__darn_32", "__builtin_darn_32");
Builder.defineMacro("__darn_raw", "__builtin_darn_raw");
Builder.defineMacro("__dcbf", "__builtin_dcbf");
+ Builder.defineMacro("__fence", "__builtin_ppc_fence");
Builder.defineMacro("__fmadd", "__builtin_fma");
Builder.defineMacro("__fmadds", "__builtin_fmaf");
+ Builder.defineMacro("__abs", "__builtin_abs");
Builder.defineMacro("__labs", "__builtin_labs");
Builder.defineMacro("__llabs", "__builtin_llabs");
Builder.defineMacro("__popcnt4", "__builtin_popcount");
@@ -236,6 +248,27 @@ static void defineXLCompatMacros(MacroBuilder &Builder) {
Builder.defineMacro("__frsqrtes", "__builtin_ppc_frsqrtes");
Builder.defineMacro("__fsqrt", "__builtin_ppc_fsqrt");
Builder.defineMacro("__fsqrts", "__builtin_ppc_fsqrts");
+ Builder.defineMacro("__addex", "__builtin_ppc_addex");
+ Builder.defineMacro("__cmplxl", "__builtin_complex");
+ Builder.defineMacro("__compare_exp_uo", "__builtin_ppc_compare_exp_uo");
+ Builder.defineMacro("__compare_exp_lt", "__builtin_ppc_compare_exp_lt");
+ Builder.defineMacro("__compare_exp_gt", "__builtin_ppc_compare_exp_gt");
+ Builder.defineMacro("__compare_exp_eq", "__builtin_ppc_compare_exp_eq");
+ Builder.defineMacro("__test_data_class", "__builtin_ppc_test_data_class");
+ Builder.defineMacro("__swdiv", "__builtin_ppc_swdiv");
+ Builder.defineMacro("__swdivs", "__builtin_ppc_swdivs");
+ Builder.defineMacro("__fnabs", "__builtin_ppc_fnabs");
+ Builder.defineMacro("__fnabss", "__builtin_ppc_fnabss");
+ Builder.defineMacro("__builtin_maxfe", "__builtin_ppc_maxfe");
+ Builder.defineMacro("__builtin_maxfl", "__builtin_ppc_maxfl");
+ Builder.defineMacro("__builtin_maxfs", "__builtin_ppc_maxfs");
+ Builder.defineMacro("__builtin_minfe", "__builtin_ppc_minfe");
+ Builder.defineMacro("__builtin_minfl", "__builtin_ppc_minfl");
+ Builder.defineMacro("__builtin_minfs", "__builtin_ppc_minfs");
+ Builder.defineMacro("__builtin_mffs", "__builtin_ppc_mffs");
+ Builder.defineMacro("__builtin_mffsl", "__builtin_ppc_mffsl");
+ Builder.defineMacro("__builtin_mtfsf", "__builtin_ppc_mtfsf");
+ Builder.defineMacro("__builtin_set_fpscr_rn", "__builtin_ppc_set_fpscr_rn");
}
/// PPCTargetInfo::getTargetDefines - Return a set of the PowerPC-specific
@@ -257,8 +290,16 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
if (PointerWidth == 64) {
Builder.defineMacro("_ARCH_PPC64");
Builder.defineMacro("__powerpc64__");
- Builder.defineMacro("__ppc64__");
Builder.defineMacro("__PPC64__");
+ } else if (getTriple().isOSAIX()) {
+ // The XL compilers on AIX define _ARCH_PPC64 for both 32 and 64-bit modes.
+ Builder.defineMacro("_ARCH_PPC64");
+ }
+ if (getTriple().isOSAIX()) {
+ Builder.defineMacro("__THW_PPC__");
+ // Define __PPC and __powerpc for AIX XL C/C++ compatibility
+ Builder.defineMacro("__PPC");
+ Builder.defineMacro("__powerpc");
}
// Target properties.
@@ -304,9 +345,8 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__LONGDOUBLE64");
}
- // Define this for elfv2 (64-bit only) or 64-bit darwin.
- if (ABI == "elfv2" ||
- (getTriple().getOS() == llvm::Triple::Darwin && PointerWidth == 64))
+ // Define this for elfv2 (64-bit only).
+ if (ABI == "elfv2")
Builder.defineMacro("__STRUCT_PARM_ALIGN__", "16");
if (ArchDefs & ArchDefineName)
@@ -370,8 +410,6 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__MMA__");
if (HasROPProtect)
Builder.defineMacro("__ROP_PROTECT__");
- if (HasPrivileged)
- Builder.defineMacro("__PRIVILEGED__");
if (HasP10Vector)
Builder.defineMacro("__POWER10_VECTOR__");
if (HasPCRelativeMemops)
@@ -419,11 +457,11 @@ static bool ppcUserFeaturesCheck(DiagnosticsEngine &Diags,
const std::vector<std::string> &FeaturesVec) {
// vsx was not explicitly turned off.
- if (llvm::find(FeaturesVec, "-vsx") == FeaturesVec.end())
+ if (!llvm::is_contained(FeaturesVec, "-vsx"))
return true;
auto FindVSXSubfeature = [&](StringRef Feature, StringRef Option) {
- if (llvm::find(FeaturesVec, Feature) != FeaturesVec.end()) {
+ if (llvm::is_contained(FeaturesVec, Feature)) {
Diags.Report(diag::err_opt_not_valid_with_opt) << Option << "-mno-vsx";
return true;
}
@@ -488,6 +526,11 @@ bool PPCTargetInfo::initFeatureMap(
.Case("pwr9", true)
.Case("pwr8", true)
.Default(false);
+ Features["crbits"] = llvm::StringSwitch<bool>(CPU)
+ .Case("ppc64le", true)
+ .Case("pwr9", true)
+ .Case("pwr8", true)
+ .Default(false);
Features["vsx"] = llvm::StringSwitch<bool>(CPU)
.Case("ppc64le", true)
.Case("pwr9", true)
@@ -505,11 +548,23 @@ bool PPCTargetInfo::initFeatureMap(
// Privileged instructions are off by default.
Features["privileged"] = false;
+ // The code generated by the -maix-small-local-exec-tls option is turned
+ // off by default.
+ Features["aix-small-local-exec-tls"] = false;
+
Features["spe"] = llvm::StringSwitch<bool>(CPU)
.Case("8548", true)
.Case("e500", true)
.Default(false);
+ Features["isa-v206-instructions"] = llvm::StringSwitch<bool>(CPU)
+ .Case("ppc64le", true)
+ .Case("pwr9", true)
+ .Case("pwr8", true)
+ .Case("pwr7", true)
+ .Case("a2", true)
+ .Default(false);
+
Features["isa-v207-instructions"] = llvm::StringSwitch<bool>(CPU)
.Case("ppc64le", true)
.Case("pwr9", true)
@@ -519,6 +574,12 @@ bool PPCTargetInfo::initFeatureMap(
Features["isa-v30-instructions"] =
llvm::StringSwitch<bool>(CPU).Case("pwr9", true).Default(false);
+ Features["quadword-atomics"] =
+ getTriple().isArch64Bit() && llvm::StringSwitch<bool>(CPU)
+ .Case("pwr9", true)
+ .Case("pwr8", true)
+ .Default(false);
+
// Power10 includes all the same features as Power9 plus any features specific
// to the Power10 core.
if (CPU == "pwr10" || CPU == "power10") {
@@ -536,33 +597,63 @@ bool PPCTargetInfo::initFeatureMap(
if (!ppcUserFeaturesCheck(Diags, FeaturesVec))
return false;
- if (!(ArchDefs & ArchDefinePwr9) && (ArchDefs & ArchDefinePpcgr) &&
- llvm::find(FeaturesVec, "+float128") != FeaturesVec.end()) {
- // We have __float128 on PPC but not power 9 and above.
+ if (!(ArchDefs & ArchDefinePwr7) && (ArchDefs & ArchDefinePpcgr) &&
+ llvm::is_contained(FeaturesVec, "+float128")) {
+ // We have __float128 on PPC but not pre-VSX targets.
Diags.Report(diag::err_opt_not_valid_with_opt) << "-mfloat128" << CPU;
return false;
}
- if (!(ArchDefs & ArchDefinePwr10) &&
- llvm::find(FeaturesVec, "+mma") != FeaturesVec.end()) {
- // We have MMA on PPC but not power 10 and above.
- Diags.Report(diag::err_opt_not_valid_with_opt) << "-mmma" << CPU;
- return false;
+ if (!(ArchDefs & ArchDefinePwr10)) {
+ if (llvm::is_contained(FeaturesVec, "+mma")) {
+ // MMA operations are not available pre-Power10.
+ Diags.Report(diag::err_opt_not_valid_with_opt) << "-mmma" << CPU;
+ return false;
+ }
+ if (llvm::is_contained(FeaturesVec, "+pcrel")) {
+ // PC-Relative instructions are not available pre-Power10,
+ // and these instructions also require prefixed instructions support.
+ Diags.Report(diag::err_opt_not_valid_without_opt)
+ << "-mpcrel"
+ << "-mcpu=pwr10 -mprefixed";
+ return false;
+ }
+ if (llvm::is_contained(FeaturesVec, "+prefixed")) {
+ // Prefixed instructions are not available pre-Power10.
+ Diags.Report(diag::err_opt_not_valid_without_opt) << "-mprefixed"
+ << "-mcpu=pwr10";
+ return false;
+ }
+ if (llvm::is_contained(FeaturesVec, "+paired-vector-memops")) {
+ // Paired vector memops are not available pre-Power10.
+ Diags.Report(diag::err_opt_not_valid_without_opt)
+ << "-mpaired-vector-memops"
+ << "-mcpu=pwr10";
+ return false;
+ }
}
if (!(ArchDefs & ArchDefinePwr8) &&
- llvm::find(FeaturesVec, "+rop-protect") != FeaturesVec.end()) {
+ llvm::is_contained(FeaturesVec, "+rop-protect")) {
// We can turn on ROP Protect on Power 8 and above.
Diags.Report(diag::err_opt_not_valid_with_opt) << "-mrop-protect" << CPU;
return false;
}
if (!(ArchDefs & ArchDefinePwr8) &&
- llvm::find(FeaturesVec, "+privileged") != FeaturesVec.end()) {
+ llvm::is_contained(FeaturesVec, "+privileged")) {
Diags.Report(diag::err_opt_not_valid_with_opt) << "-mprivileged" << CPU;
return false;
}
+ if (llvm::is_contained(FeaturesVec, "+aix-small-local-exec-tls")) {
+ if (!getTriple().isOSAIX() || !getTriple().isArch64Bit()) {
+ Diags.Report(diag::err_opt_not_valid_on_target)
+ << "-maix-small-local-exec-tls";
+ return false;
+ }
+ }
+
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
@@ -576,20 +667,18 @@ void PPCTargetInfo::addP10SpecificFeatures(
Features["pcrelative-memops"] = true;
Features["prefix-instrs"] = true;
Features["isa-v31-instructions"] = true;
- return;
}
// Add features specific to the "Future" CPU.
void PPCTargetInfo::addFutureSpecificFeatures(
- llvm::StringMap<bool> &Features) const {
- return;
-}
+ llvm::StringMap<bool> &Features) const {}
bool PPCTargetInfo::hasFeature(StringRef Feature) const {
return llvm::StringSwitch<bool>(Feature)
.Case("powerpc", true)
.Case("altivec", HasAltivec)
.Case("vsx", HasVSX)
+ .Case("crbits", UseCRBits)
.Case("power8-vector", HasP8Vector)
.Case("crypto", HasP8Crypto)
.Case("direct-move", HasDirectMove)
@@ -606,9 +695,12 @@ bool PPCTargetInfo::hasFeature(StringRef Feature) const {
.Case("mma", HasMMA)
.Case("rop-protect", HasROPProtect)
.Case("privileged", HasPrivileged)
+ .Case("aix-small-local-exec-tls", HasAIXSmallLocalExecTLS)
+ .Case("isa-v206-instructions", IsISA2_06)
.Case("isa-v207-instructions", IsISA2_07)
.Case("isa-v30-instructions", IsISA3_0)
.Case("isa-v31-instructions", IsISA3_1)
+ .Case("quadword-atomics", HasQuadwordAtomics)
.Default(false);
}
@@ -666,6 +758,8 @@ void PPCTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
}
}
+// Make sure that registers are added in the correct array index which should be
+// the DWARF number for PPC registers.
const char *const PPCTargetInfo::GCCRegNames[] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8",
"r9", "r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17",
@@ -683,41 +777,47 @@ const char *const PPCTargetInfo::GCCRegNames[] = {
};
ArrayRef<const char *> PPCTargetInfo::getGCCRegNames() const {
- return llvm::makeArrayRef(GCCRegNames);
+ return llvm::ArrayRef(GCCRegNames);
}
const TargetInfo::GCCRegAlias PPCTargetInfo::GCCRegAliases[] = {
// While some of these aliases do map to different registers
// they still share the same register name.
- {{"0"}, "r0"}, {{"1"}, "r1"}, {{"2"}, "r2"}, {{"3"}, "r3"},
- {{"4"}, "r4"}, {{"5"}, "r5"}, {{"6"}, "r6"}, {{"7"}, "r7"},
- {{"8"}, "r8"}, {{"9"}, "r9"}, {{"10"}, "r10"}, {{"11"}, "r11"},
- {{"12"}, "r12"}, {{"13"}, "r13"}, {{"14"}, "r14"}, {{"15"}, "r15"},
- {{"16"}, "r16"}, {{"17"}, "r17"}, {{"18"}, "r18"}, {{"19"}, "r19"},
- {{"20"}, "r20"}, {{"21"}, "r21"}, {{"22"}, "r22"}, {{"23"}, "r23"},
- {{"24"}, "r24"}, {{"25"}, "r25"}, {{"26"}, "r26"}, {{"27"}, "r27"},
- {{"28"}, "r28"}, {{"29"}, "r29"}, {{"30"}, "r30"}, {{"31"}, "r31"},
- {{"fr0"}, "f0"}, {{"fr1"}, "f1"}, {{"fr2"}, "f2"}, {{"fr3"}, "f3"},
- {{"fr4"}, "f4"}, {{"fr5"}, "f5"}, {{"fr6"}, "f6"}, {{"fr7"}, "f7"},
- {{"fr8"}, "f8"}, {{"fr9"}, "f9"}, {{"fr10"}, "f10"}, {{"fr11"}, "f11"},
- {{"fr12"}, "f12"}, {{"fr13"}, "f13"}, {{"fr14"}, "f14"}, {{"fr15"}, "f15"},
- {{"fr16"}, "f16"}, {{"fr17"}, "f17"}, {{"fr18"}, "f18"}, {{"fr19"}, "f19"},
- {{"fr20"}, "f20"}, {{"fr21"}, "f21"}, {{"fr22"}, "f22"}, {{"fr23"}, "f23"},
- {{"fr24"}, "f24"}, {{"fr25"}, "f25"}, {{"fr26"}, "f26"}, {{"fr27"}, "f27"},
- {{"fr28"}, "f28"}, {{"fr29"}, "f29"}, {{"fr30"}, "f30"}, {{"fr31"}, "f31"},
- {{"cc"}, "cr0"},
+ {{"0"}, "r0"}, {{"1", "sp"}, "r1"}, {{"2"}, "r2"},
+ {{"3"}, "r3"}, {{"4"}, "r4"}, {{"5"}, "r5"},
+ {{"6"}, "r6"}, {{"7"}, "r7"}, {{"8"}, "r8"},
+ {{"9"}, "r9"}, {{"10"}, "r10"}, {{"11"}, "r11"},
+ {{"12"}, "r12"}, {{"13"}, "r13"}, {{"14"}, "r14"},
+ {{"15"}, "r15"}, {{"16"}, "r16"}, {{"17"}, "r17"},
+ {{"18"}, "r18"}, {{"19"}, "r19"}, {{"20"}, "r20"},
+ {{"21"}, "r21"}, {{"22"}, "r22"}, {{"23"}, "r23"},
+ {{"24"}, "r24"}, {{"25"}, "r25"}, {{"26"}, "r26"},
+ {{"27"}, "r27"}, {{"28"}, "r28"}, {{"29"}, "r29"},
+ {{"30"}, "r30"}, {{"31"}, "r31"}, {{"fr0"}, "f0"},
+ {{"fr1"}, "f1"}, {{"fr2"}, "f2"}, {{"fr3"}, "f3"},
+ {{"fr4"}, "f4"}, {{"fr5"}, "f5"}, {{"fr6"}, "f6"},
+ {{"fr7"}, "f7"}, {{"fr8"}, "f8"}, {{"fr9"}, "f9"},
+ {{"fr10"}, "f10"}, {{"fr11"}, "f11"}, {{"fr12"}, "f12"},
+ {{"fr13"}, "f13"}, {{"fr14"}, "f14"}, {{"fr15"}, "f15"},
+ {{"fr16"}, "f16"}, {{"fr17"}, "f17"}, {{"fr18"}, "f18"},
+ {{"fr19"}, "f19"}, {{"fr20"}, "f20"}, {{"fr21"}, "f21"},
+ {{"fr22"}, "f22"}, {{"fr23"}, "f23"}, {{"fr24"}, "f24"},
+ {{"fr25"}, "f25"}, {{"fr26"}, "f26"}, {{"fr27"}, "f27"},
+ {{"fr28"}, "f28"}, {{"fr29"}, "f29"}, {{"fr30"}, "f30"},
+ {{"fr31"}, "f31"}, {{"cc"}, "cr0"},
};
ArrayRef<TargetInfo::GCCRegAlias> PPCTargetInfo::getGCCRegAliases() const {
- return llvm::makeArrayRef(GCCRegAliases);
+ return llvm::ArrayRef(GCCRegAliases);
}
-// PPC ELFABIv2 DWARF Definitoin "Table 2.26. Mappings of Common Registers".
+// PPC ELFABIv2 DWARF Definition "Table 2.26. Mappings of Common Registers".
// vs0 ~ vs31 is mapping to 32 - 63,
-// vs32 ~ vs63 is mapping to 77 - 108.
+// vs32 ~ vs63 is mapping to 77 - 108.
+// And this mapping applies to all OSes which run on powerpc.
const TargetInfo::AddlRegName GCCAddlRegNames[] = {
// Table of additional register names to use in user input.
- {{"vs0"}, 32}, {{"vs1"}, 33}, {{"vs2"}, 34}, {{"vs3"}, 35},
+ {{"vs0"}, 32}, {{"vs1"}, 33}, {{"vs2"}, 34}, {{"vs3"}, 35},
{{"vs4"}, 36}, {{"vs5"}, 37}, {{"vs6"}, 38}, {{"vs7"}, 39},
{{"vs8"}, 40}, {{"vs9"}, 41}, {{"vs10"}, 42}, {{"vs11"}, 43},
{{"vs12"}, 44}, {{"vs13"}, 45}, {{"vs14"}, 46}, {{"vs15"}, 47},
@@ -736,10 +836,7 @@ const TargetInfo::AddlRegName GCCAddlRegNames[] = {
};
ArrayRef<TargetInfo::AddlRegName> PPCTargetInfo::getGCCAddlRegNames() const {
- if (ABI == "elfv2")
- return llvm::makeArrayRef(GCCAddlRegNames);
- else
- return TargetInfo::getGCCAddlRegNames();
+ return llvm::ArrayRef(GCCAddlRegNames);
}
static constexpr llvm::StringLiteral ValidCPUNames[] = {
@@ -756,7 +853,7 @@ static constexpr llvm::StringLiteral ValidCPUNames[] = {
{"powerpc64le"}, {"ppc64le"}, {"future"}};
bool PPCTargetInfo::isValidCPUName(StringRef Name) const {
- return llvm::find(ValidCPUNames, Name) != std::end(ValidCPUNames);
+ return llvm::is_contained(ValidCPUNames, Name);
}
void PPCTargetInfo::fillValidCPUList(SmallVectorImpl<StringRef> &Values) const {
@@ -772,9 +869,12 @@ void PPCTargetInfo::adjust(DiagnosticsEngine &Diags, LangOptions &Opts) {
? &llvm::APFloat::IEEEquad()
: &llvm::APFloat::PPCDoubleDouble();
Opts.IEEE128 = 1;
+ if (getTriple().isOSAIX() && Opts.EnableAIXQuadwordAtomicsABI &&
+ HasQuadwordAtomics)
+ MaxAtomicInlineWidth = 128;
}
ArrayRef<Builtin::Info> PPCTargetInfo::getTargetBuiltins() const {
- return llvm::makeArrayRef(BuiltinInfo, clang::PPC::LastTSBuiltin -
- Builtin::FirstTSBuiltin);
+ return llvm::ArrayRef(BuiltinInfo,
+ clang::PPC::LastTSBuiltin - Builtin::FirstTSBuiltin);
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
index 7c14a4eb9410..4d62673ba7fb 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
@@ -16,9 +16,9 @@
#include "OSTargets.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -50,7 +50,6 @@ class LLVM_LIBRARY_VISIBILITY PPCTargetInfo : public TargetInfo {
} ArchDefineTypes;
ArchDefineTypes ArchDefs = ArchDefineNone;
- static const Builtin::Info BuiltinInfo[];
static const char *const GCCRegNames[];
static const TargetInfo::GCCRegAlias GCCRegAliases[];
std::string CPU;
@@ -61,7 +60,9 @@ class LLVM_LIBRARY_VISIBILITY PPCTargetInfo : public TargetInfo {
bool HasMMA = false;
bool HasROPProtect = false;
bool HasPrivileged = false;
+ bool HasAIXSmallLocalExecTLS = false;
bool HasVSX = false;
+ bool UseCRBits = false;
bool HasP8Vector = false;
bool HasP8Crypto = false;
bool HasDirectMove = false;
@@ -74,9 +75,11 @@ class LLVM_LIBRARY_VISIBILITY PPCTargetInfo : public TargetInfo {
bool HasP10Vector = false;
bool HasPCRelativeMemops = false;
bool HasPrefixInstrs = false;
+ bool IsISA2_06 = false;
bool IsISA2_07 = false;
bool IsISA3_0 = false;
bool IsISA3_1 = false;
+ bool HasQuadwordAtomics = false;
protected:
std::string ABI;
@@ -85,10 +88,10 @@ public:
PPCTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
: TargetInfo(Triple) {
SuitableAlign = 128;
- SimdDefaultAlign = 128;
LongDoubleWidth = LongDoubleAlign = 128;
LongDoubleFormat = &llvm::APFloat::PPCDoubleDouble();
HasStrictFP = true;
+ HasIbm128 = true;
}
// Set the language option for altivec based on our value.
@@ -195,6 +198,8 @@ public:
void setFeatureEnabled(llvm::StringMap<bool> &Features, StringRef Name,
bool Enabled) const override;
+ bool supportsTargetAttributeTune() const override { return true; }
+
ArrayRef<const char *> getGCCRegNames() const override;
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
@@ -212,7 +217,7 @@ public:
// Don't use floating point registers on soft float ABI.
if (FloatABI == SoftFloat)
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 'b': // Base register
Info.setAllowsRegister();
break;
@@ -291,7 +296,7 @@ public:
case 'Q': // Memory operand that is an offset from a register (it is
// usually better to use `m' or `es' in asm statements)
Info.setAllowsRegister();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 'Z': // Memory operand that is an indexed or indirect from a
// register (it is usually better to use `m' or `es' in
// asm statements)
@@ -328,7 +333,7 @@ public:
return R;
}
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
int getEHDataRegisterNumber(unsigned RegNo) const override {
if (RegNo == 0)
return 3;
@@ -347,8 +352,9 @@ public:
: "u9__ieee128";
}
const char *getFloat128Mangling() const override { return "u9__ieee128"; }
+ const char *getIbm128Mangling() const override { return "g"; }
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
bool isSPRegName(StringRef RegName) const override {
return RegName.equals("r1") || RegName.equals("x1");
@@ -360,11 +366,11 @@ public:
PPC32TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: PPCTargetInfo(Triple, Opts) {
if (Triple.isOSAIX())
- resetDataLayout("E-m:a-p:32:32-i64:64-n32");
+ resetDataLayout("E-m:a-p:32:32-Fi32-i64:64-n32");
else if (Triple.getArch() == llvm::Triple::ppcle)
- resetDataLayout("e-m:e-p:32:32-i64:64-n32");
+ resetDataLayout("e-m:e-p:32:32-Fn32-i64:64-n32");
else
- resetDataLayout("E-m:e-p:32:32-i64:64-n32");
+ resetDataLayout("E-m:e-p:32:32-Fn32-i64:64-n32");
switch (getTriple().getOS()) {
case llvm::Triple::Linux:
@@ -397,7 +403,7 @@ public:
}
BuiltinVaListKind getBuiltinVaListKind() const override {
- // This is the ELF definition, and is overridden by the Darwin sub-target
+ // This is the ELF definition
return TargetInfo::PowerABIBuiltinVaList;
}
};
@@ -411,20 +417,27 @@ public:
LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
IntMaxType = SignedLong;
Int64Type = SignedLong;
- std::string DataLayout = "";
+ std::string DataLayout;
if (Triple.isOSAIX()) {
// TODO: Set appropriate ABI for AIX platform.
- DataLayout = "E-m:a-i64:64-n32:64";
+ DataLayout = "E-m:a-Fi64-i64:64-n32:64";
LongDoubleWidth = 64;
LongDoubleAlign = DoubleAlign = 32;
LongDoubleFormat = &llvm::APFloat::IEEEdouble();
} else if ((Triple.getArch() == llvm::Triple::ppc64le)) {
- DataLayout = "e-m:e-i64:64-n32:64";
+ DataLayout = "e-m:e-Fn32-i64:64-n32:64";
ABI = "elfv2";
} else {
- DataLayout = "E-m:e-i64:64-n32:64";
- ABI = "elfv1";
+ DataLayout = "E-m:e";
+ if (Triple.isPPC64ELFv2ABI()) {
+ ABI = "elfv2";
+ DataLayout += "-Fn32";
+ } else {
+ ABI = "elfv1";
+ DataLayout += "-Fi64";
+ }
+ DataLayout += "-i64:64-n32:64";
}
if (Triple.isOSFreeBSD() || Triple.isOSOpenBSD() || Triple.isMusl()) {
@@ -436,8 +449,18 @@ public:
DataLayout += "-S128-v256:256:256-v512:512:512";
resetDataLayout(DataLayout);
- // PPC64 supports atomics up to 8 bytes.
- MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ // Newer PPC64 instruction sets support atomics up to 16 bytes.
+ MaxAtomicPromoteWidth = 128;
+ // Baseline PPC64 supports inlining atomics up to 8 bytes.
+ MaxAtomicInlineWidth = 64;
+ }
+
+ void setMaxAtomicWidth() override {
+ // For power8 and up, backend is able to inline 16-byte atomic lock free
+ // code.
+ // TODO: We should allow AIX to inline quadword atomics in the future.
+ if (!getTriple().isOSAIX() && hasFeature("quadword-atomics"))
+ MaxAtomicInlineWidth = 128;
}
BuiltinVaListKind getBuiltinVaListKind() const override {
@@ -465,33 +488,6 @@ public:
}
};
-class LLVM_LIBRARY_VISIBILITY DarwinPPC32TargetInfo
- : public DarwinTargetInfo<PPC32TargetInfo> {
-public:
- DarwinPPC32TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
- : DarwinTargetInfo<PPC32TargetInfo>(Triple, Opts) {
- HasAlignMac68kSupport = true;
- BoolWidth = BoolAlign = 32; // XXX support -mone-byte-bool?
- PtrDiffType = SignedInt; // for http://llvm.org/bugs/show_bug.cgi?id=15726
- LongLongAlign = 32;
- resetDataLayout("E-m:o-p:32:32-f64:32:64-n32", "_");
- }
-
- BuiltinVaListKind getBuiltinVaListKind() const override {
- return TargetInfo::CharPtrBuiltinVaList;
- }
-};
-
-class LLVM_LIBRARY_VISIBILITY DarwinPPC64TargetInfo
- : public DarwinTargetInfo<PPC64TargetInfo> {
-public:
- DarwinPPC64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
- : DarwinTargetInfo<PPC64TargetInfo>(Triple, Opts) {
- HasAlignMac68kSupport = true;
- resetDataLayout("E-m:o-i64:64-n32:64", "_");
- }
-};
-
class LLVM_LIBRARY_VISIBILITY AIXPPC32TargetInfo :
public AIXTargetInfo<PPC32TargetInfo> {
public:
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
index 9705129b39d8..c71b2e9eeb6c 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
@@ -1,4 +1,4 @@
-//===--- RISCV.cpp - Implement RISCV target feature support ---------------===//
+//===--- RISCV.cpp - Implement RISC-V target feature support --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,20 +6,24 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements RISCV TargetInfo objects.
+// This file implements RISC-V TargetInfo objects.
//
//===----------------------------------------------------------------------===//
#include "RISCV.h"
+#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/RISCVTargetParser.h"
+#include <optional>
using namespace clang;
using namespace clang::targets;
ArrayRef<const char *> RISCVTargetInfo::getGCCRegNames() const {
+ // clang-format off
static const char *const GCCRegNames[] = {
// Integer registers
"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
@@ -37,8 +41,13 @@ ArrayRef<const char *> RISCVTargetInfo::getGCCRegNames() const {
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
"v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
"v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
- "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"};
- return llvm::makeArrayRef(GCCRegNames);
+ "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
+
+ // CSRs
+ "fflags", "frm", "vtype", "vl", "vxsat", "vxrm"
+ };
+ // clang-format on
+ return llvm::ArrayRef(GCCRegNames);
}
ArrayRef<TargetInfo::GCCRegAlias> RISCVTargetInfo::getGCCRegAliases() const {
@@ -59,7 +68,7 @@ ArrayRef<TargetInfo::GCCRegAlias> RISCVTargetInfo::getGCCRegAliases() const {
{{"fs4"}, "f20"}, {{"fs5"}, "f21"}, {{"fs6"}, "f22"}, {{"fs7"}, "f23"},
{{"fs8"}, "f24"}, {{"fs9"}, "f25"}, {{"fs10"}, "f26"}, {{"fs11"}, "f27"},
{{"ft8"}, "f28"}, {{"ft9"}, "f29"}, {{"ft10"}, "f30"}, {{"ft11"}, "f31"}};
- return llvm::makeArrayRef(GCCRegAliases);
+ return llvm::ArrayRef(GCCRegAliases);
}
bool RISCVTargetInfo::validateAsmConstraint(
@@ -105,7 +114,7 @@ std::string RISCVTargetInfo::convertConstraint(const char *&Constraint) const {
std::string R;
switch (*Constraint) {
case 'v':
- R = std::string("v");
+ R = std::string("^") + std::string(Constraint, 2);
Constraint += 1;
break;
default:
@@ -115,13 +124,20 @@ std::string RISCVTargetInfo::convertConstraint(const char *&Constraint) const {
return R;
}
+static unsigned getVersionValue(unsigned MajorVersion, unsigned MinorVersion) {
+ return MajorVersion * 1000000 + MinorVersion * 1000;
+}
+
void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
- Builder.defineMacro("__ELF__");
Builder.defineMacro("__riscv");
- bool Is64Bit = getTriple().getArch() == llvm::Triple::riscv64;
+ bool Is64Bit = getTriple().isRISCV64();
Builder.defineMacro("__riscv_xlen", Is64Bit ? "64" : "32");
StringRef CodeModel = getTargetOpts().CodeModel;
+ unsigned FLen = ISAInfo->getFLen();
+ unsigned MinVLen = ISAInfo->getMinVLen();
+ unsigned MaxELen = ISAInfo->getMaxELen();
+ unsigned MaxELenFp = ISAInfo->getMaxELenFp();
if (CodeModel == "default")
CodeModel = "small";
@@ -138,21 +154,28 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
else
Builder.defineMacro("__riscv_float_abi_soft");
- if (ABIName == "ilp32e")
+ if (ABIName == "ilp32e" || ABIName == "lp64e")
Builder.defineMacro("__riscv_abi_rve");
Builder.defineMacro("__riscv_arch_test");
- Builder.defineMacro("__riscv_i", "2000000");
- if (HasM) {
- Builder.defineMacro("__riscv_m", "2000000");
+ for (auto &Extension : ISAInfo->getExtensions()) {
+ auto ExtName = Extension.first;
+ auto ExtInfo = Extension.second;
+
+ Builder.defineMacro(Twine("__riscv_", ExtName),
+ Twine(getVersionValue(ExtInfo.Major, ExtInfo.Minor)));
+ }
+
+ if (ISAInfo->hasExtension("m") || ISAInfo->hasExtension("zmmul"))
Builder.defineMacro("__riscv_mul");
+
+ if (ISAInfo->hasExtension("m")) {
Builder.defineMacro("__riscv_div");
Builder.defineMacro("__riscv_muldiv");
}
- if (HasA) {
- Builder.defineMacro("__riscv_a", "2000000");
+ if (ISAInfo->hasExtension("a")) {
Builder.defineMacro("__riscv_atomic");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
@@ -161,218 +184,285 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
}
- if (HasF || HasD) {
- Builder.defineMacro("__riscv_f", "2000000");
- Builder.defineMacro("__riscv_flen", HasD ? "64" : "32");
+ if (FLen) {
+ Builder.defineMacro("__riscv_flen", Twine(FLen));
Builder.defineMacro("__riscv_fdiv");
Builder.defineMacro("__riscv_fsqrt");
}
- if (HasD)
- Builder.defineMacro("__riscv_d", "2000000");
-
- if (HasC) {
- Builder.defineMacro("__riscv_c", "2000000");
- Builder.defineMacro("__riscv_compressed");
+ if (MinVLen) {
+ Builder.defineMacro("__riscv_v_min_vlen", Twine(MinVLen));
+ Builder.defineMacro("__riscv_v_elen", Twine(MaxELen));
+ Builder.defineMacro("__riscv_v_elen_fp", Twine(MaxELenFp));
}
- if (HasB) {
- Builder.defineMacro("__riscv_b", "93000");
- Builder.defineMacro("__riscv_bitmanip");
- }
+ if (ISAInfo->hasExtension("c"))
+ Builder.defineMacro("__riscv_compressed");
- if (HasV) {
- Builder.defineMacro("__riscv_v", "10000");
+ if (ISAInfo->hasExtension("zve32x")) {
Builder.defineMacro("__riscv_vector");
+ // Currently we support the v0.12 RISC-V V intrinsics.
+ Builder.defineMacro("__riscv_v_intrinsic", Twine(getVersionValue(0, 12)));
}
- if (HasZba)
- Builder.defineMacro("__riscv_zba", "93000");
-
- if (HasZbb)
- Builder.defineMacro("__riscv_zbb", "93000");
-
- if (HasZbc)
- Builder.defineMacro("__riscv_zbc", "93000");
-
- if (HasZbe)
- Builder.defineMacro("__riscv_zbe", "93000");
+ auto VScale = getVScaleRange(Opts);
+ if (VScale && VScale->first && VScale->first == VScale->second)
+ Builder.defineMacro("__riscv_v_fixed_vlen",
+ Twine(VScale->first * llvm::RISCV::RVVBitsPerBlock));
- if (HasZbf)
- Builder.defineMacro("__riscv_zbf", "93000");
-
- if (HasZbm)
- Builder.defineMacro("__riscv_zbm", "93000");
-
- if (HasZbp)
- Builder.defineMacro("__riscv_zbp", "93000");
-
- if (HasZbproposedc)
- Builder.defineMacro("__riscv_zbproposedc", "93000");
-
- if (HasZbr)
- Builder.defineMacro("__riscv_zbr", "93000");
-
- if (HasZbs)
- Builder.defineMacro("__riscv_zbs", "93000");
-
- if (HasZbt)
- Builder.defineMacro("__riscv_zbt", "93000");
-
- if (HasZfh)
- Builder.defineMacro("__riscv_zfh", "1000");
-
- if (HasZvamo)
- Builder.defineMacro("__riscv_zvamo", "10000");
+ if (FastUnalignedAccess)
+ Builder.defineMacro("__riscv_misaligned_fast");
+ else
+ Builder.defineMacro("__riscv_misaligned_avoid");
- if (HasZvlsseg)
- Builder.defineMacro("__riscv_zvlsseg", "10000");
+ if (ISAInfo->hasExtension("e")) {
+ if (Is64Bit)
+ Builder.defineMacro("__riscv_64e");
+ else
+ Builder.defineMacro("__riscv_32e");
+ }
}
-const Builtin::Info RISCVTargetInfo::BuiltinInfo[] = {
+static constexpr Builtin::Info BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) \
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
+#include "clang/Basic/BuiltinsRISCVVector.def"
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE},
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsRISCV.def"
};
ArrayRef<Builtin::Info> RISCVTargetInfo::getTargetBuiltins() const {
- return llvm::makeArrayRef(BuiltinInfo, clang::RISCV::LastTSBuiltin -
- Builtin::FirstTSBuiltin);
+ return llvm::ArrayRef(BuiltinInfo,
+ clang::RISCV::LastTSBuiltin - Builtin::FirstTSBuiltin);
}
bool RISCVTargetInfo::initFeatureMap(
llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
const std::vector<std::string> &FeaturesVec) const {
- if (getTriple().getArch() == llvm::Triple::riscv64)
+ unsigned XLen = 32;
+
+ if (getTriple().isRISCV64()) {
Features["64bit"] = true;
+ XLen = 64;
+ } else {
+ Features["32bit"] = true;
+ }
+
+ // If a target attribute specified a full arch string, override all the ISA
+ // extension target features.
+ const auto I = llvm::find(FeaturesVec, "__RISCV_TargetAttrNeedOverride");
+ if (I != FeaturesVec.end()) {
+ std::vector<std::string> OverrideFeatures(std::next(I), FeaturesVec.end());
+
+ // Add back any non ISA extension features, e.g. +relax.
+ auto IsNonISAExtFeature = [](StringRef Feature) {
+ assert(Feature.size() > 1 && (Feature[0] == '+' || Feature[0] == '-'));
+ StringRef Ext = Feature.substr(1); // drop the +/-
+ return !llvm::RISCVISAInfo::isSupportedExtensionFeature(Ext);
+ };
+ llvm::copy_if(llvm::make_range(FeaturesVec.begin(), I),
+ std::back_inserter(OverrideFeatures), IsNonISAExtFeature);
+
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, OverrideFeatures);
+ }
+
+ // Otherwise, parse the features and add any implied extensions.
+ std::vector<std::string> AllFeatures = FeaturesVec;
+ auto ParseResult = llvm::RISCVISAInfo::parseFeatures(XLen, FeaturesVec);
+ if (!ParseResult) {
+ std::string Buffer;
+ llvm::raw_string_ostream OutputErrMsg(Buffer);
+ handleAllErrors(ParseResult.takeError(), [&](llvm::StringError &ErrMsg) {
+ OutputErrMsg << ErrMsg.getMessage();
+ });
+ Diags.Report(diag::err_invalid_feature_combination) << OutputErrMsg.str();
+ return false;
+ }
- return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
+ // Append all features, not just new ones, so we override any negatives.
+ llvm::append_range(AllFeatures, (*ParseResult)->toFeatures());
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, AllFeatures);
+}
+
+std::optional<std::pair<unsigned, unsigned>>
+RISCVTargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
+ // RISCV::RVVBitsPerBlock is 64.
+ unsigned VScaleMin = ISAInfo->getMinVLen() / llvm::RISCV::RVVBitsPerBlock;
+
+ if (LangOpts.VScaleMin || LangOpts.VScaleMax) {
+ // Treat Zvl*b as a lower bound on vscale.
+ VScaleMin = std::max(VScaleMin, LangOpts.VScaleMin);
+ unsigned VScaleMax = LangOpts.VScaleMax;
+ if (VScaleMax != 0 && VScaleMax < VScaleMin)
+ VScaleMax = VScaleMin;
+ return std::pair<unsigned, unsigned>(VScaleMin ? VScaleMin : 1, VScaleMax);
+ }
+
+ if (VScaleMin > 0) {
+ unsigned VScaleMax = ISAInfo->getMaxVLen() / llvm::RISCV::RVVBitsPerBlock;
+ return std::make_pair(VScaleMin, VScaleMax);
+ }
+
+ return std::nullopt;
}
/// Return true if has this feature, need to sync with handleTargetFeatures.
bool RISCVTargetInfo::hasFeature(StringRef Feature) const {
- bool Is64Bit = getTriple().getArch() == llvm::Triple::riscv64;
- return llvm::StringSwitch<bool>(Feature)
- .Case("riscv", true)
- .Case("riscv32", !Is64Bit)
- .Case("riscv64", Is64Bit)
- .Case("64bit", Is64Bit)
- .Case("m", HasM)
- .Case("a", HasA)
- .Case("f", HasF)
- .Case("d", HasD)
- .Case("c", HasC)
- .Case("experimental-b", HasB)
- .Case("experimental-v", HasV)
- .Case("experimental-zba", HasZba)
- .Case("experimental-zbb", HasZbb)
- .Case("experimental-zbc", HasZbc)
- .Case("experimental-zbe", HasZbe)
- .Case("experimental-zbf", HasZbf)
- .Case("experimental-zbm", HasZbm)
- .Case("experimental-zbp", HasZbp)
- .Case("experimental-zbproposedc", HasZbproposedc)
- .Case("experimental-zbr", HasZbr)
- .Case("experimental-zbs", HasZbs)
- .Case("experimental-zbt", HasZbt)
- .Case("experimental-zfh", HasZfh)
- .Case("experimental-zvamo", HasZvamo)
- .Case("experimental-zvlsseg", HasZvlsseg)
- .Default(false);
+ bool Is64Bit = getTriple().isRISCV64();
+ auto Result = llvm::StringSwitch<std::optional<bool>>(Feature)
+ .Case("riscv", true)
+ .Case("riscv32", !Is64Bit)
+ .Case("riscv64", Is64Bit)
+ .Case("32bit", !Is64Bit)
+ .Case("64bit", Is64Bit)
+ .Case("experimental", HasExperimental)
+ .Default(std::nullopt);
+ if (Result)
+ return *Result;
+
+ return ISAInfo->hasExtension(Feature);
}
/// Perform initialization based on the user configured set of features.
bool RISCVTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) {
- for (const auto &Feature : Features) {
- if (Feature == "+m")
- HasM = true;
- else if (Feature == "+a")
- HasA = true;
- else if (Feature == "+f")
- HasF = true;
- else if (Feature == "+d")
- HasD = true;
- else if (Feature == "+c")
- HasC = true;
- else if (Feature == "+experimental-b")
- HasB = true;
- else if (Feature == "+experimental-v")
- HasV = true;
- else if (Feature == "+experimental-zba")
- HasZba = true;
- else if (Feature == "+experimental-zbb")
- HasZbb = true;
- else if (Feature == "+experimental-zbc")
- HasZbc = true;
- else if (Feature == "+experimental-zbe")
- HasZbe = true;
- else if (Feature == "+experimental-zbf")
- HasZbf = true;
- else if (Feature == "+experimental-zbm")
- HasZbm = true;
- else if (Feature == "+experimental-zbp")
- HasZbp = true;
- else if (Feature == "+experimental-zbproposedc")
- HasZbproposedc = true;
- else if (Feature == "+experimental-zbr")
- HasZbr = true;
- else if (Feature == "+experimental-zbs")
- HasZbs = true;
- else if (Feature == "+experimental-zbt")
- HasZbt = true;
- else if (Feature == "+experimental-zfh")
- HasZfh = true;
- else if (Feature == "+experimental-zvamo")
- HasZvamo = true;
- else if (Feature == "+experimental-zvlsseg")
- HasZvlsseg = true;
+ unsigned XLen = getTriple().isArch64Bit() ? 64 : 32;
+ auto ParseResult = llvm::RISCVISAInfo::parseFeatures(XLen, Features);
+ if (!ParseResult) {
+ std::string Buffer;
+ llvm::raw_string_ostream OutputErrMsg(Buffer);
+ handleAllErrors(ParseResult.takeError(), [&](llvm::StringError &ErrMsg) {
+ OutputErrMsg << ErrMsg.getMessage();
+ });
+ Diags.Report(diag::err_invalid_feature_combination) << OutputErrMsg.str();
+ return false;
+ } else {
+ ISAInfo = std::move(*ParseResult);
}
- return true;
-}
+ if (ABI.empty())
+ ABI = ISAInfo->computeDefaultABI().str();
-bool RISCV32TargetInfo::isValidCPUName(StringRef Name) const {
- return llvm::RISCV::checkCPUKind(llvm::RISCV::parseCPUKind(Name),
- /*Is64Bit=*/false);
-}
+ if (ISAInfo->hasExtension("zfh") || ISAInfo->hasExtension("zhinx"))
+ HasLegalHalfType = true;
-void RISCV32TargetInfo::fillValidCPUList(
- SmallVectorImpl<StringRef> &Values) const {
- llvm::RISCV::fillValidCPUArchList(Values, false);
+ FastUnalignedAccess = llvm::is_contained(Features, "+fast-unaligned-access");
+
+ if (llvm::is_contained(Features, "+experimental"))
+ HasExperimental = true;
+
+ if (ABI == "ilp32e" && ISAInfo->hasExtension("d")) {
+ Diags.Report(diag::err_invalid_feature_combination)
+ << "ILP32E cannot be used with the D ISA extension";
+ return false;
+ }
+ return true;
}
-bool RISCV32TargetInfo::isValidTuneCPUName(StringRef Name) const {
- return llvm::RISCV::checkTuneCPUKind(
- llvm::RISCV::parseTuneCPUKind(Name, false),
- /*Is64Bit=*/false);
+bool RISCVTargetInfo::isValidCPUName(StringRef Name) const {
+ bool Is64Bit = getTriple().isArch64Bit();
+ return llvm::RISCV::parseCPU(Name, Is64Bit);
}
-void RISCV32TargetInfo::fillValidTuneCPUList(
+void RISCVTargetInfo::fillValidCPUList(
SmallVectorImpl<StringRef> &Values) const {
- llvm::RISCV::fillValidTuneCPUArchList(Values, false);
+ bool Is64Bit = getTriple().isArch64Bit();
+ llvm::RISCV::fillValidCPUArchList(Values, Is64Bit);
}
-bool RISCV64TargetInfo::isValidCPUName(StringRef Name) const {
- return llvm::RISCV::checkCPUKind(llvm::RISCV::parseCPUKind(Name),
- /*Is64Bit=*/true);
+bool RISCVTargetInfo::isValidTuneCPUName(StringRef Name) const {
+ bool Is64Bit = getTriple().isArch64Bit();
+ return llvm::RISCV::parseTuneCPU(Name, Is64Bit);
}
-void RISCV64TargetInfo::fillValidCPUList(
+void RISCVTargetInfo::fillValidTuneCPUList(
SmallVectorImpl<StringRef> &Values) const {
- llvm::RISCV::fillValidCPUArchList(Values, true);
+ bool Is64Bit = getTriple().isArch64Bit();
+ llvm::RISCV::fillValidTuneCPUArchList(Values, Is64Bit);
}
-bool RISCV64TargetInfo::isValidTuneCPUName(StringRef Name) const {
- return llvm::RISCV::checkTuneCPUKind(
- llvm::RISCV::parseTuneCPUKind(Name, true),
- /*Is64Bit=*/true);
+static void handleFullArchString(StringRef FullArchStr,
+ std::vector<std::string> &Features) {
+ Features.push_back("__RISCV_TargetAttrNeedOverride");
+ auto RII = llvm::RISCVISAInfo::parseArchString(
+ FullArchStr, /* EnableExperimentalExtension */ true);
+ if (llvm::errorToBool(RII.takeError())) {
+ // Forward the invalid FullArchStr.
+ Features.push_back("+" + FullArchStr.str());
+ } else {
+ // Append a full list of features, including any negative extensions so that
+ // we override the CPU's features.
+ std::vector<std::string> FeatStrings =
+ (*RII)->toFeatures(/* AddAllExtensions */ true);
+ Features.insert(Features.end(), FeatStrings.begin(), FeatStrings.end());
+ }
}
-void RISCV64TargetInfo::fillValidTuneCPUList(
- SmallVectorImpl<StringRef> &Values) const {
- llvm::RISCV::fillValidTuneCPUArchList(Values, true);
+ParsedTargetAttr RISCVTargetInfo::parseTargetAttr(StringRef Features) const {
+ ParsedTargetAttr Ret;
+ if (Features == "default")
+ return Ret;
+ SmallVector<StringRef, 1> AttrFeatures;
+ Features.split(AttrFeatures, ";");
+ bool FoundArch = false;
+
+ for (auto &Feature : AttrFeatures) {
+ Feature = Feature.trim();
+ StringRef AttrString = Feature.split("=").second.trim();
+
+ if (Feature.starts_with("arch=")) {
+ // Override last features
+ Ret.Features.clear();
+ if (FoundArch)
+ Ret.Duplicate = "arch=";
+ FoundArch = true;
+
+ if (AttrString.starts_with("+")) {
+ // EXTENSION like arch=+v,+zbb
+ SmallVector<StringRef, 1> Exts;
+ AttrString.split(Exts, ",");
+ for (auto Ext : Exts) {
+ if (Ext.empty())
+ continue;
+
+ StringRef ExtName = Ext.substr(1);
+ std::string TargetFeature =
+ llvm::RISCVISAInfo::getTargetFeatureForExtension(ExtName);
+ if (!TargetFeature.empty())
+ Ret.Features.push_back(Ext.front() + TargetFeature);
+ else
+ Ret.Features.push_back(Ext.str());
+ }
+ } else {
+ // full-arch-string like arch=rv64gcv
+ handleFullArchString(AttrString, Ret.Features);
+ }
+ } else if (Feature.starts_with("cpu=")) {
+ if (!Ret.CPU.empty())
+ Ret.Duplicate = "cpu=";
+
+ Ret.CPU = AttrString;
+
+ if (!FoundArch) {
+ // Update Features with CPU's features
+ StringRef MarchFromCPU = llvm::RISCV::getMArchFromMcpu(Ret.CPU);
+ if (MarchFromCPU != "") {
+ Ret.Features.clear();
+ handleFullArchString(MarchFromCPU, Ret.Features);
+ }
+ }
+ } else if (Feature.starts_with("tune=")) {
+ if (!Ret.Tune.empty())
+ Ret.Duplicate = "tune=";
+
+ Ret.Tune = AttrString;
+ }
+ }
+ return Ret;
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h
index 7e0846581ca1..bfbdafb682c8 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h
@@ -1,4 +1,4 @@
-//===--- RISCV.h - Declare RISCV target feature support ---------*- C++ -*-===//
+//===--- RISCV.h - Declare RISC-V target feature support --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file declares RISCV TargetInfo objects.
+// This file declares RISC-V TargetInfo objects.
//
//===----------------------------------------------------------------------===//
@@ -15,8 +15,10 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/RISCVISAInfo.h"
+#include "llvm/TargetParser/Triple.h"
+#include <optional>
namespace clang {
namespace targets {
@@ -25,33 +27,18 @@ namespace targets {
class RISCVTargetInfo : public TargetInfo {
protected:
std::string ABI, CPU;
- bool HasM = false;
- bool HasA = false;
- bool HasF = false;
- bool HasD = false;
- bool HasC = false;
- bool HasB = false;
- bool HasV = false;
- bool HasZba = false;
- bool HasZbb = false;
- bool HasZbc = false;
- bool HasZbe = false;
- bool HasZbf = false;
- bool HasZbm = false;
- bool HasZbp = false;
- bool HasZbproposedc = false;
- bool HasZbr = false;
- bool HasZbs = false;
- bool HasZbt = false;
- bool HasZfh = false;
- bool HasZvamo = false;
- bool HasZvlsseg = false;
-
- static const Builtin::Info BuiltinInfo[];
+ std::unique_ptr<llvm::RISCVISAInfo> ISAInfo;
+
+private:
+ bool FastUnalignedAccess;
+ bool HasExperimental = false;
public:
RISCVTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
: TargetInfo(Triple) {
+ BFloat16Width = 16;
+ BFloat16Align = 16;
+ BFloat16Format = &llvm::APFloat::BFloat();
LongDoubleWidth = 128;
LongDoubleAlign = 128;
LongDoubleFormat = &llvm::APFloat::IEEEquad();
@@ -61,6 +48,7 @@ public:
HasRISCVVTypes = true;
MCountName = "_mcount";
HasFloat16 = true;
+ HasStrictFP = true;
}
bool setCPU(const std::string &Name) override {
@@ -80,7 +68,12 @@ public:
return TargetInfo::VoidPtrBuiltinVaList;
}
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
+
+ StringRef getConstraintRegister(StringRef Constraint,
+ StringRef Expression) const override {
+ return Expression;
+ }
ArrayRef<const char *> getGCCRegNames() const override;
@@ -105,12 +98,28 @@ public:
StringRef CPU,
const std::vector<std::string> &FeaturesVec) const override;
+ std::optional<std::pair<unsigned, unsigned>>
+ getVScaleRange(const LangOptions &LangOpts) const override;
+
bool hasFeature(StringRef Feature) const override;
bool handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) override;
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
+
+ bool hasBFloat16Type() const override { return true; }
+
+ bool useFP16ConversionIntrinsics() const override {
+ return false;
+ }
+
+ bool isValidCPUName(StringRef Name) const override;
+ void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
+ bool isValidTuneCPUName(StringRef Name) const override;
+ void fillValidTuneCPUList(SmallVectorImpl<StringRef> &Values) const override;
+ bool supportsTargetAttributeTune() const override { return true; }
+ ParsedTargetAttr parseTargetAttr(StringRef Str) const override;
};
class LLVM_LIBRARY_VISIBILITY RISCV32TargetInfo : public RISCVTargetInfo {
public:
@@ -123,6 +132,12 @@ public:
}
bool setABI(const std::string &Name) override {
+ if (Name == "ilp32e") {
+ ABI = Name;
+ resetDataLayout("e-m:e-p:32:32-i64:64-n32-S32");
+ return true;
+ }
+
if (Name == "ilp32" || Name == "ilp32f" || Name == "ilp32d") {
ABI = Name;
return true;
@@ -130,15 +145,10 @@ public:
return false;
}
- bool isValidCPUName(StringRef Name) const override;
- void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
- bool isValidTuneCPUName(StringRef Name) const override;
- void fillValidTuneCPUList(SmallVectorImpl<StringRef> &Values) const override;
-
void setMaxAtomicWidth() override {
MaxAtomicPromoteWidth = 128;
- if (HasA)
+ if (ISAInfo->hasExtension("a"))
MaxAtomicInlineWidth = 32;
}
};
@@ -148,10 +158,16 @@ public:
: RISCVTargetInfo(Triple, Opts) {
LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
IntMaxType = Int64Type = SignedLong;
- resetDataLayout("e-m:e-p:64:64-i64:64-i128:128-n64-S128");
+ resetDataLayout("e-m:e-p:64:64-i64:64-i128:128-n32:64-S128");
}
bool setABI(const std::string &Name) override {
+ if (Name == "lp64e") {
+ ABI = Name;
+ resetDataLayout("e-m:e-p:64:64-i64:64-i128:128-n32:64-S64");
+ return true;
+ }
+
if (Name == "lp64" || Name == "lp64f" || Name == "lp64d") {
ABI = Name;
return true;
@@ -159,15 +175,10 @@ public:
return false;
}
- bool isValidCPUName(StringRef Name) const override;
- void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
- bool isValidTuneCPUName(StringRef Name) const override;
- void fillValidTuneCPUList(SmallVectorImpl<StringRef> &Values) const override;
-
void setMaxAtomicWidth() override {
MaxAtomicPromoteWidth = 128;
- if (HasA)
+ if (ISAInfo->hasExtension("a"))
MaxAtomicInlineWidth = 64;
}
};
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.cpp
index 9b7aab85314a..dc920177d3a9 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.cpp
@@ -1,4 +1,4 @@
-//===--- SPIR.cpp - Implement SPIR target feature support -----------------===//
+//===--- SPIR.cpp - Implement SPIR and SPIR-V target feature support ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements SPIR TargetInfo objects.
+// This file implements SPIR and SPIR-V TargetInfo objects.
//
//===----------------------------------------------------------------------===//
@@ -32,3 +32,25 @@ void SPIR64TargetInfo::getTargetDefines(const LangOptions &Opts,
SPIRTargetInfo::getTargetDefines(Opts, Builder);
DefineStd(Builder, "SPIR64", Opts);
}
+
+void BaseSPIRVTargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ DefineStd(Builder, "SPIRV", Opts);
+}
+
+void SPIRVTargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ BaseSPIRVTargetInfo::getTargetDefines(Opts, Builder);
+}
+
+void SPIRV32TargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ BaseSPIRVTargetInfo::getTargetDefines(Opts, Builder);
+ DefineStd(Builder, "SPIRV32", Opts);
+}
+
+void SPIRV64TargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ BaseSPIRVTargetInfo::getTargetDefines(Opts, Builder);
+ DefineStd(Builder, "SPIRV64", Opts);
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h b/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h
index 50f34abd6630..fa4a3bb1c82e 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h
@@ -1,4 +1,4 @@
-//===--- SPIR.h - Declare SPIR target feature support -----------*- C++ -*-===//
+//===--- SPIR.h - Declare SPIR and SPIR-V target feature support *- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,21 +6,25 @@
//
//===----------------------------------------------------------------------===//
//
-// This file declares SPIR TargetInfo objects.
+// This file declares SPIR and SPIR-V TargetInfo objects.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_SPIR_H
#define LLVM_CLANG_LIB_BASIC_TARGETS_SPIR_H
+#include "Targets.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/VersionTuple.h"
+#include "llvm/TargetParser/Triple.h"
+#include <optional>
namespace clang {
namespace targets {
+// Used by both the SPIR and SPIR-V targets.
static const unsigned SPIRDefIsPrivMap[] = {
0, // Default
1, // opencl_global
@@ -41,9 +45,14 @@ static const unsigned SPIRDefIsPrivMap[] = {
0, // sycl_private
0, // ptr32_sptr
0, // ptr32_uptr
- 0 // ptr64
+ 0, // ptr64
+ 0, // hlsl_groupshared
+ // Wasm address space values for this target are dummy values,
+ // as it is only enabled for Wasm targets.
+ 20, // wasm_funcref
};
+// Used by both the SPIR and SPIR-V targets.
static const unsigned SPIRDefIsGenMap[] = {
4, // Default
// OpenCL address space values for this map are dummy and they can't be used
@@ -54,9 +63,14 @@ static const unsigned SPIRDefIsGenMap[] = {
0, // opencl_generic
0, // opencl_global_device
0, // opencl_global_host
- 0, // cuda_device
- 0, // cuda_constant
- 0, // cuda_shared
+ // cuda_* address space mapping is intended for HIPSPV (HIP to SPIR-V
+ // translation). This mapping is enabled when the language mode is HIP.
+ 1, // cuda_device
+ // cuda_constant pointer can be casted to default/"flat" pointer, but in
+ // SPIR-V casts between constant and generic pointers are not allowed. For
+ // this reason cuda_constant is mapped to SPIR-V CrossWorkgroup.
+ 1, // cuda_constant
+ 3, // cuda_shared
1, // sycl_global
5, // sycl_global_device
6, // sycl_global_host
@@ -64,17 +78,22 @@ static const unsigned SPIRDefIsGenMap[] = {
0, // sycl_private
0, // ptr32_sptr
0, // ptr32_uptr
- 0 // ptr64
+ 0, // ptr64
+ 0, // hlsl_groupshared
+ // Wasm address space values for this target are dummy values,
+ // as it is only enabled for Wasm targets.
+ 20, // wasm_funcref
};
-class LLVM_LIBRARY_VISIBILITY SPIRTargetInfo : public TargetInfo {
-public:
- SPIRTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
+// Base class for SPIR and SPIR-V target info.
+class LLVM_LIBRARY_VISIBILITY BaseSPIRTargetInfo : public TargetInfo {
+ std::unique_ptr<TargetInfo> HostTarget;
+
+protected:
+ BaseSPIRTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: TargetInfo(Triple) {
- assert(getTriple().getOS() == llvm::Triple::UnknownOS &&
- "SPIR target must use unknown OS");
- assert(getTriple().getEnvironment() == llvm::Triple::UnknownEnvironment &&
- "SPIR target must use unknown environment type");
+ assert((Triple.isSPIR() || Triple.isSPIRV()) &&
+ "Invalid architecture for SPIR or SPIR-V.");
TLSSupported = false;
VLASupported = false;
LongWidth = LongAlign = 64;
@@ -85,24 +104,68 @@ public:
// Define available target features
// These must be defined in sorted order!
NoAsmVariants = true;
- }
- void getTargetDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const override;
+ llvm::Triple HostTriple(Opts.HostTriple);
+ if (!HostTriple.isSPIR() && !HostTriple.isSPIRV() &&
+ HostTriple.getArch() != llvm::Triple::UnknownArch) {
+ HostTarget = AllocateTarget(llvm::Triple(Opts.HostTriple), Opts);
- bool hasFeature(StringRef Feature) const override {
- return Feature == "spir";
+ // Copy properties from host target.
+ BoolWidth = HostTarget->getBoolWidth();
+ BoolAlign = HostTarget->getBoolAlign();
+ IntWidth = HostTarget->getIntWidth();
+ IntAlign = HostTarget->getIntAlign();
+ HalfWidth = HostTarget->getHalfWidth();
+ HalfAlign = HostTarget->getHalfAlign();
+ FloatWidth = HostTarget->getFloatWidth();
+ FloatAlign = HostTarget->getFloatAlign();
+ DoubleWidth = HostTarget->getDoubleWidth();
+ DoubleAlign = HostTarget->getDoubleAlign();
+ LongWidth = HostTarget->getLongWidth();
+ LongAlign = HostTarget->getLongAlign();
+ LongLongWidth = HostTarget->getLongLongWidth();
+ LongLongAlign = HostTarget->getLongLongAlign();
+ MinGlobalAlign = HostTarget->getMinGlobalAlign(/* TypeSize = */ 0);
+ NewAlign = HostTarget->getNewAlign();
+ DefaultAlignForAttributeAligned =
+ HostTarget->getDefaultAlignForAttributeAligned();
+ IntMaxType = HostTarget->getIntMaxType();
+ WCharType = HostTarget->getWCharType();
+ WIntType = HostTarget->getWIntType();
+ Char16Type = HostTarget->getChar16Type();
+ Char32Type = HostTarget->getChar32Type();
+ Int64Type = HostTarget->getInt64Type();
+ SigAtomicType = HostTarget->getSigAtomicType();
+ ProcessIDType = HostTarget->getProcessIDType();
+
+ UseBitFieldTypeAlignment = HostTarget->useBitFieldTypeAlignment();
+ UseZeroLengthBitfieldAlignment =
+ HostTarget->useZeroLengthBitfieldAlignment();
+ UseExplicitBitFieldAlignment = HostTarget->useExplicitBitFieldAlignment();
+ ZeroLengthBitfieldBoundary = HostTarget->getZeroLengthBitfieldBoundary();
+
+ // This is a bit of a lie, but it controls __GCC_ATOMIC_XXX_LOCK_FREE, and
+ // we need those macros to be identical on host and device, because (among
+ // other things) they affect which standard library classes are defined,
+ // and we need all classes to be defined on both the host and device.
+ MaxAtomicInlineWidth = HostTarget->getMaxAtomicInlineWidth();
+ }
}
+public:
// SPIR supports the half type and the only llvm intrinsic allowed in SPIR is
// memcpy as per section 3 of the SPIR spec.
bool useFP16ConversionIntrinsics() const override { return false; }
- ArrayRef<Builtin::Info> getTargetBuiltins() const override { return None; }
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return std::nullopt;
+ }
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
- ArrayRef<const char *> getGCCRegNames() const override { return None; }
+ ArrayRef<const char *> getGCCRegNames() const override {
+ return std::nullopt;
+ }
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &info) const override {
@@ -110,14 +173,14 @@ public:
}
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
- return None;
+ return std::nullopt;
}
BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::VoidPtrBuiltinVaList;
}
- Optional<unsigned>
+ std::optional<unsigned>
getDWARFAddressSpace(unsigned AddressSpace) const override {
return AddressSpace;
}
@@ -140,28 +203,56 @@ public:
// FIXME: SYCL specification considers unannotated pointers and references
// to be pointing to the generic address space. See section 5.9.3 of
// SYCL 2020 specification.
- // Currently, there is no way of representing SYCL's default address space
- // language semantic along with the semantics of embedded C's default
- // address space in the same address space map. Hence the map needs to be
- // reset to allow mapping to the desired value of 'Default' entry for SYCL.
- setAddressSpaceMap(/*DefaultIsGeneric=*/Opts.SYCLIsDevice);
+ // Currently, there is no way of representing SYCL's and HIP/CUDA's default
+ // address space language semantic along with the semantics of embedded C's
+ // default address space in the same address space map. Hence the map needs
+ // to be reset to allow mapping to the desired value of 'Default' entry for
+ // SYCL and HIP/CUDA.
+ setAddressSpaceMap(
+ /*DefaultIsGeneric=*/Opts.SYCLIsDevice ||
+ // The address mapping from HIP/CUDA language for device code is only
+ // defined for SPIR-V.
+ (getTriple().isSPIRV() && Opts.CUDAIsDevice));
}
void setSupportedOpenCLOpts() override {
// Assume all OpenCL extensions and optional core features are supported
- // for SPIR since it is a generic target.
+ // for SPIR and SPIR-V since they are generic targets.
supportAllOpenCLOpts();
}
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
bool hasInt128Type() const override { return false; }
};
+class LLVM_LIBRARY_VISIBILITY SPIRTargetInfo : public BaseSPIRTargetInfo {
+public:
+ SPIRTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : BaseSPIRTargetInfo(Triple, Opts) {
+ assert(Triple.isSPIR() && "Invalid architecture for SPIR.");
+ assert(getTriple().getOS() == llvm::Triple::UnknownOS &&
+ "SPIR target must use unknown OS");
+ assert(getTriple().getEnvironment() == llvm::Triple::UnknownEnvironment &&
+ "SPIR target must use unknown environment type");
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override;
+
+ bool hasFeature(StringRef Feature) const override {
+ return Feature == "spir";
+ }
+
+ bool checkArithmeticFenceSupported() const override { return true; }
+};
+
class LLVM_LIBRARY_VISIBILITY SPIR32TargetInfo : public SPIRTargetInfo {
public:
SPIR32TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: SPIRTargetInfo(Triple, Opts) {
+ assert(Triple.getArch() == llvm::Triple::spir &&
+ "Invalid architecture for 32-bit SPIR.");
PointerWidth = PointerAlign = 32;
SizeType = TargetInfo::UnsignedInt;
PtrDiffType = IntPtrType = TargetInfo::SignedInt;
@@ -177,6 +268,8 @@ class LLVM_LIBRARY_VISIBILITY SPIR64TargetInfo : public SPIRTargetInfo {
public:
SPIR64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: SPIRTargetInfo(Triple, Opts) {
+ assert(Triple.getArch() == llvm::Triple::spir64 &&
+ "Invalid architecture for 64-bit SPIR.");
PointerWidth = PointerAlign = 64;
SizeType = TargetInfo::UnsignedLong;
PtrDiffType = IntPtrType = TargetInfo::SignedLong;
@@ -187,6 +280,87 @@ public:
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
};
+
+class LLVM_LIBRARY_VISIBILITY BaseSPIRVTargetInfo : public BaseSPIRTargetInfo {
+public:
+ BaseSPIRVTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : BaseSPIRTargetInfo(Triple, Opts) {
+ assert(Triple.isSPIRV() && "Invalid architecture for SPIR-V.");
+ }
+
+ bool hasFeature(StringRef Feature) const override {
+ return Feature == "spirv";
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY SPIRVTargetInfo : public BaseSPIRVTargetInfo {
+public:
+ SPIRVTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : BaseSPIRVTargetInfo(Triple, Opts) {
+ assert(Triple.getArch() == llvm::Triple::spirv &&
+ "Invalid architecture for Logical SPIR-V.");
+ assert(Triple.getOS() == llvm::Triple::Vulkan &&
+ Triple.getVulkanVersion() != llvm::VersionTuple(0) &&
+ "Logical SPIR-V requires a valid Vulkan environment.");
+ assert(Triple.getEnvironment() >= llvm::Triple::Pixel &&
+ Triple.getEnvironment() <= llvm::Triple::Amplification &&
+ "Logical SPIR-V environment must be a valid shader stage.");
+
+ // SPIR-V IDs are represented with a single 32-bit word.
+ SizeType = TargetInfo::UnsignedInt;
+ resetDataLayout("e-i64:64-v16:16-v24:32-v32:32-v48:64-"
+ "v96:128-v192:256-v256:256-v512:512-v1024:1024");
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY SPIRV32TargetInfo : public BaseSPIRVTargetInfo {
+public:
+ SPIRV32TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : BaseSPIRVTargetInfo(Triple, Opts) {
+ assert(Triple.getArch() == llvm::Triple::spirv32 &&
+ "Invalid architecture for 32-bit SPIR-V.");
+ assert(getTriple().getOS() == llvm::Triple::UnknownOS &&
+ "32-bit SPIR-V target must use unknown OS");
+ assert(getTriple().getEnvironment() == llvm::Triple::UnknownEnvironment &&
+ "32-bit SPIR-V target must use unknown environment type");
+ PointerWidth = PointerAlign = 32;
+ SizeType = TargetInfo::UnsignedInt;
+ PtrDiffType = IntPtrType = TargetInfo::SignedInt;
+ resetDataLayout("e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-"
+ "v96:128-v192:256-v256:256-v512:512-v1024:1024");
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY SPIRV64TargetInfo : public BaseSPIRVTargetInfo {
+public:
+ SPIRV64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : BaseSPIRVTargetInfo(Triple, Opts) {
+ assert(Triple.getArch() == llvm::Triple::spirv64 &&
+ "Invalid architecture for 64-bit SPIR-V.");
+ assert(getTriple().getOS() == llvm::Triple::UnknownOS &&
+ "64-bit SPIR-V target must use unknown OS");
+ assert(getTriple().getEnvironment() == llvm::Triple::UnknownEnvironment &&
+ "64-bit SPIR-V target must use unknown environment type");
+ PointerWidth = PointerAlign = 64;
+ SizeType = TargetInfo::UnsignedLong;
+ PtrDiffType = IntPtrType = TargetInfo::SignedLong;
+ resetDataLayout("e-i64:64-v16:16-v24:32-v32:32-v48:64-"
+ "v96:128-v192:256-v256:256-v512:512-v1024:1024");
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override;
+};
+
} // namespace targets
} // namespace clang
#endif // LLVM_CLANG_LIB_BASIC_TARGETS_SPIR_H
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.cpp
index 5eeb77406c34..d1a891092b0f 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.cpp
@@ -33,7 +33,7 @@ const char *const SparcTargetInfo::GCCRegNames[] = {
};
ArrayRef<const char *> SparcTargetInfo::getGCCRegNames() const {
- return llvm::makeArrayRef(GCCRegNames);
+ return llvm::ArrayRef(GCCRegNames);
}
const TargetInfo::GCCRegAlias SparcTargetInfo::GCCRegAliases[] = {
@@ -48,7 +48,7 @@ const TargetInfo::GCCRegAlias SparcTargetInfo::GCCRegAliases[] = {
};
ArrayRef<TargetInfo::GCCRegAlias> SparcTargetInfo::getGCCRegAliases() const {
- return llvm::makeArrayRef(GCCRegAliases);
+ return llvm::ArrayRef(GCCRegAliases);
}
bool SparcTargetInfo::hasFeature(StringRef Feature) const {
@@ -93,12 +93,6 @@ static constexpr SparcCPUInfo CPUInfo[] = {
{{"ma2480"}, SparcTargetInfo::CK_MYRIAD2480, SparcTargetInfo::CG_V8},
{{"ma2485"}, SparcTargetInfo::CK_MYRIAD2485, SparcTargetInfo::CG_V8},
{{"ma2x8x"}, SparcTargetInfo::CK_MYRIAD2x8x, SparcTargetInfo::CG_V8},
- // FIXME: the myriad2[.n] spellings are obsolete,
- // but a grace period is needed to allow updating dependent builds.
- {{"myriad2"}, SparcTargetInfo::CK_MYRIAD2x5x, SparcTargetInfo::CG_V8},
- {{"myriad2.1"}, SparcTargetInfo::CK_MYRIAD2100, SparcTargetInfo::CG_V8},
- {{"myriad2.2"}, SparcTargetInfo::CK_MYRIAD2x5x, SparcTargetInfo::CG_V8},
- {{"myriad2.3"}, SparcTargetInfo::CK_MYRIAD2x8x, SparcTargetInfo::CG_V8},
{{"leon2"}, SparcTargetInfo::CK_LEON2, SparcTargetInfo::CG_V8},
{{"at697e"}, SparcTargetInfo::CK_LEON2_AT697E, SparcTargetInfo::CG_V8},
{{"at697f"}, SparcTargetInfo::CK_LEON2_AT697F, SparcTargetInfo::CG_V8},
@@ -147,7 +141,7 @@ void SparcTargetInfo::getTargetDefines(const LangOptions &Opts,
void SparcV8TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
SparcTargetInfo::getTargetDefines(Opts, Builder);
- if (getTriple().getOS() == llvm::Triple::Solaris)
+ if (getTriple().isOSSolaris())
Builder.defineMacro("__sparcv8");
else {
switch (getCPUGeneration(CPU)) {
@@ -156,78 +150,10 @@ void SparcV8TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__sparcv8__");
break;
case CG_V9:
- Builder.defineMacro("__sparcv9");
- Builder.defineMacro("__sparcv9__");
Builder.defineMacro("__sparc_v9__");
break;
}
}
- if (getTriple().getVendor() == llvm::Triple::Myriad) {
- std::string MyriadArchValue, Myriad2Value;
- Builder.defineMacro("__sparc_v8__");
- Builder.defineMacro("__leon__");
- switch (CPU) {
- case CK_MYRIAD2100:
- MyriadArchValue = "__ma2100";
- Myriad2Value = "1";
- break;
- case CK_MYRIAD2150:
- MyriadArchValue = "__ma2150";
- Myriad2Value = "2";
- break;
- case CK_MYRIAD2155:
- MyriadArchValue = "__ma2155";
- Myriad2Value = "2";
- break;
- case CK_MYRIAD2450:
- MyriadArchValue = "__ma2450";
- Myriad2Value = "2";
- break;
- case CK_MYRIAD2455:
- MyriadArchValue = "__ma2455";
- Myriad2Value = "2";
- break;
- case CK_MYRIAD2x5x:
- Myriad2Value = "2";
- break;
- case CK_MYRIAD2080:
- MyriadArchValue = "__ma2080";
- Myriad2Value = "3";
- break;
- case CK_MYRIAD2085:
- MyriadArchValue = "__ma2085";
- Myriad2Value = "3";
- break;
- case CK_MYRIAD2480:
- MyriadArchValue = "__ma2480";
- Myriad2Value = "3";
- break;
- case CK_MYRIAD2485:
- MyriadArchValue = "__ma2485";
- Myriad2Value = "3";
- break;
- case CK_MYRIAD2x8x:
- Myriad2Value = "3";
- break;
- default:
- MyriadArchValue = "__ma2100";
- Myriad2Value = "1";
- break;
- }
- if (!MyriadArchValue.empty()) {
- Builder.defineMacro(MyriadArchValue, "1");
- Builder.defineMacro(MyriadArchValue + "__", "1");
- }
- if (Myriad2Value == "2") {
- Builder.defineMacro("__ma2x5x", "1");
- Builder.defineMacro("__ma2x5x__", "1");
- } else if (Myriad2Value == "3") {
- Builder.defineMacro("__ma2x8x", "1");
- Builder.defineMacro("__ma2x8x__", "1");
- }
- Builder.defineMacro("__myriad2__", Myriad2Value);
- Builder.defineMacro("__myriad2", Myriad2Value);
- }
if (getCPUGeneration(CPU) == CG_V9) {
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
@@ -242,7 +168,7 @@ void SparcV9TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__sparcv9");
Builder.defineMacro("__arch64__");
// Solaris doesn't need these variants, but the BSDs do.
- if (getTriple().getOS() != llvm::Triple::Solaris) {
+ if (!getTriple().isOSSolaris()) {
Builder.defineMacro("__sparc64__");
Builder.defineMacro("__sparc_v9__");
Builder.defineMacro("__sparcv9__");
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h b/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h
index 07844abafe11..214fef88e1dc 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/Sparc.h
@@ -14,8 +14,8 @@
#define LLVM_CLANG_LIB_BASIC_TARGETS_SPARC_H
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
// Shared base class for SPARC v8 (32-bit) and SPARC v9 (64-bit).
@@ -39,10 +39,8 @@ public:
bool handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) override {
// Check if software floating point is enabled
- auto Feature = llvm::find(Features, "+soft-float");
- if (Feature != Features.end()) {
+ if (llvm::is_contained(Features, "+soft-float"))
SoftFloat = true;
- }
return true;
}
void getTargetDefines(const LangOptions &Opts,
@@ -50,11 +48,9 @@ public:
bool hasFeature(StringRef Feature) const override;
- bool hasSjLjLowering() const override { return true; }
-
ArrayRef<Builtin::Info> getTargetBuiltins() const override {
// FIXME: Implement!
- return None;
+ return std::nullopt;
}
BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::VoidPtrBuiltinVaList;
@@ -81,7 +77,7 @@ public:
}
return false;
}
- const char *getClobbers() const override {
+ std::string_view getClobbers() const override {
// FIXME: Implement!
return "";
}
@@ -180,8 +176,7 @@ public:
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
- bool hasSjLjLowering() const override { return true; }
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
};
// SPARCV8el is the 32-bit little-endian mode selected by Triple::sparcel.
@@ -234,7 +229,7 @@ public:
return getCPUGeneration(CPU) == CG_V9;
}
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.cpp
index e3e0da21f8d5..a9b5ca483861 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.cpp
@@ -20,11 +20,11 @@
using namespace clang;
using namespace clang::targets;
-const Builtin::Info SystemZTargetInfo::BuiltinInfo[] = {
+static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE},
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsSystemZ.def"
};
@@ -46,11 +46,11 @@ const TargetInfo::AddlRegName GCCAddlRegNames[] = {
};
ArrayRef<const char *> SystemZTargetInfo::getGCCRegNames() const {
- return llvm::makeArrayRef(GCCRegNames);
+ return llvm::ArrayRef(GCCRegNames);
}
ArrayRef<TargetInfo::AddlRegName> SystemZTargetInfo::getGCCAddlRegNames() const {
- return llvm::makeArrayRef(GCCAddlRegNames);
+ return llvm::ArrayRef(GCCAddlRegNames);
}
bool SystemZTargetInfo::validateAsmConstraint(
@@ -59,6 +59,17 @@ bool SystemZTargetInfo::validateAsmConstraint(
default:
return false;
+ case 'Z':
+ switch (Name[1]) {
+ default:
+ return false;
+ case 'Q': // Address with base and unsigned 12-bit displacement
+ case 'R': // Likewise, plus an index
+ case 'S': // Address with base and signed 20-bit displacement
+ case 'T': // Likewise, plus an index
+ break;
+ }
+ [[fallthrough]];
case 'a': // Address register
case 'd': // Data register (equivalent to 'r')
case 'f': // Floating-point register
@@ -93,7 +104,7 @@ static constexpr ISANameRevision ISARevisions[] = {
{{"arch11"}, 11}, {{"z13"}, 11},
{{"arch12"}, 12}, {{"z14"}, 12},
{{"arch13"}, 13}, {{"z15"}, 13},
- {{"arch14"}, 14}
+ {{"arch14"}, 14}, {{"z16"}, 14},
};
int SystemZTargetInfo::getISARevision(StringRef Name) const {
@@ -150,6 +161,6 @@ void SystemZTargetInfo::getTargetDefines(const LangOptions &Opts,
}
ArrayRef<Builtin::Info> SystemZTargetInfo::getTargetBuiltins() const {
- return llvm::makeArrayRef(BuiltinInfo, clang::SystemZ::LastTSBuiltin -
- Builtin::FirstTSBuiltin);
+ return llvm::ArrayRef(BuiltinInfo, clang::SystemZ::LastTSBuiltin -
+ Builtin::FirstTSBuiltin);
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h b/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h
index b749c3f75d18..e4ec338880f2 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h
@@ -15,15 +15,14 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
class LLVM_LIBRARY_VISIBILITY SystemZTargetInfo : public TargetInfo {
- static const Builtin::Info BuiltinInfo[];
static const char *const GCCRegNames[];
std::string CPU;
int ISARevision;
@@ -37,17 +36,31 @@ public:
HasTransactionalExecution(false), HasVector(false), SoftFloat(false) {
IntMaxType = SignedLong;
Int64Type = SignedLong;
- TLSSupported = true;
IntWidth = IntAlign = 32;
LongWidth = LongLongWidth = LongAlign = LongLongAlign = 64;
+ Int128Align = 64;
PointerWidth = PointerAlign = 64;
LongDoubleWidth = 128;
LongDoubleAlign = 64;
LongDoubleFormat = &llvm::APFloat::IEEEquad();
DefaultAlignForAttributeAligned = 64;
MinGlobalAlign = 16;
- resetDataLayout("E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64");
- MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ if (Triple.isOSzOS()) {
+ TLSSupported = false;
+ // All vector types are default aligned on an 8-byte boundary, even if the
+ // vector facility is not available. That is different from Linux.
+ MaxVectorAlign = 64;
+ // Compared to Linux/ELF, the data layout differs only in some details:
+ // - name mangling is GOFF.
+ // - 32 bit pointers, either as default or special address space
+ resetDataLayout("E-m:l-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-"
+ "a:8:16-n32:64");
+ } else {
+ TLSSupported = true;
+ resetDataLayout("E-m:e-i1:8:16-i8:8:16-i64:64-f128:64"
+ "-v128:64-a:8:16-n32:64");
+ }
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 128;
HasStrictFP = true;
}
@@ -60,7 +73,7 @@ public:
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
// No aliases.
- return None;
+ return std::nullopt;
}
ArrayRef<TargetInfo::AddlRegName> getGCCAddlRegNames() const override;
@@ -72,7 +85,31 @@ public:
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &info) const override;
- const char *getClobbers() const override {
+ std::string convertConstraint(const char *&Constraint) const override {
+ switch (Constraint[0]) {
+ case 'p': // Keep 'p' constraint.
+ return std::string("p");
+ case 'Z':
+ switch (Constraint[1]) {
+ case 'Q': // Address with base and unsigned 12-bit displacement
+ case 'R': // Likewise, plus an index
+ case 'S': // Address with base and signed 20-bit displacement
+ case 'T': // Likewise, plus an index
+ // "^" hints llvm that this is a 2 letter constraint.
+ // "Constraint++" is used to promote the string iterator
+ // to the next constraint.
+ return std::string("^") + std::string(Constraint++, 2);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return TargetInfo::convertConstraint(Constraint);
+ }
+
+ std::string_view getClobbers() const override {
// FIXME: Is this really right?
return "";
}
@@ -89,6 +126,14 @@ public:
void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
+ bool isValidTuneCPUName(StringRef Name) const override {
+ return isValidCPUName(Name);
+ }
+
+ void fillValidTuneCPUList(SmallVectorImpl<StringRef> &Values) const override {
+ fillValidCPUList(Values);
+ }
+
bool setCPU(const std::string &Name) override {
CPU = Name;
ISARevision = getISARevision(CPU);
@@ -128,12 +173,14 @@ public:
}
HasVector &= !SoftFloat;
- // If we use the vector ABI, vector types are 64-bit aligned.
- if (HasVector) {
+ // If we use the vector ABI, vector types are 64-bit aligned. The
+ // DataLayout string is always set to this alignment as it is not a
+ // requirement that it follows the alignment emitted by the front end. It
+ // is assumed generally that the Datalayout should reflect only the
+ // target triple and not any specific feature.
+ if (HasVector && !getTriple().isOSzOS())
MaxVectorAlign = 64;
- resetDataLayout("E-m:e-i1:8:16-i8:8:16-i64:64-f128:64"
- "-v128:64-a:8:16-n32:64");
- }
+
return true;
}
@@ -160,7 +207,7 @@ public:
const char *getLongDoubleMangling() const override { return "g"; }
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
int getEHDataRegisterNumber(unsigned RegNo) const override {
return RegNo < 4 ? 6 + RegNo : -1;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/TCE.h b/contrib/llvm-project/clang/lib/Basic/Targets/TCE.h
index 251b4d4b56f7..dcf684fe6dbc 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/TCE.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/TCE.h
@@ -15,8 +15,8 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
@@ -50,6 +50,10 @@ static const unsigned TCEOpenCLAddrSpaceMap[] = {
0, // ptr32_sptr
0, // ptr32_uptr
0, // ptr64
+ 0, // hlsl_groupshared
+ // Wasm address space values for this target are dummy values,
+ // as it is only enabled for Wasm targets.
+ 20, // wasm_funcref
};
class LLVM_LIBRARY_VISIBILITY TCETargetInfo : public TargetInfo {
@@ -91,15 +95,19 @@ public:
bool hasFeature(StringRef Feature) const override { return Feature == "tce"; }
- ArrayRef<Builtin::Info> getTargetBuiltins() const override { return None; }
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return std::nullopt;
+ }
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::VoidPtrBuiltinVaList;
}
- ArrayRef<const char *> getGCCRegNames() const override { return None; }
+ ArrayRef<const char *> getGCCRegNames() const override {
+ return std::nullopt;
+ }
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &info) const override {
@@ -107,7 +115,7 @@ public:
}
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
- return None;
+ return std::nullopt;
}
};
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/VE.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/VE.cpp
index 22223654e8ad..67cae8faf605 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/VE.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/VE.cpp
@@ -18,22 +18,28 @@
using namespace clang;
using namespace clang::targets;
+static constexpr Builtin::Info BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) \
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
+#include "clang/Basic/BuiltinsVE.def"
+};
+
void VETargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
- Builder.defineMacro("_LP64", "1");
- Builder.defineMacro("unix", "1");
- Builder.defineMacro("__unix__", "1");
- Builder.defineMacro("__linux__", "1");
Builder.defineMacro("__ve", "1");
Builder.defineMacro("__ve__", "1");
- Builder.defineMacro("__STDC_HOSTED__", "1");
- Builder.defineMacro("__STDC__", "1");
Builder.defineMacro("__NEC__", "1");
// FIXME: define __FAST_MATH__ 1 if -ffast-math is enabled
// FIXME: define __OPTIMIZE__ n if -On is enabled
// FIXME: define __VECTOR__ n 1 if automatic vectorization is enabled
+
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
}
ArrayRef<Builtin::Info> VETargetInfo::getTargetBuiltins() const {
- return ArrayRef<Builtin::Info>();
+ return llvm::ArrayRef(BuiltinInfo,
+ clang::VE::LastTSBuiltin - Builtin::FirstTSBuiltin);
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/VE.h b/contrib/llvm-project/clang/lib/Basic/Targets/VE.h
index 71d6fc08d859..ea9a092cad80 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/VE.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/VE.h
@@ -15,14 +15,13 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
class LLVM_LIBRARY_VISIBILITY VETargetInfo : public TargetInfo {
- static const Builtin::Info BuiltinInfo[];
public:
VETargetInfo(const llvm::Triple &Triple, const TargetOptions &)
@@ -70,7 +69,7 @@ public:
}
}
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
ArrayRef<const char *> getGCCRegNames() const override {
static const char *const GCCRegNames[] = {
@@ -84,7 +83,7 @@ public:
"sx48", "sx49", "sx50", "sx51", "sx52", "sx53", "sx54", "sx55",
"sx56", "sx57", "sx58", "sx59", "sx60", "sx61", "sx62", "sx63",
};
- return llvm::makeArrayRef(GCCRegNames);
+ return llvm::ArrayRef(GCCRegNames);
}
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
@@ -154,7 +153,7 @@ public:
{{"s62"}, "sx62"},
{{"s63"}, "sx63"},
};
- return llvm::makeArrayRef(GCCRegAliases);
+ return llvm::ArrayRef(GCCRegAliases);
}
bool validateAsmConstraint(const char *&Name,
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp
index 7ef79849cb75..f1c925d90cb6 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp
@@ -20,13 +20,13 @@
using namespace clang;
using namespace clang::targets;
-const Builtin::Info WebAssemblyTargetInfo::BuiltinInfo[] = {
+static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE},
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
- {#ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsWebAssembly.def"
};
@@ -46,6 +46,7 @@ bool WebAssemblyTargetInfo::setABI(const std::string &Name) {
bool WebAssemblyTargetInfo::hasFeature(StringRef Feature) const {
return llvm::StringSwitch<bool>(Feature)
.Case("simd128", SIMDLevel >= SIMD128)
+ .Case("relaxed-simd", SIMDLevel >= RelaxedSIMD)
.Case("nontrapping-fptoint", HasNontrappingFPToInt)
.Case("sign-ext", HasSignExt)
.Case("exception-handling", HasExceptionHandling)
@@ -55,11 +56,13 @@ bool WebAssemblyTargetInfo::hasFeature(StringRef Feature) const {
.Case("multivalue", HasMultivalue)
.Case("tail-call", HasTailCall)
.Case("reference-types", HasReferenceTypes)
+ .Case("extended-const", HasExtendedConst)
+ .Case("multimemory", HasMultiMemory)
.Default(false);
}
bool WebAssemblyTargetInfo::isValidCPUName(StringRef Name) const {
- return llvm::find(ValidCPUNames, Name) != std::end(ValidCPUNames);
+ return llvm::is_contained(ValidCPUNames, Name);
}
void WebAssemblyTargetInfo::fillValidCPUList(
@@ -72,6 +75,8 @@ void WebAssemblyTargetInfo::getTargetDefines(const LangOptions &Opts,
defineCPUMacros(Builder, "wasm", /*Tuning=*/false);
if (SIMDLevel >= SIMD128)
Builder.defineMacro("__wasm_simd128__");
+ if (SIMDLevel >= RelaxedSIMD)
+ Builder.defineMacro("__wasm_relaxed_simd__");
if (HasNontrappingFPToInt)
Builder.defineMacro("__wasm_nontrapping_fptoint__");
if (HasSignExt)
@@ -90,15 +95,27 @@ void WebAssemblyTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__wasm_tail_call__");
if (HasReferenceTypes)
Builder.defineMacro("__wasm_reference_types__");
+ if (HasExtendedConst)
+ Builder.defineMacro("__wasm_extended_const__");
+ if (HasMultiMemory)
+ Builder.defineMacro("__wasm_multimemory__");
+
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
+ Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
}
void WebAssemblyTargetInfo::setSIMDLevel(llvm::StringMap<bool> &Features,
SIMDEnum Level, bool Enabled) {
if (Enabled) {
switch (Level) {
+ case RelaxedSIMD:
+ Features["relaxed-simd"] = true;
+ [[fallthrough]];
case SIMD128:
Features["simd128"] = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case NoSIMD:
break;
}
@@ -109,6 +126,9 @@ void WebAssemblyTargetInfo::setSIMDLevel(llvm::StringMap<bool> &Features,
case NoSIMD:
case SIMD128:
Features["simd128"] = false;
+ [[fallthrough]];
+ case RelaxedSIMD:
+ Features["relaxed-simd"] = false;
break;
}
}
@@ -118,6 +138,8 @@ void WebAssemblyTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
bool Enabled) const {
if (Name == "simd128")
setSIMDLevel(Features, SIMD128, Enabled);
+ else if (Name == "relaxed-simd")
+ setSIMDLevel(Features, RelaxedSIMD, Enabled);
else
Features[Name] = Enabled;
}
@@ -132,7 +154,12 @@ bool WebAssemblyTargetInfo::initFeatureMap(
Features["atomics"] = true;
Features["mutable-globals"] = true;
Features["tail-call"] = true;
+ Features["reference-types"] = true;
+ Features["multimemory"] = true;
setSIMDLevel(Features, SIMD128, true);
+ } else if (CPU == "generic") {
+ Features["sign-ext"] = true;
+ Features["mutable-globals"] = true;
}
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
@@ -149,6 +176,14 @@ bool WebAssemblyTargetInfo::handleTargetFeatures(
SIMDLevel = std::min(SIMDLevel, SIMDEnum(SIMD128 - 1));
continue;
}
+ if (Feature == "+relaxed-simd") {
+ SIMDLevel = std::max(SIMDLevel, RelaxedSIMD);
+ continue;
+ }
+ if (Feature == "-relaxed-simd") {
+ SIMDLevel = std::min(SIMDLevel, SIMDEnum(RelaxedSIMD - 1));
+ continue;
+ }
if (Feature == "+nontrapping-fptoint") {
HasNontrappingFPToInt = true;
continue;
@@ -221,6 +256,22 @@ bool WebAssemblyTargetInfo::handleTargetFeatures(
HasReferenceTypes = false;
continue;
}
+ if (Feature == "+extended-const") {
+ HasExtendedConst = true;
+ continue;
+ }
+ if (Feature == "-extended-const") {
+ HasExtendedConst = false;
+ continue;
+ }
+ if (Feature == "+multimemory") {
+ HasMultiMemory = true;
+ continue;
+ }
+ if (Feature == "-multimemory") {
+ HasMultiMemory = false;
+ continue;
+ }
Diags.Report(diag::err_opt_not_valid_with_opt)
<< Feature << "-target-feature";
@@ -230,17 +281,20 @@ bool WebAssemblyTargetInfo::handleTargetFeatures(
}
ArrayRef<Builtin::Info> WebAssemblyTargetInfo::getTargetBuiltins() const {
- return llvm::makeArrayRef(BuiltinInfo, clang::WebAssembly::LastTSBuiltin -
- Builtin::FirstTSBuiltin);
+ return llvm::ArrayRef(BuiltinInfo, clang::WebAssembly::LastTSBuiltin -
+ Builtin::FirstTSBuiltin);
}
void WebAssemblyTargetInfo::adjust(DiagnosticsEngine &Diags,
LangOptions &Opts) {
- // If the Atomics feature isn't available, turn off POSIXThreads and
- // ThreadModel, so that we don't predefine _REENTRANT or __STDCPP_THREADS__.
- if (!HasAtomics) {
+ TargetInfo::adjust(Diags, Opts);
+ // Turn off POSIXThreads and ThreadModel so that we don't predefine _REENTRANT
+ // or __STDCPP_THREADS__ if we will eventually end up stripping atomics
+ // because they are unsupported.
+ if (!HasAtomics || !HasBulkMemory) {
Opts.POSIXThreads = false;
Opts.setThreadModel(LangOptions::ThreadModelKind::Single);
+ Opts.ThreadsafeStatics = false;
}
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h b/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h
index 4a5ba25c75e7..83b1711f9fdf 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h
@@ -15,18 +15,42 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
+static const unsigned WebAssemblyAddrSpaceMap[] = {
+ 0, // Default
+ 0, // opencl_global
+ 0, // opencl_local
+ 0, // opencl_constant
+ 0, // opencl_private
+ 0, // opencl_generic
+ 0, // opencl_global_device
+ 0, // opencl_global_host
+ 0, // cuda_device
+ 0, // cuda_constant
+ 0, // cuda_shared
+ 0, // sycl_global
+ 0, // sycl_global_device
+ 0, // sycl_global_host
+ 0, // sycl_local
+ 0, // sycl_private
+ 0, // ptr32_sptr
+ 0, // ptr32_uptr
+ 0, // ptr64
+ 0, // hlsl_groupshared
+ 20, // wasm_funcref
+};
+
class LLVM_LIBRARY_VISIBILITY WebAssemblyTargetInfo : public TargetInfo {
- static const Builtin::Info BuiltinInfo[];
enum SIMDEnum {
NoSIMD,
SIMD128,
+ RelaxedSIMD,
} SIMDLevel = NoSIMD;
bool HasNontrappingFPToInt = false;
@@ -38,17 +62,19 @@ class LLVM_LIBRARY_VISIBILITY WebAssemblyTargetInfo : public TargetInfo {
bool HasMultivalue = false;
bool HasTailCall = false;
bool HasReferenceTypes = false;
+ bool HasExtendedConst = false;
+ bool HasMultiMemory = false;
std::string ABI;
public:
explicit WebAssemblyTargetInfo(const llvm::Triple &T, const TargetOptions &)
: TargetInfo(T) {
+ AddrSpaceMap = &WebAssemblyAddrSpaceMap;
NoAsmVariants = true;
SuitableAlign = 128;
LargeArrayMinWidth = 128;
LargeArrayAlign = 128;
- SimdDefaultAlign = 128;
SigAtomicType = SignedLong;
LongDoubleWidth = LongDoubleAlign = 128;
LongDoubleFormat = &llvm::APFloat::IEEEquad();
@@ -94,10 +120,10 @@ private:
return VoidPtrBuiltinVaList;
}
- ArrayRef<const char *> getGCCRegNames() const final { return None; }
+ ArrayRef<const char *> getGCCRegNames() const final { return std::nullopt; }
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const final {
- return None;
+ return std::nullopt;
}
bool validateAsmConstraint(const char *&Name,
@@ -105,7 +131,7 @@ private:
return false;
}
- const char *getClobbers() const final { return ""; }
+ std::string_view getClobbers() const final { return ""; }
bool isCLZForZeroUndef() const final { return false; }
@@ -136,7 +162,7 @@ private:
}
}
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
bool hasProtectedVisibility() const override { return false; }
@@ -150,9 +176,11 @@ public:
const TargetOptions &Opts)
: WebAssemblyTargetInfo(T, Opts) {
if (T.isOSEmscripten())
- resetDataLayout("e-m:e-p:32:32-i64:64-f128:64-n32:64-S128-ni:1:10:20");
+ resetDataLayout("e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-f128:64-n32:64-"
+ "S128-ni:1:10:20");
else
- resetDataLayout("e-m:e-p:32:32-i64:64-n32:64-S128-ni:1:10:20");
+ resetDataLayout(
+ "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-n32:64-S128-ni:1:10:20");
}
protected:
@@ -172,9 +200,11 @@ public:
PtrDiffType = SignedLong;
IntPtrType = SignedLong;
if (T.isOSEmscripten())
- resetDataLayout("e-m:e-p:64:64-i64:64-f128:64-n32:64-S128-ni:1:10:20");
+ resetDataLayout("e-m:e-p:64:64-p10:8:8-p20:8:8-i64:64-f128:64-n32:64-"
+ "S128-ni:1:10:20");
else
- resetDataLayout("e-m:e-p:64:64-i64:64-n32:64-S128-ni:1:10:20");
+ resetDataLayout(
+ "e-m:e-p:64:64-p10:8:8-p20:8:8-i64:64-n32:64-S128-ni:1:10:20");
}
protected:
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp
index 9db96c20250f..a68b662d9401 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp
@@ -17,26 +17,27 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/X86TargetParser.h"
+#include "llvm/TargetParser/X86TargetParser.h"
+#include <optional>
namespace clang {
namespace targets {
-const Builtin::Info BuiltinInfoX86[] = {
+static constexpr Builtin::Info BuiltinInfoX86[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE},
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE) \
- {#ID, TYPE, ATTRS, HEADER, LANGS, FEATURE},
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::HEADER, LANGS},
#include "clang/Basic/BuiltinsX86.def"
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE},
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE) \
- {#ID, TYPE, ATTRS, HEADER, LANGS, FEATURE},
+ {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::HEADER, LANGS},
#include "clang/Basic/BuiltinsX86_64.def"
};
@@ -118,6 +119,14 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabled(Features, F, true);
std::vector<std::string> UpdatedFeaturesVec;
+ std::vector<std::string> UpdatedAVX10FeaturesVec;
+ enum { FE_NOSET = -1, FE_FALSE, FE_TRUE };
+ int HasEVEX512 = FE_NOSET;
+ bool HasAVX512F = Features.lookup("avx512f");
+ bool HasAVX10 = Features.lookup("avx10.1-256");
+ bool HasAVX10_512 = Features.lookup("avx10.1-512");
+ std::string LastAVX10;
+ std::string LastAVX512;
for (const auto &Feature : FeaturesVec) {
// Expand general-regs-only to -x86, -mmx and -sse
if (Feature == "+general-regs-only") {
@@ -127,8 +136,51 @@ bool X86TargetInfo::initFeatureMap(
continue;
}
+ if (Feature.substr(1, 6) == "avx10.") {
+ if (Feature[0] == '+') {
+ HasAVX10 = true;
+ if (Feature.substr(Feature.size() - 3, 3) == "512")
+ HasAVX10_512 = true;
+ LastAVX10 = Feature;
+ } else if (HasAVX10 && Feature == "-avx10.1-256") {
+ HasAVX10 = false;
+ HasAVX10_512 = false;
+ } else if (HasAVX10_512 && Feature == "-avx10.1-512") {
+ HasAVX10_512 = false;
+ }
+ // Postpone AVX10 features handling after AVX512 settled.
+ UpdatedAVX10FeaturesVec.push_back(Feature);
+ continue;
+ } else if (!HasAVX512F && Feature.substr(0, 7) == "+avx512") {
+ HasAVX512F = true;
+ LastAVX512 = Feature;
+ } else if (HasAVX512F && Feature == "-avx512f") {
+ HasAVX512F = false;
+ } else if (HasEVEX512 != FE_TRUE && Feature == "+evex512") {
+ HasEVEX512 = FE_TRUE;
+ continue;
+ } else if (HasEVEX512 != FE_FALSE && Feature == "-evex512") {
+ HasEVEX512 = FE_FALSE;
+ continue;
+ }
+
UpdatedFeaturesVec.push_back(Feature);
}
+ llvm::append_range(UpdatedFeaturesVec, UpdatedAVX10FeaturesVec);
+ // HasEVEX512 is a three-states flag. We need to turn it into [+-]evex512
+ // according to other features.
+ if (HasAVX512F) {
+ UpdatedFeaturesVec.push_back(HasEVEX512 == FE_FALSE ? "-evex512"
+ : "+evex512");
+ if (HasAVX10 && !HasAVX10_512 && HasEVEX512 != FE_FALSE)
+ Diags.Report(diag::warn_invalid_feature_combination)
+ << LastAVX512 + " " + LastAVX10 + "; will be promoted to avx10.1-512";
+ } else if (HasAVX10) {
+ if (HasEVEX512 != FE_NOSET)
+ Diags.Report(diag::warn_invalid_feature_combination)
+ << LastAVX10 + (HasEVEX512 == FE_TRUE ? " +evex512" : " -evex512");
+ UpdatedFeaturesVec.push_back(HasAVX10_512 ? "+evex512" : "-evex512");
+ }
if (!TargetInfo::initFeatureMap(Features, Diags, CPU, UpdatedFeaturesVec))
return false;
@@ -139,22 +191,28 @@ bool X86TargetInfo::initFeatureMap(
// Enable popcnt if sse4.2 is enabled and popcnt is not explicitly disabled.
auto I = Features.find("sse4.2");
if (I != Features.end() && I->getValue() &&
- llvm::find(UpdatedFeaturesVec, "-popcnt") == UpdatedFeaturesVec.end())
+ !llvm::is_contained(UpdatedFeaturesVec, "-popcnt"))
Features["popcnt"] = true;
// Additionally, if SSE is enabled and mmx is not explicitly disabled,
// then enable MMX.
I = Features.find("sse");
if (I != Features.end() && I->getValue() &&
- llvm::find(UpdatedFeaturesVec, "-mmx") == UpdatedFeaturesVec.end())
+ !llvm::is_contained(UpdatedFeaturesVec, "-mmx"))
Features["mmx"] = true;
// Enable xsave if avx is enabled and xsave is not explicitly disabled.
I = Features.find("avx");
if (I != Features.end() && I->getValue() &&
- llvm::find(UpdatedFeaturesVec, "-xsave") == UpdatedFeaturesVec.end())
+ !llvm::is_contained(UpdatedFeaturesVec, "-xsave"))
Features["xsave"] = true;
+ // Enable CRC32 if SSE4.2 is enabled and CRC32 is not explicitly disabled.
+ I = Features.find("sse4.2");
+ if (I != Features.end() && I->getValue() &&
+ !llvm::is_contained(UpdatedFeaturesVec, "-crc32"))
+ Features["crc32"] = true;
+
return true;
}
@@ -221,6 +279,12 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasF16C = true;
} else if (Feature == "+gfni") {
HasGFNI = true;
+ } else if (Feature == "+evex512") {
+ HasEVEX512 = true;
+ } else if (Feature == "+avx10.1-256") {
+ HasAVX10_1 = true;
+ } else if (Feature == "+avx10.1-512") {
+ HasAVX10_1_512 = true;
} else if (Feature == "+avx512cd") {
HasAVX512CD = true;
} else if (Feature == "+avx512vpopcntdq") {
@@ -231,8 +295,13 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasAVX512BF16 = true;
} else if (Feature == "+avx512er") {
HasAVX512ER = true;
+ Diags.Report(diag::warn_knl_knm_isa_support_removed);
+ } else if (Feature == "+avx512fp16") {
+ HasAVX512FP16 = true;
+ HasLegalHalfType = true;
} else if (Feature == "+avx512pf") {
HasAVX512PF = true;
+ Diags.Report(diag::warn_knl_knm_isa_support_removed);
} else if (Feature == "+avx512dq") {
HasAVX512DQ = true;
} else if (Feature == "+avx512bitalg") {
@@ -251,8 +320,14 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasAVX512VP2INTERSECT = true;
} else if (Feature == "+sha") {
HasSHA = true;
+ } else if (Feature == "+sha512") {
+ HasSHA512 = true;
} else if (Feature == "+shstk") {
HasSHSTK = true;
+ } else if (Feature == "+sm3") {
+ HasSM3 = true;
+ } else if (Feature == "+sm4") {
+ HasSM4 = true;
} else if (Feature == "+movbe") {
HasMOVBE = true;
} else if (Feature == "+sgx") {
@@ -281,14 +356,19 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasCLWB = true;
} else if (Feature == "+wbnoinvd") {
HasWBNOINVD = true;
+ } else if (Feature == "+prefetchi") {
+ HasPREFETCHI = true;
} else if (Feature == "+prefetchwt1") {
HasPREFETCHWT1 = true;
+ Diags.Report(diag::warn_knl_knm_isa_support_removed);
} else if (Feature == "+clzero") {
HasCLZERO = true;
} else if (Feature == "+cldemote") {
HasCLDEMOTE = true;
} else if (Feature == "+rdpid") {
HasRDPID = true;
+ } else if (Feature == "+rdpru") {
+ HasRDPRU = true;
} else if (Feature == "+kl") {
HasKL = true;
} else if (Feature == "+widekl") {
@@ -315,18 +395,54 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasHRESET = true;
} else if (Feature == "+amx-bf16") {
HasAMXBF16 = true;
+ } else if (Feature == "+amx-fp16") {
+ HasAMXFP16 = true;
} else if (Feature == "+amx-int8") {
HasAMXINT8 = true;
} else if (Feature == "+amx-tile") {
HasAMXTILE = true;
+ } else if (Feature == "+amx-complex") {
+ HasAMXCOMPLEX = true;
+ } else if (Feature == "+cmpccxadd") {
+ HasCMPCCXADD = true;
+ } else if (Feature == "+raoint") {
+ HasRAOINT = true;
+ } else if (Feature == "+avxifma") {
+ HasAVXIFMA = true;
+ } else if (Feature == "+avxneconvert") {
+ HasAVXNECONVERT= true;
} else if (Feature == "+avxvnni") {
HasAVXVNNI = true;
+ } else if (Feature == "+avxvnniint16") {
+ HasAVXVNNIINT16 = true;
+ } else if (Feature == "+avxvnniint8") {
+ HasAVXVNNIINT8 = true;
} else if (Feature == "+serialize") {
HasSERIALIZE = true;
} else if (Feature == "+tsxldtrk") {
HasTSXLDTRK = true;
} else if (Feature == "+uintr") {
HasUINTR = true;
+ } else if (Feature == "+usermsr") {
+ HasUSERMSR = true;
+ } else if (Feature == "+crc32") {
+ HasCRC32 = true;
+ } else if (Feature == "+x87") {
+ HasX87 = true;
+ } else if (Feature == "+fullbf16") {
+ HasFullBFloat16 = true;
+ } else if (Feature == "+egpr") {
+ HasEGPR = true;
+ } else if (Feature == "+push2pop2") {
+ HasPush2Pop2 = true;
+ } else if (Feature == "+ppx") {
+ HasPPX = true;
+ } else if (Feature == "+ndd") {
+ HasNDD = true;
+ } else if (Feature == "+ccmp") {
+ HasCCMP = true;
+ } else if (Feature == "+cf") {
+ HasCF = true;
}
X86SSEEnum Level = llvm::StringSwitch<X86SSEEnum>(Feature)
@@ -342,6 +458,19 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
.Default(NoSSE);
SSELevel = std::max(SSELevel, Level);
+ HasFloat16 = SSELevel >= SSE2;
+
+ // X86 target has bfloat16 emulation support in the backend, where
+ // bfloat16 is treated as a 32-bit float, arithmetic operations are
+ // performed in 32-bit, and the result is converted back to bfloat16.
+ // Truncation and extension between bfloat16 and 32-bit float are supported
+ // by the compiler-rt library. However, native bfloat16 support is currently
+ // not available in the X86 target. Hence, HasFullBFloat16 will be false
+ // until native bfloat16 support is available. HasFullBFloat16 is used to
+ // determine whether to automatically use excess floating point precision
+ // for bfloat16 arithmetic operations in the front-end.
+ HasBFloat16 = SSELevel >= SSE2;
+
MMX3DNowEnum ThreeDNowLevel = llvm::StringSwitch<MMX3DNowEnum>(Feature)
.Case("+3dnowa", AMD3DNowAthlon)
.Case("+3dnow", AMD3DNow)
@@ -366,8 +495,11 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
return false;
}
- SimdDefaultAlign =
- hasFeature("avx512f") ? 512 : hasFeature("avx") ? 256 : 128;
+ // FIXME: We should allow long double type on 32-bits to match with GCC.
+ // This requires backend to be able to lower f80 without x87 first.
+ if (!HasX87 && LongDoubleFormat == &llvm::APFloat::x87DoubleExtended())
+ HasLongDouble = false;
+
return true;
}
@@ -422,7 +554,7 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_PentiumMMX:
Builder.defineMacro("__pentium_mmx__");
Builder.defineMacro("__tune_pentium_mmx__");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CK_i586:
case CK_Pentium:
defineCPUMacros(Builder, "i586");
@@ -431,11 +563,11 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_Pentium3:
case CK_PentiumM:
Builder.defineMacro("__tune_pentium3__");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CK_Pentium2:
case CK_C3_2:
Builder.defineMacro("__tune_pentium2__");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CK_PentiumPro:
case CK_i686:
defineCPUMacros(Builder, "i686");
@@ -468,6 +600,8 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_Tremont:
defineCPUMacros(Builder, "tremont");
break;
+ // Gracemont and later atom-cores use P-core cpu macros.
+ case CK_Gracemont:
case CK_Nehalem:
case CK_Westmere:
case CK_SandyBridge:
@@ -485,6 +619,18 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_Tigerlake:
case CK_SapphireRapids:
case CK_Alderlake:
+ case CK_Raptorlake:
+ case CK_Meteorlake:
+ case CK_Arrowlake:
+ case CK_ArrowlakeS:
+ case CK_Lunarlake:
+ case CK_Pantherlake:
+ case CK_Sierraforest:
+ case CK_Grandridge:
+ case CK_Graniterapids:
+ case CK_GraniterapidsD:
+ case CK_Emeraldrapids:
+ case CK_Clearwaterforest:
// FIXME: Historically, we defined this legacy name, it would be nice to
// remove it at some point. We've never exposed fine-grained names for
// recent primary x86 CPUs, and we should keep it that way.
@@ -503,7 +649,7 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_K6_2:
Builder.defineMacro("__k6_2__");
Builder.defineMacro("__tune_k6_2__");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CK_K6_3:
if (CPU != CK_K6_2) { // In case of fallthrough
// FIXME: GCC may be enabling these in cases where some other k6
@@ -512,7 +658,7 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__k6_3__");
Builder.defineMacro("__tune_k6_3__");
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CK_K6:
defineCPUMacros(Builder, "k6");
break;
@@ -563,6 +709,9 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_ZNVER3:
defineCPUMacros(Builder, "znver3");
break;
+ case CK_ZNVER4:
+ defineCPUMacros(Builder, "znver4");
+ break;
case CK_Geode:
defineCPUMacros(Builder, "geode");
break;
@@ -638,13 +787,13 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
switch (XOPLevel) {
case XOP:
Builder.defineMacro("__XOP__");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case FMA4:
Builder.defineMacro("__FMA4__");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case SSE4A:
Builder.defineMacro("__SSE4A__");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case NoXOP:
break;
}
@@ -658,6 +807,12 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasGFNI)
Builder.defineMacro("__GFNI__");
+ if (HasEVEX512)
+ Builder.defineMacro("__EVEX512__");
+ if (HasAVX10_1)
+ Builder.defineMacro("__AVX10_1__");
+ if (HasAVX10_1_512)
+ Builder.defineMacro("__AVX10_1_512__");
if (HasAVX512CD)
Builder.defineMacro("__AVX512CD__");
if (HasAVX512VPOPCNTDQ)
@@ -668,6 +823,8 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__AVX512BF16__");
if (HasAVX512ER)
Builder.defineMacro("__AVX512ER__");
+ if (HasAVX512FP16)
+ Builder.defineMacro("__AVX512FP16__");
if (HasAVX512PF)
Builder.defineMacro("__AVX512PF__");
if (HasAVX512DQ)
@@ -676,8 +833,10 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__AVX512BITALG__");
if (HasAVX512BW)
Builder.defineMacro("__AVX512BW__");
- if (HasAVX512VL)
+ if (HasAVX512VL) {
Builder.defineMacro("__AVX512VL__");
+ Builder.defineMacro("__EVEX256__");
+ }
if (HasAVX512VBMI)
Builder.defineMacro("__AVX512VBMI__");
if (HasAVX512VBMI2)
@@ -688,6 +847,8 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__AVX512VP2INTERSECT__");
if (HasSHA)
Builder.defineMacro("__SHA__");
+ if (HasSHA512)
+ Builder.defineMacro("__SHA512__");
if (HasFXSR)
Builder.defineMacro("__FXSR__");
@@ -711,6 +872,12 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__SHSTK__");
if (HasSGX)
Builder.defineMacro("__SGX__");
+ if (HasSM3)
+ Builder.defineMacro("__SM3__");
+ if (HasSM4)
+ Builder.defineMacro("__SM4__");
+ if (HasPREFETCHI)
+ Builder.defineMacro("__PREFETCHI__");
if (HasPREFETCHWT1)
Builder.defineMacro("__PREFETCHWT1__");
if (HasCLZERO)
@@ -721,6 +888,8 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__WIDEKL__");
if (HasRDPID)
Builder.defineMacro("__RDPID__");
+ if (HasRDPRU)
+ Builder.defineMacro("__RDPRU__");
if (HasCLDEMOTE)
Builder.defineMacro("__CLDEMOTE__");
if (HasWAITPKG)
@@ -740,51 +909,83 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasHRESET)
Builder.defineMacro("__HRESET__");
if (HasAMXTILE)
- Builder.defineMacro("__AMXTILE__");
+ Builder.defineMacro("__AMX_TILE__");
if (HasAMXINT8)
- Builder.defineMacro("__AMXINT8__");
+ Builder.defineMacro("__AMX_INT8__");
if (HasAMXBF16)
- Builder.defineMacro("__AMXBF16__");
+ Builder.defineMacro("__AMX_BF16__");
+ if (HasAMXFP16)
+ Builder.defineMacro("__AMX_FP16__");
+ if (HasAMXCOMPLEX)
+ Builder.defineMacro("__AMX_COMPLEX__");
+ if (HasCMPCCXADD)
+ Builder.defineMacro("__CMPCCXADD__");
+ if (HasRAOINT)
+ Builder.defineMacro("__RAOINT__");
+ if (HasAVXIFMA)
+ Builder.defineMacro("__AVXIFMA__");
+ if (HasAVXNECONVERT)
+ Builder.defineMacro("__AVXNECONVERT__");
if (HasAVXVNNI)
Builder.defineMacro("__AVXVNNI__");
+ if (HasAVXVNNIINT16)
+ Builder.defineMacro("__AVXVNNIINT16__");
+ if (HasAVXVNNIINT8)
+ Builder.defineMacro("__AVXVNNIINT8__");
if (HasSERIALIZE)
Builder.defineMacro("__SERIALIZE__");
if (HasTSXLDTRK)
Builder.defineMacro("__TSXLDTRK__");
if (HasUINTR)
Builder.defineMacro("__UINTR__");
+ if (HasUSERMSR)
+ Builder.defineMacro("__USERMSR__");
+ if (HasCRC32)
+ Builder.defineMacro("__CRC32__");
+ if (HasEGPR)
+ Builder.defineMacro("__EGPR__");
+ if (HasPush2Pop2)
+ Builder.defineMacro("__PUSH2POP2__");
+ if (HasPPX)
+ Builder.defineMacro("__PPX__");
+ if (HasNDD)
+ Builder.defineMacro("__NDD__");
+ if (HasCCMP)
+ Builder.defineMacro("__CCMP__");
+ if (HasCF)
+ Builder.defineMacro("__CF__");
// Each case falls through to the previous one here.
switch (SSELevel) {
case AVX512F:
Builder.defineMacro("__AVX512F__");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AVX2:
Builder.defineMacro("__AVX2__");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AVX:
Builder.defineMacro("__AVX__");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case SSE42:
Builder.defineMacro("__SSE4_2__");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case SSE41:
Builder.defineMacro("__SSE4_1__");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case SSSE3:
Builder.defineMacro("__SSSE3__");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case SSE3:
Builder.defineMacro("__SSE3__");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case SSE2:
Builder.defineMacro("__SSE2__");
Builder.defineMacro("__SSE2_MATH__"); // -mfp-math=sse always implied.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case SSE1:
Builder.defineMacro("__SSE__");
Builder.defineMacro("__SSE_MATH__"); // -mfp-math=sse always implied.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case NoSSE:
break;
}
@@ -814,13 +1015,13 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
switch (MMX3DNowLevel) {
case AMD3DNowAthlon:
Builder.defineMacro("__3dNOW_A__");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AMD3DNow:
Builder.defineMacro("__3dNOW__");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case MMX:
Builder.defineMacro("__MMX__");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case NoMMX3DNow:
break;
}
@@ -846,9 +1047,13 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("adx", true)
.Case("aes", true)
.Case("amx-bf16", true)
+ .Case("amx-complex", true)
+ .Case("amx-fp16", true)
.Case("amx-int8", true)
.Case("amx-tile", true)
.Case("avx", true)
+ .Case("avx10.1-256", true)
+ .Case("avx10.1-512", true)
.Case("avx2", true)
.Case("avx512f", true)
.Case("avx512cd", true)
@@ -856,6 +1061,7 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("avx512vnni", true)
.Case("avx512bf16", true)
.Case("avx512er", true)
+ .Case("avx512fp16", true)
.Case("avx512pf", true)
.Case("avx512dq", true)
.Case("avx512bitalg", true)
@@ -865,15 +1071,22 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("avx512vbmi2", true)
.Case("avx512ifma", true)
.Case("avx512vp2intersect", true)
+ .Case("avxifma", true)
+ .Case("avxneconvert", true)
.Case("avxvnni", true)
+ .Case("avxvnniint16", true)
+ .Case("avxvnniint8", true)
.Case("bmi", true)
.Case("bmi2", true)
.Case("cldemote", true)
.Case("clflushopt", true)
.Case("clwb", true)
.Case("clzero", true)
+ .Case("cmpccxadd", true)
+ .Case("crc32", true)
.Case("cx16", true)
.Case("enqcmd", true)
+ .Case("evex512", true)
.Case("f16c", true)
.Case("fma", true)
.Case("fma4", true)
@@ -896,10 +1109,13 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("pconfig", true)
.Case("pku", true)
.Case("popcnt", true)
+ .Case("prefetchi", true)
.Case("prefetchwt1", true)
.Case("prfchw", true)
.Case("ptwrite", true)
+ .Case("raoint", true)
.Case("rdpid", true)
+ .Case("rdpru", true)
.Case("rdrnd", true)
.Case("rdseed", true)
.Case("rtm", true)
@@ -907,7 +1123,10 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("serialize", true)
.Case("sgx", true)
.Case("sha", true)
+ .Case("sha512", true)
.Case("shstk", true)
+ .Case("sm3", true)
+ .Case("sm4", true)
.Case("sse", true)
.Case("sse2", true)
.Case("sse3", true)
@@ -919,6 +1138,7 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("tbm", true)
.Case("tsxldtrk", true)
.Case("uintr", true)
+ .Case("usermsr", true)
.Case("vaes", true)
.Case("vpclmulqdq", true)
.Case("wbnoinvd", true)
@@ -929,6 +1149,12 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("xsavec", true)
.Case("xsaves", true)
.Case("xsaveopt", true)
+ .Case("egpr", true)
+ .Case("push2pop2", true)
+ .Case("ppx", true)
+ .Case("ndd", true)
+ .Case("ccmp", true)
+ .Case("cf", true)
.Default(false);
}
@@ -937,10 +1163,13 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("adx", HasADX)
.Case("aes", HasAES)
.Case("amx-bf16", HasAMXBF16)
+ .Case("amx-complex", HasAMXCOMPLEX)
+ .Case("amx-fp16", HasAMXFP16)
.Case("amx-int8", HasAMXINT8)
.Case("amx-tile", HasAMXTILE)
- .Case("avxvnni", HasAVXVNNI)
.Case("avx", SSELevel >= AVX)
+ .Case("avx10.1-256", HasAVX10_1)
+ .Case("avx10.1-512", HasAVX10_1_512)
.Case("avx2", SSELevel >= AVX2)
.Case("avx512f", SSELevel >= AVX512F)
.Case("avx512cd", HasAVX512CD)
@@ -948,6 +1177,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("avx512vnni", HasAVX512VNNI)
.Case("avx512bf16", HasAVX512BF16)
.Case("avx512er", HasAVX512ER)
+ .Case("avx512fp16", HasAVX512FP16)
.Case("avx512pf", HasAVX512PF)
.Case("avx512dq", HasAVX512DQ)
.Case("avx512bitalg", HasAVX512BITALG)
@@ -957,15 +1187,23 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("avx512vbmi2", HasAVX512VBMI2)
.Case("avx512ifma", HasAVX512IFMA)
.Case("avx512vp2intersect", HasAVX512VP2INTERSECT)
+ .Case("avxifma", HasAVXIFMA)
+ .Case("avxneconvert", HasAVXNECONVERT)
+ .Case("avxvnni", HasAVXVNNI)
+ .Case("avxvnniint16", HasAVXVNNIINT16)
+ .Case("avxvnniint8", HasAVXVNNIINT8)
.Case("bmi", HasBMI)
.Case("bmi2", HasBMI2)
.Case("cldemote", HasCLDEMOTE)
.Case("clflushopt", HasCLFLUSHOPT)
.Case("clwb", HasCLWB)
.Case("clzero", HasCLZERO)
+ .Case("cmpccxadd", HasCMPCCXADD)
+ .Case("crc32", HasCRC32)
.Case("cx8", HasCX8)
.Case("cx16", HasCX16)
.Case("enqcmd", HasENQCMD)
+ .Case("evex512", HasEVEX512)
.Case("f16c", HasF16C)
.Case("fma", HasFMA)
.Case("fma4", XOPLevel >= FMA4)
@@ -989,10 +1227,13 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("pconfig", HasPCONFIG)
.Case("pku", HasPKU)
.Case("popcnt", HasPOPCNT)
+ .Case("prefetchi", HasPREFETCHI)
.Case("prefetchwt1", HasPREFETCHWT1)
.Case("prfchw", HasPRFCHW)
.Case("ptwrite", HasPTWRITE)
+ .Case("raoint", HasRAOINT)
.Case("rdpid", HasRDPID)
+ .Case("rdpru", HasRDPRU)
.Case("rdrnd", HasRDRND)
.Case("rdseed", HasRDSEED)
.Case("retpoline-external-thunk", HasRetpolineExternalThunk)
@@ -1001,7 +1242,10 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("serialize", HasSERIALIZE)
.Case("sgx", HasSGX)
.Case("sha", HasSHA)
+ .Case("sha512", HasSHA512)
.Case("shstk", HasSHSTK)
+ .Case("sm3", HasSM3)
+ .Case("sm4", HasSM4)
.Case("sse", SSELevel >= SSE1)
.Case("sse2", SSELevel >= SSE2)
.Case("sse3", SSELevel >= SSE3)
@@ -1012,6 +1256,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("tbm", HasTBM)
.Case("tsxldtrk", HasTSXLDTRK)
.Case("uintr", HasUINTR)
+ .Case("usermsr", HasUSERMSR)
.Case("vaes", HasVAES)
.Case("vpclmulqdq", HasVPCLMULQDQ)
.Case("wbnoinvd", HasWBNOINVD)
@@ -1019,11 +1264,19 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("x86", true)
.Case("x86_32", getTriple().getArch() == llvm::Triple::x86)
.Case("x86_64", getTriple().getArch() == llvm::Triple::x86_64)
+ .Case("x87", HasX87)
.Case("xop", XOPLevel >= XOP)
.Case("xsave", HasXSAVE)
.Case("xsavec", HasXSAVEC)
.Case("xsaves", HasXSAVES)
.Case("xsaveopt", HasXSAVEOPT)
+ .Case("fullbf16", HasFullBFloat16)
+ .Case("egpr", HasEGPR)
+ .Case("push2pop2", HasPush2Pop2)
+ .Case("ppx", HasPPX)
+ .Case("ndd", HasNDD)
+ .Case("ccmp", HasCCMP)
+ .Case("cf", HasCF)
.Default(false);
}
@@ -1034,35 +1287,23 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
// X86TargetInfo::hasFeature for a somewhat comprehensive list).
bool X86TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
return llvm::StringSwitch<bool>(FeatureStr)
-#define X86_FEATURE_COMPAT(ENUM, STR) .Case(STR, true)
-#include "llvm/Support/X86TargetParser.def"
+#define X86_FEATURE_COMPAT(ENUM, STR, PRIORITY) .Case(STR, true)
+#define X86_MICROARCH_LEVEL(ENUM, STR, PRIORITY) .Case(STR, true)
+#include "llvm/TargetParser/X86TargetParser.def"
.Default(false);
}
static llvm::X86::ProcessorFeatures getFeature(StringRef Name) {
return llvm::StringSwitch<llvm::X86::ProcessorFeatures>(Name)
-#define X86_FEATURE_COMPAT(ENUM, STR) .Case(STR, llvm::X86::FEATURE_##ENUM)
-#include "llvm/Support/X86TargetParser.def"
+#define X86_FEATURE_COMPAT(ENUM, STR, PRIORITY) \
+ .Case(STR, llvm::X86::FEATURE_##ENUM)
+
+#include "llvm/TargetParser/X86TargetParser.def"
;
// Note, this function should only be used after ensuring the value is
// correct, so it asserts if the value is out of range.
}
-static unsigned getFeaturePriority(llvm::X86::ProcessorFeatures Feat) {
- enum class FeatPriority {
-#define FEATURE(FEAT) FEAT,
-#include "clang/Basic/X86Target.def"
- };
- switch (Feat) {
-#define FEATURE(FEAT) \
- case llvm::X86::FEAT: \
- return static_cast<unsigned>(FeatPriority::FEAT);
-#include "clang/Basic/X86Target.def"
- default:
- llvm_unreachable("No Feature Priority for non-CPUSupports Features");
- }
-}
-
unsigned X86TargetInfo::multiVersionSortPriority(StringRef Name) const {
// Valid CPUs have a 'key feature' that compares just better than its key
// feature.
@@ -1079,35 +1320,19 @@ unsigned X86TargetInfo::multiVersionSortPriority(StringRef Name) const {
}
bool X86TargetInfo::validateCPUSpecificCPUDispatch(StringRef Name) const {
- return llvm::StringSwitch<bool>(Name)
-#define CPU_SPECIFIC(NAME, MANGLING, FEATURES) .Case(NAME, true)
-#define CPU_SPECIFIC_ALIAS(NEW_NAME, NAME) .Case(NEW_NAME, true)
-#include "clang/Basic/X86Target.def"
- .Default(false);
-}
-
-static StringRef CPUSpecificCPUDispatchNameDealias(StringRef Name) {
- return llvm::StringSwitch<StringRef>(Name)
-#define CPU_SPECIFIC_ALIAS(NEW_NAME, NAME) .Case(NEW_NAME, NAME)
-#include "clang/Basic/X86Target.def"
- .Default(Name);
+ return llvm::X86::validateCPUSpecificCPUDispatch(Name);
}
char X86TargetInfo::CPUSpecificManglingCharacter(StringRef Name) const {
- return llvm::StringSwitch<char>(CPUSpecificCPUDispatchNameDealias(Name))
-#define CPU_SPECIFIC(NAME, MANGLING, FEATURES) .Case(NAME, MANGLING)
-#include "clang/Basic/X86Target.def"
- .Default(0);
+ return llvm::X86::getCPUDispatchMangling(Name);
}
void X86TargetInfo::getCPUSpecificCPUDispatchFeatures(
StringRef Name, llvm::SmallVectorImpl<StringRef> &Features) const {
- StringRef WholeList =
- llvm::StringSwitch<StringRef>(CPUSpecificCPUDispatchNameDealias(Name))
-#define CPU_SPECIFIC(NAME, MANGLING, FEATURES) .Case(NAME, FEATURES)
-#include "clang/Basic/X86Target.def"
- .Default("");
- WholeList.split(Features, ',', /*MaxSplit=*/-1, /*KeepEmpty=*/false);
+ SmallVector<StringRef, 32> TargetCPUFeatures;
+ llvm::X86::getFeaturesForCPU(Name, TargetCPUFeatures, true);
+ for (auto &F : TargetCPUFeatures)
+ Features.push_back(F);
}
// We can't use a generic validation scheme for the cpus accepted here
@@ -1119,12 +1344,13 @@ bool X86TargetInfo::validateCpuIs(StringRef FeatureStr) const {
#define X86_VENDOR(ENUM, STRING) .Case(STRING, true)
#define X86_CPU_TYPE_ALIAS(ENUM, ALIAS) .Case(ALIAS, true)
#define X86_CPU_TYPE(ENUM, STR) .Case(STR, true)
+#define X86_CPU_SUBTYPE_ALIAS(ENUM, ALIAS) .Case(ALIAS, true)
#define X86_CPU_SUBTYPE(ENUM, STR) .Case(STR, true)
-#include "llvm/Support/X86TargetParser.def"
+#include "llvm/TargetParser/X86TargetParser.def"
.Default(false);
}
-static unsigned matchAsmCCConstraint(const char *&Name) {
+static unsigned matchAsmCCConstraint(const char *Name) {
auto RV = llvm::StringSwitch<unsigned>(Name)
.Case("@cca", 4)
.Case("@ccae", 5)
@@ -1192,6 +1418,14 @@ bool X86TargetInfo::validateAsmConstraint(
case 'O':
Info.setRequiresImmediate(0, 127);
return true;
+ case 'W':
+ switch (*++Name) {
+ default:
+ return false;
+ case 's':
+ Info.setAllowsRegister();
+ return true;
+ }
// Register constraints.
case 'Y': // 'Y' is the first character for several 2-character constraints.
// Shift the pointer to the second character of the constraint.
@@ -1264,7 +1498,7 @@ bool X86TargetInfo::validateAsmConstraint(
// | Sandy Bridge | 64 | https://en.wikipedia.org/wiki/Sandy_Bridge and https://www.7-cpu.com/cpu/SandyBridge.html |
// | Ivy Bridge | 64 | https://blog.stuffedcow.net/2013/01/ivb-cache-replacement/ and https://www.7-cpu.com/cpu/IvyBridge.html |
// | Haswell | 64 | https://www.7-cpu.com/cpu/Haswell.html |
-// | Boadwell | 64 | https://www.7-cpu.com/cpu/Broadwell.html |
+// | Broadwell | 64 | https://www.7-cpu.com/cpu/Broadwell.html |
// | Skylake (including skylake-avx512) | 64 | https://www.nas.nasa.gov/hecc/support/kb/skylake-processors_550.html "Cache Hierarchy" |
// | Cascade Lake | 64 | https://www.nas.nasa.gov/hecc/support/kb/cascade-lake-processors_579.html "Cache Hierarchy" |
// | Skylake | 64 | https://en.wikichip.org/wiki/intel/microarchitectures/kaby_lake "Memory Hierarchy" |
@@ -1272,7 +1506,7 @@ bool X86TargetInfo::validateAsmConstraint(
// | Knights Landing | 64 | https://software.intel.com/en-us/articles/intel-xeon-phi-processor-7200-family-memory-management-optimizations "The Intel® Xeon Phi™ Processor Architecture" |
// | Knights Mill | 64 | https://software.intel.com/sites/default/files/managed/9e/bc/64-ia-32-architectures-optimization-manual.pdf?countrylabel=Colombia "2.5.5.2 L1 DCache " |
// +------------------------------------+-------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+
-Optional<unsigned> X86TargetInfo::getCPUCacheLineSize() const {
+std::optional<unsigned> X86TargetInfo::getCPUCacheLineSize() const {
using namespace llvm::X86;
switch (CPU) {
// i386
@@ -1315,6 +1549,7 @@ Optional<unsigned> X86TargetInfo::getCPUCacheLineSize() const {
case CK_Goldmont:
case CK_GoldmontPlus:
case CK_Tremont:
+ case CK_Gracemont:
case CK_Westmere:
case CK_SandyBridge:
@@ -1333,6 +1568,18 @@ Optional<unsigned> X86TargetInfo::getCPUCacheLineSize() const {
case CK_Rocketlake:
case CK_IcelakeServer:
case CK_Alderlake:
+ case CK_Raptorlake:
+ case CK_Meteorlake:
+ case CK_Arrowlake:
+ case CK_ArrowlakeS:
+ case CK_Lunarlake:
+ case CK_Pantherlake:
+ case CK_Sierraforest:
+ case CK_Grandridge:
+ case CK_Graniterapids:
+ case CK_GraniterapidsD:
+ case CK_Emeraldrapids:
+ case CK_Clearwaterforest:
case CK_KNL:
case CK_KNM:
// K7
@@ -1354,6 +1601,7 @@ Optional<unsigned> X86TargetInfo::getCPUCacheLineSize() const {
case CK_ZNVER1:
case CK_ZNVER2:
case CK_ZNVER3:
+ case CK_ZNVER4:
// Deprecated
case CK_x86_64:
case CK_x86_64_v2:
@@ -1367,7 +1615,7 @@ Optional<unsigned> X86TargetInfo::getCPUCacheLineSize() const {
// The following currently have unknown cache line sizes (but they are probably all 64):
// Core
case CK_None:
- return None;
+ return std::nullopt;
}
llvm_unreachable("Unknown CPU kind");
}
@@ -1376,8 +1624,7 @@ bool X86TargetInfo::validateOutputSize(const llvm::StringMap<bool> &FeatureMap,
StringRef Constraint,
unsigned Size) const {
// Strip off constraint modifiers.
- while (Constraint[0] == '=' || Constraint[0] == '+' || Constraint[0] == '&')
- Constraint = Constraint.substr(1);
+ Constraint = Constraint.ltrim("=+&");
return validateOperandSize(FeatureMap, Constraint, Size);
}
@@ -1413,8 +1660,9 @@ bool X86TargetInfo::validateOperandSize(const llvm::StringMap<bool> &FeatureMap,
return Size <= 64;
case 'z':
// XMM0/YMM/ZMM0
- if (hasFeatureEnabled(FeatureMap, "avx512f"))
- // ZMM0 can be used if target supports AVX512F.
+ if (hasFeatureEnabled(FeatureMap, "avx512f") &&
+ hasFeatureEnabled(FeatureMap, "evex512"))
+ // ZMM0 can be used if target supports AVX512F and EVEX512 is set.
return Size <= 512U;
else if (hasFeatureEnabled(FeatureMap, "avx"))
// YMM0 can be used if target supports AVX.
@@ -1433,8 +1681,10 @@ bool X86TargetInfo::validateOperandSize(const llvm::StringMap<bool> &FeatureMap,
break;
case 'v':
case 'x':
- if (hasFeatureEnabled(FeatureMap, "avx512f"))
- // 512-bit zmm registers can be used if target supports AVX512F.
+ if (hasFeatureEnabled(FeatureMap, "avx512f") &&
+ hasFeatureEnabled(FeatureMap, "evex512"))
+ // 512-bit zmm registers can be used if target supports AVX512F and
+ // EVEX512 is set.
return Size <= 512U;
else if (hasFeatureEnabled(FeatureMap, "avx"))
// 256-bit ymm registers can be used if target supports AVX.
@@ -1467,12 +1717,15 @@ std::string X86TargetInfo::convertConstraint(const char *&Constraint) const {
return std::string("{si}");
case 'D':
return std::string("{di}");
- case 'p': // address
- return std::string("im");
+ case 'p': // Keep 'p' constraint (address).
+ return std::string("p");
case 't': // top of floating point stack.
return std::string("{st}");
case 'u': // second from top of floating point stack.
return std::string("{st(1)}"); // second from top of floating point stack.
+ case 'W':
+ assert(Constraint[1] == 's');
+ return '^' + std::string(Constraint++, 2);
case 'Y':
switch (Constraint[1]) {
default:
@@ -1491,7 +1744,7 @@ std::string X86TargetInfo::convertConstraint(const char *&Constraint) const {
// to the next constraint.
return std::string("^") + std::string(Constraint++, 2);
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
return std::string(1, *Constraint);
}
@@ -1507,19 +1760,19 @@ void X86TargetInfo::fillValidTuneCPUList(SmallVectorImpl<StringRef> &Values) con
}
ArrayRef<const char *> X86TargetInfo::getGCCRegNames() const {
- return llvm::makeArrayRef(GCCRegNames);
+ return llvm::ArrayRef(GCCRegNames);
}
ArrayRef<TargetInfo::AddlRegName> X86TargetInfo::getGCCAddlRegNames() const {
- return llvm::makeArrayRef(AddlRegNames);
+ return llvm::ArrayRef(AddlRegNames);
}
ArrayRef<Builtin::Info> X86_32TargetInfo::getTargetBuiltins() const {
- return llvm::makeArrayRef(BuiltinInfoX86, clang::X86::LastX86CommonBuiltin -
- Builtin::FirstTSBuiltin + 1);
+ return llvm::ArrayRef(BuiltinInfoX86, clang::X86::LastX86CommonBuiltin -
+ Builtin::FirstTSBuiltin + 1);
}
ArrayRef<Builtin::Info> X86_64TargetInfo::getTargetBuiltins() const {
- return llvm::makeArrayRef(BuiltinInfoX86,
- X86::LastTSBuiltin - Builtin::FirstTSBuiltin);
+ return llvm::ArrayRef(BuiltinInfoX86,
+ X86::LastTSBuiltin - Builtin::FirstTSBuiltin);
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/X86.h b/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
index fcaaf50624e9..0ab1c10833db 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
@@ -14,11 +14,13 @@
#define LLVM_CLANG_LIB_BASIC_TARGETS_X86_H
#include "OSTargets.h"
+#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
-#include "llvm/Support/X86TargetParser.h"
+#include "llvm/TargetParser/Triple.h"
+#include "llvm/TargetParser/X86TargetParser.h"
+#include <optional>
namespace clang {
namespace targets {
@@ -42,7 +44,11 @@ static const unsigned X86AddrSpaceMap[] = {
0, // sycl_private
270, // ptr32_sptr
271, // ptr32_uptr
- 272 // ptr64
+ 272, // ptr64
+ 0, // hlsl_groupshared
+ // Wasm address space values for this target are dummy values,
+ // as it is only enabled for Wasm targets.
+ 20, // wasm_funcref
};
// X86 target abstract base class; x86-32 and x86-64 are very close, so
@@ -89,9 +95,13 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
bool HasLWP = false;
bool HasFMA = false;
bool HasF16C = false;
+ bool HasAVX10_1 = false;
+ bool HasAVX10_1_512 = false;
+ bool HasEVEX512 = false;
bool HasAVX512CD = false;
bool HasAVX512VPOPCNTDQ = false;
bool HasAVX512VNNI = false;
+ bool HasAVX512FP16 = false;
bool HasAVX512BF16 = false;
bool HasAVX512ER = false;
bool HasAVX512PF = false;
@@ -101,11 +111,15 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
bool HasAVX512VL = false;
bool HasAVX512VBMI = false;
bool HasAVX512VBMI2 = false;
+ bool HasAVXIFMA = false;
bool HasAVX512IFMA = false;
bool HasAVX512VP2INTERSECT = false;
bool HasSHA = false;
+ bool HasSHA512 = false;
bool HasSHSTK = false;
+ bool HasSM3 = false;
bool HasSGX = false;
+ bool HasSM4 = false;
bool HasCX8 = false;
bool HasCX16 = false;
bool HasFXSR = false;
@@ -121,8 +135,10 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
bool HasCLFLUSHOPT = false;
bool HasCLWB = false;
bool HasMOVBE = false;
+ bool HasPREFETCHI = false;
bool HasPREFETCHWT1 = false;
bool HasRDPID = false;
+ bool HasRDPRU = false;
bool HasRetpolineExternalThunk = false;
bool HasLAHFSAHF = false;
bool HasWBNOINVD = false;
@@ -132,6 +148,12 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
bool HasPTWRITE = false;
bool HasINVPCID = false;
bool HasENQCMD = false;
+ bool HasAVXVNNIINT16 = false;
+ bool HasAMXFP16 = false;
+ bool HasCMPCCXADD = false;
+ bool HasRAOINT = false;
+ bool HasAVXVNNIINT8 = false;
+ bool HasAVXNECONVERT = false;
bool HasKL = false; // For key locker
bool HasWIDEKL = false; // For wide key locker
bool HasHRESET = false;
@@ -139,9 +161,19 @@ class LLVM_LIBRARY_VISIBILITY X86TargetInfo : public TargetInfo {
bool HasAMXTILE = false;
bool HasAMXINT8 = false;
bool HasAMXBF16 = false;
+ bool HasAMXCOMPLEX = false;
bool HasSERIALIZE = false;
bool HasTSXLDTRK = false;
+ bool HasUSERMSR = false;
bool HasUINTR = false;
+ bool HasCRC32 = false;
+ bool HasX87 = false;
+ bool HasEGPR = false;
+ bool HasPush2Pop2 = false;
+ bool HasPPX = false;
+ bool HasNDD = false;
+ bool HasCCMP = false;
+ bool HasCF = false;
protected:
llvm::X86::CPUKind CPU = llvm::X86::CK_None;
@@ -151,6 +183,8 @@ protected:
public:
X86TargetInfo(const llvm::Triple &Triple, const TargetOptions &)
: TargetInfo(Triple) {
+ BFloat16Width = BFloat16Align = 16;
+ BFloat16Format = &llvm::APFloat::BFloat();
LongDoubleFormat = &llvm::APFloat::x87DoubleExtended();
AddrSpaceMap = &X86AddrSpaceMap;
HasStrictFP = true;
@@ -165,15 +199,19 @@ public:
return LongDoubleFormat == &llvm::APFloat::IEEEquad() ? "g" : "e";
}
- unsigned getFloatEvalMethod() const override {
+ LangOptions::FPEvalMethodKind getFPEvalMethod() const override {
// X87 evaluates with 80 bits "long double" precision.
- return SSELevel == NoSSE ? 2 : 0;
+ return SSELevel == NoSSE ? LangOptions::FPEvalMethodKind::FEM_Extended
+ : LangOptions::FPEvalMethodKind::FEM_Source;
}
+ // EvalMethod `source` is not supported for targets with `NoSSE` feature.
+ bool supportSourceEvalMethod() const override { return SSELevel > NoSSE; }
+
ArrayRef<const char *> getGCCRegNames() const override;
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
- return None;
+ return std::nullopt;
}
ArrayRef<TargetInfo::AddlRegName> getGCCAddlRegNames() const override;
@@ -182,9 +220,9 @@ public:
return RegName.equals("esp") || RegName.equals("rsp");
}
- bool validateCpuSupports(StringRef Name) const override;
+ bool validateCpuSupports(StringRef FeatureStr) const override;
- bool validateCpuIs(StringRef Name) const override;
+ bool validateCpuIs(StringRef FeatureStr) const override;
bool validateCPUSpecificCPUDispatch(StringRef Name) const override;
@@ -194,7 +232,7 @@ public:
StringRef Name,
llvm::SmallVectorImpl<StringRef> &Features) const override;
- Optional<unsigned> getCPUCacheLineSize() const override;
+ std::optional<unsigned> getCPUCacheLineSize() const override;
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &info) const override;
@@ -218,21 +256,25 @@ public:
bool validateInputSize(const llvm::StringMap<bool> &FeatureMap,
StringRef Constraint, unsigned Size) const override;
- virtual bool
+ bool
checkCFProtectionReturnSupported(DiagnosticsEngine &Diags) const override {
- return true;
+ if (CPU == llvm::X86::CK_None || CPU >= llvm::X86::CK_PentiumPro)
+ return true;
+ return TargetInfo::checkCFProtectionReturnSupported(Diags);
};
- virtual bool
+ bool
checkCFProtectionBranchSupported(DiagnosticsEngine &Diags) const override {
- return true;
+ if (CPU == llvm::X86::CK_None || CPU >= llvm::X86::CK_PentiumPro)
+ return true;
+ return TargetInfo::checkCFProtectionBranchSupported(Diags);
};
virtual bool validateOperandSize(const llvm::StringMap<bool> &FeatureMap,
StringRef Constraint, unsigned Size) const;
std::string convertConstraint(const char *&Constraint) const override;
- const char *getClobbers() const override {
+ std::string_view getClobbers() const override {
return "~{dirflag},~{fpsr},~{flags}";
}
@@ -374,17 +416,19 @@ public:
void setSupportedOpenCLOpts() override { supportAllOpenCLOpts(); }
- uint64_t getPointerWidthV(unsigned AddrSpace) const override {
- if (AddrSpace == ptr32_sptr || AddrSpace == ptr32_uptr)
+ uint64_t getPointerWidthV(LangAS AS) const override {
+ unsigned TargetAddrSpace = getTargetAddressSpace(AS);
+ if (TargetAddrSpace == ptr32_sptr || TargetAddrSpace == ptr32_uptr)
return 32;
- if (AddrSpace == ptr64)
+ if (TargetAddrSpace == ptr64)
return 64;
return PointerWidth;
}
- uint64_t getPointerAlignV(unsigned AddrSpace) const override {
+ uint64_t getPointerAlignV(LangAS AddrSpace) const override {
return getPointerWidthV(AddrSpace);
}
+
};
// X86-32 generic target
@@ -396,22 +440,21 @@ public:
LongDoubleWidth = 96;
LongDoubleAlign = 32;
SuitableAlign = 128;
- resetDataLayout(
- Triple.isOSBinFormatMachO()
- ? "e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-"
- "f80:32-n8:16:32-S128"
- : "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-"
- "f80:32-n8:16:32-S128",
- Triple.isOSBinFormatMachO() ? "_" : "");
+ resetDataLayout(Triple.isOSBinFormatMachO()
+ ? "e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-i128:"
+ "128-f64:32:64-f80:32-n8:16:32-S128"
+ : "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i128:"
+ "128-f64:32:64-f80:32-n8:16:32-S128",
+ Triple.isOSBinFormatMachO() ? "_" : "");
SizeType = UnsignedInt;
PtrDiffType = SignedInt;
IntPtrType = SignedInt;
RegParmMax = 3;
// Use fpret for all types.
- RealTypeUsesObjCFPRet =
- ((1 << TargetInfo::Float) | (1 << TargetInfo::Double) |
- (1 << TargetInfo::LongDouble));
+ RealTypeUsesObjCFPRetMask =
+ (unsigned)(FloatModeKind::Float | FloatModeKind::Double |
+ FloatModeKind::LongDouble);
// x86-32 has atomics up to 8 bytes
MaxAtomicPromoteWidth = 64;
@@ -459,7 +502,10 @@ public:
ArrayRef<Builtin::Info> getTargetBuiltins() const override;
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
+ size_t getMaxBitIntWidth() const override {
+ return llvm::IntegerType::MAX_INT_BITS;
+ }
};
class LLVM_LIBRARY_VISIBILITY NetBSDI386TargetInfo
@@ -468,14 +514,13 @@ public:
NetBSDI386TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: NetBSDTargetInfo<X86_32TargetInfo>(Triple, Opts) {}
- unsigned getFloatEvalMethod() const override {
- unsigned Major, Minor, Micro;
- getTriple().getOSVersion(Major, Minor, Micro);
+ LangOptions::FPEvalMethodKind getFPEvalMethod() const override {
+ VersionTuple OsVersion = getTriple().getOSVersion();
// New NetBSD uses the default rounding mode.
- if (Major >= 7 || (Major == 6 && Minor == 99 && Micro >= 26) || Major == 0)
- return X86_32TargetInfo::getFloatEvalMethod();
+ if (OsVersion >= VersionTuple(6, 99, 26) || OsVersion.getMajor() == 0)
+ return X86_32TargetInfo::getFPEvalMethod();
// NetBSD before 6.99.26 defaults to "double" rounding.
- return 1;
+ return LangOptions::FPEvalMethodKind::FEM_Double;
}
};
@@ -505,8 +550,9 @@ public:
UseSignedCharForObjCBool = false;
SizeType = UnsignedLong;
IntPtrType = SignedLong;
- resetDataLayout("e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-"
- "f80:128-n8:16:32-S128", "_");
+ resetDataLayout("e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-i128:128-"
+ "f64:32:64-f80:128-n8:16:32-S128",
+ "_");
HasAlignMac68kSupport = true;
}
@@ -531,11 +577,12 @@ public:
DoubleAlign = LongLongAlign = 64;
bool IsWinCOFF =
getTriple().isOSWindows() && getTriple().isOSBinFormatCOFF();
- resetDataLayout(IsWinCOFF ? "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:"
- "64-i64:64-f80:32-n8:16:32-a:0:32-S32"
- : "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:"
- "64-i64:64-f80:32-n8:16:32-a:0:32-S32",
- IsWinCOFF ? "_" : "");
+ bool IsMSVC = getTriple().isWindowsMSVCEnvironment();
+ std::string Layout = IsWinCOFF ? "e-m:x" : "e-m:e";
+ Layout += "-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-";
+ Layout += IsMSVC ? "f80:128" : "f80:32";
+ Layout += "-n8:16:32-a:0:32-S32";
+ resetDataLayout(Layout, IsWinCOFF ? "_" : "");
}
};
@@ -583,8 +630,8 @@ public:
: X86_32TargetInfo(Triple, Opts) {
this->WCharType = TargetInfo::UnsignedShort;
DoubleAlign = LongLongAlign = 64;
- resetDataLayout("e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:"
- "32-n8:16:32-a:0:32-S32",
+ resetDataLayout("e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-"
+ "i128:128-f80:32-n8:16:32-a:0:32-S32",
"_");
}
@@ -622,8 +669,8 @@ public:
: X86_32TargetInfo(Triple, Opts) {
LongDoubleWidth = 64;
LongDoubleFormat = &llvm::APFloat::IEEEdouble();
- resetDataLayout("e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:32-f64:"
- "32-f128:32-n8:16:32-a:0:32-S32");
+ resetDataLayout("e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:32-"
+ "f64:32-f128:32-n8:16:32-a:0:32-S32");
WIntType = UnsignedInt;
}
@@ -683,14 +730,14 @@ public:
// Pointers are 32-bit in x32.
resetDataLayout(IsX32 ? "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-"
- "i64:64-f80:128-n8:16:32:64-S128"
- : IsWinCOFF ? "e-m:w-p270:32:32-p271:32:32-p272:64:"
- "64-i64:64-f80:128-n8:16:32:64-S128"
- : "e-m:e-p270:32:32-p271:32:32-p272:64:"
- "64-i64:64-f80:128-n8:16:32:64-S128");
+ "i64:64-i128:128-f80:128-n8:16:32:64-S128"
+ : IsWinCOFF ? "e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:"
+ "64-i128:128-f80:128-n8:16:32:64-S128"
+ : "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:"
+ "64-i128:128-f80:128-n8:16:32:64-S128");
// Use fpret only for long double.
- RealTypeUsesObjCFPRet = (1 << TargetInfo::LongDouble);
+ RealTypeUsesObjCFPRetMask = (unsigned)FloatModeKind::LongDouble;
// Use fp2ret for _Complex long double.
ComplexLongDoubleUsesFP2Ret = true;
@@ -766,7 +813,10 @@ public:
ArrayRef<Builtin::Info> getTargetBuiltins() const override;
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
+ size_t getMaxBitIntWidth() const override {
+ return llvm::IntegerType::MAX_INT_BITS;
+ }
};
// x86-64 Windows target
@@ -881,8 +931,9 @@ public:
llvm::Triple T = llvm::Triple(Triple);
if (T.isiOS())
UseSignedCharForObjCBool = false;
- resetDataLayout("e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:"
- "16:32:64-S128", "_");
+ resetDataLayout("e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-"
+ "f80:128-n8:16:32:64-S128",
+ "_");
}
bool handleTargetFeatures(std::vector<std::string> &Features,
@@ -928,6 +979,28 @@ public:
LongDoubleFormat = &llvm::APFloat::IEEEquad();
}
};
+
+// x86_32 OHOS target
+class LLVM_LIBRARY_VISIBILITY OHOSX86_32TargetInfo
+ : public OHOSTargetInfo<X86_32TargetInfo> {
+public:
+ OHOSX86_32TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : OHOSTargetInfo<X86_32TargetInfo>(Triple, Opts) {
+ SuitableAlign = 32;
+ LongDoubleWidth = 64;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+ }
+};
+
+// x86_64 OHOS target
+class LLVM_LIBRARY_VISIBILITY OHOSX86_64TargetInfo
+ : public OHOSTargetInfo<X86_64TargetInfo> {
+public:
+ OHOSX86_64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : OHOSTargetInfo<X86_64TargetInfo>(Triple, Opts) {
+ LongDoubleFormat = &llvm::APFloat::IEEEquad();
+ }
+};
} // namespace targets
} // namespace clang
#endif // LLVM_CLANG_LIB_BASIC_TARGETS_X86_H
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/XCore.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/XCore.cpp
index ba64f15f3394..fd377bbfb90e 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/XCore.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/XCore.cpp
@@ -18,11 +18,11 @@
using namespace clang;
using namespace clang::targets;
-const Builtin::Info XCoreTargetInfo::BuiltinInfo[] = {
+static constexpr Builtin::Info BuiltinInfo[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
- {#ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr},
+ {#ID, TYPE, ATTRS, nullptr, HeaderDesc::HEADER, ALL_LANGUAGES},
#include "clang/Basic/BuiltinsXCore.def"
};
@@ -33,6 +33,6 @@ void XCoreTargetInfo::getTargetDefines(const LangOptions &Opts,
}
ArrayRef<Builtin::Info> XCoreTargetInfo::getTargetBuiltins() const {
- return llvm::makeArrayRef(BuiltinInfo, clang::XCore::LastTSBuiltin -
- Builtin::FirstTSBuiltin);
+ return llvm::ArrayRef(BuiltinInfo,
+ clang::XCore::LastTSBuiltin - Builtin::FirstTSBuiltin);
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/XCore.h b/contrib/llvm-project/clang/lib/Basic/Targets/XCore.h
index c33766751aa1..a58d3e8acf47 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/XCore.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/XCore.h
@@ -15,14 +15,13 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace targets {
class LLVM_LIBRARY_VISIBILITY XCoreTargetInfo : public TargetInfo {
- static const Builtin::Info BuiltinInfo[];
public:
XCoreTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
@@ -50,18 +49,18 @@ public:
return TargetInfo::VoidPtrBuiltinVaList;
}
- const char *getClobbers() const override { return ""; }
+ std::string_view getClobbers() const override { return ""; }
ArrayRef<const char *> getGCCRegNames() const override {
static const char *const GCCRegNames[] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
"r8", "r9", "r10", "r11", "cp", "dp", "sp", "lr"
};
- return llvm::makeArrayRef(GCCRegNames);
+ return llvm::ArrayRef(GCCRegNames);
}
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
- return None;
+ return std::nullopt;
}
bool validateAsmConstraint(const char *&Name,
@@ -76,7 +75,7 @@ public:
bool allowsLargerPreferedTypeAlignment() const override { return false; }
- bool hasExtIntType() const override { return true; }
+ bool hasBitIntType() const override { return true; }
};
} // namespace targets
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/TokenKinds.cpp b/contrib/llvm-project/clang/lib/Basic/TokenKinds.cpp
index d55e176c72c4..c300175ce90b 100644
--- a/contrib/llvm-project/clang/lib/Basic/TokenKinds.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/TokenKinds.cpp
@@ -46,6 +46,15 @@ const char *tok::getKeywordSpelling(TokenKind Kind) {
return nullptr;
}
+const char *tok::getPPKeywordSpelling(tok::PPKeywordKind Kind) {
+ switch (Kind) {
+#define PPKEYWORD(x) case tok::pp_##x: return #x;
+#include "clang/Basic/TokenKinds.def"
+ default: break;
+ }
+ return nullptr;
+}
+
bool tok::isAnnotation(TokenKind Kind) {
switch (Kind) {
#define ANNOTATION(X) case annot_ ## X: return true;
diff --git a/contrib/llvm-project/clang/lib/Basic/TypeTraits.cpp b/contrib/llvm-project/clang/lib/Basic/TypeTraits.cpp
index 3b723afff70b..4dbf678dc395 100644
--- a/contrib/llvm-project/clang/lib/Basic/TypeTraits.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/TypeTraits.cpp
@@ -55,6 +55,15 @@ static constexpr const char *UnaryExprOrTypeTraitSpellings[] = {
#include "clang/Basic/TokenKinds.def"
};
+static constexpr const unsigned TypeTraitArities[] = {
+#define TYPE_TRAIT_1(Spelling, Name, Key) 1,
+#include "clang/Basic/TokenKinds.def"
+#define TYPE_TRAIT_2(Spelling, Name, Key) 2,
+#include "clang/Basic/TokenKinds.def"
+#define TYPE_TRAIT_N(Spelling, Name, Key) 0,
+#include "clang/Basic/TokenKinds.def"
+};
+
const char *clang::getTraitName(TypeTrait T) {
assert(T <= TT_Last && "invalid enum value!");
return TypeTraitNames[T];
@@ -84,3 +93,8 @@ const char *clang::getTraitSpelling(UnaryExprOrTypeTrait T) {
assert(T <= UETT_Last && "invalid enum value!");
return UnaryExprOrTypeTraitSpellings[T];
}
+
+unsigned clang::getTypeTraitArity(TypeTrait T) {
+ assert(T <= TT_Last && "invalid enum value!");
+ return TypeTraitArities[T];
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/Version.cpp b/contrib/llvm-project/clang/lib/Basic/Version.cpp
index af3118b0f6da..4823f566bd77 100644
--- a/contrib/llvm-project/clang/lib/Basic/Version.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Version.cpp
@@ -57,6 +57,14 @@ std::string getLLVMRevision() {
#endif
}
+std::string getClangVendor() {
+#ifdef CLANG_VENDOR
+ return CLANG_VENDOR;
+#else
+ return "";
+#endif
+}
+
std::string getClangFullRepositoryVersion() {
std::string buf;
llvm::raw_string_ostream OS(buf);
@@ -82,7 +90,7 @@ std::string getClangFullRepositoryVersion() {
OS << LLVMRepo << ' ';
OS << LLVMRev << ')';
}
- return OS.str();
+ return buf;
}
std::string getClangFullVersion() {
@@ -92,17 +100,14 @@ std::string getClangFullVersion() {
std::string getClangToolFullVersion(StringRef ToolName) {
std::string buf;
llvm::raw_string_ostream OS(buf);
-#ifdef CLANG_VENDOR
- OS << CLANG_VENDOR;
-#endif
- OS << ToolName << " version " CLANG_VERSION_STRING;
+ OS << getClangVendor() << ToolName << " version " CLANG_VERSION_STRING;
std::string repo = getClangFullRepositoryVersion();
if (!repo.empty()) {
OS << " " << repo;
}
- return OS.str();
+ return buf;
}
std::string getClangFullCPPVersion() {
@@ -110,17 +115,14 @@ std::string getClangFullCPPVersion() {
// the one we report on the command line.
std::string buf;
llvm::raw_string_ostream OS(buf);
-#ifdef CLANG_VENDOR
- OS << CLANG_VENDOR;
-#endif
- OS << "Clang " CLANG_VERSION_STRING;
+ OS << getClangVendor() << "Clang " CLANG_VERSION_STRING;
std::string repo = getClangFullRepositoryVersion();
if (!repo.empty()) {
OS << " " << repo;
}
- return OS.str();
+ return buf;
}
} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/Basic/Warnings.cpp b/contrib/llvm-project/clang/lib/Basic/Warnings.cpp
index cc8c138233ca..5a5ac5556338 100644
--- a/contrib/llvm-project/clang/lib/Basic/Warnings.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Warnings.cpp
@@ -96,11 +96,7 @@ void clang::ProcessWarningOptions(DiagnosticsEngine &Diags,
// Check to see if this warning starts with "no-", if so, this is a
// negative form of the option.
- bool isPositive = true;
- if (Opt.startswith("no-")) {
- isPositive = false;
- Opt = Opt.substr(3);
- }
+ bool isPositive = !Opt.consume_front("no-");
// Figure out how this option affects the warning. If -Wfoo, map the
// diagnostic to a warning, if -Wno-foo, map it to ignore.
@@ -133,7 +129,7 @@ void clang::ProcessWarningOptions(DiagnosticsEngine &Diags,
// table. It also has the "specifier" form of -Werror=foo. GCC supports
// the deprecated -Werror-implicit-function-declaration which is used by
// a few projects.
- if (Opt.startswith("error")) {
+ if (Opt.starts_with("error")) {
StringRef Specifier;
if (Opt.size() > 5) { // Specifier must be present.
if (Opt[5] != '=' &&
@@ -162,7 +158,7 @@ void clang::ProcessWarningOptions(DiagnosticsEngine &Diags,
}
// -Wfatal-errors is yet another special case.
- if (Opt.startswith("fatal-errors")) {
+ if (Opt.starts_with("fatal-errors")) {
StringRef Specifier;
if (Opt.size() != 12) {
if ((Opt[12] != '=' && Opt[12] != '-') || Opt.size() == 13) {
@@ -198,14 +194,12 @@ void clang::ProcessWarningOptions(DiagnosticsEngine &Diags,
}
}
- for (unsigned i = 0, e = Opts.Remarks.size(); i != e; ++i) {
- StringRef Opt = Opts.Remarks[i];
+ for (StringRef Opt : Opts.Remarks) {
const auto Flavor = diag::Flavor::Remark;
// Check to see if this warning starts with "no-", if so, this is a
// negative form of the option.
- bool IsPositive = !Opt.startswith("no-");
- if (!IsPositive) Opt = Opt.substr(3);
+ bool IsPositive = !Opt.consume_front("no-");
auto Severity = IsPositive ? diag::Severity::Remark
: diag::Severity::Ignored;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.cpp
new file mode 100644
index 000000000000..1b56cf7c596d
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.cpp
@@ -0,0 +1,231 @@
+//===- ABIInfo.cpp --------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfo.h"
+#include "ABIInfoImpl.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+// Pin the vtable to this file.
+ABIInfo::~ABIInfo() = default;
+
+CGCXXABI &ABIInfo::getCXXABI() const { return CGT.getCXXABI(); }
+
+ASTContext &ABIInfo::getContext() const { return CGT.getContext(); }
+
+llvm::LLVMContext &ABIInfo::getVMContext() const {
+ return CGT.getLLVMContext();
+}
+
+const llvm::DataLayout &ABIInfo::getDataLayout() const {
+ return CGT.getDataLayout();
+}
+
+const TargetInfo &ABIInfo::getTarget() const { return CGT.getTarget(); }
+
+const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
+ return CGT.getCodeGenOpts();
+}
+
+bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }
+
+bool ABIInfo::isOHOSFamily() const {
+ return getTarget().getTriple().isOHOSFamily();
+}
+
+Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ return Address::invalid();
+}
+
+bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
+ return false;
+}
+
+bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
+ uint64_t Members) const {
+ return false;
+}
+
+bool ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const {
+ // For compatibility with GCC, ignore empty bitfields in C++ mode.
+ return getContext().getLangOpts().CPlusPlus;
+}
+
+bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
+ uint64_t &Members) const {
+ if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
+ uint64_t NElements = AT->getSize().getZExtValue();
+ if (NElements == 0)
+ return false;
+ if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
+ return false;
+ Members *= NElements;
+ } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ Members = 0;
+
+ // If this is a C++ record, check the properties of the record such as
+ // bases and ABI specific restrictions
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ if (!getCXXABI().isPermittedToBeHomogeneousAggregate(CXXRD))
+ return false;
+
+ for (const auto &I : CXXRD->bases()) {
+ // Ignore empty records.
+ if (isEmptyRecord(getContext(), I.getType(), true))
+ continue;
+
+ uint64_t FldMembers;
+ if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
+ return false;
+
+ Members += FldMembers;
+ }
+ }
+
+ for (const auto *FD : RD->fields()) {
+ // Ignore (non-zero arrays of) empty records.
+ QualType FT = FD->getType();
+ while (const ConstantArrayType *AT =
+ getContext().getAsConstantArrayType(FT)) {
+ if (AT->getSize().getZExtValue() == 0)
+ return false;
+ FT = AT->getElementType();
+ }
+ if (isEmptyRecord(getContext(), FT, true))
+ continue;
+
+ if (isZeroLengthBitfieldPermittedInHomogeneousAggregate() &&
+ FD->isZeroLengthBitField(getContext()))
+ continue;
+
+ uint64_t FldMembers;
+ if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
+ return false;
+
+ Members = (RD->isUnion() ?
+ std::max(Members, FldMembers) : Members + FldMembers);
+ }
+
+ if (!Base)
+ return false;
+
+ // Ensure there is no padding.
+ if (getContext().getTypeSize(Base) * Members !=
+ getContext().getTypeSize(Ty))
+ return false;
+ } else {
+ Members = 1;
+ if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ Members = 2;
+ Ty = CT->getElementType();
+ }
+
+ // Most ABIs only support float, double, and some vector type widths.
+ if (!isHomogeneousAggregateBaseType(Ty))
+ return false;
+
+ // The base type must be the same for all members. Types that
+ // agree in both total size and mode (float vs. vector) are
+ // treated as being equivalent here.
+ const Type *TyPtr = Ty.getTypePtr();
+ if (!Base) {
+ Base = TyPtr;
+ // If it's a non-power-of-2 vector, its size is already a power-of-2,
+ // so make sure to widen it explicitly.
+ if (const VectorType *VT = Base->getAs<VectorType>()) {
+ QualType EltTy = VT->getElementType();
+ unsigned NumElements =
+ getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
+ Base = getContext()
+ .getVectorType(EltTy, NumElements, VT->getVectorKind())
+ .getTypePtr();
+ }
+ }
+
+ if (Base->isVectorType() != TyPtr->isVectorType() ||
+ getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
+ return false;
+ }
+ return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
+}
+
+bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
+ if (getContext().isPromotableIntegerType(Ty))
+ return true;
+
+ if (const auto *EIT = Ty->getAs<BitIntType>())
+ if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy))
+ return true;
+
+ return false;
+}
+
+ABIArgInfo ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByVal,
+ bool Realign,
+ llvm::Type *Padding) const {
+ return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty), ByVal,
+ Realign, Padding);
+}
+
+ABIArgInfo ABIInfo::getNaturalAlignIndirectInReg(QualType Ty,
+ bool Realign) const {
+ return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
+ /*ByVal*/ false, Realign);
+}
+
+// Pin the vtable to this file.
+SwiftABIInfo::~SwiftABIInfo() = default;
+
+/// Does the given lowering require more than the given number of
+/// registers when expanded?
+///
+/// This is intended to be the basis of a reasonable basic implementation
+/// of should{Pass,Return}Indirectly.
+///
+/// For most targets, a limit of four total registers is reasonable; this
+/// limits the amount of code required in order to move around the value
+/// in case it wasn't produced immediately prior to the call by the caller
+/// (or wasn't produced in exactly the right registers) or isn't used
+/// immediately within the callee. But some targets may need to further
+/// limit the register count due to an inability to support that many
+/// return registers.
+bool SwiftABIInfo::occupiesMoreThan(ArrayRef<llvm::Type *> scalarTypes,
+ unsigned maxAllRegisters) const {
+ unsigned intCount = 0, fpCount = 0;
+ for (llvm::Type *type : scalarTypes) {
+ if (type->isPointerTy()) {
+ intCount++;
+ } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
+ auto ptrWidth = CGT.getTarget().getPointerWidth(LangAS::Default);
+ intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
+ } else {
+ assert(type->isVectorTy() || type->isFloatingPointTy());
+ fpCount++;
+ }
+ }
+
+ return (intCount + fpCount > maxAllRegisters);
+}
+
+bool SwiftABIInfo::shouldPassIndirectly(ArrayRef<llvm::Type *> ComponentTys,
+ bool AsReturnValue) const {
+ return occupiesMoreThan(ComponentTys, /*total=*/4);
+}
+
+bool SwiftABIInfo::isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
+ unsigned NumElts) const {
+ // The default implementation of this assumes that the target guarantees
+ // 128-bit SIMD support but nothing more.
+ return (VectorSize.getQuantity() > 8 && VectorSize.getQuantity() <= 16);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h b/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h
index 56f0dd4322d2..b9a5ef6e4366 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h
@@ -15,137 +15,134 @@
#include "llvm/IR/Type.h"
namespace llvm {
- class Value;
- class LLVMContext;
- class DataLayout;
- class Type;
-}
+class Value;
+class LLVMContext;
+class DataLayout;
+class Type;
+} // namespace llvm
namespace clang {
- class ASTContext;
- class CodeGenOptions;
- class TargetInfo;
+class ASTContext;
+class CodeGenOptions;
+class TargetInfo;
namespace CodeGen {
- class ABIArgInfo;
- class Address;
- class CGCXXABI;
- class CGFunctionInfo;
- class CodeGenFunction;
- class CodeGenTypes;
- class SwiftABIInfo;
-
-namespace swiftcall {
- class SwiftAggLowering;
-}
-
- // FIXME: All of this stuff should be part of the target interface
- // somehow. It is currently here because it is not clear how to factor
- // the targets to support this, since the Targets currently live in a
- // layer below types n'stuff.
-
-
- /// ABIInfo - Target specific hooks for defining how a type should be
- /// passed or returned from functions.
- class ABIInfo {
- public:
- CodeGen::CodeGenTypes &CGT;
- protected:
- llvm::CallingConv::ID RuntimeCC;
- public:
- ABIInfo(CodeGen::CodeGenTypes &cgt)
- : CGT(cgt), RuntimeCC(llvm::CallingConv::C) {}
-
- virtual ~ABIInfo();
-
- virtual bool supportsSwift() const { return false; }
-
- virtual bool allowBFloatArgsAndRet() const { return false; }
-
- CodeGen::CGCXXABI &getCXXABI() const;
- ASTContext &getContext() const;
- llvm::LLVMContext &getVMContext() const;
- const llvm::DataLayout &getDataLayout() const;
- const TargetInfo &getTarget() const;
- const CodeGenOptions &getCodeGenOpts() const;
-
- /// Return the calling convention to use for system runtime
- /// functions.
- llvm::CallingConv::ID getRuntimeCC() const {
- return RuntimeCC;
- }
-
- virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const = 0;
-
- /// EmitVAArg - Emit the target dependent code to load a value of
- /// \arg Ty from the va_list pointed to by \arg VAListAddr.
-
- // FIXME: This is a gaping layering violation if we wanted to drop
- // the ABI information any lower than CodeGen. Of course, for
- // VAArg handling it has to be at this level; there is no way to
- // abstract this out.
- virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF,
+class ABIArgInfo;
+class Address;
+class CGCXXABI;
+class CGFunctionInfo;
+class CodeGenFunction;
+class CodeGenTypes;
+
+// FIXME: All of this stuff should be part of the target interface
+// somehow. It is currently here because it is not clear how to factor
+// the targets to support this, since the Targets currently live in a
+// layer below types n'stuff.
+
+/// ABIInfo - Target specific hooks for defining how a type should be
+/// passed or returned from functions.
+class ABIInfo {
+protected:
+ CodeGen::CodeGenTypes &CGT;
+ llvm::CallingConv::ID RuntimeCC;
+
+public:
+ ABIInfo(CodeGen::CodeGenTypes &cgt)
+ : CGT(cgt), RuntimeCC(llvm::CallingConv::C) {}
+
+ virtual ~ABIInfo();
+
+ virtual bool allowBFloatArgsAndRet() const { return false; }
+
+ CodeGen::CGCXXABI &getCXXABI() const;
+ ASTContext &getContext() const;
+ llvm::LLVMContext &getVMContext() const;
+ const llvm::DataLayout &getDataLayout() const;
+ const TargetInfo &getTarget() const;
+ const CodeGenOptions &getCodeGenOpts() const;
+
+ /// Return the calling convention to use for system runtime
+ /// functions.
+ llvm::CallingConv::ID getRuntimeCC() const { return RuntimeCC; }
+
+ virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const = 0;
+
+ /// EmitVAArg - Emit the target dependent code to load a value of
+ /// \arg Ty from the va_list pointed to by \arg VAListAddr.
+
+ // FIXME: This is a gaping layering violation if we wanted to drop
+ // the ABI information any lower than CodeGen. Of course, for
+ // VAArg handling it has to be at this level; there is no way to
+ // abstract this out.
+ virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF,
+ CodeGen::Address VAListAddr,
+ QualType Ty) const = 0;
+
+ bool isAndroid() const;
+ bool isOHOSFamily() const;
+
+ /// Emit the target dependent code to load a value of
+ /// \arg Ty from the \c __builtin_ms_va_list pointed to by \arg VAListAddr.
+ virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF,
CodeGen::Address VAListAddr,
- QualType Ty) const = 0;
-
- bool isAndroid() const;
-
- /// Emit the target dependent code to load a value of
- /// \arg Ty from the \c __builtin_ms_va_list pointed to by \arg VAListAddr.
- virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF,
- CodeGen::Address VAListAddr,
- QualType Ty) const;
-
- virtual bool isHomogeneousAggregateBaseType(QualType Ty) const;
-
- virtual bool isHomogeneousAggregateSmallEnough(const Type *Base,
- uint64_t Members) const;
-
- bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
- uint64_t &Members) const;
-
- // Implement the Type::IsPromotableIntegerType for ABI specific needs. The
- // only difference is that this considers _ExtInt as well.
- bool isPromotableIntegerTypeForABI(QualType Ty) const;
-
- /// A convenience method to return an indirect ABIArgInfo with an
- /// expected alignment equal to the ABI alignment of the given type.
- CodeGen::ABIArgInfo
- getNaturalAlignIndirect(QualType Ty, bool ByVal = true,
- bool Realign = false,
- llvm::Type *Padding = nullptr) const;
-
- CodeGen::ABIArgInfo
- getNaturalAlignIndirectInReg(QualType Ty, bool Realign = false) const;
-
-
- };
-
- /// A refining implementation of ABIInfo for targets that support swiftcall.
- ///
- /// If we find ourselves wanting multiple such refinements, they'll probably
- /// be independent refinements, and we should probably find another way
- /// to do it than simple inheritance.
- class SwiftABIInfo : public ABIInfo {
- public:
- SwiftABIInfo(CodeGen::CodeGenTypes &cgt) : ABIInfo(cgt) {}
-
- bool supportsSwift() const final override { return true; }
-
- virtual bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> types,
- bool asReturnValue) const = 0;
-
- virtual bool isLegalVectorTypeForSwift(CharUnits totalSize,
- llvm::Type *eltTy,
- unsigned elts) const;
-
- virtual bool isSwiftErrorInRegister() const = 0;
-
- static bool classof(const ABIInfo *info) {
- return info->supportsSwift();
- }
- };
-} // end namespace CodeGen
-} // end namespace clang
+ QualType Ty) const;
+
+ virtual bool isHomogeneousAggregateBaseType(QualType Ty) const;
+
+ virtual bool isHomogeneousAggregateSmallEnough(const Type *Base,
+ uint64_t Members) const;
+ virtual bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const;
+
+ /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
+ /// aggregate. Base is set to the base element type, and Members is set
+ /// to the number of base elements.
+ bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
+ uint64_t &Members) const;
+
+ // Implement the Type::IsPromotableIntegerType for ABI specific needs. The
+ // only difference is that this considers bit-precise integer types as well.
+ bool isPromotableIntegerTypeForABI(QualType Ty) const;
+
+ /// A convenience method to return an indirect ABIArgInfo with an
+ /// expected alignment equal to the ABI alignment of the given type.
+ CodeGen::ABIArgInfo
+ getNaturalAlignIndirect(QualType Ty, bool ByVal = true, bool Realign = false,
+ llvm::Type *Padding = nullptr) const;
+
+ CodeGen::ABIArgInfo getNaturalAlignIndirectInReg(QualType Ty,
+ bool Realign = false) const;
+};
+
+/// Target specific hooks for defining how a type should be passed or returned
+/// from functions with one of the Swift calling conventions.
+class SwiftABIInfo {
+protected:
+ CodeGenTypes &CGT;
+ bool SwiftErrorInRegister;
+
+ bool occupiesMoreThan(ArrayRef<llvm::Type *> scalarTypes,
+ unsigned maxAllRegisters) const;
+
+public:
+ SwiftABIInfo(CodeGen::CodeGenTypes &CGT, bool SwiftErrorInRegister)
+ : CGT(CGT), SwiftErrorInRegister(SwiftErrorInRegister) {}
+
+ virtual ~SwiftABIInfo();
+
+ /// Returns true if an aggregate which expands to the given type sequence
+ /// should be passed / returned indirectly.
+ virtual bool shouldPassIndirectly(ArrayRef<llvm::Type *> ComponentTys,
+ bool AsReturnValue) const;
+
+ /// Returns true if the given vector type is legal from Swift's calling
+ /// convention perspective.
+ virtual bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
+ unsigned NumElts) const;
+
+ /// Returns true if swifterror is lowered to a register by the target ABI.
+ bool isSwiftErrorInRegister() const { return SwiftErrorInRegister; };
+};
+} // end namespace CodeGen
+} // end namespace clang
#endif
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.cpp b/contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.cpp
new file mode 100644
index 000000000000..2b20d5a13346
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.cpp
@@ -0,0 +1,453 @@
+//===- ABIInfoImpl.cpp ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+// Pin the vtable to this file.
+DefaultABIInfo::~DefaultABIInfo() = default;
+
+ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ if (isAggregateTypeForABI(Ty)) {
+ // Records with non-trivial destructors/copy-constructors should not be
+ // passed by value.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ return getNaturalAlignIndirect(Ty);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ ASTContext &Context = getContext();
+ if (const auto *EIT = Ty->getAs<BitIntType>())
+ if (EIT->getNumBits() >
+ Context.getTypeSize(Context.getTargetInfo().hasInt128Type()
+ ? Context.Int128Ty
+ : Context.LongLongTy))
+ return getNaturalAlignIndirect(Ty);
+
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (isAggregateTypeForABI(RetTy))
+ return getNaturalAlignIndirect(RetTy);
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
+ if (EIT->getNumBits() >
+ getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type()
+ ? getContext().Int128Ty
+ : getContext().LongLongTy))
+ return getNaturalAlignIndirect(RetTy);
+
+ return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
+}
+
+void DefaultABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+}
+
+Address DefaultABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
+}
+
+ABIArgInfo CodeGen::coerceToIntArray(QualType Ty, ASTContext &Context,
+ llvm::LLVMContext &LLVMContext) {
+ // Alignment and Size are measured in bits.
+ const uint64_t Size = Context.getTypeSize(Ty);
+ const uint64_t Alignment = Context.getTypeAlign(Ty);
+ llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
+ const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
+}
+
+void CodeGen::AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
+ llvm::Value *Array, llvm::Value *Value,
+ unsigned FirstIndex, unsigned LastIndex) {
+ // Alternatively, we could emit this as a loop in the source.
+ for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
+ llvm::Value *Cell =
+ Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
+ Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
+ }
+}
+
+bool CodeGen::isAggregateTypeForABI(QualType T) {
+ return !CodeGenFunction::hasScalarEvaluationKind(T) ||
+ T->isMemberFunctionPointerType();
+}
+
+llvm::Type *CodeGen::getVAListElementType(CodeGenFunction &CGF) {
+ return CGF.ConvertTypeForMem(
+ CGF.getContext().getBuiltinVaListType()->getPointeeType());
+}
+
+CGCXXABI::RecordArgABI CodeGen::getRecordArgABI(const RecordType *RT,
+ CGCXXABI &CXXABI) {
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD) {
+ if (!RT->getDecl()->canPassInRegisters())
+ return CGCXXABI::RAA_Indirect;
+ return CGCXXABI::RAA_Default;
+ }
+ return CXXABI.getRecordArgABI(RD);
+}
+
+CGCXXABI::RecordArgABI CodeGen::getRecordArgABI(QualType T, CGCXXABI &CXXABI) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return CGCXXABI::RAA_Default;
+ return getRecordArgABI(RT, CXXABI);
+}
+
+bool CodeGen::classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
+ const ABIInfo &Info) {
+ QualType Ty = FI.getReturnType();
+
+ if (const auto *RT = Ty->getAs<RecordType>())
+ if (!isa<CXXRecordDecl>(RT->getDecl()) &&
+ !RT->getDecl()->canPassInRegisters()) {
+ FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty);
+ return true;
+ }
+
+ return CXXABI.classifyReturnType(FI);
+}
+
+QualType CodeGen::useFirstFieldIfTransparentUnion(QualType Ty) {
+ if (const RecordType *UT = Ty->getAsUnionType()) {
+ const RecordDecl *UD = UT->getDecl();
+ if (UD->hasAttr<TransparentUnionAttr>()) {
+ assert(!UD->field_empty() && "sema created an empty transparent union");
+ return UD->field_begin()->getType();
+ }
+ }
+ return Ty;
+}
+
+llvm::Value *CodeGen::emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
+ llvm::Value *Ptr,
+ CharUnits Align) {
+ // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
+ llvm::Value *RoundUp = CGF.Builder.CreateConstInBoundsGEP1_32(
+ CGF.Builder.getInt8Ty(), Ptr, Align.getQuantity() - 1);
+ return CGF.Builder.CreateIntrinsic(
+ llvm::Intrinsic::ptrmask, {CGF.AllocaInt8PtrTy, CGF.IntPtrTy},
+ {RoundUp, llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity())},
+ nullptr, Ptr->getName() + ".aligned");
+}
+
+Address
+CodeGen::emitVoidPtrDirectVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ llvm::Type *DirectTy, CharUnits DirectSize,
+ CharUnits DirectAlign, CharUnits SlotSize,
+ bool AllowHigherAlign, bool ForceRightAdjust) {
+ // Cast the element type to i8* if necessary. Some platforms define
+ // va_list as a struct containing an i8* instead of just an i8*.
+ if (VAListAddr.getElementType() != CGF.Int8PtrTy)
+ VAListAddr = VAListAddr.withElementType(CGF.Int8PtrTy);
+
+ llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");
+
+ // If the CC aligns values higher than the slot size, do so if needed.
+ Address Addr = Address::invalid();
+ if (AllowHigherAlign && DirectAlign > SlotSize) {
+ Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
+ CGF.Int8Ty, DirectAlign);
+ } else {
+ Addr = Address(Ptr, CGF.Int8Ty, SlotSize);
+ }
+
+ // Advance the pointer past the argument, then store that back.
+ CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
+ Address NextPtr =
+ CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next");
+ CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
+
+ // If the argument is smaller than a slot, and this is a big-endian
+ // target, the argument will be right-adjusted in its slot.
+ if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
+ (!DirectTy->isStructTy() || ForceRightAdjust)) {
+ Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
+ }
+
+ return Addr.withElementType(DirectTy);
+}
+
+Address CodeGen::emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType ValueTy, bool IsIndirect,
+ TypeInfoChars ValueInfo,
+ CharUnits SlotSizeAndAlign,
+ bool AllowHigherAlign,
+ bool ForceRightAdjust) {
+ // The size and alignment of the value that was passed directly.
+ CharUnits DirectSize, DirectAlign;
+ if (IsIndirect) {
+ DirectSize = CGF.getPointerSize();
+ DirectAlign = CGF.getPointerAlign();
+ } else {
+ DirectSize = ValueInfo.Width;
+ DirectAlign = ValueInfo.Align;
+ }
+
+ // Cast the address we've calculated to the right type.
+ llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy), *ElementTy = DirectTy;
+ if (IsIndirect) {
+ unsigned AllocaAS = CGF.CGM.getDataLayout().getAllocaAddrSpace();
+ DirectTy = llvm::PointerType::get(CGF.getLLVMContext(), AllocaAS);
+ }
+
+ Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy, DirectSize,
+ DirectAlign, SlotSizeAndAlign,
+ AllowHigherAlign, ForceRightAdjust);
+
+ if (IsIndirect) {
+ Addr = Address(CGF.Builder.CreateLoad(Addr), ElementTy, ValueInfo.Align);
+ }
+
+ return Addr;
+}
+
+Address CodeGen::emitMergePHI(CodeGenFunction &CGF, Address Addr1,
+ llvm::BasicBlock *Block1, Address Addr2,
+ llvm::BasicBlock *Block2,
+ const llvm::Twine &Name) {
+ assert(Addr1.getType() == Addr2.getType());
+ llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
+ PHI->addIncoming(Addr1.getPointer(), Block1);
+ PHI->addIncoming(Addr2.getPointer(), Block2);
+ CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
+ return Address(PHI, Addr1.getElementType(), Align);
+}
+
+bool CodeGen::isEmptyField(ASTContext &Context, const FieldDecl *FD,
+ bool AllowArrays, bool AsIfNoUniqueAddr) {
+ if (FD->isUnnamedBitfield())
+ return true;
+
+ QualType FT = FD->getType();
+
+ // Constant arrays of empty records count as empty, strip them off.
+ // Constant arrays of zero length always count as empty.
+ bool WasArray = false;
+ if (AllowArrays)
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
+ if (AT->getSize() == 0)
+ return true;
+ FT = AT->getElementType();
+ // The [[no_unique_address]] special case below does not apply to
+ // arrays of C++ empty records, so we need to remember this fact.
+ WasArray = true;
+ }
+
+ const RecordType *RT = FT->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ // C++ record fields are never empty, at least in the Itanium ABI.
+ //
+ // FIXME: We should use a predicate for whether this behavior is true in the
+ // current ABI.
+ //
+ // The exception to the above rule are fields marked with the
+ // [[no_unique_address]] attribute (since C++20). Those do count as empty
+ // according to the Itanium ABI. The exception applies only to records,
+ // not arrays of records, so we must also check whether we stripped off an
+ // array type above.
+ if (isa<CXXRecordDecl>(RT->getDecl()) &&
+ (WasArray || (!AsIfNoUniqueAddr && !FD->hasAttr<NoUniqueAddressAttr>())))
+ return false;
+
+ return isEmptyRecord(Context, FT, AllowArrays, AsIfNoUniqueAddr);
+}
+
+bool CodeGen::isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays,
+ bool AsIfNoUniqueAddr) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ for (const auto &I : CXXRD->bases())
+ if (!isEmptyRecord(Context, I.getType(), true, AsIfNoUniqueAddr))
+ return false;
+
+ for (const auto *I : RD->fields())
+ if (!isEmptyField(Context, I, AllowArrays, AsIfNoUniqueAddr))
+ return false;
+ return true;
+}
+
+const Type *CodeGen::isSingleElementStruct(QualType T, ASTContext &Context) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return nullptr;
+
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return nullptr;
+
+ const Type *Found = nullptr;
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (const auto &I : CXXRD->bases()) {
+ // Ignore empty records.
+ if (isEmptyRecord(Context, I.getType(), true))
+ continue;
+
+ // If we already found an element then this isn't a single-element struct.
+ if (Found)
+ return nullptr;
+
+ // If this is non-empty and not a single element struct, the composite
+ // cannot be a single element struct.
+ Found = isSingleElementStruct(I.getType(), Context);
+ if (!Found)
+ return nullptr;
+ }
+ }
+
+ // Check for single element.
+ for (const auto *FD : RD->fields()) {
+ QualType FT = FD->getType();
+
+ // Ignore empty fields.
+ if (isEmptyField(Context, FD, true))
+ continue;
+
+ // If we already found an element then this isn't a single-element
+ // struct.
+ if (Found)
+ return nullptr;
+
+ // Treat single element arrays as the element.
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
+ if (AT->getSize().getZExtValue() != 1)
+ break;
+ FT = AT->getElementType();
+ }
+
+ if (!isAggregateTypeForABI(FT)) {
+ Found = FT.getTypePtr();
+ } else {
+ Found = isSingleElementStruct(FT, Context);
+ if (!Found)
+ return nullptr;
+ }
+ }
+
+ // We don't consider a struct a single-element struct if it has
+ // padding beyond the element type.
+ if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
+ return nullptr;
+
+ return Found;
+}
+
+Address CodeGen::EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, const ABIArgInfo &AI) {
+ // This default implementation defers to the llvm backend's va_arg
+ // instruction. It can handle only passing arguments directly
+ // (typically only handled in the backend for primitive types), or
+ // aggregates passed indirectly by pointer (NOTE: if the "byval"
+ // flag has ABI impact in the callee, this implementation cannot
+ // work.)
+
+ // Only a few cases are covered here at the moment -- those needed
+ // by the default abi.
+ llvm::Value *Val;
+
+ if (AI.isIndirect()) {
+ assert(!AI.getPaddingType() &&
+ "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
+ assert(
+ !AI.getIndirectRealign() &&
+ "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");
+
+ auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
+ CharUnits TyAlignForABI = TyInfo.Align;
+
+ llvm::Type *ElementTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Type *BaseTy = llvm::PointerType::getUnqual(ElementTy);
+ llvm::Value *Addr =
+ CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
+ return Address(Addr, ElementTy, TyAlignForABI);
+ } else {
+ assert((AI.isDirect() || AI.isExtend()) &&
+ "Unexpected ArgInfo Kind in generic VAArg emitter!");
+
+ assert(!AI.getInReg() &&
+ "Unexpected InReg seen in arginfo in generic VAArg emitter!");
+ assert(!AI.getPaddingType() &&
+ "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
+ assert(!AI.getDirectOffset() &&
+ "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
+ assert(!AI.getCoerceToType() &&
+ "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
+
+ Address Temp = CGF.CreateMemTemp(Ty, "varet");
+ Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(),
+ CGF.ConvertTypeForMem(Ty));
+ CGF.Builder.CreateStore(Val, Temp);
+ return Temp;
+ }
+}
+
+bool CodeGen::isSIMDVectorType(ASTContext &Context, QualType Ty) {
+ return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
+}
+
+bool CodeGen::isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) {
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT)
+ return false;
+ const RecordDecl *RD = RT->getDecl();
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ for (const auto &I : CXXRD->bases())
+ if (!isRecordWithSIMDVectorType(Context, I.getType()))
+ return false;
+
+ for (const auto *i : RD->fields()) {
+ QualType FT = i->getType();
+
+ if (isSIMDVectorType(Context, FT))
+ return true;
+
+ if (isRecordWithSIMDVectorType(Context, FT))
+ return true;
+ }
+
+ return false;
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.h b/contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.h
new file mode 100644
index 000000000000..afde08ba100c
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/ABIInfoImpl.h
@@ -0,0 +1,158 @@
+//===- ABIInfoImpl.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_ABIINFOIMPL_H
+#define LLVM_CLANG_LIB_CODEGEN_ABIINFOIMPL_H
+
+#include "ABIInfo.h"
+#include "CGCXXABI.h"
+
+namespace clang::CodeGen {
+
+/// DefaultABIInfo - The default implementation for ABI specific
+/// details. This implementation provides information which results in
+/// self-consistent and sensible LLVM IR generation, but does not
+/// conform to any particular ABI.
+class DefaultABIInfo : public ABIInfo {
+public:
+ DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ virtual ~DefaultABIInfo();
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+// Helper for coercing an aggregate argument or return value into an integer
+// array of the same size (including padding) and alignment. This alternate
+// coercion happens only for the RenderScript ABI and can be removed after
+// runtimes that rely on it are no longer supported.
+//
+// RenderScript assumes that the size of the argument / return value in the IR
+// is the same as the size of the corresponding qualified type. This helper
+// coerces the aggregate type into an array of the same size (including
+// padding). This coercion is used in lieu of expansion of struct members or
+// other canonical coercions that return a coerced-type of larger size.
+//
+// Ty - The argument / return value type
+// Context - The associated ASTContext
+// LLVMContext - The associated LLVMContext
+ABIArgInfo coerceToIntArray(QualType Ty, ASTContext &Context,
+ llvm::LLVMContext &LLVMContext);
+
+void AssignToArrayRange(CodeGen::CGBuilderTy &Builder, llvm::Value *Array,
+ llvm::Value *Value, unsigned FirstIndex,
+ unsigned LastIndex);
+
+bool isAggregateTypeForABI(QualType T);
+
+llvm::Type *getVAListElementType(CodeGenFunction &CGF);
+
+CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI);
+
+CGCXXABI::RecordArgABI getRecordArgABI(QualType T, CGCXXABI &CXXABI);
+
+bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
+ const ABIInfo &Info);
+
+/// Pass transparent unions as if they were the type of the first element. Sema
+/// should ensure that all elements of the union have the same "machine type".
+QualType useFirstFieldIfTransparentUnion(QualType Ty);
+
+// Dynamically round a pointer up to a multiple of the given alignment.
+llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
+ llvm::Value *Ptr, CharUnits Align);
+
+/// Emit va_arg for a platform using the common void* representation,
+/// where arguments are simply emitted in an array of slots on the stack.
+///
+/// This version implements the core direct-value passing rules.
+///
+/// \param SlotSize - The size and alignment of a stack slot.
+/// Each argument will be allocated to a multiple of this number of
+/// slots, and all the slots will be aligned to this value.
+/// \param AllowHigherAlign - The slot alignment is not a cap;
+/// an argument type with an alignment greater than the slot size
+/// will be emitted on a higher-alignment address, potentially
+/// leaving one or more empty slots behind as padding. If this
+/// is false, the returned address might be less-aligned than
+/// DirectAlign.
+/// \param ForceRightAdjust - Default is false. On big-endian platform and
+/// if the argument is smaller than a slot, set this flag will force
+/// right-adjust the argument in its slot irrespective of the type.
+Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ llvm::Type *DirectTy, CharUnits DirectSize,
+ CharUnits DirectAlign, CharUnits SlotSize,
+ bool AllowHigherAlign,
+ bool ForceRightAdjust = false);
+
+/// Emit va_arg for a platform using the common void* representation,
+/// where arguments are simply emitted in an array of slots on the stack.
+///
+/// \param IsIndirect - Values of this type are passed indirectly.
+/// \param ValueInfo - The size and alignment of this type, generally
+/// computed with getContext().getTypeInfoInChars(ValueTy).
+/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
+/// Each argument will be allocated to a multiple of this number of
+/// slots, and all the slots will be aligned to this value.
+/// \param AllowHigherAlign - The slot alignment is not a cap;
+/// an argument type with an alignment greater than the slot size
+/// will be emitted on a higher-alignment address, potentially
+/// leaving one or more empty slots behind as padding.
+/// \param ForceRightAdjust - Default is false. On big-endian platform and
+/// if the argument is smaller than a slot, set this flag will force
+/// right-adjust the argument in its slot irrespective of the type.
+Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType ValueTy, bool IsIndirect,
+ TypeInfoChars ValueInfo, CharUnits SlotSizeAndAlign,
+ bool AllowHigherAlign, bool ForceRightAdjust = false);
+
+Address emitMergePHI(CodeGenFunction &CGF, Address Addr1,
+ llvm::BasicBlock *Block1, Address Addr2,
+ llvm::BasicBlock *Block2, const llvm::Twine &Name = "");
+
+/// isEmptyField - Return true iff a the field is "empty", that is it
+/// is an unnamed bit-field or an (array of) empty record(s). If
+/// AsIfNoUniqueAddr is true, then C++ record fields are considered empty if
+/// the [[no_unique_address]] attribute would have made them empty.
+bool isEmptyField(ASTContext &Context, const FieldDecl *FD, bool AllowArrays,
+ bool AsIfNoUniqueAddr = false);
+
+/// isEmptyRecord - Return true iff a structure contains only empty
+/// fields. Note that a structure with a flexible array member is not
+/// considered empty. If AsIfNoUniqueAddr is true, then C++ record fields are
+/// considered empty if the [[no_unique_address]] attribute would have made
+/// them empty.
+bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays,
+ bool AsIfNoUniqueAddr = false);
+
+/// isSingleElementStruct - Determine if a structure is a "single
+/// element struct", i.e. it has exactly one non-empty field or
+/// exactly one field which is itself a single element
+/// struct. Structures with flexible array members are never
+/// considered single element structs.
+///
+/// \return The field declaration for the single non-empty field, if
+/// it exists.
+const Type *isSingleElementStruct(QualType T, ASTContext &Context);
+
+Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
+ const ABIArgInfo &AI);
+
+bool isSIMDVectorType(ASTContext &Context, QualType Ty);
+
+bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty);
+
+} // namespace clang::CodeGen
+
+#endif // LLVM_CLANG_LIB_CODEGEN_ABIINFOIMPL_H
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Address.h b/contrib/llvm-project/clang/lib/CodeGen/Address.h
index 6a8e57f8db33..cf48df8f5e73 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Address.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/Address.h
@@ -14,29 +14,43 @@
#ifndef LLVM_CLANG_LIB_CODEGEN_ADDRESS_H
#define LLVM_CLANG_LIB_CODEGEN_ADDRESS_H
-#include "llvm/IR/Constants.h"
#include "clang/AST/CharUnits.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/Support/MathExtras.h"
namespace clang {
namespace CodeGen {
+// Indicates whether a pointer is known not to be null.
+enum KnownNonNull_t { NotKnownNonNull, KnownNonNull };
+
/// An aligned address.
class Address {
- llvm::Value *Pointer;
+ llvm::PointerIntPair<llvm::Value *, 1, bool> PointerAndKnownNonNull;
+ llvm::Type *ElementType;
CharUnits Alignment;
+
+protected:
+ Address(std::nullptr_t) : ElementType(nullptr) {}
+
public:
- Address(llvm::Value *pointer, CharUnits alignment)
- : Pointer(pointer), Alignment(alignment) {
- assert((!alignment.isZero() || pointer == nullptr) &&
- "creating valid address with invalid alignment");
+ Address(llvm::Value *Pointer, llvm::Type *ElementType, CharUnits Alignment,
+ KnownNonNull_t IsKnownNonNull = NotKnownNonNull)
+ : PointerAndKnownNonNull(Pointer, IsKnownNonNull),
+ ElementType(ElementType), Alignment(Alignment) {
+ assert(Pointer != nullptr && "Pointer cannot be null");
+ assert(ElementType != nullptr && "Element type cannot be null");
}
- static Address invalid() { return Address(nullptr, CharUnits()); }
- bool isValid() const { return Pointer != nullptr; }
+ static Address invalid() { return Address(nullptr); }
+ bool isValid() const {
+ return PointerAndKnownNonNull.getPointer() != nullptr;
+ }
llvm::Value *getPointer() const {
assert(isValid());
- return Pointer;
+ return PointerAndKnownNonNull.getPointer();
}
/// Return the type of the pointer value.
@@ -45,11 +59,9 @@ public:
}
/// Return the type of the values stored in this address.
- ///
- /// When IR pointer types lose their element type, we should simply
- /// store it in Address instead for the convenience of writing code.
llvm::Type *getElementType() const {
- return getType()->getElementType();
+ assert(isValid());
+ return ElementType;
}
/// Return the address space that this address resides in.
@@ -67,30 +79,62 @@ public:
assert(isValid());
return Alignment;
}
+
+ /// Return address with different pointer, but same element type and
+ /// alignment.
+ Address withPointer(llvm::Value *NewPointer,
+ KnownNonNull_t IsKnownNonNull) const {
+ return Address(NewPointer, getElementType(), getAlignment(),
+ IsKnownNonNull);
+ }
+
+ /// Return address with different alignment, but same pointer and element
+ /// type.
+ Address withAlignment(CharUnits NewAlignment) const {
+ return Address(getPointer(), getElementType(), NewAlignment,
+ isKnownNonNull());
+ }
+
+ /// Return address with different element type, but same pointer and
+ /// alignment.
+ Address withElementType(llvm::Type *ElemTy) const {
+ return Address(getPointer(), ElemTy, getAlignment(), isKnownNonNull());
+ }
+
+ /// Whether the pointer is known not to be null.
+ KnownNonNull_t isKnownNonNull() const {
+ assert(isValid());
+ return (KnownNonNull_t)PointerAndKnownNonNull.getInt();
+ }
+
+ /// Set the non-null bit.
+ Address setKnownNonNull() {
+ assert(isValid());
+ PointerAndKnownNonNull.setInt(true);
+ return *this;
+ }
};
/// A specialization of Address that requires the address to be an
/// LLVM Constant.
class ConstantAddress : public Address {
+ ConstantAddress(std::nullptr_t) : Address(nullptr) {}
+
public:
- ConstantAddress(llvm::Constant *pointer, CharUnits alignment)
- : Address(pointer, alignment) {}
+ ConstantAddress(llvm::Constant *pointer, llvm::Type *elementType,
+ CharUnits alignment)
+ : Address(pointer, elementType, alignment) {}
static ConstantAddress invalid() {
- return ConstantAddress(nullptr, CharUnits());
+ return ConstantAddress(nullptr);
}
llvm::Constant *getPointer() const {
return llvm::cast<llvm::Constant>(Address::getPointer());
}
- ConstantAddress getBitCast(llvm::Type *ty) const {
- return ConstantAddress(llvm::ConstantExpr::getBitCast(getPointer(), ty),
- getAlignment());
- }
-
- ConstantAddress getElementBitCast(llvm::Type *ty) const {
- return getBitCast(ty->getPointerTo(getAddressSpace()));
+ ConstantAddress withElementType(llvm::Type *ElemTy) const {
+ return ConstantAddress(getPointer(), ElemTy, getAlignment());
}
static bool isaImpl(Address addr) {
@@ -98,7 +142,7 @@ public:
}
static ConstantAddress castImpl(Address addr) {
return ConstantAddress(llvm::cast<llvm::Constant>(addr.getPointer()),
- addr.getAlignment());
+ addr.getElementType(), addr.getAlignment());
}
};
diff --git a/contrib/llvm-project/clang/lib/CodeGen/BackendConsumer.h b/contrib/llvm-project/clang/lib/CodeGen/BackendConsumer.h
new file mode 100644
index 000000000000..72a814cd43d7
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/BackendConsumer.h
@@ -0,0 +1,166 @@
+//===--- BackendConsumer.h - LLVM BackendConsumer Header File -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_BACKENDCONSUMER_H
+#define LLVM_CLANG_LIB_CODEGEN_BACKENDCONSUMER_H
+
+#include "clang/CodeGen/BackendUtil.h"
+#include "clang/CodeGen/CodeGenAction.h"
+
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/Support/Timer.h"
+
+namespace llvm {
+ class DiagnosticInfoDontCall;
+}
+
+namespace clang {
+class ASTContext;
+class CodeGenAction;
+class CoverageSourceInfo;
+
+class BackendConsumer : public ASTConsumer {
+ using LinkModule = CodeGenAction::LinkModule;
+
+ virtual void anchor();
+ DiagnosticsEngine &Diags;
+ BackendAction Action;
+ const HeaderSearchOptions &HeaderSearchOpts;
+ const CodeGenOptions &CodeGenOpts;
+ const TargetOptions &TargetOpts;
+ const LangOptions &LangOpts;
+ std::unique_ptr<raw_pwrite_stream> AsmOutStream;
+ ASTContext *Context;
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS;
+
+ llvm::Timer LLVMIRGeneration;
+ unsigned LLVMIRGenerationRefCount;
+
+ /// True if we've finished generating IR. This prevents us from generating
+ /// additional LLVM IR after emitting output in HandleTranslationUnit. This
+ /// can happen when Clang plugins trigger additional AST deserialization.
+ bool IRGenFinished = false;
+
+ bool TimerIsEnabled = false;
+
+ std::unique_ptr<CodeGenerator> Gen;
+
+ SmallVector<LinkModule, 4> LinkModules;
+
+ // A map from mangled names to their function's source location, used for
+ // backend diagnostics as the Clang AST may be unavailable. We actually use
+ // the mangled name's hash as the key because mangled names can be very
+ // long and take up lots of space. Using a hash can cause name collision,
+ // but that is rare and the consequences are pointing to a wrong source
+ // location which is not severe. This is a vector instead of an actual map
+ // because we optimize for time building this map rather than time
+ // retrieving an entry, as backend diagnostics are uncommon.
+ std::vector<std::pair<llvm::hash_code, FullSourceLoc>>
+ ManglingFullSourceLocs;
+
+
+ // This is here so that the diagnostic printer knows the module a diagnostic
+ // refers to.
+ llvm::Module *CurLinkModule = nullptr;
+
+public:
+ BackendConsumer(BackendAction Action, DiagnosticsEngine &Diags,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
+ const HeaderSearchOptions &HeaderSearchOpts,
+ const PreprocessorOptions &PPOpts,
+ const CodeGenOptions &CodeGenOpts,
+ const TargetOptions &TargetOpts,
+ const LangOptions &LangOpts, const std::string &InFile,
+ SmallVector<LinkModule, 4> LinkModules,
+ std::unique_ptr<raw_pwrite_stream> OS, llvm::LLVMContext &C,
+ CoverageSourceInfo *CoverageInfo = nullptr);
+
+ // This constructor is used in installing an empty BackendConsumer
+ // to use the clang diagnostic handler for IR input files. It avoids
+ // initializing the OS field.
+ BackendConsumer(BackendAction Action, DiagnosticsEngine &Diags,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
+ const HeaderSearchOptions &HeaderSearchOpts,
+ const PreprocessorOptions &PPOpts,
+ const CodeGenOptions &CodeGenOpts,
+ const TargetOptions &TargetOpts,
+ const LangOptions &LangOpts, llvm::Module *Module,
+ SmallVector<LinkModule, 4> LinkModules, llvm::LLVMContext &C,
+ CoverageSourceInfo *CoverageInfo = nullptr);
+
+ llvm::Module *getModule() const;
+ std::unique_ptr<llvm::Module> takeModule();
+
+ CodeGenerator *getCodeGenerator();
+
+ void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) override;
+ void Initialize(ASTContext &Ctx) override;
+ bool HandleTopLevelDecl(DeclGroupRef D) override;
+ void HandleInlineFunctionDefinition(FunctionDecl *D) override;
+ void HandleInterestingDecl(DeclGroupRef D) override;
+ void HandleTranslationUnit(ASTContext &C) override;
+ void HandleTagDeclDefinition(TagDecl *D) override;
+ void HandleTagDeclRequiredDefinition(const TagDecl *D) override;
+ void CompleteTentativeDefinition(VarDecl *D) override;
+ void CompleteExternalDeclaration(VarDecl *D) override;
+ void AssignInheritanceModel(CXXRecordDecl *RD) override;
+ void HandleVTable(CXXRecordDecl *RD) override;
+
+
+ // Links each entry in LinkModules into our module. Returns true on error.
+ bool LinkInModules(llvm::Module *M, bool ShouldLinkFiles = true);
+
+ /// Get the best possible source location to represent a diagnostic that
+ /// may have associated debug info.
+ const FullSourceLoc getBestLocationFromDebugLoc(
+ const llvm::DiagnosticInfoWithLocationBase &D,
+ bool &BadDebugInfo, StringRef &Filename,
+ unsigned &Line, unsigned &Column) const;
+
+ std::optional<FullSourceLoc> getFunctionSourceLocation(
+ const llvm::Function &F) const;
+
+ void DiagnosticHandlerImpl(const llvm::DiagnosticInfo &DI);
+ /// Specialized handler for InlineAsm diagnostic.
+ /// \return True if the diagnostic has been successfully reported, false
+ /// otherwise.
+ bool InlineAsmDiagHandler(const llvm::DiagnosticInfoInlineAsm &D);
+ /// Specialized handler for diagnostics reported using SMDiagnostic.
+ void SrcMgrDiagHandler(const llvm::DiagnosticInfoSrcMgr &D);
+ /// Specialized handler for StackSize diagnostic.
+ /// \return True if the diagnostic has been successfully reported, false
+ /// otherwise.
+ bool StackSizeDiagHandler(const llvm::DiagnosticInfoStackSize &D);
+ /// Specialized handler for ResourceLimit diagnostic.
+ /// \return True if the diagnostic has been successfully reported, false
+ /// otherwise.
+ bool ResourceLimitDiagHandler(const llvm::DiagnosticInfoResourceLimit &D);
+
+ /// Specialized handler for unsupported backend feature diagnostic.
+ void UnsupportedDiagHandler(const llvm::DiagnosticInfoUnsupported &D);
+ /// Specialized handlers for optimization remarks.
+ /// Note that these handlers only accept remarks and they always handle
+ /// them.
+ void EmitOptimizationMessage(const llvm::DiagnosticInfoOptimizationBase &D,
+ unsigned DiagID);
+ void
+ OptimizationRemarkHandler(const llvm::DiagnosticInfoOptimizationBase &D);
+ void OptimizationRemarkHandler(
+ const llvm::OptimizationRemarkAnalysisFPCommute &D);
+ void OptimizationRemarkHandler(
+ const llvm::OptimizationRemarkAnalysisAliasing &D);
+ void OptimizationFailureHandler(
+ const llvm::DiagnosticInfoOptimizationFailure &D);
+ void DontCallDiagHandler(const llvm::DiagnosticInfoDontCall &D);
+ /// Specialized handler for misexpect warnings.
+ /// Note that misexpect remarks are emitted through ORE
+ void MisExpectDiagHandler(const llvm::DiagnosticInfoMisExpect &D);
+};
+
+} // namespace clang
+#endif
diff --git a/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp b/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
index 481f5347d978..4f22d35f9d3a 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
@@ -7,6 +7,8 @@
//===----------------------------------------------------------------------===//
#include "clang/CodeGen/BackendUtil.h"
+#include "BackendConsumer.h"
+#include "LinkInModulesPass.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/LangOptions.h"
@@ -17,9 +19,8 @@
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/StackSafetyAnalysis.h"
+#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Bitcode/BitcodeReader.h"
@@ -28,39 +29,39 @@
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/Frontend/Driver/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Verifier.h"
+#include "llvm/IRPrinter/IRPrintingPasses.h"
#include "llvm/LTO/LTOBackend.h"
#include "llvm/MC/MCAsmInfo.h"
-#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Object/OffloadBinary.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Passes/PassPlugin.h"
#include "llvm/Passes/StandardInstrumentations.h"
+#include "llvm/ProfileData/InstrProfCorrelator.h"
#include "llvm/Support/BuryPointer.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/PrettyStackTrace.h"
-#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
-#include "llvm/Transforms/Coroutines.h"
-#include "llvm/Transforms/Coroutines/CoroCleanup.h"
-#include "llvm/Transforms/Coroutines/CoroEarly.h"
-#include "llvm/Transforms/Coroutines/CoroElide.h"
-#include "llvm/Transforms/Coroutines/CoroSplit.h"
-#include "llvm/Transforms/IPO.h"
-#include "llvm/Transforms/IPO/AlwaysInliner.h"
+#include "llvm/TargetParser/SubtargetFeature.h"
+#include "llvm/TargetParser/Triple.h"
+#include "llvm/Transforms/HipStdPar/HipStdPar.h"
+#include "llvm/Transforms/IPO/EmbedBitcodePass.h"
#include "llvm/Transforms/IPO/LowerTypeTests.h"
-#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/IPO/ThinLTOBitcodeWriter.h"
#include "llvm/Transforms/InstCombine/InstCombine.h"
#include "llvm/Transforms/Instrumentation.h"
@@ -71,22 +72,22 @@
#include "llvm/Transforms/Instrumentation/GCOVProfiler.h"
#include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
#include "llvm/Transforms/Instrumentation/InstrProfiling.h"
+#include "llvm/Transforms/Instrumentation/KCFI.h"
#include "llvm/Transforms/Instrumentation/MemProfiler.h"
#include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
+#include "llvm/Transforms/Instrumentation/PGOInstrumentation.h"
+#include "llvm/Transforms/Instrumentation/SanitizerBinaryMetadata.h"
#include "llvm/Transforms/Instrumentation/SanitizerCoverage.h"
#include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
#include "llvm/Transforms/ObjCARC.h"
-#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/EarlyCSE.h"
#include "llvm/Transforms/Scalar/GVN.h"
-#include "llvm/Transforms/Scalar/LowerMatrixIntrinsics.h"
-#include "llvm/Transforms/Utils.h"
-#include "llvm/Transforms/Utils/CanonicalizeAliases.h"
+#include "llvm/Transforms/Scalar/JumpThreading.h"
#include "llvm/Transforms/Utils/Debugify.h"
#include "llvm/Transforms/Utils/EntryExitInstrumenter.h"
-#include "llvm/Transforms/Utils/NameAnonGlobals.h"
-#include "llvm/Transforms/Utils/SymbolRewriter.h"
+#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <memory>
+#include <optional>
using namespace clang;
using namespace llvm;
@@ -94,10 +95,30 @@ using namespace llvm;
llvm::PassPluginLibraryInfo get##Ext##PluginInfo();
#include "llvm/Support/Extension.def"
+namespace llvm {
+extern cl::opt<bool> PrintPipelinePasses;
+
+// Experiment to move sanitizers earlier.
+static cl::opt<bool> ClSanitizeOnOptimizerEarlyEP(
+ "sanitizer-early-opt-ep", cl::Optional,
+ cl::desc("Insert sanitizers on OptimizerEarlyEP."), cl::init(false));
+
+extern cl::opt<InstrProfCorrelator::ProfCorrelatorKind> ProfileCorrelate;
+
+// Re-link builtin bitcodes after optimization
+cl::opt<bool> ClRelinkBuiltinBitcodePostop(
+ "relink-builtin-bitcode-postop", cl::Optional,
+ cl::desc("Re-link builtin bitcodes after optimization."), cl::init(false));
+} // namespace llvm
+
namespace {
// Default filename used for profile generation.
-static constexpr StringLiteral DefaultProfileGenName = "default_%m.profraw";
+std::string getDefaultProfileGenName() {
+ return DebugInfoCorrelate || ProfileCorrelate != InstrProfCorrelator::NONE
+ ? "default_%m.proflite"
+ : "default_%m.profraw";
+}
class EmitAssemblyHelper {
DiagnosticsEngine &Diags;
@@ -105,12 +126,15 @@ class EmitAssemblyHelper {
const CodeGenOptions &CodeGenOpts;
const clang::TargetOptions &TargetOpts;
const LangOptions &LangOpts;
- Module *TheModule;
+ llvm::Module *TheModule;
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS;
Timer CodeGenerationTime;
std::unique_ptr<raw_pwrite_stream> OS;
+ Triple TargetTriple;
+
TargetIRAnalysis getTargetIRAnalysis() const {
if (TM)
return TM->getTargetIRAnalysis();
@@ -118,8 +142,6 @@ class EmitAssemblyHelper {
return TargetIRAnalysis();
}
- void CreatePasses(legacy::PassManager &MPM, legacy::FunctionPassManager &FPM);
-
/// Generates the TargetMachine.
/// Leaves TM unchanged if it is unable to create the target machine.
/// Some of our clang tests specify triples which are not built
@@ -147,15 +169,42 @@ class EmitAssemblyHelper {
return F;
}
+ void RunOptimizationPipeline(
+ BackendAction Action, std::unique_ptr<raw_pwrite_stream> &OS,
+ std::unique_ptr<llvm::ToolOutputFile> &ThinLinkOS, BackendConsumer *BC);
+ void RunCodegenPipeline(BackendAction Action,
+ std::unique_ptr<raw_pwrite_stream> &OS,
+ std::unique_ptr<llvm::ToolOutputFile> &DwoOS);
+
+ /// Check whether we should emit a module summary for regular LTO.
+ /// The module summary should be emitted by default for regular LTO
+ /// except for ld64 targets.
+ ///
+ /// \return True if the module summary should be emitted.
+ bool shouldEmitRegularLTOSummary() const {
+ return CodeGenOpts.PrepareForLTO && !CodeGenOpts.DisableLLVMPasses &&
+ TargetTriple.getVendor() != llvm::Triple::Apple;
+ }
+
+ /// Check whether we should emit a flag for UnifiedLTO.
+ /// The UnifiedLTO module flag should be set when UnifiedLTO is enabled for
+ /// ThinLTO or Full LTO with module summaries.
+ bool shouldEmitUnifiedLTOModueFlag() const {
+ return CodeGenOpts.UnifiedLTO &&
+ (CodeGenOpts.PrepareForThinLTO || shouldEmitRegularLTOSummary());
+ }
+
public:
EmitAssemblyHelper(DiagnosticsEngine &_Diags,
const HeaderSearchOptions &HeaderSearchOpts,
const CodeGenOptions &CGOpts,
const clang::TargetOptions &TOpts,
- const LangOptions &LOpts, Module *M)
+ const LangOptions &LOpts, llvm::Module *M,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS)
: Diags(_Diags), HSOpts(HeaderSearchOpts), CodeGenOpts(CGOpts),
- TargetOpts(TOpts), LangOpts(LOpts), TheModule(M),
- CodeGenerationTime("codegen", "Code Generation Time") {}
+ TargetOpts(TOpts), LangOpts(LOpts), TheModule(M), VFS(std::move(VFS)),
+ CodeGenerationTime("codegen", "Code Generation Time"),
+ TargetTriple(TheModule->getTargetTriple()) {}
~EmitAssemblyHelper() {
if (CodeGenOpts.DisableFree)
@@ -164,57 +213,11 @@ public:
std::unique_ptr<TargetMachine> TM;
- void EmitAssembly(BackendAction Action,
- std::unique_ptr<raw_pwrite_stream> OS);
-
- void EmitAssemblyWithNewPassManager(BackendAction Action,
- std::unique_ptr<raw_pwrite_stream> OS);
+ // Emit output using the new pass manager for the optimization pipeline.
+ void EmitAssembly(BackendAction Action, std::unique_ptr<raw_pwrite_stream> OS,
+ BackendConsumer *BC);
};
-
-// We need this wrapper to access LangOpts and CGOpts from extension functions
-// that we add to the PassManagerBuilder.
-class PassManagerBuilderWrapper : public PassManagerBuilder {
-public:
- PassManagerBuilderWrapper(const Triple &TargetTriple,
- const CodeGenOptions &CGOpts,
- const LangOptions &LangOpts)
- : PassManagerBuilder(), TargetTriple(TargetTriple), CGOpts(CGOpts),
- LangOpts(LangOpts) {}
- const Triple &getTargetTriple() const { return TargetTriple; }
- const CodeGenOptions &getCGOpts() const { return CGOpts; }
- const LangOptions &getLangOpts() const { return LangOpts; }
-
-private:
- const Triple &TargetTriple;
- const CodeGenOptions &CGOpts;
- const LangOptions &LangOpts;
-};
-}
-
-static void addObjCARCAPElimPass(const PassManagerBuilder &Builder, PassManagerBase &PM) {
- if (Builder.OptLevel > 0)
- PM.add(createObjCARCAPElimPass());
-}
-
-static void addObjCARCExpandPass(const PassManagerBuilder &Builder, PassManagerBase &PM) {
- if (Builder.OptLevel > 0)
- PM.add(createObjCARCExpandPass());
-}
-
-static void addObjCARCOptPass(const PassManagerBuilder &Builder, PassManagerBase &PM) {
- if (Builder.OptLevel > 0)
- PM.add(createObjCARCOptPass());
-}
-
-static void addAddDiscriminatorsPass(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- PM.add(createAddDiscriminatorsPass());
-}
-
-static void addBoundsCheckingPass(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- PM.add(createBoundsCheckingLegacyPass());
-}
+} // namespace
static SanitizerCoverageOptions
getSancovOptsFromCGOpts(const CodeGenOptions &CGOpts) {
@@ -234,18 +237,19 @@ getSancovOptsFromCGOpts(const CodeGenOptions &CGOpts) {
Opts.InlineBoolFlag = CGOpts.SanitizeCoverageInlineBoolFlag;
Opts.PCTable = CGOpts.SanitizeCoveragePCTable;
Opts.StackDepth = CGOpts.SanitizeCoverageStackDepth;
+ Opts.TraceLoads = CGOpts.SanitizeCoverageTraceLoads;
+ Opts.TraceStores = CGOpts.SanitizeCoverageTraceStores;
+ Opts.CollectControlFlow = CGOpts.SanitizeCoverageControlFlow;
return Opts;
}
-static void addSanitizerCoveragePass(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- const PassManagerBuilderWrapper &BuilderWrapper =
- static_cast<const PassManagerBuilderWrapper &>(Builder);
- const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
- auto Opts = getSancovOptsFromCGOpts(CGOpts);
- PM.add(createModuleSanitizerCoverageLegacyPassPass(
- Opts, CGOpts.SanitizeCoverageAllowlistFiles,
- CGOpts.SanitizeCoverageIgnorelistFiles));
+static SanitizerBinaryMetadataOptions
+getSanitizerBinaryMetadataOptions(const CodeGenOptions &CGOpts) {
+ SanitizerBinaryMetadataOptions Opts;
+ Opts.Covered = CGOpts.SanitizeBinaryMetadataCovered;
+ Opts.Atomics = CGOpts.SanitizeBinaryMetadataAtomics;
+ Opts.UAR = CGOpts.SanitizeBinaryMetadataUAR;
+ return Opts;
}
// Check if ASan should use GC-friendly instrumentation for globals.
@@ -260,194 +264,21 @@ static bool asanUseGlobalsGC(const Triple &T, const CodeGenOptions &CGOpts) {
case Triple::COFF:
return true;
case Triple::ELF:
- return CGOpts.DataSections && !CGOpts.DisableIntegratedAS;
+ return !CGOpts.DisableIntegratedAS;
case Triple::GOFF:
llvm::report_fatal_error("ASan not implemented for GOFF");
case Triple::XCOFF:
llvm::report_fatal_error("ASan not implemented for XCOFF.");
case Triple::Wasm:
+ case Triple::DXContainer:
+ case Triple::SPIRV:
case Triple::UnknownObjectFormat:
break;
}
return false;
}
-static void addMemProfilerPasses(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- PM.add(createMemProfilerFunctionPass());
- PM.add(createModuleMemProfilerLegacyPassPass());
-}
-
-static void addAddressSanitizerPasses(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- const PassManagerBuilderWrapper &BuilderWrapper =
- static_cast<const PassManagerBuilderWrapper&>(Builder);
- const Triple &T = BuilderWrapper.getTargetTriple();
- const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
- bool Recover = CGOpts.SanitizeRecover.has(SanitizerKind::Address);
- bool UseAfterScope = CGOpts.SanitizeAddressUseAfterScope;
- bool UseOdrIndicator = CGOpts.SanitizeAddressUseOdrIndicator;
- bool UseGlobalsGC = asanUseGlobalsGC(T, CGOpts);
- llvm::AsanDtorKind DestructorKind = CGOpts.getSanitizeAddressDtor();
- llvm::AsanDetectStackUseAfterReturnMode UseAfterReturn =
- CGOpts.getSanitizeAddressUseAfterReturn();
- PM.add(createAddressSanitizerFunctionPass(/*CompileKernel*/ false, Recover,
- UseAfterScope, UseAfterReturn));
- PM.add(createModuleAddressSanitizerLegacyPassPass(
- /*CompileKernel*/ false, Recover, UseGlobalsGC, UseOdrIndicator,
- DestructorKind));
-}
-
-static void addKernelAddressSanitizerPasses(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- PM.add(createAddressSanitizerFunctionPass(
- /*CompileKernel*/ true, /*Recover*/ true, /*UseAfterScope*/ false,
- /*UseAfterReturn*/ llvm::AsanDetectStackUseAfterReturnMode::Never));
- PM.add(createModuleAddressSanitizerLegacyPassPass(
- /*CompileKernel*/ true, /*Recover*/ true, /*UseGlobalsGC*/ true,
- /*UseOdrIndicator*/ false));
-}
-
-static void addHWAddressSanitizerPasses(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- const PassManagerBuilderWrapper &BuilderWrapper =
- static_cast<const PassManagerBuilderWrapper &>(Builder);
- const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
- bool Recover = CGOpts.SanitizeRecover.has(SanitizerKind::HWAddress);
- PM.add(createHWAddressSanitizerLegacyPassPass(
- /*CompileKernel*/ false, Recover,
- /*DisableOptimization*/ CGOpts.OptimizationLevel == 0));
-}
-
-static void addKernelHWAddressSanitizerPasses(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- const PassManagerBuilderWrapper &BuilderWrapper =
- static_cast<const PassManagerBuilderWrapper &>(Builder);
- const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
- PM.add(createHWAddressSanitizerLegacyPassPass(
- /*CompileKernel*/ true, /*Recover*/ true,
- /*DisableOptimization*/ CGOpts.OptimizationLevel == 0));
-}
-
-static void addGeneralOptsForMemorySanitizer(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM,
- bool CompileKernel) {
- const PassManagerBuilderWrapper &BuilderWrapper =
- static_cast<const PassManagerBuilderWrapper&>(Builder);
- const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
- int TrackOrigins = CGOpts.SanitizeMemoryTrackOrigins;
- bool Recover = CGOpts.SanitizeRecover.has(SanitizerKind::Memory);
- PM.add(createMemorySanitizerLegacyPassPass(
- MemorySanitizerOptions{TrackOrigins, Recover, CompileKernel}));
-
- // MemorySanitizer inserts complex instrumentation that mostly follows
- // the logic of the original code, but operates on "shadow" values.
- // It can benefit from re-running some general purpose optimization passes.
- if (Builder.OptLevel > 0) {
- PM.add(createEarlyCSEPass());
- PM.add(createReassociatePass());
- PM.add(createLICMPass());
- PM.add(createGVNPass());
- PM.add(createInstructionCombiningPass());
- PM.add(createDeadStoreEliminationPass());
- }
-}
-
-static void addMemorySanitizerPass(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- addGeneralOptsForMemorySanitizer(Builder, PM, /*CompileKernel*/ false);
-}
-
-static void addKernelMemorySanitizerPass(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- addGeneralOptsForMemorySanitizer(Builder, PM, /*CompileKernel*/ true);
-}
-
-static void addThreadSanitizerPass(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- PM.add(createThreadSanitizerLegacyPassPass());
-}
-
-static void addDataFlowSanitizerPass(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- const PassManagerBuilderWrapper &BuilderWrapper =
- static_cast<const PassManagerBuilderWrapper&>(Builder);
- const LangOptions &LangOpts = BuilderWrapper.getLangOpts();
- PM.add(createDataFlowSanitizerLegacyPassPass(LangOpts.NoSanitizeFiles));
-}
-
-static void addEntryExitInstrumentationPass(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- PM.add(createEntryExitInstrumenterPass());
-}
-
-static void
-addPostInlineEntryExitInstrumentationPass(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- PM.add(createPostInlineEntryExitInstrumenterPass());
-}
-
-static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple,
- const CodeGenOptions &CodeGenOpts) {
- TargetLibraryInfoImpl *TLII = new TargetLibraryInfoImpl(TargetTriple);
-
- switch (CodeGenOpts.getVecLib()) {
- case CodeGenOptions::Accelerate:
- TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::Accelerate);
- break;
- case CodeGenOptions::LIBMVEC:
- switch(TargetTriple.getArch()) {
- default:
- break;
- case llvm::Triple::x86_64:
- TLII->addVectorizableFunctionsFromVecLib
- (TargetLibraryInfoImpl::LIBMVEC_X86);
- break;
- }
- break;
- case CodeGenOptions::MASSV:
- TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::MASSV);
- break;
- case CodeGenOptions::SVML:
- TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::SVML);
- break;
- case CodeGenOptions::Darwin_libsystem_m:
- TLII->addVectorizableFunctionsFromVecLib(
- TargetLibraryInfoImpl::DarwinLibSystemM);
- break;
- default:
- break;
- }
- return TLII;
-}
-
-static void addSymbolRewriterPass(const CodeGenOptions &Opts,
- legacy::PassManager *MPM) {
- llvm::SymbolRewriter::RewriteDescriptorList DL;
-
- llvm::SymbolRewriter::RewriteMapParser MapParser;
- for (const auto &MapFile : Opts.RewriteMapFiles)
- MapParser.parse(MapFile, &DL);
-
- MPM->add(createRewriteSymbolsPass(DL));
-}
-
-static CodeGenOpt::Level getCGOptLevel(const CodeGenOptions &CodeGenOpts) {
- switch (CodeGenOpts.OptimizationLevel) {
- default:
- llvm_unreachable("Invalid optimization level!");
- case 0:
- return CodeGenOpt::None;
- case 1:
- return CodeGenOpt::Less;
- case 2:
- return CodeGenOpt::Default; // O2/Os/Oz
- case 3:
- return CodeGenOpt::Aggressive;
- }
-}
-
-static Optional<llvm::CodeModel::Model>
+static std::optional<llvm::CodeModel::Model>
getCodeModel(const CodeGenOptions &CodeGenOpts) {
unsigned CodeModel = llvm::StringSwitch<unsigned>(CodeGenOpts.CodeModel)
.Case("tiny", llvm::CodeModel::Tiny)
@@ -459,21 +290,26 @@ getCodeModel(const CodeGenOptions &CodeGenOpts) {
.Default(~0u);
assert(CodeModel != ~0u && "invalid code model!");
if (CodeModel == ~1u)
- return None;
+ return std::nullopt;
return static_cast<llvm::CodeModel::Model>(CodeModel);
}
static CodeGenFileType getCodeGenFileType(BackendAction Action) {
if (Action == Backend_EmitObj)
- return CGFT_ObjectFile;
+ return CodeGenFileType::ObjectFile;
else if (Action == Backend_EmitMCNull)
- return CGFT_Null;
+ return CodeGenFileType::Null;
else {
assert(Action == Backend_EmitAssembly && "Invalid action!");
- return CGFT_AssemblyFile;
+ return CodeGenFileType::AssemblyFile;
}
}
+static bool actionRequiresCodeGen(BackendAction Action) {
+ return Action != Backend_EmitNothing && Action != Backend_EmitBC &&
+ Action != Backend_EmitLL;
+}
+
static bool initTargetOptions(DiagnosticsEngine &Diags,
llvm::TargetOptions &Options,
const CodeGenOptions &CodeGenOpts,
@@ -538,7 +374,13 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Options.NoInfsFPMath = LangOpts.NoHonorInfs;
Options.NoNaNsFPMath = LangOpts.NoHonorNaNs;
Options.NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
- Options.UnsafeFPMath = LangOpts.UnsafeFPMath;
+ Options.UnsafeFPMath = LangOpts.AllowFPReassoc && LangOpts.AllowRecip &&
+ LangOpts.NoSignedZero && LangOpts.ApproxFunc &&
+ (LangOpts.getDefaultFPContractMode() ==
+ LangOptions::FPModeKind::FPM_Fast ||
+ LangOpts.getDefaultFPContractMode() ==
+ LangOptions::FPModeKind::FPM_FastHonorPragmas);
+ Options.ApproxFuncFPMath = LangOpts.ApproxFunc;
Options.BBSections =
llvm::StringSwitch<llvm::BasicBlockSection>(CodeGenOpts.BBSections)
@@ -567,24 +409,48 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Options.UniqueBasicBlockSectionNames =
CodeGenOpts.UniqueBasicBlockSectionNames;
Options.TLSSize = CodeGenOpts.TLSSize;
+ Options.EnableTLSDESC = CodeGenOpts.EnableTLSDESC;
Options.EmulatedTLS = CodeGenOpts.EmulatedTLS;
- Options.ExplicitEmulatedTLS = CodeGenOpts.ExplicitEmulatedTLS;
Options.DebuggerTuning = CodeGenOpts.getDebuggerTuning();
Options.EmitStackSizeSection = CodeGenOpts.StackSizeSection;
Options.StackUsageOutput = CodeGenOpts.StackUsageOutput;
Options.EmitAddrsig = CodeGenOpts.Addrsig;
Options.ForceDwarfFrameSection = CodeGenOpts.ForceDwarfFrameSection;
Options.EmitCallSiteInfo = CodeGenOpts.EmitCallSiteInfo;
- Options.EnableAIXExtendedAltivecABI = CodeGenOpts.EnableAIXExtendedAltivecABI;
- Options.PseudoProbeForProfiling = CodeGenOpts.PseudoProbeForProfiling;
- Options.ValueTrackingVariableLocations =
- CodeGenOpts.ValueTrackingVariableLocations;
- Options.XRayOmitFunctionIndex = CodeGenOpts.XRayOmitFunctionIndex;
+ Options.EnableAIXExtendedAltivecABI = LangOpts.EnableAIXExtendedAltivecABI;
+ Options.XRayFunctionIndex = CodeGenOpts.XRayFunctionIndex;
+ Options.LoopAlignment = CodeGenOpts.LoopAlignment;
+ Options.DebugStrictDwarf = CodeGenOpts.DebugStrictDwarf;
+ Options.ObjectFilenameForDebug = CodeGenOpts.ObjectFilenameForDebug;
+ Options.Hotpatch = CodeGenOpts.HotPatch;
+ Options.JMCInstrument = CodeGenOpts.JMCInstrument;
+ Options.XCOFFReadOnlyPointers = CodeGenOpts.XCOFFReadOnlyPointers;
+
+ switch (CodeGenOpts.getSwiftAsyncFramePointer()) {
+ case CodeGenOptions::SwiftAsyncFramePointerKind::Auto:
+ Options.SwiftAsyncFramePointer =
+ SwiftAsyncFramePointerMode::DeploymentBased;
+ break;
+
+ case CodeGenOptions::SwiftAsyncFramePointerKind::Always:
+ Options.SwiftAsyncFramePointer = SwiftAsyncFramePointerMode::Always;
+ break;
+
+ case CodeGenOptions::SwiftAsyncFramePointerKind::Never:
+ Options.SwiftAsyncFramePointer = SwiftAsyncFramePointerMode::Never;
+ break;
+ }
Options.MCOptions.SplitDwarfFile = CodeGenOpts.SplitDwarfFile;
+ Options.MCOptions.EmitDwarfUnwind = CodeGenOpts.getEmitDwarfUnwind();
+ Options.MCOptions.EmitCompactUnwindNonCanonical =
+ CodeGenOpts.EmitCompactUnwindNonCanonical;
Options.MCOptions.MCRelaxAll = CodeGenOpts.RelaxAll;
Options.MCOptions.MCSaveTempLabels = CodeGenOpts.SaveTempLabels;
- Options.MCOptions.MCUseDwarfDirectory = !CodeGenOpts.NoDwarfDirectoryAsm;
+ Options.MCOptions.MCUseDwarfDirectory =
+ CodeGenOpts.NoDwarfDirectoryAsm
+ ? llvm::MCTargetOptions::DisableDwarfDirectory
+ : llvm::MCTargetOptions::EnableDwarfDirectory;
Options.MCOptions.MCNoExecStack = CodeGenOpts.NoExecStack;
Options.MCOptions.MCIncrementalLinkerCompatible =
CodeGenOpts.IncrementalLinkerCompatible;
@@ -603,20 +469,24 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Entry.IgnoreSysRoot ? Entry.Path : HSOpts.Sysroot + Entry.Path);
Options.MCOptions.Argv0 = CodeGenOpts.Argv0;
Options.MCOptions.CommandLineArgs = CodeGenOpts.CommandLineArgs;
- Options.DebugStrictDwarf = CodeGenOpts.DebugStrictDwarf;
+ Options.MCOptions.AsSecureLogFile = CodeGenOpts.AsSecureLogFile;
+ Options.MCOptions.PPCUseFullRegisterNames =
+ CodeGenOpts.PPCUseFullRegisterNames;
+ Options.MisExpect = CodeGenOpts.MisExpect;
return true;
}
-static Optional<GCOVOptions> getGCOVOptions(const CodeGenOptions &CodeGenOpts,
- const LangOptions &LangOpts) {
- if (!CodeGenOpts.EmitGcovArcs && !CodeGenOpts.EmitGcovNotes)
- return None;
+static std::optional<GCOVOptions>
+getGCOVOptions(const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts) {
+ if (CodeGenOpts.CoverageNotesFile.empty() &&
+ CodeGenOpts.CoverageDataFile.empty())
+ return std::nullopt;
// Not using 'GCOVOptions::getDefault' allows us to avoid exiting if
// LLVM's -default-gcov-version flag is set to something invalid.
GCOVOptions Options;
- Options.EmitNotes = CodeGenOpts.EmitGcovNotes;
- Options.EmitData = CodeGenOpts.EmitGcovArcs;
+ Options.EmitNotes = !CodeGenOpts.CoverageNotesFile.empty();
+ Options.EmitData = !CodeGenOpts.CoverageDataFile.empty();
llvm::copy(CodeGenOpts.CoverageVersion, std::begin(Options.Version));
Options.NoRedZone = CodeGenOpts.DisableRedZone;
Options.Filter = CodeGenOpts.ProfileFilterFiles;
@@ -625,11 +495,11 @@ static Optional<GCOVOptions> getGCOVOptions(const CodeGenOptions &CodeGenOpts,
return Options;
}
-static Optional<InstrProfOptions>
+static std::optional<InstrProfOptions>
getInstrProfOptions(const CodeGenOptions &CodeGenOpts,
const LangOptions &LangOpts) {
if (!CodeGenOpts.hasProfileClangInstr())
- return None;
+ return std::nullopt;
InstrProfOptions Options;
Options.NoRedZone = CodeGenOpts.DisableRedZone;
Options.InstrProfileOutput = CodeGenOpts.InstrProfileOutput;
@@ -637,233 +507,6 @@ getInstrProfOptions(const CodeGenOptions &CodeGenOpts,
return Options;
}
-void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
- legacy::FunctionPassManager &FPM) {
- // Handle disabling of all LLVM passes, where we want to preserve the
- // internal module before any optimization.
- if (CodeGenOpts.DisableLLVMPasses)
- return;
-
- // Figure out TargetLibraryInfo. This needs to be added to MPM and FPM
- // manually (and not via PMBuilder), since some passes (eg. InstrProfiling)
- // are inserted before PMBuilder ones - they'd get the default-constructed
- // TLI with an unknown target otherwise.
- Triple TargetTriple(TheModule->getTargetTriple());
- std::unique_ptr<TargetLibraryInfoImpl> TLII(
- createTLII(TargetTriple, CodeGenOpts));
-
- // If we reached here with a non-empty index file name, then the index file
- // was empty and we are not performing ThinLTO backend compilation (used in
- // testing in a distributed build environment). Drop any the type test
- // assume sequences inserted for whole program vtables so that codegen doesn't
- // complain.
- if (!CodeGenOpts.ThinLTOIndexFile.empty())
- MPM.add(createLowerTypeTestsPass(/*ExportSummary=*/nullptr,
- /*ImportSummary=*/nullptr,
- /*DropTypeTests=*/true));
-
- PassManagerBuilderWrapper PMBuilder(TargetTriple, CodeGenOpts, LangOpts);
-
- // At O0 and O1 we only run the always inliner which is more efficient. At
- // higher optimization levels we run the normal inliner.
- if (CodeGenOpts.OptimizationLevel <= 1) {
- bool InsertLifetimeIntrinsics = ((CodeGenOpts.OptimizationLevel != 0 &&
- !CodeGenOpts.DisableLifetimeMarkers) ||
- LangOpts.Coroutines);
- PMBuilder.Inliner = createAlwaysInlinerLegacyPass(InsertLifetimeIntrinsics);
- } else {
- // We do not want to inline hot callsites for SamplePGO module-summary build
- // because profile annotation will happen again in ThinLTO backend, and we
- // want the IR of the hot path to match the profile.
- PMBuilder.Inliner = createFunctionInliningPass(
- CodeGenOpts.OptimizationLevel, CodeGenOpts.OptimizeSize,
- (!CodeGenOpts.SampleProfileFile.empty() &&
- CodeGenOpts.PrepareForThinLTO));
- }
-
- PMBuilder.OptLevel = CodeGenOpts.OptimizationLevel;
- PMBuilder.SizeLevel = CodeGenOpts.OptimizeSize;
- PMBuilder.SLPVectorize = CodeGenOpts.VectorizeSLP;
- PMBuilder.LoopVectorize = CodeGenOpts.VectorizeLoop;
- // Only enable CGProfilePass when using integrated assembler, since
- // non-integrated assemblers don't recognize .cgprofile section.
- PMBuilder.CallGraphProfile = !CodeGenOpts.DisableIntegratedAS;
-
- PMBuilder.DisableUnrollLoops = !CodeGenOpts.UnrollLoops;
- // Loop interleaving in the loop vectorizer has historically been set to be
- // enabled when loop unrolling is enabled.
- PMBuilder.LoopsInterleaved = CodeGenOpts.UnrollLoops;
- PMBuilder.MergeFunctions = CodeGenOpts.MergeFunctions;
- PMBuilder.PrepareForThinLTO = CodeGenOpts.PrepareForThinLTO;
- PMBuilder.PrepareForLTO = CodeGenOpts.PrepareForLTO;
- PMBuilder.RerollLoops = CodeGenOpts.RerollLoops;
-
- MPM.add(new TargetLibraryInfoWrapperPass(*TLII));
-
- if (TM)
- TM->adjustPassManager(PMBuilder);
-
- if (CodeGenOpts.DebugInfoForProfiling ||
- !CodeGenOpts.SampleProfileFile.empty())
- PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
- addAddDiscriminatorsPass);
-
- // In ObjC ARC mode, add the main ARC optimization passes.
- if (LangOpts.ObjCAutoRefCount) {
- PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
- addObjCARCExpandPass);
- PMBuilder.addExtension(PassManagerBuilder::EP_ModuleOptimizerEarly,
- addObjCARCAPElimPass);
- PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
- addObjCARCOptPass);
- }
-
- if (LangOpts.Coroutines)
- addCoroutinePassesToExtensionPoints(PMBuilder);
-
- if (!CodeGenOpts.MemoryProfileOutput.empty()) {
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addMemProfilerPasses);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addMemProfilerPasses);
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds)) {
- PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
- addBoundsCheckingPass);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addBoundsCheckingPass);
- }
-
- if (CodeGenOpts.hasSanitizeCoverage()) {
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addSanitizerCoveragePass);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addSanitizerCoveragePass);
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::Address)) {
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addAddressSanitizerPasses);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addAddressSanitizerPasses);
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::KernelAddress)) {
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addKernelAddressSanitizerPasses);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addKernelAddressSanitizerPasses);
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::HWAddress)) {
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addHWAddressSanitizerPasses);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addHWAddressSanitizerPasses);
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::KernelHWAddress)) {
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addKernelHWAddressSanitizerPasses);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addKernelHWAddressSanitizerPasses);
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::Memory)) {
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addMemorySanitizerPass);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addMemorySanitizerPass);
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::KernelMemory)) {
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addKernelMemorySanitizerPass);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addKernelMemorySanitizerPass);
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::Thread)) {
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addThreadSanitizerPass);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addThreadSanitizerPass);
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::DataFlow)) {
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addDataFlowSanitizerPass);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addDataFlowSanitizerPass);
- }
-
- if (CodeGenOpts.InstrumentFunctions ||
- CodeGenOpts.InstrumentFunctionEntryBare ||
- CodeGenOpts.InstrumentFunctionsAfterInlining ||
- CodeGenOpts.InstrumentForProfiling) {
- PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
- addEntryExitInstrumentationPass);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addEntryExitInstrumentationPass);
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addPostInlineEntryExitInstrumentationPass);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addPostInlineEntryExitInstrumentationPass);
- }
-
- // Set up the per-function pass manager.
- FPM.add(new TargetLibraryInfoWrapperPass(*TLII));
- if (CodeGenOpts.VerifyModule)
- FPM.add(createVerifierPass());
-
- // Set up the per-module pass manager.
- if (!CodeGenOpts.RewriteMapFiles.empty())
- addSymbolRewriterPass(CodeGenOpts, &MPM);
-
- if (Optional<GCOVOptions> Options = getGCOVOptions(CodeGenOpts, LangOpts)) {
- MPM.add(createGCOVProfilerPass(*Options));
- if (CodeGenOpts.getDebugInfo() == codegenoptions::NoDebugInfo)
- MPM.add(createStripSymbolsPass(true));
- }
-
- if (Optional<InstrProfOptions> Options =
- getInstrProfOptions(CodeGenOpts, LangOpts))
- MPM.add(createInstrProfilingLegacyPass(*Options, false));
-
- bool hasIRInstr = false;
- if (CodeGenOpts.hasProfileIRInstr()) {
- PMBuilder.EnablePGOInstrGen = true;
- hasIRInstr = true;
- }
- if (CodeGenOpts.hasProfileCSIRInstr()) {
- assert(!CodeGenOpts.hasProfileCSIRUse() &&
- "Cannot have both CSProfileUse pass and CSProfileGen pass at the "
- "same time");
- assert(!hasIRInstr &&
- "Cannot have both ProfileGen pass and CSProfileGen pass at the "
- "same time");
- PMBuilder.EnablePGOCSInstrGen = true;
- hasIRInstr = true;
- }
- if (hasIRInstr) {
- if (!CodeGenOpts.InstrProfileOutput.empty())
- PMBuilder.PGOInstrGen = CodeGenOpts.InstrProfileOutput;
- else
- PMBuilder.PGOInstrGen = std::string(DefaultProfileGenName);
- }
- if (CodeGenOpts.hasProfileIRUse()) {
- PMBuilder.PGOInstrUse = CodeGenOpts.ProfileInstrumentUsePath;
- PMBuilder.EnablePGOCSInstrUse = CodeGenOpts.hasProfileCSIRUse();
- }
-
- if (!CodeGenOpts.SampleProfileFile.empty())
- PMBuilder.PGOSampleUse = CodeGenOpts.SampleProfileFile;
-
- PMBuilder.populateFunctionPassManager(FPM);
- PMBuilder.populateModulePassManager(MPM);
-}
-
static void setCommandLineOpts(const CodeGenOptions &CodeGenOpts) {
SmallVector<const char *, 16> BackendArgs;
BackendArgs.push_back("clang"); // Fake program name.
@@ -899,11 +542,14 @@ void EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
return;
}
- Optional<llvm::CodeModel::Model> CM = getCodeModel(CodeGenOpts);
+ std::optional<llvm::CodeModel::Model> CM = getCodeModel(CodeGenOpts);
std::string FeaturesStr =
llvm::join(TargetOpts.Features.begin(), TargetOpts.Features.end(), ",");
llvm::Reloc::Model RM = CodeGenOpts.RelocationModel;
- CodeGenOpt::Level OptLevel = getCGOptLevel(CodeGenOpts);
+ std::optional<CodeGenOptLevel> OptLevelOrNone =
+ CodeGenOpt::getLevel(CodeGenOpts.OptimizationLevel);
+ assert(OptLevelOrNone && "Invalid optimization level!");
+ CodeGenOptLevel OptLevel = *OptLevelOrNone;
llvm::TargetOptions Options;
if (!initTargetOptions(Diags, Options, CodeGenOpts, TargetOpts, LangOpts,
@@ -911,6 +557,7 @@ void EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
return;
TM.reset(TheTarget->createTargetMachine(Triple, TargetOpts.CPU, FeaturesStr,
Options, RM, CM, OptLevel));
+ TM->setLargeDataThreshold(CodeGenOpts.LargeDataThreshold);
}
bool EmitAssemblyHelper::AddEmitPasses(legacy::PassManager &CodeGenPasses,
@@ -918,9 +565,8 @@ bool EmitAssemblyHelper::AddEmitPasses(legacy::PassManager &CodeGenPasses,
raw_pwrite_stream &OS,
raw_pwrite_stream *DwoOS) {
// Add LibraryInfo.
- llvm::Triple TargetTriple(TheModule->getTargetTriple());
std::unique_ptr<TargetLibraryInfoImpl> TLII(
- createTLII(TargetTriple, CodeGenOpts));
+ llvm::driver::createTLII(TargetTriple, CodeGenOpts.getVecLib()));
CodeGenPasses.add(new TargetLibraryInfoWrapperPass(*TLII));
// Normal mode, emit a .s or .o file by running the code generator. Note,
@@ -942,143 +588,16 @@ bool EmitAssemblyHelper::AddEmitPasses(legacy::PassManager &CodeGenPasses,
return true;
}
-void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
- std::unique_ptr<raw_pwrite_stream> OS) {
- TimeRegion Region(CodeGenOpts.TimePasses ? &CodeGenerationTime : nullptr);
-
- setCommandLineOpts(CodeGenOpts);
-
- bool UsesCodeGen = (Action != Backend_EmitNothing &&
- Action != Backend_EmitBC &&
- Action != Backend_EmitLL);
- CreateTargetMachine(UsesCodeGen);
-
- if (UsesCodeGen && !TM)
- return;
- if (TM)
- TheModule->setDataLayout(TM->createDataLayout());
-
- DebugifyCustomPassManager PerModulePasses;
- DebugInfoPerPassMap DIPreservationMap;
- if (CodeGenOpts.EnableDIPreservationVerify) {
- PerModulePasses.setDebugifyMode(DebugifyMode::OriginalDebugInfo);
- PerModulePasses.setDIPreservationMap(DIPreservationMap);
-
- if (!CodeGenOpts.DIBugsReportFilePath.empty())
- PerModulePasses.setOrigDIVerifyBugsReportFilePath(
- CodeGenOpts.DIBugsReportFilePath);
- }
- PerModulePasses.add(
- createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
-
- legacy::FunctionPassManager PerFunctionPasses(TheModule);
- PerFunctionPasses.add(
- createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
-
- CreatePasses(PerModulePasses, PerFunctionPasses);
-
- legacy::PassManager CodeGenPasses;
- CodeGenPasses.add(
- createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
-
- std::unique_ptr<llvm::ToolOutputFile> ThinLinkOS, DwoOS;
-
- switch (Action) {
- case Backend_EmitNothing:
- break;
-
- case Backend_EmitBC:
- if (CodeGenOpts.PrepareForThinLTO && !CodeGenOpts.DisableLLVMPasses) {
- if (!CodeGenOpts.ThinLinkBitcodeFile.empty()) {
- ThinLinkOS = openOutputFile(CodeGenOpts.ThinLinkBitcodeFile);
- if (!ThinLinkOS)
- return;
- }
- TheModule->addModuleFlag(Module::Error, "EnableSplitLTOUnit",
- CodeGenOpts.EnableSplitLTOUnit);
- PerModulePasses.add(createWriteThinLTOBitcodePass(
- *OS, ThinLinkOS ? &ThinLinkOS->os() : nullptr));
- } else {
- // Emit a module summary by default for Regular LTO except for ld64
- // targets
- bool EmitLTOSummary =
- (CodeGenOpts.PrepareForLTO &&
- !CodeGenOpts.DisableLLVMPasses &&
- llvm::Triple(TheModule->getTargetTriple()).getVendor() !=
- llvm::Triple::Apple);
- if (EmitLTOSummary) {
- if (!TheModule->getModuleFlag("ThinLTO"))
- TheModule->addModuleFlag(Module::Error, "ThinLTO", uint32_t(0));
- TheModule->addModuleFlag(Module::Error, "EnableSplitLTOUnit",
- uint32_t(1));
- }
-
- PerModulePasses.add(createBitcodeWriterPass(
- *OS, CodeGenOpts.EmitLLVMUseLists, EmitLTOSummary));
- }
- break;
-
- case Backend_EmitLL:
- PerModulePasses.add(
- createPrintModulePass(*OS, "", CodeGenOpts.EmitLLVMUseLists));
- break;
-
- default:
- if (!CodeGenOpts.SplitDwarfOutput.empty()) {
- DwoOS = openOutputFile(CodeGenOpts.SplitDwarfOutput);
- if (!DwoOS)
- return;
- }
- if (!AddEmitPasses(CodeGenPasses, Action, *OS,
- DwoOS ? &DwoOS->os() : nullptr))
- return;
- }
-
- // Before executing passes, print the final values of the LLVM options.
- cl::PrintOptionValues();
-
- // Run passes. For now we do all passes at once, but eventually we
- // would like to have the option of streaming code generation.
-
- {
- PrettyStackTraceString CrashInfo("Per-function optimization");
- llvm::TimeTraceScope TimeScope("PerFunctionPasses");
-
- PerFunctionPasses.doInitialization();
- for (Function &F : *TheModule)
- if (!F.isDeclaration())
- PerFunctionPasses.run(F);
- PerFunctionPasses.doFinalization();
- }
-
- {
- PrettyStackTraceString CrashInfo("Per-module optimization passes");
- llvm::TimeTraceScope TimeScope("PerModulePasses");
- PerModulePasses.run(*TheModule);
- }
-
- {
- PrettyStackTraceString CrashInfo("Code generation");
- llvm::TimeTraceScope TimeScope("CodeGenPasses");
- CodeGenPasses.run(*TheModule);
- }
-
- if (ThinLinkOS)
- ThinLinkOS->keep();
- if (DwoOS)
- DwoOS->keep();
-}
-
-static PassBuilder::OptimizationLevel mapToLevel(const CodeGenOptions &Opts) {
+static OptimizationLevel mapToLevel(const CodeGenOptions &Opts) {
switch (Opts.OptimizationLevel) {
default:
llvm_unreachable("Invalid optimization level!");
case 0:
- return PassBuilder::OptimizationLevel::O0;
+ return OptimizationLevel::O0;
case 1:
- return PassBuilder::OptimizationLevel::O1;
+ return OptimizationLevel::O1;
case 2:
switch (Opts.OptimizeSize) {
@@ -1086,81 +605,108 @@ static PassBuilder::OptimizationLevel mapToLevel(const CodeGenOptions &Opts) {
llvm_unreachable("Invalid optimization level for size!");
case 0:
- return PassBuilder::OptimizationLevel::O2;
+ return OptimizationLevel::O2;
case 1:
- return PassBuilder::OptimizationLevel::Os;
+ return OptimizationLevel::Os;
case 2:
- return PassBuilder::OptimizationLevel::Oz;
+ return OptimizationLevel::Oz;
}
case 3:
- return PassBuilder::OptimizationLevel::O3;
+ return OptimizationLevel::O3;
}
}
+static void addKCFIPass(const Triple &TargetTriple, const LangOptions &LangOpts,
+ PassBuilder &PB) {
+ // If the back-end supports KCFI operand bundle lowering, skip KCFIPass.
+ if (TargetTriple.getArch() == llvm::Triple::x86_64 ||
+ TargetTriple.isAArch64(64) || TargetTriple.isRISCV())
+ return;
+
+ // Ensure we lower KCFI operand bundles with -O0.
+ PB.registerOptimizerLastEPCallback(
+ [&](ModulePassManager &MPM, OptimizationLevel Level) {
+ if (Level == OptimizationLevel::O0 &&
+ LangOpts.Sanitize.has(SanitizerKind::KCFI))
+ MPM.addPass(createModuleToFunctionPassAdaptor(KCFIPass()));
+ });
+
+ // When optimizations are requested, run KCIFPass after InstCombine to
+ // avoid unnecessary checks.
+ PB.registerPeepholeEPCallback(
+ [&](FunctionPassManager &FPM, OptimizationLevel Level) {
+ if (Level != OptimizationLevel::O0 &&
+ LangOpts.Sanitize.has(SanitizerKind::KCFI))
+ FPM.addPass(KCFIPass());
+ });
+}
+
static void addSanitizers(const Triple &TargetTriple,
const CodeGenOptions &CodeGenOpts,
const LangOptions &LangOpts, PassBuilder &PB) {
- PB.registerOptimizerLastEPCallback([&](ModulePassManager &MPM,
- PassBuilder::OptimizationLevel Level) {
+ auto SanitizersCallback = [&](ModulePassManager &MPM,
+ OptimizationLevel Level) {
if (CodeGenOpts.hasSanitizeCoverage()) {
auto SancovOpts = getSancovOptsFromCGOpts(CodeGenOpts);
- MPM.addPass(ModuleSanitizerCoveragePass(
+ MPM.addPass(SanitizerCoveragePass(
SancovOpts, CodeGenOpts.SanitizeCoverageAllowlistFiles,
CodeGenOpts.SanitizeCoverageIgnorelistFiles));
}
+ if (CodeGenOpts.hasSanitizeBinaryMetadata()) {
+ MPM.addPass(SanitizerBinaryMetadataPass(
+ getSanitizerBinaryMetadataOptions(CodeGenOpts),
+ CodeGenOpts.SanitizeMetadataIgnorelistFiles));
+ }
+
auto MSanPass = [&](SanitizerMask Mask, bool CompileKernel) {
if (LangOpts.Sanitize.has(Mask)) {
int TrackOrigins = CodeGenOpts.SanitizeMemoryTrackOrigins;
bool Recover = CodeGenOpts.SanitizeRecover.has(Mask);
- MPM.addPass(
- MemorySanitizerPass({TrackOrigins, Recover, CompileKernel}));
- FunctionPassManager FPM;
- FPM.addPass(
- MemorySanitizerPass({TrackOrigins, Recover, CompileKernel}));
- if (Level != PassBuilder::OptimizationLevel::O0) {
- // MemorySanitizer inserts complex instrumentation that mostly
- // follows the logic of the original code, but operates on
- // "shadow" values. It can benefit from re-running some
- // general purpose optimization passes.
- FPM.addPass(EarlyCSEPass());
- // TODO: Consider add more passes like in
- // addGeneralOptsForMemorySanitizer. EarlyCSEPass makes visible
- // difference on size. It's not clear if the rest is still
- // usefull. InstCombinePass breakes
- // compiler-rt/test/msan/select_origin.cpp.
+ MemorySanitizerOptions options(TrackOrigins, Recover, CompileKernel,
+ CodeGenOpts.SanitizeMemoryParamRetval);
+ MPM.addPass(MemorySanitizerPass(options));
+ if (Level != OptimizationLevel::O0) {
+ // MemorySanitizer inserts complex instrumentation that mostly follows
+ // the logic of the original code, but operates on "shadow" values. It
+ // can benefit from re-running some general purpose optimization
+ // passes.
+ MPM.addPass(RequireAnalysisPass<GlobalsAA, llvm::Module>());
+ FunctionPassManager FPM;
+ FPM.addPass(EarlyCSEPass(true /* Enable mem-ssa. */));
+ FPM.addPass(InstCombinePass());
+ FPM.addPass(JumpThreadingPass());
+ FPM.addPass(GVNPass());
+ FPM.addPass(InstCombinePass());
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
}
- MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
}
};
MSanPass(SanitizerKind::Memory, false);
MSanPass(SanitizerKind::KernelMemory, true);
if (LangOpts.Sanitize.has(SanitizerKind::Thread)) {
- MPM.addPass(ThreadSanitizerPass());
+ MPM.addPass(ModuleThreadSanitizerPass());
MPM.addPass(createModuleToFunctionPassAdaptor(ThreadSanitizerPass()));
}
auto ASanPass = [&](SanitizerMask Mask, bool CompileKernel) {
if (LangOpts.Sanitize.has(Mask)) {
- bool Recover = CodeGenOpts.SanitizeRecover.has(Mask);
- bool UseAfterScope = CodeGenOpts.SanitizeAddressUseAfterScope;
- bool ModuleUseAfterScope = asanUseGlobalsGC(TargetTriple, CodeGenOpts);
+ bool UseGlobalGC = asanUseGlobalsGC(TargetTriple, CodeGenOpts);
bool UseOdrIndicator = CodeGenOpts.SanitizeAddressUseOdrIndicator;
llvm::AsanDtorKind DestructorKind =
CodeGenOpts.getSanitizeAddressDtor();
- llvm::AsanDetectStackUseAfterReturnMode UseAfterReturn =
- CodeGenOpts.getSanitizeAddressUseAfterReturn();
- MPM.addPass(RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
- MPM.addPass(ModuleAddressSanitizerPass(
- CompileKernel, Recover, ModuleUseAfterScope, UseOdrIndicator,
- DestructorKind));
- MPM.addPass(createModuleToFunctionPassAdaptor(AddressSanitizerPass(
- CompileKernel, Recover, UseAfterScope, UseAfterReturn)));
+ AddressSanitizerOptions Opts;
+ Opts.CompileKernel = CompileKernel;
+ Opts.Recover = CodeGenOpts.SanitizeRecover.has(Mask);
+ Opts.UseAfterScope = CodeGenOpts.SanitizeAddressUseAfterScope;
+ Opts.UseAfterReturn = CodeGenOpts.getSanitizeAddressUseAfterReturn();
+ MPM.addPass(AddressSanitizerPass(Opts, UseGlobalGC, UseOdrIndicator,
+ DestructorKind));
}
};
ASanPass(SanitizerKind::Address, false);
@@ -1170,8 +716,8 @@ static void addSanitizers(const Triple &TargetTriple,
if (LangOpts.Sanitize.has(Mask)) {
bool Recover = CodeGenOpts.SanitizeRecover.has(Mask);
MPM.addPass(HWAddressSanitizerPass(
- CompileKernel, Recover,
- /*DisableOptimization=*/CodeGenOpts.OptimizationLevel == 0));
+ {CompileKernel, Recover,
+ /*DisableOptimization=*/CodeGenOpts.OptimizationLevel == 0}));
}
};
HWASanPass(SanitizerKind::HWAddress, false);
@@ -1180,86 +726,92 @@ static void addSanitizers(const Triple &TargetTriple,
if (LangOpts.Sanitize.has(SanitizerKind::DataFlow)) {
MPM.addPass(DataFlowSanitizerPass(LangOpts.NoSanitizeFiles));
}
- });
+ };
+ if (ClSanitizeOnOptimizerEarlyEP) {
+ PB.registerOptimizerEarlyEPCallback(
+ [SanitizersCallback](ModulePassManager &MPM, OptimizationLevel Level) {
+ ModulePassManager NewMPM;
+ SanitizersCallback(NewMPM, Level);
+ if (!NewMPM.isEmpty()) {
+ // Sanitizers can abandon<GlobalsAA>.
+ NewMPM.addPass(RequireAnalysisPass<GlobalsAA, llvm::Module>());
+ MPM.addPass(std::move(NewMPM));
+ }
+ });
+ } else {
+ // LastEP does not need GlobalsAA.
+ PB.registerOptimizerLastEPCallback(SanitizersCallback);
+ }
}
-/// A clean version of `EmitAssembly` that uses the new pass manager.
-///
-/// Not all features are currently supported in this system, but where
-/// necessary it falls back to the legacy pass manager to at least provide
-/// basic functionality.
-///
-/// This API is planned to have its functionality finished and then to replace
-/// `EmitAssembly` at some point in the future when the default switches.
-void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
- BackendAction Action, std::unique_ptr<raw_pwrite_stream> OS) {
- TimeRegion Region(CodeGenOpts.TimePasses ? &CodeGenerationTime : nullptr);
- setCommandLineOpts(CodeGenOpts);
-
- bool RequiresCodeGen = (Action != Backend_EmitNothing &&
- Action != Backend_EmitBC &&
- Action != Backend_EmitLL);
- CreateTargetMachine(RequiresCodeGen);
-
- if (RequiresCodeGen && !TM)
- return;
- if (TM)
- TheModule->setDataLayout(TM->createDataLayout());
-
- Optional<PGOOptions> PGOOpt;
+void EmitAssemblyHelper::RunOptimizationPipeline(
+ BackendAction Action, std::unique_ptr<raw_pwrite_stream> &OS,
+ std::unique_ptr<llvm::ToolOutputFile> &ThinLinkOS, BackendConsumer *BC) {
+ std::optional<PGOOptions> PGOOpt;
if (CodeGenOpts.hasProfileIRInstr())
// -fprofile-generate.
- PGOOpt = PGOOptions(CodeGenOpts.InstrProfileOutput.empty()
- ? std::string(DefaultProfileGenName)
- : CodeGenOpts.InstrProfileOutput,
- "", "", PGOOptions::IRInstr, PGOOptions::NoCSAction,
- CodeGenOpts.DebugInfoForProfiling);
+ PGOOpt = PGOOptions(
+ CodeGenOpts.InstrProfileOutput.empty() ? getDefaultProfileGenName()
+ : CodeGenOpts.InstrProfileOutput,
+ "", "", CodeGenOpts.MemoryProfileUsePath, nullptr, PGOOptions::IRInstr,
+ PGOOptions::NoCSAction, CodeGenOpts.DebugInfoForProfiling,
+ /*PseudoProbeForProfiling=*/false, CodeGenOpts.AtomicProfileUpdate);
else if (CodeGenOpts.hasProfileIRUse()) {
// -fprofile-use.
auto CSAction = CodeGenOpts.hasProfileCSIRUse() ? PGOOptions::CSIRUse
: PGOOptions::NoCSAction;
- PGOOpt = PGOOptions(CodeGenOpts.ProfileInstrumentUsePath, "",
- CodeGenOpts.ProfileRemappingFile, PGOOptions::IRUse,
- CSAction, CodeGenOpts.DebugInfoForProfiling);
+ PGOOpt = PGOOptions(
+ CodeGenOpts.ProfileInstrumentUsePath, "",
+ CodeGenOpts.ProfileRemappingFile, CodeGenOpts.MemoryProfileUsePath, VFS,
+ PGOOptions::IRUse, CSAction, CodeGenOpts.DebugInfoForProfiling);
} else if (!CodeGenOpts.SampleProfileFile.empty())
// -fprofile-sample-use
PGOOpt = PGOOptions(
CodeGenOpts.SampleProfileFile, "", CodeGenOpts.ProfileRemappingFile,
- PGOOptions::SampleUse, PGOOptions::NoCSAction,
- CodeGenOpts.DebugInfoForProfiling, CodeGenOpts.PseudoProbeForProfiling);
+ CodeGenOpts.MemoryProfileUsePath, VFS, PGOOptions::SampleUse,
+ PGOOptions::NoCSAction, CodeGenOpts.DebugInfoForProfiling,
+ CodeGenOpts.PseudoProbeForProfiling);
+ else if (!CodeGenOpts.MemoryProfileUsePath.empty())
+ // -fmemory-profile-use (without any of the above options)
+ PGOOpt = PGOOptions("", "", "", CodeGenOpts.MemoryProfileUsePath, VFS,
+ PGOOptions::NoAction, PGOOptions::NoCSAction,
+ CodeGenOpts.DebugInfoForProfiling);
else if (CodeGenOpts.PseudoProbeForProfiling)
// -fpseudo-probe-for-profiling
- PGOOpt =
- PGOOptions("", "", "", PGOOptions::NoAction, PGOOptions::NoCSAction,
- CodeGenOpts.DebugInfoForProfiling, true);
+ PGOOpt = PGOOptions("", "", "", /*MemoryProfile=*/"", nullptr,
+ PGOOptions::NoAction, PGOOptions::NoCSAction,
+ CodeGenOpts.DebugInfoForProfiling, true);
else if (CodeGenOpts.DebugInfoForProfiling)
// -fdebug-info-for-profiling
- PGOOpt = PGOOptions("", "", "", PGOOptions::NoAction,
- PGOOptions::NoCSAction, true);
+ PGOOpt = PGOOptions("", "", "", /*MemoryProfile=*/"", nullptr,
+ PGOOptions::NoAction, PGOOptions::NoCSAction, true);
// Check to see if we want to generate a CS profile.
if (CodeGenOpts.hasProfileCSIRInstr()) {
assert(!CodeGenOpts.hasProfileCSIRUse() &&
"Cannot have both CSProfileUse pass and CSProfileGen pass at "
"the same time");
- if (PGOOpt.hasValue()) {
+ if (PGOOpt) {
assert(PGOOpt->Action != PGOOptions::IRInstr &&
PGOOpt->Action != PGOOptions::SampleUse &&
"Cannot run CSProfileGen pass with ProfileGen or SampleUse "
" pass");
PGOOpt->CSProfileGenFile = CodeGenOpts.InstrProfileOutput.empty()
- ? std::string(DefaultProfileGenName)
+ ? getDefaultProfileGenName()
: CodeGenOpts.InstrProfileOutput;
PGOOpt->CSAction = PGOOptions::CSIRInstr;
} else
- PGOOpt = PGOOptions("",
- CodeGenOpts.InstrProfileOutput.empty()
- ? std::string(DefaultProfileGenName)
- : CodeGenOpts.InstrProfileOutput,
- "", PGOOptions::NoAction, PGOOptions::CSIRInstr,
- CodeGenOpts.DebugInfoForProfiling);
+ PGOOpt =
+ PGOOptions("",
+ CodeGenOpts.InstrProfileOutput.empty()
+ ? getDefaultProfileGenName()
+ : CodeGenOpts.InstrProfileOutput,
+ "", /*MemoryProfile=*/"", nullptr, PGOOptions::NoAction,
+ PGOOptions::CSIRInstr, CodeGenOpts.DebugInfoForProfiling);
}
+ if (TM)
+ TM->setPGOOption(PGOOpt);
PipelineTuningOptions PTO;
PTO.LoopUnrolling = CodeGenOpts.UnrollLoops;
@@ -1272,6 +824,7 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
// Only enable CGProfilePass when using integrated assembler, since
// non-integrated assemblers don't recognize .cgprofile section.
PTO.CallGraphProfile = !CodeGenOpts.DisableIntegratedAS;
+ PTO.UnifiedLTO = CodeGenOpts.UnifiedLTO;
LoopAnalysisManager LAM;
FunctionAnalysisManager FAM;
@@ -1283,12 +836,50 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
PrintPassOptions PrintPassOpts;
PrintPassOpts.Indent = DebugPassStructure;
PrintPassOpts.SkipAnalyses = DebugPassStructure;
- StandardInstrumentations SI(CodeGenOpts.DebugPassManager ||
- DebugPassStructure,
- /*VerifyEach*/ false, PrintPassOpts);
- SI.registerCallbacks(PIC, &FAM);
+ StandardInstrumentations SI(
+ TheModule->getContext(),
+ (CodeGenOpts.DebugPassManager || DebugPassStructure),
+ CodeGenOpts.VerifyEach, PrintPassOpts);
+ SI.registerCallbacks(PIC, &MAM);
PassBuilder PB(TM.get(), PTO, PGOOpt, &PIC);
+ // Handle the assignment tracking feature options.
+ switch (CodeGenOpts.getAssignmentTrackingMode()) {
+ case CodeGenOptions::AssignmentTrackingOpts::Forced:
+ PB.registerPipelineStartEPCallback(
+ [&](ModulePassManager &MPM, OptimizationLevel Level) {
+ MPM.addPass(AssignmentTrackingPass());
+ });
+ break;
+ case CodeGenOptions::AssignmentTrackingOpts::Enabled:
+ // Disable assignment tracking in LTO builds for now as the performance
+ // cost is too high. Disable for LLDB tuning due to llvm.org/PR43126.
+ if (!CodeGenOpts.PrepareForThinLTO && !CodeGenOpts.PrepareForLTO &&
+ CodeGenOpts.getDebuggerTuning() != llvm::DebuggerKind::LLDB) {
+ PB.registerPipelineStartEPCallback(
+ [&](ModulePassManager &MPM, OptimizationLevel Level) {
+ // Only use assignment tracking if optimisations are enabled.
+ if (Level != OptimizationLevel::O0)
+ MPM.addPass(AssignmentTrackingPass());
+ });
+ }
+ break;
+ case CodeGenOptions::AssignmentTrackingOpts::Disabled:
+ break;
+ }
+
+ // Enable verify-debuginfo-preserve-each for new PM.
+ DebugifyEachInstrumentation Debugify;
+ DebugInfoPerPass DebugInfoBeforePass;
+ if (CodeGenOpts.EnableDIPreservationVerify) {
+ Debugify.setDebugifyMode(DebugifyMode::OriginalDebugInfo);
+ Debugify.setDebugInfoBeforePass(DebugInfoBeforePass);
+
+ if (!CodeGenOpts.DIBugsReportFilePath.empty())
+ Debugify.setOrigDIVerifyBugsReportFilePath(
+ CodeGenOpts.DIBugsReportFilePath);
+ Debugify.registerCallbacks(PIC, MAM);
+ }
// Attempt to load pass plugins and register their callbacks with PB.
for (auto &PluginFN : CodeGenOpts.PassPlugins) {
auto PassPlugin = PassPlugin::Load(PluginFN);
@@ -1299,18 +890,16 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
<< PluginFN << toString(PassPlugin.takeError());
}
}
+ for (const auto &PassCallback : CodeGenOpts.PassBuilderCallbacks)
+ PassCallback(PB);
#define HANDLE_EXTENSION(Ext) \
get##Ext##PluginInfo().RegisterPassBuilderCallbacks(PB);
#include "llvm/Support/Extension.def"
- // Register the AA manager first so that our version is the one used.
- FAM.registerPass([&] { return PB.buildDefaultAAPipeline(); });
-
// Register the target library analysis directly and give it a customized
// preset TLI.
- Triple TargetTriple(TheModule->getTargetTriple());
std::unique_ptr<TargetLibraryInfoImpl> TLII(
- createTLII(TargetTriple, CodeGenOpts));
+ llvm::driver::createTLII(TargetTriple, CodeGenOpts.getVecLib()));
FAM.registerPass([&] { return TargetLibraryAnalysis(*TLII); });
// Register all the basic analyses with the managers.
@@ -1321,30 +910,33 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
ModulePassManager MPM;
+ // Add a verifier pass, before any other passes, to catch CodeGen issues.
+ if (CodeGenOpts.VerifyModule)
+ MPM.addPass(VerifierPass());
if (!CodeGenOpts.DisableLLVMPasses) {
// Map our optimization levels into one of the distinct levels used to
// configure the pipeline.
- PassBuilder::OptimizationLevel Level = mapToLevel(CodeGenOpts);
+ OptimizationLevel Level = mapToLevel(CodeGenOpts);
- bool IsThinLTO = CodeGenOpts.PrepareForThinLTO;
- bool IsLTO = CodeGenOpts.PrepareForLTO;
+ const bool PrepareForThinLTO = CodeGenOpts.PrepareForThinLTO;
+ const bool PrepareForLTO = CodeGenOpts.PrepareForLTO;
if (LangOpts.ObjCAutoRefCount) {
PB.registerPipelineStartEPCallback(
- [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
- if (Level != PassBuilder::OptimizationLevel::O0)
+ [](ModulePassManager &MPM, OptimizationLevel Level) {
+ if (Level != OptimizationLevel::O0)
MPM.addPass(
createModuleToFunctionPassAdaptor(ObjCARCExpandPass()));
});
PB.registerPipelineEarlySimplificationEPCallback(
- [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
- if (Level != PassBuilder::OptimizationLevel::O0)
+ [](ModulePassManager &MPM, OptimizationLevel Level) {
+ if (Level != OptimizationLevel::O0)
MPM.addPass(ObjCARCAPElimPass());
});
PB.registerScalarOptimizerLateEPCallback(
- [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
- if (Level != PassBuilder::OptimizationLevel::O0)
+ [](FunctionPassManager &FPM, OptimizationLevel Level) {
+ if (Level != OptimizationLevel::O0)
FPM.addPass(ObjCARCOptPass());
});
}
@@ -1357,7 +949,7 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
// vtables so that codegen doesn't complain.
if (IsThinLTOPostLink)
PB.registerPipelineStartEPCallback(
- [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ [](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(LowerTypeTestsPass(/*ExportSummary=*/nullptr,
/*ImportSummary=*/nullptr,
/*DropTypeTests=*/true));
@@ -1368,12 +960,12 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
CodeGenOpts.InstrumentFunctionsAfterInlining ||
CodeGenOpts.InstrumentForProfiling) {
PB.registerPipelineStartEPCallback(
- [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ [](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(createModuleToFunctionPassAdaptor(
EntryExitInstrumenterPass(/*PostInlining=*/false)));
});
PB.registerOptimizerLastEPCallback(
- [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ [](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(createModuleToFunctionPassAdaptor(
EntryExitInstrumenterPass(/*PostInlining=*/true)));
});
@@ -1383,94 +975,146 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
// of the pipeline.
if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds))
PB.registerScalarOptimizerLateEPCallback(
- [](FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
+ [](FunctionPassManager &FPM, OptimizationLevel Level) {
FPM.addPass(BoundsCheckingPass());
});
// Don't add sanitizers if we are here from ThinLTO PostLink. That already
// done on PreLink stage.
- if (!IsThinLTOPostLink)
+ if (!IsThinLTOPostLink) {
addSanitizers(TargetTriple, CodeGenOpts, LangOpts, PB);
+ addKCFIPass(TargetTriple, LangOpts, PB);
+ }
- if (Optional<GCOVOptions> Options = getGCOVOptions(CodeGenOpts, LangOpts))
+ if (std::optional<GCOVOptions> Options =
+ getGCOVOptions(CodeGenOpts, LangOpts))
PB.registerPipelineStartEPCallback(
- [Options](ModulePassManager &MPM,
- PassBuilder::OptimizationLevel Level) {
+ [Options](ModulePassManager &MPM, OptimizationLevel Level) {
MPM.addPass(GCOVProfilerPass(*Options));
});
- if (Optional<InstrProfOptions> Options =
+ if (std::optional<InstrProfOptions> Options =
getInstrProfOptions(CodeGenOpts, LangOpts))
PB.registerPipelineStartEPCallback(
- [Options](ModulePassManager &MPM,
- PassBuilder::OptimizationLevel Level) {
- MPM.addPass(InstrProfiling(*Options, false));
+ [Options](ModulePassManager &MPM, OptimizationLevel Level) {
+ MPM.addPass(InstrProfilingLoweringPass(*Options, false));
});
- if (CodeGenOpts.OptimizationLevel == 0) {
- MPM = PB.buildO0DefaultPipeline(Level, IsLTO || IsThinLTO);
- } else if (IsThinLTO) {
- MPM = PB.buildThinLTOPreLinkDefaultPipeline(Level);
- } else if (IsLTO) {
- MPM = PB.buildLTOPreLinkDefaultPipeline(Level);
- } else {
- MPM = PB.buildPerModuleDefaultPipeline(Level);
+ // TODO: Consider passing the MemoryProfileOutput to the pass builder via
+ // the PGOOptions, and set this up there.
+ if (!CodeGenOpts.MemoryProfileOutput.empty()) {
+ PB.registerOptimizerLastEPCallback(
+ [](ModulePassManager &MPM, OptimizationLevel Level) {
+ MPM.addPass(createModuleToFunctionPassAdaptor(MemProfilerPass()));
+ MPM.addPass(ModuleMemProfilerPass());
+ });
}
- if (!CodeGenOpts.MemoryProfileOutput.empty()) {
- MPM.addPass(createModuleToFunctionPassAdaptor(MemProfilerPass()));
- MPM.addPass(ModuleMemProfilerPass());
+ if (CodeGenOpts.FatLTO) {
+ MPM.addPass(PB.buildFatLTODefaultPipeline(
+ Level, PrepareForThinLTO,
+ PrepareForThinLTO || shouldEmitRegularLTOSummary()));
+ } else if (PrepareForThinLTO) {
+ MPM.addPass(PB.buildThinLTOPreLinkDefaultPipeline(Level));
+ } else if (PrepareForLTO) {
+ MPM.addPass(PB.buildLTOPreLinkDefaultPipeline(Level));
+ } else {
+ MPM.addPass(PB.buildPerModuleDefaultPipeline(Level));
}
}
- // FIXME: We still use the legacy pass manager to do code generation. We
- // create that pass manager here and use it as needed below.
- legacy::PassManager CodeGenPasses;
- bool NeedCodeGen = false;
- std::unique_ptr<llvm::ToolOutputFile> ThinLinkOS, DwoOS;
-
- // Append any output we need to the pass manager.
- switch (Action) {
- case Backend_EmitNothing:
- break;
-
- case Backend_EmitBC:
+ // Re-link against any bitcodes supplied via the -mlink-builtin-bitcode option
+ // Some optimizations may generate new function calls that would not have
+ // been linked pre-optimization (i.e. fused sincos calls generated by
+ // AMDGPULibCalls::fold_sincos.)
+ if (ClRelinkBuiltinBitcodePostop)
+ MPM.addPass(LinkInModulesPass(BC, false));
+
+ // Add a verifier pass if requested. We don't have to do this if the action
+ // requires code generation because there will already be a verifier pass in
+ // the code-generation pipeline.
+ // Since we already added a verifier pass above, this
+ // might even not run the analysis, if previous passes caused no changes.
+ if (!actionRequiresCodeGen(Action) && CodeGenOpts.VerifyModule)
+ MPM.addPass(VerifierPass());
+
+ if (Action == Backend_EmitBC || Action == Backend_EmitLL ||
+ CodeGenOpts.FatLTO) {
if (CodeGenOpts.PrepareForThinLTO && !CodeGenOpts.DisableLLVMPasses) {
- if (!CodeGenOpts.ThinLinkBitcodeFile.empty()) {
- ThinLinkOS = openOutputFile(CodeGenOpts.ThinLinkBitcodeFile);
- if (!ThinLinkOS)
- return;
+ if (!TheModule->getModuleFlag("EnableSplitLTOUnit"))
+ TheModule->addModuleFlag(llvm::Module::Error, "EnableSplitLTOUnit",
+ CodeGenOpts.EnableSplitLTOUnit);
+ if (Action == Backend_EmitBC) {
+ if (!CodeGenOpts.ThinLinkBitcodeFile.empty()) {
+ ThinLinkOS = openOutputFile(CodeGenOpts.ThinLinkBitcodeFile);
+ if (!ThinLinkOS)
+ return;
+ }
+ MPM.addPass(ThinLTOBitcodeWriterPass(
+ *OS, ThinLinkOS ? &ThinLinkOS->os() : nullptr));
+ } else if (Action == Backend_EmitLL) {
+ MPM.addPass(PrintModulePass(*OS, "", CodeGenOpts.EmitLLVMUseLists,
+ /*EmitLTOSummary=*/true));
}
- TheModule->addModuleFlag(Module::Error, "EnableSplitLTOUnit",
- CodeGenOpts.EnableSplitLTOUnit);
- MPM.addPass(ThinLTOBitcodeWriterPass(*OS, ThinLinkOS ? &ThinLinkOS->os()
- : nullptr));
} else {
// Emit a module summary by default for Regular LTO except for ld64
// targets
- bool EmitLTOSummary =
- (CodeGenOpts.PrepareForLTO &&
- !CodeGenOpts.DisableLLVMPasses &&
- llvm::Triple(TheModule->getTargetTriple()).getVendor() !=
- llvm::Triple::Apple);
+ bool EmitLTOSummary = shouldEmitRegularLTOSummary();
if (EmitLTOSummary) {
- if (!TheModule->getModuleFlag("ThinLTO"))
- TheModule->addModuleFlag(Module::Error, "ThinLTO", uint32_t(0));
- TheModule->addModuleFlag(Module::Error, "EnableSplitLTOUnit",
- uint32_t(1));
+ if (!TheModule->getModuleFlag("ThinLTO") && !CodeGenOpts.UnifiedLTO)
+ TheModule->addModuleFlag(llvm::Module::Error, "ThinLTO", uint32_t(0));
+ if (!TheModule->getModuleFlag("EnableSplitLTOUnit"))
+ TheModule->addModuleFlag(llvm::Module::Error, "EnableSplitLTOUnit",
+ uint32_t(1));
+ }
+ if (Action == Backend_EmitBC) {
+ MPM.addPass(BitcodeWriterPass(*OS, CodeGenOpts.EmitLLVMUseLists,
+ EmitLTOSummary));
+ } else if (Action == Backend_EmitLL) {
+ MPM.addPass(PrintModulePass(*OS, "", CodeGenOpts.EmitLLVMUseLists,
+ EmitLTOSummary));
}
- MPM.addPass(
- BitcodeWriterPass(*OS, CodeGenOpts.EmitLLVMUseLists, EmitLTOSummary));
}
- break;
- case Backend_EmitLL:
- MPM.addPass(PrintModulePass(*OS, "", CodeGenOpts.EmitLLVMUseLists));
- break;
+ if (shouldEmitUnifiedLTOModueFlag())
+ TheModule->addModuleFlag(llvm::Module::Error, "UnifiedLTO", uint32_t(1));
+ }
+
+ // Print a textual, '-passes=' compatible, representation of pipeline if
+ // requested.
+ if (PrintPipelinePasses) {
+ MPM.printPipeline(outs(), [&PIC](StringRef ClassName) {
+ auto PassName = PIC.getPassNameForClassName(ClassName);
+ return PassName.empty() ? ClassName : PassName;
+ });
+ outs() << "\n";
+ return;
+ }
+
+ if (LangOpts.HIPStdPar && !LangOpts.CUDAIsDevice &&
+ LangOpts.HIPStdParInterposeAlloc)
+ MPM.addPass(HipStdParAllocationInterpositionPass());
+
+ // Now that we have all of the passes ready, run them.
+ {
+ PrettyStackTraceString CrashInfo("Optimizer");
+ llvm::TimeTraceScope TimeScope("Optimizer");
+ MPM.run(*TheModule, MAM);
+ }
+}
+
+void EmitAssemblyHelper::RunCodegenPipeline(
+ BackendAction Action, std::unique_ptr<raw_pwrite_stream> &OS,
+ std::unique_ptr<llvm::ToolOutputFile> &DwoOS) {
+ // We still use the legacy PM to run the codegen pipeline since the new PM
+ // does not work with the codegen pipeline.
+ // FIXME: make the new PM work with the codegen pipeline.
+ legacy::PassManager CodeGenPasses;
+ // Append any output we need to the pass manager.
+ switch (Action) {
case Backend_EmitAssembly:
case Backend_EmitMCNull:
case Backend_EmitObj:
- NeedCodeGen = true;
CodeGenPasses.add(
createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
if (!CodeGenOpts.SplitDwarfOutput.empty()) {
@@ -1483,22 +1127,44 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
// FIXME: Should we handle this error differently?
return;
break;
+ default:
+ return;
}
- // Before executing passes, print the final values of the LLVM options.
- cl::PrintOptionValues();
-
- // Now that we have all of the passes ready, run them.
- {
- PrettyStackTraceString CrashInfo("Optimizer");
- MPM.run(*TheModule, MAM);
+ // If -print-pipeline-passes is requested, don't run the legacy pass manager.
+ // FIXME: when codegen is switched to use the new pass manager, it should also
+ // emit pass names here.
+ if (PrintPipelinePasses) {
+ return;
}
- // Now if needed, run the legacy PM for codegen.
- if (NeedCodeGen) {
+ {
PrettyStackTraceString CrashInfo("Code generation");
+ llvm::TimeTraceScope TimeScope("CodeGenPasses");
CodeGenPasses.run(*TheModule);
}
+}
+
+void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
+ std::unique_ptr<raw_pwrite_stream> OS,
+ BackendConsumer *BC) {
+ TimeRegion Region(CodeGenOpts.TimePasses ? &CodeGenerationTime : nullptr);
+ setCommandLineOpts(CodeGenOpts);
+
+ bool RequiresCodeGen = actionRequiresCodeGen(Action);
+ CreateTargetMachine(RequiresCodeGen);
+
+ if (RequiresCodeGen && !TM)
+ return;
+ if (TM)
+ TheModule->setDataLayout(TM->createDataLayout());
+
+ // Before executing passes, print the final values of the LLVM options.
+ cl::PrintOptionValues();
+
+ std::unique_ptr<llvm::ToolOutputFile> ThinLinkOS, DwoOS;
+ RunOptimizationPipeline(Action, OS, ThinLinkOS, BC);
+ RunCodegenPipeline(Action, OS, DwoOS);
if (ThinLinkOS)
ThinLinkOS->keep();
@@ -1507,12 +1173,13 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
}
static void runThinLTOBackend(
- DiagnosticsEngine &Diags, ModuleSummaryIndex *CombinedIndex, Module *M,
- const HeaderSearchOptions &HeaderOpts, const CodeGenOptions &CGOpts,
- const clang::TargetOptions &TOpts, const LangOptions &LOpts,
- std::unique_ptr<raw_pwrite_stream> OS, std::string SampleProfile,
- std::string ProfileRemapping, BackendAction Action) {
- StringMap<DenseMap<GlobalValue::GUID, GlobalValueSummary *>>
+ DiagnosticsEngine &Diags, ModuleSummaryIndex *CombinedIndex,
+ llvm::Module *M, const HeaderSearchOptions &HeaderOpts,
+ const CodeGenOptions &CGOpts, const clang::TargetOptions &TOpts,
+ const LangOptions &LOpts, std::unique_ptr<raw_pwrite_stream> OS,
+ std::string SampleProfile, std::string ProfileRemapping,
+ BackendAction Action) {
+ DenseMap<StringRef, DenseMap<GlobalValue::GUID, GlobalValueSummary *>>
ModuleToDefinedGVSummaries;
CombinedIndex->collectDefinedGVSummariesPerModule(ModuleToDefinedGVSummaries);
@@ -1525,8 +1192,9 @@ static void runThinLTOBackend(
if (!lto::initImportList(*M, *CombinedIndex, ImportList))
return;
- auto AddStream = [&](size_t Task) {
- return std::make_unique<lto::NativeObjectStream>(std::move(OS));
+ auto AddStream = [&](size_t Task, const Twine &ModuleName) {
+ return std::make_unique<CachedFileStream>(std::move(OS),
+ CGOpts.ObjectFilenameForDebug);
};
lto::Config Conf;
if (CGOpts.SaveTempsFilePrefix != "") {
@@ -1542,7 +1210,10 @@ static void runThinLTOBackend(
Conf.CodeModel = getCodeModel(CGOpts);
Conf.MAttrs = TOpts.Features;
Conf.RelocModel = CGOpts.RelocationModel;
- Conf.CGOptLevel = getCGOptLevel(CGOpts);
+ std::optional<CodeGenOptLevel> OptLevelOrNone =
+ CodeGenOpt::getLevel(CGOpts.OptimizationLevel);
+ assert(OptLevelOrNone && "Invalid optimization level!");
+ Conf.CGOptLevel = *OptLevelOrNone;
Conf.OptLevel = CGOpts.OptimizationLevel;
initTargetOptions(Diags, Conf.Options, CGOpts, TOpts, LOpts, HeaderOpts);
Conf.SampleProfile = std::move(SampleProfile);
@@ -1566,8 +1237,8 @@ static void runThinLTOBackend(
}
Conf.ProfileRemapping = std::move(ProfileRemapping);
- Conf.UseNewPM = !CGOpts.LegacyPassManager;
Conf.DebugPassManager = CGOpts.DebugPassManager;
+ Conf.VerifyEach = CGOpts.VerifyEach;
Conf.RemarksWithHotness = CGOpts.DiagnosticsWithHotness;
Conf.RemarksFilename = CGOpts.OptRecordFile;
Conf.RemarksPasses = CGOpts.OptRecordPasses;
@@ -1576,18 +1247,18 @@ static void runThinLTOBackend(
Conf.SplitDwarfOutput = CGOpts.SplitDwarfOutput;
switch (Action) {
case Backend_EmitNothing:
- Conf.PreCodeGenModuleHook = [](size_t Task, const Module &Mod) {
+ Conf.PreCodeGenModuleHook = [](size_t Task, const llvm::Module &Mod) {
return false;
};
break;
case Backend_EmitLL:
- Conf.PreCodeGenModuleHook = [&](size_t Task, const Module &Mod) {
+ Conf.PreCodeGenModuleHook = [&](size_t Task, const llvm::Module &Mod) {
M->print(*OS, nullptr, CGOpts.EmitLLVMUseLists);
return false;
};
break;
case Backend_EmitBC:
- Conf.PreCodeGenModuleHook = [&](size_t Task, const Module &Mod) {
+ Conf.PreCodeGenModuleHook = [&](size_t Task, const llvm::Module &Mod) {
WriteBitcodeToFile(*M, *OS, CGOpts.EmitLLVMUseLists);
return false;
};
@@ -1606,14 +1277,12 @@ static void runThinLTOBackend(
}
}
-void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
- const HeaderSearchOptions &HeaderOpts,
- const CodeGenOptions &CGOpts,
- const clang::TargetOptions &TOpts,
- const LangOptions &LOpts,
- StringRef TDesc, Module *M,
- BackendAction Action,
- std::unique_ptr<raw_pwrite_stream> OS) {
+void clang::EmitBackendOutput(
+ DiagnosticsEngine &Diags, const HeaderSearchOptions &HeaderOpts,
+ const CodeGenOptions &CGOpts, const clang::TargetOptions &TOpts,
+ const LangOptions &LOpts, StringRef TDesc, llvm::Module *M,
+ BackendAction Action, IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
+ std::unique_ptr<raw_pwrite_stream> OS, BackendConsumer *BC) {
llvm::TimeTraceScope TimeScope("Backend");
@@ -1622,16 +1291,17 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
// If we are performing a ThinLTO importing compile, load the function index
// into memory and pass it into runThinLTOBackend, which will run the
// function importer and invoke LTO passes.
- Expected<std::unique_ptr<ModuleSummaryIndex>> IndexOrErr =
- llvm::getModuleSummaryIndexForFile(CGOpts.ThinLTOIndexFile,
- /*IgnoreEmptyThinLTOIndexFile*/true);
- if (!IndexOrErr) {
- logAllUnhandledErrors(IndexOrErr.takeError(), errs(),
+ std::unique_ptr<ModuleSummaryIndex> CombinedIndex;
+ if (Error E = llvm::getModuleSummaryIndexForFile(
+ CGOpts.ThinLTOIndexFile,
+ /*IgnoreEmptyThinLTOIndexFile*/ true)
+ .moveInto(CombinedIndex)) {
+ logAllUnhandledErrors(std::move(E), errs(),
"Error loading index file '" +
CGOpts.ThinLTOIndexFile + "': ");
return;
}
- std::unique_ptr<ModuleSummaryIndex> CombinedIndex = std::move(*IndexOrErr);
+
// A null CombinedIndex means we should skip ThinLTO compilation
// (LLVM will optionally ignore empty index files, returning null instead
// of an error).
@@ -1654,12 +1324,8 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
}
}
- EmitAssemblyHelper AsmHelper(Diags, HeaderOpts, CGOpts, TOpts, LOpts, M);
-
- if (!CGOpts.LegacyPassManager)
- AsmHelper.EmitAssemblyWithNewPassManager(Action, std::move(OS));
- else
- AsmHelper.EmitAssembly(Action, std::move(OS));
+ EmitAssemblyHelper AsmHelper(Diags, HeaderOpts, CGOpts, TOpts, LOpts, M, VFS);
+ AsmHelper.EmitAssembly(Action, std::move(OS), BC);
// Verify clang's TargetInfo DataLayout against the LLVM TargetMachine's
// DataLayout.
@@ -1680,8 +1346,28 @@ void clang::EmbedBitcode(llvm::Module *M, const CodeGenOptions &CGOpts,
llvm::MemoryBufferRef Buf) {
if (CGOpts.getEmbedBitcode() == CodeGenOptions::Embed_Off)
return;
- llvm::EmbedBitcodeInModule(
+ llvm::embedBitcodeInModule(
*M, Buf, CGOpts.getEmbedBitcode() != CodeGenOptions::Embed_Marker,
CGOpts.getEmbedBitcode() != CodeGenOptions::Embed_Bitcode,
CGOpts.CmdArgs);
}
+
+void clang::EmbedObject(llvm::Module *M, const CodeGenOptions &CGOpts,
+ DiagnosticsEngine &Diags) {
+ if (CGOpts.OffloadObjects.empty())
+ return;
+
+ for (StringRef OffloadObject : CGOpts.OffloadObjects) {
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> ObjectOrErr =
+ llvm::MemoryBuffer::getFileOrSTDIN(OffloadObject);
+ if (ObjectOrErr.getError()) {
+ auto DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "could not open '%0' for embedding");
+ Diags.Report(DiagID) << OffloadObject;
+ return;
+ }
+
+ llvm::embedBufferInModule(*M, **ObjectOrErr, ".llvm.offloading",
+ Align(object::OffloadBinary::getAlignment()));
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
index b6722ad4e4f1..52e6ddb7d6af 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
@@ -80,31 +80,30 @@ namespace {
AtomicSizeInBits = C.toBits(
C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
.alignTo(lvalue.getAlignment()));
- auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
+ llvm::Value *BitFieldPtr = lvalue.getBitFieldPointer();
auto OffsetInChars =
(C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
lvalue.getAlignment();
- VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
- CGF.Int8Ty, VoidPtrAddr, OffsetInChars.getQuantity());
- auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- VoidPtrAddr,
- CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
- "atomic_bitfield_base");
+ llvm::Value *StoragePtr = CGF.Builder.CreateConstGEP1_64(
+ CGF.Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());
+ StoragePtr = CGF.Builder.CreateAddrSpaceCast(
+ StoragePtr, CGF.UnqualPtrTy, "atomic_bitfield_base");
BFI = OrigBFI;
BFI.Offset = Offset;
BFI.StorageSize = AtomicSizeInBits;
BFI.StorageOffset += OffsetInChars;
- LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
- BFI, lvalue.getType(), lvalue.getBaseInfo(),
- lvalue.getTBAAInfo());
+ llvm::Type *StorageTy = CGF.Builder.getIntNTy(AtomicSizeInBits);
+ LVal = LValue::MakeBitfield(
+ Address(StoragePtr, StorageTy, lvalue.getAlignment()), BFI,
+ lvalue.getType(), lvalue.getBaseInfo(), lvalue.getTBAAInfo());
AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
if (AtomicTy.isNull()) {
llvm::APInt Size(
/*numBits=*/32,
C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
- AtomicTy =
- C.getConstantArrayType(C.CharTy, Size, nullptr, ArrayType::Normal,
- /*IndexTypeQuals=*/0);
+ AtomicTy = C.getConstantArrayType(C.CharTy, Size, nullptr,
+ ArraySizeModifier::Normal,
+ /*IndexTypeQuals=*/0);
}
AtomicAlign = ValueAlign = lvalue.getAlignment();
} else if (lvalue.isVectorElt()) {
@@ -149,11 +148,20 @@ namespace {
return LVal.getExtVectorPointer();
}
Address getAtomicAddress() const {
- return Address(getAtomicPointer(), getAtomicAlignment());
+ llvm::Type *ElTy;
+ if (LVal.isSimple())
+ ElTy = LVal.getAddress(CGF).getElementType();
+ else if (LVal.isBitField())
+ ElTy = LVal.getBitFieldAddress().getElementType();
+ else if (LVal.isVectorElt())
+ ElTy = LVal.getVectorAddress().getElementType();
+ else
+ ElTy = LVal.getExtVectorAddress().getElementType();
+ return Address(getAtomicPointer(), ElTy, getAtomicAlignment());
}
Address getAtomicAddressAsAtomicIntPointer() const {
- return emitCastToAtomicIntPointer(getAtomicAddress());
+ return castToAtomicIntPointer(getAtomicAddress());
}
/// Is the atomic size larger than the underlying value type?
@@ -175,7 +183,7 @@ namespace {
/// Cast the given pointer to an integer pointer suitable for atomic
/// operations if the source.
- Address emitCastToAtomicIntPointer(Address Addr) const;
+ Address castToAtomicIntPointer(Address Addr) const;
/// If Addr is compatible with the iN that will be used for an atomic
/// operation, bitcast it. Otherwise, create a temporary that is suitable
@@ -296,7 +304,8 @@ Address AtomicInfo::CreateTempAlloca() const {
// Cast to pointer to value type for bitfields.
if (LVal.isBitField())
return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- TempAlloca, getAtomicAddress().getType());
+ TempAlloca, getAtomicAddress().getType(),
+ getAtomicAddress().getElementType());
return TempAlloca;
}
@@ -307,7 +316,7 @@ static RValue emitAtomicLibcall(CodeGenFunction &CGF,
const CGFunctionInfo &fnInfo =
CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
- llvm::AttrBuilder fnAttrB;
+ llvm::AttrBuilder fnAttrB(CGF.getLLVMContext());
fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
fnAttrB.addAttribute(llvm::Attribute::WillReturn);
llvm::AttributeList fnAttrs = llvm::AttributeList::get(
@@ -351,12 +360,12 @@ bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
assert(LVal.isSimple());
- llvm::Value *addr = LVal.getPointer(CGF);
- if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
+ Address addr = LVal.getAddress(CGF);
+ if (!requiresMemSetZero(addr.getElementType()))
return false;
CGF.Builder.CreateMemSet(
- addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
+ addr.getPointer(), llvm::ConstantInt::get(CGF.Int8Ty, 0),
CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
LVal.getAlignment().getAsAlign());
return true;
@@ -374,8 +383,7 @@ static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
- Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
- Scope);
+ Ptr, Expected, Desired, SuccessOrder, FailureOrder, Scope);
Pair->setVolatile(E->isVolatile());
Pair->setWeak(IsWeak);
@@ -499,9 +507,11 @@ static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
default:
llvm_unreachable("Unexpected min/max operation");
case AtomicExpr::AO__atomic_max_fetch:
+ case AtomicExpr::AO__scoped_atomic_max_fetch:
Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
break;
case AtomicExpr::AO__atomic_min_fetch:
+ case AtomicExpr::AO__scoped_atomic_min_fetch:
Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
break;
}
@@ -524,17 +534,21 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
llvm_unreachable("Already handled!");
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
FailureOrder, Size, Order, Scope);
return;
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
FailureOrder, Size, Order, Scope);
return;
case AtomicExpr::AO__atomic_compare_exchange:
- case AtomicExpr::AO__atomic_compare_exchange_n: {
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ case AtomicExpr::AO__scoped_atomic_compare_exchange:
+ case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
Val1, Val2, FailureOrder, Size, Order, Scope);
@@ -565,8 +579,11 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
}
case AtomicExpr::AO__c11_atomic_load:
case AtomicExpr::AO__opencl_atomic_load:
+ case AtomicExpr::AO__hip_atomic_load:
case AtomicExpr::AO__atomic_load_n:
- case AtomicExpr::AO__atomic_load: {
+ case AtomicExpr::AO__atomic_load:
+ case AtomicExpr::AO__scoped_atomic_load_n:
+ case AtomicExpr::AO__scoped_atomic_load: {
llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
Load->setAtomic(Order, Scope);
Load->setVolatile(E->isVolatile());
@@ -576,8 +593,11 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__opencl_atomic_store:
+ case AtomicExpr::AO__hip_atomic_store:
case AtomicExpr::AO__atomic_store:
- case AtomicExpr::AO__atomic_store_n: {
+ case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__scoped_atomic_store:
+ case AtomicExpr::AO__scoped_atomic_store_n: {
llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
Store->setAtomic(Order, Scope);
@@ -586,92 +606,125 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
}
case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__hip_atomic_exchange:
case AtomicExpr::AO__opencl_atomic_exchange:
case AtomicExpr::AO__atomic_exchange_n:
case AtomicExpr::AO__atomic_exchange:
+ case AtomicExpr::AO__scoped_atomic_exchange_n:
+ case AtomicExpr::AO__scoped_atomic_exchange:
Op = llvm::AtomicRMWInst::Xchg;
break;
case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__scoped_atomic_add_fetch:
PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
: llvm::Instruction::Add;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__hip_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__scoped_atomic_fetch_add:
Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
: llvm::AtomicRMWInst::Add;
break;
case AtomicExpr::AO__atomic_sub_fetch:
+ case AtomicExpr::AO__scoped_atomic_sub_fetch:
PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
: llvm::Instruction::Sub;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__hip_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__scoped_atomic_fetch_sub:
Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
: llvm::AtomicRMWInst::Sub;
break;
case AtomicExpr::AO__atomic_min_fetch:
+ case AtomicExpr::AO__scoped_atomic_min_fetch:
PostOpMinMax = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AtomicExpr::AO__c11_atomic_fetch_min:
+ case AtomicExpr::AO__hip_atomic_fetch_min:
case AtomicExpr::AO__opencl_atomic_fetch_min:
case AtomicExpr::AO__atomic_fetch_min:
- Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
- : llvm::AtomicRMWInst::UMin;
+ case AtomicExpr::AO__scoped_atomic_fetch_min:
+ Op = E->getValueType()->isFloatingType()
+ ? llvm::AtomicRMWInst::FMin
+ : (E->getValueType()->isSignedIntegerType()
+ ? llvm::AtomicRMWInst::Min
+ : llvm::AtomicRMWInst::UMin);
break;
case AtomicExpr::AO__atomic_max_fetch:
+ case AtomicExpr::AO__scoped_atomic_max_fetch:
PostOpMinMax = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AtomicExpr::AO__c11_atomic_fetch_max:
+ case AtomicExpr::AO__hip_atomic_fetch_max:
case AtomicExpr::AO__opencl_atomic_fetch_max:
case AtomicExpr::AO__atomic_fetch_max:
- Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
- : llvm::AtomicRMWInst::UMax;
+ case AtomicExpr::AO__scoped_atomic_fetch_max:
+ Op = E->getValueType()->isFloatingType()
+ ? llvm::AtomicRMWInst::FMax
+ : (E->getValueType()->isSignedIntegerType()
+ ? llvm::AtomicRMWInst::Max
+ : llvm::AtomicRMWInst::UMax);
break;
case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__scoped_atomic_and_fetch:
PostOp = llvm::Instruction::And;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__hip_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__scoped_atomic_fetch_and:
Op = llvm::AtomicRMWInst::And;
break;
case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__scoped_atomic_or_fetch:
PostOp = llvm::Instruction::Or;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__hip_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__scoped_atomic_fetch_or:
Op = llvm::AtomicRMWInst::Or;
break;
case AtomicExpr::AO__atomic_xor_fetch:
+ case AtomicExpr::AO__scoped_atomic_xor_fetch:
PostOp = llvm::Instruction::Xor;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__hip_atomic_fetch_xor:
case AtomicExpr::AO__opencl_atomic_fetch_xor:
case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__scoped_atomic_fetch_xor:
Op = llvm::AtomicRMWInst::Xor;
break;
case AtomicExpr::AO__atomic_nand_fetch:
+ case AtomicExpr::AO__scoped_atomic_nand_fetch:
PostOp = llvm::Instruction::And; // the NOT is special cased below
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
+ case AtomicExpr::AO__c11_atomic_fetch_nand:
case AtomicExpr::AO__atomic_fetch_nand:
+ case AtomicExpr::AO__scoped_atomic_fetch_nand:
Op = llvm::AtomicRMWInst::Nand;
break;
}
llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
llvm::AtomicRMWInst *RMWI =
- CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
+ CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order, Scope);
RMWI->setVolatile(E->isVolatile());
// For __atomic_*_fetch operations, perform the operation again to
@@ -684,7 +737,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
else if (PostOp)
Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
LoadVal1);
- if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
+ if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch ||
+ E->getOp() == AtomicExpr::AO__scoped_atomic_nand_fetch)
Result = CGF.Builder.CreateNot(Result);
CGF.Builder.CreateStore(Result, Dest);
}
@@ -767,9 +821,8 @@ AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
ValTy =
CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
- llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
- SizeInBits)->getPointerTo();
- Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
+ llvm::Type *ITy = llvm::IntegerType::get(CGF.getLLVMContext(), SizeInBits);
+ Address Ptr = Address(Val, ITy, Align);
Val = CGF.EmitLoadOfScalar(Ptr, false,
CGF.getContext().getPointerType(ValTy),
Loc);
@@ -777,8 +830,7 @@ AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
Args.add(RValue::get(Val), ValTy);
} else {
// Non-optimized functions always take a reference.
- Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
- CGF.getContext().VoidPtrTy);
+ Args.add(RValue::get(Val), CGF.getContext().VoidPtrTy);
}
}
@@ -835,43 +887,57 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__opencl_atomic_init:
llvm_unreachable("Already handled above with EmitAtomicInit!");
+ case AtomicExpr::AO__atomic_load_n:
+ case AtomicExpr::AO__scoped_atomic_load_n:
case AtomicExpr::AO__c11_atomic_load:
case AtomicExpr::AO__opencl_atomic_load:
- case AtomicExpr::AO__atomic_load_n:
+ case AtomicExpr::AO__hip_atomic_load:
break;
case AtomicExpr::AO__atomic_load:
+ case AtomicExpr::AO__scoped_atomic_load:
Dest = EmitPointerWithAlignment(E->getVal1());
break;
case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__scoped_atomic_store:
Val1 = EmitPointerWithAlignment(E->getVal1());
break;
case AtomicExpr::AO__atomic_exchange:
+ case AtomicExpr::AO__scoped_atomic_exchange:
Val1 = EmitPointerWithAlignment(E->getVal1());
Dest = EmitPointerWithAlignment(E->getVal2());
break;
- case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
- case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
- case AtomicExpr::AO__atomic_compare_exchange_n:
- case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__scoped_atomic_compare_exchange:
+ case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
Val1 = EmitPointerWithAlignment(E->getVal1());
- if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
+ if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
+ E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
Val2 = EmitPointerWithAlignment(E->getVal2());
else
Val2 = EmitValToTemp(*this, E->getVal2());
OrderFail = EmitScalarExpr(E->getOrderFail());
if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
- E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
+ E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
+ E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
+ E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
IsWeak = EmitScalarExpr(E->getWeak());
break;
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__hip_atomic_fetch_add:
+ case AtomicExpr::AO__hip_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
if (MemTy->isPointerType()) {
@@ -889,42 +955,68 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_max:
+ case AtomicExpr::AO__atomic_fetch_min:
case AtomicExpr::AO__atomic_fetch_sub:
case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_max_fetch:
+ case AtomicExpr::AO__atomic_min_fetch:
case AtomicExpr::AO__atomic_sub_fetch:
+ case AtomicExpr::AO__c11_atomic_fetch_max:
+ case AtomicExpr::AO__c11_atomic_fetch_min:
+ case AtomicExpr::AO__opencl_atomic_fetch_max:
+ case AtomicExpr::AO__opencl_atomic_fetch_min:
+ case AtomicExpr::AO__hip_atomic_fetch_max:
+ case AtomicExpr::AO__hip_atomic_fetch_min:
+ case AtomicExpr::AO__scoped_atomic_fetch_add:
+ case AtomicExpr::AO__scoped_atomic_fetch_max:
+ case AtomicExpr::AO__scoped_atomic_fetch_min:
+ case AtomicExpr::AO__scoped_atomic_fetch_sub:
+ case AtomicExpr::AO__scoped_atomic_add_fetch:
+ case AtomicExpr::AO__scoped_atomic_max_fetch:
+ case AtomicExpr::AO__scoped_atomic_min_fetch:
+ case AtomicExpr::AO__scoped_atomic_sub_fetch:
ShouldCastToIntPtrTy = !MemTy->isFloatingType();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
- case AtomicExpr::AO__c11_atomic_store:
- case AtomicExpr::AO__c11_atomic_exchange:
- case AtomicExpr::AO__opencl_atomic_store:
- case AtomicExpr::AO__opencl_atomic_exchange:
+ case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__atomic_nand_fetch:
+ case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__atomic_xor_fetch:
case AtomicExpr::AO__atomic_store_n:
case AtomicExpr::AO__atomic_exchange_n:
case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__c11_atomic_fetch_nand:
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__c11_atomic_fetch_xor:
- case AtomicExpr::AO__c11_atomic_fetch_max:
- case AtomicExpr::AO__c11_atomic_fetch_min:
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__hip_atomic_fetch_and:
+ case AtomicExpr::AO__hip_atomic_fetch_or:
+ case AtomicExpr::AO__hip_atomic_fetch_xor:
+ case AtomicExpr::AO__hip_atomic_store:
+ case AtomicExpr::AO__hip_atomic_exchange:
case AtomicExpr::AO__opencl_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_xor:
- case AtomicExpr::AO__opencl_atomic_fetch_min:
- case AtomicExpr::AO__opencl_atomic_fetch_max:
- case AtomicExpr::AO__atomic_fetch_and:
- case AtomicExpr::AO__atomic_fetch_or:
- case AtomicExpr::AO__atomic_fetch_xor:
- case AtomicExpr::AO__atomic_fetch_nand:
- case AtomicExpr::AO__atomic_and_fetch:
- case AtomicExpr::AO__atomic_or_fetch:
- case AtomicExpr::AO__atomic_xor_fetch:
- case AtomicExpr::AO__atomic_nand_fetch:
- case AtomicExpr::AO__atomic_max_fetch:
- case AtomicExpr::AO__atomic_min_fetch:
- case AtomicExpr::AO__atomic_fetch_max:
- case AtomicExpr::AO__atomic_fetch_min:
+ case AtomicExpr::AO__opencl_atomic_store:
+ case AtomicExpr::AO__opencl_atomic_exchange:
+ case AtomicExpr::AO__scoped_atomic_fetch_and:
+ case AtomicExpr::AO__scoped_atomic_fetch_nand:
+ case AtomicExpr::AO__scoped_atomic_fetch_or:
+ case AtomicExpr::AO__scoped_atomic_fetch_xor:
+ case AtomicExpr::AO__scoped_atomic_and_fetch:
+ case AtomicExpr::AO__scoped_atomic_nand_fetch:
+ case AtomicExpr::AO__scoped_atomic_or_fetch:
+ case AtomicExpr::AO__scoped_atomic_xor_fetch:
+ case AtomicExpr::AO__scoped_atomic_store_n:
+ case AtomicExpr::AO__scoped_atomic_exchange_n:
Val1 = EmitValToTemp(*this, E->getVal1());
break;
}
@@ -938,7 +1030,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
AtomicInfo Atomics(*this, AtomicVal);
if (ShouldCastToIntPtrTy) {
- Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
+ Ptr = Atomics.castToAtomicIntPointer(Ptr);
if (Val1.isValid())
Val1 = Atomics.convertToAtomicIntPointer(Val1);
if (Val2.isValid())
@@ -946,13 +1038,13 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
}
if (Dest.isValid()) {
if (ShouldCastToIntPtrTy)
- Dest = Atomics.emitCastToAtomicIntPointer(Dest);
+ Dest = Atomics.castToAtomicIntPointer(Dest);
} else if (E->isCmpXChg())
Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
else if (!RValTy->isVoidType()) {
Dest = Atomics.CreateTempAlloca();
if (ShouldCastToIntPtrTy)
- Dest = Atomics.emitCastToAtomicIntPointer(Dest);
+ Dest = Atomics.castToAtomicIntPointer(Dest);
}
// Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
@@ -963,36 +1055,60 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__opencl_atomic_init:
llvm_unreachable("Already handled above with EmitAtomicInit!");
- case AtomicExpr::AO__c11_atomic_fetch_add:
- case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_add:
- case AtomicExpr::AO__c11_atomic_fetch_and:
- case AtomicExpr::AO__opencl_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_and:
- case AtomicExpr::AO__c11_atomic_fetch_or:
- case AtomicExpr::AO__opencl_atomic_fetch_or:
- case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_max:
+ case AtomicExpr::AO__atomic_fetch_min:
case AtomicExpr::AO__atomic_fetch_nand:
- case AtomicExpr::AO__c11_atomic_fetch_sub:
- case AtomicExpr::AO__opencl_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_sub:
- case AtomicExpr::AO__c11_atomic_fetch_xor:
- case AtomicExpr::AO__opencl_atomic_fetch_xor:
- case AtomicExpr::AO__opencl_atomic_fetch_min:
- case AtomicExpr::AO__opencl_atomic_fetch_max:
case AtomicExpr::AO__atomic_fetch_xor:
- case AtomicExpr::AO__c11_atomic_fetch_max:
- case AtomicExpr::AO__c11_atomic_fetch_min:
case AtomicExpr::AO__atomic_add_fetch:
case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__atomic_max_fetch:
+ case AtomicExpr::AO__atomic_min_fetch:
case AtomicExpr::AO__atomic_nand_fetch:
case AtomicExpr::AO__atomic_or_fetch:
case AtomicExpr::AO__atomic_sub_fetch:
case AtomicExpr::AO__atomic_xor_fetch:
- case AtomicExpr::AO__atomic_fetch_max:
- case AtomicExpr::AO__atomic_fetch_min:
- case AtomicExpr::AO__atomic_max_fetch:
- case AtomicExpr::AO__atomic_min_fetch:
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__c11_atomic_fetch_max:
+ case AtomicExpr::AO__c11_atomic_fetch_min:
+ case AtomicExpr::AO__c11_atomic_fetch_nand:
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__hip_atomic_fetch_add:
+ case AtomicExpr::AO__hip_atomic_fetch_and:
+ case AtomicExpr::AO__hip_atomic_fetch_max:
+ case AtomicExpr::AO__hip_atomic_fetch_min:
+ case AtomicExpr::AO__hip_atomic_fetch_or:
+ case AtomicExpr::AO__hip_atomic_fetch_sub:
+ case AtomicExpr::AO__hip_atomic_fetch_xor:
+ case AtomicExpr::AO__opencl_atomic_fetch_add:
+ case AtomicExpr::AO__opencl_atomic_fetch_and:
+ case AtomicExpr::AO__opencl_atomic_fetch_max:
+ case AtomicExpr::AO__opencl_atomic_fetch_min:
+ case AtomicExpr::AO__opencl_atomic_fetch_or:
+ case AtomicExpr::AO__opencl_atomic_fetch_sub:
+ case AtomicExpr::AO__opencl_atomic_fetch_xor:
+ case AtomicExpr::AO__scoped_atomic_fetch_add:
+ case AtomicExpr::AO__scoped_atomic_fetch_and:
+ case AtomicExpr::AO__scoped_atomic_fetch_max:
+ case AtomicExpr::AO__scoped_atomic_fetch_min:
+ case AtomicExpr::AO__scoped_atomic_fetch_nand:
+ case AtomicExpr::AO__scoped_atomic_fetch_or:
+ case AtomicExpr::AO__scoped_atomic_fetch_sub:
+ case AtomicExpr::AO__scoped_atomic_fetch_xor:
+ case AtomicExpr::AO__scoped_atomic_add_fetch:
+ case AtomicExpr::AO__scoped_atomic_and_fetch:
+ case AtomicExpr::AO__scoped_atomic_max_fetch:
+ case AtomicExpr::AO__scoped_atomic_min_fetch:
+ case AtomicExpr::AO__scoped_atomic_nand_fetch:
+ case AtomicExpr::AO__scoped_atomic_or_fetch:
+ case AtomicExpr::AO__scoped_atomic_sub_fetch:
+ case AtomicExpr::AO__scoped_atomic_xor_fetch:
// For these, only library calls for certain sizes exist.
UseOptimizedLibcall = true;
break;
@@ -1001,25 +1117,38 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__atomic_store:
case AtomicExpr::AO__atomic_exchange:
case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__scoped_atomic_load:
+ case AtomicExpr::AO__scoped_atomic_store:
+ case AtomicExpr::AO__scoped_atomic_exchange:
+ case AtomicExpr::AO__scoped_atomic_compare_exchange:
// Use the generic version if we don't know that the operand will be
// suitably aligned for the optimized version.
if (Misaligned)
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
+ case AtomicExpr::AO__atomic_load_n:
+ case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
case AtomicExpr::AO__c11_atomic_load:
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__hip_atomic_load:
+ case AtomicExpr::AO__hip_atomic_store:
+ case AtomicExpr::AO__hip_atomic_exchange:
+ case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
case AtomicExpr::AO__opencl_atomic_load:
case AtomicExpr::AO__opencl_atomic_store:
case AtomicExpr::AO__opencl_atomic_exchange:
case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
- case AtomicExpr::AO__atomic_load_n:
- case AtomicExpr::AO__atomic_store_n:
- case AtomicExpr::AO__atomic_exchange_n:
- case AtomicExpr::AO__atomic_compare_exchange_n:
+ case AtomicExpr::AO__scoped_atomic_load_n:
+ case AtomicExpr::AO__scoped_atomic_store_n:
+ case AtomicExpr::AO__scoped_atomic_exchange_n:
+ case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
// Only use optimized library calls for sizes for which they exist.
// FIXME: Size == 16 optimized library functions exist too.
if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
@@ -1043,15 +1172,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
if (AS == LangAS::opencl_generic)
return V;
auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
- auto T = V->getType();
- auto *DestType = T->getPointerElementType()->getPointerTo(DestAS);
+ auto *DestType = llvm::PointerType::get(getLLVMContext(), DestAS);
return getTargetHooks().performAddrSpaceCast(
*this, V, AS, LangAS::opencl_generic, DestType, false);
};
- Args.add(RValue::get(CastToGenericAddrSpace(
- EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
+ Args.add(RValue::get(CastToGenericAddrSpace(Ptr.getPointer(),
+ E->getPtr()->getType())),
getContext().VoidPtrTy);
std::string LibCallName;
@@ -1073,19 +1201,22 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// void *desired, int success, int failure)
// bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
// int success, int failure)
+ case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
- case AtomicExpr::AO__atomic_compare_exchange:
- case AtomicExpr::AO__atomic_compare_exchange_n:
+ case AtomicExpr::AO__scoped_atomic_compare_exchange:
+ case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
LibCallName = "__atomic_compare_exchange";
RetTy = getContext().BoolTy;
HaveRetTy = true;
- Args.add(
- RValue::get(CastToGenericAddrSpace(
- EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
- getContext().VoidPtrTy);
+ Args.add(RValue::get(CastToGenericAddrSpace(Val1.getPointer(),
+ E->getVal1()->getType())),
+ getContext().VoidPtrTy);
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
MemTy, E->getExprLoc(), TInfo.Width);
Args.add(RValue::get(Order), getContext().IntTy);
@@ -1094,20 +1225,26 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// void __atomic_exchange(size_t size, void *mem, void *val, void *return,
// int order)
// T __atomic_exchange_N(T *mem, T val, int order)
+ case AtomicExpr::AO__atomic_exchange:
+ case AtomicExpr::AO__atomic_exchange_n:
case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__hip_atomic_exchange:
case AtomicExpr::AO__opencl_atomic_exchange:
- case AtomicExpr::AO__atomic_exchange_n:
- case AtomicExpr::AO__atomic_exchange:
+ case AtomicExpr::AO__scoped_atomic_exchange:
+ case AtomicExpr::AO__scoped_atomic_exchange_n:
LibCallName = "__atomic_exchange";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
MemTy, E->getExprLoc(), TInfo.Width);
break;
// void __atomic_store(size_t size, void *mem, void *val, int order)
// void __atomic_store_N(T *mem, T val, int order)
- case AtomicExpr::AO__c11_atomic_store:
- case AtomicExpr::AO__opencl_atomic_store:
case AtomicExpr::AO__atomic_store:
case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__hip_atomic_store:
+ case AtomicExpr::AO__opencl_atomic_store:
+ case AtomicExpr::AO__scoped_atomic_store:
+ case AtomicExpr::AO__scoped_atomic_store_n:
LibCallName = "__atomic_store";
RetTy = getContext().VoidTy;
HaveRetTy = true;
@@ -1116,20 +1253,26 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
break;
// void __atomic_load(size_t size, void *mem, void *return, int order)
// T __atomic_load_N(T *mem, int order)
- case AtomicExpr::AO__c11_atomic_load:
- case AtomicExpr::AO__opencl_atomic_load:
case AtomicExpr::AO__atomic_load:
case AtomicExpr::AO__atomic_load_n:
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__hip_atomic_load:
+ case AtomicExpr::AO__opencl_atomic_load:
+ case AtomicExpr::AO__scoped_atomic_load:
+ case AtomicExpr::AO__scoped_atomic_load_n:
LibCallName = "__atomic_load";
break;
// T __atomic_add_fetch_N(T *mem, T val, int order)
// T __atomic_fetch_add_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__scoped_atomic_add_fetch:
PostOp = llvm::Instruction::Add;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
+ case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__hip_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_add:
- case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__scoped_atomic_fetch_add:
LibCallName = "__atomic_fetch_add";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
LoweredMemTy, E->getExprLoc(), TInfo.Width);
@@ -1137,11 +1280,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// T __atomic_and_fetch_N(T *mem, T val, int order)
// T __atomic_fetch_and_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__scoped_atomic_and_fetch:
PostOp = llvm::Instruction::And;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
+ case AtomicExpr::AO__atomic_fetch_and:
case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__hip_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_and:
- case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__scoped_atomic_fetch_and:
LibCallName = "__atomic_fetch_and";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
MemTy, E->getExprLoc(), TInfo.Width);
@@ -1149,11 +1295,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// T __atomic_or_fetch_N(T *mem, T val, int order)
// T __atomic_fetch_or_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__scoped_atomic_or_fetch:
PostOp = llvm::Instruction::Or;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
+ case AtomicExpr::AO__atomic_fetch_or:
case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__hip_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_or:
- case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__scoped_atomic_fetch_or:
LibCallName = "__atomic_fetch_or";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
MemTy, E->getExprLoc(), TInfo.Width);
@@ -1161,11 +1310,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// T __atomic_sub_fetch_N(T *mem, T val, int order)
// T __atomic_fetch_sub_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_sub_fetch:
+ case AtomicExpr::AO__scoped_atomic_sub_fetch:
PostOp = llvm::Instruction::Sub;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
+ case AtomicExpr::AO__atomic_fetch_sub:
case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__hip_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
- case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__scoped_atomic_fetch_sub:
LibCallName = "__atomic_fetch_sub";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
LoweredMemTy, E->getExprLoc(), TInfo.Width);
@@ -1173,20 +1325,26 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// T __atomic_xor_fetch_N(T *mem, T val, int order)
// T __atomic_fetch_xor_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_xor_fetch:
+ case AtomicExpr::AO__scoped_atomic_xor_fetch:
PostOp = llvm::Instruction::Xor;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
+ case AtomicExpr::AO__atomic_fetch_xor:
case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__hip_atomic_fetch_xor:
case AtomicExpr::AO__opencl_atomic_fetch_xor:
- case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__scoped_atomic_fetch_xor:
LibCallName = "__atomic_fetch_xor";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
MemTy, E->getExprLoc(), TInfo.Width);
break;
case AtomicExpr::AO__atomic_min_fetch:
+ case AtomicExpr::AO__scoped_atomic_min_fetch:
PostOpMinMax = true;
- LLVM_FALLTHROUGH;
- case AtomicExpr::AO__c11_atomic_fetch_min:
+ [[fallthrough]];
case AtomicExpr::AO__atomic_fetch_min:
+ case AtomicExpr::AO__c11_atomic_fetch_min:
+ case AtomicExpr::AO__scoped_atomic_fetch_min:
+ case AtomicExpr::AO__hip_atomic_fetch_min:
case AtomicExpr::AO__opencl_atomic_fetch_min:
LibCallName = E->getValueType()->isSignedIntegerType()
? "__atomic_fetch_min"
@@ -1195,11 +1353,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
LoweredMemTy, E->getExprLoc(), TInfo.Width);
break;
case AtomicExpr::AO__atomic_max_fetch:
+ case AtomicExpr::AO__scoped_atomic_max_fetch:
PostOpMinMax = true;
- LLVM_FALLTHROUGH;
- case AtomicExpr::AO__c11_atomic_fetch_max:
+ [[fallthrough]];
case AtomicExpr::AO__atomic_fetch_max:
+ case AtomicExpr::AO__c11_atomic_fetch_max:
+ case AtomicExpr::AO__hip_atomic_fetch_max:
case AtomicExpr::AO__opencl_atomic_fetch_max:
+ case AtomicExpr::AO__scoped_atomic_fetch_max:
LibCallName = E->getValueType()->isSignedIntegerType()
? "__atomic_fetch_max"
: "__atomic_fetch_umax";
@@ -1209,9 +1370,12 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// T __atomic_nand_fetch_N(T *mem, T val, int order)
// T __atomic_fetch_nand_N(T *mem, T val, int order)
case AtomicExpr::AO__atomic_nand_fetch:
+ case AtomicExpr::AO__scoped_atomic_nand_fetch:
PostOp = llvm::Instruction::And; // the NOT is special cased below
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case AtomicExpr::AO__atomic_fetch_nand:
+ case AtomicExpr::AO__c11_atomic_fetch_nand:
+ case AtomicExpr::AO__scoped_atomic_fetch_nand:
LibCallName = "__atomic_fetch_nand";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
MemTy, E->getExprLoc(), TInfo.Width);
@@ -1236,8 +1400,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
} else {
// Value is returned through parameter before the order.
RetTy = getContext().VoidTy;
- Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
- getContext().VoidPtrTy);
+ Args.add(RValue::get(Dest.getPointer()), getContext().VoidPtrTy);
}
}
// order is always the last parameter
@@ -1269,30 +1432,34 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
}
- if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
+ if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch ||
+ E->getOp() == AtomicExpr::AO__scoped_atomic_nand_fetch)
ResVal = Builder.CreateNot(ResVal);
- Builder.CreateStore(
- ResVal,
- Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
+ Builder.CreateStore(ResVal, Dest.withElementType(ResVal->getType()));
}
if (RValTy->isVoidType())
return RValue::get(nullptr);
- return convertTempToRValue(
- Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
- RValTy, E->getExprLoc());
+ return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
+ RValTy, E->getExprLoc());
}
bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
+ E->getOp() == AtomicExpr::AO__hip_atomic_store ||
E->getOp() == AtomicExpr::AO__atomic_store ||
- E->getOp() == AtomicExpr::AO__atomic_store_n;
+ E->getOp() == AtomicExpr::AO__atomic_store_n ||
+ E->getOp() == AtomicExpr::AO__scoped_atomic_store ||
+ E->getOp() == AtomicExpr::AO__scoped_atomic_store_n;
bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
+ E->getOp() == AtomicExpr::AO__hip_atomic_load ||
E->getOp() == AtomicExpr::AO__atomic_load ||
- E->getOp() == AtomicExpr::AO__atomic_load_n;
+ E->getOp() == AtomicExpr::AO__atomic_load_n ||
+ E->getOp() == AtomicExpr::AO__scoped_atomic_load ||
+ E->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
if (isa<llvm::ConstantInt>(Order)) {
auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
@@ -1331,10 +1498,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
if (RValTy->isVoidType())
return RValue::get(nullptr);
- return convertTempToRValue(
- Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
- Dest.getAddressSpace())),
- RValTy, E->getExprLoc());
+ return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
+ RValTy, E->getExprLoc());
}
// Long case, when Order isn't obviously constant.
@@ -1404,18 +1569,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
return RValue::get(nullptr);
assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
- return convertTempToRValue(
- Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
- Dest.getAddressSpace())),
- RValTy, E->getExprLoc());
+ return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
+ RValTy, E->getExprLoc());
}
-Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
- unsigned addrspace =
- cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
+Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
llvm::IntegerType *ty =
llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
- return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
+ return addr.withElementType(ty);
}
Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
@@ -1428,7 +1589,7 @@ Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
Addr = Tmp;
}
- return emitCastToAtomicIntPointer(Addr);
+ return castToAtomicIntPointer(Addr);
}
RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
@@ -1477,7 +1638,7 @@ RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
!AsValue)) {
auto *ValTy = AsValue
? CGF.ConvertTypeForMem(ValueTy)
- : getAtomicAddress().getType()->getPointerElementType();
+ : getAtomicAddress().getElementType();
if (ValTy->isIntegerTy()) {
assert(IntVal->getType() == ValTy && "Different integer types.");
return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
@@ -1500,7 +1661,7 @@ RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
}
// Slam the integer into the temporary.
- Address CastTemp = emitCastToAtomicIntPointer(Temp);
+ Address CastTemp = castToAtomicIntPointer(Temp);
CGF.Builder.CreateStore(IntVal, CastTemp)
->setVolatile(TempIsVolatile);
@@ -1512,10 +1673,8 @@ void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
// void __atomic_load(size_t size, void *mem, void *return, int order);
CallArgList Args;
Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
- Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
- CGF.getContext().VoidPtrTy);
- Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
- CGF.getContext().VoidPtrTy);
+ Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
+ Args.add(RValue::get(AddForLoaded), CGF.getContext().VoidPtrTy);
Args.add(
RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
CGF.getContext().IntTy);
@@ -1540,7 +1699,7 @@ llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// performing such an operation can be performed without a libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
- if (!CGM.getCodeGenOpts().MSVolatile) return false;
+ if (!CGM.getLangOpts().MSVolatile) return false;
AtomicInfo AI(*this, LV);
bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
// An atomic is inline if we don't need to use a libcall.
@@ -1678,7 +1837,7 @@ llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
Address Addr = materializeRValue(RVal);
// Cast the temporary to the atomic int type and pull a value out.
- Addr = emitCastToAtomicIntPointer(Addr);
+ Addr = castToAtomicIntPointer(Addr);
return CGF.Builder.CreateLoad(Addr);
}
@@ -1687,8 +1846,7 @@ std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
// Do the atomic store.
Address Addr = getAtomicAddressAsAtomicIntPointer();
- auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
- ExpectedVal, DesiredVal,
+ auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal,
Success, Failure);
// Other decoration.
Inst->setVolatile(LVal.isVolatileQualified());
@@ -1709,12 +1867,9 @@ AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
// void *desired, int success, int failure);
CallArgList Args;
Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
- Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
- CGF.getContext().VoidPtrTy);
- Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
- CGF.getContext().VoidPtrTy);
- Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
- CGF.getContext().VoidPtrTy);
+ Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
+ Args.add(RValue::get(ExpectedAddr), CGF.getContext().VoidPtrTy);
+ Args.add(RValue::get(DesiredAddr), CGF.getContext().VoidPtrTy);
Args.add(RValue::get(
llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
CGF.getContext().IntTy);
@@ -1856,7 +2011,7 @@ void AtomicInfo::EmitAtomicUpdateOp(
/*NumReservedValues=*/2);
PHI->addIncoming(OldVal, CurBB);
Address NewAtomicAddr = CreateTempAlloca();
- Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
+ Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr);
if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
requiresMemSetZero(getAtomicAddress().getElementType())) {
CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
@@ -1938,7 +2093,7 @@ void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
/*NumReservedValues=*/2);
PHI->addIncoming(OldVal, CurBB);
Address NewAtomicAddr = CreateTempAlloca();
- Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
+ Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr);
if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
requiresMemSetZero(getAtomicAddress().getElementType())) {
CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
@@ -2017,10 +2172,8 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
CallArgList args;
args.add(RValue::get(atomics.getAtomicSizeValue()),
getContext().getSizeType());
- args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
- getContext().VoidPtrTy);
- args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
- getContext().VoidPtrTy);
+ args.add(RValue::get(atomics.getAtomicPointer()), getContext().VoidPtrTy);
+ args.add(RValue::get(srcAddr.getPointer()), getContext().VoidPtrTy);
args.add(
RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
getContext().IntTy);
@@ -2032,8 +2185,7 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
// Do the atomic store.
- Address addr =
- atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
+ Address addr = atomics.castToAtomicIntPointer(atomics.getAtomicAddress());
intValue = Builder.CreateIntCast(
intValue, addr.getElementType(), /*isSigned=*/false);
llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
index f39a56f81d41..0cbace7b7f7b 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
@@ -33,10 +33,10 @@ using namespace clang;
using namespace CodeGen;
CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name)
- : Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
- HasCXXObject(false), UsesStret(false), HasCapturedVariableLayout(false),
- CapturesNonExternalType(false), LocalAddress(Address::invalid()),
- StructureType(nullptr), Block(block) {
+ : Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
+ NoEscape(false), HasCXXObject(false), UsesStret(false),
+ HasCapturedVariableLayout(false), CapturesNonExternalType(false),
+ LocalAddress(Address::invalid()), StructureType(nullptr), Block(block) {
// Skip asm prefix, if any. 'name' is usually taken directly from
// the mangled name of the enclosing function.
@@ -66,39 +66,6 @@ static llvm::Constant *buildDisposeHelper(CodeGenModule &CGM,
namespace {
-/// Represents a type of copy/destroy operation that should be performed for an
-/// entity that's captured by a block.
-enum class BlockCaptureEntityKind {
- CXXRecord, // Copy or destroy
- ARCWeak,
- ARCStrong,
- NonTrivialCStruct,
- BlockObject, // Assign or release
- None
-};
-
-/// Represents a captured entity that requires extra operations in order for
-/// this entity to be copied or destroyed correctly.
-struct BlockCaptureManagedEntity {
- BlockCaptureEntityKind CopyKind, DisposeKind;
- BlockFieldFlags CopyFlags, DisposeFlags;
- const BlockDecl::Capture *CI;
- const CGBlockInfo::Capture *Capture;
-
- BlockCaptureManagedEntity(BlockCaptureEntityKind CopyType,
- BlockCaptureEntityKind DisposeType,
- BlockFieldFlags CopyFlags,
- BlockFieldFlags DisposeFlags,
- const BlockDecl::Capture &CI,
- const CGBlockInfo::Capture &Capture)
- : CopyKind(CopyType), DisposeKind(DisposeType), CopyFlags(CopyFlags),
- DisposeFlags(DisposeFlags), CI(&CI), Capture(&Capture) {}
-
- bool operator<(const BlockCaptureManagedEntity &Other) const {
- return Capture->getOffset() < Other.Capture->getOffset();
- }
-};
-
enum class CaptureStrKind {
// String for the copy helper.
CopyHelper,
@@ -110,11 +77,7 @@ enum class CaptureStrKind {
} // end anonymous namespace
-static void findBlockCapturedManagedEntities(
- const CGBlockInfo &BlockInfo, const LangOptions &LangOpts,
- SmallVectorImpl<BlockCaptureManagedEntity> &ManagedCaptures);
-
-static std::string getBlockCaptureStr(const BlockCaptureManagedEntity &E,
+static std::string getBlockCaptureStr(const CGBlockInfo::Capture &Cap,
CaptureStrKind StrKind,
CharUnits BlockAlignment,
CodeGenModule &CGM);
@@ -124,34 +87,33 @@ static std::string getBlockDescriptorName(const CGBlockInfo &BlockInfo,
std::string Name = "__block_descriptor_";
Name += llvm::to_string(BlockInfo.BlockSize.getQuantity()) + "_";
- if (BlockInfo.needsCopyDisposeHelpers()) {
+ if (BlockInfo.NeedsCopyDispose) {
if (CGM.getLangOpts().Exceptions)
Name += "e";
if (CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
Name += "a";
Name += llvm::to_string(BlockInfo.BlockAlign.getQuantity()) + "_";
- SmallVector<BlockCaptureManagedEntity, 4> ManagedCaptures;
- findBlockCapturedManagedEntities(BlockInfo, CGM.getContext().getLangOpts(),
- ManagedCaptures);
+ for (auto &Cap : BlockInfo.SortedCaptures) {
+ if (Cap.isConstantOrTrivial())
+ continue;
- for (const BlockCaptureManagedEntity &E : ManagedCaptures) {
- Name += llvm::to_string(E.Capture->getOffset().getQuantity());
+ Name += llvm::to_string(Cap.getOffset().getQuantity());
- if (E.CopyKind == E.DisposeKind) {
+ if (Cap.CopyKind == Cap.DisposeKind) {
// If CopyKind and DisposeKind are the same, merge the capture
// information.
- assert(E.CopyKind != BlockCaptureEntityKind::None &&
+ assert(Cap.CopyKind != BlockCaptureEntityKind::None &&
"shouldn't see BlockCaptureManagedEntity that is None");
- Name += getBlockCaptureStr(E, CaptureStrKind::Merged,
+ Name += getBlockCaptureStr(Cap, CaptureStrKind::Merged,
BlockInfo.BlockAlign, CGM);
} else {
// If CopyKind and DisposeKind are not the same, which can happen when
// either Kind is None or the captured object is a __strong block,
// concatenate the copy and dispose strings.
- Name += getBlockCaptureStr(E, CaptureStrKind::CopyHelper,
+ Name += getBlockCaptureStr(Cap, CaptureStrKind::CopyHelper,
BlockInfo.BlockAlign, CGM);
- Name += getBlockCaptureStr(E, CaptureStrKind::DisposeHelper,
+ Name += getBlockCaptureStr(Cap, CaptureStrKind::DisposeHelper,
BlockInfo.BlockAlign, CGM);
}
}
@@ -190,9 +152,8 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
cast<llvm::IntegerType>(CGM.getTypes().ConvertType(C.UnsignedLongTy));
llvm::PointerType *i8p = nullptr;
if (CGM.getLangOpts().OpenCL)
- i8p =
- llvm::Type::getInt8PtrTy(
- CGM.getLLVMContext(), C.getTargetAddressSpace(LangAS::opencl_constant));
+ i8p = llvm::PointerType::get(
+ CGM.getLLVMContext(), C.getTargetAddressSpace(LangAS::opencl_constant));
else
i8p = CGM.VoidPtrTy;
@@ -203,8 +164,7 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
CGM.getLangOpts().getGC() == LangOptions::NonGC) {
descName = getBlockDescriptorName(blockInfo, CGM);
if (llvm::GlobalValue *desc = CGM.getModule().getNamedValue(descName))
- return llvm::ConstantExpr::getBitCast(desc,
- CGM.getBlockDescriptorType());
+ return desc;
}
// If there isn't an equivalent block descriptor global variable, create a new
@@ -223,7 +183,7 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
// Optional copy/dispose helpers.
bool hasInternalHelper = false;
- if (blockInfo.needsCopyDisposeHelpers()) {
+ if (blockInfo.NeedsCopyDispose) {
// copy_func_helper_decl
llvm::Constant *copyHelper = buildCopyHelper(CGM, blockInfo);
elements.add(copyHelper);
@@ -232,8 +192,9 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
llvm::Constant *disposeHelper = buildDisposeHelper(CGM, blockInfo);
elements.add(disposeHelper);
- if (cast<llvm::Function>(copyHelper->getOperand(0))->hasInternalLinkage() ||
- cast<llvm::Function>(disposeHelper->getOperand(0))
+ if (cast<llvm::Function>(copyHelper->stripPointerCasts())
+ ->hasInternalLinkage() ||
+ cast<llvm::Function>(disposeHelper->stripPointerCasts())
->hasInternalLinkage())
hasInternalHelper = true;
}
@@ -241,8 +202,7 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
// Signature. Mandatory ObjC-style method descriptor @encode sequence.
std::string typeAtEncoding =
CGM.getContext().getObjCEncodingForBlock(blockInfo.getBlockExpr());
- elements.add(llvm::ConstantExpr::getBitCast(
- CGM.GetAddrOfConstantCString(typeAtEncoding).getPointer(), i8p));
+ elements.add(CGM.GetAddrOfConstantCString(typeAtEncoding).getPointer());
// GC layout.
if (C.getLangOpts().ObjC) {
@@ -281,7 +241,7 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
global->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
}
- return llvm::ConstantExpr::getBitCast(global, CGM.getBlockDescriptorType());
+ return global;
}
/*
@@ -340,17 +300,21 @@ namespace {
struct BlockLayoutChunk {
CharUnits Alignment;
CharUnits Size;
- Qualifiers::ObjCLifetime Lifetime;
const BlockDecl::Capture *Capture; // null for 'this'
llvm::Type *Type;
QualType FieldType;
+ BlockCaptureEntityKind CopyKind, DisposeKind;
+ BlockFieldFlags CopyFlags, DisposeFlags;
BlockLayoutChunk(CharUnits align, CharUnits size,
- Qualifiers::ObjCLifetime lifetime,
- const BlockDecl::Capture *capture,
- llvm::Type *type, QualType fieldType)
- : Alignment(align), Size(size), Lifetime(lifetime),
- Capture(capture), Type(type), FieldType(fieldType) {}
+ const BlockDecl::Capture *capture, llvm::Type *type,
+ QualType fieldType, BlockCaptureEntityKind CopyKind,
+ BlockFieldFlags CopyFlags,
+ BlockCaptureEntityKind DisposeKind,
+ BlockFieldFlags DisposeFlags)
+ : Alignment(align), Size(size), Capture(capture), Type(type),
+ FieldType(fieldType), CopyKind(CopyKind), DisposeKind(DisposeKind),
+ CopyFlags(CopyFlags), DisposeFlags(DisposeFlags) {}
/// Tell the block info that this chunk has the given field index.
void setIndex(CGBlockInfo &info, unsigned index, CharUnits offset) {
@@ -358,32 +322,93 @@ namespace {
info.CXXThisIndex = index;
info.CXXThisOffset = offset;
} else {
- auto C = CGBlockInfo::Capture::makeIndex(index, offset, FieldType);
- info.Captures.insert({Capture->getVariable(), C});
+ info.SortedCaptures.push_back(CGBlockInfo::Capture::makeIndex(
+ index, offset, FieldType, CopyKind, CopyFlags, DisposeKind,
+ DisposeFlags, Capture));
}
}
+
+ bool isTrivial() const {
+ return CopyKind == BlockCaptureEntityKind::None &&
+ DisposeKind == BlockCaptureEntityKind::None;
+ }
};
- /// Order by 1) all __strong together 2) next, all byfref together 3) next,
- /// all __weak together. Preserve descending alignment in all situations.
+ /// Order by 1) all __strong together 2) next, all block together 3) next,
+ /// all byref together 4) next, all __weak together. Preserve descending
+ /// alignment in all situations.
bool operator<(const BlockLayoutChunk &left, const BlockLayoutChunk &right) {
if (left.Alignment != right.Alignment)
return left.Alignment > right.Alignment;
auto getPrefOrder = [](const BlockLayoutChunk &chunk) {
- if (chunk.Capture && chunk.Capture->isByRef())
- return 1;
- if (chunk.Lifetime == Qualifiers::OCL_Strong)
+ switch (chunk.CopyKind) {
+ case BlockCaptureEntityKind::ARCStrong:
return 0;
- if (chunk.Lifetime == Qualifiers::OCL_Weak)
- return 2;
- return 3;
+ case BlockCaptureEntityKind::BlockObject:
+ switch (chunk.CopyFlags.getBitMask()) {
+ case BLOCK_FIELD_IS_OBJECT:
+ return 0;
+ case BLOCK_FIELD_IS_BLOCK:
+ return 1;
+ case BLOCK_FIELD_IS_BYREF:
+ return 2;
+ default:
+ break;
+ }
+ break;
+ case BlockCaptureEntityKind::ARCWeak:
+ return 3;
+ default:
+ break;
+ }
+ return 4;
};
return getPrefOrder(left) < getPrefOrder(right);
}
} // end anonymous namespace
+static std::pair<BlockCaptureEntityKind, BlockFieldFlags>
+computeCopyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
+ const LangOptions &LangOpts);
+
+static std::pair<BlockCaptureEntityKind, BlockFieldFlags>
+computeDestroyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
+ const LangOptions &LangOpts);
+
+static void addBlockLayout(CharUnits align, CharUnits size,
+ const BlockDecl::Capture *capture, llvm::Type *type,
+ QualType fieldType,
+ SmallVectorImpl<BlockLayoutChunk> &Layout,
+ CGBlockInfo &Info, CodeGenModule &CGM) {
+ if (!capture) {
+ // 'this' capture.
+ Layout.push_back(BlockLayoutChunk(
+ align, size, capture, type, fieldType, BlockCaptureEntityKind::None,
+ BlockFieldFlags(), BlockCaptureEntityKind::None, BlockFieldFlags()));
+ return;
+ }
+
+ const LangOptions &LangOpts = CGM.getLangOpts();
+ BlockCaptureEntityKind CopyKind, DisposeKind;
+ BlockFieldFlags CopyFlags, DisposeFlags;
+
+ std::tie(CopyKind, CopyFlags) =
+ computeCopyInfoForBlockCapture(*capture, fieldType, LangOpts);
+ std::tie(DisposeKind, DisposeFlags) =
+ computeDestroyInfoForBlockCapture(*capture, fieldType, LangOpts);
+ Layout.push_back(BlockLayoutChunk(align, size, capture, type, fieldType,
+ CopyKind, CopyFlags, DisposeKind,
+ DisposeFlags));
+
+ if (Info.NoEscape)
+ return;
+
+ if (!Layout.back().isTrivial())
+ Info.NeedsCopyDispose = true;
+}
+
/// Determines if the given type is safe for constant capture in C++.
static bool isSafeForCXXConstantCapture(QualType type) {
const RecordType *recordType =
@@ -452,12 +477,10 @@ static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
if (CGM.getLangOpts().OpenCL) {
// The header is basically 'struct { int; int; generic void *;
// custom_fields; }'. Assert that struct is packed.
- auto GenericAS =
- CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic);
- auto GenPtrAlign =
- CharUnits::fromQuantity(CGM.getTarget().getPointerAlign(GenericAS) / 8);
- auto GenPtrSize =
- CharUnits::fromQuantity(CGM.getTarget().getPointerWidth(GenericAS) / 8);
+ auto GenPtrAlign = CharUnits::fromQuantity(
+ CGM.getTarget().getPointerAlign(LangAS::opencl_generic) / 8);
+ auto GenPtrSize = CharUnits::fromQuantity(
+ CGM.getTarget().getPointerWidth(LangAS::opencl_generic) / 8);
assert(CGM.getIntSize() <= GenPtrSize);
assert(CGM.getIntAlign() <= GenPtrAlign);
assert((2 * CGM.getIntSize()).isMultipleOf(GenPtrAlign));
@@ -471,10 +494,10 @@ static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
unsigned BlockAlign = GenPtrAlign.getQuantity();
if (auto *Helper =
CGM.getTargetCodeGenInfo().getTargetOpenCLBlockHelper()) {
- for (auto I : Helper->getCustomFieldTypes()) /* custom fields */ {
+ for (auto *I : Helper->getCustomFieldTypes()) /* custom fields */ {
// TargetOpenCLBlockHelp needs to make sure the struct is packed.
// If necessary, add padding fields to the custom fields.
- unsigned Align = CGM.getDataLayout().getABITypeAlignment(I);
+ unsigned Align = CGM.getDataLayout().getABITypeAlign(I).value();
if (BlockAlign < Align)
BlockAlign = Align;
assert(Offset % Align == 0);
@@ -541,6 +564,9 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
CGM.getLangOpts().getGC() == LangOptions::NonGC)
info.HasCapturedVariableLayout = true;
+ if (block->doesNotEscape())
+ info.NoEscape = true;
+
// Collect the layout chunks.
SmallVector<BlockLayoutChunk, 16> layout;
layout.reserve(block->capturesCXXThis() +
@@ -560,9 +586,8 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
auto TInfo = CGM.getContext().getTypeInfoInChars(thisType);
maxFieldAlign = std::max(maxFieldAlign, TInfo.Align);
- layout.push_back(BlockLayoutChunk(TInfo.Align, TInfo.Width,
- Qualifiers::OCL_None,
- nullptr, llvmType, thisType));
+ addBlockLayout(TInfo.Align, TInfo.Width, nullptr, llvmType, thisType,
+ layout, info, CGM);
}
// Next, all the block captures.
@@ -570,9 +595,6 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
const VarDecl *variable = CI.getVariable();
if (CI.isEscapingByref()) {
- // We have to copy/dispose of the __block reference.
- info.NeedsCopyDispose = true;
-
// Just use void* instead of a pointer to the byref type.
CharUnits align = CGM.getPointerAlign();
maxFieldAlign = std::max(maxFieldAlign, align);
@@ -581,72 +603,28 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
// the capture field type should always match.
assert(CGF && getCaptureFieldType(*CGF, CI) == variable->getType() &&
"capture type differs from the variable type");
- layout.push_back(BlockLayoutChunk(align, CGM.getPointerSize(),
- Qualifiers::OCL_None, &CI,
- CGM.VoidPtrTy, variable->getType()));
+ addBlockLayout(align, CGM.getPointerSize(), &CI, CGM.VoidPtrTy,
+ variable->getType(), layout, info, CGM);
continue;
}
// Otherwise, build a layout chunk with the size and alignment of
// the declaration.
if (llvm::Constant *constant = tryCaptureAsConstant(CGM, CGF, variable)) {
- info.Captures[variable] = CGBlockInfo::Capture::makeConstant(constant);
+ info.SortedCaptures.push_back(
+ CGBlockInfo::Capture::makeConstant(constant, &CI));
continue;
}
QualType VT = getCaptureFieldType(*CGF, CI);
- // If we have a lifetime qualifier, honor it for capture purposes.
- // That includes *not* copying it if it's __unsafe_unretained.
- Qualifiers::ObjCLifetime lifetime = VT.getObjCLifetime();
- if (lifetime) {
- switch (lifetime) {
- case Qualifiers::OCL_None: llvm_unreachable("impossible");
- case Qualifiers::OCL_ExplicitNone:
- case Qualifiers::OCL_Autoreleasing:
- break;
-
- case Qualifiers::OCL_Strong:
- case Qualifiers::OCL_Weak:
- info.NeedsCopyDispose = true;
- }
-
- // Block pointers require copy/dispose. So do Objective-C pointers.
- } else if (VT->isObjCRetainableType()) {
- // But honor the inert __unsafe_unretained qualifier, which doesn't
- // actually make it into the type system.
- if (VT->isObjCInertUnsafeUnretainedType()) {
- lifetime = Qualifiers::OCL_ExplicitNone;
- } else {
- info.NeedsCopyDispose = true;
- // used for mrr below.
- lifetime = Qualifiers::OCL_Strong;
- }
-
- // So do types that require non-trivial copy construction.
- } else if (CI.hasCopyExpr()) {
- info.NeedsCopyDispose = true;
- info.HasCXXObject = true;
- if (!VT->getAsCXXRecordDecl()->isExternallyVisible())
- info.CapturesNonExternalType = true;
-
- // So do C structs that require non-trivial copy construction or
- // destruction.
- } else if (VT.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct ||
- VT.isDestructedType() == QualType::DK_nontrivial_c_struct) {
- info.NeedsCopyDispose = true;
-
- // And so do types with destructors.
- } else if (CGM.getLangOpts().CPlusPlus) {
- if (const CXXRecordDecl *record = VT->getAsCXXRecordDecl()) {
- if (!record->hasTrivialDestructor()) {
+ if (CGM.getLangOpts().CPlusPlus)
+ if (const CXXRecordDecl *record = VT->getAsCXXRecordDecl())
+ if (CI.hasCopyExpr() || !record->hasTrivialDestructor()) {
info.HasCXXObject = true;
- info.NeedsCopyDispose = true;
if (!record->isExternallyVisible())
info.CapturesNonExternalType = true;
}
- }
- }
CharUnits size = C.getTypeSizeInChars(VT);
CharUnits align = C.getDeclAlign(variable);
@@ -656,8 +634,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
llvm::Type *llvmType =
CGM.getTypes().ConvertTypeForMem(VT);
- layout.push_back(
- BlockLayoutChunk(align, size, lifetime, &CI, llvmType, VT));
+ addBlockLayout(align, size, &CI, llvmType, VT, layout, info, CGM);
}
// If that was everything, we're done here.
@@ -665,6 +642,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
info.StructureType =
llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
info.CanBeGlobal = true;
+ info.buildCaptureMap();
return;
}
@@ -718,6 +696,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
// ...until we get to the alignment of the maximum field.
if (endAlign >= maxFieldAlign) {
+ ++li;
break;
}
}
@@ -770,6 +749,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
endAlign = getLowBit(blockSize);
}
+ info.buildCaptureMap();
info.StructureType =
llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
}
@@ -799,9 +779,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
IsOpenCL ? CGM.getOpenCLRuntime().getGenericVoidPointerType() : VoidPtrTy;
LangAS GenVoidPtrAddr = IsOpenCL ? LangAS::opencl_generic : LangAS::Default;
auto GenVoidPtrSize = CharUnits::fromQuantity(
- CGM.getTarget().getPointerWidth(
- CGM.getContext().getTargetAddressSpace(GenVoidPtrAddr)) /
- 8);
+ CGM.getTarget().getPointerWidth(GenVoidPtrAddr) / 8);
// Using the computed layout, generate the actual block function.
bool isLambdaConv = blockInfo.getBlockDecl()->isConversionFromLambda();
CodeGenFunction BlockCGF{CGM, true};
@@ -826,10 +804,10 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// If the block is non-escaping, set field 'isa 'to NSConcreteGlobalBlock
// and set the BLOCK_IS_GLOBAL bit of field 'flags'. Copying a non-escaping
// block just returns the original block and releasing it is a no-op.
- llvm::Constant *blockISA = blockInfo.getBlockDecl()->doesNotEscape()
+ llvm::Constant *blockISA = blockInfo.NoEscape
? CGM.getNSConcreteGlobalBlock()
: CGM.getNSConcreteStackBlock();
- isa = llvm::ConstantExpr::getBitCast(blockISA, VoidPtrTy);
+ isa = blockISA;
// Build the block descriptor.
descriptor = buildBlockDescriptor(CGM, blockInfo);
@@ -838,13 +816,13 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
flags = BLOCK_HAS_SIGNATURE;
if (blockInfo.HasCapturedVariableLayout)
flags |= BLOCK_HAS_EXTENDED_LAYOUT;
- if (blockInfo.needsCopyDisposeHelpers())
+ if (blockInfo.NeedsCopyDispose)
flags |= BLOCK_HAS_COPY_DISPOSE;
if (blockInfo.HasCXXObject)
flags |= BLOCK_HAS_CXX_OBJ;
if (blockInfo.UsesStret)
flags |= BLOCK_USE_STRET;
- if (blockInfo.getBlockDecl()->doesNotEscape())
+ if (blockInfo.NoEscape)
flags |= BLOCK_IS_NOESCAPE | BLOCK_IS_GLOBAL;
}
@@ -961,7 +939,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
if (CI.isNested())
byrefPointer = Builder.CreateLoad(src, "byref.capture");
else
- byrefPointer = Builder.CreateBitCast(src.getPointer(), VoidPtrTy);
+ byrefPointer = src.getPointer();
// Write that void* into the capture field.
Builder.CreateStore(byrefPointer, blockField);
@@ -1014,7 +992,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// Fake up a new variable so that EmitScalarInit doesn't think
// we're referring to the variable in its own initializer.
ImplicitParamDecl BlockFieldPseudoVar(getContext(), type,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
// We use one of these or the other depending on whether the
// reference is nested.
@@ -1033,7 +1011,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
}
// Push a cleanup for the capture if necessary.
- if (!blockInfo.NeedsCopyDispose)
+ if (!blockInfo.NoEscape && !blockInfo.NeedsCopyDispose)
continue;
// Ignore __block captures; there's nothing special in the on-stack block
@@ -1099,7 +1077,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
if (IsOpenCL) {
CGM.getOpenCLRuntime().recordBlockInfo(blockInfo.BlockExpression, InvokeFn,
- result);
+ result, blockInfo.StructureType);
}
return result;
@@ -1154,8 +1132,7 @@ llvm::Type *CodeGenModule::getGenericBlockLiteralType() {
SmallVector<llvm::Type *, 8> StructFields(
{IntTy, IntTy, getOpenCLRuntime().getGenericVoidPointerType()});
if (auto *Helper = getTargetCodeGenInfo().getTargetOpenCLBlockHelper()) {
- for (auto I : Helper->getCustomFieldTypes())
- StructFields.push_back(I);
+ llvm::append_range(StructFields, Helper->getCustomFieldTypes());
}
GenericBlockLiteralType = llvm::StructType::create(
StructFields, "struct.__opencl_block_literal_generic");
@@ -1210,8 +1187,8 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
}
} else {
// Bitcast the block literal to a generic block literal.
- BlockPtr = Builder.CreatePointerCast(
- BlockPtr, llvm::PointerType::get(GenBlockTy, 0), "block.literal");
+ BlockPtr =
+ Builder.CreatePointerCast(BlockPtr, UnqualPtrTy, "block.literal");
// Get pointer to the block invoke function
llvm::Value *FuncPtr = Builder.CreateStructGEP(GenBlockTy, BlockPtr, 3);
@@ -1229,12 +1206,6 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
const CGFunctionInfo &FnInfo =
CGM.getTypes().arrangeBlockFunctionCall(Args, FuncTy);
- // Cast the function pointer to the right type.
- llvm::Type *BlockFTy = CGM.getTypes().GetFunctionType(FnInfo);
-
- llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
- Func = Builder.CreatePointerCast(Func, BlockFTyPtr);
-
// Prepare the callee.
CGCallee Callee(CGCalleeInfo(), Func);
@@ -1257,10 +1228,8 @@ Address CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable) {
// to byref*.
auto &byrefInfo = getBlockByrefInfo(variable);
- addr = Address(Builder.CreateLoad(addr), byrefInfo.ByrefAlignment);
-
- auto byrefPointerType = llvm::PointerType::get(byrefInfo.Type, 0);
- addr = Builder.CreateBitCast(addr, byrefPointerType, "byref.addr");
+ addr = Address(Builder.CreateLoad(addr), byrefInfo.Type,
+ byrefInfo.ByrefAlignment);
addr = emitBlockByrefAddress(addr, byrefInfo, /*follow*/ true,
variable->getName());
@@ -1351,7 +1320,7 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
fields.add(buildBlockDescriptor(CGM, blockInfo));
} else if (auto *Helper =
CGM.getTargetCodeGenInfo().getTargetOpenCLBlockHelper()) {
- for (auto I : Helper->getCustomFieldValues(CGM, blockInfo)) {
+ for (auto *I : Helper->getCustomFieldValues(CGM, blockInfo)) {
fields.add(I);
}
}
@@ -1396,7 +1365,8 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
if (CGM.getContext().getLangOpts().OpenCL)
CGM.getOpenCLRuntime().recordBlockInfo(
blockInfo.BlockExpression,
- cast<llvm::Function>(blockFn->stripPointerCasts()), Result);
+ cast<llvm::Function>(blockFn->stripPointerCasts()), Result,
+ literal->getValueType());
return Result;
}
@@ -1425,7 +1395,8 @@ void CodeGenFunction::setBlockContextParameter(const ImplicitParamDecl *D,
// directly as BlockPointer.
BlockPointer = Builder.CreatePointerCast(
arg,
- BlockInfo->StructureType->getPointerTo(
+ llvm::PointerType::get(
+ getLLVMContext(),
getContext().getLangOpts().OpenCL
? getContext().getTargetAddressSpace(LangAS::opencl_generic)
: 0),
@@ -1435,15 +1406,12 @@ void CodeGenFunction::setBlockContextParameter(const ImplicitParamDecl *D,
Address CodeGenFunction::LoadBlockStruct() {
assert(BlockInfo && "not in a block invocation function!");
assert(BlockPointer && "no block pointer set!");
- return Address(BlockPointer, BlockInfo->BlockAlign);
+ return Address(BlockPointer, BlockInfo->StructureType, BlockInfo->BlockAlign);
}
-llvm::Function *
-CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
- const CGBlockInfo &blockInfo,
- const DeclMapTy &ldm,
- bool IsLambdaConversionToBlock,
- bool BuildGlobalBlock) {
+llvm::Function *CodeGenFunction::GenerateBlockFunction(
+ GlobalDecl GD, const CGBlockInfo &blockInfo, const DeclMapTy &ldm,
+ bool IsLambdaConversionToBlock, bool BuildGlobalBlock) {
const BlockDecl *blockDecl = blockInfo.getBlockDecl();
CurGD = GD;
@@ -1482,7 +1450,7 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
ImplicitParamDecl SelfDecl(getContext(), const_cast<BlockDecl *>(blockDecl),
SourceLocation(), II, selfTy,
- ImplicitParamDecl::ObjCSelf);
+ ImplicitParamKind::ObjCSelf);
args.push_back(&SelfDecl);
// Now add the rest of the parameters.
@@ -1654,6 +1622,11 @@ computeCopyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
// For all other types, the memcpy is fine.
return std::make_pair(BlockCaptureEntityKind::None, BlockFieldFlags());
+ // Honor the inert __unsafe_unretained qualifier, which doesn't actually
+ // make it into the type system.
+ if (T->isObjCInertUnsafeUnretainedType())
+ return std::make_pair(BlockCaptureEntityKind::None, BlockFieldFlags());
+
// Special rules for ARC captures:
Qualifiers QS = T.getQualifiers();
@@ -1669,34 +1642,6 @@ computeCopyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
llvm_unreachable("after exhaustive PrimitiveCopyKind switch");
}
-static std::pair<BlockCaptureEntityKind, BlockFieldFlags>
-computeDestroyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
- const LangOptions &LangOpts);
-
-/// Find the set of block captures that need to be explicitly copied or destroy.
-static void findBlockCapturedManagedEntities(
- const CGBlockInfo &BlockInfo, const LangOptions &LangOpts,
- SmallVectorImpl<BlockCaptureManagedEntity> &ManagedCaptures) {
- for (const auto &CI : BlockInfo.getBlockDecl()->captures()) {
- const VarDecl *Variable = CI.getVariable();
- const CGBlockInfo::Capture &Capture = BlockInfo.getCapture(Variable);
- if (Capture.isConstant())
- continue;
-
- QualType VT = Capture.fieldType();
- auto CopyInfo = computeCopyInfoForBlockCapture(CI, VT, LangOpts);
- auto DisposeInfo = computeDestroyInfoForBlockCapture(CI, VT, LangOpts);
- if (CopyInfo.first != BlockCaptureEntityKind::None ||
- DisposeInfo.first != BlockCaptureEntityKind::None)
- ManagedCaptures.emplace_back(CopyInfo.first, DisposeInfo.first,
- CopyInfo.second, DisposeInfo.second, CI,
- Capture);
- }
-
- // Sort the captures by offset.
- llvm::sort(ManagedCaptures);
-}
-
namespace {
/// Release a __block variable.
struct CallBlockRelease final : EHScopeStack::Cleanup {
@@ -1713,7 +1658,6 @@ struct CallBlockRelease final : EHScopeStack::Cleanup {
llvm::Value *BlockVarAddr;
if (LoadBlockVarAddr) {
BlockVarAddr = CGF.Builder.CreateLoad(Addr);
- BlockVarAddr = CGF.Builder.CreateBitCast(BlockVarAddr, CGF.VoidPtrTy);
} else {
BlockVarAddr = Addr.getPointer();
}
@@ -1732,13 +1676,13 @@ bool CodeGenFunction::cxxDestructorCanThrow(QualType T) {
}
// Return a string that has the information about a capture.
-static std::string getBlockCaptureStr(const BlockCaptureManagedEntity &E,
+static std::string getBlockCaptureStr(const CGBlockInfo::Capture &Cap,
CaptureStrKind StrKind,
CharUnits BlockAlignment,
CodeGenModule &CGM) {
std::string Str;
ASTContext &Ctx = CGM.getContext();
- const BlockDecl::Capture &CI = *E.CI;
+ const BlockDecl::Capture &CI = *Cap.Cap;
QualType CaptureTy = CI.getVariable()->getType();
BlockCaptureEntityKind Kind;
@@ -1747,15 +1691,16 @@ static std::string getBlockCaptureStr(const BlockCaptureManagedEntity &E,
// CaptureStrKind::Merged should be passed only when the operations and the
// flags are the same for copy and dispose.
assert((StrKind != CaptureStrKind::Merged ||
- (E.CopyKind == E.DisposeKind && E.CopyFlags == E.DisposeFlags)) &&
+ (Cap.CopyKind == Cap.DisposeKind &&
+ Cap.CopyFlags == Cap.DisposeFlags)) &&
"different operations and flags");
if (StrKind == CaptureStrKind::DisposeHelper) {
- Kind = E.DisposeKind;
- Flags = E.DisposeFlags;
+ Kind = Cap.DisposeKind;
+ Flags = Cap.DisposeFlags;
} else {
- Kind = E.CopyKind;
- Flags = E.CopyFlags;
+ Kind = Cap.CopyKind;
+ Flags = Cap.CopyFlags;
}
switch (Kind) {
@@ -1763,7 +1708,7 @@ static std::string getBlockCaptureStr(const BlockCaptureManagedEntity &E,
Str += "c";
SmallString<256> TyStr;
llvm::raw_svector_ostream Out(TyStr);
- CGM.getCXXABI().getMangleContext().mangleTypeName(CaptureTy, Out);
+ CGM.getCXXABI().getMangleContext().mangleCanonicalTypeName(CaptureTy, Out);
Str += llvm::to_string(TyStr.size()) + TyStr.c_str();
break;
}
@@ -1803,8 +1748,7 @@ static std::string getBlockCaptureStr(const BlockCaptureManagedEntity &E,
}
case BlockCaptureEntityKind::NonTrivialCStruct: {
bool IsVolatile = CaptureTy.isVolatileQualified();
- CharUnits Alignment =
- BlockAlignment.alignmentAtOffset(E.Capture->getOffset());
+ CharUnits Alignment = BlockAlignment.alignmentAtOffset(Cap.getOffset());
Str += "n";
std::string FuncStr;
@@ -1829,7 +1773,7 @@ static std::string getBlockCaptureStr(const BlockCaptureManagedEntity &E,
}
static std::string getCopyDestroyHelperFuncName(
- const SmallVectorImpl<BlockCaptureManagedEntity> &Captures,
+ const SmallVectorImpl<CGBlockInfo::Capture> &Captures,
CharUnits BlockAlignment, CaptureStrKind StrKind, CodeGenModule &CGM) {
assert((StrKind == CaptureStrKind::CopyHelper ||
StrKind == CaptureStrKind::DisposeHelper) &&
@@ -1843,9 +1787,11 @@ static std::string getCopyDestroyHelperFuncName(
Name += "a";
Name += llvm::to_string(BlockAlignment.getQuantity()) + "_";
- for (const BlockCaptureManagedEntity &E : Captures) {
- Name += llvm::to_string(E.Capture->getOffset().getQuantity());
- Name += getBlockCaptureStr(E, StrKind, BlockAlignment, CGM);
+ for (auto &Cap : Captures) {
+ if (Cap.isConstantOrTrivial())
+ continue;
+ Name += llvm::to_string(Cap.getOffset().getQuantity());
+ Name += getBlockCaptureStr(Cap, StrKind, BlockAlignment, CGM);
}
return Name;
@@ -1916,23 +1862,21 @@ static void setBlockHelperAttributesVisibility(bool CapturesNonExternalType,
/// the contents of an individual __block variable to the heap.
llvm::Constant *
CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
- SmallVector<BlockCaptureManagedEntity, 4> CopiedCaptures;
- findBlockCapturedManagedEntities(blockInfo, getLangOpts(), CopiedCaptures);
- std::string FuncName =
- getCopyDestroyHelperFuncName(CopiedCaptures, blockInfo.BlockAlign,
- CaptureStrKind::CopyHelper, CGM);
+ std::string FuncName = getCopyDestroyHelperFuncName(
+ blockInfo.SortedCaptures, blockInfo.BlockAlign,
+ CaptureStrKind::CopyHelper, CGM);
if (llvm::GlobalValue *Func = CGM.getModule().getNamedValue(FuncName))
- return llvm::ConstantExpr::getBitCast(Func, VoidPtrTy);
+ return Func;
ASTContext &C = getContext();
QualType ReturnTy = C.VoidTy;
FunctionArgList args;
- ImplicitParamDecl DstDecl(C, C.VoidPtrTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl DstDecl(C, C.VoidPtrTy, ImplicitParamKind::Other);
args.push_back(&DstDecl);
- ImplicitParamDecl SrcDecl(C, C.VoidPtrTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl SrcDecl(C, C.VoidPtrTy, ImplicitParamKind::Other);
args.push_back(&SrcDecl);
const CGFunctionInfo &FI =
@@ -1957,27 +1901,27 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
StartFunction(GlobalDecl(), ReturnTy, Fn, FI, args);
auto AL = ApplyDebugLocation::CreateArtificial(*this);
- llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
-
Address src = GetAddrOfLocalVar(&SrcDecl);
- src = Address(Builder.CreateLoad(src), blockInfo.BlockAlign);
- src = Builder.CreateBitCast(src, structPtrTy, "block.source");
+ src = Address(Builder.CreateLoad(src), blockInfo.StructureType,
+ blockInfo.BlockAlign);
Address dst = GetAddrOfLocalVar(&DstDecl);
- dst = Address(Builder.CreateLoad(dst), blockInfo.BlockAlign);
- dst = Builder.CreateBitCast(dst, structPtrTy, "block.dest");
+ dst = Address(Builder.CreateLoad(dst), blockInfo.StructureType,
+ blockInfo.BlockAlign);
+
+ for (auto &capture : blockInfo.SortedCaptures) {
+ if (capture.isConstantOrTrivial())
+ continue;
- for (const auto &CopiedCapture : CopiedCaptures) {
- const BlockDecl::Capture &CI = *CopiedCapture.CI;
- const CGBlockInfo::Capture &capture = *CopiedCapture.Capture;
+ const BlockDecl::Capture &CI = *capture.Cap;
QualType captureType = CI.getVariable()->getType();
- BlockFieldFlags flags = CopiedCapture.CopyFlags;
+ BlockFieldFlags flags = capture.CopyFlags;
unsigned index = capture.getIndex();
Address srcField = Builder.CreateStructGEP(src, index);
Address dstField = Builder.CreateStructGEP(dst, index);
- switch (CopiedCapture.CopyKind) {
+ switch (capture.CopyKind) {
case BlockCaptureEntityKind::CXXRecord:
// If there's an explicit copy expression, we do that.
assert(CI.getCopyExpr() && "copy expression for variable is missing");
@@ -2021,9 +1965,7 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
}
case BlockCaptureEntityKind::BlockObject: {
llvm::Value *srcValue = Builder.CreateLoad(srcField, "blockcopy.src");
- srcValue = Builder.CreateBitCast(srcValue, VoidPtrTy);
- llvm::Value *dstAddr =
- Builder.CreateBitCast(dstField.getPointer(), VoidPtrTy);
+ llvm::Value *dstAddr = dstField.getPointer();
llvm::Value *args[] = {
dstAddr, srcValue, llvm::ConstantInt::get(Int32Ty, flags.getBitMask())
};
@@ -2040,13 +1982,13 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
// Ensure that we destroy the copied object if an exception is thrown later
// in the helper function.
- pushCaptureCleanup(CopiedCapture.CopyKind, dstField, captureType, flags,
+ pushCaptureCleanup(capture.CopyKind, dstField, captureType, flags,
/*ForCopyHelper*/ true, CI.getVariable(), *this);
}
FinishFunction();
- return llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
+ return Fn;
}
static BlockFieldFlags
@@ -2085,8 +2027,10 @@ computeDestroyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
BlockFieldFlags());
case QualType::DK_none: {
// Non-ARC captures are strong, and we need to use _Block_object_dispose.
+ // But honor the inert __unsafe_unretained qualifier, which doesn't actually
+ // make it into the type system.
if (T->isObjCRetainableType() && !T.getQualifiers().hasObjCLifetime() &&
- !LangOpts.ObjCAutoRefCount)
+ !LangOpts.ObjCAutoRefCount && !T->isObjCInertUnsafeUnretainedType())
return std::make_pair(BlockCaptureEntityKind::BlockObject,
getBlockFieldFlagsForObjCObjectPointer(CI, T));
// Otherwise, we have nothing to do.
@@ -2105,21 +2049,19 @@ computeDestroyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
/// variable.
llvm::Constant *
CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
- SmallVector<BlockCaptureManagedEntity, 4> DestroyedCaptures;
- findBlockCapturedManagedEntities(blockInfo, getLangOpts(), DestroyedCaptures);
- std::string FuncName =
- getCopyDestroyHelperFuncName(DestroyedCaptures, blockInfo.BlockAlign,
- CaptureStrKind::DisposeHelper, CGM);
+ std::string FuncName = getCopyDestroyHelperFuncName(
+ blockInfo.SortedCaptures, blockInfo.BlockAlign,
+ CaptureStrKind::DisposeHelper, CGM);
if (llvm::GlobalValue *Func = CGM.getModule().getNamedValue(FuncName))
- return llvm::ConstantExpr::getBitCast(Func, VoidPtrTy);
+ return Func;
ASTContext &C = getContext();
QualType ReturnTy = C.VoidTy;
FunctionArgList args;
- ImplicitParamDecl SrcDecl(C, C.VoidPtrTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl SrcDecl(C, C.VoidPtrTy, ImplicitParamKind::Other);
args.push_back(&SrcDecl);
const CGFunctionInfo &FI =
@@ -2145,22 +2087,22 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
auto AL = ApplyDebugLocation::CreateArtificial(*this);
- llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
-
Address src = GetAddrOfLocalVar(&SrcDecl);
- src = Address(Builder.CreateLoad(src), blockInfo.BlockAlign);
- src = Builder.CreateBitCast(src, structPtrTy, "block");
+ src = Address(Builder.CreateLoad(src), blockInfo.StructureType,
+ blockInfo.BlockAlign);
CodeGenFunction::RunCleanupsScope cleanups(*this);
- for (const auto &DestroyedCapture : DestroyedCaptures) {
- const BlockDecl::Capture &CI = *DestroyedCapture.CI;
- const CGBlockInfo::Capture &capture = *DestroyedCapture.Capture;
- BlockFieldFlags flags = DestroyedCapture.DisposeFlags;
+ for (auto &capture : blockInfo.SortedCaptures) {
+ if (capture.isConstantOrTrivial())
+ continue;
+
+ const BlockDecl::Capture &CI = *capture.Cap;
+ BlockFieldFlags flags = capture.DisposeFlags;
Address srcField = Builder.CreateStructGEP(src, capture.getIndex());
- pushCaptureCleanup(DestroyedCapture.DisposeKind, srcField,
+ pushCaptureCleanup(capture.DisposeKind, srcField,
CI.getVariable()->getType(), flags,
/*ForCopyHelper*/ false, CI.getVariable(), *this);
}
@@ -2169,7 +2111,7 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
FinishFunction();
- return llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
+ return Fn;
}
namespace {
@@ -2184,9 +2126,9 @@ public:
void emitCopy(CodeGenFunction &CGF, Address destField,
Address srcField) override {
- destField = CGF.Builder.CreateBitCast(destField, CGF.VoidPtrTy);
+ destField = destField.withElementType(CGF.Int8Ty);
- srcField = CGF.Builder.CreateBitCast(srcField, CGF.VoidPtrPtrTy);
+ srcField = srcField.withElementType(CGF.Int8PtrTy);
llvm::Value *srcValue = CGF.Builder.CreateLoad(srcField);
unsigned flags = (Flags | BLOCK_BYREF_CALLER).getBitMask();
@@ -2199,7 +2141,7 @@ public:
}
void emitDispose(CodeGenFunction &CGF, Address field) override {
- field = CGF.Builder.CreateBitCast(field, CGF.Int8PtrTy->getPointerTo(0));
+ field = field.withElementType(CGF.Int8PtrTy);
llvm::Value *value = CGF.Builder.CreateLoad(field);
CGF.BuildBlockRelease(value, Flags | BLOCK_BYREF_CALLER, false);
@@ -2361,10 +2303,10 @@ generateByrefCopyHelper(CodeGenFunction &CGF, const BlockByrefInfo &byrefInfo,
QualType ReturnTy = Context.VoidTy;
FunctionArgList args;
- ImplicitParamDecl Dst(Context, Context.VoidPtrTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl Dst(Context, Context.VoidPtrTy, ImplicitParamKind::Other);
args.push_back(&Dst);
- ImplicitParamDecl Src(Context, Context.VoidPtrTy, ImplicitParamDecl::Other);
+ ImplicitParamDecl Src(Context, Context.VoidPtrTy, ImplicitParamKind::Other);
args.push_back(&Src);
const CGFunctionInfo &FI =
@@ -2389,30 +2331,26 @@ generateByrefCopyHelper(CodeGenFunction &CGF, const BlockByrefInfo &byrefInfo,
auto AL = ApplyDebugLocation::CreateArtificial(CGF);
if (generator.needsCopy()) {
- llvm::Type *byrefPtrType = byrefInfo.Type->getPointerTo(0);
-
// dst->x
Address destField = CGF.GetAddrOfLocalVar(&Dst);
- destField = Address(CGF.Builder.CreateLoad(destField),
+ destField = Address(CGF.Builder.CreateLoad(destField), byrefInfo.Type,
byrefInfo.ByrefAlignment);
- destField = CGF.Builder.CreateBitCast(destField, byrefPtrType);
- destField = CGF.emitBlockByrefAddress(destField, byrefInfo, false,
- "dest-object");
+ destField =
+ CGF.emitBlockByrefAddress(destField, byrefInfo, false, "dest-object");
// src->x
Address srcField = CGF.GetAddrOfLocalVar(&Src);
- srcField = Address(CGF.Builder.CreateLoad(srcField),
+ srcField = Address(CGF.Builder.CreateLoad(srcField), byrefInfo.Type,
byrefInfo.ByrefAlignment);
- srcField = CGF.Builder.CreateBitCast(srcField, byrefPtrType);
- srcField = CGF.emitBlockByrefAddress(srcField, byrefInfo, false,
- "src-object");
+ srcField =
+ CGF.emitBlockByrefAddress(srcField, byrefInfo, false, "src-object");
generator.emitCopy(CGF, destField, srcField);
}
CGF.FinishFunction();
- return llvm::ConstantExpr::getBitCast(Fn, CGF.Int8PtrTy);
+ return Fn;
}
/// Build the copy helper for a __block variable.
@@ -2433,7 +2371,7 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
FunctionArgList args;
ImplicitParamDecl Src(CGF.getContext(), Context.VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
args.push_back(&Src);
const CGFunctionInfo &FI =
@@ -2459,9 +2397,8 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
if (generator.needsDispose()) {
Address addr = CGF.GetAddrOfLocalVar(&Src);
- addr = Address(CGF.Builder.CreateLoad(addr), byrefInfo.ByrefAlignment);
- auto byrefPtrType = byrefInfo.Type->getPointerTo(0);
- addr = CGF.Builder.CreateBitCast(addr, byrefPtrType);
+ addr = Address(CGF.Builder.CreateLoad(addr), byrefInfo.Type,
+ byrefInfo.ByrefAlignment);
addr = CGF.emitBlockByrefAddress(addr, byrefInfo, false, "object");
generator.emitDispose(CGF, addr);
@@ -2469,7 +2406,7 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
CGF.FinishFunction();
- return llvm::ConstantExpr::getBitCast(Fn, CGF.Int8PtrTy);
+ return Fn;
}
/// Build the dispose helper for a __block variable.
@@ -2607,7 +2544,8 @@ Address CodeGenFunction::emitBlockByrefAddress(Address baseAddr,
// Chase the forwarding address if requested.
if (followForward) {
Address forwardingAddr = Builder.CreateStructGEP(baseAddr, 1, "forwarding");
- baseAddr = Address(Builder.CreateLoad(forwardingAddr), info.ByrefAlignment);
+ baseAddr = Address(Builder.CreateLoad(forwardingAddr), info.Type,
+ info.ByrefAlignment);
}
return Builder.CreateStructGEP(baseAddr, info.FieldIndex, name);
@@ -2643,11 +2581,11 @@ const BlockByrefInfo &CodeGenFunction::getBlockByrefInfo(const VarDecl *D) {
SmallVector<llvm::Type *, 8> types;
// void *__isa;
- types.push_back(Int8PtrTy);
+ types.push_back(VoidPtrTy);
size += getPointerSize();
// void *__forwarding;
- types.push_back(llvm::PointerType::getUnqual(byrefType));
+ types.push_back(VoidPtrTy);
size += getPointerSize();
// int32_t __flags;
@@ -2662,11 +2600,11 @@ const BlockByrefInfo &CodeGenFunction::getBlockByrefInfo(const VarDecl *D) {
bool hasCopyAndDispose = getContext().BlockRequiresCopying(Ty, D);
if (hasCopyAndDispose) {
/// void *__copy_helper;
- types.push_back(Int8PtrTy);
+ types.push_back(VoidPtrTy);
size += getPointerSize();
/// void *__destroy_helper;
- types.push_back(Int8PtrTy);
+ types.push_back(VoidPtrTy);
size += getPointerSize();
}
@@ -2675,7 +2613,7 @@ const BlockByrefInfo &CodeGenFunction::getBlockByrefInfo(const VarDecl *D) {
if (getContext().getByrefLifetime(Ty, Lifetime, HasByrefExtendedLayout) &&
HasByrefExtendedLayout) {
/// void *__byref_variable_layout;
- types.push_back(Int8PtrTy);
+ types.push_back(VoidPtrTy);
size += CharUnits::fromQuantity(PointerSizeInBytes);
}
@@ -2695,8 +2633,8 @@ const BlockByrefInfo &CodeGenFunction::getBlockByrefInfo(const VarDecl *D) {
size = varOffset;
// Conversely, we might have to prevent LLVM from inserting padding.
- } else if (CGM.getDataLayout().getABITypeAlignment(varTy)
- > varAlign.getQuantity()) {
+ } else if (CGM.getDataLayout().getABITypeAlign(varTy) >
+ uint64_t(varAlign.getQuantity())) {
packed = true;
}
types.push_back(varTy);
@@ -2721,8 +2659,7 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
Address addr = emission.Addr;
// That's an alloca of the byref structure type.
- llvm::StructType *byrefType = cast<llvm::StructType>(
- cast<llvm::PointerType>(addr.getPointer()->getType())->getElementType());
+ llvm::StructType *byrefType = cast<llvm::StructType>(addr.getElementType());
unsigned nextHeaderIndex = 0;
CharUnits nextHeaderOffset;
@@ -2825,10 +2762,8 @@ void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
void CodeGenFunction::BuildBlockRelease(llvm::Value *V, BlockFieldFlags flags,
bool CanThrow) {
llvm::FunctionCallee F = CGM.getBlockObjectDispose();
- llvm::Value *args[] = {
- Builder.CreateBitCast(V, Int8PtrTy),
- llvm::ConstantInt::get(Int32Ty, flags.getBitMask())
- };
+ llvm::Value *args[] = {V,
+ llvm::ConstantInt::get(Int32Ty, flags.getBitMask())};
if (CanThrow)
EmitRuntimeCallOrInvoke(F, args);
@@ -2910,8 +2845,8 @@ llvm::Constant *CodeGenModule::getNSConcreteGlobalBlock() {
if (NSConcreteGlobalBlock)
return NSConcreteGlobalBlock;
- NSConcreteGlobalBlock =
- GetOrCreateLLVMGlobal("_NSConcreteGlobalBlock", Int8PtrTy, 0, nullptr);
+ NSConcreteGlobalBlock = GetOrCreateLLVMGlobal(
+ "_NSConcreteGlobalBlock", Int8PtrTy, LangAS::Default, nullptr);
configureBlocksRuntimeObject(*this, NSConcreteGlobalBlock);
return NSConcreteGlobalBlock;
}
@@ -2920,8 +2855,8 @@ llvm::Constant *CodeGenModule::getNSConcreteStackBlock() {
if (NSConcreteStackBlock)
return NSConcreteStackBlock;
- NSConcreteStackBlock =
- GetOrCreateLLVMGlobal("_NSConcreteStackBlock", Int8PtrTy, 0, nullptr);
+ NSConcreteStackBlock = GetOrCreateLLVMGlobal(
+ "_NSConcreteStackBlock", Int8PtrTy, LangAS::Default, nullptr);
configureBlocksRuntimeObject(*this, NSConcreteStackBlock);
return NSConcreteStackBlock;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h
index 698ecd3d926a..4ef1ae9f3365 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.h
@@ -26,14 +26,7 @@
#include "clang/Basic/TargetInfo.h"
namespace llvm {
-class Constant;
-class Function;
-class GlobalValue;
-class DataLayout;
-class FunctionType;
-class PointerType;
class Value;
-class LLVMContext;
}
namespace clang {
@@ -148,6 +141,17 @@ public:
CharUnits FieldOffset;
};
+/// Represents a type of copy/destroy operation that should be performed for an
+/// entity that's captured by a block.
+enum class BlockCaptureEntityKind {
+ None,
+ CXXRecord, // Copy or destroy
+ ARCWeak,
+ ARCStrong,
+ NonTrivialCStruct,
+ BlockObject, // Assign or release
+};
+
/// CGBlockInfo - Information to generate a block literal.
class CGBlockInfo {
public:
@@ -197,20 +201,40 @@ public:
return FieldType;
}
- static Capture makeIndex(unsigned index, CharUnits offset,
- QualType FieldType) {
+ static Capture
+ makeIndex(unsigned index, CharUnits offset, QualType FieldType,
+ BlockCaptureEntityKind CopyKind, BlockFieldFlags CopyFlags,
+ BlockCaptureEntityKind DisposeKind, BlockFieldFlags DisposeFlags,
+ const BlockDecl::Capture *Cap) {
Capture v;
v.Data = (index << 1) | 1;
v.Offset = offset.getQuantity();
v.FieldType = FieldType;
+ v.CopyKind = CopyKind;
+ v.CopyFlags = CopyFlags;
+ v.DisposeKind = DisposeKind;
+ v.DisposeFlags = DisposeFlags;
+ v.Cap = Cap;
return v;
}
- static Capture makeConstant(llvm::Value *value) {
+ static Capture makeConstant(llvm::Value *value,
+ const BlockDecl::Capture *Cap) {
Capture v;
v.Data = reinterpret_cast<uintptr_t>(value);
+ v.Cap = Cap;
return v;
}
+
+ bool isConstantOrTrivial() const {
+ return CopyKind == BlockCaptureEntityKind::None &&
+ DisposeKind == BlockCaptureEntityKind::None;
+ }
+
+ BlockCaptureEntityKind CopyKind = BlockCaptureEntityKind::None,
+ DisposeKind = BlockCaptureEntityKind::None;
+ BlockFieldFlags CopyFlags, DisposeFlags;
+ const BlockDecl::Capture *Cap;
};
/// CanBeGlobal - True if the block can be global, i.e. it has
@@ -221,6 +245,9 @@ public:
/// dispose helper functions if the block were escaping.
bool NeedsCopyDispose : 1;
+ /// Indicates whether the block is non-escaping.
+ bool NoEscape : 1;
+
/// HasCXXObject - True if the block's custom copy/dispose functions
/// need to be run even in GC mode.
bool HasCXXObject : 1;
@@ -238,8 +265,11 @@ public:
/// functions.
bool CapturesNonExternalType : 1;
- /// The mapping of allocated indexes within the block.
- llvm::DenseMap<const VarDecl*, Capture> Captures;
+ /// Mapping from variables to pointers to captures in SortedCaptures.
+ llvm::DenseMap<const VarDecl *, Capture *> Captures;
+
+ /// The block's captures. Non-constant captures are sorted by their offsets.
+ llvm::SmallVector<Capture, 4> SortedCaptures;
Address LocalAddress;
llvm::StructType *StructureType;
@@ -257,20 +287,18 @@ public:
// This could be zero if no forced alignment is required.
CharUnits BlockHeaderForcedGapSize;
- /// The next block in the block-info chain. Invalid if this block
- /// info is not part of the CGF's block-info chain, which is true
- /// if it corresponds to a global block or a block whose expression
- /// has been encountered.
- CGBlockInfo *NextBlockInfo;
+ void buildCaptureMap() {
+ for (auto &C : SortedCaptures)
+ Captures[C.Cap->getVariable()] = &C;
+ }
const Capture &getCapture(const VarDecl *var) const {
return const_cast<CGBlockInfo*>(this)->getCapture(var);
}
Capture &getCapture(const VarDecl *var) {
- llvm::DenseMap<const VarDecl*, Capture>::iterator
- it = Captures.find(var);
+ auto it = Captures.find(var);
assert(it != Captures.end() && "no entry for variable!");
- return it->second;
+ return *it->second;
}
const BlockDecl *getBlockDecl() const { return Block; }
@@ -281,11 +309,6 @@ public:
}
CGBlockInfo(const BlockDecl *blockDecl, StringRef Name);
-
- // Indicates whether the block needs a custom copy or dispose function.
- bool needsCopyDisposeHelpers() const {
- return NeedsCopyDispose && !Block->doesNotEscape();
- }
};
} // end namespace CodeGen
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h b/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h
index 4fad44a105cd..bf5ab171d720 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h
@@ -9,10 +9,11 @@
#ifndef LLVM_CLANG_LIB_CODEGEN_CGBUILDER_H
#define LLVM_CLANG_LIB_CODEGEN_CGBUILDER_H
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/IRBuilder.h"
#include "Address.h"
#include "CodeGenTypeCache.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Type.h"
namespace clang {
namespace CodeGen {
@@ -31,6 +32,7 @@ public:
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
llvm::BasicBlock *BB,
llvm::BasicBlock::iterator InsertPt) const override;
+
private:
CodeGenFunction *CGF = nullptr;
};
@@ -44,17 +46,18 @@ class CGBuilderTy : public CGBuilderBaseTy {
/// Storing a reference to the type cache here makes it a lot easier
/// to build natural-feeling, target-specific IR.
const CodeGenTypeCache &TypeCache;
+
public:
CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::LLVMContext &C)
- : CGBuilderBaseTy(C), TypeCache(TypeCache) {}
- CGBuilderTy(const CodeGenTypeCache &TypeCache,
- llvm::LLVMContext &C, const llvm::ConstantFolder &F,
+ : CGBuilderBaseTy(C), TypeCache(TypeCache) {}
+ CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::LLVMContext &C,
+ const llvm::ConstantFolder &F,
const CGBuilderInserterTy &Inserter)
- : CGBuilderBaseTy(C, F, Inserter), TypeCache(TypeCache) {}
+ : CGBuilderBaseTy(C, F, Inserter), TypeCache(TypeCache) {}
CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::Instruction *I)
- : CGBuilderBaseTy(I), TypeCache(TypeCache) {}
+ : CGBuilderBaseTy(I), TypeCache(TypeCache) {}
CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::BasicBlock *BB)
- : CGBuilderBaseTy(BB), TypeCache(TypeCache) {}
+ : CGBuilderBaseTy(BB), TypeCache(TypeCache) {}
llvm::ConstantInt *getSize(CharUnits N) {
return llvm::ConstantInt::get(TypeCache.SizeTy, N.getQuantity());
@@ -86,7 +89,6 @@ public:
llvm::LoadInst *CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr,
CharUnits Align,
const llvm::Twine &Name = "") {
- assert(Addr->getType()->getPointerElementType() == Ty);
return CreateAlignedLoad(Ty, Addr, Align.getAsAlign(), Name);
}
@@ -100,7 +102,8 @@ public:
using CGBuilderBaseTy::CreateAlignedStore;
llvm::StoreInst *CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr,
- CharUnits Align, bool IsVolatile = false) {
+ CharUnits Align,
+ bool IsVolatile = false) {
return CreateAlignedStore(Val, Addr, Align.getAsAlign(), IsVolatile);
}
@@ -115,66 +118,47 @@ public:
/// Emit a load from an i1 flag variable.
llvm::LoadInst *CreateFlagLoad(llvm::Value *Addr,
const llvm::Twine &Name = "") {
- assert(Addr->getType()->getPointerElementType() == getInt1Ty());
return CreateAlignedLoad(getInt1Ty(), Addr, CharUnits::One(), Name);
}
/// Emit a store to an i1 flag variable.
llvm::StoreInst *CreateFlagStore(bool Value, llvm::Value *Addr) {
- assert(Addr->getType()->getPointerElementType() == getInt1Ty());
return CreateAlignedStore(getInt1(Value), Addr, CharUnits::One());
}
- // Temporarily use old signature; clang will be updated to an Address overload
- // in a subsequent patch.
llvm::AtomicCmpXchgInst *
- CreateAtomicCmpXchg(llvm::Value *Ptr, llvm::Value *Cmp, llvm::Value *New,
+ CreateAtomicCmpXchg(Address Addr, llvm::Value *Cmp, llvm::Value *New,
llvm::AtomicOrdering SuccessOrdering,
llvm::AtomicOrdering FailureOrdering,
llvm::SyncScope::ID SSID = llvm::SyncScope::System) {
return CGBuilderBaseTy::CreateAtomicCmpXchg(
- Ptr, Cmp, New, llvm::MaybeAlign(), SuccessOrdering, FailureOrdering,
- SSID);
+ Addr.getPointer(), Cmp, New, Addr.getAlignment().getAsAlign(),
+ SuccessOrdering, FailureOrdering, SSID);
}
- // Temporarily use old signature; clang will be updated to an Address overload
- // in a subsequent patch.
llvm::AtomicRMWInst *
- CreateAtomicRMW(llvm::AtomicRMWInst::BinOp Op, llvm::Value *Ptr,
- llvm::Value *Val, llvm::AtomicOrdering Ordering,
+ CreateAtomicRMW(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val,
+ llvm::AtomicOrdering Ordering,
llvm::SyncScope::ID SSID = llvm::SyncScope::System) {
- return CGBuilderBaseTy::CreateAtomicRMW(Op, Ptr, Val, llvm::MaybeAlign(),
+ return CGBuilderBaseTy::CreateAtomicRMW(Op, Addr.getPointer(), Val,
+ Addr.getAlignment().getAsAlign(),
Ordering, SSID);
}
- using CGBuilderBaseTy::CreateBitCast;
- Address CreateBitCast(Address Addr, llvm::Type *Ty,
- const llvm::Twine &Name = "") {
- return Address(CreateBitCast(Addr.getPointer(), Ty, Name),
- Addr.getAlignment());
- }
-
using CGBuilderBaseTy::CreateAddrSpaceCast;
Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty,
const llvm::Twine &Name = "") {
- return Address(CreateAddrSpaceCast(Addr.getPointer(), Ty, Name),
- Addr.getAlignment());
- }
-
- /// Cast the element type of the given address to a different type,
- /// preserving information like the alignment and address space.
- Address CreateElementBitCast(Address Addr, llvm::Type *Ty,
- const llvm::Twine &Name = "") {
- auto PtrTy = Ty->getPointerTo(Addr.getAddressSpace());
- return CreateBitCast(Addr, PtrTy, Name);
+ return Addr.withPointer(CreateAddrSpaceCast(Addr.getPointer(), Ty, Name),
+ Addr.isKnownNonNull());
}
using CGBuilderBaseTy::CreatePointerBitCastOrAddrSpaceCast;
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty,
+ llvm::Type *ElementTy,
const llvm::Twine &Name = "") {
llvm::Value *Ptr =
- CreatePointerBitCastOrAddrSpaceCast(Addr.getPointer(), Ty, Name);
- return Address(Ptr, Addr.getAlignment());
+ CreatePointerBitCastOrAddrSpaceCast(Addr.getPointer(), Ty, Name);
+ return Address(Ptr, ElementTy, Addr.getAlignment(), Addr.isKnownNonNull());
}
/// Given
@@ -192,9 +176,10 @@ public:
const llvm::StructLayout *Layout = DL.getStructLayout(ElTy);
auto Offset = CharUnits::fromQuantity(Layout->getElementOffset(Index));
- return Address(CreateStructGEP(Addr.getElementType(),
- Addr.getPointer(), Index, Name),
- Addr.getAlignment().alignmentAtOffset(Offset));
+ return Address(
+ CreateStructGEP(Addr.getElementType(), Addr.getPointer(), Index, Name),
+ ElTy->getElementType(Index),
+ Addr.getAlignment().alignmentAtOffset(Offset), Addr.isKnownNonNull());
}
/// Given
@@ -215,7 +200,9 @@ public:
return Address(
CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
{getSize(CharUnits::Zero()), getSize(Index)}, Name),
- Addr.getAlignment().alignmentAtOffset(Index * EltSize));
+ ElTy->getElementType(),
+ Addr.getAlignment().alignmentAtOffset(Index * EltSize),
+ Addr.isKnownNonNull());
}
/// Given
@@ -231,7 +218,8 @@ public:
return Address(CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
getSize(Index), Name),
- Addr.getAlignment().alignmentAtOffset(Index * EltSize));
+ ElTy, Addr.getAlignment().alignmentAtOffset(Index * EltSize),
+ Addr.isKnownNonNull());
}
/// Given
@@ -247,7 +235,24 @@ public:
return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(),
getSize(Index), Name),
- Addr.getAlignment().alignmentAtOffset(Index * EltSize));
+ Addr.getElementType(),
+ Addr.getAlignment().alignmentAtOffset(Index * EltSize),
+ NotKnownNonNull);
+ }
+
+ /// Create GEP with single dynamic index. The address alignment is reduced
+ /// according to the element size.
+ using CGBuilderBaseTy::CreateGEP;
+ Address CreateGEP(Address Addr, llvm::Value *Index,
+ const llvm::Twine &Name = "") {
+ const llvm::DataLayout &DL = BB->getParent()->getParent()->getDataLayout();
+ CharUnits EltSize =
+ CharUnits::fromQuantity(DL.getTypeAllocSize(Addr.getElementType()));
+
+ return Address(
+ CreateGEP(Addr.getElementType(), Addr.getPointer(), Index, Name),
+ Addr.getElementType(),
+ Addr.getAlignment().alignmentOfArrayElement(EltSize), NotKnownNonNull);
}
/// Given a pointer to i8, adjust it by a given constant offset.
@@ -256,14 +261,18 @@ public:
assert(Addr.getElementType() == TypeCache.Int8Ty);
return Address(CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
getSize(Offset), Name),
- Addr.getAlignment().alignmentAtOffset(Offset));
+ Addr.getElementType(),
+ Addr.getAlignment().alignmentAtOffset(Offset),
+ Addr.isKnownNonNull());
}
Address CreateConstByteGEP(Address Addr, CharUnits Offset,
const llvm::Twine &Name = "") {
assert(Addr.getElementType() == TypeCache.Int8Ty);
return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(),
getSize(Offset), Name),
- Addr.getAlignment().alignmentAtOffset(Offset));
+ Addr.getElementType(),
+ Addr.getAlignment().alignmentAtOffset(Offset),
+ NotKnownNonNull);
}
using CGBuilderBaseTy::CreateConstInBoundsGEP2_32;
@@ -278,8 +287,10 @@ public:
/*isSigned=*/true);
if (!GEP->accumulateConstantOffset(DL, Offset))
llvm_unreachable("offset of GEP with constants is always computable");
- return Address(GEP, Addr.getAlignment().alignmentAtOffset(
- CharUnits::fromQuantity(Offset.getSExtValue())));
+ return Address(GEP, GEP->getResultElementType(),
+ Addr.getAlignment().alignmentAtOffset(
+ CharUnits::fromQuantity(Offset.getSExtValue())),
+ Addr.isKnownNonNull());
}
using CGBuilderBaseTy::CreateMemCpy;
@@ -318,9 +329,16 @@ public:
Dest.getAlignment().getAsAlign(), IsVolatile);
}
+ using CGBuilderBaseTy::CreateMemSetInline;
+ llvm::CallInst *CreateMemSetInline(Address Dest, llvm::Value *Value,
+ uint64_t Size) {
+ return CreateMemSetInline(Dest.getPointer(),
+ Dest.getAlignment().getAsAlign(), Value,
+ getInt64(Size));
+ }
+
using CGBuilderBaseTy::CreatePreserveStructAccessIndex;
- Address CreatePreserveStructAccessIndex(Address Addr,
- unsigned Index,
+ Address CreatePreserveStructAccessIndex(Address Addr, unsigned Index,
unsigned FieldIndex,
llvm::MDNode *DbgInfo) {
llvm::StructType *ElTy = cast<llvm::StructType>(Addr.getElementType());
@@ -330,11 +348,18 @@ public:
return Address(CreatePreserveStructAccessIndex(ElTy, Addr.getPointer(),
Index, FieldIndex, DbgInfo),
+ ElTy->getElementType(Index),
Addr.getAlignment().alignmentAtOffset(Offset));
}
+
+ using CGBuilderBaseTy::CreateLaunderInvariantGroup;
+ Address CreateLaunderInvariantGroup(Address Addr) {
+ return Addr.withPointer(CreateLaunderInvariantGroup(Addr.getPointer()),
+ Addr.isKnownNonNull());
+ }
};
-} // end namespace CodeGen
-} // end namespace clang
+} // end namespace CodeGen
+} // end namespace clang
#endif
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
index 1a02965b223e..a4f26a6f0eb1 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+#include "ABIInfo.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
@@ -24,11 +25,15 @@
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/OSLog.h"
+#include "clang/AST/OperationKinds.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
@@ -45,24 +50,23 @@
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsS390.h"
+#include "llvm/IR/IntrinsicsVE.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ScopedPrinter.h"
-#include "llvm/Support/X86TargetParser.h"
+#include "llvm/TargetParser/AArch64TargetParser.h"
+#include "llvm/TargetParser/X86TargetParser.h"
+#include <optional>
#include <sstream>
using namespace clang;
using namespace CodeGen;
using namespace llvm;
-static
-int64_t clamp(int64_t Value, int64_t Low, int64_t High) {
- return std::min(High, std::max(Low, Value));
-}
-
static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
Align AlignmentInBytes) {
ConstantInt *Byte;
@@ -96,13 +100,63 @@ llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
StringRef Name;
GlobalDecl D(FD);
+ // TODO: This list should be expanded or refactored after all GCC-compatible
+ // std libcall builtins are implemented.
+ static SmallDenseMap<unsigned, StringRef, 64> F128Builtins{
+ {Builtin::BI__builtin___fprintf_chk, "__fprintf_chkieee128"},
+ {Builtin::BI__builtin___printf_chk, "__printf_chkieee128"},
+ {Builtin::BI__builtin___snprintf_chk, "__snprintf_chkieee128"},
+ {Builtin::BI__builtin___sprintf_chk, "__sprintf_chkieee128"},
+ {Builtin::BI__builtin___vfprintf_chk, "__vfprintf_chkieee128"},
+ {Builtin::BI__builtin___vprintf_chk, "__vprintf_chkieee128"},
+ {Builtin::BI__builtin___vsnprintf_chk, "__vsnprintf_chkieee128"},
+ {Builtin::BI__builtin___vsprintf_chk, "__vsprintf_chkieee128"},
+ {Builtin::BI__builtin_fprintf, "__fprintfieee128"},
+ {Builtin::BI__builtin_printf, "__printfieee128"},
+ {Builtin::BI__builtin_snprintf, "__snprintfieee128"},
+ {Builtin::BI__builtin_sprintf, "__sprintfieee128"},
+ {Builtin::BI__builtin_vfprintf, "__vfprintfieee128"},
+ {Builtin::BI__builtin_vprintf, "__vprintfieee128"},
+ {Builtin::BI__builtin_vsnprintf, "__vsnprintfieee128"},
+ {Builtin::BI__builtin_vsprintf, "__vsprintfieee128"},
+ {Builtin::BI__builtin_fscanf, "__fscanfieee128"},
+ {Builtin::BI__builtin_scanf, "__scanfieee128"},
+ {Builtin::BI__builtin_sscanf, "__sscanfieee128"},
+ {Builtin::BI__builtin_vfscanf, "__vfscanfieee128"},
+ {Builtin::BI__builtin_vscanf, "__vscanfieee128"},
+ {Builtin::BI__builtin_vsscanf, "__vsscanfieee128"},
+ {Builtin::BI__builtin_nexttowardf128, "__nexttowardieee128"},
+ };
+
+ // The AIX library functions frexpl, ldexpl, and modfl are for 128-bit
+ // IBM 'long double' (i.e. __ibm128). Map to the 'double' versions
+ // if it is 64-bit 'long double' mode.
+ static SmallDenseMap<unsigned, StringRef, 4> AIXLongDouble64Builtins{
+ {Builtin::BI__builtin_frexpl, "frexp"},
+ {Builtin::BI__builtin_ldexpl, "ldexp"},
+ {Builtin::BI__builtin_modfl, "modf"},
+ };
+
// If the builtin has been declared explicitly with an assembler label,
// use the mangled name. This differs from the plain label on platforms
// that prefix labels.
if (FD->hasAttr<AsmLabelAttr>())
Name = getMangledName(D);
- else
- Name = Context.BuiltinInfo.getName(BuiltinID) + 10;
+ else {
+ // TODO: This mutation should also be applied to other targets other than
+ // PPC, after backend supports IEEE 128-bit style libcalls.
+ if (getTriple().isPPC64() &&
+ &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad() &&
+ F128Builtins.contains(BuiltinID))
+ Name = F128Builtins[BuiltinID];
+ else if (getTriple().isOSAIX() &&
+ &getTarget().getLongDoubleFormat() ==
+ &llvm::APFloat::IEEEdouble() &&
+ AIXLongDouble64Builtins.contains(BuiltinID))
+ Name = AIXLongDouble64Builtins[BuiltinID];
+ else
+ Name = Context.BuiltinInfo.getName(BuiltinID).substr(10);
+ }
llvm::FunctionType *Ty =
cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
@@ -134,54 +188,63 @@ static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
return V;
}
+static Address CheckAtomicAlignment(CodeGenFunction &CGF, const CallExpr *E) {
+ ASTContext &Ctx = CGF.getContext();
+ Address Ptr = CGF.EmitPointerWithAlignment(E->getArg(0));
+ unsigned Bytes = Ptr.getElementType()->isPointerTy()
+ ? Ctx.getTypeSizeInChars(Ctx.VoidPtrTy).getQuantity()
+ : Ptr.getElementType()->getScalarSizeInBits() / 8;
+ unsigned Align = Ptr.getAlignment().getQuantity();
+ if (Align % Bytes != 0) {
+ DiagnosticsEngine &Diags = CGF.CGM.getDiags();
+ Diags.Report(E->getBeginLoc(), diag::warn_sync_op_misaligned);
+ // Force address to be at least naturally-aligned.
+ return Ptr.withAlignment(CharUnits::fromQuantity(Bytes));
+ }
+ return Ptr;
+}
+
/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static Value *MakeBinaryAtomicValue(
CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
+
QualType T = E->getType();
assert(E->getArg(0)->getType()->isPointerType());
assert(CGF.getContext().hasSameUnqualifiedType(T,
E->getArg(0)->getType()->getPointeeType()));
assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
- llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
+ Address DestAddr = CheckAtomicAlignment(CGF, E);
- llvm::IntegerType *IntType =
- llvm::IntegerType::get(CGF.getLLVMContext(),
- CGF.getContext().getTypeSize(T));
- llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+ llvm::IntegerType *IntType = llvm::IntegerType::get(
+ CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
- llvm::Value *Args[2];
- Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
- Args[1] = CGF.EmitScalarExpr(E->getArg(1));
- llvm::Type *ValueType = Args[1]->getType();
- Args[1] = EmitToInt(CGF, Args[1], T, IntType);
+ llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1));
+ llvm::Type *ValueType = Val->getType();
+ Val = EmitToInt(CGF, Val, T, IntType);
- llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
- Kind, Args[0], Args[1], Ordering);
+ llvm::Value *Result =
+ CGF.Builder.CreateAtomicRMW(Kind, DestAddr, Val, Ordering);
return EmitFromInt(CGF, Result, T, ValueType);
}
static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
Value *Val = CGF.EmitScalarExpr(E->getArg(0));
- Value *Address = CGF.EmitScalarExpr(E->getArg(1));
+ Address Addr = CGF.EmitPointerWithAlignment(E->getArg(1));
- // Convert the type of the pointer to a pointer to the stored type.
Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
- Value *BC = CGF.Builder.CreateBitCast(
- Address, llvm::PointerType::getUnqual(Val->getType()), "cast");
- LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType());
+ LValue LV = CGF.MakeAddrLValue(Addr, E->getArg(0)->getType());
LV.setNontemporal(true);
CGF.EmitStoreOfScalar(Val, LV, false);
return nullptr;
}
static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
- Value *Address = CGF.EmitScalarExpr(E->getArg(0));
+ Address Addr = CGF.EmitPointerWithAlignment(E->getArg(0));
- LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType());
+ LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
LV.setNontemporal(true);
return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
}
@@ -206,23 +269,18 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
E->getArg(0)->getType()->getPointeeType()));
assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
- llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
+ Address DestAddr = CheckAtomicAlignment(CGF, E);
- llvm::IntegerType *IntType =
- llvm::IntegerType::get(CGF.getLLVMContext(),
- CGF.getContext().getTypeSize(T));
- llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+ llvm::IntegerType *IntType = llvm::IntegerType::get(
+ CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
- llvm::Value *Args[2];
- Args[1] = CGF.EmitScalarExpr(E->getArg(1));
- llvm::Type *ValueType = Args[1]->getType();
- Args[1] = EmitToInt(CGF, Args[1], T, IntType);
- Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
+ llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1));
+ llvm::Type *ValueType = Val->getType();
+ Val = EmitToInt(CGF, Val, T, IntType);
llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
- Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
- Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
+ Kind, DestAddr, Val, llvm::AtomicOrdering::SequentiallyConsistent);
+ Result = CGF.Builder.CreateBinOp(Op, Result, Val);
if (Invert)
Result =
CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
@@ -248,22 +306,18 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
bool ReturnBool) {
QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
- llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
+ Address DestAddr = CheckAtomicAlignment(CGF, E);
llvm::IntegerType *IntType = llvm::IntegerType::get(
CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
- llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
- Value *Args[3];
- Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
- Args[1] = CGF.EmitScalarExpr(E->getArg(1));
- llvm::Type *ValueType = Args[1]->getType();
- Args[1] = EmitToInt(CGF, Args[1], T, IntType);
- Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);
+ Value *Cmp = CGF.EmitScalarExpr(E->getArg(1));
+ llvm::Type *ValueType = Cmp->getType();
+ Cmp = EmitToInt(CGF, Cmp, T, IntType);
+ Value *New = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);
Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
- Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent,
+ DestAddr, Cmp, New, llvm::AtomicOrdering::SequentiallyConsistent,
llvm::AtomicOrdering::SequentiallyConsistent);
if (ReturnBool)
// Extract boolean success flag and zext it to int.
@@ -299,7 +353,8 @@ Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
E->getArg(2)->getType()));
- auto *Destination = CGF.EmitScalarExpr(E->getArg(0));
+ Address DestAddr = CheckAtomicAlignment(CGF, E);
+
auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
@@ -313,8 +368,7 @@ Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
// _Interlocked* operations in the future, we will have to remove the volatile
// marker.
auto *Result = CGF.Builder.CreateAtomicCmpXchg(
- Destination, Comparand, Exchange,
- SuccessOrdering, FailureOrdering);
+ DestAddr, Comparand, Exchange, SuccessOrdering, FailureOrdering);
Result->setVolatile(true);
return CGF.Builder.CreateExtractValue(Result, 0);
}
@@ -327,31 +381,34 @@ Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
// __int64 _ExchangeHigh,
// __int64 _ExchangeLow,
// __int64 * _ComparandResult);
+//
+// Note that Destination is assumed to be at least 16-byte aligned, despite
+// being typed int64.
+
static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
const CallExpr *E,
AtomicOrdering SuccessOrdering) {
assert(E->getNumArgs() == 4);
- llvm::Value *Destination = CGF.EmitScalarExpr(E->getArg(0));
+ llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1));
llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2));
- llvm::Value *ComparandPtr = CGF.EmitScalarExpr(E->getArg(3));
+ Address ComparandAddr = CGF.EmitPointerWithAlignment(E->getArg(3));
- assert(Destination->getType()->isPointerTy());
+ assert(DestPtr->getType()->isPointerTy());
assert(!ExchangeHigh->getType()->isPointerTy());
assert(!ExchangeLow->getType()->isPointerTy());
- assert(ComparandPtr->getType()->isPointerTy());
// For Release ordering, the failure ordering should be Monotonic.
auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
? AtomicOrdering::Monotonic
: SuccessOrdering;
- // Convert to i128 pointers and values.
+ // Convert to i128 pointers and values. Alignment is also overridden for
+ // destination pointer.
llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
- llvm::Type *Int128PtrTy = Int128Ty->getPointerTo();
- Destination = CGF.Builder.CreateBitCast(Destination, Int128PtrTy);
- Address ComparandResult(CGF.Builder.CreateBitCast(ComparandPtr, Int128PtrTy),
- CGF.getContext().toCharUnitsFromBits(128));
+ Address DestAddr(DestPtr, Int128Ty,
+ CGF.getContext().toCharUnitsFromBits(128));
+ ComparandAddr = ComparandAddr.withElementType(Int128Ty);
// (((i128)hi) << 64) | ((i128)lo)
ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
@@ -361,9 +418,9 @@ static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow);
// Load the comparand for the instruction.
- llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandResult);
+ llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandAddr);
- auto *CXI = CGF.Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
+ auto *CXI = CGF.Builder.CreateAtomicCmpXchg(DestAddr, Comparand, Exchange,
SuccessOrdering, FailureOrdering);
// The atomic instruction is marked volatile for consistency with MSVC. This
@@ -374,7 +431,7 @@ static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
// Store the result as an outparameter.
CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0),
- ComparandResult);
+ ComparandAddr);
// Get the success boolean and zero extend it to i8.
Value *Success = CGF.Builder.CreateExtractValue(CXI, 1);
@@ -386,24 +443,21 @@ static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
assert(E->getArg(0)->getType()->isPointerType());
auto *IntTy = CGF.ConvertType(E->getType());
+ Address DestAddr = CheckAtomicAlignment(CGF, E);
auto *Result = CGF.Builder.CreateAtomicRMW(
- AtomicRMWInst::Add,
- CGF.EmitScalarExpr(E->getArg(0)),
- ConstantInt::get(IntTy, 1),
- Ordering);
+ AtomicRMWInst::Add, DestAddr, ConstantInt::get(IntTy, 1), Ordering);
return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
}
-static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E,
+static Value *EmitAtomicDecrementValue(
+ CodeGenFunction &CGF, const CallExpr *E,
AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
assert(E->getArg(0)->getType()->isPointerType());
auto *IntTy = CGF.ConvertType(E->getType());
+ Address DestAddr = CheckAtomicAlignment(CGF, E);
auto *Result = CGF.Builder.CreateAtomicRMW(
- AtomicRMWInst::Sub,
- CGF.EmitScalarExpr(E->getArg(0)),
- ConstantInt::get(IntTy, 1),
- Ordering);
+ AtomicRMWInst::Sub, DestAddr, ConstantInt::get(IntTy, 1), Ordering);
return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
}
@@ -414,7 +468,6 @@ static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
llvm::Type *ITy =
llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
- Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize);
Load->setVolatile(true);
return Load;
@@ -426,9 +479,6 @@ static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
Value *Value = CGF.EmitScalarExpr(E->getArg(1));
QualType ElTy = E->getArg(0)->getType()->getPointeeType();
CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
- llvm::Type *ITy =
- llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8);
- Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
llvm::StoreInst *Store =
CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
Store->setVolatile(true);
@@ -443,8 +493,8 @@ static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
unsigned ConstrainedIntrinsicID) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
if (CGF.Builder.getIsFPConstrained()) {
- CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
} else {
@@ -471,6 +521,25 @@ static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
}
}
+// Has second type mangled argument.
+static Value *emitBinaryExpMaybeConstrainedFPBuiltin(
+ CodeGenFunction &CGF, const CallExpr *E, llvm::Intrinsic::ID IntrinsicID,
+ llvm::Intrinsic::ID ConstrainedIntrinsicID) {
+ llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
+ llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
+
+ if (CGF.Builder.getIsFPConstrained()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
+ Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
+ {Src0->getType(), Src1->getType()});
+ return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1});
+ }
+
+ Function *F =
+ CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), Src1->getType()});
+ return CGF.Builder.CreateCall(F, {Src0, Src1});
+}
+
// Emit an intrinsic that has 3 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
@@ -511,13 +580,13 @@ static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type.
-static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
- const CallExpr *E,
- unsigned IntrinsicID) {
+static Value *emitUnaryBuiltin(CodeGenFunction &CGF, const CallExpr *E,
+ unsigned IntrinsicID,
+ llvm::StringRef Name = "") {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
- return CGF.Builder.CreateCall(F, Src0);
+ return CGF.Builder.CreateCall(F, Src0, Name);
}
// Emit an intrinsic that has 2 operands of the same type as its result.
@@ -574,6 +643,24 @@ emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
}
}
+static Value *emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E,
+ llvm::Intrinsic::ID IntrinsicID) {
+ llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
+ llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
+
+ QualType IntPtrTy = E->getArg(1)->getType()->getPointeeType();
+ llvm::Type *IntTy = CGF.ConvertType(IntPtrTy);
+ llvm::Function *F =
+ CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), IntTy});
+ llvm::Value *Call = CGF.Builder.CreateCall(F, Src0);
+
+ llvm::Value *Exp = CGF.Builder.CreateExtractValue(Call, 1);
+ LValue LV = CGF.MakeNaturalAlignAddrLValue(Src1, IntPtrTy);
+ CGF.EmitStoreOfScalar(Exp, LV);
+
+ return CGF.Builder.CreateExtractValue(Call, 0);
+}
+
/// EmitFAbs - Emit a call to @llvm.fabs().
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
@@ -651,6 +738,8 @@ static Value *emitRangedBuiltin(CodeGenFunction &CGF,
Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
llvm::Instruction *Call = CGF.Builder.CreateCall(F);
Call->setMetadata(llvm::LLVMContext::MD_range, RNode);
+ Call->setMetadata(llvm::LLVMContext::MD_noundef,
+ llvm::MDNode::get(CGF.getLLVMContext(), std::nullopt));
return Call;
}
@@ -666,7 +755,7 @@ getIntegerWidthAndSignedness(const clang::ASTContext &context,
const clang::QualType Type) {
assert(Type->isIntegerType() && "Given type is not an integer.");
unsigned Width = Type->isBooleanType() ? 1
- : Type->isExtIntType() ? context.getIntWidth(Type)
+ : Type->isBitIntType() ? context.getIntWidth(Type)
: context.getTypeInfo(Type).Width;
bool Signed = Type->isSignedIntegerType();
return {Width, Signed};
@@ -701,11 +790,6 @@ EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
}
Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
- llvm::Type *DestType = Int8PtrTy;
- if (ArgValue->getType() != DestType)
- ArgValue =
- Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data());
-
Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue);
}
@@ -735,6 +819,238 @@ CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
}
+const FieldDecl *CodeGenFunction::FindFlexibleArrayMemberField(
+ ASTContext &Ctx, const RecordDecl *RD, StringRef Name, uint64_t &Offset) {
+ const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
+ getLangOpts().getStrictFlexArraysLevel();
+ unsigned FieldNo = 0;
+ bool IsUnion = RD->isUnion();
+
+ for (const Decl *D : RD->decls()) {
+ if (const auto *Field = dyn_cast<FieldDecl>(D);
+ Field && (Name.empty() || Field->getNameAsString() == Name) &&
+ Decl::isFlexibleArrayMemberLike(
+ Ctx, Field, Field->getType(), StrictFlexArraysLevel,
+ /*IgnoreTemplateOrMacroSubstitution=*/true)) {
+ const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
+ Offset += Layout.getFieldOffset(FieldNo);
+ return Field;
+ }
+
+ if (const auto *Record = dyn_cast<RecordDecl>(D))
+ if (const FieldDecl *Field =
+ FindFlexibleArrayMemberField(Ctx, Record, Name, Offset)) {
+ const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
+ Offset += Layout.getFieldOffset(FieldNo);
+ return Field;
+ }
+
+ if (!IsUnion && isa<FieldDecl>(D))
+ ++FieldNo;
+ }
+
+ return nullptr;
+}
+
+static unsigned CountCountedByAttrs(const RecordDecl *RD) {
+ unsigned Num = 0;
+
+ for (const Decl *D : RD->decls()) {
+ if (const auto *FD = dyn_cast<FieldDecl>(D);
+ FD && FD->hasAttr<CountedByAttr>()) {
+ return ++Num;
+ }
+
+ if (const auto *Rec = dyn_cast<RecordDecl>(D))
+ Num += CountCountedByAttrs(Rec);
+ }
+
+ return Num;
+}
+
+llvm::Value *
+CodeGenFunction::emitFlexibleArrayMemberSize(const Expr *E, unsigned Type,
+ llvm::IntegerType *ResType) {
+ // The code generated here calculates the size of a struct with a flexible
+ // array member that uses the counted_by attribute. There are two instances
+ // we handle:
+ //
+ // struct s {
+ // unsigned long flags;
+ // int count;
+ // int array[] __attribute__((counted_by(count)));
+ // }
+ //
+ // 1) bdos of the flexible array itself:
+ //
+ // __builtin_dynamic_object_size(p->array, 1) ==
+ // p->count * sizeof(*p->array)
+ //
+ // 2) bdos of a pointer into the flexible array:
+ //
+ // __builtin_dynamic_object_size(&p->array[42], 1) ==
+ // (p->count - 42) * sizeof(*p->array)
+ //
+ // 2) bdos of the whole struct, including the flexible array:
+ //
+ // __builtin_dynamic_object_size(p, 1) ==
+ // max(sizeof(struct s),
+ // offsetof(struct s, array) + p->count * sizeof(*p->array))
+ //
+ ASTContext &Ctx = getContext();
+ const Expr *Base = E->IgnoreParenImpCasts();
+ const Expr *Idx = nullptr;
+
+ if (const auto *UO = dyn_cast<UnaryOperator>(Base);
+ UO && UO->getOpcode() == UO_AddrOf) {
+ Expr *SubExpr = UO->getSubExpr()->IgnoreParenImpCasts();
+ if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(SubExpr)) {
+ Base = ASE->getBase()->IgnoreParenImpCasts();
+ Idx = ASE->getIdx()->IgnoreParenImpCasts();
+
+ if (const auto *IL = dyn_cast<IntegerLiteral>(Idx)) {
+ int64_t Val = IL->getValue().getSExtValue();
+ if (Val < 0)
+ return getDefaultBuiltinObjectSizeResult(Type, ResType);
+
+ if (Val == 0)
+ // The index is 0, so we don't need to take it into account.
+ Idx = nullptr;
+ }
+ } else {
+ // Potential pointer to another element in the struct.
+ Base = SubExpr;
+ }
+ }
+
+ // Get the flexible array member Decl.
+ const RecordDecl *OuterRD = nullptr;
+ std::string FAMName;
+ if (const auto *ME = dyn_cast<MemberExpr>(Base)) {
+ // Check if \p Base is referencing the FAM itself.
+ const ValueDecl *VD = ME->getMemberDecl();
+ OuterRD = VD->getDeclContext()->getOuterLexicalRecordContext();
+ FAMName = VD->getNameAsString();
+ } else if (const auto *DRE = dyn_cast<DeclRefExpr>(Base)) {
+ // Check if we're pointing to the whole struct.
+ QualType Ty = DRE->getDecl()->getType();
+ if (Ty->isPointerType())
+ Ty = Ty->getPointeeType();
+ OuterRD = Ty->getAsRecordDecl();
+
+ // If we have a situation like this:
+ //
+ // struct union_of_fams {
+ // int flags;
+ // union {
+ // signed char normal_field;
+ // struct {
+ // int count1;
+ // int arr1[] __counted_by(count1);
+ // };
+ // struct {
+ // signed char count2;
+ // int arr2[] __counted_by(count2);
+ // };
+ // };
+ // };
+ //
+ // We don't konw which 'count' to use in this scenario:
+ //
+ // size_t get_size(struct union_of_fams *p) {
+ // return __builtin_dynamic_object_size(p, 1);
+ // }
+ //
+ // Instead of calculating a wrong number, we give up.
+ if (OuterRD && CountCountedByAttrs(OuterRD) > 1)
+ return nullptr;
+ }
+
+ if (!OuterRD)
+ return nullptr;
+
+ uint64_t Offset = 0;
+ const FieldDecl *FAMDecl =
+ FindFlexibleArrayMemberField(Ctx, OuterRD, FAMName, Offset);
+ Offset = Ctx.toCharUnitsFromBits(Offset).getQuantity();
+
+ if (!FAMDecl || !FAMDecl->hasAttr<CountedByAttr>())
+ // No flexible array member found or it doesn't have the "counted_by"
+ // attribute.
+ return nullptr;
+
+ const FieldDecl *CountedByFD = FindCountedByField(FAMDecl);
+ if (!CountedByFD)
+ // Can't find the field referenced by the "counted_by" attribute.
+ return nullptr;
+
+ // Build a load of the counted_by field.
+ bool IsSigned = CountedByFD->getType()->isSignedIntegerType();
+ Value *CountedByInst = EmitCountedByFieldExpr(Base, FAMDecl, CountedByFD);
+ if (!CountedByInst)
+ return getDefaultBuiltinObjectSizeResult(Type, ResType);
+
+ CountedByInst = Builder.CreateIntCast(CountedByInst, ResType, IsSigned);
+
+ // Build a load of the index and subtract it from the count.
+ Value *IdxInst = nullptr;
+ if (Idx) {
+ if (Idx->HasSideEffects(getContext()))
+ // We can't have side-effects.
+ return getDefaultBuiltinObjectSizeResult(Type, ResType);
+
+ bool IdxSigned = Idx->getType()->isSignedIntegerType();
+ IdxInst = EmitAnyExprToTemp(Idx).getScalarVal();
+ IdxInst = Builder.CreateIntCast(IdxInst, ResType, IdxSigned);
+
+ // We go ahead with the calculation here. If the index turns out to be
+ // negative, we'll catch it at the end.
+ CountedByInst =
+ Builder.CreateSub(CountedByInst, IdxInst, "", !IsSigned, IsSigned);
+ }
+
+ // Calculate how large the flexible array member is in bytes.
+ const ArrayType *ArrayTy = Ctx.getAsArrayType(FAMDecl->getType());
+ CharUnits Size = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
+ llvm::Constant *ElemSize =
+ llvm::ConstantInt::get(ResType, Size.getQuantity(), IsSigned);
+ Value *FAMSize =
+ Builder.CreateMul(CountedByInst, ElemSize, "", !IsSigned, IsSigned);
+ FAMSize = Builder.CreateIntCast(FAMSize, ResType, IsSigned);
+ Value *Res = FAMSize;
+
+ if (isa<DeclRefExpr>(Base)) {
+ // The whole struct is specificed in the __bdos.
+ const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(OuterRD);
+
+ // Get the offset of the FAM.
+ llvm::Constant *FAMOffset = ConstantInt::get(ResType, Offset, IsSigned);
+ Value *OffsetAndFAMSize =
+ Builder.CreateAdd(FAMOffset, Res, "", !IsSigned, IsSigned);
+
+ // Get the full size of the struct.
+ llvm::Constant *SizeofStruct =
+ ConstantInt::get(ResType, Layout.getSize().getQuantity(), IsSigned);
+
+ // max(sizeof(struct s),
+ // offsetof(struct s, array) + p->count * sizeof(*p->array))
+ Res = IsSigned
+ ? Builder.CreateBinaryIntrinsic(llvm::Intrinsic::smax,
+ OffsetAndFAMSize, SizeofStruct)
+ : Builder.CreateBinaryIntrinsic(llvm::Intrinsic::umax,
+ OffsetAndFAMSize, SizeofStruct);
+ }
+
+ // A negative \p IdxInst or \p CountedByInst means that the index lands
+ // outside of the flexible array member. If that's the case, we want to
+ // return 0.
+ Value *Cmp = Builder.CreateIsNotNeg(CountedByInst);
+ if (IdxInst)
+ Cmp = Builder.CreateAnd(Builder.CreateIsNotNeg(IdxInst), Cmp);
+
+ return Builder.CreateSelect(Cmp, Res, ConstantInt::get(ResType, 0, IsSigned));
+}
+
/// Returns a Value corresponding to the size of the given expression.
/// This Value may be either of the following:
/// - A llvm::Argument (if E is a param with the pass_object_size attribute on
@@ -767,6 +1083,13 @@ CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
}
}
+ if (IsDynamic) {
+ // Emit special code for a flexible array member with the "counted_by"
+ // attribute.
+ if (Value *V = emitFlexibleArrayMemberSize(E, Type, ResType))
+ return V;
+ }
+
// LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
// evaluate E for side-effects. In either case, we shouldn't lower to
// @llvm.objectsize.
@@ -884,7 +1207,7 @@ static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
// Build the constraints. FIXME: We should support immediates when possible.
std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
- std::string MachineClobbers = CGF.getTarget().getClobbers();
+ std::string_view MachineClobbers = CGF.getTarget().getClobbers();
if (!MachineClobbers.empty()) {
Constraints += ',';
Constraints += MachineClobbers;
@@ -892,9 +1215,8 @@ static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
llvm::IntegerType *IntType = llvm::IntegerType::get(
CGF.getLLVMContext(),
CGF.getContext().getTypeSize(E->getArg(1)->getType()));
- llvm::Type *IntPtrType = IntType->getPointerTo();
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false);
+ llvm::FunctionType::get(CGF.Int8Ty, {CGF.UnqualPtrTy, IntType}, false);
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
@@ -939,7 +1261,7 @@ static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy);
Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8,
ByteIndex, "bittest.byteaddr"),
- CharUnits::One());
+ CGF.Int8Ty, CharUnits::One());
Value *PosLow =
CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
llvm::ConstantInt::get(CGF.Int8Ty, 0x7));
@@ -963,8 +1285,7 @@ static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
Mask = CGF.Builder.CreateNot(Mask);
RMWOp = llvm::AtomicRMWInst::And;
}
- OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask,
- Ordering);
+ OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr, Mask, Ordering);
} else {
// Emit a plain load for the non-interlocked intrinsics.
OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
@@ -1027,19 +1348,21 @@ static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF,
AsmOS << "$0, ${1:y}";
std::string Constraints = "=r,*Z,~{memory}";
- std::string MachineClobbers = CGF.getTarget().getClobbers();
+ std::string_view MachineClobbers = CGF.getTarget().getClobbers();
if (!MachineClobbers.empty()) {
Constraints += ',';
Constraints += MachineClobbers;
}
- llvm::Type *IntPtrType = RetType->getPointerTo();
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(RetType, {IntPtrType}, false);
+ llvm::Type *PtrType = CGF.UnqualPtrTy;
+ llvm::FunctionType *FTy = llvm::FunctionType::get(RetType, {PtrType}, false);
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
- return CGF.Builder.CreateCall(IA, {Addr});
+ llvm::CallInst *CI = CGF.Builder.CreateCall(IA, {Addr});
+ CI->addParamAttr(
+ 0, Attribute::get(CGF.getLLVMContext(), Attribute::ElementType, RetType));
+ return CI;
}
namespace {
@@ -1137,312 +1460,312 @@ enum class CodeGenFunction::MSVCIntrin {
__fastfail,
};
-static Optional<CodeGenFunction::MSVCIntrin>
+static std::optional<CodeGenFunction::MSVCIntrin>
translateArmToMsvcIntrin(unsigned BuiltinID) {
using MSVCIntrin = CodeGenFunction::MSVCIntrin;
switch (BuiltinID) {
default:
- return None;
- case ARM::BI_BitScanForward:
- case ARM::BI_BitScanForward64:
+ return std::nullopt;
+ case clang::ARM::BI_BitScanForward:
+ case clang::ARM::BI_BitScanForward64:
return MSVCIntrin::_BitScanForward;
- case ARM::BI_BitScanReverse:
- case ARM::BI_BitScanReverse64:
+ case clang::ARM::BI_BitScanReverse:
+ case clang::ARM::BI_BitScanReverse64:
return MSVCIntrin::_BitScanReverse;
- case ARM::BI_InterlockedAnd64:
+ case clang::ARM::BI_InterlockedAnd64:
return MSVCIntrin::_InterlockedAnd;
- case ARM::BI_InterlockedExchange64:
+ case clang::ARM::BI_InterlockedExchange64:
return MSVCIntrin::_InterlockedExchange;
- case ARM::BI_InterlockedExchangeAdd64:
+ case clang::ARM::BI_InterlockedExchangeAdd64:
return MSVCIntrin::_InterlockedExchangeAdd;
- case ARM::BI_InterlockedExchangeSub64:
+ case clang::ARM::BI_InterlockedExchangeSub64:
return MSVCIntrin::_InterlockedExchangeSub;
- case ARM::BI_InterlockedOr64:
+ case clang::ARM::BI_InterlockedOr64:
return MSVCIntrin::_InterlockedOr;
- case ARM::BI_InterlockedXor64:
+ case clang::ARM::BI_InterlockedXor64:
return MSVCIntrin::_InterlockedXor;
- case ARM::BI_InterlockedDecrement64:
+ case clang::ARM::BI_InterlockedDecrement64:
return MSVCIntrin::_InterlockedDecrement;
- case ARM::BI_InterlockedIncrement64:
+ case clang::ARM::BI_InterlockedIncrement64:
return MSVCIntrin::_InterlockedIncrement;
- case ARM::BI_InterlockedExchangeAdd8_acq:
- case ARM::BI_InterlockedExchangeAdd16_acq:
- case ARM::BI_InterlockedExchangeAdd_acq:
- case ARM::BI_InterlockedExchangeAdd64_acq:
+ case clang::ARM::BI_InterlockedExchangeAdd8_acq:
+ case clang::ARM::BI_InterlockedExchangeAdd16_acq:
+ case clang::ARM::BI_InterlockedExchangeAdd_acq:
+ case clang::ARM::BI_InterlockedExchangeAdd64_acq:
return MSVCIntrin::_InterlockedExchangeAdd_acq;
- case ARM::BI_InterlockedExchangeAdd8_rel:
- case ARM::BI_InterlockedExchangeAdd16_rel:
- case ARM::BI_InterlockedExchangeAdd_rel:
- case ARM::BI_InterlockedExchangeAdd64_rel:
+ case clang::ARM::BI_InterlockedExchangeAdd8_rel:
+ case clang::ARM::BI_InterlockedExchangeAdd16_rel:
+ case clang::ARM::BI_InterlockedExchangeAdd_rel:
+ case clang::ARM::BI_InterlockedExchangeAdd64_rel:
return MSVCIntrin::_InterlockedExchangeAdd_rel;
- case ARM::BI_InterlockedExchangeAdd8_nf:
- case ARM::BI_InterlockedExchangeAdd16_nf:
- case ARM::BI_InterlockedExchangeAdd_nf:
- case ARM::BI_InterlockedExchangeAdd64_nf:
+ case clang::ARM::BI_InterlockedExchangeAdd8_nf:
+ case clang::ARM::BI_InterlockedExchangeAdd16_nf:
+ case clang::ARM::BI_InterlockedExchangeAdd_nf:
+ case clang::ARM::BI_InterlockedExchangeAdd64_nf:
return MSVCIntrin::_InterlockedExchangeAdd_nf;
- case ARM::BI_InterlockedExchange8_acq:
- case ARM::BI_InterlockedExchange16_acq:
- case ARM::BI_InterlockedExchange_acq:
- case ARM::BI_InterlockedExchange64_acq:
+ case clang::ARM::BI_InterlockedExchange8_acq:
+ case clang::ARM::BI_InterlockedExchange16_acq:
+ case clang::ARM::BI_InterlockedExchange_acq:
+ case clang::ARM::BI_InterlockedExchange64_acq:
return MSVCIntrin::_InterlockedExchange_acq;
- case ARM::BI_InterlockedExchange8_rel:
- case ARM::BI_InterlockedExchange16_rel:
- case ARM::BI_InterlockedExchange_rel:
- case ARM::BI_InterlockedExchange64_rel:
+ case clang::ARM::BI_InterlockedExchange8_rel:
+ case clang::ARM::BI_InterlockedExchange16_rel:
+ case clang::ARM::BI_InterlockedExchange_rel:
+ case clang::ARM::BI_InterlockedExchange64_rel:
return MSVCIntrin::_InterlockedExchange_rel;
- case ARM::BI_InterlockedExchange8_nf:
- case ARM::BI_InterlockedExchange16_nf:
- case ARM::BI_InterlockedExchange_nf:
- case ARM::BI_InterlockedExchange64_nf:
+ case clang::ARM::BI_InterlockedExchange8_nf:
+ case clang::ARM::BI_InterlockedExchange16_nf:
+ case clang::ARM::BI_InterlockedExchange_nf:
+ case clang::ARM::BI_InterlockedExchange64_nf:
return MSVCIntrin::_InterlockedExchange_nf;
- case ARM::BI_InterlockedCompareExchange8_acq:
- case ARM::BI_InterlockedCompareExchange16_acq:
- case ARM::BI_InterlockedCompareExchange_acq:
- case ARM::BI_InterlockedCompareExchange64_acq:
+ case clang::ARM::BI_InterlockedCompareExchange8_acq:
+ case clang::ARM::BI_InterlockedCompareExchange16_acq:
+ case clang::ARM::BI_InterlockedCompareExchange_acq:
+ case clang::ARM::BI_InterlockedCompareExchange64_acq:
return MSVCIntrin::_InterlockedCompareExchange_acq;
- case ARM::BI_InterlockedCompareExchange8_rel:
- case ARM::BI_InterlockedCompareExchange16_rel:
- case ARM::BI_InterlockedCompareExchange_rel:
- case ARM::BI_InterlockedCompareExchange64_rel:
+ case clang::ARM::BI_InterlockedCompareExchange8_rel:
+ case clang::ARM::BI_InterlockedCompareExchange16_rel:
+ case clang::ARM::BI_InterlockedCompareExchange_rel:
+ case clang::ARM::BI_InterlockedCompareExchange64_rel:
return MSVCIntrin::_InterlockedCompareExchange_rel;
- case ARM::BI_InterlockedCompareExchange8_nf:
- case ARM::BI_InterlockedCompareExchange16_nf:
- case ARM::BI_InterlockedCompareExchange_nf:
- case ARM::BI_InterlockedCompareExchange64_nf:
+ case clang::ARM::BI_InterlockedCompareExchange8_nf:
+ case clang::ARM::BI_InterlockedCompareExchange16_nf:
+ case clang::ARM::BI_InterlockedCompareExchange_nf:
+ case clang::ARM::BI_InterlockedCompareExchange64_nf:
return MSVCIntrin::_InterlockedCompareExchange_nf;
- case ARM::BI_InterlockedOr8_acq:
- case ARM::BI_InterlockedOr16_acq:
- case ARM::BI_InterlockedOr_acq:
- case ARM::BI_InterlockedOr64_acq:
+ case clang::ARM::BI_InterlockedOr8_acq:
+ case clang::ARM::BI_InterlockedOr16_acq:
+ case clang::ARM::BI_InterlockedOr_acq:
+ case clang::ARM::BI_InterlockedOr64_acq:
return MSVCIntrin::_InterlockedOr_acq;
- case ARM::BI_InterlockedOr8_rel:
- case ARM::BI_InterlockedOr16_rel:
- case ARM::BI_InterlockedOr_rel:
- case ARM::BI_InterlockedOr64_rel:
+ case clang::ARM::BI_InterlockedOr8_rel:
+ case clang::ARM::BI_InterlockedOr16_rel:
+ case clang::ARM::BI_InterlockedOr_rel:
+ case clang::ARM::BI_InterlockedOr64_rel:
return MSVCIntrin::_InterlockedOr_rel;
- case ARM::BI_InterlockedOr8_nf:
- case ARM::BI_InterlockedOr16_nf:
- case ARM::BI_InterlockedOr_nf:
- case ARM::BI_InterlockedOr64_nf:
+ case clang::ARM::BI_InterlockedOr8_nf:
+ case clang::ARM::BI_InterlockedOr16_nf:
+ case clang::ARM::BI_InterlockedOr_nf:
+ case clang::ARM::BI_InterlockedOr64_nf:
return MSVCIntrin::_InterlockedOr_nf;
- case ARM::BI_InterlockedXor8_acq:
- case ARM::BI_InterlockedXor16_acq:
- case ARM::BI_InterlockedXor_acq:
- case ARM::BI_InterlockedXor64_acq:
+ case clang::ARM::BI_InterlockedXor8_acq:
+ case clang::ARM::BI_InterlockedXor16_acq:
+ case clang::ARM::BI_InterlockedXor_acq:
+ case clang::ARM::BI_InterlockedXor64_acq:
return MSVCIntrin::_InterlockedXor_acq;
- case ARM::BI_InterlockedXor8_rel:
- case ARM::BI_InterlockedXor16_rel:
- case ARM::BI_InterlockedXor_rel:
- case ARM::BI_InterlockedXor64_rel:
+ case clang::ARM::BI_InterlockedXor8_rel:
+ case clang::ARM::BI_InterlockedXor16_rel:
+ case clang::ARM::BI_InterlockedXor_rel:
+ case clang::ARM::BI_InterlockedXor64_rel:
return MSVCIntrin::_InterlockedXor_rel;
- case ARM::BI_InterlockedXor8_nf:
- case ARM::BI_InterlockedXor16_nf:
- case ARM::BI_InterlockedXor_nf:
- case ARM::BI_InterlockedXor64_nf:
+ case clang::ARM::BI_InterlockedXor8_nf:
+ case clang::ARM::BI_InterlockedXor16_nf:
+ case clang::ARM::BI_InterlockedXor_nf:
+ case clang::ARM::BI_InterlockedXor64_nf:
return MSVCIntrin::_InterlockedXor_nf;
- case ARM::BI_InterlockedAnd8_acq:
- case ARM::BI_InterlockedAnd16_acq:
- case ARM::BI_InterlockedAnd_acq:
- case ARM::BI_InterlockedAnd64_acq:
+ case clang::ARM::BI_InterlockedAnd8_acq:
+ case clang::ARM::BI_InterlockedAnd16_acq:
+ case clang::ARM::BI_InterlockedAnd_acq:
+ case clang::ARM::BI_InterlockedAnd64_acq:
return MSVCIntrin::_InterlockedAnd_acq;
- case ARM::BI_InterlockedAnd8_rel:
- case ARM::BI_InterlockedAnd16_rel:
- case ARM::BI_InterlockedAnd_rel:
- case ARM::BI_InterlockedAnd64_rel:
+ case clang::ARM::BI_InterlockedAnd8_rel:
+ case clang::ARM::BI_InterlockedAnd16_rel:
+ case clang::ARM::BI_InterlockedAnd_rel:
+ case clang::ARM::BI_InterlockedAnd64_rel:
return MSVCIntrin::_InterlockedAnd_rel;
- case ARM::BI_InterlockedAnd8_nf:
- case ARM::BI_InterlockedAnd16_nf:
- case ARM::BI_InterlockedAnd_nf:
- case ARM::BI_InterlockedAnd64_nf:
+ case clang::ARM::BI_InterlockedAnd8_nf:
+ case clang::ARM::BI_InterlockedAnd16_nf:
+ case clang::ARM::BI_InterlockedAnd_nf:
+ case clang::ARM::BI_InterlockedAnd64_nf:
return MSVCIntrin::_InterlockedAnd_nf;
- case ARM::BI_InterlockedIncrement16_acq:
- case ARM::BI_InterlockedIncrement_acq:
- case ARM::BI_InterlockedIncrement64_acq:
+ case clang::ARM::BI_InterlockedIncrement16_acq:
+ case clang::ARM::BI_InterlockedIncrement_acq:
+ case clang::ARM::BI_InterlockedIncrement64_acq:
return MSVCIntrin::_InterlockedIncrement_acq;
- case ARM::BI_InterlockedIncrement16_rel:
- case ARM::BI_InterlockedIncrement_rel:
- case ARM::BI_InterlockedIncrement64_rel:
+ case clang::ARM::BI_InterlockedIncrement16_rel:
+ case clang::ARM::BI_InterlockedIncrement_rel:
+ case clang::ARM::BI_InterlockedIncrement64_rel:
return MSVCIntrin::_InterlockedIncrement_rel;
- case ARM::BI_InterlockedIncrement16_nf:
- case ARM::BI_InterlockedIncrement_nf:
- case ARM::BI_InterlockedIncrement64_nf:
+ case clang::ARM::BI_InterlockedIncrement16_nf:
+ case clang::ARM::BI_InterlockedIncrement_nf:
+ case clang::ARM::BI_InterlockedIncrement64_nf:
return MSVCIntrin::_InterlockedIncrement_nf;
- case ARM::BI_InterlockedDecrement16_acq:
- case ARM::BI_InterlockedDecrement_acq:
- case ARM::BI_InterlockedDecrement64_acq:
+ case clang::ARM::BI_InterlockedDecrement16_acq:
+ case clang::ARM::BI_InterlockedDecrement_acq:
+ case clang::ARM::BI_InterlockedDecrement64_acq:
return MSVCIntrin::_InterlockedDecrement_acq;
- case ARM::BI_InterlockedDecrement16_rel:
- case ARM::BI_InterlockedDecrement_rel:
- case ARM::BI_InterlockedDecrement64_rel:
+ case clang::ARM::BI_InterlockedDecrement16_rel:
+ case clang::ARM::BI_InterlockedDecrement_rel:
+ case clang::ARM::BI_InterlockedDecrement64_rel:
return MSVCIntrin::_InterlockedDecrement_rel;
- case ARM::BI_InterlockedDecrement16_nf:
- case ARM::BI_InterlockedDecrement_nf:
- case ARM::BI_InterlockedDecrement64_nf:
+ case clang::ARM::BI_InterlockedDecrement16_nf:
+ case clang::ARM::BI_InterlockedDecrement_nf:
+ case clang::ARM::BI_InterlockedDecrement64_nf:
return MSVCIntrin::_InterlockedDecrement_nf;
}
llvm_unreachable("must return from switch");
}
-static Optional<CodeGenFunction::MSVCIntrin>
+static std::optional<CodeGenFunction::MSVCIntrin>
translateAarch64ToMsvcIntrin(unsigned BuiltinID) {
using MSVCIntrin = CodeGenFunction::MSVCIntrin;
switch (BuiltinID) {
default:
- return None;
- case AArch64::BI_BitScanForward:
- case AArch64::BI_BitScanForward64:
+ return std::nullopt;
+ case clang::AArch64::BI_BitScanForward:
+ case clang::AArch64::BI_BitScanForward64:
return MSVCIntrin::_BitScanForward;
- case AArch64::BI_BitScanReverse:
- case AArch64::BI_BitScanReverse64:
+ case clang::AArch64::BI_BitScanReverse:
+ case clang::AArch64::BI_BitScanReverse64:
return MSVCIntrin::_BitScanReverse;
- case AArch64::BI_InterlockedAnd64:
+ case clang::AArch64::BI_InterlockedAnd64:
return MSVCIntrin::_InterlockedAnd;
- case AArch64::BI_InterlockedExchange64:
+ case clang::AArch64::BI_InterlockedExchange64:
return MSVCIntrin::_InterlockedExchange;
- case AArch64::BI_InterlockedExchangeAdd64:
+ case clang::AArch64::BI_InterlockedExchangeAdd64:
return MSVCIntrin::_InterlockedExchangeAdd;
- case AArch64::BI_InterlockedExchangeSub64:
+ case clang::AArch64::BI_InterlockedExchangeSub64:
return MSVCIntrin::_InterlockedExchangeSub;
- case AArch64::BI_InterlockedOr64:
+ case clang::AArch64::BI_InterlockedOr64:
return MSVCIntrin::_InterlockedOr;
- case AArch64::BI_InterlockedXor64:
+ case clang::AArch64::BI_InterlockedXor64:
return MSVCIntrin::_InterlockedXor;
- case AArch64::BI_InterlockedDecrement64:
+ case clang::AArch64::BI_InterlockedDecrement64:
return MSVCIntrin::_InterlockedDecrement;
- case AArch64::BI_InterlockedIncrement64:
+ case clang::AArch64::BI_InterlockedIncrement64:
return MSVCIntrin::_InterlockedIncrement;
- case AArch64::BI_InterlockedExchangeAdd8_acq:
- case AArch64::BI_InterlockedExchangeAdd16_acq:
- case AArch64::BI_InterlockedExchangeAdd_acq:
- case AArch64::BI_InterlockedExchangeAdd64_acq:
+ case clang::AArch64::BI_InterlockedExchangeAdd8_acq:
+ case clang::AArch64::BI_InterlockedExchangeAdd16_acq:
+ case clang::AArch64::BI_InterlockedExchangeAdd_acq:
+ case clang::AArch64::BI_InterlockedExchangeAdd64_acq:
return MSVCIntrin::_InterlockedExchangeAdd_acq;
- case AArch64::BI_InterlockedExchangeAdd8_rel:
- case AArch64::BI_InterlockedExchangeAdd16_rel:
- case AArch64::BI_InterlockedExchangeAdd_rel:
- case AArch64::BI_InterlockedExchangeAdd64_rel:
+ case clang::AArch64::BI_InterlockedExchangeAdd8_rel:
+ case clang::AArch64::BI_InterlockedExchangeAdd16_rel:
+ case clang::AArch64::BI_InterlockedExchangeAdd_rel:
+ case clang::AArch64::BI_InterlockedExchangeAdd64_rel:
return MSVCIntrin::_InterlockedExchangeAdd_rel;
- case AArch64::BI_InterlockedExchangeAdd8_nf:
- case AArch64::BI_InterlockedExchangeAdd16_nf:
- case AArch64::BI_InterlockedExchangeAdd_nf:
- case AArch64::BI_InterlockedExchangeAdd64_nf:
+ case clang::AArch64::BI_InterlockedExchangeAdd8_nf:
+ case clang::AArch64::BI_InterlockedExchangeAdd16_nf:
+ case clang::AArch64::BI_InterlockedExchangeAdd_nf:
+ case clang::AArch64::BI_InterlockedExchangeAdd64_nf:
return MSVCIntrin::_InterlockedExchangeAdd_nf;
- case AArch64::BI_InterlockedExchange8_acq:
- case AArch64::BI_InterlockedExchange16_acq:
- case AArch64::BI_InterlockedExchange_acq:
- case AArch64::BI_InterlockedExchange64_acq:
+ case clang::AArch64::BI_InterlockedExchange8_acq:
+ case clang::AArch64::BI_InterlockedExchange16_acq:
+ case clang::AArch64::BI_InterlockedExchange_acq:
+ case clang::AArch64::BI_InterlockedExchange64_acq:
return MSVCIntrin::_InterlockedExchange_acq;
- case AArch64::BI_InterlockedExchange8_rel:
- case AArch64::BI_InterlockedExchange16_rel:
- case AArch64::BI_InterlockedExchange_rel:
- case AArch64::BI_InterlockedExchange64_rel:
+ case clang::AArch64::BI_InterlockedExchange8_rel:
+ case clang::AArch64::BI_InterlockedExchange16_rel:
+ case clang::AArch64::BI_InterlockedExchange_rel:
+ case clang::AArch64::BI_InterlockedExchange64_rel:
return MSVCIntrin::_InterlockedExchange_rel;
- case AArch64::BI_InterlockedExchange8_nf:
- case AArch64::BI_InterlockedExchange16_nf:
- case AArch64::BI_InterlockedExchange_nf:
- case AArch64::BI_InterlockedExchange64_nf:
+ case clang::AArch64::BI_InterlockedExchange8_nf:
+ case clang::AArch64::BI_InterlockedExchange16_nf:
+ case clang::AArch64::BI_InterlockedExchange_nf:
+ case clang::AArch64::BI_InterlockedExchange64_nf:
return MSVCIntrin::_InterlockedExchange_nf;
- case AArch64::BI_InterlockedCompareExchange8_acq:
- case AArch64::BI_InterlockedCompareExchange16_acq:
- case AArch64::BI_InterlockedCompareExchange_acq:
- case AArch64::BI_InterlockedCompareExchange64_acq:
+ case clang::AArch64::BI_InterlockedCompareExchange8_acq:
+ case clang::AArch64::BI_InterlockedCompareExchange16_acq:
+ case clang::AArch64::BI_InterlockedCompareExchange_acq:
+ case clang::AArch64::BI_InterlockedCompareExchange64_acq:
return MSVCIntrin::_InterlockedCompareExchange_acq;
- case AArch64::BI_InterlockedCompareExchange8_rel:
- case AArch64::BI_InterlockedCompareExchange16_rel:
- case AArch64::BI_InterlockedCompareExchange_rel:
- case AArch64::BI_InterlockedCompareExchange64_rel:
+ case clang::AArch64::BI_InterlockedCompareExchange8_rel:
+ case clang::AArch64::BI_InterlockedCompareExchange16_rel:
+ case clang::AArch64::BI_InterlockedCompareExchange_rel:
+ case clang::AArch64::BI_InterlockedCompareExchange64_rel:
return MSVCIntrin::_InterlockedCompareExchange_rel;
- case AArch64::BI_InterlockedCompareExchange8_nf:
- case AArch64::BI_InterlockedCompareExchange16_nf:
- case AArch64::BI_InterlockedCompareExchange_nf:
- case AArch64::BI_InterlockedCompareExchange64_nf:
+ case clang::AArch64::BI_InterlockedCompareExchange8_nf:
+ case clang::AArch64::BI_InterlockedCompareExchange16_nf:
+ case clang::AArch64::BI_InterlockedCompareExchange_nf:
+ case clang::AArch64::BI_InterlockedCompareExchange64_nf:
return MSVCIntrin::_InterlockedCompareExchange_nf;
- case AArch64::BI_InterlockedCompareExchange128:
+ case clang::AArch64::BI_InterlockedCompareExchange128:
return MSVCIntrin::_InterlockedCompareExchange128;
- case AArch64::BI_InterlockedCompareExchange128_acq:
+ case clang::AArch64::BI_InterlockedCompareExchange128_acq:
return MSVCIntrin::_InterlockedCompareExchange128_acq;
- case AArch64::BI_InterlockedCompareExchange128_nf:
+ case clang::AArch64::BI_InterlockedCompareExchange128_nf:
return MSVCIntrin::_InterlockedCompareExchange128_nf;
- case AArch64::BI_InterlockedCompareExchange128_rel:
+ case clang::AArch64::BI_InterlockedCompareExchange128_rel:
return MSVCIntrin::_InterlockedCompareExchange128_rel;
- case AArch64::BI_InterlockedOr8_acq:
- case AArch64::BI_InterlockedOr16_acq:
- case AArch64::BI_InterlockedOr_acq:
- case AArch64::BI_InterlockedOr64_acq:
+ case clang::AArch64::BI_InterlockedOr8_acq:
+ case clang::AArch64::BI_InterlockedOr16_acq:
+ case clang::AArch64::BI_InterlockedOr_acq:
+ case clang::AArch64::BI_InterlockedOr64_acq:
return MSVCIntrin::_InterlockedOr_acq;
- case AArch64::BI_InterlockedOr8_rel:
- case AArch64::BI_InterlockedOr16_rel:
- case AArch64::BI_InterlockedOr_rel:
- case AArch64::BI_InterlockedOr64_rel:
+ case clang::AArch64::BI_InterlockedOr8_rel:
+ case clang::AArch64::BI_InterlockedOr16_rel:
+ case clang::AArch64::BI_InterlockedOr_rel:
+ case clang::AArch64::BI_InterlockedOr64_rel:
return MSVCIntrin::_InterlockedOr_rel;
- case AArch64::BI_InterlockedOr8_nf:
- case AArch64::BI_InterlockedOr16_nf:
- case AArch64::BI_InterlockedOr_nf:
- case AArch64::BI_InterlockedOr64_nf:
+ case clang::AArch64::BI_InterlockedOr8_nf:
+ case clang::AArch64::BI_InterlockedOr16_nf:
+ case clang::AArch64::BI_InterlockedOr_nf:
+ case clang::AArch64::BI_InterlockedOr64_nf:
return MSVCIntrin::_InterlockedOr_nf;
- case AArch64::BI_InterlockedXor8_acq:
- case AArch64::BI_InterlockedXor16_acq:
- case AArch64::BI_InterlockedXor_acq:
- case AArch64::BI_InterlockedXor64_acq:
+ case clang::AArch64::BI_InterlockedXor8_acq:
+ case clang::AArch64::BI_InterlockedXor16_acq:
+ case clang::AArch64::BI_InterlockedXor_acq:
+ case clang::AArch64::BI_InterlockedXor64_acq:
return MSVCIntrin::_InterlockedXor_acq;
- case AArch64::BI_InterlockedXor8_rel:
- case AArch64::BI_InterlockedXor16_rel:
- case AArch64::BI_InterlockedXor_rel:
- case AArch64::BI_InterlockedXor64_rel:
+ case clang::AArch64::BI_InterlockedXor8_rel:
+ case clang::AArch64::BI_InterlockedXor16_rel:
+ case clang::AArch64::BI_InterlockedXor_rel:
+ case clang::AArch64::BI_InterlockedXor64_rel:
return MSVCIntrin::_InterlockedXor_rel;
- case AArch64::BI_InterlockedXor8_nf:
- case AArch64::BI_InterlockedXor16_nf:
- case AArch64::BI_InterlockedXor_nf:
- case AArch64::BI_InterlockedXor64_nf:
+ case clang::AArch64::BI_InterlockedXor8_nf:
+ case clang::AArch64::BI_InterlockedXor16_nf:
+ case clang::AArch64::BI_InterlockedXor_nf:
+ case clang::AArch64::BI_InterlockedXor64_nf:
return MSVCIntrin::_InterlockedXor_nf;
- case AArch64::BI_InterlockedAnd8_acq:
- case AArch64::BI_InterlockedAnd16_acq:
- case AArch64::BI_InterlockedAnd_acq:
- case AArch64::BI_InterlockedAnd64_acq:
+ case clang::AArch64::BI_InterlockedAnd8_acq:
+ case clang::AArch64::BI_InterlockedAnd16_acq:
+ case clang::AArch64::BI_InterlockedAnd_acq:
+ case clang::AArch64::BI_InterlockedAnd64_acq:
return MSVCIntrin::_InterlockedAnd_acq;
- case AArch64::BI_InterlockedAnd8_rel:
- case AArch64::BI_InterlockedAnd16_rel:
- case AArch64::BI_InterlockedAnd_rel:
- case AArch64::BI_InterlockedAnd64_rel:
+ case clang::AArch64::BI_InterlockedAnd8_rel:
+ case clang::AArch64::BI_InterlockedAnd16_rel:
+ case clang::AArch64::BI_InterlockedAnd_rel:
+ case clang::AArch64::BI_InterlockedAnd64_rel:
return MSVCIntrin::_InterlockedAnd_rel;
- case AArch64::BI_InterlockedAnd8_nf:
- case AArch64::BI_InterlockedAnd16_nf:
- case AArch64::BI_InterlockedAnd_nf:
- case AArch64::BI_InterlockedAnd64_nf:
+ case clang::AArch64::BI_InterlockedAnd8_nf:
+ case clang::AArch64::BI_InterlockedAnd16_nf:
+ case clang::AArch64::BI_InterlockedAnd_nf:
+ case clang::AArch64::BI_InterlockedAnd64_nf:
return MSVCIntrin::_InterlockedAnd_nf;
- case AArch64::BI_InterlockedIncrement16_acq:
- case AArch64::BI_InterlockedIncrement_acq:
- case AArch64::BI_InterlockedIncrement64_acq:
+ case clang::AArch64::BI_InterlockedIncrement16_acq:
+ case clang::AArch64::BI_InterlockedIncrement_acq:
+ case clang::AArch64::BI_InterlockedIncrement64_acq:
return MSVCIntrin::_InterlockedIncrement_acq;
- case AArch64::BI_InterlockedIncrement16_rel:
- case AArch64::BI_InterlockedIncrement_rel:
- case AArch64::BI_InterlockedIncrement64_rel:
+ case clang::AArch64::BI_InterlockedIncrement16_rel:
+ case clang::AArch64::BI_InterlockedIncrement_rel:
+ case clang::AArch64::BI_InterlockedIncrement64_rel:
return MSVCIntrin::_InterlockedIncrement_rel;
- case AArch64::BI_InterlockedIncrement16_nf:
- case AArch64::BI_InterlockedIncrement_nf:
- case AArch64::BI_InterlockedIncrement64_nf:
+ case clang::AArch64::BI_InterlockedIncrement16_nf:
+ case clang::AArch64::BI_InterlockedIncrement_nf:
+ case clang::AArch64::BI_InterlockedIncrement64_nf:
return MSVCIntrin::_InterlockedIncrement_nf;
- case AArch64::BI_InterlockedDecrement16_acq:
- case AArch64::BI_InterlockedDecrement_acq:
- case AArch64::BI_InterlockedDecrement64_acq:
+ case clang::AArch64::BI_InterlockedDecrement16_acq:
+ case clang::AArch64::BI_InterlockedDecrement_acq:
+ case clang::AArch64::BI_InterlockedDecrement64_acq:
return MSVCIntrin::_InterlockedDecrement_acq;
- case AArch64::BI_InterlockedDecrement16_rel:
- case AArch64::BI_InterlockedDecrement_rel:
- case AArch64::BI_InterlockedDecrement64_rel:
+ case clang::AArch64::BI_InterlockedDecrement16_rel:
+ case clang::AArch64::BI_InterlockedDecrement_rel:
+ case clang::AArch64::BI_InterlockedDecrement64_rel:
return MSVCIntrin::_InterlockedDecrement_rel;
- case AArch64::BI_InterlockedDecrement16_nf:
- case AArch64::BI_InterlockedDecrement_nf:
- case AArch64::BI_InterlockedDecrement64_nf:
+ case clang::AArch64::BI_InterlockedDecrement16_nf:
+ case clang::AArch64::BI_InterlockedDecrement_nf:
+ case clang::AArch64::BI_InterlockedDecrement64_nf:
return MSVCIntrin::_InterlockedDecrement_nf;
}
llvm_unreachable("must return from switch");
}
-static Optional<CodeGenFunction::MSVCIntrin>
+static std::optional<CodeGenFunction::MSVCIntrin>
translateX86ToMsvcIntrin(unsigned BuiltinID) {
using MSVCIntrin = CodeGenFunction::MSVCIntrin;
switch (BuiltinID) {
default:
- return None;
+ return std::nullopt;
case clang::X86::BI_BitScanForward:
case clang::X86::BI_BitScanForward64:
return MSVCIntrin::_BitScanForward;
@@ -1481,8 +1804,7 @@ Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
Value *ArgValue = EmitScalarExpr(E->getArg(1));
llvm::Type *ArgType = ArgValue->getType();
- llvm::Type *IndexType =
- IndexAddress.getPointer()->getType()->getPointerElementType();
+ llvm::Type *IndexType = IndexAddress.getElementType();
llvm::Type *ResultType = ConvertType(E->getType());
Value *ArgZero = llvm::Constant::getNullValue(ArgType);
@@ -1668,7 +1990,7 @@ Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
&& "Unsupported builtin check kind");
Value *ArgValue = EmitScalarExpr(E);
- if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef())
+ if (!SanOpts.has(SanitizerKind::Builtin))
return ArgValue;
SanitizerScope SanScope(this);
@@ -1678,10 +2000,49 @@ Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
SanitizerHandler::InvalidBuiltin,
{EmitCheckSourceLocation(E->getExprLoc()),
llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
- None);
+ std::nullopt);
return ArgValue;
}
+static Value *EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW) {
+ return CGF.Builder.CreateBinaryIntrinsic(
+ Intrinsic::abs, ArgValue,
+ ConstantInt::get(CGF.Builder.getInt1Ty(), HasNSW));
+}
+
+static Value *EmitOverflowCheckedAbs(CodeGenFunction &CGF, const CallExpr *E,
+ bool SanitizeOverflow) {
+ Value *ArgValue = CGF.EmitScalarExpr(E->getArg(0));
+
+ // Try to eliminate overflow check.
+ if (const auto *VCI = dyn_cast<llvm::ConstantInt>(ArgValue)) {
+ if (!VCI->isMinSignedValue())
+ return EmitAbs(CGF, ArgValue, true);
+ }
+
+ CodeGenFunction::SanitizerScope SanScope(&CGF);
+
+ Constant *Zero = Constant::getNullValue(ArgValue->getType());
+ Value *ResultAndOverflow = CGF.Builder.CreateBinaryIntrinsic(
+ Intrinsic::ssub_with_overflow, Zero, ArgValue);
+ Value *Result = CGF.Builder.CreateExtractValue(ResultAndOverflow, 0);
+ Value *NotOverflow = CGF.Builder.CreateNot(
+ CGF.Builder.CreateExtractValue(ResultAndOverflow, 1));
+
+ // TODO: support -ftrapv-handler.
+ if (SanitizeOverflow) {
+ CGF.EmitCheck({{NotOverflow, SanitizerKind::SignedIntegerOverflow}},
+ SanitizerHandler::NegateOverflow,
+ {CGF.EmitCheckSourceLocation(E->getArg(0)->getExprLoc()),
+ CGF.EmitCheckTypeDescriptor(E->getType())},
+ {ArgValue});
+ } else
+ CGF.EmitTrapCheck(NotOverflow, SanitizerHandler::SubOverflow);
+
+ Value *CmpResult = CGF.Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
+ return CGF.Builder.CreateSelect(CmpResult, Result, ArgValue, "abs");
+}
+
/// Get the argument type for arguments to os_log_helper.
static CanQualType getOSLogArgType(ASTContext &C, int Size) {
QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
@@ -1712,7 +2073,7 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
FunctionArgList Args;
Args.push_back(ImplicitParamDecl::Create(
Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
- ImplicitParamDecl::Other));
+ ImplicitParamKind::Other));
ArgTys.emplace_back(Ctx.VoidPtrTy);
for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
@@ -1724,7 +2085,7 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
Args.push_back(ImplicitParamDecl::Create(
Ctx, nullptr, SourceLocation(),
&Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
- ImplicitParamDecl::Other));
+ ImplicitParamKind::Other));
ArgTys.emplace_back(ArgTy);
}
@@ -1754,8 +2115,9 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
auto AL = ApplyDebugLocation::CreateArtificial(*this);
CharUnits Offset;
- Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"),
- BufferAlignment);
+ Address BufAddr =
+ Address(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), Int8Ty,
+ BufferAlignment);
Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
@@ -1776,8 +2138,7 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
Address Arg = GetAddrOfLocalVar(Args[I]);
Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
- Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(),
- "argDataCast");
+ Addr = Addr.withElementType(Arg.getElementType());
Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
Offset += Size;
++I;
@@ -1976,7 +2337,7 @@ EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
// Signed overflow occurs if the result is greater than INT_MAX or lesser
// than INT_MIN, i.e when |Result| > (INT_MAX + IsNegative).
auto IntMax =
- llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth);
+ llvm::APInt::getSignedMaxValue(ResultInfo.Width).zext(OpWidth);
llvm::Value *MaxResult =
CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
CGF.Builder.CreateZExt(IsNegative, OpTy));
@@ -2017,89 +2378,6 @@ EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
return RValue::get(Overflow);
}
-static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
- Value *&RecordPtr, CharUnits Align,
- llvm::FunctionCallee Func, int Lvl) {
- ASTContext &Context = CGF.getContext();
- RecordDecl *RD = RType->castAs<RecordType>()->getDecl()->getDefinition();
- std::string Pad = std::string(Lvl * 4, ' ');
-
- Value *GString =
- CGF.Builder.CreateGlobalStringPtr(RType.getAsString() + " {\n");
- Value *Res = CGF.Builder.CreateCall(Func, {GString});
-
- static llvm::DenseMap<QualType, const char *> Types;
- if (Types.empty()) {
- Types[Context.CharTy] = "%c";
- Types[Context.BoolTy] = "%d";
- Types[Context.SignedCharTy] = "%hhd";
- Types[Context.UnsignedCharTy] = "%hhu";
- Types[Context.IntTy] = "%d";
- Types[Context.UnsignedIntTy] = "%u";
- Types[Context.LongTy] = "%ld";
- Types[Context.UnsignedLongTy] = "%lu";
- Types[Context.LongLongTy] = "%lld";
- Types[Context.UnsignedLongLongTy] = "%llu";
- Types[Context.ShortTy] = "%hd";
- Types[Context.UnsignedShortTy] = "%hu";
- Types[Context.VoidPtrTy] = "%p";
- Types[Context.FloatTy] = "%f";
- Types[Context.DoubleTy] = "%f";
- Types[Context.LongDoubleTy] = "%Lf";
- Types[Context.getPointerType(Context.CharTy)] = "%s";
- Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s";
- }
-
- for (const auto *FD : RD->fields()) {
- Value *FieldPtr = RecordPtr;
- if (RD->isUnion())
- FieldPtr = CGF.Builder.CreatePointerCast(
- FieldPtr, CGF.ConvertType(Context.getPointerType(FD->getType())));
- else
- FieldPtr = CGF.Builder.CreateStructGEP(CGF.ConvertType(RType), FieldPtr,
- FD->getFieldIndex());
-
- GString = CGF.Builder.CreateGlobalStringPtr(
- llvm::Twine(Pad)
- .concat(FD->getType().getAsString())
- .concat(llvm::Twine(' '))
- .concat(FD->getNameAsString())
- .concat(" : ")
- .str());
- Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
- Res = CGF.Builder.CreateAdd(Res, TmpRes);
-
- QualType CanonicalType =
- FD->getType().getUnqualifiedType().getCanonicalType();
-
- // We check whether we are in a recursive type
- if (CanonicalType->isRecordType()) {
- TmpRes = dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1);
- Res = CGF.Builder.CreateAdd(TmpRes, Res);
- continue;
- }
-
- // We try to determine the best format to print the current field
- llvm::Twine Format = Types.find(CanonicalType) == Types.end()
- ? Types[Context.VoidPtrTy]
- : Types[CanonicalType];
-
- Address FieldAddress = Address(FieldPtr, Align);
- FieldPtr = CGF.Builder.CreateLoad(FieldAddress);
-
- // FIXME Need to handle bitfield here
- GString = CGF.Builder.CreateGlobalStringPtr(
- Format.concat(llvm::Twine('\n')).str());
- TmpRes = CGF.Builder.CreateCall(Func, {GString, FieldPtr});
- Res = CGF.Builder.CreateAdd(Res, TmpRes);
- }
-
- GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n");
- Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
- Res = CGF.Builder.CreateAdd(Res, TmpRes);
- return Res;
-}
-
static bool
TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
llvm::SmallPtrSetImpl<const Decl *> &Seen) {
@@ -2223,13 +2501,38 @@ static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
}
}
+static Value *tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID,
+ Value *V) {
+ if (CGF.Builder.getIsFPConstrained() &&
+ CGF.Builder.getDefaultConstrainedExcept() != fp::ebIgnore) {
+ if (Value *Result =
+ CGF.getTargetHooks().testFPKind(V, BuiltinID, CGF.Builder, CGF.CGM))
+ return Result;
+ }
+ return nullptr;
+}
+
+static RValue EmitHipStdParUnsupportedBuiltin(CodeGenFunction *CGF,
+ const FunctionDecl *FD) {
+ auto Name = FD->getNameAsString() + "__hipstdpar_unsupported";
+ auto FnTy = CGF->CGM.getTypes().GetFunctionType(FD);
+ auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
+
+ SmallVector<Value *, 16> Args;
+ for (auto &&FormalTy : FnTy->params())
+ Args.push_back(llvm::PoisonValue::get(FormalTy));
+
+ return RValue::get(CGF->Builder.CreateCall(UBF, Args));
+}
+
RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
const CallExpr *E,
ReturnValueSlot ReturnValue) {
const FunctionDecl *FD = GD.getDecl()->getAsFunction();
// See if we can constant fold this builtin. If so, don't emit it at all.
+ // TODO: Extend this handling to all builtin calls that we can constant-fold.
Expr::EvalResult Result;
- if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
+ if (E->isPRValue() && E->EvaluateAsRValue(Result, CGM.getContext()) &&
!Result.hasSideEffects()) {
if (Result.Val.isInt())
return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
@@ -2254,12 +2557,96 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
const unsigned BuiltinIDIfNoAsmLabel =
FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID;
+ std::optional<bool> ErrnoOverriden;
+ // ErrnoOverriden is true if math-errno is overriden via the
+ // '#pragma float_control(precise, on)'. This pragma disables fast-math,
+ // which implies math-errno.
+ if (E->hasStoredFPFeatures()) {
+ FPOptionsOverride OP = E->getFPFeatures();
+ if (OP.hasMathErrnoOverride())
+ ErrnoOverriden = OP.getMathErrnoOverride();
+ }
+ // True if 'atttibute__((optnone)) is used. This attibute overrides
+ // fast-math which implies math-errno.
+ bool OptNone = CurFuncDecl && CurFuncDecl->hasAttr<OptimizeNoneAttr>();
+
+ // True if we are compiling at -O2 and errno has been disabled
+ // using the '#pragma float_control(precise, off)', and
+ // attribute opt-none hasn't been seen.
+ bool ErrnoOverridenToFalseWithOpt =
+ ErrnoOverriden.has_value() && !ErrnoOverriden.value() && !OptNone &&
+ CGM.getCodeGenOpts().OptimizationLevel != 0;
+
// There are LLVM math intrinsics/instructions corresponding to math library
// functions except the LLVM op will never set errno while the math library
// might. Also, math builtins have the same semantics as their math library
// twins. Thus, we can transform math library and builtin calls to their
// LLVM counterparts if the call is marked 'const' (known to never set errno).
- if (FD->hasAttr<ConstAttr>()) {
+ // In case FP exceptions are enabled, the experimental versions of the
+ // intrinsics model those.
+ bool ConstAlways =
+ getContext().BuiltinInfo.isConst(BuiltinID);
+
+ // There's a special case with the fma builtins where they are always const
+ // if the target environment is GNU or the target is OS is Windows and we're
+ // targeting the MSVCRT.dll environment.
+ // FIXME: This list can be become outdated. Need to find a way to get it some
+ // other way.
+ switch (BuiltinID) {
+ case Builtin::BI__builtin_fma:
+ case Builtin::BI__builtin_fmaf:
+ case Builtin::BI__builtin_fmal:
+ case Builtin::BIfma:
+ case Builtin::BIfmaf:
+ case Builtin::BIfmal: {
+ auto &Trip = CGM.getTriple();
+ if (Trip.isGNUEnvironment() || Trip.isOSMSVCRT())
+ ConstAlways = true;
+ break;
+ }
+ default:
+ break;
+ }
+
+ bool ConstWithoutErrnoAndExceptions =
+ getContext().BuiltinInfo.isConstWithoutErrnoAndExceptions(BuiltinID);
+ bool ConstWithoutExceptions =
+ getContext().BuiltinInfo.isConstWithoutExceptions(BuiltinID);
+
+ // ConstAttr is enabled in fast-math mode. In fast-math mode, math-errno is
+ // disabled.
+ // Math intrinsics are generated only when math-errno is disabled. Any pragmas
+ // or attributes that affect math-errno should prevent or allow math
+ // intrincs to be generated. Intrinsics are generated:
+ // 1- In fast math mode, unless math-errno is overriden
+ // via '#pragma float_control(precise, on)', or via an
+ // 'attribute__((optnone))'.
+ // 2- If math-errno was enabled on command line but overriden
+ // to false via '#pragma float_control(precise, off))' and
+ // 'attribute__((optnone))' hasn't been used.
+ // 3- If we are compiling with optimization and errno has been disabled
+ // via '#pragma float_control(precise, off)', and
+ // 'attribute__((optnone))' hasn't been used.
+
+ bool ConstWithoutErrnoOrExceptions =
+ ConstWithoutErrnoAndExceptions || ConstWithoutExceptions;
+ bool GenerateIntrinsics =
+ (ConstAlways && !OptNone) ||
+ (!getLangOpts().MathErrno &&
+ !(ErrnoOverriden.has_value() && ErrnoOverriden.value()) && !OptNone);
+ if (!GenerateIntrinsics) {
+ GenerateIntrinsics =
+ ConstWithoutErrnoOrExceptions && !ConstWithoutErrnoAndExceptions;
+ if (!GenerateIntrinsics)
+ GenerateIntrinsics =
+ ConstWithoutErrnoOrExceptions &&
+ (!getLangOpts().MathErrno &&
+ !(ErrnoOverriden.has_value() && ErrnoOverriden.value()) && !OptNone);
+ if (!GenerateIntrinsics)
+ GenerateIntrinsics =
+ ConstWithoutErrnoOrExceptions && ErrnoOverridenToFalseWithOpt;
+ }
+ if (GenerateIntrinsics) {
switch (BuiltinIDIfNoAsmLabel) {
case Builtin::BIceil:
case Builtin::BIceilf:
@@ -2318,7 +2705,16 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
Intrinsic::exp2,
Intrinsic::experimental_constrained_exp2));
-
+ case Builtin::BI__builtin_exp10:
+ case Builtin::BI__builtin_exp10f:
+ case Builtin::BI__builtin_exp10f16:
+ case Builtin::BI__builtin_exp10l:
+ case Builtin::BI__builtin_exp10f128: {
+ // TODO: strictfp support
+ if (Builder.getIsFPConstrained())
+ break;
+ return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::exp10));
+ }
case Builtin::BIfabs:
case Builtin::BIfabsf:
case Builtin::BIfabsl:
@@ -2476,6 +2872,18 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Intrinsic::round,
Intrinsic::experimental_constrained_round));
+ case Builtin::BIroundeven:
+ case Builtin::BIroundevenf:
+ case Builtin::BIroundevenl:
+ case Builtin::BI__builtin_roundeven:
+ case Builtin::BI__builtin_roundevenf:
+ case Builtin::BI__builtin_roundevenf16:
+ case Builtin::BI__builtin_roundevenl:
+ case Builtin::BI__builtin_roundevenf128:
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
+ Intrinsic::roundeven,
+ Intrinsic::experimental_constrained_roundeven));
+
case Builtin::BIsin:
case Builtin::BIsinf:
case Builtin::BIsinl:
@@ -2496,10 +2904,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_sqrtf16:
case Builtin::BI__builtin_sqrtl:
case Builtin::BI__builtin_sqrtf128:
- return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
- Intrinsic::sqrt,
- Intrinsic::experimental_constrained_sqrt));
-
+ case Builtin::BI__builtin_elementwise_sqrt: {
+ llvm::Value *Call = emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::sqrt, Intrinsic::experimental_constrained_sqrt);
+ SetSqrtFPAccuracy(Call);
+ return RValue::get(Call);
+ }
case Builtin::BItrunc:
case Builtin::BItruncf:
case Builtin::BItruncl:
@@ -2555,12 +2965,41 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
*this, E, Intrinsic::llrint,
Intrinsic::experimental_constrained_llrint));
-
+ case Builtin::BI__builtin_ldexp:
+ case Builtin::BI__builtin_ldexpf:
+ case Builtin::BI__builtin_ldexpl:
+ case Builtin::BI__builtin_ldexpf16:
+ case Builtin::BI__builtin_ldexpf128: {
+ return RValue::get(emitBinaryExpMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::ldexp,
+ Intrinsic::experimental_constrained_ldexp));
+ }
default:
break;
}
}
+ // Check NonnullAttribute/NullabilityArg and Alignment.
+ auto EmitArgCheck = [&](TypeCheckKind Kind, Address A, const Expr *Arg,
+ unsigned ParmNum) {
+ Value *Val = A.getPointer();
+ EmitNonNullArgCheck(RValue::get(Val), Arg->getType(), Arg->getExprLoc(), FD,
+ ParmNum);
+
+ if (SanOpts.has(SanitizerKind::Alignment)) {
+ SanitizerSet SkippedChecks;
+ SkippedChecks.set(SanitizerKind::All);
+ SkippedChecks.clear(SanitizerKind::Alignment);
+ SourceLocation Loc = Arg->getExprLoc();
+ // Strip an implicit cast.
+ if (auto *CE = dyn_cast<ImplicitCastExpr>(Arg))
+ if (CE->getCastKind() == CK_BitCast)
+ Arg = CE->getSubExpr();
+ EmitTypeCheck(Kind, Loc, Val, Arg->getType(), A.getAlignment(),
+ SkippedChecks);
+ }
+ };
+
switch (BuiltinIDIfNoAsmLabel) {
default: break;
case Builtin::BI__builtin___CFStringMakeConstantString:
@@ -2570,32 +3009,41 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_va_start:
case Builtin::BI__va_start:
case Builtin::BI__builtin_va_end:
- return RValue::get(
- EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
- ? EmitScalarExpr(E->getArg(0))
- : EmitVAListRef(E->getArg(0)).getPointer(),
- BuiltinID != Builtin::BI__builtin_va_end));
+ EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
+ ? EmitScalarExpr(E->getArg(0))
+ : EmitVAListRef(E->getArg(0)).getPointer(),
+ BuiltinID != Builtin::BI__builtin_va_end);
+ return RValue::get(nullptr);
case Builtin::BI__builtin_va_copy: {
Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer();
Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer();
-
- llvm::Type *Type = Int8PtrTy;
-
- DstPtr = Builder.CreateBitCast(DstPtr, Type);
- SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
- return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy),
- {DstPtr, SrcPtr}));
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy), {DstPtr, SrcPtr});
+ return RValue::get(nullptr);
}
+ case Builtin::BIabs:
+ case Builtin::BIlabs:
+ case Builtin::BIllabs:
case Builtin::BI__builtin_abs:
case Builtin::BI__builtin_labs:
case Builtin::BI__builtin_llabs: {
- // X < 0 ? -X : X
- // The negation has 'nsw' because abs of INT_MIN is undefined.
- Value *ArgValue = EmitScalarExpr(E->getArg(0));
- Value *NegOp = Builder.CreateNSWNeg(ArgValue, "neg");
- Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType());
- Value *CmpResult = Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
- Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs");
+ bool SanitizeOverflow = SanOpts.has(SanitizerKind::SignedIntegerOverflow);
+
+ Value *Result;
+ switch (getLangOpts().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Defined:
+ Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), false);
+ break;
+ case LangOptions::SOB_Undefined:
+ if (!SanitizeOverflow) {
+ Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), true);
+ break;
+ }
+ [[fallthrough]];
+ case LangOptions::SOB_Trapping:
+ // TODO: Somehow handle the corner case when the address of abs is taken.
+ Result = EmitOverflowCheckedAbs(*this, E, SanitizeOverflow);
+ break;
+ }
return RValue::get(Result);
}
case Builtin::BI__builtin_complex: {
@@ -2625,23 +3073,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(ComplexVal.first);
}
- case Builtin::BI__builtin_dump_struct: {
- llvm::Type *LLVMIntTy = getTypes().ConvertType(getContext().IntTy);
- llvm::FunctionType *LLVMFuncType = llvm::FunctionType::get(
- LLVMIntTy, {llvm::Type::getInt8PtrTy(getLLVMContext())}, true);
-
- Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts());
- CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment();
-
- const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts();
- QualType Arg0Type = Arg0->getType()->getPointeeType();
-
- Value *RecordPtr = EmitScalarExpr(Arg0);
- Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align,
- {LLVMFuncType, Func}, 0);
- return RValue::get(Res);
- }
-
case Builtin::BI__builtin_preserve_access_index: {
// Only enabled preserved access index region when debuginfo
// is available as debuginfo is needed to preserve user-level
@@ -2856,7 +3287,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
- AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
+ AlignmentCI = ConstantInt::get(AlignmentCI->getIntegerType(),
llvm::Value::MaximumAlignment);
emitAlignmentAssumption(PtrValue, Ptr,
@@ -2871,7 +3302,20 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *ArgValue = EmitScalarExpr(E->getArg(0));
Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
- return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
+ Builder.CreateCall(FnAssume, ArgValue);
+ return RValue::get(nullptr);
+ }
+ case Builtin::BI__builtin_assume_separate_storage: {
+ const Expr *Arg0 = E->getArg(0);
+ const Expr *Arg1 = E->getArg(1);
+
+ Value *Value0 = EmitScalarExpr(Arg0);
+ Value *Value1 = EmitScalarExpr(Arg1);
+
+ Value *Values[] = {Value0, Value1};
+ OperandBundleDefT<Value *> OBD("separate_storage", Values);
+ Builder.CreateAssumption(ConstantInt::getTrue(getLLVMContext()), {OBD});
+ return RValue::get(nullptr);
}
case Builtin::BI__arithmetic_fence: {
// Create the builtin call if FastMath is selected, and the target
@@ -2905,7 +3349,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
case Builtin::BI__builtin_bswap16:
case Builtin::BI__builtin_bswap32:
- case Builtin::BI__builtin_bswap64: {
+ case Builtin::BI__builtin_bswap64:
+ case Builtin::BI_byteswap_ushort:
+ case Builtin::BI_byteswap_ulong:
+ case Builtin::BI_byteswap_uint64: {
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap));
}
case Builtin::BI__builtin_bitreverse8:
@@ -2989,7 +3436,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::ConstantInt::get(Int32Ty, 3);
Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
- return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data}));
+ Builder.CreateCall(F, {Address, RW, Locality, Data});
+ return RValue::get(nullptr);
}
case Builtin::BI__builtin_readcyclecounter: {
Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
@@ -3002,9 +3450,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(Builder.CreateCall(F, {Begin, End}));
}
case Builtin::BI__builtin_trap:
- return RValue::get(EmitTrapCall(Intrinsic::trap));
+ EmitTrapCall(Intrinsic::trap);
+ return RValue::get(nullptr);
case Builtin::BI__debugbreak:
- return RValue::get(EmitTrapCall(Intrinsic::debugtrap));
+ EmitTrapCall(Intrinsic::debugtrap);
+ return RValue::get(nullptr);
case Builtin::BI__builtin_unreachable: {
EmitUnreachable(E->getExprLoc());
@@ -3021,6 +3471,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
if (Builder.getIsFPConstrained()) {
+ // FIXME: llvm.powi has 2 mangling types,
+ // llvm.experimental.constrained.powi has one.
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi,
Src0->getType());
@@ -3031,6 +3483,19 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
{ Src0->getType(), Src1->getType() });
return RValue::get(Builder.CreateCall(F, { Src0, Src1 }));
}
+ case Builtin::BI__builtin_frexpl: {
+ // Linux PPC will not be adding additional PPCDoubleDouble support.
+ // WIP to switch default to IEEE long double. Will emit libcall for
+ // frexpl instead of legalizing this type in the BE.
+ if (&getTarget().getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble())
+ break;
+ LLVM_FALLTHROUGH;
+ }
+ case Builtin::BI__builtin_frexp:
+ case Builtin::BI__builtin_frexpf:
+ case Builtin::BI__builtin_frexpf128:
+ case Builtin::BI__builtin_frexpf16:
+ return RValue::get(emitFrexpBuiltin(*this, E, Intrinsic::frexp));
case Builtin::BI__builtin_isgreater:
case Builtin::BI__builtin_isgreaterequal:
case Builtin::BI__builtin_isless:
@@ -3040,7 +3505,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Ordered comparisons: we know the arguments to these are matching scalar
// floating point values.
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
- // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
@@ -3068,50 +3532,277 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// ZExt bool to int type.
return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
}
+
case Builtin::BI__builtin_isnan: {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
Value *V = EmitScalarExpr(E->getArg(0));
- llvm::Type *Ty = V->getType();
- const llvm::fltSemantics &Semantics = Ty->getFltSemantics();
- if (!Builder.getIsFPConstrained() ||
- Builder.getDefaultConstrainedExcept() == fp::ebIgnore ||
- !Ty->isIEEE()) {
- V = Builder.CreateFCmpUNO(V, V, "cmp");
- return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
- }
+ if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
+ return RValue::get(Result);
+ return RValue::get(
+ Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNan),
+ ConvertType(E->getType())));
+ }
- if (Value *Result = getTargetHooks().testFPKind(V, BuiltinID, Builder, CGM))
+ case Builtin::BI__builtin_issignaling: {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+ Value *V = EmitScalarExpr(E->getArg(0));
+ return RValue::get(
+ Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSNan),
+ ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_isinf: {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+ Value *V = EmitScalarExpr(E->getArg(0));
+ if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
return RValue::get(Result);
+ return RValue::get(
+ Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcInf),
+ ConvertType(E->getType())));
+ }
- // NaN has all exp bits set and a non zero significand. Therefore:
- // isnan(V) == ((exp mask - (abs(V) & exp mask)) < 0)
- unsigned bitsize = Ty->getScalarSizeInBits();
- llvm::IntegerType *IntTy = Builder.getIntNTy(bitsize);
- Value *IntV = Builder.CreateBitCast(V, IntTy);
- APInt AndMask = APInt::getSignedMaxValue(bitsize);
- Value *AbsV =
- Builder.CreateAnd(IntV, llvm::ConstantInt::get(IntTy, AndMask));
- APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt();
- Value *Sub =
- Builder.CreateSub(llvm::ConstantInt::get(IntTy, ExpMask), AbsV);
- // V = sign bit (Sub) <=> V = (Sub < 0)
- V = Builder.CreateLShr(Sub, llvm::ConstantInt::get(IntTy, bitsize - 1));
- if (bitsize > 32)
- V = Builder.CreateTrunc(V, ConvertType(E->getType()));
- return RValue::get(V);
+ case Builtin::BIfinite:
+ case Builtin::BI__finite:
+ case Builtin::BIfinitef:
+ case Builtin::BI__finitef:
+ case Builtin::BIfinitel:
+ case Builtin::BI__finitel:
+ case Builtin::BI__builtin_isfinite: {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+ Value *V = EmitScalarExpr(E->getArg(0));
+ if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
+ return RValue::get(Result);
+ return RValue::get(
+ Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcFinite),
+ ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_isnormal: {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+ Value *V = EmitScalarExpr(E->getArg(0));
+ return RValue::get(
+ Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNormal),
+ ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_issubnormal: {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+ Value *V = EmitScalarExpr(E->getArg(0));
+ return RValue::get(
+ Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSubnormal),
+ ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_iszero: {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+ Value *V = EmitScalarExpr(E->getArg(0));
+ return RValue::get(
+ Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcZero),
+ ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_isfpclass: {
+ Expr::EvalResult Result;
+ if (!E->getArg(1)->EvaluateAsInt(Result, CGM.getContext()))
+ break;
+ uint64_t Test = Result.Val.getInt().getLimitedValue();
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+ Value *V = EmitScalarExpr(E->getArg(0));
+ return RValue::get(Builder.CreateZExt(Builder.createIsFPClass(V, Test),
+ ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_nondeterministic_value: {
+ llvm::Type *Ty = ConvertType(E->getArg(0)->getType());
+
+ Value *Result = PoisonValue::get(Ty);
+ Result = Builder.CreateFreeze(Result);
+
+ return RValue::get(Result);
+ }
+
+ case Builtin::BI__builtin_elementwise_abs: {
+ Value *Result;
+ QualType QT = E->getArg(0)->getType();
+
+ if (auto *VecTy = QT->getAs<VectorType>())
+ QT = VecTy->getElementType();
+ if (QT->isIntegerType())
+ Result = Builder.CreateBinaryIntrinsic(
+ llvm::Intrinsic::abs, EmitScalarExpr(E->getArg(0)),
+ Builder.getFalse(), nullptr, "elt.abs");
+ else
+ Result = emitUnaryBuiltin(*this, E, llvm::Intrinsic::fabs, "elt.abs");
+
+ return RValue::get(Result);
}
+ case Builtin::BI__builtin_elementwise_ceil:
+ return RValue::get(
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::ceil, "elt.ceil"));
+ case Builtin::BI__builtin_elementwise_exp:
+ return RValue::get(
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::exp, "elt.exp"));
+ case Builtin::BI__builtin_elementwise_exp2:
+ return RValue::get(
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::exp2, "elt.exp2"));
+ case Builtin::BI__builtin_elementwise_log:
+ return RValue::get(
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::log, "elt.log"));
+ case Builtin::BI__builtin_elementwise_log2:
+ return RValue::get(
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::log2, "elt.log2"));
+ case Builtin::BI__builtin_elementwise_log10:
+ return RValue::get(
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::log10, "elt.log10"));
+ case Builtin::BI__builtin_elementwise_pow: {
+ return RValue::get(emitBinaryBuiltin(*this, E, llvm::Intrinsic::pow));
+ }
+ case Builtin::BI__builtin_elementwise_bitreverse:
+ return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::bitreverse,
+ "elt.bitreverse"));
+ case Builtin::BI__builtin_elementwise_cos:
+ return RValue::get(
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::cos, "elt.cos"));
+ case Builtin::BI__builtin_elementwise_floor:
+ return RValue::get(
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::floor, "elt.floor"));
+ case Builtin::BI__builtin_elementwise_roundeven:
+ return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::roundeven,
+ "elt.roundeven"));
+ case Builtin::BI__builtin_elementwise_round:
+ return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::round,
+ "elt.round"));
+ case Builtin::BI__builtin_elementwise_rint:
+ return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::rint,
+ "elt.rint"));
+ case Builtin::BI__builtin_elementwise_nearbyint:
+ return RValue::get(emitUnaryBuiltin(*this, E, llvm::Intrinsic::nearbyint,
+ "elt.nearbyint"));
+ case Builtin::BI__builtin_elementwise_sin:
+ return RValue::get(
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::sin, "elt.sin"));
+
+ case Builtin::BI__builtin_elementwise_trunc:
+ return RValue::get(
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::trunc, "elt.trunc"));
+ case Builtin::BI__builtin_elementwise_canonicalize:
+ return RValue::get(
+ emitUnaryBuiltin(*this, E, llvm::Intrinsic::canonicalize, "elt.canonicalize"));
+ case Builtin::BI__builtin_elementwise_copysign:
+ return RValue::get(emitBinaryBuiltin(*this, E, llvm::Intrinsic::copysign));
+ case Builtin::BI__builtin_elementwise_fma:
+ return RValue::get(emitTernaryBuiltin(*this, E, llvm::Intrinsic::fma));
+ case Builtin::BI__builtin_elementwise_add_sat:
+ case Builtin::BI__builtin_elementwise_sub_sat: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Result;
+ assert(Op0->getType()->isIntOrIntVectorTy() && "integer type expected");
+ QualType Ty = E->getArg(0)->getType();
+ if (auto *VecTy = Ty->getAs<VectorType>())
+ Ty = VecTy->getElementType();
+ bool IsSigned = Ty->isSignedIntegerType();
+ unsigned Opc;
+ if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_add_sat)
+ Opc = IsSigned ? llvm::Intrinsic::sadd_sat : llvm::Intrinsic::uadd_sat;
+ else
+ Opc = IsSigned ? llvm::Intrinsic::ssub_sat : llvm::Intrinsic::usub_sat;
+ Result = Builder.CreateBinaryIntrinsic(Opc, Op0, Op1, nullptr, "elt.sat");
+ return RValue::get(Result);
+ }
+
+ case Builtin::BI__builtin_elementwise_max: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Result;
+ if (Op0->getType()->isIntOrIntVectorTy()) {
+ QualType Ty = E->getArg(0)->getType();
+ if (auto *VecTy = Ty->getAs<VectorType>())
+ Ty = VecTy->getElementType();
+ Result = Builder.CreateBinaryIntrinsic(Ty->isSignedIntegerType()
+ ? llvm::Intrinsic::smax
+ : llvm::Intrinsic::umax,
+ Op0, Op1, nullptr, "elt.max");
+ } else
+ Result = Builder.CreateMaxNum(Op0, Op1, "elt.max");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_elementwise_min: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Result;
+ if (Op0->getType()->isIntOrIntVectorTy()) {
+ QualType Ty = E->getArg(0)->getType();
+ if (auto *VecTy = Ty->getAs<VectorType>())
+ Ty = VecTy->getElementType();
+ Result = Builder.CreateBinaryIntrinsic(Ty->isSignedIntegerType()
+ ? llvm::Intrinsic::smin
+ : llvm::Intrinsic::umin,
+ Op0, Op1, nullptr, "elt.min");
+ } else
+ Result = Builder.CreateMinNum(Op0, Op1, "elt.min");
+ return RValue::get(Result);
+ }
+
+ case Builtin::BI__builtin_reduce_max: {
+ auto GetIntrinsicID = [](QualType QT) {
+ if (auto *VecTy = QT->getAs<VectorType>())
+ QT = VecTy->getElementType();
+ if (QT->isSignedIntegerType())
+ return llvm::Intrinsic::vector_reduce_smax;
+ if (QT->isUnsignedIntegerType())
+ return llvm::Intrinsic::vector_reduce_umax;
+ assert(QT->isFloatingType() && "must have a float here");
+ return llvm::Intrinsic::vector_reduce_fmax;
+ };
+ return RValue::get(emitUnaryBuiltin(
+ *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
+ }
+
+ case Builtin::BI__builtin_reduce_min: {
+ auto GetIntrinsicID = [](QualType QT) {
+ if (auto *VecTy = QT->getAs<VectorType>())
+ QT = VecTy->getElementType();
+ if (QT->isSignedIntegerType())
+ return llvm::Intrinsic::vector_reduce_smin;
+ if (QT->isUnsignedIntegerType())
+ return llvm::Intrinsic::vector_reduce_umin;
+ assert(QT->isFloatingType() && "must have a float here");
+ return llvm::Intrinsic::vector_reduce_fmin;
+ };
+
+ return RValue::get(emitUnaryBuiltin(
+ *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
+ }
+
+ case Builtin::BI__builtin_reduce_add:
+ return RValue::get(emitUnaryBuiltin(
+ *this, E, llvm::Intrinsic::vector_reduce_add, "rdx.add"));
+ case Builtin::BI__builtin_reduce_mul:
+ return RValue::get(emitUnaryBuiltin(
+ *this, E, llvm::Intrinsic::vector_reduce_mul, "rdx.mul"));
+ case Builtin::BI__builtin_reduce_xor:
+ return RValue::get(emitUnaryBuiltin(
+ *this, E, llvm::Intrinsic::vector_reduce_xor, "rdx.xor"));
+ case Builtin::BI__builtin_reduce_or:
+ return RValue::get(emitUnaryBuiltin(
+ *this, E, llvm::Intrinsic::vector_reduce_or, "rdx.or"));
+ case Builtin::BI__builtin_reduce_and:
+ return RValue::get(emitUnaryBuiltin(
+ *this, E, llvm::Intrinsic::vector_reduce_and, "rdx.and"));
+
case Builtin::BI__builtin_matrix_transpose: {
- const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
+ auto *MatrixTy = E->getArg(0)->getType()->castAs<ConstantMatrixType>();
Value *MatValue = EmitScalarExpr(E->getArg(0));
- MatrixBuilder<CGBuilderTy> MB(Builder);
+ MatrixBuilder MB(Builder);
Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
MatrixTy->getNumColumns());
return RValue::get(Result);
}
case Builtin::BI__builtin_matrix_column_major_load: {
- MatrixBuilder<CGBuilderTy> MB(Builder);
+ MatrixBuilder MB(Builder);
// Emit everything that isn't dependent on the first parameter type
Value *Stride = EmitScalarExpr(E->getArg(3));
const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
@@ -3123,14 +3814,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
Value *Result = MB.CreateColumnMajorLoad(
- Src.getPointer(), Align(Src.getAlignment().getQuantity()), Stride,
- IsVolatile, ResultTy->getNumRows(), ResultTy->getNumColumns(),
+ Src.getElementType(), Src.getPointer(),
+ Align(Src.getAlignment().getQuantity()), Stride, IsVolatile,
+ ResultTy->getNumRows(), ResultTy->getNumColumns(),
"matrix");
return RValue::get(Result);
}
case Builtin::BI__builtin_matrix_column_major_store: {
- MatrixBuilder<CGBuilderTy> MB(Builder);
+ MatrixBuilder MB(Builder);
Value *Matrix = EmitScalarExpr(E->getArg(0));
Address Dst = EmitPointerWithAlignment(E->getArg(1));
Value *Stride = EmitScalarExpr(E->getArg(2));
@@ -3148,52 +3840,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(Result);
}
- case Builtin::BIfinite:
- case Builtin::BI__finite:
- case Builtin::BIfinitef:
- case Builtin::BI__finitef:
- case Builtin::BIfinitel:
- case Builtin::BI__finitel:
- case Builtin::BI__builtin_isinf:
- case Builtin::BI__builtin_isfinite: {
- // isinf(x) --> fabs(x) == infinity
- // isfinite(x) --> fabs(x) != infinity
- // x != NaN via the ordered compare in either case.
- CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
- Value *V = EmitScalarExpr(E->getArg(0));
- llvm::Type *Ty = V->getType();
- if (!Builder.getIsFPConstrained() ||
- Builder.getDefaultConstrainedExcept() == fp::ebIgnore ||
- !Ty->isIEEE()) {
- Value *Fabs = EmitFAbs(*this, V);
- Constant *Infinity = ConstantFP::getInfinity(V->getType());
- CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf)
- ? CmpInst::FCMP_OEQ
- : CmpInst::FCMP_ONE;
- Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf");
- return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType())));
- }
-
- if (Value *Result = getTargetHooks().testFPKind(V, BuiltinID, Builder, CGM))
- return RValue::get(Result);
-
- // Inf values have all exp bits set and a zero significand. Therefore:
- // isinf(V) == ((V << 1) == ((exp mask) << 1))
- // isfinite(V) == ((V << 1) < ((exp mask) << 1)) using unsigned comparison
- unsigned bitsize = Ty->getScalarSizeInBits();
- llvm::IntegerType *IntTy = Builder.getIntNTy(bitsize);
- Value *IntV = Builder.CreateBitCast(V, IntTy);
- Value *Shl1 = Builder.CreateShl(IntV, 1);
- const llvm::fltSemantics &Semantics = Ty->getFltSemantics();
- APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt();
- Value *ExpMaskShl1 = llvm::ConstantInt::get(IntTy, ExpMask.shl(1));
- if (BuiltinID == Builtin::BI__builtin_isinf)
- V = Builder.CreateICmpEQ(Shl1, ExpMaskShl1);
- else
- V = Builder.CreateICmpULT(Shl1, ExpMaskShl1);
- return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
- }
-
case Builtin::BI__builtin_isinf_sign: {
// isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
@@ -3213,28 +3859,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(Result);
}
- case Builtin::BI__builtin_isnormal: {
- // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
- CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
- // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
- Value *V = EmitScalarExpr(E->getArg(0));
- Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
-
- Value *Abs = EmitFAbs(*this, V);
- Value *IsLessThanInf =
- Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
- APFloat Smallest = APFloat::getSmallestNormalized(
- getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
- Value *IsNormal =
- Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
- "isnormal");
- V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
- V = Builder.CreateAnd(V, IsNormal, "and");
- return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
- }
-
case Builtin::BI__builtin_flt_rounds: {
- Function *F = CGM.getIntrinsic(Intrinsic::flt_rounds);
+ Function *F = CGM.getIntrinsic(Intrinsic::get_rounding);
llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F);
@@ -3244,6 +3870,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(Result);
}
+ case Builtin::BI__builtin_set_flt_rounds: {
+ Function *F = CGM.getIntrinsic(Intrinsic::set_rounding);
+
+ Value *V = EmitScalarExpr(E->getArg(0));
+ Builder.CreateCall(F, V);
+ return RValue::get(nullptr);
+ }
+
case Builtin::BI__builtin_fpclassify: {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
// FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
@@ -3304,8 +3938,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(Result);
}
+ // An alloca will always return a pointer to the alloca (stack) address
+ // space. This address space need not be the same as the AST / Language
+ // default (e.g. in C / C++ auto vars are in the generic address space). At
+ // the AST level this is handled within CreateTempAlloca et al., but for the
+ // builtin / dynamic alloca we have to handle it here. We use an explicit cast
+ // instead of passing an AS to CreateAlloca so as to not inhibit optimisation.
case Builtin::BIalloca:
case Builtin::BI_alloca:
+ case Builtin::BI__builtin_alloca_uninitialized:
case Builtin::BI__builtin_alloca: {
Value *Size = EmitScalarExpr(E->getArg(0));
const TargetInfo &TI = getContext().getTargetInfo();
@@ -3316,10 +3957,19 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
.getAsAlign();
AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
AI->setAlignment(SuitableAlignmentInBytes);
- initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
+ if (BuiltinID != Builtin::BI__builtin_alloca_uninitialized)
+ initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
+ LangAS AAS = getASTAllocaAddressSpace();
+ LangAS EAS = E->getType()->getPointeeType().getAddressSpace();
+ if (AAS != EAS) {
+ llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
+ return RValue::get(getTargetHooks().performAddrSpaceCast(*this, AI, AAS,
+ EAS, Ty));
+ }
return RValue::get(AI);
}
+ case Builtin::BI__builtin_alloca_with_align_uninitialized:
case Builtin::BI__builtin_alloca_with_align: {
Value *Size = EmitScalarExpr(E->getArg(0));
Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
@@ -3329,7 +3979,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
AI->setAlignment(AlignmentInBytes);
- initializeAlloca(*this, AI, Size, AlignmentInBytes);
+ if (BuiltinID != Builtin::BI__builtin_alloca_with_align_uninitialized)
+ initializeAlloca(*this, AI, Size, AlignmentInBytes);
+ LangAS AAS = getASTAllocaAddressSpace();
+ LangAS EAS = E->getType()->getPointeeType().getAddressSpace();
+ if (AAS != EAS) {
+ llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
+ return RValue::get(getTargetHooks().performAddrSpaceCast(*this, AI, AAS,
+ EAS, Ty));
+ }
return RValue::get(AI);
}
@@ -3342,6 +4000,20 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
return RValue::get(nullptr);
}
+
+ case Builtin::BIbcopy:
+ case Builtin::BI__builtin_bcopy: {
+ Address Src = EmitPointerWithAlignment(E->getArg(0));
+ Address Dest = EmitPointerWithAlignment(E->getArg(1));
+ Value *SizeVal = EmitScalarExpr(E->getArg(2));
+ EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
+ E->getArg(0)->getExprLoc(), FD, 0);
+ EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(1)->getType(),
+ E->getArg(1)->getExprLoc(), FD, 0);
+ Builder.CreateMemMove(Dest, Src, SizeVal, false);
+ return RValue::get(Dest.getPointer());
+ }
+
case Builtin::BImemcpy:
case Builtin::BI__builtin_memcpy:
case Builtin::BImempcpy:
@@ -3349,10 +4021,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Address Dest = EmitPointerWithAlignment(E->getArg(0));
Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
- E->getArg(0)->getExprLoc(), FD, 0);
- EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
- E->getArg(1)->getExprLoc(), FD, 1);
+ EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
+ EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
Builder.CreateMemCpy(Dest, Src, SizeVal, false);
if (BuiltinID == Builtin::BImempcpy ||
BuiltinID == Builtin::BI__builtin_mempcpy)
@@ -3367,10 +4037,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Address Src = EmitPointerWithAlignment(E->getArg(1));
uint64_t Size =
E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
- EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
- E->getArg(0)->getExprLoc(), FD, 0);
- EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
- E->getArg(1)->getExprLoc(), FD, 1);
+ EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
+ EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
Builder.CreateMemCpyInline(Dest, Src, Size);
return RValue::get(nullptr);
}
@@ -3427,10 +4095,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Address Dest = EmitPointerWithAlignment(E->getArg(0));
Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
- EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
- E->getArg(0)->getExprLoc(), FD, 0);
- EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
- E->getArg(1)->getExprLoc(), FD, 1);
+ EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
+ EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
Builder.CreateMemMove(Dest, Src, SizeVal, false);
return RValue::get(Dest.getPointer());
}
@@ -3445,6 +4111,17 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
return RValue::get(Dest.getPointer());
}
+ case Builtin::BI__builtin_memset_inline: {
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
+ Value *ByteVal =
+ Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty());
+ uint64_t Size =
+ E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
+ EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
+ E->getArg(0)->getExprLoc(), FD, 0);
+ Builder.CreateMemSetInline(Dest, ByteVal, Size);
+ return RValue::get(nullptr);
+ }
case Builtin::BI__builtin___memset_chk: {
// fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
Expr::EvalResult SizeResult, DstSizeResult;
@@ -3642,7 +4319,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
case Builtin::BI__builtin_unwind_init: {
Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
- return RValue::get(Builder.CreateCall(F));
+ Builder.CreateCall(F);
+ return RValue::get(nullptr);
}
case Builtin::BI__builtin_extend_pointer: {
// Extends a pointer to the size of an _Unwind_Word, which is
@@ -3680,19 +4358,18 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Builder.CreateStore(FrameAddr, Buf);
// Store the stack pointer to the setjmp buffer.
- Value *StackAddr =
- Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
+ Value *StackAddr = Builder.CreateStackSave();
+ assert(Buf.getPointer()->getType() == StackAddr->getType());
+
Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
Builder.CreateStore(StackAddr, StackSaveSlot);
// Call LLVM's EH setjmp, which is lightweight.
Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
- Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
}
case Builtin::BI__builtin_longjmp: {
Value *Buf = EmitScalarExpr(E->getArg(0));
- Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
// Call LLVM's EH longjmp, which is lightweight.
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
@@ -3855,15 +4532,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__sync_lock_release_4:
case Builtin::BI__sync_lock_release_8:
case Builtin::BI__sync_lock_release_16: {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
+ Address Ptr = CheckAtomicAlignment(*this, E);
QualType ElTy = E->getArg(0)->getType()->getPointeeType();
- CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
+
llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
- StoreSize.getQuantity() * 8);
- Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
+ getContext().getTypeSize(ElTy));
llvm::StoreInst *Store =
- Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr,
- StoreSize);
+ Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
Store->setAtomic(llvm::AtomicOrdering::Release);
return RValue::get(nullptr);
}
@@ -3914,9 +4589,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
bool Volatile =
PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
- Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
+ Address Ptr =
+ EmitPointerWithAlignment(E->getArg(0)).withElementType(Int8Ty);
+
Value *NewVal = Builder.getInt8(1);
Value *Order = EmitScalarExpr(E->getArg(1));
if (isa<llvm::ConstantInt>(Order)) {
@@ -3998,8 +4673,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
Address Ptr = EmitPointerWithAlignment(E->getArg(0));
- unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace();
- Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
+ Ptr = Ptr.withElementType(Int8Ty);
Value *NewVal = Builder.getInt8(0);
Value *Order = EmitScalarExpr(E->getArg(1));
if (isa<llvm::ConstantInt>(Order)) {
@@ -4136,7 +4810,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
StringRef WideBytes = Str->getBytes();
std::string StrUtf8;
if (!convertUTF16ToUTF8String(
- makeArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
+ ArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
continue;
}
@@ -4152,8 +4826,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
case Builtin::BI__builtin_annotation: {
llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
- llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
- AnnVal->getType());
+ llvm::Function *F =
+ CGM.getIntrinsic(llvm::Intrinsic::annotation,
+ {AnnVal->getType(), CGM.ConstGlobalsPtrTy});
// Get the annotation string, go through casts. Sema requires this to be a
// non-wide string literal, potentially casted, so the cast<> is safe.
@@ -4393,14 +5068,20 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(Carry);
}
+ case Builtin::BIaddressof:
+ case Builtin::BI__addressof:
case Builtin::BI__builtin_addressof:
return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
+ case Builtin::BI__builtin_function_start:
+ return RValue::get(CGM.GetFunctionStart(
+ E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext())));
case Builtin::BI__builtin_operator_new:
return EmitBuiltinNewDeleteCall(
E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
case Builtin::BI__builtin_operator_delete:
- return EmitBuiltinNewDeleteCall(
+ EmitBuiltinNewDeleteCall(
E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
+ return RValue::get(nullptr);
case Builtin::BI__builtin_is_aligned:
return EmitBuiltinIsAligned(E);
@@ -4428,13 +5109,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI_InterlockedCompareExchangePointer:
case Builtin::BI_InterlockedCompareExchangePointer_nf: {
llvm::Type *RTy;
- llvm::IntegerType *IntType =
- IntegerType::get(getLLVMContext(),
- getContext().getTypeSize(E->getType()));
- llvm::Type *IntPtrType = IntType->getPointerTo();
+ llvm::IntegerType *IntType = IntegerType::get(
+ getLLVMContext(), getContext().getTypeSize(E->getType()));
- llvm::Value *Destination =
- Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType);
+ Address DestAddr = CheckAtomicAlignment(*this, E);
llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
RTy = Exchange->getType();
@@ -4447,7 +5125,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ?
AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent;
- auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
+ auto Result = Builder.CreateAtomicCmpXchg(DestAddr, Comparand, Exchange,
Ordering, Ordering);
Result->setVolatile(true);
@@ -4549,30 +5227,30 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
break;
+ // C++ std:: builtins.
+ case Builtin::BImove:
+ case Builtin::BImove_if_noexcept:
+ case Builtin::BIforward:
+ case Builtin::BIforward_like:
+ case Builtin::BIas_const:
+ return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
case Builtin::BI__GetExceptionInfo: {
if (llvm::GlobalVariable *GV =
CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
- return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy));
+ return RValue::get(GV);
break;
}
case Builtin::BI__fastfail:
return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));
- case Builtin::BI__builtin_coro_size: {
- auto & Context = getContext();
- auto SizeTy = Context.getSizeType();
- auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
- Function *F = CGM.getIntrinsic(Intrinsic::coro_size, T);
- return RValue::get(Builder.CreateCall(F));
- }
-
case Builtin::BI__builtin_coro_id:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
case Builtin::BI__builtin_coro_promise:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
case Builtin::BI__builtin_coro_resume:
- return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
+ EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
+ return RValue::get(nullptr);
case Builtin::BI__builtin_coro_frame:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
case Builtin::BI__builtin_coro_noop:
@@ -4580,7 +5258,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_coro_free:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
case Builtin::BI__builtin_coro_destroy:
- return EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
+ EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
+ return RValue::get(nullptr);
case Builtin::BI__builtin_coro_done:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
case Builtin::BI__builtin_coro_alloc:
@@ -4591,8 +5270,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
case Builtin::BI__builtin_coro_suspend:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
- case Builtin::BI__builtin_coro_param:
- return EmitCoroutineIntrinsic(E, Intrinsic::coro_param);
+ case Builtin::BI__builtin_coro_size:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_size);
+ case Builtin::BI__builtin_coro_align:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_align);
// OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
case Builtin::BIread_pipe:
@@ -4606,8 +5287,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Type of the generic packet parameter.
unsigned GenericAS =
getContext().getTargetAddressSpace(LangAS::opencl_generic);
- llvm::Type *I8PTy = llvm::PointerType::get(
- llvm::Type::getInt8Ty(getLLVMContext()), GenericAS);
+ llvm::Type *I8PTy = llvm::PointerType::get(getLLVMContext(), GenericAS);
// Testing which overloaded version we should generate the call for.
if (2U == E->getNumArgs()) {
@@ -4752,11 +5432,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BIto_local:
case Builtin::BIto_private: {
auto Arg0 = EmitScalarExpr(E->getArg(0));
- auto NewArgT = llvm::PointerType::get(Int8Ty,
- CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
- auto NewRetT = llvm::PointerType::get(Int8Ty,
- CGM.getContext().getTargetAddressSpace(
- E->getType()->getPointeeType().getAddressSpace()));
+ auto NewArgT = llvm::PointerType::get(
+ getLLVMContext(),
+ CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
+ auto NewRetT = llvm::PointerType::get(
+ getLLVMContext(),
+ CGM.getContext().getTargetAddressSpace(
+ E->getType()->getPointeeType().getAddressSpace()));
auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
llvm::Value *NewArg;
if (Arg0->getType()->getPointerAddressSpace() !=
@@ -4778,7 +5460,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
unsigned NumArgs = E->getNumArgs();
llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
- llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
+ llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
getContext().getTargetAddressSpace(LangAS::opencl_generic));
llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
@@ -4799,11 +5481,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
llvm::Value *Kernel =
- Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
+ Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
llvm::Value *Block =
Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
- AttrBuilder B;
+ AttrBuilder B(Builder.getContext());
B.addByValAttr(NDRangeL.getAddress(*this).getElementType());
llvm::AttributeList ByValAttrSet =
llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
@@ -4822,7 +5504,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
-> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> {
llvm::APInt ArraySize(32, NumArgs - First);
QualType SizeArrayTy = getContext().getConstantArrayType(
- getContext().getSizeType(), ArraySize, nullptr, ArrayType::Normal,
+ getContext().getSizeType(), ArraySize, nullptr,
+ ArraySizeModifier::Normal,
/*IndexTypeQuals=*/0);
auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
llvm::Value *TmpPtr = Tmp.getPointer();
@@ -4853,7 +5536,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
llvm::Value *Kernel =
- Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
+ Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4);
@@ -4877,8 +5560,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
// Any calls now have event arguments passed.
if (NumArgs >= 7) {
- llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy);
- llvm::PointerType *EventPtrTy = EventTy->getPointerTo(
+ llvm::PointerType *PtrTy = llvm::PointerType::get(
+ CGM.getLLVMContext(),
CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
llvm::Value *NumEvents =
@@ -4890,33 +5573,33 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Value *EventWaitList = nullptr;
if (E->getArg(4)->isNullPointerConstant(
getContext(), Expr::NPC_ValueDependentIsNotNull)) {
- EventWaitList = llvm::ConstantPointerNull::get(EventPtrTy);
+ EventWaitList = llvm::ConstantPointerNull::get(PtrTy);
} else {
EventWaitList = E->getArg(4)->getType()->isArrayType()
? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
: EmitScalarExpr(E->getArg(4));
// Convert to generic address space.
- EventWaitList = Builder.CreatePointerCast(EventWaitList, EventPtrTy);
+ EventWaitList = Builder.CreatePointerCast(EventWaitList, PtrTy);
}
llvm::Value *EventRet = nullptr;
if (E->getArg(5)->isNullPointerConstant(
getContext(), Expr::NPC_ValueDependentIsNotNull)) {
- EventRet = llvm::ConstantPointerNull::get(EventPtrTy);
+ EventRet = llvm::ConstantPointerNull::get(PtrTy);
} else {
EventRet =
- Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), EventPtrTy);
+ Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), PtrTy);
}
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
llvm::Value *Kernel =
- Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
+ Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
llvm::Value *Block =
Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
std::vector<llvm::Type *> ArgTys = {
- QueueTy, Int32Ty, RangeTy, Int32Ty,
- EventPtrTy, EventPtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
+ QueueTy, Int32Ty, RangeTy, Int32Ty,
+ PtrTy, PtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
std::vector<llvm::Value *> Args = {Queue, Flags, Range,
NumEvents, EventWaitList, EventRet,
@@ -4951,16 +5634,17 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
EmitLifetimeEnd(TmpSize, TmpPtr);
return Call;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
// OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
// parameter.
case Builtin::BIget_kernel_work_group_size: {
- llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
+ llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
getContext().getTargetAddressSpace(LangAS::opencl_generic));
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
- Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
+ Value *Kernel =
+ Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
return RValue::get(EmitRuntimeCall(
CGM.CreateRuntimeFunction(
@@ -4970,11 +5654,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
{Kernel, Arg}));
}
case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
- llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
+ llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
getContext().getTargetAddressSpace(LangAS::opencl_generic));
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
- Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
+ Value *Kernel =
+ Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
return RValue::get(EmitRuntimeCall(
CGM.CreateRuntimeFunction(
@@ -4985,13 +5670,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
- llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
+ llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
getContext().getTargetAddressSpace(LangAS::opencl_generic));
LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
llvm::Value *NDRange = NDRangeL.getAddress(*this).getPointer();
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
- Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
+ Value *Kernel =
+ Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
const char *Name =
BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
@@ -5011,7 +5697,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *Val = EmitScalarExpr(E->getArg(0));
Address Address = EmitPointerWithAlignment(E->getArg(1));
Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
- return RValue::get(Builder.CreateStore(HalfVal, Address));
+ Builder.CreateStore(HalfVal, Address);
+ return RValue::get(nullptr);
}
case Builtin::BI__builtin_load_half: {
Address Address = EmitPointerWithAlignment(E->getArg(0));
@@ -5024,11 +5711,16 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
}
case Builtin::BIprintf:
- if (getTarget().getTriple().isNVPTX())
- return EmitNVPTXDevicePrintfCallExpr(E, ReturnValue);
- if (getTarget().getTriple().getArch() == Triple::amdgcn &&
- getLangOpts().HIP)
- return EmitAMDGPUDevicePrintfCallExpr(E, ReturnValue);
+ if (getTarget().getTriple().isNVPTX() ||
+ getTarget().getTriple().isAMDGCN()) {
+ if (getLangOpts().OpenMPIsTargetDevice)
+ return EmitOpenMPDevicePrintfCallExpr(E);
+ if (getTarget().getTriple().isNVPTX())
+ return EmitNVPTXDevicePrintfCallExpr(E);
+ if (getTarget().getTriple().isAMDGCN() && getLangOpts().HIP)
+ return EmitAMDGPUDevicePrintfCallExpr(E);
+ }
+
break;
case Builtin::BI__builtin_canonicalize:
case Builtin::BI__builtin_canonicalizef:
@@ -5039,7 +5731,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_thread_pointer: {
if (!getContext().getTargetInfo().isTLSSupported())
CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
- // Fall through - it's already mapped to the intrinsic by GCCBuiltin.
+ // Fall through - it's already mapped to the intrinsic by ClangBuiltin.
break;
}
case Builtin::BI__builtin_os_log_format:
@@ -5130,12 +5822,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Address DestAddr = EmitMSVAListRef(E->getArg(0));
Address SrcAddr = EmitMSVAListRef(E->getArg(1));
- llvm::Type *BPP = Int8PtrPtrTy;
-
- DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"),
- DestAddr.getAlignment());
- SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"),
- SrcAddr.getAlignment());
+ DestAddr = DestAddr.withElementType(Int8PtrTy);
+ SrcAddr = SrcAddr.withElementType(Int8PtrTy);
Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
@@ -5177,12 +5865,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
// See if we have a target specific intrinsic.
- const char *Name = getContext().BuiltinInfo.getName(BuiltinID);
+ StringRef Name = getContext().BuiltinInfo.getName(BuiltinID);
Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
StringRef Prefix =
llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
if (!Prefix.empty()) {
- IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name);
+ IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin(Prefix.data(), Name);
// NOTE we don't need to perform a compatibility flag check here since the
// intrinsics are declared in Builtins*.def via LANGBUILTIN which filter the
// MS builtins via ALL_MS_LANGUAGES and are filtered earlier.
@@ -5204,18 +5892,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::FunctionType *FTy = F->getFunctionType();
for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
- Value *ArgValue;
- // If this is a normal argument, just emit it as a scalar.
- if ((ICEArguments & (1 << i)) == 0) {
- ArgValue = EmitScalarExpr(E->getArg(i));
- } else {
- // If this is required to be a constant, constant fold it so that we
- // know that the generated intrinsic gets a ConstantInt.
- ArgValue = llvm::ConstantInt::get(
- getLLVMContext(),
- *E->getArg(i)->getIntegerConstantExpr(getContext()));
- }
-
+ Value *ArgValue = EmitScalarOrConstFoldImmArg(ICEArguments, i, E);
// If the intrinsic arg type is different from the builtin arg type
// we need to do a bit cast.
llvm::Type *PTy = FTy->getParamType(i);
@@ -5225,14 +5902,20 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
if (PtrTy->getAddressSpace() !=
ArgValue->getType()->getPointerAddressSpace()) {
ArgValue = Builder.CreateAddrSpaceCast(
- ArgValue,
- ArgValue->getType()->getPointerTo(PtrTy->getAddressSpace()));
+ ArgValue, llvm::PointerType::get(getLLVMContext(),
+ PtrTy->getAddressSpace()));
}
}
assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
"Must be able to losslessly bit cast to param");
- ArgValue = Builder.CreateBitCast(ArgValue, PTy);
+ // Cast vector type (e.g., v256i32) to x86_amx, this only happen
+ // in amx intrinsics.
+ if (PTy->isX86_AMXTy())
+ ArgValue = Builder.CreateIntrinsic(Intrinsic::x86_cast_vector_to_tile,
+ {ArgValue->getType()}, {ArgValue});
+ else
+ ArgValue = Builder.CreateBitCast(ArgValue, PTy);
}
Args.push_back(ArgValue);
@@ -5250,15 +5933,25 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
V = Builder.CreateAddrSpaceCast(
- V, V->getType()->getPointerTo(PtrTy->getAddressSpace()));
+ V, llvm::PointerType::get(getLLVMContext(),
+ PtrTy->getAddressSpace()));
}
}
assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
"Must be able to losslessly bit cast result type");
- V = Builder.CreateBitCast(V, RetTy);
+ // Cast x86_amx to vector type (e.g., v256i32), this only happen
+ // in amx intrinsics.
+ if (V->getType()->isX86_AMXTy())
+ V = Builder.CreateIntrinsic(Intrinsic::x86_cast_tile_to_vector, {RetTy},
+ {V});
+ else
+ V = Builder.CreateBitCast(V, RetTy);
}
+ if (RetTy->isVoidTy())
+ return RValue::get(nullptr);
+
return RValue::get(V);
}
@@ -5276,6 +5969,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
switch (EvalKind) {
case TEK_Scalar:
+ if (V->getType()->isVoidTy())
+ return RValue::get(nullptr);
return RValue::get(V);
case TEK_Aggregate:
return RValue::getAggregate(ReturnValue.getValue(),
@@ -5286,6 +5981,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
}
+ if (getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice)
+ return EmitHipStdParUnsupportedBuiltin(this, FD);
+
ErrorUnsupported(E, "builtin function");
// Unknown builtin, for now just dump it out and return undef.
@@ -5296,6 +5994,16 @@ static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
unsigned BuiltinID, const CallExpr *E,
ReturnValueSlot ReturnValue,
llvm::Triple::ArchType Arch) {
+ // When compiling in HipStdPar mode we have to be conservative in rejecting
+ // target specific features in the FE, and defer the possible error to the
+ // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
+ // referenced by an accelerator executable function, we emit an error.
+ // Returning nullptr here leads to the builtin being handled in
+ // EmitStdParUnsupportedBuiltin.
+ if (CGF->getLangOpts().HIPStdPar && CGF->getLangOpts().CUDAIsDevice &&
+ Arch != CGF->getTarget().getTriple().getArch())
+ return nullptr;
+
switch (Arch) {
case llvm::Triple::arm:
case llvm::Triple::armeb:
@@ -5530,7 +6238,7 @@ struct ARMVectorIntrinsicInfo {
TypeModifier }
static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
- NEONMAP1(__a32_vcvt_bf16_v, arm_neon_vcvtfp2bf, 0),
+ NEONMAP1(__a32_vcvt_bf16_f32, arm_neon_vcvtfp2bf, 0),
NEONMAP0(splat_lane_v),
NEONMAP0(splat_laneq_v),
NEONMAP0(splatq_lane_v),
@@ -5542,21 +6250,27 @@ static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP0(vadd_v),
NEONMAP0(vaddhn_v),
NEONMAP0(vaddq_v),
- NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
- NEONMAP1(vaeseq_v, arm_neon_aese, 0),
- NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
- NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
- NEONMAP1(vbfdot_v, arm_neon_bfdot, 0),
- NEONMAP1(vbfdotq_v, arm_neon_bfdot, 0),
- NEONMAP1(vbfmlalbq_v, arm_neon_bfmlalb, 0),
- NEONMAP1(vbfmlaltq_v, arm_neon_bfmlalt, 0),
- NEONMAP1(vbfmmlaq_v, arm_neon_bfmmla, 0),
+ NEONMAP1(vaesdq_u8, arm_neon_aesd, 0),
+ NEONMAP1(vaeseq_u8, arm_neon_aese, 0),
+ NEONMAP1(vaesimcq_u8, arm_neon_aesimc, 0),
+ NEONMAP1(vaesmcq_u8, arm_neon_aesmc, 0),
+ NEONMAP1(vbfdot_f32, arm_neon_bfdot, 0),
+ NEONMAP1(vbfdotq_f32, arm_neon_bfdot, 0),
+ NEONMAP1(vbfmlalbq_f32, arm_neon_bfmlalb, 0),
+ NEONMAP1(vbfmlaltq_f32, arm_neon_bfmlalt, 0),
+ NEONMAP1(vbfmmlaq_f32, arm_neon_bfmmla, 0),
NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
- NEONMAP1(vcadd_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
- NEONMAP1(vcadd_rot90_v, arm_neon_vcadd_rot90, Add1ArgType),
- NEONMAP1(vcaddq_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
- NEONMAP1(vcaddq_rot90_v, arm_neon_vcadd_rot90, Add1ArgType),
+ NEONMAP1(vcadd_rot270_f16, arm_neon_vcadd_rot270, Add1ArgType),
+ NEONMAP1(vcadd_rot270_f32, arm_neon_vcadd_rot270, Add1ArgType),
+ NEONMAP1(vcadd_rot90_f16, arm_neon_vcadd_rot90, Add1ArgType),
+ NEONMAP1(vcadd_rot90_f32, arm_neon_vcadd_rot90, Add1ArgType),
+ NEONMAP1(vcaddq_rot270_f16, arm_neon_vcadd_rot270, Add1ArgType),
+ NEONMAP1(vcaddq_rot270_f32, arm_neon_vcadd_rot270, Add1ArgType),
+ NEONMAP1(vcaddq_rot270_f64, arm_neon_vcadd_rot270, Add1ArgType),
+ NEONMAP1(vcaddq_rot90_f16, arm_neon_vcadd_rot90, Add1ArgType),
+ NEONMAP1(vcaddq_rot90_f32, arm_neon_vcadd_rot90, Add1ArgType),
+ NEONMAP1(vcaddq_rot90_f64, arm_neon_vcadd_rot90, Add1ArgType),
NEONMAP1(vcage_v, arm_neon_vacge, 0),
NEONMAP1(vcageq_v, arm_neon_vacge, 0),
NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
@@ -5582,90 +6296,96 @@ static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vcnt_v, ctpop, Add1ArgType),
NEONMAP1(vcntq_v, ctpop, Add1ArgType),
NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
- NEONMAP0(vcvt_f16_v),
+ NEONMAP0(vcvt_f16_s16),
+ NEONMAP0(vcvt_f16_u16),
NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
NEONMAP0(vcvt_f32_v),
- NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvt_n_f16_s16, arm_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvt_n_f16_u16, arm_neon_vcvtfxu2fp, 0),
NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
- NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvt_n_s16_f16, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0),
+ NEONMAP1(vcvt_n_u16_f16, arm_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
- NEONMAP0(vcvt_s16_v),
+ NEONMAP0(vcvt_s16_f16),
NEONMAP0(vcvt_s32_v),
NEONMAP0(vcvt_s64_v),
- NEONMAP0(vcvt_u16_v),
+ NEONMAP0(vcvt_u16_f16),
NEONMAP0(vcvt_u32_v),
NEONMAP0(vcvt_u64_v),
- NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0),
+ NEONMAP1(vcvta_s16_f16, arm_neon_vcvtas, 0),
NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
- NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0),
+ NEONMAP1(vcvta_u16_f16, arm_neon_vcvtau, 0),
NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
- NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0),
+ NEONMAP1(vcvtaq_s16_f16, arm_neon_vcvtas, 0),
NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
- NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0),
+ NEONMAP1(vcvtaq_u16_f16, arm_neon_vcvtau, 0),
NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0),
- NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0),
+ NEONMAP1(vcvtm_s16_f16, arm_neon_vcvtms, 0),
NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
- NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0),
+ NEONMAP1(vcvtm_u16_f16, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
- NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0),
+ NEONMAP1(vcvtmq_s16_f16, arm_neon_vcvtms, 0),
NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
- NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0),
+ NEONMAP1(vcvtmq_u16_f16, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
- NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0),
+ NEONMAP1(vcvtn_s16_f16, arm_neon_vcvtns, 0),
NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
- NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0),
+ NEONMAP1(vcvtn_u16_f16, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
- NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0),
+ NEONMAP1(vcvtnq_s16_f16, arm_neon_vcvtns, 0),
NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
- NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0),
+ NEONMAP1(vcvtnq_u16_f16, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
- NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0),
+ NEONMAP1(vcvtp_s16_f16, arm_neon_vcvtps, 0),
NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
- NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0),
+ NEONMAP1(vcvtp_u16_f16, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
- NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0),
+ NEONMAP1(vcvtpq_s16_f16, arm_neon_vcvtps, 0),
NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
- NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0),
+ NEONMAP1(vcvtpq_u16_f16, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
- NEONMAP0(vcvtq_f16_v),
+ NEONMAP0(vcvtq_f16_s16),
+ NEONMAP0(vcvtq_f16_u16),
NEONMAP0(vcvtq_f32_v),
- NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvtq_n_f16_s16, arm_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvtq_n_f16_u16, arm_neon_vcvtfxu2fp, 0),
NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
- NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvtq_n_s16_f16, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0),
+ NEONMAP1(vcvtq_n_u16_f16, arm_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
- NEONMAP0(vcvtq_s16_v),
+ NEONMAP0(vcvtq_s16_f16),
NEONMAP0(vcvtq_s32_v),
NEONMAP0(vcvtq_s64_v),
- NEONMAP0(vcvtq_u16_v),
+ NEONMAP0(vcvtq_u16_f16),
NEONMAP0(vcvtq_u32_v),
NEONMAP0(vcvtq_u64_v),
- NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0),
- NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0),
+ NEONMAP1(vdot_s32, arm_neon_sdot, 0),
+ NEONMAP1(vdot_u32, arm_neon_udot, 0),
+ NEONMAP1(vdotq_s32, arm_neon_sdot, 0),
+ NEONMAP1(vdotq_u32, arm_neon_udot, 0),
NEONMAP0(vext_v),
NEONMAP0(vextq_v),
NEONMAP0(vfma_v),
@@ -5710,7 +6430,8 @@ static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
- NEONMAP2(vmmlaq_v, arm_neon_ummla, arm_neon_smmla, 0),
+ NEONMAP1(vmmlaq_s32, arm_neon_smmla, 0),
+ NEONMAP1(vmmlaq_u32, arm_neon_ummla, 0),
NEONMAP0(vmovl_v),
NEONMAP0(vmovn_v),
NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
@@ -5737,6 +6458,14 @@ static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
+ NEONMAP1(vqrdmlah_s16, arm_neon_vqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlah_s32, arm_neon_vqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlahq_s16, arm_neon_vqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlahq_s32, arm_neon_vqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlsh_s16, arm_neon_vqrdmlsh, Add1ArgType),
+ NEONMAP1(vqrdmlsh_s32, arm_neon_vqrdmlsh, Add1ArgType),
+ NEONMAP1(vqrdmlshq_s16, arm_neon_vqrdmlsh, Add1ArgType),
+ NEONMAP1(vqrdmlshq_s32, arm_neon_vqrdmlsh, Add1ArgType),
NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
@@ -5779,12 +6508,12 @@ static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
- NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0),
- NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0),
- NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0),
- NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0),
- NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0),
- NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0),
+ NEONMAP1(vsha1su0q_u32, arm_neon_sha1su0, 0),
+ NEONMAP1(vsha1su1q_u32, arm_neon_sha1su1, 0),
+ NEONMAP1(vsha256h2q_u32, arm_neon_sha256h2, 0),
+ NEONMAP1(vsha256hq_u32, arm_neon_sha256h, 0),
+ NEONMAP1(vsha256su0q_u32, arm_neon_sha256su0, 0),
+ NEONMAP1(vsha256su1q_u32, arm_neon_sha256su1, 0),
NEONMAP0(vshl_n_v),
NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
NEONMAP0(vshll_n_v),
@@ -5818,9 +6547,9 @@ static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP0(vtrnq_v),
NEONMAP0(vtst_v),
NEONMAP0(vtstq_v),
- NEONMAP1(vusdot_v, arm_neon_usdot, 0),
- NEONMAP1(vusdotq_v, arm_neon_usdot, 0),
- NEONMAP1(vusmmlaq_v, arm_neon_usmmla, 0),
+ NEONMAP1(vusdot_s32, arm_neon_usdot, 0),
+ NEONMAP1(vusdotq_s32, arm_neon_usdot, 0),
+ NEONMAP1(vusmmlaq_s32, arm_neon_usmmla, 0),
NEONMAP0(vuzp_v),
NEONMAP0(vuzpq_v),
NEONMAP0(vzip_v),
@@ -5828,7 +6557,7 @@ static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
};
static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
- NEONMAP1(__a64_vcvtq_low_bf16_v, aarch64_neon_bfcvtn, 0),
+ NEONMAP1(__a64_vcvtq_low_bf16_f32, aarch64_neon_bfcvtn, 0),
NEONMAP0(splat_lane_v),
NEONMAP0(splat_laneq_v),
NEONMAP0(splatq_lane_v),
@@ -5839,20 +6568,33 @@ static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP0(vaddhn_v),
NEONMAP0(vaddq_p128),
NEONMAP0(vaddq_v),
- NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
- NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
- NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
- NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
- NEONMAP2(vbcaxq_v, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
- NEONMAP1(vbfdot_v, aarch64_neon_bfdot, 0),
- NEONMAP1(vbfdotq_v, aarch64_neon_bfdot, 0),
- NEONMAP1(vbfmlalbq_v, aarch64_neon_bfmlalb, 0),
- NEONMAP1(vbfmlaltq_v, aarch64_neon_bfmlalt, 0),
- NEONMAP1(vbfmmlaq_v, aarch64_neon_bfmmla, 0),
- NEONMAP1(vcadd_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
- NEONMAP1(vcadd_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
- NEONMAP1(vcaddq_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
- NEONMAP1(vcaddq_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
+ NEONMAP1(vaesdq_u8, aarch64_crypto_aesd, 0),
+ NEONMAP1(vaeseq_u8, aarch64_crypto_aese, 0),
+ NEONMAP1(vaesimcq_u8, aarch64_crypto_aesimc, 0),
+ NEONMAP1(vaesmcq_u8, aarch64_crypto_aesmc, 0),
+ NEONMAP2(vbcaxq_s16, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vbcaxq_s32, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vbcaxq_s64, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vbcaxq_s8, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vbcaxq_u16, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vbcaxq_u32, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vbcaxq_u64, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vbcaxq_u8, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vbfdot_f32, aarch64_neon_bfdot, 0),
+ NEONMAP1(vbfdotq_f32, aarch64_neon_bfdot, 0),
+ NEONMAP1(vbfmlalbq_f32, aarch64_neon_bfmlalb, 0),
+ NEONMAP1(vbfmlaltq_f32, aarch64_neon_bfmlalt, 0),
+ NEONMAP1(vbfmmlaq_f32, aarch64_neon_bfmmla, 0),
+ NEONMAP1(vcadd_rot270_f16, aarch64_neon_vcadd_rot270, Add1ArgType),
+ NEONMAP1(vcadd_rot270_f32, aarch64_neon_vcadd_rot270, Add1ArgType),
+ NEONMAP1(vcadd_rot90_f16, aarch64_neon_vcadd_rot90, Add1ArgType),
+ NEONMAP1(vcadd_rot90_f32, aarch64_neon_vcadd_rot90, Add1ArgType),
+ NEONMAP1(vcaddq_rot270_f16, aarch64_neon_vcadd_rot270, Add1ArgType),
+ NEONMAP1(vcaddq_rot270_f32, aarch64_neon_vcadd_rot270, Add1ArgType),
+ NEONMAP1(vcaddq_rot270_f64, aarch64_neon_vcadd_rot270, Add1ArgType),
+ NEONMAP1(vcaddq_rot90_f16, aarch64_neon_vcadd_rot90, Add1ArgType),
+ NEONMAP1(vcaddq_rot90_f32, aarch64_neon_vcadd_rot90, Add1ArgType),
+ NEONMAP1(vcaddq_rot90_f64, aarch64_neon_vcadd_rot90, Add1ArgType),
NEONMAP1(vcage_v, aarch64_neon_facge, 0),
NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
@@ -5875,57 +6617,82 @@ static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP0(vcltzq_v),
NEONMAP1(vclz_v, ctlz, Add1ArgType),
NEONMAP1(vclzq_v, ctlz, Add1ArgType),
- NEONMAP1(vcmla_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType),
- NEONMAP1(vcmla_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType),
- NEONMAP1(vcmla_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType),
- NEONMAP1(vcmla_v, aarch64_neon_vcmla_rot0, Add1ArgType),
- NEONMAP1(vcmlaq_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType),
- NEONMAP1(vcmlaq_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType),
- NEONMAP1(vcmlaq_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType),
- NEONMAP1(vcmlaq_v, aarch64_neon_vcmla_rot0, Add1ArgType),
+ NEONMAP1(vcmla_f16, aarch64_neon_vcmla_rot0, Add1ArgType),
+ NEONMAP1(vcmla_f32, aarch64_neon_vcmla_rot0, Add1ArgType),
+ NEONMAP1(vcmla_rot180_f16, aarch64_neon_vcmla_rot180, Add1ArgType),
+ NEONMAP1(vcmla_rot180_f32, aarch64_neon_vcmla_rot180, Add1ArgType),
+ NEONMAP1(vcmla_rot270_f16, aarch64_neon_vcmla_rot270, Add1ArgType),
+ NEONMAP1(vcmla_rot270_f32, aarch64_neon_vcmla_rot270, Add1ArgType),
+ NEONMAP1(vcmla_rot90_f16, aarch64_neon_vcmla_rot90, Add1ArgType),
+ NEONMAP1(vcmla_rot90_f32, aarch64_neon_vcmla_rot90, Add1ArgType),
+ NEONMAP1(vcmlaq_f16, aarch64_neon_vcmla_rot0, Add1ArgType),
+ NEONMAP1(vcmlaq_f32, aarch64_neon_vcmla_rot0, Add1ArgType),
+ NEONMAP1(vcmlaq_f64, aarch64_neon_vcmla_rot0, Add1ArgType),
+ NEONMAP1(vcmlaq_rot180_f16, aarch64_neon_vcmla_rot180, Add1ArgType),
+ NEONMAP1(vcmlaq_rot180_f32, aarch64_neon_vcmla_rot180, Add1ArgType),
+ NEONMAP1(vcmlaq_rot180_f64, aarch64_neon_vcmla_rot180, Add1ArgType),
+ NEONMAP1(vcmlaq_rot270_f16, aarch64_neon_vcmla_rot270, Add1ArgType),
+ NEONMAP1(vcmlaq_rot270_f32, aarch64_neon_vcmla_rot270, Add1ArgType),
+ NEONMAP1(vcmlaq_rot270_f64, aarch64_neon_vcmla_rot270, Add1ArgType),
+ NEONMAP1(vcmlaq_rot90_f16, aarch64_neon_vcmla_rot90, Add1ArgType),
+ NEONMAP1(vcmlaq_rot90_f32, aarch64_neon_vcmla_rot90, Add1ArgType),
+ NEONMAP1(vcmlaq_rot90_f64, aarch64_neon_vcmla_rot90, Add1ArgType),
NEONMAP1(vcnt_v, ctpop, Add1ArgType),
NEONMAP1(vcntq_v, ctpop, Add1ArgType),
NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
- NEONMAP0(vcvt_f16_v),
+ NEONMAP0(vcvt_f16_s16),
+ NEONMAP0(vcvt_f16_u16),
NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
NEONMAP0(vcvt_f32_v),
- NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvt_n_f16_s16, aarch64_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvt_n_f16_u16, aarch64_neon_vcvtfxu2fp, 0),
NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
- NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvt_n_s16_f16, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
+ NEONMAP1(vcvt_n_u16_f16, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
- NEONMAP0(vcvtq_f16_v),
+ NEONMAP0(vcvtq_f16_s16),
+ NEONMAP0(vcvtq_f16_u16),
NEONMAP0(vcvtq_f32_v),
- NEONMAP1(vcvtq_high_bf16_v, aarch64_neon_bfcvtn2, 0),
- NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvtq_high_bf16_f32, aarch64_neon_bfcvtn2, 0),
+ NEONMAP1(vcvtq_n_f16_s16, aarch64_neon_vcvtfxs2fp, 0),
+ NEONMAP1(vcvtq_n_f16_u16, aarch64_neon_vcvtfxu2fp, 0),
NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
- NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
+ NEONMAP1(vcvtq_n_s16_f16, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
- NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
+ NEONMAP1(vcvtq_n_u16_f16, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
- NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
- NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
- NEONMAP2(veor3q_v, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vdot_s32, aarch64_neon_sdot, 0),
+ NEONMAP1(vdot_u32, aarch64_neon_udot, 0),
+ NEONMAP1(vdotq_s32, aarch64_neon_sdot, 0),
+ NEONMAP1(vdotq_u32, aarch64_neon_udot, 0),
+ NEONMAP2(veor3q_s16, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
+ NEONMAP2(veor3q_s32, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
+ NEONMAP2(veor3q_s64, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
+ NEONMAP2(veor3q_s8, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
+ NEONMAP2(veor3q_u16, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
+ NEONMAP2(veor3q_u32, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
+ NEONMAP2(veor3q_u64, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
+ NEONMAP2(veor3q_u8, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
NEONMAP0(vext_v),
NEONMAP0(vextq_v),
NEONMAP0(vfma_v),
NEONMAP0(vfmaq_v),
- NEONMAP1(vfmlal_high_v, aarch64_neon_fmlal2, 0),
- NEONMAP1(vfmlal_low_v, aarch64_neon_fmlal, 0),
- NEONMAP1(vfmlalq_high_v, aarch64_neon_fmlal2, 0),
- NEONMAP1(vfmlalq_low_v, aarch64_neon_fmlal, 0),
- NEONMAP1(vfmlsl_high_v, aarch64_neon_fmlsl2, 0),
- NEONMAP1(vfmlsl_low_v, aarch64_neon_fmlsl, 0),
- NEONMAP1(vfmlslq_high_v, aarch64_neon_fmlsl2, 0),
- NEONMAP1(vfmlslq_low_v, aarch64_neon_fmlsl, 0),
+ NEONMAP1(vfmlal_high_f16, aarch64_neon_fmlal2, 0),
+ NEONMAP1(vfmlal_low_f16, aarch64_neon_fmlal, 0),
+ NEONMAP1(vfmlalq_high_f16, aarch64_neon_fmlal2, 0),
+ NEONMAP1(vfmlalq_low_f16, aarch64_neon_fmlal, 0),
+ NEONMAP1(vfmlsl_high_f16, aarch64_neon_fmlsl2, 0),
+ NEONMAP1(vfmlsl_low_f16, aarch64_neon_fmlsl, 0),
+ NEONMAP1(vfmlslq_high_f16, aarch64_neon_fmlsl2, 0),
+ NEONMAP1(vfmlslq_low_f16, aarch64_neon_fmlsl, 0),
NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
@@ -5936,7 +6703,8 @@ static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
- NEONMAP2(vmmlaq_v, aarch64_neon_ummla, aarch64_neon_smmla, 0),
+ NEONMAP1(vmmlaq_s32, aarch64_neon_smmla, 0),
+ NEONMAP1(vmmlaq_u32, aarch64_neon_ummla, 0),
NEONMAP0(vmovl_v),
NEONMAP0(vmovn_v),
NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
@@ -5962,6 +6730,14 @@ static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
+ NEONMAP1(vqrdmlah_s16, aarch64_neon_sqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlah_s32, aarch64_neon_sqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlahq_s16, aarch64_neon_sqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlahq_s32, aarch64_neon_sqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlsh_s16, aarch64_neon_sqrdmlsh, Add1ArgType),
+ NEONMAP1(vqrdmlsh_s32, aarch64_neon_sqrdmlsh, Add1ArgType),
+ NEONMAP1(vqrdmlshq_s16, aarch64_neon_sqrdmlsh, Add1ArgType),
+ NEONMAP1(vqrdmlshq_s32, aarch64_neon_sqrdmlsh, Add1ArgType),
NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0),
NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
@@ -5979,21 +6755,29 @@ static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
- NEONMAP1(vrax1q_v, aarch64_crypto_rax1, 0),
+ NEONMAP1(vrax1q_u64, aarch64_crypto_rax1, 0),
NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
- NEONMAP1(vrnd32x_v, aarch64_neon_frint32x, Add1ArgType),
- NEONMAP1(vrnd32xq_v, aarch64_neon_frint32x, Add1ArgType),
- NEONMAP1(vrnd32z_v, aarch64_neon_frint32z, Add1ArgType),
- NEONMAP1(vrnd32zq_v, aarch64_neon_frint32z, Add1ArgType),
- NEONMAP1(vrnd64x_v, aarch64_neon_frint64x, Add1ArgType),
- NEONMAP1(vrnd64xq_v, aarch64_neon_frint64x, Add1ArgType),
- NEONMAP1(vrnd64z_v, aarch64_neon_frint64z, Add1ArgType),
- NEONMAP1(vrnd64zq_v, aarch64_neon_frint64z, Add1ArgType),
+ NEONMAP1(vrnd32x_f32, aarch64_neon_frint32x, Add1ArgType),
+ NEONMAP1(vrnd32x_f64, aarch64_neon_frint32x, Add1ArgType),
+ NEONMAP1(vrnd32xq_f32, aarch64_neon_frint32x, Add1ArgType),
+ NEONMAP1(vrnd32xq_f64, aarch64_neon_frint32x, Add1ArgType),
+ NEONMAP1(vrnd32z_f32, aarch64_neon_frint32z, Add1ArgType),
+ NEONMAP1(vrnd32z_f64, aarch64_neon_frint32z, Add1ArgType),
+ NEONMAP1(vrnd32zq_f32, aarch64_neon_frint32z, Add1ArgType),
+ NEONMAP1(vrnd32zq_f64, aarch64_neon_frint32z, Add1ArgType),
+ NEONMAP1(vrnd64x_f32, aarch64_neon_frint64x, Add1ArgType),
+ NEONMAP1(vrnd64x_f64, aarch64_neon_frint64x, Add1ArgType),
+ NEONMAP1(vrnd64xq_f32, aarch64_neon_frint64x, Add1ArgType),
+ NEONMAP1(vrnd64xq_f64, aarch64_neon_frint64x, Add1ArgType),
+ NEONMAP1(vrnd64z_f32, aarch64_neon_frint64z, Add1ArgType),
+ NEONMAP1(vrnd64z_f64, aarch64_neon_frint64z, Add1ArgType),
+ NEONMAP1(vrnd64zq_f32, aarch64_neon_frint64z, Add1ArgType),
+ NEONMAP1(vrnd64zq_f64, aarch64_neon_frint64z, Add1ArgType),
NEONMAP0(vrndi_v),
NEONMAP0(vrndiq_v),
NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
@@ -6005,16 +6789,16 @@ static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
- NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0),
- NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0),
- NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0),
- NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
- NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
- NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
- NEONMAP1(vsha512h2q_v, aarch64_crypto_sha512h2, 0),
- NEONMAP1(vsha512hq_v, aarch64_crypto_sha512h, 0),
- NEONMAP1(vsha512su0q_v, aarch64_crypto_sha512su0, 0),
- NEONMAP1(vsha512su1q_v, aarch64_crypto_sha512su1, 0),
+ NEONMAP1(vsha1su0q_u32, aarch64_crypto_sha1su0, 0),
+ NEONMAP1(vsha1su1q_u32, aarch64_crypto_sha1su1, 0),
+ NEONMAP1(vsha256h2q_u32, aarch64_crypto_sha256h2, 0),
+ NEONMAP1(vsha256hq_u32, aarch64_crypto_sha256h, 0),
+ NEONMAP1(vsha256su0q_u32, aarch64_crypto_sha256su0, 0),
+ NEONMAP1(vsha256su1q_u32, aarch64_crypto_sha256su1, 0),
+ NEONMAP1(vsha512h2q_u64, aarch64_crypto_sha512h2, 0),
+ NEONMAP1(vsha512hq_u64, aarch64_crypto_sha512h, 0),
+ NEONMAP1(vsha512su0q_u64, aarch64_crypto_sha512su0, 0),
+ NEONMAP1(vsha512su1q_u64, aarch64_crypto_sha512su1, 0),
NEONMAP0(vshl_n_v),
NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
NEONMAP0(vshll_n_v),
@@ -6023,15 +6807,15 @@ static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP0(vshr_n_v),
NEONMAP0(vshrn_n_v),
NEONMAP0(vshrq_n_v),
- NEONMAP1(vsm3partw1q_v, aarch64_crypto_sm3partw1, 0),
- NEONMAP1(vsm3partw2q_v, aarch64_crypto_sm3partw2, 0),
- NEONMAP1(vsm3ss1q_v, aarch64_crypto_sm3ss1, 0),
- NEONMAP1(vsm3tt1aq_v, aarch64_crypto_sm3tt1a, 0),
- NEONMAP1(vsm3tt1bq_v, aarch64_crypto_sm3tt1b, 0),
- NEONMAP1(vsm3tt2aq_v, aarch64_crypto_sm3tt2a, 0),
- NEONMAP1(vsm3tt2bq_v, aarch64_crypto_sm3tt2b, 0),
- NEONMAP1(vsm4ekeyq_v, aarch64_crypto_sm4ekey, 0),
- NEONMAP1(vsm4eq_v, aarch64_crypto_sm4e, 0),
+ NEONMAP1(vsm3partw1q_u32, aarch64_crypto_sm3partw1, 0),
+ NEONMAP1(vsm3partw2q_u32, aarch64_crypto_sm3partw2, 0),
+ NEONMAP1(vsm3ss1q_u32, aarch64_crypto_sm3ss1, 0),
+ NEONMAP1(vsm3tt1aq_u32, aarch64_crypto_sm3tt1a, 0),
+ NEONMAP1(vsm3tt1bq_u32, aarch64_crypto_sm3tt1b, 0),
+ NEONMAP1(vsm3tt2aq_u32, aarch64_crypto_sm3tt2a, 0),
+ NEONMAP1(vsm3tt2bq_u32, aarch64_crypto_sm3tt2b, 0),
+ NEONMAP1(vsm4ekeyq_u32, aarch64_crypto_sm4ekey, 0),
+ NEONMAP1(vsm4eq_u32, aarch64_crypto_sm4e, 0),
NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
@@ -6041,10 +6825,10 @@ static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP0(vsubhn_v),
NEONMAP0(vtst_v),
NEONMAP0(vtstq_v),
- NEONMAP1(vusdot_v, aarch64_neon_usdot, 0),
- NEONMAP1(vusdotq_v, aarch64_neon_usdot, 0),
- NEONMAP1(vusmmlaq_v, aarch64_neon_usmmla, 0),
- NEONMAP1(vxarq_v, aarch64_crypto_xar, 0),
+ NEONMAP1(vusdot_s32, aarch64_neon_usdot, 0),
+ NEONMAP1(vusdotq_s32, aarch64_neon_usdot, 0),
+ NEONMAP1(vusmmlaq_s32, aarch64_neon_usmmla, 0),
+ NEONMAP1(vxarq_u64, aarch64_crypto_xar, 0),
};
static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
@@ -6164,6 +6948,10 @@ static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
+ NEONMAP1(vqrdmlahh_s16, aarch64_neon_sqrdmlah, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqrdmlahs_s32, aarch64_neon_sqrdmlah, Add1ArgType),
+ NEONMAP1(vqrdmlshh_s16, aarch64_neon_sqrdmlsh, Vectorize1ArgType | Use64BitVectors),
+ NEONMAP1(vqrdmlshs_s32, aarch64_neon_sqrdmlsh, Add1ArgType),
NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
@@ -6282,6 +7070,163 @@ static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
};
+// Some intrinsics are equivalent for codegen.
+static const std::pair<unsigned, unsigned> NEONEquivalentIntrinsicMap[] = {
+ { NEON::BI__builtin_neon_splat_lane_bf16, NEON::BI__builtin_neon_splat_lane_v, },
+ { NEON::BI__builtin_neon_splat_laneq_bf16, NEON::BI__builtin_neon_splat_laneq_v, },
+ { NEON::BI__builtin_neon_splatq_lane_bf16, NEON::BI__builtin_neon_splatq_lane_v, },
+ { NEON::BI__builtin_neon_splatq_laneq_bf16, NEON::BI__builtin_neon_splatq_laneq_v, },
+ { NEON::BI__builtin_neon_vabd_f16, NEON::BI__builtin_neon_vabd_v, },
+ { NEON::BI__builtin_neon_vabdq_f16, NEON::BI__builtin_neon_vabdq_v, },
+ { NEON::BI__builtin_neon_vabs_f16, NEON::BI__builtin_neon_vabs_v, },
+ { NEON::BI__builtin_neon_vabsq_f16, NEON::BI__builtin_neon_vabsq_v, },
+ { NEON::BI__builtin_neon_vbsl_f16, NEON::BI__builtin_neon_vbsl_v, },
+ { NEON::BI__builtin_neon_vbslq_f16, NEON::BI__builtin_neon_vbslq_v, },
+ { NEON::BI__builtin_neon_vcage_f16, NEON::BI__builtin_neon_vcage_v, },
+ { NEON::BI__builtin_neon_vcageq_f16, NEON::BI__builtin_neon_vcageq_v, },
+ { NEON::BI__builtin_neon_vcagt_f16, NEON::BI__builtin_neon_vcagt_v, },
+ { NEON::BI__builtin_neon_vcagtq_f16, NEON::BI__builtin_neon_vcagtq_v, },
+ { NEON::BI__builtin_neon_vcale_f16, NEON::BI__builtin_neon_vcale_v, },
+ { NEON::BI__builtin_neon_vcaleq_f16, NEON::BI__builtin_neon_vcaleq_v, },
+ { NEON::BI__builtin_neon_vcalt_f16, NEON::BI__builtin_neon_vcalt_v, },
+ { NEON::BI__builtin_neon_vcaltq_f16, NEON::BI__builtin_neon_vcaltq_v, },
+ { NEON::BI__builtin_neon_vceqz_f16, NEON::BI__builtin_neon_vceqz_v, },
+ { NEON::BI__builtin_neon_vceqzq_f16, NEON::BI__builtin_neon_vceqzq_v, },
+ { NEON::BI__builtin_neon_vcgez_f16, NEON::BI__builtin_neon_vcgez_v, },
+ { NEON::BI__builtin_neon_vcgezq_f16, NEON::BI__builtin_neon_vcgezq_v, },
+ { NEON::BI__builtin_neon_vcgtz_f16, NEON::BI__builtin_neon_vcgtz_v, },
+ { NEON::BI__builtin_neon_vcgtzq_f16, NEON::BI__builtin_neon_vcgtzq_v, },
+ { NEON::BI__builtin_neon_vclez_f16, NEON::BI__builtin_neon_vclez_v, },
+ { NEON::BI__builtin_neon_vclezq_f16, NEON::BI__builtin_neon_vclezq_v, },
+ { NEON::BI__builtin_neon_vcltz_f16, NEON::BI__builtin_neon_vcltz_v, },
+ { NEON::BI__builtin_neon_vcltzq_f16, NEON::BI__builtin_neon_vcltzq_v, },
+ { NEON::BI__builtin_neon_vext_f16, NEON::BI__builtin_neon_vext_v, },
+ { NEON::BI__builtin_neon_vextq_f16, NEON::BI__builtin_neon_vextq_v, },
+ { NEON::BI__builtin_neon_vfma_f16, NEON::BI__builtin_neon_vfma_v, },
+ { NEON::BI__builtin_neon_vfma_lane_f16, NEON::BI__builtin_neon_vfma_lane_v, },
+ { NEON::BI__builtin_neon_vfma_laneq_f16, NEON::BI__builtin_neon_vfma_laneq_v, },
+ { NEON::BI__builtin_neon_vfmaq_f16, NEON::BI__builtin_neon_vfmaq_v, },
+ { NEON::BI__builtin_neon_vfmaq_lane_f16, NEON::BI__builtin_neon_vfmaq_lane_v, },
+ { NEON::BI__builtin_neon_vfmaq_laneq_f16, NEON::BI__builtin_neon_vfmaq_laneq_v, },
+ { NEON::BI__builtin_neon_vld1_bf16_x2, NEON::BI__builtin_neon_vld1_x2_v },
+ { NEON::BI__builtin_neon_vld1_bf16_x3, NEON::BI__builtin_neon_vld1_x3_v },
+ { NEON::BI__builtin_neon_vld1_bf16_x4, NEON::BI__builtin_neon_vld1_x4_v },
+ { NEON::BI__builtin_neon_vld1_bf16, NEON::BI__builtin_neon_vld1_v },
+ { NEON::BI__builtin_neon_vld1_dup_bf16, NEON::BI__builtin_neon_vld1_dup_v },
+ { NEON::BI__builtin_neon_vld1_lane_bf16, NEON::BI__builtin_neon_vld1_lane_v },
+ { NEON::BI__builtin_neon_vld1q_bf16_x2, NEON::BI__builtin_neon_vld1q_x2_v },
+ { NEON::BI__builtin_neon_vld1q_bf16_x3, NEON::BI__builtin_neon_vld1q_x3_v },
+ { NEON::BI__builtin_neon_vld1q_bf16_x4, NEON::BI__builtin_neon_vld1q_x4_v },
+ { NEON::BI__builtin_neon_vld1q_bf16, NEON::BI__builtin_neon_vld1q_v },
+ { NEON::BI__builtin_neon_vld1q_dup_bf16, NEON::BI__builtin_neon_vld1q_dup_v },
+ { NEON::BI__builtin_neon_vld1q_lane_bf16, NEON::BI__builtin_neon_vld1q_lane_v },
+ { NEON::BI__builtin_neon_vld2_bf16, NEON::BI__builtin_neon_vld2_v },
+ { NEON::BI__builtin_neon_vld2_dup_bf16, NEON::BI__builtin_neon_vld2_dup_v },
+ { NEON::BI__builtin_neon_vld2_lane_bf16, NEON::BI__builtin_neon_vld2_lane_v },
+ { NEON::BI__builtin_neon_vld2q_bf16, NEON::BI__builtin_neon_vld2q_v },
+ { NEON::BI__builtin_neon_vld2q_dup_bf16, NEON::BI__builtin_neon_vld2q_dup_v },
+ { NEON::BI__builtin_neon_vld2q_lane_bf16, NEON::BI__builtin_neon_vld2q_lane_v },
+ { NEON::BI__builtin_neon_vld3_bf16, NEON::BI__builtin_neon_vld3_v },
+ { NEON::BI__builtin_neon_vld3_dup_bf16, NEON::BI__builtin_neon_vld3_dup_v },
+ { NEON::BI__builtin_neon_vld3_lane_bf16, NEON::BI__builtin_neon_vld3_lane_v },
+ { NEON::BI__builtin_neon_vld3q_bf16, NEON::BI__builtin_neon_vld3q_v },
+ { NEON::BI__builtin_neon_vld3q_dup_bf16, NEON::BI__builtin_neon_vld3q_dup_v },
+ { NEON::BI__builtin_neon_vld3q_lane_bf16, NEON::BI__builtin_neon_vld3q_lane_v },
+ { NEON::BI__builtin_neon_vld4_bf16, NEON::BI__builtin_neon_vld4_v },
+ { NEON::BI__builtin_neon_vld4_dup_bf16, NEON::BI__builtin_neon_vld4_dup_v },
+ { NEON::BI__builtin_neon_vld4_lane_bf16, NEON::BI__builtin_neon_vld4_lane_v },
+ { NEON::BI__builtin_neon_vld4q_bf16, NEON::BI__builtin_neon_vld4q_v },
+ { NEON::BI__builtin_neon_vld4q_dup_bf16, NEON::BI__builtin_neon_vld4q_dup_v },
+ { NEON::BI__builtin_neon_vld4q_lane_bf16, NEON::BI__builtin_neon_vld4q_lane_v },
+ { NEON::BI__builtin_neon_vmax_f16, NEON::BI__builtin_neon_vmax_v, },
+ { NEON::BI__builtin_neon_vmaxnm_f16, NEON::BI__builtin_neon_vmaxnm_v, },
+ { NEON::BI__builtin_neon_vmaxnmq_f16, NEON::BI__builtin_neon_vmaxnmq_v, },
+ { NEON::BI__builtin_neon_vmaxq_f16, NEON::BI__builtin_neon_vmaxq_v, },
+ { NEON::BI__builtin_neon_vmin_f16, NEON::BI__builtin_neon_vmin_v, },
+ { NEON::BI__builtin_neon_vminnm_f16, NEON::BI__builtin_neon_vminnm_v, },
+ { NEON::BI__builtin_neon_vminnmq_f16, NEON::BI__builtin_neon_vminnmq_v, },
+ { NEON::BI__builtin_neon_vminq_f16, NEON::BI__builtin_neon_vminq_v, },
+ { NEON::BI__builtin_neon_vmulx_f16, NEON::BI__builtin_neon_vmulx_v, },
+ { NEON::BI__builtin_neon_vmulxq_f16, NEON::BI__builtin_neon_vmulxq_v, },
+ { NEON::BI__builtin_neon_vpadd_f16, NEON::BI__builtin_neon_vpadd_v, },
+ { NEON::BI__builtin_neon_vpaddq_f16, NEON::BI__builtin_neon_vpaddq_v, },
+ { NEON::BI__builtin_neon_vpmax_f16, NEON::BI__builtin_neon_vpmax_v, },
+ { NEON::BI__builtin_neon_vpmaxnm_f16, NEON::BI__builtin_neon_vpmaxnm_v, },
+ { NEON::BI__builtin_neon_vpmaxnmq_f16, NEON::BI__builtin_neon_vpmaxnmq_v, },
+ { NEON::BI__builtin_neon_vpmaxq_f16, NEON::BI__builtin_neon_vpmaxq_v, },
+ { NEON::BI__builtin_neon_vpmin_f16, NEON::BI__builtin_neon_vpmin_v, },
+ { NEON::BI__builtin_neon_vpminnm_f16, NEON::BI__builtin_neon_vpminnm_v, },
+ { NEON::BI__builtin_neon_vpminnmq_f16, NEON::BI__builtin_neon_vpminnmq_v, },
+ { NEON::BI__builtin_neon_vpminq_f16, NEON::BI__builtin_neon_vpminq_v, },
+ { NEON::BI__builtin_neon_vrecpe_f16, NEON::BI__builtin_neon_vrecpe_v, },
+ { NEON::BI__builtin_neon_vrecpeq_f16, NEON::BI__builtin_neon_vrecpeq_v, },
+ { NEON::BI__builtin_neon_vrecps_f16, NEON::BI__builtin_neon_vrecps_v, },
+ { NEON::BI__builtin_neon_vrecpsq_f16, NEON::BI__builtin_neon_vrecpsq_v, },
+ { NEON::BI__builtin_neon_vrnd_f16, NEON::BI__builtin_neon_vrnd_v, },
+ { NEON::BI__builtin_neon_vrnda_f16, NEON::BI__builtin_neon_vrnda_v, },
+ { NEON::BI__builtin_neon_vrndaq_f16, NEON::BI__builtin_neon_vrndaq_v, },
+ { NEON::BI__builtin_neon_vrndi_f16, NEON::BI__builtin_neon_vrndi_v, },
+ { NEON::BI__builtin_neon_vrndiq_f16, NEON::BI__builtin_neon_vrndiq_v, },
+ { NEON::BI__builtin_neon_vrndm_f16, NEON::BI__builtin_neon_vrndm_v, },
+ { NEON::BI__builtin_neon_vrndmq_f16, NEON::BI__builtin_neon_vrndmq_v, },
+ { NEON::BI__builtin_neon_vrndn_f16, NEON::BI__builtin_neon_vrndn_v, },
+ { NEON::BI__builtin_neon_vrndnq_f16, NEON::BI__builtin_neon_vrndnq_v, },
+ { NEON::BI__builtin_neon_vrndp_f16, NEON::BI__builtin_neon_vrndp_v, },
+ { NEON::BI__builtin_neon_vrndpq_f16, NEON::BI__builtin_neon_vrndpq_v, },
+ { NEON::BI__builtin_neon_vrndq_f16, NEON::BI__builtin_neon_vrndq_v, },
+ { NEON::BI__builtin_neon_vrndx_f16, NEON::BI__builtin_neon_vrndx_v, },
+ { NEON::BI__builtin_neon_vrndxq_f16, NEON::BI__builtin_neon_vrndxq_v, },
+ { NEON::BI__builtin_neon_vrsqrte_f16, NEON::BI__builtin_neon_vrsqrte_v, },
+ { NEON::BI__builtin_neon_vrsqrteq_f16, NEON::BI__builtin_neon_vrsqrteq_v, },
+ { NEON::BI__builtin_neon_vrsqrts_f16, NEON::BI__builtin_neon_vrsqrts_v, },
+ { NEON::BI__builtin_neon_vrsqrtsq_f16, NEON::BI__builtin_neon_vrsqrtsq_v, },
+ { NEON::BI__builtin_neon_vsqrt_f16, NEON::BI__builtin_neon_vsqrt_v, },
+ { NEON::BI__builtin_neon_vsqrtq_f16, NEON::BI__builtin_neon_vsqrtq_v, },
+ { NEON::BI__builtin_neon_vst1_bf16_x2, NEON::BI__builtin_neon_vst1_x2_v },
+ { NEON::BI__builtin_neon_vst1_bf16_x3, NEON::BI__builtin_neon_vst1_x3_v },
+ { NEON::BI__builtin_neon_vst1_bf16_x4, NEON::BI__builtin_neon_vst1_x4_v },
+ { NEON::BI__builtin_neon_vst1_bf16, NEON::BI__builtin_neon_vst1_v },
+ { NEON::BI__builtin_neon_vst1_lane_bf16, NEON::BI__builtin_neon_vst1_lane_v },
+ { NEON::BI__builtin_neon_vst1q_bf16_x2, NEON::BI__builtin_neon_vst1q_x2_v },
+ { NEON::BI__builtin_neon_vst1q_bf16_x3, NEON::BI__builtin_neon_vst1q_x3_v },
+ { NEON::BI__builtin_neon_vst1q_bf16_x4, NEON::BI__builtin_neon_vst1q_x4_v },
+ { NEON::BI__builtin_neon_vst1q_bf16, NEON::BI__builtin_neon_vst1q_v },
+ { NEON::BI__builtin_neon_vst1q_lane_bf16, NEON::BI__builtin_neon_vst1q_lane_v },
+ { NEON::BI__builtin_neon_vst2_bf16, NEON::BI__builtin_neon_vst2_v },
+ { NEON::BI__builtin_neon_vst2_lane_bf16, NEON::BI__builtin_neon_vst2_lane_v },
+ { NEON::BI__builtin_neon_vst2q_bf16, NEON::BI__builtin_neon_vst2q_v },
+ { NEON::BI__builtin_neon_vst2q_lane_bf16, NEON::BI__builtin_neon_vst2q_lane_v },
+ { NEON::BI__builtin_neon_vst3_bf16, NEON::BI__builtin_neon_vst3_v },
+ { NEON::BI__builtin_neon_vst3_lane_bf16, NEON::BI__builtin_neon_vst3_lane_v },
+ { NEON::BI__builtin_neon_vst3q_bf16, NEON::BI__builtin_neon_vst3q_v },
+ { NEON::BI__builtin_neon_vst3q_lane_bf16, NEON::BI__builtin_neon_vst3q_lane_v },
+ { NEON::BI__builtin_neon_vst4_bf16, NEON::BI__builtin_neon_vst4_v },
+ { NEON::BI__builtin_neon_vst4_lane_bf16, NEON::BI__builtin_neon_vst4_lane_v },
+ { NEON::BI__builtin_neon_vst4q_bf16, NEON::BI__builtin_neon_vst4q_v },
+ { NEON::BI__builtin_neon_vst4q_lane_bf16, NEON::BI__builtin_neon_vst4q_lane_v },
+ { NEON::BI__builtin_neon_vtrn_f16, NEON::BI__builtin_neon_vtrn_v, },
+ { NEON::BI__builtin_neon_vtrnq_f16, NEON::BI__builtin_neon_vtrnq_v, },
+ { NEON::BI__builtin_neon_vuzp_f16, NEON::BI__builtin_neon_vuzp_v, },
+ { NEON::BI__builtin_neon_vuzpq_f16, NEON::BI__builtin_neon_vuzpq_v, },
+ { NEON::BI__builtin_neon_vzip_f16, NEON::BI__builtin_neon_vzip_v, },
+ { NEON::BI__builtin_neon_vzipq_f16, NEON::BI__builtin_neon_vzipq_v, },
+ // The mangling rules cause us to have one ID for each type for vldap1(q)_lane
+ // and vstl1(q)_lane, but codegen is equivalent for all of them. Choose an
+ // arbitrary one to be handled as tha canonical variation.
+ { NEON::BI__builtin_neon_vldap1_lane_u64, NEON::BI__builtin_neon_vldap1_lane_s64 },
+ { NEON::BI__builtin_neon_vldap1_lane_f64, NEON::BI__builtin_neon_vldap1_lane_s64 },
+ { NEON::BI__builtin_neon_vldap1_lane_p64, NEON::BI__builtin_neon_vldap1_lane_s64 },
+ { NEON::BI__builtin_neon_vldap1q_lane_u64, NEON::BI__builtin_neon_vldap1q_lane_s64 },
+ { NEON::BI__builtin_neon_vldap1q_lane_f64, NEON::BI__builtin_neon_vldap1q_lane_s64 },
+ { NEON::BI__builtin_neon_vldap1q_lane_p64, NEON::BI__builtin_neon_vldap1q_lane_s64 },
+ { NEON::BI__builtin_neon_vstl1_lane_u64, NEON::BI__builtin_neon_vstl1_lane_s64 },
+ { NEON::BI__builtin_neon_vstl1_lane_f64, NEON::BI__builtin_neon_vstl1_lane_s64 },
+ { NEON::BI__builtin_neon_vstl1_lane_p64, NEON::BI__builtin_neon_vstl1_lane_s64 },
+ { NEON::BI__builtin_neon_vstl1q_lane_u64, NEON::BI__builtin_neon_vstl1q_lane_s64 },
+ { NEON::BI__builtin_neon_vstl1q_lane_f64, NEON::BI__builtin_neon_vstl1q_lane_s64 },
+ { NEON::BI__builtin_neon_vstl1q_lane_p64, NEON::BI__builtin_neon_vstl1q_lane_s64 },
+};
+
#undef NEONMAP0
#undef NEONMAP1
#undef NEONMAP2
@@ -6297,17 +7242,36 @@ static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = {
#define GET_SVE_LLVM_INTRINSIC_MAP
#include "clang/Basic/arm_sve_builtin_cg.inc"
+#include "clang/Basic/BuiltinsAArch64NeonSVEBridge_cg.def"
#undef GET_SVE_LLVM_INTRINSIC_MAP
};
#undef SVEMAP1
#undef SVEMAP2
+#define SMEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
+ { \
+ #NameBase, SME::BI__builtin_sme_##NameBase, Intrinsic::LLVMIntrinsic, 0, \
+ TypeModifier \
+ }
+
+#define SMEMAP2(NameBase, TypeModifier) \
+ { #NameBase, SME::BI__builtin_sme_##NameBase, 0, 0, TypeModifier }
+static const ARMVectorIntrinsicInfo AArch64SMEIntrinsicMap[] = {
+#define GET_SME_LLVM_INTRINSIC_MAP
+#include "clang/Basic/arm_sme_builtin_cg.inc"
+#undef GET_SME_LLVM_INTRINSIC_MAP
+};
+
+#undef SMEMAP1
+#undef SMEMAP2
+
static bool NEONSIMDIntrinsicsProvenSorted = false;
static bool AArch64SIMDIntrinsicsProvenSorted = false;
static bool AArch64SISDIntrinsicsProvenSorted = false;
static bool AArch64SVEIntrinsicsProvenSorted = false;
+static bool AArch64SMEIntrinsicsProvenSorted = false;
static const ARMVectorIntrinsicInfo *
findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap,
@@ -6418,13 +7382,13 @@ static Value *EmitCommonNeonSISDBuiltinExpr(
Ops[j] = CGF.Builder.CreateTruncOrBitCast(
Ops[j], cast<llvm::VectorType>(ArgTy)->getElementType());
Ops[j] =
- CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
+ CGF.Builder.CreateInsertElement(PoisonValue::get(ArgTy), Ops[j], C0);
}
Value *Result = CGF.EmitNeonCall(F, Ops, s);
llvm::Type *ResultType = CGF.ConvertType(E->getType());
- if (ResultType->getPrimitiveSizeInBits().getFixedSize() <
- Result->getType()->getPrimitiveSizeInBits().getFixedSize())
+ if (ResultType->getPrimitiveSizeInBits().getFixedValue() <
+ Result->getType()->getPrimitiveSizeInBits().getFixedValue())
return CGF.Builder.CreateExtractElement(Result, C0);
return CGF.Builder.CreateBitCast(Result, ResultType, s);
@@ -6437,7 +7401,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
llvm::Triple::ArchType Arch) {
// Get the last argument, which specifies the vector type.
const Expr *Arg = E->getArg(E->getNumArgs() - 1);
- Optional<llvm::APSInt> NeonTypeConst =
+ std::optional<llvm::APSInt> NeonTypeConst =
Arg->getIntegerConstantExpr(getContext());
if (!NeonTypeConst)
return nullptr;
@@ -6521,7 +7485,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vcalt_v:
case NEON::BI__builtin_neon_vcaltq_v:
std::swap(Ops[0], Ops[1]);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case NEON::BI__builtin_neon_vcage_v:
case NEON::BI__builtin_neon_vcageq_v:
case NEON::BI__builtin_neon_vcagt_v:
@@ -6577,17 +7541,25 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
HasLegalHalfType);
return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
- case NEON::BI__builtin_neon_vcvt_f16_v:
- case NEON::BI__builtin_neon_vcvtq_f16_v:
+ case NEON::BI__builtin_neon_vcvt_f16_s16:
+ case NEON::BI__builtin_neon_vcvt_f16_u16:
+ case NEON::BI__builtin_neon_vcvtq_f16_s16:
+ case NEON::BI__builtin_neon_vcvtq_f16_u16:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad),
HasLegalHalfType);
return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
- case NEON::BI__builtin_neon_vcvt_n_f16_v:
+ case NEON::BI__builtin_neon_vcvt_n_f16_s16:
+ case NEON::BI__builtin_neon_vcvt_n_f16_u16:
+ case NEON::BI__builtin_neon_vcvtq_n_f16_s16:
+ case NEON::BI__builtin_neon_vcvtq_n_f16_u16: {
+ llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
+ Function *F = CGM.getIntrinsic(Int, Tys);
+ return EmitNeonCall(F, Ops, "vcvt_n");
+ }
case NEON::BI__builtin_neon_vcvt_n_f32_v:
case NEON::BI__builtin_neon_vcvt_n_f64_v:
- case NEON::BI__builtin_neon_vcvtq_n_f16_v:
case NEON::BI__builtin_neon_vcvtq_n_f32_v:
case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
@@ -6595,15 +7567,15 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Function *F = CGM.getIntrinsic(Int, Tys);
return EmitNeonCall(F, Ops, "vcvt_n");
}
- case NEON::BI__builtin_neon_vcvt_n_s16_v:
+ case NEON::BI__builtin_neon_vcvt_n_s16_f16:
case NEON::BI__builtin_neon_vcvt_n_s32_v:
- case NEON::BI__builtin_neon_vcvt_n_u16_v:
+ case NEON::BI__builtin_neon_vcvt_n_u16_f16:
case NEON::BI__builtin_neon_vcvt_n_u32_v:
case NEON::BI__builtin_neon_vcvt_n_s64_v:
case NEON::BI__builtin_neon_vcvt_n_u64_v:
- case NEON::BI__builtin_neon_vcvtq_n_s16_v:
+ case NEON::BI__builtin_neon_vcvtq_n_s16_f16:
case NEON::BI__builtin_neon_vcvtq_n_s32_v:
- case NEON::BI__builtin_neon_vcvtq_n_u16_v:
+ case NEON::BI__builtin_neon_vcvtq_n_u16_f16:
case NEON::BI__builtin_neon_vcvtq_n_u32_v:
case NEON::BI__builtin_neon_vcvtq_n_s64_v:
case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
@@ -6615,64 +7587,64 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vcvt_u32_v:
case NEON::BI__builtin_neon_vcvt_s64_v:
case NEON::BI__builtin_neon_vcvt_u64_v:
- case NEON::BI__builtin_neon_vcvt_s16_v:
- case NEON::BI__builtin_neon_vcvt_u16_v:
+ case NEON::BI__builtin_neon_vcvt_s16_f16:
+ case NEON::BI__builtin_neon_vcvt_u16_f16:
case NEON::BI__builtin_neon_vcvtq_s32_v:
case NEON::BI__builtin_neon_vcvtq_u32_v:
case NEON::BI__builtin_neon_vcvtq_s64_v:
case NEON::BI__builtin_neon_vcvtq_u64_v:
- case NEON::BI__builtin_neon_vcvtq_s16_v:
- case NEON::BI__builtin_neon_vcvtq_u16_v: {
+ case NEON::BI__builtin_neon_vcvtq_s16_f16:
+ case NEON::BI__builtin_neon_vcvtq_u16_f16: {
Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
: Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
}
- case NEON::BI__builtin_neon_vcvta_s16_v:
+ case NEON::BI__builtin_neon_vcvta_s16_f16:
case NEON::BI__builtin_neon_vcvta_s32_v:
case NEON::BI__builtin_neon_vcvta_s64_v:
- case NEON::BI__builtin_neon_vcvta_u16_v:
+ case NEON::BI__builtin_neon_vcvta_u16_f16:
case NEON::BI__builtin_neon_vcvta_u32_v:
case NEON::BI__builtin_neon_vcvta_u64_v:
- case NEON::BI__builtin_neon_vcvtaq_s16_v:
+ case NEON::BI__builtin_neon_vcvtaq_s16_f16:
case NEON::BI__builtin_neon_vcvtaq_s32_v:
case NEON::BI__builtin_neon_vcvtaq_s64_v:
- case NEON::BI__builtin_neon_vcvtaq_u16_v:
+ case NEON::BI__builtin_neon_vcvtaq_u16_f16:
case NEON::BI__builtin_neon_vcvtaq_u32_v:
case NEON::BI__builtin_neon_vcvtaq_u64_v:
- case NEON::BI__builtin_neon_vcvtn_s16_v:
+ case NEON::BI__builtin_neon_vcvtn_s16_f16:
case NEON::BI__builtin_neon_vcvtn_s32_v:
case NEON::BI__builtin_neon_vcvtn_s64_v:
- case NEON::BI__builtin_neon_vcvtn_u16_v:
+ case NEON::BI__builtin_neon_vcvtn_u16_f16:
case NEON::BI__builtin_neon_vcvtn_u32_v:
case NEON::BI__builtin_neon_vcvtn_u64_v:
- case NEON::BI__builtin_neon_vcvtnq_s16_v:
+ case NEON::BI__builtin_neon_vcvtnq_s16_f16:
case NEON::BI__builtin_neon_vcvtnq_s32_v:
case NEON::BI__builtin_neon_vcvtnq_s64_v:
- case NEON::BI__builtin_neon_vcvtnq_u16_v:
+ case NEON::BI__builtin_neon_vcvtnq_u16_f16:
case NEON::BI__builtin_neon_vcvtnq_u32_v:
case NEON::BI__builtin_neon_vcvtnq_u64_v:
- case NEON::BI__builtin_neon_vcvtp_s16_v:
+ case NEON::BI__builtin_neon_vcvtp_s16_f16:
case NEON::BI__builtin_neon_vcvtp_s32_v:
case NEON::BI__builtin_neon_vcvtp_s64_v:
- case NEON::BI__builtin_neon_vcvtp_u16_v:
+ case NEON::BI__builtin_neon_vcvtp_u16_f16:
case NEON::BI__builtin_neon_vcvtp_u32_v:
case NEON::BI__builtin_neon_vcvtp_u64_v:
- case NEON::BI__builtin_neon_vcvtpq_s16_v:
+ case NEON::BI__builtin_neon_vcvtpq_s16_f16:
case NEON::BI__builtin_neon_vcvtpq_s32_v:
case NEON::BI__builtin_neon_vcvtpq_s64_v:
- case NEON::BI__builtin_neon_vcvtpq_u16_v:
+ case NEON::BI__builtin_neon_vcvtpq_u16_f16:
case NEON::BI__builtin_neon_vcvtpq_u32_v:
case NEON::BI__builtin_neon_vcvtpq_u64_v:
- case NEON::BI__builtin_neon_vcvtm_s16_v:
+ case NEON::BI__builtin_neon_vcvtm_s16_f16:
case NEON::BI__builtin_neon_vcvtm_s32_v:
case NEON::BI__builtin_neon_vcvtm_s64_v:
- case NEON::BI__builtin_neon_vcvtm_u16_v:
+ case NEON::BI__builtin_neon_vcvtm_u16_f16:
case NEON::BI__builtin_neon_vcvtm_u32_v:
case NEON::BI__builtin_neon_vcvtm_u64_v:
- case NEON::BI__builtin_neon_vcvtmq_s16_v:
+ case NEON::BI__builtin_neon_vcvtmq_s16_f16:
case NEON::BI__builtin_neon_vcvtmq_s32_v:
case NEON::BI__builtin_neon_vcvtmq_s64_v:
- case NEON::BI__builtin_neon_vcvtmq_u16_v:
+ case NEON::BI__builtin_neon_vcvtmq_u16_f16:
case NEON::BI__builtin_neon_vcvtmq_u32_v:
case NEON::BI__builtin_neon_vcvtmq_u64_v: {
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
@@ -6717,13 +7689,9 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vld1q_x3_v:
case NEON::BI__builtin_neon_vld1_x4_v:
case NEON::BI__builtin_neon_vld1q_x4_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
- Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
- llvm::Type *Tys[2] = { VTy, PTy };
+ llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld2_v:
@@ -6742,15 +7710,12 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
Value *Align = getAlignmentValue32(PtrOp1);
Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld1_dup_v:
case NEON::BI__builtin_neon_vld1q_dup_v: {
- Value *V = UndefValue::get(Ty);
- Ty = llvm::PointerType::getUnqual(VTy->getElementType());
- PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty);
+ Value *V = PoisonValue::get(Ty);
+ PtrOp0 = PtrOp0.withElementType(VTy->getElementType());
LoadInst *Ld = Builder.CreateLoad(PtrOp0);
llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
@@ -6767,9 +7732,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
for (unsigned I = 2; I < Ops.size() - 1; ++I)
Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
Ops.push_back(getAlignmentValue32(PtrOp1));
- Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), NameHint);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vmovl_v: {
@@ -6871,10 +7834,10 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vrshrq_n_v:
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
1, true);
- case NEON::BI__builtin_neon_vsha512hq_v:
- case NEON::BI__builtin_neon_vsha512h2q_v:
- case NEON::BI__builtin_neon_vsha512su0q_v:
- case NEON::BI__builtin_neon_vsha512su1q_v: {
+ case NEON::BI__builtin_neon_vsha512hq_u64:
+ case NEON::BI__builtin_neon_vsha512h2q_u64:
+ case NEON::BI__builtin_neon_vsha512su0q_u64:
+ case NEON::BI__builtin_neon_vsha512su1q_u64: {
Function *F = CGM.getIntrinsic(Int);
return EmitNeonCall(F, Ops, "");
}
@@ -6926,18 +7889,18 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Ops.push_back(getAlignmentValue32(PtrOp0));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
}
- case NEON::BI__builtin_neon_vsm3partw1q_v:
- case NEON::BI__builtin_neon_vsm3partw2q_v:
- case NEON::BI__builtin_neon_vsm3ss1q_v:
- case NEON::BI__builtin_neon_vsm4ekeyq_v:
- case NEON::BI__builtin_neon_vsm4eq_v: {
+ case NEON::BI__builtin_neon_vsm3partw1q_u32:
+ case NEON::BI__builtin_neon_vsm3partw2q_u32:
+ case NEON::BI__builtin_neon_vsm3ss1q_u32:
+ case NEON::BI__builtin_neon_vsm4ekeyq_u32:
+ case NEON::BI__builtin_neon_vsm4eq_u32: {
Function *F = CGM.getIntrinsic(Int);
return EmitNeonCall(F, Ops, "");
}
- case NEON::BI__builtin_neon_vsm3tt1aq_v:
- case NEON::BI__builtin_neon_vsm3tt1bq_v:
- case NEON::BI__builtin_neon_vsm3tt2aq_v:
- case NEON::BI__builtin_neon_vsm3tt2bq_v: {
+ case NEON::BI__builtin_neon_vsm3tt1aq_u32:
+ case NEON::BI__builtin_neon_vsm3tt1bq_u32:
+ case NEON::BI__builtin_neon_vsm3tt2aq_u32:
+ case NEON::BI__builtin_neon_vsm3tt2bq_u32: {
Function *F = CGM.getIntrinsic(Int);
Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
return EmitNeonCall(F, Ops, "");
@@ -6948,16 +7911,15 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vst1q_x3_v:
case NEON::BI__builtin_neon_vst1_x4_v:
case NEON::BI__builtin_neon_vst1q_x4_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
// TODO: Currently in AArch32 mode the pointer operand comes first, whereas
// in AArch64 it comes last. We may want to stick to one or another.
if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
Arch == llvm::Triple::aarch64_32) {
- llvm::Type *Tys[2] = { VTy, PTy };
+ llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
}
- llvm::Type *Tys[2] = { PTy, VTy };
+ llvm::Type *Tys[2] = {UnqualPtrTy, VTy};
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
}
case NEON::BI__builtin_neon_vsubhn_v: {
@@ -6979,7 +7941,6 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
}
case NEON::BI__builtin_neon_vtrn_v:
case NEON::BI__builtin_neon_vtrnq_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
@@ -7007,7 +7968,6 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
}
case NEON::BI__builtin_neon_vuzp_v:
case NEON::BI__builtin_neon_vuzpq_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
@@ -7023,14 +7983,13 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
}
return SV;
}
- case NEON::BI__builtin_neon_vxarq_v: {
+ case NEON::BI__builtin_neon_vxarq_u64: {
Function *F = CGM.getIntrinsic(Int);
Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
return EmitNeonCall(F, Ops, "");
}
case NEON::BI__builtin_neon_vzip_v:
case NEON::BI__builtin_neon_vzipq_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
@@ -7047,70 +8006,71 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
}
return SV;
}
- case NEON::BI__builtin_neon_vdot_v:
- case NEON::BI__builtin_neon_vdotq_v: {
+ case NEON::BI__builtin_neon_vdot_s32:
+ case NEON::BI__builtin_neon_vdot_u32:
+ case NEON::BI__builtin_neon_vdotq_s32:
+ case NEON::BI__builtin_neon_vdotq_u32: {
auto *InputTy =
llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
llvm::Type *Tys[2] = { Ty, InputTy };
- Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
}
- case NEON::BI__builtin_neon_vfmlal_low_v:
- case NEON::BI__builtin_neon_vfmlalq_low_v: {
+ case NEON::BI__builtin_neon_vfmlal_low_f16:
+ case NEON::BI__builtin_neon_vfmlalq_low_f16: {
auto *InputTy =
llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
}
- case NEON::BI__builtin_neon_vfmlsl_low_v:
- case NEON::BI__builtin_neon_vfmlslq_low_v: {
+ case NEON::BI__builtin_neon_vfmlsl_low_f16:
+ case NEON::BI__builtin_neon_vfmlslq_low_f16: {
auto *InputTy =
llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
}
- case NEON::BI__builtin_neon_vfmlal_high_v:
- case NEON::BI__builtin_neon_vfmlalq_high_v: {
+ case NEON::BI__builtin_neon_vfmlal_high_f16:
+ case NEON::BI__builtin_neon_vfmlalq_high_f16: {
auto *InputTy =
llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
}
- case NEON::BI__builtin_neon_vfmlsl_high_v:
- case NEON::BI__builtin_neon_vfmlslq_high_v: {
+ case NEON::BI__builtin_neon_vfmlsl_high_f16:
+ case NEON::BI__builtin_neon_vfmlslq_high_f16: {
auto *InputTy =
llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
}
- case NEON::BI__builtin_neon_vmmlaq_v: {
+ case NEON::BI__builtin_neon_vmmlaq_s32:
+ case NEON::BI__builtin_neon_vmmlaq_u32: {
auto *InputTy =
llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
llvm::Type *Tys[2] = { Ty, InputTy };
- Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
- return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmmla");
+ return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vmmla");
}
- case NEON::BI__builtin_neon_vusmmlaq_v: {
+ case NEON::BI__builtin_neon_vusmmlaq_s32: {
auto *InputTy =
llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusmmla");
}
- case NEON::BI__builtin_neon_vusdot_v:
- case NEON::BI__builtin_neon_vusdotq_v: {
+ case NEON::BI__builtin_neon_vusdot_s32:
+ case NEON::BI__builtin_neon_vusdotq_s32: {
auto *InputTy =
llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusdot");
}
- case NEON::BI__builtin_neon_vbfdot_v:
- case NEON::BI__builtin_neon_vbfdotq_v: {
+ case NEON::BI__builtin_neon_vbfdot_f32:
+ case NEON::BI__builtin_neon_vbfdotq_f32: {
llvm::Type *InputTy =
llvm::FixedVectorType::get(BFloatTy, Ty->getPrimitiveSizeInBits() / 16);
llvm::Type *Tys[2] = { Ty, InputTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfdot");
}
- case NEON::BI__builtin_neon___a32_vcvt_bf16_v: {
+ case NEON::BI__builtin_neon___a32_vcvt_bf16_f32: {
llvm::Type *Tys[1] = { Ty };
Function *F = CGM.getIntrinsic(Int, Tys);
return EmitNeonCall(F, Ops, "vcvtfp2bf");
@@ -7144,7 +8104,10 @@ Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
Op = Builder.CreateBitCast(Op, OTy);
if (OTy->getScalarType()->isFloatingPointTy()) {
- Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
+ if (Fp == CmpInst::FCMP_OEQ)
+ Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
+ else
+ Op = Builder.CreateFCmpS(Fp, Op, Constant::getNullValue(OTy));
} else {
Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
}
@@ -7195,27 +8158,27 @@ Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
switch (BuiltinID) {
default:
return nullptr;
- case ARM::BI__builtin_arm_nop:
+ case clang::ARM::BI__builtin_arm_nop:
Value = 0;
break;
- case ARM::BI__builtin_arm_yield:
- case ARM::BI__yield:
+ case clang::ARM::BI__builtin_arm_yield:
+ case clang::ARM::BI__yield:
Value = 1;
break;
- case ARM::BI__builtin_arm_wfe:
- case ARM::BI__wfe:
+ case clang::ARM::BI__builtin_arm_wfe:
+ case clang::ARM::BI__wfe:
Value = 2;
break;
- case ARM::BI__builtin_arm_wfi:
- case ARM::BI__wfi:
+ case clang::ARM::BI__builtin_arm_wfi:
+ case clang::ARM::BI__wfi:
Value = 3;
break;
- case ARM::BI__builtin_arm_sev:
- case ARM::BI__sev:
+ case clang::ARM::BI__builtin_arm_sev:
+ case clang::ARM::BI__sev:
Value = 4;
break;
- case ARM::BI__builtin_arm_sevl:
- case ARM::BI__sevl:
+ case clang::ARM::BI__builtin_arm_sevl:
+ case clang::ARM::BI__sevl:
Value = 5;
break;
}
@@ -7230,6 +8193,26 @@ enum SpecialRegisterAccessKind {
Write,
};
+// Generates the IR for __builtin_read_exec_*.
+// Lowers the builtin to amdgcn_ballot intrinsic.
+static Value *EmitAMDGCNBallotForExec(CodeGenFunction &CGF, const CallExpr *E,
+ llvm::Type *RegisterType,
+ llvm::Type *ValueType, bool isExecHi) {
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+ CodeGen::CodeGenModule &CGM = CGF.CGM;
+
+ Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_ballot, {RegisterType});
+ llvm::Value *Call = Builder.CreateCall(F, {Builder.getInt1(true)});
+
+ if (isExecHi) {
+ Value *Rt2 = Builder.CreateLShr(Call, 32);
+ Rt2 = Builder.CreateTrunc(Rt2, CGF.Int32Ty);
+ return Rt2;
+ }
+
+ return Call;
+}
+
// Generates the IR for the read/write special register builtin,
// ValueType is the type of the value that is to be written or read,
// RegisterType is the type of the register being written to or read from.
@@ -7239,9 +8222,10 @@ static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
llvm::Type *ValueType,
SpecialRegisterAccessKind AccessKind,
StringRef SysReg = "") {
- // write and register intrinsics only support 32 and 64 bit operations.
- assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
- && "Unsupported size for register.");
+ // write and register intrinsics only support 32, 64 and 128 bit operations.
+ assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64) ||
+ RegisterType->isIntegerTy(128)) &&
+ "Unsupported size for register.");
CodeGen::CGBuilderTy &Builder = CGF.Builder;
CodeGen::CodeGenModule &CGM = CGF.CGM;
@@ -7348,7 +8332,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
if (auto Hint = GetValueForARMHint(BuiltinID))
return Hint;
- if (BuiltinID == ARM::BI__emit) {
+ if (BuiltinID == clang::ARM::BI__emit) {
bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
llvm::FunctionType *FTy =
llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
@@ -7369,12 +8353,12 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(Emit);
}
- if (BuiltinID == ARM::BI__builtin_arm_dbg) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_dbg) {
Value *Option = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
}
- if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_prefetch) {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *RW = EmitScalarExpr(E->getArg(1));
Value *IsData = EmitScalarExpr(E->getArg(2));
@@ -7386,23 +8370,34 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {Address, RW, Locality, IsData});
}
- if (BuiltinID == ARM::BI__builtin_arm_rbit) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_rbit) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(
CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
}
- if (BuiltinID == ARM::BI__builtin_arm_cls) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_clz ||
+ BuiltinID == clang::ARM::BI__builtin_arm_clz64) {
+ llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
+ Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Arg->getType());
+ Value *Res = Builder.CreateCall(F, {Arg, Builder.getInt1(false)});
+ if (BuiltinID == clang::ARM::BI__builtin_arm_clz64)
+ Res = Builder.CreateTrunc(Res, Builder.getInt32Ty());
+ return Res;
+ }
+
+
+ if (BuiltinID == clang::ARM::BI__builtin_arm_cls) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls), Arg, "cls");
}
- if (BuiltinID == ARM::BI__builtin_arm_cls64) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_cls64) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls64), Arg,
"cls");
}
- if (BuiltinID == ARM::BI__clear_cache) {
+ if (BuiltinID == clang::ARM::BI__clear_cache) {
assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
const FunctionDecl *FD = E->getDirectCallee();
Value *Ops[2];
@@ -7414,16 +8409,16 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
}
- if (BuiltinID == ARM::BI__builtin_arm_mcrr ||
- BuiltinID == ARM::BI__builtin_arm_mcrr2) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_mcrr ||
+ BuiltinID == clang::ARM::BI__builtin_arm_mcrr2) {
Function *F;
switch (BuiltinID) {
default: llvm_unreachable("unexpected builtin");
- case ARM::BI__builtin_arm_mcrr:
+ case clang::ARM::BI__builtin_arm_mcrr:
F = CGM.getIntrinsic(Intrinsic::arm_mcrr);
break;
- case ARM::BI__builtin_arm_mcrr2:
+ case clang::ARM::BI__builtin_arm_mcrr2:
F = CGM.getIntrinsic(Intrinsic::arm_mcrr2);
break;
}
@@ -7448,16 +8443,16 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm});
}
- if (BuiltinID == ARM::BI__builtin_arm_mrrc ||
- BuiltinID == ARM::BI__builtin_arm_mrrc2) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_mrrc ||
+ BuiltinID == clang::ARM::BI__builtin_arm_mrrc2) {
Function *F;
switch (BuiltinID) {
default: llvm_unreachable("unexpected builtin");
- case ARM::BI__builtin_arm_mrrc:
+ case clang::ARM::BI__builtin_arm_mrrc:
F = CGM.getIntrinsic(Intrinsic::arm_mrrc);
break;
- case ARM::BI__builtin_arm_mrrc2:
+ case clang::ARM::BI__builtin_arm_mrrc2:
F = CGM.getIntrinsic(Intrinsic::arm_mrrc2);
break;
}
@@ -7482,28 +8477,27 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType()));
}
- if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
- ((BuiltinID == ARM::BI__builtin_arm_ldrex ||
- BuiltinID == ARM::BI__builtin_arm_ldaex) &&
+ if (BuiltinID == clang::ARM::BI__builtin_arm_ldrexd ||
+ ((BuiltinID == clang::ARM::BI__builtin_arm_ldrex ||
+ BuiltinID == clang::ARM::BI__builtin_arm_ldaex) &&
getContext().getTypeSize(E->getType()) == 64) ||
- BuiltinID == ARM::BI__ldrexd) {
+ BuiltinID == clang::ARM::BI__ldrexd) {
Function *F;
switch (BuiltinID) {
default: llvm_unreachable("unexpected builtin");
- case ARM::BI__builtin_arm_ldaex:
+ case clang::ARM::BI__builtin_arm_ldaex:
F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
break;
- case ARM::BI__builtin_arm_ldrexd:
- case ARM::BI__builtin_arm_ldrex:
- case ARM::BI__ldrexd:
+ case clang::ARM::BI__builtin_arm_ldrexd:
+ case clang::ARM::BI__builtin_arm_ldrex:
+ case clang::ARM::BI__ldrexd:
F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
break;
}
Value *LdPtr = EmitScalarExpr(E->getArg(0));
- Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
- "ldrexd");
+ Value *Val = Builder.CreateCall(F, LdPtr, "ldrexd");
Value *Val0 = Builder.CreateExtractValue(Val, 1);
Value *Val1 = Builder.CreateExtractValue(Val, 0);
@@ -7516,63 +8510,63 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateBitCast(Val, ConvertType(E->getType()));
}
- if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
- BuiltinID == ARM::BI__builtin_arm_ldaex) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_ldrex ||
+ BuiltinID == clang::ARM::BI__builtin_arm_ldaex) {
Value *LoadAddr = EmitScalarExpr(E->getArg(0));
QualType Ty = E->getType();
llvm::Type *RealResTy = ConvertType(Ty);
- llvm::Type *PtrTy = llvm::IntegerType::get(
- getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
- LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
+ llvm::Type *IntTy =
+ llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));
- Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
- ? Intrinsic::arm_ldaex
- : Intrinsic::arm_ldrex,
- PtrTy);
- Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
+ Function *F = CGM.getIntrinsic(
+ BuiltinID == clang::ARM::BI__builtin_arm_ldaex ? Intrinsic::arm_ldaex
+ : Intrinsic::arm_ldrex,
+ UnqualPtrTy);
+ CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
+ Val->addParamAttr(
+ 0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
if (RealResTy->isPointerTy())
return Builder.CreateIntToPtr(Val, RealResTy);
else {
llvm::Type *IntResTy = llvm::IntegerType::get(
getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
- Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
- return Builder.CreateBitCast(Val, RealResTy);
+ return Builder.CreateBitCast(Builder.CreateTruncOrBitCast(Val, IntResTy),
+ RealResTy);
}
}
- if (BuiltinID == ARM::BI__builtin_arm_strexd ||
- ((BuiltinID == ARM::BI__builtin_arm_stlex ||
- BuiltinID == ARM::BI__builtin_arm_strex) &&
+ if (BuiltinID == clang::ARM::BI__builtin_arm_strexd ||
+ ((BuiltinID == clang::ARM::BI__builtin_arm_stlex ||
+ BuiltinID == clang::ARM::BI__builtin_arm_strex) &&
getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
- Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
- ? Intrinsic::arm_stlexd
- : Intrinsic::arm_strexd);
+ Function *F = CGM.getIntrinsic(
+ BuiltinID == clang::ARM::BI__builtin_arm_stlex ? Intrinsic::arm_stlexd
+ : Intrinsic::arm_strexd);
llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty);
Address Tmp = CreateMemTemp(E->getArg(0)->getType());
Value *Val = EmitScalarExpr(E->getArg(0));
Builder.CreateStore(Val, Tmp);
- Address LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
+ Address LdPtr = Tmp.withElementType(STy);
Val = Builder.CreateLoad(LdPtr);
Value *Arg0 = Builder.CreateExtractValue(Val, 0);
Value *Arg1 = Builder.CreateExtractValue(Val, 1);
- Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
+ Value *StPtr = EmitScalarExpr(E->getArg(1));
return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
}
- if (BuiltinID == ARM::BI__builtin_arm_strex ||
- BuiltinID == ARM::BI__builtin_arm_stlex) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_strex ||
+ BuiltinID == clang::ARM::BI__builtin_arm_stlex) {
Value *StoreVal = EmitScalarExpr(E->getArg(0));
Value *StoreAddr = EmitScalarExpr(E->getArg(1));
QualType Ty = E->getArg(0)->getType();
- llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
- getContext().getTypeSize(Ty));
- StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
+ llvm::Type *StoreTy =
+ llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));
if (StoreVal->getType()->isPointerTy())
StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
@@ -7584,14 +8578,18 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
}
- Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
- ? Intrinsic::arm_stlex
- : Intrinsic::arm_strex,
- StoreAddr->getType());
- return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
+ Function *F = CGM.getIntrinsic(
+ BuiltinID == clang::ARM::BI__builtin_arm_stlex ? Intrinsic::arm_stlex
+ : Intrinsic::arm_strex,
+ StoreAddr->getType());
+
+ CallInst *CI = Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
+ CI->addParamAttr(
+ 1, Attribute::get(getLLVMContext(), Attribute::ElementType, StoreTy));
+ return CI;
}
- if (BuiltinID == ARM::BI__builtin_arm_clrex) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_clrex) {
Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
return Builder.CreateCall(F);
}
@@ -7599,19 +8597,19 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// CRC32
Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
- case ARM::BI__builtin_arm_crc32b:
+ case clang::ARM::BI__builtin_arm_crc32b:
CRCIntrinsicID = Intrinsic::arm_crc32b; break;
- case ARM::BI__builtin_arm_crc32cb:
+ case clang::ARM::BI__builtin_arm_crc32cb:
CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
- case ARM::BI__builtin_arm_crc32h:
+ case clang::ARM::BI__builtin_arm_crc32h:
CRCIntrinsicID = Intrinsic::arm_crc32h; break;
- case ARM::BI__builtin_arm_crc32ch:
+ case clang::ARM::BI__builtin_arm_crc32ch:
CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
- case ARM::BI__builtin_arm_crc32w:
- case ARM::BI__builtin_arm_crc32d:
+ case clang::ARM::BI__builtin_arm_crc32w:
+ case clang::ARM::BI__builtin_arm_crc32d:
CRCIntrinsicID = Intrinsic::arm_crc32w; break;
- case ARM::BI__builtin_arm_crc32cw:
- case ARM::BI__builtin_arm_crc32cd:
+ case clang::ARM::BI__builtin_arm_crc32cw:
+ case clang::ARM::BI__builtin_arm_crc32cd:
CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
}
@@ -7619,10 +8617,10 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Value *Arg0 = EmitScalarExpr(E->getArg(0));
Value *Arg1 = EmitScalarExpr(E->getArg(1));
- // crc32{c,}d intrinsics are implemnted as two calls to crc32{c,}w
+ // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
// intrinsics, hence we need different codegen for these cases.
- if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
- BuiltinID == ARM::BI__builtin_arm_crc32cd) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_crc32d ||
+ BuiltinID == clang::ARM::BI__builtin_arm_crc32cd) {
Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
Value *Arg1b = Builder.CreateLShr(Arg1, C1);
@@ -7639,24 +8637,24 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
}
- if (BuiltinID == ARM::BI__builtin_arm_rsr ||
- BuiltinID == ARM::BI__builtin_arm_rsr64 ||
- BuiltinID == ARM::BI__builtin_arm_rsrp ||
- BuiltinID == ARM::BI__builtin_arm_wsr ||
- BuiltinID == ARM::BI__builtin_arm_wsr64 ||
- BuiltinID == ARM::BI__builtin_arm_wsrp) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_rsr ||
+ BuiltinID == clang::ARM::BI__builtin_arm_rsr64 ||
+ BuiltinID == clang::ARM::BI__builtin_arm_rsrp ||
+ BuiltinID == clang::ARM::BI__builtin_arm_wsr ||
+ BuiltinID == clang::ARM::BI__builtin_arm_wsr64 ||
+ BuiltinID == clang::ARM::BI__builtin_arm_wsrp) {
SpecialRegisterAccessKind AccessKind = Write;
- if (BuiltinID == ARM::BI__builtin_arm_rsr ||
- BuiltinID == ARM::BI__builtin_arm_rsr64 ||
- BuiltinID == ARM::BI__builtin_arm_rsrp)
+ if (BuiltinID == clang::ARM::BI__builtin_arm_rsr ||
+ BuiltinID == clang::ARM::BI__builtin_arm_rsr64 ||
+ BuiltinID == clang::ARM::BI__builtin_arm_rsrp)
AccessKind = VolatileRead;
- bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
- BuiltinID == ARM::BI__builtin_arm_wsrp;
+ bool IsPointerBuiltin = BuiltinID == clang::ARM::BI__builtin_arm_rsrp ||
+ BuiltinID == clang::ARM::BI__builtin_arm_wsrp;
- bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
- BuiltinID == ARM::BI__builtin_arm_wsr64;
+ bool Is64Bit = BuiltinID == clang::ARM::BI__builtin_arm_rsr64 ||
+ BuiltinID == clang::ARM::BI__builtin_arm_wsr64;
llvm::Type *ValueType;
llvm::Type *RegisterType;
@@ -7673,9 +8671,14 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
AccessKind);
}
+ if (BuiltinID == ARM::BI__builtin_sponentry) {
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
+ return Builder.CreateCall(F);
+ }
+
// Handle MSVC intrinsics before argument evaluation to prevent double
// evaluation.
- if (Optional<MSVCIntrin> MsvcIntId = translateArmToMsvcIntrin(BuiltinID))
+ if (std::optional<MSVCIntrin> MsvcIntId = translateArmToMsvcIntrin(BuiltinID))
return EmitMSVCBuiltinExpr(*MsvcIntId, E);
// Deal with MVE builtins
@@ -7685,6 +8688,13 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
if (Value *Result = EmitARMCDEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
return Result;
+ // Some intrinsics are equivalent - if they are use the base intrinsic ID.
+ auto It = llvm::find_if(NEONEquivalentIntrinsicMap, [BuiltinID](auto &P) {
+ return P.first == BuiltinID;
+ });
+ if (It != end(NEONEquivalentIntrinsicMap))
+ BuiltinID = It->second;
+
// Find out if any arguments are required to be integer constant
// expressions.
unsigned ICEArguments = 0;
@@ -7761,15 +8771,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
}
- if ((ICEArguments & (1 << i)) == 0) {
- Ops.push_back(EmitScalarExpr(E->getArg(i)));
- } else {
- // If this is required to be a constant, constant fold it so that we know
- // that the generated intrinsic gets a ConstantInt.
- Ops.push_back(llvm::ConstantInt::get(
- getLLVMContext(),
- *E->getArg(i)->getIntegerConstantExpr(getContext())));
- }
+ Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));
}
switch (BuiltinID) {
@@ -7831,10 +8833,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// The ARM _MoveToCoprocessor builtins put the input register value as
// the first argument, but the LLVM intrinsic expects it as the third one.
- case ARM::BI_MoveToCoprocessor:
- case ARM::BI_MoveToCoprocessor2: {
- Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ?
- Intrinsic::arm_mcr : Intrinsic::arm_mcr2);
+ case clang::ARM::BI_MoveToCoprocessor:
+ case clang::ARM::BI_MoveToCoprocessor2: {
+ Function *F = CGM.getIntrinsic(BuiltinID == clang::ARM::BI_MoveToCoprocessor
+ ? Intrinsic::arm_mcr
+ : Intrinsic::arm_mcr2);
return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
Ops[3], Ops[4], Ops[5]});
}
@@ -7843,15 +8846,16 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// Get the last argument, which specifies the vector type.
assert(HasExtraArg);
const Expr *Arg = E->getArg(E->getNumArgs()-1);
- Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext());
+ std::optional<llvm::APSInt> Result =
+ Arg->getIntegerConstantExpr(getContext());
if (!Result)
return nullptr;
- if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
- BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_f ||
+ BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_d) {
// Determine the overloaded type of this builtin.
llvm::Type *Ty;
- if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
+ if (BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_f)
Ty = FloatTy;
else
Ty = DoubleTy;
@@ -7879,7 +8883,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// Many NEON builtins have identical semantics and uses in ARM and
// AArch64. Emit these in a single function.
- auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
+ auto IntrinsicMap = ArrayRef(ARMSIMDIntrinsicMap);
const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
if (Builtin)
@@ -7909,10 +8913,10 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
int Indices[] = {1 - Lane, Lane};
return Builder.CreateShuffleVector(Ops[1], Ld, Indices, "vld1q_lane");
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case NEON::BI__builtin_neon_vld1_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType());
+ PtrOp0 = PtrOp0.withElementType(VTy->getElementType());
Value *Ld = Builder.CreateLoad(PtrOp0);
return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
}
@@ -7949,7 +8953,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vsri_n_v:
case NEON::BI__builtin_neon_vsriq_n_v:
rightShift = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case NEON::BI__builtin_neon_vsli_n_v:
case NEON::BI__builtin_neon_vsliq_n_v:
Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
@@ -7972,13 +8976,12 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
Tys), Ops);
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case NEON::BI__builtin_neon_vst1_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty));
- return St;
+ return Builder.CreateStore(Ops[1],
+ PtrOp0.withElementType(Ops[1]->getType()));
}
case NEON::BI__builtin_neon_vtbl1_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
@@ -8173,9 +9176,9 @@ Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
Ops.push_back(EmitScalarExpr(Addr));
Tys.push_back(ConvertType(Addr->getType()));
- Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys));
+ Function *F = CGM.getIntrinsic(IRIntr, ArrayRef(Tys));
Value *LoadResult = Builder.CreateCall(F, Ops);
- Value *MvecOut = UndefValue::get(MvecLType);
+ Value *MvecOut = PoisonValue::get(MvecLType);
for (unsigned i = 0; i < NumVectors; ++i) {
Value *Vec = Builder.CreateExtractValue(LoadResult, i);
MvecOut = Builder.CreateInsertValue(MvecOut, Vec, {0, i});
@@ -8215,7 +9218,7 @@ Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
for (unsigned i = 0; i < NumVectors; i++)
Ops.push_back(Builder.CreateExtractValue(Mvec, {0, i}));
- Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys));
+ Function *F = CGM.getIntrinsic(IRIntr, ArrayRef(Tys));
Value *ToReturn = nullptr;
for (unsigned i = 0; i < NumVectors; i++) {
Ops.push_back(llvm::ConstantInt::get(Int32Ty, i));
@@ -8281,7 +9284,8 @@ static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID
// Get the last argument, which specifies the vector type.
const Expr *Arg = E->getArg(E->getNumArgs() - 1);
- Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(CGF.getContext());
+ std::optional<llvm::APSInt> Result =
+ Arg->getIntegerConstantExpr(CGF.getContext());
if (!Result)
return nullptr;
@@ -8297,29 +9301,25 @@ static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID
// argument that specifies the vector type, need to handle each case.
switch (BuiltinID) {
case NEON::BI__builtin_neon_vtbl1_v: {
- return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 1), nullptr,
- Ops[1], Ty, Intrinsic::aarch64_neon_tbl1,
- "vtbl1");
+ return packTBLDVectorList(CGF, ArrayRef(Ops).slice(0, 1), nullptr, Ops[1],
+ Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
}
case NEON::BI__builtin_neon_vtbl2_v: {
- return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 2), nullptr,
- Ops[2], Ty, Intrinsic::aarch64_neon_tbl1,
- "vtbl1");
+ return packTBLDVectorList(CGF, ArrayRef(Ops).slice(0, 2), nullptr, Ops[2],
+ Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
}
case NEON::BI__builtin_neon_vtbl3_v: {
- return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 3), nullptr,
- Ops[3], Ty, Intrinsic::aarch64_neon_tbl2,
- "vtbl2");
+ return packTBLDVectorList(CGF, ArrayRef(Ops).slice(0, 3), nullptr, Ops[3],
+ Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
}
case NEON::BI__builtin_neon_vtbl4_v: {
- return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 4), nullptr,
- Ops[4], Ty, Intrinsic::aarch64_neon_tbl2,
- "vtbl2");
+ return packTBLDVectorList(CGF, ArrayRef(Ops).slice(0, 4), nullptr, Ops[4],
+ Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
}
case NEON::BI__builtin_neon_vtbx1_v: {
Value *TblRes =
- packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 1), nullptr, Ops[2],
- Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
+ packTBLDVectorList(CGF, ArrayRef(Ops).slice(1, 1), nullptr, Ops[2], Ty,
+ Intrinsic::aarch64_neon_tbl1, "vtbl1");
llvm::Constant *EightV = ConstantInt::get(Ty, 8);
Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
@@ -8330,14 +9330,13 @@ static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID
return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
}
case NEON::BI__builtin_neon_vtbx2_v: {
- return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 2), Ops[0],
- Ops[3], Ty, Intrinsic::aarch64_neon_tbx1,
- "vtbx1");
+ return packTBLDVectorList(CGF, ArrayRef(Ops).slice(1, 2), Ops[0], Ops[3],
+ Ty, Intrinsic::aarch64_neon_tbx1, "vtbx1");
}
case NEON::BI__builtin_neon_vtbx3_v: {
Value *TblRes =
- packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 3), nullptr, Ops[4],
- Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
+ packTBLDVectorList(CGF, ArrayRef(Ops).slice(1, 3), nullptr, Ops[4], Ty,
+ Intrinsic::aarch64_neon_tbl2, "vtbl2");
llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24);
Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
@@ -8349,9 +9348,8 @@ static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID
return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
}
case NEON::BI__builtin_neon_vtbx4_v: {
- return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 4), Ops[0],
- Ops[5], Ty, Intrinsic::aarch64_neon_tbx2,
- "vtbx2");
+ return packTBLDVectorList(CGF, ArrayRef(Ops).slice(1, 4), Ops[0], Ops[5],
+ Ty, Intrinsic::aarch64_neon_tbx2, "vtbx2");
}
case NEON::BI__builtin_neon_vqtbl1_v:
case NEON::BI__builtin_neon_vqtbl1q_v:
@@ -8390,7 +9388,7 @@ static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID
Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
auto *VTy = llvm::FixedVectorType::get(Int16Ty, 4);
Op = Builder.CreateBitCast(Op, Int16Ty);
- Value *V = UndefValue::get(VTy);
+ Value *V = PoisonValue::get(VTy);
llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
Op = Builder.CreateInsertElement(V, Op, CI);
return Op;
@@ -8399,7 +9397,7 @@ Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
/// SVEBuiltinMemEltTy - Returns the memory element type for this memory
/// access builtin. Only required if it can't be inferred from the base pointer
/// operand.
-llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(SVETypeFlags TypeFlags) {
+llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags) {
switch (TypeFlags.getMemEltType()) {
case SVETypeFlags::MemEltTyDefault:
return getEltType(TypeFlags);
@@ -8415,7 +9413,7 @@ llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(SVETypeFlags TypeFlags) {
llvm_unreachable("Unknown MemEltType");
}
-llvm::Type *CodeGenFunction::getEltType(SVETypeFlags TypeFlags) {
+llvm::Type *CodeGenFunction::getEltType(const SVETypeFlags &TypeFlags) {
switch (TypeFlags.getEltType()) {
default:
llvm_unreachable("Invalid SVETypeFlag!");
@@ -8428,6 +9426,8 @@ llvm::Type *CodeGenFunction::getEltType(SVETypeFlags TypeFlags) {
return Builder.getInt32Ty();
case SVETypeFlags::EltTyInt64:
return Builder.getInt64Ty();
+ case SVETypeFlags::EltTyInt128:
+ return Builder.getInt128Ty();
case SVETypeFlags::EltTyFloat16:
return Builder.getHalfTy();
@@ -8450,7 +9450,7 @@ llvm::Type *CodeGenFunction::getEltType(SVETypeFlags TypeFlags) {
// Return the llvm predicate vector type corresponding to the specified element
// TypeFlags.
llvm::ScalableVectorType *
-CodeGenFunction::getSVEPredType(SVETypeFlags TypeFlags) {
+CodeGenFunction::getSVEPredType(const SVETypeFlags &TypeFlags) {
switch (TypeFlags.getEltType()) {
default: llvm_unreachable("Unhandled SVETypeFlag!");
@@ -8519,7 +9519,8 @@ CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) {
}
}
-llvm::Value *CodeGenFunction::EmitSVEAllTruePred(SVETypeFlags TypeFlags) {
+llvm::Value *
+CodeGenFunction::EmitSVEAllTruePred(const SVETypeFlags &TypeFlags) {
Function *Ptrue =
CGM.getIntrinsic(Intrinsic::aarch64_sve_ptrue, getSVEPredType(TypeFlags));
return Builder.CreateCall(Ptrue, {Builder.getInt32(/*SV_ALL*/ 31)});
@@ -8536,6 +9537,11 @@ static llvm::ScalableVectorType *getSVEVectorForElementType(llvm::Type *EltTy) {
// the elements of the specified datatype.
Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred,
llvm::ScalableVectorType *VTy) {
+
+ if (isa<TargetExtType>(Pred->getType()) &&
+ cast<TargetExtType>(Pred->getType())->getName() == "aarch64.svcount")
+ return Pred;
+
auto *RTy = llvm::VectorType::get(IntegerType::get(getLLVMContext(), 1), VTy);
if (Pred->getType() == RTy)
return Pred;
@@ -8545,6 +9551,7 @@ Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred,
switch (VTy->getMinNumElements()) {
default:
llvm_unreachable("unsupported element count!");
+ case 1:
case 2:
case 4:
case 8:
@@ -8563,20 +9570,13 @@ Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred,
return C;
}
-Value *CodeGenFunction::EmitSVEGatherLoad(SVETypeFlags TypeFlags,
+Value *CodeGenFunction::EmitSVEGatherLoad(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value *> &Ops,
unsigned IntID) {
auto *ResultTy = getSVEType(TypeFlags);
auto *OverloadedTy =
llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), ResultTy);
- // At the ACLE level there's only one predicate type, svbool_t, which is
- // mapped to <n x 16 x i1>. However, this might be incompatible with the
- // actual type being loaded. For example, when loading doubles (i64) the
- // predicated should be <n x 2 x i1> instead. At the IR level the type of
- // the predicate and the data being loaded must match. Cast accordingly.
- Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
-
Function *F = nullptr;
if (Ops[1]->getType()->isVectorTy())
// This is the "vector base, scalar offset" case. In order to uniquely
@@ -8590,6 +9590,16 @@ Value *CodeGenFunction::EmitSVEGatherLoad(SVETypeFlags TypeFlags,
// intrinsic.
F = CGM.getIntrinsic(IntID, OverloadedTy);
+ // At the ACLE level there's only one predicate type, svbool_t, which is
+ // mapped to <n x 16 x i1>. However, this might be incompatible with the
+ // actual type being loaded. For example, when loading doubles (i64) the
+ // predicate should be <n x 2 x i1> instead. At the IR level the type of
+ // the predicate and the data being loaded must match. Cast to the type
+ // expected by the intrinsic. The intrinsic itself should be defined in
+ // a way than enforces relations between parameter types.
+ Ops[0] = EmitSVEPredicateCast(
+ Ops[0], cast<llvm::ScalableVectorType>(F->getArg(0)->getType()));
+
// Pass 0 when the offset is missing. This can only be applied when using
// the "vector base" addressing mode for which ACLE allows no offset. The
// corresponding LLVM IR always requires an offset.
@@ -8603,8 +9613,7 @@ Value *CodeGenFunction::EmitSVEGatherLoad(SVETypeFlags TypeFlags,
if (!TypeFlags.isByteIndexed() && Ops[1]->getType()->isVectorTy()) {
unsigned BytesPerElt =
OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
- Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
- Ops[2] = Builder.CreateMul(Ops[2], Scale);
+ Ops[2] = Builder.CreateShl(Ops[2], Log2_32(BytesPerElt));
}
Value *Call = Builder.CreateCall(F, Ops);
@@ -8615,7 +9624,7 @@ Value *CodeGenFunction::EmitSVEGatherLoad(SVETypeFlags TypeFlags,
: Builder.CreateSExt(Call, ResultTy);
}
-Value *CodeGenFunction::EmitSVEScatterStore(SVETypeFlags TypeFlags,
+Value *CodeGenFunction::EmitSVEScatterStore(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value *> &Ops,
unsigned IntID) {
auto *SrcDataTy = getSVEType(TypeFlags);
@@ -8655,22 +9664,24 @@ Value *CodeGenFunction::EmitSVEScatterStore(SVETypeFlags TypeFlags,
// mapped to <n x 16 x i1>. However, this might be incompatible with the
// actual type being stored. For example, when storing doubles (i64) the
// predicated should be <n x 2 x i1> instead. At the IR level the type of
- // the predicate and the data being stored must match. Cast accordingly.
- Ops[1] = EmitSVEPredicateCast(Ops[1], OverloadedTy);
+ // the predicate and the data being stored must match. Cast to the type
+ // expected by the intrinsic. The intrinsic itself should be defined in
+ // a way that enforces relations between parameter types.
+ Ops[1] = EmitSVEPredicateCast(
+ Ops[1], cast<llvm::ScalableVectorType>(F->getArg(1)->getType()));
// For "vector base, scalar index" scale the index so that it becomes a
// scalar offset.
if (!TypeFlags.isByteIndexed() && Ops[2]->getType()->isVectorTy()) {
unsigned BytesPerElt =
OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
- Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
- Ops[3] = Builder.CreateMul(Ops[3], Scale);
+ Ops[3] = Builder.CreateShl(Ops[3], Log2_32(BytesPerElt));
}
return Builder.CreateCall(F, Ops);
}
-Value *CodeGenFunction::EmitSVEGatherPrefetch(SVETypeFlags TypeFlags,
+Value *CodeGenFunction::EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value *> &Ops,
unsigned IntID) {
// The gather prefetches are overloaded on the vector input - this can either
@@ -8694,8 +9705,8 @@ Value *CodeGenFunction::EmitSVEGatherPrefetch(SVETypeFlags TypeFlags,
// Index needs to be passed as scaled offset.
llvm::Type *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
unsigned BytesPerElt = MemEltTy->getPrimitiveSizeInBits() / 8;
- Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
- Ops[2] = Builder.CreateMul(Ops[2], Scale);
+ if (BytesPerElt > 1)
+ Ops[2] = Builder.CreateShl(Ops[2], Log2_32(BytesPerElt));
}
}
@@ -8703,22 +9714,27 @@ Value *CodeGenFunction::EmitSVEGatherPrefetch(SVETypeFlags TypeFlags,
return Builder.CreateCall(F, Ops);
}
-Value *CodeGenFunction::EmitSVEStructLoad(SVETypeFlags TypeFlags,
+Value *CodeGenFunction::EmitSVEStructLoad(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value*> &Ops,
unsigned IntID) {
llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
- auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
- auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());
unsigned N;
switch (IntID) {
- case Intrinsic::aarch64_sve_ld2:
+ case Intrinsic::aarch64_sve_ld2_sret:
+ case Intrinsic::aarch64_sve_ld1_pn_x2:
+ case Intrinsic::aarch64_sve_ldnt1_pn_x2:
+ case Intrinsic::aarch64_sve_ld2q_sret:
N = 2;
break;
- case Intrinsic::aarch64_sve_ld3:
+ case Intrinsic::aarch64_sve_ld3_sret:
+ case Intrinsic::aarch64_sve_ld3q_sret:
N = 3;
break;
- case Intrinsic::aarch64_sve_ld4:
+ case Intrinsic::aarch64_sve_ld4_sret:
+ case Intrinsic::aarch64_sve_ld1_pn_x4:
+ case Intrinsic::aarch64_sve_ldnt1_pn_x4:
+ case Intrinsic::aarch64_sve_ld4q_sret:
N = 4;
break;
default:
@@ -8727,64 +9743,74 @@ Value *CodeGenFunction::EmitSVEStructLoad(SVETypeFlags TypeFlags,
auto RetTy = llvm::VectorType::get(VTy->getElementType(),
VTy->getElementCount() * N);
- Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
- Value *BasePtr= Builder.CreateBitCast(Ops[1], VecPtrTy);
- Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
- BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
- BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);
+ Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
+ Value *BasePtr = Ops[1];
- Function *F = CGM.getIntrinsic(IntID, {RetTy, Predicate->getType()});
- return Builder.CreateCall(F, { Predicate, BasePtr });
+ // Does the load have an offset?
+ if (Ops.size() > 2)
+ BasePtr = Builder.CreateGEP(VTy, BasePtr, Ops[2]);
+
+ Function *F = CGM.getIntrinsic(IntID, {VTy});
+ Value *Call = Builder.CreateCall(F, {Predicate, BasePtr});
+ unsigned MinElts = VTy->getMinNumElements();
+ Value *Ret = llvm::PoisonValue::get(RetTy);
+ for (unsigned I = 0; I < N; I++) {
+ Value *Idx = ConstantInt::get(CGM.Int64Ty, I * MinElts);
+ Value *SRet = Builder.CreateExtractValue(Call, I);
+ Ret = Builder.CreateInsertVector(RetTy, Ret, SRet, Idx);
+ }
+ return Ret;
}
-Value *CodeGenFunction::EmitSVEStructStore(SVETypeFlags TypeFlags,
+Value *CodeGenFunction::EmitSVEStructStore(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value*> &Ops,
unsigned IntID) {
llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
- auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
- auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());
unsigned N;
switch (IntID) {
case Intrinsic::aarch64_sve_st2:
+ case Intrinsic::aarch64_sve_st1_pn_x2:
+ case Intrinsic::aarch64_sve_stnt1_pn_x2:
+ case Intrinsic::aarch64_sve_st2q:
N = 2;
break;
case Intrinsic::aarch64_sve_st3:
+ case Intrinsic::aarch64_sve_st3q:
N = 3;
break;
case Intrinsic::aarch64_sve_st4:
+ case Intrinsic::aarch64_sve_st1_pn_x4:
+ case Intrinsic::aarch64_sve_stnt1_pn_x4:
+ case Intrinsic::aarch64_sve_st4q:
N = 4;
break;
default:
llvm_unreachable("unknown intrinsic!");
}
- auto TupleTy =
- llvm::VectorType::get(VTy->getElementType(), VTy->getElementCount() * N);
Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
- Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy);
- Value *Offset = Ops.size() > 3 ? Ops[2] : Builder.getInt32(0);
- Value *Val = Ops.back();
- BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
- BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);
+ Value *BasePtr = Ops[1];
+
+ // Does the store have an offset?
+ if (Ops.size() > (2 + N))
+ BasePtr = Builder.CreateGEP(VTy, BasePtr, Ops[2]);
// The llvm.aarch64.sve.st2/3/4 intrinsics take legal part vectors, so we
// need to break up the tuple vector.
SmallVector<llvm::Value*, 5> Operands;
- Function *FExtr =
- CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
- for (unsigned I = 0; I < N; ++I)
- Operands.push_back(Builder.CreateCall(FExtr, {Val, Builder.getInt32(I)}));
+ for (unsigned I = Ops.size() - N; I < Ops.size(); ++I)
+ Operands.push_back(Ops[I]);
Operands.append({Predicate, BasePtr});
-
Function *F = CGM.getIntrinsic(IntID, { VTy });
+
return Builder.CreateCall(F, Operands);
}
// SVE2's svpmullb and svpmullt builtins are similar to the svpmullb_pair and
// svpmullt_pair intrinsics, with the exception that their results are bitcast
// to a wider type.
-Value *CodeGenFunction::EmitSVEPMull(SVETypeFlags TypeFlags,
+Value *CodeGenFunction::EmitSVEPMull(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value *> &Ops,
unsigned BuiltinID) {
// Splat scalar operand to vector (intrinsics with _n infix)
@@ -8802,14 +9828,14 @@ Value *CodeGenFunction::EmitSVEPMull(SVETypeFlags TypeFlags,
return EmitSVEReinterpret(Call, Ty);
}
-Value *CodeGenFunction::EmitSVEMovl(SVETypeFlags TypeFlags,
+Value *CodeGenFunction::EmitSVEMovl(const SVETypeFlags &TypeFlags,
ArrayRef<Value *> Ops, unsigned BuiltinID) {
llvm::Type *OverloadedTy = getSVEType(TypeFlags);
Function *F = CGM.getIntrinsic(BuiltinID, OverloadedTy);
return Builder.CreateCall(F, {Ops[0], Builder.getInt32(0)});
}
-Value *CodeGenFunction::EmitSVEPrefetchLoad(SVETypeFlags TypeFlags,
+Value *CodeGenFunction::EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value *> &Ops,
unsigned BuiltinID) {
auto *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
@@ -8820,13 +9846,9 @@ Value *CodeGenFunction::EmitSVEPrefetchLoad(SVETypeFlags TypeFlags,
Value *BasePtr = Ops[1];
// Implement the index operand if not omitted.
- if (Ops.size() > 3) {
- BasePtr = Builder.CreateBitCast(BasePtr, MemoryTy->getPointerTo());
+ if (Ops.size() > 3)
BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
- }
- // Prefetch intriniscs always expect an i8*
- BasePtr = Builder.CreateBitCast(BasePtr, llvm::PointerType::getUnqual(Int8Ty));
Value *PrfOp = Ops.back();
Function *F = CGM.getIntrinsic(BuiltinID, Predicate->getType());
@@ -8836,7 +9858,7 @@ Value *CodeGenFunction::EmitSVEPrefetchLoad(SVETypeFlags TypeFlags,
Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E,
llvm::Type *ReturnTy,
SmallVectorImpl<Value *> &Ops,
- unsigned BuiltinID,
+ unsigned IntrinsicID,
bool IsZExtReturn) {
QualType LangPTy = E->getArg(1)->getType();
llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
@@ -8845,24 +9867,46 @@ Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E,
// The vector type that is returned may be different from the
// eventual type loaded from memory.
auto VectorTy = cast<llvm::ScalableVectorType>(ReturnTy);
- auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
+ llvm::ScalableVectorType *MemoryTy = nullptr;
+ llvm::ScalableVectorType *PredTy = nullptr;
+ bool IsQuadLoad = false;
+ switch (IntrinsicID) {
+ case Intrinsic::aarch64_sve_ld1uwq:
+ case Intrinsic::aarch64_sve_ld1udq:
+ MemoryTy = llvm::ScalableVectorType::get(MemEltTy, 1);
+ PredTy = llvm::ScalableVectorType::get(
+ llvm::Type::getInt1Ty(getLLVMContext()), 1);
+ IsQuadLoad = true;
+ break;
+ default:
+ MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
+ PredTy = MemoryTy;
+ break;
+ }
- Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
- Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
- Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
- BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);
+ Value *Predicate = EmitSVEPredicateCast(Ops[0], PredTy);
+ Value *BasePtr = Ops[1];
+
+ // Does the load have an offset?
+ if (Ops.size() > 2)
+ BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
+
+ Function *F = CGM.getIntrinsic(IntrinsicID, IsQuadLoad ? VectorTy : MemoryTy);
+ auto *Load =
+ cast<llvm::Instruction>(Builder.CreateCall(F, {Predicate, BasePtr}));
+ auto TBAAInfo = CGM.getTBAAAccessInfo(LangPTy->getPointeeType());
+ CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
- BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
- Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
- Value *Load = Builder.CreateCall(F, {Predicate, BasePtr});
+ if (IsQuadLoad)
+ return Load;
return IsZExtReturn ? Builder.CreateZExt(Load, VectorTy)
- : Builder.CreateSExt(Load, VectorTy);
+ : Builder.CreateSExt(Load, VectorTy);
}
Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
SmallVectorImpl<Value *> &Ops,
- unsigned BuiltinID) {
+ unsigned IntrinsicID) {
QualType LangPTy = E->getArg(1)->getType();
llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
LangPTy->castAs<PointerType>()->getPointeeType());
@@ -8872,24 +9916,109 @@ Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
auto VectorTy = cast<llvm::ScalableVectorType>(Ops.back()->getType());
auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
- Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
- Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
- Value *Offset = Ops.size() == 4 ? Ops[2] : Builder.getInt32(0);
- BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);
+ auto PredTy = MemoryTy;
+ auto AddrMemoryTy = MemoryTy;
+ bool IsQuadStore = false;
+
+ switch (IntrinsicID) {
+ case Intrinsic::aarch64_sve_st1wq:
+ case Intrinsic::aarch64_sve_st1dq:
+ AddrMemoryTy = llvm::ScalableVectorType::get(MemEltTy, 1);
+ PredTy =
+ llvm::ScalableVectorType::get(IntegerType::get(getLLVMContext(), 1), 1);
+ IsQuadStore = true;
+ break;
+ default:
+ break;
+ }
+ Value *Predicate = EmitSVEPredicateCast(Ops[0], PredTy);
+ Value *BasePtr = Ops[1];
+
+ // Does the store have an offset?
+ if (Ops.size() == 4)
+ BasePtr = Builder.CreateGEP(AddrMemoryTy, BasePtr, Ops[2]);
// Last value is always the data
- llvm::Value *Val = Builder.CreateTrunc(Ops.back(), MemoryTy);
+ Value *Val =
+ IsQuadStore ? Ops.back() : Builder.CreateTrunc(Ops.back(), MemoryTy);
- BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
- Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
- return Builder.CreateCall(F, {Val, Predicate, BasePtr});
+ Function *F =
+ CGM.getIntrinsic(IntrinsicID, IsQuadStore ? VectorTy : MemoryTy);
+ auto *Store =
+ cast<llvm::Instruction>(Builder.CreateCall(F, {Val, Predicate, BasePtr}));
+ auto TBAAInfo = CGM.getTBAAAccessInfo(LangPTy->getPointeeType());
+ CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
+ return Store;
+}
+
+Value *CodeGenFunction::EmitSMELd1St1(const SVETypeFlags &TypeFlags,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned IntID) {
+ Ops[2] = EmitSVEPredicateCast(
+ Ops[2], getSVEVectorForElementType(SVEBuiltinMemEltTy(TypeFlags)));
+
+ SmallVector<Value *> NewOps;
+ NewOps.push_back(Ops[2]);
+
+ llvm::Value *BasePtr = Ops[3];
+
+ // If the intrinsic contains the vnum parameter, multiply it with the vector
+ // size in bytes.
+ if (Ops.size() == 5) {
+ Function *StreamingVectorLength =
+ CGM.getIntrinsic(Intrinsic::aarch64_sme_cntsb);
+ llvm::Value *StreamingVectorLengthCall =
+ Builder.CreateCall(StreamingVectorLength);
+ llvm::Value *Mulvl =
+ Builder.CreateMul(StreamingVectorLengthCall, Ops[4], "mulvl");
+ // The type of the ptr parameter is void *, so use Int8Ty here.
+ BasePtr = Builder.CreateGEP(Int8Ty, Ops[3], Mulvl);
+ }
+ NewOps.push_back(BasePtr);
+ NewOps.push_back(Ops[0]);
+ NewOps.push_back(Ops[1]);
+ Function *F = CGM.getIntrinsic(IntID);
+ return Builder.CreateCall(F, NewOps);
+}
+
+Value *CodeGenFunction::EmitSMEReadWrite(const SVETypeFlags &TypeFlags,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned IntID) {
+ auto *VecTy = getSVEType(TypeFlags);
+ Function *F = CGM.getIntrinsic(IntID, VecTy);
+ if (TypeFlags.isReadZA())
+ Ops[1] = EmitSVEPredicateCast(Ops[1], VecTy);
+ else if (TypeFlags.isWriteZA())
+ Ops[2] = EmitSVEPredicateCast(Ops[2], VecTy);
+ return Builder.CreateCall(F, Ops);
+}
+
+Value *CodeGenFunction::EmitSMEZero(const SVETypeFlags &TypeFlags,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned IntID) {
+ // svzero_za() intrinsic zeros the entire za tile and has no paramters.
+ if (Ops.size() == 0)
+ Ops.push_back(llvm::ConstantInt::get(Int32Ty, 255));
+ Function *F = CGM.getIntrinsic(IntID, {});
+ return Builder.CreateCall(F, Ops);
+}
+
+Value *CodeGenFunction::EmitSMELdrStr(const SVETypeFlags &TypeFlags,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned IntID) {
+ if (Ops.size() == 2)
+ Ops.push_back(Builder.getInt32(0));
+ else
+ Ops[2] = Builder.CreateIntCast(Ops[2], Int32Ty, true);
+ Function *F = CGM.getIntrinsic(IntID, {});
+ return Builder.CreateCall(F, Ops);
}
// Limit the usage of scalable llvm IR generated by the ACLE by using the
// sve dup.x intrinsic instead of IRBuilder::CreateVectorSplat.
Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) {
- auto F = CGM.getIntrinsic(Intrinsic::aarch64_sve_dup_x, Ty);
- return Builder.CreateCall(F, Scalar);
+ return Builder.CreateVectorSplat(
+ cast<llvm::VectorType>(Ty)->getElementCount(), Scalar);
}
Value *CodeGenFunction::EmitSVEDupX(Value* Scalar) {
@@ -8918,52 +10047,121 @@ static void InsertExplicitUndefOperand(CGBuilderTy &Builder, llvm::Type *Ty,
Ops.insert(Ops.begin(), SplatUndef);
}
-SmallVector<llvm::Type *, 2> CodeGenFunction::getSVEOverloadTypes(
- SVETypeFlags TypeFlags, llvm::Type *ResultType, ArrayRef<Value *> Ops) {
+SmallVector<llvm::Type *, 2>
+CodeGenFunction::getSVEOverloadTypes(const SVETypeFlags &TypeFlags,
+ llvm::Type *ResultType,
+ ArrayRef<Value *> Ops) {
if (TypeFlags.isOverloadNone())
return {};
llvm::Type *DefaultType = getSVEType(TypeFlags);
- if (TypeFlags.isOverloadWhile())
+ if (TypeFlags.isOverloadWhileOrMultiVecCvt())
return {DefaultType, Ops[1]->getType()};
if (TypeFlags.isOverloadWhileRW())
return {getSVEPredType(TypeFlags), Ops[0]->getType()};
- if (TypeFlags.isOverloadCvt() || TypeFlags.isTupleSet())
+ if (TypeFlags.isOverloadCvt())
return {Ops[0]->getType(), Ops.back()->getType()};
- if (TypeFlags.isTupleCreate() || TypeFlags.isTupleGet())
- return {ResultType, Ops[0]->getType()};
+ if (TypeFlags.isReductionQV() && !ResultType->isScalableTy() &&
+ ResultType->isVectorTy())
+ return {ResultType, Ops[1]->getType()};
assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");
return {DefaultType};
}
-Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
- const CallExpr *E) {
+Value *CodeGenFunction::EmitSVETupleSetOrGet(const SVETypeFlags &TypeFlags,
+ llvm::Type *Ty,
+ ArrayRef<Value *> Ops) {
+ assert((TypeFlags.isTupleSet() || TypeFlags.isTupleGet()) &&
+ "Expects TypleFlag isTupleSet or TypeFlags.isTupleSet()");
+
+ unsigned I = cast<ConstantInt>(Ops[1])->getSExtValue();
+ auto *SingleVecTy = dyn_cast<llvm::ScalableVectorType>(
+ TypeFlags.isTupleSet() ? Ops[2]->getType() : Ty);
+ Value *Idx = ConstantInt::get(CGM.Int64Ty,
+ I * SingleVecTy->getMinNumElements());
+
+ if (TypeFlags.isTupleSet())
+ return Builder.CreateInsertVector(Ty, Ops[0], Ops[2], Idx);
+ return Builder.CreateExtractVector(Ty, Ops[0], Idx);
+}
+
+Value *CodeGenFunction::EmitSVETupleCreate(const SVETypeFlags &TypeFlags,
+ llvm::Type *Ty,
+ ArrayRef<Value *> Ops) {
+ assert(TypeFlags.isTupleCreate() && "Expects TypleFlag isTupleCreate");
+
+ auto *SrcTy = dyn_cast<llvm::ScalableVectorType>(Ops[0]->getType());
+ unsigned MinElts = SrcTy->getMinNumElements();
+ Value *Call = llvm::PoisonValue::get(Ty);
+ for (unsigned I = 0; I < Ops.size(); I++) {
+ Value *Idx = ConstantInt::get(CGM.Int64Ty, I * MinElts);
+ Call = Builder.CreateInsertVector(Ty, Call, Ops[I], Idx);
+ }
+
+ return Call;
+}
+
+Value *CodeGenFunction::FormSVEBuiltinResult(Value *Call) {
+ // Multi-vector results should be broken up into a single (wide) result
+ // vector.
+ auto *StructTy = dyn_cast<StructType>(Call->getType());
+ if (!StructTy)
+ return Call;
+
+ auto *VTy = dyn_cast<ScalableVectorType>(StructTy->getTypeAtIndex(0U));
+ if (!VTy)
+ return Call;
+ unsigned N = StructTy->getNumElements();
+
+ // We may need to emit a cast to a svbool_t
+ bool IsPredTy = VTy->getElementType()->isIntegerTy(1);
+ unsigned MinElts = IsPredTy ? 16 : VTy->getMinNumElements();
+
+ ScalableVectorType *WideVTy =
+ ScalableVectorType::get(VTy->getElementType(), MinElts * N);
+ Value *Ret = llvm::PoisonValue::get(WideVTy);
+ for (unsigned I = 0; I < N; ++I) {
+ Value *SRet = Builder.CreateExtractValue(Call, I);
+ assert(SRet->getType() == VTy && "Unexpected type for result value");
+ Value *Idx = ConstantInt::get(CGM.Int64Ty, I * MinElts);
+
+ if (IsPredTy)
+ SRet = EmitSVEPredicateCast(
+ SRet, ScalableVectorType::get(Builder.getInt1Ty(), 16));
+
+ Ret = Builder.CreateInsertVector(WideVTy, Ret, SRet, Idx);
+ }
+ Call = Ret;
+
+ return Call;
+}
+
+void CodeGenFunction::GetAArch64SVEProcessedOperands(
+ unsigned BuiltinID, const CallExpr *E, SmallVectorImpl<Value *> &Ops,
+ SVETypeFlags TypeFlags) {
// Find out if any arguments are required to be integer constant expressions.
unsigned ICEArguments = 0;
ASTContext::GetBuiltinTypeError Error;
getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
assert(Error == ASTContext::GE_None && "Should not codegen an error");
- llvm::Type *Ty = ConvertType(E->getType());
- if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 &&
- BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64) {
- Value *Val = EmitScalarExpr(E->getArg(0));
- return EmitSVEReinterpret(Val, Ty);
- }
+ // Tuple set/get only requires one insert/extract vector, which is
+ // created by EmitSVETupleSetOrGet.
+ bool IsTupleGetOrSet = TypeFlags.isTupleSet() || TypeFlags.isTupleGet();
- llvm::SmallVector<Value *, 4> Ops;
for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
- if ((ICEArguments & (1 << i)) == 0)
- Ops.push_back(EmitScalarExpr(E->getArg(i)));
- else {
+ bool IsICE = ICEArguments & (1 << i);
+ Value *Arg = EmitScalarExpr(E->getArg(i));
+
+ if (IsICE) {
// If this is required to be a constant, constant fold it so that we know
// that the generated intrinsic gets a ConstantInt.
- Optional<llvm::APSInt> Result =
+ std::optional<llvm::APSInt> Result =
E->getArg(i)->getIntegerConstantExpr(getContext());
assert(Result && "Expected argument to be a constant");
@@ -8972,12 +10170,49 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
// immediate requires more than a handful of bits.
*Result = Result->extOrTrunc(32);
Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), *Result));
+ continue;
}
+
+ if (IsTupleGetOrSet || !isa<ScalableVectorType>(Arg->getType())) {
+ Ops.push_back(Arg);
+ continue;
+ }
+
+ auto *VTy = cast<ScalableVectorType>(Arg->getType());
+ unsigned MinElts = VTy->getMinNumElements();
+ bool IsPred = VTy->getElementType()->isIntegerTy(1);
+ unsigned N = (MinElts * VTy->getScalarSizeInBits()) / (IsPred ? 16 : 128);
+
+ if (N == 1) {
+ Ops.push_back(Arg);
+ continue;
+ }
+
+ for (unsigned I = 0; I < N; ++I) {
+ Value *Idx = ConstantInt::get(CGM.Int64Ty, (I * MinElts) / N);
+ auto *NewVTy =
+ ScalableVectorType::get(VTy->getElementType(), MinElts / N);
+ Ops.push_back(Builder.CreateExtractVector(NewVTy, Arg, Idx));
+ }
+ }
+}
+
+Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ llvm::Type *Ty = ConvertType(E->getType());
+ if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 &&
+ BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64_x4) {
+ Value *Val = EmitScalarExpr(E->getArg(0));
+ return EmitSVEReinterpret(Val, Ty);
}
auto *Builtin = findARMVectorIntrinsicInMap(AArch64SVEIntrinsicMap, BuiltinID,
AArch64SVEIntrinsicsProvenSorted);
+
+ llvm::SmallVector<Value *, 4> Ops;
SVETypeFlags TypeFlags(Builtin->TypeModifier);
+ GetAArch64SVEProcessedOperands(BuiltinID, E, Ops, TypeFlags);
+
if (TypeFlags.isLoad())
return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic,
TypeFlags.isZExtReturn());
@@ -8991,10 +10226,14 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
else if (TypeFlags.isGatherPrefetch())
return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic);
- else if (TypeFlags.isStructLoad())
- return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
- else if (TypeFlags.isStructStore())
- return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+ else if (TypeFlags.isStructLoad())
+ return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+ else if (TypeFlags.isStructStore())
+ return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+ else if (TypeFlags.isTupleSet() || TypeFlags.isTupleGet())
+ return EmitSVETupleSetOrGet(TypeFlags, Ty, Ops);
+ else if (TypeFlags.isTupleCreate())
+ return EmitSVETupleCreate(TypeFlags, Ty, Ops);
else if (TypeFlags.isUndef())
return UndefValue::get(Ty);
else if (Builtin->LLVMIntrinsic != 0) {
@@ -9025,16 +10264,20 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
if (TypeFlags.isReverseCompare())
std::swap(Ops[1], Ops[2]);
-
- if (TypeFlags.isReverseUSDOT())
+ else if (TypeFlags.isReverseUSDOT())
+ std::swap(Ops[1], Ops[2]);
+ else if (TypeFlags.isReverseMergeAnyBinOp() &&
+ TypeFlags.getMergeType() == SVETypeFlags::MergeAny)
std::swap(Ops[1], Ops[2]);
+ else if (TypeFlags.isReverseMergeAnyAccOp() &&
+ TypeFlags.getMergeType() == SVETypeFlags::MergeAny)
+ std::swap(Ops[1], Ops[3]);
// Predicated intrinsics with _z suffix need a select w/ zeroinitializer.
if (TypeFlags.getMergeType() == SVETypeFlags::MergeZero) {
llvm::Type *OpndTy = Ops[1]->getType();
auto *SplatZero = Constant::getNullValue(OpndTy);
- Function *Sel = CGM.getIntrinsic(Intrinsic::aarch64_sve_sel, OpndTy);
- Ops[1] = Builder.CreateCall(Sel, {Ops[0], Ops[1], SplatZero});
+ Ops[1] = Builder.CreateSelect(Ops[0], Ops[1], SplatZero);
}
Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic,
@@ -9046,13 +10289,55 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
if (PredTy->getScalarType()->isIntegerTy(1))
Call = EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
- return Call;
+ return FormSVEBuiltinResult(Call);
}
switch (BuiltinID) {
default:
return nullptr;
+ case SVE::BI__builtin_sve_svreinterpret_b: {
+ auto SVCountTy =
+ llvm::TargetExtType::get(getLLVMContext(), "aarch64.svcount");
+ Function *CastFromSVCountF =
+ CGM.getIntrinsic(Intrinsic::aarch64_sve_convert_to_svbool, SVCountTy);
+ return Builder.CreateCall(CastFromSVCountF, Ops[0]);
+ }
+ case SVE::BI__builtin_sve_svreinterpret_c: {
+ auto SVCountTy =
+ llvm::TargetExtType::get(getLLVMContext(), "aarch64.svcount");
+ Function *CastToSVCountF =
+ CGM.getIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool, SVCountTy);
+ return Builder.CreateCall(CastToSVCountF, Ops[0]);
+ }
+
+ case SVE::BI__builtin_sve_svpsel_lane_b8:
+ case SVE::BI__builtin_sve_svpsel_lane_b16:
+ case SVE::BI__builtin_sve_svpsel_lane_b32:
+ case SVE::BI__builtin_sve_svpsel_lane_b64:
+ case SVE::BI__builtin_sve_svpsel_lane_c8:
+ case SVE::BI__builtin_sve_svpsel_lane_c16:
+ case SVE::BI__builtin_sve_svpsel_lane_c32:
+ case SVE::BI__builtin_sve_svpsel_lane_c64: {
+ bool IsSVCount = isa<TargetExtType>(Ops[0]->getType());
+ assert(((!IsSVCount || cast<TargetExtType>(Ops[0]->getType())->getName() ==
+ "aarch64.svcount")) &&
+ "Unexpected TargetExtType");
+ auto SVCountTy =
+ llvm::TargetExtType::get(getLLVMContext(), "aarch64.svcount");
+ Function *CastFromSVCountF =
+ CGM.getIntrinsic(Intrinsic::aarch64_sve_convert_to_svbool, SVCountTy);
+ Function *CastToSVCountF =
+ CGM.getIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool, SVCountTy);
+
+ auto OverloadedTy = getSVEType(SVETypeFlags(Builtin->TypeModifier));
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_psel, OverloadedTy);
+ llvm::Value *Ops0 =
+ IsSVCount ? Builder.CreateCall(CastFromSVCountF, Ops[0]) : Ops[0];
+ llvm::Value *Ops1 = EmitSVEPredicateCast(Ops[1], OverloadedTy);
+ llvm::Value *PSel = Builder.CreateCall(F, {Ops0, Ops1, Ops[2]});
+ return IsSVCount ? Builder.CreateCall(CastToSVCountF, PSel) : PSel;
+ }
case SVE::BI__builtin_sve_svmov_b_z: {
// svmov_b_z(pg, op) <=> svand_b_z(pg, op, op)
SVETypeFlags TypeFlags(Builtin->TypeModifier);
@@ -9147,12 +10432,9 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
VecOps.push_back(Builder.CreateZExt(Ops[I], EltTy));
Value *Vec = BuildVector(VecOps);
- SVETypeFlags TypeFlags(Builtin->TypeModifier);
- Value *Pred = EmitSVEAllTruePred(TypeFlags);
-
llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy);
Value *InsertSubVec = Builder.CreateInsertVector(
- OverloadedTy, UndefValue::get(OverloadedTy), Vec, Builder.getInt64(0));
+ OverloadedTy, PoisonValue::get(OverloadedTy), Vec, Builder.getInt64(0));
Function *F =
CGM.getIntrinsic(Intrinsic::aarch64_sve_dupq_lane, OverloadedTy);
@@ -9162,6 +10444,9 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
if (!IsBoolTy)
return DupQLane;
+ SVETypeFlags TypeFlags(Builtin->TypeModifier);
+ Value *Pred = EmitSVEAllTruePred(TypeFlags);
+
// For svdupq_n_b* we need to add an additional 'cmpne' with '0'.
F = CGM.getIntrinsic(NumOpnds == 2 ? Intrinsic::aarch64_sve_cmpne
: Intrinsic::aarch64_sve_cmpne_wide,
@@ -9174,6 +10459,13 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
case SVE::BI__builtin_sve_svpfalse_b:
return ConstantInt::getFalse(Ty);
+ case SVE::BI__builtin_sve_svpfalse_c: {
+ auto SVBoolTy = ScalableVectorType::get(Builder.getInt1Ty(), 16);
+ Function *CastToSVCountF =
+ CGM.getIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool, Ty);
+ return Builder.CreateCall(CastToSVCountF, ConstantInt::getFalse(SVBoolTy));
+ }
+
case SVE::BI__builtin_sve_svlen_bf16:
case SVE::BI__builtin_sve_svlen_f16:
case SVE::BI__builtin_sve_svlen_f32:
@@ -9208,14 +10500,57 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
case SVE::BI__builtin_sve_svtbl2_f32:
case SVE::BI__builtin_sve_svtbl2_f64: {
SVETypeFlags TF(Builtin->TypeModifier);
- auto VTy = cast<llvm::VectorType>(getSVEType(TF));
- auto TupleTy = llvm::VectorType::getDoubleElementsVectorType(VTy);
- Function *FExtr =
- CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
- Value *V0 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(0)});
- Value *V1 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(1)});
+ auto VTy = cast<llvm::ScalableVectorType>(getSVEType(TF));
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_tbl2, VTy);
- return Builder.CreateCall(F, {V0, V1, Ops[1]});
+ return Builder.CreateCall(F, Ops);
+ }
+
+ case SVE::BI__builtin_sve_svset_neonq_s8:
+ case SVE::BI__builtin_sve_svset_neonq_s16:
+ case SVE::BI__builtin_sve_svset_neonq_s32:
+ case SVE::BI__builtin_sve_svset_neonq_s64:
+ case SVE::BI__builtin_sve_svset_neonq_u8:
+ case SVE::BI__builtin_sve_svset_neonq_u16:
+ case SVE::BI__builtin_sve_svset_neonq_u32:
+ case SVE::BI__builtin_sve_svset_neonq_u64:
+ case SVE::BI__builtin_sve_svset_neonq_f16:
+ case SVE::BI__builtin_sve_svset_neonq_f32:
+ case SVE::BI__builtin_sve_svset_neonq_f64:
+ case SVE::BI__builtin_sve_svset_neonq_bf16: {
+ return Builder.CreateInsertVector(Ty, Ops[0], Ops[1], Builder.getInt64(0));
+ }
+
+ case SVE::BI__builtin_sve_svget_neonq_s8:
+ case SVE::BI__builtin_sve_svget_neonq_s16:
+ case SVE::BI__builtin_sve_svget_neonq_s32:
+ case SVE::BI__builtin_sve_svget_neonq_s64:
+ case SVE::BI__builtin_sve_svget_neonq_u8:
+ case SVE::BI__builtin_sve_svget_neonq_u16:
+ case SVE::BI__builtin_sve_svget_neonq_u32:
+ case SVE::BI__builtin_sve_svget_neonq_u64:
+ case SVE::BI__builtin_sve_svget_neonq_f16:
+ case SVE::BI__builtin_sve_svget_neonq_f32:
+ case SVE::BI__builtin_sve_svget_neonq_f64:
+ case SVE::BI__builtin_sve_svget_neonq_bf16: {
+ return Builder.CreateExtractVector(Ty, Ops[0], Builder.getInt64(0));
+ }
+
+ case SVE::BI__builtin_sve_svdup_neonq_s8:
+ case SVE::BI__builtin_sve_svdup_neonq_s16:
+ case SVE::BI__builtin_sve_svdup_neonq_s32:
+ case SVE::BI__builtin_sve_svdup_neonq_s64:
+ case SVE::BI__builtin_sve_svdup_neonq_u8:
+ case SVE::BI__builtin_sve_svdup_neonq_u16:
+ case SVE::BI__builtin_sve_svdup_neonq_u32:
+ case SVE::BI__builtin_sve_svdup_neonq_u64:
+ case SVE::BI__builtin_sve_svdup_neonq_f16:
+ case SVE::BI__builtin_sve_svdup_neonq_f32:
+ case SVE::BI__builtin_sve_svdup_neonq_f64:
+ case SVE::BI__builtin_sve_svdup_neonq_bf16: {
+ Value *Insert = Builder.CreateInsertVector(Ty, PoisonValue::get(Ty), Ops[0],
+ Builder.getInt64(0));
+ return Builder.CreateIntrinsic(Intrinsic::aarch64_sve_dupq_lane, {Ty},
+ {Insert, Builder.getInt64(0)});
}
}
@@ -9223,37 +10558,109 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
return nullptr;
}
+static void swapCommutativeSMEOperands(unsigned BuiltinID,
+ SmallVectorImpl<Value *> &Ops) {
+ unsigned MultiVec;
+ switch (BuiltinID) {
+ default:
+ return;
+ case SME::BI__builtin_sme_svsumla_za32_s8_vg4x1:
+ MultiVec = 1;
+ break;
+ case SME::BI__builtin_sme_svsumla_za32_s8_vg4x2:
+ case SME::BI__builtin_sme_svsudot_za32_s8_vg1x2:
+ MultiVec = 2;
+ break;
+ case SME::BI__builtin_sme_svsudot_za32_s8_vg1x4:
+ case SME::BI__builtin_sme_svsumla_za32_s8_vg4x4:
+ MultiVec = 4;
+ break;
+ }
+
+ if (MultiVec > 0)
+ for (unsigned I = 0; I < MultiVec; ++I)
+ std::swap(Ops[I + 1], Ops[I + 1 + MultiVec]);
+}
+
+Value *CodeGenFunction::EmitAArch64SMEBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ auto *Builtin = findARMVectorIntrinsicInMap(AArch64SMEIntrinsicMap, BuiltinID,
+ AArch64SMEIntrinsicsProvenSorted);
+
+ llvm::SmallVector<Value *, 4> Ops;
+ SVETypeFlags TypeFlags(Builtin->TypeModifier);
+ GetAArch64SVEProcessedOperands(BuiltinID, E, Ops, TypeFlags);
+
+ if (TypeFlags.isLoad() || TypeFlags.isStore())
+ return EmitSMELd1St1(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+ else if (TypeFlags.isReadZA() || TypeFlags.isWriteZA())
+ return EmitSMEReadWrite(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+ else if (BuiltinID == SME::BI__builtin_sme_svzero_mask_za ||
+ BuiltinID == SME::BI__builtin_sme_svzero_za)
+ return EmitSMEZero(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+ else if (BuiltinID == SME::BI__builtin_sme_svldr_vnum_za ||
+ BuiltinID == SME::BI__builtin_sme_svstr_vnum_za ||
+ BuiltinID == SME::BI__builtin_sme_svldr_za ||
+ BuiltinID == SME::BI__builtin_sme_svstr_za)
+ return EmitSMELdrStr(TypeFlags, Ops, Builtin->LLVMIntrinsic);
+
+ // Handle builtins which require their multi-vector operands to be swapped
+ swapCommutativeSMEOperands(BuiltinID, Ops);
+
+ // Should not happen!
+ if (Builtin->LLVMIntrinsic == 0)
+ return nullptr;
+
+ // Predicates must match the main datatype.
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
+ if (PredTy->getElementType()->isIntegerTy(1))
+ Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));
+
+ Function *F =
+ TypeFlags.isOverloadNone()
+ ? CGM.getIntrinsic(Builtin->LLVMIntrinsic)
+ : CGM.getIntrinsic(Builtin->LLVMIntrinsic, {getSVEType(TypeFlags)});
+ Value *Call = Builder.CreateCall(F, Ops);
+
+ return FormSVEBuiltinResult(Call);
+}
+
Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
llvm::Triple::ArchType Arch) {
- if (BuiltinID >= AArch64::FirstSVEBuiltin &&
- BuiltinID <= AArch64::LastSVEBuiltin)
+ if (BuiltinID >= clang::AArch64::FirstSVEBuiltin &&
+ BuiltinID <= clang::AArch64::LastSVEBuiltin)
return EmitAArch64SVEBuiltinExpr(BuiltinID, E);
+ if (BuiltinID >= clang::AArch64::FirstSMEBuiltin &&
+ BuiltinID <= clang::AArch64::LastSMEBuiltin)
+ return EmitAArch64SMEBuiltinExpr(BuiltinID, E);
+
unsigned HintID = static_cast<unsigned>(-1);
switch (BuiltinID) {
default: break;
- case AArch64::BI__builtin_arm_nop:
+ case clang::AArch64::BI__builtin_arm_nop:
HintID = 0;
break;
- case AArch64::BI__builtin_arm_yield:
- case AArch64::BI__yield:
+ case clang::AArch64::BI__builtin_arm_yield:
+ case clang::AArch64::BI__yield:
HintID = 1;
break;
- case AArch64::BI__builtin_arm_wfe:
- case AArch64::BI__wfe:
+ case clang::AArch64::BI__builtin_arm_wfe:
+ case clang::AArch64::BI__wfe:
HintID = 2;
break;
- case AArch64::BI__builtin_arm_wfi:
- case AArch64::BI__wfi:
+ case clang::AArch64::BI__builtin_arm_wfi:
+ case clang::AArch64::BI__wfi:
HintID = 3;
break;
- case AArch64::BI__builtin_arm_sev:
- case AArch64::BI__sev:
+ case clang::AArch64::BI__builtin_arm_sev:
+ case clang::AArch64::BI__sev:
HintID = 4;
break;
- case AArch64::BI__builtin_arm_sevl:
- case AArch64::BI__sevl:
+ case clang::AArch64::BI__builtin_arm_sevl:
+ case clang::AArch64::BI__sevl:
HintID = 5;
break;
}
@@ -9263,37 +10670,34 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
}
- if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
- Value *Address = EmitScalarExpr(E->getArg(0));
- Value *RW = EmitScalarExpr(E->getArg(1));
- Value *CacheLevel = EmitScalarExpr(E->getArg(2));
- Value *RetentionPolicy = EmitScalarExpr(E->getArg(3));
- Value *IsData = EmitScalarExpr(E->getArg(4));
-
- Value *Locality = nullptr;
- if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) {
- // Temporal fetch, needs to convert cache level to locality.
- Locality = llvm::ConstantInt::get(Int32Ty,
- -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3);
- } else {
- // Streaming fetch.
- Locality = llvm::ConstantInt::get(Int32Ty, 0);
- }
-
- // FIXME: We need AArch64 specific LLVM intrinsic if we want to specify
- // PLDL3STRM or PLDL2STRM.
- Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
- return Builder.CreateCall(F, {Address, RW, Locality, IsData});
- }
-
- if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_get_sme_state) {
+ // Create call to __arm_sme_state and store the results to the two pointers.
+ CallInst *CI = EmitRuntimeCall(CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(StructType::get(CGM.Int64Ty, CGM.Int64Ty), {},
+ false),
+ "__arm_sme_state"));
+ auto Attrs =
+ AttributeList()
+ .addFnAttribute(getLLVMContext(), "aarch64_pstate_sm_compatible")
+ .addFnAttribute(getLLVMContext(), "aarch64_pstate_za_preserved");
+ CI->setAttributes(Attrs);
+ CI->setCallingConv(
+ llvm::CallingConv::
+ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2);
+ Builder.CreateStore(Builder.CreateExtractValue(CI, 0),
+ EmitPointerWithAlignment(E->getArg(0)));
+ return Builder.CreateStore(Builder.CreateExtractValue(CI, 1),
+ EmitPointerWithAlignment(E->getArg(1)));
+ }
+
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_rbit) {
assert((getContext().getTypeSize(E->getType()) == 32) &&
"rbit of unusual size!");
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(
CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
}
- if (BuiltinID == AArch64::BI__builtin_arm_rbit64) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_rbit64) {
assert((getContext().getTypeSize(E->getType()) == 64) &&
"rbit of unusual size!");
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
@@ -9301,50 +10705,60 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
}
- if (BuiltinID == AArch64::BI__builtin_arm_cls) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_clz ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_clz64) {
+ llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
+ Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Arg->getType());
+ Value *Res = Builder.CreateCall(F, {Arg, Builder.getInt1(false)});
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_clz64)
+ Res = Builder.CreateTrunc(Res, Builder.getInt32Ty());
+ return Res;
+ }
+
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_cls) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls), Arg,
"cls");
}
- if (BuiltinID == AArch64::BI__builtin_arm_cls64) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_cls64) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls64), Arg,
"cls");
}
- if (BuiltinID == AArch64::BI__builtin_arm_frint32zf ||
- BuiltinID == AArch64::BI__builtin_arm_frint32z) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_rint32zf ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_rint32z) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
llvm::Type *Ty = Arg->getType();
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32z, Ty),
Arg, "frint32z");
}
- if (BuiltinID == AArch64::BI__builtin_arm_frint64zf ||
- BuiltinID == AArch64::BI__builtin_arm_frint64z) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_rint64zf ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_rint64z) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
llvm::Type *Ty = Arg->getType();
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64z, Ty),
Arg, "frint64z");
}
- if (BuiltinID == AArch64::BI__builtin_arm_frint32xf ||
- BuiltinID == AArch64::BI__builtin_arm_frint32x) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_rint32xf ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_rint32x) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
llvm::Type *Ty = Arg->getType();
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32x, Ty),
Arg, "frint32x");
}
- if (BuiltinID == AArch64::BI__builtin_arm_frint64xf ||
- BuiltinID == AArch64::BI__builtin_arm_frint64x) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_rint64xf ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_rint64x) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
llvm::Type *Ty = Arg->getType();
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64x, Ty),
Arg, "frint64x");
}
- if (BuiltinID == AArch64::BI__builtin_arm_jcvt) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_jcvt) {
assert((getContext().getTypeSize(E->getType()) == 32) &&
"__jcvt of unusual size!");
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
@@ -9352,14 +10766,14 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg);
}
- if (BuiltinID == AArch64::BI__builtin_arm_ld64b ||
- BuiltinID == AArch64::BI__builtin_arm_st64b ||
- BuiltinID == AArch64::BI__builtin_arm_st64bv ||
- BuiltinID == AArch64::BI__builtin_arm_st64bv0) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_ld64b ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_st64b ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_st64bv ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_st64bv0) {
llvm::Value *MemAddr = EmitScalarExpr(E->getArg(0));
llvm::Value *ValPtr = EmitScalarExpr(E->getArg(1));
- if (BuiltinID == AArch64::BI__builtin_arm_ld64b) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_ld64b) {
// Load from the address via an LLVM intrinsic, receiving a
// tuple of 8 i64 words, and store each one to ValPtr.
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_ld64b);
@@ -9368,7 +10782,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
for (size_t i = 0; i < 8; i++) {
llvm::Value *ValOffsetPtr =
Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
- Address Addr(ValOffsetPtr, CharUnits::fromQuantity(8));
+ Address Addr =
+ Address(ValOffsetPtr, Int64Ty, CharUnits::fromQuantity(8));
ToRet = Builder.CreateStore(Builder.CreateExtractValue(Val, i), Addr);
}
return ToRet;
@@ -9380,24 +10795,25 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
for (size_t i = 0; i < 8; i++) {
llvm::Value *ValOffsetPtr =
Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
- Address Addr(ValOffsetPtr, CharUnits::fromQuantity(8));
+ Address Addr =
+ Address(ValOffsetPtr, Int64Ty, CharUnits::fromQuantity(8));
Args.push_back(Builder.CreateLoad(Addr));
}
- auto Intr = (BuiltinID == AArch64::BI__builtin_arm_st64b
+ auto Intr = (BuiltinID == clang::AArch64::BI__builtin_arm_st64b
? Intrinsic::aarch64_st64b
- : BuiltinID == AArch64::BI__builtin_arm_st64bv
- ? Intrinsic::aarch64_st64bv
- : Intrinsic::aarch64_st64bv0);
+ : BuiltinID == clang::AArch64::BI__builtin_arm_st64bv
+ ? Intrinsic::aarch64_st64bv
+ : Intrinsic::aarch64_st64bv0);
Function *F = CGM.getIntrinsic(Intr);
return Builder.CreateCall(F, Args);
}
}
- if (BuiltinID == AArch64::BI__builtin_arm_rndr ||
- BuiltinID == AArch64::BI__builtin_arm_rndrrs) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_rndr ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_rndrrs) {
- auto Intr = (BuiltinID == AArch64::BI__builtin_arm_rndr
+ auto Intr = (BuiltinID == clang::AArch64::BI__builtin_arm_rndr
? Intrinsic::aarch64_rndr
: Intrinsic::aarch64_rndrrs);
Function *F = CGM.getIntrinsic(Intr);
@@ -9411,7 +10827,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Status;
}
- if (BuiltinID == AArch64::BI__clear_cache) {
+ if (BuiltinID == clang::AArch64::BI__clear_cache) {
assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
const FunctionDecl *FD = E->getDirectCallee();
Value *Ops[2];
@@ -9423,16 +10839,16 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
}
- if ((BuiltinID == AArch64::BI__builtin_arm_ldrex ||
- BuiltinID == AArch64::BI__builtin_arm_ldaex) &&
+ if ((BuiltinID == clang::AArch64::BI__builtin_arm_ldrex ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) &&
getContext().getTypeSize(E->getType()) == 128) {
- Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
- ? Intrinsic::aarch64_ldaxp
- : Intrinsic::aarch64_ldxp);
+ Function *F =
+ CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_ldaex
+ ? Intrinsic::aarch64_ldaxp
+ : Intrinsic::aarch64_ldxp);
Value *LdPtr = EmitScalarExpr(E->getArg(0));
- Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
- "ldxp");
+ Value *Val = Builder.CreateCall(F, LdPtr, "ldxp");
Value *Val0 = Builder.CreateExtractValue(Val, 1);
Value *Val1 = Builder.CreateExtractValue(Val, 0);
@@ -9444,61 +10860,62 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
Val = Builder.CreateOr(Val, Val1);
return Builder.CreateBitCast(Val, ConvertType(E->getType()));
- } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
- BuiltinID == AArch64::BI__builtin_arm_ldaex) {
+ } else if (BuiltinID == clang::AArch64::BI__builtin_arm_ldrex ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) {
Value *LoadAddr = EmitScalarExpr(E->getArg(0));
QualType Ty = E->getType();
llvm::Type *RealResTy = ConvertType(Ty);
- llvm::Type *PtrTy = llvm::IntegerType::get(
- getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
- LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
+ llvm::Type *IntTy =
+ llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));
- Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
- ? Intrinsic::aarch64_ldaxr
- : Intrinsic::aarch64_ldxr,
- PtrTy);
- Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
+ Function *F =
+ CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_ldaex
+ ? Intrinsic::aarch64_ldaxr
+ : Intrinsic::aarch64_ldxr,
+ UnqualPtrTy);
+ CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
+ Val->addParamAttr(
+ 0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
if (RealResTy->isPointerTy())
return Builder.CreateIntToPtr(Val, RealResTy);
llvm::Type *IntResTy = llvm::IntegerType::get(
getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
- Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
- return Builder.CreateBitCast(Val, RealResTy);
+ return Builder.CreateBitCast(Builder.CreateTruncOrBitCast(Val, IntResTy),
+ RealResTy);
}
- if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
- BuiltinID == AArch64::BI__builtin_arm_stlex) &&
+ if ((BuiltinID == clang::AArch64::BI__builtin_arm_strex ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_stlex) &&
getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
- Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
- ? Intrinsic::aarch64_stlxp
- : Intrinsic::aarch64_stxp);
+ Function *F =
+ CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_stlex
+ ? Intrinsic::aarch64_stlxp
+ : Intrinsic::aarch64_stxp);
llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty);
Address Tmp = CreateMemTemp(E->getArg(0)->getType());
EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);
- Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
+ Tmp = Tmp.withElementType(STy);
llvm::Value *Val = Builder.CreateLoad(Tmp);
Value *Arg0 = Builder.CreateExtractValue(Val, 0);
Value *Arg1 = Builder.CreateExtractValue(Val, 1);
- Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
- Int8PtrTy);
+ Value *StPtr = EmitScalarExpr(E->getArg(1));
return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
}
- if (BuiltinID == AArch64::BI__builtin_arm_strex ||
- BuiltinID == AArch64::BI__builtin_arm_stlex) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_strex ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_stlex) {
Value *StoreVal = EmitScalarExpr(E->getArg(0));
Value *StoreAddr = EmitScalarExpr(E->getArg(1));
QualType Ty = E->getArg(0)->getType();
- llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
- getContext().getTypeSize(Ty));
- StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
+ llvm::Type *StoreTy =
+ llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));
if (StoreVal->getType()->isPointerTy())
StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
@@ -9510,14 +10927,18 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
}
- Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
- ? Intrinsic::aarch64_stlxr
- : Intrinsic::aarch64_stxr,
- StoreAddr->getType());
- return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
+ Function *F =
+ CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_stlex
+ ? Intrinsic::aarch64_stlxr
+ : Intrinsic::aarch64_stxr,
+ StoreAddr->getType());
+ CallInst *CI = Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
+ CI->addParamAttr(
+ 1, Attribute::get(getLLVMContext(), Attribute::ElementType, StoreTy));
+ return CI;
}
- if (BuiltinID == AArch64::BI__getReg) {
+ if (BuiltinID == clang::AArch64::BI__getReg) {
Expr::EvalResult Result;
if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
llvm_unreachable("Sema will ensure that the parameter is constant");
@@ -9535,33 +10956,42 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, Metadata);
}
- if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
+ if (BuiltinID == clang::AArch64::BI__break) {
+ Expr::EvalResult Result;
+ if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
+ llvm_unreachable("Sema will ensure that the parameter is constant");
+
+ llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::aarch64_break);
+ return Builder.CreateCall(F, {EmitScalarExpr(E->getArg(0))});
+ }
+
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_clrex) {
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
return Builder.CreateCall(F);
}
- if (BuiltinID == AArch64::BI_ReadWriteBarrier)
+ if (BuiltinID == clang::AArch64::BI_ReadWriteBarrier)
return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
llvm::SyncScope::SingleThread);
// CRC32
Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
- case AArch64::BI__builtin_arm_crc32b:
+ case clang::AArch64::BI__builtin_arm_crc32b:
CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
- case AArch64::BI__builtin_arm_crc32cb:
+ case clang::AArch64::BI__builtin_arm_crc32cb:
CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
- case AArch64::BI__builtin_arm_crc32h:
+ case clang::AArch64::BI__builtin_arm_crc32h:
CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
- case AArch64::BI__builtin_arm_crc32ch:
+ case clang::AArch64::BI__builtin_arm_crc32ch:
CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
- case AArch64::BI__builtin_arm_crc32w:
+ case clang::AArch64::BI__builtin_arm_crc32w:
CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
- case AArch64::BI__builtin_arm_crc32cw:
+ case clang::AArch64::BI__builtin_arm_crc32cw:
CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
- case AArch64::BI__builtin_arm_crc32d:
+ case clang::AArch64::BI__builtin_arm_crc32d:
CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
- case AArch64::BI__builtin_arm_crc32cd:
+ case clang::AArch64::BI__builtin_arm_crc32cd:
CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
}
@@ -9576,20 +11006,32 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {Arg0, Arg1});
}
+ // Memory Operations (MOPS)
+ if (BuiltinID == AArch64::BI__builtin_arm_mops_memset_tag) {
+ Value *Dst = EmitScalarExpr(E->getArg(0));
+ Value *Val = EmitScalarExpr(E->getArg(1));
+ Value *Size = EmitScalarExpr(E->getArg(2));
+ Dst = Builder.CreatePointerCast(Dst, Int8PtrTy);
+ Val = Builder.CreateTrunc(Val, Int8Ty);
+ Size = Builder.CreateIntCast(Size, Int64Ty, false);
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::aarch64_mops_memset_tag), {Dst, Val, Size});
+ }
+
// Memory Tagging Extensions (MTE) Intrinsics
Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
- case AArch64::BI__builtin_arm_irg:
+ case clang::AArch64::BI__builtin_arm_irg:
MTEIntrinsicID = Intrinsic::aarch64_irg; break;
- case AArch64::BI__builtin_arm_addg:
+ case clang::AArch64::BI__builtin_arm_addg:
MTEIntrinsicID = Intrinsic::aarch64_addg; break;
- case AArch64::BI__builtin_arm_gmi:
+ case clang::AArch64::BI__builtin_arm_gmi:
MTEIntrinsicID = Intrinsic::aarch64_gmi; break;
- case AArch64::BI__builtin_arm_ldg:
+ case clang::AArch64::BI__builtin_arm_ldg:
MTEIntrinsicID = Intrinsic::aarch64_ldg; break;
- case AArch64::BI__builtin_arm_stg:
+ case clang::AArch64::BI__builtin_arm_stg:
MTEIntrinsicID = Intrinsic::aarch64_stg; break;
- case AArch64::BI__builtin_arm_subp:
+ case clang::AArch64::BI__builtin_arm_subp:
MTEIntrinsicID = Intrinsic::aarch64_subp; break;
}
@@ -9654,41 +11096,52 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
}
- if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
- BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
- BuiltinID == AArch64::BI__builtin_arm_rsrp ||
- BuiltinID == AArch64::BI__builtin_arm_wsr ||
- BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
- BuiltinID == AArch64::BI__builtin_arm_wsrp) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_rsr ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_rsr64 ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_rsrp ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_wsr ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_wsr64 ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_wsr128 ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_wsrp) {
SpecialRegisterAccessKind AccessKind = Write;
- if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
- BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
- BuiltinID == AArch64::BI__builtin_arm_rsrp)
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_rsr ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_rsr64 ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_rsrp)
AccessKind = VolatileRead;
- bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
- BuiltinID == AArch64::BI__builtin_arm_wsrp;
+ bool IsPointerBuiltin = BuiltinID == clang::AArch64::BI__builtin_arm_rsrp ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_wsrp;
+
+ bool Is32Bit = BuiltinID == clang::AArch64::BI__builtin_arm_rsr ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_wsr;
- bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr &&
- BuiltinID != AArch64::BI__builtin_arm_wsr;
+ bool Is128Bit = BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_wsr128;
llvm::Type *ValueType;
llvm::Type *RegisterType = Int64Ty;
- if (IsPointerBuiltin) {
+ if (Is32Bit) {
+ ValueType = Int32Ty;
+ } else if (Is128Bit) {
+ llvm::Type *Int128Ty =
+ llvm::IntegerType::getInt128Ty(CGM.getLLVMContext());
+ ValueType = Int128Ty;
+ RegisterType = Int128Ty;
+ } else if (IsPointerBuiltin) {
ValueType = VoidPtrTy;
- } else if (Is64Bit) {
- ValueType = Int64Ty;
} else {
- ValueType = Int32Ty;
- }
+ ValueType = Int64Ty;
+ };
return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
AccessKind);
}
- if (BuiltinID == AArch64::BI_ReadStatusReg ||
- BuiltinID == AArch64::BI_WriteStatusReg) {
+ if (BuiltinID == clang::AArch64::BI_ReadStatusReg ||
+ BuiltinID == clang::AArch64::BI_WriteStatusReg) {
LLVMContext &Context = CGM.getLLVMContext();
unsigned SysReg =
@@ -9709,7 +11162,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
llvm::Type *RegisterType = Int64Ty;
llvm::Type *Types[] = { RegisterType };
- if (BuiltinID == AArch64::BI_ReadStatusReg) {
+ if (BuiltinID == clang::AArch64::BI_ReadStatusReg) {
llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
return Builder.CreateCall(F, Metadata);
@@ -9721,22 +11174,23 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, { Metadata, ArgValue });
}
- if (BuiltinID == AArch64::BI_AddressOfReturnAddress) {
+ if (BuiltinID == clang::AArch64::BI_AddressOfReturnAddress) {
llvm::Function *F =
CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
return Builder.CreateCall(F);
}
- if (BuiltinID == AArch64::BI__builtin_sponentry) {
+ if (BuiltinID == clang::AArch64::BI__builtin_sponentry) {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
return Builder.CreateCall(F);
}
- if (BuiltinID == AArch64::BI__mulh || BuiltinID == AArch64::BI__umulh) {
+ if (BuiltinID == clang::AArch64::BI__mulh ||
+ BuiltinID == clang::AArch64::BI__umulh) {
llvm::Type *ResType = ConvertType(E->getType());
llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
- bool IsSigned = BuiltinID == AArch64::BI__mulh;
+ bool IsSigned = BuiltinID == clang::AArch64::BI__mulh;
Value *LHS =
Builder.CreateIntCast(EmitScalarExpr(E->getArg(0)), Int128Ty, IsSigned);
Value *RHS =
@@ -9755,11 +11209,128 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return HigherBits;
}
+ if (BuiltinID == AArch64::BI__writex18byte ||
+ BuiltinID == AArch64::BI__writex18word ||
+ BuiltinID == AArch64::BI__writex18dword ||
+ BuiltinID == AArch64::BI__writex18qword) {
+ // Read x18 as i8*
+ LLVMContext &Context = CGM.getLLVMContext();
+ llvm::Metadata *Ops[] = {llvm::MDString::get(Context, "x18")};
+ llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
+ llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
+ llvm::Function *F =
+ CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
+ llvm::Value *X18 = Builder.CreateCall(F, Metadata);
+ X18 = Builder.CreateIntToPtr(X18, Int8PtrTy);
+
+ // Store val at x18 + offset
+ Value *Offset = Builder.CreateZExt(EmitScalarExpr(E->getArg(0)), Int64Ty);
+ Value *Ptr = Builder.CreateGEP(Int8Ty, X18, Offset);
+ Value *Val = EmitScalarExpr(E->getArg(1));
+ StoreInst *Store = Builder.CreateAlignedStore(Val, Ptr, CharUnits::One());
+ return Store;
+ }
+
+ if (BuiltinID == AArch64::BI__readx18byte ||
+ BuiltinID == AArch64::BI__readx18word ||
+ BuiltinID == AArch64::BI__readx18dword ||
+ BuiltinID == AArch64::BI__readx18qword) {
+ llvm::Type *IntTy = ConvertType(E->getType());
+
+ // Read x18 as i8*
+ LLVMContext &Context = CGM.getLLVMContext();
+ llvm::Metadata *Ops[] = {llvm::MDString::get(Context, "x18")};
+ llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
+ llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
+ llvm::Function *F =
+ CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
+ llvm::Value *X18 = Builder.CreateCall(F, Metadata);
+ X18 = Builder.CreateIntToPtr(X18, Int8PtrTy);
+
+ // Load x18 + offset
+ Value *Offset = Builder.CreateZExt(EmitScalarExpr(E->getArg(0)), Int64Ty);
+ Value *Ptr = Builder.CreateGEP(Int8Ty, X18, Offset);
+ LoadInst *Load = Builder.CreateAlignedLoad(IntTy, Ptr, CharUnits::One());
+ return Load;
+ }
+
+ if (BuiltinID == AArch64::BI_CopyDoubleFromInt64 ||
+ BuiltinID == AArch64::BI_CopyFloatFromInt32 ||
+ BuiltinID == AArch64::BI_CopyInt32FromFloat ||
+ BuiltinID == AArch64::BI_CopyInt64FromDouble) {
+ Value *Arg = EmitScalarExpr(E->getArg(0));
+ llvm::Type *RetTy = ConvertType(E->getType());
+ return Builder.CreateBitCast(Arg, RetTy);
+ }
+
+ if (BuiltinID == AArch64::BI_CountLeadingOnes ||
+ BuiltinID == AArch64::BI_CountLeadingOnes64 ||
+ BuiltinID == AArch64::BI_CountLeadingZeros ||
+ BuiltinID == AArch64::BI_CountLeadingZeros64) {
+ Value *Arg = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ArgType = Arg->getType();
+
+ if (BuiltinID == AArch64::BI_CountLeadingOnes ||
+ BuiltinID == AArch64::BI_CountLeadingOnes64)
+ Arg = Builder.CreateXor(Arg, Constant::getAllOnesValue(ArgType));
+
+ Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
+ Value *Result = Builder.CreateCall(F, {Arg, Builder.getInt1(false)});
+
+ if (BuiltinID == AArch64::BI_CountLeadingOnes64 ||
+ BuiltinID == AArch64::BI_CountLeadingZeros64)
+ Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
+ return Result;
+ }
+
+ if (BuiltinID == AArch64::BI_CountLeadingSigns ||
+ BuiltinID == AArch64::BI_CountLeadingSigns64) {
+ Value *Arg = EmitScalarExpr(E->getArg(0));
+
+ Function *F = (BuiltinID == AArch64::BI_CountLeadingSigns)
+ ? CGM.getIntrinsic(Intrinsic::aarch64_cls)
+ : CGM.getIntrinsic(Intrinsic::aarch64_cls64);
+
+ Value *Result = Builder.CreateCall(F, Arg, "cls");
+ if (BuiltinID == AArch64::BI_CountLeadingSigns64)
+ Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
+ return Result;
+ }
+
+ if (BuiltinID == AArch64::BI_CountOneBits ||
+ BuiltinID == AArch64::BI_CountOneBits64) {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ArgType = ArgValue->getType();
+ Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
+
+ Value *Result = Builder.CreateCall(F, ArgValue);
+ if (BuiltinID == AArch64::BI_CountOneBits64)
+ Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
+ return Result;
+ }
+
+ if (BuiltinID == AArch64::BI__prefetch) {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *RW = llvm::ConstantInt::get(Int32Ty, 0);
+ Value *Locality = ConstantInt::get(Int32Ty, 3);
+ Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
+ Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
+ return Builder.CreateCall(F, {Address, RW, Locality, Data});
+ }
+
// Handle MSVC intrinsics before argument evaluation to prevent double
// evaluation.
- if (Optional<MSVCIntrin> MsvcIntId = translateAarch64ToMsvcIntrin(BuiltinID))
+ if (std::optional<MSVCIntrin> MsvcIntId =
+ translateAarch64ToMsvcIntrin(BuiltinID))
return EmitMSVCBuiltinExpr(*MsvcIntId, E);
+ // Some intrinsics are equivalent - if they are use the base intrinsic ID.
+ auto It = llvm::find_if(NEONEquivalentIntrinsicMap, [BuiltinID](auto &P) {
+ return P.first == BuiltinID;
+ });
+ if (It != end(NEONEquivalentIntrinsicMap))
+ BuiltinID = It->second;
+
// Find out if any arguments are required to be integer constant
// expressions.
unsigned ICEArguments = 0;
@@ -9782,6 +11353,10 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vst1q_v:
case NEON::BI__builtin_neon_vst1_lane_v:
case NEON::BI__builtin_neon_vst1q_lane_v:
+ case NEON::BI__builtin_neon_vldap1_lane_s64:
+ case NEON::BI__builtin_neon_vldap1q_lane_s64:
+ case NEON::BI__builtin_neon_vstl1_lane_s64:
+ case NEON::BI__builtin_neon_vstl1q_lane_s64:
// Get the alignment for the argument in addition to the value;
// we'll use it later.
PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
@@ -9789,18 +11364,10 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
continue;
}
}
- if ((ICEArguments & (1 << i)) == 0) {
- Ops.push_back(EmitScalarExpr(E->getArg(i)));
- } else {
- // If this is required to be a constant, constant fold it so that we know
- // that the generated intrinsic gets a ConstantInt.
- Ops.push_back(llvm::ConstantInt::get(
- getLLVMContext(),
- *E->getArg(i)->getIntegerConstantExpr(getContext())));
- }
+ Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));
}
- auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
+ auto SISDMap = ArrayRef(AArch64SISDIntrinsicMap);
const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
@@ -9813,7 +11380,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
const Expr *Arg = E->getArg(E->getNumArgs()-1);
NeonTypeFlags Type(0);
- if (Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext()))
+ if (std::optional<llvm::APSInt> Result =
+ Arg->getIntegerConstantExpr(getContext()))
// Determine the type of this overloaded NEON intrinsic.
Type = NeonTypeFlags(Result->getZExtValue());
@@ -9837,20 +11405,18 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vldrq_p128: {
llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
- llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0);
- Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
return Builder.CreateAlignedLoad(Int128Ty, Ptr,
CharUnits::fromQuantity(16));
}
case NEON::BI__builtin_neon_vstrq_p128: {
- llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
- Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
+ Value *Ptr = Ops[0];
return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
}
case NEON::BI__builtin_neon_vcvts_f32_u32:
case NEON::BI__builtin_neon_vcvtd_f64_u64:
usgn = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case NEON::BI__builtin_neon_vcvts_f32_s32:
case NEON::BI__builtin_neon_vcvtd_f64_s64: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
@@ -9866,7 +11432,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcvth_f16_u32:
case NEON::BI__builtin_neon_vcvth_f16_u64:
usgn = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case NEON::BI__builtin_neon_vcvth_f16_s16:
case NEON::BI__builtin_neon_vcvth_f16_s32:
case NEON::BI__builtin_neon_vcvth_f16_s64: {
@@ -10086,7 +11652,10 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
- Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
+ if (P == llvm::FCmpInst::FCMP_OEQ)
+ Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
+ else
+ Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]);
return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
}
case NEON::BI__builtin_neon_vceqs_f32:
@@ -10106,7 +11675,10 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
- Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
+ if (P == llvm::FCmpInst::FCMP_OEQ)
+ Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
+ else
+ Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]);
return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
}
case NEON::BI__builtin_neon_vceqh_f16:
@@ -10126,7 +11698,10 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
- Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
+ if (P == llvm::FCmpInst::FCMP_OEQ)
+ Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
+ else
+ Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]);
return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd");
}
case NEON::BI__builtin_neon_vceqd_s64:
@@ -10294,14 +11869,12 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
*this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
{EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
case NEON::BI__builtin_neon_vfmsh_f16: {
- // FIXME: This should be an fneg instruction:
- Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy);
- Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
+ Value* Neg = Builder.CreateFNeg(EmitScalarExpr(E->getArg(1)), "vsubh");
// NEON intrinsic puts accumulator first, unlike the LLVM fma.
return emitCallMaybeConstrainedFPBuiltin(
*this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
- {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
+ {Neg, EmitScalarExpr(E->getArg(2)), Ops[0]});
}
case NEON::BI__builtin_neon_vaddd_s64:
case NEON::BI__builtin_neon_vaddd_u64:
@@ -10471,13 +12044,13 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
"vgetq_lane");
}
- case AArch64::BI_InterlockedAdd: {
- Value *Arg0 = EmitScalarExpr(E->getArg(0));
- Value *Arg1 = EmitScalarExpr(E->getArg(1));
- AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
- AtomicRMWInst::Add, Arg0, Arg1,
- llvm::AtomicOrdering::SequentiallyConsistent);
- return Builder.CreateAdd(RMWI, Arg1);
+ case clang::AArch64::BI_InterlockedAdd: {
+ Address DestAddr = CheckAtomicAlignment(*this, E);
+ Value *Val = EmitScalarExpr(E->getArg(1));
+ AtomicRMWInst *RMWI =
+ Builder.CreateAtomicRMW(AtomicRMWInst::Add, DestAddr, Val,
+ llvm::AtomicOrdering::SequentiallyConsistent);
+ return Builder.CreateAdd(RMWI, Val);
}
}
@@ -10790,26 +12363,34 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
: Intrinsic::trunc;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
}
- case NEON::BI__builtin_neon_vrnd32x_v:
- case NEON::BI__builtin_neon_vrnd32xq_v: {
+ case NEON::BI__builtin_neon_vrnd32x_f32:
+ case NEON::BI__builtin_neon_vrnd32xq_f32:
+ case NEON::BI__builtin_neon_vrnd32x_f64:
+ case NEON::BI__builtin_neon_vrnd32xq_f64: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Intrinsic::aarch64_neon_frint32x;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32x");
}
- case NEON::BI__builtin_neon_vrnd32z_v:
- case NEON::BI__builtin_neon_vrnd32zq_v: {
+ case NEON::BI__builtin_neon_vrnd32z_f32:
+ case NEON::BI__builtin_neon_vrnd32zq_f32:
+ case NEON::BI__builtin_neon_vrnd32z_f64:
+ case NEON::BI__builtin_neon_vrnd32zq_f64: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Intrinsic::aarch64_neon_frint32z;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32z");
}
- case NEON::BI__builtin_neon_vrnd64x_v:
- case NEON::BI__builtin_neon_vrnd64xq_v: {
+ case NEON::BI__builtin_neon_vrnd64x_f32:
+ case NEON::BI__builtin_neon_vrnd64xq_f32:
+ case NEON::BI__builtin_neon_vrnd64x_f64:
+ case NEON::BI__builtin_neon_vrnd64xq_f64: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Intrinsic::aarch64_neon_frint64x;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64x");
}
- case NEON::BI__builtin_neon_vrnd64z_v:
- case NEON::BI__builtin_neon_vrnd64zq_v: {
+ case NEON::BI__builtin_neon_vrnd64z_f32:
+ case NEON::BI__builtin_neon_vrnd64zq_f32:
+ case NEON::BI__builtin_neon_vrnd64z_f64:
+ case NEON::BI__builtin_neon_vrnd64zq_f64: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Intrinsic::aarch64_neon_frint64z;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64z");
@@ -10847,26 +12428,26 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vcvt_u32_v:
case NEON::BI__builtin_neon_vcvt_s64_v:
case NEON::BI__builtin_neon_vcvt_u64_v:
- case NEON::BI__builtin_neon_vcvt_s16_v:
- case NEON::BI__builtin_neon_vcvt_u16_v:
+ case NEON::BI__builtin_neon_vcvt_s16_f16:
+ case NEON::BI__builtin_neon_vcvt_u16_f16:
case NEON::BI__builtin_neon_vcvtq_s32_v:
case NEON::BI__builtin_neon_vcvtq_u32_v:
case NEON::BI__builtin_neon_vcvtq_s64_v:
case NEON::BI__builtin_neon_vcvtq_u64_v:
- case NEON::BI__builtin_neon_vcvtq_s16_v:
- case NEON::BI__builtin_neon_vcvtq_u16_v: {
+ case NEON::BI__builtin_neon_vcvtq_s16_f16:
+ case NEON::BI__builtin_neon_vcvtq_u16_f16: {
Int =
usgn ? Intrinsic::aarch64_neon_fcvtzu : Intrinsic::aarch64_neon_fcvtzs;
llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)};
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtz");
}
- case NEON::BI__builtin_neon_vcvta_s16_v:
- case NEON::BI__builtin_neon_vcvta_u16_v:
+ case NEON::BI__builtin_neon_vcvta_s16_f16:
+ case NEON::BI__builtin_neon_vcvta_u16_f16:
case NEON::BI__builtin_neon_vcvta_s32_v:
- case NEON::BI__builtin_neon_vcvtaq_s16_v:
+ case NEON::BI__builtin_neon_vcvtaq_s16_f16:
case NEON::BI__builtin_neon_vcvtaq_s32_v:
case NEON::BI__builtin_neon_vcvta_u32_v:
- case NEON::BI__builtin_neon_vcvtaq_u16_v:
+ case NEON::BI__builtin_neon_vcvtaq_u16_f16:
case NEON::BI__builtin_neon_vcvtaq_u32_v:
case NEON::BI__builtin_neon_vcvta_s64_v:
case NEON::BI__builtin_neon_vcvtaq_s64_v:
@@ -10876,13 +12457,13 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
}
- case NEON::BI__builtin_neon_vcvtm_s16_v:
+ case NEON::BI__builtin_neon_vcvtm_s16_f16:
case NEON::BI__builtin_neon_vcvtm_s32_v:
- case NEON::BI__builtin_neon_vcvtmq_s16_v:
+ case NEON::BI__builtin_neon_vcvtmq_s16_f16:
case NEON::BI__builtin_neon_vcvtmq_s32_v:
- case NEON::BI__builtin_neon_vcvtm_u16_v:
+ case NEON::BI__builtin_neon_vcvtm_u16_f16:
case NEON::BI__builtin_neon_vcvtm_u32_v:
- case NEON::BI__builtin_neon_vcvtmq_u16_v:
+ case NEON::BI__builtin_neon_vcvtmq_u16_f16:
case NEON::BI__builtin_neon_vcvtmq_u32_v:
case NEON::BI__builtin_neon_vcvtm_s64_v:
case NEON::BI__builtin_neon_vcvtmq_s64_v:
@@ -10892,13 +12473,13 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
}
- case NEON::BI__builtin_neon_vcvtn_s16_v:
+ case NEON::BI__builtin_neon_vcvtn_s16_f16:
case NEON::BI__builtin_neon_vcvtn_s32_v:
- case NEON::BI__builtin_neon_vcvtnq_s16_v:
+ case NEON::BI__builtin_neon_vcvtnq_s16_f16:
case NEON::BI__builtin_neon_vcvtnq_s32_v:
- case NEON::BI__builtin_neon_vcvtn_u16_v:
+ case NEON::BI__builtin_neon_vcvtn_u16_f16:
case NEON::BI__builtin_neon_vcvtn_u32_v:
- case NEON::BI__builtin_neon_vcvtnq_u16_v:
+ case NEON::BI__builtin_neon_vcvtnq_u16_f16:
case NEON::BI__builtin_neon_vcvtnq_u32_v:
case NEON::BI__builtin_neon_vcvtn_s64_v:
case NEON::BI__builtin_neon_vcvtnq_s64_v:
@@ -10908,13 +12489,13 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
}
- case NEON::BI__builtin_neon_vcvtp_s16_v:
+ case NEON::BI__builtin_neon_vcvtp_s16_f16:
case NEON::BI__builtin_neon_vcvtp_s32_v:
- case NEON::BI__builtin_neon_vcvtpq_s16_v:
+ case NEON::BI__builtin_neon_vcvtpq_s16_f16:
case NEON::BI__builtin_neon_vcvtpq_s32_v:
- case NEON::BI__builtin_neon_vcvtp_u16_v:
+ case NEON::BI__builtin_neon_vcvtp_u16_f16:
case NEON::BI__builtin_neon_vcvtp_u32_v:
- case NEON::BI__builtin_neon_vcvtpq_u16_v:
+ case NEON::BI__builtin_neon_vcvtpq_u16_f16:
case NEON::BI__builtin_neon_vcvtpq_u32_v:
case NEON::BI__builtin_neon_vcvtp_s64_v:
case NEON::BI__builtin_neon_vcvtpq_s64_v:
@@ -10990,7 +12571,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vaddv_u8:
// FIXME: These are handled by the AArch64 scalar code.
usgn = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case NEON::BI__builtin_neon_vaddv_s8: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
@@ -11002,7 +12583,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vaddv_u16:
usgn = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case NEON::BI__builtin_neon_vaddv_s16: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
@@ -11014,7 +12595,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vaddvq_u8:
usgn = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case NEON::BI__builtin_neon_vaddvq_s8: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
@@ -11026,7 +12607,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vaddvq_u16:
usgn = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case NEON::BI__builtin_neon_vaddvq_s16: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
@@ -11355,28 +12936,31 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vld1_v:
case NEON::BI__builtin_neon_vld1q_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
return Builder.CreateAlignedLoad(VTy, Ops[0], PtrOp0.getAlignment());
}
case NEON::BI__builtin_neon_vst1_v:
case NEON::BI__builtin_neon_vst1q_v:
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
case NEON::BI__builtin_neon_vld1_lane_v:
case NEON::BI__builtin_neon_vld1q_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Ty = llvm::PointerType::getUnqual(VTy->getElementType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
PtrOp0.getAlignment());
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
}
+ case NEON::BI__builtin_neon_vldap1_lane_s64:
+ case NEON::BI__builtin_neon_vldap1q_lane_s64: {
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ llvm::LoadInst *LI = Builder.CreateAlignedLoad(
+ VTy->getElementType(), Ops[0], PtrOp0.getAlignment());
+ LI->setAtomic(llvm::AtomicOrdering::Acquire);
+ Ops[0] = LI;
+ return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vldap1_lane");
+ }
case NEON::BI__builtin_neon_vld1_dup_v:
case NEON::BI__builtin_neon_vld1q_dup_v: {
- Value *V = UndefValue::get(Ty);
- Ty = llvm::PointerType::getUnqual(VTy->getElementType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Value *V = PoisonValue::get(Ty);
Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
PtrOp0.getAlignment());
llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
@@ -11387,76 +12971,56 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vst1q_lane_v:
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- return Builder.CreateAlignedStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty),
- PtrOp0.getAlignment());
+ return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
+ case NEON::BI__builtin_neon_vstl1_lane_s64:
+ case NEON::BI__builtin_neon_vstl1q_lane_s64: {
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
+ llvm::StoreInst *SI =
+ Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
+ SI->setAtomic(llvm::AtomicOrdering::Release);
+ return SI;
+ }
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
- Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
- llvm::Type *Tys[2] = { VTy, PTy };
+ llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_v:
case NEON::BI__builtin_neon_vld3q_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
- Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
- llvm::Type *Tys[2] = { VTy, PTy };
+ llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_v:
case NEON::BI__builtin_neon_vld4q_v: {
- llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
- Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
- llvm::Type *Tys[2] = { VTy, PTy };
+ llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld2_dup_v:
case NEON::BI__builtin_neon_vld2q_dup_v: {
- llvm::Type *PTy =
- llvm::PointerType::getUnqual(VTy->getElementType());
- Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
- llvm::Type *Tys[2] = { VTy, PTy };
+ llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_dup_v:
case NEON::BI__builtin_neon_vld3q_dup_v: {
- llvm::Type *PTy =
- llvm::PointerType::getUnqual(VTy->getElementType());
- Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
- llvm::Type *Tys[2] = { VTy, PTy };
+ llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_dup_v:
case NEON::BI__builtin_neon_vld4q_dup_v: {
- llvm::Type *PTy =
- llvm::PointerType::getUnqual(VTy->getElementType());
- Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
- llvm::Type *Tys[2] = { VTy, PTy };
+ llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
- Ops[0] = Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld2_lane_v:
@@ -11467,9 +13031,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
- Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), "vld2_lane");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_lane_v:
@@ -11481,9 +13043,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
- Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), "vld3_lane");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_lane_v:
@@ -11496,9 +13056,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
- Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), "vld4_lane");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vst2_v:
@@ -11548,7 +13106,6 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vtrn_v:
case NEON::BI__builtin_neon_vtrnq_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
@@ -11567,7 +13124,6 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vuzp_v:
case NEON::BI__builtin_neon_vuzpq_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
@@ -11585,7 +13141,6 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vzip_v:
case NEON::BI__builtin_neon_vzipq_v: {
- Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
@@ -11656,7 +13211,7 @@ Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
"unexpected BPF builtin");
// A sequence number, injected into IR builtin functions, to
- // prevent CSE given the only difference of the funciton
+ // prevent CSE given the only difference of the function
// may just be the debuginfo metadata.
static uint32_t BuiltinSeqNum;
@@ -11732,7 +13287,7 @@ Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
const auto *DR = cast<DeclRefExpr>(CE->getSubExpr());
const auto *Enumerator = cast<EnumConstantDecl>(DR->getDecl());
- auto &InitVal = Enumerator->getInitVal();
+ auto InitVal = Enumerator->getInitVal();
std::string InitValStr;
if (InitVal.isNegative() || InitVal > uint64_t(INT64_MAX))
InitValStr = std::to_string(InitVal.getSExtValue());
@@ -11772,11 +13327,11 @@ BuildVector(ArrayRef<llvm::Value*> Ops) {
}
// Otherwise, insertelement the values to build the vector.
- Value *Result = llvm::UndefValue::get(
+ Value *Result = llvm::PoisonValue::get(
llvm::FixedVectorType::get(Ops[0]->getType(), Ops.size()));
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
+ Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt64(i));
return Result;
}
@@ -11796,18 +13351,15 @@ static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
int Indices[4];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i;
- MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec,
- makeArrayRef(Indices, NumElts),
- "extract");
+ MaskVec = CGF.Builder.CreateShuffleVector(
+ MaskVec, MaskVec, ArrayRef(Indices, NumElts), "extract");
}
return MaskVec;
}
static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
Align Alignment) {
- // Cast the pointer to right type.
- Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(Ops[1]->getType()));
+ Value *Ptr = Ops[0];
Value *MaskVec = getMaskVecValue(
CGF, Ops[2],
@@ -11818,10 +13370,8 @@ static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
Align Alignment) {
- // Cast the pointer to right type.
llvm::Type *Ty = Ops[1]->getType();
- Value *Ptr =
- CGF.Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+ Value *Ptr = Ops[0];
Value *MaskVec = getMaskVecValue(
CGF, Ops[2], cast<llvm::FixedVectorType>(Ty)->getNumElements());
@@ -11832,11 +13382,7 @@ static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
ArrayRef<Value *> Ops) {
auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
- llvm::Type *PtrTy = ResultTy->getElementType();
-
- // Cast the pointer to element type.
- Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(PtrTy));
+ Value *Ptr = Ops[0];
Value *MaskVec = getMaskVecValue(
CGF, Ops[2], cast<FixedVectorType>(ResultTy)->getNumElements());
@@ -11862,11 +13408,7 @@ static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
static Value *EmitX86CompressStore(CodeGenFunction &CGF,
ArrayRef<Value *> Ops) {
auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
- llvm::Type *PtrTy = ResultTy->getElementType();
-
- // Cast the pointer to element type.
- Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(PtrTy));
+ Value *Ptr = Ops[0];
Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
@@ -12067,23 +13609,39 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
Intrinsic::ID IID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
default: break;
+ case clang::X86::BI__builtin_ia32_vfmsubph512_mask3:
+ Subtract = true;
+ [[fallthrough]];
+ case clang::X86::BI__builtin_ia32_vfmaddph512_mask:
+ case clang::X86::BI__builtin_ia32_vfmaddph512_maskz:
+ case clang::X86::BI__builtin_ia32_vfmaddph512_mask3:
+ IID = llvm::Intrinsic::x86_avx512fp16_vfmadd_ph_512;
+ break;
+ case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3:
+ Subtract = true;
+ [[fallthrough]];
+ case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask:
+ case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz:
+ case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3:
+ IID = llvm::Intrinsic::x86_avx512fp16_vfmaddsub_ph_512;
+ break;
case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
Subtract = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break;
case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
Subtract = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break;
case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
Subtract = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
@@ -12091,7 +13649,7 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
break;
case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
Subtract = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
@@ -12130,22 +13688,30 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
// Handle any required masking.
Value *MaskFalseVal = nullptr;
switch (BuiltinID) {
+ case clang::X86::BI__builtin_ia32_vfmaddph512_mask:
case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
+ case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask:
case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
MaskFalseVal = Ops[0];
break;
+ case clang::X86::BI__builtin_ia32_vfmaddph512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
+ case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
MaskFalseVal = Constant::getNullValue(Ops[0]->getType());
break;
+ case clang::X86::BI__builtin_ia32_vfmsubph512_mask3:
+ case clang::X86::BI__builtin_ia32_vfmaddph512_mask3:
case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
+ case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3:
+ case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3:
case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
@@ -12176,9 +13742,21 @@ static Value *EmitScalarFMAExpr(CodeGenFunction &CGF, const CallExpr *E,
Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0);
Value *Res;
if (Rnd != 4) {
- Intrinsic::ID IID = Ops[0]->getType()->getPrimitiveSizeInBits() == 32 ?
- Intrinsic::x86_avx512_vfmadd_f32 :
- Intrinsic::x86_avx512_vfmadd_f64;
+ Intrinsic::ID IID;
+
+ switch (Ops[0]->getType()->getPrimitiveSizeInBits()) {
+ case 16:
+ IID = Intrinsic::x86_avx512fp16_vfmadd_f16;
+ break;
+ case 32:
+ IID = Intrinsic::x86_avx512_vfmadd_f32;
+ break;
+ case 64:
+ IID = Intrinsic::x86_avx512_vfmadd_f64;
+ break;
+ default:
+ llvm_unreachable("Unexpected size");
+ }
Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
{Ops[0], Ops[1], Ops[2], Ops[4]});
} else if (CGF.Builder.getIsFPConstrained()) {
@@ -12271,13 +13849,6 @@ static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
}
-// Emit binary intrinsic with the same type used in result/args.
-static Value *EmitX86BinaryIntrinsic(CodeGenFunction &CGF,
- ArrayRef<Value *> Ops, Intrinsic::ID IID) {
- llvm::Function *F = CGF.CGM.getIntrinsic(IID, Ops[0]->getType());
- return CGF.Builder.CreateCall(F, {Ops[0], Ops[1]});
-}
-
Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
@@ -12321,18 +13892,6 @@ static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF,
return Res;
}
-// Convert a BF16 to a float.
-static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF,
- const CallExpr *E,
- ArrayRef<Value *> Ops) {
- llvm::Type *Int32Ty = CGF.Builder.getInt32Ty();
- Value *ZeroExt = CGF.Builder.CreateZExt(Ops[0], Int32Ty);
- Value *Shl = CGF.Builder.CreateShl(ZeroExt, 16);
- llvm::Type *ResultType = CGF.ConvertType(E->getType());
- Value *BitCast = CGF.Builder.CreateBitCast(Shl, ResultType);
- return BitCast;
-}
-
Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
llvm::Type *Int32Ty = Builder.getInt32Ty();
@@ -12361,9 +13920,11 @@ Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
.Case(ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
#define X86_CPU_TYPE(ENUM, STR) \
.Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
+#define X86_CPU_SUBTYPE_ALIAS(ENUM, ALIAS) \
+ .Case(ALIAS, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
#define X86_CPU_SUBTYPE(ENUM, STR) \
.Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
-#include "llvm/Support/X86TargetParser.def"
+#include "llvm/TargetParser/X86TargetParser.def"
.Default({0, 0});
assert(Value != 0 && "Invalid CPUStr passed to CpuIs");
@@ -12385,32 +13946,14 @@ Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
return EmitX86CpuSupports(FeatureStr);
}
-uint64_t
-CodeGenFunction::GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs) {
- // Processor features and mapping to processor feature value.
- uint64_t FeaturesMask = 0;
- for (const StringRef &FeatureStr : FeatureStrs) {
- unsigned Feature =
- StringSwitch<unsigned>(FeatureStr)
-#define X86_FEATURE_COMPAT(ENUM, STR) .Case(STR, llvm::X86::FEATURE_##ENUM)
-#include "llvm/Support/X86TargetParser.def"
- ;
- FeaturesMask |= (1ULL << Feature);
- }
- return FeaturesMask;
-}
-
Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
- return EmitX86CpuSupports(GetX86CpuSupportsMask(FeatureStrs));
+ return EmitX86CpuSupports(llvm::X86::getCpuSupportsMask(FeatureStrs));
}
-llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) {
- uint32_t Features1 = Lo_32(FeaturesMask);
- uint32_t Features2 = Hi_32(FeaturesMask);
-
+llvm::Value *
+CodeGenFunction::EmitX86CpuSupports(std::array<uint32_t, 4> FeatureMask) {
Value *Result = Builder.getTrue();
-
- if (Features1 != 0) {
+ if (FeatureMask[0] != 0) {
// Matching the struct layout from the compiler-rt/libgcc structure that is
// filled in:
// unsigned int __cpu_vendor;
@@ -12433,22 +13976,26 @@ llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) {
CharUnits::fromQuantity(4));
// Check the value of the bit corresponding to the feature requested.
- Value *Mask = Builder.getInt32(Features1);
+ Value *Mask = Builder.getInt32(FeatureMask[0]);
Value *Bitset = Builder.CreateAnd(Features, Mask);
Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
Result = Builder.CreateAnd(Result, Cmp);
}
- if (Features2 != 0) {
- llvm::Constant *CpuFeatures2 = CGM.CreateRuntimeVariable(Int32Ty,
- "__cpu_features2");
- cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true);
-
- Value *Features = Builder.CreateAlignedLoad(Int32Ty, CpuFeatures2,
- CharUnits::fromQuantity(4));
-
+ llvm::Type *ATy = llvm::ArrayType::get(Int32Ty, 3);
+ llvm::Constant *CpuFeatures2 =
+ CGM.CreateRuntimeVariable(ATy, "__cpu_features2");
+ cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true);
+ for (int i = 1; i != 4; ++i) {
+ const uint32_t M = FeatureMask[i];
+ if (!M)
+ continue;
+ Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(i - 1)};
+ Value *Features = Builder.CreateAlignedLoad(
+ Int32Ty, Builder.CreateGEP(ATy, CpuFeatures2, Idxs),
+ CharUnits::fromQuantity(4));
// Check the value of the bit corresponding to the feature requested.
- Value *Mask = Builder.getInt32(Features2);
+ Value *Mask = Builder.getInt32(M);
Value *Bitset = Builder.CreateAnd(Features, Mask);
Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
Result = Builder.CreateAnd(Result, Cmp);
@@ -12457,6 +14004,16 @@ llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) {
return Result;
}
+Value *CodeGenFunction::EmitAArch64CpuInit() {
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+ llvm::FunctionCallee Func =
+ CGM.CreateRuntimeFunction(FTy, "__init_cpu_features_resolver");
+ cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true);
+ cast<llvm::GlobalValue>(Func.getCallee())
+ ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
+ return Builder.CreateCall(Func);
+}
+
Value *CodeGenFunction::EmitX86CpuInit() {
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy,
/*Variadic*/ false);
@@ -12468,6 +14025,32 @@ Value *CodeGenFunction::EmitX86CpuInit() {
return Builder.CreateCall(Func);
}
+llvm::Value *
+CodeGenFunction::EmitAArch64CpuSupports(ArrayRef<StringRef> FeaturesStrs) {
+ uint64_t FeaturesMask = llvm::AArch64::getCpuSupportsMask(FeaturesStrs);
+ Value *Result = Builder.getTrue();
+ if (FeaturesMask != 0) {
+ // Get features from structure in runtime library
+ // struct {
+ // unsigned long long features;
+ // } __aarch64_cpu_features;
+ llvm::Type *STy = llvm::StructType::get(Int64Ty);
+ llvm::Constant *AArch64CPUFeatures =
+ CGM.CreateRuntimeVariable(STy, "__aarch64_cpu_features");
+ cast<llvm::GlobalValue>(AArch64CPUFeatures)->setDSOLocal(true);
+ llvm::Value *CpuFeatures = Builder.CreateGEP(
+ STy, AArch64CPUFeatures,
+ {ConstantInt::get(Int32Ty, 0), ConstantInt::get(Int32Ty, 0)});
+ Value *Features = Builder.CreateAlignedLoad(Int64Ty, CpuFeatures,
+ CharUnits::fromQuantity(8));
+ Value *Mask = Builder.getInt64(FeaturesMask);
+ Value *Bitset = Builder.CreateAnd(Features, Mask);
+ Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
+ Result = Builder.CreateAnd(Result, Cmp);
+ }
+ return Result;
+}
+
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
if (BuiltinID == X86::BI__builtin_cpu_is)
@@ -12479,11 +14062,12 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// Handle MSVC intrinsics before argument evaluation to prevent double
// evaluation.
- if (Optional<MSVCIntrin> MsvcIntId = translateX86ToMsvcIntrin(BuiltinID))
+ if (std::optional<MSVCIntrin> MsvcIntId = translateX86ToMsvcIntrin(BuiltinID))
return EmitMSVCBuiltinExpr(*MsvcIntId, E);
SmallVector<Value*, 4> Ops;
bool IsMaskFCmp = false;
+ bool IsConjFMA = false;
// Find out if any arguments are required to be integer constant expressions.
unsigned ICEArguments = 0;
@@ -12492,16 +14076,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
assert(Error == ASTContext::GE_None && "Should not codegen an error");
for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
- // If this is a normal argument, just emit it as a scalar.
- if ((ICEArguments & (1 << i)) == 0) {
- Ops.push_back(EmitScalarExpr(E->getArg(i)));
- continue;
- }
-
- // If this is required to be a constant, constant fold it so that we know
- // that the generated intrinsic gets a ConstantInt.
- Ops.push_back(llvm::ConstantInt::get(
- getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext())));
+ Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));
}
// These exist so that the builtin that takes an immediate can be bounds
@@ -12636,13 +14211,13 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Address Tmp = CreateMemTemp(E->getArg(0)->getType());
Builder.CreateStore(Ops[0], Tmp);
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
- Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
+ Tmp.getPointer());
}
case X86::BI_mm_getcsr:
case X86::BI__builtin_ia32_stmxcsr: {
Address Tmp = CreateMemTemp(E->getType());
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
- Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
+ Tmp.getPointer());
return Builder.CreateLoad(Tmp, "stmxcsr");
}
case X86::BI__builtin_ia32_xsave:
@@ -12714,6 +14289,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_storeups512_mask:
return EmitX86MaskedStore(*this, Ops, Align(1));
+ case X86::BI__builtin_ia32_storesh128_mask:
case X86::BI__builtin_ia32_storess128_mask:
case X86::BI__builtin_ia32_storesd128_mask:
return EmitX86MaskedStore(*this, Ops, Align(1));
@@ -12765,14 +14341,21 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_cvtdq2ps512_mask:
case X86::BI__builtin_ia32_cvtqq2ps512_mask:
case X86::BI__builtin_ia32_cvtqq2pd512_mask:
+ case X86::BI__builtin_ia32_vcvtw2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
return EmitX86ConvertIntToFp(*this, E, Ops, /*IsSigned*/ true);
case X86::BI__builtin_ia32_cvtudq2ps512_mask:
case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
+ case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
return EmitX86ConvertIntToFp(*this, E, Ops, /*IsSigned*/ false);
case X86::BI__builtin_ia32_vfmaddss3:
case X86::BI__builtin_ia32_vfmaddsd3:
+ case X86::BI__builtin_ia32_vfmaddsh3_mask:
case X86::BI__builtin_ia32_vfmaddss3_mask:
case X86::BI__builtin_ia32_vfmaddsd3_mask:
return EmitScalarFMAExpr(*this, E, Ops, Ops[0]);
@@ -12780,20 +14363,28 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vfmaddsd:
return EmitScalarFMAExpr(*this, E, Ops,
Constant::getNullValue(Ops[0]->getType()));
+ case X86::BI__builtin_ia32_vfmaddsh3_maskz:
case X86::BI__builtin_ia32_vfmaddss3_maskz:
case X86::BI__builtin_ia32_vfmaddsd3_maskz:
return EmitScalarFMAExpr(*this, E, Ops, Ops[0], /*ZeroMask*/ true);
+ case X86::BI__builtin_ia32_vfmaddsh3_mask3:
case X86::BI__builtin_ia32_vfmaddss3_mask3:
case X86::BI__builtin_ia32_vfmaddsd3_mask3:
return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2);
+ case X86::BI__builtin_ia32_vfmsubsh3_mask3:
case X86::BI__builtin_ia32_vfmsubss3_mask3:
case X86::BI__builtin_ia32_vfmsubsd3_mask3:
return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2,
/*NegAcc*/ true);
+ case X86::BI__builtin_ia32_vfmaddph:
case X86::BI__builtin_ia32_vfmaddps:
case X86::BI__builtin_ia32_vfmaddpd:
+ case X86::BI__builtin_ia32_vfmaddph256:
case X86::BI__builtin_ia32_vfmaddps256:
case X86::BI__builtin_ia32_vfmaddpd256:
+ case X86::BI__builtin_ia32_vfmaddph512_mask:
+ case X86::BI__builtin_ia32_vfmaddph512_maskz:
+ case X86::BI__builtin_ia32_vfmaddph512_mask3:
case X86::BI__builtin_ia32_vfmaddps512_mask:
case X86::BI__builtin_ia32_vfmaddps512_maskz:
case X86::BI__builtin_ia32_vfmaddps512_mask3:
@@ -12802,7 +14393,12 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vfmaddpd512_maskz:
case X86::BI__builtin_ia32_vfmaddpd512_mask3:
case X86::BI__builtin_ia32_vfmsubpd512_mask3:
+ case X86::BI__builtin_ia32_vfmsubph512_mask3:
return EmitX86FMAExpr(*this, E, Ops, BuiltinID, /*IsAddSub*/ false);
+ case X86::BI__builtin_ia32_vfmaddsubph512_mask:
+ case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
+ case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
+ case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
case X86::BI__builtin_ia32_vfmaddsubps512_mask:
case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
@@ -12849,6 +14445,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_loaddqudi512_mask:
return EmitX86MaskedLoad(*this, Ops, Align(1));
+ case X86::BI__builtin_ia32_loadsh128_mask:
case X86::BI__builtin_ia32_loadss128_mask:
case X86::BI__builtin_ia32_loadsd128_mask:
return EmitX86MaskedLoad(*this, Ops, Align(1));
@@ -13197,8 +14794,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i + Index;
- Value *Res = Builder.CreateShuffleVector(Ops[0],
- makeArrayRef(Indices, NumElts),
+ Value *Res = Builder.CreateShuffleVector(Ops[0], ArrayRef(Indices, NumElts),
"extract");
if (Ops.size() == 4)
@@ -13236,9 +14832,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
for (unsigned i = 0; i != DstNumElts; ++i)
Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i;
- Value *Op1 = Builder.CreateShuffleVector(Ops[1],
- makeArrayRef(Indices, DstNumElts),
- "widen");
+ Value *Op1 = Builder.CreateShuffleVector(
+ Ops[1], ArrayRef(Indices, DstNumElts), "widen");
for (unsigned i = 0; i != DstNumElts; ++i) {
if (i >= Index && i < (Index + SrcNumElts))
@@ -13248,8 +14843,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
return Builder.CreateShuffleVector(Ops[0], Op1,
- makeArrayRef(Indices, DstNumElts),
- "insert");
+ ArrayRef(Indices, DstNumElts), "insert");
}
case X86::BI__builtin_ia32_pmovqd512_mask:
case X86::BI__builtin_ia32_pmovwb512_mask: {
@@ -13299,8 +14893,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i;
return Builder.CreateShuffleVector(Ops[0], Ops[1],
- makeArrayRef(Indices, NumElts),
- "blend");
+ ArrayRef(Indices, NumElts), "blend");
}
case X86::BI__builtin_ia32_pshuflw:
case X86::BI__builtin_ia32_pshuflw256:
@@ -13322,7 +14915,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Indices[l + i] = l + i;
}
- return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
+ return Builder.CreateShuffleVector(Ops[0], ArrayRef(Indices, NumElts),
"pshuflw");
}
case X86::BI__builtin_ia32_pshufhw:
@@ -13345,7 +14938,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
}
- return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
+ return Builder.CreateShuffleVector(Ops[0], ArrayRef(Indices, NumElts),
"pshufhw");
}
case X86::BI__builtin_ia32_pshufd:
@@ -13374,7 +14967,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
}
- return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
+ return Builder.CreateShuffleVector(Ops[0], ArrayRef(Indices, NumElts),
"permil");
}
case X86::BI__builtin_ia32_shufpd:
@@ -13404,8 +14997,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
return Builder.CreateShuffleVector(Ops[0], Ops[1],
- makeArrayRef(Indices, NumElts),
- "shufp");
+ ArrayRef(Indices, NumElts), "shufp");
}
case X86::BI__builtin_ia32_permdi256:
case X86::BI__builtin_ia32_permdf256:
@@ -13421,7 +15013,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
for (unsigned i = 0; i != 4; ++i)
Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3);
- return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
+ return Builder.CreateShuffleVector(Ops[0], ArrayRef(Indices, NumElts),
"perm");
}
case X86::BI__builtin_ia32_palignr128:
@@ -13458,8 +15050,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
return Builder.CreateShuffleVector(Ops[1], Ops[0],
- makeArrayRef(Indices, NumElts),
- "palignr");
+ ArrayRef(Indices, NumElts), "palignr");
}
case X86::BI__builtin_ia32_alignd128:
case X86::BI__builtin_ia32_alignd256:
@@ -13479,8 +15070,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Indices[i] = i + ShiftVal;
return Builder.CreateShuffleVector(Ops[1], Ops[0],
- makeArrayRef(Indices, NumElts),
- "valign");
+ ArrayRef(Indices, NumElts), "valign");
}
case X86::BI__builtin_ia32_shuf_f32x4_256:
case X86::BI__builtin_ia32_shuf_f64x2_256:
@@ -13508,8 +15098,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
return Builder.CreateShuffleVector(Ops[0], Ops[1],
- makeArrayRef(Indices, NumElts),
- "shuf");
+ ArrayRef(Indices, NumElts), "shuf");
}
case X86::BI__builtin_ia32_vperm2f128_pd256:
@@ -13548,8 +15137,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
return Builder.CreateShuffleVector(OutOps[0], OutOps[1],
- makeArrayRef(Indices, NumElts),
- "vperm");
+ ArrayRef(Indices, NumElts), "vperm");
}
case X86::BI__builtin_ia32_pslldqi128_byteshift:
@@ -13577,9 +15165,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Value *Zero = llvm::Constant::getNullValue(VecTy);
- Value *SV = Builder.CreateShuffleVector(Zero, Cast,
- makeArrayRef(Indices, NumElts),
- "pslldq");
+ Value *SV = Builder.CreateShuffleVector(
+ Zero, Cast, ArrayRef(Indices, NumElts), "pslldq");
return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast");
}
case X86::BI__builtin_ia32_psrldqi128_byteshift:
@@ -13607,9 +15194,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Value *Zero = llvm::Constant::getNullValue(VecTy);
- Value *SV = Builder.CreateShuffleVector(Cast, Zero,
- makeArrayRef(Indices, NumElts),
- "psrldq");
+ Value *SV = Builder.CreateShuffleVector(
+ Cast, Zero, ArrayRef(Indices, NumElts), "psrldq");
return Builder.CreateBitCast(SV, ResultType, "cast");
}
case X86::BI__builtin_ia32_kshiftliqi:
@@ -13629,9 +15215,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Indices[i] = NumElts + i - ShiftVal;
Value *Zero = llvm::Constant::getNullValue(In->getType());
- Value *SV = Builder.CreateShuffleVector(Zero, In,
- makeArrayRef(Indices, NumElts),
- "kshiftl");
+ Value *SV = Builder.CreateShuffleVector(
+ Zero, In, ArrayRef(Indices, NumElts), "kshiftl");
return Builder.CreateBitCast(SV, Ops[0]->getType());
}
case X86::BI__builtin_ia32_kshiftriqi:
@@ -13651,9 +15236,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Indices[i] = i + ShiftVal;
Value *Zero = llvm::Constant::getNullValue(In->getType());
- Value *SV = Builder.CreateShuffleVector(In, Zero,
- makeArrayRef(Indices, NumElts),
- "kshiftr");
+ Value *SV = Builder.CreateShuffleVector(
+ In, Zero, ArrayRef(Indices, NumElts), "kshiftr");
return Builder.CreateBitCast(SV, Ops[0]->getType());
}
case X86::BI__builtin_ia32_movnti:
@@ -13671,13 +15255,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
BuiltinID == X86::BI__builtin_ia32_movntss)
Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract");
- // Convert the type of the pointer to a pointer to the stored type.
- Value *BC = Builder.CreateBitCast(
- Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast");
-
// Unaligned nontemporal store of the scalar value.
- StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC);
- SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
+ StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, Ptr);
+ SI->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
SI->setAlignment(llvm::Align(1));
return SI;
}
@@ -13728,6 +15308,12 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_selectq_128:
case X86::BI__builtin_ia32_selectq_256:
case X86::BI__builtin_ia32_selectq_512:
+ case X86::BI__builtin_ia32_selectph_128:
+ case X86::BI__builtin_ia32_selectph_256:
+ case X86::BI__builtin_ia32_selectph_512:
+ case X86::BI__builtin_ia32_selectpbf_128:
+ case X86::BI__builtin_ia32_selectpbf_256:
+ case X86::BI__builtin_ia32_selectpbf_512:
case X86::BI__builtin_ia32_selectps_128:
case X86::BI__builtin_ia32_selectps_256:
case X86::BI__builtin_ia32_selectps_512:
@@ -13735,6 +15321,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_selectpd_256:
case X86::BI__builtin_ia32_selectpd_512:
return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]);
+ case X86::BI__builtin_ia32_selectsh_128:
+ case X86::BI__builtin_ia32_selectsbf_128:
case X86::BI__builtin_ia32_selectss_128:
case X86::BI__builtin_ia32_selectsd_128: {
Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
@@ -13932,14 +15520,12 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// First extract half of each vector. This gives better codegen than
// doing it in a single shuffle.
- LHS = Builder.CreateShuffleVector(LHS, LHS,
- makeArrayRef(Indices, NumElts / 2));
- RHS = Builder.CreateShuffleVector(RHS, RHS,
- makeArrayRef(Indices, NumElts / 2));
+ LHS = Builder.CreateShuffleVector(LHS, LHS, ArrayRef(Indices, NumElts / 2));
+ RHS = Builder.CreateShuffleVector(RHS, RHS, ArrayRef(Indices, NumElts / 2));
// Concat the vectors.
// NOTE: Operands are swapped to match the intrinsic definition.
- Value *Res = Builder.CreateShuffleVector(RHS, LHS,
- makeArrayRef(Indices, NumElts));
+ Value *Res =
+ Builder.CreateShuffleVector(RHS, LHS, ArrayRef(Indices, NumElts));
return Builder.CreateBitCast(Res, Ops[0]->getType());
}
@@ -13967,15 +15553,28 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
}
+ case X86::BI__builtin_ia32_sqrtsh_round_mask:
case X86::BI__builtin_ia32_sqrtsd_round_mask:
case X86::BI__builtin_ia32_sqrtss_round_mask: {
unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
// Support only if the rounding mode is 4 (AKA CUR_DIRECTION),
// otherwise keep the intrinsic.
if (CC != 4) {
- Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtsd_round_mask ?
- Intrinsic::x86_avx512_mask_sqrt_sd :
- Intrinsic::x86_avx512_mask_sqrt_ss;
+ Intrinsic::ID IID;
+
+ switch (BuiltinID) {
+ default:
+ llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_sqrtsh_round_mask:
+ IID = Intrinsic::x86_avx512fp16_mask_sqrt_sh;
+ break;
+ case X86::BI__builtin_ia32_sqrtsd_round_mask:
+ IID = Intrinsic::x86_avx512_mask_sqrt_sd;
+ break;
+ case X86::BI__builtin_ia32_sqrtss_round_mask:
+ IID = Intrinsic::x86_avx512_mask_sqrt_ss;
+ break;
+ }
return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
}
Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
@@ -13997,6 +15596,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_sqrtpd:
case X86::BI__builtin_ia32_sqrtps256:
case X86::BI__builtin_ia32_sqrtps:
+ case X86::BI__builtin_ia32_sqrtph256:
+ case X86::BI__builtin_ia32_sqrtph:
+ case X86::BI__builtin_ia32_sqrtph512:
case X86::BI__builtin_ia32_sqrtps512:
case X86::BI__builtin_ia32_sqrtpd512: {
if (Ops.size() == 2) {
@@ -14004,9 +15606,21 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// Support only if the rounding mode is 4 (AKA CUR_DIRECTION),
// otherwise keep the intrinsic.
if (CC != 4) {
- Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtps512 ?
- Intrinsic::x86_avx512_sqrt_ps_512 :
- Intrinsic::x86_avx512_sqrt_pd_512;
+ Intrinsic::ID IID;
+
+ switch (BuiltinID) {
+ default:
+ llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_sqrtph512:
+ IID = Intrinsic::x86_avx512fp16_sqrt_ph_512;
+ break;
+ case X86::BI__builtin_ia32_sqrtps512:
+ IID = Intrinsic::x86_avx512_sqrt_ps_512;
+ break;
+ case X86::BI__builtin_ia32_sqrtpd512:
+ IID = Intrinsic::x86_avx512_sqrt_pd_512;
+ break;
+ }
return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
}
}
@@ -14020,73 +15634,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, Ops[0]);
}
}
- case X86::BI__builtin_ia32_pabsb128:
- case X86::BI__builtin_ia32_pabsw128:
- case X86::BI__builtin_ia32_pabsd128:
- case X86::BI__builtin_ia32_pabsb256:
- case X86::BI__builtin_ia32_pabsw256:
- case X86::BI__builtin_ia32_pabsd256:
- case X86::BI__builtin_ia32_pabsq128:
- case X86::BI__builtin_ia32_pabsq256:
- case X86::BI__builtin_ia32_pabsb512:
- case X86::BI__builtin_ia32_pabsw512:
- case X86::BI__builtin_ia32_pabsd512:
- case X86::BI__builtin_ia32_pabsq512: {
- Function *F = CGM.getIntrinsic(Intrinsic::abs, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
- }
- case X86::BI__builtin_ia32_pmaxsb128:
- case X86::BI__builtin_ia32_pmaxsw128:
- case X86::BI__builtin_ia32_pmaxsd128:
- case X86::BI__builtin_ia32_pmaxsq128:
- case X86::BI__builtin_ia32_pmaxsb256:
- case X86::BI__builtin_ia32_pmaxsw256:
- case X86::BI__builtin_ia32_pmaxsd256:
- case X86::BI__builtin_ia32_pmaxsq256:
- case X86::BI__builtin_ia32_pmaxsb512:
- case X86::BI__builtin_ia32_pmaxsw512:
- case X86::BI__builtin_ia32_pmaxsd512:
- case X86::BI__builtin_ia32_pmaxsq512:
- return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smax);
- case X86::BI__builtin_ia32_pmaxub128:
- case X86::BI__builtin_ia32_pmaxuw128:
- case X86::BI__builtin_ia32_pmaxud128:
- case X86::BI__builtin_ia32_pmaxuq128:
- case X86::BI__builtin_ia32_pmaxub256:
- case X86::BI__builtin_ia32_pmaxuw256:
- case X86::BI__builtin_ia32_pmaxud256:
- case X86::BI__builtin_ia32_pmaxuq256:
- case X86::BI__builtin_ia32_pmaxub512:
- case X86::BI__builtin_ia32_pmaxuw512:
- case X86::BI__builtin_ia32_pmaxud512:
- case X86::BI__builtin_ia32_pmaxuq512:
- return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umax);
- case X86::BI__builtin_ia32_pminsb128:
- case X86::BI__builtin_ia32_pminsw128:
- case X86::BI__builtin_ia32_pminsd128:
- case X86::BI__builtin_ia32_pminsq128:
- case X86::BI__builtin_ia32_pminsb256:
- case X86::BI__builtin_ia32_pminsw256:
- case X86::BI__builtin_ia32_pminsd256:
- case X86::BI__builtin_ia32_pminsq256:
- case X86::BI__builtin_ia32_pminsb512:
- case X86::BI__builtin_ia32_pminsw512:
- case X86::BI__builtin_ia32_pminsd512:
- case X86::BI__builtin_ia32_pminsq512:
- return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smin);
- case X86::BI__builtin_ia32_pminub128:
- case X86::BI__builtin_ia32_pminuw128:
- case X86::BI__builtin_ia32_pminud128:
- case X86::BI__builtin_ia32_pminuq128:
- case X86::BI__builtin_ia32_pminub256:
- case X86::BI__builtin_ia32_pminuw256:
- case X86::BI__builtin_ia32_pminud256:
- case X86::BI__builtin_ia32_pminuq256:
- case X86::BI__builtin_ia32_pminub512:
- case X86::BI__builtin_ia32_pminuw512:
- case X86::BI__builtin_ia32_pminud512:
- case X86::BI__builtin_ia32_pminuq512:
- return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umin);
case X86::BI__builtin_ia32_pmuludq128:
case X86::BI__builtin_ia32_pmuludq256:
@@ -14161,82 +15708,50 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
// Reductions
- case X86::BI__builtin_ia32_reduce_add_d512:
- case X86::BI__builtin_ia32_reduce_add_q512: {
- Function *F =
- CGM.getIntrinsic(Intrinsic::vector_reduce_add, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0]});
- }
- case X86::BI__builtin_ia32_reduce_and_d512:
- case X86::BI__builtin_ia32_reduce_and_q512: {
- Function *F =
- CGM.getIntrinsic(Intrinsic::vector_reduce_and, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0]});
- }
case X86::BI__builtin_ia32_reduce_fadd_pd512:
- case X86::BI__builtin_ia32_reduce_fadd_ps512: {
+ case X86::BI__builtin_ia32_reduce_fadd_ps512:
+ case X86::BI__builtin_ia32_reduce_fadd_ph512:
+ case X86::BI__builtin_ia32_reduce_fadd_ph256:
+ case X86::BI__builtin_ia32_reduce_fadd_ph128: {
Function *F =
CGM.getIntrinsic(Intrinsic::vector_reduce_fadd, Ops[1]->getType());
+ IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
Builder.getFastMathFlags().setAllowReassoc();
return Builder.CreateCall(F, {Ops[0], Ops[1]});
}
case X86::BI__builtin_ia32_reduce_fmul_pd512:
- case X86::BI__builtin_ia32_reduce_fmul_ps512: {
+ case X86::BI__builtin_ia32_reduce_fmul_ps512:
+ case X86::BI__builtin_ia32_reduce_fmul_ph512:
+ case X86::BI__builtin_ia32_reduce_fmul_ph256:
+ case X86::BI__builtin_ia32_reduce_fmul_ph128: {
Function *F =
CGM.getIntrinsic(Intrinsic::vector_reduce_fmul, Ops[1]->getType());
+ IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
Builder.getFastMathFlags().setAllowReassoc();
return Builder.CreateCall(F, {Ops[0], Ops[1]});
}
case X86::BI__builtin_ia32_reduce_fmax_pd512:
- case X86::BI__builtin_ia32_reduce_fmax_ps512: {
+ case X86::BI__builtin_ia32_reduce_fmax_ps512:
+ case X86::BI__builtin_ia32_reduce_fmax_ph512:
+ case X86::BI__builtin_ia32_reduce_fmax_ph256:
+ case X86::BI__builtin_ia32_reduce_fmax_ph128: {
Function *F =
CGM.getIntrinsic(Intrinsic::vector_reduce_fmax, Ops[0]->getType());
+ IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
Builder.getFastMathFlags().setNoNaNs();
return Builder.CreateCall(F, {Ops[0]});
}
case X86::BI__builtin_ia32_reduce_fmin_pd512:
- case X86::BI__builtin_ia32_reduce_fmin_ps512: {
+ case X86::BI__builtin_ia32_reduce_fmin_ps512:
+ case X86::BI__builtin_ia32_reduce_fmin_ph512:
+ case X86::BI__builtin_ia32_reduce_fmin_ph256:
+ case X86::BI__builtin_ia32_reduce_fmin_ph128: {
Function *F =
CGM.getIntrinsic(Intrinsic::vector_reduce_fmin, Ops[0]->getType());
+ IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
Builder.getFastMathFlags().setNoNaNs();
return Builder.CreateCall(F, {Ops[0]});
}
- case X86::BI__builtin_ia32_reduce_mul_d512:
- case X86::BI__builtin_ia32_reduce_mul_q512: {
- Function *F =
- CGM.getIntrinsic(Intrinsic::vector_reduce_mul, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0]});
- }
- case X86::BI__builtin_ia32_reduce_or_d512:
- case X86::BI__builtin_ia32_reduce_or_q512: {
- Function *F =
- CGM.getIntrinsic(Intrinsic::vector_reduce_or, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0]});
- }
- case X86::BI__builtin_ia32_reduce_smax_d512:
- case X86::BI__builtin_ia32_reduce_smax_q512: {
- Function *F =
- CGM.getIntrinsic(Intrinsic::vector_reduce_smax, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0]});
- }
- case X86::BI__builtin_ia32_reduce_smin_d512:
- case X86::BI__builtin_ia32_reduce_smin_q512: {
- Function *F =
- CGM.getIntrinsic(Intrinsic::vector_reduce_smin, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0]});
- }
- case X86::BI__builtin_ia32_reduce_umax_d512:
- case X86::BI__builtin_ia32_reduce_umax_q512: {
- Function *F =
- CGM.getIntrinsic(Intrinsic::vector_reduce_umax, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0]});
- }
- case X86::BI__builtin_ia32_reduce_umin_d512:
- case X86::BI__builtin_ia32_reduce_umin_q512: {
- Function *F =
- CGM.getIntrinsic(Intrinsic::vector_reduce_umin, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0]});
- }
// 3DNow!
case X86::BI__builtin_ia32_pswapdsf:
@@ -14311,6 +15826,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_fpclassps128_mask:
case X86::BI__builtin_ia32_fpclassps256_mask:
case X86::BI__builtin_ia32_fpclassps512_mask:
+ case X86::BI__builtin_ia32_fpclassph128_mask:
+ case X86::BI__builtin_ia32_fpclassph256_mask:
+ case X86::BI__builtin_ia32_fpclassph512_mask:
case X86::BI__builtin_ia32_fpclasspd128_mask:
case X86::BI__builtin_ia32_fpclasspd256_mask:
case X86::BI__builtin_ia32_fpclasspd512_mask: {
@@ -14322,6 +15840,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Intrinsic::ID ID;
switch (BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_fpclassph128_mask:
+ ID = Intrinsic::x86_avx512fp16_fpclass_ph_128;
+ break;
+ case X86::BI__builtin_ia32_fpclassph256_mask:
+ ID = Intrinsic::x86_avx512fp16_fpclass_ph_256;
+ break;
+ case X86::BI__builtin_ia32_fpclassph512_mask:
+ ID = Intrinsic::x86_avx512fp16_fpclass_ph_512;
+ break;
case X86::BI__builtin_ia32_fpclassps128_mask:
ID = Intrinsic::x86_avx512_fpclass_ps_128;
break;
@@ -14459,6 +15986,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_cmpordps:
case X86::BI__builtin_ia32_cmpordpd:
return getVectorFCmpIR(CmpInst::FCMP_ORD, /*IsSignaling*/false);
+ case X86::BI__builtin_ia32_cmpph128_mask:
+ case X86::BI__builtin_ia32_cmpph256_mask:
+ case X86::BI__builtin_ia32_cmpph512_mask:
case X86::BI__builtin_ia32_cmpps128_mask:
case X86::BI__builtin_ia32_cmpps256_mask:
case X86::BI__builtin_ia32_cmpps512_mask:
@@ -14466,7 +15996,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_cmppd256_mask:
case X86::BI__builtin_ia32_cmppd512_mask:
IsMaskFCmp = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case X86::BI__builtin_ia32_cmpps:
case X86::BI__builtin_ia32_cmpps256:
case X86::BI__builtin_ia32_cmppd:
@@ -14535,6 +16065,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_cmppd256:
IID = Intrinsic::x86_avx_cmp_pd_256;
break;
+ case X86::BI__builtin_ia32_cmpph128_mask:
+ IID = Intrinsic::x86_avx512fp16_mask_cmp_ph_128;
+ break;
+ case X86::BI__builtin_ia32_cmpph256_mask:
+ IID = Intrinsic::x86_avx512fp16_mask_cmp_ph_256;
+ break;
+ case X86::BI__builtin_ia32_cmpph512_mask:
+ IID = Intrinsic::x86_avx512fp16_mask_cmp_ph_512;
+ break;
case X86::BI__builtin_ia32_cmpps512_mask:
IID = Intrinsic::x86_avx512_mask_cmp_ps_512;
break;
@@ -14631,7 +16170,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType()));
}
-// AVX512 bf16 intrinsics
+ // AVX512 bf16 intrinsics
case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
Ops[2] = getMaskVecValue(
*this, Ops[2],
@@ -14640,7 +16179,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
}
case X86::BI__builtin_ia32_cvtsbf162ss_32:
- return EmitX86CvtBF16ToFloatExpr(*this, E, Ops);
+ return Builder.CreateFPExt(Ops[0], Builder.getFloatTy());
case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: {
@@ -14658,6 +16197,46 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return EmitX86Select(*this, Ops[2], Res, Ops[1]);
}
+ case X86::BI__cpuid:
+ case X86::BI__cpuidex: {
+ Value *FuncId = EmitScalarExpr(E->getArg(1));
+ Value *SubFuncId = BuiltinID == X86::BI__cpuidex
+ ? EmitScalarExpr(E->getArg(2))
+ : llvm::ConstantInt::get(Int32Ty, 0);
+
+ llvm::StructType *CpuidRetTy =
+ llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty, Int32Ty);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CpuidRetTy, {Int32Ty, Int32Ty}, false);
+
+ StringRef Asm, Constraints;
+ if (getTarget().getTriple().getArch() == llvm::Triple::x86) {
+ Asm = "cpuid";
+ Constraints = "={ax},={bx},={cx},={dx},{ax},{cx}";
+ } else {
+ // x86-64 uses %rbx as the base register, so preserve it.
+ Asm = "xchgq %rbx, ${1:q}\n"
+ "cpuid\n"
+ "xchgq %rbx, ${1:q}";
+ Constraints = "={ax},=r,={cx},={dx},0,2";
+ }
+
+ llvm::InlineAsm *IA = llvm::InlineAsm::get(FTy, Asm, Constraints,
+ /*hasSideEffects=*/false);
+ Value *IACall = Builder.CreateCall(IA, {FuncId, SubFuncId});
+ Value *BasePtr = EmitScalarExpr(E->getArg(0));
+ Value *Store = nullptr;
+ for (unsigned i = 0; i < 4; i++) {
+ Value *Extracted = Builder.CreateExtractValue(IACall, i);
+ Value *StorePtr = Builder.CreateConstInBoundsGEP1_32(Int32Ty, BasePtr, i);
+ Store = Builder.CreateAlignedStore(Extracted, StorePtr, getIntAlign());
+ }
+
+ // Return the last store instruction to signal that we have emitted the
+ // the intrinsic.
+ return Store;
+ }
+
case X86::BI__emul:
case X86::BI__emulu: {
llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64);
@@ -14748,8 +16327,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__readfsdword:
case X86::BI__readfsqword: {
llvm::Type *IntTy = ConvertType(E->getType());
- Value *Ptr =
- Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 257));
+ Value *Ptr = Builder.CreateIntToPtr(
+ Ops[0], llvm::PointerType::get(getLLVMContext(), 257));
LoadInst *Load = Builder.CreateAlignedLoad(
IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
Load->setVolatile(true);
@@ -14760,51 +16339,21 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__readgsdword:
case X86::BI__readgsqword: {
llvm::Type *IntTy = ConvertType(E->getType());
- Value *Ptr =
- Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 256));
+ Value *Ptr = Builder.CreateIntToPtr(
+ Ops[0], llvm::PointerType::get(getLLVMContext(), 256));
LoadInst *Load = Builder.CreateAlignedLoad(
IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
Load->setVolatile(true);
return Load;
}
- case X86::BI__builtin_ia32_paddsb512:
- case X86::BI__builtin_ia32_paddsw512:
- case X86::BI__builtin_ia32_paddsb256:
- case X86::BI__builtin_ia32_paddsw256:
- case X86::BI__builtin_ia32_paddsb128:
- case X86::BI__builtin_ia32_paddsw128:
- return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::sadd_sat);
- case X86::BI__builtin_ia32_paddusb512:
- case X86::BI__builtin_ia32_paddusw512:
- case X86::BI__builtin_ia32_paddusb256:
- case X86::BI__builtin_ia32_paddusw256:
- case X86::BI__builtin_ia32_paddusb128:
- case X86::BI__builtin_ia32_paddusw128:
- return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::uadd_sat);
- case X86::BI__builtin_ia32_psubsb512:
- case X86::BI__builtin_ia32_psubsw512:
- case X86::BI__builtin_ia32_psubsb256:
- case X86::BI__builtin_ia32_psubsw256:
- case X86::BI__builtin_ia32_psubsb128:
- case X86::BI__builtin_ia32_psubsw128:
- return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::ssub_sat);
- case X86::BI__builtin_ia32_psubusb512:
- case X86::BI__builtin_ia32_psubusw512:
- case X86::BI__builtin_ia32_psubusb256:
- case X86::BI__builtin_ia32_psubusw256:
- case X86::BI__builtin_ia32_psubusb128:
- case X86::BI__builtin_ia32_psubusw128:
- return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::usub_sat);
case X86::BI__builtin_ia32_encodekey128_u32: {
Intrinsic::ID IID = Intrinsic::x86_encodekey128;
Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1]});
- for (int i = 0; i < 6; ++i) {
+ for (int i = 0; i < 3; ++i) {
Value *Extract = Builder.CreateExtractValue(Call, i + 1);
Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[2], i * 16);
- Ptr = Builder.CreateBitCast(
- Ptr, llvm::PointerType::getUnqual(Extract->getType()));
Builder.CreateAlignedStore(Extract, Ptr, Align(1));
}
@@ -14816,11 +16365,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Value *Call =
Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1], Ops[2]});
- for (int i = 0; i < 7; ++i) {
+ for (int i = 0; i < 4; ++i) {
Value *Extract = Builder.CreateExtractValue(Call, i + 1);
Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[3], i * 16);
- Ptr = Builder.CreateBitCast(
- Ptr, llvm::PointerType::getUnqual(Extract->getType()));
Builder.CreateAlignedStore(Extract, Ptr, Align(1));
}
@@ -14941,15 +16488,57 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Builder.SetInsertPoint(End);
return Builder.CreateExtractValue(Call, 0);
}
+ case X86::BI__builtin_ia32_vfcmaddcph512_mask:
+ IsConjFMA = true;
+ [[fallthrough]];
+ case X86::BI__builtin_ia32_vfmaddcph512_mask: {
+ Intrinsic::ID IID = IsConjFMA
+ ? Intrinsic::x86_avx512fp16_mask_vfcmadd_cph_512
+ : Intrinsic::x86_avx512fp16_mask_vfmadd_cph_512;
+ Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
+ return EmitX86Select(*this, Ops[3], Call, Ops[0]);
+ }
+ case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
+ IsConjFMA = true;
+ [[fallthrough]];
+ case X86::BI__builtin_ia32_vfmaddcsh_round_mask: {
+ Intrinsic::ID IID = IsConjFMA ? Intrinsic::x86_avx512fp16_mask_vfcmadd_csh
+ : Intrinsic::x86_avx512fp16_mask_vfmadd_csh;
+ Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
+ Value *And = Builder.CreateAnd(Ops[3], llvm::ConstantInt::get(Int8Ty, 1));
+ return EmitX86Select(*this, And, Call, Ops[0]);
+ }
+ case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
+ IsConjFMA = true;
+ [[fallthrough]];
+ case X86::BI__builtin_ia32_vfmaddcsh_round_mask3: {
+ Intrinsic::ID IID = IsConjFMA ? Intrinsic::x86_avx512fp16_mask_vfcmadd_csh
+ : Intrinsic::x86_avx512fp16_mask_vfmadd_csh;
+ Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
+ static constexpr int Mask[] = {0, 5, 6, 7};
+ return Builder.CreateShuffleVector(Call, Ops[2], Mask);
+ }
+ case X86::BI__builtin_ia32_prefetchi:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::prefetch, Ops[0]->getType()),
+ {Ops[0], llvm::ConstantInt::get(Int32Ty, 0), Ops[1],
+ llvm::ConstantInt::get(Int32Ty, 0)});
}
}
Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
- SmallVector<Value*, 4> Ops;
-
- for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
- Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ // Do not emit the builtin arguments in the arguments of a function call,
+ // because the evaluation order of function arguments is not specified in C++.
+ // This is important when testing to ensure the arguments are emitted in the
+ // same order every time. Eg:
+ // Instead of:
+ // return Builder.CreateFDiv(EmitScalarExpr(E->getArg(0)),
+ // EmitScalarExpr(E->getArg(1)), "swdiv");
+ // Use:
+ // Value *Op0 = EmitScalarExpr(E->getArg(0));
+ // Value *Op1 = EmitScalarExpr(E->getArg(1));
+ // return Builder.CreateFDiv(Op0, Op1, "swdiv")
Intrinsic::ID ID = Intrinsic::not_intrinsic;
@@ -14976,11 +16565,11 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_vsx_lxvl:
case PPC::BI__builtin_vsx_lxvll:
{
- if(BuiltinID == PPC::BI__builtin_vsx_lxvl ||
- BuiltinID == PPC::BI__builtin_vsx_lxvll){
- Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
- }else {
- Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
+ SmallVector<Value *, 2> Ops;
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ if (!(BuiltinID == PPC::BI__builtin_vsx_lxvl ||
+ BuiltinID == PPC::BI__builtin_vsx_lxvll)) {
Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]);
Ops.pop_back();
}
@@ -15044,11 +16633,12 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_vsx_stxvl:
case PPC::BI__builtin_vsx_stxvll:
{
- if(BuiltinID == PPC::BI__builtin_vsx_stxvl ||
- BuiltinID == PPC::BI__builtin_vsx_stxvll ){
- Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
- }else {
- Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
+ SmallVector<Value *, 3> Ops;
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ Ops.push_back(EmitScalarExpr(E->getArg(2)));
+ if (!(BuiltinID == PPC::BI__builtin_vsx_stxvl ||
+ BuiltinID == PPC::BI__builtin_vsx_stxvll)) {
Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]);
Ops.pop_back();
}
@@ -15096,14 +16686,16 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
// Essentially boils down to performing an unaligned VMX load sequence so
// as to avoid crossing a page boundary and then shuffling the elements
// into the right side of the vector register.
- int64_t NumBytes = cast<ConstantInt>(Ops[1])->getZExtValue();
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ int64_t NumBytes = cast<ConstantInt>(Op1)->getZExtValue();
llvm::Type *ResTy = ConvertType(E->getType());
bool IsLE = getTarget().isLittleEndian();
// If the user wants the entire vector, just load the entire vector.
if (NumBytes == 16) {
- Value *BC = Builder.CreateBitCast(Ops[0], ResTy->getPointerTo());
- Value *LD = Builder.CreateLoad(Address(BC, CharUnits::fromQuantity(1)));
+ Value *LD =
+ Builder.CreateLoad(Address(Op0, ResTy, CharUnits::fromQuantity(1)));
if (!IsLE)
return LD;
@@ -15119,16 +16711,14 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
: Intrinsic::ppc_altivec_lvsl);
llvm::Function *Vperm = CGM.getIntrinsic(Intrinsic::ppc_altivec_vperm);
Value *HiMem = Builder.CreateGEP(
- Int8Ty, Ops[0], ConstantInt::get(Ops[1]->getType(), NumBytes - 1));
- Value *LoLd = Builder.CreateCall(Lvx, Ops[0], "ld.lo");
+ Int8Ty, Op0, ConstantInt::get(Op1->getType(), NumBytes - 1));
+ Value *LoLd = Builder.CreateCall(Lvx, Op0, "ld.lo");
Value *HiLd = Builder.CreateCall(Lvx, HiMem, "ld.hi");
- Value *Mask1 = Builder.CreateCall(Lvs, Ops[0], "mask1");
+ Value *Mask1 = Builder.CreateCall(Lvs, Op0, "mask1");
- Ops.clear();
- Ops.push_back(IsLE ? HiLd : LoLd);
- Ops.push_back(IsLE ? LoLd : HiLd);
- Ops.push_back(Mask1);
- Value *AllElts = Builder.CreateCall(Vperm, Ops, "shuffle1");
+ Op0 = IsLE ? HiLd : LoLd;
+ Op1 = IsLE ? LoLd : HiLd;
+ Value *AllElts = Builder.CreateCall(Vperm, {Op0, Op1, Mask1}, "shuffle1");
Constant *Zero = llvm::Constant::getNullValue(IsLE ? ResTy : AllElts->getType());
if (IsLE) {
@@ -15149,23 +16739,24 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Builder.CreateCall(Vperm, {Zero, AllElts, Mask2}, "shuffle2"), ResTy);
}
case PPC::BI__builtin_vsx_strmb: {
- int64_t NumBytes = cast<ConstantInt>(Ops[1])->getZExtValue();
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ int64_t NumBytes = cast<ConstantInt>(Op1)->getZExtValue();
bool IsLE = getTarget().isLittleEndian();
auto StoreSubVec = [&](unsigned Width, unsigned Offset, unsigned EltNo) {
// Storing the whole vector, simply store it on BE and reverse bytes and
// store on LE.
if (Width == 16) {
- Value *BC =
- Builder.CreateBitCast(Ops[0], Ops[2]->getType()->getPointerTo());
- Value *StVec = Ops[2];
+ Value *StVec = Op2;
if (IsLE) {
SmallVector<int, 16> RevMask;
for (int Idx = 0; Idx < 16; Idx++)
RevMask.push_back(15 - Idx);
- StVec = Builder.CreateShuffleVector(Ops[2], Ops[2], RevMask);
+ StVec = Builder.CreateShuffleVector(Op2, Op2, RevMask);
}
- return Builder.CreateStore(StVec,
- Address(BC, CharUnits::fromQuantity(1)));
+ return Builder.CreateStore(
+ StVec, Address(Op0, Op2->getType(), CharUnits::fromQuantity(1)));
}
auto *ConvTy = Int64Ty;
unsigned NumElts = 0;
@@ -15190,17 +16781,16 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
break;
}
Value *Vec = Builder.CreateBitCast(
- Ops[2], llvm::FixedVectorType::get(ConvTy, NumElts));
- Value *Ptr = Builder.CreateGEP(Int8Ty, Ops[0],
- ConstantInt::get(Int64Ty, Offset));
- Value *PtrBC = Builder.CreateBitCast(Ptr, ConvTy->getPointerTo());
+ Op2, llvm::FixedVectorType::get(ConvTy, NumElts));
+ Value *Ptr =
+ Builder.CreateGEP(Int8Ty, Op0, ConstantInt::get(Int64Ty, Offset));
Value *Elt = Builder.CreateExtractElement(Vec, EltNo);
if (IsLE && Width > 1) {
Function *F = CGM.getIntrinsic(Intrinsic::bswap, ConvTy);
Elt = Builder.CreateCall(F, Elt);
}
- return Builder.CreateStore(Elt,
- Address(PtrBC, CharUnits::fromQuantity(1)));
+ return Builder.CreateStore(
+ Elt, Address(Ptr, ConvTy, CharUnits::fromQuantity(1)));
};
unsigned Stored = 0;
unsigned RemainingBytes = NumBytes;
@@ -15264,62 +16854,65 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
return Builder.CreateCall(F, {X, Undef});
}
- case PPC::BI__builtin_altivec_vec_replace_elt:
- case PPC::BI__builtin_altivec_vec_replace_unaligned: {
- // The third argument of vec_replace_elt and vec_replace_unaligned must
- // be a compile time constant and will be emitted either to the vinsw
- // or vinsd instruction.
- ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
+ case PPC::BI__builtin_altivec_vinsd:
+ case PPC::BI__builtin_altivec_vinsw:
+ case PPC::BI__builtin_altivec_vinsd_elt:
+ case PPC::BI__builtin_altivec_vinsw_elt: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+
+ bool IsUnaligned = (BuiltinID == PPC::BI__builtin_altivec_vinsw ||
+ BuiltinID == PPC::BI__builtin_altivec_vinsd);
+
+ bool Is32bit = (BuiltinID == PPC::BI__builtin_altivec_vinsw ||
+ BuiltinID == PPC::BI__builtin_altivec_vinsw_elt);
+
+ // The third argument must be a compile time constant.
+ ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
assert(ArgCI &&
"Third Arg to vinsw/vinsd intrinsic must be a constant integer!");
- llvm::Type *ResultType = ConvertType(E->getType());
- llvm::Function *F = nullptr;
- Value *Call = nullptr;
+
+ // Valid value for the third argument is dependent on the input type and
+ // builtin called.
+ int ValidMaxValue = 0;
+ if (IsUnaligned)
+ ValidMaxValue = (Is32bit) ? 12 : 8;
+ else
+ ValidMaxValue = (Is32bit) ? 3 : 1;
+
+ // Get value of third argument.
int64_t ConstArg = ArgCI->getSExtValue();
- unsigned ArgWidth = Ops[1]->getType()->getPrimitiveSizeInBits();
- bool Is32Bit = false;
- assert((ArgWidth == 32 || ArgWidth == 64) && "Invalid argument width");
- // The input to vec_replace_elt is an element index, not a byte index.
- if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt)
- ConstArg *= ArgWidth / 8;
- if (ArgWidth == 32) {
- Is32Bit = true;
- // When the second argument is 32 bits, it can either be an integer or
- // a float. The vinsw intrinsic is used in this case.
- F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsw);
+
+ // Compose range checking error message.
+ std::string RangeErrMsg = IsUnaligned ? "byte" : "element";
+ RangeErrMsg += " number " + llvm::to_string(ConstArg);
+ RangeErrMsg += " is outside of the valid range [0, ";
+ RangeErrMsg += llvm::to_string(ValidMaxValue) + "]";
+
+ // Issue error if third argument is not within the valid range.
+ if (ConstArg < 0 || ConstArg > ValidMaxValue)
+ CGM.Error(E->getExprLoc(), RangeErrMsg);
+
+ // Input to vec_replace_elt is an element index, convert to byte index.
+ if (!IsUnaligned) {
+ ConstArg *= Is32bit ? 4 : 8;
// Fix the constant according to endianess.
if (getTarget().isLittleEndian())
- ConstArg = 12 - ConstArg;
- } else {
- // When the second argument is 64 bits, it can either be a long long or
- // a double. The vinsd intrinsic is used in this case.
- F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsd);
- // Fix the constant for little endian.
- if (getTarget().isLittleEndian())
- ConstArg = 8 - ConstArg;
+ ConstArg = (Is32bit ? 12 : 8) - ConstArg;
}
- Ops[2] = ConstantInt::getSigned(Int32Ty, ConstArg);
- // Depending on ArgWidth, the input vector could be a float or a double.
- // If the input vector is a float type, bitcast the inputs to integers. Or,
- // if the input vector is a double, bitcast the inputs to 64-bit integers.
- if (!Ops[1]->getType()->isIntegerTy(ArgWidth)) {
- Ops[0] = Builder.CreateBitCast(
- Ops[0], Is32Bit ? llvm::FixedVectorType::get(Int32Ty, 4)
- : llvm::FixedVectorType::get(Int64Ty, 2));
- Ops[1] = Builder.CreateBitCast(Ops[1], Is32Bit ? Int32Ty : Int64Ty);
- }
- // Emit the call to vinsw or vinsd.
- Call = Builder.CreateCall(F, Ops);
- // Depending on the builtin, bitcast to the approriate result type.
- if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt &&
- !Ops[1]->getType()->isIntegerTy())
- return Builder.CreateBitCast(Call, ResultType);
- else if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt &&
- Ops[1]->getType()->isIntegerTy())
- return Call;
- else
- return Builder.CreateBitCast(Call,
- llvm::FixedVectorType::get(Int8Ty, 16));
+
+ ID = Is32bit ? Intrinsic::ppc_altivec_vinsw : Intrinsic::ppc_altivec_vinsd;
+ Op2 = ConstantInt::getSigned(Int32Ty, ConstArg);
+ // Casting input to vector int as per intrinsic definition.
+ Op0 =
+ Is32bit
+ ? Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int32Ty, 4))
+ : Builder.CreateBitCast(Op0,
+ llvm::FixedVectorType::get(Int64Ty, 2));
+ return Builder.CreateBitCast(
+ Builder.CreateCall(CGM.getIntrinsic(ID), {Op0, Op1, Op2}), ResultType);
}
case PPC::BI__builtin_altivec_vpopcntb:
case PPC::BI__builtin_altivec_vpopcnth:
@@ -15332,15 +16925,60 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
}
case PPC::BI__builtin_altivec_vadduqm:
case PPC::BI__builtin_altivec_vsubuqm: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
- Ops[0] =
- Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int128Ty, 1));
- Ops[1] =
- Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int128Ty, 1));
+ Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int128Ty, 1));
+ Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int128Ty, 1));
if (BuiltinID == PPC::BI__builtin_altivec_vadduqm)
- return Builder.CreateAdd(Ops[0], Ops[1], "vadduqm");
+ return Builder.CreateAdd(Op0, Op1, "vadduqm");
else
- return Builder.CreateSub(Ops[0], Ops[1], "vsubuqm");
+ return Builder.CreateSub(Op0, Op1, "vsubuqm");
+ }
+ case PPC::BI__builtin_altivec_vaddcuq_c:
+ case PPC::BI__builtin_altivec_vsubcuq_c: {
+ SmallVector<Value *, 2> Ops;
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ llvm::Type *V1I128Ty = llvm::FixedVectorType::get(
+ llvm::IntegerType::get(getLLVMContext(), 128), 1);
+ Ops.push_back(Builder.CreateBitCast(Op0, V1I128Ty));
+ Ops.push_back(Builder.CreateBitCast(Op1, V1I128Ty));
+ ID = (BuiltinID == PPC::BI__builtin_altivec_vaddcuq_c)
+ ? Intrinsic::ppc_altivec_vaddcuq
+ : Intrinsic::ppc_altivec_vsubcuq;
+ return Builder.CreateCall(CGM.getIntrinsic(ID), Ops, "");
+ }
+ case PPC::BI__builtin_altivec_vaddeuqm_c:
+ case PPC::BI__builtin_altivec_vaddecuq_c:
+ case PPC::BI__builtin_altivec_vsubeuqm_c:
+ case PPC::BI__builtin_altivec_vsubecuq_c: {
+ SmallVector<Value *, 3> Ops;
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ llvm::Type *V1I128Ty = llvm::FixedVectorType::get(
+ llvm::IntegerType::get(getLLVMContext(), 128), 1);
+ Ops.push_back(Builder.CreateBitCast(Op0, V1I128Ty));
+ Ops.push_back(Builder.CreateBitCast(Op1, V1I128Ty));
+ Ops.push_back(Builder.CreateBitCast(Op2, V1I128Ty));
+ switch (BuiltinID) {
+ default:
+ llvm_unreachable("Unsupported intrinsic!");
+ case PPC::BI__builtin_altivec_vaddeuqm_c:
+ ID = Intrinsic::ppc_altivec_vaddeuqm;
+ break;
+ case PPC::BI__builtin_altivec_vaddecuq_c:
+ ID = Intrinsic::ppc_altivec_vaddecuq;
+ break;
+ case PPC::BI__builtin_altivec_vsubeuqm_c:
+ ID = Intrinsic::ppc_altivec_vsubeuqm;
+ break;
+ case PPC::BI__builtin_altivec_vsubecuq_c:
+ ID = Intrinsic::ppc_altivec_vsubecuq;
+ break;
+ }
+ return Builder.CreateCall(CGM.getIntrinsic(ID), Ops, "");
}
// Rotate and insert under mask operation.
// __rldimi(rs, is, shift, mask)
@@ -15349,29 +16987,37 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
// (rotl(rs, shift) & mask) | (is & ~mask)
case PPC::BI__builtin_ppc_rldimi:
case PPC::BI__builtin_ppc_rlwimi: {
- llvm::Type *Ty = Ops[0]->getType();
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ Value *Op3 = EmitScalarExpr(E->getArg(3));
+ llvm::Type *Ty = Op0->getType();
Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
if (BuiltinID == PPC::BI__builtin_ppc_rldimi)
- Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
- Value *Shift = Builder.CreateCall(F, {Ops[0], Ops[0], Ops[2]});
- Value *X = Builder.CreateAnd(Shift, Ops[3]);
- Value *Y = Builder.CreateAnd(Ops[1], Builder.CreateNot(Ops[3]));
+ Op2 = Builder.CreateZExt(Op2, Int64Ty);
+ Value *Shift = Builder.CreateCall(F, {Op0, Op0, Op2});
+ Value *X = Builder.CreateAnd(Shift, Op3);
+ Value *Y = Builder.CreateAnd(Op1, Builder.CreateNot(Op3));
return Builder.CreateOr(X, Y);
}
// Rotate and insert under mask operation.
// __rlwnm(rs, shift, mask)
// rotl(rs, shift) & mask
case PPC::BI__builtin_ppc_rlwnm: {
- llvm::Type *Ty = Ops[0]->getType();
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ llvm::Type *Ty = Op0->getType();
Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
- Value *Shift = Builder.CreateCall(F, {Ops[0], Ops[0], Ops[1]});
- return Builder.CreateAnd(Shift, Ops[2]);
+ Value *Shift = Builder.CreateCall(F, {Op0, Op0, Op1});
+ return Builder.CreateAnd(Shift, Op2);
}
case PPC::BI__builtin_ppc_poppar4:
case PPC::BI__builtin_ppc_poppar8: {
- llvm::Type *ArgType = Ops[0]->getType();
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ArgType = Op0->getType();
Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
- Value *Tmp = Builder.CreateCall(F, Ops[0]);
+ Value *Tmp = Builder.CreateCall(F, Op0);
llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
@@ -15381,10 +17027,12 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
return Result;
}
case PPC::BI__builtin_ppc_cmpb: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
if (getTarget().getTriple().isPPC64()) {
Function *F =
CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int64Ty, Int64Ty, Int64Ty});
- return Builder.CreateCall(F, Ops, "cmpb");
+ return Builder.CreateCall(F, {Op0, Op1}, "cmpb");
}
// For 32 bit, emit the code as below:
// %conv = trunc i64 %a to i32
@@ -15402,13 +17050,13 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
// ret i64 %or
Function *F =
CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int32Ty, Int32Ty, Int32Ty});
- Value *ArgOneLo = Builder.CreateTrunc(Ops[0], Int32Ty);
- Value *ArgTwoLo = Builder.CreateTrunc(Ops[1], Int32Ty);
+ Value *ArgOneLo = Builder.CreateTrunc(Op0, Int32Ty);
+ Value *ArgTwoLo = Builder.CreateTrunc(Op1, Int32Ty);
Constant *ShiftAmt = ConstantInt::get(Int64Ty, 32);
Value *ArgOneHi =
- Builder.CreateTrunc(Builder.CreateLShr(Ops[0], ShiftAmt), Int32Ty);
+ Builder.CreateTrunc(Builder.CreateLShr(Op0, ShiftAmt), Int32Ty);
Value *ArgTwoHi =
- Builder.CreateTrunc(Builder.CreateLShr(Ops[1], ShiftAmt), Int32Ty);
+ Builder.CreateTrunc(Builder.CreateLShr(Op1, ShiftAmt), Int32Ty);
Value *ResLo = Builder.CreateZExt(
Builder.CreateCall(F, {ArgOneLo, ArgTwoLo}, "cmpb"), Int64Ty);
Value *ResHiShift = Builder.CreateZExt(
@@ -15502,24 +17150,37 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
return FDiv;
}
case PPC::BI__builtin_ppc_alignx: {
- ConstantInt *AlignmentCI = cast<ConstantInt>(Ops[0]);
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ ConstantInt *AlignmentCI = cast<ConstantInt>(Op0);
if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
- AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
+ AlignmentCI = ConstantInt::get(AlignmentCI->getIntegerType(),
llvm::Value::MaximumAlignment);
- emitAlignmentAssumption(Ops[1], E->getArg(1),
+ emitAlignmentAssumption(Op1, E->getArg(1),
/*The expr loc is sufficient.*/ SourceLocation(),
AlignmentCI, nullptr);
- return Ops[1];
+ return Op1;
}
case PPC::BI__builtin_ppc_rdlam: {
- llvm::Type *Ty = Ops[0]->getType();
- Value *ShiftAmt = Builder.CreateIntCast(Ops[1], Ty, false);
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ llvm::Type *Ty = Op0->getType();
+ Value *ShiftAmt = Builder.CreateIntCast(Op1, Ty, false);
Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
- Value *Rotate = Builder.CreateCall(F, {Ops[0], Ops[0], ShiftAmt});
- return Builder.CreateAnd(Rotate, Ops[2]);
+ Value *Rotate = Builder.CreateCall(F, {Op0, Op0, ShiftAmt});
+ return Builder.CreateAnd(Rotate, Op2);
+ }
+ case PPC::BI__builtin_ppc_load2r: {
+ Function *F = CGM.getIntrinsic(Intrinsic::ppc_load2r);
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *LoadIntrinsic = Builder.CreateCall(F, {Op0});
+ return Builder.CreateTrunc(LoadIntrinsic, Int16Ty);
}
// FMA variations
+ case PPC::BI__builtin_ppc_fnmsub:
+ case PPC::BI__builtin_ppc_fnmsubs:
case PPC::BI__builtin_vsx_xvmaddadp:
case PPC::BI__builtin_vsx_xvmaddasp:
case PPC::BI__builtin_vsx_xvnmaddadp:
@@ -15558,6 +17219,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
F, {X, Y, Builder.CreateFNeg(Z, "neg")});
else
return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
+ case PPC::BI__builtin_ppc_fnmsub:
+ case PPC::BI__builtin_ppc_fnmsubs:
case PPC::BI__builtin_vsx_xvnmsubadp:
case PPC::BI__builtin_vsx_xvnmsubasp:
if (Builder.getIsFPConstrained())
@@ -15566,95 +17229,96 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
"neg");
else
- return Builder.CreateFNeg(
- Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
- "neg");
- }
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::ppc_fnmsub, ResultType), {X, Y, Z});
+ }
llvm_unreachable("Unknown FMA operation");
return nullptr; // Suppress no-return warning
}
case PPC::BI__builtin_vsx_insertword: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw);
// Third argument is a compile time constant int. It must be clamped to
// to the range [0, 12].
- ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
+ ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
assert(ArgCI &&
"Third arg to xxinsertw intrinsic must be constant integer");
const int64_t MaxIndex = 12;
- int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
+ int64_t Index = std::clamp(ArgCI->getSExtValue(), (int64_t)0, MaxIndex);
// The builtin semantics don't exactly match the xxinsertw instructions
// semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the
// word from the first argument, and inserts it in the second argument. The
// instruction extracts the word from its second input register and inserts
// it into its first input register, so swap the first and second arguments.
- std::swap(Ops[0], Ops[1]);
+ std::swap(Op0, Op1);
// Need to cast the second argument from a vector of unsigned int to a
// vector of long long.
- Ops[1] =
- Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
+ Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int64Ty, 2));
if (getTarget().isLittleEndian()) {
// Reverse the double words in the vector we will extract from.
- Ops[0] =
- Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
- Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ArrayRef<int>{1, 0});
+ Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int64Ty, 2));
+ Op0 = Builder.CreateShuffleVector(Op0, Op0, ArrayRef<int>{1, 0});
// Reverse the index.
Index = MaxIndex - Index;
}
// Intrinsic expects the first arg to be a vector of int.
- Ops[0] =
- Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
- Ops[2] = ConstantInt::getSigned(Int32Ty, Index);
- return Builder.CreateCall(F, Ops);
+ Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int32Ty, 4));
+ Op2 = ConstantInt::getSigned(Int32Ty, Index);
+ return Builder.CreateCall(F, {Op0, Op1, Op2});
}
case PPC::BI__builtin_vsx_extractuword: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw);
// Intrinsic expects the first argument to be a vector of doublewords.
- Ops[0] =
- Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
+ Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int64Ty, 2));
// The second argument is a compile time constant int that needs to
// be clamped to the range [0, 12].
- ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[1]);
+ ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op1);
assert(ArgCI &&
"Second Arg to xxextractuw intrinsic must be a constant integer!");
const int64_t MaxIndex = 12;
- int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
+ int64_t Index = std::clamp(ArgCI->getSExtValue(), (int64_t)0, MaxIndex);
if (getTarget().isLittleEndian()) {
// Reverse the index.
Index = MaxIndex - Index;
- Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
+ Op1 = ConstantInt::getSigned(Int32Ty, Index);
// Emit the call, then reverse the double words of the results vector.
- Value *Call = Builder.CreateCall(F, Ops);
+ Value *Call = Builder.CreateCall(F, {Op0, Op1});
Value *ShuffleCall =
Builder.CreateShuffleVector(Call, Call, ArrayRef<int>{1, 0});
return ShuffleCall;
} else {
- Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
- return Builder.CreateCall(F, Ops);
+ Op1 = ConstantInt::getSigned(Int32Ty, Index);
+ return Builder.CreateCall(F, {Op0, Op1});
}
}
case PPC::BI__builtin_vsx_xxpermdi: {
- ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
assert(ArgCI && "Third arg must be constant integer!");
unsigned Index = ArgCI->getZExtValue();
- Ops[0] =
- Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
- Ops[1] =
- Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
+ Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int64Ty, 2));
+ Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int64Ty, 2));
// Account for endianness by treating this as just a shuffle. So we use the
// same indices for both LE and BE in order to produce expected results in
@@ -15663,21 +17327,21 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
int ElemIdx1 = 2 + (Index & 1);
int ShuffleElts[2] = {ElemIdx0, ElemIdx1};
- Value *ShuffleCall =
- Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
+ Value *ShuffleCall = Builder.CreateShuffleVector(Op0, Op1, ShuffleElts);
QualType BIRetType = E->getType();
auto RetTy = ConvertType(BIRetType);
return Builder.CreateBitCast(ShuffleCall, RetTy);
}
case PPC::BI__builtin_vsx_xxsldwi: {
- ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
assert(ArgCI && "Third argument must be a compile time constant");
unsigned Index = ArgCI->getZExtValue() & 0x3;
- Ops[0] =
- Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
- Ops[1] =
- Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int32Ty, 4));
+ Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int32Ty, 4));
+ Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int32Ty, 4));
// Create a shuffle mask
int ElemIdx0;
@@ -15701,40 +17365,44 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
}
int ShuffleElts[4] = {ElemIdx0, ElemIdx1, ElemIdx2, ElemIdx3};
- Value *ShuffleCall =
- Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
+ Value *ShuffleCall = Builder.CreateShuffleVector(Op0, Op1, ShuffleElts);
QualType BIRetType = E->getType();
auto RetTy = ConvertType(BIRetType);
return Builder.CreateBitCast(ShuffleCall, RetTy);
}
case PPC::BI__builtin_pack_vector_int128: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
bool isLittleEndian = getTarget().isLittleEndian();
- Value *UndefValue =
- llvm::UndefValue::get(llvm::FixedVectorType::get(Ops[0]->getType(), 2));
+ Value *PoisonValue =
+ llvm::PoisonValue::get(llvm::FixedVectorType::get(Op0->getType(), 2));
Value *Res = Builder.CreateInsertElement(
- UndefValue, Ops[0], (uint64_t)(isLittleEndian ? 1 : 0));
- Res = Builder.CreateInsertElement(Res, Ops[1],
+ PoisonValue, Op0, (uint64_t)(isLittleEndian ? 1 : 0));
+ Res = Builder.CreateInsertElement(Res, Op1,
(uint64_t)(isLittleEndian ? 0 : 1));
return Builder.CreateBitCast(Res, ConvertType(E->getType()));
}
case PPC::BI__builtin_unpack_vector_int128: {
- ConstantInt *Index = cast<ConstantInt>(Ops[1]);
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ ConstantInt *Index = cast<ConstantInt>(Op1);
Value *Unpacked = Builder.CreateBitCast(
- Ops[0], llvm::FixedVectorType::get(ConvertType(E->getType()), 2));
+ Op0, llvm::FixedVectorType::get(ConvertType(E->getType()), 2));
if (getTarget().isLittleEndian())
- Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue());
+ Index =
+ ConstantInt::get(Index->getIntegerType(), 1 - Index->getZExtValue());
return Builder.CreateExtractElement(Unpacked, Index);
}
case PPC::BI__builtin_ppc_sthcx: {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_sthcx);
- Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
- Ops[1] = Builder.CreateSExt(Ops[1], Int32Ty);
- return Builder.CreateCall(F, Ops);
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = Builder.CreateSExt(EmitScalarExpr(E->getArg(1)), Int32Ty);
+ return Builder.CreateCall(F, {Op0, Op1});
}
// The PPC MMA builtins take a pointer to a __vector_quad as an argument.
@@ -15743,10 +17411,16 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
// use custom code generation to expand a builtin call with a pointer to a
// load (if the corresponding instruction accumulates its result) followed by
// the call to the intrinsic and a store of the result.
-#define CUSTOM_BUILTIN(Name, Intr, Types, Accumulate) \
+#define CUSTOM_BUILTIN(Name, Intr, Types, Accumulate, Feature) \
case PPC::BI__builtin_##Name:
#include "clang/Basic/BuiltinsPPC.def"
{
+ SmallVector<Value *, 4> Ops;
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
+ if (E->getArg(i)->getType()->isArrayType())
+ Ops.push_back(EmitArrayToPointerDecay(E->getArg(i)).getPointer());
+ else
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
// The first argument of these two builtins is a pointer used to store their
// result. However, the llvm intrinsics return their result in multiple
// return values. So, here we emit code extracting these values from the
@@ -15765,7 +17439,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Value *Vec = Builder.CreateLoad(Addr);
Value *Call = Builder.CreateCall(F, {Vec});
llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, 16);
- Value *Ptr = Builder.CreateBitCast(Ops[0], VTy->getPointerTo());
+ Value *Ptr = Ops[0];
for (unsigned i=0; i<NumVecs; i++) {
Value *Vec = Builder.CreateExtractValue(Call, i);
llvm::ConstantInt* Index = llvm::ConstantInt::get(IntTy, i);
@@ -15774,9 +17448,20 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
}
return Call;
}
+ if (BuiltinID == PPC::BI__builtin_vsx_build_pair ||
+ BuiltinID == PPC::BI__builtin_mma_build_acc) {
+ // Reverse the order of the operands for LE, so the
+ // same builtin call can be used on both LE and BE
+ // without the need for the programmer to swap operands.
+ // The operands are reversed starting from the second argument,
+ // the first operand is the pointer to the pair/accumulator
+ // that is being built.
+ if (getTarget().isLittleEndian())
+ std::reverse(Ops.begin() + 1, Ops.end());
+ }
bool Accumulate;
switch (BuiltinID) {
- #define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \
+ #define CUSTOM_BUILTIN(Name, Intr, Types, Acc, Feature) \
case PPC::BI__builtin_##Name: \
ID = Intrinsic::ppc_##Intr; \
Accumulate = Acc; \
@@ -15789,10 +17474,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
BuiltinID == PPC::BI__builtin_mma_stxvp) {
if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
BuiltinID == PPC::BI__builtin_mma_lxvp) {
- Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]);
} else {
- Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]);
}
Ops.pop_back();
@@ -15819,10 +17502,11 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Value *OldVal = Builder.CreateLoad(OldValAddr);
QualType AtomicTy = E->getArg(0)->getType()->getPointeeType();
LValue LV = MakeAddrLValue(Addr, AtomicTy);
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
auto Pair = EmitAtomicCompareExchange(
- LV, RValue::get(OldVal), RValue::get(Ops[2]), E->getExprLoc(),
+ LV, RValue::get(OldVal), RValue::get(Op2), E->getExprLoc(),
llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Monotonic, true);
- // Unlike c11's atomic_compare_exchange, accroding to
+ // Unlike c11's atomic_compare_exchange, according to
// https://www.ibm.com/docs/en/xl-c-and-cpp-aix/16.1?topic=functions-compare-swap-compare-swaplp
// > In either case, the contents of the memory location specified by addr
// > are copied into the memory location specified by old_val_addr.
@@ -15831,7 +17515,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
// store.
Value *LoadedVal = Pair.first.getScalarVal();
Builder.CreateStore(LoadedVal, OldValAddr);
- return Pair.second;
+ return Builder.CreateZExt(Pair.second, Builder.getInt32Ty());
}
case PPC::BI__builtin_ppc_fetch_and_add:
case PPC::BI__builtin_ppc_fetch_and_addlp: {
@@ -15860,38 +17544,45 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_ppc_lbarx:
return emitPPCLoadReserveIntrinsic(*this, BuiltinID, E);
case PPC::BI__builtin_ppc_mfspr: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32
? Int32Ty
: Int64Ty;
Function *F = CGM.getIntrinsic(Intrinsic::ppc_mfspr, RetType);
- return Builder.CreateCall(F, Ops);
+ return Builder.CreateCall(F, {Op0});
}
case PPC::BI__builtin_ppc_mtspr: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32
? Int32Ty
: Int64Ty;
Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtspr, RetType);
- return Builder.CreateCall(F, Ops);
+ return Builder.CreateCall(F, {Op0, Op1});
}
case PPC::BI__builtin_ppc_popcntb: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Function *F = CGM.getIntrinsic(Intrinsic::ppc_popcntb, {ArgType, ArgType});
- return Builder.CreateCall(F, Ops, "popcntb");
+ return Builder.CreateCall(F, {ArgValue}, "popcntb");
}
case PPC::BI__builtin_ppc_mtfsf: {
// The builtin takes a uint32 that needs to be cast to an
// f64 to be passed to the intrinsic.
- Value *Cast = Builder.CreateUIToFP(Ops[1], DoubleTy);
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Cast = Builder.CreateUIToFP(Op1, DoubleTy);
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtfsf);
- return Builder.CreateCall(F, {Ops[0], Cast}, "");
+ return Builder.CreateCall(F, {Op0, Cast}, "");
}
case PPC::BI__builtin_ppc_swdiv_nochk:
case PPC::BI__builtin_ppc_swdivs_nochk: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
FastMathFlags FMF = Builder.getFastMathFlags();
Builder.getFastMathFlags().setFast();
- Value *FDiv = Builder.CreateFDiv(Ops[0], Ops[1], "swdiv_nochk");
+ Value *FDiv = Builder.CreateFDiv(Op0, Op1, "swdiv_nochk");
Builder.getFastMathFlags() &= (FMF);
return FDiv;
}
@@ -15930,6 +17621,72 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
*this, E, Intrinsic::sqrt,
Intrinsic::experimental_constrained_sqrt))
.getScalarVal();
+ case PPC::BI__builtin_ppc_test_data_class: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::ppc_test_data_class, Op0->getType()),
+ {Op0, Op1}, "test_data_class");
+ }
+ case PPC::BI__builtin_ppc_maxfe: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ Value *Op3 = EmitScalarExpr(E->getArg(3));
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_maxfe),
+ {Op0, Op1, Op2, Op3});
+ }
+ case PPC::BI__builtin_ppc_maxfl: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ Value *Op3 = EmitScalarExpr(E->getArg(3));
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_maxfl),
+ {Op0, Op1, Op2, Op3});
+ }
+ case PPC::BI__builtin_ppc_maxfs: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ Value *Op3 = EmitScalarExpr(E->getArg(3));
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_maxfs),
+ {Op0, Op1, Op2, Op3});
+ }
+ case PPC::BI__builtin_ppc_minfe: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ Value *Op3 = EmitScalarExpr(E->getArg(3));
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_minfe),
+ {Op0, Op1, Op2, Op3});
+ }
+ case PPC::BI__builtin_ppc_minfl: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ Value *Op3 = EmitScalarExpr(E->getArg(3));
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_minfl),
+ {Op0, Op1, Op2, Op3});
+ }
+ case PPC::BI__builtin_ppc_minfs: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ Value *Op3 = EmitScalarExpr(E->getArg(3));
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_minfs),
+ {Op0, Op1, Op2, Op3});
+ }
+ case PPC::BI__builtin_ppc_swdiv:
+ case PPC::BI__builtin_ppc_swdivs: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ return Builder.CreateFDiv(Op0, Op1, "swdiv");
+ }
+ case PPC::BI__builtin_ppc_set_fpscr_rn:
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_setrnd),
+ {EmitScalarExpr(E->getArg(0))});
+ case PPC::BI__builtin_ppc_mffs:
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_readflm));
}
}
@@ -15940,11 +17697,9 @@ Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF,
const CallExpr *E = nullptr) {
auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_dispatch_ptr);
auto *Call = CGF.Builder.CreateCall(F);
- Call->addAttribute(
- AttributeList::ReturnIndex,
+ Call->addRetAttr(
Attribute::getWithDereferenceableBytes(Call->getContext(), 64));
- Call->addAttribute(AttributeList::ReturnIndex,
- Attribute::getWithAlignment(Call->getContext(), Align(4)));
+ Call->addRetAttr(Attribute::getWithAlignment(Call->getContext(), Align(4)));
if (!E)
return Call;
QualType BuiltinRetType = E->getType();
@@ -15954,23 +17709,84 @@ Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF,
return CGF.Builder.CreateAddrSpaceCast(Call, RetTy);
}
+Value *EmitAMDGPUImplicitArgPtr(CodeGenFunction &CGF) {
+ auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_implicitarg_ptr);
+ auto *Call = CGF.Builder.CreateCall(F);
+ Call->addRetAttr(
+ Attribute::getWithDereferenceableBytes(Call->getContext(), 256));
+ Call->addRetAttr(Attribute::getWithAlignment(Call->getContext(), Align(8)));
+ return Call;
+}
+
// \p Index is 0, 1, and 2 for x, y, and z dimension, respectively.
+/// Emit code based on Code Object ABI version.
+/// COV_4 : Emit code to use dispatch ptr
+/// COV_5 : Emit code to use implicitarg ptr
+/// COV_NONE : Emit code to load a global variable "__oclc_ABI_version"
+/// and use its value for COV_4 or COV_5 approach. It is used for
+/// compiling device libraries in an ABI-agnostic way.
+///
+/// Note: "__oclc_ABI_version" is supposed to be emitted and intialized by
+/// clang during compilation of user code.
Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
- const unsigned XOffset = 4;
- auto *DP = EmitAMDGPUDispatchPtr(CGF);
- // Indexing the HSA kernel_dispatch_packet struct.
- auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 2);
- auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset);
- auto *DstTy =
- CGF.Int16Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
- auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
- auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(2)));
+ llvm::LoadInst *LD;
+
+ auto Cov = CGF.getTarget().getTargetOpts().CodeObjectVersion;
+
+ if (Cov == CodeObjectVersionKind::COV_None) {
+ StringRef Name = "__oclc_ABI_version";
+ auto *ABIVersionC = CGF.CGM.getModule().getNamedGlobal(Name);
+ if (!ABIVersionC)
+ ABIVersionC = new llvm::GlobalVariable(
+ CGF.CGM.getModule(), CGF.Int32Ty, false,
+ llvm::GlobalValue::ExternalLinkage, nullptr, Name, nullptr,
+ llvm::GlobalVariable::NotThreadLocal,
+ CGF.CGM.getContext().getTargetAddressSpace(LangAS::opencl_constant));
+
+ // This load will be eliminated by the IPSCCP because it is constant
+ // weak_odr without externally_initialized. Either changing it to weak or
+ // adding externally_initialized will keep the load.
+ Value *ABIVersion = CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, ABIVersionC,
+ CGF.CGM.getIntAlign());
+
+ Value *IsCOV5 = CGF.Builder.CreateICmpSGE(
+ ABIVersion,
+ llvm::ConstantInt::get(CGF.Int32Ty, CodeObjectVersionKind::COV_5));
+
+ // Indexing the implicit kernarg segment.
+ Value *ImplicitGEP = CGF.Builder.CreateConstGEP1_32(
+ CGF.Int8Ty, EmitAMDGPUImplicitArgPtr(CGF), 12 + Index * 2);
+
+ // Indexing the HSA kernel_dispatch_packet struct.
+ Value *DispatchGEP = CGF.Builder.CreateConstGEP1_32(
+ CGF.Int8Ty, EmitAMDGPUDispatchPtr(CGF), 4 + Index * 2);
+
+ auto Result = CGF.Builder.CreateSelect(IsCOV5, ImplicitGEP, DispatchGEP);
+ LD = CGF.Builder.CreateLoad(
+ Address(Result, CGF.Int16Ty, CharUnits::fromQuantity(2)));
+ } else {
+ Value *GEP = nullptr;
+ if (Cov == CodeObjectVersionKind::COV_5) {
+ // Indexing the implicit kernarg segment.
+ GEP = CGF.Builder.CreateConstGEP1_32(
+ CGF.Int8Ty, EmitAMDGPUImplicitArgPtr(CGF), 12 + Index * 2);
+ } else {
+ // Indexing the HSA kernel_dispatch_packet struct.
+ GEP = CGF.Builder.CreateConstGEP1_32(
+ CGF.Int8Ty, EmitAMDGPUDispatchPtr(CGF), 4 + Index * 2);
+ }
+ LD = CGF.Builder.CreateLoad(
+ Address(GEP, CGF.Int16Ty, CharUnits::fromQuantity(2)));
+ }
+
llvm::MDBuilder MDHelper(CGF.getLLVMContext());
llvm::MDNode *RNode = MDHelper.createRange(APInt(16, 1),
APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1));
LD->setMetadata(llvm::LLVMContext::MD_range, RNode);
+ LD->setMetadata(llvm::LLVMContext::MD_noundef,
+ llvm::MDNode::get(CGF.getLLVMContext(), std::nullopt));
LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
- llvm::MDNode::get(CGF.getLLVMContext(), None));
+ llvm::MDNode::get(CGF.getLLVMContext(), std::nullopt));
return LD;
}
@@ -15981,12 +17797,10 @@ Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) {
// Indexing the HSA kernel_dispatch_packet struct.
auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 4);
auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset);
- auto *DstTy =
- CGF.Int32Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
- auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
- auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(4)));
+ auto *LD = CGF.Builder.CreateLoad(
+ Address(GEP, CGF.Int32Ty, CharUnits::fromQuantity(4)));
LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
- llvm::MDNode::get(CGF.getLLVMContext(), None));
+ llvm::MDNode::get(CGF.getLLVMContext(), std::nullopt));
return LD;
}
} // namespace
@@ -15997,39 +17811,52 @@ Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) {
// it into LLVM's memory ordering specifier using atomic C ABI, and writes
// to \p AO. \p Scope takes a const char * and converts it into AMDGCN
// specific SyncScopeID and writes it to \p SSID.
-bool CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
+void CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
llvm::AtomicOrdering &AO,
llvm::SyncScope::ID &SSID) {
- if (isa<llvm::ConstantInt>(Order)) {
- int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+
+ // Map C11/C++11 memory ordering to LLVM memory ordering
+ assert(llvm::isValidAtomicOrderingCABI(ord));
+ switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
+ case llvm::AtomicOrderingCABI::acquire:
+ case llvm::AtomicOrderingCABI::consume:
+ AO = llvm::AtomicOrdering::Acquire;
+ break;
+ case llvm::AtomicOrderingCABI::release:
+ AO = llvm::AtomicOrdering::Release;
+ break;
+ case llvm::AtomicOrderingCABI::acq_rel:
+ AO = llvm::AtomicOrdering::AcquireRelease;
+ break;
+ case llvm::AtomicOrderingCABI::seq_cst:
+ AO = llvm::AtomicOrdering::SequentiallyConsistent;
+ break;
+ case llvm::AtomicOrderingCABI::relaxed:
+ AO = llvm::AtomicOrdering::Monotonic;
+ break;
+ }
- // Map C11/C++11 memory ordering to LLVM memory ordering
- assert(llvm::isValidAtomicOrderingCABI(ord));
- switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
- case llvm::AtomicOrderingCABI::acquire:
- case llvm::AtomicOrderingCABI::consume:
- AO = llvm::AtomicOrdering::Acquire;
- break;
- case llvm::AtomicOrderingCABI::release:
- AO = llvm::AtomicOrdering::Release;
- break;
- case llvm::AtomicOrderingCABI::acq_rel:
- AO = llvm::AtomicOrdering::AcquireRelease;
- break;
- case llvm::AtomicOrderingCABI::seq_cst:
- AO = llvm::AtomicOrdering::SequentiallyConsistent;
- break;
- case llvm::AtomicOrderingCABI::relaxed:
- AO = llvm::AtomicOrdering::Monotonic;
- break;
- }
+ StringRef scp;
+ llvm::getConstantStringInfo(Scope, scp);
+ SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
+}
- StringRef scp;
- llvm::getConstantStringInfo(Scope, scp);
- SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
- return true;
- }
- return false;
+llvm::Value *CodeGenFunction::EmitScalarOrConstFoldImmArg(unsigned ICEArguments,
+ unsigned Idx,
+ const CallExpr *E) {
+ llvm::Value *Arg = nullptr;
+ if ((ICEArguments & (1 << Idx)) == 0) {
+ Arg = EmitScalarExpr(E->getArg(Idx));
+ } else {
+ // If this is required to be a constant, constant fold it so that we
+ // know that the generated intrinsic gets a ConstantInt.
+ std::optional<llvm::APSInt> Result =
+ E->getArg(Idx)->getIntegerConstantExpr(getContext());
+ assert(Result && "Expected argument to be a constant");
+ Arg = llvm::ConstantInt::get(getLLVMContext(), *Result);
+ }
+ return Arg;
}
Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
@@ -16056,8 +17883,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
- llvm::Type *RealFlagType
- = FlagOutPtr.getPointer()->getType()->getPointerElementType();
+ llvm::Type *RealFlagType = FlagOutPtr.getElementType();
llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
Builder.CreateStore(FlagExt, FlagOutPtr);
@@ -16083,11 +17909,18 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_mov_dpp:
case AMDGPU::BI__builtin_amdgcn_update_dpp: {
llvm::SmallVector<llvm::Value *, 6> Args;
- for (unsigned I = 0; I != E->getNumArgs(); ++I)
- Args.push_back(EmitScalarExpr(E->getArg(I)));
+ // Find out if any arguments are required to be integer constant
+ // expressions.
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ assert(Error == ASTContext::GE_None && "Should not codegen an error");
+ for (unsigned I = 0; I != E->getNumArgs(); ++I) {
+ Args.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, I, E));
+ }
assert(Args.size() == 5 || Args.size() == 6);
if (Args.size() == 5)
- Args.insert(Args.begin(), llvm::UndefValue::get(Args[0]->getType()));
+ Args.insert(Args.begin(), llvm::PoisonValue::get(Args[0]->getType()));
Function *F =
CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType());
return Builder.CreateCall(F, Args);
@@ -16122,12 +17955,29 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
case AMDGPU::BI__builtin_amdgcn_dispatch_ptr:
return EmitAMDGPUDispatchPtr(*this, E);
+ case AMDGPU::BI__builtin_amdgcn_logf:
+ return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log);
+ case AMDGPU::BI__builtin_amdgcn_exp2f:
+ return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_exp2);
case AMDGPU::BI__builtin_amdgcn_log_clampf:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
case AMDGPU::BI__builtin_amdgcn_ldexp:
- case AMDGPU::BI__builtin_amdgcn_ldexpf:
- case AMDGPU::BI__builtin_amdgcn_ldexph:
- return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp);
+ case AMDGPU::BI__builtin_amdgcn_ldexpf: {
+ llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
+ llvm::Function *F =
+ CGM.getIntrinsic(Intrinsic::ldexp, {Src0->getType(), Src1->getType()});
+ return Builder.CreateCall(F, {Src0, Src1});
+ }
+ case AMDGPU::BI__builtin_amdgcn_ldexph: {
+ // The raw instruction has a different behavior for out of bounds exponent
+ // values (implicit truncation instead of saturate to short_min/short_max).
+ llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
+ llvm::Function *F =
+ CGM.getIntrinsic(Intrinsic::ldexp, {Src0->getType(), Int16Ty});
+ return Builder.CreateCall(F, {Src0, Builder.CreateTrunc(Src1, Int16Ty)});
+ }
case AMDGPU::BI__builtin_amdgcn_frexp_mant:
case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
case AMDGPU::BI__builtin_amdgcn_frexp_manth:
@@ -16155,6 +18005,13 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_ubfe);
case AMDGPU::BI__builtin_amdgcn_sbfe:
return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_sbfe);
+ case AMDGPU::BI__builtin_amdgcn_ballot_w32:
+ case AMDGPU::BI__builtin_amdgcn_ballot_w64: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ llvm::Value *Src = EmitScalarExpr(E->getArg(0));
+ Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_ballot, { ResultType });
+ return Builder.CreateCall(F, { Src });
+ }
case AMDGPU::BI__builtin_amdgcn_uicmp:
case AMDGPU::BI__builtin_amdgcn_uicmpl:
case AMDGPU::BI__builtin_amdgcn_sicmp:
@@ -16220,21 +18077,152 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
Src0 = Builder.CreatePointerBitCastOrAddrSpaceCast(Src0, PTy);
return Builder.CreateCall(F, { Src0, Src1, Src2, Src3, Src4 });
}
- case AMDGPU::BI__builtin_amdgcn_read_exec: {
- CallInst *CI = cast<CallInst>(
- EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, NormalRead, "exec"));
- CI->setConvergent();
- return CI;
+ case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64:
+ case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32:
+ case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16:
+ case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64:
+ case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64:
+ case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64:
+ case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64:
+ case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64:
+ case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f32:
+ case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2f16: {
+ Intrinsic::ID IID;
+ llvm::Type *ArgTy = llvm::Type::getDoubleTy(getLLVMContext());
+ switch (BuiltinID) {
+ case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32:
+ ArgTy = llvm::Type::getFloatTy(getLLVMContext());
+ IID = Intrinsic::amdgcn_global_atomic_fadd;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16:
+ ArgTy = llvm::FixedVectorType::get(
+ llvm::Type::getHalfTy(getLLVMContext()), 2);
+ IID = Intrinsic::amdgcn_global_atomic_fadd;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64:
+ IID = Intrinsic::amdgcn_global_atomic_fadd;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64:
+ IID = Intrinsic::amdgcn_global_atomic_fmin;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64:
+ IID = Intrinsic::amdgcn_global_atomic_fmax;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64:
+ IID = Intrinsic::amdgcn_flat_atomic_fadd;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64:
+ IID = Intrinsic::amdgcn_flat_atomic_fmin;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64:
+ IID = Intrinsic::amdgcn_flat_atomic_fmax;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f32:
+ ArgTy = llvm::Type::getFloatTy(getLLVMContext());
+ IID = Intrinsic::amdgcn_flat_atomic_fadd;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2f16:
+ ArgTy = llvm::FixedVectorType::get(
+ llvm::Type::getHalfTy(getLLVMContext()), 2);
+ IID = Intrinsic::amdgcn_flat_atomic_fadd;
+ break;
+ }
+ llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Val = EmitScalarExpr(E->getArg(1));
+ llvm::Function *F =
+ CGM.getIntrinsic(IID, {ArgTy, Addr->getType(), Val->getType()});
+ return Builder.CreateCall(F, {Addr, Val});
}
- case AMDGPU::BI__builtin_amdgcn_read_exec_lo:
- case AMDGPU::BI__builtin_amdgcn_read_exec_hi: {
- StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ?
- "exec_lo" : "exec_hi";
- CallInst *CI = cast<CallInst>(
- EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, NormalRead, RegName));
- CI->setConvergent();
- return CI;
+ case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2bf16:
+ case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2bf16: {
+ Intrinsic::ID IID;
+ switch (BuiltinID) {
+ case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2bf16:
+ IID = Intrinsic::amdgcn_global_atomic_fadd_v2bf16;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2bf16:
+ IID = Intrinsic::amdgcn_flat_atomic_fadd_v2bf16;
+ break;
+ }
+ llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Val = EmitScalarExpr(E->getArg(1));
+ llvm::Function *F = CGM.getIntrinsic(IID, {Addr->getType()});
+ return Builder.CreateCall(F, {Addr, Val});
+ }
+ case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
+ case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32:
+ case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2f16: {
+ Intrinsic::ID IID;
+ llvm::Type *ArgTy;
+ switch (BuiltinID) {
+ case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32:
+ ArgTy = llvm::Type::getFloatTy(getLLVMContext());
+ IID = Intrinsic::amdgcn_ds_fadd;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
+ ArgTy = llvm::Type::getDoubleTy(getLLVMContext());
+ IID = Intrinsic::amdgcn_ds_fadd;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_v2f16:
+ ArgTy = llvm::FixedVectorType::get(
+ llvm::Type::getHalfTy(getLLVMContext()), 2);
+ IID = Intrinsic::amdgcn_ds_fadd;
+ break;
+ }
+ llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Val = EmitScalarExpr(E->getArg(1));
+ llvm::Constant *ZeroI32 = llvm::ConstantInt::getIntegerValue(
+ llvm::Type::getInt32Ty(getLLVMContext()), APInt(32, 0, true));
+ llvm::Constant *ZeroI1 = llvm::ConstantInt::getIntegerValue(
+ llvm::Type::getInt1Ty(getLLVMContext()), APInt(1, 0));
+ llvm::Function *F = CGM.getIntrinsic(IID, {ArgTy});
+ return Builder.CreateCall(F, {Addr, Val, ZeroI32, ZeroI32, ZeroI1});
+ }
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_i32:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_v2i32:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_v4f16:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_v4i16:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_v8f16:
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_v8i16: {
+
+ llvm::Type *ArgTy;
+ switch (BuiltinID) {
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_i32:
+ ArgTy = llvm::Type::getInt32Ty(getLLVMContext());
+ break;
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_v2i32:
+ ArgTy = llvm::FixedVectorType::get(
+ llvm::Type::getInt32Ty(getLLVMContext()), 2);
+ break;
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_v4f16:
+ ArgTy = llvm::FixedVectorType::get(
+ llvm::Type::getHalfTy(getLLVMContext()), 4);
+ break;
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_v4i16:
+ ArgTy = llvm::FixedVectorType::get(
+ llvm::Type::getInt16Ty(getLLVMContext()), 4);
+ break;
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_v8f16:
+ ArgTy = llvm::FixedVectorType::get(
+ llvm::Type::getHalfTy(getLLVMContext()), 8);
+ break;
+ case AMDGPU::BI__builtin_amdgcn_global_load_tr_v8i16:
+ ArgTy = llvm::FixedVectorType::get(
+ llvm::Type::getInt16Ty(getLLVMContext()), 8);
+ break;
+ }
+
+ llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
+ llvm::Function *F =
+ CGM.getIntrinsic(Intrinsic::amdgcn_global_load_tr, {ArgTy});
+ return Builder.CreateCall(F, {Addr});
}
+ case AMDGPU::BI__builtin_amdgcn_read_exec:
+ return EmitAMDGCNBallotForExec(*this, E, Int64Ty, Int64Ty, false);
+ case AMDGPU::BI__builtin_amdgcn_read_exec_lo:
+ return EmitAMDGCNBallotForExec(*this, E, Int32Ty, Int32Ty, false);
+ case AMDGPU::BI__builtin_amdgcn_read_exec_hi:
+ return EmitAMDGCNBallotForExec(*this, E, Int64Ty, Int64Ty, true);
case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray:
case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_h:
case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_l:
@@ -16246,12 +18234,264 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Value *RayInverseDir = EmitScalarExpr(E->getArg(4));
llvm::Value *TextureDescr = EmitScalarExpr(E->getArg(5));
+ // The builtins take these arguments as vec4 where the last element is
+ // ignored. The intrinsic takes them as vec3.
+ RayOrigin = Builder.CreateShuffleVector(RayOrigin, RayOrigin,
+ ArrayRef<int>{0, 1, 2});
+ RayDir =
+ Builder.CreateShuffleVector(RayDir, RayDir, ArrayRef<int>{0, 1, 2});
+ RayInverseDir = Builder.CreateShuffleVector(RayInverseDir, RayInverseDir,
+ ArrayRef<int>{0, 1, 2});
+
Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_image_bvh_intersect_ray,
{NodePtr->getType(), RayDir->getType()});
return Builder.CreateCall(F, {NodePtr, RayExtent, RayOrigin, RayDir,
RayInverseDir, TextureDescr});
}
+ case AMDGPU::BI__builtin_amdgcn_ds_bvh_stack_rtn: {
+ SmallVector<Value *, 4> Args;
+ for (int i = 0, e = E->getNumArgs(); i != e; ++i)
+ Args.push_back(EmitScalarExpr(E->getArg(i)));
+
+ Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_ds_bvh_stack_rtn);
+ Value *Call = Builder.CreateCall(F, Args);
+ Value *Rtn = Builder.CreateExtractValue(Call, 0);
+ Value *A = Builder.CreateExtractValue(Call, 1);
+ llvm::Type *RetTy = ConvertType(E->getType());
+ Value *I0 = Builder.CreateInsertElement(PoisonValue::get(RetTy), Rtn,
+ (uint64_t)0);
+ return Builder.CreateInsertElement(I0, A, 1);
+ }
+
+ case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64:
+ case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w64:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w64:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w64:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w64:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64:
+ case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w32_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w64_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w32_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w64_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_fp8_w32_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_fp8_w64_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_bf8_w32_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_bf8_w64_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_fp8_w32_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_fp8_w64_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_bf8_w32_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_bf8_w64_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x32_iu4_w32_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x32_iu4_w64_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_f16_w32:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_f16_w64:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf16_w32:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf16_w64:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f16_16x16x32_f16_w32:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f16_16x16x32_f16_w64:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_bf16_16x16x32_bf16_w32:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_bf16_16x16x32_bf16_w64:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu8_w32:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu8_w64:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu4_w32:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu4_w64:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x64_iu4_w32:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x64_iu4_w64:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_fp8_w32:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_fp8_w64:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_bf8_w32:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_bf8_w64:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_fp8_w32:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_fp8_w64:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_bf8_w32:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_bf8_w64: {
+
+ // These operations perform a matrix multiplication and accumulation of
+ // the form:
+ // D = A * B + C
+ // We need to specify one type for matrices AB and one for matrices CD.
+ // Sparse matrix operations can have different types for A and B as well as
+ // an additional type for sparsity index.
+ // Destination type should be put before types used for source operands.
+ SmallVector<unsigned, 2> ArgsForMatchingMatrixTypes;
+ // On GFX12, the intrinsics with 16-bit accumulator use a packed layout.
+ // There is no need for the variable opsel argument, so always set it to
+ // "false".
+ bool AppendFalseForOpselArg = false;
+ unsigned BuiltinWMMAOp;
+
+ switch (BuiltinID) {
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w64:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w32_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w64_gfx12:
+ ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB
+ BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_f16;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64_gfx12:
+ ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB
+ BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_bf16;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w32_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w64_gfx12:
+ AppendFalseForOpselArg = true;
+ LLVM_FALLTHROUGH;
+ case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w64:
+ ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB
+ BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f16_16x16x16_f16;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64_gfx12:
+ AppendFalseForOpselArg = true;
+ LLVM_FALLTHROUGH;
+ case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64:
+ ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB
+ BuiltinWMMAOp = Intrinsic::amdgcn_wmma_bf16_16x16x16_bf16;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_tied_w64:
+ ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB
+ BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f16_16x16x16_f16_tied;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_tied_w64:
+ ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB
+ BuiltinWMMAOp = Intrinsic::amdgcn_wmma_bf16_16x16x16_bf16_tied;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64_gfx12:
+ ArgsForMatchingMatrixTypes = {4, 1}; // CD, AB
+ BuiltinWMMAOp = Intrinsic::amdgcn_wmma_i32_16x16x16_iu8;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64_gfx12:
+ ArgsForMatchingMatrixTypes = {4, 1}; // CD, AB
+ BuiltinWMMAOp = Intrinsic::amdgcn_wmma_i32_16x16x16_iu4;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_fp8_w32_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_fp8_w64_gfx12:
+ ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB
+ BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_fp8_fp8;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_bf8_w32_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_fp8_bf8_w64_gfx12:
+ ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB
+ BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_fp8_bf8;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_fp8_w32_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_fp8_w64_gfx12:
+ ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB
+ BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_bf8_fp8;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_bf8_w32_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf8_bf8_w64_gfx12:
+ ArgsForMatchingMatrixTypes = {2, 0}; // CD, AB
+ BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_bf8_bf8;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x32_iu4_w32_gfx12:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x32_iu4_w64_gfx12:
+ ArgsForMatchingMatrixTypes = {4, 1}; // CD, AB
+ BuiltinWMMAOp = Intrinsic::amdgcn_wmma_i32_16x16x32_iu4;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_f16_w32:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_f16_w64:
+ ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index
+ BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x32_f16;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf16_w32:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf16_w64:
+ ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index
+ BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x32_bf16;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f16_16x16x32_f16_w32:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f16_16x16x32_f16_w64:
+ ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index
+ BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f16_16x16x32_f16;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_swmmac_bf16_16x16x32_bf16_w32:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_bf16_16x16x32_bf16_w64:
+ ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index
+ BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_bf16_16x16x32_bf16;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu8_w32:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu8_w64:
+ ArgsForMatchingMatrixTypes = {4, 1, 3, 5}; // CD, A, B, Index
+ BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_i32_16x16x32_iu8;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu4_w32:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x32_iu4_w64:
+ ArgsForMatchingMatrixTypes = {4, 1, 3, 5}; // CD, A, B, Index
+ BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_i32_16x16x32_iu4;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x64_iu4_w32:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_i32_16x16x64_iu4_w64:
+ ArgsForMatchingMatrixTypes = {4, 1, 3, 5}; // CD, A, B, Index
+ BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_i32_16x16x64_iu4;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_fp8_w32:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_fp8_w64:
+ ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index
+ BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x32_fp8_fp8;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_bf8_w32:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_fp8_bf8_w64:
+ ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index
+ BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x32_fp8_bf8;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_fp8_w32:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_fp8_w64:
+ ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index
+ BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x32_bf8_fp8;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_bf8_w32:
+ case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x32_bf8_bf8_w64:
+ ArgsForMatchingMatrixTypes = {2, 0, 1, 3}; // CD, A, B, Index
+ BuiltinWMMAOp = Intrinsic::amdgcn_swmmac_f32_16x16x32_bf8_bf8;
+ break;
+ }
+
+ SmallVector<Value *, 6> Args;
+ for (int i = 0, e = E->getNumArgs(); i != e; ++i)
+ Args.push_back(EmitScalarExpr(E->getArg(i)));
+ if (AppendFalseForOpselArg)
+ Args.push_back(Builder.getFalse());
+
+ SmallVector<llvm::Type *, 6> ArgTypes;
+ for (auto ArgIdx : ArgsForMatchingMatrixTypes)
+ ArgTypes.push_back(Args[ArgIdx]->getType());
+
+ Function *F = CGM.getIntrinsic(BuiltinWMMAOp, ArgTypes);
+ return Builder.CreateCall(F, Args);
+ }
+
// amdgcn workitem
case AMDGPU::BI__builtin_amdgcn_workitem_id_x:
return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024);
@@ -16293,53 +18533,51 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType());
return Builder.CreateCall(F, { Src0, Src1, Src2 });
}
-
case AMDGPU::BI__builtin_amdgcn_fence: {
- if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)),
- EmitScalarExpr(E->getArg(1)), AO, SSID))
- return Builder.CreateFence(AO, SSID);
- LLVM_FALLTHROUGH;
+ ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)),
+ EmitScalarExpr(E->getArg(1)), AO, SSID);
+ return Builder.CreateFence(AO, SSID);
}
case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
case AMDGPU::BI__builtin_amdgcn_atomic_dec64: {
- unsigned BuiltinAtomicOp;
- llvm::Type *ResultType = ConvertType(E->getType());
-
+ llvm::AtomicRMWInst::BinOp BinOp;
switch (BuiltinID) {
case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
- BuiltinAtomicOp = Intrinsic::amdgcn_atomic_inc;
+ BinOp = llvm::AtomicRMWInst::UIncWrap;
break;
case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
- BuiltinAtomicOp = Intrinsic::amdgcn_atomic_dec;
+ BinOp = llvm::AtomicRMWInst::UDecWrap;
break;
}
- Value *Ptr = EmitScalarExpr(E->getArg(0));
+ Address Ptr = CheckAtomicAlignment(*this, E);
Value *Val = EmitScalarExpr(E->getArg(1));
- llvm::Function *F =
- CGM.getIntrinsic(BuiltinAtomicOp, {ResultType, Ptr->getType()});
-
- if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
- EmitScalarExpr(E->getArg(3)), AO, SSID)) {
+ ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
+ EmitScalarExpr(E->getArg(3)), AO, SSID);
- // llvm.amdgcn.atomic.inc and llvm.amdgcn.atomic.dec expects ordering and
- // scope as unsigned values
- Value *MemOrder = Builder.getInt32(static_cast<int>(AO));
- Value *MemScope = Builder.getInt32(static_cast<int>(SSID));
-
- QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
- bool Volatile =
- PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
- Value *IsVolatile = Builder.getInt1(static_cast<bool>(Volatile));
+ QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
+ bool Volatile =
+ PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
- return Builder.CreateCall(F, {Ptr, Val, MemOrder, MemScope, IsVolatile});
- }
- LLVM_FALLTHROUGH;
+ llvm::AtomicRMWInst *RMW =
+ Builder.CreateAtomicRMW(BinOp, Ptr, Val, AO, SSID);
+ if (Volatile)
+ RMW->setVolatile(true);
+ return RMW;
+ }
+ case AMDGPU::BI__builtin_amdgcn_s_sendmsg_rtn:
+ case AMDGPU::BI__builtin_amdgcn_s_sendmsg_rtnl: {
+ llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ResultType = ConvertType(E->getType());
+ // s_sendmsg_rtn is mangled using return type only.
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::amdgcn_s_sendmsg_rtn, {ResultType});
+ return Builder.CreateCall(F, {Arg});
}
default:
return nullptr;
@@ -16434,6 +18672,32 @@ Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {X, Undef});
}
+ case SystemZ::BI__builtin_s390_verllb:
+ case SystemZ::BI__builtin_s390_verllh:
+ case SystemZ::BI__builtin_s390_verllf:
+ case SystemZ::BI__builtin_s390_verllg: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ llvm::Value *Src = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Amt = EmitScalarExpr(E->getArg(1));
+ // Splat scalar rotate amount to vector type.
+ unsigned NumElts = cast<llvm::FixedVectorType>(ResultType)->getNumElements();
+ Amt = Builder.CreateIntCast(Amt, ResultType->getScalarType(), false);
+ Amt = Builder.CreateVectorSplat(NumElts, Amt);
+ Function *F = CGM.getIntrinsic(Intrinsic::fshl, ResultType);
+ return Builder.CreateCall(F, { Src, Src, Amt });
+ }
+
+ case SystemZ::BI__builtin_s390_verllvb:
+ case SystemZ::BI__builtin_s390_verllvh:
+ case SystemZ::BI__builtin_s390_verllvf:
+ case SystemZ::BI__builtin_s390_verllvg: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ llvm::Value *Src = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Amt = EmitScalarExpr(E->getArg(1));
+ Function *F = CGM.getIntrinsic(Intrinsic::fshl, ResultType);
+ return Builder.CreateCall(F, { Src, Src, Amt });
+ }
+
case SystemZ::BI__builtin_s390_vfsqsb:
case SystemZ::BI__builtin_s390_vfsqdb: {
llvm::Type *ResultType = ConvertType(E->getType());
@@ -16858,7 +19122,7 @@ static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
case NVPTX::BI__mma_tf32_m16n16k8_ld_a:
return MMA_LDST(4, m16n16k8_load_a_tf32);
case NVPTX::BI__mma_tf32_m16n16k8_ld_b:
- return MMA_LDST(2, m16n16k8_load_b_tf32);
+ return MMA_LDST(4, m16n16k8_load_b_tf32);
case NVPTX::BI__mma_tf32_m16n16k8_ld_c:
return MMA_LDST(8, m16n16k8_load_c_f32);
@@ -17049,26 +19313,76 @@ static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
#undef MMA_VARIANTS_B1_XOR
}
+static Value *MakeLdgLdu(unsigned IntrinsicID, CodeGenFunction &CGF,
+ const CallExpr *E) {
+ Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
+ QualType ArgType = E->getArg(0)->getType();
+ clang::CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment(ArgType);
+ llvm::Type *ElemTy = CGF.ConvertTypeForMem(ArgType->getPointeeType());
+ return CGF.Builder.CreateCall(
+ CGF.CGM.getIntrinsic(IntrinsicID, {ElemTy, Ptr->getType()}),
+ {Ptr, ConstantInt::get(CGF.Builder.getInt32Ty(), Align.getQuantity())});
+}
+
+static Value *MakeScopedAtomic(unsigned IntrinsicID, CodeGenFunction &CGF,
+ const CallExpr *E) {
+ Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
+ llvm::Type *ElemTy =
+ CGF.ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
+ return CGF.Builder.CreateCall(
+ CGF.CGM.getIntrinsic(IntrinsicID, {ElemTy, Ptr->getType()}),
+ {Ptr, CGF.EmitScalarExpr(E->getArg(1))});
+}
+
+static Value *MakeCpAsync(unsigned IntrinsicID, unsigned IntrinsicIDS,
+ CodeGenFunction &CGF, const CallExpr *E,
+ int SrcSize) {
+ return E->getNumArgs() == 3
+ ? CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IntrinsicIDS),
+ {CGF.EmitScalarExpr(E->getArg(0)),
+ CGF.EmitScalarExpr(E->getArg(1)),
+ CGF.EmitScalarExpr(E->getArg(2))})
+ : CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IntrinsicID),
+ {CGF.EmitScalarExpr(E->getArg(0)),
+ CGF.EmitScalarExpr(E->getArg(1))});
+}
+
+static Value *MakeHalfType(unsigned IntrinsicID, unsigned BuiltinID,
+ const CallExpr *E, CodeGenFunction &CGF) {
+ auto &C = CGF.CGM.getContext();
+ if (!(C.getLangOpts().NativeHalfType ||
+ !C.getTargetInfo().useFP16ConversionIntrinsics())) {
+ CGF.CGM.Error(E->getExprLoc(), C.BuiltinInfo.getName(BuiltinID).str() +
+ " requires native half type support.");
+ return nullptr;
+ }
+
+ if (IntrinsicID == Intrinsic::nvvm_ldg_global_f ||
+ IntrinsicID == Intrinsic::nvvm_ldu_global_f)
+ return MakeLdgLdu(IntrinsicID, CGF, E);
+
+ SmallVector<Value *, 16> Args;
+ auto *F = CGF.CGM.getIntrinsic(IntrinsicID);
+ auto *FTy = F->getFunctionType();
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ C.GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ assert(Error == ASTContext::GE_None && "Should not codegen an error");
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
+ assert((ICEArguments & (1 << i)) == 0);
+ auto *ArgValue = CGF.EmitScalarExpr(E->getArg(i));
+ auto *PTy = FTy->getParamType(i);
+ if (PTy != ArgValue->getType())
+ ArgValue = CGF.Builder.CreateBitCast(ArgValue, PTy);
+ Args.push_back(ArgValue);
+ }
+
+ return CGF.Builder.CreateCall(F, Args);
+}
} // namespace
-Value *
-CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
- auto MakeLdg = [&](unsigned IntrinsicID) {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- clang::CharUnits Align =
- CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
- return Builder.CreateCall(
- CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
- Ptr->getType()}),
- {Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())});
- };
- auto MakeScopedAtomic = [&](unsigned IntrinsicID) {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- return Builder.CreateCall(
- CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
- Ptr->getType()}),
- {Ptr, EmitScalarExpr(E->getArg(1))});
- };
+Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
switch (BuiltinID) {
case NVPTX::BI__nvvm_atom_add_gen_i:
case NVPTX::BI__nvvm_atom_add_gen_l:
@@ -17129,9 +19443,10 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
case NVPTX::BI__nvvm_atom_add_gen_f:
case NVPTX::BI__nvvm_atom_add_gen_d: {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
+ Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
Value *Val = EmitScalarExpr(E->getArg(1));
- return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, Ptr, Val,
+
+ return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, DestAddr, Val,
AtomicOrdering::SequentiallyConsistent);
}
@@ -17152,8 +19467,11 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
}
case NVPTX::BI__nvvm_ldg_c:
+ case NVPTX::BI__nvvm_ldg_sc:
case NVPTX::BI__nvvm_ldg_c2:
+ case NVPTX::BI__nvvm_ldg_sc2:
case NVPTX::BI__nvvm_ldg_c4:
+ case NVPTX::BI__nvvm_ldg_sc4:
case NVPTX::BI__nvvm_ldg_s:
case NVPTX::BI__nvvm_ldg_s2:
case NVPTX::BI__nvvm_ldg_s4:
@@ -17161,6 +19479,7 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
case NVPTX::BI__nvvm_ldg_i2:
case NVPTX::BI__nvvm_ldg_i4:
case NVPTX::BI__nvvm_ldg_l:
+ case NVPTX::BI__nvvm_ldg_l2:
case NVPTX::BI__nvvm_ldg_ll:
case NVPTX::BI__nvvm_ldg_ll2:
case NVPTX::BI__nvvm_ldg_uc:
@@ -17173,119 +19492,159 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
case NVPTX::BI__nvvm_ldg_ui2:
case NVPTX::BI__nvvm_ldg_ui4:
case NVPTX::BI__nvvm_ldg_ul:
+ case NVPTX::BI__nvvm_ldg_ul2:
case NVPTX::BI__nvvm_ldg_ull:
case NVPTX::BI__nvvm_ldg_ull2:
// PTX Interoperability section 2.2: "For a vector with an even number of
// elements, its alignment is set to number of elements times the alignment
// of its member: n*alignof(t)."
- return MakeLdg(Intrinsic::nvvm_ldg_global_i);
+ return MakeLdgLdu(Intrinsic::nvvm_ldg_global_i, *this, E);
case NVPTX::BI__nvvm_ldg_f:
case NVPTX::BI__nvvm_ldg_f2:
case NVPTX::BI__nvvm_ldg_f4:
case NVPTX::BI__nvvm_ldg_d:
case NVPTX::BI__nvvm_ldg_d2:
- return MakeLdg(Intrinsic::nvvm_ldg_global_f);
+ return MakeLdgLdu(Intrinsic::nvvm_ldg_global_f, *this, E);
+
+ case NVPTX::BI__nvvm_ldu_c:
+ case NVPTX::BI__nvvm_ldu_sc:
+ case NVPTX::BI__nvvm_ldu_c2:
+ case NVPTX::BI__nvvm_ldu_sc2:
+ case NVPTX::BI__nvvm_ldu_c4:
+ case NVPTX::BI__nvvm_ldu_sc4:
+ case NVPTX::BI__nvvm_ldu_s:
+ case NVPTX::BI__nvvm_ldu_s2:
+ case NVPTX::BI__nvvm_ldu_s4:
+ case NVPTX::BI__nvvm_ldu_i:
+ case NVPTX::BI__nvvm_ldu_i2:
+ case NVPTX::BI__nvvm_ldu_i4:
+ case NVPTX::BI__nvvm_ldu_l:
+ case NVPTX::BI__nvvm_ldu_l2:
+ case NVPTX::BI__nvvm_ldu_ll:
+ case NVPTX::BI__nvvm_ldu_ll2:
+ case NVPTX::BI__nvvm_ldu_uc:
+ case NVPTX::BI__nvvm_ldu_uc2:
+ case NVPTX::BI__nvvm_ldu_uc4:
+ case NVPTX::BI__nvvm_ldu_us:
+ case NVPTX::BI__nvvm_ldu_us2:
+ case NVPTX::BI__nvvm_ldu_us4:
+ case NVPTX::BI__nvvm_ldu_ui:
+ case NVPTX::BI__nvvm_ldu_ui2:
+ case NVPTX::BI__nvvm_ldu_ui4:
+ case NVPTX::BI__nvvm_ldu_ul:
+ case NVPTX::BI__nvvm_ldu_ul2:
+ case NVPTX::BI__nvvm_ldu_ull:
+ case NVPTX::BI__nvvm_ldu_ull2:
+ return MakeLdgLdu(Intrinsic::nvvm_ldu_global_i, *this, E);
+ case NVPTX::BI__nvvm_ldu_f:
+ case NVPTX::BI__nvvm_ldu_f2:
+ case NVPTX::BI__nvvm_ldu_f4:
+ case NVPTX::BI__nvvm_ldu_d:
+ case NVPTX::BI__nvvm_ldu_d2:
+ return MakeLdgLdu(Intrinsic::nvvm_ldu_global_f, *this, E);
case NVPTX::BI__nvvm_atom_cta_add_gen_i:
case NVPTX::BI__nvvm_atom_cta_add_gen_l:
case NVPTX::BI__nvvm_atom_cta_add_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta, *this, E);
case NVPTX::BI__nvvm_atom_sys_add_gen_i:
case NVPTX::BI__nvvm_atom_sys_add_gen_l:
case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys, *this, E);
case NVPTX::BI__nvvm_atom_cta_add_gen_f:
case NVPTX::BI__nvvm_atom_cta_add_gen_d:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta, *this, E);
case NVPTX::BI__nvvm_atom_sys_add_gen_f:
case NVPTX::BI__nvvm_atom_sys_add_gen_d:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys, *this, E);
case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta, *this, E);
case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys, *this, E);
case NVPTX::BI__nvvm_atom_cta_max_gen_i:
case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
case NVPTX::BI__nvvm_atom_cta_max_gen_l:
case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta, *this, E);
case NVPTX::BI__nvvm_atom_sys_max_gen_i:
case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
case NVPTX::BI__nvvm_atom_sys_max_gen_l:
case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys, *this, E);
case NVPTX::BI__nvvm_atom_cta_min_gen_i:
case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
case NVPTX::BI__nvvm_atom_cta_min_gen_l:
case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta, *this, E);
case NVPTX::BI__nvvm_atom_sys_min_gen_i:
case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
case NVPTX::BI__nvvm_atom_sys_min_gen_l:
case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys, *this, E);
case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta, *this, E);
case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta, *this, E);
case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys, *this, E);
case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys, *this, E);
case NVPTX::BI__nvvm_atom_cta_and_gen_i:
case NVPTX::BI__nvvm_atom_cta_and_gen_l:
case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta, *this, E);
case NVPTX::BI__nvvm_atom_sys_and_gen_i:
case NVPTX::BI__nvvm_atom_sys_and_gen_l:
case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys, *this, E);
case NVPTX::BI__nvvm_atom_cta_or_gen_i:
case NVPTX::BI__nvvm_atom_cta_or_gen_l:
case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta, *this, E);
case NVPTX::BI__nvvm_atom_sys_or_gen_i:
case NVPTX::BI__nvvm_atom_sys_or_gen_l:
case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys, *this, E);
case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta, *this, E);
case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
- return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys);
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys, *this, E);
case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ElemTy =
+ ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
return Builder.CreateCall(
CGM.getIntrinsic(
- Intrinsic::nvvm_atomic_cas_gen_i_cta,
- {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
+ Intrinsic::nvvm_atomic_cas_gen_i_cta, {ElemTy, Ptr->getType()}),
{Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
}
case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ElemTy =
+ ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
return Builder.CreateCall(
CGM.getIntrinsic(
- Intrinsic::nvvm_atomic_cas_gen_i_sys,
- {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
+ Intrinsic::nvvm_atomic_cas_gen_i_sys, {ElemTy, Ptr->getType()}),
{Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
}
case NVPTX::BI__nvvm_match_all_sync_i32p:
@@ -17359,7 +19718,7 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
Address Dst = EmitPointerWithAlignment(E->getArg(0));
Value *Src = EmitScalarExpr(E->getArg(1));
Value *Ldm = EmitScalarExpr(E->getArg(2));
- Optional<llvm::APSInt> isColMajorArg =
+ std::optional<llvm::APSInt> isColMajorArg =
E->getArg(3)->getIntegerConstantExpr(getContext());
if (!isColMajorArg)
return nullptr;
@@ -17406,7 +19765,7 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
Value *Dst = EmitScalarExpr(E->getArg(0));
Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *Ldm = EmitScalarExpr(E->getArg(2));
- Optional<llvm::APSInt> isColMajorArg =
+ std::optional<llvm::APSInt> isColMajorArg =
E->getArg(3)->getIntegerConstantExpr(getContext());
if (!isColMajorArg)
return nullptr;
@@ -17465,7 +19824,7 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
Address SrcA = EmitPointerWithAlignment(E->getArg(1));
Address SrcB = EmitPointerWithAlignment(E->getArg(2));
Address SrcC = EmitPointerWithAlignment(E->getArg(3));
- Optional<llvm::APSInt> LayoutArg =
+ std::optional<llvm::APSInt> LayoutArg =
E->getArg(4)->getIntegerConstantExpr(getContext());
if (!LayoutArg)
return nullptr;
@@ -17476,7 +19835,7 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1 ||
BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1)
SatfArg = 0; // .b1 does not have satf argument.
- else if (Optional<llvm::APSInt> OptSatfArg =
+ else if (std::optional<llvm::APSInt> OptSatfArg =
E->getArg(5)->getIntegerConstantExpr(getContext()))
SatfArg = *OptSatfArg;
else
@@ -17530,6 +19889,243 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
CharUnits::fromQuantity(4));
return Result;
}
+ // The following builtins require half type support
+ case NVPTX::BI__nvvm_ex2_approx_f16:
+ return MakeHalfType(Intrinsic::nvvm_ex2_approx_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_ex2_approx_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_ex2_approx_f16x2, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_ff2f16x2_rn:
+ return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rn, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_ff2f16x2_rn_relu:
+ return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rn_relu, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_ff2f16x2_rz:
+ return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rz, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_ff2f16x2_rz_relu:
+ return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rz_relu, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fma_rn_f16:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fma_rn_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_f16x2, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fma_rn_ftz_f16:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fma_rn_ftz_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_f16x2, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fma_rn_ftz_relu_f16:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_relu_f16, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fma_rn_ftz_relu_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_relu_f16x2, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fma_rn_ftz_sat_f16:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_sat_f16, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fma_rn_ftz_sat_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_sat_f16x2, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fma_rn_relu_f16:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_relu_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fma_rn_relu_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_relu_f16x2, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fma_rn_sat_f16:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_sat_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fma_rn_sat_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fma_rn_sat_f16x2, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmax_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmax_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmax_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmax_f16x2, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmax_ftz_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmax_ftz_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmax_ftz_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmax_ftz_f16x2, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmax_ftz_nan_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmax_ftz_nan_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_f16x2, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fmax_ftz_nan_xorsign_abs_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f16, BuiltinID,
+ E, *this);
+ case NVPTX::BI__nvvm_fmax_ftz_nan_xorsign_abs_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f16x2,
+ BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmax_ftz_xorsign_abs_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmax_ftz_xorsign_abs_f16, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fmax_ftz_xorsign_abs_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmax_ftz_xorsign_abs_f16x2, BuiltinID,
+ E, *this);
+ case NVPTX::BI__nvvm_fmax_nan_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmax_nan_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmax_nan_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmax_nan_f16x2, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmax_nan_xorsign_abs_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmax_nan_xorsign_abs_f16, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fmax_nan_xorsign_abs_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmax_nan_xorsign_abs_f16x2, BuiltinID,
+ E, *this);
+ case NVPTX::BI__nvvm_fmax_xorsign_abs_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmax_xorsign_abs_f16, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fmax_xorsign_abs_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmax_xorsign_abs_f16x2, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fmin_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmin_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmin_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmin_f16x2, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmin_ftz_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmin_ftz_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmin_ftz_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmin_ftz_f16x2, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmin_ftz_nan_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmin_ftz_nan_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_f16x2, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fmin_ftz_nan_xorsign_abs_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f16, BuiltinID,
+ E, *this);
+ case NVPTX::BI__nvvm_fmin_ftz_nan_xorsign_abs_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f16x2,
+ BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmin_ftz_xorsign_abs_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmin_ftz_xorsign_abs_f16, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fmin_ftz_xorsign_abs_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmin_ftz_xorsign_abs_f16x2, BuiltinID,
+ E, *this);
+ case NVPTX::BI__nvvm_fmin_nan_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmin_nan_f16, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmin_nan_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmin_nan_f16x2, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_fmin_nan_xorsign_abs_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmin_nan_xorsign_abs_f16, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fmin_nan_xorsign_abs_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmin_nan_xorsign_abs_f16x2, BuiltinID,
+ E, *this);
+ case NVPTX::BI__nvvm_fmin_xorsign_abs_f16:
+ return MakeHalfType(Intrinsic::nvvm_fmin_xorsign_abs_f16, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_fmin_xorsign_abs_f16x2:
+ return MakeHalfType(Intrinsic::nvvm_fmin_xorsign_abs_f16x2, BuiltinID, E,
+ *this);
+ case NVPTX::BI__nvvm_ldg_h:
+ return MakeHalfType(Intrinsic::nvvm_ldg_global_f, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_ldg_h2:
+ return MakeHalfType(Intrinsic::nvvm_ldg_global_f, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_ldu_h:
+ return MakeHalfType(Intrinsic::nvvm_ldu_global_f, BuiltinID, E, *this);
+ case NVPTX::BI__nvvm_ldu_h2: {
+ return MakeHalfType(Intrinsic::nvvm_ldu_global_f, BuiltinID, E, *this);
+ }
+ case NVPTX::BI__nvvm_cp_async_ca_shared_global_4:
+ return MakeCpAsync(Intrinsic::nvvm_cp_async_ca_shared_global_4,
+ Intrinsic::nvvm_cp_async_ca_shared_global_4_s, *this, E,
+ 4);
+ case NVPTX::BI__nvvm_cp_async_ca_shared_global_8:
+ return MakeCpAsync(Intrinsic::nvvm_cp_async_ca_shared_global_8,
+ Intrinsic::nvvm_cp_async_ca_shared_global_8_s, *this, E,
+ 8);
+ case NVPTX::BI__nvvm_cp_async_ca_shared_global_16:
+ return MakeCpAsync(Intrinsic::nvvm_cp_async_ca_shared_global_16,
+ Intrinsic::nvvm_cp_async_ca_shared_global_16_s, *this, E,
+ 16);
+ case NVPTX::BI__nvvm_cp_async_cg_shared_global_16:
+ return MakeCpAsync(Intrinsic::nvvm_cp_async_cg_shared_global_16,
+ Intrinsic::nvvm_cp_async_cg_shared_global_16_s, *this, E,
+ 16);
+ case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_x:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_x));
+ case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_y:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_y));
+ case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_z:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_z));
+ case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_w:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_w));
+ case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_x:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_x));
+ case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_y:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_y));
+ case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_z:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_z));
+ case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_w:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_w));
+ case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_x:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_x));
+ case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_y:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_y));
+ case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_z:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_z));
+ case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_w:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_w));
+ case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_x:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_x));
+ case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_y:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_y));
+ case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_z:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_z));
+ case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_w:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_w));
+ case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctarank:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctarank));
+ case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctarank:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctarank));
+ case NVPTX::BI__nvvm_is_explicit_cluster:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_is_explicit_cluster));
+ case NVPTX::BI__nvvm_isspacep_shared_cluster:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_isspacep_shared_cluster),
+ EmitScalarExpr(E->getArg(0)));
+ case NVPTX::BI__nvvm_mapa:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_mapa),
+ {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
+ case NVPTX::BI__nvvm_mapa_shared_cluster:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_mapa_shared_cluster),
+ {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
+ case NVPTX::BI__nvvm_getctarank:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_getctarank),
+ EmitScalarExpr(E->getArg(0)));
+ case NVPTX::BI__nvvm_getctarank_shared_cluster:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_getctarank_shared_cluster),
+ EmitScalarExpr(E->getArg(0)));
+ case NVPTX::BI__nvvm_barrier_cluster_arrive:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_barrier_cluster_arrive));
+ case NVPTX::BI__nvvm_barrier_cluster_arrive_relaxed:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_barrier_cluster_arrive_relaxed));
+ case NVPTX::BI__nvvm_barrier_cluster_wait:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_barrier_cluster_wait));
+ case NVPTX::BI__nvvm_fence_sc_cluster:
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::nvvm_fence_sc_cluster));
default:
return nullptr;
}
@@ -17580,46 +20176,37 @@ RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) {
/// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
/// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
-/// llvm.ptrmask instrinsic (with a GEP before in the align_up case).
-/// TODO: actually use ptrmask once most optimization passes know about it.
+/// llvm.ptrmask intrinsic (with a GEP before in the align_up case).
RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
BuiltinAlignArgs Args(E, *this);
- llvm::Value *SrcAddr = Args.Src;
- if (Args.Src->getType()->isPointerTy())
- SrcAddr = Builder.CreatePtrToInt(Args.Src, Args.IntType, "intptr");
- llvm::Value *SrcForMask = SrcAddr;
+ llvm::Value *SrcForMask = Args.Src;
if (AlignUp) {
// When aligning up we have to first add the mask to ensure we go over the
// next alignment value and then align down to the next valid multiple.
// By adding the mask, we ensure that align_up on an already aligned
// value will not change the value.
- SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
+ if (Args.Src->getType()->isPointerTy()) {
+ if (getLangOpts().isSignedOverflowDefined())
+ SrcForMask =
+ Builder.CreateGEP(Int8Ty, SrcForMask, Args.Mask, "over_boundary");
+ else
+ SrcForMask = EmitCheckedInBoundsGEP(Int8Ty, SrcForMask, Args.Mask,
+ /*SignedIndices=*/true,
+ /*isSubtraction=*/false,
+ E->getExprLoc(), "over_boundary");
+ } else {
+ SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
+ }
}
// Invert the mask to only clear the lower bits.
llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
- llvm::Value *Result =
- Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
+ llvm::Value *Result = nullptr;
if (Args.Src->getType()->isPointerTy()) {
- /// TODO: Use ptrmask instead of ptrtoint+gep once it is optimized well.
- // Result = Builder.CreateIntrinsic(
- // Intrinsic::ptrmask, {Args.SrcType, SrcForMask->getType(), Args.IntType},
- // {SrcForMask, NegatedMask}, nullptr, "aligned_result");
- Result->setName("aligned_intptr");
- llvm::Value *Difference = Builder.CreateSub(Result, SrcAddr, "diff");
- // The result must point to the same underlying allocation. This means we
- // can use an inbounds GEP to enable better optimization.
- Value *Base = EmitCastToVoidPtr(Args.Src);
- if (getLangOpts().isSignedOverflowDefined())
- Result = Builder.CreateGEP(Int8Ty, Base, Difference, "aligned_result");
- else
- Result = EmitCheckedInBoundsGEP(Base, Difference,
- /*SignedIndices=*/true,
- /*isSubtraction=*/!AlignUp,
- E->getExprLoc(), "aligned_result");
- Result = Builder.CreatePointerCast(Result, Args.SrcType);
- // Emit an alignment assumption to ensure that the new alignment is
- // propagated to loads/stores, etc.
- emitAlignmentAssumption(Result, E, E->getExprLoc(), Args.Alignment);
+ Result = Builder.CreateIntrinsic(
+ Intrinsic::ptrmask, {Args.SrcType, Args.IntType},
+ {SrcForMask, InvertedMask}, nullptr, "aligned_result");
+ } else {
+ Result = Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
}
assert(Result->getType() == Args.SrcType);
return RValue::get(Result);
@@ -17749,6 +20336,22 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::maximum, ConvertType(E->getType()));
return Builder.CreateCall(Callee, {LHS, RHS});
}
+ case WebAssembly::BI__builtin_wasm_pmin_f32x4:
+ case WebAssembly::BI__builtin_wasm_pmin_f64x2: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::wasm_pmin, ConvertType(E->getType()));
+ return Builder.CreateCall(Callee, {LHS, RHS});
+ }
+ case WebAssembly::BI__builtin_wasm_pmax_f32x4:
+ case WebAssembly::BI__builtin_wasm_pmax_f64x2: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::wasm_pmax, ConvertType(E->getType()));
+ return Builder.CreateCall(Callee, {LHS, RHS});
+ }
case WebAssembly::BI__builtin_wasm_ceil_f32x4:
case WebAssembly::BI__builtin_wasm_floor_f32x4:
case WebAssembly::BI__builtin_wasm_trunc_f32x4:
@@ -17782,6 +20385,14 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
return Builder.CreateCall(Callee, Value);
}
+ case WebAssembly::BI__builtin_wasm_ref_null_extern: {
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_ref_null_extern);
+ return Builder.CreateCall(Callee);
+ }
+ case WebAssembly::BI__builtin_wasm_ref_null_func: {
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_ref_null_func);
+ return Builder.CreateCall(Callee);
+ }
case WebAssembly::BI__builtin_wasm_swizzle_i8x16: {
Value *Src = EmitScalarExpr(E->getArg(0));
Value *Indices = EmitScalarExpr(E->getArg(1));
@@ -17903,7 +20514,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
IntNo = Intrinsic::wasm_extadd_pairwise_unsigned;
break;
default:
- llvm_unreachable("unexptected builtin ID");
+ llvm_unreachable("unexpected builtin ID");
}
Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
@@ -17996,30 +20607,26 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()});
return Builder.CreateCall(Callee, {Low, High});
}
- case WebAssembly::BI__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4:
- case WebAssembly::BI__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4: {
+ case WebAssembly::BI__builtin_wasm_trunc_sat_s_zero_f64x2_i32x4:
+ case WebAssembly::BI__builtin_wasm_trunc_sat_u_zero_f64x2_i32x4: {
Value *Vec = EmitScalarExpr(E->getArg(0));
unsigned IntNo;
switch (BuiltinID) {
- case WebAssembly::BI__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4:
+ case WebAssembly::BI__builtin_wasm_trunc_sat_s_zero_f64x2_i32x4:
IntNo = Intrinsic::fptosi_sat;
break;
- case WebAssembly::BI__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4:
+ case WebAssembly::BI__builtin_wasm_trunc_sat_u_zero_f64x2_i32x4:
IntNo = Intrinsic::fptoui_sat;
break;
default:
llvm_unreachable("unexpected builtin ID");
}
llvm::Type *SrcT = Vec->getType();
- llvm::Type *TruncT =
- SrcT->getWithNewType(llvm::IntegerType::get(getLLVMContext(), 32));
+ llvm::Type *TruncT = SrcT->getWithNewType(Builder.getInt32Ty());
Function *Callee = CGM.getIntrinsic(IntNo, {TruncT, SrcT});
Value *Trunc = Builder.CreateCall(Callee, Vec);
- Value *Splat = Builder.CreateVectorSplat(2, Builder.getInt32(0));
- Value *ConcatMask =
- llvm::ConstantVector::get({Builder.getInt32(0), Builder.getInt32(1),
- Builder.getInt32(2), Builder.getInt32(3)});
- return Builder.CreateShuffleVector(Trunc, Splat, ConcatMask);
+ Value *Splat = Constant::getNullValue(TruncT);
+ return Builder.CreateShuffleVector(Trunc, Splat, ArrayRef<int>{0, 1, 2, 3});
}
case WebAssembly::BI__builtin_wasm_shuffle_i8x16: {
Value *Ops[18];
@@ -18027,7 +20634,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Ops[OpIdx++] = EmitScalarExpr(E->getArg(0));
Ops[OpIdx++] = EmitScalarExpr(E->getArg(1));
while (OpIdx < 18) {
- Optional<llvm::APSInt> LaneConst =
+ std::optional<llvm::APSInt> LaneConst =
E->getArg(OpIdx)->getIntegerConstantExpr(getContext());
assert(LaneConst && "Constant arg isn't actually constant?");
Ops[OpIdx++] = llvm::ConstantInt::get(getLLVMContext(), *LaneConst);
@@ -18035,19 +20642,217 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle);
return Builder.CreateCall(Callee, Ops);
}
+ case WebAssembly::BI__builtin_wasm_relaxed_madd_f32x4:
+ case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f32x4:
+ case WebAssembly::BI__builtin_wasm_relaxed_madd_f64x2:
+ case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f64x2: {
+ Value *A = EmitScalarExpr(E->getArg(0));
+ Value *B = EmitScalarExpr(E->getArg(1));
+ Value *C = EmitScalarExpr(E->getArg(2));
+ unsigned IntNo;
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_relaxed_madd_f32x4:
+ case WebAssembly::BI__builtin_wasm_relaxed_madd_f64x2:
+ IntNo = Intrinsic::wasm_relaxed_madd;
+ break;
+ case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f32x4:
+ case WebAssembly::BI__builtin_wasm_relaxed_nmadd_f64x2:
+ IntNo = Intrinsic::wasm_relaxed_nmadd;
+ break;
+ default:
+ llvm_unreachable("unexpected builtin ID");
+ }
+ Function *Callee = CGM.getIntrinsic(IntNo, A->getType());
+ return Builder.CreateCall(Callee, {A, B, C});
+ }
+ case WebAssembly::BI__builtin_wasm_relaxed_laneselect_i8x16:
+ case WebAssembly::BI__builtin_wasm_relaxed_laneselect_i16x8:
+ case WebAssembly::BI__builtin_wasm_relaxed_laneselect_i32x4:
+ case WebAssembly::BI__builtin_wasm_relaxed_laneselect_i64x2: {
+ Value *A = EmitScalarExpr(E->getArg(0));
+ Value *B = EmitScalarExpr(E->getArg(1));
+ Value *C = EmitScalarExpr(E->getArg(2));
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::wasm_relaxed_laneselect, A->getType());
+ return Builder.CreateCall(Callee, {A, B, C});
+ }
+ case WebAssembly::BI__builtin_wasm_relaxed_swizzle_i8x16: {
+ Value *Src = EmitScalarExpr(E->getArg(0));
+ Value *Indices = EmitScalarExpr(E->getArg(1));
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_relaxed_swizzle);
+ return Builder.CreateCall(Callee, {Src, Indices});
+ }
+ case WebAssembly::BI__builtin_wasm_relaxed_min_f32x4:
+ case WebAssembly::BI__builtin_wasm_relaxed_max_f32x4:
+ case WebAssembly::BI__builtin_wasm_relaxed_min_f64x2:
+ case WebAssembly::BI__builtin_wasm_relaxed_max_f64x2: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ unsigned IntNo;
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_relaxed_min_f32x4:
+ case WebAssembly::BI__builtin_wasm_relaxed_min_f64x2:
+ IntNo = Intrinsic::wasm_relaxed_min;
+ break;
+ case WebAssembly::BI__builtin_wasm_relaxed_max_f32x4:
+ case WebAssembly::BI__builtin_wasm_relaxed_max_f64x2:
+ IntNo = Intrinsic::wasm_relaxed_max;
+ break;
+ default:
+ llvm_unreachable("unexpected builtin ID");
+ }
+ Function *Callee = CGM.getIntrinsic(IntNo, LHS->getType());
+ return Builder.CreateCall(Callee, {LHS, RHS});
+ }
+ case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_i32x4_f32x4:
+ case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_i32x4_f32x4:
+ case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_zero_i32x4_f64x2:
+ case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_zero_i32x4_f64x2: {
+ Value *Vec = EmitScalarExpr(E->getArg(0));
+ unsigned IntNo;
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_i32x4_f32x4:
+ IntNo = Intrinsic::wasm_relaxed_trunc_signed;
+ break;
+ case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_i32x4_f32x4:
+ IntNo = Intrinsic::wasm_relaxed_trunc_unsigned;
+ break;
+ case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_zero_i32x4_f64x2:
+ IntNo = Intrinsic::wasm_relaxed_trunc_signed_zero;
+ break;
+ case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_zero_i32x4_f64x2:
+ IntNo = Intrinsic::wasm_relaxed_trunc_unsigned_zero;
+ break;
+ default:
+ llvm_unreachable("unexpected builtin ID");
+ }
+ Function *Callee = CGM.getIntrinsic(IntNo);
+ return Builder.CreateCall(Callee, {Vec});
+ }
+ case WebAssembly::BI__builtin_wasm_relaxed_q15mulr_s_i16x8: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_relaxed_q15mulr_signed);
+ return Builder.CreateCall(Callee, {LHS, RHS});
+ }
+ case WebAssembly::BI__builtin_wasm_relaxed_dot_i8x16_i7x16_s_i16x8: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::wasm_relaxed_dot_i8x16_i7x16_signed);
+ return Builder.CreateCall(Callee, {LHS, RHS});
+ }
+ case WebAssembly::BI__builtin_wasm_relaxed_dot_i8x16_i7x16_add_s_i32x4: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Value *Acc = EmitScalarExpr(E->getArg(2));
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::wasm_relaxed_dot_i8x16_i7x16_add_signed);
+ return Builder.CreateCall(Callee, {LHS, RHS, Acc});
+ }
+ case WebAssembly::BI__builtin_wasm_relaxed_dot_bf16x8_add_f32_f32x4: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Value *Acc = EmitScalarExpr(E->getArg(2));
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::wasm_relaxed_dot_bf16x8_add_f32);
+ return Builder.CreateCall(Callee, {LHS, RHS, Acc});
+ }
+ case WebAssembly::BI__builtin_wasm_table_get: {
+ assert(E->getArg(0)->getType()->isArrayType());
+ Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Value *Index = EmitScalarExpr(E->getArg(1));
+ Function *Callee;
+ if (E->getType().isWebAssemblyExternrefType())
+ Callee = CGM.getIntrinsic(Intrinsic::wasm_table_get_externref);
+ else if (E->getType().isWebAssemblyFuncrefType())
+ Callee = CGM.getIntrinsic(Intrinsic::wasm_table_get_funcref);
+ else
+ llvm_unreachable(
+ "Unexpected reference type for __builtin_wasm_table_get");
+ return Builder.CreateCall(Callee, {Table, Index});
+ }
+ case WebAssembly::BI__builtin_wasm_table_set: {
+ assert(E->getArg(0)->getType()->isArrayType());
+ Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Value *Index = EmitScalarExpr(E->getArg(1));
+ Value *Val = EmitScalarExpr(E->getArg(2));
+ Function *Callee;
+ if (E->getArg(2)->getType().isWebAssemblyExternrefType())
+ Callee = CGM.getIntrinsic(Intrinsic::wasm_table_set_externref);
+ else if (E->getArg(2)->getType().isWebAssemblyFuncrefType())
+ Callee = CGM.getIntrinsic(Intrinsic::wasm_table_set_funcref);
+ else
+ llvm_unreachable(
+ "Unexpected reference type for __builtin_wasm_table_set");
+ return Builder.CreateCall(Callee, {Table, Index, Val});
+ }
+ case WebAssembly::BI__builtin_wasm_table_size: {
+ assert(E->getArg(0)->getType()->isArrayType());
+ Value *Value = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_table_size);
+ return Builder.CreateCall(Callee, Value);
+ }
+ case WebAssembly::BI__builtin_wasm_table_grow: {
+ assert(E->getArg(0)->getType()->isArrayType());
+ Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Value *Val = EmitScalarExpr(E->getArg(1));
+ Value *NElems = EmitScalarExpr(E->getArg(2));
+
+ Function *Callee;
+ if (E->getArg(1)->getType().isWebAssemblyExternrefType())
+ Callee = CGM.getIntrinsic(Intrinsic::wasm_table_grow_externref);
+ else if (E->getArg(2)->getType().isWebAssemblyFuncrefType())
+ Callee = CGM.getIntrinsic(Intrinsic::wasm_table_fill_funcref);
+ else
+ llvm_unreachable(
+ "Unexpected reference type for __builtin_wasm_table_grow");
+
+ return Builder.CreateCall(Callee, {Table, Val, NElems});
+ }
+ case WebAssembly::BI__builtin_wasm_table_fill: {
+ assert(E->getArg(0)->getType()->isArrayType());
+ Value *Table = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Value *Index = EmitScalarExpr(E->getArg(1));
+ Value *Val = EmitScalarExpr(E->getArg(2));
+ Value *NElems = EmitScalarExpr(E->getArg(3));
+
+ Function *Callee;
+ if (E->getArg(2)->getType().isWebAssemblyExternrefType())
+ Callee = CGM.getIntrinsic(Intrinsic::wasm_table_fill_externref);
+ else if (E->getArg(2)->getType().isWebAssemblyFuncrefType())
+ Callee = CGM.getIntrinsic(Intrinsic::wasm_table_fill_funcref);
+ else
+ llvm_unreachable(
+ "Unexpected reference type for __builtin_wasm_table_fill");
+
+ return Builder.CreateCall(Callee, {Table, Index, Val, NElems});
+ }
+ case WebAssembly::BI__builtin_wasm_table_copy: {
+ assert(E->getArg(0)->getType()->isArrayType());
+ Value *TableX = EmitArrayToPointerDecay(E->getArg(0)).getPointer();
+ Value *TableY = EmitArrayToPointerDecay(E->getArg(1)).getPointer();
+ Value *DstIdx = EmitScalarExpr(E->getArg(2));
+ Value *SrcIdx = EmitScalarExpr(E->getArg(3));
+ Value *NElems = EmitScalarExpr(E->getArg(4));
+
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_table_copy);
+
+ return Builder.CreateCall(Callee, {TableX, TableY, SrcIdx, DstIdx, NElems});
+ }
default:
return nullptr;
}
}
static std::pair<Intrinsic::ID, unsigned>
-getIntrinsicForHexagonNonGCCBuiltin(unsigned BuiltinID) {
+getIntrinsicForHexagonNonClangBuiltin(unsigned BuiltinID) {
struct Info {
unsigned BuiltinID;
Intrinsic::ID IntrinsicID;
unsigned VecLen;
};
- Info Infos[] = {
+ static Info Infos[] = {
#define CUSTOM_BUILTIN_MAPPING(x,s) \
{ Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s },
CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0)
@@ -18072,6 +20877,7 @@ getIntrinsicForHexagonNonGCCBuiltin(unsigned BuiltinID) {
CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0)
CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0)
CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0)
+ // Legacy builtins that take a vector in place of a vector predicate.
CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64)
CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64)
CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64)
@@ -18088,8 +20894,7 @@ getIntrinsicForHexagonNonGCCBuiltin(unsigned BuiltinID) {
static const bool SortOnce = (llvm::sort(Infos, CmpInfo), true);
(void)SortOnce;
- const Info *F = std::lower_bound(std::begin(Infos), std::end(Infos),
- Info{BuiltinID, 0, 0}, CmpInfo);
+ const Info *F = llvm::lower_bound(Infos, Info{BuiltinID, 0, 0}, CmpInfo);
if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
return {Intrinsic::not_intrinsic, 0};
@@ -18100,13 +20905,12 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
Intrinsic::ID ID;
unsigned VecLen;
- std::tie(ID, VecLen) = getIntrinsicForHexagonNonGCCBuiltin(BuiltinID);
+ std::tie(ID, VecLen) = getIntrinsicForHexagonNonClangBuiltin(BuiltinID);
auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) {
// The base pointer is passed by address, so it needs to be loaded.
Address A = EmitPointerWithAlignment(E->getArg(0));
- Address BP = Address(
- Builder.CreateBitCast(A.getPointer(), Int8PtrPtrTy), A.getAlignment());
+ Address BP = Address(A.getPointer(), Int8PtrTy, A.getAlignment());
llvm::Value *Base = Builder.CreateLoad(BP);
// The treatment of both loads and stores is the same: the arguments for
// the builtin are the same as the arguments for the intrinsic.
@@ -18125,8 +20929,7 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
// generate one (NewBase). The new base address needs to be stored.
llvm::Value *NewBase = IsLoad ? Builder.CreateExtractValue(Result, 1)
: Result;
- llvm::Value *LV = Builder.CreateBitCast(
- EmitScalarExpr(E->getArg(0)), NewBase->getType()->getPointerTo());
+ llvm::Value *LV = EmitScalarExpr(E->getArg(0));
Address Dest = EmitPointerWithAlignment(E->getArg(0));
llvm::Value *RetVal =
Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
@@ -18142,15 +20945,13 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
// The intrinsic generates one result, which is the new value for the base
// pointer. It needs to be returned. The result of the load instruction is
// passed to intrinsic by address, so the value needs to be stored.
- llvm::Value *BaseAddress =
- Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy);
+ llvm::Value *BaseAddress = EmitScalarExpr(E->getArg(0));
// Expressions like &(*pt++) will be incremented per evaluation.
// EmitPointerWithAlignment and EmitScalarExpr evaluates the expression
// per call.
Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
- DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy),
- DestAddr.getAlignment());
+ DestAddr = Address(DestAddr.getPointer(), Int8Ty, DestAddr.getAlignment());
llvm::Value *DestAddress = DestAddr.getPointer();
// Operands are Base, Dest, Modifier.
@@ -18167,9 +20968,7 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
// to be handled with stores of respective destination type.
DestVal = Builder.CreateTrunc(DestVal, DestTy);
- llvm::Value *DestForStore =
- Builder.CreateBitCast(DestAddress, DestVal->getType()->getPointerTo());
- Builder.CreateAlignedStore(DestVal, DestForStore, DestAddr.getAlignment());
+ Builder.CreateAlignedStore(DestVal, DestAddress, DestAddr.getAlignment());
// The updated value of the base pointer is returned.
return Builder.CreateExtractValue(Result, 1);
};
@@ -18197,8 +20996,8 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
// Get the type from the 0-th argument.
llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
- Address PredAddr = Builder.CreateBitCast(
- EmitPointerWithAlignment(E->getArg(2)), VecType->getPointerTo(0));
+ Address PredAddr =
+ EmitPointerWithAlignment(E->getArg(2)).withElementType(VecType);
llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr));
llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
{EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn});
@@ -18208,6 +21007,46 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
PredAddr.getAlignment());
return Builder.CreateExtractValue(Result, 0);
}
+ // These are identical to the builtins above, except they don't consume
+ // input carry, only generate carry-out. Since they still produce two
+ // outputs, generate the store of the predicate, but no load.
+ case Hexagon::BI__builtin_HEXAGON_V6_vaddcarryo:
+ case Hexagon::BI__builtin_HEXAGON_V6_vaddcarryo_128B:
+ case Hexagon::BI__builtin_HEXAGON_V6_vsubcarryo:
+ case Hexagon::BI__builtin_HEXAGON_V6_vsubcarryo_128B: {
+ // Get the type from the 0-th argument.
+ llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
+ Address PredAddr =
+ EmitPointerWithAlignment(E->getArg(2)).withElementType(VecType);
+ llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
+ {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
+
+ llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
+ Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
+ PredAddr.getAlignment());
+ return Builder.CreateExtractValue(Result, 0);
+ }
+
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstoreq:
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorenq:
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentq:
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentnq:
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstoreq_128B:
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorenq_128B:
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentq_128B:
+ case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentnq_128B: {
+ SmallVector<llvm::Value*,4> Ops;
+ const Expr *PredOp = E->getArg(0);
+ // There will be an implicit cast to a boolean vector. Strip it.
+ if (auto *Cast = dyn_cast<ImplicitCastExpr>(PredOp)) {
+ if (Cast->getCastKind() == CK_BitCast)
+ PredOp = Cast->getSubExpr();
+ Ops.push_back(V2Q(EmitScalarExpr(PredOp)));
+ }
+ for (int i = 1, e = E->getNumArgs(); i != e; ++i)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
+ }
case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
@@ -18245,40 +21084,6 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
case Hexagon::BI__builtin_brev_ldd:
return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);
-
- default: {
- if (ID == Intrinsic::not_intrinsic)
- return nullptr;
-
- auto IsVectorPredTy = [](llvm::Type *T) {
- return T->isVectorTy() &&
- cast<llvm::VectorType>(T)->getElementType()->isIntegerTy(1);
- };
-
- llvm::Function *IntrFn = CGM.getIntrinsic(ID);
- llvm::FunctionType *IntrTy = IntrFn->getFunctionType();
- SmallVector<llvm::Value*,4> Ops;
- for (unsigned i = 0, e = IntrTy->getNumParams(); i != e; ++i) {
- llvm::Type *T = IntrTy->getParamType(i);
- const Expr *A = E->getArg(i);
- if (IsVectorPredTy(T)) {
- // There will be an implicit cast to a boolean vector. Strip it.
- if (auto *Cast = dyn_cast<ImplicitCastExpr>(A)) {
- if (Cast->getCastKind() == CK_BitCast)
- A = Cast->getSubExpr();
- }
- Ops.push_back(V2Q(EmitScalarExpr(A)));
- } else {
- Ops.push_back(EmitScalarExpr(A));
- }
- }
-
- llvm::Value *Call = Builder.CreateCall(IntrFn, Ops);
- if (IsVectorPredTy(IntrTy->getReturnType()))
- Call = Q2V(Call);
-
- return Call;
- } // default
} // switch
return nullptr;
@@ -18290,11 +21095,46 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
SmallVector<Value *, 4> Ops;
llvm::Type *ResultType = ConvertType(E->getType());
- for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
- Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ // Find out if any arguments are required to be integer constant expressions.
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ if (Error == ASTContext::GE_Missing_type) {
+ // Vector intrinsics don't have a type string.
+ assert(BuiltinID >= clang::RISCV::FirstRVVBuiltin &&
+ BuiltinID <= clang::RISCV::LastRVVBuiltin);
+ ICEArguments = 0;
+ if (BuiltinID == RISCVVector::BI__builtin_rvv_vget_v ||
+ BuiltinID == RISCVVector::BI__builtin_rvv_vset_v)
+ ICEArguments = 1 << 1;
+ } else {
+ assert(Error == ASTContext::GE_None && "Unexpected error");
+ }
+
+ if (BuiltinID == RISCV::BI__builtin_riscv_ntl_load)
+ ICEArguments |= (1 << 1);
+ if (BuiltinID == RISCV::BI__builtin_riscv_ntl_store)
+ ICEArguments |= (1 << 2);
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
+ // Handle aggregate argument, namely RVV tuple types in segment load/store
+ if (hasAggregateEvaluationKind(E->getArg(i)->getType())) {
+ LValue L = EmitAggExprToLValue(E->getArg(i));
+ llvm::Value *AggValue = Builder.CreateLoad(L.getAddress(*this));
+ Ops.push_back(AggValue);
+ continue;
+ }
+ Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));
+ }
Intrinsic::ID ID = Intrinsic::not_intrinsic;
unsigned NF = 1;
+ // The 0th bit simulates the `vta` of RVV
+ // The 1st bit simulates the `vma` of RVV
+ constexpr unsigned RVV_VTA = 0x1;
+ constexpr unsigned RVV_VMA = 0x2;
+ int PolicyAttrs = 0;
+ bool IsMasked = false;
// Required for overloaded intrinsics.
llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes;
@@ -18302,33 +21142,24 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
default: llvm_unreachable("unexpected builtin ID");
case RISCV::BI__builtin_riscv_orc_b_32:
case RISCV::BI__builtin_riscv_orc_b_64:
- case RISCV::BI__builtin_riscv_clmul:
- case RISCV::BI__builtin_riscv_clmulh:
- case RISCV::BI__builtin_riscv_clmulr:
- case RISCV::BI__builtin_riscv_bcompress_32:
- case RISCV::BI__builtin_riscv_bcompress_64:
- case RISCV::BI__builtin_riscv_bdecompress_32:
- case RISCV::BI__builtin_riscv_bdecompress_64:
- case RISCV::BI__builtin_riscv_grev_32:
- case RISCV::BI__builtin_riscv_grev_64:
- case RISCV::BI__builtin_riscv_gorc_32:
- case RISCV::BI__builtin_riscv_gorc_64:
- case RISCV::BI__builtin_riscv_shfl_32:
- case RISCV::BI__builtin_riscv_shfl_64:
- case RISCV::BI__builtin_riscv_unshfl_32:
- case RISCV::BI__builtin_riscv_unshfl_64:
- case RISCV::BI__builtin_riscv_xperm_n:
- case RISCV::BI__builtin_riscv_xperm_b:
- case RISCV::BI__builtin_riscv_xperm_h:
- case RISCV::BI__builtin_riscv_xperm_w:
- case RISCV::BI__builtin_riscv_crc32_b:
- case RISCV::BI__builtin_riscv_crc32_h:
- case RISCV::BI__builtin_riscv_crc32_w:
- case RISCV::BI__builtin_riscv_crc32_d:
- case RISCV::BI__builtin_riscv_crc32c_b:
- case RISCV::BI__builtin_riscv_crc32c_h:
- case RISCV::BI__builtin_riscv_crc32c_w:
- case RISCV::BI__builtin_riscv_crc32c_d: {
+ case RISCV::BI__builtin_riscv_clz_32:
+ case RISCV::BI__builtin_riscv_clz_64:
+ case RISCV::BI__builtin_riscv_ctz_32:
+ case RISCV::BI__builtin_riscv_ctz_64:
+ case RISCV::BI__builtin_riscv_clmul_32:
+ case RISCV::BI__builtin_riscv_clmul_64:
+ case RISCV::BI__builtin_riscv_clmulh_32:
+ case RISCV::BI__builtin_riscv_clmulh_64:
+ case RISCV::BI__builtin_riscv_clmulr_32:
+ case RISCV::BI__builtin_riscv_clmulr_64:
+ case RISCV::BI__builtin_riscv_xperm4_32:
+ case RISCV::BI__builtin_riscv_xperm4_64:
+ case RISCV::BI__builtin_riscv_xperm8_32:
+ case RISCV::BI__builtin_riscv_xperm8_64:
+ case RISCV::BI__builtin_riscv_brev8_32:
+ case RISCV::BI__builtin_riscv_brev8_64:
+ case RISCV::BI__builtin_riscv_zip_32:
+ case RISCV::BI__builtin_riscv_unzip_32: {
switch (BuiltinID) {
default: llvm_unreachable("unexpected builtin ID");
// Zbb
@@ -18336,90 +21167,151 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
case RISCV::BI__builtin_riscv_orc_b_64:
ID = Intrinsic::riscv_orc_b;
break;
+ case RISCV::BI__builtin_riscv_clz_32:
+ case RISCV::BI__builtin_riscv_clz_64: {
+ Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
+ Value *Result = Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return Result;
+ }
+ case RISCV::BI__builtin_riscv_ctz_32:
+ case RISCV::BI__builtin_riscv_ctz_64: {
+ Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
+ Value *Result = Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return Result;
+ }
// Zbc
- case RISCV::BI__builtin_riscv_clmul:
+ case RISCV::BI__builtin_riscv_clmul_32:
+ case RISCV::BI__builtin_riscv_clmul_64:
ID = Intrinsic::riscv_clmul;
break;
- case RISCV::BI__builtin_riscv_clmulh:
+ case RISCV::BI__builtin_riscv_clmulh_32:
+ case RISCV::BI__builtin_riscv_clmulh_64:
ID = Intrinsic::riscv_clmulh;
break;
- case RISCV::BI__builtin_riscv_clmulr:
+ case RISCV::BI__builtin_riscv_clmulr_32:
+ case RISCV::BI__builtin_riscv_clmulr_64:
ID = Intrinsic::riscv_clmulr;
break;
- // Zbe
- case RISCV::BI__builtin_riscv_bcompress_32:
- case RISCV::BI__builtin_riscv_bcompress_64:
- ID = Intrinsic::riscv_bcompress;
- break;
- case RISCV::BI__builtin_riscv_bdecompress_32:
- case RISCV::BI__builtin_riscv_bdecompress_64:
- ID = Intrinsic::riscv_bdecompress;
- break;
-
- // Zbp
- case RISCV::BI__builtin_riscv_grev_32:
- case RISCV::BI__builtin_riscv_grev_64:
- ID = Intrinsic::riscv_grev;
+ // Zbkx
+ case RISCV::BI__builtin_riscv_xperm8_32:
+ case RISCV::BI__builtin_riscv_xperm8_64:
+ ID = Intrinsic::riscv_xperm8;
break;
- case RISCV::BI__builtin_riscv_gorc_32:
- case RISCV::BI__builtin_riscv_gorc_64:
- ID = Intrinsic::riscv_gorc;
- break;
- case RISCV::BI__builtin_riscv_shfl_32:
- case RISCV::BI__builtin_riscv_shfl_64:
- ID = Intrinsic::riscv_shfl;
- break;
- case RISCV::BI__builtin_riscv_unshfl_32:
- case RISCV::BI__builtin_riscv_unshfl_64:
- ID = Intrinsic::riscv_unshfl;
- break;
- case RISCV::BI__builtin_riscv_xperm_n:
- ID = Intrinsic::riscv_xperm_n;
- break;
- case RISCV::BI__builtin_riscv_xperm_b:
- ID = Intrinsic::riscv_xperm_b;
- break;
- case RISCV::BI__builtin_riscv_xperm_h:
- ID = Intrinsic::riscv_xperm_h;
- break;
- case RISCV::BI__builtin_riscv_xperm_w:
- ID = Intrinsic::riscv_xperm_w;
+ case RISCV::BI__builtin_riscv_xperm4_32:
+ case RISCV::BI__builtin_riscv_xperm4_64:
+ ID = Intrinsic::riscv_xperm4;
break;
- // Zbr
- case RISCV::BI__builtin_riscv_crc32_b:
- ID = Intrinsic::riscv_crc32_b;
- break;
- case RISCV::BI__builtin_riscv_crc32_h:
- ID = Intrinsic::riscv_crc32_h;
- break;
- case RISCV::BI__builtin_riscv_crc32_w:
- ID = Intrinsic::riscv_crc32_w;
- break;
- case RISCV::BI__builtin_riscv_crc32_d:
- ID = Intrinsic::riscv_crc32_d;
- break;
- case RISCV::BI__builtin_riscv_crc32c_b:
- ID = Intrinsic::riscv_crc32c_b;
+ // Zbkb
+ case RISCV::BI__builtin_riscv_brev8_32:
+ case RISCV::BI__builtin_riscv_brev8_64:
+ ID = Intrinsic::riscv_brev8;
break;
- case RISCV::BI__builtin_riscv_crc32c_h:
- ID = Intrinsic::riscv_crc32c_h;
+ case RISCV::BI__builtin_riscv_zip_32:
+ ID = Intrinsic::riscv_zip;
break;
- case RISCV::BI__builtin_riscv_crc32c_w:
- ID = Intrinsic::riscv_crc32c_w;
- break;
- case RISCV::BI__builtin_riscv_crc32c_d:
- ID = Intrinsic::riscv_crc32c_d;
+ case RISCV::BI__builtin_riscv_unzip_32:
+ ID = Intrinsic::riscv_unzip;
break;
}
IntrinsicTypes = {ResultType};
break;
}
+
+ // Zk builtins
+
+ // Zknh
+ case RISCV::BI__builtin_riscv_sha256sig0:
+ ID = Intrinsic::riscv_sha256sig0;
+ break;
+ case RISCV::BI__builtin_riscv_sha256sig1:
+ ID = Intrinsic::riscv_sha256sig1;
+ break;
+ case RISCV::BI__builtin_riscv_sha256sum0:
+ ID = Intrinsic::riscv_sha256sum0;
+ break;
+ case RISCV::BI__builtin_riscv_sha256sum1:
+ ID = Intrinsic::riscv_sha256sum1;
+ break;
+
+ // Zksed
+ case RISCV::BI__builtin_riscv_sm4ks:
+ ID = Intrinsic::riscv_sm4ks;
+ break;
+ case RISCV::BI__builtin_riscv_sm4ed:
+ ID = Intrinsic::riscv_sm4ed;
+ break;
+
+ // Zksh
+ case RISCV::BI__builtin_riscv_sm3p0:
+ ID = Intrinsic::riscv_sm3p0;
+ break;
+ case RISCV::BI__builtin_riscv_sm3p1:
+ ID = Intrinsic::riscv_sm3p1;
+ break;
+
+ // Zihintntl
+ case RISCV::BI__builtin_riscv_ntl_load: {
+ llvm::Type *ResTy = ConvertType(E->getType());
+ unsigned DomainVal = 5; // Default __RISCV_NTLH_ALL
+ if (Ops.size() == 2)
+ DomainVal = cast<ConstantInt>(Ops[1])->getZExtValue();
+
+ llvm::MDNode *RISCVDomainNode = llvm::MDNode::get(
+ getLLVMContext(),
+ llvm::ConstantAsMetadata::get(Builder.getInt32(DomainVal)));
+ llvm::MDNode *NontemporalNode = llvm::MDNode::get(
+ getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
+
+ int Width;
+ if(ResTy->isScalableTy()) {
+ const ScalableVectorType *SVTy = cast<ScalableVectorType>(ResTy);
+ llvm::Type *ScalarTy = ResTy->getScalarType();
+ Width = ScalarTy->getPrimitiveSizeInBits() *
+ SVTy->getElementCount().getKnownMinValue();
+ } else
+ Width = ResTy->getPrimitiveSizeInBits();
+ LoadInst *Load = Builder.CreateLoad(
+ Address(Ops[0], ResTy, CharUnits::fromQuantity(Width / 8)));
+
+ Load->setMetadata(llvm::LLVMContext::MD_nontemporal, NontemporalNode);
+ Load->setMetadata(CGM.getModule().getMDKindID("riscv-nontemporal-domain"),
+ RISCVDomainNode);
+
+ return Load;
+ }
+ case RISCV::BI__builtin_riscv_ntl_store: {
+ unsigned DomainVal = 5; // Default __RISCV_NTLH_ALL
+ if (Ops.size() == 3)
+ DomainVal = cast<ConstantInt>(Ops[2])->getZExtValue();
+
+ llvm::MDNode *RISCVDomainNode = llvm::MDNode::get(
+ getLLVMContext(),
+ llvm::ConstantAsMetadata::get(Builder.getInt32(DomainVal)));
+ llvm::MDNode *NontemporalNode = llvm::MDNode::get(
+ getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
+
+ StoreInst *Store = Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
+ Store->setMetadata(llvm::LLVMContext::MD_nontemporal, NontemporalNode);
+ Store->setMetadata(CGM.getModule().getMDKindID("riscv-nontemporal-domain"),
+ RISCVDomainNode);
+
+ return Store;
+ }
+
// Vector builtins are handled from here.
#include "clang/Basic/riscv_vector_builtin_cg.inc"
+ // SiFive Vector builtins are handled from here.
+#include "clang/Basic/riscv_sifive_vector_builtin_cg.inc"
}
assert(ID != Intrinsic::not_intrinsic);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp
index 88030fee501b..5b43272bfa62 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp
@@ -19,11 +19,13 @@
#include "clang/Basic/Cuda.h"
#include "clang/CodeGen/CodeGenABITypes.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
+#include "llvm/Frontend/Offloading/Utility.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/ReplaceConstant.h"
#include "llvm/Support/Format.h"
+#include "llvm/Support/VirtualFileSystem.h"
using namespace clang;
using namespace CodeGen;
@@ -37,7 +39,7 @@ class CGNVCUDARuntime : public CGCUDARuntime {
private:
llvm::IntegerType *IntTy, *SizeTy;
llvm::Type *VoidTy;
- llvm::PointerType *CharPtrTy, *VoidPtrTy, *VoidPtrPtrTy;
+ llvm::PointerType *PtrTy;
/// Convenience reference to LLVM Context
llvm::LLVMContext &Context;
@@ -49,10 +51,10 @@ private:
const Decl *D;
};
llvm::SmallVector<KernelInfo, 16> EmittedKernels;
- // Map a device stub function to a symbol for identifying kernel in host code.
+ // Map a kernel mangled name to a symbol for identifying kernel in host code
// For CUDA, the symbol for identifying the kernel is the same as the device
// stub function. For HIP, they are different.
- llvm::DenseMap<llvm::Function *, llvm::GlobalValue *> KernelHandles;
+ llvm::DenseMap<StringRef, llvm::GlobalValue *> KernelHandles;
// Map a kernel handle to the kernel stub.
llvm::DenseMap<llvm::GlobalValue *, llvm::Function *> KernelStubs;
struct VarInfo {
@@ -69,6 +71,8 @@ private:
bool RelocatableDeviceCode;
/// Mangle context for device.
std::unique_ptr<MangleContext> DeviceMC;
+ /// Some zeros used for GEPs.
+ llvm::Constant *Zeros[2];
llvm::FunctionCallee getSetupArgumentFn() const;
llvm::FunctionCallee getLaunchFn() const;
@@ -86,14 +90,25 @@ private:
/// the start of the string. The result of this function can be used anywhere
/// where the C code specifies const char*.
llvm::Constant *makeConstantString(const std::string &Str,
- const std::string &Name = "",
- const std::string &SectionName = "",
- unsigned Alignment = 0) {
- llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0),
- llvm::ConstantInt::get(SizeTy, 0)};
+ const std::string &Name = "") {
auto ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
- llvm::GlobalVariable *GV =
- cast<llvm::GlobalVariable>(ConstStr.getPointer());
+ return llvm::ConstantExpr::getGetElementPtr(ConstStr.getElementType(),
+ ConstStr.getPointer(), Zeros);
+ }
+
+ /// Helper function which generates an initialized constant array from Str,
+ /// and optionally sets section name and alignment. AddNull specifies whether
+ /// the array should nave NUL termination.
+ llvm::Constant *makeConstantArray(StringRef Str,
+ StringRef Name = "",
+ StringRef SectionName = "",
+ unsigned Alignment = 0,
+ bool AddNull = false) {
+ llvm::Constant *Value =
+ llvm::ConstantDataArray::getString(Context, Str, AddNull);
+ auto *GV = new llvm::GlobalVariable(
+ TheModule, Value->getType(), /*isConstant=*/true,
+ llvm::GlobalValue::PrivateLinkage, Value, Name);
if (!SectionName.empty()) {
GV->setSection(SectionName);
// Mark the address as used which make sure that this section isn't
@@ -102,9 +117,7 @@ private:
}
if (Alignment)
GV->setAlignment(llvm::Align(Alignment));
-
- return llvm::ConstantExpr::getGetElementPtr(ConstStr.getElementType(),
- ConstStr.getPointer(), Zeros);
+ return llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros);
}
/// Helper function that generates an empty dummy function returning void.
@@ -157,6 +170,8 @@ private:
llvm::Function *makeModuleDtorFunction();
/// Transform managed variables for device compilation.
void transformManagedVars();
+ /// Create offloading entries to register globals in RDC mode.
+ void createOffloadingEntries();
public:
CGNVCUDARuntime(CodeGenModule &CGM);
@@ -177,7 +192,7 @@ public:
llvm::Function *finalizeModule() override;
};
-}
+} // end anonymous namespace
std::string CGNVCUDARuntime::addPrefixToName(StringRef FuncName) const {
if (CGM.getLangOpts().HIP)
@@ -212,21 +227,17 @@ CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
TheModule(CGM.getModule()),
RelocatableDeviceCode(CGM.getLangOpts().GPURelocatableDeviceCode),
DeviceMC(InitDeviceMC(CGM)) {
- CodeGen::CodeGenTypes &Types = CGM.getTypes();
- ASTContext &Ctx = CGM.getContext();
-
IntTy = CGM.IntTy;
SizeTy = CGM.SizeTy;
VoidTy = CGM.VoidTy;
-
- CharPtrTy = llvm::PointerType::getUnqual(Types.ConvertType(Ctx.CharTy));
- VoidPtrTy = cast<llvm::PointerType>(Types.ConvertType(Ctx.VoidPtrTy));
- VoidPtrPtrTy = VoidPtrTy->getPointerTo();
+ Zeros[0] = llvm::ConstantInt::get(SizeTy, 0);
+ Zeros[1] = Zeros[0];
+ PtrTy = CGM.UnqualPtrTy;
}
llvm::FunctionCallee CGNVCUDARuntime::getSetupArgumentFn() const {
// cudaError_t cudaSetupArgument(void *, size_t, size_t)
- llvm::Type *Params[] = {VoidPtrTy, SizeTy, SizeTy};
+ llvm::Type *Params[] = {PtrTy, SizeTy, SizeTy};
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, Params, false),
addPrefixToName("SetupArgument"));
@@ -236,27 +247,24 @@ llvm::FunctionCallee CGNVCUDARuntime::getLaunchFn() const {
if (CGM.getLangOpts().HIP) {
// hipError_t hipLaunchByPtr(char *);
return CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(IntTy, CharPtrTy, false), "hipLaunchByPtr");
- } else {
- // cudaError_t cudaLaunch(char *);
- return CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(IntTy, CharPtrTy, false), "cudaLaunch");
+ llvm::FunctionType::get(IntTy, PtrTy, false), "hipLaunchByPtr");
}
+ // cudaError_t cudaLaunch(char *);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(IntTy, PtrTy, false),
+ "cudaLaunch");
}
llvm::FunctionType *CGNVCUDARuntime::getRegisterGlobalsFnTy() const {
- return llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false);
+ return llvm::FunctionType::get(VoidTy, PtrTy, false);
}
llvm::FunctionType *CGNVCUDARuntime::getCallbackFnTy() const {
- return llvm::FunctionType::get(VoidTy, VoidPtrTy, false);
+ return llvm::FunctionType::get(VoidTy, PtrTy, false);
}
llvm::FunctionType *CGNVCUDARuntime::getRegisterLinkedBinaryFnTy() const {
- auto CallbackFnTy = getCallbackFnTy();
- auto RegisterGlobalsFnTy = getRegisterGlobalsFnTy();
- llvm::Type *Params[] = {RegisterGlobalsFnTy->getPointerTo(), VoidPtrTy,
- VoidPtrTy, CallbackFnTy->getPointerTo()};
+ llvm::Type *Params[] = {llvm::PointerType::getUnqual(Context), PtrTy, PtrTy,
+ llvm::PointerType::getUnqual(Context)};
return llvm::FunctionType::get(VoidTy, Params, false);
}
@@ -282,13 +290,12 @@ std::string CGNVCUDARuntime::getDeviceSideName(const NamedDecl *ND) {
DeviceSideName = std::string(ND->getIdentifier()->getName());
// Make unique name for device side static file-scope variable for HIP.
- if (CGM.getContext().shouldExternalizeStaticVar(ND) &&
- CGM.getLangOpts().GPURelocatableDeviceCode &&
- !CGM.getLangOpts().CUID.empty()) {
+ if (CGM.getContext().shouldExternalize(ND) &&
+ CGM.getLangOpts().GPURelocatableDeviceCode) {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
Out << DeviceSideName;
- CGM.printPostfixForExternalizedStaticVar(Out);
+ CGM.printPostfixForExternalizedDecl(Out, ND);
DeviceSideName = std::string(Out.str());
}
return DeviceSideName;
@@ -297,7 +304,8 @@ std::string CGNVCUDARuntime::getDeviceSideName(const NamedDecl *ND) {
void CGNVCUDARuntime::emitDeviceStub(CodeGenFunction &CGF,
FunctionArgList &Args) {
EmittedKernels.push_back({CGF.CurFn, CGF.CurFuncDecl});
- if (auto *GV = dyn_cast<llvm::GlobalVariable>(KernelHandles[CGF.CurFn])) {
+ if (auto *GV =
+ dyn_cast<llvm::GlobalVariable>(KernelHandles[CGF.CurFn->getName()])) {
GV->setLinkage(CGF.CurFn->getLinkage());
GV->setInitializer(CGF.CurFn);
}
@@ -319,29 +327,40 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
// args, allocate a single pointer so we still have a valid pointer to the
// argument array that we can pass to runtime, even if it will be unused.
Address KernelArgs = CGF.CreateTempAlloca(
- VoidPtrTy, CharUnits::fromQuantity(16), "kernel_args",
+ PtrTy, CharUnits::fromQuantity(16), "kernel_args",
llvm::ConstantInt::get(SizeTy, std::max<size_t>(1, Args.size())));
// Store pointers to the arguments in a locally allocated launch_args.
for (unsigned i = 0; i < Args.size(); ++i) {
llvm::Value* VarPtr = CGF.GetAddrOfLocalVar(Args[i]).getPointer();
- llvm::Value *VoidVarPtr = CGF.Builder.CreatePointerCast(VarPtr, VoidPtrTy);
+ llvm::Value *VoidVarPtr = CGF.Builder.CreatePointerCast(VarPtr, PtrTy);
CGF.Builder.CreateDefaultAlignedStore(
VoidVarPtr,
- CGF.Builder.CreateConstGEP1_32(VoidPtrTy, KernelArgs.getPointer(), i));
+ CGF.Builder.CreateConstGEP1_32(PtrTy, KernelArgs.getPointer(), i));
}
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
// Lookup cudaLaunchKernel/hipLaunchKernel function.
+ // HIP kernel launching API name depends on -fgpu-default-stream option. For
+ // the default value 'legacy', it is hipLaunchKernel. For 'per-thread',
+ // it is hipLaunchKernel_spt.
// cudaError_t cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim,
// void **args, size_t sharedMem,
// cudaStream_t stream);
- // hipError_t hipLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim,
- // void **args, size_t sharedMem,
- // hipStream_t stream);
+ // hipError_t hipLaunchKernel[_spt](const void *func, dim3 gridDim,
+ // dim3 blockDim, void **args,
+ // size_t sharedMem, hipStream_t stream);
TranslationUnitDecl *TUDecl = CGM.getContext().getTranslationUnitDecl();
DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
- auto LaunchKernelName = addPrefixToName("LaunchKernel");
+ std::string KernelLaunchAPI = "LaunchKernel";
+ if (CGF.getLangOpts().GPUDefaultStream ==
+ LangOptions::GPUDefaultStreamKind::PerThread) {
+ if (CGF.getLangOpts().HIP)
+ KernelLaunchAPI = KernelLaunchAPI + "_spt";
+ else if (CGF.getLangOpts().CUDA)
+ KernelLaunchAPI = KernelLaunchAPI + "_ptsz";
+ }
+ auto LaunchKernelName = addPrefixToName(KernelLaunchAPI);
IdentifierInfo &cudaLaunchKernelII =
CGM.getContext().Idents.get(LaunchKernelName);
FunctionDecl *cudaLaunchKernelFD = nullptr;
@@ -364,8 +383,7 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
CGF.CreateMemTemp(Dim3Ty, CharUnits::fromQuantity(8), "block_dim");
Address ShmemSize =
CGF.CreateTempAlloca(SizeTy, CGM.getSizeAlign(), "shmem_size");
- Address Stream =
- CGF.CreateTempAlloca(VoidPtrTy, CGM.getPointerAlign(), "stream");
+ Address Stream = CGF.CreateTempAlloca(PtrTy, CGM.getPointerAlign(), "stream");
llvm::FunctionCallee cudaPopConfigFn = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy,
{/*gridDim=*/GridDim.getType(),
@@ -381,7 +399,7 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
// Emit the call to cudaLaunch
llvm::Value *Kernel =
- CGF.Builder.CreatePointerCast(KernelHandles[CGF.CurFn], VoidPtrTy);
+ CGF.Builder.CreatePointerCast(KernelHandles[CGF.CurFn->getName()], PtrTy);
CallArgList LaunchKernelArgs;
LaunchKernelArgs.add(RValue::get(Kernel),
cudaLaunchKernelFD->getParamDecl(0)->getType());
@@ -397,7 +415,7 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
QualType QT = cudaLaunchKernelFD->getType();
QualType CQT = QT.getCanonicalType();
llvm::Type *Ty = CGM.getTypes().ConvertType(CQT);
- llvm::FunctionType *FTy = dyn_cast<llvm::FunctionType>(Ty);
+ llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
const CGFunctionInfo &FI =
CGM.getTypes().arrangeFunctionDeclaration(cudaLaunchKernelFD);
@@ -421,7 +439,7 @@ void CGNVCUDARuntime::emitDeviceStubBodyLegacy(CodeGenFunction &CGF,
Offset = Offset.alignTo(TInfo.Align);
llvm::Value *Args[] = {
CGF.Builder.CreatePointerCast(CGF.GetAddrOfLocalVar(A).getPointer(),
- VoidPtrTy),
+ PtrTy),
llvm::ConstantInt::get(SizeTy, TInfo.Width.getQuantity()),
llvm::ConstantInt::get(SizeTy, Offset.getQuantity()),
};
@@ -437,7 +455,7 @@ void CGNVCUDARuntime::emitDeviceStubBodyLegacy(CodeGenFunction &CGF,
// Emit the call to cudaLaunch
llvm::FunctionCallee cudaLaunchFn = getLaunchFn();
llvm::Value *Arg =
- CGF.Builder.CreatePointerCast(KernelHandles[CGF.CurFn], CharPtrTy);
+ CGF.Builder.CreatePointerCast(KernelHandles[CGF.CurFn->getName()], PtrTy);
CGF.EmitRuntimeCallOrInvoke(cudaLaunchFn, Arg);
CGF.EmitBranch(EndBlock);
@@ -473,7 +491,7 @@ static void replaceManagedVar(llvm::GlobalVariable *Var,
// variable with instructions.
for (auto &&Op : WorkItem) {
auto *CE = cast<llvm::ConstantExpr>(Op);
- auto *NewInst = llvm::createReplacementInstr(CE, I);
+ auto *NewInst = CE->getAsInstruction(I);
NewInst->replaceUsesOfWith(OldV, NewV);
OldV = CE;
NewV = NewInst;
@@ -515,8 +533,8 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
// void __cudaRegisterFunction(void **, const char *, char *, const char *,
// int, uint3*, uint3*, dim3*, dim3*, int*)
llvm::Type *RegisterFuncParams[] = {
- VoidPtrPtrTy, CharPtrTy, CharPtrTy, CharPtrTy, IntTy,
- VoidPtrTy, VoidPtrTy, VoidPtrTy, VoidPtrTy, IntTy->getPointerTo()};
+ PtrTy, PtrTy, PtrTy, PtrTy, IntTy,
+ PtrTy, PtrTy, PtrTy, PtrTy, llvm::PointerType::getUnqual(Context)};
llvm::FunctionCallee RegisterFunc = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, RegisterFuncParams, false),
addUnderscoredPrefixToName("RegisterFunction"));
@@ -528,10 +546,10 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
for (auto &&I : EmittedKernels) {
llvm::Constant *KernelName =
makeConstantString(getDeviceSideName(cast<NamedDecl>(I.D)));
- llvm::Constant *NullPtr = llvm::ConstantPointerNull::get(VoidPtrTy);
+ llvm::Constant *NullPtr = llvm::ConstantPointerNull::get(PtrTy);
llvm::Value *Args[] = {
&GpuBinaryHandlePtr,
- Builder.CreateBitCast(KernelHandles[I.Kernel], VoidPtrTy),
+ KernelHandles[I.Kernel->getName()],
KernelName,
KernelName,
llvm::ConstantInt::get(IntTy, -1),
@@ -539,7 +557,7 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
NullPtr,
NullPtr,
NullPtr,
- llvm::ConstantPointerNull::get(IntTy->getPointerTo())};
+ llvm::ConstantPointerNull::get(llvm::PointerType::getUnqual(Context))};
Builder.CreateCall(RegisterFunc, Args);
}
@@ -551,16 +569,15 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
// void __cudaRegisterVar(void **, char *, char *, const char *,
// int, int, int, int)
- llvm::Type *RegisterVarParams[] = {VoidPtrPtrTy, CharPtrTy, CharPtrTy,
- CharPtrTy, IntTy, VarSizeTy,
- IntTy, IntTy};
+ llvm::Type *RegisterVarParams[] = {PtrTy, PtrTy, PtrTy, PtrTy,
+ IntTy, VarSizeTy, IntTy, IntTy};
llvm::FunctionCallee RegisterVar = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(VoidTy, RegisterVarParams, false),
addUnderscoredPrefixToName("RegisterVar"));
// void __hipRegisterManagedVar(void **, char *, char *, const char *,
// size_t, unsigned)
- llvm::Type *RegisterManagedVarParams[] = {VoidPtrPtrTy, CharPtrTy, CharPtrTy,
- CharPtrTy, VarSizeTy, IntTy};
+ llvm::Type *RegisterManagedVarParams[] = {PtrTy, PtrTy, PtrTy,
+ PtrTy, VarSizeTy, IntTy};
llvm::FunctionCallee RegisterManagedVar = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(VoidTy, RegisterManagedVarParams, false),
addUnderscoredPrefixToName("RegisterManagedVar"));
@@ -568,16 +585,13 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
// const void **, const char *, int, int);
llvm::FunctionCallee RegisterSurf = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(
- VoidTy, {VoidPtrPtrTy, VoidPtrTy, CharPtrTy, CharPtrTy, IntTy, IntTy},
- false),
+ VoidTy, {PtrTy, PtrTy, PtrTy, PtrTy, IntTy, IntTy}, false),
addUnderscoredPrefixToName("RegisterSurface"));
// void __cudaRegisterTexture(void **, const struct textureReference *,
// const void **, const char *, int, int, int)
llvm::FunctionCallee RegisterTex = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(
- VoidTy,
- {VoidPtrPtrTy, VoidPtrTy, CharPtrTy, CharPtrTy, IntTy, IntTy, IntTy},
- false),
+ VoidTy, {PtrTy, PtrTy, PtrTy, PtrTy, IntTy, IntTy, IntTy}, false),
addUnderscoredPrefixToName("RegisterTexture"));
for (auto &&Info : DeviceVars) {
llvm::GlobalVariable *Var = Info.Var;
@@ -590,7 +604,7 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
uint64_t VarSize =
CGM.getDataLayout().getTypeAllocSize(Var->getValueType());
if (Info.Flags.isManaged()) {
- auto ManagedVar = new llvm::GlobalVariable(
+ auto *ManagedVar = new llvm::GlobalVariable(
CGM.getModule(), Var->getType(),
/*isConstant=*/false, Var->getLinkage(),
/*Init=*/Var->isDeclaration()
@@ -606,8 +620,8 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
replaceManagedVar(Var, ManagedVar);
llvm::Value *Args[] = {
&GpuBinaryHandlePtr,
- Builder.CreateBitCast(ManagedVar, VoidPtrTy),
- Builder.CreateBitCast(Var, VoidPtrTy),
+ ManagedVar,
+ Var,
VarName,
llvm::ConstantInt::get(VarSizeTy, VarSize),
llvm::ConstantInt::get(IntTy, Var->getAlignment())};
@@ -616,7 +630,7 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
} else {
llvm::Value *Args[] = {
&GpuBinaryHandlePtr,
- Builder.CreateBitCast(Var, VoidPtrTy),
+ Var,
VarName,
VarName,
llvm::ConstantInt::get(IntTy, Info.Flags.isExtern()),
@@ -630,15 +644,15 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
case DeviceVarFlags::Surface:
Builder.CreateCall(
RegisterSurf,
- {&GpuBinaryHandlePtr, Builder.CreateBitCast(Var, VoidPtrTy), VarName,
- VarName, llvm::ConstantInt::get(IntTy, Info.Flags.getSurfTexType()),
+ {&GpuBinaryHandlePtr, Var, VarName, VarName,
+ llvm::ConstantInt::get(IntTy, Info.Flags.getSurfTexType()),
llvm::ConstantInt::get(IntTy, Info.Flags.isExtern())});
break;
case DeviceVarFlags::Texture:
Builder.CreateCall(
RegisterTex,
- {&GpuBinaryHandlePtr, Builder.CreateBitCast(Var, VoidPtrTy), VarName,
- VarName, llvm::ConstantInt::get(IntTy, Info.Flags.getSurfTexType()),
+ {&GpuBinaryHandlePtr, Var, VarName, VarName,
+ llvm::ConstantInt::get(IntTy, Info.Flags.getSurfTexType()),
llvm::ConstantInt::get(IntTy, Info.Flags.isNormalized()),
llvm::ConstantInt::get(IntTy, Info.Flags.isExtern())});
break;
@@ -653,7 +667,7 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
///
/// For CUDA:
/// \code
-/// void __cuda_module_ctor(void*) {
+/// void __cuda_module_ctor() {
/// Handle = __cudaRegisterFatBinary(GpuBinaryBlob);
/// __cuda_register_globals(Handle);
/// }
@@ -661,7 +675,7 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
///
/// For HIP:
/// \code
-/// void __hip_module_ctor(void*) {
+/// void __hip_module_ctor() {
/// if (__hip_gpubin_handle == 0) {
/// __hip_gpubin_handle = __hipRegisterFatBinary(GpuBinaryBlob);
/// __hip_register_globals(__hip_gpubin_handle);
@@ -688,11 +702,11 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// void ** __{cuda|hip}RegisterFatBinary(void *);
llvm::FunctionCallee RegisterFatbinFunc = CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(VoidPtrPtrTy, VoidPtrTy, false),
+ llvm::FunctionType::get(PtrTy, PtrTy, false),
addUnderscoredPrefixToName("RegisterFatBinary"));
// struct { int magic, int version, void * gpu_binary, void * dont_care };
llvm::StructType *FatbinWrapperTy =
- llvm::StructType::get(IntTy, IntTy, VoidPtrTy, VoidPtrTy);
+ llvm::StructType::get(IntTy, IntTy, PtrTy, PtrTy);
// Register GPU binary with the CUDA runtime, store returned handle in a
// global variable and save a reference in GpuBinaryHandle to be cleaned up
@@ -700,8 +714,9 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// handle so CUDA runtime can figure out what to call on the GPU side.
std::unique_ptr<llvm::MemoryBuffer> CudaGpuBinary = nullptr;
if (!CudaGpuBinaryFileName.empty()) {
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> CudaGpuBinaryOrErr =
- llvm::MemoryBuffer::getFileOrSTDIN(CudaGpuBinaryFileName);
+ auto VFS = CGM.getFileSystem();
+ auto CudaGpuBinaryOrErr =
+ VFS->getBufferForFile(CudaGpuBinaryFileName, -1, false);
if (std::error_code EC = CudaGpuBinaryOrErr.getError()) {
CGM.getDiags().Report(diag::err_cannot_open_file)
<< CudaGpuBinaryFileName << EC.message();
@@ -711,7 +726,7 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
}
llvm::Function *ModuleCtorFunc = llvm::Function::Create(
- llvm::FunctionType::get(VoidTy, VoidPtrTy, false),
+ llvm::FunctionType::get(VoidTy, false),
llvm::GlobalValue::InternalLinkage,
addUnderscoredPrefixToName("_module_ctor"), &TheModule);
llvm::BasicBlock *CtorEntryBB =
@@ -737,9 +752,8 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// If fatbin is available from early finalization, create a string
// literal containing the fat binary loaded from the given file.
const unsigned HIPCodeObjectAlign = 4096;
- FatBinStr =
- makeConstantString(std::string(CudaGpuBinary->getBuffer()), "",
- FatbinConstantName, HIPCodeObjectAlign);
+ FatBinStr = makeConstantArray(std::string(CudaGpuBinary->getBuffer()), "",
+ FatbinConstantName, HIPCodeObjectAlign);
} else {
// If fatbin is not available, create an external symbol
// __hip_fatbin in section .hip_fatbin. The external symbol is supposed
@@ -773,8 +787,8 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// For CUDA, create a string literal containing the fat binary loaded from
// the given file.
- FatBinStr = makeConstantString(std::string(CudaGpuBinary->getBuffer()), "",
- FatbinConstantName, 8);
+ FatBinStr = makeConstantArray(std::string(CudaGpuBinary->getBuffer()), "",
+ FatbinConstantName, 8);
FatMagic = CudaFatMagic;
}
@@ -788,7 +802,7 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// Data.
Values.add(FatBinStr);
// Unused in fatbin v1.
- Values.add(llvm::ConstantPointerNull::get(VoidPtrTy));
+ Values.add(llvm::ConstantPointerNull::get(PtrTy));
llvm::GlobalVariable *FatbinWrapper = Values.finishAndCreateGlobal(
addUnderscoredPrefixToName("_fatbin_wrapper"), CGM.getPointerAlign(),
/*constant*/ true);
@@ -811,19 +825,21 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// The name, size, and initialization pattern of this variable is part
// of HIP ABI.
GpuBinaryHandle = new llvm::GlobalVariable(
- TheModule, VoidPtrPtrTy, /*isConstant=*/false,
- Linkage,
- /*Initializer=*/llvm::ConstantPointerNull::get(VoidPtrPtrTy),
+ TheModule, PtrTy, /*isConstant=*/false, Linkage,
+ /*Initializer=*/llvm::ConstantPointerNull::get(PtrTy),
"__hip_gpubin_handle");
+ if (Linkage == llvm::GlobalValue::LinkOnceAnyLinkage)
+ GpuBinaryHandle->setComdat(
+ CGM.getModule().getOrInsertComdat(GpuBinaryHandle->getName()));
GpuBinaryHandle->setAlignment(CGM.getPointerAlign().getAsAlign());
// Prevent the weak symbol in different shared libraries being merged.
if (Linkage != llvm::GlobalValue::InternalLinkage)
GpuBinaryHandle->setVisibility(llvm::GlobalValue::HiddenVisibility);
Address GpuBinaryAddr(
- GpuBinaryHandle,
+ GpuBinaryHandle, PtrTy,
CharUnits::fromQuantity(GpuBinaryHandle->getAlignment()));
{
- auto HandleValue = CtorBuilder.CreateLoad(GpuBinaryAddr);
+ auto *HandleValue = CtorBuilder.CreateLoad(GpuBinaryAddr);
llvm::Constant *Zero =
llvm::Constant::getNullValue(HandleValue->getType());
llvm::Value *EQZero = CtorBuilder.CreateICmpEQ(HandleValue, Zero);
@@ -832,9 +848,8 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
{
CtorBuilder.SetInsertPoint(IfBlock);
// GpuBinaryHandle = __hipRegisterFatBinary(&FatbinWrapper);
- llvm::CallInst *RegisterFatbinCall = CtorBuilder.CreateCall(
- RegisterFatbinFunc,
- CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy));
+ llvm::CallInst *RegisterFatbinCall =
+ CtorBuilder.CreateCall(RegisterFatbinFunc, FatbinWrapper);
CtorBuilder.CreateStore(RegisterFatbinCall, GpuBinaryAddr);
CtorBuilder.CreateBr(ExitBlock);
}
@@ -842,7 +857,7 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
CtorBuilder.SetInsertPoint(ExitBlock);
// Call __hip_register_globals(GpuBinaryHandle);
if (RegisterGlobalsFunc) {
- auto HandleValue = CtorBuilder.CreateLoad(GpuBinaryAddr);
+ auto *HandleValue = CtorBuilder.CreateLoad(GpuBinaryAddr);
CtorBuilder.CreateCall(RegisterGlobalsFunc, HandleValue);
}
}
@@ -850,12 +865,11 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
// Register binary with CUDA runtime. This is substantially different in
// default mode vs. separate compilation!
// GpuBinaryHandle = __cudaRegisterFatBinary(&FatbinWrapper);
- llvm::CallInst *RegisterFatbinCall = CtorBuilder.CreateCall(
- RegisterFatbinFunc,
- CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy));
+ llvm::CallInst *RegisterFatbinCall =
+ CtorBuilder.CreateCall(RegisterFatbinFunc, FatbinWrapper);
GpuBinaryHandle = new llvm::GlobalVariable(
- TheModule, VoidPtrPtrTy, false, llvm::GlobalValue::InternalLinkage,
- llvm::ConstantPointerNull::get(VoidPtrPtrTy), "__cuda_gpubin_handle");
+ TheModule, PtrTy, false, llvm::GlobalValue::InternalLinkage,
+ llvm::ConstantPointerNull::get(PtrTy), "__cuda_gpubin_handle");
GpuBinaryHandle->setAlignment(CGM.getPointerAlign().getAsAlign());
CtorBuilder.CreateAlignedStore(RegisterFatbinCall, GpuBinaryHandle,
CGM.getPointerAlign());
@@ -869,7 +883,7 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
CudaFeature::CUDA_USES_FATBIN_REGISTER_END)) {
// void __cudaRegisterFatBinaryEnd(void **);
llvm::FunctionCallee RegisterFatbinEndFunc = CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
+ llvm::FunctionType::get(VoidTy, PtrTy, false),
"__cudaRegisterFatBinaryEnd");
CtorBuilder.CreateCall(RegisterFatbinEndFunc, RegisterFatbinCall);
}
@@ -878,8 +892,8 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
SmallString<64> ModuleID;
llvm::raw_svector_ostream OS(ModuleID);
OS << ModuleIDPrefix << llvm::format("%" PRIx64, FatbinWrapper->getGUID());
- llvm::Constant *ModuleIDConstant = makeConstantString(
- std::string(ModuleID.str()), "", ModuleIDSectionName, 32);
+ llvm::Constant *ModuleIDConstant = makeConstantArray(
+ std::string(ModuleID), "", ModuleIDSectionName, 32, /*AddNull=*/true);
// Create an alias for the FatbinWrapper that nvcc will look for.
llvm::GlobalAlias::create(llvm::GlobalValue::ExternalLinkage,
@@ -893,9 +907,7 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
getRegisterLinkedBinaryFnTy(), RegisterLinkedBinaryName);
assert(RegisterGlobalsFunc && "Expecting at least dummy function!");
- llvm::Value *Args[] = {RegisterGlobalsFunc,
- CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy),
- ModuleIDConstant,
+ llvm::Value *Args[] = {RegisterGlobalsFunc, FatbinWrapper, ModuleIDConstant,
makeDummyFunction(getCallbackFnTy())};
CtorBuilder.CreateCall(RegisterLinkedBinaryFunc, Args);
}
@@ -922,14 +934,14 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
///
/// For CUDA:
/// \code
-/// void __cuda_module_dtor(void*) {
+/// void __cuda_module_dtor() {
/// __cudaUnregisterFatBinary(Handle);
/// }
/// \endcode
///
/// For HIP:
/// \code
-/// void __hip_module_dtor(void*) {
+/// void __hip_module_dtor() {
/// if (__hip_gpubin_handle) {
/// __hipUnregisterFatBinary(__hip_gpubin_handle);
/// __hip_gpubin_handle = 0;
@@ -943,11 +955,11 @@ llvm::Function *CGNVCUDARuntime::makeModuleDtorFunction() {
// void __cudaUnregisterFatBinary(void ** handle);
llvm::FunctionCallee UnregisterFatbinFunc = CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false),
+ llvm::FunctionType::get(VoidTy, PtrTy, false),
addUnderscoredPrefixToName("UnregisterFatBinary"));
llvm::Function *ModuleDtorFunc = llvm::Function::Create(
- llvm::FunctionType::get(VoidTy, VoidPtrTy, false),
+ llvm::FunctionType::get(VoidTy, false),
llvm::GlobalValue::InternalLinkage,
addUnderscoredPrefixToName("_module_dtor"), &TheModule);
@@ -956,9 +968,10 @@ llvm::Function *CGNVCUDARuntime::makeModuleDtorFunction() {
CGBuilderTy DtorBuilder(CGM, Context);
DtorBuilder.SetInsertPoint(DtorEntryBB);
- Address GpuBinaryAddr(GpuBinaryHandle, CharUnits::fromQuantity(
- GpuBinaryHandle->getAlignment()));
- auto HandleValue = DtorBuilder.CreateLoad(GpuBinaryAddr);
+ Address GpuBinaryAddr(
+ GpuBinaryHandle, GpuBinaryHandle->getValueType(),
+ CharUnits::fromQuantity(GpuBinaryHandle->getAlignment()));
+ auto *HandleValue = DtorBuilder.CreateLoad(GpuBinaryAddr);
// There is only one HIP fat binary per linked module, however there are
// multiple destructor functions. Make sure the fat binary is unregistered
// only once.
@@ -1071,7 +1084,7 @@ void CGNVCUDARuntime::transformManagedVars() {
llvm::GlobalVariable *Var = Info.Var;
if (Info.Flags.getKind() == DeviceVarFlags::Variable &&
Info.Flags.isManaged()) {
- auto ManagedVar = new llvm::GlobalVariable(
+ auto *ManagedVar = new llvm::GlobalVariable(
CGM.getModule(), Var->getType(),
/*isConstant=*/false, Var->getLinkage(),
/*Init=*/Var->isDeclaration()
@@ -1097,6 +1110,53 @@ void CGNVCUDARuntime::transformManagedVars() {
}
}
+// Creates offloading entries for all the kernels and globals that must be
+// registered. The linker will provide a pointer to this section so we can
+// register the symbols with the linked device image.
+void CGNVCUDARuntime::createOffloadingEntries() {
+ StringRef Section = CGM.getLangOpts().HIP ? "hip_offloading_entries"
+ : "cuda_offloading_entries";
+ llvm::Module &M = CGM.getModule();
+ for (KernelInfo &I : EmittedKernels)
+ llvm::offloading::emitOffloadingEntry(
+ M, KernelHandles[I.Kernel->getName()],
+ getDeviceSideName(cast<NamedDecl>(I.D)), /*Flags=*/0, /*Data=*/0,
+ llvm::offloading::OffloadGlobalEntry, Section);
+
+ for (VarInfo &I : DeviceVars) {
+ uint64_t VarSize =
+ CGM.getDataLayout().getTypeAllocSize(I.Var->getValueType());
+ int32_t Flags =
+ (I.Flags.isExtern()
+ ? static_cast<int32_t>(llvm::offloading::OffloadGlobalExtern)
+ : 0) |
+ (I.Flags.isConstant()
+ ? static_cast<int32_t>(llvm::offloading::OffloadGlobalConstant)
+ : 0) |
+ (I.Flags.isNormalized()
+ ? static_cast<int32_t>(llvm::offloading::OffloadGlobalNormalized)
+ : 0);
+ if (I.Flags.getKind() == DeviceVarFlags::Variable) {
+ llvm::offloading::emitOffloadingEntry(
+ M, I.Var, getDeviceSideName(I.D), VarSize,
+ (I.Flags.isManaged() ? llvm::offloading::OffloadGlobalManagedEntry
+ : llvm::offloading::OffloadGlobalEntry) |
+ Flags,
+ /*Data=*/0, Section);
+ } else if (I.Flags.getKind() == DeviceVarFlags::Surface) {
+ llvm::offloading::emitOffloadingEntry(
+ M, I.Var, getDeviceSideName(I.D), VarSize,
+ llvm::offloading::OffloadGlobalSurfaceEntry | Flags,
+ I.Flags.getSurfTexType(), Section);
+ } else if (I.Flags.getKind() == DeviceVarFlags::Texture) {
+ llvm::offloading::emitOffloadingEntry(
+ M, I.Var, getDeviceSideName(I.D), VarSize,
+ llvm::offloading::OffloadGlobalTextureEntry | Flags,
+ I.Flags.getSurfTexType(), Section);
+ }
+ }
+}
+
// Returns module constructor to be added.
llvm::Function *CGNVCUDARuntime::finalizeModule() {
if (CGM.getLangOpts().CUDAIsDevice) {
@@ -1125,17 +1185,37 @@ llvm::Function *CGNVCUDARuntime::finalizeModule() {
}
return nullptr;
}
- return makeModuleCtorFunction();
+ if (CGM.getLangOpts().OffloadingNewDriver && RelocatableDeviceCode)
+ createOffloadingEntries();
+ else
+ return makeModuleCtorFunction();
+
+ return nullptr;
}
llvm::GlobalValue *CGNVCUDARuntime::getKernelHandle(llvm::Function *F,
GlobalDecl GD) {
- auto Loc = KernelHandles.find(F);
- if (Loc != KernelHandles.end())
- return Loc->second;
+ auto Loc = KernelHandles.find(F->getName());
+ if (Loc != KernelHandles.end()) {
+ auto OldHandle = Loc->second;
+ if (KernelStubs[OldHandle] == F)
+ return OldHandle;
+
+ // We've found the function name, but F itself has changed, so we need to
+ // update the references.
+ if (CGM.getLangOpts().HIP) {
+ // For HIP compilation the handle itself does not change, so we only need
+ // to update the Stub value.
+ KernelStubs[OldHandle] = F;
+ return OldHandle;
+ }
+ // For non-HIP compilation, erase the old Stub and fall-through to creating
+ // new entries.
+ KernelStubs.erase(OldHandle);
+ }
if (!CGM.getLangOpts().HIP) {
- KernelHandles[F] = F;
+ KernelHandles[F->getName()] = F;
KernelStubs[F] = F;
return F;
}
@@ -1148,7 +1228,11 @@ llvm::GlobalValue *CGNVCUDARuntime::getKernelHandle(llvm::Function *F,
Var->setAlignment(CGM.getPointerAlign().getAsAlign());
Var->setDSOLocal(F->isDSOLocal());
Var->setVisibility(F->getVisibility());
- KernelHandles[F] = Var;
+ auto *FD = cast<FunctionDecl>(GD.getDecl());
+ auto *FT = FD->getPrimaryTemplate();
+ if (!FT || FT->isThisDeclarationADefinition())
+ CGM.maybeSetTrivialComdat(*FD, *Var);
+ KernelHandles[F->getName()] = Var;
KernelStubs[Var] = F;
return Var;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCUDARuntime.h b/contrib/llvm-project/clang/lib/CodeGen/CGCUDARuntime.h
index 1c119dc77fd4..c7af8f1cf0fe 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCUDARuntime.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCUDARuntime.h
@@ -17,6 +17,7 @@
#include "clang/AST/GlobalDecl.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Frontend/Offloading/Utility.h"
#include "llvm/IR/GlobalValue.h"
namespace llvm {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCXX.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCXX.cpp
index 86f548191d65..e95a735f92f7 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCXX.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCXX.cpp
@@ -40,6 +40,11 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
if (getCodeGenOpts().OptimizationLevel == 0)
return true;
+ // Disable this optimization for ARM64EC. FIXME: This probably should work,
+ // but getting the symbol table correct is complicated.
+ if (getTarget().getTriple().isWindowsArm64EC())
+ return true;
+
// If sanitizing memory to check for use-after-dtor, do not emit as
// an alias, unless this class owns no members.
if (getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
@@ -131,17 +136,10 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
if (Replacements.count(MangledName))
return false;
- // Derive the type for the alias.
llvm::Type *AliasValueType = getTypes().GetFunctionType(AliasDecl);
- llvm::PointerType *AliasType = AliasValueType->getPointerTo();
- // Find the referent. Some aliases might require a bitcast, in
- // which case the caller is responsible for ensuring the soundness
- // of these semantics.
- auto *Ref = cast<llvm::GlobalValue>(GetAddrOfGlobal(TargetDecl));
- llvm::Constant *Aliasee = Ref;
- if (Ref->getType() != AliasType)
- Aliasee = llvm::ConstantExpr::getBitCast(Ref, AliasType);
+ // Find the referent.
+ auto *Aliasee = cast<llvm::GlobalValue>(GetAddrOfGlobal(TargetDecl));
// Instead of creating as alias to a linkonce_odr, replace all of the uses
// of the aliasee.
@@ -170,7 +168,7 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
// If we don't have a definition for the destructor yet or the definition is
// avaialable_externally, don't emit an alias. We can't emit aliases to
// declarations; that's just not how aliases work.
- if (Ref->isDeclarationForLinker())
+ if (Aliasee->isDeclarationForLinker())
return true;
// Don't create an alias to a linker weak symbol. This avoids producing
@@ -189,7 +187,8 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
// Switch any previous uses to the alias.
if (Entry) {
- assert(Entry->getType() == AliasType &&
+ assert(Entry->getValueType() == AliasValueType &&
+ Entry->getAddressSpace() == Alias->getAddressSpace() &&
"declaration exists with different type");
Alias->takeName(Entry);
Entry->replaceAllUsesWith(Alias);
@@ -252,8 +251,7 @@ static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF,
"No kext in Microsoft ABI");
CodeGenModule &CGM = CGF.CGM;
llvm::Value *VTable = CGM.getCXXABI().getAddrOfVTable(RD, CharUnits());
- Ty = Ty->getPointerTo();
- VTable = CGF.Builder.CreateBitCast(VTable, Ty->getPointerTo());
+ Ty = llvm::PointerType::getUnqual(CGM.getLLVMContext());
assert(VTable && "BuildVirtualCall = kext vtbl pointer is null");
uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
const VTableLayout &VTLayout = CGM.getItaniumVTableContext().getVTableLayout(RD);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp
index 9714730e3c4b..a8bf57a277e9 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp
@@ -45,13 +45,9 @@ CGCallee CGCXXABI::EmitLoadOfMemberFunctionPointer(
ErrorUnsupportedABI(CGF, "calls through member pointers");
ThisPtrForCall = This.getPointer();
- const FunctionProtoType *FPT =
- MPT->getPointeeType()->getAs<FunctionProtoType>();
- const auto *RD =
- cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
- llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
- CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
- llvm::Constant *FnPtr = llvm::Constant::getNullValue(FTy->getPointerTo());
+ const auto *FPT = MPT->getPointeeType()->castAs<FunctionProtoType>();
+ llvm::Constant *FnPtr = llvm::Constant::getNullValue(
+ llvm::PointerType::getUnqual(CGM.getLLVMContext()));
return CGCallee::forDirect(FnPtr, FPT);
}
@@ -60,8 +56,8 @@ CGCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
Address Base, llvm::Value *MemPtr,
const MemberPointerType *MPT) {
ErrorUnsupportedABI(CGF, "loads of member pointers");
- llvm::Type *Ty = CGF.ConvertType(MPT->getPointeeType())
- ->getPointerTo(Base.getAddressSpace());
+ llvm::Type *Ty =
+ llvm::PointerType::get(CGF.getLLVMContext(), Base.getAddressSpace());
return llvm::Constant::getNullValue(Ty);
}
@@ -124,10 +120,10 @@ void CGCXXABI::buildThisParam(CodeGenFunction &CGF, FunctionArgList &params) {
// FIXME: I'm not entirely sure I like using a fake decl just for code
// generation. Maybe we can come up with a better way?
- auto *ThisDecl = ImplicitParamDecl::Create(
- CGM.getContext(), nullptr, MD->getLocation(),
- &CGM.getContext().Idents.get("this"), MD->getThisType(),
- ImplicitParamDecl::CXXThis);
+ auto *ThisDecl =
+ ImplicitParamDecl::Create(CGM.getContext(), nullptr, MD->getLocation(),
+ &CGM.getContext().Idents.get("this"),
+ MD->getThisType(), ImplicitParamKind::CXXThis);
params.push_back(ThisDecl);
CGF.CXXABIThisDecl = ThisDecl;
@@ -154,6 +150,51 @@ void CGCXXABI::setCXXABIThisValue(CodeGenFunction &CGF, llvm::Value *ThisPtr) {
CGF.CXXABIThisValue = ThisPtr;
}
+bool CGCXXABI::mayNeedDestruction(const VarDecl *VD) const {
+ if (VD->needsDestruction(getContext()))
+ return true;
+
+ // If the variable has an incomplete class type (or array thereof), it
+ // might need destruction.
+ const Type *T = VD->getType()->getBaseElementTypeUnsafe();
+ if (T->getAs<RecordType>() && T->isIncompleteType())
+ return true;
+
+ return false;
+}
+
+bool CGCXXABI::isEmittedWithConstantInitializer(
+ const VarDecl *VD, bool InspectInitForWeakDef) const {
+ VD = VD->getMostRecentDecl();
+ if (VD->hasAttr<ConstInitAttr>())
+ return true;
+
+ // All later checks examine the initializer specified on the variable. If
+ // the variable is weak, such examination would not be correct.
+ if (!InspectInitForWeakDef && (VD->isWeak() || VD->hasAttr<SelectAnyAttr>()))
+ return false;
+
+ const VarDecl *InitDecl = VD->getInitializingDeclaration();
+ if (!InitDecl)
+ return false;
+
+ // If there's no initializer to run, this is constant initialization.
+ if (!InitDecl->hasInit())
+ return true;
+
+ // If we have the only definition, we don't need a thread wrapper if we
+ // will emit the value as a constant.
+ if (isUniqueGVALinkage(getContext().GetGVALinkageForVariable(VD)))
+ return !mayNeedDestruction(VD) && InitDecl->evaluateValue();
+
+ // Otherwise, we need a thread wrapper unless we know that every
+ // translation unit will emit the value as a constant. We rely on the
+ // variable being constant-initialized in every translation unit if it's
+ // constant-initialized in any translation unit, which isn't actually
+ // guaranteed by the standard but is necessary for sanity.
+ return InitDecl->hasConstantInitialization();
+}
+
void CGCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
RValue RV, QualType ResultType) {
assert(!CGF.hasAggregateEvaluationKind(ResultType) &&
@@ -206,7 +247,7 @@ void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, Address ptr,
llvm::Value *&numElements,
llvm::Value *&allocPtr, CharUnits &cookieSize) {
// Derive a char* in the same address space as the pointer.
- ptr = CGF.Builder.CreateElementBitCast(ptr, CGF.Int8Ty);
+ ptr = ptr.withElementType(CGF.Int8Ty);
// If we don't need an array cookie, bail out early.
if (!requiresArrayCookie(expr, eltTy)) {
@@ -271,8 +312,7 @@ void CGCXXABI::setCXXDestructorDLLStorage(llvm::GlobalValue *GV,
llvm::GlobalValue::LinkageTypes CGCXXABI::getCXXDestructorLinkage(
GVALinkage Linkage, const CXXDestructorDecl *Dtor, CXXDtorType DT) const {
// Delegate back to CGM by default.
- return CGM.getLLVMLinkageForDeclarator(Dtor, Linkage,
- /*IsConstantVariable=*/false);
+ return CGM.getLLVMLinkageForDeclarator(Dtor, Linkage);
}
bool CGCXXABI::NeedsVTTParameter(GlobalDecl GD) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
index ea839db7528e..ad1ad08d0856 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
@@ -31,7 +31,6 @@ class CXXConstructorDecl;
class CXXDestructorDecl;
class CXXMethodDecl;
class CXXRecordDecl;
-class FieldDecl;
class MangleContext;
namespace CodeGen {
@@ -42,6 +41,8 @@ struct CatchTypeInfo;
/// Implements C++ ABI-specific code generation functions.
class CGCXXABI {
+ friend class CodeGenModule;
+
protected:
CodeGenModule &CGM;
std::unique_ptr<MangleContext> MangleCtx;
@@ -57,7 +58,10 @@ protected:
return CGF.CXXABIThisValue;
}
Address getThisAddress(CodeGenFunction &CGF) {
- return Address(CGF.CXXABIThisValue, CGF.CXXABIThisAlignment);
+ return Address(
+ CGF.CXXABIThisValue,
+ CGF.ConvertTypeForMem(CGF.CXXABIThisDecl->getType()->getPointeeType()),
+ CGF.CXXABIThisAlignment);
}
/// Issue a diagnostic about unsupported features in the ABI.
@@ -80,6 +84,18 @@ protected:
ASTContext &getContext() const { return CGM.getContext(); }
+ bool mayNeedDestruction(const VarDecl *VD) const;
+
+ /// Determine whether we will definitely emit this variable with a constant
+ /// initializer, either because the language semantics demand it or because
+ /// we know that the initializer is a constant.
+ // For weak definitions, any initializer available in the current translation
+ // is not necessarily reflective of the initializer used; such initializers
+ // are ignored unless if InspectInitForWeakDef is true.
+ bool
+ isEmittedWithConstantInitializer(const VarDecl *VD,
+ bool InspectInitForWeakDef = false) const;
+
virtual bool requiresArrayCookie(const CXXDeleteExpr *E, QualType eltType);
virtual bool requiresArrayCookie(const CXXNewExpr *E);
@@ -89,6 +105,10 @@ protected:
/// final class will have been taken care of by the caller.
virtual bool isThisCompleteObject(GlobalDecl GD) const = 0;
+ virtual bool constructorsAndDestructorsReturnThis() const {
+ return CGM.getCodeGenOpts().CtorDtorReturnThis;
+ }
+
public:
virtual ~CGCXXABI();
@@ -104,7 +124,13 @@ public:
///
/// There currently is no way to indicate if a destructor returns 'this'
/// when called virtually, and code generation does not support the case.
- virtual bool HasThisReturn(GlobalDecl GD) const { return false; }
+ virtual bool HasThisReturn(GlobalDecl GD) const {
+ if (isa<CXXConstructorDecl>(GD.getDecl()) ||
+ (isa<CXXDestructorDecl>(GD.getDecl()) &&
+ GD.getDtorType() != Dtor_Deleting))
+ return constructorsAndDestructorsReturnThis();
+ return false;
+ }
virtual bool hasMostDerivedReturn(GlobalDecl GD) const { return false; }
@@ -261,16 +287,26 @@ public:
virtual bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
QualType SrcRecordTy) = 0;
+ virtual bool shouldEmitExactDynamicCast(QualType DestRecordTy) = 0;
- virtual llvm::Value *
- EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
- QualType SrcRecordTy, QualType DestTy,
- QualType DestRecordTy, llvm::BasicBlock *CastEnd) = 0;
+ virtual llvm::Value *emitDynamicCastCall(CodeGenFunction &CGF, Address Value,
+ QualType SrcRecordTy,
+ QualType DestTy,
+ QualType DestRecordTy,
+ llvm::BasicBlock *CastEnd) = 0;
- virtual llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF,
+ virtual llvm::Value *emitDynamicCastToVoid(CodeGenFunction &CGF,
Address Value,
- QualType SrcRecordTy,
- QualType DestTy) = 0;
+ QualType SrcRecordTy) = 0;
+
+ /// Emit a dynamic_cast from SrcRecordTy to DestRecordTy. The cast fails if
+ /// the dynamic type of Value is not exactly DestRecordTy.
+ virtual llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address Value,
+ QualType SrcRecordTy,
+ QualType DestTy,
+ QualType DestRecordTy,
+ llvm::BasicBlock *CastSuccess,
+ llvm::BasicBlock *CastFail) = 0;
virtual bool EmitBadCastCall(CodeGenFunction &CGF) = 0;
@@ -353,9 +389,8 @@ public:
/// zero if no specific type is applicable, e.g. if the ABI expects the "this"
/// parameter to point to some artificial offset in a complete object due to
/// vbases being reordered.
- virtual const CXXRecordDecl *
- getThisArgumentTypeForMethod(const CXXMethodDecl *MD) {
- return MD->getParent();
+ virtual const CXXRecordDecl *getThisArgumentTypeForMethod(GlobalDecl GD) {
+ return cast<CXXMethodDecl>(GD.getDecl())->getParent();
}
/// Perform ABI-specific "this" argument adjustment required prior to
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
index 47a4ed35be85..28c211aa631e 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
@@ -13,6 +13,7 @@
#include "CGCall.h"
#include "ABIInfo.h"
+#include "ABIInfoImpl.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
@@ -25,20 +26,22 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CodeGenOptions.h"
-#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
+#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Type.h"
#include "llvm/Transforms/Utils/Local.h"
+#include <optional>
using namespace clang;
using namespace CodeGen;
@@ -61,12 +64,15 @@ unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
// TODO: Add support for __vectorcall to LLVM.
case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
+ case CC_AArch64SVEPCS: return llvm::CallingConv::AArch64_SVE_VectorCall;
+ case CC_AMDGPUKernelCall: return llvm::CallingConv::AMDGPU_KERNEL;
case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
case CC_Swift: return llvm::CallingConv::Swift;
case CC_SwiftAsync: return llvm::CallingConv::SwiftTail;
+ case CC_M68kRTD: return llvm::CallingConv::M68k_RTD;
}
}
@@ -108,8 +114,7 @@ CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
// When translating an unprototyped function type, always use a
// variadic type.
return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
- /*instanceMethod=*/false,
- /*chainCall=*/false, None,
+ FnInfoOpts::None, std::nullopt,
FTNP->getExtInfo(), {}, RequiredArgs(0));
}
@@ -185,10 +190,10 @@ arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
appendParameterTypes(CGT, prefix, paramInfos, FTP);
CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
- return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
- /*chainCall=*/false, prefix,
- FTP->getExtInfo(), paramInfos,
- Required);
+ FnInfoOpts opts =
+ instanceMethod ? FnInfoOpts::IsInstanceMethod : FnInfoOpts::None;
+ return CGT.arrangeLLVMFunctionInfo(resultType, opts, prefix,
+ FTP->getExtInfo(), paramInfos, Required);
}
/// Arrange the argument and result information for a value of the
@@ -227,6 +232,12 @@ static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
if (D->hasAttr<AArch64VectorPcsAttr>())
return CC_AArch64VectorCall;
+ if (D->hasAttr<AArch64SVEPcsAttr>())
+ return CC_AArch64SVEPCS;
+
+ if (D->hasAttr<AMDGPUKernelCallAttr>())
+ return CC_AMDGPUKernelCall;
+
if (D->hasAttr<IntelOclBiccAttr>())
return CC_IntelOclBicc;
@@ -242,6 +253,9 @@ static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
if (D->hasAttr<PreserveAllAttr>())
return CC_PreserveAll;
+ if (D->hasAttr<M68kRTDAttr>())
+ return CC_M68kRTD;
+
return CC_C;
}
@@ -261,7 +275,7 @@ CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
argTypes.push_back(DeriveThisType(RD, MD));
return ::arrangeLLVMFunctionInfo(
- *this, true, argTypes,
+ *this, /*instanceMethod=*/true, argTypes,
FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}
@@ -288,7 +302,7 @@ CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
setCUDAKernelCallingConvention(FT, CGM, MD);
auto prototype = FT.getAs<FunctionProtoType>();
- if (MD->isInstance()) {
+ if (MD->isImplicitObjectMemberFunction()) {
// The abstract case is perfectly fine.
const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
@@ -312,7 +326,9 @@ CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
SmallVector<CanQualType, 16> argTypes;
SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
- argTypes.push_back(DeriveThisType(MD->getParent(), MD));
+
+ const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(GD);
+ argTypes.push_back(DeriveThisType(ThisType, MD));
bool PassParams = true;
@@ -351,9 +367,8 @@ CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
: TheCXXABI.hasMostDerivedReturn(GD)
? CGM.getContext().VoidPtrTy
: Context.VoidTy;
- return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
- /*chainCall=*/false, argTypes, extInfo,
- paramInfos, required);
+ return arrangeLLVMFunctionInfo(resultType, FnInfoOpts::IsInstanceMethod,
+ argTypes, extInfo, paramInfos, required);
}
static SmallVector<CanQualType, 16>
@@ -427,9 +442,9 @@ CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
ArgTypes.size());
}
- return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
- /*chainCall=*/false, ArgTypes, Info,
- ParamInfos, Required);
+
+ return arrangeLLVMFunctionInfo(ResultType, FnInfoOpts::IsInstanceMethod,
+ ArgTypes, Info, ParamInfos, Required);
}
/// Arrange the argument and result information for the declaration or
@@ -437,7 +452,7 @@ CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
- if (MD->isInstance())
+ if (MD->isImplicitObjectMemberFunction())
return arrangeCXXMethodDeclaration(MD);
CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
@@ -448,9 +463,9 @@ CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
// When declaring a function without a prototype, always use a
// non-variadic type.
if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
- return arrangeLLVMFunctionInfo(
- noProto->getReturnType(), /*instanceMethod=*/false,
- /*chainCall=*/false, None, noProto->getExtInfo(), {},RequiredArgs::All);
+ return arrangeLLVMFunctionInfo(noProto->getReturnType(), FnInfoOpts::None,
+ std::nullopt, noProto->getExtInfo(), {},
+ RequiredArgs::All);
}
return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
@@ -475,9 +490,11 @@ const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
QualType receiverType) {
SmallVector<CanQualType, 16> argTys;
- SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
+ SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(
+ MD->isDirectMethod() ? 1 : 2);
argTys.push_back(Context.getCanonicalParamType(receiverType));
- argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
+ if (!MD->isDirectMethod())
+ argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
// FIXME: Kill copy?
for (const auto *I : MD->parameters()) {
argTys.push_back(Context.getCanonicalParamType(I->getType()));
@@ -497,9 +514,9 @@ CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
RequiredArgs required =
(MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
- return arrangeLLVMFunctionInfo(
- GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
- /*chainCall=*/false, argTys, einfo, extParamInfos, required);
+ return arrangeLLVMFunctionInfo(GetReturnType(MD->getReturnType()),
+ FnInfoOpts::None, argTys, einfo, extParamInfos,
+ required);
}
const CGFunctionInfo &
@@ -508,9 +525,8 @@ CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
auto argTypes = getArgTypesForCall(Context, args);
FunctionType::ExtInfo einfo;
- return arrangeLLVMFunctionInfo(
- GetReturnType(returnType), /*instanceMethod=*/false,
- /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
+ return arrangeLLVMFunctionInfo(GetReturnType(returnType), FnInfoOpts::None,
+ argTypes, einfo, {}, RequiredArgs::All);
}
const CGFunctionInfo &
@@ -535,8 +551,7 @@ CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
assert(MD->isVirtual() && "only methods have thunks");
CanQual<FunctionProtoType> FTP = GetFormalType(MD);
CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
- return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
- /*chainCall=*/false, ArgTys,
+ return arrangeLLVMFunctionInfo(Context.VoidTy, FnInfoOpts::None, ArgTys,
FTP->getExtInfo(), {}, RequiredArgs(1));
}
@@ -555,9 +570,8 @@ CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
ArgTys.push_back(Context.IntTy);
CallingConv CC = Context.getDefaultCallingConvention(
/*IsVariadic=*/false, /*IsCXXMethod=*/true);
- return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
- /*chainCall=*/false, ArgTys,
- FunctionType::ExtInfo(CC), {},
+ return arrangeLLVMFunctionInfo(Context.VoidTy, FnInfoOpts::IsInstanceMethod,
+ ArgTys, FunctionType::ExtInfo(CC), {},
RequiredArgs::All);
}
@@ -601,10 +615,10 @@ arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
SmallVector<CanQualType, 16> argTypes;
for (const auto &arg : args)
argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
+ FnInfoOpts opts = chainCall ? FnInfoOpts::IsChainCall : FnInfoOpts::None;
return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
- /*instanceMethod=*/false, chainCall,
- argTypes, fnType->getExtInfo(), paramInfos,
- required);
+ opts, argTypes, fnType->getExtInfo(),
+ paramInfos, required);
}
/// Figure out the rules for calling a function with the given formal
@@ -635,8 +649,8 @@ CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
auto argTypes = getArgTypesForDeclaration(Context, params);
return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
- /*instanceMethod*/ false, /*chainCall*/ false,
- argTypes, proto->getExtInfo(), paramInfos,
+ FnInfoOpts::None, argTypes,
+ proto->getExtInfo(), paramInfos,
RequiredArgs::forPrototypePlus(proto, 1));
}
@@ -647,10 +661,9 @@ CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
SmallVector<CanQualType, 16> argTypes;
for (const auto &Arg : args)
argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
- return arrangeLLVMFunctionInfo(
- GetReturnType(resultType), /*instanceMethod=*/false,
- /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
- /*paramInfos=*/ {}, RequiredArgs::All);
+ return arrangeLLVMFunctionInfo(GetReturnType(resultType), FnInfoOpts::None,
+ argTypes, FunctionType::ExtInfo(),
+ /*paramInfos=*/{}, RequiredArgs::All);
}
const CGFunctionInfo &
@@ -658,17 +671,17 @@ CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
const FunctionArgList &args) {
auto argTypes = getArgTypesForDeclaration(Context, args);
- return arrangeLLVMFunctionInfo(
- GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
- argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
+ return arrangeLLVMFunctionInfo(GetReturnType(resultType), FnInfoOpts::None,
+ argTypes, FunctionType::ExtInfo(), {},
+ RequiredArgs::All);
}
const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
ArrayRef<CanQualType> argTypes) {
- return arrangeLLVMFunctionInfo(
- resultType, /*instanceMethod=*/false, /*chainCall=*/false,
- argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
+ return arrangeLLVMFunctionInfo(resultType, FnInfoOpts::None, argTypes,
+ FunctionType::ExtInfo(), {},
+ RequiredArgs::All);
}
/// Arrange a call to a C++ method, passing the given arguments.
@@ -691,15 +704,15 @@ CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
auto argTypes = getArgTypesForCall(Context, args);
FunctionType::ExtInfo info = proto->getExtInfo();
- return arrangeLLVMFunctionInfo(
- GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
- /*chainCall=*/false, argTypes, info, paramInfos, required);
+ return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
+ FnInfoOpts::IsInstanceMethod, argTypes, info,
+ paramInfos, required);
}
const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
- return arrangeLLVMFunctionInfo(
- getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
- None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
+ return arrangeLLVMFunctionInfo(getContext().VoidTy, FnInfoOpts::None,
+ std::nullopt, FunctionType::ExtInfo(), {},
+ RequiredArgs::All);
}
const CGFunctionInfo &
@@ -719,12 +732,15 @@ CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
auto argTypes = getArgTypesForCall(Context, args);
assert(signature.getRequiredArgs().allowsOptionalArgs());
- return arrangeLLVMFunctionInfo(signature.getReturnType(),
- signature.isInstanceMethod(),
- signature.isChainCall(),
- argTypes,
- signature.getExtInfo(),
- paramInfos,
+ FnInfoOpts opts = FnInfoOpts::None;
+ if (signature.isInstanceMethod())
+ opts |= FnInfoOpts::IsInstanceMethod;
+ if (signature.isChainCall())
+ opts |= FnInfoOpts::IsChainCall;
+ if (signature.isDelegateCall())
+ opts |= FnInfoOpts::IsDelegateCall;
+ return arrangeLLVMFunctionInfo(signature.getReturnType(), opts, argTypes,
+ signature.getExtInfo(), paramInfos,
signature.getRequiredArgs());
}
@@ -737,21 +753,24 @@ void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
-const CGFunctionInfo &
-CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
- bool instanceMethod,
- bool chainCall,
- ArrayRef<CanQualType> argTypes,
- FunctionType::ExtInfo info,
- ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
- RequiredArgs required) {
+const CGFunctionInfo &CodeGenTypes::arrangeLLVMFunctionInfo(
+ CanQualType resultType, FnInfoOpts opts, ArrayRef<CanQualType> argTypes,
+ FunctionType::ExtInfo info,
+ ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
+ RequiredArgs required) {
assert(llvm::all_of(argTypes,
[](CanQualType T) { return T.isCanonicalAsParam(); }));
// Lookup or create unique function info.
llvm::FoldingSetNodeID ID;
- CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
- required, resultType, argTypes);
+ bool isInstanceMethod =
+ (opts & FnInfoOpts::IsInstanceMethod) == FnInfoOpts::IsInstanceMethod;
+ bool isChainCall =
+ (opts & FnInfoOpts::IsChainCall) == FnInfoOpts::IsChainCall;
+ bool isDelegateCall =
+ (opts & FnInfoOpts::IsDelegateCall) == FnInfoOpts::IsDelegateCall;
+ CGFunctionInfo::Profile(ID, isInstanceMethod, isChainCall, isDelegateCall,
+ info, paramInfos, required, resultType, argTypes);
void *insertPos = nullptr;
CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
@@ -761,8 +780,8 @@ CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
// Construct the function info. We co-allocate the ArgInfos.
- FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
- paramInfos, resultType, argTypes, required);
+ FI = CGFunctionInfo::create(CC, isInstanceMethod, isChainCall, isDelegateCall,
+ info, paramInfos, resultType, argTypes, required);
FunctionInfos.InsertNode(FI, insertPos);
bool inserted = FunctionsBeingProcessed.insert(FI).second;
@@ -797,9 +816,8 @@ CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
return *FI;
}
-CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
- bool instanceMethod,
- bool chainCall,
+CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC, bool instanceMethod,
+ bool chainCall, bool delegateCall,
const FunctionType::ExtInfo &info,
ArrayRef<ExtParameterInfo> paramInfos,
CanQualType resultType,
@@ -819,6 +837,7 @@ CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
FI->ASTCallingConvention = info.getCC();
FI->InstanceMethod = instanceMethod;
FI->ChainCall = chainCall;
+ FI->DelegateCall = delegateCall;
FI->CmseNSCall = info.getCmseNSCall();
FI->NoReturn = info.getNoReturn();
FI->ReturnsRetained = info.getProducesResult();
@@ -832,6 +851,7 @@ CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
FI->NumArgs = argTypes.size();
FI->HasExtParameterInfos = !paramInfos.empty();
FI->getArgsBuffer()[0].type = resultType;
+ FI->MaxVectorWidth = 0;
for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
FI->getArgsBuffer()[i + 1].type = argTypes[i];
for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
@@ -941,8 +961,7 @@ getTypeExpansion(QualType Ty, const ASTContext &Context) {
if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
assert(!CXXRD->isDynamicClass() &&
"cannot expand vtable pointers in dynamic classes");
- for (const CXXBaseSpecifier &BS : CXXRD->bases())
- Bases.push_back(&BS);
+ llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));
}
for (const auto *FD : RD->fields()) {
@@ -1011,11 +1030,12 @@ static void forConstantArrayExpansion(CodeGenFunction &CGF,
CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
CharUnits EltAlign =
BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
+ llvm::Type *EltTy = CGF.ConvertTypeForMem(CAE->EltTy);
for (int i = 0, n = CAE->NumElts; i < n; i++) {
llvm::Value *EltAddr = CGF.Builder.CreateConstGEP2_32(
BaseAddr.getElementType(), BaseAddr.getPointer(), 0, i);
- Fn(Address(EltAddr, EltAlign));
+ Fn(Address(EltAddr, EltTy, EltAlign));
}
}
@@ -1056,10 +1076,19 @@ void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
// Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
// primitive store.
assert(isa<NoExpansion>(Exp.get()));
- if (LV.isBitField())
- EmitStoreThroughLValue(RValue::get(&*AI++), LV);
- else
- EmitStoreOfScalar(&*AI++, LV);
+ llvm::Value *Arg = &*AI++;
+ if (LV.isBitField()) {
+ EmitStoreThroughLValue(RValue::get(Arg), LV);
+ } else {
+ // TODO: currently there are some places are inconsistent in what LLVM
+ // pointer type they use (see D118744). Once clang uses opaque pointers
+ // all LLVM pointer types will be the same and we can remove this check.
+ if (Arg->getType()->isPointerTy()) {
+ Address Addr = LV.getAddress(*this);
+ Arg = Builder.CreateBitCast(Arg, Addr.getElementType());
+ }
+ EmitStoreOfScalar(Arg, LV);
+ }
}
}
@@ -1125,7 +1154,7 @@ static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
CharUnits MinAlign,
const Twine &Name = "tmp") {
// Don't use an alignment that's worse than what LLVM would prefer.
- auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
+ auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(Ty);
CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce");
@@ -1238,7 +1267,7 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
Src = EnterStructPointerForCoercedAccess(Src, SrcSTy,
- DstSize.getFixedSize(), CGF);
+ DstSize.getFixedValue(), CGF);
SrcTy = Src.getElementType();
}
@@ -1254,29 +1283,42 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
// If load is legal, just bitcast the src pointer.
if (!SrcSize.isScalable() && !DstSize.isScalable() &&
- SrcSize.getFixedSize() >= DstSize.getFixedSize()) {
+ SrcSize.getFixedValue() >= DstSize.getFixedValue()) {
// Generally SrcSize is never greater than DstSize, since this means we are
// losing bits. However, this can happen in cases where the structure has
// additional padding, for example due to a user specified alignment.
//
// FIXME: Assert that we aren't truncating non-padding bits when have access
// to that information.
- Src = CGF.Builder.CreateBitCast(Src,
- Ty->getPointerTo(Src.getAddressSpace()));
+ Src = Src.withElementType(Ty);
return CGF.Builder.CreateLoad(Src);
}
// If coercing a fixed vector to a scalable vector for ABI compatibility, and
- // the types match, use the llvm.experimental.vector.insert intrinsic to
- // perform the conversion.
+ // the types match, use the llvm.vector.insert intrinsic to perform the
+ // conversion.
if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) {
if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
+ // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate
+ // vector, use a vector insert and bitcast the result.
+ bool NeedsBitcast = false;
+ auto PredType =
+ llvm::ScalableVectorType::get(CGF.Builder.getInt1Ty(), 16);
+ llvm::Type *OrigType = Ty;
+ if (ScalableDst == PredType &&
+ FixedSrc->getElementType() == CGF.Builder.getInt8Ty()) {
+ ScalableDst = llvm::ScalableVectorType::get(CGF.Builder.getInt8Ty(), 2);
+ NeedsBitcast = true;
+ }
if (ScalableDst->getElementType() == FixedSrc->getElementType()) {
auto *Load = CGF.Builder.CreateLoad(Src);
auto *UndefVec = llvm::UndefValue::get(ScalableDst);
auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
- return CGF.Builder.CreateInsertVector(ScalableDst, UndefVec, Load, Zero,
- "castScalableSve");
+ llvm::Value *Result = CGF.Builder.CreateInsertVector(
+ ScalableDst, UndefVec, Load, Zero, "cast.scalable");
+ if (NeedsBitcast)
+ Result = CGF.Builder.CreateBitCast(Result, OrigType);
+ return Result;
}
}
}
@@ -1287,7 +1329,7 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
CGF.Builder.CreateMemCpy(
Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
Src.getAlignment().getAsAlign(),
- llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize()));
+ llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinValue()));
return CGF.Builder.CreateLoad(Tmp);
}
@@ -1330,7 +1372,7 @@ static void CreateCoercedStore(llvm::Value *Src,
if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
- SrcSize.getFixedSize(), CGF);
+ SrcSize.getFixedValue(), CGF);
DstTy = Dst.getElementType();
}
@@ -1338,7 +1380,7 @@ static void CreateCoercedStore(llvm::Value *Src,
llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
if (SrcPtrTy && DstPtrTy &&
SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
- Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
+ Src = CGF.Builder.CreateAddrSpaceCast(Src, DstTy);
CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
return;
}
@@ -1357,8 +1399,8 @@ static void CreateCoercedStore(llvm::Value *Src,
// If store is legal, just bitcast the src pointer.
if (isa<llvm::ScalableVectorType>(SrcTy) ||
isa<llvm::ScalableVectorType>(DstTy) ||
- SrcSize.getFixedSize() <= DstSize.getFixedSize()) {
- Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
+ SrcSize.getFixedValue() <= DstSize.getFixedValue()) {
+ Dst = Dst.withElementType(SrcTy);
CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
} else {
// Otherwise do coercion through memory. This is stupid, but
@@ -1375,17 +1417,17 @@ static void CreateCoercedStore(llvm::Value *Src,
CGF.Builder.CreateMemCpy(
Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
Tmp.getAlignment().getAsAlign(),
- llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize()));
+ llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedValue()));
}
}
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
const ABIArgInfo &info) {
if (unsigned offset = info.getDirectOffset()) {
- addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
+ addr = addr.withElementType(CGF.Int8Ty);
addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
CharUnits::fromQuantity(offset));
- addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
+ addr = addr.withElementType(info.getCoerceToType());
}
return addr;
}
@@ -1550,11 +1592,11 @@ bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
default:
return false;
case BuiltinType::Float:
- return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
+ return getTarget().useObjCFPRetForRealType(FloatModeKind::Float);
case BuiltinType::Double:
- return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
+ return getTarget().useObjCFPRetForRealType(FloatModeKind::Double);
case BuiltinType::LongDouble:
- return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
+ return getTarget().useObjCFPRetForRealType(FloatModeKind::LongDouble);
}
}
@@ -1600,9 +1642,8 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
if (retAI.getInAllocaSRet()) {
// sret things on win32 aren't void, they return the sret pointer.
QualType ret = FI.getReturnType();
- llvm::Type *ty = ConvertType(ret);
- unsigned addressSpace = Context.getTargetAddressSpace(ret);
- resultType = llvm::PointerType::get(ty, addressSpace);
+ unsigned addressSpace = CGM.getTypes().getTargetAddressSpace(ret);
+ resultType = llvm::PointerType::get(getLLVMContext(), addressSpace);
} else {
resultType = llvm::Type::getVoidTy(getLLVMContext());
}
@@ -1624,18 +1665,15 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
// Add type for sret argument.
if (IRFunctionArgs.hasSRetArg()) {
QualType Ret = FI.getReturnType();
- llvm::Type *Ty = ConvertType(Ret);
- unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
+ unsigned AddressSpace = CGM.getTypes().getTargetAddressSpace(Ret);
ArgTypes[IRFunctionArgs.getSRetArgNo()] =
- llvm::PointerType::get(Ty, AddressSpace);
+ llvm::PointerType::get(getLLVMContext(), AddressSpace);
}
// Add type for inalloca argument.
- if (IRFunctionArgs.hasInallocaArg()) {
- auto ArgStruct = FI.getArgStruct();
- assert(ArgStruct);
- ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
- }
+ if (IRFunctionArgs.hasInallocaArg())
+ ArgTypes[IRFunctionArgs.getInallocaArgNo()] =
+ llvm::PointerType::getUnqual(getLLVMContext());
// Add in all of the required arguments.
unsigned ArgNo = 0;
@@ -1658,20 +1696,17 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
assert(NumIRArgs == 0);
break;
- case ABIArgInfo::Indirect: {
+ case ABIArgInfo::Indirect:
assert(NumIRArgs == 1);
// indirect arguments are always on the stack, which is alloca addr space.
- llvm::Type *LTy = ConvertTypeForMem(it->type);
- ArgTypes[FirstIRArg] = LTy->getPointerTo(
- CGM.getDataLayout().getAllocaAddrSpace());
+ ArgTypes[FirstIRArg] = llvm::PointerType::get(
+ getLLVMContext(), CGM.getDataLayout().getAllocaAddrSpace());
break;
- }
- case ABIArgInfo::IndirectAliased: {
+ case ABIArgInfo::IndirectAliased:
assert(NumIRArgs == 1);
- llvm::Type *LTy = ConvertTypeForMem(it->type);
- ArgTypes[FirstIRArg] = LTy->getPointerTo(ArgInfo.getIndirectAddrSpace());
+ ArgTypes[FirstIRArg] = llvm::PointerType::get(
+ getLLVMContext(), ArgInfo.getIndirectAddrSpace());
break;
- }
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
// Fast-isel and the optimizer generally like scalar values better than
@@ -1691,7 +1726,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
case ABIArgInfo::CoerceAndExpand: {
auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
- for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
+ for (auto *EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
*ArgTypesIter++ = EltTy;
}
assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
@@ -1714,7 +1749,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
if (!isFuncTypeConvertible(FPT))
return llvm::StructType::get(getLLVMContext());
@@ -1731,10 +1766,51 @@ static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
FPT->isNothrow())
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
+
+ unsigned SMEBits = FPT->getAArch64SMEAttributes();
+ if (SMEBits & FunctionType::SME_PStateSMEnabledMask)
+ FuncAttrs.addAttribute("aarch64_pstate_sm_enabled");
+ if (SMEBits & FunctionType::SME_PStateSMCompatibleMask)
+ FuncAttrs.addAttribute("aarch64_pstate_sm_compatible");
+
+ // ZA
+ if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_Out ||
+ FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_InOut)
+ FuncAttrs.addAttribute("aarch64_pstate_za_shared");
+ if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_Preserves ||
+ FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_In) {
+ FuncAttrs.addAttribute("aarch64_pstate_za_shared");
+ FuncAttrs.addAttribute("aarch64_pstate_za_preserved");
+ }
+
+ // ZT0
+ if (FunctionType::getArmZT0State(SMEBits) == FunctionType::ARM_Preserves)
+ FuncAttrs.addAttribute("aarch64_preserves_zt0");
+ if (FunctionType::getArmZT0State(SMEBits) == FunctionType::ARM_In)
+ FuncAttrs.addAttribute("aarch64_in_zt0");
+ if (FunctionType::getArmZT0State(SMEBits) == FunctionType::ARM_Out)
+ FuncAttrs.addAttribute("aarch64_out_zt0");
+ if (FunctionType::getArmZT0State(SMEBits) == FunctionType::ARM_InOut)
+ FuncAttrs.addAttribute("aarch64_inout_zt0");
+}
+
+static void AddAttributesFromAssumes(llvm::AttrBuilder &FuncAttrs,
+ const Decl *Callee) {
+ if (!Callee)
+ return;
+
+ SmallVector<StringRef, 4> Attrs;
+
+ for (const AssumptionAttr *AA : Callee->specific_attrs<AssumptionAttr>())
+ AA->getAssumption().split(Attrs, ",");
+
+ if (!Attrs.empty())
+ FuncAttrs.addAttribute(llvm::AssumptionAttrKey,
+ llvm::join(Attrs.begin(), Attrs.end(), ","));
}
bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context,
- QualType ReturnType) {
+ QualType ReturnType) const {
// We can't just discard the return value for a record type with a
// complex destructor or a non-trivially copyable type.
if (const RecordType *RT =
@@ -1745,10 +1821,65 @@ bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context,
return ReturnType.isTriviallyCopyableType(Context);
}
-void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
- bool HasOptnone,
- bool AttrOnCallSite,
- llvm::AttrBuilder &FuncAttrs) {
+static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy,
+ const Decl *TargetDecl) {
+ // As-is msan can not tolerate noundef mismatch between caller and
+ // implementation. Mismatch is possible for e.g. indirect calls from C-caller
+ // into C++. Such mismatches lead to confusing false reports. To avoid
+ // expensive workaround on msan we enforce initialization event in uncommon
+ // cases where it's allowed.
+ if (Module.getLangOpts().Sanitize.has(SanitizerKind::Memory))
+ return true;
+ // C++ explicitly makes returning undefined values UB. C's rule only applies
+ // to used values, so we never mark them noundef for now.
+ if (!Module.getLangOpts().CPlusPlus)
+ return false;
+ if (TargetDecl) {
+ if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) {
+ if (FDecl->isExternC())
+ return false;
+ } else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) {
+ // Function pointer.
+ if (VDecl->isExternC())
+ return false;
+ }
+ }
+
+ // We don't want to be too aggressive with the return checking, unless
+ // it's explicit in the code opts or we're using an appropriate sanitizer.
+ // Try to respect what the programmer intended.
+ return Module.getCodeGenOpts().StrictReturn ||
+ !Module.MayDropFunctionReturn(Module.getContext(), RetTy) ||
+ Module.getLangOpts().Sanitize.has(SanitizerKind::Return);
+}
+
+/// Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the
+/// requested denormal behavior, accounting for the overriding behavior of the
+/// -f32 case.
+static void addDenormalModeAttrs(llvm::DenormalMode FPDenormalMode,
+ llvm::DenormalMode FP32DenormalMode,
+ llvm::AttrBuilder &FuncAttrs) {
+ if (FPDenormalMode != llvm::DenormalMode::getDefault())
+ FuncAttrs.addAttribute("denormal-fp-math", FPDenormalMode.str());
+
+ if (FP32DenormalMode != FPDenormalMode && FP32DenormalMode.isValid())
+ FuncAttrs.addAttribute("denormal-fp-math-f32", FP32DenormalMode.str());
+}
+
+/// Add default attributes to a function, which have merge semantics under
+/// -mlink-builtin-bitcode and should not simply overwrite any existing
+/// attributes in the linked library.
+static void
+addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts,
+ llvm::AttrBuilder &FuncAttrs) {
+ addDenormalModeAttrs(CodeGenOpts.FPDenormalMode, CodeGenOpts.FP32DenormalMode,
+ FuncAttrs);
+}
+
+static void getTrivialDefaultFunctionAttributes(
+ StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts,
+ const LangOptions &LangOpts, bool AttrOnCallSite,
+ llvm::AttrBuilder &FuncAttrs) {
// OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
if (!HasOptnone) {
if (CodeGenOpts.OptimizeSize)
@@ -1766,24 +1897,23 @@ void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
if (AttrOnCallSite) {
// Attributes that should go on the call site only.
+ // FIXME: Look for 'BuiltinAttr' on the function rather than re-checking
+ // the -fno-builtin-foo list.
if (!CodeGenOpts.SimplifyLibCalls || LangOpts.isNoBuiltinFunc(Name))
FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
if (!CodeGenOpts.TrapFuncName.empty())
FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
} else {
- StringRef FpKind;
switch (CodeGenOpts.getFramePointer()) {
case CodeGenOptions::FramePointerKind::None:
- FpKind = "none";
+ // This is the default behavior.
break;
case CodeGenOptions::FramePointerKind::NonLeaf:
- FpKind = "non-leaf";
- break;
case CodeGenOptions::FramePointerKind::All:
- FpKind = "all";
- break;
+ FuncAttrs.addAttribute("frame-pointer",
+ CodeGenOptions::getFramePointerKindName(
+ CodeGenOpts.getFramePointer()));
}
- FuncAttrs.addAttribute("frame-pointer", FpKind);
if (CodeGenOpts.LessPreciseFPMAD)
FuncAttrs.addAttribute("less-precise-fpmad", "true");
@@ -1791,30 +1921,23 @@ void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
if (CodeGenOpts.NullPointerIsValid)
FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
- if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE())
- FuncAttrs.addAttribute("denormal-fp-math",
- CodeGenOpts.FPDenormalMode.str());
- if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) {
- FuncAttrs.addAttribute(
- "denormal-fp-math-f32",
- CodeGenOpts.FP32DenormalMode.str());
- }
-
- if (LangOpts.getFPExceptionMode() == LangOptions::FPE_Ignore)
+ if (LangOpts.getDefaultExceptionMode() == LangOptions::FPE_Ignore)
FuncAttrs.addAttribute("no-trapping-math", "true");
- // Strict (compliant) code is the default, so only add this attribute to
- // indicate that we are trying to workaround a problem case.
- if (!CodeGenOpts.StrictFloatCastOverflow)
- FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
-
// TODO: Are these all needed?
// unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
if (LangOpts.NoHonorInfs)
FuncAttrs.addAttribute("no-infs-fp-math", "true");
if (LangOpts.NoHonorNaNs)
FuncAttrs.addAttribute("no-nans-fp-math", "true");
- if (LangOpts.UnsafeFPMath)
+ if (LangOpts.ApproxFunc)
+ FuncAttrs.addAttribute("approx-func-fp-math", "true");
+ if (LangOpts.AllowFPReassoc && LangOpts.AllowRecip &&
+ LangOpts.NoSignedZero && LangOpts.ApproxFunc &&
+ (LangOpts.getDefaultFPContractMode() ==
+ LangOptions::FPModeKind::FPM_Fast ||
+ LangOpts.getDefaultFPContractMode() ==
+ LangOptions::FPModeKind::FPM_FastHonorPragmas))
FuncAttrs.addAttribute("unsafe-fp-math", "true");
if (CodeGenOpts.SoftFloat)
FuncAttrs.addAttribute("use-soft-float", "true");
@@ -1843,9 +1966,40 @@ void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
if (CodeGenOpts.SpeculativeLoadHardening)
FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
+
+ // Add zero-call-used-regs attribute.
+ switch (CodeGenOpts.getZeroCallUsedRegs()) {
+ case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip:
+ FuncAttrs.removeAttribute("zero-call-used-regs");
+ break;
+ case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg:
+ FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr-arg");
+ break;
+ case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR:
+ FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr");
+ break;
+ case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg:
+ FuncAttrs.addAttribute("zero-call-used-regs", "used-arg");
+ break;
+ case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used:
+ FuncAttrs.addAttribute("zero-call-used-regs", "used");
+ break;
+ case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg:
+ FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr-arg");
+ break;
+ case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR:
+ FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr");
+ break;
+ case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg:
+ FuncAttrs.addAttribute("zero-call-used-regs", "all-arg");
+ break;
+ case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All:
+ FuncAttrs.addAttribute("zero-call-used-regs", "all");
+ break;
+ }
}
- if (getLangOpts().assumeFunctionsAreConvergent()) {
+ if (LangOpts.assumeFunctionsAreConvergent()) {
// Conservatively, mark all functions and calls in CUDA and OpenCL as
// convergent (meaning, they may call an intrinsically convergent op, such
// as __syncthreads() / barrier(), and so can't have certain optimizations
@@ -1854,8 +2008,10 @@ void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
FuncAttrs.addAttribute(llvm::Attribute::Convergent);
}
- if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
- // Exceptions aren't supported in CUDA device code.
+ // TODO: NoUnwind attribute should be added for other GPU modes HIP,
+ // OpenMP offload. AFAIK, neither of them support exceptions in device code.
+ if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL ||
+ LangOpts.SYCLIsDevice) {
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
}
@@ -1866,16 +2022,128 @@ void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
}
}
-void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) {
- llvm::AttrBuilder FuncAttrs;
- getDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
- /* AttrOnCallSite = */ false, FuncAttrs);
- // TODO: call GetCPUAndFeaturesAttributes?
- F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
+/// Merges `target-features` from \TargetOpts and \F, and sets the result in
+/// \FuncAttr
+/// * features from \F are always kept
+/// * a feature from \TargetOpts is kept if itself and its opposite are absent
+/// from \F
+static void
+overrideFunctionFeaturesWithTargetFeatures(llvm::AttrBuilder &FuncAttr,
+ const llvm::Function &F,
+ const TargetOptions &TargetOpts) {
+ auto FFeatures = F.getFnAttribute("target-features");
+
+ llvm::StringSet<> MergedNames;
+ SmallVector<StringRef> MergedFeatures;
+ MergedFeatures.reserve(TargetOpts.Features.size());
+
+ auto AddUnmergedFeatures = [&](auto &&FeatureRange) {
+ for (StringRef Feature : FeatureRange) {
+ if (Feature.empty())
+ continue;
+ assert(Feature[0] == '+' || Feature[0] == '-');
+ StringRef Name = Feature.drop_front(1);
+ bool Merged = !MergedNames.insert(Name).second;
+ if (!Merged)
+ MergedFeatures.push_back(Feature);
+ }
+ };
+
+ if (FFeatures.isValid())
+ AddUnmergedFeatures(llvm::split(FFeatures.getValueAsString(), ','));
+ AddUnmergedFeatures(TargetOpts.Features);
+
+ if (!MergedFeatures.empty()) {
+ llvm::sort(MergedFeatures);
+ FuncAttr.addAttribute("target-features", llvm::join(MergedFeatures, ","));
+ }
+}
+
+void CodeGen::mergeDefaultFunctionDefinitionAttributes(
+ llvm::Function &F, const CodeGenOptions &CodeGenOpts,
+ const LangOptions &LangOpts, const TargetOptions &TargetOpts,
+ bool WillInternalize) {
+
+ llvm::AttrBuilder FuncAttrs(F.getContext());
+ // Here we only extract the options that are relevant compared to the version
+ // from GetCPUAndFeaturesAttributes.
+ if (!TargetOpts.CPU.empty())
+ FuncAttrs.addAttribute("target-cpu", TargetOpts.CPU);
+ if (!TargetOpts.TuneCPU.empty())
+ FuncAttrs.addAttribute("tune-cpu", TargetOpts.TuneCPU);
+
+ ::getTrivialDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
+ CodeGenOpts, LangOpts,
+ /*AttrOnCallSite=*/false, FuncAttrs);
+
+ if (!WillInternalize && F.isInterposable()) {
+ // Do not promote "dynamic" denormal-fp-math to this translation unit's
+ // setting for weak functions that won't be internalized. The user has no
+ // real control for how builtin bitcode is linked, so we shouldn't assume
+ // later copies will use a consistent mode.
+ F.addFnAttrs(FuncAttrs);
+ return;
+ }
+
+ llvm::AttributeMask AttrsToRemove;
+
+ llvm::DenormalMode DenormModeToMerge = F.getDenormalModeRaw();
+ llvm::DenormalMode DenormModeToMergeF32 = F.getDenormalModeF32Raw();
+ llvm::DenormalMode Merged =
+ CodeGenOpts.FPDenormalMode.mergeCalleeMode(DenormModeToMerge);
+ llvm::DenormalMode MergedF32 = CodeGenOpts.FP32DenormalMode;
+
+ if (DenormModeToMergeF32.isValid()) {
+ MergedF32 =
+ CodeGenOpts.FP32DenormalMode.mergeCalleeMode(DenormModeToMergeF32);
+ }
+
+ if (Merged == llvm::DenormalMode::getDefault()) {
+ AttrsToRemove.addAttribute("denormal-fp-math");
+ } else if (Merged != DenormModeToMerge) {
+ // Overwrite existing attribute
+ FuncAttrs.addAttribute("denormal-fp-math",
+ CodeGenOpts.FPDenormalMode.str());
+ }
+
+ if (MergedF32 == llvm::DenormalMode::getDefault()) {
+ AttrsToRemove.addAttribute("denormal-fp-math-f32");
+ } else if (MergedF32 != DenormModeToMergeF32) {
+ // Overwrite existing attribute
+ FuncAttrs.addAttribute("denormal-fp-math-f32",
+ CodeGenOpts.FP32DenormalMode.str());
+ }
+
+ F.removeFnAttrs(AttrsToRemove);
+ addDenormalModeAttrs(Merged, MergedF32, FuncAttrs);
+
+ overrideFunctionFeaturesWithTargetFeatures(FuncAttrs, F, TargetOpts);
+
+ F.addFnAttrs(FuncAttrs);
+}
+
+void CodeGenModule::getTrivialDefaultFunctionAttributes(
+ StringRef Name, bool HasOptnone, bool AttrOnCallSite,
+ llvm::AttrBuilder &FuncAttrs) {
+ ::getTrivialDefaultFunctionAttributes(Name, HasOptnone, getCodeGenOpts(),
+ getLangOpts(), AttrOnCallSite,
+ FuncAttrs);
+}
+
+void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
+ bool HasOptnone,
+ bool AttrOnCallSite,
+ llvm::AttrBuilder &FuncAttrs) {
+ getTrivialDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite,
+ FuncAttrs);
+ // If we're just getting the default, get the default values for mergeable
+ // attributes.
+ if (!AttrOnCallSite)
+ addMergableDefaultFunctionAttributes(CodeGenOpts, FuncAttrs);
}
void CodeGenModule::addDefaultFunctionDefinitionAttributes(
- llvm::AttrBuilder &attrs) {
+ llvm::AttrBuilder &attrs) {
getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false,
/*for call*/ false, attrs);
GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
@@ -1921,7 +2189,8 @@ static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types,
const llvm::DataLayout &DL, const ABIArgInfo &AI,
bool CheckCoerce = true) {
llvm::Type *Ty = Types.ConvertTypeForMem(QTy);
- if (AI.getKind() == ABIArgInfo::Indirect)
+ if (AI.getKind() == ABIArgInfo::Indirect ||
+ AI.getKind() == ABIArgInfo::IndirectAliased)
return true;
if (AI.getKind() == ABIArgInfo::Extend)
return true;
@@ -1940,7 +2209,7 @@ static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types,
// there's no internal padding (typeSizeEqualsStoreSize).
return false;
}
- if (QTy->isExtIntType())
+ if (QTy->isBitIntType())
return true;
if (QTy->isReferenceType())
return true;
@@ -1966,6 +2235,71 @@ static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types,
return false;
}
+/// Check if the argument of a function has maybe_undef attribute.
+static bool IsArgumentMaybeUndef(const Decl *TargetDecl,
+ unsigned NumRequiredArgs, unsigned ArgNo) {
+ const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
+ if (!FD)
+ return false;
+
+ // Assume variadic arguments do not have maybe_undef attribute.
+ if (ArgNo >= NumRequiredArgs)
+ return false;
+
+ // Check if argument has maybe_undef attribute.
+ if (ArgNo < FD->getNumParams()) {
+ const ParmVarDecl *Param = FD->getParamDecl(ArgNo);
+ if (Param && Param->hasAttr<MaybeUndefAttr>())
+ return true;
+ }
+
+ return false;
+}
+
+/// Test if it's legal to apply nofpclass for the given parameter type and it's
+/// lowered IR type.
+static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType,
+ bool IsReturn) {
+ // Should only apply to FP types in the source, not ABI promoted.
+ if (!ParamType->hasFloatingRepresentation())
+ return false;
+
+ // The promoted-to IR type also needs to support nofpclass.
+ llvm::Type *IRTy = AI.getCoerceToType();
+ if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy))
+ return true;
+
+ if (llvm::StructType *ST = dyn_cast<llvm::StructType>(IRTy)) {
+ return !IsReturn && AI.getCanBeFlattened() &&
+ llvm::all_of(ST->elements(), [](llvm::Type *Ty) {
+ return llvm::AttributeFuncs::isNoFPClassCompatibleType(Ty);
+ });
+ }
+
+ return false;
+}
+
+/// Return the nofpclass mask that can be applied to floating-point parameters.
+static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts) {
+ llvm::FPClassTest Mask = llvm::fcNone;
+ if (LangOpts.NoHonorInfs)
+ Mask |= llvm::fcInf;
+ if (LangOpts.NoHonorNaNs)
+ Mask |= llvm::fcNan;
+ return Mask;
+}
+
+void CodeGenModule::AdjustMemoryAttribute(StringRef Name,
+ CGCalleeInfo CalleeInfo,
+ llvm::AttributeList &Attrs) {
+ if (Attrs.getMemoryEffects().getModRef() == llvm::ModRefInfo::NoModRef) {
+ Attrs = Attrs.removeFnAttribute(getLLVMContext(), llvm::Attribute::Memory);
+ llvm::Attribute MemoryAttr = llvm::Attribute::getWithMemoryEffects(
+ getLLVMContext(), llvm::MemoryEffects::writeOnly());
+ Attrs = Attrs.addFnAttribute(getLLVMContext(), MemoryAttr);
+ }
+}
+
/// Construct the IR attribute list of a function or call.
///
/// When adding an attribute, please consider where it should be handled:
@@ -1989,8 +2323,8 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
llvm::AttributeList &AttrList,
unsigned &CallingConv,
bool AttrOnCallSite, bool IsThunk) {
- llvm::AttrBuilder FuncAttrs;
- llvm::AttrBuilder RetAttrs;
+ llvm::AttrBuilder FuncAttrs(getLLVMContext());
+ llvm::AttrBuilder RetAttrs(getLLVMContext());
// Collect function IR attributes from the CC lowering.
// We'll collect the paramete and result attributes later.
@@ -2006,10 +2340,23 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
+ // Attach assumption attributes to the declaration. If this is a call
+ // site, attach assumptions from the caller to the call as well.
+ AddAttributesFromAssumes(FuncAttrs, TargetDecl);
+
bool HasOptnone = false;
// The NoBuiltinAttr attached to the target FunctionDecl.
const NoBuiltinAttr *NBA = nullptr;
+ // Some ABIs may result in additional accesses to arguments that may
+ // otherwise not be present.
+ auto AddPotentialArgAccess = [&]() {
+ llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory);
+ if (A.isValid())
+ FuncAttrs.addMemoryAttr(A.getMemoryEffects() |
+ llvm::MemoryEffects::argMemOnly());
+ };
+
// Collect function IR attributes based on declaration-specific
// information.
// FIXME: handle sseregparm someday...
@@ -2048,44 +2395,29 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
NBA = Fn->getAttr<NoBuiltinAttr>();
}
+ }
+
+ if (isa<FunctionDecl>(TargetDecl) || isa<VarDecl>(TargetDecl)) {
// Only place nomerge attribute on call sites, never functions. This
// allows it to work on indirect virtual function calls.
if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
-
- // Add known guaranteed alignment for allocation functions.
- if (unsigned BuiltinID = Fn->getBuiltinID()) {
- switch (BuiltinID) {
- case Builtin::BIaligned_alloc:
- case Builtin::BIcalloc:
- case Builtin::BImalloc:
- case Builtin::BImemalign:
- case Builtin::BIrealloc:
- case Builtin::BIstrdup:
- case Builtin::BIstrndup:
- RetAttrs.addAlignmentAttr(Context.getTargetInfo().getNewAlign() /
- Context.getTargetInfo().getCharWidth());
- break;
- default:
- break;
- }
- }
}
// 'const', 'pure' and 'noalias' attributed functions are also nounwind.
if (TargetDecl->hasAttr<ConstAttr>()) {
- FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
+ FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none());
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
// gcc specifies that 'const' functions have greater restrictions than
// 'pure' functions, so they also cannot have infinite loops.
FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
} else if (TargetDecl->hasAttr<PureAttr>()) {
- FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
+ FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
// gcc specifies that 'pure' functions cannot have infinite loops.
FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
} else if (TargetDecl->hasAttr<NoAliasAttr>()) {
- FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
+ FuncAttrs.addMemoryAttr(llvm::MemoryEffects::inaccessibleOrArgMemOnly());
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
}
if (TargetDecl->hasAttr<RestrictAttr>())
@@ -2102,7 +2434,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
- Optional<unsigned> NumElemsParam;
+ std::optional<unsigned> NumElemsParam;
if (AllocSize->getNumElemsParam().isValid())
NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
@@ -2119,22 +2451,18 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
// to the compiler that the global work-size be a multiple of
// the work-group size specified to clEnqueueNDRangeKernel
// (i.e. work groups are uniform).
- FuncAttrs.addAttribute("uniform-work-group-size",
- llvm::toStringRef(CodeGenOpts.UniformWGSize));
+ FuncAttrs.addAttribute(
+ "uniform-work-group-size",
+ llvm::toStringRef(getLangOpts().OffloadUniformBlock));
}
}
- std::string AssumptionValueStr;
- for (AssumptionAttr *AssumptionA :
- TargetDecl->specific_attrs<AssumptionAttr>()) {
- std::string AS = AssumptionA->getAssumption().str();
- if (!AS.empty() && !AssumptionValueStr.empty())
- AssumptionValueStr += ",";
- AssumptionValueStr += AS;
- }
+ if (TargetDecl->hasAttr<CUDAGlobalAttr>() &&
+ getLangOpts().OffloadUniformBlock)
+ FuncAttrs.addAttribute("uniform-work-group-size", "true");
- if (!AssumptionValueStr.empty())
- FuncAttrs.addAttribute(llvm::AssumptionAttrKey, AssumptionValueStr);
+ if (TargetDecl->hasAttr<ArmLocallyStreamingAttr>())
+ FuncAttrs.addAttribute("aarch64_pstate_sm_body");
}
// Attach "no-builtins" attributes to:
@@ -2157,6 +2485,15 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
if (TargetDecl->hasAttr<NoSplitStackAttr>())
FuncAttrs.removeAttribute("split-stack");
+ if (TargetDecl->hasAttr<ZeroCallUsedRegsAttr>()) {
+ // A function "__attribute__((...))" overrides the command-line flag.
+ auto Kind =
+ TargetDecl->getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
+ FuncAttrs.removeAttribute("zero-call-used-regs");
+ FuncAttrs.addAttribute(
+ "zero-call-used-regs",
+ ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind));
+ }
// Add NonLazyBind attribute to function declarations when -fno-plt
// is used.
@@ -2174,9 +2511,8 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
// Add "sample-profile-suffix-elision-policy" attribute for internal linkage
// functions with -funique-internal-linkage-names.
if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
- if (isa<FunctionDecl>(TargetDecl)) {
- if (this->getFunctionLinkage(CalleeInfo.getCalleeDecl()) ==
- llvm::GlobalValue::InternalLinkage)
+ if (const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
+ if (!FD->isExternallyVisible())
FuncAttrs.addAttribute("sample-profile-suffix-elision-policy",
"selected");
}
@@ -2224,27 +2560,9 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
const ABIArgInfo &RetAI = FI.getReturnInfo();
const llvm::DataLayout &DL = getDataLayout();
- // C++ explicitly makes returning undefined values UB. C's rule only applies
- // to used values, so we never mark them noundef for now.
- bool HasStrictReturn = getLangOpts().CPlusPlus;
- if (TargetDecl) {
- if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl))
- HasStrictReturn &= !FDecl->isExternC();
- else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl))
- // Function pointer
- HasStrictReturn &= !VDecl->isExternC();
- }
-
- // We don't want to be too aggressive with the return checking, unless
- // it's explicit in the code opts or we're using an appropriate sanitizer.
- // Try to respect what the programmer intended.
- HasStrictReturn &= getCodeGenOpts().StrictReturn ||
- !MayDropFunctionReturn(getContext(), RetTy) ||
- getLangOpts().Sanitize.has(SanitizerKind::Memory) ||
- getLangOpts().Sanitize.has(SanitizerKind::Return);
-
// Determine if the return type could be partially undef
- if (CodeGenOpts.EnableNoundefAttrs && HasStrictReturn) {
+ if (CodeGenOpts.EnableNoundefAttrs &&
+ HasStrictReturn(*this, RetTy, TargetDecl)) {
if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect &&
DetermineNoUndef(RetTy, getTypes(), DL, RetAI))
RetAttrs.addAttribute(llvm::Attribute::NoUndef);
@@ -2256,10 +2574,14 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
RetAttrs.addAttribute(llvm::Attribute::SExt);
else
RetAttrs.addAttribute(llvm::Attribute::ZExt);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ABIArgInfo::Direct:
if (RetAI.getInReg())
RetAttrs.addAttribute(llvm::Attribute::InReg);
+
+ if (canApplyNoFPClass(RetAI, RetTy, true))
+ RetAttrs.addNoFPClassAttr(getNoFPClassTestMask(getLangOpts()));
+
break;
case ABIArgInfo::Ignore:
break;
@@ -2267,8 +2589,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
case ABIArgInfo::InAlloca:
case ABIArgInfo::Indirect: {
// inalloca and sret disable readnone and readonly
- FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
- .removeAttribute(llvm::Attribute::ReadNone);
+ AddPotentialArgAccess();
break;
}
@@ -2287,7 +2608,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
RetAttrs.addDereferenceableAttr(
getMinimumObjectSize(PTy).getQuantity());
- if (getContext().getTargetAddressSpace(PTy) == 0 &&
+ if (getTypes().getTargetAddressSpace(PTy) == 0 &&
!CodeGenOpts.NullPointerIsValid)
RetAttrs.addAttribute(llvm::Attribute::NonNull);
if (PTy->isObjectType()) {
@@ -2303,8 +2624,10 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
// Attach attributes to sret.
if (IRFunctionArgs.hasSRetArg()) {
- llvm::AttrBuilder SRETAttrs;
+ llvm::AttrBuilder SRETAttrs(getLLVMContext());
SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
+ SRETAttrs.addAttribute(llvm::Attribute::Writable);
+ SRETAttrs.addAttribute(llvm::Attribute::DeadOnUnwind);
hasUsedSRet = true;
if (RetAI.getInReg())
SRETAttrs.addAttribute(llvm::Attribute::InReg);
@@ -2315,7 +2638,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
// Attach attributes to inalloca argument.
if (IRFunctionArgs.hasInallocaArg()) {
- llvm::AttrBuilder Attrs;
+ llvm::AttrBuilder Attrs(getLLVMContext());
Attrs.addInAllocaAttr(FI.getArgStruct());
ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
llvm::AttributeSet::get(getLLVMContext(), Attrs);
@@ -2330,13 +2653,13 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");
- llvm::AttrBuilder Attrs;
+ llvm::AttrBuilder Attrs(getLLVMContext());
QualType ThisTy =
- FI.arg_begin()->type.castAs<PointerType>()->getPointeeType();
+ FI.arg_begin()->type.getTypePtr()->getPointeeType();
if (!CodeGenOpts.NullPointerIsValid &&
- getContext().getTargetAddressSpace(FI.arg_begin()->type) == 0) {
+ getTypes().getTargetAddressSpace(FI.arg_begin()->type) == 0) {
Attrs.addAttribute(llvm::Attribute::NonNull);
Attrs.addDereferenceableAttr(getMinimumObjectSize(ThisTy).getQuantity());
} else {
@@ -2365,7 +2688,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
I != E; ++I, ++ArgNo) {
QualType ParamType = I->type;
const ABIArgInfo &AI = I->info;
- llvm::AttrBuilder Attrs;
+ llvm::AttrBuilder Attrs(getLLVMContext());
// Add attribute for padding argument, if necessary.
if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
@@ -2373,14 +2696,15 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
llvm::AttributeSet::get(
getLLVMContext(),
- llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
+ llvm::AttrBuilder(getLLVMContext()).addAttribute(llvm::Attribute::InReg));
}
}
// Decide whether the argument we're handling could be partially undef
- bool ArgNoUndef = DetermineNoUndef(ParamType, getTypes(), DL, AI);
- if (CodeGenOpts.EnableNoundefAttrs && ArgNoUndef)
+ if (CodeGenOpts.EnableNoundefAttrs &&
+ DetermineNoUndef(ParamType, getTypes(), DL, AI)) {
Attrs.addAttribute(llvm::Attribute::NoUndef);
+ }
// 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
// have the corresponding parameter variable. It doesn't make
@@ -2391,15 +2715,17 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
Attrs.addAttribute(llvm::Attribute::SExt);
else
Attrs.addAttribute(llvm::Attribute::ZExt);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ABIArgInfo::Direct:
if (ArgNo == 0 && FI.isChainCall())
Attrs.addAttribute(llvm::Attribute::Nest);
else if (AI.getInReg())
Attrs.addAttribute(llvm::Attribute::InReg);
Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign()));
- break;
+ if (canApplyNoFPClass(AI, ParamType, false))
+ Attrs.addNoFPClassAttr(getNoFPClassTestMask(getLangOpts()));
+ break;
case ABIArgInfo::Indirect: {
if (AI.getInReg())
Attrs.addAttribute(llvm::Attribute::InReg);
@@ -2409,7 +2735,8 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
auto *Decl = ParamType->getAsRecordDecl();
if (CodeGenOpts.PassByValueIsNoAlias && Decl &&
- Decl->getArgPassingRestrictions() == RecordDecl::APK_CanPassInRegs)
+ Decl->getArgPassingRestrictions() ==
+ RecordArgPassingKind::CanPassInRegs)
// When calling the function, the pointer passed in will be the only
// reference to the underlying object. Mark it accordingly.
Attrs.addAttribute(llvm::Attribute::NoAlias);
@@ -2437,9 +2764,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
Attrs.addAlignmentAttr(Align.getQuantity());
// byval disables readnone and readonly.
- FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
- .removeAttribute(llvm::Attribute::ReadNone);
-
+ AddPotentialArgAccess();
break;
}
case ABIArgInfo::IndirectAliased: {
@@ -2455,8 +2780,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
case ABIArgInfo::InAlloca:
// inalloca disables readnone and readonly.
- FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
- .removeAttribute(llvm::Attribute::ReadNone);
+ AddPotentialArgAccess();
continue;
}
@@ -2465,7 +2789,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
Attrs.addDereferenceableAttr(
getMinimumObjectSize(PTy).getQuantity());
- if (getContext().getTargetAddressSpace(PTy) == 0 &&
+ if (getTypes().getTargetAddressSpace(PTy) == 0 &&
!CodeGenOpts.NullPointerIsValid)
Attrs.addAttribute(llvm::Attribute::NonNull);
if (PTy->isObjectType()) {
@@ -2475,6 +2799,20 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
}
}
+ // From OpenCL spec v3.0.10 section 6.3.5 Alignment of Types:
+ // > For arguments to a __kernel function declared to be a pointer to a
+ // > data type, the OpenCL compiler can assume that the pointee is always
+ // > appropriately aligned as required by the data type.
+ if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>() &&
+ ParamType->isPointerType()) {
+ QualType PTy = ParamType->getPointeeType();
+ if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
+ llvm::Align Alignment =
+ getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
+ Attrs.addAlignmentAttr(Alignment);
+ }
+ }
+
switch (FI.getExtParameterInfo(ArgNo).getABI()) {
case ParameterABI::Ordinary:
break;
@@ -2520,8 +2858,8 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
unsigned FirstIRArg, NumIRArgs;
std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
for (unsigned i = 0; i < NumIRArgs; i++)
- ArgAttrs[FirstIRArg + i] =
- llvm::AttributeSet::get(getLLVMContext(), Attrs);
+ ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(
+ getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), Attrs));
}
}
assert(ArgNo == FI.arg_size());
@@ -2620,12 +2958,9 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// If we're using inalloca, all the memory arguments are GEPs off of the last
// parameter, which is a pointer to the complete memory area.
Address ArgStruct = Address::invalid();
- if (IRFunctionArgs.hasInallocaArg()) {
+ if (IRFunctionArgs.hasInallocaArg())
ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
- FI.getArgStructAlignment());
-
- assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
- }
+ FI.getArgStruct(), FI.getArgStructAlignment());
// Name the struct return parameter.
if (IRFunctionArgs.hasSRetArg()) {
@@ -2672,7 +3007,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
Address V =
Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
if (ArgI.getInAllocaIndirect())
- V = Address(Builder.CreateLoad(V),
+ V = Address(Builder.CreateLoad(V), ConvertTypeForMem(Ty),
getContext().getTypeAlignInChars(Ty));
ArgVals.push_back(ParamValue::forIndirect(V));
break;
@@ -2681,8 +3016,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
case ABIArgInfo::Indirect:
case ABIArgInfo::IndirectAliased: {
assert(NumIRArgs == 1);
- Address ParamAddr =
- Address(Fn->getArg(FirstIRArg), ArgI.getIndirectAlign());
+ Address ParamAddr = Address(Fn->getArg(FirstIRArg), ConvertTypeForMem(Ty),
+ ArgI.getIndirectAlign(), KnownNonNull);
if (!hasScalarEvaluationKind(Ty)) {
// Aggregates and complex variables are accessed by reference. All we
@@ -2744,15 +3079,15 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// indicates dereferenceability, and if the size is constant we can
// use the dereferenceable attribute (which requires the size in
// bytes).
- if (ArrTy->getSizeModifier() == ArrayType::Static) {
+ if (ArrTy->getSizeModifier() == ArraySizeModifier::Static) {
QualType ETy = ArrTy->getElementType();
llvm::Align Alignment =
CGM.getNaturalTypeAlignment(ETy).getAsAlign();
- AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
+ AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
uint64_t ArrSize = ArrTy->getSize().getZExtValue();
if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
ArrSize) {
- llvm::AttrBuilder Attrs;
+ llvm::AttrBuilder Attrs(getLLVMContext());
Attrs.addDereferenceableAttr(
getContext().getTypeSizeInChars(ETy).getQuantity() *
ArrSize);
@@ -2768,12 +3103,12 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// For C99 VLAs with the static keyword, we don't know the size so
// we can't use the dereferenceable attribute, but in addrspace(0)
// we know that it must be nonnull.
- if (ArrTy->getSizeModifier() == VariableArrayType::Static) {
+ if (ArrTy->getSizeModifier() == ArraySizeModifier::Static) {
QualType ETy = ArrTy->getElementType();
llvm::Align Alignment =
CGM.getNaturalTypeAlignment(ETy).getAsAlign();
- AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
- if (!getContext().getTargetAddressSpace(ETy) &&
+ AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment));
+ if (!getTypes().getTargetAddressSpace(ETy) &&
!CGM.getCodeGenOpts().NullPointerIsValid)
AI->addAttr(llvm::Attribute::NonNull);
}
@@ -2782,7 +3117,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// Set `align` attribute if any.
const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
if (!AVAttr)
- if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
+ if (const auto *TOTy = OTy->getAs<TypedefType>())
AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
// If alignment-assumption sanitizer is enabled, we do *not* add
@@ -2790,11 +3125,11 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// so the UBSAN check could function.
llvm::ConstantInt *AlignmentCI =
cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment()));
- unsigned AlignmentInt =
+ uint64_t AlignmentInt =
AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
- AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(
+ AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(
llvm::Align(AlignmentInt)));
}
}
@@ -2821,7 +3156,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
assert(pointeeTy->isPointerType());
Address temp =
CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
- Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
+ Address arg(V, ConvertTypeForMem(pointeeTy),
+ getContext().getTypeAlignInChars(pointeeTy));
llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
Builder.CreateStore(incomingErrorValue, temp);
V = temp.getPointer();
@@ -2854,19 +3190,27 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// VLST arguments are coerced to VLATs at the function boundary for
// ABI consistency. If this is a VLST that was coerced to
// a VLAT at the function boundary and the types match up, use
- // llvm.experimental.vector.extract to convert back to the original
- // VLST.
+ // llvm.vector.extract to convert back to the original VLST.
if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
- auto *Coerced = Fn->getArg(FirstIRArg);
+ llvm::Value *Coerced = Fn->getArg(FirstIRArg);
if (auto *VecTyFrom =
dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {
+ // If we are casting a scalable 16 x i1 predicate vector to a fixed i8
+ // vector, bitcast the source and use a vector extract.
+ auto PredType =
+ llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
+ if (VecTyFrom == PredType &&
+ VecTyTo->getElementType() == Builder.getInt8Ty()) {
+ VecTyFrom = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
+ Coerced = Builder.CreateBitCast(Coerced, VecTyFrom);
+ }
if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {
llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
assert(NumIRArgs == 1);
Coerced->setName(Arg->getName() + ".coerce");
ArgVals.push_back(ParamValue::forDirect(Builder.CreateExtractVector(
- VecTyTo, Coerced, Zero, "castFixedSve")));
+ VecTyTo, Coerced, Zero, "cast.fixed")));
break;
}
}
@@ -2883,30 +3227,51 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
STy->getNumElements() > 1) {
- uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
- llvm::Type *DstTy = Ptr.getElementType();
- uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
+ llvm::TypeSize StructSize = CGM.getDataLayout().getTypeAllocSize(STy);
+ llvm::TypeSize PtrElementSize =
+ CGM.getDataLayout().getTypeAllocSize(Ptr.getElementType());
+ if (StructSize.isScalable()) {
+ assert(STy->containsHomogeneousScalableVectorTypes() &&
+ "ABI only supports structure with homogeneous scalable vector "
+ "type");
+ assert(StructSize == PtrElementSize &&
+ "Only allow non-fractional movement of structure with"
+ "homogeneous scalable vector type");
+ assert(STy->getNumElements() == NumIRArgs);
+
+ llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy);
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ auto *AI = Fn->getArg(FirstIRArg + i);
+ AI->setName(Arg->getName() + ".coerce" + Twine(i));
+ LoadedStructValue =
+ Builder.CreateInsertValue(LoadedStructValue, AI, i);
+ }
- Address AddrToStoreInto = Address::invalid();
- if (SrcSize <= DstSize) {
- AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
+ Builder.CreateStore(LoadedStructValue, Ptr);
} else {
- AddrToStoreInto =
- CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
- }
+ uint64_t SrcSize = StructSize.getFixedValue();
+ uint64_t DstSize = PtrElementSize.getFixedValue();
+
+ Address AddrToStoreInto = Address::invalid();
+ if (SrcSize <= DstSize) {
+ AddrToStoreInto = Ptr.withElementType(STy);
+ } else {
+ AddrToStoreInto =
+ CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
+ }
- assert(STy->getNumElements() == NumIRArgs);
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- auto AI = Fn->getArg(FirstIRArg + i);
- AI->setName(Arg->getName() + ".coerce" + Twine(i));
- Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
- Builder.CreateStore(AI, EltPtr);
- }
+ assert(STy->getNumElements() == NumIRArgs);
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ auto AI = Fn->getArg(FirstIRArg + i);
+ AI->setName(Arg->getName() + ".coerce" + Twine(i));
+ Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
+ Builder.CreateStore(AI, EltPtr);
+ }
- if (SrcSize > DstSize) {
- Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
+ if (SrcSize > DstSize) {
+ Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
+ }
}
-
} else {
// Simple case, just do a coerced store of the argument into the alloca.
assert(NumIRArgs == 1);
@@ -2934,7 +3299,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
ArgVals.push_back(ParamValue::forIndirect(alloca));
auto coercionType = ArgI.getCoerceAndExpandType();
- alloca = Builder.CreateElementBitCast(alloca, coercionType);
+ alloca = alloca.withElementType(coercionType);
unsigned argIndex = FirstIRArg;
for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
@@ -3099,9 +3464,9 @@ static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
const VarDecl *self = method->getSelfDecl();
if (!self->getType().isConstQualified()) return nullptr;
- // Look for a retain call.
- llvm::CallInst *retainCall =
- dyn_cast<llvm::CallInst>(result->stripPointerCasts());
+ // Look for a retain call. Note: stripPointerCasts looks through returned arg
+ // functions, which would cause us to miss the retain.
+ llvm::CallInst *retainCall = dyn_cast<llvm::CallInst>(result);
if (!retainCall || retainCall->getCalledOperand() !=
CGF.CGM.getObjCEntrypoints().objc_retain)
return nullptr;
@@ -3153,11 +3518,14 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
// ReturnValue to some other location.
auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
auto *SI = dyn_cast<llvm::StoreInst>(U);
- if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
+ if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer() ||
+ SI->getValueOperand()->getType() != CGF.ReturnValue.getElementType())
return nullptr;
// These aren't actually possible for non-coerced returns, and we
// only care about non-coerced returns on this code path.
- assert(!SI->isAtomic() && !SI->isVolatile());
+ // All memory instructions inside __try block are volatile.
+ assert(!SI->isAtomic() &&
+ (!SI->isVolatile() || CGF.currentFunctionUsesSEHTry()));
return SI;
};
// If there are multiple uses of the return-value slot, just check
@@ -3167,28 +3535,19 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
if (IP->empty()) return nullptr;
- llvm::Instruction *I = &IP->back();
-
- // Skip lifetime markers
- for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
- IE = IP->rend();
- II != IE; ++II) {
- if (llvm::IntrinsicInst *Intrinsic =
- dyn_cast<llvm::IntrinsicInst>(&*II)) {
- if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
- const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
- ++II;
- if (II == IE)
- break;
- if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
- continue;
- }
- }
- I = &*II;
- break;
- }
- return GetStoreIfValid(I);
+ // Look at directly preceding instruction, skipping bitcasts and lifetime
+ // markers.
+ for (llvm::Instruction &I : make_range(IP->rbegin(), IP->rend())) {
+ if (isa<llvm::BitCastInst>(&I))
+ continue;
+ if (auto *II = dyn_cast<llvm::IntrinsicInst>(&I))
+ if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)
+ continue;
+
+ return GetStoreIfValid(&I);
+ }
+ return nullptr;
}
llvm::StoreInst *store =
@@ -3199,8 +3558,9 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
// single-predecessors chain from the current insertion point.
llvm::BasicBlock *StoreBB = store->getParent();
llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
+ llvm::SmallPtrSet<llvm::BasicBlock *, 4> SeenBBs;
while (IP != StoreBB) {
- if (!(IP = IP->getSinglePredecessor()))
+ if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor()))
return nullptr;
}
@@ -3389,7 +3749,7 @@ llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
int CharsPerElt =
ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
int MaskIndex = 0;
- llvm::Value *R = llvm::UndefValue::get(ATy);
+ llvm::Value *R = llvm::PoisonValue::get(ATy);
for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
DataLayout.isBigEndian());
@@ -3430,7 +3790,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
switch (RetAI.getKind()) {
case ABIArgInfo::InAlloca:
- // Aggregrates get evaluated directly into the destination. Sometimes we
+ // Aggregates get evaluated directly into the destination. Sometimes we
// need to return the sret value in a register, though.
assert(hasAggregateEvaluationKind(RetTy));
if (RetAI.getInAllocaSRet()) {
@@ -3438,8 +3798,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
--EI;
llvm::Value *ArgStruct = &*EI;
llvm::Value *SRet = Builder.CreateStructGEP(
- EI->getType()->getPointerElementType(), ArgStruct,
- RetAI.getInAllocaFieldIndex());
+ FI.getArgStruct(), ArgStruct, RetAI.getInAllocaFieldIndex());
llvm::Type *Ty =
cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();
RV = Builder.CreateAlignedLoad(Ty, SRet, getPointerAlign(), "sret");
@@ -3459,14 +3818,21 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
break;
}
case TEK_Aggregate:
- // Do nothing; aggregrates get evaluated directly into the destination.
+ // Do nothing; aggregates get evaluated directly into the destination.
break;
- case TEK_Scalar:
- EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
- MakeNaturalAlignAddrLValue(&*AI, RetTy),
- /*isInit*/ true);
+ case TEK_Scalar: {
+ LValueBaseInfo BaseInfo;
+ TBAAAccessInfo TBAAInfo;
+ CharUnits Alignment =
+ CGM.getNaturalTypeAlignment(RetTy, &BaseInfo, &TBAAInfo);
+ Address ArgAddr(&*AI, ConvertType(RetTy), Alignment);
+ LValue ArgVal =
+ LValue::MakeAddr(ArgAddr, RetTy, getContext(), BaseInfo, TBAAInfo);
+ EmitStoreOfScalar(
+ Builder.CreateLoad(ReturnValue), ArgVal, /*isInit*/ true);
break;
}
+ }
break;
}
@@ -3537,7 +3903,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
// Load all of the coerced elements out into results.
llvm::SmallVector<llvm::Value*, 4> results;
- Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
+ Address addr = ReturnValue.withElementType(coercionType);
for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
auto coercedEltType = coercionType->getElementType(i);
if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
@@ -3557,7 +3923,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
// Construct a return type that lacks padding elements.
llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
- RV = llvm::UndefValue::get(returnType);
+ RV = llvm::PoisonValue::get(returnType);
for (unsigned i = 0, e = results.size(); i != e; ++i) {
RV = Builder.CreateInsertValue(RV, results[i], i);
}
@@ -3663,15 +4029,15 @@ static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
// FIXME: Generate IR in one pass, rather than going back and fixing up these
// placeholders.
llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
- llvm::Type *IRPtrTy = IRTy->getPointerTo();
- llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
+ llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(CGF.getLLVMContext());
+ llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy);
// FIXME: When we generate this IR in one pass, we shouldn't need
// this win32-specific alignment hack.
CharUnits Align = CharUnits::fromQuantity(4);
Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
- return AggValueSlot::forAddr(Address(Placeholder, Align),
+ return AggValueSlot::forAddr(Address(Placeholder, IRTy, Align),
Ty.getQualifiers(),
AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
@@ -3689,10 +4055,6 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
QualType type = param->getType();
- if (isInAllocaArgument(CGM.getCXXABI(), type)) {
- CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
- }
-
// GetAddrOfLocalVar returns a pointer-to-pointer for references,
// but the argument needs to be the original pointer.
if (type->isReferenceType()) {
@@ -3854,7 +4216,9 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
// because of the crazy ObjC compatibility rules.
llvm::PointerType *destType =
- cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
+ cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
+ llvm::Type *destElemType =
+ CGF.ConvertTypeForMem(CRE->getType()->getPointeeType());
// If the address is a constant null, just pass the appropriate null.
if (isProvablyNull(srcAddr.getPointer())) {
@@ -3864,9 +4228,8 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
}
// Create the temporary.
- Address temp = CGF.CreateTempAlloca(destType->getElementType(),
- CGF.getPointerAlign(),
- "icr.temp");
+ Address temp =
+ CGF.CreateTempAlloca(destElemType, CGF.getPointerAlign(), "icr.temp");
// Loading an l-value can introduce a cleanup if the l-value is __weak,
// and that cleanup will be conditional if we can't prove that the l-value
// isn't null, so we need to register a dominating point so that the cleanups
@@ -3877,8 +4240,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
bool shouldCopy = CRE->shouldCopy();
if (!shouldCopy) {
llvm::Value *null =
- llvm::ConstantPointerNull::get(
- cast<llvm::PointerType>(destType->getElementType()));
+ llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destElemType));
CGF.Builder.CreateStore(null, temp);
}
@@ -3920,8 +4282,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
assert(srcRV.isScalar());
llvm::Value *src = srcRV.getScalarVal();
- src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
- "icr.cast");
+ src = CGF.Builder.CreateBitCast(src, destElemType, "icr.cast");
// Use an ordinary store, not a store-to-lvalue.
CGF.Builder.CreateStore(src, temp);
@@ -3963,15 +4324,13 @@ void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
assert(!StackBase);
// Save the stack.
- llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
- StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
+ StackBase = CGF.Builder.CreateStackSave("inalloca.save");
}
void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
if (StackBase) {
// Restore the stack after the call.
- llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
- CGF.Builder.CreateCall(F, StackBase);
+ CGF.Builder.CreateStackRestore(StackBase);
}
}
@@ -3994,7 +4353,7 @@ void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
bool CanCheckNullability = false;
if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
- auto Nullability = PVD->getType()->getNullability(getContext());
+ auto Nullability = PVD->getType()->getNullability();
CanCheckNullability = Nullability &&
*Nullability == NullabilityKind::NonNull &&
PVD->getTypeSourceInfo();
@@ -4022,7 +4381,7 @@ void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
};
- EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
+ EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, std::nullopt);
}
// Check if the call is going to use the inalloca convention. This needs to
@@ -4120,8 +4479,7 @@ void CodeGenFunction::EmitCallArgs(
}
// If we still have any arguments, emit them using the type of the argument.
- for (auto *A : llvm::make_range(std::next(ArgRange.begin(), ArgTypes.size()),
- ArgRange.end()))
+ for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
@@ -4294,11 +4652,8 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
// If we're using inalloca, use the argument memory. Otherwise, use a
// temporary.
- AggValueSlot Slot;
- if (args.isUsingInAlloca())
- Slot = createPlaceholderSlot(*this, type);
- else
- Slot = CreateAggTemp(type, "agg.tmp");
+ AggValueSlot Slot = args.isUsingInAlloca()
+ ? createPlaceholderSlot(*this, type) : CreateAggTemp(type, "agg.tmp");
bool DestroyedInCallee = true, NeedsEHCleanup = true;
if (const auto *RD = type->getAsCXXRecordDecl())
@@ -4321,7 +4676,7 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
type);
// This unreachable is a temporary marker which will be removed later.
llvm::Instruction *IsActive = Builder.CreateUnreachable();
- args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
+ args.addArgCleanupDeactivation(EHStack.stable_begin(), IsActive);
}
return;
}
@@ -4346,7 +4701,7 @@ QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
if (Arg->getType()->isIntegerType() &&
getContext().getTypeSize(Arg->getType()) <
- getContext().getTargetInfo().getPointerWidth(0) &&
+ getContext().getTargetInfo().getPointerWidth(LangAS::Default) &&
Arg->isNullPointerConstant(getContext(),
Expr::NPC_ValueDependentIsNotNull)) {
return getContext().getIntPtrType();
@@ -4369,7 +4724,7 @@ CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
const llvm::Twine &name) {
- return EmitNounwindRuntimeCall(callee, None, name);
+ return EmitNounwindRuntimeCall(callee, std::nullopt, name);
}
/// Emits a call to the given nounwind runtime function.
@@ -4386,24 +4741,29 @@ CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
/// runtime function.
llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
const llvm::Twine &name) {
- return EmitRuntimeCall(callee, None, name);
+ return EmitRuntimeCall(callee, std::nullopt, name);
}
// Calls which may throw must have operand bundles indicating which funclet
// they are nested within.
SmallVector<llvm::OperandBundleDef, 1>
CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
- SmallVector<llvm::OperandBundleDef, 1> BundleList;
// There is no need for a funclet operand bundle if we aren't inside a
// funclet.
if (!CurrentFuncletPad)
- return BundleList;
-
- // Skip intrinsics which cannot throw.
- auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
- if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
- return BundleList;
+ return (SmallVector<llvm::OperandBundleDef, 1>());
+
+ // Skip intrinsics which cannot throw (as long as they don't lower into
+ // regular function calls in the course of IR transformations).
+ if (auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) {
+ if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
+ auto IID = CalleeFn->getIntrinsicID();
+ if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID))
+ return (SmallVector<llvm::OperandBundleDef, 1>());
+ }
+ }
+ SmallVector<llvm::OperandBundleDef, 1> BundleList;
BundleList.emplace_back("funclet", CurrentFuncletPad);
return BundleList;
}
@@ -4445,7 +4805,7 @@ void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
llvm::CallBase *
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
const Twine &name) {
- return EmitRuntimeCallOrInvoke(callee, None, name);
+ return EmitRuntimeCallOrInvoke(callee, std::nullopt, name);
}
/// Emits a call or invoke instruction to the given runtime function.
@@ -4495,7 +4855,7 @@ namespace {
/// Specify given \p NewAlign as the alignment of return value attribute. If
/// such attribute already exists, re-set it to the maximal one of two options.
-LLVM_NODISCARD llvm::AttributeList
+[[nodiscard]] llvm::AttributeList
maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
const llvm::AttributeList &Attrs,
llvm::Align NewAlign) {
@@ -4503,10 +4863,8 @@ maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
if (CurAlign >= NewAlign)
return Attrs;
llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
- return Attrs
- .removeAttribute(Ctx, llvm::AttributeList::ReturnIndex,
- llvm::Attribute::AttrKind::Alignment)
- .addAttribute(Ctx, llvm::AttributeList::ReturnIndex, AlignAttr);
+ return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
+ .addRetAttribute(Ctx, AlignAttr);
}
template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
@@ -4528,7 +4886,7 @@ protected:
public:
/// If we can, materialize the alignment as an attribute on return value.
- LLVM_NODISCARD llvm::AttributeList
+ [[nodiscard]] llvm::AttributeList
TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
return Attrs;
@@ -4595,6 +4953,19 @@ public:
} // namespace
+static unsigned getMaxVectorWidth(const llvm::Type *Ty) {
+ if (auto *VT = dyn_cast<llvm::VectorType>(Ty))
+ return VT->getPrimitiveSizeInBits().getKnownMinValue();
+ if (auto *AT = dyn_cast<llvm::ArrayType>(Ty))
+ return getMaxVectorWidth(AT->getElementType());
+
+ unsigned MaxVectorWidth = 0;
+ if (auto *ST = dyn_cast<llvm::StructType>(Ty))
+ for (auto *I : ST->elements())
+ MaxVectorWidth = std::max(MaxVectorWidth, getMaxVectorWidth(I));
+ return MaxVectorWidth;
+}
+
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
const CGCallee &Callee,
ReturnValueSlot ReturnValue,
@@ -4621,7 +4992,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// the proper cpu features (and it won't cause code generation issues due to
// function based code generation).
if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
- TargetDecl->hasAttr<TargetAttr>())
+ (TargetDecl->hasAttr<TargetAttr>() ||
+ (CurFuncDecl && CurFuncDecl->hasAttr<TargetAttr>())))
checkTargetFeatures(Loc, FD);
// Some architectures (such as x86-64) have the ABI changed based on
@@ -4630,25 +5002,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
}
-#ifndef NDEBUG
- if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
- // For an inalloca varargs function, we don't expect CallInfo to match the
- // function pointer's type, because the inalloca struct a will have extra
- // fields in it for the varargs parameters. Code later in this function
- // bitcasts the function pointer to the type derived from CallInfo.
- //
- // In other cases, we assert that the types match up (until pointers stop
- // having pointee types).
- llvm::Type *TypeFromVal;
- if (Callee.isVirtual())
- TypeFromVal = Callee.getVirtualFunctionType();
- else
- TypeFromVal =
- Callee.getFunctionPointer()->getType()->getPointerElementType();
- assert(IRFuncTy == TypeFromVal);
- }
-#endif
-
// 1. Set up the arguments.
// If we're using inalloca, insert the allocation after the stack save.
@@ -4669,7 +5022,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
AI->setAlignment(Align.getAsAlign());
AI->setUsedWithInAlloca(true);
assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
- ArgMemory = Address(AI, Align);
+ ArgMemory = Address(AI, ArgStruct, Align);
}
ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
@@ -4725,6 +5078,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
unsigned FirstIRArg, NumIRArgs;
std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
+ bool ArgHasMaybeUndefAttr =
+ IsArgumentMaybeUndef(TargetDecl, CallInfo.getNumRequiredArgs(), ArgNo);
+
switch (ArgInfo.getKind()) {
case ABIArgInfo::InAlloca: {
assert(NumIRArgs == 0);
@@ -4767,13 +5123,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Store the RValue into the argument struct.
Address Addr =
Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
- unsigned AS = Addr.getType()->getPointerAddressSpace();
- llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
- // There are some cases where a trivial bitcast is not avoidable. The
- // definition of a type later in a translation unit may change it's type
- // from {}* to (%struct.foo*)*.
- if (Addr.getType() != MemType)
- Addr = Builder.CreateBitCast(Addr, MemType);
+ Addr = Addr.withElementType(ConvertTypeForMem(I->Ty));
I->copyInto(*this, Addr);
}
break;
@@ -4786,7 +5136,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Make a temporary alloca to pass the argument.
Address Addr = CreateMemTempWithoutCast(
I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
- IRCallArgs[FirstIRArg] = Addr.getPointer();
+
+ llvm::Value *Val = Addr.getPointer();
+ if (ArgHasMaybeUndefAttr)
+ Val = Builder.CreateFreeze(Addr.getPointer());
+ IRCallArgs[FirstIRArg] = Val;
I->copyInto(*this, Addr);
} else {
@@ -4811,7 +5165,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
"indirect argument must be in alloca address space");
bool NeedCopy = false;
-
if (Addr.getAlignment() < Align &&
llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
Align.getAsAlign()) {
@@ -4820,12 +5173,15 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
auto LV = I->getKnownLValue();
auto AS = LV.getAddressSpace();
- if (!ArgInfo.getIndirectByVal() ||
+ bool isByValOrRef =
+ ArgInfo.isIndirectAliased() || ArgInfo.getIndirectByVal();
+
+ if (!isByValOrRef ||
(LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
NeedCopy = true;
}
if (!getLangOpts().OpenCL) {
- if ((ArgInfo.getIndirectByVal() &&
+ if ((isByValOrRef &&
(AS != LangAS::Default &&
AS != CGM.getASTAllocaAddressSpace()))) {
NeedCopy = true;
@@ -4833,7 +5189,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
// For OpenCL even if RV is located in default or alloca address space
// we don't want to perform address space cast for it.
- else if ((ArgInfo.getIndirectByVal() &&
+ else if ((isByValOrRef &&
Addr.getType()->getAddressSpace() != IRFuncTy->
getParamType(FirstIRArg)->getPointerAddressSpace())) {
NeedCopy = true;
@@ -4844,7 +5200,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Create an aligned temporary, and copy to it.
Address AI = CreateMemTempWithoutCast(
I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
- IRCallArgs[FirstIRArg] = AI.getPointer();
+ llvm::Value *Val = AI.getPointer();
+ if (ArgHasMaybeUndefAttr)
+ Val = Builder.CreateFreeze(AI.getPointer());
+ IRCallArgs[FirstIRArg] = Val;
// Emit lifetime markers for the temporary alloca.
llvm::TypeSize ByvalTempElementSize =
@@ -4860,11 +5219,15 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
I->copyInto(*this, AI);
} else {
// Skip the extra memcpy call.
- auto *T = V->getType()->getPointerElementType()->getPointerTo(
- CGM.getDataLayout().getAllocaAddrSpace());
- IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
+ auto *T = llvm::PointerType::get(
+ CGM.getLLVMContext(), CGM.getDataLayout().getAllocaAddrSpace());
+
+ llvm::Value *Val = getTargetHooks().performAddrSpaceCast(
*this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
true);
+ if (ArgHasMaybeUndefAttr)
+ Val = Builder.CreateFreeze(Val);
+ IRCallArgs[FirstIRArg] = Val;
}
}
break;
@@ -4895,8 +5258,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
QualType pointeeTy = I->Ty->getPointeeType();
- swiftErrorArg =
- Address(V, getContext().getTypeAlignInChars(pointeeTy));
+ swiftErrorArg = Address(V, ConvertTypeForMem(pointeeTy),
+ getContext().getTypeAlignInChars(pointeeTy));
swiftErrorTemp =
CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
@@ -4918,6 +5281,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
V->getType() != IRFuncTy->getParamType(FirstIRArg))
V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
+ if (ArgHasMaybeUndefAttr)
+ V = Builder.CreateFreeze(V);
IRCallArgs[FirstIRArg] = V;
break;
}
@@ -4941,29 +5306,50 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
llvm::Type *SrcTy = Src.getElementType();
- uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
- uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
-
- // If the source type is smaller than the destination type of the
- // coerce-to logic, copy the source value into a temp alloca the size
- // of the destination type to allow loading all of it. The bits past
- // the source value are left undef.
- if (SrcSize < DstSize) {
- Address TempAlloca
- = CreateTempAlloca(STy, Src.getAlignment(),
- Src.getName() + ".coerce");
- Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
- Src = TempAlloca;
+ llvm::TypeSize SrcTypeSize =
+ CGM.getDataLayout().getTypeAllocSize(SrcTy);
+ llvm::TypeSize DstTypeSize = CGM.getDataLayout().getTypeAllocSize(STy);
+ if (SrcTypeSize.isScalable()) {
+ assert(STy->containsHomogeneousScalableVectorTypes() &&
+ "ABI only supports structure with homogeneous scalable vector "
+ "type");
+ assert(SrcTypeSize == DstTypeSize &&
+ "Only allow non-fractional movement of structure with "
+ "homogeneous scalable vector type");
+ assert(NumIRArgs == STy->getNumElements());
+
+ llvm::Value *StoredStructValue =
+ Builder.CreateLoad(Src, Src.getName() + ".tuple");
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ llvm::Value *Extract = Builder.CreateExtractValue(
+ StoredStructValue, i, Src.getName() + ".extract" + Twine(i));
+ IRCallArgs[FirstIRArg + i] = Extract;
+ }
} else {
- Src = Builder.CreateBitCast(Src,
- STy->getPointerTo(Src.getAddressSpace()));
- }
+ uint64_t SrcSize = SrcTypeSize.getFixedValue();
+ uint64_t DstSize = DstTypeSize.getFixedValue();
+
+ // If the source type is smaller than the destination type of the
+ // coerce-to logic, copy the source value into a temp alloca the size
+ // of the destination type to allow loading all of it. The bits past
+ // the source value are left undef.
+ if (SrcSize < DstSize) {
+ Address TempAlloca = CreateTempAlloca(STy, Src.getAlignment(),
+ Src.getName() + ".coerce");
+ Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
+ Src = TempAlloca;
+ } else {
+ Src = Src.withElementType(STy);
+ }
- assert(NumIRArgs == STy->getNumElements());
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- Address EltPtr = Builder.CreateStructGEP(Src, i);
- llvm::Value *LI = Builder.CreateLoad(EltPtr);
- IRCallArgs[FirstIRArg + i] = LI;
+ assert(NumIRArgs == STy->getNumElements());
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ Address EltPtr = Builder.CreateStructGEP(Src, i);
+ llvm::Value *LI = Builder.CreateLoad(EltPtr);
+ if (ArgHasMaybeUndefAttr)
+ LI = Builder.CreateFreeze(LI);
+ IRCallArgs[FirstIRArg + i] = LI;
+ }
}
} else {
// In the simple case, just pass the coerced loaded value.
@@ -4979,6 +5365,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
}
+
+ if (ArgHasMaybeUndefAttr)
+ Load = Builder.CreateFreeze(Load);
IRCallArgs[FirstIRArg] = Load;
}
@@ -5002,13 +5391,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::Type *scalarType = RV.getScalarVal()->getType();
auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
- auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
+ auto scalarAlign = CGM.getDataLayout().getPrefTypeAlign(scalarType);
// Materialize to a temporary.
addr = CreateTempAlloca(
RV.getScalarVal()->getType(),
- CharUnits::fromQuantity(std::max(
- (unsigned)layout->getAlignment().value(), scalarAlign)),
+ CharUnits::fromQuantity(std::max(layout->getAlignment(), scalarAlign)),
"tmp",
/*ArraySize=*/nullptr, &AllocaAddr);
tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
@@ -5016,7 +5404,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Builder.CreateStore(RV.getScalarVal(), addr);
}
- addr = Builder.CreateElementBitCast(addr, coercionType);
+ addr = addr.withElementType(coercionType);
unsigned IRArgPos = FirstIRArg;
for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
@@ -5024,6 +5412,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
Address eltAddr = Builder.CreateStructGEP(addr, i);
llvm::Value *elt = Builder.CreateLoad(eltAddr);
+ if (ArgHasMaybeUndefAttr)
+ elt = Builder.CreateFreeze(elt);
IRCallArgs[IRArgPos++] = elt;
}
assert(IRArgPos == FirstIRArg + NumIRArgs);
@@ -5050,33 +5440,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// If we're using inalloca, set up that argument.
if (ArgMemory.isValid()) {
llvm::Value *Arg = ArgMemory.getPointer();
- if (CallInfo.isVariadic()) {
- // When passing non-POD arguments by value to variadic functions, we will
- // end up with a variadic prototype and an inalloca call site. In such
- // cases, we can't do any parameter mismatch checks. Give up and bitcast
- // the callee.
- unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
- CalleePtr =
- Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
- } else {
- llvm::Type *LastParamTy =
- IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
- if (Arg->getType() != LastParamTy) {
-#ifndef NDEBUG
- // Assert that these structs have equivalent element types.
- llvm::StructType *FullTy = CallInfo.getArgStruct();
- llvm::StructType *DeclaredTy = cast<llvm::StructType>(
- cast<llvm::PointerType>(LastParamTy)->getElementType());
- assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
- for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
- DE = DeclaredTy->element_end(),
- FI = FullTy->element_begin();
- DI != DE; ++DI, ++FI)
- assert(*DI == *FI);
-#endif
- Arg = Builder.CreateBitCast(Arg, LastParamTy);
- }
- }
assert(IRFunctionArgs.hasInallocaArg());
IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
}
@@ -5149,12 +5512,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
#endif
// Update the largest vector width if any arguments have vector types.
- for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
- if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
- LargestVectorWidth =
- std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getKnownMinSize());
- }
+ for (unsigned i = 0; i < IRCallArgs.size(); ++i)
+ LargestVectorWidth = std::max(LargestVectorWidth,
+ getMaxVectorWidth(IRCallArgs[i]->getType()));
// Compute the calling convention and attributes.
unsigned CallingConv;
@@ -5164,18 +5524,30 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
/*AttrOnCallSite=*/true,
/*IsThunk=*/false);
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
if (FD->hasAttr<StrictFPAttr>())
// All calls within a strictfp function are marked strictfp
- Attrs =
- Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
- llvm::Attribute::StrictFP);
+ Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);
+ // If -ffast-math is enabled and the function is guarded by an
+ // '__attribute__((optnone)) adjust the memory attribute so the BE emits the
+ // library call instead of the intrinsic.
+ if (FD->hasAttr<OptimizeNoneAttr>() && getLangOpts().FastMath)
+ CGM.AdjustMemoryAttribute(CalleePtr->getName(), Callee.getAbstractInfo(),
+ Attrs);
+ }
// Add call-site nomerge attribute if exists.
if (InNoMergeAttributedStmt)
+ Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoMerge);
+
+ // Add call-site noinline attribute if exists.
+ if (InNoInlineAttributedStmt)
+ Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
+
+ // Add call-site always_inline attribute if exists.
+ if (InAlwaysInlineAttributedStmt)
Attrs =
- Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
- llvm::Attribute::NoMerge);
+ Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
// Apply some call-site-specific attributes.
// TODO: work this into building the attribute set.
@@ -5183,17 +5555,15 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Apply always_inline to all calls within flatten functions.
// FIXME: should this really take priority over __try, below?
if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
+ !InNoInlineAttributedStmt &&
!(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
Attrs =
- Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
- llvm::Attribute::AlwaysInline);
+ Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
}
// Disable inlining inside SEH __try blocks.
if (isSEHTryScope()) {
- Attrs =
- Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
- llvm::Attribute::NoInline);
+ Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
}
// Decide whether to use a call or an invoke.
@@ -5209,7 +5579,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
CannotThrow = true;
} else {
// Otherwise, nounwind call sites will never throw.
- CannotThrow = Attrs.hasFnAttribute(llvm::Attribute::NoUnwind);
+ CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);
if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
@@ -5229,12 +5599,14 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
SmallVector<llvm::OperandBundleDef, 1> BundleList =
getBundlesForFunclet(CalleePtr);
+ if (SanOpts.has(SanitizerKind::KCFI) &&
+ !isa_and_nonnull<FunctionDecl>(TargetDecl))
+ EmitKCFIOperandBundle(ConcreteCallee, BundleList);
+
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
if (FD->hasAttr<StrictFPAttr>())
// All calls within a strictfp function are marked strictfp
- Attrs =
- Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
- llvm::Attribute::StrictFP);
+ Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);
AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
@@ -5252,6 +5624,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
BundleList);
EmitBlock(Cont);
}
+ if (CI->getCalledFunction() && CI->getCalledFunction()->hasName() &&
+ CI->getCalledFunction()->getName().starts_with("_Z4sqrt")) {
+ SetSqrtFPAccuracy(CI);
+ }
if (callOrInvoke)
*callOrInvoke = CI;
@@ -5261,8 +5637,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
if (const auto *A = FD->getAttr<CFGuardAttr>()) {
if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
- Attrs = Attrs.addAttribute(
- getLLVMContext(), llvm::AttributeList::FunctionIndex, "guard_nocf");
+ Attrs = Attrs.addFnAttribute(getLLVMContext(), "guard_nocf");
}
}
@@ -5276,10 +5651,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
CI->setName("call");
// Update largest vector width from the return type.
- if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
- LargestVectorWidth =
- std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getKnownMinSize());
+ LargestVectorWidth =
+ std::max(LargestVectorWidth, getMaxVectorWidth(CI->getType()));
// Insert instrumentation or attach profile metadata at indirect call sites.
// For more details, see the comment before the definition of
@@ -5306,6 +5679,15 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
TargetDecl->hasAttr<MSAllocatorAttr>())
getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);
+ // Add metadata if calling an __attribute__((error(""))) or warning fn.
+ if (TargetDecl && TargetDecl->hasAttr<ErrorAttr>()) {
+ llvm::ConstantInt *Line =
+ llvm::ConstantInt::get(Int32Ty, Loc.getRawEncoding());
+ llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(Line);
+ llvm::MDTuple *MDT = llvm::MDNode::get(getLLVMContext(), {MD});
+ CI->setMetadata("srcloc", MDT);
+ }
+
// 4. Finish the call.
// If the call doesn't return, finish the basic block and clear the
@@ -5321,8 +5703,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// attributes of the called function.
if (auto *F = CI->getCalledFunction())
F->removeFnAttr(llvm::Attribute::NoReturn);
- CI->removeAttribute(llvm::AttributeList::FunctionIndex,
- llvm::Attribute::NoReturn);
+ CI->removeFnAttr(llvm::Attribute::NoReturn);
// Avoid incompatibility with ASan which relies on the `noreturn`
// attribute to insert handler calls.
@@ -5389,8 +5770,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::CoerceAndExpand: {
auto coercionType = RetAI.getCoerceAndExpandType();
- Address addr = SRetPtr;
- addr = Builder.CreateElementBitCast(addr, coercionType);
+ Address addr = SRetPtr.withElementType(coercionType);
assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
bool requiresExtract = isa<llvm::StructType>(CI->getType());
@@ -5407,8 +5787,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(unpaddedIndex == 0);
Builder.CreateStore(elt, eltAddr);
}
- // FALLTHROUGH
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case ABIArgInfo::InAlloca:
@@ -5457,6 +5836,20 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm_unreachable("bad evaluation kind");
}
+ // If coercing a fixed vector from a scalable vector for ABI
+ // compatibility, and the types match, use the llvm.vector.extract
+ // intrinsic to perform the conversion.
+ if (auto *FixedDst = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
+ llvm::Value *V = CI;
+ if (auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(V->getType())) {
+ if (FixedDst->getElementType() == ScalableSrc->getElementType()) {
+ llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
+ V = Builder.CreateExtractVector(FixedDst, V, Zero, "cast.fixed");
+ return RValue::get(V);
+ }
+ }
+ }
+
Address DestPtr = ReturnValue.getValue();
bool DestIsVolatile = ReturnValue.isVolatile();
@@ -5465,9 +5858,14 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
DestIsVolatile = false;
}
- // If the value is offset in memory, apply the offset now.
- Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
- CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
+ // An empty record can overlap other data (if declared with
+ // no_unique_address); omit the store for such types - as there is no
+ // actual data to store.
+ if (!isEmptyRecord(getContext(), RetTy, true)) {
+ // If the value is offset in memory, apply the offset now.
+ Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
+ CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
+ }
return convertTempToRValue(DestPtr, RetTy, SourceLocation());
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCall.h b/contrib/llvm-project/clang/lib/CodeGen/CGCall.h
index e3d9fec6d363..1c0d15dc932a 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCall.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCall.h
@@ -20,23 +20,18 @@
#include "clang/AST/CanonicalType.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Type.h"
+#include "llvm/ADT/STLForwardCompat.h"
#include "llvm/IR/Value.h"
-// FIXME: Restructure so we don't have to expose so much stuff.
-#include "ABIInfo.h"
-
namespace llvm {
-class AttributeList;
-class Function;
class Type;
class Value;
} // namespace llvm
namespace clang {
-class ASTContext;
class Decl;
class FunctionDecl;
-class ObjCMethodDecl;
+class TargetOptions;
class VarDecl;
namespace CodeGen {
@@ -49,11 +44,11 @@ class CGCalleeInfo {
GlobalDecl CalleeDecl;
public:
- explicit CGCalleeInfo() : CalleeProtoTy(nullptr), CalleeDecl() {}
+ explicit CGCalleeInfo() : CalleeProtoTy(nullptr) {}
CGCalleeInfo(const FunctionProtoType *calleeProtoTy, GlobalDecl calleeDecl)
: CalleeProtoTy(calleeProtoTy), CalleeDecl(calleeDecl) {}
CGCalleeInfo(const FunctionProtoType *calleeProtoTy)
- : CalleeProtoTy(calleeProtoTy), CalleeDecl() {}
+ : CalleeProtoTy(calleeProtoTy) {}
CGCalleeInfo(GlobalDecl calleeDecl)
: CalleeProtoTy(nullptr), CalleeDecl(calleeDecl) {}
@@ -115,7 +110,6 @@ public:
AbstractInfo = abstractInfo;
assert(functionPtr && "configuring callee without function pointer");
assert(functionPtr->getType()->isPointerTy());
- assert(functionPtr->getType()->getPointerElementType()->isFunctionTy());
}
static CGCallee forBuiltin(unsigned builtinID,
@@ -263,7 +257,7 @@ public:
/// arguments in a call.
class CallArgList : public SmallVector<CallArg, 8> {
public:
- CallArgList() : StackBase(nullptr) {}
+ CallArgList() = default;
struct Writeback {
/// The original argument. Note that the argument l-value
@@ -349,7 +343,7 @@ private:
SmallVector<CallArgCleanup, 1> CleanupsToDeactivate;
/// The stacksave call. It dominates all of the argument evaluation.
- llvm::CallInst *StackBase;
+ llvm::CallInst *StackBase = nullptr;
};
/// FunctionArgList - Type for representing both the decl and type
@@ -382,6 +376,56 @@ public:
bool isExternallyDestructed() const { return IsExternallyDestructed; }
};
+/// Adds attributes to \p F according to our \p CodeGenOpts and \p LangOpts, as
+/// though we had emitted it ourselves. We remove any attributes on F that
+/// conflict with the attributes we add here.
+///
+/// This is useful for adding attrs to bitcode modules that you want to link
+/// with but don't control, such as CUDA's libdevice. When linking with such
+/// a bitcode library, you might want to set e.g. its functions'
+/// "unsafe-fp-math" attribute to match the attr of the functions you're
+/// codegen'ing. Otherwise, LLVM will interpret the bitcode module's lack of
+/// unsafe-fp-math attrs as tantamount to unsafe-fp-math=false, and then LLVM
+/// will propagate unsafe-fp-math=false up to every transitive caller of a
+/// function in the bitcode library!
+///
+/// With the exception of fast-math attrs, this will only make the attributes
+/// on the function more conservative. But it's unsafe to call this on a
+/// function which relies on particular fast-math attributes for correctness.
+/// It's up to you to ensure that this is safe.
+void mergeDefaultFunctionDefinitionAttributes(llvm::Function &F,
+ const CodeGenOptions &CodeGenOpts,
+ const LangOptions &LangOpts,
+ const TargetOptions &TargetOpts,
+ bool WillInternalize);
+
+enum class FnInfoOpts {
+ None = 0,
+ IsInstanceMethod = 1 << 0,
+ IsChainCall = 1 << 1,
+ IsDelegateCall = 1 << 2,
+};
+
+inline FnInfoOpts operator|(FnInfoOpts A, FnInfoOpts B) {
+ return static_cast<FnInfoOpts>(llvm::to_underlying(A) |
+ llvm::to_underlying(B));
+}
+
+inline FnInfoOpts operator&(FnInfoOpts A, FnInfoOpts B) {
+ return static_cast<FnInfoOpts>(llvm::to_underlying(A) &
+ llvm::to_underlying(B));
+}
+
+inline FnInfoOpts operator|=(FnInfoOpts A, FnInfoOpts B) {
+ A = A | B;
+ return A;
+}
+
+inline FnInfoOpts operator&=(FnInfoOpts A, FnInfoOpts B) {
+ A = A & B;
+ return A;
+}
+
} // end namespace CodeGen
} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
index 9895a23b7093..34319381901a 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
@@ -28,7 +28,9 @@
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
+#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"
+#include <optional>
using namespace clang;
using namespace CodeGen;
@@ -127,18 +129,18 @@ CodeGenModule::getDynamicOffsetAlignment(CharUnits actualBaseAlign,
Address CodeGenFunction::LoadCXXThisAddress() {
assert(CurFuncDecl && "loading 'this' without a func declaration?");
- assert(isa<CXXMethodDecl>(CurFuncDecl));
+ auto *MD = cast<CXXMethodDecl>(CurFuncDecl);
// Lazily compute CXXThisAlignment.
if (CXXThisAlignment.isZero()) {
// Just use the best known alignment for the parent.
// TODO: if we're currently emitting a complete-object ctor/dtor,
// we can always use the complete-object alignment.
- auto RD = cast<CXXMethodDecl>(CurFuncDecl)->getParent();
- CXXThisAlignment = CGM.getClassPointerAlignment(RD);
+ CXXThisAlignment = CGM.getClassPointerAlignment(MD->getParent());
}
- return Address(LoadCXXThis(), CXXThisAlignment);
+ llvm::Type *Ty = ConvertType(MD->getFunctionObjectParameterType());
+ return Address(LoadCXXThis(), Ty, CXXThisAlignment, KnownNonNull);
}
/// Emit the address of a field using a member data pointer.
@@ -162,7 +164,8 @@ CodeGenFunction::EmitCXXMemberDataPointerAddress(const Expr *E, Address base,
CGM.getDynamicOffsetAlignment(base.getAlignment(),
memberPtrType->getClass()->getAsCXXRecordDecl(),
memberAlign);
- return Address(ptr, memberAlign);
+ return Address(ptr, ConvertTypeForMem(memberPtrType->getPointeeType()),
+ memberAlign);
}
CharUnits CodeGenModule::computeNonVirtualBaseClassOffset(
@@ -234,12 +237,10 @@ CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(Address This,
// TODO: for complete types, this should be possible with a GEP.
Address V = This;
if (!Offset.isZero()) {
- V = Builder.CreateElementBitCast(V, Int8Ty);
+ V = V.withElementType(Int8Ty);
V = Builder.CreateConstInBoundsByteGEP(V, Offset);
}
- V = Builder.CreateElementBitCast(V, ConvertType(Base));
-
- return V;
+ return V.withElementType(ConvertType(Base));
}
static Address
@@ -270,8 +271,6 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, Address addr,
// Apply the base offset.
llvm::Value *ptr = addr.getPointer();
- unsigned AddrSpace = ptr->getType()->getPointerAddressSpace();
- ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8Ty->getPointerTo(AddrSpace));
ptr = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ptr, baseOffset, "add.ptr");
// If we have a virtual component, the alignment of the result will
@@ -286,7 +285,7 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, Address addr,
}
alignment = alignment.alignmentAtOffset(nonVirtualOffset);
- return Address(ptr, alignment);
+ return Address(ptr, CGF.Int8Ty, alignment);
}
Address CodeGenFunction::GetAddressOfBaseClass(
@@ -326,9 +325,9 @@ Address CodeGenFunction::GetAddressOfBaseClass(
}
// Get the base pointer type.
- llvm::Type *BasePtrTy =
- ConvertType((PathEnd[-1])->getType())
- ->getPointerTo(Value.getType()->getPointerAddressSpace());
+ llvm::Type *BaseValueTy = ConvertType((PathEnd[-1])->getType());
+ llvm::Type *PtrTy = llvm::PointerType::get(
+ CGM.getLLVMContext(), Value.getType()->getPointerAddressSpace());
QualType DerivedTy = getContext().getRecordType(Derived);
CharUnits DerivedAlign = CGM.getClassPointerAlignment(Derived);
@@ -342,7 +341,7 @@ Address CodeGenFunction::GetAddressOfBaseClass(
EmitTypeCheck(TCK_Upcast, Loc, Value.getPointer(),
DerivedTy, DerivedAlign, SkippedChecks);
}
- return Builder.CreateBitCast(Value, BasePtrTy);
+ return Value.withElementType(BaseValueTy);
}
llvm::BasicBlock *origBB = nullptr;
@@ -379,7 +378,7 @@ Address CodeGenFunction::GetAddressOfBaseClass(
VirtualOffset, Derived, VBase);
// Cast to the destination type.
- Value = Builder.CreateBitCast(Value, BasePtrTy);
+ Value = Value.withElementType(BaseValueTy);
// Build a phi if we needed a null check.
if (NullCheckValue) {
@@ -387,10 +386,10 @@ Address CodeGenFunction::GetAddressOfBaseClass(
Builder.CreateBr(endBB);
EmitBlock(endBB);
- llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
+ llvm::PHINode *PHI = Builder.CreatePHI(PtrTy, 2, "cast.result");
PHI->addIncoming(Value.getPointer(), notNullBB);
- PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
- Value = Address(PHI, Value.getAlignment());
+ PHI->addIncoming(llvm::Constant::getNullValue(PtrTy), origBB);
+ Value = Value.withPointer(PHI, NotKnownNonNull);
}
return Value;
@@ -405,17 +404,15 @@ CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr,
assert(PathBegin != PathEnd && "Base path should not be empty!");
QualType DerivedTy =
- getContext().getCanonicalType(getContext().getTagDeclType(Derived));
- unsigned AddrSpace =
- BaseAddr.getPointer()->getType()->getPointerAddressSpace();
- llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo(AddrSpace);
+ getContext().getCanonicalType(getContext().getTagDeclType(Derived));
+ llvm::Type *DerivedValueTy = ConvertType(DerivedTy);
llvm::Value *NonVirtualOffset =
CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);
if (!NonVirtualOffset) {
// No offset, we can just cast back.
- return Builder.CreateBitCast(BaseAddr, DerivedPtrTy);
+ return BaseAddr.withElementType(DerivedValueTy);
}
llvm::BasicBlock *CastNull = nullptr;
@@ -433,13 +430,10 @@ CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr,
}
// Apply the offset.
- llvm::Value *Value = Builder.CreateBitCast(BaseAddr.getPointer(), Int8PtrTy);
+ llvm::Value *Value = BaseAddr.getPointer();
Value = Builder.CreateInBoundsGEP(
Int8Ty, Value, Builder.CreateNeg(NonVirtualOffset), "sub.ptr");
- // Just cast.
- Value = Builder.CreateBitCast(Value, DerivedPtrTy);
-
// Produce a PHI if we had a null-check.
if (NullCheckValue) {
Builder.CreateBr(CastEnd);
@@ -453,7 +447,7 @@ CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr,
Value = PHI;
}
- return Address(Value, CGM.getClassPointerAlignment(Derived));
+ return Address(Value, DerivedValueTy, CGM.getClassPointerAlignment(Derived));
}
llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
@@ -517,7 +511,7 @@ namespace {
const CXXDestructorDecl *D = BaseClass->getDestructor();
// We are already inside a destructor, so presumably the object being
// destroyed should have the expected type.
- QualType ThisTy = D->getThisObjectType();
+ QualType ThisTy = D->getFunctionObjectParameterType();
Address Addr =
CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThisAddress(),
DerivedClass, BaseClass,
@@ -862,6 +856,7 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
incrementProfileCounter(Body);
+ maybeCreateMCDCCondBitmap();
RunCleanupsScope RunCleanups(*this);
@@ -996,16 +991,8 @@ namespace {
private:
void emitMemcpyIR(Address DestPtr, Address SrcPtr, CharUnits Size) {
- llvm::PointerType *DPT = DestPtr.getType();
- llvm::Type *DBP =
- llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace());
- DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP);
-
- llvm::PointerType *SPT = SrcPtr.getType();
- llvm::Type *SBP =
- llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace());
- SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP);
-
+ DestPtr = DestPtr.withElementType(CGF.Int8Ty);
+ SrcPtr = SrcPtr.withElementType(CGF.Int8Ty);
CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity());
}
@@ -1306,10 +1293,10 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
assert(BaseCtorContinueBB);
}
- llvm::Value *const OldThis = CXXThisValue;
for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) {
if (!ConstructVBases)
continue;
+ SaveAndRestore ThisRAII(CXXThisValue);
if (CGM.getCodeGenOpts().StrictVTablePointers &&
CGM.getCodeGenOpts().OptimizationLevel > 0 &&
isInitializerOfDynamicClass(*B))
@@ -1326,7 +1313,7 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
// Then, non-virtual base initializers.
for (; B != E && (*B)->isBaseInitializer(); B++) {
assert(!(*B)->isBaseVirtual());
-
+ SaveAndRestore ThisRAII(CXXThisValue);
if (CGM.getCodeGenOpts().StrictVTablePointers &&
CGM.getCodeGenOpts().OptimizationLevel > 0 &&
isInitializerOfDynamicClass(*B))
@@ -1334,8 +1321,6 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
EmitBaseInitializer(*this, ClassDecl, *B);
}
- CXXThisValue = OldThis;
-
InitializeVTablePointers(ClassDecl);
// And finally, initialize class members.
@@ -1424,6 +1409,11 @@ static bool CanSkipVTablePointerInitialization(CodeGenFunction &CGF,
if (!ClassDecl->isDynamicClass())
return true;
+ // For a final class, the vtable pointer is known to already point to the
+ // class's vtable.
+ if (ClassDecl->isEffectivelyFinal())
+ return true;
+
if (!Dtor->hasTrivialBody())
return false;
@@ -1455,8 +1445,10 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
}
Stmt *Body = Dtor->getBody();
- if (Body)
+ if (Body) {
incrementProfileCounter(Body);
+ maybeCreateMCDCCondBitmap();
+ }
// The call to operator delete in a deleting destructor happens
// outside of the function-try-block, which means it's always
@@ -1466,7 +1458,7 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
RunCleanupsScope DtorEpilogue(*this);
EnterDtorCleanups(Dtor, Dtor_Deleting);
if (HaveInsertPoint()) {
- QualType ThisTy = Dtor->getThisObjectType();
+ QualType ThisTy = Dtor->getFunctionObjectParameterType();
EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
/*Delegating=*/false, LoadCXXThisAddress(), ThisTy);
}
@@ -1500,14 +1492,14 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
EnterDtorCleanups(Dtor, Dtor_Complete);
if (!isTryBody) {
- QualType ThisTy = Dtor->getThisObjectType();
+ QualType ThisTy = Dtor->getFunctionObjectParameterType();
EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
/*Delegating=*/false, LoadCXXThisAddress(), ThisTy);
break;
}
// Fallthrough: act like we're in the base variant.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Dtor_Base:
assert(Body);
@@ -1559,6 +1551,7 @@ void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args)
LexicalScope Scope(*this, RootCS->getSourceRange());
incrementProfileCounter(RootCS);
+ maybeCreateMCDCCondBitmap();
AssignmentMemcpyizer AM(*this, AssignOp, Args);
for (auto *I : RootCS->body())
AM.emitAssignment(I);
@@ -1651,112 +1644,135 @@ namespace {
}
};
- static void EmitSanitizerDtorCallback(CodeGenFunction &CGF, llvm::Value *Ptr,
- CharUnits::QuantityType PoisonSize) {
- CodeGenFunction::SanitizerScope SanScope(&CGF);
- // Pass in void pointer and size of region as arguments to runtime
- // function
- llvm::Value *Args[] = {CGF.Builder.CreateBitCast(Ptr, CGF.VoidPtrTy),
- llvm::ConstantInt::get(CGF.SizeTy, PoisonSize)};
+ class DeclAsInlineDebugLocation {
+ CGDebugInfo *DI;
+ llvm::MDNode *InlinedAt;
+ std::optional<ApplyDebugLocation> Location;
+
+ public:
+ DeclAsInlineDebugLocation(CodeGenFunction &CGF, const NamedDecl &Decl)
+ : DI(CGF.getDebugInfo()) {
+ if (!DI)
+ return;
+ InlinedAt = DI->getInlinedAt();
+ DI->setInlinedAt(CGF.Builder.getCurrentDebugLocation());
+ Location.emplace(CGF, Decl.getLocation());
+ }
- llvm::Type *ArgTypes[] = {CGF.VoidPtrTy, CGF.SizeTy};
+ ~DeclAsInlineDebugLocation() {
+ if (!DI)
+ return;
+ Location.reset();
+ DI->setInlinedAt(InlinedAt);
+ }
+ };
- llvm::FunctionType *FnType =
- llvm::FunctionType::get(CGF.VoidTy, ArgTypes, false);
- llvm::FunctionCallee Fn =
- CGF.CGM.CreateRuntimeFunction(FnType, "__sanitizer_dtor_callback");
- CGF.EmitNounwindRuntimeCall(Fn, Args);
- }
+ static void EmitSanitizerDtorCallback(
+ CodeGenFunction &CGF, StringRef Name, llvm::Value *Ptr,
+ std::optional<CharUnits::QuantityType> PoisonSize = {}) {
+ CodeGenFunction::SanitizerScope SanScope(&CGF);
+ // Pass in void pointer and size of region as arguments to runtime
+ // function
+ SmallVector<llvm::Value *, 2> Args = {Ptr};
+ SmallVector<llvm::Type *, 2> ArgTypes = {CGF.VoidPtrTy};
+
+ if (PoisonSize.has_value()) {
+ Args.emplace_back(llvm::ConstantInt::get(CGF.SizeTy, *PoisonSize));
+ ArgTypes.emplace_back(CGF.SizeTy);
+ }
- class SanitizeDtorMembers final : public EHScopeStack::Cleanup {
- const CXXDestructorDecl *Dtor;
+ llvm::FunctionType *FnType =
+ llvm::FunctionType::get(CGF.VoidTy, ArgTypes, false);
+ llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(FnType, Name);
- public:
- SanitizeDtorMembers(const CXXDestructorDecl *Dtor) : Dtor(Dtor) {}
+ CGF.EmitNounwindRuntimeCall(Fn, Args);
+ }
+
+ static void
+ EmitSanitizerDtorFieldsCallback(CodeGenFunction &CGF, llvm::Value *Ptr,
+ CharUnits::QuantityType PoisonSize) {
+ EmitSanitizerDtorCallback(CGF, "__sanitizer_dtor_callback_fields", Ptr,
+ PoisonSize);
+ }
+
+ /// Poison base class with a trivial destructor.
+ struct SanitizeDtorTrivialBase final : EHScopeStack::Cleanup {
+ const CXXRecordDecl *BaseClass;
+ bool BaseIsVirtual;
+ SanitizeDtorTrivialBase(const CXXRecordDecl *Base, bool BaseIsVirtual)
+ : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}
- // Generate function call for handling object poisoning.
- // Disables tail call elimination, to prevent the current stack frame
- // from disappearing from the stack trace.
void Emit(CodeGenFunction &CGF, Flags flags) override {
- const ASTRecordLayout &Layout =
- CGF.getContext().getASTRecordLayout(Dtor->getParent());
+ const CXXRecordDecl *DerivedClass =
+ cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();
- // Nothing to poison.
- if (Layout.getFieldCount() == 0)
+ Address Addr = CGF.GetAddressOfDirectBaseInCompleteClass(
+ CGF.LoadCXXThisAddress(), DerivedClass, BaseClass, BaseIsVirtual);
+
+ const ASTRecordLayout &BaseLayout =
+ CGF.getContext().getASTRecordLayout(BaseClass);
+ CharUnits BaseSize = BaseLayout.getSize();
+
+ if (!BaseSize.isPositive())
return;
+ // Use the base class declaration location as inline DebugLocation. All
+ // fields of the class are destroyed.
+ DeclAsInlineDebugLocation InlineHere(CGF, *BaseClass);
+ EmitSanitizerDtorFieldsCallback(CGF, Addr.getPointer(),
+ BaseSize.getQuantity());
+
// Prevent the current stack frame from disappearing from the stack trace.
CGF.CurFn->addFnAttr("disable-tail-calls", "true");
-
- // Construct pointer to region to begin poisoning, and calculate poison
- // size, so that only members declared in this class are poisoned.
- ASTContext &Context = CGF.getContext();
-
- const RecordDecl *Decl = Dtor->getParent();
- auto Fields = Decl->fields();
- auto IsTrivial = [&](const FieldDecl *F) {
- return FieldHasTrivialDestructorBody(Context, F);
- };
-
- auto IsZeroSize = [&](const FieldDecl *F) {
- return F->isZeroSize(Context);
- };
-
- // Poison blocks of fields with trivial destructors making sure that block
- // begin and end do not point to zero-sized fields. They don't have
- // correct offsets so can't be used to calculate poisoning range.
- for (auto It = Fields.begin(); It != Fields.end();) {
- It = std::find_if(It, Fields.end(), [&](const FieldDecl *F) {
- return IsTrivial(F) && !IsZeroSize(F);
- });
- if (It == Fields.end())
- break;
- auto Start = It++;
- It = std::find_if(It, Fields.end(), [&](const FieldDecl *F) {
- return !IsTrivial(F) && !IsZeroSize(F);
- });
-
- PoisonMembers(CGF, (*Start)->getFieldIndex(),
- It == Fields.end() ? -1 : (*It)->getFieldIndex());
- }
}
+ };
- private:
- /// \param layoutStartOffset index of the ASTRecordLayout field to
- /// start poisoning (inclusive)
- /// \param layoutEndOffset index of the ASTRecordLayout field to
- /// end poisoning (exclusive)
- void PoisonMembers(CodeGenFunction &CGF, unsigned layoutStartOffset,
- unsigned layoutEndOffset) {
- ASTContext &Context = CGF.getContext();
+ class SanitizeDtorFieldRange final : public EHScopeStack::Cleanup {
+ const CXXDestructorDecl *Dtor;
+ unsigned StartIndex;
+ unsigned EndIndex;
+
+ public:
+ SanitizeDtorFieldRange(const CXXDestructorDecl *Dtor, unsigned StartIndex,
+ unsigned EndIndex)
+ : Dtor(Dtor), StartIndex(StartIndex), EndIndex(EndIndex) {}
+
+ // Generate function call for handling object poisoning.
+ // Disables tail call elimination, to prevent the current stack frame
+ // from disappearing from the stack trace.
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ const ASTContext &Context = CGF.getContext();
const ASTRecordLayout &Layout =
Context.getASTRecordLayout(Dtor->getParent());
- // It's a first trivia field so it should be at the begining of char,
+ // It's a first trivial field so it should be at the begining of a char,
// still round up start offset just in case.
- CharUnits PoisonStart =
- Context.toCharUnitsFromBits(Layout.getFieldOffset(layoutStartOffset) +
- Context.getCharWidth() - 1);
+ CharUnits PoisonStart = Context.toCharUnitsFromBits(
+ Layout.getFieldOffset(StartIndex) + Context.getCharWidth() - 1);
llvm::ConstantInt *OffsetSizePtr =
llvm::ConstantInt::get(CGF.SizeTy, PoisonStart.getQuantity());
- llvm::Value *OffsetPtr = CGF.Builder.CreateGEP(
- CGF.Int8Ty,
- CGF.Builder.CreateBitCast(CGF.LoadCXXThis(), CGF.Int8PtrTy),
- OffsetSizePtr);
+ llvm::Value *OffsetPtr =
+ CGF.Builder.CreateGEP(CGF.Int8Ty, CGF.LoadCXXThis(), OffsetSizePtr);
CharUnits PoisonEnd;
- if (layoutEndOffset >= Layout.getFieldCount()) {
+ if (EndIndex >= Layout.getFieldCount()) {
PoisonEnd = Layout.getNonVirtualSize();
} else {
PoisonEnd =
- Context.toCharUnitsFromBits(Layout.getFieldOffset(layoutEndOffset));
+ Context.toCharUnitsFromBits(Layout.getFieldOffset(EndIndex));
}
CharUnits PoisonSize = PoisonEnd - PoisonStart;
if (!PoisonSize.isPositive())
return;
- EmitSanitizerDtorCallback(CGF, OffsetPtr, PoisonSize.getQuantity());
+ // Use the top field declaration location as inline DebugLocation.
+ DeclAsInlineDebugLocation InlineHere(
+ CGF, **std::next(Dtor->getParent()->field_begin(), StartIndex));
+ EmitSanitizerDtorFieldsCallback(CGF, OffsetPtr, PoisonSize.getQuantity());
+
+ // Prevent the current stack frame from disappearing from the stack trace.
+ CGF.CurFn->addFnAttr("disable-tail-calls", "true");
}
};
@@ -1770,17 +1786,45 @@ namespace {
void Emit(CodeGenFunction &CGF, Flags flags) override {
assert(Dtor->getParent()->isDynamicClass());
(void)Dtor;
- ASTContext &Context = CGF.getContext();
// Poison vtable and vtable ptr if they exist for this class.
llvm::Value *VTablePtr = CGF.LoadCXXThis();
- CharUnits::QuantityType PoisonSize =
- Context.toCharUnitsFromBits(CGF.PointerWidthInBits).getQuantity();
// Pass in void pointer and size of region as arguments to runtime
// function
- EmitSanitizerDtorCallback(CGF, VTablePtr, PoisonSize);
+ EmitSanitizerDtorCallback(CGF, "__sanitizer_dtor_callback_vptr",
+ VTablePtr);
}
};
+
+ class SanitizeDtorCleanupBuilder {
+ ASTContext &Context;
+ EHScopeStack &EHStack;
+ const CXXDestructorDecl *DD;
+ std::optional<unsigned> StartIndex;
+
+ public:
+ SanitizeDtorCleanupBuilder(ASTContext &Context, EHScopeStack &EHStack,
+ const CXXDestructorDecl *DD)
+ : Context(Context), EHStack(EHStack), DD(DD), StartIndex(std::nullopt) {}
+ void PushCleanupForField(const FieldDecl *Field) {
+ if (Field->isZeroSize(Context))
+ return;
+ unsigned FieldIndex = Field->getFieldIndex();
+ if (FieldHasTrivialDestructorBody(Context, Field)) {
+ if (!StartIndex)
+ StartIndex = FieldIndex;
+ } else if (StartIndex) {
+ EHStack.pushCleanup<SanitizeDtorFieldRange>(NormalAndEHCleanup, DD,
+ *StartIndex, FieldIndex);
+ StartIndex = std::nullopt;
+ }
+ }
+ void End() {
+ if (StartIndex)
+ EHStack.pushCleanup<SanitizeDtorFieldRange>(NormalAndEHCleanup, DD,
+ *StartIndex, -1);
+ }
+ };
} // end anonymous namespace
/// Emit all code that comes at the end of class's
@@ -1843,13 +1887,19 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
auto *BaseClassDecl =
cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
- // Ignore trivial destructors.
- if (BaseClassDecl->hasTrivialDestructor())
- continue;
-
- EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
- BaseClassDecl,
- /*BaseIsVirtual*/ true);
+ if (BaseClassDecl->hasTrivialDestructor()) {
+ // Under SanitizeMemoryUseAfterDtor, poison the trivial base class
+ // memory. For non-trival base classes the same is done in the class
+ // destructor.
+ if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
+ SanOpts.has(SanitizerKind::Memory) && !BaseClassDecl->isEmpty())
+ EHStack.pushCleanup<SanitizeDtorTrivialBase>(NormalAndEHCleanup,
+ BaseClassDecl,
+ /*BaseIsVirtual*/ true);
+ } else {
+ EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, BaseClassDecl,
+ /*BaseIsVirtual*/ true);
+ }
}
return;
@@ -1871,36 +1921,46 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();
- // Ignore trivial destructors.
- if (BaseClassDecl->hasTrivialDestructor())
- continue;
-
- EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
- BaseClassDecl,
- /*BaseIsVirtual*/ false);
+ if (BaseClassDecl->hasTrivialDestructor()) {
+ if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
+ SanOpts.has(SanitizerKind::Memory) && !BaseClassDecl->isEmpty())
+ EHStack.pushCleanup<SanitizeDtorTrivialBase>(NormalAndEHCleanup,
+ BaseClassDecl,
+ /*BaseIsVirtual*/ false);
+ } else {
+ EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, BaseClassDecl,
+ /*BaseIsVirtual*/ false);
+ }
}
// Poison fields such that access after their destructors are
// invoked, and before the base class destructor runs, is invalid.
- if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
- SanOpts.has(SanitizerKind::Memory))
- EHStack.pushCleanup<SanitizeDtorMembers>(NormalAndEHCleanup, DD);
+ bool SanitizeFields = CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
+ SanOpts.has(SanitizerKind::Memory);
+ SanitizeDtorCleanupBuilder SanitizeBuilder(getContext(), EHStack, DD);
// Destroy direct fields.
for (const auto *Field : ClassDecl->fields()) {
+ if (SanitizeFields)
+ SanitizeBuilder.PushCleanupForField(Field);
+
QualType type = Field->getType();
QualType::DestructionKind dtorKind = type.isDestructedType();
- if (!dtorKind) continue;
+ if (!dtorKind)
+ continue;
// Anonymous union members do not have their destructors called.
const RecordType *RT = type->getAsUnionType();
- if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;
+ if (RT && RT->getDecl()->isAnonymousStructOrUnion())
+ continue;
CleanupKind cleanupKind = getCleanupKind(dtorKind);
- EHStack.pushCleanup<DestroyField>(cleanupKind, Field,
- getDestroyer(dtorKind),
- cleanupKind & EHCleanup);
+ EHStack.pushCleanup<DestroyField>(
+ cleanupKind, Field, getDestroyer(dtorKind), cleanupKind & EHCleanup);
}
+
+ if (SanitizeFields)
+ SanitizeBuilder.End();
}
/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
@@ -1986,7 +2046,7 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
CharUnits eltAlignment =
arrayBase.getAlignment()
.alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
- Address curAddr = Address(cur, eltAlignment);
+ Address curAddr = Address(cur, elementType, eltAlignment);
// Zero initialize the storage, if requested.
if (zeroInitialize)
@@ -2057,14 +2117,13 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
CallArgList Args;
Address This = ThisAVS.getAddress();
LangAS SlotAS = ThisAVS.getQualifiers().getAddressSpace();
- QualType ThisType = D->getThisType();
- LangAS ThisAS = ThisType.getTypePtr()->getPointeeType().getAddressSpace();
+ LangAS ThisAS = D->getFunctionObjectParameterType().getAddressSpace();
llvm::Value *ThisPtr = This.getPointer();
if (SlotAS != ThisAS) {
unsigned TargetThisAS = getContext().getTargetAddressSpace(ThisAS);
llvm::Type *NewType =
- ThisPtr->getType()->getPointerElementType()->getPointerTo(TargetThisAS);
+ llvm::PointerType::get(getLLVMContext(), TargetThisAS);
ThisPtr = getTargetHooks().performAddrSpaceCast(*this, This.getPointer(),
ThisAS, SlotAS, NewType);
}
@@ -2150,8 +2209,8 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
assert(Args.size() == 2 && "unexpected argcount for trivial ctor");
QualType SrcTy = D->getParamDecl(0)->getType().getNonReferenceType();
- Address Src(Args[1].getRValue(*this).getScalarVal(),
- CGM.getNaturalTypeAlignment(SrcTy));
+ Address Src = Address(Args[1].getRValue(*this).getScalarVal(), ConvertTypeForMem(SrcTy),
+ CGM.getNaturalTypeAlignment(SrcTy));
LValue SrcLVal = MakeAddrLValue(Src, SrcTy);
QualType DestTy = getContext().getTypeDeclType(ClassDecl);
LValue DestLVal = MakeAddrLValue(This, DestTy);
@@ -2334,8 +2393,8 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
// Push the src ptr.
QualType QT = *(FPT->param_type_begin());
llvm::Type *t = CGM.getTypes().ConvertType(QT);
- Src = Builder.CreateBitCast(Src, t);
- Args.add(RValue::get(Src.getPointer()), QT);
+ llvm::Value *SrcVal = Builder.CreateBitCast(Src.getPointer(), t);
+ Args.add(RValue::get(SrcVal), QT);
// Skip over first argument (Src).
EmitCallArgs(Args, FPT, drop_begin(E->arguments(), 1), E->getConstructor(),
@@ -2397,7 +2456,7 @@ namespace {
void Emit(CodeGenFunction &CGF, Flags flags) override {
// We are calling the destructor from within the constructor.
// Therefore, "this" should have the expected type.
- QualType ThisTy = Dtor->getThisObjectType();
+ QualType ThisTy = Dtor->getFunctionObjectParameterType();
CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
/*Delegating=*/true, Addr, ThisTy);
}
@@ -2502,7 +2561,6 @@ void CodeGenFunction::InitializeVTablePointer(const VPtr &Vptr) {
// Apply the offsets.
Address VTableField = LoadCXXThisAddress();
-
if (!NonVirtualOffset.isZero() || VirtualOffset)
VTableField = ApplyNonVirtualAndVirtualOffset(
*this, VTableField, NonVirtualOffset, VirtualOffset, Vptr.VTableClass,
@@ -2511,20 +2569,13 @@ void CodeGenFunction::InitializeVTablePointer(const VPtr &Vptr) {
// Finally, store the address point. Use the same LLVM types as the field to
// support optimization.
unsigned GlobalsAS = CGM.getDataLayout().getDefaultGlobalsAddressSpace();
- unsigned ProgAS = CGM.getDataLayout().getProgramAddressSpace();
- llvm::Type *VTablePtrTy =
- llvm::FunctionType::get(CGM.Int32Ty, /*isVarArg=*/true)
- ->getPointerTo(ProgAS)
- ->getPointerTo(GlobalsAS);
- // vtable field is is derived from `this` pointer, therefore it should be in
- // default address space.
- VTableField = Builder.CreatePointerBitCastOrAddrSpaceCast(
- VTableField, VTablePtrTy->getPointerTo());
- VTableAddressPoint = Builder.CreatePointerBitCastOrAddrSpaceCast(
- VTableAddressPoint, VTablePtrTy);
+ llvm::Type *PtrTy = llvm::PointerType::get(CGM.getLLVMContext(), GlobalsAS);
+ // vtable field is derived from `this` pointer, therefore they should be in
+ // the same addr space. Note that this might not be LLVM address space 0.
+ VTableField = VTableField.withElementType(PtrTy);
llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
- TBAAAccessInfo TBAAInfo = CGM.getTBAAVTablePtrAccessInfo(VTablePtrTy);
+ TBAAAccessInfo TBAAInfo = CGM.getTBAAVTablePtrAccessInfo(PtrTy);
CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
CGM.getCodeGenOpts().StrictVTablePointers)
@@ -2617,7 +2668,7 @@ void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
llvm::Value *CodeGenFunction::GetVTablePtr(Address This,
llvm::Type *VTableTy,
const CXXRecordDecl *RD) {
- Address VTablePtrSrc = Builder.CreateElementBitCast(This, VTableTy);
+ Address VTablePtrSrc = This.withElementType(VTableTy);
llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
TBAAAccessInfo TBAAInfo = CGM.getTBAAVTablePtrAccessInfo(VTableTy);
CGM.DecorateInstructionWithTBAA(VTable, TBAAInfo);
@@ -2670,18 +2721,23 @@ void CodeGenFunction::EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
if (SanOpts.has(SanitizerKind::CFIVCall))
EmitVTablePtrCheckForCall(RD, VTable, CodeGenFunction::CFITCK_VCall, Loc);
else if (CGM.getCodeGenOpts().WholeProgramVTables &&
- // Don't insert type test assumes if we are forcing public std
+ // Don't insert type test assumes if we are forcing public
// visibility.
- !CGM.HasLTOVisibilityPublicStd(RD)) {
- llvm::Metadata *MD =
- CGM.CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
+ !CGM.AlwaysHasLTOVisibilityPublic(RD)) {
+ QualType Ty = QualType(RD->getTypeForDecl(), 0);
+ llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(Ty);
llvm::Value *TypeId =
llvm::MetadataAsValue::get(CGM.getLLVMContext(), MD);
- llvm::Value *CastedVTable = Builder.CreateBitCast(VTable, Int8PtrTy);
+ // If we already know that the call has hidden LTO visibility, emit
+ // @llvm.type.test(). Otherwise emit @llvm.public.type.test(), which WPD
+ // will convert to @llvm.type.test() if we assert at link time that we have
+ // whole program visibility.
+ llvm::Intrinsic::ID IID = CGM.HasHiddenLTOVisibility(RD)
+ ? llvm::Intrinsic::type_test
+ : llvm::Intrinsic::public_type_test;
llvm::Value *TypeTest =
- Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
- {CastedVTable, TypeId});
+ Builder.CreateCall(CGM.getIntrinsic(IID), {VTable, TypeId});
Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::assume), TypeTest);
}
}
@@ -2696,8 +2752,7 @@ void CodeGenFunction::EmitVTablePtrCheckForCall(const CXXRecordDecl *RD,
EmitVTablePtrCheck(RD, VTable, TCK, Loc);
}
-void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T,
- llvm::Value *Derived,
+void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T, Address Derived,
bool MayBeNull,
CFITypeCheckKind TCK,
SourceLocation Loc) {
@@ -2720,7 +2775,7 @@ void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T,
if (MayBeNull) {
llvm::Value *DerivedNotNull =
- Builder.CreateIsNotNull(Derived, "cast.nonnull");
+ Builder.CreateIsNotNull(Derived.getPointer(), "cast.nonnull");
llvm::BasicBlock *CheckBlock = createBasicBlock("cast.check");
ContBlock = createBasicBlock("cast.cont");
@@ -2731,8 +2786,8 @@ void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T,
}
llvm::Value *VTable;
- std::tie(VTable, ClassDecl) = CGM.getCXXABI().LoadVTablePtr(
- *this, Address(Derived, getPointerAlign()), ClassDecl);
+ std::tie(VTable, ClassDecl) =
+ CGM.getCXXABI().LoadVTablePtr(*this, Derived, ClassDecl);
EmitVTablePtrCheck(ClassDecl, VTable, TCK, Loc);
@@ -2786,9 +2841,8 @@ void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD,
CGM.CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
- llvm::Value *CastedVTable = Builder.CreateBitCast(VTable, Int8PtrTy);
llvm::Value *TypeTest = Builder.CreateCall(
- CGM.getIntrinsic(llvm::Intrinsic::type_test), {CastedVTable, TypeId});
+ CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, TypeId});
llvm::Constant *StaticData[] = {
llvm::ConstantInt::get(Int8Ty, TCK),
@@ -2798,7 +2852,7 @@ void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD,
auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
- EmitCfiSlowPathCheck(M, TypeTest, CrossDsoTypeId, CastedVTable, StaticData);
+ EmitCfiSlowPathCheck(M, TypeTest, CrossDsoTypeId, VTable, StaticData);
return;
}
@@ -2811,9 +2865,9 @@ void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD,
CGM.getLLVMContext(),
llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
llvm::Value *ValidVtable = Builder.CreateCall(
- CGM.getIntrinsic(llvm::Intrinsic::type_test), {CastedVTable, AllVtables});
+ CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
EmitCheck(std::make_pair(TypeTest, M), SanitizerHandler::CFICheckFail,
- StaticData, {CastedVTable, ValidVtable});
+ StaticData, {VTable, ValidVtable});
}
bool CodeGenFunction::ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD) {
@@ -2834,7 +2888,8 @@ bool CodeGenFunction::ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD) {
}
llvm::Value *CodeGenFunction::EmitVTableTypeCheckedLoad(
- const CXXRecordDecl *RD, llvm::Value *VTable, uint64_t VTableByteOffset) {
+ const CXXRecordDecl *RD, llvm::Value *VTable, llvm::Type *VTableTy,
+ uint64_t VTableByteOffset) {
SanitizerScope SanScope(this);
EmitSanitizerStatReport(llvm::SanStat_CFI_VCall);
@@ -2843,11 +2898,9 @@ llvm::Value *CodeGenFunction::EmitVTableTypeCheckedLoad(
CGM.CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
llvm::Value *TypeId = llvm::MetadataAsValue::get(CGM.getLLVMContext(), MD);
- llvm::Value *CastedVTable = Builder.CreateBitCast(VTable, Int8PtrTy);
llvm::Value *CheckedLoad = Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
- {CastedVTable, llvm::ConstantInt::get(Int32Ty, VTableByteOffset),
- TypeId});
+ {VTable, llvm::ConstantInt::get(Int32Ty, VTableByteOffset), TypeId});
llvm::Value *CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
std::string TypeName = RD->getQualifiedNameAsString();
@@ -2858,20 +2911,21 @@ llvm::Value *CodeGenFunction::EmitVTableTypeCheckedLoad(
SanitizerHandler::CFICheckFail, {}, {});
}
- return Builder.CreateBitCast(
- Builder.CreateExtractValue(CheckedLoad, 0),
- cast<llvm::PointerType>(VTable->getType())->getElementType());
+ return Builder.CreateBitCast(Builder.CreateExtractValue(CheckedLoad, 0),
+ VTableTy);
}
void CodeGenFunction::EmitForwardingCallToLambda(
- const CXXMethodDecl *callOperator,
- CallArgList &callArgs) {
+ const CXXMethodDecl *callOperator, CallArgList &callArgs,
+ const CGFunctionInfo *calleeFnInfo, llvm::Constant *calleePtr) {
// Get the address of the call operator.
- const CGFunctionInfo &calleeFnInfo =
- CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
- llvm::Constant *calleePtr =
- CGM.GetAddrOfFunction(GlobalDecl(callOperator),
- CGM.getTypes().GetFunctionType(calleeFnInfo));
+ if (!calleeFnInfo)
+ calleeFnInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
+
+ if (!calleePtr)
+ calleePtr =
+ CGM.GetAddrOfFunction(GlobalDecl(callOperator),
+ CGM.getTypes().GetFunctionType(*calleeFnInfo));
// Prepare the return slot.
const FunctionProtoType *FPT =
@@ -2879,8 +2933,8 @@ void CodeGenFunction::EmitForwardingCallToLambda(
QualType resultType = FPT->getReturnType();
ReturnValueSlot returnSlot;
if (!resultType->isVoidType() &&
- calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
- !hasScalarEvaluationKind(calleeFnInfo.getReturnType()))
+ calleeFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+ !hasScalarEvaluationKind(calleeFnInfo->getReturnType()))
returnSlot =
ReturnValueSlot(ReturnValue, resultType.isVolatileQualified(),
/*IsUnused=*/false, /*IsExternallyDestructed=*/true);
@@ -2891,7 +2945,7 @@ void CodeGenFunction::EmitForwardingCallToLambda(
// Now emit our call.
auto callee = CGCallee::forDirect(calleePtr, GlobalDecl(callOperator));
- RValue RV = EmitCall(calleeFnInfo, callee, returnSlot, callArgs);
+ RValue RV = EmitCall(*calleeFnInfo, callee, returnSlot, callArgs);
// If necessary, copy the returned value into the slot.
if (!resultType->isVoidType() && returnSlot.isNull()) {
@@ -2925,7 +2979,7 @@ void CodeGenFunction::EmitLambdaBlockInvokeBody() {
CallArgs.add(RValue::get(ThisPtr.getPointer()), ThisType);
// Add the rest of the parameters.
- for (auto param : BD->parameters())
+ for (auto *param : BD->parameters())
EmitDelegateCallArg(CallArgs, param, param->getBeginLoc());
assert(!Lambda->isGenericLambda() &&
@@ -2933,20 +2987,35 @@ void CodeGenFunction::EmitLambdaBlockInvokeBody() {
EmitForwardingCallToLambda(CallOp, CallArgs);
}
-void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
+void CodeGenFunction::EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD) {
+ if (MD->isVariadic()) {
+ // FIXME: Making this work correctly is nasty because it requires either
+ // cloning the body of the call operator or making the call operator
+ // forward.
+ CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
+ return;
+ }
+
const CXXRecordDecl *Lambda = MD->getParent();
// Start building arguments for forwarding call
CallArgList CallArgs;
- QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
- llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType));
- CallArgs.add(RValue::get(ThisPtr), ThisType);
+ QualType LambdaType = getContext().getRecordType(Lambda);
+ QualType ThisType = getContext().getPointerType(LambdaType);
+ Address ThisPtr = CreateMemTemp(LambdaType, "unused.capture");
+ CallArgs.add(RValue::get(ThisPtr.getPointer()), ThisType);
- // Add the rest of the parameters.
- for (auto Param : MD->parameters())
+ EmitLambdaDelegatingInvokeBody(MD, CallArgs);
+}
+
+void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD,
+ CallArgList &CallArgs) {
+ // Add the rest of the forwarded parameters.
+ for (auto *Param : MD->parameters())
EmitDelegateCallArg(CallArgs, Param, Param->getBeginLoc());
+ const CXXRecordDecl *Lambda = MD->getParent();
const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
// For a generic lambda, find the corresponding call operator specialization
// to which the call to the static-invoker shall be forwarded.
@@ -2960,10 +3029,21 @@ void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
assert(CorrespondingCallOpSpecialization);
CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization);
}
+
+ // Special lambda forwarding when there are inalloca parameters.
+ if (hasInAllocaArg(MD)) {
+ const CGFunctionInfo *ImplFnInfo = nullptr;
+ llvm::Function *ImplFn = nullptr;
+ EmitLambdaInAllocaImplFn(CallOp, &ImplFnInfo, &ImplFn);
+
+ EmitForwardingCallToLambda(CallOp, CallArgs, ImplFnInfo, ImplFn);
+ return;
+ }
+
EmitForwardingCallToLambda(CallOp, CallArgs);
}
-void CodeGenFunction::EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD) {
+void CodeGenFunction::EmitLambdaInAllocaCallOpBody(const CXXMethodDecl *MD) {
if (MD->isVariadic()) {
// FIXME: Making this work correctly is nasty because it requires either
// cloning the body of the call operator or making the call operator forward.
@@ -2971,5 +3051,56 @@ void CodeGenFunction::EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD) {
return;
}
- EmitLambdaDelegatingInvokeBody(MD);
+ // Forward %this argument.
+ CallArgList CallArgs;
+ QualType LambdaType = getContext().getRecordType(MD->getParent());
+ QualType ThisType = getContext().getPointerType(LambdaType);
+ llvm::Value *ThisArg = CurFn->getArg(0);
+ CallArgs.add(RValue::get(ThisArg), ThisType);
+
+ EmitLambdaDelegatingInvokeBody(MD, CallArgs);
+}
+
+void CodeGenFunction::EmitLambdaInAllocaImplFn(
+ const CXXMethodDecl *CallOp, const CGFunctionInfo **ImplFnInfo,
+ llvm::Function **ImplFn) {
+ const CGFunctionInfo &FnInfo =
+ CGM.getTypes().arrangeCXXMethodDeclaration(CallOp);
+ llvm::Function *CallOpFn =
+ cast<llvm::Function>(CGM.GetAddrOfFunction(GlobalDecl(CallOp)));
+
+ // Emit function containing the original call op body. __invoke will delegate
+ // to this function.
+ SmallVector<CanQualType, 4> ArgTypes;
+ for (auto I = FnInfo.arg_begin(); I != FnInfo.arg_end(); ++I)
+ ArgTypes.push_back(I->type);
+ *ImplFnInfo = &CGM.getTypes().arrangeLLVMFunctionInfo(
+ FnInfo.getReturnType(), FnInfoOpts::IsDelegateCall, ArgTypes,
+ FnInfo.getExtInfo(), {}, FnInfo.getRequiredArgs());
+
+ // Create mangled name as if this was a method named __impl. If for some
+ // reason the name doesn't look as expected then just tack __impl to the
+ // front.
+ // TODO: Use the name mangler to produce the right name instead of using
+ // string replacement.
+ StringRef CallOpName = CallOpFn->getName();
+ std::string ImplName;
+ if (size_t Pos = CallOpName.find_first_of("<lambda"))
+ ImplName = ("?__impl@" + CallOpName.drop_front(Pos)).str();
+ else
+ ImplName = ("__impl" + CallOpName).str();
+
+ llvm::Function *Fn = CallOpFn->getParent()->getFunction(ImplName);
+ if (!Fn) {
+ Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(**ImplFnInfo),
+ llvm::GlobalValue::InternalLinkage, ImplName,
+ CGM.getModule());
+ CGM.SetInternalFunctionAttributes(CallOp, Fn, **ImplFnInfo);
+
+ const GlobalDecl &GD = GlobalDecl(CallOp);
+ const auto *D = cast<FunctionDecl>(GD.getDecl());
+ CodeGenFunction(CGM).GenerateCode(GD, Fn, **ImplFnInfo);
+ CGM.SetLLVMFunctionAttributesForDefinition(D, Fn);
+ }
+ *ImplFn = Fn;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp
index b9364fcd2231..f87caf050eea 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp
@@ -38,13 +38,13 @@ DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
// These automatically dominate and don't need to be saved.
if (!DominatingLLVMValue::needsSaving(V))
- return saved_type(V, ScalarLiteral);
+ return saved_type(V, nullptr, ScalarLiteral);
// Everything else needs an alloca.
Address addr =
CGF.CreateDefaultAlignTempAlloca(V->getType(), "saved-rvalue");
CGF.Builder.CreateStore(V, addr);
- return saved_type(addr.getPointer(), ScalarAddress);
+ return saved_type(addr.getPointer(), nullptr, ScalarAddress);
}
if (rv.isComplex()) {
@@ -54,19 +54,19 @@ DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
Address addr = CGF.CreateDefaultAlignTempAlloca(ComplexTy, "saved-complex");
CGF.Builder.CreateStore(V.first, CGF.Builder.CreateStructGEP(addr, 0));
CGF.Builder.CreateStore(V.second, CGF.Builder.CreateStructGEP(addr, 1));
- return saved_type(addr.getPointer(), ComplexAddress);
+ return saved_type(addr.getPointer(), nullptr, ComplexAddress);
}
assert(rv.isAggregate());
Address V = rv.getAggregateAddress(); // TODO: volatile?
if (!DominatingLLVMValue::needsSaving(V.getPointer()))
- return saved_type(V.getPointer(), AggregateLiteral,
+ return saved_type(V.getPointer(), V.getElementType(), AggregateLiteral,
V.getAlignment().getQuantity());
Address addr =
CGF.CreateTempAlloca(V.getType(), CGF.getPointerAlign(), "saved-rvalue");
CGF.Builder.CreateStore(V.getPointer(), addr);
- return saved_type(addr.getPointer(), AggregateAddress,
+ return saved_type(addr.getPointer(), V.getElementType(), AggregateAddress,
V.getAlignment().getQuantity());
}
@@ -75,8 +75,9 @@ DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
/// point.
RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
auto getSavingAddress = [&](llvm::Value *value) {
- auto alignment = cast<llvm::AllocaInst>(value)->getAlignment();
- return Address(value, CharUnits::fromQuantity(alignment));
+ auto *AI = cast<llvm::AllocaInst>(value);
+ return Address(value, AI->getAllocatedType(),
+ CharUnits::fromQuantity(AI->getAlign().value()));
};
switch (K) {
case ScalarLiteral:
@@ -84,10 +85,12 @@ RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
case ScalarAddress:
return RValue::get(CGF.Builder.CreateLoad(getSavingAddress(Value)));
case AggregateLiteral:
- return RValue::getAggregate(Address(Value, CharUnits::fromQuantity(Align)));
+ return RValue::getAggregate(
+ Address(Value, ElementType, CharUnits::fromQuantity(Align)));
case AggregateAddress: {
auto addr = CGF.Builder.CreateLoad(getSavingAddress(Value));
- return RValue::getAggregate(Address(addr, CharUnits::fromQuantity(Align)));
+ return RValue::getAggregate(
+ Address(addr, ElementType, CharUnits::fromQuantity(Align)));
}
case ComplexAddress: {
Address address = getSavingAddress(Value);
@@ -180,6 +183,15 @@ void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
bool IsNormalCleanup = Kind & NormalCleanup;
bool IsEHCleanup = Kind & EHCleanup;
bool IsLifetimeMarker = Kind & LifetimeMarker;
+
+ // Per C++ [except.terminate], it is implementation-defined whether none,
+ // some, or all cleanups are called before std::terminate. Thus, when
+ // terminate is the current EH scope, we may skip adding any EH cleanup
+ // scopes.
+ if (InnermostEHScope != stable_end() &&
+ find(InnermostEHScope)->getKind() == EHScope::Terminate)
+ IsEHCleanup = false;
+
EHCleanupScope *Scope =
new (Buffer) EHCleanupScope(IsNormalCleanup,
IsEHCleanup,
@@ -195,8 +207,13 @@ void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
Scope->setLifetimeMarker();
// With Windows -EHa, Invoke llvm.seh.scope.begin() for EHCleanup
+ // If exceptions are disabled/ignored and SEH is not in use, then there is no
+ // invoke destination. SEH "works" even if exceptions are off. In practice,
+ // this means that C++ destructors and other EH cleanups don't run, which is
+ // consistent with MSVC's behavior, except in the presence of -EHa.
+ // Check getInvokeDest() to generate llvm.seh.scope.begin() as needed.
if (CGF->getLangOpts().EHAsynch && IsEHCleanup && !IsLifetimeMarker &&
- CGF->getTarget().getCXXABI().isMicrosoft())
+ CGF->getTarget().getCXXABI().isMicrosoft() && CGF->getInvokeDest())
CGF->EmitSehCppScopeBegin();
return Scope->getCleanupBuffer();
@@ -544,7 +561,7 @@ static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
Entry->replaceAllUsesWith(Pred);
// Merge the blocks.
- Pred->getInstList().splice(Pred->end(), Entry->getInstList());
+ Pred->splice(Pred->end(), Entry);
// Kill the entry block.
Entry->eraseFromParent();
@@ -770,7 +787,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
if (!RequiresNormalCleanup) {
// Mark CPP scope end for passed-by-value Arg temp
// per Windows ABI which is "normally" Cleanup in callee
- if (IsEHa && getInvokeDest()) {
+ if (IsEHa && getInvokeDest() && Builder.GetInsertBlock()) {
if (Personality.isMSVCXXPersonality())
EmitSehCppScopeEnd();
}
@@ -824,7 +841,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
EmitBlock(NormalEntry);
// intercept normal cleanup to mark SEH scope end
- if (IsEHa) {
+ if (IsEHa && getInvokeDest()) {
if (Personality.isMSVCXXPersonality())
EmitSehCppScopeEnd();
else
@@ -856,8 +873,13 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// If there's exactly one branch-after and no other threads,
// we can route it without a switch.
+ // Skip for SEH, since ExitSwitch is used to generate code to indicate
+ // abnormal termination. (SEH: Except _leave and fall-through at
+ // the end, all other exits in a _try (return/goto/continue/break)
+ // are considered as abnormal terminations, using NormalCleanupDestSlot
+ // to indicate abnormal termination)
if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
- Scope.getNumBranchAfters() == 1) {
+ !currentFunctionUsesSEHTry() && Scope.getNumBranchAfters() == 1) {
assert(!BranchThroughDest || !IsActive);
// Clean up the possibly dead store to the cleanup dest slot.
@@ -930,7 +952,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// Append the prepared cleanup prologue from above.
llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
- NormalExit->getInstList().push_back(InstsToAppend[I]);
+ InstsToAppend[I]->insertInto(NormalExit, NormalExit->end());
// Optimistically hope that any fixups will continue falling through.
for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
@@ -1004,8 +1026,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// throwing cleanups. For funclet EH personalities, the cleanupendpad models
// program termination when cleanups throw.
bool PushedTerminate = false;
- SaveAndRestore<llvm::Instruction *> RestoreCurrentFuncletPad(
- CurrentFuncletPad);
+ SaveAndRestore RestoreCurrentFuncletPad(CurrentFuncletPad);
llvm::CleanupPadInst *CPI = nullptr;
const EHPersonality &Personality = EHPersonality::get(*this);
@@ -1020,6 +1041,8 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
if (!Personality.isMSVCPersonality()) {
EHStack.pushTerminate();
PushedTerminate = true;
+ } else if (IsEHa && getInvokeDest()) {
+ EmitSehCppScopeEnd();
}
// We only actually emit the cleanup code if the cleanup is either
@@ -1324,7 +1347,8 @@ static void EmitSehScope(CodeGenFunction &CGF,
CGF.getBundlesForFunclet(SehCppScope.getCallee());
if (CGF.CurrentFuncletPad)
BundleList.emplace_back("funclet", CGF.CurrentFuncletPad);
- CGF.Builder.CreateInvoke(SehCppScope, Cont, InvokeDest, None, BundleList);
+ CGF.Builder.CreateInvoke(SehCppScope, Cont, InvokeDest, std::nullopt,
+ BundleList);
CGF.EmitBlock(Cont);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h b/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h
index 1b54c0018d27..fcfbf41b0eaf 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.h
@@ -23,7 +23,6 @@ namespace llvm {
class BasicBlock;
class Value;
class ConstantInt;
-class AllocaInst;
}
namespace clang {
@@ -242,7 +241,7 @@ class alignas(8) EHCleanupScope : public EHScope {
/// An optional i1 variable indicating whether this cleanup has been
/// activated yet.
- llvm::AllocaInst *ActiveFlag;
+ Address ActiveFlag;
/// Extra information required for cleanups that have resolved
/// branches through them. This has to be allocated on the side
@@ -290,7 +289,8 @@ public:
EHScopeStack::stable_iterator enclosingEH)
: EHScope(EHScope::Cleanup, enclosingEH),
EnclosingNormal(enclosingNormal), NormalBlock(nullptr),
- ActiveFlag(nullptr), ExtInfo(nullptr), FixupDepth(fixupDepth) {
+ ActiveFlag(Address::invalid()), ExtInfo(nullptr),
+ FixupDepth(fixupDepth) {
CleanupBits.IsNormalCleanup = isNormal;
CleanupBits.IsEHCleanup = isEH;
CleanupBits.IsActive = true;
@@ -320,13 +320,13 @@ public:
bool isLifetimeMarker() const { return CleanupBits.IsLifetimeMarker; }
void setLifetimeMarker() { CleanupBits.IsLifetimeMarker = true; }
- bool hasActiveFlag() const { return ActiveFlag != nullptr; }
+ bool hasActiveFlag() const { return ActiveFlag.isValid(); }
Address getActiveFlag() const {
- return Address(ActiveFlag, CharUnits::One());
+ return ActiveFlag;
}
void setActiveFlag(Address Var) {
assert(Var.getAlignment().isOne());
- ActiveFlag = cast<llvm::AllocaInst>(Var.getPointer());
+ ActiveFlag = Var;
}
void setTestFlagInNormalCleanup() {
@@ -613,6 +613,7 @@ struct EHPersonality {
static const EHPersonality MSVC_CxxFrameHandler3;
static const EHPersonality GNU_Wasm_CPlusPlus;
static const EHPersonality XL_CPlusPlus;
+ static const EHPersonality ZOS_CPlusPlus;
/// Does this personality use landingpads or the family of pad instructions
/// designed to form funclets?
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
index ca071d3d2e80..888d30bfb3e1 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
@@ -129,14 +129,48 @@ static SmallString<32> buildSuspendPrefixStr(CGCoroData &Coro, AwaitKind Kind) {
return Prefix;
}
-static bool memberCallExpressionCanThrow(const Expr *E) {
- if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
- if (const auto *Proto =
- CE->getMethodDecl()->getType()->getAs<FunctionProtoType>())
- if (isNoexceptExceptionSpec(Proto->getExceptionSpecType()) &&
- Proto->canThrow() == CT_Cannot)
- return false;
- return true;
+// Check if function can throw based on prototype noexcept, also works for
+// destructors which are implicitly noexcept but can be marked noexcept(false).
+static bool FunctionCanThrow(const FunctionDecl *D) {
+ const auto *Proto = D->getType()->getAs<FunctionProtoType>();
+ if (!Proto) {
+ // Function proto is not found, we conservatively assume throwing.
+ return true;
+ }
+ return !isNoexceptExceptionSpec(Proto->getExceptionSpecType()) ||
+ Proto->canThrow() != CT_Cannot;
+}
+
+static bool ResumeStmtCanThrow(const Stmt *S) {
+ if (const auto *CE = dyn_cast<CallExpr>(S)) {
+ const auto *Callee = CE->getDirectCallee();
+ if (!Callee)
+ // We don't have direct callee. Conservatively assume throwing.
+ return true;
+
+ if (FunctionCanThrow(Callee))
+ return true;
+
+ // Fall through to visit the children.
+ }
+
+ if (const auto *TE = dyn_cast<CXXBindTemporaryExpr>(S)) {
+ // Special handling of CXXBindTemporaryExpr here as calling of Dtor of the
+ // temporary is not part of `children()` as covered in the fall through.
+ // We need to mark entire statement as throwing if the destructor of the
+ // temporary throws.
+ const auto *Dtor = TE->getTemporary()->getDestructor();
+ if (FunctionCanThrow(Dtor))
+ return true;
+
+ // Fall through to visit the children.
+ }
+
+ for (const auto *child : S->children())
+ if (ResumeStmtCanThrow(child))
+ return true;
+
+ return false;
}
// Emit suspend expression which roughly looks like:
@@ -198,7 +232,10 @@ static LValueOrRValue emitSuspendExpression(CodeGenFunction &CGF, CGCoroData &Co
auto *NullPtr = llvm::ConstantPointerNull::get(CGF.CGM.Int8PtrTy);
auto *SaveCall = Builder.CreateCall(CoroSave, {NullPtr});
+ CGF.CurCoro.InSuspendBlock = true;
auto *SuspendRet = CGF.EmitScalarExpr(S.getSuspendExpr());
+ CGF.CurCoro.InSuspendBlock = false;
+
if (SuspendRet != nullptr && SuspendRet->getType()->isIntegerTy(1)) {
// Veto suspension if requested by bool returning await_suspend.
BasicBlock *RealSuspendBlock =
@@ -230,7 +267,7 @@ static LValueOrRValue emitSuspendExpression(CodeGenFunction &CGF, CGCoroData &Co
// is marked as 'noexcept', we avoid generating this additional IR.
CXXTryStmt *TryStmt = nullptr;
if (Coro.ExceptionHandler && Kind == AwaitKind::Init &&
- memberCallExpressionCanThrow(S.getResumeExpr())) {
+ ResumeStmtCanThrow(S.getResumeExpr())) {
Coro.ResumeEHVar =
CGF.CreateTempAlloca(Builder.getInt1Ty(), Prefix + Twine("resume.eh"));
Builder.CreateFlagStore(true, Coro.ResumeEHVar);
@@ -238,10 +275,19 @@ static LValueOrRValue emitSuspendExpression(CodeGenFunction &CGF, CGCoroData &Co
auto Loc = S.getResumeExpr()->getExprLoc();
auto *Catch = new (CGF.getContext())
CXXCatchStmt(Loc, /*exDecl=*/nullptr, Coro.ExceptionHandler);
- auto *TryBody =
- CompoundStmt::Create(CGF.getContext(), S.getResumeExpr(), Loc, Loc);
+ auto *TryBody = CompoundStmt::Create(CGF.getContext(), S.getResumeExpr(),
+ FPOptionsOverride(), Loc, Loc);
TryStmt = CXXTryStmt::Create(CGF.getContext(), Loc, TryBody, Catch);
CGF.EnterCXXTryStmt(*TryStmt);
+ CGF.EmitStmt(TryBody);
+ // We don't use EmitCXXTryStmt here. We need to store to ResumeEHVar that
+ // doesn't exist in the body.
+ Builder.CreateFlagStore(false, Coro.ResumeEHVar);
+ CGF.ExitCXXTryStmt(*TryStmt);
+ LValueOrRValue Res;
+ // We are not supposed to obtain the value from init suspend await_resume().
+ Res.RV = RValue::getIgnored();
+ return Res;
}
LValueOrRValue Res;
@@ -250,11 +296,6 @@ static LValueOrRValue emitSuspendExpression(CodeGenFunction &CGF, CGCoroData &Co
else
Res.RV = CGF.EmitAnyExpr(S.getResumeExpr(), aggSlot, ignoreResult);
- if (TryStmt) {
- Builder.CreateFlagStore(false, Coro.ResumeEHVar);
- CGF.ExitCXXTryStmt(*TryStmt);
- }
-
return Res;
}
@@ -400,8 +441,11 @@ struct CallCoroEnd final : public EHScopeStack::Cleanup {
llvm::Function *CoroEndFn = CGM.getIntrinsic(llvm::Intrinsic::coro_end);
// See if we have a funclet bundle to associate coro.end with. (WinEH)
auto Bundles = getBundlesForCoroEnd(CGF);
- auto *CoroEnd = CGF.Builder.CreateCall(
- CoroEndFn, {NullPtr, CGF.Builder.getTrue()}, Bundles);
+ auto *CoroEnd =
+ CGF.Builder.CreateCall(CoroEndFn,
+ {NullPtr, CGF.Builder.getTrue(),
+ llvm::ConstantTokenNone::get(CoroEndFn->getContext())},
+ Bundles);
if (Bundles.empty()) {
// Otherwise, (landingpad model), create a conditional branch that leads
// either to a cleanup block or a block with EH resume instruction.
@@ -470,22 +514,52 @@ struct GetReturnObjectManager {
CodeGenFunction &CGF;
CGBuilderTy &Builder;
const CoroutineBodyStmt &S;
+ // When true, performs RVO for the return object.
+ bool DirectEmit = false;
Address GroActiveFlag;
CodeGenFunction::AutoVarEmission GroEmission;
GetReturnObjectManager(CodeGenFunction &CGF, const CoroutineBodyStmt &S)
: CGF(CGF), Builder(CGF.Builder), S(S), GroActiveFlag(Address::invalid()),
- GroEmission(CodeGenFunction::AutoVarEmission::invalid()) {}
+ GroEmission(CodeGenFunction::AutoVarEmission::invalid()) {
+ // The call to get_­return_­object is sequenced before the call to
+ // initial_­suspend and is invoked at most once, but there are caveats
+ // regarding on whether the prvalue result object may be initialized
+ // directly/eager or delayed, depending on the types involved.
+ //
+ // More info at https://github.com/cplusplus/papers/issues/1414
+ //
+ // The general cases:
+ // 1. Same type of get_return_object and coroutine return type (direct
+ // emission):
+ // - Constructed in the return slot.
+ // 2. Different types (delayed emission):
+ // - Constructed temporary object prior to initial suspend initialized with
+ // a call to get_return_object()
+ // - When coroutine needs to to return to the caller and needs to construct
+ // return value for the coroutine it is initialized with expiring value of
+ // the temporary obtained above.
+ //
+ // Direct emission for void returning coroutines or GROs.
+ DirectEmit = [&]() {
+ auto *RVI = S.getReturnValueInit();
+ assert(RVI && "expected RVI");
+ auto GroType = RVI->getType();
+ return CGF.getContext().hasSameType(GroType, CGF.FnRetTy);
+ }();
+ }
// The gro variable has to outlive coroutine frame and coroutine promise, but,
// it can only be initialized after coroutine promise was created, thus, we
// split its emission in two parts. EmitGroAlloca emits an alloca and sets up
// cleanups. Later when coroutine promise is available we initialize the gro
// and sets the flag that the cleanup is now active.
-
void EmitGroAlloca() {
- auto *GroDeclStmt = dyn_cast<DeclStmt>(S.getResultDecl());
+ if (DirectEmit)
+ return;
+
+ auto *GroDeclStmt = dyn_cast_or_null<DeclStmt>(S.getResultDecl());
if (!GroDeclStmt) {
// If get_return_object returns void, no need to do an alloca.
return;
@@ -494,11 +568,16 @@ struct GetReturnObjectManager {
auto *GroVarDecl = cast<VarDecl>(GroDeclStmt->getSingleDecl());
// Set GRO flag that it is not initialized yet
- GroActiveFlag =
- CGF.CreateTempAlloca(Builder.getInt1Ty(), CharUnits::One(), "gro.active");
+ GroActiveFlag = CGF.CreateTempAlloca(Builder.getInt1Ty(), CharUnits::One(),
+ "gro.active");
Builder.CreateStore(Builder.getFalse(), GroActiveFlag);
GroEmission = CGF.EmitAutoVarAlloca(*GroVarDecl);
+ auto *GroAlloca = dyn_cast_or_null<llvm::AllocaInst>(
+ GroEmission.getOriginalAllocatedAddress().getPointer());
+ assert(GroAlloca && "expected alloca to be emitted");
+ GroAlloca->setMetadata(llvm::LLVMContext::MD_coro_outside_frame,
+ llvm::MDNode::get(CGF.CGM.getLLVMContext(), {}));
// Remember the top of EHStack before emitting the cleanup.
auto old_top = CGF.EHStack.stable_begin();
@@ -506,8 +585,8 @@ struct GetReturnObjectManager {
auto top = CGF.EHStack.stable_begin();
// Make the cleanup conditional on gro.active
- for (auto b = CGF.EHStack.find(top), e = CGF.EHStack.find(old_top);
- b != e; b++) {
+ for (auto b = CGF.EHStack.find(top), e = CGF.EHStack.find(old_top); b != e;
+ b++) {
if (auto *Cleanup = dyn_cast<EHCleanupScope>(&*b)) {
assert(!Cleanup->hasActiveFlag() && "cleanup already has active flag?");
Cleanup->setActiveFlag(GroActiveFlag);
@@ -518,6 +597,27 @@ struct GetReturnObjectManager {
}
void EmitGroInit() {
+ if (DirectEmit) {
+ // ReturnValue should be valid as long as the coroutine's return type
+ // is not void. The assertion could help us to reduce the check later.
+ assert(CGF.ReturnValue.isValid() == (bool)S.getReturnStmt());
+ // Now we have the promise, initialize the GRO.
+ // We need to emit `get_return_object` first. According to:
+ // [dcl.fct.def.coroutine]p7
+ // The call to get_return_­object is sequenced before the call to
+ // initial_suspend and is invoked at most once.
+ //
+ // So we couldn't emit return value when we emit return statment,
+ // otherwise the call to get_return_object wouldn't be in front
+ // of initial_suspend.
+ if (CGF.ReturnValue.isValid()) {
+ CGF.EmitAnyExprToMem(S.getReturnValue(), CGF.ReturnValue,
+ S.getReturnValue()->getType().getQualifiers(),
+ /*IsInit*/ true);
+ }
+ return;
+ }
+
if (!GroActiveFlag.isValid()) {
// No Gro variable was allocated. Simply emit the call to
// get_return_object.
@@ -529,7 +629,7 @@ struct GetReturnObjectManager {
Builder.CreateStore(Builder.getTrue(), GroActiveFlag);
}
};
-}
+} // namespace
static void emitBodyAndFallthrough(CodeGenFunction &CGF,
const CoroutineBodyStmt &S, Stmt *Body) {
@@ -541,7 +641,7 @@ static void emitBodyAndFallthrough(CodeGenFunction &CGF,
}
void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
- auto *NullPtr = llvm::ConstantPointerNull::get(Builder.getInt8PtrTy());
+ auto *NullPtr = llvm::ConstantPointerNull::get(Builder.getPtrTy());
auto &TI = CGM.getContext().getTargetInfo();
unsigned NewAlign = TI.getNewAlign() / TI.getCharWidth();
@@ -577,6 +677,8 @@ void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
// See if allocation was successful.
auto *NullPtr = llvm::ConstantPointerNull::get(Int8PtrTy);
auto *Cond = Builder.CreateICmpNE(AllocateCall, NullPtr);
+ // Expect the allocation to be successful.
+ emitCondLikelihoodViaExpectIntrinsic(Cond, Stmt::LH_Likely);
Builder.CreateCondBr(Cond, InitBB, RetOnFailureBB);
// If not, return OnAllocFailure object.
@@ -608,7 +710,7 @@ void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
EHStack.pushCleanup<CallCoroDelete>(NormalAndEHCleanup, S.getDeallocate());
// Create mapping between parameters and copy-params for coroutine function.
- auto ParamMoves = S.getParamMoves();
+ llvm::ArrayRef<const Stmt *> ParamMoves = S.getParamMoves();
assert(
(ParamMoves.size() == 0 || (ParamMoves.size() == FnArgs.size())) &&
"ParamMoves and FnArgs should be the same size for coroutine function");
@@ -699,10 +801,24 @@ void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
// Emit coro.end before getReturnStmt (and parameter destructors), since
// resume and destroy parts of the coroutine should not include them.
llvm::Function *CoroEnd = CGM.getIntrinsic(llvm::Intrinsic::coro_end);
- Builder.CreateCall(CoroEnd, {NullPtr, Builder.getFalse()});
-
- if (Stmt *Ret = S.getReturnStmt())
+ Builder.CreateCall(CoroEnd,
+ {NullPtr, Builder.getFalse(),
+ llvm::ConstantTokenNone::get(CoroEnd->getContext())});
+
+ if (Stmt *Ret = S.getReturnStmt()) {
+ // Since we already emitted the return value above, so we shouldn't
+ // emit it again here.
+ if (GroManager.DirectEmit)
+ cast<ReturnStmt>(Ret)->setRetValue(nullptr);
EmitStmt(Ret);
+ }
+
+ // LLVM require the frontend to mark the coroutine.
+ CurFn->setPresplitCoroutine();
+
+ if (CXXRecordDecl *RD = FnRetTy->getAsCXXRecordDecl();
+ RD && RD->hasAttr<CoroOnlyDestroyWhenCompleteAttr>())
+ CurFn->setCoroDestroyOnlyWhenComplete();
}
// Emit coroutine intrinsic and patch up arguments of the token type.
@@ -720,9 +836,23 @@ RValue CodeGenFunction::EmitCoroutineIntrinsic(const CallExpr *E,
}
CGM.Error(E->getBeginLoc(), "this builtin expect that __builtin_coro_begin "
"has been used earlier in this function");
- auto NullPtr = llvm::ConstantPointerNull::get(Builder.getInt8PtrTy());
+ auto *NullPtr = llvm::ConstantPointerNull::get(Builder.getPtrTy());
return RValue::get(NullPtr);
}
+ case llvm::Intrinsic::coro_size: {
+ auto &Context = getContext();
+ CanQualType SizeTy = Context.getSizeType();
+ llvm::IntegerType *T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
+ llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::coro_size, T);
+ return RValue::get(Builder.CreateCall(F));
+ }
+ case llvm::Intrinsic::coro_align: {
+ auto &Context = getContext();
+ CanQualType SizeTy = Context.getSizeType();
+ llvm::IntegerType *T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
+ llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::coro_align, T);
+ return RValue::get(Builder.CreateCall(F));
+ }
// The following three intrinsics take a token parameter referring to a token
// returned by earlier call to @llvm.coro.id. Since we cannot represent it in
// builtins, we patch it up here.
@@ -736,7 +866,7 @@ RValue CodeGenFunction::EmitCoroutineIntrinsic(const CallExpr *E,
CGM.Error(E->getBeginLoc(), "this builtin expect that __builtin_coro_id has"
" been used earlier in this function");
// Fallthrough to the next case to add TokenNone as the first argument.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
// @llvm.coro.suspend takes a token parameter. Add token 'none' as the first
// argument.
@@ -746,6 +876,10 @@ RValue CodeGenFunction::EmitCoroutineIntrinsic(const CallExpr *E,
}
for (const Expr *Arg : E->arguments())
Args.push_back(EmitScalarExpr(Arg));
+ // @llvm.coro.end takes a token parameter. Add token 'none' as the last
+ // argument.
+ if (IID == llvm::Intrinsic::coro_end)
+ Args.push_back(llvm::ConstantTokenNone::get(getLLVMContext()));
llvm::Function *F = CGM.getIntrinsic(IID);
llvm::CallInst *Call = Builder.CreateCall(F, Args);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
index 81c910f40bf8..0f3f684d61dc 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -18,6 +18,7 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
+#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclFriend.h"
@@ -25,6 +26,8 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
@@ -46,13 +49,16 @@
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/SHA1.h"
+#include "llvm/Support/SHA256.h"
#include "llvm/Support/TimeProfiler.h"
+#include <optional>
using namespace clang;
using namespace clang::CodeGen;
static uint32_t getTypeAlignIfRequired(const Type *Ty, const ASTContext &Ctx) {
auto TI = Ctx.getTypeInfo(Ty);
- return TI.AlignIsRequired ? TI.Align : 0;
+ return TI.isAlignRequired() ? TI.Align : 0;
}
static uint32_t getTypeAlignIfRequired(QualType Ty, const ASTContext &Ctx) {
@@ -67,8 +73,6 @@ CGDebugInfo::CGDebugInfo(CodeGenModule &CGM)
: CGM(CGM), DebugKind(CGM.getCodeGenOpts().getDebugInfo()),
DebugTypeExtRefs(CGM.getCodeGenOpts().DebugTypeExtRefs),
DBuilder(CGM.getModule()) {
- for (const auto &KV : CGM.getCodeGenOpts().DebugPrefixMap)
- DebugPrefixMap[KV.first] = KV.second;
CreateCompileUnit();
}
@@ -243,6 +247,12 @@ PrintingPolicy CGDebugInfo::getPrintingPolicy() const {
PP.SplitTemplateClosers = true;
}
+ PP.SuppressInlineNamespace = false;
+ PP.PrintCanonicalTypes = true;
+ PP.UsePreferredNames = false;
+ PP.AlwaysIncludeTypeForTemplateArgument = true;
+ PP.UseEnumerators = false;
+
// Apply -fdebug-prefix-map.
PP.Callbacks = &PrintCB;
return PP;
@@ -335,39 +345,44 @@ StringRef CGDebugInfo::getClassName(const RecordDecl *RD) {
return StringRef();
}
-Optional<llvm::DIFile::ChecksumKind>
-CGDebugInfo::computeChecksum(FileID FID, SmallString<32> &Checksum) const {
+std::optional<llvm::DIFile::ChecksumKind>
+CGDebugInfo::computeChecksum(FileID FID, SmallString<64> &Checksum) const {
Checksum.clear();
if (!CGM.getCodeGenOpts().EmitCodeView &&
CGM.getCodeGenOpts().DwarfVersion < 5)
- return None;
+ return std::nullopt;
SourceManager &SM = CGM.getContext().getSourceManager();
- Optional<llvm::MemoryBufferRef> MemBuffer = SM.getBufferOrNone(FID);
+ std::optional<llvm::MemoryBufferRef> MemBuffer = SM.getBufferOrNone(FID);
if (!MemBuffer)
- return None;
-
- llvm::MD5 Hash;
- llvm::MD5::MD5Result Result;
-
- Hash.update(MemBuffer->getBuffer());
- Hash.final(Result);
-
- Hash.stringifyResult(Result, Checksum);
- return llvm::DIFile::CSK_MD5;
+ return std::nullopt;
+
+ auto Data = llvm::arrayRefFromStringRef(MemBuffer->getBuffer());
+ switch (CGM.getCodeGenOpts().getDebugSrcHash()) {
+ case clang::CodeGenOptions::DSH_MD5:
+ llvm::toHex(llvm::MD5::hash(Data), /*LowerCase=*/true, Checksum);
+ return llvm::DIFile::CSK_MD5;
+ case clang::CodeGenOptions::DSH_SHA1:
+ llvm::toHex(llvm::SHA1::hash(Data), /*LowerCase=*/true, Checksum);
+ return llvm::DIFile::CSK_SHA1;
+ case clang::CodeGenOptions::DSH_SHA256:
+ llvm::toHex(llvm::SHA256::hash(Data), /*LowerCase=*/true, Checksum);
+ return llvm::DIFile::CSK_SHA256;
+ }
+ llvm_unreachable("Unhandled DebugSrcHashKind enum");
}
-Optional<StringRef> CGDebugInfo::getSource(const SourceManager &SM,
- FileID FID) {
+std::optional<StringRef> CGDebugInfo::getSource(const SourceManager &SM,
+ FileID FID) {
if (!CGM.getCodeGenOpts().EmbedSource)
- return None;
+ return std::nullopt;
bool SourceInvalid = false;
StringRef Source = SM.getBufferData(FID, &SourceInvalid);
if (SourceInvalid)
- return None;
+ return std::nullopt;
return Source;
}
@@ -376,16 +391,18 @@ llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
SourceManager &SM = CGM.getContext().getSourceManager();
StringRef FileName;
FileID FID;
+ std::optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo;
if (Loc.isInvalid()) {
// The DIFile used by the CU is distinct from the main source file. Call
// createFile() below for canonicalization if the source file was specified
// with an absolute path.
FileName = TheCU->getFile()->getFilename();
+ CSInfo = TheCU->getFile()->getChecksum();
} else {
PresumedLoc PLoc = SM.getPresumedLoc(Loc);
FileName = PLoc.getFilename();
-
+
if (FileName.empty()) {
FileName = TheCU->getFile()->getFilename();
} else {
@@ -402,19 +419,21 @@ llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
return cast<llvm::DIFile>(V);
}
- SmallString<32> Checksum;
-
- Optional<llvm::DIFile::ChecksumKind> CSKind = computeChecksum(FID, Checksum);
- Optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo;
- if (CSKind)
- CSInfo.emplace(*CSKind, Checksum);
+ // Put Checksum at a scope where it will persist past the createFile call.
+ SmallString<64> Checksum;
+ if (!CSInfo) {
+ std::optional<llvm::DIFile::ChecksumKind> CSKind =
+ computeChecksum(FID, Checksum);
+ if (CSKind)
+ CSInfo.emplace(*CSKind, Checksum);
+ }
return createFile(FileName, CSInfo, getSource(SM, SM.getFileID(Loc)));
}
-llvm::DIFile *
-CGDebugInfo::createFile(StringRef FileName,
- Optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo,
- Optional<StringRef> Source) {
+llvm::DIFile *CGDebugInfo::createFile(
+ StringRef FileName,
+ std::optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo,
+ std::optional<StringRef> Source) {
StringRef Dir;
StringRef File;
std::string RemappedFile = remapDIPath(FileName);
@@ -422,16 +441,16 @@ CGDebugInfo::createFile(StringRef FileName,
SmallString<128> DirBuf;
SmallString<128> FileBuf;
if (llvm::sys::path::is_absolute(RemappedFile)) {
- // Strip the common prefix (if it is more than just "/") from current
- // directory and FileName for a more space-efficient encoding.
+ // Strip the common prefix (if it is more than just "/" or "C:\") from
+ // current directory and FileName for a more space-efficient encoding.
auto FileIt = llvm::sys::path::begin(RemappedFile);
auto FileE = llvm::sys::path::end(RemappedFile);
auto CurDirIt = llvm::sys::path::begin(CurDir);
auto CurDirE = llvm::sys::path::end(CurDir);
for (; CurDirIt != CurDirE && *CurDirIt == *FileIt; ++CurDirIt, ++FileIt)
llvm::sys::path::append(DirBuf, *CurDirIt);
- if (std::distance(llvm::sys::path::begin(CurDir), CurDirIt) == 1) {
- // Don't strip the common prefix if it is only the root "/"
+ if (llvm::sys::path::root_path(DirBuf) == DirBuf) {
+ // Don't strip the common prefix if it is only the root ("/" or "C:\")
// since that would make LLVM diagnostic locations confusing.
Dir = {};
File = RemappedFile;
@@ -442,7 +461,8 @@ CGDebugInfo::createFile(StringRef FileName,
File = FileBuf;
}
} else {
- Dir = CurDir;
+ if (!llvm::sys::path::is_absolute(FileName))
+ Dir = CurDir;
File = RemappedFile;
}
llvm::DIFile *F = DBuilder.createFile(File, Dir, CSInfo, Source);
@@ -451,12 +471,9 @@ CGDebugInfo::createFile(StringRef FileName,
}
std::string CGDebugInfo::remapDIPath(StringRef Path) const {
- if (DebugPrefixMap.empty())
- return Path.str();
-
SmallString<256> P = Path;
- for (const auto &Entry : DebugPrefixMap)
- if (llvm::sys::path::replace_path_prefix(P, Entry.first, Entry.second))
+ for (auto &[From, To] : llvm::reverse(CGM.getCodeGenOpts().DebugPrefixMap))
+ if (llvm::sys::path::replace_path_prefix(P, From, To))
break;
return P.str().str();
}
@@ -487,15 +504,17 @@ StringRef CGDebugInfo::getCurrentDirname() {
if (!CWDName.empty())
return CWDName;
- SmallString<256> CWD;
- llvm::sys::fs::current_path(CWD);
- return CWDName = internString(CWD);
+ llvm::ErrorOr<std::string> CWD =
+ CGM.getFileSystem()->getCurrentWorkingDirectory();
+ if (!CWD)
+ return StringRef();
+ return CWDName = internString(*CWD);
}
void CGDebugInfo::CreateCompileUnit() {
- SmallString<32> Checksum;
- Optional<llvm::DIFile::ChecksumKind> CSKind;
- Optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo;
+ SmallString<64> Checksum;
+ std::optional<llvm::DIFile::ChecksumKind> CSKind;
+ std::optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo;
// Should we be asking the SourceManager for the main file name, instead of
// accepting it as an argument? This just causes the main file name to
@@ -506,7 +525,9 @@ void CGDebugInfo::CreateCompileUnit() {
// Get absolute path name.
SourceManager &SM = CGM.getContext().getSourceManager();
- std::string MainFileName = CGM.getCodeGenOpts().MainFileName;
+ auto &CGO = CGM.getCodeGenOpts();
+ const LangOptions &LO = CGM.getLangOpts();
+ std::string MainFileName = CGO.MainFileName;
if (MainFileName.empty())
MainFileName = "<stdin>";
@@ -515,37 +536,45 @@ void CGDebugInfo::CreateCompileUnit() {
// a relative path, so we look into the actual file entry for the main
// file to determine the real absolute path for the file.
std::string MainFileDir;
- if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
- MainFileDir = std::string(MainFile->getDir()->getName());
+ if (OptionalFileEntryRef MainFile =
+ SM.getFileEntryRefForID(SM.getMainFileID())) {
+ MainFileDir = std::string(MainFile->getDir().getName());
if (!llvm::sys::path::is_absolute(MainFileName)) {
llvm::SmallString<1024> MainFileDirSS(MainFileDir);
- llvm::sys::path::append(MainFileDirSS, MainFileName);
- MainFileName =
- std::string(llvm::sys::path::remove_leading_dotslash(MainFileDirSS));
+ llvm::sys::path::Style Style =
+ LO.UseTargetPathSeparator
+ ? (CGM.getTarget().getTriple().isOSWindows()
+ ? llvm::sys::path::Style::windows_backslash
+ : llvm::sys::path::Style::posix)
+ : llvm::sys::path::Style::native;
+ llvm::sys::path::append(MainFileDirSS, Style, MainFileName);
+ MainFileName = std::string(
+ llvm::sys::path::remove_leading_dotslash(MainFileDirSS, Style));
}
// If the main file name provided is identical to the input file name, and
// if the input file is a preprocessed source, use the module name for
// debug info. The module name comes from the name specified in the first
- // linemarker if the input is a preprocessed source.
+ // linemarker if the input is a preprocessed source. In this case we don't
+ // know the content to compute a checksum.
if (MainFile->getName() == MainFileName &&
FrontendOptions::getInputKindForExtension(
MainFile->getName().rsplit('.').second)
- .isPreprocessed())
+ .isPreprocessed()) {
MainFileName = CGM.getModule().getName().str();
-
- CSKind = computeChecksum(SM.getMainFileID(), Checksum);
+ } else {
+ CSKind = computeChecksum(SM.getMainFileID(), Checksum);
+ }
}
llvm::dwarf::SourceLanguage LangTag;
- const LangOptions &LO = CGM.getLangOpts();
if (LO.CPlusPlus) {
if (LO.ObjC)
LangTag = llvm::dwarf::DW_LANG_ObjC_plus_plus;
- else if (LO.CPlusPlus14 && (!CGM.getCodeGenOpts().DebugStrictDwarf ||
- CGM.getCodeGenOpts().DwarfVersion >= 5))
+ else if (CGO.DebugStrictDwarf && CGO.DwarfVersion < 5)
+ LangTag = llvm::dwarf::DW_LANG_C_plus_plus;
+ else if (LO.CPlusPlus14)
LangTag = llvm::dwarf::DW_LANG_C_plus_plus_14;
- else if (LO.CPlusPlus11 && (!CGM.getCodeGenOpts().DebugStrictDwarf ||
- CGM.getCodeGenOpts().DwarfVersion >= 5))
+ else if (LO.CPlusPlus11)
LangTag = llvm::dwarf::DW_LANG_C_plus_plus_11;
else
LangTag = llvm::dwarf::DW_LANG_C_plus_plus;
@@ -556,6 +585,8 @@ void CGDebugInfo::CreateCompileUnit() {
LangTag = llvm::dwarf::DW_LANG_OpenCL;
} else if (LO.RenderScript) {
LangTag = llvm::dwarf::DW_LANG_GOOGLE_RenderScript;
+ } else if (LO.C11 && !(CGO.DebugStrictDwarf && CGO.DwarfVersion < 5)) {
+ LangTag = llvm::dwarf::DW_LANG_C11;
} else if (LO.C99) {
LangTag = llvm::dwarf::DW_LANG_C99;
} else {
@@ -571,20 +602,20 @@ void CGDebugInfo::CreateCompileUnit() {
llvm::DICompileUnit::DebugEmissionKind EmissionKind;
switch (DebugKind) {
- case codegenoptions::NoDebugInfo:
- case codegenoptions::LocTrackingOnly:
+ case llvm::codegenoptions::NoDebugInfo:
+ case llvm::codegenoptions::LocTrackingOnly:
EmissionKind = llvm::DICompileUnit::NoDebug;
break;
- case codegenoptions::DebugLineTablesOnly:
+ case llvm::codegenoptions::DebugLineTablesOnly:
EmissionKind = llvm::DICompileUnit::LineTablesOnly;
break;
- case codegenoptions::DebugDirectivesOnly:
+ case llvm::codegenoptions::DebugDirectivesOnly:
EmissionKind = llvm::DICompileUnit::DebugDirectivesOnly;
break;
- case codegenoptions::DebugInfoConstructor:
- case codegenoptions::LimitedDebugInfo:
- case codegenoptions::FullDebugInfo:
- case codegenoptions::UnusedTypeInfo:
+ case llvm::codegenoptions::DebugInfoConstructor:
+ case llvm::codegenoptions::LimitedDebugInfo:
+ case llvm::codegenoptions::FullDebugInfo:
+ case llvm::codegenoptions::UnusedTypeInfo:
EmissionKind = llvm::DICompileUnit::FullDebug;
break;
}
@@ -606,22 +637,27 @@ void CGDebugInfo::CreateCompileUnit() {
Sysroot = CGM.getHeaderSearchOpts().Sysroot;
auto B = llvm::sys::path::rbegin(Sysroot);
auto E = llvm::sys::path::rend(Sysroot);
- auto It = std::find_if(B, E, [](auto SDK) { return SDK.endswith(".sdk"); });
+ auto It =
+ std::find_if(B, E, [](auto SDK) { return SDK.ends_with(".sdk"); });
if (It != E)
SDK = *It;
}
+ llvm::DICompileUnit::DebugNameTableKind NameTableKind =
+ static_cast<llvm::DICompileUnit::DebugNameTableKind>(
+ CGOpts.DebugNameTable);
+ if (CGM.getTarget().getTriple().isNVPTX())
+ NameTableKind = llvm::DICompileUnit::DebugNameTableKind::None;
+ else if (CGM.getTarget().getTriple().getVendor() == llvm::Triple::Apple)
+ NameTableKind = llvm::DICompileUnit::DebugNameTableKind::Apple;
+
// Create new compile unit.
TheCU = DBuilder.createCompileUnit(
LangTag, CUFile, CGOpts.EmitVersionIdentMetadata ? Producer : "",
LO.Optimize || CGOpts.PrepareForLTO || CGOpts.PrepareForThinLTO,
CGOpts.DwarfDebugFlags, RuntimeVers, CGOpts.SplitDwarfFile, EmissionKind,
DwoId, CGOpts.SplitDwarfInlining, CGOpts.DebugInfoForProfiling,
- CGM.getTarget().getTriple().isNVPTX()
- ? llvm::DICompileUnit::DebugNameTableKind::None
- : static_cast<llvm::DICompileUnit::DebugNameTableKind>(
- CGOpts.DebugNameTable),
- CGOpts.DebugRangesBaseAddress, remapDIPath(Sysroot), SDK);
+ NameTableKind, CGOpts.DebugRangesBaseAddress, remapDIPath(Sysroot), SDK);
}
llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
@@ -703,24 +739,41 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
#include "clang/Basic/AArch64SVEACLETypes.def"
{
ASTContext::BuiltinVectorTypeInfo Info =
- CGM.getContext().getBuiltinVectorTypeInfo(BT);
- unsigned NumElemsPerVG = (Info.EC.getKnownMinValue() * Info.NumVectors) / 2;
+ // For svcount_t, only the lower 2 bytes are relevant.
+ BT->getKind() == BuiltinType::SveCount
+ ? ASTContext::BuiltinVectorTypeInfo(
+ CGM.getContext().BoolTy, llvm::ElementCount::getFixed(16),
+ 1)
+ : CGM.getContext().getBuiltinVectorTypeInfo(BT);
+
+ // A single vector of bytes may not suffice as the representation of
+ // svcount_t tuples because of the gap between the active 16bits of
+ // successive tuple members. Currently no such tuples are defined for
+ // svcount_t, so assert that NumVectors is 1.
+ assert((BT->getKind() != BuiltinType::SveCount || Info.NumVectors == 1) &&
+ "Unsupported number of vectors for svcount_t");
// Debuggers can't extract 1bit from a vector, so will display a
- // bitpattern for svbool_t instead.
+ // bitpattern for predicates instead.
+ unsigned NumElems = Info.EC.getKnownMinValue() * Info.NumVectors;
if (Info.ElementType == CGM.getContext().BoolTy) {
- NumElemsPerVG /= 8;
+ NumElems /= 8;
Info.ElementType = CGM.getContext().UnsignedCharTy;
}
- auto *LowerBound =
- llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
- llvm::Type::getInt64Ty(CGM.getLLVMContext()), 0));
- SmallVector<int64_t, 9> Expr(
- {llvm::dwarf::DW_OP_constu, NumElemsPerVG, llvm::dwarf::DW_OP_bregx,
- /* AArch64::VG */ 46, 0, llvm::dwarf::DW_OP_mul,
- llvm::dwarf::DW_OP_constu, 1, llvm::dwarf::DW_OP_minus});
- auto *UpperBound = DBuilder.createExpression(Expr);
+ llvm::Metadata *LowerBound, *UpperBound;
+ LowerBound = llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
+ llvm::Type::getInt64Ty(CGM.getLLVMContext()), 0));
+ if (Info.EC.isScalable()) {
+ unsigned NumElemsPerVG = NumElems / 2;
+ SmallVector<uint64_t, 9> Expr(
+ {llvm::dwarf::DW_OP_constu, NumElemsPerVG, llvm::dwarf::DW_OP_bregx,
+ /* AArch64::VG */ 46, 0, llvm::dwarf::DW_OP_mul,
+ llvm::dwarf::DW_OP_constu, 1, llvm::dwarf::DW_OP_minus});
+ UpperBound = DBuilder.createExpression(Expr);
+ } else
+ UpperBound = llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
+ llvm::Type::getInt64Ty(CGM.getLLVMContext()), NumElems - 1));
llvm::Metadata *Subscript = DBuilder.getOrCreateSubrange(
/*count*/ nullptr, LowerBound, UpperBound, /*stride*/ nullptr);
@@ -762,7 +815,7 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
}
// Element count = (VLENB / SEW) x LMUL
- SmallVector<int64_t, 9> Expr(
+ SmallVector<uint64_t, 12> Expr(
// The DW_OP_bregx operation has two operands: a register which is
// specified by an unsigned LEB128 number, followed by a signed LEB128
// offset.
@@ -776,6 +829,8 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
Expr.push_back(llvm::dwarf::DW_OP_div);
else
Expr.push_back(llvm::dwarf::DW_OP_mul);
+ // Element max index = count - 1
+ Expr.append({llvm::dwarf::DW_OP_constu, 1, llvm::dwarf::DW_OP_minus});
auto *LowerBound =
llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
@@ -791,6 +846,17 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
return DBuilder.createVectorType(/*Size=*/0, Align, ElemTy,
SubscriptArray);
}
+
+#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \
+ case BuiltinType::Id: { \
+ if (!SingletonId) \
+ SingletonId = \
+ DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type, \
+ MangledName, TheCU, TheCU->getFile(), 0); \
+ return SingletonId; \
+ }
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
+
case BuiltinType::UChar:
case BuiltinType::Char_U:
Encoding = llvm::dwarf::DW_ATE_unsigned_char;
@@ -830,11 +896,12 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
case BuiltinType::BFloat16:
case BuiltinType::Float128:
case BuiltinType::Double:
- // FIXME: For targets where long double and __float128 have the same size,
- // they are currently indistinguishable in the debugger without some
- // special treatment. However, there is currently no consensus on encoding
- // and this should be updated once a DWARF encoding exists for distinct
- // floating point types of the same size.
+ case BuiltinType::Ibm128:
+ // FIXME: For targets where long double, __ibm128 and __float128 have the
+ // same size, they are currently indistinguishable in the debugger without
+ // some special treatment. However, there is currently no consensus on
+ // encoding and this should be updated once a DWARF encoding exists for
+ // distinct floating point types of the same size.
Encoding = llvm::dwarf::DW_ATE_float;
break;
case BuiltinType::ShortAccum:
@@ -867,35 +934,15 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
break;
}
- switch (BT->getKind()) {
- case BuiltinType::Long:
- BTName = "long int";
- break;
- case BuiltinType::LongLong:
- BTName = "long long int";
- break;
- case BuiltinType::ULong:
- BTName = "long unsigned int";
- break;
- case BuiltinType::ULongLong:
- BTName = "long long unsigned int";
- break;
- default:
- BTName = BT->getName(CGM.getLangOpts());
- break;
- }
+ BTName = BT->getName(CGM.getLangOpts());
// Bit size and offset of the type.
uint64_t Size = CGM.getContext().getTypeSize(BT);
return DBuilder.createBasicType(BTName, Size, Encoding);
}
-llvm::DIType *CGDebugInfo::CreateType(const AutoType *Ty) {
- return DBuilder.createUnspecifiedType("auto");
-}
+llvm::DIType *CGDebugInfo::CreateType(const BitIntType *Ty) {
-llvm::DIType *CGDebugInfo::CreateType(const ExtIntType *Ty) {
-
- StringRef Name = Ty->isUnsigned() ? "unsigned _ExtInt" : "_ExtInt";
+ StringRef Name = Ty->isUnsigned() ? "unsigned _BitInt" : "_BitInt";
llvm::dwarf::TypeKind Encoding = Ty->isUnsigned()
? llvm::dwarf::DW_ATE_unsigned
: llvm::dwarf::DW_ATE_signed;
@@ -914,29 +961,41 @@ llvm::DIType *CGDebugInfo::CreateType(const ComplexType *Ty) {
return DBuilder.createBasicType("complex", Size, Encoding);
}
+static void stripUnusedQualifiers(Qualifiers &Q) {
+ // Ignore these qualifiers for now.
+ Q.removeObjCGCAttr();
+ Q.removeAddressSpace();
+ Q.removeObjCLifetime();
+ Q.removeUnaligned();
+}
+
+static llvm::dwarf::Tag getNextQualifier(Qualifiers &Q) {
+ if (Q.hasConst()) {
+ Q.removeConst();
+ return llvm::dwarf::DW_TAG_const_type;
+ }
+ if (Q.hasVolatile()) {
+ Q.removeVolatile();
+ return llvm::dwarf::DW_TAG_volatile_type;
+ }
+ if (Q.hasRestrict()) {
+ Q.removeRestrict();
+ return llvm::dwarf::DW_TAG_restrict_type;
+ }
+ return (llvm::dwarf::Tag)0;
+}
+
llvm::DIType *CGDebugInfo::CreateQualifiedType(QualType Ty,
llvm::DIFile *Unit) {
QualifierCollector Qc;
const Type *T = Qc.strip(Ty);
- // Ignore these qualifiers for now.
- Qc.removeObjCGCAttr();
- Qc.removeAddressSpace();
- Qc.removeObjCLifetime();
+ stripUnusedQualifiers(Qc);
// We will create one Derived type for one qualifier and recurse to handle any
// additional ones.
- llvm::dwarf::Tag Tag;
- if (Qc.hasConst()) {
- Tag = llvm::dwarf::DW_TAG_const_type;
- Qc.removeConst();
- } else if (Qc.hasVolatile()) {
- Tag = llvm::dwarf::DW_TAG_volatile_type;
- Qc.removeVolatile();
- } else if (Qc.hasRestrict()) {
- Tag = llvm::dwarf::DW_TAG_restrict_type;
- Qc.removeRestrict();
- } else {
+ llvm::dwarf::Tag Tag = getNextQualifier(Qc);
+ if (!Tag) {
assert(Qc.empty() && "Unknown type qualifier for debug info");
return getOrCreateType(QualType(T, 0), Unit);
}
@@ -948,6 +1007,30 @@ llvm::DIType *CGDebugInfo::CreateQualifiedType(QualType Ty,
return DBuilder.createQualifiedType(Tag, FromTy);
}
+llvm::DIType *CGDebugInfo::CreateQualifiedType(const FunctionProtoType *F,
+ llvm::DIFile *Unit) {
+ FunctionProtoType::ExtProtoInfo EPI = F->getExtProtoInfo();
+ Qualifiers &Q = EPI.TypeQuals;
+ stripUnusedQualifiers(Q);
+
+ // We will create one Derived type for one qualifier and recurse to handle any
+ // additional ones.
+ llvm::dwarf::Tag Tag = getNextQualifier(Q);
+ if (!Tag) {
+ assert(Q.empty() && "Unknown type qualifier for debug info");
+ return nullptr;
+ }
+
+ auto *FromTy =
+ getOrCreateType(CGM.getContext().getFunctionType(F->getReturnType(),
+ F->getParamTypes(), EPI),
+ Unit);
+
+ // No need to fill in the Name, Line, Size, Alignment, Offset in case of
+ // CVR derived types.
+ return DBuilder.createQualifiedType(Tag, FromTy);
+}
+
llvm::DIType *CGDebugInfo::CreateType(const ObjCObjectPointerType *Ty,
llvm::DIFile *Unit) {
@@ -1107,13 +1190,30 @@ llvm::DIType *CGDebugInfo::CreatePointerLikeType(llvm::dwarf::Tag Tag,
QualType PointeeTy,
llvm::DIFile *Unit) {
// Bit size, align and offset of the type.
- // Size is always the size of a pointer. We can't use getTypeSize here
- // because that does not return the correct value for references.
- unsigned AddressSpace = CGM.getContext().getTargetAddressSpace(PointeeTy);
- uint64_t Size = CGM.getTarget().getPointerWidth(AddressSpace);
+ // Size is always the size of a pointer.
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
auto Align = getTypeAlignIfRequired(Ty, CGM.getContext());
- Optional<unsigned> DWARFAddressSpace =
- CGM.getTarget().getDWARFAddressSpace(AddressSpace);
+ std::optional<unsigned> DWARFAddressSpace =
+ CGM.getTarget().getDWARFAddressSpace(
+ CGM.getTypes().getTargetAddressSpace(PointeeTy));
+
+ SmallVector<llvm::Metadata *, 4> Annots;
+ auto *BTFAttrTy = dyn_cast<BTFTagAttributedType>(PointeeTy);
+ while (BTFAttrTy) {
+ StringRef Tag = BTFAttrTy->getAttr()->getBTFTypeTag();
+ if (!Tag.empty()) {
+ llvm::Metadata *Ops[2] = {
+ llvm::MDString::get(CGM.getLLVMContext(), StringRef("btf_type_tag")),
+ llvm::MDString::get(CGM.getLLVMContext(), Tag)};
+ Annots.insert(Annots.begin(),
+ llvm::MDNode::get(CGM.getLLVMContext(), Ops));
+ }
+ BTFAttrTy = dyn_cast<BTFTagAttributedType>(BTFAttrTy->getWrappedType());
+ }
+
+ llvm::DINodeArray Annotations = nullptr;
+ if (Annots.size() > 0)
+ Annotations = DBuilder.getOrCreateArray(Annots);
if (Tag == llvm::dwarf::DW_TAG_reference_type ||
Tag == llvm::dwarf::DW_TAG_rvalue_reference_type)
@@ -1121,7 +1221,8 @@ llvm::DIType *CGDebugInfo::CreatePointerLikeType(llvm::dwarf::Tag Tag,
Size, Align, DWARFAddressSpace);
else
return DBuilder.createPointerType(getOrCreateType(PointeeTy, Unit), Size,
- Align, DWARFAddressSpace);
+ Align, DWARFAddressSpace, StringRef(),
+ Annotations);
}
llvm::DIType *CGDebugInfo::getOrCreateStructPtrType(StringRef Name,
@@ -1217,17 +1318,31 @@ llvm::DIType *CGDebugInfo::CreateType(const TemplateSpecializationType *Ty,
assert(Ty->isTypeAlias());
llvm::DIType *Src = getOrCreateType(Ty->getAliasedType(), Unit);
- auto *AliasDecl =
- cast<TypeAliasTemplateDecl>(Ty->getTemplateName().getAsTemplateDecl())
- ->getTemplatedDecl();
+ const TemplateDecl *TD = Ty->getTemplateName().getAsTemplateDecl();
+ if (isa<BuiltinTemplateDecl>(TD))
+ return Src;
+ const auto *AliasDecl = cast<TypeAliasTemplateDecl>(TD)->getTemplatedDecl();
if (AliasDecl->hasAttr<NoDebugAttr>())
return Src;
SmallString<128> NS;
llvm::raw_svector_ostream OS(NS);
- Ty->getTemplateName().print(OS, getPrintingPolicy(), /*qualified*/ false);
- printTemplateArgumentList(OS, Ty->template_arguments(), getPrintingPolicy());
+
+ auto PP = getPrintingPolicy();
+ Ty->getTemplateName().print(OS, PP, TemplateName::Qualified::None);
+
+ // Disable PrintCanonicalTypes here because we want
+ // the DW_AT_name to benefit from the TypePrinter's ability
+ // to skip defaulted template arguments.
+ //
+ // FIXME: Once -gsimple-template-names is enabled by default
+ // and we attach template parameters to alias template DIEs
+ // we don't need to worry about customizing the PrintingPolicy
+ // here anymore.
+ PP.PrintCanonicalTypes = false;
+ printTemplateArgumentList(OS, Ty->template_arguments(), PP,
+ TD->getTemplateParameters());
SourceLocation Loc = AliasDecl->getLocation();
return DBuilder.createTypedef(Src, OS.str(), getOrCreateFile(Loc),
@@ -1235,6 +1350,33 @@ llvm::DIType *CGDebugInfo::CreateType(const TemplateSpecializationType *Ty,
getDeclContextDescriptor(AliasDecl));
}
+/// Convert an AccessSpecifier into the corresponding DINode flag.
+/// As an optimization, return 0 if the access specifier equals the
+/// default for the containing type.
+static llvm::DINode::DIFlags getAccessFlag(AccessSpecifier Access,
+ const RecordDecl *RD) {
+ AccessSpecifier Default = clang::AS_none;
+ if (RD && RD->isClass())
+ Default = clang::AS_private;
+ else if (RD && (RD->isStruct() || RD->isUnion()))
+ Default = clang::AS_public;
+
+ if (Access == Default)
+ return llvm::DINode::FlagZero;
+
+ switch (Access) {
+ case clang::AS_private:
+ return llvm::DINode::FlagPrivate;
+ case clang::AS_protected:
+ return llvm::DINode::FlagProtected;
+ case clang::AS_public:
+ return llvm::DINode::FlagPublic;
+ case clang::AS_none:
+ return llvm::DINode::FlagZero;
+ }
+ llvm_unreachable("unexpected access enumerator");
+}
+
llvm::DIType *CGDebugInfo::CreateType(const TypedefType *Ty,
llvm::DIFile *Unit) {
llvm::DIType *Underlying =
@@ -1249,9 +1391,17 @@ llvm::DIType *CGDebugInfo::CreateType(const TypedefType *Ty,
uint32_t Align = getDeclAlignIfRequired(Ty->getDecl(), CGM.getContext());
// Typedefs are derived from some other type.
+ llvm::DINodeArray Annotations = CollectBTFDeclTagAnnotations(Ty->getDecl());
+
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
+ const DeclContext *DC = Ty->getDecl()->getDeclContext();
+ if (isa<RecordDecl>(DC))
+ Flags = getAccessFlag(Ty->getDecl()->getAccess(), cast<RecordDecl>(DC));
+
return DBuilder.createTypedef(Underlying, Ty->getDecl()->getName(),
getOrCreateFile(Loc), getLineNumber(Loc),
- getDeclContextDescriptor(Ty->getDecl()), Align);
+ getDeclContextDescriptor(Ty->getDecl()), Align,
+ Flags, Annotations);
}
static unsigned getDwarfCC(CallingConv CC) {
@@ -1276,6 +1426,7 @@ static unsigned getDwarfCC(CallingConv CC) {
return llvm::dwarf::DW_CC_LLVM_X86_64SysV;
case CC_AAPCS:
case CC_AArch64VectorCall:
+ case CC_AArch64SVEPCS:
return llvm::dwarf::DW_CC_LLVM_AAPCS;
case CC_AAPCS_VFP:
return llvm::dwarf::DW_CC_LLVM_AAPCS_VFP;
@@ -1284,6 +1435,7 @@ static unsigned getDwarfCC(CallingConv CC) {
case CC_SpirFunction:
return llvm::dwarf::DW_CC_LLVM_SpirFunction;
case CC_OpenCLKernel:
+ case CC_AMDGPUKernelCall:
return llvm::dwarf::DW_CC_LLVM_OpenCLKernel;
case CC_Swift:
return llvm::dwarf::DW_CC_LLVM_Swift;
@@ -1296,22 +1448,43 @@ static unsigned getDwarfCC(CallingConv CC) {
return llvm::dwarf::DW_CC_LLVM_PreserveAll;
case CC_X86RegCall:
return llvm::dwarf::DW_CC_LLVM_X86RegCall;
+ case CC_M68kRTD:
+ return llvm::dwarf::DW_CC_LLVM_M68kRTD;
}
return 0;
}
+static llvm::DINode::DIFlags getRefFlags(const FunctionProtoType *Func) {
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
+ if (Func->getExtProtoInfo().RefQualifier == RQ_LValue)
+ Flags |= llvm::DINode::FlagLValueReference;
+ if (Func->getExtProtoInfo().RefQualifier == RQ_RValue)
+ Flags |= llvm::DINode::FlagRValueReference;
+ return Flags;
+}
+
llvm::DIType *CGDebugInfo::CreateType(const FunctionType *Ty,
llvm::DIFile *Unit) {
+ const auto *FPT = dyn_cast<FunctionProtoType>(Ty);
+ if (FPT) {
+ if (llvm::DIType *QTy = CreateQualifiedType(FPT, Unit))
+ return QTy;
+ }
+
+ // Create the type without any qualifiers
+
SmallVector<llvm::Metadata *, 16> EltTys;
// Add the result type at least.
EltTys.push_back(getOrCreateType(Ty->getReturnType(), Unit));
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
// Set up remainder of arguments if there is a prototype.
// otherwise emit it as a variadic function.
- if (isa<FunctionNoProtoType>(Ty))
+ if (!FPT) {
EltTys.push_back(DBuilder.createUnspecifiedParameter());
- else if (const auto *FPT = dyn_cast<FunctionProtoType>(Ty)) {
+ } else {
+ Flags = getRefFlags(FPT);
for (const QualType &ParamType : FPT->param_types())
EltTys.push_back(getOrCreateType(ParamType, Unit));
if (FPT->isVariadic())
@@ -1319,42 +1492,18 @@ llvm::DIType *CGDebugInfo::CreateType(const FunctionType *Ty,
}
llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(EltTys);
- return DBuilder.createSubroutineType(EltTypeArray, llvm::DINode::FlagZero,
- getDwarfCC(Ty->getCallConv()));
-}
-
-/// Convert an AccessSpecifier into the corresponding DINode flag.
-/// As an optimization, return 0 if the access specifier equals the
-/// default for the containing type.
-static llvm::DINode::DIFlags getAccessFlag(AccessSpecifier Access,
- const RecordDecl *RD) {
- AccessSpecifier Default = clang::AS_none;
- if (RD && RD->isClass())
- Default = clang::AS_private;
- else if (RD && (RD->isStruct() || RD->isUnion()))
- Default = clang::AS_public;
-
- if (Access == Default)
- return llvm::DINode::FlagZero;
-
- switch (Access) {
- case clang::AS_private:
- return llvm::DINode::FlagPrivate;
- case clang::AS_protected:
- return llvm::DINode::FlagProtected;
- case clang::AS_public:
- return llvm::DINode::FlagPublic;
- case clang::AS_none:
- return llvm::DINode::FlagZero;
- }
- llvm_unreachable("unexpected access enumerator");
+ llvm::DIType *F = DBuilder.createSubroutineType(
+ EltTypeArray, Flags, getDwarfCC(Ty->getCallConv()));
+ return F;
}
-llvm::DIType *CGDebugInfo::createBitFieldType(const FieldDecl *BitFieldDecl,
- llvm::DIScope *RecordTy,
- const RecordDecl *RD) {
+llvm::DIDerivedType *
+CGDebugInfo::createBitFieldType(const FieldDecl *BitFieldDecl,
+ llvm::DIScope *RecordTy, const RecordDecl *RD) {
StringRef Name = BitFieldDecl->getName();
QualType Ty = BitFieldDecl->getType();
+ if (BitFieldDecl->hasAttr<PreferredTypeAttr>())
+ Ty = BitFieldDecl->getAttr<PreferredTypeAttr>()->getType();
SourceLocation Loc = BitFieldDecl->getLocation();
llvm::DIFile *VUnit = getOrCreateFile(Loc);
llvm::DIType *DebugType = getOrCreateType(Ty, VUnit);
@@ -1377,16 +1526,88 @@ llvm::DIType *CGDebugInfo::createBitFieldType(const FieldDecl *BitFieldDecl,
Offset = BitFieldInfo.StorageSize - BitFieldInfo.Size - Offset;
uint64_t OffsetInBits = StorageOffsetInBits + Offset;
llvm::DINode::DIFlags Flags = getAccessFlag(BitFieldDecl->getAccess(), RD);
+ llvm::DINodeArray Annotations = CollectBTFDeclTagAnnotations(BitFieldDecl);
return DBuilder.createBitFieldMemberType(
RecordTy, Name, File, Line, SizeInBits, OffsetInBits, StorageOffsetInBits,
- Flags, DebugType);
+ Flags, DebugType, Annotations);
+}
+
+llvm::DIDerivedType *CGDebugInfo::createBitFieldSeparatorIfNeeded(
+ const FieldDecl *BitFieldDecl, const llvm::DIDerivedType *BitFieldDI,
+ llvm::ArrayRef<llvm::Metadata *> PreviousFieldsDI, const RecordDecl *RD) {
+
+ if (!CGM.getTargetCodeGenInfo().shouldEmitDWARFBitFieldSeparators())
+ return nullptr;
+
+ /*
+ Add a *single* zero-bitfield separator between two non-zero bitfields
+ separated by one or more zero-bitfields. This is used to distinguish between
+ structures such the ones below, where the memory layout is the same, but how
+ the ABI assigns fields to registers differs.
+
+ struct foo {
+ int space[4];
+ char a : 8; // on amdgpu, passed on v4
+ char b : 8;
+ char x : 8;
+ char y : 8;
+ };
+ struct bar {
+ int space[4];
+ char a : 8; // on amdgpu, passed on v4
+ char b : 8;
+ char : 0;
+ char x : 8; // passed on v5
+ char y : 8;
+ };
+ */
+ if (PreviousFieldsDI.empty())
+ return nullptr;
+
+ // If we already emitted metadata for a 0-length bitfield, nothing to do here.
+ auto *PreviousMDEntry =
+ PreviousFieldsDI.empty() ? nullptr : PreviousFieldsDI.back();
+ auto *PreviousMDField =
+ dyn_cast_or_null<llvm::DIDerivedType>(PreviousMDEntry);
+ if (!PreviousMDField || !PreviousMDField->isBitField() ||
+ PreviousMDField->getSizeInBits() == 0)
+ return nullptr;
+
+ auto PreviousBitfield = RD->field_begin();
+ std::advance(PreviousBitfield, BitFieldDecl->getFieldIndex() - 1);
+
+ assert(PreviousBitfield->isBitField());
+
+ ASTContext &Context = CGM.getContext();
+ if (!PreviousBitfield->isZeroLengthBitField(Context))
+ return nullptr;
+
+ QualType Ty = PreviousBitfield->getType();
+ SourceLocation Loc = PreviousBitfield->getLocation();
+ llvm::DIFile *VUnit = getOrCreateFile(Loc);
+ llvm::DIType *DebugType = getOrCreateType(Ty, VUnit);
+ llvm::DIScope *RecordTy = BitFieldDI->getScope();
+
+ llvm::DIFile *File = getOrCreateFile(Loc);
+ unsigned Line = getLineNumber(Loc);
+
+ uint64_t StorageOffsetInBits =
+ cast<llvm::ConstantInt>(BitFieldDI->getStorageOffsetInBits())
+ ->getZExtValue();
+
+ llvm::DINode::DIFlags Flags =
+ getAccessFlag(PreviousBitfield->getAccess(), RD);
+ llvm::DINodeArray Annotations =
+ CollectBTFDeclTagAnnotations(*PreviousBitfield);
+ return DBuilder.createBitFieldMemberType(
+ RecordTy, "", File, Line, 0, StorageOffsetInBits, StorageOffsetInBits,
+ Flags, DebugType, Annotations);
}
-llvm::DIType *
-CGDebugInfo::createFieldType(StringRef name, QualType type, SourceLocation loc,
- AccessSpecifier AS, uint64_t offsetInBits,
- uint32_t AlignInBits, llvm::DIFile *tunit,
- llvm::DIScope *scope, const RecordDecl *RD) {
+llvm::DIType *CGDebugInfo::createFieldType(
+ StringRef name, QualType type, SourceLocation loc, AccessSpecifier AS,
+ uint64_t offsetInBits, uint32_t AlignInBits, llvm::DIFile *tunit,
+ llvm::DIScope *scope, const RecordDecl *RD, llvm::DINodeArray Annotations) {
llvm::DIType *debugType = getOrCreateType(type, tunit);
// Get the location for the field.
@@ -1404,7 +1625,7 @@ CGDebugInfo::createFieldType(StringRef name, QualType type, SourceLocation loc,
llvm::DINode::DIFlags flags = getAccessFlag(AS, RD);
return DBuilder.createMemberType(scope, name, file, line, SizeInBits, Align,
- offsetInBits, flags, debugType);
+ offsetInBits, flags, debugType, Annotations);
}
void CGDebugInfo::CollectRecordLambdaFields(
@@ -1423,7 +1644,7 @@ void CGDebugInfo::CollectRecordLambdaFields(
if (C.capturesVariable()) {
SourceLocation Loc = C.getLocation();
assert(!Field->isBitField() && "lambdas don't have bitfield members!");
- VarDecl *V = C.getCapturedVar();
+ ValueDecl *V = C.getCapturedVar();
StringRef VName = V->getName();
llvm::DIFile *VUnit = getOrCreateFile(Loc);
auto Align = getDeclAlignIfRequired(V, CGM.getContext());
@@ -1439,8 +1660,10 @@ void CGDebugInfo::CollectRecordLambdaFields(
FieldDecl *f = *Field;
llvm::DIFile *VUnit = getOrCreateFile(f->getLocation());
QualType type = f->getType();
+ StringRef ThisName =
+ CGM.getCodeGenOpts().EmitCodeView ? "__this" : "this";
llvm::DIType *fieldType = createFieldType(
- "this", type, f->getLocation(), f->getAccess(),
+ ThisName, type, f->getLocation(), f->getAccess(),
layout.getFieldOffset(fieldno), VUnit, RecordTy, CXXDecl);
elements.push_back(fieldType);
@@ -1459,6 +1682,9 @@ CGDebugInfo::CreateRecordStaticField(const VarDecl *Var, llvm::DIType *RecordTy,
unsigned LineNumber = getLineNumber(Var->getLocation());
StringRef VName = Var->getName();
+
+ // FIXME: to avoid complications with type merging we should
+ // emit the constant on the definition instead of the declaration.
llvm::Constant *C = nullptr;
if (Var->getInit()) {
const APValue *Value = Var->evaluateValue();
@@ -1471,9 +1697,12 @@ CGDebugInfo::CreateRecordStaticField(const VarDecl *Var, llvm::DIType *RecordTy,
}
llvm::DINode::DIFlags Flags = getAccessFlag(Var->getAccess(), RD);
+ auto Tag = CGM.getCodeGenOpts().DwarfVersion >= 5
+ ? llvm::dwarf::DW_TAG_variable
+ : llvm::dwarf::DW_TAG_member;
auto Align = getDeclAlignIfRequired(Var, CGM.getContext());
llvm::DIDerivedType *GV = DBuilder.createStaticMemberType(
- RecordTy, VName, VUnit, LineNumber, VTy, Flags, C, Align);
+ RecordTy, VName, VUnit, LineNumber, VTy, Flags, C, Tag, Align);
StaticDataMemberCache[Var->getCanonicalDecl()].reset(GV);
return GV;
}
@@ -1491,12 +1720,17 @@ void CGDebugInfo::CollectRecordNormalField(
llvm::DIType *FieldType;
if (field->isBitField()) {
- FieldType = createBitFieldType(field, RecordTy, RD);
+ llvm::DIDerivedType *BitFieldType;
+ FieldType = BitFieldType = createBitFieldType(field, RecordTy, RD);
+ if (llvm::DIType *Separator =
+ createBitFieldSeparatorIfNeeded(field, BitFieldType, elements, RD))
+ elements.push_back(Separator);
} else {
auto Align = getDeclAlignIfRequired(field, CGM.getContext());
+ llvm::DINodeArray Annotations = CollectBTFDeclTagAnnotations(field);
FieldType =
createFieldType(name, type, field->getLocation(), field->getAccess(),
- OffsetInBits, Align, tunit, RecordTy, RD);
+ OffsetInBits, Align, tunit, RecordTy, RD, Annotations);
}
elements.push_back(FieldType);
@@ -1562,52 +1796,66 @@ void CGDebugInfo::CollectRecordFields(
} else if (CGM.getCodeGenOpts().EmitCodeView) {
// Debug info for nested types is included in the member list only for
// CodeView.
- if (const auto *nestedType = dyn_cast<TypeDecl>(I))
+ if (const auto *nestedType = dyn_cast<TypeDecl>(I)) {
+ // MSVC doesn't generate nested type for anonymous struct/union.
+ if (isa<RecordDecl>(I) &&
+ cast<RecordDecl>(I)->isAnonymousStructOrUnion())
+ continue;
if (!nestedType->isImplicit() &&
nestedType->getDeclContext() == record)
CollectRecordNestedType(nestedType, elements);
+ }
}
}
}
llvm::DISubroutineType *
CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
- llvm::DIFile *Unit, bool decl) {
+ llvm::DIFile *Unit) {
const FunctionProtoType *Func = Method->getType()->getAs<FunctionProtoType>();
if (Method->isStatic())
return cast_or_null<llvm::DISubroutineType>(
getOrCreateType(QualType(Func, 0), Unit));
- return getOrCreateInstanceMethodType(Method->getThisType(), Func, Unit, decl);
-}
+ return getOrCreateInstanceMethodType(Method->getThisType(), Func, Unit);
+}
+
+llvm::DISubroutineType *CGDebugInfo::getOrCreateInstanceMethodType(
+ QualType ThisPtr, const FunctionProtoType *Func, llvm::DIFile *Unit) {
+ FunctionProtoType::ExtProtoInfo EPI = Func->getExtProtoInfo();
+ Qualifiers &Qc = EPI.TypeQuals;
+ Qc.removeConst();
+ Qc.removeVolatile();
+ Qc.removeRestrict();
+ Qc.removeUnaligned();
+ // Keep the removed qualifiers in sync with
+ // CreateQualifiedType(const FunctionPrototype*, DIFile *Unit)
+ // On a 'real' member function type, these qualifiers are carried on the type
+ // of the first parameter, not as separate DW_TAG_const_type (etc) decorator
+ // tags around them. (But, in the raw function types with qualifiers, they have
+ // to use wrapper types.)
-llvm::DISubroutineType *
-CGDebugInfo::getOrCreateInstanceMethodType(QualType ThisPtr,
- const FunctionProtoType *Func,
- llvm::DIFile *Unit, bool decl) {
// Add "this" pointer.
- llvm::DITypeRefArray Args(
- cast<llvm::DISubroutineType>(getOrCreateType(QualType(Func, 0), Unit))
- ->getTypeArray());
+ const auto *OriginalFunc = cast<llvm::DISubroutineType>(
+ getOrCreateType(CGM.getContext().getFunctionType(
+ Func->getReturnType(), Func->getParamTypes(), EPI),
+ Unit));
+ llvm::DITypeRefArray Args = OriginalFunc->getTypeArray();
assert(Args.size() && "Invalid number of arguments!");
SmallVector<llvm::Metadata *, 16> Elts;
+
// First element is always return type. For 'void' functions it is NULL.
- QualType temp = Func->getReturnType();
- if (temp->getTypeClass() == Type::Auto && decl)
- Elts.push_back(CreateType(cast<AutoType>(temp)));
- else
- Elts.push_back(Args[0]);
+ Elts.push_back(Args[0]);
// "this" pointer is always first argument.
const CXXRecordDecl *RD = ThisPtr->getPointeeCXXRecordDecl();
if (isa<ClassTemplateSpecializationDecl>(RD)) {
// Create pointer type directly in this case.
const PointerType *ThisPtrTy = cast<PointerType>(ThisPtr);
- QualType PointeeTy = ThisPtrTy->getPointeeType();
- unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy);
- uint64_t Size = CGM.getTarget().getPointerWidth(AS);
+ uint64_t Size = CGM.getContext().getTypeSize(ThisPtrTy);
auto Align = getTypeAlignIfRequired(ThisPtrTy, CGM.getContext());
- llvm::DIType *PointeeType = getOrCreateType(PointeeTy, Unit);
+ llvm::DIType *PointeeType =
+ getOrCreateType(ThisPtrTy->getPointeeType(), Unit);
llvm::DIType *ThisPtrType =
DBuilder.createPointerType(PointeeType, Size, Align);
TypeCache[ThisPtr.getAsOpaquePtr()].reset(ThisPtrType);
@@ -1629,13 +1877,7 @@ CGDebugInfo::getOrCreateInstanceMethodType(QualType ThisPtr,
llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(Elts);
- llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
- if (Func->getExtProtoInfo().RefQualifier == RQ_LValue)
- Flags |= llvm::DINode::FlagLValueReference;
- if (Func->getExtProtoInfo().RefQualifier == RQ_RValue)
- Flags |= llvm::DINode::FlagRValueReference;
-
- return DBuilder.createSubroutineType(EltTypeArray, Flags,
+ return DBuilder.createSubroutineType(EltTypeArray, OriginalFunc->getFlags(),
getDwarfCC(Func->getCallConv()));
}
@@ -1655,7 +1897,7 @@ llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction(
isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method);
StringRef MethodName = getFunctionName(Method);
- llvm::DISubroutineType *MethodTy = getOrCreateMethodType(Method, Unit, true);
+ llvm::DISubroutineType *MethodTy = getOrCreateMethodType(Method, Unit);
// Since a single ctor/dtor corresponds to multiple functions, it doesn't
// make sense to give a single ctor/dtor a linkage name.
@@ -1683,8 +1925,8 @@ llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction(
llvm::DISubprogram::DISPFlags SPFlags = llvm::DISubprogram::SPFlagZero;
int ThisAdjustment = 0;
- if (Method->isVirtual()) {
- if (Method->isPure())
+ if (VTableContextBase::hasVtableSlot(Method)) {
+ if (Method->isPureVirtual())
SPFlags |= llvm::DISubprogram::SPFlagPureVirtual;
else
SPFlags |= llvm::DISubprogram::SPFlagVirtual;
@@ -1721,27 +1963,8 @@ llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction(
ContainingType = RecordTy;
}
- // We're checking for deleted C++ special member functions
- // [Ctors,Dtors, Copy/Move]
- auto checkAttrDeleted = [&](const auto *Method) {
- if (Method->getCanonicalDecl()->isDeleted())
- SPFlags |= llvm::DISubprogram::SPFlagDeleted;
- };
-
- switch (Method->getKind()) {
-
- case Decl::CXXConstructor:
- case Decl::CXXDestructor:
- checkAttrDeleted(Method);
- break;
- case Decl::CXXMethod:
- if (Method->isCopyAssignmentOperator() ||
- Method->isMoveAssignmentOperator())
- checkAttrDeleted(Method);
- break;
- default:
- break;
- }
+ if (Method->getCanonicalDecl()->isDeleted())
+ SPFlags |= llvm::DISubprogram::SPFlagDeleted;
if (Method->isNoReturn())
Flags |= llvm::DINode::FlagNoReturn;
@@ -1771,7 +1994,7 @@ llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction(
// In this debug mode, emit type info for a class when its constructor type
// info is emitted.
- if (DebugKind == codegenoptions::DebugInfoConstructor)
+ if (DebugKind == llvm::codegenoptions::DebugInfoConstructor)
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(Method))
completeUnusedClass(*CD->getParent());
@@ -1887,43 +2110,28 @@ void CGDebugInfo::CollectCXXBasesAux(
}
llvm::DINodeArray
-CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList,
- ArrayRef<TemplateArgument> TAList,
+CGDebugInfo::CollectTemplateParams(std::optional<TemplateArgs> OArgs,
llvm::DIFile *Unit) {
+ if (!OArgs)
+ return llvm::DINodeArray();
+ TemplateArgs &Args = *OArgs;
SmallVector<llvm::Metadata *, 16> TemplateParams;
- for (unsigned i = 0, e = TAList.size(); i != e; ++i) {
- const TemplateArgument &TA = TAList[i];
+ for (unsigned i = 0, e = Args.Args.size(); i != e; ++i) {
+ const TemplateArgument &TA = Args.Args[i];
StringRef Name;
- bool defaultParameter = false;
- if (TPList)
- Name = TPList->getParam(i)->getName();
+ const bool defaultParameter = TA.getIsDefaulted();
+ if (Args.TList)
+ Name = Args.TList->getParam(i)->getName();
+
switch (TA.getKind()) {
case TemplateArgument::Type: {
llvm::DIType *TTy = getOrCreateType(TA.getAsType(), Unit);
-
- if (TPList)
- if (auto *templateType =
- dyn_cast_or_null<TemplateTypeParmDecl>(TPList->getParam(i)))
- if (templateType->hasDefaultArgument())
- defaultParameter =
- templateType->getDefaultArgument() == TA.getAsType();
-
TemplateParams.push_back(DBuilder.createTemplateTypeParameter(
TheCU, Name, TTy, defaultParameter));
} break;
case TemplateArgument::Integral: {
llvm::DIType *TTy = getOrCreateType(TA.getIntegralType(), Unit);
- if (TPList && CGM.getCodeGenOpts().DwarfVersion >= 5)
- if (auto *templateType =
- dyn_cast_or_null<NonTypeTemplateParmDecl>(TPList->getParam(i)))
- if (templateType->hasDefaultArgument() &&
- !templateType->getDefaultArgument()->isValueDependent())
- defaultParameter = llvm::APSInt::isSameValue(
- templateType->getDefaultArgument()->EvaluateKnownConstInt(
- CGM.getContext()),
- TA.getAsIntegral());
-
TemplateParams.push_back(DBuilder.createTemplateValueParameter(
TheCU, Name, TTy, defaultParameter,
llvm::ConstantInt::get(CGM.getLLVMContext(), TA.getAsIntegral())));
@@ -1937,14 +2145,14 @@ CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList,
// attribute, i.e. that value is not available at the host side.
if (!CGM.getLangOpts().CUDA || CGM.getLangOpts().CUDAIsDevice ||
!D->hasAttr<CUDADeviceAttr>()) {
- const CXXMethodDecl *MD;
// Variable pointer template parameters have a value that is the address
// of the variable.
if (const auto *VD = dyn_cast<VarDecl>(D))
V = CGM.GetAddrOfGlobalVar(VD);
// Member function pointers have special support for building them,
// though this is currently unsupported in LLVM CodeGen.
- else if ((MD = dyn_cast<CXXMethodDecl>(D)) && MD->isInstance())
+ else if (const auto *MD = dyn_cast<CXXMethodDecl>(D);
+ MD && MD->isImplicitObjectMemberFunction())
V = CGM.getCXXABI().EmitMemberFunctionPointer(MD);
else if (const auto *FD = dyn_cast<FunctionDecl>(D))
V = CGM.GetAddrOfFunction(FD);
@@ -1993,15 +2201,27 @@ CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList,
TemplateParams.push_back(DBuilder.createTemplateValueParameter(
TheCU, Name, TTy, defaultParameter, V));
} break;
- case TemplateArgument::Template:
+ case TemplateArgument::StructuralValue: {
+ QualType T = TA.getStructuralValueType();
+ llvm::DIType *TTy = getOrCreateType(T, Unit);
+ llvm::Constant *V = ConstantEmitter(CGM).emitAbstract(
+ SourceLocation(), TA.getAsStructuralValue(), T);
+ TemplateParams.push_back(DBuilder.createTemplateValueParameter(
+ TheCU, Name, TTy, defaultParameter, V));
+ } break;
+ case TemplateArgument::Template: {
+ std::string QualName;
+ llvm::raw_string_ostream OS(QualName);
+ TA.getAsTemplate().getAsTemplateDecl()->printQualifiedName(
+ OS, getPrintingPolicy());
TemplateParams.push_back(DBuilder.createTemplateTemplateParameter(
- TheCU, Name, nullptr,
- TA.getAsTemplate().getAsTemplateDecl()->getQualifiedNameAsString()));
+ TheCU, Name, nullptr, OS.str(), defaultParameter));
break;
+ }
case TemplateArgument::Pack:
TemplateParams.push_back(DBuilder.createTemplateParameterPack(
TheCU, Name, nullptr,
- CollectTemplateParams(nullptr, TA.getPackAsArray(), Unit)));
+ CollectTemplateParams({{nullptr, TA.getPackAsArray()}}, Unit)));
break;
case TemplateArgument::Expression: {
const Expr *E = TA.getAsExpr();
@@ -2024,43 +2244,72 @@ CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList,
return DBuilder.getOrCreateArray(TemplateParams);
}
-llvm::DINodeArray
-CGDebugInfo::CollectFunctionTemplateParams(const FunctionDecl *FD,
- llvm::DIFile *Unit) {
+std::optional<CGDebugInfo::TemplateArgs>
+CGDebugInfo::GetTemplateArgs(const FunctionDecl *FD) const {
if (FD->getTemplatedKind() ==
FunctionDecl::TK_FunctionTemplateSpecialization) {
const TemplateParameterList *TList = FD->getTemplateSpecializationInfo()
->getTemplate()
->getTemplateParameters();
- return CollectTemplateParams(
- TList, FD->getTemplateSpecializationArgs()->asArray(), Unit);
+ return {{TList, FD->getTemplateSpecializationArgs()->asArray()}};
}
- return llvm::DINodeArray();
+ return std::nullopt;
}
-
-llvm::DINodeArray CGDebugInfo::CollectVarTemplateParams(const VarDecl *VL,
- llvm::DIFile *Unit) {
+std::optional<CGDebugInfo::TemplateArgs>
+CGDebugInfo::GetTemplateArgs(const VarDecl *VD) const {
// Always get the full list of parameters, not just the ones from the
// specialization. A partial specialization may have fewer parameters than
// there are arguments.
- auto *TS = dyn_cast<VarTemplateSpecializationDecl>(VL);
+ auto *TS = dyn_cast<VarTemplateSpecializationDecl>(VD);
if (!TS)
- return llvm::DINodeArray();
+ return std::nullopt;
VarTemplateDecl *T = TS->getSpecializedTemplate();
const TemplateParameterList *TList = T->getTemplateParameters();
auto TA = TS->getTemplateArgs().asArray();
- return CollectTemplateParams(TList, TA, Unit);
+ return {{TList, TA}};
+}
+std::optional<CGDebugInfo::TemplateArgs>
+CGDebugInfo::GetTemplateArgs(const RecordDecl *RD) const {
+ if (auto *TSpecial = dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
+ // Always get the full list of parameters, not just the ones from the
+ // specialization. A partial specialization may have fewer parameters than
+ // there are arguments.
+ TemplateParameterList *TPList =
+ TSpecial->getSpecializedTemplate()->getTemplateParameters();
+ const TemplateArgumentList &TAList = TSpecial->getTemplateArgs();
+ return {{TPList, TAList.asArray()}};
+ }
+ return std::nullopt;
}
-llvm::DINodeArray CGDebugInfo::CollectCXXTemplateParams(
- const ClassTemplateSpecializationDecl *TSpecial, llvm::DIFile *Unit) {
- // Always get the full list of parameters, not just the ones from the
- // specialization. A partial specialization may have fewer parameters than
- // there are arguments.
- TemplateParameterList *TPList =
- TSpecial->getSpecializedTemplate()->getTemplateParameters();
- const TemplateArgumentList &TAList = TSpecial->getTemplateArgs();
- return CollectTemplateParams(TPList, TAList.asArray(), Unit);
+llvm::DINodeArray
+CGDebugInfo::CollectFunctionTemplateParams(const FunctionDecl *FD,
+ llvm::DIFile *Unit) {
+ return CollectTemplateParams(GetTemplateArgs(FD), Unit);
+}
+
+llvm::DINodeArray CGDebugInfo::CollectVarTemplateParams(const VarDecl *VL,
+ llvm::DIFile *Unit) {
+ return CollectTemplateParams(GetTemplateArgs(VL), Unit);
+}
+
+llvm::DINodeArray CGDebugInfo::CollectCXXTemplateParams(const RecordDecl *RD,
+ llvm::DIFile *Unit) {
+ return CollectTemplateParams(GetTemplateArgs(RD), Unit);
+}
+
+llvm::DINodeArray CGDebugInfo::CollectBTFDeclTagAnnotations(const Decl *D) {
+ if (!D->hasAttr<BTFDeclTagAttr>())
+ return nullptr;
+
+ SmallVector<llvm::Metadata *, 4> Annotations;
+ for (const auto *I : D->specific_attrs<BTFDeclTagAttr>()) {
+ llvm::Metadata *Ops[2] = {
+ llvm::MDString::get(CGM.getLLVMContext(), StringRef("btf_decl_tag")),
+ llvm::MDString::get(CGM.getLLVMContext(), I->getBTFDeclTag())};
+ Annotations.push_back(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
+ }
+ return DBuilder.getOrCreateArray(Annotations);
}
llvm::DIType *CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile *Unit) {
@@ -2075,7 +2324,7 @@ llvm::DIType *CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile *Unit) {
llvm::DIType *SubTy = DBuilder.createSubroutineType(SElements);
unsigned Size = Context.getTypeSize(Context.VoidPtrTy);
unsigned VtblPtrAddressSpace = CGM.getTarget().getVtblPtrAddressSpace();
- Optional<unsigned> DWARFAddressSpace =
+ std::optional<unsigned> DWARFAddressSpace =
CGM.getTarget().getDWARFAddressSpace(VtblPtrAddressSpace);
llvm::DIType *vtbl_ptr_type = DBuilder.createPointerType(
@@ -2172,7 +2421,7 @@ void CGDebugInfo::CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile *Unit,
VFTLayout.vtable_components().size() - CGM.getLangOpts().RTTIData;
unsigned VTableWidth = PtrWidth * VSlotCount;
unsigned VtblPtrAddressSpace = CGM.getTarget().getVtblPtrAddressSpace();
- Optional<unsigned> DWARFAddressSpace =
+ std::optional<unsigned> DWARFAddressSpace =
CGM.getTarget().getDWARFAddressSpace(VtblPtrAddressSpace);
// Create a very wide void* type and insert it directly in the element list.
@@ -2225,11 +2474,11 @@ void CGDebugInfo::addHeapAllocSiteMetadata(llvm::CallBase *CI,
QualType AllocatedTy,
SourceLocation Loc) {
if (CGM.getCodeGenOpts().getDebugInfo() <=
- codegenoptions::DebugLineTablesOnly)
+ llvm::codegenoptions::DebugLineTablesOnly)
return;
llvm::MDNode *node;
if (AllocatedTy->isVoidType())
- node = llvm::MDNode::get(CGM.getLLVMContext(), None);
+ node = llvm::MDNode::get(CGM.getLLVMContext(), std::nullopt);
else
node = getOrCreateType(AllocatedTy, getOrCreateFile(Loc));
@@ -2237,7 +2486,7 @@ void CGDebugInfo::addHeapAllocSiteMetadata(llvm::CallBase *CI,
}
void CGDebugInfo::completeType(const EnumDecl *ED) {
- if (DebugKind <= codegenoptions::DebugLineTablesOnly)
+ if (DebugKind <= llvm::codegenoptions::DebugLineTablesOnly)
return;
QualType Ty = CGM.getContext().getEnumType(ED);
void *TyPtr = Ty.getAsOpaquePtr();
@@ -2250,7 +2499,7 @@ void CGDebugInfo::completeType(const EnumDecl *ED) {
}
void CGDebugInfo::completeType(const RecordDecl *RD) {
- if (DebugKind > codegenoptions::LimitedDebugInfo ||
+ if (DebugKind > llvm::codegenoptions::LimitedDebugInfo ||
!CGM.getLangOpts().CPlusPlus)
completeRequiredType(RD);
}
@@ -2312,14 +2561,18 @@ void CGDebugInfo::completeClassData(const RecordDecl *RD) {
}
void CGDebugInfo::completeClass(const RecordDecl *RD) {
- if (DebugKind <= codegenoptions::DebugLineTablesOnly)
+ if (DebugKind <= llvm::codegenoptions::DebugLineTablesOnly)
return;
QualType Ty = CGM.getContext().getRecordType(RD);
void *TyPtr = Ty.getAsOpaquePtr();
auto I = TypeCache.find(TyPtr);
if (I != TypeCache.end() && !cast<llvm::DIType>(I->second)->isForwardDecl())
return;
- llvm::DIType *Res = CreateTypeDefinition(Ty->castAs<RecordType>());
+
+ // We want the canonical definition of the structure to not
+ // be the typedef. Since that would lead to circular typedef
+ // metadata.
+ auto [Res, PrefRes] = CreateTypeDefinition(Ty->castAs<RecordType>());
assert(!Res->isForwardDecl());
TypeCache[TyPtr].reset(Res);
}
@@ -2346,12 +2599,21 @@ static bool canUseCtorHoming(const CXXRecordDecl *RD) {
if (isClassOrMethodDLLImport(RD))
return false;
- return !RD->isLambda() && !RD->isAggregate() &&
- !RD->hasTrivialDefaultConstructor() &&
- !RD->hasConstexprNonCopyMoveConstructor();
+ if (RD->isLambda() || RD->isAggregate() ||
+ RD->hasTrivialDefaultConstructor() ||
+ RD->hasConstexprNonCopyMoveConstructor())
+ return false;
+
+ for (const CXXConstructorDecl *Ctor : RD->ctors()) {
+ if (Ctor->isCopyOrMoveConstructor())
+ continue;
+ if (!Ctor->isDeleted())
+ return true;
+ }
+ return false;
}
-static bool shouldOmitDefinition(codegenoptions::DebugInfoKind DebugKind,
+static bool shouldOmitDefinition(llvm::codegenoptions::DebugInfoKind DebugKind,
bool DebugTypeExtRefs, const RecordDecl *RD,
const LangOptions &LangOpts) {
if (DebugTypeExtRefs && isDefinedInClangModule(RD->getDefinition()))
@@ -2364,10 +2626,10 @@ static bool shouldOmitDefinition(codegenoptions::DebugInfoKind DebugKind,
// Only emit forward declarations in line tables only to keep debug info size
// small. This only applies to CodeView, since we don't emit types in DWARF
// line tables only.
- if (DebugKind == codegenoptions::DebugLineTablesOnly)
+ if (DebugKind == llvm::codegenoptions::DebugLineTablesOnly)
return true;
- if (DebugKind > codegenoptions::LimitedDebugInfo ||
+ if (DebugKind > llvm::codegenoptions::LimitedDebugInfo ||
RD->hasAttr<StandaloneDebugAttr>())
return false;
@@ -2403,7 +2665,7 @@ static bool shouldOmitDefinition(codegenoptions::DebugInfoKind DebugKind,
// In constructor homing mode, only emit complete debug info for a class
// when its constructor is emitted.
- if ((DebugKind == codegenoptions::DebugInfoConstructor) &&
+ if ((DebugKind == llvm::codegenoptions::DebugInfoConstructor) &&
canUseCtorHoming(CXXDecl))
return true;
@@ -2430,10 +2692,25 @@ llvm::DIType *CGDebugInfo::CreateType(const RecordType *Ty) {
return T;
}
- return CreateTypeDefinition(Ty);
+ auto [Def, Pref] = CreateTypeDefinition(Ty);
+
+ return Pref ? Pref : Def;
}
-llvm::DIType *CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
+llvm::DIType *CGDebugInfo::GetPreferredNameType(const CXXRecordDecl *RD,
+ llvm::DIFile *Unit) {
+ if (!RD)
+ return nullptr;
+
+ auto const *PNA = RD->getAttr<PreferredNameAttr>();
+ if (!PNA)
+ return nullptr;
+
+ return getOrCreateType(PNA->getTypedefType(), Unit);
+}
+
+std::pair<llvm::DIType *, llvm::DIType *>
+CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
RecordDecl *RD = Ty->getDecl();
// Get overall information about the record type for the debug info.
@@ -2449,7 +2726,7 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
const RecordDecl *D = RD->getDefinition();
if (!D || !D->isCompleteDefinition())
- return FwdDecl;
+ return {FwdDecl, nullptr};
if (const auto *CXXDecl = dyn_cast<CXXRecordDecl>(RD))
CollectContainingType(CXXDecl, FwdDecl);
@@ -2488,7 +2765,12 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
llvm::MDNode::replaceWithPermanent(llvm::TempDICompositeType(FwdDecl));
RegionMap[Ty->getDecl()].reset(FwdDecl);
- return FwdDecl;
+
+ if (CGM.getCodeGenOpts().getDebuggerTuning() == llvm::DebuggerKind::LLDB)
+ if (auto *PrefDI = GetPreferredNameType(CXXDecl, DefUnit))
+ return {FwdDecl, PrefDI};
+
+ return {FwdDecl, nullptr};
}
llvm::DIType *CGDebugInfo::CreateType(const ObjCObjectType *Ty,
@@ -2612,7 +2894,7 @@ llvm::DIModule *CGDebugInfo::getOrCreateModuleRef(ASTSourceDescriptor Mod,
// clang::Module object, but it won't actually be built or imported; it will
// be textual.
if (CreateSkeletonCU && IsRootModule && Mod.getASTFile().empty() && M)
- assert(StringRef(M->Name).startswith(CGM.getLangOpts().ModuleName) &&
+ assert(StringRef(M->Name).starts_with(CGM.getLangOpts().ModuleName) &&
"clang module without ASTFile must be specified by -fmodule-name");
// Return a StringRef to the remapped Path.
@@ -2639,8 +2921,12 @@ llvm::DIModule *CGDebugInfo::getOrCreateModuleRef(ASTSourceDescriptor Mod,
llvm::DIBuilder DIB(CGM.getModule());
SmallString<0> PCM;
- if (!llvm::sys::path::is_absolute(Mod.getASTFile()))
- PCM = Mod.getPath();
+ if (!llvm::sys::path::is_absolute(Mod.getASTFile())) {
+ if (CGM.getHeaderSearchOpts().ModuleFileHomeIsCwd)
+ PCM = getCurrentDirname();
+ else
+ PCM = Mod.getPath();
+ }
llvm::sys::path::append(PCM, Mod.getASTFile());
DIB.createCompileUnit(
TheCU->getSourceLanguage(),
@@ -2801,6 +3087,9 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
else if (Field->getAccessControl() == ObjCIvarDecl::Public)
Flags = llvm::DINode::FlagPublic;
+ if (Field->isBitField())
+ Flags |= llvm::DINode::FlagBitField;
+
llvm::MDNode *PropertyNode = nullptr;
if (ObjCImplementationDecl *ImpD = ID->getImplementation()) {
if (ObjCPropertyImplDecl *PImpD =
@@ -2839,6 +3128,23 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
llvm::DIType *CGDebugInfo::CreateType(const VectorType *Ty,
llvm::DIFile *Unit) {
+ if (Ty->isExtVectorBoolType()) {
+ // Boolean ext_vector_type(N) are special because their real element type
+ // (bits of bit size) is not their Clang element type (_Bool of size byte).
+ // For now, we pretend the boolean vector were actually a vector of bytes
+ // (where each byte represents 8 bits of the actual vector).
+ // FIXME Debug info should actually represent this proper as a vector mask
+ // type.
+ auto &Ctx = CGM.getContext();
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t NumVectorBytes = Size / Ctx.getCharWidth();
+
+ // Construct the vector of 'char' type.
+ QualType CharVecTy =
+ Ctx.getVectorType(Ctx.CharTy, NumVectorBytes, VectorKind::Generic);
+ return CreateType(CharVecTy->getAs<VectorType>(), Unit);
+ }
+
llvm::DIType *ElementTy = getOrCreateType(Ty->getElementType(), Unit);
int64_t Count = Ty->getNumElements();
@@ -3012,11 +3318,11 @@ llvm::DIType *CGDebugInfo::CreateType(const MemberPointerType *Ty,
Flags);
const FunctionProtoType *FPT =
- Ty->getPointeeType()->getAs<FunctionProtoType>();
+ Ty->getPointeeType()->castAs<FunctionProtoType>();
return DBuilder.createMemberPointerType(
getOrCreateInstanceMethodType(
CXXMethodDecl::getThisType(FPT, Ty->getMostRecentCXXRecordDecl()),
- FPT, U, false),
+ FPT, U),
ClassType, Size, /*Align=*/0, Flags);
}
@@ -3098,9 +3404,9 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) {
unsigned Line = getLineNumber(ED->getLocation());
llvm::DIScope *EnumContext = getDeclContextDescriptor(ED);
llvm::DIType *ClassTy = getOrCreateType(ED->getIntegerType(), DefUnit);
- return DBuilder.createEnumerationType(EnumContext, ED->getName(), DefUnit,
- Line, Size, Align, EltArray, ClassTy,
- Identifier, ED->isScoped());
+ return DBuilder.createEnumerationType(
+ EnumContext, ED->getName(), DefUnit, Line, Size, Align, EltArray, ClassTy,
+ /*RunTimeLang=*/0, Identifier, ED->isScoped());
}
llvm::DIMacro *CGDebugInfo::CreateMacro(llvm::DIMacroFile *Parent,
@@ -3141,7 +3447,7 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
T = cast<TypeOfExprType>(T)->getUnderlyingExpr()->getType();
break;
case Type::TypeOf:
- T = cast<TypeOfType>(T)->getUnderlyingType();
+ T = cast<TypeOfType>(T)->getUnmodifiedType();
break;
case Type::Decltype:
T = cast<DecltypeType>(T)->getUnderlyingType();
@@ -3152,9 +3458,15 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
case Type::Attributed:
T = cast<AttributedType>(T)->getEquivalentType();
break;
+ case Type::BTFTagAttributed:
+ T = cast<BTFTagAttributedType>(T)->getWrappedType();
+ break;
case Type::Elaborated:
T = cast<ElaboratedType>(T)->getNamedType();
break;
+ case Type::Using:
+ T = cast<UsingType>(T)->getUnderlyingType();
+ break;
case Type::Paren:
T = cast<ParenType>(T)->getInnerType();
break;
@@ -3201,7 +3513,8 @@ void CGDebugInfo::completeTemplateDefinition(
}
void CGDebugInfo::completeUnusedClass(const CXXRecordDecl &D) {
- if (DebugKind <= codegenoptions::DebugLineTablesOnly)
+ if (DebugKind <= llvm::codegenoptions::DebugLineTablesOnly ||
+ D.isDynamicClass())
return;
completeClassData(&D);
@@ -3331,8 +3644,8 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
case Type::Atomic:
return CreateType(cast<AtomicType>(Ty), Unit);
- case Type::ExtInt:
- return CreateType(cast<ExtIntType>(Ty));
+ case Type::BitInt:
+ return CreateType(cast<BitIntType>(Ty));
case Type::Pipe:
return CreateType(cast<PipeType>(Ty), Unit);
@@ -3341,10 +3654,12 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
case Type::Auto:
case Type::Attributed:
+ case Type::BTFTagAttributed:
case Type::Adjusted:
case Type::Decayed:
case Type::DeducedTemplateSpecialization:
case Type::Elaborated:
+ case Type::Using:
case Type::Paren:
case Type::MacroQualified:
case Type::SubstTemplateTypeParm:
@@ -3413,7 +3728,11 @@ llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
return getOrCreateRecordFwdDecl(Ty, RDContext);
uint64_t Size = CGM.getContext().getTypeSize(Ty);
- auto Align = getDeclAlignIfRequired(D, CGM.getContext());
+ // __attribute__((aligned)) can increase or decrease alignment *except* on a
+ // struct or struct member, where it only increases alignment unless 'packed'
+ // is also specified. To handle this case, the `getTypeAlignIfRequired` needs
+ // to be used.
+ auto Align = getTypeAlignIfRequired(Ty, CGM.getContext());
SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU);
@@ -3433,11 +3752,15 @@ llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
// Record exports it symbols to the containing structure.
if (CXXRD->isAnonymousStructOrUnion())
Flags |= llvm::DINode::FlagExportSymbols;
+
+ Flags |= getAccessFlag(CXXRD->getAccess(),
+ dyn_cast<CXXRecordDecl>(CXXRD->getDeclContext()));
}
+ llvm::DINodeArray Annotations = CollectBTFDeclTagAnnotations(D);
llvm::DICompositeType *RealDecl = DBuilder.createReplaceableCompositeType(
getTagForRecord(RD), RDName, RDContext, DefUnit, Line, 0, Size, Align,
- Flags, Identifier);
+ Flags, Identifier, Annotations);
// Elements of composite types usually have back to the type, creating
// uniquing cycles. Distinct nodes are more efficient.
@@ -3453,7 +3776,7 @@ llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
// them distinct if they are ODR-uniqued.
if (Identifier.empty())
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case llvm::dwarf::DW_TAG_structure_type:
case llvm::dwarf::DW_TAG_union_type:
@@ -3476,11 +3799,11 @@ llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
void CGDebugInfo::CollectContainingType(const CXXRecordDecl *RD,
llvm::DICompositeType *RealDecl) {
// A class's primary base or the class itself contains the vtable.
- llvm::DICompositeType *ContainingType = nullptr;
+ llvm::DIType *ContainingType = nullptr;
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
if (const CXXRecordDecl *PBase = RL.getPrimaryBase()) {
// Seek non-virtual primary base root.
- while (1) {
+ while (true) {
const ASTRecordLayout &BRL = CGM.getContext().getASTRecordLayout(PBase);
const CXXRecordDecl *PBT = BRL.getPrimaryBase();
if (PBT && !BRL.isPrimaryBaseVirtual())
@@ -3488,9 +3811,8 @@ void CGDebugInfo::CollectContainingType(const CXXRecordDecl *RD,
else
break;
}
- ContainingType = cast<llvm::DICompositeType>(
- getOrCreateType(QualType(PBase->getTypeForDecl(), 0),
- getOrCreateFile(RD->getLocation())));
+ ContainingType = getOrCreateType(QualType(PBase->getTypeForDecl(), 0),
+ getOrCreateFile(RD->getLocation()));
} else if (RD->isDynamicClass())
ContainingType = RealDecl;
@@ -3525,17 +3847,18 @@ void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit,
// No need to replicate the linkage name if it isn't different from the
// subprogram name, no need to have it at all unless coverage is enabled or
// debug is set to more than just line tables or extra debug info is needed.
- if (LinkageName == Name || (!CGM.getCodeGenOpts().EmitGcovArcs &&
- !CGM.getCodeGenOpts().EmitGcovNotes &&
- !CGM.getCodeGenOpts().DebugInfoForProfiling &&
- !CGM.getCodeGenOpts().PseudoProbeForProfiling &&
- DebugKind <= codegenoptions::DebugLineTablesOnly))
+ if (LinkageName == Name ||
+ (CGM.getCodeGenOpts().CoverageNotesFile.empty() &&
+ CGM.getCodeGenOpts().CoverageDataFile.empty() &&
+ !CGM.getCodeGenOpts().DebugInfoForProfiling &&
+ !CGM.getCodeGenOpts().PseudoProbeForProfiling &&
+ DebugKind <= llvm::codegenoptions::DebugLineTablesOnly))
LinkageName = StringRef();
// Emit the function scope in line tables only mode (if CodeView) to
// differentiate between function names.
if (CGM.getCodeGenOpts().hasReducedDebugInfo() ||
- (DebugKind == codegenoptions::DebugLineTablesOnly &&
+ (DebugKind == llvm::codegenoptions::DebugLineTablesOnly &&
CGM.getCodeGenOpts().EmitCodeView)) {
if (const NamespaceDecl *NSDecl =
dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
@@ -3572,7 +3895,7 @@ void CGDebugInfo::collectVarDeclProps(const VarDecl *VD, llvm::DIFile *&Unit,
QualType ET = CGM.getContext().getAsArrayType(T)->getElementType();
T = CGM.getContext().getConstantArrayType(ET, ConstVal, nullptr,
- ArrayType::Normal, 0);
+ ArraySizeModifier::Normal, 0);
}
Name = VD->getName();
@@ -3702,6 +4025,17 @@ llvm::DINode *CGDebugInfo::getDeclarationOrDefinition(const Decl *D) {
auto N = I->second;
if (auto *GVE = dyn_cast_or_null<llvm::DIGlobalVariableExpression>(N))
return GVE->getVariable();
+ return cast<llvm::DINode>(N);
+ }
+
+ // Search imported declaration cache if it is already defined
+ // as imported declaration.
+ auto IE = ImportedDeclCache.find(D->getCanonicalDecl());
+
+ if (IE != ImportedDeclCache.end()) {
+ auto N = IE->second;
+ if (auto *GVE = dyn_cast_or_null<llvm::DIImportedEntity>(N))
+ return cast<llvm::DINode>(GVE);
return dyn_cast_or_null<llvm::DINode>(N);
}
@@ -3716,7 +4050,7 @@ llvm::DINode *CGDebugInfo::getDeclarationOrDefinition(const Decl *D) {
}
llvm::DISubprogram *CGDebugInfo::getFunctionDeclaration(const Decl *D) {
- if (!D || DebugKind <= codegenoptions::DebugLineTablesOnly)
+ if (!D || DebugKind <= llvm::codegenoptions::DebugLineTablesOnly)
return nullptr;
const auto *FD = dyn_cast<FunctionDecl>(D);
@@ -3739,7 +4073,7 @@ llvm::DISubprogram *CGDebugInfo::getFunctionDeclaration(const Decl *D) {
return SP;
}
- for (auto NextFD : FD->redecls()) {
+ for (auto *NextFD : FD->redecls()) {
auto MI = SPCache.find(NextFD->getCanonicalDecl());
if (MI != SPCache.end()) {
auto *SP = dyn_cast_or_null<llvm::DISubprogram>(MI->second);
@@ -3753,7 +4087,7 @@ llvm::DISubprogram *CGDebugInfo::getFunctionDeclaration(const Decl *D) {
llvm::DISubprogram *CGDebugInfo::getObjCMethodDeclaration(
const Decl *D, llvm::DISubroutineType *FnType, unsigned LineNo,
llvm::DINode::DIFlags Flags, llvm::DISubprogram::DISPFlags SPFlags) {
- if (!D || DebugKind <= codegenoptions::DebugLineTablesOnly)
+ if (!D || DebugKind <= llvm::codegenoptions::DebugLineTablesOnly)
return nullptr;
const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
@@ -3793,14 +4127,15 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D,
llvm::DIFile *F) {
// In CodeView, we emit the function types in line tables only because the
// only way to distinguish between functions is by display name and type.
- if (!D || (DebugKind <= codegenoptions::DebugLineTablesOnly &&
+ if (!D || (DebugKind <= llvm::codegenoptions::DebugLineTablesOnly &&
!CGM.getCodeGenOpts().EmitCodeView))
// Create fake but valid subroutine type. Otherwise -verify would fail, and
// subprogram DIE will miss DW_AT_decl_file and DW_AT_decl_line fields.
- return DBuilder.createSubroutineType(DBuilder.getOrCreateTypeArray(None));
+ return DBuilder.createSubroutineType(
+ DBuilder.getOrCreateTypeArray(std::nullopt));
if (const auto *Method = dyn_cast<CXXMethodDecl>(D))
- return getOrCreateMethodType(Method, F, false);
+ return getOrCreateMethodType(Method, F);
const auto *FTy = FnType->getAs<FunctionType>();
CallingConv CC = FTy ? FTy->getCallConv() : CallingConv::CC_C;
@@ -3861,6 +4196,20 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D,
return cast<llvm::DISubroutineType>(getOrCreateType(FnType, F));
}
+QualType
+CGDebugInfo::getFunctionType(const FunctionDecl *FD, QualType RetTy,
+ const SmallVectorImpl<const VarDecl *> &Args) {
+ CallingConv CC = CallingConv::CC_C;
+ if (FD)
+ if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
+ CC = SrcFnTy->getCallConv();
+ SmallVector<QualType, 16> ArgTypes;
+ for (const VarDecl *VD : Args)
+ ArgTypes.push_back(VD->getType());
+ return CGM.getContext().getFunctionType(RetTy, ArgTypes,
+ FunctionProtoType::ExtProtoInfo(CC));
+}
+
void CGDebugInfo::emitFunctionStart(GlobalDecl GD, SourceLocation Loc,
SourceLocation ScopeLoc, QualType FnType,
llvm::Function *Fn, bool CurFuncIsThunk) {
@@ -3909,11 +4258,15 @@ void CGDebugInfo::emitFunctionStart(GlobalDecl GD, SourceLocation Loc,
Flags |= llvm::DINode::FlagPrototyped;
}
- if (Name.startswith("\01"))
+ if (Name.starts_with("\01"))
Name = Name.substr(1);
+ assert((!D || !isa<VarDecl>(D) ||
+ GD.getDynamicInitKind() != DynamicInitKind::NoStub) &&
+ "Unexpected DynamicInitKind !");
+
if (!HasDecl || D->isImplicit() || D->hasAttr<ArtificialAttr>() ||
- (isa<VarDecl>(D) && GD.getDynamicInitKind() != DynamicInitKind::NoStub)) {
+ isa<VarDecl>(D) || isa<CapturedDecl>(D)) {
Flags |= llvm::DINode::FlagArtificial;
// Artificial functions should not silently reuse CurLoc.
CurLoc = SourceLocation();
@@ -3935,10 +4288,13 @@ void CGDebugInfo::emitFunctionStart(GlobalDecl GD, SourceLocation Loc,
unsigned ScopeLine = getLineNumber(ScopeLoc);
llvm::DISubroutineType *DIFnType = getOrCreateFunctionType(D, FnType, Unit);
llvm::DISubprogram *Decl = nullptr;
- if (D)
+ llvm::DINodeArray Annotations = nullptr;
+ if (D) {
Decl = isa<ObjCMethodDecl>(D)
? getObjCMethodDeclaration(D, DIFnType, LineNo, Flags, SPFlags)
: getFunctionDeclaration(D);
+ Annotations = CollectBTFDeclTagAnnotations(D);
+ }
// FIXME: The function declaration we're constructing here is mostly reusing
// declarations from CXXMethodDecl and not constructing new ones for arbitrary
@@ -3947,7 +4303,8 @@ void CGDebugInfo::emitFunctionStart(GlobalDecl GD, SourceLocation Loc,
// are emitted as CU level entities by the backend.
llvm::DISubprogram *SP = DBuilder.createFunction(
FDContext, Name, LinkageName, Unit, LineNo, DIFnType, ScopeLine,
- FlagsForDef, SPFlagsForDef, TParamsArray.get(), Decl);
+ FlagsForDef, SPFlagsForDef, TParamsArray.get(), Decl, nullptr,
+ Annotations);
Fn->setSubprogram(SP);
// We might get here with a VarDecl in the case we're generating
// code for the initialization of globals. Do not record these decls
@@ -4006,10 +4363,28 @@ void CGDebugInfo::EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc,
if (CGM.getLangOpts().Optimize)
SPFlags |= llvm::DISubprogram::SPFlagOptimized;
+ llvm::DINodeArray Annotations = CollectBTFDeclTagAnnotations(D);
+ llvm::DISubroutineType *STy = getOrCreateFunctionType(D, FnType, Unit);
llvm::DISubprogram *SP = DBuilder.createFunction(
- FDContext, Name, LinkageName, Unit, LineNo,
- getOrCreateFunctionType(D, FnType, Unit), ScopeLine, Flags, SPFlags,
- TParamsArray.get(), getFunctionDeclaration(D));
+ FDContext, Name, LinkageName, Unit, LineNo, STy, ScopeLine, Flags,
+ SPFlags, TParamsArray.get(), nullptr, nullptr, Annotations);
+
+ // Preserve btf_decl_tag attributes for parameters of extern functions
+ // for BPF target. The parameters created in this loop are attached as
+ // DISubprogram's retainedNodes in the subsequent finalizeSubprogram call.
+ if (IsDeclForCallSite && CGM.getTarget().getTriple().isBPF()) {
+ if (auto *FD = dyn_cast<FunctionDecl>(D)) {
+ llvm::DITypeRefArray ParamTypes = STy->getTypeArray();
+ unsigned ArgNo = 1;
+ for (ParmVarDecl *PD : FD->parameters()) {
+ llvm::DINodeArray ParamAnnotations = CollectBTFDeclTagAnnotations(PD);
+ DBuilder.createParameterVariable(
+ SP, PD->getName(), ArgNo, Unit, LineNo, ParamTypes[ArgNo], true,
+ llvm::DINode::FlagZero, ParamAnnotations);
+ ++ArgNo;
+ }
+ }
+ }
if (IsDeclForCallSite)
Fn->setSubprogram(SP);
@@ -4028,17 +4403,11 @@ void CGDebugInfo::EmitFuncDeclForCallSite(llvm::CallBase *CallOrInvoke,
if (Func->getSubprogram())
return;
- // Do not emit a declaration subprogram for a builtin, a function with nodebug
- // attribute, or if call site info isn't required. Also, elide declarations
- // for functions with reserved names, as call site-related features aren't
- // interesting in this case (& also, the compiler may emit calls to these
- // functions without debug locations, which makes the verifier complain).
- if (CalleeDecl->getBuiltinID() != 0 || CalleeDecl->hasAttr<NoDebugAttr>() ||
+ // Do not emit a declaration subprogram for a function with nodebug
+ // attribute, or if call site info isn't required.
+ if (CalleeDecl->hasAttr<NoDebugAttr>() ||
getCallSiteRelatedAttrs() == llvm::DINode::FlagZero)
return;
- if (CalleeDecl->isReserved(CGM.getLangOpts()) !=
- ReservedIdentifierStatus::NotReserved)
- return;
// If there is no DISubprogram attached to the function being called,
// create the one describing the function in order to have complete
@@ -4091,14 +4460,14 @@ void CGDebugInfo::CreateLexicalBlock(SourceLocation Loc) {
}
void CGDebugInfo::AppendAddressSpaceXDeref(
- unsigned AddressSpace, SmallVectorImpl<int64_t> &Expr) const {
- Optional<unsigned> DWARFAddressSpace =
+ unsigned AddressSpace, SmallVectorImpl<uint64_t> &Expr) const {
+ std::optional<unsigned> DWARFAddressSpace =
CGM.getTarget().getDWARFAddressSpace(AddressSpace);
if (!DWARFAddressSpace)
return;
Expr.push_back(llvm::dwarf::DW_OP_constu);
- Expr.push_back(DWARFAddressSpace.getValue());
+ Expr.push_back(*DWARFAddressSpace);
Expr.push_back(llvm::dwarf::DW_OP_swap);
Expr.push_back(llvm::dwarf::DW_OP_xderef);
}
@@ -4113,7 +4482,7 @@ void CGDebugInfo::EmitLexicalBlockStart(CGBuilderTy &Builder,
CGM.getLLVMContext(), getLineNumber(Loc), getColumnNumber(Loc),
LexicalBlockStack.back(), CurInlinedAt));
- if (DebugKind <= codegenoptions::DebugLineTablesOnly)
+ if (DebugKind <= llvm::codegenoptions::DebugLineTablesOnly)
return;
// Create a new lexical block and push it on the stack.
@@ -4127,7 +4496,7 @@ void CGDebugInfo::EmitLexicalBlockEnd(CGBuilderTy &Builder,
// Provide an entry in the line table for the end of the block.
EmitLocation(Builder, Loc);
- if (DebugKind <= codegenoptions::DebugLineTablesOnly)
+ if (DebugKind <= llvm::codegenoptions::DebugLineTablesOnly)
return;
LexicalBlockStack.pop_back();
@@ -4189,7 +4558,7 @@ CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
CharUnits Align = CGM.getContext().getDeclAlign(VD);
if (Align > CGM.getContext().toCharUnitsFromBits(
- CGM.getTarget().getPointerAlign(0))) {
+ CGM.getTarget().getPointerAlign(LangAS::Default))) {
CharUnits FieldOffsetInBytes =
CGM.getContext().toCharUnitsFromBits(FieldOffset);
CharUnits AlignedOffsetInBytes = FieldOffsetInBytes.alignTo(Align);
@@ -4198,7 +4567,7 @@ CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
if (NumPaddingBytes.isPositive()) {
llvm::APInt pad(32, NumPaddingBytes.getQuantity());
FType = CGM.getContext().getConstantArrayType(
- CGM.getContext().CharTy, pad, nullptr, ArrayType::Normal, 0);
+ CGM.getContext().CharTy, pad, nullptr, ArraySizeModifier::Normal, 0);
EltTys.push_back(CreateMemberType(Unit, FType, "", &FieldOffset));
}
}
@@ -4223,7 +4592,7 @@ CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
llvm::Value *Storage,
- llvm::Optional<unsigned> ArgNo,
+ std::optional<unsigned> ArgNo,
CGBuilderTy &Builder,
const bool UsePointerValue) {
assert(CGM.getCodeGenOpts().hasReducedDebugInfo());
@@ -4256,21 +4625,21 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
Line = getLineNumber(VD->getLocation());
Column = getColumnNumber(VD->getLocation());
}
- SmallVector<int64_t, 13> Expr;
+ SmallVector<uint64_t, 13> Expr;
llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
if (VD->isImplicit())
Flags |= llvm::DINode::FlagArtificial;
auto Align = getDeclAlignIfRequired(VD, CGM.getContext());
- unsigned AddressSpace = CGM.getContext().getTargetAddressSpace(VD->getType());
+ unsigned AddressSpace = CGM.getTypes().getTargetAddressSpace(VD->getType());
AppendAddressSpaceXDeref(AddressSpace, Expr);
// If this is implicit parameter of CXXThis or ObjCSelf kind, then give it an
// object pointer flag.
if (const auto *IPD = dyn_cast<ImplicitParamDecl>(VD)) {
- if (IPD->getParameterKind() == ImplicitParamDecl::CXXThis ||
- IPD->getParameterKind() == ImplicitParamDecl::ObjCSelf)
+ if (IPD->getParameterKind() == ImplicitParamKind::CXXThis ||
+ IPD->getParameterKind() == ImplicitParamKind::ObjCSelf)
Flags |= llvm::DINode::FlagObjectPointer;
}
@@ -4289,7 +4658,7 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
Expr.push_back(llvm::dwarf::DW_OP_plus_uconst);
// offset of __forwarding field
offset = CGM.getContext().toCharUnitsFromBits(
- CGM.getTarget().getPointerWidth(0));
+ CGM.getTarget().getPointerWidth(LangAS::Default));
Expr.push_back(offset.getQuantity());
Expr.push_back(llvm::dwarf::DW_OP_deref);
Expr.push_back(llvm::dwarf::DW_OP_plus_uconst);
@@ -4337,8 +4706,7 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
// Use DW_OP_deref to tell the debugger to load the pointer and treat it as
// the address of the variable.
if (UsePointerValue) {
- assert(std::find(Expr.begin(), Expr.end(), llvm::dwarf::DW_OP_deref) ==
- Expr.end() &&
+ assert(!llvm::is_contained(Expr, llvm::dwarf::DW_OP_deref) &&
"Debug info already contains DW_OP_deref.");
Expr.push_back(llvm::dwarf::DW_OP_deref);
}
@@ -4346,8 +4714,10 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
// Create the descriptor for the variable.
llvm::DILocalVariable *D = nullptr;
if (ArgNo) {
+ llvm::DINodeArray Annotations = CollectBTFDeclTagAnnotations(VD);
D = DBuilder.createParameterVariable(Scope, Name, *ArgNo, Unit, Line, Ty,
- CGM.getLangOpts().Optimize, Flags);
+ CGM.getLangOpts().Optimize, Flags,
+ Annotations);
} else {
// For normal local variable, we will try to find out whether 'VD' is the
// copy parameter of coroutine.
@@ -4400,12 +4770,147 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
return D;
}
+llvm::DIType *CGDebugInfo::CreateBindingDeclType(const BindingDecl *BD) {
+ llvm::DIFile *Unit = getOrCreateFile(BD->getLocation());
+
+ // If the declaration is bound to a bitfield struct field, its type may have a
+ // size that is different from its deduced declaration type's.
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(BD->getBinding())) {
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ if (FD->isBitField()) {
+ ASTContext &Context = CGM.getContext();
+ const CGRecordLayout &RL =
+ CGM.getTypes().getCGRecordLayout(FD->getParent());
+ const CGBitFieldInfo &Info = RL.getBitFieldInfo(FD);
+
+ // Find an integer type with the same bitwidth as the bitfield size. If
+ // no suitable type is present in the target, give up on producing debug
+ // information as it would be wrong. It is certainly possible to produce
+ // correct debug info, but the logic isn't currently implemented.
+ uint64_t BitfieldSizeInBits = Info.Size;
+ QualType IntTy =
+ Context.getIntTypeForBitwidth(BitfieldSizeInBits, Info.IsSigned);
+ if (IntTy.isNull())
+ return nullptr;
+ Qualifiers Quals = BD->getType().getQualifiers();
+ QualType FinalTy = Context.getQualifiedType(IntTy, Quals);
+ llvm::DIType *Ty = getOrCreateType(FinalTy, Unit);
+ assert(Ty);
+ return Ty;
+ }
+ }
+ }
+
+ return getOrCreateType(BD->getType(), Unit);
+}
+
+llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const BindingDecl *BD,
+ llvm::Value *Storage,
+ std::optional<unsigned> ArgNo,
+ CGBuilderTy &Builder,
+ const bool UsePointerValue) {
+ assert(CGM.getCodeGenOpts().hasReducedDebugInfo());
+ assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
+ if (BD->hasAttr<NoDebugAttr>())
+ return nullptr;
+
+ // Skip the tuple like case, we don't handle that here
+ if (isa<DeclRefExpr>(BD->getBinding()))
+ return nullptr;
+
+ llvm::DIType *Ty = CreateBindingDeclType(BD);
+
+ // If there is no debug info for this type then do not emit debug info
+ // for this variable.
+ if (!Ty)
+ return nullptr;
+
+ auto Align = getDeclAlignIfRequired(BD, CGM.getContext());
+ unsigned AddressSpace = CGM.getTypes().getTargetAddressSpace(BD->getType());
+
+ SmallVector<uint64_t, 3> Expr;
+ AppendAddressSpaceXDeref(AddressSpace, Expr);
+
+ // Clang stores the sret pointer provided by the caller in a static alloca.
+ // Use DW_OP_deref to tell the debugger to load the pointer and treat it as
+ // the address of the variable.
+ if (UsePointerValue) {
+ assert(!llvm::is_contained(Expr, llvm::dwarf::DW_OP_deref) &&
+ "Debug info already contains DW_OP_deref.");
+ Expr.push_back(llvm::dwarf::DW_OP_deref);
+ }
+
+ unsigned Line = getLineNumber(BD->getLocation());
+ unsigned Column = getColumnNumber(BD->getLocation());
+ StringRef Name = BD->getName();
+ auto *Scope = cast<llvm::DIScope>(LexicalBlockStack.back());
+ llvm::DIFile *Unit = getOrCreateFile(BD->getLocation());
+ // Create the descriptor for the variable.
+ llvm::DILocalVariable *D = DBuilder.createAutoVariable(
+ Scope, Name, Unit, Line, Ty, CGM.getLangOpts().Optimize,
+ llvm::DINode::FlagZero, Align);
+
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(BD->getBinding())) {
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ const unsigned fieldIndex = FD->getFieldIndex();
+ const clang::CXXRecordDecl *parent =
+ (const CXXRecordDecl *)FD->getParent();
+ const ASTRecordLayout &layout =
+ CGM.getContext().getASTRecordLayout(parent);
+ const uint64_t fieldOffset = layout.getFieldOffset(fieldIndex);
+
+ if (fieldOffset != 0) {
+ // Currently if the field offset is not a multiple of byte, the produced
+ // location would not be accurate. Therefore give up.
+ if (fieldOffset % CGM.getContext().getCharWidth() != 0)
+ return nullptr;
+
+ Expr.push_back(llvm::dwarf::DW_OP_plus_uconst);
+ Expr.push_back(
+ CGM.getContext().toCharUnitsFromBits(fieldOffset).getQuantity());
+ }
+ }
+ } else if (const ArraySubscriptExpr *ASE =
+ dyn_cast<ArraySubscriptExpr>(BD->getBinding())) {
+ if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(ASE->getIdx())) {
+ const uint64_t value = IL->getValue().getZExtValue();
+ const uint64_t typeSize = CGM.getContext().getTypeSize(BD->getType());
+
+ if (value != 0) {
+ Expr.push_back(llvm::dwarf::DW_OP_plus_uconst);
+ Expr.push_back(CGM.getContext()
+ .toCharUnitsFromBits(value * typeSize)
+ .getQuantity());
+ }
+ }
+ }
+
+ // Insert an llvm.dbg.declare into the current block.
+ DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
+ llvm::DILocation::get(CGM.getLLVMContext(), Line,
+ Column, Scope, CurInlinedAt),
+ Builder.GetInsertBlock());
+
+ return D;
+}
+
llvm::DILocalVariable *
CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD, llvm::Value *Storage,
CGBuilderTy &Builder,
const bool UsePointerValue) {
assert(CGM.getCodeGenOpts().hasReducedDebugInfo());
- return EmitDeclare(VD, Storage, llvm::None, Builder, UsePointerValue);
+
+ if (auto *DD = dyn_cast<DecompositionDecl>(VD)) {
+ for (auto *B : DD->bindings()) {
+ EmitDeclare(B, Storage, std::nullopt, Builder,
+ VD->getType()->isReferenceType());
+ }
+ // Don't emit an llvm.dbg.declare for the composite storage as it doesn't
+ // correspond to a user variable.
+ return nullptr;
+ }
+
+ return EmitDeclare(VD, Storage, std::nullopt, Builder, UsePointerValue);
}
void CGDebugInfo::EmitLabel(const LabelDecl *D, CGBuilderTy &Builder) {
@@ -4467,7 +4972,7 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
// Self is passed along as an implicit non-arg variable in a
// block. Mark it as the object pointer.
if (const auto *IPD = dyn_cast<ImplicitParamDecl>(VD))
- if (IPD->getParameterKind() == ImplicitParamDecl::ObjCSelf)
+ if (IPD->getParameterKind() == ImplicitParamKind::ObjCSelf)
Ty = CreateSelfType(VD->getType(), Ty);
// Get location information.
@@ -4481,7 +4986,7 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
target.getStructLayout(blockInfo.StructureType)
->getElementOffset(blockInfo.getCapture(VD).getIndex()));
- SmallVector<int64_t, 9> addr;
+ SmallVector<uint64_t, 9> addr;
addr.push_back(llvm::dwarf::DW_OP_deref);
addr.push_back(llvm::dwarf::DW_OP_plus_uconst);
addr.push_back(offset.getQuantity());
@@ -4517,9 +5022,10 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
llvm::DILocalVariable *
CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI,
- unsigned ArgNo, CGBuilderTy &Builder) {
+ unsigned ArgNo, CGBuilderTy &Builder,
+ bool UsePointerValue) {
assert(CGM.getCodeGenOpts().hasReducedDebugInfo());
- return EmitDeclare(VD, AI, ArgNo, Builder);
+ return EmitDeclare(VD, AI, ArgNo, Builder, UsePointerValue);
}
namespace {
@@ -4653,7 +5159,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
llvm::DIType *fieldType;
if (capture->isByRef()) {
TypeInfo PtrInfo = C.getTypeInfo(C.VoidPtrTy);
- auto Align = PtrInfo.AlignIsRequired ? PtrInfo.Align : 0;
+ auto Align = PtrInfo.isAlignRequired() ? PtrInfo.Align : 0;
// FIXME: This recomputes the layout of the BlockByRefWrapper.
uint64_t xoffset;
fieldType =
@@ -4740,14 +5246,231 @@ llvm::DIGlobalVariableExpression *CGDebugInfo::CollectAnonRecordDecls(
return GVE;
}
+static bool ReferencesAnonymousEntity(ArrayRef<TemplateArgument> Args);
+static bool ReferencesAnonymousEntity(RecordType *RT) {
+ // Unnamed classes/lambdas can't be reconstituted due to a lack of column
+ // info we produce in the DWARF, so we can't get Clang's full name back.
+ // But so long as it's not one of those, it doesn't matter if some sub-type
+ // of the record (a template parameter) can't be reconstituted - because the
+ // un-reconstitutable type itself will carry its own name.
+ const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD)
+ return false;
+ if (!RD->getIdentifier())
+ return true;
+ auto *TSpecial = dyn_cast<ClassTemplateSpecializationDecl>(RD);
+ if (!TSpecial)
+ return false;
+ return ReferencesAnonymousEntity(TSpecial->getTemplateArgs().asArray());
+}
+static bool ReferencesAnonymousEntity(ArrayRef<TemplateArgument> Args) {
+ return llvm::any_of(Args, [&](const TemplateArgument &TA) {
+ switch (TA.getKind()) {
+ case TemplateArgument::Pack:
+ return ReferencesAnonymousEntity(TA.getPackAsArray());
+ case TemplateArgument::Type: {
+ struct ReferencesAnonymous
+ : public RecursiveASTVisitor<ReferencesAnonymous> {
+ bool RefAnon = false;
+ bool VisitRecordType(RecordType *RT) {
+ if (ReferencesAnonymousEntity(RT)) {
+ RefAnon = true;
+ return false;
+ }
+ return true;
+ }
+ };
+ ReferencesAnonymous RT;
+ RT.TraverseType(TA.getAsType());
+ if (RT.RefAnon)
+ return true;
+ break;
+ }
+ default:
+ break;
+ }
+ return false;
+ });
+}
+namespace {
+struct ReconstitutableType : public RecursiveASTVisitor<ReconstitutableType> {
+ bool Reconstitutable = true;
+ bool VisitVectorType(VectorType *FT) {
+ Reconstitutable = false;
+ return false;
+ }
+ bool VisitAtomicType(AtomicType *FT) {
+ Reconstitutable = false;
+ return false;
+ }
+ bool VisitType(Type *T) {
+ // _BitInt(N) isn't reconstitutable because the bit width isn't encoded in
+ // the DWARF, only the byte width.
+ if (T->isBitIntType()) {
+ Reconstitutable = false;
+ return false;
+ }
+ return true;
+ }
+ bool TraverseEnumType(EnumType *ET) {
+ // Unnamed enums can't be reconstituted due to a lack of column info we
+ // produce in the DWARF, so we can't get Clang's full name back.
+ if (const auto *ED = dyn_cast<EnumDecl>(ET->getDecl())) {
+ if (!ED->getIdentifier()) {
+ Reconstitutable = false;
+ return false;
+ }
+ if (!ED->isExternallyVisible()) {
+ Reconstitutable = false;
+ return false;
+ }
+ }
+ return true;
+ }
+ bool VisitFunctionProtoType(FunctionProtoType *FT) {
+ // noexcept is not encoded in DWARF, so the reversi
+ Reconstitutable &= !isNoexceptExceptionSpec(FT->getExceptionSpecType());
+ Reconstitutable &= !FT->getNoReturnAttr();
+ return Reconstitutable;
+ }
+ bool VisitRecordType(RecordType *RT) {
+ if (ReferencesAnonymousEntity(RT)) {
+ Reconstitutable = false;
+ return false;
+ }
+ return true;
+ }
+};
+} // anonymous namespace
+
+// Test whether a type name could be rebuilt from emitted debug info.
+static bool IsReconstitutableType(QualType QT) {
+ ReconstitutableType T;
+ T.TraverseType(QT);
+ return T.Reconstitutable;
+}
+
std::string CGDebugInfo::GetName(const Decl *D, bool Qualified) const {
std::string Name;
llvm::raw_string_ostream OS(Name);
- if (const NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
- PrintingPolicy PP = getPrintingPolicy();
- PP.PrintCanonicalTypes = true;
- PP.SuppressInlineNamespace = false;
+ const NamedDecl *ND = dyn_cast<NamedDecl>(D);
+ if (!ND)
+ return Name;
+ llvm::codegenoptions::DebugTemplateNamesKind TemplateNamesKind =
+ CGM.getCodeGenOpts().getDebugSimpleTemplateNames();
+
+ if (!CGM.getCodeGenOpts().hasReducedDebugInfo())
+ TemplateNamesKind = llvm::codegenoptions::DebugTemplateNamesKind::Full;
+
+ std::optional<TemplateArgs> Args;
+
+ bool IsOperatorOverload = false; // isa<CXXConversionDecl>(ND);
+ if (auto *RD = dyn_cast<CXXRecordDecl>(ND)) {
+ Args = GetTemplateArgs(RD);
+ } else if (auto *FD = dyn_cast<FunctionDecl>(ND)) {
+ Args = GetTemplateArgs(FD);
+ auto NameKind = ND->getDeclName().getNameKind();
+ IsOperatorOverload |=
+ NameKind == DeclarationName::CXXOperatorName ||
+ NameKind == DeclarationName::CXXConversionFunctionName;
+ } else if (auto *VD = dyn_cast<VarDecl>(ND)) {
+ Args = GetTemplateArgs(VD);
+ }
+ std::function<bool(ArrayRef<TemplateArgument>)> HasReconstitutableArgs =
+ [&](ArrayRef<TemplateArgument> Args) {
+ return llvm::all_of(Args, [&](const TemplateArgument &TA) {
+ switch (TA.getKind()) {
+ case TemplateArgument::Template:
+ // Easy to reconstitute - the value of the parameter in the debug
+ // info is the string name of the template. (so the template name
+ // itself won't benefit from any name rebuilding, but that's a
+ // representational limitation - maybe DWARF could be
+ // changed/improved to use some more structural representation)
+ return true;
+ case TemplateArgument::Declaration:
+ // Reference and pointer non-type template parameters point to
+ // variables, functions, etc and their value is, at best (for
+ // variables) represented as an address - not a reference to the
+ // DWARF describing the variable/function/etc. This makes it hard,
+ // possibly impossible to rebuild the original name - looking up the
+ // address in the executable file's symbol table would be needed.
+ return false;
+ case TemplateArgument::NullPtr:
+ // These could be rebuilt, but figured they're close enough to the
+ // declaration case, and not worth rebuilding.
+ return false;
+ case TemplateArgument::Pack:
+ // A pack is invalid if any of the elements of the pack are invalid.
+ return HasReconstitutableArgs(TA.getPackAsArray());
+ case TemplateArgument::Integral:
+ // Larger integers get encoded as DWARF blocks which are a bit
+ // harder to parse back into a large integer, etc - so punting on
+ // this for now. Re-parsing the integers back into APInt is probably
+ // feasible some day.
+ return TA.getAsIntegral().getBitWidth() <= 64 &&
+ IsReconstitutableType(TA.getIntegralType());
+ case TemplateArgument::StructuralValue:
+ return false;
+ case TemplateArgument::Type:
+ return IsReconstitutableType(TA.getAsType());
+ default:
+ llvm_unreachable("Other, unresolved, template arguments should "
+ "not be seen here");
+ }
+ });
+ };
+ // A conversion operator presents complications/ambiguity if there's a
+ // conversion to class template that is itself a template, eg:
+ // template<typename T>
+ // operator ns::t1<T, int>();
+ // This should be named, eg: "operator ns::t1<float, int><float>"
+ // (ignoring clang bug that means this is currently "operator t1<float>")
+ // but if the arguments were stripped, the consumer couldn't differentiate
+ // whether the template argument list for the conversion type was the
+ // function's argument list (& no reconstitution was needed) or not.
+ // This could be handled if reconstitutable names had a separate attribute
+ // annotating them as such - this would remove the ambiguity.
+ //
+ // Alternatively the template argument list could be parsed enough to check
+ // whether there's one list or two, then compare that with the DWARF
+ // description of the return type and the template argument lists to determine
+ // how many lists there should be and if one is missing it could be assumed(?)
+ // to be the function's template argument list & then be rebuilt.
+ //
+ // Other operator overloads that aren't conversion operators could be
+ // reconstituted but would require a bit more nuance about detecting the
+ // difference between these different operators during that rebuilding.
+ bool Reconstitutable =
+ Args && HasReconstitutableArgs(Args->Args) && !IsOperatorOverload;
+
+ PrintingPolicy PP = getPrintingPolicy();
+
+ if (TemplateNamesKind == llvm::codegenoptions::DebugTemplateNamesKind::Full ||
+ !Reconstitutable) {
ND->getNameForDiagnostic(OS, PP, Qualified);
+ } else {
+ bool Mangled = TemplateNamesKind ==
+ llvm::codegenoptions::DebugTemplateNamesKind::Mangled;
+ // check if it's a template
+ if (Mangled)
+ OS << "_STN|";
+
+ OS << ND->getDeclName();
+ std::string EncodedOriginalName;
+ llvm::raw_string_ostream EncodedOriginalNameOS(EncodedOriginalName);
+ EncodedOriginalNameOS << ND->getDeclName();
+
+ if (Mangled) {
+ OS << "|";
+ printTemplateArgumentList(OS, Args->Args, PP);
+ printTemplateArgumentList(EncodedOriginalNameOS, Args->Args, PP);
+#ifndef NDEBUG
+ std::string CanonicalOriginalName;
+ llvm::raw_string_ostream OriginalOS(CanonicalOriginalName);
+ ND->getNameForDiagnostic(OriginalOS, PP, Qualified);
+ assert(EncodedOriginalNameOS.str() == OriginalOS.str());
+#endif
+ }
}
return Name;
}
@@ -4794,9 +5517,8 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
} else {
auto Align = getDeclAlignIfRequired(D, CGM.getContext());
- SmallVector<int64_t, 4> Expr;
- unsigned AddressSpace =
- CGM.getContext().getTargetAddressSpace(D->getType());
+ SmallVector<uint64_t, 4> Expr;
+ unsigned AddressSpace = CGM.getTypes().getTargetAddressSpace(D->getType());
if (CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) {
if (D->hasAttr<CUDASharedAttr>())
AddressSpace =
@@ -4807,12 +5529,13 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
}
AppendAddressSpaceXDeref(AddressSpace, Expr);
+ llvm::DINodeArray Annotations = CollectBTFDeclTagAnnotations(D);
GVE = DBuilder.createGlobalVariableExpression(
DContext, DeclName, LinkageName, Unit, LineNo, getOrCreateType(T, Unit),
Var->hasLocalLinkage(), true,
Expr.empty() ? nullptr : DBuilder.createExpression(Expr),
getOrCreateStaticDataMemberDeclarationOrNull(D), TemplateParameters,
- Align);
+ Align, Annotations);
Var->addDebugInfo(GVE);
}
DeclCache[D->getCanonicalDecl()].reset(GVE);
@@ -4878,17 +5601,8 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, const APValue &Init) {
auto &GV = DeclCache[VD];
if (GV)
return;
- llvm::DIExpression *InitExpr = nullptr;
- if (CGM.getContext().getTypeSize(VD->getType()) <= 64) {
- // FIXME: Add a representation for integer constants wider than 64 bits.
- if (Init.isInt())
- InitExpr =
- DBuilder.createConstantValueExpression(Init.getInt().getExtValue());
- else if (Init.isFloat())
- InitExpr = DBuilder.createConstantValueExpression(
- Init.getFloat().bitcastToAPInt().getZExtValue());
- }
+ llvm::DIExpression *InitExpr = createConstantValueExpression(VD, Init);
llvm::MDTuple *TemplateParameters = nullptr;
if (isa<VarTemplateSpecializationDecl>(VD))
@@ -4922,6 +5636,62 @@ void CGDebugInfo::EmitExternalVariable(llvm::GlobalVariable *Var,
Var->addDebugInfo(GVE);
}
+void CGDebugInfo::EmitGlobalAlias(const llvm::GlobalValue *GV,
+ const GlobalDecl GD) {
+
+ assert(GV);
+
+ if (!CGM.getCodeGenOpts().hasReducedDebugInfo())
+ return;
+
+ const auto *D = cast<ValueDecl>(GD.getDecl());
+ if (D->hasAttr<NoDebugAttr>())
+ return;
+
+ auto AliaseeDecl = CGM.getMangledNameDecl(GV->getName());
+ llvm::DINode *DI;
+
+ if (!AliaseeDecl)
+ // FIXME: Aliasee not declared yet - possibly declared later
+ // For example,
+ //
+ // 1 extern int newname __attribute__((alias("oldname")));
+ // 2 int oldname = 1;
+ //
+ // No debug info would be generated for 'newname' in this case.
+ //
+ // Fix compiler to generate "newname" as imported_declaration
+ // pointing to the DIE of "oldname".
+ return;
+ if (!(DI = getDeclarationOrDefinition(
+ AliaseeDecl.getCanonicalDecl().getDecl())))
+ return;
+
+ llvm::DIScope *DContext = getDeclContextDescriptor(D);
+ auto Loc = D->getLocation();
+
+ llvm::DIImportedEntity *ImportDI = DBuilder.createImportedDeclaration(
+ DContext, DI, getOrCreateFile(Loc), getLineNumber(Loc), D->getName());
+
+ // Record this DIE in the cache for nested declaration reference.
+ ImportedDeclCache[GD.getCanonicalDecl().getDecl()].reset(ImportDI);
+}
+
+void CGDebugInfo::AddStringLiteralDebugInfo(llvm::GlobalVariable *GV,
+ const StringLiteral *S) {
+ SourceLocation Loc = S->getStrTokenLoc(0);
+ PresumedLoc PLoc = CGM.getContext().getSourceManager().getPresumedLoc(Loc);
+ if (!PLoc.isValid())
+ return;
+
+ llvm::DIFile *File = getOrCreateFile(Loc);
+ llvm::DIGlobalVariableExpression *Debug =
+ DBuilder.createGlobalVariableExpression(
+ nullptr, StringRef(), StringRef(), getOrCreateFile(Loc),
+ getLineNumber(Loc), getOrCreateType(S->getType(), File), true);
+ GV->addDebugInfo(Debug);
+}
+
llvm::DIScope *CGDebugInfo::getCurrentContextDescriptor(const Decl *D) {
if (!LexicalBlockStack.empty())
return LexicalBlockStack.back();
@@ -5151,8 +5921,9 @@ llvm::DebugLoc CGDebugInfo::SourceLocToDebugLoc(SourceLocation Loc) {
llvm::DINode::DIFlags CGDebugInfo::getCallSiteRelatedAttrs() const {
// Call site-related attributes are only useful in optimized programs, and
// when there's a possibility of debugging backtraces.
- if (!CGM.getLangOpts().Optimize || DebugKind == codegenoptions::NoDebugInfo ||
- DebugKind == codegenoptions::LocTrackingOnly)
+ if (!CGM.getLangOpts().Optimize ||
+ DebugKind == llvm::codegenoptions::NoDebugInfo ||
+ DebugKind == llvm::codegenoptions::LocTrackingOnly)
return llvm::DINode::FlagZero;
// Call site-related attributes are available in DWARF v5. Some debuggers,
@@ -5168,3 +5939,32 @@ llvm::DINode::DIFlags CGDebugInfo::getCallSiteRelatedAttrs() const {
return llvm::DINode::FlagAllCallsDescribed;
}
+
+llvm::DIExpression *
+CGDebugInfo::createConstantValueExpression(const clang::ValueDecl *VD,
+ const APValue &Val) {
+ // FIXME: Add a representation for integer constants wider than 64 bits.
+ if (CGM.getContext().getTypeSize(VD->getType()) > 64)
+ return nullptr;
+
+ if (Val.isFloat())
+ return DBuilder.createConstantValueExpression(
+ Val.getFloat().bitcastToAPInt().getZExtValue());
+
+ if (!Val.isInt())
+ return nullptr;
+
+ llvm::APSInt const &ValInt = Val.getInt();
+ std::optional<uint64_t> ValIntOpt;
+ if (ValInt.isUnsigned())
+ ValIntOpt = ValInt.tryZExtValue();
+ else if (auto tmp = ValInt.trySExtValue())
+ // Transform a signed optional to unsigned optional. When cpp 23 comes,
+ // use std::optional::transform
+ ValIntOpt = static_cast<uint64_t>(*tmp);
+
+ if (ValIntOpt)
+ return DBuilder.createConstantValueExpression(ValIntOpt.value());
+
+ return nullptr;
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
index b01165f85a6c..7b60e94555d0 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
@@ -25,11 +25,11 @@
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Allocator.h"
+#include <optional>
namespace llvm {
class MDNode;
@@ -40,7 +40,6 @@ class ClassTemplateSpecializationDecl;
class GlobalDecl;
class ModuleMap;
class ObjCInterfaceDecl;
-class ObjCIvarDecl;
class UsingDecl;
class VarDecl;
enum class DynamicInitKind : unsigned;
@@ -57,7 +56,7 @@ class CGDebugInfo {
friend class ApplyDebugLocation;
friend class SaveAndRestoreLocation;
CodeGenModule &CGM;
- const codegenoptions::DebugInfoKind DebugKind;
+ const llvm::codegenoptions::DebugInfoKind DebugKind;
bool DebugTypeExtRefs;
llvm::DIBuilder DBuilder;
llvm::DICompileUnit *TheCU = nullptr;
@@ -81,13 +80,12 @@ class CGDebugInfo {
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
llvm::DIType *Id##Ty = nullptr;
#include "clang/Basic/OpenCLExtensionTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) llvm::DIType *SingletonId = nullptr;
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
/// Cache of previously constructed Types.
llvm::DenseMap<const void *, llvm::TrackingMDRef> TypeCache;
- std::map<llvm::StringRef, llvm::StringRef, std::greater<llvm::StringRef>>
- DebugPrefixMap;
-
/// Cache that maps VLA types to size expressions for that type,
/// represented by instantiated Metadata nodes.
llvm::SmallDenseMap<QualType, llvm::Metadata *> SizeExprCache;
@@ -153,8 +151,10 @@ class CGDebugInfo {
llvm::DenseMap<const char *, llvm::TrackingMDRef> DIFileCache;
llvm::DenseMap<const FunctionDecl *, llvm::TrackingMDRef> SPCache;
/// Cache declarations relevant to DW_TAG_imported_declarations (C++
- /// using declarations) that aren't covered by other more specific caches.
+ /// using declarations and global alias variables) that aren't covered
+ /// by other more specific caches.
llvm::DenseMap<const Decl *, llvm::TrackingMDRef> DeclCache;
+ llvm::DenseMap<const Decl *, llvm::TrackingMDRef> ImportedDeclCache;
llvm::DenseMap<const NamespaceDecl *, llvm::TrackingMDRef> NamespaceCache;
llvm::DenseMap<const NamespaceAliasDecl *, llvm::TrackingMDRef>
NamespaceAliasCache;
@@ -176,9 +176,10 @@ class CGDebugInfo {
/// ivars and property accessors.
llvm::DIType *CreateType(const BuiltinType *Ty);
llvm::DIType *CreateType(const ComplexType *Ty);
- llvm::DIType *CreateType(const AutoType *Ty);
- llvm::DIType *CreateType(const ExtIntType *Ty);
+ llvm::DIType *CreateType(const BitIntType *Ty);
llvm::DIType *CreateQualifiedType(QualType Ty, llvm::DIFile *Fg);
+ llvm::DIType *CreateQualifiedType(const FunctionProtoType *Ty,
+ llvm::DIFile *Fg);
llvm::DIType *CreateType(const TypedefType *Ty, llvm::DIFile *Fg);
llvm::DIType *CreateType(const TemplateSpecializationType *Ty,
llvm::DIFile *Fg);
@@ -188,7 +189,15 @@ class CGDebugInfo {
llvm::DIType *CreateType(const FunctionType *Ty, llvm::DIFile *F);
/// Get structure or union type.
llvm::DIType *CreateType(const RecordType *Tyg);
- llvm::DIType *CreateTypeDefinition(const RecordType *Ty);
+
+ /// Create definition for the specified 'Ty'.
+ ///
+ /// \returns A pair of 'llvm::DIType's. The first is the definition
+ /// of the 'Ty'. The second is the type specified by the preferred_name
+ /// attribute on 'Ty', which can be a nullptr if no such attribute
+ /// exists.
+ std::pair<llvm::DIType *, llvm::DIType *>
+ CreateTypeDefinition(const RecordType *Ty);
llvm::DICompositeType *CreateLimitedType(const RecordType *Ty);
void CollectContainingType(const CXXRecordDecl *RD,
llvm::DICompositeType *CT);
@@ -228,10 +237,10 @@ class CGDebugInfo {
/// not updated to include implicit \c this pointer. Use this routine
/// to get a method type which includes \c this pointer.
llvm::DISubroutineType *getOrCreateMethodType(const CXXMethodDecl *Method,
- llvm::DIFile *F, bool decl);
+ llvm::DIFile *F);
llvm::DISubroutineType *
getOrCreateInstanceMethodType(QualType ThisPtr, const FunctionProtoType *Func,
- llvm::DIFile *Unit, bool decl);
+ llvm::DIFile *Unit);
llvm::DISubroutineType *
getOrCreateFunctionType(const Decl *D, QualType FnType, llvm::DIFile *F);
/// \return debug info descriptor for vtable.
@@ -272,9 +281,18 @@ class CGDebugInfo {
llvm::DenseSet<CanonicalDeclPtr<const CXXRecordDecl>> &SeenTypes,
llvm::DINode::DIFlags StartingFlags);
+ /// Helper function that returns the llvm::DIType that the
+ /// PreferredNameAttr attribute on \ref RD refers to. If no such
+ /// attribute exists, returns nullptr.
+ llvm::DIType *GetPreferredNameType(const CXXRecordDecl *RD,
+ llvm::DIFile *Unit);
+
+ struct TemplateArgs {
+ const TemplateParameterList *TList;
+ llvm::ArrayRef<TemplateArgument> Args;
+ };
/// A helper function to collect template parameters.
- llvm::DINodeArray CollectTemplateParams(const TemplateParameterList *TPList,
- ArrayRef<TemplateArgument> TAList,
+ llvm::DINodeArray CollectTemplateParams(std::optional<TemplateArgs> Args,
llvm::DIFile *Unit);
/// A helper function to collect debug info for function template
/// parameters.
@@ -286,17 +304,24 @@ class CGDebugInfo {
llvm::DINodeArray CollectVarTemplateParams(const VarDecl *VD,
llvm::DIFile *Unit);
+ std::optional<TemplateArgs> GetTemplateArgs(const VarDecl *) const;
+ std::optional<TemplateArgs> GetTemplateArgs(const RecordDecl *) const;
+ std::optional<TemplateArgs> GetTemplateArgs(const FunctionDecl *) const;
+
/// A helper function to collect debug info for template
/// parameters.
- llvm::DINodeArray
- CollectCXXTemplateParams(const ClassTemplateSpecializationDecl *TS,
- llvm::DIFile *F);
+ llvm::DINodeArray CollectCXXTemplateParams(const RecordDecl *TS,
+ llvm::DIFile *F);
+
+ /// A helper function to collect debug info for btf_decl_tag annotations.
+ llvm::DINodeArray CollectBTFDeclTagAnnotations(const Decl *D);
llvm::DIType *createFieldType(StringRef name, QualType type,
SourceLocation loc, AccessSpecifier AS,
uint64_t offsetInBits, uint32_t AlignInBits,
llvm::DIFile *tunit, llvm::DIScope *scope,
- const RecordDecl *RD = nullptr);
+ const RecordDecl *RD = nullptr,
+ llvm::DINodeArray Annotations = nullptr);
llvm::DIType *createFieldType(StringRef name, QualType type,
SourceLocation loc, AccessSpecifier AS,
@@ -308,9 +333,18 @@ class CGDebugInfo {
}
/// Create new bit field member.
- llvm::DIType *createBitFieldType(const FieldDecl *BitFieldDecl,
- llvm::DIScope *RecordTy,
- const RecordDecl *RD);
+ llvm::DIDerivedType *createBitFieldType(const FieldDecl *BitFieldDecl,
+ llvm::DIScope *RecordTy,
+ const RecordDecl *RD);
+
+ /// Create type for binding declarations.
+ llvm::DIType *CreateBindingDeclType(const BindingDecl *BD);
+
+ /// Create an anonnymous zero-size separator for bit-field-decl if needed on
+ /// the target.
+ llvm::DIDerivedType *createBitFieldSeparatorIfNeeded(
+ const FieldDecl *BitFieldDecl, const llvm::DIDerivedType *BitFieldDI,
+ llvm::ArrayRef<llvm::Metadata *> PreviousFieldsDI, const RecordDecl *RD);
/// Helpers for collecting fields of a record.
/// @{
@@ -346,7 +380,7 @@ class CGDebugInfo {
/// Extended dereferencing mechanism is has the following format:
/// DW_OP_constu <DWARF Address Space> DW_OP_swap DW_OP_xderef
void AppendAddressSpaceXDeref(unsigned AddressSpace,
- SmallVectorImpl<int64_t> &Expr) const;
+ SmallVectorImpl<uint64_t> &Expr) const;
/// A helper function to collect debug info for the default elements of a
/// block.
@@ -417,6 +451,9 @@ public:
/// location will be reused.
void EmitLocation(CGBuilderTy &Builder, SourceLocation Loc);
+ QualType getFunctionType(const FunctionDecl *FD, QualType RetTy,
+ const SmallVectorImpl<const VarDecl *> &Args);
+
/// Emit a call to llvm.dbg.function.start to indicate
/// start of a new function.
/// \param Loc The location of the function header.
@@ -472,10 +509,9 @@ public:
/// Emit call to \c llvm.dbg.declare for an argument variable
/// declaration.
- llvm::DILocalVariable *EmitDeclareOfArgVariable(const VarDecl *Decl,
- llvm::Value *AI,
- unsigned ArgNo,
- CGBuilderTy &Builder);
+ llvm::DILocalVariable *
+ EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI, unsigned ArgNo,
+ CGBuilderTy &Builder, bool UsePointerValue = false);
/// Emit call to \c llvm.dbg.declare for the block-literal argument
/// to a block invocation function.
@@ -493,6 +529,9 @@ public:
/// Emit information about an external variable.
void EmitExternalVariable(llvm::GlobalVariable *GV, const VarDecl *Decl);
+ /// Emit information about global variable alias.
+ void EmitGlobalAlias(const llvm::GlobalValue *GV, const GlobalDecl Decl);
+
/// Emit C++ using directive.
void EmitUsingDirective(const UsingDirectiveDecl &UD);
@@ -514,6 +553,14 @@ public:
/// Emit an @import declaration.
void EmitImportDecl(const ImportDecl &ID);
+ /// DebugInfo isn't attached to string literals by default. While certain
+ /// aspects of debuginfo aren't useful for string literals (like a name), it's
+ /// nice to be able to symbolize the line and column information. This is
+ /// especially useful for sanitizers, as it allows symbolization of
+ /// heap-buffer-overflows on constant strings.
+ void AddStringLiteralDebugInfo(llvm::GlobalVariable *GV,
+ const StringLiteral *S);
+
/// Emit C++ namespace alias.
llvm::DIImportedEntity *EmitNamespaceAlias(const NamespaceAliasDecl &NA);
@@ -560,7 +607,15 @@ private:
/// Returns a pointer to the DILocalVariable associated with the
/// llvm.dbg.declare, or nullptr otherwise.
llvm::DILocalVariable *EmitDeclare(const VarDecl *decl, llvm::Value *AI,
- llvm::Optional<unsigned> ArgNo,
+ std::optional<unsigned> ArgNo,
+ CGBuilderTy &Builder,
+ const bool UsePointerValue = false);
+
+ /// Emit call to llvm.dbg.declare for a binding declaration.
+ /// Returns a pointer to the DILocalVariable associated with the
+ /// llvm.dbg.declare, or nullptr otherwise.
+ llvm::DILocalVariable *EmitDeclare(const BindingDecl *decl, llvm::Value *AI,
+ std::optional<unsigned> ArgNo,
CGBuilderTy &Builder,
const bool UsePointerValue = false);
@@ -596,11 +651,11 @@ private:
void CreateCompileUnit();
/// Compute the file checksum debug info for input file ID.
- Optional<llvm::DIFile::ChecksumKind>
- computeChecksum(FileID FID, SmallString<32> &Checksum) const;
+ std::optional<llvm::DIFile::ChecksumKind>
+ computeChecksum(FileID FID, SmallString<64> &Checksum) const;
/// Get the source of the given file ID.
- Optional<StringRef> getSource(const SourceManager &SM, FileID FID);
+ std::optional<StringRef> getSource(const SourceManager &SM, FileID FID);
/// Convenience function to get the file debug info descriptor for the input
/// location.
@@ -609,8 +664,8 @@ private:
/// Create a file debug info descriptor for a source file.
llvm::DIFile *
createFile(StringRef FileName,
- Optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo,
- Optional<StringRef> Source);
+ std::optional<llvm::DIFile::ChecksumInfo<StringRef>> CSInfo,
+ std::optional<StringRef> Source);
/// Get the type from the cache or create a new type if necessary.
llvm::DIType *getOrCreateType(QualType Ty, llvm::DIFile *Fg);
@@ -745,6 +800,11 @@ private:
llvm::MDTuple *&TemplateParameters,
llvm::DIScope *&VDContext);
+ /// Create a DIExpression representing the constant corresponding
+ /// to the specified 'Val'. Returns nullptr on failure.
+ llvm::DIExpression *createConstantValueExpression(const clang::ValueDecl *VD,
+ const APValue &Val);
+
/// Allocate a copy of \p A using the DebugInfoNames allocator
/// and return a reference to it. If multiple arguments are given the strings
/// are concatenated.
@@ -777,7 +837,15 @@ public:
ApplyDebugLocation(ApplyDebugLocation &&Other) : CGF(Other.CGF) {
Other.CGF = nullptr;
}
- ApplyDebugLocation &operator=(ApplyDebugLocation &&) = default;
+
+ // Define copy assignment operator.
+ ApplyDebugLocation &operator=(ApplyDebugLocation &&Other) {
+ if (this != &Other) {
+ CGF = Other.CGF;
+ Other.CGF = nullptr;
+ }
+ return *this;
+ }
~ApplyDebugLocation();
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
index 5b3d39f20b41..aa9997b87ecf 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
@@ -37,6 +37,7 @@
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
+#include <optional>
using namespace clang;
using namespace CodeGen;
@@ -90,16 +91,17 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::Export:
case Decl::ObjCPropertyImpl:
case Decl::FileScopeAsm:
+ case Decl::TopLevelStmt:
case Decl::Friend:
case Decl::FriendTemplate:
case Decl::Block:
case Decl::Captured:
- case Decl::ClassScopeFunctionSpecialization:
case Decl::UsingShadow:
case Decl::ConstructorUsingShadow:
case Decl::ObjCTypeParam:
case Decl::Binding:
case Decl::UnresolvedUsingIfExists:
+ case Decl::HLSLBuffer:
llvm_unreachable("Declaration should not be in declstmts!");
case Decl::Record: // struct/union/class X;
case Decl::CXXRecord: // struct/union/class X; [C++]
@@ -118,6 +120,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::Label: // __label__ x;
case Decl::Import:
case Decl::MSGuid: // __declspec(uuid("..."))
+ case Decl::UnnamedGlobalConstant:
case Decl::TemplateParamObject:
case Decl::OMPThreadPrivate:
case Decl::OMPAllocate:
@@ -125,6 +128,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::OMPRequires:
case Decl::Empty:
case Decl::Concept:
+ case Decl::ImplicitConceptSpecialization:
case Decl::LifetimeExtendedTemporary:
case Decl::RequiresExprBody:
// None of these decls require codegen support.
@@ -197,7 +201,7 @@ void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
return;
llvm::GlobalValue::LinkageTypes Linkage =
- CGM.getLLVMLinkageVarDefinition(&D, /*IsConstant=*/false);
+ CGM.getLLVMLinkageVarDefinition(&D);
// FIXME: We need to force the emission/use of a guard variable for
// some variables even if we can constant-evaluate them because
@@ -287,7 +291,8 @@ llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
if (AS != ExpectedAS) {
Addr = getTargetCodeGenInfo().performAddrSpaceCast(
*this, GV, AS, ExpectedAS,
- LTy->getPointerTo(getContext().getTargetAddressSpace(ExpectedAS)));
+ llvm::PointerType::get(getLLVMContext(),
+ getContext().getTargetAddressSpace(ExpectedAS)));
}
setStaticLocalDeclAddress(&D, Addr);
@@ -341,6 +346,8 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
if (!Init) {
if (!getLangOpts().CPlusPlus)
CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
+ else if (D.hasFlexibleArrayInit(getContext()))
+ CGM.ErrorUnsupported(D.getInit(), "flexible array initializer");
else if (HaveInsertPoint()) {
// Since we have a static initializer, this global variable can't
// be constant.
@@ -351,6 +358,14 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
return GV;
}
+#ifndef NDEBUG
+ CharUnits VarSize = CGM.getContext().getTypeSizeInChars(D.getType()) +
+ D.getFlexibleArrayInitChars(getContext());
+ CharUnits CstSize = CharUnits::fromQuantity(
+ CGM.getDataLayout().getTypeAllocSize(Init->getType()));
+ assert(VarSize == CstSize && "Emitted constant has unexpected size");
+#endif
+
// The initializer may differ in type from the global. Rewrite
// the global to match the initializer. (We have to do this
// because some types, like unions, can't be completely represented
@@ -371,21 +386,22 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
GV->takeName(OldGV);
// Replace all uses of the old global with the new global
- llvm::Constant *NewPtrForOldDecl =
- llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
- OldGV->replaceAllUsesWith(NewPtrForOldDecl);
+ OldGV->replaceAllUsesWith(GV);
// Erase the old global, since it is no longer used.
OldGV->eraseFromParent();
}
- GV->setConstant(CGM.isTypeConstant(D.getType(), true));
+ bool NeedsDtor =
+ D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;
+
+ GV->setConstant(
+ D.getType().isConstantStorage(getContext(), true, !NeedsDtor));
GV->setInitializer(Init);
emitter.finalize(GV);
- if (D.needsDestruction(getContext()) == QualType::DK_cxx_destructor &&
- HaveInsertPoint()) {
+ if (NeedsDtor && HaveInsertPoint()) {
// We have a constant initializer, but a nontrivial destructor. We still
// need to perform a guarded "initialization" in order to register the
// destructor.
@@ -405,7 +421,8 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
// Store into LocalDeclMap before generating initializer to handle
// circular references.
- setAddrOfLocalVar(&D, Address(addr, alignment));
+ llvm::Type *elemTy = ConvertTypeForMem(D.getType());
+ setAddrOfLocalVar(&D, Address(addr, elemTy, alignment));
// We can't have a VLA here, but we can have a pointer to a VLA,
// even though that doesn't really make any sense.
@@ -451,6 +468,9 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
else if (D.hasAttr<UsedAttr>())
CGM.addUsedOrCompilerUsedGlobal(var);
+ if (CGM.getCodeGenOpts().KeepPersistentStorageVariables)
+ CGM.addUsedOrCompilerUsedGlobal(var);
+
// We may have to cast the constant because of the initializer
// mismatch above.
//
@@ -458,11 +478,10 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
// RAUW's the GV uses of this constant will be invalid.
llvm::Constant *castedAddr =
llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
- if (var != castedAddr)
- LocalDeclMap.find(&D)->second = Address(castedAddr, alignment);
+ LocalDeclMap.find(&D)->second = Address(castedAddr, elemTy, alignment);
CGM.setStaticLocalDeclAddress(&D, castedAddr);
- CGM.getSanitizerMetadata()->reportGlobalToASan(var, D);
+ CGM.getSanitizerMetadata()->reportGlobal(var, D);
// Emit global variable debug descriptor for static vars.
CGDebugInfo *DI = getDebugInfo();
@@ -558,8 +577,17 @@ namespace {
bool isRedundantBeforeReturn() override { return true; }
void Emit(CodeGenFunction &CGF, Flags flags) override {
llvm::Value *V = CGF.Builder.CreateLoad(Stack);
- llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
- CGF.Builder.CreateCall(F, V);
+ CGF.Builder.CreateStackRestore(V);
+ }
+ };
+
+ struct KmpcAllocFree final : EHScopeStack::Cleanup {
+ std::pair<llvm::Value *, llvm::Value *> AddrSizePair;
+ KmpcAllocFree(const std::pair<llvm::Value *, llvm::Value *> &AddrSizePair)
+ : AddrSizePair(AddrSizePair) {}
+ void Emit(CodeGenFunction &CGF, Flags EmissionFlags) override {
+ auto &RT = CGF.CGM.getOpenMPRuntime();
+ RT.getKmpcFreeShared(CGF, AddrSizePair);
}
};
@@ -709,8 +737,8 @@ static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
// Handle a formal type change to avoid asserting.
auto srcAddr = srcLV.getAddress(CGF);
if (needsCast) {
- srcAddr = CGF.Builder.CreateElementBitCast(
- srcAddr, destLV.getAddress(CGF).getElementType());
+ srcAddr =
+ srcAddr.withElementType(destLV.getAddress(CGF).getElementType());
}
// If it was an l-value, use objc_copyWeak.
@@ -744,7 +772,7 @@ void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
if (!SanOpts.has(SanitizerKind::NullabilityAssign))
return;
- auto Nullability = LHS.getType()->getNullability(getContext());
+ auto Nullability = LHS.getType()->getNullability();
if (!Nullability || *Nullability != NullabilityKind::NonNull)
return;
@@ -828,7 +856,7 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
// If D is pseudo-strong, treat it like __unsafe_unretained here. This means
// that we omit the retain, and causes non-autoreleased return values to be
// immediately released.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Qualifiers::OCL_ExplicitNone:
@@ -1142,11 +1170,11 @@ Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
GV->setAlignment(Align.getAsAlign());
GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
CacheEntry = GV;
- } else if (CacheEntry->getAlignment() < Align.getQuantity()) {
+ } else if (CacheEntry->getAlignment() < uint64_t(Align.getQuantity())) {
CacheEntry->setAlignment(Align.getAsAlign());
}
- return Address(CacheEntry, Align);
+ return Address(CacheEntry, CacheEntry->getValueType(), Align);
}
static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
@@ -1155,11 +1183,7 @@ static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
llvm::Constant *Constant,
CharUnits Align) {
Address SrcPtr = CGM.createUnnamedGlobalFrom(D, Constant, Align);
- llvm::Type *BP = llvm::PointerType::getInt8PtrTy(CGM.getLLVMContext(),
- SrcPtr.getAddressSpace());
- if (SrcPtr.getType() != BP)
- SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
- return SrcPtr;
+ return SrcPtr.withElementType(CGM.Int8Ty);
}
static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
@@ -1193,7 +1217,7 @@ static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
bool valueAlreadyCorrect =
constant->isNullValue() || isa<llvm::UndefValue>(constant);
if (!valueAlreadyCorrect) {
- Loc = Builder.CreateBitCast(Loc, Ty->getPointerTo(Loc.getAddressSpace()));
+ Loc = Loc.withElementType(Ty);
emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder,
IsAutoInit);
}
@@ -1217,29 +1241,35 @@ static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
return;
}
- // If the initializer is small, use a handful of stores.
+ // If the initializer is small or trivialAutoVarInit is set, use a handful of
+ // stores.
+ bool IsTrivialAutoVarInitPattern =
+ CGM.getContext().getLangOpts().getTrivialAutoVarInit() ==
+ LangOptions::TrivialAutoVarInitKind::Pattern;
if (shouldSplitConstantStore(CGM, ConstantSize)) {
if (auto *STy = dyn_cast<llvm::StructType>(Ty)) {
- // FIXME: handle the case when STy != Loc.getElementType().
- if (STy == Loc.getElementType()) {
+ if (STy == Loc.getElementType() ||
+ (STy != Loc.getElementType() && IsTrivialAutoVarInitPattern)) {
+ const llvm::StructLayout *Layout =
+ CGM.getDataLayout().getStructLayout(STy);
for (unsigned i = 0; i != constant->getNumOperands(); i++) {
- Address EltPtr = Builder.CreateStructGEP(Loc, i);
- emitStoresForConstant(
- CGM, D, EltPtr, isVolatile, Builder,
- cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)),
- IsAutoInit);
+ CharUnits CurOff =
+ CharUnits::fromQuantity(Layout->getElementOffset(i));
+ Address EltPtr = Builder.CreateConstInBoundsByteGEP(
+ Loc.withElementType(CGM.Int8Ty), CurOff);
+ emitStoresForConstant(CGM, D, EltPtr, isVolatile, Builder,
+ constant->getAggregateElement(i), IsAutoInit);
}
return;
}
} else if (auto *ATy = dyn_cast<llvm::ArrayType>(Ty)) {
- // FIXME: handle the case when ATy != Loc.getElementType().
- if (ATy == Loc.getElementType()) {
+ if (ATy == Loc.getElementType() ||
+ (ATy != Loc.getElementType() && IsTrivialAutoVarInitPattern)) {
for (unsigned i = 0; i != ATy->getNumElements(); i++) {
- Address EltPtr = Builder.CreateConstArrayGEP(Loc, i);
- emitStoresForConstant(
- CGM, D, EltPtr, isVolatile, Builder,
- cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)),
- IsAutoInit);
+ Address EltPtr = Builder.CreateConstGEP(
+ Loc.withElementType(ATy->getElementType()), i);
+ emitStoresForConstant(CGM, D, EltPtr, isVolatile, Builder,
+ constant->getAggregateElement(i), IsAutoInit);
}
return;
}
@@ -1332,7 +1362,6 @@ llvm::Value *CodeGenFunction::EmitLifetimeStart(llvm::TypeSize Size,
"Pointer should be in alloca address space");
llvm::Value *SizeV = llvm::ConstantInt::get(
Int64Ty, Size.isScalable() ? -1 : Size.getFixedValue());
- Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
llvm::CallInst *C =
Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
C->setDoesNotThrow();
@@ -1343,7 +1372,6 @@ void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
assert(Addr->getType()->getPointerAddressSpace() ==
CGM.getDataLayout().getAllocaAddrSpace() &&
"Pointer should be in alloca address space");
- Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
llvm::CallInst *C =
Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
C->setDoesNotThrow();
@@ -1392,9 +1420,8 @@ void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
else {
// Create an artificial VarDecl to generate debug info for.
IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
- auto VlaExprTy = VlaSize.NumElts->getType()->getPointerElementType();
auto QT = getContext().getIntTypeForBitwidth(
- VlaExprTy->getScalarSizeInBits(), false);
+ SizeTy->getScalarSizeInBits(), false);
auto *ArtificialDecl = VarDecl::Create(
getContext(), const_cast<DeclContext *>(D.getDeclContext()),
D.getLocation(), D.getLocation(), NameIdent, QT,
@@ -1447,6 +1474,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
address = OpenMPLocalAddr;
+ AllocaAddr = OpenMPLocalAddr;
} else if (Ty->isConstantSizeType()) {
// If this value is an array or struct with a statically determinable
// constant initializer, there are optimizations we can do.
@@ -1467,10 +1495,13 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// emit it as a global instead.
// Exception is if a variable is located in non-constant address space
// in OpenCL.
+ bool NeedsDtor =
+ D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;
if ((!getLangOpts().OpenCL ||
Ty.getAddressSpace() == LangAS::opencl_constant) &&
(CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
- !isEscapingByRef && CGM.isTypeConstant(Ty, true))) {
+ !isEscapingByRef &&
+ Ty.isConstantStorage(getContext(), true, !NeedsDtor))) {
EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
// Signal this condition to later callbacks.
@@ -1492,6 +1523,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// return slot, so that we can elide the copy when returning this
// variable (C++0x [class.copy]p34).
address = ReturnValue;
+ AllocaAddr = ReturnValue;
if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
const auto *RD = RecordTy->getDecl();
@@ -1503,7 +1535,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// applied.
llvm::Value *Zero = Builder.getFalse();
Address NRVOFlag =
- CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
+ CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
EnsureInsertPoint();
Builder.CreateStore(Zero, NRVOFlag);
@@ -1565,28 +1597,59 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
} else {
EnsureInsertPoint();
- if (!DidCallStackSave) {
- // Save the stack.
- Address Stack =
- CreateTempAlloca(Int8PtrTy, getPointerAlign(), "saved_stack");
+ // Delayed globalization for variable length declarations. This ensures that
+ // the expression representing the length has been emitted and can be used
+ // by the definition of the VLA. Since this is an escaped declaration, in
+ // OpenMP we have to use a call to __kmpc_alloc_shared(). The matching
+ // deallocation call to __kmpc_free_shared() is emitted later.
+ bool VarAllocated = false;
+ if (getLangOpts().OpenMPIsTargetDevice) {
+ auto &RT = CGM.getOpenMPRuntime();
+ if (RT.isDelayedVariableLengthDecl(*this, &D)) {
+ // Emit call to __kmpc_alloc_shared() instead of the alloca.
+ std::pair<llvm::Value *, llvm::Value *> AddrSizePair =
+ RT.getKmpcAllocShared(*this, &D);
+
+ // Save the address of the allocation:
+ LValue Base = MakeAddrLValue(AddrSizePair.first, D.getType(),
+ CGM.getContext().getDeclAlign(&D),
+ AlignmentSource::Decl);
+ address = Base.getAddress(*this);
+
+ // Push a cleanup block to emit the call to __kmpc_free_shared in the
+ // appropriate location at the end of the scope of the
+ // __kmpc_alloc_shared functions:
+ pushKmpcAllocFree(NormalCleanup, AddrSizePair);
+
+ // Mark variable as allocated:
+ VarAllocated = true;
+ }
+ }
- llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
- llvm::Value *V = Builder.CreateCall(F);
- Builder.CreateStore(V, Stack);
+ if (!VarAllocated) {
+ if (!DidCallStackSave) {
+ // Save the stack.
+ Address Stack =
+ CreateDefaultAlignTempAlloca(AllocaInt8PtrTy, "saved_stack");
- DidCallStackSave = true;
+ llvm::Value *V = Builder.CreateStackSave();
+ assert(V->getType() == AllocaInt8PtrTy);
+ Builder.CreateStore(V, Stack);
- // Push a cleanup block and restore the stack there.
- // FIXME: in general circumstances, this should be an EH cleanup.
- pushStackRestore(NormalCleanup, Stack);
- }
+ DidCallStackSave = true;
- auto VlaSize = getVLASize(Ty);
- llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);
+ // Push a cleanup block and restore the stack there.
+ // FIXME: in general circumstances, this should be an EH cleanup.
+ pushStackRestore(NormalCleanup, Stack);
+ }
+
+ auto VlaSize = getVLASize(Ty);
+ llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);
- // Allocate memory for the array.
- address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
- &AllocaAddr);
+ // Allocate memory for the array.
+ address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
+ &AllocaAddr);
+ }
// If we have debug info enabled, properly describe the VLA dimensions for
// this type by registering the vla size expression for each of the
@@ -1605,10 +1668,11 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
DI->setLocation(D.getLocation());
// If NRVO, use a pointer to the return address.
- if (UsePointerValue)
+ if (UsePointerValue) {
DebugAddr = ReturnValuePointer;
-
- (void)DI->EmitDeclareOfAutoVariable(&D, DebugAddr.getPointer(), Builder,
+ AllocaAddr = ReturnValuePointer;
+ }
+ (void)DI->EmitDeclareOfAutoVariable(&D, AllocaAddr.getPointer(), Builder,
UsePointerValue);
}
@@ -1706,20 +1770,34 @@ void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
const VarDecl &D,
Address Loc) {
auto trivialAutoVarInit = getContext().getLangOpts().getTrivialAutoVarInit();
+ auto trivialAutoVarInitMaxSize =
+ getContext().getLangOpts().TrivialAutoVarInitMaxSize;
CharUnits Size = getContext().getTypeSizeInChars(type);
bool isVolatile = type.isVolatileQualified();
if (!Size.isZero()) {
+ // We skip auto-init variables by their alloc size. Take this as an example:
+ // "struct Foo {int x; char buff[1024];}" Assume the max-size flag is 1023.
+ // All Foo type variables will be skipped. Ideally, we only skip the buff
+ // array and still auto-init X in this example.
+ // TODO: Improve the size filtering to by member size.
+ auto allocSize = CGM.getDataLayout().getTypeAllocSize(Loc.getElementType());
switch (trivialAutoVarInit) {
case LangOptions::TrivialAutoVarInitKind::Uninitialized:
llvm_unreachable("Uninitialized handled by caller");
case LangOptions::TrivialAutoVarInitKind::Zero:
if (CGM.stopAutoInit())
return;
+ if (trivialAutoVarInitMaxSize > 0 &&
+ allocSize > trivialAutoVarInitMaxSize)
+ return;
emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder);
break;
case LangOptions::TrivialAutoVarInitKind::Pattern:
if (CGM.stopAutoInit())
return;
+ if (trivialAutoVarInitMaxSize > 0 &&
+ allocSize > trivialAutoVarInitMaxSize)
+ return;
emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder);
break;
}
@@ -1771,7 +1849,7 @@ void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
llvm::Value *BaseSizeInChars =
llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
- Address Begin = Builder.CreateElementBitCast(Loc, Int8Ty, "vla.begin");
+ Address Begin = Loc.withElementType(Int8Ty);
llvm::Value *End = Builder.CreateInBoundsGEP(
Begin.getElementType(), Begin.getPointer(), SizeVal, "vla.end");
llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
@@ -1780,7 +1858,7 @@ void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
Cur->addIncoming(Begin.getPointer(), OriginBB);
CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
auto *I =
- Builder.CreateMemCpy(Address(Cur, CurAlign),
+ Builder.CreateMemCpy(Address(Cur, Int8Ty, CurAlign),
createUnnamedGlobalForMemcpyFrom(
CGM, D, Builder, Constant, ConstantAlign),
BaseSizeInChars, isVolatile);
@@ -1902,10 +1980,9 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
return EmitStoreThroughLValue(RValue::get(constant), lv, true);
}
- llvm::Type *BP = CGM.Int8Ty->getPointerTo(Loc.getAddressSpace());
- emitStoresForConstant(
- CGM, D, (Loc.getType() == BP) ? Loc : Builder.CreateBitCast(Loc, BP),
- type.isVolatileQualified(), Builder, constant, /*IsAutoInit=*/false);
+ emitStoresForConstant(CGM, D, Loc.withElementType(CGM.Int8Ty),
+ type.isVolatileQualified(), Builder, constant,
+ /*IsAutoInit=*/false);
}
/// Emit an expression as an initializer for an object (variable, field, etc.)
@@ -2123,6 +2200,11 @@ void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
}
+void CodeGenFunction::pushKmpcAllocFree(
+ CleanupKind Kind, std::pair<llvm::Value *, llvm::Value *> AddrSizePair) {
+ EHStack.pushCleanup<KmpcAllocFree>(Kind, AddrSizePair);
+}
+
void CodeGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind,
Address addr, QualType type,
Destroyer *destroyer,
@@ -2246,16 +2328,17 @@ void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
// Shift the address back by one element.
llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
+ llvm::Type *llvmElementType = ConvertTypeForMem(elementType);
llvm::Value *element = Builder.CreateInBoundsGEP(
- elementPast->getType()->getPointerElementType(), elementPast, negativeOne,
- "arraydestroy.element");
+ llvmElementType, elementPast, negativeOne, "arraydestroy.element");
if (useEHCleanup)
pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
destroyer);
// Perform the actual destruction there.
- destroyer(*this, Address(element, elementAlign), elementType);
+ destroyer(*this, Address(element, llvmElementType, elementAlign),
+ elementType);
if (useEHCleanup)
PopCleanupBlock();
@@ -2275,6 +2358,8 @@ static void emitPartialArrayDestroy(CodeGenFunction &CGF,
llvm::Value *begin, llvm::Value *end,
QualType type, CharUnits elementAlign,
CodeGenFunction::Destroyer *destroyer) {
+ llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
+
// If the element type is itself an array, drill down.
unsigned arrayDepth = 0;
while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
@@ -2288,7 +2373,6 @@ static void emitPartialArrayDestroy(CodeGenFunction &CGF,
llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
- llvm::Type *elemTy = begin->getType()->getPointerElementType();
begin = CGF.Builder.CreateInBoundsGEP(
elemTy, begin, gepIndices, "pad.arraybegin");
end = CGF.Builder.CreateInBoundsGEP(
@@ -2428,11 +2512,15 @@ namespace {
/// for the specified parameter and set up LocalDeclMap.
void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
unsigned ArgNo) {
+ bool NoDebugInfo = false;
// FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
"Invalid argument to EmitParmDecl");
- Arg.getAnyValue()->setName(D.getName());
+ // Set the name of the parameter's initial value to make IR easier to
+ // read. Don't modify the names of globals.
+ if (!isa<llvm::GlobalValue>(Arg.getAnyValue()))
+ Arg.getAnyValue()->setName(D.getName());
QualType Ty = D.getType();
@@ -2447,23 +2535,40 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
setBlockContextParameter(IPD, ArgNo, V);
return;
}
+ // Suppressing debug info for ThreadPrivateVar parameters, else it hides
+ // debug info of TLS variables.
+ NoDebugInfo =
+ (IPD->getParameterKind() == ImplicitParamKind::ThreadPrivateVar);
}
Address DeclPtr = Address::invalid();
+ Address AllocaPtr = Address::invalid();
bool DoStore = false;
bool IsScalar = hasScalarEvaluationKind(Ty);
+ bool UseIndirectDebugAddress = false;
+
// If we already have a pointer to the argument, reuse the input pointer.
if (Arg.isIndirect()) {
DeclPtr = Arg.getIndirectAddress();
- // If we have a prettier pointer type at this point, bitcast to that.
- unsigned AS = DeclPtr.getType()->getAddressSpace();
- llvm::Type *IRTy = ConvertTypeForMem(Ty)->getPointerTo(AS);
- if (DeclPtr.getType() != IRTy)
- DeclPtr = Builder.CreateBitCast(DeclPtr, IRTy, D.getName());
+ DeclPtr = DeclPtr.withElementType(ConvertTypeForMem(Ty));
// Indirect argument is in alloca address space, which may be different
// from the default address space.
auto AllocaAS = CGM.getASTAllocaAddressSpace();
auto *V = DeclPtr.getPointer();
+ AllocaPtr = DeclPtr;
+
+ // For truly ABI indirect arguments -- those that are not `byval` -- store
+ // the address of the argument on the stack to preserve debug information.
+ ABIArgInfo ArgInfo = CurFnInfo->arguments()[ArgNo - 1].info;
+ if (ArgInfo.isIndirect())
+ UseIndirectDebugAddress = !ArgInfo.getIndirectByVal();
+ if (UseIndirectDebugAddress) {
+ auto PtrTy = getContext().getPointerType(Ty);
+ AllocaPtr = CreateMemTemp(PtrTy, getContext().getTypeAlignInChars(PtrTy),
+ D.getName() + ".indirect_addr");
+ EmitStoreOfScalar(V, AllocaPtr, /* Volatile */ false, PtrTy);
+ }
+
auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
auto DestLangAS =
getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
@@ -2471,10 +2576,11 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
assert(getContext().getTargetAddressSpace(SrcLangAS) ==
CGM.getDataLayout().getAllocaAddrSpace());
auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
- auto *T = V->getType()->getPointerElementType()->getPointerTo(DestAS);
- DeclPtr = Address(getTargetHooks().performAddrSpaceCast(
- *this, V, SrcLangAS, DestLangAS, T, true),
- DeclPtr.getAlignment());
+ auto *T = llvm::PointerType::get(getLLVMContext(), DestAS);
+ DeclPtr =
+ DeclPtr.withPointer(getTargetHooks().performAddrSpaceCast(
+ *this, V, SrcLangAS, DestLangAS, T, true),
+ DeclPtr.isKnownNonNull());
}
// Push a destructor cleanup for this parameter if the ABI requires it.
@@ -2500,10 +2606,11 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
: Address::invalid();
if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
DeclPtr = OpenMPLocalAddr;
+ AllocaPtr = DeclPtr;
} else {
// Otherwise, create a temporary to hold the value.
DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
- D.getName() + ".addr");
+ D.getName() + ".addr", &AllocaPtr);
}
DoStore = true;
}
@@ -2577,9 +2684,10 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
// Emit debug info for param declarations in non-thunk functions.
if (CGDebugInfo *DI = getDebugInfo()) {
- if (CGM.getCodeGenOpts().hasReducedDebugInfo() && !CurFuncIsThunk) {
+ if (CGM.getCodeGenOpts().hasReducedDebugInfo() && !CurFuncIsThunk &&
+ !NoDebugInfo) {
llvm::DILocalVariable *DILocalVar = DI->EmitDeclareOfArgVariable(
- &D, DeclPtr.getPointer(), ArgNo, Builder);
+ &D, AllocaPtr.getPointer(), ArgNo, Builder, UseIndirectDebugAddress);
if (const auto *Var = dyn_cast_or_null<ParmVarDecl>(&D))
DI->getParamDbgMappings().insert({Var, DILocalVar});
}
@@ -2592,7 +2700,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
// function satisfy their nullability preconditions. This makes it necessary
// to emit null checks for args in the function body itself.
if (requiresReturnValueNullabilityCheck()) {
- auto Nullability = Ty->getNullability(getContext());
+ auto Nullability = Ty->getNullability();
if (Nullability && *Nullability == NullabilityKind::NonNull) {
SanitizerScope SanScope(this);
RetValNullabilityPrecondition =
@@ -2674,3 +2782,22 @@ void CodeGenModule::EmitOMPAllocateDecl(const OMPAllocateDecl *D) {
DummyGV->eraseFromParent();
}
}
+
+std::optional<CharUnits>
+CodeGenModule::getOMPAllocateAlignment(const VarDecl *VD) {
+ if (const auto *AA = VD->getAttr<OMPAllocateDeclAttr>()) {
+ if (Expr *Alignment = AA->getAlignment()) {
+ unsigned UserAlign =
+ Alignment->EvaluateKnownConstInt(getContext()).getExtValue();
+ CharUnits NaturalAlign =
+ getNaturalTypeAlignment(VD->getType().getNonReferenceType());
+
+ // OpenMP5.1 pg 185 lines 7-10
+ // Each item in the align modifier list must be aligned to the maximum
+ // of the specified alignment and the type's natural alignment.
+ return CharUnits::fromQuantity(
+ std::max<unsigned>(UserAlign, NaturalAlign.getQuantity()));
+ }
+ }
+ return std::nullopt;
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
index 553fedebfe56..e08a1e5f42df 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "CGCXXABI.h"
+#include "CGHLSLRuntime.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
@@ -121,21 +122,21 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
if (CGF.getContext().getLangOpts().OpenCL) {
auto DestAS =
CGM.getTargetCodeGenInfo().getAddrSpaceOfCxaAtexitPtrParam();
- auto DestTy = CGF.getTypes().ConvertType(Type)->getPointerTo(
- CGM.getContext().getTargetAddressSpace(DestAS));
+ auto DestTy = llvm::PointerType::get(
+ CGM.getLLVMContext(), CGM.getContext().getTargetAddressSpace(DestAS));
auto SrcAS = D.getType().getQualifiers().getAddressSpace();
if (DestAS == SrcAS)
- Argument = llvm::ConstantExpr::getBitCast(Addr.getPointer(), DestTy);
+ Argument = Addr.getPointer();
else
// FIXME: On addr space mismatch we are passing NULL. The generation
// of the global destructor function should be adjusted accordingly.
Argument = llvm::ConstantPointerNull::get(DestTy);
} else {
- Argument = llvm::ConstantExpr::getBitCast(
- Addr.getPointer(), CGF.getTypes().ConvertType(Type)->getPointerTo());
+ Argument = Addr.getPointer();
}
// Otherwise, the standard logic requires a helper function.
} else {
+ Addr = Addr.withElementType(CGF.ConvertTypeForMem(Type));
Func = CodeGenFunction(CGM)
.generateDestroyHelper(Addr, Type, CGF.getDestroyer(DtorKind),
CGF.needsEHCleanup(DtorKind), &D);
@@ -166,13 +167,12 @@ void CodeGenFunction::EmitInvariantStart(llvm::Constant *Addr, CharUnits Size) {
// Emit a call with the size in bytes of the object.
uint64_t Width = Size.getQuantity();
- llvm::Value *Args[2] = { llvm::ConstantInt::getSigned(Int64Ty, Width),
- llvm::ConstantExpr::getBitCast(Addr, Int8PtrTy)};
+ llvm::Value *Args[2] = {llvm::ConstantInt::getSigned(Int64Ty, Width), Addr};
Builder.CreateCall(InvariantStart, Args);
}
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
- llvm::Constant *DeclPtr,
+ llvm::GlobalVariable *GV,
bool PerformInit) {
const Expr *Init = D.getInit();
@@ -193,15 +193,17 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
// For example, in the above CUDA code, the static local variable s has a
// "shared" address space qualifier, but the constructor of StructWithCtor
// expects "this" in the "generic" address space.
- unsigned ExpectedAddrSpace = getContext().getTargetAddressSpace(T);
- unsigned ActualAddrSpace = DeclPtr->getType()->getPointerAddressSpace();
+ unsigned ExpectedAddrSpace = getTypes().getTargetAddressSpace(T);
+ unsigned ActualAddrSpace = GV->getAddressSpace();
+ llvm::Constant *DeclPtr = GV;
if (ActualAddrSpace != ExpectedAddrSpace) {
- llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(T);
- llvm::PointerType *PTy = llvm::PointerType::get(LTy, ExpectedAddrSpace);
+ llvm::PointerType *PTy =
+ llvm::PointerType::get(getLLVMContext(), ExpectedAddrSpace);
DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy);
}
- ConstantAddress DeclAddr(DeclPtr, getContext().getDeclAlign(&D));
+ ConstantAddress DeclAddr(
+ DeclPtr, GV->getValueType(), getContext().getDeclAlign(&D));
if (!T->isReferenceType()) {
if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
@@ -210,9 +212,11 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
&D, DeclAddr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
PerformInit, this);
}
+ bool NeedsDtor =
+ D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;
if (PerformInit)
EmitDeclInit(*this, D, DeclAddr);
- if (CGM.isTypeConstant(D.getType(), true))
+ if (D.getType().isConstantStorage(getContext(), true, !NeedsDtor))
EmitDeclInvariant(*this, D, DeclPtr);
else
EmitDeclDestroy(*this, D, DeclAddr);
@@ -274,8 +278,8 @@ llvm::Function *CodeGenFunction::createTLSAtExitStub(
}
const CGFunctionInfo &FI = CGM.getTypes().arrangeLLVMFunctionInfo(
- getContext().IntTy, /*instanceMethod=*/false, /*chainCall=*/false,
- {getContext().IntTy}, FunctionType::ExtInfo(), {}, RequiredArgs::All);
+ getContext().IntTy, FnInfoOpts::None, {getContext().IntTy},
+ FunctionType::ExtInfo(), {}, RequiredArgs::All);
// Get the stub function type, int(*)(int,...).
llvm::FunctionType *StubTy =
@@ -288,7 +292,7 @@ llvm::Function *CodeGenFunction::createTLSAtExitStub(
FunctionArgList Args;
ImplicitParamDecl IPD(CGM.getContext(), CGM.getContext().IntTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
Args.push_back(&IPD);
QualType ResTy = CGM.getContext().IntTy;
@@ -323,6 +327,15 @@ void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
registerGlobalDtorWithAtExit(dtorStub);
}
+/// Register a global destructor using the LLVM 'llvm.global_dtors' global.
+void CodeGenFunction::registerGlobalDtorWithLLVM(const VarDecl &VD,
+ llvm::FunctionCallee Dtor,
+ llvm::Constant *Addr) {
+ // Create a function which calls the destructor.
+ llvm::Function *dtorStub = createAtExitStub(VD, Dtor, Addr);
+ CGM.AddGlobalDtor(dtorStub);
+}
+
void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtorStub) {
// extern "C" int atexit(void (*f)(void));
assert(dtorStub->getType() ==
@@ -422,9 +435,8 @@ void CodeGenFunction::EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
llvm::Function *CodeGenModule::CreateGlobalInitOrCleanUpFunction(
llvm::FunctionType *FTy, const Twine &Name, const CGFunctionInfo &FI,
- SourceLocation Loc, bool TLS) {
- llvm::Function *Fn = llvm::Function::Create(
- FTy, llvm::GlobalValue::InternalLinkage, Name, &getModule());
+ SourceLocation Loc, bool TLS, llvm::GlobalVariable::LinkageTypes Linkage) {
+ llvm::Function *Fn = llvm::Function::Create(FTy, Linkage, Name, &getModule());
if (!getLangOpts().AppleKext && !TLS) {
// Set the section if needed.
@@ -432,7 +444,8 @@ llvm::Function *CodeGenModule::CreateGlobalInitOrCleanUpFunction(
Fn->setSection(Section);
}
- SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
+ if (Linkage == llvm::GlobalVariable::InternalLinkage)
+ SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
Fn->setCallingConv(getRuntimeCC());
@@ -455,8 +468,8 @@ llvm::Function *CodeGenModule::CreateGlobalInitOrCleanUpFunction(
!isInNoSanitizeList(SanitizerKind::KernelHWAddress, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
- if (getLangOpts().Sanitize.has(SanitizerKind::MemTag) &&
- !isInNoSanitizeList(SanitizerKind::MemTag, Fn, Loc))
+ if (getLangOpts().Sanitize.has(SanitizerKind::MemtagStack) &&
+ !isInNoSanitizeList(SanitizerKind::MemtagStack, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
@@ -515,10 +528,6 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
D->hasAttr<CUDASharedAttr>()))
return;
- if (getLangOpts().OpenMP &&
- getOpenMPRuntime().emitDeclareTargetVarDefinition(D, Addr, PerformInit))
- return;
-
// Check if we've already initialized this decl.
auto I = DelayedCXXInitPosition.find(D);
if (I != DelayedCXXInitPosition.end() && I->second == ~0U)
@@ -549,7 +558,18 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
CXXThreadLocalInits.push_back(Fn);
CXXThreadLocalInitVars.push_back(D);
} else if (PerformInit && ISA) {
- EmitPointerToInitFunc(D, Addr, Fn, ISA);
+ // Contract with backend that "init_seg(compiler)" corresponds to priority
+ // 200 and "init_seg(lib)" corresponds to priority 400.
+ int Priority = -1;
+ if (ISA->getSection() == ".CRT$XCC")
+ Priority = 200;
+ else if (ISA->getSection() == ".CRT$XCL")
+ Priority = 400;
+
+ if (Priority != -1)
+ AddGlobalCtor(Fn, Priority, ~0U, COMDATKey);
+ else
+ EmitPointerToInitFunc(D, Addr, Fn, ISA);
} else if (auto *IPA = D->getAttr<InitPriorityAttr>()) {
OrderGlobalInitsOrStermFinalizers Key(IPA->getPriority(),
PrioritizedCXXGlobalInits.size());
@@ -573,14 +593,32 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
// SelectAny globals will be comdat-folded. Put the initializer into a
// COMDAT group associated with the global, so the initializers get folded
// too.
-
- AddGlobalCtor(Fn, 65535, COMDATKey);
+ I = DelayedCXXInitPosition.find(D);
+ // CXXGlobalInits.size() is the lex order number for the next deferred
+ // VarDecl. Use it when the current VarDecl is non-deferred. Although this
+ // lex order number is shared between current VarDecl and some following
+ // VarDecls, their order of insertion into `llvm.global_ctors` is the same
+ // as the lexing order and the following stable sort would preserve such
+ // order.
+ unsigned LexOrder =
+ I == DelayedCXXInitPosition.end() ? CXXGlobalInits.size() : I->second;
+ AddGlobalCtor(Fn, 65535, LexOrder, COMDATKey);
if (COMDATKey && (getTriple().isOSBinFormatELF() ||
getTarget().getCXXABI().isMicrosoft())) {
// When COMDAT is used on ELF or in the MS C++ ABI, the key must be in
// llvm.used to prevent linker GC.
addUsedGlobal(COMDATKey);
}
+
+ // If we used a COMDAT key for the global ctor, the init function can be
+ // discarded if the global ctor entry is discarded.
+ // FIXME: Do we need to restrict this to ELF and Wasm?
+ llvm::Comdat *C = Addr->getComdat();
+ if (COMDATKey && C &&
+ (getTarget().getTriple().isOSBinFormatELF() ||
+ getTarget().getTriple().isOSBinFormatWasm())) {
+ Fn->setComdat(C);
+ }
} else {
I = DelayedCXXInitPosition.find(D); // Re-do lookup in case of re-hash.
if (I == DelayedCXXInitPosition.end()) {
@@ -605,6 +643,159 @@ void CodeGenModule::EmitCXXThreadLocalInitFunc() {
CXXThreadLocals.clear();
}
+/* Build the initializer for a C++20 module:
+ This is arranged to be run only once regardless of how many times the module
+ might be included transitively. This arranged by using a guard variable.
+
+ If there are no initializers at all (and also no imported modules) we reduce
+ this to an empty function (since the Itanium ABI requires that this function
+ be available to a caller, which might be produced by a different
+ implementation).
+
+ First we call any initializers for imported modules.
+ We then call initializers for the Global Module Fragment (if present)
+ We then call initializers for the current module.
+ We then call initializers for the Private Module Fragment (if present)
+*/
+
+void CodeGenModule::EmitCXXModuleInitFunc(Module *Primary) {
+ assert(Primary->isInterfaceOrPartition() &&
+ "The function should only be called for C++20 named module interface"
+ " or partition.");
+
+ while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
+ CXXGlobalInits.pop_back();
+
+ // As noted above, we create the function, even if it is empty.
+ // Module initializers for imported modules are emitted first.
+
+ // Collect all the modules that we import
+ llvm::SmallSetVector<Module *, 8> AllImports;
+ // Ones that we export
+ for (auto I : Primary->Exports)
+ AllImports.insert(I.getPointer());
+ // Ones that we only import.
+ for (Module *M : Primary->Imports)
+ AllImports.insert(M);
+ // Ones that we import in the global module fragment or the private module
+ // fragment.
+ for (Module *SubM : Primary->submodules()) {
+ assert((SubM->isGlobalModule() || SubM->isPrivateModule()) &&
+ "The sub modules of C++20 module unit should only be global module "
+ "fragments or private module framents.");
+ assert(SubM->Exports.empty() &&
+ "The global mdoule fragments and the private module fragments are "
+ "not allowed to export import modules.");
+ for (Module *M : SubM->Imports)
+ AllImports.insert(M);
+ }
+
+ SmallVector<llvm::Function *, 8> ModuleInits;
+ for (Module *M : AllImports) {
+ // No Itanium initializer in header like modules.
+ if (M->isHeaderLikeModule())
+ continue; // TODO: warn of mixed use of module map modules and C++20?
+ // We're allowed to skip the initialization if we are sure it doesn't
+ // do any thing.
+ if (!M->isNamedModuleInterfaceHasInit())
+ continue;
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+ SmallString<256> FnName;
+ {
+ llvm::raw_svector_ostream Out(FnName);
+ cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
+ .mangleModuleInitializer(M, Out);
+ }
+ assert(!GetGlobalValue(FnName.str()) &&
+ "We should only have one use of the initializer call");
+ llvm::Function *Fn = llvm::Function::Create(
+ FTy, llvm::Function::ExternalLinkage, FnName.str(), &getModule());
+ ModuleInits.push_back(Fn);
+ }
+
+ // Add any initializers with specified priority; this uses the same approach
+ // as EmitCXXGlobalInitFunc().
+ if (!PrioritizedCXXGlobalInits.empty()) {
+ SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
+ llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
+ PrioritizedCXXGlobalInits.end());
+ for (SmallVectorImpl<GlobalInitData>::iterator
+ I = PrioritizedCXXGlobalInits.begin(),
+ E = PrioritizedCXXGlobalInits.end();
+ I != E;) {
+ SmallVectorImpl<GlobalInitData>::iterator PrioE =
+ std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());
+
+ for (; I < PrioE; ++I)
+ ModuleInits.push_back(I->second);
+ }
+ }
+
+ // Now append the ones without specified priority.
+ for (auto *F : CXXGlobalInits)
+ ModuleInits.push_back(F);
+
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+ const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();
+
+ // We now build the initializer for this module, which has a mangled name
+ // as per the Itanium ABI . The action of the initializer is guarded so that
+ // each init is run just once (even though a module might be imported
+ // multiple times via nested use).
+ llvm::Function *Fn;
+ {
+ SmallString<256> InitFnName;
+ llvm::raw_svector_ostream Out(InitFnName);
+ cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
+ .mangleModuleInitializer(Primary, Out);
+ Fn = CreateGlobalInitOrCleanUpFunction(
+ FTy, llvm::Twine(InitFnName), FI, SourceLocation(), false,
+ llvm::GlobalVariable::ExternalLinkage);
+
+ // If we have a completely empty initializer then we do not want to create
+ // the guard variable.
+ ConstantAddress GuardAddr = ConstantAddress::invalid();
+ if (!ModuleInits.empty()) {
+ // Create the guard var.
+ llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
+ getModule(), Int8Ty, /*isConstant=*/false,
+ llvm::GlobalVariable::InternalLinkage,
+ llvm::ConstantInt::get(Int8Ty, 0), InitFnName.str() + "__in_chrg");
+ CharUnits GuardAlign = CharUnits::One();
+ Guard->setAlignment(GuardAlign.getAsAlign());
+ GuardAddr = ConstantAddress(Guard, Int8Ty, GuardAlign);
+ }
+ CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, ModuleInits,
+ GuardAddr);
+ }
+
+ // We allow for the case that a module object is added to a linked binary
+ // without a specific call to the the initializer. This also ensures that
+ // implementation partition initializers are called when the partition
+ // is not imported as an interface.
+ AddGlobalCtor(Fn);
+
+ // See the comment in EmitCXXGlobalInitFunc about OpenCL global init
+ // functions.
+ if (getLangOpts().OpenCL) {
+ GenKernelArgMetadata(Fn);
+ Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
+ }
+
+ assert(!getLangOpts().CUDA || !getLangOpts().CUDAIsDevice ||
+ getLangOpts().GPUAllowDeviceInit);
+ if (getLangOpts().HIP && getLangOpts().CUDAIsDevice) {
+ Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
+ Fn->addFnAttr("device-init");
+ }
+
+ // We are done with the inits.
+ AllImports.clear();
+ PrioritizedCXXGlobalInits.clear();
+ CXXGlobalInits.clear();
+ ModuleInits.clear();
+}
+
static SmallString<128> getTransformedFileName(llvm::Module &M) {
SmallString<128> FileName = llvm::sys::path::filename(M.getName());
@@ -637,7 +828,29 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
CXXGlobalInits.pop_back();
- if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
+ // When we import C++20 modules, we must run their initializers first.
+ SmallVector<llvm::Function *, 8> ModuleInits;
+ if (CXX20ModuleInits)
+ for (Module *M : ImportedModules) {
+ // No Itanium initializer in header like modules.
+ if (M->isHeaderLikeModule())
+ continue;
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+ SmallString<256> FnName;
+ {
+ llvm::raw_svector_ostream Out(FnName);
+ cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
+ .mangleModuleInitializer(M, Out);
+ }
+ assert(!GetGlobalValue(FnName.str()) &&
+ "We should only have one use of the initializer call");
+ llvm::Function *Fn = llvm::Function::Create(
+ FTy, llvm::Function::ExternalLinkage, FnName.str(), &getModule());
+ ModuleInits.push_back(Fn);
+ }
+
+ if (ModuleInits.empty() && CXXGlobalInits.empty() &&
+ PrioritizedCXXGlobalInits.empty())
return;
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
@@ -663,6 +876,13 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
FTy, "_GLOBAL__I_" + getPrioritySuffix(Priority), FI);
+ // Prepend the module inits to the highest priority set.
+ if (!ModuleInits.empty()) {
+ for (auto *F : ModuleInits)
+ LocalCXXGlobalInits.push_back(F);
+ ModuleInits.clear();
+ }
+
for (; I < PrioE; ++I)
LocalCXXGlobalInits.push_back(I->second);
@@ -672,17 +892,35 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
PrioritizedCXXGlobalInits.clear();
}
- if (getCXXABI().useSinitAndSterm() && CXXGlobalInits.empty())
+ if (getCXXABI().useSinitAndSterm() && ModuleInits.empty() &&
+ CXXGlobalInits.empty())
return;
+ for (auto *F : CXXGlobalInits)
+ ModuleInits.push_back(F);
+ CXXGlobalInits.clear();
+
// Include the filename in the symbol name. Including "sub_" matches gcc
// and makes sure these symbols appear lexicographically behind the symbols
- // with priority emitted above.
- llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
- FTy, llvm::Twine("_GLOBAL__sub_I_", getTransformedFileName(getModule())),
- FI);
-
- CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXGlobalInits);
+ // with priority emitted above. Module implementation units behave the same
+ // way as a non-modular TU with imports.
+ llvm::Function *Fn;
+ if (CXX20ModuleInits && getContext().getCurrentNamedModule() &&
+ !getContext().getCurrentNamedModule()->isModuleImplementation()) {
+ SmallString<256> InitFnName;
+ llvm::raw_svector_ostream Out(InitFnName);
+ cast<ItaniumMangleContext>(getCXXABI().getMangleContext())
+ .mangleModuleInitializer(getContext().getCurrentNamedModule(), Out);
+ Fn = CreateGlobalInitOrCleanUpFunction(
+ FTy, llvm::Twine(InitFnName), FI, SourceLocation(), false,
+ llvm::GlobalVariable::ExternalLinkage);
+ } else
+ Fn = CreateGlobalInitOrCleanUpFunction(
+ FTy,
+ llvm::Twine("_GLOBAL__sub_I_", getTransformedFileName(getModule())),
+ FI);
+
+ CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, ModuleInits);
AddGlobalCtor(Fn);
// In OpenCL global init functions must be converted to kernels in order to
@@ -694,7 +932,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
// dynamic resource allocation on the device and program scope variables are
// destroyed by the runtime when program is released.
if (getLangOpts().OpenCL) {
- GenOpenCLArgMetadata(Fn);
+ GenKernelArgMetadata(Fn);
Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
}
@@ -705,7 +943,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
Fn->addFnAttr("device-init");
}
- CXXGlobalInits.clear();
+ ModuleInits.clear();
}
void CodeGenModule::EmitCXXGlobalCleanUpFunc() {
@@ -795,6 +1033,9 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
EmitCXXGlobalVarDeclInit(*D, Addr, PerformInit);
}
+ if (getLangOpts().HLSL)
+ CGM.getHLSLRuntime().annotateHLSLResource(D, Addr);
+
FinishFunction();
}
@@ -902,7 +1143,7 @@ llvm::Function *CodeGenFunction::generateDestroyHelper(
bool useEHCleanupForArray, const VarDecl *VD) {
FunctionArgList args;
ImplicitParamDecl Dst(getContext(), getContext().VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
args.push_back(&Dst);
const CGFunctionInfo &FI =
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp
index 9f65e9eb120c..5a9d06da12de 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp
@@ -127,6 +127,8 @@ const EHPersonality
EHPersonality::GNU_Wasm_CPlusPlus = { "__gxx_wasm_personality_v0", nullptr };
const EHPersonality EHPersonality::XL_CPlusPlus = {"__xlcxx_personality_v1",
nullptr};
+const EHPersonality EHPersonality::ZOS_CPlusPlus = {"__zos_cxx_personality_v2",
+ nullptr};
static const EHPersonality &getCPersonality(const TargetInfo &Target,
const LangOptions &L) {
@@ -156,9 +158,11 @@ static const EHPersonality &getObjCPersonality(const TargetInfo &Target,
case ObjCRuntime::WatchOS:
return EHPersonality::NeXT_ObjC;
case ObjCRuntime::GNUstep:
- if (L.ObjCRuntime.getVersion() >= VersionTuple(1, 7))
+ if (T.isOSCygMing())
+ return EHPersonality::GNU_CPlusPlus_SEH;
+ else if (L.ObjCRuntime.getVersion() >= VersionTuple(1, 7))
return EHPersonality::GNUstep_ObjC;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ObjCRuntime::GCC:
case ObjCRuntime::ObjFW:
if (L.hasSjLjExceptions())
@@ -185,6 +189,8 @@ static const EHPersonality &getCXXPersonality(const TargetInfo &Target,
return EHPersonality::GNU_CPlusPlus_SEH;
if (L.hasWasmExceptions())
return EHPersonality::GNU_Wasm_CPlusPlus;
+ if (T.isOSzOS())
+ return EHPersonality::ZOS_CPlusPlus;
return EHPersonality::GNU_CPlusPlus;
}
@@ -210,7 +216,8 @@ static const EHPersonality &getObjCXXPersonality(const TargetInfo &Target,
return getObjCPersonality(Target, L);
case ObjCRuntime::GNUstep:
- return EHPersonality::GNU_ObjCXX;
+ return Target.getTriple().isOSCygMing() ? EHPersonality::GNU_CPlusPlus_SEH
+ : EHPersonality::GNU_ObjCXX;
// The GCC runtime's personality function inherently doesn't support
// mixed EH. Use the ObjC personality just to avoid returning null.
@@ -249,7 +256,7 @@ const EHPersonality &EHPersonality::get(CodeGenFunction &CGF) {
// For outlined finallys and filters, use the SEH personality in case they
// contain more SEH. This mostly only affects finallys. Filters could
// hypothetically use gnu statement expressions to sneak in nested SEH.
- FD = FD ? FD : CGF.CurSEHParent;
+ FD = FD ? FD : CGF.CurSEHParent.getDecl();
return get(CGF.CGM, dyn_cast_or_null<FunctionDecl>(FD));
}
@@ -263,12 +270,7 @@ static llvm::FunctionCallee getPersonalityFn(CodeGenModule &CGM,
static llvm::Constant *getOpaquePersonalityFn(CodeGenModule &CGM,
const EHPersonality &Personality) {
llvm::FunctionCallee Fn = getPersonalityFn(CGM, Personality);
- llvm::PointerType* Int8PtrTy = llvm::PointerType::get(
- llvm::Type::getInt8Ty(CGM.getLLVMContext()),
- CGM.getDataLayout().getProgramAddressSpace());
-
- return llvm::ConstantExpr::getBitCast(cast<llvm::Constant>(Fn.getCallee()),
- Int8PtrTy);
+ return cast<llvm::Constant>(Fn.getCallee());
}
/// Check whether a landingpad instruction only uses C++ features.
@@ -282,7 +284,7 @@ static bool LandingPadHasOnlyCXXUses(llvm::LandingPadInst *LPI) {
if (llvm::GlobalVariable *GV = dyn_cast<llvm::GlobalVariable>(Val))
// ObjC EH selector entries are always global variables with
// names starting like this.
- if (GV->getName().startswith("OBJC_EHTYPE"))
+ if (GV->getName().starts_with("OBJC_EHTYPE"))
return false;
} else {
// Check if any of the filter values have the ObjC prefix.
@@ -293,7 +295,7 @@ static bool LandingPadHasOnlyCXXUses(llvm::LandingPadInst *LPI) {
cast<llvm::GlobalVariable>((*II)->stripPointerCasts()))
// ObjC EH selector entries are always global variables with
// names starting like this.
- if (GV->getName().startswith("OBJC_EHTYPE"))
+ if (GV->getName().starts_with("OBJC_EHTYPE"))
return false;
}
}
@@ -400,8 +402,8 @@ void CodeGenFunction::EmitAnyExprToExn(const Expr *e, Address addr) {
// __cxa_allocate_exception returns a void*; we need to cast this
// to the appropriate type for the object.
- llvm::Type *ty = ConvertTypeForMem(e->getType())->getPointerTo();
- Address typedAddr = Builder.CreateBitCast(addr, ty);
+ llvm::Type *ty = ConvertTypeForMem(e->getType());
+ Address typedAddr = addr.withElementType(ty);
// FIXME: this isn't quite right! If there's a final unelided call
// to a copy constructor, then according to [except.terminate]p1 we
@@ -421,13 +423,13 @@ void CodeGenFunction::EmitAnyExprToExn(const Expr *e, Address addr) {
Address CodeGenFunction::getExceptionSlot() {
if (!ExceptionSlot)
ExceptionSlot = CreateTempAlloca(Int8PtrTy, "exn.slot");
- return Address(ExceptionSlot, getPointerAlign());
+ return Address(ExceptionSlot, Int8PtrTy, getPointerAlign());
}
Address CodeGenFunction::getEHSelectorSlot() {
if (!EHSelectorSlot)
EHSelectorSlot = CreateTempAlloca(Int32Ty, "ehselector.slot");
- return Address(EHSelectorSlot, CharUnits::fromQuantity(4));
+ return Address(EHSelectorSlot, Int32Ty, CharUnits::fromQuantity(4));
}
llvm::Value *CodeGenFunction::getExceptionFromSlot() {
@@ -440,6 +442,15 @@ llvm::Value *CodeGenFunction::getSelectorFromSlot() {
void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E,
bool KeepInsertionPoint) {
+ // If the exception is being emitted in an OpenMP target region,
+ // and the target is a GPU, we do not support exception handling.
+ // Therefore, we emit a trap which will abort the program, and
+ // prompt a warning indicating that a trap will be emitted.
+ const llvm::Triple &T = Target.getTriple();
+ if (CGM.getLangOpts().OpenMPIsTargetDevice && (T.isNVPTX() || T.isAMDGCN())) {
+ EmitTrapCall(llvm::Intrinsic::trap);
+ return;
+ }
if (const Expr *SubExpr = E->getSubExpr()) {
QualType ThrowType = SubExpr->getType();
if (ThrowType->isObjCObjectPointerType()) {
@@ -477,11 +488,11 @@ void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
return;
ExceptionSpecificationType EST = Proto->getExceptionSpecType();
- if (isNoexceptExceptionSpec(EST) && Proto->canThrow() == CT_Cannot) {
- // noexcept functions are simple terminate scopes.
- if (!getLangOpts().EHAsynch) // -EHa: HW exception still can occur
- EHStack.pushTerminate();
- } else if (EST == EST_Dynamic || EST == EST_DynamicNone) {
+ // In C++17 and later, 'throw()' aka EST_DynamicNone is treated the same way
+ // as noexcept. In earlier standards, it is handled in this block, along with
+ // 'throw(X...)'.
+ if (EST == EST_Dynamic ||
+ (EST == EST_DynamicNone && !getLangOpts().CPlusPlus17)) {
// TODO: Revisit exception specifications for the MS ABI. There is a way to
// encode these in an object file but MSVC doesn't do anything with it.
if (getTarget().getCXXABI().isMicrosoft())
@@ -521,6 +532,10 @@ void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
/*ForEH=*/true);
Filter->setFilter(I, EHType);
}
+ } else if (Proto->canThrow() == CT_Cannot) {
+ // noexcept functions are simple terminate scopes.
+ if (!getLangOpts().EHAsynch) // -EHa: HW exception still can occur
+ EHStack.pushTerminate();
}
}
@@ -580,10 +595,8 @@ void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
return;
ExceptionSpecificationType EST = Proto->getExceptionSpecType();
- if (isNoexceptExceptionSpec(EST) && Proto->canThrow() == CT_Cannot &&
- !EHStack.empty() /* possible empty when under async exceptions */) {
- EHStack.popTerminate();
- } else if (EST == EST_Dynamic || EST == EST_DynamicNone) {
+ if (EST == EST_Dynamic ||
+ (EST == EST_DynamicNone && !getLangOpts().CPlusPlus17)) {
// TODO: Revisit exception specifications for the MS ABI. There is a way to
// encode these in an object file but MSVC doesn't do anything with it.
if (getTarget().getCXXABI().isMicrosoft())
@@ -599,13 +612,24 @@ void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
EHFilterScope &filterScope = cast<EHFilterScope>(*EHStack.begin());
emitFilterDispatchBlock(*this, filterScope);
EHStack.popFilter();
+ } else if (Proto->canThrow() == CT_Cannot &&
+ /* possible empty when under async exceptions */
+ !EHStack.empty()) {
+ EHStack.popTerminate();
}
}
void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) {
- EnterCXXTryStmt(S);
+ const llvm::Triple &T = Target.getTriple();
+ // If we encounter a try statement on in an OpenMP target region offloaded to
+ // a GPU, we treat it as a basic block.
+ const bool IsTargetDevice =
+ (CGM.getLangOpts().OpenMPIsTargetDevice && (T.isNVPTX() || T.isAMDGCN()));
+ if (!IsTargetDevice)
+ EnterCXXTryStmt(S);
EmitStmt(S.getTryBlock());
- ExitCXXTryStmt(S);
+ if (!IsTargetDevice)
+ ExitCXXTryStmt(S);
}
void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
@@ -640,7 +664,7 @@ void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
// Under async exceptions, catch(...) need to catch HW exception too
// Mark scope with SehTryBegin as a SEH __try scope
if (getLangOpts().EHAsynch)
- EmitRuntimeCallOrInvoke(getSehTryBeginFn(CGM));
+ EmitSehTryScopeBegin();
}
}
}
@@ -1114,6 +1138,8 @@ static void emitCatchDispatchBlock(CodeGenFunction &CGF,
// Select the right handler.
llvm::Function *llvm_eh_typeid_for =
CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
+ llvm::Type *argTy = llvm_eh_typeid_for->getArg(0)->getType();
+ LangAS globAS = CGF.CGM.GetGlobalVarAddressSpace(nullptr);
// Load the selector value.
llvm::Value *selector = CGF.getSelectorFromSlot();
@@ -1127,7 +1153,11 @@ static void emitCatchDispatchBlock(CodeGenFunction &CGF,
assert(handler.Type.Flags == 0 &&
"landingpads do not support catch handler flags");
assert(typeValue && "fell into catch-all case!");
- typeValue = CGF.Builder.CreateBitCast(typeValue, CGF.Int8PtrTy);
+ // With opaque ptrs, only the address space can be a mismatch.
+ if (typeValue->getType() != argTy)
+ typeValue =
+ CGF.getTargetHooks().performAddrSpaceCast(CGF, typeValue, globAS,
+ LangAS::Default, argTy);
// Figure out the next block.
bool nextIsEnd;
@@ -1217,8 +1247,7 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
// Wasm uses Windows-style EH instructions, but merges all catch clauses into
// one big catchpad. So we save the old funclet pad here before we traverse
// each catch handler.
- SaveAndRestore<llvm::Instruction *> RestoreCurrentFuncletPad(
- CurrentFuncletPad);
+ SaveAndRestore RestoreCurrentFuncletPad(CurrentFuncletPad);
llvm::BasicBlock *WasmCatchStartBlock = nullptr;
if (EHPersonality::get(*this).isWasmPersonality()) {
auto *CatchSwitch =
@@ -1251,8 +1280,7 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
RunCleanupsScope CatchScope(*this);
// Initialize the catch variable and set up the cleanups.
- SaveAndRestore<llvm::Instruction *> RestoreCurrentFuncletPad(
- CurrentFuncletPad);
+ SaveAndRestore RestoreCurrentFuncletPad(CurrentFuncletPad);
CGM.getCXXABI().emitBeginCatch(*this, C);
// Emit the PGO counter increment.
@@ -1576,8 +1604,7 @@ llvm::BasicBlock *CodeGenFunction::getTerminateFunclet() {
// Create the cleanuppad using the current parent pad as its token. Use 'none'
// if this is a top-level terminate scope, which is the common case.
- SaveAndRestore<llvm::Instruction *> RestoreCurrentFuncletPad(
- CurrentFuncletPad);
+ SaveAndRestore RestoreCurrentFuncletPad(CurrentFuncletPad);
llvm::Value *ParentPad = CurrentFuncletPad;
if (!ParentPad)
ParentPad = llvm::ConstantTokenNone::get(CGM.getLLVMContext());
@@ -1622,7 +1649,7 @@ llvm::BasicBlock *CodeGenFunction::getEHResumeBlock(bool isCleanup) {
llvm::Value *Sel = getSelectorFromSlot();
llvm::Type *LPadType = llvm::StructType::get(Exn->getType(), Sel->getType());
- llvm::Value *LPadVal = llvm::UndefValue::get(LPadType);
+ llvm::Value *LPadVal = llvm::PoisonValue::get(LPadType);
LPadVal = Builder.CreateInsertValue(LPadVal, Exn, 0, "lpad.val");
LPadVal = Builder.CreateInsertValue(LPadVal, Sel, 1, "lpad.val");
@@ -1813,13 +1840,11 @@ Address CodeGenFunction::recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
auto InsertPair = ParentCGF.EscapedLocals.insert(
std::make_pair(ParentAlloca, ParentCGF.EscapedLocals.size()));
int FrameEscapeIdx = InsertPair.first->second;
- // call i8* @llvm.localrecover(i8* bitcast(@parentFn), i8* %fp, i32 N)
+ // call ptr @llvm.localrecover(ptr @parentFn, ptr %fp, i32 N)
llvm::Function *FrameRecoverFn = llvm::Intrinsic::getDeclaration(
&CGM.getModule(), llvm::Intrinsic::localrecover);
- llvm::Constant *ParentI8Fn =
- llvm::ConstantExpr::getBitCast(ParentCGF.CurFn, Int8PtrTy);
RecoverCall = Builder.CreateCall(
- FrameRecoverFn, {ParentI8Fn, ParentFP,
+ FrameRecoverFn, {ParentCGF.CurFn, ParentFP,
llvm::ConstantInt::get(Int32Ty, FrameEscapeIdx)});
} else {
@@ -1839,7 +1864,7 @@ Address CodeGenFunction::recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
llvm::Value *ChildVar =
Builder.CreateBitCast(RecoverCall, ParentVar.getType());
ChildVar->setName(ParentVar.getName());
- return Address(ChildVar, ParentVar.getAlignment());
+ return ParentVar.withPointer(ChildVar, KnownNonNull);
}
void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
@@ -1882,9 +1907,7 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
// since finally funclets recover the parent FP for us.
llvm::Function *RecoverFPIntrin =
CGM.getIntrinsic(llvm::Intrinsic::eh_recoverfp);
- llvm::Constant *ParentI8Fn =
- llvm::ConstantExpr::getBitCast(ParentCGF.CurFn, Int8PtrTy);
- ParentFP = Builder.CreateCall(RecoverFPIntrin, {ParentI8Fn, EntryFP});
+ ParentFP = Builder.CreateCall(RecoverFPIntrin, {ParentCGF.CurFn, EntryFP});
// if the parent is a _finally, the passed-in ParentFP is the FP
// of parent _finally, not Establisher's FP (FP of outermost function).
@@ -1901,7 +1924,7 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
const VarDecl *D = cast<VarDecl>(I.first);
if (isa<ImplicitParamDecl>(D) &&
D->getType() == getContext().VoidPtrTy) {
- assert(D->getName().startswith("frame_pointer"));
+ assert(D->getName().starts_with("frame_pointer"));
FramePtrAddrAlloca = cast<llvm::AllocaInst>(I.second.getPointer());
break;
}
@@ -1912,20 +1935,17 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
int FrameEscapeIdx = InsertPair.first->second;
// an example of a filter's prolog::
- // %0 = call i8* @llvm.eh.recoverfp(bitcast(@"?fin$0@0@main@@"),..)
- // %1 = call i8* @llvm.localrecover(bitcast(@"?fin$0@0@main@@"),..)
- // %2 = bitcast i8* %1 to i8**
- // %3 = load i8*, i8* *%2, align 8
- // ==> %3 is the frame-pointer of outermost host function
+ // %0 = call ptr @llvm.eh.recoverfp(@"?fin$0@0@main@@",..)
+ // %1 = call ptr @llvm.localrecover(@"?fin$0@0@main@@",..)
+ // %2 = load ptr, ptr %1, align 8
+ // ==> %2 is the frame-pointer of outermost host function
llvm::Function *FrameRecoverFn = llvm::Intrinsic::getDeclaration(
&CGM.getModule(), llvm::Intrinsic::localrecover);
- llvm::Constant *ParentI8Fn =
- llvm::ConstantExpr::getBitCast(ParentCGF.CurFn, Int8PtrTy);
ParentFP = Builder.CreateCall(
- FrameRecoverFn, {ParentI8Fn, ParentFP,
+ FrameRecoverFn, {ParentCGF.CurFn, ParentFP,
llvm::ConstantInt::get(Int32Ty, FrameEscapeIdx)});
- ParentFP = Builder.CreateBitCast(ParentFP, CGM.VoidPtrPtrTy);
- ParentFP = Builder.CreateLoad(Address(ParentFP, getPointerAlign()));
+ ParentFP = Builder.CreateLoad(
+ Address(ParentFP, CGM.VoidPtrTy, getPointerAlign()));
}
}
@@ -1998,7 +2018,7 @@ void CodeGenFunction::startOutlinedSEHHelper(CodeGenFunction &ParentCGF,
SmallString<128> Name;
{
llvm::raw_svector_ostream OS(Name);
- const NamedDecl *ParentSEHFn = ParentCGF.CurSEHParent;
+ GlobalDecl ParentSEHFn = ParentCGF.CurSEHParent;
assert(ParentSEHFn && "No CurSEHParent!");
MangleContext &Mangler = CGM.getCXXABI().getMangleContext();
if (IsFilter)
@@ -2015,17 +2035,17 @@ void CodeGenFunction::startOutlinedSEHHelper(CodeGenFunction &ParentCGF,
Args.push_back(ImplicitParamDecl::Create(
getContext(), /*DC=*/nullptr, StartLoc,
&getContext().Idents.get("exception_pointers"),
- getContext().VoidPtrTy, ImplicitParamDecl::Other));
+ getContext().VoidPtrTy, ImplicitParamKind::Other));
} else {
Args.push_back(ImplicitParamDecl::Create(
getContext(), /*DC=*/nullptr, StartLoc,
&getContext().Idents.get("abnormal_termination"),
- getContext().UnsignedCharTy, ImplicitParamDecl::Other));
+ getContext().UnsignedCharTy, ImplicitParamKind::Other));
}
Args.push_back(ImplicitParamDecl::Create(
getContext(), /*DC=*/nullptr, StartLoc,
&getContext().Idents.get("frame_pointer"), getContext().VoidPtrTy,
- ImplicitParamDecl::Other));
+ ImplicitParamKind::Other));
}
QualType RetTy = IsFilter ? getContext().LongTy : getContext().VoidTy;
@@ -2097,7 +2117,6 @@ void CodeGenFunction::EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
// pointer is stored in the second field. So, GEP 20 bytes backwards and
// load the pointer.
SEHInfo = Builder.CreateConstInBoundsGEP1_32(Int8Ty, EntryFP, -20);
- SEHInfo = Builder.CreateBitCast(SEHInfo, Int8PtrTy->getPointerTo());
SEHInfo = Builder.CreateAlignedLoad(Int8PtrTy, SEHInfo, getPointerAlign());
SEHCodeSlotStack.push_back(recoverAddrOfEscapedLocal(
ParentCGF, ParentCGF.SEHCodeSlotStack.back(), ParentFP));
@@ -2110,10 +2129,9 @@ void CodeGenFunction::EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
// CONTEXT *ContextRecord;
// };
// int exceptioncode = exception_pointers->ExceptionRecord->ExceptionCode;
- llvm::Type *RecordTy = CGM.Int32Ty->getPointerTo();
+ llvm::Type *RecordTy = llvm::PointerType::getUnqual(getLLVMContext());
llvm::Type *PtrsTy = llvm::StructType::get(RecordTy, CGM.VoidPtrTy);
- llvm::Value *Ptrs = Builder.CreateBitCast(SEHInfo, PtrsTy->getPointerTo());
- llvm::Value *Rec = Builder.CreateStructGEP(PtrsTy, Ptrs, 0);
+ llvm::Value *Rec = Builder.CreateStructGEP(PtrsTy, SEHInfo, 0);
Rec = Builder.CreateAlignedLoad(RecordTy, Rec, getPointerAlign());
llvm::Value *Code = Builder.CreateAlignedLoad(Int32Ty, Rec, getIntAlign());
assert(!SEHCodeSlotStack.empty() && "emitting EH code outside of __except");
@@ -2182,9 +2200,7 @@ void CodeGenFunction::EnterSEHTryStmt(const SEHTryStmt &S) {
// in place of the RTTI typeinfo global that C++ EH uses.
llvm::Function *FilterFunc =
HelperCGF.GenerateSEHFilterFunction(*this, *Except);
- llvm::Constant *OpaqueFunc =
- llvm::ConstantExpr::getBitCast(FilterFunc, Int8PtrTy);
- CatchScope->setHandler(0, OpaqueFunc, createBasicBlock("__except.ret"));
+ CatchScope->setHandler(0, FilterFunc, createBasicBlock("__except.ret"));
}
void CodeGenFunction::ExitSEHTryStmt(const SEHTryStmt &S) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
index bf514aab8851..f8f997909977 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
@@ -26,42 +26,43 @@
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/NSAPI.h"
+#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/MatrixBuilder.h"
+#include "llvm/Passes/OptimizationLevel.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SaveAndRestore.h"
+#include "llvm/Support/xxhash.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"
+#include <optional>
#include <string>
using namespace clang;
using namespace CodeGen;
+// Experiment to make sanitizers easier to debug
+static llvm::cl::opt<bool> ClSanitizeDebugDeoptimization(
+ "ubsan-unique-traps", llvm::cl::Optional,
+ llvm::cl::desc("Deoptimize traps for UBSAN so there is 1 trap per check"),
+ llvm::cl::init(false));
+
//===--------------------------------------------------------------------===//
// Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//
-llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
- unsigned addressSpace =
- cast<llvm::PointerType>(value->getType())->getAddressSpace();
-
- llvm::PointerType *destType = Int8PtrTy;
- if (addressSpace)
- destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);
-
- if (value->getType() == destType) return value;
- return Builder.CreateBitCast(value, destType);
-}
-
/// CreateTempAlloca - This creates a alloca and inserts it into the entry
/// block.
Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty,
@@ -70,7 +71,7 @@ Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty,
llvm::Value *ArraySize) {
auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
Alloca->setAlignment(Align.getAsAlign());
- return Address(Alloca, Align);
+ return Address(Alloca, Ty, Align, KnownNonNull);
}
/// CreateTempAlloca - This creates a alloca and inserts it into the entry
@@ -94,13 +95,13 @@ Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
// otherwise alloca is inserted at the current insertion point of the
// builder.
if (!ArraySize)
- Builder.SetInsertPoint(AllocaInsertPt);
+ Builder.SetInsertPoint(getPostAllocaInsertPoint());
V = getTargetHooks().performAddrSpaceCast(
*this, V, getASTAllocaAddressSpace(), LangAS::Default,
Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
}
- return Address(V, Align);
+ return Address(V, Ty, Align, KnownNonNull);
}
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
@@ -122,23 +123,10 @@ llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
const Twine &Name) {
CharUnits Align =
- CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlignment(Ty));
+ CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
return CreateTempAlloca(Ty, Align, Name);
}
-void CodeGenFunction::InitTempAlloca(Address Var, llvm::Value *Init) {
- auto *Alloca = Var.getPointer();
- assert(isa<llvm::AllocaInst>(Alloca) ||
- (isa<llvm::AddrSpaceCastInst>(Alloca) &&
- isa<llvm::AllocaInst>(
- cast<llvm::AddrSpaceCastInst>(Alloca)->getPointerOperand())));
-
- auto *Store = new llvm::StoreInst(Init, Alloca, /*volatile*/ false,
- Var.getAlignment().getAsAlign());
- llvm::BasicBlock *Block = AllocaInsertPt->getParent();
- Block->getInstList().insertAfter(AllocaInsertPt->getIterator(), Store);
-}
-
Address CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
CharUnits Align = getContext().getTypeAlignInChars(Ty);
return CreateTempAlloca(ConvertType(Ty), Align, Name);
@@ -156,13 +144,12 @@ Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
/*ArraySize=*/nullptr, Alloca);
if (Ty->isConstantMatrixType()) {
- auto *ArrayTy = cast<llvm::ArrayType>(Result.getType()->getElementType());
+ auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
ArrayTy->getNumElements());
- Result = Address(
- Builder.CreateBitCast(Result.getPointer(), VectorTy->getPointerTo()),
- Result.getAlignment());
+ Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
+ KnownNonNull);
}
return Result;
}
@@ -201,7 +188,17 @@ llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
if (E->isPRValue())
- return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);
+ return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);
+
+ // if this is a bitfield-resulting conditional operator, we can special case
+ // emit this. The normal 'EmitLValue' version of this is particularly
+ // difficult to codegen for, since creating a single "LValue" for two
+ // different sized arguments here is not particularly doable.
+ if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
+ E->IgnoreParenNoopCasts(getContext()))) {
+ if (CondOp->getObjectKind() == OK_BitField)
+ return EmitIgnoredConditionalOperator(CondOp);
+ }
// Just emit it as an l-value and drop the result.
EmitLValue(E);
@@ -402,7 +399,7 @@ static Address createReferenceTemporary(CodeGenFunction &CGF,
QualType Ty = Inner->getType();
if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
(Ty->isArrayType() || Ty->isRecordType()) &&
- CGF.CGM.isTypeConstant(Ty, true))
+ Ty.isConstantStorage(CGF.getContext(), true, false))
if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
auto *GV = new llvm::GlobalVariable(
@@ -419,7 +416,7 @@ static Address createReferenceTemporary(CodeGenFunction &CGF,
GV->getValueType()->getPointerTo(
CGF.getContext().getTargetAddressSpace(LangAS::Default)));
// FIXME: Should we put the new global into a COMDAT?
- return Address(C, alignment);
+ return Address(C, GV->getValueType(), alignment);
}
return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
}
@@ -435,7 +432,7 @@ static Address createReferenceTemporary(CodeGenFunction &CGF,
/// Helper method to check if the underlying ABI is AAPCS
static bool isAAPCS(const TargetInfo &TargetInfo) {
- return TargetInfo.getABI().startswith("aapcs");
+ return TargetInfo.getABI().starts_with("aapcs");
}
LValue CodeGenFunction::
@@ -453,10 +450,8 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
ownership != Qualifiers::OCL_ExplicitNone) {
Address Object = createReferenceTemporary(*this, M, E);
if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
- Object = Address(llvm::ConstantExpr::getBitCast(Var,
- ConvertTypeForMem(E->getType())
- ->getPointerTo(Object.getAddressSpace())),
- Object.getAlignment());
+ llvm::Type *Ty = ConvertTypeForMem(E->getType());
+ Object = Object.withElementType(Ty);
// createReferenceTemporary will promote the temporary to a global with a
// constant initializer if it can. It can only do this to a value of
@@ -511,10 +506,8 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
Address Object = createReferenceTemporary(*this, M, E, &Alloca);
if (auto *Var = dyn_cast<llvm::GlobalVariable>(
Object.getPointer()->stripPointerCasts())) {
- Object = Address(llvm::ConstantExpr::getBitCast(
- cast<llvm::Constant>(Object.getPointer()),
- ConvertTypeForMem(E->getType())->getPointerTo()),
- Object.getAlignment());
+ llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
+ Object = Object.withElementType(TemporaryType);
// If the temporary is a global and has a constant initializer or is a
// constant temporary that we promoted to a global, we may have already
// initialized it.
@@ -540,13 +533,17 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
// Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
// marker. Instead, start the lifetime of a conditional temporary earlier
// so that it's unconditional. Don't do this with sanitizers which need
- // more precise lifetime marks.
+ // more precise lifetime marks. However when inside an "await.suspend"
+ // block, we should always avoid conditional cleanup because it creates
+ // boolean marker that lives across await_suspend, which can destroy coro
+ // frame.
ConditionalEvaluation *OldConditional = nullptr;
CGBuilderTy::InsertPoint OldIP;
if (isInConditionalBranch() && !E->getType().isDestructedType() &&
- !SanOpts.has(SanitizerKind::HWAddress) &&
- !SanOpts.has(SanitizerKind::Memory) &&
- !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) {
+ ((!SanOpts.has(SanitizerKind::HWAddress) &&
+ !SanOpts.has(SanitizerKind::Memory) &&
+ !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
+ inSuspendBlock())) {
OldConditional = OutermostConditional;
OutermostConditional = nullptr;
@@ -580,8 +577,7 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
// Perform derived-to-base casts and/or field accesses, to get from the
// temporary object we created (and, potentially, for which we extended
// the lifetime) to the subobject we're binding the reference to.
- for (unsigned I = Adjustments.size(); I != 0; --I) {
- SubobjectAdjustment &Adjustment = Adjustments[I-1];
+ for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
switch (Adjustment.Kind) {
case SubobjectAdjustment::DerivedToBaseAdjustment:
Object =
@@ -667,9 +663,9 @@ bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
}
bool CodeGenFunction::sanitizePerformTypeCheck() const {
- return SanOpts.has(SanitizerKind::Null) |
- SanOpts.has(SanitizerKind::Alignment) |
- SanOpts.has(SanitizerKind::ObjectSize) |
+ return SanOpts.has(SanitizerKind::Null) ||
+ SanOpts.has(SanitizerKind::Alignment) ||
+ SanOpts.has(SanitizerKind::ObjectSize) ||
SanOpts.has(SanitizerKind::Vptr);
}
@@ -751,30 +747,29 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
llvm::Value *Min = Builder.getFalse();
llvm::Value *NullIsUnknown = Builder.getFalse();
llvm::Value *Dynamic = Builder.getFalse();
- llvm::Value *CastAddr = Builder.CreateBitCast(Ptr, Int8PtrTy);
llvm::Value *LargeEnough = Builder.CreateICmpUGE(
- Builder.CreateCall(F, {CastAddr, Min, NullIsUnknown, Dynamic}), Size);
+ Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
}
}
- uint64_t AlignVal = 0;
+ llvm::MaybeAlign AlignVal;
llvm::Value *PtrAsInt = nullptr;
if (SanOpts.has(SanitizerKind::Alignment) &&
!SkippedChecks.has(SanitizerKind::Alignment)) {
- AlignVal = Alignment.getQuantity();
+ AlignVal = Alignment.getAsMaybeAlign();
if (!Ty->isIncompleteType() && !AlignVal)
AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
/*ForPointeeType=*/true)
- .getQuantity();
+ .getAsMaybeAlign();
// The glvalue must be suitably aligned.
- if (AlignVal > 1 &&
- (!PtrToAlloca || PtrToAlloca->getAlignment() < AlignVal)) {
+ if (AlignVal && *AlignVal > llvm::Align(1) &&
+ (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
llvm::Value *Align = Builder.CreateAnd(
- PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
+ PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
llvm::Value *Aligned =
Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
if (Aligned != True)
@@ -783,12 +778,9 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
}
if (Checks.size() > 0) {
- // Make sure we're not losing information. Alignment needs to be a power of
- // 2
- assert(!AlignVal || (uint64_t)1 << llvm::Log2_64(AlignVal) == AlignVal);
llvm::Constant *StaticData[] = {
EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty),
- llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2_64(AlignVal) : 1),
+ llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
llvm::ConstantInt::get(Int8Ty, TCK)};
EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
PtrAsInt ? PtrAsInt : Ptr);
@@ -833,8 +825,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
// Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
- llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
- Address VPtrAddr(Builder.CreateBitCast(Ptr, VPtrTy), getPointerAlign());
+ Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);
@@ -878,44 +869,6 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
}
}
-/// Determine whether this expression refers to a flexible array member in a
-/// struct. We disable array bounds checks for such members.
-static bool isFlexibleArrayMemberExpr(const Expr *E) {
- // For compatibility with existing code, we treat arrays of length 0 or
- // 1 as flexible array members.
- // FIXME: This is inconsistent with the warning code in SemaChecking. Unify
- // the two mechanisms.
- const ArrayType *AT = E->getType()->castAsArrayTypeUnsafe();
- if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
- // FIXME: Sema doesn't treat [1] as a flexible array member if the bound
- // was produced by macro expansion.
- if (CAT->getSize().ugt(1))
- return false;
- } else if (!isa<IncompleteArrayType>(AT))
- return false;
-
- E = E->IgnoreParens();
-
- // A flexible array member must be the last member in the class.
- if (const auto *ME = dyn_cast<MemberExpr>(E)) {
- // FIXME: If the base type of the member expr is not FD->getParent(),
- // this should not be treated as a flexible array member access.
- if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
- // FIXME: Sema doesn't treat a T[1] union member as a flexible array
- // member, only a T[0] or T[] member gets that treatment.
- if (FD->getParent()->isUnion())
- return true;
- RecordDecl::field_iterator FI(
- DeclContext::decl_iterator(const_cast<FieldDecl *>(FD)));
- return ++FI == FD->getParent()->field_end();
- }
- } else if (const auto *IRE = dyn_cast<ObjCIvarRefExpr>(E)) {
- return IRE->getDecl()->getNextIvar() == nullptr;
- }
-
- return false;
-}
-
llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
QualType EltTy) {
ASTContext &C = getContext();
@@ -957,8 +910,11 @@ llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
/// If Base is known to point to the start of an array, return the length of
/// that array. Return 0 if the length cannot be determined.
-static llvm::Value *getArrayIndexingBound(
- CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType) {
+static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
+ const Expr *Base,
+ QualType &IndexedType,
+ LangOptions::StrictFlexArraysLevelKind
+ StrictFlexArraysLevel) {
// For the vector indexing extension, the bound is the number of elements.
if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
IndexedType = Base->getType();
@@ -969,17 +925,23 @@ static llvm::Value *getArrayIndexingBound(
if (const auto *CE = dyn_cast<CastExpr>(Base)) {
if (CE->getCastKind() == CK_ArrayToPointerDecay &&
- !isFlexibleArrayMemberExpr(CE->getSubExpr())) {
+ !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
+ StrictFlexArraysLevel)) {
+ CodeGenFunction::SanitizerScope SanScope(&CGF);
+
IndexedType = CE->getSubExpr()->getType();
const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
return CGF.Builder.getInt(CAT->getSize());
- else if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
+
+ if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
return CGF.getVLASize(VAT).NumElts;
// Ignore pass_object_size here. It's not applicable on decayed pointers.
}
}
+ CodeGenFunction::SanitizerScope SanScope(&CGF);
+
QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
IndexedType = Base->getType();
@@ -989,18 +951,248 @@ static llvm::Value *getArrayIndexingBound(
return nullptr;
}
+namespace {
+
+/// \p StructAccessBase returns the base \p Expr of a field access. It returns
+/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
+///
+/// p in p-> a.b.c
+///
+/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
+/// looking for:
+///
+/// struct s {
+/// struct s *ptr;
+/// int count;
+/// char array[] __attribute__((counted_by(count)));
+/// };
+///
+/// If we have an expression like \p p->ptr->array[index], we want the
+/// \p MemberExpr for \p p->ptr instead of \p p.
+class StructAccessBase
+ : public ConstStmtVisitor<StructAccessBase, const Expr *> {
+ const RecordDecl *ExpectedRD;
+
+ bool IsExpectedRecordDecl(const Expr *E) const {
+ QualType Ty = E->getType();
+ if (Ty->isPointerType())
+ Ty = Ty->getPointeeType();
+ return ExpectedRD == Ty->getAsRecordDecl();
+ }
+
+public:
+ StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ // NOTE: If we build C++ support for counted_by, then we'll have to handle
+ // horrors like this:
+ //
+ // struct S {
+ // int x, y;
+ // int blah[] __attribute__((counted_by(x)));
+ // } s;
+ //
+ // int foo(int index, int val) {
+ // int (S::*IHatePMDs)[] = &S::blah;
+ // (s.*IHatePMDs)[index] = val;
+ // }
+
+ const Expr *Visit(const Expr *E) {
+ return ConstStmtVisitor<StructAccessBase, const Expr *>::Visit(E);
+ }
+
+ const Expr *VisitStmt(const Stmt *S) { return nullptr; }
+
+ // These are the types we expect to return (in order of most to least
+ // likely):
+ //
+ // 1. DeclRefExpr - This is the expression for the base of the structure.
+ // It's exactly what we want to build an access to the \p counted_by
+ // field.
+ // 2. MemberExpr - This is the expression that has the same \p RecordDecl
+ // as the flexble array member's lexical enclosing \p RecordDecl. This
+ // allows us to catch things like: "p->p->array"
+ // 3. CompoundLiteralExpr - This is for people who create something
+ // heretical like (struct foo has a flexible array member):
+ //
+ // (struct foo){ 1, 2 }.blah[idx];
+ const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
+ return IsExpectedRecordDecl(E) ? E : nullptr;
+ }
+ const Expr *VisitMemberExpr(const MemberExpr *E) {
+ if (IsExpectedRecordDecl(E) && E->isArrow())
+ return E;
+ const Expr *Res = Visit(E->getBase());
+ return !Res && IsExpectedRecordDecl(E) ? E : Res;
+ }
+ const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
+ return IsExpectedRecordDecl(E) ? E : nullptr;
+ }
+ const Expr *VisitCallExpr(const CallExpr *E) {
+ return IsExpectedRecordDecl(E) ? E : nullptr;
+ }
+
+ const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
+ if (IsExpectedRecordDecl(E))
+ return E;
+ return Visit(E->getBase());
+ }
+ const Expr *VisitCastExpr(const CastExpr *E) {
+ return Visit(E->getSubExpr());
+ }
+ const Expr *VisitParenExpr(const ParenExpr *E) {
+ return Visit(E->getSubExpr());
+ }
+ const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+ const Expr *VisitUnaryDeref(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+};
+
+} // end anonymous namespace
+
+using RecIndicesTy =
+ SmallVector<std::pair<const RecordDecl *, llvm::Value *>, 8>;
+
+static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD,
+ const FieldDecl *FD, RecIndicesTy &Indices) {
+ const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
+ int64_t FieldNo = -1;
+ for (const Decl *D : RD->decls()) {
+ if (const auto *Field = dyn_cast<FieldDecl>(D)) {
+ FieldNo = Layout.getLLVMFieldNo(Field);
+ if (FD == Field) {
+ Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
+ return true;
+ }
+ }
+
+ if (const auto *Record = dyn_cast<RecordDecl>(D)) {
+ ++FieldNo;
+ if (getGEPIndicesToField(CGF, Record, FD, Indices)) {
+ if (RD->isUnion())
+ FieldNo = 0;
+ Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+/// This method is typically called in contexts where we can't generate
+/// side-effects, like in __builtin_dynamic_object_size. When finding
+/// expressions, only choose those that have either already been emitted or can
+/// be loaded without side-effects.
+///
+/// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
+/// within the top-level struct.
+/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
+llvm::Value *CodeGenFunction::EmitCountedByFieldExpr(
+ const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
+ const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();
+
+ // Find the base struct expr (i.e. p in p->a.b.c.d).
+ const Expr *StructBase = StructAccessBase(RD).Visit(Base);
+ if (!StructBase || StructBase->HasSideEffects(getContext()))
+ return nullptr;
+
+ llvm::Value *Res = nullptr;
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(StructBase)) {
+ Res = EmitDeclRefLValue(DRE).getPointer(*this);
+ Res = Builder.CreateAlignedLoad(ConvertType(DRE->getType()), Res,
+ getPointerAlign(), "dre.load");
+ } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(StructBase)) {
+ LValue LV = EmitMemberExpr(ME);
+ Address Addr = LV.getAddress(*this);
+ Res = Addr.getPointer();
+ } else if (StructBase->getType()->isPointerType()) {
+ LValueBaseInfo BaseInfo;
+ TBAAAccessInfo TBAAInfo;
+ Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
+ Res = Addr.getPointer();
+ } else {
+ return nullptr;
+ }
+
+ llvm::Value *Zero = Builder.getInt32(0);
+ RecIndicesTy Indices;
+
+ getGEPIndicesToField(*this, RD, CountDecl, Indices);
+
+ for (auto I = Indices.rbegin(), E = Indices.rend(); I != E; ++I)
+ Res = Builder.CreateInBoundsGEP(
+ ConvertType(QualType(I->first->getTypeForDecl(), 0)), Res,
+ {Zero, I->second}, "..counted_by.gep");
+
+ return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), Res,
+ getIntAlign(), "..counted_by.load");
+}
+
+const FieldDecl *CodeGenFunction::FindCountedByField(const FieldDecl *FD) {
+ if (!FD || !FD->hasAttr<CountedByAttr>())
+ return nullptr;
+
+ const auto *CBA = FD->getAttr<CountedByAttr>();
+ if (!CBA)
+ return nullptr;
+
+ auto GetNonAnonStructOrUnion =
+ [](const RecordDecl *RD) -> const RecordDecl * {
+ while (RD && RD->isAnonymousStructOrUnion()) {
+ const auto *R = dyn_cast<RecordDecl>(RD->getDeclContext());
+ if (!R)
+ return nullptr;
+ RD = R;
+ }
+ return RD;
+ };
+ const RecordDecl *EnclosingRD = GetNonAnonStructOrUnion(FD->getParent());
+ if (!EnclosingRD)
+ return nullptr;
+
+ DeclarationName DName(CBA->getCountedByField());
+ DeclContext::lookup_result Lookup = EnclosingRD->lookup(DName);
+
+ if (Lookup.empty())
+ return nullptr;
+
+ const NamedDecl *ND = Lookup.front();
+ if (const auto *IFD = dyn_cast<IndirectFieldDecl>(ND))
+ ND = IFD->getAnonField();
+
+ return dyn_cast<FieldDecl>(ND);
+}
+
void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
llvm::Value *Index, QualType IndexType,
bool Accessed) {
assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
"should not be called unless adding bounds checks");
- SanitizerScope SanScope(this);
-
+ const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
+ getLangOpts().getStrictFlexArraysLevel();
QualType IndexedType;
- llvm::Value *Bound = getArrayIndexingBound(*this, Base, IndexedType);
+ llvm::Value *Bound =
+ getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);
+
+ EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
+}
+
+void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
+ llvm::Value *Index,
+ QualType IndexType,
+ QualType IndexedType, bool Accessed) {
if (!Bound)
return;
+ SanitizerScope SanScope(this);
+
bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);
@@ -1016,7 +1208,6 @@ void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
SanitizerHandler::OutOfBounds, StaticData, Index);
}
-
CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre) {
@@ -1067,11 +1258,10 @@ void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
// LValue Expression Emission
//===----------------------------------------------------------------------===//
-/// EmitPointerWithAlignment - Given an expression of pointer type, try to
-/// derive a more accurate bound on the alignment of the pointer.
-Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
- LValueBaseInfo *BaseInfo,
- TBAAAccessInfo *TBAAInfo) {
+static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
+ TBAAAccessInfo *TBAAInfo,
+ KnownNonNull_t IsKnownNonNull,
+ CodeGenFunction &CGF) {
// We allow this with ObjC object pointers because of fragile ABIs.
assert(E->getType()->isPointerType() ||
E->getType()->isObjCObjectPointerType());
@@ -1080,7 +1270,7 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
// Casts:
if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
- CGM.EmitExplicitCastExprType(ECE, this);
+ CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
switch (CE->getCastKind()) {
// Non-converting casts (but not C's implicit conversion from void*).
@@ -1093,47 +1283,51 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
LValueBaseInfo InnerBaseInfo;
TBAAAccessInfo InnerTBAAInfo;
- Address Addr = EmitPointerWithAlignment(CE->getSubExpr(),
- &InnerBaseInfo,
- &InnerTBAAInfo);
+ Address Addr = CGF.EmitPointerWithAlignment(
+ CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
if (BaseInfo) *BaseInfo = InnerBaseInfo;
if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;
if (isa<ExplicitCastExpr>(CE)) {
LValueBaseInfo TargetTypeBaseInfo;
TBAAAccessInfo TargetTypeTBAAInfo;
- CharUnits Align = CGM.getNaturalPointeeTypeAlignment(
+ CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment(
E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
if (TBAAInfo)
- *TBAAInfo = CGM.mergeTBAAInfoForCast(*TBAAInfo,
- TargetTypeTBAAInfo);
+ *TBAAInfo =
+ CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
// If the source l-value is opaque, honor the alignment of the
// casted-to type.
if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
if (BaseInfo)
BaseInfo->mergeForCast(TargetTypeBaseInfo);
- Addr = Address(Addr.getPointer(), Align);
+ Addr = Address(Addr.getPointer(), Addr.getElementType(), Align,
+ IsKnownNonNull);
}
}
- if (SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
+ if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
CE->getCastKind() == CK_BitCast) {
if (auto PT = E->getType()->getAs<PointerType>())
- EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr.getPointer(),
- /*MayBeNull=*/true,
- CodeGenFunction::CFITCK_UnrelatedCast,
- CE->getBeginLoc());
+ CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
+ /*MayBeNull=*/true,
+ CodeGenFunction::CFITCK_UnrelatedCast,
+ CE->getBeginLoc());
}
- return CE->getCastKind() != CK_AddressSpaceConversion
- ? Builder.CreateBitCast(Addr, ConvertType(E->getType()))
- : Builder.CreateAddrSpaceCast(Addr,
- ConvertType(E->getType()));
+
+ llvm::Type *ElemTy =
+ CGF.ConvertTypeForMem(E->getType()->getPointeeType());
+ Addr = Addr.withElementType(ElemTy);
+ if (CE->getCastKind() == CK_AddressSpaceConversion)
+ Addr = CGF.Builder.CreateAddrSpaceCast(Addr,
+ CGF.ConvertType(E->getType()));
+ return Addr;
}
break;
// Array-to-pointer decay.
case CK_ArrayToPointerDecay:
- return EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);
+ return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);
// Derived-to-base conversions.
case CK_UncheckedDerivedToBase:
@@ -1142,13 +1336,15 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
// conservatively pretend that the complete object is of the base class
// type.
if (TBAAInfo)
- *TBAAInfo = CGM.getTBAAAccessInfo(E->getType());
- Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), BaseInfo);
+ *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
+ Address Addr = CGF.EmitPointerWithAlignment(
+ CE->getSubExpr(), BaseInfo, nullptr,
+ (KnownNonNull_t)(IsKnownNonNull ||
+ CE->getCastKind() == CK_UncheckedDerivedToBase));
auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
- return GetAddressOfBaseClass(Addr, Derived,
- CE->path_begin(), CE->path_end(),
- ShouldNullCheckClassCastValue(CE),
- CE->getExprLoc());
+ return CGF.GetAddressOfBaseClass(
+ Addr, Derived, CE->path_begin(), CE->path_end(),
+ CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
}
// TODO: Is there any reason to treat base-to-derived conversions
@@ -1161,10 +1357,26 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
// Unary &.
if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
if (UO->getOpcode() == UO_AddrOf) {
- LValue LV = EmitLValue(UO->getSubExpr());
+ LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
+ if (BaseInfo) *BaseInfo = LV.getBaseInfo();
+ if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
+ return LV.getAddress(CGF);
+ }
+ }
+
+ // std::addressof and variants.
+ if (auto *Call = dyn_cast<CallExpr>(E)) {
+ switch (Call->getBuiltinCallee()) {
+ default:
+ break;
+ case Builtin::BIaddressof:
+ case Builtin::BI__addressof:
+ case Builtin::BI__builtin_addressof: {
+ LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
if (BaseInfo) *BaseInfo = LV.getBaseInfo();
if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
- return LV.getAddress(*this);
+ return LV.getAddress(CGF);
+ }
}
}
@@ -1172,8 +1384,21 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
// Otherwise, use the alignment of the type.
CharUnits Align =
- CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo);
- return Address(EmitScalarExpr(E), Align);
+ CGF.CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo);
+ llvm::Type *ElemTy = CGF.ConvertTypeForMem(E->getType()->getPointeeType());
+ return Address(CGF.EmitScalarExpr(E), ElemTy, Align, IsKnownNonNull);
+}
+
+/// EmitPointerWithAlignment - Given an expression of pointer type, try to
+/// derive a more accurate bound on the alignment of the pointer.
+Address CodeGenFunction::EmitPointerWithAlignment(
+ const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
+ KnownNonNull_t IsKnownNonNull) {
+ Address Addr =
+ ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
+ if (IsKnownNonNull && !Addr.isKnownNonNull())
+ Addr.setKnownNonNull();
+ return Addr;
}
llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
@@ -1218,9 +1443,10 @@ RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
const char *Name) {
ErrorUnsupported(E, Name);
- llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
- return MakeAddrLValue(Address(llvm::UndefValue::get(Ty), CharUnits::One()),
- E->getType());
+ llvm::Type *ElTy = ConvertType(E->getType());
+ llvm::Type *Ty = UnqualPtrTy;
+ return MakeAddrLValue(
+ Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
}
bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
@@ -1282,7 +1508,24 @@ LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
/// type of the same size of the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
///
-LValue CodeGenFunction::EmitLValue(const Expr *E) {
+LValue CodeGenFunction::EmitLValue(const Expr *E,
+ KnownNonNull_t IsKnownNonNull) {
+ LValue LV = EmitLValueHelper(E, IsKnownNonNull);
+ if (IsKnownNonNull && !LV.isKnownNonNull())
+ LV.setKnownNonNull();
+ return LV;
+}
+
+static QualType getConstantExprReferredType(const FullExpr *E,
+ const ASTContext &Ctx) {
+ const Expr *SE = E->getSubExpr()->IgnoreImplicit();
+ if (isa<OpaqueValueExpr>(SE))
+ return SE->getType();
+ return cast<CallExpr>(SE)->getCallReturnType(Ctx)->getPointeeType();
+}
+
+LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
+ KnownNonNull_t IsKnownNonNull) {
ApplyDebugLocation DL(*this, E);
switch (E->getStmtClass()) {
default: return EmitUnsupportedLValue(E, "l-value expression");
@@ -1310,7 +1553,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::UserDefinedLiteralClass:
return EmitCallExprLValue(cast<CallExpr>(E));
case Expr::CXXRewrittenBinaryOperatorClass:
- return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm());
+ return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
+ IsKnownNonNull);
case Expr::VAArgExprClass:
return EmitVAArgExprLValue(cast<VAArgExpr>(E));
case Expr::DeclRefExprClass:
@@ -1318,16 +1562,16 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::ConstantExprClass: {
const ConstantExpr *CE = cast<ConstantExpr>(E);
if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
- QualType RetType = cast<CallExpr>(CE->getSubExpr()->IgnoreImplicit())
- ->getCallReturnType(getContext());
+ QualType RetType = getConstantExprReferredType(CE, getContext());
return MakeNaturalAlignAddrLValue(Result, RetType);
}
- return EmitLValue(cast<ConstantExpr>(E)->getSubExpr());
+ return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
}
case Expr::ParenExprClass:
- return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
+ return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
case Expr::GenericSelectionExprClass:
- return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
+ return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
+ IsKnownNonNull);
case Expr::PredefinedExprClass:
return EmitPredefinedLValue(cast<PredefinedExpr>(E));
case Expr::StringLiteralClass:
@@ -1351,14 +1595,16 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::ExprWithCleanupsClass: {
const auto *cleanups = cast<ExprWithCleanups>(E);
RunCleanupsScope Scope(*this);
- LValue LV = EmitLValue(cleanups->getSubExpr());
+ LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
if (LV.isSimple()) {
// Defend against branches out of gnu statement expressions surrounded by
// cleanups.
- llvm::Value *V = LV.getPointer(*this);
+ Address Addr = LV.getAddress(*this);
+ llvm::Value *V = Addr.getPointer();
Scope.ForceCleanup({&V});
- return LValue::MakeAddr(Address(V, LV.getAlignment()), LV.getType(),
- getContext(), LV.getBaseInfo(), LV.getTBAAInfo());
+ return LValue::MakeAddr(Addr.withPointer(V, Addr.isKnownNonNull()),
+ LV.getType(), getContext(), LV.getBaseInfo(),
+ LV.getTBAAInfo());
}
// FIXME: Is it possible to create an ExprWithCleanups that produces a
// bitfield lvalue or some other non-simple lvalue?
@@ -1368,12 +1614,12 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::CXXDefaultArgExprClass: {
auto *DAE = cast<CXXDefaultArgExpr>(E);
CXXDefaultArgExprScope Scope(*this, DAE);
- return EmitLValue(DAE->getExpr());
+ return EmitLValue(DAE->getExpr(), IsKnownNonNull);
}
case Expr::CXXDefaultInitExprClass: {
auto *DIE = cast<CXXDefaultInitExpr>(E);
CXXDefaultInitExprScope Scope(*this, DIE);
- return EmitLValue(DIE->getExpr());
+ return EmitLValue(DIE->getExpr(), IsKnownNonNull);
}
case Expr::CXXTypeidExprClass:
return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
@@ -1394,6 +1640,8 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitOMPArraySectionExpr(cast<OMPArraySectionExpr>(E));
case Expr::ExtVectorElementExprClass:
return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
+ case Expr::CXXThisExprClass:
+ return MakeAddrLValue(LoadCXXThisAddress(), E->getType());
case Expr::MemberExprClass:
return EmitMemberExpr(cast<MemberExpr>(E));
case Expr::CompoundLiteralExprClass:
@@ -1403,11 +1651,12 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::BinaryConditionalOperatorClass:
return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
case Expr::ChooseExprClass:
- return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr());
+ return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
case Expr::OpaqueValueExprClass:
return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
case Expr::SubstNonTypeTemplateParmExprClass:
- return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
+ return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
+ IsKnownNonNull);
case Expr::ImplicitCastExprClass:
case Expr::CStyleCastExprClass:
case Expr::CXXFunctionalCastExprClass:
@@ -1629,21 +1878,7 @@ static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
} else {
const EnumDecl *ED = ET->getDecl();
- llvm::Type *LTy = CGF.ConvertTypeForMem(ED->getIntegerType());
- unsigned Bitwidth = LTy->getScalarSizeInBits();
- unsigned NumNegativeBits = ED->getNumNegativeBits();
- unsigned NumPositiveBits = ED->getNumPositiveBits();
-
- if (NumNegativeBits) {
- unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
- assert(NumBits <= Bitwidth);
- End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
- Min = -End;
- } else {
- assert(NumPositiveBits <= Bitwidth);
- End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
- Min = llvm::APInt(Bitwidth, 0);
- }
+ ED->getValueRange(End, Min);
}
return true;
}
@@ -1711,27 +1946,46 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
LValueBaseInfo BaseInfo,
TBAAAccessInfo TBAAInfo,
bool isNontemporal) {
- if (!CGM.getCodeGenOpts().PreserveVec3Type) {
- // For better performance, handle vector loads differently.
- if (Ty->isVectorType()) {
- const llvm::Type *EltTy = Addr.getElementType();
-
- const auto *VTy = cast<llvm::FixedVectorType>(EltTy);
-
- // Handle vectors of size 3 like size 4 for better performance.
- if (VTy->getNumElements() == 3) {
-
- // Bitcast to vec4 type.
- auto *vec4Ty = llvm::FixedVectorType::get(VTy->getElementType(), 4);
- Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4");
- // Now load value.
- llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
-
- // Shuffle vector to get vec3.
- V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2},
- "extractVec");
- return EmitFromMemory(V, Ty);
- }
+ if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getPointer()))
+ if (GV->isThreadLocal())
+ Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
+ NotKnownNonNull);
+
+ if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
+ // Boolean vectors use `iN` as storage type.
+ if (ClangVecTy->isExtVectorBoolType()) {
+ llvm::Type *ValTy = ConvertType(Ty);
+ unsigned ValNumElems =
+ cast<llvm::FixedVectorType>(ValTy)->getNumElements();
+ // Load the `iP` storage object (P is the padded vector size).
+ auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
+ const auto *RawIntTy = RawIntV->getType();
+ assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
+ // Bitcast iP --> <P x i1>.
+ auto *PaddedVecTy = llvm::FixedVectorType::get(
+ Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
+ llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
+ // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
+ V = emitBoolVecConversion(V, ValNumElems, "extractvec");
+
+ return EmitFromMemory(V, Ty);
+ }
+
+ // Handle vectors of size 3 like size 4 for better performance.
+ const llvm::Type *EltTy = Addr.getElementType();
+ const auto *VTy = cast<llvm::FixedVectorType>(EltTy);
+
+ if (!CGM.getCodeGenOpts().PreserveVec3Type && VTy->getNumElements() == 3) {
+
+ llvm::VectorType *vec4Ty =
+ llvm::FixedVectorType::get(VTy->getElementType(), 4);
+ Address Cast = Addr.withElementType(vec4Ty);
+ // Now load value.
+ llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
+
+ // Shuffle vector to get vec3.
+ V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2}, "extractVec");
+ return EmitFromMemory(V, Ty);
}
}
@@ -1746,7 +2000,7 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
if (isNontemporal) {
llvm::MDNode *Node = llvm::MDNode::get(
Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
- Load->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
+ Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
}
CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
@@ -1755,8 +2009,11 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
// In order to prevent the optimizer from throwing away the check, don't
// attach range metadata to the load.
} else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
- if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
+ if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
+ Load->setMetadata(llvm::LLVMContext::MD_noundef,
+ llvm::MDNode::get(getLLVMContext(), std::nullopt));
+ }
return EmitFromMemory(Load, Ty);
}
@@ -1782,6 +2039,17 @@ llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
"wrong value rep of bool");
return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
}
+ if (Ty->isExtVectorBoolType()) {
+ const auto *RawIntTy = Value->getType();
+ // Bitcast iP --> <P x i1>.
+ auto *PaddedVecTy = llvm::FixedVectorType::get(
+ Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
+ auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
+ // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
+ llvm::Type *ValTy = ConvertType(Ty);
+ unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
+ return emitBoolVecConversion(V, ValNumElems, "extractvec");
+ }
return Value;
}
@@ -1790,22 +2058,20 @@ llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
// MatrixType), if it points to a array (the memory type of MatrixType).
static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF,
bool IsVector = true) {
- auto *ArrayTy = dyn_cast<llvm::ArrayType>(
- cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType());
+ auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
if (ArrayTy && IsVector) {
auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
ArrayTy->getNumElements());
- return Address(CGF.Builder.CreateElementBitCast(Addr, VectorTy));
+ return Addr.withElementType(VectorTy);
}
- auto *VectorTy = dyn_cast<llvm::VectorType>(
- cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType());
+ auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
if (VectorTy && !IsVector) {
auto *ArrayTy = llvm::ArrayType::get(
VectorTy->getElementType(),
cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
- return Address(CGF.Builder.CreateElementBitCast(Addr, ArrayTy));
+ return Addr.withElementType(ArrayTy);
}
return Addr;
@@ -1828,11 +2094,23 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
LValueBaseInfo BaseInfo,
TBAAAccessInfo TBAAInfo,
bool isInit, bool isNontemporal) {
- if (!CGM.getCodeGenOpts().PreserveVec3Type) {
- // Handle vectors differently to get better performance.
- if (Ty->isVectorType()) {
- llvm::Type *SrcTy = Value->getType();
- auto *VecTy = dyn_cast<llvm::VectorType>(SrcTy);
+ if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getPointer()))
+ if (GV->isThreadLocal())
+ Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
+ NotKnownNonNull);
+
+ llvm::Type *SrcTy = Value->getType();
+ if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
+ auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy);
+ if (VecTy && ClangVecTy->isExtVectorBoolType()) {
+ auto *MemIntTy = cast<llvm::IntegerType>(Addr.getElementType());
+ // Expand to the memory bit width.
+ unsigned MemNumElems = MemIntTy->getPrimitiveSizeInBits();
+ // <N x i1> --> <P x i1>.
+ Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
+ // <P x i1> --> iP.
+ Value = Builder.CreateBitCast(Value, MemIntTy);
+ } else if (!CGM.getCodeGenOpts().PreserveVec3Type) {
// Handle vec3 special.
if (VecTy && cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
// Our source is a vec3, do a shuffle vector to make it a vec4.
@@ -1841,7 +2119,7 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
}
if (Addr.getElementType() != SrcTy) {
- Addr = Builder.CreateElementBitCast(Addr, SrcTy, "storetmp");
+ Addr = Addr.withElementType(SrcTy);
}
}
}
@@ -1861,7 +2139,7 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
llvm::MDNode *Node =
llvm::MDNode::get(Store->getContext(),
llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
- Store->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
+ Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
}
CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
@@ -1939,10 +2217,15 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
return EmitLoadOfGlobalRegLValue(LV);
if (LV.isMatrixElt()) {
+ llvm::Value *Idx = LV.getMatrixIdx();
+ if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
+ const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
+ llvm::MatrixBuilder MB(Builder);
+ MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
+ }
llvm::LoadInst *Load =
Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
- return RValue::get(
- Builder.CreateExtractElement(Load, LV.getMatrixIdx(), "matrixext"));
+ return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
}
assert(LV.isBitField() && "Unknown LValue type!");
@@ -1990,6 +2273,14 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
LV.isVolatileQualified());
+ // HLSL allows treating scalars as one-element vectors. Converting the scalar
+ // IR value to a vector here allows the rest of codegen to behave as normal.
+ if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
+ llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
+ llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
+ Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
+ }
+
const llvm::Constant *Elts = LV.getExtVectorElts();
// If the result of the expression is a non-vector type, we must be extracting
@@ -2018,9 +2309,7 @@ Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
- Address CastToPointerElement =
- Builder.CreateElementBitCast(VectorAddress, VectorElementTy,
- "conv.ptr.element");
+ Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);
const llvm::Constant *Elts = LV.getExtVectorElts();
unsigned ix = getAccessedFieldNo(0, Elts);
@@ -2064,8 +2353,19 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
// Read/modify/write the vector, inserting the new element.
llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
Dst.isVolatileQualified());
+ auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
+ if (IRStoreTy) {
+ auto *IRVecTy = llvm::FixedVectorType::get(
+ Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
+ Vec = Builder.CreateBitCast(Vec, IRVecTy);
+ // iN --> <N x i1>.
+ }
Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
Dst.getVectorIdx(), "vecins");
+ if (IRStoreTy) {
+ // <N x i1> --> <iN>.
+ Vec = Builder.CreateBitCast(Vec, IRStoreTy);
+ }
Builder.CreateStore(Vec, Dst.getVectorAddress(),
Dst.isVolatileQualified());
return;
@@ -2080,9 +2380,15 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
return EmitStoreThroughGlobalRegLValue(Src, Dst);
if (Dst.isMatrixElt()) {
- llvm::Value *Vec = Builder.CreateLoad(Dst.getMatrixAddress());
- Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
- Dst.getMatrixIdx(), "matins");
+ llvm::Value *Idx = Dst.getMatrixIdx();
+ if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
+ const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
+ llvm::MatrixBuilder MB(Builder);
+ MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
+ }
+ llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
+ llvm::Value *Vec =
+ Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
Builder.CreateStore(Vec, Dst.getMatrixAddress(),
Dst.isVolatileQualified());
return;
@@ -2244,10 +2550,20 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
LValue Dst) {
+ // HLSL allows storing to scalar values through ExtVector component LValues.
+ // To support this we need to handle the case where the destination address is
+ // a scalar.
+ Address DstAddr = Dst.getExtVectorAddress();
+ if (!DstAddr.getElementType()->isVectorTy()) {
+ assert(!Dst.getType()->isVectorType() &&
+ "this should only occur for non-vector l-values");
+ Builder.CreateStore(Src.getScalarVal(), DstAddr, Dst.isVolatileQualified());
+ return;
+ }
+
// This access turns into a read/modify/write of the vector. Load the input
// value now.
- llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddress(),
- Dst.isVolatileQualified());
+ llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified());
const llvm::Constant *Elts = Dst.getExtVectorElts();
llvm::Value *SrcVal = Src.getScalarVal();
@@ -2295,7 +2611,8 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
llvm_unreachable("unexpected shorten vector length");
}
} else {
- // If the Src is a scalar (not a vector) it must be updating one element.
+ // If the Src is a scalar (not a vector), and the target is a vector it must
+ // be updating one element.
unsigned InIdx = getAccessedFieldNo(0, Elts);
llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
@@ -2428,14 +2745,6 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
}
}
-static llvm::Value *
-EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
- llvm::Value *V, llvm::Type *IRType,
- StringRef Name = StringRef()) {
- unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
- return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
-}
-
static LValue EmitThreadPrivateVarDeclLValue(
CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
llvm::Type *RealVarTy, SourceLocation Loc) {
@@ -2446,22 +2755,24 @@ static LValue EmitThreadPrivateVarDeclLValue(
Addr =
CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
- Addr = CGF.Builder.CreateElementBitCast(Addr, RealVarTy);
+ Addr = Addr.withElementType(RealVarTy);
return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}
static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF,
const VarDecl *VD, QualType T) {
- llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+ std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
- // Return an invalid address if variable is MT_To and unified
- // memory is not enabled. For all other cases: MT_Link and
- // MT_To with unified memory, return a valid address.
- if (!Res || (*Res == OMPDeclareTargetDeclAttr::MT_To &&
+ // Return an invalid address if variable is MT_To (or MT_Enter starting with
+ // OpenMP 5.2) and unified memory is not enabled. For all other cases: MT_Link
+ // and MT_To (or MT_Enter) with unified memory, return a valid address.
+ if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
+ *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
!CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory()))
return Address::invalid();
assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
- (*Res == OMPDeclareTargetDeclAttr::MT_To &&
+ ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
+ *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) &&
"Expected link clause OR to clause with unified memory enabled.");
QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
@@ -2477,10 +2788,11 @@ CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile());
CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
+ QualType PointeeType = RefLVal.getType()->getPointeeType();
CharUnits Align = CGM.getNaturalTypeAlignment(
- RefLVal.getType()->getPointeeType(), PointeeBaseInfo, PointeeTBAAInfo,
+ PointeeType, PointeeBaseInfo, PointeeTBAAInfo,
/* forPointeeType= */ true);
- return Address(Load, Align);
+ return Address(Load, ConvertTypeForMem(PointeeType), Align);
}
LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
@@ -2497,9 +2809,10 @@ Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
LValueBaseInfo *BaseInfo,
TBAAAccessInfo *TBAAInfo) {
llvm::Value *Addr = Builder.CreateLoad(Ptr);
- return Address(Addr, CGM.getNaturalTypeAlignment(PtrTy->getPointeeType(),
- BaseInfo, TBAAInfo,
- /*forPointeeType=*/true));
+ return Address(Addr, ConvertTypeForMem(PtrTy->getPointeeType()),
+ CGM.getNaturalTypeAlignment(PtrTy->getPointeeType(), BaseInfo,
+ TBAAInfo,
+ /*forPointeeType=*/true));
}
LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
@@ -2520,17 +2833,20 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
// Check if the variable is marked as declare target with link clause in
// device codegen.
- if (CGF.getLangOpts().OpenMPIsDevice) {
+ if (CGF.getLangOpts().OpenMPIsTargetDevice) {
Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
if (Addr.isValid())
return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}
llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
+
+ if (VD->getTLSKind() != VarDecl::TLS_None)
+ V = CGF.Builder.CreateThreadLocalAddress(V);
+
llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
- V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
- Address Addr(V, Alignment);
+ Address Addr(V, RealVarTy, Alignment);
// Emit reference to the private copy of the variable if it is an OpenMP
// threadprivate variable.
if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
@@ -2555,19 +2871,6 @@ static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM,
}
llvm::Constant *V = CGM.GetAddrOfFunction(GD);
- if (!FD->hasPrototype()) {
- if (const FunctionProtoType *Proto =
- FD->getType()->getAs<FunctionProtoType>()) {
- // Ugly case: for a K&R-style definition, the type of the definition
- // isn't the same as the type of a use. Correct for this with a
- // bitcast.
- QualType NoProtoType =
- CGM.getContext().getFunctionNoProtoType(Proto->getReturnType());
- NoProtoType = CGM.getContext().getPointerType(NoProtoType);
- V = llvm::ConstantExpr::getBitCast(V,
- CGM.getTypes().ConvertType(NoProtoType));
- }
- }
return V;
}
@@ -2582,9 +2885,8 @@ static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
llvm::Value *ThisValue) {
- QualType TagType = CGF.getContext().getTagDeclType(FD->getParent());
- LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType);
- return CGF.EmitLValueForField(LV, FD);
+
+ return CGF.EmitLValueForLambdaField(FD, ThisValue);
}
/// Named Registers are named metadata pointing to the register name
@@ -2612,7 +2914,7 @@ static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
llvm::Value *Ptr =
llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
- return LValue::MakeGlobalReg(Address(Ptr, Alignment), VD->getType());
+ return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
}
/// Determine whether we can emit a reference to \p VD from the current
@@ -2620,8 +2922,7 @@ static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
/// this context.
static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF,
const DeclRefExpr *E,
- const VarDecl *VD,
- bool IsConstant) {
+ const VarDecl *VD) {
// For a variable declared in an enclosing scope, do not emit a spurious
// reference even if we have a capture, as that will emit an unwarranted
// reference to our capture state, and will likely generate worse code than
@@ -2654,7 +2955,7 @@ static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF,
// We can emit a spurious reference only if the linkage implies that we'll
// be emitting a non-interposable symbol that will be retained until link
// time.
- switch (CGF.CGM.getLLVMLinkageVarDefinition(VD, IsConstant)) {
+ switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
case llvm::GlobalValue::ExternalLinkage:
case llvm::GlobalValue::LinkOnceODRLinkage:
case llvm::GlobalValue::WeakODRLinkage:
@@ -2685,7 +2986,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
// constant value directly instead.
if (E->isNonOdrUse() == NOUR_Constant &&
(VD->getType()->isReferenceType() ||
- !canEmitSpuriousReferenceToVariable(*this, E, VD, true))) {
+ !canEmitSpuriousReferenceToVariable(*this, E, VD))) {
VD->getAnyInitializer(VD);
llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
E->getLocation(), *VD->evaluateValue(), VD->getType());
@@ -2698,9 +2999,8 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
getContext().getDeclAlign(VD));
llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
auto *PTy = llvm::PointerType::get(
- VarTy, getContext().getTargetAddressSpace(VD->getType()));
- if (PTy != Addr.getType())
- Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy);
+ VarTy, getTypes().getTargetAddressSpace(VD->getType()));
+ Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
} else {
// Should we be using the alignment of the constant pointer we emitted?
CharUnits Alignment =
@@ -2708,7 +3008,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
/* BaseInfo= */ nullptr,
/* TBAAInfo= */ nullptr,
/* forPointeeType= */ true);
- Addr = Address(Val, Alignment);
+ Addr = Address(Val, ConvertTypeForMem(E->getType()), Alignment);
}
return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
}
@@ -2739,8 +3039,10 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
LValue CapLVal =
EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
CapturedStmtInfo->getContextValue());
+ Address LValueAddress = CapLVal.getAddress(*this);
CapLVal = MakeAddrLValue(
- Address(CapLVal.getPointer(*this), getContext().getDeclAlign(VD)),
+ Address(LValueAddress.getPointer(), LValueAddress.getElementType(),
+ getContext().getDeclAlign(VD)),
CapLVal.getType(), LValueBaseInfo(AlignmentSource::Decl),
CapLVal.getTBAAInfo());
// Mark lvalue as nontemporal if the variable is marked as nontemporal
@@ -2785,15 +3087,20 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
// Otherwise, it might be static local we haven't emitted yet for
// some reason; most likely, because it's in an outer function.
} else if (VD->isStaticLocal()) {
- addr = Address(CGM.getOrCreateStaticVarDecl(
- *VD, CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false)),
- getContext().getDeclAlign(VD));
+ llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
+ *VD, CGM.getLLVMLinkageVarDefinition(VD));
+ addr = Address(
+ var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));
// No other cases for now.
} else {
llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
}
+ // Handle threadlocal function locals.
+ if (VD->getTLSKind() != VarDecl::TLS_None)
+ addr = addr.withPointer(
+ Builder.CreateThreadLocalAddress(addr.getPointer()), NotKnownNonNull);
// Check for OpenMP threadprivate variables.
if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
@@ -2851,8 +3158,13 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
// FIXME: While we're emitting a binding from an enclosing scope, all other
// DeclRefExprs we see should be implicitly treated as if they also refer to
// an enclosing scope.
- if (const auto *BD = dyn_cast<BindingDecl>(ND))
+ if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
+ if (E->refersToEnclosingVariableOrCapture()) {
+ auto *FD = LambdaCaptureFields.lookup(BD);
+ return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
+ }
return EmitLValue(BD->getBinding());
+ }
// We can form DeclRefExprs naming GUID declarations when reconstituting
// non-type template parameters into expressions.
@@ -2860,9 +3172,20 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T,
AlignmentSource::Decl);
- if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND))
- return MakeAddrLValue(CGM.GetAddrOfTemplateParamObject(TPO), T,
- AlignmentSource::Decl);
+ if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
+ auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO);
+ auto AS = getLangASFromTargetAS(ATPO.getAddressSpace());
+
+ if (AS != T.getAddressSpace()) {
+ auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace());
+ auto PtrTy = ATPO.getElementType()->getPointerTo(TargetAS);
+ auto ASC = getTargetHooks().performAddrSpaceCast(
+ CGM, ATPO.getPointer(), AS, T.getAddressSpace(), PtrTy);
+ ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment());
+ }
+
+ return MakeAddrLValue(ATPO, T, AlignmentSource::Decl);
+ }
llvm_unreachable("Unhandled DeclRefExpr");
}
@@ -2948,7 +3271,7 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
auto SL = E->getFunctionName();
assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
StringRef FnName = CurFn->getName();
- if (FnName.startswith("\01"))
+ if (FnName.starts_with("\01"))
FnName = FnName.substr(1);
StringRef NameItems[] = {
PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
@@ -3001,10 +3324,9 @@ llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
// Format the type name as if for a diagnostic, including quotes and
// optionally an 'aka'.
SmallString<32> Buffer;
- CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
- (intptr_t)T.getAsOpaquePtr(),
- StringRef(), StringRef(), None, Buffer,
- None);
+ CGM.getDiags().ConvertArgToString(
+ DiagnosticsEngine::ak_qualtype, (intptr_t)T.getAsOpaquePtr(), StringRef(),
+ StringRef(), std::nullopt, Buffer, std::nullopt);
llvm::Constant *Components[] = {
Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
@@ -3033,7 +3355,7 @@ llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
// Floating-point types which fit into intptr_t are bitcast to integers
// and then passed directly (after zero-extension, if necessary).
if (V->getType()->isFloatingPointTy()) {
- unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedSize();
+ unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
if (Bits <= TargetTy->getIntegerBitWidth())
V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
Bits));
@@ -3097,7 +3419,8 @@ llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
auto FilenameGV =
CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
- cast<llvm::GlobalVariable>(FilenameGV.getPointer()));
+ cast<llvm::GlobalVariable>(
+ FilenameGV.getPointer()->stripPointerCasts()));
Filename = FilenameGV.getPointer();
Line = PLoc.getLine();
Column = PLoc.getColumn();
@@ -3127,7 +3450,7 @@ enum class CheckRecoverableKind {
static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
assert(Kind.countPopulation() == 1);
- if (Kind == SanitizerKind::Function || Kind == SanitizerKind::Vptr)
+ if (Kind == SanitizerKind::Vptr)
return CheckRecoverableKind::AlwaysRecoverable;
else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable)
return CheckRecoverableKind::Unrecoverable;
@@ -3155,7 +3478,7 @@ static void emitCheckHandlerCall(CodeGenFunction &CGF,
CheckRecoverableKind RecoverKind, bool IsFatal,
llvm::BasicBlock *ContBB) {
assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
- Optional<ApplyDebugLocation> DL;
+ std::optional<ApplyDebugLocation> DL;
if (!CGF.Builder.getCurrentDebugLocation()) {
// Ensure that the call has at least an artificial debug location.
DL.emplace(CGF, SourceLocation());
@@ -3175,12 +3498,12 @@ static void emitCheckHandlerCall(CodeGenFunction &CGF,
bool MayReturn =
!IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
- llvm::AttrBuilder B;
+ llvm::AttrBuilder B(CGF.getLLVMContext());
if (!MayReturn) {
B.addAttribute(llvm::Attribute::NoReturn)
.addAttribute(llvm::Attribute::NoUnwind);
}
- B.addAttribute(llvm::Attribute::UWTable);
+ B.addUWTableAttr(llvm::UWTableKind::Default);
llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
FnType, FnName,
@@ -3203,7 +3526,7 @@ void CodeGenFunction::EmitCheck(
assert(IsSanitizerScope);
assert(Checked.size() > 0);
assert(CheckHandler >= 0 &&
- size_t(CheckHandler) < llvm::array_lengthof(SanitizerHandlers));
+ size_t(CheckHandler) < std::size(SanitizerHandlers));
const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
llvm::Value *FatalCond = nullptr;
@@ -3265,13 +3588,15 @@ void CodeGenFunction::EmitCheck(
// Emit handler arguments and create handler function type.
if (!StaticArgs.empty()) {
llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
- auto *InfoPtr =
- new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
- llvm::GlobalVariable::PrivateLinkage, Info);
+ auto *InfoPtr = new llvm::GlobalVariable(
+ CGM.getModule(), Info->getType(), false,
+ llvm::GlobalVariable::PrivateLinkage, Info, "", nullptr,
+ llvm::GlobalVariable::NotThreadLocal,
+ CGM.getDataLayout().getDefaultGlobalsAddressSpace());
InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
- Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy));
- ArgTypes.push_back(Int8PtrTy);
+ Args.push_back(InfoPtr);
+ ArgTypes.push_back(Args.back()->getType());
}
for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
@@ -3336,8 +3661,7 @@ void CodeGenFunction::EmitCfiSlowPathCheck(
"__cfi_slowpath_diag",
llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
false));
- CheckCall = Builder.CreateCall(
- SlowPathFn, {TypeId, Ptr, Builder.CreateBitCast(InfoPtr, Int8PtrTy)});
+ CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});
} else {
SlowPathFn = CGM.getModule().getOrInsertFunction(
"__cfi_slowpath",
@@ -3360,14 +3684,12 @@ void CodeGenFunction::EmitCfiCheckStub() {
llvm::Function *F = llvm::Function::Create(
llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, false),
llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
+ F->setAlignment(llvm::Align(4096));
CGM.setDSOLocal(F);
llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
- // FIXME: consider emitting an intrinsic call like
- // call void @llvm.cfi_check(i64 %0, i8* %1, i8* %2)
- // which can be lowered in CrossDSOCFI pass to the actual contents of
- // __cfi_check. This would allow inlining of __cfi_check calls.
- llvm::CallInst::Create(
- llvm::Intrinsic::getDeclaration(M, llvm::Intrinsic::trap), "", BB);
+ // CrossDSOCFI pass is not executed if there is no executable code.
+ SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};
+ llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);
llvm::ReturnInst::Create(Ctx, nullptr, BB);
}
@@ -3382,9 +3704,9 @@ void CodeGenFunction::EmitCfiCheckFail() {
SanitizerScope SanScope(this);
FunctionArgList Args;
ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
Args.push_back(&ArgData);
Args.push_back(&ArgAddr);
@@ -3428,7 +3750,8 @@ void CodeGenFunction::EmitCfiCheckFail() {
CfiCheckFailDataTy,
Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0,
0);
- Address CheckKindAddr(V, getIntAlign());
+
+ Address CheckKindAddr(V, Int8Ty, getIntAlign());
llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
llvm::Value *AllVtables = llvm::MetadataAsValue::get(
@@ -3471,7 +3794,7 @@ void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
SanitizerKind::Unreachable),
SanitizerHandler::BuiltinUnreachable,
- EmitCheckSourceLocation(Loc), None);
+ EmitCheckSourceLocation(Loc), std::nullopt);
}
Builder.CreateUnreachable();
}
@@ -3484,32 +3807,37 @@ void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
// check-type per function to save on code size.
if (TrapBBs.size() <= CheckHandlerID)
TrapBBs.resize(CheckHandlerID + 1);
+
llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
- if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) {
+ if (!ClSanitizeDebugDeoptimization &&
+ CGM.getCodeGenOpts().OptimizationLevel && TrapBB &&
+ (!CurCodeDecl || !CurCodeDecl->hasAttr<OptimizeNoneAttr>())) {
+ auto Call = TrapBB->begin();
+ assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
+
+ Call->applyMergedLocation(Call->getDebugLoc(),
+ Builder.getCurrentDebugLocation());
+ Builder.CreateCondBr(Checked, Cont, TrapBB);
+ } else {
TrapBB = createBasicBlock("trap");
Builder.CreateCondBr(Checked, Cont, TrapBB);
EmitBlock(TrapBB);
- llvm::CallInst *TrapCall =
- Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
- llvm::ConstantInt::get(CGM.Int8Ty, CheckHandlerID));
+ llvm::CallInst *TrapCall = Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
+ llvm::ConstantInt::get(CGM.Int8Ty, ClSanitizeDebugDeoptimization
+ ? TrapBB->getParent()->size()
+ : CheckHandlerID));
if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
CGM.getCodeGenOpts().TrapFuncName);
- TrapCall->addAttribute(llvm::AttributeList::FunctionIndex, A);
+ TrapCall->addFnAttr(A);
}
TrapCall->setDoesNotReturn();
TrapCall->setDoesNotThrow();
Builder.CreateUnreachable();
- } else {
- auto Call = TrapBB->begin();
- assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
-
- Call->applyMergedLocation(Call->getDebugLoc(),
- Builder.getCurrentDebugLocation());
- Builder.CreateCondBr(Checked, Cont, TrapBB);
}
EmitBlock(Cont);
@@ -3522,7 +3850,7 @@ llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
CGM.getCodeGenOpts().TrapFuncName);
- TrapCall->addAttribute(llvm::AttributeList::FunctionIndex, A);
+ TrapCall->addFnAttr(A);
}
return TrapCall;
@@ -3541,7 +3869,7 @@ Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
// If the array type was an incomplete type, we need to make sure
// the decay ends up being the right type.
llvm::Type *NewTy = ConvertType(E->getType());
- Addr = Builder.CreateElementBitCast(Addr, NewTy);
+ Addr = Addr.withElementType(NewTy);
// Note that VLA pointers are always decayed, so we don't need to do
// anything here.
@@ -3560,7 +3888,7 @@ Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
if (BaseInfo) *BaseInfo = LV.getBaseInfo();
if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
- return Builder.CreateElementBitCast(Addr, ConvertTypeForMem(EltType));
+ return Addr.withElementType(ConvertTypeForMem(EltType));
}
/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
@@ -3588,7 +3916,7 @@ static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
SourceLocation loc,
const llvm::Twine &name = "arrayidx") {
if (inbounds) {
- return CGF.EmitCheckedInBoundsGEP(ptr, indices, signedIndices,
+ return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
CodeGenFunction::NotSubtraction, loc,
name);
} else {
@@ -3620,6 +3948,33 @@ static QualType getFixedSizeElementType(const ASTContext &ctx,
return eltType;
}
+static bool hasBPFPreserveStaticOffset(const RecordDecl *D) {
+ return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
+}
+
+static bool hasBPFPreserveStaticOffset(const Expr *E) {
+ if (!E)
+ return false;
+ QualType PointeeType = E->getType()->getPointeeType();
+ if (PointeeType.isNull())
+ return false;
+ if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
+ return hasBPFPreserveStaticOffset(BaseDecl);
+ return false;
+}
+
+// Wraps Addr with a call to llvm.preserve.static.offset intrinsic.
+static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF,
+ Address &Addr) {
+ if (!CGF.getTarget().getTriple().isBPF())
+ return Addr;
+
+ llvm::Function *Fn =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
+ llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.getPointer()});
+ return Address(Call, Addr.getElementType(), Addr.getAlignment());
+}
+
/// Given an array base, check whether its member access belongs to a record
/// with preserve_access_index attribute or not.
static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
@@ -3665,7 +4020,7 @@ static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
const llvm::Twine &name = "arrayidx") {
// All the indices except that last must be zero.
#ifndef NDEBUG
- for (auto idx : indices.drop_back())
+ for (auto *idx : indices.drop_back())
assert(isa<llvm::ConstantInt>(idx) &&
cast<llvm::ConstantInt>(idx)->isZero());
#endif
@@ -3681,6 +4036,9 @@ static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
CharUnits eltAlign =
getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
+ if (hasBPFPreserveStaticOffset(Base))
+ addr = wrapWithBPFPreserveStaticOffset(CGF, addr);
+
llvm::Value *eltPtr;
auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
if (!LastIndex ||
@@ -3700,7 +4058,62 @@ static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
idx, DbgInfo);
}
- return Address(eltPtr, eltAlign);
+ return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
+}
+
+/// The offset of a field from the beginning of the record.
+static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD,
+ const FieldDecl *FD, int64_t &Offset) {
+ ASTContext &Ctx = CGF.getContext();
+ const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
+ unsigned FieldNo = 0;
+
+ for (const Decl *D : RD->decls()) {
+ if (const auto *Record = dyn_cast<RecordDecl>(D))
+ if (getFieldOffsetInBits(CGF, Record, FD, Offset)) {
+ Offset += Layout.getFieldOffset(FieldNo);
+ return true;
+ }
+
+ if (const auto *Field = dyn_cast<FieldDecl>(D))
+ if (FD == Field) {
+ Offset += Layout.getFieldOffset(FieldNo);
+ return true;
+ }
+
+ if (isa<FieldDecl>(D))
+ ++FieldNo;
+ }
+
+ return false;
+}
+
+/// Returns the relative offset difference between \p FD1 and \p FD2.
+/// \code
+/// offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
+/// \endcode
+/// Both fields must be within the same struct.
+static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
+ const FieldDecl *FD1,
+ const FieldDecl *FD2) {
+ const RecordDecl *FD1OuterRec =
+ FD1->getParent()->getOuterLexicalRecordContext();
+ const RecordDecl *FD2OuterRec =
+ FD2->getParent()->getOuterLexicalRecordContext();
+
+ if (FD1OuterRec != FD2OuterRec)
+ // Fields must be within the same RecordDecl.
+ return std::optional<int64_t>();
+
+ int64_t FD1Offset = 0;
+ if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset))
+ return std::optional<int64_t>();
+
+ int64_t FD2Offset = 0;
+ if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset))
+ return std::optional<int64_t>();
+
+ return std::make_optional<int64_t>(FD1Offset - FD2Offset);
}
LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
@@ -3805,19 +4218,15 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
// interfaces, so we can't rely on GEP to do this scaling
// correctly, so we need to cast to i8*. FIXME: is this actually
// true? A lot of other things in the fragile ABI would break...
- llvm::Type *OrigBaseTy = Addr.getType();
- Addr = Builder.CreateElementBitCast(Addr, Int8Ty);
+ llvm::Type *OrigBaseElemTy = Addr.getElementType();
// Do the GEP.
CharUnits EltAlign =
getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
llvm::Value *EltPtr =
- emitArraySubscriptGEP(*this, Addr.getElementType(), Addr.getPointer(),
- ScaledIdx, false, SignedIndices, E->getExprLoc());
- Addr = Address(EltPtr, EltAlign);
-
- // Cast back.
- Addr = Builder.CreateBitCast(Addr, OrigBaseTy);
+ emitArraySubscriptGEP(*this, Int8Ty, Addr.getPointer(), ScaledIdx,
+ false, SignedIndices, E->getExprLoc());
+ Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
} else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
// If this is A[i] where A is an array, the frontend will have decayed the
// base to be a ArrayToPointerDecay implicit cast. While correct, it is
@@ -3834,6 +4243,47 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
ArrayLV = EmitLValue(Array);
auto *Idx = EmitIdxAfterBase(/*Promote*/true);
+ if (SanOpts.has(SanitizerKind::ArrayBounds)) {
+ // If the array being accessed has a "counted_by" attribute, generate
+ // bounds checking code. The "count" field is at the top level of the
+ // struct or in an anonymous struct, that's also at the top level. Future
+ // expansions may allow the "count" to reside at any place in the struct,
+ // but the value of "counted_by" will be a "simple" path to the count,
+ // i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or
+ // similar to emit the correct GEP.
+ const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
+ getLangOpts().getStrictFlexArraysLevel();
+
+ if (const auto *ME = dyn_cast<MemberExpr>(Array);
+ ME &&
+ ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel) &&
+ ME->getMemberDecl()->hasAttr<CountedByAttr>()) {
+ const FieldDecl *FAMDecl = dyn_cast<FieldDecl>(ME->getMemberDecl());
+ if (const FieldDecl *CountFD = FindCountedByField(FAMDecl)) {
+ if (std::optional<int64_t> Diff =
+ getOffsetDifferenceInBits(*this, CountFD, FAMDecl)) {
+ CharUnits OffsetDiff = CGM.getContext().toCharUnitsFromBits(*Diff);
+
+ // Create a GEP with a byte offset between the FAM and count and
+ // use that to load the count value.
+ Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(
+ ArrayLV.getAddress(*this), Int8PtrTy, Int8Ty);
+
+ llvm::Type *CountTy = ConvertType(CountFD->getType());
+ llvm::Value *Res = Builder.CreateInBoundsGEP(
+ Int8Ty, Addr.getPointer(),
+ Builder.getInt32(OffsetDiff.getQuantity()), ".counted_by.gep");
+ Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),
+ ".counted_by.load");
+
+ // Now emit the bounds checking.
+ EmitBoundsCheckImpl(E, Res, Idx, E->getIdx()->getType(),
+ Array->getType(), Accessed);
+ }
+ }
+ }
+ }
+
// Propagate the alignment from the array itself to the result.
QualType arrayType = Array->getType();
Addr = emitArraySubscriptGEP(
@@ -3895,7 +4345,7 @@ static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
// If the array type was an incomplete type, we need to make sure
// the decay ends up being the right type.
llvm::Type *NewTy = CGF.ConvertType(BaseTy);
- Addr = CGF.Builder.CreateElementBitCast(Addr, NewTy);
+ Addr = Addr.withElementType(NewTy);
// Note that VLA pointers are always decayed, so we don't need to do
// anything here.
@@ -3905,8 +4355,7 @@ static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
}
- return CGF.Builder.CreateElementBitCast(Addr,
- CGF.ConvertTypeForMem(ElTy));
+ return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
}
LValueBaseInfo TypeBaseInfo;
TBAAAccessInfo TypeTBAAInfo;
@@ -3914,7 +4363,8 @@ static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
BaseInfo.mergeForCast(TypeBaseInfo);
TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
- return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)), Align);
+ return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)),
+ CGF.ConvertTypeForMem(ElTy), Align);
}
return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
}
@@ -3947,14 +4397,15 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
llvm::APSInt ConstLength;
if (Length) {
// Idx = LowerBound + Length - 1;
- if (Optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
+ if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
ConstLength = CL->zextOrTrunc(PointerWidthInBits);
Length = nullptr;
}
auto *LowerBound = E->getLowerBound();
llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
if (LowerBound) {
- if (Optional<llvm::APSInt> LB = LowerBound->getIntegerConstantExpr(C)) {
+ if (std::optional<llvm::APSInt> LB =
+ LowerBound->getIntegerConstantExpr(C)) {
ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
LowerBound = nullptr;
}
@@ -3994,12 +4445,13 @@ LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
: BaseTy;
if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
Length = VAT->getSizeExpr();
- if (Optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
+ if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
ConstLength = *L;
Length = nullptr;
}
} else {
auto *CAT = C.getAsConstantArrayType(ArrayTy);
+ assert(CAT && "unexpected type for array initializer");
ConstLength = CAT->getSize();
}
if (Length) {
@@ -4184,24 +4636,45 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
/// Given that we are currently emitting a lambda, emit an l-value for
/// one of its members.
-LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
- if (CurCodeDecl) {
- assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent()->isLambda());
- assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent() == Field->getParent());
+///
+LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
+ llvm::Value *ThisValue) {
+ bool HasExplicitObjectParameter = false;
+ if (const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl)) {
+ HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
+ assert(MD->getParent()->isLambda());
+ assert(MD->getParent() == Field->getParent());
+ }
+ LValue LambdaLV;
+ if (HasExplicitObjectParameter) {
+ const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
+ auto It = LocalDeclMap.find(D);
+ assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
+ Address AddrOfExplicitObject = It->getSecond();
+ if (D->getType()->isReferenceType())
+ LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
+ AlignmentSource::Decl);
+ else
+ LambdaLV = MakeNaturalAlignAddrLValue(AddrOfExplicitObject.getPointer(),
+ D->getType().getNonReferenceType());
+ } else {
+ QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
+ LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
}
- QualType LambdaTagType =
- getContext().getTagDeclType(Field->getParent());
- LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue, LambdaTagType);
return EmitLValueForField(LambdaLV, Field);
}
+LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
+ return EmitLValueForLambdaField(Field, CXXABIThisValue);
+}
+
/// Get the field index in the debug info. The debug info structure/union
/// will ignore the unnamed bitfields.
unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
unsigned FieldIndex) {
unsigned I = 0, Skipped = 0;
- for (auto F : Rec->getDefinition()->fields()) {
+ for (auto *F : Rec->getDefinition()->fields()) {
if (I == FieldIndex)
break;
if (F->isUnnamedBitfield())
@@ -4220,7 +4693,7 @@ static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
CGF.getContext().getFieldOffset(Field));
if (Offset.isZero())
return Base;
- Base = CGF.Builder.CreateElementBitCast(Base, CGF.Int8Ty);
+ Base = Base.withElementType(CGF.Int8Ty);
return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
}
@@ -4290,6 +4763,8 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
Address Addr = base.getAddress(*this);
unsigned Idx = RL.getLLVMFieldNo(field);
const RecordDecl *rec = field->getParent();
+ if (hasBPFPreserveStaticOffset(rec))
+ Addr = wrapWithBPFPreserveStaticOffset(*this, Addr);
if (!UseVolatile) {
if (!IsInPreservedAIRegion &&
(!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
@@ -4308,8 +4783,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
// Get the access type.
llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
- if (Addr.getElementType() != FieldIntTy)
- Addr = Builder.CreateElementBitCast(Addr, FieldIntTy);
+ Addr = Addr.withElementType(FieldIntTy);
if (UseVolatile) {
const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
if (VolatileOffset)
@@ -4363,6 +4837,8 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
}
Address addr = base.getAddress(*this);
+ if (hasBPFPreserveStaticOffset(rec))
+ addr = wrapWithBPFPreserveStaticOffset(*this, addr);
if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
if (CGM.getCodeGenOpts().StrictVTablePointers &&
ClassDef->isDynamicClass()) {
@@ -4371,7 +4847,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
// fields may leak the real address of dynamic object, which could result
// in miscompilation when leaked pointer would be compared.
auto *stripped = Builder.CreateStripInvariantGroup(addr.getPointer());
- addr = Address(stripped, addr.getAlignment());
+ addr = Address(stripped, addr.getElementType(), addr.getAlignment());
}
}
@@ -4382,8 +4858,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
hasAnyVptr(FieldType, getContext()))
// Because unions can easily skip invariant.barriers, we need to add
// a barrier every time CXXRecord field with vptr is referenced.
- addr = Address(Builder.CreateLaunderInvariantGroup(addr.getPointer()),
- addr.getAlignment());
+ addr = Builder.CreateLaunderInvariantGroup(addr);
if (IsInPreservedAIRegion ||
(getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
@@ -4393,12 +4868,11 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
addr = Address(
Builder.CreatePreserveUnionAccessIndex(
addr.getPointer(), getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
- addr.getAlignment());
+ addr.getElementType(), addr.getAlignment());
}
if (FieldType->isReferenceType())
- addr = Builder.CreateElementBitCast(
- addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName());
+ addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
} else {
if (!IsInPreservedAIRegion &&
(!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
@@ -4423,11 +4897,8 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
}
// Make sure that the address is pointing to the right type. This is critical
- // for both unions and structs. A union needs a bitcast, a struct element
- // will need a bitcast if the LLVM type laid out doesn't match the desired
- // type.
- addr = Builder.CreateElementBitCast(
- addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName());
+ // for both unions and structs.
+ addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
if (field->hasAttr<AnnotateAttr>())
addr = EmitFieldAnnotations(field, addr);
@@ -4454,7 +4925,7 @@ CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
// Make sure that the address is pointing to the right type.
llvm::Type *llvmType = ConvertTypeForMem(FieldType);
- V = Builder.CreateElementBitCast(V, llvmType, Field->getName());
+ V = V.withElementType(llvmType);
// TODO: Generate TBAA information that describes this access as a structure
// member access and not just an access to an object of the field's type. This
@@ -4506,102 +4977,150 @@ LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
/// Emit the operand of a glvalue conditional operator. This is either a glvalue
/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
/// LValue is returned and the current block has been terminated.
-static Optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
- const Expr *Operand) {
+static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
+ const Expr *Operand) {
if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
- return None;
+ return std::nullopt;
}
return CGF.EmitLValue(Operand);
}
-LValue CodeGenFunction::
-EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
- if (!expr->isGLValue()) {
- // ?: here should be an aggregate.
- assert(hasAggregateEvaluationKind(expr->getType()) &&
- "Unexpected conditional operator!");
- return EmitAggExprToLValue(expr);
- }
-
- OpaqueValueMapping binding(*this, expr);
-
- const Expr *condExpr = expr->getCond();
+namespace {
+// Handle the case where the condition is a constant evaluatable simple integer,
+// which means we don't have to separately handle the true/false blocks.
+std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
+ CodeGenFunction &CGF, const AbstractConditionalOperator *E) {
+ const Expr *condExpr = E->getCond();
bool CondExprBool;
- if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
- const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
- if (!CondExprBool) std::swap(live, dead);
+ if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
+ const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
+ if (!CondExprBool)
+ std::swap(Live, Dead);
- if (!ContainsLabel(dead)) {
+ if (!CGF.ContainsLabel(Dead)) {
// If the true case is live, we need to track its region.
if (CondExprBool)
- incrementProfileCounter(expr);
+ CGF.incrementProfileCounter(E);
// If a throw expression we emit it and return an undefined lvalue
// because it can't be used.
- if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(live->IgnoreParens())) {
- EmitCXXThrowExpr(ThrowExpr);
- llvm::Type *Ty =
- llvm::PointerType::getUnqual(ConvertType(dead->getType()));
- return MakeAddrLValue(
- Address(llvm::UndefValue::get(Ty), CharUnits::One()),
- dead->getType());
+ if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
+ CGF.EmitCXXThrowExpr(ThrowExpr);
+ llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
+ llvm::Type *Ty = CGF.UnqualPtrTy;
+ return CGF.MakeAddrLValue(
+ Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
+ Dead->getType());
}
- return EmitLValue(live);
+ return CGF.EmitLValue(Live);
}
}
+ return std::nullopt;
+}
+struct ConditionalInfo {
+ llvm::BasicBlock *lhsBlock, *rhsBlock;
+ std::optional<LValue> LHS, RHS;
+};
- llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
- llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
- llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
-
- ConditionalEvaluation eval(*this);
- EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock, getProfileCount(expr));
+// Create and generate the 3 blocks for a conditional operator.
+// Leaves the 'current block' in the continuation basic block.
+template<typename FuncTy>
+ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
+ const AbstractConditionalOperator *E,
+ const FuncTy &BranchGenFunc) {
+ ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
+ CGF.createBasicBlock("cond.false"), std::nullopt,
+ std::nullopt};
+ llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+ CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
+ CGF.getProfileCount(E));
// Any temporaries created here are conditional.
- EmitBlock(lhsBlock);
- incrementProfileCounter(expr);
- eval.begin(*this);
- Optional<LValue> lhs =
- EmitLValueOrThrowExpression(*this, expr->getTrueExpr());
- eval.end(*this);
-
- if (lhs && !lhs->isSimple())
- return EmitUnsupportedLValue(expr, "conditional operator");
+ CGF.EmitBlock(Info.lhsBlock);
+ CGF.incrementProfileCounter(E);
+ eval.begin(CGF);
+ Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
+ eval.end(CGF);
+ Info.lhsBlock = CGF.Builder.GetInsertBlock();
- lhsBlock = Builder.GetInsertBlock();
- if (lhs)
- Builder.CreateBr(contBlock);
+ if (Info.LHS)
+ CGF.Builder.CreateBr(endBlock);
// Any temporaries created here are conditional.
- EmitBlock(rhsBlock);
- eval.begin(*this);
- Optional<LValue> rhs =
- EmitLValueOrThrowExpression(*this, expr->getFalseExpr());
- eval.end(*this);
- if (rhs && !rhs->isSimple())
- return EmitUnsupportedLValue(expr, "conditional operator");
- rhsBlock = Builder.GetInsertBlock();
+ CGF.EmitBlock(Info.rhsBlock);
+ eval.begin(CGF);
+ Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
+ eval.end(CGF);
+ Info.rhsBlock = CGF.Builder.GetInsertBlock();
+ CGF.EmitBlock(endBlock);
+
+ return Info;
+}
+} // namespace
+
+void CodeGenFunction::EmitIgnoredConditionalOperator(
+ const AbstractConditionalOperator *E) {
+ if (!E->isGLValue()) {
+ // ?: here should be an aggregate.
+ assert(hasAggregateEvaluationKind(E->getType()) &&
+ "Unexpected conditional operator!");
+ return (void)EmitAggExprToLValue(E);
+ }
+
+ OpaqueValueMapping binding(*this, E);
+ if (HandleConditionalOperatorLValueSimpleCase(*this, E))
+ return;
- EmitBlock(contBlock);
+ EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
+ CGF.EmitIgnoredExpr(E);
+ return LValue{};
+ });
+}
+LValue CodeGenFunction::EmitConditionalOperatorLValue(
+ const AbstractConditionalOperator *expr) {
+ if (!expr->isGLValue()) {
+ // ?: here should be an aggregate.
+ assert(hasAggregateEvaluationKind(expr->getType()) &&
+ "Unexpected conditional operator!");
+ return EmitAggExprToLValue(expr);
+ }
+
+ OpaqueValueMapping binding(*this, expr);
+ if (std::optional<LValue> Res =
+ HandleConditionalOperatorLValueSimpleCase(*this, expr))
+ return *Res;
+
+ ConditionalInfo Info = EmitConditionalBlocks(
+ *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
+ return EmitLValueOrThrowExpression(CGF, E);
+ });
- if (lhs && rhs) {
- llvm::PHINode *phi =
- Builder.CreatePHI(lhs->getPointer(*this)->getType(), 2, "cond-lvalue");
- phi->addIncoming(lhs->getPointer(*this), lhsBlock);
- phi->addIncoming(rhs->getPointer(*this), rhsBlock);
- Address result(phi, std::min(lhs->getAlignment(), rhs->getAlignment()));
+ if ((Info.LHS && !Info.LHS->isSimple()) ||
+ (Info.RHS && !Info.RHS->isSimple()))
+ return EmitUnsupportedLValue(expr, "conditional operator");
+
+ if (Info.LHS && Info.RHS) {
+ Address lhsAddr = Info.LHS->getAddress(*this);
+ Address rhsAddr = Info.RHS->getAddress(*this);
+ llvm::PHINode *phi = Builder.CreatePHI(lhsAddr.getType(), 2, "cond-lvalue");
+ phi->addIncoming(lhsAddr.getPointer(), Info.lhsBlock);
+ phi->addIncoming(rhsAddr.getPointer(), Info.rhsBlock);
+ Address result(phi, lhsAddr.getElementType(),
+ std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
AlignmentSource alignSource =
- std::max(lhs->getBaseInfo().getAlignmentSource(),
- rhs->getBaseInfo().getAlignmentSource());
+ std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
+ Info.RHS->getBaseInfo().getAlignmentSource());
TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
- lhs->getTBAAInfo(), rhs->getTBAAInfo());
+ Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
TBAAInfo);
} else {
- assert((lhs || rhs) &&
+ assert((Info.LHS || Info.RHS) &&
"both operands of glvalue conditional are throw-expressions?");
- return lhs ? *lhs : *rhs;
+ return Info.LHS ? *Info.LHS : *Info.RHS;
}
}
@@ -4624,7 +5143,6 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_IntegralToPointer:
case CK_PointerToIntegral:
case CK_PointerToBoolean:
- case CK_VectorSplat:
case CK_IntegralCast:
case CK_BooleanToSignedIntegral:
case CK_IntegralToBoolean:
@@ -4684,10 +5202,28 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_UserDefinedConversion:
case CK_CPointerToObjCPointerCast:
case CK_BlockPointerToObjCPointerCast:
- case CK_NoOp:
case CK_LValueToRValue:
return EmitLValue(E->getSubExpr());
+ case CK_NoOp: {
+ // CK_NoOp can model a qualification conversion, which can remove an array
+ // bound and change the IR type.
+ // FIXME: Once pointee types are removed from IR, remove this.
+ LValue LV = EmitLValue(E->getSubExpr());
+ // Propagate the volatile qualifer to LValue, if exist in E.
+ if (E->changesVolatileQualification())
+ LV.getQuals() = E->getType().getQualifiers();
+ if (LV.isSimple()) {
+ Address V = LV.getAddress(*this);
+ if (V.isValid()) {
+ llvm::Type *T = ConvertTypeForMem(E->getType());
+ if (V.getElementType() != T)
+ LV.setAddress(V.withElementType(T));
+ }
+ }
+ return LV;
+ }
+
case CK_UncheckedDerivedToBase:
case CK_DerivedToBase: {
const auto *DerivedClassTy =
@@ -4728,7 +5264,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
Derived.getPointer(), E->getType());
if (SanOpts.has(SanitizerKind::CFIDerivedCast))
- EmitVTablePtrCheckForCast(E->getType(), Derived.getPointer(),
+ EmitVTablePtrCheckForCast(E->getType(), Derived,
/*MayBeNull=*/false, CFITCK_DerivedCast,
E->getBeginLoc());
@@ -4741,11 +5277,11 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
CGM.EmitExplicitCastExprType(CE, this);
LValue LV = EmitLValue(E->getSubExpr());
- Address V = Builder.CreateBitCast(LV.getAddress(*this),
- ConvertType(CE->getTypeAsWritten()));
+ Address V = LV.getAddress(*this).withElementType(
+ ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
- EmitVTablePtrCheckForCast(E->getType(), V.getPointer(),
+ EmitVTablePtrCheckForCast(E->getType(), V,
/*MayBeNull=*/false, CFITCK_UnrelatedCast,
E->getBeginLoc());
@@ -4759,18 +5295,25 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
*this, LV.getPointer(*this),
E->getSubExpr()->getType().getAddressSpace(),
E->getType().getAddressSpace(), ConvertType(DestTy));
- return MakeAddrLValue(Address(V, LV.getAddress(*this).getAlignment()),
+ return MakeAddrLValue(Address(V, ConvertTypeForMem(E->getType()),
+ LV.getAddress(*this).getAlignment()),
E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
}
case CK_ObjCObjectLValueCast: {
LValue LV = EmitLValue(E->getSubExpr());
- Address V = Builder.CreateElementBitCast(LV.getAddress(*this),
- ConvertType(E->getType()));
+ Address V = LV.getAddress(*this).withElementType(ConvertType(E->getType()));
return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
CGM.getTBAAInfoForSubobject(LV, E->getType()));
}
case CK_ZeroToOCLOpaqueType:
llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
+
+ case CK_VectorSplat: {
+ // LValue results of vector splats are only supported in HLSL.
+ if (!getLangOpts().HLSL)
+ return EmitUnsupportedLValue(E, "unexpected cast lvalue");
+ return EmitLValue(E->getSubExpr());
+ }
}
llvm_unreachable("Unhandled lvalue cast kind?");
@@ -4849,9 +5392,12 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
return EmitCUDAKernelCallExpr(CE, ReturnValue);
+ // A CXXOperatorCallExpr is created even for explicit object methods, but
+ // these should be treated like static function call.
if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
- if (const CXXMethodDecl *MD =
- dyn_cast_or_null<CXXMethodDecl>(CE->getCalleeDecl()))
+ if (const auto *MD =
+ dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
+ MD && MD->isImplicitObjectMemberFunction())
return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
CGCallee callee = EmitCallee(E->getCallee());
@@ -4875,16 +5421,56 @@ RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue);
}
+// Detect the unusual situation where an inline version is shadowed by a
+// non-inline version. In that case we should pick the external one
+// everywhere. That's GCC behavior too.
+static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) {
+ for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
+ if (!PD->isInlineBuiltinDeclaration())
+ return false;
+ return true;
+}
+
static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
if (auto builtinID = FD->getBuiltinID()) {
- // Replaceable builtin provide their own implementation of a builtin. Unless
- // we are in the builtin implementation itself, don't call the actual
- // builtin. If we are in the builtin implementation, avoid trivial infinite
- // recursion.
- if (!FD->isInlineBuiltinDeclaration() ||
- CGF.CurFn->getName() == FD->getName())
+ std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
+ std::string NoBuiltins = "no-builtins";
+
+ StringRef Ident = CGF.CGM.getMangledName(GD);
+ std::string FDInlineName = (Ident + ".inline").str();
+
+ bool IsPredefinedLibFunction =
+ CGF.getContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
+ bool HasAttributeNoBuiltin =
+ CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
+ CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);
+
+ // When directing calling an inline builtin, call it through it's mangled
+ // name to make it clear it's not the actual builtin.
+ if (CGF.CurFn->getName() != FDInlineName &&
+ OnlyHasInlineBuiltinDeclaration(FD)) {
+ llvm::Constant *CalleePtr = EmitFunctionDeclPointer(CGF.CGM, GD);
+ llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
+ llvm::Module *M = Fn->getParent();
+ llvm::Function *Clone = M->getFunction(FDInlineName);
+ if (!Clone) {
+ Clone = llvm::Function::Create(Fn->getFunctionType(),
+ llvm::GlobalValue::InternalLinkage,
+ Fn->getAddressSpace(), FDInlineName, M);
+ Clone->addFnAttr(llvm::Attribute::AlwaysInline);
+ }
+ return CGCallee::forDirect(Clone, GD);
+ }
+
+ // Replaceable builtins provide their own implementation of a builtin. If we
+ // are in an inline builtin implementation, avoid trivial infinite
+ // recursion. Honor __attribute__((no_builtin("foo"))) or
+ // __attribute__((no_builtin)) on the current function unless foo is
+ // not a predefined library function which means we must generate the
+ // builtin no matter what.
+ else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
return CGCallee::forBuiltin(builtinID, FD);
}
@@ -4893,6 +5479,7 @@ static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
FD->hasAttr<CUDAGlobalAttr>())
CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));
+
return CGCallee::forDirect(CalleePtr, GD);
}
@@ -4934,7 +5521,7 @@ CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
functionType = ptrType->getPointeeType();
} else {
functionType = E->getType();
- calleePtr = EmitLValue(E).getPointer(*this);
+ calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
}
assert(functionType->isFunctionType());
@@ -5034,8 +5621,8 @@ CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
}
Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
- return Builder.CreateElementBitCast(CGM.GetAddrOfMSGuidDecl(E->getGuidDecl()),
- ConvertType(E->getType()));
+ return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
+ .withElementType(ConvertType(E->getType()));
}
LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
@@ -5077,6 +5664,15 @@ llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
}
+llvm::Value *
+CodeGenFunction::EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
+ QualType PointerDiffType = getContext().getPointerDiffType();
+ return Builder.CreateZExtOrTrunc(OffsetValue,
+ getTypes().ConvertType(PointerDiffType));
+}
+
LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
llvm::Value *BaseValue,
const ObjCIvarDecl *Ivar,
@@ -5127,33 +5723,55 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
const Decl *TargetDecl =
OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
+ assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
+ !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
+ "trying to emit a call to an immediate function");
+
CalleeType = getContext().getCanonicalType(CalleeType);
auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
CGCallee Callee = OrigCallee;
- if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function) &&
- (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
+ if (SanOpts.has(SanitizerKind::Function) &&
+ (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
+ !isa<FunctionNoProtoType>(PointeeType)) {
if (llvm::Constant *PrefixSig =
CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
SanitizerScope SanScope(this);
- // Remove any (C++17) exception specifications, to allow calling e.g. a
- // noexcept function through a non-noexcept pointer.
- auto ProtoTy =
- getContext().getFunctionTypeWithExceptionSpec(PointeeType, EST_None);
- llvm::Constant *FTRTTIConst =
- CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
+ auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);
+
llvm::Type *PrefixSigType = PrefixSig->getType();
llvm::StructType *PrefixStructTy = llvm::StructType::get(
CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);
llvm::Value *CalleePtr = Callee.getFunctionPointer();
- llvm::Value *CalleePrefixStruct = Builder.CreateBitCast(
- CalleePtr, llvm::PointerType::getUnqual(PrefixStructTy));
+ // On 32-bit Arm, the low bit of a function pointer indicates whether
+ // it's using the Arm or Thumb instruction set. The actual first
+ // instruction lives at the same address either way, so we must clear
+ // that low bit before using the function address to find the prefix
+ // structure.
+ //
+ // This applies to both Arm and Thumb target triples, because
+ // either one could be used in an interworking context where it
+ // might be passed function pointers of both types.
+ llvm::Value *AlignedCalleePtr;
+ if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
+ llvm::Value *CalleeAddress =
+ Builder.CreatePtrToInt(CalleePtr, IntPtrTy);
+ llvm::Value *Mask = llvm::ConstantInt::get(IntPtrTy, ~1);
+ llvm::Value *AlignedCalleeAddress =
+ Builder.CreateAnd(CalleeAddress, Mask);
+ AlignedCalleePtr =
+ Builder.CreateIntToPtr(AlignedCalleeAddress, CalleePtr->getType());
+ } else {
+ AlignedCalleePtr = CalleePtr;
+ }
+
+ llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
llvm::Value *CalleeSigPtr =
- Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 0);
+ Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
llvm::Value *CalleeSig =
Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
@@ -5163,19 +5781,17 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
EmitBlock(TypeCheck);
- llvm::Value *CalleeRTTIPtr =
- Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 1);
- llvm::Value *CalleeRTTIEncoded =
- Builder.CreateAlignedLoad(Int32Ty, CalleeRTTIPtr, getPointerAlign());
- llvm::Value *CalleeRTTI =
- DecodeAddrUsedInPrologue(CalleePtr, CalleeRTTIEncoded);
- llvm::Value *CalleeRTTIMatch =
- Builder.CreateICmpEQ(CalleeRTTI, FTRTTIConst);
+ llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
+ Int32Ty,
+ Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
+ getPointerAlign());
+ llvm::Value *CalleeTypeHashMatch =
+ Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
EmitCheckTypeDescriptor(CalleeType)};
- EmitCheck(std::make_pair(CalleeRTTIMatch, SanitizerKind::Function),
+ EmitCheck(std::make_pair(CalleeTypeHashMatch, SanitizerKind::Function),
SanitizerHandler::FunctionTypeMismatch, StaticData,
- {CalleePtr, CalleeRTTI, FTRTTIConst});
+ {CalleePtr});
Builder.CreateBr(Cont);
EmitBlock(Cont);
@@ -5200,9 +5816,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
llvm::Value *CalleePtr = Callee.getFunctionPointer();
- llvm::Value *CastedCallee = Builder.CreateBitCast(CalleePtr, Int8PtrTy);
llvm::Value *TypeTest = Builder.CreateCall(
- CGM.getIntrinsic(llvm::Intrinsic::type_test), {CastedCallee, TypeId});
+ CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});
auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
llvm::Constant *StaticData[] = {
@@ -5212,18 +5827,17 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
};
if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
EmitCfiSlowPathCheck(SanitizerKind::CFIICall, TypeTest, CrossDsoTypeId,
- CastedCallee, StaticData);
+ CalleePtr, StaticData);
} else {
EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIICall),
SanitizerHandler::CFICheckFail, StaticData,
- {CastedCallee, llvm::UndefValue::get(IntPtrTy)});
+ {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
}
}
CallArgList Args;
if (Chain)
- Args.add(RValue::get(Builder.CreateBitCast(Chain, CGM.VoidPtrTy)),
- CGM.getContext().VoidPtrTy);
+ Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);
// C++17 requires that we evaluate arguments to a call using assignment syntax
// right-to-left, and that we evaluate arguments to certain other operators
@@ -5232,6 +5846,7 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
// destruction order is not necessarily reverse construction order.
// FIXME: Revisit this based on C++ committee response to unimplementability.
EvaluationOrder Order = EvaluationOrder::Default;
+ bool StaticOperator = false;
if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
if (OCE->isAssignmentOp())
Order = EvaluationOrder::ForceRightToLeft;
@@ -5249,10 +5864,22 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
break;
}
}
+
+ if (const auto *MD =
+ dyn_cast_if_present<CXXMethodDecl>(OCE->getCalleeDecl());
+ MD && MD->isStatic())
+ StaticOperator = true;
}
- EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), E->arguments(),
- E->getDirectCallee(), /*ParamsToSkip*/ 0, Order);
+ auto Arguments = E->arguments();
+ if (StaticOperator) {
+ // If we're calling a static operator, we need to emit the object argument
+ // and ignore it.
+ EmitIgnoredExpr(E->getArg(0));
+ Arguments = drop_begin(Arguments, 1);
+ }
+ EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), Arguments,
+ E->getDirectCallee(), /*ParamsToSkip=*/0, Order);
const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
Args, FnType, /*ChainCall=*/Chain);
@@ -5294,9 +5921,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
isa<CUDAKernelCallExpr>(E) &&
(!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
llvm::Value *Handle = Callee.getFunctionPointer();
- auto *Cast =
- Builder.CreateBitCast(Handle, Handle->getType()->getPointerTo());
- auto *Stub = Builder.CreateLoad(Address(Cast, CGM.getPointerAlign()));
+ auto *Stub = Builder.CreateLoad(
+ Address(Handle, Handle->getType(), CGM.getPointerAlign()));
Callee.setFunctionPointer(Stub);
}
llvm::CallBase *CallOrInvoke = nullptr;
@@ -5306,9 +5932,13 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
// Generate function declaration DISuprogram in order to be used
// in debug info about call sites.
if (CGDebugInfo *DI = getDebugInfo()) {
- if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl))
- DI->EmitFuncDeclForCallSite(CallOrInvoke, QualType(FnType, 0),
+ if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
+ FunctionArgList Args;
+ QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
+ DI->EmitFuncDeclForCallSite(CallOrInvoke,
+ DI->getFunctionType(CalleeDecl, ResTy, Args),
CalleeDecl);
+ }
}
return Call;
@@ -5363,6 +5993,48 @@ void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
}
+void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) {
+ llvm::Type *EltTy = Val->getType()->getScalarType();
+ if (!EltTy->isFloatTy())
+ return;
+
+ if ((getLangOpts().OpenCL &&
+ !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
+ (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
+ !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
+ // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 3ulp
+ //
+ // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
+ // build option allows an application to specify that single precision
+ // floating-point divide (x/y and 1/x) and sqrt used in the program
+ // source are correctly rounded.
+ //
+ // TODO: CUDA has a prec-sqrt flag
+ SetFPAccuracy(Val, 3.0f);
+ }
+}
+
+void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) {
+ llvm::Type *EltTy = Val->getType()->getScalarType();
+ if (!EltTy->isFloatTy())
+ return;
+
+ if ((getLangOpts().OpenCL &&
+ !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
+ (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
+ !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
+ // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
+ //
+ // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
+ // build option allows an application to specify that single precision
+ // floating-point divide (x/y and 1/x) and sqrt used in the program
+ // source are correctly rounded.
+ //
+ // TODO: CUDA has a prec-div flag
+ SetFPAccuracy(Val, 2.5f);
+ }
+}
+
namespace {
struct LValueOrRValue {
LValue LV;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
index 1e81ad9f2dc7..810b28f25fa1 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
@@ -85,10 +85,9 @@ public:
void EmitCopy(QualType type, const AggValueSlot &dest,
const AggValueSlot &src);
- void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
-
- void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
- QualType ArrayQTy, InitListExpr *E);
+ void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, QualType ArrayQTy,
+ Expr *ExprToVisit, ArrayRef<Expr *> Args,
+ Expr *ArrayFiller);
AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
@@ -127,8 +126,17 @@ public:
}
void VisitConstantExpr(ConstantExpr *E) {
+ EnsureDest(E->getType());
+
if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
- CGF.EmitAggregateStore(Result, Dest.getAddress(),
+ Address StoreDest = Dest.getAddress();
+ // The emitted value is guaranteed to have the same size as the
+ // destination but can have a different type. Just do a bitcast in this
+ // case to avoid incorrect GEPs.
+ if (Result->getType() != StoreDest.getType())
+ StoreDest = StoreDest.withElementType(Result->getType());
+
+ CGF.EmitAggregateStore(Result, StoreDest,
E->getType().isVolatileQualified());
return;
}
@@ -170,6 +178,9 @@ public:
void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
void VisitChooseExpr(const ChooseExpr *CE);
void VisitInitListExpr(InitListExpr *E);
+ void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
+ FieldDecl *InitializedFieldInUnion,
+ Expr *ArrayFiller);
void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
llvm::Value *outerBegin = nullptr);
void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
@@ -199,10 +210,22 @@ public:
return EmitFinalDestCopy(E->getType(), LV);
}
- CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
+ AggValueSlot Slot = EnsureSlot(E->getType());
+ bool NeedsDestruction =
+ !Slot.isExternallyDestructed() &&
+ E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
+ if (NeedsDestruction)
+ Slot.setExternallyDestructed();
+ CGF.EmitPseudoObjectRValue(E, Slot);
+ if (NeedsDestruction)
+ CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Slot.getAddress(),
+ E->getType());
}
void VisitVAArgExpr(VAArgExpr *E);
+ void VisitCXXParenListInitExpr(CXXParenListInitExpr *E);
+ void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args,
+ Expr *ArrayFiller);
void EmitInitializationToLValue(Expr *E, LValue Address);
void EmitNullInitializationToLValue(LValue Address);
@@ -299,7 +322,7 @@ void AggExprEmitter::withReturnValueSlot(
if (!UseTemp)
return;
- assert(Dest.getPointer() != Src.getAggregatePointer());
+ assert(Dest.isIgnored() || Dest.getPointer() != Src.getAggregatePointer());
EmitFinalDestCopy(E->getType(), Src);
if (!RequiresDestruction && LifetimeStartInst) {
@@ -469,10 +492,12 @@ static bool isTrivialFiller(Expr *E) {
return false;
}
-/// Emit initialization of an array from an initializer list.
+/// Emit initialization of an array from an initializer list. ExprToVisit must
+/// be either an InitListEpxr a CXXParenInitListExpr.
void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
- QualType ArrayQTy, InitListExpr *E) {
- uint64_t NumInitElements = E->getNumInits();
+ QualType ArrayQTy, Expr *ExprToVisit,
+ ArrayRef<Expr *> Args, Expr *ArrayFiller) {
+ uint64_t NumInitElements = Args.size();
uint64_t NumArrayElements = AType->getNumElements();
assert(NumInitElements <= NumArrayElements);
@@ -491,7 +516,7 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
CharUnits elementAlign =
DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
- llvm::Type *llvmElementType = begin->getType()->getPointerElementType();
+ llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
// Consider initializing the array by copying from a global. For this to be
// more efficient than per-element initialization, the size of the elements
@@ -501,17 +526,19 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
CodeGen::CodeGenModule &CGM = CGF.CGM;
ConstantEmitter Emitter(CGF);
LangAS AS = ArrayQTy.getAddressSpace();
- if (llvm::Constant *C = Emitter.tryEmitForInitializer(E, AS, ArrayQTy)) {
+ if (llvm::Constant *C =
+ Emitter.tryEmitForInitializer(ExprToVisit, AS, ArrayQTy)) {
auto GV = new llvm::GlobalVariable(
CGM.getModule(), C->getType(),
- CGM.isTypeConstant(ArrayQTy, /* ExcludeCtorDtor= */ true),
- llvm::GlobalValue::PrivateLinkage, C, "constinit",
+ /* isConstant= */ true, llvm::GlobalValue::PrivateLinkage, C,
+ "constinit",
/* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal,
CGM.getContext().getTargetAddressSpace(AS));
Emitter.finalize(GV);
CharUnits Align = CGM.getContext().getTypeAlignInChars(ArrayQTy);
GV->setAlignment(Align.getAsAlign());
- EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GV, ArrayQTy, Align));
+ Address GVAddr(GV, GV->getValueType(), Align);
+ EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, ArrayQTy));
return;
}
}
@@ -563,14 +590,13 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
}
- LValue elementLV =
- CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
- EmitInitializationToLValue(E->getInit(i), elementLV);
+ LValue elementLV = CGF.MakeAddrLValue(
+ Address(element, llvmElementType, elementAlign), elementType);
+ EmitInitializationToLValue(Args[i], elementLV);
}
// Check whether there's a non-trivial array-fill expression.
- Expr *filler = E->getArrayFiller();
- bool hasTrivialFiller = isTrivialFiller(filler);
+ bool hasTrivialFiller = isTrivialFiller(ArrayFiller);
// Any remaining elements need to be zero-initialized, possibly
// using the filler expression. We can skip this if the we're
@@ -611,10 +637,10 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
// every temporary created in a default argument is sequenced before
// the construction of the next array element, if any
CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
- LValue elementLV =
- CGF.MakeAddrLValue(Address(currentElement, elementAlign), elementType);
- if (filler)
- EmitInitializationToLValue(filler, elementLV);
+ LValue elementLV = CGF.MakeAddrLValue(
+ Address(currentElement, llvmElementType, elementAlign), elementType);
+ if (ArrayFiller)
+ EmitInitializationToLValue(ArrayFiller, elementLV);
else
EmitNullInitializationToLValue(elementLV);
}
@@ -725,8 +751,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
// GCC union extension
QualType Ty = E->getSubExpr()->getType();
- Address CastPtr =
- Builder.CreateElementBitCast(Dest.getAddress(), CGF.ConvertType(Ty));
+ Address CastPtr = Dest.getAddress().withElementType(CGF.ConvertType(Ty));
EmitInitializationToLValue(E->getSubExpr(),
CGF.MakeAddrLValue(CastPtr, Ty));
break;
@@ -741,9 +766,8 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
Address SourceAddress =
- Builder.CreateElementBitCast(SourceLV.getAddress(CGF), CGF.Int8Ty);
- Address DestAddress =
- Builder.CreateElementBitCast(Dest.getAddress(), CGF.Int8Ty);
+ SourceLV.getAddress(CGF).withElementType(CGF.Int8Ty);
+ Address DestAddress = Dest.getAddress().withElementType(CGF.Int8Ty);
llvm::Value *SizeVal = llvm::ConstantInt::get(
CGF.SizeTy,
CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity());
@@ -847,7 +871,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
return;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CK_NoOp:
@@ -1588,46 +1612,72 @@ void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
}
}
+void AggExprEmitter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) {
+ VisitCXXParenListOrInitListExpr(E, E->getInitExprs(),
+ E->getInitializedFieldInUnion(),
+ E->getArrayFiller());
+}
+
void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
+ if (E->hadArrayRangeDesignator())
+ CGF.ErrorUnsupported(E, "GNU array range designator extension");
+
+ if (E->isTransparent())
+ return Visit(E->getInit(0));
+
+ VisitCXXParenListOrInitListExpr(
+ E, E->inits(), E->getInitializedFieldInUnion(), E->getArrayFiller());
+}
+
+void AggExprEmitter::VisitCXXParenListOrInitListExpr(
+ Expr *ExprToVisit, ArrayRef<Expr *> InitExprs,
+ FieldDecl *InitializedFieldInUnion, Expr *ArrayFiller) {
#if 0
// FIXME: Assess perf here? Figure out what cases are worth optimizing here
// (Length of globals? Chunks of zeroed-out space?).
//
// If we can, prefer a copy from a global; this is a lot less code for long
// globals, and it's easier for the current optimizers to analyze.
- if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
+ if (llvm::Constant *C =
+ CGF.CGM.EmitConstantExpr(ExprToVisit, ExprToVisit->getType(), &CGF)) {
llvm::GlobalVariable* GV =
new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
llvm::GlobalValue::InternalLinkage, C, "");
- EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
+ EmitFinalDestCopy(ExprToVisit->getType(),
+ CGF.MakeAddrLValue(GV, ExprToVisit->getType()));
return;
}
#endif
- if (E->hadArrayRangeDesignator())
- CGF.ErrorUnsupported(E, "GNU array range designator extension");
-
- if (E->isTransparent())
- return Visit(E->getInit(0));
- AggValueSlot Dest = EnsureSlot(E->getType());
+ AggValueSlot Dest = EnsureSlot(ExprToVisit->getType());
- LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
+ LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), ExprToVisit->getType());
// Handle initialization of an array.
- if (E->getType()->isArrayType()) {
+ if (ExprToVisit->getType()->isConstantArrayType()) {
auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType());
- EmitArrayInit(Dest.getAddress(), AType, E->getType(), E);
+ EmitArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), ExprToVisit,
+ InitExprs, ArrayFiller);
+ return;
+ } else if (ExprToVisit->getType()->isVariableArrayType()) {
+ // A variable array type that has an initializer can only do empty
+ // initialization. And because this feature is not exposed as an extension
+ // in C++, we can safely memset the array memory to zero.
+ assert(InitExprs.size() == 0 &&
+ "you can only use an empty initializer with VLAs");
+ CGF.EmitNullInitialization(Dest.getAddress(), ExprToVisit->getType());
return;
}
- assert(E->getType()->isRecordType() && "Only support structs/unions here!");
+ assert(ExprToVisit->getType()->isRecordType() &&
+ "Only support structs/unions here!");
// Do struct initialization; this code just sets each individual member
// to the approprate value. This makes bitfield support automatic;
// the disadvantage is that the generated code is more difficult for
// the optimizer, especially with bitfields.
- unsigned NumInitElements = E->getNumInits();
- RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
+ unsigned NumInitElements = InitExprs.size();
+ RecordDecl *record = ExprToVisit->getType()->castAs<RecordType>()->getDecl();
// We'll need to enter cleanup scopes in case any of the element
// initializers throws an exception.
@@ -1645,7 +1695,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// Emit initialization of base classes.
if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) {
- assert(E->getNumInits() >= CXXRD->getNumBases() &&
+ assert(NumInitElements >= CXXRD->getNumBases() &&
"missing initializer for base class");
for (auto &Base : CXXRD->bases()) {
assert(!Base.isVirtual() && "should not see vbases here");
@@ -1659,7 +1709,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased,
CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual()));
- CGF.EmitAggExpr(E->getInit(curInitIndex++), AggSlot);
+ CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot);
if (QualType::DestructionKind dtorKind =
Base.getType().isDestructedType()) {
@@ -1675,25 +1725,25 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (record->isUnion()) {
// Only initialize one field of a union. The field itself is
// specified by the initializer list.
- if (!E->getInitializedFieldInUnion()) {
+ if (!InitializedFieldInUnion) {
// Empty union; we have nothing to do.
#ifndef NDEBUG
// Make sure that it's really an empty and not a failure of
// semantic analysis.
for (const auto *Field : record->fields())
- assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
+ assert((Field->isUnnamedBitfield() || Field->isAnonymousStructOrUnion()) && "Only unnamed bitfields or ananymous class allowed");
#endif
return;
}
// FIXME: volatility
- FieldDecl *Field = E->getInitializedFieldInUnion();
+ FieldDecl *Field = InitializedFieldInUnion;
LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
if (NumInitElements) {
// Store the initializer into the field
- EmitInitializationToLValue(E->getInit(0), FieldLoc);
+ EmitInitializationToLValue(InitExprs[0], FieldLoc);
} else {
// Default-initialize to null.
EmitNullInitializationToLValue(FieldLoc);
@@ -1717,7 +1767,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// have a zeroed object, and the rest of the fields are
// zero-initializable.
if (curInitIndex == NumInitElements && Dest.isZeroed() &&
- CGF.getTypes().isZeroInitializable(E->getType()))
+ CGF.getTypes().isZeroInitializable(ExprToVisit->getType()))
break;
@@ -1727,7 +1777,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (curInitIndex < NumInitElements) {
// Store the initializer into the field.
- EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
+ EmitInitializationToLValue(InitExprs[curInitIndex++], LV);
} else {
// We're out of initializers; default-initialize to null
EmitNullInitializationToLValue(LV);
@@ -1798,6 +1848,7 @@ void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
CharUnits elementAlign =
destPtr.getAlignment().alignmentOfArrayElement(elementSize);
+ llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType);
llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
@@ -1807,8 +1858,8 @@ void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
llvm::PHINode *index =
Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
index->addIncoming(zero, entryBB);
- llvm::Value *element = Builder.CreateInBoundsGEP(
- begin->getType()->getPointerElementType(), begin, index);
+ llvm::Value *element =
+ Builder.CreateInBoundsGEP(llvmElementType, begin, index);
// Prepare for a cleanup.
QualType::DestructionKind dtorKind = elementType.isDestructedType();
@@ -1830,8 +1881,8 @@ void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
// at the end of each iteration.
CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
- LValue elementLV =
- CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
+ LValue elementLV = CGF.MakeAddrLValue(
+ Address(element, llvmElementType, elementAlign), elementType);
if (InnerLoop) {
// If the subexpression is an ArrayInitLoopExpr, share its cleanup.
@@ -1922,7 +1973,7 @@ static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
// Reference values are always non-null and have the width of a pointer.
if (Field->getType()->isReferenceType())
NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
- CGF.getTarget().getPointerWidth(0));
+ CGF.getTarget().getPointerWidth(LangAS::Default));
else
NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
}
@@ -1971,8 +2022,7 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
// Okay, it seems like a good idea to use an initial memset, emit the call.
llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity());
- Address Loc = Slot.getAddress();
- Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty);
+ Address Loc = Slot.getAddress().withElementType(CGF.Int8Ty);
CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false);
// Tell the AggExprEmitter that the slot is known zero.
@@ -2136,8 +2186,8 @@ void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
// we need to use a different call here. We use isVolatile to indicate when
// either the source or the destination is volatile.
- DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
- SrcPtr = Builder.CreateElementBitCast(SrcPtr, Int8Ty);
+ DestPtr = DestPtr.withElementType(Int8Ty);
+ SrcPtr = SrcPtr.withElementType(Int8Ty);
// Don't do any of the memmove_collectable tests if GC isn't set.
if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp
index f42759e9db50..d136bfc37278 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp
@@ -33,18 +33,20 @@ struct MemberCallInfo {
}
static MemberCallInfo
-commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
+commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, GlobalDecl GD,
llvm::Value *This, llvm::Value *ImplicitParam,
QualType ImplicitParamTy, const CallExpr *CE,
CallArgList &Args, CallArgList *RtlArgs) {
+ auto *MD = cast<CXXMethodDecl>(GD.getDecl());
+
assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
isa<CXXOperatorCallExpr>(CE));
- assert(MD->isInstance() &&
+ assert(MD->isImplicitObjectMemberFunction() &&
"Trying to emit a member or operator call expr on a static method!");
// Push the this ptr.
const CXXRecordDecl *RD =
- CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(MD);
+ CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(GD);
Args.add(RValue::get(This), CGF.getTypes().DeriveThisType(RD, MD));
// If there is an implicit parameter (e.g. VTT), emit it.
@@ -64,7 +66,12 @@ commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
Args.addFrom(*RtlArgs);
} else if (CE) {
// Special case: skip first argument of CXXOperatorCall (it is "this").
- unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
+ unsigned ArgsToSkip = 0;
+ if (const auto *Op = dyn_cast<CXXOperatorCallExpr>(CE)) {
+ if (const auto *M = dyn_cast<CXXMethodDecl>(Op->getCalleeDecl()))
+ ArgsToSkip =
+ static_cast<unsigned>(!M->isExplicitObjectMemberFunction());
+ }
CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
CE->getDirectCallee());
} else {
@@ -110,7 +117,7 @@ RValue CodeGenFunction::EmitCXXDestructorCall(
}
CallArgList Args;
- commonEmitCXXMemberOrOperatorCall(*this, DtorDecl, This, ImplicitParam,
+ commonEmitCXXMemberOrOperatorCall(*this, Dtor, This, ImplicitParam,
ImplicitParamTy, CE, Args, nullptr);
return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee,
ReturnValueSlot(), Args, nullptr, CE && CE == MustTailCall,
@@ -285,7 +292,8 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
assert(ReturnValue.isNull() && "Constructor shouldn't have return value");
CallArgList Args;
commonEmitCXXMemberOrOperatorCall(
- *this, Ctor, This.getPointer(*this), /*ImplicitParam=*/nullptr,
+ *this, {Ctor, Ctor_Complete}, This.getPointer(*this),
+ /*ImplicitParam=*/nullptr,
/*ImplicitParamTy=*/QualType(), CE, Args, nullptr);
EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
@@ -443,9 +451,9 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
// Emit the 'this' pointer.
Address This = Address::invalid();
if (BO->getOpcode() == BO_PtrMemI)
- This = EmitPointerWithAlignment(BaseExpr);
+ This = EmitPointerWithAlignment(BaseExpr, nullptr, nullptr, KnownNonNull);
else
- This = EmitLValue(BaseExpr).getAddress(*this);
+ This = EmitLValue(BaseExpr, KnownNonNull).getAddress(*this);
EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(),
QualType(MPT->getClass(), 0));
@@ -481,7 +489,7 @@ RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
const CXXMethodDecl *MD,
ReturnValueSlot ReturnValue) {
- assert(MD->isInstance() &&
+ assert(MD->isImplicitObjectMemberFunction() &&
"Trying to emit a member call expr on a static method!");
return EmitCXXMemberOrOperatorMemberCallExpr(
E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
@@ -499,7 +507,7 @@ static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
if (Base->isEmpty())
return;
- DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);
+ DestPtr = DestPtr.withElementType(CGF.Int8Ty);
const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
CharUnits NVSize = Layout.getNonVirtualSize();
@@ -548,11 +556,11 @@ static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
/*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage,
NullConstantForBase, Twine());
- CharUnits Align = std::max(Layout.getNonVirtualAlignment(),
- DestPtr.getAlignment());
+ CharUnits Align =
+ std::max(Layout.getNonVirtualAlignment(), DestPtr.getAlignment());
NullVariable->setAlignment(Align.getAsAlign());
- Address SrcPtr = Address(CGF.EmitCastToVoidPtr(NullVariable), Align);
+ Address SrcPtr(NullVariable, CGF.Int8Ty, Align);
// Get and call the appropriate llvm.memcpy overload.
for (std::pair<CharUnits, CharUnits> Store : Stores) {
@@ -592,12 +600,12 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
// already zeroed.
if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
switch (E->getConstructionKind()) {
- case CXXConstructExpr::CK_Delegating:
- case CXXConstructExpr::CK_Complete:
+ case CXXConstructionKind::Delegating:
+ case CXXConstructionKind::Complete:
EmitNullInitialization(Dest.getAddress(), E->getType());
break;
- case CXXConstructExpr::CK_VirtualBase:
- case CXXConstructExpr::CK_NonVirtualBase:
+ case CXXConstructionKind::VirtualBase:
+ case CXXConstructionKind::NonVirtualBase:
EmitNullBaseClassInitialization(*this, Dest.getAddress(),
CD->getParent());
break;
@@ -633,21 +641,21 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
bool Delegating = false;
switch (E->getConstructionKind()) {
- case CXXConstructExpr::CK_Delegating:
+ case CXXConstructionKind::Delegating:
// We should be emitting a constructor; GlobalDecl will assert this
Type = CurGD.getCtorType();
Delegating = true;
break;
- case CXXConstructExpr::CK_Complete:
+ case CXXConstructionKind::Complete:
Type = Ctor_Complete;
break;
- case CXXConstructExpr::CK_VirtualBase:
+ case CXXConstructionKind::VirtualBase:
ForVirtualBase = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
- case CXXConstructExpr::CK_NonVirtualBase:
+ case CXXConstructionKind::NonVirtualBase:
Type = Ctor_Base;
}
@@ -764,7 +772,7 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
// wider than that, check whether it's already too big, and if so,
// overflow.
else if (numElementsWidth > sizeWidth &&
- numElementsWidth - sizeWidth > count.countLeadingZeros())
+ numElementsWidth - sizeWidth > count.countl_zero())
hasAnyOverflow = true;
// Okay, compute a count at the right width.
@@ -825,8 +833,8 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
// going to have to do a comparison for (2), and this happens to
// take care of (1), too.
if (numElementsWidth > sizeWidth) {
- llvm::APInt threshold(numElementsWidth, 1);
- threshold <<= sizeWidth;
+ llvm::APInt threshold =
+ llvm::APInt::getOneBitSet(numElementsWidth, sizeWidth);
llvm::Value *thresholdV
= llvm::ConstantInt::get(numElementsType, threshold);
@@ -1030,11 +1038,25 @@ void CodeGenFunction::EmitNewArrayInitializer(
return true;
};
+ const InitListExpr *ILE = dyn_cast<InitListExpr>(Init);
+ const CXXParenListInitExpr *CPLIE = nullptr;
+ const StringLiteral *SL = nullptr;
+ const ObjCEncodeExpr *OCEE = nullptr;
+ const Expr *IgnoreParen = nullptr;
+ if (!ILE) {
+ IgnoreParen = Init->IgnoreParenImpCasts();
+ CPLIE = dyn_cast<CXXParenListInitExpr>(IgnoreParen);
+ SL = dyn_cast<StringLiteral>(IgnoreParen);
+ OCEE = dyn_cast<ObjCEncodeExpr>(IgnoreParen);
+ }
+
// If the initializer is an initializer list, first do the explicit elements.
- if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
+ if (ILE || CPLIE || SL || OCEE) {
// Initializing from a (braced) string literal is a special case; the init
// list element does not initialize a (single) array element.
- if (ILE->isStringLiteralInit()) {
+ if ((ILE && ILE->isStringLiteralInit()) || SL || OCEE) {
+ if (!ILE)
+ Init = IgnoreParen;
// Initialize the initial portion of length equal to that of the string
// literal. The allocation must be for at least this much; we emitted a
// check for that earlier.
@@ -1046,19 +1068,15 @@ void CodeGenFunction::EmitNewArrayInitializer(
AggValueSlot::DoesNotOverlap,
AggValueSlot::IsNotZeroed,
AggValueSlot::IsSanitizerChecked);
- EmitAggExpr(ILE->getInit(0), Slot);
+ EmitAggExpr(ILE ? ILE->getInit(0) : Init, Slot);
// Move past these elements.
InitListElements =
- cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
- ->getSize().getZExtValue();
- CurPtr =
- Address(Builder.CreateInBoundsGEP(CurPtr.getElementType(),
- CurPtr.getPointer(),
- Builder.getSize(InitListElements),
- "string.init.end"),
- CurPtr.getAlignment().alignmentAtOffset(InitListElements *
- ElementSize));
+ cast<ConstantArrayType>(Init->getType()->getAsArrayTypeUnsafe())
+ ->getSize()
+ .getZExtValue();
+ CurPtr = Builder.CreateConstInBoundsGEP(
+ CurPtr, InitListElements, "string.init.end");
// Zero out the rest, if any remain.
llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
@@ -1070,7 +1088,9 @@ void CodeGenFunction::EmitNewArrayInitializer(
return;
}
- InitListElements = ILE->getNumInits();
+ ArrayRef<const Expr *> InitExprs =
+ ILE ? ILE->inits() : CPLIE->getInitExprs();
+ InitListElements = InitExprs.size();
// If this is a multi-dimensional array new, we will initialize multiple
// elements with each init list element.
@@ -1078,7 +1098,7 @@ void CodeGenFunction::EmitNewArrayInitializer(
if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
AllocType->getAsArrayTypeUnsafe())) {
ElementTy = ConvertTypeForMem(AllocType);
- CurPtr = Builder.CreateElementBitCast(CurPtr, ElementTy);
+ CurPtr = CurPtr.withElementType(ElementTy);
InitListElements *= getContext().getConstantArrayElementCount(CAT);
}
@@ -1098,30 +1118,28 @@ void CodeGenFunction::EmitNewArrayInitializer(
}
CharUnits StartAlign = CurPtr.getAlignment();
- for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
+ unsigned i = 0;
+ for (const Expr *IE : InitExprs) {
// Tell the cleanup that it needs to destroy up to this
// element. TODO: some of these stores can be trivially
// observed to be unnecessary.
if (EndOfInit.isValid()) {
- auto FinishedPtr =
- Builder.CreateBitCast(CurPtr.getPointer(), BeginPtr.getType());
- Builder.CreateStore(FinishedPtr, EndOfInit);
+ Builder.CreateStore(CurPtr.getPointer(), EndOfInit);
}
// FIXME: If the last initializer is an incomplete initializer list for
// an array, and we have an array filler, we can fold together the two
// initialization loops.
- StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
- ILE->getInit(i)->getType(), CurPtr,
+ StoreAnyExprIntoOneUnit(*this, IE, IE->getType(), CurPtr,
AggValueSlot::DoesNotOverlap);
- CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getElementType(),
- CurPtr.getPointer(),
- Builder.getSize(1),
- "array.exp.next"),
- StartAlign.alignmentAtOffset((i + 1) * ElementSize));
+ CurPtr = Address(Builder.CreateInBoundsGEP(
+ CurPtr.getElementType(), CurPtr.getPointer(),
+ Builder.getSize(1), "array.exp.next"),
+ CurPtr.getElementType(),
+ StartAlign.alignmentAtOffset((++i) * ElementSize));
}
// The remaining elements are filled with the array filler expression.
- Init = ILE->getArrayFiller();
+ Init = ILE ? ILE->getArrayFiller() : CPLIE->getArrayFiller();
// Extract the initializer for the individual array elements by pulling
// out the array filler from all the nested initializer lists. This avoids
@@ -1135,7 +1153,7 @@ void CodeGenFunction::EmitNewArrayInitializer(
}
// Switch back to initializing one base element at a time.
- CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr.getType());
+ CurPtr = CurPtr.withElementType(BeginPtr.getElementType());
}
// If all elements have already been initialized, skip any further
@@ -1249,10 +1267,10 @@ void CodeGenFunction::EmitNewArrayInitializer(
// Set up the current-element phi.
llvm::PHINode *CurPtrPhi =
- Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
+ Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
CurPtrPhi->addIncoming(CurPtr.getPointer(), EntryBB);
- CurPtr = Address(CurPtrPhi, ElementAlign);
+ CurPtr = Address(CurPtrPhi, CurPtr.getElementType(), ElementAlign);
// Store the new Cleanup position for irregular Cleanups.
if (EndOfInit.isValid())
@@ -1326,8 +1344,7 @@ static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr);
if (CalleeDecl->isReplaceableGlobalAllocationFunction() &&
Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
- CallOrInvoke->addAttribute(llvm::AttributeList::FunctionIndex,
- llvm::Attribute::Builtin);
+ CallOrInvoke->addFnAttr(llvm::Attribute::Builtin);
}
return RV;
@@ -1561,16 +1578,23 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// 1. Build a call to the allocation function.
FunctionDecl *allocator = E->getOperatorNew();
- // If there is a brace-initializer, cannot allocate fewer elements than inits.
+ // If there is a brace-initializer or C++20 parenthesized initializer, cannot
+ // allocate fewer elements than inits.
unsigned minElements = 0;
if (E->isArray() && E->hasInitializer()) {
- const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer());
- if (ILE && ILE->isStringLiteralInit())
+ const Expr *Init = E->getInitializer();
+ const InitListExpr *ILE = dyn_cast<InitListExpr>(Init);
+ const CXXParenListInitExpr *CPLIE = dyn_cast<CXXParenListInitExpr>(Init);
+ const Expr *IgnoreParen = Init->IgnoreParenImpCasts();
+ if ((ILE && ILE->isStringLiteralInit()) ||
+ isa<StringLiteral>(IgnoreParen) || isa<ObjCEncodeExpr>(IgnoreParen)) {
minElements =
- cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
- ->getSize().getZExtValue();
- else if (ILE)
- minElements = ILE->getNumInits();
+ cast<ConstantArrayType>(Init->getType()->getAsArrayTypeUnsafe())
+ ->getSize()
+ .getZExtValue();
+ } else if (ILE || CPLIE) {
+ minElements = ILE ? ILE->getNumInits() : CPLIE->getInitExprs().size();
+ }
}
llvm::Value *numElements = nullptr;
@@ -1578,7 +1602,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
llvm::Value *allocSize =
EmitCXXNewAllocSize(*this, E, minElements, numElements,
allocSizeWithoutCookie);
- CharUnits allocAlign = getContext().getPreferredTypeAlignInChars(allocType);
+ CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);
// Emit the allocation call. If the allocator is a global placement
// operator, just "inline" it directly.
@@ -1595,7 +1619,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// In these cases, discard the computed alignment and use the
// formal alignment of the allocated type.
if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl)
- allocation = Address(allocation.getPointer(), allocAlign);
+ allocation = allocation.withAlignment(allocAlign);
// Set up allocatorArgs for the call to operator delete if it's not
// the reserved global operator.
@@ -1659,13 +1683,13 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
CharUnits allocationAlign = allocAlign;
if (!E->passAlignment() &&
allocator->isReplaceableGlobalAllocationFunction()) {
- unsigned AllocatorAlign = llvm::PowerOf2Floor(std::min<uint64_t>(
+ unsigned AllocatorAlign = llvm::bit_floor(std::min<uint64_t>(
Target.getNewAlign(), getContext().getTypeSize(allocType)));
allocationAlign = std::max(
allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
}
- allocation = Address(RV.getScalarVal(), allocationAlign);
+ allocation = Address(RV.getScalarVal(), Int8Ty, allocationAlign);
}
// Emit a null check on the allocation result if the allocation
@@ -1718,7 +1742,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
}
llvm::Type *elementTy = ConvertTypeForMem(allocType);
- Address result = Builder.CreateElementBitCast(allocation, elementTy);
+ Address result = allocation.withElementType(elementTy);
// Passing pointer through launder.invariant.group to avoid propagation of
// vptrs information which may be included in previous type.
@@ -1726,8 +1750,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// of optimization level.
if (CGM.getCodeGenOpts().StrictVTablePointers &&
allocator->isReservedGlobalPlacementOperator())
- result = Address(Builder.CreateLaunderInvariantGroup(result.getPointer()),
- result.getAlignment());
+ result = Builder.CreateLaunderInvariantGroup(result);
// Emit sanitizer checks for pointer value now, so that in the case of an
// array it was checked only once and not at each constructor call. We may
@@ -1743,13 +1766,14 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
allocSizeWithoutCookie);
+ llvm::Value *resultPtr = result.getPointer();
if (E->isArray()) {
// NewPtr is a pointer to the base element type. If we're
// allocating an array of arrays, we'll need to cast back to the
// array pointer type.
llvm::Type *resultType = ConvertTypeForMem(E->getType());
- if (result.getType() != resultType)
- result = Builder.CreateBitCast(result, resultType);
+ if (resultPtr->getType() != resultType)
+ resultPtr = Builder.CreateBitCast(resultPtr, resultType);
}
// Deactivate the 'operator delete' cleanup if we finished
@@ -1759,7 +1783,6 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
cleanupDominator->eraseFromParent();
}
- llvm::Value *resultPtr = result.getPointer();
if (nullCheck) {
conditional.end(*this);
@@ -1803,7 +1826,8 @@ void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
CharUnits Align = CGM.getNaturalTypeAlignment(DDTag);
DestroyingDeleteTag = CreateTempAlloca(Ty, "destroying.delete.tag");
DestroyingDeleteTag->setAlignment(Align.getAsAlign());
- DeleteArgs.add(RValue::getAggregate(Address(DestroyingDeleteTag, Align)), DDTag);
+ DeleteArgs.add(
+ RValue::getAggregate(Address(DestroyingDeleteTag, Ty, Align)), DDTag);
}
// Pass the size if the delete function has a size_t parameter.
@@ -2076,6 +2100,7 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
EmitBlock(DeleteNotNull);
+ Ptr.setKnownNonNull();
QualType DeleteTy = E->getDestroyedType();
@@ -2108,7 +2133,8 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getElementType(),
Ptr.getPointer(), GEP, "del.first"),
- Ptr.getAlignment());
+ ConvertTypeForMem(DeleteTy), Ptr.getAlignment(),
+ Ptr.isKnownNonNull());
}
assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());
@@ -2195,13 +2221,20 @@ static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
}
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
- llvm::Type *StdTypeInfoPtrTy =
- ConvertType(E->getType())->getPointerTo();
+ llvm::Type *PtrTy = llvm::PointerType::getUnqual(getLLVMContext());
+ LangAS GlobAS = CGM.GetGlobalVarAddressSpace(nullptr);
+
+ auto MaybeASCast = [=](auto &&TypeInfo) {
+ if (GlobAS == LangAS::Default)
+ return TypeInfo;
+ return getTargetHooks().performAddrSpaceCast(CGM,TypeInfo, GlobAS,
+ LangAS::Default, PtrTy);
+ };
if (E->isTypeOperand()) {
llvm::Constant *TypeInfo =
CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
- return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
+ return MaybeASCast(TypeInfo);
}
// C++ [expr.typeid]p2:
@@ -2211,12 +2244,10 @@ llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
// type) to which the glvalue refers.
// If the operand is already most derived object, no need to look up vtable.
if (E->isPotentiallyEvaluated() && !E->isMostDerived(getContext()))
- return EmitTypeidFromVTable(*this, E->getExprOperand(),
- StdTypeInfoPtrTy);
+ return EmitTypeidFromVTable(*this, E->getExprOperand(), PtrTy);
QualType OperandTy = E->getExprOperand()->getType();
- return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
- StdTypeInfoPtrTy);
+ return MaybeASCast(CGM.GetAddrOfRTTIDescriptor(OperandTy));
}
static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
@@ -2230,8 +2261,8 @@ static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
return nullptr;
- CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
- return llvm::UndefValue::get(DestLTy);
+ CGF.Builder.ClearInsertionPoint();
+ return llvm::PoisonValue::get(DestLTy);
}
llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
@@ -2244,17 +2275,16 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
// C++ [expr.dynamic.cast]p7:
// If T is "pointer to cv void," then the result is a pointer to the most
// derived object pointed to by v.
- const PointerType *DestPTy = DestTy->getAs<PointerType>();
-
- bool isDynamicCastToVoid;
+ bool IsDynamicCastToVoid = DestTy->isVoidPointerType();
QualType SrcRecordTy;
QualType DestRecordTy;
- if (DestPTy) {
- isDynamicCastToVoid = DestPTy->getPointeeType()->isVoidType();
+ if (IsDynamicCastToVoid) {
+ SrcRecordTy = SrcTy->getPointeeType();
+ // No DestRecordTy.
+ } else if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
DestRecordTy = DestPTy->getPointeeType();
} else {
- isDynamicCastToVoid = false;
SrcRecordTy = SrcTy;
DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
}
@@ -2267,18 +2297,30 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
EmitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr.getPointer(),
SrcRecordTy);
- if (DCE->isAlwaysNull())
- if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy))
+ if (DCE->isAlwaysNull()) {
+ if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy)) {
+ // Expression emission is expected to retain a valid insertion point.
+ if (!Builder.GetInsertBlock())
+ EmitBlock(createBasicBlock("dynamic_cast.unreachable"));
return T;
+ }
+ }
assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
+ // If the destination is effectively final, the cast succeeds if and only
+ // if the dynamic type of the pointer is exactly the destination type.
+ bool IsExact = !IsDynamicCastToVoid &&
+ CGM.getCodeGenOpts().OptimizationLevel > 0 &&
+ DestRecordTy->getAsCXXRecordDecl()->isEffectivelyFinal() &&
+ CGM.getCXXABI().shouldEmitExactDynamicCast(DestRecordTy);
+
// C++ [expr.dynamic.cast]p4:
// If the value of v is a null pointer value in the pointer case, the result
// is the null pointer value of type T.
bool ShouldNullCheckSrcValue =
- CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(SrcTy->isPointerType(),
- SrcRecordTy);
+ IsExact || CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(
+ SrcTy->isPointerType(), SrcRecordTy);
llvm::BasicBlock *CastNull = nullptr;
llvm::BasicBlock *CastNotNull = nullptr;
@@ -2294,30 +2336,38 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
}
llvm::Value *Value;
- if (isDynamicCastToVoid) {
- Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy,
- DestTy);
+ if (IsDynamicCastToVoid) {
+ Value = CGM.getCXXABI().emitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy);
+ } else if (IsExact) {
+ // If the destination type is effectively final, this pointer points to the
+ // right type if and only if its vptr has the right value.
+ Value = CGM.getCXXABI().emitExactDynamicCast(
+ *this, ThisAddr, SrcRecordTy, DestTy, DestRecordTy, CastEnd, CastNull);
} else {
assert(DestRecordTy->isRecordType() &&
"destination type must be a record type!");
- Value = CGM.getCXXABI().EmitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
+ Value = CGM.getCXXABI().emitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
DestTy, DestRecordTy, CastEnd);
- CastNotNull = Builder.GetInsertBlock();
}
+ CastNotNull = Builder.GetInsertBlock();
+ llvm::Value *NullValue = nullptr;
if (ShouldNullCheckSrcValue) {
EmitBranch(CastEnd);
EmitBlock(CastNull);
+ NullValue = EmitDynamicCastToNull(*this, DestTy);
+ CastNull = Builder.GetInsertBlock();
+
EmitBranch(CastEnd);
}
EmitBlock(CastEnd);
- if (ShouldNullCheckSrcValue) {
+ if (CastNull) {
llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
PHI->addIncoming(Value, CastNotNull);
- PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
+ PHI->addIncoming(NullValue, CastNull);
Value = PHI;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprComplex.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprComplex.cpp
index 5409e82d437e..839fe16cd772 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprComplex.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprComplex.cpp
@@ -177,11 +177,15 @@ public:
ComplexPairTy VisitImplicitCastExpr(ImplicitCastExpr *E) {
// Unlike for scalars, we don't have to worry about function->ptr demotion
// here.
+ if (E->changesVolatileQualification())
+ return EmitLoadOfLValue(E);
return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType());
}
ComplexPairTy VisitCastExpr(CastExpr *E) {
if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E))
CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
+ if (E->changesVolatileQualification())
+ return EmitLoadOfLValue(E);
return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType());
}
ComplexPairTy VisitCallExpr(const CallExpr *E);
@@ -206,12 +210,13 @@ public:
return VisitPrePostIncDec(E, true, true);
}
ComplexPairTy VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
- ComplexPairTy VisitUnaryPlus (const UnaryOperator *E) {
- TestAndClearIgnoreReal();
- TestAndClearIgnoreImag();
- return Visit(E->getSubExpr());
- }
- ComplexPairTy VisitUnaryMinus (const UnaryOperator *E);
+
+ ComplexPairTy VisitUnaryPlus(const UnaryOperator *E,
+ QualType PromotionType = QualType());
+ ComplexPairTy VisitPlus(const UnaryOperator *E, QualType PromotionType);
+ ComplexPairTy VisitUnaryMinus(const UnaryOperator *E,
+ QualType PromotionType = QualType());
+ ComplexPairTy VisitMinus(const UnaryOperator *E, QualType PromotionType);
ComplexPairTy VisitUnaryNot (const UnaryOperator *E);
// LNot,Real,Imag never return complex.
ComplexPairTy VisitUnaryExtension(const UnaryOperator *E) {
@@ -251,9 +256,13 @@ public:
ComplexPairTy LHS;
ComplexPairTy RHS;
QualType Ty; // Computation Type.
+ FPOptions FPFeatures;
};
- BinOpInfo EmitBinOps(const BinaryOperator *E);
+ BinOpInfo EmitBinOps(const BinaryOperator *E,
+ QualType PromotionTy = QualType());
+ ComplexPairTy EmitPromoted(const Expr *E, QualType PromotionTy);
+ ComplexPairTy EmitPromotedComplexOperand(const Expr *E, QualType PromotionTy);
LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
ComplexPairTy (ComplexExprEmitter::*Func)
(const BinOpInfo &),
@@ -266,23 +275,41 @@ public:
ComplexPairTy EmitBinSub(const BinOpInfo &Op);
ComplexPairTy EmitBinMul(const BinOpInfo &Op);
ComplexPairTy EmitBinDiv(const BinOpInfo &Op);
+ ComplexPairTy EmitAlgebraicDiv(llvm::Value *A, llvm::Value *B, llvm::Value *C,
+ llvm::Value *D);
+ ComplexPairTy EmitRangeReductionDiv(llvm::Value *A, llvm::Value *B,
+ llvm::Value *C, llvm::Value *D);
ComplexPairTy EmitComplexBinOpLibCall(StringRef LibCallName,
const BinOpInfo &Op);
- ComplexPairTy VisitBinAdd(const BinaryOperator *E) {
- return EmitBinAdd(EmitBinOps(E));
- }
- ComplexPairTy VisitBinSub(const BinaryOperator *E) {
- return EmitBinSub(EmitBinOps(E));
- }
- ComplexPairTy VisitBinMul(const BinaryOperator *E) {
- return EmitBinMul(EmitBinOps(E));
+ QualType getPromotionType(QualType Ty) {
+ if (auto *CT = Ty->getAs<ComplexType>()) {
+ QualType ElementType = CT->getElementType();
+ if (ElementType.UseExcessPrecision(CGF.getContext()))
+ return CGF.getContext().getComplexType(CGF.getContext().FloatTy);
+ }
+ if (Ty.UseExcessPrecision(CGF.getContext()))
+ return CGF.getContext().FloatTy;
+ return QualType();
}
- ComplexPairTy VisitBinDiv(const BinaryOperator *E) {
- return EmitBinDiv(EmitBinOps(E));
+
+#define HANDLEBINOP(OP) \
+ ComplexPairTy VisitBin##OP(const BinaryOperator *E) { \
+ QualType promotionTy = getPromotionType(E->getType()); \
+ ComplexPairTy result = EmitBin##OP(EmitBinOps(E, promotionTy)); \
+ if (!promotionTy.isNull()) \
+ result = \
+ CGF.EmitUnPromotedValue(result, E->getType()); \
+ return result; \
}
+ HANDLEBINOP(Mul)
+ HANDLEBINOP(Div)
+ HANDLEBINOP(Add)
+ HANDLEBINOP(Sub)
+#undef HANDLEBINOP
+
ComplexPairTy VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
return Visit(E->getSemanticForm());
}
@@ -469,15 +496,14 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
case CK_LValueBitCast: {
LValue origLV = CGF.EmitLValue(Op);
- Address V = origLV.getAddress(CGF);
- V = Builder.CreateElementBitCast(V, CGF.ConvertType(DestTy));
+ Address V = origLV.getAddress(CGF).withElementType(CGF.ConvertType(DestTy));
return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy), Op->getExprLoc());
}
case CK_LValueToRValueBitCast: {
LValue SourceLVal = CGF.EmitLValue(Op);
- Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF),
- CGF.ConvertTypeForMem(DestTy));
+ Address Addr = SourceLVal.getAddress(CGF).withElementType(
+ CGF.ConvertTypeForMem(DestTy));
LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
return EmitLoadOfLValue(DestLV, Op->getExprLoc());
@@ -556,10 +582,45 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
llvm_unreachable("unknown cast resulting in complex value");
}
-ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
+ComplexPairTy ComplexExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
+ QualType PromotionType) {
+ QualType promotionTy = PromotionType.isNull()
+ ? getPromotionType(E->getSubExpr()->getType())
+ : PromotionType;
+ ComplexPairTy result = VisitPlus(E, promotionTy);
+ if (!promotionTy.isNull())
+ return CGF.EmitUnPromotedValue(result, E->getSubExpr()->getType());
+ return result;
+}
+
+ComplexPairTy ComplexExprEmitter::VisitPlus(const UnaryOperator *E,
+ QualType PromotionType) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
- ComplexPairTy Op = Visit(E->getSubExpr());
+ if (!PromotionType.isNull())
+ return CGF.EmitPromotedComplexExpr(E->getSubExpr(), PromotionType);
+ return Visit(E->getSubExpr());
+}
+
+ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
+ QualType PromotionType) {
+ QualType promotionTy = PromotionType.isNull()
+ ? getPromotionType(E->getSubExpr()->getType())
+ : PromotionType;
+ ComplexPairTy result = VisitMinus(E, promotionTy);
+ if (!promotionTy.isNull())
+ return CGF.EmitUnPromotedValue(result, E->getSubExpr()->getType());
+ return result;
+}
+ComplexPairTy ComplexExprEmitter::VisitMinus(const UnaryOperator *E,
+ QualType PromotionType) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ ComplexPairTy Op;
+ if (!PromotionType.isNull())
+ Op = CGF.EmitPromotedComplexExpr(E->getSubExpr(), PromotionType);
+ else
+ Op = Visit(E->getSubExpr());
llvm::Value *ResR, *ResI;
if (Op.first->getType()->isFloatingPointTy()) {
@@ -590,6 +651,7 @@ ComplexPairTy ComplexExprEmitter::EmitBinAdd(const BinOpInfo &Op) {
llvm::Value *ResR, *ResI;
if (Op.LHS.first->getType()->isFloatingPointTy()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Op.FPFeatures);
ResR = Builder.CreateFAdd(Op.LHS.first, Op.RHS.first, "add.r");
if (Op.LHS.second && Op.RHS.second)
ResI = Builder.CreateFAdd(Op.LHS.second, Op.RHS.second, "add.i");
@@ -608,6 +670,7 @@ ComplexPairTy ComplexExprEmitter::EmitBinAdd(const BinOpInfo &Op) {
ComplexPairTy ComplexExprEmitter::EmitBinSub(const BinOpInfo &Op) {
llvm::Value *ResR, *ResI;
if (Op.LHS.first->getType()->isFloatingPointTy()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Op.FPFeatures);
ResR = Builder.CreateFSub(Op.LHS.first, Op.RHS.first, "sub.r");
if (Op.LHS.second && Op.RHS.second)
ResI = Builder.CreateFSub(Op.LHS.second, Op.RHS.second, "sub.i");
@@ -700,6 +763,7 @@ ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) {
// FIXME: C11 also provides for imaginary types which would allow folding
// still more of this within the type system.
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Op.FPFeatures);
if (Op.LHS.second && Op.RHS.second) {
// If both operands are complex, emit the core math directly, and then
// test for NaNs. If we find NaNs in the result, we delegate to a libcall
@@ -721,6 +785,10 @@ ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) {
ResR = Builder.CreateFSub(AC, BD, "mul_r");
ResI = Builder.CreateFAdd(AD, BC, "mul_i");
+ if (Op.FPFeatures.getComplexRange() == LangOptions::CX_Limited ||
+ Op.FPFeatures.getComplexRange() == LangOptions::CX_Fortran)
+ return ComplexPairTy(ResR, ResI);
+
// Emit the test for the real part becoming NaN and create a branch to
// handle it. We test for NaN by comparing the number to itself.
Value *IsRNaN = Builder.CreateFCmpUNO(ResR, ResR, "isnan_cmp");
@@ -786,22 +854,145 @@ ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) {
return ComplexPairTy(ResR, ResI);
}
+ComplexPairTy ComplexExprEmitter::EmitAlgebraicDiv(llvm::Value *LHSr,
+ llvm::Value *LHSi,
+ llvm::Value *RHSr,
+ llvm::Value *RHSi) {
+ // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
+ llvm::Value *DSTr, *DSTi;
+
+ llvm::Value *AC = Builder.CreateFMul(LHSr, RHSr); // a*c
+ llvm::Value *BD = Builder.CreateFMul(LHSi, RHSi); // b*d
+ llvm::Value *ACpBD = Builder.CreateFAdd(AC, BD); // ac+bd
+
+ llvm::Value *CC = Builder.CreateFMul(RHSr, RHSr); // c*c
+ llvm::Value *DD = Builder.CreateFMul(RHSi, RHSi); // d*d
+ llvm::Value *CCpDD = Builder.CreateFAdd(CC, DD); // cc+dd
+
+ llvm::Value *BC = Builder.CreateFMul(LHSi, RHSr); // b*c
+ llvm::Value *AD = Builder.CreateFMul(LHSr, RHSi); // a*d
+ llvm::Value *BCmAD = Builder.CreateFSub(BC, AD); // bc-ad
+
+ DSTr = Builder.CreateFDiv(ACpBD, CCpDD);
+ DSTi = Builder.CreateFDiv(BCmAD, CCpDD);
+ return ComplexPairTy(DSTr, DSTi);
+}
+
+// EmitFAbs - Emit a call to @llvm.fabs.
+static llvm::Value *EmitllvmFAbs(CodeGenFunction &CGF, llvm::Value *Value) {
+ llvm::Function *Func =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::fabs, Value->getType());
+ llvm::Value *Call = CGF.Builder.CreateCall(Func, Value);
+ return Call;
+}
+
+// EmitRangeReductionDiv - Implements Smith's algorithm for complex division.
+// SMITH, R. L. Algorithm 116: Complex division. Commun. ACM 5, 8 (1962).
+ComplexPairTy ComplexExprEmitter::EmitRangeReductionDiv(llvm::Value *LHSr,
+ llvm::Value *LHSi,
+ llvm::Value *RHSr,
+ llvm::Value *RHSi) {
+ // FIXME: This could eventually be replaced by an LLVM intrinsic to
+ // avoid this long IR sequence.
+
+ // (a + ib) / (c + id) = (e + if)
+ llvm::Value *FAbsRHSr = EmitllvmFAbs(CGF, RHSr); // |c|
+ llvm::Value *FAbsRHSi = EmitllvmFAbs(CGF, RHSi); // |d|
+ // |c| >= |d|
+ llvm::Value *IsR = Builder.CreateFCmpUGT(FAbsRHSr, FAbsRHSi, "abs_cmp");
+
+ llvm::BasicBlock *TrueBB =
+ CGF.createBasicBlock("abs_rhsr_greater_or_equal_abs_rhsi");
+ llvm::BasicBlock *FalseBB =
+ CGF.createBasicBlock("abs_rhsr_less_than_abs_rhsi");
+ llvm::BasicBlock *ContBB = CGF.createBasicBlock("complex_div");
+ Builder.CreateCondBr(IsR, TrueBB, FalseBB);
+
+ CGF.EmitBlock(TrueBB);
+ // abs(c) >= abs(d)
+ // r = d/c
+ // tmp = c + rd
+ // e = (a + br)/tmp
+ // f = (b - ar)/tmp
+ llvm::Value *DdC = Builder.CreateFDiv(RHSi, RHSr); // r=d/c
+
+ llvm::Value *RD = Builder.CreateFMul(DdC, RHSi); // rd
+ llvm::Value *CpRD = Builder.CreateFAdd(RHSr, RD); // tmp=c+rd
+
+ llvm::Value *T3 = Builder.CreateFMul(LHSi, DdC); // br
+ llvm::Value *T4 = Builder.CreateFAdd(LHSr, T3); // a+br
+ llvm::Value *DSTTr = Builder.CreateFDiv(T4, CpRD); // (a+br)/tmp
+
+ llvm::Value *T5 = Builder.CreateFMul(LHSr, DdC); // ar
+ llvm::Value *T6 = Builder.CreateFSub(LHSi, T5); // b-ar
+ llvm::Value *DSTTi = Builder.CreateFDiv(T6, CpRD); // (b-ar)/tmp
+ Builder.CreateBr(ContBB);
+
+ CGF.EmitBlock(FalseBB);
+ // abs(c) < abs(d)
+ // r = c/d
+ // tmp = d + rc
+ // e = (ar + b)/tmp
+ // f = (br - a)/tmp
+ llvm::Value *CdD = Builder.CreateFDiv(RHSr, RHSi); // r=c/d
+
+ llvm::Value *RC = Builder.CreateFMul(CdD, RHSr); // rc
+ llvm::Value *DpRC = Builder.CreateFAdd(RHSi, RC); // tmp=d+rc
+
+ llvm::Value *T7 = Builder.CreateFMul(LHSr, CdD); // ar
+ llvm::Value *T8 = Builder.CreateFAdd(T7, LHSi); // ar+b
+ llvm::Value *DSTFr = Builder.CreateFDiv(T8, DpRC); // (ar+b)/tmp
+
+ llvm::Value *T9 = Builder.CreateFMul(LHSi, CdD); // br
+ llvm::Value *T10 = Builder.CreateFSub(T9, LHSr); // br-a
+ llvm::Value *DSTFi = Builder.CreateFDiv(T10, DpRC); // (br-a)/tmp
+ Builder.CreateBr(ContBB);
+
+ // Phi together the computation paths.
+ CGF.EmitBlock(ContBB);
+ llvm::PHINode *VALr = Builder.CreatePHI(DSTTr->getType(), 2);
+ VALr->addIncoming(DSTTr, TrueBB);
+ VALr->addIncoming(DSTFr, FalseBB);
+ llvm::PHINode *VALi = Builder.CreatePHI(DSTTi->getType(), 2);
+ VALi->addIncoming(DSTTi, TrueBB);
+ VALi->addIncoming(DSTFi, FalseBB);
+ return ComplexPairTy(VALr, VALi);
+}
+
// See C11 Annex G.5.1 for the semantics of multiplicative operators on complex
// typed values.
ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
llvm::Value *LHSr = Op.LHS.first, *LHSi = Op.LHS.second;
llvm::Value *RHSr = Op.RHS.first, *RHSi = Op.RHS.second;
-
llvm::Value *DSTr, *DSTi;
if (LHSr->getType()->isFloatingPointTy()) {
- // If we have a complex operand on the RHS and FastMath is not allowed, we
- // delegate to a libcall to handle all of the complexities and minimize
- // underflow/overflow cases. When FastMath is allowed we construct the
- // divide inline using the same algorithm as for integer operands.
- //
- // FIXME: We would be able to avoid the libcall in many places if we
- // supported imaginary types in addition to complex types.
- if (RHSi && !CGF.getLangOpts().FastMath) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Op.FPFeatures);
+ if (!RHSi) {
+ assert(LHSi && "Can have at most one non-complex operand!");
+
+ DSTr = Builder.CreateFDiv(LHSr, RHSr);
+ DSTi = Builder.CreateFDiv(LHSi, RHSr);
+ return ComplexPairTy(DSTr, DSTi);
+ }
+ llvm::Value *OrigLHSi = LHSi;
+ if (!LHSi)
+ LHSi = llvm::Constant::getNullValue(RHSi->getType());
+ if (Op.FPFeatures.getComplexRange() == LangOptions::CX_Fortran)
+ return EmitRangeReductionDiv(LHSr, LHSi, RHSr, RHSi);
+ else if (Op.FPFeatures.getComplexRange() == LangOptions::CX_Limited)
+ return EmitAlgebraicDiv(LHSr, LHSi, RHSr, RHSi);
+ else if (!CGF.getLangOpts().FastMath ||
+ // '-ffast-math' is used in the command line but followed by an
+ // '-fno-cx-limited-range'.
+ Op.FPFeatures.getComplexRange() == LangOptions::CX_Full) {
+ LHSi = OrigLHSi;
+ // If we have a complex operand on the RHS and FastMath is not allowed, we
+ // delegate to a libcall to handle all of the complexities and minimize
+ // underflow/overflow cases. When FastMath is allowed we construct the
+ // divide inline using the same algorithm as for integer operands.
+ //
+ // FIXME: We would be able to avoid the libcall in many places if we
+ // supported imaginary types in addition to complex types.
BinOpInfo LibCallOp = Op;
// If LHS was a real, supply a null imaginary part.
if (!LHSi)
@@ -823,30 +1014,8 @@ ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
case llvm::Type::FP128TyID:
return EmitComplexBinOpLibCall("__divtc3", LibCallOp);
}
- } else if (RHSi) {
- if (!LHSi)
- LHSi = llvm::Constant::getNullValue(RHSi->getType());
-
- // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
- llvm::Value *AC = Builder.CreateFMul(LHSr, RHSr); // a*c
- llvm::Value *BD = Builder.CreateFMul(LHSi, RHSi); // b*d
- llvm::Value *ACpBD = Builder.CreateFAdd(AC, BD); // ac+bd
-
- llvm::Value *CC = Builder.CreateFMul(RHSr, RHSr); // c*c
- llvm::Value *DD = Builder.CreateFMul(RHSi, RHSi); // d*d
- llvm::Value *CCpDD = Builder.CreateFAdd(CC, DD); // cc+dd
-
- llvm::Value *BC = Builder.CreateFMul(LHSi, RHSr); // b*c
- llvm::Value *AD = Builder.CreateFMul(LHSr, RHSi); // a*d
- llvm::Value *BCmAD = Builder.CreateFSub(BC, AD); // bc-ad
-
- DSTr = Builder.CreateFDiv(ACpBD, CCpDD);
- DSTi = Builder.CreateFDiv(BCmAD, CCpDD);
} else {
- assert(LHSi && "Can have at most one non-complex operand!");
-
- DSTr = Builder.CreateFDiv(LHSr, RHSr);
- DSTi = Builder.CreateFDiv(LHSi, RHSr);
+ return EmitAlgebraicDiv(LHSr, LHSi, RHSr, RHSi);
}
} else {
assert(Op.LHS.second && Op.RHS.second &&
@@ -876,21 +1045,103 @@ ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
return ComplexPairTy(DSTr, DSTi);
}
+ComplexPairTy CodeGenFunction::EmitUnPromotedValue(ComplexPairTy result,
+ QualType UnPromotionType) {
+ llvm::Type *ComplexElementTy =
+ ConvertType(UnPromotionType->castAs<ComplexType>()->getElementType());
+ if (result.first)
+ result.first =
+ Builder.CreateFPTrunc(result.first, ComplexElementTy, "unpromotion");
+ if (result.second)
+ result.second =
+ Builder.CreateFPTrunc(result.second, ComplexElementTy, "unpromotion");
+ return result;
+}
+
+ComplexPairTy CodeGenFunction::EmitPromotedValue(ComplexPairTy result,
+ QualType PromotionType) {
+ llvm::Type *ComplexElementTy =
+ ConvertType(PromotionType->castAs<ComplexType>()->getElementType());
+ if (result.first)
+ result.first = Builder.CreateFPExt(result.first, ComplexElementTy, "ext");
+ if (result.second)
+ result.second = Builder.CreateFPExt(result.second, ComplexElementTy, "ext");
+
+ return result;
+}
+
+ComplexPairTy ComplexExprEmitter::EmitPromoted(const Expr *E,
+ QualType PromotionType) {
+ E = E->IgnoreParens();
+ if (auto BO = dyn_cast<BinaryOperator>(E)) {
+ switch (BO->getOpcode()) {
+#define HANDLE_BINOP(OP) \
+ case BO_##OP: \
+ return EmitBin##OP(EmitBinOps(BO, PromotionType));
+ HANDLE_BINOP(Add)
+ HANDLE_BINOP(Sub)
+ HANDLE_BINOP(Mul)
+ HANDLE_BINOP(Div)
+#undef HANDLE_BINOP
+ default:
+ break;
+ }
+ } else if (auto UO = dyn_cast<UnaryOperator>(E)) {
+ switch (UO->getOpcode()) {
+ case UO_Minus:
+ return VisitMinus(UO, PromotionType);
+ case UO_Plus:
+ return VisitPlus(UO, PromotionType);
+ default:
+ break;
+ }
+ }
+ auto result = Visit(const_cast<Expr *>(E));
+ if (!PromotionType.isNull())
+ return CGF.EmitPromotedValue(result, PromotionType);
+ else
+ return result;
+}
+
+ComplexPairTy CodeGenFunction::EmitPromotedComplexExpr(const Expr *E,
+ QualType DstTy) {
+ return ComplexExprEmitter(*this).EmitPromoted(E, DstTy);
+}
+
+ComplexPairTy
+ComplexExprEmitter::EmitPromotedComplexOperand(const Expr *E,
+ QualType OverallPromotionType) {
+ if (E->getType()->isAnyComplexType()) {
+ if (!OverallPromotionType.isNull())
+ return CGF.EmitPromotedComplexExpr(E, OverallPromotionType);
+ else
+ return Visit(const_cast<Expr *>(E));
+ } else {
+ if (!OverallPromotionType.isNull()) {
+ QualType ComplexElementTy =
+ OverallPromotionType->castAs<ComplexType>()->getElementType();
+ return ComplexPairTy(CGF.EmitPromotedScalarExpr(E, ComplexElementTy),
+ nullptr);
+ } else {
+ return ComplexPairTy(CGF.EmitScalarExpr(E), nullptr);
+ }
+ }
+}
+
ComplexExprEmitter::BinOpInfo
-ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) {
+ComplexExprEmitter::EmitBinOps(const BinaryOperator *E,
+ QualType PromotionType) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
BinOpInfo Ops;
- if (E->getLHS()->getType()->isRealFloatingType())
- Ops.LHS = ComplexPairTy(CGF.EmitScalarExpr(E->getLHS()), nullptr);
- else
- Ops.LHS = Visit(E->getLHS());
- if (E->getRHS()->getType()->isRealFloatingType())
- Ops.RHS = ComplexPairTy(CGF.EmitScalarExpr(E->getRHS()), nullptr);
- else
- Ops.RHS = Visit(E->getRHS());
- Ops.Ty = E->getType();
+ Ops.LHS = EmitPromotedComplexOperand(E->getLHS(), PromotionType);
+ Ops.RHS = EmitPromotedComplexOperand(E->getRHS(), PromotionType);
+ if (!PromotionType.isNull())
+ Ops.Ty = PromotionType;
+ else
+ Ops.Ty = E->getType();
+ Ops.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
return Ops;
}
@@ -905,41 +1156,74 @@ EmitCompoundAssignLValue(const CompoundAssignOperator *E,
if (const AtomicType *AT = LHSTy->getAs<AtomicType>())
LHSTy = AT->getValueType();
- CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
BinOpInfo OpInfo;
+ OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
// Load the RHS and LHS operands.
// __block variables need to have the rhs evaluated first, plus this should
// improve codegen a little.
- OpInfo.Ty = E->getComputationResultType();
- QualType ComplexElementTy = cast<ComplexType>(OpInfo.Ty)->getElementType();
+ QualType PromotionTypeCR;
+ PromotionTypeCR = getPromotionType(E->getComputationResultType());
+ if (PromotionTypeCR.isNull())
+ PromotionTypeCR = E->getComputationResultType();
+ OpInfo.Ty = PromotionTypeCR;
+ QualType ComplexElementTy =
+ OpInfo.Ty->castAs<ComplexType>()->getElementType();
+ QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType());
// The RHS should have been converted to the computation type.
if (E->getRHS()->getType()->isRealFloatingType()) {
- assert(
- CGF.getContext()
- .hasSameUnqualifiedType(ComplexElementTy, E->getRHS()->getType()));
- OpInfo.RHS = ComplexPairTy(CGF.EmitScalarExpr(E->getRHS()), nullptr);
+ if (!PromotionTypeRHS.isNull())
+ OpInfo.RHS = ComplexPairTy(
+ CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS), nullptr);
+ else {
+ assert(CGF.getContext().hasSameUnqualifiedType(ComplexElementTy,
+ E->getRHS()->getType()));
+
+ OpInfo.RHS = ComplexPairTy(CGF.EmitScalarExpr(E->getRHS()), nullptr);
+ }
} else {
- assert(CGF.getContext()
- .hasSameUnqualifiedType(OpInfo.Ty, E->getRHS()->getType()));
- OpInfo.RHS = Visit(E->getRHS());
+ if (!PromotionTypeRHS.isNull()) {
+ OpInfo.RHS = ComplexPairTy(
+ CGF.EmitPromotedComplexExpr(E->getRHS(), PromotionTypeRHS));
+ } else {
+ assert(CGF.getContext().hasSameUnqualifiedType(OpInfo.Ty,
+ E->getRHS()->getType()));
+ OpInfo.RHS = Visit(E->getRHS());
+ }
}
LValue LHS = CGF.EmitLValue(E->getLHS());
// Load from the l-value and convert it.
SourceLocation Loc = E->getExprLoc();
+ QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType());
if (LHSTy->isAnyComplexType()) {
ComplexPairTy LHSVal = EmitLoadOfLValue(LHS, Loc);
- OpInfo.LHS = EmitComplexToComplexCast(LHSVal, LHSTy, OpInfo.Ty, Loc);
+ if (!PromotionTypeLHS.isNull())
+ OpInfo.LHS =
+ EmitComplexToComplexCast(LHSVal, LHSTy, PromotionTypeLHS, Loc);
+ else
+ OpInfo.LHS = EmitComplexToComplexCast(LHSVal, LHSTy, OpInfo.Ty, Loc);
} else {
llvm::Value *LHSVal = CGF.EmitLoadOfScalar(LHS, Loc);
// For floating point real operands we can directly pass the scalar form
// to the binary operator emission and potentially get more efficient code.
if (LHSTy->isRealFloatingType()) {
- if (!CGF.getContext().hasSameUnqualifiedType(ComplexElementTy, LHSTy))
- LHSVal = CGF.EmitScalarConversion(LHSVal, LHSTy, ComplexElementTy, Loc);
+ QualType PromotedComplexElementTy;
+ if (!PromotionTypeLHS.isNull()) {
+ PromotedComplexElementTy =
+ cast<ComplexType>(PromotionTypeLHS)->getElementType();
+ if (!CGF.getContext().hasSameUnqualifiedType(PromotedComplexElementTy,
+ PromotionTypeLHS))
+ LHSVal = CGF.EmitScalarConversion(LHSVal, LHSTy,
+ PromotedComplexElementTy, Loc);
+ } else {
+ if (!CGF.getContext().hasSameUnqualifiedType(ComplexElementTy, LHSTy))
+ LHSVal =
+ CGF.EmitScalarConversion(LHSVal, LHSTy, ComplexElementTy, Loc);
+ }
OpInfo.LHS = ComplexPairTy(LHSVal, nullptr);
} else {
OpInfo.LHS = EmitScalarToComplexCast(LHSVal, LHSTy, OpInfo.Ty, Loc);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp
index 47e41261e095..604e3958161d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp
@@ -25,10 +25,12 @@
#include "clang/Basic/Builtins.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
+#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
+#include <optional>
using namespace clang;
using namespace CodeGen;
@@ -46,7 +48,7 @@ struct ConstantAggregateBuilderUtils {
CharUnits getAlignment(const llvm::Constant *C) const {
return CharUnits::fromQuantity(
- CGM.getDataLayout().getABITypeAlignment(C->getType()));
+ CGM.getDataLayout().getABITypeAlign(C->getType()));
}
CharUnits getSize(llvm::Type *Ty) const {
@@ -94,7 +96,7 @@ class ConstantAggregateBuilder : private ConstantAggregateBuilderUtils {
bool NaturalLayout = true;
bool split(size_t Index, CharUnits Hint);
- Optional<size_t> splitAt(CharUnits Pos);
+ std::optional<size_t> splitAt(CharUnits Pos);
static llvm::Constant *buildFrom(CodeGenModule &CGM,
ArrayRef<llvm::Constant *> Elems,
@@ -158,12 +160,12 @@ bool ConstantAggregateBuilder::add(llvm::Constant *C, CharUnits Offset,
}
// Uncommon case: constant overlaps what we've already created.
- llvm::Optional<size_t> FirstElemToReplace = splitAt(Offset);
+ std::optional<size_t> FirstElemToReplace = splitAt(Offset);
if (!FirstElemToReplace)
return false;
CharUnits CSize = getSize(C);
- llvm::Optional<size_t> LastElemToReplace = splitAt(Offset + CSize);
+ std::optional<size_t> LastElemToReplace = splitAt(Offset + CSize);
if (!LastElemToReplace)
return false;
@@ -222,10 +224,10 @@ bool ConstantAggregateBuilder::addBits(llvm::APInt Bits, uint64_t OffsetInBits,
// Partial byte: update the existing integer if there is one. If we
// can't split out a 1-CharUnit range to update, then we can't add
// these bits and fail the entire constant emission.
- llvm::Optional<size_t> FirstElemToUpdate = splitAt(OffsetInChars);
+ std::optional<size_t> FirstElemToUpdate = splitAt(OffsetInChars);
if (!FirstElemToUpdate)
return false;
- llvm::Optional<size_t> LastElemToUpdate =
+ std::optional<size_t> LastElemToUpdate =
splitAt(OffsetInChars + CharUnits::One());
if (!LastElemToUpdate)
return false;
@@ -283,8 +285,8 @@ bool ConstantAggregateBuilder::addBits(llvm::APInt Bits, uint64_t OffsetInBits,
/// Returns a position within Elems and Offsets such that all elements
/// before the returned index end before Pos and all elements at or after
/// the returned index begin at or after Pos. Splits elements as necessary
-/// to ensure this. Returns None if we find something we can't split.
-Optional<size_t> ConstantAggregateBuilder::splitAt(CharUnits Pos) {
+/// to ensure this. Returns std::nullopt if we find something we can't split.
+std::optional<size_t> ConstantAggregateBuilder::splitAt(CharUnits Pos) {
if (Pos >= Size)
return Offsets.size();
@@ -305,7 +307,7 @@ Optional<size_t> ConstantAggregateBuilder::splitAt(CharUnits Pos) {
// Try to decompose it into smaller constants.
if (!split(LastAtOrBeforePosIndex, Pos))
- return None;
+ return std::nullopt;
}
}
@@ -439,22 +441,33 @@ llvm::Constant *ConstantAggregateBuilder::buildFrom(
// Can't emit as an array, carry on to emit as a struct.
}
+ // The size of the constant we plan to generate. This is usually just
+ // the size of the initialized type, but in AllowOversized mode (i.e.
+ // flexible array init), it can be larger.
CharUnits DesiredSize = Utils.getSize(DesiredTy);
+ if (Size > DesiredSize) {
+ assert(AllowOversized && "Elems are oversized");
+ DesiredSize = Size;
+ }
+
+ // The natural alignment of an unpacked LLVM struct with the given elements.
CharUnits Align = CharUnits::One();
for (llvm::Constant *C : Elems)
Align = std::max(Align, Utils.getAlignment(C));
+
+ // The natural size of an unpacked LLVM struct with the given elements.
CharUnits AlignedSize = Size.alignTo(Align);
bool Packed = false;
ArrayRef<llvm::Constant*> UnpackedElems = Elems;
llvm::SmallVector<llvm::Constant*, 32> UnpackedElemStorage;
- if ((DesiredSize < AlignedSize && !AllowOversized) ||
- DesiredSize.alignTo(Align) != DesiredSize) {
- // The natural layout would be the wrong size; force use of a packed layout.
+ if (DesiredSize < AlignedSize || DesiredSize.alignTo(Align) != DesiredSize) {
+ // The natural layout would be too big; force use of a packed layout.
NaturalLayout = false;
Packed = true;
} else if (DesiredSize > AlignedSize) {
- // The constant would be too small. Add padding to fix it.
+ // The natural layout would be too small. Add padding to fix it. (This
+ // is ignored if we choose a packed layout.)
UnpackedElemStorage.assign(Elems.begin(), Elems.end());
UnpackedElemStorage.push_back(Utils.getPadding(DesiredSize - Size));
UnpackedElems = UnpackedElemStorage;
@@ -482,7 +495,7 @@ llvm::Constant *ConstantAggregateBuilder::buildFrom(
// If we're using the packed layout, pad it out to the desired size if
// necessary.
if (Packed) {
- assert((SizeSoFar <= DesiredSize || AllowOversized) &&
+ assert(SizeSoFar <= DesiredSize &&
"requested size is too small for contents");
if (SizeSoFar < DesiredSize)
PackedElems.push_back(Utils.getPadding(DesiredSize - SizeSoFar));
@@ -506,12 +519,12 @@ void ConstantAggregateBuilder::condense(CharUnits Offset,
llvm::Type *DesiredTy) {
CharUnits Size = getSize(DesiredTy);
- llvm::Optional<size_t> FirstElemToReplace = splitAt(Offset);
+ std::optional<size_t> FirstElemToReplace = splitAt(Offset);
if (!FirstElemToReplace)
return;
size_t First = *FirstElemToReplace;
- llvm::Optional<size_t> LastElemToReplace = splitAt(Offset + Size);
+ std::optional<size_t> LastElemToReplace = splitAt(Offset + Size);
if (!LastElemToReplace)
return;
size_t Last = *LastElemToReplace;
@@ -532,8 +545,8 @@ void ConstantAggregateBuilder::condense(CharUnits Offset,
}
llvm::Constant *Replacement = buildFrom(
- CGM, makeArrayRef(Elems).slice(First, Length),
- makeArrayRef(Offsets).slice(First, Length), Offset, getSize(DesiredTy),
+ CGM, ArrayRef(Elems).slice(First, Length),
+ ArrayRef(Offsets).slice(First, Length), Offset, getSize(DesiredTy),
/*known to have natural layout=*/false, DesiredTy, false);
replace(Elems, First, Last, {Replacement});
replace(Offsets, First, Last, {Offset});
@@ -692,8 +705,8 @@ bool ConstStructBuilder::Build(InitListExpr *ILE, bool AllowOverwrite) {
!declaresSameEntity(ILE->getInitializedFieldInUnion(), Field))
continue;
- // Don't emit anonymous bitfields or zero-sized fields.
- if (Field->isUnnamedBitfield() || Field->isZeroSize(CGM.getContext()))
+ // Don't emit anonymous bitfields.
+ if (Field->isUnnamedBitfield())
continue;
// Get the initializer. A struct can include fields without initializers,
@@ -704,6 +717,14 @@ bool ConstStructBuilder::Build(InitListExpr *ILE, bool AllowOverwrite) {
if (Init && isa<NoInitExpr>(Init))
continue;
+ // Zero-sized fields are not emitted, but their initializers may still
+ // prevent emission of this struct as a constant.
+ if (Field->isZeroSize(CGM.getContext())) {
+ if (Init->HasSideEffects(CGM.getContext()))
+ return false;
+ continue;
+ }
+
// When emitting a DesignatedInitUpdateExpr, a nested InitListExpr
// represents additional overwriting of our current constant value, and not
// a new constant to emit independently.
@@ -851,6 +872,7 @@ bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
}
llvm::Constant *ConstStructBuilder::Finalize(QualType Type) {
+ Type = Type.getNonReferenceType();
RecordDecl *RD = Type->castAs<RecordType>()->getDecl();
llvm::Type *ValTy = CGM.getTypes().ConvertType(Type);
return Builder.build(ValTy, RD->hasFlexibleArrayMember());
@@ -893,17 +915,16 @@ bool ConstStructBuilder::UpdateStruct(ConstantEmitter &Emitter,
// ConstExprEmitter
//===----------------------------------------------------------------------===//
-static ConstantAddress tryEmitGlobalCompoundLiteral(CodeGenModule &CGM,
- CodeGenFunction *CGF,
- const CompoundLiteralExpr *E) {
+static ConstantAddress
+tryEmitGlobalCompoundLiteral(ConstantEmitter &emitter,
+ const CompoundLiteralExpr *E) {
+ CodeGenModule &CGM = emitter.CGM;
CharUnits Align = CGM.getContext().getTypeAlignInChars(E->getType());
if (llvm::GlobalVariable *Addr =
CGM.getAddrOfConstantCompoundLiteralIfEmitted(E))
- return ConstantAddress(Addr, Align);
+ return ConstantAddress(Addr, Addr->getValueType(), Align);
LangAS addressSpace = E->getType().getAddressSpace();
-
- ConstantEmitter emitter(CGM, CGF);
llvm::Constant *C = emitter.tryEmitForInitializer(E->getInitializer(),
addressSpace, E->getType());
if (!C) {
@@ -912,16 +933,16 @@ static ConstantAddress tryEmitGlobalCompoundLiteral(CodeGenModule &CGM,
return ConstantAddress::invalid();
}
- auto GV = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
- CGM.isTypeConstant(E->getType(), true),
- llvm::GlobalValue::InternalLinkage,
- C, ".compoundliteral", nullptr,
- llvm::GlobalVariable::NotThreadLocal,
- CGM.getContext().getTargetAddressSpace(addressSpace));
+ auto GV = new llvm::GlobalVariable(
+ CGM.getModule(), C->getType(),
+ E->getType().isConstantStorage(CGM.getContext(), true, false),
+ llvm::GlobalValue::InternalLinkage, C, ".compoundliteral", nullptr,
+ llvm::GlobalVariable::NotThreadLocal,
+ CGM.getContext().getTargetAddressSpace(addressSpace));
emitter.finalize(GV);
GV->setAlignment(Align.getAsAlign());
CGM.setAddrOfConstantCompoundLiteral(E, GV);
- return ConstantAddress(GV, Align);
+ return ConstantAddress(GV, GV->getValueType(), Align);
}
static llvm::Constant *
@@ -952,7 +973,7 @@ EmitArrayConstant(CodeGenModule &CGM, llvm::ArrayType *DesiredType,
if (CommonElementType && NonzeroLength >= 8) {
llvm::Constant *Initial = llvm::ConstantArray::get(
llvm::ArrayType::get(CommonElementType, NonzeroLength),
- makeArrayRef(Elements).take_front(NonzeroLength));
+ ArrayRef(Elements).take_front(NonzeroLength));
Elements.resize(2);
Elements[0] = Initial;
} else {
@@ -1091,16 +1112,52 @@ public:
destAS, destTy);
}
- case CK_LValueToRValue:
+ case CK_LValueToRValue: {
+ // We don't really support doing lvalue-to-rvalue conversions here; any
+ // interesting conversions should be done in Evaluate(). But as a
+ // special case, allow compound literals to support the gcc extension
+ // allowing "struct x {int x;} x = (struct x) {};".
+ if (auto *E = dyn_cast<CompoundLiteralExpr>(subExpr->IgnoreParens()))
+ return Visit(E->getInitializer(), destType);
+ return nullptr;
+ }
+
case CK_AtomicToNonAtomic:
case CK_NonAtomicToAtomic:
case CK_NoOp:
case CK_ConstructorConversion:
return Visit(subExpr, destType);
+ case CK_ArrayToPointerDecay:
+ if (const auto *S = dyn_cast<StringLiteral>(subExpr))
+ return CGM.GetAddrOfConstantStringFromLiteral(S).getPointer();
+ return nullptr;
+ case CK_NullToPointer:
+ if (Visit(subExpr, destType))
+ return CGM.EmitNullConstant(destType);
+ return nullptr;
+
case CK_IntToOCLSampler:
llvm_unreachable("global sampler variables are not generated");
+ case CK_IntegralCast: {
+ QualType FromType = subExpr->getType();
+ // See also HandleIntToIntCast in ExprConstant.cpp
+ if (FromType->isIntegerType())
+ if (llvm::Constant *C = Visit(subExpr, FromType))
+ if (auto *CI = dyn_cast<llvm::ConstantInt>(C)) {
+ unsigned SrcWidth = CGM.getContext().getIntWidth(FromType);
+ unsigned DstWidth = CGM.getContext().getIntWidth(destType);
+ if (DstWidth == SrcWidth)
+ return CI;
+ llvm::APInt A = FromType->isSignedIntegerType()
+ ? CI->getValue().sextOrTrunc(DstWidth)
+ : CI->getValue().zextOrTrunc(DstWidth);
+ return llvm::ConstantInt::get(CGM.getLLVMContext(), A);
+ }
+ return nullptr;
+ }
+
case CK_Dependent: llvm_unreachable("saw dependent cast!");
case CK_BuiltinFnToFnPtr:
@@ -1135,7 +1192,6 @@ public:
case CK_CPointerToObjCPointerCast:
case CK_BlockPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
- case CK_ArrayToPointerDecay:
case CK_FunctionToPointerDecay:
case CK_BaseToDerived:
case CK_DerivedToBase:
@@ -1154,8 +1210,6 @@ public:
case CK_IntegralComplexToFloatingComplex:
case CK_PointerToIntegral:
case CK_PointerToBoolean:
- case CK_NullToPointer:
- case CK_IntegralCast:
case CK_BooleanToSignedIntegral:
case CK_IntegralToPointer:
case CK_IntegralToBoolean:
@@ -1186,9 +1240,8 @@ public:
return Visit(E->getSubExpr(), T);
}
- llvm::Constant *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E,
- QualType T) {
- return Visit(E->getSubExpr(), T);
+ llvm::Constant *VisitIntegerLiteral(IntegerLiteral *I, QualType T) {
+ return llvm::ConstantInt::get(CGM.getLLVMContext(), I->getValue());
}
llvm::Constant *EmitArrayInitialization(InitListExpr *ILE, QualType T) {
@@ -1293,7 +1346,12 @@ public:
assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
"argument to copy ctor is of wrong type");
- return Visit(Arg, Ty);
+ // Look through the temporary; it's just converting the value to an
+ // lvalue to pass it to the constructor.
+ if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(Arg))
+ return Visit(MTE->getSubExpr(), Ty);
+ // Don't try to support arbitrary lvalue-to-rvalue conversions for now.
+ return nullptr;
}
return CGM.EmitNullConstant(Ty);
@@ -1311,6 +1369,7 @@ public:
std::string Str;
CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
const ConstantArrayType *CAT = CGM.getContext().getAsConstantArrayType(T);
+ assert(CAT && "String data not of constant array type!");
// Resize the string to the right size, adding zeros at the end, or
// truncating as needed.
@@ -1322,6 +1381,13 @@ public:
return Visit(E->getSubExpr(), T);
}
+ llvm::Constant *VisitUnaryMinus(UnaryOperator *U, QualType T) {
+ if (llvm::Constant *C = Visit(U->getSubExpr(), T))
+ if (auto *CI = dyn_cast<llvm::ConstantInt>(C))
+ return llvm::ConstantInt::get(CGM.getLLVMContext(), -CI->getValue());
+ return nullptr;
+ }
+
// Utility methods
llvm::Type *ConvertType(QualType T) {
return CGM.getTypes().ConvertType(T);
@@ -1366,15 +1432,12 @@ ConstantEmitter::tryEmitAbstract(const APValue &value, QualType destType) {
llvm::Constant *ConstantEmitter::tryEmitConstantExpr(const ConstantExpr *CE) {
if (!CE->hasAPValueResult())
return nullptr;
- const Expr *Inner = CE->getSubExpr()->IgnoreImplicit();
- QualType RetType;
- if (auto *Call = dyn_cast<CallExpr>(Inner))
- RetType = Call->getCallReturnType(CGF->getContext());
- else if (auto *Ctor = dyn_cast<CXXConstructExpr>(Inner))
- RetType = Ctor->getType();
- llvm::Constant *Res =
- emitAbstract(CE->getBeginLoc(), CE->getAPValueResult(), RetType);
- return Res;
+
+ QualType RetType = CE->getType();
+ if (CE->isGLValue())
+ RetType = CGM.getContext().getLValueReferenceType(RetType);
+
+ return emitAbstract(CE->getBeginLoc(), CE->getAPValueResult(), RetType);
}
llvm::Constant *
@@ -1544,7 +1607,7 @@ namespace {
}
void setLocation(llvm::GlobalVariable *placeholder) {
- assert(Locations.find(placeholder) == Locations.end() &&
+ assert(!Locations.contains(placeholder) &&
"already found location for placeholder!");
// Lazily fill in IndexValues with the values from Indices.
@@ -1567,13 +1630,8 @@ namespace {
IndexValues[i] = llvm::ConstantInt::get(CGM.Int32Ty, Indices[i]);
}
- // Form a GEP and then bitcast to the placeholder type so that the
- // replacement will succeed.
- llvm::Constant *location =
- llvm::ConstantExpr::getInBoundsGetElementPtr(BaseValueTy,
- Base, IndexValues);
- location = llvm::ConstantExpr::getBitCast(location,
- placeholder->getType());
+ llvm::Constant *location = llvm::ConstantExpr::getInBoundsGetElementPtr(
+ BaseValueTy, Base, IndexValues);
Locations.insert({placeholder, location});
}
@@ -1623,33 +1681,26 @@ llvm::Constant *ConstantEmitter::tryEmitPrivateForVarInit(const VarDecl &D) {
if (CD->isTrivial() && CD->isDefaultConstructor())
return CGM.EmitNullConstant(D.getType());
}
- InConstantContext = true;
}
+ InConstantContext = D.hasConstantInitialization();
QualType destType = D.getType();
+ const Expr *E = D.getInit();
+ assert(E && "No initializer to emit");
+
+ if (!destType->isReferenceType()) {
+ QualType nonMemoryDestType = getNonMemoryType(CGM, destType);
+ if (llvm::Constant *C = ConstExprEmitter(*this).Visit(const_cast<Expr *>(E),
+ nonMemoryDestType))
+ return emitForMemory(C, destType);
+ }
// Try to emit the initializer. Note that this can allow some things that
// are not allowed by tryEmitPrivateForMemory alone.
- if (auto value = D.evaluateValue()) {
+ if (APValue *value = D.evaluateValue())
return tryEmitPrivateForMemory(*value, destType);
- }
- // FIXME: Implement C++11 [basic.start.init]p2: if the initializer of a
- // reference is a constant expression, and the reference binds to a temporary,
- // then constant initialization is performed. ConstExprEmitter will
- // incorrectly emit a prvalue constant in this case, and the calling code
- // interprets that as the (pointer) value of the reference, rather than the
- // desired value of the referee.
- if (destType->isReferenceType())
- return nullptr;
-
- const Expr *E = D.getInit();
- assert(E && "No initializer to emit");
-
- auto nonMemoryDestType = getNonMemoryType(CGM, destType);
- auto C =
- ConstExprEmitter(*this).Visit(const_cast<Expr*>(E), nonMemoryDestType);
- return (C ? emitForMemory(C, destType) : nullptr);
+ return nullptr;
}
llvm::Constant *
@@ -1704,9 +1755,12 @@ llvm::Constant *ConstantEmitter::emitForMemory(CodeGenModule &CGM,
}
// Zero-extend bool.
- if (C->getType()->isIntegerTy(1)) {
+ if (C->getType()->isIntegerTy(1) && !destType->isBitIntType()) {
llvm::Type *boolTy = CGM.getTypes().ConvertTypeForMem(destType);
- return llvm::ConstantExpr::getZExt(C, boolTy);
+ llvm::Constant *Res = llvm::ConstantFoldCastOperand(
+ llvm::Instruction::ZExt, C, boolTy, CGM.getDataLayout());
+ assert(Res && "Constant folding must succeed");
+ return Res;
}
return C;
@@ -1714,6 +1768,13 @@ llvm::Constant *ConstantEmitter::emitForMemory(CodeGenModule &CGM,
llvm::Constant *ConstantEmitter::tryEmitPrivate(const Expr *E,
QualType destType) {
+ assert(!destType->isVoidType() && "can't emit a void constant");
+
+ if (!destType->isReferenceType())
+ if (llvm::Constant *C =
+ ConstExprEmitter(*this).Visit(const_cast<Expr *>(E), destType))
+ return C;
+
Expr::EvalResult Result;
bool Success = false;
@@ -1723,13 +1784,10 @@ llvm::Constant *ConstantEmitter::tryEmitPrivate(const Expr *E,
else
Success = E->EvaluateAsRValue(Result, CGM.getContext(), InConstantContext);
- llvm::Constant *C;
if (Success && !Result.HasSideEffects)
- C = tryEmitPrivate(Result.Val, destType);
- else
- C = ConstExprEmitter(*this).Visit(const_cast<Expr*>(E), destType);
+ return tryEmitPrivate(Result.Val, destType);
- return C;
+ return nullptr;
}
llvm::Constant *CodeGenModule::getNullPointer(llvm::PointerType *T, QualType QT) {
@@ -1803,13 +1861,7 @@ private:
if (!hasNonZeroOffset())
return C;
- llvm::Type *origPtrTy = C->getType();
- unsigned AS = origPtrTy->getPointerAddressSpace();
- llvm::Type *charPtrTy = CGM.Int8Ty->getPointerTo(AS);
- C = llvm::ConstantExpr::getBitCast(C, charPtrTy);
- C = llvm::ConstantExpr::getGetElementPtr(CGM.Int8Ty, C, getOffset());
- C = llvm::ConstantExpr::getPointerCast(C, origPtrTy);
- return C;
+ return llvm::ConstantExpr::getGetElementPtr(CGM.Int8Ty, C, getOffset());
}
};
@@ -1870,8 +1922,9 @@ ConstantLValueEmitter::tryEmitAbsolute(llvm::Type *destTy) {
// FIXME: signedness depends on the original integer type.
auto intptrTy = CGM.getDataLayout().getIntPtrType(destPtrTy);
llvm::Constant *C;
- C = llvm::ConstantExpr::getIntegerCast(getOffset(), intptrTy,
- /*isSigned*/ false);
+ C = llvm::ConstantFoldIntegerCast(getOffset(), intptrTy, /*isSigned*/ false,
+ CGM.getDataLayout());
+ assert(C && "Must have folded, as Offset is a ConstantInt");
C = llvm::ConstantExpr::getIntToPtr(C, destPtrTy);
return C;
}
@@ -1898,7 +1951,7 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
if (VD->isLocalVarDecl()) {
return CGM.getOrCreateStaticVarDecl(
- *VD, CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false));
+ *VD, CGM.getLLVMLinkageVarDefinition(VD));
}
}
}
@@ -1906,6 +1959,9 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
if (auto *GD = dyn_cast<MSGuidDecl>(D))
return CGM.GetAddrOfMSGuidDecl(GD);
+ if (auto *GCD = dyn_cast<UnnamedGlobalConstantDecl>(D))
+ return CGM.GetAddrOfUnnamedGlobalConstantDecl(GCD);
+
if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(D))
return CGM.GetAddrOfTemplateParamObject(TPO);
@@ -1913,15 +1969,8 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
}
// Handle typeid(T).
- if (TypeInfoLValue TI = base.dyn_cast<TypeInfoLValue>()) {
- llvm::Type *StdTypeInfoPtrTy =
- CGM.getTypes().ConvertType(base.getTypeInfoType())->getPointerTo();
- llvm::Constant *TypeInfo =
- CGM.GetAddrOfRTTIDescriptor(QualType(TI.getType(), 0));
- if (TypeInfo->getType() != StdTypeInfoPtrTy)
- TypeInfo = llvm::ConstantExpr::getBitCast(TypeInfo, StdTypeInfoPtrTy);
- return TypeInfo;
- }
+ if (TypeInfoLValue TI = base.dyn_cast<TypeInfoLValue>())
+ return CGM.GetAddrOfRTTIDescriptor(QualType(TI.getType(), 0));
// Otherwise, it must be an expression.
return Visit(base.get<const Expr*>());
@@ -1936,7 +1985,9 @@ ConstantLValueEmitter::VisitConstantExpr(const ConstantExpr *E) {
ConstantLValue
ConstantLValueEmitter::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
- return tryEmitGlobalCompoundLiteral(CGM, Emitter.CGF, E);
+ ConstantEmitter CompoundLiteralEmitter(CGM, Emitter.CGF);
+ CompoundLiteralEmitter.setInConstantContext(Emitter.isInConstantContext());
+ return tryEmitGlobalCompoundLiteral(CompoundLiteralEmitter, E);
}
ConstantLValue
@@ -1953,7 +2004,7 @@ static ConstantLValue emitConstantObjCStringLiteral(const StringLiteral *S,
QualType T,
CodeGenModule &CGM) {
auto C = CGM.getObjCRuntime().GenerateConstantString(S);
- return C.getElementBitCast(CGM.getTypes().ConvertTypeForMem(T));
+ return C.withElementType(CGM.getTypes().ConvertTypeForMem(T));
}
ConstantLValue
@@ -1978,14 +2029,15 @@ ConstantLValue
ConstantLValueEmitter::VisitAddrLabelExpr(const AddrLabelExpr *E) {
assert(Emitter.CGF && "Invalid address of label expression outside function");
llvm::Constant *Ptr = Emitter.CGF->GetAddrOfLabel(E->getLabel());
- Ptr = llvm::ConstantExpr::getBitCast(Ptr,
- CGM.getTypes().ConvertType(E->getType()));
return Ptr;
}
ConstantLValue
ConstantLValueEmitter::VisitCallExpr(const CallExpr *E) {
unsigned builtin = E->getBuiltinCallee();
+ if (builtin == Builtin::BI__builtin_function_start)
+ return CGM.GetFunctionStart(
+ E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext()));
if (builtin != Builtin::BI__builtin___CFStringMakeConstantString &&
builtin != Builtin::BI__builtin___NSStringMakeConstantString)
return nullptr;
@@ -2091,6 +2143,9 @@ llvm::Constant *ConstantEmitter::tryEmitPrivate(const APValue &Value,
Inits[I] = llvm::ConstantInt::get(CGM.getLLVMContext(), Elt.getInt());
else if (Elt.isFloat())
Inits[I] = llvm::ConstantFP::get(CGM.getLLVMContext(), Elt.getFloat());
+ else if (Elt.isIndeterminate())
+ Inits[I] = llvm::UndefValue::get(CGM.getTypes().ConvertType(
+ DestType->castAs<VectorType>()->getElementType()));
else
llvm_unreachable("unsupported vector element type");
}
@@ -2153,6 +2208,11 @@ llvm::Constant *ConstantEmitter::tryEmitPrivate(const APValue &Value,
llvm::ArrayType *Desired =
cast<llvm::ArrayType>(CGM.getTypes().ConvertType(DestType));
+
+ // Fix the type of incomplete arrays if the initializer isn't empty.
+ if (DestType->isIncompleteArrayType() && !Elts.empty())
+ Desired = llvm::ArrayType::get(Desired->getElementType(), Elts.size());
+
return EmitArrayConstant(CGM, Desired, CommonElementType, NumElements, Elts,
Filler);
}
@@ -2177,7 +2237,8 @@ void CodeGenModule::setAddrOfConstantCompoundLiteral(
ConstantAddress
CodeGenModule::GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E) {
assert(E->isFileScope() && "not a file-scope compound literal expr");
- return tryEmitGlobalCompoundLiteral(*this, nullptr, E);
+ ConstantEmitter emitter(*this);
+ return tryEmitGlobalCompoundLiteral(emitter, E);
}
llvm::Constant *
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
index 418f23bd1a97..181b15e9c7d0 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
@@ -28,10 +28,10 @@
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
@@ -40,7 +40,9 @@
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
+#include "llvm/Support/TypeSize.h"
#include <cstdarg>
+#include <optional>
using namespace clang;
using namespace CodeGen;
@@ -65,20 +67,14 @@ bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
const auto &LHSAP = LHS->getValue();
const auto &RHSAP = RHS->getValue();
if (Opcode == BO_Add) {
- if (Signed)
- Result = LHSAP.sadd_ov(RHSAP, Overflow);
- else
- Result = LHSAP.uadd_ov(RHSAP, Overflow);
+ Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
+ : LHSAP.uadd_ov(RHSAP, Overflow);
} else if (Opcode == BO_Sub) {
- if (Signed)
- Result = LHSAP.ssub_ov(RHSAP, Overflow);
- else
- Result = LHSAP.usub_ov(RHSAP, Overflow);
+ Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
+ : LHSAP.usub_ov(RHSAP, Overflow);
} else if (Opcode == BO_Mul) {
- if (Signed)
- Result = LHSAP.smul_ov(RHSAP, Overflow);
- else
- Result = LHSAP.umul_ov(RHSAP, Overflow);
+ Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
+ : LHSAP.umul_ov(RHSAP, Overflow);
} else if (Opcode == BO_Div || Opcode == BO_Rem) {
if (Signed && !RHS->isZero())
Result = LHSAP.sdiv_ov(RHSAP, Overflow);
@@ -156,23 +152,23 @@ static bool MustVisitNullValue(const Expr *E) {
}
/// If \p E is a widened promoted integer, get its base (unpromoted) type.
-static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
- const Expr *E) {
+static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
+ const Expr *E) {
const Expr *Base = E->IgnoreImpCasts();
if (E == Base)
- return llvm::None;
+ return std::nullopt;
QualType BaseTy = Base->getType();
- if (!BaseTy->isPromotableIntegerType() ||
+ if (!Ctx.isPromotableIntegerType(BaseTy) ||
Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
- return llvm::None;
+ return std::nullopt;
return BaseTy;
}
/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
- return getUnwidenedIntegerType(Ctx, E).hasValue();
+ return getUnwidenedIntegerType(Ctx, E).has_value();
}
/// Check if we can skip the overflow check for \p Op.
@@ -259,7 +255,7 @@ public:
if (VD->getType()->isReferenceType()) {
if (const auto *TTy =
- dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
+ VD->getType().getNonReferenceType()->getAs<TypedefType>())
AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
} else {
// Assumptions for function parameters are emitted at the start of the
@@ -275,8 +271,7 @@ public:
}
if (!AVAttr)
- if (const auto *TTy =
- dyn_cast<TypedefType>(E->getType()))
+ if (const auto *TTy = E->getType()->getAs<TypedefType>())
AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
if (!AVAttr)
@@ -419,10 +414,16 @@ public:
Value *VisitExpr(Expr *S);
Value *VisitConstantExpr(ConstantExpr *E) {
+ // A constant expression of type 'void' generates no code and produces no
+ // value.
+ if (E->getType()->isVoidType())
+ return nullptr;
+
if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
if (E->isGLValue())
return CGF.Builder.CreateLoad(Address(
- Result, CGF.getContext().getTypeAlignInChars(E->getType())));
+ Result, CGF.ConvertTypeForMem(E->getType()),
+ CGF.getContext().getTypeAlignInChars(E->getType())));
return Result;
}
return Visit(E->getSubExpr());
@@ -466,6 +467,9 @@ public:
return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
}
Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
+ if (E->getType()->isVoidType())
+ return nullptr;
+
return EmitNullValue(E->getType());
}
Value *VisitGNUNullExpr(const GNUNullExpr *E) {
@@ -618,16 +622,22 @@ public:
return Visit(E->getSubExpr()); // the actual value should be unused
return EmitLoadOfLValue(E);
}
- Value *VisitUnaryPlus(const UnaryOperator *E) {
- // This differs from gcc, though, most likely due to a bug in gcc.
- TestAndClearIgnoreResultAssign();
- return Visit(E->getSubExpr());
- }
- Value *VisitUnaryMinus (const UnaryOperator *E);
+
+ Value *VisitUnaryPlus(const UnaryOperator *E,
+ QualType PromotionType = QualType());
+ Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
+ Value *VisitUnaryMinus(const UnaryOperator *E,
+ QualType PromotionType = QualType());
+ Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);
+
Value *VisitUnaryNot (const UnaryOperator *E);
Value *VisitUnaryLNot (const UnaryOperator *E);
- Value *VisitUnaryReal (const UnaryOperator *E);
- Value *VisitUnaryImag (const UnaryOperator *E);
+ Value *VisitUnaryReal(const UnaryOperator *E,
+ QualType PromotionType = QualType());
+ Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
+ Value *VisitUnaryImag(const UnaryOperator *E,
+ QualType PromotionType = QualType());
+ Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
Value *VisitUnaryExtension(const UnaryOperator *E) {
return Visit(E->getSubExpr());
}
@@ -717,7 +727,7 @@ public:
case LangOptions::SOB_Undefined:
if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LangOptions::SOB_Trapping:
if (CanElideOverflowCheck(CGF.getContext(), Ops))
return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
@@ -726,7 +736,7 @@ public:
}
if (Ops.Ty->isConstantMatrixType()) {
- llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
+ llvm::MatrixBuilder MB(Builder);
// We need to check the types of the operands of the operator to get the
// correct matrix dimensions.
auto *BO = cast<BinaryOperator>(Ops.E);
@@ -789,7 +799,13 @@ public:
// Helper functions for fixed point binary operations.
Value *EmitFixedPointBinOp(const BinOpInfo &Ops);
- BinOpInfo EmitBinOps(const BinaryOperator *E);
+ BinOpInfo EmitBinOps(const BinaryOperator *E,
+ QualType PromotionTy = QualType());
+
+ Value *EmitPromotedValue(Value *result, QualType PromotionType);
+ Value *EmitUnPromotedValue(Value *result, QualType ExprType);
+ Value *EmitPromoted(const Expr *E, QualType PromotionType);
+
LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
Value *&Result);
@@ -797,13 +813,36 @@ public:
Value *EmitCompoundAssign(const CompoundAssignOperator *E,
Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
+ QualType getPromotionType(QualType Ty) {
+ const auto &Ctx = CGF.getContext();
+ if (auto *CT = Ty->getAs<ComplexType>()) {
+ QualType ElementType = CT->getElementType();
+ if (ElementType.UseExcessPrecision(Ctx))
+ return Ctx.getComplexType(Ctx.FloatTy);
+ }
+
+ if (Ty.UseExcessPrecision(Ctx)) {
+ if (auto *VT = Ty->getAs<VectorType>()) {
+ unsigned NumElements = VT->getNumElements();
+ return Ctx.getVectorType(Ctx.FloatTy, NumElements, VT->getVectorKind());
+ }
+ return Ctx.FloatTy;
+ }
+
+ return QualType();
+ }
+
// Binary operators and binary compound assignment operators.
-#define HANDLEBINOP(OP) \
- Value *VisitBin ## OP(const BinaryOperator *E) { \
- return Emit ## OP(EmitBinOps(E)); \
- } \
- Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \
- return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \
+#define HANDLEBINOP(OP) \
+ Value *VisitBin##OP(const BinaryOperator *E) { \
+ QualType promotionTy = getPromotionType(E->getType()); \
+ auto result = Emit##OP(EmitBinOps(E, promotionTy)); \
+ if (result && !promotionTy.isNull()) \
+ result = EmitUnPromotedValue(result, E->getType()); \
+ return result; \
+ } \
+ Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) { \
+ return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP); \
}
HANDLEBINOP(Mul)
HANDLEBINOP(Div)
@@ -1235,7 +1274,18 @@ Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
if (isa<llvm::IntegerType>(DstElementTy)) {
assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
- if (DstElementType->isSignedIntegerOrEnumerationType())
+ bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();
+
+ // If we can't recognize overflow as undefined behavior, assume that
+ // overflow saturates. This protects against normal optimizations if we are
+ // compiling with non-standard FP semantics.
+ if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
+ llvm::Intrinsic::ID IID =
+ IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
+ return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
+ }
+
+ if (IsSigned)
return Builder.CreateFPToSI(Src, DstTy, "conv");
return Builder.CreateFPToUI(Src, DstTy, "conv");
}
@@ -1377,8 +1427,8 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
// Allow bitcast from vector to integer/fp of the same size.
- unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
- unsigned DstSize = DstTy->getPrimitiveSizeInBits();
+ llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
+ llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
if (SrcSize == DstSize)
return Builder.CreateBitCast(Src, DstTy, "conv");
@@ -1586,20 +1636,14 @@ Value *ScalarExprEmitter::VisitExpr(Expr *E) {
Value *
ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
ASTContext &Context = CGF.getContext();
- llvm::Optional<LangAS> GlobalAS =
- Context.getTargetInfo().getConstantAddressSpace();
+ unsigned AddrSpace =
+ Context.getTargetAddressSpace(CGF.CGM.GetGlobalConstantAddressSpace());
llvm::Constant *GlobalConstStr = Builder.CreateGlobalStringPtr(
- E->ComputeName(Context), "__usn_str",
- static_cast<unsigned>(GlobalAS.getValueOr(LangAS::Default)));
-
- unsigned ExprAS = Context.getTargetAddressSpace(E->getType());
+ E->ComputeName(Context), "__usn_str", AddrSpace);
- if (GlobalConstStr->getType()->getPointerAddressSpace() == ExprAS)
- return GlobalConstStr;
-
- llvm::Type *EltTy = GlobalConstStr->getType()->getPointerElementType();
- llvm::PointerType *NewPtrTy = llvm::PointerType::get(EltTy, ExprAS);
- return Builder.CreateAddrSpaceCast(GlobalConstStr, NewPtrTy, "usn_addr_cast");
+ llvm::Type *ExprTy = ConvertType(E->getType());
+ return Builder.CreatePointerBitCastOrAddrSpaceCast(GlobalConstStr, ExprTy,
+ "usn_addr_cast");
}
Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
@@ -1629,7 +1673,7 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
// newv = insert newv, x, i
auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
MTy->getNumElements());
- Value* NewV = llvm::UndefValue::get(RTy);
+ Value* NewV = llvm::PoisonValue::get(RTy);
for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
@@ -1647,7 +1691,7 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
// Check for -1 and output it as undef in the IR.
- if (Idx.isSigned() && Idx.isAllOnesValue())
+ if (Idx.isSigned() && Idx.isAllOnes())
Indices.push_back(-1);
else
Indices.push_back(Idx.getZExtValue());
@@ -1753,7 +1797,8 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
// loads the lvalue formed by the subscript expr. However, we have to be
// careful, because the base of a vector subscript is occasionally an rvalue,
// so we can't get it as an lvalue.
- if (!E->getBase()->getType()->isVectorType())
+ if (!E->getBase()->getType()->isVectorType() &&
+ !E->getBase()->getType()->isSveVLSBuiltinType())
return EmitLoadOfLValue(E);
// Handle the vector case. The base must be a vector, the index must be an
@@ -1775,13 +1820,18 @@ Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
// integer value.
Value *RowIdx = Visit(E->getRowIdx());
Value *ColumnIdx = Visit(E->getColumnIdx());
+
+ const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
+ unsigned NumRows = MatrixTy->getNumRows();
+ llvm::MatrixBuilder MB(Builder);
+ Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);
+ if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
+ MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened());
+
Value *Matrix = Visit(E->getBase());
// TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
- llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
- return MB.CreateExtractElement(
- Matrix, RowIdx, ColumnIdx,
- E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
+ return Builder.CreateExtractElement(Matrix, Idx, "matrixext");
}
static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
@@ -1819,6 +1869,23 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
return Visit(E->getInit(0));
}
+ if (isa<llvm::ScalableVectorType>(VType)) {
+ if (NumInitElements == 0) {
+ // C++11 value-initialization for the vector.
+ return EmitNullValue(E->getType());
+ }
+
+ if (NumInitElements == 1) {
+ Expr *InitVector = E->getInit(0);
+
+ // Initialize from another scalable vector of the same type.
+ if (InitVector->getType() == E->getType())
+ return Visit(InitVector);
+ }
+
+ llvm_unreachable("Unexpected initialization of a scalable vector!");
+ }
+
unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
// Loop over initializers collecting the Value for each, and remembering
@@ -1827,8 +1894,8 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// initializer, since LLVM optimizers generally do not want to touch
// shuffles.
unsigned CurIdx = 0;
- bool VIsUndefShuffle = false;
- llvm::Value *V = llvm::UndefValue::get(VType);
+ bool VIsPoisonShuffle = false;
+ llvm::Value *V = llvm::PoisonValue::get(VType);
for (unsigned i = 0; i != NumInitElements; ++i) {
Expr *IE = E->getInit(i);
Value *Init = Visit(IE);
@@ -1848,16 +1915,16 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
Value *LHS = nullptr, *RHS = nullptr;
if (CurIdx == 0) {
- // insert into undef -> shuffle (src, undef)
+ // insert into poison -> shuffle (src, poison)
// shufflemask must use an i32
Args.push_back(getAsInt32(C, CGF.Int32Ty));
Args.resize(ResElts, -1);
LHS = EI->getVectorOperand();
RHS = V;
- VIsUndefShuffle = true;
- } else if (VIsUndefShuffle) {
- // insert into undefshuffle && size match -> shuffle (v, src)
+ VIsPoisonShuffle = true;
+ } else if (VIsPoisonShuffle) {
+ // insert into poison shuffle && size match -> shuffle (v, src)
llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
for (unsigned j = 0; j != CurIdx; ++j)
Args.push_back(getMaskElt(SVV, j, 0));
@@ -1866,7 +1933,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
RHS = EI->getVectorOperand();
- VIsUndefShuffle = false;
+ VIsPoisonShuffle = false;
}
if (!Args.empty()) {
V = Builder.CreateShuffleVector(LHS, RHS, Args);
@@ -1877,7 +1944,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
}
V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
"vecinit");
- VIsUndefShuffle = false;
+ VIsPoisonShuffle = false;
++CurIdx;
continue;
}
@@ -1895,9 +1962,9 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (OpTy->getNumElements() == ResElts) {
for (unsigned j = 0; j != CurIdx; ++j) {
- // If the current vector initializer is a shuffle with undef, merge
+ // If the current vector initializer is a shuffle with poison, merge
// this shuffle directly into it.
- if (VIsUndefShuffle) {
+ if (VIsPoisonShuffle) {
Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
} else {
Args.push_back(j);
@@ -1907,7 +1974,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
Args.push_back(getMaskElt(SVI, j, Offset));
Args.resize(ResElts, -1);
- if (VIsUndefShuffle)
+ if (VIsPoisonShuffle)
V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
Init = SVOp;
@@ -1930,12 +1997,12 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
Args.resize(ResElts, -1);
}
- // If V is undef, make sure it ends up on the RHS of the shuffle to aid
+ // If V is poison, make sure it ends up on the RHS of the shuffle to aid
// merging subsequent shuffles into this one.
if (CurIdx == 0)
std::swap(V, Init);
V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
- VIsUndefShuffle = isa<llvm::UndefValue>(Init);
+ VIsPoisonShuffle = isa<llvm::PoisonValue>(Init);
CurIdx += InitElts;
}
@@ -1979,6 +2046,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
Expr *E = CE->getSubExpr();
QualType DestTy = CE->getType();
CastKind Kind = CE->getCastKind();
+ CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);
// These cases are generally not written to ignore the result of
// evaluating their sub-expressions, so we clear this now.
@@ -1995,15 +2063,15 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_LValueBitCast:
case CK_ObjCObjectLValueCast: {
Address Addr = EmitLValue(E).getAddress(CGF);
- Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
+ Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
return EmitLoadOfLValue(LV, CE->getExprLoc());
}
case CK_LValueToRValueBitCast: {
LValue SourceLVal = CGF.EmitLValue(E);
- Address Addr = Builder.CreateElementBitCast(SourceLVal.getAddress(CGF),
- CGF.ConvertTypeForMem(DestTy));
+ Address Addr = SourceLVal.getAddress(CGF).withElementType(
+ CGF.ConvertTypeForMem(DestTy));
LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
return EmitLoadOfLValue(DestLV, CE->getExprLoc());
@@ -2016,18 +2084,22 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
Value *Src = Visit(const_cast<Expr*>(E));
llvm::Type *SrcTy = Src->getType();
llvm::Type *DstTy = ConvertType(DestTy);
- if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
- SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
- llvm_unreachable("wrong cast for pointers in different address spaces"
- "(must be an address space cast)!");
- }
+ assert(
+ (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
+ SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
+ "Address-space cast must be used to convert address spaces");
if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
- if (auto PT = DestTy->getAs<PointerType>())
- CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
- /*MayBeNull=*/true,
- CodeGenFunction::CFITCK_UnrelatedCast,
- CE->getBeginLoc());
+ if (auto *PT = DestTy->getAs<PointerType>()) {
+ CGF.EmitVTablePtrCheckForCast(
+ PT->getPointeeType(),
+ Address(Src,
+ CGF.ConvertTypeForMem(
+ E->getType()->castAs<PointerType>()->getPointeeType()),
+ CGF.getPointerAlign()),
+ /*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast,
+ CE->getBeginLoc());
+ }
}
if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
@@ -2050,7 +2122,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
// Update heapallocsite metadata when there is an explicit pointer cast.
if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
- if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE)) {
+ if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE) &&
+ !isa<CastExpr>(E)) {
QualType PointeeType = DestTy->getPointeeType();
if (!PointeeType.isNull())
CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
@@ -2059,27 +2132,50 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
}
// If Src is a fixed vector and Dst is a scalable vector, and both have the
- // same element type, use the llvm.experimental.vector.insert intrinsic to
- // perform the bitcast.
+ // same element type, use the llvm.vector.insert intrinsic to perform the
+ // bitcast.
if (const auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
if (const auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
+ // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate
+ // vector, use a vector insert and bitcast the result.
+ bool NeedsBitCast = false;
+ auto PredType = llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
+ llvm::Type *OrigType = DstTy;
+ if (ScalableDst == PredType &&
+ FixedSrc->getElementType() == Builder.getInt8Ty()) {
+ DstTy = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
+ ScalableDst = cast<llvm::ScalableVectorType>(DstTy);
+ NeedsBitCast = true;
+ }
if (FixedSrc->getElementType() == ScalableDst->getElementType()) {
llvm::Value *UndefVec = llvm::UndefValue::get(DstTy);
llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
- return Builder.CreateInsertVector(DstTy, UndefVec, Src, Zero,
- "castScalableSve");
+ llvm::Value *Result = Builder.CreateInsertVector(
+ DstTy, UndefVec, Src, Zero, "cast.scalable");
+ if (NeedsBitCast)
+ Result = Builder.CreateBitCast(Result, OrigType);
+ return Result;
}
}
}
// If Src is a scalable vector and Dst is a fixed vector, and both have the
- // same element type, use the llvm.experimental.vector.extract intrinsic to
- // perform the bitcast.
+ // same element type, use the llvm.vector.extract intrinsic to perform the
+ // bitcast.
if (const auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
if (const auto *FixedDst = dyn_cast<llvm::FixedVectorType>(DstTy)) {
+ // If we are casting a scalable 16 x i1 predicate vector to a fixed i8
+ // vector, bitcast the source and use a vector extract.
+ auto PredType = llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
+ if (ScalableSrc == PredType &&
+ FixedDst->getElementType() == Builder.getInt8Ty()) {
+ SrcTy = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
+ ScalableSrc = cast<llvm::ScalableVectorType>(SrcTy);
+ Src = Builder.CreateBitCast(Src, SrcTy);
+ }
if (ScalableSrc->getElementType() == FixedDst->getElementType()) {
llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
- return Builder.CreateExtractVector(DstTy, Src, Zero, "castFixedSve");
+ return Builder.CreateExtractVector(DstTy, Src, Zero, "cast.fixed");
}
}
}
@@ -2087,10 +2183,9 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
// Perform VLAT <-> VLST bitcast through memory.
// TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
// require the element types of the vectors to be the same, we
- // need to keep this around for casting between predicates, or more
- // generally for bitcasts between VLAT <-> VLST where the element
- // types of the vectors are not the same, until we figure out a better
- // way of doing these casts.
+ // need to keep this around for bitcasts between VLAT <-> VLST where
+ // the element types of the vectors are not the same, until we figure
+ // out a better way of doing these casts.
if ((isa<llvm::FixedVectorType>(SrcTy) &&
isa<llvm::ScalableVectorType>(DstTy)) ||
(isa<llvm::ScalableVectorType>(SrcTy) &&
@@ -2098,13 +2193,11 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
CGF.EmitStoreOfScalar(Src, LV);
- Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy),
- "castFixedSve");
+ Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
return EmitLoadOfLValue(DestLV, CE->getExprLoc());
}
-
return Builder.CreateBitCast(Src, DstTy);
}
case CK_AddressSpaceConversion: {
@@ -2127,10 +2220,14 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
}
case CK_AtomicToNonAtomic:
case CK_NonAtomicToAtomic:
- case CK_NoOp:
case CK_UserDefinedConversion:
return Visit(const_cast<Expr*>(E));
+ case CK_NoOp: {
+ return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE)
+ : Visit(const_cast<Expr *>(E));
+ }
+
case CK_BaseToDerived: {
const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
@@ -2148,10 +2245,10 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
Derived.getPointer(), DestTy->getPointeeType());
if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
- CGF.EmitVTablePtrCheckForCast(
- DestTy->getPointeeType(), Derived.getPointer(),
- /*MayBeNull=*/true, CodeGenFunction::CFITCK_DerivedCast,
- CE->getBeginLoc());
+ CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
+ /*MayBeNull=*/true,
+ CodeGenFunction::CFITCK_DerivedCast,
+ CE->getBeginLoc());
return Derived.getPointer();
}
@@ -2275,9 +2372,10 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
}
case CK_VectorSplat: {
llvm::Type *DstTy = ConvertType(DestTy);
- Value *Elt = Visit(const_cast<Expr*>(E));
+ Value *Elt = Visit(const_cast<Expr *>(E));
// Splat the element across to all elements
- unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
+ llvm::ElementCount NumElements =
+ cast<llvm::VectorType>(DstTy)->getElementCount();
return Builder.CreateVectorSplat(NumElements, Elt, "splat");
}
@@ -2420,7 +2518,7 @@ llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
case LangOptions::SOB_Undefined:
if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWAdd(InVal, Amount, Name);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LangOptions::SOB_Trapping:
if (!E->canOverflow())
return Builder.CreateNSWAdd(InVal, Amount, Name);
@@ -2473,7 +2571,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// For atomic bool increment, we just store true and return it for
// preincrement, do an atomic swap with true for postincrement
return Builder.CreateAtomicRMW(
- llvm::AtomicRMWInst::Xchg, LV.getPointer(CGF), True,
+ llvm::AtomicRMWInst::Xchg, LV.getAddress(CGF), True,
llvm::AtomicOrdering::SequentiallyConsistent);
}
// Special case for atomic increment / decrement on integers, emit
@@ -2491,7 +2589,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::Value *amt = CGF.EmitToMemory(
llvm::ConstantInt::get(ConvertType(type), 1, true), type);
llvm::Value *old =
- Builder.CreateAtomicRMW(aop, LV.getPointer(CGF), amt,
+ Builder.CreateAtomicRMW(aop, LV.getAddress(CGF), amt,
llvm::AtomicOrdering::SequentiallyConsistent);
return isPre ? Builder.CreateBinOp(op, old, amt) : old;
}
@@ -2525,7 +2623,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
} else if (type->isIntegerType()) {
QualType promotedType;
bool canPerformLossyDemotionCheck = false;
- if (type->isPromotableIntegerType()) {
+ if (CGF.getContext().isPromotableIntegerType(type)) {
promotedType = CGF.getContext().getPromotedIntegerType(type);
assert(promotedType != type && "Shouldn't promote to the same type.");
canPerformLossyDemotionCheck = true;
@@ -2587,37 +2685,36 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
= CGF.getContext().getAsVariableArrayType(type)) {
llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
+ llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
if (CGF.getLangOpts().isSignedOverflowDefined())
- value = Builder.CreateGEP(value->getType()->getPointerElementType(),
- value, numElts, "vla.inc");
+ value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc");
else
value = CGF.EmitCheckedInBoundsGEP(
- value, numElts, /*SignedIndices=*/false, isSubtraction,
+ elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction,
E->getExprLoc(), "vla.inc");
// Arithmetic on function pointers (!) is just +-1.
} else if (type->isFunctionType()) {
llvm::Value *amt = Builder.getInt32(amount);
- value = CGF.EmitCastToVoidPtr(value);
if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
else
- value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
- isSubtraction, E->getExprLoc(),
- "incdec.funcptr");
- value = Builder.CreateBitCast(value, input->getType());
+ value =
+ CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
+ /*SignedIndices=*/false, isSubtraction,
+ E->getExprLoc(), "incdec.funcptr");
// For everything else, we can just do a simple increment.
} else {
llvm::Value *amt = Builder.getInt32(amount);
+ llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
if (CGF.getLangOpts().isSignedOverflowDefined())
- value = Builder.CreateGEP(value->getType()->getPointerElementType(),
- value, amt, "incdec.ptr");
+ value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr");
else
- value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
- isSubtraction, E->getExprLoc(),
- "incdec.ptr");
+ value = CGF.EmitCheckedInBoundsGEP(
+ elemTy, value, amt, /*SignedIndices=*/false, isSubtraction,
+ E->getExprLoc(), "incdec.ptr");
}
// Vector increment/decrement.
@@ -2658,7 +2755,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
amt = llvm::ConstantFP::get(VMContext,
llvm::APFloat(static_cast<double>(amount)));
else {
- // Remaining types are Half, LongDouble or __float128. Convert from float.
+ // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
+ // Convert from float.
llvm::APFloat F(static_cast<float>(amount));
bool ignored;
const llvm::fltSemantics *FS;
@@ -2668,6 +2766,10 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
FS = &CGF.getTarget().getFloat128Format();
else if (value->getType()->isHalfTy())
FS = &CGF.getTarget().getHalfFormat();
+ else if (value->getType()->isBFloatTy())
+ FS = &CGF.getTarget().getBFloat16Format();
+ else if (value->getType()->isPPC_FP128Ty())
+ FS = &CGF.getTarget().getIbm128Format();
else
FS = &CGF.getTarget().getLongDoubleFormat();
F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
@@ -2714,7 +2816,6 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// Objective-C pointer types.
} else {
const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
- value = CGF.EmitCastToVoidPtr(value);
CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
if (!isInc) size = -size;
@@ -2724,9 +2825,9 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
else
- value = CGF.EmitCheckedInBoundsGEP(value, sizeValue,
- /*SignedIndices=*/false, isSubtraction,
- E->getExprLoc(), "incdec.objptr");
+ value = CGF.EmitCheckedInBoundsGEP(
+ CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction,
+ E->getExprLoc(), "incdec.objptr");
value = Builder.CreateBitCast(value, input->getType());
}
@@ -2755,10 +2856,45 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
}
+Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
+ QualType PromotionType) {
+ QualType promotionTy = PromotionType.isNull()
+ ? getPromotionType(E->getSubExpr()->getType())
+ : PromotionType;
+ Value *result = VisitPlus(E, promotionTy);
+ if (result && !promotionTy.isNull())
+ result = EmitUnPromotedValue(result, E->getType());
+ return result;
+}
-Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
+Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
+ QualType PromotionType) {
+ // This differs from gcc, though, most likely due to a bug in gcc.
TestAndClearIgnoreResultAssign();
- Value *Op = Visit(E->getSubExpr());
+ if (!PromotionType.isNull())
+ return CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
+ return Visit(E->getSubExpr());
+}
+
+Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
+ QualType PromotionType) {
+ QualType promotionTy = PromotionType.isNull()
+ ? getPromotionType(E->getSubExpr()->getType())
+ : PromotionType;
+ Value *result = VisitMinus(E, promotionTy);
+ if (result && !promotionTy.isNull())
+ result = EmitUnPromotedValue(result, E->getType());
+ return result;
+}
+
+Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
+ QualType PromotionType) {
+ TestAndClearIgnoreResultAssign();
+ Value *Op;
+ if (!PromotionType.isNull())
+ Op = CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
+ else
+ Op = Visit(E->getSubExpr());
// Generate a unary FNeg for FP ops.
if (Op->getType()->isFPOrFPVectorTy())
@@ -2778,14 +2914,14 @@ Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
TestAndClearIgnoreResultAssign();
Value *Op = Visit(E->getSubExpr());
- return Builder.CreateNot(Op, "neg");
+ return Builder.CreateNot(Op, "not");
}
Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
// Perform vector logical not on comparison with zero vector.
if (E->getType()->isVectorType() &&
E->getType()->castAs<VectorType>()->getVectorKind() ==
- VectorType::GenericVector) {
+ VectorKind::Generic) {
Value *Oper = Visit(E->getSubExpr());
Value *Zero = llvm::Constant::getNullValue(Oper->getType());
Value *Result;
@@ -2889,8 +3025,8 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
CurrentType = ON.getBase()->getType();
// Compute the offset to the base.
- const RecordType *BaseRT = CurrentType->getAs<RecordType>();
- CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
+ auto *BaseRT = CurrentType->castAs<RecordType>();
+ auto *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
break;
@@ -2907,9 +3043,10 @@ Value *
ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
const UnaryExprOrTypeTraitExpr *E) {
QualType TypeToSize = E->getTypeOfArgument();
- if (E->getKind() == UETT_SizeOf) {
+ if (auto Kind = E->getKind();
+ Kind == UETT_SizeOf || Kind == UETT_DataSizeOf) {
if (const VariableArrayType *VAT =
- CGF.getContext().getAsVariableArrayType(TypeToSize)) {
+ CGF.getContext().getAsVariableArrayType(TypeToSize)) {
if (E->isArgumentType()) {
// sizeof(type) - make sure to emit the VLA size.
CGF.EmitVariablyModifiedType(TypeToSize);
@@ -2936,6 +3073,9 @@ ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
E->getTypeOfArgument()->getPointeeType()))
.getQuantity();
return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
+ } else if (E->getKind() == UETT_VectorElements) {
+ auto *VecTy = cast<llvm::VectorType>(ConvertType(E->getTypeOfArgument()));
+ return Builder.CreateElementCount(CGF.SizeTy, VecTy->getElementCount());
}
// If this isn't sizeof(vla), the result must be constant; use the constant
@@ -2943,33 +3083,75 @@ ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
}
-Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
+Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
+ QualType PromotionType) {
+ QualType promotionTy = PromotionType.isNull()
+ ? getPromotionType(E->getSubExpr()->getType())
+ : PromotionType;
+ Value *result = VisitReal(E, promotionTy);
+ if (result && !promotionTy.isNull())
+ result = EmitUnPromotedValue(result, E->getType());
+ return result;
+}
+
+Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
+ QualType PromotionType) {
Expr *Op = E->getSubExpr();
if (Op->getType()->isAnyComplexType()) {
// If it's an l-value, load through the appropriate subobject l-value.
// Note that we have to ask E because Op might be an l-value that
// this won't work for, e.g. an Obj-C property.
- if (E->isGLValue())
- return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
- E->getExprLoc()).getScalarVal();
-
+ if (E->isGLValue()) {
+ if (!PromotionType.isNull()) {
+ CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
+ Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
+ if (result.first)
+ result.first = CGF.EmitPromotedValue(result, PromotionType).first;
+ return result.first;
+ } else {
+ return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
+ .getScalarVal();
+ }
+ }
// Otherwise, calculate and project.
return CGF.EmitComplexExpr(Op, false, true).first;
}
+ if (!PromotionType.isNull())
+ return CGF.EmitPromotedScalarExpr(Op, PromotionType);
return Visit(Op);
}
-Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
+Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
+ QualType PromotionType) {
+ QualType promotionTy = PromotionType.isNull()
+ ? getPromotionType(E->getSubExpr()->getType())
+ : PromotionType;
+ Value *result = VisitImag(E, promotionTy);
+ if (result && !promotionTy.isNull())
+ result = EmitUnPromotedValue(result, E->getType());
+ return result;
+}
+
+Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
+ QualType PromotionType) {
Expr *Op = E->getSubExpr();
if (Op->getType()->isAnyComplexType()) {
// If it's an l-value, load through the appropriate subobject l-value.
// Note that we have to ask E because Op might be an l-value that
// this won't work for, e.g. an Obj-C property.
- if (Op->isGLValue())
- return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
- E->getExprLoc()).getScalarVal();
-
+ if (Op->isGLValue()) {
+ if (!PromotionType.isNull()) {
+ CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
+ Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
+ if (result.second)
+ result.second = CGF.EmitPromotedValue(result, PromotionType).second;
+ return result.second;
+ } else {
+ return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
+ .getScalarVal();
+ }
+ }
// Otherwise, calculate and project.
return CGF.EmitComplexExpr(Op, true, false).second;
}
@@ -2978,8 +3160,12 @@ Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
// effects are evaluated, but not the actual value.
if (Op->isGLValue())
CGF.EmitLValue(Op);
+ else if (!PromotionType.isNull())
+ CGF.EmitPromotedScalarExpr(Op, PromotionType);
else
CGF.EmitScalarExpr(Op, true);
+ if (!PromotionType.isNull())
+ return llvm::Constant::getNullValue(ConvertType(PromotionType));
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
@@ -2987,12 +3173,65 @@ Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
// Binary Operators
//===----------------------------------------------------------------------===//
-BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
+Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
+ QualType PromotionType) {
+ return CGF.Builder.CreateFPExt(result, ConvertType(PromotionType), "ext");
+}
+
+Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
+ QualType ExprType) {
+ return CGF.Builder.CreateFPTrunc(result, ConvertType(ExprType), "unpromotion");
+}
+
+Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
+ E = E->IgnoreParens();
+ if (auto BO = dyn_cast<BinaryOperator>(E)) {
+ switch (BO->getOpcode()) {
+#define HANDLE_BINOP(OP) \
+ case BO_##OP: \
+ return Emit##OP(EmitBinOps(BO, PromotionType));
+ HANDLE_BINOP(Add)
+ HANDLE_BINOP(Sub)
+ HANDLE_BINOP(Mul)
+ HANDLE_BINOP(Div)
+#undef HANDLE_BINOP
+ default:
+ break;
+ }
+ } else if (auto UO = dyn_cast<UnaryOperator>(E)) {
+ switch (UO->getOpcode()) {
+ case UO_Imag:
+ return VisitImag(UO, PromotionType);
+ case UO_Real:
+ return VisitReal(UO, PromotionType);
+ case UO_Minus:
+ return VisitMinus(UO, PromotionType);
+ case UO_Plus:
+ return VisitPlus(UO, PromotionType);
+ default:
+ break;
+ }
+ }
+ auto result = Visit(const_cast<Expr *>(E));
+ if (result) {
+ if (!PromotionType.isNull())
+ return EmitPromotedValue(result, PromotionType);
+ else
+ return EmitUnPromotedValue(result, E->getType());
+ }
+ return result;
+}
+
+BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
+ QualType PromotionType) {
TestAndClearIgnoreResultAssign();
BinOpInfo Result;
- Result.LHS = Visit(E->getLHS());
- Result.RHS = Visit(E->getRHS());
- Result.Ty = E->getType();
+ Result.LHS = CGF.EmitPromotedScalarExpr(E->getLHS(), PromotionType);
+ Result.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionType);
+ if (!PromotionType.isNull())
+ Result.Ty = PromotionType;
+ else
+ Result.Ty = E->getType();
Result.Opcode = E->getOpcode();
Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
Result.E = E;
@@ -3011,8 +3250,18 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
// Emit the RHS first. __block variables need to have the rhs evaluated
// first, plus this should improve codegen a little.
- OpInfo.RHS = Visit(E->getRHS());
- OpInfo.Ty = E->getComputationResultType();
+
+ QualType PromotionTypeCR;
+ PromotionTypeCR = getPromotionType(E->getComputationResultType());
+ if (PromotionTypeCR.isNull())
+ PromotionTypeCR = E->getComputationResultType();
+ QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType());
+ QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType());
+ if (!PromotionTypeRHS.isNull())
+ OpInfo.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS);
+ else
+ OpInfo.RHS = Visit(E->getRHS());
+ OpInfo.Ty = PromotionTypeCR;
OpInfo.Opcode = E->getOpcode();
OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
OpInfo.E = E;
@@ -3065,7 +3314,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
E->getExprLoc()),
LHSTy);
Value *OldVal = Builder.CreateAtomicRMW(
- AtomicOp, LHSLV.getPointer(CGF), Amt,
+ AtomicOp, LHSLV.getAddress(CGF), Amt,
llvm::AtomicOrdering::SequentiallyConsistent);
// Since operation is atomic, the result type is guaranteed to be the
@@ -3091,16 +3340,20 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
SourceLocation Loc = E->getExprLoc();
- OpInfo.LHS =
- EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc);
+ if (!PromotionTypeLHS.isNull())
+ OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS,
+ E->getExprLoc());
+ else
+ OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
+ E->getComputationLHSType(), Loc);
// Expand the binary operator.
Result = (this->*Func)(OpInfo);
// Convert the result back to the LHS type,
// potentially with Implicit Conversion sanitizer check.
- Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy,
- Loc, ScalarConversionOpts(CGF.SanOpts));
+ Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc,
+ ScalarConversionOpts(CGF.SanOpts));
if (atomicPHI) {
llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
@@ -3203,7 +3456,7 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
}
if (Ops.Ty->isConstantMatrixType()) {
- llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
+ llvm::MatrixBuilder MB(Builder);
// We need to check the types of the operands of the operator to get the
// correct matrix dimensions.
auto *BO = cast<BinaryOperator>(Ops.E);
@@ -3222,21 +3475,7 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
llvm::Value *Val;
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
- if ((CGF.getLangOpts().OpenCL &&
- !CGF.CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
- (CGF.getLangOpts().HIP && CGF.getLangOpts().CUDAIsDevice &&
- !CGF.CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
- // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
- // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
- // build option allows an application to specify that single precision
- // floating-point divide (x/y and 1/x) and sqrt used in the program
- // source are correctly rounded.
- llvm::Type *ValTy = Val->getType();
- if (ValTy->isFloatTy() ||
- (isa<llvm::VectorType>(ValTy) &&
- cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
- CGF.SetFPAccuracy(Val, 2.5);
- }
+ CGF.SetDivFPAccuracy(Val);
return Val;
}
else if (Ops.isFixedPointOp())
@@ -3446,8 +3685,8 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
index = CGF.Builder.CreateMul(index, objectSize);
- Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
- result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr");
+ Value *result =
+ CGF.Builder.CreateGEP(CGF.Int8Ty, pointer, index, "add.ptr");
return CGF.Builder.CreateBitCast(result, pointer->getType());
}
@@ -3461,16 +3700,15 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
// GEP indexes are signed, and scaling an index isn't permitted to
// signed-overflow, so we use the same semantics for our explicit
// multiply. We suppress this if overflow is not undefined behavior.
+ llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
if (CGF.getLangOpts().isSignedOverflowDefined()) {
index = CGF.Builder.CreateMul(index, numElements, "vla.index");
- pointer = CGF.Builder.CreateGEP(
- pointer->getType()->getPointerElementType(), pointer, index,
- "add.ptr");
+ pointer = CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
} else {
index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
- pointer =
- CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
- op.E->getExprLoc(), "add.ptr");
+ pointer = CGF.EmitCheckedInBoundsGEP(
+ elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
+ "add.ptr");
}
return pointer;
}
@@ -3478,18 +3716,18 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
// Explicitly handle GNU void* and function pointer arithmetic extensions. The
// GNU void* casts amount to no-ops since our void* type is i8*, but this is
// future proof.
- if (elementType->isVoidType() || elementType->isFunctionType()) {
- Value *result = CGF.EmitCastToVoidPtr(pointer);
- result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr");
- return CGF.Builder.CreateBitCast(result, pointer->getType());
- }
+ llvm::Type *elemTy;
+ if (elementType->isVoidType() || elementType->isFunctionType())
+ elemTy = CGF.Int8Ty;
+ else
+ elemTy = CGF.ConvertTypeForMem(elementType);
if (CGF.getLangOpts().isSignedOverflowDefined())
- return CGF.Builder.CreateGEP(
- pointer->getType()->getPointerElementType(), pointer, index, "add.ptr");
+ return CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
- return CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
- op.E->getExprLoc(), "add.ptr");
+ return CGF.EmitCheckedInBoundsGEP(
+ elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
+ "add.ptr");
}
// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
@@ -3500,8 +3738,6 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
const CodeGenFunction &CGF, CGBuilderTy &Builder,
bool negMul, bool negAdd) {
- assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");
-
Value *MulOp0 = MulOp->getOperand(0);
Value *MulOp1 = MulOp->getOperand(1);
if (negMul)
@@ -3546,31 +3782,70 @@ static Value* tryEmitFMulAdd(const BinOpInfo &op,
if (!op.FPFeatures.allowFPContractWithinStatement())
return nullptr;
+ Value *LHS = op.LHS;
+ Value *RHS = op.RHS;
+
+ // Peek through fneg to look for fmul. Make sure fneg has no users, and that
+ // it is the only use of its operand.
+ bool NegLHS = false;
+ if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(LHS)) {
+ if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
+ LHSUnOp->use_empty() && LHSUnOp->getOperand(0)->hasOneUse()) {
+ LHS = LHSUnOp->getOperand(0);
+ NegLHS = true;
+ }
+ }
+
+ bool NegRHS = false;
+ if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(RHS)) {
+ if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
+ RHSUnOp->use_empty() && RHSUnOp->getOperand(0)->hasOneUse()) {
+ RHS = RHSUnOp->getOperand(0);
+ NegRHS = true;
+ }
+ }
+
// We have a potentially fusable op. Look for a mul on one of the operands.
// Also, make sure that the mul result isn't used directly. In that case,
// there's no point creating a muladd operation.
- if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
+ if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(LHS)) {
if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
- LHSBinOp->use_empty())
- return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
+ (LHSBinOp->use_empty() || NegLHS)) {
+ // If we looked through fneg, erase it.
+ if (NegLHS)
+ cast<llvm::Instruction>(op.LHS)->eraseFromParent();
+ return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
+ }
}
- if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) {
+ if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(RHS)) {
if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
- RHSBinOp->use_empty())
- return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
+ (RHSBinOp->use_empty() || NegRHS)) {
+ // If we looked through fneg, erase it.
+ if (NegRHS)
+ cast<llvm::Instruction>(op.RHS)->eraseFromParent();
+ return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
+ }
}
- if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(op.LHS)) {
+ if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(LHS)) {
if (LHSBinOp->getIntrinsicID() ==
llvm::Intrinsic::experimental_constrained_fmul &&
- LHSBinOp->use_empty())
- return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
+ (LHSBinOp->use_empty() || NegLHS)) {
+ // If we looked through fneg, erase it.
+ if (NegLHS)
+ cast<llvm::Instruction>(op.LHS)->eraseFromParent();
+ return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
+ }
}
- if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(op.RHS)) {
+ if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(RHS)) {
if (RHSBinOp->getIntrinsicID() ==
llvm::Intrinsic::experimental_constrained_fmul &&
- RHSBinOp->use_empty())
- return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
+ (RHSBinOp->use_empty() || NegRHS)) {
+ // If we looked through fneg, erase it.
+ if (NegRHS)
+ cast<llvm::Instruction>(op.RHS)->eraseFromParent();
+ return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
+ }
}
return nullptr;
@@ -3588,7 +3863,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
case LangOptions::SOB_Undefined:
if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LangOptions::SOB_Trapping:
if (CanElideOverflowCheck(CGF.getContext(), op))
return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
@@ -3596,8 +3871,16 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
}
}
+ // For vector and matrix adds, try to fold into a fmuladd.
+ if (op.LHS->getType()->isFPOrFPVectorTy()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
+ // Try to form an fmuladd.
+ if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
+ return FMulAdd;
+ }
+
if (op.Ty->isConstantMatrixType()) {
- llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
+ llvm::MatrixBuilder MB(Builder);
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
return MB.CreateAdd(op.LHS, op.RHS);
}
@@ -3609,10 +3892,6 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
if (op.LHS->getType()->isFPOrFPVectorTy()) {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
- // Try to form an fmuladd.
- if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
- return FMulAdd;
-
return Builder.CreateFAdd(op.LHS, op.RHS, "add");
}
@@ -3738,7 +4017,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
case LangOptions::SOB_Undefined:
if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LangOptions::SOB_Trapping:
if (CanElideOverflowCheck(CGF.getContext(), op))
return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
@@ -3746,8 +4025,16 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
}
}
+ // For vector and matrix subs, try to fold into a fmuladd.
+ if (op.LHS->getType()->isFPOrFPVectorTy()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
+ // Try to form an fmuladd.
+ if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
+ return FMulAdd;
+ }
+
if (op.Ty->isConstantMatrixType()) {
- llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
+ llvm::MatrixBuilder MB(Builder);
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
return MB.CreateSub(op.LHS, op.RHS);
}
@@ -3759,9 +4046,6 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
if (op.LHS->getType()->isFPOrFPVectorTy()) {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
- // Try to form an fmuladd.
- if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
- return FMulAdd;
return Builder.CreateFSub(op.LHS, op.RHS, "sub");
}
@@ -4280,6 +4564,12 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
if (LHSCondVal) { // If we have 1 && X, just emit X.
CGF.incrementProfileCounter(E);
+ // If the top of the logical operator nest, reset the MCDC temp to 0.
+ if (CGF.MCDCLogOpStack.empty())
+ CGF.maybeResetMCDCCondBitmap(E);
+
+ CGF.MCDCLogOpStack.push_back(E);
+
Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
// If we're generating for profiling or coverage, generate a branch to a
@@ -4288,6 +4578,7 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
// "FalseBlock" after the increment is done.
if (InstrumentRegions &&
CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
+ CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
@@ -4297,6 +4588,11 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
CGF.EmitBlock(FBlock);
}
+ CGF.MCDCLogOpStack.pop_back();
+ // If the top of the logical operator nest, update the MCDC bitmap.
+ if (CGF.MCDCLogOpStack.empty())
+ CGF.maybeUpdateMCDCTestVectorBitmap(E);
+
// ZExt result to int or bool.
return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
}
@@ -4306,6 +4602,12 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
return llvm::Constant::getNullValue(ResTy);
}
+ // If the top of the logical operator nest, reset the MCDC temp to 0.
+ if (CGF.MCDCLogOpStack.empty())
+ CGF.maybeResetMCDCCondBitmap(E);
+
+ CGF.MCDCLogOpStack.push_back(E);
+
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
@@ -4338,6 +4640,7 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
// condition coverage.
if (InstrumentRegions &&
CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
+ CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
CGF.EmitBlock(RHSBlockCnt);
@@ -4355,6 +4658,11 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
// Insert an entry into the phi node for the edge with the value of RHSCond.
PN->addIncoming(RHSCond, RHSBlock);
+ CGF.MCDCLogOpStack.pop_back();
+ // If the top of the logical operator nest, update the MCDC bitmap.
+ if (CGF.MCDCLogOpStack.empty())
+ CGF.maybeUpdateMCDCTestVectorBitmap(E);
+
// Artificial location to preserve the scope information
{
auto NL = ApplyDebugLocation::CreateArtificial(CGF);
@@ -4396,6 +4704,12 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
if (!LHSCondVal) { // If we have 0 || X, just emit X.
CGF.incrementProfileCounter(E);
+ // If the top of the logical operator nest, reset the MCDC temp to 0.
+ if (CGF.MCDCLogOpStack.empty())
+ CGF.maybeResetMCDCCondBitmap(E);
+
+ CGF.MCDCLogOpStack.push_back(E);
+
Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
// If we're generating for profiling or coverage, generate a branch to a
@@ -4404,6 +4718,7 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
// "FalseBlock" after the increment is done.
if (InstrumentRegions &&
CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
+ CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
@@ -4413,6 +4728,11 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
CGF.EmitBlock(FBlock);
}
+ CGF.MCDCLogOpStack.pop_back();
+ // If the top of the logical operator nest, update the MCDC bitmap.
+ if (CGF.MCDCLogOpStack.empty())
+ CGF.maybeUpdateMCDCTestVectorBitmap(E);
+
// ZExt result to int or bool.
return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
}
@@ -4422,6 +4742,12 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
return llvm::ConstantInt::get(ResTy, 1);
}
+ // If the top of the logical operator nest, reset the MCDC temp to 0.
+ if (CGF.MCDCLogOpStack.empty())
+ CGF.maybeResetMCDCCondBitmap(E);
+
+ CGF.MCDCLogOpStack.push_back(E);
+
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
@@ -4458,6 +4784,7 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
// condition coverage.
if (InstrumentRegions &&
CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
+ CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
CGF.EmitBlock(RHSBlockCnt);
@@ -4471,6 +4798,11 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
CGF.EmitBlock(ContBlock);
PN->addIncoming(RHSCond, RHSBlock);
+ CGF.MCDCLogOpStack.pop_back();
+ // If the top of the logical operator nest, update the MCDC bitmap.
+ if (CGF.MCDCLogOpStack.empty())
+ CGF.maybeUpdateMCDCTestVectorBitmap(E);
+
// ZExt result to int.
return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
}
@@ -4579,7 +4911,8 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
return tmp5;
}
- if (condExpr->getType()->isVectorType()) {
+ if (condExpr->getType()->isVectorType() ||
+ condExpr->getType()->isSveVLSBuiltinType()) {
CGF.incrementProfileCounter(E);
llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
@@ -4614,6 +4947,10 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
return Builder.CreateSelect(CondV, LHS, RHS, "cond");
}
+ // If the top of the logical operator nest, reset the MCDC temp to 0.
+ if (CGF.MCDCLogOpStack.empty())
+ CGF.maybeResetMCDCCondBitmap(condExpr);
+
llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
@@ -4623,6 +4960,13 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
CGF.getProfileCount(lhsExpr));
CGF.EmitBlock(LHSBlock);
+
+ // If the top of the logical operator nest, update the MCDC bitmap for the
+ // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
+ // may also contain a boolean expression.
+ if (CGF.MCDCLogOpStack.empty())
+ CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);
+
CGF.incrementProfileCounter(E);
eval.begin(CGF);
Value *LHS = Visit(lhsExpr);
@@ -4632,6 +4976,13 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
Builder.CreateBr(ContBlock);
CGF.EmitBlock(RHSBlock);
+
+ // If the top of the logical operator nest, update the MCDC bitmap for the
+ // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
+ // may also contain a boolean expression.
+ if (CGF.MCDCLogOpStack.empty())
+ CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);
+
eval.begin(CGF);
Value *RHS = Visit(rhsExpr);
eval.end(CGF);
@@ -4649,6 +5000,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
PN->addIncoming(LHS, LHSBlock);
PN->addIncoming(RHS, RHSBlock);
+
return PN;
}
@@ -4695,8 +5047,7 @@ Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
Value *Src, unsigned NumElementsDst) {
static constexpr int Mask[] = {0, 1, 2, -1};
- return Builder.CreateShuffleVector(Src,
- llvm::makeArrayRef(Mask, NumElementsDst));
+ return Builder.CreateShuffleVector(Src, llvm::ArrayRef(Mask, NumElementsDst));
}
// Create cast instructions for converting LLVM value \p Src to LLVM type \p
@@ -4759,15 +5110,16 @@ Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
: 0;
+ // Use bit vector expansion for ext_vector_type boolean vectors.
+ if (E->getType()->isExtVectorBoolType())
+ return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");
+
// Going from vec3 to non-vec3 is a special case and requires a shuffle
// vector to get a vec4, then a bitcast if the target type is different.
if (NumElementsSrc == 3 && NumElementsDst != 3) {
Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
-
- if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
- Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
- DstTy);
- }
+ Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
+ DstTy);
Src->setName("astype");
return Src;
@@ -4777,12 +5129,10 @@ Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
// to vec4 if the original type is not vec4, then a shuffle vector to
// get a vec3.
if (NumElementsSrc != 3 && NumElementsDst == 3) {
- if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
- auto *Vec4Ty = llvm::FixedVectorType::get(
- cast<llvm::VectorType>(DstTy)->getElementType(), 4);
- Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
- Vec4Ty);
- }
+ auto *Vec4Ty = llvm::FixedVectorType::get(
+ cast<llvm::VectorType>(DstTy)->getElementType(), 4);
+ Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
+ Vec4Ty);
Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
Src->setName("astype");
@@ -4834,6 +5184,16 @@ Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
}
+Value *
+CodeGenFunction::EmitPromotedScalarExpr(const Expr *E,
+ QualType PromotionType) {
+ if (!PromotionType.isNull())
+ return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
+ else
+ return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
+}
+
+
llvm::Value *CodeGenFunction::
EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre) {
@@ -4847,13 +5207,15 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
Expr *BaseExpr = E->getBase();
Address Addr = Address::invalid();
if (BaseExpr->isPRValue()) {
- Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
+ llvm::Type *BaseTy =
+ ConvertTypeForMem(BaseExpr->getType()->getPointeeType());
+ Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
} else {
Addr = EmitLValue(BaseExpr).getAddress(*this);
}
// Cast the address to Class*.
- Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
+ Addr = Addr.withElementType(ConvertType(E->getType()));
return MakeAddrLValue(Addr, E->getType());
}
@@ -4942,7 +5304,7 @@ static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
auto *GEP = cast<llvm::GEPOperator>(GEPVal);
assert(GEP->getPointerOperand() == BasePtr &&
- "BasePtr must be the the base of the GEP.");
+ "BasePtr must be the base of the GEP.");
assert(GEP->isInBounds() && "Expected inbounds GEP");
auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
@@ -4997,8 +5359,8 @@ static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
} else {
// Otherwise this is array-like indexing. The local offset is the index
// multiplied by the element size.
- auto *ElementSize = llvm::ConstantInt::get(
- IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType()));
+ auto *ElementSize =
+ llvm::ConstantInt::get(IntPtrTy, GTI.getSequentialElementStride(DL));
auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
LocalOffset = eval(BO_Mul, ElementSize, IndexS);
}
@@ -5015,12 +5377,12 @@ static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
}
Value *
-CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
+CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
+ ArrayRef<Value *> IdxList,
bool SignedIndices, bool IsSubtraction,
SourceLocation Loc, const Twine &Name) {
llvm::Type *PtrTy = Ptr->getType();
- Value *GEPVal = Builder.CreateInBoundsGEP(
- PtrTy->getPointerElementType(), Ptr, IdxList, Name);
+ Value *GEPVal = Builder.CreateInBoundsGEP(ElemTy, Ptr, IdxList, Name);
// If the pointer overflow sanitizer isn't enabled, do nothing.
if (!SanOpts.has(SanitizerKind::PointerOverflow))
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGGPUBuiltin.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGGPUBuiltin.cpp
index f860623e2bc3..e465789a003e 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGGPUBuiltin.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGGPUBuiltin.cpp
@@ -21,13 +21,14 @@
using namespace clang;
using namespace CodeGen;
-static llvm::Function *GetVprintfDeclaration(llvm::Module &M) {
- llvm::Type *ArgTypes[] = {llvm::Type::getInt8PtrTy(M.getContext()),
- llvm::Type::getInt8PtrTy(M.getContext())};
+namespace {
+llvm::Function *GetVprintfDeclaration(llvm::Module &M) {
+ llvm::Type *ArgTypes[] = {llvm::PointerType::getUnqual(M.getContext()),
+ llvm::PointerType::getUnqual(M.getContext())};
llvm::FunctionType *VprintfFuncType = llvm::FunctionType::get(
llvm::Type::getInt32Ty(M.getContext()), ArgTypes, false);
- if (auto* F = M.getFunction("vprintf")) {
+ if (auto *F = M.getFunction("vprintf")) {
// Our CUDA system header declares vprintf with the right signature, so
// nobody else should have been able to declare vprintf with a bogus
// signature.
@@ -41,6 +42,28 @@ static llvm::Function *GetVprintfDeclaration(llvm::Module &M) {
VprintfFuncType, llvm::GlobalVariable::ExternalLinkage, "vprintf", &M);
}
+llvm::Function *GetOpenMPVprintfDeclaration(CodeGenModule &CGM) {
+ const char *Name = "__llvm_omp_vprintf";
+ llvm::Module &M = CGM.getModule();
+ llvm::Type *ArgTypes[] = {llvm::PointerType::getUnqual(M.getContext()),
+ llvm::PointerType::getUnqual(M.getContext()),
+ llvm::Type::getInt32Ty(M.getContext())};
+ llvm::FunctionType *VprintfFuncType = llvm::FunctionType::get(
+ llvm::Type::getInt32Ty(M.getContext()), ArgTypes, false);
+
+ if (auto *F = M.getFunction(Name)) {
+ if (F->getFunctionType() != VprintfFuncType) {
+ CGM.Error(SourceLocation(),
+ "Invalid type declaration for __llvm_omp_vprintf");
+ return nullptr;
+ }
+ return F;
+ }
+
+ return llvm::Function::Create(
+ VprintfFuncType, llvm::GlobalVariable::ExternalLinkage, Name, &M);
+}
+
// Transforms a call to printf into a call to the NVPTX vprintf syscall (which
// isn't particularly special; it's invoked just like a regular function).
// vprintf takes two args: A format string, and a pointer to a buffer containing
@@ -66,39 +89,23 @@ static llvm::Function *GetVprintfDeclaration(llvm::Module &M) {
//
// Note that by the time this function runs, E's args have already undergone the
// standard C vararg promotion (short -> int, float -> double, etc.).
-RValue
-CodeGenFunction::EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
- ReturnValueSlot ReturnValue) {
- assert(getTarget().getTriple().isNVPTX());
- assert(E->getBuiltinCallee() == Builtin::BIprintf);
- assert(E->getNumArgs() >= 1); // printf always has at least one arg.
-
- const llvm::DataLayout &DL = CGM.getDataLayout();
- llvm::LLVMContext &Ctx = CGM.getLLVMContext();
- CallArgList Args;
- EmitCallArgs(Args,
- E->getDirectCallee()->getType()->getAs<FunctionProtoType>(),
- E->arguments(), E->getDirectCallee(),
- /* ParamsToSkip = */ 0);
-
- // We don't know how to emit non-scalar varargs.
- if (std::any_of(Args.begin() + 1, Args.end(), [&](const CallArg &A) {
- return !A.getRValue(*this).isScalar();
- })) {
- CGM.ErrorUnsupported(E, "non-scalar arg to printf");
- return RValue::get(llvm::ConstantInt::get(IntTy, 0));
- }
+std::pair<llvm::Value *, llvm::TypeSize>
+packArgsIntoNVPTXFormatBuffer(CodeGenFunction *CGF, const CallArgList &Args) {
+ const llvm::DataLayout &DL = CGF->CGM.getDataLayout();
+ llvm::LLVMContext &Ctx = CGF->CGM.getLLVMContext();
+ CGBuilderTy &Builder = CGF->Builder;
// Construct and fill the args buffer that we'll pass to vprintf.
- llvm::Value *BufferPtr;
if (Args.size() <= 1) {
- // If there are no args, pass a null pointer to vprintf.
- BufferPtr = llvm::ConstantPointerNull::get(llvm::Type::getInt8PtrTy(Ctx));
+ // If there are no args, pass a null pointer and size 0
+ llvm::Value *BufferPtr =
+ llvm::ConstantPointerNull::get(llvm::PointerType::getUnqual(Ctx));
+ return {BufferPtr, llvm::TypeSize::getFixed(0)};
} else {
llvm::SmallVector<llvm::Type *, 8> ArgTypes;
for (unsigned I = 1, NumArgs = Args.size(); I < NumArgs; ++I)
- ArgTypes.push_back(Args[I].getRValue(*this).getScalarVal()->getType());
+ ArgTypes.push_back(Args[I].getRValue(*CGF).getScalarVal()->getType());
// Using llvm::StructType is correct only because printf doesn't accept
// aggregates. If we had to handle aggregates here, we'd have to manually
@@ -106,25 +113,71 @@ CodeGenFunction::EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
// that the alignment of the llvm type was the same as the alignment of the
// clang type.
llvm::Type *AllocaTy = llvm::StructType::create(ArgTypes, "printf_args");
- llvm::Value *Alloca = CreateTempAlloca(AllocaTy);
+ llvm::Value *Alloca = CGF->CreateTempAlloca(AllocaTy);
for (unsigned I = 1, NumArgs = Args.size(); I < NumArgs; ++I) {
llvm::Value *P = Builder.CreateStructGEP(AllocaTy, Alloca, I - 1);
- llvm::Value *Arg = Args[I].getRValue(*this).getScalarVal();
+ llvm::Value *Arg = Args[I].getRValue(*CGF).getScalarVal();
Builder.CreateAlignedStore(Arg, P, DL.getPrefTypeAlign(Arg->getType()));
}
- BufferPtr = Builder.CreatePointerCast(Alloca, llvm::Type::getInt8PtrTy(Ctx));
+ llvm::Value *BufferPtr =
+ Builder.CreatePointerCast(Alloca, llvm::PointerType::getUnqual(Ctx));
+ return {BufferPtr, DL.getTypeAllocSize(AllocaTy)};
}
+}
- // Invoke vprintf and return.
- llvm::Function* VprintfFunc = GetVprintfDeclaration(CGM.getModule());
- return RValue::get(Builder.CreateCall(
- VprintfFunc, {Args[0].getRValue(*this).getScalarVal(), BufferPtr}));
+bool containsNonScalarVarargs(CodeGenFunction *CGF, const CallArgList &Args) {
+ return llvm::any_of(llvm::drop_begin(Args), [&](const CallArg &A) {
+ return !A.getRValue(*CGF).isScalar();
+ });
}
-RValue
-CodeGenFunction::EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E,
- ReturnValueSlot ReturnValue) {
+RValue EmitDevicePrintfCallExpr(const CallExpr *E, CodeGenFunction *CGF,
+ llvm::Function *Decl, bool WithSizeArg) {
+ CodeGenModule &CGM = CGF->CGM;
+ CGBuilderTy &Builder = CGF->Builder;
+ assert(E->getBuiltinCallee() == Builtin::BIprintf);
+ assert(E->getNumArgs() >= 1); // printf always has at least one arg.
+
+ // Uses the same format as nvptx for the argument packing, but also passes
+ // an i32 for the total size of the passed pointer
+ CallArgList Args;
+ CGF->EmitCallArgs(Args,
+ E->getDirectCallee()->getType()->getAs<FunctionProtoType>(),
+ E->arguments(), E->getDirectCallee(),
+ /* ParamsToSkip = */ 0);
+
+ // We don't know how to emit non-scalar varargs.
+ if (containsNonScalarVarargs(CGF, Args)) {
+ CGM.ErrorUnsupported(E, "non-scalar arg to printf");
+ return RValue::get(llvm::ConstantInt::get(CGF->IntTy, 0));
+ }
+
+ auto r = packArgsIntoNVPTXFormatBuffer(CGF, Args);
+ llvm::Value *BufferPtr = r.first;
+
+ llvm::SmallVector<llvm::Value *, 3> Vec = {
+ Args[0].getRValue(*CGF).getScalarVal(), BufferPtr};
+ if (WithSizeArg) {
+ // Passing > 32bit of data as a local alloca doesn't work for nvptx or
+ // amdgpu
+ llvm::Constant *Size =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGM.getLLVMContext()),
+ static_cast<uint32_t>(r.second.getFixedValue()));
+
+ Vec.push_back(Size);
+ }
+ return RValue::get(Builder.CreateCall(Decl, Vec));
+}
+} // namespace
+
+RValue CodeGenFunction::EmitNVPTXDevicePrintfCallExpr(const CallExpr *E) {
+ assert(getTarget().getTriple().isNVPTX());
+ return EmitDevicePrintfCallExpr(
+ E, this, GetVprintfDeclaration(CGM.getModule()), false);
+}
+
+RValue CodeGenFunction::EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E) {
assert(getTarget().getTriple().getArch() == llvm::Triple::amdgcn);
assert(E->getBuiltinCallee() == Builtin::BIprintf ||
E->getBuiltinCallee() == Builtin::BI__builtin_printf);
@@ -137,7 +190,7 @@ CodeGenFunction::EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E,
/* ParamsToSkip = */ 0);
SmallVector<llvm::Value *, 8> Args;
- for (auto A : CallArgs) {
+ for (const auto &A : CallArgs) {
// We don't know how to emit non-scalar varargs.
if (!A.getRValue(*this).isScalar()) {
CGM.ErrorUnsupported(E, "non-scalar arg to printf");
@@ -150,7 +203,17 @@ CodeGenFunction::EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E,
llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
- auto Printf = llvm::emitAMDGPUPrintfCall(IRB, Args);
+
+ bool isBuffered = (CGM.getTarget().getTargetOpts().AMDGPUPrintfKindVal ==
+ clang::TargetOptions::AMDGPUPrintfKind::Buffered);
+ auto Printf = llvm::emitAMDGPUPrintfCall(IRB, Args, isBuffered);
Builder.SetInsertPoint(IRB.GetInsertBlock(), IRB.GetInsertPoint());
return RValue::get(Printf);
}
+
+RValue CodeGenFunction::EmitOpenMPDevicePrintfCallExpr(const CallExpr *E) {
+ assert(getTarget().getTriple().isNVPTX() ||
+ getTarget().getTriple().isAMDGCN());
+ return EmitDevicePrintfCallExpr(E, this, GetOpenMPVprintfDeclaration(CGM),
+ true);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.cpp
new file mode 100644
index 000000000000..e887d35198b3
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.cpp
@@ -0,0 +1,454 @@
+//===----- CGHLSLRuntime.cpp - Interface to HLSL Runtimes -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for HLSL code generation. Concrete
+// subclasses of this implement code generation for specific HLSL
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGHLSLRuntime.h"
+#include "CGDebugInfo.h"
+#include "CodeGenModule.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/TargetOptions.h"
+#include "llvm/IR/IntrinsicsDirectX.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/FormatVariadic.h"
+
+using namespace clang;
+using namespace CodeGen;
+using namespace clang::hlsl;
+using namespace llvm;
+
+namespace {
+
+void addDxilValVersion(StringRef ValVersionStr, llvm::Module &M) {
+ // The validation of ValVersionStr is done at HLSLToolChain::TranslateArgs.
+ // Assume ValVersionStr is legal here.
+ VersionTuple Version;
+ if (Version.tryParse(ValVersionStr) || Version.getBuild() ||
+ Version.getSubminor() || !Version.getMinor()) {
+ return;
+ }
+
+ uint64_t Major = Version.getMajor();
+ uint64_t Minor = *Version.getMinor();
+
+ auto &Ctx = M.getContext();
+ IRBuilder<> B(M.getContext());
+ MDNode *Val = MDNode::get(Ctx, {ConstantAsMetadata::get(B.getInt32(Major)),
+ ConstantAsMetadata::get(B.getInt32(Minor))});
+ StringRef DXILValKey = "dx.valver";
+ auto *DXILValMD = M.getOrInsertNamedMetadata(DXILValKey);
+ DXILValMD->addOperand(Val);
+}
+void addDisableOptimizations(llvm::Module &M) {
+ StringRef Key = "dx.disable_optimizations";
+ M.addModuleFlag(llvm::Module::ModFlagBehavior::Override, Key, 1);
+}
+// cbuffer will be translated into global variable in special address space.
+// If translate into C,
+// cbuffer A {
+// float a;
+// float b;
+// }
+// float foo() { return a + b; }
+//
+// will be translated into
+//
+// struct A {
+// float a;
+// float b;
+// } cbuffer_A __attribute__((address_space(4)));
+// float foo() { return cbuffer_A.a + cbuffer_A.b; }
+//
+// layoutBuffer will create the struct A type.
+// replaceBuffer will replace use of global variable a and b with cbuffer_A.a
+// and cbuffer_A.b.
+//
+void layoutBuffer(CGHLSLRuntime::Buffer &Buf, const DataLayout &DL) {
+ if (Buf.Constants.empty())
+ return;
+
+ std::vector<llvm::Type *> EltTys;
+ for (auto &Const : Buf.Constants) {
+ GlobalVariable *GV = Const.first;
+ Const.second = EltTys.size();
+ llvm::Type *Ty = GV->getValueType();
+ EltTys.emplace_back(Ty);
+ }
+ Buf.LayoutStruct = llvm::StructType::get(EltTys[0]->getContext(), EltTys);
+}
+
+GlobalVariable *replaceBuffer(CGHLSLRuntime::Buffer &Buf) {
+ // Create global variable for CB.
+ GlobalVariable *CBGV = new GlobalVariable(
+ Buf.LayoutStruct, /*isConstant*/ true,
+ GlobalValue::LinkageTypes::ExternalLinkage, nullptr,
+ llvm::formatv("{0}{1}", Buf.Name, Buf.IsCBuffer ? ".cb." : ".tb."),
+ GlobalValue::NotThreadLocal);
+
+ IRBuilder<> B(CBGV->getContext());
+ Value *ZeroIdx = B.getInt32(0);
+ // Replace Const use with CB use.
+ for (auto &[GV, Offset] : Buf.Constants) {
+ Value *GEP =
+ B.CreateGEP(Buf.LayoutStruct, CBGV, {ZeroIdx, B.getInt32(Offset)});
+
+ assert(Buf.LayoutStruct->getElementType(Offset) == GV->getValueType() &&
+ "constant type mismatch");
+
+ // Replace.
+ GV->replaceAllUsesWith(GEP);
+ // Erase GV.
+ GV->removeDeadConstantUsers();
+ GV->eraseFromParent();
+ }
+ return CBGV;
+}
+
+} // namespace
+
+void CGHLSLRuntime::addConstant(VarDecl *D, Buffer &CB) {
+ if (D->getStorageClass() == SC_Static) {
+ // For static inside cbuffer, take as global static.
+ // Don't add to cbuffer.
+ CGM.EmitGlobal(D);
+ return;
+ }
+
+ auto *GV = cast<GlobalVariable>(CGM.GetAddrOfGlobalVar(D));
+ // Add debug info for constVal.
+ if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
+ if (CGM.getCodeGenOpts().getDebugInfo() >=
+ codegenoptions::DebugInfoKind::LimitedDebugInfo)
+ DI->EmitGlobalVariable(cast<GlobalVariable>(GV), D);
+
+ // FIXME: support packoffset.
+ // See https://github.com/llvm/llvm-project/issues/57914.
+ uint32_t Offset = 0;
+ bool HasUserOffset = false;
+
+ unsigned LowerBound = HasUserOffset ? Offset : UINT_MAX;
+ CB.Constants.emplace_back(std::make_pair(GV, LowerBound));
+}
+
+void CGHLSLRuntime::addBufferDecls(const DeclContext *DC, Buffer &CB) {
+ for (Decl *it : DC->decls()) {
+ if (auto *ConstDecl = dyn_cast<VarDecl>(it)) {
+ addConstant(ConstDecl, CB);
+ } else if (isa<CXXRecordDecl, EmptyDecl>(it)) {
+ // Nothing to do for this declaration.
+ } else if (isa<FunctionDecl>(it)) {
+ // A function within an cbuffer is effectively a top-level function,
+ // as it only refers to globally scoped declarations.
+ CGM.EmitTopLevelDecl(it);
+ }
+ }
+}
+
+void CGHLSLRuntime::addBuffer(const HLSLBufferDecl *D) {
+ Buffers.emplace_back(Buffer(D));
+ addBufferDecls(D, Buffers.back());
+}
+
+void CGHLSLRuntime::finishCodeGen() {
+ auto &TargetOpts = CGM.getTarget().getTargetOpts();
+ llvm::Module &M = CGM.getModule();
+ Triple T(M.getTargetTriple());
+ if (T.getArch() == Triple::ArchType::dxil)
+ addDxilValVersion(TargetOpts.DxilValidatorVersion, M);
+
+ generateGlobalCtorDtorCalls();
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0)
+ addDisableOptimizations(M);
+
+ const DataLayout &DL = M.getDataLayout();
+
+ for (auto &Buf : Buffers) {
+ layoutBuffer(Buf, DL);
+ GlobalVariable *GV = replaceBuffer(Buf);
+ M.insertGlobalVariable(GV);
+ llvm::hlsl::ResourceClass RC = Buf.IsCBuffer
+ ? llvm::hlsl::ResourceClass::CBuffer
+ : llvm::hlsl::ResourceClass::SRV;
+ llvm::hlsl::ResourceKind RK = Buf.IsCBuffer
+ ? llvm::hlsl::ResourceKind::CBuffer
+ : llvm::hlsl::ResourceKind::TBuffer;
+ addBufferResourceAnnotation(GV, RC, RK, /*IsROV=*/false,
+ llvm::hlsl::ElementType::Invalid, Buf.Binding);
+ }
+}
+
+CGHLSLRuntime::Buffer::Buffer(const HLSLBufferDecl *D)
+ : Name(D->getName()), IsCBuffer(D->isCBuffer()),
+ Binding(D->getAttr<HLSLResourceBindingAttr>()) {}
+
+void CGHLSLRuntime::addBufferResourceAnnotation(llvm::GlobalVariable *GV,
+ llvm::hlsl::ResourceClass RC,
+ llvm::hlsl::ResourceKind RK,
+ bool IsROV,
+ llvm::hlsl::ElementType ET,
+ BufferResBinding &Binding) {
+ llvm::Module &M = CGM.getModule();
+
+ NamedMDNode *ResourceMD = nullptr;
+ switch (RC) {
+ case llvm::hlsl::ResourceClass::UAV:
+ ResourceMD = M.getOrInsertNamedMetadata("hlsl.uavs");
+ break;
+ case llvm::hlsl::ResourceClass::SRV:
+ ResourceMD = M.getOrInsertNamedMetadata("hlsl.srvs");
+ break;
+ case llvm::hlsl::ResourceClass::CBuffer:
+ ResourceMD = M.getOrInsertNamedMetadata("hlsl.cbufs");
+ break;
+ default:
+ assert(false && "Unsupported buffer type!");
+ return;
+ }
+ assert(ResourceMD != nullptr &&
+ "ResourceMD must have been set by the switch above.");
+
+ llvm::hlsl::FrontendResource Res(
+ GV, RK, ET, IsROV, Binding.Reg.value_or(UINT_MAX), Binding.Space);
+ ResourceMD->addOperand(Res.getMetadata());
+}
+
+static llvm::hlsl::ElementType
+calculateElementType(const ASTContext &Context, const clang::Type *ResourceTy) {
+ using llvm::hlsl::ElementType;
+
+ // TODO: We may need to update this when we add things like ByteAddressBuffer
+ // that don't have a template parameter (or, indeed, an element type).
+ const auto *TST = ResourceTy->getAs<TemplateSpecializationType>();
+ assert(TST && "Resource types must be template specializations");
+ ArrayRef<TemplateArgument> Args = TST->template_arguments();
+ assert(!Args.empty() && "Resource has no element type");
+
+ // At this point we have a resource with an element type, so we can assume
+ // that it's valid or we would have diagnosed the error earlier.
+ QualType ElTy = Args[0].getAsType();
+
+ // We should either have a basic type or a vector of a basic type.
+ if (const auto *VecTy = ElTy->getAs<clang::VectorType>())
+ ElTy = VecTy->getElementType();
+
+ if (ElTy->isSignedIntegerType()) {
+ switch (Context.getTypeSize(ElTy)) {
+ case 16:
+ return ElementType::I16;
+ case 32:
+ return ElementType::I32;
+ case 64:
+ return ElementType::I64;
+ }
+ } else if (ElTy->isUnsignedIntegerType()) {
+ switch (Context.getTypeSize(ElTy)) {
+ case 16:
+ return ElementType::U16;
+ case 32:
+ return ElementType::U32;
+ case 64:
+ return ElementType::U64;
+ }
+ } else if (ElTy->isSpecificBuiltinType(BuiltinType::Half))
+ return ElementType::F16;
+ else if (ElTy->isSpecificBuiltinType(BuiltinType::Float))
+ return ElementType::F32;
+ else if (ElTy->isSpecificBuiltinType(BuiltinType::Double))
+ return ElementType::F64;
+
+ // TODO: We need to handle unorm/snorm float types here once we support them
+ llvm_unreachable("Invalid element type for resource");
+}
+
+void CGHLSLRuntime::annotateHLSLResource(const VarDecl *D, GlobalVariable *GV) {
+ const Type *Ty = D->getType()->getPointeeOrArrayElementType();
+ if (!Ty)
+ return;
+ const auto *RD = Ty->getAsCXXRecordDecl();
+ if (!RD)
+ return;
+ const auto *Attr = RD->getAttr<HLSLResourceAttr>();
+ if (!Attr)
+ return;
+
+ llvm::hlsl::ResourceClass RC = Attr->getResourceClass();
+ llvm::hlsl::ResourceKind RK = Attr->getResourceKind();
+ bool IsROV = Attr->getIsROV();
+ llvm::hlsl::ElementType ET = calculateElementType(CGM.getContext(), Ty);
+
+ BufferResBinding Binding(D->getAttr<HLSLResourceBindingAttr>());
+ addBufferResourceAnnotation(GV, RC, RK, IsROV, ET, Binding);
+}
+
+CGHLSLRuntime::BufferResBinding::BufferResBinding(
+ HLSLResourceBindingAttr *Binding) {
+ if (Binding) {
+ llvm::APInt RegInt(64, 0);
+ Binding->getSlot().substr(1).getAsInteger(10, RegInt);
+ Reg = RegInt.getLimitedValue();
+ llvm::APInt SpaceInt(64, 0);
+ Binding->getSpace().substr(5).getAsInteger(10, SpaceInt);
+ Space = SpaceInt.getLimitedValue();
+ } else {
+ Space = 0;
+ }
+}
+
+void clang::CodeGen::CGHLSLRuntime::setHLSLEntryAttributes(
+ const FunctionDecl *FD, llvm::Function *Fn) {
+ const auto *ShaderAttr = FD->getAttr<HLSLShaderAttr>();
+ assert(ShaderAttr && "All entry functions must have a HLSLShaderAttr");
+ const StringRef ShaderAttrKindStr = "hlsl.shader";
+ Fn->addFnAttr(ShaderAttrKindStr,
+ ShaderAttr->ConvertShaderTypeToStr(ShaderAttr->getType()));
+ if (HLSLNumThreadsAttr *NumThreadsAttr = FD->getAttr<HLSLNumThreadsAttr>()) {
+ const StringRef NumThreadsKindStr = "hlsl.numthreads";
+ std::string NumThreadsStr =
+ formatv("{0},{1},{2}", NumThreadsAttr->getX(), NumThreadsAttr->getY(),
+ NumThreadsAttr->getZ());
+ Fn->addFnAttr(NumThreadsKindStr, NumThreadsStr);
+ }
+}
+
+static Value *buildVectorInput(IRBuilder<> &B, Function *F, llvm::Type *Ty) {
+ if (const auto *VT = dyn_cast<FixedVectorType>(Ty)) {
+ Value *Result = PoisonValue::get(Ty);
+ for (unsigned I = 0; I < VT->getNumElements(); ++I) {
+ Value *Elt = B.CreateCall(F, {B.getInt32(I)});
+ Result = B.CreateInsertElement(Result, Elt, I);
+ }
+ return Result;
+ }
+ return B.CreateCall(F, {B.getInt32(0)});
+}
+
+llvm::Value *CGHLSLRuntime::emitInputSemantic(IRBuilder<> &B,
+ const ParmVarDecl &D,
+ llvm::Type *Ty) {
+ assert(D.hasAttrs() && "Entry parameter missing annotation attribute!");
+ if (D.hasAttr<HLSLSV_GroupIndexAttr>()) {
+ llvm::Function *DxGroupIndex =
+ CGM.getIntrinsic(Intrinsic::dx_flattened_thread_id_in_group);
+ return B.CreateCall(FunctionCallee(DxGroupIndex));
+ }
+ if (D.hasAttr<HLSLSV_DispatchThreadIDAttr>()) {
+ llvm::Function *DxThreadID = CGM.getIntrinsic(Intrinsic::dx_thread_id);
+ return buildVectorInput(B, DxThreadID, Ty);
+ }
+ assert(false && "Unhandled parameter attribute");
+ return nullptr;
+}
+
+void CGHLSLRuntime::emitEntryFunction(const FunctionDecl *FD,
+ llvm::Function *Fn) {
+ llvm::Module &M = CGM.getModule();
+ llvm::LLVMContext &Ctx = M.getContext();
+ auto *EntryTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx), false);
+ Function *EntryFn =
+ Function::Create(EntryTy, Function::ExternalLinkage, FD->getName(), &M);
+
+ // Copy function attributes over, we have no argument or return attributes
+ // that can be valid on the real entry.
+ AttributeList NewAttrs = AttributeList::get(Ctx, AttributeList::FunctionIndex,
+ Fn->getAttributes().getFnAttrs());
+ EntryFn->setAttributes(NewAttrs);
+ setHLSLEntryAttributes(FD, EntryFn);
+
+ // Set the called function as internal linkage.
+ Fn->setLinkage(GlobalValue::InternalLinkage);
+
+ BasicBlock *BB = BasicBlock::Create(Ctx, "entry", EntryFn);
+ IRBuilder<> B(BB);
+ llvm::SmallVector<Value *> Args;
+ // FIXME: support struct parameters where semantics are on members.
+ // See: https://github.com/llvm/llvm-project/issues/57874
+ unsigned SRetOffset = 0;
+ for (const auto &Param : Fn->args()) {
+ if (Param.hasStructRetAttr()) {
+ // FIXME: support output.
+ // See: https://github.com/llvm/llvm-project/issues/57874
+ SRetOffset = 1;
+ Args.emplace_back(PoisonValue::get(Param.getType()));
+ continue;
+ }
+ const ParmVarDecl *PD = FD->getParamDecl(Param.getArgNo() - SRetOffset);
+ Args.push_back(emitInputSemantic(B, *PD, Param.getType()));
+ }
+
+ CallInst *CI = B.CreateCall(FunctionCallee(Fn), Args);
+ (void)CI;
+ // FIXME: Handle codegen for return type semantics.
+ // See: https://github.com/llvm/llvm-project/issues/57875
+ B.CreateRetVoid();
+}
+
+static void gatherFunctions(SmallVectorImpl<Function *> &Fns, llvm::Module &M,
+ bool CtorOrDtor) {
+ const auto *GV =
+ M.getNamedGlobal(CtorOrDtor ? "llvm.global_ctors" : "llvm.global_dtors");
+ if (!GV)
+ return;
+ const auto *CA = dyn_cast<ConstantArray>(GV->getInitializer());
+ if (!CA)
+ return;
+ // The global_ctor array elements are a struct [Priority, Fn *, COMDat].
+ // HLSL neither supports priorities or COMDat values, so we will check those
+ // in an assert but not handle them.
+
+ llvm::SmallVector<Function *> CtorFns;
+ for (const auto &Ctor : CA->operands()) {
+ if (isa<ConstantAggregateZero>(Ctor))
+ continue;
+ ConstantStruct *CS = cast<ConstantStruct>(Ctor);
+
+ assert(cast<ConstantInt>(CS->getOperand(0))->getValue() == 65535 &&
+ "HLSL doesn't support setting priority for global ctors.");
+ assert(isa<ConstantPointerNull>(CS->getOperand(2)) &&
+ "HLSL doesn't support COMDat for global ctors.");
+ Fns.push_back(cast<Function>(CS->getOperand(1)));
+ }
+}
+
+void CGHLSLRuntime::generateGlobalCtorDtorCalls() {
+ llvm::Module &M = CGM.getModule();
+ SmallVector<Function *> CtorFns;
+ SmallVector<Function *> DtorFns;
+ gatherFunctions(CtorFns, M, true);
+ gatherFunctions(DtorFns, M, false);
+
+ // Insert a call to the global constructor at the beginning of the entry block
+ // to externally exported functions. This is a bit of a hack, but HLSL allows
+ // global constructors, but doesn't support driver initialization of globals.
+ for (auto &F : M.functions()) {
+ if (!F.hasFnAttribute("hlsl.shader"))
+ continue;
+ IRBuilder<> B(&F.getEntryBlock(), F.getEntryBlock().begin());
+ for (auto *Fn : CtorFns)
+ B.CreateCall(FunctionCallee(Fn));
+
+ // Insert global dtors before the terminator of the last instruction
+ B.SetInsertPoint(F.back().getTerminator());
+ for (auto *Fn : DtorFns)
+ B.CreateCall(FunctionCallee(Fn));
+ }
+
+ // No need to keep global ctors/dtors for non-lib profile after call to
+ // ctors/dtors added for entry.
+ Triple T(M.getTargetTriple());
+ if (T.getEnvironment() != Triple::EnvironmentType::Library) {
+ if (auto *GV = M.getNamedGlobal("llvm.global_ctors"))
+ GV->eraseFromParent();
+ if (auto *GV = M.getNamedGlobal("llvm.global_dtors"))
+ GV->eraseFromParent();
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.h b/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.h
new file mode 100644
index 000000000000..bffefb66740a
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.h
@@ -0,0 +1,105 @@
+//===----- CGHLSLRuntime.h - Interface to HLSL Runtimes -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for HLSL code generation. Concrete
+// subclasses of this implement code generation for specific HLSL
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGHLSLRUNTIME_H
+#define LLVM_CLANG_LIB_CODEGEN_CGHLSLRUNTIME_H
+
+#include "llvm/IR/IRBuilder.h"
+
+#include "clang/Basic/HLSLRuntime.h"
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Frontend/HLSL/HLSLResource.h"
+
+#include <optional>
+#include <vector>
+
+namespace llvm {
+class GlobalVariable;
+class Function;
+class StructType;
+} // namespace llvm
+
+namespace clang {
+class VarDecl;
+class ParmVarDecl;
+class HLSLBufferDecl;
+class HLSLResourceBindingAttr;
+class Type;
+class DeclContext;
+
+class FunctionDecl;
+
+namespace CodeGen {
+
+class CodeGenModule;
+
+class CGHLSLRuntime {
+public:
+ struct BufferResBinding {
+ // The ID like 2 in register(b2, space1).
+ std::optional<unsigned> Reg;
+ // The Space like 1 is register(b2, space1).
+ // Default value is 0.
+ unsigned Space;
+ BufferResBinding(HLSLResourceBindingAttr *Attr);
+ };
+ struct Buffer {
+ Buffer(const HLSLBufferDecl *D);
+ llvm::StringRef Name;
+ // IsCBuffer - Whether the buffer is a cbuffer (and not a tbuffer).
+ bool IsCBuffer;
+ BufferResBinding Binding;
+ // Global variable and offset for each constant.
+ std::vector<std::pair<llvm::GlobalVariable *, unsigned>> Constants;
+ llvm::StructType *LayoutStruct = nullptr;
+ };
+
+protected:
+ CodeGenModule &CGM;
+
+ llvm::Value *emitInputSemantic(llvm::IRBuilder<> &B, const ParmVarDecl &D,
+ llvm::Type *Ty);
+
+public:
+ CGHLSLRuntime(CodeGenModule &CGM) : CGM(CGM) {}
+ virtual ~CGHLSLRuntime() {}
+
+ void annotateHLSLResource(const VarDecl *D, llvm::GlobalVariable *GV);
+ void generateGlobalCtorDtorCalls();
+
+ void addBuffer(const HLSLBufferDecl *D);
+ void finishCodeGen();
+
+ void setHLSLEntryAttributes(const FunctionDecl *FD, llvm::Function *Fn);
+
+ void emitEntryFunction(const FunctionDecl *FD, llvm::Function *Fn);
+ void setHLSLFunctionAttributes(llvm::Function *, const FunctionDecl *);
+
+private:
+ void addBufferResourceAnnotation(llvm::GlobalVariable *GV,
+ llvm::hlsl::ResourceClass RC,
+ llvm::hlsl::ResourceKind RK, bool IsROV,
+ llvm::hlsl::ElementType ET,
+ BufferResBinding &Binding);
+ void addConstant(VarDecl *D, Buffer &CB);
+ void addBufferDecls(const DeclContext *DC, Buffer &CB);
+ llvm::SmallVector<Buffer> Buffers;
+};
+
+} // namespace CodeGen
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.cpp
index 12a6cd8da603..0d4800b90a2f 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.cpp
@@ -17,6 +17,7 @@
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
+#include <optional>
using namespace clang::CodeGen;
using namespace llvm;
@@ -37,7 +38,7 @@ MDNode *LoopInfo::createPipeliningMetadata(const LoopAttributes &Attrs,
bool &HasUserTransforms) {
LLVMContext &Ctx = Header->getContext();
- Optional<bool> Enabled;
+ std::optional<bool> Enabled;
if (Attrs.PipelineDisabled)
Enabled = false;
else if (Attrs.PipelineInitiationInterval != 0)
@@ -82,11 +83,11 @@ LoopInfo::createPartialUnrollMetadata(const LoopAttributes &Attrs,
bool &HasUserTransforms) {
LLVMContext &Ctx = Header->getContext();
- Optional<bool> Enabled;
+ std::optional<bool> Enabled;
if (Attrs.UnrollEnable == LoopAttributes::Disable)
Enabled = false;
else if (Attrs.UnrollEnable == LoopAttributes::Full)
- Enabled = None;
+ Enabled = std::nullopt;
else if (Attrs.UnrollEnable != LoopAttributes::Unspecified ||
Attrs.UnrollCount != 0)
Enabled = true;
@@ -144,7 +145,7 @@ LoopInfo::createUnrollAndJamMetadata(const LoopAttributes &Attrs,
bool &HasUserTransforms) {
LLVMContext &Ctx = Header->getContext();
- Optional<bool> Enabled;
+ std::optional<bool> Enabled;
if (Attrs.UnrollAndJamEnable == LoopAttributes::Disable)
Enabled = false;
else if (Attrs.UnrollAndJamEnable == LoopAttributes::Enable ||
@@ -212,7 +213,7 @@ LoopInfo::createLoopVectorizeMetadata(const LoopAttributes &Attrs,
bool &HasUserTransforms) {
LLVMContext &Ctx = Header->getContext();
- Optional<bool> Enabled;
+ std::optional<bool> Enabled;
if (Attrs.VectorizeEnable == LoopAttributes::Disable)
Enabled = false;
else if (Attrs.VectorizeEnable != LoopAttributes::Unspecified ||
@@ -330,7 +331,7 @@ LoopInfo::createLoopDistributeMetadata(const LoopAttributes &Attrs,
bool &HasUserTransforms) {
LLVMContext &Ctx = Header->getContext();
- Optional<bool> Enabled;
+ std::optional<bool> Enabled;
if (Attrs.DistributeEnable == LoopAttributes::Disable)
Enabled = false;
if (Attrs.DistributeEnable == LoopAttributes::Enable)
@@ -380,7 +381,7 @@ MDNode *LoopInfo::createFullUnrollMetadata(const LoopAttributes &Attrs,
bool &HasUserTransforms) {
LLVMContext &Ctx = Header->getContext();
- Optional<bool> Enabled;
+ std::optional<bool> Enabled;
if (Attrs.UnrollEnable == LoopAttributes::Disable)
Enabled = false;
else if (Attrs.UnrollEnable == LoopAttributes::Full)
@@ -439,6 +440,14 @@ MDNode *LoopInfo::createMetadata(
Ctx, {MDString::get(Ctx, "llvm.loop.parallel_accesses"), AccGroup}));
}
+ // Setting clang::code_align attribute.
+ if (Attrs.CodeAlign > 0) {
+ Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.align"),
+ ConstantAsMetadata::get(ConstantInt::get(
+ llvm::Type::getInt32Ty(Ctx), Attrs.CodeAlign))};
+ LoopProperties.push_back(MDNode::get(Ctx, Vals));
+ }
+
LoopProperties.insert(LoopProperties.end(), AdditionalLoopProperties.begin(),
AdditionalLoopProperties.end());
return createFullUnrollMetadata(Attrs, LoopProperties, HasUserTransforms);
@@ -452,7 +461,7 @@ LoopAttributes::LoopAttributes(bool IsParallel)
VectorizeScalable(LoopAttributes::Unspecified), InterleaveCount(0),
UnrollCount(0), UnrollAndJamCount(0),
DistributeEnable(LoopAttributes::Unspecified), PipelineDisabled(false),
- PipelineInitiationInterval(0), MustProgress(false) {}
+ PipelineInitiationInterval(0), CodeAlign(0), MustProgress(false) {}
void LoopAttributes::clear() {
IsParallel = false;
@@ -468,6 +477,7 @@ void LoopAttributes::clear() {
DistributeEnable = LoopAttributes::Unspecified;
PipelineDisabled = false;
PipelineInitiationInterval = 0;
+ CodeAlign = 0;
MustProgress = false;
}
@@ -492,11 +502,11 @@ LoopInfo::LoopInfo(BasicBlock *Header, const LoopAttributes &Attrs,
Attrs.VectorizeEnable == LoopAttributes::Unspecified &&
Attrs.UnrollEnable == LoopAttributes::Unspecified &&
Attrs.UnrollAndJamEnable == LoopAttributes::Unspecified &&
- Attrs.DistributeEnable == LoopAttributes::Unspecified && !StartLoc &&
- !EndLoc && !Attrs.MustProgress)
+ Attrs.DistributeEnable == LoopAttributes::Unspecified &&
+ Attrs.CodeAlign == 0 && !StartLoc && !EndLoc && !Attrs.MustProgress)
return;
- TempLoopID = MDNode::getTemporary(Header->getContext(), None);
+ TempLoopID = MDNode::getTemporary(Header->getContext(), std::nullopt);
}
void LoopInfo::finish() {
@@ -787,6 +797,15 @@ void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
}
}
+ // Identify loop attribute 'code_align' from Attrs.
+ // For attribute code_align:
+ // n - 'llvm.loop.align i32 n' metadata will be emitted.
+ if (const auto *CodeAlign = getSpecificAttr<const CodeAlignAttr>(Attrs)) {
+ const auto *CE = cast<ConstantExpr>(CodeAlign->getAlignment());
+ llvm::APSInt ArgVal = CE->getResultAsAPSInt();
+ setCodeAlign(ArgVal.getSExtValue());
+ }
+
setMustProgress(MustProgress);
if (CGOpts.OptimizationLevel > 0)
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.h b/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.h
index 856e892f712e..a1c8c7e5307f 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGLoopInfo.h
@@ -79,6 +79,9 @@ struct LoopAttributes {
/// Value for llvm.loop.pipeline.iicount metadata.
unsigned PipelineInitiationInterval;
+ /// Value for 'llvm.loop.align' metadata.
+ unsigned CodeAlign;
+
/// Value for whether the loop is required to make progress.
bool MustProgress;
};
@@ -282,6 +285,9 @@ public:
StagedAttrs.PipelineInitiationInterval = C;
}
+ /// Set value of code align for the next loop pushed.
+ void setCodeAlign(unsigned C) { StagedAttrs.CodeAlign = C; }
+
/// Set no progress for the next loop pushed.
void setMustProgress(bool P) { StagedAttrs.MustProgress = P; }
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp
index ad505fc5a0d4..75c1d7fbea84 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp
@@ -313,10 +313,9 @@ static const CGFunctionInfo &getFunctionInfo(CodeGenModule &CGM,
for (unsigned I = 0; I < N; ++I)
Params.push_back(ImplicitParamDecl::Create(
Ctx, nullptr, SourceLocation(), &Ctx.Idents.get(ValNameStr[I]), ParamTy,
- ImplicitParamDecl::Other));
+ ImplicitParamKind::Other));
- for (auto &P : Params)
- Args.push_back(P);
+ llvm::append_range(Args, Params);
return CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
}
@@ -324,11 +323,11 @@ static const CGFunctionInfo &getFunctionInfo(CodeGenModule &CGM,
template <size_t N, size_t... Ints>
static std::array<Address, N> getParamAddrs(std::index_sequence<Ints...> IntSeq,
std::array<CharUnits, N> Alignments,
- FunctionArgList Args,
+ const FunctionArgList &Args,
CodeGenFunction *CGF) {
- return std::array<Address, N>{{
- Address(CGF->Builder.CreateLoad(CGF->GetAddrOfLocalVar(Args[Ints])),
- Alignments[Ints])...}};
+ return std::array<Address, N>{
+ {Address(CGF->Builder.CreateLoad(CGF->GetAddrOfLocalVar(Args[Ints])),
+ CGF->VoidPtrTy, Alignments[Ints], KnownNonNull)...}};
}
// Template classes that are used as bases for classes that emit special
@@ -366,11 +365,8 @@ template <class Derived> struct GenFuncBase {
llvm::ConstantInt::get(NumElts->getType(), BaseEltSize);
llvm::Value *SizeInBytes =
CGF.Builder.CreateNUWMul(BaseEltSizeVal, NumElts);
- Address BC = CGF.Builder.CreateBitCast(DstAddr, CGF.CGM.Int8PtrTy);
- llvm::Value *DstArrayEnd =
- CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BC.getPointer(), SizeInBytes);
- DstArrayEnd = CGF.Builder.CreateBitCast(DstArrayEnd, CGF.CGM.Int8PtrPtrTy,
- "dstarray.end");
+ llvm::Value *DstArrayEnd = CGF.Builder.CreateInBoundsGEP(
+ CGF.Int8Ty, DstAddr.getPointer(), SizeInBytes);
llvm::BasicBlock *PreheaderBB = CGF.Builder.GetInsertBlock();
// Create the header block and insert the phi instructions.
@@ -400,8 +396,9 @@ template <class Derived> struct GenFuncBase {
std::array<Address, N> NewAddrs = Addrs;
for (unsigned I = 0; I < N; ++I)
- NewAddrs[I] = Address(
- PHIs[I], StartAddrs[I].getAlignment().alignmentAtOffset(EltSize));
+ NewAddrs[I] =
+ Address(PHIs[I], CGF.Int8PtrTy,
+ StartAddrs[I].getAlignment().alignmentAtOffset(EltSize));
EltQT = IsVolatile ? EltQT.withVolatile() : EltQT;
this->asDerived().visitWithKind(FK, EltQT, nullptr, CharUnits::Zero(),
@@ -426,9 +423,9 @@ template <class Derived> struct GenFuncBase {
assert(Addr.isValid() && "invalid address");
if (Offset.getQuantity() == 0)
return Addr;
- Addr = CGF->Builder.CreateBitCast(Addr, CGF->CGM.Int8PtrTy);
+ Addr = Addr.withElementType(CGF->CGM.Int8Ty);
Addr = CGF->Builder.CreateConstInBoundsGEP(Addr, Offset.getQuantity());
- return CGF->Builder.CreateBitCast(Addr, CGF->CGM.Int8PtrPtrTy);
+ return Addr.withElementType(CGF->CGM.Int8PtrTy);
}
Address getAddrWithOffset(Address Addr, CharUnits StructFieldOffset,
@@ -491,9 +488,7 @@ template <class Derived> struct GenFuncBase {
for (unsigned I = 0; I < N; ++I) {
Alignments[I] = Addrs[I].getAlignment();
- Ptrs[I] =
- CallerCGF.Builder.CreateBitCast(Addrs[I], CallerCGF.CGM.Int8PtrPtrTy)
- .getPointer();
+ Ptrs[I] = Addrs[I].getPointer();
}
if (llvm::Function *F =
@@ -523,20 +518,19 @@ struct GenBinaryFunc : CopyStructVisitor<Derived, IsMove>,
Address SrcAddr = this->getAddrWithOffset(Addrs[SrcIdx], this->Start);
// Emit memcpy.
- if (Size.getQuantity() >= 16 || !llvm::isPowerOf2_32(Size.getQuantity())) {
+ if (Size.getQuantity() >= 16 ||
+ !llvm::has_single_bit<uint32_t>(Size.getQuantity())) {
llvm::Value *SizeVal =
llvm::ConstantInt::get(this->CGF->SizeTy, Size.getQuantity());
- DstAddr =
- this->CGF->Builder.CreateElementBitCast(DstAddr, this->CGF->Int8Ty);
- SrcAddr =
- this->CGF->Builder.CreateElementBitCast(SrcAddr, this->CGF->Int8Ty);
+ DstAddr = DstAddr.withElementType(this->CGF->Int8Ty);
+ SrcAddr = SrcAddr.withElementType(this->CGF->Int8Ty);
this->CGF->Builder.CreateMemCpy(DstAddr, SrcAddr, SizeVal, false);
} else {
llvm::Type *Ty = llvm::Type::getIntNTy(
this->CGF->getLLVMContext(),
Size.getQuantity() * this->CGF->getContext().getCharWidth());
- DstAddr = this->CGF->Builder.CreateElementBitCast(DstAddr, Ty);
- SrcAddr = this->CGF->Builder.CreateElementBitCast(SrcAddr, Ty);
+ DstAddr = DstAddr.withElementType(Ty);
+ SrcAddr = SrcAddr.withElementType(Ty);
llvm::Value *SrcVal = this->CGF->Builder.CreateLoad(SrcAddr, false);
this->CGF->Builder.CreateStore(SrcVal, DstAddr, false);
}
@@ -554,19 +548,19 @@ struct GenBinaryFunc : CopyStructVisitor<Derived, IsMove>,
return;
QualType RT = QualType(FD->getParent()->getTypeForDecl(), 0);
- llvm::PointerType *PtrTy = this->CGF->ConvertType(RT)->getPointerTo();
+ llvm::Type *Ty = this->CGF->ConvertType(RT);
Address DstAddr = this->getAddrWithOffset(Addrs[DstIdx], Offset);
- LValue DstBase = this->CGF->MakeAddrLValue(
- this->CGF->Builder.CreateBitCast(DstAddr, PtrTy), FT);
+ LValue DstBase =
+ this->CGF->MakeAddrLValue(DstAddr.withElementType(Ty), FT);
DstLV = this->CGF->EmitLValueForField(DstBase, FD);
Address SrcAddr = this->getAddrWithOffset(Addrs[SrcIdx], Offset);
- LValue SrcBase = this->CGF->MakeAddrLValue(
- this->CGF->Builder.CreateBitCast(SrcAddr, PtrTy), FT);
+ LValue SrcBase =
+ this->CGF->MakeAddrLValue(SrcAddr.withElementType(Ty), FT);
SrcLV = this->CGF->EmitLValueForField(SrcBase, FD);
} else {
- llvm::PointerType *Ty = this->CGF->ConvertTypeForMem(FT)->getPointerTo();
- Address DstAddr = this->CGF->Builder.CreateBitCast(Addrs[DstIdx], Ty);
- Address SrcAddr = this->CGF->Builder.CreateBitCast(Addrs[SrcIdx], Ty);
+ llvm::Type *Ty = this->CGF->ConvertTypeForMem(FT);
+ Address DstAddr = Addrs[DstIdx].withElementType(Ty);
+ Address SrcAddr = Addrs[SrcIdx].withElementType(Ty);
DstLV = this->CGF->MakeAddrLValue(DstAddr, FT);
SrcLV = this->CGF->MakeAddrLValue(SrcAddr, FT);
}
@@ -664,7 +658,7 @@ struct GenDefaultInitialize
llvm::Constant *SizeVal = CGF->Builder.getInt64(Size.getQuantity());
Address DstAddr = getAddrWithOffset(Addrs[DstIdx], CurStructOffset, FD);
- Address Loc = CGF->Builder.CreateElementBitCast(DstAddr, CGF->Int8Ty);
+ Address Loc = DstAddr.withElementType(CGF->Int8Ty);
CGF->Builder.CreateMemSet(Loc, CGF->Builder.getInt8(0), SizeVal,
IsVolatile);
}
@@ -816,8 +810,7 @@ void CodeGenFunction::destroyNonTrivialCStruct(CodeGenFunction &CGF,
// such structure.
void CodeGenFunction::defaultInitNonTrivialCStructVar(LValue Dst) {
GenDefaultInitialize Gen(getContext());
- Address DstPtr =
- Builder.CreateBitCast(Dst.getAddress(*this), CGM.Int8PtrPtrTy);
+ Address DstPtr = Dst.getAddress(*this).withElementType(CGM.Int8PtrTy);
Gen.setCGF(this);
QualType QT = Dst.getType();
QT = Dst.isVolatile() ? QT.withVolatile() : QT;
@@ -830,7 +823,7 @@ static void callSpecialFunction(G &&Gen, StringRef FuncName, QualType QT,
std::array<Address, N> Addrs) {
auto SetArtificialLoc = ApplyDebugLocation::CreateArtificial(CGF);
for (unsigned I = 0; I < N; ++I)
- Addrs[I] = CGF.Builder.CreateBitCast(Addrs[I], CGF.CGM.Int8PtrPtrTy);
+ Addrs[I] = Addrs[I].withElementType(CGF.CGM.Int8PtrTy);
QT = IsVolatile ? QT.withVolatile() : QT;
Gen.callFunc(FuncName, QT, Addrs, CGF);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
index 937a0e8a3b69..03fc0ec7ff54 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
@@ -22,11 +22,14 @@
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/CodeGen/CodeGenABITypes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
+#include <optional>
using namespace clang;
using namespace CodeGen;
@@ -49,8 +52,7 @@ llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
{
llvm::Constant *C =
CGM.getObjCRuntime().GenerateConstantString(E->getString()).getPointer();
- // FIXME: This bitcast should just be made an invariant on the Runtime.
- return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
+ return C;
}
/// EmitObjCBoxedExpr - This routine generates code to call
@@ -92,8 +94,9 @@ CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
// and cast value to correct type
Address Temporary = CreateMemTemp(SubExpr->getType());
EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true);
- Address BitCast = Builder.CreateBitCast(Temporary, ConvertType(ArgQT));
- Args.add(RValue::get(BitCast.getPointer()), ArgQT);
+ llvm::Value *BitCast =
+ Builder.CreateBitCast(Temporary.getPointer(), ConvertType(ArgQT));
+ Args.add(RValue::get(BitCast), ArgQT);
// Create char array to store type encoding
std::string Str;
@@ -136,8 +139,8 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
LValue LV = MakeNaturalAlignAddrLValue(Constant, IdTy);
llvm::Value *Ptr = EmitLoadOfScalar(LV, E->getBeginLoc());
cast<llvm::LoadInst>(Ptr)->setMetadata(
- CGM.getModule().getMDKindID("invariant.load"),
- llvm::MDNode::get(getLLVMContext(), None));
+ llvm::LLVMContext::MD_invariant_load,
+ llvm::MDNode::get(getLLVMContext(), std::nullopt));
return Builder.CreateBitCast(Ptr, ConvertType(E->getType()));
}
@@ -145,9 +148,9 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()),
NumElements);
QualType ElementType = Context.getObjCIdType().withConst();
- QualType ElementArrayType
- = Context.getConstantArrayType(ElementType, APNumElements, nullptr,
- ArrayType::Normal, /*IndexTypeQuals=*/0);
+ QualType ElementArrayType = Context.getConstantArrayType(
+ ElementType, APNumElements, nullptr, ArraySizeModifier::Normal,
+ /*IndexTypeQuals=*/0);
// Allocate the temporary array(s).
Address Objects = CreateMemTemp(ElementArrayType, "objects");
@@ -218,6 +221,7 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
QualType ResultType = E->getType();
const ObjCObjectPointerType *InterfacePointerType
= ResultType->getAsObjCInterfacePointerType();
+ assert(InterfacePointerType && "Unexpected InterfacePointerType - null");
ObjCInterfaceDecl *Class
= InterfacePointerType->getObjectType()->getInterface();
CGObjCRuntime &Runtime = CGM.getObjCRuntime();
@@ -369,16 +373,14 @@ static const Expr *findWeakLValue(const Expr *E) {
///
/// If the runtime does support a required entrypoint, then this method will
/// generate a call and return the resulting value. Otherwise it will return
-/// None and the caller can generate a msgSend instead.
-static Optional<llvm::Value *>
-tryGenerateSpecializedMessageSend(CodeGenFunction &CGF, QualType ResultType,
- llvm::Value *Receiver,
- const CallArgList& Args, Selector Sel,
- const ObjCMethodDecl *method,
- bool isClassMessage) {
+/// std::nullopt and the caller can generate a msgSend instead.
+static std::optional<llvm::Value *> tryGenerateSpecializedMessageSend(
+ CodeGenFunction &CGF, QualType ResultType, llvm::Value *Receiver,
+ const CallArgList &Args, Selector Sel, const ObjCMethodDecl *method,
+ bool isClassMessage) {
auto &CGM = CGF.CGM;
if (!CGM.getCodeGenOpts().ObjCConvertMessagesToRuntimeCalls)
- return None;
+ return std::nullopt;
auto &Runtime = CGM.getLangOpts().ObjCRuntime;
switch (Sel.getMethodFamily()) {
@@ -399,7 +401,7 @@ tryGenerateSpecializedMessageSend(CodeGenFunction &CGF, QualType ResultType,
if (isa<llvm::ConstantPointerNull>(arg))
return CGF.EmitObjCAllocWithZone(Receiver,
CGF.ConvertType(ResultType));
- return None;
+ return std::nullopt;
}
}
break;
@@ -430,7 +432,7 @@ tryGenerateSpecializedMessageSend(CodeGenFunction &CGF, QualType ResultType,
default:
break;
}
- return None;
+ return std::nullopt;
}
CodeGen::RValue CGObjCRuntime::GeneratePossiblySpecializedMessageSend(
@@ -438,10 +440,10 @@ CodeGen::RValue CGObjCRuntime::GeneratePossiblySpecializedMessageSend(
Selector Sel, llvm::Value *Receiver, const CallArgList &Args,
const ObjCInterfaceDecl *OID, const ObjCMethodDecl *Method,
bool isClassMessage) {
- if (Optional<llvm::Value *> SpecializedResult =
+ if (std::optional<llvm::Value *> SpecializedResult =
tryGenerateSpecializedMessageSend(CGF, ResultType, Receiver, Args,
Sel, Method, isClassMessage)) {
- return RValue::get(SpecializedResult.getValue());
+ return RValue::get(*SpecializedResult);
}
return GenerateMessageSend(CGF, Return, ResultType, Sel, Receiver, Args, OID,
Method);
@@ -519,36 +521,36 @@ CGObjCRuntime::GetRuntimeProtocolList(ObjCProtocolDecl::protocol_iterator begin,
/// Instead of '[[MyClass alloc] init]', try to generate
/// 'objc_alloc_init(MyClass)'. This provides a code size improvement on the
/// caller side, as well as the optimized objc_alloc.
-static Optional<llvm::Value *>
+static std::optional<llvm::Value *>
tryEmitSpecializedAllocInit(CodeGenFunction &CGF, const ObjCMessageExpr *OME) {
auto &Runtime = CGF.getLangOpts().ObjCRuntime;
if (!Runtime.shouldUseRuntimeFunctionForCombinedAllocInit())
- return None;
+ return std::nullopt;
// Match the exact pattern '[[MyClass alloc] init]'.
Selector Sel = OME->getSelector();
if (OME->getReceiverKind() != ObjCMessageExpr::Instance ||
!OME->getType()->isObjCObjectPointerType() || !Sel.isUnarySelector() ||
Sel.getNameForSlot(0) != "init")
- return None;
+ return std::nullopt;
// Okay, this is '[receiver init]', check if 'receiver' is '[cls alloc]'
// with 'cls' a Class.
auto *SubOME =
dyn_cast<ObjCMessageExpr>(OME->getInstanceReceiver()->IgnoreParenCasts());
if (!SubOME)
- return None;
+ return std::nullopt;
Selector SubSel = SubOME->getSelector();
if (!SubOME->getType()->isObjCObjectPointerType() ||
!SubSel.isUnarySelector() || SubSel.getNameForSlot(0) != "alloc")
- return None;
+ return std::nullopt;
llvm::Value *Receiver = nullptr;
switch (SubOME->getReceiverKind()) {
case ObjCMessageExpr::Instance:
if (!SubOME->getInstanceReceiver()->getType()->isObjCClassType())
- return None;
+ return std::nullopt;
Receiver = CGF.EmitScalarExpr(SubOME->getInstanceReceiver());
break;
@@ -562,7 +564,7 @@ tryEmitSpecializedAllocInit(CodeGenFunction &CGF, const ObjCMessageExpr *OME) {
}
case ObjCMessageExpr::SuperInstance:
case ObjCMessageExpr::SuperClass:
- return None;
+ return std::nullopt;
}
return CGF.EmitObjCAllocInit(Receiver, CGF.ConvertType(OME->getType()));
@@ -589,7 +591,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
}
}
- if (Optional<llvm::Value *> Val = tryEmitSpecializedAllocInit(*this, E))
+ if (std::optional<llvm::Value *> Val = tryEmitSpecializedAllocInit(*this, E))
return AdjustObjCObjectType(*this, E->getType(), RValue::get(*Val));
// We don't retain the receiver in delegate init calls, and this is
@@ -767,7 +769,8 @@ void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
}
args.push_back(OMD->getSelfDecl());
- args.push_back(OMD->getCmdDecl());
+ if (!OMD->isDirectMethod())
+ args.push_back(OMD->getCmdDecl());
args.append(OMD->param_begin(), OMD->param_end());
@@ -816,19 +819,17 @@ static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
bool isAtomic, bool hasStrong) {
ASTContext &Context = CGF.getContext();
- Address src =
+ llvm::Value *src =
CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
- .getAddress(CGF);
+ .getPointer(CGF);
// objc_copyStruct (ReturnValue, &structIvar,
// sizeof (Type of Ivar), isAtomic, false);
CallArgList args;
- Address dest = CGF.Builder.CreateBitCast(CGF.ReturnValue, CGF.VoidPtrTy);
- args.add(RValue::get(dest.getPointer()), Context.VoidPtrTy);
-
- src = CGF.Builder.CreateBitCast(src, CGF.VoidPtrTy);
- args.add(RValue::get(src.getPointer()), Context.VoidPtrTy);
+ llvm::Value *dest = CGF.ReturnValue.getPointer();
+ args.add(RValue::get(dest), Context.VoidPtrTy);
+ args.add(RValue::get(src), Context.VoidPtrTy);
CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
@@ -847,7 +848,7 @@ static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
// FIXME: Allow unaligned atomic load/store on x86. (It is not
// currently supported by the backend.)
- return 0;
+ return false;
}
/// Return the maximum size that permits atomic accesses for the given
@@ -1094,7 +1095,6 @@ static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
llvm::Value *ivarAddr =
CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
.getPointer(CGF);
- ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
// Third argument is the helper function.
@@ -1108,11 +1108,47 @@ static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
callee, ReturnValueSlot(), args);
}
+// emitCmdValueForGetterSetterBody - Handle emitting the load necessary for
+// the `_cmd` selector argument for getter/setter bodies. For direct methods,
+// this returns an undefined/poison value; this matches behavior prior to `_cmd`
+// being removed from the direct method ABI as the getter/setter caller would
+// never load one. For non-direct methods, this emits a load of the implicit
+// `_cmd` storage.
+static llvm::Value *emitCmdValueForGetterSetterBody(CodeGenFunction &CGF,
+ ObjCMethodDecl *MD) {
+ if (MD->isDirectMethod()) {
+ // Direct methods do not have a `_cmd` argument. Emit an undefined/poison
+ // value. This will be passed to objc_getProperty/objc_setProperty, which
+ // has not appeared bothered by the `_cmd` argument being undefined before.
+ llvm::Type *selType = CGF.ConvertType(CGF.getContext().getObjCSelType());
+ return llvm::PoisonValue::get(selType);
+ }
+
+ return CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(MD->getCmdDecl()), "cmd");
+}
+
void
CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
const ObjCPropertyImplDecl *propImpl,
const ObjCMethodDecl *GetterMethodDecl,
llvm::Constant *AtomicHelperFn) {
+
+ ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
+
+ if (ivar->getType().isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
+ if (!AtomicHelperFn) {
+ LValue Src =
+ EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
+ LValue Dst = MakeAddrLValue(ReturnValue, ivar->getType());
+ callCStructCopyConstructor(Dst, Src);
+ } else {
+ ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
+ emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(), ivar,
+ AtomicHelperFn);
+ }
+ return;
+ }
+
// If there's a non-trivial 'get' expression, we just have to emit that.
if (!hasTrivialGetExpr(propImpl)) {
if (!AtomicHelperFn) {
@@ -1133,8 +1169,6 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
QualType propType = prop->getType();
ObjCMethodDecl *getterMethod = propImpl->getGetterMethodDecl();
- ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
-
// Pick an implementation strategy.
PropertyImplStrategy strategy(CGM, propImpl);
switch (strategy.getKind()) {
@@ -1149,11 +1183,10 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
// types, so there's no point in trying to pick a prettier type.
uint64_t ivarSize = getContext().toBits(strategy.getIvarSize());
llvm::Type *bitcastType = llvm::Type::getIntNTy(getLLVMContext(), ivarSize);
- bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay
// Perform an atomic load. This does not impose ordering constraints.
Address ivarAddr = LV.getAddress(*this);
- ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
+ ivarAddr = ivarAddr.withElementType(bitcastType);
llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
load->setAtomic(llvm::AtomicOrdering::Unordered);
@@ -1164,12 +1197,10 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
uint64_t retTySize = CGM.getDataLayout().getTypeSizeInBits(retTy);
llvm::Value *ivarVal = load;
if (ivarSize > retTySize) {
- llvm::Type *newTy = llvm::Type::getIntNTy(getLLVMContext(), retTySize);
- ivarVal = Builder.CreateTrunc(load, newTy);
- bitcastType = newTy->getPointerTo();
+ bitcastType = llvm::Type::getIntNTy(getLLVMContext(), retTySize);
+ ivarVal = Builder.CreateTrunc(load, bitcastType);
}
- Builder.CreateStore(ivarVal,
- Builder.CreateBitCast(ReturnValue, bitcastType));
+ Builder.CreateStore(ivarVal, ReturnValue.withElementType(bitcastType));
// Make sure we don't do an autorelease.
AutoreleaseResult = false;
@@ -1188,11 +1219,10 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
// Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
// FIXME: Can't this be simpler? This might even be worse than the
// corresponding gcc code.
- llvm::Value *cmd =
- Builder.CreateLoad(GetAddrOfLocalVar(getterMethod->getCmdDecl()), "cmd");
+ llvm::Value *cmd = emitCmdValueForGetterSetterBody(*this, getterMethod);
llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
llvm::Value *ivarOffset =
- EmitIvarOffset(classImpl->getClassInterface(), ivar);
+ EmitIvarOffsetAsPointerDiff(classImpl->getClassInterface(), ivar);
CallArgList args;
args.add(RValue::get(self), getContext().getObjCIdType());
@@ -1306,7 +1336,6 @@ static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
argVar->getType().getNonReferenceType(), VK_LValue,
SourceLocation());
llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer(CGF);
- argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
// The third argument is the sizeof the type.
@@ -1343,7 +1372,6 @@ static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
llvm::Value *ivarAddr =
CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
.getPointer(CGF);
- ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
// The second argument is the address of the parameter variable.
@@ -1352,7 +1380,6 @@ static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
argVar->getType().getNonReferenceType(), VK_LValue,
SourceLocation());
llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer(CGF);
- argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
// Third argument is the helper function.
@@ -1404,6 +1431,24 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
ObjCMethodDecl *setterMethod = propImpl->getSetterMethodDecl();
+ if (ivar->getType().isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
+ ParmVarDecl *PVD = *setterMethod->param_begin();
+ if (!AtomicHelperFn) {
+ // Call the move assignment operator instead of calling the copy
+ // assignment operator and destructor.
+ LValue Dst = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar,
+ /*quals*/ 0);
+ LValue Src = MakeAddrLValue(GetAddrOfLocalVar(PVD), ivar->getType());
+ callCStructMoveAssignmentOperator(Dst, Src);
+ } else {
+ // If atomic, assignment is called via a locking api.
+ emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar, AtomicHelperFn);
+ }
+ // Decativate the destructor for the setter parameter.
+ DeactivateCleanupBlock(CalleeDestructedParamCleanups[PVD], AllocaInsertPt);
+ return;
+ }
+
// Just use the setter expression if Sema gave us one and it's
// non-trivial.
if (!hasTrivialSetExpr(propImpl)) {
@@ -1432,15 +1477,13 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
// Currently, all atomic accesses have to be through integer
// types, so there's no point in trying to pick a prettier type.
- llvm::Type *bitcastType =
- llvm::Type::getIntNTy(getLLVMContext(),
- getContext().toBits(strategy.getIvarSize()));
+ llvm::Type *castType = llvm::Type::getIntNTy(
+ getLLVMContext(), getContext().toBits(strategy.getIvarSize()));
// Cast both arguments to the chosen operation type.
- argAddr = Builder.CreateElementBitCast(argAddr, bitcastType);
- ivarAddr = Builder.CreateElementBitCast(ivarAddr, bitcastType);
+ argAddr = argAddr.withElementType(castType);
+ ivarAddr = ivarAddr.withElementType(castType);
- // This bitcast load is likely to cause some nasty IR.
llvm::Value *load = Builder.CreateLoad(argAddr);
// Perform an atomic store. There are no memory ordering requirements.
@@ -1474,12 +1517,11 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
// Emit objc_setProperty((id) self, _cmd, offset, arg,
// <is-atomic>, <is-copy>).
- llvm::Value *cmd =
- Builder.CreateLoad(GetAddrOfLocalVar(setterMethod->getCmdDecl()));
+ llvm::Value *cmd = emitCmdValueForGetterSetterBody(*this, setterMethod);
llvm::Value *self =
Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
llvm::Value *ivarOffset =
- EmitIvarOffset(classImpl->getClassInterface(), ivar);
+ EmitIvarOffsetAsPointerDiff(classImpl->getClassInterface(), ivar);
Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());
llvm::Value *arg = Builder.CreateLoad(argAddr, "arg");
arg = Builder.CreateBitCast(arg, VoidPtrTy);
@@ -1555,6 +1597,12 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
argCK = CK_AnyPointerToBlockPointerCast;
} else if (ivarRef.getType()->isPointerType()) {
argCK = CK_BitCast;
+ } else if (argLoad.getType()->isAtomicType() &&
+ !ivarRef.getType()->isAtomicType()) {
+ argCK = CK_AtomicToNonAtomic;
+ } else if (!argLoad.getType()->isAtomicType() &&
+ ivarRef.getType()->isAtomicType()) {
+ argCK = CK_NonAtomicToAtomic;
}
ImplicitCastExpr argCast(ImplicitCastExpr::OnStack, ivarRef.getType(), argCK,
&argLoad, VK_PRValue, FPOptionsOverride());
@@ -1743,12 +1791,11 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
&CGM.getContext().Idents.get("count")
};
Selector FastEnumSel =
- CGM.getContext().Selectors.getSelector(llvm::array_lengthof(II), &II[0]);
+ CGM.getContext().Selectors.getSelector(std::size(II), &II[0]);
- QualType ItemsTy =
- getContext().getConstantArrayType(getContext().getObjCIdType(),
- llvm::APInt(32, NumItems), nullptr,
- ArrayType::Normal, 0);
+ QualType ItemsTy = getContext().getConstantArrayType(
+ getContext().getObjCIdType(), llvm::APInt(32, NumItems), nullptr,
+ ArraySizeModifier::Normal, 0);
Address ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");
// Emit the collection pointer. In ARC, we do a retain.
@@ -1905,8 +1952,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// Fetch the value at the current index from the buffer.
llvm::Value *CurrentItemPtr = Builder.CreateGEP(
- EnumStateItems->getType()->getPointerElementType(), EnumStateItems, index,
- "currentitem.ptr");
+ ObjCIdType, EnumStateItems, index, "currentitem.ptr");
llvm::Value *CurrentItem =
Builder.CreateAlignedLoad(ObjCIdType, CurrentItemPtr, getPointerAlign());
@@ -2108,6 +2154,13 @@ static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM,
setARCRuntimeFunctionLinkage(CGM, RTF.getCallee());
}
+static llvm::Function *getARCIntrinsic(llvm::Intrinsic::ID IntID,
+ CodeGenModule &CGM) {
+ llvm::Function *fn = CGM.getIntrinsic(IntID);
+ setARCRuntimeFunctionLinkage(CGM, fn);
+ return fn;
+}
+
/// Perform an operation having the signature
/// i8* (i8*)
/// where a null input causes a no-op and returns null.
@@ -2118,10 +2171,8 @@ static llvm::Value *emitARCValueOperation(
if (isa<llvm::ConstantPointerNull>(value))
return value;
- if (!fn) {
- fn = CGF.CGM.getIntrinsic(IntID);
- setARCRuntimeFunctionLinkage(CGF.CGM, fn);
- }
+ if (!fn)
+ fn = getARCIntrinsic(IntID, CGF.CGM);
// Cast the argument to 'id'.
llvm::Type *origType = returnType ? returnType : value->getType();
@@ -2140,23 +2191,10 @@ static llvm::Value *emitARCValueOperation(
static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF, Address addr,
llvm::Function *&fn,
llvm::Intrinsic::ID IntID) {
- if (!fn) {
- fn = CGF.CGM.getIntrinsic(IntID);
- setARCRuntimeFunctionLinkage(CGF.CGM, fn);
- }
-
- // Cast the argument to 'id*'.
- llvm::Type *origType = addr.getElementType();
- addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
-
- // Call the function.
- llvm::Value *result = CGF.EmitNounwindRuntimeCall(fn, addr.getPointer());
-
- // Cast the result back to a dereference of the original type.
- if (origType != CGF.Int8PtrTy)
- result = CGF.Builder.CreateBitCast(result, origType);
+ if (!fn)
+ fn = getARCIntrinsic(IntID, CGF.CGM);
- return result;
+ return CGF.EmitNounwindRuntimeCall(fn, addr.getPointer());
}
/// Perform an operation having the following signature:
@@ -2168,10 +2206,8 @@ static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF, Address addr,
bool ignored) {
assert(addr.getElementType() == value->getType());
- if (!fn) {
- fn = CGF.CGM.getIntrinsic(IntID);
- setARCRuntimeFunctionLinkage(CGF.CGM, fn);
- }
+ if (!fn)
+ fn = getARCIntrinsic(IntID, CGF.CGM);
llvm::Type *origType = value->getType();
@@ -2193,10 +2229,8 @@ static void emitARCCopyOperation(CodeGenFunction &CGF, Address dst, Address src,
llvm::Intrinsic::ID IntID) {
assert(dst.getType() == src.getType());
- if (!fn) {
- fn = CGF.CGM.getIntrinsic(IntID);
- setARCRuntimeFunctionLinkage(CGF.CGM, fn);
- }
+ if (!fn)
+ fn = getARCIntrinsic(IntID, CGF.CGM);
llvm::Value *args[] = {
CGF.Builder.CreateBitCast(dst.getPointer(), CGF.Int8PtrPtrTy),
@@ -2286,7 +2320,7 @@ llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
CGM.getObjCEntrypoints().objc_retainBlock);
call->setMetadata("clang.arc.copy_on_escape",
- llvm::MDNode::get(Builder.getContext(), None));
+ llvm::MDNode::get(Builder.getContext(), std::nullopt));
}
return result;
@@ -2328,7 +2362,8 @@ static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) {
// Call the marker asm if we made one, which we do only at -O0.
if (marker)
- CGF.Builder.CreateCall(marker, None, CGF.getBundlesForFunclet(marker));
+ CGF.Builder.CreateCall(marker, std::nullopt,
+ CGF.getBundlesForFunclet(marker));
}
static llvm::Value *emitOptimizedARCReturnCall(llvm::Value *value,
@@ -2340,13 +2375,22 @@ static llvm::Value *emitOptimizedARCReturnCall(llvm::Value *value,
// retainRV or claimRV calls in the IR. We currently do this only when the
// optimization level isn't -O0 since global-isel, which is currently run at
// -O0, doesn't know about the operand bundle.
+ ObjCEntrypoints &EPs = CGF.CGM.getObjCEntrypoints();
+ llvm::Function *&EP = IsRetainRV
+ ? EPs.objc_retainAutoreleasedReturnValue
+ : EPs.objc_unsafeClaimAutoreleasedReturnValue;
+ llvm::Intrinsic::ID IID =
+ IsRetainRV ? llvm::Intrinsic::objc_retainAutoreleasedReturnValue
+ : llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue;
+ EP = getARCIntrinsic(IID, CGF.CGM);
+
+ llvm::Triple::ArchType Arch = CGF.CGM.getTriple().getArch();
- // FIXME: Do this when the target isn't aarch64.
+ // FIXME: Do this on all targets and at -O0 too. This can be enabled only if
+ // the target backend knows how to handle the operand bundle.
if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0 &&
- CGF.CGM.getTarget().getTriple().isAArch64()) {
- llvm::Value *bundleArgs[] = {llvm::ConstantInt::get(
- CGF.Int64Ty,
- llvm::objcarc::getAttachedCallOperandBundleEnum(IsRetainRV))};
+ (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::x86_64)) {
+ llvm::Value *bundleArgs[] = {EP};
llvm::OperandBundleDef OB("clang.arc.attachedcall", bundleArgs);
auto *oldCall = cast<llvm::CallBase>(value);
llvm::CallBase *newCall = llvm::CallBase::addOperandBundle(
@@ -2362,13 +2406,6 @@ static llvm::Value *emitOptimizedARCReturnCall(llvm::Value *value,
CGF.CGM.getTargetCodeGenInfo().markARCOptimizedReturnCallsAsNoTail();
llvm::CallInst::TailCallKind tailKind =
isNoTail ? llvm::CallInst::TCK_NoTail : llvm::CallInst::TCK_None;
- ObjCEntrypoints &EPs = CGF.CGM.getObjCEntrypoints();
- llvm::Function *&EP = IsRetainRV
- ? EPs.objc_retainAutoreleasedReturnValue
- : EPs.objc_unsafeClaimAutoreleasedReturnValue;
- llvm::Intrinsic::ID IID =
- IsRetainRV ? llvm::Intrinsic::objc_retainAutoreleasedReturnValue
- : llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue;
return emitARCValueOperation(CGF, value, nullptr, EP, IID, tailKind);
}
@@ -2401,10 +2438,8 @@ void CodeGenFunction::EmitARCRelease(llvm::Value *value,
if (isa<llvm::ConstantPointerNull>(value)) return;
llvm::Function *&fn = CGM.getObjCEntrypoints().objc_release;
- if (!fn) {
- fn = CGM.getIntrinsic(llvm::Intrinsic::objc_release);
- setARCRuntimeFunctionLinkage(CGM, fn);
- }
+ if (!fn)
+ fn = getARCIntrinsic(llvm::Intrinsic::objc_release, CGM);
// Cast the argument to 'id'.
value = Builder.CreateBitCast(value, Int8PtrTy);
@@ -2414,7 +2449,7 @@ void CodeGenFunction::EmitARCRelease(llvm::Value *value,
if (precise == ARCImpreciseLifetime) {
call->setMetadata("clang.imprecise_release",
- llvm::MDNode::get(Builder.getContext(), None));
+ llvm::MDNode::get(Builder.getContext(), std::nullopt));
}
}
@@ -2447,10 +2482,8 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr,
assert(addr.getElementType() == value->getType());
llvm::Function *&fn = CGM.getObjCEntrypoints().objc_storeStrong;
- if (!fn) {
- fn = CGM.getIntrinsic(llvm::Intrinsic::objc_storeStrong);
- setARCRuntimeFunctionLinkage(CGM, fn);
- }
+ if (!fn)
+ fn = getARCIntrinsic(llvm::Intrinsic::objc_storeStrong, CGM);
llvm::Value *args[] = {
Builder.CreateBitCast(addr.getPointer(), Int8PtrPtrTy),
@@ -2603,13 +2636,8 @@ void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) {
/// Essentially objc_storeWeak(addr, nil).
void CodeGenFunction::EmitARCDestroyWeak(Address addr) {
llvm::Function *&fn = CGM.getObjCEntrypoints().objc_destroyWeak;
- if (!fn) {
- fn = CGM.getIntrinsic(llvm::Intrinsic::objc_destroyWeak);
- setARCRuntimeFunctionLinkage(CGM, fn);
- }
-
- // Cast the argument to 'id*'.
- addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);
+ if (!fn)
+ fn = getARCIntrinsic(llvm::Intrinsic::objc_destroyWeak, CGM);
EmitNounwindRuntimeCall(fn, addr.getPointer());
}
@@ -2651,10 +2679,8 @@ void CodeGenFunction::emitARCMoveAssignWeak(QualType Ty, Address DstAddr,
/// call i8* \@objc_autoreleasePoolPush(void)
llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
llvm::Function *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush;
- if (!fn) {
- fn = CGM.getIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPush);
- setARCRuntimeFunctionLinkage(CGM, fn);
- }
+ if (!fn)
+ fn = getARCIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPush, CGM);
return EmitNounwindRuntimeCall(fn);
}
@@ -2679,10 +2705,8 @@ void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
EmitRuntimeCallOrInvoke(fn, value);
} else {
llvm::FunctionCallee &fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop;
- if (!fn) {
- fn = CGM.getIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPop);
- setARCRuntimeFunctionLinkage(CGM, fn);
- }
+ if (!fn)
+ fn = getARCIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPop, CGM);
EmitRuntimeCall(fn, value);
}
@@ -2820,7 +2844,7 @@ void CodeGenFunction::EmitObjCRelease(llvm::Value *value,
if (precise == ARCImpreciseLifetime) {
call->setMetadata("clang.imprecise_release",
- llvm::MDNode::get(Builder.getContext(), None));
+ llvm::MDNode::get(Builder.getContext(), std::nullopt));
}
}
@@ -3344,7 +3368,8 @@ struct ARCRetainExprEmitter :
TryEmitResult result = visitExpr(e);
// Avoid the block-retain if this is a block literal that doesn't need to be
// copied to the heap.
- if (e->getBlockDecl()->canAvoidCopyToHeap())
+ if (CGF.CGM.getCodeGenOpts().ObjCAvoidHeapifyLocalBlocks &&
+ e->getBlockDecl()->canAvoidCopyToHeap())
result.setInt(true);
return result;
}
@@ -3653,7 +3678,6 @@ void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
/* constraints */ "r",
/* side effects */ true);
- object = Builder.CreateBitCast(object, VoidPtrTy);
EmitNounwindRuntimeCall(extender, object);
}
@@ -3664,15 +3688,27 @@ void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
llvm::Constant *
CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
const ObjCPropertyImplDecl *PID) {
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ if ((!(PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic)))
+ return nullptr;
+
+ QualType Ty = PID->getPropertyIvarDecl()->getType();
+ ASTContext &C = getContext();
+
+ if (Ty.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
+ // Call the move assignment operator instead of calling the copy assignment
+ // operator and destructor.
+ CharUnits Alignment = C.getTypeAlignInChars(Ty);
+ llvm::Constant *Fn = getNonTrivialCStructMoveAssignmentOperator(
+ CGM, Alignment, Alignment, Ty.isVolatileQualified(), Ty);
+ return Fn;
+ }
+
if (!getLangOpts().CPlusPlus ||
!getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
return nullptr;
- QualType Ty = PID->getPropertyIvarDecl()->getType();
if (!Ty->isRecordType())
return nullptr;
- const ObjCPropertyDecl *PD = PID->getPropertyDecl();
- if ((!(PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic)))
- return nullptr;
llvm::Constant *HelperFn = nullptr;
if (hasTrivialSetExpr(PID))
return nullptr;
@@ -3680,7 +3716,6 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
return HelperFn;
- ASTContext &C = getContext();
IdentifierInfo *II
= &CGM.getContext().Idents.get("__assign_helper_atomic_property_");
@@ -3697,7 +3732,7 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
FunctionDecl *FD = FunctionDecl::Create(
C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
- FunctionTy, nullptr, SC_Static, false, false);
+ FunctionTy, nullptr, SC_Static, false, false, false);
FunctionArgList args;
ParmVarDecl *Params[2];
@@ -3746,23 +3781,32 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
EmitStmt(TheCall);
FinishFunction();
- HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
+ HelperFn = Fn;
CGM.setAtomicSetterHelperFnMap(Ty, HelperFn);
return HelperFn;
}
-llvm::Constant *
-CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
- const ObjCPropertyImplDecl *PID) {
+llvm::Constant *CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
+ const ObjCPropertyImplDecl *PID) {
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ if ((!(PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic)))
+ return nullptr;
+
+ QualType Ty = PD->getType();
+ ASTContext &C = getContext();
+
+ if (Ty.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
+ CharUnits Alignment = C.getTypeAlignInChars(Ty);
+ llvm::Constant *Fn = getNonTrivialCStructCopyConstructor(
+ CGM, Alignment, Alignment, Ty.isVolatileQualified(), Ty);
+ return Fn;
+ }
+
if (!getLangOpts().CPlusPlus ||
!getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
return nullptr;
- const ObjCPropertyDecl *PD = PID->getPropertyDecl();
- QualType Ty = PD->getType();
if (!Ty->isRecordType())
return nullptr;
- if ((!(PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic)))
- return nullptr;
llvm::Constant *HelperFn = nullptr;
if (hasTrivialGetExpr(PID))
return nullptr;
@@ -3770,7 +3814,6 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty)))
return HelperFn;
- ASTContext &C = getContext();
IdentifierInfo *II =
&CGM.getContext().Idents.get("__copy_helper_atomic_property_");
@@ -3787,7 +3830,7 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
FunctionDecl *FD = FunctionDecl::Create(
C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
- FunctionTy, nullptr, SC_Static, false, false);
+ FunctionTy, nullptr, SC_Static, false, false, false);
FunctionArgList args;
ParmVarDecl *Params[2];
@@ -3847,18 +3890,17 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
SourceLocation());
RValue DV = EmitAnyExpr(&DstExpr);
- CharUnits Alignment
- = getContext().getTypeAlignInChars(TheCXXConstructExpr->getType());
+ CharUnits Alignment =
+ getContext().getTypeAlignInChars(TheCXXConstructExpr->getType());
EmitAggExpr(TheCXXConstructExpr,
- AggValueSlot::forAddr(Address(DV.getScalarVal(), Alignment),
- Qualifiers(),
- AggValueSlot::IsDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased,
- AggValueSlot::DoesNotOverlap));
+ AggValueSlot::forAddr(
+ Address(DV.getScalarVal(), ConvertTypeForMem(Ty), Alignment),
+ Qualifiers(), AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap));
FinishFunction();
- HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
+ HelperFn = Fn;
CGM.setAtomicGetterHelperFnMap(Ty, HelperFn);
return HelperFn;
}
@@ -3899,8 +3941,12 @@ static unsigned getBaseMachOPlatformID(const llvm::Triple &TT) {
return llvm::MachO::PLATFORM_TVOS;
case llvm::Triple::WatchOS:
return llvm::MachO::PLATFORM_WATCHOS;
+ case llvm::Triple::XROS:
+ return llvm::MachO::PLATFORM_XROS;
+ case llvm::Triple::DriverKit:
+ return llvm::MachO::PLATFORM_DRIVERKIT;
default:
- return /*Unknown platform*/ 0;
+ return llvm::MachO::PLATFORM_UNKNOWN;
}
}
@@ -3913,12 +3959,13 @@ static llvm::Value *emitIsPlatformVersionAtLeast(CodeGenFunction &CGF,
llvm::SmallVector<llvm::Value *, 8> Args;
auto EmitArgs = [&](const VersionTuple &Version, const llvm::Triple &TT) {
- Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor();
+ std::optional<unsigned> Min = Version.getMinor(),
+ SMin = Version.getSubminor();
Args.push_back(
llvm::ConstantInt::get(CGM.Int32Ty, getBaseMachOPlatformID(TT)));
Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Version.getMajor()));
- Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Min ? *Min : 0));
- Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, SMin ? *SMin : 0));
+ Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Min.value_or(0)));
+ Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, SMin.value_or(0)));
};
assert(!Version.empty() && "unexpected empty version");
@@ -3951,12 +3998,12 @@ CodeGenFunction::EmitBuiltinAvailable(const VersionTuple &Version) {
CGM.CreateRuntimeFunction(FTy, "__isOSVersionAtLeast");
}
- Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor();
+ std::optional<unsigned> Min = Version.getMinor(),
+ SMin = Version.getSubminor();
llvm::Value *Args[] = {
llvm::ConstantInt::get(CGM.Int32Ty, Version.getMajor()),
- llvm::ConstantInt::get(CGM.Int32Ty, Min ? *Min : 0),
- llvm::ConstantInt::get(CGM.Int32Ty, SMin ? *SMin : 0),
- };
+ llvm::ConstantInt::get(CGM.Int32Ty, Min.value_or(0)),
+ llvm::ConstantInt::get(CGM.Int32Ty, SMin.value_or(0))};
llvm::Value *CallRes =
EmitNounwindRuntimeCall(CGM.IsOSVersionAtLeastFn, Args);
@@ -3979,6 +4026,12 @@ static bool isFoundationNeededForDarwinAvailabilityCheck(
case llvm::Triple::MacOSX:
FoundationDroppedInVersion = VersionTuple(/*Major=*/10, /*Minor=*/15);
break;
+ case llvm::Triple::XROS:
+ // XROS doesn't need Foundation.
+ return false;
+ case llvm::Triple::DriverKit:
+ // DriverKit doesn't need Foundation.
+ return false;
default:
llvm_unreachable("Unexpected OS");
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
index 3f361f4e7931..a36b0cdddaf0 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -18,6 +18,8 @@
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "CodeGenTypes.h"
+#include "SanitizerMetadata.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
@@ -46,17 +48,13 @@ namespace {
/// types and the function declaration into a module if they're not used, and
/// avoids constructing the type more than once if it's used more than once.
class LazyRuntimeFunction {
- CodeGenModule *CGM;
- llvm::FunctionType *FTy;
- const char *FunctionName;
- llvm::FunctionCallee Function;
+ CodeGenModule *CGM = nullptr;
+ llvm::FunctionType *FTy = nullptr;
+ const char *FunctionName = nullptr;
+ llvm::FunctionCallee Function = nullptr;
public:
- /// Constructor leaves this class uninitialized, because it is intended to
- /// be used as a field in another class and not all of the types that are
- /// used as arguments will necessarily be available at construction time.
- LazyRuntimeFunction()
- : CGM(nullptr), FunctionName(nullptr), Function(nullptr) {}
+ LazyRuntimeFunction() = default;
/// Initialises the lazy function with the name, return type, and the types
/// of the arguments.
@@ -71,7 +69,7 @@ public:
FTy = llvm::FunctionType::get(RetTy, ArgTys, false);
}
else {
- FTy = llvm::FunctionType::get(RetTy, None, false);
+ FTy = llvm::FunctionType::get(RetTy, std::nullopt, false);
}
}
@@ -107,6 +105,8 @@ protected:
/// SEL is included in a header somewhere, in which case it will be whatever
/// type is declared in that header, most likely {i8*, i8*}.
llvm::PointerType *SelectorTy;
+ /// Element type of SelectorTy.
+ llvm::Type *SelectorElemTy;
/// LLVM i8 type. Cached here to avoid repeatedly getting it in all of the
/// places where it's used
llvm::IntegerType *Int8Ty;
@@ -128,6 +128,8 @@ protected:
/// but if the runtime header declaring it is included then it may be a
/// pointer to a structure.
llvm::PointerType *IdTy;
+ /// Element type of IdTy.
+ llvm::Type *IdElemTy;
/// Pointer to a pointer to an Objective-C object. Used in the new ABI
/// message lookup function and some GC-related functions.
llvm::PointerType *PtrToIdTy;
@@ -168,6 +170,8 @@ protected:
/// Does the current target use SEH-based exceptions? False implies
/// Itanium-style DWARF unwinding.
bool usesSEHExceptions;
+ /// Does the current target uses C++-based exceptions?
+ bool usesCxxExceptions;
/// Helper to check if we are targeting a specific runtime version or later.
bool isRuntime(ObjCRuntime::Kind kind, unsigned major, unsigned minor=0) {
@@ -313,12 +317,9 @@ protected:
/// Ensures that the value has the required type, by inserting a bitcast if
/// required. This function lets us avoid inserting bitcasts that are
/// redundant.
- llvm::Value* EnforceType(CGBuilderTy &B, llvm::Value *V, llvm::Type *Ty) {
- if (V->getType() == Ty) return V;
- return B.CreateBitCast(V, Ty);
- }
- Address EnforceType(CGBuilderTy &B, Address V, llvm::Type *Ty) {
- if (V.getType() == Ty) return V;
+ llvm::Value *EnforceType(CGBuilderTy &B, llvm::Value *V, llvm::Type *Ty) {
+ if (V->getType() == Ty)
+ return V;
return B.CreateBitCast(V, Ty);
}
@@ -598,6 +599,10 @@ public:
llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD) override;
+
+ // Map to unify direct method definitions.
+ llvm::DenseMap<const ObjCMethodDecl *, llvm::Function *>
+ DirectMethodDefinitions;
void GenerateDirectMethodPrologue(CodeGenFunction &CGF, llvm::Function *Fn,
const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD) override;
@@ -700,8 +705,8 @@ protected:
llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, Address ObjCSuper,
llvm::Value *cmd, MessageSendInfo &MSI) override {
CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper,
- PtrToObjCSuperTy).getPointer(), cmd};
+ llvm::Value *lookupArgs[] = {
+ EnforceType(Builder, ObjCSuper.getPointer(), PtrToObjCSuperTy), cmd};
return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFn, lookupArgs);
}
@@ -822,12 +827,18 @@ class CGObjCGNUstep : public CGObjCGNU {
SlotLookupSuperFn.init(&CGM, "objc_slot_lookup_super", SlotTy,
PtrToObjCSuperTy, SelectorTy);
// If we're in ObjC++ mode, then we want to make
- if (usesSEHExceptions) {
- llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
- // void objc_exception_rethrow(void)
- ExceptionReThrowFn.init(&CGM, "objc_exception_rethrow", VoidTy);
+ llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
+ if (usesCxxExceptions) {
+ // void *__cxa_begin_catch(void *e)
+ EnterCatchFn.init(&CGM, "__cxa_begin_catch", PtrTy, PtrTy);
+ // void __cxa_end_catch(void)
+ ExitCatchFn.init(&CGM, "__cxa_end_catch", VoidTy);
+ // void objc_exception_rethrow(void*)
+ ExceptionReThrowFn.init(&CGM, "__cxa_rethrow", PtrTy);
+ } else if (usesSEHExceptions) {
+ // void objc_exception_rethrow(void)
+ ExceptionReThrowFn.init(&CGM, "objc_exception_rethrow", VoidTy);
} else if (CGM.getLangOpts().CPlusPlus) {
- llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
// void *__cxa_begin_catch(void *e)
EnterCatchFn.init(&CGM, "__cxa_begin_catch", PtrTy, PtrTy);
// void __cxa_end_catch(void)
@@ -836,7 +847,6 @@ class CGObjCGNUstep : public CGObjCGNU {
ExceptionReThrowFn.init(&CGM, "_Unwind_Resume_or_Rethrow", VoidTy,
PtrTy);
} else if (R.getVersion() >= VersionTuple(1, 7)) {
- llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
// id objc_begin_catch(void *e)
EnterCatchFn.init(&CGM, "objc_begin_catch", IdTy, PtrTy);
// void objc_end_catch(void)
@@ -844,7 +854,6 @@ class CGObjCGNUstep : public CGObjCGNU {
// void _Unwind_Resume_or_Rethrow(void*)
ExceptionReThrowFn.init(&CGM, "objc_exception_rethrow", VoidTy, PtrTy);
}
- llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
SetPropertyAtomic.init(&CGM, "objc_setProperty_atomic", VoidTy, IdTy,
SelectorTy, IdTy, PtrDiffTy);
SetPropertyAtomicCopy.init(&CGM, "objc_setProperty_atomic_copy", VoidTy,
@@ -914,6 +923,14 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
ClassAliasSection,
ConstantStringSection
};
+ /// The subset of `objc_class_flags` used at compile time.
+ enum ClassFlags {
+ /// This is a metaclass
+ ClassFlagMeta = (1 << 0),
+ /// This class has been initialised by the runtime (+initialize has been
+ /// sent if necessary).
+ ClassFlagInitialized = (1 << 8),
+ };
static const char *const SectionsBaseNames[8];
static const char *const PECOFFSectionsBaseNames[8];
template<SectionKind K>
@@ -929,6 +946,8 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
/// structure describing the receiver and the class, and a selector as
/// arguments. Returns the IMP for the corresponding method.
LazyRuntimeFunction MsgLookupSuperFn;
+ /// Function to ensure that +initialize is sent to a class.
+ LazyRuntimeFunction SentInitializeFn;
/// A flag indicating if we've emitted at least one protocol.
/// If we haven't, then we need to emit an empty protocol, to ensure that the
/// __start__objc_protocols and __stop__objc_protocols sections exist.
@@ -978,13 +997,13 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
// Look for an existing one
llvm::StringMap<llvm::Constant*>::iterator old = ObjCStrings.find(Str);
if (old != ObjCStrings.end())
- return ConstantAddress(old->getValue(), Align);
+ return ConstantAddress(old->getValue(), IdElemTy, Align);
bool isNonASCII = SL->containsNonAscii();
auto LiteralLength = SL->getLength();
- if ((CGM.getTarget().getPointerWidth(0) == 64) &&
+ if ((CGM.getTarget().getPointerWidth(LangAS::Default) == 64) &&
(LiteralLength < 9) && !isNonASCII) {
// Tiny strings are only used on 64-bit platforms. They store 8 7-bit
// ASCII characters in the high 56 bits, followed by a 4-bit length and a
@@ -1000,7 +1019,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
auto *ObjCStr = llvm::ConstantExpr::getIntToPtr(
llvm::ConstantInt::get(Int64Ty, str), IdTy);
ObjCStrings[Str] = ObjCStr;
- return ConstantAddress(ObjCStr, Align);
+ return ConstantAddress(ObjCStr, IdElemTy, Align);
}
StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
@@ -1017,8 +1036,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
if (CGM.getTriple().isOSBinFormatCOFF()) {
cast<llvm::GlobalValue>(isa)->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
}
- } else if (isa->getType() != PtrToIdTy)
- isa = llvm::ConstantExpr::getBitCast(isa, PtrToIdTy);
+ }
// struct
// {
@@ -1063,7 +1081,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
// Hash. Not currently initialised by the compiler.
Fields.addInt(Int32Ty, 0);
// pointer to the data string.
- auto Arr = llvm::makeArrayRef(&ToBuf[0], ToPtr+1);
+ auto Arr = llvm::ArrayRef(&ToBuf[0], ToPtr + 1);
auto *C = llvm::ConstantDataArray::get(VMContext, Arr);
auto *Buffer = new llvm::GlobalVariable(TheModule, C->getType(),
/*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, C, ".str");
@@ -1111,10 +1129,9 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
std::pair<llvm::GlobalVariable*, int> v{ObjCStrGV, 0};
EarlyInitList.emplace_back(Sym, v);
}
- llvm::Constant *ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStrGV, IdTy);
- ObjCStrings[Str] = ObjCStr;
- ConstantStrings.push_back(ObjCStr);
- return ConstantAddress(ObjCStr, Align);
+ ObjCStrings[Str] = ObjCStrGV;
+ ConstantStrings.push_back(ObjCStrGV);
+ return ConstantAddress(ObjCStrGV, IdElemTy, Align);
}
void PushProperty(ConstantArrayBuilder &PropertiesArray,
@@ -1196,9 +1213,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
ReferencedProtocols.end());
SmallVector<llvm::Constant *, 16> Protocols;
for (const auto *PI : RuntimeProtocols)
- Protocols.push_back(
- llvm::ConstantExpr::getBitCast(GenerateProtocolRef(PI),
- ProtocolPtrTy));
+ Protocols.push_back(GenerateProtocolRef(PI));
return GenerateProtocolList(Protocols);
}
@@ -1206,8 +1221,10 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
llvm::Value *cmd, MessageSendInfo &MSI) override {
// Don't access the slot unless we're trying to cache the result.
CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *lookupArgs[] = {CGObjCGNU::EnforceType(Builder, ObjCSuper,
- PtrToObjCSuperTy).getPointer(), cmd};
+ llvm::Value *lookupArgs[] = {CGObjCGNU::EnforceType(Builder,
+ ObjCSuper.getPointer(),
+ PtrToObjCSuperTy),
+ cmd};
return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFn, lookupArgs);
}
@@ -1261,8 +1278,8 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
llvm::Value *GetClassNamed(CodeGenFunction &CGF,
const std::string &Name,
bool isWeak) override {
- return CGF.Builder.CreateLoad(Address(GetClassVar(Name, isWeak),
- CGM.getPointerAlign()));
+ return CGF.Builder.CreateLoad(
+ Address(GetClassVar(Name, isWeak), IdTy, CGM.getPointerAlign()));
}
int32_t FlagsForOwnership(Qualifiers::ObjCLifetime Ownership) {
// typedef enum {
@@ -1306,7 +1323,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
llvm::GlobalValue::ExternalLinkage, nullptr, Name);
GV->setAlignment(CGM.getPointerAlign().getAsAlign());
}
- return llvm::ConstantExpr::getBitCast(GV, ProtocolPtrTy);
+ return GV;
}
/// Existing protocol references.
@@ -1323,9 +1340,9 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
std::string RefName = SymbolForProtocolRef(Name);
assert(!TheModule.getGlobalVariable(RefName));
// Emit a reference symbol.
- auto GV = new llvm::GlobalVariable(TheModule, ProtocolPtrTy,
- false, llvm::GlobalValue::LinkOnceODRLinkage,
- llvm::ConstantExpr::getBitCast(Protocol, ProtocolPtrTy), RefName);
+ auto GV = new llvm::GlobalVariable(TheModule, ProtocolPtrTy, false,
+ llvm::GlobalValue::LinkOnceODRLinkage,
+ Protocol, RefName);
GV->setComdat(TheModule.getOrInsertComdat(RefName));
GV->setSection(sectionName<ProtocolReferenceSection>());
GV->setAlignment(CGM.getPointerAlign().getAsAlign());
@@ -1382,9 +1399,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
auto RuntimeProtocols =
GetRuntimeProtocolList(PD->protocol_begin(), PD->protocol_end());
for (const auto *PI : RuntimeProtocols)
- Protocols.push_back(
- llvm::ConstantExpr::getBitCast(GenerateProtocolRef(PI),
- ProtocolPtrTy));
+ Protocols.push_back(GenerateProtocolRef(PI));
llvm::Constant *ProtocolList = GenerateProtocolList(Protocols);
// Collect information about methods
@@ -1421,29 +1436,35 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
GV->setSection(sectionName<ProtocolSection>());
GV->setComdat(TheModule.getOrInsertComdat(SymName));
if (OldGV) {
- OldGV->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GV,
- OldGV->getType()));
+ OldGV->replaceAllUsesWith(GV);
OldGV->removeFromParent();
GV->setName(SymName);
}
Protocol = GV;
return GV;
}
- llvm::Constant *EnforceType(llvm::Constant *Val, llvm::Type *Ty) {
- if (Val->getType() == Ty)
- return Val;
- return llvm::ConstantExpr::getBitCast(Val, Ty);
- }
llvm::Value *GetTypedSelector(CodeGenFunction &CGF, Selector Sel,
const std::string &TypeEncoding) override {
return GetConstantSelector(Sel, TypeEncoding);
}
+ std::string GetSymbolNameForTypeEncoding(const std::string &TypeEncoding) {
+ std::string MangledTypes = std::string(TypeEncoding);
+ // @ is used as a special character in ELF symbol names (used for symbol
+ // versioning), so mangle the name to not include it. Replace it with a
+ // character that is not a valid type encoding character (and, being
+ // non-printable, never will be!)
+ if (CGM.getTriple().isOSBinFormatELF())
+ std::replace(MangledTypes.begin(), MangledTypes.end(), '@', '\1');
+ // = in dll exported names causes lld to fail when linking on Windows.
+ if (CGM.getTriple().isOSWindows())
+ std::replace(MangledTypes.begin(), MangledTypes.end(), '=', '\2');
+ return MangledTypes;
+ }
llvm::Constant *GetTypeString(llvm::StringRef TypeEncoding) {
if (TypeEncoding.empty())
return NULLPtr;
- std::string MangledTypes = std::string(TypeEncoding);
- std::replace(MangledTypes.begin(), MangledTypes.end(),
- '@', '\1');
+ std::string MangledTypes =
+ GetSymbolNameForTypeEncoding(std::string(TypeEncoding));
std::string TypesVarName = ".objc_sel_types_" + MangledTypes;
auto *TypesGlobal = TheModule.getGlobalVariable(TypesVarName);
if (!TypesGlobal) {
@@ -1460,17 +1481,11 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
}
llvm::Constant *GetConstantSelector(Selector Sel,
const std::string &TypeEncoding) override {
- // @ is used as a special character in symbol names (used for symbol
- // versioning), so mangle the name to not include it. Replace it with a
- // character that is not a valid type encoding character (and, being
- // non-printable, never will be!)
- std::string MangledTypes = TypeEncoding;
- std::replace(MangledTypes.begin(), MangledTypes.end(),
- '@', '\1');
+ std::string MangledTypes = GetSymbolNameForTypeEncoding(TypeEncoding);
auto SelVarName = (StringRef(".objc_selector_") + Sel.getAsString() + "_" +
MangledTypes).str();
if (auto *GV = TheModule.getNamedGlobal(SelVarName))
- return EnforceType(GV, SelectorTy);
+ return GV;
ConstantInitBuilder builder(CGM);
auto SelBuilder = builder.beginStruct();
SelBuilder.add(ExportUniqueString(Sel.getAsString(), ".objc_sel_name_",
@@ -1481,8 +1496,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
GV->setComdat(TheModule.getOrInsertComdat(SelVarName));
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
GV->setSection(sectionName<SelectorSection>());
- auto *SelVal = EnforceType(GV, SelectorTy);
- return SelVal;
+ return GV;
}
llvm::StructType *emptyStruct = nullptr;
@@ -1679,9 +1693,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
const ObjCIvarDecl *Ivar) override {
std::string TypeEncoding;
CGM.getContext().getObjCEncodingForType(Ivar->getType(), TypeEncoding);
- // Prevent the @ from being interpreted as a symbol version.
- std::replace(TypeEncoding.begin(), TypeEncoding.end(),
- '@', '\1');
+ TypeEncoding = GetSymbolNameForTypeEncoding(TypeEncoding);
const std::string Name = "__objc_ivar_offset_" + ID->getNameAsString()
+ '.' + Ivar->getNameAsString() + '.' + TypeEncoding;
return Name;
@@ -1723,7 +1735,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
metaclassFields.addInt(LongTy, 0);
// unsigned long info;
// objc_class_flag_meta
- metaclassFields.addInt(LongTy, 1);
+ metaclassFields.addInt(LongTy, ClassFlags::ClassFlagMeta);
// long instance_size;
// Setting this to zero is consistent with the older ABI, but it might be
// more sensible to set this to sizeof(struct objc_class)
@@ -1739,9 +1751,8 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
SmallVector<ObjCMethodDecl*, 16> ClassMethods;
ClassMethods.insert(ClassMethods.begin(), OID->classmeth_begin(),
OID->classmeth_end());
- metaclassFields.addBitCast(
- GenerateMethodList(className, "", ClassMethods, true),
- PtrTy);
+ metaclassFields.add(
+ GenerateMethodList(className, "", ClassMethods, true));
}
// void *dtable;
metaclassFields.addNullPointer(PtrTy);
@@ -1792,7 +1803,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
}
}
if (!IsCOFF)
- classFields.add(llvm::ConstantExpr::getBitCast(SuperClass, PtrTy));
+ classFields.add(SuperClass);
else
classFields.addNullPointer(PtrTy);
} else
@@ -1866,6 +1877,8 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
llvm::GlobalValue::HiddenVisibility :
llvm::GlobalValue::DefaultVisibility;
OffsetVar->setVisibility(ivarVisibility);
+ if (ivarVisibility != llvm::GlobalValue::HiddenVisibility)
+ CGM.setGVProperties(OffsetVar, OID->getClassInterface());
ivarBuilder.add(OffsetVar);
// Ivar size
ivarBuilder.addInt(Int32Ty,
@@ -1908,9 +1921,9 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
if (InstanceMethods.size() == 0)
classFields.addNullPointer(PtrTy);
else
- classFields.addBitCast(
- GenerateMethodList(className, "", InstanceMethods, false),
- PtrTy);
+ classFields.add(
+ GenerateMethodList(className, "", InstanceMethods, false));
+
// void *dtable;
classFields.addNullPointer(PtrTy);
// IMP cxx_construct;
@@ -1926,9 +1939,8 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
classDecl->protocol_end());
SmallVector<llvm::Constant *, 16> Protocols;
for (const auto *I : RuntimeProtocols)
- Protocols.push_back(
- llvm::ConstantExpr::getBitCast(GenerateProtocolRef(I),
- ProtocolPtrTy));
+ Protocols.push_back(GenerateProtocolRef(I));
+
if (Protocols.empty())
classFields.addNullPointer(PtrTy);
else
@@ -1946,7 +1958,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
auto *classRefSymbol = GetClassVar(className);
classRefSymbol->setSection(sectionName<ClassReferenceSection>());
- classRefSymbol->setInitializer(llvm::ConstantExpr::getBitCast(classStruct, IdTy));
+ classRefSymbol->setInitializer(classStruct);
if (IsCOFF) {
// we can't import a class struct.
@@ -1967,22 +1979,19 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
// Resolve the class aliases, if they exist.
// FIXME: Class pointer aliases shouldn't exist!
if (ClassPtrAlias) {
- ClassPtrAlias->replaceAllUsesWith(
- llvm::ConstantExpr::getBitCast(classStruct, IdTy));
+ ClassPtrAlias->replaceAllUsesWith(classStruct);
ClassPtrAlias->eraseFromParent();
ClassPtrAlias = nullptr;
}
if (auto Placeholder =
TheModule.getNamedGlobal(SymbolForClass(className)))
if (Placeholder != classStruct) {
- Placeholder->replaceAllUsesWith(
- llvm::ConstantExpr::getBitCast(classStruct, Placeholder->getType()));
+ Placeholder->replaceAllUsesWith(classStruct);
Placeholder->eraseFromParent();
classStruct->setName(SymbolForClass(className));
}
if (MetaClassPtrAlias) {
- MetaClassPtrAlias->replaceAllUsesWith(
- llvm::ConstantExpr::getBitCast(metaclass, IdTy));
+ MetaClassPtrAlias->replaceAllUsesWith(metaclass);
MetaClassPtrAlias->eraseFromParent();
MetaClassPtrAlias = nullptr;
}
@@ -2000,6 +2009,8 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
CGObjCGNUstep2(CodeGenModule &Mod) : CGObjCGNUstep(Mod, 10, 4, 2) {
MsgLookupSuperFn.init(&CGM, "objc_msg_lookup_super", IMPTy,
PtrToObjCSuperTy, SelectorTy);
+ SentInitializeFn.init(&CGM, "objc_send_initialize",
+ llvm::Type::getVoidTy(VMContext), IdTy);
// struct objc_property
// {
// const char *name;
@@ -2013,6 +2024,106 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
{ PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty });
}
+ void GenerateDirectMethodPrologue(CodeGenFunction &CGF, llvm::Function *Fn,
+ const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) override {
+ auto &Builder = CGF.Builder;
+ bool ReceiverCanBeNull = true;
+ auto selfAddr = CGF.GetAddrOfLocalVar(OMD->getSelfDecl());
+ auto selfValue = Builder.CreateLoad(selfAddr);
+
+ // Generate:
+ //
+ // /* unless the receiver is never NULL */
+ // if (self == nil) {
+ // return (ReturnType){ };
+ // }
+ //
+ // /* for class methods only to force class lazy initialization */
+ // if (!__objc_{class}_initialized)
+ // {
+ // objc_send_initialize(class);
+ // __objc_{class}_initialized = 1;
+ // }
+ //
+ // _cmd = @selector(...)
+ // ...
+
+ if (OMD->isClassMethod()) {
+ const ObjCInterfaceDecl *OID = cast<ObjCInterfaceDecl>(CD);
+
+ // Nullable `Class` expressions cannot be messaged with a direct method
+ // so the only reason why the receive can be null would be because
+ // of weak linking.
+ ReceiverCanBeNull = isWeakLinkedClass(OID);
+ }
+
+ llvm::MDBuilder MDHelper(CGM.getLLVMContext());
+ if (ReceiverCanBeNull) {
+ llvm::BasicBlock *SelfIsNilBlock =
+ CGF.createBasicBlock("objc_direct_method.self_is_nil");
+ llvm::BasicBlock *ContBlock =
+ CGF.createBasicBlock("objc_direct_method.cont");
+
+ // if (self == nil) {
+ auto selfTy = cast<llvm::PointerType>(selfValue->getType());
+ auto Zero = llvm::ConstantPointerNull::get(selfTy);
+
+ Builder.CreateCondBr(Builder.CreateICmpEQ(selfValue, Zero),
+ SelfIsNilBlock, ContBlock,
+ MDHelper.createBranchWeights(1, 1 << 20));
+
+ CGF.EmitBlock(SelfIsNilBlock);
+
+ // return (ReturnType){ };
+ auto retTy = OMD->getReturnType();
+ Builder.SetInsertPoint(SelfIsNilBlock);
+ if (!retTy->isVoidType()) {
+ CGF.EmitNullInitialization(CGF.ReturnValue, retTy);
+ }
+ CGF.EmitBranchThroughCleanup(CGF.ReturnBlock);
+ // }
+
+ // rest of the body
+ CGF.EmitBlock(ContBlock);
+ Builder.SetInsertPoint(ContBlock);
+ }
+
+ if (OMD->isClassMethod()) {
+ // Prefix of the class type.
+ auto *classStart =
+ llvm::StructType::get(PtrTy, PtrTy, PtrTy, LongTy, LongTy);
+ auto &astContext = CGM.getContext();
+ auto flags = Builder.CreateLoad(
+ Address{Builder.CreateStructGEP(classStart, selfValue, 4), LongTy,
+ CharUnits::fromQuantity(
+ astContext.getTypeAlign(astContext.UnsignedLongTy))});
+ auto isInitialized =
+ Builder.CreateAnd(flags, ClassFlags::ClassFlagInitialized);
+ llvm::BasicBlock *notInitializedBlock =
+ CGF.createBasicBlock("objc_direct_method.class_uninitialized");
+ llvm::BasicBlock *initializedBlock =
+ CGF.createBasicBlock("objc_direct_method.class_initialized");
+ Builder.CreateCondBr(Builder.CreateICmpEQ(isInitialized, Zeros[0]),
+ notInitializedBlock, initializedBlock,
+ MDHelper.createBranchWeights(1, 1 << 20));
+ CGF.EmitBlock(notInitializedBlock);
+ Builder.SetInsertPoint(notInitializedBlock);
+ CGF.EmitRuntimeCall(SentInitializeFn, selfValue);
+ Builder.CreateBr(initializedBlock);
+ CGF.EmitBlock(initializedBlock);
+ Builder.SetInsertPoint(initializedBlock);
+ }
+
+ // only synthesize _cmd if it's referenced
+ if (OMD->getCmdDecl()->isUsed()) {
+ // `_cmd` is not a parameter to direct methods, so storage must be
+ // explicitly declared for it.
+ CGF.EmitVarDecl(*OMD->getCmdDecl());
+ Builder.CreateStore(GetSelector(CGF, OMD),
+ CGF.GetAddrOfLocalVar(OMD->getCmdDecl()));
+ }
+ }
};
const char *const CGObjCGNUstep2::SectionsBaseNames[8] =
@@ -2143,6 +2254,9 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
msgSendMDKind = VMContext.getMDKindID("GNUObjCMessageSend");
usesSEHExceptions =
cgm.getContext().getTargetInfo().getTriple().isWindowsMSVCEnvironment();
+ usesCxxExceptions =
+ cgm.getContext().getTargetInfo().getTriple().isOSCygMing() &&
+ isRuntime(ObjCRuntime::GNUstep, 2);
CodeGenTypes &Types = CGM.getTypes();
IntTy = cast<llvm::IntegerType>(
@@ -2168,8 +2282,10 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
QualType selTy = CGM.getContext().getObjCSelType();
if (QualType() == selTy) {
SelectorTy = PtrToInt8Ty;
+ SelectorElemTy = Int8Ty;
} else {
SelectorTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(selTy));
+ SelectorElemTy = CGM.getTypes().ConvertTypeForMem(selTy->getPointeeType());
}
PtrToIntTy = llvm::PointerType::getUnqual(IntTy);
@@ -2187,8 +2303,11 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
if (UnqualIdTy != QualType()) {
ASTIdTy = CGM.getContext().getCanonicalType(UnqualIdTy);
IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy));
+ IdElemTy = CGM.getTypes().ConvertTypeForMem(
+ ASTIdTy.getTypePtr()->getPointeeType());
} else {
IdTy = PtrToInt8Ty;
+ IdElemTy = Int8Ty;
}
PtrToIdTy = llvm::PointerType::getUnqual(IdTy);
ProtocolTy = llvm::StructType::get(IdTy,
@@ -2224,7 +2343,10 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
// void objc_exception_throw(id);
ExceptionThrowFn.init(&CGM, "objc_exception_throw", VoidTy, IdTy);
- ExceptionReThrowFn.init(&CGM, "objc_exception_throw", VoidTy, IdTy);
+ ExceptionReThrowFn.init(&CGM,
+ usesCxxExceptions ? "objc_exception_rethrow"
+ : "objc_exception_throw",
+ VoidTy, IdTy);
// int objc_sync_enter(id);
SyncEnterFn.init(&CGM, "objc_sync_enter", IntTy, IdTy);
// int objc_sync_exit(id);
@@ -2345,9 +2467,10 @@ llvm::Value *CGObjCGNU::GetTypedSelector(CodeGenFunction &CGF, Selector Sel,
}
}
if (!SelValue) {
- SelValue = llvm::GlobalAlias::create(
- SelectorTy->getElementType(), 0, llvm::GlobalValue::PrivateLinkage,
- ".objc_selector_" + Sel.getAsString(), &TheModule);
+ SelValue = llvm::GlobalAlias::create(SelectorElemTy, 0,
+ llvm::GlobalValue::PrivateLinkage,
+ ".objc_selector_" + Sel.getAsString(),
+ &TheModule);
Types.emplace_back(TypeEncoding, SelValue);
}
@@ -2400,7 +2523,7 @@ llvm::Constant *CGObjCGNUstep::GetEHType(QualType T) {
if (usesSEHExceptions)
return CGM.getCXXABI().getAddrOfRTTIDescriptor(T);
- if (!CGM.getLangOpts().CPlusPlus)
+ if (!CGM.getLangOpts().CPlusPlus && !usesCxxExceptions)
return CGObjCGNU::GetEHType(T);
// For Objective-C++, we want to provide the ability to catch both C++ and
@@ -2417,7 +2540,7 @@ llvm::Constant *CGObjCGNUstep::GetEHType(QualType T) {
false,
llvm::GlobalValue::ExternalLinkage,
nullptr, "__objc_id_type_info");
- return llvm::ConstantExpr::getBitCast(IDEHType, PtrToInt8Ty);
+ return IDEHType;
}
const ObjCObjectPointerType *PT =
@@ -2431,9 +2554,8 @@ llvm::Constant *CGObjCGNUstep::GetEHType(QualType T) {
std::string typeinfoName = "__objc_eh_typeinfo_" + className;
// Return the existing typeinfo if it exists
- llvm::Constant *typeinfo = TheModule.getGlobalVariable(typeinfoName);
- if (typeinfo)
- return llvm::ConstantExpr::getBitCast(typeinfo, PtrToInt8Ty);
+ if (llvm::Constant *typeinfo = TheModule.getGlobalVariable(typeinfoName))
+ return typeinfo;
// Otherwise create it.
@@ -2448,9 +2570,8 @@ llvm::Constant *CGObjCGNUstep::GetEHType(QualType T) {
nullptr, vtableName);
}
llvm::Constant *Two = llvm::ConstantInt::get(IntTy, 2);
- auto *BVtable = llvm::ConstantExpr::getBitCast(
- llvm::ConstantExpr::getGetElementPtr(Vtable->getValueType(), Vtable, Two),
- PtrToInt8Ty);
+ auto *BVtable =
+ llvm::ConstantExpr::getGetElementPtr(Vtable->getValueType(), Vtable, Two);
llvm::Constant *typeName =
ExportUniqueString(className, "__objc_eh_typename_");
@@ -2464,7 +2585,7 @@ llvm::Constant *CGObjCGNUstep::GetEHType(QualType T) {
CGM.getPointerAlign(),
/*constant*/ false,
llvm::GlobalValue::LinkOnceODRLinkage);
- return llvm::ConstantExpr::getBitCast(TI, PtrToInt8Ty);
+ return TI;
}
/// Generate an NSConstantString object.
@@ -2476,7 +2597,7 @@ ConstantAddress CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
// Look for an existing one
llvm::StringMap<llvm::Constant*>::iterator old = ObjCStrings.find(Str);
if (old != ObjCStrings.end())
- return ConstantAddress(old->getValue(), Align);
+ return ConstantAddress(old->getValue(), Int8Ty, Align);
StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
@@ -2488,22 +2609,19 @@ ConstantAddress CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
llvm::Constant *isa = TheModule.getNamedGlobal(Sym);
if (!isa)
- isa = new llvm::GlobalVariable(TheModule, IdTy, /* isConstant */false,
- llvm::GlobalValue::ExternalWeakLinkage, nullptr, Sym);
- else if (isa->getType() != PtrToIdTy)
- isa = llvm::ConstantExpr::getBitCast(isa, PtrToIdTy);
+ isa = new llvm::GlobalVariable(TheModule, IdTy, /* isConstant */ false,
+ llvm::GlobalValue::ExternalWeakLinkage,
+ nullptr, Sym);
ConstantInitBuilder Builder(CGM);
auto Fields = Builder.beginStruct();
Fields.add(isa);
Fields.add(MakeConstantString(Str));
Fields.addInt(IntTy, Str.size());
- llvm::Constant *ObjCStr =
- Fields.finishAndCreateGlobal(".objc_str", Align);
- ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty);
+ llvm::Constant *ObjCStr = Fields.finishAndCreateGlobal(".objc_str", Align);
ObjCStrings[Str] = ObjCStr;
ConstantStrings.push_back(ObjCStr);
- return ConstantAddress(ObjCStr, Align);
+ return ConstantAddress(ObjCStr, Int8Ty, Align);
}
///Generates a message send where the super is the receiver. This is a message
@@ -2574,14 +2692,14 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
if (IsClassMessage) {
if (!MetaClassPtrAlias) {
MetaClassPtrAlias = llvm::GlobalAlias::create(
- IdTy->getElementType(), 0, llvm::GlobalValue::InternalLinkage,
+ IdElemTy, 0, llvm::GlobalValue::InternalLinkage,
".objc_metaclass_ref" + Class->getNameAsString(), &TheModule);
}
ReceiverClass = MetaClassPtrAlias;
} else {
if (!ClassPtrAlias) {
ClassPtrAlias = llvm::GlobalAlias::create(
- IdTy->getElementType(), 0, llvm::GlobalValue::InternalLinkage,
+ IdElemTy, 0, llvm::GlobalValue::InternalLinkage,
".objc_class_ref" + Class->getNameAsString(), &TheModule);
}
ReceiverClass = ClassPtrAlias;
@@ -2607,8 +2725,6 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
Builder.CreateStore(Receiver, Builder.CreateStructGEP(ObjCSuper, 0));
Builder.CreateStore(ReceiverClass, Builder.CreateStructGEP(ObjCSuper, 1));
- ObjCSuper = EnforceType(Builder, ObjCSuper, PtrToObjCSuperTy);
-
// Get the IMP
llvm::Value *imp = LookupIMPSuper(CGF, ObjCSuper, cmd, MSI);
imp = EnforceType(Builder, imp, MSI.MessengerType);
@@ -2651,42 +2767,18 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
}
}
- // If the return type is something that goes in an integer register, the
- // runtime will handle 0 returns. For other cases, we fill in the 0 value
- // ourselves.
- //
- // The language spec says the result of this kind of message send is
- // undefined, but lots of people seem to have forgotten to read that
- // paragraph and insist on sending messages to nil that have structure
- // returns. With GCC, this generates a random return value (whatever happens
- // to be on the stack / in those registers at the time) on most platforms,
- // and generates an illegal instruction trap on SPARC. With LLVM it corrupts
- // the stack.
- bool isPointerSizedReturn = (ResultType->isAnyPointerType() ||
- ResultType->isIntegralOrEnumerationType() || ResultType->isVoidType());
-
- llvm::BasicBlock *startBB = nullptr;
- llvm::BasicBlock *messageBB = nullptr;
- llvm::BasicBlock *continueBB = nullptr;
-
- if (!isPointerSizedReturn) {
- startBB = Builder.GetInsertBlock();
- messageBB = CGF.createBasicBlock("msgSend");
- continueBB = CGF.createBasicBlock("continue");
-
- llvm::Value *isNil = Builder.CreateICmpEQ(Receiver,
- llvm::Constant::getNullValue(Receiver->getType()));
- Builder.CreateCondBr(isNil, continueBB, messageBB);
- CGF.EmitBlock(messageBB);
- }
+ bool isDirect = Method && Method->isDirectMethod();
IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy));
llvm::Value *cmd;
- if (Method)
- cmd = GetSelector(CGF, Method);
- else
- cmd = GetSelector(CGF, Sel);
- cmd = EnforceType(Builder, cmd, SelectorTy);
+ if (!isDirect) {
+ if (Method)
+ cmd = GetSelector(CGF, Method);
+ else
+ cmd = GetSelector(CGF, Sel);
+ cmd = EnforceType(Builder, cmd, SelectorTy);
+ }
+
Receiver = EnforceType(Builder, Receiver, IdTy);
llvm::Metadata *impMD[] = {
@@ -2698,18 +2790,114 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
CallArgList ActualArgs;
ActualArgs.add(RValue::get(Receiver), ASTIdTy);
- ActualArgs.add(RValue::get(cmd), CGF.getContext().getObjCSelType());
+ if (!isDirect)
+ ActualArgs.add(RValue::get(cmd), CGF.getContext().getObjCSelType());
ActualArgs.addFrom(CallArgs);
MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs);
+ // Message sends are expected to return a zero value when the
+ // receiver is nil. At one point, this was only guaranteed for
+ // simple integer and pointer types, but expectations have grown
+ // over time.
+ //
+ // Given a nil receiver, the GNU runtime's message lookup will
+ // return a stub function that simply sets various return-value
+ // registers to zero and then returns. That's good enough for us
+ // if and only if (1) the calling conventions of that stub are
+ // compatible with the signature we're using and (2) the registers
+ // it sets are sufficient to produce a zero value of the return type.
+ // Rather than doing a whole target-specific analysis, we assume it
+ // only works for void, integer, and pointer types, and in all
+ // other cases we do an explicit nil check is emitted code. In
+ // addition to ensuring we produce a zero value for other types, this
+ // sidesteps the few outright CC incompatibilities we know about that
+ // could otherwise lead to crashes, like when a method is expected to
+ // return on the x87 floating point stack or adjust the stack pointer
+ // because of an indirect return.
+ bool hasParamDestroyedInCallee = false;
+ bool requiresExplicitZeroResult = false;
+ bool requiresNilReceiverCheck = [&] {
+ // We never need a check if we statically know the receiver isn't nil.
+ if (!canMessageReceiverBeNull(CGF, Method, /*IsSuper*/ false,
+ Class, Receiver))
+ return false;
+
+ // If there's a consumed argument, we need a nil check.
+ if (Method && Method->hasParamDestroyedInCallee()) {
+ hasParamDestroyedInCallee = true;
+ }
+
+ // If the return value isn't flagged as unused, and the result
+ // type isn't in our narrow set where we assume compatibility,
+ // we need a nil check to ensure a nil value.
+ if (!Return.isUnused()) {
+ if (ResultType->isVoidType()) {
+ // void results are definitely okay.
+ } else if (ResultType->hasPointerRepresentation() &&
+ CGM.getTypes().isZeroInitializable(ResultType)) {
+ // Pointer types should be fine as long as they have
+ // bitwise-zero null pointers. But do we need to worry
+ // about unusual address spaces?
+ } else if (ResultType->isIntegralOrEnumerationType()) {
+ // Bitwise zero should always be zero for integral types.
+ // FIXME: we probably need a size limit here, but we've
+ // never imposed one before
+ } else {
+ // Otherwise, use an explicit check just to be sure, unless we're
+ // calling a direct method, where the implementation does this for us.
+ requiresExplicitZeroResult = !isDirect;
+ }
+ }
+
+ return hasParamDestroyedInCallee || requiresExplicitZeroResult;
+ }();
+
+ // We will need to explicitly zero-initialize an aggregate result slot
+ // if we generally require explicit zeroing and we have an aggregate
+ // result.
+ bool requiresExplicitAggZeroing =
+ requiresExplicitZeroResult && CGF.hasAggregateEvaluationKind(ResultType);
+
+ // The block we're going to end up in after any message send or nil path.
+ llvm::BasicBlock *continueBB = nullptr;
+ // The block that eventually branched to continueBB along the nil path.
+ llvm::BasicBlock *nilPathBB = nullptr;
+ // The block to do explicit work in along the nil path, if necessary.
+ llvm::BasicBlock *nilCleanupBB = nullptr;
+
+ // Emit the nil-receiver check.
+ if (requiresNilReceiverCheck) {
+ llvm::BasicBlock *messageBB = CGF.createBasicBlock("msgSend");
+ continueBB = CGF.createBasicBlock("continue");
+
+ // If we need to zero-initialize an aggregate result or destroy
+ // consumed arguments, we'll need a separate cleanup block.
+ // Otherwise we can just branch directly to the continuation block.
+ if (requiresExplicitAggZeroing || hasParamDestroyedInCallee) {
+ nilCleanupBB = CGF.createBasicBlock("nilReceiverCleanup");
+ } else {
+ nilPathBB = Builder.GetInsertBlock();
+ }
+
+ llvm::Value *isNil = Builder.CreateICmpEQ(Receiver,
+ llvm::Constant::getNullValue(Receiver->getType()));
+ Builder.CreateCondBr(isNil, nilCleanupBB ? nilCleanupBB : continueBB,
+ messageBB);
+ CGF.EmitBlock(messageBB);
+ }
+
// Get the IMP to call
llvm::Value *imp;
- // If we have non-legacy dispatch specified, we try using the objc_msgSend()
- // functions. These are not supported on all platforms (or all runtimes on a
- // given platform), so we
- switch (CGM.getCodeGenOpts().getObjCDispatchMethod()) {
+ // If this is a direct method, just emit it here.
+ if (isDirect)
+ imp = GenerateMethod(Method, Method->getClassInterface());
+ else
+ // If we have non-legacy dispatch specified, we try using the
+ // objc_msgSend() functions. These are not supported on all platforms
+ // (or all runtimes on a given platform), so we
+ switch (CGM.getCodeGenOpts().getObjCDispatchMethod()) {
case CodeGenOptions::Legacy:
imp = LookupIMP(CGF, Receiver, cmd, node, MSI);
break;
@@ -2732,7 +2920,7 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
llvm::FunctionType::get(IdTy, IdTy, true), "objc_msgSend")
.getCallee();
}
- }
+ }
// Reset the receiver in case the lookup modified it
ActualArgs[0] = CallArg(RValue::get(Receiver), ASTIdTy);
@@ -2742,38 +2930,53 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
llvm::CallBase *call;
CGCallee callee(CGCalleeInfo(), imp);
RValue msgRet = CGF.EmitCall(MSI.CallInfo, callee, Return, ActualArgs, &call);
- call->setMetadata(msgSendMDKind, node);
-
+ if (!isDirect)
+ call->setMetadata(msgSendMDKind, node);
- if (!isPointerSizedReturn) {
- messageBB = CGF.Builder.GetInsertBlock();
+ if (requiresNilReceiverCheck) {
+ llvm::BasicBlock *nonNilPathBB = CGF.Builder.GetInsertBlock();
CGF.Builder.CreateBr(continueBB);
+
+ // Emit the nil path if we decided it was necessary above.
+ if (nilCleanupBB) {
+ CGF.EmitBlock(nilCleanupBB);
+
+ if (hasParamDestroyedInCallee) {
+ destroyCalleeDestroyedArguments(CGF, Method, CallArgs);
+ }
+
+ if (requiresExplicitAggZeroing) {
+ assert(msgRet.isAggregate());
+ Address addr = msgRet.getAggregateAddress();
+ CGF.EmitNullInitialization(addr, ResultType);
+ }
+
+ nilPathBB = CGF.Builder.GetInsertBlock();
+ CGF.Builder.CreateBr(continueBB);
+ }
+
+ // Enter the continuation block and emit a phi if required.
CGF.EmitBlock(continueBB);
if (msgRet.isScalar()) {
- llvm::Value *v = msgRet.getScalarVal();
- llvm::PHINode *phi = Builder.CreatePHI(v->getType(), 2);
- phi->addIncoming(v, messageBB);
- phi->addIncoming(llvm::Constant::getNullValue(v->getType()), startBB);
- msgRet = RValue::get(phi);
+ // If the return type is void, do nothing
+ if (llvm::Value *v = msgRet.getScalarVal()) {
+ llvm::PHINode *phi = Builder.CreatePHI(v->getType(), 2);
+ phi->addIncoming(v, nonNilPathBB);
+ phi->addIncoming(CGM.EmitNullConstant(ResultType), nilPathBB);
+ msgRet = RValue::get(phi);
+ }
} else if (msgRet.isAggregate()) {
- Address v = msgRet.getAggregateAddress();
- llvm::PHINode *phi = Builder.CreatePHI(v.getType(), 2);
- llvm::Type *RetTy = v.getElementType();
- Address NullVal = CGF.CreateTempAlloca(RetTy, v.getAlignment(), "null");
- CGF.InitTempAlloca(NullVal, llvm::Constant::getNullValue(RetTy));
- phi->addIncoming(v.getPointer(), messageBB);
- phi->addIncoming(NullVal.getPointer(), startBB);
- msgRet = RValue::getAggregate(Address(phi, v.getAlignment()));
+ // Aggregate zeroing is handled in nilCleanupBB when it's required.
} else /* isComplex() */ {
std::pair<llvm::Value*,llvm::Value*> v = msgRet.getComplexVal();
llvm::PHINode *phi = Builder.CreatePHI(v.first->getType(), 2);
- phi->addIncoming(v.first, messageBB);
+ phi->addIncoming(v.first, nonNilPathBB);
phi->addIncoming(llvm::Constant::getNullValue(v.first->getType()),
- startBB);
+ nilPathBB);
llvm::PHINode *phi2 = Builder.CreatePHI(v.second->getType(), 2);
- phi2->addIncoming(v.second, messageBB);
+ phi2->addIncoming(v.second, nonNilPathBB);
phi2->addIncoming(llvm::Constant::getNullValue(v.second->getType()),
- startBB);
+ nilPathBB);
msgRet = RValue::getComplex(phi, phi2);
}
}
@@ -2831,14 +3034,14 @@ GenerateMethodList(StringRef ClassName,
assert(FnPtr && "Can't generate metadata for method that doesn't exist");
auto Method = MethodArray.beginStruct(ObjCMethodTy);
if (isV2ABI) {
- Method.addBitCast(FnPtr, IMPTy);
+ Method.add(FnPtr);
Method.add(GetConstantSelector(OMD->getSelector(),
Context.getObjCEncodingForMethodDecl(OMD)));
Method.add(MakeConstantString(Context.getObjCEncodingForMethodDecl(OMD, true)));
} else {
Method.add(MakeConstantString(OMD->getSelector().getAsString()));
Method.add(MakeConstantString(Context.getObjCEncodingForMethodDecl(OMD)));
- Method.addBitCast(FnPtr, IMPTy);
+ Method.add(FnPtr);
}
Method.finishAndAddTo(MethodArray);
}
@@ -2937,7 +3140,7 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
// Fill in the structure
// isa
- Elements.addBitCast(MetaClass, PtrToInt8Ty);
+ Elements.add(MetaClass);
// super_class
Elements.add(SuperClass);
// name
@@ -2966,7 +3169,7 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
// sibling_class
Elements.add(NULLPtr);
// protocols
- Elements.addBitCast(Protocols, PtrTy);
+ Elements.add(Protocols);
// gc_object_type
Elements.add(NULLPtr);
// abi_version
@@ -2990,8 +3193,7 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
Elements.finishAndCreateGlobal(ClassSym, CGM.getPointerAlign(), false,
llvm::GlobalValue::ExternalLinkage);
if (ClassRef) {
- ClassRef->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(Class,
- ClassRef->getType()));
+ ClassRef->replaceAllUsesWith(Class);
ClassRef->removeFromParent();
Class->setName(ClassSym);
}
@@ -3039,7 +3241,7 @@ CGObjCGNU::GenerateProtocolList(ArrayRef<std::string> Protocols) {
} else {
protocol = value->getValue();
}
- Elements.addBitCast(protocol, PtrToInt8Ty);
+ Elements.add(protocol);
}
Elements.finishAndAddTo(ProtocolList);
return ProtocolList.finishAndCreateGlobal(".objc_protocol_list",
@@ -3066,7 +3268,6 @@ llvm::Constant *
CGObjCGNU::GenerateEmptyProtocol(StringRef ProtocolName) {
llvm::Constant *ProtocolList = GenerateProtocolList({});
llvm::Constant *MethodList = GenerateProtocolMethodList({});
- MethodList = llvm::ConstantExpr::getBitCast(MethodList, PtrToInt8Ty);
// Protocols are objects containing lists of the methods implemented and
// protocols adopted.
ConstantInitBuilder Builder(CGM);
@@ -3157,9 +3358,7 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
Elements.add(PropertyList);
Elements.add(OptionalPropertyList);
ExistingProtocols[ProtocolName] =
- llvm::ConstantExpr::getBitCast(
- Elements.finishAndCreateGlobal(".objc_protocol", CGM.getPointerAlign()),
- IdTy);
+ Elements.finishAndCreateGlobal(".objc_protocol", CGM.getPointerAlign());
}
void CGObjCGNU::GenerateProtocolHolderCategory() {
// Collect information about instance methods
@@ -3172,11 +3371,9 @@ void CGObjCGNU::GenerateProtocolHolderCategory() {
Elements.add(MakeConstantString(CategoryName));
Elements.add(MakeConstantString(ClassName));
// Instance method list
- Elements.addBitCast(GenerateMethodList(
- ClassName, CategoryName, {}, false), PtrTy);
+ Elements.add(GenerateMethodList(ClassName, CategoryName, {}, false));
// Class method list
- Elements.addBitCast(GenerateMethodList(
- ClassName, CategoryName, {}, true), PtrTy);
+ Elements.add(GenerateMethodList(ClassName, CategoryName, {}, true));
// Protocol list
ConstantInitBuilder ProtocolListBuilder(CGM);
@@ -3186,16 +3383,13 @@ void CGObjCGNU::GenerateProtocolHolderCategory() {
auto ProtocolElements = ProtocolList.beginArray(PtrTy);
for (auto iter = ExistingProtocols.begin(), endIter = ExistingProtocols.end();
iter != endIter ; iter++) {
- ProtocolElements.addBitCast(iter->getValue(), PtrTy);
+ ProtocolElements.add(iter->getValue());
}
ProtocolElements.finishAndAddTo(ProtocolList);
- Elements.addBitCast(
- ProtocolList.finishAndCreateGlobal(".objc_protocol_list",
- CGM.getPointerAlign()),
- PtrTy);
- Categories.push_back(llvm::ConstantExpr::getBitCast(
- Elements.finishAndCreateGlobal("", CGM.getPointerAlign()),
- PtrTy));
+ Elements.add(ProtocolList.finishAndCreateGlobal(".objc_protocol_list",
+ CGM.getPointerAlign()));
+ Categories.push_back(
+ Elements.finishAndCreateGlobal("", CGM.getPointerAlign()));
}
/// Libobjc2 uses a bitfield representation where small(ish) bitfields are
@@ -3234,7 +3428,7 @@ llvm::Constant *CGObjCGNU::MakeBitField(ArrayRef<bool> bits) {
auto fields = builder.beginStruct();
fields.addInt(Int32Ty, values.size());
auto array = fields.beginArray();
- for (auto v : values) array.add(v);
+ for (auto *v : values) array.add(v);
array.finishAndAddTo(fields);
llvm::Constant *GS =
@@ -3270,38 +3464,35 @@ void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
SmallVector<ObjCMethodDecl*, 16> InstanceMethods;
InstanceMethods.insert(InstanceMethods.begin(), OCD->instmeth_begin(),
OCD->instmeth_end());
- Elements.addBitCast(
- GenerateMethodList(ClassName, CategoryName, InstanceMethods, false),
- PtrTy);
+ Elements.add(
+ GenerateMethodList(ClassName, CategoryName, InstanceMethods, false));
+
// Class method list
SmallVector<ObjCMethodDecl*, 16> ClassMethods;
ClassMethods.insert(ClassMethods.begin(), OCD->classmeth_begin(),
OCD->classmeth_end());
- Elements.addBitCast(
- GenerateMethodList(ClassName, CategoryName, ClassMethods, true),
- PtrTy);
+ Elements.add(GenerateMethodList(ClassName, CategoryName, ClassMethods, true));
+
// Protocol list
- Elements.addBitCast(GenerateCategoryProtocolList(CatDecl), PtrTy);
+ Elements.add(GenerateCategoryProtocolList(CatDecl));
if (isRuntime(ObjCRuntime::GNUstep, 2)) {
const ObjCCategoryDecl *Category =
Class->FindCategoryDeclaration(OCD->getIdentifier());
if (Category) {
// Instance properties
- Elements.addBitCast(GeneratePropertyList(OCD, Category, false), PtrTy);
+ Elements.add(GeneratePropertyList(OCD, Category, false));
// Class properties
- Elements.addBitCast(GeneratePropertyList(OCD, Category, true), PtrTy);
+ Elements.add(GeneratePropertyList(OCD, Category, true));
} else {
Elements.addNullPointer(PtrTy);
Elements.addNullPointer(PtrTy);
}
}
- Categories.push_back(llvm::ConstantExpr::getBitCast(
- Elements.finishAndCreateGlobal(
- std::string(".objc_category_")+ClassName+CategoryName,
- CGM.getPointerAlign()),
- PtrTy));
+ Categories.push_back(Elements.finishAndCreateGlobal(
+ std::string(".objc_category_") + ClassName + CategoryName,
+ CGM.getPointerAlign()));
}
llvm::Constant *CGObjCGNU::GeneratePropertyList(const Decl *Container,
@@ -3604,20 +3795,17 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
// Resolve the class aliases, if they exist.
if (ClassPtrAlias) {
- ClassPtrAlias->replaceAllUsesWith(
- llvm::ConstantExpr::getBitCast(ClassStruct, IdTy));
+ ClassPtrAlias->replaceAllUsesWith(ClassStruct);
ClassPtrAlias->eraseFromParent();
ClassPtrAlias = nullptr;
}
if (MetaClassPtrAlias) {
- MetaClassPtrAlias->replaceAllUsesWith(
- llvm::ConstantExpr::getBitCast(MetaClassStruct, IdTy));
+ MetaClassPtrAlias->replaceAllUsesWith(MetaClassStruct);
MetaClassPtrAlias->eraseFromParent();
MetaClassPtrAlias = nullptr;
}
// Add class structure to list to be added to the symtab later
- ClassStruct = llvm::ConstantExpr::getBitCast(ClassStruct, PtrToInt8Ty);
Classes.push_back(ClassStruct);
}
@@ -3630,13 +3818,10 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
// Add all referenced protocols to a category.
GenerateProtocolHolderCategory();
- llvm::StructType *selStructTy =
- dyn_cast<llvm::StructType>(SelectorTy->getElementType());
- llvm::Type *selStructPtrTy = SelectorTy;
+ llvm::StructType *selStructTy = dyn_cast<llvm::StructType>(SelectorElemTy);
if (!selStructTy) {
selStructTy = llvm::StructType::get(CGM.getLLVMContext(),
{ PtrToInt8Ty, PtrToInt8Ty });
- selStructPtrTy = llvm::PointerType::getUnqual(selStructTy);
}
// Generate statics list:
@@ -3667,7 +3852,6 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
statics = allStaticsArray.finishAndCreateGlobal(".objc_statics_ptr",
CGM.getPointerAlign());
- statics = llvm::ConstantExpr::getBitCast(statics, PtrTy);
}
// Array of classes, categories, and constant objects.
@@ -3730,9 +3914,6 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
// FIXME: We're generating redundant loads and stores here!
llvm::Constant *selPtr = llvm::ConstantExpr::getGetElementPtr(
selectorList->getValueType(), selectorList, idxs);
- // If selectors are defined as an opaque type, cast the pointer to this
- // type.
- selPtr = llvm::ConstantExpr::getBitCast(selPtr, SelectorTy);
selectorAliases[i]->replaceAllUsesWith(selPtr);
selectorAliases[i]->eraseFromParent();
}
@@ -3744,7 +3925,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
// Number of static selectors
symtab.addInt(LongTy, selectorCount);
- symtab.addBitCast(selectorList, selStructPtrTy);
+ symtab.add(selectorList);
// Number of classes defined.
symtab.addInt(CGM.Int16Ty, Classes.size());
@@ -3770,9 +3951,9 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
llvm::Type *moduleEltTys[] = {
LongTy, LongTy, PtrToInt8Ty, symtab->getType(), IntTy
};
- llvm::StructType *moduleTy =
- llvm::StructType::get(CGM.getLLVMContext(),
- makeArrayRef(moduleEltTys).drop_back(unsigned(RuntimeVersion < 10)));
+ llvm::StructType *moduleTy = llvm::StructType::get(
+ CGM.getLLVMContext(),
+ ArrayRef(moduleEltTys).drop_back(unsigned(RuntimeVersion < 10)));
ConstantInitBuilder builder(CGM);
auto module = builder.beginStruct(moduleTy);
@@ -3783,9 +3964,9 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
// The path to the source file where this module was declared
SourceManager &SM = CGM.getContext().getSourceManager();
- const FileEntry *mainFile = SM.getFileEntryForID(SM.getMainFileID());
+ OptionalFileEntryRef mainFile = SM.getFileEntryRefForID(SM.getMainFileID());
std::string path =
- (Twine(mainFile->getDir()->getName()) + "/" + mainFile->getName()).str();
+ (mainFile->getDir().getName() + "/" + mainFile->getName()).str();
module.add(MakeConstantString(path, ".objc_source_file_name"));
module.add(symtab);
@@ -3853,7 +4034,6 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
llvm::Constant *TheClass =
TheModule.getGlobalVariable("_OBJC_CLASS_" + iter->first, true);
if (TheClass) {
- TheClass = llvm::ConstantExpr::getBitCast(TheClass, PtrTy);
Builder.CreateCall(RegisterAlias,
{TheClass, MakeConstantString(iter->second)});
}
@@ -3874,14 +4054,50 @@ llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
CodeGenTypes &Types = CGM.getTypes();
llvm::FunctionType *MethodTy =
Types.GetFunctionType(Types.arrangeObjCMethodDeclaration(OMD));
- std::string FunctionName = getSymbolNameForMethod(OMD);
-
- llvm::Function *Method
- = llvm::Function::Create(MethodTy,
- llvm::GlobalValue::InternalLinkage,
- FunctionName,
- &TheModule);
- return Method;
+
+ bool isDirect = OMD->isDirectMethod();
+ std::string FunctionName =
+ getSymbolNameForMethod(OMD, /*include category*/ !isDirect);
+
+ if (!isDirect)
+ return llvm::Function::Create(MethodTy,
+ llvm::GlobalVariable::InternalLinkage,
+ FunctionName, &TheModule);
+
+ auto *COMD = OMD->getCanonicalDecl();
+ auto I = DirectMethodDefinitions.find(COMD);
+ llvm::Function *OldFn = nullptr, *Fn = nullptr;
+
+ if (I == DirectMethodDefinitions.end()) {
+ auto *F =
+ llvm::Function::Create(MethodTy, llvm::GlobalVariable::ExternalLinkage,
+ FunctionName, &TheModule);
+ DirectMethodDefinitions.insert(std::make_pair(COMD, F));
+ return F;
+ }
+
+ // Objective-C allows for the declaration and implementation types
+ // to differ slightly.
+ //
+ // If we're being asked for the Function associated for a method
+ // implementation, a previous value might have been cached
+ // based on the type of the canonical declaration.
+ //
+ // If these do not match, then we'll replace this function with
+ // a new one that has the proper type below.
+ if (!OMD->getBody() || COMD->getReturnType() == OMD->getReturnType())
+ return I->second;
+
+ OldFn = I->second;
+ Fn = llvm::Function::Create(MethodTy, llvm::GlobalValue::ExternalLinkage, "",
+ &CGM.getModule());
+ Fn->takeName(OldFn);
+ OldFn->replaceAllUsesWith(Fn);
+ OldFn->eraseFromParent();
+
+ // Replace the cached function in the map.
+ I->second = Fn;
+ return Fn;
}
void CGObjCGNU::GenerateDirectMethodPrologue(CodeGenFunction &CGF,
@@ -3961,7 +4177,7 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
ExceptionAsObject = CGF.ObjCEHValueStack.back();
isRethrow = true;
}
- if (isRethrow && usesSEHExceptions) {
+ if (isRethrow && (usesSEHExceptions || usesCxxExceptions)) {
// For SEH, ExceptionAsObject may be undef, because the catch handler is
// not passed it for catchalls and so it is not visible to the catch
// funclet. The real thrown object will still be live on the stack at this
@@ -3971,8 +4187,7 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
// argument.
llvm::CallBase *Throw = CGF.EmitRuntimeCallOrInvoke(ExceptionReThrowFn);
Throw->setDoesNotReturn();
- }
- else {
+ } else {
ExceptionAsObject = CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy);
llvm::CallBase *Throw =
CGF.EmitRuntimeCallOrInvoke(ExceptionThrowFn, ExceptionAsObject);
@@ -3986,16 +4201,16 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGenFunction &CGF,
Address AddrWeakObj) {
CGBuilderTy &B = CGF.Builder;
- AddrWeakObj = EnforceType(B, AddrWeakObj, PtrToIdTy);
- return B.CreateCall(WeakReadFn, AddrWeakObj.getPointer());
+ return B.CreateCall(WeakReadFn,
+ EnforceType(B, AddrWeakObj.getPointer(), PtrToIdTy));
}
void CGObjCGNU::EmitObjCWeakAssign(CodeGenFunction &CGF,
llvm::Value *src, Address dst) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
- dst = EnforceType(B, dst, PtrToIdTy);
- B.CreateCall(WeakAssignFn, {src, dst.getPointer()});
+ llvm::Value *dstVal = EnforceType(B, dst.getPointer(), PtrToIdTy);
+ B.CreateCall(WeakAssignFn, {src, dstVal});
}
void CGObjCGNU::EmitObjCGlobalAssign(CodeGenFunction &CGF,
@@ -4003,10 +4218,10 @@ void CGObjCGNU::EmitObjCGlobalAssign(CodeGenFunction &CGF,
bool threadlocal) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
- dst = EnforceType(B, dst, PtrToIdTy);
+ llvm::Value *dstVal = EnforceType(B, dst.getPointer(), PtrToIdTy);
// FIXME. Add threadloca assign API
assert(!threadlocal && "EmitObjCGlobalAssign - Threal Local API NYI");
- B.CreateCall(GlobalAssignFn, {src, dst.getPointer()});
+ B.CreateCall(GlobalAssignFn, {src, dstVal});
}
void CGObjCGNU::EmitObjCIvarAssign(CodeGenFunction &CGF,
@@ -4014,16 +4229,16 @@ void CGObjCGNU::EmitObjCIvarAssign(CodeGenFunction &CGF,
llvm::Value *ivarOffset) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
- dst = EnforceType(B, dst, IdTy);
- B.CreateCall(IvarAssignFn, {src, dst.getPointer(), ivarOffset});
+ llvm::Value *dstVal = EnforceType(B, dst.getPointer(), IdTy);
+ B.CreateCall(IvarAssignFn, {src, dstVal, ivarOffset});
}
void CGObjCGNU::EmitObjCStrongCastAssign(CodeGenFunction &CGF,
llvm::Value *src, Address dst) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
- dst = EnforceType(B, dst, PtrToIdTy);
- B.CreateCall(StrongCastAssignFn, {src, dst.getPointer()});
+ llvm::Value *dstVal = EnforceType(B, dst.getPointer(), PtrToIdTy);
+ B.CreateCall(StrongCastAssignFn, {src, dstVal});
}
void CGObjCGNU::EmitGCMemmoveCollectable(CodeGenFunction &CGF,
@@ -4031,10 +4246,10 @@ void CGObjCGNU::EmitGCMemmoveCollectable(CodeGenFunction &CGF,
Address SrcPtr,
llvm::Value *Size) {
CGBuilderTy &B = CGF.Builder;
- DestPtr = EnforceType(B, DestPtr, PtrTy);
- SrcPtr = EnforceType(B, SrcPtr, PtrTy);
+ llvm::Value *DestPtrVal = EnforceType(B, DestPtr.getPointer(), PtrTy);
+ llvm::Value *SrcPtrVal = EnforceType(B, SrcPtr.getPointer(), PtrTy);
- B.CreateCall(MemMoveFn, {DestPtr.getPointer(), SrcPtr.getPointer(), Size});
+ B.CreateCall(MemMoveFn, {DestPtrVal, SrcPtrVal, Size});
}
llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
@@ -4046,9 +4261,9 @@ llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
// when linked against code which isn't (most of the time).
llvm::GlobalVariable *IvarOffsetPointer = TheModule.getNamedGlobal(Name);
if (!IvarOffsetPointer)
- IvarOffsetPointer = new llvm::GlobalVariable(TheModule,
- llvm::Type::getInt32PtrTy(VMContext), false,
- llvm::GlobalValue::ExternalLinkage, nullptr, Name);
+ IvarOffsetPointer = new llvm::GlobalVariable(
+ TheModule, llvm::PointerType::getUnqual(VMContext), false,
+ llvm::GlobalValue::ExternalLinkage, nullptr, Name);
return IvarOffsetPointer;
}
@@ -4092,10 +4307,11 @@ llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
CGF.CGM.getTarget().getTriple().isKnownWindowsMSVCEnvironment())
return CGF.Builder.CreateZExtOrBitCast(
CGF.Builder.CreateAlignedLoad(
- Int32Ty, CGF.Builder.CreateAlignedLoad(
- llvm::Type::getInt32PtrTy(VMContext),
- ObjCIvarOffsetVariable(Interface, Ivar),
- CGF.getPointerAlign(), "ivar"),
+ Int32Ty,
+ CGF.Builder.CreateAlignedLoad(
+ llvm::PointerType::getUnqual(VMContext),
+ ObjCIvarOffsetVariable(Interface, Ivar),
+ CGF.getPointerAlign(), "ivar"),
CharUnits::fromQuantity(4)),
PtrDiffTy);
std::string name = "__objc_ivar_offset_value_" +
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
index 3de67bb4bbc5..517f7cddebc1 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
@@ -174,6 +174,7 @@ protected:
public:
llvm::IntegerType *ShortTy, *IntTy, *LongTy;
llvm::PointerType *Int8PtrTy, *Int8PtrPtrTy;
+ llvm::PointerType *Int8PtrProgramASTy;
llvm::Type *IvarOffsetVarTy;
/// ObjectPtrTy - LLVM type for object handles (typeof(id))
@@ -736,14 +737,17 @@ public:
// Also it is safe to make it readnone, since we never load or store the
// classref except by calling this function.
llvm::Type *params[] = { Int8PtrPtrTy };
+ llvm::LLVMContext &C = CGM.getLLVMContext();
+ llvm::AttributeSet AS = llvm::AttributeSet::get(C, {
+ llvm::Attribute::get(C, llvm::Attribute::NonLazyBind),
+ llvm::Attribute::getWithMemoryEffects(C, llvm::MemoryEffects::none()),
+ llvm::Attribute::get(C, llvm::Attribute::NoUnwind),
+ });
llvm::FunctionCallee F = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(ClassnfABIPtrTy, params, false),
"objc_loadClassref",
llvm::AttributeList::get(CGM.getLLVMContext(),
- llvm::AttributeList::FunctionIndex,
- {llvm::Attribute::NonLazyBind,
- llvm::Attribute::ReadNone,
- llvm::Attribute::NoUnwind}));
+ llvm::AttributeList::FunctionIndex, AS));
if (!CGM.getTriple().isOSBinFormatCOFF())
cast<llvm::Function>(F.getCallee())->setLinkage(
llvm::Function::ExternalWeakLinkage);
@@ -1170,7 +1174,7 @@ public:
static ProtocolMethodLists get(const ObjCProtocolDecl *PD) {
ProtocolMethodLists result;
- for (auto MD : PD->methods()) {
+ for (auto *MD : PD->methods()) {
size_t index = (2 * size_t(MD->isOptional()))
+ (size_t(MD->isClassMethod()));
result.Methods[index].push_back(MD);
@@ -1709,8 +1713,8 @@ public:
/// A helper class for performing the null-initialization of a return
/// value.
struct NullReturnState {
- llvm::BasicBlock *NullBB;
- NullReturnState() : NullBB(nullptr) {}
+ llvm::BasicBlock *NullBB = nullptr;
+ NullReturnState() = default;
/// Perform a null-check of the given receiver.
void init(CodeGenFunction &CGF, llvm::Value *receiver) {
@@ -1754,37 +1758,9 @@ struct NullReturnState {
// Okay, start emitting the null-receiver block.
CGF.EmitBlock(NullBB);
- // Release any consumed arguments we've got.
+ // Destroy any consumed arguments we've got.
if (Method) {
- CallArgList::const_iterator I = CallArgs.begin();
- for (ObjCMethodDecl::param_const_iterator i = Method->param_begin(),
- e = Method->param_end(); i != e; ++i, ++I) {
- const ParmVarDecl *ParamDecl = (*i);
- if (ParamDecl->hasAttr<NSConsumedAttr>()) {
- RValue RV = I->getRValue(CGF);
- assert(RV.isScalar() &&
- "NullReturnState::complete - arg not on object");
- CGF.EmitARCRelease(RV.getScalarVal(), ARCImpreciseLifetime);
- } else {
- QualType QT = ParamDecl->getType();
- auto *RT = QT->getAs<RecordType>();
- if (RT && RT->getDecl()->isParamDestroyedInCallee()) {
- RValue RV = I->getRValue(CGF);
- QualType::DestructionKind DtorKind = QT.isDestructedType();
- switch (DtorKind) {
- case QualType::DK_cxx_destructor:
- CGF.destroyCXXObject(CGF, RV.getAggregateAddress(), QT);
- break;
- case QualType::DK_nontrivial_c_struct:
- CGF.destroyNonTrivialCStruct(CGF, RV.getAggregateAddress(), QT);
- break;
- default:
- llvm_unreachable("unexpected dtor kind");
- break;
- }
- }
- }
- }
+ CGObjCRuntime::destroyCalleeDestroyedArguments(CGF, Method, CallArgs);
}
// The phi code below assumes that we haven't needed any control flow yet.
@@ -1874,7 +1850,7 @@ static bool hasObjCExceptionAttribute(ASTContext &Context,
static llvm::GlobalValue::LinkageTypes
getLinkageTypeForObjCMetadata(CodeGenModule &CGM, StringRef Section) {
if (CGM.getTriple().isOSBinFormatMachO() &&
- (Section.empty() || Section.startswith("__DATA")))
+ (Section.empty() || Section.starts_with("__DATA")))
return llvm::GlobalValue::InternalLinkage;
return llvm::GlobalValue::PrivateLinkage;
}
@@ -1982,9 +1958,8 @@ llvm::Constant *CGObjCMac::getNSConstantStringClassRef() {
llvm::Type *PTy = llvm::ArrayType::get(CGM.IntTy, 0);
auto GV = CGM.CreateRuntimeVariable(PTy, str);
- auto V = llvm::ConstantExpr::getBitCast(GV, CGM.IntTy->getPointerTo());
- ConstantStringClassRef = V;
- return V;
+ ConstantStringClassRef = GV;
+ return GV;
}
llvm::Constant *CGObjCNonFragileABIMac::getNSConstantStringClassRef() {
@@ -1996,12 +1971,8 @@ llvm::Constant *CGObjCNonFragileABIMac::getNSConstantStringClassRef() {
StringClass.empty() ? "OBJC_CLASS_$_NSConstantString"
: "OBJC_CLASS_$_" + StringClass;
llvm::Constant *GV = GetClassGlobal(str, NotForDefinition);
-
- // Make sure the result is of the correct type.
- auto V = llvm::ConstantExpr::getBitCast(GV, CGM.IntTy->getPointerTo());
-
- ConstantStringClassRef = V;
- return V;
+ ConstantStringClassRef = GV;
+ return GV;
}
ConstantAddress
@@ -2011,7 +1982,8 @@ CGObjCCommonMac::GenerateConstantNSString(const StringLiteral *Literal) {
GetConstantStringEntry(NSConstantStringMap, Literal, StringLength);
if (auto *C = Entry.second)
- return ConstantAddress(C, CharUnits::fromQuantity(C->getAlignment()));
+ return ConstantAddress(
+ C, C->getValueType(), CharUnits::fromQuantity(C->getAlignment()));
// If we don't already have it, get _NSConstantStringClassReference.
llvm::Constant *Class = getNSConstantStringClassRef();
@@ -2019,11 +1991,8 @@ CGObjCCommonMac::GenerateConstantNSString(const StringLiteral *Literal) {
// If we don't already have it, construct the type for a constant NSString.
if (!NSConstantStringType) {
NSConstantStringType =
- llvm::StructType::create({
- CGM.Int32Ty->getPointerTo(),
- CGM.Int8PtrTy,
- CGM.IntTy
- }, "struct.__builtin_NSString");
+ llvm::StructType::create({CGM.UnqualPtrTy, CGM.Int8PtrTy, CGM.IntTy},
+ "struct.__builtin_NSString");
}
ConstantInitBuilder Builder(CGM);
@@ -2045,7 +2014,7 @@ CGObjCCommonMac::GenerateConstantNSString(const StringLiteral *Literal) {
// Don't enforce the target's minimum global alignment, since the only use
// of the string is via this class initializer.
GV->setAlignment(llvm::Align(1));
- Fields.addBitCast(GV, CGM.Int8PtrTy);
+ Fields.add(GV);
// String length.
Fields.addInt(CGM.IntTy, StringLength);
@@ -2064,7 +2033,7 @@ CGObjCCommonMac::GenerateConstantNSString(const StringLiteral *Literal) {
: NSStringSection);
Entry.second = GV;
- return ConstantAddress(GV, Alignment);
+ return ConstantAddress(GV, GV->getValueType(), Alignment);
}
enum {
@@ -2151,15 +2120,6 @@ CodeGen::RValue CGObjCMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
Method, Class, ObjCTypes);
}
-static bool isWeakLinkedClass(const ObjCInterfaceDecl *ID) {
- do {
- if (ID->isWeakImported())
- return true;
- } while ((ID = ID->getSuperClass()));
-
- return false;
-}
-
CodeGen::RValue
CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
ReturnValueSlot Return,
@@ -2174,22 +2134,14 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
const ObjCCommonTypesHelper &ObjCTypes) {
CodeGenTypes &Types = CGM.getTypes();
auto selTy = CGF.getContext().getObjCSelType();
- llvm::Value *SelValue;
-
- if (Method && Method->isDirectMethod()) {
- // Direct methods will synthesize the proper `_cmd` internally,
- // so just don't bother with setting the `_cmd` argument.
- assert(!IsSuper);
- SelValue = llvm::UndefValue::get(Types.ConvertType(selTy));
- } else {
- SelValue = GetSelector(CGF, Sel);
- }
+ llvm::Value *SelValue = llvm::UndefValue::get(Types.ConvertType(selTy));
CallArgList ActualArgs;
if (!IsSuper)
Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy);
ActualArgs.add(RValue::get(Arg0), Arg0Ty);
- ActualArgs.add(RValue::get(SelValue), selTy);
+ if (!Method || !Method->isDirectMethod())
+ ActualArgs.add(RValue::get(SelValue), selTy);
ActualArgs.addFrom(CallArgs);
// If we're calling a method, use the formal signature.
@@ -2200,38 +2152,19 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
CGM.getContext().getCanonicalType(ResultType) &&
"Result type mismatch!");
- bool ReceiverCanBeNull = true;
-
- // Super dispatch assumes that self is non-null; even the messenger
- // doesn't have a null check internally.
- if (IsSuper) {
- ReceiverCanBeNull = false;
-
- // If this is a direct dispatch of a class method, check whether the class,
- // or anything in its hierarchy, was weak-linked.
- } else if (ClassReceiver && Method && Method->isClassMethod()) {
- ReceiverCanBeNull = isWeakLinkedClass(ClassReceiver);
-
- // If we're emitting a method, and self is const (meaning just ARC, for now),
- // and the receiver is a load of self, then self is a valid object.
- } else if (auto CurMethod =
- dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl)) {
- auto Self = CurMethod->getSelfDecl();
- if (Self->getType().isConstQualified()) {
- if (auto LI = dyn_cast<llvm::LoadInst>(Arg0->stripPointerCasts())) {
- llvm::Value *SelfAddr = CGF.GetAddrOfLocalVar(Self).getPointer();
- if (SelfAddr == LI->getPointerOperand()) {
- ReceiverCanBeNull = false;
- }
- }
- }
- }
+ bool ReceiverCanBeNull =
+ canMessageReceiverBeNull(CGF, Method, IsSuper, ClassReceiver, Arg0);
bool RequiresNullCheck = false;
+ bool RequiresSelValue = true;
llvm::FunctionCallee Fn = nullptr;
if (Method && Method->isDirectMethod()) {
+ assert(!IsSuper);
Fn = GenerateDirectMethod(Method, Method->getClassInterface());
+ // Direct methods will synthesize the proper `_cmd` internally,
+ // so just don't bother with setting the `_cmd` argument.
+ RequiresSelValue = false;
} else if (CGM.ReturnSlotInterferesWithArgs(MSI.CallInfo)) {
if (ReceiverCanBeNull) RequiresNullCheck = true;
Fn = (ObjCABI == 2) ? ObjCTypes.getSendStretFn2(IsSuper)
@@ -2261,20 +2194,20 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
RequiresNullCheck = false;
// Emit a null-check if there's a consumed argument other than the receiver.
- if (!RequiresNullCheck && CGM.getLangOpts().ObjCAutoRefCount && Method) {
- for (const auto *ParamDecl : Method->parameters()) {
- if (ParamDecl->isDestroyedInCallee()) {
- RequiresNullCheck = true;
- break;
- }
- }
- }
+ if (!RequiresNullCheck && Method && Method->hasParamDestroyedInCallee())
+ RequiresNullCheck = true;
NullReturnState nullReturn;
if (RequiresNullCheck) {
nullReturn.init(CGF, Arg0);
}
+ // If a selector value needs to be passed, emit the load before the call.
+ if (RequiresSelValue) {
+ SelValue = GetSelector(CGF, Sel);
+ ActualArgs[1] = CallArg(RValue::get(SelValue), selTy);
+ }
+
llvm::CallBase *CallSite;
CGCallee Callee = CGCallee::forDirect(BitcastFn);
RValue rvalue = CGF.EmitCall(MSI.CallInfo, Callee, Return, ActualArgs,
@@ -2466,8 +2399,8 @@ void IvarLayoutBuilder::visitBlock(const CGBlockInfo &blockInfo) {
Qualifiers::GC GCAttr = GetGCAttrTypeForType(CGM.getContext(), type);
if (GCAttr == Qualifiers::Strong) {
- assert(CGM.getContext().getTypeSize(type)
- == CGM.getTarget().getPointerWidth(0));
+ assert(CGM.getContext().getTypeSize(type) ==
+ CGM.getTarget().getPointerWidth(LangAS::Default));
IvarsInfo.push_back(IvarInfo(fieldOffset, /*size in words*/ 1));
}
}
@@ -2553,7 +2486,7 @@ void CGObjCCommonMac::BuildRCRecordLayout(const llvm::StructLayout *RecLayout,
if (FQT->isUnionType())
HasUnion = true;
- BuildRCBlockVarRecordLayout(FQT->getAs<RecordType>(),
+ BuildRCBlockVarRecordLayout(FQT->castAs<RecordType>(),
BytePos + FieldOffset, HasUnion);
continue;
}
@@ -2760,7 +2693,7 @@ llvm::Constant *CGObjCCommonMac::getBitmapBlockLayout(bool ComputeByrefLayout) {
llvm::Constant *nullPtr = llvm::Constant::getNullValue(CGM.Int8PtrTy);
if (RunSkipBlockVars.empty())
return nullPtr;
- unsigned WordSizeInBits = CGM.getTarget().getPointerWidth(0);
+ unsigned WordSizeInBits = CGM.getTarget().getPointerWidth(LangAS::Default);
unsigned ByteSizeInBits = CGM.getTarget().getCharWidth();
unsigned WordSizeInBytes = WordSizeInBits/ByteSizeInBits;
@@ -2946,7 +2879,7 @@ void CGObjCCommonMac::fillRunSkipBlockVars(CodeGenModule &CGM,
RunSkipBlockVars.clear();
bool hasUnion = false;
- unsigned WordSizeInBits = CGM.getTarget().getPointerWidth(0);
+ unsigned WordSizeInBits = CGM.getTarget().getPointerWidth(LangAS::Default);
unsigned ByteSizeInBits = CGM.getTarget().getCharWidth();
unsigned WordSizeInBytes = WordSizeInBits/ByteSizeInBits;
@@ -3001,8 +2934,7 @@ CGObjCCommonMac::BuildRCBlockLayout(CodeGenModule &CGM,
std::string CGObjCCommonMac::getRCBlockLayoutStr(CodeGenModule &CGM,
const CGBlockInfo &blockInfo) {
fillRunSkipBlockVars(CGM, blockInfo);
- return getBlockLayoutInfoString(RunSkipBlockVars,
- blockInfo.needsCopyDisposeHelpers());
+ return getBlockLayoutInfoString(RunSkipBlockVars, blockInfo.NeedsCopyDispose);
}
llvm::Constant *CGObjCCommonMac::BuildByrefLayout(CodeGen::CodeGenModule &CGM,
@@ -3029,8 +2961,7 @@ llvm::Value *CGObjCMac::GenerateProtocolRef(CodeGenFunction &CGF,
// resolved. Investigate. Its also wasteful to look this up over and over.
LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
- return llvm::ConstantExpr::getBitCast(GetProtocolRef(PD),
- ObjCTypes.getExternalProtocolPtrTy());
+ return GetProtocolRef(PD);
}
void CGObjCCommonMac::GenerateProtocol(const ObjCProtocolDecl *PD) {
@@ -3250,7 +3181,7 @@ CGObjCMac::EmitProtocolList(Twine name,
llvm::GlobalVariable *GV =
CreateMetadataVar(name, values, section, CGM.getPointerAlign(), false);
- return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListPtrTy);
+ return GV;
}
static void
@@ -3358,7 +3289,7 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
llvm::GlobalVariable *GV =
CreateMetadataVar(Name, values, Section, CGM.getPointerAlign(), true);
- return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.PropertyListPtrTy);
+ return GV;
}
llvm::Constant *
@@ -3379,7 +3310,7 @@ CGObjCCommonMac::EmitProtocolMethodTypes(Twine Name,
llvm::GlobalVariable *GV =
CreateMetadataVar(Name, Init, Section, CGM.getPointerAlign(), true);
- return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.Int8PtrPtrTy);
+ return GV;
}
/*
@@ -3389,7 +3320,7 @@ CGObjCCommonMac::EmitProtocolMethodTypes(Twine Name,
struct _objc_method_list *instance_methods;
struct _objc_method_list *class_methods;
struct _objc_protocol_list *protocols;
- uint32_t size; // <rdar://4585769>
+ uint32_t size; // sizeof(struct _objc_category)
struct _objc_property_list *instance_properties;
struct _objc_property_list *class_properties;
};
@@ -3518,7 +3449,7 @@ static bool hasWeakMember(QualType type) {
}
if (auto recType = type->getAs<RecordType>()) {
- for (auto field : recType->getDecl()->fields()) {
+ for (auto *field : recType->getDecl()->fields()) {
if (hasWeakMember(field->getType()))
return true;
}
@@ -3626,8 +3557,7 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
// Record a reference to the super class.
LazySymbols.insert(Super->getIdentifier());
- values.addBitCast(GetClassName(Super->getObjCRuntimeNameAsString()),
- ObjCTypes.ClassPtrTy);
+ values.add(GetClassName(Super->getObjCRuntimeNameAsString()));
} else {
values.addNullPointer(ObjCTypes.ClassPtrTy);
}
@@ -3681,14 +3611,12 @@ llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
const ObjCInterfaceDecl *Root = ID->getClassInterface();
while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
Root = Super;
- values.addBitCast(GetClassName(Root->getObjCRuntimeNameAsString()),
- ObjCTypes.ClassPtrTy);
+ values.add(GetClassName(Root->getObjCRuntimeNameAsString()));
// The super class for the metaclass is emitted as the name of the
// super class. The runtime fixes this up to point to the
// *metaclass* for the super class.
if (ObjCInterfaceDecl *Super = ID->getClassInterface()->getSuperClass()) {
- values.addBitCast(GetClassName(Super->getObjCRuntimeNameAsString()),
- ObjCTypes.ClassPtrTy);
+ values.add(GetClassName(Super->getObjCRuntimeNameAsString()));
} else {
values.addNullPointer(ObjCTypes.ClassPtrTy);
}
@@ -3869,16 +3797,10 @@ llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
ivarList.fillPlaceholderWithInt(countSlot, ObjCTypes.IntTy, count);
llvm::GlobalVariable *GV;
- if (ForClass)
- GV =
- CreateMetadataVar("OBJC_CLASS_VARIABLES_" + ID->getName(), ivarList,
- "__OBJC,__class_vars,regular,no_dead_strip",
- CGM.getPointerAlign(), true);
- else
- GV = CreateMetadataVar("OBJC_INSTANCE_VARIABLES_" + ID->getName(), ivarList,
- "__OBJC,__instance_vars,regular,no_dead_strip",
- CGM.getPointerAlign(), true);
- return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListPtrTy);
+ GV = CreateMetadataVar("OBJC_INSTANCE_VARIABLES_" + ID->getName(), ivarList,
+ "__OBJC,__instance_vars,regular,no_dead_strip",
+ CGM.getPointerAlign(), true);
+ return GV;
}
/// Build a struct objc_method_description constant for the given method.
@@ -3890,8 +3812,7 @@ llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
void CGObjCMac::emitMethodDescriptionConstant(ConstantArrayBuilder &builder,
const ObjCMethodDecl *MD) {
auto description = builder.beginStruct(ObjCTypes.MethodDescriptionTy);
- description.addBitCast(GetMethodVarName(MD->getSelector()),
- ObjCTypes.SelectorPtrTy);
+ description.add(GetMethodVarName(MD->getSelector()));
description.add(GetMethodVarType(MD));
description.finishAndAddTo(builder);
}
@@ -3909,10 +3830,9 @@ void CGObjCMac::emitMethodConstant(ConstantArrayBuilder &builder,
assert(fn && "no definition registered for method");
auto method = builder.beginStruct(ObjCTypes.MethodTy);
- method.addBitCast(GetMethodVarName(MD->getSelector()),
- ObjCTypes.SelectorPtrTy);
+ method.add(GetMethodVarName(MD->getSelector()));
method.add(GetMethodVarType(MD));
- method.addBitCast(fn, ObjCTypes.Int8PtrTy);
+ method.add(fn);
method.finishAndAddTo(builder);
}
@@ -3997,8 +3917,7 @@ llvm::Constant *CGObjCMac::emitMethodList(Twine name, MethodListType MLT,
llvm::GlobalVariable *GV = CreateMetadataVar(prefix + name, values, section,
CGM.getPointerAlign(), true);
- return llvm::ConstantExpr::getBitCast(GV,
- ObjCTypes.MethodDescriptionListPtrTy);
+ return GV;
}
// Otherwise, it's an objc_method_list.
@@ -4015,7 +3934,7 @@ llvm::Constant *CGObjCMac::emitMethodList(Twine name, MethodListType MLT,
llvm::GlobalVariable *GV = CreateMetadataVar(prefix + name, values, section,
CGM.getPointerAlign(), true);
- return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListPtrTy);
+ return GV;
}
llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
@@ -4070,8 +3989,7 @@ CGObjCCommonMac::GenerateDirectMethod(const ObjCMethodDecl *OMD,
Fn = llvm::Function::Create(MethodTy, llvm::GlobalValue::ExternalLinkage,
"", &CGM.getModule());
Fn->takeName(OldFn);
- OldFn->replaceAllUsesWith(
- llvm::ConstantExpr::getBitCast(Fn, OldFn->getType()));
+ OldFn->replaceAllUsesWith(Fn);
OldFn->eraseFromParent();
// Replace the cached function in the map.
@@ -4167,6 +4085,9 @@ void CGObjCCommonMac::GenerateDirectMethodPrologue(
// only synthesize _cmd if it's referenced
if (OMD->getCmdDecl()->isUsed()) {
+ // `_cmd` is not a parameter to direct methods, so storage must be
+ // explicitly declared for it.
+ CGF.EmitVarDecl(*OMD->getCmdDecl());
Builder.CreateStore(GetSelector(CGF, OMD),
CGF.GetAddrOfLocalVar(OMD->getCmdDecl()));
}
@@ -4436,7 +4357,11 @@ FragileHazards::FragileHazards(CodeGenFunction &CGF) : CGF(CGF) {
void FragileHazards::emitWriteHazard() {
if (Locals.empty()) return;
- CGF.EmitNounwindRuntimeCall(WriteHazard, Locals);
+ llvm::CallInst *Call = CGF.EmitNounwindRuntimeCall(WriteHazard, Locals);
+ for (auto Pair : llvm::enumerate(Locals))
+ Call->addParamAttr(Pair.index(), llvm::Attribute::get(
+ CGF.getLLVMContext(), llvm::Attribute::ElementType,
+ cast<llvm::AllocaInst>(Pair.value())->getAllocatedType()));
}
void FragileHazards::emitReadHazard(CGBuilderTy &Builder) {
@@ -4444,6 +4369,10 @@ void FragileHazards::emitReadHazard(CGBuilderTy &Builder) {
llvm::CallInst *call = Builder.CreateCall(ReadHazard, Locals);
call->setDoesNotThrow();
call->setCallingConv(CGF.getRuntimeCC());
+ for (auto Pair : llvm::enumerate(Locals))
+ call->addParamAttr(Pair.index(), llvm::Attribute::get(
+ Builder.getContext(), llvm::Attribute::ElementType,
+ cast<llvm::AllocaInst>(Pair.value())->getAllocatedType()));
}
/// Emit read hazards in all the protected blocks, i.e. all the blocks
@@ -4541,14 +4470,10 @@ llvm::FunctionType *FragileHazards::GetAsmFnType() {
want to implement correct ObjC/C++ exception interactions for the
fragile ABI.
- Note that for this use of setjmp/longjmp to be correct, we may need
- to mark some local variables volatile: if a non-volatile local
- variable is modified between the setjmp and the longjmp, it has
- indeterminate value. For the purposes of LLVM IR, it may be
- sufficient to make loads and stores within the @try (to variables
- declared outside the @try) volatile. This is necessary for
- optimized correctness, but is not currently being done; this is
- being tracked as rdar://problem/8160285
+ Note that for this use of setjmp/longjmp to be correct in the presence of
+ optimization, we use inline assembly on the set of local variables to force
+ flushing locals to memory immediately before any protected calls and to
+ inhibit optimizing locals across the setjmp->catch edge.
The basic framework for a @try-catch-finally is as follows:
{
@@ -4788,9 +4713,7 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// matched and avoid generating code for falling off the end if
// so.
bool AllMatched = false;
- for (unsigned I = 0, N = AtTryStmt->getNumCatchStmts(); I != N; ++I) {
- const ObjCAtCatchStmt *CatchStmt = AtTryStmt->getCatchStmt(I);
-
+ for (const ObjCAtCatchStmt *CatchStmt : AtTryStmt->catch_stmts()) {
const VarDecl *CatchParam = CatchStmt->getCatchParamDecl();
const ObjCObjectPointerType *OPT = nullptr;
@@ -4974,11 +4897,11 @@ void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
Address AddrWeakObj) {
llvm::Type* DestTy = AddrWeakObj.getElementType();
- AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj,
- ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *AddrWeakObjVal = CGF.Builder.CreateBitCast(
+ AddrWeakObj.getPointer(), ObjCTypes.PtrObjectPtrTy);
llvm::Value *read_weak =
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcReadWeakFn(),
- AddrWeakObj.getPointer(), "weakread");
+ AddrWeakObjVal, "weakread");
read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
return read_weak;
}
@@ -4997,8 +4920,9 @@ void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst.getPointer() };
+ llvm::Value *dstVal =
+ CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = { src, dstVal };
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignWeakFn(),
args, "weakassign");
}
@@ -5018,8 +4942,9 @@ void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst.getPointer() };
+ llvm::Value *dstVal =
+ CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = {src, dstVal};
if (!threadlocal)
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignGlobalFn(),
args, "globalassign");
@@ -5044,8 +4969,9 @@ void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst.getPointer(), ivarOffset };
+ llvm::Value *dstVal =
+ CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = {src, dstVal, ivarOffset};
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignIvarFn(), args);
}
@@ -5063,18 +4989,16 @@ void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst.getPointer() };
+ llvm::Value *dstVal =
+ CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = {src, dstVal};
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignStrongCastFn(),
args, "strongassign");
}
void CGObjCMac::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
- Address DestPtr,
- Address SrcPtr,
+ Address DestPtr, Address SrcPtr,
llvm::Value *size) {
- SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
- DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
llvm::Value *args[] = { DestPtr.getPointer(), SrcPtr.getPointer(), size };
CGF.EmitNounwindRuntimeCall(ObjCTypes.GcMemmoveCollectableFn(), args);
}
@@ -5123,7 +5047,9 @@ std::string CGObjCCommonMac::GetSectionName(StringRef Section,
return ("." + Section.substr(2) + "$B").str();
case llvm::Triple::Wasm:
case llvm::Triple::GOFF:
+ case llvm::Triple::SPIRV:
case llvm::Triple::XCOFF:
+ case llvm::Triple::DXContainer:
llvm::report_fatal_error(
"Objective-C support is unimplemented for object file format");
}
@@ -5146,7 +5072,8 @@ enum ImageInfoFlags {
eImageInfo_OptimizedByDyld = (1 << 3), // This flag is set by the dyld shared cache.
// A flag indicating that the module has no instances of a @synthesize of a
- // superclass variable. <rdar://problem/6803242>
+ // superclass variable. This flag used to be consumed by the runtime to work
+ // around miscompile by gcc.
eImageInfo_CorrectedSynthesize = (1 << 4), // This flag is no longer set by clang.
eImageInfo_ImageIsSimulated = (1 << 5),
eImageInfo_ClassProperties = (1 << 6)
@@ -5259,17 +5186,17 @@ llvm::Constant *CGObjCMac::EmitModuleSymbols() {
if (ID->isWeakImported() && !IMP->isWeakImported())
DefinedClasses[i]->setLinkage(llvm::GlobalVariable::ExternalLinkage);
- array.addBitCast(DefinedClasses[i], ObjCTypes.Int8PtrTy);
+ array.add(DefinedClasses[i]);
}
for (unsigned i=0; i<NumCategories; i++)
- array.addBitCast(DefinedCategories[i], ObjCTypes.Int8PtrTy);
+ array.add(DefinedCategories[i]);
array.finishAndAddTo(values);
llvm::GlobalVariable *GV = CreateMetadataVar(
"OBJC_SYMBOLS", values, "__OBJC,__symbols,regular,no_dead_strip",
CGM.getPointerAlign(), true);
- return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.SymtabPtrTy);
+ return GV;
}
llvm::Value *CGObjCMac::EmitClassRefFromId(CodeGenFunction &CGF,
@@ -5279,13 +5206,10 @@ llvm::Value *CGObjCMac::EmitClassRefFromId(CodeGenFunction &CGF,
llvm::GlobalVariable *&Entry = ClassReferences[II];
if (!Entry) {
- llvm::Constant *Casted =
- llvm::ConstantExpr::getBitCast(GetClassName(II->getName()),
- ObjCTypes.ClassPtrTy);
- Entry = CreateMetadataVar(
- "OBJC_CLASS_REFERENCES_", Casted,
- "__OBJC,__cls_refs,literal_pointers,no_dead_strip",
- CGM.getPointerAlign(), true);
+ Entry =
+ CreateMetadataVar("OBJC_CLASS_REFERENCES_", GetClassName(II->getName()),
+ "__OBJC,__cls_refs,literal_pointers,no_dead_strip",
+ CGM.getPointerAlign(), true);
}
return CGF.Builder.CreateAlignedLoad(Entry->getValueType(), Entry,
@@ -5318,16 +5242,13 @@ Address CGObjCMac::EmitSelectorAddr(Selector Sel) {
llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
if (!Entry) {
- llvm::Constant *Casted =
- llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
- ObjCTypes.SelectorPtrTy);
Entry = CreateMetadataVar(
- "OBJC_SELECTOR_REFERENCES_", Casted,
+ "OBJC_SELECTOR_REFERENCES_", GetMethodVarName(Sel),
"__OBJC,__message_refs,literal_pointers,no_dead_strip", Align, true);
Entry->setExternallyInitialized(true);
}
- return Address(Entry, Align);
+ return Address(Entry, ObjCTypes.SelectorPtrTy, Align);
}
llvm::Constant *CGObjCCommonMac::GetClassName(StringRef RuntimeName) {
@@ -5338,12 +5259,7 @@ llvm::Constant *CGObjCCommonMac::GetClassName(StringRef RuntimeName) {
}
llvm::Function *CGObjCCommonMac::GetMethodDefinition(const ObjCMethodDecl *MD) {
- llvm::DenseMap<const ObjCMethodDecl*, llvm::Function*>::iterator
- I = MethodDefinitions.find(MD);
- if (I != MethodDefinitions.end())
- return I->second;
-
- return nullptr;
+ return MethodDefinitions.lookup(MD);
}
/// GetIvarLayoutName - Returns a unique constant for the given
@@ -5792,11 +5708,13 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
{
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
+ unsigned ProgramAS = CGM.getDataLayout().getProgramAddressSpace();
ShortTy = cast<llvm::IntegerType>(Types.ConvertType(Ctx.ShortTy));
IntTy = CGM.IntTy;
LongTy = cast<llvm::IntegerType>(Types.ConvertType(Ctx.LongTy));
Int8PtrTy = CGM.Int8PtrTy;
+ Int8PtrProgramASTy = llvm::PointerType::get(CGM.Int8Ty, ProgramAS);
Int8PtrPtrTy = CGM.Int8PtrPtrTy;
// arm64 targets use "int" ivar offset variables. All others,
@@ -5825,10 +5743,9 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
// id self;
// Class cls;
// }
- RecordDecl *RD = RecordDecl::Create(Ctx, TTK_Struct,
- Ctx.getTranslationUnitDecl(),
- SourceLocation(), SourceLocation(),
- &Ctx.Idents.get("_objc_super"));
+ RecordDecl *RD = RecordDecl::Create(
+ Ctx, TagTypeKind::Struct, Ctx.getTranslationUnitDecl(), SourceLocation(),
+ SourceLocation(), &Ctx.Idents.get("_objc_super"));
RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(),
nullptr, Ctx.getObjCIdType(), nullptr, nullptr,
false, ICIS_NoInit));
@@ -5865,7 +5782,7 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
// char *_imp;
// }
MethodTy = llvm::StructType::create("struct._objc_method", SelectorPtrTy,
- Int8PtrTy, Int8PtrTy);
+ Int8PtrTy, Int8PtrProgramASTy);
// struct _objc_cache *
CacheTy = llvm::StructType::create(VMContext, "struct._objc_cache");
@@ -6178,10 +6095,9 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// };
// First the clang type for struct _message_ref_t
- RecordDecl *RD = RecordDecl::Create(Ctx, TTK_Struct,
- Ctx.getTranslationUnitDecl(),
- SourceLocation(), SourceLocation(),
- &Ctx.Idents.get("_message_ref_t"));
+ RecordDecl *RD = RecordDecl::Create(
+ Ctx, TagTypeKind::Struct, Ctx.getTranslationUnitDecl(), SourceLocation(),
+ SourceLocation(), &Ctx.Idents.get("_message_ref_t"));
RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(),
nullptr, Ctx.VoidPtrTy, nullptr, nullptr, false,
ICIS_NoInit));
@@ -6236,8 +6152,8 @@ void CGObjCNonFragileABIMac::AddModuleClassList(
SmallVector<llvm::Constant*, 8> Symbols(NumClasses);
for (unsigned i=0; i<NumClasses; i++)
- Symbols[i] = llvm::ConstantExpr::getBitCast(Container[i],
- ObjCTypes.Int8PtrTy);
+ Symbols[i] = Container[i];
+
llvm::Constant *Init =
llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
Symbols.size()),
@@ -6246,13 +6162,12 @@ void CGObjCNonFragileABIMac::AddModuleClassList(
// Section name is obtained by calling GetSectionName, which returns
// sections in the __DATA segment on MachO.
assert((!CGM.getTriple().isOSBinFormatMachO() ||
- SectionName.startswith("__DATA")) &&
+ SectionName.starts_with("__DATA")) &&
"SectionName expected to start with __DATA on MachO");
llvm::GlobalVariable *GV = new llvm::GlobalVariable(
CGM.getModule(), Init->getType(), false,
llvm::GlobalValue::PrivateLinkage, Init, SymbolName);
- GV->setAlignment(
- llvm::Align(CGM.getDataLayout().getABITypeAlignment(Init->getType())));
+ GV->setAlignment(CGM.getDataLayout().getABITypeAlign(Init->getType()));
GV->setSection(SectionName);
CGM.addCompilerUsedGlobal(GV);
}
@@ -6484,8 +6399,7 @@ CGObjCNonFragileABIMac::BuildClassObject(const ObjCInterfaceDecl *CI,
if (CGM.getTriple().isOSBinFormatMachO())
GV->setSection("__DATA, __objc_data");
- GV->setAlignment(llvm::Align(
- CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassnfABITy)));
+ GV->setAlignment(CGM.getDataLayout().getABITypeAlign(ObjCTypes.ClassnfABITy));
if (!CGM.getTriple().isOSBinFormatCOFF())
if (HiddenVisibility)
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
@@ -6675,9 +6589,7 @@ llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CodeGenFunction &CGF,
// of protocol's meta-data (not a reference to it!)
assert(!PD->isNonRuntimeProtocol() &&
"attempting to get a protocol ref to a static protocol.");
- llvm::Constant *Init =
- llvm::ConstantExpr::getBitCast(GetOrEmitProtocol(PD),
- ObjCTypes.getExternalProtocolPtrTy());
+ llvm::Constant *Init = GetOrEmitProtocol(PD);
std::string ProtocolName("_OBJC_PROTOCOL_REFERENCE_$_");
ProtocolName += PD->getObjCRuntimeNameAsString();
@@ -6741,33 +6653,53 @@ void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
}
}
- values.add(emitMethodList(listName, MethodListType::CategoryInstanceMethods,
- instanceMethods));
- values.add(emitMethodList(listName, MethodListType::CategoryClassMethods,
- classMethods));
+ auto instanceMethodList = emitMethodList(
+ listName, MethodListType::CategoryInstanceMethods, instanceMethods);
+ auto classMethodList = emitMethodList(
+ listName, MethodListType::CategoryClassMethods, classMethods);
+ values.add(instanceMethodList);
+ values.add(classMethodList);
+ // Keep track of whether we have actual metadata to emit.
+ bool isEmptyCategory =
+ instanceMethodList->isNullValue() && classMethodList->isNullValue();
const ObjCCategoryDecl *Category =
- Interface->FindCategoryDeclaration(OCD->getIdentifier());
+ Interface->FindCategoryDeclaration(OCD->getIdentifier());
if (Category) {
SmallString<256> ExtName;
- llvm::raw_svector_ostream(ExtName) << Interface->getObjCRuntimeNameAsString() << "_$_"
- << OCD->getName();
- values.add(EmitProtocolList("_OBJC_CATEGORY_PROTOCOLS_$_"
- + Interface->getObjCRuntimeNameAsString() + "_$_"
- + Category->getName(),
- Category->protocol_begin(),
- Category->protocol_end()));
- values.add(EmitPropertyList("_OBJC_$_PROP_LIST_" + ExtName.str(),
- OCD, Category, ObjCTypes, false));
- values.add(EmitPropertyList("_OBJC_$_CLASS_PROP_LIST_" + ExtName.str(),
- OCD, Category, ObjCTypes, true));
+ llvm::raw_svector_ostream(ExtName)
+ << Interface->getObjCRuntimeNameAsString() << "_$_" << OCD->getName();
+ auto protocolList =
+ EmitProtocolList("_OBJC_CATEGORY_PROTOCOLS_$_" +
+ Interface->getObjCRuntimeNameAsString() + "_$_" +
+ Category->getName(),
+ Category->protocol_begin(), Category->protocol_end());
+ auto propertyList = EmitPropertyList("_OBJC_$_PROP_LIST_" + ExtName.str(),
+ OCD, Category, ObjCTypes, false);
+ auto classPropertyList =
+ EmitPropertyList("_OBJC_$_CLASS_PROP_LIST_" + ExtName.str(), OCD,
+ Category, ObjCTypes, true);
+ values.add(protocolList);
+ values.add(propertyList);
+ values.add(classPropertyList);
+ isEmptyCategory &= protocolList->isNullValue() &&
+ propertyList->isNullValue() &&
+ classPropertyList->isNullValue();
} else {
values.addNullPointer(ObjCTypes.ProtocolListnfABIPtrTy);
values.addNullPointer(ObjCTypes.PropertyListPtrTy);
values.addNullPointer(ObjCTypes.PropertyListPtrTy);
}
- unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.CategorynfABITy);
+ if (isEmptyCategory) {
+ // Empty category, don't emit any metadata.
+ values.abandon();
+ MethodDefinitions.clear();
+ return;
+ }
+
+ unsigned Size =
+ CGM.getDataLayout().getTypeAllocSize(ObjCTypes.CategorynfABITy);
values.addInt(ObjCTypes.IntTy, Size);
llvm::GlobalVariable *GCATV =
@@ -6798,17 +6730,16 @@ void CGObjCNonFragileABIMac::emitMethodConstant(ConstantArrayBuilder &builder,
const ObjCMethodDecl *MD,
bool forProtocol) {
auto method = builder.beginStruct(ObjCTypes.MethodTy);
- method.addBitCast(GetMethodVarName(MD->getSelector()),
- ObjCTypes.SelectorPtrTy);
+ method.add(GetMethodVarName(MD->getSelector()));
method.add(GetMethodVarType(MD));
if (forProtocol) {
// Protocol methods have no implementation. So, this entry is always NULL.
- method.addNullPointer(ObjCTypes.Int8PtrTy);
+ method.addNullPointer(ObjCTypes.Int8PtrProgramASTy);
} else {
llvm::Function *fn = GetMethodDefinition(MD);
assert(fn && "no definition for method?");
- method.addBitCast(fn, ObjCTypes.Int8PtrTy);
+ method.add(fn);
}
method.finishAndAddTo(builder);
@@ -6882,7 +6813,7 @@ CGObjCNonFragileABIMac::emitMethodList(Twine name, MethodListType kind,
llvm::GlobalVariable *GV = finishAndCreateGlobal(values, prefix + name, CGM);
CGM.addCompilerUsedGlobal(GV);
- return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListnfABIPtrTy);
+ return GV;
}
/// ObjCIvarOffsetVariable - Returns the ivar offset variable for
@@ -6926,8 +6857,8 @@ CGObjCNonFragileABIMac::EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
llvm::GlobalVariable *IvarOffsetGV = ObjCIvarOffsetVariable(ID, Ivar);
IvarOffsetGV->setInitializer(
llvm::ConstantInt::get(ObjCTypes.IvarOffsetVarTy, Offset));
- IvarOffsetGV->setAlignment(llvm::Align(
- CGM.getDataLayout().getABITypeAlignment(ObjCTypes.IvarOffsetVarTy)));
+ IvarOffsetGV->setAlignment(
+ CGM.getDataLayout().getABITypeAlign(ObjCTypes.IvarOffsetVarTy));
if (!CGM.getTriple().isOSBinFormatCOFF()) {
// FIXME: This matches gcc, but shouldn't the visibility be set on the use
@@ -7024,7 +6955,7 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
llvm::GlobalVariable *GV = finishAndCreateGlobal(
ivarList, Prefix + OID->getObjCRuntimeNameAsString(), CGM);
CGM.addCompilerUsedGlobal(GV);
- return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListnfABIPtrTy);
+ return GV;
}
llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocolRef(
@@ -7155,8 +7086,8 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
ProtocolRef);
if (!CGM.getTriple().isOSBinFormatMachO())
PTGV->setComdat(CGM.getModule().getOrInsertComdat(ProtocolRef));
- PTGV->setAlignment(llvm::Align(
- CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ProtocolnfABIPtrTy)));
+ PTGV->setAlignment(
+ CGM.getDataLayout().getABITypeAlign(ObjCTypes.ProtocolnfABIPtrTy));
PTGV->setSection(GetSectionName("__objc_protolist",
"coalesced,no_dead_strip"));
PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
@@ -7198,7 +7129,7 @@ CGObjCNonFragileABIMac::EmitProtocolList(Twine Name,
llvm::GlobalVariable *GV =
CGM.getModule().getGlobalVariable(TmpName.str(), true);
if (GV)
- return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListnfABIPtrTy);
+ return GV;
ConstantInitBuilder builder(CGM);
auto values = builder.beginStruct();
@@ -7216,8 +7147,7 @@ CGObjCNonFragileABIMac::EmitProtocolList(Twine Name,
GV = finishAndCreateGlobal(values, Name, CGM);
CGM.addCompilerUsedGlobal(GV);
- return llvm::ConstantExpr::getBitCast(GV,
- ObjCTypes.ProtocolListnfABIPtrTy);
+ return GV;
}
/// EmitObjCValueForIvar - Code Gen for nonfragile ivar reference.
@@ -7254,8 +7184,8 @@ CGObjCNonFragileABIMac::EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
CGF.getSizeAlign(), "ivar");
if (IsIvarOffsetKnownIdempotent(CGF, Ivar))
cast<llvm::LoadInst>(IvarOffsetValue)
- ->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
- llvm::MDNode::get(VMContext, None));
+ ->setMetadata(llvm::LLVMContext::MD_invariant_load,
+ llvm::MDNode::get(VMContext, std::nullopt));
}
// This could be 32bit int or 64bit integer depending on the architecture.
@@ -7384,7 +7314,7 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
Address mref =
Address(CGF.Builder.CreateBitCast(messageRef, ObjCTypes.MessageRefPtrTy),
- CGF.getPointerAlign());
+ ObjCTypes.MessageRefTy, CGF.getPointerAlign());
// Update the message ref argument.
args[1].setRValue(RValue::get(mref.getPointer()));
@@ -7443,7 +7373,7 @@ CGObjCNonFragileABIMac::GetClassGlobal(StringRef Name,
: llvm::GlobalValue::ExternalLinkage;
llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
- if (!GV || GV->getType() != ObjCTypes.ClassnfABITy->getPointerTo()) {
+ if (!GV || GV->getValueType() != ObjCTypes.ClassnfABITy) {
auto *NewGV = new llvm::GlobalVariable(ObjCTypes.ClassnfABITy, false, L,
nullptr, Name);
@@ -7451,12 +7381,11 @@ CGObjCNonFragileABIMac::GetClassGlobal(StringRef Name,
NewGV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
if (GV) {
- GV->replaceAllUsesWith(
- llvm::ConstantExpr::getBitCast(NewGV, GV->getType()));
+ GV->replaceAllUsesWith(NewGV);
GV->eraseFromParent();
}
GV = NewGV;
- CGM.getModule().getGlobalList().push_back(GV);
+ CGM.getModule().insertGlobalVariable(GV);
}
assert(GV->getLinkage() == L);
@@ -7654,8 +7583,8 @@ llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CodeGenFunction &CGF,
Address Addr = EmitSelectorAddr(Sel);
llvm::LoadInst* LI = CGF.Builder.CreateLoad(Addr);
- LI->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
- llvm::MDNode::get(VMContext, None));
+ LI->setMetadata(llvm::LLVMContext::MD_invariant_load,
+ llvm::MDNode::get(VMContext, std::nullopt));
return LI;
}
@@ -7663,14 +7592,11 @@ Address CGObjCNonFragileABIMac::EmitSelectorAddr(Selector Sel) {
llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
CharUnits Align = CGM.getPointerAlign();
if (!Entry) {
- llvm::Constant *Casted =
- llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
- ObjCTypes.SelectorPtrTy);
std::string SectionName =
GetSectionName("__objc_selrefs", "literal_pointers,no_dead_strip");
Entry = new llvm::GlobalVariable(
CGM.getModule(), ObjCTypes.SelectorPtrTy, false,
- getLinkageTypeForObjCMetadata(CGM, SectionName), Casted,
+ getLinkageTypeForObjCMetadata(CGM, SectionName), GetMethodVarName(Sel),
"OBJC_SELECTOR_REFERENCES_");
Entry->setExternallyInitialized(true);
Entry->setSection(SectionName);
@@ -7678,7 +7604,7 @@ Address CGObjCNonFragileABIMac::EmitSelectorAddr(Selector Sel) {
CGM.addCompilerUsedGlobal(Entry);
}
- return Address(Entry, Align);
+ return Address(Entry, ObjCTypes.SelectorPtrTy, Align);
}
/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
@@ -7697,8 +7623,9 @@ void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst.getPointer(), ivarOffset };
+ llvm::Value *dstVal =
+ CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = {src, dstVal, ivarOffset};
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignIvarFn(), args);
}
@@ -7717,19 +7644,16 @@ void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst.getPointer() };
+ llvm::Value *dstVal =
+ CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = {src, dstVal};
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignStrongCastFn(),
args, "weakassign");
}
void CGObjCNonFragileABIMac::EmitGCMemmoveCollectable(
- CodeGen::CodeGenFunction &CGF,
- Address DestPtr,
- Address SrcPtr,
- llvm::Value *Size) {
- SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
- DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
+ CodeGen::CodeGenFunction &CGF, Address DestPtr, Address SrcPtr,
+ llvm::Value *Size) {
llvm::Value *args[] = { DestPtr.getPointer(), SrcPtr.getPointer(), Size };
CGF.EmitNounwindRuntimeCall(ObjCTypes.GcMemmoveCollectableFn(), args);
}
@@ -7741,10 +7665,11 @@ llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead(
CodeGen::CodeGenFunction &CGF,
Address AddrWeakObj) {
llvm::Type *DestTy = AddrWeakObj.getElementType();
- AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj, ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *AddrWeakObjVal = CGF.Builder.CreateBitCast(
+ AddrWeakObj.getPointer(), ObjCTypes.PtrObjectPtrTy);
llvm::Value *read_weak =
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcReadWeakFn(),
- AddrWeakObj.getPointer(), "weakread");
+ AddrWeakObjVal, "weakread");
read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
return read_weak;
}
@@ -7763,8 +7688,9 @@ void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst.getPointer() };
+ llvm::Value *dstVal =
+ CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = {src, dstVal};
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignWeakFn(),
args, "weakassign");
}
@@ -7784,8 +7710,9 @@ void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst.getPointer() };
+ llvm::Value *dstVal =
+ CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = {src, dstVal};
if (!threadlocal)
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignGlobalFn(),
args, "globalassign");
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp
index 108f6fc7ba60..424564f97599 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp
@@ -22,6 +22,7 @@
#include "clang/AST/StmtObjC.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/CodeGenABITypes.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
@@ -62,12 +63,10 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
CGF.CGM.getContext().getObjCObjectPointerType(InterfaceTy);
QualType IvarTy =
Ivar->getUsageType(ObjectPtrTy).withCVRQualifiers(CVRQualifiers);
- llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
- llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, CGF.Int8PtrTy);
+ llvm::Value *V = BaseValue;
V = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, V, Offset, "add.ptr");
if (!Ivar->isBitField()) {
- V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
LValue LV = CGF.MakeNaturalAlignAddrLValue(V, IvarTy);
return LV;
}
@@ -106,10 +105,10 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
CGF.CGM.getContext().toBits(StorageSize),
CharUnits::fromQuantity(0)));
- Address Addr(V, Alignment);
- Addr = CGF.Builder.CreateElementBitCast(Addr,
- llvm::Type::getIntNTy(CGF.getLLVMContext(),
- Info->StorageSize));
+ Address Addr =
+ Address(V, llvm::Type::getIntNTy(CGF.getLLVMContext(), Info->StorageSize),
+ Alignment);
+
return LValue::MakeBitfield(Addr, *Info, IvarTy,
LValueBaseInfo(AlignmentSource::Decl),
TBAAAccessInfo());
@@ -163,8 +162,7 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
// Enter the catch, if there is one.
if (S.getNumCatchStmts()) {
- for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
- const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
+ for (const ObjCAtCatchStmt *CatchStmt : S.catch_stmts()) {
const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
Handlers.push_back(CatchHandler());
@@ -228,13 +226,18 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
CatchHandler &Handler = Handlers[I];
CGF.EmitBlock(Handler.Block);
- llvm::CatchPadInst *CPI = nullptr;
- SaveAndRestore<llvm::Instruction *> RestoreCurrentFuncletPad(CGF.CurrentFuncletPad);
- if (useFunclets)
- if ((CPI = dyn_cast_or_null<llvm::CatchPadInst>(Handler.Block->getFirstNonPHI()))) {
+
+ CodeGenFunction::LexicalScope Cleanups(CGF, Handler.Body->getSourceRange());
+ SaveAndRestore RevertAfterScope(CGF.CurrentFuncletPad);
+ if (useFunclets) {
+ llvm::Instruction *CPICandidate = Handler.Block->getFirstNonPHI();
+ if (auto *CPI = dyn_cast_or_null<llvm::CatchPadInst>(CPICandidate)) {
CGF.CurrentFuncletPad = CPI;
CPI->setOperand(2, CGF.getExceptionSlot().getPointer());
+ CGF.EHStack.pushCleanup<CatchRetScope>(NormalCleanup, CPI);
}
+ }
+
llvm::Value *RawExn = CGF.getExceptionFromSlot();
// Enter the catch.
@@ -242,8 +245,6 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
if (beginCatchFn)
Exn = CGF.EmitNounwindRuntimeCall(beginCatchFn, RawExn, "exn.adjusted");
- CodeGenFunction::LexicalScope cleanups(CGF, Handler.Body->getSourceRange());
-
if (endCatchFn) {
// Add a cleanup to leave the catch.
bool EndCatchMightThrow = (Handler.Variable == nullptr);
@@ -261,15 +262,13 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
CGF.EmitAutoVarDecl(*CatchParam);
EmitInitOfCatchParam(CGF, CastExn, CatchParam);
}
- if (CPI)
- CGF.EHStack.pushCleanup<CatchRetScope>(NormalCleanup, CPI);
CGF.ObjCEHValueStack.push_back(Exn);
CGF.EmitStmt(Handler.Body);
CGF.ObjCEHValueStack.pop_back();
// Leave any cleanups associated with the catch.
- cleanups.ForceCleanup();
+ Cleanups.ForceCleanup();
CGF.EmitBranchThroughCleanup(Cont);
}
@@ -294,7 +293,7 @@ void CGObjCRuntime::EmitInitOfCatchParam(CodeGenFunction &CGF,
switch (paramDecl->getType().getQualifiers().getObjCLifetime()) {
case Qualifiers::OCL_Strong:
exn = CGF.EmitARCRetainNonBlock(exn);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
@@ -361,14 +360,16 @@ CGObjCRuntime::MessageSendInfo
CGObjCRuntime::getMessageSendInfo(const ObjCMethodDecl *method,
QualType resultType,
CallArgList &callArgs) {
+ unsigned ProgramAS = CGM.getDataLayout().getProgramAddressSpace();
+
+ llvm::PointerType *signatureType =
+ llvm::PointerType::get(CGM.getLLVMContext(), ProgramAS);
+
// If there's a method, use information from that.
if (method) {
const CGFunctionInfo &signature =
CGM.getTypes().arrangeObjCMessageSendSignature(method, callArgs[0].Ty);
- llvm::PointerType *signatureType =
- CGM.getTypes().GetFunctionType(signature)->getPointerTo();
-
const CGFunctionInfo &signatureForCall =
CGM.getTypes().arrangeCall(signature, callArgs);
@@ -379,12 +380,86 @@ CGObjCRuntime::getMessageSendInfo(const ObjCMethodDecl *method,
const CGFunctionInfo &argsInfo =
CGM.getTypes().arrangeUnprototypedObjCMessageSend(resultType, callArgs);
- // Derive the signature to call from that.
- llvm::PointerType *signatureType =
- CGM.getTypes().GetFunctionType(argsInfo)->getPointerTo();
return MessageSendInfo(argsInfo, signatureType);
}
+bool CGObjCRuntime::canMessageReceiverBeNull(CodeGenFunction &CGF,
+ const ObjCMethodDecl *method,
+ bool isSuper,
+ const ObjCInterfaceDecl *classReceiver,
+ llvm::Value *receiver) {
+ // Super dispatch assumes that self is non-null; even the messenger
+ // doesn't have a null check internally.
+ if (isSuper)
+ return false;
+
+ // If this is a direct dispatch of a class method, check whether the class,
+ // or anything in its hierarchy, was weak-linked.
+ if (classReceiver && method && method->isClassMethod())
+ return isWeakLinkedClass(classReceiver);
+
+ // If we're emitting a method, and self is const (meaning just ARC, for now),
+ // and the receiver is a load of self, then self is a valid object.
+ if (auto curMethod =
+ dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl)) {
+ auto self = curMethod->getSelfDecl();
+ if (self->getType().isConstQualified()) {
+ if (auto LI = dyn_cast<llvm::LoadInst>(receiver->stripPointerCasts())) {
+ llvm::Value *selfAddr = CGF.GetAddrOfLocalVar(self).getPointer();
+ if (selfAddr == LI->getPointerOperand()) {
+ return false;
+ }
+ }
+ }
+ }
+
+ // Otherwise, assume it can be null.
+ return true;
+}
+
+bool CGObjCRuntime::isWeakLinkedClass(const ObjCInterfaceDecl *ID) {
+ do {
+ if (ID->isWeakImported())
+ return true;
+ } while ((ID = ID->getSuperClass()));
+
+ return false;
+}
+
+void CGObjCRuntime::destroyCalleeDestroyedArguments(CodeGenFunction &CGF,
+ const ObjCMethodDecl *method,
+ const CallArgList &callArgs) {
+ CallArgList::const_iterator I = callArgs.begin();
+ for (auto i = method->param_begin(), e = method->param_end();
+ i != e; ++i, ++I) {
+ const ParmVarDecl *param = (*i);
+ if (param->hasAttr<NSConsumedAttr>()) {
+ RValue RV = I->getRValue(CGF);
+ assert(RV.isScalar() &&
+ "NullReturnState::complete - arg not on object");
+ CGF.EmitARCRelease(RV.getScalarVal(), ARCImpreciseLifetime);
+ } else {
+ QualType QT = param->getType();
+ auto *RT = QT->getAs<RecordType>();
+ if (RT && RT->getDecl()->isParamDestroyedInCallee()) {
+ RValue RV = I->getRValue(CGF);
+ QualType::DestructionKind DtorKind = QT.isDestructedType();
+ switch (DtorKind) {
+ case QualType::DK_cxx_destructor:
+ CGF.destroyCXXObject(CGF, RV.getAggregateAddress(), QT);
+ break;
+ case QualType::DK_nontrivial_c_struct:
+ CGF.destroyNonTrivialCStruct(CGF, RV.getAggregateAddress(), QT);
+ break;
+ default:
+ llvm_unreachable("unexpected dtor kind");
+ break;
+ }
+ }
+ }
+ }
+}
+
llvm::Constant *
clang::CodeGen::emitObjCProtocolObject(CodeGenModule &CGM,
const ObjCProtocolDecl *protocol) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.h b/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.h
index f56101df77b6..3bd981256f47 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.h
@@ -34,7 +34,8 @@ namespace llvm {
namespace clang {
namespace CodeGen {
- class CodeGenFunction;
+class CGFunctionInfo;
+class CodeGenFunction;
}
class FieldDecl;
@@ -337,6 +338,23 @@ public:
MessageSendInfo getMessageSendInfo(const ObjCMethodDecl *method,
QualType resultType,
CallArgList &callArgs);
+ bool canMessageReceiverBeNull(CodeGenFunction &CGF,
+ const ObjCMethodDecl *method,
+ bool isSuper,
+ const ObjCInterfaceDecl *classReceiver,
+ llvm::Value *receiver);
+ static bool isWeakLinkedClass(const ObjCInterfaceDecl *cls);
+
+ /// Destroy the callee-destroyed arguments of the given method,
+ /// if it has any. Used for nil-receiver paths in message sends.
+ /// Never does anything if the method does not satisfy
+ /// hasParamDestroyedInCallee().
+ ///
+ /// \param callArgs - just the formal arguments, not including implicit
+ /// arguments such as self and cmd
+ static void destroyCalleeDestroyedArguments(CodeGenFunction &CGF,
+ const ObjCMethodDecl *method,
+ const CallArgList &callArgs);
// FIXME: This probably shouldn't be here, but the code to compute
// it is here.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.cpp
index dbe375294d17..115b618056a4 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.cpp
@@ -31,45 +31,28 @@ void CGOpenCLRuntime::EmitWorkGroupLocalVarDecl(CodeGenFunction &CGF,
}
llvm::Type *CGOpenCLRuntime::convertOpenCLSpecificType(const Type *T) {
- assert(T->isOpenCLSpecificType() &&
- "Not an OpenCL specific type!");
+ assert(T->isOpenCLSpecificType() && "Not an OpenCL specific type!");
- llvm::LLVMContext& Ctx = CGM.getLLVMContext();
+ // Check if the target has a specific translation for this type first.
+ if (llvm::Type *TransTy = CGM.getTargetCodeGenInfo().getOpenCLType(CGM, T))
+ return TransTy;
+
+ if (T->isSamplerT())
+ return getSamplerType(T);
+
+ return getPointerType(T);
+}
+
+llvm::PointerType *CGOpenCLRuntime::getPointerType(const Type *T) {
uint32_t AddrSpc = CGM.getContext().getTargetAddressSpace(
CGM.getContext().getOpenCLTypeAddrSpace(T));
- switch (cast<BuiltinType>(T)->getKind()) {
- default:
- llvm_unreachable("Unexpected opencl builtin type!");
- return nullptr;
-#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
- case BuiltinType::Id: \
- return llvm::PointerType::get( \
- llvm::StructType::create(Ctx, "opencl." #ImgType "_" #Suffix "_t"), \
- AddrSpc);
-#include "clang/Basic/OpenCLImageTypes.def"
- case BuiltinType::OCLSampler:
- return getSamplerType(T);
- case BuiltinType::OCLEvent:
- return llvm::PointerType::get(
- llvm::StructType::create(Ctx, "opencl.event_t"), AddrSpc);
- case BuiltinType::OCLClkEvent:
- return llvm::PointerType::get(
- llvm::StructType::create(Ctx, "opencl.clk_event_t"), AddrSpc);
- case BuiltinType::OCLQueue:
- return llvm::PointerType::get(
- llvm::StructType::create(Ctx, "opencl.queue_t"), AddrSpc);
- case BuiltinType::OCLReserveID:
- return llvm::PointerType::get(
- llvm::StructType::create(Ctx, "opencl.reserve_id_t"), AddrSpc);
-#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
- case BuiltinType::Id: \
- return llvm::PointerType::get( \
- llvm::StructType::create(Ctx, "opencl." #ExtType), AddrSpc);
-#include "clang/Basic/OpenCLExtensionTypes.def"
- }
+ return llvm::PointerType::get(CGM.getLLVMContext(), AddrSpc);
}
llvm::Type *CGOpenCLRuntime::getPipeType(const PipeType *T) {
+ if (llvm::Type *PipeTy = CGM.getTargetCodeGenInfo().getOpenCLType(CGM, T))
+ return PipeTy;
+
if (T->isReadOnly())
return getPipeType(T, "opencl.pipe_ro_t", PipeROTy);
else
@@ -79,19 +62,19 @@ llvm::Type *CGOpenCLRuntime::getPipeType(const PipeType *T) {
llvm::Type *CGOpenCLRuntime::getPipeType(const PipeType *T, StringRef Name,
llvm::Type *&PipeTy) {
if (!PipeTy)
- PipeTy = llvm::PointerType::get(llvm::StructType::create(
- CGM.getLLVMContext(), Name),
- CGM.getContext().getTargetAddressSpace(
- CGM.getContext().getOpenCLTypeAddrSpace(T)));
+ PipeTy = getPointerType(T);
return PipeTy;
}
-llvm::PointerType *CGOpenCLRuntime::getSamplerType(const Type *T) {
- if (!SamplerTy)
- SamplerTy = llvm::PointerType::get(llvm::StructType::create(
- CGM.getLLVMContext(), "opencl.sampler_t"),
- CGM.getContext().getTargetAddressSpace(
- CGM.getContext().getOpenCLTypeAddrSpace(T)));
+llvm::Type *CGOpenCLRuntime::getSamplerType(const Type *T) {
+ if (SamplerTy)
+ return SamplerTy;
+
+ if (llvm::Type *TransTy = CGM.getTargetCodeGenInfo().getOpenCLType(
+ CGM, CGM.getContext().OCLSamplerTy.getTypePtr()))
+ SamplerTy = TransTy;
+ else
+ SamplerTy = getPointerType(T);
return SamplerTy;
}
@@ -117,7 +100,7 @@ llvm::Value *CGOpenCLRuntime::getPipeElemAlign(const Expr *PipeArg) {
llvm::PointerType *CGOpenCLRuntime::getGenericVoidPointerType() {
assert(CGM.getLangOpts().OpenCL);
- return llvm::IntegerType::getInt8PtrTy(
+ return llvm::PointerType::get(
CGM.getLLVMContext(),
CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
}
@@ -143,14 +126,14 @@ static const BlockExpr *getBlockExpr(const Expr *E) {
/// corresponding block expression.
void CGOpenCLRuntime::recordBlockInfo(const BlockExpr *E,
llvm::Function *InvokeF,
- llvm::Value *Block) {
- assert(EnqueuedBlockMap.find(E) == EnqueuedBlockMap.end() &&
- "Block expression emitted twice");
+ llvm::Value *Block, llvm::Type *BlockTy) {
+ assert(!EnqueuedBlockMap.contains(E) && "Block expression emitted twice");
assert(isa<llvm::Function>(InvokeF) && "Invalid invoke function");
assert(Block->getType()->isPointerTy() && "Invalid block literal type");
EnqueuedBlockMap[E].InvokeFunc = InvokeF;
EnqueuedBlockMap[E].BlockArg = Block;
- EnqueuedBlockMap[E].Kernel = nullptr;
+ EnqueuedBlockMap[E].BlockTy = BlockTy;
+ EnqueuedBlockMap[E].KernelHandle = nullptr;
}
llvm::Function *CGOpenCLRuntime::getInvokeFunction(const Expr *E) {
@@ -165,22 +148,17 @@ CGOpenCLRuntime::emitOpenCLEnqueuedBlock(CodeGenFunction &CGF, const Expr *E) {
// to get the block literal.
const BlockExpr *Block = getBlockExpr(E);
- assert(EnqueuedBlockMap.find(Block) != EnqueuedBlockMap.end() &&
- "Block expression not emitted");
+ assert(EnqueuedBlockMap.contains(Block) && "Block expression not emitted");
// Do not emit the block wrapper again if it has been emitted.
- if (EnqueuedBlockMap[Block].Kernel) {
+ if (EnqueuedBlockMap[Block].KernelHandle) {
return EnqueuedBlockMap[Block];
}
auto *F = CGF.getTargetHooks().createEnqueuedBlockKernel(
- CGF, EnqueuedBlockMap[Block].InvokeFunc,
- EnqueuedBlockMap[Block].BlockArg->stripPointerCasts());
+ CGF, EnqueuedBlockMap[Block].InvokeFunc, EnqueuedBlockMap[Block].BlockTy);
// The common part of the post-processing of the kernel goes here.
- F->addFnAttr(llvm::Attribute::NoUnwind);
- F->setCallingConv(
- CGF.getTypes().ClangCallConvToLLVMCallConv(CallingConv::CC_OpenCLKernel));
- EnqueuedBlockMap[Block].Kernel = F;
+ EnqueuedBlockMap[Block].KernelHandle = F;
return EnqueuedBlockMap[Block];
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.h b/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.h
index 3f7aa9b0d8dc..34613c3516f3 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.h
@@ -18,6 +18,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
@@ -37,19 +38,21 @@ protected:
CodeGenModule &CGM;
llvm::Type *PipeROTy;
llvm::Type *PipeWOTy;
- llvm::PointerType *SamplerTy;
+ llvm::Type *SamplerTy;
/// Structure for enqueued block information.
struct EnqueuedBlockInfo {
llvm::Function *InvokeFunc; /// Block invoke function.
- llvm::Function *Kernel; /// Enqueued block kernel.
+ llvm::Value *KernelHandle; /// Enqueued block kernel reference.
llvm::Value *BlockArg; /// The first argument to enqueued block kernel.
+ llvm::Type *BlockTy; /// Type of the block argument.
};
/// Maps block expression to block information.
llvm::DenseMap<const Expr *, EnqueuedBlockInfo> EnqueuedBlockMap;
virtual llvm::Type *getPipeType(const PipeType *T, StringRef Name,
llvm::Type *&PipeTy);
+ llvm::PointerType *getPointerType(const Type *T);
public:
CGOpenCLRuntime(CodeGenModule &CGM) : CGM(CGM),
@@ -66,7 +69,7 @@ public:
virtual llvm::Type *getPipeType(const PipeType *T);
- llvm::PointerType *getSamplerType(const Type *T);
+ llvm::Type *getSamplerType(const Type *T);
// Returns a value which indicates the size in bytes of the pipe
// element.
@@ -90,7 +93,7 @@ public:
/// \param InvokeF invoke function emitted for the block expression.
/// \param Block block literal emitted for the block expression.
void recordBlockInfo(const BlockExpr *E, llvm::Function *InvokeF,
- llvm::Value *Block);
+ llvm::Value *Block, llvm::Type *BlockTy);
/// \return LLVM block invoke function emitted for an expression derived from
/// the block expression.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index ca98c7a57446..4855e7410a01 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -15,6 +15,7 @@
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
+#include "TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
@@ -28,17 +29,21 @@
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
+#include <cstdint>
#include <numeric>
+#include <optional>
using namespace clang;
using namespace CodeGen;
@@ -368,8 +373,7 @@ public:
/*RefersToEnclosingVariableOrCapture=*/false,
VD->getType().getNonReferenceType(), VK_LValue,
C.getLocation());
- PrivScope.addPrivate(
- VD, [&CGF, &DRE]() { return CGF.EmitLValue(&DRE).getAddress(CGF); });
+ PrivScope.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
}
(void)PrivScope.Privatize();
}
@@ -407,7 +411,7 @@ private:
/// RAII for emitting code of OpenMP constructs.
class InlinedOpenMPRegionRAII {
CodeGenFunction &CGF;
- llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
+ llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
FieldDecl *LambdaThisCaptureField = nullptr;
const CodeGen::CGBlockInfo *BlockInfo = nullptr;
bool NoInheritance = false;
@@ -476,32 +480,6 @@ enum OpenMPLocationFlags : unsigned {
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_IDENT_WORK_DISTRIBUTE)
};
-namespace {
-LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
-/// Values for bit flags for marking which requires clauses have been used.
-enum OpenMPOffloadingRequiresDirFlags : int64_t {
- /// flag undefined.
- OMP_REQ_UNDEFINED = 0x000,
- /// no requires clause present.
- OMP_REQ_NONE = 0x001,
- /// reverse_offload clause.
- OMP_REQ_REVERSE_OFFLOAD = 0x002,
- /// unified_address clause.
- OMP_REQ_UNIFIED_ADDRESS = 0x004,
- /// unified_shared_memory clause.
- OMP_REQ_UNIFIED_SHARED_MEMORY = 0x008,
- /// dynamic_allocators clause.
- OMP_REQ_DYNAMIC_ALLOCATORS = 0x010,
- LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/OMP_REQ_DYNAMIC_ALLOCATORS)
-};
-
-enum OpenMPOffloadingReservedDeviceIDs {
- /// Device ID if the device was not defined, runtime should get it
- /// from environment variables in the spec.
- OMP_DEVICEID_UNDEF = -1,
-};
-} // anonymous namespace
-
/// Describes ident structure that describes a source location.
/// All descriptions are taken from
/// https://github.com/llvm/llvm-project/blob/main/openmp/runtime/src/kmp.h
@@ -632,10 +610,8 @@ static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
const auto *RHSDRE =
cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
- PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
- [=]() { return Private; });
- PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
- [=]() { return Original; });
+ PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()), Private);
+ PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()), Original);
(void)PrivateScope.Privatize();
RValue Func = RValue::get(Reduction.second);
CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
@@ -687,11 +663,8 @@ static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
// Drill down to the base element type on both arrays.
const ArrayType *ArrayTy = Type->getAsArrayTypeUnsafe();
llvm::Value *NumElements = CGF.emitArrayLength(ArrayTy, ElementTy, DestAddr);
- DestAddr =
- CGF.Builder.CreateElementBitCast(DestAddr, DestAddr.getElementType());
if (DRD)
- SrcAddr =
- CGF.Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
+ SrcAddr = SrcAddr.withElementType(DestAddr.getElementType());
llvm::Value *SrcBegin = nullptr;
if (DRD)
@@ -720,14 +693,14 @@ static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
"omp.arraycpy.srcElementPast");
SrcElementPHI->addIncoming(SrcBegin, EntryBB);
SrcElementCurrent =
- Address(SrcElementPHI,
+ Address(SrcElementPHI, SrcAddr.getElementType(),
SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
}
llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
DestElementPHI->addIncoming(DestBegin, EntryBB);
Address DestElementCurrent =
- Address(DestElementPHI,
+ Address(DestElementPHI, DestAddr.getElementType(),
DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
// Emit copy.
@@ -775,7 +748,7 @@ LValue ReductionCodeGen::emitSharedLValueUB(CodeGenFunction &CGF,
}
void ReductionCodeGen::emitAggregateInitialization(
- CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
+ CodeGenFunction &CGF, unsigned N, Address PrivateAddr, Address SharedAddr,
const OMPDeclareReductionDecl *DRD) {
// Emit VarDecl with copy init for arrays.
// Get the address of the original variable captured in current
@@ -788,7 +761,7 @@ void ReductionCodeGen::emitAggregateInitialization(
EmitDeclareReductionInit,
EmitDeclareReductionInit ? ClausesData[N].ReductionOp
: PrivateVD->getInit(),
- DRD, SharedLVal.getAddress(CGF));
+ DRD, SharedAddr);
}
ReductionCodeGen::ReductionCodeGen(ArrayRef<const Expr *> Shareds,
@@ -826,9 +799,7 @@ void ReductionCodeGen::emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N) {
}
void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
- const auto *PrivateVD =
- cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
- QualType PrivateType = PrivateVD->getType();
+ QualType PrivateType = getPrivateType(N);
bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
if (!PrivateType->isVariablyModifiedType()) {
Sizes.emplace_back(
@@ -838,12 +809,11 @@ void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
}
llvm::Value *Size;
llvm::Value *SizeInChars;
- auto *ElemType =
- cast<llvm::PointerType>(OrigAddresses[N].first.getPointer(CGF)->getType())
- ->getElementType();
+ auto *ElemType = OrigAddresses[N].first.getAddress(CGF).getElementType();
auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
if (AsArraySection) {
- Size = CGF.Builder.CreatePtrDiff(OrigAddresses[N].second.getPointer(CGF),
+ Size = CGF.Builder.CreatePtrDiff(ElemType,
+ OrigAddresses[N].second.getPointer(CGF),
OrigAddresses[N].first.getPointer(CGF));
Size = CGF.Builder.CreateNUWAdd(
Size, llvm::ConstantInt::get(Size->getType(), /*V=*/1));
@@ -864,9 +834,7 @@ void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
llvm::Value *Size) {
- const auto *PrivateVD =
- cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
- QualType PrivateType = PrivateVD->getType();
+ QualType PrivateType = getPrivateType(N);
if (!PrivateType->isVariablyModifiedType()) {
assert(!Size && !Sizes[N].second &&
"Size should be nullptr for non-variably modified reduction "
@@ -882,31 +850,22 @@ void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
}
void ReductionCodeGen::emitInitialization(
- CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal,
+ CodeGenFunction &CGF, unsigned N, Address PrivateAddr, Address SharedAddr,
llvm::function_ref<bool(CodeGenFunction &)> DefaultInit) {
assert(SharedAddresses.size() > N && "No variable was generated");
const auto *PrivateVD =
cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
const OMPDeclareReductionDecl *DRD =
getReductionInit(ClausesData[N].ReductionOp);
- QualType PrivateType = PrivateVD->getType();
- PrivateAddr = CGF.Builder.CreateElementBitCast(
- PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
- QualType SharedType = SharedAddresses[N].first.getType();
- SharedLVal = CGF.MakeAddrLValue(
- CGF.Builder.CreateElementBitCast(SharedLVal.getAddress(CGF),
- CGF.ConvertTypeForMem(SharedType)),
- SharedType, SharedAddresses[N].first.getBaseInfo(),
- CGF.CGM.getTBAAInfoForSubobject(SharedAddresses[N].first, SharedType));
if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
if (DRD && DRD->getInitializer())
(void)DefaultInit(CGF);
- emitAggregateInitialization(CGF, N, PrivateAddr, SharedLVal, DRD);
+ emitAggregateInitialization(CGF, N, PrivateAddr, SharedAddr, DRD);
} else if (DRD && (DRD->getInitializer() || !PrivateVD->hasInit())) {
(void)DefaultInit(CGF);
+ QualType SharedType = SharedAddresses[N].first.getType();
emitInitWithReductionInitializer(CGF, DRD, ClausesData[N].ReductionOp,
- PrivateAddr, SharedLVal.getAddress(CGF),
- SharedLVal.getType());
+ PrivateAddr, SharedAddr, SharedType);
} else if (!DefaultInit(CGF) && PrivateVD->hasInit() &&
!CGF.isTrivialInitializer(PrivateVD->getInit())) {
CGF.EmitAnyExprToMem(PrivateVD->getInit(), PrivateAddr,
@@ -916,22 +875,18 @@ void ReductionCodeGen::emitInitialization(
}
bool ReductionCodeGen::needCleanups(unsigned N) {
- const auto *PrivateVD =
- cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
- QualType PrivateType = PrivateVD->getType();
+ QualType PrivateType = getPrivateType(N);
QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
return DTorKind != QualType::DK_none;
}
void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N,
Address PrivateAddr) {
- const auto *PrivateVD =
- cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
- QualType PrivateType = PrivateVD->getType();
+ QualType PrivateType = getPrivateType(N);
QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
if (needCleanups(N)) {
- PrivateAddr = CGF.Builder.CreateElementBitCast(
- PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
+ PrivateAddr =
+ PrivateAddr.withElementType(CGF.ConvertTypeForMem(PrivateType));
CGF.pushDestroy(DTorKind, PrivateAddr, PrivateType);
}
}
@@ -950,15 +905,13 @@ static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
BaseTy = BaseTy->getPointeeType();
}
return CGF.MakeAddrLValue(
- CGF.Builder.CreateElementBitCast(BaseLV.getAddress(CGF),
- CGF.ConvertTypeForMem(ElTy)),
+ BaseLV.getAddress(CGF).withElementType(CGF.ConvertTypeForMem(ElTy)),
BaseLV.getType(), BaseLV.getBaseInfo(),
CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
}
static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
- llvm::Type *BaseLVType, CharUnits BaseLVAlignment,
- llvm::Value *Addr) {
+ Address OriginalBaseAddress, llvm::Value *Addr) {
Address Tmp = Address::invalid();
Address TopTmp = Address::invalid();
Address MostTopTmp = Address::invalid();
@@ -973,15 +926,17 @@ static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
TopTmp = Tmp;
BaseTy = BaseTy->getPointeeType();
}
- llvm::Type *Ty = BaseLVType;
- if (Tmp.isValid())
- Ty = Tmp.getElementType();
- Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
+
if (Tmp.isValid()) {
+ Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Addr, Tmp.getElementType());
CGF.Builder.CreateStore(Addr, Tmp);
return MostTopTmp;
}
- return Address(Addr, BaseLVAlignment);
+
+ Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Addr, OriginalBaseAddress.getType());
+ return OriginalBaseAddress.withPointer(Addr, NotKnownNonNull);
}
static const VarDecl *getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE) {
@@ -1015,7 +970,8 @@ Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
OriginalBaseLValue);
Address SharedAddr = SharedAddresses[N].first.getAddress(CGF);
llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
- BaseLValue.getPointer(CGF), SharedAddr.getPointer());
+ SharedAddr.getElementType(), BaseLValue.getPointer(CGF),
+ SharedAddr.getPointer());
llvm::Value *PrivatePointer =
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
PrivateAddr.getPointer(), SharedAddr.getType());
@@ -1023,8 +979,7 @@ Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
SharedAddr.getElementType(), PrivatePointer, Adjustment);
return castToBase(CGF, OrigVD->getType(),
SharedAddresses[N].first.getType(),
- OriginalBaseLValue.getAddress(CGF).getType(),
- OriginalBaseLValue.getAlignment(), Ptr);
+ OriginalBaseLValue.getAddress(CGF), Ptr);
}
BaseDecls.emplace_back(
cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
@@ -1076,15 +1031,26 @@ static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
return Field;
}
-CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
- StringRef Separator)
- : CGM(CGM), FirstSeparator(FirstSeparator), Separator(Separator),
- OMPBuilder(CGM.getModule()), OffloadEntriesInfoManager(CGM) {
+CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM)
+ : CGM(CGM), OMPBuilder(CGM.getModule()) {
KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
-
- // Initialize Types used in OpenMPIRBuilder from OMPKinds.def
+ llvm::OpenMPIRBuilderConfig Config(
+ CGM.getLangOpts().OpenMPIsTargetDevice, isGPU(),
+ CGM.getLangOpts().OpenMPOffloadMandatory,
+ /*HasRequiresReverseOffload*/ false, /*HasRequiresUnifiedAddress*/ false,
+ hasRequiresUnifiedSharedMemory(), /*HasRequiresDynamicAllocators*/ false);
OMPBuilder.initialize();
- loadOffloadInfoMetadata();
+ OMPBuilder.loadOffloadInfoMetadata(CGM.getLangOpts().OpenMPIsTargetDevice
+ ? CGM.getLangOpts().OMPHostIRFile
+ : StringRef{});
+ OMPBuilder.setConfig(Config);
+
+ // The user forces the compiler to behave as if omp requires
+ // unified_shared_memory was given.
+ if (CGM.getLangOpts().OpenMPForceUSM) {
+ HasRequiresUnifiedSharedMemory = true;
+ OMPBuilder.Config.setHasRequiresUnifiedSharedMemory(true);
+ }
}
void CGOpenMPRuntime::clear() {
@@ -1103,14 +1069,7 @@ void CGOpenMPRuntime::clear() {
}
std::string CGOpenMPRuntime::getName(ArrayRef<StringRef> Parts) const {
- SmallString<128> Buffer;
- llvm::raw_svector_ostream OS(Buffer);
- StringRef Sep = FirstSeparator;
- for (StringRef Part : Parts) {
- OS << Sep << Part;
- Sep = Separator;
- }
- return std::string(OS.str());
+ return OMPBuilder.createPlatformSpecificName(Parts);
}
static llvm::Function *
@@ -1122,9 +1081,9 @@ emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
QualType PtrTy = C.getPointerType(Ty).withRestrict();
FunctionArgList Args;
ImplicitParamDecl OmpOutParm(C, /*DC=*/nullptr, Out->getLocation(),
- /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
+ /*Id=*/nullptr, PtrTy, ImplicitParamKind::Other);
ImplicitParamDecl OmpInParm(C, /*DC=*/nullptr, In->getLocation(),
- /*Id=*/nullptr, PtrTy, ImplicitParamDecl::Other);
+ /*Id=*/nullptr, PtrTy, ImplicitParamKind::Other);
Args.push_back(&OmpOutParm);
Args.push_back(&OmpInParm);
const CGFunctionInfo &FnInfo =
@@ -1147,15 +1106,13 @@ emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
Out->getLocation());
CodeGenFunction::OMPPrivateScope Scope(CGF);
Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
- Scope.addPrivate(In, [&CGF, AddrIn, PtrTy]() {
- return CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
- .getAddress(CGF);
- });
+ Scope.addPrivate(
+ In, CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
+ .getAddress(CGF));
Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
- Scope.addPrivate(Out, [&CGF, AddrOut, PtrTy]() {
- return CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
- .getAddress(CGF);
- });
+ Scope.addPrivate(
+ Out, CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
+ .getAddress(CGF));
(void)Scope.Privatize();
if (!IsCombiner && Out->hasInit() &&
!CGF.isTrivialInitializer(Out->getInit())) {
@@ -1183,7 +1140,7 @@ void CGOpenMPRuntime::emitUserDefinedReduction(
if (const Expr *Init = D->getInitializer()) {
Initializer = emitCombinerOrInitializer(
CGM, D->getType(),
- D->getInitializerKind() == OMPDeclareReductionDecl::CallInit ? Init
+ D->getInitializerKind() == OMPDeclareReductionInitKind::Call ? Init
: nullptr,
cast<VarDecl>(cast<DeclRefExpr>(D->getInitOrig())->getDecl()),
cast<VarDecl>(cast<DeclRefExpr>(D->getInitPriv())->getDecl()),
@@ -1287,20 +1244,38 @@ static llvm::Function *emitParallelOrTeamsOutlinedFunction(
return CGF.GenerateOpenMPCapturedStmtFunction(*CS, D.getBeginLoc());
}
+std::string CGOpenMPRuntime::getOutlinedHelperName(StringRef Name) const {
+ std::string Suffix = getName({"omp_outlined"});
+ return (Name + Suffix).str();
+}
+
+std::string CGOpenMPRuntime::getOutlinedHelperName(CodeGenFunction &CGF) const {
+ return getOutlinedHelperName(CGF.CurFn->getName());
+}
+
+std::string CGOpenMPRuntime::getReductionFuncName(StringRef Name) const {
+ std::string Suffix = getName({"omp", "reduction", "reduction_func"});
+ return (Name + Suffix).str();
+}
+
llvm::Function *CGOpenMPRuntime::emitParallelOutlinedFunction(
- const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) {
const CapturedStmt *CS = D.getCapturedStmt(OMPD_parallel);
return emitParallelOrTeamsOutlinedFunction(
- CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
+ CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(CGF),
+ CodeGen);
}
llvm::Function *CGOpenMPRuntime::emitTeamsOutlinedFunction(
- const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) {
const CapturedStmt *CS = D.getCapturedStmt(OMPD_teams);
return emitParallelOrTeamsOutlinedFunction(
- CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(), CodeGen);
+ CGM, D, CS, ThreadIDVar, InnermostKind, getOutlinedHelperName(CGF),
+ CodeGen);
}
llvm::Function *CGOpenMPRuntime::emitTaskOutlinedFunction(
@@ -1350,51 +1325,6 @@ llvm::Function *CGOpenMPRuntime::emitTaskOutlinedFunction(
return Res;
}
-static void buildStructValue(ConstantStructBuilder &Fields, CodeGenModule &CGM,
- const RecordDecl *RD, const CGRecordLayout &RL,
- ArrayRef<llvm::Constant *> Data) {
- llvm::StructType *StructTy = RL.getLLVMType();
- unsigned PrevIdx = 0;
- ConstantInitBuilder CIBuilder(CGM);
- auto DI = Data.begin();
- for (const FieldDecl *FD : RD->fields()) {
- unsigned Idx = RL.getLLVMFieldNo(FD);
- // Fill the alignment.
- for (unsigned I = PrevIdx; I < Idx; ++I)
- Fields.add(llvm::Constant::getNullValue(StructTy->getElementType(I)));
- PrevIdx = Idx + 1;
- Fields.add(*DI);
- ++DI;
- }
-}
-
-template <class... As>
-static llvm::GlobalVariable *
-createGlobalStruct(CodeGenModule &CGM, QualType Ty, bool IsConstant,
- ArrayRef<llvm::Constant *> Data, const Twine &Name,
- As &&... Args) {
- const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
- const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
- ConstantInitBuilder CIBuilder(CGM);
- ConstantStructBuilder Fields = CIBuilder.beginStruct(RL.getLLVMType());
- buildStructValue(Fields, CGM, RD, RL, Data);
- return Fields.finishAndCreateGlobal(
- Name, CGM.getContext().getAlignOfGlobalVarInChars(Ty), IsConstant,
- std::forward<As>(Args)...);
-}
-
-template <typename T>
-static void
-createConstantGlobalStructAndAddToParent(CodeGenModule &CGM, QualType Ty,
- ArrayRef<llvm::Constant *> Data,
- T &Parent) {
- const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
- const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
- ConstantStructBuilder Fields = Parent.beginStruct(RL.getLLVMType());
- buildStructValue(Fields, CGM, RD, RL, Data);
- Fields.finishAndAddTo(Parent);
-}
-
void CGOpenMPRuntime::setLocThreadIdInsertPt(CodeGenFunction &CGF,
bool AtCurrentPoint) {
auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
@@ -1435,25 +1365,27 @@ static StringRef getIdentStringFromSourceLocation(CodeGenFunction &CGF,
llvm::Value *CGOpenMPRuntime::emitUpdateLocation(CodeGenFunction &CGF,
SourceLocation Loc,
- unsigned Flags) {
+ unsigned Flags, bool EmitLoc) {
+ uint32_t SrcLocStrSize;
llvm::Constant *SrcLocStr;
- if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo ||
+ if ((!EmitLoc && CGM.getCodeGenOpts().getDebugInfo() ==
+ llvm::codegenoptions::NoDebugInfo) ||
Loc.isInvalid()) {
- SrcLocStr = OMPBuilder.getOrCreateDefaultSrcLocStr();
+ SrcLocStr = OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize);
} else {
- std::string FunctionName = "";
+ std::string FunctionName;
if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CGF.CurFuncDecl))
FunctionName = FD->getQualifiedNameAsString();
PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
const char *FileName = PLoc.getFilename();
unsigned Line = PLoc.getLine();
unsigned Column = PLoc.getColumn();
- SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(FunctionName.c_str(), FileName,
- Line, Column);
+ SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(FunctionName, FileName, Line,
+ Column, SrcLocStrSize);
}
unsigned Reserved2Flags = getDefaultLocationReserved2Flags();
- return OMPBuilder.getOrCreateIdent(SrcLocStr, llvm::omp::IdentFlag(Flags),
- Reserved2Flags);
+ return OMPBuilder.getOrCreateIdent(
+ SrcLocStr, SrcLocStrSize, llvm::omp::IdentFlag(Flags), Reserved2Flags);
}
llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
@@ -1464,10 +1396,11 @@ llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
if (CGM.getLangOpts().OpenMPIRBuilder) {
SmallString<128> Buffer;
OMPBuilder.updateToLocation(CGF.Builder.saveIP());
+ uint32_t SrcLocStrSize;
auto *SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(
- getIdentStringFromSourceLocation(CGF, Loc, Buffer));
+ getIdentStringFromSourceLocation(CGF, Loc, Buffer), SrcLocStrSize);
return OMPBuilder.getOrCreateThreadID(
- OMPBuilder.getOrCreateIdent(SrcLocStr));
+ OMPBuilder.getOrCreateIdent(SrcLocStr, SrcLocStrSize));
}
llvm::Value *ThreadID = nullptr;
@@ -1515,6 +1448,7 @@ llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
setLocThreadIdInsertPt(CGF);
CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
+ auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
llvm::CallInst *Call = CGF.Builder.CreateCall(
OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
OMPRTL___kmpc_global_thread_num),
@@ -1559,161 +1493,94 @@ llvm::Type *CGOpenMPRuntime::getKmpc_MicroPointerTy() {
return llvm::PointerType::getUnqual(Kmpc_MicroTy);
}
-llvm::FunctionCallee
-CGOpenMPRuntime::createForStaticInitFunction(unsigned IVSize, bool IVSigned) {
- assert((IVSize == 32 || IVSize == 64) &&
- "IV size is not compatible with the omp runtime");
- StringRef Name = IVSize == 32 ? (IVSigned ? "__kmpc_for_static_init_4"
- : "__kmpc_for_static_init_4u")
- : (IVSigned ? "__kmpc_for_static_init_8"
- : "__kmpc_for_static_init_8u");
- llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
- auto *PtrTy = llvm::PointerType::getUnqual(ITy);
- llvm::Type *TypeParams[] = {
- getIdentTyPointerTy(), // loc
- CGM.Int32Ty, // tid
- CGM.Int32Ty, // schedtype
- llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
- PtrTy, // p_lower
- PtrTy, // p_upper
- PtrTy, // p_stride
- ITy, // incr
- ITy // chunk
- };
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- return CGM.CreateRuntimeFunction(FnTy, Name);
-}
-
-llvm::FunctionCallee
-CGOpenMPRuntime::createDispatchInitFunction(unsigned IVSize, bool IVSigned) {
- assert((IVSize == 32 || IVSize == 64) &&
- "IV size is not compatible with the omp runtime");
- StringRef Name =
- IVSize == 32
- ? (IVSigned ? "__kmpc_dispatch_init_4" : "__kmpc_dispatch_init_4u")
- : (IVSigned ? "__kmpc_dispatch_init_8" : "__kmpc_dispatch_init_8u");
- llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
- llvm::Type *TypeParams[] = { getIdentTyPointerTy(), // loc
- CGM.Int32Ty, // tid
- CGM.Int32Ty, // schedtype
- ITy, // lower
- ITy, // upper
- ITy, // stride
- ITy // chunk
- };
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
- return CGM.CreateRuntimeFunction(FnTy, Name);
-}
-
-llvm::FunctionCallee
-CGOpenMPRuntime::createDispatchFiniFunction(unsigned IVSize, bool IVSigned) {
- assert((IVSize == 32 || IVSize == 64) &&
- "IV size is not compatible with the omp runtime");
- StringRef Name =
- IVSize == 32
- ? (IVSigned ? "__kmpc_dispatch_fini_4" : "__kmpc_dispatch_fini_4u")
- : (IVSigned ? "__kmpc_dispatch_fini_8" : "__kmpc_dispatch_fini_8u");
- llvm::Type *TypeParams[] = {
- getIdentTyPointerTy(), // loc
- CGM.Int32Ty, // tid
- };
- auto *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
- return CGM.CreateRuntimeFunction(FnTy, Name);
-}
-
-llvm::FunctionCallee
-CGOpenMPRuntime::createDispatchNextFunction(unsigned IVSize, bool IVSigned) {
- assert((IVSize == 32 || IVSize == 64) &&
- "IV size is not compatible with the omp runtime");
- StringRef Name =
- IVSize == 32
- ? (IVSigned ? "__kmpc_dispatch_next_4" : "__kmpc_dispatch_next_4u")
- : (IVSigned ? "__kmpc_dispatch_next_8" : "__kmpc_dispatch_next_8u");
- llvm::Type *ITy = IVSize == 32 ? CGM.Int32Ty : CGM.Int64Ty;
- auto *PtrTy = llvm::PointerType::getUnqual(ITy);
- llvm::Type *TypeParams[] = {
- getIdentTyPointerTy(), // loc
- CGM.Int32Ty, // tid
- llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
- PtrTy, // p_lower
- PtrTy, // p_upper
- PtrTy // p_stride
- };
- auto *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
- return CGM.CreateRuntimeFunction(FnTy, Name);
+llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseKind
+convertDeviceClause(const VarDecl *VD) {
+ std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
+ OMPDeclareTargetDeclAttr::getDeviceType(VD);
+ if (!DevTy)
+ return llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseNone;
+
+ switch ((int)*DevTy) { // Avoid -Wcovered-switch-default
+ case OMPDeclareTargetDeclAttr::DT_Host:
+ return llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseHost;
+ break;
+ case OMPDeclareTargetDeclAttr::DT_NoHost:
+ return llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseNoHost;
+ break;
+ case OMPDeclareTargetDeclAttr::DT_Any:
+ return llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseAny;
+ break;
+ default:
+ return llvm::OffloadEntriesInfoManager::OMPTargetDeviceClauseNone;
+ break;
+ }
}
-/// Obtain information that uniquely identifies a target entry. This
-/// consists of the file and device IDs as well as line number associated with
-/// the relevant entry source location.
-static void getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
- unsigned &DeviceID, unsigned &FileID,
- unsigned &LineNum) {
- SourceManager &SM = C.getSourceManager();
+llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryKind
+convertCaptureClause(const VarDecl *VD) {
+ std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> MapType =
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
+ if (!MapType)
+ return llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryNone;
+ switch ((int)*MapType) { // Avoid -Wcovered-switch-default
+ case OMPDeclareTargetDeclAttr::MapTypeTy::MT_To:
+ return llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryTo;
+ break;
+ case OMPDeclareTargetDeclAttr::MapTypeTy::MT_Enter:
+ return llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryEnter;
+ break;
+ case OMPDeclareTargetDeclAttr::MapTypeTy::MT_Link:
+ return llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryLink;
+ break;
+ default:
+ return llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryNone;
+ break;
+ }
+}
- // The loc should be always valid and have a file ID (the user cannot use
- // #pragma directives in macros)
+static llvm::TargetRegionEntryInfo getEntryInfoFromPresumedLoc(
+ CodeGenModule &CGM, llvm::OpenMPIRBuilder &OMPBuilder,
+ SourceLocation BeginLoc, llvm::StringRef ParentName = "") {
- assert(Loc.isValid() && "Source location is expected to be always valid.");
+ auto FileInfoCallBack = [&]() {
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(BeginLoc);
- PresumedLoc PLoc = SM.getPresumedLoc(Loc);
- assert(PLoc.isValid() && "Source location is expected to be always valid.");
+ llvm::sys::fs::UniqueID ID;
+ if (llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) {
+ PLoc = SM.getPresumedLoc(BeginLoc, /*UseLineDirectives=*/false);
+ }
- llvm::sys::fs::UniqueID ID;
- if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) {
- PLoc = SM.getPresumedLoc(Loc, /*UseLineDirectives=*/false);
- assert(PLoc.isValid() && "Source location is expected to be always valid.");
- if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
- SM.getDiagnostics().Report(diag::err_cannot_open_file)
- << PLoc.getFilename() << EC.message();
- }
+ return std::pair<std::string, uint64_t>(PLoc.getFilename(), PLoc.getLine());
+ };
- DeviceID = ID.getDevice();
- FileID = ID.getFile();
- LineNum = PLoc.getLine();
+ return OMPBuilder.getTargetEntryUniqueInfo(FileInfoCallBack, ParentName);
}
Address CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) {
- if (CGM.getLangOpts().OpenMPSimd)
- return Address::invalid();
- llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
- OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
- if (Res && (*Res == OMPDeclareTargetDeclAttr::MT_Link ||
- (*Res == OMPDeclareTargetDeclAttr::MT_To &&
- HasRequiresUnifiedSharedMemory))) {
- SmallString<64> PtrName;
- {
- llvm::raw_svector_ostream OS(PtrName);
- OS << CGM.getMangledName(GlobalDecl(VD));
- if (!VD->isExternallyVisible()) {
- unsigned DeviceID, FileID, Line;
- getTargetEntryUniqueInfo(CGM.getContext(),
- VD->getCanonicalDecl()->getBeginLoc(),
- DeviceID, FileID, Line);
- OS << llvm::format("_%x", FileID);
- }
- OS << "_decl_tgt_ref_ptr";
- }
- llvm::Value *Ptr = CGM.getModule().getNamedValue(PtrName);
- if (!Ptr) {
- QualType PtrTy = CGM.getContext().getPointerType(VD->getType());
- Ptr = getOrCreateInternalVariable(CGM.getTypes().ConvertTypeForMem(PtrTy),
- PtrName);
+ auto AddrOfGlobal = [&VD, this]() { return CGM.GetAddrOfGlobal(VD); };
- auto *GV = cast<llvm::GlobalVariable>(Ptr);
- GV->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
+ auto LinkageForVariable = [&VD, this]() {
+ return CGM.getLLVMLinkageVarDefinition(VD);
+ };
- if (!CGM.getLangOpts().OpenMPIsDevice)
- GV->setInitializer(CGM.GetAddrOfGlobal(VD));
- registerTargetGlobalVariable(VD, cast<llvm::Constant>(Ptr));
- }
- return Address(Ptr, CGM.getContext().getDeclAlign(VD));
- }
- return Address::invalid();
+ std::vector<llvm::GlobalVariable *> GeneratedRefs;
+
+ llvm::Type *LlvmPtrTy = CGM.getTypes().ConvertTypeForMem(
+ CGM.getContext().getPointerType(VD->getType()));
+ llvm::Constant *addr = OMPBuilder.getAddrOfDeclareTargetVar(
+ convertCaptureClause(VD), convertDeviceClause(VD),
+ VD->hasDefinition(CGM.getContext()) == VarDecl::DeclarationOnly,
+ VD->isExternallyVisible(),
+ getEntryInfoFromPresumedLoc(CGM, OMPBuilder,
+ VD->getCanonicalDecl()->getBeginLoc()),
+ CGM.getMangledName(VD), GeneratedRefs, CGM.getLangOpts().OpenMPSimd,
+ CGM.getLangOpts().OMPTargetTriples, LlvmPtrTy, AddrOfGlobal,
+ LinkageForVariable);
+
+ if (!addr)
+ return Address::invalid();
+ return Address(addr, LlvmPtrTy, CGM.getContext().getDeclAlign(VD));
}
llvm::Constant *
@@ -1722,8 +1589,8 @@ CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
!CGM.getContext().getTargetInfo().isTLSSupported());
// Lookup the entry, lazily creating it if necessary.
std::string Suffix = getName({"cache", ""});
- return getOrCreateInternalVariable(
- CGM.Int8PtrPtrTy, Twine(CGM.getMangledName(VD)).concat(Suffix));
+ return OMPBuilder.getOrCreateInternalVariable(
+ CGM.Int8PtrPtrTy, Twine(CGM.getMangledName(VD)).concat(Suffix).str());
}
Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
@@ -1735,16 +1602,17 @@ Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
return VDAddr;
llvm::Type *VarTy = VDAddr.getElementType();
- llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
- CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
- CGM.Int8PtrTy),
- CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
- getOrCreateThreadPrivateCache(VD)};
- return Address(CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
- Args),
- VDAddr.getAlignment());
+ llvm::Value *Args[] = {
+ emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
+ CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.Int8PtrTy),
+ CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
+ getOrCreateThreadPrivateCache(VD)};
+ return Address(
+ CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
+ Args),
+ CGF.Int8Ty, VDAddr.getAlignment());
}
void CGOpenMPRuntime::emitThreadPrivateVarInit(
@@ -1787,7 +1655,7 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
FunctionArgList Args;
ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
/*Id=*/nullptr, CGM.getContext().VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
Args.push_back(&Dst);
const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
@@ -1801,9 +1669,8 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
llvm::Value *ArgVal = CtorCGF.EmitLoadOfScalar(
CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
CGM.getContext().VoidPtrTy, Dst.getLocation());
- Address Arg = Address(ArgVal, VDAddr.getAlignment());
- Arg = CtorCGF.Builder.CreateElementBitCast(
- Arg, CtorCGF.ConvertTypeForMem(ASTTy));
+ Address Arg(ArgVal, CtorCGF.ConvertTypeForMem(ASTTy),
+ VDAddr.getAlignment());
CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
/*IsInitializer=*/true);
ArgVal = CtorCGF.EmitLoadOfScalar(
@@ -1820,7 +1687,7 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
FunctionArgList Args;
ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, Loc,
/*Id=*/nullptr, CGM.getContext().VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
Args.push_back(&Dst);
const auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
@@ -1837,9 +1704,10 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
llvm::Value *ArgVal = DtorCGF.EmitLoadOfScalar(
DtorCGF.GetAddrOfLocalVar(&Dst),
/*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
- DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy,
- DtorCGF.getDestroyer(ASTTy.isDestructedType()),
- DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
+ DtorCGF.emitDestroy(
+ Address(ArgVal, DtorCGF.Int8Ty, VDAddr.getAlignment()), ASTTy,
+ DtorCGF.getDestroyer(ASTTy.isDestructedType()),
+ DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
DtorCGF.FinishFunction();
Dtor = Fn;
}
@@ -1887,119 +1755,39 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
return nullptr;
}
-bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
- llvm::GlobalVariable *Addr,
- bool PerformInit) {
- if (CGM.getLangOpts().OMPTargetTriples.empty() &&
- !CGM.getLangOpts().OpenMPIsDevice)
- return false;
- Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
- OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
- if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link ||
- (*Res == OMPDeclareTargetDeclAttr::MT_To &&
- HasRequiresUnifiedSharedMemory))
- return CGM.getLangOpts().OpenMPIsDevice;
- VD = VD->getDefinition(CGM.getContext());
- assert(VD && "Unknown VarDecl");
-
- if (!DeclareTargetWithDefinition.insert(CGM.getMangledName(VD)).second)
- return CGM.getLangOpts().OpenMPIsDevice;
-
- QualType ASTTy = VD->getType();
- SourceLocation Loc = VD->getCanonicalDecl()->getBeginLoc();
-
- // Produce the unique prefix to identify the new target regions. We use
- // the source location of the variable declaration which we know to not
- // conflict with any target region.
- unsigned DeviceID;
- unsigned FileID;
- unsigned Line;
- getTargetEntryUniqueInfo(CGM.getContext(), Loc, DeviceID, FileID, Line);
- SmallString<128> Buffer, Out;
- {
- llvm::raw_svector_ostream OS(Buffer);
- OS << "__omp_offloading_" << llvm::format("_%x", DeviceID)
- << llvm::format("_%x_", FileID) << VD->getName() << "_l" << Line;
- }
-
- const Expr *Init = VD->getAnyInitializer();
- if (CGM.getLangOpts().CPlusPlus && PerformInit) {
- llvm::Constant *Ctor;
- llvm::Constant *ID;
- if (CGM.getLangOpts().OpenMPIsDevice) {
- // Generate function that re-emits the declaration's initializer into
- // the threadprivate copy of the variable VD
- CodeGenFunction CtorCGF(CGM);
+void CGOpenMPRuntime::emitDeclareTargetFunction(const FunctionDecl *FD,
+ llvm::GlobalValue *GV) {
+ std::optional<OMPDeclareTargetDeclAttr *> ActiveAttr =
+ OMPDeclareTargetDeclAttr::getActiveAttr(FD);
- const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
- llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
- llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction(
- FTy, Twine(Buffer, "_ctor"), FI, Loc);
- auto NL = ApplyDebugLocation::CreateEmpty(CtorCGF);
- CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
- FunctionArgList(), Loc, Loc);
- auto AL = ApplyDebugLocation::CreateArtificial(CtorCGF);
- CtorCGF.EmitAnyExprToMem(Init,
- Address(Addr, CGM.getContext().getDeclAlign(VD)),
- Init->getType().getQualifiers(),
- /*IsInitializer=*/true);
- CtorCGF.FinishFunction();
- Ctor = Fn;
- ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
- CGM.addUsedGlobal(cast<llvm::GlobalValue>(Ctor));
- } else {
- Ctor = new llvm::GlobalVariable(
- CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
- llvm::GlobalValue::PrivateLinkage,
- llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_ctor"));
- ID = Ctor;
- }
-
- // Register the information for the entry associated with the constructor.
- Out.clear();
- OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
- DeviceID, FileID, Twine(Buffer, "_ctor").toStringRef(Out), Line, Ctor,
- ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryCtor);
- }
- if (VD->getType().isDestructedType() != QualType::DK_none) {
- llvm::Constant *Dtor;
- llvm::Constant *ID;
- if (CGM.getLangOpts().OpenMPIsDevice) {
- // Generate function that emits destructor call for the threadprivate
- // copy of the variable VD
- CodeGenFunction DtorCGF(CGM);
+ // We only need to handle active 'indirect' declare target functions.
+ if (!ActiveAttr || !(*ActiveAttr)->getIndirect())
+ return;
- const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
- llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
- llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction(
- FTy, Twine(Buffer, "_dtor"), FI, Loc);
- auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
- DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
- FunctionArgList(), Loc, Loc);
- // Create a scope with an artificial location for the body of this
- // function.
- auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
- DtorCGF.emitDestroy(Address(Addr, CGM.getContext().getDeclAlign(VD)),
- ASTTy, DtorCGF.getDestroyer(ASTTy.isDestructedType()),
- DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
- DtorCGF.FinishFunction();
- Dtor = Fn;
- ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
- CGM.addUsedGlobal(cast<llvm::GlobalValue>(Dtor));
- } else {
- Dtor = new llvm::GlobalVariable(
- CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
- llvm::GlobalValue::PrivateLinkage,
- llvm::Constant::getNullValue(CGM.Int8Ty), Twine(Buffer, "_dtor"));
- ID = Dtor;
- }
- // Register the information for the entry associated with the destructor.
- Out.clear();
- OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
- DeviceID, FileID, Twine(Buffer, "_dtor").toStringRef(Out), Line, Dtor,
- ID, OffloadEntriesInfoManagerTy::OMPTargetRegionEntryDtor);
+ // Get a mangled name to store the new device global in.
+ llvm::TargetRegionEntryInfo EntryInfo = getEntryInfoFromPresumedLoc(
+ CGM, OMPBuilder, FD->getCanonicalDecl()->getBeginLoc(), FD->getName());
+ SmallString<128> Name;
+ OMPBuilder.OffloadInfoManager.getTargetRegionEntryFnName(Name, EntryInfo);
+
+ // We need to generate a new global to hold the address of the indirectly
+ // called device function. Doing this allows us to keep the visibility and
+ // linkage of the associated function unchanged while allowing the runtime to
+ // access its value.
+ llvm::GlobalValue *Addr = GV;
+ if (CGM.getLangOpts().OpenMPIsTargetDevice) {
+ Addr = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.VoidPtrTy,
+ /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, GV, Name,
+ nullptr, llvm::GlobalValue::NotThreadLocal,
+ CGM.getModule().getDataLayout().getDefaultGlobalsAddressSpace());
+ Addr->setVisibility(llvm::GlobalValue::ProtectedVisibility);
}
- return CGM.getLangOpts().OpenMPIsDevice;
+
+ OMPBuilder.OffloadInfoManager.registerDeviceGlobalVarEntryInfo(
+ Name, Addr, CGM.GetTargetTypeStoreSize(CGM.VoidPtrTy).getQuantity(),
+ llvm::OffloadEntriesInfoManager::OMPTargetGlobalVarEntryIndirect,
+ llvm::GlobalValue::WeakODRLinkage);
}
Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
@@ -2007,12 +1795,13 @@ Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
StringRef Name) {
std::string Suffix = getName({"artificial", ""});
llvm::Type *VarLVType = CGF.ConvertTypeForMem(VarType);
- llvm::Value *GAddr =
- getOrCreateInternalVariable(VarLVType, Twine(Name).concat(Suffix));
+ llvm::GlobalVariable *GAddr = OMPBuilder.getOrCreateInternalVariable(
+ VarLVType, Twine(Name).concat(Suffix).str());
if (CGM.getLangOpts().OpenMP && CGM.getLangOpts().OpenMPUseTLS &&
CGM.getTarget().isTLSSupported()) {
- cast<llvm::GlobalVariable>(GAddr)->setThreadLocal(/*Val=*/true);
- return Address(GAddr, CGM.getContext().getTypeAlignInChars(VarType));
+ GAddr->setThreadLocal(/*Val=*/true);
+ return Address(GAddr, GAddr->getValueType(),
+ CGM.getContext().getTypeAlignInChars(VarType));
}
std::string CacheSuffix = getName({"cache", ""});
llvm::Value *Args[] = {
@@ -2021,8 +1810,9 @@ Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(GAddr, CGM.VoidPtrTy),
CGF.Builder.CreateIntCast(CGF.getTypeSize(VarType), CGM.SizeTy,
/*isSigned=*/false),
- getOrCreateInternalVariable(
- CGM.VoidPtrPtrTy, Twine(Name).concat(Suffix).concat(CacheSuffix))};
+ OMPBuilder.getOrCreateInternalVariable(
+ CGM.VoidPtrPtrTy,
+ Twine(Name).concat(Suffix).concat(CacheSuffix).str())};
return Address(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitRuntimeCall(
@@ -2030,7 +1820,7 @@ Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
Args),
VarLVType->getPointerTo(/*AddrSpace=*/0)),
- CGM.getContext().getTypeAlignInChars(VarType));
+ VarLVType, CGM.getContext().getTypeAlignInChars(VarType));
}
void CGOpenMPRuntime::emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
@@ -2075,7 +1865,8 @@ void CGOpenMPRuntime::emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond) {
+ const Expr *IfCond,
+ llvm::Value *NumThreads) {
if (!CGF.HaveInsertPoint())
return;
llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
@@ -2112,7 +1903,7 @@ void CGOpenMPRuntime::emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
Address ZeroAddrBound =
CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
/*Name=*/".bound.zero.addr");
- CGF.InitTempAlloca(ZeroAddrBound, CGF.Builder.getInt32(/*C*/ 0));
+ CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddrBound);
llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
// ThreadId for serialized parallels is 0.
OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
@@ -2166,30 +1957,10 @@ Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
return ThreadIDTemp;
}
-llvm::Constant *CGOpenMPRuntime::getOrCreateInternalVariable(
- llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace) {
- SmallString<256> Buffer;
- llvm::raw_svector_ostream Out(Buffer);
- Out << Name;
- StringRef RuntimeName = Out.str();
- auto &Elem = *InternalVars.try_emplace(RuntimeName, nullptr).first;
- if (Elem.second) {
- assert(Elem.second->getType()->getPointerElementType() == Ty &&
- "OMP internal variable has different type than requested");
- return &*Elem.second;
- }
-
- return Elem.second = new llvm::GlobalVariable(
- CGM.getModule(), Ty, /*IsConstant*/ false,
- llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty),
- Elem.first(), /*InsertBefore=*/nullptr,
- llvm::GlobalValue::NotThreadLocal, AddressSpace);
-}
-
llvm::Value *CGOpenMPRuntime::getCriticalRegionLock(StringRef CriticalName) {
std::string Prefix = Twine("gomp_critical_user_", CriticalName).str();
std::string Name = getName({Prefix, "var"});
- return getOrCreateInternalVariable(KmpCriticalNameTy, Name);
+ return OMPBuilder.getOrCreateInternalVariable(KmpCriticalNameTy, Name);
}
namespace {
@@ -2361,14 +2132,15 @@ static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array,
Address PtrAddr = CGF.Builder.CreateConstArrayGEP(Array, Index);
llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
- Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var));
- Addr = CGF.Builder.CreateElementBitCast(
- Addr, CGF.ConvertTypeForMem(Var->getType()));
- return Addr;
+ llvm::Type *ElemTy = CGF.ConvertTypeForMem(Var->getType());
+ return Address(
+ CGF.Builder.CreateBitCast(
+ Ptr, ElemTy->getPointerTo(Ptr->getType()->getPointerAddressSpace())),
+ ElemTy, CGF.getContext().getDeclAlign(Var));
}
static llvm::Value *emitCopyprivateCopyFunction(
- CodeGenModule &CGM, llvm::Type *ArgsType,
+ CodeGenModule &CGM, llvm::Type *ArgsElemType,
ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps,
SourceLocation Loc) {
@@ -2376,9 +2148,9 @@ static llvm::Value *emitCopyprivateCopyFunction(
// void copy_func(void *LHSArg, void *RHSArg);
FunctionArgList Args;
ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
Args.push_back(&LHSArg);
Args.push_back(&RHSArg);
const auto &CGFI =
@@ -2395,11 +2167,13 @@ static llvm::Value *emitCopyprivateCopyFunction(
// Dest = (void*[n])(LHSArg);
// Src = (void*[n])(RHSArg);
Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
- ArgsType), CGF.getPointerAlign());
+ CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
+ ArgsElemType->getPointerTo()),
+ ArgsElemType, CGF.getPointerAlign());
Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
- ArgsType), CGF.getPointerAlign());
+ CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
+ ArgsElemType->getPointerTo()),
+ ArgsElemType, CGF.getPointerAlign());
// *(Type0*)Dst[0] = *(Type0*)Src[0];
// *(Type1*)Dst[1] = *(Type1*)Src[1];
// ...
@@ -2472,7 +2246,7 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
if (DidIt.isValid()) {
llvm::APInt ArraySize(/*unsigned int numBits=*/32, CopyprivateVars.size());
QualType CopyprivateArrayTy = C.getConstantArrayType(
- C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
+ C.VoidPtrTy, ArraySize, nullptr, ArraySizeModifier::Normal,
/*IndexTypeQuals=*/0);
// Create a list of all private variables for copyprivate.
Address CopyprivateList =
@@ -2488,12 +2262,11 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
// Build function that copies private values from single region to all other
// threads in the corresponding parallel region.
llvm::Value *CpyFn = emitCopyprivateCopyFunction(
- CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(),
- CopyprivateVars, SrcExprs, DstExprs, AssignmentOps, Loc);
+ CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy), CopyprivateVars,
+ SrcExprs, DstExprs, AssignmentOps, Loc);
llvm::Value *BufSize = CGF.getTypeSize(CopyprivateArrayTy);
- Address CL =
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
- CGF.VoidPtrTy);
+ Address CL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CopyprivateList, CGF.VoidPtrTy, CGF.Int8Ty);
llvm::Value *DidItVal = CGF.Builder.CreateLoad(DidIt);
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), // ident_t *<loc>
@@ -2616,6 +2389,22 @@ void CGOpenMPRuntime::emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
Args);
}
+void CGOpenMPRuntime::emitErrorCall(CodeGenFunction &CGF, SourceLocation Loc,
+ Expr *ME, bool IsFatal) {
+ llvm::Value *MVL =
+ ME ? CGF.EmitStringLiteralLValue(cast<StringLiteral>(ME)).getPointer(CGF)
+ : llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ // Build call void __kmpc_error(ident_t *loc, int severity, const char
+ // *message)
+ llvm::Value *Args[] = {
+ emitUpdateLocation(CGF, Loc, /*Flags=*/0, /*GenLoc=*/true),
+ llvm::ConstantInt::get(CGM.Int32Ty, IsFatal ? 2 : 1),
+ CGF.Builder.CreatePointerCast(MVL, CGM.Int8PtrTy)};
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_error),
+ Args);
+}
+
/// Map the OpenMP loop schedule to the runtime enumeration.
static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked, bool Ordered) {
@@ -2759,7 +2548,8 @@ void CGOpenMPRuntime::emitForDispatchInit(
CGF.Builder.getIntN(IVSize, 1), // Stride
Chunk // Chunk
};
- CGF.EmitRuntimeCall(createDispatchInitFunction(IVSize, IVSigned), Args);
+ CGF.EmitRuntimeCall(OMPBuilder.createDispatchInitFunction(IVSize, IVSigned),
+ Args);
}
static void emitForStaticInitCall(
@@ -2818,7 +2608,7 @@ void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
const StaticRTInput &Values) {
OpenMPSchedType ScheduleNum = getRuntimeSchedule(
ScheduleKind.Schedule, Values.Chunk != nullptr, Values.Ordered);
- assert(isOpenMPWorksharingDirective(DKind) &&
+ assert((isOpenMPWorksharingDirective(DKind) || (DKind == OMPD_loop)) &&
"Expected loop-based or sections-based directive.");
llvm::Value *UpdatedLocation = emitUpdateLocation(CGF, Loc,
isOpenMPLoopDirective(DKind)
@@ -2826,7 +2616,8 @@ void CGOpenMPRuntime::emitForStaticInit(CodeGenFunction &CGF,
: OMP_IDENT_WORK_SECTIONS);
llvm::Value *ThreadId = getThreadID(CGF, Loc);
llvm::FunctionCallee StaticInitFunction =
- createForStaticInitFunction(Values.IVSize, Values.IVSigned);
+ OMPBuilder.createForStaticInitFunction(Values.IVSize, Values.IVSigned,
+ false);
auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
ScheduleNum, ScheduleKind.M1, ScheduleKind.M2, Values);
@@ -2841,8 +2632,13 @@ void CGOpenMPRuntime::emitDistributeStaticInit(
llvm::Value *UpdatedLocation =
emitUpdateLocation(CGF, Loc, OMP_IDENT_WORK_DISTRIBUTE);
llvm::Value *ThreadId = getThreadID(CGF, Loc);
- llvm::FunctionCallee StaticInitFunction =
- createForStaticInitFunction(Values.IVSize, Values.IVSigned);
+ llvm::FunctionCallee StaticInitFunction;
+ bool isGPUDistribute =
+ CGM.getLangOpts().OpenMPIsTargetDevice &&
+ (CGM.getTriple().isAMDGCN() || CGM.getTriple().isNVPTX());
+ StaticInitFunction = OMPBuilder.createForStaticInitFunction(
+ Values.IVSize, Values.IVSigned, isGPUDistribute);
+
emitForStaticInitCall(CGF, UpdatedLocation, ThreadId, StaticInitFunction,
ScheduleNum, OMPC_SCHEDULE_MODIFIER_unknown,
OMPC_SCHEDULE_MODIFIER_unknown, Values);
@@ -2863,9 +2659,17 @@ void CGOpenMPRuntime::emitForStaticFinish(CodeGenFunction &CGF,
: OMP_IDENT_WORK_SECTIONS),
getThreadID(CGF, Loc)};
auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_for_static_fini),
- Args);
+ if (isOpenMPDistributeDirective(DKind) &&
+ CGM.getLangOpts().OpenMPIsTargetDevice &&
+ (CGM.getTriple().isAMDGCN() || CGM.getTriple().isNVPTX()))
+ CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_distribute_static_fini),
+ Args);
+ else
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_for_static_fini),
+ Args);
}
void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
@@ -2876,7 +2680,8 @@ void CGOpenMPRuntime::emitForOrderedIterationEnd(CodeGenFunction &CGF,
return;
// Call __kmpc_for_dynamic_fini_(4|8)[u](ident_t *loc, kmp_int32 tid);
llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
- CGF.EmitRuntimeCall(createDispatchFiniFunction(IVSize, IVSigned), Args);
+ CGF.EmitRuntimeCall(OMPBuilder.createDispatchFiniFunction(IVSize, IVSigned),
+ Args);
}
llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
@@ -2896,8 +2701,8 @@ llvm::Value *CGOpenMPRuntime::emitForNext(CodeGenFunction &CGF,
UB.getPointer(), // &Upper
ST.getPointer() // &Stride
};
- llvm::Value *Call =
- CGF.EmitRuntimeCall(createDispatchNextFunction(IVSize, IVSigned), Args);
+ llvm::Value *Call = CGF.EmitRuntimeCall(
+ OMPBuilder.createDispatchNextFunction(IVSize, IVSigned), Args);
return CGF.EmitScalarConversion(
Call, CGF.getContext().getIntTypeForBitwidth(32, /*Signed=*/1),
CGF.getContext().BoolTy, Loc);
@@ -2972,415 +2777,54 @@ enum KmpTaskTFields {
};
} // anonymous namespace
-bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const {
- return OffloadEntriesTargetRegion.empty() &&
- OffloadEntriesDeviceGlobalVar.empty();
-}
-
-/// Initialize target region entry.
-void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
- initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
- StringRef ParentName, unsigned LineNum,
- unsigned Order) {
- assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
- "only required for the device "
- "code generation.");
- OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] =
- OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr,
- OMPTargetRegionEntryTargetRegion);
- ++OffloadingEntriesNum;
-}
-
-void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
- registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
- StringRef ParentName, unsigned LineNum,
- llvm::Constant *Addr, llvm::Constant *ID,
- OMPTargetRegionEntryKind Flags) {
- // If we are emitting code for a target, the entry is already initialized,
- // only has to be registered.
- if (CGM.getLangOpts().OpenMPIsDevice) {
- // This could happen if the device compilation is invoked standalone.
- if (!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum))
- return;
- auto &Entry =
- OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum];
- Entry.setAddress(Addr);
- Entry.setID(ID);
- Entry.setFlags(Flags);
- } else {
- if (Flags ==
- OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion &&
- hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum,
- /*IgnoreAddressId*/ true))
- return;
- assert(!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum) &&
- "Target region entry already registered!");
- OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum, Addr, ID, Flags);
- OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum] = Entry;
- ++OffloadingEntriesNum;
- }
-}
-
-bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
- unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum,
- bool IgnoreAddressId) const {
- auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID);
- if (PerDevice == OffloadEntriesTargetRegion.end())
- return false;
- auto PerFile = PerDevice->second.find(FileID);
- if (PerFile == PerDevice->second.end())
- return false;
- auto PerParentName = PerFile->second.find(ParentName);
- if (PerParentName == PerFile->second.end())
- return false;
- auto PerLine = PerParentName->second.find(LineNum);
- if (PerLine == PerParentName->second.end())
- return false;
- // Fail if this entry is already registered.
- if (!IgnoreAddressId &&
- (PerLine->second.getAddress() || PerLine->second.getID()))
- return false;
- return true;
-}
-
-void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo(
- const OffloadTargetRegionEntryInfoActTy &Action) {
- // Scan all target region entries and perform the provided action.
- for (const auto &D : OffloadEntriesTargetRegion)
- for (const auto &F : D.second)
- for (const auto &P : F.second)
- for (const auto &L : P.second)
- Action(D.first, F.first, P.first(), L.first, L.second);
-}
-
-void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
- initializeDeviceGlobalVarEntryInfo(StringRef Name,
- OMPTargetGlobalVarEntryKind Flags,
- unsigned Order) {
- assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
- "only required for the device "
- "code generation.");
- OffloadEntriesDeviceGlobalVar.try_emplace(Name, Order, Flags);
- ++OffloadingEntriesNum;
-}
-
-void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
- registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
- CharUnits VarSize,
- OMPTargetGlobalVarEntryKind Flags,
- llvm::GlobalValue::LinkageTypes Linkage) {
- if (CGM.getLangOpts().OpenMPIsDevice) {
- // This could happen if the device compilation is invoked standalone.
- if (!hasDeviceGlobalVarEntryInfo(VarName))
- return;
- auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
- if (Entry.getAddress() && hasDeviceGlobalVarEntryInfo(VarName)) {
- if (Entry.getVarSize().isZero()) {
- Entry.setVarSize(VarSize);
- Entry.setLinkage(Linkage);
- }
- return;
- }
- Entry.setVarSize(VarSize);
- Entry.setLinkage(Linkage);
- Entry.setAddress(Addr);
- } else {
- if (hasDeviceGlobalVarEntryInfo(VarName)) {
- auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
- assert(Entry.isValid() && Entry.getFlags() == Flags &&
- "Entry not initialized!");
- if (Entry.getVarSize().isZero()) {
- Entry.setVarSize(VarSize);
- Entry.setLinkage(Linkage);
- }
- return;
- }
- OffloadEntriesDeviceGlobalVar.try_emplace(
- VarName, OffloadingEntriesNum, Addr, VarSize, Flags, Linkage);
- ++OffloadingEntriesNum;
- }
-}
-
-void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
- actOnDeviceGlobalVarEntriesInfo(
- const OffloadDeviceGlobalVarEntryInfoActTy &Action) {
- // Scan all target region entries and perform the provided action.
- for (const auto &E : OffloadEntriesDeviceGlobalVar)
- Action(E.getKey(), E.getValue());
-}
-
-void CGOpenMPRuntime::createOffloadEntry(
- llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags,
- llvm::GlobalValue::LinkageTypes Linkage) {
- StringRef Name = Addr->getName();
- llvm::Module &M = CGM.getModule();
- llvm::LLVMContext &C = M.getContext();
-
- // Create constant string with the name.
- llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name);
-
- std::string StringName = getName({"omp_offloading", "entry_name"});
- auto *Str = new llvm::GlobalVariable(
- M, StrPtrInit->getType(), /*isConstant=*/true,
- llvm::GlobalValue::InternalLinkage, StrPtrInit, StringName);
- Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
-
- llvm::Constant *Data[] = {
- llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(ID, CGM.VoidPtrTy),
- llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(Str, CGM.Int8PtrTy),
- llvm::ConstantInt::get(CGM.SizeTy, Size),
- llvm::ConstantInt::get(CGM.Int32Ty, Flags),
- llvm::ConstantInt::get(CGM.Int32Ty, 0)};
- std::string EntryName = getName({"omp_offloading", "entry", ""});
- llvm::GlobalVariable *Entry = createGlobalStruct(
- CGM, getTgtOffloadEntryQTy(), /*IsConstant=*/true, Data,
- Twine(EntryName).concat(Name), llvm::GlobalValue::WeakAnyLinkage);
-
- // The entry has to be created in the section the linker expects it to be.
- Entry->setSection("omp_offloading_entries");
-}
-
void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
- // Emit the offloading entries and metadata so that the device codegen side
- // can easily figure out what to emit. The produced metadata looks like
- // this:
- //
- // !omp_offload.info = !{!1, ...}
- //
- // Right now we only generate metadata for function that contain target
- // regions.
-
// If we are in simd mode or there are no entries, we don't need to do
// anything.
- if (CGM.getLangOpts().OpenMPSimd || OffloadEntriesInfoManager.empty())
+ if (CGM.getLangOpts().OpenMPSimd || OMPBuilder.OffloadInfoManager.empty())
return;
- llvm::Module &M = CGM.getModule();
- llvm::LLVMContext &C = M.getContext();
- SmallVector<std::tuple<const OffloadEntriesInfoManagerTy::OffloadEntryInfo *,
- SourceLocation, StringRef>,
- 16>
- OrderedEntries(OffloadEntriesInfoManager.size());
- llvm::SmallVector<StringRef, 16> ParentFunctions(
- OffloadEntriesInfoManager.size());
-
- // Auxiliary methods to create metadata values and strings.
- auto &&GetMDInt = [this](unsigned V) {
- return llvm::ConstantAsMetadata::get(
- llvm::ConstantInt::get(CGM.Int32Ty, V));
- };
-
- auto &&GetMDString = [&C](StringRef V) { return llvm::MDString::get(C, V); };
-
- // Create the offloading info metadata node.
- llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");
-
- // Create function that emits metadata for each target region entry;
- auto &&TargetRegionMetadataEmitter =
- [this, &C, MD, &OrderedEntries, &ParentFunctions, &GetMDInt,
- &GetMDString](
- unsigned DeviceID, unsigned FileID, StringRef ParentName,
- unsigned Line,
- const OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion &E) {
- // Generate metadata for target regions. Each entry of this metadata
- // contains:
- // - Entry 0 -> Kind of this type of metadata (0).
- // - Entry 1 -> Device ID of the file where the entry was identified.
- // - Entry 2 -> File ID of the file where the entry was identified.
- // - Entry 3 -> Mangled name of the function where the entry was
- // identified.
- // - Entry 4 -> Line in the file where the entry was identified.
- // - Entry 5 -> Order the entry was created.
- // The first element of the metadata node is the kind.
- llvm::Metadata *Ops[] = {GetMDInt(E.getKind()), GetMDInt(DeviceID),
- GetMDInt(FileID), GetMDString(ParentName),
- GetMDInt(Line), GetMDInt(E.getOrder())};
-
- SourceLocation Loc;
- for (auto I = CGM.getContext().getSourceManager().fileinfo_begin(),
- E = CGM.getContext().getSourceManager().fileinfo_end();
- I != E; ++I) {
- if (I->getFirst()->getUniqueID().getDevice() == DeviceID &&
- I->getFirst()->getUniqueID().getFile() == FileID) {
- Loc = CGM.getContext().getSourceManager().translateFileLineCol(
- I->getFirst(), Line, 1);
- break;
- }
- }
- // Save this entry in the right position of the ordered entries array.
- OrderedEntries[E.getOrder()] = std::make_tuple(&E, Loc, ParentName);
- ParentFunctions[E.getOrder()] = ParentName;
-
- // Add metadata to the named metadata node.
- MD->addOperand(llvm::MDNode::get(C, Ops));
- };
-
- OffloadEntriesInfoManager.actOnTargetRegionEntriesInfo(
- TargetRegionMetadataEmitter);
-
- // Create function that emits metadata for each device global variable entry;
- auto &&DeviceGlobalVarMetadataEmitter =
- [&C, &OrderedEntries, &GetMDInt, &GetMDString,
- MD](StringRef MangledName,
- const OffloadEntriesInfoManagerTy::OffloadEntryInfoDeviceGlobalVar
- &E) {
- // Generate metadata for global variables. Each entry of this metadata
- // contains:
- // - Entry 0 -> Kind of this type of metadata (1).
- // - Entry 1 -> Mangled name of the variable.
- // - Entry 2 -> Declare target kind.
- // - Entry 3 -> Order the entry was created.
- // The first element of the metadata node is the kind.
- llvm::Metadata *Ops[] = {
- GetMDInt(E.getKind()), GetMDString(MangledName),
- GetMDInt(E.getFlags()), GetMDInt(E.getOrder())};
-
- // Save this entry in the right position of the ordered entries array.
- OrderedEntries[E.getOrder()] =
- std::make_tuple(&E, SourceLocation(), MangledName);
-
- // Add metadata to the named metadata node.
- MD->addOperand(llvm::MDNode::get(C, Ops));
- };
-
- OffloadEntriesInfoManager.actOnDeviceGlobalVarEntriesInfo(
- DeviceGlobalVarMetadataEmitter);
-
- for (const auto &E : OrderedEntries) {
- assert(std::get<0>(E) && "All ordered entries must exist!");
- if (const auto *CE =
- dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
- std::get<0>(E))) {
- if (!CE->getID() || !CE->getAddress()) {
- // Do not blame the entry if the parent funtion is not emitted.
- StringRef FnName = ParentFunctions[CE->getOrder()];
- if (!CGM.GetGlobalValue(FnName))
- continue;
- unsigned DiagID = CGM.getDiags().getCustomDiagID(
- DiagnosticsEngine::Error,
- "Offloading entry for target region in %0 is incorrect: either the "
- "address or the ID is invalid.");
- CGM.getDiags().Report(std::get<1>(E), DiagID) << FnName;
- continue;
- }
- createOffloadEntry(CE->getID(), CE->getAddress(), /*Size=*/0,
- CE->getFlags(), llvm::GlobalValue::WeakAnyLinkage);
- } else if (const auto *CE = dyn_cast<OffloadEntriesInfoManagerTy::
- OffloadEntryInfoDeviceGlobalVar>(
- std::get<0>(E))) {
- OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags =
- static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
- CE->getFlags());
- switch (Flags) {
- case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo: {
- if (CGM.getLangOpts().OpenMPIsDevice &&
- CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())
- continue;
- if (!CE->getAddress()) {
- unsigned DiagID = CGM.getDiags().getCustomDiagID(
- DiagnosticsEngine::Error, "Offloading entry for declare target "
- "variable %0 is incorrect: the "
- "address is invalid.");
- CGM.getDiags().Report(std::get<1>(E), DiagID) << std::get<2>(E);
- continue;
- }
- // The vaiable has no definition - no need to add the entry.
- if (CE->getVarSize().isZero())
- continue;
- break;
- }
- case OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink:
- assert(((CGM.getLangOpts().OpenMPIsDevice && !CE->getAddress()) ||
- (!CGM.getLangOpts().OpenMPIsDevice && CE->getAddress())) &&
- "Declaret target link address is set.");
- if (CGM.getLangOpts().OpenMPIsDevice)
- continue;
- if (!CE->getAddress()) {
- unsigned DiagID = CGM.getDiags().getCustomDiagID(
- DiagnosticsEngine::Error,
- "Offloading entry for declare target variable is incorrect: the "
- "address is invalid.");
- CGM.getDiags().Report(DiagID);
- continue;
+ llvm::OpenMPIRBuilder::EmitMetadataErrorReportFunctionTy &&ErrorReportFn =
+ [this](llvm::OpenMPIRBuilder::EmitMetadataErrorKind Kind,
+ const llvm::TargetRegionEntryInfo &EntryInfo) -> void {
+ SourceLocation Loc;
+ if (Kind != llvm::OpenMPIRBuilder::EMIT_MD_GLOBAL_VAR_LINK_ERROR) {
+ for (auto I = CGM.getContext().getSourceManager().fileinfo_begin(),
+ E = CGM.getContext().getSourceManager().fileinfo_end();
+ I != E; ++I) {
+ if (I->getFirst().getUniqueID().getDevice() == EntryInfo.DeviceID &&
+ I->getFirst().getUniqueID().getFile() == EntryInfo.FileID) {
+ Loc = CGM.getContext().getSourceManager().translateFileLineCol(
+ I->getFirst(), EntryInfo.Line, 1);
+ break;
}
- break;
}
- createOffloadEntry(CE->getAddress(), CE->getAddress(),
- CE->getVarSize().getQuantity(), Flags,
- CE->getLinkage());
- } else {
- llvm_unreachable("Unsupported entry kind.");
}
- }
-}
-
-/// Loads all the offload entries information from the host IR
-/// metadata.
-void CGOpenMPRuntime::loadOffloadInfoMetadata() {
- // If we are in target mode, load the metadata from the host IR. This code has
- // to match the metadaata creation in createOffloadEntriesAndInfoMetadata().
-
- if (!CGM.getLangOpts().OpenMPIsDevice)
- return;
-
- if (CGM.getLangOpts().OMPHostIRFile.empty())
- return;
-
- auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
- if (auto EC = Buf.getError()) {
- CGM.getDiags().Report(diag::err_cannot_open_file)
- << CGM.getLangOpts().OMPHostIRFile << EC.message();
- return;
- }
-
- llvm::LLVMContext C;
- auto ME = expectedToErrorOrAndEmitErrors(
- C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));
-
- if (auto EC = ME.getError()) {
- unsigned DiagID = CGM.getDiags().getCustomDiagID(
- DiagnosticsEngine::Error, "Unable to parse host IR file '%0':'%1'");
- CGM.getDiags().Report(DiagID)
- << CGM.getLangOpts().OMPHostIRFile << EC.message();
- return;
- }
-
- llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info");
- if (!MD)
- return;
-
- for (llvm::MDNode *MN : MD->operands()) {
- auto &&GetMDInt = [MN](unsigned Idx) {
- auto *V = cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
- return cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
- };
-
- auto &&GetMDString = [MN](unsigned Idx) {
- auto *V = cast<llvm::MDString>(MN->getOperand(Idx));
- return V->getString();
- };
-
- switch (GetMDInt(0)) {
- default:
- llvm_unreachable("Unexpected metadata!");
- break;
- case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
- OffloadingEntryInfoTargetRegion:
- OffloadEntriesInfoManager.initializeTargetRegionEntryInfo(
- /*DeviceID=*/GetMDInt(1), /*FileID=*/GetMDInt(2),
- /*ParentName=*/GetMDString(3), /*Line=*/GetMDInt(4),
- /*Order=*/GetMDInt(5));
- break;
- case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
- OffloadingEntryInfoDeviceGlobalVar:
- OffloadEntriesInfoManager.initializeDeviceGlobalVarEntryInfo(
- /*MangledName=*/GetMDString(1),
- static_cast<OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind>(
- /*Flags=*/GetMDInt(2)),
- /*Order=*/GetMDInt(3));
- break;
+ switch (Kind) {
+ case llvm::OpenMPIRBuilder::EMIT_MD_TARGET_REGION_ERROR: {
+ unsigned DiagID = CGM.getDiags().getCustomDiagID(
+ DiagnosticsEngine::Error, "Offloading entry for target region in "
+ "%0 is incorrect: either the "
+ "address or the ID is invalid.");
+ CGM.getDiags().Report(Loc, DiagID) << EntryInfo.ParentName;
+ } break;
+ case llvm::OpenMPIRBuilder::EMIT_MD_DECLARE_TARGET_ERROR: {
+ unsigned DiagID = CGM.getDiags().getCustomDiagID(
+ DiagnosticsEngine::Error, "Offloading entry for declare target "
+ "variable %0 is incorrect: the "
+ "address is invalid.");
+ CGM.getDiags().Report(Loc, DiagID) << EntryInfo.ParentName;
+ } break;
+ case llvm::OpenMPIRBuilder::EMIT_MD_GLOBAL_VAR_LINK_ERROR: {
+ unsigned DiagID = CGM.getDiags().getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "Offloading entry for declare target variable is incorrect: the "
+ "address is invalid.");
+ CGM.getDiags().Report(DiagID);
+ } break;
}
- }
+ };
+
+ OMPBuilder.createOffloadEntriesAndInfoMetadata(ErrorReportFn);
}
void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
@@ -3395,35 +2839,6 @@ void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
}
}
-QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {
- // Make sure the type of the entry is already created. This is the type we
- // have to create:
- // struct __tgt_offload_entry{
- // void *addr; // Pointer to the offload entry info.
- // // (function or global)
- // char *name; // Name of the function or global.
- // size_t size; // Size of the entry info (0 if it a function).
- // int32_t flags; // Flags associated with the entry, e.g. 'link'.
- // int32_t reserved; // Reserved, to use by the runtime library.
- // };
- if (TgtOffloadEntryQTy.isNull()) {
- ASTContext &C = CGM.getContext();
- RecordDecl *RD = C.buildImplicitRecord("__tgt_offload_entry");
- RD->startDefinition();
- addFieldToRecordDecl(C, RD, C.VoidPtrTy);
- addFieldToRecordDecl(C, RD, C.getPointerType(C.CharTy));
- addFieldToRecordDecl(C, RD, C.getSizeType());
- addFieldToRecordDecl(
- C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
- addFieldToRecordDecl(
- C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
- RD->completeDefinition();
- RD->addAttr(PackedAttr::CreateImplicit(C));
- TgtOffloadEntryQTy = C.getRecordType(RD);
- }
- return TgtOffloadEntryQTy;
-}
-
namespace {
struct PrivateHelpersTy {
PrivateHelpersTy(const Expr *OriginalRef, const VarDecl *Original,
@@ -3448,8 +2863,7 @@ static bool isAllocatableDecl(const VarDecl *VD) {
return false;
const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
// Use the default allocation.
- return !((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc ||
- AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) &&
+ return !(AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc &&
!AA->getAllocator());
}
@@ -3505,7 +2919,7 @@ createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
// kmp_int32 liter;
// void * reductions;
// };
- RecordDecl *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TTK_Union);
+ RecordDecl *UD = C.buildImplicitRecord("kmp_cmplrdata_t", TagTypeKind::Union);
UD->startDefinition();
addFieldToRecordDecl(C, UD, KmpInt32Ty);
addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
@@ -3571,10 +2985,10 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
ASTContext &C = CGM.getContext();
FunctionArgList Args;
ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
KmpTaskTWithPrivatesPtrQTy.withRestrict(),
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
Args.push_back(&GtidArg);
Args.push_back(&TaskTypeArg);
const auto &TaskEntryFnInfo =
@@ -3625,12 +3039,12 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
}
- llvm::Value *CommonArgs[] = {GtidParam, PartidParam, PrivatesParam,
- TaskPrivatesMap,
- CGF.Builder
- .CreatePointerBitCastOrAddrSpaceCast(
- TDBase.getAddress(CGF), CGF.VoidPtrTy)
- .getPointer()};
+ llvm::Value *CommonArgs[] = {
+ GtidParam, PartidParam, PrivatesParam, TaskPrivatesMap,
+ CGF.Builder
+ .CreatePointerBitCastOrAddrSpaceCast(TDBase.getAddress(CGF),
+ CGF.VoidPtrTy, CGF.Int8Ty)
+ .getPointer()};
SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
std::end(CommonArgs));
if (isOpenMPTaskLoopDirective(Kind)) {
@@ -3673,10 +3087,10 @@ static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
ASTContext &C = CGM.getContext();
FunctionArgList Args;
ImplicitParamDecl GtidArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, KmpInt32Ty,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
ImplicitParamDecl TaskTypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
KmpTaskTWithPrivatesPtrQTy.withRestrict(),
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
Args.push_back(&GtidArg);
Args.push_back(&TaskTypeArg);
const auto &DestructorFnInfo =
@@ -3733,7 +3147,7 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
ImplicitParamDecl TaskPrivatesArg(
C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
C.getPointerType(PrivatesQTy).withConst().withRestrict(),
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
Args.push_back(&TaskPrivatesArg);
llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, unsigned> PrivateVarsPos;
unsigned Counter = 1;
@@ -3743,7 +3157,7 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
C.getPointerType(C.getPointerType(E->getType()))
.withConst()
.withRestrict(),
- ImplicitParamDecl::Other));
+ ImplicitParamKind::Other));
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
PrivateVarsPos[VD] = Counter;
++Counter;
@@ -3754,7 +3168,7 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
C.getPointerType(C.getPointerType(E->getType()))
.withConst()
.withRestrict(),
- ImplicitParamDecl::Other));
+ ImplicitParamKind::Other));
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
PrivateVarsPos[VD] = Counter;
++Counter;
@@ -3765,7 +3179,7 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
C.getPointerType(C.getPointerType(E->getType()))
.withConst()
.withRestrict(),
- ImplicitParamDecl::Other));
+ ImplicitParamKind::Other));
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
PrivateVarsPos[VD] = Counter;
++Counter;
@@ -3779,7 +3193,7 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
Args.push_back(ImplicitParamDecl::Create(
C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
C.getPointerType(C.getPointerType(Ty)).withConst().withRestrict(),
- ImplicitParamDecl::Other));
+ ImplicitParamKind::Other));
PrivateVarsPos[VD] = Counter;
++Counter;
}
@@ -3850,7 +3264,8 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
(IsTargetTask && KmpTaskSharedsPtr.isValid())) {
SrcBase = CGF.MakeAddrLValue(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy)),
+ KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy),
+ CGF.ConvertTypeForMem(SharedsTy)),
SharedsTy);
}
FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
@@ -3886,13 +3301,13 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
} else if (ForDup) {
SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
SharedRefLValue = CGF.MakeAddrLValue(
- Address(SharedRefLValue.getPointer(CGF),
- C.getDeclAlign(OriginalVD)),
+ SharedRefLValue.getAddress(CGF).withAlignment(
+ C.getDeclAlign(OriginalVD)),
SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
SharedRefLValue.getTBAAInfo());
} else if (CGF.LambdaCaptureFields.count(
Pair.second.Original->getCanonicalDecl()) > 0 ||
- dyn_cast_or_null<BlockDecl>(CGF.CurCodeDecl)) {
+ isa_and_nonnull<BlockDecl>(CGF.CurCodeDecl)) {
SharedRefLValue = CGF.EmitLValue(Pair.second.OriginalRef);
} else {
// Processing for implicitly captured variables.
@@ -3916,8 +3331,7 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
Address SrcElement) {
// Clean up any temporaries needed by the initialization.
CodeGenFunction::OMPPrivateScope InitScope(CGF);
- InitScope.addPrivate(
- Elem, [SrcElement]() -> Address { return SrcElement; });
+ InitScope.addPrivate(Elem, SrcElement);
(void)InitScope.Privatize();
// Emit initialization for single element.
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(
@@ -3929,9 +3343,7 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
}
} else {
CodeGenFunction::OMPPrivateScope InitScope(CGF);
- InitScope.addPrivate(Elem, [SharedRefLValue, &CGF]() -> Address {
- return SharedRefLValue.getAddress(CGF);
- });
+ InitScope.addPrivate(Elem, SharedRefLValue.getAddress(CGF));
(void)InitScope.Privatize();
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
CGF.EmitExprAsInit(Init, VD, PrivateLValue,
@@ -3954,7 +3366,7 @@ static bool checkInitIsRequired(CodeGenFunction &CGF,
continue;
const VarDecl *VD = Pair.second.PrivateCopy;
const Expr *Init = VD->getAnyInitializer();
- InitRequired = InitRequired || (Init && isa<CXXConstructExpr>(Init) &&
+ InitRequired = InitRequired || (isa_and_nonnull<CXXConstructExpr>(Init) &&
!CGF.isTrivialInitializer(Init));
if (InitRequired)
break;
@@ -3985,12 +3397,12 @@ emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
FunctionArgList Args;
ImplicitParamDecl DstArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
KmpTaskTWithPrivatesPtrQTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
ImplicitParamDecl SrcArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
KmpTaskTWithPrivatesPtrQTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
ImplicitParamDecl LastprivArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
Args.push_back(&DstArg);
Args.push_back(&SrcArg);
Args.push_back(&LastprivArg);
@@ -4034,7 +3446,7 @@ emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
Base, *std::next(KmpTaskTQTyRD->field_begin(),
KmpTaskTShareds)),
Loc),
- CGM.getNaturalTypeAlignment(SharedsTy));
+ CGF.Int8Ty, CGM.getNaturalTypeAlignment(SharedsTy));
}
emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, TDBase, KmpTaskTWithPrivatesQTyRD,
SharedsTy, SharedsPtrTy, Data, Privates, /*ForDup=*/true);
@@ -4077,14 +3489,11 @@ public:
for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
Uppers.push_back(CGF.EmitScalarExpr(E->getHelper(I).Upper));
const auto *VD = cast<VarDecl>(E->getIteratorDecl(I));
- addPrivate(VD, [&CGF, VD]() {
- return CGF.CreateMemTemp(VD->getType(), VD->getName());
- });
+ addPrivate(VD, CGF.CreateMemTemp(VD->getType(), VD->getName()));
const OMPIteratorHelperData &HelperData = E->getHelper(I);
- addPrivate(HelperData.CounterVD, [&CGF, &HelperData]() {
- return CGF.CreateMemTemp(HelperData.CounterVD->getType(),
- "counter.addr");
- });
+ addPrivate(
+ HelperData.CounterVD,
+ CGF.CreateMemTemp(HelperData.CounterVD->getType(), "counter.addr"));
}
Privatize();
@@ -4401,18 +3810,18 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
if (NumOfElements) {
NumOfElements = CGF.Builder.CreateNUWAdd(
llvm::ConstantInt::get(CGF.SizeTy, NumAffinities), NumOfElements);
- OpaqueValueExpr OVE(
+ auto *OVE = new (C) OpaqueValueExpr(
Loc,
C.getIntTypeForBitwidth(C.getTypeSize(C.getSizeType()), /*Signed=*/0),
VK_PRValue);
- CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE,
+ CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, OVE,
RValue::get(NumOfElements));
- KmpTaskAffinityInfoArrayTy =
- C.getVariableArrayType(KmpTaskAffinityInfoTy, &OVE, ArrayType::Normal,
- /*IndexTypeQuals=*/0, SourceRange(Loc, Loc));
+ KmpTaskAffinityInfoArrayTy = C.getVariableArrayType(
+ KmpTaskAffinityInfoTy, OVE, ArraySizeModifier::Normal,
+ /*IndexTypeQuals=*/0, SourceRange(Loc, Loc));
// Properly emit variable-sized array.
auto *PD = ImplicitParamDecl::Create(C, KmpTaskAffinityInfoArrayTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
CGF.EmitVarDecl(*PD);
AffinitiesArray = CGF.GetAddrOfLocalVar(PD);
NumOfElements = CGF.Builder.CreateIntCast(NumOfElements, CGF.Int32Ty,
@@ -4421,7 +3830,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
KmpTaskAffinityInfoArrayTy = C.getConstantArrayType(
KmpTaskAffinityInfoTy,
llvm::APInt(C.getTypeSize(C.getSizeType()), NumAffinities), nullptr,
- ArrayType::Normal, /*IndexTypeQuals=*/0);
+ ArraySizeModifier::Normal, /*IndexTypeQuals=*/0);
AffinitiesArray =
CGF.CreateMemTemp(KmpTaskAffinityInfoArrayTy, ".affs.arr.addr");
AffinitiesArray = CGF.Builder.CreateConstArrayGEP(AffinitiesArray, 0);
@@ -4477,10 +3886,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
std::tie(Addr, Size) = getPointerAndSize(CGF, E);
llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
LValue Base = CGF.MakeAddrLValue(
- Address(CGF.Builder.CreateGEP(AffinitiesArray.getElementType(),
- AffinitiesArray.getPointer(), Idx),
- AffinitiesArray.getAlignment()),
- KmpTaskAffinityInfoTy);
+ CGF.Builder.CreateGEP(AffinitiesArray, Idx), KmpTaskAffinityInfoTy);
// affs[i].base_addr = &<Affinities[i].second>;
LValue BaseAddrLVal = CGF.EmitLValueForField(
Base, *std::next(KmpAffinityInfoRD->field_begin(), BaseAddr));
@@ -4520,13 +3926,13 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
// Copy shareds if there are any.
Address KmpTaskSharedsPtr = Address::invalid();
if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
- KmpTaskSharedsPtr =
- Address(CGF.EmitLoadOfScalar(
- CGF.EmitLValueForField(
- TDBase, *std::next(KmpTaskTQTyRD->field_begin(),
- KmpTaskTShareds)),
- Loc),
- CGM.getNaturalTypeAlignment(SharedsTy));
+ KmpTaskSharedsPtr = Address(
+ CGF.EmitLoadOfScalar(
+ CGF.EmitLValueForField(
+ TDBase,
+ *std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds)),
+ Loc),
+ CGF.Int8Ty, CGM.getNaturalTypeAlignment(SharedsTy));
LValue Dest = CGF.MakeAddrLValue(KmpTaskSharedsPtr, SharedsTy);
LValue Src = CGF.MakeAddrLValue(Shareds, SharedsTy);
CGF.EmitAggregateCopy(Dest, Src, SharedsTy, AggValueSlot::DoesNotOverlap);
@@ -4578,35 +3984,31 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
return Result;
}
-namespace {
-/// Dependence kind for RTL.
-enum RTLDependenceKindTy {
- DepIn = 0x01,
- DepInOut = 0x3,
- DepMutexInOutSet = 0x4
-};
-/// Fields ids in kmp_depend_info record.
-enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
-} // namespace
-
/// Translates internal dependency kind into the runtime kind.
static RTLDependenceKindTy translateDependencyKind(OpenMPDependClauseKind K) {
RTLDependenceKindTy DepKind;
switch (K) {
case OMPC_DEPEND_in:
- DepKind = DepIn;
+ DepKind = RTLDependenceKindTy::DepIn;
break;
// Out and InOut dependencies must use the same code.
case OMPC_DEPEND_out:
case OMPC_DEPEND_inout:
- DepKind = DepInOut;
+ DepKind = RTLDependenceKindTy::DepInOut;
break;
case OMPC_DEPEND_mutexinoutset:
- DepKind = DepMutexInOutSet;
+ DepKind = RTLDependenceKindTy::DepMutexInOutSet;
+ break;
+ case OMPC_DEPEND_inoutset:
+ DepKind = RTLDependenceKindTy::DepInOutSet;
+ break;
+ case OMPC_DEPEND_outallmemory:
+ DepKind = RTLDependenceKindTy::DepOmpAllMem;
break;
case OMPC_DEPEND_source:
case OMPC_DEPEND_sink:
case OMPC_DEPEND_depobj:
+ case OMPC_DEPEND_inoutallmemory:
case OMPC_DEPEND_unknown:
llvm_unreachable("Unknown task dependence type");
}
@@ -4636,23 +4038,21 @@ CGOpenMPRuntime::getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal,
getDependTypes(C, KmpDependInfoTy, FlagsTy);
RecordDecl *KmpDependInfoRD =
cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
- LValue Base = CGF.EmitLoadOfPointerLValue(
- DepobjLVal.getAddress(CGF),
- C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
- Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy));
- Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
- Base.getTBAAInfo());
- llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
- Addr.getElementType(), Addr.getPointer(),
+ LValue Base = CGF.EmitLoadOfPointerLValue(
+ DepobjLVal.getAddress(CGF).withElementType(
+ CGF.ConvertTypeForMem(KmpDependInfoPtrTy)),
+ KmpDependInfoPtrTy->castAs<PointerType>());
+ Address DepObjAddr = CGF.Builder.CreateGEP(
+ Base.getAddress(CGF),
llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
LValue NumDepsBase = CGF.MakeAddrLValue(
- Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
- Base.getBaseInfo(), Base.getTBAAInfo());
+ DepObjAddr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo());
// NumDeps = deps[i].base_addr;
LValue BaseAddrLVal = CGF.EmitLValueForField(
- NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
+ NumDepsBase,
+ *std::next(KmpDependInfoRD->field_begin(),
+ static_cast<unsigned int>(RTLDependInfoFields::BaseAddr)));
llvm::Value *NumDeps = CGF.EmitLoadOfScalar(BaseAddrLVal, Loc);
return std::make_pair(NumDeps, Base);
}
@@ -4676,35 +4076,46 @@ static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
for (const Expr *E : Data.DepExprs) {
llvm::Value *Addr;
llvm::Value *Size;
- std::tie(Addr, Size) = getPointerAndSize(CGF, E);
+
+ // The expression will be a nullptr in the 'omp_all_memory' case.
+ if (E) {
+ std::tie(Addr, Size) = getPointerAndSize(CGF, E);
+ Addr = CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy);
+ } else {
+ Addr = llvm::ConstantInt::get(CGF.IntPtrTy, 0);
+ Size = llvm::ConstantInt::get(CGF.SizeTy, 0);
+ }
LValue Base;
if (unsigned *P = Pos.dyn_cast<unsigned *>()) {
Base = CGF.MakeAddrLValue(
CGF.Builder.CreateConstGEP(DependenciesArray, *P), KmpDependInfoTy);
} else {
+ assert(E && "Expected a non-null expression");
LValue &PosLVal = *Pos.get<LValue *>();
llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
Base = CGF.MakeAddrLValue(
- Address(CGF.Builder.CreateGEP(DependenciesArray.getElementType(),
- DependenciesArray.getPointer(), Idx),
- DependenciesArray.getAlignment()),
- KmpDependInfoTy);
+ CGF.Builder.CreateGEP(DependenciesArray, Idx), KmpDependInfoTy);
}
// deps[i].base_addr = &<Dependencies[i].second>;
LValue BaseAddrLVal = CGF.EmitLValueForField(
- Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
- CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
- BaseAddrLVal);
+ Base,
+ *std::next(KmpDependInfoRD->field_begin(),
+ static_cast<unsigned int>(RTLDependInfoFields::BaseAddr)));
+ CGF.EmitStoreOfScalar(Addr, BaseAddrLVal);
// deps[i].len = sizeof(<Dependencies[i].second>);
LValue LenLVal = CGF.EmitLValueForField(
- Base, *std::next(KmpDependInfoRD->field_begin(), Len));
+ Base, *std::next(KmpDependInfoRD->field_begin(),
+ static_cast<unsigned int>(RTLDependInfoFields::Len)));
CGF.EmitStoreOfScalar(Size, LenLVal);
// deps[i].flags = <Dependencies[i].first>;
RTLDependenceKindTy DepKind = translateDependencyKind(Data.DepKind);
LValue FlagsLVal = CGF.EmitLValueForField(
- Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
- CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
- FlagsLVal);
+ Base,
+ *std::next(KmpDependInfoRD->field_begin(),
+ static_cast<unsigned int>(RTLDependInfoFields::Flags)));
+ CGF.EmitStoreOfScalar(
+ llvm::ConstantInt::get(LLVMFlagsTy, static_cast<unsigned int>(DepKind)),
+ FlagsLVal);
if (unsigned *P = Pos.dyn_cast<unsigned *>()) {
++(*P);
} else {
@@ -4717,50 +4128,30 @@ static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
}
}
-static SmallVector<llvm::Value *, 4>
-emitDepobjElementsSizes(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
- const OMPTaskDataTy::DependData &Data) {
+SmallVector<llvm::Value *, 4> CGOpenMPRuntime::emitDepobjElementsSizes(
+ CodeGenFunction &CGF, QualType &KmpDependInfoTy,
+ const OMPTaskDataTy::DependData &Data) {
assert(Data.DepKind == OMPC_DEPEND_depobj &&
- "Expected depobj dependecy kind.");
+ "Expected depobj dependency kind.");
SmallVector<llvm::Value *, 4> Sizes;
SmallVector<LValue, 4> SizeLVals;
ASTContext &C = CGF.getContext();
- QualType FlagsTy;
- getDependTypes(C, KmpDependInfoTy, FlagsTy);
- RecordDecl *KmpDependInfoRD =
- cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
- QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
- llvm::Type *KmpDependInfoPtrT = CGF.ConvertTypeForMem(KmpDependInfoPtrTy);
{
OMPIteratorGeneratorScope IteratorScope(
CGF, cast_or_null<OMPIteratorExpr>(
Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
: nullptr));
for (const Expr *E : Data.DepExprs) {
+ llvm::Value *NumDeps;
+ LValue Base;
LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts());
- LValue Base = CGF.EmitLoadOfPointerLValue(
- DepobjLVal.getAddress(CGF),
- C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
- Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- Base.getAddress(CGF), KmpDependInfoPtrT);
- Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
- Base.getTBAAInfo());
- llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
- Addr.getElementType(), Addr.getPointer(),
- llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
- LValue NumDepsBase = CGF.MakeAddrLValue(
- Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
- Base.getBaseInfo(), Base.getTBAAInfo());
- // NumDeps = deps[i].base_addr;
- LValue BaseAddrLVal = CGF.EmitLValueForField(
- NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
- llvm::Value *NumDeps =
- CGF.EmitLoadOfScalar(BaseAddrLVal, E->getExprLoc());
+ std::tie(NumDeps, Base) =
+ getDepobjElements(CGF, DepobjLVal, E->getExprLoc());
LValue NumLVal = CGF.MakeAddrLValue(
CGF.CreateMemTemp(C.getUIntPtrType(), "depobj.size.addr"),
C.getUIntPtrType());
- CGF.InitTempAlloca(NumLVal.getAddress(CGF),
- llvm::ConstantInt::get(CGF.IntPtrTy, 0));
+ CGF.Builder.CreateStore(llvm::ConstantInt::get(CGF.IntPtrTy, 0),
+ NumLVal.getAddress(CGF));
llvm::Value *PrevVal = CGF.EmitLoadOfScalar(NumLVal, E->getExprLoc());
llvm::Value *Add = CGF.Builder.CreateNUWAdd(PrevVal, NumDeps);
CGF.EmitStoreOfScalar(Add, NumLVal);
@@ -4775,19 +4166,13 @@ emitDepobjElementsSizes(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
return Sizes;
}
-static void emitDepobjElements(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
- LValue PosLVal,
- const OMPTaskDataTy::DependData &Data,
- Address DependenciesArray) {
+void CGOpenMPRuntime::emitDepobjElements(CodeGenFunction &CGF,
+ QualType &KmpDependInfoTy,
+ LValue PosLVal,
+ const OMPTaskDataTy::DependData &Data,
+ Address DependenciesArray) {
assert(Data.DepKind == OMPC_DEPEND_depobj &&
- "Expected depobj dependecy kind.");
- ASTContext &C = CGF.getContext();
- QualType FlagsTy;
- getDependTypes(C, KmpDependInfoTy, FlagsTy);
- RecordDecl *KmpDependInfoRD =
- cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
- QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
- llvm::Type *KmpDependInfoPtrT = CGF.ConvertTypeForMem(KmpDependInfoPtrTy);
+ "Expected depobj dependency kind.");
llvm::Value *ElSize = CGF.getTypeSize(KmpDependInfoTy);
{
OMPIteratorGeneratorScope IteratorScope(
@@ -4796,37 +4181,18 @@ static void emitDepobjElements(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
: nullptr));
for (unsigned I = 0, End = Data.DepExprs.size(); I < End; ++I) {
const Expr *E = Data.DepExprs[I];
+ llvm::Value *NumDeps;
+ LValue Base;
LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts());
- LValue Base = CGF.EmitLoadOfPointerLValue(
- DepobjLVal.getAddress(CGF),
- C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
- Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- Base.getAddress(CGF), KmpDependInfoPtrT);
- Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
- Base.getTBAAInfo());
-
- // Get number of elements in a single depobj.
- llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
- Addr.getElementType(), Addr.getPointer(),
- llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
- LValue NumDepsBase = CGF.MakeAddrLValue(
- Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
- Base.getBaseInfo(), Base.getTBAAInfo());
- // NumDeps = deps[i].base_addr;
- LValue BaseAddrLVal = CGF.EmitLValueForField(
- NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
- llvm::Value *NumDeps =
- CGF.EmitLoadOfScalar(BaseAddrLVal, E->getExprLoc());
+ std::tie(NumDeps, Base) =
+ getDepobjElements(CGF, DepobjLVal, E->getExprLoc());
// memcopy dependency data.
llvm::Value *Size = CGF.Builder.CreateNUWMul(
ElSize,
CGF.Builder.CreateIntCast(NumDeps, CGF.SizeTy, /*isSigned=*/false));
llvm::Value *Pos = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
- Address DepAddr =
- Address(CGF.Builder.CreateGEP(DependenciesArray.getElementType(),
- DependenciesArray.getPointer(), Pos),
- DependenciesArray.getAlignment());
+ Address DepAddr = CGF.Builder.CreateGEP(DependenciesArray, Pos);
CGF.Builder.CreateMemCpy(DepAddr, Base.getAddress(CGF), Size);
// Increase pos.
@@ -4861,8 +4227,9 @@ std::pair<llvm::Value *, Address> CGOpenMPRuntime::emitDependClause(
bool HasRegularWithIterators = false;
llvm::Value *NumOfDepobjElements = llvm::ConstantInt::get(CGF.IntPtrTy, 0);
llvm::Value *NumOfRegularWithIterators =
- llvm::ConstantInt::get(CGF.IntPtrTy, 1);
- // Calculate number of depobj dependecies and regular deps with the iterators.
+ llvm::ConstantInt::get(CGF.IntPtrTy, 0);
+ // Calculate number of depobj dependencies and regular deps with the
+ // iterators.
for (const OMPTaskDataTy::DependData &D : Dependencies) {
if (D.DepKind == OMPC_DEPEND_depobj) {
SmallVector<llvm::Value *, 4> Sizes =
@@ -4875,12 +4242,15 @@ std::pair<llvm::Value *, Address> CGOpenMPRuntime::emitDependClause(
continue;
}
// Include number of iterations, if any.
+
if (const auto *IE = cast_or_null<OMPIteratorExpr>(D.IteratorExpr)) {
for (unsigned I = 0, E = IE->numOfIterators(); I < E; ++I) {
llvm::Value *Sz = CGF.EmitScalarExpr(IE->getHelper(I).Upper);
Sz = CGF.Builder.CreateIntCast(Sz, CGF.IntPtrTy, /*isSigned=*/false);
+ llvm::Value *NumClauseDeps = CGF.Builder.CreateNUWMul(
+ Sz, llvm::ConstantInt::get(CGF.IntPtrTy, D.DepExprs.size()));
NumOfRegularWithIterators =
- CGF.Builder.CreateNUWMul(NumOfRegularWithIterators, Sz);
+ CGF.Builder.CreateNUWAdd(NumOfRegularWithIterators, NumClauseDeps);
}
HasRegularWithIterators = true;
continue;
@@ -4899,18 +4269,18 @@ std::pair<llvm::Value *, Address> CGOpenMPRuntime::emitDependClause(
NumOfElements =
CGF.Builder.CreateNUWAdd(NumOfRegularWithIterators, NumOfElements);
}
- OpaqueValueExpr OVE(Loc,
- C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0),
- VK_PRValue);
- CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE,
+ auto *OVE = new (C) OpaqueValueExpr(
+ Loc, C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0),
+ VK_PRValue);
+ CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, OVE,
RValue::get(NumOfElements));
KmpDependInfoArrayTy =
- C.getVariableArrayType(KmpDependInfoTy, &OVE, ArrayType::Normal,
+ C.getVariableArrayType(KmpDependInfoTy, OVE, ArraySizeModifier::Normal,
/*IndexTypeQuals=*/0, SourceRange(Loc, Loc));
// CGF.EmitVariablyModifiedType(KmpDependInfoArrayTy);
// Properly emit variable-sized array.
auto *PD = ImplicitParamDecl::Create(C, KmpDependInfoArrayTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
CGF.EmitVarDecl(*PD);
DependenciesArray = CGF.GetAddrOfLocalVar(PD);
NumOfElements = CGF.Builder.CreateIntCast(NumOfElements, CGF.Int32Ty,
@@ -4918,7 +4288,7 @@ std::pair<llvm::Value *, Address> CGOpenMPRuntime::emitDependClause(
} else {
KmpDependInfoArrayTy = C.getConstantArrayType(
KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies), nullptr,
- ArrayType::Normal, /*IndexTypeQuals=*/0);
+ ArraySizeModifier::Normal, /*IndexTypeQuals=*/0);
DependenciesArray =
CGF.CreateMemTemp(KmpDependInfoArrayTy, ".dep.arr.addr");
DependenciesArray = CGF.Builder.CreateConstArrayGEP(DependenciesArray, 0);
@@ -4933,7 +4303,7 @@ std::pair<llvm::Value *, Address> CGOpenMPRuntime::emitDependClause(
emitDependData(CGF, KmpDependInfoTy, &Pos, Dependencies[I],
DependenciesArray);
}
- // Copy regular dependecies with iterators.
+ // Copy regular dependencies with iterators.
LValue PosLVal = CGF.MakeAddrLValue(
CGF.CreateMemTemp(C.getSizeType(), "dep.counter.addr"), C.getSizeType());
CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGF.SizeTy, Pos), PosLVal);
@@ -4954,7 +4324,7 @@ std::pair<llvm::Value *, Address> CGOpenMPRuntime::emitDependClause(
}
}
DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- DependenciesArray, CGF.VoidPtrTy);
+ DependenciesArray, CGF.VoidPtrTy, CGF.Int8Ty);
return std::make_pair(NumOfElements, DependenciesArray);
}
@@ -4998,7 +4368,7 @@ Address CGOpenMPRuntime::emitDepobjDependClause(
} else {
QualType KmpDependInfoArrayTy = C.getConstantArrayType(
KmpDependInfoTy, llvm::APInt(/*numBits=*/64, NumDependencies + 1),
- nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
+ nullptr, ArraySizeModifier::Normal, /*IndexTypeQuals=*/0);
CharUnits Sz = C.getTypeSizeInChars(KmpDependInfoArrayTy);
Size = CGM.getSize(Sz.alignTo(Align));
NumDepsVal = llvm::ConstantInt::get(CGF.IntPtrTy, NumDependencies);
@@ -5013,14 +4383,17 @@ Address CGOpenMPRuntime::emitDepobjDependClause(
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_alloc),
Args, ".dep.arr.addr");
+ llvm::Type *KmpDependInfoLlvmTy = CGF.ConvertTypeForMem(KmpDependInfoTy);
Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- Addr, CGF.ConvertTypeForMem(KmpDependInfoTy)->getPointerTo());
- DependenciesArray = Address(Addr, Align);
+ Addr, KmpDependInfoLlvmTy->getPointerTo());
+ DependenciesArray = Address(Addr, KmpDependInfoLlvmTy, Align);
// Write number of elements in the first element of array for depobj.
LValue Base = CGF.MakeAddrLValue(DependenciesArray, KmpDependInfoTy);
// deps[i].base_addr = NumDependencies;
LValue BaseAddrLVal = CGF.EmitLValueForField(
- Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
+ Base,
+ *std::next(KmpDependInfoRD->field_begin(),
+ static_cast<unsigned int>(RTLDependInfoFields::BaseAddr)));
CGF.EmitStoreOfScalar(NumDepsVal, BaseAddrLVal);
llvm::PointerUnion<unsigned *, LValue *> Pos;
unsigned Idx = 1;
@@ -5037,7 +4410,8 @@ Address CGOpenMPRuntime::emitDepobjDependClause(
}
emitDependData(CGF, KmpDependInfoTy, Pos, Dependencies, DependenciesArray);
DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateConstGEP(DependenciesArray, 1), CGF.VoidPtrTy);
+ CGF.Builder.CreateConstGEP(DependenciesArray, 1), CGF.VoidPtrTy,
+ CGF.Int8Ty);
return DependenciesArray;
}
@@ -5047,11 +4421,11 @@ void CGOpenMPRuntime::emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
QualType FlagsTy;
getDependTypes(C, KmpDependInfoTy, FlagsTy);
LValue Base = CGF.EmitLoadOfPointerLValue(
- DepobjLVal.getAddress(CGF),
- C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
+ DepobjLVal.getAddress(CGF), C.VoidPtrTy.castAs<PointerType>());
QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy));
+ Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy),
+ CGF.ConvertTypeForMem(KmpDependInfoTy));
llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
Addr.getElementType(), Addr.getPointer(),
llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
@@ -5093,15 +4467,17 @@ void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
llvm::PHINode *ElementPHI =
CGF.Builder.CreatePHI(Begin.getType(), 2, "omp.elementPast");
ElementPHI->addIncoming(Begin.getPointer(), EntryBB);
- Begin = Address(ElementPHI, Begin.getAlignment());
+ Begin = Begin.withPointer(ElementPHI, KnownNonNull);
Base = CGF.MakeAddrLValue(Begin, KmpDependInfoTy, Base.getBaseInfo(),
Base.getTBAAInfo());
// deps[i].flags = NewDepKind;
RTLDependenceKindTy DepKind = translateDependencyKind(NewDepKind);
LValue FlagsLVal = CGF.EmitLValueForField(
- Base, *std::next(KmpDependInfoRD->field_begin(), Flags));
- CGF.EmitStoreOfScalar(llvm::ConstantInt::get(LLVMFlagsTy, DepKind),
- FlagsLVal);
+ Base, *std::next(KmpDependInfoRD->field_begin(),
+ static_cast<unsigned int>(RTLDependInfoFields::Flags)));
+ CGF.EmitStoreOfScalar(
+ llvm::ConstantInt::get(LLVMFlagsTy, static_cast<unsigned int>(DepKind)),
+ FlagsLVal);
// Shift the address forward by one element.
Address ElementNext =
@@ -5179,7 +4555,7 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
Region->emitUntiedSwitch(CGF);
};
- llvm::Value *DepWaitTaskArgs[6];
+ llvm::Value *DepWaitTaskArgs[7];
if (!Data.Dependences.empty()) {
DepWaitTaskArgs[0] = UpLoc;
DepWaitTaskArgs[1] = ThreadID;
@@ -5187,6 +4563,8 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
DepWaitTaskArgs[3] = DependenciesArray.getPointer();
DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ DepWaitTaskArgs[6] =
+ llvm::ConstantInt::get(CGF.Int32Ty, Data.HasNowaitClause);
}
auto &M = CGM.getModule();
auto &&ElseCodeGen = [this, &M, &TaskArgs, ThreadID, NewTaskNewTaskTTy,
@@ -5198,9 +4576,9 @@ void CGOpenMPRuntime::emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
// ndeps_noalias, kmp_depend_info_t *noalias_dep_list); if dependence info
// is specified.
if (!Data.Dependences.empty())
- CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_omp_wait_deps),
- DepWaitTaskArgs);
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ M, OMPRTL___kmpc_omp_taskwait_deps_51),
+ DepWaitTaskArgs);
// Call proxy_task_entry(gtid, new_task);
auto &&CodeGen = [TaskEntry, ThreadID, NewTaskNewTaskTTy,
Loc](CodeGenFunction &CGF, PrePostActionTy &Action) {
@@ -5365,21 +4743,21 @@ static void EmitOMPAggregateReduction(
llvm::PHINode *RHSElementPHI = CGF.Builder.CreatePHI(
RHSBegin->getType(), 2, "omp.arraycpy.srcElementPast");
RHSElementPHI->addIncoming(RHSBegin, EntryBB);
- Address RHSElementCurrent =
- Address(RHSElementPHI,
- RHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
+ Address RHSElementCurrent(
+ RHSElementPHI, RHSAddr.getElementType(),
+ RHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
llvm::PHINode *LHSElementPHI = CGF.Builder.CreatePHI(
LHSBegin->getType(), 2, "omp.arraycpy.destElementPast");
LHSElementPHI->addIncoming(LHSBegin, EntryBB);
- Address LHSElementCurrent =
- Address(LHSElementPHI,
- LHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
+ Address LHSElementCurrent(
+ LHSElementPHI, LHSAddr.getElementType(),
+ LHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
// Emit copy.
CodeGenFunction::OMPPrivateScope Scope(CGF);
- Scope.addPrivate(LHSVar, [=]() { return LHSElementCurrent; });
- Scope.addPrivate(RHSVar, [=]() { return RHSElementCurrent; });
+ Scope.addPrivate(LHSVar, LHSElementCurrent);
+ Scope.addPrivate(RHSVar, RHSElementCurrent);
Scope.Privatize();
RedOpGen(CGF, XExpr, EExpr, UpExpr);
Scope.ForceCleanup();
@@ -5424,22 +4802,22 @@ static void emitReductionCombiner(CodeGenFunction &CGF,
}
llvm::Function *CGOpenMPRuntime::emitReductionFunction(
- SourceLocation Loc, llvm::Type *ArgsType, ArrayRef<const Expr *> Privates,
- ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
- ArrayRef<const Expr *> ReductionOps) {
+ StringRef ReducerName, SourceLocation Loc, llvm::Type *ArgsElemType,
+ ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs,
+ ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps) {
ASTContext &C = CGM.getContext();
// void reduction_func(void *LHSArg, void *RHSArg);
FunctionArgList Args;
ImplicitParamDecl LHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
ImplicitParamDecl RHSArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
Args.push_back(&LHSArg);
Args.push_back(&RHSArg);
const auto &CGFI =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
- std::string Name = getName({"omp", "reduction", "reduction_func"});
+ std::string Name = getReductionFuncName(ReducerName);
auto *Fn = llvm::Function::Create(CGM.getTypes().GetFunctionType(CGFI),
llvm::GlobalValue::InternalLinkage, Name,
&CGM.getModule());
@@ -5451,29 +4829,27 @@ llvm::Function *CGOpenMPRuntime::emitReductionFunction(
// Dst = (void*[n])(LHSArg);
// Src = (void*[n])(RHSArg);
Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
- ArgsType), CGF.getPointerAlign());
+ CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
+ ArgsElemType->getPointerTo()),
+ ArgsElemType, CGF.getPointerAlign());
Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
- ArgsType), CGF.getPointerAlign());
+ CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
+ ArgsElemType->getPointerTo()),
+ ArgsElemType, CGF.getPointerAlign());
// ...
// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
// ...
CodeGenFunction::OMPPrivateScope Scope(CGF);
- auto IPriv = Privates.begin();
+ const auto *IPriv = Privates.begin();
unsigned Idx = 0;
for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I, ++IPriv, ++Idx) {
const auto *RHSVar =
cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl());
- Scope.addPrivate(RHSVar, [&CGF, RHS, Idx, RHSVar]() {
- return emitAddrOfVarFromArray(CGF, RHS, Idx, RHSVar);
- });
+ Scope.addPrivate(RHSVar, emitAddrOfVarFromArray(CGF, RHS, Idx, RHSVar));
const auto *LHSVar =
cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl());
- Scope.addPrivate(LHSVar, [&CGF, LHS, Idx, LHSVar]() {
- return emitAddrOfVarFromArray(CGF, LHS, Idx, LHSVar);
- });
+ Scope.addPrivate(LHSVar, emitAddrOfVarFromArray(CGF, LHS, Idx, LHSVar));
QualType PrivTy = (*IPriv)->getType();
if (PrivTy->isVariablyModifiedType()) {
// Get array size and emit VLA type.
@@ -5490,8 +4866,8 @@ llvm::Function *CGOpenMPRuntime::emitReductionFunction(
}
Scope.Privatize();
IPriv = Privates.begin();
- auto ILHS = LHSExprs.begin();
- auto IRHS = RHSExprs.begin();
+ const auto *ILHS = LHSExprs.begin();
+ const auto *IRHS = RHSExprs.begin();
for (const Expr *E : ReductionOps) {
if ((*IPriv)->getType()->isArrayType()) {
// Emit reduction for array section.
@@ -5586,9 +4962,9 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
if (SimpleReduction) {
CodeGenFunction::RunCleanupsScope Scope(CGF);
- auto IPriv = Privates.begin();
- auto ILHS = LHSExprs.begin();
- auto IRHS = RHSExprs.begin();
+ const auto *IPriv = Privates.begin();
+ const auto *ILHS = LHSExprs.begin();
+ const auto *IRHS = RHSExprs.begin();
for (const Expr *E : ReductionOps) {
emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
cast<DeclRefExpr>(*IRHS));
@@ -5608,12 +4984,12 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
++Size;
}
llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
- QualType ReductionArrayTy =
- C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
- /*IndexTypeQuals=*/0);
+ QualType ReductionArrayTy = C.getConstantArrayType(
+ C.VoidPtrTy, ArraySize, nullptr, ArraySizeModifier::Normal,
+ /*IndexTypeQuals=*/0);
Address ReductionList =
CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
- auto IPriv = Privates.begin();
+ const auto *IPriv = Privates.begin();
unsigned Idx = 0;
for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
@@ -5637,8 +5013,8 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
// 2. Emit reduce_func().
llvm::Function *ReductionFn = emitReductionFunction(
- Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
- LHSExprs, RHSExprs, ReductionOps);
+ CGF.CurFn->getName(), Loc, CGF.ConvertTypeForMem(ReductionArrayTy),
+ Privates, LHSExprs, RHSExprs, ReductionOps);
// 3. Create static kmp_critical_name lock = { 0 };
std::string Name = getName({"reduction"});
@@ -5690,9 +5066,9 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps](
CodeGenFunction &CGF, PrePostActionTy &Action) {
CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
- auto IPriv = Privates.begin();
- auto ILHS = LHSExprs.begin();
- auto IRHS = RHSExprs.begin();
+ const auto *IPriv = Privates.begin();
+ const auto *ILHS = LHSExprs.begin();
+ const auto *IRHS = RHSExprs.begin();
for (const Expr *E : ReductionOps) {
RT.emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
cast<DeclRefExpr>(*IRHS));
@@ -5703,7 +5079,7 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
};
RegionCodeGenTy RCG(CodeGen);
CommonActionTy Action(
- nullptr, llvm::None,
+ nullptr, std::nullopt,
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), WithNowait ? OMPRTL___kmpc_end_reduce_nowait
: OMPRTL___kmpc_end_reduce),
@@ -5724,9 +5100,9 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
auto &&AtomicCodeGen = [Loc, Privates, LHSExprs, RHSExprs, ReductionOps](
CodeGenFunction &CGF, PrePostActionTy &Action) {
- auto ILHS = LHSExprs.begin();
- auto IRHS = RHSExprs.begin();
- auto IPriv = Privates.begin();
+ const auto *ILHS = LHSExprs.begin();
+ const auto *IRHS = RHSExprs.begin();
+ const auto *IPriv = Privates.begin();
for (const Expr *E : ReductionOps) {
const Expr *XExpr = nullptr;
const Expr *EExpr = nullptr;
@@ -5768,14 +5144,11 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
llvm::AtomicOrdering::Monotonic, Loc,
[&CGF, UpExpr, VD, Loc](RValue XRValue) {
CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
- PrivateScope.addPrivate(
- VD, [&CGF, VD, XRValue, Loc]() {
- Address LHSTemp = CGF.CreateMemTemp(VD->getType());
- CGF.emitOMPSimpleStore(
- CGF.MakeAddrLValue(LHSTemp, VD->getType()), XRValue,
- VD->getType().getNonReferenceType(), Loc);
- return LHSTemp;
- });
+ Address LHSTemp = CGF.CreateMemTemp(VD->getType());
+ CGF.emitOMPSimpleStore(
+ CGF.MakeAddrLValue(LHSTemp, VD->getType()), XRValue,
+ VD->getType().getNonReferenceType(), Loc);
+ PrivateScope.addPrivate(VD, LHSTemp);
(void)PrivateScope.Privatize();
return CGF.EmitAnyExpr(UpExpr);
});
@@ -5828,7 +5201,7 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ThreadId, // i32 <gtid>
Lock // kmp_critical_name *&<lock>
};
- CommonActionTy Action(nullptr, llvm::None,
+ CommonActionTy Action(nullptr, std::nullopt,
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_end_reduce),
EndArgs);
@@ -5876,9 +5249,9 @@ static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
VoidPtrTy.addRestrict();
FunctionArgList Args;
ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
ImplicitParamDecl ParamOrig(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
Args.emplace_back(&Param);
Args.emplace_back(&ParamOrig);
const auto &FnInfo =
@@ -5891,9 +5264,11 @@ static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
Fn->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
+ QualType PrivateType = RCG.getPrivateType(N);
Address PrivateAddr = CGF.EmitLoadOfPointer(
- CGF.GetAddrOfLocalVar(&Param),
- C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
+ CGF.GetAddrOfLocalVar(&Param).withElementType(
+ CGF.ConvertTypeForMem(PrivateType)->getPointerTo()),
+ C.getPointerType(PrivateType)->castAs<PointerType>());
llvm::Value *Size = nullptr;
// If the size of the reduction item is non-constant, load it from global
// threadprivate variable.
@@ -5905,25 +5280,20 @@ static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
CGM.getContext().getSizeType(), Loc);
}
RCG.emitAggregateType(CGF, N, Size);
- LValue OrigLVal;
+ Address OrigAddr = Address::invalid();
// If initializer uses initializer from declare reduction construct, emit a
// pointer to the address of the original reduction item (reuired by reduction
// initializer)
if (RCG.usesReductionInitializer(N)) {
Address SharedAddr = CGF.GetAddrOfLocalVar(&ParamOrig);
- SharedAddr = CGF.EmitLoadOfPointer(
+ OrigAddr = CGF.EmitLoadOfPointer(
SharedAddr,
CGM.getContext().VoidPtrTy.castAs<PointerType>()->getTypePtr());
- OrigLVal = CGF.MakeAddrLValue(SharedAddr, CGM.getContext().VoidPtrTy);
- } else {
- OrigLVal = CGF.MakeNaturalAlignAddrLValue(
- llvm::ConstantPointerNull::get(CGM.VoidPtrTy),
- CGM.getContext().VoidPtrTy);
}
// Emit the initializer:
// %0 = bitcast void* %arg to <type>*
// store <type> <init>, <type>* %0
- RCG.emitInitialization(CGF, N, PrivateAddr, OrigLVal,
+ RCG.emitInitialization(CGF, N, PrivateAddr, OrigAddr,
[](CodeGenFunction &) { return false; });
CGF.FinishFunction();
return Fn;
@@ -5950,9 +5320,9 @@ static llvm::Value *emitReduceCombFunction(CodeGenModule &CGM,
const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(RHS)->getDecl());
FunctionArgList Args;
ImplicitParamDecl ParamInOut(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
+ C.VoidPtrTy, ImplicitParamKind::Other);
ImplicitParamDecl ParamIn(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
Args.emplace_back(&ParamInOut);
Args.emplace_back(&ParamIn);
const auto &FnInfo =
@@ -5980,22 +5350,21 @@ static llvm::Value *emitReduceCombFunction(CodeGenModule &CGM,
// %lhs = bitcast void* %arg0 to <type>*
// %rhs = bitcast void* %arg1 to <type>*
CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
- PrivateScope.addPrivate(LHSVD, [&C, &CGF, &ParamInOut, LHSVD]() {
- // Pull out the pointer to the variable.
- Address PtrAddr = CGF.EmitLoadOfPointer(
- CGF.GetAddrOfLocalVar(&ParamInOut),
- C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
- return CGF.Builder.CreateElementBitCast(
- PtrAddr, CGF.ConvertTypeForMem(LHSVD->getType()));
- });
- PrivateScope.addPrivate(RHSVD, [&C, &CGF, &ParamIn, RHSVD]() {
- // Pull out the pointer to the variable.
- Address PtrAddr = CGF.EmitLoadOfPointer(
- CGF.GetAddrOfLocalVar(&ParamIn),
- C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
- return CGF.Builder.CreateElementBitCast(
- PtrAddr, CGF.ConvertTypeForMem(RHSVD->getType()));
- });
+ PrivateScope.addPrivate(
+ LHSVD,
+ // Pull out the pointer to the variable.
+ CGF.EmitLoadOfPointer(
+ CGF.GetAddrOfLocalVar(&ParamInOut)
+ .withElementType(
+ CGF.ConvertTypeForMem(LHSVD->getType())->getPointerTo()),
+ C.getPointerType(LHSVD->getType())->castAs<PointerType>()));
+ PrivateScope.addPrivate(
+ RHSVD,
+ // Pull out the pointer to the variable.
+ CGF.EmitLoadOfPointer(
+ CGF.GetAddrOfLocalVar(&ParamIn).withElementType(
+ CGF.ConvertTypeForMem(RHSVD->getType())->getPointerTo()),
+ C.getPointerType(RHSVD->getType())->castAs<PointerType>()));
PrivateScope.Privatize();
// Emit the combiner body:
// %2 = <ReductionOp>(<type> *%lhs, <type> *%rhs)
@@ -6023,7 +5392,7 @@ static llvm::Value *emitReduceFiniFunction(CodeGenModule &CGM,
ASTContext &C = CGM.getContext();
FunctionArgList Args;
ImplicitParamDecl Param(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
Args.emplace_back(&Param);
const auto &FnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
@@ -6036,8 +5405,7 @@ static llvm::Value *emitReduceFiniFunction(CodeGenModule &CGM,
CodeGenFunction CGF(CGM);
CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
Address PrivateAddr = CGF.EmitLoadOfPointer(
- CGF.GetAddrOfLocalVar(&Param),
- C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
+ CGF.GetAddrOfLocalVar(&Param), C.VoidPtrTy.castAs<PointerType>());
llvm::Value *Size = nullptr;
// If the size of the reduction item is non-constant, load it from global
// threadprivate variable.
@@ -6087,8 +5455,9 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
QualType RDType = C.getRecordType(RD);
unsigned Size = Data.ReductionVars.size();
llvm::APInt ArraySize(/*numBits=*/64, Size);
- QualType ArrayRDType = C.getConstantArrayType(
- RDType, ArraySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
+ QualType ArrayRDType =
+ C.getConstantArrayType(RDType, ArraySize, nullptr,
+ ArraySizeModifier::Normal, /*IndexTypeQuals=*/0);
// kmp_task_red_input_t .rd_input.[Size];
Address TaskRedInput = CGF.CreateMemTemp(ArrayRDType, ".rd_input.");
ReductionCodeGen RCG(Data.ReductionVars, Data.ReductionOrigs,
@@ -6098,21 +5467,19 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
llvm::Value *Idxs[] = {llvm::ConstantInt::get(CGM.SizeTy, /*V=*/0),
llvm::ConstantInt::get(CGM.SizeTy, Cnt)};
llvm::Value *GEP = CGF.EmitCheckedInBoundsGEP(
- TaskRedInput.getPointer(), Idxs,
+ TaskRedInput.getElementType(), TaskRedInput.getPointer(), Idxs,
/*SignedIndices=*/false, /*IsSubtraction=*/false, Loc,
".rd_input.gep.");
LValue ElemLVal = CGF.MakeNaturalAlignAddrLValue(GEP, RDType);
// ElemLVal.reduce_shar = &Shareds[Cnt];
LValue SharedLVal = CGF.EmitLValueForField(ElemLVal, SharedFD);
RCG.emitSharedOrigLValue(CGF, Cnt);
- llvm::Value *CastedShared =
- CGF.EmitCastToVoidPtr(RCG.getSharedLValue(Cnt).getPointer(CGF));
- CGF.EmitStoreOfScalar(CastedShared, SharedLVal);
+ llvm::Value *Shared = RCG.getSharedLValue(Cnt).getPointer(CGF);
+ CGF.EmitStoreOfScalar(Shared, SharedLVal);
// ElemLVal.reduce_orig = &Origs[Cnt];
LValue OrigLVal = CGF.EmitLValueForField(ElemLVal, OrigFD);
- llvm::Value *CastedOrig =
- CGF.EmitCastToVoidPtr(RCG.getOrigLValue(Cnt).getPointer(CGF));
- CGF.EmitStoreOfScalar(CastedOrig, OrigLVal);
+ llvm::Value *Orig = RCG.getOrigLValue(Cnt).getPointer(CGF);
+ CGF.EmitStoreOfScalar(Orig, OrigLVal);
RCG.emitAggregateType(CGF, Cnt);
llvm::Value *SizeValInChars;
llvm::Value *SizeVal;
@@ -6129,21 +5496,19 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
CGF.EmitStoreOfScalar(SizeValInChars, SizeLVal);
// ElemLVal.reduce_init = init;
LValue InitLVal = CGF.EmitLValueForField(ElemLVal, InitFD);
- llvm::Value *InitAddr =
- CGF.EmitCastToVoidPtr(emitReduceInitFunction(CGM, Loc, RCG, Cnt));
+ llvm::Value *InitAddr = emitReduceInitFunction(CGM, Loc, RCG, Cnt);
CGF.EmitStoreOfScalar(InitAddr, InitLVal);
// ElemLVal.reduce_fini = fini;
LValue FiniLVal = CGF.EmitLValueForField(ElemLVal, FiniFD);
llvm::Value *Fini = emitReduceFiniFunction(CGM, Loc, RCG, Cnt);
- llvm::Value *FiniAddr = Fini
- ? CGF.EmitCastToVoidPtr(Fini)
- : llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
+ llvm::Value *FiniAddr =
+ Fini ? Fini : llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
CGF.EmitStoreOfScalar(FiniAddr, FiniLVal);
// ElemLVal.reduce_comb = comb;
LValue CombLVal = CGF.EmitLValueForField(ElemLVal, CombFD);
- llvm::Value *CombAddr = CGF.EmitCastToVoidPtr(emitReduceCombFunction(
+ llvm::Value *CombAddr = emitReduceCombFunction(
CGM, Loc, RCG, Cnt, Data.ReductionOps[Cnt], LHSExprs[Cnt],
- RHSExprs[Cnt], Data.ReductionCopies[Cnt]));
+ RHSExprs[Cnt], Data.ReductionCopies[Cnt]);
CGF.EmitStoreOfScalar(CombAddr, CombLVal);
// ElemLVal.flags = 0;
LValue FlagsLVal = CGF.EmitLValueForField(ElemLVal, FlagsFD);
@@ -6237,24 +5602,56 @@ Address CGOpenMPRuntime::getTaskReductionItem(CodeGenFunction &CGF,
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_task_reduction_get_th_data),
Args),
- SharedLVal.getAlignment());
+ CGF.Int8Ty, SharedLVal.getAlignment());
}
-void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
- SourceLocation Loc) {
+void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc,
+ const OMPTaskDataTy &Data) {
if (!CGF.HaveInsertPoint())
return;
- if (CGF.CGM.getLangOpts().OpenMPIRBuilder) {
+ if (CGF.CGM.getLangOpts().OpenMPIRBuilder && Data.Dependences.empty()) {
+ // TODO: Need to support taskwait with dependences in the OpenMPIRBuilder.
OMPBuilder.createTaskwait(CGF.Builder);
} else {
- // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
- // global_tid);
- llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc)};
- // Ignore return result until untied tasks are supported.
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_omp_taskwait),
- Args);
+ llvm::Value *ThreadID = getThreadID(CGF, Loc);
+ llvm::Value *UpLoc = emitUpdateLocation(CGF, Loc);
+ auto &M = CGM.getModule();
+ Address DependenciesArray = Address::invalid();
+ llvm::Value *NumOfElements;
+ std::tie(NumOfElements, DependenciesArray) =
+ emitDependClause(CGF, Data.Dependences, Loc);
+ if (!Data.Dependences.empty()) {
+ llvm::Value *DepWaitTaskArgs[7];
+ DepWaitTaskArgs[0] = UpLoc;
+ DepWaitTaskArgs[1] = ThreadID;
+ DepWaitTaskArgs[2] = NumOfElements;
+ DepWaitTaskArgs[3] = DependenciesArray.getPointer();
+ DepWaitTaskArgs[4] = CGF.Builder.getInt32(0);
+ DepWaitTaskArgs[5] = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ DepWaitTaskArgs[6] =
+ llvm::ConstantInt::get(CGF.Int32Ty, Data.HasNowaitClause);
+
+ CodeGenFunction::RunCleanupsScope LocalScope(CGF);
+
+ // Build void __kmpc_omp_taskwait_deps_51(ident_t *, kmp_int32 gtid,
+ // kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
+ // ndeps_noalias, kmp_depend_info_t *noalias_dep_list,
+ // kmp_int32 has_no_wait); if dependence info is specified.
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ M, OMPRTL___kmpc_omp_taskwait_deps_51),
+ DepWaitTaskArgs);
+
+ } else {
+
+ // Build call kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
+ // global_tid);
+ llvm::Value *Args[] = {UpLoc, ThreadID};
+ // Ignore return result until untied tasks are supported.
+ CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_omp_taskwait),
+ Args);
+ }
}
if (auto *Region = dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
@@ -6417,7 +5814,7 @@ void CGOpenMPRuntime::emitTargetOutlinedFunction(
const OMPExecutableDirective &D, StringRef ParentName,
llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
- assert(!ParentName.empty() && "Invalid target region parent name!");
+ assert(!ParentName.empty() && "Invalid target entry parent name!");
HasEmittedTargetRegion = true;
SmallVector<std::pair<const Expr *, const Expr *>, 4> Allocators;
for (const auto *C : D.getClausesOfKind<OMPUsesAllocatorsClause>()) {
@@ -6448,19 +5845,18 @@ void CGOpenMPRuntime::emitUsesAllocatorsInit(CodeGenFunction &CGF,
.getLimitedValue());
LValue AllocatorTraitsLVal = CGF.EmitLValue(AllocatorTraits);
Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- AllocatorTraitsLVal.getAddress(CGF), CGF.VoidPtrPtrTy);
+ AllocatorTraitsLVal.getAddress(CGF), CGF.VoidPtrPtrTy, CGF.VoidPtrTy);
AllocatorTraitsLVal = CGF.MakeAddrLValue(Addr, CGF.getContext().VoidPtrTy,
AllocatorTraitsLVal.getBaseInfo(),
AllocatorTraitsLVal.getTBAAInfo());
- llvm::Value *Traits =
- CGF.EmitLoadOfScalar(AllocatorTraitsLVal, AllocatorTraits->getExprLoc());
+ llvm::Value *Traits = Addr.getPointer();
llvm::Value *AllocatorVal =
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_init_allocator),
{ThreadId, MemSpaceHandle, NumTraits, Traits});
// Store to allocator.
- CGF.EmitVarDecl(*cast<VarDecl>(
+ CGF.EmitAutoVarAlloca(*cast<VarDecl>(
cast<DeclRefExpr>(Allocator->IgnoreParenImpCasts())->getDecl()));
LValue AllocatorLVal = CGF.EmitLValue(Allocator->IgnoreParenImpCasts());
AllocatorVal =
@@ -6485,86 +5881,73 @@ void CGOpenMPRuntime::emitUsesAllocatorsFini(CodeGenFunction &CGF,
{ThreadId, AllocatorVal});
}
+void CGOpenMPRuntime::computeMinAndMaxThreadsAndTeams(
+ const OMPExecutableDirective &D, CodeGenFunction &CGF,
+ int32_t &MinThreadsVal, int32_t &MaxThreadsVal, int32_t &MinTeamsVal,
+ int32_t &MaxTeamsVal) {
+
+ getNumTeamsExprForTargetDirective(CGF, D, MinTeamsVal, MaxTeamsVal);
+ getNumThreadsExprForTargetDirective(CGF, D, MaxThreadsVal,
+ /*UpperBoundOnly=*/true);
+
+ for (auto *C : D.getClausesOfKind<OMPXAttributeClause>()) {
+ for (auto *A : C->getAttrs()) {
+ int32_t AttrMinThreadsVal = 1, AttrMaxThreadsVal = -1;
+ int32_t AttrMinBlocksVal = 1, AttrMaxBlocksVal = -1;
+ if (auto *Attr = dyn_cast<CUDALaunchBoundsAttr>(A))
+ CGM.handleCUDALaunchBoundsAttr(nullptr, Attr, &AttrMaxThreadsVal,
+ &AttrMinBlocksVal, &AttrMaxBlocksVal);
+ else if (auto *Attr = dyn_cast<AMDGPUFlatWorkGroupSizeAttr>(A))
+ CGM.handleAMDGPUFlatWorkGroupSizeAttr(
+ nullptr, Attr, /*ReqdWGS=*/nullptr, &AttrMinThreadsVal,
+ &AttrMaxThreadsVal);
+ else
+ continue;
+
+ MinThreadsVal = std::max(MinThreadsVal, AttrMinThreadsVal);
+ if (AttrMaxThreadsVal > 0)
+ MaxThreadsVal = MaxThreadsVal > 0
+ ? std::min(MaxThreadsVal, AttrMaxThreadsVal)
+ : AttrMaxThreadsVal;
+ MinTeamsVal = std::max(MinTeamsVal, AttrMinBlocksVal);
+ if (AttrMaxBlocksVal > 0)
+ MaxTeamsVal = MaxTeamsVal > 0 ? std::min(MaxTeamsVal, AttrMaxBlocksVal)
+ : AttrMaxBlocksVal;
+ }
+ }
+}
+
void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
const OMPExecutableDirective &D, StringRef ParentName,
llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
- // Create a unique name for the entry function using the source location
- // information of the current target region. The name will be something like:
- //
- // __omp_offloading_DD_FFFF_PP_lBB
- //
- // where DD_FFFF is an ID unique to the file (device and file IDs), PP is the
- // mangled name of the function that encloses the target region and BB is the
- // line number of the target region.
-
- unsigned DeviceID;
- unsigned FileID;
- unsigned Line;
- getTargetEntryUniqueInfo(CGM.getContext(), D.getBeginLoc(), DeviceID, FileID,
- Line);
- SmallString<64> EntryFnName;
- {
- llvm::raw_svector_ostream OS(EntryFnName);
- OS << "__omp_offloading" << llvm::format("_%x", DeviceID)
- << llvm::format("_%x_", FileID) << ParentName << "_l" << Line;
- }
- const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
+ llvm::TargetRegionEntryInfo EntryInfo =
+ getEntryInfoFromPresumedLoc(CGM, OMPBuilder, D.getBeginLoc(), ParentName);
CodeGenFunction CGF(CGM, true);
- CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName);
- CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
+ llvm::OpenMPIRBuilder::FunctionGenCallback &&GenerateOutlinedFunction =
+ [&CGF, &D, &CodeGen](StringRef EntryFnName) {
+ const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
- OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS, D.getBeginLoc());
+ CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
+ return CGF.GenerateOpenMPCapturedStmtFunction(CS, D.getBeginLoc());
+ };
- // If this target outline function is not an offload entry, we don't need to
- // register it.
- if (!IsOffloadEntry)
- return;
+ OMPBuilder.emitTargetRegionFunction(EntryInfo, GenerateOutlinedFunction,
+ IsOffloadEntry, OutlinedFn, OutlinedFnID);
- // The target region ID is used by the runtime library to identify the current
- // target region, so it only has to be unique and not necessarily point to
- // anything. It could be the pointer to the outlined function that implements
- // the target region, but we aren't using that so that the compiler doesn't
- // need to keep that, and could therefore inline the host function if proven
- // worthwhile during optimization. In the other hand, if emitting code for the
- // device, the ID has to be the function address so that it can retrieved from
- // the offloading entry and launched by the runtime library. We also mark the
- // outlined function to have external linkage in case we are emitting code for
- // the device, because these functions will be entry points to the device.
-
- if (CGM.getLangOpts().OpenMPIsDevice) {
- OutlinedFnID = llvm::ConstantExpr::getBitCast(OutlinedFn, CGM.Int8PtrTy);
- OutlinedFn->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
- OutlinedFn->setDSOLocal(false);
- if (CGM.getTriple().isAMDGCN())
- OutlinedFn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
- } else {
- std::string Name = getName({EntryFnName, "region_id"});
- OutlinedFnID = new llvm::GlobalVariable(
- CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
- llvm::GlobalValue::WeakAnyLinkage,
- llvm::Constant::getNullValue(CGM.Int8Ty), Name);
- }
+ if (!OutlinedFn)
+ return;
- // Register the information for the entry associated with this target region.
- OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
- DeviceID, FileID, ParentName, Line, OutlinedFn, OutlinedFnID,
- OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion);
+ CGM.getTargetCodeGenInfo().setTargetAttributes(nullptr, OutlinedFn, CGM);
- // Add NumTeams and ThreadLimit attributes to the outlined GPU function
- int32_t DefaultValTeams = -1;
- getNumTeamsExprForTargetDirective(CGF, D, DefaultValTeams);
- if (DefaultValTeams > 0) {
- OutlinedFn->addFnAttr("omp_target_num_teams",
- std::to_string(DefaultValTeams));
- }
- int32_t DefaultValThreads = -1;
- getNumThreadsExprForTargetDirective(CGF, D, DefaultValThreads);
- if (DefaultValThreads > 0) {
- OutlinedFn->addFnAttr("omp_target_thread_limit",
- std::to_string(DefaultValThreads));
+ for (auto *C : D.getClausesOfKind<OMPXAttributeClause>()) {
+ for (auto *A : C->getAttrs()) {
+ if (auto *Attr = dyn_cast<AMDGPUWavesPerEUAttr>(A))
+ CGM.handleAMDGPUWavesPerEUAttr(OutlinedFn, Attr);
+ }
}
}
@@ -6621,8 +6004,8 @@ const Stmt *CGOpenMPRuntime::getSingleCompoundChild(ASTContext &Ctx,
}
const Expr *CGOpenMPRuntime::getNumTeamsExprForTargetDirective(
- CodeGenFunction &CGF, const OMPExecutableDirective &D,
- int32_t &DefaultVal) {
+ CodeGenFunction &CGF, const OMPExecutableDirective &D, int32_t &MinTeamsVal,
+ int32_t &MaxTeamsVal) {
OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
assert(isOpenMPTargetExecutionDirective(DirectiveKind) &&
@@ -6643,24 +6026,25 @@ const Expr *CGOpenMPRuntime::getNumTeamsExprForTargetDirective(
if (NumTeams->isIntegerConstantExpr(CGF.getContext()))
if (auto Constant =
NumTeams->getIntegerConstantExpr(CGF.getContext()))
- DefaultVal = Constant->getExtValue();
+ MinTeamsVal = MaxTeamsVal = Constant->getExtValue();
return NumTeams;
}
- DefaultVal = 0;
+ MinTeamsVal = MaxTeamsVal = 0;
return nullptr;
}
if (isOpenMPParallelDirective(NestedDir->getDirectiveKind()) ||
isOpenMPSimdDirective(NestedDir->getDirectiveKind())) {
- DefaultVal = 1;
+ MinTeamsVal = MaxTeamsVal = 1;
return nullptr;
}
- DefaultVal = 1;
+ MinTeamsVal = MaxTeamsVal = 1;
return nullptr;
}
// A value of -1 is used to check if we need to emit no teams region
- DefaultVal = -1;
+ MinTeamsVal = MaxTeamsVal = -1;
return nullptr;
}
+ case OMPD_target_teams_loop:
case OMPD_target_teams:
case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_simd:
@@ -6671,21 +6055,23 @@ const Expr *CGOpenMPRuntime::getNumTeamsExprForTargetDirective(
D.getSingleClause<OMPNumTeamsClause>()->getNumTeams();
if (NumTeams->isIntegerConstantExpr(CGF.getContext()))
if (auto Constant = NumTeams->getIntegerConstantExpr(CGF.getContext()))
- DefaultVal = Constant->getExtValue();
+ MinTeamsVal = MaxTeamsVal = Constant->getExtValue();
return NumTeams;
}
- DefaultVal = 0;
+ MinTeamsVal = MaxTeamsVal = 0;
return nullptr;
}
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
+ case OMPD_target_parallel_loop:
case OMPD_target_simd:
- DefaultVal = 1;
+ MinTeamsVal = MaxTeamsVal = 1;
return nullptr;
case OMPD_parallel:
case OMPD_for:
case OMPD_parallel_for:
+ case OMPD_parallel_loop:
case OMPD_parallel_master:
case OMPD_parallel_sections:
case OMPD_for_simd:
@@ -6740,6 +6126,7 @@ const Expr *CGOpenMPRuntime::getNumTeamsExprForTargetDirective(
case OMPD_parallel_master_taskloop:
case OMPD_parallel_master_taskloop_simd:
case OMPD_requires:
+ case OMPD_metadirective:
case OMPD_unknown:
break;
default:
@@ -6750,12 +6137,13 @@ const Expr *CGOpenMPRuntime::getNumTeamsExprForTargetDirective(
llvm::Value *CGOpenMPRuntime::emitNumTeamsForTargetDirective(
CodeGenFunction &CGF, const OMPExecutableDirective &D) {
- assert(!CGF.getLangOpts().OpenMPIsDevice &&
+ assert(!CGF.getLangOpts().OpenMPIsTargetDevice &&
"Clauses associated with the teams directive expected to be emitted "
"only for the host!");
CGBuilderTy &Bld = CGF.Builder;
- int32_t DefaultNT = -1;
- const Expr *NumTeams = getNumTeamsExprForTargetDirective(CGF, D, DefaultNT);
+ int32_t MinNT = -1, MaxNT = -1;
+ const Expr *NumTeams =
+ getNumTeamsExprForTargetDirective(CGF, D, MinNT, MaxNT);
if (NumTeams != nullptr) {
OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
@@ -6783,270 +6171,175 @@ llvm::Value *CGOpenMPRuntime::emitNumTeamsForTargetDirective(
default:
break;
}
- } else if (DefaultNT == -1) {
- return nullptr;
}
- return Bld.getInt32(DefaultNT);
+ assert(MinNT == MaxNT && "Num threads ranges require handling here.");
+ return llvm::ConstantInt::get(CGF.Int32Ty, MinNT);
}
-static llvm::Value *getNumThreads(CodeGenFunction &CGF, const CapturedStmt *CS,
- llvm::Value *DefaultThreadLimitVal) {
+/// Check for a num threads constant value (stored in \p DefaultVal), or
+/// expression (stored in \p E). If the value is conditional (via an if-clause),
+/// store the condition in \p CondVal. If \p E, and \p CondVal respectively, are
+/// nullptr, no expression evaluation is perfomed.
+static void getNumThreads(CodeGenFunction &CGF, const CapturedStmt *CS,
+ const Expr **E, int32_t &UpperBound,
+ bool UpperBoundOnly, llvm::Value **CondVal) {
const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
CGF.getContext(), CS->getCapturedStmt());
- if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
- if (isOpenMPParallelDirective(Dir->getDirectiveKind())) {
- llvm::Value *NumThreads = nullptr;
- llvm::Value *CondVal = nullptr;
- // Handle if clause. If if clause present, the number of threads is
- // calculated as <cond> ? (<numthreads> ? <numthreads> : 0 ) : 1.
- if (Dir->hasClausesOfKind<OMPIfClause>()) {
- CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
- CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
- const OMPIfClause *IfClause = nullptr;
- for (const auto *C : Dir->getClausesOfKind<OMPIfClause>()) {
- if (C->getNameModifier() == OMPD_unknown ||
- C->getNameModifier() == OMPD_parallel) {
- IfClause = C;
- break;
- }
- }
- if (IfClause) {
- const Expr *Cond = IfClause->getCondition();
- bool Result;
- if (Cond->EvaluateAsBooleanCondition(Result, CGF.getContext())) {
- if (!Result)
- return CGF.Builder.getInt32(1);
- } else {
- CodeGenFunction::LexicalScope Scope(CGF, Cond->getSourceRange());
- if (const auto *PreInit =
- cast_or_null<DeclStmt>(IfClause->getPreInitStmt())) {
- for (const auto *I : PreInit->decls()) {
- if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
- CGF.EmitVarDecl(cast<VarDecl>(*I));
- } else {
- CodeGenFunction::AutoVarEmission Emission =
- CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
- CGF.EmitAutoVarCleanups(Emission);
- }
- }
- }
- CondVal = CGF.EvaluateExprAsBool(Cond);
- }
+ const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child);
+ if (!Dir)
+ return;
+
+ if (isOpenMPParallelDirective(Dir->getDirectiveKind())) {
+ // Handle if clause. If if clause present, the number of threads is
+ // calculated as <cond> ? (<numthreads> ? <numthreads> : 0 ) : 1.
+ if (CondVal && Dir->hasClausesOfKind<OMPIfClause>()) {
+ CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
+ const OMPIfClause *IfClause = nullptr;
+ for (const auto *C : Dir->getClausesOfKind<OMPIfClause>()) {
+ if (C->getNameModifier() == OMPD_unknown ||
+ C->getNameModifier() == OMPD_parallel) {
+ IfClause = C;
+ break;
}
}
- // Check the value of num_threads clause iff if clause was not specified
- // or is not evaluated to false.
- if (Dir->hasClausesOfKind<OMPNumThreadsClause>()) {
- CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
- CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
- const auto *NumThreadsClause =
- Dir->getSingleClause<OMPNumThreadsClause>();
- CodeGenFunction::LexicalScope Scope(
- CGF, NumThreadsClause->getNumThreads()->getSourceRange());
- if (const auto *PreInit =
- cast_or_null<DeclStmt>(NumThreadsClause->getPreInitStmt())) {
- for (const auto *I : PreInit->decls()) {
- if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
- CGF.EmitVarDecl(cast<VarDecl>(*I));
- } else {
- CodeGenFunction::AutoVarEmission Emission =
- CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
- CGF.EmitAutoVarCleanups(Emission);
+ if (IfClause) {
+ const Expr *CondExpr = IfClause->getCondition();
+ bool Result;
+ if (CondExpr->EvaluateAsBooleanCondition(Result, CGF.getContext())) {
+ if (!Result) {
+ UpperBound = 1;
+ return;
+ }
+ } else {
+ CodeGenFunction::LexicalScope Scope(CGF, CondExpr->getSourceRange());
+ if (const auto *PreInit =
+ cast_or_null<DeclStmt>(IfClause->getPreInitStmt())) {
+ for (const auto *I : PreInit->decls()) {
+ if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
+ CGF.EmitVarDecl(cast<VarDecl>(*I));
+ } else {
+ CodeGenFunction::AutoVarEmission Emission =
+ CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
+ CGF.EmitAutoVarCleanups(Emission);
+ }
}
+ *CondVal = CGF.EvaluateExprAsBool(CondExpr);
}
}
- NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads());
- NumThreads = CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty,
- /*isSigned=*/false);
- if (DefaultThreadLimitVal)
- NumThreads = CGF.Builder.CreateSelect(
- CGF.Builder.CreateICmpULT(DefaultThreadLimitVal, NumThreads),
- DefaultThreadLimitVal, NumThreads);
- } else {
- NumThreads = DefaultThreadLimitVal ? DefaultThreadLimitVal
- : CGF.Builder.getInt32(0);
}
- // Process condition of the if clause.
- if (CondVal) {
- NumThreads = CGF.Builder.CreateSelect(CondVal, NumThreads,
- CGF.Builder.getInt32(1));
- }
- return NumThreads;
- }
- if (isOpenMPSimdDirective(Dir->getDirectiveKind()))
- return CGF.Builder.getInt32(1);
- return DefaultThreadLimitVal;
- }
- return DefaultThreadLimitVal ? DefaultThreadLimitVal
- : CGF.Builder.getInt32(0);
-}
-
-const Expr *CGOpenMPRuntime::getNumThreadsExprForTargetDirective(
- CodeGenFunction &CGF, const OMPExecutableDirective &D,
- int32_t &DefaultVal) {
- OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
- assert(isOpenMPTargetExecutionDirective(DirectiveKind) &&
- "Expected target-based executable directive.");
-
- switch (DirectiveKind) {
- case OMPD_target:
- // Teams have no clause thread_limit
- return nullptr;
- case OMPD_target_teams:
- case OMPD_target_teams_distribute:
- if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
- const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
- const Expr *ThreadLimit = ThreadLimitClause->getThreadLimit();
- if (ThreadLimit->isIntegerConstantExpr(CGF.getContext()))
- if (auto Constant =
- ThreadLimit->getIntegerConstantExpr(CGF.getContext()))
- DefaultVal = Constant->getExtValue();
- return ThreadLimit;
}
- return nullptr;
- case OMPD_target_parallel:
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd: {
- Expr *ThreadLimit = nullptr;
- Expr *NumThreads = nullptr;
- if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
- const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
- ThreadLimit = ThreadLimitClause->getThreadLimit();
- if (ThreadLimit->isIntegerConstantExpr(CGF.getContext()))
- if (auto Constant =
- ThreadLimit->getIntegerConstantExpr(CGF.getContext()))
- DefaultVal = Constant->getExtValue();
- }
- if (D.hasClausesOfKind<OMPNumThreadsClause>()) {
- const auto *NumThreadsClause = D.getSingleClause<OMPNumThreadsClause>();
- NumThreads = NumThreadsClause->getNumThreads();
- if (NumThreads->isIntegerConstantExpr(CGF.getContext())) {
- if (auto Constant =
- NumThreads->getIntegerConstantExpr(CGF.getContext())) {
- if (Constant->getExtValue() < DefaultVal) {
- DefaultVal = Constant->getExtValue();
- ThreadLimit = NumThreads;
+ // Check the value of num_threads clause iff if clause was not specified
+ // or is not evaluated to false.
+ if (Dir->hasClausesOfKind<OMPNumThreadsClause>()) {
+ CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
+ const auto *NumThreadsClause =
+ Dir->getSingleClause<OMPNumThreadsClause>();
+ const Expr *NTExpr = NumThreadsClause->getNumThreads();
+ if (NTExpr->isIntegerConstantExpr(CGF.getContext()))
+ if (auto Constant = NTExpr->getIntegerConstantExpr(CGF.getContext()))
+ UpperBound =
+ UpperBound
+ ? Constant->getZExtValue()
+ : std::min(UpperBound,
+ static_cast<int32_t>(Constant->getZExtValue()));
+ // If we haven't found a upper bound, remember we saw a thread limiting
+ // clause.
+ if (UpperBound == -1)
+ UpperBound = 0;
+ if (!E)
+ return;
+ CodeGenFunction::LexicalScope Scope(CGF, NTExpr->getSourceRange());
+ if (const auto *PreInit =
+ cast_or_null<DeclStmt>(NumThreadsClause->getPreInitStmt())) {
+ for (const auto *I : PreInit->decls()) {
+ if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
+ CGF.EmitVarDecl(cast<VarDecl>(*I));
+ } else {
+ CodeGenFunction::AutoVarEmission Emission =
+ CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
+ CGF.EmitAutoVarCleanups(Emission);
}
}
}
+ *E = NTExpr;
}
- return ThreadLimit;
- }
- case OMPD_target_teams_distribute_simd:
- case OMPD_target_simd:
- DefaultVal = 1;
- return nullptr;
- case OMPD_parallel:
- case OMPD_for:
- case OMPD_parallel_for:
- case OMPD_parallel_master:
- case OMPD_parallel_sections:
- case OMPD_for_simd:
- case OMPD_parallel_for_simd:
- case OMPD_cancel:
- case OMPD_cancellation_point:
- case OMPD_ordered:
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_task:
- case OMPD_simd:
- case OMPD_tile:
- case OMPD_unroll:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_critical:
- case OMPD_taskyield:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_taskgroup:
- case OMPD_atomic:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_teams:
- case OMPD_target_data:
- case OMPD_target_exit_data:
- case OMPD_target_enter_data:
- case OMPD_distribute:
- case OMPD_distribute_simd:
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd:
- case OMPD_target_update:
- case OMPD_declare_simd:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- case OMPD_requires:
- case OMPD_unknown:
- break;
- default:
- break;
+ return;
}
- llvm_unreachable("Unsupported directive kind.");
+ if (isOpenMPSimdDirective(Dir->getDirectiveKind()))
+ UpperBound = 1;
}
-llvm::Value *CGOpenMPRuntime::emitNumThreadsForTargetDirective(
- CodeGenFunction &CGF, const OMPExecutableDirective &D) {
- assert(!CGF.getLangOpts().OpenMPIsDevice &&
+const Expr *CGOpenMPRuntime::getNumThreadsExprForTargetDirective(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D, int32_t &UpperBound,
+ bool UpperBoundOnly, llvm::Value **CondVal, const Expr **ThreadLimitExpr) {
+ assert((!CGF.getLangOpts().OpenMPIsTargetDevice || UpperBoundOnly) &&
"Clauses associated with the teams directive expected to be emitted "
"only for the host!");
OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
assert(isOpenMPTargetExecutionDirective(DirectiveKind) &&
"Expected target-based executable directive.");
- CGBuilderTy &Bld = CGF.Builder;
- llvm::Value *ThreadLimitVal = nullptr;
- llvm::Value *NumThreadsVal = nullptr;
+
+ const Expr *NT = nullptr;
+ const Expr **NTPtr = UpperBoundOnly ? nullptr : &NT;
+
+ auto CheckForConstExpr = [&](const Expr *E, const Expr **EPtr) {
+ if (E->isIntegerConstantExpr(CGF.getContext())) {
+ if (auto Constant = E->getIntegerConstantExpr(CGF.getContext()))
+ UpperBound = UpperBound ? Constant->getZExtValue()
+ : std::min(UpperBound,
+ int32_t(Constant->getZExtValue()));
+ }
+ // If we haven't found a upper bound, remember we saw a thread limiting
+ // clause.
+ if (UpperBound == -1)
+ UpperBound = 0;
+ if (EPtr)
+ *EPtr = E;
+ };
+
+ auto ReturnSequential = [&]() {
+ UpperBound = 1;
+ return NT;
+ };
+
switch (DirectiveKind) {
case OMPD_target: {
const CapturedStmt *CS = D.getInnermostCapturedStmt();
- if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
- return NumThreads;
+ getNumThreads(CGF, CS, NTPtr, UpperBound, UpperBoundOnly, CondVal);
const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
CGF.getContext(), CS->getCapturedStmt());
+ // TODO: The standard is not clear how to resolve two thread limit clauses,
+ // let's pick the teams one if it's present, otherwise the target one.
+ const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
- if (Dir->hasClausesOfKind<OMPThreadLimitClause>()) {
- CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
- CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
- const auto *ThreadLimitClause =
- Dir->getSingleClause<OMPThreadLimitClause>();
- CodeGenFunction::LexicalScope Scope(
- CGF, ThreadLimitClause->getThreadLimit()->getSourceRange());
- if (const auto *PreInit =
- cast_or_null<DeclStmt>(ThreadLimitClause->getPreInitStmt())) {
- for (const auto *I : PreInit->decls()) {
- if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
- CGF.EmitVarDecl(cast<VarDecl>(*I));
- } else {
- CodeGenFunction::AutoVarEmission Emission =
- CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
- CGF.EmitAutoVarCleanups(Emission);
+ if (const auto *TLC = Dir->getSingleClause<OMPThreadLimitClause>()) {
+ ThreadLimitClause = TLC;
+ if (ThreadLimitExpr) {
+ CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
+ CodeGenFunction::LexicalScope Scope(
+ CGF, ThreadLimitClause->getThreadLimit()->getSourceRange());
+ if (const auto *PreInit =
+ cast_or_null<DeclStmt>(ThreadLimitClause->getPreInitStmt())) {
+ for (const auto *I : PreInit->decls()) {
+ if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
+ CGF.EmitVarDecl(cast<VarDecl>(*I));
+ } else {
+ CodeGenFunction::AutoVarEmission Emission =
+ CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
+ CGF.EmitAutoVarCleanups(Emission);
+ }
}
}
}
- llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
- ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
- ThreadLimitVal =
- Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
}
+ }
+ if (ThreadLimitClause)
+ CheckForConstExpr(ThreadLimitClause->getThreadLimit(), ThreadLimitExpr);
+ if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
if (isOpenMPTeamsDirective(Dir->getDirectiveKind()) &&
!isOpenMPDistributeDirective(Dir->getDirectiveKind())) {
CS = Dir->getInnermostCapturedStmt();
@@ -7054,59 +6347,49 @@ llvm::Value *CGOpenMPRuntime::emitNumThreadsForTargetDirective(
CGF.getContext(), CS->getCapturedStmt());
Dir = dyn_cast_or_null<OMPExecutableDirective>(Child);
}
- if (Dir && isOpenMPDistributeDirective(Dir->getDirectiveKind()) &&
- !isOpenMPSimdDirective(Dir->getDirectiveKind())) {
+ if (Dir && isOpenMPParallelDirective(Dir->getDirectiveKind())) {
CS = Dir->getInnermostCapturedStmt();
- if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
- return NumThreads;
- }
- if (Dir && isOpenMPSimdDirective(Dir->getDirectiveKind()))
- return Bld.getInt32(1);
+ getNumThreads(CGF, CS, NTPtr, UpperBound, UpperBoundOnly, CondVal);
+ } else if (Dir && isOpenMPSimdDirective(Dir->getDirectiveKind()))
+ return ReturnSequential();
}
- return ThreadLimitVal ? ThreadLimitVal : Bld.getInt32(0);
+ return NT;
}
case OMPD_target_teams: {
if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
- llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
- ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
- ThreadLimitVal =
- Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
+ CheckForConstExpr(ThreadLimitClause->getThreadLimit(), ThreadLimitExpr);
}
const CapturedStmt *CS = D.getInnermostCapturedStmt();
- if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
- return NumThreads;
+ getNumThreads(CGF, CS, NTPtr, UpperBound, UpperBoundOnly, CondVal);
const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
CGF.getContext(), CS->getCapturedStmt());
if (const auto *Dir = dyn_cast_or_null<OMPExecutableDirective>(Child)) {
if (Dir->getDirectiveKind() == OMPD_distribute) {
CS = Dir->getInnermostCapturedStmt();
- if (llvm::Value *NumThreads = getNumThreads(CGF, CS, ThreadLimitVal))
- return NumThreads;
+ getNumThreads(CGF, CS, NTPtr, UpperBound, UpperBoundOnly, CondVal);
}
}
- return ThreadLimitVal ? ThreadLimitVal : Bld.getInt32(0);
+ return NT;
}
case OMPD_target_teams_distribute:
if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
- llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
- ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
- ThreadLimitVal =
- Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
+ CheckForConstExpr(ThreadLimitClause->getThreadLimit(), ThreadLimitExpr);
}
- return getNumThreads(CGF, D.getInnermostCapturedStmt(), ThreadLimitVal);
+ getNumThreads(CGF, D.getInnermostCapturedStmt(), NTPtr, UpperBound,
+ UpperBoundOnly, CondVal);
+ return NT;
+ case OMPD_target_teams_loop:
+ case OMPD_target_parallel_loop:
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd: {
- llvm::Value *CondVal = nullptr;
- // Handle if clause. If if clause present, the number of threads is
- // calculated as <cond> ? (<numthreads> ? <numthreads> : 0 ) : 1.
- if (D.hasClausesOfKind<OMPIfClause>()) {
+ if (CondVal && D.hasClausesOfKind<OMPIfClause>()) {
const OMPIfClause *IfClause = nullptr;
for (const auto *C : D.getClausesOfKind<OMPIfClause>()) {
if (C->getNameModifier() == OMPD_unknown ||
@@ -7120,108 +6403,92 @@ llvm::Value *CGOpenMPRuntime::emitNumThreadsForTargetDirective(
bool Result;
if (Cond->EvaluateAsBooleanCondition(Result, CGF.getContext())) {
if (!Result)
- return Bld.getInt32(1);
+ return ReturnSequential();
} else {
CodeGenFunction::RunCleanupsScope Scope(CGF);
- CondVal = CGF.EvaluateExprAsBool(Cond);
+ *CondVal = CGF.EvaluateExprAsBool(Cond);
}
}
}
if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
CodeGenFunction::RunCleanupsScope ThreadLimitScope(CGF);
const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
- llvm::Value *ThreadLimit = CGF.EmitScalarExpr(
- ThreadLimitClause->getThreadLimit(), /*IgnoreResultAssign=*/true);
- ThreadLimitVal =
- Bld.CreateIntCast(ThreadLimit, CGF.Int32Ty, /*isSigned=*/false);
+ CheckForConstExpr(ThreadLimitClause->getThreadLimit(), ThreadLimitExpr);
}
if (D.hasClausesOfKind<OMPNumThreadsClause>()) {
CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
const auto *NumThreadsClause = D.getSingleClause<OMPNumThreadsClause>();
- llvm::Value *NumThreads = CGF.EmitScalarExpr(
- NumThreadsClause->getNumThreads(), /*IgnoreResultAssign=*/true);
- NumThreadsVal =
- Bld.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned=*/false);
- ThreadLimitVal = ThreadLimitVal
- ? Bld.CreateSelect(Bld.CreateICmpULT(NumThreadsVal,
- ThreadLimitVal),
- NumThreadsVal, ThreadLimitVal)
- : NumThreadsVal;
- }
- if (!ThreadLimitVal)
- ThreadLimitVal = Bld.getInt32(0);
- if (CondVal)
- return Bld.CreateSelect(CondVal, ThreadLimitVal, Bld.getInt32(1));
- return ThreadLimitVal;
+ CheckForConstExpr(NumThreadsClause->getNumThreads(), nullptr);
+ return NumThreadsClause->getNumThreads();
+ }
+ return NT;
}
case OMPD_target_teams_distribute_simd:
case OMPD_target_simd:
- return Bld.getInt32(1);
- case OMPD_parallel:
- case OMPD_for:
- case OMPD_parallel_for:
- case OMPD_parallel_master:
- case OMPD_parallel_sections:
- case OMPD_for_simd:
- case OMPD_parallel_for_simd:
- case OMPD_cancel:
- case OMPD_cancellation_point:
- case OMPD_ordered:
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_task:
- case OMPD_simd:
- case OMPD_tile:
- case OMPD_unroll:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_critical:
- case OMPD_taskyield:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_taskgroup:
- case OMPD_atomic:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_teams:
- case OMPD_target_data:
- case OMPD_target_exit_data:
- case OMPD_target_enter_data:
- case OMPD_distribute:
- case OMPD_distribute_simd:
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd:
- case OMPD_target_update:
- case OMPD_declare_simd:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- case OMPD_requires:
- case OMPD_unknown:
- break;
+ return ReturnSequential();
default:
break;
}
llvm_unreachable("Unsupported directive kind.");
}
+llvm::Value *CGOpenMPRuntime::emitNumThreadsForTargetDirective(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D) {
+ llvm::Value *NumThreadsVal = nullptr;
+ llvm::Value *CondVal = nullptr;
+ llvm::Value *ThreadLimitVal = nullptr;
+ const Expr *ThreadLimitExpr = nullptr;
+ int32_t UpperBound = -1;
+
+ const Expr *NT = getNumThreadsExprForTargetDirective(
+ CGF, D, UpperBound, /* UpperBoundOnly */ false, &CondVal,
+ &ThreadLimitExpr);
+
+ // Thread limit expressions are used below, emit them.
+ if (ThreadLimitExpr) {
+ ThreadLimitVal =
+ CGF.EmitScalarExpr(ThreadLimitExpr, /*IgnoreResultAssign=*/true);
+ ThreadLimitVal = CGF.Builder.CreateIntCast(ThreadLimitVal, CGF.Int32Ty,
+ /*isSigned=*/false);
+ }
+
+ // Generate the num teams expression.
+ if (UpperBound == 1) {
+ NumThreadsVal = CGF.Builder.getInt32(UpperBound);
+ } else if (NT) {
+ NumThreadsVal = CGF.EmitScalarExpr(NT, /*IgnoreResultAssign=*/true);
+ NumThreadsVal = CGF.Builder.CreateIntCast(NumThreadsVal, CGF.Int32Ty,
+ /*isSigned=*/false);
+ } else if (ThreadLimitVal) {
+ // If we do not have a num threads value but a thread limit, replace the
+ // former with the latter. We know handled the thread limit expression.
+ NumThreadsVal = ThreadLimitVal;
+ ThreadLimitVal = nullptr;
+ } else {
+ // Default to "0" which means runtime choice.
+ assert(!ThreadLimitVal && "Default not applicable with thread limit value");
+ NumThreadsVal = CGF.Builder.getInt32(0);
+ }
+
+ // Handle if clause. If if clause present, the number of threads is
+ // calculated as <cond> ? (<numthreads> ? <numthreads> : 0 ) : 1.
+ if (CondVal) {
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ NumThreadsVal = CGF.Builder.CreateSelect(CondVal, NumThreadsVal,
+ CGF.Builder.getInt32(1));
+ }
+
+ // If the thread limit and num teams expression were present, take the
+ // minimum.
+ if (ThreadLimitVal) {
+ NumThreadsVal = CGF.Builder.CreateSelect(
+ CGF.Builder.CreateICmpULT(ThreadLimitVal, NumThreadsVal),
+ ThreadLimitVal, NumThreadsVal);
+ }
+
+ return NumThreadsVal;
+}
+
namespace {
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
@@ -7231,59 +6498,13 @@ LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
// code for that information.
class MappableExprsHandler {
public:
- /// Values for bit flags used to specify the mapping type for
- /// offloading.
- enum OpenMPOffloadMappingFlags : uint64_t {
- /// No flags
- OMP_MAP_NONE = 0x0,
- /// Allocate memory on the device and move data from host to device.
- OMP_MAP_TO = 0x01,
- /// Allocate memory on the device and move data from device to host.
- OMP_MAP_FROM = 0x02,
- /// Always perform the requested mapping action on the element, even
- /// if it was already mapped before.
- OMP_MAP_ALWAYS = 0x04,
- /// Delete the element from the device environment, ignoring the
- /// current reference count associated with the element.
- OMP_MAP_DELETE = 0x08,
- /// The element being mapped is a pointer-pointee pair; both the
- /// pointer and the pointee should be mapped.
- OMP_MAP_PTR_AND_OBJ = 0x10,
- /// This flags signals that the base address of an entry should be
- /// passed to the target kernel as an argument.
- OMP_MAP_TARGET_PARAM = 0x20,
- /// Signal that the runtime library has to return the device pointer
- /// in the current position for the data being mapped. Used when we have the
- /// use_device_ptr or use_device_addr clause.
- OMP_MAP_RETURN_PARAM = 0x40,
- /// This flag signals that the reference being passed is a pointer to
- /// private data.
- OMP_MAP_PRIVATE = 0x80,
- /// Pass the element to the device by value.
- OMP_MAP_LITERAL = 0x100,
- /// Implicit map
- OMP_MAP_IMPLICIT = 0x200,
- /// Close is a hint to the runtime to allocate memory close to
- /// the target device.
- OMP_MAP_CLOSE = 0x400,
- /// 0x800 is reserved for compatibility with XLC.
- /// Produce a runtime error if the data is not already allocated.
- OMP_MAP_PRESENT = 0x1000,
- /// Signal that the runtime library should use args as an array of
- /// descriptor_dim pointers and use args_size as dims. Used when we have
- /// non-contiguous list items in target update directive
- OMP_MAP_NON_CONTIG = 0x100000000000,
- /// The 16 MSBs of the flags indicate whether the entry is member of some
- /// struct/class.
- OMP_MAP_MEMBER_OF = 0xffff000000000000,
- LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ OMP_MAP_MEMBER_OF),
- };
-
/// Get the offset of the OMP_MAP_MEMBER_OF field.
static unsigned getFlagMemberOffset() {
unsigned Offset = 0;
- for (uint64_t Remain = OMP_MAP_MEMBER_OF; !(Remain & 1);
- Remain = Remain >> 1)
+ for (uint64_t Remain =
+ static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
+ OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF);
+ !(Remain & 1); Remain = Remain >> 1)
Offset++;
return Offset;
}
@@ -7305,67 +6526,31 @@ public:
const Expr *getMapExpr() const { return MapExpr; }
};
- /// Class that associates information with a base pointer to be passed to the
- /// runtime library.
- class BasePointerInfo {
- /// The base pointer.
- llvm::Value *Ptr = nullptr;
- /// The base declaration that refers to this device pointer, or null if
- /// there is none.
- const ValueDecl *DevPtrDecl = nullptr;
-
- public:
- BasePointerInfo(llvm::Value *Ptr, const ValueDecl *DevPtrDecl = nullptr)
- : Ptr(Ptr), DevPtrDecl(DevPtrDecl) {}
- llvm::Value *operator*() const { return Ptr; }
- const ValueDecl *getDevicePtrDecl() const { return DevPtrDecl; }
- void setDevicePtrDecl(const ValueDecl *D) { DevPtrDecl = D; }
- };
-
+ using DeviceInfoTy = llvm::OpenMPIRBuilder::DeviceInfoTy;
+ using MapBaseValuesArrayTy = llvm::OpenMPIRBuilder::MapValuesArrayTy;
+ using MapValuesArrayTy = llvm::OpenMPIRBuilder::MapValuesArrayTy;
+ using MapFlagsArrayTy = llvm::OpenMPIRBuilder::MapFlagsArrayTy;
+ using MapDimArrayTy = llvm::OpenMPIRBuilder::MapDimArrayTy;
+ using MapNonContiguousArrayTy =
+ llvm::OpenMPIRBuilder::MapNonContiguousArrayTy;
using MapExprsArrayTy = SmallVector<MappingExprInfo, 4>;
- using MapBaseValuesArrayTy = SmallVector<BasePointerInfo, 4>;
- using MapValuesArrayTy = SmallVector<llvm::Value *, 4>;
- using MapFlagsArrayTy = SmallVector<OpenMPOffloadMappingFlags, 4>;
- using MapMappersArrayTy = SmallVector<const ValueDecl *, 4>;
- using MapDimArrayTy = SmallVector<uint64_t, 4>;
- using MapNonContiguousArrayTy = SmallVector<MapValuesArrayTy, 4>;
+ using MapValueDeclsArrayTy = SmallVector<const ValueDecl *, 4>;
/// This structure contains combined information generated for mappable
/// clauses, including base pointers, pointers, sizes, map types, user-defined
/// mappers, and non-contiguous information.
- struct MapCombinedInfoTy {
- struct StructNonContiguousInfo {
- bool IsNonContiguous = false;
- MapDimArrayTy Dims;
- MapNonContiguousArrayTy Offsets;
- MapNonContiguousArrayTy Counts;
- MapNonContiguousArrayTy Strides;
- };
+ struct MapCombinedInfoTy : llvm::OpenMPIRBuilder::MapInfosTy {
MapExprsArrayTy Exprs;
- MapBaseValuesArrayTy BasePointers;
- MapValuesArrayTy Pointers;
- MapValuesArrayTy Sizes;
- MapFlagsArrayTy Types;
- MapMappersArrayTy Mappers;
- StructNonContiguousInfo NonContigInfo;
+ MapValueDeclsArrayTy Mappers;
+ MapValueDeclsArrayTy DevicePtrDecls;
/// Append arrays in \a CurInfo.
void append(MapCombinedInfoTy &CurInfo) {
Exprs.append(CurInfo.Exprs.begin(), CurInfo.Exprs.end());
- BasePointers.append(CurInfo.BasePointers.begin(),
- CurInfo.BasePointers.end());
- Pointers.append(CurInfo.Pointers.begin(), CurInfo.Pointers.end());
- Sizes.append(CurInfo.Sizes.begin(), CurInfo.Sizes.end());
- Types.append(CurInfo.Types.begin(), CurInfo.Types.end());
+ DevicePtrDecls.append(CurInfo.DevicePtrDecls.begin(),
+ CurInfo.DevicePtrDecls.end());
Mappers.append(CurInfo.Mappers.begin(), CurInfo.Mappers.end());
- NonContigInfo.Dims.append(CurInfo.NonContigInfo.Dims.begin(),
- CurInfo.NonContigInfo.Dims.end());
- NonContigInfo.Offsets.append(CurInfo.NonContigInfo.Offsets.begin(),
- CurInfo.NonContigInfo.Offsets.end());
- NonContigInfo.Counts.append(CurInfo.NonContigInfo.Counts.begin(),
- CurInfo.NonContigInfo.Counts.end());
- NonContigInfo.Strides.append(CurInfo.NonContigInfo.Strides.begin(),
- CurInfo.NonContigInfo.Strides.end());
+ llvm::OpenMPIRBuilder::MapInfosTy::append(CurInfo);
}
};
@@ -7447,6 +6632,16 @@ private:
SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
DevPointersMap;
+ /// Map between device addr declarations and their expression components.
+ /// The key value for declarations in 'this' is null.
+ llvm::DenseMap<
+ const ValueDecl *,
+ SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
+ HasDevAddrsMap;
+
+ /// Map between lambda declarations and their map type.
+ llvm::DenseMap<const ValueDecl *, const OMPMapClause *> LambdasMap;
+
llvm::Value *getExprTypeSize(const Expr *E) const {
QualType ExprTy = E->getType().getCanonicalType();
@@ -7531,7 +6726,8 @@ private:
ArrayRef<OpenMPMotionModifierKind> MotionModifiers, bool IsImplicit,
bool AddPtrFlag, bool AddIsTargetParamFlag, bool IsNonContiguous) const {
OpenMPOffloadMappingFlags Bits =
- IsImplicit ? OMP_MAP_IMPLICIT : OMP_MAP_NONE;
+ IsImplicit ? OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT
+ : OpenMPOffloadMappingFlags::OMP_MAP_NONE;
switch (MapType) {
case OMPC_MAP_alloc:
case OMPC_MAP_release:
@@ -7541,37 +6737,36 @@ private:
// type modifiers.
break;
case OMPC_MAP_to:
- Bits |= OMP_MAP_TO;
+ Bits |= OpenMPOffloadMappingFlags::OMP_MAP_TO;
break;
case OMPC_MAP_from:
- Bits |= OMP_MAP_FROM;
+ Bits |= OpenMPOffloadMappingFlags::OMP_MAP_FROM;
break;
case OMPC_MAP_tofrom:
- Bits |= OMP_MAP_TO | OMP_MAP_FROM;
+ Bits |= OpenMPOffloadMappingFlags::OMP_MAP_TO |
+ OpenMPOffloadMappingFlags::OMP_MAP_FROM;
break;
case OMPC_MAP_delete:
- Bits |= OMP_MAP_DELETE;
+ Bits |= OpenMPOffloadMappingFlags::OMP_MAP_DELETE;
break;
case OMPC_MAP_unknown:
llvm_unreachable("Unexpected map type!");
}
if (AddPtrFlag)
- Bits |= OMP_MAP_PTR_AND_OBJ;
+ Bits |= OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ;
if (AddIsTargetParamFlag)
- Bits |= OMP_MAP_TARGET_PARAM;
- if (llvm::find(MapModifiers, OMPC_MAP_MODIFIER_always)
- != MapModifiers.end())
- Bits |= OMP_MAP_ALWAYS;
- if (llvm::find(MapModifiers, OMPC_MAP_MODIFIER_close)
- != MapModifiers.end())
- Bits |= OMP_MAP_CLOSE;
- if (llvm::find(MapModifiers, OMPC_MAP_MODIFIER_present) !=
- MapModifiers.end() ||
- llvm::find(MotionModifiers, OMPC_MOTION_MODIFIER_present) !=
- MotionModifiers.end())
- Bits |= OMP_MAP_PRESENT;
+ Bits |= OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM;
+ if (llvm::is_contained(MapModifiers, OMPC_MAP_MODIFIER_always))
+ Bits |= OpenMPOffloadMappingFlags::OMP_MAP_ALWAYS;
+ if (llvm::is_contained(MapModifiers, OMPC_MAP_MODIFIER_close))
+ Bits |= OpenMPOffloadMappingFlags::OMP_MAP_CLOSE;
+ if (llvm::is_contained(MapModifiers, OMPC_MAP_MODIFIER_present) ||
+ llvm::is_contained(MotionModifiers, OMPC_MOTION_MODIFIER_present))
+ Bits |= OpenMPOffloadMappingFlags::OMP_MAP_PRESENT;
+ if (llvm::is_contained(MapModifiers, OMPC_MAP_MODIFIER_ompx_hold))
+ Bits |= OpenMPOffloadMappingFlags::OMP_MAP_OMPX_HOLD;
if (IsNonContiguous)
- Bits |= OMP_MAP_NON_CONTIG;
+ Bits |= OpenMPOffloadMappingFlags::OMP_MAP_NON_CONTIG;
return Bits;
}
@@ -7623,12 +6818,14 @@ private:
OpenMPMapClauseKind MapType, ArrayRef<OpenMPMapModifierKind> MapModifiers,
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
- MapCombinedInfoTy &CombinedInfo, StructRangeInfoTy &PartialStruct,
- bool IsFirstComponentList, bool IsImplicit,
+ MapCombinedInfoTy &CombinedInfo,
+ MapCombinedInfoTy &StructBaseCombinedInfo,
+ StructRangeInfoTy &PartialStruct, bool IsFirstComponentList,
+ bool IsImplicit, bool GenerateAllInfoForClauses,
const ValueDecl *Mapper = nullptr, bool ForDeviceAddr = false,
const ValueDecl *BaseDecl = nullptr, const Expr *MapExpr = nullptr,
ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
- OverlappedElements = llvm::None) const {
+ OverlappedElements = std::nullopt) const {
// The following summarizes what has to be generated for each map and the
// types below. The generated information is expressed in this order:
// base pointer, section pointer, size, flags
@@ -7637,6 +6834,7 @@ private:
// double d;
// int i[100];
// float *p;
+ // int **a = &i;
//
// struct S1 {
// int i;
@@ -7670,6 +6868,14 @@ private:
// in unified shared memory mode or for local pointers
// p, &p[1], 24*sizeof(float), TARGET_PARAM | TO | FROM
//
+ // map((*a)[0:3])
+ // &(*a), &(*a), sizeof(pointer), TARGET_PARAM | TO | FROM
+ // &(*a), &(*a)[0], 3*sizeof(int), PTR_AND_OBJ | TO | FROM
+ //
+ // map(**a)
+ // &(*a), &(*a), sizeof(pointer), TARGET_PARAM | TO | FROM
+ // &(*a), &(**a), sizeof(int), PTR_AND_OBJ | TO | FROM
+ //
// map(s)
// &s, &s, sizeof(S2), TARGET_PARAM | TO | FROM
//
@@ -7830,6 +7036,7 @@ private:
isa<CXXThisExpr>(OAShE->getBase()->IgnoreParenCasts())) {
BP = Address(
CGF.EmitScalarExpr(OAShE->getBase()),
+ CGF.ConvertTypeForMem(OAShE->getBase()->getType()->getPointeeType()),
CGF.getContext().getTypeAlignInChars(OAShE->getBase()->getType()));
} else {
// The base is the reference to the variable.
@@ -7837,10 +7044,11 @@ private:
BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF);
if (const auto *VD =
dyn_cast_or_null<VarDecl>(I->getAssociatedDeclaration())) {
- if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+ if (std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
if ((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
- (*Res == OMPDeclareTargetDeclAttr::MT_To &&
+ ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
+ *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) {
RequiresReference = true;
BP = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
@@ -7899,6 +7107,25 @@ private:
bool IsNonContiguous = CombinedInfo.NonContigInfo.IsNonContiguous;
bool IsPrevMemberReference = false;
+ // We need to check if we will be encountering any MEs. If we do not
+ // encounter any ME expression it means we will be mapping the whole struct.
+ // In that case we need to skip adding an entry for the struct to the
+ // CombinedInfo list and instead add an entry to the StructBaseCombinedInfo
+ // list only when generating all info for clauses.
+ bool IsMappingWholeStruct = true;
+ if (!GenerateAllInfoForClauses) {
+ IsMappingWholeStruct = false;
+ } else {
+ for (auto TempI = I; TempI != CE; ++TempI) {
+ const MemberExpr *PossibleME =
+ dyn_cast<MemberExpr>(TempI->getAssociatedExpression());
+ if (PossibleME) {
+ IsMappingWholeStruct = false;
+ break;
+ }
+ }
+ }
+
for (; I != CE; ++I) {
// If the current component is member of a struct (parent struct) mark it.
if (!EncounteredME) {
@@ -7960,7 +7187,9 @@ private:
bool IsMemberReference = isa<MemberExpr>(I->getAssociatedExpression()) &&
MapDecl &&
MapDecl->getType()->isLValueReferenceType();
- bool IsNonDerefPointer = IsPointer && !UO && !BO && !IsNonContiguous;
+ bool IsNonDerefPointer = IsPointer &&
+ !(UO && UO->getOpcode() != UO_Deref) && !BO &&
+ !IsNonContiguous;
if (OASE)
++DimSize;
@@ -7999,9 +7228,12 @@ private:
return BaseLV;
};
if (OAShE) {
- LowestElem = LB = Address(CGF.EmitScalarExpr(OAShE->getBase()),
- CGF.getContext().getTypeAlignInChars(
- OAShE->getBase()->getType()));
+ LowestElem = LB =
+ Address(CGF.EmitScalarExpr(OAShE->getBase()),
+ CGF.ConvertTypeForMem(
+ OAShE->getBase()->getType()->getPointeeType()),
+ CGF.getContext().getTypeAlignInChars(
+ OAShE->getBase()->getType()));
} else if (IsMemberReference) {
const auto *ME = cast<MemberExpr>(I->getAssociatedExpression());
LValue BaseLVal = EmitMemberExprBase(CGF, ME);
@@ -8037,8 +7269,8 @@ private:
CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(
I->getAssociatedExpression()->getType());
Address HB = CGF.Builder.CreateConstGEP(
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(LowestElem,
- CGF.VoidPtrTy),
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ LowestElem, CGF.VoidPtrTy, CGF.Int8Ty),
TypeSize.getQuantity() - 1);
PartialStruct.HighestElem = {
std::numeric_limits<decltype(
@@ -8052,7 +7284,7 @@ private:
std::swap(PartialStruct.PreliminaryMapData, CombinedInfo);
// Emit data for non-overlapped data.
OpenMPOffloadMappingFlags Flags =
- OMP_MAP_MEMBER_OF |
+ OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF |
getMapTypeBits(MapType, MapModifiers, MotionModifiers, IsImplicit,
/*AddPtrFlag=*/false,
/*AddIsTargetParamFlag=*/false, IsNonContiguous);
@@ -8078,14 +7310,15 @@ private:
.getAddress(CGF);
}
Size = CGF.Builder.CreatePtrDiff(
- CGF.EmitCastToVoidPtr(ComponentLB.getPointer()),
- CGF.EmitCastToVoidPtr(LB.getPointer()));
+ CGF.Int8Ty, ComponentLB.getPointer(), LB.getPointer());
break;
}
}
assert(Size && "Failed to determine structure size");
CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
CombinedInfo.BasePointers.push_back(BP.getPointer());
+ CombinedInfo.DevicePtrDecls.push_back(nullptr);
+ CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
CombinedInfo.Pointers.push_back(LB.getPointer());
CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
Size, CGF.Int64Ty, /*isSigned=*/true));
@@ -8097,10 +7330,12 @@ private:
}
CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
CombinedInfo.BasePointers.push_back(BP.getPointer());
+ CombinedInfo.DevicePtrDecls.push_back(nullptr);
+ CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
CombinedInfo.Pointers.push_back(LB.getPointer());
Size = CGF.Builder.CreatePtrDiff(
- CGF.Builder.CreateConstGEP(HB, 1).getPointer(),
- CGF.EmitCastToVoidPtr(LB.getPointer()));
+ CGF.Int8Ty, CGF.Builder.CreateConstGEP(HB, 1).getPointer(),
+ LB.getPointer());
CombinedInfo.Sizes.push_back(
CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
CombinedInfo.Types.push_back(Flags);
@@ -8110,19 +7345,41 @@ private:
break;
}
llvm::Value *Size = getExprTypeSize(I->getAssociatedExpression());
+ // Skip adding an entry in the CurInfo of this combined entry if the
+ // whole struct is currently being mapped. The struct needs to be added
+ // in the first position before any data internal to the struct is being
+ // mapped.
if (!IsMemberPointerOrAddr ||
(Next == CE && MapType != OMPC_MAP_unknown)) {
- CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
- CombinedInfo.BasePointers.push_back(BP.getPointer());
- CombinedInfo.Pointers.push_back(LB.getPointer());
- CombinedInfo.Sizes.push_back(
- CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
- CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
- : 1);
+ if (!IsMappingWholeStruct) {
+ CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
+ CombinedInfo.BasePointers.push_back(BP.getPointer());
+ CombinedInfo.DevicePtrDecls.push_back(nullptr);
+ CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
+ CombinedInfo.Pointers.push_back(LB.getPointer());
+ CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
+ Size, CGF.Int64Ty, /*isSigned=*/true));
+ CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
+ : 1);
+ } else {
+ StructBaseCombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
+ StructBaseCombinedInfo.BasePointers.push_back(BP.getPointer());
+ StructBaseCombinedInfo.DevicePtrDecls.push_back(nullptr);
+ StructBaseCombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
+ StructBaseCombinedInfo.Pointers.push_back(LB.getPointer());
+ StructBaseCombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
+ Size, CGF.Int64Ty, /*isSigned=*/true));
+ StructBaseCombinedInfo.NonContigInfo.Dims.push_back(
+ IsNonContiguous ? DimSize : 1);
+ }
// If Mapper is valid, the last component inherits the mapper.
bool HasMapper = Mapper && Next == CE;
- CombinedInfo.Mappers.push_back(HasMapper ? Mapper : nullptr);
+ if (!IsMappingWholeStruct)
+ CombinedInfo.Mappers.push_back(HasMapper ? Mapper : nullptr);
+ else
+ StructBaseCombinedInfo.Mappers.push_back(HasMapper ? Mapper
+ : nullptr);
// We need to add a pointer flag for each map that comes from the
// same expression except for the first one. We also need to signal
@@ -8138,20 +7395,26 @@ private:
// If we have a PTR_AND_OBJ pair where the OBJ is a pointer as well,
// then we reset the TO/FROM/ALWAYS/DELETE/CLOSE flags.
if (IsPointer || (IsMemberReference && Next != CE))
- Flags &= ~(OMP_MAP_TO | OMP_MAP_FROM | OMP_MAP_ALWAYS |
- OMP_MAP_DELETE | OMP_MAP_CLOSE);
+ Flags &= ~(OpenMPOffloadMappingFlags::OMP_MAP_TO |
+ OpenMPOffloadMappingFlags::OMP_MAP_FROM |
+ OpenMPOffloadMappingFlags::OMP_MAP_ALWAYS |
+ OpenMPOffloadMappingFlags::OMP_MAP_DELETE |
+ OpenMPOffloadMappingFlags::OMP_MAP_CLOSE);
if (ShouldBeMemberOf) {
// Set placeholder value MEMBER_OF=FFFF to indicate that the flag
// should be later updated with the correct value of MEMBER_OF.
- Flags |= OMP_MAP_MEMBER_OF;
+ Flags |= OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF;
// From now on, all subsequent PTR_AND_OBJ entries should not be
// marked as MEMBER_OF.
ShouldBeMemberOf = false;
}
}
- CombinedInfo.Types.push_back(Flags);
+ if (!IsMappingWholeStruct)
+ CombinedInfo.Types.push_back(Flags);
+ else
+ StructBaseCombinedInfo.Types.push_back(Flags);
}
// If we have encountered a member expression so far, keep track of the
@@ -8177,7 +7440,14 @@ private:
} else if (FieldIndex < PartialStruct.LowestElem.first) {
PartialStruct.LowestElem = {FieldIndex, LowestElem};
} else if (FieldIndex > PartialStruct.HighestElem.first) {
- PartialStruct.HighestElem = {FieldIndex, LowestElem};
+ if (IsFinalArraySection) {
+ Address HB =
+ CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false)
+ .getAddress(CGF);
+ PartialStruct.HighestElem = {FieldIndex, HB};
+ } else {
+ PartialStruct.HighestElem = {FieldIndex, LowestElem};
+ }
}
}
@@ -8283,7 +7553,7 @@ private:
}
// Skip the dummy dimension since we have already have its information.
- auto DI = DimSizes.begin() + 1;
+ auto *DI = DimSizes.begin() + 1;
// Product of dimension.
llvm::Value *DimProd =
llvm::ConstantInt::get(CGF.CGM.Int64Ty, ElementTypeSize);
@@ -8391,7 +7661,7 @@ private:
/// Return the adjusted map modifiers if the declaration a capture refers to
/// appears in a first-private clause. This is expected to be used only with
/// directives that start with 'target'.
- MappableExprsHandler::OpenMPOffloadMappingFlags
+ OpenMPOffloadMappingFlags
getMapModifiersForPrivateClauses(const CapturedStmt::Capture &Cap) const {
assert(Cap.capturesVariable() && "Expected capture by reference only!");
@@ -8400,34 +7670,22 @@ private:
// declaration is known as first-private in this handler.
if (FirstPrivateDecls.count(Cap.getCapturedVar())) {
if (Cap.getCapturedVar()->getType()->isAnyPointerType())
- return MappableExprsHandler::OMP_MAP_TO |
- MappableExprsHandler::OMP_MAP_PTR_AND_OBJ;
- return MappableExprsHandler::OMP_MAP_PRIVATE |
- MappableExprsHandler::OMP_MAP_TO;
- }
- return MappableExprsHandler::OMP_MAP_TO |
- MappableExprsHandler::OMP_MAP_FROM;
- }
-
- static OpenMPOffloadMappingFlags getMemberOfFlag(unsigned Position) {
- // Rotate by getFlagMemberOffset() bits.
- return static_cast<OpenMPOffloadMappingFlags>(((uint64_t)Position + 1)
- << getFlagMemberOffset());
- }
-
- static void setCorrectMemberOfFlag(OpenMPOffloadMappingFlags &Flags,
- OpenMPOffloadMappingFlags MemberOfFlag) {
- // If the entry is PTR_AND_OBJ but has not been marked with the special
- // placeholder value 0xFFFF in the MEMBER_OF field, then it should not be
- // marked as MEMBER_OF.
- if ((Flags & OMP_MAP_PTR_AND_OBJ) &&
- ((Flags & OMP_MAP_MEMBER_OF) != OMP_MAP_MEMBER_OF))
- return;
-
- // Reset the placeholder value to prepare the flag for the assignment of the
- // proper MEMBER_OF value.
- Flags &= ~OMP_MAP_MEMBER_OF;
- Flags |= MemberOfFlag;
+ return OpenMPOffloadMappingFlags::OMP_MAP_TO |
+ OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ;
+ return OpenMPOffloadMappingFlags::OMP_MAP_PRIVATE |
+ OpenMPOffloadMappingFlags::OMP_MAP_TO;
+ }
+ auto I = LambdasMap.find(Cap.getCapturedVar()->getCanonicalDecl());
+ if (I != LambdasMap.end())
+ // for map(to: lambda): using user specified map type.
+ return getMapTypeBits(
+ I->getSecond()->getMapType(), I->getSecond()->getMapTypeModifiers(),
+ /*MotionModifiers=*/std::nullopt, I->getSecond()->isImplicit(),
+ /*AddPtrFlag=*/false,
+ /*AddIsTargetParamFlag=*/false,
+ /*isNonContiguous=*/false);
+ return OpenMPOffloadMappingFlags::OMP_MAP_TO |
+ OpenMPOffloadMappingFlags::OMP_MAP_FROM;
}
void getPlainLayout(const CXXRecordDecl *RD,
@@ -8497,6 +7755,7 @@ private:
/// the device pointers info array.
void generateAllInfoForClauses(
ArrayRef<const OMPClause *> Clauses, MapCombinedInfoTy &CombinedInfo,
+ llvm::OpenMPIRBuilder &OMPBuilder,
const llvm::DenseSet<CanonicalDeclPtr<const Decl>> &SkipVarSet =
llvm::DenseSet<CanonicalDeclPtr<const Decl>>()) const {
// We have to process the component lists that relate with the same
@@ -8536,10 +7795,8 @@ private:
if (!C)
continue;
MapKind Kind = Other;
- if (!C->getMapTypeModifiers().empty() &&
- llvm::any_of(C->getMapTypeModifiers(), [](OpenMPMapModifierKind K) {
- return K == OMPC_MAP_MODIFIER_present;
- }))
+ if (llvm::is_contained(C->getMapTypeModifiers(),
+ OMPC_MAP_MODIFIER_present))
Kind = Present;
else if (C->getMapType() == OMPC_MAP_alloc)
Kind = Allocs;
@@ -8547,7 +7804,7 @@ private:
for (const auto L : C->component_lists()) {
const Expr *E = (C->getMapLoc().isValid()) ? *EI : nullptr;
InfoGen(std::get<0>(L), Kind, std::get<1>(L), C->getMapType(),
- C->getMapTypeModifiers(), llvm::None,
+ C->getMapTypeModifiers(), std::nullopt,
/*ReturnDevicePointer=*/false, C->isImplicit(), std::get<2>(L),
E);
++EI;
@@ -8558,14 +7815,12 @@ private:
if (!C)
continue;
MapKind Kind = Other;
- if (!C->getMotionModifiers().empty() &&
- llvm::any_of(C->getMotionModifiers(), [](OpenMPMotionModifierKind K) {
- return K == OMPC_MOTION_MODIFIER_present;
- }))
+ if (llvm::is_contained(C->getMotionModifiers(),
+ OMPC_MOTION_MODIFIER_present))
Kind = Present;
const auto *EI = C->getVarRefs().begin();
for (const auto L : C->component_lists()) {
- InfoGen(std::get<0>(L), Kind, std::get<1>(L), OMPC_MAP_to, llvm::None,
+ InfoGen(std::get<0>(L), Kind, std::get<1>(L), OMPC_MAP_to, std::nullopt,
C->getMotionModifiers(), /*ReturnDevicePointer=*/false,
C->isImplicit(), std::get<2>(L), *EI);
++EI;
@@ -8576,63 +7831,110 @@ private:
if (!C)
continue;
MapKind Kind = Other;
- if (!C->getMotionModifiers().empty() &&
- llvm::any_of(C->getMotionModifiers(), [](OpenMPMotionModifierKind K) {
- return K == OMPC_MOTION_MODIFIER_present;
- }))
+ if (llvm::is_contained(C->getMotionModifiers(),
+ OMPC_MOTION_MODIFIER_present))
Kind = Present;
const auto *EI = C->getVarRefs().begin();
for (const auto L : C->component_lists()) {
- InfoGen(std::get<0>(L), Kind, std::get<1>(L), OMPC_MAP_from, llvm::None,
- C->getMotionModifiers(), /*ReturnDevicePointer=*/false,
- C->isImplicit(), std::get<2>(L), *EI);
+ InfoGen(std::get<0>(L), Kind, std::get<1>(L), OMPC_MAP_from,
+ std::nullopt, C->getMotionModifiers(),
+ /*ReturnDevicePointer=*/false, C->isImplicit(), std::get<2>(L),
+ *EI);
++EI;
}
}
- // Look at the use_device_ptr clause information and mark the existing map
- // entries as such. If there is no map information for an entry in the
- // use_device_ptr list, we create one with map type 'alloc' and zero size
- // section. It is the user fault if that was not mapped before. If there is
- // no map information and the pointer is a struct member, then we defer the
- // emission of that entry until the whole struct has been processed.
+ // Look at the use_device_ptr and use_device_addr clauses information and
+ // mark the existing map entries as such. If there is no map information for
+ // an entry in the use_device_ptr and use_device_addr list, we create one
+ // with map type 'alloc' and zero size section. It is the user fault if that
+ // was not mapped before. If there is no map information and the pointer is
+ // a struct member, then we defer the emission of that entry until the whole
+ // struct has been processed.
llvm::MapVector<CanonicalDeclPtr<const Decl>,
SmallVector<DeferredDevicePtrEntryTy, 4>>
DeferredInfo;
- MapCombinedInfoTy UseDevicePtrCombinedInfo;
+ MapCombinedInfoTy UseDeviceDataCombinedInfo;
+
+ auto &&UseDeviceDataCombinedInfoGen =
+ [&UseDeviceDataCombinedInfo](const ValueDecl *VD, llvm::Value *Ptr,
+ CodeGenFunction &CGF, bool IsDevAddr) {
+ UseDeviceDataCombinedInfo.Exprs.push_back(VD);
+ UseDeviceDataCombinedInfo.BasePointers.emplace_back(Ptr);
+ UseDeviceDataCombinedInfo.DevicePtrDecls.emplace_back(VD);
+ UseDeviceDataCombinedInfo.DevicePointers.emplace_back(
+ IsDevAddr ? DeviceInfoTy::Address : DeviceInfoTy::Pointer);
+ UseDeviceDataCombinedInfo.Pointers.push_back(Ptr);
+ UseDeviceDataCombinedInfo.Sizes.push_back(
+ llvm::Constant::getNullValue(CGF.Int64Ty));
+ UseDeviceDataCombinedInfo.Types.push_back(
+ OpenMPOffloadMappingFlags::OMP_MAP_RETURN_PARAM);
+ UseDeviceDataCombinedInfo.Mappers.push_back(nullptr);
+ };
- for (const auto *Cl : Clauses) {
- const auto *C = dyn_cast<OMPUseDevicePtrClause>(Cl);
- if (!C)
- continue;
- for (const auto L : C->component_lists()) {
- OMPClauseMappableExprCommon::MappableExprComponentListRef Components =
- std::get<1>(L);
- assert(!Components.empty() &&
- "Not expecting empty list of components!");
- const ValueDecl *VD = Components.back().getAssociatedDeclaration();
- VD = cast<ValueDecl>(VD->getCanonicalDecl());
- const Expr *IE = Components.back().getAssociatedExpression();
- // If the first component is a member expression, we have to look into
- // 'this', which maps to null in the map of map information. Otherwise
- // look directly for the information.
- auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);
-
- // We potentially have map information for this declaration already.
- // Look for the first set of components that refer to it.
- if (It != Info.end()) {
- bool Found = false;
- for (auto &Data : It->second) {
- auto *CI = llvm::find_if(Data, [VD](const MapInfo &MI) {
- return MI.Components.back().getAssociatedDeclaration() == VD;
- });
- // If we found a map entry, signal that the pointer has to be
- // returned and move on to the next declaration. Exclude cases where
- // the base pointer is mapped as array subscript, array section or
- // array shaping. The base address is passed as a pointer to base in
- // this case and cannot be used as a base for use_device_ptr list
- // item.
- if (CI != Data.end()) {
+ auto &&MapInfoGen =
+ [&DeferredInfo, &UseDeviceDataCombinedInfoGen,
+ &InfoGen](CodeGenFunction &CGF, const Expr *IE, const ValueDecl *VD,
+ OMPClauseMappableExprCommon::MappableExprComponentListRef
+ Components,
+ bool IsImplicit, bool IsDevAddr) {
+ // We didn't find any match in our map information - generate a zero
+ // size array section - if the pointer is a struct member we defer
+ // this action until the whole struct has been processed.
+ if (isa<MemberExpr>(IE)) {
+ // Insert the pointer into Info to be processed by
+ // generateInfoForComponentList. Because it is a member pointer
+ // without a pointee, no entry will be generated for it, therefore
+ // we need to generate one after the whole struct has been
+ // processed. Nonetheless, generateInfoForComponentList must be
+ // called to take the pointer into account for the calculation of
+ // the range of the partial struct.
+ InfoGen(nullptr, Other, Components, OMPC_MAP_unknown, std::nullopt,
+ std::nullopt, /*ReturnDevicePointer=*/false, IsImplicit,
+ nullptr, nullptr, IsDevAddr);
+ DeferredInfo[nullptr].emplace_back(IE, VD, IsDevAddr);
+ } else {
+ llvm::Value *Ptr;
+ if (IsDevAddr) {
+ if (IE->isGLValue())
+ Ptr = CGF.EmitLValue(IE).getPointer(CGF);
+ else
+ Ptr = CGF.EmitScalarExpr(IE);
+ } else {
+ Ptr = CGF.EmitLoadOfScalar(CGF.EmitLValue(IE), IE->getExprLoc());
+ }
+ UseDeviceDataCombinedInfoGen(VD, Ptr, CGF, IsDevAddr);
+ }
+ };
+
+ auto &&IsMapInfoExist = [&Info](CodeGenFunction &CGF, const ValueDecl *VD,
+ const Expr *IE, bool IsDevAddr) -> bool {
+ // We potentially have map information for this declaration already.
+ // Look for the first set of components that refer to it. If found,
+ // return true.
+ // If the first component is a member expression, we have to look into
+ // 'this', which maps to null in the map of map information. Otherwise
+ // look directly for the information.
+ auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);
+ if (It != Info.end()) {
+ bool Found = false;
+ for (auto &Data : It->second) {
+ auto *CI = llvm::find_if(Data, [VD](const MapInfo &MI) {
+ return MI.Components.back().getAssociatedDeclaration() == VD;
+ });
+ // If we found a map entry, signal that the pointer has to be
+ // returned and move on to the next declaration. Exclude cases where
+ // the base pointer is mapped as array subscript, array section or
+ // array shaping. The base address is passed as a pointer to base in
+ // this case and cannot be used as a base for use_device_ptr list
+ // item.
+ if (CI != Data.end()) {
+ if (IsDevAddr) {
+ CI->ForDeviceAddr = IsDevAddr;
+ CI->ReturnDevicePointer = true;
+ Found = true;
+ break;
+ } else {
auto PrevCI = std::next(CI->Components.rbegin());
const auto *VarD = dyn_cast<VarDecl>(VD);
if (CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory() ||
@@ -8641,57 +7943,52 @@ private:
PrevCI == CI->Components.rend() ||
isa<MemberExpr>(PrevCI->getAssociatedExpression()) || !VarD ||
VarD->hasLocalStorage()) {
+ CI->ForDeviceAddr = IsDevAddr;
CI->ReturnDevicePointer = true;
Found = true;
break;
}
}
}
- if (Found)
- continue;
- }
-
- // We didn't find any match in our map information - generate a zero
- // size array section - if the pointer is a struct member we defer this
- // action until the whole struct has been processed.
- if (isa<MemberExpr>(IE)) {
- // Insert the pointer into Info to be processed by
- // generateInfoForComponentList. Because it is a member pointer
- // without a pointee, no entry will be generated for it, therefore
- // we need to generate one after the whole struct has been processed.
- // Nonetheless, generateInfoForComponentList must be called to take
- // the pointer into account for the calculation of the range of the
- // partial struct.
- InfoGen(nullptr, Other, Components, OMPC_MAP_unknown, llvm::None,
- llvm::None, /*ReturnDevicePointer=*/false, C->isImplicit(),
- nullptr);
- DeferredInfo[nullptr].emplace_back(IE, VD, /*ForDeviceAddr=*/false);
- } else {
- llvm::Value *Ptr =
- CGF.EmitLoadOfScalar(CGF.EmitLValue(IE), IE->getExprLoc());
- UseDevicePtrCombinedInfo.Exprs.push_back(VD);
- UseDevicePtrCombinedInfo.BasePointers.emplace_back(Ptr, VD);
- UseDevicePtrCombinedInfo.Pointers.push_back(Ptr);
- UseDevicePtrCombinedInfo.Sizes.push_back(
- llvm::Constant::getNullValue(CGF.Int64Ty));
- UseDevicePtrCombinedInfo.Types.push_back(OMP_MAP_RETURN_PARAM);
- UseDevicePtrCombinedInfo.Mappers.push_back(nullptr);
}
+ return Found;
}
- }
+ return false;
+ };
- // Look at the use_device_addr clause information and mark the existing map
+ // Look at the use_device_ptr clause information and mark the existing map
// entries as such. If there is no map information for an entry in the
- // use_device_addr list, we create one with map type 'alloc' and zero size
+ // use_device_ptr list, we create one with map type 'alloc' and zero size
// section. It is the user fault if that was not mapped before. If there is
// no map information and the pointer is a struct member, then we defer the
// emission of that entry until the whole struct has been processed.
+ for (const auto *Cl : Clauses) {
+ const auto *C = dyn_cast<OMPUseDevicePtrClause>(Cl);
+ if (!C)
+ continue;
+ for (const auto L : C->component_lists()) {
+ OMPClauseMappableExprCommon::MappableExprComponentListRef Components =
+ std::get<1>(L);
+ assert(!Components.empty() &&
+ "Not expecting empty list of components!");
+ const ValueDecl *VD = Components.back().getAssociatedDeclaration();
+ VD = cast<ValueDecl>(VD->getCanonicalDecl());
+ const Expr *IE = Components.back().getAssociatedExpression();
+ if (IsMapInfoExist(CGF, VD, IE, /*IsDevAddr=*/false))
+ continue;
+ MapInfoGen(CGF, IE, VD, Components, C->isImplicit(),
+ /*IsDevAddr=*/false);
+ }
+ }
+
llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
for (const auto *Cl : Clauses) {
const auto *C = dyn_cast<OMPUseDeviceAddrClause>(Cl);
if (!C)
continue;
for (const auto L : C->component_lists()) {
+ OMPClauseMappableExprCommon::MappableExprComponentListRef Components =
+ std::get<1>(L);
assert(!std::get<1>(L).empty() &&
"Not expecting empty list of components!");
const ValueDecl *VD = std::get<1>(L).back().getAssociatedDeclaration();
@@ -8699,67 +7996,19 @@ private:
continue;
VD = cast<ValueDecl>(VD->getCanonicalDecl());
const Expr *IE = std::get<1>(L).back().getAssociatedExpression();
- // If the first component is a member expression, we have to look into
- // 'this', which maps to null in the map of map information. Otherwise
- // look directly for the information.
- auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);
-
- // We potentially have map information for this declaration already.
- // Look for the first set of components that refer to it.
- if (It != Info.end()) {
- bool Found = false;
- for (auto &Data : It->second) {
- auto *CI = llvm::find_if(Data, [VD](const MapInfo &MI) {
- return MI.Components.back().getAssociatedDeclaration() == VD;
- });
- // If we found a map entry, signal that the pointer has to be
- // returned and move on to the next declaration.
- if (CI != Data.end()) {
- CI->ReturnDevicePointer = true;
- Found = true;
- break;
- }
- }
- if (Found)
- continue;
- }
-
- // We didn't find any match in our map information - generate a zero
- // size array section - if the pointer is a struct member we defer this
- // action until the whole struct has been processed.
- if (isa<MemberExpr>(IE)) {
- // Insert the pointer into Info to be processed by
- // generateInfoForComponentList. Because it is a member pointer
- // without a pointee, no entry will be generated for it, therefore
- // we need to generate one after the whole struct has been processed.
- // Nonetheless, generateInfoForComponentList must be called to take
- // the pointer into account for the calculation of the range of the
- // partial struct.
- InfoGen(nullptr, Other, std::get<1>(L), OMPC_MAP_unknown, llvm::None,
- llvm::None, /*ReturnDevicePointer=*/false, C->isImplicit(),
- nullptr, nullptr, /*ForDeviceAddr=*/true);
- DeferredInfo[nullptr].emplace_back(IE, VD, /*ForDeviceAddr=*/true);
- } else {
- llvm::Value *Ptr;
- if (IE->isGLValue())
- Ptr = CGF.EmitLValue(IE).getPointer(CGF);
- else
- Ptr = CGF.EmitScalarExpr(IE);
- CombinedInfo.Exprs.push_back(VD);
- CombinedInfo.BasePointers.emplace_back(Ptr, VD);
- CombinedInfo.Pointers.push_back(Ptr);
- CombinedInfo.Sizes.push_back(
- llvm::Constant::getNullValue(CGF.Int64Ty));
- CombinedInfo.Types.push_back(OMP_MAP_RETURN_PARAM);
- CombinedInfo.Mappers.push_back(nullptr);
- }
+ if (IsMapInfoExist(CGF, VD, IE, /*IsDevAddr=*/true))
+ continue;
+ MapInfoGen(CGF, IE, VD, Components, C->isImplicit(),
+ /*IsDevAddr=*/true);
}
}
for (const auto &Data : Info) {
StructRangeInfoTy PartialStruct;
- // Temporary generated information.
+ // Current struct information:
MapCombinedInfoTy CurInfo;
+ // Current struct base information:
+ MapCombinedInfoTy StructBaseCurInfo;
const Decl *D = Data.first;
const ValueDecl *VD = cast_or_null<ValueDecl>(D);
for (const auto &M : Data.second) {
@@ -8769,27 +8018,55 @@ private:
// Remember the current base pointer index.
unsigned CurrentBasePointersIdx = CurInfo.BasePointers.size();
+ unsigned StructBasePointersIdx =
+ StructBaseCurInfo.BasePointers.size();
CurInfo.NonContigInfo.IsNonContiguous =
L.Components.back().isNonContiguous();
generateInfoForComponentList(
L.MapType, L.MapModifiers, L.MotionModifiers, L.Components,
- CurInfo, PartialStruct, /*IsFirstComponentList=*/false,
- L.IsImplicit, L.Mapper, L.ForDeviceAddr, VD, L.VarRef);
+ CurInfo, StructBaseCurInfo, PartialStruct,
+ /*IsFirstComponentList=*/false, L.IsImplicit,
+ /*GenerateAllInfoForClauses*/ true, L.Mapper, L.ForDeviceAddr, VD,
+ L.VarRef);
- // If this entry relates with a device pointer, set the relevant
+ // If this entry relates to a device pointer, set the relevant
// declaration and add the 'return pointer' flag.
if (L.ReturnDevicePointer) {
- assert(CurInfo.BasePointers.size() > CurrentBasePointersIdx &&
+ // Check whether a value was added to either CurInfo or
+ // StructBaseCurInfo and error if no value was added to either of
+ // them:
+ assert((CurrentBasePointersIdx < CurInfo.BasePointers.size() ||
+ StructBasePointersIdx <
+ StructBaseCurInfo.BasePointers.size()) &&
"Unexpected number of mapped base pointers.");
+ // Choose a base pointer index which is always valid:
const ValueDecl *RelevantVD =
L.Components.back().getAssociatedDeclaration();
assert(RelevantVD &&
"No relevant declaration related with device pointer??");
- CurInfo.BasePointers[CurrentBasePointersIdx].setDevicePtrDecl(
- RelevantVD);
- CurInfo.Types[CurrentBasePointersIdx] |= OMP_MAP_RETURN_PARAM;
+ // If StructBaseCurInfo has been updated this iteration then work on
+ // the first new entry added to it i.e. make sure that when multiple
+ // values are added to any of the lists, the first value added is
+ // being modified by the assignments below (not the last value
+ // added).
+ if (StructBasePointersIdx < StructBaseCurInfo.BasePointers.size()) {
+ StructBaseCurInfo.DevicePtrDecls[StructBasePointersIdx] =
+ RelevantVD;
+ StructBaseCurInfo.DevicePointers[StructBasePointersIdx] =
+ L.ForDeviceAddr ? DeviceInfoTy::Address
+ : DeviceInfoTy::Pointer;
+ StructBaseCurInfo.Types[StructBasePointersIdx] |=
+ OpenMPOffloadMappingFlags::OMP_MAP_RETURN_PARAM;
+ } else {
+ CurInfo.DevicePtrDecls[CurrentBasePointersIdx] = RelevantVD;
+ CurInfo.DevicePointers[CurrentBasePointersIdx] =
+ L.ForDeviceAddr ? DeviceInfoTy::Address
+ : DeviceInfoTy::Pointer;
+ CurInfo.Types[CurrentBasePointersIdx] |=
+ OpenMPOffloadMappingFlags::OMP_MAP_RETURN_PARAM;
+ }
}
}
}
@@ -8810,7 +8087,9 @@ private:
// Entry is RETURN_PARAM. Also, set the placeholder value
// MEMBER_OF=FFFF so that the entry is later updated with the
// correct value of MEMBER_OF.
- CurInfo.Types.push_back(OMP_MAP_RETURN_PARAM | OMP_MAP_MEMBER_OF);
+ CurInfo.Types.push_back(
+ OpenMPOffloadMappingFlags::OMP_MAP_RETURN_PARAM |
+ OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF);
} else {
BasePtr = this->CGF.EmitLValue(L.IE).getPointer(CGF);
Ptr = this->CGF.EmitLoadOfScalar(this->CGF.EmitLValue(L.IE),
@@ -8818,30 +8097,43 @@ private:
// Entry is PTR_AND_OBJ and RETURN_PARAM. Also, set the
// placeholder value MEMBER_OF=FFFF so that the entry is later
// updated with the correct value of MEMBER_OF.
- CurInfo.Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_RETURN_PARAM |
- OMP_MAP_MEMBER_OF);
+ CurInfo.Types.push_back(
+ OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ |
+ OpenMPOffloadMappingFlags::OMP_MAP_RETURN_PARAM |
+ OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF);
}
CurInfo.Exprs.push_back(L.VD);
- CurInfo.BasePointers.emplace_back(BasePtr, L.VD);
+ CurInfo.BasePointers.emplace_back(BasePtr);
+ CurInfo.DevicePtrDecls.emplace_back(L.VD);
+ CurInfo.DevicePointers.emplace_back(
+ L.ForDeviceAddr ? DeviceInfoTy::Address : DeviceInfoTy::Pointer);
CurInfo.Pointers.push_back(Ptr);
CurInfo.Sizes.push_back(
llvm::Constant::getNullValue(this->CGF.Int64Ty));
CurInfo.Mappers.push_back(nullptr);
}
}
+
+ // Unify entries in one list making sure the struct mapping precedes the
+ // individual fields:
+ MapCombinedInfoTy UnionCurInfo;
+ UnionCurInfo.append(StructBaseCurInfo);
+ UnionCurInfo.append(CurInfo);
+
// If there is an entry in PartialStruct it means we have a struct with
// individual members mapped. Emit an extra combined entry.
if (PartialStruct.Base.isValid()) {
- CurInfo.NonContigInfo.Dims.push_back(0);
- emitCombinedEntry(CombinedInfo, CurInfo.Types, PartialStruct, VD);
+ UnionCurInfo.NonContigInfo.Dims.push_back(0);
+ // Emit a combined entry:
+ emitCombinedEntry(CombinedInfo, UnionCurInfo.Types, PartialStruct,
+ /*IsMapThis*/ !VD, OMPBuilder, VD);
}
- // We need to append the results of this capture to what we already
- // have.
- CombinedInfo.append(CurInfo);
+ // We need to append the results of this capture to what we already have.
+ CombinedInfo.append(UnionCurInfo);
}
// Append data for use_device_ptr clauses.
- CombinedInfo.append(UseDevicePtrCombinedInfo);
+ CombinedInfo.append(UseDeviceDataCombinedInfo);
}
public:
@@ -8869,6 +8161,25 @@ public:
for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
for (auto L : C->component_lists())
DevPointersMap[std::get<0>(L)].push_back(std::get<1>(L));
+ // Extract device addr clause information.
+ for (const auto *C : Dir.getClausesOfKind<OMPHasDeviceAddrClause>())
+ for (auto L : C->component_lists())
+ HasDevAddrsMap[std::get<0>(L)].push_back(std::get<1>(L));
+ // Extract map information.
+ for (const auto *C : Dir.getClausesOfKind<OMPMapClause>()) {
+ if (C->getMapType() != OMPC_MAP_to)
+ continue;
+ for (auto L : C->component_lists()) {
+ const ValueDecl *VD = std::get<0>(L);
+ const auto *RD = VD ? VD->getType()
+ .getCanonicalType()
+ .getNonReferenceType()
+ ->getAsCXXRecordDecl()
+ : nullptr;
+ if (RD && RD->isLambda())
+ LambdasMap.try_emplace(std::get<0>(L), C);
+ }
+ }
}
/// Constructor for the declare mapper directive.
@@ -8880,11 +8191,13 @@ public:
/// individual struct members.
void emitCombinedEntry(MapCombinedInfoTy &CombinedInfo,
MapFlagsArrayTy &CurTypes,
- const StructRangeInfoTy &PartialStruct,
+ const StructRangeInfoTy &PartialStruct, bool IsMapThis,
+ llvm::OpenMPIRBuilder &OMPBuilder,
const ValueDecl *VD = nullptr,
bool NotTargetParams = true) const {
if (CurTypes.size() == 1 &&
- ((CurTypes.back() & OMP_MAP_MEMBER_OF) != OMP_MAP_MEMBER_OF) &&
+ ((CurTypes.back() & OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF) !=
+ OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF) &&
!PartialStruct.IsArraySection)
return;
Address LBAddr = PartialStruct.LowestElem.second;
@@ -8896,41 +8209,80 @@ public:
CombinedInfo.Exprs.push_back(VD);
// Base is the base of the struct
CombinedInfo.BasePointers.push_back(PartialStruct.Base.getPointer());
+ CombinedInfo.DevicePtrDecls.push_back(nullptr);
+ CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
// Pointer is the address of the lowest element
llvm::Value *LB = LBAddr.getPointer();
- CombinedInfo.Pointers.push_back(LB);
+ const CXXMethodDecl *MD =
+ CGF.CurFuncDecl ? dyn_cast<CXXMethodDecl>(CGF.CurFuncDecl) : nullptr;
+ const CXXRecordDecl *RD = MD ? MD->getParent() : nullptr;
+ bool HasBaseClass = RD && IsMapThis ? RD->getNumBases() > 0 : false;
// There should not be a mapper for a combined entry.
+ if (HasBaseClass) {
+ // OpenMP 5.2 148:21:
+ // If the target construct is within a class non-static member function,
+ // and a variable is an accessible data member of the object for which the
+ // non-static data member function is invoked, the variable is treated as
+ // if the this[:1] expression had appeared in a map clause with a map-type
+ // of tofrom.
+ // Emit this[:1]
+ CombinedInfo.Pointers.push_back(PartialStruct.Base.getPointer());
+ QualType Ty = MD->getFunctionObjectParameterType();
+ llvm::Value *Size =
+ CGF.Builder.CreateIntCast(CGF.getTypeSize(Ty), CGF.Int64Ty,
+ /*isSigned=*/true);
+ CombinedInfo.Sizes.push_back(Size);
+ } else {
+ CombinedInfo.Pointers.push_back(LB);
+ // Size is (addr of {highest+1} element) - (addr of lowest element)
+ llvm::Value *HB = HBAddr.getPointer();
+ llvm::Value *HAddr = CGF.Builder.CreateConstGEP1_32(
+ HBAddr.getElementType(), HB, /*Idx0=*/1);
+ llvm::Value *CLAddr = CGF.Builder.CreatePointerCast(LB, CGF.VoidPtrTy);
+ llvm::Value *CHAddr = CGF.Builder.CreatePointerCast(HAddr, CGF.VoidPtrTy);
+ llvm::Value *Diff = CGF.Builder.CreatePtrDiff(CGF.Int8Ty, CHAddr, CLAddr);
+ llvm::Value *Size = CGF.Builder.CreateIntCast(Diff, CGF.Int64Ty,
+ /*isSigned=*/false);
+ CombinedInfo.Sizes.push_back(Size);
+ }
CombinedInfo.Mappers.push_back(nullptr);
- // Size is (addr of {highest+1} element) - (addr of lowest element)
- llvm::Value *HB = HBAddr.getPointer();
- llvm::Value *HAddr =
- CGF.Builder.CreateConstGEP1_32(HBAddr.getElementType(), HB, /*Idx0=*/1);
- llvm::Value *CLAddr = CGF.Builder.CreatePointerCast(LB, CGF.VoidPtrTy);
- llvm::Value *CHAddr = CGF.Builder.CreatePointerCast(HAddr, CGF.VoidPtrTy);
- llvm::Value *Diff = CGF.Builder.CreatePtrDiff(CHAddr, CLAddr);
- llvm::Value *Size = CGF.Builder.CreateIntCast(Diff, CGF.Int64Ty,
- /*isSigned=*/false);
- CombinedInfo.Sizes.push_back(Size);
// Map type is always TARGET_PARAM, if generate info for captures.
- CombinedInfo.Types.push_back(NotTargetParams ? OMP_MAP_NONE
- : OMP_MAP_TARGET_PARAM);
+ CombinedInfo.Types.push_back(
+ NotTargetParams ? OpenMPOffloadMappingFlags::OMP_MAP_NONE
+ : OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM);
// If any element has the present modifier, then make sure the runtime
// doesn't attempt to allocate the struct.
if (CurTypes.end() !=
llvm::find_if(CurTypes, [](OpenMPOffloadMappingFlags Type) {
- return Type & OMP_MAP_PRESENT;
+ return static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
+ Type & OpenMPOffloadMappingFlags::OMP_MAP_PRESENT);
}))
- CombinedInfo.Types.back() |= OMP_MAP_PRESENT;
+ CombinedInfo.Types.back() |= OpenMPOffloadMappingFlags::OMP_MAP_PRESENT;
// Remove TARGET_PARAM flag from the first element
- (*CurTypes.begin()) &= ~OMP_MAP_TARGET_PARAM;
+ (*CurTypes.begin()) &= ~OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM;
+ // If any element has the ompx_hold modifier, then make sure the runtime
+ // uses the hold reference count for the struct as a whole so that it won't
+ // be unmapped by an extra dynamic reference count decrement. Add it to all
+ // elements as well so the runtime knows which reference count to check
+ // when determining whether it's time for device-to-host transfers of
+ // individual elements.
+ if (CurTypes.end() !=
+ llvm::find_if(CurTypes, [](OpenMPOffloadMappingFlags Type) {
+ return static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
+ Type & OpenMPOffloadMappingFlags::OMP_MAP_OMPX_HOLD);
+ })) {
+ CombinedInfo.Types.back() |= OpenMPOffloadMappingFlags::OMP_MAP_OMPX_HOLD;
+ for (auto &M : CurTypes)
+ M |= OpenMPOffloadMappingFlags::OMP_MAP_OMPX_HOLD;
+ }
// All other current entries will be MEMBER_OF the combined entry
// (except for PTR_AND_OBJ entries which do not have a placeholder value
// 0xFFFF in the MEMBER_OF field).
OpenMPOffloadMappingFlags MemberOfFlag =
- getMemberOfFlag(CombinedInfo.BasePointers.size() - 1);
+ OMPBuilder.getMemberOfFlag(CombinedInfo.BasePointers.size() - 1);
for (auto &M : CurTypes)
- setCorrectMemberOfFlag(M, MemberOfFlag);
+ OMPBuilder.setCorrectMemberOfFlag(M, MemberOfFlag);
}
/// Generate all the base pointers, section pointers, sizes, map types, and
@@ -8939,39 +8291,40 @@ public:
/// pair of the relevant declaration and index where it occurs is appended to
/// the device pointers info array.
void generateAllInfo(
- MapCombinedInfoTy &CombinedInfo,
+ MapCombinedInfoTy &CombinedInfo, llvm::OpenMPIRBuilder &OMPBuilder,
const llvm::DenseSet<CanonicalDeclPtr<const Decl>> &SkipVarSet =
llvm::DenseSet<CanonicalDeclPtr<const Decl>>()) const {
assert(CurDir.is<const OMPExecutableDirective *>() &&
"Expect a executable directive");
const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();
- generateAllInfoForClauses(CurExecDir->clauses(), CombinedInfo, SkipVarSet);
+ generateAllInfoForClauses(CurExecDir->clauses(), CombinedInfo, OMPBuilder,
+ SkipVarSet);
}
/// Generate all the base pointers, section pointers, sizes, map types, and
/// mappers for the extracted map clauses of user-defined mapper (all included
/// in \a CombinedInfo).
- void generateAllInfoForMapper(MapCombinedInfoTy &CombinedInfo) const {
+ void generateAllInfoForMapper(MapCombinedInfoTy &CombinedInfo,
+ llvm::OpenMPIRBuilder &OMPBuilder) const {
assert(CurDir.is<const OMPDeclareMapperDecl *>() &&
"Expect a declare mapper directive");
const auto *CurMapperDir = CurDir.get<const OMPDeclareMapperDecl *>();
- generateAllInfoForClauses(CurMapperDir->clauses(), CombinedInfo);
+ generateAllInfoForClauses(CurMapperDir->clauses(), CombinedInfo,
+ OMPBuilder);
}
/// Emit capture info for lambdas for variables captured by reference.
void generateInfoForLambdaCaptures(
const ValueDecl *VD, llvm::Value *Arg, MapCombinedInfoTy &CombinedInfo,
llvm::DenseMap<llvm::Value *, llvm::Value *> &LambdaPointers) const {
- const auto *RD = VD->getType()
- .getCanonicalType()
- .getNonReferenceType()
- ->getAsCXXRecordDecl();
+ QualType VDType = VD->getType().getCanonicalType().getNonReferenceType();
+ const auto *RD = VDType->getAsCXXRecordDecl();
if (!RD || !RD->isLambda())
return;
- Address VDAddr = Address(Arg, CGF.getContext().getDeclAlign(VD));
- LValue VDLVal = CGF.MakeAddrLValue(
- VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
- llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
+ Address VDAddr(Arg, CGF.ConvertTypeForMem(VDType),
+ CGF.getContext().getDeclAlign(VD));
+ LValue VDLVal = CGF.MakeAddrLValue(VDAddr, VDType);
+ llvm::DenseMap<const ValueDecl *, FieldDecl *> Captures;
FieldDecl *ThisCapture = nullptr;
RD->getCaptureFields(Captures, ThisCapture);
if (ThisCapture) {
@@ -8982,18 +8335,23 @@ public:
VDLVal.getPointer(CGF));
CombinedInfo.Exprs.push_back(VD);
CombinedInfo.BasePointers.push_back(ThisLVal.getPointer(CGF));
+ CombinedInfo.DevicePtrDecls.push_back(nullptr);
+ CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
CombinedInfo.Pointers.push_back(ThisLValVal.getPointer(CGF));
CombinedInfo.Sizes.push_back(
CGF.Builder.CreateIntCast(CGF.getTypeSize(CGF.getContext().VoidPtrTy),
CGF.Int64Ty, /*isSigned=*/true));
- CombinedInfo.Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
- OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT);
+ CombinedInfo.Types.push_back(
+ OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ |
+ OpenMPOffloadMappingFlags::OMP_MAP_LITERAL |
+ OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF |
+ OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT);
CombinedInfo.Mappers.push_back(nullptr);
}
for (const LambdaCapture &LC : RD->captures()) {
if (!LC.capturesVariable())
continue;
- const VarDecl *VD = LC.getCapturedVar();
+ const VarDecl *VD = cast<VarDecl>(LC.getCapturedVar());
if (LC.getCaptureKind() != LCK_ByRef && !VD->getType()->isPointerType())
continue;
auto It = Captures.find(VD);
@@ -9005,6 +8363,8 @@ public:
VDLVal.getPointer(CGF));
CombinedInfo.Exprs.push_back(VD);
CombinedInfo.BasePointers.push_back(VarLVal.getPointer(CGF));
+ CombinedInfo.DevicePtrDecls.push_back(nullptr);
+ CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
CombinedInfo.Pointers.push_back(VarLValVal.getPointer(CGF));
CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
CGF.getTypeSize(
@@ -9016,26 +8376,34 @@ public:
VDLVal.getPointer(CGF));
CombinedInfo.Exprs.push_back(VD);
CombinedInfo.BasePointers.push_back(VarLVal.getPointer(CGF));
+ CombinedInfo.DevicePtrDecls.push_back(nullptr);
+ CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
CombinedInfo.Pointers.push_back(VarRVal.getScalarVal());
CombinedInfo.Sizes.push_back(llvm::ConstantInt::get(CGF.Int64Ty, 0));
}
- CombinedInfo.Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
- OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT);
+ CombinedInfo.Types.push_back(
+ OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ |
+ OpenMPOffloadMappingFlags::OMP_MAP_LITERAL |
+ OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF |
+ OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT);
CombinedInfo.Mappers.push_back(nullptr);
}
}
/// Set correct indices for lambdas captures.
void adjustMemberOfForLambdaCaptures(
+ llvm::OpenMPIRBuilder &OMPBuilder,
const llvm::DenseMap<llvm::Value *, llvm::Value *> &LambdaPointers,
MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
MapFlagsArrayTy &Types) const {
for (unsigned I = 0, E = Types.size(); I < E; ++I) {
// Set correct member_of idx for all implicit lambda captures.
- if (Types[I] != (OMP_MAP_PTR_AND_OBJ | OMP_MAP_LITERAL |
- OMP_MAP_MEMBER_OF | OMP_MAP_IMPLICIT))
+ if (Types[I] != (OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ |
+ OpenMPOffloadMappingFlags::OMP_MAP_LITERAL |
+ OpenMPOffloadMappingFlags::OMP_MAP_MEMBER_OF |
+ OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT))
continue;
- llvm::Value *BasePtr = LambdaPointers.lookup(*BasePointers[I]);
+ llvm::Value *BasePtr = LambdaPointers.lookup(BasePointers[I]);
assert(BasePtr && "Unable to find base lambda address.");
int TgtIdx = -1;
for (unsigned J = I; J > 0; --J) {
@@ -9049,8 +8417,9 @@ public:
// All other current entries will be MEMBER_OF the combined entry
// (except for PTR_AND_OBJ entries which do not have a placeholder value
// 0xFFFF in the MEMBER_OF field).
- OpenMPOffloadMappingFlags MemberOfFlag = getMemberOfFlag(TgtIdx);
- setCorrectMemberOfFlag(Types[I], MemberOfFlag);
+ OpenMPOffloadMappingFlags MemberOfFlag =
+ OMPBuilder.getMemberOfFlag(TgtIdx);
+ OMPBuilder.setCorrectMemberOfFlag(Types[I], MemberOfFlag);
}
}
@@ -9067,19 +8436,26 @@ public:
? nullptr
: Cap->getCapturedVar()->getCanonicalDecl();
+ // for map(to: lambda): skip here, processing it in
+ // generateDefaultMapInfo
+ if (LambdasMap.count(VD))
+ return;
+
// If this declaration appears in a is_device_ptr clause we just have to
// pass the pointer by value. If it is a reference to a declaration, we just
// pass its value.
- if (DevPointersMap.count(VD)) {
+ if (VD && (DevPointersMap.count(VD) || HasDevAddrsMap.count(VD))) {
CombinedInfo.Exprs.push_back(VD);
- CombinedInfo.BasePointers.emplace_back(Arg, VD);
+ CombinedInfo.BasePointers.emplace_back(Arg);
+ CombinedInfo.DevicePtrDecls.emplace_back(VD);
+ CombinedInfo.DevicePointers.emplace_back(DeviceInfoTy::Pointer);
CombinedInfo.Pointers.push_back(Arg);
CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
CGF.getTypeSize(CGF.getContext().VoidPtrTy), CGF.Int64Ty,
/*isSigned=*/true));
CombinedInfo.Types.push_back(
- (Cap->capturesVariable() ? OMP_MAP_TO : OMP_MAP_LITERAL) |
- OMP_MAP_TARGET_PARAM);
+ OpenMPOffloadMappingFlags::OMP_MAP_LITERAL |
+ OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM);
CombinedInfo.Mappers.push_back(nullptr);
return;
}
@@ -9089,6 +8465,21 @@ public:
OpenMPMapClauseKind, ArrayRef<OpenMPMapModifierKind>, bool,
const ValueDecl *, const Expr *>;
SmallVector<MapData, 4> DeclComponentLists;
+ // For member fields list in is_device_ptr, store it in
+ // DeclComponentLists for generating components info.
+ static const OpenMPMapModifierKind Unknown = OMPC_MAP_MODIFIER_unknown;
+ auto It = DevPointersMap.find(VD);
+ if (It != DevPointersMap.end())
+ for (const auto &MCL : It->second)
+ DeclComponentLists.emplace_back(MCL, OMPC_MAP_to, Unknown,
+ /*IsImpicit = */ true, nullptr,
+ nullptr);
+ auto I = HasDevAddrsMap.find(VD);
+ if (I != HasDevAddrsMap.end())
+ for (const auto &MCL : I->second)
+ DeclComponentLists.emplace_back(MCL, OMPC_MAP_tofrom, Unknown,
+ /*IsImpicit = */ true, nullptr,
+ nullptr);
assert(CurDir.is<const OMPExecutableDirective *>() &&
"Expect a executable directive");
const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();
@@ -9113,18 +8504,13 @@ public:
const MapData &RHS) {
ArrayRef<OpenMPMapModifierKind> MapModifiers = std::get<2>(LHS);
OpenMPMapClauseKind MapType = std::get<1>(RHS);
- bool HasPresent = !MapModifiers.empty() &&
- llvm::any_of(MapModifiers, [](OpenMPMapModifierKind K) {
- return K == clang::OMPC_MAP_MODIFIER_present;
- });
+ bool HasPresent =
+ llvm::is_contained(MapModifiers, clang::OMPC_MAP_MODIFIER_present);
bool HasAllocs = MapType == OMPC_MAP_alloc;
MapModifiers = std::get<2>(RHS);
MapType = std::get<1>(LHS);
bool HasPresentR =
- !MapModifiers.empty() &&
- llvm::any_of(MapModifiers, [](OpenMPMapModifierKind K) {
- return K == clang::OMPC_MAP_MODIFIER_present;
- });
+ llvm::is_contained(MapModifiers, clang::OMPC_MAP_MODIFIER_present);
bool HasAllocsR = MapType == OMPC_MAP_alloc;
return (HasPresent && !HasPresentR) || (HasAllocs && !HasAllocsR);
});
@@ -9147,7 +8533,7 @@ public:
std::tie(Components, MapType, MapModifiers, IsImplicit, Mapper, VarRef) =
L;
++Count;
- for (const MapData &L1 : makeArrayRef(DeclComponentLists).slice(Count)) {
+ for (const MapData &L1 : ArrayRef(DeclComponentLists).slice(Count)) {
OMPClauseMappableExprCommon::MappableExprComponentListRef Components1;
std::tie(Components1, MapType, MapModifiers, IsImplicit, Mapper,
VarRef) = L1;
@@ -9254,6 +8640,7 @@ public:
// Associated with a capture, because the mapping flags depend on it.
// Go through all of the elements with the overlapped elements.
bool IsFirstComponentList = true;
+ MapCombinedInfoTy StructBaseCombinedInfo;
for (const auto &Pair : OverlappedData) {
const MapData &L = *Pair.getFirst();
OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
@@ -9267,8 +8654,9 @@ public:
ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
OverlappedComponents = Pair.getSecond();
generateInfoForComponentList(
- MapType, MapModifiers, llvm::None, Components, CombinedInfo,
- PartialStruct, IsFirstComponentList, IsImplicit, Mapper,
+ MapType, MapModifiers, std::nullopt, Components, CombinedInfo,
+ StructBaseCombinedInfo, PartialStruct, IsFirstComponentList,
+ IsImplicit, /*GenerateAllInfoForClauses*/ false, Mapper,
/*ForDeviceAddr=*/false, VD, VarRef, OverlappedComponents);
IsFirstComponentList = false;
}
@@ -9284,10 +8672,11 @@ public:
L;
auto It = OverlappedData.find(&L);
if (It == OverlappedData.end())
- generateInfoForComponentList(MapType, MapModifiers, llvm::None,
- Components, CombinedInfo, PartialStruct,
- IsFirstComponentList, IsImplicit, Mapper,
- /*ForDeviceAddr=*/false, VD, VarRef);
+ generateInfoForComponentList(
+ MapType, MapModifiers, std::nullopt, Components, CombinedInfo,
+ StructBaseCombinedInfo, PartialStruct, IsFirstComponentList,
+ IsImplicit, /*GenerateAllInfoForClauses*/ false, Mapper,
+ /*ForDeviceAddr=*/false, VD, VarRef);
IsFirstComponentList = false;
}
}
@@ -9302,28 +8691,34 @@ public:
if (CI.capturesThis()) {
CombinedInfo.Exprs.push_back(nullptr);
CombinedInfo.BasePointers.push_back(CV);
+ CombinedInfo.DevicePtrDecls.push_back(nullptr);
+ CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
CombinedInfo.Pointers.push_back(CV);
const auto *PtrTy = cast<PointerType>(RI.getType().getTypePtr());
CombinedInfo.Sizes.push_back(
CGF.Builder.CreateIntCast(CGF.getTypeSize(PtrTy->getPointeeType()),
CGF.Int64Ty, /*isSigned=*/true));
// Default map type.
- CombinedInfo.Types.push_back(OMP_MAP_TO | OMP_MAP_FROM);
+ CombinedInfo.Types.push_back(OpenMPOffloadMappingFlags::OMP_MAP_TO |
+ OpenMPOffloadMappingFlags::OMP_MAP_FROM);
} else if (CI.capturesVariableByCopy()) {
const VarDecl *VD = CI.getCapturedVar();
CombinedInfo.Exprs.push_back(VD->getCanonicalDecl());
CombinedInfo.BasePointers.push_back(CV);
+ CombinedInfo.DevicePtrDecls.push_back(nullptr);
+ CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
CombinedInfo.Pointers.push_back(CV);
if (!RI.getType()->isAnyPointerType()) {
// We have to signal to the runtime captures passed by value that are
// not pointers.
- CombinedInfo.Types.push_back(OMP_MAP_LITERAL);
+ CombinedInfo.Types.push_back(
+ OpenMPOffloadMappingFlags::OMP_MAP_LITERAL);
CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
CGF.getTypeSize(RI.getType()), CGF.Int64Ty, /*isSigned=*/true));
} else {
// Pointers are implicitly mapped with a zero size and no flags
// (other than first map that is added for all implicit maps).
- CombinedInfo.Types.push_back(OMP_MAP_NONE);
+ CombinedInfo.Types.push_back(OpenMPOffloadMappingFlags::OMP_MAP_NONE);
CombinedInfo.Sizes.push_back(llvm::Constant::getNullValue(CGF.Int64Ty));
}
auto I = FirstPrivateDecls.find(VD);
@@ -9343,6 +8738,8 @@ public:
auto I = FirstPrivateDecls.find(VD);
CombinedInfo.Exprs.push_back(VD->getCanonicalDecl());
CombinedInfo.BasePointers.push_back(CV);
+ CombinedInfo.DevicePtrDecls.push_back(nullptr);
+ CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
if (I != FirstPrivateDecls.end() && ElementType->isAnyPointerType()) {
Address PtrAddr = CGF.EmitLoadOfReference(CGF.MakeAddrLValue(
CV, ElementType, CGF.getContext().getDeclAlign(VD),
@@ -9355,11 +8752,12 @@ public:
IsImplicit = I->getSecond();
}
// Every default map produces a single argument which is a target parameter.
- CombinedInfo.Types.back() |= OMP_MAP_TARGET_PARAM;
+ CombinedInfo.Types.back() |=
+ OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM;
// Add flag stating this is an implicit map.
if (IsImplicit)
- CombinedInfo.Types.back() |= OMP_MAP_IMPLICIT;
+ CombinedInfo.Types.back() |= OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT;
// No user-defined mapper for default mapping.
CombinedInfo.Mappers.push_back(nullptr);
@@ -9367,72 +8765,16 @@ public:
};
} // anonymous namespace
-static void emitNonContiguousDescriptor(
- CodeGenFunction &CGF, MappableExprsHandler::MapCombinedInfoTy &CombinedInfo,
- CGOpenMPRuntime::TargetDataInfo &Info) {
- CodeGenModule &CGM = CGF.CGM;
- MappableExprsHandler::MapCombinedInfoTy::StructNonContiguousInfo
- &NonContigInfo = CombinedInfo.NonContigInfo;
+// Try to extract the base declaration from a `this->x` expression if possible.
+static ValueDecl *getDeclFromThisExpr(const Expr *E) {
+ if (!E)
+ return nullptr;
- // Build an array of struct descriptor_dim and then assign it to
- // offload_args.
- //
- // struct descriptor_dim {
- // uint64_t offset;
- // uint64_t count;
- // uint64_t stride
- // };
- ASTContext &C = CGF.getContext();
- QualType Int64Ty = C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0);
- RecordDecl *RD;
- RD = C.buildImplicitRecord("descriptor_dim");
- RD->startDefinition();
- addFieldToRecordDecl(C, RD, Int64Ty);
- addFieldToRecordDecl(C, RD, Int64Ty);
- addFieldToRecordDecl(C, RD, Int64Ty);
- RD->completeDefinition();
- QualType DimTy = C.getRecordType(RD);
-
- enum { OffsetFD = 0, CountFD, StrideFD };
- // We need two index variable here since the size of "Dims" is the same as the
- // size of Components, however, the size of offset, count, and stride is equal
- // to the size of base declaration that is non-contiguous.
- for (unsigned I = 0, L = 0, E = NonContigInfo.Dims.size(); I < E; ++I) {
- // Skip emitting ir if dimension size is 1 since it cannot be
- // non-contiguous.
- if (NonContigInfo.Dims[I] == 1)
- continue;
- llvm::APInt Size(/*numBits=*/32, NonContigInfo.Dims[I]);
- QualType ArrayTy =
- C.getConstantArrayType(DimTy, Size, nullptr, ArrayType::Normal, 0);
- Address DimsAddr = CGF.CreateMemTemp(ArrayTy, "dims");
- for (unsigned II = 0, EE = NonContigInfo.Dims[I]; II < EE; ++II) {
- unsigned RevIdx = EE - II - 1;
- LValue DimsLVal = CGF.MakeAddrLValue(
- CGF.Builder.CreateConstArrayGEP(DimsAddr, II), DimTy);
- // Offset
- LValue OffsetLVal = CGF.EmitLValueForField(
- DimsLVal, *std::next(RD->field_begin(), OffsetFD));
- CGF.EmitStoreOfScalar(NonContigInfo.Offsets[L][RevIdx], OffsetLVal);
- // Count
- LValue CountLVal = CGF.EmitLValueForField(
- DimsLVal, *std::next(RD->field_begin(), CountFD));
- CGF.EmitStoreOfScalar(NonContigInfo.Counts[L][RevIdx], CountLVal);
- // Stride
- LValue StrideLVal = CGF.EmitLValueForField(
- DimsLVal, *std::next(RD->field_begin(), StrideFD));
- CGF.EmitStoreOfScalar(NonContigInfo.Strides[L][RevIdx], StrideLVal);
- }
- // args[I] = &dims
- Address DAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- DimsAddr, CGM.Int8PtrTy);
- llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
- Info.PointersArray, 0, I);
- Address PAddr(P, CGF.getPointerAlign());
- CGF.Builder.CreateStore(DAddr.getPointer(), PAddr);
- ++L;
- }
+ if (const auto *OASE = dyn_cast<OMPArraySectionExpr>(E->IgnoreParenCasts()))
+ if (const MemberExpr *ME =
+ dyn_cast<MemberExpr>(OASE->getBase()->IgnoreParenImpCasts()))
+ return ME->getMemberDecl();
+ return nullptr;
}
/// Emit a string constant containing the names of the values mapped to the
@@ -9440,29 +8782,35 @@ static void emitNonContiguousDescriptor(
llvm::Constant *
emitMappingInformation(CodeGenFunction &CGF, llvm::OpenMPIRBuilder &OMPBuilder,
MappableExprsHandler::MappingExprInfo &MapExprs) {
- llvm::Constant *SrcLocStr;
- if (!MapExprs.getMapDecl()) {
- SrcLocStr = OMPBuilder.getOrCreateDefaultSrcLocStr();
+
+ uint32_t SrcLocStrSize;
+ if (!MapExprs.getMapDecl() && !MapExprs.getMapExpr())
+ return OMPBuilder.getOrCreateDefaultSrcLocStr(SrcLocStrSize);
+
+ SourceLocation Loc;
+ if (!MapExprs.getMapDecl() && MapExprs.getMapExpr()) {
+ if (const ValueDecl *VD = getDeclFromThisExpr(MapExprs.getMapExpr()))
+ Loc = VD->getLocation();
+ else
+ Loc = MapExprs.getMapExpr()->getExprLoc();
} else {
- std::string ExprName = "";
- if (MapExprs.getMapExpr()) {
- PrintingPolicy P(CGF.getContext().getLangOpts());
- llvm::raw_string_ostream OS(ExprName);
- MapExprs.getMapExpr()->printPretty(OS, nullptr, P);
- OS.flush();
- } else {
- ExprName = MapExprs.getMapDecl()->getNameAsString();
- }
+ Loc = MapExprs.getMapDecl()->getLocation();
+ }
- SourceLocation Loc = MapExprs.getMapDecl()->getLocation();
- PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
- const char *FileName = PLoc.getFilename();
- unsigned Line = PLoc.getLine();
- unsigned Column = PLoc.getColumn();
- SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(FileName, ExprName.c_str(),
- Line, Column);
+ std::string ExprName;
+ if (MapExprs.getMapExpr()) {
+ PrintingPolicy P(CGF.getContext().getLangOpts());
+ llvm::raw_string_ostream OS(ExprName);
+ MapExprs.getMapExpr()->printPretty(OS, nullptr, P);
+ OS.flush();
+ } else {
+ ExprName = MapExprs.getMapDecl()->getNameAsString();
}
- return SrcLocStr;
+
+ PresumedLoc PLoc = CGF.getContext().getSourceManager().getPresumedLoc(Loc);
+ return OMPBuilder.getOrCreateSrcLocStr(PLoc.getFilename(), ExprName,
+ PLoc.getLine(), PLoc.getColumn(),
+ SrcLocStrSize);
}
/// Emit the arrays used to pass the captures and map information to the
@@ -9473,241 +8821,45 @@ static void emitOffloadingArrays(
CGOpenMPRuntime::TargetDataInfo &Info, llvm::OpenMPIRBuilder &OMPBuilder,
bool IsNonContiguous = false) {
CodeGenModule &CGM = CGF.CGM;
- ASTContext &Ctx = CGF.getContext();
// Reset the array information.
Info.clearArrayInfo();
Info.NumberOfPtrs = CombinedInfo.BasePointers.size();
- if (Info.NumberOfPtrs) {
- // Detect if we have any capture size requiring runtime evaluation of the
- // size so that a constant array could be eventually used.
- bool hasRuntimeEvaluationCaptureSize = false;
- for (llvm::Value *S : CombinedInfo.Sizes)
- if (!isa<llvm::Constant>(S)) {
- hasRuntimeEvaluationCaptureSize = true;
- break;
- }
+ using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
+ InsertPointTy AllocaIP(CGF.AllocaInsertPt->getParent(),
+ CGF.AllocaInsertPt->getIterator());
+ InsertPointTy CodeGenIP(CGF.Builder.GetInsertBlock(),
+ CGF.Builder.GetInsertPoint());
- llvm::APInt PointerNumAP(32, Info.NumberOfPtrs, /*isSigned=*/true);
- QualType PointerArrayType = Ctx.getConstantArrayType(
- Ctx.VoidPtrTy, PointerNumAP, nullptr, ArrayType::Normal,
- /*IndexTypeQuals=*/0);
-
- Info.BasePointersArray =
- CGF.CreateMemTemp(PointerArrayType, ".offload_baseptrs").getPointer();
- Info.PointersArray =
- CGF.CreateMemTemp(PointerArrayType, ".offload_ptrs").getPointer();
- Address MappersArray =
- CGF.CreateMemTemp(PointerArrayType, ".offload_mappers");
- Info.MappersArray = MappersArray.getPointer();
-
- // If we don't have any VLA types or other types that require runtime
- // evaluation, we can use a constant array for the map sizes, otherwise we
- // need to fill up the arrays as we do for the pointers.
- QualType Int64Ty =
- Ctx.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
- if (hasRuntimeEvaluationCaptureSize) {
- QualType SizeArrayType = Ctx.getConstantArrayType(
- Int64Ty, PointerNumAP, nullptr, ArrayType::Normal,
- /*IndexTypeQuals=*/0);
- Info.SizesArray =
- CGF.CreateMemTemp(SizeArrayType, ".offload_sizes").getPointer();
- } else {
- // We expect all the sizes to be constant, so we collect them to create
- // a constant array.
- SmallVector<llvm::Constant *, 16> ConstSizes;
- for (unsigned I = 0, E = CombinedInfo.Sizes.size(); I < E; ++I) {
- if (IsNonContiguous &&
- (CombinedInfo.Types[I] & MappableExprsHandler::OMP_MAP_NON_CONTIG)) {
- ConstSizes.push_back(llvm::ConstantInt::get(
- CGF.Int64Ty, CombinedInfo.NonContigInfo.Dims[I]));
- } else {
- ConstSizes.push_back(cast<llvm::Constant>(CombinedInfo.Sizes[I]));
- }
- }
+ auto FillInfoMap = [&](MappableExprsHandler::MappingExprInfo &MapExpr) {
+ return emitMappingInformation(CGF, OMPBuilder, MapExpr);
+ };
+ if (CGM.getCodeGenOpts().getDebugInfo() !=
+ llvm::codegenoptions::NoDebugInfo) {
+ CombinedInfo.Names.resize(CombinedInfo.Exprs.size());
+ llvm::transform(CombinedInfo.Exprs, CombinedInfo.Names.begin(),
+ FillInfoMap);
+ }
- auto *SizesArrayInit = llvm::ConstantArray::get(
- llvm::ArrayType::get(CGM.Int64Ty, ConstSizes.size()), ConstSizes);
- std::string Name = CGM.getOpenMPRuntime().getName({"offload_sizes"});
- auto *SizesArrayGbl = new llvm::GlobalVariable(
- CGM.getModule(), SizesArrayInit->getType(),
- /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
- SizesArrayInit, Name);
- SizesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- Info.SizesArray = SizesArrayGbl;
- }
-
- // The map types are always constant so we don't need to generate code to
- // fill arrays. Instead, we create an array constant.
- SmallVector<uint64_t, 4> Mapping(CombinedInfo.Types.size(), 0);
- llvm::copy(CombinedInfo.Types, Mapping.begin());
- std::string MaptypesName =
- CGM.getOpenMPRuntime().getName({"offload_maptypes"});
- auto *MapTypesArrayGbl =
- OMPBuilder.createOffloadMaptypes(Mapping, MaptypesName);
- Info.MapTypesArray = MapTypesArrayGbl;
-
- // The information types are only built if there is debug information
- // requested.
- if (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo) {
- Info.MapNamesArray = llvm::Constant::getNullValue(
- llvm::Type::getInt8Ty(CGF.Builder.getContext())->getPointerTo());
- } else {
- auto fillInfoMap = [&](MappableExprsHandler::MappingExprInfo &MapExpr) {
- return emitMappingInformation(CGF, OMPBuilder, MapExpr);
- };
- SmallVector<llvm::Constant *, 4> InfoMap(CombinedInfo.Exprs.size());
- llvm::transform(CombinedInfo.Exprs, InfoMap.begin(), fillInfoMap);
- std::string MapnamesName =
- CGM.getOpenMPRuntime().getName({"offload_mapnames"});
- auto *MapNamesArrayGbl =
- OMPBuilder.createOffloadMapnames(InfoMap, MapnamesName);
- Info.MapNamesArray = MapNamesArrayGbl;
- }
-
- // If there's a present map type modifier, it must not be applied to the end
- // of a region, so generate a separate map type array in that case.
- if (Info.separateBeginEndCalls()) {
- bool EndMapTypesDiffer = false;
- for (uint64_t &Type : Mapping) {
- if (Type & MappableExprsHandler::OMP_MAP_PRESENT) {
- Type &= ~MappableExprsHandler::OMP_MAP_PRESENT;
- EndMapTypesDiffer = true;
- }
- }
- if (EndMapTypesDiffer) {
- MapTypesArrayGbl =
- OMPBuilder.createOffloadMaptypes(Mapping, MaptypesName);
- Info.MapTypesArrayEnd = MapTypesArrayGbl;
- }
+ auto DeviceAddrCB = [&](unsigned int I, llvm::Value *NewDecl) {
+ if (const ValueDecl *DevVD = CombinedInfo.DevicePtrDecls[I]) {
+ Info.CaptureDeviceAddrMap.try_emplace(DevVD, NewDecl);
}
+ };
- for (unsigned I = 0; I < Info.NumberOfPtrs; ++I) {
- llvm::Value *BPVal = *CombinedInfo.BasePointers[I];
- llvm::Value *BP = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
- Info.BasePointersArray, 0, I);
- BP = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- BP, BPVal->getType()->getPointerTo(/*AddrSpace=*/0));
- Address BPAddr(BP, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
- CGF.Builder.CreateStore(BPVal, BPAddr);
-
- if (Info.requiresDevicePointerInfo())
- if (const ValueDecl *DevVD =
- CombinedInfo.BasePointers[I].getDevicePtrDecl())
- Info.CaptureDeviceAddrMap.try_emplace(DevVD, BPAddr);
-
- llvm::Value *PVal = CombinedInfo.Pointers[I];
- llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
- Info.PointersArray, 0, I);
- P = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- P, PVal->getType()->getPointerTo(/*AddrSpace=*/0));
- Address PAddr(P, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
- CGF.Builder.CreateStore(PVal, PAddr);
-
- if (hasRuntimeEvaluationCaptureSize) {
- llvm::Value *S = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
- Info.SizesArray,
- /*Idx0=*/0,
- /*Idx1=*/I);
- Address SAddr(S, Ctx.getTypeAlignInChars(Int64Ty));
- CGF.Builder.CreateStore(CGF.Builder.CreateIntCast(CombinedInfo.Sizes[I],
- CGM.Int64Ty,
- /*isSigned=*/true),
- SAddr);
- }
-
- // Fill up the mapper array.
- llvm::Value *MFunc = llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
- if (CombinedInfo.Mappers[I]) {
- MFunc = CGM.getOpenMPRuntime().getOrCreateUserDefinedMapperFunc(
- cast<OMPDeclareMapperDecl>(CombinedInfo.Mappers[I]));
- MFunc = CGF.Builder.CreatePointerCast(MFunc, CGM.VoidPtrTy);
- Info.HasMapper = true;
- }
- Address MAddr = CGF.Builder.CreateConstArrayGEP(MappersArray, I);
- CGF.Builder.CreateStore(MFunc, MAddr);
+ auto CustomMapperCB = [&](unsigned int I) {
+ llvm::Value *MFunc = nullptr;
+ if (CombinedInfo.Mappers[I]) {
+ Info.HasMapper = true;
+ MFunc = CGF.CGM.getOpenMPRuntime().getOrCreateUserDefinedMapperFunc(
+ cast<OMPDeclareMapperDecl>(CombinedInfo.Mappers[I]));
}
- }
-
- if (!IsNonContiguous || CombinedInfo.NonContigInfo.Offsets.empty() ||
- Info.NumberOfPtrs == 0)
- return;
-
- emitNonContiguousDescriptor(CGF, CombinedInfo, Info);
-}
-
-namespace {
-/// Additional arguments for emitOffloadingArraysArgument function.
-struct ArgumentsOptions {
- bool ForEndCall = false;
- ArgumentsOptions() = default;
- ArgumentsOptions(bool ForEndCall) : ForEndCall(ForEndCall) {}
-};
-} // namespace
-
-/// Emit the arguments to be passed to the runtime library based on the
-/// arrays of base pointers, pointers, sizes, map types, and mappers. If
-/// ForEndCall, emit map types to be passed for the end of the region instead of
-/// the beginning.
-static void emitOffloadingArraysArgument(
- CodeGenFunction &CGF, llvm::Value *&BasePointersArrayArg,
- llvm::Value *&PointersArrayArg, llvm::Value *&SizesArrayArg,
- llvm::Value *&MapTypesArrayArg, llvm::Value *&MapNamesArrayArg,
- llvm::Value *&MappersArrayArg, CGOpenMPRuntime::TargetDataInfo &Info,
- const ArgumentsOptions &Options = ArgumentsOptions()) {
- assert((!Options.ForEndCall || Info.separateBeginEndCalls()) &&
- "expected region end call to runtime only when end call is separate");
- CodeGenModule &CGM = CGF.CGM;
- if (Info.NumberOfPtrs) {
- BasePointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
- Info.BasePointersArray,
- /*Idx0=*/0, /*Idx1=*/0);
- PointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
- Info.PointersArray,
- /*Idx0=*/0,
- /*Idx1=*/0);
- SizesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs), Info.SizesArray,
- /*Idx0=*/0, /*Idx1=*/0);
- MapTypesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
- Options.ForEndCall && Info.MapTypesArrayEnd ? Info.MapTypesArrayEnd
- : Info.MapTypesArray,
- /*Idx0=*/0,
- /*Idx1=*/0);
-
- // Only emit the mapper information arrays if debug information is
- // requested.
- if (CGF.CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo)
- MapNamesArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
- else
- MapNamesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
- Info.MapNamesArray,
- /*Idx0=*/0,
- /*Idx1=*/0);
- // If there is no user-defined mapper, set the mapper array to nullptr to
- // avoid an unnecessary data privatization
- if (!Info.HasMapper)
- MappersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
- else
- MappersArrayArg =
- CGF.Builder.CreatePointerCast(Info.MappersArray, CGM.VoidPtrPtrTy);
- } else {
- BasePointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
- PointersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
- SizesArrayArg = llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
- MapTypesArrayArg =
- llvm::ConstantPointerNull::get(CGM.Int64Ty->getPointerTo());
- MapNamesArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
- MappersArrayArg = llvm::ConstantPointerNull::get(CGM.VoidPtrPtrTy);
- }
+ return MFunc;
+ };
+ OMPBuilder.emitOffloadingArrays(AllocaIP, CodeGenIP, CombinedInfo, Info,
+ /*IsNonContiguous=*/true, DeviceAddrCB,
+ CustomMapperCB);
}
/// Check for inner distribute directive.
@@ -9724,7 +8876,8 @@ getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
switch (D.getDirectiveKind()) {
case OMPD_target:
- if (isOpenMPDistributeDirective(DKind))
+ // For now, just treat 'target teams loop' as if it's distributed.
+ if (isOpenMPDistributeDirective(DKind) || DKind == OMPD_teams_loop)
return NestedDir;
if (DKind == OMPD_teams) {
Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
@@ -9810,6 +8963,7 @@ getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
case OMPD_parallel_master_taskloop:
case OMPD_parallel_master_taskloop_simd:
case OMPD_requires:
+ case OMPD_metadirective:
case OMPD_unknown:
default:
llvm_unreachable("Unexpected directive.");
@@ -9863,20 +9017,21 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
cast<VarDecl>(cast<DeclRefExpr>(D->getMapperVarRef())->getDecl());
SourceLocation Loc = D->getLocation();
CharUnits ElementSize = C.getTypeSizeInChars(Ty);
+ llvm::Type *ElemTy = CGM.getTypes().ConvertTypeForMem(Ty);
// Prepare mapper function arguments and attributes.
ImplicitParamDecl HandleArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
+ C.VoidPtrTy, ImplicitParamKind::Other);
ImplicitParamDecl BaseArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
ImplicitParamDecl BeginArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
+ C.VoidPtrTy, ImplicitParamKind::Other);
ImplicitParamDecl SizeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int64Ty,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
ImplicitParamDecl TypeArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, Int64Ty,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
ImplicitParamDecl NameArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.VoidPtrTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
FunctionArgList Args;
Args.push_back(&HandleArg);
Args.push_back(&BaseArg);
@@ -9889,7 +9044,7 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
SmallString<64> TyStr;
llvm::raw_svector_ostream Out(TyStr);
- CGM.getCXXABI().getMangleContext().mangleTypeName(Ty, Out);
+ CGM.getCXXABI().getMangleContext().mangleCanonicalTypeName(Ty, Out);
std::string Name = getName({"omp_mapper", TyStr, D->getName()});
auto *Fn = llvm::Function::Create(FnTy, llvm::GlobalValue::InternalLinkage,
Name, &CGM.getModule());
@@ -9917,8 +9072,7 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
Size, MapperCGF.Builder.getInt64(ElementSize.getQuantity()));
llvm::Value *PtrBegin = MapperCGF.Builder.CreateBitCast(
BeginIn, CGM.getTypes().ConvertTypeForMem(PtrTy));
- llvm::Value *PtrEnd = MapperCGF.Builder.CreateGEP(
- PtrBegin->getType()->getPointerElementType(), PtrBegin, Size);
+ llvm::Value *PtrEnd = MapperCGF.Builder.CreateGEP(ElemTy, PtrBegin, Size);
llvm::Value *MapType = MapperCGF.EmitLoadOfScalar(
MapperCGF.GetAddrOfLocalVar(&TypeArg), /*Volatile=*/false,
C.getPointerType(Int64Ty), Loc);
@@ -9950,19 +9104,19 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
llvm::PHINode *PtrPHI = MapperCGF.Builder.CreatePHI(
PtrBegin->getType(), 2, "omp.arraymap.ptrcurrent");
PtrPHI->addIncoming(PtrBegin, EntryBB);
- Address PtrCurrent =
- Address(PtrPHI, MapperCGF.GetAddrOfLocalVar(&BeginArg)
- .getAlignment()
- .alignmentOfArrayElement(ElementSize));
+ Address PtrCurrent(PtrPHI, ElemTy,
+ MapperCGF.GetAddrOfLocalVar(&BeginArg)
+ .getAlignment()
+ .alignmentOfArrayElement(ElementSize));
// Privatize the declared variable of mapper to be the current array element.
CodeGenFunction::OMPPrivateScope Scope(MapperCGF);
- Scope.addPrivate(MapperVarDecl, [PtrCurrent]() { return PtrCurrent; });
+ Scope.addPrivate(MapperVarDecl, PtrCurrent);
(void)Scope.Privatize();
// Get map clause information. Fill up the arrays with all mapped variables.
MappableExprsHandler::MapCombinedInfoTy Info;
MappableExprsHandler MEHandler(*D, MapperCGF);
- MEHandler.generateAllInfoForMapper(Info);
+ MEHandler.generateAllInfoForMapper(Info, OMPBuilder);
// Call the runtime API __tgt_mapper_num_components to get the number of
// pre-existing components.
@@ -9978,17 +9132,20 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
// Fill up the runtime mapper handle for all components.
for (unsigned I = 0; I < Info.BasePointers.size(); ++I) {
llvm::Value *CurBaseArg = MapperCGF.Builder.CreateBitCast(
- *Info.BasePointers[I], CGM.getTypes().ConvertTypeForMem(C.VoidPtrTy));
+ Info.BasePointers[I], CGM.getTypes().ConvertTypeForMem(C.VoidPtrTy));
llvm::Value *CurBeginArg = MapperCGF.Builder.CreateBitCast(
Info.Pointers[I], CGM.getTypes().ConvertTypeForMem(C.VoidPtrTy));
llvm::Value *CurSizeArg = Info.Sizes[I];
llvm::Value *CurNameArg =
- (CGM.getCodeGenOpts().getDebugInfo() == codegenoptions::NoDebugInfo)
+ (CGM.getCodeGenOpts().getDebugInfo() ==
+ llvm::codegenoptions::NoDebugInfo)
? llvm::ConstantPointerNull::get(CGM.VoidPtrTy)
: emitMappingInformation(MapperCGF, OMPBuilder, Info.Exprs[I]);
// Extract the MEMBER_OF field from the map type.
- llvm::Value *OriMapType = MapperCGF.Builder.getInt64(Info.Types[I]);
+ llvm::Value *OriMapType = MapperCGF.Builder.getInt64(
+ static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
+ Info.Types[I]));
llvm::Value *MemberMapType =
MapperCGF.Builder.CreateNUWAdd(OriMapType, ShiftedPreviousSize);
@@ -10006,8 +9163,10 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
// tofrom | alloc | to | from | tofrom | release | delete
llvm::Value *LeftToFrom = MapperCGF.Builder.CreateAnd(
MapType,
- MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_TO |
- MappableExprsHandler::OMP_MAP_FROM));
+ MapperCGF.Builder.getInt64(
+ static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
+ OpenMPOffloadMappingFlags::OMP_MAP_TO |
+ OpenMPOffloadMappingFlags::OMP_MAP_FROM)));
llvm::BasicBlock *AllocBB = MapperCGF.createBasicBlock("omp.type.alloc");
llvm::BasicBlock *AllocElseBB =
MapperCGF.createBasicBlock("omp.type.alloc.else");
@@ -10021,30 +9180,40 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
MapperCGF.EmitBlock(AllocBB);
llvm::Value *AllocMapType = MapperCGF.Builder.CreateAnd(
MemberMapType,
- MapperCGF.Builder.getInt64(~(MappableExprsHandler::OMP_MAP_TO |
- MappableExprsHandler::OMP_MAP_FROM)));
+ MapperCGF.Builder.getInt64(
+ ~static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
+ OpenMPOffloadMappingFlags::OMP_MAP_TO |
+ OpenMPOffloadMappingFlags::OMP_MAP_FROM)));
MapperCGF.Builder.CreateBr(EndBB);
MapperCGF.EmitBlock(AllocElseBB);
llvm::Value *IsTo = MapperCGF.Builder.CreateICmpEQ(
LeftToFrom,
- MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_TO));
+ MapperCGF.Builder.getInt64(
+ static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
+ OpenMPOffloadMappingFlags::OMP_MAP_TO)));
MapperCGF.Builder.CreateCondBr(IsTo, ToBB, ToElseBB);
// In case of to, clear OMP_MAP_FROM.
MapperCGF.EmitBlock(ToBB);
llvm::Value *ToMapType = MapperCGF.Builder.CreateAnd(
MemberMapType,
- MapperCGF.Builder.getInt64(~MappableExprsHandler::OMP_MAP_FROM));
+ MapperCGF.Builder.getInt64(
+ ~static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
+ OpenMPOffloadMappingFlags::OMP_MAP_FROM)));
MapperCGF.Builder.CreateBr(EndBB);
MapperCGF.EmitBlock(ToElseBB);
llvm::Value *IsFrom = MapperCGF.Builder.CreateICmpEQ(
LeftToFrom,
- MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_FROM));
+ MapperCGF.Builder.getInt64(
+ static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
+ OpenMPOffloadMappingFlags::OMP_MAP_FROM)));
MapperCGF.Builder.CreateCondBr(IsFrom, FromBB, EndBB);
// In case of from, clear OMP_MAP_TO.
MapperCGF.EmitBlock(FromBB);
llvm::Value *FromMapType = MapperCGF.Builder.CreateAnd(
MemberMapType,
- MapperCGF.Builder.getInt64(~MappableExprsHandler::OMP_MAP_TO));
+ MapperCGF.Builder.getInt64(
+ ~static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
+ OpenMPOffloadMappingFlags::OMP_MAP_TO)));
// In case of tofrom, do nothing.
MapperCGF.EmitBlock(EndBB);
LastBB = EndBB;
@@ -10075,7 +9244,6 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
// Update the pointer to point to the next element that needs to be mapped,
// and check whether we have mapped all elements.
- llvm::Type *ElemTy = PtrPHI->getType()->getPointerElementType();
llvm::Value *PtrNext = MapperCGF.Builder.CreateConstGEP1_32(
ElemTy, PtrPHI, /*Idx0=*/1, "omp.arraymap.next");
PtrPHI->addIncoming(PtrNext, LastBB);
@@ -10120,17 +9288,20 @@ void CGOpenMPRuntime::emitUDMapperArrayInitOrDel(
Size, MapperCGF.Builder.getInt64(1), "omp.arrayinit.isarray");
llvm::Value *DeleteBit = MapperCGF.Builder.CreateAnd(
MapType,
- MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_DELETE));
+ MapperCGF.Builder.getInt64(
+ static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
+ OpenMPOffloadMappingFlags::OMP_MAP_DELETE)));
llvm::Value *DeleteCond;
llvm::Value *Cond;
if (IsInit) {
// base != begin?
- llvm::Value *BaseIsBegin = MapperCGF.Builder.CreateIsNotNull(
- MapperCGF.Builder.CreatePtrDiff(Base, Begin));
+ llvm::Value *BaseIsBegin = MapperCGF.Builder.CreateICmpNE(Base, Begin);
// IsPtrAndObj?
llvm::Value *PtrAndObjBit = MapperCGF.Builder.CreateAnd(
MapType,
- MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_PTR_AND_OBJ));
+ MapperCGF.Builder.getInt64(
+ static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
+ OpenMPOffloadMappingFlags::OMP_MAP_PTR_AND_OBJ)));
PtrAndObjBit = MapperCGF.Builder.CreateIsNotNull(PtrAndObjBit);
BaseIsBegin = MapperCGF.Builder.CreateAnd(BaseIsBegin, PtrAndObjBit);
Cond = MapperCGF.Builder.CreateOr(IsArray, BaseIsBegin);
@@ -10153,11 +9324,15 @@ void CGOpenMPRuntime::emitUDMapperArrayInitOrDel(
// memory allocation/deletion purpose only.
llvm::Value *MapTypeArg = MapperCGF.Builder.CreateAnd(
MapType,
- MapperCGF.Builder.getInt64(~(MappableExprsHandler::OMP_MAP_TO |
- MappableExprsHandler::OMP_MAP_FROM)));
+ MapperCGF.Builder.getInt64(
+ ~static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
+ OpenMPOffloadMappingFlags::OMP_MAP_TO |
+ OpenMPOffloadMappingFlags::OMP_MAP_FROM)));
MapTypeArg = MapperCGF.Builder.CreateOr(
MapTypeArg,
- MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_IMPLICIT));
+ MapperCGF.Builder.getInt64(
+ static_cast<std::underlying_type_t<OpenMPOffloadMappingFlags>>(
+ OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT)));
// Call the runtime API __tgt_push_mapper_component to fill up the runtime
// data structure.
@@ -10178,32 +9353,274 @@ llvm::Function *CGOpenMPRuntime::getOrCreateUserDefinedMapperFunc(
return UDMMap.lookup(D);
}
-void CGOpenMPRuntime::emitTargetNumIterationsCall(
+llvm::Value *CGOpenMPRuntime::emitTargetNumIterationsCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
- llvm::Value *DeviceID,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter) {
OpenMPDirectiveKind Kind = D.getDirectiveKind();
const OMPExecutableDirective *TD = &D;
// Get nested teams distribute kind directive, if any.
- if (!isOpenMPDistributeDirective(Kind) || !isOpenMPTeamsDirective(Kind))
+ if ((!isOpenMPDistributeDirective(Kind) || !isOpenMPTeamsDirective(Kind)) &&
+ Kind != OMPD_target_teams_loop)
TD = getNestedDistributeDirective(CGM.getContext(), D);
if (!TD)
- return;
+ return llvm::ConstantInt::get(CGF.Int64Ty, 0);
+
const auto *LD = cast<OMPLoopDirective>(TD);
- auto &&CodeGen = [LD, DeviceID, SizeEmitter, &D, this](CodeGenFunction &CGF,
- PrePostActionTy &) {
- if (llvm::Value *NumIterations = SizeEmitter(CGF, *LD)) {
- llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
- llvm::Value *Args[] = {RTLoc, DeviceID, NumIterations};
- CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_push_target_tripcount_mapper),
- Args);
+ if (llvm::Value *NumIterations = SizeEmitter(CGF, *LD))
+ return NumIterations;
+ return llvm::ConstantInt::get(CGF.Int64Ty, 0);
+}
+
+static void
+emitTargetCallFallback(CGOpenMPRuntime *OMPRuntime, llvm::Function *OutlinedFn,
+ const OMPExecutableDirective &D,
+ llvm::SmallVectorImpl<llvm::Value *> &CapturedVars,
+ bool RequiresOuterTask, const CapturedStmt &CS,
+ bool OffloadingMandatory, CodeGenFunction &CGF) {
+ if (OffloadingMandatory) {
+ CGF.Builder.CreateUnreachable();
+ } else {
+ if (RequiresOuterTask) {
+ CapturedVars.clear();
+ CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
+ }
+ OMPRuntime->emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn,
+ CapturedVars);
+ }
+}
+
+static llvm::Value *emitDeviceID(
+ llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
+ CodeGenFunction &CGF) {
+ // Emit device ID if any.
+ llvm::Value *DeviceID;
+ if (Device.getPointer()) {
+ assert((Device.getInt() == OMPC_DEVICE_unknown ||
+ Device.getInt() == OMPC_DEVICE_device_num) &&
+ "Expected device_num modifier.");
+ llvm::Value *DevVal = CGF.EmitScalarExpr(Device.getPointer());
+ DeviceID =
+ CGF.Builder.CreateIntCast(DevVal, CGF.Int64Ty, /*isSigned=*/true);
+ } else {
+ DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
+ }
+ return DeviceID;
+}
+
+llvm::Value *emitDynCGGroupMem(const OMPExecutableDirective &D,
+ CodeGenFunction &CGF) {
+ llvm::Value *DynCGroupMem = CGF.Builder.getInt32(0);
+
+ if (auto *DynMemClause = D.getSingleClause<OMPXDynCGroupMemClause>()) {
+ CodeGenFunction::RunCleanupsScope DynCGroupMemScope(CGF);
+ llvm::Value *DynCGroupMemVal = CGF.EmitScalarExpr(
+ DynMemClause->getSize(), /*IgnoreResultAssign=*/true);
+ DynCGroupMem = CGF.Builder.CreateIntCast(DynCGroupMemVal, CGF.Int32Ty,
+ /*isSigned=*/false);
+ }
+ return DynCGroupMem;
+}
+
+static void emitTargetCallKernelLaunch(
+ CGOpenMPRuntime *OMPRuntime, llvm::Function *OutlinedFn,
+ const OMPExecutableDirective &D,
+ llvm::SmallVectorImpl<llvm::Value *> &CapturedVars, bool RequiresOuterTask,
+ const CapturedStmt &CS, bool OffloadingMandatory,
+ llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
+ llvm::Value *OutlinedFnID, CodeGenFunction::OMPTargetDataInfo &InputInfo,
+ llvm::Value *&MapTypesArray, llvm::Value *&MapNamesArray,
+ llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
+ const OMPLoopDirective &D)>
+ SizeEmitter,
+ CodeGenFunction &CGF, CodeGenModule &CGM) {
+ llvm::OpenMPIRBuilder &OMPBuilder = OMPRuntime->getOMPBuilder();
+
+ // Fill up the arrays with all the captured variables.
+ MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
+
+ // Get mappable expression information.
+ MappableExprsHandler MEHandler(D, CGF);
+ llvm::DenseMap<llvm::Value *, llvm::Value *> LambdaPointers;
+ llvm::DenseSet<CanonicalDeclPtr<const Decl>> MappedVarSet;
+
+ auto RI = CS.getCapturedRecordDecl()->field_begin();
+ auto *CV = CapturedVars.begin();
+ for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(),
+ CE = CS.capture_end();
+ CI != CE; ++CI, ++RI, ++CV) {
+ MappableExprsHandler::MapCombinedInfoTy CurInfo;
+ MappableExprsHandler::StructRangeInfoTy PartialStruct;
+
+ // VLA sizes are passed to the outlined region by copy and do not have map
+ // information associated.
+ if (CI->capturesVariableArrayType()) {
+ CurInfo.Exprs.push_back(nullptr);
+ CurInfo.BasePointers.push_back(*CV);
+ CurInfo.DevicePtrDecls.push_back(nullptr);
+ CurInfo.DevicePointers.push_back(
+ MappableExprsHandler::DeviceInfoTy::None);
+ CurInfo.Pointers.push_back(*CV);
+ CurInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
+ CGF.getTypeSize(RI->getType()), CGF.Int64Ty, /*isSigned=*/true));
+ // Copy to the device as an argument. No need to retrieve it.
+ CurInfo.Types.push_back(OpenMPOffloadMappingFlags::OMP_MAP_LITERAL |
+ OpenMPOffloadMappingFlags::OMP_MAP_TARGET_PARAM |
+ OpenMPOffloadMappingFlags::OMP_MAP_IMPLICIT);
+ CurInfo.Mappers.push_back(nullptr);
+ } else {
+ // If we have any information in the map clause, we use it, otherwise we
+ // just do a default mapping.
+ MEHandler.generateInfoForCapture(CI, *CV, CurInfo, PartialStruct);
+ if (!CI->capturesThis())
+ MappedVarSet.insert(CI->getCapturedVar());
+ else
+ MappedVarSet.insert(nullptr);
+ if (CurInfo.BasePointers.empty() && !PartialStruct.Base.isValid())
+ MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurInfo);
+ // Generate correct mapping for variables captured by reference in
+ // lambdas.
+ if (CI->capturesVariable())
+ MEHandler.generateInfoForLambdaCaptures(CI->getCapturedVar(), *CV,
+ CurInfo, LambdaPointers);
+ }
+ // We expect to have at least an element of information for this capture.
+ assert((!CurInfo.BasePointers.empty() || PartialStruct.Base.isValid()) &&
+ "Non-existing map pointer for capture!");
+ assert(CurInfo.BasePointers.size() == CurInfo.Pointers.size() &&
+ CurInfo.BasePointers.size() == CurInfo.Sizes.size() &&
+ CurInfo.BasePointers.size() == CurInfo.Types.size() &&
+ CurInfo.BasePointers.size() == CurInfo.Mappers.size() &&
+ "Inconsistent map information sizes!");
+
+ // If there is an entry in PartialStruct it means we have a struct with
+ // individual members mapped. Emit an extra combined entry.
+ if (PartialStruct.Base.isValid()) {
+ CombinedInfo.append(PartialStruct.PreliminaryMapData);
+ MEHandler.emitCombinedEntry(
+ CombinedInfo, CurInfo.Types, PartialStruct, CI->capturesThis(),
+ OMPBuilder, nullptr,
+ !PartialStruct.PreliminaryMapData.BasePointers.empty());
+ }
+
+ // We need to append the results of this capture to what we already have.
+ CombinedInfo.append(CurInfo);
+ }
+ // Adjust MEMBER_OF flags for the lambdas captures.
+ MEHandler.adjustMemberOfForLambdaCaptures(
+ OMPBuilder, LambdaPointers, CombinedInfo.BasePointers,
+ CombinedInfo.Pointers, CombinedInfo.Types);
+ // Map any list items in a map clause that were not captures because they
+ // weren't referenced within the construct.
+ MEHandler.generateAllInfo(CombinedInfo, OMPBuilder, MappedVarSet);
+
+ CGOpenMPRuntime::TargetDataInfo Info;
+ // Fill up the arrays and create the arguments.
+ emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder);
+ bool EmitDebug = CGF.CGM.getCodeGenOpts().getDebugInfo() !=
+ llvm::codegenoptions::NoDebugInfo;
+ OMPBuilder.emitOffloadingArraysArgument(CGF.Builder, Info.RTArgs, Info,
+ EmitDebug,
+ /*ForEndCall=*/false);
+
+ InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
+ InputInfo.BasePointersArray = Address(Info.RTArgs.BasePointersArray,
+ CGF.VoidPtrTy, CGM.getPointerAlign());
+ InputInfo.PointersArray =
+ Address(Info.RTArgs.PointersArray, CGF.VoidPtrTy, CGM.getPointerAlign());
+ InputInfo.SizesArray =
+ Address(Info.RTArgs.SizesArray, CGF.Int64Ty, CGM.getPointerAlign());
+ InputInfo.MappersArray =
+ Address(Info.RTArgs.MappersArray, CGF.VoidPtrTy, CGM.getPointerAlign());
+ MapTypesArray = Info.RTArgs.MapTypesArray;
+ MapNamesArray = Info.RTArgs.MapNamesArray;
+
+ auto &&ThenGen = [&OMPRuntime, OutlinedFn, &D, &CapturedVars,
+ RequiresOuterTask, &CS, OffloadingMandatory, Device,
+ OutlinedFnID, &InputInfo, &MapTypesArray, &MapNamesArray,
+ SizeEmitter](CodeGenFunction &CGF, PrePostActionTy &) {
+ bool IsReverseOffloading = Device.getInt() == OMPC_DEVICE_ancestor;
+
+ if (IsReverseOffloading) {
+ // Reverse offloading is not supported, so just execute on the host.
+ // FIXME: This fallback solution is incorrect since it ignores the
+ // OMP_TARGET_OFFLOAD environment variable. Instead it would be better to
+ // assert here and ensure SEMA emits an error.
+ emitTargetCallFallback(OMPRuntime, OutlinedFn, D, CapturedVars,
+ RequiresOuterTask, CS, OffloadingMandatory, CGF);
+ return;
}
+
+ bool HasNoWait = D.hasClausesOfKind<OMPNowaitClause>();
+ unsigned NumTargetItems = InputInfo.NumberOfTargetItems;
+
+ llvm::Value *BasePointersArray = InputInfo.BasePointersArray.getPointer();
+ llvm::Value *PointersArray = InputInfo.PointersArray.getPointer();
+ llvm::Value *SizesArray = InputInfo.SizesArray.getPointer();
+ llvm::Value *MappersArray = InputInfo.MappersArray.getPointer();
+
+ auto &&EmitTargetCallFallbackCB =
+ [&OMPRuntime, OutlinedFn, &D, &CapturedVars, RequiresOuterTask, &CS,
+ OffloadingMandatory, &CGF](llvm::OpenMPIRBuilder::InsertPointTy IP)
+ -> llvm::OpenMPIRBuilder::InsertPointTy {
+ CGF.Builder.restoreIP(IP);
+ emitTargetCallFallback(OMPRuntime, OutlinedFn, D, CapturedVars,
+ RequiresOuterTask, CS, OffloadingMandatory, CGF);
+ return CGF.Builder.saveIP();
+ };
+
+ llvm::Value *DeviceID = emitDeviceID(Device, CGF);
+ llvm::Value *NumTeams = OMPRuntime->emitNumTeamsForTargetDirective(CGF, D);
+ llvm::Value *NumThreads =
+ OMPRuntime->emitNumThreadsForTargetDirective(CGF, D);
+ llvm::Value *RTLoc = OMPRuntime->emitUpdateLocation(CGF, D.getBeginLoc());
+ llvm::Value *NumIterations =
+ OMPRuntime->emitTargetNumIterationsCall(CGF, D, SizeEmitter);
+ llvm::Value *DynCGGroupMem = emitDynCGGroupMem(D, CGF);
+ llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
+ CGF.AllocaInsertPt->getParent(), CGF.AllocaInsertPt->getIterator());
+
+ llvm::OpenMPIRBuilder::TargetDataRTArgs RTArgs(
+ BasePointersArray, PointersArray, SizesArray, MapTypesArray,
+ nullptr /* MapTypesArrayEnd */, MappersArray, MapNamesArray);
+
+ llvm::OpenMPIRBuilder::TargetKernelArgs Args(
+ NumTargetItems, RTArgs, NumIterations, NumTeams, NumThreads,
+ DynCGGroupMem, HasNoWait);
+
+ CGF.Builder.restoreIP(OMPRuntime->getOMPBuilder().emitKernelLaunch(
+ CGF.Builder, OutlinedFn, OutlinedFnID, EmitTargetCallFallbackCB, Args,
+ DeviceID, RTLoc, AllocaIP));
};
- emitInlinedDirective(CGF, OMPD_unknown, CodeGen);
+
+ if (RequiresOuterTask)
+ CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
+ else
+ OMPRuntime->emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
+}
+
+static void
+emitTargetCallElse(CGOpenMPRuntime *OMPRuntime, llvm::Function *OutlinedFn,
+ const OMPExecutableDirective &D,
+ llvm::SmallVectorImpl<llvm::Value *> &CapturedVars,
+ bool RequiresOuterTask, const CapturedStmt &CS,
+ bool OffloadingMandatory, CodeGenFunction &CGF) {
+
+ // Notify that the host version must be executed.
+ auto &&ElseGen =
+ [&OMPRuntime, OutlinedFn, &D, &CapturedVars, RequiresOuterTask, &CS,
+ OffloadingMandatory](CodeGenFunction &CGF, PrePostActionTy &) {
+ emitTargetCallFallback(OMPRuntime, OutlinedFn, D, CapturedVars,
+ RequiresOuterTask, CS, OffloadingMandatory, CGF);
+ };
+
+ if (RequiresOuterTask) {
+ CodeGenFunction::OMPTargetDataInfo InputInfo;
+ CGF.EmitOMPTargetTaskBasedDirective(D, ElseGen, InputInfo);
+ } else {
+ OMPRuntime->emitInlinedDirective(CGF, D.getDirectiveKind(), ElseGen);
+ }
}
void CGOpenMPRuntime::emitTargetCall(
@@ -10216,10 +9633,18 @@ void CGOpenMPRuntime::emitTargetCall(
if (!CGF.HaveInsertPoint())
return;
- assert(OutlinedFn && "Invalid outlined function!");
+ const bool OffloadingMandatory = !CGM.getLangOpts().OpenMPIsTargetDevice &&
+ CGM.getLangOpts().OpenMPOffloadMandatory;
- const bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>() ||
- D.hasClausesOfKind<OMPNowaitClause>();
+ assert((OffloadingMandatory || OutlinedFn) && "Invalid outlined function!");
+
+ const bool RequiresOuterTask =
+ D.hasClausesOfKind<OMPDependClause>() ||
+ D.hasClausesOfKind<OMPNowaitClause>() ||
+ D.hasClausesOfKind<OMPInReductionClause>() ||
+ (CGM.getLangOpts().OpenMP >= 51 &&
+ needsTaskBasedThreadLimit(D.getDirectiveKind()) &&
+ D.hasClausesOfKind<OMPThreadLimitClause>());
llvm::SmallVector<llvm::Value *, 16> CapturedVars;
const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
auto &&ArgsCodegen = [&CS, &CapturedVars](CodeGenFunction &CGF,
@@ -10231,291 +9656,24 @@ void CGOpenMPRuntime::emitTargetCall(
CodeGenFunction::OMPTargetDataInfo InputInfo;
llvm::Value *MapTypesArray = nullptr;
llvm::Value *MapNamesArray = nullptr;
- // Fill up the pointer arrays and transfer execution to the device.
- auto &&ThenGen = [this, Device, OutlinedFn, OutlinedFnID, &D, &InputInfo,
- &MapTypesArray, &MapNamesArray, &CS, RequiresOuterTask,
- &CapturedVars,
- SizeEmitter](CodeGenFunction &CGF, PrePostActionTy &) {
- if (Device.getInt() == OMPC_DEVICE_ancestor) {
- // Reverse offloading is not supported, so just execute on the host.
- if (RequiresOuterTask) {
- CapturedVars.clear();
- CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
- }
- emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
- return;
- }
-
- // On top of the arrays that were filled up, the target offloading call
- // takes as arguments the device id as well as the host pointer. The host
- // pointer is used by the runtime library to identify the current target
- // region, so it only has to be unique and not necessarily point to
- // anything. It could be the pointer to the outlined function that
- // implements the target region, but we aren't using that so that the
- // compiler doesn't need to keep that, and could therefore inline the host
- // function if proven worthwhile during optimization.
-
- // From this point on, we need to have an ID of the target region defined.
- assert(OutlinedFnID && "Invalid outlined function ID!");
-
- // Emit device ID if any.
- llvm::Value *DeviceID;
- if (Device.getPointer()) {
- assert((Device.getInt() == OMPC_DEVICE_unknown ||
- Device.getInt() == OMPC_DEVICE_device_num) &&
- "Expected device_num modifier.");
- llvm::Value *DevVal = CGF.EmitScalarExpr(Device.getPointer());
- DeviceID =
- CGF.Builder.CreateIntCast(DevVal, CGF.Int64Ty, /*isSigned=*/true);
- } else {
- DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
- }
-
- // Emit the number of elements in the offloading arrays.
- llvm::Value *PointerNum =
- CGF.Builder.getInt32(InputInfo.NumberOfTargetItems);
- // Return value of the runtime offloading call.
- llvm::Value *Return;
-
- llvm::Value *NumTeams = emitNumTeamsForTargetDirective(CGF, D);
- llvm::Value *NumThreads = emitNumThreadsForTargetDirective(CGF, D);
-
- // Source location for the ident struct
- llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
-
- // Emit tripcount for the target loop-based directive.
- emitTargetNumIterationsCall(CGF, D, DeviceID, SizeEmitter);
-
- bool HasNowait = D.hasClausesOfKind<OMPNowaitClause>();
- // The target region is an outlined function launched by the runtime
- // via calls __tgt_target() or __tgt_target_teams().
- //
- // __tgt_target() launches a target region with one team and one thread,
- // executing a serial region. This master thread may in turn launch
- // more threads within its team upon encountering a parallel region,
- // however, no additional teams can be launched on the device.
- //
- // __tgt_target_teams() launches a target region with one or more teams,
- // each with one or more threads. This call is required for target
- // constructs such as:
- // 'target teams'
- // 'target' / 'teams'
- // 'target teams distribute parallel for'
- // 'target parallel'
- // and so on.
- //
- // Note that on the host and CPU targets, the runtime implementation of
- // these calls simply call the outlined function without forking threads.
- // The outlined functions themselves have runtime calls to
- // __kmpc_fork_teams() and __kmpc_fork() for this purpose, codegen'd by
- // the compiler in emitTeamsCall() and emitParallelCall().
- //
- // In contrast, on the NVPTX target, the implementation of
- // __tgt_target_teams() launches a GPU kernel with the requested number
- // of teams and threads so no additional calls to the runtime are required.
- if (NumTeams) {
- // If we have NumTeams defined this means that we have an enclosed teams
- // region. Therefore we also expect to have NumThreads defined. These two
- // values should be defined in the presence of a teams directive,
- // regardless of having any clauses associated. If the user is using teams
- // but no clauses, these two values will be the default that should be
- // passed to the runtime library - a 32-bit integer with the value zero.
- assert(NumThreads && "Thread limit expression should be available along "
- "with number of teams.");
- SmallVector<llvm::Value *> OffloadingArgs = {
- RTLoc,
- DeviceID,
- OutlinedFnID,
- PointerNum,
- InputInfo.BasePointersArray.getPointer(),
- InputInfo.PointersArray.getPointer(),
- InputInfo.SizesArray.getPointer(),
- MapTypesArray,
- MapNamesArray,
- InputInfo.MappersArray.getPointer(),
- NumTeams,
- NumThreads};
- if (HasNowait) {
- // Add int32_t depNum = 0, void *depList = nullptr, int32_t
- // noAliasDepNum = 0, void *noAliasDepList = nullptr.
- OffloadingArgs.push_back(CGF.Builder.getInt32(0));
- OffloadingArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
- OffloadingArgs.push_back(CGF.Builder.getInt32(0));
- OffloadingArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
- }
- Return = CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), HasNowait
- ? OMPRTL___tgt_target_teams_nowait_mapper
- : OMPRTL___tgt_target_teams_mapper),
- OffloadingArgs);
- } else {
- SmallVector<llvm::Value *> OffloadingArgs = {
- RTLoc,
- DeviceID,
- OutlinedFnID,
- PointerNum,
- InputInfo.BasePointersArray.getPointer(),
- InputInfo.PointersArray.getPointer(),
- InputInfo.SizesArray.getPointer(),
- MapTypesArray,
- MapNamesArray,
- InputInfo.MappersArray.getPointer()};
- if (HasNowait) {
- // Add int32_t depNum = 0, void *depList = nullptr, int32_t
- // noAliasDepNum = 0, void *noAliasDepList = nullptr.
- OffloadingArgs.push_back(CGF.Builder.getInt32(0));
- OffloadingArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
- OffloadingArgs.push_back(CGF.Builder.getInt32(0));
- OffloadingArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
- }
- Return = CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), HasNowait ? OMPRTL___tgt_target_nowait_mapper
- : OMPRTL___tgt_target_mapper),
- OffloadingArgs);
- }
-
- // Check the error code and execute the host version if required.
- llvm::BasicBlock *OffloadFailedBlock =
- CGF.createBasicBlock("omp_offload.failed");
- llvm::BasicBlock *OffloadContBlock =
- CGF.createBasicBlock("omp_offload.cont");
- llvm::Value *Failed = CGF.Builder.CreateIsNotNull(Return);
- CGF.Builder.CreateCondBr(Failed, OffloadFailedBlock, OffloadContBlock);
-
- CGF.EmitBlock(OffloadFailedBlock);
- if (RequiresOuterTask) {
- CapturedVars.clear();
- CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
- }
- emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
- CGF.EmitBranch(OffloadContBlock);
-
- CGF.EmitBlock(OffloadContBlock, /*IsFinished=*/true);
+ auto &&TargetThenGen = [this, OutlinedFn, &D, &CapturedVars,
+ RequiresOuterTask, &CS, OffloadingMandatory, Device,
+ OutlinedFnID, &InputInfo, &MapTypesArray,
+ &MapNamesArray, SizeEmitter](CodeGenFunction &CGF,
+ PrePostActionTy &) {
+ emitTargetCallKernelLaunch(this, OutlinedFn, D, CapturedVars,
+ RequiresOuterTask, CS, OffloadingMandatory,
+ Device, OutlinedFnID, InputInfo, MapTypesArray,
+ MapNamesArray, SizeEmitter, CGF, CGM);
};
- // Notify that the host version must be executed.
- auto &&ElseGen = [this, &D, OutlinedFn, &CS, &CapturedVars,
- RequiresOuterTask](CodeGenFunction &CGF,
- PrePostActionTy &) {
- if (RequiresOuterTask) {
- CapturedVars.clear();
- CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
- }
- emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
- };
-
- auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray,
- &MapNamesArray, &CapturedVars, RequiresOuterTask,
- &CS](CodeGenFunction &CGF, PrePostActionTy &) {
- // Fill up the arrays with all the captured variables.
- MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
-
- // Get mappable expression information.
- MappableExprsHandler MEHandler(D, CGF);
- llvm::DenseMap<llvm::Value *, llvm::Value *> LambdaPointers;
- llvm::DenseSet<CanonicalDeclPtr<const Decl>> MappedVarSet;
-
- auto RI = CS.getCapturedRecordDecl()->field_begin();
- auto *CV = CapturedVars.begin();
- for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(),
- CE = CS.capture_end();
- CI != CE; ++CI, ++RI, ++CV) {
- MappableExprsHandler::MapCombinedInfoTy CurInfo;
- MappableExprsHandler::StructRangeInfoTy PartialStruct;
-
- // VLA sizes are passed to the outlined region by copy and do not have map
- // information associated.
- if (CI->capturesVariableArrayType()) {
- CurInfo.Exprs.push_back(nullptr);
- CurInfo.BasePointers.push_back(*CV);
- CurInfo.Pointers.push_back(*CV);
- CurInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
- CGF.getTypeSize(RI->getType()), CGF.Int64Ty, /*isSigned=*/true));
- // Copy to the device as an argument. No need to retrieve it.
- CurInfo.Types.push_back(MappableExprsHandler::OMP_MAP_LITERAL |
- MappableExprsHandler::OMP_MAP_TARGET_PARAM |
- MappableExprsHandler::OMP_MAP_IMPLICIT);
- CurInfo.Mappers.push_back(nullptr);
- } else {
- // If we have any information in the map clause, we use it, otherwise we
- // just do a default mapping.
- MEHandler.generateInfoForCapture(CI, *CV, CurInfo, PartialStruct);
- if (!CI->capturesThis())
- MappedVarSet.insert(CI->getCapturedVar());
- else
- MappedVarSet.insert(nullptr);
- if (CurInfo.BasePointers.empty() && !PartialStruct.Base.isValid())
- MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurInfo);
- // Generate correct mapping for variables captured by reference in
- // lambdas.
- if (CI->capturesVariable())
- MEHandler.generateInfoForLambdaCaptures(CI->getCapturedVar(), *CV,
- CurInfo, LambdaPointers);
- }
- // We expect to have at least an element of information for this capture.
- assert((!CurInfo.BasePointers.empty() || PartialStruct.Base.isValid()) &&
- "Non-existing map pointer for capture!");
- assert(CurInfo.BasePointers.size() == CurInfo.Pointers.size() &&
- CurInfo.BasePointers.size() == CurInfo.Sizes.size() &&
- CurInfo.BasePointers.size() == CurInfo.Types.size() &&
- CurInfo.BasePointers.size() == CurInfo.Mappers.size() &&
- "Inconsistent map information sizes!");
-
- // If there is an entry in PartialStruct it means we have a struct with
- // individual members mapped. Emit an extra combined entry.
- if (PartialStruct.Base.isValid()) {
- CombinedInfo.append(PartialStruct.PreliminaryMapData);
- MEHandler.emitCombinedEntry(
- CombinedInfo, CurInfo.Types, PartialStruct, nullptr,
- !PartialStruct.PreliminaryMapData.BasePointers.empty());
- }
-
- // We need to append the results of this capture to what we already have.
- CombinedInfo.append(CurInfo);
- }
- // Adjust MEMBER_OF flags for the lambdas captures.
- MEHandler.adjustMemberOfForLambdaCaptures(
- LambdaPointers, CombinedInfo.BasePointers, CombinedInfo.Pointers,
- CombinedInfo.Types);
- // Map any list items in a map clause that were not captures because they
- // weren't referenced within the construct.
- MEHandler.generateAllInfo(CombinedInfo, MappedVarSet);
-
- TargetDataInfo Info;
- // Fill up the arrays and create the arguments.
- emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder);
- emitOffloadingArraysArgument(
- CGF, Info.BasePointersArray, Info.PointersArray, Info.SizesArray,
- Info.MapTypesArray, Info.MapNamesArray, Info.MappersArray, Info,
- {/*ForEndTask=*/false});
-
- InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
- InputInfo.BasePointersArray =
- Address(Info.BasePointersArray, CGM.getPointerAlign());
- InputInfo.PointersArray =
- Address(Info.PointersArray, CGM.getPointerAlign());
- InputInfo.SizesArray = Address(Info.SizesArray, CGM.getPointerAlign());
- InputInfo.MappersArray = Address(Info.MappersArray, CGM.getPointerAlign());
- MapTypesArray = Info.MapTypesArray;
- MapNamesArray = Info.MapNamesArray;
- if (RequiresOuterTask)
- CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
- else
- emitInlinedDirective(CGF, D.getDirectiveKind(), ThenGen);
- };
-
- auto &&TargetElseGen = [this, &ElseGen, &D, RequiresOuterTask](
- CodeGenFunction &CGF, PrePostActionTy &) {
- if (RequiresOuterTask) {
- CodeGenFunction::OMPTargetDataInfo InputInfo;
- CGF.EmitOMPTargetTaskBasedDirective(D, ElseGen, InputInfo);
- } else {
- emitInlinedDirective(CGF, D.getDirectiveKind(), ElseGen);
- }
- };
+ auto &&TargetElseGen =
+ [this, OutlinedFn, &D, &CapturedVars, RequiresOuterTask, &CS,
+ OffloadingMandatory](CodeGenFunction &CGF, PrePostActionTy &) {
+ emitTargetCallElse(this, OutlinedFn, D, CapturedVars, RequiresOuterTask,
+ CS, OffloadingMandatory, CGF);
+ };
// If we have a target function ID it means that we need to support
// offloading, otherwise, just execute on the host. We need to execute on host
@@ -10547,16 +9705,13 @@ void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
if (RequiresDeviceCodegen) {
const auto &E = *cast<OMPExecutableDirective>(S);
- unsigned DeviceID;
- unsigned FileID;
- unsigned Line;
- getTargetEntryUniqueInfo(CGM.getContext(), E.getBeginLoc(), DeviceID,
- FileID, Line);
+
+ llvm::TargetRegionEntryInfo EntryInfo = getEntryInfoFromPresumedLoc(
+ CGM, OMPBuilder, E.getBeginLoc(), ParentName);
// Is this a target region that should not be emitted as an entry point? If
// so just signal we are done with this target region.
- if (!OffloadEntriesInfoManager.hasTargetRegionEntryInfo(DeviceID, FileID,
- ParentName, Line))
+ if (!OMPBuilder.OffloadInfoManager.hasTargetRegionEntryInfo(EntryInfo))
return;
switch (E.getDirectiveKind()) {
@@ -10603,6 +9758,14 @@ void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
CGM, ParentName,
cast<OMPTargetTeamsDistributeParallelForSimdDirective>(E));
break;
+ case OMPD_target_teams_loop:
+ CodeGenFunction::EmitOMPTargetTeamsGenericLoopDeviceFunction(
+ CGM, ParentName, cast<OMPTargetTeamsGenericLoopDirective>(E));
+ break;
+ case OMPD_target_parallel_loop:
+ CodeGenFunction::EmitOMPTargetParallelGenericLoopDeviceFunction(
+ CGM, ParentName, cast<OMPTargetParallelGenericLoopDirective>(E));
+ break;
case OMPD_parallel:
case OMPD_for:
case OMPD_parallel_for:
@@ -10660,6 +9823,7 @@ void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
case OMPD_parallel_master_taskloop:
case OMPD_parallel_master_taskloop_simd:
case OMPD_requires:
+ case OMPD_metadirective:
case OMPD_unknown:
default:
llvm_unreachable("Unknown target directive for OpenMP device codegen.");
@@ -10685,7 +9849,7 @@ void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
}
static bool isAssumedToBeNotEmitted(const ValueDecl *VD, bool IsDevice) {
- Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
+ std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
OMPDeclareTargetDeclAttr::getDeviceType(VD);
if (!DevTy)
return false;
@@ -10701,10 +9865,10 @@ static bool isAssumedToBeNotEmitted(const ValueDecl *VD, bool IsDevice) {
bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
// If emitting code for the host, we do not process FD here. Instead we do
// the normal code generation.
- if (!CGM.getLangOpts().OpenMPIsDevice) {
+ if (!CGM.getLangOpts().OpenMPIsTargetDevice) {
if (const auto *FD = dyn_cast<FunctionDecl>(GD.getDecl()))
if (isAssumedToBeNotEmitted(cast<ValueDecl>(FD),
- CGM.getLangOpts().OpenMPIsDevice))
+ CGM.getLangOpts().OpenMPIsTargetDevice))
return true;
return false;
}
@@ -10715,7 +9879,7 @@ bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
StringRef Name = CGM.getMangledName(GD);
scanForTargetRegionsFunctions(FD->getBody(), Name);
if (isAssumedToBeNotEmitted(cast<ValueDecl>(FD),
- CGM.getLangOpts().OpenMPIsDevice))
+ CGM.getLangOpts().OpenMPIsTargetDevice))
return true;
}
@@ -10726,10 +9890,10 @@ bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
if (isAssumedToBeNotEmitted(cast<ValueDecl>(GD.getDecl()),
- CGM.getLangOpts().OpenMPIsDevice))
+ CGM.getLangOpts().OpenMPIsTargetDevice))
return true;
- if (!CGM.getLangOpts().OpenMPIsDevice)
+ if (!CGM.getLangOpts().OpenMPIsTargetDevice)
return false;
// Check if there are Ctors/Dtors in this declaration and look for target
@@ -10750,11 +9914,12 @@ bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
}
// Do not to emit variable if it is not marked as declare target.
- llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+ std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
cast<VarDecl>(GD.getDecl()));
if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link ||
- (*Res == OMPDeclareTargetDeclAttr::MT_To &&
+ ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
+ *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
HasRequiresUnifiedSharedMemory)) {
DeferredGlobalVariables.insert(cast<VarDecl>(GD.getDecl()));
return true;
@@ -10765,19 +9930,20 @@ bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
llvm::Constant *Addr) {
if (CGM.getLangOpts().OMPTargetTriples.empty() &&
- !CGM.getLangOpts().OpenMPIsDevice)
+ !CGM.getLangOpts().OpenMPIsTargetDevice)
return;
- // If we have host/nohost variables, they do not need to be registered.
- Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
- OMPDeclareTargetDeclAttr::getDeviceType(VD);
- if (DevTy && DevTy.getValue() != OMPDeclareTargetDeclAttr::DT_Any)
+ std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
+
+ // If this is an 'extern' declaration we defer to the canonical definition and
+ // do not emit an offloading entry.
+ if (Res && *Res != OMPDeclareTargetDeclAttr::MT_Link &&
+ VD->hasExternalStorage())
return;
- llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
- OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (!Res) {
- if (CGM.getLangOpts().OpenMPIsDevice) {
+ if (CGM.getLangOpts().OpenMPIsTargetDevice) {
// Register non-target variables being emitted in device code (debug info
// may cause this).
StringRef VarName = CGM.getMangledName(VD);
@@ -10785,63 +9951,27 @@ void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
}
return;
}
- // Register declare target variables.
- OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags;
- StringRef VarName;
- CharUnits VarSize;
- llvm::GlobalValue::LinkageTypes Linkage;
-
- if (*Res == OMPDeclareTargetDeclAttr::MT_To &&
- !HasRequiresUnifiedSharedMemory) {
- Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
- VarName = CGM.getMangledName(VD);
- if (VD->hasDefinition(CGM.getContext()) != VarDecl::DeclarationOnly) {
- VarSize = CGM.getContext().getTypeSizeInChars(VD->getType());
- assert(!VarSize.isZero() && "Expected non-zero size of the variable");
- } else {
- VarSize = CharUnits::Zero();
- }
- Linkage = CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
- // Temp solution to prevent optimizations of the internal variables.
- if (CGM.getLangOpts().OpenMPIsDevice && !VD->isExternallyVisible()) {
- // Do not create a "ref-variable" if the original is not also available
- // on the host.
- if (!OffloadEntriesInfoManager.hasDeviceGlobalVarEntryInfo(VarName))
- return;
- std::string RefName = getName({VarName, "ref"});
- if (!CGM.GetGlobalValue(RefName)) {
- llvm::Constant *AddrRef =
- getOrCreateInternalVariable(Addr->getType(), RefName);
- auto *GVAddrRef = cast<llvm::GlobalVariable>(AddrRef);
- GVAddrRef->setConstant(/*Val=*/true);
- GVAddrRef->setLinkage(llvm::GlobalValue::InternalLinkage);
- GVAddrRef->setInitializer(Addr);
- CGM.addCompilerUsedGlobal(GVAddrRef);
- }
- }
- } else {
- assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
- (*Res == OMPDeclareTargetDeclAttr::MT_To &&
- HasRequiresUnifiedSharedMemory)) &&
- "Declare target attribute must link or to with unified memory.");
- if (*Res == OMPDeclareTargetDeclAttr::MT_Link)
- Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryLink;
- else
- Flags = OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo;
- if (CGM.getLangOpts().OpenMPIsDevice) {
- VarName = Addr->getName();
- Addr = nullptr;
- } else {
- VarName = getAddrOfDeclareTargetVar(VD).getName();
- Addr = cast<llvm::Constant>(getAddrOfDeclareTargetVar(VD).getPointer());
- }
- VarSize = CGM.getPointerSize();
- Linkage = llvm::GlobalValue::WeakAnyLinkage;
- }
+ auto AddrOfGlobal = [&VD, this]() { return CGM.GetAddrOfGlobal(VD); };
+ auto LinkageForVariable = [&VD, this]() {
+ return CGM.getLLVMLinkageVarDefinition(VD);
+ };
+
+ std::vector<llvm::GlobalVariable *> GeneratedRefs;
+ OMPBuilder.registerTargetGlobalVariable(
+ convertCaptureClause(VD), convertDeviceClause(VD),
+ VD->hasDefinition(CGM.getContext()) == VarDecl::DeclarationOnly,
+ VD->isExternallyVisible(),
+ getEntryInfoFromPresumedLoc(CGM, OMPBuilder,
+ VD->getCanonicalDecl()->getBeginLoc()),
+ CGM.getMangledName(VD), GeneratedRefs, CGM.getLangOpts().OpenMPSimd,
+ CGM.getLangOpts().OMPTargetTriples, AddrOfGlobal, LinkageForVariable,
+ CGM.getTypes().ConvertTypeForMem(
+ CGM.getContext().getPointerType(VD->getType())),
+ Addr);
- OffloadEntriesInfoManager.registerDeviceGlobalVarEntryInfo(
- VarName, Addr, VarSize, Flags, Linkage);
+ for (auto *ref : GeneratedRefs)
+ CGM.addCompilerUsedGlobal(ref);
}
bool CGOpenMPRuntime::emitTargetGlobal(GlobalDecl GD) {
@@ -10854,16 +9984,18 @@ bool CGOpenMPRuntime::emitTargetGlobal(GlobalDecl GD) {
void CGOpenMPRuntime::emitDeferredTargetDecls() const {
for (const VarDecl *VD : DeferredGlobalVariables) {
- llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+ std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (!Res)
continue;
- if (*Res == OMPDeclareTargetDeclAttr::MT_To &&
+ if ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
+ *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
!HasRequiresUnifiedSharedMemory) {
CGM.EmitGlobal(VD);
} else {
assert((*Res == OMPDeclareTargetDeclAttr::MT_Link ||
- (*Res == OMPDeclareTargetDeclAttr::MT_To &&
+ ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
+ *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
HasRequiresUnifiedSharedMemory)) &&
"Expected link clause or to clause with unified memory.");
(void)CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
@@ -10881,6 +10013,7 @@ void CGOpenMPRuntime::processRequiresDirective(const OMPRequiresDecl *D) {
for (const OMPClause *Clause : D->clauselists()) {
if (Clause->getClauseKind() == OMPC_unified_shared_memory) {
HasRequiresUnifiedSharedMemory = true;
+ OMPBuilder.Config.setHasRequiresUnifiedSharedMemory(true);
} else if (const auto *AC =
dyn_cast<OMPAtomicDefaultMemOrderClause>(Clause)) {
switch (AC->getAtomicDefaultMemOrderKind()) {
@@ -10936,19 +10069,19 @@ bool CGOpenMPRuntime::hasRequiresUnifiedSharedMemory() const {
CGOpenMPRuntime::DisableAutoDeclareTargetRAII::DisableAutoDeclareTargetRAII(
CodeGenModule &CGM)
: CGM(CGM) {
- if (CGM.getLangOpts().OpenMPIsDevice) {
+ if (CGM.getLangOpts().OpenMPIsTargetDevice) {
SavedShouldMarkAsGlobal = CGM.getOpenMPRuntime().ShouldMarkAsGlobal;
CGM.getOpenMPRuntime().ShouldMarkAsGlobal = false;
}
}
CGOpenMPRuntime::DisableAutoDeclareTargetRAII::~DisableAutoDeclareTargetRAII() {
- if (CGM.getLangOpts().OpenMPIsDevice)
+ if (CGM.getLangOpts().OpenMPIsTargetDevice)
CGM.getOpenMPRuntime().ShouldMarkAsGlobal = SavedShouldMarkAsGlobal;
}
bool CGOpenMPRuntime::markAsGlobalTarget(GlobalDecl GD) {
- if (!CGM.getLangOpts().OpenMPIsDevice || !ShouldMarkAsGlobal)
+ if (!CGM.getLangOpts().OpenMPIsTargetDevice || !ShouldMarkAsGlobal)
return true;
const auto *D = cast<FunctionDecl>(GD.getDecl());
@@ -10971,10 +10104,9 @@ llvm::Function *CGOpenMPRuntime::emitRequiresDirectiveRegFun() {
// If we don't have entries or if we are emitting code for the device, we
// don't need to do anything.
if (CGM.getLangOpts().OMPTargetTriples.empty() ||
- CGM.getLangOpts().OpenMPSimd || CGM.getLangOpts().OpenMPIsDevice ||
- (OffloadEntriesInfoManager.empty() &&
- !HasEmittedDeclareTargetRegion &&
- !HasEmittedTargetRegion))
+ CGM.getLangOpts().OpenMPSimd || CGM.getLangOpts().OpenMPIsTargetDevice ||
+ (OMPBuilder.OffloadInfoManager.empty() &&
+ !HasEmittedDeclareTargetRegion && !HasEmittedTargetRegion))
return nullptr;
// Create and register the function that handles the requires directives.
@@ -10988,22 +10120,19 @@ llvm::Function *CGOpenMPRuntime::emitRequiresDirectiveRegFun() {
std::string ReqName = getName({"omp_offloading", "requires_reg"});
RequiresRegFn = CGM.CreateGlobalInitOrCleanUpFunction(FTy, ReqName, FI);
CGF.StartFunction(GlobalDecl(), C.VoidTy, RequiresRegFn, FI, {});
- OpenMPOffloadingRequiresDirFlags Flags = OMP_REQ_NONE;
// TODO: check for other requires clauses.
// The requires directive takes effect only when a target region is
// present in the compilation unit. Otherwise it is ignored and not
// passed to the runtime. This avoids the runtime from throwing an error
// for mismatching requires clauses across compilation units that don't
// contain at least 1 target region.
- assert((HasEmittedTargetRegion ||
- HasEmittedDeclareTargetRegion ||
- !OffloadEntriesInfoManager.empty()) &&
+ assert((HasEmittedTargetRegion || HasEmittedDeclareTargetRegion ||
+ !OMPBuilder.OffloadInfoManager.empty()) &&
"Target or declare target region expected.");
- if (HasRequiresUnifiedSharedMemory)
- Flags = OMP_REQ_UNIFIED_SHARED_MEMORY;
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___tgt_register_requires),
- llvm::ConstantInt::get(CGM.Int64Ty, Flags));
+ llvm::ConstantInt::get(
+ CGM.Int64Ty, OMPBuilder.Config.getRequiresFlags()));
CGF.FinishFunction();
}
return RequiresRegFn;
@@ -11063,9 +10192,28 @@ void CGOpenMPRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
PushNumTeamsArgs);
}
+void CGOpenMPRuntime::emitThreadLimitClause(CodeGenFunction &CGF,
+ const Expr *ThreadLimit,
+ SourceLocation Loc) {
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
+ llvm::Value *ThreadLimitVal =
+ ThreadLimit
+ ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(ThreadLimit),
+ CGF.CGM.Int32Ty, /* isSigned = */ true)
+ : CGF.Builder.getInt32(0);
+
+ // Build call __kmpc_set_thread_limit(&loc, global_tid, thread_limit)
+ llvm::Value *ThreadLimitArgs[] = {RTLoc, getThreadID(CGF, Loc),
+ ThreadLimitVal};
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_set_thread_limit),
+ ThreadLimitArgs);
+}
+
void CGOpenMPRuntime::emitTargetDataCalls(
CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
- const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
+ const Expr *Device, const RegionCodeGenTy &CodeGen,
+ CGOpenMPRuntime::TargetDataInfo &Info) {
if (!CGF.HaveInsertPoint())
return;
@@ -11073,148 +10221,94 @@ void CGOpenMPRuntime::emitTargetDataCalls(
// off.
PrePostActionTy NoPrivAction;
- // Generate the code for the opening of the data environment. Capture all the
- // arguments of the runtime call by reference because they are used in the
- // closing of the region.
- auto &&BeginThenGen = [this, &D, Device, &Info,
- &CodeGen](CodeGenFunction &CGF, PrePostActionTy &) {
- // Fill up the arrays with all the mapped variables.
- MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
+ using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
- // Get map clause information.
- MappableExprsHandler MEHandler(D, CGF);
- MEHandler.generateAllInfo(CombinedInfo);
+ llvm::Value *IfCondVal = nullptr;
+ if (IfCond)
+ IfCondVal = CGF.EvaluateExprAsBool(IfCond);
- // Fill up the arrays and create the arguments.
- emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder,
- /*IsNonContiguous=*/true);
+ // Emit device ID if any.
+ llvm::Value *DeviceID = nullptr;
+ if (Device) {
+ DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
+ CGF.Int64Ty, /*isSigned=*/true);
+ } else {
+ DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
+ }
- llvm::Value *BasePointersArrayArg = nullptr;
- llvm::Value *PointersArrayArg = nullptr;
- llvm::Value *SizesArrayArg = nullptr;
- llvm::Value *MapTypesArrayArg = nullptr;
- llvm::Value *MapNamesArrayArg = nullptr;
- llvm::Value *MappersArrayArg = nullptr;
- emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
- SizesArrayArg, MapTypesArrayArg,
- MapNamesArrayArg, MappersArrayArg, Info);
+ // Fill up the arrays with all the mapped variables.
+ MappableExprsHandler::MapCombinedInfoTy CombinedInfo;
+ auto GenMapInfoCB =
+ [&](InsertPointTy CodeGenIP) -> llvm::OpenMPIRBuilder::MapInfosTy & {
+ CGF.Builder.restoreIP(CodeGenIP);
+ // Get map clause information.
+ MappableExprsHandler MEHandler(D, CGF);
+ MEHandler.generateAllInfo(CombinedInfo, OMPBuilder);
- // Emit device ID if any.
- llvm::Value *DeviceID = nullptr;
- if (Device) {
- DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
- CGF.Int64Ty, /*isSigned=*/true);
- } else {
- DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
+ auto FillInfoMap = [&](MappableExprsHandler::MappingExprInfo &MapExpr) {
+ return emitMappingInformation(CGF, OMPBuilder, MapExpr);
+ };
+ if (CGM.getCodeGenOpts().getDebugInfo() !=
+ llvm::codegenoptions::NoDebugInfo) {
+ CombinedInfo.Names.resize(CombinedInfo.Exprs.size());
+ llvm::transform(CombinedInfo.Exprs, CombinedInfo.Names.begin(),
+ FillInfoMap);
}
- // Emit the number of elements in the offloading arrays.
- llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
- //
- // Source location for the ident struct
- llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
-
- llvm::Value *OffloadingArgs[] = {RTLoc,
- DeviceID,
- PointerNum,
- BasePointersArrayArg,
- PointersArrayArg,
- SizesArrayArg,
- MapTypesArrayArg,
- MapNamesArrayArg,
- MappersArrayArg};
- CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___tgt_target_data_begin_mapper),
- OffloadingArgs);
-
- // If device pointer privatization is required, emit the body of the region
- // here. It will have to be duplicated: with and without privatization.
- if (!Info.CaptureDeviceAddrMap.empty())
- CodeGen(CGF);
+ return CombinedInfo;
};
-
- // Generate code for the closing of the data region.
- auto &&EndThenGen = [this, Device, &Info, &D](CodeGenFunction &CGF,
- PrePostActionTy &) {
- assert(Info.isValid() && "Invalid data environment closing arguments.");
-
- llvm::Value *BasePointersArrayArg = nullptr;
- llvm::Value *PointersArrayArg = nullptr;
- llvm::Value *SizesArrayArg = nullptr;
- llvm::Value *MapTypesArrayArg = nullptr;
- llvm::Value *MapNamesArrayArg = nullptr;
- llvm::Value *MappersArrayArg = nullptr;
- emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
- SizesArrayArg, MapTypesArrayArg,
- MapNamesArrayArg, MappersArrayArg, Info,
- {/*ForEndCall=*/true});
-
- // Emit device ID if any.
- llvm::Value *DeviceID = nullptr;
- if (Device) {
- DeviceID = CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(Device),
- CGF.Int64Ty, /*isSigned=*/true);
- } else {
- DeviceID = CGF.Builder.getInt64(OMP_DEVICEID_UNDEF);
+ using BodyGenTy = llvm::OpenMPIRBuilder::BodyGenTy;
+ auto BodyCB = [&](InsertPointTy CodeGenIP, BodyGenTy BodyGenType) {
+ CGF.Builder.restoreIP(CodeGenIP);
+ switch (BodyGenType) {
+ case BodyGenTy::Priv:
+ if (!Info.CaptureDeviceAddrMap.empty())
+ CodeGen(CGF);
+ break;
+ case BodyGenTy::DupNoPriv:
+ if (!Info.CaptureDeviceAddrMap.empty()) {
+ CodeGen.setAction(NoPrivAction);
+ CodeGen(CGF);
+ }
+ break;
+ case BodyGenTy::NoPriv:
+ if (Info.CaptureDeviceAddrMap.empty()) {
+ CodeGen.setAction(NoPrivAction);
+ CodeGen(CGF);
+ }
+ break;
}
-
- // Emit the number of elements in the offloading arrays.
- llvm::Value *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
-
- // Source location for the ident struct
- llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
-
- llvm::Value *OffloadingArgs[] = {RTLoc,
- DeviceID,
- PointerNum,
- BasePointersArrayArg,
- PointersArrayArg,
- SizesArrayArg,
- MapTypesArrayArg,
- MapNamesArrayArg,
- MappersArrayArg};
- CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___tgt_target_data_end_mapper),
- OffloadingArgs);
+ return InsertPointTy(CGF.Builder.GetInsertBlock(),
+ CGF.Builder.GetInsertPoint());
};
- // If we need device pointer privatization, we need to emit the body of the
- // region with no privatization in the 'else' branch of the conditional.
- // Otherwise, we don't have to do anything.
- auto &&BeginElseGen = [&Info, &CodeGen, &NoPrivAction](CodeGenFunction &CGF,
- PrePostActionTy &) {
- if (!Info.CaptureDeviceAddrMap.empty()) {
- CodeGen.setAction(NoPrivAction);
- CodeGen(CGF);
+ auto DeviceAddrCB = [&](unsigned int I, llvm::Value *NewDecl) {
+ if (const ValueDecl *DevVD = CombinedInfo.DevicePtrDecls[I]) {
+ Info.CaptureDeviceAddrMap.try_emplace(DevVD, NewDecl);
}
};
- // We don't have to do anything to close the region if the if clause evaluates
- // to false.
- auto &&EndElseGen = [](CodeGenFunction &CGF, PrePostActionTy &) {};
+ auto CustomMapperCB = [&](unsigned int I) {
+ llvm::Value *MFunc = nullptr;
+ if (CombinedInfo.Mappers[I]) {
+ Info.HasMapper = true;
+ MFunc = CGF.CGM.getOpenMPRuntime().getOrCreateUserDefinedMapperFunc(
+ cast<OMPDeclareMapperDecl>(CombinedInfo.Mappers[I]));
+ }
+ return MFunc;
+ };
- if (IfCond) {
- emitIfClause(CGF, IfCond, BeginThenGen, BeginElseGen);
- } else {
- RegionCodeGenTy RCG(BeginThenGen);
- RCG(CGF);
- }
+ // Source location for the ident struct
+ llvm::Value *RTLoc = emitUpdateLocation(CGF, D.getBeginLoc());
- // If we don't require privatization of device pointers, we emit the body in
- // between the runtime calls. This avoids duplicating the body code.
- if (Info.CaptureDeviceAddrMap.empty()) {
- CodeGen.setAction(NoPrivAction);
- CodeGen(CGF);
- }
-
- if (IfCond) {
- emitIfClause(CGF, IfCond, EndThenGen, EndElseGen);
- } else {
- RegionCodeGenTy RCG(EndThenGen);
- RCG(CGF);
- }
+ InsertPointTy AllocaIP(CGF.AllocaInsertPt->getParent(),
+ CGF.AllocaInsertPt->getIterator());
+ InsertPointTy CodeGenIP(CGF.Builder.GetInsertBlock(),
+ CGF.Builder.GetInsertPoint());
+ llvm::OpenMPIRBuilder::LocationDescription OmpLoc(CodeGenIP);
+ CGF.Builder.restoreIP(OMPBuilder.createTargetData(
+ OmpLoc, AllocaIP, CodeGenIP, DeviceID, IfCondVal, Info, GenMapInfoCB,
+ /*MapperFunc=*/nullptr, BodyCB, DeviceAddrCB, CustomMapperCB, RTLoc));
}
void CGOpenMPRuntime::emitTargetDataStandAloneCall(
@@ -11341,6 +10435,7 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
case OMPD_requires:
+ case OMPD_metadirective:
case OMPD_unknown:
default:
llvm_unreachable("Unexpected standalone target data directive.");
@@ -11359,28 +10454,30 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
// Get map clause information.
MappableExprsHandler MEHandler(D, CGF);
- MEHandler.generateAllInfo(CombinedInfo);
+ MEHandler.generateAllInfo(CombinedInfo, OMPBuilder);
- TargetDataInfo Info;
+ CGOpenMPRuntime::TargetDataInfo Info;
// Fill up the arrays and create the arguments.
emitOffloadingArrays(CGF, CombinedInfo, Info, OMPBuilder,
/*IsNonContiguous=*/true);
bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>() ||
D.hasClausesOfKind<OMPNowaitClause>();
- emitOffloadingArraysArgument(
- CGF, Info.BasePointersArray, Info.PointersArray, Info.SizesArray,
- Info.MapTypesArray, Info.MapNamesArray, Info.MappersArray, Info,
- {/*ForEndTask=*/false});
+ bool EmitDebug = CGF.CGM.getCodeGenOpts().getDebugInfo() !=
+ llvm::codegenoptions::NoDebugInfo;
+ OMPBuilder.emitOffloadingArraysArgument(CGF.Builder, Info.RTArgs, Info,
+ EmitDebug,
+ /*ForEndCall=*/false);
InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
- InputInfo.BasePointersArray =
- Address(Info.BasePointersArray, CGM.getPointerAlign());
- InputInfo.PointersArray =
- Address(Info.PointersArray, CGM.getPointerAlign());
+ InputInfo.BasePointersArray = Address(Info.RTArgs.BasePointersArray,
+ CGF.VoidPtrTy, CGM.getPointerAlign());
+ InputInfo.PointersArray = Address(Info.RTArgs.PointersArray, CGF.VoidPtrTy,
+ CGM.getPointerAlign());
InputInfo.SizesArray =
- Address(Info.SizesArray, CGM.getPointerAlign());
- InputInfo.MappersArray = Address(Info.MappersArray, CGM.getPointerAlign());
- MapTypesArray = Info.MapTypesArray;
- MapNamesArray = Info.MapNamesArray;
+ Address(Info.RTArgs.SizesArray, CGF.Int64Ty, CGM.getPointerAlign());
+ InputInfo.MappersArray =
+ Address(Info.RTArgs.MappersArray, CGF.VoidPtrTy, CGM.getPointerAlign());
+ MapTypesArray = Info.RTArgs.MapTypesArray;
+ MapNamesArray = Info.RTArgs.MapNamesArray;
if (RequiresOuterTask)
CGF.EmitOMPTargetTaskBasedDirective(D, ThenGen, InputInfo);
else
@@ -11398,13 +10495,21 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
namespace {
/// Kind of parameter in a function with 'declare simd' directive.
- enum ParamKindTy { LinearWithVarStride, Linear, Uniform, Vector };
- /// Attribute set of the parameter.
- struct ParamAttrTy {
- ParamKindTy Kind = Vector;
- llvm::APSInt StrideOrArg;
- llvm::APSInt Alignment;
- };
+enum ParamKindTy {
+ Linear,
+ LinearRef,
+ LinearUVal,
+ LinearVal,
+ Uniform,
+ Vector,
+};
+/// Attribute set of the parameter.
+struct ParamAttrTy {
+ ParamKindTy Kind = Vector;
+ llvm::APSInt StrideOrArg;
+ llvm::APSInt Alignment;
+ bool HasVarStride = false;
+};
} // namespace
static unsigned evaluateCDTSize(const FunctionDecl *FD,
@@ -11459,6 +10564,52 @@ static unsigned evaluateCDTSize(const FunctionDecl *FD,
return C.getTypeSize(CDT);
}
+/// Mangle the parameter part of the vector function name according to
+/// their OpenMP classification. The mangling function is defined in
+/// section 4.5 of the AAVFABI(2021Q1).
+static std::string mangleVectorParameters(ArrayRef<ParamAttrTy> ParamAttrs) {
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ for (const auto &ParamAttr : ParamAttrs) {
+ switch (ParamAttr.Kind) {
+ case Linear:
+ Out << 'l';
+ break;
+ case LinearRef:
+ Out << 'R';
+ break;
+ case LinearUVal:
+ Out << 'U';
+ break;
+ case LinearVal:
+ Out << 'L';
+ break;
+ case Uniform:
+ Out << 'u';
+ break;
+ case Vector:
+ Out << 'v';
+ break;
+ }
+ if (ParamAttr.HasVarStride)
+ Out << "s" << ParamAttr.StrideOrArg;
+ else if (ParamAttr.Kind == Linear || ParamAttr.Kind == LinearRef ||
+ ParamAttr.Kind == LinearUVal || ParamAttr.Kind == LinearVal) {
+ // Don't print the step value if it is not present or if it is
+ // equal to 1.
+ if (ParamAttr.StrideOrArg < 0)
+ Out << 'n' << -ParamAttr.StrideOrArg;
+ else if (ParamAttr.StrideOrArg != 1)
+ Out << ParamAttr.StrideOrArg;
+ }
+
+ if (!!ParamAttr.Alignment)
+ Out << 'a' << ParamAttr.Alignment;
+ }
+
+ return std::string(Out.str());
+}
+
static void
emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
const llvm::APSInt &VLENVal,
@@ -11507,26 +10658,7 @@ emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
} else {
Out << VLENVal;
}
- for (const ParamAttrTy &ParamAttr : ParamAttrs) {
- switch (ParamAttr.Kind){
- case LinearWithVarStride:
- Out << 's' << ParamAttr.StrideOrArg;
- break;
- case Linear:
- Out << 'l';
- if (ParamAttr.StrideOrArg != 1)
- Out << ParamAttr.StrideOrArg;
- break;
- case Uniform:
- Out << 'u';
- break;
- case Vector:
- Out << 'v';
- break;
- }
- if (!!ParamAttr.Alignment)
- Out << 'a' << ParamAttr.Alignment;
- }
+ Out << mangleVectorParameters(ParamAttrs);
Out << '_' << Fn->getName();
Fn->addFnAttr(Out.str());
}
@@ -11539,11 +10671,7 @@ emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
// available at
// https://developer.arm.com/products/software-development-tools/hpc/arm-compiler-for-hpc/vector-function-abi.
-/// Maps To Vector (MTV), as defined in 3.1.1 of the AAVFABI.
-///
-/// TODO: Need to implement the behavior for reference marked with a
-/// var or no linear modifiers (1.b in the section). For this, we
-/// need to extend ParamKindTy to support the linear modifiers.
+/// Maps To Vector (MTV), as defined in 4.1.1 of the AAVFABI (2021Q1).
static bool getAArch64MTV(QualType QT, ParamKindTy Kind) {
QT = QT.getCanonicalType();
@@ -11553,12 +10681,11 @@ static bool getAArch64MTV(QualType QT, ParamKindTy Kind) {
if (Kind == ParamKindTy::Uniform)
return false;
- if (Kind == ParamKindTy::Linear)
+ if (Kind == ParamKindTy::LinearUVal || Kind == ParamKindTy::LinearRef)
return false;
- // TODO: Handle linear references with modifiers
-
- if (Kind == ParamKindTy::LinearWithVarStride)
+ if ((Kind == ParamKindTy::Linear || Kind == ParamKindTy::LinearVal) &&
+ !QT->isReferenceType())
return false;
return true;
@@ -11627,11 +10754,11 @@ getNDSWDS(const FunctionDecl *FD, ArrayRef<ParamAttrTy> ParamAttrs) {
assert(!Sizes.empty() && "Unable to determine NDS and WDS.");
// The LS of a function parameter / return value can only be a power
// of 2, starting from 8 bits, up to 128.
- assert(std::all_of(Sizes.begin(), Sizes.end(),
- [](unsigned Size) {
- return Size == 8 || Size == 16 || Size == 32 ||
- Size == 64 || Size == 128;
- }) &&
+ assert(llvm::all_of(Sizes,
+ [](unsigned Size) {
+ return Size == 8 || Size == 16 || Size == 32 ||
+ Size == 64 || Size == 128;
+ }) &&
"Invalid size");
return std::make_tuple(*std::min_element(std::begin(Sizes), std::end(Sizes)),
@@ -11639,39 +10766,6 @@ getNDSWDS(const FunctionDecl *FD, ArrayRef<ParamAttrTy> ParamAttrs) {
OutputBecomesInput);
}
-/// Mangle the parameter part of the vector function name according to
-/// their OpenMP classification. The mangling function is defined in
-/// section 3.5 of the AAVFABI.
-static std::string mangleVectorParameters(ArrayRef<ParamAttrTy> ParamAttrs) {
- SmallString<256> Buffer;
- llvm::raw_svector_ostream Out(Buffer);
- for (const auto &ParamAttr : ParamAttrs) {
- switch (ParamAttr.Kind) {
- case LinearWithVarStride:
- Out << "ls" << ParamAttr.StrideOrArg;
- break;
- case Linear:
- Out << 'l';
- // Don't print the step value if it is not present or if it is
- // equal to 1.
- if (ParamAttr.StrideOrArg != 1)
- Out << ParamAttr.StrideOrArg;
- break;
- case Uniform:
- Out << 'u';
- break;
- case Vector:
- Out << 'v';
- break;
- }
-
- if (!!ParamAttr.Alignment)
- Out << 'a' << ParamAttr.Alignment;
- }
-
- return std::string(Out.str());
-}
-
// Function used to add the attribute. The parameter `VLEN` is
// templated to allow the use of "x" when targeting scalable functions
// for SVE.
@@ -11838,16 +10932,16 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
llvm::Function *Fn) {
ASTContext &C = CGM.getContext();
FD = FD->getMostRecentDecl();
- // Map params to their positions in function decl.
- llvm::DenseMap<const Decl *, unsigned> ParamPositions;
- if (isa<CXXMethodDecl>(FD))
- ParamPositions.try_emplace(FD, 0);
- unsigned ParamPos = ParamPositions.size();
- for (const ParmVarDecl *P : FD->parameters()) {
- ParamPositions.try_emplace(P->getCanonicalDecl(), ParamPos);
- ++ParamPos;
- }
while (FD) {
+ // Map params to their positions in function decl.
+ llvm::DenseMap<const Decl *, unsigned> ParamPositions;
+ if (isa<CXXMethodDecl>(FD))
+ ParamPositions.try_emplace(FD, 0);
+ unsigned ParamPos = ParamPositions.size();
+ for (const ParmVarDecl *P : FD->parameters()) {
+ ParamPositions.try_emplace(P->getCanonicalDecl(), ParamPos);
+ ++ParamPos;
+ }
for (const auto *Attr : FD->specific_attrs<OMPDeclareSimdDeclAttr>()) {
llvm::SmallVector<ParamAttrTy, 8> ParamAttrs(ParamPositions.size());
// Mark uniform parameters.
@@ -11859,12 +10953,14 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
} else {
const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
->getCanonicalDecl();
- Pos = ParamPositions[PVD];
+ auto It = ParamPositions.find(PVD);
+ assert(It != ParamPositions.end() && "Function parameter not found");
+ Pos = It->second;
}
ParamAttrs[Pos].Kind = Uniform;
}
// Get alignment info.
- auto NI = Attr->alignments_begin();
+ auto *NI = Attr->alignments_begin();
for (const Expr *E : Attr->aligneds()) {
E = E->IgnoreParenImpCasts();
unsigned Pos;
@@ -11875,7 +10971,9 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
} else {
const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
->getCanonicalDecl();
- Pos = ParamPositions[PVD];
+ auto It = ParamPositions.find(PVD);
+ assert(It != ParamPositions.end() && "Function parameter not found");
+ Pos = It->second;
ParmTy = PVD->getType();
}
ParamAttrs[Pos].Alignment =
@@ -11887,27 +10985,48 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
++NI;
}
// Mark linear parameters.
- auto SI = Attr->steps_begin();
- auto MI = Attr->modifiers_begin();
+ auto *SI = Attr->steps_begin();
+ auto *MI = Attr->modifiers_begin();
for (const Expr *E : Attr->linears()) {
E = E->IgnoreParenImpCasts();
unsigned Pos;
+ bool IsReferenceType = false;
// Rescaling factor needed to compute the linear parameter
// value in the mangled name.
unsigned PtrRescalingFactor = 1;
if (isa<CXXThisExpr>(E)) {
Pos = ParamPositions[FD];
+ auto *P = cast<PointerType>(E->getType());
+ PtrRescalingFactor = CGM.getContext()
+ .getTypeSizeInChars(P->getPointeeType())
+ .getQuantity();
} else {
const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
->getCanonicalDecl();
- Pos = ParamPositions[PVD];
+ auto It = ParamPositions.find(PVD);
+ assert(It != ParamPositions.end() && "Function parameter not found");
+ Pos = It->second;
if (auto *P = dyn_cast<PointerType>(PVD->getType()))
PtrRescalingFactor = CGM.getContext()
.getTypeSizeInChars(P->getPointeeType())
.getQuantity();
+ else if (PVD->getType()->isReferenceType()) {
+ IsReferenceType = true;
+ PtrRescalingFactor =
+ CGM.getContext()
+ .getTypeSizeInChars(PVD->getType().getNonReferenceType())
+ .getQuantity();
+ }
}
ParamAttrTy &ParamAttr = ParamAttrs[Pos];
- ParamAttr.Kind = Linear;
+ if (*MI == OMPC_LINEAR_ref)
+ ParamAttr.Kind = LinearRef;
+ else if (*MI == OMPC_LINEAR_uval)
+ ParamAttr.Kind = LinearUVal;
+ else if (IsReferenceType)
+ ParamAttr.Kind = LinearVal;
+ else
+ ParamAttr.Kind = Linear;
// Assuming a stride of 1, for `linear` without modifiers.
ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(1);
if (*SI) {
@@ -11915,10 +11034,13 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
if (!(*SI)->EvaluateAsInt(Result, C, Expr::SE_AllowSideEffects)) {
if (const auto *DRE =
cast<DeclRefExpr>((*SI)->IgnoreParenImpCasts())) {
- if (const auto *StridePVD = cast<ParmVarDecl>(DRE->getDecl())) {
- ParamAttr.Kind = LinearWithVarStride;
- ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(
- ParamPositions[StridePVD->getCanonicalDecl()]);
+ if (const auto *StridePVD =
+ dyn_cast<ParmVarDecl>(DRE->getDecl())) {
+ ParamAttr.HasVarStride = true;
+ auto It = ParamPositions.find(StridePVD->getCanonicalDecl());
+ assert(It != ParamPositions.end() &&
+ "Function parameter not found");
+ ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(It->second);
}
}
} else {
@@ -11928,7 +11050,8 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
// If we are using a linear clause on a pointer, we need to
// rescale the value of linear_step with the byte size of the
// pointee type.
- if (Linear == ParamAttr.Kind)
+ if (!ParamAttr.HasVarStride &&
+ (ParamAttr.Kind == Linear || ParamAttr.Kind == LinearRef))
ParamAttr.StrideOrArg = ParamAttr.StrideOrArg * PtrRescalingFactor;
++SI;
++MI;
@@ -11949,7 +11072,7 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
if (CGM.getTarget().hasFeature("sve"))
emitAArch64DeclareSimdFunction(CGM, FD, VLEN, ParamAttrs, State,
MangledName, 's', 128, Fn, ExprLoc);
- if (CGM.getTarget().hasFeature("neon"))
+ else if (CGM.getTarget().hasFeature("neon"))
emitAArch64DeclareSimdFunction(CGM, FD, VLEN, ParamAttrs, State,
MangledName, 'n', 128, Fn, ExprLoc);
}
@@ -12009,8 +11132,8 @@ void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
RD = cast<RecordDecl>(KmpDimTy->getAsTagDecl());
}
llvm::APInt Size(/*numBits=*/32, NumIterations.size());
- QualType ArrayTy =
- C.getConstantArrayType(KmpDimTy, Size, nullptr, ArrayType::Normal, 0);
+ QualType ArrayTy = C.getConstantArrayType(KmpDimTy, Size, nullptr,
+ ArraySizeModifier::Normal, 0);
Address DimsAddr = CGF.CreateMemTemp(ArrayTy, "dims");
CGF.EmitNullInitialization(DimsAddr, ArrayTy);
@@ -12051,16 +11174,18 @@ void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
llvm::FunctionCallee FiniRTLFn = OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_doacross_fini);
CGF.EHStack.pushCleanup<DoacrossCleanupTy>(NormalAndEHCleanup, FiniRTLFn,
- llvm::makeArrayRef(FiniArgs));
+ llvm::ArrayRef(FiniArgs));
}
-void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
- const OMPDependClause *C) {
+template <typename T>
+static void EmitDoacrossOrdered(CodeGenFunction &CGF, CodeGenModule &CGM,
+ const T *C, llvm::Value *ULoc,
+ llvm::Value *ThreadID) {
QualType Int64Ty =
CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
llvm::APInt Size(/*numBits=*/32, C->getNumLoops());
QualType ArrayTy = CGM.getContext().getConstantArrayType(
- Int64Ty, Size, nullptr, ArrayType::Normal, 0);
+ Int64Ty, Size, nullptr, ArraySizeModifier::Normal, 0);
Address CntAddr = CGF.CreateMemTemp(ArrayTy, ".cnt.addr");
for (unsigned I = 0, E = C->getNumLoops(); I < E; ++I) {
const Expr *CounterVal = C->getLoopData(I);
@@ -12072,21 +11197,35 @@ void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
/*Volatile=*/false, Int64Ty);
}
llvm::Value *Args[] = {
- emitUpdateLocation(CGF, C->getBeginLoc()),
- getThreadID(CGF, C->getBeginLoc()),
- CGF.Builder.CreateConstArrayGEP(CntAddr, 0).getPointer()};
+ ULoc, ThreadID, CGF.Builder.CreateConstArrayGEP(CntAddr, 0).getPointer()};
llvm::FunctionCallee RTLFn;
- if (C->getDependencyKind() == OMPC_DEPEND_source) {
+ llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
+ OMPDoacrossKind<T> ODK;
+ if (ODK.isSource(C)) {
RTLFn = OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
OMPRTL___kmpc_doacross_post);
} else {
- assert(C->getDependencyKind() == OMPC_DEPEND_sink);
+ assert(ODK.isSink(C) && "Expect sink modifier.");
RTLFn = OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
OMPRTL___kmpc_doacross_wait);
}
CGF.EmitRuntimeCall(RTLFn, Args);
}
+void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
+ const OMPDependClause *C) {
+ return EmitDoacrossOrdered<OMPDependClause>(
+ CGF, CGM, C, emitUpdateLocation(CGF, C->getBeginLoc()),
+ getThreadID(CGF, C->getBeginLoc()));
+}
+
+void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
+ const OMPDoacrossClause *C) {
+ return EmitDoacrossOrdered<OMPDoacrossClause>(
+ CGF, CGM, C, emitUpdateLocation(CGF, C->getBeginLoc()),
+ getThreadID(CGF, C->getBeginLoc()));
+}
+
void CGOpenMPRuntime::emitCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee Callee,
ArrayRef<llvm::Value *> Args) const {
@@ -12120,6 +11259,36 @@ Address CGOpenMPRuntime::getParameterAddress(CodeGenFunction &CGF,
return CGF.GetAddrOfLocalVar(NativeParam);
}
+/// Return allocator value from expression, or return a null allocator (default
+/// when no allocator specified).
+static llvm::Value *getAllocatorVal(CodeGenFunction &CGF,
+ const Expr *Allocator) {
+ llvm::Value *AllocVal;
+ if (Allocator) {
+ AllocVal = CGF.EmitScalarExpr(Allocator);
+ // According to the standard, the original allocator type is a enum
+ // (integer). Convert to pointer type, if required.
+ AllocVal = CGF.EmitScalarConversion(AllocVal, Allocator->getType(),
+ CGF.getContext().VoidPtrTy,
+ Allocator->getExprLoc());
+ } else {
+ // If no allocator specified, it defaults to the null allocator.
+ AllocVal = llvm::Constant::getNullValue(
+ CGF.CGM.getTypes().ConvertType(CGF.getContext().VoidPtrTy));
+ }
+ return AllocVal;
+}
+
+/// Return the alignment from an allocate directive if present.
+static llvm::Value *getAlignmentValue(CodeGenModule &CGM, const VarDecl *VD) {
+ std::optional<CharUnits> AllocateAlignment = CGM.getOMPAllocateAlignment(VD);
+
+ if (!AllocateAlignment)
+ return nullptr;
+
+ return llvm::ConstantInt::get(CGM.SizeTy, AllocateAlignment->getQuantity());
+}
+
Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD) {
if (!VD)
@@ -12156,20 +11325,20 @@ Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
}
llvm::Value *ThreadID = getThreadID(CGF, CVD->getBeginLoc());
const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
- assert(AA->getAllocator() &&
- "Expected allocator expression for non-default allocator.");
- llvm::Value *Allocator = CGF.EmitScalarExpr(AA->getAllocator());
- // According to the standard, the original allocator type is a enum
- // (integer). Convert to pointer type, if required.
- Allocator = CGF.EmitScalarConversion(
- Allocator, AA->getAllocator()->getType(), CGF.getContext().VoidPtrTy,
- AA->getAllocator()->getExprLoc());
- llvm::Value *Args[] = {ThreadID, Size, Allocator};
-
- llvm::Value *Addr =
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_alloc),
- Args, getName({CVD->getName(), ".void.addr"}));
+ const Expr *Allocator = AA->getAllocator();
+ llvm::Value *AllocVal = getAllocatorVal(CGF, Allocator);
+ llvm::Value *Alignment = getAlignmentValue(CGM, CVD);
+ SmallVector<llvm::Value *, 4> Args;
+ Args.push_back(ThreadID);
+ if (Alignment)
+ Args.push_back(Alignment);
+ Args.push_back(Size);
+ Args.push_back(AllocVal);
+ llvm::omp::RuntimeFunction FnID =
+ Alignment ? OMPRTL___kmpc_aligned_alloc : OMPRTL___kmpc_alloc;
+ llvm::Value *Addr = CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(), FnID), Args,
+ getName({CVD->getName(), ".void.addr"}));
llvm::FunctionCallee FiniRTLFn = OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_free);
QualType Ty = CGM.getContext().getPointerType(CVD->getType());
@@ -12183,14 +11352,14 @@ Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
llvm::FunctionCallee RTLFn;
SourceLocation::UIntTy LocEncoding;
Address Addr;
- const Expr *Allocator;
+ const Expr *AllocExpr;
public:
OMPAllocateCleanupTy(llvm::FunctionCallee RTLFn,
SourceLocation::UIntTy LocEncoding, Address Addr,
- const Expr *Allocator)
+ const Expr *AllocExpr)
: RTLFn(RTLFn), LocEncoding(LocEncoding), Addr(Addr),
- Allocator(Allocator) {}
+ AllocExpr(AllocExpr) {}
void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
if (!CGF.HaveInsertPoint())
return;
@@ -12199,22 +11368,18 @@ Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
CGF, SourceLocation::getFromRawEncoding(LocEncoding));
Args[1] = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
Addr.getPointer(), CGF.VoidPtrTy);
- llvm::Value *AllocVal = CGF.EmitScalarExpr(Allocator);
- // According to the standard, the original allocator type is a enum
- // (integer). Convert to pointer type, if required.
- AllocVal = CGF.EmitScalarConversion(AllocVal, Allocator->getType(),
- CGF.getContext().VoidPtrTy,
- Allocator->getExprLoc());
+ llvm::Value *AllocVal = getAllocatorVal(CGF, AllocExpr);
Args[2] = AllocVal;
-
CGF.EmitRuntimeCall(RTLFn, Args);
}
};
Address VDAddr =
- UntiedRealAddr.isValid() ? UntiedRealAddr : Address(Addr, Align);
+ UntiedRealAddr.isValid()
+ ? UntiedRealAddr
+ : Address(Addr, CGF.ConvertTypeForMem(CVD->getType()), Align);
CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(
NormalAndEHCleanup, FiniRTLFn, CVD->getLocation().getRawEncoding(),
- VDAddr, AA->getAllocator());
+ VDAddr, Allocator);
if (UntiedRealAddr.isValid())
if (auto *Region =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
@@ -12287,7 +11452,7 @@ bool CGOpenMPRuntime::isNontemporalDecl(const ValueDecl *VD) const {
return llvm::any_of(
CGM.getOpenMPRuntime().NontemporalDeclsStack,
- [VD](const NontemporalDeclsSet &Set) { return Set.count(VD) > 0; });
+ [VD](const NontemporalDeclsSet &Set) { return Set.contains(VD); });
}
void CGOpenMPRuntime::LastprivateConditionalRAII::tryToDisableInnerAnalysis(
@@ -12556,20 +11721,19 @@ void CGOpenMPRuntime::emitLastprivateConditionalUpdate(CodeGenFunction &CGF,
// Last updated loop counter for the lastprivate conditional var.
// int<xx> last_iv = 0;
llvm::Type *LLIVTy = CGF.ConvertTypeForMem(IVLVal.getType());
- llvm::Constant *LastIV =
- getOrCreateInternalVariable(LLIVTy, getName({UniqueDeclName, "iv"}));
+ llvm::Constant *LastIV = OMPBuilder.getOrCreateInternalVariable(
+ LLIVTy, getName({UniqueDeclName, "iv"}));
cast<llvm::GlobalVariable>(LastIV)->setAlignment(
IVLVal.getAlignment().getAsAlign());
LValue LastIVLVal = CGF.MakeNaturalAlignAddrLValue(LastIV, IVLVal.getType());
// Last value of the lastprivate conditional.
// decltype(priv_a) last_a;
- llvm::Constant *Last = getOrCreateInternalVariable(
+ llvm::GlobalVariable *Last = OMPBuilder.getOrCreateInternalVariable(
CGF.ConvertTypeForMem(LVal.getType()), UniqueDeclName);
- cast<llvm::GlobalVariable>(Last)->setAlignment(
- LVal.getAlignment().getAsAlign());
- LValue LastLVal =
- CGF.MakeAddrLValue(Last, LVal.getType(), LVal.getAlignment());
+ Last->setAlignment(LVal.getAlignment().getAsAlign());
+ LValue LastLVal = CGF.MakeAddrLValue(
+ Address(Last, Last->getValueType(), LVal.getAlignment()), LVal.getType());
// Global loop counter. Required to handle inner parallel-for regions.
// iv
@@ -12660,7 +11824,8 @@ void CGOpenMPRuntime::checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
LValue PrivLVal = CGF.EmitLValue(FoundE);
Address StructAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
PrivLVal.getAddress(CGF),
- CGF.ConvertTypeForMem(CGF.getContext().getPointerType(StructTy)));
+ CGF.ConvertTypeForMem(CGF.getContext().getPointerType(StructTy)),
+ CGF.ConvertTypeForMem(StructTy));
LValue BaseLVal =
CGF.MakeAddrLValue(StructAddr, StructTy, AlignmentSource::Decl);
LValue FiredLVal = CGF.EmitLValueForField(BaseLVal, FiredDecl);
@@ -12696,7 +11861,7 @@ void CGOpenMPRuntime::checkAndEmitSharedLastprivateConditional(
const CapturedStmt *CS = D.getCapturedStmt(CaptureRegions.back());
for (const auto &Pair : It->DeclToUniqueName) {
const auto *VD = cast<VarDecl>(Pair.first->getCanonicalDecl());
- if (!CS->capturesVariable(VD) || IgnoredDecls.count(VD) > 0)
+ if (!CS->capturesVariable(VD) || IgnoredDecls.contains(VD))
continue;
auto I = LPCI->getSecond().find(Pair.first);
assert(I != LPCI->getSecond().end() &&
@@ -12742,20 +11907,23 @@ void CGOpenMPRuntime::emitLastprivateConditionalFinalUpdate(
if (!GV)
return;
LValue LPLVal = CGF.MakeAddrLValue(
- GV, PrivLVal.getType().getNonReferenceType(), PrivLVal.getAlignment());
+ Address(GV, GV->getValueType(), PrivLVal.getAlignment()),
+ PrivLVal.getType().getNonReferenceType());
llvm::Value *Res = CGF.EmitLoadOfScalar(LPLVal, Loc);
CGF.EmitStoreOfScalar(Res, PrivLVal);
}
llvm::Function *CGOpenMPSIMDRuntime::emitParallelOutlinedFunction(
- const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) {
llvm_unreachable("Not supported in SIMD-only mode");
}
llvm::Function *CGOpenMPSIMDRuntime::emitTeamsOutlinedFunction(
- const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) {
llvm_unreachable("Not supported in SIMD-only mode");
}
@@ -12771,7 +11939,8 @@ void CGOpenMPSIMDRuntime::emitParallelCall(CodeGenFunction &CGF,
SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond) {
+ const Expr *IfCond,
+ llvm::Value *NumThreads) {
llvm_unreachable("Not supported in SIMD-only mode");
}
@@ -12958,7 +12127,8 @@ Address CGOpenMPSIMDRuntime::getTaskReductionItem(CodeGenFunction &CGF,
}
void CGOpenMPSIMDRuntime::emitTaskwaitCall(CodeGenFunction &CGF,
- SourceLocation Loc) {
+ SourceLocation Loc,
+ const OMPTaskDataTy &Data) {
llvm_unreachable("Not supported in SIMD-only mode");
}
@@ -13020,7 +12190,8 @@ void CGOpenMPSIMDRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
void CGOpenMPSIMDRuntime::emitTargetDataCalls(
CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
- const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
+ const Expr *Device, const RegionCodeGenTy &CodeGen,
+ CGOpenMPRuntime::TargetDataInfo &Info) {
llvm_unreachable("Not supported in SIMD-only mode");
}
@@ -13041,6 +12212,11 @@ void CGOpenMPSIMDRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
llvm_unreachable("Not supported in SIMD-only mode");
}
+void CGOpenMPSIMDRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
+ const OMPDoacrossClause *C) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
const VarDecl *
CGOpenMPSIMDRuntime::translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
index c24648aae7e1..b01b39abd160 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
@@ -35,7 +35,6 @@ class ArrayType;
class Constant;
class FunctionType;
class GlobalVariable;
-class StructType;
class Type;
class Value;
class OpenMPIRBuilder;
@@ -48,7 +47,6 @@ class OMPExecutableDirective;
class OMPLoopDirective;
class VarDecl;
class OMPDeclareReductionDecl;
-class IdentifierInfo;
namespace CodeGen {
class Address;
@@ -123,6 +121,7 @@ struct OMPTaskDataTy final {
bool Nogroup = false;
bool IsReductionWithTaskMod = false;
bool IsWorksharingReduction = false;
+ bool HasNowaitClause = false;
};
/// Class intended to support codegen of all kind of the reduction clauses.
@@ -162,10 +161,10 @@ private:
/// Performs aggregate initialization.
/// \param N Number of reduction item in the common list.
/// \param PrivateAddr Address of the corresponding private item.
- /// \param SharedLVal Address of the original shared variable.
+ /// \param SharedAddr Address of the original shared variable.
/// \param DRD Declare reduction construct used for reduction item.
void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N,
- Address PrivateAddr, LValue SharedLVal,
+ Address PrivateAddr, Address SharedAddr,
const OMPDeclareReductionDecl *DRD);
public:
@@ -187,10 +186,10 @@ public:
/// \param PrivateAddr Address of the corresponding private item.
/// \param DefaultInit Default initialization sequence that should be
/// performed if no reduction specific initialization is found.
- /// \param SharedLVal Address of the original shared variable.
+ /// \param SharedAddr Address of the original shared variable.
void
emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr,
- LValue SharedLVal,
+ Address SharedAddr,
llvm::function_ref<bool(CodeGenFunction &)> DefaultInit);
/// Returns true if the private copy requires cleanups.
bool needCleanups(unsigned N);
@@ -220,6 +219,11 @@ public:
/// Returns true if the initialization of the reduction item uses initializer
/// from declare reduction construct.
bool usesReductionInitializer(unsigned N) const;
+ /// Return the type of the private item.
+ QualType getPrivateType(unsigned N) const {
+ return cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl())
+ ->getType();
+ }
};
class CGOpenMPRuntime {
@@ -228,7 +232,7 @@ public:
/// as those marked as `omp declare target`.
class DisableAutoDeclareTargetRAII {
CodeGenModule &CGM;
- bool SavedShouldMarkAsGlobal;
+ bool SavedShouldMarkAsGlobal = false;
public:
DisableAutoDeclareTargetRAII(CodeGenModule &CGM);
@@ -303,20 +307,17 @@ public:
protected:
CodeGenModule &CGM;
- StringRef FirstSeparator, Separator;
/// An OpenMP-IR-Builder instance.
llvm::OpenMPIRBuilder OMPBuilder;
- /// Constructor allowing to redefine the name separator for the variables.
- explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
- StringRef Separator);
-
- /// Creates offloading entry for the provided entry ID \a ID,
- /// address \a Addr, size \a Size, and flags \a Flags.
- virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
- uint64_t Size, int32_t Flags,
- llvm::GlobalValue::LinkageTypes Linkage);
+ /// Helper to determine the min/max number of threads/teams for \p D.
+ void computeMinAndMaxThreadsAndTeams(const OMPExecutableDirective &D,
+ CodeGenFunction &CGF,
+ int32_t &MinThreadsVal,
+ int32_t &MaxThreadsVal,
+ int32_t &MinTeamsVal,
+ int32_t &MaxTeamsVal);
/// Helper to emit outlined function for 'target' directive.
/// \param D Directive to emit.
@@ -334,41 +335,6 @@ protected:
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
- /// Emits object of ident_t type with info for source location.
- /// \param Flags Flags for OpenMP location.
- ///
- llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
- unsigned Flags = 0);
-
- /// Emit the number of teams for a target directive. Inspect the num_teams
- /// clause associated with a teams construct combined or closely nested
- /// with the target directive.
- ///
- /// Emit a team of size one for directives such as 'target parallel' that
- /// have no associated teams construct.
- ///
- /// Otherwise, return nullptr.
- const Expr *getNumTeamsExprForTargetDirective(CodeGenFunction &CGF,
- const OMPExecutableDirective &D,
- int32_t &DefaultVal);
- llvm::Value *emitNumTeamsForTargetDirective(CodeGenFunction &CGF,
- const OMPExecutableDirective &D);
- /// Emit the number of threads for a target directive. Inspect the
- /// thread_limit clause associated with a teams construct combined or closely
- /// nested with the target directive.
- ///
- /// Emit the num_threads clause for directives such as 'target parallel' that
- /// have no associated teams construct.
- ///
- /// Otherwise, return nullptr.
- const Expr *
- getNumThreadsExprForTargetDirective(CodeGenFunction &CGF,
- const OMPExecutableDirective &D,
- int32_t &DefaultVal);
- llvm::Value *
- emitNumThreadsForTargetDirective(CodeGenFunction &CGF,
- const OMPExecutableDirective &D);
-
/// Returns pointer to ident_t type.
llvm::Type *getIdentTyPointerTy();
@@ -377,14 +343,16 @@ protected:
llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc);
/// Get the function name of an outlined region.
- // The name can be customized depending on the target.
- //
- virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; }
+ std::string getOutlinedHelperName(StringRef Name) const;
+ std::string getOutlinedHelperName(CodeGenFunction &CGF) const;
+
+ /// Get the function name of a reduction function.
+ std::string getReductionFuncName(StringRef Name) const;
/// Emits \p Callee function call with arguments \p Args with location \p Loc.
void emitCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee Callee,
- ArrayRef<llvm::Value *> Args = llvm::None) const;
+ ArrayRef<llvm::Value *> Args = std::nullopt) const;
/// Emits address of the word in a memory where current thread id is
/// stored.
@@ -416,8 +384,7 @@ protected:
///
llvm::Value *getCriticalRegionLock(StringRef CriticalName);
-private:
-
+protected:
/// Map for SourceLocation and OpenMP runtime library debug locations.
typedef llvm::DenseMap<SourceLocation, llvm::Value *> OpenMPDebugLocMapTy;
OpenMPDebugLocMapTy OpenMPDebugLocMap;
@@ -471,8 +438,8 @@ private:
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
- llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator>
- InternalVars;
+ llvm::StringMap<llvm::AssertingVH<llvm::GlobalVariable>,
+ llvm::BumpPtrAllocator> InternalVars;
/// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
llvm::Type *KmpRoutineEntryPtrTy = nullptr;
QualType KmpRoutineEntryPtrQTy;
@@ -515,225 +482,6 @@ private:
/// kmp_int64 st; // stride
/// };
QualType KmpDimTy;
- /// Type struct __tgt_offload_entry{
- /// void *addr; // Pointer to the offload entry info.
- /// // (function or global)
- /// char *name; // Name of the function or global.
- /// size_t size; // Size of the entry info (0 if it a function).
- /// int32_t flags;
- /// int32_t reserved;
- /// };
- QualType TgtOffloadEntryQTy;
- /// Entity that registers the offloading constants that were emitted so
- /// far.
- class OffloadEntriesInfoManagerTy {
- CodeGenModule &CGM;
-
- /// Number of entries registered so far.
- unsigned OffloadingEntriesNum = 0;
-
- public:
- /// Base class of the entries info.
- class OffloadEntryInfo {
- public:
- /// Kind of a given entry.
- enum OffloadingEntryInfoKinds : unsigned {
- /// Entry is a target region.
- OffloadingEntryInfoTargetRegion = 0,
- /// Entry is a declare target variable.
- OffloadingEntryInfoDeviceGlobalVar = 1,
- /// Invalid entry info.
- OffloadingEntryInfoInvalid = ~0u
- };
-
- protected:
- OffloadEntryInfo() = delete;
- explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {}
- explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
- uint32_t Flags)
- : Flags(Flags), Order(Order), Kind(Kind) {}
- ~OffloadEntryInfo() = default;
-
- public:
- bool isValid() const { return Order != ~0u; }
- unsigned getOrder() const { return Order; }
- OffloadingEntryInfoKinds getKind() const { return Kind; }
- uint32_t getFlags() const { return Flags; }
- void setFlags(uint32_t NewFlags) { Flags = NewFlags; }
- llvm::Constant *getAddress() const {
- return cast_or_null<llvm::Constant>(Addr);
- }
- void setAddress(llvm::Constant *V) {
- assert(!Addr.pointsToAliveValue() && "Address has been set before!");
- Addr = V;
- }
- static bool classof(const OffloadEntryInfo *Info) { return true; }
-
- private:
- /// Address of the entity that has to be mapped for offloading.
- llvm::WeakTrackingVH Addr;
-
- /// Flags associated with the device global.
- uint32_t Flags = 0u;
-
- /// Order this entry was emitted.
- unsigned Order = ~0u;
-
- OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid;
- };
-
- /// Return true if a there are no entries defined.
- bool empty() const;
- /// Return number of entries defined so far.
- unsigned size() const { return OffloadingEntriesNum; }
- OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {}
-
- //
- // Target region entries related.
- //
-
- /// Kind of the target registry entry.
- enum OMPTargetRegionEntryKind : uint32_t {
- /// Mark the entry as target region.
- OMPTargetRegionEntryTargetRegion = 0x0,
- /// Mark the entry as a global constructor.
- OMPTargetRegionEntryCtor = 0x02,
- /// Mark the entry as a global destructor.
- OMPTargetRegionEntryDtor = 0x04,
- };
-
- /// Target region entries info.
- class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo {
- /// Address that can be used as the ID of the entry.
- llvm::Constant *ID = nullptr;
-
- public:
- OffloadEntryInfoTargetRegion()
- : OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {}
- explicit OffloadEntryInfoTargetRegion(unsigned Order,
- llvm::Constant *Addr,
- llvm::Constant *ID,
- OMPTargetRegionEntryKind Flags)
- : OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags),
- ID(ID) {
- setAddress(Addr);
- }
-
- llvm::Constant *getID() const { return ID; }
- void setID(llvm::Constant *V) {
- assert(!ID && "ID has been set before!");
- ID = V;
- }
- static bool classof(const OffloadEntryInfo *Info) {
- return Info->getKind() == OffloadingEntryInfoTargetRegion;
- }
- };
-
- /// Initialize target region entry.
- void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
- StringRef ParentName, unsigned LineNum,
- unsigned Order);
- /// Register target region entry.
- void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
- StringRef ParentName, unsigned LineNum,
- llvm::Constant *Addr, llvm::Constant *ID,
- OMPTargetRegionEntryKind Flags);
- /// Return true if a target region entry with the provided information
- /// exists.
- bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
- StringRef ParentName, unsigned LineNum,
- bool IgnoreAddressId = false) const;
- /// brief Applies action \a Action on all registered entries.
- typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
- const OffloadEntryInfoTargetRegion &)>
- OffloadTargetRegionEntryInfoActTy;
- void actOnTargetRegionEntriesInfo(
- const OffloadTargetRegionEntryInfoActTy &Action);
-
- //
- // Device global variable entries related.
- //
-
- /// Kind of the global variable entry..
- enum OMPTargetGlobalVarEntryKind : uint32_t {
- /// Mark the entry as a to declare target.
- OMPTargetGlobalVarEntryTo = 0x0,
- /// Mark the entry as a to declare target link.
- OMPTargetGlobalVarEntryLink = 0x1,
- };
-
- /// Device global variable entries info.
- class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo {
- /// Type of the global variable.
- CharUnits VarSize;
- llvm::GlobalValue::LinkageTypes Linkage;
-
- public:
- OffloadEntryInfoDeviceGlobalVar()
- : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {}
- explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order,
- OMPTargetGlobalVarEntryKind Flags)
- : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {}
- explicit OffloadEntryInfoDeviceGlobalVar(
- unsigned Order, llvm::Constant *Addr, CharUnits VarSize,
- OMPTargetGlobalVarEntryKind Flags,
- llvm::GlobalValue::LinkageTypes Linkage)
- : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags),
- VarSize(VarSize), Linkage(Linkage) {
- setAddress(Addr);
- }
-
- CharUnits getVarSize() const { return VarSize; }
- void setVarSize(CharUnits Size) { VarSize = Size; }
- llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
- void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; }
- static bool classof(const OffloadEntryInfo *Info) {
- return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
- }
- };
-
- /// Initialize device global variable entry.
- void initializeDeviceGlobalVarEntryInfo(StringRef Name,
- OMPTargetGlobalVarEntryKind Flags,
- unsigned Order);
-
- /// Register device global variable entry.
- void
- registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
- CharUnits VarSize,
- OMPTargetGlobalVarEntryKind Flags,
- llvm::GlobalValue::LinkageTypes Linkage);
- /// Checks if the variable with the given name has been registered already.
- bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const {
- return OffloadEntriesDeviceGlobalVar.count(VarName) > 0;
- }
- /// Applies action \a Action on all registered entries.
- typedef llvm::function_ref<void(StringRef,
- const OffloadEntryInfoDeviceGlobalVar &)>
- OffloadDeviceGlobalVarEntryInfoActTy;
- void actOnDeviceGlobalVarEntriesInfo(
- const OffloadDeviceGlobalVarEntryInfoActTy &Action);
-
- private:
- // Storage for target region entries kind. The storage is to be indexed by
- // file ID, device ID, parent function name and line number.
- typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion>
- OffloadEntriesTargetRegionPerLine;
- typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine>
- OffloadEntriesTargetRegionPerParentName;
- typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName>
- OffloadEntriesTargetRegionPerFile;
- typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile>
- OffloadEntriesTargetRegionPerDevice;
- typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy;
- OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
- /// Storage for device global variable entries kind. The storage is to be
- /// indexed by mangled name.
- typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar>
- OffloadEntriesDeviceGlobalVarTy;
- OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
- };
- OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;
bool ShouldMarkAsGlobal = true;
/// List of the emitted declarations.
@@ -775,14 +523,7 @@ private:
/// Device routines are specific to the
bool HasEmittedDeclareTargetRegion = false;
- /// Loads all the offload entries information from the host IR
- /// metadata.
- void loadOffloadInfoMetadata();
-
- /// Returns __tgt_offload_entry type.
- QualType getTgtOffloadEntryQTy();
-
- /// Start scanning from statement \a S and and emit all target regions
+ /// Start scanning from statement \a S and emit all target regions
/// found along the way.
/// \param S Starting statement.
/// \param ParentName Name of the function declaration that is being scanned.
@@ -794,26 +535,6 @@ private:
/// Returns pointer to kmpc_micro type.
llvm::Type *getKmpc_MicroPointerTy();
- /// Returns __kmpc_for_static_init_* runtime function for the specified
- /// size \a IVSize and sign \a IVSigned.
- llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
- bool IVSigned);
-
- /// Returns __kmpc_dispatch_init_* runtime function for the specified
- /// size \a IVSize and sign \a IVSigned.
- llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize,
- bool IVSigned);
-
- /// Returns __kmpc_dispatch_next_* runtime function for the specified
- /// size \a IVSize and sign \a IVSigned.
- llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize,
- bool IVSigned);
-
- /// Returns __kmpc_dispatch_fini_* runtime function for the specified
- /// size \a IVSize and sign \a IVSigned.
- llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize,
- bool IVSigned);
-
/// If the specified mangled name is not in the module, create and
/// return threadprivate cache object. This object is a pointer's worth of
/// storage that's reserved for use by the OpenMP runtime.
@@ -821,16 +542,6 @@ private:
/// \return Cache variable for the specified threadprivate.
llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);
- /// Gets (if variable with the given name already exist) or creates
- /// internal global variable with the specified Name. The created variable has
- /// linkage CommonLinkage by default and is initialized by null value.
- /// \param Ty Type of the global variable. If it is exist already the type
- /// must be the same.
- /// \param Name Name of the variable.
- llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
- const llvm::Twine &Name,
- unsigned AddressSpace = 0);
-
/// Set of threadprivate variables with the generated initializer.
llvm::StringSet<> ThreadPrivateWithDefinition;
@@ -891,17 +602,6 @@ private:
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const OMPTaskDataTy &Data);
- /// Emit code that pushes the trip count of loops associated with constructs
- /// 'target teams distribute' and 'teams distribute parallel for'.
- /// \param SizeEmitter Emits the int64 value for the number of iterations of
- /// the associated loop.
- void emitTargetNumIterationsCall(
- CodeGenFunction &CGF, const OMPExecutableDirective &D,
- llvm::Value *DeviceID,
- llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
- const OMPLoopDirective &D)>
- SizeEmitter);
-
/// Emit update for lastprivate conditional data.
void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal,
StringRef UniqueDeclName, LValue LVal,
@@ -915,12 +615,88 @@ private:
LValue DepobjLVal,
SourceLocation Loc);
+ SmallVector<llvm::Value *, 4>
+ emitDepobjElementsSizes(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
+ const OMPTaskDataTy::DependData &Data);
+
+ void emitDepobjElements(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
+ LValue PosLVal, const OMPTaskDataTy::DependData &Data,
+ Address DependenciesArray);
+
public:
- explicit CGOpenMPRuntime(CodeGenModule &CGM)
- : CGOpenMPRuntime(CGM, ".", ".") {}
+ explicit CGOpenMPRuntime(CodeGenModule &CGM);
virtual ~CGOpenMPRuntime() {}
virtual void clear();
+ /// Emits object of ident_t type with info for source location.
+ /// \param Flags Flags for OpenMP location.
+ /// \param EmitLoc emit source location with debug-info is off.
+ ///
+ llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
+ unsigned Flags = 0, bool EmitLoc = false);
+
+ /// Emit the number of teams for a target directive. Inspect the num_teams
+ /// clause associated with a teams construct combined or closely nested
+ /// with the target directive.
+ ///
+ /// Emit a team of size one for directives such as 'target parallel' that
+ /// have no associated teams construct.
+ ///
+ /// Otherwise, return nullptr.
+ const Expr *getNumTeamsExprForTargetDirective(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D,
+ int32_t &MinTeamsVal,
+ int32_t &MaxTeamsVal);
+ llvm::Value *emitNumTeamsForTargetDirective(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D);
+
+ /// Check for a number of threads upper bound constant value (stored in \p
+ /// UpperBound), or expression (returned). If the value is conditional (via an
+ /// if-clause), store the condition in \p CondExpr. Similarly, a potential
+ /// thread limit expression is stored in \p ThreadLimitExpr. If \p
+ /// UpperBoundOnly is true, no expression evaluation is perfomed.
+ const Expr *getNumThreadsExprForTargetDirective(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ int32_t &UpperBound, bool UpperBoundOnly,
+ llvm::Value **CondExpr = nullptr, const Expr **ThreadLimitExpr = nullptr);
+
+ /// Emit an expression that denotes the number of threads a target region
+ /// shall use. Will generate "i32 0" to allow the runtime to choose.
+ llvm::Value *
+ emitNumThreadsForTargetDirective(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D);
+
+ /// Return the trip count of loops associated with constructs / 'target teams
+ /// distribute' and 'teams distribute parallel for'. \param SizeEmitter Emits
+ /// the int64 value for the number of iterations of the associated loop.
+ llvm::Value *emitTargetNumIterationsCall(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
+ const OMPLoopDirective &D)>
+ SizeEmitter);
+
+ /// Returns true if the current target is a GPU.
+ virtual bool isGPU() const { return false; }
+
+ /// Check if the variable length declaration is delayed:
+ virtual bool isDelayedVariableLengthDecl(CodeGenFunction &CGF,
+ const VarDecl *VD) const {
+ return false;
+ };
+
+ /// Get call to __kmpc_alloc_shared
+ virtual std::pair<llvm::Value *, llvm::Value *>
+ getKmpcAllocShared(CodeGenFunction &CGF, const VarDecl *VD) {
+ llvm_unreachable("not implemented");
+ }
+
+ /// Get call to __kmpc_free_shared
+ virtual void getKmpcFreeShared(
+ CodeGenFunction &CGF,
+ const std::pair<llvm::Value *, llvm::Value *> &AddrSizePair) {
+ llvm_unreachable("not implemented");
+ }
+
/// Emits code for OpenMP 'if' clause using specified \a CodeGen
/// function. Here is the logic:
/// if (Cond) {
@@ -958,26 +734,30 @@ public:
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
+ /// \param CGF Reference to current CodeGenFunction.
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitParallelOutlinedFunction(
- const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
+ /// \param CGF Reference to current CodeGenFunction.
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitTeamsOutlinedFunction(
- const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
@@ -1013,11 +793,13 @@ public:
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
+ /// \param NumThreads The value corresponding to the num_threads clause, if
+ /// any, or nullptr.
///
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond);
+ const Expr *IfCond, llvm::Value *NumThreads);
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
@@ -1047,6 +829,11 @@ public:
/// Emits code for a taskyield directive.
virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc);
+ /// Emit __kmpc_error call for error directive
+ /// extern void __kmpc_error(ident_t *loc, int severity, const char *message);
+ virtual void emitErrorCall(CodeGenFunction &CGF, SourceLocation Loc, Expr *ME,
+ bool IsFatal);
+
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
@@ -1302,13 +1089,12 @@ public:
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr);
- /// Emit a code for initialization of declare target variable.
- /// \param VD Declare target variable.
- /// \param Addr Address of the global variable \a VD.
+ /// Emit code for handling declare target functions in the runtime.
+ /// \param FD Declare target function.
+ /// \param Addr Address of the global \a FD.
/// \param PerformInit true if initialization expression is not constant.
- virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD,
- llvm::GlobalVariable *Addr,
- bool PerformInit);
+ virtual void emitDeclareTargetFunction(const FunctionDecl *FD,
+ llvm::GlobalValue *GV);
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
@@ -1404,18 +1190,17 @@ public:
bool HasCancel = false);
/// Emits reduction function.
- /// \param ArgsType Array type containing pointers to reduction variables.
+ /// \param ReducerName Name of the function calling the reduction.
+ /// \param ArgsElemType Array type containing pointers to reduction variables.
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
- llvm::Function *emitReductionFunction(SourceLocation Loc,
- llvm::Type *ArgsType,
- ArrayRef<const Expr *> Privates,
- ArrayRef<const Expr *> LHSExprs,
- ArrayRef<const Expr *> RHSExprs,
- ArrayRef<const Expr *> ReductionOps);
+ llvm::Function *emitReductionFunction(
+ StringRef ReducerName, SourceLocation Loc, llvm::Type *ArgsElemType,
+ ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs,
+ ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps);
/// Emits single reduction combiner
void emitSingleReductionCombiner(CodeGenFunction &CGF,
@@ -1545,7 +1330,8 @@ public:
LValue SharedLVal);
/// Emit code for 'taskwait' directive.
- virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);
+ virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc,
+ const OMPTaskDataTy &Data);
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
@@ -1650,67 +1436,26 @@ public:
virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc);
+ /// Emits call to void __kmpc_set_thread_limit(ident_t *loc, kmp_int32
+ /// global_tid, kmp_int32 thread_limit) to generate code for
+ /// thread_limit clause on target directive
+ /// \param ThreadLimit An integer expression of threads.
+ virtual void emitThreadLimitClause(CodeGenFunction &CGF,
+ const Expr *ThreadLimit,
+ SourceLocation Loc);
+
/// Struct that keeps all the relevant information that should be kept
/// throughout a 'target data' region.
- class TargetDataInfo {
- /// Set to true if device pointer information have to be obtained.
- bool RequiresDevicePointerInfo = false;
- /// Set to true if Clang emits separate runtime calls for the beginning and
- /// end of the region. These calls might have separate map type arrays.
- bool SeparateBeginEndCalls = false;
-
+ class TargetDataInfo : public llvm::OpenMPIRBuilder::TargetDataInfo {
public:
- /// The array of base pointer passed to the runtime library.
- llvm::Value *BasePointersArray = nullptr;
- /// The array of section pointers passed to the runtime library.
- llvm::Value *PointersArray = nullptr;
- /// The array of sizes passed to the runtime library.
- llvm::Value *SizesArray = nullptr;
- /// The array of map types passed to the runtime library for the beginning
- /// of the region or for the entire region if there are no separate map
- /// types for the region end.
- llvm::Value *MapTypesArray = nullptr;
- /// The array of map types passed to the runtime library for the end of the
- /// region, or nullptr if there are no separate map types for the region
- /// end.
- llvm::Value *MapTypesArrayEnd = nullptr;
- /// The array of user-defined mappers passed to the runtime library.
- llvm::Value *MappersArray = nullptr;
- /// The array of original declaration names of mapped pointers sent to the
- /// runtime library for debugging
- llvm::Value *MapNamesArray = nullptr;
- /// Indicate whether any user-defined mapper exists.
- bool HasMapper = false;
- /// The total number of pointers passed to the runtime library.
- unsigned NumberOfPtrs = 0u;
- /// Map between the a declaration of a capture and the corresponding base
- /// pointer address where the runtime returns the device pointers.
- llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;
-
- explicit TargetDataInfo() {}
+ explicit TargetDataInfo() : llvm::OpenMPIRBuilder::TargetDataInfo() {}
explicit TargetDataInfo(bool RequiresDevicePointerInfo,
bool SeparateBeginEndCalls)
- : RequiresDevicePointerInfo(RequiresDevicePointerInfo),
- SeparateBeginEndCalls(SeparateBeginEndCalls) {}
- /// Clear information about the data arrays.
- void clearArrayInfo() {
- BasePointersArray = nullptr;
- PointersArray = nullptr;
- SizesArray = nullptr;
- MapTypesArray = nullptr;
- MapTypesArrayEnd = nullptr;
- MapNamesArray = nullptr;
- MappersArray = nullptr;
- HasMapper = false;
- NumberOfPtrs = 0u;
- }
- /// Return true if the current target data information has valid arrays.
- bool isValid() {
- return BasePointersArray && PointersArray && SizesArray &&
- MapTypesArray && (!HasMapper || MappersArray) && NumberOfPtrs;
- }
- bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
- bool separateBeginEndCalls() { return SeparateBeginEndCalls; }
+ : llvm::OpenMPIRBuilder::TargetDataInfo(RequiresDevicePointerInfo,
+ SeparateBeginEndCalls) {}
+ /// Map between the a declaration of a capture and the corresponding new
+ /// llvm address where the runtime returns the device pointers.
+ llvm::DenseMap<const ValueDecl *, llvm::Value *> CaptureDeviceAddrMap;
};
/// Emit the target data mapping code associated with \a D.
@@ -1725,7 +1470,7 @@ public:
const OMPExecutableDirective &D,
const Expr *IfCond, const Expr *Device,
const RegionCodeGenTy &CodeGen,
- TargetDataInfo &Info);
+ CGOpenMPRuntime::TargetDataInfo &Info);
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
@@ -1756,6 +1501,11 @@ public:
virtual void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C);
+ /// Emit code for doacross ordered directive with 'doacross' clause.
+ /// \param C 'doacross' clause with 'sink|source' dependence type.
+ virtual void emitDoacrossOrdered(CodeGenFunction &CGF,
+ const OMPDoacrossClause *C);
+
/// Translates the native parameter of outlined function if this is required
/// for target.
/// \param FD Field decl from captured record for the parameter.
@@ -1790,7 +1540,7 @@ public:
virtual void
emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee OutlinedFn,
- ArrayRef<llvm::Value *> Args = llvm::None) const;
+ ArrayRef<llvm::Value *> Args = std::nullopt) const;
/// Emits OpenMP-specific function prolog.
/// Required for device constructs.
@@ -1933,30 +1683,30 @@ public:
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
+ /// \param CGF Reference to current CodeGenFunction.
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
- llvm::Function *
- emitParallelOutlinedFunction(const OMPExecutableDirective &D,
- const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind,
- const RegionCodeGenTy &CodeGen) override;
+ llvm::Function *emitParallelOutlinedFunction(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
+ /// \param CGF Reference to current CodeGenFunction.
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
- llvm::Function *
- emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
- const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind,
- const RegionCodeGenTy &CodeGen) override;
+ llvm::Function *emitTeamsOutlinedFunction(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
@@ -1988,11 +1738,13 @@ public:
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
+ /// \param NumThreads The value corresponding to the num_threads clause, if
+ /// any, or nullptr.
///
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond) override;
+ const Expr *IfCond, llvm::Value *NumThreads) override;
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
@@ -2383,7 +2135,8 @@ public:
LValue SharedLVal) override;
/// Emit code for 'taskwait' directive.
- void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) override;
+ void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc,
+ const OMPTaskDataTy &Data) override;
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
@@ -2482,7 +2235,7 @@ public:
void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D, const Expr *IfCond,
const Expr *Device, const RegionCodeGenTy &CodeGen,
- TargetDataInfo &Info) override;
+ CGOpenMPRuntime::TargetDataInfo &Info) override;
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
@@ -2506,6 +2259,11 @@ public:
void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C) override;
+ /// Emit code for doacross ordered directive with 'doacross' clause.
+ /// \param C 'doacross' clause with 'sink|source' dependence type.
+ void emitDoacrossOrdered(CodeGenFunction &CGF,
+ const OMPDoacrossClause *C) override;
+
/// Translates the native parameter of outlined function if this is required
/// for target.
/// \param FD Field decl from captured record for the parameter.
@@ -2528,6 +2286,34 @@ public:
};
} // namespace CodeGen
+// Utility for openmp doacross clause kind
+namespace {
+template <typename T> class OMPDoacrossKind {
+public:
+ bool isSink(const T *) { return false; }
+ bool isSource(const T *) { return false; }
+};
+template <> class OMPDoacrossKind<OMPDependClause> {
+public:
+ bool isSink(const OMPDependClause *C) {
+ return C->getDependencyKind() == OMPC_DEPEND_sink;
+ }
+ bool isSource(const OMPDependClause *C) {
+ return C->getDependencyKind() == OMPC_DEPEND_source;
+ }
+};
+template <> class OMPDoacrossKind<OMPDoacrossClause> {
+public:
+ bool isSource(const OMPDoacrossClause *C) {
+ return C->getDependenceType() == OMPC_DOACROSS_source ||
+ C->getDependenceType() == OMPC_DOACROSS_source_omp_cur_iteration;
+ }
+ bool isSink(const OMPDoacrossClause *C) {
+ return C->getDependenceType() == OMPC_DOACROSS_sink ||
+ C->getDependenceType() == OMPC_DOACROSS_sink_omp_cur_iteration;
+ }
+};
+} // namespace
} // namespace clang
#endif
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeAMDGCN.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeAMDGCN.cpp
deleted file mode 100644
index 33d4ab838af1..000000000000
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeAMDGCN.cpp
+++ /dev/null
@@ -1,60 +0,0 @@
-//===-- CGOpenMPRuntimeAMDGCN.cpp - Interface to OpenMP AMDGCN Runtimes --===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This provides a class for OpenMP runtime code generation specialized to
-// AMDGCN targets from generalized CGOpenMPRuntimeGPU class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "CGOpenMPRuntimeAMDGCN.h"
-#include "CGOpenMPRuntimeGPU.h"
-#include "CodeGenFunction.h"
-#include "clang/AST/Attr.h"
-#include "clang/AST/DeclOpenMP.h"
-#include "clang/AST/StmtOpenMP.h"
-#include "clang/AST/StmtVisitor.h"
-#include "clang/Basic/Cuda.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/IR/IntrinsicsAMDGPU.h"
-
-using namespace clang;
-using namespace CodeGen;
-using namespace llvm::omp;
-
-CGOpenMPRuntimeAMDGCN::CGOpenMPRuntimeAMDGCN(CodeGenModule &CGM)
- : CGOpenMPRuntimeGPU(CGM) {
- if (!CGM.getLangOpts().OpenMPIsDevice)
- llvm_unreachable("OpenMP AMDGCN can only handle device code.");
-}
-
-llvm::Value *CGOpenMPRuntimeAMDGCN::getGPUWarpSize(CodeGenFunction &CGF) {
- CGBuilderTy &Bld = CGF.Builder;
- // return constant compile-time target-specific warp size
- unsigned WarpSize = CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
- return Bld.getInt32(WarpSize);
-}
-
-llvm::Value *CGOpenMPRuntimeAMDGCN::getGPUThreadID(CodeGenFunction &CGF) {
- CGBuilderTy &Bld = CGF.Builder;
- llvm::Function *F =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::amdgcn_workitem_id_x);
- return Bld.CreateCall(F, llvm::None, "nvptx_tid");
-}
-
-llvm::Value *CGOpenMPRuntimeAMDGCN::getGPUNumThreads(CodeGenFunction &CGF) {
- CGBuilderTy &Bld = CGF.Builder;
- llvm::Module *M = &CGF.CGM.getModule();
- const char *LocSize = "__kmpc_amdgcn_gpu_num_threads";
- llvm::Function *F = M->getFunction(LocSize);
- if (!F) {
- F = llvm::Function::Create(
- llvm::FunctionType::get(CGF.Int32Ty, llvm::None, false),
- llvm::GlobalVariable::ExternalLinkage, LocSize, &CGF.CGM.getModule());
- }
- return Bld.CreateCall(F, llvm::None, "nvptx_num_threads");
-}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeAMDGCN.h b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeAMDGCN.h
deleted file mode 100644
index c1421261bfc1..000000000000
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeAMDGCN.h
+++ /dev/null
@@ -1,43 +0,0 @@
-//===--- CGOpenMPRuntimeAMDGCN.h - Interface to OpenMP AMDGCN Runtimes ---===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This provides a class for OpenMP runtime code generation specialized to
-// AMDGCN targets from generalized CGOpenMPRuntimeGPU class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIMEAMDGCN_H
-#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIMEAMDGCN_H
-
-#include "CGOpenMPRuntime.h"
-#include "CGOpenMPRuntimeGPU.h"
-#include "CodeGenFunction.h"
-#include "clang/AST/StmtOpenMP.h"
-
-namespace clang {
-namespace CodeGen {
-
-class CGOpenMPRuntimeAMDGCN final : public CGOpenMPRuntimeGPU {
-
-public:
- explicit CGOpenMPRuntimeAMDGCN(CodeGenModule &CGM);
-
- /// Get the GPU warp size.
- llvm::Value *getGPUWarpSize(CodeGenFunction &CGF) override;
-
- /// Get the id of the current thread on the GPU.
- llvm::Value *getGPUThreadID(CodeGenFunction &CGF) override;
-
- /// Get the maximum number of threads in a block of the GPU.
- llvm::Value *getGPUNumThreads(CodeGenFunction &CGF) override;
-};
-
-} // namespace CodeGen
-} // namespace clang
-
-#endif // LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIMEAMDGCN_H
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
index 63fecedc6fb7..299ee1460b3d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -12,16 +12,16 @@
//===----------------------------------------------------------------------===//
#include "CGOpenMPRuntimeGPU.h"
-#include "CGOpenMPRuntimeNVPTX.h"
#include "CodeGenFunction.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
+#include "clang/AST/OpenMPClause.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Cuda.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Frontend/OpenMP/OMPGridValues.h"
-#include "llvm/IR/IntrinsicsNVPTX.h"
+#include "llvm/Support/MathExtras.h"
using namespace clang;
using namespace CodeGen;
@@ -74,46 +74,15 @@ private:
CGOpenMPRuntimeGPU::ExecutionMode SavedExecMode =
CGOpenMPRuntimeGPU::EM_Unknown;
CGOpenMPRuntimeGPU::ExecutionMode &ExecMode;
- bool SavedRuntimeMode = false;
- bool *RuntimeMode = nullptr;
public:
- /// Constructor for Non-SPMD mode.
- ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode)
- : ExecMode(ExecMode) {
- SavedExecMode = ExecMode;
- ExecMode = CGOpenMPRuntimeGPU::EM_NonSPMD;
- }
- /// Constructor for SPMD mode.
ExecutionRuntimeModesRAII(CGOpenMPRuntimeGPU::ExecutionMode &ExecMode,
- bool &RuntimeMode, bool FullRuntimeMode)
- : ExecMode(ExecMode), RuntimeMode(&RuntimeMode) {
+ CGOpenMPRuntimeGPU::ExecutionMode EntryMode)
+ : ExecMode(ExecMode) {
SavedExecMode = ExecMode;
- SavedRuntimeMode = RuntimeMode;
- ExecMode = CGOpenMPRuntimeGPU::EM_SPMD;
- RuntimeMode = FullRuntimeMode;
- }
- ~ExecutionRuntimeModesRAII() {
- ExecMode = SavedExecMode;
- if (RuntimeMode)
- *RuntimeMode = SavedRuntimeMode;
+ ExecMode = EntryMode;
}
-};
-
-/// GPU Configuration: This information can be derived from cuda registers,
-/// however, providing compile time constants helps generate more efficient
-/// code. For all practical purposes this is fine because the configuration
-/// is the same for all known NVPTX architectures.
-enum MachineConfiguration : unsigned {
- /// See "llvm/Frontend/OpenMP/OMPGridValues.h" for various related target
- /// specific Grid Values like GV_Warp_Size, GV_Warp_Size_Log2,
- /// and GV_Warp_Size_Log2_Mask.
-
- /// Global memory alignment for performance.
- GlobalMemoryAlignment = 128,
-
- /// Maximal size of the shared memory buffer.
- SharedMemorySize = 128,
+ ~ExecutionRuntimeModesRAII() { ExecMode = SavedExecMode; }
};
static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
@@ -138,31 +107,23 @@ static const ValueDecl *getPrivateItem(const Expr *RefExpr) {
return cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
}
-
static RecordDecl *buildRecordForGlobalizedVars(
ASTContext &C, ArrayRef<const ValueDecl *> EscapedDecls,
ArrayRef<const ValueDecl *> EscapedDeclsForTeams,
llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *>
- &MappedDeclsFields, int BufSize) {
+ &MappedDeclsFields,
+ int BufSize) {
using VarsDataTy = std::pair<CharUnits /*Align*/, const ValueDecl *>;
if (EscapedDecls.empty() && EscapedDeclsForTeams.empty())
return nullptr;
SmallVector<VarsDataTy, 4> GlobalizedVars;
for (const ValueDecl *D : EscapedDecls)
- GlobalizedVars.emplace_back(
- CharUnits::fromQuantity(std::max(
- C.getDeclAlign(D).getQuantity(),
- static_cast<CharUnits::QuantityType>(GlobalMemoryAlignment))),
- D);
+ GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
for (const ValueDecl *D : EscapedDeclsForTeams)
GlobalizedVars.emplace_back(C.getDeclAlign(D), D);
- llvm::stable_sort(GlobalizedVars, [](VarsDataTy L, VarsDataTy R) {
- return L.first > R.first;
- });
// Build struct _globalized_locals_ty {
- // /* globalized vars */[WarSize] align (max(decl_align,
- // GlobalMemoryAlignment))
+ // /* globalized vars */[WarSize] align (decl_align)
// /* globalized vars */ for EscapedDeclsForTeams
// };
RecordDecl *GlobalizedRD = C.buildImplicitRecord("_globalized_locals_ty");
@@ -192,24 +153,24 @@ static RecordDecl *buildRecordForGlobalizedVars(
Field->addAttr(*I);
}
} else {
- llvm::APInt ArraySize(32, BufSize);
- Type = C.getConstantArrayType(Type, ArraySize, nullptr, ArrayType::Normal,
- 0);
+ if (BufSize > 1) {
+ llvm::APInt ArraySize(32, BufSize);
+ Type = C.getConstantArrayType(Type, ArraySize, nullptr,
+ ArraySizeModifier::Normal, 0);
+ }
Field = FieldDecl::Create(
C, GlobalizedRD, Loc, Loc, VD->getIdentifier(), Type,
C.getTrivialTypeSourceInfo(Type, SourceLocation()),
/*BW=*/nullptr, /*Mutable=*/false,
/*InitStyle=*/ICIS_NoInit);
Field->setAccess(AS_public);
- llvm::APInt Align(32, std::max(C.getDeclAlign(VD).getQuantity(),
- static_cast<CharUnits::QuantityType>(
- GlobalMemoryAlignment)));
+ llvm::APInt Align(32, Pair.first.getQuantity());
Field->addAttr(AlignedAttr::CreateImplicit(
C, /*IsAlignmentExpr=*/true,
IntegerLiteral::Create(C, Align,
C.getIntTypeForBitwidth(32, /*Signed=*/0),
SourceLocation()),
- {}, AttributeCommonInfo::AS_GNU, AlignedAttr::GNU_aligned));
+ {}, AlignedAttr::GNU_aligned));
}
GlobalizedRD->addDecl(Field);
MappedDeclsFields.try_emplace(VD, Field);
@@ -224,6 +185,7 @@ class CheckVarsEscapingDeclContext final
CodeGenFunction &CGF;
llvm::SetVector<const ValueDecl *> EscapedDecls;
llvm::SetVector<const ValueDecl *> EscapedVariableLengthDecls;
+ llvm::SetVector<const ValueDecl *> DelayedVariableLengthDecls;
llvm::SmallPtrSet<const Decl *, 4> EscapedParameters;
RecordDecl *GlobalizedRD = nullptr;
llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
@@ -240,10 +202,12 @@ class CheckVarsEscapingDeclContext final
if (VD->hasAttrs() && VD->hasAttr<OMPAllocateDeclAttr>())
return;
// Variables captured by value must be globalized.
+ bool IsCaptured = false;
if (auto *CSI = CGF.CapturedStmtInfo) {
if (const FieldDecl *FD = CSI->lookup(cast<VarDecl>(VD))) {
// Check if need to capture the variable that was already captured by
// value in the outer region.
+ IsCaptured = true;
if (!IsForCombinedParallelRegion) {
if (!FD->hasAttrs())
return;
@@ -270,9 +234,14 @@ class CheckVarsEscapingDeclContext final
VD->getType()->isReferenceType())
// Do not globalize variables with reference type.
return;
- if (VD->getType()->isVariablyModifiedType())
- EscapedVariableLengthDecls.insert(VD);
- else
+ if (VD->getType()->isVariablyModifiedType()) {
+ // If not captured at the target region level then mark the escaped
+ // variable as delayed.
+ if (IsCaptured)
+ EscapedVariableLengthDecls.insert(VD);
+ else
+ DelayedVariableLengthDecls.insert(VD);
+ } else
EscapedDecls.insert(VD);
}
@@ -339,7 +308,7 @@ class CheckVarsEscapingDeclContext final
assert(!GlobalizedRD &&
"Record for globalized variables is built already.");
ArrayRef<const ValueDecl *> EscapedDeclsForParallel, EscapedDeclsForTeams;
- unsigned WarpSize = CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
+ unsigned WarpSize = CGF.getTarget().getGridValue().GV_Warp_Size;
if (IsInTTDRegion)
EscapedDeclsForTeams = EscapedDecls.getArrayRef();
else
@@ -446,9 +415,8 @@ public:
markAsEscaped(VD);
if (isa<OMPCapturedExprDecl>(VD))
VisitValueDecl(VD);
- else if (const auto *VarD = dyn_cast<VarDecl>(VD))
- if (VarD->isInitCapture())
- VisitValueDecl(VD);
+ else if (VD->isInitCapture())
+ VisitValueDecl(VD);
}
void VisitUnaryOperator(const UnaryOperator *E) {
if (!E)
@@ -505,10 +473,7 @@ public:
const FieldDecl *getFieldForGlobalizedVar(const ValueDecl *VD) const {
assert(GlobalizedRD &&
"Record for globalized variables must be generated already.");
- auto I = MappedDeclsFields.find(VD);
- if (I == MappedDeclsFields.end())
- return nullptr;
- return I->getSecond();
+ return MappedDeclsFields.lookup(VD);
}
/// Returns the list of the escaped local variables/parameters.
@@ -527,6 +492,12 @@ public:
ArrayRef<const ValueDecl *> getEscapedVariableLengthDecls() const {
return EscapedVariableLengthDecls.getArrayRef();
}
+
+ /// Returns the list of the delayed variables with the variably modified
+ /// types.
+ ArrayRef<const ValueDecl *> getDelayedVariableLengthDecls() const {
+ return DelayedVariableLengthDecls.getArrayRef();
+ }
};
} // anonymous namespace
@@ -536,7 +507,7 @@ public:
static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
CGBuilderTy &Bld = CGF.Builder;
unsigned LaneIDBits =
- CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size_Log2);
+ llvm::Log2_32(CGF.getTarget().getGridValue().GV_Warp_Size);
auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
return Bld.CreateAShr(RT.getGPUThreadID(CGF), LaneIDBits, "nvptx_warp_id");
}
@@ -546,8 +517,10 @@ static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
CGBuilderTy &Bld = CGF.Builder;
- unsigned LaneIDMask = CGF.getContext().getTargetInfo().getGridValue(
- llvm::omp::GV_Warp_Size_Log2_Mask);
+ unsigned LaneIDBits =
+ llvm::Log2_32(CGF.getTarget().getGridValue().GV_Warp_Size);
+ assert(LaneIDBits < 32 && "Invalid LaneIDBits size in NVPTX device.");
+ unsigned LaneIDMask = ~0u >> (32u - LaneIDBits);
auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
return Bld.CreateAnd(RT.getGPUThreadID(CGF), Bld.getInt32(LaneIDMask),
"nvptx_lane_id");
@@ -558,10 +531,9 @@ CGOpenMPRuntimeGPU::getExecutionMode() const {
return CurrentExecutionMode;
}
-static CGOpenMPRuntimeGPU::DataSharingMode
-getDataSharingMode(CodeGenModule &CGM) {
- return CGM.getLangOpts().OpenMPCUDAMode ? CGOpenMPRuntimeGPU::CUDA
- : CGOpenMPRuntimeGPU::Generic;
+CGOpenMPRuntimeGPU::DataSharingMode
+CGOpenMPRuntimeGPU::getDataSharingMode() const {
+ return CurrentDataSharingMode;
}
/// Check for inner (nested) SPMD construct, if any
@@ -674,6 +646,8 @@ static bool supportsSPMDExecutionMode(ASTContext &Ctx,
case OMPD_target:
case OMPD_target_teams:
return hasNestedSPMDDirective(Ctx, D);
+ case OMPD_target_teams_loop:
+ case OMPD_target_parallel_loop:
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
@@ -747,298 +721,40 @@ static bool supportsSPMDExecutionMode(ASTContext &Ctx,
"Unknown programming model for OpenMP directive on NVPTX target.");
}
-/// Check if the directive is loops based and has schedule clause at all or has
-/// static scheduling.
-static bool hasStaticScheduling(const OMPExecutableDirective &D) {
- assert(isOpenMPWorksharingDirective(D.getDirectiveKind()) &&
- isOpenMPLoopDirective(D.getDirectiveKind()) &&
- "Expected loop-based directive.");
- return !D.hasClausesOfKind<OMPOrderedClause>() &&
- (!D.hasClausesOfKind<OMPScheduleClause>() ||
- llvm::any_of(D.getClausesOfKind<OMPScheduleClause>(),
- [](const OMPScheduleClause *C) {
- return C->getScheduleKind() == OMPC_SCHEDULE_static;
- }));
-}
-
-/// Check for inner (nested) lightweight runtime construct, if any
-static bool hasNestedLightweightDirective(ASTContext &Ctx,
- const OMPExecutableDirective &D) {
- assert(supportsSPMDExecutionMode(Ctx, D) && "Expected SPMD mode directive.");
- const auto *CS = D.getInnermostCapturedStmt();
- const auto *Body =
- CS->getCapturedStmt()->IgnoreContainers(/*IgnoreCaptured=*/true);
- const Stmt *ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
-
- if (const auto *NestedDir =
- dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
- OpenMPDirectiveKind DKind = NestedDir->getDirectiveKind();
- switch (D.getDirectiveKind()) {
- case OMPD_target:
- if (isOpenMPParallelDirective(DKind) &&
- isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
- hasStaticScheduling(*NestedDir))
- return true;
- if (DKind == OMPD_teams_distribute_simd || DKind == OMPD_simd)
- return true;
- if (DKind == OMPD_parallel) {
- Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
- /*IgnoreCaptured=*/true);
- if (!Body)
- return false;
- ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
- if (const auto *NND =
- dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
- DKind = NND->getDirectiveKind();
- if (isOpenMPWorksharingDirective(DKind) &&
- isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
- return true;
- }
- } else if (DKind == OMPD_teams) {
- Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
- /*IgnoreCaptured=*/true);
- if (!Body)
- return false;
- ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
- if (const auto *NND =
- dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
- DKind = NND->getDirectiveKind();
- if (isOpenMPParallelDirective(DKind) &&
- isOpenMPWorksharingDirective(DKind) &&
- isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
- return true;
- if (DKind == OMPD_parallel) {
- Body = NND->getInnermostCapturedStmt()->IgnoreContainers(
- /*IgnoreCaptured=*/true);
- if (!Body)
- return false;
- ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
- if (const auto *NND =
- dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
- DKind = NND->getDirectiveKind();
- if (isOpenMPWorksharingDirective(DKind) &&
- isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
- return true;
- }
- }
- }
- }
- return false;
- case OMPD_target_teams:
- if (isOpenMPParallelDirective(DKind) &&
- isOpenMPWorksharingDirective(DKind) && isOpenMPLoopDirective(DKind) &&
- hasStaticScheduling(*NestedDir))
- return true;
- if (DKind == OMPD_distribute_simd || DKind == OMPD_simd)
- return true;
- if (DKind == OMPD_parallel) {
- Body = NestedDir->getInnermostCapturedStmt()->IgnoreContainers(
- /*IgnoreCaptured=*/true);
- if (!Body)
- return false;
- ChildStmt = CGOpenMPRuntime::getSingleCompoundChild(Ctx, Body);
- if (const auto *NND =
- dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
- DKind = NND->getDirectiveKind();
- if (isOpenMPWorksharingDirective(DKind) &&
- isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NND))
- return true;
- }
- }
- return false;
- case OMPD_target_parallel:
- if (DKind == OMPD_simd)
- return true;
- return isOpenMPWorksharingDirective(DKind) &&
- isOpenMPLoopDirective(DKind) && hasStaticScheduling(*NestedDir);
- case OMPD_target_teams_distribute:
- case OMPD_target_simd:
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_teams_distribute_simd:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd:
- case OMPD_parallel:
- case OMPD_for:
- case OMPD_parallel_for:
- case OMPD_parallel_master:
- case OMPD_parallel_sections:
- case OMPD_for_simd:
- case OMPD_parallel_for_simd:
- case OMPD_cancel:
- case OMPD_cancellation_point:
- case OMPD_ordered:
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_task:
- case OMPD_simd:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_critical:
- case OMPD_taskyield:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_taskgroup:
- case OMPD_atomic:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_teams:
- case OMPD_target_data:
- case OMPD_target_exit_data:
- case OMPD_target_enter_data:
- case OMPD_distribute:
- case OMPD_distribute_simd:
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd:
- case OMPD_target_update:
- case OMPD_declare_simd:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- case OMPD_requires:
- case OMPD_unknown:
- default:
- llvm_unreachable("Unexpected directive.");
- }
- }
-
- return false;
-}
-
-/// Checks if the construct supports lightweight runtime. It must be SPMD
-/// construct + inner loop-based construct with static scheduling.
-static bool supportsLightweightRuntime(ASTContext &Ctx,
- const OMPExecutableDirective &D) {
- if (!supportsSPMDExecutionMode(Ctx, D))
- return false;
- OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
- switch (DirectiveKind) {
- case OMPD_target:
- case OMPD_target_teams:
- case OMPD_target_parallel:
- return hasNestedLightweightDirective(Ctx, D);
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
- case OMPD_target_teams_distribute_parallel_for:
- case OMPD_target_teams_distribute_parallel_for_simd:
- // (Last|First)-privates must be shared in parallel region.
- return hasStaticScheduling(D);
- case OMPD_target_simd:
- case OMPD_target_teams_distribute_simd:
- return true;
- case OMPD_target_teams_distribute:
- return false;
- case OMPD_parallel:
- case OMPD_for:
- case OMPD_parallel_for:
- case OMPD_parallel_master:
- case OMPD_parallel_sections:
- case OMPD_for_simd:
- case OMPD_parallel_for_simd:
- case OMPD_cancel:
- case OMPD_cancellation_point:
- case OMPD_ordered:
- case OMPD_threadprivate:
- case OMPD_allocate:
- case OMPD_task:
- case OMPD_simd:
- case OMPD_sections:
- case OMPD_section:
- case OMPD_single:
- case OMPD_master:
- case OMPD_critical:
- case OMPD_taskyield:
- case OMPD_barrier:
- case OMPD_taskwait:
- case OMPD_taskgroup:
- case OMPD_atomic:
- case OMPD_flush:
- case OMPD_depobj:
- case OMPD_scan:
- case OMPD_teams:
- case OMPD_target_data:
- case OMPD_target_exit_data:
- case OMPD_target_enter_data:
- case OMPD_distribute:
- case OMPD_distribute_simd:
- case OMPD_distribute_parallel_for:
- case OMPD_distribute_parallel_for_simd:
- case OMPD_teams_distribute:
- case OMPD_teams_distribute_simd:
- case OMPD_teams_distribute_parallel_for:
- case OMPD_teams_distribute_parallel_for_simd:
- case OMPD_target_update:
- case OMPD_declare_simd:
- case OMPD_declare_variant:
- case OMPD_begin_declare_variant:
- case OMPD_end_declare_variant:
- case OMPD_declare_target:
- case OMPD_end_declare_target:
- case OMPD_declare_reduction:
- case OMPD_declare_mapper:
- case OMPD_taskloop:
- case OMPD_taskloop_simd:
- case OMPD_master_taskloop:
- case OMPD_master_taskloop_simd:
- case OMPD_parallel_master_taskloop:
- case OMPD_parallel_master_taskloop_simd:
- case OMPD_requires:
- case OMPD_unknown:
- default:
- break;
- }
- llvm_unreachable(
- "Unknown programming model for OpenMP directive on NVPTX target.");
-}
-
void CGOpenMPRuntimeGPU::emitNonSPMDKernel(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) {
- ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
+ ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode, EM_NonSPMD);
EntryFunctionState EST;
WrapperFunctionsMap.clear();
+ [[maybe_unused]] bool IsBareKernel = D.getSingleClause<OMPXBareClause>();
+ assert(!IsBareKernel && "bare kernel should not be at generic mode");
+
// Emit target region as a standalone region.
class NVPTXPrePostActionTy : public PrePostActionTy {
CGOpenMPRuntimeGPU::EntryFunctionState &EST;
+ const OMPExecutableDirective &D;
public:
- NVPTXPrePostActionTy(CGOpenMPRuntimeGPU::EntryFunctionState &EST)
- : EST(EST) {}
+ NVPTXPrePostActionTy(CGOpenMPRuntimeGPU::EntryFunctionState &EST,
+ const OMPExecutableDirective &D)
+ : EST(EST), D(D) {}
void Enter(CodeGenFunction &CGF) override {
- auto &RT =
- static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
- RT.emitKernelInit(CGF, EST, /* IsSPMD */ false);
+ auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
+ RT.emitKernelInit(D, CGF, EST, /* IsSPMD */ false);
// Skip target region initialization.
RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
}
void Exit(CodeGenFunction &CGF) override {
- auto &RT =
- static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
+ auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
RT.clearLocThreadIdInsertPt(CGF);
RT.emitKernelDeinit(CGF, EST, /* IsSPMD */ false);
}
- } Action(EST);
+ } Action(EST, D);
CodeGen.setAction(Action);
IsInTTDRegion = true;
emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
@@ -1046,11 +762,17 @@ void CGOpenMPRuntimeGPU::emitNonSPMDKernel(const OMPExecutableDirective &D,
IsInTTDRegion = false;
}
-void CGOpenMPRuntimeGPU::emitKernelInit(CodeGenFunction &CGF,
+void CGOpenMPRuntimeGPU::emitKernelInit(const OMPExecutableDirective &D,
+ CodeGenFunction &CGF,
EntryFunctionState &EST, bool IsSPMD) {
+ int32_t MinThreadsVal = 1, MaxThreadsVal = -1, MinTeamsVal = 1,
+ MaxTeamsVal = -1;
+ computeMinAndMaxThreadsAndTeams(D, CGF, MinThreadsVal, MaxThreadsVal,
+ MinTeamsVal, MaxTeamsVal);
+
CGBuilderTy &Bld = CGF.Builder;
- Bld.restoreIP(OMPBuilder.createTargetInit(Bld, IsSPMD, requiresFullRuntime()));
- IsInTargetMasterThreadRegion = IsSPMD;
+ Bld.restoreIP(OMPBuilder.createTargetInit(
+ Bld, IsSPMD, MinThreadsVal, MaxThreadsVal, MinTeamsVal, MaxTeamsVal));
if (!IsSPMD)
emitGenericVarsProlog(CGF, EST.Loc);
}
@@ -1061,8 +783,34 @@ void CGOpenMPRuntimeGPU::emitKernelDeinit(CodeGenFunction &CGF,
if (!IsSPMD)
emitGenericVarsEpilog(CGF);
+ // This is temporary until we remove the fixed sized buffer.
+ ASTContext &C = CGM.getContext();
+ RecordDecl *StaticRD = C.buildImplicitRecord(
+ "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::Union);
+ StaticRD->startDefinition();
+ for (const RecordDecl *TeamReductionRec : TeamsReductions) {
+ QualType RecTy = C.getRecordType(TeamReductionRec);
+ auto *Field = FieldDecl::Create(
+ C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
+ C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
+ /*BW=*/nullptr, /*Mutable=*/false,
+ /*InitStyle=*/ICIS_NoInit);
+ Field->setAccess(AS_public);
+ StaticRD->addDecl(Field);
+ }
+ StaticRD->completeDefinition();
+ QualType StaticTy = C.getRecordType(StaticRD);
+ llvm::Type *LLVMReductionsBufferTy =
+ CGM.getTypes().ConvertTypeForMem(StaticTy);
+ const auto &DL = CGM.getModule().getDataLayout();
+ uint64_t ReductionDataSize =
+ TeamsReductions.empty()
+ ? 0
+ : DL.getTypeAllocSize(LLVMReductionsBufferTy).getFixedValue();
CGBuilderTy &Bld = CGF.Builder;
- OMPBuilder.createTargetDeinit(Bld, IsSPMD, requiresFullRuntime());
+ OMPBuilder.createTargetDeinit(Bld, ReductionDataSize,
+ C.getLangOpts().OpenMPCUDAReductionBufNum);
+ TeamsReductions.clear();
}
void CGOpenMPRuntimeGPU::emitSPMDKernel(const OMPExecutableDirective &D,
@@ -1071,31 +819,43 @@ void CGOpenMPRuntimeGPU::emitSPMDKernel(const OMPExecutableDirective &D,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) {
- ExecutionRuntimeModesRAII ModeRAII(
- CurrentExecutionMode, RequiresFullRuntime,
- CGM.getLangOpts().OpenMPCUDAForceFullRuntime ||
- !supportsLightweightRuntime(CGM.getContext(), D));
+ ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode, EM_SPMD);
EntryFunctionState EST;
+ bool IsBareKernel = D.getSingleClause<OMPXBareClause>();
+
// Emit target region as a standalone region.
class NVPTXPrePostActionTy : public PrePostActionTy {
CGOpenMPRuntimeGPU &RT;
CGOpenMPRuntimeGPU::EntryFunctionState &EST;
+ bool IsBareKernel;
+ DataSharingMode Mode;
+ const OMPExecutableDirective &D;
public:
NVPTXPrePostActionTy(CGOpenMPRuntimeGPU &RT,
- CGOpenMPRuntimeGPU::EntryFunctionState &EST)
- : RT(RT), EST(EST) {}
+ CGOpenMPRuntimeGPU::EntryFunctionState &EST,
+ bool IsBareKernel, const OMPExecutableDirective &D)
+ : RT(RT), EST(EST), IsBareKernel(IsBareKernel),
+ Mode(RT.CurrentDataSharingMode), D(D) {}
void Enter(CodeGenFunction &CGF) override {
- RT.emitKernelInit(CGF, EST, /* IsSPMD */ true);
+ if (IsBareKernel) {
+ RT.CurrentDataSharingMode = DataSharingMode::DS_CUDA;
+ return;
+ }
+ RT.emitKernelInit(D, CGF, EST, /* IsSPMD */ true);
// Skip target region initialization.
RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
}
void Exit(CodeGenFunction &CGF) override {
+ if (IsBareKernel) {
+ RT.CurrentDataSharingMode = Mode;
+ return;
+ }
RT.clearLocThreadIdInsertPt(CGF);
RT.emitKernelDeinit(CGF, EST, /* IsSPMD */ true);
}
- } Action(*this, EST);
+ } Action(*this, EST, IsBareKernel, D);
CodeGen.setAction(Action);
IsInTTDRegion = true;
emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
@@ -1103,44 +863,6 @@ void CGOpenMPRuntimeGPU::emitSPMDKernel(const OMPExecutableDirective &D,
IsInTTDRegion = false;
}
-// Create a unique global variable to indicate the execution mode of this target
-// region. The execution mode is either 'generic', or 'spmd' depending on the
-// target directive. This variable is picked up by the offload library to setup
-// the device appropriately before kernel launch. If the execution mode is
-// 'generic', the runtime reserves one warp for the master, otherwise, all
-// warps participate in parallel work.
-static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
- bool Mode) {
- auto *GVMode =
- new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
- llvm::GlobalValue::WeakAnyLinkage,
- llvm::ConstantInt::get(CGM.Int8Ty, Mode ? 0 : 1),
- Twine(Name, "_exec_mode"));
- CGM.addCompilerUsedGlobal(GVMode);
-}
-
-void CGOpenMPRuntimeGPU::createOffloadEntry(llvm::Constant *ID,
- llvm::Constant *Addr,
- uint64_t Size, int32_t,
- llvm::GlobalValue::LinkageTypes) {
- // TODO: Add support for global variables on the device after declare target
- // support.
- if (!isa<llvm::Function>(Addr))
- return;
- llvm::Module &M = CGM.getModule();
- llvm::LLVMContext &Ctx = CGM.getLLVMContext();
-
- // Get "nvvm.annotations" metadata node
- llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");
-
- llvm::Metadata *MDVals[] = {
- llvm::ConstantAsMetadata::get(Addr), llvm::MDString::get(Ctx, "kernel"),
- llvm::ConstantAsMetadata::get(
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
- // Append metadata to nvvm.annotations
- MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
-}
-
void CGOpenMPRuntimeGPU::emitTargetOutlinedFunction(
const OMPExecutableDirective &D, StringRef ParentName,
llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
@@ -1151,71 +873,56 @@ void CGOpenMPRuntimeGPU::emitTargetOutlinedFunction(
assert(!ParentName.empty() && "Invalid target region parent name!");
bool Mode = supportsSPMDExecutionMode(CGM.getContext(), D);
- if (Mode)
+ bool IsBareKernel = D.getSingleClause<OMPXBareClause>();
+ if (Mode || IsBareKernel)
emitSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
CodeGen);
else
emitNonSPMDKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
CodeGen);
-
- setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
-}
-
-namespace {
-LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
-/// Enum for accesseing the reserved_2 field of the ident_t struct.
-enum ModeFlagsTy : unsigned {
- /// Bit set to 1 when in SPMD mode.
- KMP_IDENT_SPMD_MODE = 0x01,
- /// Bit set to 1 when a simplified runtime is used.
- KMP_IDENT_SIMPLE_RT_MODE = 0x02,
- LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/KMP_IDENT_SIMPLE_RT_MODE)
-};
-
-/// Special mode Undefined. Is the combination of Non-SPMD mode + SimpleRuntime.
-static const ModeFlagsTy UndefinedMode =
- (~KMP_IDENT_SPMD_MODE) & KMP_IDENT_SIMPLE_RT_MODE;
-} // anonymous namespace
-
-unsigned CGOpenMPRuntimeGPU::getDefaultLocationReserved2Flags() const {
- switch (getExecutionMode()) {
- case EM_SPMD:
- if (requiresFullRuntime())
- return KMP_IDENT_SPMD_MODE & (~KMP_IDENT_SIMPLE_RT_MODE);
- return KMP_IDENT_SPMD_MODE | KMP_IDENT_SIMPLE_RT_MODE;
- case EM_NonSPMD:
- assert(requiresFullRuntime() && "Expected full runtime.");
- return (~KMP_IDENT_SPMD_MODE) & (~KMP_IDENT_SIMPLE_RT_MODE);
- case EM_Unknown:
- return UndefinedMode;
- }
- llvm_unreachable("Unknown flags are requested.");
}
CGOpenMPRuntimeGPU::CGOpenMPRuntimeGPU(CodeGenModule &CGM)
- : CGOpenMPRuntime(CGM, "_", "$") {
- if (!CGM.getLangOpts().OpenMPIsDevice)
- llvm_unreachable("OpenMP NVPTX can only handle device code.");
+ : CGOpenMPRuntime(CGM) {
+ llvm::OpenMPIRBuilderConfig Config(
+ CGM.getLangOpts().OpenMPIsTargetDevice, isGPU(),
+ CGM.getLangOpts().OpenMPOffloadMandatory,
+ /*HasRequiresReverseOffload*/ false, /*HasRequiresUnifiedAddress*/ false,
+ hasRequiresUnifiedSharedMemory(), /*HasRequiresDynamicAllocators*/ false);
+ OMPBuilder.setConfig(Config);
+
+ if (!CGM.getLangOpts().OpenMPIsTargetDevice)
+ llvm_unreachable("OpenMP can only handle device code.");
+
+ if (CGM.getLangOpts().OpenMPCUDAMode)
+ CurrentDataSharingMode = CGOpenMPRuntimeGPU::DS_CUDA;
+
+ llvm::OpenMPIRBuilder &OMPBuilder = getOMPBuilder();
+ if (CGM.getLangOpts().NoGPULib || CGM.getLangOpts().OMPHostIRFile.empty())
+ return;
+
+ OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPTargetDebug,
+ "__omp_rtl_debug_kind");
+ OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPTeamSubscription,
+ "__omp_rtl_assume_teams_oversubscription");
+ OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPThreadSubscription,
+ "__omp_rtl_assume_threads_oversubscription");
+ OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPNoThreadState,
+ "__omp_rtl_assume_no_thread_state");
+ OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPNoNestedParallelism,
+ "__omp_rtl_assume_no_nested_parallelism");
}
void CGOpenMPRuntimeGPU::emitProcBindClause(CodeGenFunction &CGF,
ProcBindKind ProcBind,
SourceLocation Loc) {
- // Do nothing in case of SPMD mode and L0 parallel.
- if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
- return;
-
- CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
+ // Nothing to do.
}
void CGOpenMPRuntimeGPU::emitNumThreadsClause(CodeGenFunction &CGF,
llvm::Value *NumThreads,
SourceLocation Loc) {
- // Do nothing in case of SPMD mode and L0 parallel.
- if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
- return;
-
- CGOpenMPRuntime::emitNumThreadsClause(CGF, NumThreads, Loc);
+ // Nothing to do.
}
void CGOpenMPRuntimeGPU::emitNumTeamsClause(CodeGenFunction &CGF,
@@ -1224,36 +931,17 @@ void CGOpenMPRuntimeGPU::emitNumTeamsClause(CodeGenFunction &CGF,
SourceLocation Loc) {}
llvm::Function *CGOpenMPRuntimeGPU::emitParallelOutlinedFunction(
- const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) {
// Emit target region as a standalone region.
- class NVPTXPrePostActionTy : public PrePostActionTy {
- bool &IsInParallelRegion;
- bool PrevIsInParallelRegion;
-
- public:
- NVPTXPrePostActionTy(bool &IsInParallelRegion)
- : IsInParallelRegion(IsInParallelRegion) {}
- void Enter(CodeGenFunction &CGF) override {
- PrevIsInParallelRegion = IsInParallelRegion;
- IsInParallelRegion = true;
- }
- void Exit(CodeGenFunction &CGF) override {
- IsInParallelRegion = PrevIsInParallelRegion;
- }
- } Action(IsInParallelRegion);
- CodeGen.setAction(Action);
bool PrevIsInTTDRegion = IsInTTDRegion;
IsInTTDRegion = false;
- bool PrevIsInTargetMasterThreadRegion = IsInTargetMasterThreadRegion;
- IsInTargetMasterThreadRegion = false;
auto *OutlinedFun =
cast<llvm::Function>(CGOpenMPRuntime::emitParallelOutlinedFunction(
- D, ThreadIDVar, InnermostKind, CodeGen));
- IsInTargetMasterThreadRegion = PrevIsInTargetMasterThreadRegion;
+ CGF, D, ThreadIDVar, InnermostKind, CodeGen));
IsInTTDRegion = PrevIsInTTDRegion;
- if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD &&
- !IsInParallelRegion) {
+ if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD) {
llvm::Function *WrapperFun =
createParallelDataSharingWrapper(OutlinedFun, D);
WrapperFunctionsMap[OutlinedFun] = WrapperFun;
@@ -1301,14 +989,15 @@ getTeamsReductionVars(ASTContext &Ctx, const OMPExecutableDirective &D,
}
llvm::Function *CGOpenMPRuntimeGPU::emitTeamsOutlinedFunction(
- const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) {
SourceLocation Loc = D.getBeginLoc();
const RecordDecl *GlobalizedRD = nullptr;
llvm::SmallVector<const ValueDecl *, 4> LastPrivatesReductions;
llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> MappedDeclsFields;
- unsigned WarpSize = CGM.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
+ unsigned WarpSize = CGM.getTarget().getGridValue().GV_Warp_Size;
// Globalize team reductions variable unconditionally in all modes.
if (getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
getTeamsReductionVars(CGM.getContext(), D, LastPrivatesReductions);
@@ -1316,7 +1005,7 @@ llvm::Function *CGOpenMPRuntimeGPU::emitTeamsOutlinedFunction(
getDistributeLastprivateVars(CGM.getContext(), D, LastPrivatesReductions);
if (!LastPrivatesReductions.empty()) {
GlobalizedRD = ::buildRecordForGlobalizedVars(
- CGM.getContext(), llvm::None, LastPrivatesReductions,
+ CGM.getContext(), std::nullopt, LastPrivatesReductions,
MappedDeclsFields, WarpSize);
}
} else if (!LastPrivatesReductions.empty()) {
@@ -1363,16 +1052,14 @@ llvm::Function *CGOpenMPRuntimeGPU::emitTeamsOutlinedFunction(
} Action(Loc, GlobalizedRD, MappedDeclsFields);
CodeGen.setAction(Action);
llvm::Function *OutlinedFun = CGOpenMPRuntime::emitTeamsOutlinedFunction(
- D, ThreadIDVar, InnermostKind, CodeGen);
+ CGF, D, ThreadIDVar, InnermostKind, CodeGen);
return OutlinedFun;
}
void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
- SourceLocation Loc,
- bool WithSPMDCheck) {
- if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
- getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
+ SourceLocation Loc) {
+ if (getDataSharingMode() != CGOpenMPRuntimeGPU::DS_Generic)
return;
CGBuilderTy &Bld = CGF.Builder;
@@ -1396,10 +1083,14 @@ void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
// Allocate space for the variable to be globalized
llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
- llvm::Instruction *VoidPtr =
+ llvm::CallBase *VoidPtr =
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_alloc_shared),
AllocArgs, VD->getName());
+ // FIXME: We should use the variables actual alignment as an argument.
+ VoidPtr->addRetAttr(llvm::Attribute::get(
+ CGM.getLLVMContext(), llvm::Attribute::Alignment,
+ CGM.getContext().getTargetInfo().getNewAlign() / 8));
// Cast the void pointer and get the address of the globalized variable.
llvm::PointerType *VarPtrTy = CGF.ConvertTypeForMem(VarTy)->getPointerTo();
@@ -1417,48 +1108,75 @@ void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
if (auto *DI = CGF.getDebugInfo())
VoidPtr->setDebugLoc(DI->SourceLocToDebugLoc(VD->getLocation()));
}
- for (const auto *VD : I->getSecond().EscapedVariableLengthDecls) {
- // Use actual memory size of the VLA object including the padding
- // for alignment purposes.
- llvm::Value *Size = CGF.getTypeSize(VD->getType());
- CharUnits Align = CGM.getContext().getDeclAlign(VD);
- Size = Bld.CreateNUWAdd(
- Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
- llvm::Value *AlignVal =
- llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
-
- Size = Bld.CreateUDiv(Size, AlignVal);
- Size = Bld.CreateNUWMul(Size, AlignVal);
-
- // Allocate space for this VLA object to be globalized.
- llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
- llvm::Instruction *VoidPtr =
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_alloc_shared),
- AllocArgs, VD->getName());
- I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(
- std::pair<llvm::Value *, llvm::Value *>(
- {VoidPtr, CGF.getTypeSize(VD->getType())}));
- LValue Base = CGF.MakeAddrLValue(VoidPtr, VD->getType(),
+ for (const auto *ValueD : I->getSecond().EscapedVariableLengthDecls) {
+ const auto *VD = cast<VarDecl>(ValueD);
+ std::pair<llvm::Value *, llvm::Value *> AddrSizePair =
+ getKmpcAllocShared(CGF, VD);
+ I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(AddrSizePair);
+ LValue Base = CGF.MakeAddrLValue(AddrSizePair.first, VD->getType(),
CGM.getContext().getDeclAlign(VD),
AlignmentSource::Decl);
- I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
- Base.getAddress(CGF));
+ I->getSecond().MappedParams->setVarAddr(CGF, VD, Base.getAddress(CGF));
}
I->getSecond().MappedParams->apply(CGF);
}
-void CGOpenMPRuntimeGPU::emitGenericVarsEpilog(CodeGenFunction &CGF,
- bool WithSPMDCheck) {
- if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic &&
- getExecutionMode() != CGOpenMPRuntimeGPU::EM_SPMD)
+bool CGOpenMPRuntimeGPU::isDelayedVariableLengthDecl(CodeGenFunction &CGF,
+ const VarDecl *VD) const {
+ const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
+ if (I == FunctionGlobalizedDecls.end())
+ return false;
+
+ // Check variable declaration is delayed:
+ return llvm::is_contained(I->getSecond().DelayedVariableLengthDecls, VD);
+}
+
+std::pair<llvm::Value *, llvm::Value *>
+CGOpenMPRuntimeGPU::getKmpcAllocShared(CodeGenFunction &CGF,
+ const VarDecl *VD) {
+ CGBuilderTy &Bld = CGF.Builder;
+
+ // Compute size and alignment.
+ llvm::Value *Size = CGF.getTypeSize(VD->getType());
+ CharUnits Align = CGM.getContext().getDeclAlign(VD);
+ Size = Bld.CreateNUWAdd(
+ Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
+ llvm::Value *AlignVal =
+ llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
+ Size = Bld.CreateUDiv(Size, AlignVal);
+ Size = Bld.CreateNUWMul(Size, AlignVal);
+
+ // Allocate space for this VLA object to be globalized.
+ llvm::Value *AllocArgs[] = {Size};
+ llvm::CallBase *VoidPtr =
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_alloc_shared),
+ AllocArgs, VD->getName());
+ VoidPtr->addRetAttr(llvm::Attribute::get(
+ CGM.getLLVMContext(), llvm::Attribute::Alignment, Align.getQuantity()));
+
+ return std::make_pair(VoidPtr, Size);
+}
+
+void CGOpenMPRuntimeGPU::getKmpcFreeShared(
+ CodeGenFunction &CGF,
+ const std::pair<llvm::Value *, llvm::Value *> &AddrSizePair) {
+ // Deallocate the memory for each globalized VLA object
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_free_shared),
+ {AddrSizePair.first, AddrSizePair.second});
+}
+
+void CGOpenMPRuntimeGPU::emitGenericVarsEpilog(CodeGenFunction &CGF) {
+ if (getDataSharingMode() != CGOpenMPRuntimeGPU::DS_Generic)
return;
const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
if (I != FunctionGlobalizedDecls.end()) {
- // Deallocate the memory for each globalized VLA object
- for (auto AddrSizePair :
+ // Deallocate the memory for each globalized VLA object that was
+ // globalized in the prolog (i.e. emitGenericVarsProlog).
+ for (const auto &AddrSizePair :
llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_free_shared),
@@ -1486,11 +1204,18 @@ void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF,
if (!CGF.HaveInsertPoint())
return;
+ bool IsBareKernel = D.getSingleClause<OMPXBareClause>();
+
Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
/*Name=*/".zero.addr");
- CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
+ CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddr);
llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
- OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
+ // We don't emit any thread id function call in bare kernel, but because the
+ // outlined function has a pointer argument, we emit a nullptr here.
+ if (IsBareKernel)
+ OutlinedFnArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
+ else
+ OutlinedFnArgs.push_back(emitThreadIDAddress(CGF, Loc).getPointer());
OutlinedFnArgs.push_back(ZeroAddr.getPointer());
OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
@@ -1500,13 +1225,16 @@ void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF,
SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond) {
+ const Expr *IfCond,
+ llvm::Value *NumThreads) {
if (!CGF.HaveInsertPoint())
return;
- auto &&ParallelGen = [this, Loc, OutlinedFn, CapturedVars,
- IfCond](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ auto &&ParallelGen = [this, Loc, OutlinedFn, CapturedVars, IfCond,
+ NumThreads](CodeGenFunction &CGF,
+ PrePostActionTy &Action) {
CGBuilderTy &Bld = CGF.Builder;
+ llvm::Value *NumThreadsVal = NumThreads;
llvm::Function *WFn = WrapperFunctionsMap[OutlinedFn];
llvm::Value *ID = llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
if (WFn)
@@ -1546,13 +1274,18 @@ void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF,
else
IfCondVal = llvm::ConstantInt::get(CGF.Int32Ty, 1);
- assert(IfCondVal && "Expected a value");
+ if (!NumThreadsVal)
+ NumThreadsVal = llvm::ConstantInt::get(CGF.Int32Ty, -1);
+ else
+ NumThreadsVal = Bld.CreateZExtOrTrunc(NumThreadsVal, CGF.Int32Ty),
+
+ assert(IfCondVal && "Expected a value");
llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
llvm::Value *Args[] = {
RTLoc,
getThreadID(CGF, Loc),
IfCondVal,
- llvm::ConstantInt::get(CGF.Int32Ty, -1),
+ NumThreadsVal,
llvm::ConstantInt::get(CGF.Int32Ty, -1),
FnPtr,
ID,
@@ -1687,8 +1420,7 @@ static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
return CGF.Builder.CreateIntCast(Val, LLVMCastTy,
CastTy->hasSignedIntegerRepresentation());
Address CastItem = CGF.CreateMemTemp(CastTy);
- Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()));
+ Address ValCastItem = CastItem.withElementType(Val->getType());
CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy,
LValueBaseInfo(AlignmentSource::Type),
TBAAAccessInfo());
@@ -1751,7 +1483,7 @@ static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
Address ElemPtr = DestAddr;
Address Ptr = SrcAddr;
Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
- Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy);
+ Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy, CGF.Int8Ty);
for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
if (Size < CharUnits::fromQuantity(IntSize))
continue;
@@ -1759,9 +1491,10 @@ static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)),
/*Signed=*/1);
llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType);
- Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo());
- ElemPtr =
- Bld.CreatePointerBitCastOrAddrSpaceCast(ElemPtr, IntTy->getPointerTo());
+ Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo(),
+ IntTy);
+ ElemPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ ElemPtr, IntTy->getPointerTo(), IntTy);
if (Size.getQuantity() / IntSize > 1) {
llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond");
llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then");
@@ -1774,11 +1507,13 @@ static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
llvm::PHINode *PhiDest =
Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
- Ptr = Address(PhiSrc, Ptr.getAlignment());
- ElemPtr = Address(PhiDest, ElemPtr.getAlignment());
+ Ptr = Address(PhiSrc, Ptr.getElementType(), Ptr.getAlignment());
+ ElemPtr =
+ Address(PhiDest, ElemPtr.getElementType(), ElemPtr.getAlignment());
llvm::Value *PtrDiff = Bld.CreatePtrDiff(
- PtrEnd.getPointer(), Bld.CreatePointerBitCastOrAddrSpaceCast(
- Ptr.getPointer(), CGF.VoidPtrTy));
+ CGF.Int8Ty, PtrEnd.getPointer(),
+ Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr.getPointer(),
+ CGF.VoidPtrTy));
Bld.CreateCondBr(Bld.CreateICmpSGT(PtrDiff, Bld.getInt64(IntSize - 1)),
ThenBB, ExitBB);
CGF.EmitBlock(ThenBB);
@@ -1821,11 +1556,6 @@ enum CopyAction : unsigned {
RemoteLaneToThread,
// ThreadCopy: Make a copy of a Reduce list on the thread's stack.
ThreadCopy,
- // ThreadToScratchpad: Copy a team-reduced array to the scratchpad.
- ThreadToScratchpad,
- // ScratchpadToThread: Copy from a scratchpad array in global memory
- // containing team-reduced data to a thread's stack.
- ScratchpadToThread,
};
} // namespace
@@ -1847,13 +1577,10 @@ static void emitReductionListCopy(
CGBuilderTy &Bld = CGF.Builder;
llvm::Value *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
- llvm::Value *ScratchpadIndex = CopyOptions.ScratchpadIndex;
- llvm::Value *ScratchpadWidth = CopyOptions.ScratchpadWidth;
// Iterates, element-by-element, through the source Reduce list and
// make a copy.
unsigned Idx = 0;
- unsigned Size = Privates.size();
for (const Expr *Private : Privates) {
Address SrcElementAddr = Address::invalid();
Address DestElementAddr = Address::invalid();
@@ -1863,18 +1590,16 @@ static void emitReductionListCopy(
// Set to true to update the pointer in the dest Reduce list to a
// newly created element.
bool UpdateDestListPtr = false;
- // Increment the src or dest pointer to the scratchpad, for each
- // new element.
- bool IncrScratchpadSrc = false;
- bool IncrScratchpadDest = false;
+ QualType PrivatePtrType = C.getPointerType(Private->getType());
+ llvm::Type *PrivateLlvmPtrType = CGF.ConvertType(PrivatePtrType);
switch (Action) {
case RemoteLaneToThread: {
// Step 1.1: Get the address for the src element in the Reduce list.
Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
SrcElementAddr = CGF.EmitLoadOfPointer(
- SrcElementPtrAddr,
- C.getPointerType(Private->getType())->castAs<PointerType>());
+ SrcElementPtrAddr.withElementType(PrivateLlvmPtrType),
+ PrivatePtrType->castAs<PointerType>());
// Step 1.2: Create a temporary to store the element in the destination
// Reduce list.
@@ -1889,68 +1614,25 @@ static void emitReductionListCopy(
// Step 1.1: Get the address for the src element in the Reduce list.
Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
SrcElementAddr = CGF.EmitLoadOfPointer(
- SrcElementPtrAddr,
- C.getPointerType(Private->getType())->castAs<PointerType>());
+ SrcElementPtrAddr.withElementType(PrivateLlvmPtrType),
+ PrivatePtrType->castAs<PointerType>());
// Step 1.2: Get the address for dest element. The destination
// element has already been created on the thread's stack.
DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
DestElementAddr = CGF.EmitLoadOfPointer(
- DestElementPtrAddr,
- C.getPointerType(Private->getType())->castAs<PointerType>());
- break;
- }
- case ThreadToScratchpad: {
- // Step 1.1: Get the address for the src element in the Reduce list.
- Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
- SrcElementAddr = CGF.EmitLoadOfPointer(
- SrcElementPtrAddr,
- C.getPointerType(Private->getType())->castAs<PointerType>());
-
- // Step 1.2: Get the address for dest element:
- // address = base + index * ElementSizeInChars.
- llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
- llvm::Value *CurrentOffset =
- Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
- llvm::Value *ScratchPadElemAbsolutePtrVal =
- Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset);
- ScratchPadElemAbsolutePtrVal =
- Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
- DestElementAddr = Address(ScratchPadElemAbsolutePtrVal,
- C.getTypeAlignInChars(Private->getType()));
- IncrScratchpadDest = true;
- break;
- }
- case ScratchpadToThread: {
- // Step 1.1: Get the address for the src element in the scratchpad.
- // address = base + index * ElementSizeInChars.
- llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
- llvm::Value *CurrentOffset =
- Bld.CreateNUWMul(ElementSizeInChars, ScratchpadIndex);
- llvm::Value *ScratchPadElemAbsolutePtrVal =
- Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset);
- ScratchPadElemAbsolutePtrVal =
- Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
- SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal,
- C.getTypeAlignInChars(Private->getType()));
- IncrScratchpadSrc = true;
-
- // Step 1.2: Create a temporary to store the element in the destination
- // Reduce list.
- DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
- DestElementAddr =
- CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
- UpdateDestListPtr = true;
+ DestElementPtrAddr.withElementType(PrivateLlvmPtrType),
+ PrivatePtrType->castAs<PointerType>());
break;
}
}
// Regardless of src and dest of copy, we emit the load of src
// element as this is required in all directions
- SrcElementAddr = Bld.CreateElementBitCast(
- SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
- DestElementAddr = Bld.CreateElementBitCast(DestElementAddr,
- SrcElementAddr.getElementType());
+ SrcElementAddr = SrcElementAddr.withElementType(
+ CGF.ConvertTypeForMem(Private->getType()));
+ DestElementAddr =
+ DestElementAddr.withElementType(SrcElementAddr.getElementType());
// Now that all active lanes have read the element in the
// Reduce list, shuffle over the value from the remote lane.
@@ -2000,35 +1682,6 @@ static void emitReductionListCopy(
C.VoidPtrTy);
}
- // Step 4.1: Increment SrcBase/DestBase so that it points to the starting
- // address of the next element in scratchpad memory, unless we're currently
- // processing the last one. Memory alignment is also taken care of here.
- if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) {
- llvm::Value *ScratchpadBasePtr =
- IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer();
- llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
- ScratchpadBasePtr = Bld.CreateNUWAdd(
- ScratchpadBasePtr,
- Bld.CreateNUWMul(ScratchpadWidth, ElementSizeInChars));
-
- // Take care of global memory alignment for performance
- ScratchpadBasePtr = Bld.CreateNUWSub(
- ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
- ScratchpadBasePtr = Bld.CreateUDiv(
- ScratchpadBasePtr,
- llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
- ScratchpadBasePtr = Bld.CreateNUWAdd(
- ScratchpadBasePtr, llvm::ConstantInt::get(CGM.SizeTy, 1));
- ScratchpadBasePtr = Bld.CreateNUWMul(
- ScratchpadBasePtr,
- llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
-
- if (IncrScratchpadDest)
- DestBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
- else /* IncrScratchpadSrc = true */
- SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
- }
-
++Idx;
}
}
@@ -2056,12 +1709,12 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
// At the stage of the computation when this function is called, partially
// aggregated values reside in the first lane of every active warp.
ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
+ C.VoidPtrTy, ImplicitParamKind::Other);
// NumWarps: number of warps active in the parallel region. This could
// be smaller than 32 (max warps in a CTA) for partial block reduction.
ImplicitParamDecl NumWarpsArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
C.getIntTypeForBitwidth(32, /* Signed */ true),
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
FunctionArgList Args;
Args.push_back(&ReduceListArg);
Args.push_back(&NumWarpsArg);
@@ -2089,7 +1742,7 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
"__openmp_nvptx_data_transfer_temporary_storage";
llvm::GlobalVariable *TransferMedium =
M.getGlobalVariable(TransferMediumName);
- unsigned WarpSize = CGF.getTarget().getGridValue(llvm::omp::GV_Warp_Size);
+ unsigned WarpSize = CGF.getTarget().getGridValue().GV_Warp_Size;
if (!TransferMedium) {
auto *Ty = llvm::ArrayType::get(CGM.Int32Ty, WarpSize);
unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared);
@@ -2110,13 +1763,14 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
llvm::Value *WarpID = getNVPTXWarpID(CGF);
Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
+ llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
Address LocalReduceList(
Bld.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitLoadOfScalar(
AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc,
LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()),
- CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
- CGF.getPointerAlign());
+ ElemTy->getPointerTo()),
+ ElemTy, CGF.getPointerAlign());
unsigned Idx = 0;
for (const Expr *Private : Privates) {
@@ -2174,23 +1828,18 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
// elemptr = ((CopyType*)(elemptrptr)) + I
- Address ElemPtr = Address(ElemPtrPtr, Align);
- ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
- if (NumIters > 1) {
- ElemPtr = Address(Bld.CreateGEP(ElemPtr.getElementType(),
- ElemPtr.getPointer(), Cnt),
- ElemPtr.getAlignment());
- }
+ Address ElemPtr(ElemPtrPtr, CopyType, Align);
+ if (NumIters > 1)
+ ElemPtr = Bld.CreateGEP(ElemPtr, Cnt);
// Get pointer to location in transfer medium.
// MediumPtr = &medium[warp_id]
llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
TransferMedium->getValueType(), TransferMedium,
{llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
- Address MediumPtr(MediumPtrVal, Align);
// Casting to actual data type.
// MediumPtr = (CopyType*)MediumPtrAddr;
- MediumPtr = Bld.CreateElementBitCast(MediumPtr, CopyType);
+ Address MediumPtr(MediumPtrVal, CopyType, Align);
// elem = *elemptr
//*MediumPtr = elem
@@ -2236,21 +1885,16 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
TransferMedium->getValueType(), TransferMedium,
{llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
- Address SrcMediumPtr(SrcMediumPtrVal, Align);
// SrcMediumVal = *SrcMediumPtr;
- SrcMediumPtr = Bld.CreateElementBitCast(SrcMediumPtr, CopyType);
+ Address SrcMediumPtr(SrcMediumPtrVal, CopyType, Align);
// TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I
Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
- Address TargetElemPtr = Address(TargetElemPtrVal, Align);
- TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
- if (NumIters > 1) {
- TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getElementType(),
- TargetElemPtr.getPointer(), Cnt),
- TargetElemPtr.getAlignment());
- }
+ Address TargetElemPtr(TargetElemPtrVal, CopyType, Align);
+ if (NumIters > 1)
+ TargetElemPtr = Bld.CreateGEP(TargetElemPtr, Cnt);
// *TargetElemPtr = SrcMediumVal;
llvm::Value *SrcMediumValue =
@@ -2353,16 +1997,16 @@ static llvm::Function *emitShuffleAndReduceFunction(
// Thread local Reduce list used to host the values of data to be reduced.
ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
+ C.VoidPtrTy, ImplicitParamKind::Other);
// Current lane id; could be logical.
ImplicitParamDecl LaneIDArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.ShortTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
// Offset of the remote source lane relative to the current lane.
ImplicitParamDecl RemoteLaneOffsetArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.ShortTy, ImplicitParamDecl::Other);
+ C.ShortTy, ImplicitParamKind::Other);
// Algorithm version. This is expected to be known at compile time.
ImplicitParamDecl AlgoVerArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.ShortTy, ImplicitParamDecl::Other);
+ C.ShortTy, ImplicitParamKind::Other);
FunctionArgList Args;
Args.push_back(&ReduceListArg);
Args.push_back(&LaneIDArg);
@@ -2383,12 +2027,13 @@ static llvm::Function *emitShuffleAndReduceFunction(
CGBuilderTy &Bld = CGF.Builder;
Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
+ llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
Address LocalReduceList(
Bld.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
C.VoidPtrTy, SourceLocation()),
- CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
- CGF.getPointerAlign());
+ ElemTy->getPointerTo()),
+ ElemTy, CGF.getPointerAlign());
Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
@@ -2513,13 +2158,13 @@ static llvm::Value *emitListToGlobalCopyFunction(
// Buffer: global reduction buffer.
ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
+ C.VoidPtrTy, ImplicitParamKind::Other);
// Idx: index of the buffer.
ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
// ReduceList: thread local Reduce list.
ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
+ C.VoidPtrTy, ImplicitParamKind::Other);
FunctionArgList Args;
Args.push_back(&BufferArg);
Args.push_back(&IdxArg);
@@ -2539,20 +2184,20 @@ static llvm::Value *emitListToGlobalCopyFunction(
Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
+ llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
Address LocalReduceList(
Bld.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
C.VoidPtrTy, Loc),
- CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
- CGF.getPointerAlign());
+ ElemTy->getPointerTo()),
+ ElemTy, CGF.getPointerAlign());
QualType StaticTy = C.getRecordType(TeamReductionRec);
llvm::Type *LLVMReductionsBufferTy =
CGM.getTypes().ConvertTypeForMem(StaticTy);
llvm::Value *BufferArrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
LLVMReductionsBufferTy->getPointerTo());
- llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
- CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
+ llvm::Value *Idxs[] = {CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
/*Volatile=*/false, C.IntTy,
Loc)};
unsigned Idx = 0;
@@ -2562,19 +2207,22 @@ static llvm::Value *emitListToGlobalCopyFunction(
llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
// elemptr = ((CopyType*)(elemptrptr)) + I
+ ElemTy = CGF.ConvertTypeForMem(Private->getType());
ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
+ ElemPtrPtr, ElemTy->getPointerTo());
Address ElemPtr =
- Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
+ Address(ElemPtrPtr, ElemTy, C.getTypeAlignInChars(Private->getType()));
const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
// Global = Buffer.VD[Idx];
const FieldDecl *FD = VarFieldMap.lookup(VD);
+ llvm::Value *BufferPtr =
+ Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs);
LValue GlobLVal = CGF.EmitLValueForField(
- CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
+ CGF.MakeNaturalAlignAddrLValue(BufferPtr, StaticTy), FD);
Address GlobAddr = GlobLVal.getAddress(CGF);
- llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
- GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
- GlobLVal.setAddress(Address(BufferPtr, GlobAddr.getAlignment()));
+ GlobLVal.setAddress(Address(GlobAddr.getPointer(),
+ CGF.ConvertTypeForMem(Private->getType()),
+ GlobAddr.getAlignment()));
switch (CGF.getEvaluationKind(Private->getType())) {
case TEK_Scalar: {
llvm::Value *V = CGF.EmitLoadOfScalar(
@@ -2622,13 +2270,13 @@ static llvm::Value *emitListToGlobalReduceFunction(
// Buffer: global reduction buffer.
ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
+ C.VoidPtrTy, ImplicitParamKind::Other);
// Idx: index of the buffer.
ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
// ReduceList: thread local Reduce list.
ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
+ C.VoidPtrTy, ImplicitParamKind::Other);
FunctionArgList Args;
Args.push_back(&BufferArg);
Args.push_back(&IdxArg);
@@ -2659,8 +2307,7 @@ static llvm::Value *emitListToGlobalReduceFunction(
Address ReductionList =
CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
auto IPriv = Privates.begin();
- llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
- CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
+ llvm::Value *Idxs[] = {CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
/*Volatile=*/false, C.IntTy,
Loc)};
unsigned Idx = 0;
@@ -2669,13 +2316,13 @@ static llvm::Value *emitListToGlobalReduceFunction(
// Global = Buffer.VD[Idx];
const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
const FieldDecl *FD = VarFieldMap.lookup(VD);
+ llvm::Value *BufferPtr =
+ Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs);
LValue GlobLVal = CGF.EmitLValueForField(
- CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
+ CGF.MakeNaturalAlignAddrLValue(BufferPtr, StaticTy), FD);
Address GlobAddr = GlobLVal.getAddress(CGF);
- llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
- GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
- llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
- CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
+ CGF.EmitStoreOfScalar(GlobAddr.getPointer(), Elem, /*Volatile=*/false,
+ C.VoidPtrTy);
if ((*IPriv)->getType()->isVariablyModifiedType()) {
// Store array size.
++Idx;
@@ -2691,8 +2338,7 @@ static llvm::Value *emitListToGlobalReduceFunction(
}
// Call reduce_function(GlobalReduceList, ReduceList)
- llvm::Value *GlobalReduceList =
- CGF.EmitCastToVoidPtr(ReductionList.getPointer());
+ llvm::Value *GlobalReduceList = ReductionList.getPointer();
Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
@@ -2718,13 +2364,13 @@ static llvm::Value *emitGlobalToListCopyFunction(
// Buffer: global reduction buffer.
ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
+ C.VoidPtrTy, ImplicitParamKind::Other);
// Idx: index of the buffer.
ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
// ReduceList: thread local Reduce list.
ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
+ C.VoidPtrTy, ImplicitParamKind::Other);
FunctionArgList Args;
Args.push_back(&BufferArg);
Args.push_back(&IdxArg);
@@ -2744,12 +2390,13 @@ static llvm::Value *emitGlobalToListCopyFunction(
Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
+ llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
Address LocalReduceList(
Bld.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
C.VoidPtrTy, Loc),
- CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
- CGF.getPointerAlign());
+ ElemTy->getPointerTo()),
+ ElemTy, CGF.getPointerAlign());
QualType StaticTy = C.getRecordType(TeamReductionRec);
llvm::Type *LLVMReductionsBufferTy =
CGM.getTypes().ConvertTypeForMem(StaticTy);
@@ -2757,8 +2404,7 @@ static llvm::Value *emitGlobalToListCopyFunction(
CGF.EmitLoadOfScalar(AddrBufferArg, /*Volatile=*/false, C.VoidPtrTy, Loc),
LLVMReductionsBufferTy->getPointerTo());
- llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
- CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
+ llvm::Value *Idxs[] = {CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
/*Volatile=*/false, C.IntTy,
Loc)};
unsigned Idx = 0;
@@ -2768,19 +2414,22 @@ static llvm::Value *emitGlobalToListCopyFunction(
llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
// elemptr = ((CopyType*)(elemptrptr)) + I
+ ElemTy = CGF.ConvertTypeForMem(Private->getType());
ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
+ ElemPtrPtr, ElemTy->getPointerTo());
Address ElemPtr =
- Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
+ Address(ElemPtrPtr, ElemTy, C.getTypeAlignInChars(Private->getType()));
const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
// Global = Buffer.VD[Idx];
const FieldDecl *FD = VarFieldMap.lookup(VD);
+ llvm::Value *BufferPtr =
+ Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs);
LValue GlobLVal = CGF.EmitLValueForField(
- CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
+ CGF.MakeNaturalAlignAddrLValue(BufferPtr, StaticTy), FD);
Address GlobAddr = GlobLVal.getAddress(CGF);
- llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
- GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
- GlobLVal.setAddress(Address(BufferPtr, GlobAddr.getAlignment()));
+ GlobLVal.setAddress(Address(GlobAddr.getPointer(),
+ CGF.ConvertTypeForMem(Private->getType()),
+ GlobAddr.getAlignment()));
switch (CGF.getEvaluationKind(Private->getType())) {
case TEK_Scalar: {
llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc);
@@ -2828,13 +2477,13 @@ static llvm::Value *emitGlobalToListReduceFunction(
// Buffer: global reduction buffer.
ImplicitParamDecl BufferArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
+ C.VoidPtrTy, ImplicitParamKind::Other);
// Idx: index of the buffer.
ImplicitParamDecl IdxArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr, C.IntTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
// ReduceList: thread local Reduce list.
ImplicitParamDecl ReduceListArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
- C.VoidPtrTy, ImplicitParamDecl::Other);
+ C.VoidPtrTy, ImplicitParamKind::Other);
FunctionArgList Args;
Args.push_back(&BufferArg);
Args.push_back(&IdxArg);
@@ -2865,8 +2514,7 @@ static llvm::Value *emitGlobalToListReduceFunction(
Address ReductionList =
CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
auto IPriv = Privates.begin();
- llvm::Value *Idxs[] = {llvm::ConstantInt::getNullValue(CGF.Int32Ty),
- CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
+ llvm::Value *Idxs[] = {CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(&IdxArg),
/*Volatile=*/false, C.IntTy,
Loc)};
unsigned Idx = 0;
@@ -2875,13 +2523,13 @@ static llvm::Value *emitGlobalToListReduceFunction(
// Global = Buffer.VD[Idx];
const ValueDecl *VD = cast<DeclRefExpr>(*IPriv)->getDecl();
const FieldDecl *FD = VarFieldMap.lookup(VD);
+ llvm::Value *BufferPtr =
+ Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs);
LValue GlobLVal = CGF.EmitLValueForField(
- CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
+ CGF.MakeNaturalAlignAddrLValue(BufferPtr, StaticTy), FD);
Address GlobAddr = GlobLVal.getAddress(CGF);
- llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
- GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
- llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
- CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
+ CGF.EmitStoreOfScalar(GlobAddr.getPointer(), Elem, /*Volatile=*/false,
+ C.VoidPtrTy);
if ((*IPriv)->getType()->isVariablyModifiedType()) {
// Store array size.
++Idx;
@@ -2897,8 +2545,7 @@ static llvm::Value *emitGlobalToListReduceFunction(
}
// Call reduce_function(ReduceList, GlobalReduceList)
- llvm::Value *GlobalReduceList =
- CGF.EmitCastToVoidPtr(ReductionList.getPointer());
+ llvm::Value *GlobalReduceList = ReductionList.getPointer();
Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
llvm::Value *ReducedPtr = CGF.EmitLoadOfScalar(
AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc);
@@ -3173,15 +2820,25 @@ void CGOpenMPRuntimeGPU::emitReduction(
assert((TeamsReduction || ParallelReduction) &&
"Invalid reduction selection in emitReduction.");
+ llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap;
+ llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size());
+ int Cnt = 0;
+ for (const Expr *DRE : Privates) {
+ PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl();
+ ++Cnt;
+ }
+
+ ASTContext &C = CGM.getContext();
+ const RecordDecl *ReductionRec = ::buildRecordForGlobalizedVars(
+ CGM.getContext(), PrivatesReductions, std::nullopt, VarFieldMap, 1);
+
// Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
// RedList, shuffle_reduce_func, interwarp_copy_func);
// or
// Build res = __kmpc_reduce_teams_nowait_simple(<loc>, <gtid>, <lck>);
llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
- llvm::Value *ThreadId = getThreadID(CGF, Loc);
llvm::Value *Res;
- ASTContext &C = CGM.getContext();
// 1. Build a list of reduction variables.
// void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
auto Size = RHSExprs.size();
@@ -3191,9 +2848,9 @@ void CGOpenMPRuntimeGPU::emitReduction(
++Size;
}
llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
- QualType ReductionArrayTy =
- C.getConstantArrayType(C.VoidPtrTy, ArraySize, nullptr, ArrayType::Normal,
- /*IndexTypeQuals=*/0);
+ QualType ReductionArrayTy = C.getConstantArrayType(
+ C.VoidPtrTy, ArraySize, nullptr, ArraySizeModifier::Normal,
+ /*IndexTypeQuals=*/0);
Address ReductionList =
CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
auto IPriv = Privates.begin();
@@ -3221,21 +2878,19 @@ void CGOpenMPRuntimeGPU::emitReduction(
llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
ReductionList.getPointer(), CGF.VoidPtrTy);
llvm::Function *ReductionFn = emitReductionFunction(
- Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
- LHSExprs, RHSExprs, ReductionOps);
- llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
+ CGF.CurFn->getName(), Loc, CGF.ConvertTypeForMem(ReductionArrayTy),
+ Privates, LHSExprs, RHSExprs, ReductionOps);
+ llvm::Value *ReductionDataSize =
+ CGF.getTypeSize(C.getRecordType(ReductionRec));
+ ReductionDataSize =
+ CGF.Builder.CreateSExtOrTrunc(ReductionDataSize, CGF.Int64Ty);
llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
llvm::Value *InterWarpCopyFn =
emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy, Loc);
if (ParallelReduction) {
- llvm::Value *Args[] = {RTLoc,
- ThreadId,
- CGF.Builder.getInt32(RHSExprs.size()),
- ReductionArrayTySize,
- RL,
- ShuffleAndReduceFn,
+ llvm::Value *Args[] = {RTLoc, ReductionDataSize, RL, ShuffleAndReduceFn,
InterWarpCopyFn};
Res = CGF.EmitRuntimeCall(
@@ -3244,42 +2899,27 @@ void CGOpenMPRuntimeGPU::emitReduction(
Args);
} else {
assert(TeamsReduction && "expected teams reduction.");
- llvm::SmallDenseMap<const ValueDecl *, const FieldDecl *> VarFieldMap;
- llvm::SmallVector<const ValueDecl *, 4> PrivatesReductions(Privates.size());
- int Cnt = 0;
- for (const Expr *DRE : Privates) {
- PrivatesReductions[Cnt] = cast<DeclRefExpr>(DRE)->getDecl();
- ++Cnt;
- }
- const RecordDecl *TeamReductionRec = ::buildRecordForGlobalizedVars(
- CGM.getContext(), PrivatesReductions, llvm::None, VarFieldMap,
- C.getLangOpts().OpenMPCUDAReductionBufNum);
- TeamsReductions.push_back(TeamReductionRec);
- if (!KernelTeamsReductionPtr) {
- KernelTeamsReductionPtr = new llvm::GlobalVariable(
- CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/true,
- llvm::GlobalValue::InternalLinkage, nullptr,
- "_openmp_teams_reductions_buffer_$_$ptr");
- }
- llvm::Value *GlobalBufferPtr = CGF.EmitLoadOfScalar(
- Address(KernelTeamsReductionPtr, CGM.getPointerAlign()),
- /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
+ TeamsReductions.push_back(ReductionRec);
+ auto *KernelTeamsReductionPtr = CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_reduction_get_fixed_buffer),
+ {}, "_openmp_teams_reductions_buffer_$_$ptr");
llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction(
- CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
+ CGM, Privates, ReductionArrayTy, Loc, ReductionRec, VarFieldMap);
llvm::Value *GlobalToBufferRedFn = ::emitListToGlobalReduceFunction(
- CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
+ CGM, Privates, ReductionArrayTy, Loc, ReductionRec, VarFieldMap,
ReductionFn);
llvm::Value *BufferToGlobalCpyFn = ::emitGlobalToListCopyFunction(
- CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
+ CGM, Privates, ReductionArrayTy, Loc, ReductionRec, VarFieldMap);
llvm::Value *BufferToGlobalRedFn = ::emitGlobalToListReduceFunction(
- CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap,
+ CGM, Privates, ReductionArrayTy, Loc, ReductionRec, VarFieldMap,
ReductionFn);
llvm::Value *Args[] = {
RTLoc,
- ThreadId,
- GlobalBufferPtr,
+ KernelTeamsReductionPtr,
CGF.Builder.getInt32(C.getLangOpts().OpenMPCUDAReductionBufNum),
+ ReductionDataSize,
RL,
ShuffleAndReduceFn,
InterWarpCopyFn,
@@ -3321,14 +2961,7 @@ void CGOpenMPRuntimeGPU::emitReduction(
++IRHS;
}
};
- llvm::Value *EndArgs[] = {ThreadId};
RegionCodeGenTy RCG(CodeGen);
- NVPTXActionTy Action(
- nullptr, llvm::None,
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_nvptx_end_reduce_nowait),
- EndArgs);
- RCG.setAction(Action);
RCG(CGF);
// There is no need to emit line number for unconditional branch.
(void)ApplyDebugLocation::CreateEmpty(CGF);
@@ -3358,7 +2991,7 @@ CGOpenMPRuntimeGPU::translateParameter(const FieldDecl *FD,
if (isa<ImplicitParamDecl>(NativeParam))
return ImplicitParamDecl::Create(
CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
- NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other);
+ NativeParam->getIdentifier(), ArgType, ImplicitParamKind::Other);
return ParmVarDecl::Create(
CGM.getContext(),
const_cast<DeclContext *>(NativeParam->getDeclContext()),
@@ -3380,18 +3013,14 @@ CGOpenMPRuntimeGPU::getParameterAddress(CodeGenFunction &CGF,
const Type *NonQualTy = QC.strip(NativeParamType);
QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
unsigned NativePointeeAddrSpace =
- CGF.getContext().getTargetAddressSpace(NativePointeeTy);
+ CGF.getTypes().getTargetAddressSpace(NativePointeeTy);
QualType TargetTy = TargetParam->getType();
- llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
- LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
- // First cast to generic.
- TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
- /*AddrSpace=*/0));
- // Cast from generic to native address space.
+ llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(LocalAddr, /*Volatile=*/false,
+ TargetTy, SourceLocation());
+ // Cast to native address space.
TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
- NativePointeeAddrSpace));
+ TargetAddr,
+ llvm::PointerType::get(CGF.getLLVMContext(), NativePointeeAddrSpace));
Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
NativeParamType);
@@ -3415,11 +3044,8 @@ void CGOpenMPRuntimeGPU::emitOutlinedFunctionCall(
TargetArgs.emplace_back(NativeArg);
continue;
}
- llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- NativeArg,
- NativeArg->getType()->getPointerElementType()->getPointerTo());
TargetArgs.emplace_back(
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(NativeArg, TargetType));
}
CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs);
}
@@ -3441,10 +3067,10 @@ llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
ImplicitParamDecl ParallelLevelArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
/*Id=*/nullptr, Int16QTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
ImplicitParamDecl WrapperArg(Ctx, /*DC=*/nullptr, D.getBeginLoc(),
/*Id=*/nullptr, Int32QTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
WrapperArgs.emplace_back(&ParallelLevelArg);
WrapperArgs.emplace_back(&WrapperArg);
@@ -3476,7 +3102,7 @@ llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
/*Name=*/".zero.addr");
- CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
+ CGF.Builder.CreateStore(CGF.Builder.getInt32(/*C*/ 0), ZeroAddr);
// Get the array of arguments.
SmallVector<llvm::Value *, 8> Args;
@@ -3503,15 +3129,14 @@ llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
SharedArgListAddress = CGF.EmitLoadOfPointer(
GlobalArgs, CGF.getContext()
- .getPointerType(CGF.getContext().getPointerType(
- CGF.getContext().VoidPtrTy))
+ .getPointerType(CGF.getContext().VoidPtrTy)
.castAs<PointerType>());
}
unsigned Idx = 0;
if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
- Src, CGF.SizeTy->getPointerTo());
+ Src, CGF.SizeTy->getPointerTo(), CGF.SizeTy);
llvm::Value *LB = CGF.EmitLoadOfScalar(
TypedAddress,
/*Volatile=*/false,
@@ -3521,7 +3146,7 @@ llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
++Idx;
Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
- Src, CGF.SizeTy->getPointerTo());
+ Src, CGF.SizeTy->getPointerTo(), CGF.SizeTy);
llvm::Value *UB = CGF.EmitLoadOfScalar(
TypedAddress,
/*Volatile=*/false,
@@ -3536,7 +3161,8 @@ llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
QualType ElemTy = CurField->getType();
Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx);
Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
- Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)));
+ Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)),
+ CGF.ConvertTypeForMem(ElemTy));
llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
/*Volatile=*/false,
CGFContext.getPointerType(ElemTy),
@@ -3557,7 +3183,7 @@ llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF,
const Decl *D) {
- if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
+ if (getDataSharingMode() != CGOpenMPRuntimeGPU::DS_Generic)
return;
assert(D && "Expected function or captured|block decl.");
@@ -3588,7 +3214,10 @@ void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF,
TeamAndReductions.second.clear();
ArrayRef<const ValueDecl *> EscapedVariableLengthDecls =
VarChecker.getEscapedVariableLengthDecls();
- if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty())
+ ArrayRef<const ValueDecl *> DelayedVariableLengthDecls =
+ VarChecker.getDelayedVariableLengthDecls();
+ if (!GlobalizedVarsRecord && EscapedVariableLengthDecls.empty() &&
+ DelayedVariableLengthDecls.empty())
return;
auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
I->getSecond().MappedParams =
@@ -3598,29 +3227,21 @@ void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF,
VarChecker.getEscapedParameters().end());
I->getSecond().EscapedVariableLengthDecls.append(
EscapedVariableLengthDecls.begin(), EscapedVariableLengthDecls.end());
+ I->getSecond().DelayedVariableLengthDecls.append(
+ DelayedVariableLengthDecls.begin(), DelayedVariableLengthDecls.end());
DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
assert(VD->isCanonicalDecl() && "Expected canonical declaration");
Data.insert(std::make_pair(VD, MappedVarData()));
}
- if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
- CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
- VarChecker.Visit(Body);
- I->getSecond().SecondaryLocalVarData.emplace();
- DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
- for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
- assert(VD->isCanonicalDecl() && "Expected canonical declaration");
- Data.insert(std::make_pair(VD, MappedVarData()));
- }
- }
if (!NeedToDelayGlobalization) {
- emitGenericVarsProlog(CGF, D->getBeginLoc(), /*WithSPMDCheck=*/true);
+ emitGenericVarsProlog(CGF, D->getBeginLoc());
struct GlobalizationScope final : EHScopeStack::Cleanup {
GlobalizationScope() = default;
void Emit(CodeGenFunction &CGF, Flags flags) override {
static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime())
- .emitGenericVarsEpilog(CGF, /*WithSPMDCheck=*/true);
+ .emitGenericVarsEpilog(CGF);
}
};
CGF.EHStack.pushCleanup<GlobalizationScope>(NormalAndEHCleanup);
@@ -3658,7 +3279,7 @@ Address CGOpenMPRuntimeGPU::getAddressOfLocalVariable(CodeGenFunction &CGF,
llvm::Type *VarTy = CGF.ConvertTypeForMem(VD->getType());
auto *GV = new llvm::GlobalVariable(
CGM.getModule(), VarTy, /*isConstant=*/false,
- llvm::GlobalValue::InternalLinkage, llvm::Constant::getNullValue(VarTy),
+ llvm::GlobalValue::InternalLinkage, llvm::PoisonValue::get(VarTy),
VD->getName(),
/*InsertBefore=*/nullptr, llvm::GlobalValue::NotThreadLocal,
CGM.getContext().getTargetAddressSpace(AS));
@@ -3668,10 +3289,10 @@ Address CGOpenMPRuntimeGPU::getAddressOfLocalVariable(CodeGenFunction &CGF,
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
GV, VarTy->getPointerTo(CGM.getContext().getTargetAddressSpace(
VD->getType().getAddressSpace()))),
- Align);
+ VarTy, Align);
}
- if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
+ if (getDataSharingMode() != CGOpenMPRuntimeGPU::DS_Generic)
return Address::invalid();
VD = VD->getCanonicalDecl();
@@ -3754,7 +3375,7 @@ void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas(
else
VDLVal = CGF.MakeAddrLValue(
VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
- llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
+ llvm::DenseMap<const ValueDecl *, FieldDecl *> Captures;
FieldDecl *ThisCapture = nullptr;
RD->getCaptureFields(Captures, ThisCapture);
if (ThisCapture && CGF.CapturedStmtInfo->isCXXThisExprCaptured()) {
@@ -3766,13 +3387,15 @@ void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas(
for (const LambdaCapture &LC : RD->captures()) {
if (LC.getCaptureKind() != LCK_ByRef)
continue;
- const VarDecl *VD = LC.getCapturedVar();
- if (!CS->capturesVariable(VD))
+ const ValueDecl *VD = LC.getCapturedVar();
+ // FIXME: For now VD is always a VarDecl because OpenMP does not support
+ // capturing structured bindings in lambdas yet.
+ if (!CS->capturesVariable(cast<VarDecl>(VD)))
continue;
auto It = Captures.find(VD);
assert(It != Captures.end() && "Found lambda capture without field.");
LValue VarLVal = CGF.EmitLValueForFieldInitialization(VDLVal, It->second);
- Address VDAddr = CGF.GetAddrOfLocalVar(VD);
+ Address VDAddr = CGF.GetAddrOfLocalVar(cast<VarDecl>(VD));
if (VD->getType().getCanonicalType()->isReferenceType())
VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
VD->getType().getCanonicalType())
@@ -3857,6 +3480,10 @@ void CGOpenMPRuntimeGPU::processRequiresDirective(
case CudaArch::SM_75:
case CudaArch::SM_80:
case CudaArch::SM_86:
+ case CudaArch::SM_87:
+ case CudaArch::SM_89:
+ case CudaArch::SM_90:
+ case CudaArch::SM_90a:
case CudaArch::GFX600:
case CudaArch::GFX601:
case CudaArch::GFX602:
@@ -3879,6 +3506,9 @@ void CGOpenMPRuntimeGPU::processRequiresDirective(
case CudaArch::GFX909:
case CudaArch::GFX90a:
case CudaArch::GFX90c:
+ case CudaArch::GFX940:
+ case CudaArch::GFX941:
+ case CudaArch::GFX942:
case CudaArch::GFX1010:
case CudaArch::GFX1011:
case CudaArch::GFX1012:
@@ -3889,6 +3519,16 @@ void CGOpenMPRuntimeGPU::processRequiresDirective(
case CudaArch::GFX1033:
case CudaArch::GFX1034:
case CudaArch::GFX1035:
+ case CudaArch::GFX1036:
+ case CudaArch::GFX1100:
+ case CudaArch::GFX1101:
+ case CudaArch::GFX1102:
+ case CudaArch::GFX1103:
+ case CudaArch::GFX1150:
+ case CudaArch::GFX1151:
+ case CudaArch::GFX1200:
+ case CudaArch::GFX1201:
+ case CudaArch::Generic:
case CudaArch::UNUSED:
case CudaArch::UNKNOWN:
break;
@@ -3900,38 +3540,30 @@ void CGOpenMPRuntimeGPU::processRequiresDirective(
CGOpenMPRuntime::processRequiresDirective(D);
}
-void CGOpenMPRuntimeGPU::clear() {
-
- if (!TeamsReductions.empty()) {
- ASTContext &C = CGM.getContext();
- RecordDecl *StaticRD = C.buildImplicitRecord(
- "_openmp_teams_reduction_type_$_", RecordDecl::TagKind::TTK_Union);
- StaticRD->startDefinition();
- for (const RecordDecl *TeamReductionRec : TeamsReductions) {
- QualType RecTy = C.getRecordType(TeamReductionRec);
- auto *Field = FieldDecl::Create(
- C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
- C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
- /*BW=*/nullptr, /*Mutable=*/false,
- /*InitStyle=*/ICIS_NoInit);
- Field->setAccess(AS_public);
- StaticRD->addDecl(Field);
- }
- StaticRD->completeDefinition();
- QualType StaticTy = C.getRecordType(StaticRD);
- llvm::Type *LLVMReductionsBufferTy =
- CGM.getTypes().ConvertTypeForMem(StaticTy);
- // FIXME: nvlink does not handle weak linkage correctly (object with the
- // different size are reported as erroneous).
- // Restore CommonLinkage as soon as nvlink is fixed.
- auto *GV = new llvm::GlobalVariable(
- CGM.getModule(), LLVMReductionsBufferTy,
- /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
- llvm::Constant::getNullValue(LLVMReductionsBufferTy),
- "_openmp_teams_reductions_buffer_$_");
- KernelTeamsReductionPtr->setInitializer(
- llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
- CGM.VoidPtrTy));
- }
- CGOpenMPRuntime::clear();
+llvm::Value *CGOpenMPRuntimeGPU::getGPUNumThreads(CodeGenFunction &CGF) {
+ CGBuilderTy &Bld = CGF.Builder;
+ llvm::Module *M = &CGF.CGM.getModule();
+ const char *LocSize = "__kmpc_get_hardware_num_threads_in_block";
+ llvm::Function *F = M->getFunction(LocSize);
+ if (!F) {
+ F = llvm::Function::Create(
+ llvm::FunctionType::get(CGF.Int32Ty, std::nullopt, false),
+ llvm::GlobalVariable::ExternalLinkage, LocSize, &CGF.CGM.getModule());
+ }
+ return Bld.CreateCall(F, std::nullopt, "nvptx_num_threads");
+}
+
+llvm::Value *CGOpenMPRuntimeGPU::getGPUThreadID(CodeGenFunction &CGF) {
+ ArrayRef<llvm::Value *> Args{};
+ return CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_get_hardware_thread_id_in_block),
+ Args);
+}
+
+llvm::Value *CGOpenMPRuntimeGPU::getGPUWarpSize(CodeGenFunction &CGF) {
+ ArrayRef<llvm::Value *> Args{};
+ return CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_get_warp_size),
+ Args);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
index b5f1b843c46b..141436f26230 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
@@ -17,7 +17,6 @@
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "clang/AST/StmtOpenMP.h"
-#include "llvm/Frontend/OpenMP/OMPGridValues.h"
namespace clang {
namespace CodeGen {
@@ -33,6 +32,18 @@ public:
/// Unknown execution mode (orphaned directive).
EM_Unknown,
};
+
+ /// Target codegen is specialized based on two data-sharing modes: CUDA, in
+ /// which the local variables are actually global threadlocal, and Generic, in
+ /// which the local variables are placed in global memory if they may escape
+ /// their declaration context.
+ enum DataSharingMode {
+ /// CUDA data sharing mode.
+ DS_CUDA,
+ /// Generic data-sharing mode.
+ DS_Generic,
+ };
+
private:
/// Parallel outlined function work for workers to execute.
llvm::SmallVector<llvm::Function *, 16> Work;
@@ -43,36 +54,29 @@ private:
ExecutionMode getExecutionMode() const;
- bool requiresFullRuntime() const { return RequiresFullRuntime; }
+ DataSharingMode getDataSharingMode() const;
/// Get barrier to synchronize all threads in a block.
void syncCTAThreads(CodeGenFunction &CGF);
/// Helper for target directive initialization.
- void emitKernelInit(CodeGenFunction &CGF, EntryFunctionState &EST,
- bool IsSPMD);
+ void emitKernelInit(const OMPExecutableDirective &D, CodeGenFunction &CGF,
+ EntryFunctionState &EST, bool IsSPMD);
/// Helper for target directive finalization.
void emitKernelDeinit(CodeGenFunction &CGF, EntryFunctionState &EST,
bool IsSPMD);
/// Helper for generic variables globalization prolog.
- void emitGenericVarsProlog(CodeGenFunction &CGF, SourceLocation Loc,
- bool WithSPMDCheck = false);
+ void emitGenericVarsProlog(CodeGenFunction &CGF, SourceLocation Loc);
/// Helper for generic variables globalization epilog.
- void emitGenericVarsEpilog(CodeGenFunction &CGF, bool WithSPMDCheck = false);
+ void emitGenericVarsEpilog(CodeGenFunction &CGF);
//
// Base class overrides.
//
- /// Creates offloading entry for the provided entry ID \a ID,
- /// address \a Addr, size \a Size, and flags \a Flags.
- void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
- uint64_t Size, int32_t Flags,
- llvm::GlobalValue::LinkageTypes Linkage) override;
-
/// Emit outlined function specialized for the Fork-Join
/// programming model for applicable target directives on the NVPTX device.
/// \param D Directive to emit.
@@ -119,85 +123,54 @@ private:
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) override;
- /// Emits code for parallel or serial call of the \a OutlinedFn with
- /// variables captured in a record which address is stored in \a
- /// CapturedStruct.
- /// This call is for the Non-SPMD Execution Mode.
- /// \param OutlinedFn Outlined function to be run in parallel threads. Type of
- /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
- /// \param CapturedVars A pointer to the record with the references to
- /// variables used in \a OutlinedFn function.
- /// \param IfCond Condition in the associated 'if' clause, if it was
- /// specified, nullptr otherwise.
- void emitNonSPMDParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
- llvm::Value *OutlinedFn,
- ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond);
-
- /// Emits code for parallel or serial call of the \a OutlinedFn with
- /// variables captured in a record which address is stored in \a
- /// CapturedStruct.
- /// This call is for a parallel directive within an SPMD target directive.
- /// \param OutlinedFn Outlined function to be run in parallel threads. Type of
- /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
- /// \param CapturedVars A pointer to the record with the references to
- /// variables used in \a OutlinedFn function.
- /// \param IfCond Condition in the associated 'if' clause, if it was
- /// specified, nullptr otherwise.
- ///
- void emitSPMDParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
- llvm::Function *OutlinedFn,
- ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond);
-
protected:
- /// Get the function name of an outlined region.
- // The name can be customized depending on the target.
- //
- StringRef getOutlinedHelperName() const override {
- return "__omp_outlined__";
- }
-
/// Check if the default location must be constant.
/// Constant for NVPTX for better optimization.
bool isDefaultLocationConstant() const override { return true; }
- /// Returns additional flags that can be stored in reserved_2 field of the
- /// default location.
- /// For NVPTX target contains data about SPMD/Non-SPMD execution mode +
- /// Full/Lightweight runtime mode. Used for better optimization.
- unsigned getDefaultLocationReserved2Flags() const override;
-
public:
explicit CGOpenMPRuntimeGPU(CodeGenModule &CGM);
- void clear() override;
+
+ bool isGPU() const override { return true; };
/// Declare generalized virtual functions which need to be defined
/// by all specializations of OpenMPGPURuntime Targets like AMDGCN
/// and NVPTX.
+ /// Check if the variable length declaration is delayed:
+ bool isDelayedVariableLengthDecl(CodeGenFunction &CGF,
+ const VarDecl *VD) const override;
+
+ /// Get call to __kmpc_alloc_shared
+ std::pair<llvm::Value *, llvm::Value *>
+ getKmpcAllocShared(CodeGenFunction &CGF, const VarDecl *VD) override;
+
+ /// Get call to __kmpc_free_shared
+ void getKmpcFreeShared(
+ CodeGenFunction &CGF,
+ const std::pair<llvm::Value *, llvm::Value *> &AddrSizePair) override;
+
/// Get the GPU warp size.
- virtual llvm::Value *getGPUWarpSize(CodeGenFunction &CGF) = 0;
+ llvm::Value *getGPUWarpSize(CodeGenFunction &CGF);
/// Get the id of the current thread on the GPU.
- virtual llvm::Value *getGPUThreadID(CodeGenFunction &CGF) = 0;
+ llvm::Value *getGPUThreadID(CodeGenFunction &CGF);
/// Get the maximum number of threads in a block of the GPU.
- virtual llvm::Value *getGPUNumThreads(CodeGenFunction &CGF) = 0;
+ llvm::Value *getGPUNumThreads(CodeGenFunction &CGF);
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
- virtual void emitProcBindClause(CodeGenFunction &CGF,
- llvm::omp::ProcBindKind ProcBind,
- SourceLocation Loc) override;
+ void emitProcBindClause(CodeGenFunction &CGF,
+ llvm::omp::ProcBindKind ProcBind,
+ SourceLocation Loc) override;
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
- virtual void emitNumThreadsClause(CodeGenFunction &CGF,
- llvm::Value *NumThreads,
- SourceLocation Loc) override;
+ void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads,
+ SourceLocation Loc) override;
/// This function ought to emit, in the general case, a call to
// the openmp runtime kmpc_push_num_teams. In NVPTX backend it is not needed
@@ -211,31 +184,31 @@ public:
// directive.
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
+ /// \param CGF Reference to current CodeGenFunction.
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
- llvm::Function *
- emitParallelOutlinedFunction(const OMPExecutableDirective &D,
- const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind,
- const RegionCodeGenTy &CodeGen) override;
+ llvm::Function *emitParallelOutlinedFunction(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) override;
/// Emits inlined function for the specified OpenMP teams
// directive.
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
+ /// \param CGF Reference to current CodeGenFunction.
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
- llvm::Function *
- emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
- const VarDecl *ThreadIDVar,
- OpenMPDirectiveKind InnermostKind,
- const RegionCodeGenTy &CodeGen) override;
+ llvm::Function *emitTeamsOutlinedFunction(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind,
+ const RegionCodeGenTy &CodeGen) override;
/// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
@@ -258,10 +231,13 @@ public:
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
+ /// \param NumThreads The value corresponding to the num_threads clause, if
+ /// any,
+ /// or nullptr.
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
- const Expr *IfCond) override;
+ const Expr *IfCond, llvm::Value *NumThreads) override;
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
@@ -298,18 +274,12 @@ public:
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
- virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
- ArrayRef<const Expr *> Privates,
- ArrayRef<const Expr *> LHSExprs,
- ArrayRef<const Expr *> RHSExprs,
- ArrayRef<const Expr *> ReductionOps,
- ReductionOptionsTy Options) override;
-
- /// Returns specified OpenMP runtime function for the current OpenMP
- /// implementation. Specialized for the NVPTX device.
- /// \param Function OpenMP runtime function.
- /// \return Specified function.
- llvm::FunctionCallee createNVPTXRuntimeFunction(unsigned Function);
+ void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
+ ArrayRef<const Expr *> Privates,
+ ArrayRef<const Expr *> LHSExprs,
+ ArrayRef<const Expr *> RHSExprs,
+ ArrayRef<const Expr *> ReductionOps,
+ ReductionOptionsTy Options) override;
/// Translates the native parameter of outlined function if this is required
/// for target.
@@ -329,7 +299,7 @@ public:
/// translating these arguments to correct target-specific arguments.
void emitOutlinedFunctionCall(
CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn,
- ArrayRef<llvm::Value *> Args = llvm::None) const override;
+ ArrayRef<llvm::Value *> Args = std::nullopt) const override;
/// Emits OpenMP-specific function prolog.
/// Required for device constructs.
@@ -339,17 +309,6 @@ public:
Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD) override;
- /// Target codegen is specialized based on two data-sharing modes: CUDA, in
- /// which the local variables are actually global threadlocal, and Generic, in
- /// which the local variables are placed in global memory if they may escape
- /// their declaration context.
- enum DataSharingMode {
- /// CUDA data sharing mode.
- CUDA,
- /// Generic data-sharing mode.
- Generic,
- };
-
/// Cleans up references to the objects in finished function.
///
void functionFinished(CodeGenFunction &CGF) override;
@@ -385,17 +344,13 @@ private:
/// to emit optimized code.
ExecutionMode CurrentExecutionMode = EM_Unknown;
- /// Check if the full runtime is required (default - yes).
- bool RequiresFullRuntime = true;
+ /// Track the data sharing mode when codegening directives within a target
+ /// region.
+ DataSharingMode CurrentDataSharingMode = DataSharingMode::DS_Generic;
- /// true if we're emitting the code for the target region and next parallel
- /// region is L0 for sure.
- bool IsInTargetMasterThreadRegion = false;
/// true if currently emitting code for target/teams/distribute region, false
/// - otherwise.
bool IsInTTDRegion = false;
- /// true if we're definitely in the parallel region.
- bool IsInParallelRegion = false;
/// Map between an outlined function and its wrapper.
llvm::DenseMap<llvm::Function *, llvm::Function *> WrapperFunctionsMap;
@@ -420,26 +375,21 @@ private:
using EscapedParamsTy = llvm::SmallPtrSet<const Decl *, 4>;
struct FunctionData {
DeclToAddrMapTy LocalVarData;
- llvm::Optional<DeclToAddrMapTy> SecondaryLocalVarData = llvm::None;
EscapedParamsTy EscapedParameters;
llvm::SmallVector<const ValueDecl*, 4> EscapedVariableLengthDecls;
+ llvm::SmallVector<const ValueDecl *, 4> DelayedVariableLengthDecls;
llvm::SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4>
EscapedVariableLengthDeclsAddrs;
- llvm::Value *IsInSPMDModeFlag = nullptr;
std::unique_ptr<CodeGenFunction::OMPMapVars> MappedParams;
};
/// Maps the function to the list of the globalized variables with their
/// addresses.
llvm::SmallDenseMap<llvm::Function *, FunctionData> FunctionGlobalizedDecls;
- llvm::GlobalVariable *KernelTeamsReductionPtr = nullptr;
/// List of the records with the list of fields for the reductions across the
/// teams. Used to build the intermediate buffer for the fast teams
/// reductions.
/// All the records are gathered into a union `union.type` is created.
llvm::SmallVector<const RecordDecl *, 4> TeamsReductions;
- /// Shared pointer for the global memory in the global memory buffer used for
- /// the given kernel.
- llvm::GlobalVariable *KernelStaticGlobalized = nullptr;
/// Pair of the Non-SPMD team and all reductions variables in this team
/// region.
std::pair<const Decl *, llvm::SmallVector<const ValueDecl *, 4>>
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
deleted file mode 100644
index 1688d07b90b6..000000000000
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-//===---- CGOpenMPRuntimeNVPTX.cpp - Interface to OpenMP NVPTX Runtimes ---===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This provides a class for OpenMP runtime code generation specialized to NVPTX
-// targets from generalized CGOpenMPRuntimeGPU class.
-//
-//===----------------------------------------------------------------------===//
-
-#include "CGOpenMPRuntimeNVPTX.h"
-#include "CGOpenMPRuntimeGPU.h"
-#include "CodeGenFunction.h"
-#include "clang/AST/Attr.h"
-#include "clang/AST/DeclOpenMP.h"
-#include "clang/AST/StmtOpenMP.h"
-#include "clang/AST/StmtVisitor.h"
-#include "clang/Basic/Cuda.h"
-#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/IR/IntrinsicsNVPTX.h"
-
-using namespace clang;
-using namespace CodeGen;
-using namespace llvm::omp;
-
-CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM)
- : CGOpenMPRuntimeGPU(CGM) {
- if (!CGM.getLangOpts().OpenMPIsDevice)
- llvm_unreachable("OpenMP NVPTX can only handle device code.");
-}
-
-llvm::Value *CGOpenMPRuntimeNVPTX::getGPUWarpSize(CodeGenFunction &CGF) {
- return CGF.EmitRuntimeCall(
- llvm::Intrinsic::getDeclaration(
- &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_warpsize),
- "nvptx_warp_size");
-}
-
-llvm::Value *CGOpenMPRuntimeNVPTX::getGPUThreadID(CodeGenFunction &CGF) {
- CGBuilderTy &Bld = CGF.Builder;
- llvm::Function *F;
- F = llvm::Intrinsic::getDeclaration(
- &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x);
- return Bld.CreateCall(F, llvm::None, "nvptx_tid");
-}
-
-llvm::Value *CGOpenMPRuntimeNVPTX::getGPUNumThreads(CodeGenFunction &CGF) {
- CGBuilderTy &Bld = CGF.Builder;
- llvm::Function *F;
- F = llvm::Intrinsic::getDeclaration(
- &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_x);
- return Bld.CreateCall(F, llvm::None, "nvptx_num_threads");
-}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.h b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
deleted file mode 100644
index 5f1602959266..000000000000
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
+++ /dev/null
@@ -1,43 +0,0 @@
-//===----- CGOpenMPRuntimeNVPTX.h - Interface to OpenMP NVPTX Runtimes ----===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This provides a class for OpenMP runtime code generation specialized to NVPTX
-// targets from generalized CGOpenMPRuntimeGPU class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIMENVPTX_H
-#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIMENVPTX_H
-
-#include "CGOpenMPRuntime.h"
-#include "CGOpenMPRuntimeGPU.h"
-#include "CodeGenFunction.h"
-#include "clang/AST/StmtOpenMP.h"
-
-namespace clang {
-namespace CodeGen {
-
-class CGOpenMPRuntimeNVPTX final : public CGOpenMPRuntimeGPU {
-
-public:
- explicit CGOpenMPRuntimeNVPTX(CodeGenModule &CGM);
-
- /// Get the GPU warp size.
- llvm::Value *getGPUWarpSize(CodeGenFunction &CGF) override;
-
- /// Get the id of the current thread on the GPU.
- llvm::Value *getGPUThreadID(CodeGenFunction &CGF) override;
-
- /// Get the maximum number of threads in a block of the GPU.
- llvm::Value *getGPUNumThreads(CodeGenFunction &CGF) override;
-};
-
-} // CodeGen namespace.
-} // clang namespace.
-
-#endif // LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIMENVPTX_H
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h b/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h
index e6665b72bcba..d5ea74922603 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h
@@ -93,8 +93,8 @@ struct CGBitFieldInfo {
CharUnits VolatileStorageOffset;
CGBitFieldInfo()
- : Offset(), Size(), IsSigned(), StorageSize(), StorageOffset(),
- VolatileOffset(), VolatileStorageSize(), VolatileStorageOffset() {}
+ : Offset(), Size(), IsSigned(), StorageSize(), VolatileOffset(),
+ VolatileStorageSize() {}
CGBitFieldInfo(unsigned Offset, unsigned Size, bool IsSigned,
unsigned StorageSize, CharUnits StorageOffset)
@@ -200,6 +200,12 @@ public:
return FieldInfo.lookup(FD);
}
+ // Return whether the following non virtual base has a corresponding
+ // entry in the LLVM struct.
+ bool hasNonVirtualBaseLLVMField(const CXXRecordDecl *RD) const {
+ return NonVirtualBases.count(RD);
+ }
+
unsigned getNonVirtualBaseLLVMFieldNo(const CXXRecordDecl *RD) const {
assert(NonVirtualBases.count(RD) && "Invalid non-virtual base!");
return NonVirtualBases.lookup(RD);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index cf8313f92587..868ef810f3c4 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -111,7 +111,7 @@ struct CGRecordLowering {
/// Helper function to check if we are targeting AAPCS.
bool isAAPCS() const {
- return Context.getTargetInfo().getABI().startswith("aapcs");
+ return Context.getTargetInfo().getABI().starts_with("aapcs");
}
/// Helper function to check if the target machine is BigEndian.
@@ -162,7 +162,7 @@ struct CGRecordLowering {
return CharUnits::fromQuantity(DataLayout.getTypeAllocSize(Type));
}
CharUnits getAlignment(llvm::Type *Type) {
- return CharUnits::fromQuantity(DataLayout.getABITypeAlignment(Type));
+ return CharUnits::fromQuantity(DataLayout.getABITypeAlign(Type));
}
bool isZeroInitializable(const FieldDecl *FD) {
return Types.isZeroInitializable(FD->getType());
@@ -182,7 +182,7 @@ struct CGRecordLowering {
llvm::Type *StorageType);
/// Lowers an ASTRecordLayout to a llvm type.
void lower(bool NonVirtualBaseType);
- void lowerUnion();
+ void lowerUnion(bool isNoUniqueAddress);
void accumulateFields();
void accumulateBitFields(RecordDecl::field_iterator Field,
RecordDecl::field_iterator FieldEnd);
@@ -280,7 +280,7 @@ void CGRecordLowering::lower(bool NVBaseType) {
// CodeGenTypes::ComputeRecordLayout.
CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
if (D->isUnion()) {
- lowerUnion();
+ lowerUnion(NVBaseType);
computeVolatileBitfields();
return;
}
@@ -308,8 +308,9 @@ void CGRecordLowering::lower(bool NVBaseType) {
computeVolatileBitfields();
}
-void CGRecordLowering::lowerUnion() {
- CharUnits LayoutSize = Layout.getSize();
+void CGRecordLowering::lowerUnion(bool isNoUniqueAddress) {
+ CharUnits LayoutSize =
+ isNoUniqueAddress ? Layout.getDataSize() : Layout.getSize();
llvm::Type *StorageType = nullptr;
bool SeenNamedMember = false;
// Iterate through the fields setting bitFieldInfo and the Fields array. Also
@@ -365,7 +366,12 @@ void CGRecordLowering::lowerUnion() {
FieldTypes.push_back(StorageType);
appendPaddingBytes(LayoutSize - getSize(StorageType));
// Set packed if we need it.
- if (LayoutSize % getAlignment(StorageType))
+ const auto StorageAlignment = getAlignment(StorageType);
+ assert((Layout.getSize() % StorageAlignment == 0 ||
+ Layout.getDataSize() % StorageAlignment) &&
+ "Union's standard layout and no_unique_address layout must agree on "
+ "packedness");
+ if (Layout.getDataSize() % StorageAlignment)
Packed = true;
}
@@ -379,9 +385,14 @@ void CGRecordLowering::accumulateFields() {
for (++Field; Field != FieldEnd && Field->isBitField(); ++Field);
accumulateBitFields(Start, Field);
} else if (!Field->isZeroSize(Context)) {
+ // Use base subobject layout for the potentially-overlapping field,
+ // as it is done in RecordLayoutBuilder
Members.push_back(MemberInfo(
bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
- getStorageType(*Field), *Field));
+ Field->isPotentiallyOverlapping()
+ ? getStorageType(Field->getType()->getAsCXXRecordDecl())
+ : getStorageType(*Field),
+ *Field));
++Field;
} else {
++Field;
@@ -411,7 +422,7 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
continue;
}
llvm::Type *Type =
- Types.ConvertTypeForMem(Field->getType(), /*ForBitFields=*/true);
+ Types.ConvertTypeForMem(Field->getType(), /*ForBitField=*/true);
// If we don't have a run yet, or don't live within the previous run's
// allocated storage then we allocate some storage and start a new run.
if (Run == FieldEnd || BitOffset >= Tail) {
@@ -647,12 +658,13 @@ void CGRecordLowering::computeVolatileBitfields() {
void CGRecordLowering::accumulateVPtrs() {
if (Layout.hasOwnVFPtr())
- Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr,
- llvm::FunctionType::get(getIntNType(32), /*isVarArg=*/true)->
- getPointerTo()->getPointerTo()));
+ Members.push_back(
+ MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr,
+ llvm::PointerType::getUnqual(Types.getLLVMContext())));
if (Layout.hasOwnVBPtr())
- Members.push_back(MemberInfo(Layout.getVBPtrOffset(), MemberInfo::VBPtr,
- llvm::Type::getInt32PtrTy(Types.getLLVMContext())));
+ Members.push_back(
+ MemberInfo(Layout.getVBPtrOffset(), MemberInfo::VBPtr,
+ llvm::PointerType::getUnqual(Types.getLLVMContext())));
}
void CGRecordLowering::accumulateVBases() {
@@ -882,7 +894,7 @@ CodeGenTypes::ComputeRecordLayout(const RecordDecl *D, llvm::StructType *Ty) {
// If we're in C++, compute the base subobject type.
llvm::StructType *BaseTy = nullptr;
- if (isa<CXXRecordDecl>(D) && !D->isUnion() && !D->hasAttr<FinalAttr>()) {
+ if (isa<CXXRecordDecl>(D)) {
BaseTy = Ty;
if (Builder.Layout.getNonVirtualSize() != Builder.Layout.getSize()) {
CGRecordLowering BaseBuilder(*this, D, /*Packed=*/Builder.Packed);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
index 0a3a722fa653..beff0ad9da27 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
@@ -24,13 +24,17 @@
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/Assumptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"
+#include <optional>
using namespace clang;
using namespace CodeGen;
@@ -196,6 +200,9 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
case Stmt::SEHTryStmtClass:
EmitSEHTryStmt(cast<SEHTryStmt>(*S));
break;
+ case Stmt::OMPMetaDirectiveClass:
+ EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
+ break;
case Stmt::OMPCanonicalLoopClass:
EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
break;
@@ -250,6 +257,9 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
case Stmt::OMPTaskyieldDirectiveClass:
EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
break;
+ case Stmt::OMPErrorDirectiveClass:
+ EmitOMPErrorDirective(cast<OMPErrorDirective>(*S));
+ break;
case Stmt::OMPBarrierDirectiveClass:
EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
break;
@@ -310,18 +320,31 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
case Stmt::OMPMasterTaskLoopDirectiveClass:
EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
break;
+ case Stmt::OMPMaskedTaskLoopDirectiveClass:
+ llvm_unreachable("masked taskloop directive not supported yet.");
+ break;
case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
EmitOMPMasterTaskLoopSimdDirective(
cast<OMPMasterTaskLoopSimdDirective>(*S));
break;
+ case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
+ llvm_unreachable("masked taskloop simd directive not supported yet.");
+ break;
case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
EmitOMPParallelMasterTaskLoopDirective(
cast<OMPParallelMasterTaskLoopDirective>(*S));
break;
+ case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
+ llvm_unreachable("parallel masked taskloop directive not supported yet.");
+ break;
case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
EmitOMPParallelMasterTaskLoopSimdDirective(
cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
break;
+ case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
+ llvm_unreachable(
+ "parallel masked taskloop simd directive not supported yet.");
+ break;
case Stmt::OMPDistributeDirectiveClass:
EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
break;
@@ -381,14 +404,37 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
cast<OMPTargetTeamsDistributeSimdDirective>(*S));
break;
case Stmt::OMPInteropDirectiveClass:
- llvm_unreachable("Interop directive not supported yet.");
+ EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
break;
case Stmt::OMPDispatchDirectiveClass:
- llvm_unreachable("Dispatch directive not supported yet.");
+ CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
break;
+ case Stmt::OMPScopeDirectiveClass:
+ llvm_unreachable("scope not supported with FE outlining");
case Stmt::OMPMaskedDirectiveClass:
EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
break;
+ case Stmt::OMPGenericLoopDirectiveClass:
+ EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
+ break;
+ case Stmt::OMPTeamsGenericLoopDirectiveClass:
+ EmitOMPTeamsGenericLoopDirective(cast<OMPTeamsGenericLoopDirective>(*S));
+ break;
+ case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
+ EmitOMPTargetTeamsGenericLoopDirective(
+ cast<OMPTargetTeamsGenericLoopDirective>(*S));
+ break;
+ case Stmt::OMPParallelGenericLoopDirectiveClass:
+ EmitOMPParallelGenericLoopDirective(
+ cast<OMPParallelGenericLoopDirective>(*S));
+ break;
+ case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
+ EmitOMPTargetParallelGenericLoopDirective(
+ cast<OMPTargetParallelGenericLoopDirective>(*S));
+ break;
+ case Stmt::OMPParallelMaskedDirectiveClass:
+ EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
+ break;
}
}
@@ -536,9 +582,9 @@ void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
// Place the block after the current block, if possible, or else at
// the end of the function.
if (CurBB && CurBB->getParent())
- CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
+ CurFn->insert(std::next(CurBB->getIterator()), BB);
else
- CurFn->getBasicBlockList().push_back(BB);
+ CurFn->insert(CurFn->end(), BB);
Builder.SetInsertPoint(BB);
}
@@ -563,15 +609,14 @@ void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
bool inserted = false;
for (llvm::User *u : block->users()) {
if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
- CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
- block);
+ CurFn->insert(std::next(insn->getParent()->getIterator()), block);
inserted = true;
break;
}
}
if (!inserted)
- CurFn->getBasicBlockList().push_back(block);
+ CurFn->insert(CurFn->end(), block);
Builder.SetInsertPoint(block);
}
@@ -659,20 +704,34 @@ void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
bool nomerge = false;
+ bool noinline = false;
+ bool alwaysinline = false;
const CallExpr *musttail = nullptr;
for (const auto *A : S.getAttrs()) {
- if (A->getKind() == attr::NoMerge) {
+ switch (A->getKind()) {
+ default:
+ break;
+ case attr::NoMerge:
nomerge = true;
- }
- if (A->getKind() == attr::MustTail) {
+ break;
+ case attr::NoInline:
+ noinline = true;
+ break;
+ case attr::AlwaysInline:
+ alwaysinline = true;
+ break;
+ case attr::MustTail:
const Stmt *Sub = S.getSubStmt();
const ReturnStmt *R = cast<ReturnStmt>(Sub);
musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
+ break;
}
}
- SaveAndRestore<bool> save_nomerge(InNoMergeAttributedStmt, nomerge);
- SaveAndRestore<const CallExpr *> save_musttail(MustTailCall, musttail);
+ SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
+ SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
+ SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
+ SaveAndRestore save_musttail(MustTailCall, musttail);
EmitStmt(S.getSubStmt(), S.getAttrs());
}
@@ -709,6 +768,17 @@ void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
}
void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
+ // The else branch of a consteval if statement is always the only branch that
+ // can be runtime evaluated.
+ if (S.isConsteval()) {
+ const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : S.getElse();
+ if (Executed) {
+ RunCleanupsScope ExecutedScope(*this);
+ EmitStmt(Executed);
+ }
+ return;
+ }
+
// C99 6.8.4.1: The first substatement is executed if the expression compares
// unequal to 0. The condition must be a scalar type.
LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
@@ -754,11 +824,32 @@ void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
// Prefer the PGO based weights over the likelihood attribute.
// When the build isn't optimized the metadata isn't used, so don't generate
// it.
+ // Also, differentiate between disabled PGO and a never executed branch with
+ // PGO. Assuming PGO is in use:
+ // - we want to ignore the [[likely]] attribute if the branch is never
+ // executed,
+ // - assuming the profile is poor, preserving the attribute may still be
+ // beneficial.
+ // As an approximation, preserve the attribute only if both the branch and the
+ // parent context were not executed.
Stmt::Likelihood LH = Stmt::LH_None;
- uint64_t Count = getProfileCount(S.getThen());
- if (!Count && CGM.getCodeGenOpts().OptimizationLevel)
+ uint64_t ThenCount = getProfileCount(S.getThen());
+ if (!ThenCount && !getCurrentProfileCount() &&
+ CGM.getCodeGenOpts().OptimizationLevel)
LH = Stmt::getLikelihood(S.getThen(), S.getElse());
- EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, Count, LH);
+
+ // When measuring MC/DC, always fully evaluate the condition up front using
+ // EvaluateExprAsBool() so that the test vector bitmap can be updated prior to
+ // executing the body of the if.then or if.else. This is useful for when
+ // there is a 'return' within the body, but this is particularly beneficial
+ // when one if-stmt is nested within another if-stmt so that all of the MC/DC
+ // updates are kept linear and consistent.
+ if (!CGM.getCodeGenOpts().MCDCCoverage)
+ EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH);
+ else {
+ llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+ Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
+ }
// Emit the 'then' code.
EmitBlock(ThenBlock);
@@ -1220,8 +1311,7 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
assert(ReturnLocation.isValid() && "No valid return location");
- Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
- ReturnLocation);
+ Builder.CreateStore(SLocPtr, ReturnLocation);
}
// Returning from an outlined SEH helper is UB, and we already warn on it.
@@ -1397,7 +1487,7 @@ void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
llvm::BasicBlock *FalseDest = CaseRangeBlock;
CaseRangeBlock = createBasicBlock("sw.caserange");
- CurFn->getBasicBlockList().push_back(CaseRangeBlock);
+ CurFn->insert(CurFn->end(), CaseRangeBlock);
Builder.SetInsertPoint(CaseRangeBlock);
// Emit range check.
@@ -1448,6 +1538,21 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
llvm::ConstantInt *CaseVal =
Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
+
+ // Emit debuginfo for the case value if it is an enum value.
+ const ConstantExpr *CE;
+ if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
+ CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
+ else
+ CE = dyn_cast<ConstantExpr>(S.getLHS());
+ if (CE) {
+ if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
+ if (CGDebugInfo *Dbg = getDebugInfo())
+ if (CGM.getCodeGenOpts().hasReducedDebugInfo())
+ Dbg->EmitGlobalVariable(DE->getDecl(),
+ APValue(llvm::APSInt(CaseVal->getValue())));
+ }
+
if (SwitchLikelihood)
SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
@@ -1518,6 +1623,12 @@ void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
}
+ // Generate a stop point for debug info if the case statement is
+ // followed by a default statement. A fallthrough case before a
+ // default case gets its own branch target.
+ if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
+ EmitStopPoint(CurCase);
+
// Normal default recursion for non-cases.
EmitStmt(CurCase->getSubStmt());
}
@@ -1776,11 +1887,11 @@ static bool FindCaseStatementsForValue(const SwitchStmt &S,
FoundCase;
}
-static Optional<SmallVector<uint64_t, 16>>
+static std::optional<SmallVector<uint64_t, 16>>
getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
// Are there enough branches to weight them?
if (Likelihoods.size() <= 1)
- return None;
+ return std::nullopt;
uint64_t NumUnlikely = 0;
uint64_t NumNone = 0;
@@ -1801,7 +1912,7 @@ getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
// Is there a likelihood attribute used?
if (NumUnlikely == 0 && NumLikely == 0)
- return None;
+ return std::nullopt;
// When multiple cases share the same code they can be combined during
// optimization. In that case the weights of the branch will be the sum of
@@ -1983,7 +2094,7 @@ void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
} else if (SwitchLikelihood) {
assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
"switch likelihoods do not match switch cases");
- Optional<SmallVector<uint64_t, 16>> LHW =
+ std::optional<SmallVector<uint64_t, 16>> LHW =
getLikelihoodWeights(*SwitchLikelihood);
if (LHW) {
llvm::MDBuilder MDHelper(CGM.getLLVMContext());
@@ -2085,42 +2196,34 @@ AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
}
-llvm::Value*
-CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
- LValue InputValue, QualType InputType,
- std::string &ConstraintStr,
- SourceLocation Loc) {
- llvm::Value *Arg;
+std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
+ const TargetInfo::ConstraintInfo &Info, LValue InputValue,
+ QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
if (Info.allowsRegister() || !Info.allowsMemory()) {
- if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
- Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
- } else {
- llvm::Type *Ty = ConvertType(InputType);
- uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
- if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
- getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
- Ty = llvm::IntegerType::get(getLLVMContext(), Size);
- Ty = llvm::PointerType::getUnqual(Ty);
-
- Arg = Builder.CreateLoad(
- Builder.CreateBitCast(InputValue.getAddress(*this), Ty));
- } else {
- Arg = InputValue.getPointer(*this);
- ConstraintStr += '*';
- }
+ if (CodeGenFunction::hasScalarEvaluationKind(InputType))
+ return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
+
+ llvm::Type *Ty = ConvertType(InputType);
+ uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
+ if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
+ getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
+ Ty = llvm::IntegerType::get(getLLVMContext(), Size);
+
+ return {
+ Builder.CreateLoad(InputValue.getAddress(*this).withElementType(Ty)),
+ nullptr};
}
- } else {
- Arg = InputValue.getPointer(*this);
- ConstraintStr += '*';
}
- return Arg;
+ Address Addr = InputValue.getAddress(*this);
+ ConstraintStr += '*';
+ return {Addr.getPointer(), Addr.getElementType()};
}
-llvm::Value* CodeGenFunction::EmitAsmInput(
- const TargetInfo::ConstraintInfo &Info,
- const Expr *InputExpr,
- std::string &ConstraintStr) {
+std::pair<llvm::Value *, llvm::Type *>
+CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
+ const Expr *InputExpr,
+ std::string &ConstraintStr) {
// If this can't be a register or memory, i.e., has to be a constant
// (immediate or symbolic), try to emit it as such.
if (!Info.allowsRegister() && !Info.allowsMemory()) {
@@ -2131,19 +2234,20 @@ llvm::Value* CodeGenFunction::EmitAsmInput(
llvm::APSInt IntResult;
if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
getContext()))
- return llvm::ConstantInt::get(getLLVMContext(), IntResult);
+ return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
}
Expr::EvalResult Result;
if (InputExpr->EvaluateAsInt(Result, getContext()))
- return llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt());
+ return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
+ nullptr};
}
if (Info.allowsRegister() || !Info.allowsMemory())
if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
- return EmitScalarExpr(InputExpr);
+ return {EmitScalarExpr(InputExpr), nullptr};
if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
- return EmitScalarExpr(InputExpr);
+ return {EmitScalarExpr(InputExpr), nullptr};
InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
LValue Dest = EmitLValue(InputExpr);
return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
@@ -2185,23 +2289,29 @@ static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
bool HasUnwindClobber, bool ReadOnly,
bool ReadNone, bool NoMerge, const AsmStmt &S,
const std::vector<llvm::Type *> &ResultRegTypes,
+ const std::vector<llvm::Type *> &ArgElemTypes,
CodeGenFunction &CGF,
std::vector<llvm::Value *> &RegResults) {
if (!HasUnwindClobber)
- Result.addAttribute(llvm::AttributeList::FunctionIndex,
- llvm::Attribute::NoUnwind);
+ Result.addFnAttr(llvm::Attribute::NoUnwind);
if (NoMerge)
- Result.addAttribute(llvm::AttributeList::FunctionIndex,
- llvm::Attribute::NoMerge);
+ Result.addFnAttr(llvm::Attribute::NoMerge);
// Attach readnone and readonly attributes.
if (!HasSideEffect) {
if (ReadNone)
- Result.addAttribute(llvm::AttributeList::FunctionIndex,
- llvm::Attribute::ReadNone);
+ Result.setDoesNotAccessMemory();
else if (ReadOnly)
- Result.addAttribute(llvm::AttributeList::FunctionIndex,
- llvm::Attribute::ReadOnly);
+ Result.setOnlyReadsMemory();
+ }
+
+ // Add elementtype attribute for indirect constraints.
+ for (auto Pair : llvm::enumerate(ArgElemTypes)) {
+ if (Pair.value()) {
+ auto Attr = llvm::Attribute::get(
+ CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
+ Result.addParamAttr(Pair.index(), Attr);
+ }
}
// Slap the source location of the inline asm into a !srcloc metadata on the
@@ -2223,8 +2333,7 @@ static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
// convergent (meaning, they may call an intrinsically convergent op, such
// as bar.sync, and so can't have certain optimizations applied around
// them).
- Result.addAttribute(llvm::AttributeList::FunctionIndex,
- llvm::Attribute::Convergent);
+ Result.addFnAttr(llvm::Attribute::Convergent);
// Extract all of the register value results from the asm.
if (ResultRegTypes.size() == 1) {
RegResults.push_back(&Result);
@@ -2236,7 +2345,114 @@ static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
}
}
+static void
+EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
+ const llvm::ArrayRef<llvm::Value *> RegResults,
+ const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
+ const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
+ const llvm::ArrayRef<LValue> ResultRegDests,
+ const llvm::ArrayRef<QualType> ResultRegQualTys,
+ const llvm::BitVector &ResultTypeRequiresCast,
+ const llvm::BitVector &ResultRegIsFlagReg) {
+ CGBuilderTy &Builder = CGF.Builder;
+ CodeGenModule &CGM = CGF.CGM;
+ llvm::LLVMContext &CTX = CGF.getLLVMContext();
+
+ assert(RegResults.size() == ResultRegTypes.size());
+ assert(RegResults.size() == ResultTruncRegTypes.size());
+ assert(RegResults.size() == ResultRegDests.size());
+ // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
+ // in which case its size may grow.
+ assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
+ assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
+
+ for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
+ llvm::Value *Tmp = RegResults[i];
+ llvm::Type *TruncTy = ResultTruncRegTypes[i];
+
+ if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
+ // Target must guarantee the Value `Tmp` here is lowered to a boolean
+ // value.
+ llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
+ llvm::Value *IsBooleanValue =
+ Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
+ llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
+ Builder.CreateCall(FnAssume, IsBooleanValue);
+ }
+
+ // If the result type of the LLVM IR asm doesn't match the result type of
+ // the expression, do the conversion.
+ if (ResultRegTypes[i] != TruncTy) {
+
+ // Truncate the integer result to the right size, note that TruncTy can be
+ // a pointer.
+ if (TruncTy->isFloatingPointTy())
+ Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
+ else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
+ uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
+ Tmp = Builder.CreateTrunc(
+ Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
+ Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
+ } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
+ uint64_t TmpSize =
+ CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
+ Tmp = Builder.CreatePtrToInt(
+ Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
+ Tmp = Builder.CreateTrunc(Tmp, TruncTy);
+ } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
+ Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
+ } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
+ Tmp = Builder.CreateBitCast(Tmp, TruncTy);
+ }
+ }
+
+ LValue Dest = ResultRegDests[i];
+ // ResultTypeRequiresCast elements correspond to the first
+ // ResultTypeRequiresCast.size() elements of RegResults.
+ if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
+ unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
+ Address A = Dest.getAddress(CGF).withElementType(ResultRegTypes[i]);
+ if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
+ Builder.CreateStore(Tmp, A);
+ continue;
+ }
+
+ QualType Ty =
+ CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
+ if (Ty.isNull()) {
+ const Expr *OutExpr = S.getOutputExpr(i);
+ CGM.getDiags().Report(OutExpr->getExprLoc(),
+ diag::err_store_value_to_reg);
+ return;
+ }
+ Dest = CGF.MakeAddrLValue(A, Ty);
+ }
+ CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
+ }
+}
+
+static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF,
+ const AsmStmt &S) {
+ constexpr auto Name = "__ASM__hipstdpar_unsupported";
+
+ StringRef Asm;
+ if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
+ Asm = GCCAsm->getAsmString()->getString();
+
+ auto &Ctx = CGF->CGM.getLLVMContext();
+
+ auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
+ auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
+ {StrTy->getType()}, false);
+ auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
+
+ CGF->Builder.CreateCall(UBF, {StrTy});
+}
+
void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
+ // Pop all cleanup blocks at the end of the asm statement.
+ CodeGenFunction::RunCleanupsScope Cleanups(*this);
+
// Assemble the final asm string.
std::string AsmString = S.generateAsmString(getContext());
@@ -2244,27 +2460,38 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
- for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
+ bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
+ bool IsValidTargetAsm = true;
+ for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
StringRef Name;
if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
Name = GAS->getOutputName(i);
TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
- assert(IsValid && "Failed to parse output constraint");
+ if (IsHipStdPar && !IsValid)
+ IsValidTargetAsm = false;
+ else
+ assert(IsValid && "Failed to parse output constraint");
OutputConstraintInfos.push_back(Info);
}
- for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
+ for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
StringRef Name;
if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
Name = GAS->getInputName(i);
TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
bool IsValid =
getTarget().validateInputConstraint(OutputConstraintInfos, Info);
- assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
+ if (IsHipStdPar && !IsValid)
+ IsValidTargetAsm = false;
+ else
+ assert(IsValid && "Failed to parse input constraint");
InputConstraintInfos.push_back(Info);
}
+ if (!IsValidTargetAsm)
+ return EmitHipStdParUnsupportedAsm(this, S);
+
std::string Constraints;
std::vector<LValue> ResultRegDests;
@@ -2272,13 +2499,16 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
std::vector<llvm::Type *> ResultRegTypes;
std::vector<llvm::Type *> ResultTruncRegTypes;
std::vector<llvm::Type *> ArgTypes;
+ std::vector<llvm::Type *> ArgElemTypes;
std::vector<llvm::Value*> Args;
llvm::BitVector ResultTypeRequiresCast;
+ llvm::BitVector ResultRegIsFlagReg;
// Keep track of inout constraints.
std::string InOutConstraints;
std::vector<llvm::Value*> InOutArgs;
std::vector<llvm::Type*> InOutArgTypes;
+ std::vector<llvm::Type*> InOutArgElemTypes;
// Keep track of out constraints for tied input operand.
std::vector<std::string> OutputConstraints;
@@ -2330,6 +2560,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
ResultRegQualTys.push_back(QTy);
ResultRegDests.push_back(Dest);
+ bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with("{@cc");
+ ResultRegIsFlagReg.push_back(IsFlagReg);
+
llvm::Type *Ty = ConvertTypeForMem(QTy);
const bool RequiresCast = Info.allowsRegister() &&
(getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
@@ -2378,23 +2611,20 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
LargestVectorWidth =
std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getKnownMinSize());
+ VT->getPrimitiveSizeInBits().getKnownMinValue());
} else {
- llvm::Type *DestAddrTy = Dest.getAddress(*this).getType();
- llvm::Value *DestPtr = Dest.getPointer(*this);
+ Address DestAddr = Dest.getAddress(*this);
// Matrix types in memory are represented by arrays, but accessed through
// vector pointers, with the alignment specified on the access operation.
// For inline assembly, update pointer arguments to use vector pointers.
// Otherwise there will be a mis-match if the matrix is also an
// input-argument which is represented as vector.
- if (isa<MatrixType>(OutExpr->getType().getCanonicalType())) {
- DestAddrTy = llvm::PointerType::get(
- ConvertType(OutExpr->getType()),
- cast<llvm::PointerType>(DestAddrTy)->getAddressSpace());
- DestPtr = Builder.CreateBitCast(DestPtr, DestAddrTy);
- }
- ArgTypes.push_back(DestAddrTy);
- Args.push_back(DestPtr);
+ if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
+ DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
+
+ ArgTypes.push_back(DestAddr.getType());
+ ArgElemTypes.push_back(DestAddr.getElementType());
+ Args.push_back(DestAddr.getPointer());
Constraints += "=*";
Constraints += OutputConstraint;
ReadOnly = ReadNone = false;
@@ -2404,9 +2634,11 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
InOutConstraints += ',';
const Expr *InputExpr = S.getOutputExpr(i);
- llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
- InOutConstraints,
- InputExpr->getExprLoc());
+ llvm::Value *Arg;
+ llvm::Type *ArgElemType;
+ std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
+ Info, Dest, InputExpr->getType(), InOutConstraints,
+ InputExpr->getExprLoc());
if (llvm::Type* AdjTy =
getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
@@ -2417,7 +2649,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
LargestVectorWidth =
std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getKnownMinSize());
+ VT->getPrimitiveSizeInBits().getKnownMinValue());
// Only tie earlyclobber physregs.
if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
InOutConstraints += llvm::utostr(i);
@@ -2425,6 +2657,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
InOutConstraints += OutputConstraint;
InOutArgTypes.push_back(Arg->getType());
+ InOutArgElemTypes.push_back(ArgElemType);
InOutArgs.push_back(Arg);
}
}
@@ -2435,7 +2668,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
if (RetAI.isDirect() || RetAI.isExtend()) {
// Make a fake lvalue for the return value slot.
- LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
+ LValue ReturnSlot = MakeAddrLValueWithoutTBAA(ReturnValue, FnRetTy);
CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
*this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
ResultRegDests, AsmString, S.getNumOutputs());
@@ -2464,7 +2697,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
getTarget(), CGM, S, false /* No EarlyClobber */);
std::string ReplaceConstraint (InputConstraint);
- llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
+ llvm::Value *Arg;
+ llvm::Type *ArgElemType;
+ std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
// If this input argument is tied to a larger output result, extend the
// input to be the same size as the output. The LLVM backend wants to see
@@ -2486,10 +2721,8 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
Arg = Builder.CreateZExt(Arg, OutputTy);
else if (isa<llvm::PointerType>(OutputTy))
Arg = Builder.CreateZExt(Arg, IntPtrTy);
- else {
- assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
+ else if (OutputTy->isFloatingPointTy())
Arg = Builder.CreateFPExt(Arg, OutputTy);
- }
}
// Deal with the tied operands' constraint code in adjustInlineAsmType.
ReplaceConstraint = OutputConstraints[Output];
@@ -2506,42 +2739,40 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
LargestVectorWidth =
std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getKnownMinSize());
+ VT->getPrimitiveSizeInBits().getKnownMinValue());
ArgTypes.push_back(Arg->getType());
+ ArgElemTypes.push_back(ArgElemType);
Args.push_back(Arg);
Constraints += InputConstraint;
}
+ // Append the "input" part of inout constraints.
+ for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
+ ArgTypes.push_back(InOutArgTypes[i]);
+ ArgElemTypes.push_back(InOutArgElemTypes[i]);
+ Args.push_back(InOutArgs[i]);
+ }
+ Constraints += InOutConstraints;
+
// Labels
SmallVector<llvm::BasicBlock *, 16> Transfer;
llvm::BasicBlock *Fallthrough = nullptr;
bool IsGCCAsmGoto = false;
- if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
+ if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
IsGCCAsmGoto = GS->isAsmGoto();
if (IsGCCAsmGoto) {
for (const auto *E : GS->labels()) {
JumpDest Dest = getJumpDestForLabel(E->getLabel());
Transfer.push_back(Dest.getBlock());
- llvm::BlockAddress *BA =
- llvm::BlockAddress::get(CurFn, Dest.getBlock());
- Args.push_back(BA);
- ArgTypes.push_back(BA->getType());
if (!Constraints.empty())
Constraints += ',';
- Constraints += 'X';
+ Constraints += "!i";
}
Fallthrough = createBasicBlock("asm.fallthrough");
}
}
- // Append the "input" part of inout constraints last.
- for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
- ArgTypes.push_back(InOutArgTypes[i]);
- Args.push_back(InOutArgs[i]);
- }
- Constraints += InOutConstraints;
-
bool HasUnwindClobber = false;
// Clobbers
@@ -2591,7 +2822,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
"unwind clobber can't be used with asm goto");
// Add machine specific clobbers
- std::string MachineClobbers = getTarget().getClobbers();
+ std::string_view MachineClobbers = getTarget().getClobbers();
if (!MachineClobbers.empty()) {
if (!Constraints.empty())
Constraints += ',';
@@ -2610,90 +2841,80 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
llvm::FunctionType::get(ResultType, ArgTypes, false);
bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
+
+ llvm::InlineAsm::AsmDialect GnuAsmDialect =
+ CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
+ ? llvm::InlineAsm::AD_ATT
+ : llvm::InlineAsm::AD_Intel;
llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
- llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
+ llvm::InlineAsm::AD_Intel : GnuAsmDialect;
+
llvm::InlineAsm *IA = llvm::InlineAsm::get(
FTy, AsmString, Constraints, HasSideEffect,
/* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
std::vector<llvm::Value*> RegResults;
+ llvm::CallBrInst *CBR;
+ llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
+ CBRRegResults;
if (IsGCCAsmGoto) {
- llvm::CallBrInst *Result =
- Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
+ CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
EmitBlock(Fallthrough);
- UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
- ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
- ResultRegTypes, *this, RegResults);
+ UpdateAsmCallInst(*CBR, HasSideEffect, false, ReadOnly, ReadNone,
+ InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
+ *this, RegResults);
+ // Because we are emitting code top to bottom, we don't have enough
+ // information at this point to know precisely whether we have a critical
+ // edge. If we have outputs, split all indirect destinations.
+ if (!RegResults.empty()) {
+ unsigned i = 0;
+ for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
+ llvm::Twine SynthName = Dest->getName() + ".split";
+ llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
+ llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
+ Builder.SetInsertPoint(SynthBB);
+
+ if (ResultRegTypes.size() == 1) {
+ CBRRegResults[SynthBB].push_back(CBR);
+ } else {
+ for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
+ llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
+ CBRRegResults[SynthBB].push_back(Tmp);
+ }
+ }
+
+ EmitBranch(Dest);
+ EmitBlock(SynthBB);
+ CBR->setIndirectDest(i++, SynthBB);
+ }
+ }
} else if (HasUnwindClobber) {
llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
- InNoMergeAttributedStmt, S, ResultRegTypes, *this,
- RegResults);
+ InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
+ *this, RegResults);
} else {
llvm::CallInst *Result =
Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
- UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
- ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
- ResultRegTypes, *this, RegResults);
- }
-
- assert(RegResults.size() == ResultRegTypes.size());
- assert(RegResults.size() == ResultTruncRegTypes.size());
- assert(RegResults.size() == ResultRegDests.size());
- // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
- // in which case its size may grow.
- assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
- for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
- llvm::Value *Tmp = RegResults[i];
- llvm::Type *TruncTy = ResultTruncRegTypes[i];
-
- // If the result type of the LLVM IR asm doesn't match the result type of
- // the expression, do the conversion.
- if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
-
- // Truncate the integer result to the right size, note that TruncTy can be
- // a pointer.
- if (TruncTy->isFloatingPointTy())
- Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
- else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
- uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
- Tmp = Builder.CreateTrunc(Tmp,
- llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
- Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
- } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
- uint64_t TmpSize =CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
- Tmp = Builder.CreatePtrToInt(Tmp,
- llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
- Tmp = Builder.CreateTrunc(Tmp, TruncTy);
- } else if (TruncTy->isIntegerTy()) {
- Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
- } else if (TruncTy->isVectorTy()) {
- Tmp = Builder.CreateBitCast(Tmp, TruncTy);
- }
- }
-
- LValue Dest = ResultRegDests[i];
- // ResultTypeRequiresCast elements correspond to the first
- // ResultTypeRequiresCast.size() elements of RegResults.
- if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
- unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
- Address A = Builder.CreateBitCast(Dest.getAddress(*this),
- ResultRegTypes[i]->getPointerTo());
- if (getTargetHooks().isScalarizableAsmOperand(*this, TruncTy)) {
- Builder.CreateStore(Tmp, A);
- continue;
- }
-
- QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
- if (Ty.isNull()) {
- const Expr *OutExpr = S.getOutputExpr(i);
- CGM.Error(
- OutExpr->getExprLoc(),
- "impossible constraint in asm: can't store value into a register");
- return;
- }
- Dest = MakeAddrLValue(A, Ty);
+ UpdateAsmCallInst(*Result, HasSideEffect, false, ReadOnly, ReadNone,
+ InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
+ *this, RegResults);
+ }
+
+ EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
+ ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
+ ResultRegIsFlagReg);
+
+ // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
+ // different insertion point; one for each indirect destination and with
+ // CBRRegResults rather than RegResults.
+ if (IsGCCAsmGoto && !CBRRegResults.empty()) {
+ for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
+ llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
+ Builder.SetInsertPoint(Succ, --(Succ->end()));
+ EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
+ ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
+ ResultTypeRequiresCast, ResultRegIsFlagReg);
}
- EmitStoreThroughLValue(RValue::get(Tmp), Dest);
}
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
index f6233b791182..e362c9da51fe 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -24,11 +24,17 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Metadata.h"
#include "llvm/Support/AtomicOrdering.h"
+#include <optional>
using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;
@@ -69,13 +75,13 @@ class OMPLexicalScope : public CodeGenFunction::LexicalScope {
public:
OMPLexicalScope(
CodeGenFunction &CGF, const OMPExecutableDirective &S,
- const llvm::Optional<OpenMPDirectiveKind> CapturedRegion = llvm::None,
+ const std::optional<OpenMPDirectiveKind> CapturedRegion = std::nullopt,
const bool EmitPreInitStmt = true)
: CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
InlinedShareds(CGF) {
if (EmitPreInitStmt)
emitPreInitStmt(CGF, S);
- if (!CapturedRegion.hasValue())
+ if (!CapturedRegion)
return;
assert(S.hasAssociatedStmt() &&
"Expected associated statement for inlined directive.");
@@ -90,9 +96,7 @@ public:
isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
InlinedShareds.isGlobalVarCaptured(VD)),
VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
- InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
- return CGF.EmitLValue(&DRE).getAddress(CGF);
- });
+ InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
}
}
(void)InlinedShareds.Privatize();
@@ -111,7 +115,7 @@ class OMPParallelScope final : public OMPLexicalScope {
public:
OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
- : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
+ : OMPLexicalScope(CGF, S, /*CapturedRegion=*/std::nullopt,
EmitPreInitStmt(S)) {}
};
@@ -126,7 +130,7 @@ class OMPTeamsScope final : public OMPLexicalScope {
public:
OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
- : OMPLexicalScope(CGF, S, /*CapturedRegion=*/llvm::None,
+ : OMPLexicalScope(CGF, S, /*CapturedRegion=*/std::nullopt,
EmitPreInitStmt(S)) {}
};
@@ -150,11 +154,12 @@ class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
const auto *OrigVD =
cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
+ QualType OrigVDTy = OrigVD->getType().getNonReferenceType();
(void)PreCondVars.setVarAddr(
CGF, OrigVD,
Address(llvm::UndefValue::get(CGF.ConvertTypeForMem(
- CGF.getContext().getPointerType(
- OrigVD->getType().getNonReferenceType()))),
+ CGF.getContext().getPointerType(OrigVDTy))),
+ CGF.ConvertTypeForMem(OrigVDTy),
CGF.getContext().getDeclAlign(OrigVD)));
}
}
@@ -267,9 +272,7 @@ public:
InlinedShareds.isGlobalVarCaptured(VD)),
VD->getType().getNonReferenceType(), VK_LValue,
C.getLocation());
- InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
- return CGF.EmitLValue(&DRE).getAddress(CGF);
- });
+ InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
}
}
CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
@@ -309,8 +312,8 @@ llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
while (const VariableArrayType *VAT = C.getAsVariableArrayType(Ty)) {
VlaSizePair VlaSize = getVLASize(VAT);
Ty = VlaSize.Type;
- Size = Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts)
- : VlaSize.NumElts;
+ Size =
+ Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts) : VlaSize.NumElts;
}
SizeInChars = C.getTypeSizeInChars(Ty);
if (SizeInChars.isZero())
@@ -375,8 +378,7 @@ static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
AddrLV.getAddress(CGF).getPointer(), Ctx.getUIntPtrType(),
Ctx.getPointerType(DstType), Loc);
Address TmpAddr =
- CGF.MakeNaturalAlignAddrLValue(CastedPtr, Ctx.getPointerType(DstType))
- .getAddress(CGF);
+ CGF.MakeNaturalAlignAddrLValue(CastedPtr, DstType).getAddress(CGF);
return TmpAddr;
}
@@ -445,12 +447,13 @@ static llvm::Function *emitOutlinedFunctionPrologue(
FunctionDecl *DebugFunctionDecl = nullptr;
if (!FO.UIntPtrCastRequired) {
FunctionProtoType::ExtProtoInfo EPI;
- QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, EPI);
+ QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, std::nullopt, EPI);
DebugFunctionDecl = FunctionDecl::Create(
Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(),
SourceLocation(), DeclarationName(), FunctionTy,
Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static,
- /*isInlineSpecified=*/false, /*hasWrittenPrototype=*/false);
+ /*UsesFPIntrin=*/false, /*isInlineSpecified=*/false,
+ /*hasWrittenPrototype=*/false);
}
for (const FieldDecl *FD : RD->fields()) {
QualType ArgType = FD->getType();
@@ -479,7 +482,11 @@ static llvm::Function *emitOutlinedFunctionPrologue(
if (ArgType->isVariablyModifiedType())
ArgType = getCanonicalParamType(Ctx, ArgType);
VarDecl *Arg;
- if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
+ if (CapVar && (CapVar->getTLSKind() != clang::VarDecl::TLS_None)) {
+ Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
+ II, ArgType,
+ ImplicitParamKind::ThreadPrivateVar);
+ } else if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
Arg = ParmVarDecl::Create(
Ctx, DebugFunctionDecl,
CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(),
@@ -487,7 +494,7 @@ static llvm::Function *emitOutlinedFunctionPrologue(
/*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
} else {
Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
- II, ArgType, ImplicitParamDecl::Other);
+ II, ArgType, ImplicitParamKind::Other);
}
Args.emplace_back(Arg);
// Do not cast arguments if we emit function with non-original types.
@@ -497,9 +504,8 @@ static llvm::Function *emitOutlinedFunctionPrologue(
: CGM.getOpenMPRuntime().translateParameter(FD, Arg));
++I;
}
- Args.append(
- std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
- CD->param_end());
+ Args.append(std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
+ CD->param_end());
TargetArgs.append(
std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
CD->param_end());
@@ -518,8 +524,10 @@ static llvm::Function *emitOutlinedFunctionPrologue(
F->setDoesNotRecurse();
// Always inline the outlined function if optimizations are enabled.
- if (CGM.getCodeGenOpts().OptimizationLevel != 0)
+ if (CGM.getCodeGenOpts().OptimizationLevel != 0) {
+ F->removeFnAttr(llvm::Attribute::NoInline);
F->addFnAttr(llvm::Attribute::AlwaysInline);
+ }
// Generate the function.
CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
@@ -573,8 +581,7 @@ static llvm::Function *emitOutlinedFunctionPrologue(
}
if (!FO.RegisterCastedArgsOnly) {
LocalAddrs.insert(
- {Args[Cnt],
- {Var, Address(ArgAddr.getPointer(), Ctx.getDeclAlign(Var))}});
+ {Args[Cnt], {Var, ArgAddr.withAlignment(Ctx.getDeclAlign(Var))}});
}
} else if (I->capturesVariableByCopy()) {
assert(!FD->getType()->isAnyPointerType() &&
@@ -624,9 +631,8 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
CodeGenFunction::OMPPrivateScope LocalScope(*this);
for (const auto &LocalAddrPair : LocalAddrs) {
if (LocalAddrPair.second.first) {
- LocalScope.addPrivate(LocalAddrPair.second.first, [&LocalAddrPair]() {
- return LocalAddrPair.second.second;
- });
+ LocalScope.addPrivate(LocalAddrPair.second.first,
+ LocalAddrPair.second.second);
}
}
(void)LocalScope.Privatize();
@@ -661,19 +667,16 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
I->second.first ? I->second.first->getType() : Arg->getType(),
AlignmentSource::Decl);
if (LV.getType()->isAnyComplexType())
- LV.setAddress(WrapperCGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- LV.getAddress(WrapperCGF),
- PI->getType()->getPointerTo(
- LV.getAddress(WrapperCGF).getAddressSpace())));
+ LV.setAddress(LV.getAddress(WrapperCGF).withElementType(PI->getType()));
CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
} else {
auto EI = VLASizes.find(Arg);
if (EI != VLASizes.end()) {
CallArg = EI->second.second;
} else {
- LValue LV = WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
- Arg->getType(),
- AlignmentSource::Decl);
+ LValue LV =
+ WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
+ Arg->getType(), AlignmentSource::Decl);
CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
}
}
@@ -697,13 +700,14 @@ void CodeGenFunction::EmitOMPAggregateAssign(
// Drill down to the base element type on both arrays.
const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe();
llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
- SrcAddr = Builder.CreateElementBitCast(SrcAddr, DestAddr.getElementType());
+ SrcAddr = SrcAddr.withElementType(DestAddr.getElementType());
llvm::Value *SrcBegin = SrcAddr.getPointer();
llvm::Value *DestBegin = DestAddr.getPointer();
// Cast from pointer to array type to pointer to single element.
- llvm::Value *DestEnd =
- Builder.CreateGEP(DestAddr.getElementType(), DestBegin, NumElements);
+ llvm::Value *DestEnd = Builder.CreateInBoundsGEP(DestAddr.getElementType(),
+ DestBegin, NumElements);
+
// The basic structure here is a while-do loop.
llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body");
llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done");
@@ -718,29 +722,29 @@ void CodeGenFunction::EmitOMPAggregateAssign(
CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);
llvm::PHINode *SrcElementPHI =
- Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
+ Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
SrcElementPHI->addIncoming(SrcBegin, EntryBB);
Address SrcElementCurrent =
- Address(SrcElementPHI,
+ Address(SrcElementPHI, SrcAddr.getElementType(),
SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
- llvm::PHINode *DestElementPHI =
- Builder.CreatePHI(DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
+ llvm::PHINode *DestElementPHI = Builder.CreatePHI(
+ DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
DestElementPHI->addIncoming(DestBegin, EntryBB);
Address DestElementCurrent =
- Address(DestElementPHI,
- DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
+ Address(DestElementPHI, DestAddr.getElementType(),
+ DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
// Emit copy.
CopyGen(DestElementCurrent, SrcElementCurrent);
// Shift the address forward by one element.
- llvm::Value *DestElementNext = Builder.CreateConstGEP1_32(
- DestAddr.getElementType(), DestElementPHI, /*Idx0=*/1,
- "omp.arraycpy.dest.element");
- llvm::Value *SrcElementNext = Builder.CreateConstGEP1_32(
- SrcAddr.getElementType(), SrcElementPHI, /*Idx0=*/1,
- "omp.arraycpy.src.element");
+ llvm::Value *DestElementNext =
+ Builder.CreateConstGEP1_32(DestAddr.getElementType(), DestElementPHI,
+ /*Idx0=*/1, "omp.arraycpy.dest.element");
+ llvm::Value *SrcElementNext =
+ Builder.CreateConstGEP1_32(SrcAddr.getElementType(), SrcElementPHI,
+ /*Idx0=*/1, "omp.arraycpy.src.element");
// Check whether we've reached the end.
llvm::Value *Done =
Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
@@ -772,8 +776,8 @@ void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
// destination and source variables to corresponding array
// elements.
CodeGenFunction::OMPPrivateScope Remap(*this);
- Remap.addPrivate(DestVD, [DestElement]() { return DestElement; });
- Remap.addPrivate(SrcVD, [SrcElement]() { return SrcElement; });
+ Remap.addPrivate(DestVD, DestElement);
+ Remap.addPrivate(SrcVD, SrcElement);
(void)Remap.Privatize();
EmitIgnoredExpr(Copy);
});
@@ -781,8 +785,8 @@ void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
} else {
// Remap pseudo source variable to private copy.
CodeGenFunction::OMPPrivateScope Remap(*this);
- Remap.addPrivate(SrcVD, [SrcAddr]() { return SrcAddr; });
- Remap.addPrivate(DestVD, [DestAddr]() { return DestAddr; });
+ Remap.addPrivate(SrcVD, SrcAddr);
+ Remap.addPrivate(DestVD, DestAddr);
(void)Remap.Privatize();
// Emit copying of the whole variable.
EmitIgnoredExpr(Copy);
@@ -794,7 +798,7 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
if (!HaveInsertPoint())
return false;
bool DeviceConstTarget =
- getLangOpts().OpenMPIsDevice &&
+ getLangOpts().OpenMPIsTargetDevice &&
isOpenMPTargetExecutionDirective(D.getDirectiveKind());
bool FirstprivateIsLastprivate = false;
llvm::DenseMap<const VarDecl *, OpenMPLastprivateModifier> Lastprivates;
@@ -871,68 +875,56 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
// Emit VarDecl with copy init for arrays.
// Get the address of the original variable captured in current
// captured region.
- IsRegistered = PrivateScope.addPrivate(
- OrigVD, [this, VD, Type, OriginalLVal, VDInit]() {
- AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
- const Expr *Init = VD->getInit();
- if (!isa<CXXConstructExpr>(Init) ||
- isTrivialInitializer(Init)) {
- // Perform simple memcpy.
- LValue Dest =
- MakeAddrLValue(Emission.getAllocatedAddress(), Type);
- EmitAggregateAssign(Dest, OriginalLVal, Type);
- } else {
- EmitOMPAggregateAssign(
- Emission.getAllocatedAddress(),
- OriginalLVal.getAddress(*this), Type,
- [this, VDInit, Init](Address DestElement,
- Address SrcElement) {
- // Clean up any temporaries needed by the
- // initialization.
- RunCleanupsScope InitScope(*this);
- // Emit initialization for single element.
- setAddrOfLocalVar(VDInit, SrcElement);
- EmitAnyExprToMem(Init, DestElement,
- Init->getType().getQualifiers(),
- /*IsInitializer*/ false);
- LocalDeclMap.erase(VDInit);
- });
- }
- EmitAutoVarCleanups(Emission);
- return Emission.getAllocatedAddress();
- });
+ AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
+ const Expr *Init = VD->getInit();
+ if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
+ // Perform simple memcpy.
+ LValue Dest = MakeAddrLValue(Emission.getAllocatedAddress(), Type);
+ EmitAggregateAssign(Dest, OriginalLVal, Type);
+ } else {
+ EmitOMPAggregateAssign(
+ Emission.getAllocatedAddress(), OriginalLVal.getAddress(*this),
+ Type,
+ [this, VDInit, Init](Address DestElement, Address SrcElement) {
+ // Clean up any temporaries needed by the
+ // initialization.
+ RunCleanupsScope InitScope(*this);
+ // Emit initialization for single element.
+ setAddrOfLocalVar(VDInit, SrcElement);
+ EmitAnyExprToMem(Init, DestElement,
+ Init->getType().getQualifiers(),
+ /*IsInitializer*/ false);
+ LocalDeclMap.erase(VDInit);
+ });
+ }
+ EmitAutoVarCleanups(Emission);
+ IsRegistered =
+ PrivateScope.addPrivate(OrigVD, Emission.getAllocatedAddress());
} else {
Address OriginalAddr = OriginalLVal.getAddress(*this);
- IsRegistered =
- PrivateScope.addPrivate(OrigVD, [this, VDInit, OriginalAddr, VD,
- ThisFirstprivateIsLastprivate,
- OrigVD, &Lastprivates, IRef]() {
- // Emit private VarDecl with copy init.
- // Remap temp VDInit variable to the address of the original
- // variable (for proper handling of captured global variables).
- setAddrOfLocalVar(VDInit, OriginalAddr);
- EmitDecl(*VD);
- LocalDeclMap.erase(VDInit);
- if (ThisFirstprivateIsLastprivate &&
- Lastprivates[OrigVD->getCanonicalDecl()] ==
- OMPC_LASTPRIVATE_conditional) {
- // Create/init special variable for lastprivate conditionals.
- Address VDAddr =
- CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
- *this, OrigVD);
- llvm::Value *V = EmitLoadOfScalar(
- MakeAddrLValue(GetAddrOfLocalVar(VD), (*IRef)->getType(),
- AlignmentSource::Decl),
- (*IRef)->getExprLoc());
- EmitStoreOfScalar(V,
- MakeAddrLValue(VDAddr, (*IRef)->getType(),
- AlignmentSource::Decl));
- LocalDeclMap.erase(VD);
- setAddrOfLocalVar(VD, VDAddr);
- return VDAddr;
- }
- return GetAddrOfLocalVar(VD);
- });
+ // Emit private VarDecl with copy init.
+ // Remap temp VDInit variable to the address of the original
+ // variable (for proper handling of captured global variables).
+ setAddrOfLocalVar(VDInit, OriginalAddr);
+ EmitDecl(*VD);
+ LocalDeclMap.erase(VDInit);
+ Address VDAddr = GetAddrOfLocalVar(VD);
+ if (ThisFirstprivateIsLastprivate &&
+ Lastprivates[OrigVD->getCanonicalDecl()] ==
+ OMPC_LASTPRIVATE_conditional) {
+ // Create/init special variable for lastprivate conditionals.
+ llvm::Value *V =
+ EmitLoadOfScalar(MakeAddrLValue(VDAddr, (*IRef)->getType(),
+ AlignmentSource::Decl),
+ (*IRef)->getExprLoc());
+ VDAddr = CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
+ *this, OrigVD);
+ EmitStoreOfScalar(V, MakeAddrLValue(VDAddr, (*IRef)->getType(),
+ AlignmentSource::Decl));
+ LocalDeclMap.erase(VD);
+ setAddrOfLocalVar(VD, VDAddr);
+ }
+ IsRegistered = PrivateScope.addPrivate(OrigVD, VDAddr);
}
assert(IsRegistered &&
"firstprivate var already registered as private");
@@ -958,11 +950,10 @@ void CodeGenFunction::EmitOMPPrivateClause(
const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
- bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() {
- // Emit private VarDecl with copy init.
- EmitDecl(*VD);
- return GetAddrOfLocalVar(VD);
- });
+ EmitDecl(*VD);
+ // Emit private VarDecl with copy init.
+ bool IsRegistered =
+ PrivateScope.addPrivate(OrigVD, GetAddrOfLocalVar(VD));
assert(IsRegistered && "private var already registered as private");
// Silence the warning about unused variable.
(void)IsRegistered;
@@ -1003,9 +994,10 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
LocalDeclMap.erase(VD);
} else {
MasterAddr =
- Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
- : CGM.GetAddrOfGlobal(VD),
- getContext().getDeclAlign(VD));
+ Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
+ : CGM.GetAddrOfGlobal(VD),
+ CGM.getTypes().ConvertTypeForMem(VD->getType()),
+ getContext().getDeclAlign(VD));
}
// Get the address of the threadprivate variable.
Address PrivateAddr = EmitLValue(*IRef).getAddress(*this);
@@ -1073,31 +1065,27 @@ bool CodeGenFunction::EmitOMPLastprivateClauseInit(
if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
const auto *DestVD =
cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
- PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() {
- DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
- /*RefersToEnclosingVariableOrCapture=*/
- CapturedStmtInfo->lookup(OrigVD) != nullptr,
- (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
- return EmitLValue(&DRE).getAddress(*this);
- });
+ DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
+ /*RefersToEnclosingVariableOrCapture=*/
+ CapturedStmtInfo->lookup(OrigVD) != nullptr,
+ (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
+ PrivateScope.addPrivate(DestVD, EmitLValue(&DRE).getAddress(*this));
// Check if the variable is also a firstprivate: in this case IInit is
// not generated. Initialization of this variable will happen in codegen
// for 'firstprivate' clause.
if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
- bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD, C,
- OrigVD]() {
- if (C->getKind() == OMPC_LASTPRIVATE_conditional) {
- Address VDAddr =
- CGM.getOpenMPRuntime().emitLastprivateConditionalInit(*this,
- OrigVD);
- setAddrOfLocalVar(VD, VDAddr);
- return VDAddr;
- }
+ Address VDAddr = Address::invalid();
+ if (C->getKind() == OMPC_LASTPRIVATE_conditional) {
+ VDAddr = CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
+ *this, OrigVD);
+ setAddrOfLocalVar(VD, VDAddr);
+ } else {
// Emit private VarDecl with copy init.
EmitDecl(*VD);
- return GetAddrOfLocalVar(VD);
- });
+ VDAddr = GetAddrOfLocalVar(VD);
+ }
+ bool IsRegistered = PrivateScope.addPrivate(OrigVD, VDAddr);
assert(IsRegistered &&
"lastprivate var already registered as private");
(void)IsRegistered;
@@ -1177,9 +1165,10 @@ void CodeGenFunction::EmitOMPLastprivateClauseFinal(
// Get the address of the private variable.
Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
- PrivateAddr =
- Address(Builder.CreateLoad(PrivateAddr),
- CGM.getNaturalTypeAlignment(RefTy->getPointeeType()));
+ PrivateAddr = Address(
+ Builder.CreateLoad(PrivateAddr),
+ CGM.getTypes().ConvertTypeForMem(RefTy->getPointeeType()),
+ CGM.getNaturalTypeAlignment(RefTy->getPointeeType()));
// Store the last value to the private copy in the last iteration.
if (C->getKind() == OMPC_LASTPRIVATE_conditional)
CGM.getOpenMPRuntime().emitLastprivateConditionalFinalUpdate(
@@ -1243,7 +1232,7 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
RedCG.emitAggregateType(*this, Count);
AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
- RedCG.getSharedLValue(Count),
+ RedCG.getSharedLValue(Count).getAddress(*this),
[&Emission](CodeGenFunction &CGF) {
CGF.EmitAutoVarInit(Emission);
return true;
@@ -1251,8 +1240,8 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
EmitAutoVarCleanups(Emission);
Address BaseAddr = RedCG.adjustPrivateAddress(
*this, Count, Emission.getAllocatedAddress());
- bool IsRegistered = PrivateScope.addPrivate(
- RedCG.getBaseDecl(Count), [BaseAddr]() { return BaseAddr; });
+ bool IsRegistered =
+ PrivateScope.addPrivate(RedCG.getBaseDecl(Count), BaseAddr);
assert(IsRegistered && "private var already registered as private");
// Silence the warning about unused variable.
(void)IsRegistered;
@@ -1264,23 +1253,18 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
// Store the address of the original variable associated with the LHS
// implicit variable.
- PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
- return RedCG.getSharedLValue(Count).getAddress(*this);
- });
- PrivateScope.addPrivate(
- RHSVD, [this, PrivateVD]() { return GetAddrOfLocalVar(PrivateVD); });
+ PrivateScope.addPrivate(LHSVD,
+ RedCG.getSharedLValue(Count).getAddress(*this));
+ PrivateScope.addPrivate(RHSVD, GetAddrOfLocalVar(PrivateVD));
} else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
isa<ArraySubscriptExpr>(IRef)) {
// Store the address of the original variable associated with the LHS
// implicit variable.
- PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
- return RedCG.getSharedLValue(Count).getAddress(*this);
- });
- PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() {
- return Builder.CreateElementBitCast(GetAddrOfLocalVar(PrivateVD),
- ConvertTypeForMem(RHSVD->getType()),
- "rhs.begin");
- });
+ PrivateScope.addPrivate(LHSVD,
+ RedCG.getSharedLValue(Count).getAddress(*this));
+ PrivateScope.addPrivate(RHSVD,
+ GetAddrOfLocalVar(PrivateVD).withElementType(
+ ConvertTypeForMem(RHSVD->getType())));
} else {
QualType Type = PrivateVD->getType();
bool IsArray = getContext().getAsArrayType(Type) != nullptr;
@@ -1288,18 +1272,14 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
// Store the address of the original variable associated with the LHS
// implicit variable.
if (IsArray) {
- OriginalAddr = Builder.CreateElementBitCast(
- OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
+ OriginalAddr =
+ OriginalAddr.withElementType(ConvertTypeForMem(LHSVD->getType()));
}
- PrivateScope.addPrivate(LHSVD, [OriginalAddr]() { return OriginalAddr; });
+ PrivateScope.addPrivate(LHSVD, OriginalAddr);
PrivateScope.addPrivate(
- RHSVD, [this, PrivateVD, RHSVD, IsArray]() {
- return IsArray
- ? Builder.CreateElementBitCast(
- GetAddrOfLocalVar(PrivateVD),
- ConvertTypeForMem(RHSVD->getType()), "rhs.begin")
- : GetAddrOfLocalVar(PrivateVD);
- });
+ RHSVD, IsArray ? GetAddrOfLocalVar(PrivateVD).withElementType(
+ ConvertTypeForMem(RHSVD->getType()))
+ : GetAddrOfLocalVar(PrivateVD));
}
++ILHS;
++IRHS;
@@ -1363,6 +1343,7 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
case OMPD_parallel_for_simd:
case OMPD_task:
case OMPD_taskyield:
+ case OMPD_error:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_taskgroup:
@@ -1557,14 +1538,15 @@ static void emitCommonOMPParallelDirective(
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
const CodeGenBoundParametersTy &CodeGenBoundParameters) {
const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
+ llvm::Value *NumThreads = nullptr;
llvm::Function *OutlinedFn =
CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
- S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
+ CGF, S, *CS->getCapturedDecl()->param_begin(), InnermostKind,
+ CodeGen);
if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
- llvm::Value *NumThreads =
- CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
- /*IgnoreResultAssign=*/true);
+ NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
+ /*IgnoreResultAssign=*/true);
CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
CGF, NumThreads, NumThreadsClause->getBeginLoc());
}
@@ -1591,7 +1573,7 @@ static void emitCommonOMPParallelDirective(
CodeGenBoundParameters(CGF, S, CapturedVars);
CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn,
- CapturedVars, IfCond);
+ CapturedVars, IfCond, NumThreads);
}
static bool isAllocatableDecl(const VarDecl *VD) {
@@ -1609,6 +1591,19 @@ static void emitEmptyBoundParameters(CodeGenFunction &,
const OMPExecutableDirective &,
llvm::SmallVectorImpl<llvm::Value *> &) {}
+static void emitOMPCopyinClause(CodeGenFunction &CGF,
+ const OMPExecutableDirective &S) {
+ bool Copyins = CGF.EmitOMPCopyinClause(S);
+ if (Copyins) {
+ // Emit implicit barrier to synchronize threads and avoid data races on
+ // propagation master's thread values of threadprivate variables to local
+ // instances of that variables of all other implicit threads.
+ CGF.CGM.getOpenMPRuntime().emitBarrierCall(
+ CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
+ /*ForceSimpleCall=*/true);
+ }
+}
+
Address CodeGenFunction::OMPBuilderCBHelpers::getAddressOfLocalVariable(
CodeGenFunction &CGF, const VarDecl *VD) {
CodeGenModule &CGM = CGF.CGM;
@@ -1656,7 +1651,7 @@ Address CodeGenFunction::OMPBuilderCBHelpers::getAddressOfLocalVariable(
Addr,
CGF.ConvertTypeForMem(CGM.getContext().getPointerType(CVD->getType())),
getNameWithSeparators({CVD->getName(), ".addr"}, ".", "."));
- return Address(Addr, Align);
+ return Address(Addr, CGF.ConvertTypeForMem(CVD->getType()), Align);
}
Address CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
@@ -1679,7 +1674,7 @@ Address CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
llvm::CallInst *ThreadPrivateCacheCall =
OMPBuilder.createCachedThreadPrivate(CGF.Builder, Data, Size, CacheName);
- return Address(ThreadPrivateCacheCall, VDAddr.getAlignment());
+ return Address(ThreadPrivateCacheCall, CGM.Int8Ty, VDAddr.getAlignment());
}
std::string CodeGenFunction::OMPBuilderCBHelpers::getNameWithSeparators(
@@ -1693,6 +1688,41 @@ std::string CodeGenFunction::OMPBuilderCBHelpers::getNameWithSeparators(
}
return OS.str().str();
}
+
+void CodeGenFunction::OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
+ CodeGenFunction &CGF, const Stmt *RegionBodyStmt, InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP, Twine RegionName) {
+ CGBuilderTy &Builder = CGF.Builder;
+ Builder.restoreIP(CodeGenIP);
+ llvm::BasicBlock *FiniBB = splitBBWithSuffix(Builder, /*CreateBranch=*/false,
+ "." + RegionName + ".after");
+
+ {
+ OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(CGF, AllocaIP, *FiniBB);
+ CGF.EmitStmt(RegionBodyStmt);
+ }
+
+ if (Builder.saveIP().isSet())
+ Builder.CreateBr(FiniBB);
+}
+
+void CodeGenFunction::OMPBuilderCBHelpers::EmitOMPOutlinedRegionBody(
+ CodeGenFunction &CGF, const Stmt *RegionBodyStmt, InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP, Twine RegionName) {
+ CGBuilderTy &Builder = CGF.Builder;
+ Builder.restoreIP(CodeGenIP);
+ llvm::BasicBlock *FiniBB = splitBBWithSuffix(Builder, /*CreateBranch=*/false,
+ "." + RegionName + ".after");
+
+ {
+ OMPBuilderCBHelpers::OutlinedRegionBodyRAII IRB(CGF, AllocaIP, *FiniBB);
+ CGF.EmitStmt(RegionBodyStmt);
+ }
+
+ if (Builder.saveIP().isSet())
+ Builder.CreateBr(FiniBB);
+}
+
void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
if (CGM.getLangOpts().OpenMPIRBuilder) {
llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
@@ -1735,13 +1765,10 @@ void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
const Stmt *ParallelRegionBodyStmt = CS->getCapturedStmt();
- auto BodyGenCB = [ParallelRegionBodyStmt,
- this](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
- llvm::BasicBlock &ContinuationBB) {
- OMPBuilderCBHelpers::OutlinedRegionBodyRAII ORB(*this, AllocaIP,
- ContinuationBB);
- OMPBuilderCBHelpers::EmitOMPRegionBody(*this, ParallelRegionBodyStmt,
- CodeGenIP, ContinuationBB);
+ auto BodyGenCB = [&, this](InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP) {
+ OMPBuilderCBHelpers::EmitOMPOutlinedRegionBody(
+ *this, ParallelRegionBodyStmt, AllocaIP, CodeGenIP, "parallel");
};
CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
@@ -1758,16 +1785,8 @@ void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
OMPPrivateScope PrivateScope(CGF);
- bool Copyins = CGF.EmitOMPCopyinClause(S);
+ emitOMPCopyinClause(CGF, S);
(void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
- if (Copyins) {
- // Emit implicit barrier to synchronize threads and avoid data races on
- // propagation master's thread values of threadprivate variables to local
- // instances of that variables of all other implicit threads.
- CGF.CGM.getOpenMPRuntime().emitBarrierCall(
- CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
- /*ForceSimpleCall=*/true);
- }
CGF.EmitOMPPrivateClause(S, PrivateScope);
CGF.EmitOMPReductionClauseInit(S, PrivateScope);
(void)PrivateScope.Privatize();
@@ -1786,6 +1805,10 @@ void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
checkForLastprivateConditionalUpdate(*this, S);
}
+void CodeGenFunction::EmitOMPMetaDirective(const OMPMetaDirective &S) {
+ EmitStmt(S.getIfStmt());
+}
+
namespace {
/// RAII to handle scopes for loop transformation directives.
class OMPTransformDirectiveScopeRAII {
@@ -1793,6 +1816,11 @@ class OMPTransformDirectiveScopeRAII {
CodeGenFunction::CGCapturedStmtInfo *CGSI = nullptr;
CodeGenFunction::CGCapturedStmtRAII *CapInfoRAII = nullptr;
+ OMPTransformDirectiveScopeRAII(const OMPTransformDirectiveScopeRAII &) =
+ delete;
+ OMPTransformDirectiveScopeRAII &
+ operator=(const OMPTransformDirectiveScopeRAII &) = delete;
+
public:
OMPTransformDirectiveScopeRAII(CodeGenFunction &CGF, const Stmt *S) {
if (const auto *Dir = dyn_cast<OMPLoopBasedDirective>(S)) {
@@ -1827,9 +1855,7 @@ static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop,
return;
}
if (SimplifiedS == NextLoop) {
- if (auto *Dir = dyn_cast<OMPTileDirective>(SimplifiedS))
- SimplifiedS = Dir->getTransformedStmt();
- if (auto *Dir = dyn_cast<OMPUnrollDirective>(SimplifiedS))
+ if (auto *Dir = dyn_cast<OMPLoopTransformationDirective>(SimplifiedS))
SimplifiedS = Dir->getTransformedStmt();
if (const auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(SimplifiedS))
SimplifiedS = CanonLoop->getLoopStmt();
@@ -1953,11 +1979,27 @@ llvm::CanonicalLoopInfo *
CodeGenFunction::EmitOMPCollapsedCanonicalLoopNest(const Stmt *S, int Depth) {
assert(Depth == 1 && "Nested loops with OpenMPIRBuilder not yet implemented");
+ // The caller is processing the loop-associated directive processing the \p
+ // Depth loops nested in \p S. Put the previous pending loop-associated
+ // directive to the stack. If the current loop-associated directive is a loop
+ // transformation directive, it will push its generated loops onto the stack
+ // such that together with the loops left here they form the combined loop
+ // nest for the parent loop-associated directive.
+ int ParentExpectedOMPLoopDepth = ExpectedOMPLoopDepth;
+ ExpectedOMPLoopDepth = Depth;
+
EmitStmt(S);
assert(OMPLoopNestStack.size() >= (size_t)Depth && "Found too few loops");
// The last added loop is the outermost one.
- return OMPLoopNestStack.back();
+ llvm::CanonicalLoopInfo *Result = OMPLoopNestStack.back();
+
+ // Pop the \p Depth loops requested by the call from that stack and restore
+ // the previous context.
+ OMPLoopNestStack.pop_back_n(Depth);
+ ExpectedOMPLoopDepth = ParentExpectedOMPLoopDepth;
+
+ return Result;
}
void CodeGenFunction::EmitOMPCanonicalLoop(const OMPCanonicalLoop *S) {
@@ -2113,9 +2155,10 @@ bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
CapturedStmtInfo->lookup(OrigVD) != nullptr,
VD->getInit()->getType(), VK_LValue,
VD->getInit()->getExprLoc());
- EmitExprAsInit(&DRE, VD, MakeAddrLValue(Emission.getAllocatedAddress(),
- VD->getType()),
- /*capturedByInit=*/false);
+ EmitExprAsInit(
+ &DRE, VD,
+ MakeAddrLValue(Emission.getAllocatedAddress(), VD->getType()),
+ /*capturedByInit=*/false);
EmitAutoVarCleanups(Emission);
} else {
EmitVarDecl(*VD);
@@ -2159,7 +2202,7 @@ void CodeGenFunction::EmitOMPLinearClauseFinal(
(*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
Address OrigAddr = EmitLValue(&DRE).getAddress(*this);
CodeGenFunction::OMPPrivateScope VarScope(*this);
- VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; });
+ VarScope.addPrivate(OrigVD, OrigAddr);
(void)VarScope.Privatize();
EmitIgnoredExpr(F);
++IC;
@@ -2218,21 +2261,15 @@ void CodeGenFunction::EmitOMPPrivateLoopCounters(
AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD);
EmitAutoVarCleanups(VarEmission);
LocalDeclMap.erase(PrivateVD);
- (void)LoopScope.addPrivate(VD, [&VarEmission]() {
- return VarEmission.getAllocatedAddress();
- });
+ (void)LoopScope.addPrivate(VD, VarEmission.getAllocatedAddress());
if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) ||
VD->hasGlobalStorage()) {
- (void)LoopScope.addPrivate(PrivateVD, [this, VD, E]() {
- DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD),
- LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
- E->getType(), VK_LValue, E->getExprLoc());
- return EmitLValue(&DRE).getAddress(*this);
- });
+ DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD),
+ LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
+ E->getType(), VK_LValue, E->getExprLoc());
+ (void)LoopScope.addPrivate(PrivateVD, EmitLValue(&DRE).getAddress(*this));
} else {
- (void)LoopScope.addPrivate(PrivateVD, [&VarEmission]() {
- return VarEmission.getAllocatedAddress();
- });
+ (void)LoopScope.addPrivate(PrivateVD, VarEmission.getAllocatedAddress());
}
++I;
}
@@ -2247,9 +2284,8 @@ void CodeGenFunction::EmitOMPPrivateLoopCounters(
// Override only those variables that can be captured to avoid re-emission
// of the variables declared within the loops.
if (DRE->refersToEnclosingVariableOrCapture()) {
- (void)LoopScope.addPrivate(VD, [this, DRE, VD]() {
- return CreateMemTemp(DRE->getType(), VD->getName());
- });
+ (void)LoopScope.addPrivate(
+ VD, CreateMemTemp(DRE->getType(), VD->getName()));
}
}
}
@@ -2272,7 +2308,7 @@ static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
// Create temp loop control variables with their init values to support
// non-rectangular loops.
CodeGenFunction::OMPMapVars PreCondVars;
- for (const Expr * E: S.dependent_counters()) {
+ for (const Expr *E : S.dependent_counters()) {
if (!E)
continue;
assert(!E->getType().getNonReferenceType()->isRecordType() &&
@@ -2312,11 +2348,10 @@ void CodeGenFunction::EmitOMPLinearClause(
const auto *PrivateVD =
cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
if (!SIMDLCVs.count(VD->getCanonicalDecl())) {
- bool IsRegistered = PrivateScope.addPrivate(VD, [this, PrivateVD]() {
- // Emit private VarDecl with copy init.
- EmitVarDecl(*PrivateVD);
- return GetAddrOfLocalVar(PrivateVD);
- });
+ // Emit private VarDecl with copy init.
+ EmitVarDecl(*PrivateVD);
+ bool IsRegistered =
+ PrivateScope.addPrivate(VD, GetAddrOfLocalVar(PrivateVD));
assert(IsRegistered && "linear var already registered as private");
// Silence the warning about unused variable.
(void)IsRegistered;
@@ -2407,7 +2442,7 @@ void CodeGenFunction::EmitOMPSimdFinal(
OrigAddr = EmitLValue(&DRE).getAddress(*this);
}
OMPPrivateScope VarScope(*this);
- VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; });
+ VarScope.addPrivate(OrigVD, OrigAddr);
(void)VarScope.Privatize();
EmitIgnoredExpr(F);
}
@@ -2523,9 +2558,9 @@ static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
(void)CGF.EmitOMPLinearClauseInit(S);
{
CodeGenFunction::OMPPrivateScope LoopScope(CGF);
+ CGF.EmitOMPPrivateClause(S, LoopScope);
CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
CGF.EmitOMPLinearClause(S, LoopScope);
- CGF.EmitOMPPrivateClause(S, LoopScope);
CGF.EmitOMPReductionClauseInit(S, LoopScope);
CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(
CGF, S, CGF.EmitLValue(S.getIterationVariable()));
@@ -2555,8 +2590,9 @@ static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd);
emitPostUpdateForReductionClause(CGF, S,
[](CodeGenFunction &) { return nullptr; });
+ LoopScope.restoreMap();
+ CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; });
}
- CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; });
// Emit: if (PreCond) - end.
if (ContBlock) {
CGF.EmitBranch(ContBlock);
@@ -2564,7 +2600,125 @@ static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
}
}
+static bool isSupportedByOpenMPIRBuilder(const OMPSimdDirective &S) {
+ // Check for unsupported clauses
+ for (OMPClause *C : S.clauses()) {
+ // Currently only order, simdlen and safelen clauses are supported
+ if (!(isa<OMPSimdlenClause>(C) || isa<OMPSafelenClause>(C) ||
+ isa<OMPOrderClause>(C) || isa<OMPAlignedClause>(C)))
+ return false;
+ }
+
+ // Check if we have a statement with the ordered directive.
+ // Visit the statement hierarchy to find a compound statement
+ // with a ordered directive in it.
+ if (const auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(S.getRawStmt())) {
+ if (const Stmt *SyntacticalLoop = CanonLoop->getLoopStmt()) {
+ for (const Stmt *SubStmt : SyntacticalLoop->children()) {
+ if (!SubStmt)
+ continue;
+ if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(SubStmt)) {
+ for (const Stmt *CSSubStmt : CS->children()) {
+ if (!CSSubStmt)
+ continue;
+ if (isa<OMPOrderedDirective>(CSSubStmt)) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+ }
+ return true;
+}
+static llvm::MapVector<llvm::Value *, llvm::Value *>
+GetAlignedMapping(const OMPSimdDirective &S, CodeGenFunction &CGF) {
+ llvm::MapVector<llvm::Value *, llvm::Value *> AlignedVars;
+ for (const auto *Clause : S.getClausesOfKind<OMPAlignedClause>()) {
+ llvm::APInt ClauseAlignment(64, 0);
+ if (const Expr *AlignmentExpr = Clause->getAlignment()) {
+ auto *AlignmentCI =
+ cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
+ ClauseAlignment = AlignmentCI->getValue();
+ }
+ for (const Expr *E : Clause->varlists()) {
+ llvm::APInt Alignment(ClauseAlignment);
+ if (Alignment == 0) {
+ // OpenMP [2.8.1, Description]
+ // If no optional parameter is specified, implementation-defined default
+ // alignments for SIMD instructions on the target platforms are assumed.
+ Alignment =
+ CGF.getContext()
+ .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
+ E->getType()->getPointeeType()))
+ .getQuantity();
+ }
+ assert((Alignment == 0 || Alignment.isPowerOf2()) &&
+ "alignment is not power of 2");
+ llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
+ AlignedVars[PtrValue] = CGF.Builder.getInt64(Alignment.getSExtValue());
+ }
+ }
+ return AlignedVars;
+}
+
void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
+ bool UseOMPIRBuilder =
+ CGM.getLangOpts().OpenMPIRBuilder && isSupportedByOpenMPIRBuilder(S);
+ if (UseOMPIRBuilder) {
+ auto &&CodeGenIRBuilder = [this, &S, UseOMPIRBuilder](CodeGenFunction &CGF,
+ PrePostActionTy &) {
+ // Use the OpenMPIRBuilder if enabled.
+ if (UseOMPIRBuilder) {
+ llvm::MapVector<llvm::Value *, llvm::Value *> AlignedVars =
+ GetAlignedMapping(S, CGF);
+ // Emit the associated statement and get its loop representation.
+ const Stmt *Inner = S.getRawStmt();
+ llvm::CanonicalLoopInfo *CLI =
+ EmitOMPCollapsedCanonicalLoopNest(Inner, 1);
+
+ llvm::OpenMPIRBuilder &OMPBuilder =
+ CGM.getOpenMPRuntime().getOMPBuilder();
+ // Add SIMD specific metadata
+ llvm::ConstantInt *Simdlen = nullptr;
+ if (const auto *C = S.getSingleClause<OMPSimdlenClause>()) {
+ RValue Len =
+ this->EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
+ /*ignoreResult=*/true);
+ auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
+ Simdlen = Val;
+ }
+ llvm::ConstantInt *Safelen = nullptr;
+ if (const auto *C = S.getSingleClause<OMPSafelenClause>()) {
+ RValue Len =
+ this->EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
+ /*ignoreResult=*/true);
+ auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
+ Safelen = Val;
+ }
+ llvm::omp::OrderKind Order = llvm::omp::OrderKind::OMP_ORDER_unknown;
+ if (const auto *C = S.getSingleClause<OMPOrderClause>()) {
+ if (C->getKind() == OpenMPOrderClauseKind ::OMPC_ORDER_concurrent) {
+ Order = llvm::omp::OrderKind::OMP_ORDER_concurrent;
+ }
+ }
+ // Add simd metadata to the collapsed loop. Do not generate
+ // another loop for if clause. Support for if clause is done earlier.
+ OMPBuilder.applySimd(CLI, AlignedVars,
+ /*IfCond*/ nullptr, Order, Simdlen, Safelen);
+ return;
+ }
+ };
+ {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
+ OMPLexicalScope Scope(*this, S, OMPD_unknown);
+ CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd,
+ CodeGenIRBuilder);
+ }
+ return;
+ }
+
ParentLoopDirectiveForScanRegion ScanRegion(*this, S);
OMPFirstScanLoop = true;
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
@@ -2587,6 +2741,46 @@ void CodeGenFunction::EmitOMPTileDirective(const OMPTileDirective &S) {
}
void CodeGenFunction::EmitOMPUnrollDirective(const OMPUnrollDirective &S) {
+ bool UseOMPIRBuilder = CGM.getLangOpts().OpenMPIRBuilder;
+
+ if (UseOMPIRBuilder) {
+ auto DL = SourceLocToDebugLoc(S.getBeginLoc());
+ const Stmt *Inner = S.getRawStmt();
+
+ // Consume nested loop. Clear the entire remaining loop stack because a
+ // fully unrolled loop is non-transformable. For partial unrolling the
+ // generated outer loop is pushed back to the stack.
+ llvm::CanonicalLoopInfo *CLI = EmitOMPCollapsedCanonicalLoopNest(Inner, 1);
+ OMPLoopNestStack.clear();
+
+ llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
+
+ bool NeedsUnrolledCLI = ExpectedOMPLoopDepth >= 1;
+ llvm::CanonicalLoopInfo *UnrolledCLI = nullptr;
+
+ if (S.hasClausesOfKind<OMPFullClause>()) {
+ assert(ExpectedOMPLoopDepth == 0);
+ OMPBuilder.unrollLoopFull(DL, CLI);
+ } else if (auto *PartialClause = S.getSingleClause<OMPPartialClause>()) {
+ uint64_t Factor = 0;
+ if (Expr *FactorExpr = PartialClause->getFactor()) {
+ Factor = FactorExpr->EvaluateKnownConstInt(getContext()).getZExtValue();
+ assert(Factor >= 1 && "Only positive factors are valid");
+ }
+ OMPBuilder.unrollLoopPartial(DL, CLI, Factor,
+ NeedsUnrolledCLI ? &UnrolledCLI : nullptr);
+ } else {
+ OMPBuilder.unrollLoopHeuristic(DL, CLI);
+ }
+
+ assert((!NeedsUnrolledCLI || UnrolledCLI) &&
+ "NeedsUnrolledCLI implies UnrolledCLI to be set");
+ if (UnrolledCLI)
+ OMPLoopNestStack.push_back(UnrolledCLI);
+
+ return;
+ }
+
// This function is only called if the unrolled loop is not consumed by any
// other loop-associated construct. Such a loop-associated construct will have
// used the transformed AST.
@@ -2732,12 +2926,10 @@ void CodeGenFunction::EmitOMPForOuterLoop(
CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
// Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
- const bool DynamicOrOrdered =
- Ordered || RT.isDynamic(ScheduleKind.Schedule);
+ const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind.Schedule);
- assert((Ordered ||
- !RT.isStaticNonchunked(ScheduleKind.Schedule,
- LoopArgs.Chunk != nullptr)) &&
+ assert((Ordered || !RT.isStaticNonchunked(ScheduleKind.Schedule,
+ LoopArgs.Chunk != nullptr)) &&
"static non-chunked schedule does not need outer loop");
// Emit outer loop.
@@ -3057,15 +3249,15 @@ void CodeGenFunction::EmitOMPTargetSimdDirective(
}
namespace {
- struct ScheduleKindModifiersTy {
- OpenMPScheduleClauseKind Kind;
- OpenMPScheduleClauseModifier M1;
- OpenMPScheduleClauseModifier M2;
- ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
- OpenMPScheduleClauseModifier M1,
- OpenMPScheduleClauseModifier M2)
- : Kind(Kind), M1(M1), M2(M2) {}
- };
+struct ScheduleKindModifiersTy {
+ OpenMPScheduleClauseKind Kind;
+ OpenMPScheduleClauseModifier M1;
+ OpenMPScheduleClauseModifier M2;
+ ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
+ OpenMPScheduleClauseModifier M1,
+ OpenMPScheduleClauseModifier M2)
+ : Kind(Kind), M1(M1), M2(M2) {}
+};
} // namespace
bool CodeGenFunction::EmitOMPWorksharingLoop(
@@ -3185,8 +3377,10 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
// If the static schedule kind is specified or if the ordered clause is
// specified, and if no monotonic modifier is specified, the effect will
// be as if the monotonic modifier was specified.
- bool StaticChunkedOne = RT.isStaticChunked(ScheduleKind.Schedule,
- /* Chunked */ Chunk != nullptr) && HasChunkSizeOne &&
+ bool StaticChunkedOne =
+ RT.isStaticChunked(ScheduleKind.Schedule,
+ /* Chunked */ Chunk != nullptr) &&
+ HasChunkSizeOne &&
isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
bool IsMonotonic =
Ordered ||
@@ -3291,11 +3485,12 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
EmitOMPLastprivateClauseFinal(
S, isOpenMPSimdDirective(S.getDirectiveKind()),
Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc())));
+ LoopScope.restoreMap();
+ EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) {
+ return CGF.Builder.CreateIsNotNull(
+ CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
+ });
}
- EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) {
- return CGF.Builder.CreateIsNotNull(
- CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
- });
DoacrossCleanupScope.ForceCleanup();
// We're now done with the loop, so jump to the continuation block.
if (ContBlock) {
@@ -3388,6 +3583,57 @@ static void emitScanBasedDirectiveDecls(
}
}
+/// Copies final inscan reductions values to the original variables.
+/// The code is the following:
+/// \code
+/// <orig_var> = buffer[num_iters-1];
+/// \endcode
+static void emitScanBasedDirectiveFinals(
+ CodeGenFunction &CGF, const OMPLoopDirective &S,
+ llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen) {
+ llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
+ NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
+ SmallVector<const Expr *, 4> Shareds;
+ SmallVector<const Expr *, 4> LHSs;
+ SmallVector<const Expr *, 4> RHSs;
+ SmallVector<const Expr *, 4> Privates;
+ SmallVector<const Expr *, 4> CopyOps;
+ SmallVector<const Expr *, 4> CopyArrayElems;
+ for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
+ assert(C->getModifier() == OMPC_REDUCTION_inscan &&
+ "Only inscan reductions are expected.");
+ Shareds.append(C->varlist_begin(), C->varlist_end());
+ LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
+ RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
+ Privates.append(C->privates().begin(), C->privates().end());
+ CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
+ CopyArrayElems.append(C->copy_array_elems().begin(),
+ C->copy_array_elems().end());
+ }
+ // Create temp var and copy LHS value to this temp value.
+ // LHS = TMP[LastIter];
+ llvm::Value *OMPLast = CGF.Builder.CreateNSWSub(
+ OMPScanNumIterations,
+ llvm::ConstantInt::get(CGF.SizeTy, 1, /*isSigned=*/false));
+ for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
+ const Expr *PrivateExpr = Privates[I];
+ const Expr *OrigExpr = Shareds[I];
+ const Expr *CopyArrayElem = CopyArrayElems[I];
+ CodeGenFunction::OpaqueValueMapping IdxMapping(
+ CGF,
+ cast<OpaqueValueExpr>(
+ cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
+ RValue::get(OMPLast));
+ LValue DestLVal = CGF.EmitLValue(OrigExpr);
+ LValue SrcLVal = CGF.EmitLValue(CopyArrayElem);
+ CGF.EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(CGF),
+ SrcLVal.getAddress(CGF),
+ cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
+ CopyOps[I]);
+ }
+}
+
/// Emits the code for the directive with inscan reductions.
/// The code is the following:
/// \code
@@ -3496,7 +3742,7 @@ static void emitScanBasedDirective(
RValue::get(IVal));
LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
}
- PrivScope.addPrivate(LHSVD, [LHSAddr]() { return LHSAddr; });
+ PrivScope.addPrivate(LHSVD, LHSAddr);
Address RHSAddr = Address::invalid();
{
llvm::Value *OffsetIVal = CGF.Builder.CreateNUWSub(IVal, Pow2K);
@@ -3507,7 +3753,7 @@ static void emitScanBasedDirective(
RValue::get(OffsetIVal));
RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
}
- PrivScope.addPrivate(RHSVD, [RHSAddr]() { return RHSAddr; });
+ PrivScope.addPrivate(RHSVD, RHSAddr);
++ILHS;
++IRHS;
}
@@ -3582,6 +3828,8 @@ static bool emitWorksharingDirective(CodeGenFunction &CGF,
if (!isOpenMPParallelDirective(S.getDirectiveKind()))
emitScanBasedDirectiveDecls(CGF, S, NumIteratorsGen);
emitScanBasedDirective(CGF, S, NumIteratorsGen, FirstGen, SecondGen);
+ if (!isOpenMPParallelDirective(S.getDirectiveKind()))
+ emitScanBasedDirectiveFinals(CGF, S, NumIteratorsGen);
} else {
CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(),
HasCancel);
@@ -3595,13 +3843,52 @@ static bool emitWorksharingDirective(CodeGenFunction &CGF,
static bool isSupportedByOpenMPIRBuilder(const OMPForDirective &S) {
if (S.hasCancel())
return false;
- for (OMPClause *C : S.clauses())
- if (!isa<OMPNowaitClause>(C))
- return false;
+ for (OMPClause *C : S.clauses()) {
+ if (isa<OMPNowaitClause>(C))
+ continue;
+
+ if (auto *SC = dyn_cast<OMPScheduleClause>(C)) {
+ if (SC->getFirstScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown)
+ return false;
+ if (SC->getSecondScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown)
+ return false;
+ switch (SC->getScheduleKind()) {
+ case OMPC_SCHEDULE_auto:
+ case OMPC_SCHEDULE_dynamic:
+ case OMPC_SCHEDULE_runtime:
+ case OMPC_SCHEDULE_guided:
+ case OMPC_SCHEDULE_static:
+ continue;
+ case OMPC_SCHEDULE_unknown:
+ return false;
+ }
+ }
+
+ return false;
+ }
return true;
}
+static llvm::omp::ScheduleKind
+convertClauseKindToSchedKind(OpenMPScheduleClauseKind ScheduleClauseKind) {
+ switch (ScheduleClauseKind) {
+ case OMPC_SCHEDULE_unknown:
+ return llvm::omp::OMP_SCHEDULE_Default;
+ case OMPC_SCHEDULE_auto:
+ return llvm::omp::OMP_SCHEDULE_Auto;
+ case OMPC_SCHEDULE_dynamic:
+ return llvm::omp::OMP_SCHEDULE_Dynamic;
+ case OMPC_SCHEDULE_guided:
+ return llvm::omp::OMP_SCHEDULE_Guided;
+ case OMPC_SCHEDULE_runtime:
+ return llvm::omp::OMP_SCHEDULE_Runtime;
+ case OMPC_SCHEDULE_static:
+ return llvm::omp::OMP_SCHEDULE_Static;
+ }
+ llvm_unreachable("Unhandled schedule kind");
+}
+
void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
bool HasLastprivates = false;
bool UseOMPIRBuilder =
@@ -3610,17 +3897,31 @@ void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
UseOMPIRBuilder](CodeGenFunction &CGF, PrePostActionTy &) {
// Use the OpenMPIRBuilder if enabled.
if (UseOMPIRBuilder) {
+ bool NeedsBarrier = !S.getSingleClause<OMPNowaitClause>();
+
+ llvm::omp::ScheduleKind SchedKind = llvm::omp::OMP_SCHEDULE_Default;
+ llvm::Value *ChunkSize = nullptr;
+ if (auto *SchedClause = S.getSingleClause<OMPScheduleClause>()) {
+ SchedKind =
+ convertClauseKindToSchedKind(SchedClause->getScheduleKind());
+ if (const Expr *ChunkSizeExpr = SchedClause->getChunkSize())
+ ChunkSize = EmitScalarExpr(ChunkSizeExpr);
+ }
+
// Emit the associated statement and get its loop representation.
const Stmt *Inner = S.getRawStmt();
llvm::CanonicalLoopInfo *CLI =
EmitOMPCollapsedCanonicalLoopNest(Inner, 1);
- bool NeedsBarrier = !S.getSingleClause<OMPNowaitClause>();
llvm::OpenMPIRBuilder &OMPBuilder =
CGM.getOpenMPRuntime().getOMPBuilder();
llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
AllocaInsertPt->getParent(), AllocaInsertPt->getIterator());
- OMPBuilder.createWorkshareLoop(Builder, CLI, AllocaIP, NeedsBarrier);
+ OMPBuilder.applyWorkshareLoop(
+ Builder.getCurrentDebugLocation(), CLI, AllocaIP, NeedsBarrier,
+ SchedKind, ChunkSize, /*HasSimdModifier=*/false,
+ /*HasMonotonicModifier=*/false, /*HasNonmonotonicModifier=*/false,
+ /*HasOrderedClause=*/false);
return;
}
@@ -3835,22 +4136,17 @@ void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
if (CS) {
for (const Stmt *SubStmt : CS->children()) {
auto SectionCB = [this, SubStmt](InsertPointTy AllocaIP,
- InsertPointTy CodeGenIP,
- llvm::BasicBlock &FiniBB) {
- OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP,
- FiniBB);
- OMPBuilderCBHelpers::EmitOMPRegionBody(*this, SubStmt, CodeGenIP,
- FiniBB);
+ InsertPointTy CodeGenIP) {
+ OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
+ *this, SubStmt, AllocaIP, CodeGenIP, "section");
};
SectionCBVector.push_back(SectionCB);
}
} else {
auto SectionCB = [this, CapturedStmt](InsertPointTy AllocaIP,
- InsertPointTy CodeGenIP,
- llvm::BasicBlock &FiniBB) {
- OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
- OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CapturedStmt, CodeGenIP,
- FiniBB);
+ InsertPointTy CodeGenIP) {
+ OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
+ *this, CapturedStmt, AllocaIP, CodeGenIP, "section");
};
SectionCBVector.push_back(SectionCB);
}
@@ -3903,11 +4199,9 @@ void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
};
auto BodyGenCB = [SectionRegionBodyStmt, this](InsertPointTy AllocaIP,
- InsertPointTy CodeGenIP,
- llvm::BasicBlock &FiniBB) {
- OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
- OMPBuilderCBHelpers::EmitOMPRegionBody(*this, SectionRegionBodyStmt,
- CodeGenIP, FiniBB);
+ InsertPointTy CodeGenIP) {
+ OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
+ *this, SectionRegionBodyStmt, AllocaIP, CodeGenIP, "section");
};
LexicalScope Scope(*this, S.getSourceRange());
@@ -3986,11 +4280,9 @@ void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
};
auto BodyGenCB = [MasterRegionBodyStmt, this](InsertPointTy AllocaIP,
- InsertPointTy CodeGenIP,
- llvm::BasicBlock &FiniBB) {
- OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
- OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MasterRegionBodyStmt,
- CodeGenIP, FiniBB);
+ InsertPointTy CodeGenIP) {
+ OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
+ *this, MasterRegionBodyStmt, AllocaIP, CodeGenIP, "master");
};
LexicalScope Scope(*this, S.getSourceRange());
@@ -4034,11 +4326,9 @@ void CodeGenFunction::EmitOMPMaskedDirective(const OMPMaskedDirective &S) {
};
auto BodyGenCB = [MaskedRegionBodyStmt, this](InsertPointTy AllocaIP,
- InsertPointTy CodeGenIP,
- llvm::BasicBlock &FiniBB) {
- OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
- OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MaskedRegionBodyStmt,
- CodeGenIP, FiniBB);
+ InsertPointTy CodeGenIP) {
+ OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
+ *this, MaskedRegionBodyStmt, AllocaIP, CodeGenIP, "masked");
};
LexicalScope Scope(*this, S.getSourceRange());
@@ -4076,11 +4366,9 @@ void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
};
auto BodyGenCB = [CriticalRegionBodyStmt, this](InsertPointTy AllocaIP,
- InsertPointTy CodeGenIP,
- llvm::BasicBlock &FiniBB) {
- OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
- OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CriticalRegionBodyStmt,
- CodeGenIP, FiniBB);
+ InsertPointTy CodeGenIP) {
+ OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
+ *this, CriticalRegionBodyStmt, AllocaIP, CodeGenIP, "critical");
};
LexicalScope Scope(*this, S.getSourceRange());
@@ -4112,26 +4400,29 @@ void CodeGenFunction::EmitOMPParallelForDirective(
// directives: 'parallel' with 'for' directive.
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
+ emitOMPCopyinClause(CGF, S);
(void)emitWorksharingDirective(CGF, S, S.hasCancel());
};
{
- if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
+ const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
+ CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
+ CGCapturedStmtInfo CGSI(CR_OpenMP);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
+ OMPLoopScope LoopScope(CGF, S);
+ return CGF.EmitScalarExpr(S.getNumIterations());
+ };
+ bool IsInscan = llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
[](const OMPReductionClause *C) {
return C->getModifier() == OMPC_REDUCTION_inscan;
- })) {
- const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
- CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
- CGCapturedStmtInfo CGSI(CR_OpenMP);
- CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
- OMPLoopScope LoopScope(CGF, S);
- return CGF.EmitScalarExpr(S.getNumIterations());
- };
+ });
+ if (IsInscan)
emitScanBasedDirectiveDecls(*this, S, NumIteratorsGen);
- }
auto LPCRegion =
CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
emitEmptyBoundParameters);
+ if (IsInscan)
+ emitScanBasedDirectiveFinals(*this, S, NumIteratorsGen);
}
// Check for outer lastprivate conditional update.
checkForLastprivateConditionalUpdate(*this, S);
@@ -4143,26 +4434,29 @@ void CodeGenFunction::EmitOMPParallelForSimdDirective(
// directives: 'parallel' with 'for' directive.
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
+ emitOMPCopyinClause(CGF, S);
(void)emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
};
{
- if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
+ const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
+ CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
+ CGCapturedStmtInfo CGSI(CR_OpenMP);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
+ OMPLoopScope LoopScope(CGF, S);
+ return CGF.EmitScalarExpr(S.getNumIterations());
+ };
+ bool IsInscan = llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
[](const OMPReductionClause *C) {
return C->getModifier() == OMPC_REDUCTION_inscan;
- })) {
- const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
- CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
- CGCapturedStmtInfo CGSI(CR_OpenMP);
- CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
- OMPLoopScope LoopScope(CGF, S);
- return CGF.EmitScalarExpr(S.getNumIterations());
- };
+ });
+ if (IsInscan)
emitScanBasedDirectiveDecls(*this, S, NumIteratorsGen);
- }
auto LPCRegion =
CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
emitCommonOMPParallelDirective(*this, S, OMPD_for_simd, CodeGen,
emitEmptyBoundParameters);
+ if (IsInscan)
+ emitScanBasedDirectiveFinals(*this, S, NumIteratorsGen);
}
// Check for outer lastprivate conditional update.
checkForLastprivateConditionalUpdate(*this, S);
@@ -4175,16 +4469,8 @@ void CodeGenFunction::EmitOMPParallelMasterDirective(
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
OMPPrivateScope PrivateScope(CGF);
- bool Copyins = CGF.EmitOMPCopyinClause(S);
+ emitOMPCopyinClause(CGF, S);
(void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
- if (Copyins) {
- // Emit implicit barrier to synchronize threads and avoid data races on
- // propagation master's thread values of threadprivate variables to local
- // instances of that variables of all other implicit threads.
- CGF.CGM.getOpenMPRuntime().emitBarrierCall(
- CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
- /*ForceSimpleCall=*/true);
- }
CGF.EmitOMPPrivateClause(S, PrivateScope);
CGF.EmitOMPReductionClauseInit(S, PrivateScope);
(void)PrivateScope.Privatize();
@@ -4203,12 +4489,40 @@ void CodeGenFunction::EmitOMPParallelMasterDirective(
checkForLastprivateConditionalUpdate(*this, S);
}
+void CodeGenFunction::EmitOMPParallelMaskedDirective(
+ const OMPParallelMaskedDirective &S) {
+ // Emit directive as a combined directive that consists of two implicit
+ // directives: 'parallel' with 'masked' directive.
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ Action.Enter(CGF);
+ OMPPrivateScope PrivateScope(CGF);
+ emitOMPCopyinClause(CGF, S);
+ (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
+ CGF.EmitOMPPrivateClause(S, PrivateScope);
+ CGF.EmitOMPReductionClauseInit(S, PrivateScope);
+ (void)PrivateScope.Privatize();
+ emitMasked(CGF, S);
+ CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
+ };
+ {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
+ emitCommonOMPParallelDirective(*this, S, OMPD_masked, CodeGen,
+ emitEmptyBoundParameters);
+ emitPostUpdateForReductionClause(*this, S,
+ [](CodeGenFunction &) { return nullptr; });
+ }
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
+}
+
void CodeGenFunction::EmitOMPParallelSectionsDirective(
const OMPParallelSectionsDirective &S) {
// Emit directive as a combined directive that consists of two implicit
// directives: 'parallel' with 'sections' directive.
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
+ emitOMPCopyinClause(CGF, S);
CGF.EmitSections(S);
};
{
@@ -4240,10 +4554,10 @@ public:
PrivateDecls.push_back(VD);
}
}
- void VisitOMPExecutableDirective(const OMPExecutableDirective *) { return; }
- void VisitCapturedStmt(const CapturedStmt *) { return; }
- void VisitLambdaExpr(const LambdaExpr *) { return; }
- void VisitBlockExpr(const BlockExpr *) { return; }
+ void VisitOMPExecutableDirective(const OMPExecutableDirective *) {}
+ void VisitCapturedStmt(const CapturedStmt *) {}
+ void VisitLambdaExpr(const LambdaExpr *) {}
+ void VisitBlockExpr(const BlockExpr *) {}
void VisitStmt(const Stmt *S) {
if (!S)
return;
@@ -4257,6 +4571,40 @@ public:
};
} // anonymous namespace
+static void buildDependences(const OMPExecutableDirective &S,
+ OMPTaskDataTy &Data) {
+
+ // First look for 'omp_all_memory' and add this first.
+ bool OmpAllMemory = false;
+ if (llvm::any_of(
+ S.getClausesOfKind<OMPDependClause>(), [](const OMPDependClause *C) {
+ return C->getDependencyKind() == OMPC_DEPEND_outallmemory ||
+ C->getDependencyKind() == OMPC_DEPEND_inoutallmemory;
+ })) {
+ OmpAllMemory = true;
+ // Since both OMPC_DEPEND_outallmemory and OMPC_DEPEND_inoutallmemory are
+ // equivalent to the runtime, always use OMPC_DEPEND_outallmemory to
+ // simplify.
+ OMPTaskDataTy::DependData &DD =
+ Data.Dependences.emplace_back(OMPC_DEPEND_outallmemory,
+ /*IteratorExpr=*/nullptr);
+ // Add a nullptr Expr to simplify the codegen in emitDependData.
+ DD.DepExprs.push_back(nullptr);
+ }
+ // Add remaining dependences skipping any 'out' or 'inout' if they are
+ // overridden by 'omp_all_memory'.
+ for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
+ OpenMPDependClauseKind Kind = C->getDependencyKind();
+ if (Kind == OMPC_DEPEND_outallmemory || Kind == OMPC_DEPEND_inoutallmemory)
+ continue;
+ if (OmpAllMemory && (Kind == OMPC_DEPEND_out || Kind == OMPC_DEPEND_inout))
+ continue;
+ OMPTaskDataTy::DependData &DD =
+ Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
+ DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
+ }
+}
+
void CodeGenFunction::EmitOMPTaskBasedDirective(
const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion,
const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen,
@@ -4352,11 +4700,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit(
*this, S.getBeginLoc(), LHSs, RHSs, Data);
// Build list of dependences.
- for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
- OMPTaskDataTy::DependData &DD =
- Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
- DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
- }
+ buildDependences(S, Data);
// Get list of local vars for untied tasks.
if (!Data.Tied) {
CheckVarsEscapingUntiedTaskDeclContext Checker;
@@ -4372,6 +4716,54 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
UntiedLocalVars;
// Set proper addresses for generated private copies.
OMPPrivateScope Scope(CGF);
+ // Generate debug info for variables present in shared clause.
+ if (auto *DI = CGF.getDebugInfo()) {
+ llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields =
+ CGF.CapturedStmtInfo->getCaptureFields();
+ llvm::Value *ContextValue = CGF.CapturedStmtInfo->getContextValue();
+ if (CaptureFields.size() && ContextValue) {
+ unsigned CharWidth = CGF.getContext().getCharWidth();
+ // The shared variables are packed together as members of structure.
+ // So the address of each shared variable can be computed by adding
+ // offset of it (within record) to the base address of record. For each
+ // shared variable, debug intrinsic llvm.dbg.declare is generated with
+ // appropriate expressions (DIExpression).
+ // Ex:
+ // %12 = load %struct.anon*, %struct.anon** %__context.addr.i
+ // call void @llvm.dbg.declare(metadata %struct.anon* %12,
+ // metadata !svar1,
+ // metadata !DIExpression(DW_OP_deref))
+ // call void @llvm.dbg.declare(metadata %struct.anon* %12,
+ // metadata !svar2,
+ // metadata !DIExpression(DW_OP_plus_uconst, 8, DW_OP_deref))
+ for (auto It = CaptureFields.begin(); It != CaptureFields.end(); ++It) {
+ const VarDecl *SharedVar = It->first;
+ RecordDecl *CaptureRecord = It->second->getParent();
+ const ASTRecordLayout &Layout =
+ CGF.getContext().getASTRecordLayout(CaptureRecord);
+ unsigned Offset =
+ Layout.getFieldOffset(It->second->getFieldIndex()) / CharWidth;
+ if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo())
+ (void)DI->EmitDeclareOfAutoVariable(SharedVar, ContextValue,
+ CGF.Builder, false);
+ llvm::Instruction &Last = CGF.Builder.GetInsertBlock()->back();
+ // Get the call dbg.declare instruction we just created and update
+ // its DIExpression to add offset to base address.
+ if (auto DDI = dyn_cast<llvm::DbgVariableIntrinsic>(&Last)) {
+ SmallVector<uint64_t, 8> Ops;
+ // Add offset to the base address if non zero.
+ if (Offset) {
+ Ops.push_back(llvm::dwarf::DW_OP_plus_uconst);
+ Ops.push_back(Offset);
+ }
+ Ops.push_back(llvm::dwarf::DW_OP_deref);
+ auto &Ctx = DDI->getContext();
+ llvm::DIExpression *DIExpr = llvm::DIExpression::get(Ctx, Ops);
+ Last.setOperand(2, llvm::MetadataAsValue::get(Ctx, DIExpr));
+ }
+ }
+ }
+ }
llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> FirstprivatePtrs;
if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
!Data.LastprivateVars.empty() || !Data.PrivateLocals.empty()) {
@@ -4432,39 +4824,50 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
}
auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(),
ParamTypes, /*isVarArg=*/false);
- CopyFn = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CopyFn, CopyFnTy->getPointerTo());
CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
for (const auto &Pair : LastprivateDstsOrigs) {
const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD),
/*RefersToEnclosingVariableOrCapture=*/
- CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
+ CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
Pair.second->getType(), VK_LValue,
Pair.second->getExprLoc());
- Scope.addPrivate(Pair.first, [&CGF, &DRE]() {
- return CGF.EmitLValue(&DRE).getAddress(CGF);
- });
+ Scope.addPrivate(Pair.first, CGF.EmitLValue(&DRE).getAddress(CGF));
}
for (const auto &Pair : PrivatePtrs) {
- Address Replacement(CGF.Builder.CreateLoad(Pair.second),
- CGF.getContext().getDeclAlign(Pair.first));
- Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
+ Address Replacement = Address(
+ CGF.Builder.CreateLoad(Pair.second),
+ CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
+ CGF.getContext().getDeclAlign(Pair.first));
+ Scope.addPrivate(Pair.first, Replacement);
+ if (auto *DI = CGF.getDebugInfo())
+ if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo())
+ (void)DI->EmitDeclareOfAutoVariable(
+ Pair.first, Pair.second.getPointer(), CGF.Builder,
+ /*UsePointerValue*/ true);
}
// Adjust mapping for internal locals by mapping actual memory instead of
// a pointer to this memory.
for (auto &Pair : UntiedLocalVars) {
+ QualType VDType = Pair.first->getType().getNonReferenceType();
+ if (Pair.first->getType()->isLValueReferenceType())
+ VDType = CGF.getContext().getPointerType(VDType);
if (isAllocatableDecl(Pair.first)) {
llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
- Address Replacement(Ptr, CGF.getPointerAlign());
+ Address Replacement(
+ Ptr,
+ CGF.ConvertTypeForMem(CGF.getContext().getPointerType(VDType)),
+ CGF.getPointerAlign());
Pair.second.first = Replacement;
Ptr = CGF.Builder.CreateLoad(Replacement);
- Replacement = Address(Ptr, CGF.getContext().getDeclAlign(Pair.first));
+ Replacement = Address(Ptr, CGF.ConvertTypeForMem(VDType),
+ CGF.getContext().getDeclAlign(Pair.first));
Pair.second.second = Replacement;
} else {
llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
- Address Replacement(Ptr, CGF.getContext().getDeclAlign(Pair.first));
+ Address Replacement(Ptr, CGF.ConvertTypeForMem(VDType),
+ CGF.getContext().getDeclAlign(Pair.first));
Pair.second.first = Replacement;
}
}
@@ -4472,10 +4875,11 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
if (Data.Reductions) {
OMPPrivateScope FirstprivateScope(CGF);
for (const auto &Pair : FirstprivatePtrs) {
- Address Replacement(CGF.Builder.CreateLoad(Pair.second),
- CGF.getContext().getDeclAlign(Pair.first));
- FirstprivateScope.addPrivate(Pair.first,
- [Replacement]() { return Replacement; });
+ Address Replacement(
+ CGF.Builder.CreateLoad(Pair.second),
+ CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
+ CGF.getContext().getDeclAlign(Pair.first));
+ FirstprivateScope.addPrivate(Pair.first, Replacement);
}
(void)FirstprivateScope.Privatize();
OMPLexicalScope LexScope(CGF, S, CapturedRegion);
@@ -4499,10 +4903,10 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
CGF.getContext().getPointerType(
Data.ReductionCopies[Cnt]->getType()),
Data.ReductionCopies[Cnt]->getExprLoc()),
+ CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()),
Replacement.getAlignment());
Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
- Scope.addPrivate(RedCG.getBaseDecl(Cnt),
- [Replacement]() { return Replacement; });
+ Scope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
}
}
// Privatize all private variables except for in_reduction items.
@@ -4554,10 +4958,10 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
Replacement.getPointer(), CGF.getContext().VoidPtrTy,
CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
InRedPrivs[Cnt]->getExprLoc()),
+ CGF.ConvertTypeForMem(InRedPrivs[Cnt]->getType()),
Replacement.getAlignment());
Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
- InRedScope.addPrivate(RedCG.getBaseDecl(Cnt),
- [Replacement]() { return Replacement; });
+ InRedScope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
}
}
(void)InRedScope.Privatize();
@@ -4570,7 +4974,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
S, *I, *PartId, *TaskT, S.getDirectiveKind(), CodeGen, Data.Tied,
Data.NumberOfParts);
- OMPLexicalScope Scope(*this, S, llvm::None,
+ OMPLexicalScope Scope(*this, S, std::nullopt,
!isOpenMPParallelDirective(S.getDirectiveKind()) &&
!isOpenMPSimdDirective(S.getDirectiveKind()));
TaskGen(*this, OutlinedFn, Data);
@@ -4581,18 +4985,18 @@ createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data,
QualType Ty, CapturedDecl *CD,
SourceLocation Loc) {
auto *OrigVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
auto *OrigRef = DeclRefExpr::Create(
C, NestedNameSpecifierLoc(), SourceLocation(), OrigVD,
/*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
auto *PrivateVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
auto *PrivateRef = DeclRefExpr::Create(
C, NestedNameSpecifierLoc(), SourceLocation(), PrivateVD,
/*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
QualType ElemType = C.getBaseElementType(Ty);
auto *InitVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, ElemType,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
auto *InitRef = DeclRefExpr::Create(
C, NestedNameSpecifierLoc(), SourceLocation(), InitVD,
/*RefersToEnclosingVariableOrCapture=*/false, Loc, ElemType, VK_LValue);
@@ -4631,6 +5035,17 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
++IElemInitRef;
}
}
+ SmallVector<const Expr *, 4> LHSs;
+ SmallVector<const Expr *, 4> RHSs;
+ for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
+ Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
+ Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
+ Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
+ Data.ReductionOps.append(C->reduction_ops().begin(),
+ C->reduction_ops().end());
+ LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
+ RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
+ }
OMPPrivateScope TargetScope(*this);
VarDecl *BPVD = nullptr;
VarDecl *PVD = nullptr;
@@ -4641,7 +5056,7 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
getContext(), getContext().getTranslationUnitDecl(), /*NumParams=*/0);
llvm::APInt ArrSize(/*numBits=*/32, InputInfo.NumberOfTargetItems);
QualType BaseAndPointerAndMapperType = getContext().getConstantArrayType(
- getContext().VoidPtrTy, ArrSize, nullptr, ArrayType::Normal,
+ getContext().VoidPtrTy, ArrSize, nullptr, ArraySizeModifier::Normal,
/*IndexTypeQuals=*/0);
BPVD = createImplicitFirstprivateForType(
getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
@@ -4649,33 +5064,24 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
QualType SizesType = getContext().getConstantArrayType(
getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1),
- ArrSize, nullptr, ArrayType::Normal,
+ ArrSize, nullptr, ArraySizeModifier::Normal,
/*IndexTypeQuals=*/0);
SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD,
S.getBeginLoc());
- TargetScope.addPrivate(
- BPVD, [&InputInfo]() { return InputInfo.BasePointersArray; });
- TargetScope.addPrivate(PVD,
- [&InputInfo]() { return InputInfo.PointersArray; });
- TargetScope.addPrivate(SVD,
- [&InputInfo]() { return InputInfo.SizesArray; });
+ TargetScope.addPrivate(BPVD, InputInfo.BasePointersArray);
+ TargetScope.addPrivate(PVD, InputInfo.PointersArray);
+ TargetScope.addPrivate(SVD, InputInfo.SizesArray);
// If there is no user-defined mapper, the mapper array will be nullptr. In
// this case, we don't need to privatize it.
- if (!dyn_cast_or_null<llvm::ConstantPointerNull>(
+ if (!isa_and_nonnull<llvm::ConstantPointerNull>(
InputInfo.MappersArray.getPointer())) {
MVD = createImplicitFirstprivateForType(
getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
- TargetScope.addPrivate(MVD,
- [&InputInfo]() { return InputInfo.MappersArray; });
+ TargetScope.addPrivate(MVD, InputInfo.MappersArray);
}
}
(void)TargetScope.Privatize();
- // Build list of dependences.
- for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
- OMPTaskDataTy::DependData &DD =
- Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
- DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
- }
+ buildDependences(S, Data);
auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD, MVD,
&InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) {
// Set proper addresses for generated private copies.
@@ -4703,18 +5109,17 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
}
auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(),
ParamTypes, /*isVarArg=*/false);
- CopyFn = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CopyFn, CopyFnTy->getPointerTo());
CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
for (const auto &Pair : PrivatePtrs) {
- Address Replacement(CGF.Builder.CreateLoad(Pair.second),
- CGF.getContext().getDeclAlign(Pair.first));
- Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
+ Address Replacement(
+ CGF.Builder.CreateLoad(Pair.second),
+ CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
+ CGF.getContext().getDeclAlign(Pair.first));
+ Scope.addPrivate(Pair.first, Replacement);
}
}
- // Privatize all private variables except for in_reduction items.
- (void)Scope.Privatize();
+ CGF.processInReduction(S, Data, CGF, CS, Scope);
if (InputInfo.NumberOfTargetItems > 0) {
InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP(
CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0);
@@ -4730,6 +5135,15 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
Action.Enter(CGF);
OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false);
+ auto *TL = S.getSingleClause<OMPThreadLimitClause>();
+ if (CGF.CGM.getLangOpts().OpenMP >= 51 &&
+ needsTaskBasedThreadLimit(S.getDirectiveKind()) && TL) {
+ // Emit __kmpc_set_thread_limit() to set the thread_limit for the task
+ // enclosing this target region. This will indirectly set the thread_limit
+ // for every applicable construct within target region.
+ CGF.CGM.getOpenMPRuntime().emitThreadLimitClause(
+ CGF, TL->getThreadLimit(), S.getBeginLoc());
+ }
BodyGen(CGF);
};
llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
@@ -4739,11 +5153,97 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
IntegerLiteral IfCond(getContext(), TrueOrFalse,
getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
SourceLocation());
-
CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn,
SharedsTy, CapturedStruct, &IfCond, Data);
}
+void CodeGenFunction::processInReduction(const OMPExecutableDirective &S,
+ OMPTaskDataTy &Data,
+ CodeGenFunction &CGF,
+ const CapturedStmt *CS,
+ OMPPrivateScope &Scope) {
+ if (Data.Reductions) {
+ OpenMPDirectiveKind CapturedRegion = S.getDirectiveKind();
+ OMPLexicalScope LexScope(CGF, S, CapturedRegion);
+ ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionVars,
+ Data.ReductionCopies, Data.ReductionOps);
+ llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
+ CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(4)));
+ for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
+ RedCG.emitSharedOrigLValue(CGF, Cnt);
+ RedCG.emitAggregateType(CGF, Cnt);
+ // FIXME: This must removed once the runtime library is fixed.
+ // Emit required threadprivate variables for
+ // initializer/combiner/finalizer.
+ CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
+ RedCG, Cnt);
+ Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
+ CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
+ Replacement =
+ Address(CGF.EmitScalarConversion(
+ Replacement.getPointer(), CGF.getContext().VoidPtrTy,
+ CGF.getContext().getPointerType(
+ Data.ReductionCopies[Cnt]->getType()),
+ Data.ReductionCopies[Cnt]->getExprLoc()),
+ CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()),
+ Replacement.getAlignment());
+ Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
+ Scope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
+ }
+ }
+ (void)Scope.Privatize();
+ SmallVector<const Expr *, 4> InRedVars;
+ SmallVector<const Expr *, 4> InRedPrivs;
+ SmallVector<const Expr *, 4> InRedOps;
+ SmallVector<const Expr *, 4> TaskgroupDescriptors;
+ for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
+ auto IPriv = C->privates().begin();
+ auto IRed = C->reduction_ops().begin();
+ auto ITD = C->taskgroup_descriptors().begin();
+ for (const Expr *Ref : C->varlists()) {
+ InRedVars.emplace_back(Ref);
+ InRedPrivs.emplace_back(*IPriv);
+ InRedOps.emplace_back(*IRed);
+ TaskgroupDescriptors.emplace_back(*ITD);
+ std::advance(IPriv, 1);
+ std::advance(IRed, 1);
+ std::advance(ITD, 1);
+ }
+ }
+ OMPPrivateScope InRedScope(CGF);
+ if (!InRedVars.empty()) {
+ ReductionCodeGen RedCG(InRedVars, InRedVars, InRedPrivs, InRedOps);
+ for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
+ RedCG.emitSharedOrigLValue(CGF, Cnt);
+ RedCG.emitAggregateType(CGF, Cnt);
+ // FIXME: This must removed once the runtime library is fixed.
+ // Emit required threadprivate variables for
+ // initializer/combiner/finalizer.
+ CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
+ RedCG, Cnt);
+ llvm::Value *ReductionsPtr;
+ if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) {
+ ReductionsPtr =
+ CGF.EmitLoadOfScalar(CGF.EmitLValue(TRExpr), TRExpr->getExprLoc());
+ } else {
+ ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ }
+ Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
+ CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
+ Replacement = Address(
+ CGF.EmitScalarConversion(
+ Replacement.getPointer(), CGF.getContext().VoidPtrTy,
+ CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
+ InRedPrivs[Cnt]->getExprLoc()),
+ CGF.ConvertTypeForMem(InRedPrivs[Cnt]->getType()),
+ Replacement.getAlignment());
+ Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
+ InRedScope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
+ }
+ }
+ (void)InRedScope.Privatize();
+}
+
void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
// Emit outlined function for task construct.
const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
@@ -4781,16 +5281,52 @@ void CodeGenFunction::EmitOMPTaskyieldDirective(
CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getBeginLoc());
}
+void CodeGenFunction::EmitOMPErrorDirective(const OMPErrorDirective &S) {
+ const OMPMessageClause *MC = S.getSingleClause<OMPMessageClause>();
+ Expr *ME = MC ? MC->getMessageString() : nullptr;
+ const OMPSeverityClause *SC = S.getSingleClause<OMPSeverityClause>();
+ bool IsFatal = false;
+ if (!SC || SC->getSeverityKind() == OMPC_SEVERITY_fatal)
+ IsFatal = true;
+ CGM.getOpenMPRuntime().emitErrorCall(*this, S.getBeginLoc(), ME, IsFatal);
+}
+
void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_barrier);
}
void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
- CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc());
+ OMPTaskDataTy Data;
+ // Build list of dependences
+ buildDependences(S, Data);
+ Data.HasNowaitClause = S.hasClausesOfKind<OMPNowaitClause>();
+ CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc(), Data);
+}
+
+bool isSupportedByOpenMPIRBuilder(const OMPTaskgroupDirective &T) {
+ return T.clauses().empty();
}
void CodeGenFunction::EmitOMPTaskgroupDirective(
const OMPTaskgroupDirective &S) {
+ OMPLexicalScope Scope(*this, S, OMPD_unknown);
+ if (CGM.getLangOpts().OpenMPIRBuilder && isSupportedByOpenMPIRBuilder(S)) {
+ llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
+ using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
+ InsertPointTy AllocaIP(AllocaInsertPt->getParent(),
+ AllocaInsertPt->getIterator());
+
+ auto BodyGenCB = [&, this](InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP) {
+ Builder.restoreIP(CodeGenIP);
+ EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
+ };
+ CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
+ if (!CapturedStmtInfo)
+ CapturedStmtInfo = &CapStmtInfo;
+ Builder.restoreIP(OMPBuilder.createTaskgroup(Builder, AllocaIP, BodyGenCB));
+ return;
+ }
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
Action.Enter(CGF);
if (const Expr *E = S.getReductionRef()) {
@@ -4816,7 +5352,6 @@ void CodeGenFunction::EmitOMPTaskgroupDirective(
}
CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
};
- OMPLexicalScope Scope(*this, S, OMPD_unknown);
CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getBeginLoc());
}
@@ -4828,9 +5363,9 @@ void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
*this,
[&S]() -> ArrayRef<const Expr *> {
if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>())
- return llvm::makeArrayRef(FlushClause->varlist_begin(),
- FlushClause->varlist_end());
- return llvm::None;
+ return llvm::ArrayRef(FlushClause->varlist_begin(),
+ FlushClause->varlist_end());
+ return std::nullopt;
}(),
S.getBeginLoc(), AO);
}
@@ -5168,8 +5703,8 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
// iteration space is divided into chunks that are approximately equal
// in size, and at most one chunk is distributed to each team of the
// league. The size of the chunks is unspecified in this case.
- bool StaticChunked = RT.isStaticChunked(
- ScheduleKind, /* Chunked */ Chunk != nullptr) &&
+ bool StaticChunked =
+ RT.isStaticChunked(ScheduleKind, /* Chunked */ Chunk != nullptr) &&
isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
if (RT.isStaticNonchunked(ScheduleKind,
/* Chunked */ Chunk != nullptr) ||
@@ -5307,12 +5842,88 @@ static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
CGF.CapturedStmtInfo = &CapStmtInfo;
llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S, Loc);
Fn->setDoesNotRecurse();
- if (CGM.getCodeGenOpts().OptimizationLevel != 0)
- Fn->addFnAttr(llvm::Attribute::AlwaysInline);
return Fn;
}
+template <typename T>
+static void emitRestoreIP(CodeGenFunction &CGF, const T *C,
+ llvm::OpenMPIRBuilder::InsertPointTy AllocaIP,
+ llvm::OpenMPIRBuilder &OMPBuilder) {
+
+ unsigned NumLoops = C->getNumLoops();
+ QualType Int64Ty = CGF.CGM.getContext().getIntTypeForBitwidth(
+ /*DestWidth=*/64, /*Signed=*/1);
+ llvm::SmallVector<llvm::Value *> StoreValues;
+ for (unsigned I = 0; I < NumLoops; I++) {
+ const Expr *CounterVal = C->getLoopData(I);
+ assert(CounterVal);
+ llvm::Value *StoreValue = CGF.EmitScalarConversion(
+ CGF.EmitScalarExpr(CounterVal), CounterVal->getType(), Int64Ty,
+ CounterVal->getExprLoc());
+ StoreValues.emplace_back(StoreValue);
+ }
+ OMPDoacrossKind<T> ODK;
+ bool IsDependSource = ODK.isSource(C);
+ CGF.Builder.restoreIP(
+ OMPBuilder.createOrderedDepend(CGF.Builder, AllocaIP, NumLoops,
+ StoreValues, ".cnt.addr", IsDependSource));
+}
+
void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
+ if (CGM.getLangOpts().OpenMPIRBuilder) {
+ llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
+ using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
+
+ if (S.hasClausesOfKind<OMPDependClause>() ||
+ S.hasClausesOfKind<OMPDoacrossClause>()) {
+ // The ordered directive with depend clause.
+ assert(!S.hasAssociatedStmt() && "No associated statement must be in "
+ "ordered depend|doacross construct.");
+ InsertPointTy AllocaIP(AllocaInsertPt->getParent(),
+ AllocaInsertPt->getIterator());
+ for (const auto *DC : S.getClausesOfKind<OMPDependClause>())
+ emitRestoreIP(*this, DC, AllocaIP, OMPBuilder);
+ for (const auto *DC : S.getClausesOfKind<OMPDoacrossClause>())
+ emitRestoreIP(*this, DC, AllocaIP, OMPBuilder);
+ } else {
+ // The ordered directive with threads or simd clause, or without clause.
+ // Without clause, it behaves as if the threads clause is specified.
+ const auto *C = S.getSingleClause<OMPSIMDClause>();
+
+ auto FiniCB = [this](InsertPointTy IP) {
+ OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
+ };
+
+ auto BodyGenCB = [&S, C, this](InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP) {
+ Builder.restoreIP(CodeGenIP);
+
+ const CapturedStmt *CS = S.getInnermostCapturedStmt();
+ if (C) {
+ llvm::BasicBlock *FiniBB = splitBBWithSuffix(
+ Builder, /*CreateBranch=*/false, ".ordered.after");
+ llvm::SmallVector<llvm::Value *, 16> CapturedVars;
+ GenerateOpenMPCapturedVars(*CS, CapturedVars);
+ llvm::Function *OutlinedFn =
+ emitOutlinedOrderedFunction(CGM, CS, S.getBeginLoc());
+ assert(S.getBeginLoc().isValid() &&
+ "Outlined function call location must be valid.");
+ ApplyDebugLocation::CreateDefaultArtificial(*this, S.getBeginLoc());
+ OMPBuilderCBHelpers::EmitCaptureStmt(*this, CodeGenIP, *FiniBB,
+ OutlinedFn, CapturedVars);
+ } else {
+ OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
+ *this, CS->getCapturedStmt(), AllocaIP, CodeGenIP, "ordered");
+ }
+ };
+
+ OMPLexicalScope Scope(*this, S, OMPD_unknown);
+ Builder.restoreIP(
+ OMPBuilder.createOrderedThreadsSimd(Builder, BodyGenCB, FiniCB, !C));
+ }
+ return;
+ }
+
if (S.hasClausesOfKind<OMPDependClause>()) {
assert(!S.hasAssociatedStmt() &&
"No associated statement must be in ordered depend construct.");
@@ -5320,6 +5931,13 @@ void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC);
return;
}
+ if (S.hasClausesOfKind<OMPDoacrossClause>()) {
+ assert(!S.hasAssociatedStmt() &&
+ "No associated statement must be in ordered doacross construct.");
+ for (const auto *DC : S.getClausesOfKind<OMPDoacrossClause>())
+ CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC);
+ return;
+ }
const auto *C = S.getSingleClause<OMPSIMDClause>();
auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF,
PrePostActionTy &Action) {
@@ -5432,7 +6050,7 @@ static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, llvm::AtomicOrdering AO,
case llvm::AtomicOrdering::Acquire:
case llvm::AtomicOrdering::AcquireRelease:
case llvm::AtomicOrdering::SequentiallyConsistent:
- CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, std::nullopt, Loc,
llvm::AtomicOrdering::Acquire);
break;
case llvm::AtomicOrdering::Monotonic:
@@ -5461,7 +6079,7 @@ static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF,
case llvm::AtomicOrdering::Release:
case llvm::AtomicOrdering::AcquireRelease:
case llvm::AtomicOrdering::SequentiallyConsistent:
- CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, std::nullopt, Loc,
llvm::AtomicOrdering::Release);
break;
case llvm::AtomicOrdering::Acquire:
@@ -5482,25 +6100,38 @@ static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
// Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x'
// expression is simple and atomic is allowed for the given type for the
// target platform.
- if (BO == BO_Comma || !Update.isScalar() ||
- !Update.getScalarVal()->getType()->isIntegerTy() || !X.isSimple() ||
+ if (BO == BO_Comma || !Update.isScalar() || !X.isSimple() ||
(!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
(Update.getScalarVal()->getType() !=
X.getAddress(CGF).getElementType())) ||
- !X.getAddress(CGF).getElementType()->isIntegerTy() ||
!Context.getTargetInfo().hasBuiltinAtomic(
Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
return std::make_pair(false, RValue::get(nullptr));
+ auto &&CheckAtomicSupport = [&CGF](llvm::Type *T, BinaryOperatorKind BO) {
+ if (T->isIntegerTy())
+ return true;
+
+ if (T->isFloatingPointTy() && (BO == BO_Add || BO == BO_Sub))
+ return llvm::isPowerOf2_64(CGF.CGM.getDataLayout().getTypeStoreSize(T));
+
+ return false;
+ };
+
+ if (!CheckAtomicSupport(Update.getScalarVal()->getType(), BO) ||
+ !CheckAtomicSupport(X.getAddress(CGF).getElementType(), BO))
+ return std::make_pair(false, RValue::get(nullptr));
+
+ bool IsInteger = X.getAddress(CGF).getElementType()->isIntegerTy();
llvm::AtomicRMWInst::BinOp RMWOp;
switch (BO) {
case BO_Add:
- RMWOp = llvm::AtomicRMWInst::Add;
+ RMWOp = IsInteger ? llvm::AtomicRMWInst::Add : llvm::AtomicRMWInst::FAdd;
break;
case BO_Sub:
if (!IsXLHSInRHSPart)
return std::make_pair(false, RValue::get(nullptr));
- RMWOp = llvm::AtomicRMWInst::Sub;
+ RMWOp = IsInteger ? llvm::AtomicRMWInst::Sub : llvm::AtomicRMWInst::FSub;
break;
case BO_And:
RMWOp = llvm::AtomicRMWInst::And;
@@ -5512,18 +6143,26 @@ static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
RMWOp = llvm::AtomicRMWInst::Xor;
break;
case BO_LT:
- RMWOp = X.getType()->hasSignedIntegerRepresentation()
- ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
- : llvm::AtomicRMWInst::Max)
- : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
- : llvm::AtomicRMWInst::UMax);
+ if (IsInteger)
+ RMWOp = X.getType()->hasSignedIntegerRepresentation()
+ ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
+ : llvm::AtomicRMWInst::Max)
+ : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
+ : llvm::AtomicRMWInst::UMax);
+ else
+ RMWOp = IsXLHSInRHSPart ? llvm::AtomicRMWInst::FMin
+ : llvm::AtomicRMWInst::FMax;
break;
case BO_GT:
- RMWOp = X.getType()->hasSignedIntegerRepresentation()
- ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
- : llvm::AtomicRMWInst::Min)
- : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
- : llvm::AtomicRMWInst::UMin);
+ if (IsInteger)
+ RMWOp = X.getType()->hasSignedIntegerRepresentation()
+ ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
+ : llvm::AtomicRMWInst::Min)
+ : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
+ : llvm::AtomicRMWInst::UMin);
+ else
+ RMWOp = IsXLHSInRHSPart ? llvm::AtomicRMWInst::FMax
+ : llvm::AtomicRMWInst::FMin;
break;
case BO_Assign:
RMWOp = llvm::AtomicRMWInst::Xchg;
@@ -5558,12 +6197,16 @@ static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
}
llvm::Value *UpdateVal = Update.getScalarVal();
if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
- UpdateVal = CGF.Builder.CreateIntCast(
- IC, X.getAddress(CGF).getElementType(),
- X.getType()->hasSignedIntegerRepresentation());
+ if (IsInteger)
+ UpdateVal = CGF.Builder.CreateIntCast(
+ IC, X.getAddress(CGF).getElementType(),
+ X.getType()->hasSignedIntegerRepresentation());
+ else
+ UpdateVal = CGF.Builder.CreateCast(llvm::Instruction::CastOps::UIToFP, IC,
+ X.getAddress(CGF).getElementType());
}
llvm::Value *Res =
- CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(CGF), UpdateVal, AO);
+ CGF.Builder.CreateAtomicRMW(RMWOp, X.getAddress(CGF), UpdateVal, AO);
return std::make_pair(true, RValue::get(Res));
}
@@ -5627,7 +6270,7 @@ static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF,
case llvm::AtomicOrdering::Release:
case llvm::AtomicOrdering::AcquireRelease:
case llvm::AtomicOrdering::SequentiallyConsistent:
- CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, std::nullopt, Loc,
llvm::AtomicOrdering::Release);
break;
case llvm::AtomicOrdering::Acquire:
@@ -5742,17 +6385,17 @@ static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF,
// operation is also an acquire flush.
switch (AO) {
case llvm::AtomicOrdering::Release:
- CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, std::nullopt, Loc,
llvm::AtomicOrdering::Release);
break;
case llvm::AtomicOrdering::Acquire:
- CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, std::nullopt, Loc,
llvm::AtomicOrdering::Acquire);
break;
case llvm::AtomicOrdering::AcquireRelease:
case llvm::AtomicOrdering::SequentiallyConsistent:
CGF.CGM.getOpenMPRuntime().emitFlush(
- CGF, llvm::None, Loc, llvm::AtomicOrdering::AcquireRelease);
+ CGF, std::nullopt, Loc, llvm::AtomicOrdering::AcquireRelease);
break;
case llvm::AtomicOrdering::Monotonic:
break;
@@ -5763,11 +6406,94 @@ static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF,
}
}
+static void emitOMPAtomicCompareExpr(
+ CodeGenFunction &CGF, llvm::AtomicOrdering AO, llvm::AtomicOrdering FailAO,
+ const Expr *X, const Expr *V, const Expr *R, const Expr *E, const Expr *D,
+ const Expr *CE, bool IsXBinopExpr, bool IsPostfixUpdate, bool IsFailOnly,
+ SourceLocation Loc) {
+ llvm::OpenMPIRBuilder &OMPBuilder =
+ CGF.CGM.getOpenMPRuntime().getOMPBuilder();
+
+ OMPAtomicCompareOp Op;
+ assert(isa<BinaryOperator>(CE) && "CE is not a BinaryOperator");
+ switch (cast<BinaryOperator>(CE)->getOpcode()) {
+ case BO_EQ:
+ Op = OMPAtomicCompareOp::EQ;
+ break;
+ case BO_LT:
+ Op = OMPAtomicCompareOp::MIN;
+ break;
+ case BO_GT:
+ Op = OMPAtomicCompareOp::MAX;
+ break;
+ default:
+ llvm_unreachable("unsupported atomic compare binary operator");
+ }
+
+ LValue XLVal = CGF.EmitLValue(X);
+ Address XAddr = XLVal.getAddress(CGF);
+
+ auto EmitRValueWithCastIfNeeded = [&CGF, Loc](const Expr *X, const Expr *E) {
+ if (X->getType() == E->getType())
+ return CGF.EmitScalarExpr(E);
+ const Expr *NewE = E->IgnoreImplicitAsWritten();
+ llvm::Value *V = CGF.EmitScalarExpr(NewE);
+ if (NewE->getType() == X->getType())
+ return V;
+ return CGF.EmitScalarConversion(V, NewE->getType(), X->getType(), Loc);
+ };
+
+ llvm::Value *EVal = EmitRValueWithCastIfNeeded(X, E);
+ llvm::Value *DVal = D ? EmitRValueWithCastIfNeeded(X, D) : nullptr;
+ if (auto *CI = dyn_cast<llvm::ConstantInt>(EVal))
+ EVal = CGF.Builder.CreateIntCast(
+ CI, XLVal.getAddress(CGF).getElementType(),
+ E->getType()->hasSignedIntegerRepresentation());
+ if (DVal)
+ if (auto *CI = dyn_cast<llvm::ConstantInt>(DVal))
+ DVal = CGF.Builder.CreateIntCast(
+ CI, XLVal.getAddress(CGF).getElementType(),
+ D->getType()->hasSignedIntegerRepresentation());
+
+ llvm::OpenMPIRBuilder::AtomicOpValue XOpVal{
+ XAddr.getPointer(), XAddr.getElementType(),
+ X->getType()->hasSignedIntegerRepresentation(),
+ X->getType().isVolatileQualified()};
+ llvm::OpenMPIRBuilder::AtomicOpValue VOpVal, ROpVal;
+ if (V) {
+ LValue LV = CGF.EmitLValue(V);
+ Address Addr = LV.getAddress(CGF);
+ VOpVal = {Addr.getPointer(), Addr.getElementType(),
+ V->getType()->hasSignedIntegerRepresentation(),
+ V->getType().isVolatileQualified()};
+ }
+ if (R) {
+ LValue LV = CGF.EmitLValue(R);
+ Address Addr = LV.getAddress(CGF);
+ ROpVal = {Addr.getPointer(), Addr.getElementType(),
+ R->getType()->hasSignedIntegerRepresentation(),
+ R->getType().isVolatileQualified()};
+ }
+
+ if (FailAO == llvm::AtomicOrdering::NotAtomic) {
+ // fail clause was not mentionend on the
+ // "#pragma omp atomic compare" construct.
+ CGF.Builder.restoreIP(OMPBuilder.createAtomicCompare(
+ CGF.Builder, XOpVal, VOpVal, ROpVal, EVal, DVal, AO, Op, IsXBinopExpr,
+ IsPostfixUpdate, IsFailOnly));
+ } else
+ CGF.Builder.restoreIP(OMPBuilder.createAtomicCompare(
+ CGF.Builder, XOpVal, VOpVal, ROpVal, EVal, DVal, AO, Op, IsXBinopExpr,
+ IsPostfixUpdate, IsFailOnly, FailAO));
+}
+
static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
- llvm::AtomicOrdering AO, bool IsPostfixUpdate,
- const Expr *X, const Expr *V, const Expr *E,
- const Expr *UE, bool IsXLHSInRHSPart,
- SourceLocation Loc) {
+ llvm::AtomicOrdering AO,
+ llvm::AtomicOrdering FailAO, bool IsPostfixUpdate,
+ const Expr *X, const Expr *V, const Expr *R,
+ const Expr *E, const Expr *UE, const Expr *D,
+ const Expr *CE, bool IsXLHSInRHSPart,
+ bool IsFailOnly, SourceLocation Loc) {
switch (Kind) {
case OMPC_read:
emitOMPAtomicReadExpr(CGF, AO, X, V, Loc);
@@ -5783,92 +6509,20 @@ static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
emitOMPAtomicCaptureExpr(CGF, AO, IsPostfixUpdate, V, X, E, UE,
IsXLHSInRHSPart, Loc);
break;
- case OMPC_if:
- case OMPC_final:
- case OMPC_num_threads:
- case OMPC_private:
- case OMPC_firstprivate:
- case OMPC_lastprivate:
- case OMPC_reduction:
- case OMPC_task_reduction:
- case OMPC_in_reduction:
- case OMPC_safelen:
- case OMPC_simdlen:
- case OMPC_sizes:
- case OMPC_full:
- case OMPC_partial:
- case OMPC_allocator:
- case OMPC_allocate:
- case OMPC_collapse:
- case OMPC_default:
- case OMPC_seq_cst:
- case OMPC_acq_rel:
- case OMPC_acquire:
- case OMPC_release:
- case OMPC_relaxed:
- case OMPC_shared:
- case OMPC_linear:
- case OMPC_aligned:
- case OMPC_copyin:
- case OMPC_copyprivate:
- case OMPC_flush:
- case OMPC_depobj:
- case OMPC_proc_bind:
- case OMPC_schedule:
- case OMPC_ordered:
- case OMPC_nowait:
- case OMPC_untied:
- case OMPC_threadprivate:
- case OMPC_depend:
- case OMPC_mergeable:
- case OMPC_device:
- case OMPC_threads:
- case OMPC_simd:
- case OMPC_map:
- case OMPC_num_teams:
- case OMPC_thread_limit:
- case OMPC_priority:
- case OMPC_grainsize:
- case OMPC_nogroup:
- case OMPC_num_tasks:
- case OMPC_hint:
- case OMPC_dist_schedule:
- case OMPC_defaultmap:
- case OMPC_uniform:
- case OMPC_to:
- case OMPC_from:
- case OMPC_use_device_ptr:
- case OMPC_use_device_addr:
- case OMPC_is_device_ptr:
- case OMPC_unified_address:
- case OMPC_unified_shared_memory:
- case OMPC_reverse_offload:
- case OMPC_dynamic_allocators:
- case OMPC_atomic_default_mem_order:
- case OMPC_device_type:
- case OMPC_match:
- case OMPC_nontemporal:
- case OMPC_order:
- case OMPC_destroy:
- case OMPC_detach:
- case OMPC_inclusive:
- case OMPC_exclusive:
- case OMPC_uses_allocators:
- case OMPC_affinity:
- case OMPC_init:
- case OMPC_inbranch:
- case OMPC_notinbranch:
- case OMPC_link:
- case OMPC_use:
- case OMPC_novariants:
- case OMPC_nocontext:
- case OMPC_filter:
+ case OMPC_compare: {
+ emitOMPAtomicCompareExpr(CGF, AO, FailAO, X, V, R, E, D, CE,
+ IsXLHSInRHSPart, IsPostfixUpdate, IsFailOnly, Loc);
+ break;
+ }
+ default:
llvm_unreachable("Clause is not allowed in 'omp atomic'.");
}
}
void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
llvm::AtomicOrdering AO = llvm::AtomicOrdering::Monotonic;
+ // Fail Memory Clause Ordering.
+ llvm::AtomicOrdering FailAO = llvm::AtomicOrdering::NotAtomic;
bool MemOrderingSpecified = false;
if (S.getSingleClause<OMPSeqCstClause>()) {
AO = llvm::AtomicOrdering::SequentiallyConsistent;
@@ -5886,19 +6540,24 @@ void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
AO = llvm::AtomicOrdering::Monotonic;
MemOrderingSpecified = true;
}
+ llvm::SmallSet<OpenMPClauseKind, 2> KindsEncountered;
OpenMPClauseKind Kind = OMPC_unknown;
for (const OMPClause *C : S.clauses()) {
// Find first clause (skip seq_cst|acq_rel|aqcuire|release|relaxed clause,
// if it is first).
- if (C->getClauseKind() != OMPC_seq_cst &&
- C->getClauseKind() != OMPC_acq_rel &&
- C->getClauseKind() != OMPC_acquire &&
- C->getClauseKind() != OMPC_release &&
- C->getClauseKind() != OMPC_relaxed && C->getClauseKind() != OMPC_hint) {
- Kind = C->getClauseKind();
- break;
- }
- }
+ OpenMPClauseKind K = C->getClauseKind();
+ if (K == OMPC_seq_cst || K == OMPC_acq_rel || K == OMPC_acquire ||
+ K == OMPC_release || K == OMPC_relaxed || K == OMPC_hint)
+ continue;
+ Kind = K;
+ KindsEncountered.insert(K);
+ }
+ // We just need to correct Kind here. No need to set a bool saying it is
+ // actually compare capture because we can tell from whether V and R are
+ // nullptr.
+ if (KindsEncountered.contains(OMPC_compare) &&
+ KindsEncountered.contains(OMPC_capture))
+ Kind = OMPC_compare;
if (!MemOrderingSpecified) {
llvm::AtomicOrdering DefaultOrder =
CGM.getOpenMPRuntime().getDefaultMemoryOrdering();
@@ -5917,11 +6576,27 @@ void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
}
}
+ if (KindsEncountered.contains(OMPC_compare) &&
+ KindsEncountered.contains(OMPC_fail)) {
+ Kind = OMPC_compare;
+ const auto *FailClause = S.getSingleClause<OMPFailClause>();
+ if (FailClause) {
+ OpenMPClauseKind FailParameter = FailClause->getFailParameter();
+ if (FailParameter == llvm::omp::OMPC_relaxed)
+ FailAO = llvm::AtomicOrdering::Monotonic;
+ else if (FailParameter == llvm::omp::OMPC_acquire)
+ FailAO = llvm::AtomicOrdering::Acquire;
+ else if (FailParameter == llvm::omp::OMPC_seq_cst)
+ FailAO = llvm::AtomicOrdering::SequentiallyConsistent;
+ }
+ }
+
LexicalScope Scope(*this, S.getSourceRange());
EmitStopPoint(S.getAssociatedStmt());
- emitOMPAtomicExpr(*this, Kind, AO, S.isPostfixUpdate(), S.getX(), S.getV(),
- S.getExpr(), S.getUpdateExpr(), S.isXLHSInRHSPart(),
- S.getBeginLoc());
+ emitOMPAtomicExpr(*this, Kind, AO, FailAO, S.isPostfixUpdate(), S.getX(),
+ S.getV(), S.getR(), S.getExpr(), S.getUpdateExpr(),
+ S.getD(), S.getCondExpr(), S.isXLHSInRHSPart(),
+ S.isFailOnly(), S.getBeginLoc());
}
static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
@@ -5931,7 +6606,7 @@ static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
CodeGenModule &CGM = CGF.CGM;
// On device emit this construct as inlined code.
- if (CGM.getLangOpts().OpenMPIsDevice) {
+ if (CGM.getLangOpts().OpenMPIsTargetDevice) {
OMPLexicalScope Scope(CGF, S, OMPD_target);
CGM.getOpenMPRuntime().emitInlinedDirective(
CGF, OMPD_target, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
@@ -5940,8 +6615,7 @@ static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
return;
}
- auto LPCRegion =
- CGOpenMPRuntime::LastprivateConditionalRAII::disable(CGF, S);
+ auto LPCRegion = CGOpenMPRuntime::LastprivateConditionalRAII::disable(CGF, S);
llvm::Function *Fn = nullptr;
llvm::Constant *FnID = nullptr;
@@ -5973,6 +6647,13 @@ static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
if (CGM.getLangOpts().OMPTargetTriples.empty())
IsOffloadEntry = false;
+ if (CGM.getLangOpts().OpenMPOffloadMandatory && !IsOffloadEntry) {
+ unsigned DiagID = CGM.getDiags().getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "No offloading entry generated while offloading is mandatory.");
+ CGM.getDiags().Report(DiagID);
+ }
+
assert(CGF.CurFuncDecl && "No parent declaration for target region!");
StringRef ParentName;
// In case we have Ctors/Dtors we use the complete type variant to produce
@@ -6048,7 +6729,8 @@ static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams);
llvm::Function *OutlinedFn =
CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction(
- S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
+ CGF, S, *CS->getCapturedDecl()->param_begin(), InnermostKind,
+ CodeGen);
const auto *NT = S.getSingleClause<OMPNumTeamsClause>();
const auto *TL = S.getSingleClause<OMPThreadLimitClause>();
@@ -6312,6 +6994,60 @@ void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective(
[](CodeGenFunction &) { return nullptr; });
}
+void CodeGenFunction::EmitOMPInteropDirective(const OMPInteropDirective &S) {
+ llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
+ llvm::Value *Device = nullptr;
+ llvm::Value *NumDependences = nullptr;
+ llvm::Value *DependenceList = nullptr;
+
+ if (const auto *C = S.getSingleClause<OMPDeviceClause>())
+ Device = EmitScalarExpr(C->getDevice());
+
+ // Build list and emit dependences
+ OMPTaskDataTy Data;
+ buildDependences(S, Data);
+ if (!Data.Dependences.empty()) {
+ Address DependenciesArray = Address::invalid();
+ std::tie(NumDependences, DependenciesArray) =
+ CGM.getOpenMPRuntime().emitDependClause(*this, Data.Dependences,
+ S.getBeginLoc());
+ DependenceList = DependenciesArray.getPointer();
+ }
+ Data.HasNowaitClause = S.hasClausesOfKind<OMPNowaitClause>();
+
+ assert(!(Data.HasNowaitClause && !(S.getSingleClause<OMPInitClause>() ||
+ S.getSingleClause<OMPDestroyClause>() ||
+ S.getSingleClause<OMPUseClause>())) &&
+ "OMPNowaitClause clause is used separately in OMPInteropDirective.");
+
+ if (const auto *C = S.getSingleClause<OMPInitClause>()) {
+ llvm::Value *InteropvarPtr =
+ EmitLValue(C->getInteropVar()).getPointer(*this);
+ llvm::omp::OMPInteropType InteropType = llvm::omp::OMPInteropType::Unknown;
+ if (C->getIsTarget()) {
+ InteropType = llvm::omp::OMPInteropType::Target;
+ } else {
+ assert(C->getIsTargetSync() && "Expected interop-type target/targetsync");
+ InteropType = llvm::omp::OMPInteropType::TargetSync;
+ }
+ OMPBuilder.createOMPInteropInit(Builder, InteropvarPtr, InteropType, Device,
+ NumDependences, DependenceList,
+ Data.HasNowaitClause);
+ } else if (const auto *C = S.getSingleClause<OMPDestroyClause>()) {
+ llvm::Value *InteropvarPtr =
+ EmitLValue(C->getInteropVar()).getPointer(*this);
+ OMPBuilder.createOMPInteropDestroy(Builder, InteropvarPtr, Device,
+ NumDependences, DependenceList,
+ Data.HasNowaitClause);
+ } else if (const auto *C = S.getSingleClause<OMPUseClause>()) {
+ llvm::Value *InteropvarPtr =
+ EmitLValue(C->getInteropVar()).getPointer(*this);
+ OMPBuilder.createOMPInteropUse(Builder, InteropvarPtr, Device,
+ NumDependences, DependenceList,
+ Data.HasNowaitClause);
+ }
+}
+
static void emitTargetTeamsDistributeParallelForRegion(
CodeGenFunction &CGF, const OMPTargetTeamsDistributeParallelForDirective &S,
PrePostActionTy &Action) {
@@ -6467,13 +7203,13 @@ CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
void CodeGenFunction::EmitOMPUseDevicePtrClause(
const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
- const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
- auto OrigVarIt = C.varlist_begin();
- auto InitIt = C.inits().begin();
- for (const Expr *PvtVarIt : C.private_copies()) {
- const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl());
- const auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl());
- const auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl());
+ const llvm::DenseMap<const ValueDecl *, llvm::Value *>
+ CaptureDeviceAddrMap) {
+ llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
+ for (const Expr *OrigVarIt : C.varlists()) {
+ const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(OrigVarIt)->getDecl());
+ if (!Processed.insert(OrigVD).second)
+ continue;
// In order to identify the right initializer we need to match the
// declaration used by the mapping logic. In some cases we may get
@@ -6483,7 +7219,7 @@ void CodeGenFunction::EmitOMPUseDevicePtrClause(
// OMPCapturedExprDecl are used to privative fields of the current
// structure.
const auto *ME = cast<MemberExpr>(OED->getInit());
- assert(isa<CXXThisExpr>(ME->getBase()) &&
+ assert(isa<CXXThisExpr>(ME->getBase()->IgnoreImpCasts()) &&
"Base should be the current struct!");
MatchingVD = ME->getMemberDecl();
}
@@ -6494,37 +7230,16 @@ void CodeGenFunction::EmitOMPUseDevicePtrClause(
if (InitAddrIt == CaptureDeviceAddrMap.end())
continue;
- bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, OrigVD,
- InitAddrIt, InitVD,
- PvtVD]() {
- // Initialize the temporary initialization variable with the address we
- // get from the runtime library. We have to cast the source address
- // because it is always a void *. References are materialized in the
- // privatization scope, so the initialization here disregards the fact
- // the original variable is a reference.
- QualType AddrQTy =
- getContext().getPointerType(OrigVD->getType().getNonReferenceType());
- llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy);
- Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy);
- setAddrOfLocalVar(InitVD, InitAddr);
-
- // Emit private declaration, it will be initialized by the value we
- // declaration we just added to the local declarations map.
- EmitDecl(*PvtVD);
-
- // The initialization variables reached its purpose in the emission
- // of the previous declaration, so we don't need it anymore.
- LocalDeclMap.erase(InitVD);
-
- // Return the address of the private variable.
- return GetAddrOfLocalVar(PvtVD);
- });
+ llvm::Type *Ty = ConvertTypeForMem(OrigVD->getType().getNonReferenceType());
+
+ // Return the address of the private variable.
+ bool IsRegistered = PrivateScope.addPrivate(
+ OrigVD,
+ Address(InitAddrIt->second, Ty,
+ getContext().getTypeAlignInChars(getContext().VoidPtrTy)));
assert(IsRegistered && "firstprivate var already registered as private");
// Silence the warning about unused variable.
(void)IsRegistered;
-
- ++OrigVarIt;
- ++InitIt;
}
}
@@ -6539,7 +7254,8 @@ static const VarDecl *getBaseDecl(const Expr *Ref) {
void CodeGenFunction::EmitOMPUseDeviceAddrClause(
const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
- const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
+ const llvm::DenseMap<const ValueDecl *, llvm::Value *>
+ CaptureDeviceAddrMap) {
llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
for (const Expr *Ref : C.varlists()) {
const VarDecl *OrigVD = getBaseDecl(Ref);
@@ -6564,21 +7280,23 @@ void CodeGenFunction::EmitOMPUseDeviceAddrClause(
if (InitAddrIt == CaptureDeviceAddrMap.end())
continue;
- Address PrivAddr = InitAddrIt->getSecond();
+ llvm::Type *Ty = ConvertTypeForMem(OrigVD->getType().getNonReferenceType());
+
+ Address PrivAddr =
+ Address(InitAddrIt->second, Ty,
+ getContext().getTypeAlignInChars(getContext().VoidPtrTy));
// For declrefs and variable length array need to load the pointer for
// correct mapping, since the pointer to the data was passed to the runtime.
if (isa<DeclRefExpr>(Ref->IgnoreParenImpCasts()) ||
- MatchingVD->getType()->isArrayType())
+ MatchingVD->getType()->isArrayType()) {
+ QualType PtrTy = getContext().getPointerType(
+ OrigVD->getType().getNonReferenceType());
PrivAddr =
- EmitLoadOfPointer(PrivAddr, getContext()
- .getPointerType(OrigVD->getType())
- ->castAs<PointerType>());
- llvm::Type *RealTy =
- ConvertTypeForMem(OrigVD->getType().getNonReferenceType())
- ->getPointerTo();
- PrivAddr = Builder.CreatePointerBitCastOrAddrSpaceCast(PrivAddr, RealTy);
+ EmitLoadOfPointer(PrivAddr.withElementType(ConvertTypeForMem(PtrTy)),
+ PtrTy->castAs<PointerType>());
+ }
- (void)PrivateScope.addPrivate(OrigVD, [PrivAddr]() { return PrivAddr; });
+ (void)PrivateScope.addPrivate(OrigVD, PrivAddr);
}
}
@@ -6597,23 +7315,20 @@ void CodeGenFunction::EmitOMPTargetDataDirective(
public:
explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
- : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {}
+ : PrivatizeDevicePointers(PrivatizeDevicePointers) {}
void Enter(CodeGenFunction &CGF) override {
PrivatizeDevicePointers = true;
}
};
DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);
- auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers](
- CodeGenFunction &CGF, PrePostActionTy &Action) {
+ auto &&CodeGen = [&](CodeGenFunction &CGF, PrePostActionTy &Action) {
auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
};
// Codegen that selects whether to generate the privatization code or not.
- auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers,
- &InnermostCodeGen](CodeGenFunction &CGF,
- PrePostActionTy &Action) {
+ auto &&PrivCodeGen = [&](CodeGenFunction &CGF, PrePostActionTy &Action) {
RegionCodeGenTy RCG(InnermostCodeGen);
PrivatizeDevicePointers = false;
@@ -6633,7 +7348,28 @@ void CodeGenFunction::EmitOMPTargetDataDirective(
(void)PrivateScope.Privatize();
RCG(CGF);
} else {
- OMPLexicalScope Scope(CGF, S, OMPD_unknown);
+ // If we don't have target devices, don't bother emitting the data
+ // mapping code.
+ std::optional<OpenMPDirectiveKind> CaptureRegion;
+ if (CGM.getLangOpts().OMPTargetTriples.empty()) {
+ // Emit helper decls of the use_device_ptr/use_device_addr clauses.
+ for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
+ for (const Expr *E : C->varlists()) {
+ const Decl *D = cast<DeclRefExpr>(E)->getDecl();
+ if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
+ CGF.EmitVarDecl(*OED);
+ }
+ for (const auto *C : S.getClausesOfKind<OMPUseDeviceAddrClause>())
+ for (const Expr *E : C->varlists()) {
+ const Decl *D = getBaseDecl(E);
+ if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
+ CGF.EmitVarDecl(*OED);
+ }
+ } else {
+ CaptureRegion = OMPD_unknown;
+ }
+
+ OMPLexicalScope Scope(CGF, S, CaptureRegion);
RCG(CGF);
}
};
@@ -6850,8 +7586,7 @@ static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper,
const ImplicitParamDecl *PVD,
CodeGenFunction::OMPPrivateScope &Privates) {
const auto *VDecl = cast<VarDecl>(Helper->getDecl());
- Privates.addPrivate(VDecl,
- [&CGF, PVD]() { return CGF.GetAddrOfLocalVar(PVD); });
+ Privates.addPrivate(VDecl, CGF.GetAddrOfLocalVar(PVD));
}
void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
@@ -6879,11 +7614,11 @@ void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
// TODO: Check if we should emit tied or untied task.
Data.Tied = true;
// Set scheduling for taskloop
- if (const auto* Clause = S.getSingleClause<OMPGrainsizeClause>()) {
+ if (const auto *Clause = S.getSingleClause<OMPGrainsizeClause>()) {
// grainsize clause
Data.Schedule.setInt(/*IntVal=*/false);
Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize()));
- } else if (const auto* Clause = S.getSingleClause<OMPNumTasksClause>()) {
+ } else if (const auto *Clause = S.getSingleClause<OMPNumTasksClause>()) {
// num_tasks clause
Data.Schedule.setInt(/*IntVal=*/true);
Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks()));
@@ -6981,6 +7716,7 @@ void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
(*LIP)->getType(), S.getBeginLoc())));
}
+ LoopScope.restoreMap();
CGF.EmitOMPLinearClauseFinal(S, [LIP, &S](CodeGenFunction &CGF) {
return CGF.Builder.CreateIsNotNull(
CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
@@ -7037,7 +7773,7 @@ void CodeGenFunction::EmitOMPMasterTaskLoopDirective(
};
auto LPCRegion =
CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
- OMPLexicalScope Scope(*this, S, llvm::None, /*EmitPreInitStmt=*/false);
+ OMPLexicalScope Scope(*this, S, std::nullopt, /*EmitPreInitStmt=*/false);
CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc());
}
@@ -7111,6 +7847,170 @@ void CodeGenFunction::EmitOMPTargetUpdateDirective(
CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
}
+void CodeGenFunction::EmitOMPGenericLoopDirective(
+ const OMPGenericLoopDirective &S) {
+ // Unimplemented, just inline the underlying statement for now.
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ // Emit the loop iteration variable.
+ const Stmt *CS =
+ cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt();
+ const auto *ForS = dyn_cast<ForStmt>(CS);
+ if (ForS && !isa<DeclStmt>(ForS->getInit())) {
+ OMPPrivateScope LoopScope(CGF);
+ CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
+ (void)LoopScope.Privatize();
+ CGF.EmitStmt(CS);
+ LoopScope.restoreMap();
+ } else {
+ CGF.EmitStmt(CS);
+ }
+ };
+ OMPLexicalScope Scope(*this, S, OMPD_unknown);
+ CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_loop, CodeGen);
+}
+
+void CodeGenFunction::EmitOMPParallelGenericLoopDirective(
+ const OMPLoopDirective &S) {
+ // Emit combined directive as if its consituent constructs are 'parallel'
+ // and 'for'.
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ Action.Enter(CGF);
+ emitOMPCopyinClause(CGF, S);
+ (void)emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
+ };
+ {
+ auto LPCRegion =
+ CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
+ emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
+ emitEmptyBoundParameters);
+ }
+ // Check for outer lastprivate conditional update.
+ checkForLastprivateConditionalUpdate(*this, S);
+}
+
+void CodeGenFunction::EmitOMPTeamsGenericLoopDirective(
+ const OMPTeamsGenericLoopDirective &S) {
+ // To be consistent with current behavior of 'target teams loop', emit
+ // 'teams loop' as if its constituent constructs are 'distribute,
+ // 'parallel, and 'for'.
+ auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
+ S.getDistInc());
+ };
+
+ // Emit teams region as a standalone region.
+ auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
+ PrePostActionTy &Action) {
+ Action.Enter(CGF);
+ OMPPrivateScope PrivateScope(CGF);
+ CGF.EmitOMPReductionClauseInit(S, PrivateScope);
+ (void)PrivateScope.Privatize();
+ CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
+ CodeGenDistribute);
+ CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
+ };
+ emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen);
+ emitPostUpdateForReductionClause(*this, S,
+ [](CodeGenFunction &) { return nullptr; });
+}
+
+static void
+emitTargetTeamsGenericLoopRegion(CodeGenFunction &CGF,
+ const OMPTargetTeamsGenericLoopDirective &S,
+ PrePostActionTy &Action) {
+ Action.Enter(CGF);
+ // Emit 'teams loop' as if its constituent constructs are 'distribute,
+ // 'parallel, and 'for'.
+ auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
+ S.getDistInc());
+ };
+
+ // Emit teams region as a standalone region.
+ auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
+ PrePostActionTy &Action) {
+ Action.Enter(CGF);
+ CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
+ CGF.EmitOMPReductionClauseInit(S, PrivateScope);
+ (void)PrivateScope.Privatize();
+ CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
+ CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
+ CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
+ };
+
+ emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for,
+ CodeGenTeams);
+ emitPostUpdateForReductionClause(CGF, S,
+ [](CodeGenFunction &) { return nullptr; });
+}
+
+/// Emit combined directive 'target teams loop' as if its constituent
+/// constructs are 'target', 'teams', 'distribute', 'parallel', and 'for'.
+void CodeGenFunction::EmitOMPTargetTeamsGenericLoopDirective(
+ const OMPTargetTeamsGenericLoopDirective &S) {
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ emitTargetTeamsGenericLoopRegion(CGF, S, Action);
+ };
+ emitCommonOMPTargetDirective(*this, S, CodeGen);
+}
+
+void CodeGenFunction::EmitOMPTargetTeamsGenericLoopDeviceFunction(
+ CodeGenModule &CGM, StringRef ParentName,
+ const OMPTargetTeamsGenericLoopDirective &S) {
+ // Emit SPMD target parallel loop region as a standalone region.
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ emitTargetTeamsGenericLoopRegion(CGF, S, Action);
+ };
+ llvm::Function *Fn;
+ llvm::Constant *Addr;
+ // Emit target region as a standalone region.
+ CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
+ S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
+ assert(Fn && Addr &&
+ "Target device function emission failed for 'target teams loop'.");
+}
+
+static void emitTargetParallelGenericLoopRegion(
+ CodeGenFunction &CGF, const OMPTargetParallelGenericLoopDirective &S,
+ PrePostActionTy &Action) {
+ Action.Enter(CGF);
+ // Emit as 'parallel for'.
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ Action.Enter(CGF);
+ CodeGenFunction::OMPCancelStackRAII CancelRegion(
+ CGF, OMPD_target_parallel_loop, /*hasCancel=*/false);
+ CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
+ emitDispatchForLoopBounds);
+ };
+ emitCommonOMPParallelDirective(CGF, S, OMPD_for, CodeGen,
+ emitEmptyBoundParameters);
+}
+
+void CodeGenFunction::EmitOMPTargetParallelGenericLoopDeviceFunction(
+ CodeGenModule &CGM, StringRef ParentName,
+ const OMPTargetParallelGenericLoopDirective &S) {
+ // Emit target parallel loop region as a standalone region.
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ emitTargetParallelGenericLoopRegion(CGF, S, Action);
+ };
+ llvm::Function *Fn;
+ llvm::Constant *Addr;
+ // Emit target region as a standalone region.
+ CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
+ S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
+ assert(Fn && Addr && "Target device function emission failed.");
+}
+
+/// Emit combined directive 'target parallel loop' as if its constituent
+/// constructs are 'target', 'parallel', and 'for'.
+void CodeGenFunction::EmitOMPTargetParallelGenericLoopDirective(
+ const OMPTargetParallelGenericLoopDirective &S) {
+ auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ emitTargetParallelGenericLoopRegion(CGF, S, Action);
+ };
+ emitCommonOMPTargetDirective(*this, S, CodeGen);
+}
+
void CodeGenFunction::EmitSimpleOMPExecutableDirective(
const OMPExecutableDirective &D) {
if (const auto *SD = dyn_cast<OMPScanDirective>(&D)) {
@@ -7133,8 +8033,7 @@ void CodeGenFunction::EmitSimpleOMPExecutableDirective(
continue;
if (!CGF.LocalDeclMap.count(VD)) {
LValue GlobLVal = CGF.EmitLValue(Ref);
- GlobalsScope.addPrivate(
- VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
+ GlobalsScope.addPrivate(VD, GlobLVal.getAddress(CGF));
}
}
}
@@ -7149,8 +8048,7 @@ void CodeGenFunction::EmitSimpleOMPExecutableDirective(
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
LValue GlobLVal = CGF.EmitLValue(E);
- GlobalsScope.addPrivate(
- VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
+ GlobalsScope.addPrivate(VD, GlobLVal.getAddress(CGF));
}
if (isa<OMPCapturedExprDecl>(VD)) {
// Emit only those that were not explicitly referenced in clauses.
@@ -7181,7 +8079,8 @@ void CodeGenFunction::EmitSimpleOMPExecutableDirective(
D.getDirectiveKind() == OMPD_critical ||
D.getDirectiveKind() == OMPD_section ||
D.getDirectiveKind() == OMPD_master ||
- D.getDirectiveKind() == OMPD_masked) {
+ D.getDirectiveKind() == OMPD_masked ||
+ D.getDirectiveKind() == OMPD_unroll) {
EmitStmt(D.getAssociatedStmt());
} else {
auto LPCRegion =
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGVTT.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGVTT.cpp
index 564d9f354e64..1d3f14f1c534 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGVTT.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGVTT.cpp
@@ -42,8 +42,8 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
llvm::GlobalVariable::LinkageTypes Linkage,
const CXXRecordDecl *RD) {
VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/true);
- llvm::ArrayType *ArrayType =
- llvm::ArrayType::get(CGM.Int8PtrTy, Builder.getVTTComponents().size());
+ llvm::ArrayType *ArrayType = llvm::ArrayType::get(
+ CGM.GlobalsInt8PtrTy, Builder.getVTTComponents().size());
SmallVector<llvm::GlobalVariable *, 8> VTables;
SmallVector<VTableAddressPointsMapTy, 8> VTableAddressPoints;
@@ -81,9 +81,6 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
VTable->getValueType(), VTable, Idxs, /*InBounds=*/true,
/*InRangeIndex=*/1);
- Init = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(Init,
- CGM.Int8PtrTy);
-
VTTComponents.push_back(Init);
}
@@ -97,7 +94,9 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
if (CGM.supportsCOMDAT() && VTT->isWeakForLinker())
VTT->setComdat(CGM.getModule().getOrInsertComdat(VTT->getName()));
- // Set the right visibility.
+ // Set the visibility. This will already have been set on the VTT declaration.
+ // Set it again, now that we have a definition, as the implicit visibility can
+ // apply differently to definitions.
CGM.setGVProperties(VTT, RD);
}
@@ -115,13 +114,14 @@ llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/false);
- llvm::ArrayType *ArrayType =
- llvm::ArrayType::get(CGM.Int8PtrTy, Builder.getVTTComponents().size());
- unsigned Align = CGM.getDataLayout().getABITypeAlignment(CGM.Int8PtrTy);
+ llvm::ArrayType *ArrayType = llvm::ArrayType::get(
+ CGM.GlobalsInt8PtrTy, Builder.getVTTComponents().size());
+ llvm::Align Align = CGM.getDataLayout().getABITypeAlign(CGM.GlobalsInt8PtrTy);
llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
Name, ArrayType, llvm::GlobalValue::ExternalLinkage, Align);
GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ CGM.setGVProperties(GV, RD);
return GV;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp
index 9eb650814238..8dee3f74b44b 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp
@@ -24,6 +24,7 @@
#include "llvm/Transforms/Utils/Cloning.h"
#include <algorithm>
#include <cstdio>
+#include <utility>
using namespace clang;
using namespace CodeGen;
@@ -90,9 +91,11 @@ static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
auto ClassDecl = ResultType->getPointeeType()->getAsCXXRecordDecl();
auto ClassAlign = CGF.CGM.getClassPointerAlignment(ClassDecl);
- ReturnValue = CGF.CGM.getCXXABI().performReturnAdjustment(CGF,
- Address(ReturnValue, ClassAlign),
- Thunk.Return);
+ ReturnValue = CGF.CGM.getCXXABI().performReturnAdjustment(
+ CGF,
+ Address(ReturnValue, CGF.ConvertTypeForMem(ResultType->getPointeeType()),
+ ClassAlign),
+ Thunk.Return);
if (NullCheckValue) {
CGF.Builder.CreateBr(AdjustEnd);
@@ -126,7 +129,7 @@ static void resolveTopLevelMetadata(llvm::Function *Fn,
// Find all llvm.dbg.declare intrinsics and resolve the DILocalVariable nodes
// they are referencing.
- for (auto &BB : Fn->getBasicBlockList()) {
+ for (auto &BB : *Fn) {
for (auto &I : BB) {
if (auto *DII = dyn_cast<llvm::DbgVariableIntrinsic>(&I)) {
auto *DILocal = DII->getVariable();
@@ -198,10 +201,12 @@ CodeGenFunction::GenerateVarArgsThunk(llvm::Function *Fn,
// Find the first store of "this", which will be to the alloca associated
// with "this".
- Address ThisPtr(&*AI, CGM.getClassPointerAlignment(MD->getParent()));
+ Address ThisPtr =
+ Address(&*AI, ConvertTypeForMem(MD->getFunctionObjectParameterType()),
+ CGM.getClassPointerAlignment(MD->getParent()));
llvm::BasicBlock *EntryBB = &Fn->front();
llvm::BasicBlock::iterator ThisStore =
- std::find_if(EntryBB->begin(), EntryBB->end(), [&](llvm::Instruction &I) {
+ llvm::find_if(*EntryBB, [&](llvm::Instruction &I) {
return isa<llvm::StoreInst>(I) &&
I.getOperand(0) == ThisPtr.getPointer();
});
@@ -396,9 +401,7 @@ void CodeGenFunction::EmitMustTailThunk(GlobalDecl GD,
// to translate AST arguments into LLVM IR arguments. For thunks, we know
// that the caller prototype more or less matches the callee prototype with
// the exception of 'this'.
- SmallVector<llvm::Value *, 8> Args;
- for (llvm::Argument &A : CurFn->args())
- Args.push_back(&A);
+ SmallVector<llvm::Value *, 8> Args(llvm::make_pointer_range(CurFn->args()));
// Set the adjusted 'this' pointer.
const ABIArgInfo &ThisAI = CurFnInfo->arg_begin()->info;
@@ -462,10 +465,6 @@ void CodeGenFunction::generateThunk(llvm::Function *Fn,
llvm::Constant *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
- // Fix up the function type for an unprototyped musttail call.
- if (IsUnprototyped)
- Callee = llvm::ConstantExpr::getBitCast(Callee, Fn->getType());
-
// Make the call and return the result.
EmitCallAndReturnForThunk(llvm::FunctionCallee(Fn->getFunctionType(), Callee),
&Thunk, IsUnprototyped);
@@ -534,11 +533,8 @@ llvm::Constant *CodeGenVTables::maybeEmitThunk(GlobalDecl GD,
Name.str(), &CGM.getModule());
CGM.SetLLVMFunctionAttributes(MD, FnInfo, ThunkFn, /*IsThunk=*/false);
- // If needed, replace the old thunk with a bitcast.
if (!OldThunkFn->use_empty()) {
- llvm::Constant *NewPtrForOldDecl =
- llvm::ConstantExpr::getBitCast(ThunkFn, OldThunkFn->getType());
- OldThunkFn->replaceAllUsesWith(NewPtrForOldDecl);
+ OldThunkFn->replaceAllUsesWith(ThunkFn);
}
// Remove the old thunk.
@@ -637,8 +633,16 @@ void CodeGenVTables::addRelativeComponent(ConstantArrayBuilder &builder,
// want the stub/proxy to be emitted for properly calculating the offset.
// Examples where there would be no symbol emitted are available_externally
// and private linkages.
- auto stubLinkage = vtableHasLocalLinkage ? llvm::GlobalValue::InternalLinkage
- : llvm::GlobalValue::ExternalLinkage;
+ //
+ // `internal` linkage results in STB_LOCAL Elf binding while still manifesting a
+ // local symbol.
+ //
+ // `linkonce_odr` linkage results in a STB_DEFAULT Elf binding but also allows for
+ // the rtti_proxy to be transparently replaced with a GOTPCREL reloc by a
+ // target that supports this replacement.
+ auto stubLinkage = vtableHasLocalLinkage
+ ? llvm::GlobalValue::InternalLinkage
+ : llvm::GlobalValue::LinkOnceODRLinkage;
llvm::Constant *target;
if (auto *func = dyn_cast<llvm::Function>(globalVal)) {
@@ -662,6 +666,12 @@ void CodeGenVTables::addRelativeComponent(ConstantArrayBuilder &builder,
proxy->setVisibility(llvm::GlobalValue::HiddenVisibility);
proxy->setComdat(module.getOrInsertComdat(rttiProxyName));
}
+ // Do not instrument the rtti proxies with hwasan to avoid a duplicate
+ // symbol error. Aliases generated by hwasan will retain the same namebut
+ // the addresses they are set to may have different tags from different
+ // compilation units. We don't run into this without hwasan because the
+ // proxies are in comdat groups, but those aren't propagated to the alias.
+ RemoveHwasanMetadata(proxy);
}
target = proxy;
}
@@ -670,15 +680,23 @@ void CodeGenVTables::addRelativeComponent(ConstantArrayBuilder &builder,
/*position=*/vtableAddressPoint);
}
-bool CodeGenVTables::useRelativeLayout() const {
+static bool UseRelativeLayout(const CodeGenModule &CGM) {
return CGM.getTarget().getCXXABI().isItaniumFamily() &&
CGM.getItaniumVTableContext().isRelativeLayout();
}
+bool CodeGenVTables::useRelativeLayout() const {
+ return UseRelativeLayout(CGM);
+}
+
+llvm::Type *CodeGenModule::getVTableComponentType() const {
+ if (UseRelativeLayout(*this))
+ return Int32Ty;
+ return GlobalsInt8PtrTy;
+}
+
llvm::Type *CodeGenVTables::getVTableComponentType() const {
- if (useRelativeLayout())
- return CGM.Int32Ty;
- return CGM.Int8PtrTy;
+ return CGM.getVTableComponentType();
}
static void AddPointerLayoutOffset(const CodeGenModule &CGM,
@@ -686,7 +704,7 @@ static void AddPointerLayoutOffset(const CodeGenModule &CGM,
CharUnits offset) {
builder.add(llvm::ConstantExpr::getIntToPtr(
llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity()),
- CGM.Int8PtrTy));
+ CGM.GlobalsInt8PtrTy));
}
static void AddRelativeLayoutOffset(const CodeGenModule &CGM,
@@ -723,7 +741,7 @@ void CodeGenVTables::addVTableComponent(ConstantArrayBuilder &builder,
vtableHasLocalLinkage,
/*isCompleteDtor=*/false);
else
- return builder.add(llvm::ConstantExpr::getBitCast(rtti, CGM.Int8PtrTy));
+ return builder.add(rtti);
case VTableComponent::CK_FunctionPointer:
case VTableComponent::CK_CompleteDtorPointer:
@@ -742,7 +760,8 @@ void CodeGenVTables::addVTableComponent(ConstantArrayBuilder &builder,
? MD->hasAttr<CUDADeviceAttr>()
: (MD->hasAttr<CUDAHostAttr>() || !MD->hasAttr<CUDADeviceAttr>());
if (!CanEmitMethod)
- return builder.add(llvm::ConstantExpr::getNullValue(CGM.Int8PtrTy));
+ return builder.add(
+ llvm::ConstantExpr::getNullValue(CGM.GlobalsInt8PtrTy));
// Method is acceptable, continue processing as usual.
}
@@ -755,26 +774,26 @@ void CodeGenVTables::addVTableComponent(ConstantArrayBuilder &builder,
// with the local symbol. As a temporary solution, fill these components
// with zero. We shouldn't be calling these in the first place anyway.
if (useRelativeLayout())
- return llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
+ return llvm::ConstantPointerNull::get(CGM.GlobalsInt8PtrTy);
// For NVPTX devices in OpenMP emit special functon as null pointers,
// otherwise linking ends up with unresolved references.
- if (CGM.getLangOpts().OpenMP && CGM.getLangOpts().OpenMPIsDevice &&
+ if (CGM.getLangOpts().OpenMP && CGM.getLangOpts().OpenMPIsTargetDevice &&
CGM.getTriple().isNVPTX())
- return llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
+ return llvm::ConstantPointerNull::get(CGM.GlobalsInt8PtrTy);
llvm::FunctionType *fnTy =
llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
llvm::Constant *fn = cast<llvm::Constant>(
CGM.CreateRuntimeFunction(fnTy, name).getCallee());
if (auto f = dyn_cast<llvm::Function>(fn))
f->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- return llvm::ConstantExpr::getBitCast(fn, CGM.Int8PtrTy);
+ return fn;
};
llvm::Constant *fnPtr;
// Pure virtual member functions.
- if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
+ if (cast<CXXMethodDecl>(GD.getDecl())->isPureVirtual()) {
if (!PureVirtualFn)
PureVirtualFn =
getSpecialVirtualFn(CGM.getCXXABI().GetPureVirtualCallName());
@@ -806,15 +825,26 @@ void CodeGenVTables::addVTableComponent(ConstantArrayBuilder &builder,
return addRelativeComponent(
builder, fnPtr, vtableAddressPoint, vtableHasLocalLinkage,
component.getKind() == VTableComponent::CK_CompleteDtorPointer);
- } else
- return builder.add(llvm::ConstantExpr::getBitCast(fnPtr, CGM.Int8PtrTy));
+ } else {
+ // TODO: this icky and only exists due to functions being in the generic
+ // address space, rather than the global one, even though they are
+ // globals; fixing said issue might be intrusive, and will be done
+ // later.
+ unsigned FnAS = fnPtr->getType()->getPointerAddressSpace();
+ unsigned GVAS = CGM.GlobalsInt8PtrTy->getPointerAddressSpace();
+
+ if (FnAS != GVAS)
+ fnPtr =
+ llvm::ConstantExpr::getAddrSpaceCast(fnPtr, CGM.GlobalsInt8PtrTy);
+ return builder.add(fnPtr);
+ }
}
case VTableComponent::CK_UnusedFunctionPointer:
if (useRelativeLayout())
return builder.add(llvm::ConstantExpr::getNullValue(CGM.Int32Ty));
else
- return builder.addNullPointer(CGM.Int8PtrTy);
+ return builder.addNullPointer(CGM.GlobalsInt8PtrTy);
}
llvm_unreachable("Unexpected vtable component kind");
@@ -893,7 +923,7 @@ llvm::GlobalVariable *CodeGenVTables::GenerateConstructionVTable(
if (Linkage == llvm::GlobalVariable::AvailableExternallyLinkage)
Linkage = llvm::GlobalVariable::InternalLinkage;
- unsigned Align = CGM.getDataLayout().getABITypeAlignment(VTType);
+ llvm::Align Align = CGM.getDataLayout().getABITypeAlign(VTType);
// Create the variable that will hold the construction vtable.
llvm::GlobalVariable *VTable =
@@ -919,12 +949,33 @@ llvm::GlobalVariable *CodeGenVTables::GenerateConstructionVTable(
CGM.EmitVTableTypeMetadata(RD, VTable, *VTLayout.get());
- if (UsingRelativeLayout && !VTable->isDSOLocal())
- GenerateRelativeVTableAlias(VTable, OutName);
+ if (UsingRelativeLayout) {
+ RemoveHwasanMetadata(VTable);
+ if (!VTable->isDSOLocal())
+ GenerateRelativeVTableAlias(VTable, OutName);
+ }
return VTable;
}
+// Ensure this vtable is not instrumented by hwasan. That is, a global alias is
+// not generated for it. This is mainly used by the relative-vtables ABI where
+// vtables instead contain 32-bit offsets between the vtable and function
+// pointers. Hwasan is disabled for these vtables for now because the tag in a
+// vtable pointer may fail the overflow check when resolving 32-bit PLT
+// relocations. A future alternative for this would be finding which usages of
+// the vtable can continue to use the untagged hwasan value without any loss of
+// value in hwasan.
+void CodeGenVTables::RemoveHwasanMetadata(llvm::GlobalValue *GV) const {
+ if (CGM.getLangOpts().Sanitize.has(SanitizerKind::HWAddress)) {
+ llvm::GlobalValue::SanitizerMetadata Meta;
+ if (GV->hasSanitizerMetadata())
+ Meta = GV->getSanitizerMetadata();
+ Meta.NoHWAddress = true;
+ GV->setSanitizerMetadata(Meta);
+ }
+}
+
// If the VTable is not dso_local, then we will not be able to indicate that
// the VTable does not need a relocation and move into rodata. A frequent
// time this can occur is for classes that should be made public from a DSO
@@ -1008,19 +1059,20 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
switch (keyFunction->getTemplateSpecializationKind()) {
case TSK_Undeclared:
case TSK_ExplicitSpecialization:
- assert((def || CodeGenOpts.OptimizationLevel > 0 ||
- CodeGenOpts.getDebugInfo() != codegenoptions::NoDebugInfo) &&
- "Shouldn't query vtable linkage without key function, "
- "optimizations, or debug info");
- if (!def && CodeGenOpts.OptimizationLevel > 0)
- return llvm::GlobalVariable::AvailableExternallyLinkage;
+ assert(
+ (def || CodeGenOpts.OptimizationLevel > 0 ||
+ CodeGenOpts.getDebugInfo() != llvm::codegenoptions::NoDebugInfo) &&
+ "Shouldn't query vtable linkage without key function, "
+ "optimizations, or debug info");
+ if (!def && CodeGenOpts.OptimizationLevel > 0)
+ return llvm::GlobalVariable::AvailableExternallyLinkage;
- if (keyFunction->isInlined())
- return !Context.getLangOpts().AppleKext ?
- llvm::GlobalVariable::LinkOnceODRLinkage :
- llvm::Function::InternalLinkage;
+ if (keyFunction->isInlined())
+ return !Context.getLangOpts().AppleKext
+ ? llvm::GlobalVariable::LinkOnceODRLinkage
+ : llvm::Function::InternalLinkage;
- return llvm::GlobalVariable::ExternalLinkage;
+ return llvm::GlobalVariable::ExternalLinkage;
case TSK_ImplicitInstantiation:
return !Context.getLangOpts().AppleKext ?
@@ -1134,9 +1186,16 @@ bool CodeGenVTables::isVTableExternal(const CXXRecordDecl *RD) {
if (!keyFunction)
return false;
+ const FunctionDecl *Def;
// Otherwise, if we don't have a definition of the key function, the
// vtable must be defined somewhere else.
- return !keyFunction->hasBody();
+ if (!keyFunction->hasBody(Def))
+ return true;
+
+ assert(Def && "The body of the key function is not assigned to Def?");
+ // If the non-inline key function comes from another module unit, the vtable
+ // must be defined there.
+ return Def->isInAnotherModuleUnit() && !Def->isInlineSpecified();
}
/// Given that we're currently at the end of the translation unit, and
@@ -1173,12 +1232,16 @@ void CodeGenModule::EmitDeferredVTables() {
DeferredVTables.clear();
}
-bool CodeGenModule::HasLTOVisibilityPublicStd(const CXXRecordDecl *RD) {
+bool CodeGenModule::AlwaysHasLTOVisibilityPublic(const CXXRecordDecl *RD) {
+ if (RD->hasAttr<LTOVisibilityPublicAttr>() || RD->hasAttr<UuidAttr>() ||
+ RD->hasAttr<DLLExportAttr>() || RD->hasAttr<DLLImportAttr>())
+ return true;
+
if (!getCodeGenOpts().LTOVisibilityPublicStd)
return false;
const DeclContext *DC = RD;
- while (1) {
+ while (true) {
auto *D = cast<Decl>(DC);
DC = DC->getParent();
if (isa<TranslationUnitDecl>(DC->getRedeclContext())) {
@@ -1198,18 +1261,11 @@ bool CodeGenModule::HasHiddenLTOVisibility(const CXXRecordDecl *RD) {
if (!isExternallyVisible(LV.getLinkage()))
return true;
- if (RD->hasAttr<LTOVisibilityPublicAttr>() || RD->hasAttr<UuidAttr>())
+ if (!getTriple().isOSBinFormatCOFF() &&
+ LV.getVisibility() != HiddenVisibility)
return false;
- if (getTriple().isOSBinFormatCOFF()) {
- if (RD->hasAttr<DLLExportAttr>() || RD->hasAttr<DLLImportAttr>())
- return false;
- } else {
- if (LV.getVisibility() != HiddenVisibility)
- return false;
- }
-
- return !HasLTOVisibilityPublicStd(RD);
+ return !AlwaysHasLTOVisibilityPublic(RD);
}
llvm::GlobalObject::VCallVisibility CodeGenModule::GetVCallVisibilityLevel(
@@ -1231,13 +1287,13 @@ llvm::GlobalObject::VCallVisibility CodeGenModule::GetVCallVisibilityLevel(
else
TypeVis = llvm::GlobalObject::VCallVisibilityPublic;
- for (auto B : RD->bases())
+ for (const auto &B : RD->bases())
if (B.getType()->getAsCXXRecordDecl()->isDynamicClass())
TypeVis = std::min(
TypeVis,
GetVCallVisibilityLevel(B.getType()->getAsCXXRecordDecl(), Visited));
- for (auto B : RD->vbases())
+ for (const auto &B : RD->vbases())
if (B.getType()->getAsCXXRecordDecl()->isDynamicClass())
TypeVis = std::min(
TypeVis,
@@ -1249,49 +1305,42 @@ llvm::GlobalObject::VCallVisibility CodeGenModule::GetVCallVisibilityLevel(
void CodeGenModule::EmitVTableTypeMetadata(const CXXRecordDecl *RD,
llvm::GlobalVariable *VTable,
const VTableLayout &VTLayout) {
- if (!getCodeGenOpts().LTOUnit)
+ // Emit type metadata on vtables with LTO or IR instrumentation.
+ // In IR instrumentation, the type metadata is used to find out vtable
+ // definitions (for type profiling) among all global variables.
+ if (!getCodeGenOpts().LTOUnit && !getCodeGenOpts().hasProfileIRInstr())
return;
- CharUnits PointerWidth =
- Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+ CharUnits ComponentWidth = GetTargetTypeStoreSize(getVTableComponentType());
- typedef std::pair<const CXXRecordDecl *, unsigned> AddressPoint;
+ struct AddressPoint {
+ const CXXRecordDecl *Base;
+ size_t Offset;
+ std::string TypeName;
+ bool operator<(const AddressPoint &RHS) const {
+ int D = TypeName.compare(RHS.TypeName);
+ return D < 0 || (D == 0 && Offset < RHS.Offset);
+ }
+ };
std::vector<AddressPoint> AddressPoints;
- for (auto &&AP : VTLayout.getAddressPoints())
- AddressPoints.push_back(std::make_pair(
- AP.first.getBase(), VTLayout.getVTableOffset(AP.second.VTableIndex) +
- AP.second.AddressPointIndex));
+ for (auto &&AP : VTLayout.getAddressPoints()) {
+ AddressPoint N{AP.first.getBase(),
+ VTLayout.getVTableOffset(AP.second.VTableIndex) +
+ AP.second.AddressPointIndex,
+ {}};
+ llvm::raw_string_ostream Stream(N.TypeName);
+ getCXXABI().getMangleContext().mangleCanonicalTypeName(
+ QualType(N.Base->getTypeForDecl(), 0), Stream);
+ AddressPoints.push_back(std::move(N));
+ }
// Sort the address points for determinism.
- llvm::sort(AddressPoints, [this](const AddressPoint &AP1,
- const AddressPoint &AP2) {
- if (&AP1 == &AP2)
- return false;
-
- std::string S1;
- llvm::raw_string_ostream O1(S1);
- getCXXABI().getMangleContext().mangleTypeName(
- QualType(AP1.first->getTypeForDecl(), 0), O1);
- O1.flush();
-
- std::string S2;
- llvm::raw_string_ostream O2(S2);
- getCXXABI().getMangleContext().mangleTypeName(
- QualType(AP2.first->getTypeForDecl(), 0), O2);
- O2.flush();
-
- if (S1 < S2)
- return true;
- if (S1 != S2)
- return false;
-
- return AP1.second < AP2.second;
- });
+ llvm::sort(AddressPoints);
ArrayRef<VTableComponent> Comps = VTLayout.vtable_components();
for (auto AP : AddressPoints) {
// Create type metadata for the address point.
- AddVTableTypeMetadata(VTable, PointerWidth * AP.second, AP.first);
+ AddVTableTypeMetadata(VTable, ComponentWidth * AP.Offset, AP.Base);
// The class associated with each address point could also potentially be
// used for indirect calls via a member function pointer, so we need to
@@ -1303,8 +1352,8 @@ void CodeGenModule::EmitVTableTypeMetadata(const CXXRecordDecl *RD,
llvm::Metadata *MD = CreateMetadataIdentifierForVirtualMemPtrType(
Context.getMemberPointerType(
Comps[I].getFunctionDecl()->getType(),
- Context.getRecordType(AP.first).getTypePtr()));
- VTable->addTypeMetadata((PointerWidth * I).getQuantity(), MD);
+ Context.getRecordType(AP.Base).getTypePtr()));
+ VTable->addTypeMetadata((ComponentWidth * I).getQuantity(), MD);
}
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGVTables.h b/contrib/llvm-project/clang/lib/CodeGen/CGVTables.h
index bdfc075ee305..9d4223547050 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGVTables.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGVTables.h
@@ -75,16 +75,6 @@ class CodeGenVTables {
bool vtableHasLocalLinkage,
bool isCompleteDtor) const;
- /// Create a dso_local stub that will be used for a relative reference in the
- /// relative vtable layout. This stub will just be a tail call to the original
- /// function and propagate any function attributes from the original. If the
- /// original function is already dso_local, the original is returned instead
- /// and a stub is not created.
- llvm::Function *
- getOrCreateRelativeStub(llvm::Function *func,
- llvm::GlobalValue::LinkageTypes stubLinkage,
- bool isCompleteDtor) const;
-
bool useRelativeLayout() const;
llvm::Type *getVTableComponentType() const;
@@ -102,6 +92,10 @@ public:
return *cast<ItaniumVTableContext>(VTContext);
}
+ const ItaniumVTableContext &getItaniumVTableContext() const {
+ return *cast<ItaniumVTableContext>(VTContext);
+ }
+
MicrosoftVTableContext &getMicrosoftVTableContext() {
return *cast<MicrosoftVTableContext>(VTContext);
}
@@ -154,6 +148,9 @@ public:
/// when a vtable may not be dso_local.
void GenerateRelativeVTableAlias(llvm::GlobalVariable *VTable,
llvm::StringRef AliasNameRef);
+
+ /// Specify a global should not be instrumented with hwasan.
+ void RemoveHwasanMetadata(llvm::GlobalValue *GV) const;
};
} // end namespace CodeGen
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGValue.h b/contrib/llvm-project/clang/lib/CodeGen/CGValue.h
index 4b39a0520833..1e6f67250583 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGValue.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGValue.h
@@ -47,6 +47,8 @@ class RValue {
llvm::PointerIntPair<llvm::Value *, 2, Flavor> V1;
// Stores second value and volatility.
llvm::PointerIntPair<llvm::Value *, 1, bool> V2;
+ // Stores element type for aggregate values.
+ llvm::Type *ElementType;
public:
bool isScalar() const { return V1.getInt() == Scalar; }
@@ -71,7 +73,8 @@ public:
Address getAggregateAddress() const {
assert(isAggregate() && "Not an aggregate!");
auto align = reinterpret_cast<uintptr_t>(V2.getPointer()) >> AggAlignShift;
- return Address(V1.getPointer(), CharUnits::fromQuantity(align));
+ return Address(
+ V1.getPointer(), ElementType, CharUnits::fromQuantity(align));
}
llvm::Value *getAggregatePointer() const {
assert(isAggregate() && "Not an aggregate!");
@@ -108,6 +111,7 @@ public:
RValue ER;
ER.V1.setPointer(addr.getPointer());
ER.V1.setInt(Aggregate);
+ ER.ElementType = addr.getElementType();
auto align = static_cast<uintptr_t>(addr.getAlignment().getQuantity());
ER.V2.setPointer(reinterpret_cast<llvm::Value*>(align << AggAlignShift));
@@ -175,6 +179,7 @@ class LValue {
} LVType;
llvm::Value *V;
+ llvm::Type *ElementType;
union {
// Index into a vector subscript: V[i]
@@ -220,6 +225,9 @@ class LValue {
// this lvalue.
bool Nontemporal : 1;
+ // The pointer is known not to be null.
+ bool IsKnownNonNull : 1;
+
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
@@ -230,6 +238,11 @@ private:
LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo) {
assert((!Alignment.isZero() || Type->isIncompleteType()) &&
"initializing l-value with zero alignment!");
+ if (isGlobalReg())
+ assert(ElementType == nullptr && "Global reg does not store elem type");
+ else
+ assert(ElementType != nullptr && "Must have elem type");
+
this->Type = Type;
this->Quals = Quals;
const unsigned MaxAlign = 1U << 31;
@@ -321,23 +334,35 @@ public:
LValueBaseInfo getBaseInfo() const { return BaseInfo; }
void setBaseInfo(LValueBaseInfo Info) { BaseInfo = Info; }
+ KnownNonNull_t isKnownNonNull() const {
+ return (KnownNonNull_t)IsKnownNonNull;
+ }
+ LValue setKnownNonNull() {
+ IsKnownNonNull = true;
+ return *this;
+ }
+
// simple lvalue
llvm::Value *getPointer(CodeGenFunction &CGF) const {
assert(isSimple());
return V;
}
Address getAddress(CodeGenFunction &CGF) const {
- return Address(getPointer(CGF), getAlignment());
+ return Address(getPointer(CGF), ElementType, getAlignment(),
+ isKnownNonNull());
}
void setAddress(Address address) {
assert(isSimple());
V = address.getPointer();
+ ElementType = address.getElementType();
Alignment = address.getAlignment().getQuantity();
+ IsKnownNonNull = address.isKnownNonNull();
}
// vector elt lvalue
Address getVectorAddress() const {
- return Address(getVectorPointer(), getAlignment());
+ return Address(getVectorPointer(), ElementType, getAlignment(),
+ (KnownNonNull_t)isKnownNonNull());
}
llvm::Value *getVectorPointer() const {
assert(isVectorElt());
@@ -349,7 +374,8 @@ public:
}
Address getMatrixAddress() const {
- return Address(getMatrixPointer(), getAlignment());
+ return Address(getMatrixPointer(), ElementType, getAlignment(),
+ (KnownNonNull_t)isKnownNonNull());
}
llvm::Value *getMatrixPointer() const {
assert(isMatrixElt());
@@ -362,7 +388,8 @@ public:
// extended vector elements.
Address getExtVectorAddress() const {
- return Address(getExtVectorPointer(), getAlignment());
+ return Address(getExtVectorPointer(), ElementType, getAlignment(),
+ (KnownNonNull_t)isKnownNonNull());
}
llvm::Value *getExtVectorPointer() const {
assert(isExtVectorElt());
@@ -375,7 +402,8 @@ public:
// bitfield lvalue
Address getBitFieldAddress() const {
- return Address(getBitFieldPointer(), getAlignment());
+ return Address(getBitFieldPointer(), ElementType, getAlignment(),
+ (KnownNonNull_t)isKnownNonNull());
}
llvm::Value *getBitFieldPointer() const { assert(isBitField()); return V; }
const CGBitFieldInfo &getBitFieldInfo() const {
@@ -395,6 +423,8 @@ public:
R.LVType = Simple;
assert(address.getPointer()->getType()->isPointerTy());
R.V = address.getPointer();
+ R.ElementType = address.getElementType();
+ R.IsKnownNonNull = address.isKnownNonNull();
R.Initialize(type, qs, address.getAlignment(), BaseInfo, TBAAInfo);
return R;
}
@@ -405,7 +435,9 @@ public:
LValue R;
R.LVType = VectorElt;
R.V = vecAddress.getPointer();
+ R.ElementType = vecAddress.getElementType();
R.VectorIdx = Idx;
+ R.IsKnownNonNull = vecAddress.isKnownNonNull();
R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(),
BaseInfo, TBAAInfo);
return R;
@@ -417,7 +449,9 @@ public:
LValue R;
R.LVType = ExtVectorElt;
R.V = vecAddress.getPointer();
+ R.ElementType = vecAddress.getElementType();
R.VectorElts = Elts;
+ R.IsKnownNonNull = vecAddress.isKnownNonNull();
R.Initialize(type, type.getQualifiers(), vecAddress.getAlignment(),
BaseInfo, TBAAInfo);
return R;
@@ -435,17 +469,22 @@ public:
LValue R;
R.LVType = BitField;
R.V = Addr.getPointer();
+ R.ElementType = Addr.getElementType();
R.BitFieldInfo = &Info;
+ R.IsKnownNonNull = Addr.isKnownNonNull();
R.Initialize(type, type.getQualifiers(), Addr.getAlignment(), BaseInfo,
TBAAInfo);
return R;
}
- static LValue MakeGlobalReg(Address Reg, QualType type) {
+ static LValue MakeGlobalReg(llvm::Value *V, CharUnits alignment,
+ QualType type) {
LValue R;
R.LVType = GlobalReg;
- R.V = Reg.getPointer();
- R.Initialize(type, type.getQualifiers(), Reg.getAlignment(),
+ R.V = V;
+ R.ElementType = nullptr;
+ R.IsKnownNonNull = true;
+ R.Initialize(type, type.getQualifiers(), alignment,
LValueBaseInfo(AlignmentSource::Decl), TBAAAccessInfo());
return R;
}
@@ -456,7 +495,9 @@ public:
LValue R;
R.LVType = MatrixElt;
R.V = matAddress.getPointer();
+ R.ElementType = matAddress.getElementType();
R.VectorIdx = Idx;
+ R.IsKnownNonNull = matAddress.isKnownNonNull();
R.Initialize(type, type.getQualifiers(), matAddress.getAlignment(),
BaseInfo, TBAAInfo);
return R;
@@ -470,13 +511,11 @@ public:
/// An aggregate value slot.
class AggValueSlot {
/// The address.
- llvm::Value *Addr;
+ Address Addr;
// Qualifiers
Qualifiers Quals;
- unsigned Alignment;
-
/// DestructedFlag - This is set to true if some external code is
/// responsible for setting up a destructor for the slot. Otherwise
/// the code which constructs it should push the appropriate cleanup.
@@ -520,6 +559,14 @@ class AggValueSlot {
/// them.
bool SanitizerCheckedFlag : 1;
+ AggValueSlot(Address Addr, Qualifiers Quals, bool DestructedFlag,
+ bool ObjCGCFlag, bool ZeroedFlag, bool AliasedFlag,
+ bool OverlapFlag, bool SanitizerCheckedFlag)
+ : Addr(Addr), Quals(Quals), DestructedFlag(DestructedFlag),
+ ObjCGCFlag(ObjCGCFlag), ZeroedFlag(ZeroedFlag),
+ AliasedFlag(AliasedFlag), OverlapFlag(OverlapFlag),
+ SanitizerCheckedFlag(SanitizerCheckedFlag) {}
+
public:
enum IsAliased_t { IsNotAliased, IsAliased };
enum IsDestructed_t { IsNotDestructed, IsDestructed };
@@ -553,22 +600,10 @@ public:
Overlap_t mayOverlap,
IsZeroed_t isZeroed = IsNotZeroed,
IsSanitizerChecked_t isChecked = IsNotSanitizerChecked) {
- AggValueSlot AV;
- if (addr.isValid()) {
- AV.Addr = addr.getPointer();
- AV.Alignment = addr.getAlignment().getQuantity();
- } else {
- AV.Addr = nullptr;
- AV.Alignment = 0;
- }
- AV.Quals = quals;
- AV.DestructedFlag = isDestructed;
- AV.ObjCGCFlag = needsGC;
- AV.ZeroedFlag = isZeroed;
- AV.AliasedFlag = isAliased;
- AV.OverlapFlag = mayOverlap;
- AV.SanitizerCheckedFlag = isChecked;
- return AV;
+ if (addr.isValid())
+ addr.setKnownNonNull();
+ return AggValueSlot(addr, quals, isDestructed, needsGC, isZeroed, isAliased,
+ mayOverlap, isChecked);
}
static AggValueSlot
@@ -609,19 +644,19 @@ public:
}
llvm::Value *getPointer() const {
- return Addr;
+ return Addr.getPointer();
}
Address getAddress() const {
- return Address(Addr, getAlignment());
+ return Addr;
}
bool isIgnored() const {
- return Addr == nullptr;
+ return !Addr.isValid();
}
CharUnits getAlignment() const {
- return CharUnits::fromQuantity(Alignment);
+ return Addr.getAlignment();
}
IsAliased_t isPotentiallyAliased() const {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenABITypes.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenABITypes.cpp
index d3a16a1d5acc..a6073e1188d6 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenABITypes.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenABITypes.cpp
@@ -65,9 +65,8 @@ CodeGen::arrangeFreeFunctionCall(CodeGenModule &CGM,
ArrayRef<CanQualType> argTypes,
FunctionType::ExtInfo info,
RequiredArgs args) {
- return CGM.getTypes().arrangeLLVMFunctionInfo(
- returnType, /*instanceMethod=*/false, /*chainCall=*/false, argTypes,
- info, {}, args);
+ return CGM.getTypes().arrangeLLVMFunctionInfo(returnType, FnInfoOpts::None,
+ argTypes, info, {}, args);
}
ImplicitCXXConstructorArgs
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp
index b30bd11edbad..f8038497d90a 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp
@@ -7,6 +7,8 @@
//===----------------------------------------------------------------------===//
#include "clang/CodeGen/CodeGenAction.h"
+#include "BackendConsumer.h"
+#include "CGCall.h"
#include "CodeGenModule.h"
#include "CoverageMappingGen.h"
#include "MacroPPCallbacks.h"
@@ -25,8 +27,10 @@
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/Hashing.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
+#include "llvm/Demangle/Demangle.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
@@ -45,367 +49,374 @@
#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Support/YAMLTraits.h"
#include "llvm/Transforms/IPO/Internalize.h"
+#include "llvm/Transforms/Utils/Cloning.h"
-#include <memory>
+#include <optional>
using namespace clang;
using namespace llvm;
+#define DEBUG_TYPE "codegenaction"
+
+namespace llvm {
+extern cl::opt<bool> ClRelinkBuiltinBitcodePostop;
+}
+
namespace clang {
- class BackendConsumer;
- class ClangDiagnosticHandler final : public DiagnosticHandler {
- public:
- ClangDiagnosticHandler(const CodeGenOptions &CGOpts, BackendConsumer *BCon)
- : CodeGenOpts(CGOpts), BackendCon(BCon) {}
+class BackendConsumer;
+class ClangDiagnosticHandler final : public DiagnosticHandler {
+public:
+ ClangDiagnosticHandler(const CodeGenOptions &CGOpts, BackendConsumer *BCon)
+ : CodeGenOpts(CGOpts), BackendCon(BCon) {}
- bool handleDiagnostics(const DiagnosticInfo &DI) override;
+ bool handleDiagnostics(const DiagnosticInfo &DI) override;
- bool isAnalysisRemarkEnabled(StringRef PassName) const override {
- return CodeGenOpts.OptimizationRemarkAnalysis.patternMatches(PassName);
- }
- bool isMissedOptRemarkEnabled(StringRef PassName) const override {
- return CodeGenOpts.OptimizationRemarkMissed.patternMatches(PassName);
- }
- bool isPassedOptRemarkEnabled(StringRef PassName) const override {
- return CodeGenOpts.OptimizationRemark.patternMatches(PassName);
- }
+ bool isAnalysisRemarkEnabled(StringRef PassName) const override {
+ return CodeGenOpts.OptimizationRemarkAnalysis.patternMatches(PassName);
+ }
+ bool isMissedOptRemarkEnabled(StringRef PassName) const override {
+ return CodeGenOpts.OptimizationRemarkMissed.patternMatches(PassName);
+ }
+ bool isPassedOptRemarkEnabled(StringRef PassName) const override {
+ return CodeGenOpts.OptimizationRemark.patternMatches(PassName);
+ }
- bool isAnyRemarkEnabled() const override {
- return CodeGenOpts.OptimizationRemarkAnalysis.hasValidPattern() ||
- CodeGenOpts.OptimizationRemarkMissed.hasValidPattern() ||
- CodeGenOpts.OptimizationRemark.hasValidPattern();
- }
+ bool isAnyRemarkEnabled() const override {
+ return CodeGenOpts.OptimizationRemarkAnalysis.hasValidPattern() ||
+ CodeGenOpts.OptimizationRemarkMissed.hasValidPattern() ||
+ CodeGenOpts.OptimizationRemark.hasValidPattern();
+ }
- private:
- const CodeGenOptions &CodeGenOpts;
- BackendConsumer *BackendCon;
- };
+private:
+ const CodeGenOptions &CodeGenOpts;
+ BackendConsumer *BackendCon;
+};
+
+static void reportOptRecordError(Error E, DiagnosticsEngine &Diags,
+ const CodeGenOptions &CodeGenOpts) {
+ handleAllErrors(
+ std::move(E),
+ [&](const LLVMRemarkSetupFileError &E) {
+ Diags.Report(diag::err_cannot_open_file)
+ << CodeGenOpts.OptRecordFile << E.message();
+ },
+ [&](const LLVMRemarkSetupPatternError &E) {
+ Diags.Report(diag::err_drv_optimization_remark_pattern)
+ << E.message() << CodeGenOpts.OptRecordPasses;
+ },
+ [&](const LLVMRemarkSetupFormatError &E) {
+ Diags.Report(diag::err_drv_optimization_remark_format)
+ << CodeGenOpts.OptRecordFormat;
+ });
+}
- static void reportOptRecordError(Error E, DiagnosticsEngine &Diags,
- const CodeGenOptions CodeGenOpts) {
- handleAllErrors(
- std::move(E),
- [&](const LLVMRemarkSetupFileError &E) {
- Diags.Report(diag::err_cannot_open_file)
- << CodeGenOpts.OptRecordFile << E.message();
- },
- [&](const LLVMRemarkSetupPatternError &E) {
- Diags.Report(diag::err_drv_optimization_remark_pattern)
- << E.message() << CodeGenOpts.OptRecordPasses;
- },
- [&](const LLVMRemarkSetupFormatError &E) {
- Diags.Report(diag::err_drv_optimization_remark_format)
- << CodeGenOpts.OptRecordFormat;
- });
- }
+BackendConsumer::BackendConsumer(BackendAction Action, DiagnosticsEngine &Diags,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
+ const HeaderSearchOptions &HeaderSearchOpts,
+ const PreprocessorOptions &PPOpts,
+ const CodeGenOptions &CodeGenOpts,
+ const TargetOptions &TargetOpts,
+ const LangOptions &LangOpts,
+ const std::string &InFile,
+ SmallVector<LinkModule, 4> LinkModules,
+ std::unique_ptr<raw_pwrite_stream> OS,
+ LLVMContext &C,
+ CoverageSourceInfo *CoverageInfo)
+ : Diags(Diags), Action(Action), HeaderSearchOpts(HeaderSearchOpts),
+ CodeGenOpts(CodeGenOpts), TargetOpts(TargetOpts), LangOpts(LangOpts),
+ AsmOutStream(std::move(OS)), Context(nullptr), FS(VFS),
+ LLVMIRGeneration("irgen", "LLVM IR Generation Time"),
+ LLVMIRGenerationRefCount(0),
+ Gen(CreateLLVMCodeGen(Diags, InFile, std::move(VFS), HeaderSearchOpts,
+ PPOpts, CodeGenOpts, C, CoverageInfo)),
+ LinkModules(std::move(LinkModules)) {
+ TimerIsEnabled = CodeGenOpts.TimePasses;
+ llvm::TimePassesIsEnabled = CodeGenOpts.TimePasses;
+ llvm::TimePassesPerRun = CodeGenOpts.TimePassesPerRun;
+}
- class BackendConsumer : public ASTConsumer {
- using LinkModule = CodeGenAction::LinkModule;
-
- virtual void anchor();
- DiagnosticsEngine &Diags;
- BackendAction Action;
- const HeaderSearchOptions &HeaderSearchOpts;
- const CodeGenOptions &CodeGenOpts;
- const TargetOptions &TargetOpts;
- const LangOptions &LangOpts;
- std::unique_ptr<raw_pwrite_stream> AsmOutStream;
- ASTContext *Context;
-
- Timer LLVMIRGeneration;
- unsigned LLVMIRGenerationRefCount;
-
- /// True if we've finished generating IR. This prevents us from generating
- /// additional LLVM IR after emitting output in HandleTranslationUnit. This
- /// can happen when Clang plugins trigger additional AST deserialization.
- bool IRGenFinished = false;
-
- bool TimerIsEnabled = false;
-
- std::unique_ptr<CodeGenerator> Gen;
-
- SmallVector<LinkModule, 4> LinkModules;
-
- // This is here so that the diagnostic printer knows the module a diagnostic
- // refers to.
- llvm::Module *CurLinkModule = nullptr;
-
- public:
- BackendConsumer(BackendAction Action, DiagnosticsEngine &Diags,
- const HeaderSearchOptions &HeaderSearchOpts,
- const PreprocessorOptions &PPOpts,
- const CodeGenOptions &CodeGenOpts,
- const TargetOptions &TargetOpts,
- const LangOptions &LangOpts, const std::string &InFile,
- SmallVector<LinkModule, 4> LinkModules,
- std::unique_ptr<raw_pwrite_stream> OS, LLVMContext &C,
- CoverageSourceInfo *CoverageInfo = nullptr)
- : Diags(Diags), Action(Action), HeaderSearchOpts(HeaderSearchOpts),
- CodeGenOpts(CodeGenOpts), TargetOpts(TargetOpts), LangOpts(LangOpts),
- AsmOutStream(std::move(OS)), Context(nullptr),
- LLVMIRGeneration("irgen", "LLVM IR Generation Time"),
- LLVMIRGenerationRefCount(0),
- Gen(CreateLLVMCodeGen(Diags, InFile, HeaderSearchOpts, PPOpts,
- CodeGenOpts, C, CoverageInfo)),
- LinkModules(std::move(LinkModules)) {
- TimerIsEnabled = CodeGenOpts.TimePasses;
- llvm::TimePassesIsEnabled = CodeGenOpts.TimePasses;
- llvm::TimePassesPerRun = CodeGenOpts.TimePassesPerRun;
- }
+// This constructor is used in installing an empty BackendConsumer
+// to use the clang diagnostic handler for IR input files. It avoids
+// initializing the OS field.
+BackendConsumer::BackendConsumer(BackendAction Action, DiagnosticsEngine &Diags,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
+ const HeaderSearchOptions &HeaderSearchOpts,
+ const PreprocessorOptions &PPOpts,
+ const CodeGenOptions &CodeGenOpts,
+ const TargetOptions &TargetOpts,
+ const LangOptions &LangOpts,
+ llvm::Module *Module,
+ SmallVector<LinkModule, 4> LinkModules,
+ LLVMContext &C,
+ CoverageSourceInfo *CoverageInfo)
+ : Diags(Diags), Action(Action), HeaderSearchOpts(HeaderSearchOpts),
+ CodeGenOpts(CodeGenOpts), TargetOpts(TargetOpts), LangOpts(LangOpts),
+ Context(nullptr), FS(VFS),
+ LLVMIRGeneration("irgen", "LLVM IR Generation Time"),
+ LLVMIRGenerationRefCount(0),
+ Gen(CreateLLVMCodeGen(Diags, "", std::move(VFS), HeaderSearchOpts,
+ PPOpts, CodeGenOpts, C, CoverageInfo)),
+ LinkModules(std::move(LinkModules)), CurLinkModule(Module) {
+ TimerIsEnabled = CodeGenOpts.TimePasses;
+ llvm::TimePassesIsEnabled = CodeGenOpts.TimePasses;
+ llvm::TimePassesPerRun = CodeGenOpts.TimePassesPerRun;
+}
- // This constructor is used in installing an empty BackendConsumer
- // to use the clang diagnostic handler for IR input files. It avoids
- // initializing the OS field.
- BackendConsumer(BackendAction Action, DiagnosticsEngine &Diags,
- const HeaderSearchOptions &HeaderSearchOpts,
- const PreprocessorOptions &PPOpts,
- const CodeGenOptions &CodeGenOpts,
- const TargetOptions &TargetOpts,
- const LangOptions &LangOpts,
- SmallVector<LinkModule, 4> LinkModules, LLVMContext &C,
- CoverageSourceInfo *CoverageInfo = nullptr)
- : Diags(Diags), Action(Action), HeaderSearchOpts(HeaderSearchOpts),
- CodeGenOpts(CodeGenOpts), TargetOpts(TargetOpts), LangOpts(LangOpts),
- Context(nullptr),
- LLVMIRGeneration("irgen", "LLVM IR Generation Time"),
- LLVMIRGenerationRefCount(0),
- Gen(CreateLLVMCodeGen(Diags, "", HeaderSearchOpts, PPOpts,
- CodeGenOpts, C, CoverageInfo)),
- LinkModules(std::move(LinkModules)) {
- TimerIsEnabled = CodeGenOpts.TimePasses;
- llvm::TimePassesIsEnabled = CodeGenOpts.TimePasses;
- llvm::TimePassesPerRun = CodeGenOpts.TimePassesPerRun;
- }
- llvm::Module *getModule() const { return Gen->GetModule(); }
- std::unique_ptr<llvm::Module> takeModule() {
- return std::unique_ptr<llvm::Module>(Gen->ReleaseModule());
- }
+llvm::Module* BackendConsumer::getModule() const {
+ return Gen->GetModule();
+}
- CodeGenerator *getCodeGenerator() { return Gen.get(); }
+std::unique_ptr<llvm::Module> BackendConsumer::takeModule() {
+ return std::unique_ptr<llvm::Module>(Gen->ReleaseModule());
+}
- void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) override {
- Gen->HandleCXXStaticMemberVarInstantiation(VD);
- }
+CodeGenerator* BackendConsumer::getCodeGenerator() {
+ return Gen.get();
+}
- void Initialize(ASTContext &Ctx) override {
- assert(!Context && "initialized multiple times");
+void BackendConsumer::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
+ Gen->HandleCXXStaticMemberVarInstantiation(VD);
+}
- Context = &Ctx;
+void BackendConsumer::Initialize(ASTContext &Ctx) {
+ assert(!Context && "initialized multiple times");
- if (TimerIsEnabled)
- LLVMIRGeneration.startTimer();
+ Context = &Ctx;
- Gen->Initialize(Ctx);
+ if (TimerIsEnabled)
+ LLVMIRGeneration.startTimer();
- if (TimerIsEnabled)
- LLVMIRGeneration.stopTimer();
- }
+ Gen->Initialize(Ctx);
- bool HandleTopLevelDecl(DeclGroupRef D) override {
- PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(),
- Context->getSourceManager(),
- "LLVM IR generation of declaration");
+ if (TimerIsEnabled)
+ LLVMIRGeneration.stopTimer();
+}
- // Recurse.
- if (TimerIsEnabled) {
- LLVMIRGenerationRefCount += 1;
- if (LLVMIRGenerationRefCount == 1)
- LLVMIRGeneration.startTimer();
- }
+bool BackendConsumer::HandleTopLevelDecl(DeclGroupRef D) {
+ PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(),
+ Context->getSourceManager(),
+ "LLVM IR generation of declaration");
+
+ // Recurse.
+ if (TimerIsEnabled) {
+ LLVMIRGenerationRefCount += 1;
+ if (LLVMIRGenerationRefCount == 1)
+ LLVMIRGeneration.startTimer();
+ }
+
+ Gen->HandleTopLevelDecl(D);
- Gen->HandleTopLevelDecl(D);
+ if (TimerIsEnabled) {
+ LLVMIRGenerationRefCount -= 1;
+ if (LLVMIRGenerationRefCount == 0)
+ LLVMIRGeneration.stopTimer();
+ }
+
+ return true;
+}
+
+void BackendConsumer::HandleInlineFunctionDefinition(FunctionDecl *D) {
+ PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+ Context->getSourceManager(),
+ "LLVM IR generation of inline function");
+ if (TimerIsEnabled)
+ LLVMIRGeneration.startTimer();
+
+ Gen->HandleInlineFunctionDefinition(D);
+
+ if (TimerIsEnabled)
+ LLVMIRGeneration.stopTimer();
+}
+
+void BackendConsumer::HandleInterestingDecl(DeclGroupRef D) {
+ // Ignore interesting decls from the AST reader after IRGen is finished.
+ if (!IRGenFinished)
+ HandleTopLevelDecl(D);
+}
- if (TimerIsEnabled) {
- LLVMIRGenerationRefCount -= 1;
- if (LLVMIRGenerationRefCount == 0)
- LLVMIRGeneration.stopTimer();
+// Links each entry in LinkModules into our module. Returns true on error.
+bool BackendConsumer::LinkInModules(llvm::Module *M, bool ShouldLinkFiles) {
+
+ for (auto &LM : LinkModules) {
+ assert(LM.Module && "LinkModule does not actually have a module");
+
+ // If ShouldLinkFiles is not set, skip files added via the
+ // -mlink-bitcode-files, only linking -mlink-builtin-bitcode
+ if (!LM.Internalize && !ShouldLinkFiles)
+ continue;
+
+ if (LM.PropagateAttrs)
+ for (Function &F : *LM.Module) {
+ // Skip intrinsics. Keep consistent with how intrinsics are created
+ // in LLVM IR.
+ if (F.isIntrinsic())
+ continue;
+ CodeGen::mergeDefaultFunctionDefinitionAttributes(
+ F, CodeGenOpts, LangOpts, TargetOpts, LM.Internalize);
}
- return true;
- }
+ CurLinkModule = LM.Module.get();
+ bool Err;
- void HandleInlineFunctionDefinition(FunctionDecl *D) override {
- PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
- Context->getSourceManager(),
- "LLVM IR generation of inline function");
- if (TimerIsEnabled)
- LLVMIRGeneration.startTimer();
+ auto DoLink = [&](auto &Mod) {
+ if (LM.Internalize) {
+ Err = Linker::linkModules(
+ *M, std::move(Mod), LM.LinkFlags,
+ [](llvm::Module &M, const llvm::StringSet<> &GVS) {
+ internalizeModule(M, [&GVS](const llvm::GlobalValue &GV) {
+ return !GV.hasName() || (GVS.count(GV.getName()) == 0);
+ });
+ });
+ } else
+ Err = Linker::linkModules(*M, std::move(Mod), LM.LinkFlags);
+ };
- Gen->HandleInlineFunctionDefinition(D);
+ // Create a Clone to move to the linker, which preserves the original
+ // linking modules, allowing them to be linked again in the future
+ if (ClRelinkBuiltinBitcodePostop) {
+ // TODO: If CloneModule() is updated to support cloning of unmaterialized
+ // modules, we can remove this
+ if (Error E = CurLinkModule->materializeAll())
+ return false;
- if (TimerIsEnabled)
- LLVMIRGeneration.stopTimer();
- }
+ std::unique_ptr<llvm::Module> Clone = llvm::CloneModule(*LM.Module);
- void HandleInterestingDecl(DeclGroupRef D) override {
- // Ignore interesting decls from the AST reader after IRGen is finished.
- if (!IRGenFinished)
- HandleTopLevelDecl(D);
+ DoLink(Clone);
+ }
+ // Otherwise we can link (and clean up) the original modules
+ else {
+ DoLink(LM.Module);
}
+ }
- // Links each entry in LinkModules into our module. Returns true on error.
- bool LinkInModules() {
- for (auto &LM : LinkModules) {
- if (LM.PropagateAttrs)
- for (Function &F : *LM.Module) {
- // Skip intrinsics. Keep consistent with how intrinsics are created
- // in LLVM IR.
- if (F.isIntrinsic())
- continue;
- Gen->CGM().addDefaultFunctionDefinitionAttributes(F);
- }
-
- CurLinkModule = LM.Module.get();
-
- bool Err;
- if (LM.Internalize) {
- Err = Linker::linkModules(
- *getModule(), std::move(LM.Module), LM.LinkFlags,
- [](llvm::Module &M, const llvm::StringSet<> &GVS) {
- internalizeModule(M, [&GVS](const llvm::GlobalValue &GV) {
- return !GV.hasName() || (GVS.count(GV.getName()) == 0);
- });
- });
- } else {
- Err = Linker::linkModules(*getModule(), std::move(LM.Module),
- LM.LinkFlags);
- }
+ return false; // success
+}
- if (Err)
- return true;
- }
- return false; // success
+void BackendConsumer::HandleTranslationUnit(ASTContext &C) {
+ {
+ llvm::TimeTraceScope TimeScope("Frontend");
+ PrettyStackTraceString CrashInfo("Per-file LLVM IR generation");
+ if (TimerIsEnabled) {
+ LLVMIRGenerationRefCount += 1;
+ if (LLVMIRGenerationRefCount == 1)
+ LLVMIRGeneration.startTimer();
}
- void HandleTranslationUnit(ASTContext &C) override {
- {
- llvm::TimeTraceScope TimeScope("Frontend");
- PrettyStackTraceString CrashInfo("Per-file LLVM IR generation");
- if (TimerIsEnabled) {
- LLVMIRGenerationRefCount += 1;
- if (LLVMIRGenerationRefCount == 1)
- LLVMIRGeneration.startTimer();
- }
+ Gen->HandleTranslationUnit(C);
- Gen->HandleTranslationUnit(C);
+ if (TimerIsEnabled) {
+ LLVMIRGenerationRefCount -= 1;
+ if (LLVMIRGenerationRefCount == 0)
+ LLVMIRGeneration.stopTimer();
+ }
- if (TimerIsEnabled) {
- LLVMIRGenerationRefCount -= 1;
- if (LLVMIRGenerationRefCount == 0)
- LLVMIRGeneration.stopTimer();
- }
+ IRGenFinished = true;
+ }
- IRGenFinished = true;
- }
+ // Silently ignore if we weren't initialized for some reason.
+ if (!getModule())
+ return;
- // Silently ignore if we weren't initialized for some reason.
- if (!getModule())
- return;
-
- LLVMContext &Ctx = getModule()->getContext();
- std::unique_ptr<DiagnosticHandler> OldDiagnosticHandler =
- Ctx.getDiagnosticHandler();
- Ctx.setDiagnosticHandler(std::make_unique<ClangDiagnosticHandler>(
- CodeGenOpts, this));
-
- Expected<std::unique_ptr<llvm::ToolOutputFile>> OptRecordFileOrErr =
- setupLLVMOptimizationRemarks(
- Ctx, CodeGenOpts.OptRecordFile, CodeGenOpts.OptRecordPasses,
- CodeGenOpts.OptRecordFormat, CodeGenOpts.DiagnosticsWithHotness,
- CodeGenOpts.DiagnosticsHotnessThreshold);
-
- if (Error E = OptRecordFileOrErr.takeError()) {
- reportOptRecordError(std::move(E), Diags, CodeGenOpts);
- return;
- }
+ LLVMContext &Ctx = getModule()->getContext();
+ std::unique_ptr<DiagnosticHandler> OldDiagnosticHandler =
+ Ctx.getDiagnosticHandler();
+ Ctx.setDiagnosticHandler(std::make_unique<ClangDiagnosticHandler>(
+ CodeGenOpts, this));
- std::unique_ptr<llvm::ToolOutputFile> OptRecordFile =
- std::move(*OptRecordFileOrErr);
+ Expected<std::unique_ptr<llvm::ToolOutputFile>> OptRecordFileOrErr =
+ setupLLVMOptimizationRemarks(
+ Ctx, CodeGenOpts.OptRecordFile, CodeGenOpts.OptRecordPasses,
+ CodeGenOpts.OptRecordFormat, CodeGenOpts.DiagnosticsWithHotness,
+ CodeGenOpts.DiagnosticsHotnessThreshold);
- if (OptRecordFile &&
- CodeGenOpts.getProfileUse() != CodeGenOptions::ProfileNone)
- Ctx.setDiagnosticsHotnessRequested(true);
+ if (Error E = OptRecordFileOrErr.takeError()) {
+ reportOptRecordError(std::move(E), Diags, CodeGenOpts);
+ return;
+ }
- // Link each LinkModule into our module.
- if (LinkInModules())
- return;
+ std::unique_ptr<llvm::ToolOutputFile> OptRecordFile =
+ std::move(*OptRecordFileOrErr);
- EmbedBitcode(getModule(), CodeGenOpts, llvm::MemoryBufferRef());
+ if (OptRecordFile &&
+ CodeGenOpts.getProfileUse() != CodeGenOptions::ProfileNone)
+ Ctx.setDiagnosticsHotnessRequested(true);
- EmitBackendOutput(Diags, HeaderSearchOpts, CodeGenOpts, TargetOpts,
- LangOpts, C.getTargetInfo().getDataLayoutString(),
- getModule(), Action, std::move(AsmOutStream));
+ if (CodeGenOpts.MisExpect) {
+ Ctx.setMisExpectWarningRequested(true);
+ }
- Ctx.setDiagnosticHandler(std::move(OldDiagnosticHandler));
+ if (CodeGenOpts.DiagnosticsMisExpectTolerance) {
+ Ctx.setDiagnosticsMisExpectTolerance(
+ CodeGenOpts.DiagnosticsMisExpectTolerance);
+ }
- if (OptRecordFile)
- OptRecordFile->keep();
- }
+ // Link each LinkModule into our module.
+ if (LinkInModules(getModule()))
+ return;
- void HandleTagDeclDefinition(TagDecl *D) override {
- PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
- Context->getSourceManager(),
- "LLVM IR generation of declaration");
- Gen->HandleTagDeclDefinition(D);
+ for (auto &F : getModule()->functions()) {
+ if (const Decl *FD = Gen->GetDeclForMangledName(F.getName())) {
+ auto Loc = FD->getASTContext().getFullLoc(FD->getLocation());
+ // TODO: use a fast content hash when available.
+ auto NameHash = llvm::hash_value(F.getName());
+ ManglingFullSourceLocs.push_back(std::make_pair(NameHash, Loc));
}
+ }
- void HandleTagDeclRequiredDefinition(const TagDecl *D) override {
- Gen->HandleTagDeclRequiredDefinition(D);
- }
+ if (CodeGenOpts.ClearASTBeforeBackend) {
+ LLVM_DEBUG(llvm::dbgs() << "Clearing AST...\n");
+ // Access to the AST is no longer available after this.
+ // Other things that the ASTContext manages are still available, e.g.
+ // the SourceManager. It'd be nice if we could separate out all the
+ // things in ASTContext used after this point and null out the
+ // ASTContext, but too many various parts of the ASTContext are still
+ // used in various parts.
+ C.cleanup();
+ C.getAllocator().Reset();
+ }
- void CompleteTentativeDefinition(VarDecl *D) override {
- Gen->CompleteTentativeDefinition(D);
- }
+ EmbedBitcode(getModule(), CodeGenOpts, llvm::MemoryBufferRef());
- void CompleteExternalDeclaration(VarDecl *D) override {
- Gen->CompleteExternalDeclaration(D);
- }
+ EmitBackendOutput(Diags, HeaderSearchOpts, CodeGenOpts, TargetOpts, LangOpts,
+ C.getTargetInfo().getDataLayoutString(), getModule(),
+ Action, FS, std::move(AsmOutStream), this);
- void AssignInheritanceModel(CXXRecordDecl *RD) override {
- Gen->AssignInheritanceModel(RD);
- }
+ Ctx.setDiagnosticHandler(std::move(OldDiagnosticHandler));
- void HandleVTable(CXXRecordDecl *RD) override {
- Gen->HandleVTable(RD);
- }
+ if (OptRecordFile)
+ OptRecordFile->keep();
+}
- /// Get the best possible source location to represent a diagnostic that
- /// may have associated debug info.
- const FullSourceLoc
- getBestLocationFromDebugLoc(const llvm::DiagnosticInfoWithLocationBase &D,
- bool &BadDebugInfo, StringRef &Filename,
- unsigned &Line, unsigned &Column) const;
-
- void DiagnosticHandlerImpl(const llvm::DiagnosticInfo &DI);
- /// Specialized handler for InlineAsm diagnostic.
- /// \return True if the diagnostic has been successfully reported, false
- /// otherwise.
- bool InlineAsmDiagHandler(const llvm::DiagnosticInfoInlineAsm &D);
- /// Specialized handler for diagnostics reported using SMDiagnostic.
- void SrcMgrDiagHandler(const llvm::DiagnosticInfoSrcMgr &D);
- /// Specialized handler for StackSize diagnostic.
- /// \return True if the diagnostic has been successfully reported, false
- /// otherwise.
- bool StackSizeDiagHandler(const llvm::DiagnosticInfoStackSize &D);
- /// Specialized handler for unsupported backend feature diagnostic.
- void UnsupportedDiagHandler(const llvm::DiagnosticInfoUnsupported &D);
- /// Specialized handlers for optimization remarks.
- /// Note that these handlers only accept remarks and they always handle
- /// them.
- void EmitOptimizationMessage(const llvm::DiagnosticInfoOptimizationBase &D,
- unsigned DiagID);
- void
- OptimizationRemarkHandler(const llvm::DiagnosticInfoOptimizationBase &D);
- void OptimizationRemarkHandler(
- const llvm::OptimizationRemarkAnalysisFPCommute &D);
- void OptimizationRemarkHandler(
- const llvm::OptimizationRemarkAnalysisAliasing &D);
- void OptimizationFailureHandler(
- const llvm::DiagnosticInfoOptimizationFailure &D);
- };
+void BackendConsumer::HandleTagDeclDefinition(TagDecl *D) {
+ PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+ Context->getSourceManager(),
+ "LLVM IR generation of declaration");
+ Gen->HandleTagDeclDefinition(D);
+}
+
+void BackendConsumer::HandleTagDeclRequiredDefinition(const TagDecl *D) {
+ Gen->HandleTagDeclRequiredDefinition(D);
+}
+
+void BackendConsumer::CompleteTentativeDefinition(VarDecl *D) {
+ Gen->CompleteTentativeDefinition(D);
+}
- void BackendConsumer::anchor() {}
+void BackendConsumer::CompleteExternalDeclaration(VarDecl *D) {
+ Gen->CompleteExternalDeclaration(D);
}
+void BackendConsumer::AssignInheritanceModel(CXXRecordDecl *RD) {
+ Gen->AssignInheritanceModel(RD);
+}
+
+void BackendConsumer::HandleVTable(CXXRecordDecl *RD) {
+ Gen->HandleVTable(RD);
+}
+
+void BackendConsumer::anchor() { }
+
+} // namespace clang
+
bool ClangDiagnosticHandler::handleDiagnostics(const DiagnosticInfo &DI) {
BackendCon->DiagnosticHandlerImpl(DI);
return true;
@@ -532,7 +543,6 @@ void BackendConsumer::SrcMgrDiagHandler(const llvm::DiagnosticInfoSrcMgr &DI) {
// If Loc is invalid, we still need to report the issue, it just gets no
// location info.
Diags.Report(Loc, DiagID).AddString(Message);
- return;
}
bool
@@ -567,17 +577,28 @@ BackendConsumer::StackSizeDiagHandler(const llvm::DiagnosticInfoStackSize &D) {
// We do not know how to format other severities.
return false;
- if (const Decl *ND = Gen->GetDeclForMangledName(D.getFunction().getName())) {
- // FIXME: Shouldn't need to truncate to uint32_t
- Diags.Report(ND->getASTContext().getFullLoc(ND->getLocation()),
- diag::warn_fe_frame_larger_than)
- << static_cast<uint32_t>(D.getStackSize())
- << static_cast<uint32_t>(D.getStackLimit())
- << Decl::castToDeclContext(ND);
- return true;
- }
+ auto Loc = getFunctionSourceLocation(D.getFunction());
+ if (!Loc)
+ return false;
- return false;
+ Diags.Report(*Loc, diag::warn_fe_frame_larger_than)
+ << D.getStackSize() << D.getStackLimit()
+ << llvm::demangle(D.getFunction().getName());
+ return true;
+}
+
+bool BackendConsumer::ResourceLimitDiagHandler(
+ const llvm::DiagnosticInfoResourceLimit &D) {
+ auto Loc = getFunctionSourceLocation(D.getFunction());
+ if (!Loc)
+ return false;
+ unsigned DiagID = diag::err_fe_backend_resource_limit;
+ ComputeDiagID(D.getSeverity(), backend_resource_limit, DiagID);
+
+ Diags.Report(*Loc, DiagID)
+ << D.getResourceName() << D.getResourceSize() << D.getResourceLimit()
+ << llvm::demangle(D.getFunction().getName());
+ return true;
}
const FullSourceLoc BackendConsumer::getBestLocationFromDebugLoc(
@@ -606,9 +627,10 @@ const FullSourceLoc BackendConsumer::getBestLocationFromDebugLoc(
// function definition. We use the definition's right brace to differentiate
// from diagnostics that genuinely relate to the function itself.
FullSourceLoc Loc(DILoc, SourceMgr);
- if (Loc.isInvalid())
- if (const Decl *FD = Gen->GetDeclForMangledName(D.getFunction().getName()))
- Loc = FD->getASTContext().getFullLoc(FD->getLocation());
+ if (Loc.isInvalid()) {
+ if (auto MaybeLoc = getFunctionSourceLocation(D.getFunction()))
+ Loc = *MaybeLoc;
+ }
if (DILoc.isInvalid() && D.isLocationAvailable())
// If we were not able to translate the file:line:col information
@@ -621,6 +643,16 @@ const FullSourceLoc BackendConsumer::getBestLocationFromDebugLoc(
return Loc;
}
+std::optional<FullSourceLoc>
+BackendConsumer::getFunctionSourceLocation(const Function &F) const {
+ auto Hash = llvm::hash_value(F.getName());
+ for (const auto &Pair : ManglingFullSourceLocs) {
+ if (Pair.first == Hash)
+ return Pair.second;
+ }
+ return std::nullopt;
+}
+
void BackendConsumer::UnsupportedDiagHandler(
const llvm::DiagnosticInfoUnsupported &D) {
// We only support warnings or errors.
@@ -758,6 +790,40 @@ void BackendConsumer::OptimizationFailureHandler(
EmitOptimizationMessage(D, diag::warn_fe_backend_optimization_failure);
}
+void BackendConsumer::DontCallDiagHandler(const DiagnosticInfoDontCall &D) {
+ SourceLocation LocCookie =
+ SourceLocation::getFromRawEncoding(D.getLocCookie());
+
+ // FIXME: we can't yet diagnose indirect calls. When/if we can, we
+ // should instead assert that LocCookie.isValid().
+ if (!LocCookie.isValid())
+ return;
+
+ Diags.Report(LocCookie, D.getSeverity() == DiagnosticSeverity::DS_Error
+ ? diag::err_fe_backend_error_attr
+ : diag::warn_fe_backend_warning_attr)
+ << llvm::demangle(D.getFunctionName()) << D.getNote();
+}
+
+void BackendConsumer::MisExpectDiagHandler(
+ const llvm::DiagnosticInfoMisExpect &D) {
+ StringRef Filename;
+ unsigned Line, Column;
+ bool BadDebugInfo = false;
+ FullSourceLoc Loc =
+ getBestLocationFromDebugLoc(D, BadDebugInfo, Filename, Line, Column);
+
+ Diags.Report(Loc, diag::warn_profile_data_misexpect) << D.getMsg().str();
+
+ if (BadDebugInfo)
+ // If we were not able to translate the file:line:col information
+ // back to a SourceLocation, at least emit a note stating that
+ // we could not translate this location. This can happen in the
+ // case of #line directives.
+ Diags.Report(Loc, diag::note_fe_backend_invalid_loc)
+ << Filename << Line << Column;
+}
+
/// This function is invoked when the backend needs
/// to report something to the user.
void BackendConsumer::DiagnosticHandlerImpl(const DiagnosticInfo &DI) {
@@ -778,12 +844,13 @@ void BackendConsumer::DiagnosticHandlerImpl(const DiagnosticInfo &DI) {
return;
ComputeDiagID(Severity, backend_frame_larger_than, DiagID);
break;
- case DK_Linker:
- assert(CurLinkModule);
- // FIXME: stop eating the warnings and notes.
- if (Severity != DS_Error)
+ case llvm::DK_ResourceLimit:
+ if (ResourceLimitDiagHandler(cast<DiagnosticInfoResourceLimit>(DI)))
return;
- DiagID = diag::err_fe_cannot_link_module;
+ ComputeDiagID(Severity, backend_resource_limit, DiagID);
+ break;
+ case DK_Linker:
+ ComputeDiagID(Severity, linking_module, DiagID);
break;
case llvm::DK_OptimizationRemark:
// Optimization remarks are always handled completely by this
@@ -833,6 +900,12 @@ void BackendConsumer::DiagnosticHandlerImpl(const DiagnosticInfo &DI) {
case llvm::DK_Unsupported:
UnsupportedDiagHandler(cast<DiagnosticInfoUnsupported>(DI));
return;
+ case llvm::DK_DontCall:
+ DontCallDiagHandler(cast<DiagnosticInfoDontCall>(DI));
+ return;
+ case llvm::DK_MisExpect:
+ MisExpectDiagHandler(cast<DiagnosticInfoMisExpect>(DI));
+ return;
default:
// Plugin IDs are not bound to any value as they are set dynamically.
ComputeDiagRemarkID(Severity, backend_plugin, DiagID);
@@ -845,9 +918,9 @@ void BackendConsumer::DiagnosticHandlerImpl(const DiagnosticInfo &DI) {
DI.print(DP);
}
- if (DiagID == diag::err_fe_cannot_link_module) {
- Diags.Report(diag::err_fe_cannot_link_module)
- << CurLinkModule->getModuleIdentifier() << MsgStorage;
+ if (DI.getKind() == DK_Linker) {
+ assert(CurLinkModule && "CurLinkModule must be set for linker diagnostics");
+ Diags.Report(DiagID) << CurLinkModule->getModuleIdentifier() << MsgStorage;
return;
}
@@ -867,6 +940,36 @@ CodeGenAction::~CodeGenAction() {
delete VMContext;
}
+bool CodeGenAction::loadLinkModules(CompilerInstance &CI) {
+ if (!LinkModules.empty())
+ return false;
+
+ for (const CodeGenOptions::BitcodeFileToLink &F :
+ CI.getCodeGenOpts().LinkBitcodeFiles) {
+ auto BCBuf = CI.getFileManager().getBufferForFile(F.Filename);
+ if (!BCBuf) {
+ CI.getDiagnostics().Report(diag::err_cannot_open_file)
+ << F.Filename << BCBuf.getError().message();
+ LinkModules.clear();
+ return true;
+ }
+
+ Expected<std::unique_ptr<llvm::Module>> ModuleOrErr =
+ getOwningLazyBitcodeModule(std::move(*BCBuf), *VMContext);
+ if (!ModuleOrErr) {
+ handleAllErrors(ModuleOrErr.takeError(), [&](ErrorInfoBase &EIB) {
+ CI.getDiagnostics().Report(diag::err_cannot_open_file)
+ << F.Filename << EIB.message();
+ });
+ LinkModules.clear();
+ return true;
+ }
+ LinkModules.push_back({std::move(ModuleOrErr.get()), F.PropagateAttrs,
+ F.Internalize, F.LinkFlags});
+ }
+ return false;
+}
+
bool CodeGenAction::hasIRSupport() const { return true; }
void CodeGenAction::EndSourceFileAction() {
@@ -922,30 +1025,8 @@ CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
return nullptr;
// Load bitcode modules to link with, if we need to.
- if (LinkModules.empty())
- for (const CodeGenOptions::BitcodeFileToLink &F :
- CI.getCodeGenOpts().LinkBitcodeFiles) {
- auto BCBuf = CI.getFileManager().getBufferForFile(F.Filename);
- if (!BCBuf) {
- CI.getDiagnostics().Report(diag::err_cannot_open_file)
- << F.Filename << BCBuf.getError().message();
- LinkModules.clear();
- return nullptr;
- }
-
- Expected<std::unique_ptr<llvm::Module>> ModuleOrErr =
- getOwningLazyBitcodeModule(std::move(*BCBuf), *VMContext);
- if (!ModuleOrErr) {
- handleAllErrors(ModuleOrErr.takeError(), [&](ErrorInfoBase &EIB) {
- CI.getDiagnostics().Report(diag::err_cannot_open_file)
- << F.Filename << EIB.message();
- });
- LinkModules.clear();
- return nullptr;
- }
- LinkModules.push_back({std::move(ModuleOrErr.get()), F.PropagateAttrs,
- F.Internalize, F.LinkFlags});
- }
+ if (loadLinkModules(CI))
+ return nullptr;
CoverageSourceInfo *CoverageInfo = nullptr;
// Add the preprocessor callback only when the coverage mapping is generated.
@@ -954,10 +1035,10 @@ CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
CI.getPreprocessor());
std::unique_ptr<BackendConsumer> Result(new BackendConsumer(
- BA, CI.getDiagnostics(), CI.getHeaderSearchOpts(),
- CI.getPreprocessorOpts(), CI.getCodeGenOpts(), CI.getTargetOpts(),
- CI.getLangOpts(), std::string(InFile), std::move(LinkModules),
- std::move(OS), *VMContext, CoverageInfo));
+ BA, CI.getDiagnostics(), &CI.getVirtualFileSystem(),
+ CI.getHeaderSearchOpts(), CI.getPreprocessorOpts(), CI.getCodeGenOpts(),
+ CI.getTargetOpts(), CI.getLangOpts(), std::string(InFile),
+ std::move(LinkModules), std::move(OS), *VMContext, CoverageInfo));
BEConsumer = Result.get();
// Enable generating macro debug info only when debug info is not disabled and
@@ -978,21 +1059,21 @@ CodeGenAction::loadModule(MemoryBufferRef MBRef) {
CompilerInstance &CI = getCompilerInstance();
SourceManager &SM = CI.getSourceManager();
+ auto DiagErrors = [&](Error E) -> std::unique_ptr<llvm::Module> {
+ unsigned DiagID =
+ CI.getDiagnostics().getCustomDiagID(DiagnosticsEngine::Error, "%0");
+ handleAllErrors(std::move(E), [&](ErrorInfoBase &EIB) {
+ CI.getDiagnostics().Report(DiagID) << EIB.message();
+ });
+ return {};
+ };
+
// For ThinLTO backend invocations, ensure that the context
// merges types based on ODR identifiers. We also need to read
// the correct module out of a multi-module bitcode file.
if (!CI.getCodeGenOpts().ThinLTOIndexFile.empty()) {
VMContext->enableDebugTypeODRUniquing();
- auto DiagErrors = [&](Error E) -> std::unique_ptr<llvm::Module> {
- unsigned DiagID =
- CI.getDiagnostics().getCustomDiagID(DiagnosticsEngine::Error, "%0");
- handleAllErrors(std::move(E), [&](ErrorInfoBase &EIB) {
- CI.getDiagnostics().Report(DiagID) << EIB.message();
- });
- return {};
- };
-
Expected<std::vector<BitcodeModule>> BMsOrErr = getBitcodeModuleList(MBRef);
if (!BMsOrErr)
return DiagErrors(BMsOrErr.takeError());
@@ -1013,10 +1094,39 @@ CodeGenAction::loadModule(MemoryBufferRef MBRef) {
return std::move(*MOrErr);
}
+ // Load bitcode modules to link with, if we need to.
+ if (loadLinkModules(CI))
+ return nullptr;
+
+ // Handle textual IR and bitcode file with one single module.
llvm::SMDiagnostic Err;
if (std::unique_ptr<llvm::Module> M = parseIR(MBRef, Err, *VMContext))
return M;
+ // If MBRef is a bitcode with multiple modules (e.g., -fsplit-lto-unit
+ // output), place the extra modules (actually only one, a regular LTO module)
+ // into LinkModules as if we are using -mlink-bitcode-file.
+ Expected<std::vector<BitcodeModule>> BMsOrErr = getBitcodeModuleList(MBRef);
+ if (BMsOrErr && BMsOrErr->size()) {
+ std::unique_ptr<llvm::Module> FirstM;
+ for (auto &BM : *BMsOrErr) {
+ Expected<std::unique_ptr<llvm::Module>> MOrErr =
+ BM.parseModule(*VMContext);
+ if (!MOrErr)
+ return DiagErrors(MOrErr.takeError());
+ if (FirstM)
+ LinkModules.push_back({std::move(*MOrErr), /*PropagateAttrs=*/false,
+ /*Internalize=*/false, /*LinkFlags=*/{}});
+ else
+ FirstM = std::move(*MOrErr);
+ }
+ if (FirstM)
+ return FirstM;
+ }
+ // If BMsOrErr fails, consume the error and use the error message from
+ // parseIR.
+ consumeError(BMsOrErr.takeError());
+
// Translate from the diagnostic info to the SourceManager location if
// available.
// TODO: Unify this with ConvertBackendLocation()
@@ -1029,8 +1139,7 @@ CodeGenAction::loadModule(MemoryBufferRef MBRef) {
// Strip off a leading diagnostic code if there is one.
StringRef Msg = Err.getMessage();
- if (Msg.startswith("error: "))
- Msg = Msg.substr(7);
+ Msg.consume_front("error: ");
unsigned DiagID =
CI.getDiagnostics().getCustomDiagID(DiagnosticsEngine::Error, "%0");
@@ -1051,13 +1160,13 @@ void CodeGenAction::ExecuteAction() {
auto &CodeGenOpts = CI.getCodeGenOpts();
auto &Diagnostics = CI.getDiagnostics();
std::unique_ptr<raw_pwrite_stream> OS =
- GetOutputStream(CI, getCurrentFile(), BA);
+ GetOutputStream(CI, getCurrentFileOrBufferName(), BA);
if (BA != Backend_EmitNothing && !OS)
return;
SourceManager &SM = CI.getSourceManager();
FileID FID = SM.getMainFileID();
- Optional<MemoryBufferRef> MainFile = SM.getBufferOrNone(FID);
+ std::optional<MemoryBufferRef> MainFile = SM.getBufferOrNone(FID);
if (!MainFile)
return;
@@ -1072,6 +1181,7 @@ void CodeGenAction::ExecuteAction() {
TheModule->setTargetTriple(TargetOpts.Triple);
}
+ EmbedObject(TheModule.get(), CodeGenOpts, Diagnostics);
EmbedBitcode(TheModule.get(), CodeGenOpts, *MainFile);
LLVMContext &Ctx = TheModule->getContext();
@@ -1086,10 +1196,16 @@ void CodeGenAction::ExecuteAction() {
// Set clang diagnostic handler. To do this we need to create a fake
// BackendConsumer.
- BackendConsumer Result(BA, CI.getDiagnostics(), CI.getHeaderSearchOpts(),
- CI.getPreprocessorOpts(), CI.getCodeGenOpts(),
- CI.getTargetOpts(), CI.getLangOpts(),
+ BackendConsumer Result(BA, CI.getDiagnostics(), &CI.getVirtualFileSystem(),
+ CI.getHeaderSearchOpts(), CI.getPreprocessorOpts(),
+ CI.getCodeGenOpts(), CI.getTargetOpts(),
+ CI.getLangOpts(), TheModule.get(),
std::move(LinkModules), *VMContext, nullptr);
+
+ // Link in each pending link module.
+ if (Result.LinkInModules(&*TheModule))
+ return;
+
// PR44896: Force DiscardValueNames as false. DiscardValueNames cannot be
// true here because the valued names are needed for reading textual IR.
Ctx.setDiscardValueNames(false);
@@ -1109,10 +1225,10 @@ void CodeGenAction::ExecuteAction() {
std::unique_ptr<llvm::ToolOutputFile> OptRecordFile =
std::move(*OptRecordFileOrErr);
- EmitBackendOutput(Diagnostics, CI.getHeaderSearchOpts(), CodeGenOpts,
- TargetOpts, CI.getLangOpts(),
- CI.getTarget().getDataLayoutString(), TheModule.get(), BA,
- std::move(OS));
+ EmitBackendOutput(
+ Diagnostics, CI.getHeaderSearchOpts(), CodeGenOpts, TargetOpts,
+ CI.getLangOpts(), CI.getTarget().getDataLayoutString(), TheModule.get(),
+ BA, CI.getFileManager().getVirtualFileSystemPtr(), std::move(OS));
if (OptRecordFile)
OptRecordFile->keep();
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
index a2384456ea94..2673e4a5cee7 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -16,6 +16,7 @@
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
+#include "CGHLSLRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
@@ -43,8 +44,11 @@
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CRC.h"
+#include "llvm/Support/xxhash.h"
#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
+#include <optional>
+
using namespace clang;
using namespace CodeGen;
@@ -78,7 +82,6 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
EHStack.setCGF(this);
SetFastMathFlags(CurFPFeatures);
- SetFPModel();
}
CodeGenFunction::~CodeGenFunction() {
@@ -105,19 +108,9 @@ clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) {
case LangOptions::FPE_Ignore: return llvm::fp::ebIgnore;
case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
case LangOptions::FPE_Strict: return llvm::fp::ebStrict;
+ default:
+ llvm_unreachable("Unsupported FP Exception Behavior");
}
- llvm_unreachable("Unsupported FP Exception Behavior");
-}
-
-void CodeGenFunction::SetFPModel() {
- llvm::RoundingMode RM = getLangOpts().getFPRoundingMode();
- auto fpExceptionBehavior = ToConstrainedExceptMD(
- getLangOpts().getFPExceptionMode());
-
- Builder.setDefaultConstrainedRounding(RM);
- Builder.setDefaultConstrainedExcept(fpExceptionBehavior);
- Builder.setIsFPConstrained(fpExceptionBehavior != llvm::fp::ebIgnore ||
- RM != llvm::RoundingMode::NearestTiesToEven);
}
void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
@@ -156,12 +149,11 @@ void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
FMFGuard.emplace(CGF.Builder);
- llvm::RoundingMode NewRoundingBehavior =
- static_cast<llvm::RoundingMode>(FPFeatures.getRoundingMode());
+ llvm::RoundingMode NewRoundingBehavior = FPFeatures.getRoundingMode();
CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
auto NewExceptionBehavior =
ToConstrainedExceptMD(static_cast<LangOptions::FPExceptionModeKind>(
- FPFeatures.getFPExceptionMode()));
+ FPFeatures.getExceptionMode()));
CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);
CGF.SetFastMathFlags(FPFeatures);
@@ -183,10 +175,11 @@ void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs());
mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs());
mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
- mergeFnAttrValue("unsafe-fp-math", FPFeatures.getAllowFPReassociate() &&
- FPFeatures.getAllowReciprocal() &&
- FPFeatures.getAllowApproxFunc() &&
- FPFeatures.getNoSignedZero());
+ mergeFnAttrValue(
+ "unsafe-fp-math",
+ FPFeatures.getAllowFPReassociate() && FPFeatures.getAllowReciprocal() &&
+ FPFeatures.getAllowApproxFunc() && FPFeatures.getNoSignedZero() &&
+ FPFeatures.allowFPContractAcrossStatement());
}
CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
@@ -199,8 +192,8 @@ LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
LValueBaseInfo BaseInfo;
TBAAAccessInfo TBAAInfo;
CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
- return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
- TBAAInfo);
+ Address Addr(V, ConvertTypeForMem(T), Alignment);
+ return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
}
/// Given a value of type T* that may not be to a complete object,
@@ -211,7 +204,8 @@ CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
TBAAAccessInfo TBAAInfo;
CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
/* forPointeeType= */ true);
- return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
+ Address Addr(V, ConvertTypeForMem(T), Align);
+ return MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
}
@@ -254,7 +248,7 @@ TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
case Type::Enum:
case Type::ObjCObjectPointer:
case Type::Pipe:
- case Type::ExtInt:
+ case Type::BitInt:
return TEK_Scalar;
// Complexes.
@@ -327,8 +321,10 @@ llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
if (!BB) return;
- if (!BB->use_empty())
- return CGF.CurFn->getBasicBlockList().push_back(BB);
+ if (!BB->use_empty()) {
+ CGF.CurFn->insert(CGF.CurFn->end(), BB);
+ return;
+ }
delete BB;
}
@@ -366,17 +362,18 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
bool HasOnlyLifetimeMarkers =
HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
+
+ std::optional<ApplyDebugLocation> OAL;
if (HasCleanups) {
// Make sure the line table doesn't jump back into the body for
// the ret after it's been at EndLoc.
- Optional<ApplyDebugLocation> AL;
if (CGDebugInfo *DI = getDebugInfo()) {
if (OnlySimpleReturnStmts)
DI->EmitLocation(Builder, EndLoc);
else
// We may not have a valid end location. Try to apply it anyway, and
// fall back to an artificial location if needed.
- AL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc);
+ OAL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc);
}
PopCleanupBlocks(PrologueCleanupDepth);
@@ -432,6 +429,14 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
AllocaInsertPt = nullptr;
Ptr->eraseFromParent();
+ // PostAllocaInsertPt, if created, was lazily created when it was required,
+ // remove it now since it was just created for our own convenience.
+ if (PostAllocaInsertPt) {
+ llvm::Instruction *PostPtr = PostAllocaInsertPt;
+ PostAllocaInsertPt = nullptr;
+ PostPtr->eraseFromParent();
+ }
+
// If someone took the address of a label but never did an indirect goto, we
// made a zero entry PHI node, which is illegal, zap it now.
if (IndirectBranch) {
@@ -479,28 +484,34 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
LargestVectorWidth =
std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getKnownMinSize());
+ VT->getPrimitiveSizeInBits().getKnownMinValue());
// Update vector width based on return type.
if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
LargestVectorWidth =
std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getKnownMinSize());
+ VT->getPrimitiveSizeInBits().getKnownMinValue());
+
+ if (CurFnInfo->getMaxVectorWidth() > LargestVectorWidth)
+ LargestVectorWidth = CurFnInfo->getMaxVectorWidth();
- // Add the required-vector-width attribute. This contains the max width from:
+ // Add the min-legal-vector-width attribute. This contains the max width from:
// 1. min-vector-width attribute used in the source program.
// 2. Any builtins used that have a vector width specified.
// 3. Values passed in and out of inline assembly.
// 4. Width of vector arguments and return types for this function.
- // 5. Width of vector aguments and return types for functions called by this
+ // 5. Width of vector arguments and return types for functions called by this
// function.
- CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));
+ if (getContext().getTargetInfo().getTriple().isX86())
+ CurFn->addFnAttr("min-legal-vector-width",
+ llvm::utostr(LargestVectorWidth));
- // Add vscale attribute if appropriate.
- if (getLangOpts().ArmSveVectorBits) {
- unsigned VScale = getLangOpts().ArmSveVectorBits / 128;
- CurFn->addFnAttr(llvm::Attribute::getWithVScaleRangeArgs(getLLVMContext(),
- VScale, VScale));
+ // Add vscale_range attribute if appropriate.
+ std::optional<std::pair<unsigned, unsigned>> VScaleRange =
+ getContext().getTargetInfo().getVScaleRange(getLangOpts());
+ if (VScaleRange) {
+ CurFn->addFnAttr(llvm::Attribute::getWithVScaleRangeArgs(
+ getLLVMContext(), VScaleRange->first, VScaleRange->second));
}
// If we generated an unreachable return block, delete it now.
@@ -529,6 +540,12 @@ bool CodeGenFunction::ShouldInstrumentFunction() {
return true;
}
+bool CodeGenFunction::ShouldSkipSanitizerInstrumentation() {
+ if (!CurFuncDecl)
+ return false;
+ return CurFuncDecl->hasAttr<DisableSanitizerInstrumentationAttr>();
+}
+
/// ShouldXRayInstrument - Return true if the current function should be
/// instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
@@ -551,52 +568,30 @@ bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
XRayInstrKind::Typed);
}
-llvm::Constant *
-CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
- llvm::Constant *Addr) {
- // Addresses stored in prologue data can't require run-time fixups and must
- // be PC-relative. Run-time fixups are undesirable because they necessitate
- // writable text segments, which are unsafe. And absolute addresses are
- // undesirable because they break PIE mode.
-
- // Add a layer of indirection through a private global. Taking its address
- // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
- auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
- /*isConstant=*/true,
- llvm::GlobalValue::PrivateLinkage, Addr);
-
- // Create a PC-relative address.
- auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
- auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
- auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
- return (IntPtrTy == Int32Ty)
- ? PCRelAsInt
- : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
+llvm::ConstantInt *
+CodeGenFunction::getUBSanFunctionTypeHash(QualType Ty) const {
+ // Remove any (C++17) exception specifications, to allow calling e.g. a
+ // noexcept function through a non-noexcept pointer.
+ if (!Ty->isFunctionNoProtoType())
+ Ty = getContext().getFunctionTypeWithExceptionSpec(Ty, EST_None);
+ std::string Mangled;
+ llvm::raw_string_ostream Out(Mangled);
+ CGM.getCXXABI().getMangleContext().mangleCanonicalTypeName(Ty, Out, false);
+ return llvm::ConstantInt::get(
+ CGM.Int32Ty, static_cast<uint32_t>(llvm::xxh3_64bits(Mangled)));
}
-llvm::Value *
-CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
- llvm::Value *EncodedAddr) {
- // Reconstruct the address of the global.
- auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
- auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
- auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
- auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");
-
- // Load the original pointer through the global.
- return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
- "decoded_addr");
-}
-
-void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
- llvm::Function *Fn)
-{
- if (!FD->hasAttr<OpenCLKernelAttr>())
+void CodeGenFunction::EmitKernelMetadata(const FunctionDecl *FD,
+ llvm::Function *Fn) {
+ if (!FD->hasAttr<OpenCLKernelAttr>() && !FD->hasAttr<CUDAGlobalAttr>())
return;
llvm::LLVMContext &Context = getLLVMContext();
- CGM.GenOpenCLArgMetadata(Fn, FD, this);
+ CGM.GenKernelArgMetadata(Fn, FD, this);
+
+ if (!getLangOpts().OpenCL)
+ return;
if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
QualType HintQTy = A->getTypeHint();
@@ -688,6 +683,19 @@ static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
return true;
}
+bool CodeGenFunction::isInAllocaArgument(CGCXXABI &ABI, QualType Ty) {
+ const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+ return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
+}
+
+bool CodeGenFunction::hasInAllocaArg(const CXXMethodDecl *MD) {
+ return getTarget().getTriple().getArch() == llvm::Triple::x86 &&
+ getTarget().getCXXABI().isMicrosoft() &&
+ llvm::any_of(MD->parameters(), [&](ParmVarDecl *P) {
+ return isInAllocaArgument(CGM.getCXXABI(), P->getType());
+ });
+}
+
/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
const FunctionDecl *FD) {
@@ -712,7 +720,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
CurCodeDecl = D;
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
if (FD && FD->usesSEHTry())
- CurSEHParent = FD;
+ CurSEHParent = GD;
CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
FnRetTy = RetTy;
CurFn = Fn;
@@ -731,44 +739,60 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
#include "clang/Basic/Sanitizers.def"
#undef SANITIZER
- } while (0);
+ } while (false);
if (D) {
+ const bool SanitizeBounds = SanOpts.hasOneOf(SanitizerKind::Bounds);
+ SanitizerMask no_sanitize_mask;
bool NoSanitizeCoverage = false;
- for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
- // Apply the no_sanitize* attributes to SanOpts.
- SanitizerMask mask = Attr->getMask();
- SanOpts.Mask &= ~mask;
- if (mask & SanitizerKind::Address)
- SanOpts.set(SanitizerKind::KernelAddress, false);
- if (mask & SanitizerKind::KernelAddress)
- SanOpts.set(SanitizerKind::Address, false);
- if (mask & SanitizerKind::HWAddress)
- SanOpts.set(SanitizerKind::KernelHWAddress, false);
- if (mask & SanitizerKind::KernelHWAddress)
- SanOpts.set(SanitizerKind::HWAddress, false);
-
+ for (auto *Attr : D->specific_attrs<NoSanitizeAttr>()) {
+ no_sanitize_mask |= Attr->getMask();
// SanitizeCoverage is not handled by SanOpts.
if (Attr->hasCoverage())
NoSanitizeCoverage = true;
}
+ // Apply the no_sanitize* attributes to SanOpts.
+ SanOpts.Mask &= ~no_sanitize_mask;
+ if (no_sanitize_mask & SanitizerKind::Address)
+ SanOpts.set(SanitizerKind::KernelAddress, false);
+ if (no_sanitize_mask & SanitizerKind::KernelAddress)
+ SanOpts.set(SanitizerKind::Address, false);
+ if (no_sanitize_mask & SanitizerKind::HWAddress)
+ SanOpts.set(SanitizerKind::KernelHWAddress, false);
+ if (no_sanitize_mask & SanitizerKind::KernelHWAddress)
+ SanOpts.set(SanitizerKind::HWAddress, false);
+
+ if (SanitizeBounds && !SanOpts.hasOneOf(SanitizerKind::Bounds))
+ Fn->addFnAttr(llvm::Attribute::NoSanitizeBounds);
+
if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage())
Fn->addFnAttr(llvm::Attribute::NoSanitizeCoverage);
+
+ // Some passes need the non-negated no_sanitize attribute. Pass them on.
+ if (CGM.getCodeGenOpts().hasSanitizeBinaryMetadata()) {
+ if (no_sanitize_mask & SanitizerKind::Thread)
+ Fn->addFnAttr("no_sanitize_thread");
+ }
}
- // Apply sanitizer attributes to the function.
- if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
- Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
- if (SanOpts.hasOneOf(SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress))
- Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
- if (SanOpts.has(SanitizerKind::MemTag))
- Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
- if (SanOpts.has(SanitizerKind::Thread))
- Fn->addFnAttr(llvm::Attribute::SanitizeThread);
- if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
- Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
+ if (ShouldSkipSanitizerInstrumentation()) {
+ CurFn->addFnAttr(llvm::Attribute::DisableSanitizerInstrumentation);
+ } else {
+ // Apply sanitizer attributes to the function.
+ if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
+ Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
+ if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
+ SanitizerKind::KernelHWAddress))
+ Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
+ if (SanOpts.has(SanitizerKind::MemtagStack))
+ Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
+ if (SanOpts.has(SanitizerKind::Thread))
+ Fn->addFnAttr(llvm::Attribute::SanitizeThread);
+ if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
+ Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
+ }
if (SanOpts.has(SanitizerKind::SafeStack))
Fn->addFnAttr(llvm::Attribute::SafeStack);
if (SanOpts.has(SanitizerKind::ShadowCallStack))
@@ -846,8 +870,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
auto FuncGroups = CGM.getCodeGenOpts().XRayTotalFunctionGroups;
if (FuncGroups > 1) {
- auto FuncName = llvm::makeArrayRef<uint8_t>(
- CurFn->getName().bytes_begin(), CurFn->getName().bytes_end());
+ auto FuncName = llvm::ArrayRef<uint8_t>(CurFn->getName().bytes_begin(),
+ CurFn->getName().bytes_end());
auto Group = crc32(FuncName) % FuncGroups;
if (Group != CGM.getCodeGenOpts().XRaySelectedFunctionGroup &&
!AlwaysXRayAttr)
@@ -855,9 +879,18 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
}
}
- if (CGM.getCodeGenOpts().getProfileInstr() != CodeGenOptions::ProfileNone)
- if (CGM.isProfileInstrExcluded(Fn, Loc))
+ if (CGM.getCodeGenOpts().getProfileInstr() != CodeGenOptions::ProfileNone) {
+ switch (CGM.isFunctionBlockedFromProfileInstr(Fn, Loc)) {
+ case ProfileList::Skip:
+ Fn->addFnAttr(llvm::Attribute::SkipProfile);
+ break;
+ case ProfileList::Forbid:
Fn->addFnAttr(llvm::Attribute::NoProfile);
+ break;
+ case ProfileList::Allow:
+ break;
+ }
+ }
unsigned Count, Offset;
if (const auto *Attr =
@@ -873,6 +906,15 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
if (Offset)
Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
}
+ // Instruct that functions for COFF/CodeView targets should start with a
+ // patchable instruction, but only on x86/x64. Don't forward this to ARM/ARM64
+ // backends as they don't need it -- instructions on these architectures are
+ // always atomically patchable at runtime.
+ if (CGM.getCodeGenOpts().HotPatch &&
+ getContext().getTargetInfo().getTriple().isX86() &&
+ getContext().getTargetInfo().getTriple().getEnvironment() !=
+ llvm::Triple::CODE16)
+ Fn->addFnAttr("patchable-function", "prologue-short-redirect");
// Add no-jump-tables value.
if (CGM.getCodeGenOpts().NoUseJumpTables)
@@ -895,34 +937,43 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
if (D && D->hasAttr<NoProfileFunctionAttr>())
Fn->addFnAttr(llvm::Attribute::NoProfile);
- if (FD && getLangOpts().OpenCL) {
+ if (D) {
+ // Function attributes take precedence over command line flags.
+ if (auto *A = D->getAttr<FunctionReturnThunksAttr>()) {
+ switch (A->getThunkType()) {
+ case FunctionReturnThunksAttr::Kind::Keep:
+ break;
+ case FunctionReturnThunksAttr::Kind::Extern:
+ Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
+ break;
+ }
+ } else if (CGM.getCodeGenOpts().FunctionReturnThunks)
+ Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
+ }
+
+ if (FD && (getLangOpts().OpenCL ||
+ (getLangOpts().HIP && getLangOpts().CUDAIsDevice))) {
// Add metadata for a kernel function.
- EmitOpenCLKernelMetadata(FD, Fn);
+ EmitKernelMetadata(FD, Fn);
}
// If we are checking function types, emit a function type signature as
// prologue data.
- if (FD && getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
+ if (FD && SanOpts.has(SanitizerKind::Function)) {
if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
- // Remove any (C++17) exception specifications, to allow calling e.g. a
- // noexcept function through a non-noexcept pointer.
- auto ProtoTy = getContext().getFunctionTypeWithExceptionSpec(
- FD->getType(), EST_None);
- llvm::Constant *FTRTTIConst =
- CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
- llvm::Constant *FTRTTIConstEncoded =
- EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
- llvm::Constant *PrologueStructElems[] = {PrologueSig, FTRTTIConstEncoded};
- llvm::Constant *PrologueStructConst =
- llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
- Fn->setPrologueData(PrologueStructConst);
+ llvm::LLVMContext &Ctx = Fn->getContext();
+ llvm::MDBuilder MDB(Ctx);
+ Fn->setMetadata(
+ llvm::LLVMContext::MD_func_sanitize,
+ MDB.createRTTIPointerPrologue(
+ PrologueSig, getUBSanFunctionTypeHash(FD->getType())));
}
}
// If we're checking nullability, we need to know whether we can check the
// return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
- auto Nullability = FnRetTy->getNullability(getContext());
+ auto Nullability = FnRetTy->getNullability();
if (Nullability && *Nullability == NullabilityKind::NonNull) {
if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
@@ -947,10 +998,16 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
(getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>())))
Fn->addFnAttr(llvm::Attribute::NoRecurse);
- if (FD) {
- Builder.setIsFPConstrained(FD->hasAttr<StrictFPAttr>());
- if (FD->hasAttr<StrictFPAttr>())
- Fn->addFnAttr(llvm::Attribute::StrictFP);
+ llvm::RoundingMode RM = getLangOpts().getDefaultRoundingMode();
+ llvm::fp::ExceptionBehavior FPExceptionBehavior =
+ ToConstrainedExceptMD(getLangOpts().getDefaultExceptionMode());
+ Builder.setDefaultConstrainedRounding(RM);
+ Builder.setDefaultConstrainedExcept(FPExceptionBehavior);
+ if ((FD && (FD->UsesFPIntrin() || FD->hasAttr<StrictFPAttr>())) ||
+ (!FD && (FPExceptionBehavior != llvm::fp::ebIgnore ||
+ RM != llvm::RoundingMode::NearestTiesToEven))) {
+ Builder.setIsFPConstrained(true);
+ Fn->addFnAttr(llvm::Attribute::StrictFP);
}
// If a custom alignment is used, force realigning to this alignment on
@@ -959,6 +1016,10 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
CGM.getCodeGenOpts().StackAlignment))
Fn->addFnAttr("stackrealign");
+ // "main" doesn't need to zero out call-used registers.
+ if (FD && FD->isMain())
+ Fn->removeFnAttr("zero-call-used-regs");
+
llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
// Create a marker to make it easy to insert allocas into the entryblock
@@ -975,7 +1036,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// precise source location of the checked return statement.
if (requiresReturnValueCheck()) {
ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
- InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy));
+ Builder.CreateStore(llvm::ConstantPointerNull::get(Int8PtrTy),
+ ReturnLocation);
}
// Emit subprogram debug descriptor.
@@ -983,16 +1045,9 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// Reconstruct the type from the argument list so that implicit parameters,
// such as 'this' and 'vtt', show up in the debug info. Preserve the calling
// convention.
- CallingConv CC = CallingConv::CC_C;
- if (FD)
- if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
- CC = SrcFnTy->getCallConv();
- SmallVector<QualType, 16> ArgTypes;
- for (const VarDecl *VD : Args)
- ArgTypes.push_back(VD->getType());
- QualType FnType = getContext().getFunctionType(
- RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
- DI->emitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk);
+ DI->emitFunctionStart(GD, Loc, StartLoc,
+ DI->getFunctionType(FD, RetTy, Args), CurFn,
+ CurFuncIsThunk);
}
if (ShouldInstrumentFunction()) {
@@ -1044,7 +1099,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
Fn->addFnAttr("packed-stack");
}
- if (CGM.getCodeGenOpts().WarnStackSize != UINT_MAX)
+ if (CGM.getCodeGenOpts().WarnStackSize != UINT_MAX &&
+ !CGM.getDiags().isIgnored(diag::warn_fe_backend_frame_larger_than, Loc))
Fn->addFnAttr("warn-stack-size",
std::to_string(CGM.getCodeGenOpts().WarnStackSize));
@@ -1061,13 +1117,13 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
auto AI = CurFn->arg_begin();
if (CurFnInfo->getReturnInfo().isSRetAfterThis())
++AI;
- ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
+ ReturnValue =
+ Address(&*AI, ConvertType(RetTy),
+ CurFnInfo->getReturnInfo().getIndirectAlign(), KnownNonNull);
if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
- ReturnValuePointer =
- CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
- Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast(
- ReturnValue.getPointer(), Int8PtrTy),
- ReturnValuePointer);
+ ReturnValuePointer = CreateDefaultAlignTempAlloca(
+ ReturnValue.getPointer()->getType(), "result.ptr");
+ Builder.CreateStore(ReturnValue.getPointer(), ReturnValuePointer);
}
} else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
!hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
@@ -1076,12 +1132,13 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
llvm::Function::arg_iterator EI = CurFn->arg_end();
--EI;
llvm::Value *Addr = Builder.CreateStructGEP(
- EI->getType()->getPointerElementType(), &*EI, Idx);
+ CurFnInfo->getArgStruct(), &*EI, Idx);
llvm::Type *Ty =
cast<llvm::GetElementPtrInst>(Addr)->getResultElementType();
- ReturnValuePointer = Address(Addr, getPointerAlign());
+ ReturnValuePointer = Address(Addr, Ty, getPointerAlign());
Addr = Builder.CreateAlignedLoad(Ty, Addr, getPointerAlign(), "agg.result");
- ReturnValue = Address(Addr, CGM.getNaturalTypeAlignment(RetTy));
+ ReturnValue = Address(Addr, ConvertType(RetTy),
+ CGM.getNaturalTypeAlignment(RetTy), KnownNonNull);
} else {
ReturnValue = CreateIRTemp(RetTy, "retval");
@@ -1102,13 +1159,19 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
if (getLangOpts().OpenMP && CurCodeDecl)
CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);
+ // Handle emitting HLSL entry functions.
+ if (D && D->hasAttr<HLSLShaderAttr>())
+ CGM.getHLSLRuntime().emitEntryFunction(FD, Fn);
+
EmitFunctionProlog(*CurFnInfo, CurFn, Args);
- if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
- CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
- if (MD->getParent()->isLambda() &&
- MD->getOverloadedOperator() == OO_Call) {
+ if (const CXXMethodDecl *MD = dyn_cast_if_present<CXXMethodDecl>(D);
+ MD && !MD->isStatic()) {
+ bool IsInLambda =
+ MD->getParent()->isLambda() && MD->getOverloadedOperator() == OO_Call;
+ if (MD->isImplicitObjectMemberFunction())
+ CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
+ if (IsInLambda) {
// We're in a lambda; figure out the captures.
MD->getParent()->getCaptureFields(LambdaCaptureFields,
LambdaThisCaptureField);
@@ -1138,7 +1201,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
VLASizeMap[VAT->getSizeExpr()] = ExprArg;
}
}
- } else {
+ } else if (MD->isImplicitObjectMemberFunction()) {
// Not in a lambda; just use 'this' from the method.
// FIXME: Should we generate a new load for each use of 'this'? The
// fast register allocator would be happier...
@@ -1151,11 +1214,10 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
SkippedChecks.set(SanitizerKind::ObjectSize, true);
QualType ThisTy = MD->getThisType();
- // If this is the call operator of a lambda with no capture-default, it
+ // If this is the call operator of a lambda with no captures, it
// may have a static invoker function, which may call this operator with
// a null 'this' pointer.
- if (isLambdaCallOperator(MD) &&
- MD->getParent()->getLambdaCaptureDefault() == LCD_None)
+ if (isLambdaCallOperator(MD) && MD->getParent()->isCapturelessLambda())
SkippedChecks.set(SanitizerKind::Null, true);
EmitTypeCheck(
@@ -1165,27 +1227,26 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
}
// If any of the arguments have a variably modified type, make sure to
- // emit the type size.
- for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
- i != e; ++i) {
- const VarDecl *VD = *i;
-
- // Dig out the type as written from ParmVarDecls; it's unclear whether
- // the standard (C99 6.9.1p10) requires this, but we're following the
- // precedent set by gcc.
- QualType Ty;
- if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
- Ty = PVD->getOriginalType();
- else
- Ty = VD->getType();
+ // emit the type size, but only if the function is not naked. Naked functions
+ // have no prolog to run this evaluation.
+ if (!FD || !FD->hasAttr<NakedAttr>()) {
+ for (const VarDecl *VD : Args) {
+ // Dig out the type as written from ParmVarDecls; it's unclear whether
+ // the standard (C99 6.9.1p10) requires this, but we're following the
+ // precedent set by gcc.
+ QualType Ty;
+ if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
+ Ty = PVD->getOriginalType();
+ else
+ Ty = VD->getType();
- if (Ty->isVariablyModifiedType())
- EmitVariablyModifiedType(Ty);
+ if (Ty->isVariablyModifiedType())
+ EmitVariablyModifiedType(Ty);
+ }
}
// Emit a location at the end of the prologue.
if (CGDebugInfo *DI = getDebugInfo())
DI->EmitLocation(Builder, StartLoc);
-
// TODO: Do we need to handle this in two places like we do with
// target-features/target-cpu?
if (CurFuncDecl)
@@ -1195,15 +1256,11 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
incrementProfileCounter(Body);
+ maybeCreateMCDCCondBitmap();
if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
EmitCompoundStmtWithoutScope(*S);
else
EmitStmt(Body);
-
- // This is checked after emitting the function body so we know if there
- // are any permitted infinite loops.
- if (checkIfFunctionMustProgress())
- CurFn->addFnAttr(llvm::Attribute::MustProgress);
}
/// When instrumenting to collect profile data, the counts for some blocks
@@ -1250,7 +1307,7 @@ QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
QualType ResTy = FD->getReturnType();
const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
- if (MD && MD->isInstance()) {
+ if (MD && MD->isImplicitObjectMemberFunction()) {
if (CGM.getCXXABI().HasThisReturn(GD))
ResTy = MD->getThisType();
else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
@@ -1275,7 +1332,7 @@ QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
auto *Implicit = ImplicitParamDecl::Create(
getContext(), Param->getDeclContext(), Param->getLocation(),
- /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
+ /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamKind::Other);
SizeArguments[Param] = Implicit;
Args.push_back(Implicit);
}
@@ -1289,18 +1346,53 @@ QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
const CGFunctionInfo &FnInfo) {
+ assert(Fn && "generating code for null Function");
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
CurGD = GD;
FunctionArgList Args;
QualType ResTy = BuildFunctionArgList(GD, Args);
+ if (FD->isInlineBuiltinDeclaration()) {
+ // When generating code for a builtin with an inline declaration, use a
+ // mangled name to hold the actual body, while keeping an external
+ // definition in case the function pointer is referenced somewhere.
+ std::string FDInlineName = (Fn->getName() + ".inline").str();
+ llvm::Module *M = Fn->getParent();
+ llvm::Function *Clone = M->getFunction(FDInlineName);
+ if (!Clone) {
+ Clone = llvm::Function::Create(Fn->getFunctionType(),
+ llvm::GlobalValue::InternalLinkage,
+ Fn->getAddressSpace(), FDInlineName, M);
+ Clone->addFnAttr(llvm::Attribute::AlwaysInline);
+ }
+ Fn->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ Fn = Clone;
+ } else {
+ // Detect the unusual situation where an inline version is shadowed by a
+ // non-inline version. In that case we should pick the external one
+ // everywhere. That's GCC behavior too. Unfortunately, I cannot find a way
+ // to detect that situation before we reach codegen, so do some late
+ // replacement.
+ for (const FunctionDecl *PD = FD->getPreviousDecl(); PD;
+ PD = PD->getPreviousDecl()) {
+ if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) {
+ std::string FDInlineName = (Fn->getName() + ".inline").str();
+ llvm::Module *M = Fn->getParent();
+ if (llvm::Function *Clone = M->getFunction(FDInlineName)) {
+ Clone->replaceAllUsesWith(Fn);
+ Clone->eraseFromParent();
+ }
+ break;
+ }
+ }
+ }
+
// Check if we should generate debug info for this function.
if (FD->hasAttr<NoDebugAttr>()) {
// Clear non-distinct debug info that was possibly attached to the function
// due to an earlier declaration without the nodebug attribute
- if (Fn)
- Fn->setSubprogram(nullptr);
+ Fn->setSubprogram(nullptr);
// Disable debug info indefinitely for this function
DebugInfo = nullptr;
}
@@ -1345,8 +1437,12 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
// Save parameters for coroutine function.
if (Body && isa_and_nonnull<CoroutineBodyStmt>(Body))
- for (const auto *ParamDecl : FD->parameters())
- FnArgs.push_back(ParamDecl);
+ llvm::append_range(FnArgs, FD->parameters());
+
+ // Ensure that the function adheres to the forward progress guarantee, which
+ // is required by certain optimizations.
+ if (checkIfFunctionMustProgress())
+ CurFn->addFnAttr(llvm::Attribute::MustProgress);
// Generate the body of the function.
PGO.assignRegionCounters(GD, CurFn);
@@ -1363,6 +1459,17 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
// The lambda static invoker function is special, because it forwards or
// clones the body of the function call operator (but is actually static).
EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
+ } else if (isa<CXXMethodDecl>(FD) &&
+ isLambdaCallOperator(cast<CXXMethodDecl>(FD)) &&
+ !FnInfo.isDelegateCall() &&
+ cast<CXXMethodDecl>(FD)->getParent()->getLambdaStaticInvoker() &&
+ hasInAllocaArg(cast<CXXMethodDecl>(FD))) {
+ // If emitting a lambda with static invoker on X86 Windows, change
+ // the call operator body.
+ // Make sure that this is a call operator with an inalloca arg and check
+ // for delegate call to make sure this is the original call op and not the
+ // new forwarding function for the static invoker.
+ EmitLambdaInAllocaCallOpBody(cast<CXXMethodDecl>(FD));
} else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
(cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
@@ -1390,7 +1497,7 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
llvm::Value *IsFalse = Builder.getFalse();
EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
SanitizerHandler::MissingReturn,
- EmitCheckSourceLocation(FD->getLocation()), None);
+ EmitCheckSourceLocation(FD->getLocation()), std::nullopt);
} else if (ShouldEmitUnreachable) {
if (CGM.getCodeGenOpts().OptimizationLevel == 0)
EmitTrapCall(llvm::Intrinsic::trap);
@@ -1495,6 +1602,13 @@ bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
bool &ResultBool,
bool AllowLabels) {
+ // If MC/DC is enabled, disable folding so that we can instrument all
+ // conditions to yield complete test vectors. We still keep track of
+ // folded conditions during region mapping and visualization.
+ if (!AllowLabels && CGM.getCodeGenOpts().hasProfileClangInstr() &&
+ CGM.getCodeGenOpts().MCDCCoverage)
+ return false;
+
llvm::APSInt ResultInt;
if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
return false;
@@ -1523,16 +1637,20 @@ bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
return true;
}
+/// Strip parentheses and simplistic logical-NOT operators.
+const Expr *CodeGenFunction::stripCond(const Expr *C) {
+ while (const UnaryOperator *Op = dyn_cast<UnaryOperator>(C->IgnoreParens())) {
+ if (Op->getOpcode() != UO_LNot)
+ break;
+ C = Op->getSubExpr();
+ }
+ return C->IgnoreParens();
+}
+
/// Determine whether the given condition is an instrumentable condition
/// (i.e. no "&&" or "||").
bool CodeGenFunction::isInstrumentedCondition(const Expr *C) {
- // Bypass simplistic logical-NOT operator before determining whether the
- // condition contains any other logical operator.
- if (const UnaryOperator *UnOp = dyn_cast<UnaryOperator>(C->IgnoreParens()))
- if (UnOp->getOpcode() == UO_LNot)
- C = UnOp->getSubExpr();
-
- const BinaryOperator *BOp = dyn_cast<BinaryOperator>(C->IgnoreParens());
+ const BinaryOperator *BOp = dyn_cast<BinaryOperator>(stripCond(C));
return (!BOp || !BOp->isLogicalOp());
}
@@ -1549,9 +1667,9 @@ void CodeGenFunction::EmitBranchToCounterBlock(
if (!InstrumentRegions || !isInstrumentedCondition(Cond))
return EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount, LH);
- llvm::BasicBlock *ThenBlock = NULL;
- llvm::BasicBlock *ElseBlock = NULL;
- llvm::BasicBlock *NextBlock = NULL;
+ llvm::BasicBlock *ThenBlock = nullptr;
+ llvm::BasicBlock *ElseBlock = nullptr;
+ llvm::BasicBlock *NextBlock = nullptr;
// Create the block we'll use to increment the appropriate counter.
llvm::BasicBlock *CounterIncrBlock = createBasicBlock("lop.rhscnt");
@@ -1611,17 +1729,19 @@ void CodeGenFunction::EmitBranchToCounterBlock(
/// statement) to the specified blocks. Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
/// \param LH The value of the likelihood attribute on the True branch.
-void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
- llvm::BasicBlock *TrueBlock,
- llvm::BasicBlock *FalseBlock,
- uint64_t TrueCount,
- Stmt::Likelihood LH) {
+/// \param ConditionalOp Used by MC/DC code coverage to track the result of the
+/// ConditionalOperator (ternary) through a recursive call for the operator's
+/// LHS and RHS nodes.
+void CodeGenFunction::EmitBranchOnBoolExpr(
+ const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock,
+ uint64_t TrueCount, Stmt::Likelihood LH, const Expr *ConditionalOp) {
Cond = Cond->IgnoreParens();
if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
-
// Handle X && Y in a condition.
if (CondBOp->getOpcode() == BO_LAnd) {
+ MCDCLogOpStack.push_back(CondBOp);
+
// If we have "1 && X", simplify the code. "0 && X" would have constant
// folded if the case was simple enough.
bool ConstantBool = false;
@@ -1629,8 +1749,10 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
ConstantBool) {
// br(1 && X) -> br(X).
incrementProfileCounter(CondBOp);
- return EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
- FalseBlock, TrueCount, LH);
+ EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
+ FalseBlock, TrueCount, LH);
+ MCDCLogOpStack.pop_back();
+ return;
}
// If we have "X && 1", simplify the code to use an uncond branch.
@@ -1638,8 +1760,10 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
ConstantBool) {
// br(X && 1) -> br(X).
- return EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LAnd, TrueBlock,
- FalseBlock, TrueCount, LH, CondBOp);
+ EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LAnd, TrueBlock,
+ FalseBlock, TrueCount, LH, CondBOp);
+ MCDCLogOpStack.pop_back();
+ return;
}
// Emit the LHS as a conditional. If the LHS conditional is false, we
@@ -1668,11 +1792,13 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
FalseBlock, TrueCount, LH);
eval.end(*this);
-
+ MCDCLogOpStack.pop_back();
return;
}
if (CondBOp->getOpcode() == BO_LOr) {
+ MCDCLogOpStack.push_back(CondBOp);
+
// If we have "0 || X", simplify the code. "1 || X" would have constant
// folded if the case was simple enough.
bool ConstantBool = false;
@@ -1680,8 +1806,10 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
!ConstantBool) {
// br(0 || X) -> br(X).
incrementProfileCounter(CondBOp);
- return EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock,
- FalseBlock, TrueCount, LH);
+ EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock,
+ FalseBlock, TrueCount, LH);
+ MCDCLogOpStack.pop_back();
+ return;
}
// If we have "X || 0", simplify the code to use an uncond branch.
@@ -1689,10 +1817,11 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
!ConstantBool) {
// br(X || 0) -> br(X).
- return EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LOr, TrueBlock,
- FalseBlock, TrueCount, LH, CondBOp);
+ EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LOr, TrueBlock,
+ FalseBlock, TrueCount, LH, CondBOp);
+ MCDCLogOpStack.pop_back();
+ return;
}
-
// Emit the LHS as a conditional. If the LHS conditional is true, we
// want to jump to the TrueBlock.
llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
@@ -1723,14 +1852,20 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
RHSCount, LH);
eval.end(*this);
-
+ MCDCLogOpStack.pop_back();
return;
}
}
if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
// br(!x, t, f) -> br(x, f, t)
- if (CondUOp->getOpcode() == UO_LNot) {
+ // Avoid doing this optimization when instrumenting a condition for MC/DC.
+ // LNot is taken as part of the condition for simplicity, and changing its
+ // sense negatively impacts test vector tracking.
+ bool MCDCCondition = CGM.getCodeGenOpts().hasProfileClangInstr() &&
+ CGM.getCodeGenOpts().MCDCCoverage &&
+ isInstrumentedCondition(Cond);
+ if (CondUOp->getOpcode() == UO_LNot && !MCDCCondition) {
// Negate the count.
uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
// The values of the enum are chosen to make this negation possible.
@@ -1770,14 +1905,14 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
{
ApplyDebugLocation DL(*this, Cond);
EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
- LHSScaledTrueCount, LH);
+ LHSScaledTrueCount, LH, CondOp);
}
cond.end(*this);
cond.begin(*this);
EmitBlock(RHSBlock);
EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
- TrueCount - LHSScaledTrueCount, LH);
+ TrueCount - LHSScaledTrueCount, LH, CondOp);
cond.end(*this);
return;
@@ -1800,6 +1935,21 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
CondV = EvaluateExprAsBool(Cond);
}
+ // If not at the top of the logical operator nest, update MCDC temp with the
+ // boolean result of the evaluated condition.
+ if (!MCDCLogOpStack.empty()) {
+ const Expr *MCDCBaseExpr = Cond;
+ // When a nested ConditionalOperator (ternary) is encountered in a boolean
+ // expression, MC/DC tracks the result of the ternary, and this is tied to
+ // the ConditionalOperator expression and not the ternary's LHS or RHS. If
+ // this is the case, the ConditionalOperator expression is passed through
+ // the ConditionalOp parameter and then used as the MCDC base expression.
+ if (ConditionalOp)
+ MCDCBaseExpr = ConditionalOp;
+
+ maybeUpdateMCDCCondBitmap(MCDCBaseExpr, CondV);
+ }
+
llvm::MDNode *Weights = nullptr;
llvm::MDNode *Unpredictable = nullptr;
@@ -1851,8 +2001,7 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
llvm::Value *baseSizeInChars
= llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
- Address begin =
- Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
+ Address begin = dest.withElementType(CGF.Int8Ty);
llvm::Value *end = Builder.CreateInBoundsGEP(
begin.getElementType(), begin.getPointer(), sizeInChars, "vla.end");
@@ -1871,7 +2020,7 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
dest.getAlignment().alignmentOfArrayElement(baseSize);
// memcpy the individual element bit-pattern.
- Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
+ Builder.CreateMemCpy(Address(cur, CGF.Int8Ty, curAlign), src, baseSizeInChars,
/*volatile*/ false);
// Go to the next element.
@@ -1896,9 +2045,8 @@ CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
}
}
- // Cast the dest ptr to the appropriate i8 pointer type.
if (DestPtr.getElementType() != Int8Ty)
- DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
+ DestPtr = DestPtr.withElementType(Int8Ty);
// Get size and alignment info for this aggregate.
CharUnits size = getContext().getTypeSizeInChars(Ty);
@@ -1943,8 +2091,7 @@ CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
NullConstant, Twine());
CharUnits NullAlign = DestPtr.getAlignment();
NullVariable->setAlignment(NullAlign.getAsAlign());
- Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
- NullAlign);
+ Address SrcPtr(NullVariable, Builder.getInt8Ty(), NullAlign);
if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
@@ -2058,11 +2205,12 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
}
llvm::Type *baseType = ConvertType(eltType);
- addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
+ addr = addr.withElementType(baseType);
} else {
// Create the actual GEP.
addr = Address(Builder.CreateInBoundsGEP(
addr.getElementType(), addr.getPointer(), gepIndices, "array.begin"),
+ ConvertTypeForMem(eltType),
addr.getAlignment());
}
@@ -2153,15 +2301,19 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
case Type::ConstantMatrix:
case Type::Record:
case Type::Enum:
- case Type::Elaborated:
+ case Type::Using:
case Type::TemplateSpecialization:
case Type::ObjCTypeParam:
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
- case Type::ExtInt:
+ case Type::BitInt:
llvm_unreachable("type class is never variably-modified!");
+ case Type::Elaborated:
+ type = cast<ElaboratedType>(ty)->getNamedType();
+ break;
+
case Type::Adjusted:
type = cast<AdjustedType>(ty)->getAdjustedType();
break;
@@ -2199,32 +2351,36 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
// Unknown size indication requires no size computation.
// Otherwise, evaluate and record it.
- if (const Expr *size = vat->getSizeExpr()) {
+ if (const Expr *sizeExpr = vat->getSizeExpr()) {
// It's possible that we might have emitted this already,
// e.g. with a typedef and a pointer to it.
- llvm::Value *&entry = VLASizeMap[size];
+ llvm::Value *&entry = VLASizeMap[sizeExpr];
if (!entry) {
- llvm::Value *Size = EmitScalarExpr(size);
+ llvm::Value *size = EmitScalarExpr(sizeExpr);
// C11 6.7.6.2p5:
// If the size is an expression that is not an integer constant
// expression [...] each time it is evaluated it shall have a value
// greater than zero.
- if (SanOpts.has(SanitizerKind::VLABound) &&
- size->getType()->isSignedIntegerType()) {
+ if (SanOpts.has(SanitizerKind::VLABound)) {
SanitizerScope SanScope(this);
- llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
+ llvm::Value *Zero = llvm::Constant::getNullValue(size->getType());
+ clang::QualType SEType = sizeExpr->getType();
+ llvm::Value *CheckCondition =
+ SEType->isSignedIntegerType()
+ ? Builder.CreateICmpSGT(size, Zero)
+ : Builder.CreateICmpUGT(size, Zero);
llvm::Constant *StaticArgs[] = {
- EmitCheckSourceLocation(size->getBeginLoc()),
- EmitCheckTypeDescriptor(size->getType())};
- EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
- SanitizerKind::VLABound),
- SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
+ EmitCheckSourceLocation(sizeExpr->getBeginLoc()),
+ EmitCheckTypeDescriptor(SEType)};
+ EmitCheck(std::make_pair(CheckCondition, SanitizerKind::VLABound),
+ SanitizerHandler::VLABoundNotPositive, StaticArgs, size);
}
// Always zexting here would be wrong if it weren't
// undefined behavior to have a negative bound.
- entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
+ // FIXME: What about when size's type is larger than size_t?
+ entry = Builder.CreateIntCast(size, SizeTy, /*signed*/ false);
}
}
type = vat->getElementType();
@@ -2240,6 +2396,7 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
case Type::TypeOf:
case Type::UnaryTransform:
case Type::Attributed:
+ case Type::BTFTagAttributed:
case Type::SubstTemplateTypeParm:
case Type::MacroQualified:
// Keep walking after single level desugaring.
@@ -2359,8 +2516,6 @@ void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
SourceLocation AssumptionLoc,
llvm::Value *Alignment,
llvm::Value *OffsetValue) {
- if (auto *CE = dyn_cast<CastExpr>(E))
- E = CE->getSubExprAsWritten();
QualType Ty = E->getType();
SourceLocation Loc = E->getExprLoc();
@@ -2375,8 +2530,8 @@ llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
const AnnotateAttr *Attr) {
SmallVector<llvm::Value *, 5> Args = {
AnnotatedVal,
- Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
- Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
+ CGM.EmitAnnotationString(AnnotationStr),
+ CGM.EmitAnnotationUnit(Location),
CGM.EmitAnnotationLineNo(Location),
};
if (Attr)
@@ -2386,12 +2541,10 @@ llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
- // FIXME We create a new bitcast for every annotation because that's what
- // llvm-gcc was doing.
for (const auto *I : D->specific_attrs<AnnotateAttr>())
- EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
- Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
- I->getAnnotation(), D->getLocation(), I);
+ EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation,
+ {V->getType(), CGM.ConstGlobalsPtrTy}),
+ V, I->getAnnotation(), D->getLocation(), I);
}
Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
@@ -2399,20 +2552,24 @@ Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
llvm::Value *V = Addr.getPointer();
llvm::Type *VTy = V->getType();
+ auto *PTy = dyn_cast<llvm::PointerType>(VTy);
+ unsigned AS = PTy ? PTy->getAddressSpace() : 0;
+ llvm::PointerType *IntrinTy =
+ llvm::PointerType::get(CGM.getLLVMContext(), AS);
llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
- CGM.Int8PtrTy);
+ {IntrinTy, CGM.ConstGlobalsPtrTy});
for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
// FIXME Always emit the cast inst so we can differentiate between
// annotation on the first field of a struct and annotation on the struct
// itself.
- if (VTy != CGM.Int8PtrTy)
- V = Builder.CreateBitCast(V, CGM.Int8PtrTy);
+ if (VTy != IntrinTy)
+ V = Builder.CreateBitCast(V, IntrinTy);
V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation(), I);
V = Builder.CreateBitCast(V, VTy);
}
- return Address(V, Addr.getAlignment());
+ return Address(V, Addr.getElementType(), Addr.getAlignment());
}
CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
@@ -2433,7 +2590,7 @@ void CodeGenFunction::InsertHelper(llvm::Instruction *I,
llvm::BasicBlock::iterator InsertPt) const {
LoopStack.InsertHelper(I);
if (IsSanitizerScope)
- CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
+ I->setNoSanitizeMetadata();
}
void CGBuilderInserter::InsertHelper(
@@ -2472,18 +2629,19 @@ void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
std::string MissingFeature;
llvm::StringMap<bool> CallerFeatureMap;
CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
+ // When compiling in HipStdPar mode we have to be conservative in rejecting
+ // target specific features in the FE, and defer the possible error to the
+ // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
+ // referenced by an accelerator executable function, we emit an error.
+ bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
if (BuiltinID) {
- StringRef FeatureList(
- CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
- // Return if the builtin doesn't have any required features.
- if (FeatureList.empty())
- return;
- assert(FeatureList.find(' ') == StringRef::npos &&
- "Space in feature list");
- TargetFeatures TF(CallerFeatureMap);
- if (!TF.hasRequiredFeatures(FeatureList))
+ StringRef FeatureList(CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
+ if (!Builtin::evaluateRequiredTargetFeatures(
+ FeatureList, CallerFeatureMap) && !IsHipStdPar) {
CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
- << TargetDecl->getDeclName() << FeatureList;
+ << TargetDecl->getDeclName()
+ << FeatureList;
+ }
} else if (!TargetDecl->isMultiVersion() &&
TargetDecl->hasAttr<TargetAttr>()) {
// Get the required features for the callee.
@@ -2512,9 +2670,20 @@ void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
return false;
}
return true;
- }))
+ }) && !IsHipStdPar)
CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
<< FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
+ } else if (!FD->isMultiVersion() && FD->hasAttr<TargetAttr>()) {
+ llvm::StringMap<bool> CalleeFeatureMap;
+ CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
+
+ for (const auto &F : CalleeFeatureMap) {
+ if (F.getValue() && (!CallerFeatureMap.lookup(F.getKey()) ||
+ !CallerFeatureMap.find(F.getKey())->getValue()) &&
+ !IsHipStdPar)
+ CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
+ << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey();
+ }
}
}
@@ -2527,12 +2696,41 @@ void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
CGM.getSanStats().create(IRB, SSK);
}
-llvm::Value *
-CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
+void CodeGenFunction::EmitKCFIOperandBundle(
+ const CGCallee &Callee, SmallVectorImpl<llvm::OperandBundleDef> &Bundles) {
+ const FunctionProtoType *FP =
+ Callee.getAbstractInfo().getCalleeFunctionProtoType();
+ if (FP)
+ Bundles.emplace_back("kcfi", CGM.CreateKCFITypeId(FP->desugar()));
+}
+
+llvm::Value *CodeGenFunction::FormAArch64ResolverCondition(
+ const MultiVersionResolverOption &RO) {
+ llvm::SmallVector<StringRef, 8> CondFeatures;
+ for (const StringRef &Feature : RO.Conditions.Features) {
+ // Form condition for features which are not yet enabled in target
+ if (!getContext().getTargetInfo().hasFeature(Feature))
+ CondFeatures.push_back(Feature);
+ }
+ if (!CondFeatures.empty()) {
+ return EmitAArch64CpuSupports(CondFeatures);
+ }
+ return nullptr;
+}
+
+llvm::Value *CodeGenFunction::FormX86ResolverCondition(
+ const MultiVersionResolverOption &RO) {
llvm::Value *Condition = nullptr;
- if (!RO.Conditions.Architecture.empty())
- Condition = EmitX86CpuIs(RO.Conditions.Architecture);
+ if (!RO.Conditions.Architecture.empty()) {
+ StringRef Arch = RO.Conditions.Architecture;
+ // If arch= specifies an x86-64 micro-architecture level, test the feature
+ // with __builtin_cpu_supports, otherwise use __builtin_cpu_is.
+ if (Arch.starts_with("x86-64"))
+ Condition = EmitX86CpuSupports({Arch});
+ else
+ Condition = EmitX86CpuIs(Arch);
+ }
if (!RO.Conditions.Features.empty()) {
llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
@@ -2552,9 +2750,8 @@ static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
return;
}
- llvm::SmallVector<llvm::Value *, 10> Args;
- llvm::for_each(Resolver->args(),
- [&](llvm::Argument &Arg) { Args.push_back(&Arg); });
+ llvm::SmallVector<llvm::Value *, 10> Args(
+ llvm::make_pointer_range(Resolver->args()));
llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
@@ -2567,8 +2764,72 @@ static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
void CodeGenFunction::EmitMultiVersionResolver(
llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
- assert(getContext().getTargetInfo().getTriple().isX86() &&
- "Only implemented for x86 targets");
+
+ llvm::Triple::ArchType ArchType =
+ getContext().getTargetInfo().getTriple().getArch();
+
+ switch (ArchType) {
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ EmitX86MultiVersionResolver(Resolver, Options);
+ return;
+ case llvm::Triple::aarch64:
+ EmitAArch64MultiVersionResolver(Resolver, Options);
+ return;
+
+ default:
+ assert(false && "Only implemented for x86 and AArch64 targets");
+ }
+}
+
+void CodeGenFunction::EmitAArch64MultiVersionResolver(
+ llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
+ assert(!Options.empty() && "No multiversion resolver options found");
+ assert(Options.back().Conditions.Features.size() == 0 &&
+ "Default case must be last");
+ bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
+ assert(SupportsIFunc &&
+ "Multiversion resolver requires target IFUNC support");
+ bool AArch64CpuInitialized = false;
+ llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
+
+ for (const MultiVersionResolverOption &RO : Options) {
+ Builder.SetInsertPoint(CurBlock);
+ llvm::Value *Condition = FormAArch64ResolverCondition(RO);
+
+ // The 'default' or 'all features enabled' case.
+ if (!Condition) {
+ CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
+ SupportsIFunc);
+ return;
+ }
+
+ if (!AArch64CpuInitialized) {
+ Builder.SetInsertPoint(CurBlock, CurBlock->begin());
+ EmitAArch64CpuInit();
+ AArch64CpuInitialized = true;
+ Builder.SetInsertPoint(CurBlock);
+ }
+
+ llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
+ CGBuilderTy RetBuilder(*this, RetBlock);
+ CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
+ SupportsIFunc);
+ CurBlock = createBasicBlock("resolver_else", Resolver);
+ Builder.CreateCondBr(Condition, RetBlock, CurBlock);
+ }
+
+ // If no default, emit an unreachable.
+ Builder.SetInsertPoint(CurBlock);
+ llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
+ TrapCall->setDoesNotReturn();
+ TrapCall->setDoesNotThrow();
+ Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+}
+
+void CodeGenFunction::EmitX86MultiVersionResolver(
+ llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
@@ -2579,7 +2840,7 @@ void CodeGenFunction::EmitMultiVersionResolver(
for (const MultiVersionResolverOption &RO : Options) {
Builder.SetInsertPoint(CurBlock);
- llvm::Value *Condition = FormResolverCondition(RO);
+ llvm::Value *Condition = FormX86ResolverCondition(RO);
// The 'default' or 'generic' case.
if (!Condition) {
@@ -2644,7 +2905,7 @@ void CodeGenFunction::emitAlignmentAssumptionCheck(
SanitizerScope SanScope(this);
if (!OffsetValue)
- OffsetValue = Builder.getInt1(0); // no offset.
+ OffsetValue = Builder.getInt1(false); // no offset.
llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
EmitCheckSourceLocation(SecondaryLoc),
@@ -2692,3 +2953,19 @@ CodeGenFunction::emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
}
llvm_unreachable("Unknown Likelihood");
}
+
+llvm::Value *CodeGenFunction::emitBoolVecConversion(llvm::Value *SrcVec,
+ unsigned NumElementsDst,
+ const llvm::Twine &Name) {
+ auto *SrcTy = cast<llvm::FixedVectorType>(SrcVec->getType());
+ unsigned NumElementsSrc = SrcTy->getNumElements();
+ if (NumElementsSrc == NumElementsDst)
+ return SrcVec;
+
+ std::vector<int> ShuffleMask(NumElementsDst, -1);
+ for (unsigned MaskIdx = 0;
+ MaskIdx < std::min<>(NumElementsDst, NumElementsSrc); ++MaskIdx)
+ ShuffleMask[MaskIdx] = MaskIdx;
+
+ return Builder.CreateShuffleVector(SrcVec, ShuffleMask, Name);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
index 4e087ce51e37..143ad64e8816 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
@@ -41,12 +41,12 @@
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"
+#include <optional>
namespace llvm {
class BasicBlock;
class LLVMContext;
class MDNode;
-class Module;
class SwitchInst;
class Twine;
class Value;
@@ -55,13 +55,11 @@ class CanonicalLoopInfo;
namespace clang {
class ASTContext;
-class BlockDecl;
class CXXDestructorDecl;
class CXXForRangeStmt;
class CXXTryStmt;
class Decl;
class LabelDecl;
-class EnumConstantDecl;
class FunctionDecl;
class FunctionProtoType;
class LabelStmt;
@@ -80,7 +78,6 @@ class ObjCAtSynchronizedStmt;
class ObjCAutoreleasePoolStmt;
class OMPUseDevicePtrClause;
class OMPUseDeviceAddrClause;
-class ReturnsNonNullAttr;
class SVETypeFlags;
class OMPExecutableDirective;
@@ -92,12 +89,10 @@ namespace CodeGen {
class CodeGenTypes;
class CGCallee;
class CGFunctionInfo;
-class CGRecordLayout;
class CGBlockInfo;
class CGCXXABI;
class BlockByrefHelpers;
class BlockByrefInfo;
-class BlockFlags;
class BlockFieldFlags;
class RegionCodeGenTy;
class TargetCodeGenInfo;
@@ -122,7 +117,7 @@ enum TypeEvaluationKind {
SANITIZER_CHECK(DivremOverflow, divrem_overflow, 0) \
SANITIZER_CHECK(DynamicTypeCacheMiss, dynamic_type_cache_miss, 0) \
SANITIZER_CHECK(FloatCastOverflow, float_cast_overflow, 0) \
- SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 1) \
+ SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 0) \
SANITIZER_CHECK(ImplicitConversion, implicit_conversion, 0) \
SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0) \
SANITIZER_CHECK(InvalidObjCCast, invalid_objc_cast, 0) \
@@ -182,6 +177,7 @@ template <> struct DominatingValue<Address> {
struct saved_type {
DominatingLLVMValue::saved_type SavedValue;
+ llvm::Type *ElementType;
CharUnits Alignment;
};
@@ -190,11 +186,11 @@ template <> struct DominatingValue<Address> {
}
static saved_type save(CodeGenFunction &CGF, type value) {
return { DominatingLLVMValue::save(CGF, value.getPointer()),
- value.getAlignment() };
+ value.getElementType(), value.getAlignment() };
}
static type restore(CodeGenFunction &CGF, saved_type value) {
return Address(DominatingLLVMValue::restore(CGF, value.SavedValue),
- value.Alignment);
+ value.ElementType, value.Alignment);
}
};
@@ -206,10 +202,11 @@ template <> struct DominatingValue<RValue> {
AggregateAddress, ComplexAddress };
llvm::Value *Value;
+ llvm::Type *ElementType;
unsigned K : 3;
unsigned Align : 29;
- saved_type(llvm::Value *v, Kind k, unsigned a = 0)
- : Value(v), K(k), Align(a) {}
+ saved_type(llvm::Value *v, llvm::Type *e, Kind k, unsigned a = 0)
+ : Value(v), ElementType(e), K(k), Align(a) {}
public:
static bool needsSaving(RValue value);
@@ -241,11 +238,10 @@ public:
/// A jump destination is an abstract label, branching to which may
/// require a jump out through normal cleanups.
struct JumpDest {
- JumpDest() : Block(nullptr), ScopeDepth(), Index(0) {}
- JumpDest(llvm::BasicBlock *Block,
- EHScopeStack::stable_iterator Depth,
+ JumpDest() : Block(nullptr), Index(0) {}
+ JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth,
unsigned Index)
- : Block(Block), ScopeDepth(Depth), Index(Index) {}
+ : Block(Block), ScopeDepth(Depth), Index(Index) {}
bool isValid() const { return Block != nullptr; }
llvm::BasicBlock *getBlock() const { return Block; }
@@ -291,6 +287,13 @@ public:
/// nest would extend.
SmallVector<llvm::CanonicalLoopInfo *, 4> OMPLoopNestStack;
+ /// Stack to track the Logical Operator recursion nest for MC/DC.
+ SmallVector<const BinaryOperator *, 16> MCDCLogOpStack;
+
+ /// Number of nested loop to be consumed by the last surrounding
+ /// loop-associated directive.
+ int ExpectedOMPLoopDepth = 0;
+
// CodeGen lambda for loops and support for ordered clause
typedef llvm::function_ref<void(CodeGenFunction &, const OMPLoopDirective &,
JumpDest)>
@@ -318,10 +321,10 @@ public:
/// CurFuncDecl - Holds the Decl for the current outermost
/// non-closure context.
- const Decl *CurFuncDecl;
+ const Decl *CurFuncDecl = nullptr;
/// CurCodeDecl - This is the inner-most code context, which includes blocks.
- const Decl *CurCodeDecl;
- const CGFunctionInfo *CurFnInfo;
+ const Decl *CurCodeDecl = nullptr;
+ const CGFunctionInfo *CurFnInfo = nullptr;
QualType FnRetTy;
llvm::Function *CurFn = nullptr;
@@ -333,6 +336,7 @@ public:
// in this header.
struct CGCoroInfo {
std::unique_ptr<CGCoroData> Data;
+ bool InSuspendBlock = false;
CGCoroInfo();
~CGCoroInfo();
};
@@ -342,6 +346,10 @@ public:
return CurCoro.Data != nullptr;
}
+ bool inSuspendBlock() const {
+ return isCoroutine() && CurCoro.InSuspendBlock;
+ }
+
/// CurGD - The GlobalDecl for the current function being compiled.
GlobalDecl CurGD;
@@ -375,6 +383,34 @@ public:
/// we prefer to insert allocas.
llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
+private:
+ /// PostAllocaInsertPt - This is a place in the prologue where code can be
+ /// inserted that will be dominated by all the static allocas. This helps
+ /// achieve two things:
+ /// 1. Contiguity of all static allocas (within the prologue) is maintained.
+ /// 2. All other prologue code (which are dominated by static allocas) do
+ /// appear in the source order immediately after all static allocas.
+ ///
+ /// PostAllocaInsertPt will be lazily created when it is *really* required.
+ llvm::AssertingVH<llvm::Instruction> PostAllocaInsertPt = nullptr;
+
+public:
+ /// Return PostAllocaInsertPt. If it is not yet created, then insert it
+ /// immediately after AllocaInsertPt.
+ llvm::Instruction *getPostAllocaInsertPoint() {
+ if (!PostAllocaInsertPt) {
+ assert(AllocaInsertPt &&
+ "Expected static alloca insertion point at function prologue");
+ assert(AllocaInsertPt->getParent()->isEntryBlock() &&
+ "EBB should be entry block of the current code gen function");
+ PostAllocaInsertPt = AllocaInsertPt->clone();
+ PostAllocaInsertPt->setName("postallocapt");
+ PostAllocaInsertPt->insertAfter(AllocaInsertPt);
+ }
+
+ return PostAllocaInsertPt;
+ }
+
/// API for captured statement code generation.
class CGCapturedStmtInfo {
public:
@@ -427,6 +463,11 @@ public:
/// Get the name of the capture helper.
virtual StringRef getHelperName() const { return "__captured_stmt"; }
+ /// Get the CaptureFields
+ llvm::SmallDenseMap<const VarDecl *, FieldDecl *> getCaptureFields() {
+ return CaptureFields;
+ }
+
private:
/// The kind of captured statement being generated.
CapturedRegionKind Kind;
@@ -467,7 +508,7 @@ public:
AbstractCallee(const FunctionDecl *FD) : CalleeDecl(FD) {}
AbstractCallee(const ObjCMethodDecl *OMD) : CalleeDecl(OMD) {}
bool hasFunctionDecl() const {
- return dyn_cast_or_null<FunctionDecl>(CalleeDecl);
+ return isa_and_nonnull<FunctionDecl>(CalleeDecl);
}
const Decl *getDecl() const { return CalleeDecl; }
unsigned getNumParams() const {
@@ -507,7 +548,7 @@ public:
/// potentially set the return value.
bool SawAsmBlock = false;
- const NamedDecl *CurSEHParent = nullptr;
+ GlobalDecl CurSEHParent;
/// True if the current function is an outlined SEH helper. This can be a
/// finally block or filter expression.
@@ -520,6 +561,12 @@ public:
/// True if the current statement has nomerge attribute.
bool InNoMergeAttributedStmt = false;
+ /// True if the current statement has noinline attribute.
+ bool InNoInlineAttributedStmt = false;
+
+ /// True if the current statement has always_inline attribute.
+ bool InAlwaysInlineAttributedStmt = false;
+
// The CallExpr within the current statement that the musttail attribute
// applies to. nullptr if there is no 'musttail' on the current statement.
const CallExpr *MustTailCall = nullptr;
@@ -532,7 +579,7 @@ public:
return false;
// C++11 and later guarantees that a thread eventually will do one of the
- // following (6.9.2.3.1 in C++11):
+ // following (C++11 [intro.multithread]p24 and C++17 [intro.progress]p1):
// - terminate,
// - make a call to a library I/O function,
// - perform an access through a volatile glvalue, or
@@ -571,7 +618,7 @@ public:
const CodeGen::CGBlockInfo *BlockInfo = nullptr;
llvm::Value *BlockPointer = nullptr;
- llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
+ llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
FieldDecl *LambdaThisCaptureField = nullptr;
/// A mapping from NRVO variables to the flags used to indicate
@@ -685,7 +732,7 @@ public:
FPOptions OldFPFeatures;
llvm::fp::ExceptionBehavior OldExcept;
llvm::RoundingMode OldRounding;
- Optional<CGBuilderTy::FastMathFlagGuard> FMFGuard;
+ std::optional<CGBuilderTy::FastMathFlagGuard> FMFGuard;
};
FPOptions CurFPFeatures;
@@ -704,11 +751,11 @@ public:
/// An i1 variable indicating whether or not the @finally is
/// running for an exception.
- llvm::AllocaInst *ForEHVar;
+ llvm::AllocaInst *ForEHVar = nullptr;
/// An i8* variable into which the exception pointer to rethrow
/// has been saved.
- llvm::AllocaInst *SavedExnVar;
+ llvm::AllocaInst *SavedExnVar = nullptr;
public:
void enter(CodeGenFunction &CGF, const Stmt *Finally,
@@ -1034,15 +1081,14 @@ public:
/// Enter a new OpenMP private scope.
explicit OMPPrivateScope(CodeGenFunction &CGF) : RunCleanupsScope(CGF) {}
- /// Registers \p LocalVD variable as a private and apply \p PrivateGen
- /// function for it to generate corresponding private variable. \p
- /// PrivateGen returns an address of the generated private variable.
+ /// Registers \p LocalVD variable as a private with \p Addr as the address
+ /// of the corresponding private variable. \p
+ /// PrivateGen is the address of the generated private variable.
/// \return true if the variable is registered as private, false if it has
/// been privatized already.
- bool addPrivate(const VarDecl *LocalVD,
- const llvm::function_ref<Address()> PrivateGen) {
+ bool addPrivate(const VarDecl *LocalVD, Address Addr) {
assert(PerformCleanup && "adding private to dead scope");
- return MappedVars.setVarAddr(CGF, LocalVD, PrivateGen());
+ return MappedVars.setVarAddr(CGF, LocalVD, Addr);
}
/// Privatizes local variables previously registered as private.
@@ -1057,7 +1103,7 @@ public:
void ForceCleanup() {
RunCleanupsScope::ForceCleanup();
- MappedVars.restore(CGF);
+ restoreMap();
}
/// Exit scope - all the mapped variables are restored.
@@ -1071,6 +1117,11 @@ public:
VD = VD->getCanonicalDecl();
return !VD->isLocalVarDeclOrParm() && CGF.LocalDeclMap.count(VD) > 0;
}
+
+ /// Restore all mapped variables w/o clean up. This is usefully when we want
+ /// to reference the original variables but don't want the clean up because
+ /// that could emit lifetime end too early, causing backend issue #56913.
+ void restoreMap() { MappedVars.restore(CGF); }
};
/// Save/restore original map of previously emitted local vars in case when we
@@ -1202,11 +1253,11 @@ public:
/// destroyed by aggressive peephole optimizations that assume that
/// all uses of a value have been realized in the IR.
class PeepholeProtection {
- llvm::Instruction *Inst;
+ llvm::Instruction *Inst = nullptr;
friend class CodeGenFunction;
public:
- PeepholeProtection() : Inst(nullptr) {}
+ PeepholeProtection() = default;
};
/// A non-RAII class containing all the information about a bound
@@ -1473,6 +1524,9 @@ private:
CodeGenPGO PGO;
+ /// Bitmap used by MC/DC to track condition outcomes of a boolean expression.
+ Address MCDCCondBitmapAddr = Address::invalid();
+
/// Calculate branch weights appropriate for PGO data
llvm::MDNode *createProfileWeights(uint64_t TrueCount,
uint64_t FalseCount) const;
@@ -1485,17 +1539,61 @@ public:
/// If \p StepV is null, the default increment is 1.
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV = nullptr) {
if (CGM.getCodeGenOpts().hasProfileClangInstr() &&
- !CurFn->hasFnAttribute(llvm::Attribute::NoProfile))
+ !CurFn->hasFnAttribute(llvm::Attribute::NoProfile) &&
+ !CurFn->hasFnAttribute(llvm::Attribute::SkipProfile))
PGO.emitCounterIncrement(Builder, S, StepV);
PGO.setCurrentStmt(S);
}
+ bool isMCDCCoverageEnabled() const {
+ return (CGM.getCodeGenOpts().hasProfileClangInstr() &&
+ CGM.getCodeGenOpts().MCDCCoverage &&
+ !CurFn->hasFnAttribute(llvm::Attribute::NoProfile));
+ }
+
+ /// Allocate a temp value on the stack that MCDC can use to track condition
+ /// results.
+ void maybeCreateMCDCCondBitmap() {
+ if (isMCDCCoverageEnabled()) {
+ PGO.emitMCDCParameters(Builder);
+ MCDCCondBitmapAddr =
+ CreateIRTemp(getContext().UnsignedIntTy, "mcdc.addr");
+ }
+ }
+
+ bool isBinaryLogicalOp(const Expr *E) const {
+ const BinaryOperator *BOp = dyn_cast<BinaryOperator>(E->IgnoreParens());
+ return (BOp && BOp->isLogicalOp());
+ }
+
+ /// Zero-init the MCDC temp value.
+ void maybeResetMCDCCondBitmap(const Expr *E) {
+ if (isMCDCCoverageEnabled() && isBinaryLogicalOp(E)) {
+ PGO.emitMCDCCondBitmapReset(Builder, E, MCDCCondBitmapAddr);
+ PGO.setCurrentStmt(E);
+ }
+ }
+
+ /// Increment the profiler's counter for the given expression by \p StepV.
+ /// If \p StepV is null, the default increment is 1.
+ void maybeUpdateMCDCTestVectorBitmap(const Expr *E) {
+ if (isMCDCCoverageEnabled() && isBinaryLogicalOp(E)) {
+ PGO.emitMCDCTestVectorBitmapUpdate(Builder, E, MCDCCondBitmapAddr);
+ PGO.setCurrentStmt(E);
+ }
+ }
+
+ /// Update the MCDC temp value with the condition's evaluated result.
+ void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val) {
+ if (isMCDCCoverageEnabled()) {
+ PGO.emitMCDCCondBitmapUpdate(Builder, E, MCDCCondBitmapAddr, Val);
+ PGO.setCurrentStmt(E);
+ }
+ }
+
/// Get the profiler's count for the given statement.
uint64_t getProfileCount(const Stmt *S) {
- Optional<uint64_t> Count = PGO.getStmtCount(S);
- if (!Count.hasValue())
- return 0;
- return *Count;
+ return PGO.getStmtCount(S).value_or(0);
}
/// Set the profiler's current count.
@@ -1754,33 +1852,55 @@ public:
}
/// Emit the body of an OMP region
- /// \param CGF The Codegen function this belongs to
- /// \param RegionBodyStmt The body statement for the OpenMP region being
- /// generated
- /// \param CodeGenIP Insertion point for generating the body code.
- /// \param FiniBB The finalization basic block
- static void EmitOMPRegionBody(CodeGenFunction &CGF,
- const Stmt *RegionBodyStmt,
- InsertPointTy CodeGenIP,
- llvm::BasicBlock &FiniBB) {
+ /// \param CGF The Codegen function this belongs to
+ /// \param RegionBodyStmt The body statement for the OpenMP region being
+ /// generated
+ /// \param AllocaIP Where to insert alloca instructions
+ /// \param CodeGenIP Where to insert the region code
+ /// \param RegionName Name to be used for new blocks
+ static void EmitOMPInlinedRegionBody(CodeGenFunction &CGF,
+ const Stmt *RegionBodyStmt,
+ InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP,
+ Twine RegionName);
+
+ static void EmitCaptureStmt(CodeGenFunction &CGF, InsertPointTy CodeGenIP,
+ llvm::BasicBlock &FiniBB, llvm::Function *Fn,
+ ArrayRef<llvm::Value *> Args) {
llvm::BasicBlock *CodeGenIPBB = CodeGenIP.getBlock();
if (llvm::Instruction *CodeGenIPBBTI = CodeGenIPBB->getTerminator())
CodeGenIPBBTI->eraseFromParent();
CGF.Builder.SetInsertPoint(CodeGenIPBB);
- CGF.EmitStmt(RegionBodyStmt);
+ if (Fn->doesNotThrow())
+ CGF.EmitNounwindRuntimeCall(Fn, Args);
+ else
+ CGF.EmitRuntimeCall(Fn, Args);
if (CGF.Builder.saveIP().isSet())
CGF.Builder.CreateBr(&FiniBB);
}
+ /// Emit the body of an OMP region that will be outlined in
+ /// OpenMPIRBuilder::finalize().
+ /// \param CGF The Codegen function this belongs to
+ /// \param RegionBodyStmt The body statement for the OpenMP region being
+ /// generated
+ /// \param AllocaIP Where to insert alloca instructions
+ /// \param CodeGenIP Where to insert the region code
+ /// \param RegionName Name to be used for new blocks
+ static void EmitOMPOutlinedRegionBody(CodeGenFunction &CGF,
+ const Stmt *RegionBodyStmt,
+ InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP,
+ Twine RegionName);
+
/// RAII for preserving necessary info during Outlined region body codegen.
class OutlinedRegionBodyRAII {
llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
CodeGenFunction::JumpDest OldReturnBlock;
- CGBuilderTy::InsertPoint IP;
CodeGenFunction &CGF;
public:
@@ -1791,7 +1911,6 @@ public:
"Must specify Insertion point for allocas of outlined function");
OldAllocaIP = CGF.AllocaInsertPt;
CGF.AllocaInsertPt = &*AllocaIP.getPoint();
- IP = CGF.Builder.saveIP();
OldReturnBlock = CGF.ReturnBlock;
CGF.ReturnBlock = CGF.getJumpDestInCurrentScope(&RetBB);
@@ -1800,7 +1919,6 @@ public:
~OutlinedRegionBodyRAII() {
CGF.AllocaInsertPt = OldAllocaIP;
CGF.ReturnBlock = OldReturnBlock;
- CGF.Builder.restoreIP(IP);
}
};
@@ -1897,6 +2015,9 @@ private:
/// Check if the return value of this function requires sanitization.
bool requiresReturnValueCheck() const;
+ bool isInAllocaArgument(CGCXXABI &ABI, QualType Ty);
+ bool hasInAllocaArg(const CXXMethodDecl *MD);
+
llvm::BasicBlock *TerminateLandingPad = nullptr;
llvm::BasicBlock *TerminateHandler = nullptr;
llvm::SmallVector<llvm::BasicBlock *, 2> TrapBBs;
@@ -1914,8 +2035,7 @@ private:
/// Add OpenCL kernel arg metadata and the kernel attribute metadata to
/// the function metadata.
- void EmitOpenCLKernelMetadata(const FunctionDecl *FD,
- llvm::Function *Fn);
+ void EmitKernelMetadata(const FunctionDecl *FD, llvm::Function *Fn);
public:
CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false);
@@ -1962,7 +2082,7 @@ public:
return getInvokeDestImpl();
}
- bool currentFunctionUsesSEHTry() const { return CurSEHParent != nullptr; }
+ bool currentFunctionUsesSEHTry() const { return !!CurSEHParent; }
const TargetInfo &getTarget() const { return Target; }
llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
@@ -2000,6 +2120,8 @@ public:
llvm::Value *CompletePtr,
QualType ElementType);
void pushStackRestore(CleanupKind kind, Address SPMem);
+ void pushKmpcAllocFree(CleanupKind Kind,
+ std::pair<llvm::Value *, llvm::Value *> AddrSizePair);
void emitDestroy(Address addr, QualType type, Destroyer *destroyer,
bool useEHCleanupForArray);
llvm::Function *generateDestroyHelper(Address addr, QualType type,
@@ -2160,10 +2282,17 @@ public:
void EmitBlockWithFallThrough(llvm::BasicBlock *BB, const Stmt *S);
void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
- CallArgList &CallArgs);
+ CallArgList &CallArgs,
+ const CGFunctionInfo *CallOpFnInfo = nullptr,
+ llvm::Constant *CallOpFn = nullptr);
void EmitLambdaBlockInvokeBody();
- void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
void EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD);
+ void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD,
+ CallArgList &CallArgs);
+ void EmitLambdaInAllocaImplFn(const CXXMethodDecl *CallOp,
+ const CGFunctionInfo **ImplFnInfo,
+ llvm::Function **ImplFn);
+ void EmitLambdaInAllocaCallOpBody(const CXXMethodDecl *MD);
void EmitLambdaVLACapture(const VariableArrayType *VAT, LValue LV) {
EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
}
@@ -2172,7 +2301,7 @@ public:
/// Emit the unified return block, trying to avoid its emission when
/// possible.
/// \return The debug location of the user written return statement if the
- /// return block is is avoided.
+ /// return block is avoided.
llvm::DebugLoc EmitReturnBlock();
/// FinishFunction - Complete IR generation of the current function. It is
@@ -2247,9 +2376,8 @@ public:
/// Derived is the presumed address of an object of type T after a
/// cast. If T is a polymorphic class type, emit a check that the virtual
/// table for Derived belongs to a class derived from T.
- void EmitVTablePtrCheckForCast(QualType T, llvm::Value *Derived,
- bool MayBeNull, CFITypeCheckKind TCK,
- SourceLocation Loc);
+ void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull,
+ CFITypeCheckKind TCK, SourceLocation Loc);
/// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
/// If vptr CFI is enabled, emit a check that VTable is valid.
@@ -2273,7 +2401,9 @@ public:
bool ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD);
/// Emit a type checked load from the given vtable.
- llvm::Value *EmitVTableTypeCheckedLoad(const CXXRecordDecl *RD, llvm::Value *VTable,
+ llvm::Value *EmitVTableTypeCheckedLoad(const CXXRecordDecl *RD,
+ llvm::Value *VTable,
+ llvm::Type *VTableTy,
uint64_t VTableByteOffset);
/// EnterDtorCleanups - Enter the cleanups necessary to complete the
@@ -2286,6 +2416,10 @@ public:
/// instrumented with __cyg_profile_func_* calls
bool ShouldInstrumentFunction();
+ /// ShouldSkipSanitizerInstrumentation - Return true if the current function
+ /// should not be instrumented with sanitizers.
+ bool ShouldSkipSanitizerInstrumentation();
+
/// ShouldXRayInstrument - Return true if the current function should be
/// instrumented with XRay nop sleds.
bool ShouldXRayInstrumentFunction() const;
@@ -2298,14 +2432,9 @@ public:
/// XRay typed event handling calls.
bool AlwaysEmitXRayTypedEvents() const;
- /// Encode an address into a form suitable for use in a function prologue.
- llvm::Constant *EncodeAddrForUseInPrologue(llvm::Function *F,
- llvm::Constant *Addr);
-
- /// Decode an address used in a function prologue, encoded by \c
- /// EncodeAddrForUseInPrologue.
- llvm::Value *DecodeAddrUsedInPrologue(llvm::Value *F,
- llvm::Value *EncodedAddr);
+ /// Return a type hash constant for a function instrumented by
+ /// -fsanitize=function.
+ llvm::ConstantInt *getUBSanFunctionTypeHash(QualType T) const;
/// EmitFunctionProlog - Emit the target specific LLVM code to load the
/// arguments for the given function. This is also responsible for naming the
@@ -2440,14 +2569,16 @@ public:
LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
AlignmentSource Source = AlignmentSource::Type) {
- return LValue::MakeAddr(Address(V, Alignment), T, getContext(),
- LValueBaseInfo(Source), CGM.getTBAAAccessInfo(T));
+ Address Addr(V, ConvertTypeForMem(T), Alignment);
+ return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
+ CGM.getTBAAAccessInfo(T));
}
- LValue MakeAddrLValue(llvm::Value *V, QualType T, CharUnits Alignment,
- LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo) {
- return LValue::MakeAddr(Address(V, Alignment), T, getContext(),
- BaseInfo, TBAAInfo);
+ LValue
+ MakeAddrLValueWithoutTBAA(Address Addr, QualType T,
+ AlignmentSource Source = AlignmentSource::Type) {
+ return LValue::MakeAddr(Addr, T, getContext(), LValueBaseInfo(Source),
+ TBAAAccessInfo());
}
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T);
@@ -2465,6 +2596,9 @@ public:
return EmitLoadOfReferenceLValue(RefLVal);
}
+ /// Load a pointer with type \p PtrTy stored at address \p Ptr.
+ /// Note that \p PtrTy is the type of the loaded pointer, not the addresses
+ /// it is loaded from.
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy,
LValueBaseInfo *BaseInfo = nullptr,
TBAAAccessInfo *TBAAInfo = nullptr);
@@ -2519,15 +2653,6 @@ public:
Address CreateDefaultAlignTempAlloca(llvm::Type *Ty,
const Twine &Name = "tmp");
- /// InitTempAlloca - Provide an initial value for the given alloca which
- /// will be observable at all locations in the function.
- ///
- /// The address should be something that was returned from one of
- /// the CreateTempAlloca or CreateMemTemp routines, and the
- /// initializer must be valid in the entry block (i.e. it must
- /// either be a constant or an argument value).
- void InitTempAlloca(Address Alloca, llvm::Value *Value);
-
/// CreateIRTemp - Create a temporary IR object of the given type, with
/// appropriate alignment. This routine should only be used when an temporary
/// value needs to be stored into an alloca (for example, to avoid explicit
@@ -2564,9 +2689,6 @@ public:
AggValueSlot::DoesNotOverlap);
}
- /// Emit a cast to void* in the appropriate address space.
- llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
-
/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *EvaluateExprAsBool(const Expr *E);
@@ -2828,7 +2950,7 @@ public:
AggValueSlot::Overlap_t Overlap,
SourceLocation Loc, bool NewPointerIsChecked);
- /// Emit assumption load for all bases. Requires to be be called only on
+ /// Emit assumption load for all bases. Requires to be called only on
/// most-derived class and not under construction of the object.
void EmitVTableAssumptionLoads(const CXXRecordDecl *ClassDecl, Address This);
@@ -2951,6 +3073,25 @@ public:
/// this expression is used as an lvalue, for instance in "&Arr[Idx]".
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
QualType IndexType, bool Accessed);
+ void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
+ llvm::Value *Index, QualType IndexType,
+ QualType IndexedType, bool Accessed);
+
+ // Find a struct's flexible array member. It may be embedded inside multiple
+ // sub-structs, but must still be the last field.
+ const FieldDecl *FindFlexibleArrayMemberField(ASTContext &Ctx,
+ const RecordDecl *RD,
+ StringRef Name,
+ uint64_t &Offset);
+
+ /// Find the FieldDecl specified in a FAM's "counted_by" attribute. Returns
+ /// \p nullptr if either the attribute or the field doesn't exist.
+ const FieldDecl *FindCountedByField(const FieldDecl *FD);
+
+ /// Build an expression accessing the "counted_by" field.
+ llvm::Value *EmitCountedByFieldExpr(const Expr *Base,
+ const FieldDecl *FAMDecl,
+ const FieldDecl *CountDecl);
llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre);
@@ -3083,15 +3224,18 @@ public:
class ParamValue {
llvm::Value *Value;
+ llvm::Type *ElementType;
unsigned Alignment;
- ParamValue(llvm::Value *V, unsigned A) : Value(V), Alignment(A) {}
+ ParamValue(llvm::Value *V, llvm::Type *T, unsigned A)
+ : Value(V), ElementType(T), Alignment(A) {}
public:
static ParamValue forDirect(llvm::Value *value) {
- return ParamValue(value, 0);
+ return ParamValue(value, nullptr, 0);
}
static ParamValue forIndirect(Address addr) {
assert(!addr.getAlignment().isZero());
- return ParamValue(addr.getPointer(), addr.getAlignment().getQuantity());
+ return ParamValue(addr.getPointer(), addr.getElementType(),
+ addr.getAlignment().getQuantity());
}
bool isIndirect() const { return Alignment != 0; }
@@ -3104,7 +3248,8 @@ public:
Address getIndirectAddress() const {
assert(isIndirect());
- return Address(Value, CharUnits::fromQuantity(Alignment));
+ return Address(Value, ElementType, CharUnits::fromQuantity(Alignment),
+ KnownNonNull);
}
};
@@ -3154,7 +3299,7 @@ public:
/// This function may clear the current insertion point; callers should use
/// EnsureInsertPoint if they wish to subsequently generate code without first
/// calling EmitBlock, EmitBranch, or EmitStmt.
- void EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs = None);
+ void EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs = std::nullopt);
/// EmitSimpleStmt - Try to emit a "simple" statement which does not
/// necessarily require an insertion point or debug information; typically
@@ -3182,10 +3327,10 @@ public:
void EmitIfStmt(const IfStmt &S);
void EmitWhileStmt(const WhileStmt &S,
- ArrayRef<const Attr *> Attrs = None);
- void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = None);
+ ArrayRef<const Attr *> Attrs = std::nullopt);
+ void EmitDoStmt(const DoStmt &S, ArrayRef<const Attr *> Attrs = std::nullopt);
void EmitForStmt(const ForStmt &S,
- ArrayRef<const Attr *> Attrs = None);
+ ArrayRef<const Attr *> Attrs = std::nullopt);
void EmitReturnStmt(const ReturnStmt &S);
void EmitDeclStmt(const DeclStmt &S);
void EmitBreakStmt(const BreakStmt &S);
@@ -3262,7 +3407,7 @@ public:
llvm::Value *ParentFP);
void EmitCXXForRangeStmt(const CXXForRangeStmt &S,
- ArrayRef<const Attr *> Attrs = None);
+ ArrayRef<const Attr *> Attrs = std::nullopt);
/// Controls insertion of cancellation exit blocks in worksharing constructs.
class OMPCancelStackRAII {
@@ -3339,10 +3484,12 @@ public:
OMPPrivateScope &PrivateScope);
void EmitOMPUseDevicePtrClause(
const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
- const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap);
+ const llvm::DenseMap<const ValueDecl *, llvm::Value *>
+ CaptureDeviceAddrMap);
void EmitOMPUseDeviceAddrClause(
const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
- const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap);
+ const llvm::DenseMap<const ValueDecl *, llvm::Value *>
+ CaptureDeviceAddrMap);
/// Emit code for copyin clause in \a D directive. The next code is
/// generated at the start of outlined functions for directives:
/// \code
@@ -3437,7 +3584,12 @@ public:
void EmitOMPTargetTaskBasedDirective(const OMPExecutableDirective &S,
const RegionCodeGenTy &BodyGen,
OMPTargetDataInfo &InputInfo);
-
+ void processInReduction(const OMPExecutableDirective &S,
+ OMPTaskDataTy &Data,
+ CodeGenFunction &CGF,
+ const CapturedStmt *CS,
+ OMPPrivateScope &Scope);
+ void EmitOMPMetaDirective(const OMPMetaDirective &S);
void EmitOMPParallelDirective(const OMPParallelDirective &S);
void EmitOMPSimdDirective(const OMPSimdDirective &S);
void EmitOMPTileDirective(const OMPTileDirective &S);
@@ -3456,6 +3608,7 @@ public:
void EmitOMPParallelMasterDirective(const OMPParallelMasterDirective &S);
void EmitOMPTaskDirective(const OMPTaskDirective &S);
void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S);
+ void EmitOMPErrorDirective(const OMPErrorDirective &S);
void EmitOMPBarrierDirective(const OMPBarrierDirective &S);
void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S);
void EmitOMPTaskgroupDirective(const OMPTaskgroupDirective &S);
@@ -3511,6 +3664,15 @@ public:
const OMPTargetTeamsDistributeParallelForSimdDirective &S);
void EmitOMPTargetTeamsDistributeSimdDirective(
const OMPTargetTeamsDistributeSimdDirective &S);
+ void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S);
+ void EmitOMPParallelGenericLoopDirective(const OMPLoopDirective &S);
+ void EmitOMPTargetParallelGenericLoopDirective(
+ const OMPTargetParallelGenericLoopDirective &S);
+ void EmitOMPTargetTeamsGenericLoopDirective(
+ const OMPTargetTeamsGenericLoopDirective &S);
+ void EmitOMPTeamsGenericLoopDirective(const OMPTeamsGenericLoopDirective &S);
+ void EmitOMPInteropDirective(const OMPInteropDirective &S);
+ void EmitOMPParallelMaskedDirective(const OMPParallelMaskedDirective &S);
/// Emit device code for the target directive.
static void EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
@@ -3549,6 +3711,16 @@ public:
CodeGenModule &CGM, StringRef ParentName,
const OMPTargetTeamsDistributeParallelForSimdDirective &S);
+ /// Emit device code for the target teams loop directive.
+ static void EmitOMPTargetTeamsGenericLoopDeviceFunction(
+ CodeGenModule &CGM, StringRef ParentName,
+ const OMPTargetTeamsGenericLoopDirective &S);
+
+ /// Emit device code for the target parallel loop directive.
+ static void EmitOMPTargetParallelGenericLoopDeviceFunction(
+ CodeGenModule &CGM, StringRef ParentName,
+ const OMPTargetParallelGenericLoopDirective &S);
+
static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
CodeGenModule &CGM, StringRef ParentName,
const OMPTargetTeamsDistributeParallelForDirective &S);
@@ -3703,8 +3875,13 @@ public:
/// an LLVM type of the same size of the lvalue's type. If the lvalue has a
/// variable length type, this is not possible.
///
- LValue EmitLValue(const Expr *E);
+ LValue EmitLValue(const Expr *E,
+ KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
+
+private:
+ LValue EmitLValueHelper(const Expr *E, KnownNonNull_t IsKnownNonNull);
+public:
/// Same as EmitLValue but additionally we generate checking code to
/// guard against undefined behavior. This is only suitable when we know
/// that the address will be used to access the object.
@@ -3853,6 +4030,7 @@ public:
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
LValue EmitInitListLValue(const InitListExpr *E);
+ void EmitIgnoredConditionalOperator(const AbstractConditionalOperator *E);
LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
LValue EmitCastLValue(const CastExpr *E);
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
@@ -3906,8 +4084,12 @@ public:
llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
+ llvm::Value *EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
LValue EmitLValueForField(LValue Base, const FieldDecl* Field);
LValue EmitLValueForLambdaField(const FieldDecl *Field);
+ LValue EmitLValueForLambdaField(const FieldDecl *Field,
+ llvm::Value *ThisValue);
/// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
/// if the Field is a reference, this will return the address of the reference
@@ -4051,10 +4233,9 @@ public:
RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
ReturnValueSlot ReturnValue);
- RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
- ReturnValueSlot ReturnValue);
- RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E,
- ReturnValueSlot ReturnValue);
+ RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E);
+ RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E);
+ RValue EmitOpenMPDevicePrintfCallExpr(const CallExpr *E);
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
const CallExpr *E, ReturnValueSlot ReturnValue);
@@ -4126,30 +4307,36 @@ public:
/// SVEBuiltinMemEltTy - Returns the memory element type for this memory
/// access builtin. Only required if it can't be inferred from the base
/// pointer operand.
- llvm::Type *SVEBuiltinMemEltTy(SVETypeFlags TypeFlags);
+ llvm::Type *SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags);
- SmallVector<llvm::Type *, 2> getSVEOverloadTypes(SVETypeFlags TypeFlags,
- llvm::Type *ReturnType,
- ArrayRef<llvm::Value *> Ops);
- llvm::Type *getEltType(SVETypeFlags TypeFlags);
+ SmallVector<llvm::Type *, 2>
+ getSVEOverloadTypes(const SVETypeFlags &TypeFlags, llvm::Type *ReturnType,
+ ArrayRef<llvm::Value *> Ops);
+ llvm::Type *getEltType(const SVETypeFlags &TypeFlags);
llvm::ScalableVectorType *getSVEType(const SVETypeFlags &TypeFlags);
- llvm::ScalableVectorType *getSVEPredType(SVETypeFlags TypeFlags);
- llvm::Value *EmitSVEAllTruePred(SVETypeFlags TypeFlags);
+ llvm::ScalableVectorType *getSVEPredType(const SVETypeFlags &TypeFlags);
+ llvm::Value *EmitSVETupleSetOrGet(const SVETypeFlags &TypeFlags,
+ llvm::Type *ReturnType,
+ ArrayRef<llvm::Value *> Ops);
+ llvm::Value *EmitSVETupleCreate(const SVETypeFlags &TypeFlags,
+ llvm::Type *ReturnType,
+ ArrayRef<llvm::Value *> Ops);
+ llvm::Value *EmitSVEAllTruePred(const SVETypeFlags &TypeFlags);
llvm::Value *EmitSVEDupX(llvm::Value *Scalar);
llvm::Value *EmitSVEDupX(llvm::Value *Scalar, llvm::Type *Ty);
llvm::Value *EmitSVEReinterpret(llvm::Value *Val, llvm::Type *Ty);
- llvm::Value *EmitSVEPMull(SVETypeFlags TypeFlags,
+ llvm::Value *EmitSVEPMull(const SVETypeFlags &TypeFlags,
llvm::SmallVectorImpl<llvm::Value *> &Ops,
unsigned BuiltinID);
- llvm::Value *EmitSVEMovl(SVETypeFlags TypeFlags,
+ llvm::Value *EmitSVEMovl(const SVETypeFlags &TypeFlags,
llvm::ArrayRef<llvm::Value *> Ops,
unsigned BuiltinID);
llvm::Value *EmitSVEPredicateCast(llvm::Value *Pred,
llvm::ScalableVectorType *VTy);
- llvm::Value *EmitSVEGatherLoad(SVETypeFlags TypeFlags,
+ llvm::Value *EmitSVEGatherLoad(const SVETypeFlags &TypeFlags,
llvm::SmallVectorImpl<llvm::Value *> &Ops,
unsigned IntID);
- llvm::Value *EmitSVEScatterStore(SVETypeFlags TypeFlags,
+ llvm::Value *EmitSVEScatterStore(const SVETypeFlags &TypeFlags,
llvm::SmallVectorImpl<llvm::Value *> &Ops,
unsigned IntID);
llvm::Value *EmitSVEMaskedLoad(const CallExpr *, llvm::Type *ReturnTy,
@@ -4158,19 +4345,45 @@ public:
llvm::Value *EmitSVEMaskedStore(const CallExpr *,
SmallVectorImpl<llvm::Value *> &Ops,
unsigned BuiltinID);
- llvm::Value *EmitSVEPrefetchLoad(SVETypeFlags TypeFlags,
+ llvm::Value *EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags,
SmallVectorImpl<llvm::Value *> &Ops,
unsigned BuiltinID);
- llvm::Value *EmitSVEGatherPrefetch(SVETypeFlags TypeFlags,
+ llvm::Value *EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags,
SmallVectorImpl<llvm::Value *> &Ops,
unsigned IntID);
- llvm::Value *EmitSVEStructLoad(SVETypeFlags TypeFlags,
- SmallVectorImpl<llvm::Value *> &Ops, unsigned IntID);
- llvm::Value *EmitSVEStructStore(SVETypeFlags TypeFlags,
+ llvm::Value *EmitSVEStructLoad(const SVETypeFlags &TypeFlags,
+ SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned IntID);
+ llvm::Value *EmitSVEStructStore(const SVETypeFlags &TypeFlags,
SmallVectorImpl<llvm::Value *> &Ops,
unsigned IntID);
+ /// FormSVEBuiltinResult - Returns the struct of scalable vectors as a wider
+ /// vector. It extracts the scalable vector from the struct and inserts into
+ /// the wider vector. This avoids the error when allocating space in llvm
+ /// for struct of scalable vectors if a function returns struct.
+ llvm::Value *FormSVEBuiltinResult(llvm::Value *Call);
+
llvm::Value *EmitAArch64SVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitSMELd1St1(const SVETypeFlags &TypeFlags,
+ llvm::SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned IntID);
+ llvm::Value *EmitSMEReadWrite(const SVETypeFlags &TypeFlags,
+ llvm::SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned IntID);
+ llvm::Value *EmitSMEZero(const SVETypeFlags &TypeFlags,
+ llvm::SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned IntID);
+ llvm::Value *EmitSMELdrStr(const SVETypeFlags &TypeFlags,
+ llvm::SmallVectorImpl<llvm::Value *> &Ops,
+ unsigned IntID);
+
+ void GetAArch64SVEProcessedOperands(unsigned BuiltinID, const CallExpr *E,
+ SmallVectorImpl<llvm::Value *> &Ops,
+ SVETypeFlags TypeFlags);
+
+ llvm::Value *EmitAArch64SMEBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
llvm::Triple::ArchType Arch);
llvm::Value *EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
@@ -4179,6 +4392,8 @@ public:
llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx,
+ const CallExpr *E);
llvm::Value *EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
@@ -4186,7 +4401,7 @@ public:
llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
llvm::Value *EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
ReturnValueSlot ReturnValue);
- bool ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
+ void ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
llvm::AtomicOrdering &AO,
llvm::SyncScope::ID &SSID);
@@ -4342,6 +4557,11 @@ public:
/// EmitLoadOfComplex - Load a complex number from the specified l-value.
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc);
+ ComplexPairTy EmitPromotedComplexExpr(const Expr *E, QualType PromotionType);
+ llvm::Value *EmitPromotedScalarExpr(const Expr *E, QualType PromotionType);
+ ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType);
+ ComplexPairTy EmitUnPromotedValue(ComplexPairTy result, QualType PromotionType);
+
Address emitAddrOfRealComponent(Address complex, QualType complexType);
Address emitAddrOfImagComponent(Address complex, QualType complexType);
@@ -4358,7 +4578,7 @@ public:
/// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
/// variable with global storage.
- void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
+ void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::GlobalVariable *GV,
bool PerformInit);
llvm::Function *createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor,
@@ -4374,6 +4594,11 @@ public:
void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::FunctionCallee fn,
llvm::Constant *addr);
+ /// Registers the dtor using 'llvm.global_dtors' for platforms that do not
+ /// support an 'atexit()' function.
+ void registerGlobalDtorWithLLVM(const VarDecl &D, llvm::FunctionCallee fn,
+ llvm::Constant *addr);
+
/// Call atexit() with function dtorStub.
void registerGlobalDtorWithAtExit(llvm::Constant *dtorStub);
@@ -4472,6 +4697,9 @@ public:
bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result,
bool AllowLabels = false);
+ /// Ignore parentheses and logical-NOT to track conditions consistently.
+ static const Expr *stripCond(const Expr *C);
+
/// isInstrumentedCondition - Determine whether the given condition is an
/// instrumentable condition (i.e. no "&&" or "||").
static bool isInstrumentedCondition(const Expr *C);
@@ -4494,7 +4722,8 @@ public:
/// evaluate to true based on PGO data.
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
llvm::BasicBlock *FalseBlock, uint64_t TrueCount,
- Stmt::Likelihood LH = Stmt::LH_None);
+ Stmt::Likelihood LH = Stmt::LH_None,
+ const Expr *ConditionalOp = nullptr);
/// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is
/// nonnull, if \p LHS is marked _Nonnull.
@@ -4509,7 +4738,7 @@ public:
/// \p SignedIndices indicates whether any of the GEP indices are signed.
/// \p IsSubtraction indicates whether the expression used to form the GEP
/// is a subtraction.
- llvm::Value *EmitCheckedInBoundsGEP(llvm::Value *Ptr,
+ llvm::Value *EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr,
ArrayRef<llvm::Value *> IdxList,
bool SignedIndices,
bool IsSubtraction,
@@ -4539,6 +4768,9 @@ public:
/// passing to a runtime sanitizer handler.
llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);
+ void EmitKCFIOperandBundle(const CGCallee &Callee,
+ SmallVectorImpl<llvm::OperandBundleDef> &Bundles);
+
/// Create a basic block that will either trap or call a handler function in
/// the UBSan runtime with the provided arguments, and create a conditional
/// branch to it.
@@ -4588,12 +4820,22 @@ public:
/// point operation, expressed as the maximum relative error in ulp.
void SetFPAccuracy(llvm::Value *Val, float Accuracy);
- /// SetFPModel - Control floating point behavior via fp-model settings.
- void SetFPModel();
+ /// Set the minimum required accuracy of the given sqrt operation
+ /// based on CodeGenOpts.
+ void SetSqrtFPAccuracy(llvm::Value *Val);
+
+ /// Set the minimum required accuracy of the given sqrt operation based on
+ /// CodeGenOpts.
+ void SetDivFPAccuracy(llvm::Value *Val);
/// Set the codegen fast-math flags.
void SetFastMathFlags(FPOptions FPFeatures);
+ // Truncate or extend a boolean vector to the requested number of elements.
+ llvm::Value *emitBoolVecConversion(llvm::Value *SrcVec,
+ unsigned NumElementsDst,
+ const llvm::Twine &Name = "");
+
private:
llvm::MDNode *getRangeForLoadFromType(QualType Ty);
void EmitReturnOfRValue(RValue RV, QualType Ty);
@@ -4623,13 +4865,14 @@ private:
SmallVectorImpl<llvm::Value *> &IRCallArgs,
unsigned &IRCallArgPos);
- llvm::Value* EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
- const Expr *InputExpr, std::string &ConstraintStr);
+ std::pair<llvm::Value *, llvm::Type *>
+ EmitAsmInput(const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr,
+ std::string &ConstraintStr);
- llvm::Value* EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
- LValue InputValue, QualType InputType,
- std::string &ConstraintStr,
- SourceLocation Loc);
+ std::pair<llvm::Value *, llvm::Type *>
+ EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info, LValue InputValue,
+ QualType InputType, std::string &ConstraintStr,
+ SourceLocation Loc);
/// Attempts to statically evaluate the object size of E. If that
/// fails, emits code to figure the size of E out for us. This is
@@ -4649,6 +4892,9 @@ private:
llvm::Value *EmittedE,
bool IsDynamic);
+ llvm::Value *emitFlexibleArrayMemberSize(const Expr *E, unsigned Type,
+ llvm::IntegerType *ResType);
+
void emitZeroOrPatternForAutoVarInit(QualType type, const VarDecl &D,
Address Loc);
@@ -4694,9 +4940,10 @@ public:
/// into the address of a local variable. In such a case, it's quite
/// reasonable to just ignore the returned alignment when it isn't from an
/// explicit source.
- Address EmitPointerWithAlignment(const Expr *Addr,
- LValueBaseInfo *BaseInfo = nullptr,
- TBAAAccessInfo *TBAAInfo = nullptr);
+ Address
+ EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo = nullptr,
+ TBAAAccessInfo *TBAAInfo = nullptr,
+ KnownNonNull_t IsKnownNonNull = NotKnownNonNull);
/// If \p E references a parameter with pass_object_size info or a constant
/// array size modifier, emit the object size divided by the size of \p EltTy.
@@ -4725,8 +4972,12 @@ public:
// last (if it exists).
void EmitMultiVersionResolver(llvm::Function *Resolver,
ArrayRef<MultiVersionResolverOption> Options);
-
- static uint64_t GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs);
+ void
+ EmitX86MultiVersionResolver(llvm::Function *Resolver,
+ ArrayRef<MultiVersionResolverOption> Options);
+ void
+ EmitAArch64MultiVersionResolver(llvm::Function *Resolver,
+ ArrayRef<MultiVersionResolverOption> Options);
private:
QualType getVarArgType(const Expr *Arg);
@@ -4743,81 +4994,15 @@ private:
llvm::Value *EmitX86CpuIs(StringRef CPUStr);
llvm::Value *EmitX86CpuSupports(const CallExpr *E);
llvm::Value *EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs);
- llvm::Value *EmitX86CpuSupports(uint64_t Mask);
+ llvm::Value *EmitX86CpuSupports(std::array<uint32_t, 4> FeatureMask);
llvm::Value *EmitX86CpuInit();
- llvm::Value *FormResolverCondition(const MultiVersionResolverOption &RO);
+ llvm::Value *FormX86ResolverCondition(const MultiVersionResolverOption &RO);
+ llvm::Value *EmitAArch64CpuInit();
+ llvm::Value *
+ FormAArch64ResolverCondition(const MultiVersionResolverOption &RO);
+ llvm::Value *EmitAArch64CpuSupports(ArrayRef<StringRef> FeatureStrs);
};
-/// TargetFeatures - This class is used to check whether the builtin function
-/// has the required tagert specific features. It is able to support the
-/// combination of ','(and), '|'(or), and '()'. By default, the priority of
-/// ',' is higher than that of '|' .
-/// E.g:
-/// A,B|C means the builtin function requires both A and B, or C.
-/// If we want the builtin function requires both A and B, or both A and C,
-/// there are two ways: A,B|A,C or A,(B|C).
-/// The FeaturesList should not contain spaces, and brackets must appear in
-/// pairs.
-class TargetFeatures {
- struct FeatureListStatus {
- bool HasFeatures;
- StringRef CurFeaturesList;
- };
-
- const llvm::StringMap<bool> &CallerFeatureMap;
-
- FeatureListStatus getAndFeatures(StringRef FeatureList) {
- int InParentheses = 0;
- bool HasFeatures = true;
- size_t SubexpressionStart = 0;
- for (size_t i = 0, e = FeatureList.size(); i < e; ++i) {
- char CurrentToken = FeatureList[i];
- switch (CurrentToken) {
- default:
- break;
- case '(':
- if (InParentheses == 0)
- SubexpressionStart = i + 1;
- ++InParentheses;
- break;
- case ')':
- --InParentheses;
- assert(InParentheses >= 0 && "Parentheses are not in pair");
- LLVM_FALLTHROUGH;
- case '|':
- case ',':
- if (InParentheses == 0) {
- if (HasFeatures && i != SubexpressionStart) {
- StringRef F = FeatureList.slice(SubexpressionStart, i);
- HasFeatures = CurrentToken == ')' ? hasRequiredFeatures(F)
- : CallerFeatureMap.lookup(F);
- }
- SubexpressionStart = i + 1;
- if (CurrentToken == '|') {
- return {HasFeatures, FeatureList.substr(SubexpressionStart)};
- }
- }
- break;
- }
- }
- assert(InParentheses == 0 && "Parentheses are not in pair");
- if (HasFeatures && SubexpressionStart != FeatureList.size())
- HasFeatures =
- CallerFeatureMap.lookup(FeatureList.substr(SubexpressionStart));
- return {HasFeatures, StringRef()};
- }
-
-public:
- bool hasRequiredFeatures(StringRef FeatureList) {
- FeatureListStatus FS = {false, FeatureList};
- while (!FS.HasFeatures && !FS.CurFeaturesList.empty())
- FS = getAndFeatures(FS.CurFeaturesList);
- return FS.HasFeatures;
- }
-
- TargetFeatures(const llvm::StringMap<bool> &CallerFeatureMap)
- : CallerFeatureMap(CallerFeatureMap) {}
-};
inline DominatingLLVMValue::saved_type
DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {
@@ -4825,9 +5010,9 @@ DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {
// Otherwise, we need an alloca.
auto align = CharUnits::fromQuantity(
- CGF.CGM.getDataLayout().getPrefTypeAlignment(value->getType()));
+ CGF.CGM.getDataLayout().getPrefTypeAlign(value->getType()));
Address alloca =
- CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
+ CGF.CreateTempAlloca(value->getType(), align, "cond-cleanup.save");
CGF.Builder.CreateStore(value, alloca);
return saved_type(alloca.getPointer(), true);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
index 49a1396b58e3..1280bcd36de9 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
@@ -11,28 +11,29 @@
//===----------------------------------------------------------------------===//
#include "CodeGenModule.h"
+#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGDebugInfo.h"
+#include "CGHLSLRuntime.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGOpenMPRuntime.h"
-#include "CGOpenMPRuntimeAMDGCN.h"
-#include "CGOpenMPRuntimeNVPTX.h"
+#include "CGOpenMPRuntimeGPU.h"
#include "CodeGenFunction.h"
#include "CodeGenPGO.h"
#include "ConstantEmitter.h"
#include "CoverageMappingGen.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTLambda.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Mangle.h"
-#include "clang/AST/RecordLayout.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
@@ -44,12 +45,15 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/Version.h"
+#include "clang/CodeGen/BackendUtil.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "clang/Frontend/FrontendDiagnostic.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
+#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
@@ -57,20 +61,24 @@
#include "llvm/IR/Module.h"
#include "llvm/IR/ProfileSummary.h"
#include "llvm/ProfileData/InstrProfReader.h"
+#include "llvm/ProfileData/SampleProf.h"
+#include "llvm/Support/CRC.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MD5.h"
#include "llvm/Support/TimeProfiler.h"
+#include "llvm/Support/xxhash.h"
+#include "llvm/TargetParser/Triple.h"
+#include "llvm/TargetParser/X86TargetParser.h"
+#include <optional>
using namespace clang;
using namespace CodeGen;
static llvm::cl::opt<bool> LimitedCoverage(
- "limited-coverage-experimental", llvm::cl::ZeroOrMore, llvm::cl::Hidden,
- llvm::cl::desc("Emit limited coverage mapping information (experimental)"),
- llvm::cl::init(false));
+ "limited-coverage-experimental", llvm::cl::Hidden,
+ llvm::cl::desc("Emit limited coverage mapping information (experimental)"));
static const char AnnotationSection[] = "llvm.metadata";
@@ -94,12 +102,237 @@ static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
llvm_unreachable("invalid C++ ABI kind");
}
-CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
+static std::unique_ptr<TargetCodeGenInfo>
+createTargetCodeGenInfo(CodeGenModule &CGM) {
+ const TargetInfo &Target = CGM.getTarget();
+ const llvm::Triple &Triple = Target.getTriple();
+ const CodeGenOptions &CodeGenOpts = CGM.getCodeGenOpts();
+
+ switch (Triple.getArch()) {
+ default:
+ return createDefaultTargetCodeGenInfo(CGM);
+
+ case llvm::Triple::le32:
+ return createPNaClTargetCodeGenInfo(CGM);
+ case llvm::Triple::m68k:
+ return createM68kTargetCodeGenInfo(CGM);
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ if (Triple.getOS() == llvm::Triple::NaCl)
+ return createPNaClTargetCodeGenInfo(CGM);
+ return createMIPSTargetCodeGenInfo(CGM, /*IsOS32=*/true);
+
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ return createMIPSTargetCodeGenInfo(CGM, /*IsOS32=*/false);
+
+ case llvm::Triple::avr: {
+ // For passing parameters, R8~R25 are used on avr, and R18~R25 are used
+ // on avrtiny. For passing return value, R18~R25 are used on avr, and
+ // R22~R25 are used on avrtiny.
+ unsigned NPR = Target.getABI() == "avrtiny" ? 6 : 18;
+ unsigned NRR = Target.getABI() == "avrtiny" ? 4 : 8;
+ return createAVRTargetCodeGenInfo(CGM, NPR, NRR);
+ }
+
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_32:
+ case llvm::Triple::aarch64_be: {
+ AArch64ABIKind Kind = AArch64ABIKind::AAPCS;
+ if (Target.getABI() == "darwinpcs")
+ Kind = AArch64ABIKind::DarwinPCS;
+ else if (Triple.isOSWindows())
+ return createWindowsAArch64TargetCodeGenInfo(CGM, AArch64ABIKind::Win64);
+
+ return createAArch64TargetCodeGenInfo(CGM, Kind);
+ }
+
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64: {
+ WebAssemblyABIKind Kind = WebAssemblyABIKind::MVP;
+ if (Target.getABI() == "experimental-mv")
+ Kind = WebAssemblyABIKind::ExperimentalMV;
+ return createWebAssemblyTargetCodeGenInfo(CGM, Kind);
+ }
+
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb: {
+ if (Triple.getOS() == llvm::Triple::Win32)
+ return createWindowsARMTargetCodeGenInfo(CGM, ARMABIKind::AAPCS_VFP);
+
+ ARMABIKind Kind = ARMABIKind::AAPCS;
+ StringRef ABIStr = Target.getABI();
+ if (ABIStr == "apcs-gnu")
+ Kind = ARMABIKind::APCS;
+ else if (ABIStr == "aapcs16")
+ Kind = ARMABIKind::AAPCS16_VFP;
+ else if (CodeGenOpts.FloatABI == "hard" ||
+ (CodeGenOpts.FloatABI != "soft" &&
+ (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
+ Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
+ Triple.getEnvironment() == llvm::Triple::EABIHF)))
+ Kind = ARMABIKind::AAPCS_VFP;
+
+ return createARMTargetCodeGenInfo(CGM, Kind);
+ }
+
+ case llvm::Triple::ppc: {
+ if (Triple.isOSAIX())
+ return createAIXTargetCodeGenInfo(CGM, /*Is64Bit=*/false);
+
+ bool IsSoftFloat =
+ CodeGenOpts.FloatABI == "soft" || Target.hasFeature("spe");
+ return createPPC32TargetCodeGenInfo(CGM, IsSoftFloat);
+ }
+ case llvm::Triple::ppcle: {
+ bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
+ return createPPC32TargetCodeGenInfo(CGM, IsSoftFloat);
+ }
+ case llvm::Triple::ppc64:
+ if (Triple.isOSAIX())
+ return createAIXTargetCodeGenInfo(CGM, /*Is64Bit=*/true);
+
+ if (Triple.isOSBinFormatELF()) {
+ PPC64_SVR4_ABIKind Kind = PPC64_SVR4_ABIKind::ELFv1;
+ if (Target.getABI() == "elfv2")
+ Kind = PPC64_SVR4_ABIKind::ELFv2;
+ bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
+
+ return createPPC64_SVR4_TargetCodeGenInfo(CGM, Kind, IsSoftFloat);
+ }
+ return createPPC64TargetCodeGenInfo(CGM);
+ case llvm::Triple::ppc64le: {
+ assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
+ PPC64_SVR4_ABIKind Kind = PPC64_SVR4_ABIKind::ELFv2;
+ if (Target.getABI() == "elfv1")
+ Kind = PPC64_SVR4_ABIKind::ELFv1;
+ bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
+
+ return createPPC64_SVR4_TargetCodeGenInfo(CGM, Kind, IsSoftFloat);
+ }
+
+ case llvm::Triple::nvptx:
+ case llvm::Triple::nvptx64:
+ return createNVPTXTargetCodeGenInfo(CGM);
+
+ case llvm::Triple::msp430:
+ return createMSP430TargetCodeGenInfo(CGM);
+
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64: {
+ StringRef ABIStr = Target.getABI();
+ unsigned XLen = Target.getPointerWidth(LangAS::Default);
+ unsigned ABIFLen = 0;
+ if (ABIStr.ends_with("f"))
+ ABIFLen = 32;
+ else if (ABIStr.ends_with("d"))
+ ABIFLen = 64;
+ bool EABI = ABIStr.ends_with("e");
+ return createRISCVTargetCodeGenInfo(CGM, XLen, ABIFLen, EABI);
+ }
+
+ case llvm::Triple::systemz: {
+ bool SoftFloat = CodeGenOpts.FloatABI == "soft";
+ bool HasVector = !SoftFloat && Target.getABI() == "vector";
+ return createSystemZTargetCodeGenInfo(CGM, HasVector, SoftFloat);
+ }
+
+ case llvm::Triple::tce:
+ case llvm::Triple::tcele:
+ return createTCETargetCodeGenInfo(CGM);
+
+ case llvm::Triple::x86: {
+ bool IsDarwinVectorABI = Triple.isOSDarwin();
+ bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();
+
+ if (Triple.getOS() == llvm::Triple::Win32) {
+ return createWinX86_32TargetCodeGenInfo(
+ CGM, IsDarwinVectorABI, IsWin32FloatStructABI,
+ CodeGenOpts.NumRegisterParameters);
+ }
+ return createX86_32TargetCodeGenInfo(
+ CGM, IsDarwinVectorABI, IsWin32FloatStructABI,
+ CodeGenOpts.NumRegisterParameters, CodeGenOpts.FloatABI == "soft");
+ }
+
+ case llvm::Triple::x86_64: {
+ StringRef ABI = Target.getABI();
+ X86AVXABILevel AVXLevel = (ABI == "avx512" ? X86AVXABILevel::AVX512
+ : ABI == "avx" ? X86AVXABILevel::AVX
+ : X86AVXABILevel::None);
+
+ switch (Triple.getOS()) {
+ case llvm::Triple::Win32:
+ return createWinX86_64TargetCodeGenInfo(CGM, AVXLevel);
+ default:
+ return createX86_64TargetCodeGenInfo(CGM, AVXLevel);
+ }
+ }
+ case llvm::Triple::hexagon:
+ return createHexagonTargetCodeGenInfo(CGM);
+ case llvm::Triple::lanai:
+ return createLanaiTargetCodeGenInfo(CGM);
+ case llvm::Triple::r600:
+ return createAMDGPUTargetCodeGenInfo(CGM);
+ case llvm::Triple::amdgcn:
+ return createAMDGPUTargetCodeGenInfo(CGM);
+ case llvm::Triple::sparc:
+ return createSparcV8TargetCodeGenInfo(CGM);
+ case llvm::Triple::sparcv9:
+ return createSparcV9TargetCodeGenInfo(CGM);
+ case llvm::Triple::xcore:
+ return createXCoreTargetCodeGenInfo(CGM);
+ case llvm::Triple::arc:
+ return createARCTargetCodeGenInfo(CGM);
+ case llvm::Triple::spir:
+ case llvm::Triple::spir64:
+ return createCommonSPIRTargetCodeGenInfo(CGM);
+ case llvm::Triple::spirv32:
+ case llvm::Triple::spirv64:
+ return createSPIRVTargetCodeGenInfo(CGM);
+ case llvm::Triple::ve:
+ return createVETargetCodeGenInfo(CGM);
+ case llvm::Triple::csky: {
+ bool IsSoftFloat = !Target.hasFeature("hard-float-abi");
+ bool hasFP64 =
+ Target.hasFeature("fpuv2_df") || Target.hasFeature("fpuv3_df");
+ return createCSKYTargetCodeGenInfo(CGM, IsSoftFloat ? 0
+ : hasFP64 ? 64
+ : 32);
+ }
+ case llvm::Triple::bpfeb:
+ case llvm::Triple::bpfel:
+ return createBPFTargetCodeGenInfo(CGM);
+ case llvm::Triple::loongarch32:
+ case llvm::Triple::loongarch64: {
+ StringRef ABIStr = Target.getABI();
+ unsigned ABIFRLen = 0;
+ if (ABIStr.ends_with("f"))
+ ABIFRLen = 32;
+ else if (ABIStr.ends_with("d"))
+ ABIFRLen = 64;
+ return createLoongArchTargetCodeGenInfo(
+ CGM, Target.getPointerWidth(LangAS::Default), ABIFRLen);
+ }
+ }
+}
+
+const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
+ if (!TheTargetCodeGenInfo)
+ TheTargetCodeGenInfo = createTargetCodeGenInfo(*this);
+ return *TheTargetCodeGenInfo;
+}
+
+CodeGenModule::CodeGenModule(ASTContext &C,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
+ const HeaderSearchOptions &HSO,
const PreprocessorOptions &PPO,
const CodeGenOptions &CGO, llvm::Module &M,
DiagnosticsEngine &diags,
CoverageSourceInfo *CoverageInfo)
- : Context(C), LangOpts(C.getLangOpts()), HeaderSearchOpts(HSO),
+ : Context(C), LangOpts(C.getLangOpts()), FS(FS), HeaderSearchOpts(HSO),
PreprocessorOpts(PPO), CodeGenOpts(CGO), TheModule(M), Diags(diags),
Target(C.getTargetInfo()), ABI(createCXXABI(*this)),
VMContext(M.getContext()), Types(*this), VTables(*this),
@@ -116,9 +349,10 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
BFloatTy = llvm::Type::getBFloatTy(LLVMContext);
FloatTy = llvm::Type::getFloatTy(LLVMContext);
DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
- PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
+ PointerWidthInBits = C.getTargetInfo().getPointerWidth(LangAS::Default);
PointerAlignInBytes =
- C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(0)).getQuantity();
+ C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(LangAS::Default))
+ .getQuantity();
SizeSizeInBytes =
C.toCharUnitsFromBits(C.getTargetInfo().getMaxPointerWidth()).getQuantity();
IntAlignInBytes =
@@ -128,12 +362,23 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
IntPtrTy = llvm::IntegerType::get(LLVMContext,
C.getTargetInfo().getMaxPointerWidth());
- Int8PtrTy = Int8Ty->getPointerTo(0);
- Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
- AllocaInt8PtrTy = Int8Ty->getPointerTo(
- M.getDataLayout().getAllocaAddrSpace());
+ Int8PtrTy = llvm::PointerType::get(LLVMContext, 0);
+ const llvm::DataLayout &DL = M.getDataLayout();
+ AllocaInt8PtrTy =
+ llvm::PointerType::get(LLVMContext, DL.getAllocaAddrSpace());
+ GlobalsInt8PtrTy =
+ llvm::PointerType::get(LLVMContext, DL.getDefaultGlobalsAddressSpace());
+ ConstGlobalsPtrTy = llvm::PointerType::get(
+ LLVMContext, C.getTargetAddressSpace(GetGlobalConstantAddressSpace()));
ASTAllocaAddressSpace = getTargetCodeGenInfo().getASTAllocaAddressSpace();
+ // Build C++20 Module initializers.
+ // TODO: Add Microsoft here once we know the mangling required for the
+ // initializers.
+ CXX20ModuleInits =
+ LangOpts.CPlusPlusModules && getCXXABI().getMangleContext().getKind() ==
+ ItaniumMangleContext::MK_Itanium;
+
RuntimeCC = getTargetCodeGenInfo().getABIInfo().getRuntimeCC();
if (LangOpts.ObjC)
@@ -144,6 +389,8 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
createOpenMPRuntime();
if (LangOpts.CUDA)
createCUDARuntime();
+ if (LangOpts.HLSL)
+ createHLSLRuntime();
// Enable TBAA unless it's suppressed. ThreadSanitizer needs TBAA even at O0.
if (LangOpts.Sanitize.has(SanitizerKind::Thread) ||
@@ -153,8 +400,9 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
// If debug info or coverage generation is enabled, create the CGDebugInfo
// object.
- if (CodeGenOpts.getDebugInfo() != codegenoptions::NoDebugInfo ||
- CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes)
+ if (CodeGenOpts.getDebugInfo() != llvm::codegenoptions::NoDebugInfo ||
+ CodeGenOpts.CoverageNotesFile.size() ||
+ CodeGenOpts.CoverageDataFile.size())
DebugInfo.reset(new CGDebugInfo(*this));
Block.GlobalUniqueCount = 0;
@@ -164,16 +412,13 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
if (CodeGenOpts.hasProfileClangUse()) {
auto ReaderOrErr = llvm::IndexedInstrProfReader::create(
- CodeGenOpts.ProfileInstrumentUsePath, CodeGenOpts.ProfileRemappingFile);
- if (auto E = ReaderOrErr.takeError()) {
- unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
- "Could not read profile %0: %1");
- llvm::handleAllErrors(std::move(E), [&](const llvm::ErrorInfoBase &EI) {
- getDiags().Report(DiagID) << CodeGenOpts.ProfileInstrumentUsePath
- << EI.message();
- });
- } else
- PGOReader = std::move(ReaderOrErr.get());
+ CodeGenOpts.ProfileInstrumentUsePath, *FS,
+ CodeGenOpts.ProfileRemappingFile);
+ // We're checking for profile read errors in CompilerInvocation, so if
+ // there was an error it should've already been caught. If it hasn't been
+ // somehow, trip an assertion.
+ assert(ReaderOrErr);
+ PGOReader = std::move(ReaderOrErr.get());
}
// If coverage mapping generation is enabled, create the
@@ -191,22 +436,7 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
Path = Entry.second + Path.substr(Entry.first.size());
break;
}
- llvm::MD5 Md5;
- Md5.update(Path);
- llvm::MD5::MD5Result R;
- Md5.final(R);
- SmallString<32> Str;
- llvm::MD5::stringifyResult(R, Str);
- // Convert MD5hash to Decimal. Demangler suffixes can either contain
- // numbers or characters but not both.
- llvm::APInt IntHash(128, Str.str(), 16);
- // Prepend "__uniq" before the hash for tools like profilers to understand
- // that this symbol is of internal linkage type. The "__uniq" is the
- // pre-determined prefix that is used to tell tools that this symbol was
- // created with -funique-internal-linakge-symbols and the tools can strip or
- // keep the prefix as needed.
- ModuleNameHash = (Twine(".__uniq.") +
- Twine(toString(IntHash, /* Radix = */ 10, /* Signed = */false))).str();
+ ModuleNameHash = llvm::getUniqueInternalLinkagePostfix(Path);
}
}
@@ -242,14 +472,10 @@ void CodeGenModule::createOpenMPRuntime() {
switch (getTriple().getArch()) {
case llvm::Triple::nvptx:
case llvm::Triple::nvptx64:
- assert(getLangOpts().OpenMPIsDevice &&
- "OpenMP NVPTX is only prepared to deal with device code.");
- OpenMPRuntime.reset(new CGOpenMPRuntimeNVPTX(*this));
- break;
case llvm::Triple::amdgcn:
- assert(getLangOpts().OpenMPIsDevice &&
- "OpenMP AMDGCN is only prepared to deal with device code.");
- OpenMPRuntime.reset(new CGOpenMPRuntimeAMDGCN(*this));
+ assert(getLangOpts().OpenMPIsTargetDevice &&
+ "OpenMP AMDGPU/NVPTX is only prepared to deal with device code.");
+ OpenMPRuntime.reset(new CGOpenMPRuntimeGPU(*this));
break;
default:
if (LangOpts.OpenMPSimd)
@@ -264,13 +490,17 @@ void CodeGenModule::createCUDARuntime() {
CUDARuntime.reset(CreateNVCUDARuntime(*this));
}
+void CodeGenModule::createHLSLRuntime() {
+ HLSLRuntime.reset(new CGHLSLRuntime(*this));
+}
+
void CodeGenModule::addReplacement(StringRef Name, llvm::Constant *C) {
Replacements[Name] = C;
}
void CodeGenModule::applyReplacements() {
for (auto &I : Replacements) {
- StringRef MangledName = I.first();
+ StringRef MangledName = I.first;
llvm::Constant *Replacement = I.second;
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (!Entry)
@@ -315,22 +545,83 @@ void CodeGenModule::applyGlobalValReplacements() {
// This is only used in aliases that we created and we know they have a
// linear structure.
-static const llvm::GlobalObject *getAliasedGlobal(
- const llvm::GlobalIndirectSymbol &GIS) {
- llvm::SmallPtrSet<const llvm::GlobalIndirectSymbol*, 4> Visited;
- const llvm::Constant *C = &GIS;
- for (;;) {
- C = C->stripPointerCasts();
- if (auto *GO = dyn_cast<llvm::GlobalObject>(C))
- return GO;
- // stripPointerCasts will not walk over weak aliases.
- auto *GIS2 = dyn_cast<llvm::GlobalIndirectSymbol>(C);
- if (!GIS2)
- return nullptr;
- if (!Visited.insert(GIS2).second)
- return nullptr;
- C = GIS2->getIndirectSymbol();
+static const llvm::GlobalValue *getAliasedGlobal(const llvm::GlobalValue *GV) {
+ const llvm::Constant *C;
+ if (auto *GA = dyn_cast<llvm::GlobalAlias>(GV))
+ C = GA->getAliasee();
+ else if (auto *GI = dyn_cast<llvm::GlobalIFunc>(GV))
+ C = GI->getResolver();
+ else
+ return GV;
+
+ const auto *AliaseeGV = dyn_cast<llvm::GlobalValue>(C->stripPointerCasts());
+ if (!AliaseeGV)
+ return nullptr;
+
+ const llvm::GlobalValue *FinalGV = AliaseeGV->getAliaseeObject();
+ if (FinalGV == GV)
+ return nullptr;
+
+ return FinalGV;
+}
+
+static bool checkAliasedGlobal(
+ const ASTContext &Context, DiagnosticsEngine &Diags, SourceLocation Location,
+ bool IsIFunc, const llvm::GlobalValue *Alias, const llvm::GlobalValue *&GV,
+ const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames,
+ SourceRange AliasRange) {
+ GV = getAliasedGlobal(Alias);
+ if (!GV) {
+ Diags.Report(Location, diag::err_cyclic_alias) << IsIFunc;
+ return false;
+ }
+
+ if (GV->hasCommonLinkage()) {
+ const llvm::Triple &Triple = Context.getTargetInfo().getTriple();
+ if (Triple.getObjectFormat() == llvm::Triple::XCOFF) {
+ Diags.Report(Location, diag::err_alias_to_common);
+ return false;
+ }
+ }
+
+ if (GV->isDeclaration()) {
+ Diags.Report(Location, diag::err_alias_to_undefined) << IsIFunc << IsIFunc;
+ Diags.Report(Location, diag::note_alias_requires_mangled_name)
+ << IsIFunc << IsIFunc;
+ // Provide a note if the given function is not found and exists as a
+ // mangled name.
+ for (const auto &[Decl, Name] : MangledDeclNames) {
+ if (const auto *ND = dyn_cast<NamedDecl>(Decl.getDecl())) {
+ if (ND->getName() == GV->getName()) {
+ Diags.Report(Location, diag::note_alias_mangled_name_alternative)
+ << Name
+ << FixItHint::CreateReplacement(
+ AliasRange,
+ (Twine(IsIFunc ? "ifunc" : "alias") + "(\"" + Name + "\")")
+ .str());
+ }
+ }
+ }
+ return false;
}
+
+ if (IsIFunc) {
+ // Check resolver function type.
+ const auto *F = dyn_cast<llvm::Function>(GV);
+ if (!F) {
+ Diags.Report(Location, diag::err_alias_to_undefined)
+ << IsIFunc << IsIFunc;
+ return false;
+ }
+
+ llvm::FunctionType *FTy = F->getFunctionType();
+ if (!FTy->getReturnType()->isPointerTy()) {
+ Diags.Report(Location, diag::err_ifunc_resolver_return);
+ return false;
+ }
+ }
+
+ return true;
}
void CodeGenModule::checkAliases() {
@@ -342,32 +633,27 @@ void CodeGenModule::checkAliases() {
for (const GlobalDecl &GD : Aliases) {
const auto *D = cast<ValueDecl>(GD.getDecl());
SourceLocation Location;
+ SourceRange Range;
bool IsIFunc = D->hasAttr<IFuncAttr>();
- if (const Attr *A = D->getDefiningAttr())
+ if (const Attr *A = D->getDefiningAttr()) {
Location = A->getLocation();
- else
+ Range = A->getRange();
+ } else
llvm_unreachable("Not an alias or ifunc?");
+
StringRef MangledName = getMangledName(GD);
- llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
- auto *Alias = cast<llvm::GlobalIndirectSymbol>(Entry);
- const llvm::GlobalValue *GV = getAliasedGlobal(*Alias);
- if (!GV) {
- Error = true;
- Diags.Report(Location, diag::err_cyclic_alias) << IsIFunc;
- } else if (GV->isDeclaration()) {
+ llvm::GlobalValue *Alias = GetGlobalValue(MangledName);
+ const llvm::GlobalValue *GV = nullptr;
+ if (!checkAliasedGlobal(getContext(), Diags, Location, IsIFunc, Alias, GV,
+ MangledDeclNames, Range)) {
Error = true;
- Diags.Report(Location, diag::err_alias_to_undefined)
- << IsIFunc << IsIFunc;
- } else if (IsIFunc) {
- // Check resolver function type.
- llvm::FunctionType *FTy = dyn_cast<llvm::FunctionType>(
- GV->getType()->getPointerElementType());
- assert(FTy);
- if (!FTy->getReturnType()->isPointerTy())
- Diags.Report(Location, diag::err_ifunc_resolver_return);
+ continue;
}
- llvm::Constant *Aliasee = Alias->getIndirectSymbol();
+ llvm::Constant *Aliasee =
+ IsIFunc ? cast<llvm::GlobalIFunc>(Alias)->getResolver()
+ : cast<llvm::GlobalAlias>(Alias)->getAliasee();
+
llvm::GlobalValue *AliaseeGV;
if (auto CE = dyn_cast<llvm::ConstantExpr>(Aliasee))
AliaseeGV = cast<llvm::GlobalValue>(CE->getOperand(0));
@@ -386,13 +672,17 @@ void CodeGenModule::checkAliases() {
// compatibility with gcc we implement it by just pointing the alias
// to its aliasee's aliasee. We also warn, since the user is probably
// expecting the link to be weak.
- if (auto GA = dyn_cast<llvm::GlobalIndirectSymbol>(AliaseeGV)) {
+ if (auto *GA = dyn_cast<llvm::GlobalAlias>(AliaseeGV)) {
if (GA->isInterposable()) {
Diags.Report(Location, diag::warn_alias_to_weak_alias)
<< GV->getName() << GA->getName() << IsIFunc;
Aliasee = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
- GA->getIndirectSymbol(), Alias->getType());
- Alias->setIndirectSymbol(Aliasee);
+ GA->getAliasee(), Alias->getType());
+
+ if (IsIFunc)
+ cast<llvm::GlobalIFunc>(Alias)->setResolver(Aliasee);
+ else
+ cast<llvm::GlobalAlias>(Alias)->setAliasee(Aliasee);
}
}
}
@@ -401,8 +691,7 @@ void CodeGenModule::checkAliases() {
for (const GlobalDecl &GD : Aliases) {
StringRef MangledName = getMangledName(GD);
- llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
- auto *Alias = cast<llvm::GlobalIndirectSymbol>(Entry);
+ llvm::GlobalValue *Alias = GetGlobalValue(MangledName);
Alias->replaceAllUsesWith(llvm::UndefValue::get(Alias->getType()));
Alias->eraseFromParent();
}
@@ -410,6 +699,8 @@ void CodeGenModule::checkAliases() {
void CodeGenModule::clear() {
DeferredDeclsToEmit.clear();
+ EmittedDeferredDecls.clear();
+ DeferredAnnotations.clear();
if (OpenMPRuntime)
OpenMPRuntime->clear();
}
@@ -431,56 +722,109 @@ void InstrProfStats::reportDiagnostics(DiagnosticsEngine &Diags,
}
}
+static std::optional<llvm::GlobalValue::VisibilityTypes>
+getLLVMVisibility(clang::LangOptions::VisibilityFromDLLStorageClassKinds K) {
+ // Map to LLVM visibility.
+ switch (K) {
+ case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Keep:
+ return std::nullopt;
+ case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Default:
+ return llvm::GlobalValue::DefaultVisibility;
+ case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Hidden:
+ return llvm::GlobalValue::HiddenVisibility;
+ case clang::LangOptions::VisibilityFromDLLStorageClassKinds::Protected:
+ return llvm::GlobalValue::ProtectedVisibility;
+ }
+ llvm_unreachable("unknown option value!");
+}
+
+void setLLVMVisibility(llvm::GlobalValue &GV,
+ std::optional<llvm::GlobalValue::VisibilityTypes> V) {
+ if (!V)
+ return;
+
+ // Reset DSO locality before setting the visibility. This removes
+ // any effects that visibility options and annotations may have
+ // had on the DSO locality. Setting the visibility will implicitly set
+ // appropriate globals to DSO Local; however, this will be pessimistic
+ // w.r.t. to the normal compiler IRGen.
+ GV.setDSOLocal(false);
+ GV.setVisibility(*V);
+}
+
static void setVisibilityFromDLLStorageClass(const clang::LangOptions &LO,
llvm::Module &M) {
if (!LO.VisibilityFromDLLStorageClass)
return;
- llvm::GlobalValue::VisibilityTypes DLLExportVisibility =
- CodeGenModule::GetLLVMVisibility(LO.getDLLExportVisibility());
- llvm::GlobalValue::VisibilityTypes NoDLLStorageClassVisibility =
- CodeGenModule::GetLLVMVisibility(LO.getNoDLLStorageClassVisibility());
- llvm::GlobalValue::VisibilityTypes ExternDeclDLLImportVisibility =
- CodeGenModule::GetLLVMVisibility(LO.getExternDeclDLLImportVisibility());
- llvm::GlobalValue::VisibilityTypes ExternDeclNoDLLStorageClassVisibility =
- CodeGenModule::GetLLVMVisibility(
- LO.getExternDeclNoDLLStorageClassVisibility());
+ std::optional<llvm::GlobalValue::VisibilityTypes> DLLExportVisibility =
+ getLLVMVisibility(LO.getDLLExportVisibility());
+
+ std::optional<llvm::GlobalValue::VisibilityTypes>
+ NoDLLStorageClassVisibility =
+ getLLVMVisibility(LO.getNoDLLStorageClassVisibility());
+
+ std::optional<llvm::GlobalValue::VisibilityTypes>
+ ExternDeclDLLImportVisibility =
+ getLLVMVisibility(LO.getExternDeclDLLImportVisibility());
+
+ std::optional<llvm::GlobalValue::VisibilityTypes>
+ ExternDeclNoDLLStorageClassVisibility =
+ getLLVMVisibility(LO.getExternDeclNoDLLStorageClassVisibility());
for (llvm::GlobalValue &GV : M.global_values()) {
if (GV.hasAppendingLinkage() || GV.hasLocalLinkage())
continue;
- // Reset DSO locality before setting the visibility. This removes
- // any effects that visibility options and annotations may have
- // had on the DSO locality. Setting the visibility will implicitly set
- // appropriate globals to DSO Local; however, this will be pessimistic
- // w.r.t. to the normal compiler IRGen.
- GV.setDSOLocal(false);
-
- if (GV.isDeclarationForLinker()) {
- GV.setVisibility(GV.getDLLStorageClass() ==
- llvm::GlobalValue::DLLImportStorageClass
- ? ExternDeclDLLImportVisibility
- : ExternDeclNoDLLStorageClassVisibility);
- } else {
- GV.setVisibility(GV.getDLLStorageClass() ==
- llvm::GlobalValue::DLLExportStorageClass
- ? DLLExportVisibility
- : NoDLLStorageClassVisibility);
- }
+ if (GV.isDeclarationForLinker())
+ setLLVMVisibility(GV, GV.getDLLStorageClass() ==
+ llvm::GlobalValue::DLLImportStorageClass
+ ? ExternDeclDLLImportVisibility
+ : ExternDeclNoDLLStorageClassVisibility);
+ else
+ setLLVMVisibility(GV, GV.getDLLStorageClass() ==
+ llvm::GlobalValue::DLLExportStorageClass
+ ? DLLExportVisibility
+ : NoDLLStorageClassVisibility);
GV.setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
}
}
+static bool isStackProtectorOn(const LangOptions &LangOpts,
+ const llvm::Triple &Triple,
+ clang::LangOptions::StackProtectorMode Mode) {
+ if (Triple.isAMDGPU() || Triple.isNVPTX())
+ return false;
+ return LangOpts.getStackProtector() == Mode;
+}
+
void CodeGenModule::Release() {
+ Module *Primary = getContext().getCurrentNamedModule();
+ if (CXX20ModuleInits && Primary && !Primary->isHeaderLikeModule())
+ EmitModuleInitializers(Primary);
EmitDeferred();
+ DeferredDecls.insert(EmittedDeferredDecls.begin(),
+ EmittedDeferredDecls.end());
+ EmittedDeferredDecls.clear();
EmitVTablesOpportunistically();
applyGlobalValReplacements();
applyReplacements();
- checkAliases();
emitMultiVersionFunctions();
- EmitCXXGlobalInitFunc();
+
+ if (Context.getLangOpts().IncrementalExtensions &&
+ GlobalTopLevelStmtBlockInFlight.first) {
+ const TopLevelStmtDecl *TLSD = GlobalTopLevelStmtBlockInFlight.second;
+ GlobalTopLevelStmtBlockInFlight.first->FinishFunction(TLSD->getEndLoc());
+ GlobalTopLevelStmtBlockInFlight = {nullptr, nullptr};
+ }
+
+ // Module implementations are initialized the same way as a regular TU that
+ // imports one or more modules.
+ if (CXX20ModuleInits && Primary && Primary->isInterfaceOrPartition())
+ EmitCXXModuleInitFunc(Primary);
+ else
+ EmitCXXGlobalInitFunc();
EmitCXXGlobalCleanUpFunc();
registerGlobalDtorsWithAtExit();
EmitCXXThreadLocalInitFunc();
@@ -506,10 +850,14 @@ void CodeGenModule::Release() {
if (PGOStats.hasDiagnostics())
PGOStats.reportDiagnostics(getDiags(), getCodeGenOpts().MainFileName);
}
+ llvm::stable_sort(GlobalCtors, [](const Structor &L, const Structor &R) {
+ return L.LexOrder < R.LexOrder;
+ });
EmitCtorList(GlobalCtors, "llvm.global_ctors");
EmitCtorList(GlobalDtors, "llvm.global_dtors");
EmitGlobalAnnotations();
EmitStaticExternCAliases();
+ checkAliases();
EmitDeferredUnusedCoverageMappings();
CodeGenPGO(*this).setValueProfilingFlag(getModule());
if (CoverageMapping)
@@ -518,25 +866,56 @@ void CodeGenModule::Release() {
CodeGenFunction(*this).EmitCfiCheckFail();
CodeGenFunction(*this).EmitCfiCheckStub();
}
+ if (LangOpts.Sanitize.has(SanitizerKind::KCFI))
+ finalizeKCFITypes();
emitAtAvailableLinkGuard();
- if (Context.getTargetInfo().getTriple().isWasm() &&
- !Context.getTargetInfo().getTriple().isOSEmscripten()) {
+ if (Context.getTargetInfo().getTriple().isWasm())
EmitMainVoidAlias();
+
+ if (getTriple().isAMDGPU()) {
+ // Emit amdgpu_code_object_version module flag, which is code object version
+ // times 100.
+ if (getTarget().getTargetOpts().CodeObjectVersion !=
+ llvm::CodeObjectVersionKind::COV_None) {
+ getModule().addModuleFlag(llvm::Module::Error,
+ "amdgpu_code_object_version",
+ getTarget().getTargetOpts().CodeObjectVersion);
+ }
+
+ // Currently, "-mprintf-kind" option is only supported for HIP
+ if (LangOpts.HIP) {
+ auto *MDStr = llvm::MDString::get(
+ getLLVMContext(), (getTarget().getTargetOpts().AMDGPUPrintfKindVal ==
+ TargetOptions::AMDGPUPrintfKind::Hostcall)
+ ? "hostcall"
+ : "buffered");
+ getModule().addModuleFlag(llvm::Module::Error, "amdgpu_printf_kind",
+ MDStr);
+ }
}
- // Emit reference of __amdgpu_device_library_preserve_asan_functions to
- // preserve ASAN functions in bitcode libraries.
- if (LangOpts.Sanitize.has(SanitizerKind::Address) && getTriple().isAMDGPU()) {
- auto *FT = llvm::FunctionType::get(VoidTy, {});
- auto *F = llvm::Function::Create(
- FT, llvm::GlobalValue::ExternalLinkage,
- "__amdgpu_device_library_preserve_asan_functions", &getModule());
- auto *Var = new llvm::GlobalVariable(
- getModule(), FT->getPointerTo(),
- /*isConstant=*/true, llvm::GlobalValue::WeakAnyLinkage, F,
- "__amdgpu_device_library_preserve_asan_functions_ptr", nullptr,
- llvm::GlobalVariable::NotThreadLocal);
- addCompilerUsedGlobal(Var);
+ // Emit a global array containing all external kernels or device variables
+ // used by host functions and mark it as used for CUDA/HIP. This is necessary
+ // to get kernels or device variables in archives linked in even if these
+ // kernels or device variables are only used in host functions.
+ if (!Context.CUDAExternalDeviceDeclODRUsedByHost.empty()) {
+ SmallVector<llvm::Constant *, 8> UsedArray;
+ for (auto D : Context.CUDAExternalDeviceDeclODRUsedByHost) {
+ GlobalDecl GD;
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ GD = GlobalDecl(FD, KernelReferenceKind::Kernel);
+ else
+ GD = GlobalDecl(D);
+ UsedArray.push_back(llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
+ GetAddrOfGlobal(GD), Int8PtrTy));
+ }
+
+ llvm::ArrayType *ATy = llvm::ArrayType::get(Int8PtrTy, UsedArray.size());
+
+ auto *GV = new llvm::GlobalVariable(
+ getModule(), ATy, false, llvm::GlobalValue::InternalLinkage,
+ llvm::ConstantArray::get(ATy, UsedArray), "__clang_gpu_used_external");
+ addCompilerUsedGlobal(GV);
}
emitLLVMUsed();
@@ -581,7 +960,7 @@ void CodeGenModule::Release() {
if (Context.getLangOpts().SemanticInterposition)
// Require various optimization to respect semantic interposition.
- getModule().setSemanticInterposition(1);
+ getModule().setSemanticInterposition(true);
if (CodeGenOpts.EmitCodeView) {
// Indicate that we want CodeView in the metadata.
@@ -601,6 +980,10 @@ void CodeGenModule::Release() {
// Function ID tables for EH Continuation Guard.
getModule().addModuleFlag(llvm::Module::Warning, "ehcontguard", 1);
}
+ if (Context.getLangOpts().Kernel) {
+ // Note if we are compiling with /kernel.
+ getModule().addModuleFlag(llvm::Module::Warning, "ms-kernel", 1);
+ }
if (CodeGenOpts.OptimizationLevel > 0 && CodeGenOpts.StrictVTablePointers) {
// We don't support LTO with 2 with different StrictVTablePointers
// FIXME: we could support it by stripping all the information introduced
@@ -631,6 +1014,36 @@ void CodeGenModule::Release() {
Context.getTypeSizeInChars(Context.getWideCharType()).getQuantity();
getModule().addModuleFlag(llvm::Module::Error, "wchar_size", WCharWidth);
+ if (getTriple().isOSzOS()) {
+ getModule().addModuleFlag(llvm::Module::Warning,
+ "zos_product_major_version",
+ uint32_t(CLANG_VERSION_MAJOR));
+ getModule().addModuleFlag(llvm::Module::Warning,
+ "zos_product_minor_version",
+ uint32_t(CLANG_VERSION_MINOR));
+ getModule().addModuleFlag(llvm::Module::Warning, "zos_product_patchlevel",
+ uint32_t(CLANG_VERSION_PATCHLEVEL));
+ std::string ProductId = getClangVendor() + "clang";
+ getModule().addModuleFlag(llvm::Module::Error, "zos_product_id",
+ llvm::MDString::get(VMContext, ProductId));
+
+ // Record the language because we need it for the PPA2.
+ StringRef lang_str = languageToString(
+ LangStandard::getLangStandardForKind(LangOpts.LangStd).Language);
+ getModule().addModuleFlag(llvm::Module::Error, "zos_cu_language",
+ llvm::MDString::get(VMContext, lang_str));
+
+ time_t TT = PreprocessorOpts.SourceDateEpoch
+ ? *PreprocessorOpts.SourceDateEpoch
+ : std::time(nullptr);
+ getModule().addModuleFlag(llvm::Module::Max, "zos_translation_time",
+ static_cast<uint64_t>(TT));
+
+ // Multiple modes will be supported here.
+ getModule().addModuleFlag(llvm::Module::Error, "zos_le_char_mode",
+ llvm::MDString::get(VMContext, "ascii"));
+ }
+
llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
if ( Arch == llvm::Triple::arm
|| Arch == llvm::Triple::armeb
@@ -667,36 +1080,83 @@ void CodeGenModule::Release() {
CodeGenOpts.SanitizeCfiCanonicalJumpTables);
}
+ if (LangOpts.Sanitize.has(SanitizerKind::KCFI)) {
+ getModule().addModuleFlag(llvm::Module::Override, "kcfi", 1);
+ // KCFI assumes patchable-function-prefix is the same for all indirectly
+ // called functions. Store the expected offset for code generation.
+ if (CodeGenOpts.PatchableFunctionEntryOffset)
+ getModule().addModuleFlag(llvm::Module::Override, "kcfi-offset",
+ CodeGenOpts.PatchableFunctionEntryOffset);
+ }
+
if (CodeGenOpts.CFProtectionReturn &&
Target.checkCFProtectionReturnSupported(getDiags())) {
// Indicate that we want to instrument return control flow protection.
- getModule().addModuleFlag(llvm::Module::Override, "cf-protection-return",
+ getModule().addModuleFlag(llvm::Module::Min, "cf-protection-return",
1);
}
if (CodeGenOpts.CFProtectionBranch &&
Target.checkCFProtectionBranchSupported(getDiags())) {
// Indicate that we want to instrument branch control flow protection.
- getModule().addModuleFlag(llvm::Module::Override, "cf-protection-branch",
+ getModule().addModuleFlag(llvm::Module::Min, "cf-protection-branch",
1);
}
- if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_32 ||
+ if (CodeGenOpts.FunctionReturnThunks)
+ getModule().addModuleFlag(llvm::Module::Override, "function_return_thunk_extern", 1);
+
+ if (CodeGenOpts.IndirectBranchCSPrefix)
+ getModule().addModuleFlag(llvm::Module::Override, "indirect_branch_cs_prefix", 1);
+
+ // Add module metadata for return address signing (ignoring
+ // non-leaf/all) and stack tagging. These are actually turned on by function
+ // attributes, but we use module metadata to emit build attributes. This is
+ // needed for LTO, where the function attributes are inside bitcode
+ // serialised into a global variable by the time build attributes are
+ // emitted, so we can't access them. LTO objects could be compiled with
+ // different flags therefore module flags are set to "Min" behavior to achieve
+ // the same end result of the normal build where e.g BTI is off if any object
+ // doesn't support it.
+ if (Context.getTargetInfo().hasFeature("ptrauth") &&
+ LangOpts.getSignReturnAddressScope() !=
+ LangOptions::SignReturnAddressScopeKind::None)
+ getModule().addModuleFlag(llvm::Module::Override,
+ "sign-return-address-buildattr", 1);
+ if (LangOpts.Sanitize.has(SanitizerKind::MemtagStack))
+ getModule().addModuleFlag(llvm::Module::Override,
+ "tag-stack-memory-buildattr", 1);
+
+ if (Arch == llvm::Triple::thumb || Arch == llvm::Triple::thumbeb ||
+ Arch == llvm::Triple::arm || Arch == llvm::Triple::armeb ||
+ Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_32 ||
Arch == llvm::Triple::aarch64_be) {
- getModule().addModuleFlag(llvm::Module::Error,
- "branch-target-enforcement",
- LangOpts.BranchTargetEnforcement);
+ if (LangOpts.BranchTargetEnforcement)
+ getModule().addModuleFlag(llvm::Module::Min, "branch-target-enforcement",
+ 1);
+ if (LangOpts.BranchProtectionPAuthLR)
+ getModule().addModuleFlag(llvm::Module::Min, "branch-protection-pauth-lr",
+ 1);
+ if (LangOpts.GuardedControlStack)
+ getModule().addModuleFlag(llvm::Module::Min, "guarded-control-stack", 1);
+ if (LangOpts.hasSignReturnAddress())
+ getModule().addModuleFlag(llvm::Module::Min, "sign-return-address", 1);
+ if (LangOpts.isSignReturnAddressScopeAll())
+ getModule().addModuleFlag(llvm::Module::Min, "sign-return-address-all",
+ 1);
+ if (!LangOpts.isSignReturnAddressWithAKey())
+ getModule().addModuleFlag(llvm::Module::Min,
+ "sign-return-address-with-bkey", 1);
+ }
- getModule().addModuleFlag(llvm::Module::Error, "sign-return-address",
- LangOpts.hasSignReturnAddress());
+ if (CodeGenOpts.StackClashProtector)
+ getModule().addModuleFlag(
+ llvm::Module::Override, "probe-stack",
+ llvm::MDString::get(TheModule.getContext(), "inline-asm"));
- getModule().addModuleFlag(llvm::Module::Error, "sign-return-address-all",
- LangOpts.isSignReturnAddressScopeAll());
-
- getModule().addModuleFlag(llvm::Module::Error,
- "sign-return-address-with-bkey",
- !LangOpts.isSignReturnAddressWithAKey());
- }
+ if (CodeGenOpts.StackProbeSize && CodeGenOpts.StackProbeSize != 4096)
+ getModule().addModuleFlag(llvm::Module::Min, "stack-probe-size",
+ CodeGenOpts.StackProbeSize);
if (!CodeGenOpts.MemoryProfileOutput.empty()) {
llvm::LLVMContext &Ctx = TheModule.getContext();
@@ -720,19 +1180,20 @@ void CodeGenModule::Release() {
// Indicate whether this Module was compiled with -fopenmp
if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd)
getModule().addModuleFlag(llvm::Module::Max, "openmp", LangOpts.OpenMP);
- if (getLangOpts().OpenMPIsDevice)
+ if (getLangOpts().OpenMPIsTargetDevice)
getModule().addModuleFlag(llvm::Module::Max, "openmp-device",
LangOpts.OpenMP);
// Emit OpenCL specific module metadata: OpenCL/SPIR version.
- if (LangOpts.OpenCL) {
+ if (LangOpts.OpenCL || (LangOpts.CUDAIsDevice && getTriple().isSPIRV())) {
EmitOpenCLMetadata();
// Emit SPIR version.
if (getTriple().isSPIR()) {
// SPIR v2.0 s2.12 - The SPIR version used by the module is stored in the
// opencl.spir.version named metadata.
- // C++ is backwards compatible with OpenCL v2.0.
- auto Version = LangOpts.OpenCLCPlusPlus ? 200 : LangOpts.OpenCLVersion;
+ // C++ for OpenCL has a distinct mapping for version compatibility with
+ // OpenCL.
+ auto Version = LangOpts.getOpenCLCompatibleVersion();
llvm::Metadata *SPIRVerElts[] = {
llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
Int32Ty, Version / 100)),
@@ -745,6 +1206,10 @@ void CodeGenModule::Release() {
}
}
+ // HLSL related end of code gen work items.
+ if (LangOpts.HLSL)
+ getHLSLRuntime().finishCodeGen();
+
if (uint32_t PLevel = Context.getLangOpts().PICLevel) {
assert(PLevel < 3 && "Invalid PIC Level");
getModule().setPICLevel(static_cast<llvm::PICLevel::Level>(PLevel));
@@ -763,13 +1228,25 @@ void CodeGenModule::Release() {
if (CM != ~0u) {
llvm::CodeModel::Model codeModel = static_cast<llvm::CodeModel::Model>(CM);
getModule().setCodeModel(codeModel);
+
+ if ((CM == llvm::CodeModel::Medium || CM == llvm::CodeModel::Large) &&
+ Context.getTargetInfo().getTriple().getArch() ==
+ llvm::Triple::x86_64) {
+ getModule().setLargeDataThreshold(getCodeGenOpts().LargeDataThreshold);
+ }
}
}
if (CodeGenOpts.NoPLT)
getModule().setRtLibUseGOT();
+ if (getTriple().isOSBinFormatELF() &&
+ CodeGenOpts.DirectAccessExternalData !=
+ getModule().getDirectAccessExternalData()) {
+ getModule().setDirectAccessExternalData(
+ CodeGenOpts.DirectAccessExternalData);
+ }
if (CodeGenOpts.UnwindTables)
- getModule().setUwtable();
+ getModule().setUwtable(llvm::UWTableKind(CodeGenOpts.UnwindTables));
switch (CodeGenOpts.getFramePointer()) {
case CodeGenOptions::FramePointerKind::None:
@@ -788,7 +1265,8 @@ void CodeGenModule::Release() {
if (getCodeGenOpts().EmitDeclMetadata)
EmitDeclMetadata();
- if (getCodeGenOpts().EmitGcovArcs || getCodeGenOpts().EmitGcovNotes)
+ if (getCodeGenOpts().CoverageNotesFile.size() ||
+ getCodeGenOpts().CoverageDataFile.size())
EmitCoverageFile();
if (CGDebugInfo *DI = getModuleDebugInfo())
@@ -805,16 +1283,32 @@ void CodeGenModule::Release() {
if (!getCodeGenOpts().StackProtectorGuardReg.empty())
getModule().setStackProtectorGuardReg(
getCodeGenOpts().StackProtectorGuardReg);
+ if (!getCodeGenOpts().StackProtectorGuardSymbol.empty())
+ getModule().setStackProtectorGuardSymbol(
+ getCodeGenOpts().StackProtectorGuardSymbol);
if (getCodeGenOpts().StackProtectorGuardOffset != INT_MAX)
getModule().setStackProtectorGuardOffset(
getCodeGenOpts().StackProtectorGuardOffset);
if (getCodeGenOpts().StackAlignment)
getModule().setOverrideStackAlignment(getCodeGenOpts().StackAlignment);
+ if (getCodeGenOpts().SkipRaxSetup)
+ getModule().addModuleFlag(llvm::Module::Override, "SkipRaxSetup", 1);
+ if (getLangOpts().RegCall4)
+ getModule().addModuleFlag(llvm::Module::Override, "RegCallv4", 1);
+
+ if (getContext().getTargetInfo().getMaxTLSAlign())
+ getModule().addModuleFlag(llvm::Module::Error, "MaxTLSAlign",
+ getContext().getTargetInfo().getMaxTLSAlign());
+
+ getTargetCodeGenInfo().emitTargetGlobals(*this);
getTargetCodeGenInfo().emitTargetMetadata(*this, MangledDeclNames);
EmitBackendOptionsMetadata(getCodeGenOpts());
+ // If there is device offloading code embed it in the host now.
+ EmbedObject(&getModule(), CodeGenOpts, getDiags());
+
// Set visibility from DLL storage class
// We do this at the end of LLVM IR generation; after any operation
// that might affect the DLL storage class or the visibility, and
@@ -825,9 +1319,8 @@ void CodeGenModule::Release() {
void CodeGenModule::EmitOpenCLMetadata() {
// SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the
// opencl.ocl.version named metadata node.
- // C++ is backwards compatible with OpenCL v2.0.
- // FIXME: We might need to add CXX version at some point too?
- auto Version = LangOpts.OpenCLCPlusPlus ? 200 : LangOpts.OpenCLVersion;
+ // C++ for OpenCL has a distinct mapping for versions compatibile with OpenCL.
+ auto Version = LangOpts.getOpenCLCompatibleVersion();
llvm::Metadata *OCLVerElts[] = {
llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
Int32Ty, Version / 100)),
@@ -840,15 +1333,10 @@ void CodeGenModule::EmitOpenCLMetadata() {
}
void CodeGenModule::EmitBackendOptionsMetadata(
- const CodeGenOptions CodeGenOpts) {
- switch (getTriple().getArch()) {
- default:
- break;
- case llvm::Triple::riscv32:
- case llvm::Triple::riscv64:
- getModule().addModuleFlag(llvm::Module::Error, "SmallDataLimit",
+ const CodeGenOptions &CodeGenOpts) {
+ if (getTriple().isRISCV()) {
+ getModule().addModuleFlag(llvm::Module::Min, "SmallDataLimit",
CodeGenOpts.SmallDataLimit);
- break;
}
}
@@ -977,8 +1465,6 @@ llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) {
void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
const NamedDecl *D) const {
- if (GV->hasDLLImportStorageClass())
- return;
// Internal definitions always have default visibility.
if (GV->hasLocalLinkage()) {
GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
@@ -986,9 +1472,39 @@ void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
}
if (!D)
return;
+
// Set visibility for definitions, and for declarations if requested globally
// or set explicitly.
LinkageInfo LV = D->getLinkageAndVisibility();
+
+ // OpenMP declare target variables must be visible to the host so they can
+ // be registered. We require protected visibility unless the variable has
+ // the DT_nohost modifier and does not need to be registered.
+ if (Context.getLangOpts().OpenMP &&
+ Context.getLangOpts().OpenMPIsTargetDevice && isa<VarDecl>(D) &&
+ D->hasAttr<OMPDeclareTargetDeclAttr>() &&
+ D->getAttr<OMPDeclareTargetDeclAttr>()->getDevType() !=
+ OMPDeclareTargetDeclAttr::DT_NoHost &&
+ LV.getVisibility() == HiddenVisibility) {
+ GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
+ return;
+ }
+
+ if (GV->hasDLLExportStorageClass() || GV->hasDLLImportStorageClass()) {
+ // Reject incompatible dlllstorage and visibility annotations.
+ if (!LV.isVisibilityExplicit())
+ return;
+ if (GV->hasDLLExportStorageClass()) {
+ if (LV.getVisibility() == HiddenVisibility)
+ getDiags().Report(D->getLocation(),
+ diag::err_hidden_visibility_dllexport);
+ } else if (LV.getVisibility() != DefaultVisibility) {
+ getDiags().Report(D->getLocation(),
+ diag::err_non_default_visibility_dllimport);
+ }
+ return;
+ }
+
if (LV.isVisibilityExplicit() || getLangOpts().SetVisibilityForExternDecls ||
!GV->isDeclarationForLinker())
GV->setVisibility(GetLLVMVisibility(LV.getVisibility()));
@@ -1007,6 +1523,7 @@ static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
return false;
const llvm::Triple &TT = CGM.getTriple();
+ const auto &CGOpts = CGM.getCodeGenOpts();
if (TT.isWindowsGNUEnvironment()) {
// In MinGW, variables without DLLImport can still be automatically
// imported from a DLL by the linker; don't mark variables that
@@ -1017,7 +1534,8 @@ static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
// such variables can't be marked as DSO local. (Native TLS variables
// can't be dllimported at all, though.)
if (GV->isDeclarationForLinker() && isa<llvm::GlobalVariable>(GV) &&
- (!GV->isThreadLocal() || CGM.getCodeGenOpts().EmulatedTLS))
+ (!GV->isThreadLocal() || CGM.getCodeGenOpts().EmulatedTLS) &&
+ CGOpts.AutoImport)
return false;
}
@@ -1040,7 +1558,6 @@ static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
return false;
// If this is not an executable, don't assume anything is local.
- const auto &CGOpts = CGM.getCodeGenOpts();
llvm::Reloc::Model RM = CGOpts.RelocationModel;
const auto &LOpts = CGM.getLangOpts();
if (RM != llvm::Reloc::Static && !LOpts.PIE) {
@@ -1115,7 +1632,9 @@ void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
if (D && D->isExternallyVisible()) {
if (D->hasAttr<DLLImportAttr>())
GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
- else if (D->hasAttr<DLLExportAttr>() && !GV->isDeclarationForLinker())
+ else if ((D->hasAttr<DLLExportAttr>() ||
+ shouldMapVisibilityToDLLExport(D)) &&
+ !GV->isDeclarationForLinker())
GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
}
}
@@ -1194,6 +1713,27 @@ static void AppendCPUSpecificCPUDispatchMangling(const CodeGenModule &CGM,
Out << ".resolver";
}
+static void AppendTargetVersionMangling(const CodeGenModule &CGM,
+ const TargetVersionAttr *Attr,
+ raw_ostream &Out) {
+ if (Attr->isDefaultVersion()) {
+ Out << ".default";
+ return;
+ }
+ Out << "._";
+ const TargetInfo &TI = CGM.getTarget();
+ llvm::SmallVector<StringRef, 8> Feats;
+ Attr->getFeatures(Feats);
+ llvm::stable_sort(Feats, [&TI](const StringRef FeatL, const StringRef FeatR) {
+ return TI.multiVersionSortPriority(FeatL) <
+ TI.multiVersionSortPriority(FeatR);
+ });
+ for (const auto &Feat : Feats) {
+ Out << 'M';
+ Out << Feat;
+ }
+}
+
static void AppendTargetMangling(const CodeGenModule &CGM,
const TargetAttr *Attr, raw_ostream &Out) {
if (Attr->isDefaultVersion())
@@ -1201,21 +1741,21 @@ static void AppendTargetMangling(const CodeGenModule &CGM,
Out << '.';
const TargetInfo &Target = CGM.getTarget();
- ParsedTargetAttr Info =
- Attr->parse([&Target](StringRef LHS, StringRef RHS) {
- // Multiversioning doesn't allow "no-${feature}", so we can
- // only have "+" prefixes here.
- assert(LHS.startswith("+") && RHS.startswith("+") &&
- "Features should always have a prefix.");
- return Target.multiVersionSortPriority(LHS.substr(1)) >
- Target.multiVersionSortPriority(RHS.substr(1));
- });
+ ParsedTargetAttr Info = Target.parseTargetAttr(Attr->getFeaturesStr());
+ llvm::sort(Info.Features, [&Target](StringRef LHS, StringRef RHS) {
+ // Multiversioning doesn't allow "no-${feature}", so we can
+ // only have "+" prefixes here.
+ assert(LHS.starts_with("+") && RHS.starts_with("+") &&
+ "Features should always have a prefix.");
+ return Target.multiVersionSortPriority(LHS.substr(1)) >
+ Target.multiVersionSortPriority(RHS.substr(1));
+ });
bool IsFirst = true;
- if (!Info.Architecture.empty()) {
+ if (!Info.CPU.empty()) {
IsFirst = false;
- Out << "arch_" << Info.Architecture;
+ Out << "arch_" << Info.CPU;
}
for (StringRef Feat : Info.Features) {
@@ -1235,6 +1775,41 @@ static bool isUniqueInternalLinkageDecl(GlobalDecl GD,
(CGM.getFunctionLinkage(GD) == llvm::GlobalValue::InternalLinkage);
}
+static void AppendTargetClonesMangling(const CodeGenModule &CGM,
+ const TargetClonesAttr *Attr,
+ unsigned VersionIndex,
+ raw_ostream &Out) {
+ const TargetInfo &TI = CGM.getTarget();
+ if (TI.getTriple().isAArch64()) {
+ StringRef FeatureStr = Attr->getFeatureStr(VersionIndex);
+ if (FeatureStr == "default") {
+ Out << ".default";
+ return;
+ }
+ Out << "._";
+ SmallVector<StringRef, 8> Features;
+ FeatureStr.split(Features, "+");
+ llvm::stable_sort(Features,
+ [&TI](const StringRef FeatL, const StringRef FeatR) {
+ return TI.multiVersionSortPriority(FeatL) <
+ TI.multiVersionSortPriority(FeatR);
+ });
+ for (auto &Feat : Features) {
+ Out << 'M';
+ Out << Feat;
+ }
+ } else {
+ Out << '.';
+ StringRef FeatureStr = Attr->getFeatureStr(VersionIndex);
+ if (FeatureStr.starts_with("arch="))
+ Out << "arch_" << FeatureStr.substr(sizeof("arch=") - 1);
+ else
+ Out << FeatureStr;
+
+ Out << '.' << Attr->getMangledIndex(VersionIndex);
+ }
+}
+
static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD,
const NamedDecl *ND,
bool OmitMultiVersionMangling = false) {
@@ -1253,7 +1828,10 @@ static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD,
if (FD &&
FD->getType()->castAs<FunctionType>()->getCallConv() == CC_X86RegCall) {
- Out << "__regcall3__" << II->getName();
+ if (CGM.getLangOpts().RegCall4)
+ Out << "__regcall4__" << II->getName();
+ else
+ Out << "__regcall3__" << II->getName();
} else if (FD && FD->hasAttr<CUDAGlobalAttr>() &&
GD.getKernelReferenceKind() == KernelReferenceKind::Stub) {
Out << "__device_stub__" << II->getName();
@@ -1288,21 +1866,30 @@ static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD,
case MultiVersionKind::Target:
AppendTargetMangling(CGM, FD->getAttr<TargetAttr>(), Out);
break;
+ case MultiVersionKind::TargetVersion:
+ AppendTargetVersionMangling(CGM, FD->getAttr<TargetVersionAttr>(), Out);
+ break;
+ case MultiVersionKind::TargetClones:
+ AppendTargetClonesMangling(CGM, FD->getAttr<TargetClonesAttr>(),
+ GD.getMultiVersionIndex(), Out);
+ break;
case MultiVersionKind::None:
llvm_unreachable("None multiversion type isn't valid here");
}
}
// Make unique name for device side static file-scope variable for HIP.
- if (CGM.getContext().shouldExternalizeStaticVar(ND) &&
+ if (CGM.getContext().shouldExternalize(ND) &&
CGM.getLangOpts().GPURelocatableDeviceCode &&
- CGM.getLangOpts().CUDAIsDevice && !CGM.getLangOpts().CUID.empty())
- CGM.printPostfixForExternalizedStaticVar(Out);
+ CGM.getLangOpts().CUDAIsDevice)
+ CGM.printPostfixForExternalizedDecl(Out, ND);
+
return std::string(Out.str());
}
void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD,
- const FunctionDecl *FD) {
+ const FunctionDecl *FD,
+ StringRef &CurName) {
if (!FD->isMultiVersion())
return;
@@ -1334,7 +1921,11 @@ void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD,
if (ExistingRecord != std::end(Manglings))
Manglings.remove(&(*ExistingRecord));
auto Result = Manglings.insert(std::make_pair(OtherName, OtherGD));
- MangledDeclNames[OtherGD.getCanonicalDecl()] = Result.first->first();
+ StringRef OtherNameRef = MangledDeclNames[OtherGD.getCanonicalDecl()] =
+ Result.first->first();
+ // If this is the current decl is being created, make sure we update the name.
+ if (GD.getCanonicalDecl() == OtherGD.getCanonicalDecl())
+ CurName = OtherNameRef;
if (llvm::GlobalValue *Entry = GetGlobalValue(NonTargetName))
Entry->setName(OtherName);
}
@@ -1359,8 +1950,7 @@ StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
// static device variable depends on whether the variable is referenced by
// a host or device host function. Therefore the mangled name cannot be
// cached.
- if (!LangOpts.CUDAIsDevice ||
- !getContext().mayExternalizeStaticVar(GD.getDecl())) {
+ if (!LangOpts.CUDAIsDevice || !getContext().mayExternalize(GD.getDecl())) {
auto FoundName = MangledDeclNames.find(CanonicalGD);
if (FoundName != MangledDeclNames.end())
return FoundName->second;
@@ -1380,7 +1970,7 @@ StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
// directly between host- and device-compilations, the host- and
// device-mangling in host compilation could help catching certain ones.
assert(!isa<FunctionDecl>(ND) || !ND->hasAttr<CUDAGlobalAttr>() ||
- getLangOpts().CUDAIsDevice ||
+ getContext().shouldExternalize(ND) || getLangOpts().CUDAIsDevice ||
(getContext().getAuxTargetInfo() &&
(getContext().getAuxTargetInfo()->getCXXABI() !=
getContext().getTargetInfo().getCXXABI())) ||
@@ -1415,6 +2005,16 @@ StringRef CodeGenModule::getBlockMangledName(GlobalDecl GD,
return Result.first->first();
}
+const GlobalDecl CodeGenModule::getMangledNameDecl(StringRef Name) {
+ auto it = MangledDeclNames.begin();
+ while (it != MangledDeclNames.end()) {
+ if (it->second == Name)
+ return it->first;
+ it++;
+ }
+ return GlobalDecl();
+}
+
llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) {
return getModule().getNamedValue(Name);
}
@@ -1422,9 +2022,10 @@ llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) {
/// AddGlobalCtor - Add a function to the list that will be called before
/// main() runs.
void CodeGenModule::AddGlobalCtor(llvm::Function *Ctor, int Priority,
+ unsigned LexOrder,
llvm::Constant *AssociatedData) {
// FIXME: Type coercion of void()* types.
- GlobalCtors.push_back(Structor(Priority, Ctor, AssociatedData));
+ GlobalCtors.push_back(Structor(Priority, LexOrder, Ctor, AssociatedData));
}
/// AddGlobalDtor - Add a function to the list that will be called
@@ -1438,7 +2039,7 @@ void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority,
}
// FIXME: Type coercion of void()* types.
- GlobalDtors.push_back(Structor(Priority, Dtor, nullptr));
+ GlobalDtors.push_back(Structor(Priority, ~0U, Dtor, nullptr));
}
void CodeGenModule::EmitCtorList(CtorList &Fns, const char *GlobalName) {
@@ -1459,9 +2060,9 @@ void CodeGenModule::EmitCtorList(CtorList &Fns, const char *GlobalName) {
for (const auto &I : Fns) {
auto ctor = ctors.beginStruct(CtorStructTy);
ctor.addInt(Int32Ty, I.Priority);
- ctor.add(llvm::ConstantExpr::getBitCast(I.Initializer, CtorPFTy));
+ ctor.add(I.Initializer);
if (I.AssociatedData)
- ctor.add(llvm::ConstantExpr::getBitCast(I.AssociatedData, VoidPtrTy));
+ ctor.add(I.AssociatedData);
else
ctor.addNullPointer(VoidPtrTy);
ctor.finishAndAddTo(ctors);
@@ -1474,7 +2075,7 @@ void CodeGenModule::EmitCtorList(CtorList &Fns, const char *GlobalName) {
// The LTO linker doesn't seem to like it when we set an alignment
// on appending variables. Take it off as a workaround.
- list->setAlignment(llvm::None);
+ list->setAlignment(std::nullopt);
Fns.clear();
}
@@ -1488,16 +2089,7 @@ CodeGenModule::getFunctionLinkage(GlobalDecl GD) {
if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(D))
return getCXXABI().getCXXDestructorLinkage(Linkage, Dtor, GD.getDtorType());
- if (isa<CXXConstructorDecl>(D) &&
- cast<CXXConstructorDecl>(D)->isInheritingConstructor() &&
- Context.getTargetInfo().getCXXABI().isMicrosoft()) {
- // Our approach to inheriting constructors is fundamentally different from
- // that used by the MS ABI, so keep our inheriting constructor thunks
- // internal rather than trying to pick an unambiguous mangling for them.
- return llvm::GlobalValue::InternalLinkage;
- }
-
- return getLLVMLinkageForDeclarator(D, Linkage, /*IsConstantVariable=*/false);
+ return getLLVMLinkageForDeclarator(D, Linkage);
}
llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) {
@@ -1507,6 +2099,24 @@ llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) {
return llvm::ConstantInt::get(Int64Ty, llvm::MD5Hash(MDS->getString()));
}
+llvm::ConstantInt *CodeGenModule::CreateKCFITypeId(QualType T) {
+ if (auto *FnType = T->getAs<FunctionProtoType>())
+ T = getContext().getFunctionType(
+ FnType->getReturnType(), FnType->getParamTypes(),
+ FnType->getExtProtoInfo().withExceptionSpec(EST_None));
+
+ std::string OutName;
+ llvm::raw_string_ostream Out(OutName);
+ getCXXABI().getMangleContext().mangleCanonicalTypeName(
+ T, Out, getCodeGenOpts().SanitizeCfiICallNormalizeIntegers);
+
+ if (getCodeGenOpts().SanitizeCfiICallNormalizeIntegers)
+ Out << ".normalized";
+
+ return llvm::ConstantInt::get(Int32Ty,
+ static_cast<uint32_t>(llvm::xxHash64(OutName)));
+}
+
void CodeGenModule::SetLLVMFunctionAttributes(GlobalDecl GD,
const CGFunctionInfo &Info,
llvm::Function *F, bool IsThunk) {
@@ -1563,7 +2173,7 @@ static unsigned ArgInfoAddressSpace(LangAS AS) {
}
}
-void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
+void CodeGenModule::GenKernelArgMetadata(llvm::Function *Fn,
const FunctionDecl *FD,
CodeGenFunction *CGF) {
assert(((FD && CGF) || (!FD && !CGF)) &&
@@ -1595,13 +2205,18 @@ void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
if (FD && CGF)
for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
const ParmVarDecl *parm = FD->getParamDecl(i);
+ // Get argument name.
+ argNames.push_back(llvm::MDString::get(VMContext, parm->getName()));
+
+ if (!getLangOpts().OpenCL)
+ continue;
QualType ty = parm->getType();
std::string typeQuals;
// Get image and pipe access qualifier:
if (ty->isImageType() || ty->isPipeType()) {
const Decl *PDecl = parm;
- if (auto *TD = dyn_cast<TypedefType>(ty))
+ if (const auto *TD = ty->getAs<TypedefType>())
PDecl = TD->getDecl();
const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>();
if (A && A->isWriteOnly())
@@ -1613,9 +2228,6 @@ void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
} else
accessQuals.push_back(llvm::MDString::get(VMContext, "none"));
- // Get argument name.
- argNames.push_back(llvm::MDString::get(VMContext, parm->getName()));
-
auto getTypeSpelling = [&](QualType Ty) {
auto typeName = Ty.getUnqualifiedType().getAsString(Policy);
@@ -1688,17 +2300,20 @@ void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
argTypeQuals.push_back(llvm::MDString::get(VMContext, typeQuals));
}
- Fn->setMetadata("kernel_arg_addr_space",
- llvm::MDNode::get(VMContext, addressQuals));
- Fn->setMetadata("kernel_arg_access_qual",
- llvm::MDNode::get(VMContext, accessQuals));
- Fn->setMetadata("kernel_arg_type",
- llvm::MDNode::get(VMContext, argTypeNames));
- Fn->setMetadata("kernel_arg_base_type",
- llvm::MDNode::get(VMContext, argBaseTypeNames));
- Fn->setMetadata("kernel_arg_type_qual",
- llvm::MDNode::get(VMContext, argTypeQuals));
- if (getCodeGenOpts().EmitOpenCLArgMetadata)
+ if (getLangOpts().OpenCL) {
+ Fn->setMetadata("kernel_arg_addr_space",
+ llvm::MDNode::get(VMContext, addressQuals));
+ Fn->setMetadata("kernel_arg_access_qual",
+ llvm::MDNode::get(VMContext, accessQuals));
+ Fn->setMetadata("kernel_arg_type",
+ llvm::MDNode::get(VMContext, argTypeNames));
+ Fn->setMetadata("kernel_arg_base_type",
+ llvm::MDNode::get(VMContext, argBaseTypeNames));
+ Fn->setMetadata("kernel_arg_type_qual",
+ llvm::MDNode::get(VMContext, argTypeQuals));
+ }
+ if (getCodeGenOpts().EmitOpenCLArgMetadata ||
+ getCodeGenOpts().HIPSaveKernelArgName)
Fn->setMetadata("kernel_arg_name",
llvm::MDNode::get(VMContext, argNames));
}
@@ -1732,11 +2347,11 @@ static bool requiresMemberFunctionPointerTypeMetadata(CodeGenModule &CGM,
// Only functions whose address can be taken with a member function pointer
// need this sort of type metadata.
- return !MD->isStatic() && !MD->isVirtual() && !isa<CXXConstructorDecl>(MD) &&
- !isa<CXXDestructorDecl>(MD);
+ return MD->isImplicitObjectMemberFunction() && !MD->isVirtual() &&
+ !isa<CXXConstructorDecl, CXXDestructorDecl>(MD);
}
-std::vector<const CXXRecordDecl *>
+SmallVector<const CXXRecordDecl *, 0>
CodeGenModule::getMostBaseClasses(const CXXRecordDecl *RD) {
llvm::SetVector<const CXXRecordDecl *> MostBases;
@@ -1753,25 +2368,32 @@ CodeGenModule::getMostBaseClasses(const CXXRecordDecl *RD) {
void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
llvm::Function *F) {
- llvm::AttrBuilder B;
+ llvm::AttrBuilder B(F->getContext());
- if (CodeGenOpts.UnwindTables)
- B.addAttribute(llvm::Attribute::UWTable);
+ if ((!D || !D->hasAttr<NoUwtableAttr>()) && CodeGenOpts.UnwindTables)
+ B.addUWTableAttr(llvm::UWTableKind(CodeGenOpts.UnwindTables));
if (CodeGenOpts.StackClashProtector)
B.addAttribute("probe-stack", "inline-asm");
+ if (CodeGenOpts.StackProbeSize && CodeGenOpts.StackProbeSize != 4096)
+ B.addAttribute("stack-probe-size",
+ std::to_string(CodeGenOpts.StackProbeSize));
+
if (!hasUnwindExceptions(LangOpts))
B.addAttribute(llvm::Attribute::NoUnwind);
- if (!D || !D->hasAttr<NoStackProtectorAttr>()) {
- if (LangOpts.getStackProtector() == LangOptions::SSPOn)
- B.addAttribute(llvm::Attribute::StackProtect);
- else if (LangOpts.getStackProtector() == LangOptions::SSPStrong)
- B.addAttribute(llvm::Attribute::StackProtectStrong);
- else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
- B.addAttribute(llvm::Attribute::StackProtectReq);
- }
+ if (D && D->hasAttr<NoStackProtectorAttr>())
+ ; // Do nothing.
+ else if (D && D->hasAttr<StrictGuardStackCheckAttr>() &&
+ isStackProtectorOn(LangOpts, getTriple(), LangOptions::SSPOn))
+ B.addAttribute(llvm::Attribute::StackProtectStrong);
+ else if (isStackProtectorOn(LangOpts, getTriple(), LangOptions::SSPOn))
+ B.addAttribute(llvm::Attribute::StackProtect);
+ else if (isStackProtectorOn(LangOpts, getTriple(), LangOptions::SSPStrong))
+ B.addAttribute(llvm::Attribute::StackProtectStrong);
+ else if (isStackProtectorOn(LangOpts, getTriple(), LangOptions::SSPReq))
+ B.addAttribute(llvm::Attribute::StackProtectReq);
if (!D) {
// If we don't have a declaration to control inlining, the function isn't
@@ -1781,10 +2403,22 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining)
B.addAttribute(llvm::Attribute::NoInline);
- F->addAttributes(llvm::AttributeList::FunctionIndex, B);
+ F->addFnAttrs(B);
return;
}
+ // Handle SME attributes that apply to function definitions,
+ // rather than to function prototypes.
+ if (D->hasAttr<ArmLocallyStreamingAttr>())
+ B.addAttribute("aarch64_pstate_sm_body");
+
+ if (auto *Attr = D->getAttr<ArmNewAttr>()) {
+ if (Attr->isNewZA())
+ B.addAttribute("aarch64_pstate_za_new");
+ if (Attr->isNewZT0())
+ B.addAttribute("aarch64_new_zt0");
+ }
+
// Track whether we need to add the optnone LLVM attribute,
// starting with the default for this optimization level.
bool ShouldAddOptNone =
@@ -1868,7 +2502,7 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
B.addAttribute(llvm::Attribute::MinSize);
}
- F->addAttributes(llvm::AttributeList::FunctionIndex, B);
+ F->addFnAttrs(B);
unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
if (alignment)
@@ -1883,8 +2517,8 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
// functions. If the current target's C++ ABI requires this and this is a
// member function, set its alignment accordingly.
if (getTarget().getCXXABI().areMemberFunctionsAligned()) {
- if (F->getAlignment() < 2 && isa<CXXMethodDecl>(D))
- F->setAlignment(llvm::Align(2));
+ if (isa<CXXMethodDecl>(D) && F->getPointerAlignment(getDataLayout()) < 2)
+ F->setAlignment(std::max(llvm::Align(2), F->getAlign().valueOrOne()));
}
// In the cross-dso CFI mode with canonical jump tables, we want !type
@@ -1913,18 +2547,9 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
}
}
-void CodeGenModule::setLLVMFunctionFEnvAttributes(const FunctionDecl *D,
- llvm::Function *F) {
- if (D->hasAttr<StrictFPAttr>()) {
- llvm::AttrBuilder FuncAttrs;
- FuncAttrs.addAttribute("strictfp");
- F->addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
- }
-}
-
void CodeGenModule::SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV) {
const Decl *D = GD.getDecl();
- if (dyn_cast_or_null<NamedDecl>(D))
+ if (isa_and_nonnull<NamedDecl>(D))
setGVProperties(GV, GD);
else
GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
@@ -1932,16 +2557,19 @@ void CodeGenModule::SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV) {
if (D && D->hasAttr<UsedAttr>())
addUsedOrCompilerUsedGlobal(GV);
- if (CodeGenOpts.KeepStaticConsts && D && isa<VarDecl>(D)) {
- const auto *VD = cast<VarDecl>(D);
- if (VD->getType().isConstQualified() &&
- VD->getStorageDuration() == SD_Static)
- addUsedOrCompilerUsedGlobal(GV);
- }
+ if (const auto *VD = dyn_cast_if_present<VarDecl>(D);
+ VD &&
+ ((CodeGenOpts.KeepPersistentStorageVariables &&
+ (VD->getStorageDuration() == SD_Static ||
+ VD->getStorageDuration() == SD_Thread)) ||
+ (CodeGenOpts.KeepStaticConsts && VD->getStorageDuration() == SD_Static &&
+ VD->getType().isConstQualified())))
+ addUsedOrCompilerUsedGlobal(GV);
}
bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
- llvm::AttrBuilder &Attrs) {
+ llvm::AttrBuilder &Attrs,
+ bool SetTargetFeatures) {
// Add target-cpu and target-features attributes to functions. If
// we have a decl for the function and it has a target attribute then
// parse that and add it to the feature set.
@@ -1951,9 +2579,12 @@ bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
const auto *FD = dyn_cast_or_null<FunctionDecl>(GD.getDecl());
FD = FD ? FD->getMostRecentDecl() : FD;
const auto *TD = FD ? FD->getAttr<TargetAttr>() : nullptr;
+ const auto *TV = FD ? FD->getAttr<TargetVersionAttr>() : nullptr;
+ assert((!TD || !TV) && "both target_version and target specified");
const auto *SD = FD ? FD->getAttr<CPUSpecificAttr>() : nullptr;
+ const auto *TC = FD ? FD->getAttr<TargetClonesAttr>() : nullptr;
bool AddedAttr = false;
- if (TD || SD) {
+ if (TD || TV || SD || TC) {
llvm::StringMap<bool> FeatureMap;
getContext().getFunctionFeatureMap(FeatureMap, GD);
@@ -1966,16 +2597,23 @@ bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
// get and parse the target attribute so we can get the cpu for
// the function.
if (TD) {
- ParsedTargetAttr ParsedAttr = TD->parse();
- if (!ParsedAttr.Architecture.empty() &&
- getTarget().isValidCPUName(ParsedAttr.Architecture)) {
- TargetCPU = ParsedAttr.Architecture;
+ ParsedTargetAttr ParsedAttr =
+ Target.parseTargetAttr(TD->getFeaturesStr());
+ if (!ParsedAttr.CPU.empty() &&
+ getTarget().isValidCPUName(ParsedAttr.CPU)) {
+ TargetCPU = ParsedAttr.CPU;
TuneCPU = ""; // Clear the tune CPU.
}
if (!ParsedAttr.Tune.empty() &&
getTarget().isValidCPUName(ParsedAttr.Tune))
TuneCPU = ParsedAttr.Tune;
}
+
+ if (SD) {
+ // Apply the given CPU name as the 'tune-cpu' so that the optimizer can
+ // favor this processor.
+ TuneCPU = SD->getCPUName(GD.getMultiVersionIndex())->getName();
+ }
} else {
// Otherwise just add the existing target cpu and target features to the
// function.
@@ -1990,7 +2628,10 @@ bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
Attrs.addAttribute("tune-cpu", TuneCPU);
AddedAttr = true;
}
- if (!Features.empty()) {
+ if (!Features.empty() && SetTargetFeatures) {
+ llvm::erase_if(Features, [&](const std::string& F) {
+ return getTarget().isReadOnlyFeature(F.substr(1));
+ });
llvm::sort(Features);
Attrs.addAttribute("target-features", llvm::join(Features, ","));
AddedAttr = true;
@@ -2025,17 +2666,17 @@ void CodeGenModule::setNonAliasAttributes(GlobalDecl GD,
if (!D->getAttr<SectionAttr>())
F->addFnAttr("implicit-section-name", SA->getName());
- llvm::AttrBuilder Attrs;
+ llvm::AttrBuilder Attrs(F->getContext());
if (GetCPUAndFeaturesAttributes(GD, Attrs)) {
// We know that GetCPUAndFeaturesAttributes will always have the
// newest set, since it has the newest possible FunctionDecl, so the
// new ones should replace the old.
- llvm::AttrBuilder RemoveAttrs;
+ llvm::AttributeMask RemoveAttrs;
RemoveAttrs.addAttribute("target-cpu");
RemoveAttrs.addAttribute("target-features");
RemoveAttrs.addAttribute("tune-cpu");
- F->removeAttributes(llvm::AttributeList::FunctionIndex, RemoveAttrs);
- F->addAttributes(llvm::AttributeList::FunctionIndex, Attrs);
+ F->removeFnAttrs(RemoveAttrs);
+ F->addFnAttrs(Attrs);
}
}
@@ -2092,6 +2733,54 @@ void CodeGenModule::CreateFunctionTypeMetadataForIcall(const FunctionDecl *FD,
F->addTypeMetadata(0, llvm::ConstantAsMetadata::get(CrossDsoTypeId));
}
+void CodeGenModule::setKCFIType(const FunctionDecl *FD, llvm::Function *F) {
+ llvm::LLVMContext &Ctx = F->getContext();
+ llvm::MDBuilder MDB(Ctx);
+ F->setMetadata(llvm::LLVMContext::MD_kcfi_type,
+ llvm::MDNode::get(
+ Ctx, MDB.createConstant(CreateKCFITypeId(FD->getType()))));
+}
+
+static bool allowKCFIIdentifier(StringRef Name) {
+ // KCFI type identifier constants are only necessary for external assembly
+ // functions, which means it's safe to skip unusual names. Subset of
+ // MCAsmInfo::isAcceptableChar() and MCAsmInfoXCOFF::isAcceptableChar().
+ return llvm::all_of(Name, [](const char &C) {
+ return llvm::isAlnum(C) || C == '_' || C == '.';
+ });
+}
+
+void CodeGenModule::finalizeKCFITypes() {
+ llvm::Module &M = getModule();
+ for (auto &F : M.functions()) {
+ // Remove KCFI type metadata from non-address-taken local functions.
+ bool AddressTaken = F.hasAddressTaken();
+ if (!AddressTaken && F.hasLocalLinkage())
+ F.eraseMetadata(llvm::LLVMContext::MD_kcfi_type);
+
+ // Generate a constant with the expected KCFI type identifier for all
+ // address-taken function declarations to support annotating indirectly
+ // called assembly functions.
+ if (!AddressTaken || !F.isDeclaration())
+ continue;
+
+ const llvm::ConstantInt *Type;
+ if (const llvm::MDNode *MD = F.getMetadata(llvm::LLVMContext::MD_kcfi_type))
+ Type = llvm::mdconst::extract<llvm::ConstantInt>(MD->getOperand(0));
+ else
+ continue;
+
+ StringRef Name = F.getName();
+ if (!allowKCFIIdentifier(Name))
+ continue;
+
+ std::string Asm = (".weak __kcfi_typeid_" + Name + "\n.set __kcfi_typeid_" +
+ Name + ", " + Twine(Type->getZExtValue()) + "\n")
+ .str();
+ M.appendModuleInlineAsm(Asm);
+ }
+}
+
void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
bool IsIncompleteFunction,
bool IsThunk) {
@@ -2118,7 +2807,7 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
F->arg_begin()->getType()
->canLosslesslyBitCastTo(F->getReturnType()) &&
"unexpected this return");
- F->addAttribute(1, llvm::Attribute::Returned);
+ F->addParamAttr(0, llvm::Attribute::Returned);
}
// Only a few attributes are set on declarations; these may later be
@@ -2136,6 +2825,13 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
else if (const auto *SA = FD->getAttr<SectionAttr>())
F->setSection(SA->getName());
+ if (const auto *EA = FD->getAttr<ErrorAttr>()) {
+ if (EA->isError())
+ F->addFnAttr("dontcall-error", EA->getUserDiagnostic());
+ else if (EA->isWarning())
+ F->addFnAttr("dontcall-warn", EA->getUserDiagnostic());
+ }
+
// If we plan on emitting this inline builtin, we can't treat it as a builtin.
if (FD->isInlineBuiltinDeclaration()) {
const FunctionDecl *FDBody;
@@ -2144,15 +2840,13 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
assert(HasBody && "Inline builtin declarations should always have an "
"available body!");
if (shouldEmitFunction(FDBody))
- F->addAttribute(llvm::AttributeList::FunctionIndex,
- llvm::Attribute::NoBuiltin);
+ F->addFnAttr(llvm::Attribute::NoBuiltin);
}
if (FD->isReplaceableGlobalAllocationFunction()) {
// A replaceable global allocation function does not act like a builtin by
// default, only if it is invoked by a new-expression or delete-expression.
- F->addAttribute(llvm::AttributeList::FunctionIndex,
- llvm::Attribute::NoBuiltin);
+ F->addFnAttr(llvm::Attribute::NoBuiltin);
}
if (isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD))
@@ -2169,9 +2863,15 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
!CodeGenOpts.SanitizeCfiCanonicalJumpTables)
CreateFunctionTypeMetadataForIcall(FD, F);
+ if (LangOpts.Sanitize.has(SanitizerKind::KCFI))
+ setKCFIType(FD, F);
+
if (getLangOpts().OpenMP && FD->hasAttr<OMPDeclareSimdDeclAttr>())
getOpenMPRuntime().emitDeclareSimdFunction(FD, F);
+ if (CodeGenOpts.InlineMaxStackSize != UINT_MAX)
+ F->addFnAttr("inline-max-stacksize", llvm::utostr(CodeGenOpts.InlineMaxStackSize));
+
if (const auto *CB = FD->getAttr<CallbackAttr>()) {
// Annotate the callback behavior as metadata:
// - The callback callee (as argument number).
@@ -2281,9 +2981,9 @@ static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
}
// Import this module's dependencies.
- for (unsigned I = Mod->Imports.size(); I > 0; --I) {
- if (Visited.insert(Mod->Imports[I - 1]).second)
- addLinkOptionsPostorder(CGM, Mod->Imports[I-1], Metadata, Visited);
+ for (Module *Import : llvm::reverse(Mod->Imports)) {
+ if (Visited.insert(Import).second)
+ addLinkOptionsPostorder(CGM, Import, Metadata, Visited);
}
// Add linker options to link against the libraries/frameworks
@@ -2296,13 +2996,12 @@ static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
if (Mod->UseExportAsModuleLinkName)
return;
- for (unsigned I = Mod->LinkLibraries.size(); I > 0; --I) {
+ for (const Module::LinkLibrary &LL : llvm::reverse(Mod->LinkLibraries)) {
// Link against a framework. Frameworks are currently Darwin only, so we
// don't to ask TargetCodeGenInfo for the spelling of the linker option.
- if (Mod->LinkLibraries[I-1].IsFramework) {
- llvm::Metadata *Args[2] = {
- llvm::MDString::get(Context, "-framework"),
- llvm::MDString::get(Context, Mod->LinkLibraries[I - 1].Library)};
+ if (LL.IsFramework) {
+ llvm::Metadata *Args[2] = {llvm::MDString::get(Context, "-framework"),
+ llvm::MDString::get(Context, LL.Library)};
Metadata.push_back(llvm::MDNode::get(Context, Args));
continue;
@@ -2312,19 +3011,51 @@ static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
if (IsELF) {
llvm::Metadata *Args[2] = {
llvm::MDString::get(Context, "lib"),
- llvm::MDString::get(Context, Mod->LinkLibraries[I - 1].Library),
+ llvm::MDString::get(Context, LL.Library),
};
Metadata.push_back(llvm::MDNode::get(Context, Args));
} else {
llvm::SmallString<24> Opt;
- CGM.getTargetCodeGenInfo().getDependentLibraryOption(
- Mod->LinkLibraries[I - 1].Library, Opt);
+ CGM.getTargetCodeGenInfo().getDependentLibraryOption(LL.Library, Opt);
auto *OptString = llvm::MDString::get(Context, Opt);
Metadata.push_back(llvm::MDNode::get(Context, OptString));
}
}
}
+void CodeGenModule::EmitModuleInitializers(clang::Module *Primary) {
+ assert(Primary->isNamedModuleUnit() &&
+ "We should only emit module initializers for named modules.");
+
+ // Emit the initializers in the order that sub-modules appear in the
+ // source, first Global Module Fragments, if present.
+ if (auto GMF = Primary->getGlobalModuleFragment()) {
+ for (Decl *D : getContext().getModuleInitializers(GMF)) {
+ if (isa<ImportDecl>(D))
+ continue;
+ assert(isa<VarDecl>(D) && "GMF initializer decl is not a var?");
+ EmitTopLevelDecl(D);
+ }
+ }
+ // Second any associated with the module, itself.
+ for (Decl *D : getContext().getModuleInitializers(Primary)) {
+ // Skip import decls, the inits for those are called explicitly.
+ if (isa<ImportDecl>(D))
+ continue;
+ EmitTopLevelDecl(D);
+ }
+ // Third any associated with the Privat eMOdule Fragment, if present.
+ if (auto PMF = Primary->getPrivateModuleFragment()) {
+ for (Decl *D : getContext().getModuleInitializers(PMF)) {
+ // Skip import decls, the inits for those are called explicitly.
+ if (isa<ImportDecl>(D))
+ continue;
+ assert(isa<VarDecl>(D) && "PMF initializer decl is not a var?");
+ EmitTopLevelDecl(D);
+ }
+ }
+}
+
void CodeGenModule::EmitModuleLinkOptions() {
// Collect the set of all of the modules we want to visit to emit link
// options, which is essentially the imported modules and all of their
@@ -2410,8 +3141,8 @@ void CodeGenModule::EmitDeferred() {
// Note we should not clear CUDADeviceVarODRUsedByHost since it is still
// needed for further handling.
if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice)
- for (const auto *V : getContext().CUDADeviceVarODRUsedByHost)
- DeferredDeclsToEmit.push_back(V);
+ llvm::append_range(DeferredDeclsToEmit,
+ getContext().CUDADeviceVarODRUsedByHost);
// Stop if we're out of both deferred vtables and deferred declarations.
if (DeferredDeclsToEmit.empty())
@@ -2484,6 +3215,13 @@ void CodeGenModule::EmitVTablesOpportunistically() {
}
void CodeGenModule::EmitGlobalAnnotations() {
+ for (const auto& [MangledName, VD] : DeferredAnnotations) {
+ llvm::GlobalValue *GV = GetGlobalValue(MangledName);
+ if (GV)
+ AddGlobalAnnotations(VD, GV);
+ }
+ DeferredAnnotations.clear();
+
if (Annotations.empty())
return;
@@ -2503,9 +3241,10 @@ llvm::Constant *CodeGenModule::EmitAnnotationString(StringRef Str) {
// Not found yet, create a new global.
llvm::Constant *s = llvm::ConstantDataArray::getString(getLLVMContext(), Str);
- auto *gv =
- new llvm::GlobalVariable(getModule(), s->getType(), true,
- llvm::GlobalValue::PrivateLinkage, s, ".str");
+ auto *gv = new llvm::GlobalVariable(
+ getModule(), s->getType(), true, llvm::GlobalValue::PrivateLinkage, s,
+ ".str", nullptr, llvm::GlobalValue::NotThreadLocal,
+ ConstGlobalsPtrTy->getAddressSpace());
gv->setSection(AnnotationSection);
gv->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
AStr = gv;
@@ -2531,7 +3270,7 @@ llvm::Constant *CodeGenModule::EmitAnnotationLineNo(SourceLocation L) {
llvm::Constant *CodeGenModule::EmitAnnotationArgs(const AnnotateAttr *Attr) {
ArrayRef<Expr *> Exprs = {Attr->args_begin(), Attr->args_size()};
if (Exprs.empty())
- return llvm::ConstantPointerNull::get(Int8PtrTy);
+ return llvm::ConstantPointerNull::get(ConstGlobalsPtrTy);
llvm::FoldingSetNodeID ID;
for (Expr *E : Exprs) {
@@ -2555,10 +3294,9 @@ llvm::Constant *CodeGenModule::EmitAnnotationArgs(const AnnotateAttr *Attr) {
".args");
GV->setSection(AnnotationSection);
GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- auto *Bitcasted = llvm::ConstantExpr::getBitCast(GV, Int8PtrTy);
- Lookup = Bitcasted;
- return Bitcasted;
+ Lookup = GV;
+ return GV;
}
llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
@@ -2570,19 +3308,18 @@ llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
*LineNoCst = EmitAnnotationLineNo(L),
*Args = EmitAnnotationArgs(AA);
- llvm::Constant *ASZeroGV = GV;
- if (GV->getAddressSpace() != 0) {
- ASZeroGV = llvm::ConstantExpr::getAddrSpaceCast(
- GV, GV->getValueType()->getPointerTo(0));
+ llvm::Constant *GVInGlobalsAS = GV;
+ if (GV->getAddressSpace() !=
+ getDataLayout().getDefaultGlobalsAddressSpace()) {
+ GVInGlobalsAS = llvm::ConstantExpr::getAddrSpaceCast(
+ GV,
+ llvm::PointerType::get(
+ GV->getContext(), getDataLayout().getDefaultGlobalsAddressSpace()));
}
// Create the ConstantStruct for the global annotation.
llvm::Constant *Fields[] = {
- llvm::ConstantExpr::getBitCast(ASZeroGV, Int8PtrTy),
- llvm::ConstantExpr::getBitCast(AnnoGV, Int8PtrTy),
- llvm::ConstantExpr::getBitCast(UnitGV, Int8PtrTy),
- LineNoCst,
- Args,
+ GVInGlobalsAS, AnnoGV, UnitGV, LineNoCst, Args,
};
return llvm::ConstantStruct::getAnon(Fields);
}
@@ -2601,34 +3338,35 @@ bool CodeGenModule::isInNoSanitizeList(SanitizerMask Kind, llvm::Function *Fn,
// NoSanitize by function name.
if (NoSanitizeL.containsFunction(Kind, Fn->getName()))
return true;
- // NoSanitize by location.
+ // NoSanitize by location. Check "mainfile" prefix.
+ auto &SM = Context.getSourceManager();
+ FileEntryRef MainFile = *SM.getFileEntryRefForID(SM.getMainFileID());
+ if (NoSanitizeL.containsMainFile(Kind, MainFile.getName()))
+ return true;
+
+ // Check "src" prefix.
if (Loc.isValid())
return NoSanitizeL.containsLocation(Kind, Loc);
// If location is unknown, this may be a compiler-generated function. Assume
// it's located in the main file.
- auto &SM = Context.getSourceManager();
- if (const auto *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
- return NoSanitizeL.containsFile(Kind, MainFile->getName());
- }
- return false;
+ return NoSanitizeL.containsFile(Kind, MainFile.getName());
}
-bool CodeGenModule::isInNoSanitizeList(llvm::GlobalVariable *GV,
+bool CodeGenModule::isInNoSanitizeList(SanitizerMask Kind,
+ llvm::GlobalVariable *GV,
SourceLocation Loc, QualType Ty,
StringRef Category) const {
- // For now globals can be ignored only in ASan and KASan.
- const SanitizerMask EnabledAsanMask =
- LangOpts.Sanitize.Mask &
- (SanitizerKind::Address | SanitizerKind::KernelAddress |
- SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress |
- SanitizerKind::MemTag);
- if (!EnabledAsanMask)
- return false;
const auto &NoSanitizeL = getContext().getNoSanitizeList();
- if (NoSanitizeL.containsGlobal(EnabledAsanMask, GV->getName(), Category))
+ if (NoSanitizeL.containsGlobal(Kind, GV->getName(), Category))
return true;
- if (NoSanitizeL.containsLocation(EnabledAsanMask, Loc, Category))
+ auto &SM = Context.getSourceManager();
+ if (NoSanitizeL.containsMainFile(
+ Kind, SM.getFileEntryRefForID(SM.getMainFileID())->getName(),
+ Category))
+ return true;
+ if (NoSanitizeL.containsLocation(Kind, Loc, Category))
return true;
+
// Check global type.
if (!Ty.isNull()) {
// Drill down the array types: if global variable of a fixed type is
@@ -2639,7 +3377,7 @@ bool CodeGenModule::isInNoSanitizeList(llvm::GlobalVariable *GV,
// Only record types (classes, structs etc.) are ignored.
if (Ty->isRecordType()) {
std::string TypeStr = Ty.getAsString(getContext().getPrintingPolicy());
- if (NoSanitizeL.containsType(EnabledAsanMask, TypeStr, Category))
+ if (NoSanitizeL.containsType(Kind, TypeStr, Category))
return true;
}
}
@@ -2672,32 +3410,44 @@ bool CodeGenModule::imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc,
return true;
}
-bool CodeGenModule::isProfileInstrExcluded(llvm::Function *Fn,
- SourceLocation Loc) const {
+ProfileList::ExclusionType
+CodeGenModule::isFunctionBlockedByProfileList(llvm::Function *Fn,
+ SourceLocation Loc) const {
const auto &ProfileList = getContext().getProfileList();
// If the profile list is empty, then instrument everything.
if (ProfileList.isEmpty())
- return false;
+ return ProfileList::Allow;
CodeGenOptions::ProfileInstrKind Kind = getCodeGenOpts().getProfileInstr();
// First, check the function name.
- Optional<bool> V = ProfileList.isFunctionExcluded(Fn->getName(), Kind);
- if (V.hasValue())
+ if (auto V = ProfileList.isFunctionExcluded(Fn->getName(), Kind))
return *V;
// Next, check the source location.
- if (Loc.isValid()) {
- Optional<bool> V = ProfileList.isLocationExcluded(Loc, Kind);
- if (V.hasValue())
+ if (Loc.isValid())
+ if (auto V = ProfileList.isLocationExcluded(Loc, Kind))
return *V;
- }
// If location is unknown, this may be a compiler-generated function. Assume
// it's located in the main file.
auto &SM = Context.getSourceManager();
- if (const auto *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
- Optional<bool> V = ProfileList.isFileExcluded(MainFile->getName(), Kind);
- if (V.hasValue())
+ if (auto MainFile = SM.getFileEntryRefForID(SM.getMainFileID()))
+ if (auto V = ProfileList.isFileExcluded(MainFile->getName(), Kind))
return *V;
+ return ProfileList.getDefault(Kind);
+}
+
+ProfileList::ExclusionType
+CodeGenModule::isFunctionBlockedFromProfileInstr(llvm::Function *Fn,
+ SourceLocation Loc) const {
+ auto V = isFunctionBlockedByProfileList(Fn, Loc);
+ if (V != ProfileList::Allow)
+ return V;
+
+ auto NumGroups = getCodeGenOpts().ProfileTotalFunctionGroups;
+ if (NumGroups > 1) {
+ auto Group = llvm::crc32(arrayRefFromStringRef(Fn->getName())) % NumGroups;
+ if (Group != getCodeGenOpts().ProfileSelectedFunctionGroup)
+ return ProfileList::Skip;
}
- return ProfileList.getDefault();
+ return ProfileList::Allow;
}
bool CodeGenModule::MustBeEmitted(const ValueDecl *Global) {
@@ -2705,12 +3455,14 @@ bool CodeGenModule::MustBeEmitted(const ValueDecl *Global) {
if (LangOpts.EmitAllDecls)
return true;
- if (CodeGenOpts.KeepStaticConsts) {
- const auto *VD = dyn_cast<VarDecl>(Global);
- if (VD && VD->getType().isConstQualified() &&
- VD->getStorageDuration() == SD_Static)
- return true;
- }
+ const auto *VD = dyn_cast<VarDecl>(Global);
+ if (VD &&
+ ((CodeGenOpts.KeepPersistentStorageVariables &&
+ (VD->getStorageDuration() == SD_Static ||
+ VD->getStorageDuration() == SD_Thread)) ||
+ (CodeGenOpts.KeepStaticConsts && VD->getStorageDuration() == SD_Static &&
+ VD->getType().isConstQualified())))
+ return true;
return getContext().DeclMustBeEmitted(Global);
}
@@ -2723,7 +3475,7 @@ bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
// we have if the level of the declare target attribute is -1. Note that we
// check somewhere else if we should emit this at all.
if (LangOpts.OpenMP >= 50 && !LangOpts.OpenMPSimd) {
- llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr =
+ std::optional<OMPDeclareTargetDeclAttr *> ActiveAttr =
OMPDeclareTargetDeclAttr::getActiveAttr(Global);
if (!ActiveAttr || (*ActiveAttr)->getLevel() != (unsigned)-1)
return false;
@@ -2735,17 +3487,25 @@ bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
// explicitly instantiated, so they should not be emitted eagerly.
return false;
}
- if (const auto *VD = dyn_cast<VarDecl>(Global))
+ if (const auto *VD = dyn_cast<VarDecl>(Global)) {
if (Context.getInlineVariableDefinitionKind(VD) ==
ASTContext::InlineVariableDefinitionKind::WeakUnknown)
// A definition of an inline constexpr static data member may change
// linkage later if it's redeclared outside the class.
return false;
+ if (CXX20ModuleInits && VD->getOwningModule() &&
+ !VD->getOwningModule()->isModuleMapModule()) {
+ // For CXX20, module-owned initializers need to be deferred, since it is
+ // not known at this point if they will be run for the current module or
+ // as part of the initializer for an imported one.
+ return false;
+ }
+ }
// If OpenMP is enabled and threadprivates must be generated like TLS, delay
// codegen for global variables, because they may be marked as threadprivate.
if (LangOpts.OpenMP && LangOpts.OpenMPUseTLS &&
getContext().getTargetInfo().isTLSSupported() && isa<VarDecl>(Global) &&
- !isTypeConstant(Global->getType(), false) &&
+ !Global->getType().isConstantStorage(getContext(), false, false) &&
!OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(Global))
return false;
@@ -2760,7 +3520,7 @@ ConstantAddress CodeGenModule::GetAddrOfMSGuidDecl(const MSGuidDecl *GD) {
// Look for an existing global.
if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
- return ConstantAddress(GV, Alignment);
+ return ConstantAddress(GV, GV->getValueType(), Alignment);
ConstantEmitter Emitter(*this);
llvm::Constant *Init;
@@ -2794,15 +3554,44 @@ ConstantAddress CodeGenModule::GetAddrOfMSGuidDecl(const MSGuidDecl *GD) {
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
setDSOLocal(GV);
- llvm::Constant *Addr = GV;
if (!V.isAbsent()) {
Emitter.finalize(GV);
- } else {
- llvm::Type *Ty = getTypes().ConvertTypeForMem(GD->getType());
- Addr = llvm::ConstantExpr::getBitCast(
- GV, Ty->getPointerTo(GV->getAddressSpace()));
+ return ConstantAddress(GV, GV->getValueType(), Alignment);
}
- return ConstantAddress(Addr, Alignment);
+
+ llvm::Type *Ty = getTypes().ConvertTypeForMem(GD->getType());
+ return ConstantAddress(GV, Ty, Alignment);
+}
+
+ConstantAddress CodeGenModule::GetAddrOfUnnamedGlobalConstantDecl(
+ const UnnamedGlobalConstantDecl *GCD) {
+ CharUnits Alignment = getContext().getTypeAlignInChars(GCD->getType());
+
+ llvm::GlobalVariable **Entry = nullptr;
+ Entry = &UnnamedGlobalConstantDeclMap[GCD];
+ if (*Entry)
+ return ConstantAddress(*Entry, (*Entry)->getValueType(), Alignment);
+
+ ConstantEmitter Emitter(*this);
+ llvm::Constant *Init;
+
+ const APValue &V = GCD->getValue();
+
+ assert(!V.isAbsent());
+ Init = Emitter.emitForInitializer(V, GCD->getType().getAddressSpace(),
+ GCD->getType());
+
+ auto *GV = new llvm::GlobalVariable(getModule(), Init->getType(),
+ /*isConstant=*/true,
+ llvm::GlobalValue::PrivateLinkage, Init,
+ ".constant");
+ GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ GV->setAlignment(Alignment.getAsAlign());
+
+ Emitter.finalize(GV);
+
+ *Entry = GV;
+ return ConstantAddress(GV, GV->getValueType(), Alignment);
}
ConstantAddress CodeGenModule::GetAddrOfTemplateParamObject(
@@ -2811,7 +3600,7 @@ ConstantAddress CodeGenModule::GetAddrOfTemplateParamObject(
CharUnits Alignment = getNaturalTypeAlignment(TPO->getType());
if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
- return ConstantAddress(GV, Alignment);
+ return ConstantAddress(GV, GV->getValueType(), Alignment);
ConstantEmitter Emitter(*this);
llvm::Constant *Init = Emitter.emitForInitializer(
@@ -2822,14 +3611,18 @@ ConstantAddress CodeGenModule::GetAddrOfTemplateParamObject(
return ConstantAddress::invalid();
}
- auto *GV = new llvm::GlobalVariable(
- getModule(), Init->getType(),
- /*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name);
+ llvm::GlobalValue::LinkageTypes Linkage =
+ isExternallyVisible(TPO->getLinkageAndVisibility().getLinkage())
+ ? llvm::GlobalValue::LinkOnceODRLinkage
+ : llvm::GlobalValue::InternalLinkage;
+ auto *GV = new llvm::GlobalVariable(getModule(), Init->getType(),
+ /*isConstant=*/true, Linkage, Init, Name);
+ setGVProperties(GV, TPO);
if (supportsCOMDAT())
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
Emitter.finalize(GV);
- return ConstantAddress(GV, Alignment);
+ return ConstantAddress(GV, GV->getValueType(), Alignment);
}
ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
@@ -2841,11 +3634,8 @@ ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
// See if there is already something with the target's name in the module.
llvm::GlobalValue *Entry = GetGlobalValue(AA->getAliasee());
- if (Entry) {
- unsigned AS = getContext().getTargetAddressSpace(VD->getType());
- auto Ptr = llvm::ConstantExpr::getBitCast(Entry, DeclTy->getPointerTo(AS));
- return ConstantAddress(Ptr, Alignment);
- }
+ if (Entry)
+ return ConstantAddress(Entry, DeclTy, Alignment);
llvm::Constant *Aliasee;
if (isa<llvm::FunctionType>(DeclTy))
@@ -2853,13 +3643,22 @@ ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
GlobalDecl(cast<FunctionDecl>(VD)),
/*ForVTable=*/false);
else
- Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(), DeclTy, 0, nullptr);
+ Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(), DeclTy, LangAS::Default,
+ nullptr);
auto *F = cast<llvm::GlobalValue>(Aliasee);
F->setLinkage(llvm::Function::ExternalWeakLinkage);
WeakRefReferences.insert(F);
- return ConstantAddress(Aliasee, Alignment);
+ return ConstantAddress(Aliasee, DeclTy, Alignment);
+}
+
+template <typename AttrT> static bool hasImplicitAttr(const ValueDecl *D) {
+ if (!D)
+ return false;
+ if (auto *A = D->getAttr<AttrT>())
+ return A->isImplicit();
+ return D->isImplicit();
}
void CodeGenModule::EmitGlobal(GlobalDecl GD) {
@@ -2883,14 +3682,24 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
return emitCPUDispatchDefinition(GD);
// If this is CUDA, be selective about which declarations we emit.
+ // Non-constexpr non-lambda implicit host device functions are not emitted
+ // unless they are used on device side.
if (LangOpts.CUDA) {
if (LangOpts.CUDAIsDevice) {
- if (!Global->hasAttr<CUDADeviceAttr>() &&
+ const auto *FD = dyn_cast<FunctionDecl>(Global);
+ if ((!Global->hasAttr<CUDADeviceAttr>() ||
+ (LangOpts.OffloadImplicitHostDeviceTemplates && FD &&
+ hasImplicitAttr<CUDAHostAttr>(FD) &&
+ hasImplicitAttr<CUDADeviceAttr>(FD) && !FD->isConstexpr() &&
+ !isLambdaCallOperator(FD) &&
+ !getContext().CUDAImplicitHostDeviceFunUsedByDevice.count(FD))) &&
!Global->hasAttr<CUDAGlobalAttr>() &&
!Global->hasAttr<CUDAConstantAttr>() &&
!Global->hasAttr<CUDASharedAttr>() &&
!Global->getType()->isCUDADeviceBuiltinSurfaceType() &&
- !Global->getType()->isCUDADeviceBuiltinTextureType())
+ !Global->getType()->isCUDADeviceBuiltinTextureType() &&
+ !(LangOpts.HIPStdPar && isa<FunctionDecl>(Global) &&
+ !Global->hasAttr<CUDAHostAttr>()))
return;
} else {
// We need to emit host-side 'shadows' for all global
@@ -2916,7 +3725,8 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
if (MustBeEmitted(Global))
EmitOMPDeclareReduction(DRD);
return;
- } else if (auto *DMD = dyn_cast<OMPDeclareMapperDecl>(Global)) {
+ }
+ if (auto *DMD = dyn_cast<OMPDeclareMapperDecl>(Global)) {
if (MustBeEmitted(Global))
EmitOMPDeclareMapper(DMD);
return;
@@ -2925,6 +3735,14 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
// Ignore declarations, they will be emitted on their first use.
if (const auto *FD = dyn_cast<FunctionDecl>(Global)) {
+ // Update deferred annotations with the latest declaration if the function
+ // function was already used or defined.
+ if (FD->hasAttr<AnnotateAttr>()) {
+ StringRef MangledName = getMangledName(GD);
+ if (GetGlobalValue(MangledName))
+ DeferredAnnotations[MangledName] = FD;
+ }
+
// Forward declarations are emitted lazily on first use.
if (!FD->doesThisDeclarationHaveABody()) {
if (!FD->doesDeclarationForceExternallyVisibleDefinition())
@@ -2947,16 +3765,25 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
!Context.isMSStaticDataMemberInlineDefinition(VD)) {
if (LangOpts.OpenMP) {
// Emit declaration of the must-be-emitted declare target variable.
- if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+ if (std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
+
+ // If this variable has external storage and doesn't require special
+ // link handling we defer to its canonical definition.
+ if (VD->hasExternalStorage() &&
+ Res != OMPDeclareTargetDeclAttr::MT_Link)
+ return;
+
bool UnifiedMemoryEnabled =
getOpenMPRuntime().hasRequiresUnifiedSharedMemory();
- if (*Res == OMPDeclareTargetDeclAttr::MT_To &&
+ if ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
+ *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
!UnifiedMemoryEnabled) {
(void)GetAddrOfGlobalVar(VD);
} else {
assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
- (*Res == OMPDeclareTargetDeclAttr::MT_To &&
+ ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
+ *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
UnifiedMemoryEnabled)) &&
"Link clause or to clause with unified memory expected.");
(void)getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
@@ -2980,6 +3807,7 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
if (MustBeEmitted(Global) && MayBeEmittedEagerly(Global)) {
// Emit the definition if it can't be deferred.
EmitGlobalDefinition(GD);
+ addEmittedDeferredDecl(GD);
return;
}
@@ -3036,7 +3864,7 @@ namespace {
if (!BuiltinID || !BI.isLibFunction(BuiltinID))
return false;
StringRef BuiltinName = BI.getName(BuiltinID);
- if (BuiltinName.startswith("__builtin_") &&
+ if (BuiltinName.starts_with("__builtin_") &&
Name == BuiltinName.slice(strlen("__builtin_"), StringRef::npos)) {
return true;
}
@@ -3139,10 +3967,22 @@ CodeGenModule::isTriviallyRecursive(const FunctionDecl *FD) {
bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
if (getFunctionLinkage(GD) != llvm::Function::AvailableExternallyLinkage)
return true;
+
const auto *F = cast<FunctionDecl>(GD.getDecl());
if (CodeGenOpts.OptimizationLevel == 0 && !F->hasAttr<AlwaysInlineAttr>())
return false;
+ // We don't import function bodies from other named module units since that
+ // behavior may break ABI compatibility of the current unit.
+ if (const Module *M = F->getOwningModule();
+ M && M->getTopLevelModule()->isNamedModule() &&
+ getContext().getCurrentNamedModule() != M->getTopLevelModule() &&
+ !F->hasAttr<AlwaysInlineAttr>())
+ return false;
+
+ if (F->hasAttr<NoInlineAttr>())
+ return false;
+
if (F->hasAttr<DLLImportAttr>() && !F->hasAttr<AlwaysInlineAttr>()) {
// Check whether it would be safe to inline this dllimport function.
DLLImportFunctionVisitor Visitor;
@@ -3163,6 +4003,11 @@ bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
}
}
+ // Inline builtins declaration must be emitted. They often are fortified
+ // functions.
+ if (F->isInlineBuiltinDeclaration())
+ return true;
+
// PR9614. Avoid cases where the source code is lying to us. An available
// externally function should have an equivalent function somewhere else,
// but a function that calls itself through asm label/`__builtin_` trickery is
@@ -3183,7 +4028,15 @@ void CodeGenModule::EmitMultiVersionFunctionDefinition(GlobalDecl GD,
auto *Spec = FD->getAttr<CPUSpecificAttr>();
for (unsigned I = 0; I < Spec->cpus_size(); ++I)
EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
- // Requires multiple emits.
+ } else if (FD->isTargetClonesMultiVersion()) {
+ auto *Clone = FD->getAttr<TargetClonesAttr>();
+ for (unsigned I = 0; I < Clone->featuresStrs_size(); ++I)
+ if (Clone->isFirstOfVersion(I))
+ EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
+ // Ensure that the resolver function is also emitted.
+ GetOrCreateMultiVersionResolver(GD);
+ } else if (FD->hasAttr<TargetVersionAttr>()) {
+ GetOrCreateMultiVersionResolver(GD);
} else
EmitGlobalFunctionDefinition(GD, GV);
}
@@ -3243,64 +4096,151 @@ static unsigned
TargetMVPriority(const TargetInfo &TI,
const CodeGenFunction::MultiVersionResolverOption &RO) {
unsigned Priority = 0;
- for (StringRef Feat : RO.Conditions.Features)
+ unsigned NumFeatures = 0;
+ for (StringRef Feat : RO.Conditions.Features) {
Priority = std::max(Priority, TI.multiVersionSortPriority(Feat));
+ NumFeatures++;
+ }
if (!RO.Conditions.Architecture.empty())
Priority = std::max(
Priority, TI.multiVersionSortPriority(RO.Conditions.Architecture));
+
+ Priority += TI.multiVersionFeatureCost() * NumFeatures;
+
return Priority;
}
+// Multiversion functions should be at most 'WeakODRLinkage' so that a different
+// TU can forward declare the function without causing problems. Particularly
+// in the cases of CPUDispatch, this causes issues. This also makes sure we
+// work with internal linkage functions, so that the same function name can be
+// used with internal linkage in multiple TUs.
+llvm::GlobalValue::LinkageTypes getMultiversionLinkage(CodeGenModule &CGM,
+ GlobalDecl GD) {
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+ if (FD->getFormalLinkage() == Linkage::Internal)
+ return llvm::GlobalValue::InternalLinkage;
+ return llvm::GlobalValue::WeakODRLinkage;
+}
+
void CodeGenModule::emitMultiVersionFunctions() {
std::vector<GlobalDecl> MVFuncsToEmit;
MultiVersionFuncs.swap(MVFuncsToEmit);
for (GlobalDecl GD : MVFuncsToEmit) {
+ const auto *FD = cast<FunctionDecl>(GD.getDecl());
+ assert(FD && "Expected a FunctionDecl");
+
SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
- const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
- getContext().forEachMultiversionedFunctionVersion(
- FD, [this, &GD, &Options](const FunctionDecl *CurFD) {
- GlobalDecl CurGD{
- (CurFD->isDefined() ? CurFD->getDefinition() : CurFD)};
- StringRef MangledName = getMangledName(CurGD);
- llvm::Constant *Func = GetGlobalValue(MangledName);
- if (!Func) {
- if (CurFD->isDefined()) {
- EmitGlobalFunctionDefinition(CurGD, nullptr);
- Func = GetGlobalValue(MangledName);
+ if (FD->isTargetMultiVersion()) {
+ getContext().forEachMultiversionedFunctionVersion(
+ FD, [this, &GD, &Options](const FunctionDecl *CurFD) {
+ GlobalDecl CurGD{
+ (CurFD->isDefined() ? CurFD->getDefinition() : CurFD)};
+ StringRef MangledName = getMangledName(CurGD);
+ llvm::Constant *Func = GetGlobalValue(MangledName);
+ if (!Func) {
+ if (CurFD->isDefined()) {
+ EmitGlobalFunctionDefinition(CurGD, nullptr);
+ Func = GetGlobalValue(MangledName);
+ } else {
+ const CGFunctionInfo &FI =
+ getTypes().arrangeGlobalDeclaration(GD);
+ llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
+ Func = GetAddrOfFunction(CurGD, Ty, /*ForVTable=*/false,
+ /*DontDefer=*/false, ForDefinition);
+ }
+ assert(Func && "This should have just been created");
+ }
+ if (CurFD->getMultiVersionKind() == MultiVersionKind::Target) {
+ const auto *TA = CurFD->getAttr<TargetAttr>();
+ llvm::SmallVector<StringRef, 8> Feats;
+ TA->getAddedFeatures(Feats);
+ Options.emplace_back(cast<llvm::Function>(Func),
+ TA->getArchitecture(), Feats);
} else {
- const CGFunctionInfo &FI =
- getTypes().arrangeGlobalDeclaration(GD);
- llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
- Func = GetAddrOfFunction(CurGD, Ty, /*ForVTable=*/false,
- /*DontDefer=*/false, ForDefinition);
+ const auto *TVA = CurFD->getAttr<TargetVersionAttr>();
+ llvm::SmallVector<StringRef, 8> Feats;
+ TVA->getFeatures(Feats);
+ Options.emplace_back(cast<llvm::Function>(Func),
+ /*Architecture*/ "", Feats);
}
- assert(Func && "This should have just been created");
+ });
+ } else if (FD->isTargetClonesMultiVersion()) {
+ const auto *TC = FD->getAttr<TargetClonesAttr>();
+ for (unsigned VersionIndex = 0; VersionIndex < TC->featuresStrs_size();
+ ++VersionIndex) {
+ if (!TC->isFirstOfVersion(VersionIndex))
+ continue;
+ GlobalDecl CurGD{(FD->isDefined() ? FD->getDefinition() : FD),
+ VersionIndex};
+ StringRef Version = TC->getFeatureStr(VersionIndex);
+ StringRef MangledName = getMangledName(CurGD);
+ llvm::Constant *Func = GetGlobalValue(MangledName);
+ if (!Func) {
+ if (FD->isDefined()) {
+ EmitGlobalFunctionDefinition(CurGD, nullptr);
+ Func = GetGlobalValue(MangledName);
+ } else {
+ const CGFunctionInfo &FI =
+ getTypes().arrangeGlobalDeclaration(CurGD);
+ llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
+ Func = GetAddrOfFunction(CurGD, Ty, /*ForVTable=*/false,
+ /*DontDefer=*/false, ForDefinition);
}
+ assert(Func && "This should have just been created");
+ }
- const auto *TA = CurFD->getAttr<TargetAttr>();
- llvm::SmallVector<StringRef, 8> Feats;
- TA->getAddedFeatures(Feats);
-
- Options.emplace_back(cast<llvm::Function>(Func),
- TA->getArchitecture(), Feats);
- });
+ StringRef Architecture;
+ llvm::SmallVector<StringRef, 1> Feature;
- llvm::Function *ResolverFunc;
- const TargetInfo &TI = getTarget();
+ if (getTarget().getTriple().isAArch64()) {
+ if (Version != "default") {
+ llvm::SmallVector<StringRef, 8> VerFeats;
+ Version.split(VerFeats, "+");
+ for (auto &CurFeat : VerFeats)
+ Feature.push_back(CurFeat.trim());
+ }
+ } else {
+ if (Version.starts_with("arch="))
+ Architecture = Version.drop_front(sizeof("arch=") - 1);
+ else if (Version != "default")
+ Feature.push_back(Version);
+ }
- if (TI.supportsIFunc() || FD->isTargetMultiVersion()) {
- ResolverFunc = cast<llvm::Function>(
- GetGlobalValue((getMangledName(GD) + ".resolver").str()));
- ResolverFunc->setLinkage(llvm::Function::WeakODRLinkage);
+ Options.emplace_back(cast<llvm::Function>(Func), Architecture, Feature);
+ }
} else {
- ResolverFunc = cast<llvm::Function>(GetGlobalValue(getMangledName(GD)));
+ assert(0 && "Expected a target or target_clones multiversion function");
+ continue;
}
- if (supportsCOMDAT())
+ llvm::Constant *ResolverConstant = GetOrCreateMultiVersionResolver(GD);
+ if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(ResolverConstant)) {
+ ResolverConstant = IFunc->getResolver();
+ if (FD->isTargetClonesMultiVersion()) {
+ const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
+ llvm::FunctionType *DeclTy = getTypes().GetFunctionType(FI);
+ std::string MangledName = getMangledNameImpl(
+ *this, GD, FD, /*OmitMultiVersionMangling=*/true);
+ // In prior versions of Clang, the mangling for ifuncs incorrectly
+ // included an .ifunc suffix. This alias is generated for backward
+ // compatibility. It is deprecated, and may be removed in the future.
+ auto *Alias = llvm::GlobalAlias::create(
+ DeclTy, 0, getMultiversionLinkage(*this, GD),
+ MangledName + ".ifunc", IFunc, &getModule());
+ SetCommonAttributes(FD, Alias);
+ }
+ }
+ llvm::Function *ResolverFunc = cast<llvm::Function>(ResolverConstant);
+
+ ResolverFunc->setLinkage(getMultiversionLinkage(*this, GD));
+
+ if (!ResolverFunc->hasLocalLinkage() && supportsCOMDAT())
ResolverFunc->setComdat(
getModule().getOrInsertComdat(ResolverFunc->getName()));
+ const TargetInfo &TI = getTarget();
llvm::stable_sort(
Options, [&TI](const CodeGenFunction::MultiVersionResolverOption &LHS,
const CodeGenFunction::MultiVersionResolverOption &RHS) {
@@ -3325,24 +4265,24 @@ void CodeGenModule::emitMultiVersionFunctions() {
void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
const auto *FD = cast<FunctionDecl>(GD.getDecl());
assert(FD && "Not a FunctionDecl?");
+ assert(FD->isCPUDispatchMultiVersion() && "Not a multiversion function?");
const auto *DD = FD->getAttr<CPUDispatchAttr>();
assert(DD && "Not a cpu_dispatch Function?");
- llvm::Type *DeclTy = getTypes().ConvertType(FD->getType());
- if (const auto *CXXFD = dyn_cast<CXXMethodDecl>(FD)) {
- const CGFunctionInfo &FInfo = getTypes().arrangeCXXMethodDeclaration(CXXFD);
- DeclTy = getTypes().GetFunctionType(FInfo);
- }
+ const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
+ llvm::FunctionType *DeclTy = getTypes().GetFunctionType(FI);
StringRef ResolverName = getMangledName(GD);
+ UpdateMultiVersionNames(GD, FD, ResolverName);
llvm::Type *ResolverType;
GlobalDecl ResolverGD;
- if (getTarget().supportsIFunc())
+ if (getTarget().supportsIFunc()) {
ResolverType = llvm::FunctionType::get(
llvm::PointerType::get(DeclTy,
- Context.getTargetAddressSpace(FD->getType())),
+ getTypes().getTargetAddressSpace(FD->getType())),
false);
+ }
else {
ResolverType = DeclTy;
ResolverGD = GD;
@@ -3350,7 +4290,7 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
auto *ResolverFunc = cast<llvm::Function>(GetOrCreateLLVMFunction(
ResolverName, ResolverType, ResolverGD, /*ForVTable=*/false));
- ResolverFunc->setLinkage(llvm::Function::WeakODRLinkage);
+ ResolverFunc->setLinkage(getMultiversionLinkage(*this, GD));
if (supportsCOMDAT())
ResolverFunc->setComdat(
getModule().getOrInsertComdat(ResolverFunc->getName()));
@@ -3386,10 +4326,9 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
Target.getCPUSpecificCPUDispatchFeatures(II->getName(), Features);
llvm::transform(Features, Features.begin(),
[](StringRef Str) { return Str.substr(1); });
- Features.erase(std::remove_if(
- Features.begin(), Features.end(), [&Target](StringRef Feat) {
- return !Target.validateCpuSupports(Feat);
- }), Features.end());
+ llvm::erase_if(Features, [&Target](StringRef Feat) {
+ return !Target.validateCpuSupports(Feat);
+ });
Options.emplace_back(cast<llvm::Function>(Func), StringRef{}, Features);
++Index;
}
@@ -3397,8 +4336,8 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
llvm::stable_sort(
Options, [](const CodeGenFunction::MultiVersionResolverOption &LHS,
const CodeGenFunction::MultiVersionResolverOption &RHS) {
- return CodeGenFunction::GetX86CpuSupportsMask(LHS.Conditions.Features) >
- CodeGenFunction::GetX86CpuSupportsMask(RHS.Conditions.Features);
+ return llvm::X86::getCpuSupportsMask(LHS.Conditions.Features) >
+ llvm::X86::getCpuSupportsMask(RHS.Conditions.Features);
});
// If the list contains multiple 'default' versions, such as when it contains
@@ -3406,8 +4345,9 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
// always run on at least a 'pentium'). We do this by deleting the 'least
// advanced' (read, lowest mangling letter).
while (Options.size() > 1 &&
- CodeGenFunction::GetX86CpuSupportsMask(
- (Options.end() - 2)->Conditions.Features) == 0) {
+ llvm::all_of(llvm::X86::getCpuSupportsMask(
+ (Options.end() - 2)->Conditions.Features),
+ [](auto X) { return X == 0; })) {
StringRef LHSName = (Options.end() - 2)->Function->getName();
StringRef RHSName = (Options.end() - 1)->Function->getName();
if (LHSName.compare(RHSName) < 0)
@@ -3420,16 +4360,27 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
CGF.EmitMultiVersionResolver(ResolverFunc, Options);
if (getTarget().supportsIFunc()) {
+ llvm::GlobalValue::LinkageTypes Linkage = getMultiversionLinkage(*this, GD);
+ auto *IFunc = cast<llvm::GlobalValue>(GetOrCreateMultiVersionResolver(GD));
+
+ // Fix up function declarations that were created for cpu_specific before
+ // cpu_dispatch was known
+ if (!isa<llvm::GlobalIFunc>(IFunc)) {
+ assert(cast<llvm::Function>(IFunc)->isDeclaration());
+ auto *GI = llvm::GlobalIFunc::create(DeclTy, 0, Linkage, "", ResolverFunc,
+ &getModule());
+ GI->takeName(IFunc);
+ IFunc->replaceAllUsesWith(GI);
+ IFunc->eraseFromParent();
+ IFunc = GI;
+ }
+
std::string AliasName = getMangledNameImpl(
*this, GD, FD, /*OmitMultiVersionMangling=*/true);
llvm::Constant *AliasFunc = GetGlobalValue(AliasName);
if (!AliasFunc) {
- auto *IFunc = cast<llvm::GlobalIFunc>(GetOrCreateLLVMFunction(
- AliasName, DeclTy, GD, /*ForVTable=*/false, /*DontDefer=*/true,
- /*IsThunk=*/false, llvm::AttributeList(), NotForDefinition));
- auto *GA = llvm::GlobalAlias::create(
- DeclTy, 0, getFunctionLinkage(GD), AliasName, IFunc, &getModule());
- GA->setLinkage(llvm::Function::WeakODRLinkage);
+ auto *GA = llvm::GlobalAlias::create(DeclTy, 0, Linkage, AliasName, IFunc,
+ &getModule());
SetCommonAttributes(GD, GA);
}
}
@@ -3437,39 +4388,48 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
/// If a dispatcher for the specified mangled name is not in the module, create
/// and return an llvm Function with the specified type.
-llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(
- GlobalDecl GD, llvm::Type *DeclTy, const FunctionDecl *FD) {
+llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(GlobalDecl GD) {
+ const auto *FD = cast<FunctionDecl>(GD.getDecl());
+ assert(FD && "Not a FunctionDecl?");
+
std::string MangledName =
getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true);
// Holds the name of the resolver, in ifunc mode this is the ifunc (which has
// a separate resolver).
std::string ResolverName = MangledName;
- if (getTarget().supportsIFunc())
- ResolverName += ".ifunc";
- else if (FD->isTargetMultiVersion())
+ if (getTarget().supportsIFunc()) {
+ if (!FD->isTargetClonesMultiVersion())
+ ResolverName += ".ifunc";
+ } else if (FD->isTargetMultiVersion()) {
ResolverName += ".resolver";
+ }
- // If this already exists, just return that one.
+ // If the resolver has already been created, just return it.
if (llvm::GlobalValue *ResolverGV = GetGlobalValue(ResolverName))
return ResolverGV;
- // Since this is the first time we've created this IFunc, make sure
- // that we put this multiversioned function into the list to be
- // replaced later if necessary (target multiversioning only).
- if (!FD->isCPUDispatchMultiVersion() && !FD->isCPUSpecificMultiVersion())
+ const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
+ llvm::FunctionType *DeclTy = getTypes().GetFunctionType(FI);
+
+ // The resolver needs to be created. For target and target_clones, defer
+ // creation until the end of the TU.
+ if (FD->isTargetMultiVersion() || FD->isTargetClonesMultiVersion())
MultiVersionFuncs.push_back(GD);
- if (getTarget().supportsIFunc()) {
+ // For cpu_specific, don't create an ifunc yet because we don't know if the
+ // cpu_dispatch will be emitted in this translation unit.
+ if (getTarget().supportsIFunc() && !FD->isCPUSpecificMultiVersion()) {
llvm::Type *ResolverType = llvm::FunctionType::get(
- llvm::PointerType::get(
- DeclTy, getContext().getTargetAddressSpace(FD->getType())),
+ llvm::PointerType::get(DeclTy,
+ getTypes().getTargetAddressSpace(FD->getType())),
false);
llvm::Constant *Resolver = GetOrCreateLLVMFunction(
MangledName + ".resolver", ResolverType, GlobalDecl{},
/*ForVTable=*/false);
- llvm::GlobalIFunc *GIF = llvm::GlobalIFunc::create(
- DeclTy, 0, llvm::Function::WeakODRLinkage, "", Resolver, &getModule());
+ llvm::GlobalIFunc *GIF =
+ llvm::GlobalIFunc::create(DeclTy, 0, getMultiversionLinkage(*this, GD),
+ "", Resolver, &getModule());
GIF->setName(ResolverName);
SetCommonAttributes(FD, GIF);
@@ -3501,7 +4461,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
// the iFunc instead. Name Mangling will handle the rest of the changes.
if (const FunctionDecl *FD = cast_or_null<FunctionDecl>(D)) {
// For the device mark the function as one that should be emitted.
- if (getLangOpts().OpenMPIsDevice && OpenMPRuntime &&
+ if (getLangOpts().OpenMPIsTargetDevice && OpenMPRuntime &&
!OpenMPRuntime->markAsGlobalTarget(GD) && FD->isDefined() &&
!DontDefer && !IsForDefinition) {
if (const FunctionDecl *FDDef = FD->getDefinition()) {
@@ -3517,10 +4477,9 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
}
if (FD->isMultiVersion()) {
- if (FD->hasAttr<TargetAttr>())
- UpdateMultiVersionNames(GD, FD);
+ UpdateMultiVersionNames(GD, FD, MangledName);
if (!IsForDefinition)
- return GetOrCreateMultiVersionResolver(GD, Ty, FD);
+ return GetOrCreateMultiVersionResolver(GD);
}
}
@@ -3534,7 +4493,8 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
}
// Handle dropped DLL attributes.
- if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>()) {
+ if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>() &&
+ !shouldMapVisibilityToDLLExport(cast_or_null<NamedDecl>(D))) {
Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
setDSOLocal(Entry);
}
@@ -3565,7 +4525,7 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
// (If function is requested for a definition, we always need to create a new
// function, not just return a bitcast.)
if (!IsForDefinition)
- return llvm::ConstantExpr::getBitCast(Entry, Ty->getPointerTo());
+ return Entry;
}
// This function doesn't have a complete type (for example, the return
@@ -3585,6 +4545,11 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
llvm::Function::Create(FTy, llvm::Function::ExternalLinkage,
Entry ? StringRef() : MangledName, &getModule());
+ // Store the declaration associated with this function so it is potentially
+ // updated by further declarations or definitions and emitted at the end.
+ if (D && D->hasAttr<AnnotateAttr>())
+ DeferredAnnotations[MangledName] = cast<ValueDecl>(D);
+
// If we already created a function with the same mangled name (but different
// type) before, take its name and add it to the list of functions to be
// replaced with F at the end of CodeGen.
@@ -3605,24 +4570,22 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
Entry->removeDeadConstantUsers();
}
- llvm::Constant *BC = llvm::ConstantExpr::getBitCast(
- F, Entry->getValueType()->getPointerTo());
- addGlobalValReplacement(Entry, BC);
+ addGlobalValReplacement(Entry, F);
}
assert(F->getName() == MangledName && "name was uniqued!");
if (D)
SetFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk);
- if (ExtraAttrs.hasAttributes(llvm::AttributeList::FunctionIndex)) {
- llvm::AttrBuilder B(ExtraAttrs, llvm::AttributeList::FunctionIndex);
- F->addAttributes(llvm::AttributeList::FunctionIndex, B);
+ if (ExtraAttrs.hasFnAttrs()) {
+ llvm::AttrBuilder B(F->getContext(), ExtraAttrs.getFnAttrs());
+ F->addFnAttrs(B);
}
if (!DontDefer) {
// All MSVC dtors other than the base dtor are linkonce_odr and delegate to
// each other bottoming out with the base dtor. Therefore we emit non-base
// dtors on usage, even if there is no dtor definition in the TU.
- if (D && isa<CXXDestructorDecl>(D) &&
+ if (isa_and_nonnull<CXXDestructorDecl>(D) &&
getCXXABI().useThunkForDtorVariant(cast<CXXDestructorDecl>(D),
GD.getDtorType()))
addDeferredDeclToEmit(GD);
@@ -3669,20 +4632,16 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
return F;
}
- llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
- return llvm::ConstantExpr::getBitCast(F, PTy);
+ return F;
}
/// GetAddrOfFunction - Return the address of the given function. If Ty is
/// non-null, then this function will use the specified type if it has to
/// create it (this occurs when we see a definition of the function).
-llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
- llvm::Type *Ty,
- bool ForVTable,
- bool DontDefer,
- ForDefinition_t IsForDefinition) {
- assert(!cast<FunctionDecl>(GD.getDecl())->isConsteval() &&
- "consteval function should never be emitted");
+llvm::Constant *
+CodeGenModule::GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty, bool ForVTable,
+ bool DontDefer,
+ ForDefinition_t IsForDefinition) {
// If there was no specific requested type, just convert it now.
if (!Ty) {
const auto *FD = cast<FunctionDecl>(GD.getDecl());
@@ -3710,11 +4669,18 @@ llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
cast<llvm::Function>(F->stripPointerCasts()), GD);
if (IsForDefinition)
return F;
- return llvm::ConstantExpr::getBitCast(Handle, Ty->getPointerTo());
+ return Handle;
}
return F;
}
+llvm::Constant *CodeGenModule::GetFunctionStart(const ValueDecl *Decl) {
+ llvm::GlobalValue *F =
+ cast<llvm::GlobalValue>(GetAddrOfFunction(Decl)->stripPointerCasts());
+
+ return llvm::NoCFIValue::get(F);
+}
+
static const FunctionDecl *
GetRuntimeFunctionDecl(ASTContext &C, StringRef Name) {
TranslationUnitDecl *TUDecl = C.getTranslationUnitDecl();
@@ -3761,8 +4727,7 @@ CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name,
bool AssumeConvergent) {
if (AssumeConvergent) {
ExtraAttrs =
- ExtraAttrs.addAttribute(VMContext, llvm::AttributeList::FunctionIndex,
- llvm::Attribute::Convergent);
+ ExtraAttrs.addFnAttribute(VMContext, llvm::Attribute::Convergent);
}
llvm::Constant *C =
@@ -3794,26 +4759,6 @@ CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name,
return {FTy, C};
}
-/// isTypeConstant - Determine whether an object of this type can be emitted
-/// as a constant.
-///
-/// If ExcludeCtor is true, the duration when the object's constructor runs
-/// will not be considered. The caller will need to verify that the object is
-/// not written to during its construction.
-bool CodeGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) {
- if (!Ty.isConstant(Context) && !Ty->isReferenceType())
- return false;
-
- if (Context.getLangOpts().CPlusPlus) {
- if (const CXXRecordDecl *Record
- = Context.getBaseElementType(Ty)->getAsCXXRecordDecl())
- return ExcludeCtor && !Record->hasMutableFields() &&
- Record->hasTrivialDestructor();
- }
-
- return true;
-}
-
/// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module,
/// create and return an llvm GlobalVariable with the specified type and address
/// space. If there is something in the module with the specified name, return
@@ -3827,10 +4772,11 @@ bool CodeGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) {
/// mangled name but some other type.
llvm::Constant *
CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty,
- unsigned AddrSpace, const VarDecl *D,
+ LangAS AddrSpace, const VarDecl *D,
ForDefinition_t IsForDefinition) {
// Lookup the entry, lazily creating it if necessary.
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ unsigned TargetAS = getContext().getTargetAddressSpace(AddrSpace);
if (Entry) {
if (WeakRefReferences.erase(Entry)) {
if (D && !D->hasAttr<WeakAttr>())
@@ -3838,13 +4784,14 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty,
}
// Handle dropped DLL attributes.
- if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>())
+ if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>() &&
+ !shouldMapVisibilityToDLLExport(D))
Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
if (LangOpts.OpenMP && !LangOpts.OpenMPSimd && D)
getOpenMPRuntime().registerTargetGlobalVariable(D, Entry);
- if (Entry->getValueType() == Ty && Entry->getAddressSpace() == AddrSpace)
+ if (Entry->getValueType() == Ty && Entry->getAddressSpace() == TargetAS)
return Entry;
// If there are two attempts to define the same mangled name, issue an
@@ -3868,24 +4815,22 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty,
}
// Make sure the result is of the correct type.
- if (Entry->getType()->getAddressSpace() != AddrSpace) {
- return llvm::ConstantExpr::getAddrSpaceCast(Entry,
- Ty->getPointerTo(AddrSpace));
- }
+ if (Entry->getType()->getAddressSpace() != TargetAS)
+ return llvm::ConstantExpr::getAddrSpaceCast(
+ Entry, llvm::PointerType::get(Ty->getContext(), TargetAS));
// (If global is requested for a definition, we always need to create a new
// global, not just return a bitcast.)
if (!IsForDefinition)
- return llvm::ConstantExpr::getBitCast(Entry, Ty->getPointerTo(AddrSpace));
+ return Entry;
}
auto DAddrSpace = GetGlobalVarAddressSpace(D);
- auto TargetAddrSpace = getContext().getTargetAddressSpace(DAddrSpace);
auto *GV = new llvm::GlobalVariable(
getModule(), Ty, false, llvm::GlobalValue::ExternalLinkage, nullptr,
MangledName, nullptr, llvm::GlobalVariable::NotThreadLocal,
- TargetAddrSpace);
+ getContext().getTargetAddressSpace(DAddrSpace));
// If we already created a global with the same mangled name (but different
// type) before, take its name and remove it from its parent.
@@ -3893,9 +4838,7 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty,
GV->takeName(Entry);
if (!Entry->use_empty()) {
- llvm::Constant *NewPtrForOldDecl =
- llvm::ConstantExpr::getBitCast(GV, Entry->getType());
- Entry->replaceAllUsesWith(NewPtrForOldDecl);
+ Entry->replaceAllUsesWith(GV);
}
Entry->eraseFromParent();
@@ -3919,7 +4862,7 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty,
// FIXME: This code is overly simple and should be merged with other global
// handling.
- GV->setConstant(isTypeConstant(D->getType(), false));
+ GV->setConstant(D->getType().isConstantStorage(getContext(), false, false));
GV->setAlignment(getContext().getDeclAlign(D).getAsAlign());
@@ -3952,6 +4895,10 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty,
isExternallyVisible(D->getLinkageAndVisibility().getLinkage()))
GV->setSection(".cp.rodata");
+ // Handle code model attribute
+ if (const auto *CMA = D->getAttr<CodeModelAttr>())
+ GV->setCodeModel(CMA->getModel());
+
// Check if we a have a const declaration with an initializer, we may be
// able to emit it as available_externally to expose it's value to the
// optimizer.
@@ -3996,7 +4943,8 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty,
}
}
- if (GV->isDeclaration()) {
+ if (D &&
+ D->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly) {
getTargetCodeGenInfo().setTargetAttributes(D, GV, *this);
// External HIP managed variables needed to be recorded for transformation
// in both device and host compilations.
@@ -4005,13 +4953,17 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty,
getCUDARuntime().handleVarRegistration(D, *GV);
}
+ if (D)
+ SanitizerMD->reportGlobal(GV, *D);
+
LangAS ExpectedAS =
D ? D->getType().getAddressSpace()
: (LangOpts.OpenCL ? LangAS::opencl_global : LangAS::Default);
- assert(getContext().getTargetAddressSpace(ExpectedAS) == AddrSpace);
+ assert(getContext().getTargetAddressSpace(ExpectedAS) == TargetAS);
if (DAddrSpace != ExpectedAS) {
return getTargetCodeGenInfo().performAddrSpaceCast(
- *this, GV, DAddrSpace, ExpectedAS, Ty->getPointerTo(AddrSpace));
+ *this, GV, DAddrSpace, ExpectedAS,
+ llvm::PointerType::get(getLLVMContext(), TargetAS));
}
return GV;
@@ -4045,7 +4997,7 @@ CodeGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) {
llvm::GlobalVariable *CodeGenModule::CreateOrReplaceCXXRuntimeVariable(
StringRef Name, llvm::Type *Ty, llvm::GlobalValue::LinkageTypes Linkage,
- unsigned Alignment) {
+ llvm::Align Alignment) {
llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name);
llvm::GlobalVariable *OldGV = nullptr;
@@ -4069,9 +5021,7 @@ llvm::GlobalVariable *CodeGenModule::CreateOrReplaceCXXRuntimeVariable(
GV->takeName(OldGV);
if (!OldGV->use_empty()) {
- llvm::Constant *NewPtrForOldDecl =
- llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
- OldGV->replaceAllUsesWith(NewPtrForOldDecl);
+ OldGV->replaceAllUsesWith(GV);
}
OldGV->eraseFromParent();
@@ -4081,7 +5031,7 @@ llvm::GlobalVariable *CodeGenModule::CreateOrReplaceCXXRuntimeVariable(
!GV->hasAvailableExternallyLinkage())
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
- GV->setAlignment(llvm::MaybeAlign(Alignment));
+ GV->setAlignment(Alignment);
return GV;
}
@@ -4101,8 +5051,7 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
Ty = getTypes().ConvertTypeForMem(ASTTy);
StringRef MangledName = getMangledName(D);
- return GetOrCreateLLVMGlobal(MangledName, Ty,
- getContext().getTargetAddressSpace(ASTTy), D,
+ return GetOrCreateLLVMGlobal(MangledName, Ty, ASTTy.getAddressSpace(), D,
IsForDefinition);
}
@@ -4111,10 +5060,8 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
llvm::Constant *
CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty,
StringRef Name) {
- auto AddrSpace =
- getContext().getLangOpts().OpenCL
- ? getContext().getTargetAddressSpace(LangAS::opencl_global)
- : 0;
+ LangAS AddrSpace = getContext().getLangOpts().OpenCL ? LangAS::opencl_global
+ : LangAS::Default;
auto *Ret = GetOrCreateLLVMGlobal(Name, Ty, AddrSpace, nullptr);
setDSOLocal(cast<llvm::GlobalValue>(Ret->stripPointerCasts()));
return Ret;
@@ -4153,16 +5100,15 @@ CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
}
LangAS CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D) {
- LangAS AddrSpace = LangAS::Default;
if (LangOpts.OpenCL) {
- AddrSpace = D ? D->getType().getAddressSpace() : LangAS::opencl_global;
- assert(AddrSpace == LangAS::opencl_global ||
- AddrSpace == LangAS::opencl_global_device ||
- AddrSpace == LangAS::opencl_global_host ||
- AddrSpace == LangAS::opencl_constant ||
- AddrSpace == LangAS::opencl_local ||
- AddrSpace >= LangAS::FirstTargetAddressSpace);
- return AddrSpace;
+ LangAS AS = D ? D->getType().getAddressSpace() : LangAS::opencl_global;
+ assert(AS == LangAS::opencl_global ||
+ AS == LangAS::opencl_global_device ||
+ AS == LangAS::opencl_global_host ||
+ AS == LangAS::opencl_constant ||
+ AS == LangAS::opencl_local ||
+ AS >= LangAS::FirstTargetAddressSpace);
+ return AS;
}
if (LangOpts.SYCLIsDevice &&
@@ -4170,16 +5116,17 @@ LangAS CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D) {
return LangAS::sycl_global;
if (LangOpts.CUDA && LangOpts.CUDAIsDevice) {
- if (D && D->hasAttr<CUDAConstantAttr>())
- return LangAS::cuda_constant;
- else if (D && D->hasAttr<CUDASharedAttr>())
- return LangAS::cuda_shared;
- else if (D && D->hasAttr<CUDADeviceAttr>())
- return LangAS::cuda_device;
- else if (D && D->getType().isConstQualified())
- return LangAS::cuda_constant;
- else
- return LangAS::cuda_device;
+ if (D) {
+ if (D->hasAttr<CUDAConstantAttr>())
+ return LangAS::cuda_constant;
+ if (D->hasAttr<CUDASharedAttr>())
+ return LangAS::cuda_shared;
+ if (D->hasAttr<CUDADeviceAttr>())
+ return LangAS::cuda_device;
+ if (D->getType().isConstQualified())
+ return LangAS::cuda_constant;
+ }
+ return LangAS::cuda_device;
}
if (LangOpts.OpenMP) {
@@ -4196,8 +5143,16 @@ LangAS CodeGenModule::GetGlobalConstantAddressSpace() const {
return LangAS::opencl_constant;
if (LangOpts.SYCLIsDevice)
return LangAS::sycl_global;
+ if (LangOpts.HIP && LangOpts.CUDAIsDevice && getTriple().isSPIRV())
+ // For HIPSPV map literals to cuda_device (maps to CrossWorkGroup in SPIR-V)
+ // instead of default AS (maps to Generic in SPIR-V). Otherwise, we end up
+ // with OpVariable instructions with Generic storage class which is not
+ // allowed (SPIR-V V1.6 s3.42.8). Also, mapping literals to SPIR-V
+ // UniformConstant storage class is not viable as pointers to it may not be
+ // casted to Generic pointers which are used to model HIP's "flat" pointers.
+ return LangAS::cuda_device;
if (auto AS = getTarget().getConstantAddressSpace())
- return AS.getValue();
+ return *AS;
return LangAS::Default;
}
@@ -4218,7 +5173,8 @@ castStringLiteralToDefaultAddressSpace(CodeGenModule &CGM,
if (AS != LangAS::Default)
Cast = CGM.getTargetCodeGenInfo().performAddrSpaceCast(
CGM, GV, AS, LangAS::Default,
- GV->getValueType()->getPointerTo(
+ llvm::PointerType::get(
+ CGM.getLLVMContext(),
CGM.getContext().getTargetAddressSpace(LangAS::Default)));
}
return Cast;
@@ -4236,7 +5192,7 @@ void CodeGenModule::MaybeHandleStaticInExternC(const SomeDecl *D,
return;
// Must have internal linkage and an ordinary name.
- if (!D->getIdentifier() || D->getFormalLinkage() != InternalLinkage)
+ if (!D->getIdentifier() || D->getFormalLinkage() != Linkage::Internal)
return;
// Must be in an extern "C" context. Entities declared directly within
@@ -4261,11 +5217,6 @@ static bool shouldBeInCOMDAT(CodeGenModule &CGM, const Decl &D) {
if (!CGM.supportsCOMDAT())
return false;
- // Do not set COMDAT attribute for CUDA/HIP stub functions to prevent
- // them being "merged" by the COMDAT Folding linker optimization.
- if (D.hasAttr<CUDAGlobalAttr>())
- return false;
-
if (D.hasAttr<SelectAnyAttr>())
return true;
@@ -4287,6 +5238,10 @@ static bool shouldBeInCOMDAT(CodeGenModule &CGM, const Decl &D) {
llvm_unreachable("No such linkage");
}
+bool CodeGenModule::supportsCOMDAT() const {
+ return getTriple().supportsCOMDAT();
+}
+
void CodeGenModule::maybeSetTrivialComdat(const Decl &D,
llvm::GlobalObject &GO) {
if (!shouldBeInCOMDAT(*this, D))
@@ -4305,19 +5260,25 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// If this is OpenMP device, check if it is legal to emit this global
// normally.
- if (LangOpts.OpenMPIsDevice && OpenMPRuntime &&
+ if (LangOpts.OpenMPIsTargetDevice && OpenMPRuntime &&
OpenMPRuntime->emitTargetGlobalVariable(D))
return;
llvm::TrackingVH<llvm::Constant> Init;
bool NeedsGlobalCtor = false;
+ // Whether the definition of the variable is available externally.
+ // If yes, we shouldn't emit the GloablCtor and GlobalDtor for the variable
+ // since this is the job for its original source.
+ bool IsDefinitionAvailableExternally =
+ getContext().GetGVALinkageForVariable(D) == GVA_AvailableExternally;
bool NeedsGlobalDtor =
+ !IsDefinitionAvailableExternally &&
D->needsDestruction(getContext()) == QualType::DK_cxx_destructor;
const VarDecl *InitDecl;
const Expr *InitExpr = D->getAnyInitializer(InitDecl);
- Optional<ConstantEmitter> emitter;
+ std::optional<ConstantEmitter> emitter;
// CUDA E.2.4.1 "__shared__ variables cannot have an initialization
// as part of their declaration." Sema has already checked for
@@ -4362,8 +5323,12 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
T = D->getType();
if (getLangOpts().CPlusPlus) {
+ if (InitDecl->hasFlexibleArrayInit(getContext()))
+ ErrorUnsupported(D, "flexible array initializer");
Init = EmitNullConstant(T);
- NeedsGlobalCtor = true;
+
+ if (!IsDefinitionAvailableExternally)
+ NeedsGlobalCtor = true;
} else {
ErrorUnsupported(D, "static initializer");
Init = llvm::UndefValue::get(getTypes().ConvertType(T));
@@ -4375,6 +5340,14 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// also don't need to register a destructor.
if (getLangOpts().CPlusPlus && !NeedsGlobalDtor)
DelayedCXXInitPosition.erase(D);
+
+#ifndef NDEBUG
+ CharUnits VarSize = getContext().getTypeSizeInChars(ASTTy) +
+ InitDecl->getFlexibleArrayInitChars(getContext());
+ CharUnits CstSize = CharUnits::fromQuantity(
+ getDataLayout().getTypeAllocSize(Init->getType()));
+ assert(VarSize == CstSize && "Emitted constant has unexpected size");
+#endif
}
}
@@ -4425,8 +5398,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
AddGlobalAnnotations(D, GV);
// Set the llvm linkage type as appropriate.
- llvm::GlobalValue::LinkageTypes Linkage =
- getLLVMLinkageVarDefinition(D, GV->isConstant());
+ llvm::GlobalValue::LinkageTypes Linkage = getLLVMLinkageVarDefinition(D);
// CUDA B.2.1 "The __device__ qualifier declares a variable that resides on
// the device. [...]"
@@ -4435,10 +5407,12 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// Is accessible from all the threads within the grid and from the host
// through the runtime library (cudaGetSymbolAddress() / cudaGetSymbolSize()
// / cudaMemcpyToSymbol() / cudaMemcpyFromSymbol())."
- if (GV && LangOpts.CUDA) {
+ if (LangOpts.CUDA) {
if (LangOpts.CUDAIsDevice) {
if (Linkage != llvm::GlobalValue::InternalLinkage &&
- (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>()))
+ (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
+ D->getType()->isCUDADeviceBuiltinSurfaceType() ||
+ D->getType()->isCUDADeviceBuiltinTextureType()))
GV->setExternallyInitialized(true);
} else {
getCUDARuntime().internalizeDeviceSideVar(D, Linkage);
@@ -4452,7 +5426,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// If it is safe to mark the global 'constant', do so now.
GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor &&
- isTypeConstant(D->getType(), true));
+ D->getType().isConstantStorage(getContext(), true, true));
// If it is in a read-only section, mark it 'constant'.
if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
@@ -4461,7 +5435,12 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
GV->setConstant(true);
}
- GV->setAlignment(getContext().getDeclAlign(D).getAsAlign());
+ CharUnits AlignVal = getContext().getDeclAlign(D);
+ // Check for alignment specifed in an 'omp allocate' directive.
+ if (std::optional<CharUnits> AlignValFromAllocate =
+ getOMPAllocateAlignment(D))
+ AlignVal = *AlignValFromAllocate;
+ GV->setAlignment(AlignVal.getAsAlign());
// On Darwin, unlike other Itanium C++ ABI platforms, the thread-wrapper
// function is only defined alongside the variable, not also alongside
@@ -4516,7 +5495,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
if (NeedsGlobalCtor || NeedsGlobalDtor)
EmitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor);
- SanitizerMD->reportGlobalToASan(GV, *D, NeedsGlobalCtor);
+ SanitizerMD->reportGlobal(GV, *D, NeedsGlobalCtor);
// Emit global variable debug information.
if (CGDebugInfo *DI = getModuleDebugInfo())
@@ -4529,8 +5508,8 @@ void CodeGenModule::EmitExternalVarDeclaration(const VarDecl *D) {
if (getCodeGenOpts().hasReducedDebugInfo()) {
QualType ASTTy = D->getType();
llvm::Type *Ty = getTypes().ConvertTypeForMem(D->getType());
- llvm::Constant *GV = GetOrCreateLLVMGlobal(
- D->getName(), Ty, getContext().getTargetAddressSpace(ASTTy), D);
+ llvm::Constant *GV =
+ GetOrCreateLLVMGlobal(D->getName(), Ty, ASTTy.getAddressSpace(), D);
DI->EmitExternalVariable(
cast<llvm::GlobalVariable>(GV->stripPointerCasts()), D);
}
@@ -4612,17 +5591,14 @@ static bool isVarDeclStrongDefinition(const ASTContext &Context,
return false;
}
-llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageForDeclarator(
- const DeclaratorDecl *D, GVALinkage Linkage, bool IsConstantVariable) {
+llvm::GlobalValue::LinkageTypes
+CodeGenModule::getLLVMLinkageForDeclarator(const DeclaratorDecl *D,
+ GVALinkage Linkage) {
if (Linkage == GVA_Internal)
return llvm::Function::InternalLinkage;
- if (D->hasAttr<WeakAttr>()) {
- if (IsConstantVariable)
- return llvm::GlobalVariable::WeakODRLinkage;
- else
- return llvm::GlobalVariable::WeakAnyLinkage;
- }
+ if (D->hasAttr<WeakAttr>())
+ return llvm::GlobalVariable::WeakAnyLinkage;
if (const auto *FD = D->getAsFunction())
if (FD->isMultiVersion() && Linkage == GVA_AvailableExternally)
@@ -4687,10 +5663,10 @@ llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageForDeclarator(
return llvm::GlobalVariable::ExternalLinkage;
}
-llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageVarDefinition(
- const VarDecl *VD, bool IsConstant) {
+llvm::GlobalValue::LinkageTypes
+CodeGenModule::getLLVMLinkageVarDefinition(const VarDecl *VD) {
GVALinkage Linkage = getContext().GetGVALinkageForVariable(VD);
- return getLLVMLinkageForDeclarator(VD, Linkage, IsConstant);
+ return getLLVMLinkageForDeclarator(VD, Linkage);
}
/// Replace the uses of a function that was declared with a non-proto type.
@@ -4747,7 +5723,7 @@ static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
}
// Add any parameter attributes.
- newArgAttrs.push_back(oldAttrs.getParamAttributes(argNo));
+ newArgAttrs.push_back(oldAttrs.getParamAttrs(argNo));
argNo++;
}
if (dontTransform)
@@ -4762,7 +5738,7 @@ static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
callSite->getOperandBundlesAsDefs(newBundles);
llvm::CallBase *newCall;
- if (dyn_cast<llvm::CallInst>(callSite)) {
+ if (isa<llvm::CallInst>(callSite)) {
newCall =
llvm::CallInst::Create(newFn, newArgs, newBundles, "", callSite);
} else {
@@ -4775,9 +5751,9 @@ static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
if (!newCall->getType()->isVoidTy())
newCall->takeName(callSite);
- newCall->setAttributes(llvm::AttributeList::get(
- newFn->getContext(), oldAttrs.getFnAttributes(),
- oldAttrs.getRetAttributes(), newArgAttrs));
+ newCall->setAttributes(
+ llvm::AttributeList::get(newFn->getContext(), oldAttrs.getFnAttrs(),
+ oldAttrs.getRetAttrs(), newArgAttrs));
newCall->setCallingConv(callSite->getCallingConv());
// Finally, remove the old call, replacing any uses with the new one.
@@ -4855,9 +5831,6 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
maybeSetTrivialComdat(*D, *Fn);
- // Set CodeGen attributes that represent floating point environment.
- setLLVMFunctionFEnvAttributes(D, Fn);
-
CodeGenFunction(*this).GenerateCode(GD, Fn, FI);
setNonAliasAttributes(GD, Fn);
@@ -4867,8 +5840,8 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
AddGlobalCtor(Fn, CA->getPriority());
if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
AddGlobalDtor(Fn, DA->getPriority(), true);
- if (D->hasAttr<AnnotateAttr>())
- AddGlobalAnnotations(D, Fn);
+ if (getLangOpts().OpenMP && D->hasAttr<OMPDeclareTargetDeclAttr>())
+ getOpenMPRuntime().emitDeclareTargetFunction(D, GV);
}
void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
@@ -4902,10 +5875,10 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
/*ForVTable=*/false);
LT = getFunctionLinkage(GD);
} else {
- Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(), DeclTy, 0,
+ Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(), DeclTy, LangAS::Default,
/*D=*/nullptr);
if (const auto *VD = dyn_cast<VarDecl>(GD.getDecl()))
- LT = getLLVMLinkageVarDefinition(VD, D->getType().isConstQualified());
+ LT = getLLVMLinkageVarDefinition(VD);
else
LT = getFunctionLinkage(GD);
}
@@ -4932,8 +5905,7 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
// Remove it and replace uses of it with the alias.
GA->takeName(Entry);
- Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GA,
- Entry->getType()));
+ Entry->replaceAllUsesWith(GA);
Entry->eraseFromParent();
} else {
GA->setName(MangledName);
@@ -4952,6 +5924,11 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
setTLSMode(GA, *VD);
SetCommonAttributes(GD, GA);
+
+ // Emit global alias debug information.
+ if (isa<VarDecl>(D))
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ DI->EmitGlobalAlias(cast<llvm::GlobalValue>(GA->getAliasee()->stripPointerCasts()), GD);
}
void CodeGenModule::emitIFuncDefinition(GlobalDecl GD) {
@@ -4983,8 +5960,9 @@ void CodeGenModule::emitIFuncDefinition(GlobalDecl GD) {
Aliases.push_back(GD);
llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
+ llvm::Type *ResolverTy = llvm::GlobalIFunc::getResolverFunctionType(DeclTy);
llvm::Constant *Resolver =
- GetOrCreateLLVMFunction(IFA->getResolver(), DeclTy, GD,
+ GetOrCreateLLVMFunction(IFA->getResolver(), ResolverTy, {},
/*ForVTable=*/false);
llvm::GlobalIFunc *GIF =
llvm::GlobalIFunc::create(DeclTy, 0, llvm::Function::ExternalLinkage,
@@ -5005,12 +5983,13 @@ void CodeGenModule::emitIFuncDefinition(GlobalDecl GD) {
// Remove it and replace uses of it with the ifunc.
GIF->takeName(Entry);
- Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GIF,
- Entry->getType()));
+ Entry->replaceAllUsesWith(GIF);
Entry->eraseFromParent();
} else
GIF->setName(MangledName);
-
+ if (auto *F = dyn_cast<llvm::Function>(Resolver)) {
+ F->addFnAttr(llvm::Attribute::DisableSanitizerInstrumentation);
+ }
SetCommonAttributes(GD, GIF);
}
@@ -5064,7 +6043,8 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
StringLength);
if (auto *C = Entry.second)
- return ConstantAddress(C, CharUnits::fromQuantity(C->getAlignment()));
+ return ConstantAddress(
+ C, C->getValueType(), CharUnits::fromQuantity(C->getAlignment()));
llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
llvm::Constant *Zeros[] = { Zero, Zero };
@@ -5086,7 +6066,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
switch (CFRuntime) {
default: break;
- case LangOptions::CoreFoundationABI::Swift: LLVM_FALLTHROUGH;
+ case LangOptions::CoreFoundationABI::Swift: [[fallthrough]];
case LangOptions::CoreFoundationABI::Swift5_0:
CFConstantStringClassName =
Triple.isOSDarwin() ? "$s15SwiftFoundation19_NSCFConstantStringCN"
@@ -5151,7 +6131,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
auto Fields = Builder.beginStruct(STy);
// Class pointer.
- Fields.add(cast<llvm::ConstantExpr>(CFConstantStringClassRef));
+ Fields.add(cast<llvm::Constant>(CFConstantStringClassRef));
// Flags.
if (IsSwiftABI) {
@@ -5164,7 +6144,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
// String pointer.
llvm::Constant *C = nullptr;
if (isUTF16) {
- auto Arr = llvm::makeArrayRef(
+ auto Arr = llvm::ArrayRef(
reinterpret_cast<uint16_t *>(const_cast<char *>(Entry.first().data())),
Entry.first().size() / 2);
C = llvm::ConstantDataArray::get(VMContext, Arr);
@@ -5173,7 +6153,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
}
// Note: -fwritable-strings doesn't make the backing store strings of
- // CFStrings writable. (See <rdar://problem/10657500>)
+ // CFStrings writable.
auto *GV =
new llvm::GlobalVariable(getModule(), C->getType(), /*isConstant=*/true,
llvm::GlobalValue::PrivateLinkage, C, ".str");
@@ -5199,9 +6179,6 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
llvm::Constant *Str =
llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros);
- if (isUTF16)
- // Cast the UTF16 string to the correct type.
- Str = llvm::ConstantExpr::getBitCast(Str, Int8PtrTy);
Fields.add(Str);
// String length.
@@ -5230,10 +6207,11 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
switch (Triple.getObjectFormat()) {
case llvm::Triple::UnknownObjectFormat:
llvm_unreachable("unknown file format");
+ case llvm::Triple::DXContainer:
case llvm::Triple::GOFF:
- llvm_unreachable("GOFF is not yet implemented");
+ case llvm::Triple::SPIRV:
case llvm::Triple::XCOFF:
- llvm_unreachable("XCOFF is not yet implemented");
+ llvm_unreachable("unimplemented");
case llvm::Triple::COFF:
case llvm::Triple::ELF:
case llvm::Triple::Wasm:
@@ -5245,7 +6223,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
}
Entry.second = GV;
- return ConstantAddress(GV, Alignment);
+ return ConstantAddress(GV, GV->getValueType(), Alignment);
}
bool CodeGenModule::getExpressionLocationsEnabled() const {
@@ -5258,12 +6236,10 @@ QualType CodeGenModule::getObjCFastEnumerationStateType() {
D->startDefinition();
QualType FieldTypes[] = {
- Context.UnsignedLongTy,
- Context.getPointerType(Context.getObjCIdType()),
- Context.getPointerType(Context.UnsignedLongTy),
- Context.getConstantArrayType(Context.UnsignedLongTy,
- llvm::APInt(32, 5), nullptr, ArrayType::Normal, 0)
- };
+ Context.UnsignedLongTy, Context.getPointerType(Context.getObjCIdType()),
+ Context.getPointerType(Context.UnsignedLongTy),
+ Context.getConstantArrayType(Context.UnsignedLongTy, llvm::APInt(32, 5),
+ nullptr, ArraySizeModifier::Normal, 0)};
for (size_t i = 0; i < 4; ++i) {
FieldDecl *Field = FieldDecl::Create(Context,
@@ -5296,6 +6272,7 @@ CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) {
// Resize the string to the right size, which is indicated by its type.
const ConstantArrayType *CAT = Context.getAsConstantArrayType(E->getType());
+ assert(CAT && "String literal not of constant array type!");
Str.resize(CAT->getSize().getZExtValue());
return llvm::ConstantDataArray::getString(VMContext, Str, false);
}
@@ -5360,10 +6337,10 @@ CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
if (!LangOpts.WritableStrings) {
Entry = &ConstantStringMap[C];
if (auto GV = *Entry) {
- if (Alignment.getQuantity() > GV->getAlignment())
+ if (uint64_t(Alignment.getQuantity()) > GV->getAlignment())
GV->setAlignment(Alignment.getAsAlign());
return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
- Alignment);
+ GV->getValueType(), Alignment);
}
}
@@ -5386,14 +6363,18 @@ CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
}
auto GV = GenerateStringLiteral(C, LT, *this, GlobalVariableName, Alignment);
+
+ CGDebugInfo *DI = getModuleDebugInfo();
+ if (DI && getCodeGenOpts().hasReducedDebugInfo())
+ DI->AddStringLiteralDebugInfo(GV, S);
+
if (Entry)
*Entry = GV;
- SanitizerMD->reportGlobalToASan(GV, S->getStrTokenLoc(0), "<string literal>",
- QualType());
+ SanitizerMD->reportGlobal(GV, S->getStrTokenLoc(0), "<string literal>");
return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
- Alignment);
+ GV->getValueType(), Alignment);
}
/// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
@@ -5423,10 +6404,10 @@ ConstantAddress CodeGenModule::GetAddrOfConstantCString(
if (!LangOpts.WritableStrings) {
Entry = &ConstantStringMap[C];
if (auto GV = *Entry) {
- if (Alignment.getQuantity() > GV->getAlignment())
+ if (uint64_t(Alignment.getQuantity()) > GV->getAlignment())
GV->setAlignment(Alignment.getAsAlign());
return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
- Alignment);
+ GV->getValueType(), Alignment);
}
}
@@ -5440,7 +6421,7 @@ ConstantAddress CodeGenModule::GetAddrOfConstantCString(
*Entry = GV;
return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
- Alignment);
+ GV->getValueType(), Alignment);
}
ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
@@ -5470,7 +6451,11 @@ ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
getModule(), Type, false, llvm::GlobalVariable::InternalLinkage,
nullptr);
}
- return ConstantAddress(InsertResult.first->second, Align);
+ return ConstantAddress(InsertResult.first->second,
+ llvm::cast<llvm::GlobalVariable>(
+ InsertResult.first->second->stripPointerCasts())
+ ->getValueType(),
+ Align);
}
// FIXME: If an externally-visible declaration extends multiple temporaries,
@@ -5482,7 +6467,7 @@ ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
VD, E->getManglingNumber(), Out);
APValue *Value = nullptr;
- if (E->getStorageDuration() == SD_Static && VD && VD->evaluateValue()) {
+ if (E->getStorageDuration() == SD_Static && VD->evaluateValue()) {
// If the initializer of the extending declaration is a constant
// initializer, we should have a cached constant initializer for this
// temporary. Note that this might have a different value from the value
@@ -5497,10 +6482,9 @@ ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
!EvalResult.hasSideEffects())
Value = &EvalResult.Val;
- LangAS AddrSpace =
- VD ? GetGlobalVarAddressSpace(VD) : MaterializedType.getAddressSpace();
+ LangAS AddrSpace = GetGlobalVarAddressSpace(VD);
- Optional<ConstantEmitter> emitter;
+ std::optional<ConstantEmitter> emitter;
llvm::Constant *InitialValue = nullptr;
bool Constant = false;
llvm::Type *Type;
@@ -5509,7 +6493,9 @@ ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
emitter.emplace(*this);
InitialValue = emitter->emitForInitializer(*Value, AddrSpace,
MaterializedType);
- Constant = isTypeConstant(MaterializedType, /*ExcludeCtor*/Value);
+ Constant =
+ MaterializedType.isConstantStorage(getContext(), /*ExcludeCtor*/ Value,
+ /*ExcludeDtor*/ false);
Type = InitialValue->getType();
} else {
// No initializer, the initialization will be provided when we
@@ -5518,8 +6504,7 @@ ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
}
// Create a global variable for this lifetime-extended temporary.
- llvm::GlobalValue::LinkageTypes Linkage =
- getLLVMLinkageVarDefinition(VD, Constant);
+ llvm::GlobalValue::LinkageTypes Linkage = getLLVMLinkageVarDefinition(VD);
if (Linkage == llvm::GlobalVariable::ExternalLinkage) {
const VarDecl *InitVD;
if (VD->isStaticDataMember() && VD->getAnyInitializer(InitVD) &&
@@ -5538,7 +6523,13 @@ ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
getModule(), Type, Constant, Linkage, InitialValue, Name.c_str(),
/*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
if (emitter) emitter->finalize(GV);
- setGVProperties(GV, VD);
+ // Don't assign dllimport or dllexport to local linkage globals.
+ if (!llvm::GlobalValue::isLocalLinkage(Linkage)) {
+ setGVProperties(GV, VD);
+ if (GV->getDLLStorageClass() == llvm::GlobalVariable::DLLExportStorageClass)
+ // The reference temporary should never be dllexport.
+ GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);
+ }
GV->setAlignment(Align.getAsAlign());
if (supportsCOMDAT() && GV->isWeakForLinker())
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
@@ -5548,20 +6539,20 @@ ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
if (AddrSpace != LangAS::Default)
CV = getTargetCodeGenInfo().performAddrSpaceCast(
*this, GV, AddrSpace, LangAS::Default,
- Type->getPointerTo(
+ llvm::PointerType::get(
+ getLLVMContext(),
getContext().getTargetAddressSpace(LangAS::Default)));
// Update the map with the new temporary. If we created a placeholder above,
// replace it with the new global now.
llvm::Constant *&Entry = MaterializedGlobalTemporaryMap[E];
if (Entry) {
- Entry->replaceAllUsesWith(
- llvm::ConstantExpr::getBitCast(CV, Entry->getType()));
+ Entry->replaceAllUsesWith(CV);
llvm::cast<llvm::GlobalVariable>(Entry)->eraseFromParent();
}
Entry = CV;
- return ConstantAddress(CV, Align);
+ return ConstantAddress(CV, Type, Align);
}
/// EmitObjCPropertyImplementations - Emit information for synthesized
@@ -5626,7 +6617,7 @@ void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
/*isInstance=*/true, /*isVariadic=*/false,
/*isPropertyAccessor=*/true, /*isSynthesizedAccessorStub=*/false,
/*isImplicitlyDeclared=*/true,
- /*isDefined=*/false, ObjCMethodDecl::Required);
+ /*isDefined=*/false, ObjCImplementationControl::Required);
D->addInstanceMethod(DTORMethod);
CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, DTORMethod, false);
D->setHasDestructors(true);
@@ -5647,7 +6638,7 @@ void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
/*isVariadic=*/false,
/*isPropertyAccessor=*/true, /*isSynthesizedAccessorStub=*/false,
/*isImplicitlyDeclared=*/true,
- /*isDefined=*/false, ObjCMethodDecl::Required);
+ /*isDefined=*/false, ObjCImplementationControl::Required);
D->addInstanceMethod(CTORMethod);
CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, CTORMethod, true);
D->setHasNonZeroConstructors(true);
@@ -5655,8 +6646,8 @@ void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
// EmitLinkageSpec - Emit all declarations in a linkage spec.
void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) {
- if (LSD->getLanguage() != LinkageSpecDecl::lang_c &&
- LSD->getLanguage() != LinkageSpecDecl::lang_cxx) {
+ if (LSD->getLanguage() != LinkageSpecLanguageIDs::C &&
+ LSD->getLanguage() != LinkageSpecLanguageIDs::CXX) {
ErrorUnsupported(LSD, "linkage spec");
return;
}
@@ -5664,6 +6655,43 @@ void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) {
EmitDeclContext(LSD);
}
+void CodeGenModule::EmitTopLevelStmt(const TopLevelStmtDecl *D) {
+ // Device code should not be at top level.
+ if (LangOpts.CUDA && LangOpts.CUDAIsDevice)
+ return;
+
+ std::unique_ptr<CodeGenFunction> &CurCGF =
+ GlobalTopLevelStmtBlockInFlight.first;
+
+ // We emitted a top-level stmt but after it there is initialization.
+ // Stop squashing the top-level stmts into a single function.
+ if (CurCGF && CXXGlobalInits.back() != CurCGF->CurFn) {
+ CurCGF->FinishFunction(D->getEndLoc());
+ CurCGF = nullptr;
+ }
+
+ if (!CurCGF) {
+ // void __stmts__N(void)
+ // FIXME: Ask the ABI name mangler to pick a name.
+ std::string Name = "__stmts__" + llvm::utostr(CXXGlobalInits.size());
+ FunctionArgList Args;
+ QualType RetTy = getContext().VoidTy;
+ const CGFunctionInfo &FnInfo =
+ getTypes().arrangeBuiltinFunctionDeclaration(RetTy, Args);
+ llvm::FunctionType *FnTy = getTypes().GetFunctionType(FnInfo);
+ llvm::Function *Fn = llvm::Function::Create(
+ FnTy, llvm::GlobalValue::InternalLinkage, Name, &getModule());
+
+ CurCGF.reset(new CodeGenFunction(*this));
+ GlobalTopLevelStmtBlockInFlight.second = D;
+ CurCGF->StartFunction(GlobalDecl(), RetTy, Fn, FnInfo, Args,
+ D->getBeginLoc(), D->getBeginLoc());
+ CXXGlobalInits.push_back(Fn);
+ }
+
+ CurCGF->EmitStmt(D->getStmt());
+}
+
void CodeGenModule::EmitDeclContext(const DeclContext *DC) {
for (auto *I : DC->decls()) {
// Unlike other DeclContexts, the contents of an ObjCImplDecl at TU scope
@@ -5687,9 +6715,8 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
return;
// Consteval function shouldn't be emitted.
- if (auto *FD = dyn_cast<FunctionDecl>(D))
- if (FD->isConsteval())
- return;
+ if (auto *FD = dyn_cast<FunctionDecl>(D); FD && FD->isImmediateFunction())
+ return;
switch (D->getKind()) {
case Decl::CXXConversion:
@@ -5731,7 +6758,7 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
TSK_ExplicitInstantiationDefinition &&
Spec->hasDefinition())
DI->completeTemplateDefinition(*Spec);
- } LLVM_FALLTHROUGH;
+ } [[fallthrough]];
case Decl::CXXRecord: {
CXXRecordDecl *CRD = cast<CXXRecordDecl>(D);
if (CGDebugInfo *DI = getModuleDebugInfo()) {
@@ -5863,7 +6890,7 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
if (LangOpts.CUDA && LangOpts.CUDAIsDevice)
break;
// File-scope asm is ignored during device-side OpenMP compilation.
- if (LangOpts.OpenMPIsDevice)
+ if (LangOpts.OpenMPIsTargetDevice)
break;
// File-scope asm is ignored during device-side SYCL compilation.
if (LangOpts.SYCLIsDevice)
@@ -5873,6 +6900,10 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
break;
}
+ case Decl::TopLevelStmt:
+ EmitTopLevelStmt(cast<TopLevelStmtDecl>(D));
+ break;
+
case Decl::Import: {
auto *Import = cast<ImportDecl>(D);
@@ -5886,6 +6917,16 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
DI->EmitImportDecl(*Import);
}
+ // For C++ standard modules we are done - we will call the module
+ // initializer for imported modules, and that will likewise call those for
+ // any imports it has.
+ if (CXX20ModuleInits && Import->getImportedOwningModule() &&
+ !Import->getImportedOwningModule()->isModuleMapModule())
+ break;
+
+ // For clang C++ module map modules the initializers for sub-modules are
+ // emitted here.
+
// Find all of the submodules and emit the module initializers.
llvm::SmallPtrSet<clang::Module *, 16> Visited;
SmallVector<clang::Module *, 16> Stack;
@@ -5901,16 +6942,14 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
EmitTopLevelDecl(D);
// Visit the submodules of this module.
- for (clang::Module::submodule_iterator Sub = Mod->submodule_begin(),
- SubEnd = Mod->submodule_end();
- Sub != SubEnd; ++Sub) {
+ for (auto *Submodule : Mod->submodules()) {
// Skip explicit children; they need to be explicitly imported to emit
// the initializers.
- if ((*Sub)->IsExplicit)
+ if (Submodule->IsExplicit)
continue;
- if (Visited.insert(*Sub).second)
- Stack.push_back(*Sub);
+ if (Visited.insert(Submodule).second)
+ Stack.push_back(Submodule);
}
}
break;
@@ -5959,6 +6998,10 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(D)));
break;
+ case Decl::HLSLBuffer:
+ getHLSLRuntime().addBuffer(cast<HLSLBufferDecl>(D));
+ break;
+
default:
// Make sure we handled everything we should, every other kind is a
// non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind
@@ -5984,9 +7027,7 @@ void CodeGenModule::AddDeferredUnusedCoverageMapping(Decl *D) {
SourceManager &SM = getContext().getSourceManager();
if (LimitedCoverage && SM.getMainFileID() != SM.getFileID(D->getBeginLoc()))
break;
- auto I = DeferredEmptyCoverageMappingDecls.find(D);
- if (I == DeferredEmptyCoverageMappingDecls.end())
- DeferredEmptyCoverageMappingDecls[D] = true;
+ DeferredEmptyCoverageMappingDecls.try_emplace(D, true);
break;
}
default:
@@ -6002,11 +7043,7 @@ void CodeGenModule::ClearUnusedCoverageMapping(const Decl *D) {
if (Fn->isTemplateInstantiation())
ClearUnusedCoverageMapping(Fn->getTemplateInstantiationPattern());
}
- auto I = DeferredEmptyCoverageMappingDecls.find(D);
- if (I == DeferredEmptyCoverageMappingDecls.end())
- DeferredEmptyCoverageMappingDecls[D] = false;
- else
- I->second = false;
+ DeferredEmptyCoverageMappingDecls.insert_or_assign(D, false);
}
void CodeGenModule::EmitDeferredUnusedCoverageMappings() {
@@ -6055,8 +7092,10 @@ void CodeGenModule::EmitMainVoidAlias() {
// new-style no-argument main is in used.
if (llvm::Function *F = getModule().getFunction("main")) {
if (!F->isDeclaration() && F->arg_size() == 0 && !F->isVarArg() &&
- F->getReturnType()->isIntegerTy(Context.getTargetInfo().getIntWidth()))
- addUsedGlobal(llvm::GlobalAlias::create("__main_void", F));
+ F->getReturnType()->isIntegerTy(Context.getTargetInfo().getIntWidth())) {
+ auto *GA = llvm::GlobalAlias::create("__main_void", F);
+ GA->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ }
}
}
@@ -6083,6 +7122,72 @@ static void EmitGlobalDeclMetadata(CodeGenModule &CGM,
GlobalMetadata->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
}
+bool CodeGenModule::CheckAndReplaceExternCIFuncs(llvm::GlobalValue *Elem,
+ llvm::GlobalValue *CppFunc) {
+ // Store the list of ifuncs we need to replace uses in.
+ llvm::SmallVector<llvm::GlobalIFunc *> IFuncs;
+ // List of ConstantExprs that we should be able to delete when we're done
+ // here.
+ llvm::SmallVector<llvm::ConstantExpr *> CEs;
+
+ // It isn't valid to replace the extern-C ifuncs if all we find is itself!
+ if (Elem == CppFunc)
+ return false;
+
+ // First make sure that all users of this are ifuncs (or ifuncs via a
+ // bitcast), and collect the list of ifuncs and CEs so we can work on them
+ // later.
+ for (llvm::User *User : Elem->users()) {
+ // Users can either be a bitcast ConstExpr that is used by the ifuncs, OR an
+ // ifunc directly. In any other case, just give up, as we don't know what we
+ // could break by changing those.
+ if (auto *ConstExpr = dyn_cast<llvm::ConstantExpr>(User)) {
+ if (ConstExpr->getOpcode() != llvm::Instruction::BitCast)
+ return false;
+
+ for (llvm::User *CEUser : ConstExpr->users()) {
+ if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(CEUser)) {
+ IFuncs.push_back(IFunc);
+ } else {
+ return false;
+ }
+ }
+ CEs.push_back(ConstExpr);
+ } else if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(User)) {
+ IFuncs.push_back(IFunc);
+ } else {
+ // This user is one we don't know how to handle, so fail redirection. This
+ // will result in an ifunc retaining a resolver name that will ultimately
+ // fail to be resolved to a defined function.
+ return false;
+ }
+ }
+
+ // Now we know this is a valid case where we can do this alias replacement, we
+ // need to remove all of the references to Elem (and the bitcasts!) so we can
+ // delete it.
+ for (llvm::GlobalIFunc *IFunc : IFuncs)
+ IFunc->setResolver(nullptr);
+ for (llvm::ConstantExpr *ConstExpr : CEs)
+ ConstExpr->destroyConstant();
+
+ // We should now be out of uses for the 'old' version of this function, so we
+ // can erase it as well.
+ Elem->eraseFromParent();
+
+ for (llvm::GlobalIFunc *IFunc : IFuncs) {
+ // The type of the resolver is always just a function-type that returns the
+ // type of the IFunc, so create that here. If the type of the actual
+ // resolver doesn't match, it just gets bitcast to the right thing.
+ auto *ResolverTy =
+ llvm::FunctionType::get(IFunc->getType(), /*isVarArg*/ false);
+ llvm::Constant *Resolver = GetOrCreateLLVMFunction(
+ CppFunc->getName(), ResolverTy, {}, /*ForVTable*/ false);
+ IFunc->setResolver(Resolver);
+ }
+ return true;
+}
+
/// For each function which is declared within an extern "C" region and marked
/// as 'used', but has internal linkage, create an alias from the unmangled
/// name to the mangled name if possible. People expect to be able to refer
@@ -6094,7 +7199,19 @@ void CodeGenModule::EmitStaticExternCAliases() {
for (auto &I : StaticExternCValues) {
IdentifierInfo *Name = I.first;
llvm::GlobalValue *Val = I.second;
- if (Val && !getModule().getNamedValue(Name->getName()))
+
+ // If Val is null, that implies there were multiple declarations that each
+ // had a claim to the unmangled name. In this case, generation of the alias
+ // is suppressed. See CodeGenModule::MaybeHandleStaticInExternC.
+ if (!Val)
+ break;
+
+ llvm::GlobalValue *ExistingElem =
+ getModule().getNamedValue(Name->getName());
+
+ // If there is either not something already by this name, or we were able to
+ // replace all uses from IFuncs, create the alias.
+ if (!ExistingElem || CheckAndReplaceExternCIFuncs(ExistingElem, Val))
addCompilerUsedGlobal(llvm::GlobalAlias::create(Name->getName(), Val));
}
}
@@ -6175,10 +7292,6 @@ void CodeGenModule::EmitCommandLineMetadata() {
}
void CodeGenModule::EmitCoverageFile() {
- if (getCodeGenOpts().CoverageDataFile.empty() &&
- getCodeGenOpts().CoverageNotesFile.empty())
- return;
-
llvm::NamedMDNode *CUNode = TheModule.getNamedMetadata("llvm.dbg.cu");
if (!CUNode)
return;
@@ -6201,10 +7314,8 @@ llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
// Return a bogus pointer if RTTI is disabled, unless it's for EH.
// FIXME: should we even be calling this method if RTTI is disabled
// and it's not for EH?
- if ((!ForEH && !getLangOpts().RTTI) || getLangOpts().CUDAIsDevice ||
- (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
- getTriple().isNVPTX()))
- return llvm::Constant::getNullValue(Int8PtrTy);
+ if (!shouldEmitRTTI(ForEH))
+ return llvm::Constant::getNullValue(GlobalsInt8PtrTy);
if (ForEH && Ty->isObjCObjectPointerType() &&
LangOpts.ObjCRuntime.isGNUFamily())
@@ -6224,7 +7335,9 @@ void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
!VD->getAnyInitializer()->isConstantInitializer(getContext(),
/*ForRef=*/false);
- Address Addr(GetAddrOfGlobalVar(VD), getContext().getDeclAlign(VD));
+ Address Addr(GetAddrOfGlobalVar(VD),
+ getTypes().ConvertTypeForMem(VD->getType()),
+ getContext().getDeclAlign(VD));
if (auto InitFunction = getOpenMPRuntime().emitThreadPrivateVarDefinition(
VD, Addr, RefExpr->getBeginLoc(), PerformInit))
CXXGlobalInits.push_back(InitFunction);
@@ -6234,6 +7347,11 @@ void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
llvm::Metadata *
CodeGenModule::CreateMetadataIdentifierImpl(QualType T, MetadataTypeMap &Map,
StringRef Suffix) {
+ if (auto *FnType = T->getAs<FunctionProtoType>())
+ T = getContext().getFunctionType(
+ FnType->getReturnType(), FnType->getParamTypes(),
+ FnType->getExtProtoInfo().withExceptionSpec(EST_None));
+
llvm::Metadata *&InternalId = Map[T.getCanonicalType()];
if (InternalId)
return InternalId;
@@ -6241,7 +7359,12 @@ CodeGenModule::CreateMetadataIdentifierImpl(QualType T, MetadataTypeMap &Map,
if (isExternallyVisible(T->getLinkage())) {
std::string OutName;
llvm::raw_string_ostream Out(OutName);
- getCXXABI().getMangleContext().mangleTypeName(T, Out);
+ getCXXABI().getMangleContext().mangleCanonicalTypeName(
+ T, Out, getCodeGenOpts().SanitizeCfiICallNormalizeIntegers);
+
+ if (getCodeGenOpts().SanitizeCfiICallNormalizeIntegers)
+ Out << ".normalized";
+
Out << Suffix;
InternalId = llvm::MDString::get(getLLVMContext(), Out.str());
@@ -6446,7 +7569,66 @@ bool CodeGenModule::stopAutoInit() {
return false;
}
-void CodeGenModule::printPostfixForExternalizedStaticVar(
- llvm::raw_ostream &OS) const {
- OS << ".static." << getContext().getCUIDHash();
+void CodeGenModule::printPostfixForExternalizedDecl(llvm::raw_ostream &OS,
+ const Decl *D) const {
+ // ptxas does not allow '.' in symbol names. On the other hand, HIP prefers
+ // postfix beginning with '.' since the symbol name can be demangled.
+ if (LangOpts.HIP)
+ OS << (isa<VarDecl>(D) ? ".static." : ".intern.");
+ else
+ OS << (isa<VarDecl>(D) ? "__static__" : "__intern__");
+
+ // If the CUID is not specified we try to generate a unique postfix.
+ if (getLangOpts().CUID.empty()) {
+ SourceManager &SM = getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(D->getLocation());
+ assert(PLoc.isValid() && "Source location is expected to be valid.");
+
+ // Get the hash of the user defined macros.
+ llvm::MD5 Hash;
+ llvm::MD5::MD5Result Result;
+ for (const auto &Arg : PreprocessorOpts.Macros)
+ Hash.update(Arg.first);
+ Hash.final(Result);
+
+ // Get the UniqueID for the file containing the decl.
+ llvm::sys::fs::UniqueID ID;
+ if (llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) {
+ PLoc = SM.getPresumedLoc(D->getLocation(), /*UseLineDirectives=*/false);
+ assert(PLoc.isValid() && "Source location is expected to be valid.");
+ if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
+ SM.getDiagnostics().Report(diag::err_cannot_open_file)
+ << PLoc.getFilename() << EC.message();
+ }
+ OS << llvm::format("%x", ID.getFile()) << llvm::format("%x", ID.getDevice())
+ << "_" << llvm::utohexstr(Result.low(), /*LowerCase=*/true, /*Width=*/8);
+ } else {
+ OS << getContext().getCUIDHash();
+ }
+}
+
+void CodeGenModule::moveLazyEmissionStates(CodeGenModule *NewBuilder) {
+ assert(DeferredDeclsToEmit.empty() &&
+ "Should have emitted all decls deferred to emit.");
+ assert(NewBuilder->DeferredDecls.empty() &&
+ "Newly created module should not have deferred decls");
+ NewBuilder->DeferredDecls = std::move(DeferredDecls);
+ assert(EmittedDeferredDecls.empty() &&
+ "Still have (unmerged) EmittedDeferredDecls deferred decls");
+
+ assert(NewBuilder->DeferredVTables.empty() &&
+ "Newly created module should not have deferred vtables");
+ NewBuilder->DeferredVTables = std::move(DeferredVTables);
+
+ assert(NewBuilder->MangledDeclNames.empty() &&
+ "Newly created module should not have mangled decl names");
+ assert(NewBuilder->Manglings.empty() &&
+ "Newly created module should not have manglings");
+ NewBuilder->Manglings = std::move(Manglings);
+
+ NewBuilder->WeakRefReferences = std::move(WeakRefReferences);
+
+ NewBuilder->TBAA = std::move(TBAA);
+
+ NewBuilder->ABI->MangleCtx = std::move(ABI->MangleCtx);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
index 47dc6f415b60..ec34680fd3f7 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
@@ -26,16 +26,19 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/NoSanitizeList.h"
+#include "clang/Basic/ProfileList.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/XRayLists.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"
+#include <optional>
namespace llvm {
class Module;
@@ -46,8 +49,11 @@ class GlobalValue;
class DataLayout;
class FunctionType;
class LLVMContext;
-class OpenMPIRBuilder;
class IndexedInstrProfReader;
+
+namespace vfs {
+class FileSystem;
+}
}
namespace clang {
@@ -55,17 +61,13 @@ class ASTContext;
class AtomicType;
class FunctionDecl;
class IdentifierInfo;
-class ObjCMethodDecl;
class ObjCImplementationDecl;
-class ObjCCategoryImplDecl;
-class ObjCProtocolDecl;
class ObjCEncodeExpr;
class BlockExpr;
class CharUnits;
class Decl;
class Expr;
class Stmt;
-class InitListExpr;
class StringLiteral;
class NamedDecl;
class ValueDecl;
@@ -78,13 +80,10 @@ class AnnotateAttr;
class CXXDestructorDecl;
class Module;
class CoverageSourceInfo;
-class TargetAttr;
class InitSegAttr;
-struct ParsedTargetAttr;
namespace CodeGen {
-class CallArgList;
class CodeGenFunction;
class CodeGenTBAA;
class CGCXXABI;
@@ -93,8 +92,7 @@ class CGObjCRuntime;
class CGOpenCLRuntime;
class CGOpenMPRuntime;
class CGCUDARuntime;
-class BlockFieldFlags;
-class FunctionArgList;
+class CGHLSLRuntime;
class CoverageMappingModuleGen;
class TargetCodeGenInfo;
@@ -217,16 +215,14 @@ struct ObjCEntrypoints {
/// This class records statistics on instrumentation based profiling.
class InstrProfStats {
- uint32_t VisitedInMainFile;
- uint32_t MissingInMainFile;
- uint32_t Visited;
- uint32_t Missing;
- uint32_t Mismatched;
+ uint32_t VisitedInMainFile = 0;
+ uint32_t MissingInMainFile = 0;
+ uint32_t Visited = 0;
+ uint32_t Missing = 0;
+ uint32_t Mismatched = 0;
public:
- InstrProfStats()
- : VisitedInMainFile(0), MissingInMainFile(0), Visited(0), Missing(0),
- Mismatched(0) {}
+ InstrProfStats() = default;
/// Record that we've visited a function and whether or not that function was
/// in the main source file.
void addVisited(bool MainFile) {
@@ -287,12 +283,15 @@ class CodeGenModule : public CodeGenTypeCache {
public:
struct Structor {
- Structor() : Priority(0), Initializer(nullptr), AssociatedData(nullptr) {}
- Structor(int Priority, llvm::Constant *Initializer,
+ Structor()
+ : Priority(0), LexOrder(~0u), Initializer(nullptr),
+ AssociatedData(nullptr) {}
+ Structor(int Priority, unsigned LexOrder, llvm::Constant *Initializer,
llvm::Constant *AssociatedData)
- : Priority(Priority), Initializer(Initializer),
+ : Priority(Priority), LexOrder(LexOrder), Initializer(Initializer),
AssociatedData(AssociatedData) {}
int Priority;
+ unsigned LexOrder;
llvm::Constant *Initializer;
llvm::Constant *AssociatedData;
};
@@ -302,6 +301,7 @@ public:
private:
ASTContext &Context;
const LangOptions &LangOpts;
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS; // Only used for debug info.
const HeaderSearchOptions &HeaderSearchOpts; // Only used for debug info.
const PreprocessorOptions &PreprocessorOpts; // Only used for debug info.
const CodeGenOptions &CodeGenOpts;
@@ -311,8 +311,8 @@ private:
const TargetInfo &Target;
std::unique_ptr<CGCXXABI> ABI;
llvm::LLVMContext &VMContext;
- std::string ModuleNameHash = "";
-
+ std::string ModuleNameHash;
+ bool CXX20ModuleInits = false;
std::unique_ptr<CodeGenTBAA> TBAA;
mutable std::unique_ptr<TargetCodeGenInfo> TheTargetCodeGenInfo;
@@ -329,6 +329,7 @@ private:
std::unique_ptr<CGOpenCLRuntime> OpenCLRuntime;
std::unique_ptr<CGOpenMPRuntime> OpenMPRuntime;
std::unique_ptr<CGCUDARuntime> CUDARuntime;
+ std::unique_ptr<CGHLSLRuntime> HLSLRuntime;
std::unique_ptr<CGDebugInfo> DebugInfo;
std::unique_ptr<ObjCEntrypoints> ObjCData;
llvm::MDNode *NoObjCARCExceptionsMetadata = nullptr;
@@ -345,25 +346,48 @@ private:
/// for emission and therefore should only be output if they are actually
/// used. If a decl is in this, then it is known to have not been referenced
/// yet.
- std::map<StringRef, GlobalDecl> DeferredDecls;
+ llvm::DenseMap<StringRef, GlobalDecl> DeferredDecls;
/// This is a list of deferred decls which we have seen that *are* actually
/// referenced. These get code generated when the module is done.
std::vector<GlobalDecl> DeferredDeclsToEmit;
void addDeferredDeclToEmit(GlobalDecl GD) {
DeferredDeclsToEmit.emplace_back(GD);
+ addEmittedDeferredDecl(GD);
+ }
+
+ /// Decls that were DeferredDecls and have now been emitted.
+ llvm::DenseMap<llvm::StringRef, GlobalDecl> EmittedDeferredDecls;
+
+ void addEmittedDeferredDecl(GlobalDecl GD) {
+ // Reemission is only needed in incremental mode.
+ if (!Context.getLangOpts().IncrementalExtensions)
+ return;
+
+ // Assume a linkage by default that does not need reemission.
+ auto L = llvm::GlobalValue::ExternalLinkage;
+ if (llvm::isa<FunctionDecl>(GD.getDecl()))
+ L = getFunctionLinkage(GD);
+ else if (auto *VD = llvm::dyn_cast<VarDecl>(GD.getDecl()))
+ L = getLLVMLinkageVarDefinition(VD);
+
+ if (llvm::GlobalValue::isInternalLinkage(L) ||
+ llvm::GlobalValue::isLinkOnceLinkage(L) ||
+ llvm::GlobalValue::isWeakLinkage(L)) {
+ EmittedDeferredDecls[getMangledName(GD)] = GD;
+ }
}
/// List of alias we have emitted. Used to make sure that what they point to
/// is defined once we get to the end of the of the translation unit.
std::vector<GlobalDecl> Aliases;
- /// List of multiversion functions that have to be emitted. Used to make sure
- /// we properly emit the iFunc.
+ /// List of multiversion functions to be emitted. This list is processed in
+ /// conjunction with other deferred symbols and is used to ensure that
+ /// multiversion function resolvers and ifuncs are defined and emitted.
std::vector<GlobalDecl> MultiVersionFuncs;
- typedef llvm::StringMap<llvm::TrackingVH<llvm::Constant> > ReplacementsTy;
- ReplacementsTy Replacements;
+ llvm::MapVector<StringRef, llvm::TrackingVH<llvm::Constant>> Replacements;
/// List of global values to be replaced with something else. Used when we
/// want to replace a GlobalValue but can't identify it by its mangled name
@@ -404,16 +428,13 @@ private:
llvm::MapVector<GlobalDecl, StringRef> MangledDeclNames;
llvm::StringMap<GlobalDecl, llvm::BumpPtrAllocator> Manglings;
- // An ordered map of canonical GlobalDecls paired with the cpu-index for
- // cpu-specific name manglings.
- llvm::MapVector<std::pair<GlobalDecl, unsigned>, StringRef>
- CPUSpecificMangledDeclNames;
- llvm::StringMap<std::pair<GlobalDecl, unsigned>, llvm::BumpPtrAllocator>
- CPUSpecificManglings;
-
/// Global annotations.
std::vector<llvm::Constant*> Annotations;
+ // Store deferred function annotations so they can be emitted at the end with
+ // most up to date ValueDecl that will have all the inherited annotations.
+ llvm::DenseMap<StringRef, const ValueDecl *> DeferredAnnotations;
+
/// Map used to get unique annotation strings.
llvm::StringMap<llvm::Constant*> AnnotationStrings;
@@ -423,6 +444,8 @@ private:
llvm::StringMap<llvm::GlobalVariable *> CFConstantStringMap;
llvm::DenseMap<llvm::Constant *, llvm::GlobalVariable *> ConstantStringMap;
+ llvm::DenseMap<const UnnamedGlobalConstantDecl *, llvm::GlobalVariable *>
+ UnnamedGlobalConstantDeclMap;
llvm::DenseMap<const Decl*, llvm::Constant *> StaticLocalDeclMap;
llvm::DenseMap<const Decl*, llvm::GlobalVariable*> StaticLocalDeclGuardMap;
llvm::DenseMap<const Expr*, llvm::Constant *> MaterializedGlobalTemporaryMap;
@@ -526,6 +549,7 @@ private:
void createOpenCLRuntime();
void createOpenMPRuntime();
void createCUDARuntime();
+ void createHLSLRuntime();
bool isTriviallyRecursive(const FunctionDecl *F);
bool shouldEmitFunction(GlobalDecl GD);
@@ -578,8 +602,14 @@ private:
MetadataTypeMap VirtualMetadataIdMap;
MetadataTypeMap GeneralizedMetadataIdMap;
+ // Helps squashing blocks of TopLevelStmtDecl into a single llvm::Function
+ // when used with -fincremental-extensions.
+ std::pair<std::unique_ptr<CodeGenFunction>, const TopLevelStmtDecl *>
+ GlobalTopLevelStmtBlockInFlight;
+
public:
- CodeGenModule(ASTContext &C, const HeaderSearchOptions &headersearchopts,
+ CodeGenModule(ASTContext &C, IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
+ const HeaderSearchOptions &headersearchopts,
const PreprocessorOptions &ppopts,
const CodeGenOptions &CodeGenOpts, llvm::Module &M,
DiagnosticsEngine &Diags,
@@ -624,6 +654,12 @@ public:
return *CUDARuntime;
}
+ /// Return a reference to the configured HLSL runtime.
+ CGHLSLRuntime &getHLSLRuntime() {
+ assert(HLSLRuntime != nullptr);
+ return *HLSLRuntime;
+ }
+
ObjCEntrypoints &getObjCEntrypoints() const {
assert(ObjCData != nullptr);
return *ObjCData;
@@ -695,12 +731,16 @@ public:
llvm::MDNode *getNoObjCARCExceptionsMetadata() {
if (!NoObjCARCExceptionsMetadata)
- NoObjCARCExceptionsMetadata = llvm::MDNode::get(getLLVMContext(), None);
+ NoObjCARCExceptionsMetadata =
+ llvm::MDNode::get(getLLVMContext(), std::nullopt);
return NoObjCARCExceptionsMetadata;
}
ASTContext &getContext() const { return Context; }
const LangOptions &getLangOpts() const { return LangOpts; }
+ const IntrusiveRefCntPtr<llvm::vfs::FileSystem> &getFileSystem() const {
+ return FS;
+ }
const HeaderSearchOptions &getHeaderSearchOpts()
const { return HeaderSearchOpts; }
const PreprocessorOptions &getPreprocessorOpts()
@@ -731,6 +771,10 @@ public:
return VTables.getItaniumVTableContext();
}
+ const ItaniumVTableContext &getItaniumVTableContext() const {
+ return VTables.getItaniumVTableContext();
+ }
+
MicrosoftVTableContext &getMicrosoftVTableContext() {
return VTables.getMicrosoftVTableContext();
}
@@ -782,8 +826,6 @@ public:
return getTBAAAccessInfo(AccessType);
}
- bool isTypeConstant(QualType QTy, bool ExcludeCtorDtor);
-
bool isPaddedAtomicType(QualType type);
bool isPaddedAtomicType(const AtomicType *type);
@@ -803,6 +845,14 @@ public:
void setDSOLocal(llvm::GlobalValue *GV) const;
+ bool shouldMapVisibilityToDLLExport(const NamedDecl *D) const {
+ return getLangOpts().hasDefaultVisibilityExportMapping() && D &&
+ (D->getLinkageAndVisibility().getVisibility() ==
+ DefaultVisibility) &&
+ (getLangOpts().isAllDefaultVisibilityExportMapping() ||
+ (getLangOpts().isExplicitDefaultVisibilityExportMapping() &&
+ D->getLinkageAndVisibility().isVisibilityExplicit()));
+ }
void setDLLImportDLLExport(llvm::GlobalValue *GV, GlobalDecl D) const;
void setDLLImportDLLExport(llvm::GlobalValue *GV, const NamedDecl *D) const;
/// Set visibility, dllimport/dllexport and dso_local.
@@ -839,11 +889,13 @@ public:
llvm::GlobalVariable *
CreateOrReplaceCXXRuntimeVariable(StringRef Name, llvm::Type *Ty,
llvm::GlobalValue::LinkageTypes Linkage,
- unsigned Alignment);
+ llvm::Align Alignment);
llvm::Function *CreateGlobalInitOrCleanUpFunction(
llvm::FunctionType *ty, const Twine &name, const CGFunctionInfo &FI,
- SourceLocation Loc = SourceLocation(), bool TLS = false);
+ SourceLocation Loc = SourceLocation(), bool TLS = false,
+ llvm::GlobalVariable::LinkageTypes Linkage =
+ llvm::GlobalVariable::InternalLinkage);
/// Return the AST address space of the underlying global variable for D, as
/// determined by its declaration. Normally this is the same as the address
@@ -881,12 +933,26 @@ public:
ForDefinition_t IsForDefinition
= NotForDefinition);
+ // Return the function body address of the given function.
+ llvm::Constant *GetFunctionStart(const ValueDecl *Decl);
+
+ // Return whether RTTI information should be emitted for this target.
+ bool shouldEmitRTTI(bool ForEH = false) {
+ return (ForEH || getLangOpts().RTTI) && !getLangOpts().CUDAIsDevice &&
+ !(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
+ getTriple().isNVPTX());
+ }
+
/// Get the address of the RTTI descriptor for the given type.
llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH = false);
/// Get the address of a GUID.
ConstantAddress GetAddrOfMSGuidDecl(const MSGuidDecl *GD);
+ /// Get the address of a UnnamedGlobalConstant
+ ConstantAddress
+ GetAddrOfUnnamedGlobalConstantDecl(const UnnamedGlobalConstantDecl *GCD);
+
/// Get the address of a template parameter object.
ConstantAddress
GetAddrOfTemplateParamObject(const TemplateParamObjectDecl *TPO);
@@ -963,11 +1029,6 @@ public:
/// Return a pointer to a constant CFString object for the given string.
ConstantAddress GetAddrOfConstantCFString(const StringLiteral *Literal);
- /// Return a pointer to a constant NSString object for the given string. Or a
- /// user defined String object as defined via
- /// -fconstant-string-class=class_name option.
- ConstantAddress GetAddrOfConstantString(const StringLiteral *Literal);
-
/// Return a constant array for the given string.
llvm::Constant *GetConstantArrayFromStringLiteral(const StringLiteral *E);
@@ -1039,7 +1100,8 @@ public:
llvm::Constant *getBuiltinLibFunction(const FunctionDecl *FD,
unsigned BuiltinID);
- llvm::Function *getIntrinsic(unsigned IID, ArrayRef<llvm::Type*> Tys = None);
+ llvm::Function *getIntrinsic(unsigned IID,
+ ArrayRef<llvm::Type *> Tys = std::nullopt);
/// Emit code for a single top level declaration.
void EmitTopLevelDecl(Decl *D);
@@ -1202,24 +1264,11 @@ public:
llvm::AttributeList &Attrs, unsigned &CallingConv,
bool AttrOnCallSite, bool IsThunk);
- /// Adds attributes to F according to our CodeGenOptions and LangOptions, as
- /// though we had emitted it ourselves. We remove any attributes on F that
- /// conflict with the attributes we add here.
- ///
- /// This is useful for adding attrs to bitcode modules that you want to link
- /// with but don't control, such as CUDA's libdevice. When linking with such
- /// a bitcode library, you might want to set e.g. its functions'
- /// "unsafe-fp-math" attribute to match the attr of the functions you're
- /// codegen'ing. Otherwise, LLVM will interpret the bitcode module's lack of
- /// unsafe-fp-math attrs as tantamount to unsafe-fp-math=false, and then LLVM
- /// will propagate unsafe-fp-math=false up to every transitive caller of a
- /// function in the bitcode library!
- ///
- /// With the exception of fast-math attrs, this will only make the attributes
- /// on the function more conservative. But it's unsafe to call this on a
- /// function which relies on particular fast-math attributes for correctness.
- /// It's up to you to ensure that this is safe.
- void addDefaultFunctionDefinitionAttributes(llvm::Function &F);
+ /// Adjust Memory attribute to ensure that the BE gets the right attribute
+ // in order to generate the library call or the intrinsic for the function
+ // name 'Name'.
+ void AdjustMemoryAttribute(StringRef Name, CGCalleeInfo CalleeInfo,
+ llvm::AttributeList &Attrs);
/// Like the overload taking a `Function &`, but intended specifically
/// for frontends that want to build on Clang's target-configuration logic.
@@ -1227,6 +1276,7 @@ public:
StringRef getMangledName(GlobalDecl GD);
StringRef getBlockMangledName(GlobalDecl GD, const BlockDecl *BD);
+ const GlobalDecl getMangledNameDecl(StringRef);
void EmitTentativeDefinition(const VarDecl *D);
@@ -1261,12 +1311,11 @@ public:
/// Returns LLVM linkage for a declarator.
llvm::GlobalValue::LinkageTypes
- getLLVMLinkageForDeclarator(const DeclaratorDecl *D, GVALinkage Linkage,
- bool IsConstantVariable);
+ getLLVMLinkageForDeclarator(const DeclaratorDecl *D, GVALinkage Linkage);
/// Returns LLVM linkage for a declarator.
llvm::GlobalValue::LinkageTypes
- getLLVMLinkageVarDefinition(const VarDecl *VD, bool IsConstant);
+ getLLVMLinkageVarDefinition(const VarDecl *VD);
/// Emit all the global annotations.
void EmitGlobalAnnotations();
@@ -1301,8 +1350,9 @@ public:
bool isInNoSanitizeList(SanitizerMask Kind, llvm::Function *Fn,
SourceLocation Loc) const;
- bool isInNoSanitizeList(llvm::GlobalVariable *GV, SourceLocation Loc,
- QualType Ty, StringRef Category = StringRef()) const;
+ bool isInNoSanitizeList(SanitizerMask Kind, llvm::GlobalVariable *GV,
+ SourceLocation Loc, QualType Ty,
+ StringRef Category = StringRef()) const;
/// Imbue XRay attributes to a function, applying the always/never attribute
/// lists in the process. Returns true if we did imbue attributes this way,
@@ -1310,9 +1360,16 @@ public:
bool imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc,
StringRef Category = StringRef()) const;
- /// Returns true if function at the given location should be excluded from
- /// profile instrumentation.
- bool isProfileInstrExcluded(llvm::Function *Fn, SourceLocation Loc) const;
+ /// \returns true if \p Fn at \p Loc should be excluded from profile
+ /// instrumentation by the SCL passed by \p -fprofile-list.
+ ProfileList::ExclusionType
+ isFunctionBlockedByProfileList(llvm::Function *Fn, SourceLocation Loc) const;
+
+ /// \returns true if \p Fn at \p Loc should be excluded from profile
+ /// instrumentation.
+ ProfileList::ExclusionType
+ isFunctionBlockedFromProfileInstr(llvm::Function *Fn,
+ SourceLocation Loc) const;
SanitizerMetadata *getSanitizerMetadata() {
return SanitizerMD.get();
@@ -1360,15 +1417,18 @@ public:
/// \param D The allocate declaration
void EmitOMPAllocateDecl(const OMPAllocateDecl *D);
+ /// Return the alignment specified in an allocate directive, if present.
+ std::optional<CharUnits> getOMPAllocateAlignment(const VarDecl *VD);
+
/// Returns whether the given record has hidden LTO visibility and therefore
/// may participate in (single-module) CFI and whole-program vtable
/// optimization.
bool HasHiddenLTOVisibility(const CXXRecordDecl *RD);
- /// Returns whether the given record has public std LTO visibility
- /// and therefore may not participate in (single-module) CFI and whole-program
- /// vtable optimization.
- bool HasLTOVisibilityPublicStd(const CXXRecordDecl *RD);
+ /// Returns whether the given record has public LTO visibility (regardless of
+ /// -lto-whole-program-visibility) and therefore may not participate in
+ /// (single-module) CFI and whole-program vtable optimization.
+ bool AlwaysHasLTOVisibilityPublic(const CXXRecordDecl *RD);
/// Returns the vcall visibility of the given type. This is the scope in which
/// a virtual function call could be made which ends up being dispatched to a
@@ -1385,9 +1445,14 @@ public:
llvm::GlobalVariable *VTable,
const VTableLayout &VTLayout);
+ llvm::Type *getVTableComponentType() const;
+
/// Generate a cross-DSO type identifier for MD.
llvm::ConstantInt *CreateCrossDsoCfiTypeId(llvm::Metadata *MD);
+ /// Generate a KCFI type identifier for T.
+ llvm::ConstantInt *CreateKCFITypeId(QualType T);
+
/// Create a metadata identifier for the given type. This may either be an
/// MDString (for external identifiers) or a distinct unnamed MDNode (for
/// internal identifiers).
@@ -1406,9 +1471,16 @@ public:
void CreateFunctionTypeMetadataForIcall(const FunctionDecl *FD,
llvm::Function *F);
+ /// Set type metadata to the given function.
+ void setKCFIType(const FunctionDecl *FD, llvm::Function *F);
+
+ /// Emit KCFI type identifier constants and remove unused identifiers.
+ void finalizeKCFITypes();
+
/// Whether this function's return type has no side effects, and thus may
/// be trivially discarded if it is unused.
- bool MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType);
+ bool MayDropFunctionReturn(const ASTContext &Context,
+ QualType ReturnType) const;
/// Returns whether this module needs the "all-vtables" type identifier.
bool NeedAllVtablesTypeId() const;
@@ -1422,7 +1494,7 @@ public:
///
/// A most-base class of a class C is defined as a recursive base class of C,
/// including C itself, that does not have any bases.
- std::vector<const CXXRecordDecl *>
+ SmallVector<const CXXRecordDecl *, 0>
getMostBaseClasses(const CXXRecordDecl *RD);
/// Get the declaration of std::terminate for the platform.
@@ -1443,7 +1515,7 @@ public:
/// \param FN is a pointer to IR function being generated.
/// \param FD is a pointer to function declaration if any.
/// \param CGF is a pointer to CodeGenFunction that generates this function.
- void GenOpenCLArgMetadata(llvm::Function *FN,
+ void GenKernelArgMetadata(llvm::Function *FN,
const FunctionDecl *FD = nullptr,
CodeGenFunction *CGF = nullptr);
@@ -1461,9 +1533,53 @@ public:
TBAAAccessInfo *TBAAInfo = nullptr);
bool stopAutoInit();
- /// Print the postfix for externalized static variable for single source
- /// offloading languages CUDA and HIP.
- void printPostfixForExternalizedStaticVar(llvm::raw_ostream &OS) const;
+ /// Print the postfix for externalized static variable or kernels for single
+ /// source offloading languages CUDA and HIP. The unique postfix is created
+ /// using either the CUID argument, or the file's UniqueID and active macros.
+ /// The fallback method without a CUID requires that the offloading toolchain
+ /// does not define separate macros via the -cc1 options.
+ void printPostfixForExternalizedDecl(llvm::raw_ostream &OS,
+ const Decl *D) const;
+
+ /// Move some lazily-emitted states to the NewBuilder. This is especially
+ /// essential for the incremental parsing environment like Clang Interpreter,
+ /// because we'll lose all important information after each repl.
+ void moveLazyEmissionStates(CodeGenModule *NewBuilder);
+
+ /// Emit the IR encoding to attach the CUDA launch bounds attribute to \p F.
+ /// If \p MaxThreadsVal is not nullptr, the max threads value is stored in it,
+ /// if a valid one was found.
+ void handleCUDALaunchBoundsAttr(llvm::Function *F,
+ const CUDALaunchBoundsAttr *A,
+ int32_t *MaxThreadsVal = nullptr,
+ int32_t *MinBlocksVal = nullptr,
+ int32_t *MaxClusterRankVal = nullptr);
+
+ /// Emit the IR encoding to attach the AMD GPU flat-work-group-size attribute
+ /// to \p F. Alternatively, the work group size can be taken from a \p
+ /// ReqdWGS. If \p MinThreadsVal is not nullptr, the min threads value is
+ /// stored in it, if a valid one was found. If \p MaxThreadsVal is not
+ /// nullptr, the max threads value is stored in it, if a valid one was found.
+ void handleAMDGPUFlatWorkGroupSizeAttr(
+ llvm::Function *F, const AMDGPUFlatWorkGroupSizeAttr *A,
+ const ReqdWorkGroupSizeAttr *ReqdWGS = nullptr,
+ int32_t *MinThreadsVal = nullptr, int32_t *MaxThreadsVal = nullptr);
+
+ /// Emit the IR encoding to attach the AMD GPU waves-per-eu attribute to \p F.
+ void handleAMDGPUWavesPerEUAttr(llvm::Function *F,
+ const AMDGPUWavesPerEUAttr *A);
+
+ llvm::Constant *
+ GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty, LangAS AddrSpace,
+ const VarDecl *D,
+ ForDefinition_t IsForDefinition = NotForDefinition);
+
+ // FIXME: Hardcoding priority here is gross.
+ void AddGlobalCtor(llvm::Function *Ctor, int Priority = 65535,
+ unsigned LexOrder = ~0U,
+ llvm::Constant *AssociatedData = nullptr);
+ void AddGlobalDtor(llvm::Function *Dtor, int Priority = 65535,
+ bool IsDtorAttrFunc = false);
private:
llvm::Constant *GetOrCreateLLVMFunction(
@@ -1472,18 +1588,24 @@ private:
llvm::AttributeList ExtraAttrs = llvm::AttributeList(),
ForDefinition_t IsForDefinition = NotForDefinition);
- llvm::Constant *GetOrCreateMultiVersionResolver(GlobalDecl GD,
- llvm::Type *DeclTy,
- const FunctionDecl *FD);
- void UpdateMultiVersionNames(GlobalDecl GD, const FunctionDecl *FD);
-
- llvm::Constant *
- GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty,
- unsigned AddrSpace, const VarDecl *D,
- ForDefinition_t IsForDefinition = NotForDefinition);
+ // References to multiversion functions are resolved through an implicitly
+ // defined resolver function. This function is responsible for creating
+ // the resolver symbol for the provided declaration. The value returned
+ // will be for an ifunc (llvm::GlobalIFunc) if the current target supports
+ // that feature and for a regular function (llvm::GlobalValue) otherwise.
+ llvm::Constant *GetOrCreateMultiVersionResolver(GlobalDecl GD);
+
+ // In scenarios where a function is not known to be a multiversion function
+ // until a later declaration, it is sometimes necessary to change the
+ // previously created mangled name to align with requirements of whatever
+ // multiversion function kind the function is now known to be. This function
+ // is responsible for performing such mangled name updates.
+ void UpdateMultiVersionNames(GlobalDecl GD, const FunctionDecl *FD,
+ StringRef &CurName);
bool GetCPUAndFeaturesAttributes(GlobalDecl GD,
- llvm::AttrBuilder &AttrBuilder);
+ llvm::AttrBuilder &AttrBuilder,
+ bool SetTargetFeatures = true);
void setNonAliasAttributes(GlobalDecl GD, llvm::GlobalObject *GO);
/// Set function attributes for a function declaration.
@@ -1507,10 +1629,14 @@ private:
void EmitDeclContext(const DeclContext *DC);
void EmitLinkageSpec(const LinkageSpecDecl *D);
+ void EmitTopLevelStmt(const TopLevelStmtDecl *D);
/// Emit the function that initializes C++ thread_local variables.
void EmitCXXThreadLocalInitFunc();
+ /// Emit the function that initializes global variables for a C++ Module.
+ void EmitCXXModuleInitFunc(clang::Module *Primary);
+
/// Emit the function that initializes C++ globals.
void EmitCXXGlobalInitFunc();
@@ -1526,12 +1652,6 @@ private:
void EmitPointerToInitFunc(const VarDecl *VD, llvm::GlobalVariable *Addr,
llvm::Function *InitFunc, InitSegAttr *ISA);
- // FIXME: Hardcoding priority here is gross.
- void AddGlobalCtor(llvm::Function *Ctor, int Priority = 65535,
- llvm::Constant *AssociatedData = nullptr);
- void AddGlobalDtor(llvm::Function *Dtor, int Priority = 65535,
- bool IsDtorAttrFunc = false);
-
/// EmitCtorList - Generates a global array of functions and priorities using
/// the given list and name. This array will have appending linkage and is
/// suitable for use as a LLVM constructor or destructor array. Clears Fns.
@@ -1565,6 +1685,7 @@ private:
// registered by the atexit subroutine using unatexit.
void unregisterGlobalDtorsWithUnAtExit();
+ /// Emit deferred multiversion function resolvers and associated variants.
void emitMultiVersionFunctions();
/// Emit any vtables which we deferred and still have a use for.
@@ -1577,9 +1698,22 @@ private:
/// Emit the llvm.used and llvm.compiler.used metadata.
void emitLLVMUsed();
+ /// For C++20 Itanium ABI, emit the initializers for the module.
+ void EmitModuleInitializers(clang::Module *Primary);
+
/// Emit the link options introduced by imported modules.
void EmitModuleLinkOptions();
+ /// Helper function for EmitStaticExternCAliases() to redirect ifuncs that
+ /// have a resolver name that matches 'Elem' to instead resolve to the name of
+ /// 'CppFunc'. This redirection is necessary in cases where 'Elem' has a name
+ /// that will be emitted as an alias of the name bound to 'CppFunc'; ifuncs
+ /// may not reference aliases. Redirection is only performed if 'Elem' is only
+ /// used by ifuncs in which case, 'Elem' is destroyed. 'true' is returned if
+ /// redirection is successful, and 'false' is returned otherwise.
+ bool CheckAndReplaceExternCIFuncs(llvm::GlobalValue *Elem,
+ llvm::GlobalValue *CppFunc);
+
/// Emit aliases for internal-linkage declarations inside "C" language
/// linkage specifications, giving them the "expected" name where possible.
void EmitStaticExternCAliases();
@@ -1594,7 +1728,7 @@ private:
/// Emit the module flag metadata used to pass options controlling the
/// the backend to LLVM.
- void EmitBackendOptionsMetadata(const CodeGenOptions CodeGenOpts);
+ void EmitBackendOptionsMetadata(const CodeGenOptions &CodeGenOpts);
/// Emits OpenCL specific Metadata e.g. OpenCL version.
void EmitOpenCLMetadata();
@@ -1617,6 +1751,12 @@ private:
/// function.
void SimplifyPersonality();
+ /// Helper function for getDefaultFunctionAttributes. Builds a set of function
+ /// attributes which can be simply added to a function.
+ void getTrivialDefaultFunctionAttributes(StringRef Name, bool HasOptnone,
+ bool AttrOnCallSite,
+ llvm::AttrBuilder &FuncAttrs);
+
/// Helper function for ConstructAttributeList and
/// addDefaultFunctionDefinitionAttributes. Builds a set of function
/// attributes to add to a function with the given properties.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp
index d828ac0eb5e9..fb4e86e8bd80 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp
@@ -21,12 +21,15 @@
#include "llvm/Support/Endian.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MD5.h"
+#include <optional>
static llvm::cl::opt<bool>
- EnableValueProfiling("enable-value-profiling", llvm::cl::ZeroOrMore,
+ EnableValueProfiling("enable-value-profiling",
llvm::cl::desc("Enable value profiling"),
llvm::cl::Hidden, llvm::cl::init(false));
+extern llvm::cl::opt<bool> SystemHeadersCoverage;
+
using namespace clang;
using namespace CodeGen;
@@ -131,7 +134,7 @@ public:
static_assert(LastHashType <= TooBig, "Too many types in HashType");
PGOHash(PGOHashVersion HashVersion)
- : Working(0), Count(0), HashVersion(HashVersion), MD5() {}
+ : Working(0), Count(0), HashVersion(HashVersion) {}
void combine(HashType Type);
uint64_t finalize();
PGOHashVersion getHashVersion() const { return HashVersion; }
@@ -160,13 +163,24 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
PGOHash Hash;
/// The map of statements to counters.
llvm::DenseMap<const Stmt *, unsigned> &CounterMap;
+ /// The next bitmap byte index to assign.
+ unsigned NextMCDCBitmapIdx;
+ /// The map of statements to MC/DC bitmap coverage objects.
+ llvm::DenseMap<const Stmt *, unsigned> &MCDCBitmapMap;
+ /// Maximum number of supported MC/DC conditions in a boolean expression.
+ unsigned MCDCMaxCond;
/// The profile version.
uint64_t ProfileVersion;
+ /// Diagnostics Engine used to report warnings.
+ DiagnosticsEngine &Diag;
MapRegionCounters(PGOHashVersion HashVersion, uint64_t ProfileVersion,
- llvm::DenseMap<const Stmt *, unsigned> &CounterMap)
+ llvm::DenseMap<const Stmt *, unsigned> &CounterMap,
+ llvm::DenseMap<const Stmt *, unsigned> &MCDCBitmapMap,
+ unsigned MCDCMaxCond, DiagnosticsEngine &Diag)
: NextCounter(0), Hash(HashVersion), CounterMap(CounterMap),
- ProfileVersion(ProfileVersion) {}
+ NextMCDCBitmapIdx(0), MCDCBitmapMap(MCDCBitmapMap),
+ MCDCMaxCond(MCDCMaxCond), ProfileVersion(ProfileVersion), Diag(Diag) {}
// Blocks and lambdas are handled as separate functions, so we need not
// traverse them in the parent context.
@@ -206,15 +220,129 @@ struct MapRegionCounters : public RecursiveASTVisitor<MapRegionCounters> {
return Type;
}
+ /// The following stacks are used with dataTraverseStmtPre() and
+ /// dataTraverseStmtPost() to track the depth of nested logical operators in a
+ /// boolean expression in a function. The ultimate purpose is to keep track
+ /// of the number of leaf-level conditions in the boolean expression so that a
+ /// profile bitmap can be allocated based on that number.
+ ///
+ /// The stacks are also used to find error cases and notify the user. A
+ /// standard logical operator nest for a boolean expression could be in a form
+ /// similar to this: "x = a && b && c && (d || f)"
+ unsigned NumCond = 0;
+ bool SplitNestedLogicalOp = false;
+ SmallVector<const Stmt *, 16> NonLogOpStack;
+ SmallVector<const BinaryOperator *, 16> LogOpStack;
+
+ // Hook: dataTraverseStmtPre() is invoked prior to visiting an AST Stmt node.
+ bool dataTraverseStmtPre(Stmt *S) {
+ /// If MC/DC is not enabled, MCDCMaxCond will be set to 0. Do nothing.
+ if (MCDCMaxCond == 0)
+ return true;
+
+ /// At the top of the logical operator nest, reset the number of conditions,
+ /// also forget previously seen split nesting cases.
+ if (LogOpStack.empty()) {
+ NumCond = 0;
+ SplitNestedLogicalOp = false;
+ }
+
+ if (const Expr *E = dyn_cast<Expr>(S)) {
+ const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(E->IgnoreParens());
+ if (BinOp && BinOp->isLogicalOp()) {
+ /// Check for "split-nested" logical operators. This happens when a new
+ /// boolean expression logical-op nest is encountered within an existing
+ /// boolean expression, separated by a non-logical operator. For
+ /// example, in "x = (a && b && c && foo(d && f))", the "d && f" case
+ /// starts a new boolean expression that is separated from the other
+ /// conditions by the operator foo(). Split-nested cases are not
+ /// supported by MC/DC.
+ SplitNestedLogicalOp = SplitNestedLogicalOp || !NonLogOpStack.empty();
+
+ LogOpStack.push_back(BinOp);
+ return true;
+ }
+ }
+
+ /// Keep track of non-logical operators. These are OK as long as we don't
+ /// encounter a new logical operator after seeing one.
+ if (!LogOpStack.empty())
+ NonLogOpStack.push_back(S);
+
+ return true;
+ }
+
+ // Hook: dataTraverseStmtPost() is invoked by the AST visitor after visiting
+ // an AST Stmt node. MC/DC will use it to to signal when the top of a
+ // logical operation (boolean expression) nest is encountered.
+ bool dataTraverseStmtPost(Stmt *S) {
+ /// If MC/DC is not enabled, MCDCMaxCond will be set to 0. Do nothing.
+ if (MCDCMaxCond == 0)
+ return true;
+
+ if (const Expr *E = dyn_cast<Expr>(S)) {
+ const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(E->IgnoreParens());
+ if (BinOp && BinOp->isLogicalOp()) {
+ assert(LogOpStack.back() == BinOp);
+ LogOpStack.pop_back();
+
+ /// At the top of logical operator nest:
+ if (LogOpStack.empty()) {
+ /// Was the "split-nested" logical operator case encountered?
+ if (SplitNestedLogicalOp) {
+ unsigned DiagID = Diag.getCustomDiagID(
+ DiagnosticsEngine::Warning,
+ "unsupported MC/DC boolean expression; "
+ "contains an operation with a nested boolean expression. "
+ "Expression will not be covered");
+ Diag.Report(S->getBeginLoc(), DiagID);
+ return true;
+ }
+
+ /// Was the maximum number of conditions encountered?
+ if (NumCond > MCDCMaxCond) {
+ unsigned DiagID = Diag.getCustomDiagID(
+ DiagnosticsEngine::Warning,
+ "unsupported MC/DC boolean expression; "
+ "number of conditions (%0) exceeds max (%1). "
+ "Expression will not be covered");
+ Diag.Report(S->getBeginLoc(), DiagID) << NumCond << MCDCMaxCond;
+ return true;
+ }
+
+ // Otherwise, allocate the number of bytes required for the bitmap
+ // based on the number of conditions. Must be at least 1-byte long.
+ MCDCBitmapMap[BinOp] = NextMCDCBitmapIdx;
+ unsigned SizeInBits = std::max<unsigned>(1L << NumCond, CHAR_BIT);
+ NextMCDCBitmapIdx += SizeInBits / CHAR_BIT;
+ }
+ return true;
+ }
+ }
+
+ if (!LogOpStack.empty())
+ NonLogOpStack.pop_back();
+
+ return true;
+ }
+
/// The RHS of all logical operators gets a fresh counter in order to count
/// how many times the RHS evaluates to true or false, depending on the
/// semantics of the operator. This is only valid for ">= v7" of the profile
- /// version so that we facilitate backward compatibility.
+ /// version so that we facilitate backward compatibility. In addition, in
+ /// order to use MC/DC, count the number of total LHS and RHS conditions.
bool VisitBinaryOperator(BinaryOperator *S) {
- if (ProfileVersion >= llvm::IndexedInstrProf::Version7)
- if (S->isLogicalOp() &&
- CodeGenFunction::isInstrumentedCondition(S->getRHS()))
- CounterMap[S->getRHS()] = NextCounter++;
+ if (S->isLogicalOp()) {
+ if (CodeGenFunction::isInstrumentedCondition(S->getLHS()))
+ NumCond++;
+
+ if (CodeGenFunction::isInstrumentedCondition(S->getRHS())) {
+ if (ProfileVersion >= llvm::IndexedInstrProf::Version7)
+ CounterMap[S->getRHS()] = NextCounter++;
+
+ NumCond++;
+ }
+ }
return Base::VisitBinaryOperator(S);
}
@@ -375,9 +503,9 @@ struct ComputeRegionCounts : public ConstStmtVisitor<ComputeRegionCounts> {
/// BreakContinueStack - Keep counts of breaks and continues inside loops.
struct BreakContinue {
- uint64_t BreakCount;
- uint64_t ContinueCount;
- BreakContinue() : BreakCount(0), ContinueCount(0) {}
+ uint64_t BreakCount = 0;
+ uint64_t ContinueCount = 0;
+ BreakContinue() = default;
};
SmallVector<BreakContinue, 8> BreakContinueStack;
@@ -649,6 +777,14 @@ struct ComputeRegionCounts : public ConstStmtVisitor<ComputeRegionCounts> {
void VisitIfStmt(const IfStmt *S) {
RecordStmtCount(S);
+
+ if (S->isConsteval()) {
+ const Stmt *Stm = S->isNegatedConsteval() ? S->getThen() : S->getElse();
+ if (Stm)
+ Visit(Stm);
+ return;
+ }
+
uint64_t ParentCount = CurrentCount;
if (S->getInit())
Visit(S->getInit());
@@ -746,8 +882,9 @@ void PGOHash::combine(HashType Type) {
// Pass through MD5 if enough work has built up.
if (Count && Count % NumTypesPerWord == 0) {
using namespace llvm::support;
- uint64_t Swapped = endian::byte_swap<uint64_t, little>(Working);
- MD5.update(llvm::makeArrayRef((uint8_t *)&Swapped, sizeof(Swapped)));
+ uint64_t Swapped =
+ endian::byte_swap<uint64_t, llvm::endianness::little>(Working);
+ MD5.update(llvm::ArrayRef((uint8_t *)&Swapped, sizeof(Swapped)));
Working = 0;
}
@@ -772,8 +909,9 @@ uint64_t PGOHash::finalize() {
MD5.update({(uint8_t)Working});
} else {
using namespace llvm::support;
- uint64_t Swapped = endian::byte_swap<uint64_t, little>(Working);
- MD5.update(llvm::makeArrayRef((uint8_t *)&Swapped, sizeof(Swapped)));
+ uint64_t Swapped =
+ endian::byte_swap<uint64_t, llvm::endianness::little>(Working);
+ MD5.update(llvm::ArrayRef((uint8_t *)&Swapped, sizeof(Swapped)));
}
}
@@ -814,6 +952,8 @@ void CodeGenPGO::assignRegionCounters(GlobalDecl GD, llvm::Function *Fn) {
CGM.ClearUnusedCoverageMapping(D);
if (Fn->hasFnAttribute(llvm::Attribute::NoProfile))
return;
+ if (Fn->hasFnAttribute(llvm::Attribute::SkipProfile))
+ return;
setFuncName(Fn);
@@ -838,8 +978,22 @@ void CodeGenPGO::mapRegionCounters(const Decl *D) {
ProfileVersion = PGOReader->getVersion();
}
+ // If MC/DC is enabled, set the MaxConditions to a preset value. Otherwise,
+ // set it to zero. This value impacts the number of conditions accepted in a
+ // given boolean expression, which impacts the size of the bitmap used to
+ // track test vector execution for that boolean expression. Because the
+ // bitmap scales exponentially (2^n) based on the number of conditions seen,
+ // the maximum value is hard-coded at 6 conditions, which is more than enough
+ // for most embedded applications. Setting a maximum value prevents the
+ // bitmap footprint from growing too large without the user's knowledge. In
+ // the future, this value could be adjusted with a command-line option.
+ unsigned MCDCMaxConditions = (CGM.getCodeGenOpts().MCDCCoverage) ? 6 : 0;
+
RegionCounterMap.reset(new llvm::DenseMap<const Stmt *, unsigned>);
- MapRegionCounters Walker(HashVersion, ProfileVersion, *RegionCounterMap);
+ RegionMCDCBitmapMap.reset(new llvm::DenseMap<const Stmt *, unsigned>);
+ MapRegionCounters Walker(HashVersion, ProfileVersion, *RegionCounterMap,
+ *RegionMCDCBitmapMap, MCDCMaxConditions,
+ CGM.getDiags());
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
Walker.TraverseDecl(const_cast<FunctionDecl *>(FD));
else if (const ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(D))
@@ -850,6 +1004,7 @@ void CodeGenPGO::mapRegionCounters(const Decl *D) {
Walker.TraverseDecl(const_cast<CapturedDecl *>(CD));
assert(Walker.NextCounter > 0 && "no entry counter mapped for decl");
NumRegionCounters = Walker.NextCounter;
+ MCDCBitmapBytes = Walker.NextMCDCBitmapIdx;
FunctionHash = Walker.Hash.finalize();
}
@@ -872,7 +1027,7 @@ bool CodeGenPGO::skipRegionMappingForDecl(const Decl *D) {
// Don't map the functions in system headers.
const auto &SM = CGM.getContext().getSourceManager();
auto Loc = D->getBody()->getBeginLoc();
- return SM.isInSystemHeader(Loc);
+ return !SystemHeadersCoverage && SM.isInSystemHeader(Loc);
}
void CodeGenPGO::emitCounterRegionMapping(const Decl *D) {
@@ -881,9 +1036,11 @@ void CodeGenPGO::emitCounterRegionMapping(const Decl *D) {
std::string CoverageMapping;
llvm::raw_string_ostream OS(CoverageMapping);
- CoverageMappingGen MappingGen(*CGM.getCoverageMapping(),
- CGM.getContext().getSourceManager(),
- CGM.getLangOpts(), RegionCounterMap.get());
+ RegionCondIDMap.reset(new llvm::DenseMap<const Stmt *, unsigned>);
+ CoverageMappingGen MappingGen(
+ *CGM.getCoverageMapping(), CGM.getContext().getSourceManager(),
+ CGM.getLangOpts(), RegionCounterMap.get(), RegionMCDCBitmapMap.get(),
+ RegionCondIDMap.get());
MappingGen.emitCounterMapping(D, OS);
OS.flush();
@@ -941,25 +1098,124 @@ CodeGenPGO::applyFunctionAttributes(llvm::IndexedInstrProfReader *PGOReader,
void CodeGenPGO::emitCounterIncrement(CGBuilderTy &Builder, const Stmt *S,
llvm::Value *StepV) {
- if (!CGM.getCodeGenOpts().hasProfileClangInstr() || !RegionCounterMap)
- return;
- if (!Builder.GetInsertBlock())
+ if (!RegionCounterMap || !Builder.GetInsertBlock())
return;
unsigned Counter = (*RegionCounterMap)[S];
- auto *I8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
- llvm::Value *Args[] = {llvm::ConstantExpr::getBitCast(FuncNameVar, I8PtrTy),
+ llvm::Value *Args[] = {FuncNameVar,
Builder.getInt64(FunctionHash),
Builder.getInt32(NumRegionCounters),
Builder.getInt32(Counter), StepV};
if (!StepV)
Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::instrprof_increment),
- makeArrayRef(Args, 4));
+ ArrayRef(Args, 4));
else
Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::instrprof_increment_step),
- makeArrayRef(Args));
+ ArrayRef(Args));
+}
+
+bool CodeGenPGO::canEmitMCDCCoverage(const CGBuilderTy &Builder) {
+ return (CGM.getCodeGenOpts().hasProfileClangInstr() &&
+ CGM.getCodeGenOpts().MCDCCoverage && Builder.GetInsertBlock());
+}
+
+void CodeGenPGO::emitMCDCParameters(CGBuilderTy &Builder) {
+ if (!canEmitMCDCCoverage(Builder) || !RegionMCDCBitmapMap)
+ return;
+
+ auto *I8PtrTy = llvm::PointerType::getUnqual(CGM.getLLVMContext());
+
+ // Emit intrinsic representing MCDC bitmap parameters at function entry.
+ // This is used by the instrumentation pass, but it isn't actually lowered to
+ // anything.
+ llvm::Value *Args[3] = {llvm::ConstantExpr::getBitCast(FuncNameVar, I8PtrTy),
+ Builder.getInt64(FunctionHash),
+ Builder.getInt32(MCDCBitmapBytes)};
+ Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::instrprof_mcdc_parameters), Args);
+}
+
+void CodeGenPGO::emitMCDCTestVectorBitmapUpdate(CGBuilderTy &Builder,
+ const Expr *S,
+ Address MCDCCondBitmapAddr) {
+ if (!canEmitMCDCCoverage(Builder) || !RegionMCDCBitmapMap)
+ return;
+
+ S = S->IgnoreParens();
+
+ auto ExprMCDCBitmapMapIterator = RegionMCDCBitmapMap->find(S);
+ if (ExprMCDCBitmapMapIterator == RegionMCDCBitmapMap->end())
+ return;
+
+ // Extract the ID of the global bitmap associated with this expression.
+ unsigned MCDCTestVectorBitmapID = ExprMCDCBitmapMapIterator->second;
+ auto *I8PtrTy = llvm::PointerType::getUnqual(CGM.getLLVMContext());
+
+ // Emit intrinsic responsible for updating the global bitmap corresponding to
+ // a boolean expression. The index being set is based on the value loaded
+ // from a pointer to a dedicated temporary value on the stack that is itself
+ // updated via emitMCDCCondBitmapReset() and emitMCDCCondBitmapUpdate(). The
+ // index represents an executed test vector.
+ llvm::Value *Args[5] = {llvm::ConstantExpr::getBitCast(FuncNameVar, I8PtrTy),
+ Builder.getInt64(FunctionHash),
+ Builder.getInt32(MCDCBitmapBytes),
+ Builder.getInt32(MCDCTestVectorBitmapID),
+ MCDCCondBitmapAddr.getPointer()};
+ Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::instrprof_mcdc_tvbitmap_update), Args);
+}
+
+void CodeGenPGO::emitMCDCCondBitmapReset(CGBuilderTy &Builder, const Expr *S,
+ Address MCDCCondBitmapAddr) {
+ if (!canEmitMCDCCoverage(Builder) || !RegionMCDCBitmapMap)
+ return;
+
+ S = S->IgnoreParens();
+
+ if (RegionMCDCBitmapMap->find(S) == RegionMCDCBitmapMap->end())
+ return;
+
+ // Emit intrinsic that resets a dedicated temporary value on the stack to 0.
+ Builder.CreateStore(Builder.getInt32(0), MCDCCondBitmapAddr);
+}
+
+void CodeGenPGO::emitMCDCCondBitmapUpdate(CGBuilderTy &Builder, const Expr *S,
+ Address MCDCCondBitmapAddr,
+ llvm::Value *Val) {
+ if (!canEmitMCDCCoverage(Builder) || !RegionCondIDMap)
+ return;
+
+ // Even though, for simplicity, parentheses and unary logical-NOT operators
+ // are considered part of their underlying condition for both MC/DC and
+ // branch coverage, the condition IDs themselves are assigned and tracked
+ // using the underlying condition itself. This is done solely for
+ // consistency since parentheses and logical-NOTs are ignored when checking
+ // whether the condition is actually an instrumentable condition. This can
+ // also make debugging a bit easier.
+ S = CodeGenFunction::stripCond(S);
+
+ auto ExprMCDCConditionIDMapIterator = RegionCondIDMap->find(S);
+ if (ExprMCDCConditionIDMapIterator == RegionCondIDMap->end())
+ return;
+
+ // Extract the ID of the condition we are setting in the bitmap.
+ unsigned CondID = ExprMCDCConditionIDMapIterator->second;
+ assert(CondID > 0 && "Condition has no ID!");
+
+ auto *I8PtrTy = llvm::PointerType::getUnqual(CGM.getLLVMContext());
+
+ // Emit intrinsic that updates a dedicated temporary value on the stack after
+ // a condition is evaluated. After the set of conditions has been updated,
+ // the resulting value is used to update the boolean expression's bitmap.
+ llvm::Value *Args[5] = {llvm::ConstantExpr::getBitCast(FuncNameVar, I8PtrTy),
+ Builder.getInt64(FunctionHash),
+ Builder.getInt32(CondID - 1),
+ MCDCCondBitmapAddr.getPointer(), Val};
+ Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::instrprof_mcdc_condbitmap_update),
+ Args);
}
void CodeGenPGO::setValueProfilingFlag(llvm::Module &M) {
@@ -987,7 +1243,7 @@ void CodeGenPGO::valueProfile(CGBuilderTy &Builder, uint32_t ValueKind,
auto BuilderInsertPoint = Builder.saveIP();
Builder.SetInsertPoint(ValueSite);
llvm::Value *Args[5] = {
- llvm::ConstantExpr::getBitCast(FuncNameVar, Builder.getInt8PtrTy()),
+ FuncNameVar,
Builder.getInt64(FunctionHash),
Builder.CreatePtrToInt(ValuePtr, Builder.getInt64Ty()),
Builder.getInt32(ValueKind),
@@ -1025,7 +1281,7 @@ void CodeGenPGO::loadRegionCounts(llvm::IndexedInstrProfReader *PGOReader,
llvm::Expected<llvm::InstrProfRecord> RecordExpected =
PGOReader->getInstrProfRecord(FuncName, FunctionHash);
if (auto E = RecordExpected.takeError()) {
- auto IPE = llvm::InstrProfError::take(std::move(E));
+ auto IPE = std::get<0>(llvm::InstrProfError::take(std::move(E)));
if (IPE == llvm::instrprof_error::unknown_function)
CGM.getPGOStats().addMissing(IsInMainFile);
else if (IPE == llvm::instrprof_error::hash_mismatch)
@@ -1106,7 +1362,7 @@ CodeGenFunction::createProfileWeightsForLoop(const Stmt *Cond,
uint64_t LoopCount) const {
if (!PGO.haveRegionCounts())
return nullptr;
- Optional<uint64_t> CondCount = PGO.getStmtCount(Cond);
+ std::optional<uint64_t> CondCount = PGO.getStmtCount(Cond);
if (!CondCount || *CondCount == 0)
return nullptr;
return createProfileWeights(LoopCount,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.h
index f740692ac205..6596b6c35277 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.h
@@ -19,6 +19,7 @@
#include "llvm/ProfileData/InstrProfReader.h"
#include <array>
#include <memory>
+#include <optional>
namespace clang {
namespace CodeGen {
@@ -32,8 +33,11 @@ private:
std::array <unsigned, llvm::IPVK_Last + 1> NumValueSites;
unsigned NumRegionCounters;
+ unsigned MCDCBitmapBytes;
uint64_t FunctionHash;
std::unique_ptr<llvm::DenseMap<const Stmt *, unsigned>> RegionCounterMap;
+ std::unique_ptr<llvm::DenseMap<const Stmt *, unsigned>> RegionMCDCBitmapMap;
+ std::unique_ptr<llvm::DenseMap<const Stmt *, unsigned>> RegionCondIDMap;
std::unique_ptr<llvm::DenseMap<const Stmt *, uint64_t>> StmtCountMap;
std::unique_ptr<llvm::InstrProfRecord> ProfRecord;
std::vector<uint64_t> RegionCounts;
@@ -42,7 +46,8 @@ private:
public:
CodeGenPGO(CodeGenModule &CGModule)
: CGM(CGModule), FuncNameVar(nullptr), NumValueSites({{0}}),
- NumRegionCounters(0), FunctionHash(0), CurrentRegionCount(0) {}
+ NumRegionCounters(0), MCDCBitmapBytes(0), FunctionHash(0),
+ CurrentRegionCount(0) {}
/// Whether or not we have PGO region data for the current function. This is
/// false both when we have no data at all and when our data has been
@@ -59,12 +64,12 @@ public:
/// Check if an execution count is known for a given statement. If so, return
/// true and put the value in Count; else return false.
- Optional<uint64_t> getStmtCount(const Stmt *S) const {
+ std::optional<uint64_t> getStmtCount(const Stmt *S) const {
if (!StmtCountMap)
- return None;
+ return std::nullopt;
auto I = StmtCountMap->find(S);
if (I == StmtCountMap->end())
- return None;
+ return std::nullopt;
return I->second;
}
@@ -102,10 +107,18 @@ private:
bool IsInMainFile);
bool skipRegionMappingForDecl(const Decl *D);
void emitCounterRegionMapping(const Decl *D);
+ bool canEmitMCDCCoverage(const CGBuilderTy &Builder);
public:
void emitCounterIncrement(CGBuilderTy &Builder, const Stmt *S,
llvm::Value *StepV);
+ void emitMCDCTestVectorBitmapUpdate(CGBuilderTy &Builder, const Expr *S,
+ Address MCDCCondBitmapAddr);
+ void emitMCDCParameters(CGBuilderTy &Builder);
+ void emitMCDCCondBitmapReset(CGBuilderTy &Builder, const Expr *S,
+ Address MCDCCondBitmapAddr);
+ void emitMCDCCondBitmapUpdate(CGBuilderTy &Builder, const Expr *S,
+ Address MCDCCondBitmapAddr, llvm::Value *Val);
/// Return the region count for the counter at the given index.
uint64_t getRegionCount(const Stmt *S) {
@@ -113,7 +126,12 @@ public:
return 0;
if (!haveRegionCounts())
return 0;
- return RegionCounts[(*RegionCounterMap)[S]];
+ // With profiles from a differing version of clang we can have mismatched
+ // decl counts. Don't crash in such a case.
+ auto Index = (*RegionCounterMap)[S];
+ if (Index >= RegionCounts.size())
+ return 0;
+ return RegionCounts[Index];
}
};
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.cpp
index f4ebe6885675..dc288bc3f615 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.cpp
@@ -196,25 +196,28 @@ llvm::MDNode *CodeGenTBAA::getTypeInfoHelper(const Type *Ty) {
// Enum types are distinct types. In C++ they have "underlying types",
// however they aren't related for TBAA.
if (const EnumType *ETy = dyn_cast<EnumType>(Ty)) {
+ if (!Features.CPlusPlus)
+ return getTypeInfo(ETy->getDecl()->getIntegerType());
+
// In C++ mode, types have linkage, so we can rely on the ODR and
// on their mangled names, if they're external.
// TODO: Is there a way to get a program-wide unique name for a
// decl with local linkage or no linkage?
- if (!Features.CPlusPlus || !ETy->getDecl()->isExternallyVisible())
+ if (!ETy->getDecl()->isExternallyVisible())
return getChar();
SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
- MContext.mangleTypeName(QualType(ETy, 0), Out);
+ MContext.mangleCanonicalTypeName(QualType(ETy, 0), Out);
return createScalarTypeNode(OutName, getChar(), Size);
}
- if (const auto *EIT = dyn_cast<ExtIntType>(Ty)) {
+ if (const auto *EIT = dyn_cast<BitIntType>(Ty)) {
SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
// Don't specify signed/unsigned since integer types can alias despite sign
// differences.
- Out << "_ExtInt(" << EIT->getNumBits() << ')';
+ Out << "_BitInt(" << EIT->getNumBits() << ')';
return createScalarTypeNode(OutName, getChar(), Size);
}
@@ -335,7 +338,42 @@ llvm::MDNode *CodeGenTBAA::getBaseTypeInfoHelper(const Type *Ty) {
if (auto *TTy = dyn_cast<RecordType>(Ty)) {
const RecordDecl *RD = TTy->getDecl()->getDefinition();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- SmallVector<llvm::MDBuilder::TBAAStructField, 4> Fields;
+ using TBAAStructField = llvm::MDBuilder::TBAAStructField;
+ SmallVector<TBAAStructField, 4> Fields;
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ // Handle C++ base classes. Non-virtual bases can treated a kind of
+ // field. Virtual bases are more complex and omitted, but avoid an
+ // incomplete view for NewStructPathTBAA.
+ if (CodeGenOpts.NewStructPathTBAA && CXXRD->getNumVBases() != 0)
+ return nullptr;
+ for (const CXXBaseSpecifier &B : CXXRD->bases()) {
+ if (B.isVirtual())
+ continue;
+ QualType BaseQTy = B.getType();
+ const CXXRecordDecl *BaseRD = BaseQTy->getAsCXXRecordDecl();
+ if (BaseRD->isEmpty())
+ continue;
+ llvm::MDNode *TypeNode = isValidBaseType(BaseQTy)
+ ? getBaseTypeInfo(BaseQTy)
+ : getTypeInfo(BaseQTy);
+ if (!TypeNode)
+ return nullptr;
+ uint64_t Offset = Layout.getBaseClassOffset(BaseRD).getQuantity();
+ uint64_t Size =
+ Context.getASTRecordLayout(BaseRD).getDataSize().getQuantity();
+ Fields.push_back(
+ llvm::MDBuilder::TBAAStructField(Offset, Size, TypeNode));
+ }
+ // The order in which base class subobjects are allocated is unspecified,
+ // so may differ from declaration order. In particular, Itanium ABI will
+ // allocate a primary base first.
+ // Since we exclude empty subobjects, the objects are not overlapping and
+ // their offsets are unique.
+ llvm::sort(Fields,
+ [](const TBAAStructField &A, const TBAAStructField &B) {
+ return A.Offset < B.Offset;
+ });
+ }
for (FieldDecl *Field : RD->fields()) {
if (Field->isZeroSize(Context) || Field->isUnnamedBitfield())
continue;
@@ -343,7 +381,7 @@ llvm::MDNode *CodeGenTBAA::getBaseTypeInfoHelper(const Type *Ty) {
llvm::MDNode *TypeNode = isValidBaseType(FieldQTy) ?
getBaseTypeInfo(FieldQTy) : getTypeInfo(FieldQTy);
if (!TypeNode)
- return BaseTypeMetadataCache[Ty] = nullptr;
+ return nullptr;
uint64_t BitOffset = Layout.getFieldOffset(Field->getFieldIndex());
uint64_t Offset = Context.toCharUnitsFromBits(BitOffset).getQuantity();
@@ -356,7 +394,7 @@ llvm::MDNode *CodeGenTBAA::getBaseTypeInfoHelper(const Type *Ty) {
if (Features.CPlusPlus) {
// Don't use the mangler for C code.
llvm::raw_svector_ostream Out(OutName);
- MContext.mangleTypeName(QualType(Ty, 0), Out);
+ MContext.mangleCanonicalTypeName(QualType(Ty, 0), Out);
} else {
OutName = RD->getName();
}
@@ -383,14 +421,20 @@ llvm::MDNode *CodeGenTBAA::getBaseTypeInfo(QualType QTy) {
return nullptr;
const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();
- if (llvm::MDNode *N = BaseTypeMetadataCache[Ty])
- return N;
- // Note that the following helper call is allowed to add new nodes to the
- // cache, which invalidates all its previously obtained iterators. So we
- // first generate the node for the type and then add that node to the cache.
+ // nullptr is a valid value in the cache, so use find rather than []
+ auto I = BaseTypeMetadataCache.find(Ty);
+ if (I != BaseTypeMetadataCache.end())
+ return I->second;
+
+ // First calculate the metadata, before recomputing the insertion point, as
+ // the helper can recursively call us.
llvm::MDNode *TypeNode = getBaseTypeInfoHelper(Ty);
- return BaseTypeMetadataCache[Ty] = TypeNode;
+ LLVM_ATTRIBUTE_UNUSED auto inserted =
+ BaseTypeMetadataCache.insert({Ty, TypeNode});
+ assert(inserted.second && "BaseType metadata was already inserted");
+
+ return TypeNode;
}
llvm::MDNode *CodeGenTBAA::getAccessTagInfo(TBAAAccessInfo Info) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.h
index e8e006f41616..a65963596fe9 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTBAA.h
@@ -29,7 +29,6 @@ namespace clang {
class Type;
namespace CodeGen {
-class CGRecordLayout;
// TBAAAccessKind - A kind of TBAA memory access descriptor.
enum class TBAAAccessKind : unsigned {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypeCache.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypeCache.h
index f258234fb4d8..083d69214fb3 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypeCache.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypeCache.h
@@ -51,14 +51,11 @@ struct CodeGenTypeCache {
llvm::IntegerType *PtrDiffTy;
};
- /// void* in address space 0
+ /// void*, void** in address space 0
union {
+ llvm::PointerType *UnqualPtrTy;
llvm::PointerType *VoidPtrTy;
llvm::PointerType *Int8PtrTy;
- };
-
- /// void** in address space 0
- union {
llvm::PointerType *VoidPtrPtrTy;
llvm::PointerType *Int8PtrPtrTy;
};
@@ -69,6 +66,15 @@ struct CodeGenTypeCache {
llvm::PointerType *AllocaInt8PtrTy;
};
+ /// void* in default globals address space
+ union {
+ llvm::PointerType *GlobalsVoidPtrTy;
+ llvm::PointerType *GlobalsInt8PtrTy;
+ };
+
+ /// void* in the address space for constant globals
+ llvm::PointerType *ConstGlobalsPtrTy;
+
/// The size and alignment of the builtin C type 'int'. This comes
/// up enough in various ABI lowering tasks to be worth pre-computing.
union {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
index 9cb42941cb96..a6b51bfef876 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -25,6 +25,7 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"
+
using namespace clang;
using namespace CodeGen;
@@ -33,6 +34,7 @@ CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()),
TheABIInfo(cgm.getTargetCodeGenInfo().getABIInfo()) {
SkippedLayout = false;
+ LongDoubleReferenced = false;
}
CodeGenTypes::~CodeGenTypes() {
@@ -66,7 +68,7 @@ void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
if (RD->getDeclContext())
RD->printQualifiedName(OS, Policy);
else
- RD->printName(OS);
+ RD->printName(OS, Policy);
} else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
// FIXME: We should not have to check for a null decl context here.
// Right now we do it because the implicit Obj-C decls don't have one.
@@ -97,10 +99,18 @@ llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool ForBitField) {
llvm::Type *R = ConvertType(T);
- // If this is a bool type, or an ExtIntType in a bitfield representation,
- // map this integer to the target-specified size.
- if ((ForBitField && T->isExtIntType()) ||
- (!T->isExtIntType() && R->isIntegerTy(1)))
+ // Check for the boolean vector case.
+ if (T->isExtVectorBoolType()) {
+ auto *FixedVT = cast<llvm::FixedVectorType>(R);
+ // Pad to at least one byte.
+ uint64_t BytePadded = std::max<uint64_t>(FixedVT->getNumElements(), 8);
+ return llvm::IntegerType::get(FixedVT->getContext(), BytePadded);
+ }
+
+ // If this is a bool type, or a bit-precise integer type in a bitfield
+ // representation, map this integer to the target-specified size.
+ if ((ForBitField && T->isBitIntType()) ||
+ (!T->isBitIntType() && R->isIntegerTy(1)))
return llvm::IntegerType::get(getLLVMContext(),
(unsigned)Context.getTypeSize(T));
@@ -116,93 +126,9 @@ bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
return I != RecordDeclTypes.end() && !I->second->isOpaque();
}
-static bool
-isSafeToConvert(QualType T, CodeGenTypes &CGT,
- llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked);
-
-
-/// isSafeToConvert - Return true if it is safe to convert the specified record
-/// decl to IR and lay it out, false if doing so would cause us to get into a
-/// recursive compilation mess.
-static bool
-isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT,
- llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
- // If we have already checked this type (maybe the same type is used by-value
- // multiple times in multiple structure fields, don't check again.
- if (!AlreadyChecked.insert(RD).second)
- return true;
-
- const Type *Key = CGT.getContext().getTagDeclType(RD).getTypePtr();
-
- // If this type is already laid out, converting it is a noop.
- if (CGT.isRecordLayoutComplete(Key)) return true;
-
- // If this type is currently being laid out, we can't recursively compile it.
- if (CGT.isRecordBeingLaidOut(Key))
- return false;
-
- // If this type would require laying out bases that are currently being laid
- // out, don't do it. This includes virtual base classes which get laid out
- // when a class is translated, even though they aren't embedded by-value into
- // the class.
- if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
- for (const auto &I : CRD->bases())
- if (!isSafeToConvert(I.getType()->castAs<RecordType>()->getDecl(), CGT,
- AlreadyChecked))
- return false;
- }
-
- // If this type would require laying out members that are currently being laid
- // out, don't do it.
- for (const auto *I : RD->fields())
- if (!isSafeToConvert(I->getType(), CGT, AlreadyChecked))
- return false;
-
- // If there are no problems, lets do it.
- return true;
-}
-
-/// isSafeToConvert - Return true if it is safe to convert this field type,
-/// which requires the structure elements contained by-value to all be
-/// recursively safe to convert.
-static bool
-isSafeToConvert(QualType T, CodeGenTypes &CGT,
- llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
- // Strip off atomic type sugar.
- if (const auto *AT = T->getAs<AtomicType>())
- T = AT->getValueType();
-
- // If this is a record, check it.
- if (const auto *RT = T->getAs<RecordType>())
- return isSafeToConvert(RT->getDecl(), CGT, AlreadyChecked);
-
- // If this is an array, check the elements, which are embedded inline.
- if (const auto *AT = CGT.getContext().getAsArrayType(T))
- return isSafeToConvert(AT->getElementType(), CGT, AlreadyChecked);
-
- // Otherwise, there is no concern about transforming this. We only care about
- // things that are contained by-value in a structure that can have another
- // structure as a member.
- return true;
-}
-
-
-/// isSafeToConvert - Return true if it is safe to convert the specified record
-/// decl to IR and lay it out, false if doing so would cause us to get into a
-/// recursive compilation mess.
-static bool isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT) {
- // If no structs are being laid out, we can certainly do this one.
- if (CGT.noRecordsBeingLaidOut()) return true;
-
- llvm::SmallPtrSet<const RecordDecl*, 16> AlreadyChecked;
- return isSafeToConvert(RD, CGT, AlreadyChecked);
-}
-
/// isFuncParamTypeConvertible - Return true if the specified type in a
/// function parameter or result position can be converted to an IR type at this
-/// point. This boils down to being whether it is complete, as well as whether
-/// we've temporarily deferred expanding the type because we're in a recursive
-/// context.
+/// point. This boils down to being whether it is complete.
bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
// Some ABIs cannot have their member pointers represented in IR unless
// certain circumstances have been reached.
@@ -214,21 +140,7 @@ bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
if (!TT) return true;
// Incomplete types cannot be converted.
- if (TT->isIncompleteType())
- return false;
-
- // If this is an enum, then it is always safe to convert.
- const RecordType *RT = dyn_cast<RecordType>(TT);
- if (!RT) return true;
-
- // Otherwise, we have to be careful. If it is a struct that we're in the
- // process of expanding, then we can't convert the function type. That's ok
- // though because we must be in a pointer context under the struct, so we can
- // just convert it to a dummy type.
- //
- // We decide this by checking whether ConvertRecordDeclType returns us an
- // opaque type for a struct that we know is defined.
- return isSafeToConvert(RT->getDecl(), *this);
+ return !TT->isIncompleteType();
}
@@ -324,7 +236,6 @@ static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
assert(QFT.isCanonical());
- const Type *Ty = QFT.getTypePtr();
const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr());
// First, check whether we can build the full function type. If the
// function type depends on an incomplete type (e.g. a struct or enum), we
@@ -347,14 +258,6 @@ llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
return llvm::StructType::get(getLLVMContext());
}
- // While we're converting the parameter types for a function, we don't want
- // to recursively convert any pointed-to structs. Converting directly-used
- // structs is ok though.
- if (!RecordsBeingLaidOut.insert(Ty).second) {
- SkippedLayout = true;
- return llvm::StructType::get(getLLVMContext());
- }
-
// The function type can be built; call the appropriate routines to
// build it.
const CGFunctionInfo *FI;
@@ -380,14 +283,6 @@ llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
ResultType = GetFunctionType(*FI);
}
- RecordsBeingLaidOut.erase(Ty);
-
- if (SkippedLayout)
- TypeCache.clear();
-
- if (RecordsBeingLaidOut.empty())
- while (!DeferredRecords.empty())
- ConvertRecordDeclType(DeferredRecords.pop_back_val());
return ResultType;
}
@@ -415,11 +310,16 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
if (const RecordType *RT = dyn_cast<RecordType>(Ty))
return ConvertRecordDeclType(RT->getDecl());
- // See if type is already cached.
- llvm::DenseMap<const Type *, llvm::Type *>::iterator TCI = TypeCache.find(Ty);
- // If type is found in map then use it. Otherwise, convert type T.
+ llvm::Type *CachedType = nullptr;
+ auto TCI = TypeCache.find(Ty);
if (TCI != TypeCache.end())
- return TCI->second;
+ CachedType = TCI->second;
+ // With expensive checks, check that the type we compute matches the
+ // cached type.
+#ifndef EXPENSIVE_CHECKS
+ if (CachedType)
+ return CachedType;
+#endif
// If we don't have it in the cache, convert it now.
llvm::Type *ResultType = nullptr;
@@ -507,11 +407,14 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
Context.getLangOpts().NativeHalfType ||
!Context.getTargetInfo().useFP16ConversionIntrinsics());
break;
+ case BuiltinType::LongDouble:
+ LongDoubleReferenced = true;
+ LLVM_FALLTHROUGH;
case BuiltinType::BFloat16:
case BuiltinType::Float:
case BuiltinType::Double:
- case BuiltinType::LongDouble:
case BuiltinType::Float128:
+ case BuiltinType::Ibm128:
ResultType = getTypeForFormat(getLLVMContext(),
Context.getFloatTypeSemantics(T),
/* UseNativeHalf = */ false);
@@ -519,7 +422,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
case BuiltinType::NullPtr:
// Model std::nullptr_t as i8*
- ResultType = llvm::Type::getInt8PtrTy(getLLVMContext());
+ ResultType = llvm::PointerType::getUnqual(getLLVMContext());
break;
case BuiltinType::UInt128:
@@ -573,6 +476,8 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
case BuiltinType::SveInt64x4:
case BuiltinType::SveUint64x4:
case BuiltinType::SveBool:
+ case BuiltinType::SveBoolx2:
+ case BuiltinType::SveBoolx4:
case BuiltinType::SveFloat16:
case BuiltinType::SveFloat16x2:
case BuiltinType::SveFloat16x3:
@@ -595,6 +500,8 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
Info.EC.getKnownMinValue() *
Info.NumVectors);
}
+ case BuiltinType::SveCount:
+ return llvm::TargetExtType::get(getLLVMContext(), "aarch64.svcount");
#define PPC_VECTOR_TYPE(Name, Id, Size) \
case BuiltinType::Id: \
ResultType = \
@@ -603,14 +510,31 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
- {
- ASTContext::BuiltinVectorTypeInfo Info =
- Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
- return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
- Info.EC.getKnownMinValue() *
- Info.NumVectors);
- }
- case BuiltinType::Dependent:
+ {
+ ASTContext::BuiltinVectorTypeInfo Info =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
+ // Tuple types are expressed as aggregregate types of the same scalable
+ // vector type (e.g. vint32m1x2_t is two vint32m1_t, which is {<vscale x
+ // 2 x i32>, <vscale x 2 x i32>}).
+ if (Info.NumVectors != 1) {
+ llvm::Type *EltTy = llvm::ScalableVectorType::get(
+ ConvertType(Info.ElementType), Info.EC.getKnownMinValue());
+ llvm::SmallVector<llvm::Type *, 4> EltTys(Info.NumVectors, EltTy);
+ return llvm::StructType::get(getLLVMContext(), EltTys);
+ }
+ return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
+ Info.EC.getKnownMinValue() *
+ Info.NumVectors);
+ }
+#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \
+ case BuiltinType::Id: { \
+ if (BuiltinType::Id == BuiltinType::WasmExternRef) \
+ ResultType = CGM.getTargetCodeGenInfo().getWasmExternrefReferenceType(); \
+ else \
+ llvm_unreachable("Unexpected wasm reference builtin type!"); \
+ } break;
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
+ case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
case BuiltinType::Id:
@@ -631,23 +555,15 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
case Type::RValueReference: {
const ReferenceType *RTy = cast<ReferenceType>(Ty);
QualType ETy = RTy->getPointeeType();
- llvm::Type *PointeeType = ConvertTypeForMem(ETy);
- unsigned AS = Context.getTargetAddressSpace(ETy);
- ResultType = llvm::PointerType::get(PointeeType, AS);
+ unsigned AS = getTargetAddressSpace(ETy);
+ ResultType = llvm::PointerType::get(getLLVMContext(), AS);
break;
}
case Type::Pointer: {
const PointerType *PTy = cast<PointerType>(Ty);
QualType ETy = PTy->getPointeeType();
- llvm::Type *PointeeType = ConvertTypeForMem(ETy);
- if (PointeeType->isVoidTy())
- PointeeType = llvm::Type::getInt8Ty(getLLVMContext());
-
- unsigned AS = PointeeType->isFunctionTy()
- ? getDataLayout().getProgramAddressSpace()
- : Context.getTargetAddressSpace(ETy);
-
- ResultType = llvm::PointerType::get(PointeeType, AS);
+ unsigned AS = getTargetAddressSpace(ETy);
+ ResultType = llvm::PointerType::get(getLLVMContext(), AS);
break;
}
@@ -690,9 +606,12 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
}
case Type::ExtVector:
case Type::Vector: {
- const VectorType *VT = cast<VectorType>(Ty);
- ResultType = llvm::FixedVectorType::get(ConvertType(VT->getElementType()),
- VT->getNumElements());
+ const auto *VT = cast<VectorType>(Ty);
+ // An ext_vector_type of Bool is really a vector of bits.
+ llvm::Type *IRElemTy = VT->isExtVectorBoolType()
+ ? llvm::Type::getInt1Ty(getLLVMContext())
+ : ConvertType(VT->getElementType());
+ ResultType = llvm::FixedVectorType::get(IRElemTy, VT->getNumElements());
break;
}
case Type::ConstantMatrix: {
@@ -721,15 +640,9 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
break;
}
- case Type::ObjCObjectPointer: {
- // Protocol qualifications do not influence the LLVM type, we just return a
- // pointer to the underlying interface type. We don't need to worry about
- // recursive conversion.
- llvm::Type *T =
- ConvertTypeForMem(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
- ResultType = T->getPointerTo();
+ case Type::ObjCObjectPointer:
+ ResultType = llvm::PointerType::getUnqual(getLLVMContext());
break;
- }
case Type::Enum: {
const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
@@ -743,20 +656,26 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
}
case Type::BlockPointer: {
+ // Block pointers lower to function type. For function type,
+ // getTargetAddressSpace() returns default address space for
+ // function pointer i.e. program address space. Therefore, for block
+ // pointers, it is important to pass the pointee AST address space when
+ // calling getTargetAddressSpace(), to ensure that we get the LLVM IR
+ // address space for data pointers and not function pointers.
const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
- llvm::Type *PointeeType = CGM.getLangOpts().OpenCL
- ? CGM.getGenericBlockLiteralType()
- : ConvertTypeForMem(FTy);
- unsigned AS = Context.getTargetAddressSpace(FTy);
- ResultType = llvm::PointerType::get(PointeeType, AS);
+ unsigned AS = Context.getTargetAddressSpace(FTy.getAddressSpace());
+ ResultType = llvm::PointerType::get(getLLVMContext(), AS);
break;
}
case Type::MemberPointer: {
auto *MPTy = cast<MemberPointerType>(Ty);
if (!getCXXABI().isMemberPointerConvertible(MPTy)) {
- RecordsWithOpaqueMemberPointers.insert(MPTy->getClass());
- ResultType = llvm::StructType::create(getLLVMContext());
+ auto *C = MPTy->getClass();
+ auto Insertion = RecordsWithOpaqueMemberPointers.insert({C, nullptr});
+ if (Insertion.second)
+ Insertion.first->second = llvm::StructType::create(getLLVMContext());
+ ResultType = Insertion.first->second;
} else {
ResultType = getCXXABI().ConvertMemberPointerType(MPTy);
}
@@ -776,8 +695,8 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
ResultType,
llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8)
};
- ResultType = llvm::StructType::get(getLLVMContext(),
- llvm::makeArrayRef(elts));
+ ResultType =
+ llvm::StructType::get(getLLVMContext(), llvm::ArrayRef(elts));
}
break;
}
@@ -785,14 +704,16 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty));
break;
}
- case Type::ExtInt: {
- const auto &EIT = cast<ExtIntType>(Ty);
+ case Type::BitInt: {
+ const auto &EIT = cast<BitIntType>(Ty);
ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits());
break;
}
}
assert(ResultType && "Didn't convert a type?");
+ assert((!CachedType || CachedType == ResultType) &&
+ "Cached type doesn't match computed type");
TypeCache[Ty] = ResultType;
return ResultType;
@@ -827,17 +748,6 @@ llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque())
return Ty;
- // If converting this type would cause us to infinitely loop, don't do it!
- if (!isSafeToConvert(RD, *this)) {
- DeferredRecords.push_back(RD);
- return Ty;
- }
-
- // Okay, this is a definition of a type. Compile the implementation now.
- bool InsertResult = RecordsBeingLaidOut.insert(Key).second;
- (void)InsertResult;
- assert(InsertResult && "Recursively compiling a struct?");
-
// Force conversion of non-virtual base classes recursively.
if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
for (const auto &I : CRD->bases()) {
@@ -850,22 +760,12 @@ llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
std::unique_ptr<CGRecordLayout> Layout = ComputeRecordLayout(RD, Ty);
CGRecordLayouts[Key] = std::move(Layout);
- // We're done laying out this struct.
- bool EraseResult = RecordsBeingLaidOut.erase(Key); (void)EraseResult;
- assert(EraseResult && "struct not in RecordsBeingLaidOut set?");
-
// If this struct blocked a FunctionType conversion, then recompute whatever
// was derived from that.
// FIXME: This is hugely overconservative.
if (SkippedLayout)
TypeCache.clear();
- // If we're done converting the outer-most record, then convert any deferred
- // structs as well.
- if (RecordsBeingLaidOut.empty())
- while (!DeferredRecords.empty())
- ConvertRecordDeclType(DeferredRecords.pop_back_val());
-
return Ty;
}
@@ -924,3 +824,13 @@ bool CodeGenTypes::isZeroInitializable(QualType T) {
bool CodeGenTypes::isZeroInitializable(const RecordDecl *RD) {
return getCGRecordLayout(RD).isZeroInitializable();
}
+
+unsigned CodeGenTypes::getTargetAddressSpace(QualType T) const {
+ // Return the address space for the type. If the type is a
+ // function type without an address space qualifier, the
+ // program address space is used. Otherwise, the target picks
+ // the best address space based on the type information
+ return T->isFunctionType() && !T.hasAddressSpace()
+ ? getDataLayout().getProgramAddressSpace()
+ : getContext().getTargetAddressSpace(T.getAddressSpace());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h
index f8f7542e4c83..01c0c673795c 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h
@@ -31,14 +31,9 @@ namespace clang {
class ASTContext;
template <typename> class CanQual;
class CXXConstructorDecl;
-class CXXDestructorDecl;
class CXXMethodDecl;
class CodeGenOptions;
-class FieldDecl;
class FunctionProtoType;
-class ObjCInterfaceDecl;
-class ObjCIvarDecl;
-class PointerType;
class QualType;
class RecordDecl;
class TagDecl;
@@ -81,13 +76,7 @@ class CodeGenTypes {
llvm::DenseMap<const Type*, llvm::StructType *> RecordDeclTypes;
/// Hold memoized CGFunctionInfo results.
- llvm::FoldingSet<CGFunctionInfo> FunctionInfos;
-
- /// This set keeps track of records that we're currently converting
- /// to an IR type. For example, when converting:
- /// struct A { struct B { int x; } } when processing 'x', the 'A' and 'B'
- /// types will be in this set.
- llvm::SmallPtrSet<const Type*, 4> RecordsBeingLaidOut;
+ llvm::FoldingSet<CGFunctionInfo> FunctionInfos{FunctionInfosLog2InitSize};
llvm::SmallPtrSet<const CGFunctionInfo*, 4> FunctionsBeingProcessed;
@@ -95,14 +84,16 @@ class CodeGenTypes {
/// a recursive struct conversion, set this to true.
bool SkippedLayout;
- SmallVector<const RecordDecl *, 8> DeferredRecords;
+ /// True if any instance of long double types are used.
+ bool LongDoubleReferenced;
/// This map keeps cache of llvm::Types and maps clang::Type to
/// corresponding llvm::Type.
llvm::DenseMap<const Type *, llvm::Type *> TypeCache;
- llvm::SmallSet<const Type *, 8> RecordsWithOpaqueMemberPointers;
+ llvm::DenseMap<const Type *, llvm::Type *> RecordsWithOpaqueMemberPointers;
+ static constexpr unsigned FunctionInfosLog2InitSize = 9;
/// Helper for ConvertType.
llvm::Type *ConvertFunctionTypeInternal(QualType FT);
@@ -113,6 +104,7 @@ public:
const llvm::DataLayout &getDataLayout() const {
return TheModule.getDataLayout();
}
+ CodeGenModule &getCGM() const { return CGM; }
ASTContext &getContext() const { return Context; }
const ABIInfo &getABIInfo() const { return TheABIInfo; }
const TargetInfo &getTarget() const { return Target; }
@@ -263,13 +255,11 @@ public:
/// this.
///
/// \param argTypes - must all actually be canonical as params
- const CGFunctionInfo &arrangeLLVMFunctionInfo(CanQualType returnType,
- bool instanceMethod,
- bool chainCall,
- ArrayRef<CanQualType> argTypes,
- FunctionType::ExtInfo info,
- ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
- RequiredArgs args);
+ const CGFunctionInfo &arrangeLLVMFunctionInfo(
+ CanQualType returnType, FnInfoOpts opts, ArrayRef<CanQualType> argTypes,
+ FunctionType::ExtInfo info,
+ ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
+ RequiredArgs args);
/// Compute a new LLVM record layout object for the given record.
std::unique_ptr<CGRecordLayout> ComputeRecordLayout(const RecordDecl *D,
@@ -302,14 +292,9 @@ public: // These are internal details of CGT that shouldn't be used externally.
/// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
bool isZeroInitializable(const RecordDecl *RD);
+ bool isLongDoubleReferenced() const { return LongDoubleReferenced; }
bool isRecordLayoutComplete(const Type *Ty) const;
- bool noRecordsBeingLaidOut() const {
- return RecordsBeingLaidOut.empty();
- }
- bool isRecordBeingLaidOut(const Type *Ty) const {
- return RecordsBeingLaidOut.count(Ty);
- }
-
+ unsigned getTargetAddressSpace(QualType T) const;
};
} // end namespace CodeGen
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ConstantEmitter.h b/contrib/llvm-project/clang/lib/CodeGen/ConstantEmitter.h
index 188b82e56f53..a55da0dcad79 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ConstantEmitter.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/ConstantEmitter.h
@@ -42,7 +42,7 @@ private:
/// The AST address space where this (non-abstract) initializer is going.
/// Used for generating appropriate placeholders.
- LangAS DestAddressSpace;
+ LangAS DestAddressSpace = LangAS::Default;
llvm::SmallVector<std::pair<llvm::Constant *, llvm::GlobalVariable*>, 4>
PlaceholderAddresses;
@@ -67,6 +67,9 @@ public:
return Abstract;
}
+ bool isInConstantContext() const { return InConstantContext; }
+ void setInConstantContext(bool var) { InConstantContext = var; }
+
/// Try to emit the initiaizer of the given declaration as an abstract
/// constant. If this succeeds, the emission must be finalized.
llvm::Constant *tryEmitForInitializer(const VarDecl &D);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ConstantInitBuilder.cpp b/contrib/llvm-project/clang/lib/CodeGen/ConstantInitBuilder.cpp
index 24e3ca19709c..3cf69f3b6415 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ConstantInitBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ConstantInitBuilder.cpp
@@ -114,7 +114,7 @@ void ConstantInitBuilderBase::abandon(size_t newEnd) {
if (newEnd == 0) {
for (auto &entry : SelfReferences) {
auto dummy = entry.Dummy;
- dummy->replaceAllUsesWith(llvm::UndefValue::get(dummy->getType()));
+ dummy->replaceAllUsesWith(llvm::PoisonValue::get(dummy->getType()));
dummy->eraseFromParent();
}
SelfReferences.clear();
@@ -209,8 +209,7 @@ ConstantAggregateBuilderBase::addPlaceholderWithSize(llvm::Type *type) {
// Advance the offset past that field.
auto &layout = Builder.CGM.getDataLayout();
if (!Packed)
- offset = offset.alignTo(CharUnits::fromQuantity(
- layout.getABITypeAlignment(type)));
+ offset = offset.alignTo(CharUnits::fromQuantity(layout.getABITypeAlign(type)));
offset += CharUnits::fromQuantity(layout.getTypeStoreSize(type));
CachedOffsetEnd = Builder.Buffer.size();
@@ -249,8 +248,8 @@ CharUnits ConstantAggregateBuilderBase::getOffsetFromGlobalTo(size_t end) const{
"cannot compute offset when a placeholder is present");
llvm::Type *elementType = element->getType();
if (!Packed)
- offset = offset.alignTo(CharUnits::fromQuantity(
- layout.getABITypeAlignment(elementType)));
+ offset = offset.alignTo(
+ CharUnits::fromQuantity(layout.getABITypeAlign(elementType)));
offset += CharUnits::fromQuantity(layout.getTypeStoreSize(elementType));
} while (++cacheEnd != end);
}
@@ -268,7 +267,7 @@ llvm::Constant *ConstantAggregateBuilderBase::finishArray(llvm::Type *eltTy) {
assert((Begin < buffer.size() ||
(Begin == buffer.size() && eltTy))
&& "didn't add any array elements without element type");
- auto elts = llvm::makeArrayRef(buffer).slice(Begin);
+ auto elts = llvm::ArrayRef(buffer).slice(Begin);
if (!eltTy) eltTy = elts[0]->getType();
auto type = llvm::ArrayType::get(eltTy, elts.size());
auto constant = llvm::ConstantArray::get(type, elts);
@@ -281,7 +280,7 @@ ConstantAggregateBuilderBase::finishStruct(llvm::StructType *ty) {
markFinished();
auto &buffer = getBuffer();
- auto elts = llvm::makeArrayRef(buffer).slice(Begin);
+ auto elts = llvm::ArrayRef(buffer).slice(Begin);
if (ty == nullptr && elts.empty())
ty = llvm::StructType::get(Builder.CGM.getLLVMContext(), {}, Packed);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp b/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp
index 8a11da600e4a..0c43317642bc 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp
@@ -17,7 +17,6 @@
#include "clang/Basic/FileManager.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Lex/Lexer.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ProfileData/Coverage/CoverageMapping.h"
@@ -26,6 +25,7 @@
#include "llvm/ProfileData/InstrProfReader.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
+#include <optional>
// This selects the coverage mapping format defined when `InstrProfData.inc`
// is textually included.
@@ -37,6 +37,11 @@ static llvm::cl::opt<bool> EmptyLineCommentCoverage(
"disable it on test)"),
llvm::cl::init(true), llvm::cl::Hidden);
+llvm::cl::opt<bool> SystemHeadersCoverage(
+ "system-headers-coverage",
+ llvm::cl::desc("Enable collecting coverage from system headers"),
+ llvm::cl::init(false), llvm::cl::Hidden);
+
using namespace clang;
using namespace CodeGen;
using namespace llvm::coverage;
@@ -60,26 +65,27 @@ CoverageMappingModuleGen::setUpCoverageCallbacks(Preprocessor &PP) {
return CoverageInfo;
}
-void CoverageSourceInfo::AddSkippedRange(SourceRange Range) {
+void CoverageSourceInfo::AddSkippedRange(SourceRange Range,
+ SkippedRange::Kind RangeKind) {
if (EmptyLineCommentCoverage && !SkippedRanges.empty() &&
PrevTokLoc == SkippedRanges.back().PrevTokLoc &&
SourceMgr.isWrittenInSameFile(SkippedRanges.back().Range.getEnd(),
Range.getBegin()))
SkippedRanges.back().Range.setEnd(Range.getEnd());
else
- SkippedRanges.push_back({Range, PrevTokLoc});
+ SkippedRanges.push_back({Range, RangeKind, PrevTokLoc});
}
void CoverageSourceInfo::SourceRangeSkipped(SourceRange Range, SourceLocation) {
- AddSkippedRange(Range);
+ AddSkippedRange(Range, SkippedRange::PPIfElse);
}
void CoverageSourceInfo::HandleEmptyline(SourceRange Range) {
- AddSkippedRange(Range);
+ AddSkippedRange(Range, SkippedRange::EmptyLine);
}
bool CoverageSourceInfo::HandleComment(Preprocessor &PP, SourceRange Range) {
- AddSkippedRange(Range);
+ AddSkippedRange(Range, SkippedRange::Comment);
return false;
}
@@ -89,6 +95,8 @@ void CoverageSourceInfo::updateNextTokLoc(SourceLocation Loc) {
}
namespace {
+using MCDCConditionID = CounterMappingRegion::MCDCConditionID;
+using MCDCParameters = CounterMappingRegion::MCDCParameters;
/// A region of source code that can be mapped to a counter.
class SourceMappingRegion {
@@ -96,29 +104,46 @@ class SourceMappingRegion {
Counter Count;
/// Secondary Counter used for Branch Regions for "False" branches.
- Optional<Counter> FalseCount;
+ std::optional<Counter> FalseCount;
+
+ /// Parameters used for Modified Condition/Decision Coverage
+ MCDCParameters MCDCParams;
/// The region's starting location.
- Optional<SourceLocation> LocStart;
+ std::optional<SourceLocation> LocStart;
/// The region's ending location.
- Optional<SourceLocation> LocEnd;
+ std::optional<SourceLocation> LocEnd;
/// Whether this region is a gap region. The count from a gap region is set
/// as the line execution count if there are no other regions on the line.
bool GapRegion;
-public:
- SourceMappingRegion(Counter Count, Optional<SourceLocation> LocStart,
- Optional<SourceLocation> LocEnd, bool GapRegion = false)
- : Count(Count), LocStart(LocStart), LocEnd(LocEnd), GapRegion(GapRegion) {
- }
+ /// Whetever this region is skipped ('if constexpr' or 'if consteval' untaken
+ /// branch, or anything skipped but not empty line / comments)
+ bool SkippedRegion;
- SourceMappingRegion(Counter Count, Optional<Counter> FalseCount,
- Optional<SourceLocation> LocStart,
- Optional<SourceLocation> LocEnd, bool GapRegion = false)
- : Count(Count), FalseCount(FalseCount), LocStart(LocStart),
- LocEnd(LocEnd), GapRegion(GapRegion) {}
+public:
+ SourceMappingRegion(Counter Count, std::optional<SourceLocation> LocStart,
+ std::optional<SourceLocation> LocEnd,
+ bool GapRegion = false)
+ : Count(Count), LocStart(LocStart), LocEnd(LocEnd), GapRegion(GapRegion),
+ SkippedRegion(false) {}
+
+ SourceMappingRegion(Counter Count, std::optional<Counter> FalseCount,
+ MCDCParameters MCDCParams,
+ std::optional<SourceLocation> LocStart,
+ std::optional<SourceLocation> LocEnd,
+ bool GapRegion = false)
+ : Count(Count), FalseCount(FalseCount), MCDCParams(MCDCParams),
+ LocStart(LocStart), LocEnd(LocEnd), GapRegion(GapRegion),
+ SkippedRegion(false) {}
+
+ SourceMappingRegion(MCDCParameters MCDCParams,
+ std::optional<SourceLocation> LocStart,
+ std::optional<SourceLocation> LocEnd)
+ : MCDCParams(MCDCParams), LocStart(LocStart), LocEnd(LocEnd),
+ GapRegion(false), SkippedRegion(false) {}
const Counter &getCounter() const { return Count; }
@@ -129,7 +154,7 @@ public:
void setCounter(Counter C) { Count = C; }
- bool hasStartLoc() const { return LocStart.hasValue(); }
+ bool hasStartLoc() const { return LocStart.has_value(); }
void setStartLoc(SourceLocation Loc) { LocStart = Loc; }
@@ -138,7 +163,7 @@ public:
return *LocStart;
}
- bool hasEndLoc() const { return LocEnd.hasValue(); }
+ bool hasEndLoc() const { return LocEnd.has_value(); }
void setEndLoc(SourceLocation Loc) {
assert(Loc.isValid() && "Setting an invalid end location");
@@ -154,7 +179,15 @@ public:
void setGap(bool Gap) { GapRegion = Gap; }
- bool isBranch() const { return FalseCount.hasValue(); }
+ bool isSkipped() const { return SkippedRegion; }
+
+ void setSkipped(bool Skipped) { SkippedRegion = Skipped; }
+
+ bool isBranch() const { return FalseCount.has_value(); }
+
+ bool isMCDCDecision() const { return MCDCParams.NumConditions != 0; }
+
+ const MCDCParameters &getMCDCParams() const { return MCDCParams; }
};
/// Spelling locations for the start and end of a source region.
@@ -298,8 +331,9 @@ public:
if (!Visited.insert(File).second)
continue;
- // Do not map FileID's associated with system headers.
- if (SM.isInSystemHeader(SM.getSpellingLoc(Loc)))
+ // Do not map FileID's associated with system headers unless collecting
+ // coverage from system headers is explicitly enabled.
+ if (!SystemHeadersCoverage && SM.isInSystemHeader(SM.getSpellingLoc(Loc)))
continue;
unsigned Depth = 0;
@@ -313,33 +347,35 @@ public:
for (const auto &FL : FileLocs) {
SourceLocation Loc = FL.first;
FileID SpellingFile = SM.getDecomposedSpellingLoc(Loc).first;
- auto Entry = SM.getFileEntryForID(SpellingFile);
+ auto Entry = SM.getFileEntryRefForID(SpellingFile);
if (!Entry)
continue;
FileIDMapping[SM.getFileID(Loc)] = std::make_pair(Mapping.size(), Loc);
- Mapping.push_back(CVM.getFileID(Entry));
+ Mapping.push_back(CVM.getFileID(*Entry));
}
}
/// Get the coverage mapping file ID for \c Loc.
///
- /// If such file id doesn't exist, return None.
- Optional<unsigned> getCoverageFileID(SourceLocation Loc) {
+ /// If such file id doesn't exist, return std::nullopt.
+ std::optional<unsigned> getCoverageFileID(SourceLocation Loc) {
auto Mapping = FileIDMapping.find(SM.getFileID(Loc));
if (Mapping != FileIDMapping.end())
return Mapping->second.first;
- return None;
+ return std::nullopt;
}
/// This shrinks the skipped range if it spans a line that contains a
/// non-comment token. If shrinking the skipped range would make it empty,
- /// this returns None.
- Optional<SpellingRegion> adjustSkippedRange(SourceManager &SM,
- SourceLocation LocStart,
- SourceLocation LocEnd,
- SourceLocation PrevTokLoc,
- SourceLocation NextTokLoc) {
+ /// this returns std::nullopt.
+ /// Note this function can potentially be expensive because
+ /// getSpellingLineNumber uses getLineNumber, which is expensive.
+ std::optional<SpellingRegion> adjustSkippedRange(SourceManager &SM,
+ SourceLocation LocStart,
+ SourceLocation LocEnd,
+ SourceLocation PrevTokLoc,
+ SourceLocation NextTokLoc) {
SpellingRegion SR{SM, LocStart, LocEnd};
SR.ColumnStart = 1;
if (PrevTokLoc.isValid() && SM.isWrittenInSameFile(LocStart, PrevTokLoc) &&
@@ -352,7 +388,7 @@ public:
}
if (SR.isInSourceOrder())
return SR;
- return None;
+ return std::nullopt;
}
/// Gather all the regions that were skipped by the preprocessor
@@ -382,9 +418,14 @@ public:
auto CovFileID = getCoverageFileID(LocStart);
if (!CovFileID)
continue;
- Optional<SpellingRegion> SR =
- adjustSkippedRange(SM, LocStart, LocEnd, I.PrevTokLoc, I.NextTokLoc);
- if (!SR.hasValue())
+ std::optional<SpellingRegion> SR;
+ if (I.isComment())
+ SR = adjustSkippedRange(SM, LocStart, LocEnd, I.PrevTokLoc,
+ I.NextTokLoc);
+ else if (I.isPPIfElse() || I.isEmptyLine())
+ SR = {SM, LocStart, LocEnd};
+
+ if (!SR)
continue;
auto Region = CounterMappingRegion::makeSkipped(
*CovFileID, SR->LineStart, SR->ColumnStart, SR->LineEnd,
@@ -406,8 +447,10 @@ public:
SourceLocation LocStart = Region.getBeginLoc();
assert(SM.getFileID(LocStart).isValid() && "region in invalid file");
- // Ignore regions from system headers.
- if (SM.isInSystemHeader(SM.getSpellingLoc(LocStart)))
+ // Ignore regions from system headers unless collecting coverage from
+ // system headers is explicitly enabled.
+ if (!SystemHeadersCoverage &&
+ SM.isInSystemHeader(SM.getSpellingLoc(LocStart)))
continue;
auto CovFileID = getCoverageFileID(LocStart);
@@ -434,10 +477,19 @@ public:
MappingRegions.push_back(CounterMappingRegion::makeGapRegion(
Region.getCounter(), *CovFileID, SR.LineStart, SR.ColumnStart,
SR.LineEnd, SR.ColumnEnd));
+ } else if (Region.isSkipped()) {
+ MappingRegions.push_back(CounterMappingRegion::makeSkipped(
+ *CovFileID, SR.LineStart, SR.ColumnStart, SR.LineEnd,
+ SR.ColumnEnd));
} else if (Region.isBranch()) {
MappingRegions.push_back(CounterMappingRegion::makeBranchRegion(
- Region.getCounter(), Region.getFalseCounter(), *CovFileID,
- SR.LineStart, SR.ColumnStart, SR.LineEnd, SR.ColumnEnd));
+ Region.getCounter(), Region.getFalseCounter(),
+ Region.getMCDCParams(), *CovFileID, SR.LineStart, SR.ColumnStart,
+ SR.LineEnd, SR.ColumnEnd));
+ } else if (Region.isMCDCDecision()) {
+ MappingRegions.push_back(CounterMappingRegion::makeDecisionRegion(
+ Region.getMCDCParams(), *CovFileID, SR.LineStart, SR.ColumnStart,
+ SR.LineEnd, SR.ColumnEnd));
} else {
MappingRegions.push_back(CounterMappingRegion::makeRegion(
Region.getCounter(), *CovFileID, SR.LineStart, SR.ColumnStart,
@@ -519,11 +571,237 @@ struct EmptyCoverageMappingBuilder : public CoverageMappingBuilder {
if (MappingRegions.empty())
return;
- CoverageMappingWriter Writer(FileIDMapping, None, MappingRegions);
+ CoverageMappingWriter Writer(FileIDMapping, std::nullopt, MappingRegions);
Writer.write(OS);
}
};
+/// A wrapper object for maintaining stacks to track the resursive AST visitor
+/// walks for the purpose of assigning IDs to leaf-level conditions measured by
+/// MC/DC. The object is created with a reference to the MCDCBitmapMap that was
+/// created during the initial AST walk. The presence of a bitmap associated
+/// with a boolean expression (top-level logical operator nest) indicates that
+/// the boolean expression qualified for MC/DC. The resulting condition IDs
+/// are preserved in a map reference that is also provided during object
+/// creation.
+struct MCDCCoverageBuilder {
+
+ struct DecisionIDPair {
+ MCDCConditionID TrueID = 0;
+ MCDCConditionID FalseID = 0;
+ };
+
+ /// The AST walk recursively visits nested logical-AND or logical-OR binary
+ /// operator nodes and then visits their LHS and RHS children nodes. As this
+ /// happens, the algorithm will assign IDs to each operator's LHS and RHS side
+ /// as the walk moves deeper into the nest. At each level of the recursive
+ /// nest, the LHS and RHS may actually correspond to larger subtrees (not
+ /// leaf-conditions). If this is the case, when that node is visited, the ID
+ /// assigned to the subtree is re-assigned to its LHS, and a new ID is given
+ /// to its RHS. At the end of the walk, all leaf-level conditions will have a
+ /// unique ID -- keep in mind that the final set of IDs may not be in
+ /// numerical order from left to right.
+ ///
+ /// Example: "x = (A && B) || (C && D) || (D && F)"
+ ///
+ /// Visit Depth1:
+ /// (A && B) || (C && D) || (D && F)
+ /// ^-------LHS--------^ ^-RHS--^
+ /// ID=1 ID=2
+ ///
+ /// Visit LHS-Depth2:
+ /// (A && B) || (C && D)
+ /// ^-LHS--^ ^-RHS--^
+ /// ID=1 ID=3
+ ///
+ /// Visit LHS-Depth3:
+ /// (A && B)
+ /// LHS RHS
+ /// ID=1 ID=4
+ ///
+ /// Visit RHS-Depth3:
+ /// (C && D)
+ /// LHS RHS
+ /// ID=3 ID=5
+ ///
+ /// Visit RHS-Depth2: (D && F)
+ /// LHS RHS
+ /// ID=2 ID=6
+ ///
+ /// Visit Depth1:
+ /// (A && B) || (C && D) || (D && F)
+ /// ID=1 ID=4 ID=3 ID=5 ID=2 ID=6
+ ///
+ /// A node ID of '0' always means MC/DC isn't being tracked.
+ ///
+ /// As the AST walk proceeds recursively, the algorithm will also use a stack
+ /// to track the IDs of logical-AND and logical-OR operations on the RHS so
+ /// that it can be determined which nodes are executed next, depending on how
+ /// a LHS or RHS of a logical-AND or logical-OR is evaluated. This
+ /// information relies on the assigned IDs and are embedded within the
+ /// coverage region IDs of each branch region associated with a leaf-level
+ /// condition. This information helps the visualization tool reconstruct all
+ /// possible test vectors for the purposes of MC/DC analysis. If a "next" node
+ /// ID is '0', it means it's the end of the test vector. The following rules
+ /// are used:
+ ///
+ /// For logical-AND ("LHS && RHS"):
+ /// - If LHS is TRUE, execution goes to the RHS node.
+ /// - If LHS is FALSE, execution goes to the LHS node of the next logical-OR.
+ /// If that does not exist, execution exits (ID == 0).
+ ///
+ /// - If RHS is TRUE, execution goes to LHS node of the next logical-AND.
+ /// If that does not exist, execution exits (ID == 0).
+ /// - If RHS is FALSE, execution goes to the LHS node of the next logical-OR.
+ /// If that does not exist, execution exits (ID == 0).
+ ///
+ /// For logical-OR ("LHS || RHS"):
+ /// - If LHS is TRUE, execution goes to the LHS node of the next logical-AND.
+ /// If that does not exist, execution exits (ID == 0).
+ /// - If LHS is FALSE, execution goes to the RHS node.
+ ///
+ /// - If RHS is TRUE, execution goes to LHS node of the next logical-AND.
+ /// If that does not exist, execution exits (ID == 0).
+ /// - If RHS is FALSE, execution goes to the LHS node of the next logical-OR.
+ /// If that does not exist, execution exits (ID == 0).
+ ///
+ /// Finally, the condition IDs are also used when instrumenting the code to
+ /// indicate a unique offset into a temporary bitmap that represents the true
+ /// or false evaluation of that particular condition.
+ ///
+ /// NOTE regarding the use of CodeGenFunction::stripCond(). Even though, for
+ /// simplicity, parentheses and unary logical-NOT operators are considered
+ /// part of their underlying condition for both MC/DC and branch coverage, the
+ /// condition IDs themselves are assigned and tracked using the underlying
+ /// condition itself. This is done solely for consistency since parentheses
+ /// and logical-NOTs are ignored when checking whether the condition is
+ /// actually an instrumentable condition. This can also make debugging a bit
+ /// easier.
+
+private:
+ CodeGenModule &CGM;
+
+ llvm::SmallVector<DecisionIDPair> DecisionStack;
+ llvm::DenseMap<const Stmt *, MCDCConditionID> &CondIDs;
+ llvm::DenseMap<const Stmt *, unsigned> &MCDCBitmapMap;
+ MCDCConditionID NextID = 1;
+ bool NotMapped = false;
+
+ /// Represent a sentinel value of [0,0] for the bottom of DecisionStack.
+ static constexpr DecisionIDPair DecisionStackSentinel{0, 0};
+
+ /// Is this a logical-AND operation?
+ bool isLAnd(const BinaryOperator *E) const {
+ return E->getOpcode() == BO_LAnd;
+ }
+
+public:
+ MCDCCoverageBuilder(CodeGenModule &CGM,
+ llvm::DenseMap<const Stmt *, MCDCConditionID> &CondIDMap,
+ llvm::DenseMap<const Stmt *, unsigned> &MCDCBitmapMap)
+ : CGM(CGM), DecisionStack(1, DecisionStackSentinel), CondIDs(CondIDMap),
+ MCDCBitmapMap(MCDCBitmapMap) {}
+
+ /// Return whether the build of the control flow map is at the top-level
+ /// (root) of a logical operator nest in a boolean expression prior to the
+ /// assignment of condition IDs.
+ bool isIdle() const { return (NextID == 1 && !NotMapped); }
+
+ /// Return whether any IDs have been assigned in the build of the control
+ /// flow map, indicating that the map is being generated for this boolean
+ /// expression.
+ bool isBuilding() const { return (NextID > 1); }
+
+ /// Set the given condition's ID.
+ void setCondID(const Expr *Cond, MCDCConditionID ID) {
+ CondIDs[CodeGenFunction::stripCond(Cond)] = ID;
+ }
+
+ /// Return the ID of a given condition.
+ MCDCConditionID getCondID(const Expr *Cond) const {
+ auto I = CondIDs.find(CodeGenFunction::stripCond(Cond));
+ if (I == CondIDs.end())
+ return 0;
+ else
+ return I->second;
+ }
+
+ /// Return the LHS Decision ([0,0] if not set).
+ const DecisionIDPair &back() const { return DecisionStack.back(); }
+
+ /// Push the binary operator statement to track the nest level and assign IDs
+ /// to the operator's LHS and RHS. The RHS may be a larger subtree that is
+ /// broken up on successive levels.
+ void pushAndAssignIDs(const BinaryOperator *E) {
+ if (!CGM.getCodeGenOpts().MCDCCoverage)
+ return;
+
+ // If binary expression is disqualified, don't do mapping.
+ if (!isBuilding() && !MCDCBitmapMap.contains(CodeGenFunction::stripCond(E)))
+ NotMapped = true;
+
+ // Don't go any further if we don't need to map condition IDs.
+ if (NotMapped)
+ return;
+
+ const DecisionIDPair &ParentDecision = DecisionStack.back();
+
+ // If the operator itself has an assigned ID, this means it represents a
+ // larger subtree. In this case, assign that ID to its LHS node. Its RHS
+ // will receive a new ID below. Otherwise, assign ID+1 to LHS.
+ if (CondIDs.contains(CodeGenFunction::stripCond(E)))
+ setCondID(E->getLHS(), getCondID(E));
+ else
+ setCondID(E->getLHS(), NextID++);
+
+ // Assign a ID+1 for the RHS.
+ MCDCConditionID RHSid = NextID++;
+ setCondID(E->getRHS(), RHSid);
+
+ // Push the LHS decision IDs onto the DecisionStack.
+ if (isLAnd(E))
+ DecisionStack.push_back({RHSid, ParentDecision.FalseID});
+ else
+ DecisionStack.push_back({ParentDecision.TrueID, RHSid});
+ }
+
+ /// Pop and return the LHS Decision ([0,0] if not set).
+ DecisionIDPair pop() {
+ if (!CGM.getCodeGenOpts().MCDCCoverage || NotMapped)
+ return DecisionStack.front();
+
+ assert(DecisionStack.size() > 1);
+ DecisionIDPair D = DecisionStack.back();
+ DecisionStack.pop_back();
+ return D;
+ }
+
+ /// Return the total number of conditions and reset the state. The number of
+ /// conditions is zero if the expression isn't mapped.
+ unsigned getTotalConditionsAndReset(const BinaryOperator *E) {
+ if (!CGM.getCodeGenOpts().MCDCCoverage)
+ return 0;
+
+ assert(!isIdle());
+ assert(DecisionStack.size() == 1);
+
+ // Reset state if not doing mapping.
+ if (NotMapped) {
+ NotMapped = false;
+ assert(NextID == 1);
+ return 0;
+ }
+
+ // Set number of conditions and reset.
+ unsigned TotalConds = NextID - 1;
+
+ // Reset ID back to beginning.
+ NextID = 1;
+
+ return TotalConds;
+ }
+};
+
/// A StmtVisitor that creates coverage mapping regions which map
/// from the source code locations to the PGO counters.
struct CounterCoverageMappingBuilder
@@ -532,8 +810,14 @@ struct CounterCoverageMappingBuilder
/// The map of statements to count values.
llvm::DenseMap<const Stmt *, unsigned> &CounterMap;
+ /// The map of statements to bitmap coverage object values.
+ llvm::DenseMap<const Stmt *, unsigned> &MCDCBitmapMap;
+
/// A stack of currently live regions.
- std::vector<SourceMappingRegion> RegionStack;
+ llvm::SmallVector<SourceMappingRegion> RegionStack;
+
+ /// An object to manage MCDC regions.
+ MCDCCoverageBuilder MCDCBuilder;
CounterExpressionBuilder Builder;
@@ -550,17 +834,18 @@ struct CounterCoverageMappingBuilder
Counter GapRegionCounter;
/// Return a counter for the subtraction of \c RHS from \c LHS
- Counter subtractCounters(Counter LHS, Counter RHS) {
- return Builder.subtract(LHS, RHS);
+ Counter subtractCounters(Counter LHS, Counter RHS, bool Simplify = true) {
+ return Builder.subtract(LHS, RHS, Simplify);
}
/// Return a counter for the sum of \c LHS and \c RHS.
- Counter addCounters(Counter LHS, Counter RHS) {
- return Builder.add(LHS, RHS);
+ Counter addCounters(Counter LHS, Counter RHS, bool Simplify = true) {
+ return Builder.add(LHS, RHS, Simplify);
}
- Counter addCounters(Counter C1, Counter C2, Counter C3) {
- return addCounters(addCounters(C1, C2), C3);
+ Counter addCounters(Counter C1, Counter C2, Counter C3,
+ bool Simplify = true) {
+ return addCounters(addCounters(C1, C2, Simplify), C3, Simplify);
}
/// Return the region counter for the given statement.
@@ -570,19 +855,49 @@ struct CounterCoverageMappingBuilder
return Counter::getCounter(CounterMap[S]);
}
+ unsigned getRegionBitmap(const Stmt *S) { return MCDCBitmapMap[S]; }
+
/// Push a region onto the stack.
///
/// Returns the index on the stack where the region was pushed. This can be
/// used with popRegions to exit a "scope", ending the region that was pushed.
- size_t pushRegion(Counter Count, Optional<SourceLocation> StartLoc = None,
- Optional<SourceLocation> EndLoc = None,
- Optional<Counter> FalseCount = None) {
-
- if (StartLoc && !FalseCount.hasValue()) {
+ size_t pushRegion(Counter Count,
+ std::optional<SourceLocation> StartLoc = std::nullopt,
+ std::optional<SourceLocation> EndLoc = std::nullopt,
+ std::optional<Counter> FalseCount = std::nullopt,
+ MCDCConditionID ID = 0, MCDCConditionID TrueID = 0,
+ MCDCConditionID FalseID = 0) {
+
+ if (StartLoc && !FalseCount) {
MostRecentLocation = *StartLoc;
}
- RegionStack.emplace_back(Count, FalseCount, StartLoc, EndLoc);
+ // If either of these locations is invalid, something elsewhere in the
+ // compiler has broken.
+ assert((!StartLoc || StartLoc->isValid()) && "Start location is not valid");
+ assert((!EndLoc || EndLoc->isValid()) && "End location is not valid");
+
+ // However, we can still recover without crashing.
+ // If either location is invalid, set it to std::nullopt to avoid
+ // letting users of RegionStack think that region has a valid start/end
+ // location.
+ if (StartLoc && StartLoc->isInvalid())
+ StartLoc = std::nullopt;
+ if (EndLoc && EndLoc->isInvalid())
+ EndLoc = std::nullopt;
+ RegionStack.emplace_back(Count, FalseCount,
+ MCDCParameters{0, 0, ID, TrueID, FalseID},
+ StartLoc, EndLoc);
+
+ return RegionStack.size() - 1;
+ }
+
+ size_t pushRegion(unsigned BitmapIdx, unsigned Conditions,
+ std::optional<SourceLocation> StartLoc = std::nullopt,
+ std::optional<SourceLocation> EndLoc = std::nullopt) {
+
+ RegionStack.emplace_back(MCDCParameters{BitmapIdx, Conditions}, StartLoc,
+ EndLoc);
return RegionStack.size() - 1;
}
@@ -604,7 +919,8 @@ struct CounterCoverageMappingBuilder
assert(RegionStack.size() >= ParentIndex && "parent not in stack");
while (RegionStack.size() > ParentIndex) {
SourceMappingRegion &Region = RegionStack.back();
- if (Region.hasStartLoc()) {
+ if (Region.hasStartLoc() &&
+ (Region.hasEndLoc() || RegionStack[ParentIndex].hasEndLoc())) {
SourceLocation StartLoc = Region.getBeginLoc();
SourceLocation EndLoc = Region.hasEndLoc()
? Region.getEndLoc()
@@ -671,7 +987,7 @@ struct CounterCoverageMappingBuilder
assert(SM.isWrittenInSameFile(Region.getBeginLoc(), EndLoc));
assert(SpellingRegion(SM, Region).isInSourceOrder());
SourceRegions.push_back(Region);
- }
+ }
RegionStack.pop_back();
}
}
@@ -708,11 +1024,15 @@ struct CounterCoverageMappingBuilder
return (Cond->EvaluateAsInt(Result, CVM.getCodeGenModule().getContext()));
}
+ using MCDCDecisionIDPair = MCDCCoverageBuilder::DecisionIDPair;
+
/// Create a Branch Region around an instrumentable condition for coverage
/// and add it to the function's SourceRegions. A branch region tracks a
/// "True" counter and a "False" counter for boolean expressions that
/// result in the generation of a branch.
- void createBranchRegion(const Expr *C, Counter TrueCnt, Counter FalseCnt) {
+ void
+ createBranchRegion(const Expr *C, Counter TrueCnt, Counter FalseCnt,
+ const MCDCDecisionIDPair &IDPair = MCDCDecisionIDPair()) {
// Check for NULL conditions.
if (!C)
return;
@@ -722,6 +1042,10 @@ struct CounterCoverageMappingBuilder
// function's SourceRegions) because it doesn't apply to any other source
// code other than the Condition.
if (CodeGenFunction::isInstrumentedCondition(C)) {
+ MCDCConditionID ID = MCDCBuilder.getCondID(C);
+ MCDCConditionID TrueID = IDPair.TrueID;
+ MCDCConditionID FalseID = IDPair.FalseID;
+
// If a condition can fold to true or false, the corresponding branch
// will be removed. Create a region with both counters hard-coded to
// zero. This allows us to visualize them in a special way.
@@ -730,13 +1054,21 @@ struct CounterCoverageMappingBuilder
// CodeGenFunction.c always returns false, but that is very heavy-handed.
if (ConditionFoldsToBool(C))
popRegions(pushRegion(Counter::getZero(), getStart(C), getEnd(C),
- Counter::getZero()));
+ Counter::getZero(), ID, TrueID, FalseID));
else
// Otherwise, create a region with the True counter and False counter.
- popRegions(pushRegion(TrueCnt, getStart(C), getEnd(C), FalseCnt));
+ popRegions(pushRegion(TrueCnt, getStart(C), getEnd(C), FalseCnt, ID,
+ TrueID, FalseID));
}
}
+ /// Create a Decision Region with a BitmapIdx and number of Conditions. This
+ /// type of region "contains" branch regions, one for each of the conditions.
+ /// The visualization tool will group everything together.
+ void createDecisionRegion(const Expr *C, unsigned BitmapIdx, unsigned Conds) {
+ popRegions(pushRegion(BitmapIdx, Conds, getStart(C), getEnd(C)));
+ }
+
/// Create a Branch Region around a SwitchCase for code coverage
/// and add it to the function's SourceRegions.
void createSwitchCaseRegion(const SwitchCase *SC, Counter TrueCnt,
@@ -751,13 +1083,11 @@ struct CounterCoverageMappingBuilder
/// is already added to \c SourceRegions.
bool isRegionAlreadyAdded(SourceLocation StartLoc, SourceLocation EndLoc,
bool isBranch = false) {
- return SourceRegions.rend() !=
- std::find_if(SourceRegions.rbegin(), SourceRegions.rend(),
- [&](const SourceMappingRegion &Region) {
- return Region.getBeginLoc() == StartLoc &&
- Region.getEndLoc() == EndLoc &&
- Region.isBranch() == isBranch;
- });
+ return llvm::any_of(
+ llvm::reverse(SourceRegions), [&](const SourceMappingRegion &Region) {
+ return Region.getBeginLoc() == StartLoc &&
+ Region.getEndLoc() == EndLoc && Region.isBranch() == isBranch;
+ });
}
/// Adjust the most recently visited location to \c EndLoc.
@@ -803,7 +1133,7 @@ struct CounterCoverageMappingBuilder
}
llvm::SmallSet<SourceLocation, 8> StartLocs;
- Optional<Counter> ParentCounter;
+ std::optional<Counter> ParentCounter;
for (SourceMappingRegion &I : llvm::reverse(RegionStack)) {
if (!I.hasStartLoc())
continue;
@@ -819,8 +1149,12 @@ struct CounterCoverageMappingBuilder
// we've seen this region.
if (StartLocs.insert(Loc).second) {
if (I.isBranch())
- SourceRegions.emplace_back(I.getCounter(), I.getFalseCounter(), Loc,
- getEndOfFileOrMacro(Loc), I.isBranch());
+ SourceRegions.emplace_back(
+ I.getCounter(), I.getFalseCounter(),
+ MCDCParameters{0, 0, I.getMCDCParams().ID,
+ I.getMCDCParams().TrueID,
+ I.getMCDCParams().FalseID},
+ Loc, getEndOfFileOrMacro(Loc), I.isBranch());
else
SourceRegions.emplace_back(I.getCounter(), Loc,
getEndOfFileOrMacro(Loc));
@@ -871,8 +1205,8 @@ struct CounterCoverageMappingBuilder
}
/// Find a valid gap range between \p AfterLoc and \p BeforeLoc.
- Optional<SourceRange> findGapAreaBetween(SourceLocation AfterLoc,
- SourceLocation BeforeLoc) {
+ std::optional<SourceRange> findGapAreaBetween(SourceLocation AfterLoc,
+ SourceLocation BeforeLoc) {
// If AfterLoc is in function-like macro, use the right parenthesis
// location.
if (AfterLoc.isMacroID()) {
@@ -910,10 +1244,10 @@ struct CounterCoverageMappingBuilder
// If the start and end locations of the gap are both within the same macro
// file, the range may not be in source order.
if (AfterLoc.isMacroID() || BeforeLoc.isMacroID())
- return None;
+ return std::nullopt;
if (!SM.isWrittenInSameFile(AfterLoc, BeforeLoc) ||
!SpellingRegion(SM, AfterLoc, BeforeLoc).isInSourceOrder())
- return None;
+ return std::nullopt;
return {{AfterLoc, BeforeLoc}};
}
@@ -930,6 +1264,69 @@ struct CounterCoverageMappingBuilder
popRegions(Index);
}
+ /// Find a valid range starting with \p StartingLoc and ending before \p
+ /// BeforeLoc.
+ std::optional<SourceRange> findAreaStartingFromTo(SourceLocation StartingLoc,
+ SourceLocation BeforeLoc) {
+ // If StartingLoc is in function-like macro, use its start location.
+ if (StartingLoc.isMacroID()) {
+ FileID FID = SM.getFileID(StartingLoc);
+ const SrcMgr::ExpansionInfo *EI = &SM.getSLocEntry(FID).getExpansion();
+ if (EI->isFunctionMacroExpansion())
+ StartingLoc = EI->getExpansionLocStart();
+ }
+
+ size_t StartDepth = locationDepth(StartingLoc);
+ size_t EndDepth = locationDepth(BeforeLoc);
+ while (!SM.isWrittenInSameFile(StartingLoc, BeforeLoc)) {
+ bool UnnestStart = StartDepth >= EndDepth;
+ bool UnnestEnd = EndDepth >= StartDepth;
+ if (UnnestEnd) {
+ assert(SM.isWrittenInSameFile(getStartOfFileOrMacro(BeforeLoc),
+ BeforeLoc));
+
+ BeforeLoc = getIncludeOrExpansionLoc(BeforeLoc);
+ assert(BeforeLoc.isValid());
+ EndDepth--;
+ }
+ if (UnnestStart) {
+ assert(SM.isWrittenInSameFile(StartingLoc,
+ getStartOfFileOrMacro(StartingLoc)));
+
+ StartingLoc = getIncludeOrExpansionLoc(StartingLoc);
+ assert(StartingLoc.isValid());
+ StartDepth--;
+ }
+ }
+ // If the start and end locations of the gap are both within the same macro
+ // file, the range may not be in source order.
+ if (StartingLoc.isMacroID() || BeforeLoc.isMacroID())
+ return std::nullopt;
+ if (!SM.isWrittenInSameFile(StartingLoc, BeforeLoc) ||
+ !SpellingRegion(SM, StartingLoc, BeforeLoc).isInSourceOrder())
+ return std::nullopt;
+ return {{StartingLoc, BeforeLoc}};
+ }
+
+ void markSkipped(SourceLocation StartLoc, SourceLocation BeforeLoc) {
+ const auto Skipped = findAreaStartingFromTo(StartLoc, BeforeLoc);
+
+ if (!Skipped)
+ return;
+
+ const auto NewStartLoc = Skipped->getBegin();
+ const auto EndLoc = Skipped->getEnd();
+
+ if (NewStartLoc == EndLoc)
+ return;
+ assert(SpellingRegion(SM, NewStartLoc, EndLoc).isInSourceOrder());
+ handleFileExit(NewStartLoc);
+ size_t Index = pushRegion({}, NewStartLoc, EndLoc);
+ getRegion().setSkipped(true);
+ handleFileExit(EndLoc);
+ popRegions(Index);
+ }
+
/// Keep counts of breaks and continues inside loops.
struct BreakContinue {
Counter BreakCount;
@@ -939,9 +1336,13 @@ struct CounterCoverageMappingBuilder
CounterCoverageMappingBuilder(
CoverageMappingModuleGen &CVM,
- llvm::DenseMap<const Stmt *, unsigned> &CounterMap, SourceManager &SM,
- const LangOptions &LangOpts)
- : CoverageMappingBuilder(CVM, SM, LangOpts), CounterMap(CounterMap) {}
+ llvm::DenseMap<const Stmt *, unsigned> &CounterMap,
+ llvm::DenseMap<const Stmt *, unsigned> &MCDCBitmapMap,
+ llvm::DenseMap<const Stmt *, MCDCConditionID> &CondIDMap,
+ SourceManager &SM, const LangOptions &LangOpts)
+ : CoverageMappingBuilder(CVM, SM, LangOpts), CounterMap(CounterMap),
+ MCDCBitmapMap(MCDCBitmapMap),
+ MCDCBuilder(CVM.getCodeGenModule(), CondIDMap, MCDCBitmapMap) {}
/// Write the mapping data to the output stream
void write(llvm::raw_ostream &OS) {
@@ -971,7 +1372,7 @@ struct CounterCoverageMappingBuilder
// If last statement contains terminate statements, add a gap area
// between the two statements. Skipping attributed statements, because
// they don't have valid start location.
- if (LastStmt && HasTerminateStmt && !dyn_cast<AttributedStmt>(Child)) {
+ if (LastStmt && HasTerminateStmt && !isa<AttributedStmt>(Child)) {
auto Gap = findGapAreaBetween(getEnd(LastStmt), getStart(Child));
if (Gap)
fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(),
@@ -990,19 +1391,31 @@ struct CounterCoverageMappingBuilder
void VisitDecl(const Decl *D) {
Stmt *Body = D->getBody();
- // Do not propagate region counts into system headers.
- if (Body && SM.isInSystemHeader(SM.getSpellingLoc(getStart(Body))))
+ // Do not propagate region counts into system headers unless collecting
+ // coverage from system headers is explicitly enabled.
+ if (!SystemHeadersCoverage && Body &&
+ SM.isInSystemHeader(SM.getSpellingLoc(getStart(Body))))
return;
// Do not visit the artificial children nodes of defaulted methods. The
// lexer may not be able to report back precise token end locations for
// these children nodes (llvm.org/PR39822), and moreover users will not be
// able to see coverage for them.
+ Counter BodyCounter = getRegionCounter(Body);
bool Defaulted = false;
if (auto *Method = dyn_cast<CXXMethodDecl>(D))
Defaulted = Method->isDefaulted();
+ if (auto *Ctor = dyn_cast<CXXConstructorDecl>(D)) {
+ for (auto *Initializer : Ctor->inits()) {
+ if (Initializer->isWritten()) {
+ auto *Init = Initializer->getInit();
+ if (getStart(Init).isValid() && getEnd(Init).isValid())
+ propagateCounts(BodyCounter, Init);
+ }
+ }
+ }
- propagateCounts(getRegionCounter(Body), Body,
+ propagateCounts(BodyCounter, Body,
/*VisitChildren=*/!Defaulted);
assert(RegionStack.empty() && "Regions entered but never exited");
}
@@ -1319,11 +1732,16 @@ struct CounterCoverageMappingBuilder
const SwitchCase *Case = S->getSwitchCaseList();
for (; Case; Case = Case->getNextSwitchCase()) {
HasDefaultCase = HasDefaultCase || isa<DefaultStmt>(Case);
- CaseCountSum = addCounters(CaseCountSum, getRegionCounter(Case));
+ CaseCountSum =
+ addCounters(CaseCountSum, getRegionCounter(Case), /*Simplify=*/false);
createSwitchCaseRegion(
Case, getRegionCounter(Case),
subtractCounters(ParentCount, getRegionCounter(Case)));
}
+ // Simplify is skipped while building the counters above: it can get really
+ // slow on top of switches with thousands of cases. Instead, trigger
+ // simplification by adding zero to the last counter.
+ CaseCountSum = addCounters(CaseCountSum, Counter::getZero());
// If no explicit default case exists, create a branch region to represent
// the hidden branch, which will be added later by the CodeGen. This region
@@ -1358,7 +1776,97 @@ struct CounterCoverageMappingBuilder
Visit(S->getSubStmt());
}
+ void coverIfConsteval(const IfStmt *S) {
+ assert(S->isConsteval());
+
+ const auto *Then = S->getThen();
+ const auto *Else = S->getElse();
+
+ // It's better for llvm-cov to create a new region with same counter
+ // so line-coverage can be properly calculated for lines containing
+ // a skipped region (without it the line is marked uncovered)
+ const Counter ParentCount = getRegion().getCounter();
+
+ extendRegion(S);
+
+ if (S->isNegatedConsteval()) {
+ // ignore 'if consteval'
+ markSkipped(S->getIfLoc(), getStart(Then));
+ propagateCounts(ParentCount, Then);
+
+ if (Else) {
+ // ignore 'else <else>'
+ markSkipped(getEnd(Then), getEnd(Else));
+ }
+ } else {
+ assert(S->isNonNegatedConsteval());
+ // ignore 'if consteval <then> [else]'
+ markSkipped(S->getIfLoc(), Else ? getStart(Else) : getEnd(Then));
+
+ if (Else)
+ propagateCounts(ParentCount, Else);
+ }
+ }
+
+ void coverIfConstexpr(const IfStmt *S) {
+ assert(S->isConstexpr());
+
+ // evaluate constant condition...
+ const bool isTrue =
+ S->getCond()
+ ->EvaluateKnownConstInt(CVM.getCodeGenModule().getContext())
+ .getBoolValue();
+
+ extendRegion(S);
+
+ // I'm using 'propagateCounts' later as new region is better and allows me
+ // to properly calculate line coverage in llvm-cov utility
+ const Counter ParentCount = getRegion().getCounter();
+
+ // ignore 'if constexpr ('
+ SourceLocation startOfSkipped = S->getIfLoc();
+
+ if (const auto *Init = S->getInit()) {
+ const auto start = getStart(Init);
+ const auto end = getEnd(Init);
+
+ // this check is to make sure typedef here which doesn't have valid source
+ // location won't crash it
+ if (start.isValid() && end.isValid()) {
+ markSkipped(startOfSkipped, start);
+ propagateCounts(ParentCount, Init);
+ startOfSkipped = getEnd(Init);
+ }
+ }
+
+ const auto *Then = S->getThen();
+ const auto *Else = S->getElse();
+
+ if (isTrue) {
+ // ignore '<condition>)'
+ markSkipped(startOfSkipped, getStart(Then));
+ propagateCounts(ParentCount, Then);
+
+ if (Else)
+ // ignore 'else <else>'
+ markSkipped(getEnd(Then), getEnd(Else));
+ } else {
+ // ignore '<condition>) <then> [else]'
+ markSkipped(startOfSkipped, Else ? getStart(Else) : getEnd(Then));
+
+ if (Else)
+ propagateCounts(ParentCount, Else);
+ }
+ }
+
void VisitIfStmt(const IfStmt *S) {
+ // "if constexpr" and "if consteval" are not normal conditional statements,
+ // their discarded statement should be skipped
+ if (S->isConsteval())
+ return coverIfConsteval(S);
+ else if (S->isConstexpr())
+ return coverIfConstexpr(S);
+
extendRegion(S);
if (S->getInit())
Visit(S->getInit());
@@ -1375,20 +1883,21 @@ struct CounterCoverageMappingBuilder
propagateCounts(ParentCount, S->getCond());
// The 'then' count applies to the area immediately after the condition.
- auto Gap = findGapAreaBetween(S->getRParenLoc(), getStart(S->getThen()));
+ std::optional<SourceRange> Gap =
+ findGapAreaBetween(S->getRParenLoc(), getStart(S->getThen()));
if (Gap)
fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(), ThenCount);
extendRegion(S->getThen());
Counter OutCount = propagateCounts(ThenCount, S->getThen());
-
Counter ElseCount = subtractCounters(ParentCount, ThenCount);
+
if (const Stmt *Else = S->getElse()) {
bool ThenHasTerminateStmt = HasTerminateStmt;
HasTerminateStmt = false;
-
// The 'else' count applies to the area immediately after the 'then'.
- Gap = findGapAreaBetween(getEnd(S->getThen()), getStart(Else));
+ std::optional<SourceRange> Gap =
+ findGapAreaBetween(getEnd(S->getThen()), getStart(Else));
if (Gap)
fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(), ElseCount);
extendRegion(Else);
@@ -1435,6 +1944,7 @@ struct CounterCoverageMappingBuilder
Counter TrueCount = getRegionCounter(E);
propagateCounts(ParentCount, E->getCond());
+ Counter OutCount;
if (!isa<BinaryConditionalOperator>(E)) {
// The 'then' count applies to the area immediately after the condition.
@@ -1444,12 +1954,18 @@ struct CounterCoverageMappingBuilder
fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(), TrueCount);
extendRegion(E->getTrueExpr());
- propagateCounts(TrueCount, E->getTrueExpr());
+ OutCount = propagateCounts(TrueCount, E->getTrueExpr());
}
extendRegion(E->getFalseExpr());
- propagateCounts(subtractCounters(ParentCount, TrueCount),
- E->getFalseExpr());
+ OutCount = addCounters(
+ OutCount, propagateCounts(subtractCounters(ParentCount, TrueCount),
+ E->getFalseExpr()));
+
+ if (OutCount != ParentCount) {
+ pushRegion(OutCount);
+ GapRegionCounter = OutCount;
+ }
// Create Branch Region around condition.
createBranchRegion(E->getCond(), TrueCount,
@@ -1457,14 +1973,30 @@ struct CounterCoverageMappingBuilder
}
void VisitBinLAnd(const BinaryOperator *E) {
+ bool IsRootNode = MCDCBuilder.isIdle();
+
+ // Keep track of Binary Operator and assign MCDC condition IDs.
+ MCDCBuilder.pushAndAssignIDs(E);
+
extendRegion(E->getLHS());
propagateCounts(getRegion().getCounter(), E->getLHS());
handleFileExit(getEnd(E->getLHS()));
+ // Track LHS True/False Decision.
+ const auto DecisionLHS = MCDCBuilder.pop();
+
// Counter tracks the right hand side of a logical and operator.
extendRegion(E->getRHS());
propagateCounts(getRegionCounter(E), E->getRHS());
+ // Track RHS True/False Decision.
+ const auto DecisionRHS = MCDCBuilder.back();
+
+ // Create MCDC Decision Region if at top-level (root).
+ unsigned NumConds = 0;
+ if (IsRootNode && (NumConds = MCDCBuilder.getTotalConditionsAndReset(E)))
+ createDecisionRegion(E, getRegionBitmap(E), NumConds);
+
// Extract the RHS's Execution Counter.
Counter RHSExecCnt = getRegionCounter(E);
@@ -1476,44 +2008,83 @@ struct CounterCoverageMappingBuilder
// Create Branch Region around LHS condition.
createBranchRegion(E->getLHS(), RHSExecCnt,
- subtractCounters(ParentCnt, RHSExecCnt));
+ subtractCounters(ParentCnt, RHSExecCnt), DecisionLHS);
// Create Branch Region around RHS condition.
createBranchRegion(E->getRHS(), RHSTrueCnt,
- subtractCounters(RHSExecCnt, RHSTrueCnt));
+ subtractCounters(RHSExecCnt, RHSTrueCnt), DecisionRHS);
+ }
+
+ // Determine whether the right side of OR operation need to be visited.
+ bool shouldVisitRHS(const Expr *LHS) {
+ bool LHSIsTrue = false;
+ bool LHSIsConst = false;
+ if (!LHS->isValueDependent())
+ LHSIsConst = LHS->EvaluateAsBooleanCondition(
+ LHSIsTrue, CVM.getCodeGenModule().getContext());
+ return !LHSIsConst || (LHSIsConst && !LHSIsTrue);
}
void VisitBinLOr(const BinaryOperator *E) {
+ bool IsRootNode = MCDCBuilder.isIdle();
+
+ // Keep track of Binary Operator and assign MCDC condition IDs.
+ MCDCBuilder.pushAndAssignIDs(E);
+
extendRegion(E->getLHS());
- propagateCounts(getRegion().getCounter(), E->getLHS());
+ Counter OutCount = propagateCounts(getRegion().getCounter(), E->getLHS());
handleFileExit(getEnd(E->getLHS()));
+ // Track LHS True/False Decision.
+ const auto DecisionLHS = MCDCBuilder.pop();
+
// Counter tracks the right hand side of a logical or operator.
extendRegion(E->getRHS());
propagateCounts(getRegionCounter(E), E->getRHS());
+ // Track RHS True/False Decision.
+ const auto DecisionRHS = MCDCBuilder.back();
+
+ // Create MCDC Decision Region if at top-level (root).
+ unsigned NumConds = 0;
+ if (IsRootNode && (NumConds = MCDCBuilder.getTotalConditionsAndReset(E)))
+ createDecisionRegion(E, getRegionBitmap(E), NumConds);
+
// Extract the RHS's Execution Counter.
Counter RHSExecCnt = getRegionCounter(E);
// Extract the RHS's "False" Instance Counter.
Counter RHSFalseCnt = getRegionCounter(E->getRHS());
+ if (!shouldVisitRHS(E->getLHS())) {
+ GapRegionCounter = OutCount;
+ }
+
// Extract the Parent Region Counter.
Counter ParentCnt = getRegion().getCounter();
// Create Branch Region around LHS condition.
createBranchRegion(E->getLHS(), subtractCounters(ParentCnt, RHSExecCnt),
- RHSExecCnt);
+ RHSExecCnt, DecisionLHS);
// Create Branch Region around RHS condition.
createBranchRegion(E->getRHS(), subtractCounters(RHSExecCnt, RHSFalseCnt),
- RHSFalseCnt);
+ RHSFalseCnt, DecisionRHS);
}
void VisitLambdaExpr(const LambdaExpr *LE) {
// Lambdas are treated as their own functions for now, so we shouldn't
// propagate counts into them.
}
+
+ void VisitPseudoObjectExpr(const PseudoObjectExpr *POE) {
+ // Just visit syntatic expression as this is what users actually write.
+ VisitStmt(POE->getSyntacticForm());
+ }
+
+ void VisitOpaqueValueExpr(const OpaqueValueExpr* OVE) {
+ Visit(OVE->getSourceExpr());
+ }
};
} // end anonymous namespace
@@ -1538,17 +2109,33 @@ static void dump(llvm::raw_ostream &OS, StringRef FunctionName,
OS << "Gap,";
break;
case CounterMappingRegion::BranchRegion:
+ case CounterMappingRegion::MCDCBranchRegion:
OS << "Branch,";
break;
+ case CounterMappingRegion::MCDCDecisionRegion:
+ OS << "Decision,";
+ break;
}
OS << "File " << R.FileID << ", " << R.LineStart << ":" << R.ColumnStart
<< " -> " << R.LineEnd << ":" << R.ColumnEnd << " = ";
- Ctx.dump(R.Count, OS);
- if (R.Kind == CounterMappingRegion::BranchRegion) {
- OS << ", ";
- Ctx.dump(R.FalseCount, OS);
+ if (R.Kind == CounterMappingRegion::MCDCDecisionRegion) {
+ OS << "M:" << R.MCDCParams.BitmapIdx;
+ OS << ", C:" << R.MCDCParams.NumConditions;
+ } else {
+ Ctx.dump(R.Count, OS);
+
+ if (R.Kind == CounterMappingRegion::BranchRegion ||
+ R.Kind == CounterMappingRegion::MCDCBranchRegion) {
+ OS << ", ";
+ Ctx.dump(R.FalseCount, OS);
+ }
+ }
+
+ if (R.Kind == CounterMappingRegion::MCDCBranchRegion) {
+ OS << " [" << R.MCDCParams.ID << "," << R.MCDCParams.TrueID;
+ OS << "," << R.MCDCParams.FalseID << "] ";
}
if (R.Kind == CounterMappingRegion::ExpansionRegion)
@@ -1559,9 +2146,7 @@ static void dump(llvm::raw_ostream &OS, StringRef FunctionName,
CoverageMappingModuleGen::CoverageMappingModuleGen(
CodeGenModule &CGM, CoverageSourceInfo &SourceInfo)
- : CGM(CGM), SourceInfo(SourceInfo) {
- CoveragePrefixMap = CGM.getCodeGenOpts().CoveragePrefixMap;
-}
+ : CGM(CGM), SourceInfo(SourceInfo) {}
std::string CoverageMappingModuleGen::getCurrentDirname() {
if (!CGM.getCodeGenOpts().CoverageCompilationDir.empty())
@@ -1575,8 +2160,13 @@ std::string CoverageMappingModuleGen::getCurrentDirname() {
std::string CoverageMappingModuleGen::normalizeFilename(StringRef Filename) {
llvm::SmallString<256> Path(Filename);
llvm::sys::path::remove_dots(Path, /*remove_dot_dot=*/true);
- for (const auto &Entry : CoveragePrefixMap) {
- if (llvm::sys::path::replace_path_prefix(Path, Entry.first, Entry.second))
+
+ /// Traverse coverage prefix map in reverse order because prefix replacements
+ /// are applied in reverse order starting from the last one when multiple
+ /// prefix replacement options are provided.
+ for (const auto &[From, To] :
+ llvm::reverse(CGM.getCodeGenOpts().CoveragePrefixMap)) {
+ if (llvm::sys::path::replace_path_prefix(Path, From, To))
break;
}
return Path.str().str();
@@ -1611,7 +2201,7 @@ void CoverageMappingModuleGen::emitFunctionMappingRecord(
#include "llvm/ProfileData/InstrProfData.inc"
};
auto *FunctionRecordTy =
- llvm::StructType::get(Ctx, makeArrayRef(FunctionRecordTypes),
+ llvm::StructType::get(Ctx, ArrayRef(FunctionRecordTypes),
/*isPacked=*/true);
// Create the function record constant.
@@ -1619,8 +2209,8 @@ void CoverageMappingModuleGen::emitFunctionMappingRecord(
llvm::Constant *FunctionRecordVals[] = {
#include "llvm/ProfileData/InstrProfData.inc"
};
- auto *FuncRecordConstant = llvm::ConstantStruct::get(
- FunctionRecordTy, makeArrayRef(FunctionRecordVals));
+ auto *FuncRecordConstant =
+ llvm::ConstantStruct::get(FunctionRecordTy, ArrayRef(FunctionRecordVals));
// Create the function record global.
auto *FuncRecord = new llvm::GlobalVariable(
@@ -1640,13 +2230,11 @@ void CoverageMappingModuleGen::emitFunctionMappingRecord(
void CoverageMappingModuleGen::addFunctionMappingRecord(
llvm::GlobalVariable *NamePtr, StringRef NameValue, uint64_t FuncHash,
const std::string &CoverageMapping, bool IsUsed) {
- llvm::LLVMContext &Ctx = CGM.getLLVMContext();
const uint64_t NameHash = llvm::IndexedInstrProf::ComputeHash(NameValue);
FunctionRecords.push_back({NameHash, FuncHash, CoverageMapping, IsUsed});
if (!IsUsed)
- FunctionNames.push_back(
- llvm::ConstantExpr::getBitCast(NamePtr, llvm::Type::getInt8PtrTy(Ctx)));
+ FunctionNames.push_back(NamePtr);
if (CGM.getCodeGenOpts().DumpCoverageMapping) {
// Dump the coverage mapping data for this function by decoding the
@@ -1662,9 +2250,9 @@ void CoverageMappingModuleGen::addFunctionMappingRecord(
FilenameStrs[0] = normalizeFilename(getCurrentDirname());
for (const auto &Entry : FileEntries) {
auto I = Entry.second;
- FilenameStrs[I] = normalizeFilename(Entry.first->getName());
+ FilenameStrs[I] = normalizeFilename(Entry.first.getName());
}
- ArrayRef<std::string> FilenameRefs = llvm::makeArrayRef(FilenameStrs);
+ ArrayRef<std::string> FilenameRefs = llvm::ArrayRef(FilenameStrs);
RawCoverageMappingReader Reader(CoverageMapping, FilenameRefs, Filenames,
Expressions, Regions);
if (Reader.read())
@@ -1686,7 +2274,7 @@ void CoverageMappingModuleGen::emit() {
FilenameStrs[0] = normalizeFilename(getCurrentDirname());
for (const auto &Entry : FileEntries) {
auto I = Entry.second;
- FilenameStrs[I] = normalizeFilename(Entry.first->getName());
+ FilenameStrs[I] = normalizeFilename(Entry.first.getName());
}
std::string Filenames;
@@ -1710,20 +2298,19 @@ void CoverageMappingModuleGen::emit() {
#include "llvm/ProfileData/InstrProfData.inc"
};
auto CovDataHeaderTy =
- llvm::StructType::get(Ctx, makeArrayRef(CovDataHeaderTypes));
+ llvm::StructType::get(Ctx, ArrayRef(CovDataHeaderTypes));
llvm::Constant *CovDataHeaderVals[] = {
#define COVMAP_HEADER(Type, LLVMType, Name, Init) Init,
#include "llvm/ProfileData/InstrProfData.inc"
};
- auto CovDataHeaderVal = llvm::ConstantStruct::get(
- CovDataHeaderTy, makeArrayRef(CovDataHeaderVals));
+ auto CovDataHeaderVal =
+ llvm::ConstantStruct::get(CovDataHeaderTy, ArrayRef(CovDataHeaderVals));
// Create the coverage data record
llvm::Type *CovDataTypes[] = {CovDataHeaderTy, FilenamesVal->getType()};
- auto CovDataTy = llvm::StructType::get(Ctx, makeArrayRef(CovDataTypes));
+ auto CovDataTy = llvm::StructType::get(Ctx, ArrayRef(CovDataTypes));
llvm::Constant *TUDataVals[] = {CovDataHeaderVal, FilenamesVal};
- auto CovDataVal =
- llvm::ConstantStruct::get(CovDataTy, makeArrayRef(TUDataVals));
+ auto CovDataVal = llvm::ConstantStruct::get(CovDataTy, ArrayRef(TUDataVals));
auto CovData = new llvm::GlobalVariable(
CGM.getModule(), CovDataTy, true, llvm::GlobalValue::PrivateLinkage,
CovDataVal, llvm::getCoverageMappingVarName());
@@ -1735,7 +2322,7 @@ void CoverageMappingModuleGen::emit() {
CGM.addUsedGlobal(CovData);
// Create the deferred function records array
if (!FunctionNames.empty()) {
- auto NamesArrTy = llvm::ArrayType::get(llvm::Type::getInt8PtrTy(Ctx),
+ auto NamesArrTy = llvm::ArrayType::get(llvm::PointerType::getUnqual(Ctx),
FunctionNames.size());
auto NamesArrVal = llvm::ConstantArray::get(NamesArrTy, FunctionNames);
// This variable will *NOT* be emitted to the object file. It is used
@@ -1746,7 +2333,7 @@ void CoverageMappingModuleGen::emit() {
}
}
-unsigned CoverageMappingModuleGen::getFileID(const FileEntry *File) {
+unsigned CoverageMappingModuleGen::getFileID(FileEntryRef File) {
auto It = FileEntries.find(File);
if (It != FileEntries.end())
return It->second;
@@ -1757,8 +2344,9 @@ unsigned CoverageMappingModuleGen::getFileID(const FileEntry *File) {
void CoverageMappingGen::emitCounterMapping(const Decl *D,
llvm::raw_ostream &OS) {
- assert(CounterMap);
- CounterCoverageMappingBuilder Walker(CVM, *CounterMap, SM, LangOpts);
+ assert(CounterMap && MCDCBitmapMap);
+ CounterCoverageMappingBuilder Walker(CVM, *CounterMap, *MCDCBitmapMap,
+ *CondIDMap, SM, LangOpts);
Walker.VisitDecl(D);
Walker.write(OS);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h b/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h
index ae4f435d4ff3..62cea173c9fc 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h
@@ -31,15 +31,29 @@ class Decl;
class Stmt;
struct SkippedRange {
+ enum Kind {
+ PPIfElse, // Preprocessor #if/#else ...
+ EmptyLine,
+ Comment,
+ };
+
SourceRange Range;
// The location of token before the skipped source range.
SourceLocation PrevTokLoc;
// The location of token after the skipped source range.
SourceLocation NextTokLoc;
+ // The nature of this skipped range
+ Kind RangeKind;
+
+ bool isComment() { return RangeKind == Comment; }
+ bool isEmptyLine() { return RangeKind == EmptyLine; }
+ bool isPPIfElse() { return RangeKind == PPIfElse; }
- SkippedRange(SourceRange Range, SourceLocation PrevTokLoc = SourceLocation(),
+ SkippedRange(SourceRange Range, Kind K,
+ SourceLocation PrevTokLoc = SourceLocation(),
SourceLocation NextTokLoc = SourceLocation())
- : Range(Range), PrevTokLoc(PrevTokLoc), NextTokLoc(NextTokLoc) {}
+ : Range(Range), PrevTokLoc(PrevTokLoc), NextTokLoc(NextTokLoc),
+ RangeKind(K) {}
};
/// Stores additional source code information like skipped ranges which
@@ -62,7 +76,7 @@ public:
std::vector<SkippedRange> &getSkippedRanges() { return SkippedRanges; }
- void AddSkippedRange(SourceRange Range);
+ void AddSkippedRange(SourceRange Range, SkippedRange::Kind RangeKind);
void SourceRangeSkipped(SourceRange Range, SourceLocation EndifLoc) override;
@@ -90,10 +104,9 @@ class CoverageMappingModuleGen {
CodeGenModule &CGM;
CoverageSourceInfo &SourceInfo;
- llvm::SmallDenseMap<const FileEntry *, unsigned, 8> FileEntries;
+ llvm::SmallDenseMap<FileEntryRef, unsigned, 8> FileEntries;
std::vector<llvm::Constant *> FunctionNames;
std::vector<FunctionInfo> FunctionRecords;
- std::map<std::string, std::string> CoveragePrefixMap;
std::string getCurrentDirname();
std::string normalizeFilename(StringRef Filename);
@@ -124,7 +137,7 @@ public:
/// Return the coverage mapping translation unit file id
/// for the given file.
- unsigned getFileID(const FileEntry *File);
+ unsigned getFileID(FileEntryRef File);
/// Return an interface into CodeGenModule.
CodeGenModule &getCodeGenModule() { return CGM; }
@@ -137,16 +150,22 @@ class CoverageMappingGen {
SourceManager &SM;
const LangOptions &LangOpts;
llvm::DenseMap<const Stmt *, unsigned> *CounterMap;
+ llvm::DenseMap<const Stmt *, unsigned> *MCDCBitmapMap;
+ llvm::DenseMap<const Stmt *, unsigned> *CondIDMap;
public:
CoverageMappingGen(CoverageMappingModuleGen &CVM, SourceManager &SM,
const LangOptions &LangOpts)
- : CVM(CVM), SM(SM), LangOpts(LangOpts), CounterMap(nullptr) {}
+ : CVM(CVM), SM(SM), LangOpts(LangOpts), CounterMap(nullptr),
+ MCDCBitmapMap(nullptr), CondIDMap(nullptr) {}
CoverageMappingGen(CoverageMappingModuleGen &CVM, SourceManager &SM,
const LangOptions &LangOpts,
- llvm::DenseMap<const Stmt *, unsigned> *CounterMap)
- : CVM(CVM), SM(SM), LangOpts(LangOpts), CounterMap(CounterMap) {}
+ llvm::DenseMap<const Stmt *, unsigned> *CounterMap,
+ llvm::DenseMap<const Stmt *, unsigned> *MCDCBitmapMap,
+ llvm::DenseMap<const Stmt *, unsigned> *CondIDMap)
+ : CVM(CVM), SM(SM), LangOpts(LangOpts), CounterMap(CounterMap),
+ MCDCBitmapMap(MCDCBitmapMap), CondIDMap(CondIDMap) {}
/// Emit the coverage mapping data which maps the regions of
/// code to counters that will be used to find the execution
diff --git a/contrib/llvm-project/clang/lib/CodeGen/EHScopeStack.h b/contrib/llvm-project/clang/lib/CodeGen/EHScopeStack.h
index cd649cb11f9b..0c667e80bb6d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/EHScopeStack.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/EHScopeStack.h
@@ -148,6 +148,12 @@ public:
public:
Cleanup(const Cleanup &) = default;
Cleanup(Cleanup &&) {}
+
+ // The copy and move assignment operator is defined as deleted pending
+ // further motivation.
+ Cleanup &operator=(const Cleanup &) = delete;
+ Cleanup &operator=(Cleanup &&) = delete;
+
Cleanup() = default;
virtual bool isRedundantBeforeReturn() { return false; }
@@ -160,10 +166,10 @@ public:
F_IsEHCleanupKind = 0x4,
F_HasExitSwitch = 0x8,
};
- unsigned flags;
+ unsigned flags = 0;
public:
- Flags() : flags(0) {}
+ Flags() = default;
/// isForEH - true if the current emission is for an EH cleanup.
bool isForEHCleanup() const { return flags & F_IsForEH; }
@@ -272,6 +278,9 @@ public:
CGF(nullptr) {}
~EHScopeStack() { delete[] StartOfBuffer; }
+ EHScopeStack(const EHScopeStack &) = delete;
+ EHScopeStack &operator=(const EHScopeStack &) = delete;
+
/// Push a lazily-created cleanup on the stack.
template <class T, class... As> void pushCleanup(CleanupKind Kind, As... A) {
static_assert(alignof(T) <= ScopeStackAlignment,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
index d3dc0e6212b8..d173806ec8ce 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -36,6 +36,8 @@
#include "llvm/IR/Value.h"
#include "llvm/Support/ScopedPrinter.h"
+#include <optional>
+
using namespace clang;
using namespace CodeGen;
@@ -185,14 +187,58 @@ public:
bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
QualType SrcRecordTy) override;
- llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
+ /// Determine whether we know that all instances of type RecordTy will have
+ /// the same vtable pointer values, that is distinct from all other vtable
+ /// pointers. While this is required by the Itanium ABI, it doesn't happen in
+ /// practice in some cases due to language extensions.
+ bool hasUniqueVTablePointer(QualType RecordTy) {
+ const CXXRecordDecl *RD = RecordTy->getAsCXXRecordDecl();
+
+ // Under -fapple-kext, multiple definitions of the same vtable may be
+ // emitted.
+ if (!CGM.getCodeGenOpts().AssumeUniqueVTables ||
+ getContext().getLangOpts().AppleKext)
+ return false;
+
+ // If the type_info* would be null, the vtable might be merged with that of
+ // another type.
+ if (!CGM.shouldEmitRTTI())
+ return false;
+
+ // If there's only one definition of the vtable in the program, it has a
+ // unique address.
+ if (!llvm::GlobalValue::isWeakForLinker(CGM.getVTableLinkage(RD)))
+ return true;
+
+ // Even if there are multiple definitions of the vtable, they are required
+ // by the ABI to use the same symbol name, so should be merged at load
+ // time. However, if the class has hidden visibility, there can be
+ // different versions of the class in different modules, and the ABI
+ // library might treat them as being the same.
+ if (CGM.GetLLVMVisibility(RD->getVisibility()) !=
+ llvm::GlobalValue::DefaultVisibility)
+ return false;
+
+ return true;
+ }
+
+ bool shouldEmitExactDynamicCast(QualType DestRecordTy) override {
+ return hasUniqueVTablePointer(DestRecordTy);
+ }
+
+ llvm::Value *emitDynamicCastCall(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy, QualType DestTy,
QualType DestRecordTy,
llvm::BasicBlock *CastEnd) override;
- llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
- QualType SrcRecordTy,
- QualType DestTy) override;
+ llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address ThisAddr,
+ QualType SrcRecordTy, QualType DestTy,
+ QualType DestRecordTy,
+ llvm::BasicBlock *CastSuccess,
+ llvm::BasicBlock *CastFail) override;
+
+ llvm::Value *emitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
+ QualType SrcRecordTy) override;
bool EmitBadCastCall(CodeGenFunction &CGF) override;
@@ -334,49 +380,9 @@ public:
ArrayRef<llvm::Function *> CXXThreadLocalInits,
ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
- /// Determine whether we will definitely emit this variable with a constant
- /// initializer, either because the language semantics demand it or because
- /// we know that the initializer is a constant.
- // For weak definitions, any initializer available in the current translation
- // is not necessarily reflective of the initializer used; such initializers
- // are ignored unless if InspectInitForWeakDef is true.
- bool
- isEmittedWithConstantInitializer(const VarDecl *VD,
- bool InspectInitForWeakDef = false) const {
- VD = VD->getMostRecentDecl();
- if (VD->hasAttr<ConstInitAttr>())
- return true;
-
- // All later checks examine the initializer specified on the variable. If
- // the variable is weak, such examination would not be correct.
- if (!InspectInitForWeakDef &&
- (VD->isWeak() || VD->hasAttr<SelectAnyAttr>()))
- return false;
-
- const VarDecl *InitDecl = VD->getInitializingDeclaration();
- if (!InitDecl)
- return false;
-
- // If there's no initializer to run, this is constant initialization.
- if (!InitDecl->hasInit())
- return true;
-
- // If we have the only definition, we don't need a thread wrapper if we
- // will emit the value as a constant.
- if (isUniqueGVALinkage(getContext().GetGVALinkageForVariable(VD)))
- return !VD->needsDestruction(getContext()) && InitDecl->evaluateValue();
-
- // Otherwise, we need a thread wrapper unless we know that every
- // translation unit will emit the value as a constant. We rely on the
- // variable being constant-initialized in every translation unit if it's
- // constant-initialized in any translation unit, which isn't actually
- // guaranteed by the standard but is necessary for sanity.
- return InitDecl->hasConstantInitialization();
- }
-
bool usesThreadWrapperFunction(const VarDecl *VD) const override {
return !isEmittedWithConstantInitializer(VD) ||
- VD->needsDestruction(getContext());
+ mayNeedDestruction(VD);
}
LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
QualType LValType) override;
@@ -473,11 +479,7 @@ public:
ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
/*UseARMGuardVarABI=*/true) {}
- bool HasThisReturn(GlobalDecl GD) const override {
- return (isa<CXXConstructorDecl>(GD.getDecl()) || (
- isa<CXXDestructorDecl>(GD.getDecl()) &&
- GD.getDtorType() != Dtor_Deleting));
- }
+ bool constructorsAndDestructorsReturnThis() const override { return true; }
void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
QualType ResTy) override;
@@ -508,11 +510,7 @@ public:
: ItaniumCXXABI(CGM) {}
private:
- bool HasThisReturn(GlobalDecl GD) const override {
- return isa<CXXConstructorDecl>(GD.getDecl()) ||
- (isa<CXXDestructorDecl>(GD.getDecl()) &&
- GD.getDtorType() != Dtor_Deleting);
- }
+ bool constructorsAndDestructorsReturnThis() const override { return true; }
};
class WebAssemblyCXXABI final : public ItaniumCXXABI {
@@ -526,11 +524,7 @@ public:
llvm::Value *Exn) override;
private:
- bool HasThisReturn(GlobalDecl GD) const override {
- return isa<CXXConstructorDecl>(GD.getDecl()) ||
- (isa<CXXDestructorDecl>(GD.getDecl()) &&
- GD.getDtorType() != Dtor_Deleting);
- }
+ bool constructorsAndDestructorsReturnThis() const override { return true; }
bool canCallMismatchedFunctionType() const override { return false; }
};
@@ -632,13 +626,10 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
CGBuilderTy &Builder = CGF.Builder;
const FunctionProtoType *FPT =
- MPT->getPointeeType()->getAs<FunctionProtoType>();
+ MPT->getPointeeType()->castAs<FunctionProtoType>();
auto *RD =
cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
- llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
- CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
-
llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
@@ -656,9 +647,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// Apply the adjustment and cast back to the original struct type
// for consistency.
llvm::Value *This = ThisAddr.getPointer();
- llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
- Ptr = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), Ptr, Adj);
- This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
+ This = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), This, Adj);
ThisPtrForCall = This;
// Load the function pointer.
@@ -680,12 +669,12 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
CGF.EmitBlock(FnVirtual);
// Cast the adjusted this to a pointer to vtable pointer and load.
- llvm::Type *VTableTy = Builder.getInt8PtrTy();
+ llvm::Type *VTableTy = CGF.CGM.GlobalsInt8PtrTy;
CharUnits VTablePtrAlign =
CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
CGF.getPointerAlign());
- llvm::Value *VTable =
- CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
+ llvm::Value *VTable = CGF.GetVTablePtr(
+ Address(This, ThisAddr.getElementType(), VTablePtrAlign), VTableTy, RD);
// Apply the offset.
// On ARM64, to reserve extra space in virtual member function pointers,
@@ -708,8 +697,8 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
CGM.HasHiddenLTOVisibility(RD);
bool ShouldEmitWPDInfo =
CGM.getCodeGenOpts().WholeProgramVTables &&
- // Don't insert type tests if we are forcing public std visibility.
- !CGM.HasLTOVisibilityPublicStd(RD);
+ // Don't insert type tests if we are forcing public visibility.
+ !CGM.AlwaysHasLTOVisibilityPublic(RD);
llvm::Value *VirtualFn = nullptr;
{
@@ -739,17 +728,18 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
{VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
- VirtualFn = Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(),
- "memptr.virtualfn");
} else {
// When not doing VFE, emit a normal load, as it allows more
// optimisations than type.checked.load.
if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
llvm::Value *VFPAddr =
Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
- CheckResult = Builder.CreateCall(
- CGM.getIntrinsic(llvm::Intrinsic::type_test),
- {Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
+ llvm::Intrinsic::ID IID = CGM.HasHiddenLTOVisibility(RD)
+ ? llvm::Intrinsic::type_test
+ : llvm::Intrinsic::public_type_test;
+
+ CheckResult =
+ Builder.CreateCall(CGM.getIntrinsic(IID), {VFPAddr, TypeId});
}
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
@@ -757,15 +747,12 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
CGM.getIntrinsic(llvm::Intrinsic::load_relative,
{VTableOffset->getType()}),
{VTable, VTableOffset});
- VirtualFn = CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo());
} else {
llvm::Value *VFPAddr =
CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
- VFPAddr = CGF.Builder.CreateBitCast(
- VFPAddr, FTy->getPointerTo()->getPointerTo());
- VirtualFn = CGF.Builder.CreateAlignedLoad(
- FTy->getPointerTo(), VFPAddr, CGF.getPointerAlign(),
- "memptr.virtualfn");
+ VirtualFn = CGF.Builder.CreateAlignedLoad(CGF.UnqualPtrTy, VFPAddr,
+ CGF.getPointerAlign(),
+ "memptr.virtualfn");
}
}
assert(VirtualFn && "Virtual fuction pointer not created!");
@@ -806,7 +793,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// function pointer.
CGF.EmitBlock(FnNonVirtual);
llvm::Value *NonVirtualFn =
- Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
+ Builder.CreateIntToPtr(FnAsInt, CGF.UnqualPtrTy, "memptr.nonvirtualfn");
// Check the function pointer if CFI on member function pointers is enabled.
if (ShouldEmitCFICheck) {
@@ -821,8 +808,6 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
};
llvm::Value *Bit = Builder.getFalse();
- llvm::Value *CastedNonVirtualFn =
- Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
getContext().getMemberPointerType(
@@ -833,13 +818,13 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
llvm::Value *TypeTest =
Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
- {CastedNonVirtualFn, TypeId});
+ {NonVirtualFn, TypeId});
Bit = Builder.CreateOr(Bit, TypeTest);
}
CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
SanitizerHandler::CFICheckFail, StaticData,
- {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
+ {NonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
FnNonVirtual = Builder.GetInsertBlock();
}
@@ -847,7 +832,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// We're done.
CGF.EmitBlock(FnEnd);
- llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
+ llvm::PHINode *CalleePtr = Builder.CreatePHI(CGF.UnqualPtrTy, 2);
CalleePtr->addIncoming(VirtualFn, FnVirtual);
CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
@@ -864,18 +849,9 @@ llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
CGBuilderTy &Builder = CGF.Builder;
- // Cast to char*.
- Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
-
// Apply the offset, which we assume is non-null.
- llvm::Value *Addr = Builder.CreateInBoundsGEP(
- Base.getElementType(), Base.getPointer(), MemPtr, "memptr.offset");
-
- // Cast the address to the appropriate pointer type, adopting the
- // address space of the base pointer.
- llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
- ->getPointerTo(Base.getAddressSpace());
- return Builder.CreateBitCast(Addr, PType);
+ return Builder.CreateInBoundsGEP(CGF.Int8Ty, Base.getPointer(), MemPtr,
+ "memptr.offset");
}
/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
@@ -995,14 +971,16 @@ ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
adj = llvm::ConstantInt::get(adj->getType(), offset);
}
- llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
+ llvm::Constant *srcAdj = src->getAggregateElement(1);
llvm::Constant *dstAdj;
if (isDerivedToBase)
dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
else
dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
- return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
+ llvm::Constant *res = ConstantFoldInsertValueInstruction(src, dstAdj, 1);
+ assert(res != nullptr && "Folding must succeed");
+ return res;
}
llvm::Constant *
@@ -1048,7 +1026,7 @@ llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
} else {
const ASTContext &Context = getContext();
CharUnits PointerWidth = Context.toCharUnitsFromBits(
- Context.getTargetInfo().getPointerWidth(0));
+ Context.getTargetInfo().getPointerWidth(LangAS::Default));
VTableOffset = Index * PointerWidth.getQuantity();
}
@@ -1258,17 +1236,16 @@ void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
// Grab the vtable pointer as an intptr_t*.
auto *ClassDecl =
cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
- llvm::Value *VTable =
- CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);
+ llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.UnqualPtrTy, ClassDecl);
// Track back to entry -2 and pull out the offset there.
llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
CGF.IntPtrTy, VTable, -2, "complete-offset.ptr");
- llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr, CGF.getPointerAlign());
+ llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr,
+ CGF.getPointerAlign());
// Apply the offset.
- llvm::Value *CompletePtr =
- CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
+ llvm::Value *CompletePtr = Ptr.getPointer();
CompletePtr =
CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);
@@ -1296,7 +1273,7 @@ void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
if (isNoReturn)
- CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
+ CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, std::nullopt);
else
CGF.EmitRuntimeCallOrInvoke(Fn);
}
@@ -1314,7 +1291,7 @@ static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
// void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
// void (*dest) (void *));
- llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
+ llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.GlobalsInt8PtrTy, CGM.Int8PtrTy };
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
@@ -1332,7 +1309,8 @@ void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
- CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));
+ CGF.EmitAnyExprToExn(
+ E->getSubExpr(), Address(ExceptionPtr, CGM.Int8Ty, ExnAlign));
// Now throw the exception.
llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
@@ -1346,7 +1324,6 @@ void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
if (!Record->hasTrivialDestructor()) {
CXXDestructorDecl *DtorD = Record->getDestructor();
Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
- Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
}
}
if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
@@ -1357,21 +1334,23 @@ void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
// void *__dynamic_cast(const void *sub,
- // const abi::__class_type_info *src,
- // const abi::__class_type_info *dst,
+ // global_as const abi::__class_type_info *src,
+ // global_as const abi::__class_type_info *dst,
// std::ptrdiff_t src2dst_offset);
llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
+ llvm::Type *GlobInt8PtrTy = CGF.GlobalsInt8PtrTy;
llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
- llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
+ llvm::Type *Args[4] = { Int8PtrTy, GlobInt8PtrTy, GlobInt8PtrTy, PtrDiffTy };
llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
// Mark the function as nounwind readonly.
- llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
- llvm::Attribute::ReadOnly };
+ llvm::AttrBuilder FuncAttrs(CGF.getLLVMContext());
+ FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
+ FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
llvm::AttributeList Attrs = llvm::AttributeList::get(
CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
@@ -1461,18 +1440,14 @@ llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
llvm::Type *StdTypeInfoPtrTy) {
auto *ClassDecl =
cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
- llvm::Value *Value =
- CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);
+ llvm::Value *Value = CGF.GetVTablePtr(ThisPtr, CGM.GlobalsInt8PtrTy,
+ ClassDecl);
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
// Load the type info.
- Value = CGF.Builder.CreateBitCast(Value, CGM.Int8PtrTy);
Value = CGF.Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
{Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});
-
- // Setup to dereference again since this is a proxy we accessed.
- Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
} else {
// Load the type info.
Value =
@@ -1487,12 +1462,11 @@ bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
return SrcIsPtr;
}
-llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
+llvm::Value *ItaniumCXXABI::emitDynamicCastCall(
CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
llvm::Type *PtrDiffLTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
- llvm::Type *DestLTy = CGF.ConvertType(DestTy);
llvm::Value *SrcRTTI =
CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
@@ -1507,12 +1481,9 @@ llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());
// Emit the call to __dynamic_cast.
- llvm::Value *Value = ThisAddr.getPointer();
- Value = CGF.EmitCastToVoidPtr(Value);
-
- llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
- Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
- Value = CGF.Builder.CreateBitCast(Value, DestLTy);
+ llvm::Value *Args[] = {ThisAddr.getPointer(), SrcRTTI, DestRTTI, OffsetHint};
+ llvm::Value *Value =
+ CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), Args);
/// C++ [expr.dynamic.cast]p9:
/// A failed cast to reference type throws std::bad_cast
@@ -1530,18 +1501,94 @@ llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
return Value;
}
-llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
+llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
+ CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
+ QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastSuccess,
+ llvm::BasicBlock *CastFail) {
+ ASTContext &Context = getContext();
+
+ // Find all the inheritance paths.
+ const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
+ const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+ (void)DestDecl->isDerivedFrom(SrcDecl, Paths);
+
+ // Find an offset within `DestDecl` where a `SrcDecl` instance and its vptr
+ // might appear.
+ std::optional<CharUnits> Offset;
+ for (const CXXBasePath &Path : Paths) {
+ // dynamic_cast only finds public inheritance paths.
+ if (Path.Access != AS_public)
+ continue;
+
+ CharUnits PathOffset;
+ for (const CXXBasePathElement &PathElement : Path) {
+ // Find the offset along this inheritance step.
+ const CXXRecordDecl *Base =
+ PathElement.Base->getType()->getAsCXXRecordDecl();
+ if (PathElement.Base->isVirtual()) {
+ // For a virtual base class, we know that the derived class is exactly
+ // DestDecl, so we can use the vbase offset from its layout.
+ const ASTRecordLayout &L = Context.getASTRecordLayout(DestDecl);
+ PathOffset = L.getVBaseClassOffset(Base);
+ } else {
+ const ASTRecordLayout &L =
+ Context.getASTRecordLayout(PathElement.Class);
+ PathOffset += L.getBaseClassOffset(Base);
+ }
+ }
+
+ if (!Offset)
+ Offset = PathOffset;
+ else if (Offset != PathOffset) {
+ // Base appears in at least two different places. Find the most-derived
+ // object and see if it's a DestDecl. Note that the most-derived object
+ // must be at least as aligned as this base class subobject, and must
+ // have a vptr at offset 0.
+ ThisAddr = Address(emitDynamicCastToVoid(CGF, ThisAddr, SrcRecordTy),
+ CGF.VoidPtrTy, ThisAddr.getAlignment());
+ SrcDecl = DestDecl;
+ Offset = CharUnits::Zero();
+ break;
+ }
+ }
+
+ if (!Offset) {
+ // If there are no public inheritance paths, the cast always fails.
+ CGF.EmitBranch(CastFail);
+ return llvm::PoisonValue::get(CGF.VoidPtrTy);
+ }
+
+ // Compare the vptr against the expected vptr for the destination type at
+ // this offset. Note that we do not know what type ThisAddr points to in
+ // the case where the derived class multiply inherits from the base class
+ // so we can't use GetVTablePtr, so we load the vptr directly instead.
+ llvm::Instruction *VPtr = CGF.Builder.CreateLoad(
+ ThisAddr.withElementType(CGF.VoidPtrPtrTy), "vtable");
+ CGM.DecorateInstructionWithTBAA(
+ VPtr, CGM.getTBAAVTablePtrAccessInfo(CGF.VoidPtrPtrTy));
+ llvm::Value *Success = CGF.Builder.CreateICmpEQ(
+ VPtr, getVTableAddressPoint(BaseSubobject(SrcDecl, *Offset), DestDecl));
+ llvm::Value *Result = ThisAddr.getPointer();
+ if (!Offset->isZero())
+ Result = CGF.Builder.CreateInBoundsGEP(
+ CGF.CharTy, Result,
+ {llvm::ConstantInt::get(CGF.PtrDiffTy, -Offset->getQuantity())});
+ CGF.Builder.CreateCondBr(Success, CastSuccess, CastFail);
+ return Result;
+}
+
+llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
Address ThisAddr,
- QualType SrcRecordTy,
- QualType DestTy) {
- llvm::Type *DestLTy = CGF.ConvertType(DestTy);
+ QualType SrcRecordTy) {
auto *ClassDecl =
cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
llvm::Value *OffsetToTop;
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
// Get the vtable pointer.
llvm::Value *VTable =
- CGF.GetVTablePtr(ThisAddr, CGM.Int32Ty->getPointerTo(), ClassDecl);
+ CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);
// Get the offset-to-top from the vtable.
OffsetToTop =
@@ -1554,7 +1601,7 @@ llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
// Get the vtable pointer.
llvm::Value *VTable =
- CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(), ClassDecl);
+ CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);
// Get the offset-to-top from the vtable.
OffsetToTop =
@@ -1563,10 +1610,8 @@ llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
}
// Finally, add the offset to the pointer.
- llvm::Value *Value = ThisAddr.getPointer();
- Value = CGF.EmitCastToVoidPtr(Value);
- Value = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, Value, OffsetToTop);
- return CGF.Builder.CreateBitCast(Value, DestLTy);
+ return CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ThisAddr.getPointer(),
+ OffsetToTop);
}
bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
@@ -1593,14 +1638,10 @@ ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
llvm::Value *VBaseOffset;
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
- VBaseOffsetPtr =
- CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo());
VBaseOffset = CGF.Builder.CreateAlignedLoad(
CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
"vbase.offset");
} else {
- VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
- CGM.PtrDiffTy->getPointerTo());
VBaseOffset = CGF.Builder.CreateAlignedLoad(
CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
}
@@ -1631,12 +1672,14 @@ ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
// All parameters are already in place except VTT, which goes after 'this'.
// These are Clang types, so we don't need to worry about sret yet.
- // Check if we need to add a VTT parameter (which has type void **).
+ // Check if we need to add a VTT parameter (which has type global void **).
if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
: GD.getDtorType() == Dtor_Base) &&
cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
+ LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
+ QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS);
ArgTys.insert(ArgTys.begin() + 1,
- Context.getPointerType(Context.VoidPtrTy));
+ Context.getPointerType(CanQualType::CreateUnsafe(Q)));
return AddedStructorArgCounts::prefix(1);
}
return AddedStructorArgCounts{};
@@ -1669,10 +1712,12 @@ void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
ASTContext &Context = getContext();
// FIXME: avoid the fake decl
- QualType T = Context.getPointerType(Context.VoidPtrTy);
+ LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
+ QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS);
+ QualType T = Context.getPointerType(Q);
auto *VTTDecl = ImplicitParamDecl::Create(
Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
- T, ImplicitParamDecl::CXXVTT);
+ T, ImplicitParamKind::CXXVTT);
Params.insert(Params.begin() + 1, VTTDecl);
getStructorImplicitParamDecl(CGF) = VTTDecl;
}
@@ -1711,10 +1756,14 @@ CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
if (!NeedsVTTParameter(GlobalDecl(D, Type)))
return AddedStructorArgs{};
- // Insert the implicit 'vtt' argument as the second argument.
+ // Insert the implicit 'vtt' argument as the second argument. Make sure to
+ // correctly reflect its address space, which can differ from generic on
+ // some targets.
llvm::Value *VTT =
CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
- QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
+ LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
+ QualType Q = getContext().getAddrSpaceQualType(getContext().VoidPtrTy, AS);
+ QualType VTTTy = getContext().getPointerType(Q);
return AddedStructorArgs::prefix({{VTT, VTTTy}});
}
@@ -1802,8 +1851,11 @@ void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
}
}
- if (VTContext.isRelativeLayout() && !VTable->isDSOLocal())
- CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
+ if (VTContext.isRelativeLayout()) {
+ CGVT.RemoveHwasanMetadata(VTable);
+ if (!VTable->isDSOLocal())
+ CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
+ }
}
bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
@@ -1883,11 +1935,11 @@ llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
/// Load the VTT.
llvm::Value *VTT = CGF.LoadCXXVTT();
if (VirtualPointerIndex)
- VTT = CGF.Builder.CreateConstInBoundsGEP1_64(
- CGF.VoidPtrTy, VTT, VirtualPointerIndex);
+ VTT = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.GlobalsVoidPtrTy, VTT,
+ VirtualPointerIndex);
// And load the address point from the VTT.
- return CGF.Builder.CreateAlignedLoad(CGF.VoidPtrTy, VTT,
+ return CGF.Builder.CreateAlignedLoad(CGF.GlobalsVoidPtrTy, VTT,
CGF.getPointerAlign());
}
@@ -1915,16 +1967,17 @@ llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
CGM.getItaniumVTableContext().getVTableLayout(RD);
llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
- // Use pointer alignment for the vtable. Otherwise we would align them based
- // on the size of the initializer which doesn't make sense as only single
- // values are read.
+ // Use pointer to global alignment for the vtable. Otherwise we would align
+ // them based on the size of the initializer which doesn't make sense as only
+ // single values are read.
+ LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
? 32
- : CGM.getTarget().getPointerAlign(0);
+ : CGM.getTarget().getPointerAlign(AS);
VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
Name, VTableType, llvm::GlobalValue::ExternalLinkage,
- getContext().toCharUnitsFromBits(PAlign).getQuantity());
+ getContext().toCharUnitsFromBits(PAlign).getAsAlign());
VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
// In MS C++ if you have a class with virtual functions in which you are using
@@ -1955,35 +2008,31 @@ CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
Address This,
llvm::Type *Ty,
SourceLocation Loc) {
- llvm::Type *TyPtr = Ty->getPointerTo();
+ llvm::Type *PtrTy = CGM.GlobalsInt8PtrTy;
auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
- llvm::Value *VTable = CGF.GetVTablePtr(
- This, TyPtr->getPointerTo(), MethodDecl->getParent());
+ llvm::Value *VTable = CGF.GetVTablePtr(This, PtrTy, MethodDecl->getParent());
uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
llvm::Value *VFunc;
if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
VFunc = CGF.EmitVTableTypeCheckedLoad(
- MethodDecl->getParent(), VTable,
- VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
+ MethodDecl->getParent(), VTable, PtrTy,
+ VTableIndex *
+ CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default) /
+ 8);
} else {
CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
llvm::Value *VFuncLoad;
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
- VTable = CGF.Builder.CreateBitCast(VTable, CGM.Int8PtrTy);
- llvm::Value *Load = CGF.Builder.CreateCall(
+ VFuncLoad = CGF.Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
{VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
- VFuncLoad = CGF.Builder.CreateBitCast(Load, TyPtr);
} else {
- VTable =
- CGF.Builder.CreateBitCast(VTable, TyPtr->getPointerTo());
llvm::Value *VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
- TyPtr, VTable, VTableIndex, "vfn");
- VFuncLoad =
- CGF.Builder.CreateAlignedLoad(TyPtr, VTableSlotPtr,
- CGF.getPointerAlign());
+ PtrTy, VTable, VTableIndex, "vfn");
+ VFuncLoad = CGF.Builder.CreateAlignedLoad(PtrTy, VTableSlotPtr,
+ CGF.getPointerAlign());
}
// Add !invariant.load md to virtual function load to indicate that
@@ -2106,7 +2155,7 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
if (!NonVirtualAdjustment && !VirtualAdjustment)
return InitialPtr.getPointer();
- Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
+ Address V = InitialPtr.withElementType(CGF.Int8Ty);
// In a base-to-derived cast, the non-virtual adjustment is applied first.
if (NonVirtualAdjustment && !IsReturnAdjustment) {
@@ -2117,7 +2166,7 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
// Perform the virtual adjustment if we have one.
llvm::Value *ResultPtr;
if (VirtualAdjustment) {
- Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
+ Address VTablePtrPtr = V.withElementType(CGF.Int8PtrTy);
llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
llvm::Value *Offset;
@@ -2125,8 +2174,6 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
CGF.Int8Ty, VTablePtr, VirtualAdjustment);
if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
// Load the adjustment offset from the vtable as a 32-bit int.
- OffsetPtr =
- CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int32Ty->getPointerTo());
Offset =
CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
CharUnits::fromQuantity(4));
@@ -2134,9 +2181,6 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
- OffsetPtr =
- CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
-
// Load the adjustment offset from the vtable.
Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
CGF.getPointerAlign());
@@ -2155,8 +2199,7 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
NonVirtualAdjustment);
}
- // Cast back to the original type.
- return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
+ return ResultPtr;
}
llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
@@ -2219,8 +2262,7 @@ Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);
// Write the number of elements into the appropriate slot.
- Address NumElementsPtr =
- CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
+ Address NumElementsPtr = CookiePtr.withElementType(CGF.SizeTy);
llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);
// Handle the array cookie specially in ASan.
@@ -2228,7 +2270,7 @@ Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
(expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
// The store to the CookiePtr does not need to be instrumented.
- CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
+ SI->setNoSanitizeMetadata();
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
llvm::FunctionCallee F =
@@ -2252,7 +2294,7 @@ llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
unsigned AS = allocPtr.getAddressSpace();
- numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
+ numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy);
if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
return CGF.Builder.CreateLoad(numElementsPtr);
// In asan mode emit a function call instead of a regular load and let the
@@ -2261,7 +2303,7 @@ llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
// We can't simply ignore this load using nosanitize metadata because
// the metadata may be lost.
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
+ llvm::FunctionType::get(CGF.SizeTy, CGF.UnqualPtrTy, false);
llvm::FunctionCallee F =
CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
@@ -2291,7 +2333,7 @@ Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
Address cookie = newPtr;
// The first element is the element size.
- cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
+ cookie = cookie.withElementType(CGF.SizeTy);
llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
getContext().getTypeSizeInChars(elementType).getQuantity());
CGF.Builder.CreateStore(elementSize, cookie);
@@ -2314,7 +2356,7 @@ llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
Address numElementsPtr
= CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
- numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
+ numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy);
return CGF.Builder.CreateLoad(numElementsPtr);
}
@@ -2407,11 +2449,12 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
guardAlignment = CGF.getSizeAlign();
} else {
guardTy = CGF.Int64Ty;
- guardAlignment = CharUnits::fromQuantity(
- CGM.getDataLayout().getABITypeAlignment(guardTy));
+ guardAlignment =
+ CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlign(guardTy));
}
}
- llvm::PointerType *guardPtrTy = guardTy->getPointerTo(
+ llvm::PointerType *guardPtrTy = llvm::PointerType::get(
+ CGF.CGM.getLLVMContext(),
CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());
// Create the guard variable if we don't already have it (as we
@@ -2426,13 +2469,15 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
}
// Create the guard variable with a zero-initializer.
- // Just absorb linkage and visibility from the guarded variable.
+ // Just absorb linkage, visibility and dll storage class from the guarded
+ // variable.
guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
false, var->getLinkage(),
llvm::ConstantInt::get(guardTy, 0),
guardName.str());
guard->setDSOLocal(var->isDSOLocal());
guard->setVisibility(var->getVisibility());
+ guard->setDLLStorageClass(var->getDLLStorageClass());
// If the variable is thread-local, so is its guard variable.
guard->setThreadLocalMode(var->getThreadLocalMode());
guard->setAlignment(guardAlignment.getAsAlign());
@@ -2445,11 +2490,6 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
(CGM.getTarget().getTriple().isOSBinFormatELF() ||
CGM.getTarget().getTriple().isOSBinFormatWasm())) {
guard->setComdat(C);
- // An inline variable's guard function is run from the per-TU
- // initialization function, not via a dedicated global ctor function, so
- // we can't put it in a comdat.
- if (!NonTemplateInline)
- CGF.CurFn->setComdat(C);
} else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
}
@@ -2457,7 +2497,7 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
CGM.setStaticLocalDeclGuardAddress(&D, guard);
}
- Address guardAddr = Address(guard, guardAlignment);
+ Address guardAddr = Address(guard, guard->getValueType(), guardAlignment);
// Test whether the variable has completed initialization.
//
@@ -2475,54 +2515,76 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// __cxa_guard_release (&obj_guard);
// }
// }
-
- // Load the first byte of the guard variable.
- llvm::LoadInst *LI =
- Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
-
- // Itanium ABI:
- // An implementation supporting thread-safety on multiprocessor
- // systems must also guarantee that references to the initialized
- // object do not occur before the load of the initialization flag.
//
- // In LLVM, we do this by marking the load Acquire.
- if (threadsafe)
- LI->setAtomic(llvm::AtomicOrdering::Acquire);
+ // If threadsafe statics are enabled, but we don't have inline atomics, just
+ // call __cxa_guard_acquire unconditionally. The "inline" check isn't
+ // actually inline, and the user might not expect calls to __atomic libcalls.
- // For ARM, we should only check the first bit, rather than the entire byte:
- //
- // ARM C++ ABI 3.2.3.1:
- // To support the potential use of initialization guard variables
- // as semaphores that are the target of ARM SWP and LDREX/STREX
- // synchronizing instructions we define a static initialization
- // guard variable to be a 4-byte aligned, 4-byte word with the
- // following inline access protocol.
- // #define INITIALIZED 1
- // if ((obj_guard & INITIALIZED) != INITIALIZED) {
- // if (__cxa_guard_acquire(&obj_guard))
- // ...
- // }
- //
- // and similarly for ARM64:
- //
- // ARM64 C++ ABI 3.2.2:
- // This ABI instead only specifies the value bit 0 of the static guard
- // variable; all other bits are platform defined. Bit 0 shall be 0 when the
- // variable is not initialized and 1 when it is.
- llvm::Value *V =
- (UseARMGuardVarABI && !useInt8GuardVariable)
- ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
- : LI;
- llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");
-
- llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
+ unsigned MaxInlineWidthInBits = CGF.getTarget().getMaxAtomicInlineWidth();
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
+ if (!threadsafe || MaxInlineWidthInBits) {
+ // Load the first byte of the guard variable.
+ llvm::LoadInst *LI =
+ Builder.CreateLoad(guardAddr.withElementType(CGM.Int8Ty));
+
+ // Itanium ABI:
+ // An implementation supporting thread-safety on multiprocessor
+ // systems must also guarantee that references to the initialized
+ // object do not occur before the load of the initialization flag.
+ //
+ // In LLVM, we do this by marking the load Acquire.
+ if (threadsafe)
+ LI->setAtomic(llvm::AtomicOrdering::Acquire);
+
+ // For ARM, we should only check the first bit, rather than the entire byte:
+ //
+ // ARM C++ ABI 3.2.3.1:
+ // To support the potential use of initialization guard variables
+ // as semaphores that are the target of ARM SWP and LDREX/STREX
+ // synchronizing instructions we define a static initialization
+ // guard variable to be a 4-byte aligned, 4-byte word with the
+ // following inline access protocol.
+ // #define INITIALIZED 1
+ // if ((obj_guard & INITIALIZED) != INITIALIZED) {
+ // if (__cxa_guard_acquire(&obj_guard))
+ // ...
+ // }
+ //
+ // and similarly for ARM64:
+ //
+ // ARM64 C++ ABI 3.2.2:
+ // This ABI instead only specifies the value bit 0 of the static guard
+ // variable; all other bits are platform defined. Bit 0 shall be 0 when the
+ // variable is not initialized and 1 when it is.
+ llvm::Value *V =
+ (UseARMGuardVarABI && !useInt8GuardVariable)
+ ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
+ : LI;
+ llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");
+
+ llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
+
+ // Check if the first byte of the guard variable is zero.
+ CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
+ CodeGenFunction::GuardKind::VariableGuard, &D);
+
+ CGF.EmitBlock(InitCheckBlock);
+ }
- // Check if the first byte of the guard variable is zero.
- CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
- CodeGenFunction::GuardKind::VariableGuard, &D);
-
- CGF.EmitBlock(InitCheckBlock);
+ // The semantics of dynamic initialization of variables with static or thread
+ // storage duration depends on whether they are declared at block-scope. The
+ // initialization of such variables at block-scope can be aborted with an
+ // exception and later retried (per C++20 [stmt.dcl]p4), and recursive entry
+ // to their initialization has undefined behavior (also per C++20
+ // [stmt.dcl]p4). For such variables declared at non-block scope, exceptions
+ // lead to termination (per C++20 [except.terminate]p1), and recursive
+ // references to the variables are governed only by the lifetime rules (per
+ // C++20 [class.cdtor]p2), which means such references are perfectly fine as
+ // long as they avoid touching memory. As a result, block-scope variables must
+ // not be marked as initialized until after initialization completes (unless
+ // the mark is reverted following an exception), but non-block-scope variables
+ // must be marked prior to initialization so that recursive accesses during
+ // initialization do not restart initialization.
// Variables used when coping with thread-safe statics and exceptions.
if (threadsafe) {
@@ -2539,6 +2601,12 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
CGF.EmitBlock(InitBlock);
+ } else if (!D.isLocalVarDecl()) {
+ // For non-local variables, store 1 into the first byte of the guard
+ // variable before the object initialization begins so that references
+ // to the variable during initialization don't restart initialization.
+ Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
+ guardAddr.withElementType(CGM.Int8Ty));
}
// Emit the initializer and add a global destructor if appropriate.
@@ -2551,11 +2619,12 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
// Call __cxa_guard_release. This cannot throw.
CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
guardAddr.getPointer());
- } else {
- // Store 1 into the first byte of the guard variable after initialization is
- // complete.
+ } else if (D.isLocalVarDecl()) {
+ // For local variables, store 1 into the first byte of the guard variable
+ // after the object initialization completes so that initialization is
+ // retried if initialization is interrupted by an exception.
Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
- Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
+ guardAddr.withElementType(CGM.Int8Ty));
}
CGF.EmitBlock(EndBlock);
@@ -2576,15 +2645,13 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
}
// We're assuming that the destructor function is something we can
- // reasonably call with the default CC. Go ahead and cast it to the
- // right prototype.
- llvm::Type *dtorTy =
- llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
+ // reasonably call with the default CC.
+ llvm::Type *dtorTy = CGF.UnqualPtrTy;
// Preserve address space of addr.
auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
- auto AddrInt8PtrTy =
- AddrAS ? CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy;
+ auto AddrPtrTy = AddrAS ? llvm::PointerType::get(CGF.getLLVMContext(), AddrAS)
+ : CGF.Int8PtrTy;
// Create a variable that binds the atexit to this shared object.
llvm::Constant *handle =
@@ -2593,7 +2660,7 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
// extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
- llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()};
+ llvm::Type *paramTys[] = {dtorTy, AddrPtrTy, handle->getType()};
llvm::FunctionType *atexitTy =
llvm::FunctionType::get(CGF.IntTy, paramTys, false);
@@ -2609,10 +2676,7 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
// function.
addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);
- llvm::Value *args[] = {llvm::ConstantExpr::getBitCast(
- cast<llvm::Constant>(dtor.getCallee()), dtorTy),
- llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy),
- handle};
+ llvm::Value *args[] = {dtor.getCallee(), addr, handle};
CGF.EmitNounwindRuntimeCall(atexit, args);
}
@@ -2644,7 +2708,6 @@ void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
// Get the destructor function type, void(*)(void).
llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);
- llvm::Type *dtorTy = dtorFuncTy->getPointerTo();
// Destructor functions are run/unregistered in non-ascending
// order of their priorities.
@@ -2654,10 +2717,8 @@ void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
llvm::Function *Dtor = *itv;
// We're assuming that the destructor function is something we can
- // reasonably call with the correct CC. Go ahead and cast it to the
- // right prototype.
- llvm::Constant *dtor = llvm::ConstantExpr::getBitCast(Dtor, dtorTy);
- llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtor);
+ // reasonably call with the correct CC.
+ llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(Dtor);
llvm::Value *NeedsDestruct =
CGF.Builder.CreateIsNull(V, "needs_destruct");
@@ -2672,7 +2733,7 @@ void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
CGF.EmitBlock(DestructCallBlock);
// Emit the call to casted Dtor.
- llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, dtor);
+ llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, Dtor);
// Make sure the call and the callee agree on calling convention.
CI->setCallingConv(Dtor->getCallingConv());
@@ -2712,20 +2773,14 @@ void CodeGenModule::registerGlobalDtorsWithAtExit() {
if (getCodeGenOpts().CXAAtExit) {
emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
} else {
- // Get the destructor function type, void(*)(void).
- llvm::Type *dtorTy =
- llvm::FunctionType::get(CGF.VoidTy, false)->getPointerTo();
-
// We're assuming that the destructor function is something we can
- // reasonably call with the correct CC. Go ahead and cast it to the
- // right prototype.
- CGF.registerGlobalDtorWithAtExit(
- llvm::ConstantExpr::getBitCast(Dtor, dtorTy));
+ // reasonably call with the correct CC.
+ CGF.registerGlobalDtorWithAtExit(Dtor);
}
}
CGF.FinishFunction();
- AddGlobalCtor(GlobalInitFn, Priority, nullptr);
+ AddGlobalCtor(GlobalInitFn, Priority);
}
if (getCXXABI().useSinitAndSterm())
@@ -2739,6 +2794,14 @@ void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
if (D.isNoDestroy(CGM.getContext()))
return;
+ // OpenMP offloading supports C++ constructors and destructors but we do not
+ // always have 'atexit' available. Instead lower these to use the LLVM global
+ // destructors which we can handle directly in the runtime. Note that this is
+ // not strictly 1-to-1 with using `atexit` because we no longer tear down
+ // globals in reverse order of when they were constructed.
+ if (!CGM.getLangOpts().hasAtExit() && !D.isStaticLocal())
+ return CGF.registerGlobalDtorWithLLVM(D, dtor, addr);
+
// emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
// or __cxa_atexit depending on whether this VarDecl is a thread-local storage
// or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
@@ -2771,7 +2834,7 @@ static bool isThreadWrapperReplaceable(const VarDecl *VD,
static llvm::GlobalValue::LinkageTypes
getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
llvm::GlobalValue::LinkageTypes VarLinkage =
- CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
+ CGM.getLLVMLinkageVarDefinition(VD);
// For internal linkage variables, we don't need an external or weak wrapper.
if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
@@ -2872,7 +2935,7 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
Guard->setAlignment(GuardAlign.getAsAlign());
CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
- InitFunc, OrderedInits, ConstantAddress(Guard, GuardAlign));
+ InitFunc, OrderedInits, ConstantAddress(Guard, CGM.Int8Ty, GuardAlign));
// On Darwin platforms, use CXX_FAST_TLS calling convention.
if (CGM.getTarget().getTriple().isOSDarwin()) {
InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
@@ -2968,7 +3031,7 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
// also when the symbol is weak.
if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
isEmittedWithConstantInitializer(VD, true) &&
- !VD->needsDestruction(getContext())) {
+ !mayNeedDestruction(VD)) {
// Init should be null. If it were non-null, then the logic above would
// either be defining the function to be an alias or declaring the
// function with the expectation that the definition of the variable
@@ -3026,14 +3089,13 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
// For a reference, the result of the wrapper function is a pointer to
// the referenced object.
- llvm::Value *Val = Var;
+ llvm::Value *Val = Builder.CreateThreadLocalAddress(Var);
+
if (VD->getType()->isReferenceType()) {
CharUnits Align = CGM.getContext().getDeclAlign(VD);
- Val = Builder.CreateAlignedLoad(Var->getValueType(), Var, Align);
+ Val = Builder.CreateAlignedLoad(Var->getValueType(), Val, Align);
}
- if (Val->getType() != Wrapper->getReturnType())
- Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
- Val, Wrapper->getReturnType(), "");
+
Builder.CreateRet(Val);
}
}
@@ -3192,7 +3254,7 @@ llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
- Name, Init->getType(), Linkage, Align.getQuantity());
+ Name, Init->getType(), Linkage, Align.getAsAlign());
GV->setInitializer(Init);
@@ -3214,10 +3276,9 @@ ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
// Note for the future: If we would ever like to do deferred emission of
// RTTI, check if emitting vtables opportunistically need any adjustment.
- GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
- /*isConstant=*/true,
- llvm::GlobalValue::ExternalLinkage, nullptr,
- Name);
+ GV = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.GlobalsInt8PtrTy,
+ /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr, Name);
const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
CGM.setGVProperties(GV, RD);
// Import the typeinfo symbol when all non-inline virtual methods are
@@ -3230,7 +3291,7 @@ ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
}
}
- return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
+ return GV;
}
/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
@@ -3274,6 +3335,7 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
case BuiltinType::LongDouble:
case BuiltinType::Float16:
case BuiltinType::Float128:
+ case BuiltinType::Ibm128:
case BuiltinType::Char8:
case BuiltinType::Char16:
case BuiltinType::Char32:
@@ -3300,6 +3362,8 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
case BuiltinType::ShortAccum:
case BuiltinType::Accum:
case BuiltinType::LongAccum:
@@ -3520,7 +3584,7 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
llvm_unreachable("Pipe types shouldn't get here");
case Type::Builtin:
- case Type::ExtInt:
+ case Type::BitInt:
// GCC treats vector and complex types as fundamental types.
case Type::Vector:
case Type::ExtVector:
@@ -3577,7 +3641,7 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
}
assert(isa<ObjCInterfaceType>(Ty));
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Type::ObjCInterface:
if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
@@ -3604,8 +3668,10 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
// Check if the alias exists. If it doesn't, then get or create the global.
if (CGM.getItaniumVTableContext().isRelativeLayout())
VTable = CGM.getModule().getNamedAlias(VTableName);
- if (!VTable)
- VTable = CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
+ if (!VTable) {
+ llvm::Type *Ty = llvm::ArrayType::get(CGM.GlobalsInt8PtrTy, 0);
+ VTable = CGM.getModule().getOrInsertGlobal(VTableName, Ty);
+ }
CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));
@@ -3617,15 +3683,13 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
// The vtable address point is 8 bytes after its start:
// 4 for the offset to top + 4 for the relative offset to rtti.
llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
- VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
VTable =
llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
} else {
llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
- VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable,
- Two);
+ VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.GlobalsInt8PtrTy,
+ VTable, Two);
}
- VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
Fields.push_back(VTable);
}
@@ -3647,15 +3711,17 @@ static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
return llvm::GlobalValue::InternalLinkage;
switch (Ty->getLinkage()) {
- case NoLinkage:
- case InternalLinkage:
- case UniqueExternalLinkage:
+ case Linkage::Invalid:
+ llvm_unreachable("Linkage hasn't been computed!");
+
+ case Linkage::None:
+ case Linkage::Internal:
+ case Linkage::UniqueExternal:
return llvm::GlobalValue::InternalLinkage;
- case VisibleNoLinkage:
- case ModuleInternalLinkage:
- case ModuleLinkage:
- case ExternalLinkage:
+ case Linkage::VisibleNone:
+ case Linkage::Module:
+ case Linkage::External:
// RTTI is not enabled, which means that this type info struct is going
// to be used for exception handling. Give it linkonce_odr linkage.
if (!CGM.getLangOpts().RTTI)
@@ -3698,7 +3764,7 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
assert(!OldGV->hasAvailableExternallyLinkage() &&
"available_externally typeinfos not yet implemented");
- return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
+ return OldGV;
}
// Check if there is already an external RTTI descriptor for this type.
@@ -3723,12 +3789,14 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
llvm::GlobalValue::DefaultStorageClass;
- if (CGM.getTriple().isWindowsItaniumEnvironment()) {
- auto RD = Ty->getAsCXXRecordDecl();
- if (RD && RD->hasAttr<DLLExportAttr>())
+ if (auto RD = Ty->getAsCXXRecordDecl()) {
+ if ((CGM.getTriple().isWindowsItaniumEnvironment() &&
+ RD->hasAttr<DLLExportAttr>()) ||
+ (CGM.shouldMapVisibilityToDLLExport(RD) &&
+ !llvm::GlobalValue::isLocalLinkage(Linkage) &&
+ llvmVisibility == llvm::GlobalValue::DefaultVisibility))
DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
}
-
return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
}
@@ -3756,9 +3824,9 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
TypeNameField =
- llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
+ llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.GlobalsInt8PtrTy);
} else {
- TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
+ TypeNameField = TypeName;
}
Fields.push_back(TypeNameField);
@@ -3793,7 +3861,7 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
case Type::Pipe:
break;
- case Type::ExtInt:
+ case Type::BitInt:
break;
case Type::ConstantArray:
@@ -3878,17 +3946,15 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
// If there's already an old global variable, replace it with the new one.
if (OldGV) {
GV->takeName(OldGV);
- llvm::Constant *NewPtr =
- llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
- OldGV->replaceAllUsesWith(NewPtr);
+ OldGV->replaceAllUsesWith(GV);
OldGV->eraseFromParent();
}
if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
GV->setComdat(M.getOrInsertComdat(GV->getName()));
- CharUnits Align =
- CGM.getContext().toCharUnitsFromBits(CGM.getTarget().getPointerAlign(0));
+ CharUnits Align = CGM.getContext().toCharUnitsFromBits(
+ CGM.getTarget().getPointerAlign(CGM.GetGlobalVarAddressSpace(nullptr)));
GV->setAlignment(Align.getAsAlign());
// The Itanium ABI specifies that type_info objects must be globally
@@ -3920,7 +3986,7 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
- return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
+ return GV;
}
/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
@@ -4066,7 +4132,8 @@ void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
// LLP64 platforms.
QualType OffsetFlagsTy = CGM.getContext().LongTy;
const TargetInfo &TI = CGM.getContext().getTargetInfo();
- if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
+ if (TI.getTriple().isOSCygMing() &&
+ TI.getPointerWidth(LangAS::Default) > TI.getLongWidth())
OffsetFlagsTy = CGM.getContext().LongLongTy;
llvm::Type *OffsetFlagsLTy =
CGM.getTypes().ConvertType(OffsetFlagsTy);
@@ -4211,9 +4278,9 @@ void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
getContext().Char32Ty
};
llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
- RD->hasAttr<DLLExportAttr>()
- ? llvm::GlobalValue::DLLExportStorageClass
- : llvm::GlobalValue::DefaultStorageClass;
+ RD->hasAttr<DLLExportAttr>() || CGM.shouldMapVisibilityToDLLExport(RD)
+ ? llvm::GlobalValue::DLLExportStorageClass
+ : llvm::GlobalValue::DefaultStorageClass;
llvm::GlobalValue::VisibilityTypes Visibility =
CodeGenModule::GetLLVMVisibility(RD->getVisibility());
for (const QualType &FundamentalType : FundamentalTypes) {
@@ -4446,7 +4513,9 @@ namespace {
}
/// Emits a call to __cxa_begin_catch and enters a cleanup to call
-/// __cxa_end_catch.
+/// __cxa_end_catch. If -fassume-nothrow-exception-dtor is specified, we assume
+/// that the exception object's dtor is nothrow, therefore the __cxa_end_catch
+/// call can be marked as nounwind even if EndMightThrow is true.
///
/// \param EndMightThrow - true if __cxa_end_catch might throw
static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
@@ -4455,7 +4524,9 @@ static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
llvm::CallInst *call =
CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
- CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
+ CGF.EHStack.pushCleanup<CallEndCatch>(
+ NormalAndEHCleanup,
+ EndMightThrow && !CGF.CGM.getLangOpts().AssumeNothrowExceptionDtor);
return call;
}
@@ -4515,8 +4586,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
// pad. The best solution is to fix the personality function.
} else {
// Pull the pointer for the reference type off.
- llvm::Type *PtrTy =
- cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
+ llvm::Type *PtrTy = CGF.ConvertTypeForMem(CaughtType);
// Create the temporary and write the adjusted pointer into it.
Address ExnPtrTmp =
@@ -4549,7 +4619,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
switch (CatchType.getQualifiers().getObjCLifetime()) {
case Qualifiers::OCL_Strong:
CastExn = CGF.EmitARCRetainNonBlock(CastExn);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
@@ -4566,10 +4636,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
// Otherwise, it returns a pointer into the exception object.
- llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
- llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
-
- LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
+ LValue srcLV = CGF.MakeNaturalAlignAddrLValue(AdjustedExn, CatchType);
LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
switch (TEK) {
case TEK_Complex:
@@ -4591,7 +4658,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
auto catchRD = CatchType->getAsCXXRecordDecl();
CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
- llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
+ llvm::Type *PtrTy = CGF.UnqualPtrTy; // addrspace 0 ok
// Check for a copy expression. If we don't have a copy expression,
// that means a trivial copy is okay.
@@ -4599,7 +4666,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
if (!copyExpr) {
llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
- caughtExnAlignment);
+ LLVMCatchTy, caughtExnAlignment);
LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
@@ -4613,7 +4680,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
// Cast that to the appropriate type.
Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
- caughtExnAlignment);
+ LLVMCatchTy, caughtExnAlignment);
// The copy expression is defined in terms of an OpaqueValueExpr.
// Find it and map it to the adjusted expression.
@@ -4686,13 +4753,17 @@ void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
/// void @__clang_call_terminate(i8* %exn) nounwind noreturn
/// This code is used only in C++.
static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
- llvm::FunctionType *fnTy =
- llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false);
+ ASTContext &C = CGM.getContext();
+ const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
+ C.VoidTy, {C.getPointerType(C.CharTy)});
+ llvm::FunctionType *fnTy = CGM.getTypes().GetFunctionType(FI);
llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
llvm::Function *fn =
cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
if (fn->empty()) {
+ CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, fn, /*IsThunk=*/false);
+ CGM.SetLLVMFunctionAttributesForDefinition(nullptr, fn);
fn->setDoesNotThrow();
fn->setDoesNotReturn();
@@ -4775,14 +4846,11 @@ void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
llvm::FunctionCallee Dtor,
llvm::Constant *Addr) {
if (D.getTLSKind() != VarDecl::TLS_None) {
- // atexit routine expects "int(*)(int,...)"
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGM.IntTy, CGM.IntTy, true);
- llvm::PointerType *FpTy = FTy->getPointerTo();
+ llvm::PointerType *PtrTy = CGF.UnqualPtrTy;
// extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
llvm::FunctionType *AtExitTy =
- llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, FpTy}, true);
+ llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, PtrTy}, true);
// Fetch the actual function.
llvm::FunctionCallee AtExit =
diff --git a/contrib/llvm-project/clang/lib/CodeGen/LinkInModulesPass.cpp b/contrib/llvm-project/clang/lib/CodeGen/LinkInModulesPass.cpp
new file mode 100644
index 000000000000..6ce2b94c1db8
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/LinkInModulesPass.cpp
@@ -0,0 +1,29 @@
+//===-- LinkInModulesPass.cpp - Module Linking pass --------------- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// LinkInModulesPass implementation.
+///
+//===----------------------------------------------------------------------===//
+
+#include "LinkInModulesPass.h"
+#include "BackendConsumer.h"
+
+using namespace llvm;
+
+LinkInModulesPass::LinkInModulesPass(clang::BackendConsumer *BC,
+ bool ShouldLinkFiles)
+ : BC(BC), ShouldLinkFiles(ShouldLinkFiles) {}
+
+PreservedAnalyses LinkInModulesPass::run(Module &M, ModuleAnalysisManager &AM) {
+
+ if (BC && BC->LinkInModules(&M, ShouldLinkFiles))
+ report_fatal_error("Bitcode module linking failed, compilation aborted!");
+
+ return PreservedAnalyses::all();
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/LinkInModulesPass.h b/contrib/llvm-project/clang/lib/CodeGen/LinkInModulesPass.h
new file mode 100644
index 000000000000..7fe94d625058
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/LinkInModulesPass.h
@@ -0,0 +1,42 @@
+//===-- LinkInModulesPass.h - Module Linking pass ----------------- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file provides a pass to link in Modules from a provided
+/// BackendConsumer.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BITCODE_LINKINMODULESPASS_H
+#define LLVM_BITCODE_LINKINMODULESPASS_H
+
+#include "BackendConsumer.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class Module;
+class ModulePass;
+class Pass;
+
+/// Create and return a pass that links in Moduels from a provided
+/// BackendConsumer to a given primary Module. Note that this pass is designed
+/// for use with the legacy pass manager.
+class LinkInModulesPass : public PassInfoMixin<LinkInModulesPass> {
+ clang::BackendConsumer *BC;
+ bool ShouldLinkFiles;
+
+public:
+ LinkInModulesPass(clang::BackendConsumer *BC, bool ShouldLinkFiles = true);
+
+ PreservedAnalyses run(Module &M, AnalysisManager<Module> &);
+ static bool isRequired() { return true; }
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.cpp b/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.cpp
index 92800e738b62..8589869f6e2f 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.cpp
@@ -120,7 +120,7 @@ void MacroPPCallbacks::FileEntered(SourceLocation Loc) {
if (PP.getSourceManager().isWrittenInCommandLineFile(Loc))
return;
updateStatusToNextScope();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CommandLineIncludeScope:
EnteredCommandLineIncludeFiles++;
break;
@@ -167,7 +167,7 @@ void MacroPPCallbacks::FileChanged(SourceLocation Loc, FileChangeReason Reason,
void MacroPPCallbacks::InclusionDirective(
SourceLocation HashLoc, const Token &IncludeTok, StringRef FileName,
- bool IsAngled, CharSourceRange FilenameRange, const FileEntry *File,
+ bool IsAngled, CharSourceRange FilenameRange, OptionalFileEntryRef File,
StringRef SearchPath, StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h b/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h
index 32906a000269..5af177d0c3fa 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h
@@ -17,7 +17,6 @@
namespace llvm {
class DIMacroFile;
-class DIMacroNode;
}
namespace clang {
class Preprocessor;
@@ -101,9 +100,9 @@ public:
/// Callback invoked whenever a directive (#xxx) is processed.
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
- CharSourceRange FilenameRange, const FileEntry *File,
- StringRef SearchPath, StringRef RelativePath,
- const Module *Imported,
+ CharSourceRange FilenameRange,
+ OptionalFileEntryRef File, StringRef SearchPath,
+ StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) override;
/// Hook called whenever a macro definition is seen.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index 990648b131fe..172c4c937b97 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -13,6 +13,7 @@
//
//===----------------------------------------------------------------------===//
+#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGVTables.h"
@@ -47,7 +48,11 @@ public:
: CGCXXABI(CGM), BaseClassDescriptorType(nullptr),
ClassHierarchyDescriptorType(nullptr),
CompleteObjectLocatorType(nullptr), CatchableTypeType(nullptr),
- ThrowInfoType(nullptr) {}
+ ThrowInfoType(nullptr) {
+ assert(!(CGM.getLangOpts().isExplicitDefaultVisibilityExportMapping() ||
+ CGM.getLangOpts().isAllDefaultVisibilityExportMapping()) &&
+ "visibility export mapping option unimplemented in this ABI");
+ }
bool HasThisReturn(GlobalDecl GD) const override;
bool hasMostDerivedReturn(GlobalDecl GD) const override;
@@ -148,14 +153,25 @@ public:
bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
QualType SrcRecordTy) override;
- llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
+ bool shouldEmitExactDynamicCast(QualType DestRecordTy) override {
+ // TODO: Add support for exact dynamic_casts.
+ return false;
+ }
+ llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address Value,
+ QualType SrcRecordTy, QualType DestTy,
+ QualType DestRecordTy,
+ llvm::BasicBlock *CastSuccess,
+ llvm::BasicBlock *CastFail) override {
+ llvm_unreachable("unsupported");
+ }
+
+ llvm::Value *emitDynamicCastCall(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy, QualType DestTy,
QualType DestRecordTy,
llvm::BasicBlock *CastEnd) override;
- llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
- QualType SrcRecordTy,
- QualType DestTy) override;
+ llvm::Value *emitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
+ QualType SrcRecordTy) override;
bool EmitBadCastCall(CodeGenFunction &CGF) override;
bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override {
@@ -231,11 +247,24 @@ public:
void EmitCXXDestructors(const CXXDestructorDecl *D) override;
- const CXXRecordDecl *
- getThisArgumentTypeForMethod(const CXXMethodDecl *MD) override {
- if (MD->isVirtual() && !isa<CXXDestructorDecl>(MD)) {
+ const CXXRecordDecl *getThisArgumentTypeForMethod(GlobalDecl GD) override {
+ auto *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+ if (MD->isVirtual()) {
+ GlobalDecl LookupGD = GD;
+ if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ // Complete dtors take a pointer to the complete object,
+ // thus don't need adjustment.
+ if (GD.getDtorType() == Dtor_Complete)
+ return MD->getParent();
+
+ // There's only Dtor_Deleting in vftable but it shares the this
+ // adjustment with the base one, so look up the deleting one instead.
+ LookupGD = GlobalDecl(DD, Dtor_Deleting);
+ }
MethodVFTableLocation ML =
- CGM.getMicrosoftVTableContext().getMethodVFTableLocation(MD);
+ CGM.getMicrosoftVTableContext().getMethodVFTableLocation(LookupGD);
+
// The vbases might be ordered differently in the final overrider object
// and the complete object, so the "this" argument may sometimes point to
// memory that has no particular type (e.g. past the complete object).
@@ -285,7 +314,7 @@ public:
CodeGenFunction::VPtr Vptr) override;
/// Don't initialize vptrs if dynamic class
- /// is marked with with the 'novtable' attribute.
+ /// is marked with the 'novtable' attribute.
bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
return !VTableClass->hasAttr<MSNoVTableAttr>();
}
@@ -401,7 +430,9 @@ public:
ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
bool usesThreadWrapperFunction(const VarDecl *VD) const override {
- return false;
+ return getContext().getLangOpts().isCompatibleWithMSVC(
+ LangOptions::MSVC2019_5) &&
+ (!isEmittedWithConstantInitializer(VD) || mayNeedDestruction(VD));
}
LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
QualType LValType) override;
@@ -452,7 +483,7 @@ public:
friend struct MSRTTIBuilder;
bool isImageRelative() const {
- return CGM.getTarget().getPointerWidth(/*AddrSpace=*/0) == 64;
+ return CGM.getTarget().getPointerWidth(LangAS::Default) == 64;
}
// 5 routines for constructing the llvm types for MS RTTI structs.
@@ -776,7 +807,7 @@ public:
LoadVTablePtr(CodeGenFunction &CGF, Address This,
const CXXRecordDecl *RD) override;
- virtual bool
+ bool
isPermittedToBeHomogeneousAggregate(const CXXRecordDecl *RD) const override;
private:
@@ -797,9 +828,9 @@ private:
/// Info on the global variable used to guard initialization of static locals.
/// The BitIndex field is only used for externally invisible declarations.
struct GuardInfo {
- GuardInfo() : Guard(nullptr), BitIndex(0) {}
- llvm::GlobalVariable *Guard;
- unsigned BitIndex;
+ GuardInfo() = default;
+ llvm::GlobalVariable *Guard = nullptr;
+ unsigned BitIndex = 0;
};
/// Map from DeclContext to the current guard variable. We assume that the
@@ -847,7 +878,7 @@ MicrosoftCXXABI::getRecordArgABI(const CXXRecordDecl *RD) const {
// arguments was not supported and resulted in a compiler error. In 19.14
// and later versions, such arguments are now passed indirectly.
TypeInfo Info = getContext().getTypeInfo(RD->getTypeForDecl());
- if (Info.AlignIsRequired && Info.Align > 4)
+ if (Info.isAlignRequired() && Info.Align > 4)
return RAA_Indirect;
// If C++ prohibits us from making a copy, construct the arguments directly
@@ -917,7 +948,7 @@ void MicrosoftCXXABI::emitBeginCatch(CodeGenFunction &CGF,
std::tuple<Address, llvm::Value *, const CXXRecordDecl *>
MicrosoftCXXABI::performBaseAdjustment(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy) {
- Value = CGF.Builder.CreateBitCast(Value, CGF.Int8PtrTy);
+ Value = Value.withElementType(CGF.Int8Ty);
const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
const ASTContext &Context = getContext();
@@ -946,7 +977,8 @@ MicrosoftCXXABI::performBaseAdjustment(CodeGenFunction &CGF, Address Value,
Value.getElementType(), Value.getPointer(), Offset);
CharUnits VBaseAlign =
CGF.CGM.getVBaseAlignment(Value.getAlignment(), SrcDecl, PolymorphicBase);
- return std::make_tuple(Address(Ptr, VBaseAlign), Offset, PolymorphicBase);
+ return std::make_tuple(Address(Ptr, CGF.Int8Ty, VBaseAlign), Offset,
+ PolymorphicBase);
}
bool MicrosoftCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
@@ -990,11 +1022,9 @@ bool MicrosoftCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
!getContext().getASTRecordLayout(SrcDecl).hasExtendableVFPtr();
}
-llvm::Value *MicrosoftCXXABI::EmitDynamicCastCall(
- CodeGenFunction &CGF, Address This, QualType SrcRecordTy,
- QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
- llvm::Type *DestLTy = CGF.ConvertType(DestTy);
-
+llvm::Value *MicrosoftCXXABI::emitDynamicCastCall(
+ CodeGenFunction &CGF, Address This, QualType SrcRecordTy, QualType DestTy,
+ QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
llvm::Value *SrcRTTI =
CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
llvm::Value *DestRTTI =
@@ -1020,14 +1050,12 @@ llvm::Value *MicrosoftCXXABI::EmitDynamicCastCall(
llvm::Value *Args[] = {
ThisPtr, Offset, SrcRTTI, DestRTTI,
llvm::ConstantInt::get(CGF.Int32Ty, DestTy->isReferenceType())};
- ThisPtr = CGF.EmitRuntimeCallOrInvoke(Function, Args);
- return CGF.Builder.CreateBitCast(ThisPtr, DestLTy);
+ return CGF.EmitRuntimeCallOrInvoke(Function, Args);
}
-llvm::Value *
-MicrosoftCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
- QualType SrcRecordTy,
- QualType DestTy) {
+llvm::Value *MicrosoftCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
+ Address Value,
+ QualType SrcRecordTy) {
std::tie(Value, std::ignore, std::ignore) =
performBaseAdjustment(CGF, Value, SrcRecordTy);
@@ -1079,8 +1107,20 @@ bool MicrosoftCXXABI::hasMostDerivedReturn(GlobalDecl GD) const {
return isDeletingDtor(GD);
}
-static bool isTrivialForAArch64MSVC(const CXXRecordDecl *RD) {
- // For AArch64, we use the C++14 definition of an aggregate, so we also
+static bool isTrivialForMSVC(const CXXRecordDecl *RD, QualType Ty,
+ CodeGenModule &CGM) {
+ // On AArch64, HVAs that can be passed in registers can also be returned
+ // in registers. (Note this is using the MSVC definition of an HVA; see
+ // isPermittedToBeHomogeneousAggregate().)
+ const Type *Base = nullptr;
+ uint64_t NumElts = 0;
+ if (CGM.getTarget().getTriple().isAArch64() &&
+ CGM.getTypes().getABIInfo().isHomogeneousAggregate(Ty, Base, NumElts) &&
+ isa<VectorType>(Base)) {
+ return true;
+ }
+
+ // We use the C++14 definition of an aggregate, so we also
// check for:
// No private or protected non static data members.
// No base classes
@@ -1108,15 +1148,8 @@ bool MicrosoftCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
if (!RD)
return false;
- // Normally, the C++ concept of "is trivially copyable" is used to determine
- // if a struct can be returned directly. However, as MSVC and the language
- // have evolved, the definition of "trivially copyable" has changed, while the
- // ABI must remain stable. AArch64 uses the C++14 concept of an "aggregate",
- // while other ISAs use the older concept of "plain old data".
- bool isTrivialForABI = RD->isPOD();
- bool isAArch64 = CGM.getTarget().getTriple().isAArch64();
- if (isAArch64)
- isTrivialForABI = RD->canPassInRegisters() && isTrivialForAArch64MSVC(RD);
+ bool isTrivialForABI = RD->canPassInRegisters() &&
+ isTrivialForMSVC(RD, FI.getReturnType(), CGM);
// MSVC always returns structs indirectly from C++ instance methods.
bool isIndirectReturn = !isTrivialForABI || FI.isInstanceMethod();
@@ -1130,7 +1163,7 @@ bool MicrosoftCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
// On AArch64, use the `inreg` attribute if the object is considered to not
// be trivially copyable, or if this is an instance method struct return.
- FI.getReturnInfo().setInReg(isAArch64);
+ FI.getReturnInfo().setInReg(CGM.getTarget().getTriple().isAArch64());
return true;
}
@@ -1202,7 +1235,6 @@ void MicrosoftCXXABI::initializeHiddenVirtualInheritanceMembers(
const VBOffsets &VBaseMap = Layout.getVBaseOffsetsMap();
CGBuilderTy &Builder = CGF.Builder;
- unsigned AS = getThisAddress(CGF).getAddressSpace();
llvm::Value *Int8This = nullptr; // Initialize lazily.
for (const CXXBaseSpecifier &S : RD->vbases()) {
@@ -1223,14 +1255,12 @@ void MicrosoftCXXABI::initializeHiddenVirtualInheritanceMembers(
VtorDispValue = Builder.CreateTruncOrBitCast(VtorDispValue, CGF.Int32Ty);
if (!Int8This)
- Int8This = Builder.CreateBitCast(getThisValue(CGF),
- CGF.Int8Ty->getPointerTo(AS));
+ Int8This = getThisValue(CGF);
+
llvm::Value *VtorDispPtr =
Builder.CreateInBoundsGEP(CGF.Int8Ty, Int8This, VBaseOffset);
// vtorDisp is always the 32-bits before the vbase in the class layout.
VtorDispPtr = Builder.CreateConstGEP1_32(CGF.Int8Ty, VtorDispPtr, -4);
- VtorDispPtr = Builder.CreateBitCast(
- VtorDispPtr, CGF.Int32Ty->getPointerTo(AS), "vtordisp.ptr");
Builder.CreateAlignedStore(VtorDispValue, VtorDispPtr,
CharUnits::fromQuantity(4));
@@ -1267,7 +1297,7 @@ void MicrosoftCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
void MicrosoftCXXABI::EmitVBPtrStores(CodeGenFunction &CGF,
const CXXRecordDecl *RD) {
Address This = getThisAddress(CGF);
- This = CGF.Builder.CreateElementBitCast(This, CGM.Int8Ty, "this.int8");
+ This = This.withElementType(CGM.Int8Ty);
const ASTContext &Context = getContext();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
@@ -1284,8 +1314,7 @@ void MicrosoftCXXABI::EmitVBPtrStores(CodeGenFunction &CGF,
Address VBPtr = CGF.Builder.CreateConstInBoundsByteGEP(This, Offs);
llvm::Value *GVPtr =
CGF.Builder.CreateConstInBoundsGEP2_32(GV->getValueType(), GV, 0, 0);
- VBPtr = CGF.Builder.CreateElementBitCast(VBPtr, GVPtr->getType(),
- "vbptr." + VBT->ObjectWithVPtr->getName());
+ VBPtr = VBPtr.withElementType(GVPtr->getType());
CGF.Builder.CreateStore(GVPtr, VBPtr);
}
}
@@ -1347,8 +1376,7 @@ llvm::GlobalValue::LinkageTypes MicrosoftCXXABI::getCXXDestructorLinkage(
case Dtor_Base:
// The base destructor most closely tracks the user-declared constructor, so
// we delegate back to the normal declarator case.
- return CGM.getLLVMLinkageForDeclarator(Dtor, Linkage,
- /*IsConstantVariable=*/false);
+ return CGM.getLLVMLinkageForDeclarator(Dtor, Linkage);
case Dtor_Complete:
// The complete destructor is like an inline function, but it may be
// imported and therefore must be exported as well. This requires changing
@@ -1427,7 +1455,7 @@ Address MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
if (Adjustment.isZero())
return This;
- This = CGF.Builder.CreateElementBitCast(This, CGF.Int8Ty);
+ This = This.withElementType(CGF.Int8Ty);
assert(Adjustment.isPositive());
return CGF.Builder.CreateConstByteGEP(This, Adjustment);
}
@@ -1458,7 +1486,7 @@ Address MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
Address Result = This;
if (ML.VBase) {
- Result = CGF.Builder.CreateElementBitCast(Result, CGF.Int8Ty);
+ Result = Result.withElementType(CGF.Int8Ty);
const CXXRecordDecl *Derived = MD->getParent();
const CXXRecordDecl *VBase = ML.VBase;
@@ -1468,11 +1496,11 @@ Address MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
Result.getElementType(), Result.getPointer(), VBaseOffset);
CharUnits VBaseAlign =
CGF.CGM.getVBaseAlignment(Result.getAlignment(), Derived, VBase);
- Result = Address(VBasePtr, VBaseAlign);
+ Result = Address(VBasePtr, CGF.Int8Ty, VBaseAlign);
}
if (!StaticOffset.isZero()) {
assert(StaticOffset.isPositive());
- Result = CGF.Builder.CreateElementBitCast(Result, CGF.Int8Ty);
+ Result = Result.withElementType(CGF.Int8Ty);
if (ML.VBase) {
// Non-virtual adjustment might result in a pointer outside the allocated
// object, e.g. if the final overrider class is laid out after the virtual
@@ -1496,7 +1524,7 @@ void MicrosoftCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
auto *IsMostDerived = ImplicitParamDecl::Create(
Context, /*DC=*/nullptr, CGF.CurGD.getDecl()->getLocation(),
&Context.Idents.get("is_most_derived"), Context.IntTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
// The 'most_derived' parameter goes second if the ctor is variadic and last
// if it's not. Dtors can't be variadic.
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
@@ -1509,7 +1537,7 @@ void MicrosoftCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
auto *ShouldDelete = ImplicitParamDecl::Create(
Context, /*DC=*/nullptr, CGF.CurGD.getDecl()->getLocation(),
&Context.Idents.get("should_call_delete"), Context.IntTy,
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
Params.push_back(ShouldDelete);
getStructorImplicitParamDecl(CGF) = ShouldDelete;
}
@@ -1537,14 +1565,9 @@ void MicrosoftCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
if (!CGF.CurFuncIsThunk && MD->isVirtual()) {
CharUnits Adjustment = getVirtualFunctionPrologueThisAdjustment(CGF.CurGD);
if (!Adjustment.isZero()) {
- unsigned AS = cast<llvm::PointerType>(This->getType())->getAddressSpace();
- llvm::Type *charPtrTy = CGF.Int8Ty->getPointerTo(AS),
- *thisTy = This->getType();
- This = CGF.Builder.CreateBitCast(This, charPtrTy);
assert(Adjustment.isPositive());
This = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, This,
-Adjustment.getQuantity());
- This = CGF.Builder.CreateBitCast(This, thisTy, "this.adjusted");
}
}
setCXXABIThisValue(CGF, This);
@@ -1557,11 +1580,8 @@ void MicrosoftCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
// 1) getThisValue is currently protected
// 2) in theory, an ABI could implement 'this' returns some other way;
// HasThisReturn only specifies a contract, not the implementation
- if (HasThisReturn(CGF.CurGD))
+ if (HasThisReturn(CGF.CurGD) || hasMostDerivedReturn(CGF.CurGD))
CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
- else if (hasMostDerivedReturn(CGF.CurGD))
- CGF.Builder.CreateStore(CGF.EmitCastToVoidPtr(getThisValue(CGF)),
- CGF.ReturnValue);
if (isa<CXXConstructorDecl>(MD) && MD->getParent()->getNumVBases()) {
assert(getStructorImplicitParamDecl(CGF) &&
@@ -1653,7 +1673,11 @@ void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
void MicrosoftCXXABI::emitVTableTypeMetadata(const VPtrInfo &Info,
const CXXRecordDecl *RD,
llvm::GlobalVariable *VTable) {
- if (!CGM.getCodeGenOpts().LTOUnit)
+ // Emit type metadata on vtables with LTO or IR instrumentation.
+ // In IR instrumentation, the type metadata could be used to find out vtable
+ // definitions (for type profiling) among all global variables.
+ if (!CGM.getCodeGenOpts().LTOUnit &&
+ !CGM.getCodeGenOpts().hasProfileIRInstr())
return;
// TODO: Should VirtualFunctionElimination also be supported here?
@@ -1672,7 +1696,7 @@ void MicrosoftCXXABI::emitVTableTypeMetadata(const VPtrInfo &Info,
CharUnits AddressPoint =
getContext().getLangOpts().RTTIData
? getContext().toCharUnitsFromBits(
- getContext().getTargetInfo().getPointerWidth(0))
+ getContext().getTargetInfo().getPointerWidth(LangAS::Default))
: CharUnits::Zero();
if (Info.PathToIntroducingObject.empty()) {
@@ -1810,8 +1834,8 @@ llvm::GlobalVariable *MicrosoftCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
#endif
}
- const std::unique_ptr<VPtrInfo> *VFPtrI = std::find_if(
- VFPtrs.begin(), VFPtrs.end(), [&](const std::unique_ptr<VPtrInfo>& VPI) {
+ const std::unique_ptr<VPtrInfo> *VFPtrI =
+ llvm::find_if(VFPtrs, [&](const std::unique_ptr<VPtrInfo> &VPI) {
return VPI->FullOffsetInMDC == VPtrOffset;
});
if (VFPtrI == VFPtrs.end()) {
@@ -1844,7 +1868,7 @@ llvm::GlobalVariable *MicrosoftCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
VFTablesMap[ID] = VFTable;
VTable = VTableAliasIsRequred
? cast<llvm::GlobalVariable>(
- cast<llvm::GlobalAlias>(VFTable)->getBaseObject())
+ cast<llvm::GlobalAlias>(VFTable)->getAliaseeObject())
: cast<llvm::GlobalVariable>(VFTable);
return VTable;
}
@@ -1868,9 +1892,7 @@ llvm::GlobalVariable *MicrosoftCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
llvm::Comdat *C = nullptr;
if (!VFTableComesFromAnotherTU &&
- (llvm::GlobalValue::isWeakForLinker(VFTableLinkage) ||
- (llvm::GlobalValue::isLocalLinkage(VFTableLinkage) &&
- VTableAliasIsRequred)))
+ llvm::GlobalValue::isWeakForLinker(VFTableLinkage))
C = CGM.getModule().getOrInsertComdat(VFTableName.str());
// Only insert a pointer into the VFTable for RTTI data if we are not
@@ -1944,8 +1966,10 @@ CGCallee MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
llvm::Value *VFunc;
if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
VFunc = CGF.EmitVTableTypeCheckedLoad(
- getObjectWithVPtr(), VTable,
- ML.Index * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
+ getObjectWithVPtr(), VTable, Ty,
+ ML.Index *
+ CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default) /
+ 8);
} else {
if (CGM.getCodeGenOpts().PrepareForLTO)
CGF.EmitTypeMetadataCodeForVCall(getObjectWithVPtr(), VTable, Loc);
@@ -2076,6 +2100,8 @@ MicrosoftCXXABI::EmitVirtualMemPtrThunk(const CXXMethodDecl *MD,
// Start defining the function.
CGF.StartFunction(GlobalDecl(), FnInfo.getReturnType(), ThunkFn, FnInfo,
FunctionArgs, MD->getLocation(), SourceLocation());
+
+ ApplyDebugLocation AL(CGF, MD->getLocation());
setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
// Load the vfptr and then callee from the vftable. The callee should have
@@ -2120,7 +2146,7 @@ MicrosoftCXXABI::getAddrOfVBTable(const VPtrInfo &VBT, const CXXRecordDecl *RD,
CharUnits Alignment =
CGM.getContext().getTypeAlignInChars(CGM.getContext().IntTy);
llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
- Name, VBTableType, Linkage, Alignment.getQuantity());
+ Name, VBTableType, Linkage, Alignment.getAsAlign());
GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
if (RD->hasAttr<DLLImportAttr>())
@@ -2188,7 +2214,7 @@ llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
if (TA.isEmpty())
return This.getPointer();
- This = CGF.Builder.CreateElementBitCast(This, CGF.Int8Ty);
+ This = This.withElementType(CGF.Int8Ty);
llvm::Value *V;
if (TA.Virtual.isEmpty()) {
@@ -2199,7 +2225,7 @@ llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
Address VtorDispPtr =
CGF.Builder.CreateConstInBoundsByteGEP(This,
CharUnits::fromQuantity(TA.Virtual.Microsoft.VtordispOffset));
- VtorDispPtr = CGF.Builder.CreateElementBitCast(VtorDispPtr, CGF.Int32Ty);
+ VtorDispPtr = VtorDispPtr.withElementType(CGF.Int32Ty);
llvm::Value *VtorDisp = CGF.Builder.CreateLoad(VtorDispPtr, "vtordisp");
V = CGF.Builder.CreateGEP(This.getElementType(), This.getPointer(),
CGF.Builder.CreateNeg(VtorDisp));
@@ -2215,10 +2241,10 @@ llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
assert(TA.Virtual.Microsoft.VBPtrOffset > 0);
assert(TA.Virtual.Microsoft.VBOffsetOffset >= 0);
llvm::Value *VBPtr;
- llvm::Value *VBaseOffset =
- GetVBaseOffsetFromVBPtr(CGF, Address(V, CGF.getPointerAlign()),
- -TA.Virtual.Microsoft.VBPtrOffset,
- TA.Virtual.Microsoft.VBOffsetOffset, &VBPtr);
+ llvm::Value *VBaseOffset = GetVBaseOffsetFromVBPtr(
+ CGF, Address(V, CGF.Int8Ty, CGF.getPointerAlign()),
+ -TA.Virtual.Microsoft.VBPtrOffset,
+ TA.Virtual.Microsoft.VBOffsetOffset, &VBPtr);
V = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, VBPtr, VBaseOffset);
}
}
@@ -2240,8 +2266,7 @@ MicrosoftCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
if (RA.isEmpty())
return Ret.getPointer();
- auto OrigTy = Ret.getType();
- Ret = CGF.Builder.CreateElementBitCast(Ret, CGF.Int8Ty);
+ Ret = Ret.withElementType(CGF.Int8Ty);
llvm::Value *V = Ret.getPointer();
if (RA.Virtual.Microsoft.VBIndex) {
@@ -2257,8 +2282,7 @@ MicrosoftCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
if (RA.NonVirtual)
V = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, V, RA.NonVirtual);
- // Cast back to the original type.
- return CGF.Builder.CreateBitCast(V, OrigTy);
+ return V;
}
bool MicrosoftCXXABI::requiresArrayCookie(const CXXDeleteExpr *expr,
@@ -2285,8 +2309,7 @@ CharUnits MicrosoftCXXABI::getArrayCookieSizeImpl(QualType type) {
llvm::Value *MicrosoftCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
Address allocPtr,
CharUnits cookieSize) {
- Address numElementsPtr =
- CGF.Builder.CreateElementBitCast(allocPtr, CGF.SizeTy);
+ Address numElementsPtr = allocPtr.withElementType(CGF.SizeTy);
return CGF.Builder.CreateLoad(numElementsPtr);
}
@@ -2304,8 +2327,7 @@ Address MicrosoftCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
Address cookiePtr = newPtr;
// Write the number of elements into the appropriate slot.
- Address numElementsPtr
- = CGF.Builder.CreateElementBitCast(cookiePtr, CGF.SizeTy);
+ Address numElementsPtr = cookiePtr.withElementType(CGF.SizeTy);
CGF.Builder.CreateStore(numElements, numElementsPtr);
// Finally, compute a pointer to the actual data buffer by skipping
@@ -2341,6 +2363,10 @@ void MicrosoftCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
if (D.getTLSKind())
return emitGlobalDtorWithTLRegDtor(CGF, D, Dtor, Addr);
+ // HLSL doesn't support atexit.
+ if (CGM.getLangOpts().HLSL)
+ return CGM.AddCXXDtorEntry(Dtor, Addr);
+
// The default behavior is to use atexit.
CGF.registerGlobalDtorWithAtExit(D, Dtor, Addr);
}
@@ -2397,25 +2423,108 @@ void MicrosoftCXXABI::EmitThreadLocalInitFuncs(
}
}
+static llvm::GlobalValue *getTlsGuardVar(CodeGenModule &CGM) {
+ // __tls_guard comes from the MSVC runtime and reflects
+ // whether TLS has been initialized for a particular thread.
+ // It is set from within __dyn_tls_init by the runtime.
+ // Every library and executable has its own variable.
+ llvm::Type *VTy = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+ llvm::Constant *TlsGuardConstant =
+ CGM.CreateRuntimeVariable(VTy, "__tls_guard");
+ llvm::GlobalValue *TlsGuard = cast<llvm::GlobalValue>(TlsGuardConstant);
+
+ TlsGuard->setThreadLocal(true);
+
+ return TlsGuard;
+}
+
+static llvm::FunctionCallee getDynTlsOnDemandInitFn(CodeGenModule &CGM) {
+ // __dyn_tls_on_demand_init comes from the MSVC runtime and triggers
+ // dynamic TLS initialization by calling __dyn_tls_init internally.
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()), {},
+ /*isVarArg=*/false);
+ return CGM.CreateRuntimeFunction(
+ FTy, "__dyn_tls_on_demand_init",
+ llvm::AttributeList::get(CGM.getLLVMContext(),
+ llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::NoUnwind),
+ /*Local=*/true);
+}
+
+static void emitTlsGuardCheck(CodeGenFunction &CGF, llvm::GlobalValue *TlsGuard,
+ llvm::BasicBlock *DynInitBB,
+ llvm::BasicBlock *ContinueBB) {
+ llvm::LoadInst *TlsGuardValue =
+ CGF.Builder.CreateLoad(Address(TlsGuard, CGF.Int8Ty, CharUnits::One()));
+ llvm::Value *CmpResult =
+ CGF.Builder.CreateICmpEQ(TlsGuardValue, CGF.Builder.getInt8(0));
+ CGF.Builder.CreateCondBr(CmpResult, DynInitBB, ContinueBB);
+}
+
+static void emitDynamicTlsInitializationCall(CodeGenFunction &CGF,
+ llvm::GlobalValue *TlsGuard,
+ llvm::BasicBlock *ContinueBB) {
+ llvm::FunctionCallee Initializer = getDynTlsOnDemandInitFn(CGF.CGM);
+ llvm::Function *InitializerFunction =
+ cast<llvm::Function>(Initializer.getCallee());
+ llvm::CallInst *CallVal = CGF.Builder.CreateCall(InitializerFunction);
+ CallVal->setCallingConv(InitializerFunction->getCallingConv());
+
+ CGF.Builder.CreateBr(ContinueBB);
+}
+
+static void emitDynamicTlsInitialization(CodeGenFunction &CGF) {
+ llvm::BasicBlock *DynInitBB =
+ CGF.createBasicBlock("dyntls.dyn_init", CGF.CurFn);
+ llvm::BasicBlock *ContinueBB =
+ CGF.createBasicBlock("dyntls.continue", CGF.CurFn);
+
+ llvm::GlobalValue *TlsGuard = getTlsGuardVar(CGF.CGM);
+
+ emitTlsGuardCheck(CGF, TlsGuard, DynInitBB, ContinueBB);
+ CGF.Builder.SetInsertPoint(DynInitBB);
+ emitDynamicTlsInitializationCall(CGF, TlsGuard, ContinueBB);
+ CGF.Builder.SetInsertPoint(ContinueBB);
+}
+
LValue MicrosoftCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
const VarDecl *VD,
QualType LValType) {
- CGF.CGM.ErrorUnsupported(VD, "thread wrappers");
- return LValue();
+ // Dynamic TLS initialization works by checking the state of a
+ // guard variable (__tls_guard) to see whether TLS initialization
+ // for a thread has happend yet.
+ // If not, the initialization is triggered on-demand
+ // by calling __dyn_tls_on_demand_init.
+ emitDynamicTlsInitialization(CGF);
+
+ // Emit the variable just like any regular global variable.
+
+ llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
+ llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
+
+ CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
+ Address Addr(V, RealVarTy, Alignment);
+
+ LValue LV = VD->getType()->isReferenceType()
+ ? CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
+ AlignmentSource::Decl)
+ : CGF.MakeAddrLValue(Addr, LValType, AlignmentSource::Decl);
+ return LV;
}
static ConstantAddress getInitThreadEpochPtr(CodeGenModule &CGM) {
StringRef VarName("_Init_thread_epoch");
CharUnits Align = CGM.getIntAlign();
if (auto *GV = CGM.getModule().getNamedGlobal(VarName))
- return ConstantAddress(GV, Align);
+ return ConstantAddress(GV, GV->getValueType(), Align);
auto *GV = new llvm::GlobalVariable(
CGM.getModule(), CGM.IntTy,
/*isConstant=*/false, llvm::GlobalVariable::ExternalLinkage,
/*Initializer=*/nullptr, VarName,
/*InsertBefore=*/nullptr, llvm::GlobalVariable::GeneralDynamicTLSModel);
GV->setAlignment(Align.getAsAlign());
- return ConstantAddress(GV, Align);
+ return ConstantAddress(GV, GV->getValueType(), Align);
}
static llvm::FunctionCallee getInitThreadHeaderFn(CodeGenModule &CGM) {
@@ -2567,7 +2676,7 @@ void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
GI->Guard = GuardVar;
}
- ConstantAddress GuardAddr(GuardVar, GuardAlign);
+ ConstantAddress GuardAddr(GuardVar, GuardTy, GuardAlign);
assert(GuardVar->getLinkage() == GV->getLinkage() &&
"static local from the same function had different linkage");
@@ -2877,7 +2986,6 @@ MicrosoftCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
NonVirtualBaseAdjustment -= getContext().getOffsetOfBaseWithVBPtr(RD);
// The rest of the fields are common with data member pointers.
- FirstField = llvm::ConstantExpr::getBitCast(FirstField, CGM.VoidPtrTy);
return EmitFullMemberPointer(FirstField, /*IsMemberFunction=*/true, RD,
NonVirtualBaseAdjustment, VBTableIndex);
}
@@ -3015,12 +3123,10 @@ MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
llvm::Value **VBPtrOut) {
CGBuilderTy &Builder = CGF.Builder;
// Load the vbtable pointer from the vbptr in the instance.
- This = Builder.CreateElementBitCast(This, CGM.Int8Ty);
- llvm::Value *VBPtr = Builder.CreateInBoundsGEP(
- This.getElementType(), This.getPointer(), VBPtrOffset, "vbptr");
- if (VBPtrOut) *VBPtrOut = VBPtr;
- VBPtr = Builder.CreateBitCast(VBPtr,
- CGM.Int32Ty->getPointerTo(0)->getPointerTo(This.getAddressSpace()));
+ llvm::Value *VBPtr = Builder.CreateInBoundsGEP(CGM.Int8Ty, This.getPointer(),
+ VBPtrOffset, "vbptr");
+ if (VBPtrOut)
+ *VBPtrOut = VBPtr;
CharUnits VBPtrAlign;
if (auto CI = dyn_cast<llvm::ConstantInt>(VBPtrOffset)) {
@@ -3041,7 +3147,6 @@ MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
// Load an i32 offset from the vb-table.
llvm::Value *VBaseOffs =
Builder.CreateInBoundsGEP(CGM.Int32Ty, VBTable, VBTableIndex);
- VBaseOffs = Builder.CreateBitCast(VBaseOffs, CGM.Int32Ty->getPointerTo(0));
return Builder.CreateAlignedLoad(CGM.Int32Ty, VBaseOffs,
CharUnits::fromQuantity(4), "vbase_offs");
}
@@ -3052,7 +3157,7 @@ llvm::Value *MicrosoftCXXABI::AdjustVirtualBase(
CodeGenFunction &CGF, const Expr *E, const CXXRecordDecl *RD,
Address Base, llvm::Value *VBTableOffset, llvm::Value *VBPtrOffset) {
CGBuilderTy &Builder = CGF.Builder;
- Base = Builder.CreateElementBitCast(Base, CGM.Int8Ty);
+ Base = Base.withElementType(CGM.Int8Ty);
llvm::BasicBlock *OriginalBB = nullptr;
llvm::BasicBlock *SkipAdjustBB = nullptr;
llvm::BasicBlock *VBaseAdjustBB = nullptr;
@@ -3109,9 +3214,6 @@ llvm::Value *MicrosoftCXXABI::EmitMemberDataPointerAddress(
CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
const MemberPointerType *MPT) {
assert(MPT->isMemberDataPointer());
- unsigned AS = Base.getAddressSpace();
- llvm::Type *PType =
- CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS);
CGBuilderTy &Builder = CGF.Builder;
const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
@@ -3139,16 +3241,9 @@ llvm::Value *MicrosoftCXXABI::EmitMemberDataPointerAddress(
Addr = Base.getPointer();
}
- // Cast to char*.
- Addr = Builder.CreateBitCast(Addr, CGF.Int8Ty->getPointerTo(AS));
-
// Apply the offset, which we assume is non-null.
- Addr = Builder.CreateInBoundsGEP(CGF.Int8Ty, Addr, FieldOffset,
+ return Builder.CreateInBoundsGEP(CGF.Int8Ty, Addr, FieldOffset,
"memptr.offset");
-
- // Cast the address to the appropriate pointer type, adopting the address
- // space of the base pointer.
- return Builder.CreateBitCast(Addr, PType);
}
llvm::Value *
@@ -3405,8 +3500,6 @@ CGCallee MicrosoftCXXABI::EmitLoadOfMemberFunctionPointer(
const FunctionProtoType *FPT =
MPT->getPointeeType()->castAs<FunctionProtoType>();
const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl();
- llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
- CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
CGBuilderTy &Builder = CGF.Builder;
MSInheritanceModel Inheritance = RD->getMSInheritanceModel();
@@ -3436,16 +3529,10 @@ CGCallee MicrosoftCXXABI::EmitLoadOfMemberFunctionPointer(
ThisPtrForCall = This.getPointer();
}
- if (NonVirtualBaseAdjustment) {
- // Apply the adjustment and cast back to the original struct type.
- llvm::Value *Ptr = Builder.CreateBitCast(ThisPtrForCall, CGF.Int8PtrTy);
- Ptr = Builder.CreateInBoundsGEP(CGF.Int8Ty, Ptr, NonVirtualBaseAdjustment);
- ThisPtrForCall = Builder.CreateBitCast(Ptr, ThisPtrForCall->getType(),
- "this.adjusted");
- }
+ if (NonVirtualBaseAdjustment)
+ ThisPtrForCall = Builder.CreateInBoundsGEP(CGF.Int8Ty, ThisPtrForCall,
+ NonVirtualBaseAdjustment);
- FunctionPointer =
- Builder.CreateBitCast(FunctionPointer, FTy->getPointerTo());
CGCallee Callee(FPT, FunctionPointer);
return Callee;
}
@@ -3556,15 +3643,17 @@ uint32_t MSRTTIClass::initialize(const MSRTTIClass *Parent,
static llvm::GlobalValue::LinkageTypes getLinkageForRTTI(QualType Ty) {
switch (Ty->getLinkage()) {
- case NoLinkage:
- case InternalLinkage:
- case UniqueExternalLinkage:
+ case Linkage::Invalid:
+ llvm_unreachable("Linkage hasn't been computed!");
+
+ case Linkage::None:
+ case Linkage::Internal:
+ case Linkage::UniqueExternal:
return llvm::GlobalValue::InternalLinkage;
- case VisibleNoLinkage:
- case ModuleInternalLinkage:
- case ModuleLinkage:
- case ExternalLinkage:
+ case Linkage::VisibleNone:
+ case Linkage::Module:
+ case Linkage::External:
return llvm::GlobalValue::LinkOnceODRLinkage;
}
llvm_unreachable("Invalid linkage!");
@@ -3652,7 +3741,7 @@ llvm::GlobalVariable *MSRTTIBuilder::getClassHierarchyDescriptor() {
Classes.front().initialize(/*Parent=*/nullptr, /*Specifier=*/nullptr);
detectAmbiguousBases(Classes);
int Flags = 0;
- for (auto Class : Classes) {
+ for (const MSRTTIClass &Class : Classes) {
if (Class.RD->getNumBases() > 1)
Flags |= HasBranchingHierarchy;
// Note: cl.exe does not calculate "HasAmbiguousBases" correctly. We
@@ -3891,7 +3980,7 @@ llvm::Constant *MicrosoftCXXABI::getAddrOfRTTIDescriptor(QualType Type) {
// Check to see if we've already declared this TypeDescriptor.
if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(MangledName))
- return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
+ return GV;
// Note for the future: If we would ever like to do deferred emission of
// RTTI, check if emitting vtables opportunistically need any adjustment.
@@ -3917,7 +4006,7 @@ llvm::Constant *MicrosoftCXXABI::getAddrOfRTTIDescriptor(QualType Type) {
MangledName);
if (Var->isWeakForLinker())
Var->setComdat(CGM.getModule().getOrInsertComdat(Var->getName()));
- return llvm::ConstantExpr::getBitCast(Var, CGM.Int8PtrTy);
+ return Var;
}
/// Gets or a creates a Microsoft CompleteObjectLocator.
@@ -4001,7 +4090,7 @@ MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
&getContext().Idents.get("src"),
getContext().getLValueReferenceType(RecordTy,
/*SpelledAsLValue=*/true),
- ImplicitParamDecl::Other);
+ ImplicitParamKind::Other);
if (IsCopy)
FunctionArgs.push_back(&SrcParam);
@@ -4011,7 +4100,7 @@ MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
ImplicitParamDecl IsMostDerived(getContext(), /*DC=*/nullptr,
SourceLocation(),
&getContext().Idents.get("is_most_derived"),
- getContext().IntTy, ImplicitParamDecl::Other);
+ getContext().IntTy, ImplicitParamKind::Other);
// Only add the parameter to the list if the class has virtual bases.
if (RD->getNumVBases() > 0)
FunctionArgs.push_back(&IsMostDerived);
@@ -4049,7 +4138,7 @@ MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
CodeGenFunction::RunCleanupsScope Cleanups(CGF);
const auto *FPT = CD->getType()->castAs<FunctionProtoType>();
- CGF.EmitCallArgs(Args, FPT, llvm::makeArrayRef(ArgVec), CD, IsCopy ? 1 : 0);
+ CGF.EmitCallArgs(Args, FPT, llvm::ArrayRef(ArgVec), CD, IsCopy ? 1 : 0);
// Insert any ABI-specific implicit constructor arguments.
AddedStructorArgCounts ExtraArgs =
@@ -4110,8 +4199,6 @@ llvm::Constant *MicrosoftCXXABI::getCatchableType(QualType T,
CopyCtor = getAddrOfCXXCtorClosure(CD, Ctor_CopyingClosure);
else
CopyCtor = CGM.getAddrOfCXXStructor(GlobalDecl(CD, Ctor_Complete));
-
- CopyCtor = llvm::ConstantExpr::getBitCast(CopyCtor, CGM.Int8PtrTy);
} else {
CopyCtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
}
@@ -4257,10 +4344,10 @@ llvm::GlobalVariable *MicrosoftCXXABI::getCatchableTypeArray(QualType T) {
llvm::ArrayType *AT = llvm::ArrayType::get(CTType, NumEntries);
llvm::StructType *CTAType = getCatchableTypeArrayType(NumEntries);
llvm::Constant *Fields[] = {
- llvm::ConstantInt::get(CGM.IntTy, NumEntries), // NumEntries
+ llvm::ConstantInt::get(CGM.IntTy, NumEntries), // NumEntries
llvm::ConstantArray::get(
- AT, llvm::makeArrayRef(CatchableTypes.begin(),
- CatchableTypes.end())) // CatchableTypes
+ AT, llvm::ArrayRef(CatchableTypes.begin(),
+ CatchableTypes.end())) // CatchableTypes
};
SmallString<256> MangledName;
{
@@ -4321,14 +4408,11 @@ llvm::GlobalVariable *MicrosoftCXXABI::getThrowInfo(QualType T) {
if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
if (CXXDestructorDecl *DtorD = RD->getDestructor())
if (!DtorD->isTrivial())
- CleanupFn = llvm::ConstantExpr::getBitCast(
- CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete)),
- CGM.Int8PtrTy);
+ CleanupFn = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
// This is unused as far as we can tell, initialize it to null.
llvm::Constant *ForwardCompat =
getImageRelativeConstant(llvm::Constant::getNullValue(CGM.Int8PtrTy));
- llvm::Constant *PointerToCatchableTypes = getImageRelativeConstant(
- llvm::ConstantExpr::getBitCast(CTA, CGM.Int8PtrTy));
+ llvm::Constant *PointerToCatchableTypes = getImageRelativeConstant(CTA);
llvm::StructType *TIType = getThrowInfoType();
llvm::Constant *Fields[] = {
llvm::ConstantInt::get(CGM.IntTy, Flags), // Flags
@@ -4348,6 +4432,7 @@ llvm::GlobalVariable *MicrosoftCXXABI::getThrowInfo(QualType T) {
void MicrosoftCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
const Expr *SubExpr = E->getSubExpr();
+ assert(SubExpr && "SubExpr cannot be null");
QualType ThrowType = SubExpr->getType();
// The exception object lives on the stack and it's address is passed to the
// runtime function.
@@ -4361,7 +4446,7 @@ void MicrosoftCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
// Call into the runtime to throw the exception.
llvm::Value *Args[] = {
- CGF.Builder.CreateBitCast(AI.getPointer(), CGM.Int8PtrTy),
+ AI.getPointer(),
TI
};
CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(), Args);
@@ -4376,10 +4461,45 @@ MicrosoftCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
}
bool MicrosoftCXXABI::isPermittedToBeHomogeneousAggregate(
- const CXXRecordDecl *CXXRD) const {
- // MSVC Windows on Arm64 considers a type not HFA if it is not an
- // aggregate according to the C++14 spec. This is not consistent with the
- // AAPCS64, but is defacto spec on that platform.
- return !CGM.getTarget().getTriple().isAArch64() ||
- isTrivialForAArch64MSVC(CXXRD);
+ const CXXRecordDecl *RD) const {
+ // All aggregates are permitted to be HFA on non-ARM platforms, which mostly
+ // affects vectorcall on x64/x86.
+ if (!CGM.getTarget().getTriple().isAArch64())
+ return true;
+ // MSVC Windows on Arm64 has its own rules for determining if a type is HFA
+ // that are inconsistent with the AAPCS64 ABI. The following are our best
+ // determination of those rules so far, based on observation of MSVC's
+ // behavior.
+ if (RD->isEmpty())
+ return false;
+ if (RD->isPolymorphic())
+ return false;
+ if (RD->hasNonTrivialCopyAssignment())
+ return false;
+ if (RD->hasNonTrivialDestructor())
+ return false;
+ if (RD->hasNonTrivialDefaultConstructor())
+ return false;
+ // These two are somewhat redundant given the caller
+ // (ABIInfo::isHomogeneousAggregate) checks the bases and fields, but that
+ // caller doesn't consider empty bases/fields to be non-homogenous, but it
+ // looks like Microsoft's AArch64 ABI does care about these empty types &
+ // anything containing/derived from one is non-homogeneous.
+ // Instead we could add another CXXABI entry point to query this property and
+ // have ABIInfo::isHomogeneousAggregate use that property.
+ // I don't think any other of the features listed above could be true of a
+ // base/field while not true of the outer struct. For example, if you have a
+ // base/field that has an non-trivial copy assignment/dtor/default ctor, then
+ // the outer struct's corresponding operation must be non-trivial.
+ for (const CXXBaseSpecifier &B : RD->bases()) {
+ if (const CXXRecordDecl *FRD = B.getType()->getAsCXXRecordDecl()) {
+ if (!isPermittedToBeHomogeneousAggregate(FRD))
+ return false;
+ }
+ }
+ // empty fields seem to be caught by the ABIInfo::isHomogeneousAggregate
+ // checking for padding - but maybe there are ways to end up with an empty
+ // field without padding? Not that I know of, so don't check fields here &
+ // rely on the padding check.
+ return true;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp b/contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp
index b63f756ca288..3594f4c66e67 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp
@@ -23,6 +23,7 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <memory>
using namespace clang;
@@ -32,9 +33,10 @@ namespace {
class CodeGeneratorImpl : public CodeGenerator {
DiagnosticsEngine &Diags;
ASTContext *Ctx;
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS; // Only used for debug info.
const HeaderSearchOptions &HeaderSearchOpts; // Only used for debug info.
const PreprocessorOptions &PreprocessorOpts; // Only used for debug info.
- const CodeGenOptions CodeGenOpts; // Intentionally copied in.
+ const CodeGenOptions &CodeGenOpts;
unsigned HandlingTopLevelDecls;
@@ -74,11 +76,12 @@ namespace {
public:
CodeGeneratorImpl(DiagnosticsEngine &diags, llvm::StringRef ModuleName,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
const HeaderSearchOptions &HSO,
const PreprocessorOptions &PPO, const CodeGenOptions &CGO,
llvm::LLVMContext &C,
CoverageSourceInfo *CoverageInfo = nullptr)
- : Diags(diags), Ctx(nullptr), HeaderSearchOpts(HSO),
+ : Diags(diags), Ctx(nullptr), FS(std::move(FS)), HeaderSearchOpts(HSO),
PreprocessorOpts(PPO), CodeGenOpts(CGO), HandlingTopLevelDecls(0),
CoverageInfo(CoverageInfo),
M(new llvm::Module(ExpandModuleName(ModuleName, CGO), C)) {
@@ -122,6 +125,10 @@ namespace {
return D;
}
+ llvm::StringRef GetMangledName(GlobalDecl GD) {
+ return Builder->getMangledName(GD);
+ }
+
llvm::Constant *GetAddrOfGlobal(GlobalDecl global, bool isForDefinition) {
return Builder->GetAddrOfGlobal(global, ForDefinition_t(isForDefinition));
}
@@ -130,7 +137,14 @@ namespace {
llvm::LLVMContext &C) {
assert(!M && "Replacing existing Module?");
M.reset(new llvm::Module(ExpandModuleName(ModuleName, CodeGenOpts), C));
+
+ std::unique_ptr<CodeGenModule> OldBuilder = std::move(Builder);
+
Initialize(*Ctx);
+
+ if (OldBuilder)
+ OldBuilder->moveLazyEmissionStates(Builder.get());
+
return M.get();
}
@@ -142,7 +156,12 @@ namespace {
const auto &SDKVersion = Ctx->getTargetInfo().getSDKVersion();
if (!SDKVersion.empty())
M->setSDKVersion(SDKVersion);
- Builder.reset(new CodeGen::CodeGenModule(Context, HeaderSearchOpts,
+ if (const auto *TVT = Ctx->getTargetInfo().getDarwinTargetVariantTriple())
+ M->setDarwinTargetVariantTriple(TVT->getTriple());
+ if (auto TVSDKVersion =
+ Ctx->getTargetInfo().getDarwinTargetVariantSDKVersion())
+ M->setDarwinTargetVariantSDKVersion(*TVSDKVersion);
+ Builder.reset(new CodeGen::CodeGenModule(Context, FS, HeaderSearchOpts,
PreprocessorOpts, CodeGenOpts,
*M, Diags, CoverageInfo));
@@ -160,6 +179,7 @@ namespace {
}
bool HandleTopLevelDecl(DeclGroupRef DG) override {
+ // FIXME: Why not return false and abort parsing?
if (Diags.hasErrorOccurred())
return true;
@@ -325,6 +345,10 @@ const Decl *CodeGenerator::GetDeclForMangledName(llvm::StringRef name) {
return static_cast<CodeGeneratorImpl*>(this)->GetDeclForMangledName(name);
}
+llvm::StringRef CodeGenerator::GetMangledName(GlobalDecl GD) {
+ return static_cast<CodeGeneratorImpl *>(this)->GetMangledName(GD);
+}
+
llvm::Constant *CodeGenerator::GetAddrOfGlobal(GlobalDecl global,
bool isForDefinition) {
return static_cast<CodeGeneratorImpl*>(this)
@@ -336,11 +360,14 @@ llvm::Module *CodeGenerator::StartModule(llvm::StringRef ModuleName,
return static_cast<CodeGeneratorImpl*>(this)->StartModule(ModuleName, C);
}
-CodeGenerator *clang::CreateLLVMCodeGen(
- DiagnosticsEngine &Diags, llvm::StringRef ModuleName,
- const HeaderSearchOptions &HeaderSearchOpts,
- const PreprocessorOptions &PreprocessorOpts, const CodeGenOptions &CGO,
- llvm::LLVMContext &C, CoverageSourceInfo *CoverageInfo) {
- return new CodeGeneratorImpl(Diags, ModuleName, HeaderSearchOpts,
- PreprocessorOpts, CGO, C, CoverageInfo);
+CodeGenerator *
+clang::CreateLLVMCodeGen(DiagnosticsEngine &Diags, llvm::StringRef ModuleName,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
+ const HeaderSearchOptions &HeaderSearchOpts,
+ const PreprocessorOptions &PreprocessorOpts,
+ const CodeGenOptions &CGO, llvm::LLVMContext &C,
+ CoverageSourceInfo *CoverageInfo) {
+ return new CodeGeneratorImpl(Diags, ModuleName, std::move(FS),
+ HeaderSearchOpts, PreprocessorOpts, CGO, C,
+ CoverageInfo);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp b/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
index 1adf0ad9c0e5..ee543e40b460 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
@@ -27,10 +27,10 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
+#include "llvm/MC/TargetRegistry.h"
#include "llvm/Object/COFF.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Path.h"
-#include "llvm/Support/TargetRegistry.h"
#include <memory>
#include <utility>
@@ -45,6 +45,7 @@ class PCHContainerGenerator : public ASTConsumer {
const std::string OutputFileName;
ASTContext *Ctx;
ModuleMap &MMap;
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS;
const HeaderSearchOptions &HeaderSearchOpts;
const PreprocessorOptions &PreprocessorOpts;
CodeGenOptions CodeGenOpts;
@@ -96,13 +97,17 @@ class PCHContainerGenerator : public ASTConsumer {
}
bool VisitFunctionDecl(FunctionDecl *D) {
+ // Skip deduction guides.
+ if (isa<CXXDeductionGuideDecl>(D))
+ return true;
+
if (isa<CXXMethodDecl>(D))
// This is not yet supported. Constructing the `this' argument
// mandates a CodeGenFunction.
return true;
SmallVector<QualType, 16> ArgTypes;
- for (auto i : D->parameters())
+ for (auto *i : D->parameters())
ArgTypes.push_back(i->getType());
QualType RetTy = D->getReturnType();
QualType FnTy = Ctx.getFunctionType(RetTy, ArgTypes,
@@ -121,7 +126,7 @@ class PCHContainerGenerator : public ASTConsumer {
ArgTypes.push_back(D->getSelfType(Ctx, D->getClassInterface(),
selfIsPseudoStrong, selfIsConsumed));
ArgTypes.push_back(Ctx.getObjCSelType());
- for (auto i : D->parameters())
+ for (auto *i : D->parameters())
ArgTypes.push_back(i->getType());
QualType RetTy = D->getReturnType();
QualType FnTy = Ctx.getFunctionType(RetTy, ArgTypes,
@@ -140,6 +145,7 @@ public:
: Diags(CI.getDiagnostics()), MainFileName(MainFileName),
OutputFileName(OutputFileName), Ctx(nullptr),
MMap(CI.getPreprocessor().getHeaderSearchInfo().getModuleMap()),
+ FS(&CI.getVirtualFileSystem()),
HeaderSearchOpts(CI.getHeaderSearchOpts()),
PreprocessorOpts(CI.getPreprocessorOpts()),
TargetOpts(CI.getTargetOpts()), LangOpts(CI.getLangOpts()),
@@ -152,10 +158,14 @@ public:
// When building a module MainFileName is the name of the modulemap file.
CodeGenOpts.MainFileName =
LangOpts.CurrentModule.empty() ? MainFileName : LangOpts.CurrentModule;
- CodeGenOpts.setDebugInfo(codegenoptions::FullDebugInfo);
+ CodeGenOpts.setDebugInfo(llvm::codegenoptions::FullDebugInfo);
CodeGenOpts.setDebuggerTuning(CI.getCodeGenOpts().getDebuggerTuning());
+ CodeGenOpts.DwarfVersion = CI.getCodeGenOpts().DwarfVersion;
+ CodeGenOpts.DebugCompilationDir =
+ CI.getInvocation().getCodeGenOpts().DebugCompilationDir;
CodeGenOpts.DebugPrefixMap =
CI.getInvocation().getCodeGenOpts().DebugPrefixMap;
+ CodeGenOpts.DebugStrictDwarf = CI.getCodeGenOpts().DebugStrictDwarf;
}
~PCHContainerGenerator() override = default;
@@ -168,7 +178,7 @@ public:
M.reset(new llvm::Module(MainFileName, *VMContext));
M->setDataLayout(Ctx->getTargetInfo().getDataLayoutString());
Builder.reset(new CodeGen::CodeGenModule(
- *Ctx, HeaderSearchOpts, PreprocessorOpts, CodeGenOpts, *M, Diags));
+ *Ctx, FS, HeaderSearchOpts, PreprocessorOpts, CodeGenOpts, *M, Diags));
// Prepare CGDebugInfo to emit debug info for a clang module.
auto *DI = Builder->getModuleDebugInfo();
@@ -264,31 +274,48 @@ public:
std::string Error;
auto Triple = Ctx.getTargetInfo().getTriple();
if (!llvm::TargetRegistry::lookupTarget(Triple.getTriple(), Error))
- llvm::report_fatal_error(Error);
+ llvm::report_fatal_error(llvm::Twine(Error));
// Emit the serialized Clang AST into its own section.
assert(Buffer->IsComplete && "serialization did not complete");
auto &SerializedAST = Buffer->Data;
auto Size = SerializedAST.size();
- auto Int8Ty = llvm::Type::getInt8Ty(*VMContext);
- auto *Ty = llvm::ArrayType::get(Int8Ty, Size);
- auto *Data = llvm::ConstantDataArray::getString(
- *VMContext, StringRef(SerializedAST.data(), Size),
- /*AddNull=*/false);
- auto *ASTSym = new llvm::GlobalVariable(
- *M, Ty, /*constant*/ true, llvm::GlobalVariable::InternalLinkage, Data,
- "__clang_ast");
- // The on-disk hashtable needs to be aligned.
- ASTSym->setAlignment(llvm::Align(8));
-
- // Mach-O also needs a segment name.
- if (Triple.isOSBinFormatMachO())
- ASTSym->setSection("__CLANG,__clangast");
- // COFF has an eight character length limit.
- else if (Triple.isOSBinFormatCOFF())
- ASTSym->setSection("clangast");
- else
- ASTSym->setSection("__clangast");
+
+ if (Triple.isOSBinFormatWasm()) {
+ // Emit __clangast in custom section instead of named data segment
+ // to find it while iterating sections.
+ // This could be avoided if all data segements (the wasm sense) were
+ // represented as their own sections (in the llvm sense).
+ // TODO: https://github.com/WebAssembly/tool-conventions/issues/138
+ llvm::NamedMDNode *MD =
+ M->getOrInsertNamedMetadata("wasm.custom_sections");
+ llvm::Metadata *Ops[2] = {
+ llvm::MDString::get(*VMContext, "__clangast"),
+ llvm::MDString::get(*VMContext,
+ StringRef(SerializedAST.data(), Size))};
+ auto *NameAndContent = llvm::MDTuple::get(*VMContext, Ops);
+ MD->addOperand(NameAndContent);
+ } else {
+ auto Int8Ty = llvm::Type::getInt8Ty(*VMContext);
+ auto *Ty = llvm::ArrayType::get(Int8Ty, Size);
+ auto *Data = llvm::ConstantDataArray::getString(
+ *VMContext, StringRef(SerializedAST.data(), Size),
+ /*AddNull=*/false);
+ auto *ASTSym = new llvm::GlobalVariable(
+ *M, Ty, /*constant*/ true, llvm::GlobalVariable::InternalLinkage,
+ Data, "__clang_ast");
+ // The on-disk hashtable needs to be aligned.
+ ASTSym->setAlignment(llvm::Align(8));
+
+ // Mach-O also needs a segment name.
+ if (Triple.isOSBinFormatMachO())
+ ASTSym->setSection("__CLANG,__clangast");
+ // COFF has an eight character length limit.
+ else if (Triple.isOSBinFormatCOFF())
+ ASTSym->setSection("clangast");
+ else
+ ASTSym->setSection("__clangast");
+ }
LLVM_DEBUG({
// Print the IR for the PCH container to the debug output.
@@ -296,7 +323,7 @@ public:
clang::EmitBackendOutput(
Diags, HeaderSearchOpts, CodeGenOpts, TargetOpts, LangOpts,
Ctx.getTargetInfo().getDataLayoutString(), M.get(),
- BackendAction::Backend_EmitLL,
+ BackendAction::Backend_EmitLL, FS,
std::make_unique<llvm::raw_svector_ostream>(Buffer));
llvm::dbgs() << Buffer;
});
@@ -305,7 +332,7 @@ public:
clang::EmitBackendOutput(Diags, HeaderSearchOpts, CodeGenOpts, TargetOpts,
LangOpts,
Ctx.getTargetInfo().getDataLayoutString(), M.get(),
- BackendAction::Backend_EmitObj, std::move(OS));
+ BackendAction::Backend_EmitObj, FS, std::move(OS));
// Free the memory for the temporary buffer.
llvm::SmallVector<char, 0> Empty;
@@ -325,6 +352,11 @@ ObjectFilePCHContainerWriter::CreatePCHContainerGenerator(
CI, MainFileName, OutputFileName, std::move(OS), Buffer);
}
+ArrayRef<StringRef> ObjectFilePCHContainerReader::getFormats() const {
+ static StringRef Formats[] = {"obj", "raw"};
+ return Formats;
+}
+
StringRef
ObjectFilePCHContainerReader::ExtractPCH(llvm::MemoryBufferRef Buffer) const {
StringRef PCH;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/PatternInit.cpp b/contrib/llvm-project/clang/lib/CodeGen/PatternInit.cpp
index 26ac8b63a9ba..4400bc443688 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/PatternInit.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/PatternInit.cpp
@@ -43,8 +43,8 @@ llvm::Constant *clang::CodeGen::initializationPatternFor(CodeGenModule &CGM,
}
if (Ty->isPtrOrPtrVectorTy()) {
auto *PtrTy = cast<llvm::PointerType>(Ty->getScalarType());
- unsigned PtrWidth = CGM.getContext().getTargetInfo().getPointerWidth(
- PtrTy->getAddressSpace());
+ unsigned PtrWidth =
+ CGM.getDataLayout().getPointerSizeInBits(PtrTy->getAddressSpace());
if (PtrWidth > 64)
llvm_unreachable("pattern initialization of unsupported pointer width");
llvm::Type *IntTy = llvm::IntegerType::get(CGM.getLLVMContext(), PtrWidth);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.cpp b/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.cpp
index 009965a36c39..53161c316c58 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.cpp
@@ -22,84 +22,82 @@ using namespace CodeGen;
SanitizerMetadata::SanitizerMetadata(CodeGenModule &CGM) : CGM(CGM) {}
-static bool isAsanHwasanOrMemTag(const SanitizerSet& SS) {
+static bool isAsanHwasanOrMemTag(const SanitizerSet &SS) {
return SS.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress |
- SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress |
- SanitizerKind::MemTag);
+ SanitizerKind::HWAddress | SanitizerKind::MemTag);
}
-void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
- SourceLocation Loc, StringRef Name,
- QualType Ty, bool IsDynInit,
- bool IsExcluded) {
- if (!isAsanHwasanOrMemTag(CGM.getLangOpts().Sanitize))
+SanitizerMask expandKernelSanitizerMasks(SanitizerMask Mask) {
+ if (Mask & (SanitizerKind::Address | SanitizerKind::KernelAddress))
+ Mask |= SanitizerKind::Address | SanitizerKind::KernelAddress;
+ // Note: KHWASan doesn't support globals.
+ return Mask;
+}
+
+void SanitizerMetadata::reportGlobal(llvm::GlobalVariable *GV,
+ SourceLocation Loc, StringRef Name,
+ QualType Ty,
+ SanitizerMask NoSanitizeAttrMask,
+ bool IsDynInit) {
+ SanitizerSet FsanitizeArgument = CGM.getLangOpts().Sanitize;
+ if (!isAsanHwasanOrMemTag(FsanitizeArgument))
return;
- IsDynInit &= !CGM.isInNoSanitizeList(GV, Loc, Ty, "init");
- IsExcluded |= CGM.isInNoSanitizeList(GV, Loc, Ty);
-
- llvm::Metadata *LocDescr = nullptr;
- llvm::Metadata *GlobalName = nullptr;
- llvm::LLVMContext &VMContext = CGM.getLLVMContext();
- if (!IsExcluded) {
- // Don't generate source location and global name if it is on
- // the NoSanitizeList - it won't be instrumented anyway.
- LocDescr = getLocationMetadata(Loc);
- if (!Name.empty())
- GlobalName = llvm::MDString::get(VMContext, Name);
- }
-
- llvm::Metadata *GlobalMetadata[] = {
- llvm::ConstantAsMetadata::get(GV), LocDescr, GlobalName,
- llvm::ConstantAsMetadata::get(
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), IsDynInit)),
- llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- llvm::Type::getInt1Ty(VMContext), IsExcluded))};
-
- llvm::MDNode *ThisGlobal = llvm::MDNode::get(VMContext, GlobalMetadata);
- llvm::NamedMDNode *AsanGlobals =
- CGM.getModule().getOrInsertNamedMetadata("llvm.asan.globals");
- AsanGlobals->addOperand(ThisGlobal);
+
+ FsanitizeArgument.Mask = expandKernelSanitizerMasks(FsanitizeArgument.Mask);
+ NoSanitizeAttrMask = expandKernelSanitizerMasks(NoSanitizeAttrMask);
+ SanitizerSet NoSanitizeAttrSet = {NoSanitizeAttrMask &
+ FsanitizeArgument.Mask};
+
+ llvm::GlobalVariable::SanitizerMetadata Meta;
+ if (GV->hasSanitizerMetadata())
+ Meta = GV->getSanitizerMetadata();
+
+ Meta.NoAddress |= NoSanitizeAttrSet.hasOneOf(SanitizerKind::Address);
+ Meta.NoAddress |= CGM.isInNoSanitizeList(
+ FsanitizeArgument.Mask & SanitizerKind::Address, GV, Loc, Ty);
+
+ Meta.NoHWAddress |= NoSanitizeAttrSet.hasOneOf(SanitizerKind::HWAddress);
+ Meta.NoHWAddress |= CGM.isInNoSanitizeList(
+ FsanitizeArgument.Mask & SanitizerKind::HWAddress, GV, Loc, Ty);
+
+ Meta.Memtag |=
+ static_cast<bool>(FsanitizeArgument.Mask & SanitizerKind::MemtagGlobals);
+ Meta.Memtag &= !NoSanitizeAttrSet.hasOneOf(SanitizerKind::MemTag);
+ Meta.Memtag &= !CGM.isInNoSanitizeList(
+ FsanitizeArgument.Mask & SanitizerKind::MemTag, GV, Loc, Ty);
+
+ Meta.IsDynInit = IsDynInit && !Meta.NoAddress &&
+ FsanitizeArgument.has(SanitizerKind::Address) &&
+ !CGM.isInNoSanitizeList(SanitizerKind::Address |
+ SanitizerKind::KernelAddress,
+ GV, Loc, Ty, "init");
+
+ GV->setSanitizerMetadata(Meta);
}
-void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
- const VarDecl &D, bool IsDynInit) {
+void SanitizerMetadata::reportGlobal(llvm::GlobalVariable *GV, const VarDecl &D,
+ bool IsDynInit) {
if (!isAsanHwasanOrMemTag(CGM.getLangOpts().Sanitize))
return;
std::string QualName;
llvm::raw_string_ostream OS(QualName);
D.printQualifiedName(OS);
- bool IsExcluded = false;
- for (auto Attr : D.specific_attrs<NoSanitizeAttr>())
- if (Attr->getMask() & SanitizerKind::Address)
- IsExcluded = true;
- reportGlobalToASan(GV, D.getLocation(), OS.str(), D.getType(), IsDynInit,
- IsExcluded);
-}
+ auto getNoSanitizeMask = [](const VarDecl &D) {
+ if (D.hasAttr<DisableSanitizerInstrumentationAttr>())
+ return SanitizerKind::All;
-void SanitizerMetadata::disableSanitizerForGlobal(llvm::GlobalVariable *GV) {
- // For now, just make sure the global is not modified by the ASan
- // instrumentation.
- if (isAsanHwasanOrMemTag(CGM.getLangOpts().Sanitize))
- reportGlobalToASan(GV, SourceLocation(), "", QualType(), false, true);
-}
+ SanitizerMask NoSanitizeMask;
+ for (auto *Attr : D.specific_attrs<NoSanitizeAttr>())
+ NoSanitizeMask |= Attr->getMask();
-void SanitizerMetadata::disableSanitizerForInstruction(llvm::Instruction *I) {
- I->setMetadata(CGM.getModule().getMDKindID("nosanitize"),
- llvm::MDNode::get(CGM.getLLVMContext(), None));
+ return NoSanitizeMask;
+ };
+
+ reportGlobal(GV, D.getLocation(), OS.str(), D.getType(), getNoSanitizeMask(D),
+ IsDynInit);
}
-llvm::MDNode *SanitizerMetadata::getLocationMetadata(SourceLocation Loc) {
- PresumedLoc PLoc = CGM.getContext().getSourceManager().getPresumedLoc(Loc);
- if (!PLoc.isValid())
- return nullptr;
- llvm::LLVMContext &VMContext = CGM.getLLVMContext();
- llvm::Metadata *LocMetadata[] = {
- llvm::MDString::get(VMContext, PLoc.getFilename()),
- llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), PLoc.getLine())),
- llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), PLoc.getColumn())),
- };
- return llvm::MDNode::get(VMContext, LocMetadata);
+void SanitizerMetadata::disableSanitizerForGlobal(llvm::GlobalVariable *GV) {
+ reportGlobal(GV, SourceLocation(), "", QualType(), SanitizerKind::All);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h b/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h
index 440a54590acc..000f02cf8dcf 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h
@@ -14,13 +14,13 @@
#include "clang/AST/Type.h"
#include "clang/Basic/LLVM.h"
+#include "clang/Basic/Sanitizers.h"
#include "clang/Basic/SourceLocation.h"
namespace llvm {
class GlobalVariable;
class Instruction;
-class MDNode;
-}
+} // namespace llvm
namespace clang {
class VarDecl;
@@ -34,19 +34,18 @@ class SanitizerMetadata {
void operator=(const SanitizerMetadata &) = delete;
CodeGenModule &CGM;
+
public:
SanitizerMetadata(CodeGenModule &CGM);
- void reportGlobalToASan(llvm::GlobalVariable *GV, const VarDecl &D,
- bool IsDynInit = false);
- void reportGlobalToASan(llvm::GlobalVariable *GV, SourceLocation Loc,
- StringRef Name, QualType Ty, bool IsDynInit = false,
- bool IsExcluded = false);
+ void reportGlobal(llvm::GlobalVariable *GV, const VarDecl &D,
+ bool IsDynInit = false);
+ void reportGlobal(llvm::GlobalVariable *GV, SourceLocation Loc,
+ StringRef Name, QualType Ty = {},
+ SanitizerMask NoSanitizeAttrMask = {},
+ bool IsDynInit = false);
void disableSanitizerForGlobal(llvm::GlobalVariable *GV);
- void disableSanitizerForInstruction(llvm::Instruction *I);
-private:
- llvm::MDNode *getLocationMetadata(SourceLocation Loc);
};
-} // end namespace CodeGen
-} // end namespace clang
+} // end namespace CodeGen
+} // end namespace clang
#endif
diff --git a/contrib/llvm-project/clang/lib/CodeGen/SwiftCallingConv.cpp b/contrib/llvm-project/clang/lib/CodeGen/SwiftCallingConv.cpp
index 1d712f4fde3c..16fbf52a517d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/SwiftCallingConv.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/SwiftCallingConv.cpp
@@ -11,16 +11,18 @@
//===----------------------------------------------------------------------===//
#include "clang/CodeGen/SwiftCallingConv.h"
-#include "clang/Basic/TargetInfo.h"
+#include "ABIInfo.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
+#include "clang/Basic/TargetInfo.h"
+#include <optional>
using namespace clang;
using namespace CodeGen;
using namespace swiftcall;
static const SwiftABIInfo &getSwiftABIInfo(CodeGenModule &CGM) {
- return cast<SwiftABIInfo>(CGM.getTargetCodeGenInfo().getABIInfo());
+ return CGM.getTargetCodeGenInfo().getSwiftABIInfo();
}
static bool isPowerOf2(unsigned n) {
@@ -123,7 +125,7 @@ void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin,
const ASTRecordLayout &layout) {
// Unions are a special case.
if (record->isUnion()) {
- for (auto field : record->fields()) {
+ for (auto *field : record->fields()) {
if (field->isBitField()) {
addBitFieldData(field, begin, 0);
} else {
@@ -160,7 +162,7 @@ void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin,
}
// Add fields.
- for (auto field : record->fields()) {
+ for (auto *field : record->fields()) {
auto fieldOffsetInBits = layout.getFieldOffset(field->getFieldIndex());
if (field->isBitField()) {
addBitFieldData(field, begin, fieldOffsetInBits);
@@ -407,9 +409,10 @@ void SwiftAggLowering::splitVectorEntry(unsigned index) {
CharUnits begin = Entries[index].Begin;
for (unsigned i = 0; i != numElts; ++i) {
- Entries[index].Type = eltTy;
- Entries[index].Begin = begin;
- Entries[index].End = begin + eltSize;
+ unsigned idx = index + i;
+ Entries[idx].Type = eltTy;
+ Entries[idx].Begin = begin;
+ Entries[idx].End = begin + eltSize;
begin += eltSize;
}
}
@@ -438,7 +441,7 @@ static bool isMergeableEntryType(llvm::Type *type) {
// merge pointers, but (1) it doesn't currently matter in practice because
// the chunk size is never greater than the size of a pointer and (2)
// Swift IRGen uses integer types for a lot of things that are "really"
- // just storing pointers (like Optional<SomePointer>). If we ever have a
+ // just storing pointers (like std::optional<SomePointer>). If we ever have a
// target that would otherwise combine pointers, we should put some effort
// into fixing those cases in Swift IRGen and then call out pointer types
// here.
@@ -589,9 +592,8 @@ SwiftAggLowering::getCoerceAndExpandTypes() const {
hasPadding = true;
}
- if (!packed && !entry.Begin.isMultipleOf(
- CharUnits::fromQuantity(
- CGM.getDataLayout().getABITypeAlignment(entry.Type))))
+ if (!packed && !entry.Begin.isMultipleOf(CharUnits::fromQuantity(
+ CGM.getDataLayout().getABITypeAlign(entry.Type))))
packed = true;
elts.push_back(entry.Type);
@@ -630,9 +632,8 @@ bool SwiftAggLowering::shouldPassIndirectly(bool asReturnValue) const {
// Avoid copying the array of types when there's just a single element.
if (Entries.size() == 1) {
- return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(
- Entries.back().Type,
- asReturnValue);
+ return getSwiftABIInfo(CGM).shouldPassIndirectly(Entries.back().Type,
+ asReturnValue);
}
SmallVector<llvm::Type*, 8> componentTys;
@@ -640,31 +641,27 @@ bool SwiftAggLowering::shouldPassIndirectly(bool asReturnValue) const {
for (auto &entry : Entries) {
componentTys.push_back(entry.Type);
}
- return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(componentTys,
- asReturnValue);
+ return getSwiftABIInfo(CGM).shouldPassIndirectly(componentTys, asReturnValue);
}
bool swiftcall::shouldPassIndirectly(CodeGenModule &CGM,
ArrayRef<llvm::Type*> componentTys,
bool asReturnValue) {
- return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(componentTys,
- asReturnValue);
+ return getSwiftABIInfo(CGM).shouldPassIndirectly(componentTys, asReturnValue);
}
CharUnits swiftcall::getMaximumVoluntaryIntegerSize(CodeGenModule &CGM) {
// Currently always the size of an ordinary pointer.
return CGM.getContext().toCharUnitsFromBits(
- CGM.getContext().getTargetInfo().getPointerWidth(0));
+ CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default));
}
CharUnits swiftcall::getNaturalAlignment(CodeGenModule &CGM, llvm::Type *type) {
// For Swift's purposes, this is always just the store size of the type
// rounded up to a power of 2.
auto size = (unsigned long long) getTypeStoreSize(CGM, type).getQuantity();
- if (!isPowerOf2(size)) {
- size = 1ULL << (llvm::findLastSet(size, llvm::ZB_Undefined) + 1);
- }
- assert(size >= CGM.getDataLayout().getABITypeAlignment(type));
+ size = llvm::bit_ceil(size);
+ assert(CGM.getDataLayout().getABITypeAlign(type) <= size);
return CharUnits::fromQuantity(size);
}
@@ -698,8 +695,7 @@ bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
llvm::Type *eltTy, unsigned numElts) {
assert(numElts > 1 && "illegal vector length");
- return getSwiftABIInfo(CGM)
- .isLegalVectorTypeForSwift(vectorSize, eltTy, numElts);
+ return getSwiftABIInfo(CGM).isLegalVectorType(vectorSize, eltTy, numElts);
}
std::pair<llvm::Type*, unsigned>
@@ -733,7 +729,7 @@ void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize,
// The largest size that we're still considering making subvectors of.
// Always a power of 2.
- unsigned logCandidateNumElts = llvm::findLastSet(numElts, llvm::ZB_Undefined);
+ unsigned logCandidateNumElts = llvm::Log2_32(numElts);
unsigned candidateNumElts = 1U << logCandidateNumElts;
assert(candidateNumElts <= numElts && candidateNumElts * 2 > numElts);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
index d2cc0a699f43..60224d458f6a 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
@@ -13,227 +13,18 @@
#include "TargetInfo.h"
#include "ABIInfo.h"
-#include "CGBlocks.h"
-#include "CGCXXABI.h"
-#include "CGValue.h"
+#include "ABIInfoImpl.h"
#include "CodeGenFunction.h"
-#include "clang/AST/Attr.h"
-#include "clang/AST/RecordLayout.h"
#include "clang/Basic/CodeGenOptions.h"
-#include "clang/Basic/DiagnosticFrontend.h"
-#include "clang/Basic/Builtins.h"
#include "clang/CodeGen/CGFunctionInfo.h"
-#include "clang/CodeGen/SwiftCallingConv.h"
-#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
-#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/IntrinsicsNVPTX.h"
-#include "llvm/IR/IntrinsicsS390.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
-#include <algorithm> // std::sort
using namespace clang;
using namespace CodeGen;
-// Helper for coercing an aggregate argument or return value into an integer
-// array of the same size (including padding) and alignment. This alternate
-// coercion happens only for the RenderScript ABI and can be removed after
-// runtimes that rely on it are no longer supported.
-//
-// RenderScript assumes that the size of the argument / return value in the IR
-// is the same as the size of the corresponding qualified type. This helper
-// coerces the aggregate type into an array of the same size (including
-// padding). This coercion is used in lieu of expansion of struct members or
-// other canonical coercions that return a coerced-type of larger size.
-//
-// Ty - The argument / return value type
-// Context - The associated ASTContext
-// LLVMContext - The associated LLVMContext
-static ABIArgInfo coerceToIntArray(QualType Ty,
- ASTContext &Context,
- llvm::LLVMContext &LLVMContext) {
- // Alignment and Size are measured in bits.
- const uint64_t Size = Context.getTypeSize(Ty);
- const uint64_t Alignment = Context.getTypeAlign(Ty);
- llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
- const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
- return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
-}
-
-static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
- llvm::Value *Array,
- llvm::Value *Value,
- unsigned FirstIndex,
- unsigned LastIndex) {
- // Alternatively, we could emit this as a loop in the source.
- for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
- llvm::Value *Cell =
- Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
- Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
- }
-}
-
-static bool isAggregateTypeForABI(QualType T) {
- return !CodeGenFunction::hasScalarEvaluationKind(T) ||
- T->isMemberFunctionPointerType();
-}
-
-ABIArgInfo ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByVal,
- bool Realign,
- llvm::Type *Padding) const {
- return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty), ByVal,
- Realign, Padding);
-}
-
-ABIArgInfo
-ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
- return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
- /*ByVal*/ false, Realign);
-}
-
-Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- return Address::invalid();
-}
-
-bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
- if (Ty->isPromotableIntegerType())
- return true;
-
- if (const auto *EIT = Ty->getAs<ExtIntType>())
- if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy))
- return true;
-
- return false;
-}
-
-ABIInfo::~ABIInfo() {}
-
-/// Does the given lowering require more than the given number of
-/// registers when expanded?
-///
-/// This is intended to be the basis of a reasonable basic implementation
-/// of should{Pass,Return}IndirectlyForSwift.
-///
-/// For most targets, a limit of four total registers is reasonable; this
-/// limits the amount of code required in order to move around the value
-/// in case it wasn't produced immediately prior to the call by the caller
-/// (or wasn't produced in exactly the right registers) or isn't used
-/// immediately within the callee. But some targets may need to further
-/// limit the register count due to an inability to support that many
-/// return registers.
-static bool occupiesMoreThan(CodeGenTypes &cgt,
- ArrayRef<llvm::Type*> scalarTypes,
- unsigned maxAllRegisters) {
- unsigned intCount = 0, fpCount = 0;
- for (llvm::Type *type : scalarTypes) {
- if (type->isPointerTy()) {
- intCount++;
- } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
- auto ptrWidth = cgt.getTarget().getPointerWidth(0);
- intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
- } else {
- assert(type->isVectorTy() || type->isFloatingPointTy());
- fpCount++;
- }
- }
-
- return (intCount + fpCount > maxAllRegisters);
-}
-
-bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
- llvm::Type *eltTy,
- unsigned numElts) const {
- // The default implementation of this assumes that the target guarantees
- // 128-bit SIMD support but nothing more.
- return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16);
-}
-
-static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
- CGCXXABI &CXXABI) {
- const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
- if (!RD) {
- if (!RT->getDecl()->canPassInRegisters())
- return CGCXXABI::RAA_Indirect;
- return CGCXXABI::RAA_Default;
- }
- return CXXABI.getRecordArgABI(RD);
-}
-
-static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
- CGCXXABI &CXXABI) {
- const RecordType *RT = T->getAs<RecordType>();
- if (!RT)
- return CGCXXABI::RAA_Default;
- return getRecordArgABI(RT, CXXABI);
-}
-
-static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
- const ABIInfo &Info) {
- QualType Ty = FI.getReturnType();
-
- if (const auto *RT = Ty->getAs<RecordType>())
- if (!isa<CXXRecordDecl>(RT->getDecl()) &&
- !RT->getDecl()->canPassInRegisters()) {
- FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty);
- return true;
- }
-
- return CXXABI.classifyReturnType(FI);
-}
-
-/// Pass transparent unions as if they were the type of the first element. Sema
-/// should ensure that all elements of the union have the same "machine type".
-static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
- if (const RecordType *UT = Ty->getAsUnionType()) {
- const RecordDecl *UD = UT->getDecl();
- if (UD->hasAttr<TransparentUnionAttr>()) {
- assert(!UD->field_empty() && "sema created an empty transparent union");
- return UD->field_begin()->getType();
- }
- }
- return Ty;
-}
-
-CGCXXABI &ABIInfo::getCXXABI() const {
- return CGT.getCXXABI();
-}
-
-ASTContext &ABIInfo::getContext() const {
- return CGT.getContext();
-}
-
-llvm::LLVMContext &ABIInfo::getVMContext() const {
- return CGT.getLLVMContext();
-}
-
-const llvm::DataLayout &ABIInfo::getDataLayout() const {
- return CGT.getDataLayout();
-}
-
-const TargetInfo &ABIInfo::getTarget() const {
- return CGT.getTarget();
-}
-
-const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
- return CGT.getCodeGenOpts();
-}
-
-bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }
-
-bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
- return false;
-}
-
-bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
- uint64_t Members) const {
- return false;
-}
-
LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
raw_ostream &OS = llvm::errs();
OS << "(ABIArgInfo Kind=";
@@ -275,134 +66,8 @@ LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
OS << ")\n";
}
-// Dynamically round a pointer up to a multiple of the given alignment.
-static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
- llvm::Value *Ptr,
- CharUnits Align) {
- llvm::Value *PtrAsInt = Ptr;
- // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
- PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
- PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
- llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
- PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
- llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
- PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
- Ptr->getType(),
- Ptr->getName() + ".aligned");
- return PtrAsInt;
-}
-
-/// Emit va_arg for a platform using the common void* representation,
-/// where arguments are simply emitted in an array of slots on the stack.
-///
-/// This version implements the core direct-value passing rules.
-///
-/// \param SlotSize - The size and alignment of a stack slot.
-/// Each argument will be allocated to a multiple of this number of
-/// slots, and all the slots will be aligned to this value.
-/// \param AllowHigherAlign - The slot alignment is not a cap;
-/// an argument type with an alignment greater than the slot size
-/// will be emitted on a higher-alignment address, potentially
-/// leaving one or more empty slots behind as padding. If this
-/// is false, the returned address might be less-aligned than
-/// DirectAlign.
-static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
- Address VAListAddr,
- llvm::Type *DirectTy,
- CharUnits DirectSize,
- CharUnits DirectAlign,
- CharUnits SlotSize,
- bool AllowHigherAlign) {
- // Cast the element type to i8* if necessary. Some platforms define
- // va_list as a struct containing an i8* instead of just an i8*.
- if (VAListAddr.getElementType() != CGF.Int8PtrTy)
- VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);
-
- llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");
-
- // If the CC aligns values higher than the slot size, do so if needed.
- Address Addr = Address::invalid();
- if (AllowHigherAlign && DirectAlign > SlotSize) {
- Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
- DirectAlign);
- } else {
- Addr = Address(Ptr, SlotSize);
- }
-
- // Advance the pointer past the argument, then store that back.
- CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
- Address NextPtr =
- CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next");
- CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
-
- // If the argument is smaller than a slot, and this is a big-endian
- // target, the argument will be right-adjusted in its slot.
- if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
- !DirectTy->isStructTy()) {
- Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
- }
-
- Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
- return Addr;
-}
-
-/// Emit va_arg for a platform using the common void* representation,
-/// where arguments are simply emitted in an array of slots on the stack.
-///
-/// \param IsIndirect - Values of this type are passed indirectly.
-/// \param ValueInfo - The size and alignment of this type, generally
-/// computed with getContext().getTypeInfoInChars(ValueTy).
-/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
-/// Each argument will be allocated to a multiple of this number of
-/// slots, and all the slots will be aligned to this value.
-/// \param AllowHigherAlign - The slot alignment is not a cap;
-/// an argument type with an alignment greater than the slot size
-/// will be emitted on a higher-alignment address, potentially
-/// leaving one or more empty slots behind as padding.
-static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType ValueTy, bool IsIndirect,
- TypeInfoChars ValueInfo,
- CharUnits SlotSizeAndAlign,
- bool AllowHigherAlign) {
- // The size and alignment of the value that was passed directly.
- CharUnits DirectSize, DirectAlign;
- if (IsIndirect) {
- DirectSize = CGF.getPointerSize();
- DirectAlign = CGF.getPointerAlign();
- } else {
- DirectSize = ValueInfo.Width;
- DirectAlign = ValueInfo.Align;
- }
-
- // Cast the address we've calculated to the right type.
- llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
- if (IsIndirect)
- DirectTy = DirectTy->getPointerTo(0);
-
- Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
- DirectSize, DirectAlign,
- SlotSizeAndAlign,
- AllowHigherAlign);
-
- if (IsIndirect) {
- Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.Align);
- }
-
- return Addr;
-
-}
-
-static Address emitMergePHI(CodeGenFunction &CGF,
- Address Addr1, llvm::BasicBlock *Block1,
- Address Addr2, llvm::BasicBlock *Block2,
- const llvm::Twine &Name = "") {
- assert(Addr1.getType() == Addr2.getType());
- llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
- PHI->addIncoming(Addr1.getPointer(), Block1);
- PHI->addIncoming(Addr2.getPointer(), Block2);
- CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
- return Address(PHI, Align);
-}
+TargetCodeGenInfo::TargetCodeGenInfo(std::unique_ptr<ABIInfo> Info)
+ : Info(std::move(Info)) {}
TargetCodeGenInfo::~TargetCodeGenInfo() = default;
@@ -412,7 +77,7 @@ unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
// Verified for:
// x86-64 FreeBSD, Linux, Darwin
// x86-32 FreeBSD, Linux, Darwin
- // PowerPC Linux, Darwin
+ // PowerPC Linux
// ARM Darwin (*not* EABI)
// AArch64 Linux
return 32;
@@ -472,7 +137,7 @@ llvm::Value *TargetCodeGenInfo::performAddrSpaceCast(
if (auto *C = dyn_cast<llvm::Constant>(Src))
return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy);
// Try to preserve the source's name to make IR more readable.
- return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ return CGF.Builder.CreateAddrSpaceCast(
Src, DestTy, Src->hasName() ? Src->getName() + ".ascast" : "");
}
@@ -493,2157 +158,9 @@ TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
return Ctx.getOrInsertSyncScopeID(""); /* default sync scope */
}
-static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
-
-/// isEmptyField - Return true iff a the field is "empty", that is it
-/// is an unnamed bit-field or an (array of) empty record(s).
-static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
- bool AllowArrays) {
- if (FD->isUnnamedBitfield())
- return true;
-
- QualType FT = FD->getType();
-
- // Constant arrays of empty records count as empty, strip them off.
- // Constant arrays of zero length always count as empty.
- bool WasArray = false;
- if (AllowArrays)
- while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
- if (AT->getSize() == 0)
- return true;
- FT = AT->getElementType();
- // The [[no_unique_address]] special case below does not apply to
- // arrays of C++ empty records, so we need to remember this fact.
- WasArray = true;
- }
-
- const RecordType *RT = FT->getAs<RecordType>();
- if (!RT)
- return false;
-
- // C++ record fields are never empty, at least in the Itanium ABI.
- //
- // FIXME: We should use a predicate for whether this behavior is true in the
- // current ABI.
- //
- // The exception to the above rule are fields marked with the
- // [[no_unique_address]] attribute (since C++20). Those do count as empty
- // according to the Itanium ABI. The exception applies only to records,
- // not arrays of records, so we must also check whether we stripped off an
- // array type above.
- if (isa<CXXRecordDecl>(RT->getDecl()) &&
- (WasArray || !FD->hasAttr<NoUniqueAddressAttr>()))
- return false;
-
- return isEmptyRecord(Context, FT, AllowArrays);
-}
-
-/// isEmptyRecord - Return true iff a structure contains only empty
-/// fields. Note that a structure with a flexible array member is not
-/// considered empty.
-static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
- const RecordType *RT = T->getAs<RecordType>();
- if (!RT)
- return false;
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return false;
-
- // If this is a C++ record, check the bases first.
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
- for (const auto &I : CXXRD->bases())
- if (!isEmptyRecord(Context, I.getType(), true))
- return false;
-
- for (const auto *I : RD->fields())
- if (!isEmptyField(Context, I, AllowArrays))
- return false;
- return true;
-}
-
-/// isSingleElementStruct - Determine if a structure is a "single
-/// element struct", i.e. it has exactly one non-empty field or
-/// exactly one field which is itself a single element
-/// struct. Structures with flexible array members are never
-/// considered single element structs.
-///
-/// \return The field declaration for the single non-empty field, if
-/// it exists.
-static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
- const RecordType *RT = T->getAs<RecordType>();
- if (!RT)
- return nullptr;
-
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return nullptr;
-
- const Type *Found = nullptr;
-
- // If this is a C++ record, check the bases first.
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
- for (const auto &I : CXXRD->bases()) {
- // Ignore empty records.
- if (isEmptyRecord(Context, I.getType(), true))
- continue;
-
- // If we already found an element then this isn't a single-element struct.
- if (Found)
- return nullptr;
-
- // If this is non-empty and not a single element struct, the composite
- // cannot be a single element struct.
- Found = isSingleElementStruct(I.getType(), Context);
- if (!Found)
- return nullptr;
- }
- }
-
- // Check for single element.
- for (const auto *FD : RD->fields()) {
- QualType FT = FD->getType();
-
- // Ignore empty fields.
- if (isEmptyField(Context, FD, true))
- continue;
-
- // If we already found an element then this isn't a single-element
- // struct.
- if (Found)
- return nullptr;
-
- // Treat single element arrays as the element.
- while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
- if (AT->getSize().getZExtValue() != 1)
- break;
- FT = AT->getElementType();
- }
-
- if (!isAggregateTypeForABI(FT)) {
- Found = FT.getTypePtr();
- } else {
- Found = isSingleElementStruct(FT, Context);
- if (!Found)
- return nullptr;
- }
- }
-
- // We don't consider a struct a single-element struct if it has
- // padding beyond the element type.
- if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
- return nullptr;
-
- return Found;
-}
-
-namespace {
-Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
- const ABIArgInfo &AI) {
- // This default implementation defers to the llvm backend's va_arg
- // instruction. It can handle only passing arguments directly
- // (typically only handled in the backend for primitive types), or
- // aggregates passed indirectly by pointer (NOTE: if the "byval"
- // flag has ABI impact in the callee, this implementation cannot
- // work.)
-
- // Only a few cases are covered here at the moment -- those needed
- // by the default abi.
- llvm::Value *Val;
-
- if (AI.isIndirect()) {
- assert(!AI.getPaddingType() &&
- "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
- assert(
- !AI.getIndirectRealign() &&
- "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");
-
- auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
- CharUnits TyAlignForABI = TyInfo.Align;
-
- llvm::Type *BaseTy =
- llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
- llvm::Value *Addr =
- CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
- return Address(Addr, TyAlignForABI);
- } else {
- assert((AI.isDirect() || AI.isExtend()) &&
- "Unexpected ArgInfo Kind in generic VAArg emitter!");
-
- assert(!AI.getInReg() &&
- "Unexpected InReg seen in arginfo in generic VAArg emitter!");
- assert(!AI.getPaddingType() &&
- "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
- assert(!AI.getDirectOffset() &&
- "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
- assert(!AI.getCoerceToType() &&
- "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
-
- Address Temp = CGF.CreateMemTemp(Ty, "varet");
- Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty));
- CGF.Builder.CreateStore(Val, Temp);
- return Temp;
- }
-}
-
-/// DefaultABIInfo - The default implementation for ABI specific
-/// details. This implementation provides information which results in
-/// self-consistent and sensible LLVM IR generation, but does not
-/// conform to any particular ABI.
-class DefaultABIInfo : public ABIInfo {
-public:
- DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
-
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType RetTy) const;
-
- void computeInfo(CGFunctionInfo &FI) const override {
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type);
- }
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override {
- return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
- }
-};
-
-class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
-};
-
-ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- if (isAggregateTypeForABI(Ty)) {
- // Records with non-trivial destructors/copy-constructors should not be
- // passed by value.
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
-
- return getNaturalAlignIndirect(Ty);
- }
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- ASTContext &Context = getContext();
- if (const auto *EIT = Ty->getAs<ExtIntType>())
- if (EIT->getNumBits() >
- Context.getTypeSize(Context.getTargetInfo().hasInt128Type()
- ? Context.Int128Ty
- : Context.LongLongTy))
- return getNaturalAlignIndirect(Ty);
-
- return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
-}
-
-ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- if (isAggregateTypeForABI(RetTy))
- return getNaturalAlignIndirect(RetTy);
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
-
- if (const auto *EIT = RetTy->getAs<ExtIntType>())
- if (EIT->getNumBits() >
- getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type()
- ? getContext().Int128Ty
- : getContext().LongLongTy))
- return getNaturalAlignIndirect(RetTy);
-
- return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
-}
-
-//===----------------------------------------------------------------------===//
-// WebAssembly ABI Implementation
-//
-// This is a very simple ABI that relies a lot on DefaultABIInfo.
-//===----------------------------------------------------------------------===//
-
-class WebAssemblyABIInfo final : public SwiftABIInfo {
-public:
- enum ABIKind {
- MVP = 0,
- ExperimentalMV = 1,
- };
-
-private:
- DefaultABIInfo defaultInfo;
- ABIKind Kind;
-
-public:
- explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind)
- : SwiftABIInfo(CGT), defaultInfo(CGT), Kind(Kind) {}
-
-private:
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType Ty) const;
-
- // DefaultABIInfo's classifyReturnType and classifyArgumentType are
- // non-virtual, but computeInfo and EmitVAArg are virtual, so we
- // overload them.
- void computeInfo(CGFunctionInfo &FI) const override {
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &Arg : FI.arguments())
- Arg.info = classifyArgumentType(Arg.type);
- }
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-
- bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
- bool asReturnValue) const override {
- return occupiesMoreThan(CGT, scalars, /*total*/ 4);
- }
-
- bool isSwiftErrorInRegister() const override {
- return false;
- }
-};
-
-class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
-public:
- explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
- WebAssemblyABIInfo::ABIKind K)
- : TargetCodeGenInfo(std::make_unique<WebAssemblyABIInfo>(CGT, K)) {}
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override {
- TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
- if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
- if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
- llvm::Function *Fn = cast<llvm::Function>(GV);
- llvm::AttrBuilder B;
- B.addAttribute("wasm-import-module", Attr->getImportModule());
- Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
- }
- if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
- llvm::Function *Fn = cast<llvm::Function>(GV);
- llvm::AttrBuilder B;
- B.addAttribute("wasm-import-name", Attr->getImportName());
- Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
- }
- if (const auto *Attr = FD->getAttr<WebAssemblyExportNameAttr>()) {
- llvm::Function *Fn = cast<llvm::Function>(GV);
- llvm::AttrBuilder B;
- B.addAttribute("wasm-export-name", Attr->getExportName());
- Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
- }
- }
-
- if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
- llvm::Function *Fn = cast<llvm::Function>(GV);
- if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
- Fn->addFnAttr("no-prototype");
- }
- }
-};
-
-/// Classify argument of given type \p Ty.
-ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- if (isAggregateTypeForABI(Ty)) {
- // Records with non-trivial destructors/copy-constructors should not be
- // passed by value.
- if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
- // Ignore empty structs/unions.
- if (isEmptyRecord(getContext(), Ty, true))
- return ABIArgInfo::getIgnore();
- // Lower single-element structs to just pass a regular value. TODO: We
- // could do reasonable-size multiple-element structs too, using getExpand(),
- // though watch out for things like bitfields.
- if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
- return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
- // For the experimental multivalue ABI, fully expand all other aggregates
- if (Kind == ABIKind::ExperimentalMV) {
- const RecordType *RT = Ty->getAs<RecordType>();
- assert(RT);
- bool HasBitField = false;
- for (auto *Field : RT->getDecl()->fields()) {
- if (Field->isBitField()) {
- HasBitField = true;
- break;
- }
- }
- if (!HasBitField)
- return ABIArgInfo::getExpand();
- }
- }
-
- // Otherwise just do the default thing.
- return defaultInfo.classifyArgumentType(Ty);
-}
-
-ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
- if (isAggregateTypeForABI(RetTy)) {
- // Records with non-trivial destructors/copy-constructors should not be
- // returned by value.
- if (!getRecordArgABI(RetTy, getCXXABI())) {
- // Ignore empty structs/unions.
- if (isEmptyRecord(getContext(), RetTy, true))
- return ABIArgInfo::getIgnore();
- // Lower single-element structs to just return a regular value. TODO: We
- // could do reasonable-size multiple-element structs too, using
- // ABIArgInfo::getDirect().
- if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
- return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
- // For the experimental multivalue ABI, return all other aggregates
- if (Kind == ABIKind::ExperimentalMV)
- return ABIArgInfo::getDirect();
- }
- }
-
- // Otherwise just do the default thing.
- return defaultInfo.classifyReturnType(RetTy);
-}
-
-Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- bool IsIndirect = isAggregateTypeForABI(Ty) &&
- !isEmptyRecord(getContext(), Ty, true) &&
- !isSingleElementStruct(Ty, getContext());
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
- getContext().getTypeInfoInChars(Ty),
- CharUnits::fromQuantity(4),
- /*AllowHigherAlign=*/true);
-}
-
-//===----------------------------------------------------------------------===//
-// le32/PNaCl bitcode ABI Implementation
-//
-// This is a simplified version of the x86_32 ABI. Arguments and return values
-// are always passed on the stack.
-//===----------------------------------------------------------------------===//
-
-class PNaClABIInfo : public ABIInfo {
- public:
- PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
-
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType RetTy) const;
-
- void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF,
- Address VAListAddr, QualType Ty) const override;
-};
-
-class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
- public:
- PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<PNaClABIInfo>(CGT)) {}
-};
-
-void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
-
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type);
-}
-
-Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- // The PNaCL ABI is a bit odd, in that varargs don't use normal
- // function classification. Structs get passed directly for varargs
- // functions, through a rewriting transform in
- // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
- // this target to actually support a va_arg instructions with an
- // aggregate type, unlike other targets.
- return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
-}
-
-/// Classify argument of given type \p Ty.
-ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
- if (isAggregateTypeForABI(Ty)) {
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
- return getNaturalAlignIndirect(Ty);
- } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
- // Treat an enum type as its underlying type.
- Ty = EnumTy->getDecl()->getIntegerType();
- } else if (Ty->isFloatingType()) {
- // Floating-point types don't go inreg.
- return ABIArgInfo::getDirect();
- } else if (const auto *EIT = Ty->getAs<ExtIntType>()) {
- // Treat extended integers as integers if <=64, otherwise pass indirectly.
- if (EIT->getNumBits() > 64)
- return getNaturalAlignIndirect(Ty);
- return ABIArgInfo::getDirect();
- }
-
- return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
-}
-
-ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- // In the PNaCl ABI we always return records/structures on the stack.
- if (isAggregateTypeForABI(RetTy))
- return getNaturalAlignIndirect(RetTy);
-
- // Treat extended integers as integers if <=64, otherwise pass indirectly.
- if (const auto *EIT = RetTy->getAs<ExtIntType>()) {
- if (EIT->getNumBits() > 64)
- return getNaturalAlignIndirect(RetTy);
- return ABIArgInfo::getDirect();
- }
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
-
- return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
-}
-
-/// IsX86_MMXType - Return true if this is an MMX type.
-bool IsX86_MMXType(llvm::Type *IRType) {
- // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
- return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
- cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
- IRType->getScalarSizeInBits() != 64;
-}
-
-static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
- StringRef Constraint,
- llvm::Type* Ty) {
- bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
- .Cases("y", "&y", "^Ym", true)
- .Default(false);
- if (IsMMXCons && Ty->isVectorTy()) {
- if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedSize() !=
- 64) {
- // Invalid MMX constraint
- return nullptr;
- }
-
- return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
- }
-
- // No operation needed
- return Ty;
-}
-
-/// Returns true if this type can be passed in SSE registers with the
-/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
-static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
- if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
- if (BT->getKind() == BuiltinType::LongDouble) {
- if (&Context.getTargetInfo().getLongDoubleFormat() ==
- &llvm::APFloat::x87DoubleExtended())
- return false;
- }
- return true;
- }
- } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
- // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
- // registers specially.
- unsigned VecSize = Context.getTypeSize(VT);
- if (VecSize == 128 || VecSize == 256 || VecSize == 512)
- return true;
- }
- return false;
-}
-
-/// Returns true if this aggregate is small enough to be passed in SSE registers
-/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
-static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
- return NumMembers <= 4;
-}
-
-/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
-static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
- auto AI = ABIArgInfo::getDirect(T);
- AI.setInReg(true);
- AI.setCanBeFlattened(false);
- return AI;
-}
-
-//===----------------------------------------------------------------------===//
-// X86-32 ABI Implementation
-//===----------------------------------------------------------------------===//
-
-/// Similar to llvm::CCState, but for Clang.
-struct CCState {
- CCState(CGFunctionInfo &FI)
- : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()) {}
-
- llvm::SmallBitVector IsPreassigned;
- unsigned CC = CallingConv::CC_C;
- unsigned FreeRegs = 0;
- unsigned FreeSSERegs = 0;
-};
-
-/// X86_32ABIInfo - The X86-32 ABI information.
-class X86_32ABIInfo : public SwiftABIInfo {
- enum Class {
- Integer,
- Float
- };
-
- static const unsigned MinABIStackAlignInBytes = 4;
-
- bool IsDarwinVectorABI;
- bool IsRetSmallStructInRegABI;
- bool IsWin32StructABI;
- bool IsSoftFloatABI;
- bool IsMCUABI;
- bool IsLinuxABI;
- unsigned DefaultNumRegisterParameters;
-
- static bool isRegisterSize(unsigned Size) {
- return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
- }
-
- bool isHomogeneousAggregateBaseType(QualType Ty) const override {
- // FIXME: Assumes vectorcall is in use.
- return isX86VectorTypeForVectorCall(getContext(), Ty);
- }
-
- bool isHomogeneousAggregateSmallEnough(const Type *Ty,
- uint64_t NumMembers) const override {
- // FIXME: Assumes vectorcall is in use.
- return isX86VectorCallAggregateSmallEnough(NumMembers);
- }
-
- bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;
-
- /// getIndirectResult - Give a source type \arg Ty, return a suitable result
- /// such that the argument will be passed in memory.
- ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
-
- ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;
-
- /// Return the alignment to use for the given type on the stack.
- unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
-
- Class classify(QualType Ty) const;
- ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
- ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
-
- /// Updates the number of available free registers, returns
- /// true if any registers were allocated.
- bool updateFreeRegs(QualType Ty, CCState &State) const;
-
- bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
- bool &NeedsPadding) const;
- bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;
-
- bool canExpandIndirectArgument(QualType Ty) const;
-
- /// Rewrite the function info so that all memory arguments use
- /// inalloca.
- void rewriteWithInAlloca(CGFunctionInfo &FI) const;
-
- void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
- CharUnits &StackOffset, ABIArgInfo &Info,
- QualType Type) const;
- void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;
-
-public:
-
- void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-
- X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
- bool RetSmallStructInRegABI, bool Win32StructABI,
- unsigned NumRegisterParameters, bool SoftFloatABI)
- : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
- IsRetSmallStructInRegABI(RetSmallStructInRegABI),
- IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
- IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
- IsLinuxABI(CGT.getTarget().getTriple().isOSLinux()),
- DefaultNumRegisterParameters(NumRegisterParameters) {}
-
- bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
- bool asReturnValue) const override {
- // LLVM's x86-32 lowering currently only assigns up to three
- // integer registers and three fp registers. Oddly, it'll use up to
- // four vector registers for vectors, but those can overlap with the
- // scalar registers.
- return occupiesMoreThan(CGT, scalars, /*total*/ 3);
- }
-
- bool isSwiftErrorInRegister() const override {
- // x86-32 lowering does not support passing swifterror in a register.
- return false;
- }
-};
-
-class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
- bool RetSmallStructInRegABI, bool Win32StructABI,
- unsigned NumRegisterParameters, bool SoftFloatABI)
- : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>(
- CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
- NumRegisterParameters, SoftFloatABI)) {}
-
- static bool isStructReturnInRegABI(
- const llvm::Triple &Triple, const CodeGenOptions &Opts);
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override;
-
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
- // Darwin uses different dwarf register numbers for EH.
- if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
- return 4;
- }
-
- bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override;
-
- llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
- StringRef Constraint,
- llvm::Type* Ty) const override {
- return X86AdjustInlineAsmType(CGF, Constraint, Ty);
- }
-
- void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
- std::string &Constraints,
- std::vector<llvm::Type *> &ResultRegTypes,
- std::vector<llvm::Type *> &ResultTruncRegTypes,
- std::vector<LValue> &ResultRegDests,
- std::string &AsmString,
- unsigned NumOutputs) const override;
-
- llvm::Constant *
- getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
- unsigned Sig = (0xeb << 0) | // jmp rel8
- (0x06 << 8) | // .+0x08
- ('v' << 16) |
- ('2' << 24);
- return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
- }
-
- StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
- return "movl\t%ebp, %ebp"
- "\t\t// marker for objc_retainAutoreleaseReturnValue";
- }
-};
-
-}
-
-/// Rewrite input constraint references after adding some output constraints.
-/// In the case where there is one output and one input and we add one output,
-/// we need to replace all operand references greater than or equal to 1:
-/// mov $0, $1
-/// mov eax, $1
-/// The result will be:
-/// mov $0, $2
-/// mov eax, $2
-static void rewriteInputConstraintReferences(unsigned FirstIn,
- unsigned NumNewOuts,
- std::string &AsmString) {
- std::string Buf;
- llvm::raw_string_ostream OS(Buf);
- size_t Pos = 0;
- while (Pos < AsmString.size()) {
- size_t DollarStart = AsmString.find('$', Pos);
- if (DollarStart == std::string::npos)
- DollarStart = AsmString.size();
- size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
- if (DollarEnd == std::string::npos)
- DollarEnd = AsmString.size();
- OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
- Pos = DollarEnd;
- size_t NumDollars = DollarEnd - DollarStart;
- if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
- // We have an operand reference.
- size_t DigitStart = Pos;
- if (AsmString[DigitStart] == '{') {
- OS << '{';
- ++DigitStart;
- }
- size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
- if (DigitEnd == std::string::npos)
- DigitEnd = AsmString.size();
- StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
- unsigned OperandIndex;
- if (!OperandStr.getAsInteger(10, OperandIndex)) {
- if (OperandIndex >= FirstIn)
- OperandIndex += NumNewOuts;
- OS << OperandIndex;
- } else {
- OS << OperandStr;
- }
- Pos = DigitEnd;
- }
- }
- AsmString = std::move(OS.str());
-}
-
-/// Add output constraints for EAX:EDX because they are return registers.
-void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
- CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
- std::vector<llvm::Type *> &ResultRegTypes,
- std::vector<llvm::Type *> &ResultTruncRegTypes,
- std::vector<LValue> &ResultRegDests, std::string &AsmString,
- unsigned NumOutputs) const {
- uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());
-
- // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
- // larger.
- if (!Constraints.empty())
- Constraints += ',';
- if (RetWidth <= 32) {
- Constraints += "={eax}";
- ResultRegTypes.push_back(CGF.Int32Ty);
- } else {
- // Use the 'A' constraint for EAX:EDX.
- Constraints += "=A";
- ResultRegTypes.push_back(CGF.Int64Ty);
- }
-
- // Truncate EAX or EAX:EDX to an integer of the appropriate size.
- llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
- ResultTruncRegTypes.push_back(CoerceTy);
-
- // Coerce the integer by bitcasting the return slot pointer.
- ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(CGF),
- CoerceTy->getPointerTo()));
- ResultRegDests.push_back(ReturnSlot);
-
- rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
-}
-
-/// shouldReturnTypeInRegister - Determine if the given type should be
-/// returned in a register (for the Darwin and MCU ABI).
-bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
- ASTContext &Context) const {
- uint64_t Size = Context.getTypeSize(Ty);
-
- // For i386, type must be register sized.
- // For the MCU ABI, it only needs to be <= 8-byte
- if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
- return false;
-
- if (Ty->isVectorType()) {
- // 64- and 128- bit vectors inside structures are not returned in
- // registers.
- if (Size == 64 || Size == 128)
- return false;
-
- return true;
- }
-
- // If this is a builtin, pointer, enum, complex type, member pointer, or
- // member function pointer it is ok.
- if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
- Ty->isAnyComplexType() || Ty->isEnumeralType() ||
- Ty->isBlockPointerType() || Ty->isMemberPointerType())
- return true;
-
- // Arrays are treated like records.
- if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
- return shouldReturnTypeInRegister(AT->getElementType(), Context);
-
- // Otherwise, it must be a record type.
- const RecordType *RT = Ty->getAs<RecordType>();
- if (!RT) return false;
-
- // FIXME: Traverse bases here too.
-
- // Structure types are passed in register if all fields would be
- // passed in a register.
- for (const auto *FD : RT->getDecl()->fields()) {
- // Empty fields are ignored.
- if (isEmptyField(Context, FD, true))
- continue;
-
- // Check fields recursively.
- if (!shouldReturnTypeInRegister(FD->getType(), Context))
- return false;
- }
- return true;
-}
-
-static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
- // Treat complex types as the element type.
- if (const ComplexType *CTy = Ty->getAs<ComplexType>())
- Ty = CTy->getElementType();
-
- // Check for a type which we know has a simple scalar argument-passing
- // convention without any padding. (We're specifically looking for 32
- // and 64-bit integer and integer-equivalents, float, and double.)
- if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
- !Ty->isEnumeralType() && !Ty->isBlockPointerType())
- return false;
-
- uint64_t Size = Context.getTypeSize(Ty);
- return Size == 32 || Size == 64;
-}
-
-static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
- uint64_t &Size) {
- for (const auto *FD : RD->fields()) {
- // Scalar arguments on the stack get 4 byte alignment on x86. If the
- // argument is smaller than 32-bits, expanding the struct will create
- // alignment padding.
- if (!is32Or64BitBasicType(FD->getType(), Context))
- return false;
-
- // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
- // how to expand them yet, and the predicate for telling if a bitfield still
- // counts as "basic" is more complicated than what we were doing previously.
- if (FD->isBitField())
- return false;
-
- Size += Context.getTypeSize(FD->getType());
- }
- return true;
-}
-
-static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
- uint64_t &Size) {
- // Don't do this if there are any non-empty bases.
- for (const CXXBaseSpecifier &Base : RD->bases()) {
- if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
- Size))
- return false;
- }
- if (!addFieldSizes(Context, RD, Size))
- return false;
- return true;
-}
-
-/// Test whether an argument type which is to be passed indirectly (on the
-/// stack) would have the equivalent layout if it was expanded into separate
-/// arguments. If so, we prefer to do the latter to avoid inhibiting
-/// optimizations.
-bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
- // We can only expand structure types.
- const RecordType *RT = Ty->getAs<RecordType>();
- if (!RT)
- return false;
- const RecordDecl *RD = RT->getDecl();
- uint64_t Size = 0;
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
- if (!IsWin32StructABI) {
- // On non-Windows, we have to conservatively match our old bitcode
- // prototypes in order to be ABI-compatible at the bitcode level.
- if (!CXXRD->isCLike())
- return false;
- } else {
- // Don't do this for dynamic classes.
- if (CXXRD->isDynamicClass())
- return false;
- }
- if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
- return false;
- } else {
- if (!addFieldSizes(getContext(), RD, Size))
- return false;
- }
-
- // We can do this if there was no alignment padding.
- return Size == getContext().getTypeSize(Ty);
-}
-
-ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const {
- // If the return value is indirect, then the hidden argument is consuming one
- // integer register.
- if (State.FreeRegs) {
- --State.FreeRegs;
- if (!IsMCUABI)
- return getNaturalAlignIndirectInReg(RetTy);
- }
- return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
-}
-
-ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
- CCState &State) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- const Type *Base = nullptr;
- uint64_t NumElts = 0;
- if ((State.CC == llvm::CallingConv::X86_VectorCall ||
- State.CC == llvm::CallingConv::X86_RegCall) &&
- isHomogeneousAggregate(RetTy, Base, NumElts)) {
- // The LLVM struct type for such an aggregate should lower properly.
- return ABIArgInfo::getDirect();
- }
-
- if (const VectorType *VT = RetTy->getAs<VectorType>()) {
- // On Darwin, some vectors are returned in registers.
- if (IsDarwinVectorABI) {
- uint64_t Size = getContext().getTypeSize(RetTy);
-
- // 128-bit vectors are a special case; they are returned in
- // registers and we need to make sure to pick a type the LLVM
- // backend will like.
- if (Size == 128)
- return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
- llvm::Type::getInt64Ty(getVMContext()), 2));
-
- // Always return in register if it fits in a general purpose
- // register, or if it is 64 bits and has a single element.
- if ((Size == 8 || Size == 16 || Size == 32) ||
- (Size == 64 && VT->getNumElements() == 1))
- return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
- Size));
-
- return getIndirectReturnResult(RetTy, State);
- }
-
- return ABIArgInfo::getDirect();
- }
-
- if (isAggregateTypeForABI(RetTy)) {
- if (const RecordType *RT = RetTy->getAs<RecordType>()) {
- // Structures with flexible arrays are always indirect.
- if (RT->getDecl()->hasFlexibleArrayMember())
- return getIndirectReturnResult(RetTy, State);
- }
-
- // If specified, structs and unions are always indirect.
- if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
- return getIndirectReturnResult(RetTy, State);
-
- // Ignore empty structs/unions.
- if (isEmptyRecord(getContext(), RetTy, true))
- return ABIArgInfo::getIgnore();
-
- // Small structures which are register sized are generally returned
- // in a register.
- if (shouldReturnTypeInRegister(RetTy, getContext())) {
- uint64_t Size = getContext().getTypeSize(RetTy);
-
- // As a special-case, if the struct is a "single-element" struct, and
- // the field is of type "float" or "double", return it in a
- // floating-point register. (MSVC does not apply this special case.)
- // We apply a similar transformation for pointer types to improve the
- // quality of the generated IR.
- if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
- if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
- || SeltTy->hasPointerRepresentation())
- return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
-
- // FIXME: We should be able to narrow this integer in cases with dead
- // padding.
- return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
- }
-
- return getIndirectReturnResult(RetTy, State);
- }
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
-
- if (const auto *EIT = RetTy->getAs<ExtIntType>())
- if (EIT->getNumBits() > 64)
- return getIndirectReturnResult(RetTy, State);
-
- return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
-}
-
-static bool isSIMDVectorType(ASTContext &Context, QualType Ty) {
- return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
-}
-
-static bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) {
- const RecordType *RT = Ty->getAs<RecordType>();
- if (!RT)
- return 0;
- const RecordDecl *RD = RT->getDecl();
-
- // If this is a C++ record, check the bases first.
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
- for (const auto &I : CXXRD->bases())
- if (!isRecordWithSIMDVectorType(Context, I.getType()))
- return false;
-
- for (const auto *i : RD->fields()) {
- QualType FT = i->getType();
-
- if (isSIMDVectorType(Context, FT))
- return true;
-
- if (isRecordWithSIMDVectorType(Context, FT))
- return true;
- }
-
- return false;
-}
-
-unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
- unsigned Align) const {
- // Otherwise, if the alignment is less than or equal to the minimum ABI
- // alignment, just use the default; the backend will handle this.
- if (Align <= MinABIStackAlignInBytes)
- return 0; // Use default alignment.
-
- if (IsLinuxABI) {
- // Exclude other System V OS (e.g Darwin, PS4 and FreeBSD) since we don't
- // want to spend any effort dealing with the ramifications of ABI breaks.
- //
- // If the vector type is __m128/__m256/__m512, return the default alignment.
- if (Ty->isVectorType() && (Align == 16 || Align == 32 || Align == 64))
- return Align;
- }
- // On non-Darwin, the stack type alignment is always 4.
- if (!IsDarwinVectorABI) {
- // Set explicit alignment, since we may need to realign the top.
- return MinABIStackAlignInBytes;
- }
-
- // Otherwise, if the type contains an SSE vector type, the alignment is 16.
- if (Align >= 16 && (isSIMDVectorType(getContext(), Ty) ||
- isRecordWithSIMDVectorType(getContext(), Ty)))
- return 16;
-
- return MinABIStackAlignInBytes;
-}
-
-ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
- CCState &State) const {
- if (!ByVal) {
- if (State.FreeRegs) {
- --State.FreeRegs; // Non-byval indirects just use one pointer.
- if (!IsMCUABI)
- return getNaturalAlignIndirectInReg(Ty);
- }
- return getNaturalAlignIndirect(Ty, false);
- }
-
- // Compute the byval alignment.
- unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
- unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
- if (StackAlign == 0)
- return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);
-
- // If the stack alignment is less than the type alignment, realign the
- // argument.
- bool Realign = TypeAlign > StackAlign;
- return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
- /*ByVal=*/true, Realign);
-}
-
-X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
- const Type *T = isSingleElementStruct(Ty, getContext());
- if (!T)
- T = Ty.getTypePtr();
-
- if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
- BuiltinType::Kind K = BT->getKind();
- if (K == BuiltinType::Float || K == BuiltinType::Double)
- return Float;
- }
- return Integer;
-}
-
-bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
- if (!IsSoftFloatABI) {
- Class C = classify(Ty);
- if (C == Float)
- return false;
- }
-
- unsigned Size = getContext().getTypeSize(Ty);
- unsigned SizeInRegs = (Size + 31) / 32;
-
- if (SizeInRegs == 0)
- return false;
-
- if (!IsMCUABI) {
- if (SizeInRegs > State.FreeRegs) {
- State.FreeRegs = 0;
- return false;
- }
- } else {
- // The MCU psABI allows passing parameters in-reg even if there are
- // earlier parameters that are passed on the stack. Also,
- // it does not allow passing >8-byte structs in-register,
- // even if there are 3 free registers available.
- if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
- return false;
- }
-
- State.FreeRegs -= SizeInRegs;
- return true;
-}
-
-bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
- bool &InReg,
- bool &NeedsPadding) const {
- // On Windows, aggregates other than HFAs are never passed in registers, and
- // they do not consume register slots. Homogenous floating-point aggregates
- // (HFAs) have already been dealt with at this point.
- if (IsWin32StructABI && isAggregateTypeForABI(Ty))
- return false;
-
- NeedsPadding = false;
- InReg = !IsMCUABI;
-
- if (!updateFreeRegs(Ty, State))
- return false;
-
- if (IsMCUABI)
- return true;
-
- if (State.CC == llvm::CallingConv::X86_FastCall ||
- State.CC == llvm::CallingConv::X86_VectorCall ||
- State.CC == llvm::CallingConv::X86_RegCall) {
- if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
- NeedsPadding = true;
-
- return false;
- }
-
- return true;
-}
-
-bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
- if (!updateFreeRegs(Ty, State))
- return false;
-
- if (IsMCUABI)
- return false;
-
- if (State.CC == llvm::CallingConv::X86_FastCall ||
- State.CC == llvm::CallingConv::X86_VectorCall ||
- State.CC == llvm::CallingConv::X86_RegCall) {
- if (getContext().getTypeSize(Ty) > 32)
- return false;
-
- return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
- Ty->isReferenceType());
- }
-
- return true;
-}
-
-void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const {
- // Vectorcall x86 works subtly different than in x64, so the format is
- // a bit different than the x64 version. First, all vector types (not HVAs)
- // are assigned, with the first 6 ending up in the [XYZ]MM0-5 registers.
- // This differs from the x64 implementation, where the first 6 by INDEX get
- // registers.
- // In the second pass over the arguments, HVAs are passed in the remaining
- // vector registers if possible, or indirectly by address. The address will be
- // passed in ECX/EDX if available. Any other arguments are passed according to
- // the usual fastcall rules.
- MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
- for (int I = 0, E = Args.size(); I < E; ++I) {
- const Type *Base = nullptr;
- uint64_t NumElts = 0;
- const QualType &Ty = Args[I].type;
- if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
- isHomogeneousAggregate(Ty, Base, NumElts)) {
- if (State.FreeSSERegs >= NumElts) {
- State.FreeSSERegs -= NumElts;
- Args[I].info = ABIArgInfo::getDirectInReg();
- State.IsPreassigned.set(I);
- }
- }
- }
-}
-
-ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
- CCState &State) const {
- // FIXME: Set alignment on indirect arguments.
- bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
- bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
- bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;
-
- Ty = useFirstFieldIfTransparentUnion(Ty);
- TypeInfo TI = getContext().getTypeInfo(Ty);
-
- // Check with the C++ ABI first.
- const RecordType *RT = Ty->getAs<RecordType>();
- if (RT) {
- CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
- if (RAA == CGCXXABI::RAA_Indirect) {
- return getIndirectResult(Ty, false, State);
- } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
- // The field index doesn't matter, we'll fix it up later.
- return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
- }
- }
-
- // Regcall uses the concept of a homogenous vector aggregate, similar
- // to other targets.
- const Type *Base = nullptr;
- uint64_t NumElts = 0;
- if ((IsRegCall || IsVectorCall) &&
- isHomogeneousAggregate(Ty, Base, NumElts)) {
- if (State.FreeSSERegs >= NumElts) {
- State.FreeSSERegs -= NumElts;
-
- // Vectorcall passes HVAs directly and does not flatten them, but regcall
- // does.
- if (IsVectorCall)
- return getDirectX86Hva();
-
- if (Ty->isBuiltinType() || Ty->isVectorType())
- return ABIArgInfo::getDirect();
- return ABIArgInfo::getExpand();
- }
- return getIndirectResult(Ty, /*ByVal=*/false, State);
- }
-
- if (isAggregateTypeForABI(Ty)) {
- // Structures with flexible arrays are always indirect.
- // FIXME: This should not be byval!
- if (RT && RT->getDecl()->hasFlexibleArrayMember())
- return getIndirectResult(Ty, true, State);
-
- // Ignore empty structs/unions on non-Windows.
- if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
- return ABIArgInfo::getIgnore();
-
- llvm::LLVMContext &LLVMContext = getVMContext();
- llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
- bool NeedsPadding = false;
- bool InReg;
- if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
- unsigned SizeInRegs = (TI.Width + 31) / 32;
- SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
- llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
- if (InReg)
- return ABIArgInfo::getDirectInReg(Result);
- else
- return ABIArgInfo::getDirect(Result);
- }
- llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
-
- // Pass over-aligned aggregates on Windows indirectly. This behavior was
- // added in MSVC 2015.
- if (IsWin32StructABI && TI.AlignIsRequired && TI.Align > 32)
- return getIndirectResult(Ty, /*ByVal=*/false, State);
-
- // Expand small (<= 128-bit) record types when we know that the stack layout
- // of those arguments will match the struct. This is important because the
- // LLVM backend isn't smart enough to remove byval, which inhibits many
- // optimizations.
- // Don't do this for the MCU if there are still free integer registers
- // (see X86_64 ABI for full explanation).
- if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
- canExpandIndirectArgument(Ty))
- return ABIArgInfo::getExpandWithPadding(
- IsFastCall || IsVectorCall || IsRegCall, PaddingType);
-
- return getIndirectResult(Ty, true, State);
- }
-
- if (const VectorType *VT = Ty->getAs<VectorType>()) {
- // On Windows, vectors are passed directly if registers are available, or
- // indirectly if not. This avoids the need to align argument memory. Pass
- // user-defined vector types larger than 512 bits indirectly for simplicity.
- if (IsWin32StructABI) {
- if (TI.Width <= 512 && State.FreeSSERegs > 0) {
- --State.FreeSSERegs;
- return ABIArgInfo::getDirectInReg();
- }
- return getIndirectResult(Ty, /*ByVal=*/false, State);
- }
-
- // On Darwin, some vectors are passed in memory, we handle this by passing
- // it as an i8/i16/i32/i64.
- if (IsDarwinVectorABI) {
- if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) ||
- (TI.Width == 64 && VT->getNumElements() == 1))
- return ABIArgInfo::getDirect(
- llvm::IntegerType::get(getVMContext(), TI.Width));
- }
-
- if (IsX86_MMXType(CGT.ConvertType(Ty)))
- return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));
-
- return ABIArgInfo::getDirect();
- }
-
-
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- bool InReg = shouldPrimitiveUseInReg(Ty, State);
-
- if (isPromotableIntegerTypeForABI(Ty)) {
- if (InReg)
- return ABIArgInfo::getExtendInReg(Ty);
- return ABIArgInfo::getExtend(Ty);
- }
-
- if (const auto * EIT = Ty->getAs<ExtIntType>()) {
- if (EIT->getNumBits() <= 64) {
- if (InReg)
- return ABIArgInfo::getDirectInReg();
- return ABIArgInfo::getDirect();
- }
- return getIndirectResult(Ty, /*ByVal=*/false, State);
- }
-
- if (InReg)
- return ABIArgInfo::getDirectInReg();
- return ABIArgInfo::getDirect();
-}
-
-void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
- CCState State(FI);
- if (IsMCUABI)
- State.FreeRegs = 3;
- else if (State.CC == llvm::CallingConv::X86_FastCall) {
- State.FreeRegs = 2;
- State.FreeSSERegs = 3;
- } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
- State.FreeRegs = 2;
- State.FreeSSERegs = 6;
- } else if (FI.getHasRegParm())
- State.FreeRegs = FI.getRegParm();
- else if (State.CC == llvm::CallingConv::X86_RegCall) {
- State.FreeRegs = 5;
- State.FreeSSERegs = 8;
- } else if (IsWin32StructABI) {
- // Since MSVC 2015, the first three SSE vectors have been passed in
- // registers. The rest are passed indirectly.
- State.FreeRegs = DefaultNumRegisterParameters;
- State.FreeSSERegs = 3;
- } else
- State.FreeRegs = DefaultNumRegisterParameters;
-
- if (!::classifyReturnType(getCXXABI(), FI, *this)) {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
- } else if (FI.getReturnInfo().isIndirect()) {
- // The C++ ABI is not aware of register usage, so we have to check if the
- // return value was sret and put it in a register ourselves if appropriate.
- if (State.FreeRegs) {
- --State.FreeRegs; // The sret parameter consumes a register.
- if (!IsMCUABI)
- FI.getReturnInfo().setInReg(true);
- }
- }
-
- // The chain argument effectively gives us another free register.
- if (FI.isChainCall())
- ++State.FreeRegs;
-
- // For vectorcall, do a first pass over the arguments, assigning FP and vector
- // arguments to XMM registers as available.
- if (State.CC == llvm::CallingConv::X86_VectorCall)
- runVectorCallFirstPass(FI, State);
-
- bool UsedInAlloca = false;
- MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
- for (int I = 0, E = Args.size(); I < E; ++I) {
- // Skip arguments that have already been assigned.
- if (State.IsPreassigned.test(I))
- continue;
-
- Args[I].info = classifyArgumentType(Args[I].type, State);
- UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca);
- }
-
- // If we needed to use inalloca for any argument, do a second pass and rewrite
- // all the memory arguments to use inalloca.
- if (UsedInAlloca)
- rewriteWithInAlloca(FI);
-}
-
-void
-X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
- CharUnits &StackOffset, ABIArgInfo &Info,
- QualType Type) const {
- // Arguments are always 4-byte-aligned.
- CharUnits WordSize = CharUnits::fromQuantity(4);
- assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");
-
- // sret pointers and indirect things will require an extra pointer
- // indirection, unless they are byval. Most things are byval, and will not
- // require this indirection.
- bool IsIndirect = false;
- if (Info.isIndirect() && !Info.getIndirectByVal())
- IsIndirect = true;
- Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect);
- llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);
- if (IsIndirect)
- LLTy = LLTy->getPointerTo(0);
- FrameFields.push_back(LLTy);
- StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);
-
- // Insert padding bytes to respect alignment.
- CharUnits FieldEnd = StackOffset;
- StackOffset = FieldEnd.alignTo(WordSize);
- if (StackOffset != FieldEnd) {
- CharUnits NumBytes = StackOffset - FieldEnd;
- llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
- Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
- FrameFields.push_back(Ty);
- }
-}
-
-static bool isArgInAlloca(const ABIArgInfo &Info) {
- // Leave ignored and inreg arguments alone.
- switch (Info.getKind()) {
- case ABIArgInfo::InAlloca:
- return true;
- case ABIArgInfo::Ignore:
- case ABIArgInfo::IndirectAliased:
- return false;
- case ABIArgInfo::Indirect:
- case ABIArgInfo::Direct:
- case ABIArgInfo::Extend:
- return !Info.getInReg();
- case ABIArgInfo::Expand:
- case ABIArgInfo::CoerceAndExpand:
- // These are aggregate types which are never passed in registers when
- // inalloca is involved.
- return true;
- }
- llvm_unreachable("invalid enum");
-}
-
-void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
- assert(IsWin32StructABI && "inalloca only supported on win32");
-
- // Build a packed struct type for all of the arguments in memory.
- SmallVector<llvm::Type *, 6> FrameFields;
-
- // The stack alignment is always 4.
- CharUnits StackAlign = CharUnits::fromQuantity(4);
-
- CharUnits StackOffset;
- CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
-
- // Put 'this' into the struct before 'sret', if necessary.
- bool IsThisCall =
- FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
- ABIArgInfo &Ret = FI.getReturnInfo();
- if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
- isArgInAlloca(I->info)) {
- addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
- ++I;
- }
-
- // Put the sret parameter into the inalloca struct if it's in memory.
- if (Ret.isIndirect() && !Ret.getInReg()) {
- addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType());
- // On Windows, the hidden sret parameter is always returned in eax.
- Ret.setInAllocaSRet(IsWin32StructABI);
- }
-
- // Skip the 'this' parameter in ecx.
- if (IsThisCall)
- ++I;
-
- // Put arguments passed in memory into the struct.
- for (; I != E; ++I) {
- if (isArgInAlloca(I->info))
- addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
- }
-
- FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
- /*isPacked=*/true),
- StackAlign);
-}
-
-Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
- Address VAListAddr, QualType Ty) const {
-
- auto TypeInfo = getContext().getTypeInfoInChars(Ty);
-
- // x86-32 changes the alignment of certain arguments on the stack.
- //
- // Just messing with TypeInfo like this works because we never pass
- // anything indirectly.
- TypeInfo.Align = CharUnits::fromQuantity(
- getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));
-
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
- TypeInfo, CharUnits::fromQuantity(4),
- /*AllowHigherAlign*/ true);
-}
-
-bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
- const llvm::Triple &Triple, const CodeGenOptions &Opts) {
- assert(Triple.getArch() == llvm::Triple::x86);
-
- switch (Opts.getStructReturnConvention()) {
- case CodeGenOptions::SRCK_Default:
- break;
- case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
- return false;
- case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
- return true;
- }
-
- if (Triple.isOSDarwin() || Triple.isOSIAMCU())
- return true;
-
- switch (Triple.getOS()) {
- case llvm::Triple::DragonFly:
- case llvm::Triple::FreeBSD:
- case llvm::Triple::OpenBSD:
- case llvm::Triple::Win32:
- return true;
- default:
- return false;
- }
-}
-
-static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) {
- if (!FD->hasAttr<AnyX86InterruptAttr>())
- return;
-
- llvm::Function *Fn = cast<llvm::Function>(GV);
- Fn->setCallingConv(llvm::CallingConv::X86_INTR);
- if (FD->getNumParams() == 0)
- return;
-
- auto PtrTy = cast<PointerType>(FD->getParamDecl(0)->getType());
- llvm::Type *ByValTy = CGM.getTypes().ConvertType(PtrTy->getPointeeType());
- llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(
- Fn->getContext(), ByValTy);
- Fn->addParamAttr(0, NewAttr);
-}
-
-void X86_32TargetCodeGenInfo::setTargetAttributes(
+void TargetCodeGenInfo::addStackProbeTargetAttributes(
const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
- if (GV->isDeclaration())
- return;
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
- if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
- llvm::Function *Fn = cast<llvm::Function>(GV);
- Fn->addFnAttr("stackrealign");
- }
-
- addX86InterruptAttrs(FD, GV, CGM);
- }
-}
-
-bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
- CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const {
- CodeGen::CGBuilderTy &Builder = CGF.Builder;
-
- llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
-
- // 0-7 are the eight integer registers; the order is different
- // on Darwin (for EH), but the range is the same.
- // 8 is %eip.
- AssignToArrayRange(Builder, Address, Four8, 0, 8);
-
- if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
- // 12-16 are st(0..4). Not sure why we stop at 4.
- // These have size 16, which is sizeof(long double) on
- // platforms with 8-byte alignment for that type.
- llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
- AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
-
- } else {
- // 9 is %eflags, which doesn't get a size on Darwin for some
- // reason.
- Builder.CreateAlignedStore(
- Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
- CharUnits::One());
-
- // 11-16 are st(0..5). Not sure why we stop at 5.
- // These have size 12, which is sizeof(long double) on
- // platforms with 4-byte alignment for that type.
- llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
- AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
- }
-
- return false;
-}
-
-//===----------------------------------------------------------------------===//
-// X86-64 ABI Implementation
-//===----------------------------------------------------------------------===//
-
-
-namespace {
-/// The AVX ABI level for X86 targets.
-enum class X86AVXABILevel {
- None,
- AVX,
- AVX512
-};
-
-/// \p returns the size in bits of the largest (native) vector for \p AVXLevel.
-static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
- switch (AVXLevel) {
- case X86AVXABILevel::AVX512:
- return 512;
- case X86AVXABILevel::AVX:
- return 256;
- case X86AVXABILevel::None:
- return 128;
- }
- llvm_unreachable("Unknown AVXLevel");
-}
-
-/// X86_64ABIInfo - The X86_64 ABI information.
-class X86_64ABIInfo : public SwiftABIInfo {
- enum Class {
- Integer = 0,
- SSE,
- SSEUp,
- X87,
- X87Up,
- ComplexX87,
- NoClass,
- Memory
- };
-
- /// merge - Implement the X86_64 ABI merging algorithm.
- ///
- /// Merge an accumulating classification \arg Accum with a field
- /// classification \arg Field.
- ///
- /// \param Accum - The accumulating classification. This should
- /// always be either NoClass or the result of a previous merge
- /// call. In addition, this should never be Memory (the caller
- /// should just return Memory for the aggregate).
- static Class merge(Class Accum, Class Field);
-
- /// postMerge - Implement the X86_64 ABI post merging algorithm.
- ///
- /// Post merger cleanup, reduces a malformed Hi and Lo pair to
- /// final MEMORY or SSE classes when necessary.
- ///
- /// \param AggregateSize - The size of the current aggregate in
- /// the classification process.
- ///
- /// \param Lo - The classification for the parts of the type
- /// residing in the low word of the containing object.
- ///
- /// \param Hi - The classification for the parts of the type
- /// residing in the higher words of the containing object.
- ///
- void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
-
- /// classify - Determine the x86_64 register classes in which the
- /// given type T should be passed.
- ///
- /// \param Lo - The classification for the parts of the type
- /// residing in the low word of the containing object.
- ///
- /// \param Hi - The classification for the parts of the type
- /// residing in the high word of the containing object.
- ///
- /// \param OffsetBase - The bit offset of this type in the
- /// containing object. Some parameters are classified different
- /// depending on whether they straddle an eightbyte boundary.
- ///
- /// \param isNamedArg - Whether the argument in question is a "named"
- /// argument, as used in AMD64-ABI 3.5.7.
- ///
- /// If a word is unused its result will be NoClass; if a type should
- /// be passed in Memory then at least the classification of \arg Lo
- /// will be Memory.
- ///
- /// The \arg Lo class will be NoClass iff the argument is ignored.
- ///
- /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
- /// also be ComplexX87.
- void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
- bool isNamedArg) const;
-
- llvm::Type *GetByteVectorType(QualType Ty) const;
- llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
- unsigned IROffset, QualType SourceTy,
- unsigned SourceOffset) const;
- llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
- unsigned IROffset, QualType SourceTy,
- unsigned SourceOffset) const;
-
- /// getIndirectResult - Give a source type \arg Ty, return a suitable result
- /// such that the argument will be returned in memory.
- ABIArgInfo getIndirectReturnResult(QualType Ty) const;
-
- /// getIndirectResult - Give a source type \arg Ty, return a suitable result
- /// such that the argument will be passed in memory.
- ///
- /// \param freeIntRegs - The number of free integer registers remaining
- /// available.
- ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
-
- ABIArgInfo classifyReturnType(QualType RetTy) const;
-
- ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
- unsigned &neededInt, unsigned &neededSSE,
- bool isNamedArg) const;
-
- ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
- unsigned &NeededSSE) const;
-
- ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
- unsigned &NeededSSE) const;
-
- bool IsIllegalVectorType(QualType Ty) const;
-
- /// The 0.98 ABI revision clarified a lot of ambiguities,
- /// unfortunately in ways that were not always consistent with
- /// certain previous compilers. In particular, platforms which
- /// required strict binary compatibility with older versions of GCC
- /// may need to exempt themselves.
- bool honorsRevision0_98() const {
- return !getTarget().getTriple().isOSDarwin();
- }
-
- /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to
- /// classify it as INTEGER (for compatibility with older clang compilers).
- bool classifyIntegerMMXAsSSE() const {
- // Clang <= 3.8 did not do this.
- if (getContext().getLangOpts().getClangABICompat() <=
- LangOptions::ClangABI::Ver3_8)
- return false;
-
- const llvm::Triple &Triple = getTarget().getTriple();
- if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
- return false;
- if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
- return false;
- return true;
- }
-
- // GCC classifies vectors of __int128 as memory.
- bool passInt128VectorsInMem() const {
- // Clang <= 9.0 did not do this.
- if (getContext().getLangOpts().getClangABICompat() <=
- LangOptions::ClangABI::Ver9)
- return false;
-
- const llvm::Triple &T = getTarget().getTriple();
- return T.isOSLinux() || T.isOSNetBSD();
- }
-
- X86AVXABILevel AVXLevel;
- // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
- // 64-bit hardware.
- bool Has64BitPointers;
-
-public:
- X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
- SwiftABIInfo(CGT), AVXLevel(AVXLevel),
- Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
- }
-
- bool isPassedUsingAVXType(QualType type) const {
- unsigned neededInt, neededSSE;
- // The freeIntRegs argument doesn't matter here.
- ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
- /*isNamedArg*/true);
- if (info.isDirect()) {
- llvm::Type *ty = info.getCoerceToType();
- if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
- return vectorTy->getPrimitiveSizeInBits().getFixedSize() > 128;
- }
- return false;
- }
-
- void computeInfo(CGFunctionInfo &FI) const override;
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
- Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-
- bool has64BitPointers() const {
- return Has64BitPointers;
- }
-
- bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
- bool asReturnValue) const override {
- return occupiesMoreThan(CGT, scalars, /*total*/ 4);
- }
- bool isSwiftErrorInRegister() const override {
- return true;
- }
-};
-
-/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
-class WinX86_64ABIInfo : public SwiftABIInfo {
-public:
- WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
- : SwiftABIInfo(CGT), AVXLevel(AVXLevel),
- IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
-
- void computeInfo(CGFunctionInfo &FI) const override;
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-
- bool isHomogeneousAggregateBaseType(QualType Ty) const override {
- // FIXME: Assumes vectorcall is in use.
- return isX86VectorTypeForVectorCall(getContext(), Ty);
- }
-
- bool isHomogeneousAggregateSmallEnough(const Type *Ty,
- uint64_t NumMembers) const override {
- // FIXME: Assumes vectorcall is in use.
- return isX86VectorCallAggregateSmallEnough(NumMembers);
- }
-
- bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars,
- bool asReturnValue) const override {
- return occupiesMoreThan(CGT, scalars, /*total*/ 4);
- }
-
- bool isSwiftErrorInRegister() const override {
- return true;
- }
-
-private:
- ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
- bool IsVectorCall, bool IsRegCall) const;
- ABIArgInfo reclassifyHvaArgForVectorCall(QualType Ty, unsigned &FreeSSERegs,
- const ABIArgInfo &current) const;
-
- X86AVXABILevel AVXLevel;
-
- bool IsMingw64;
-};
-
-class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
- : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {}
-
- const X86_64ABIInfo &getABIInfo() const {
- return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
- }
-
- /// Disable tail call on x86-64. The epilogue code before the tail jump blocks
- /// autoreleaseRV/retainRV and autoreleaseRV/unsafeClaimRV optimizations.
- bool markARCOptimizedReturnCallsAsNoTail() const override { return true; }
-
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
- return 7;
- }
-
- bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override {
- llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
-
- // 0-15 are the 16 integer registers.
- // 16 is %rip.
- AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
- return false;
- }
-
- llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
- StringRef Constraint,
- llvm::Type* Ty) const override {
- return X86AdjustInlineAsmType(CGF, Constraint, Ty);
- }
-
- bool isNoProtoCallVariadic(const CallArgList &args,
- const FunctionNoProtoType *fnType) const override {
- // The default CC on x86-64 sets %al to the number of SSA
- // registers used, and GCC sets this when calling an unprototyped
- // function, so we override the default behavior. However, don't do
- // that when AVX types are involved: the ABI explicitly states it is
- // undefined, and it doesn't work in practice because of how the ABI
- // defines varargs anyway.
- if (fnType->getCallConv() == CC_C) {
- bool HasAVXType = false;
- for (CallArgList::const_iterator
- it = args.begin(), ie = args.end(); it != ie; ++it) {
- if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
- HasAVXType = true;
- break;
- }
- }
-
- if (!HasAVXType)
- return true;
- }
-
- return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
- }
-
- llvm::Constant *
- getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
- unsigned Sig = (0xeb << 0) | // jmp rel8
- (0x06 << 8) | // .+0x08
- ('v' << 16) |
- ('2' << 24);
- return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
- }
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override {
- if (GV->isDeclaration())
- return;
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
- if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
- llvm::Function *Fn = cast<llvm::Function>(GV);
- Fn->addFnAttr("stackrealign");
- }
-
- addX86InterruptAttrs(FD, GV, CGM);
- }
- }
-
- void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
- const FunctionDecl *Caller,
- const FunctionDecl *Callee,
- const CallArgList &Args) const override;
-};
-
-static void initFeatureMaps(const ASTContext &Ctx,
- llvm::StringMap<bool> &CallerMap,
- const FunctionDecl *Caller,
- llvm::StringMap<bool> &CalleeMap,
- const FunctionDecl *Callee) {
- if (CalleeMap.empty() && CallerMap.empty()) {
- // The caller is potentially nullptr in the case where the call isn't in a
- // function. In this case, the getFunctionFeatureMap ensures we just get
- // the TU level setting (since it cannot be modified by 'target'..
- Ctx.getFunctionFeatureMap(CallerMap, Caller);
- Ctx.getFunctionFeatureMap(CalleeMap, Callee);
- }
-}
-
-static bool checkAVXParamFeature(DiagnosticsEngine &Diag,
- SourceLocation CallLoc,
- const llvm::StringMap<bool> &CallerMap,
- const llvm::StringMap<bool> &CalleeMap,
- QualType Ty, StringRef Feature,
- bool IsArgument) {
- bool CallerHasFeat = CallerMap.lookup(Feature);
- bool CalleeHasFeat = CalleeMap.lookup(Feature);
- if (!CallerHasFeat && !CalleeHasFeat)
- return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
- << IsArgument << Ty << Feature;
-
- // Mixing calling conventions here is very clearly an error.
- if (!CallerHasFeat || !CalleeHasFeat)
- return Diag.Report(CallLoc, diag::err_avx_calling_convention)
- << IsArgument << Ty << Feature;
-
- // Else, both caller and callee have the required feature, so there is no need
- // to diagnose.
- return false;
-}
-
-static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx,
- SourceLocation CallLoc,
- const llvm::StringMap<bool> &CallerMap,
- const llvm::StringMap<bool> &CalleeMap, QualType Ty,
- bool IsArgument) {
- uint64_t Size = Ctx.getTypeSize(Ty);
- if (Size > 256)
- return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty,
- "avx512f", IsArgument);
-
- if (Size > 128)
- return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, "avx",
- IsArgument);
-
- return false;
-}
-
-void X86_64TargetCodeGenInfo::checkFunctionCallABI(
- CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
- const FunctionDecl *Callee, const CallArgList &Args) const {
- llvm::StringMap<bool> CallerMap;
- llvm::StringMap<bool> CalleeMap;
- unsigned ArgIndex = 0;
-
- // We need to loop through the actual call arguments rather than the the
- // function's parameters, in case this variadic.
- for (const CallArg &Arg : Args) {
- // The "avx" feature changes how vectors >128 in size are passed. "avx512f"
- // additionally changes how vectors >256 in size are passed. Like GCC, we
- // warn when a function is called with an argument where this will change.
- // Unlike GCC, we also error when it is an obvious ABI mismatch, that is,
- // the caller and callee features are mismatched.
- // Unfortunately, we cannot do this diagnostic in SEMA, since the callee can
- // change its ABI with attribute-target after this call.
- if (Arg.getType()->isVectorType() &&
- CGM.getContext().getTypeSize(Arg.getType()) > 128) {
- initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
- QualType Ty = Arg.getType();
- // The CallArg seems to have desugared the type already, so for clearer
- // diagnostics, replace it with the type in the FunctionDecl if possible.
- if (ArgIndex < Callee->getNumParams())
- Ty = Callee->getParamDecl(ArgIndex)->getType();
-
- if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
- CalleeMap, Ty, /*IsArgument*/ true))
- return;
- }
- ++ArgIndex;
- }
-
- // Check return always, as we don't have a good way of knowing in codegen
- // whether this value is used, tail-called, etc.
- if (Callee->getReturnType()->isVectorType() &&
- CGM.getContext().getTypeSize(Callee->getReturnType()) > 128) {
- initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
- checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
- CalleeMap, Callee->getReturnType(),
- /*IsArgument*/ false);
- }
-}
-
-static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
- // If the argument does not end in .lib, automatically add the suffix.
- // If the argument contains a space, enclose it in quotes.
- // This matches the behavior of MSVC.
- bool Quote = (Lib.find(' ') != StringRef::npos);
- std::string ArgStr = Quote ? "\"" : "";
- ArgStr += Lib;
- if (!Lib.endswith_insensitive(".lib") && !Lib.endswith_insensitive(".a"))
- ArgStr += ".lib";
- ArgStr += Quote ? "\"" : "";
- return ArgStr;
-}
-
-class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
-public:
- WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
- bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
- unsigned NumRegisterParameters)
- : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
- Win32StructABI, NumRegisterParameters, false) {}
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override;
-
- void getDependentLibraryOption(llvm::StringRef Lib,
- llvm::SmallString<24> &Opt) const override {
- Opt = "/DEFAULTLIB:";
- Opt += qualifyWindowsLibrary(Lib);
- }
-
- void getDetectMismatchOption(llvm::StringRef Name,
- llvm::StringRef Value,
- llvm::SmallString<32> &Opt) const override {
- Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
- }
-};
-
-static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) {
if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) {
-
if (CGM.getCodeGenOpts().StackProbeSize != 4096)
Fn->addFnAttr("stack-probe-size",
llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
@@ -2652,8694 +169,52 @@ static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
}
}
-void WinX86_32TargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
- X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
- if (GV->isDeclaration())
- return;
- addStackProbeTargetAttributes(D, GV, CGM);
-}
-
-class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
- X86AVXABILevel AVXLevel)
- : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {}
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override;
-
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
- return 7;
- }
-
- bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override {
- llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
-
- // 0-15 are the 16 integer registers.
- // 16 is %rip.
- AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
- return false;
- }
-
- void getDependentLibraryOption(llvm::StringRef Lib,
- llvm::SmallString<24> &Opt) const override {
- Opt = "/DEFAULTLIB:";
- Opt += qualifyWindowsLibrary(Lib);
- }
-
- void getDetectMismatchOption(llvm::StringRef Name,
- llvm::StringRef Value,
- llvm::SmallString<32> &Opt) const override {
- Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
- }
-};
-
-void WinX86_64TargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
- TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
- if (GV->isDeclaration())
- return;
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
- if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
- llvm::Function *Fn = cast<llvm::Function>(GV);
- Fn->addFnAttr("stackrealign");
- }
-
- addX86InterruptAttrs(FD, GV, CGM);
- }
-
- addStackProbeTargetAttributes(D, GV, CGM);
-}
-}
-
-void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
- Class &Hi) const {
- // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
- //
- // (a) If one of the classes is Memory, the whole argument is passed in
- // memory.
- //
- // (b) If X87UP is not preceded by X87, the whole argument is passed in
- // memory.
- //
- // (c) If the size of the aggregate exceeds two eightbytes and the first
- // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
- // argument is passed in memory. NOTE: This is necessary to keep the
- // ABI working for processors that don't support the __m256 type.
- //
- // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
- //
- // Some of these are enforced by the merging logic. Others can arise
- // only with unions; for example:
- // union { _Complex double; unsigned; }
- //
- // Note that clauses (b) and (c) were added in 0.98.
- //
- if (Hi == Memory)
- Lo = Memory;
- if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
- Lo = Memory;
- if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
- Lo = Memory;
- if (Hi == SSEUp && Lo != SSE)
- Hi = SSE;
-}
-
-X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
- // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
- // classified recursively so that always two fields are
- // considered. The resulting class is calculated according to
- // the classes of the fields in the eightbyte:
- //
- // (a) If both classes are equal, this is the resulting class.
- //
- // (b) If one of the classes is NO_CLASS, the resulting class is
- // the other class.
- //
- // (c) If one of the classes is MEMORY, the result is the MEMORY
- // class.
- //
- // (d) If one of the classes is INTEGER, the result is the
- // INTEGER.
- //
- // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
- // MEMORY is used as class.
- //
- // (f) Otherwise class SSE is used.
-
- // Accum should never be memory (we should have returned) or
- // ComplexX87 (because this cannot be passed in a structure).
- assert((Accum != Memory && Accum != ComplexX87) &&
- "Invalid accumulated classification during merge.");
- if (Accum == Field || Field == NoClass)
- return Accum;
- if (Field == Memory)
- return Memory;
- if (Accum == NoClass)
- return Field;
- if (Accum == Integer || Field == Integer)
- return Integer;
- if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
- Accum == X87 || Accum == X87Up)
- return Memory;
- return SSE;
-}
-
-void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
- Class &Lo, Class &Hi, bool isNamedArg) const {
- // FIXME: This code can be simplified by introducing a simple value class for
- // Class pairs with appropriate constructor methods for the various
- // situations.
-
- // FIXME: Some of the split computations are wrong; unaligned vectors
- // shouldn't be passed in registers for example, so there is no chance they
- // can straddle an eightbyte. Verify & simplify.
-
- Lo = Hi = NoClass;
-
- Class &Current = OffsetBase < 64 ? Lo : Hi;
- Current = Memory;
-
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
- BuiltinType::Kind k = BT->getKind();
-
- if (k == BuiltinType::Void) {
- Current = NoClass;
- } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
- Lo = Integer;
- Hi = Integer;
- } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
- Current = Integer;
- } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
- Current = SSE;
- } else if (k == BuiltinType::LongDouble) {
- const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
- if (LDF == &llvm::APFloat::IEEEquad()) {
- Lo = SSE;
- Hi = SSEUp;
- } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
- Lo = X87;
- Hi = X87Up;
- } else if (LDF == &llvm::APFloat::IEEEdouble()) {
- Current = SSE;
- } else
- llvm_unreachable("unexpected long double representation!");
- }
- // FIXME: _Decimal32 and _Decimal64 are SSE.
- // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
- return;
- }
-
- if (const EnumType *ET = Ty->getAs<EnumType>()) {
- // Classify the underlying integer type.
- classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
- return;
- }
-
- if (Ty->hasPointerRepresentation()) {
- Current = Integer;
- return;
- }
-
- if (Ty->isMemberPointerType()) {
- if (Ty->isMemberFunctionPointerType()) {
- if (Has64BitPointers) {
- // If Has64BitPointers, this is an {i64, i64}, so classify both
- // Lo and Hi now.
- Lo = Hi = Integer;
- } else {
- // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
- // straddles an eightbyte boundary, Hi should be classified as well.
- uint64_t EB_FuncPtr = (OffsetBase) / 64;
- uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
- if (EB_FuncPtr != EB_ThisAdj) {
- Lo = Hi = Integer;
- } else {
- Current = Integer;
- }
- }
- } else {
- Current = Integer;
- }
- return;
- }
-
- if (const VectorType *VT = Ty->getAs<VectorType>()) {
- uint64_t Size = getContext().getTypeSize(VT);
- if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
- // gcc passes the following as integer:
- // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
- // 2 bytes - <2 x char>, <1 x short>
- // 1 byte - <1 x char>
- Current = Integer;
-
- // If this type crosses an eightbyte boundary, it should be
- // split.
- uint64_t EB_Lo = (OffsetBase) / 64;
- uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
- if (EB_Lo != EB_Hi)
- Hi = Lo;
- } else if (Size == 64) {
- QualType ElementType = VT->getElementType();
-
- // gcc passes <1 x double> in memory. :(
- if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
- return;
-
- // gcc passes <1 x long long> as SSE but clang used to unconditionally
- // pass them as integer. For platforms where clang is the de facto
- // platform compiler, we must continue to use integer.
- if (!classifyIntegerMMXAsSSE() &&
- (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
- ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
- ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
- ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
- Current = Integer;
- else
- Current = SSE;
-
- // If this type crosses an eightbyte boundary, it should be
- // split.
- if (OffsetBase && OffsetBase != 64)
- Hi = Lo;
- } else if (Size == 128 ||
- (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
- QualType ElementType = VT->getElementType();
-
- // gcc passes 256 and 512 bit <X x __int128> vectors in memory. :(
- if (passInt128VectorsInMem() && Size != 128 &&
- (ElementType->isSpecificBuiltinType(BuiltinType::Int128) ||
- ElementType->isSpecificBuiltinType(BuiltinType::UInt128)))
- return;
-
- // Arguments of 256-bits are split into four eightbyte chunks. The
- // least significant one belongs to class SSE and all the others to class
- // SSEUP. The original Lo and Hi design considers that types can't be
- // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
- // This design isn't correct for 256-bits, but since there're no cases
- // where the upper parts would need to be inspected, avoid adding
- // complexity and just consider Hi to match the 64-256 part.
- //
- // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
- // registers if they are "named", i.e. not part of the "..." of a
- // variadic function.
- //
- // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
- // split into eight eightbyte chunks, one SSE and seven SSEUP.
- Lo = SSE;
- Hi = SSEUp;
- }
- return;
- }
-
- if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
- QualType ET = getContext().getCanonicalType(CT->getElementType());
-
- uint64_t Size = getContext().getTypeSize(Ty);
- if (ET->isIntegralOrEnumerationType()) {
- if (Size <= 64)
- Current = Integer;
- else if (Size <= 128)
- Lo = Hi = Integer;
- } else if (ET == getContext().FloatTy) {
- Current = SSE;
- } else if (ET == getContext().DoubleTy) {
- Lo = Hi = SSE;
- } else if (ET == getContext().LongDoubleTy) {
- const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
- if (LDF == &llvm::APFloat::IEEEquad())
- Current = Memory;
- else if (LDF == &llvm::APFloat::x87DoubleExtended())
- Current = ComplexX87;
- else if (LDF == &llvm::APFloat::IEEEdouble())
- Lo = Hi = SSE;
- else
- llvm_unreachable("unexpected long double representation!");
- }
-
- // If this complex type crosses an eightbyte boundary then it
- // should be split.
- uint64_t EB_Real = (OffsetBase) / 64;
- uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
- if (Hi == NoClass && EB_Real != EB_Imag)
- Hi = Lo;
-
- return;
- }
-
- if (const auto *EITy = Ty->getAs<ExtIntType>()) {
- if (EITy->getNumBits() <= 64)
- Current = Integer;
- else if (EITy->getNumBits() <= 128)
- Lo = Hi = Integer;
- // Larger values need to get passed in memory.
- return;
- }
-
- if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
- // Arrays are treated like structures.
-
- uint64_t Size = getContext().getTypeSize(Ty);
-
- // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
- // than eight eightbytes, ..., it has class MEMORY.
- if (Size > 512)
- return;
-
- // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
- // fields, it has class MEMORY.
- //
- // Only need to check alignment of array base.
- if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
- return;
-
- // Otherwise implement simplified merge. We could be smarter about
- // this, but it isn't worth it and would be harder to verify.
- Current = NoClass;
- uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
- uint64_t ArraySize = AT->getSize().getZExtValue();
-
- // The only case a 256-bit wide vector could be used is when the array
- // contains a single 256-bit element. Since Lo and Hi logic isn't extended
- // to work for sizes wider than 128, early check and fallback to memory.
- //
- if (Size > 128 &&
- (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
- return;
-
- for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
- Class FieldLo, FieldHi;
- classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
- Lo = merge(Lo, FieldLo);
- Hi = merge(Hi, FieldHi);
- if (Lo == Memory || Hi == Memory)
- break;
- }
-
- postMerge(Size, Lo, Hi);
- assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
- return;
- }
-
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
- uint64_t Size = getContext().getTypeSize(Ty);
-
- // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
- // than eight eightbytes, ..., it has class MEMORY.
- if (Size > 512)
- return;
-
- // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
- // copy constructor or a non-trivial destructor, it is passed by invisible
- // reference.
- if (getRecordArgABI(RT, getCXXABI()))
- return;
-
- const RecordDecl *RD = RT->getDecl();
-
- // Assume variable sized types are passed in memory.
- if (RD->hasFlexibleArrayMember())
- return;
-
- const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
-
- // Reset Lo class, this will be recomputed.
- Current = NoClass;
-
- // If this is a C++ record, classify the bases first.
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
- for (const auto &I : CXXRD->bases()) {
- assert(!I.isVirtual() && !I.getType()->isDependentType() &&
- "Unexpected base class!");
- const auto *Base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
-
- // Classify this field.
- //
- // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
- // single eightbyte, each is classified separately. Each eightbyte gets
- // initialized to class NO_CLASS.
- Class FieldLo, FieldHi;
- uint64_t Offset =
- OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
- classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
- Lo = merge(Lo, FieldLo);
- Hi = merge(Hi, FieldHi);
- if (Lo == Memory || Hi == Memory) {
- postMerge(Size, Lo, Hi);
- return;
- }
- }
- }
-
- // Classify the fields one at a time, merging the results.
- unsigned idx = 0;
- bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
- LangOptions::ClangABI::Ver11 ||
- getContext().getTargetInfo().getTriple().isPS4();
- bool IsUnion = RT->isUnionType() && !UseClang11Compat;
-
- for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
- i != e; ++i, ++idx) {
- uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
- bool BitField = i->isBitField();
-
- // Ignore padding bit-fields.
- if (BitField && i->isUnnamedBitfield())
- continue;
-
- // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
- // eight eightbytes, or it contains unaligned fields, it has class MEMORY.
- //
- // The only case a 256-bit or a 512-bit wide vector could be used is when
- // the struct contains a single 256-bit or 512-bit element. Early check
- // and fallback to memory.
- //
- // FIXME: Extended the Lo and Hi logic properly to work for size wider
- // than 128.
- if (Size > 128 &&
- ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
- Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
- Lo = Memory;
- postMerge(Size, Lo, Hi);
- return;
- }
- // Note, skip this test for bit-fields, see below.
- if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
- Lo = Memory;
- postMerge(Size, Lo, Hi);
- return;
- }
-
- // Classify this field.
- //
- // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
- // exceeds a single eightbyte, each is classified
- // separately. Each eightbyte gets initialized to class
- // NO_CLASS.
- Class FieldLo, FieldHi;
-
- // Bit-fields require special handling, they do not force the
- // structure to be passed in memory even if unaligned, and
- // therefore they can straddle an eightbyte.
- if (BitField) {
- assert(!i->isUnnamedBitfield());
- uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
- uint64_t Size = i->getBitWidthValue(getContext());
-
- uint64_t EB_Lo = Offset / 64;
- uint64_t EB_Hi = (Offset + Size - 1) / 64;
-
- if (EB_Lo) {
- assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
- FieldLo = NoClass;
- FieldHi = Integer;
- } else {
- FieldLo = Integer;
- FieldHi = EB_Hi ? Integer : NoClass;
- }
- } else
- classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
- Lo = merge(Lo, FieldLo);
- Hi = merge(Hi, FieldHi);
- if (Lo == Memory || Hi == Memory)
- break;
- }
-
- postMerge(Size, Lo, Hi);
- }
-}
-
-ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
- // If this is a scalar LLVM value then assume LLVM will pass it in the right
- // place naturally.
- if (!isAggregateTypeForABI(Ty)) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- if (Ty->isExtIntType())
- return getNaturalAlignIndirect(Ty);
-
- return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
- }
-
- return getNaturalAlignIndirect(Ty);
-}
-
-bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
- if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
- uint64_t Size = getContext().getTypeSize(VecTy);
- unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
- if (Size <= 64 || Size > LargestVector)
- return true;
- QualType EltTy = VecTy->getElementType();
- if (passInt128VectorsInMem() &&
- (EltTy->isSpecificBuiltinType(BuiltinType::Int128) ||
- EltTy->isSpecificBuiltinType(BuiltinType::UInt128)))
- return true;
- }
-
- return false;
-}
-
-ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
- unsigned freeIntRegs) const {
- // If this is a scalar LLVM value then assume LLVM will pass it in the right
- // place naturally.
- //
- // This assumption is optimistic, as there could be free registers available
- // when we need to pass this argument in memory, and LLVM could try to pass
- // the argument in the free register. This does not seem to happen currently,
- // but this code would be much safer if we could mark the argument with
- // 'onstack'. See PR12193.
- if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) &&
- !Ty->isExtIntType()) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
- }
-
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
-
- // Compute the byval alignment. We specify the alignment of the byval in all
- // cases so that the mid-level optimizer knows the alignment of the byval.
- unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
-
- // Attempt to avoid passing indirect results using byval when possible. This
- // is important for good codegen.
- //
- // We do this by coercing the value into a scalar type which the backend can
- // handle naturally (i.e., without using byval).
- //
- // For simplicity, we currently only do this when we have exhausted all of the
- // free integer registers. Doing this when there are free integer registers
- // would require more care, as we would have to ensure that the coerced value
- // did not claim the unused register. That would require either reording the
- // arguments to the function (so that any subsequent inreg values came first),
- // or only doing this optimization when there were no following arguments that
- // might be inreg.
- //
- // We currently expect it to be rare (particularly in well written code) for
- // arguments to be passed on the stack when there are still free integer
- // registers available (this would typically imply large structs being passed
- // by value), so this seems like a fair tradeoff for now.
- //
- // We can revisit this if the backend grows support for 'onstack' parameter
- // attributes. See PR12193.
- if (freeIntRegs == 0) {
- uint64_t Size = getContext().getTypeSize(Ty);
-
- // If this type fits in an eightbyte, coerce it into the matching integral
- // type, which will end up on the stack (with alignment 8).
- if (Align == 8 && Size <= 64)
- return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
- Size));
- }
-
- return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
-}
-
-/// The ABI specifies that a value should be passed in a full vector XMM/YMM
-/// register. Pick an LLVM IR type that will be passed as a vector register.
-llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
- // Wrapper structs/arrays that only contain vectors are passed just like
- // vectors; strip them off if present.
- if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
- Ty = QualType(InnerTy, 0);
-
- llvm::Type *IRType = CGT.ConvertType(Ty);
- if (isa<llvm::VectorType>(IRType)) {
- // Don't pass vXi128 vectors in their native type, the backend can't
- // legalize them.
- if (passInt128VectorsInMem() &&
- cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
- // Use a vXi64 vector.
- uint64_t Size = getContext().getTypeSize(Ty);
- return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
- Size / 64);
- }
-
- return IRType;
- }
-
- if (IRType->getTypeID() == llvm::Type::FP128TyID)
- return IRType;
-
- // We couldn't find the preferred IR vector type for 'Ty'.
- uint64_t Size = getContext().getTypeSize(Ty);
- assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");
-
-
- // Return a LLVM IR vector type based on the size of 'Ty'.
- return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
- Size / 64);
-}
-
-/// BitsContainNoUserData - Return true if the specified [start,end) bit range
-/// is known to either be off the end of the specified type or being in
-/// alignment padding. The user type specified is known to be at most 128 bits
-/// in size, and have passed through X86_64ABIInfo::classify with a successful
-/// classification that put one of the two halves in the INTEGER class.
-///
-/// It is conservatively correct to return false.
-static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
- unsigned EndBit, ASTContext &Context) {
- // If the bytes being queried are off the end of the type, there is no user
- // data hiding here. This handles analysis of builtins, vectors and other
- // types that don't contain interesting padding.
- unsigned TySize = (unsigned)Context.getTypeSize(Ty);
- if (TySize <= StartBit)
- return true;
-
- if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
- unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
- unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
-
- // Check each element to see if the element overlaps with the queried range.
- for (unsigned i = 0; i != NumElts; ++i) {
- // If the element is after the span we care about, then we're done..
- unsigned EltOffset = i*EltSize;
- if (EltOffset >= EndBit) break;
-
- unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
- if (!BitsContainNoUserData(AT->getElementType(), EltStart,
- EndBit-EltOffset, Context))
- return false;
- }
- // If it overlaps no elements, then it is safe to process as padding.
- return true;
- }
-
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
-
- // If this is a C++ record, check the bases first.
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
- for (const auto &I : CXXRD->bases()) {
- assert(!I.isVirtual() && !I.getType()->isDependentType() &&
- "Unexpected base class!");
- const auto *Base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
-
- // If the base is after the span we care about, ignore it.
- unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
- if (BaseOffset >= EndBit) continue;
-
- unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
- if (!BitsContainNoUserData(I.getType(), BaseStart,
- EndBit-BaseOffset, Context))
- return false;
- }
- }
-
- // Verify that no field has data that overlaps the region of interest. Yes
- // this could be sped up a lot by being smarter about queried fields,
- // however we're only looking at structs up to 16 bytes, so we don't care
- // much.
- unsigned idx = 0;
- for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
- i != e; ++i, ++idx) {
- unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
-
- // If we found a field after the region we care about, then we're done.
- if (FieldOffset >= EndBit) break;
-
- unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
- if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
- Context))
- return false;
- }
-
- // If nothing in this record overlapped the area of interest, then we're
- // clean.
- return true;
- }
-
- return false;
-}
-
-/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
-/// float member at the specified offset. For example, {int,{float}} has a
-/// float at offset 4. It is conservatively correct for this routine to return
-/// false.
-static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
- const llvm::DataLayout &TD) {
- // Base case if we find a float.
- if (IROffset == 0 && IRType->isFloatTy())
- return true;
-
- // If this is a struct, recurse into the field at the specified offset.
- if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
- const llvm::StructLayout *SL = TD.getStructLayout(STy);
- unsigned Elt = SL->getElementContainingOffset(IROffset);
- IROffset -= SL->getElementOffset(Elt);
- return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
- }
-
- // If this is an array, recurse into the field at the specified offset.
- if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
- llvm::Type *EltTy = ATy->getElementType();
- unsigned EltSize = TD.getTypeAllocSize(EltTy);
- IROffset -= IROffset/EltSize*EltSize;
- return ContainsFloatAtOffset(EltTy, IROffset, TD);
- }
-
- return false;
-}
-
-
-/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
-/// low 8 bytes of an XMM register, corresponding to the SSE class.
-llvm::Type *X86_64ABIInfo::
-GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
- QualType SourceTy, unsigned SourceOffset) const {
- // The only three choices we have are either double, <2 x float>, or float. We
- // pass as float if the last 4 bytes is just padding. This happens for
- // structs that contain 3 floats.
- if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
- SourceOffset*8+64, getContext()))
- return llvm::Type::getFloatTy(getVMContext());
-
- // We want to pass as <2 x float> if the LLVM IR type contains a float at
- // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
- // case.
- if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
- ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
- return llvm::FixedVectorType::get(llvm::Type::getFloatTy(getVMContext()),
- 2);
-
- return llvm::Type::getDoubleTy(getVMContext());
-}
-
-
-/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
-/// an 8-byte GPR. This means that we either have a scalar or we are talking
-/// about the high or low part of an up-to-16-byte struct. This routine picks
-/// the best LLVM IR type to represent this, which may be i64 or may be anything
-/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
-/// etc).
-///
-/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
-/// the source type. IROffset is an offset in bytes into the LLVM IR type that
-/// the 8-byte value references. PrefType may be null.
-///
-/// SourceTy is the source-level type for the entire argument. SourceOffset is
-/// an offset into this that we're processing (which is always either 0 or 8).
-///
-llvm::Type *X86_64ABIInfo::
-GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
- QualType SourceTy, unsigned SourceOffset) const {
- // If we're dealing with an un-offset LLVM IR type, then it means that we're
- // returning an 8-byte unit starting with it. See if we can safely use it.
- if (IROffset == 0) {
- // Pointers and int64's always fill the 8-byte unit.
- if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
- IRType->isIntegerTy(64))
- return IRType;
-
- // If we have a 1/2/4-byte integer, we can use it only if the rest of the
- // goodness in the source type is just tail padding. This is allowed to
- // kick in for struct {double,int} on the int, but not on
- // struct{double,int,int} because we wouldn't return the second int. We
- // have to do this analysis on the source type because we can't depend on
- // unions being lowered a specific way etc.
- if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
- IRType->isIntegerTy(32) ||
- (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
- unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
- cast<llvm::IntegerType>(IRType)->getBitWidth();
-
- if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
- SourceOffset*8+64, getContext()))
- return IRType;
- }
- }
-
- if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
- // If this is a struct, recurse into the field at the specified offset.
- const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
- if (IROffset < SL->getSizeInBytes()) {
- unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
- IROffset -= SL->getElementOffset(FieldIdx);
-
- return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
- SourceTy, SourceOffset);
- }
- }
-
- if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
- llvm::Type *EltTy = ATy->getElementType();
- unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
- unsigned EltOffset = IROffset/EltSize*EltSize;
- return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
- SourceOffset);
- }
-
- // Okay, we don't have any better idea of what to pass, so we pass this in an
- // integer register that isn't too big to fit the rest of the struct.
- unsigned TySizeInBytes =
- (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
-
- assert(TySizeInBytes != SourceOffset && "Empty field?");
-
- // It is always safe to classify this as an integer type up to i64 that
- // isn't larger than the structure.
- return llvm::IntegerType::get(getVMContext(),
- std::min(TySizeInBytes-SourceOffset, 8U)*8);
-}
-
-
-/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
-/// be used as elements of a two register pair to pass or return, return a
-/// first class aggregate to represent them. For example, if the low part of
-/// a by-value argument should be passed as i32* and the high part as float,
-/// return {i32*, float}.
-static llvm::Type *
-GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
- const llvm::DataLayout &TD) {
- // In order to correctly satisfy the ABI, we need to the high part to start
- // at offset 8. If the high and low parts we inferred are both 4-byte types
- // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
- // the second element at offset 8. Check for this:
- unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
- unsigned HiAlign = TD.getABITypeAlignment(Hi);
- unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
- assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
-
- // To handle this, we have to increase the size of the low part so that the
- // second element will start at an 8 byte offset. We can't increase the size
- // of the second element because it might make us access off the end of the
- // struct.
- if (HiStart != 8) {
- // There are usually two sorts of types the ABI generation code can produce
- // for the low part of a pair that aren't 8 bytes in size: float or
- // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
- // NaCl).
- // Promote these to a larger type.
- if (Lo->isFloatTy())
- Lo = llvm::Type::getDoubleTy(Lo->getContext());
- else {
- assert((Lo->isIntegerTy() || Lo->isPointerTy())
- && "Invalid/unknown lo type");
- Lo = llvm::Type::getInt64Ty(Lo->getContext());
- }
- }
-
- llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
-
- // Verify that the second element is at an 8-byte offset.
- assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
- "Invalid x86-64 argument pair!");
- return Result;
-}
-
-ABIArgInfo X86_64ABIInfo::
-classifyReturnType(QualType RetTy) const {
- // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
- // classification algorithm.
- X86_64ABIInfo::Class Lo, Hi;
- classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
-
- // Check some invariants.
- assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
- assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
-
- llvm::Type *ResType = nullptr;
- switch (Lo) {
- case NoClass:
- if (Hi == NoClass)
- return ABIArgInfo::getIgnore();
- // If the low part is just padding, it takes no register, leave ResType
- // null.
- assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
- "Unknown missing lo part");
- break;
-
- case SSEUp:
- case X87Up:
- llvm_unreachable("Invalid classification for lo word.");
-
- // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
- // hidden argument.
- case Memory:
- return getIndirectReturnResult(RetTy);
-
- // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
- // available register of the sequence %rax, %rdx is used.
- case Integer:
- ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
-
- // If we have a sign or zero extended integer, make sure to return Extend
- // so that the parameter gets the right LLVM IR attributes.
- if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
-
- if (RetTy->isIntegralOrEnumerationType() &&
- isPromotableIntegerTypeForABI(RetTy))
- return ABIArgInfo::getExtend(RetTy);
- }
- break;
-
- // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
- // available SSE register of the sequence %xmm0, %xmm1 is used.
- case SSE:
- ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
- break;
-
- // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
- // returned on the X87 stack in %st0 as 80-bit x87 number.
- case X87:
- ResType = llvm::Type::getX86_FP80Ty(getVMContext());
- break;
-
- // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
- // part of the value is returned in %st0 and the imaginary part in
- // %st1.
- case ComplexX87:
- assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
- ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
- llvm::Type::getX86_FP80Ty(getVMContext()));
- break;
- }
-
- llvm::Type *HighPart = nullptr;
- switch (Hi) {
- // Memory was handled previously and X87 should
- // never occur as a hi class.
- case Memory:
- case X87:
- llvm_unreachable("Invalid classification for hi word.");
-
- case ComplexX87: // Previously handled.
- case NoClass:
- break;
-
- case Integer:
- HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
- if (Lo == NoClass) // Return HighPart at offset 8 in memory.
- return ABIArgInfo::getDirect(HighPart, 8);
- break;
- case SSE:
- HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
- if (Lo == NoClass) // Return HighPart at offset 8 in memory.
- return ABIArgInfo::getDirect(HighPart, 8);
- break;
-
- // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
- // is passed in the next available eightbyte chunk if the last used
- // vector register.
- //
- // SSEUP should always be preceded by SSE, just widen.
- case SSEUp:
- assert(Lo == SSE && "Unexpected SSEUp classification.");
- ResType = GetByteVectorType(RetTy);
- break;
-
- // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
- // returned together with the previous X87 value in %st0.
- case X87Up:
- // If X87Up is preceded by X87, we don't need to do
- // anything. However, in some cases with unions it may not be
- // preceded by X87. In such situations we follow gcc and pass the
- // extra bits in an SSE reg.
- if (Lo != X87) {
- HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
- if (Lo == NoClass) // Return HighPart at offset 8 in memory.
- return ABIArgInfo::getDirect(HighPart, 8);
- }
- break;
- }
-
- // If a high part was specified, merge it together with the low part. It is
- // known to pass in the high eightbyte of the result. We do this by forming a
- // first class struct aggregate with the high and low part: {low, high}
- if (HighPart)
- ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
-
- return ABIArgInfo::getDirect(ResType);
-}
-
-ABIArgInfo X86_64ABIInfo::classifyArgumentType(
- QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
- bool isNamedArg)
- const
-{
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- X86_64ABIInfo::Class Lo, Hi;
- classify(Ty, 0, Lo, Hi, isNamedArg);
-
- // Check some invariants.
- // FIXME: Enforce these by construction.
- assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
- assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
-
- neededInt = 0;
- neededSSE = 0;
- llvm::Type *ResType = nullptr;
- switch (Lo) {
- case NoClass:
- if (Hi == NoClass)
- return ABIArgInfo::getIgnore();
- // If the low part is just padding, it takes no register, leave ResType
- // null.
- assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
- "Unknown missing lo part");
- break;
-
- // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
- // on the stack.
- case Memory:
-
- // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
- // COMPLEX_X87, it is passed in memory.
- case X87:
- case ComplexX87:
- if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
- ++neededInt;
- return getIndirectResult(Ty, freeIntRegs);
-
- case SSEUp:
- case X87Up:
- llvm_unreachable("Invalid classification for lo word.");
-
- // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
- // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
- // and %r9 is used.
- case Integer:
- ++neededInt;
-
- // Pick an 8-byte type based on the preferred type.
- ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
-
- // If we have a sign or zero extended integer, make sure to return Extend
- // so that the parameter gets the right LLVM IR attributes.
- if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- if (Ty->isIntegralOrEnumerationType() &&
- isPromotableIntegerTypeForABI(Ty))
- return ABIArgInfo::getExtend(Ty);
- }
-
- break;
-
- // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
- // available SSE register is used, the registers are taken in the
- // order from %xmm0 to %xmm7.
- case SSE: {
- llvm::Type *IRType = CGT.ConvertType(Ty);
- ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
- ++neededSSE;
- break;
- }
- }
-
- llvm::Type *HighPart = nullptr;
- switch (Hi) {
- // Memory was handled previously, ComplexX87 and X87 should
- // never occur as hi classes, and X87Up must be preceded by X87,
- // which is passed in memory.
- case Memory:
- case X87:
- case ComplexX87:
- llvm_unreachable("Invalid classification for hi word.");
-
- case NoClass: break;
-
- case Integer:
- ++neededInt;
- // Pick an 8-byte type based on the preferred type.
- HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
-
- if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
- return ABIArgInfo::getDirect(HighPart, 8);
- break;
-
- // X87Up generally doesn't occur here (long double is passed in
- // memory), except in situations involving unions.
- case X87Up:
- case SSE:
- HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
-
- if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
- return ABIArgInfo::getDirect(HighPart, 8);
-
- ++neededSSE;
- break;
-
- // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
- // eightbyte is passed in the upper half of the last used SSE
- // register. This only happens when 128-bit vectors are passed.
- case SSEUp:
- assert(Lo == SSE && "Unexpected SSEUp classification");
- ResType = GetByteVectorType(Ty);
- break;
- }
-
- // If a high part was specified, merge it together with the low part. It is
- // known to pass in the high eightbyte of the result. We do this by forming a
- // first class struct aggregate with the high and low part: {low, high}
- if (HighPart)
- ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
-
- return ABIArgInfo::getDirect(ResType);
-}
-
-ABIArgInfo
-X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
- unsigned &NeededSSE) const {
- auto RT = Ty->getAs<RecordType>();
- assert(RT && "classifyRegCallStructType only valid with struct types");
-
- if (RT->getDecl()->hasFlexibleArrayMember())
- return getIndirectReturnResult(Ty);
-
- // Sum up bases
- if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
- if (CXXRD->isDynamicClass()) {
- NeededInt = NeededSSE = 0;
- return getIndirectReturnResult(Ty);
- }
-
- for (const auto &I : CXXRD->bases())
- if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
- .isIndirect()) {
- NeededInt = NeededSSE = 0;
- return getIndirectReturnResult(Ty);
- }
- }
-
- // Sum up members
- for (const auto *FD : RT->getDecl()->fields()) {
- if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
- if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
- .isIndirect()) {
- NeededInt = NeededSSE = 0;
- return getIndirectReturnResult(Ty);
- }
- } else {
- unsigned LocalNeededInt, LocalNeededSSE;
- if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt,
- LocalNeededSSE, true)
- .isIndirect()) {
- NeededInt = NeededSSE = 0;
- return getIndirectReturnResult(Ty);
- }
- NeededInt += LocalNeededInt;
- NeededSSE += LocalNeededSSE;
- }
- }
-
- return ABIArgInfo::getDirect();
-}
-
-ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty,
- unsigned &NeededInt,
- unsigned &NeededSSE) const {
-
- NeededInt = 0;
- NeededSSE = 0;
-
- return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
-}
-
-void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
-
- const unsigned CallingConv = FI.getCallingConvention();
- // It is possible to force Win64 calling convention on any x86_64 target by
- // using __attribute__((ms_abi)). In such case to correctly emit Win64
- // compatible code delegate this call to WinX86_64ABIInfo::computeInfo.
- if (CallingConv == llvm::CallingConv::Win64) {
- WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
- Win64ABIInfo.computeInfo(FI);
- return;
- }
-
- bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;
-
- // Keep track of the number of assigned registers.
- unsigned FreeIntRegs = IsRegCall ? 11 : 6;
- unsigned FreeSSERegs = IsRegCall ? 16 : 8;
- unsigned NeededInt, NeededSSE;
-
- if (!::classifyReturnType(getCXXABI(), FI, *this)) {
- if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
- !FI.getReturnType()->getTypePtr()->isUnionType()) {
- FI.getReturnInfo() =
- classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE);
- if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
- FreeIntRegs -= NeededInt;
- FreeSSERegs -= NeededSSE;
- } else {
- FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
- }
- } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>() &&
- getContext().getCanonicalType(FI.getReturnType()
- ->getAs<ComplexType>()
- ->getElementType()) ==
- getContext().LongDoubleTy)
- // Complex Long Double Type is passed in Memory when Regcall
- // calling convention is used.
- FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
- else
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- }
-
- // If the return value is indirect, then the hidden argument is consuming one
- // integer register.
- if (FI.getReturnInfo().isIndirect())
- --FreeIntRegs;
-
- // The chain argument effectively gives us another free register.
- if (FI.isChainCall())
- ++FreeIntRegs;
-
- unsigned NumRequiredArgs = FI.getNumRequiredArgs();
- // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
- // get assigned (in left-to-right order) for passing as follows...
- unsigned ArgNo = 0;
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it, ++ArgNo) {
- bool IsNamedArg = ArgNo < NumRequiredArgs;
-
- if (IsRegCall && it->type->isStructureOrClassType())
- it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
- else
- it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
- NeededSSE, IsNamedArg);
-
- // AMD64-ABI 3.2.3p3: If there are no registers available for any
- // eightbyte of an argument, the whole argument is passed on the
- // stack. If registers have already been assigned for some
- // eightbytes of such an argument, the assignments get reverted.
- if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
- FreeIntRegs -= NeededInt;
- FreeSSERegs -= NeededSSE;
- } else {
- it->info = getIndirectResult(it->type, FreeIntRegs);
- }
- }
-}
-
-static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
- Address VAListAddr, QualType Ty) {
- Address overflow_arg_area_p =
- CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
- llvm::Value *overflow_arg_area =
- CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
-
- // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
- // byte boundary if alignment needed by type exceeds 8 byte boundary.
- // It isn't stated explicitly in the standard, but in practice we use
- // alignment greater than 16 where necessary.
- CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
- if (Align > CharUnits::fromQuantity(8)) {
- overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
- Align);
- }
-
- // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
- llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
- llvm::Value *Res =
- CGF.Builder.CreateBitCast(overflow_arg_area,
- llvm::PointerType::getUnqual(LTy));
-
- // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
- // l->overflow_arg_area + sizeof(type).
- // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
- // an 8 byte boundary.
-
- uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
- llvm::Value *Offset =
- llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
- overflow_arg_area = CGF.Builder.CreateGEP(CGF.Int8Ty, overflow_arg_area,
- Offset, "overflow_arg_area.next");
- CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
-
- // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
- return Address(Res, Align);
-}
-
-Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- // Assume that va_list type is correct; should be pointer to LLVM type:
- // struct {
- // i32 gp_offset;
- // i32 fp_offset;
- // i8* overflow_arg_area;
- // i8* reg_save_area;
- // };
- unsigned neededInt, neededSSE;
-
- Ty = getContext().getCanonicalType(Ty);
- ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
- /*isNamedArg*/false);
-
- // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
- // in the registers. If not go to step 7.
- if (!neededInt && !neededSSE)
- return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
-
- // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
- // general purpose registers needed to pass type and num_fp to hold
- // the number of floating point registers needed.
-
- // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
- // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
- // l->fp_offset > 304 - num_fp * 16 go to step 7.
- //
- // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
- // register save space).
-
- llvm::Value *InRegs = nullptr;
- Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
- llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
- if (neededInt) {
- gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
- gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
- InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
- InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
- }
-
- if (neededSSE) {
- fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
- fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
- llvm::Value *FitsInFP =
- llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
- FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
- InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
- }
-
- llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
- llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
- llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
- CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
-
- // Emit code to load the value if it was passed in registers.
-
- CGF.EmitBlock(InRegBlock);
-
- // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
- // an offset of l->gp_offset and/or l->fp_offset. This may require
- // copying to a temporary location in case the parameter is passed
- // in different register classes or requires an alignment greater
- // than 8 for general purpose registers and 16 for XMM registers.
- //
- // FIXME: This really results in shameful code when we end up needing to
- // collect arguments from different places; often what should result in a
- // simple assembling of a structure from scattered addresses has many more
- // loads than necessary. Can we clean this up?
- llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
- llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
- CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area");
-
- Address RegAddr = Address::invalid();
- if (neededInt && neededSSE) {
- // FIXME: Cleanup.
- assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
- llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
- Address Tmp = CGF.CreateMemTemp(Ty);
- Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
- assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
- llvm::Type *TyLo = ST->getElementType(0);
- llvm::Type *TyHi = ST->getElementType(1);
- assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
- "Unexpected ABI info for mixed regs");
- llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
- llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
- llvm::Value *GPAddr =
- CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset);
- llvm::Value *FPAddr =
- CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset);
- llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
- llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
-
- // Copy the first element.
- // FIXME: Our choice of alignment here and below is probably pessimistic.
- llvm::Value *V = CGF.Builder.CreateAlignedLoad(
- TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),
- CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo)));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
-
- // Copy the second element.
- V = CGF.Builder.CreateAlignedLoad(
- TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),
- CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi)));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
-
- RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
- } else if (neededInt) {
- RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset),
- CharUnits::fromQuantity(8));
- RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
-
- // Copy to a temporary if necessary to ensure the appropriate alignment.
- auto TInfo = getContext().getTypeInfoInChars(Ty);
- uint64_t TySize = TInfo.Width.getQuantity();
- CharUnits TyAlign = TInfo.Align;
-
- // Copy into a temporary if the type is more aligned than the
- // register save area.
- if (TyAlign.getQuantity() > 8) {
- Address Tmp = CGF.CreateMemTemp(Ty);
- CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
- RegAddr = Tmp;
- }
-
- } else if (neededSSE == 1) {
- RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset),
- CharUnits::fromQuantity(16));
- RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
- } else {
- assert(neededSSE == 2 && "Invalid number of needed registers!");
- // SSE registers are spaced 16 bytes apart in the register save
- // area, we need to collect the two eightbytes together.
- // The ABI isn't explicit about this, but it seems reasonable
- // to assume that the slots are 16-byte aligned, since the stack is
- // naturally 16-byte aligned and the prologue is expected to store
- // all the SSE registers to the RSA.
- Address RegAddrLo = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea,
- fp_offset),
- CharUnits::fromQuantity(16));
- Address RegAddrHi =
- CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
- CharUnits::fromQuantity(16));
- llvm::Type *ST = AI.canHaveCoerceToType()
- ? AI.getCoerceToType()
- : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy);
- llvm::Value *V;
- Address Tmp = CGF.CreateMemTemp(Ty);
- Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
- V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
- RegAddrLo, ST->getStructElementType(0)));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
- V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
- RegAddrHi, ST->getStructElementType(1)));
- CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
-
- RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
- }
-
- // AMD64-ABI 3.5.7p5: Step 5. Set:
- // l->gp_offset = l->gp_offset + num_gp * 8
- // l->fp_offset = l->fp_offset + num_fp * 16.
- if (neededInt) {
- llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
- CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
- gp_offset_p);
- }
- if (neededSSE) {
- llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
- CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
- fp_offset_p);
- }
- CGF.EmitBranch(ContBlock);
-
- // Emit code to load the value if it was passed in memory.
-
- CGF.EmitBlock(InMemBlock);
- Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
-
- // Return the appropriate result.
-
- CGF.EmitBlock(ContBlock);
- Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
- "vaarg.addr");
- return ResAddr;
-}
-
-Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
- // not 1, 2, 4, or 8 bytes, must be passed by reference."
- uint64_t Width = getContext().getTypeSize(Ty);
- bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
-
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
- CGF.getContext().getTypeInfoInChars(Ty),
- CharUnits::fromQuantity(8),
- /*allowHigherAlign*/ false);
-}
-
-ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
- QualType Ty, unsigned &FreeSSERegs, const ABIArgInfo &current) const {
- const Type *Base = nullptr;
- uint64_t NumElts = 0;
-
- if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
- isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
- FreeSSERegs -= NumElts;
- return getDirectX86Hva();
- }
- return current;
-}
-
-ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
- bool IsReturnType, bool IsVectorCall,
- bool IsRegCall) const {
-
- if (Ty->isVoidType())
- return ABIArgInfo::getIgnore();
-
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- TypeInfo Info = getContext().getTypeInfo(Ty);
- uint64_t Width = Info.Width;
- CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
-
- const RecordType *RT = Ty->getAs<RecordType>();
- if (RT) {
- if (!IsReturnType) {
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
- }
-
- if (RT->getDecl()->hasFlexibleArrayMember())
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-
- }
-
- const Type *Base = nullptr;
- uint64_t NumElts = 0;
- // vectorcall adds the concept of a homogenous vector aggregate, similar to
- // other targets.
- if ((IsVectorCall || IsRegCall) &&
- isHomogeneousAggregate(Ty, Base, NumElts)) {
- if (IsRegCall) {
- if (FreeSSERegs >= NumElts) {
- FreeSSERegs -= NumElts;
- if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
- return ABIArgInfo::getDirect();
- return ABIArgInfo::getExpand();
- }
- return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
- } else if (IsVectorCall) {
- if (FreeSSERegs >= NumElts &&
- (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
- FreeSSERegs -= NumElts;
- return ABIArgInfo::getDirect();
- } else if (IsReturnType) {
- return ABIArgInfo::getExpand();
- } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
- // HVAs are delayed and reclassified in the 2nd step.
- return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
- }
- }
- }
-
- if (Ty->isMemberPointerType()) {
- // If the member pointer is represented by an LLVM int or ptr, pass it
- // directly.
- llvm::Type *LLTy = CGT.ConvertType(Ty);
- if (LLTy->isPointerTy() || LLTy->isIntegerTy())
- return ABIArgInfo::getDirect();
- }
-
- if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
- // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
- // not 1, 2, 4, or 8 bytes, must be passed by reference."
- if (Width > 64 || !llvm::isPowerOf2_64(Width))
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-
- // Otherwise, coerce it to a small integer.
- return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
- }
-
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
- switch (BT->getKind()) {
- case BuiltinType::Bool:
- // Bool type is always extended to the ABI, other builtin types are not
- // extended.
- return ABIArgInfo::getExtend(Ty);
-
- case BuiltinType::LongDouble:
- // Mingw64 GCC uses the old 80 bit extended precision floating point
- // unit. It passes them indirectly through memory.
- if (IsMingw64) {
- const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
- if (LDF == &llvm::APFloat::x87DoubleExtended())
- return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
- }
- break;
-
- case BuiltinType::Int128:
- case BuiltinType::UInt128:
- // If it's a parameter type, the normal ABI rule is that arguments larger
- // than 8 bytes are passed indirectly. GCC follows it. We follow it too,
- // even though it isn't particularly efficient.
- if (!IsReturnType)
- return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
-
- // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that.
- // Clang matches them for compatibility.
- return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
- llvm::Type::getInt64Ty(getVMContext()), 2));
-
- default:
- break;
- }
- }
-
- if (Ty->isExtIntType()) {
- // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
- // not 1, 2, 4, or 8 bytes, must be passed by reference."
- // However, non-power-of-two _ExtInts will be passed as 1,2,4 or 8 bytes
- // anyway as long is it fits in them, so we don't have to check the power of
- // 2.
- if (Width <= 64)
- return ABIArgInfo::getDirect();
- return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
- }
-
- return ABIArgInfo::getDirect();
-}
-
-void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
- const unsigned CC = FI.getCallingConvention();
- bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
- bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;
-
- // If __attribute__((sysv_abi)) is in use, use the SysV argument
- // classification rules.
- if (CC == llvm::CallingConv::X86_64_SysV) {
- X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
- SysVABIInfo.computeInfo(FI);
- return;
- }
-
- unsigned FreeSSERegs = 0;
- if (IsVectorCall) {
- // We can use up to 4 SSE return registers with vectorcall.
- FreeSSERegs = 4;
- } else if (IsRegCall) {
- // RegCall gives us 16 SSE registers.
- FreeSSERegs = 16;
- }
-
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
- IsVectorCall, IsRegCall);
-
- if (IsVectorCall) {
- // We can use up to 6 SSE register parameters with vectorcall.
- FreeSSERegs = 6;
- } else if (IsRegCall) {
- // RegCall gives us 16 SSE registers, we can reuse the return registers.
- FreeSSERegs = 16;
- }
-
- unsigned ArgNum = 0;
- unsigned ZeroSSERegs = 0;
- for (auto &I : FI.arguments()) {
- // Vectorcall in x64 only permits the first 6 arguments to be passed as
- // XMM/YMM registers. After the sixth argument, pretend no vector
- // registers are left.
- unsigned *MaybeFreeSSERegs =
- (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
- I.info =
- classify(I.type, *MaybeFreeSSERegs, false, IsVectorCall, IsRegCall);
- ++ArgNum;
- }
-
- if (IsVectorCall) {
- // For vectorcall, assign aggregate HVAs to any free vector registers in a
- // second pass.
- for (auto &I : FI.arguments())
- I.info = reclassifyHvaArgForVectorCall(I.type, FreeSSERegs, I.info);
- }
-}
-
-Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
- // not 1, 2, 4, or 8 bytes, must be passed by reference."
- uint64_t Width = getContext().getTypeSize(Ty);
- bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
-
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
- CGF.getContext().getTypeInfoInChars(Ty),
- CharUnits::fromQuantity(8),
- /*allowHigherAlign*/ false);
-}
-
-static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address, bool Is64Bit,
- bool IsAIX) {
- // This is calculated from the LLVM and GCC tables and verified
- // against gcc output. AFAIK all PPC ABIs use the same encoding.
-
- CodeGen::CGBuilderTy &Builder = CGF.Builder;
-
- llvm::IntegerType *i8 = CGF.Int8Ty;
- llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
- llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
- llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
-
- // 0-31: r0-31, the 4-byte or 8-byte general-purpose registers
- AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 0, 31);
-
- // 32-63: fp0-31, the 8-byte floating-point registers
- AssignToArrayRange(Builder, Address, Eight8, 32, 63);
-
- // 64-67 are various 4-byte or 8-byte special-purpose registers:
- // 64: mq
- // 65: lr
- // 66: ctr
- // 67: ap
- AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 64, 67);
-
- // 68-76 are various 4-byte special-purpose registers:
- // 68-75 cr0-7
- // 76: xer
- AssignToArrayRange(Builder, Address, Four8, 68, 76);
-
- // 77-108: v0-31, the 16-byte vector registers
- AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
-
- // 109: vrsave
- // 110: vscr
- AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 109, 110);
-
- // AIX does not utilize the rest of the registers.
- if (IsAIX)
- return false;
-
- // 111: spe_acc
- // 112: spefscr
- // 113: sfp
- AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 111, 113);
-
- if (!Is64Bit)
- return false;
-
- // TODO: Need to verify if these registers are used on 64 bit AIX with Power8
- // or above CPU.
- // 64-bit only registers:
- // 114: tfhar
- // 115: tfiar
- // 116: texasr
- AssignToArrayRange(Builder, Address, Eight8, 114, 116);
-
- return false;
-}
-
-// AIX
-namespace {
-/// AIXABIInfo - The AIX XCOFF ABI information.
-class AIXABIInfo : public ABIInfo {
- const bool Is64Bit;
- const unsigned PtrByteSize;
- CharUnits getParamTypeAlignment(QualType Ty) const;
-
-public:
- AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
- : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {}
-
- bool isPromotableTypeForABI(QualType Ty) const;
-
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType Ty) const;
-
- void computeInfo(CGFunctionInfo &FI) const override {
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
-
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type);
- }
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-};
-
-class AIXTargetCodeGenInfo : public TargetCodeGenInfo {
- const bool Is64Bit;
-
-public:
- AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
- : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(CGT, Is64Bit)),
- Is64Bit(Is64Bit) {}
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
- return 1; // r1 is the dedicated stack pointer
- }
-
- bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override;
-};
-} // namespace
-
-// Return true if the ABI requires Ty to be passed sign- or zero-
-// extended to 32/64 bits.
-bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- // Promotable integer types are required to be promoted by the ABI.
- if (Ty->isPromotableIntegerType())
- return true;
-
- if (!Is64Bit)
- return false;
-
- // For 64 bit mode, in addition to the usual promotable integer types, we also
- // need to extend all 32-bit types, since the ABI requires promotion to 64
- // bits.
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
- switch (BT->getKind()) {
- case BuiltinType::Int:
- case BuiltinType::UInt:
- return true;
- default:
- break;
- }
-
- return false;
-}
-
-ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isAnyComplexType())
- return ABIArgInfo::getDirect();
-
- if (RetTy->isVectorType())
- return ABIArgInfo::getDirect();
-
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- if (isAggregateTypeForABI(RetTy))
- return getNaturalAlignIndirect(RetTy);
-
- return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
-}
-
-ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const {
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- if (Ty->isAnyComplexType())
- return ABIArgInfo::getDirect();
-
- if (Ty->isVectorType())
- return ABIArgInfo::getDirect();
-
- if (isAggregateTypeForABI(Ty)) {
- // Records with non-trivial destructors/copy-constructors should not be
- // passed by value.
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
-
- CharUnits CCAlign = getParamTypeAlignment(Ty);
- CharUnits TyAlign = getContext().getTypeAlignInChars(Ty);
-
- return ABIArgInfo::getIndirect(CCAlign, /*ByVal*/ true,
- /*Realign*/ TyAlign > CCAlign);
- }
-
- return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
-}
-
-CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const {
- // Complex types are passed just like their elements.
- if (const ComplexType *CTy = Ty->getAs<ComplexType>())
- Ty = CTy->getElementType();
-
- if (Ty->isVectorType())
- return CharUnits::fromQuantity(16);
-
- // If the structure contains a vector type, the alignment is 16.
- if (isRecordWithSIMDVectorType(getContext(), Ty))
- return CharUnits::fromQuantity(16);
-
- return CharUnits::fromQuantity(PtrByteSize);
-}
-
-Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- if (Ty->isAnyComplexType())
- llvm::report_fatal_error("complex type is not supported on AIX yet");
-
- auto TypeInfo = getContext().getTypeInfoInChars(Ty);
- TypeInfo.Align = getParamTypeAlignment(Ty);
-
- CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize);
-
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
- SlotSize, /*AllowHigher*/ true);
-}
-
-bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable(
- CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
- return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, /*IsAIX*/ true);
-}
-
-// PowerPC-32
-namespace {
-/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
-class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
- bool IsSoftFloatABI;
- bool IsRetSmallStructInRegABI;
-
- CharUnits getParamTypeAlignment(QualType Ty) const;
-
-public:
- PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI,
- bool RetSmallStructInRegABI)
- : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI),
- IsRetSmallStructInRegABI(RetSmallStructInRegABI) {}
-
- ABIArgInfo classifyReturnType(QualType RetTy) const;
-
- void computeInfo(CGFunctionInfo &FI) const override {
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type);
- }
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-};
-
-class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI,
- bool RetSmallStructInRegABI)
- : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>(
- CGT, SoftFloatABI, RetSmallStructInRegABI)) {}
-
- static bool isStructReturnInRegABI(const llvm::Triple &Triple,
- const CodeGenOptions &Opts);
-
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
- // This is recovered from gcc output.
- return 1; // r1 is the dedicated stack pointer
- }
-
- bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override;
-};
-}
-
-CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
- // Complex types are passed just like their elements.
- if (const ComplexType *CTy = Ty->getAs<ComplexType>())
- Ty = CTy->getElementType();
-
- if (Ty->isVectorType())
- return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16
- : 4);
-
- // For single-element float/vector structs, we consider the whole type
- // to have the same alignment requirements as its single element.
- const Type *AlignTy = nullptr;
- if (const Type *EltType = isSingleElementStruct(Ty, getContext())) {
- const BuiltinType *BT = EltType->getAs<BuiltinType>();
- if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
- (BT && BT->isFloatingPoint()))
- AlignTy = EltType;
- }
-
- if (AlignTy)
- return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4);
- return CharUnits::fromQuantity(4);
-}
-
-ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
- uint64_t Size;
-
- // -msvr4-struct-return puts small aggregates in GPR3 and GPR4.
- if (isAggregateTypeForABI(RetTy) && IsRetSmallStructInRegABI &&
- (Size = getContext().getTypeSize(RetTy)) <= 64) {
- // System V ABI (1995), page 3-22, specified:
- // > A structure or union whose size is less than or equal to 8 bytes
- // > shall be returned in r3 and r4, as if it were first stored in the
- // > 8-byte aligned memory area and then the low addressed word were
- // > loaded into r3 and the high-addressed word into r4. Bits beyond
- // > the last member of the structure or union are not defined.
- //
- // GCC for big-endian PPC32 inserts the pad before the first member,
- // not "beyond the last member" of the struct. To stay compatible
- // with GCC, we coerce the struct to an integer of the same size.
- // LLVM will extend it and return i32 in r3, or i64 in r3:r4.
- if (Size == 0)
- return ABIArgInfo::getIgnore();
- else {
- llvm::Type *CoerceTy = llvm::Type::getIntNTy(getVMContext(), Size);
- return ABIArgInfo::getDirect(CoerceTy);
- }
- }
-
- return DefaultABIInfo::classifyReturnType(RetTy);
-}
-
-// TODO: this implementation is now likely redundant with
-// DefaultABIInfo::EmitVAArg.
-Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
- QualType Ty) const {
- if (getTarget().getTriple().isOSDarwin()) {
- auto TI = getContext().getTypeInfoInChars(Ty);
- TI.Align = getParamTypeAlignment(Ty);
-
- CharUnits SlotSize = CharUnits::fromQuantity(4);
- return emitVoidPtrVAArg(CGF, VAList, Ty,
- classifyArgumentType(Ty).isIndirect(), TI, SlotSize,
- /*AllowHigherAlign=*/true);
- }
-
- const unsigned OverflowLimit = 8;
- if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
- // TODO: Implement this. For now ignore.
- (void)CTy;
- return Address::invalid(); // FIXME?
- }
-
- // struct __va_list_tag {
- // unsigned char gpr;
- // unsigned char fpr;
- // unsigned short reserved;
- // void *overflow_arg_area;
- // void *reg_save_area;
- // };
-
- bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
- bool isInt = !Ty->isFloatingType();
- bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
-
- // All aggregates are passed indirectly? That doesn't seem consistent
- // with the argument-lowering code.
- bool isIndirect = isAggregateTypeForABI(Ty);
-
- CGBuilderTy &Builder = CGF.Builder;
-
- // The calling convention either uses 1-2 GPRs or 1 FPR.
- Address NumRegsAddr = Address::invalid();
- if (isInt || IsSoftFloatABI) {
- NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr");
- } else {
- NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr");
- }
-
- llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
-
- // "Align" the register count when TY is i64.
- if (isI64 || (isF64 && IsSoftFloatABI)) {
- NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
- NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
- }
-
- llvm::Value *CC =
- Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");
-
- llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
- llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
- llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
-
- Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
-
- llvm::Type *DirectTy = CGF.ConvertType(Ty);
- if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
-
- // Case 1: consume registers.
- Address RegAddr = Address::invalid();
- {
- CGF.EmitBlock(UsingRegs);
-
- Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
- RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
- CharUnits::fromQuantity(8));
- assert(RegAddr.getElementType() == CGF.Int8Ty);
-
- // Floating-point registers start after the general-purpose registers.
- if (!(isInt || IsSoftFloatABI)) {
- RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
- CharUnits::fromQuantity(32));
- }
-
- // Get the address of the saved value by scaling the number of
- // registers we've used by the number of
- CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
- llvm::Value *RegOffset =
- Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
- RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
- RegAddr.getPointer(), RegOffset),
- RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
- RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
-
- // Increase the used-register count.
- NumRegs =
- Builder.CreateAdd(NumRegs,
- Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
- Builder.CreateStore(NumRegs, NumRegsAddr);
-
- CGF.EmitBranch(Cont);
- }
-
- // Case 2: consume space in the overflow area.
- Address MemAddr = Address::invalid();
- {
- CGF.EmitBlock(UsingOverflow);
-
- Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
-
- // Everything in the overflow area is rounded up to a size of at least 4.
- CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
-
- CharUnits Size;
- if (!isIndirect) {
- auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
- Size = TypeInfo.Width.alignTo(OverflowAreaAlign);
- } else {
- Size = CGF.getPointerSize();
- }
-
- Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
- Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
- OverflowAreaAlign);
- // Round up address of argument to alignment
- CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
- if (Align > OverflowAreaAlign) {
- llvm::Value *Ptr = OverflowArea.getPointer();
- OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
- Align);
- }
-
- MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
-
- // Increase the overflow area.
- OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
- Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
- CGF.EmitBranch(Cont);
- }
-
- CGF.EmitBlock(Cont);
-
- // Merge the cases with a phi.
- Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
- "vaarg.addr");
-
- // Load the pointer if the argument was passed indirectly.
- if (isIndirect) {
- Result = Address(Builder.CreateLoad(Result, "aggr"),
- getContext().getTypeAlignInChars(Ty));
- }
-
- return Result;
-}
-
-bool PPC32TargetCodeGenInfo::isStructReturnInRegABI(
- const llvm::Triple &Triple, const CodeGenOptions &Opts) {
- assert(Triple.isPPC32());
-
- switch (Opts.getStructReturnConvention()) {
- case CodeGenOptions::SRCK_Default:
- break;
- case CodeGenOptions::SRCK_OnStack: // -maix-struct-return
- return false;
- case CodeGenOptions::SRCK_InRegs: // -msvr4-struct-return
- return true;
- }
-
- if (Triple.isOSBinFormatELF() && !Triple.isOSLinux())
- return true;
-
- return false;
-}
-
-bool
-PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const {
- return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ false,
- /*IsAIX*/ false);
-}
-
-// PowerPC-64
-
-namespace {
-/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
-class PPC64_SVR4_ABIInfo : public SwiftABIInfo {
-public:
- enum ABIKind {
- ELFv1 = 0,
- ELFv2
- };
-
-private:
- static const unsigned GPRBits = 64;
- ABIKind Kind;
- bool IsSoftFloatABI;
-
-public:
- PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind,
- bool SoftFloatABI)
- : SwiftABIInfo(CGT), Kind(Kind), IsSoftFloatABI(SoftFloatABI) {}
-
- bool isPromotableTypeForABI(QualType Ty) const;
- CharUnits getParamTypeAlignment(QualType Ty) const;
-
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType Ty) const;
-
- bool isHomogeneousAggregateBaseType(QualType Ty) const override;
- bool isHomogeneousAggregateSmallEnough(const Type *Ty,
- uint64_t Members) const override;
-
- // TODO: We can add more logic to computeInfo to improve performance.
- // Example: For aggregate arguments that fit in a register, we could
- // use getDirectInReg (as is done below for structs containing a single
- // floating-point value) to avoid pushing them to memory on function
- // entry. This would require changing the logic in PPCISelLowering
- // when lowering the parameters in the caller and args in the callee.
- void computeInfo(CGFunctionInfo &FI) const override {
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &I : FI.arguments()) {
- // We rely on the default argument classification for the most part.
- // One exception: An aggregate containing a single floating-point
- // or vector item must be passed in a register if one is available.
- const Type *T = isSingleElementStruct(I.type, getContext());
- if (T) {
- const BuiltinType *BT = T->getAs<BuiltinType>();
- if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||
- (BT && BT->isFloatingPoint())) {
- QualType QT(T, 0);
- I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
- continue;
- }
- }
- I.info = classifyArgumentType(I.type);
- }
- }
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-
- bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
- bool asReturnValue) const override {
- return occupiesMoreThan(CGT, scalars, /*total*/ 4);
- }
-
- bool isSwiftErrorInRegister() const override {
- return false;
- }
-};
-
-class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
-
-public:
- PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
- PPC64_SVR4_ABIInfo::ABIKind Kind,
- bool SoftFloatABI)
- : TargetCodeGenInfo(
- std::make_unique<PPC64_SVR4_ABIInfo>(CGT, Kind, SoftFloatABI)) {}
-
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
- // This is recovered from gcc output.
- return 1; // r1 is the dedicated stack pointer
- }
-
- bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override;
-};
-
-class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
-public:
- PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
-
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
- // This is recovered from gcc output.
- return 1; // r1 is the dedicated stack pointer
- }
-
- bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override;
-};
-
-}
-
-// Return true if the ABI requires Ty to be passed sign- or zero-
-// extended to 64 bits.
-bool
-PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- // Promotable integer types are required to be promoted by the ABI.
- if (isPromotableIntegerTypeForABI(Ty))
- return true;
-
- // In addition to the usual promotable integer types, we also need to
- // extend all 32-bit types, since the ABI requires promotion to 64 bits.
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
- switch (BT->getKind()) {
- case BuiltinType::Int:
- case BuiltinType::UInt:
- return true;
- default:
- break;
- }
-
- if (const auto *EIT = Ty->getAs<ExtIntType>())
- if (EIT->getNumBits() < 64)
- return true;
-
- return false;
-}
-
-/// isAlignedParamType - Determine whether a type requires 16-byte or
-/// higher alignment in the parameter area. Always returns at least 8.
-CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
- // Complex types are passed just like their elements.
- if (const ComplexType *CTy = Ty->getAs<ComplexType>())
- Ty = CTy->getElementType();
-
- // Only vector types of size 16 bytes need alignment (larger types are
- // passed via reference, smaller types are not aligned).
- if (Ty->isVectorType()) {
- return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
- } else if (Ty->isRealFloatingType() &&
- &getContext().getFloatTypeSemantics(Ty) ==
- &llvm::APFloat::IEEEquad()) {
- // According to ABI document section 'Optional Save Areas': If extended
- // precision floating-point values in IEEE BINARY 128 QUADRUPLE PRECISION
- // format are supported, map them to a single quadword, quadword aligned.
- return CharUnits::fromQuantity(16);
- }
-
- // For single-element float/vector structs, we consider the whole type
- // to have the same alignment requirements as its single element.
- const Type *AlignAsType = nullptr;
- const Type *EltType = isSingleElementStruct(Ty, getContext());
- if (EltType) {
- const BuiltinType *BT = EltType->getAs<BuiltinType>();
- if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
- (BT && BT->isFloatingPoint()))
- AlignAsType = EltType;
- }
-
- // Likewise for ELFv2 homogeneous aggregates.
- const Type *Base = nullptr;
- uint64_t Members = 0;
- if (!AlignAsType && Kind == ELFv2 &&
- isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
- AlignAsType = Base;
-
- // With special case aggregates, only vector base types need alignment.
- if (AlignAsType) {
- return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
- }
-
- // Otherwise, we only need alignment for any aggregate type that
- // has an alignment requirement of >= 16 bytes.
- if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
- return CharUnits::fromQuantity(16);
- }
-
- return CharUnits::fromQuantity(8);
-}
-
-/// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
-/// aggregate. Base is set to the base element type, and Members is set
-/// to the number of base elements.
-bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
- uint64_t &Members) const {
- if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
- uint64_t NElements = AT->getSize().getZExtValue();
- if (NElements == 0)
- return false;
- if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
- return false;
- Members *= NElements;
- } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return false;
-
- Members = 0;
-
- // If this is a C++ record, check the properties of the record such as
- // bases and ABI specific restrictions
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
- if (!getCXXABI().isPermittedToBeHomogeneousAggregate(CXXRD))
- return false;
-
- for (const auto &I : CXXRD->bases()) {
- // Ignore empty records.
- if (isEmptyRecord(getContext(), I.getType(), true))
- continue;
-
- uint64_t FldMembers;
- if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
- return false;
-
- Members += FldMembers;
- }
- }
-
- for (const auto *FD : RD->fields()) {
- // Ignore (non-zero arrays of) empty records.
- QualType FT = FD->getType();
- while (const ConstantArrayType *AT =
- getContext().getAsConstantArrayType(FT)) {
- if (AT->getSize().getZExtValue() == 0)
- return false;
- FT = AT->getElementType();
- }
- if (isEmptyRecord(getContext(), FT, true))
- continue;
-
- // For compatibility with GCC, ignore empty bitfields in C++ mode.
- if (getContext().getLangOpts().CPlusPlus &&
- FD->isZeroLengthBitField(getContext()))
- continue;
-
- uint64_t FldMembers;
- if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
- return false;
-
- Members = (RD->isUnion() ?
- std::max(Members, FldMembers) : Members + FldMembers);
- }
-
- if (!Base)
- return false;
-
- // Ensure there is no padding.
- if (getContext().getTypeSize(Base) * Members !=
- getContext().getTypeSize(Ty))
- return false;
- } else {
- Members = 1;
- if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
- Members = 2;
- Ty = CT->getElementType();
- }
-
- // Most ABIs only support float, double, and some vector type widths.
- if (!isHomogeneousAggregateBaseType(Ty))
- return false;
-
- // The base type must be the same for all members. Types that
- // agree in both total size and mode (float vs. vector) are
- // treated as being equivalent here.
- const Type *TyPtr = Ty.getTypePtr();
- if (!Base) {
- Base = TyPtr;
- // If it's a non-power-of-2 vector, its size is already a power-of-2,
- // so make sure to widen it explicitly.
- if (const VectorType *VT = Base->getAs<VectorType>()) {
- QualType EltTy = VT->getElementType();
- unsigned NumElements =
- getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
- Base = getContext()
- .getVectorType(EltTy, NumElements, VT->getVectorKind())
- .getTypePtr();
- }
- }
-
- if (Base->isVectorType() != TyPtr->isVectorType() ||
- getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
- return false;
- }
- return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
-}
-
-bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
- // Homogeneous aggregates for ELFv2 must have base types of float,
- // double, long double, or 128-bit vectors.
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
- if (BT->getKind() == BuiltinType::Float ||
- BT->getKind() == BuiltinType::Double ||
- BT->getKind() == BuiltinType::LongDouble ||
- (getContext().getTargetInfo().hasFloat128Type() &&
- (BT->getKind() == BuiltinType::Float128))) {
- if (IsSoftFloatABI)
- return false;
- return true;
- }
- }
- if (const VectorType *VT = Ty->getAs<VectorType>()) {
- if (getContext().getTypeSize(VT) == 128)
- return true;
- }
- return false;
-}
-
-bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
- const Type *Base, uint64_t Members) const {
- // Vector and fp128 types require one register, other floating point types
- // require one or two registers depending on their size.
- uint32_t NumRegs =
- ((getContext().getTargetInfo().hasFloat128Type() &&
- Base->isFloat128Type()) ||
- Base->isVectorType()) ? 1
- : (getContext().getTypeSize(Base) + 63) / 64;
-
- // Homogeneous Aggregates may occupy at most 8 registers.
- return Members * NumRegs <= 8;
-}
-
-ABIArgInfo
-PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- if (Ty->isAnyComplexType())
- return ABIArgInfo::getDirect();
-
- // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
- // or via reference (larger than 16 bytes).
- if (Ty->isVectorType()) {
- uint64_t Size = getContext().getTypeSize(Ty);
- if (Size > 128)
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
- else if (Size < 128) {
- llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
- return ABIArgInfo::getDirect(CoerceTy);
- }
- }
-
- if (const auto *EIT = Ty->getAs<ExtIntType>())
- if (EIT->getNumBits() > 128)
- return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
-
- if (isAggregateTypeForABI(Ty)) {
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
-
- uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
- uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
-
- // ELFv2 homogeneous aggregates are passed as array types.
- const Type *Base = nullptr;
- uint64_t Members = 0;
- if (Kind == ELFv2 &&
- isHomogeneousAggregate(Ty, Base, Members)) {
- llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
- llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
- return ABIArgInfo::getDirect(CoerceTy);
- }
-
- // If an aggregate may end up fully in registers, we do not
- // use the ByVal method, but pass the aggregate as array.
- // This is usually beneficial since we avoid forcing the
- // back-end to store the argument to memory.
- uint64_t Bits = getContext().getTypeSize(Ty);
- if (Bits > 0 && Bits <= 8 * GPRBits) {
- llvm::Type *CoerceTy;
-
- // Types up to 8 bytes are passed as integer type (which will be
- // properly aligned in the argument save area doubleword).
- if (Bits <= GPRBits)
- CoerceTy =
- llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
- // Larger types are passed as arrays, with the base type selected
- // according to the required alignment in the save area.
- else {
- uint64_t RegBits = ABIAlign * 8;
- uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
- llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
- CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
- }
-
- return ABIArgInfo::getDirect(CoerceTy);
- }
-
- // All other aggregates are passed ByVal.
- return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
- /*ByVal=*/true,
- /*Realign=*/TyAlign > ABIAlign);
- }
-
- return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
-}
-
-ABIArgInfo
-PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- if (RetTy->isAnyComplexType())
- return ABIArgInfo::getDirect();
-
- // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
- // or via reference (larger than 16 bytes).
- if (RetTy->isVectorType()) {
- uint64_t Size = getContext().getTypeSize(RetTy);
- if (Size > 128)
- return getNaturalAlignIndirect(RetTy);
- else if (Size < 128) {
- llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
- return ABIArgInfo::getDirect(CoerceTy);
- }
- }
-
- if (const auto *EIT = RetTy->getAs<ExtIntType>())
- if (EIT->getNumBits() > 128)
- return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
-
- if (isAggregateTypeForABI(RetTy)) {
- // ELFv2 homogeneous aggregates are returned as array types.
- const Type *Base = nullptr;
- uint64_t Members = 0;
- if (Kind == ELFv2 &&
- isHomogeneousAggregate(RetTy, Base, Members)) {
- llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
- llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
- return ABIArgInfo::getDirect(CoerceTy);
- }
-
- // ELFv2 small aggregates are returned in up to two registers.
- uint64_t Bits = getContext().getTypeSize(RetTy);
- if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
- if (Bits == 0)
- return ABIArgInfo::getIgnore();
-
- llvm::Type *CoerceTy;
- if (Bits > GPRBits) {
- CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
- CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
- } else
- CoerceTy =
- llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
- return ABIArgInfo::getDirect(CoerceTy);
- }
-
- // All other aggregates are returned indirectly.
- return getNaturalAlignIndirect(RetTy);
- }
-
- return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
-}
-
-// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
-Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- auto TypeInfo = getContext().getTypeInfoInChars(Ty);
- TypeInfo.Align = getParamTypeAlignment(Ty);
-
- CharUnits SlotSize = CharUnits::fromQuantity(8);
-
- // If we have a complex type and the base type is smaller than 8 bytes,
- // the ABI calls for the real and imaginary parts to be right-adjusted
- // in separate doublewords. However, Clang expects us to produce a
- // pointer to a structure with the two parts packed tightly. So generate
- // loads of the real and imaginary parts relative to the va_list pointer,
- // and store them to a temporary structure.
- if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
- CharUnits EltSize = TypeInfo.Width / 2;
- if (EltSize < SlotSize) {
- Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
- SlotSize * 2, SlotSize,
- SlotSize, /*AllowHigher*/ true);
-
- Address RealAddr = Addr;
- Address ImagAddr = RealAddr;
- if (CGF.CGM.getDataLayout().isBigEndian()) {
- RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr,
- SlotSize - EltSize);
- ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
- 2 * SlotSize - EltSize);
- } else {
- ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
- }
-
- llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
- RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
- ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
- llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
- llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");
-
- Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
- CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
- /*init*/ true);
- return Temp;
- }
- }
-
- // Otherwise, just use the general rule.
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
- TypeInfo, SlotSize, /*AllowHigher*/ true);
-}
-
-bool
-PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
- CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const {
- return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
- /*IsAIX*/ false);
-}
-
-bool
-PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const {
- return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
- /*IsAIX*/ false);
-}
-
-//===----------------------------------------------------------------------===//
-// AArch64 ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class AArch64ABIInfo : public SwiftABIInfo {
-public:
- enum ABIKind {
- AAPCS = 0,
- DarwinPCS,
- Win64
- };
-
-private:
- ABIKind Kind;
-
-public:
- AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind)
- : SwiftABIInfo(CGT), Kind(Kind) {}
-
-private:
- ABIKind getABIKind() const { return Kind; }
- bool isDarwinPCS() const { return Kind == DarwinPCS; }
-
- ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
- ABIArgInfo classifyArgumentType(QualType RetTy, bool IsVariadic,
- unsigned CallingConvention) const;
- ABIArgInfo coerceIllegalVector(QualType Ty) const;
- bool isHomogeneousAggregateBaseType(QualType Ty) const override;
- bool isHomogeneousAggregateSmallEnough(const Type *Ty,
- uint64_t Members) const override;
-
- bool isIllegalVectorType(QualType Ty) const;
-
- void computeInfo(CGFunctionInfo &FI) const override {
- if (!::classifyReturnType(getCXXABI(), FI, *this))
- FI.getReturnInfo() =
- classifyReturnType(FI.getReturnType(), FI.isVariadic());
-
- for (auto &it : FI.arguments())
- it.info = classifyArgumentType(it.type, FI.isVariadic(),
- FI.getCallingConvention());
- }
-
- Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
-
- Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const;
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override {
- llvm::Type *BaseTy = CGF.ConvertType(Ty);
- if (isa<llvm::ScalableVectorType>(BaseTy))
- llvm::report_fatal_error("Passing SVE types to variadic functions is "
- "currently not supported");
-
- return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
- : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
- : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
- }
-
- Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-
- bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
- bool asReturnValue) const override {
- return occupiesMoreThan(CGT, scalars, /*total*/ 4);
- }
- bool isSwiftErrorInRegister() const override {
- return true;
- }
-
- bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
- unsigned elts) const override;
-
- bool allowBFloatArgsAndRet() const override {
- return getTarget().hasBFloat16Type();
- }
-};
-
-class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
- : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {}
-
- StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
- return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
- }
-
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
- return 31;
- }
-
- bool doesReturnSlotInterfereWithArgs() const override { return false; }
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override {
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
- if (!FD)
- return;
-
- const auto *TA = FD->getAttr<TargetAttr>();
- if (TA == nullptr)
- return;
-
- ParsedTargetAttr Attr = TA->parse();
- if (Attr.BranchProtection.empty())
- return;
-
- TargetInfo::BranchProtectionInfo BPI;
- StringRef Error;
- (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
- BPI, Error);
- assert(Error.empty());
-
- auto *Fn = cast<llvm::Function>(GV);
- static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
- Fn->addFnAttr("sign-return-address", SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
-
- if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
- Fn->addFnAttr("sign-return-address-key",
- BPI.SignKey == LangOptions::SignReturnAddressKeyKind::AKey
- ? "a_key"
- : "b_key");
- }
-
- Fn->addFnAttr("branch-target-enforcement",
- BPI.BranchTargetEnforcement ? "true" : "false");
- }
-
- bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,
- llvm::Type *Ty) const override {
- if (CGF.getTarget().hasFeature("ls64")) {
- auto *ST = dyn_cast<llvm::StructType>(Ty);
- if (ST && ST->getNumElements() == 1) {
- auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
- if (AT && AT->getNumElements() == 8 &&
- AT->getElementType()->isIntegerTy(64))
- return true;
- }
- }
- return TargetCodeGenInfo::isScalarizableAsmOperand(CGF, Ty);
- }
-};
-
-class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
-public:
- WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K)
- : AArch64TargetCodeGenInfo(CGT, K) {}
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override;
-
- void getDependentLibraryOption(llvm::StringRef Lib,
- llvm::SmallString<24> &Opt) const override {
- Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
- }
-
- void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
- llvm::SmallString<32> &Opt) const override {
- Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
- }
-};
-
-void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
- AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
- if (GV->isDeclaration())
- return;
- addStackProbeTargetAttributes(D, GV, CGM);
-}
-}
-
-ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const {
- assert(Ty->isVectorType() && "expected vector type!");
-
- const auto *VT = Ty->castAs<VectorType>();
- if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) {
- assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
- assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
- BuiltinType::UChar &&
- "unexpected builtin type for SVE predicate!");
- return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
- llvm::Type::getInt1Ty(getVMContext()), 16));
- }
-
- if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) {
- assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
-
- const auto *BT = VT->getElementType()->castAs<BuiltinType>();
- llvm::ScalableVectorType *ResType = nullptr;
- switch (BT->getKind()) {
- default:
- llvm_unreachable("unexpected builtin type for SVE vector!");
- case BuiltinType::SChar:
- case BuiltinType::UChar:
- ResType = llvm::ScalableVectorType::get(
- llvm::Type::getInt8Ty(getVMContext()), 16);
- break;
- case BuiltinType::Short:
- case BuiltinType::UShort:
- ResType = llvm::ScalableVectorType::get(
- llvm::Type::getInt16Ty(getVMContext()), 8);
- break;
- case BuiltinType::Int:
- case BuiltinType::UInt:
- ResType = llvm::ScalableVectorType::get(
- llvm::Type::getInt32Ty(getVMContext()), 4);
- break;
- case BuiltinType::Long:
- case BuiltinType::ULong:
- ResType = llvm::ScalableVectorType::get(
- llvm::Type::getInt64Ty(getVMContext()), 2);
- break;
- case BuiltinType::Half:
- ResType = llvm::ScalableVectorType::get(
- llvm::Type::getHalfTy(getVMContext()), 8);
- break;
- case BuiltinType::Float:
- ResType = llvm::ScalableVectorType::get(
- llvm::Type::getFloatTy(getVMContext()), 4);
- break;
- case BuiltinType::Double:
- ResType = llvm::ScalableVectorType::get(
- llvm::Type::getDoubleTy(getVMContext()), 2);
- break;
- case BuiltinType::BFloat16:
- ResType = llvm::ScalableVectorType::get(
- llvm::Type::getBFloatTy(getVMContext()), 8);
- break;
- }
- return ABIArgInfo::getDirect(ResType);
- }
-
- uint64_t Size = getContext().getTypeSize(Ty);
- // Android promotes <2 x i8> to i16, not i32
- if (isAndroid() && (Size <= 16)) {
- llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
- return ABIArgInfo::getDirect(ResType);
- }
- if (Size <= 32) {
- llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
- return ABIArgInfo::getDirect(ResType);
- }
- if (Size == 64) {
- auto *ResType =
- llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
- return ABIArgInfo::getDirect(ResType);
- }
- if (Size == 128) {
- auto *ResType =
- llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
- return ABIArgInfo::getDirect(ResType);
- }
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-}
-
-ABIArgInfo
-AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadic,
- unsigned CallingConvention) const {
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- // Handle illegal vector types here.
- if (isIllegalVectorType(Ty))
- return coerceIllegalVector(Ty);
-
- if (!isAggregateTypeForABI(Ty)) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- if (const auto *EIT = Ty->getAs<ExtIntType>())
- if (EIT->getNumBits() > 128)
- return getNaturalAlignIndirect(Ty);
-
- return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
- ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
- }
-
- // Structures with either a non-trivial destructor or a non-trivial
- // copy constructor are always indirect.
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
- return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
- CGCXXABI::RAA_DirectInMemory);
- }
-
- // Empty records are always ignored on Darwin, but actually passed in C++ mode
- // elsewhere for GNU compatibility.
- uint64_t Size = getContext().getTypeSize(Ty);
- bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
- if (IsEmpty || Size == 0) {
- if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
- return ABIArgInfo::getIgnore();
-
- // GNU C mode. The only argument that gets ignored is an empty one with size
- // 0.
- if (IsEmpty && Size == 0)
- return ABIArgInfo::getIgnore();
- return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
- }
-
- // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
- const Type *Base = nullptr;
- uint64_t Members = 0;
- bool IsWin64 = Kind == Win64 || CallingConvention == llvm::CallingConv::Win64;
- bool IsWinVariadic = IsWin64 && IsVariadic;
- // In variadic functions on Windows, all composite types are treated alike,
- // no special handling of HFAs/HVAs.
- if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) {
- if (Kind != AArch64ABIInfo::AAPCS)
- return ABIArgInfo::getDirect(
- llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
-
- // For alignment adjusted HFAs, cap the argument alignment to 16, leave it
- // default otherwise.
- unsigned Align =
- getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
- unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity();
- Align = (Align > BaseAlign && Align >= 16) ? 16 : 0;
- return ABIArgInfo::getDirect(
- llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0,
- nullptr, true, Align);
- }
-
- // Aggregates <= 16 bytes are passed directly in registers or on the stack.
- if (Size <= 128) {
- // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
- // same size and alignment.
- if (getTarget().isRenderScriptTarget()) {
- return coerceToIntArray(Ty, getContext(), getVMContext());
- }
- unsigned Alignment;
- if (Kind == AArch64ABIInfo::AAPCS) {
- Alignment = getContext().getTypeUnadjustedAlign(Ty);
- Alignment = Alignment < 128 ? 64 : 128;
- } else {
- Alignment = std::max(getContext().getTypeAlign(Ty),
- (unsigned)getTarget().getPointerWidth(0));
- }
- Size = llvm::alignTo(Size, Alignment);
-
- // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
- // For aggregates with 16-byte alignment, we use i128.
- llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
- return ABIArgInfo::getDirect(
- Size == Alignment ? BaseTy
- : llvm::ArrayType::get(BaseTy, Size / Alignment));
- }
-
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-}
-
-ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
- bool IsVariadic) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- if (const auto *VT = RetTy->getAs<VectorType>()) {
- if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector ||
- VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
- return coerceIllegalVector(RetTy);
- }
-
- // Large vector types should be returned via memory.
- if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
- return getNaturalAlignIndirect(RetTy);
-
- if (!isAggregateTypeForABI(RetTy)) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
-
- if (const auto *EIT = RetTy->getAs<ExtIntType>())
- if (EIT->getNumBits() > 128)
- return getNaturalAlignIndirect(RetTy);
-
- return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
- ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
- }
-
- uint64_t Size = getContext().getTypeSize(RetTy);
- if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
- return ABIArgInfo::getIgnore();
-
- const Type *Base = nullptr;
- uint64_t Members = 0;
- if (isHomogeneousAggregate(RetTy, Base, Members) &&
- !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
- IsVariadic))
- // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
- return ABIArgInfo::getDirect();
-
- // Aggregates <= 16 bytes are returned directly in registers or on the stack.
- if (Size <= 128) {
- // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
- // same size and alignment.
- if (getTarget().isRenderScriptTarget()) {
- return coerceToIntArray(RetTy, getContext(), getVMContext());
- }
-
- if (Size <= 64 && getDataLayout().isLittleEndian()) {
- // Composite types are returned in lower bits of a 64-bit register for LE,
- // and in higher bits for BE. However, integer types are always returned
- // in lower bits for both LE and BE, and they are not rounded up to
- // 64-bits. We can skip rounding up of composite types for LE, but not for
- // BE, otherwise composite types will be indistinguishable from integer
- // types.
- return ABIArgInfo::getDirect(
- llvm::IntegerType::get(getVMContext(), Size));
- }
-
- unsigned Alignment = getContext().getTypeAlign(RetTy);
- Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes
-
- // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
- // For aggregates with 16-byte alignment, we use i128.
- if (Alignment < 128 && Size == 128) {
- llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
- return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
- }
- return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
- }
-
- return getNaturalAlignIndirect(RetTy);
-}
-
-/// isIllegalVectorType - check whether the vector type is legal for AArch64.
-bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
- if (const VectorType *VT = Ty->getAs<VectorType>()) {
- // Check whether VT is a fixed-length SVE vector. These types are
- // represented as scalable vectors in function args/return and must be
- // coerced from fixed vectors.
- if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector ||
- VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector)
- return true;
-
- // Check whether VT is legal.
- unsigned NumElements = VT->getNumElements();
- uint64_t Size = getContext().getTypeSize(VT);
- // NumElements should be power of 2.
- if (!llvm::isPowerOf2_32(NumElements))
- return true;
-
- // arm64_32 has to be compatible with the ARM logic here, which allows huge
- // vectors for some reason.
- llvm::Triple Triple = getTarget().getTriple();
- if (Triple.getArch() == llvm::Triple::aarch64_32 &&
- Triple.isOSBinFormatMachO())
- return Size <= 32;
-
- return Size != 64 && (Size != 128 || NumElements == 1);
- }
- return false;
-}
-
-bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize,
- llvm::Type *eltTy,
- unsigned elts) const {
- if (!llvm::isPowerOf2_32(elts))
- return false;
- if (totalSize.getQuantity() != 8 &&
- (totalSize.getQuantity() != 16 || elts == 1))
- return false;
- return true;
-}
-
-bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
- // Homogeneous aggregates for AAPCS64 must have base types of a floating
- // point type or a short-vector type. This is the same as the 32-bit ABI,
- // but with the difference that any floating-point type is allowed,
- // including __fp16.
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
- if (BT->isFloatingPoint())
- return true;
- } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
- unsigned VecSize = getContext().getTypeSize(VT);
- if (VecSize == 64 || VecSize == 128)
- return true;
- }
- return false;
-}
-
-bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
- uint64_t Members) const {
- return Members <= 4;
-}
-
-Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- ABIArgInfo AI = classifyArgumentType(Ty, /*IsVariadic=*/true,
- CGF.CurFnInfo->getCallingConvention());
- bool IsIndirect = AI.isIndirect();
-
- llvm::Type *BaseTy = CGF.ConvertType(Ty);
- if (IsIndirect)
- BaseTy = llvm::PointerType::getUnqual(BaseTy);
- else if (AI.getCoerceToType())
- BaseTy = AI.getCoerceToType();
-
- unsigned NumRegs = 1;
- if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
- BaseTy = ArrTy->getElementType();
- NumRegs = ArrTy->getNumElements();
- }
- bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
-
- // The AArch64 va_list type and handling is specified in the Procedure Call
- // Standard, section B.4:
- //
- // struct {
- // void *__stack;
- // void *__gr_top;
- // void *__vr_top;
- // int __gr_offs;
- // int __vr_offs;
- // };
-
- llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
- llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
- llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
- llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
-
- CharUnits TySize = getContext().getTypeSizeInChars(Ty);
- CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);
-
- Address reg_offs_p = Address::invalid();
- llvm::Value *reg_offs = nullptr;
- int reg_top_index;
- int RegSize = IsIndirect ? 8 : TySize.getQuantity();
- if (!IsFPR) {
- // 3 is the field number of __gr_offs
- reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
- reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
- reg_top_index = 1; // field number for __gr_top
- RegSize = llvm::alignTo(RegSize, 8);
- } else {
- // 4 is the field number of __vr_offs.
- reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
- reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
- reg_top_index = 2; // field number for __vr_top
- RegSize = 16 * NumRegs;
- }
-
- //=======================================
- // Find out where argument was passed
- //=======================================
-
- // If reg_offs >= 0 we're already using the stack for this type of
- // argument. We don't want to keep updating reg_offs (in case it overflows,
- // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
- // whatever they get).
- llvm::Value *UsingStack = nullptr;
- UsingStack = CGF.Builder.CreateICmpSGE(
- reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
-
- CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
-
- // Otherwise, at least some kind of argument could go in these registers, the
- // question is whether this particular type is too big.
- CGF.EmitBlock(MaybeRegBlock);
-
- // Integer arguments may need to correct register alignment (for example a
- // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
- // align __gr_offs to calculate the potential address.
- if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
- int Align = TyAlign.getQuantity();
-
- reg_offs = CGF.Builder.CreateAdd(
- reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
- "align_regoffs");
- reg_offs = CGF.Builder.CreateAnd(
- reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
- "aligned_regoffs");
- }
-
- // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
- // The fact that this is done unconditionally reflects the fact that
- // allocating an argument to the stack also uses up all the remaining
- // registers of the appropriate kind.
- llvm::Value *NewOffset = nullptr;
- NewOffset = CGF.Builder.CreateAdd(
- reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
- CGF.Builder.CreateStore(NewOffset, reg_offs_p);
-
- // Now we're in a position to decide whether this argument really was in
- // registers or not.
- llvm::Value *InRegs = nullptr;
- InRegs = CGF.Builder.CreateICmpSLE(
- NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
-
- CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
-
- //=======================================
- // Argument was in registers
- //=======================================
-
- // Now we emit the code for if the argument was originally passed in
- // registers. First start the appropriate block:
- CGF.EmitBlock(InRegBlock);
-
- llvm::Value *reg_top = nullptr;
- Address reg_top_p =
- CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
- reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
- Address BaseAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, reg_top, reg_offs),
- CharUnits::fromQuantity(IsFPR ? 16 : 8));
- Address RegAddr = Address::invalid();
- llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
-
- if (IsIndirect) {
- // If it's been passed indirectly (actually a struct), whatever we find from
- // stored registers or on the stack will actually be a struct **.
- MemTy = llvm::PointerType::getUnqual(MemTy);
- }
-
- const Type *Base = nullptr;
- uint64_t NumMembers = 0;
- bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
- if (IsHFA && NumMembers > 1) {
- // Homogeneous aggregates passed in registers will have their elements split
- // and stored 16-bytes apart regardless of size (they're notionally in qN,
- // qN+1, ...). We reload and store into a temporary local variable
- // contiguously.
- assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
- auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
- llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
- llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
- Address Tmp = CGF.CreateTempAlloca(HFATy,
- std::max(TyAlign, BaseTyInfo.Align));
-
- // On big-endian platforms, the value will be right-aligned in its slot.
- int Offset = 0;
- if (CGF.CGM.getDataLayout().isBigEndian() &&
- BaseTyInfo.Width.getQuantity() < 16)
- Offset = 16 - BaseTyInfo.Width.getQuantity();
-
- for (unsigned i = 0; i < NumMembers; ++i) {
- CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
- Address LoadAddr =
- CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
- LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);
-
- Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);
-
- llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
- CGF.Builder.CreateStore(Elem, StoreAddr);
- }
-
- RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
- } else {
- // Otherwise the object is contiguous in memory.
-
- // It might be right-aligned in its slot.
- CharUnits SlotSize = BaseAddr.getAlignment();
- if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
- (IsHFA || !isAggregateTypeForABI(Ty)) &&
- TySize < SlotSize) {
- CharUnits Offset = SlotSize - TySize;
- BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
- }
-
- RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
- }
-
- CGF.EmitBranch(ContBlock);
-
- //=======================================
- // Argument was on the stack
- //=======================================
- CGF.EmitBlock(OnStackBlock);
-
- Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
- llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
-
- // Again, stack arguments may need realignment. In this case both integer and
- // floating-point ones might be affected.
- if (!IsIndirect && TyAlign.getQuantity() > 8) {
- int Align = TyAlign.getQuantity();
-
- OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
-
- OnStackPtr = CGF.Builder.CreateAdd(
- OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
- "align_stack");
- OnStackPtr = CGF.Builder.CreateAnd(
- OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
- "align_stack");
-
- OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
- }
- Address OnStackAddr(OnStackPtr,
- std::max(CharUnits::fromQuantity(8), TyAlign));
-
- // All stack slots are multiples of 8 bytes.
- CharUnits StackSlotSize = CharUnits::fromQuantity(8);
- CharUnits StackSize;
- if (IsIndirect)
- StackSize = StackSlotSize;
- else
- StackSize = TySize.alignTo(StackSlotSize);
-
- llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
- llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP(
- CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack");
-
- // Write the new value of __stack for the next call to va_arg
- CGF.Builder.CreateStore(NewStack, stack_p);
-
- if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
- TySize < StackSlotSize) {
- CharUnits Offset = StackSlotSize - TySize;
- OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
- }
-
- OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);
-
- CGF.EmitBranch(ContBlock);
-
- //=======================================
- // Tidy up
- //=======================================
- CGF.EmitBlock(ContBlock);
-
- Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
- OnStackAddr, OnStackBlock, "vaargs.addr");
-
- if (IsIndirect)
- return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"),
- TyAlign);
-
- return ResAddr;
-}
-
-Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
- CodeGenFunction &CGF) const {
- // The backend's lowering doesn't support va_arg for aggregates or
- // illegal vector types. Lower VAArg here for these cases and use
- // the LLVM va_arg instruction for everything else.
- if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
- return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
-
- uint64_t PointerSize = getTarget().getPointerWidth(0) / 8;
- CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
-
- // Empty records are ignored for parameter passing purposes.
- if (isEmptyRecord(getContext(), Ty, true)) {
- Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
- Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
- return Addr;
- }
-
- // The size of the actual thing passed, which might end up just
- // being a pointer for indirect types.
- auto TyInfo = getContext().getTypeInfoInChars(Ty);
-
- // Arguments bigger than 16 bytes which aren't homogeneous
- // aggregates should be passed indirectly.
- bool IsIndirect = false;
- if (TyInfo.Width.getQuantity() > 16) {
- const Type *Base = nullptr;
- uint64_t Members = 0;
- IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
- }
-
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
- TyInfo, SlotSize, /*AllowHigherAlign*/ true);
-}
-
-Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- bool IsIndirect = false;
-
- // Composites larger than 16 bytes are passed by reference.
- if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) > 128)
- IsIndirect = true;
-
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
- CGF.getContext().getTypeInfoInChars(Ty),
- CharUnits::fromQuantity(8),
- /*allowHigherAlign*/ false);
-}
-
-//===----------------------------------------------------------------------===//
-// ARM ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class ARMABIInfo : public SwiftABIInfo {
-public:
- enum ABIKind {
- APCS = 0,
- AAPCS = 1,
- AAPCS_VFP = 2,
- AAPCS16_VFP = 3,
- };
-
-private:
- ABIKind Kind;
- bool IsFloatABISoftFP;
-
-public:
- ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind)
- : SwiftABIInfo(CGT), Kind(_Kind) {
- setCCs();
- IsFloatABISoftFP = CGT.getCodeGenOpts().FloatABI == "softfp" ||
- CGT.getCodeGenOpts().FloatABI == ""; // default
- }
-
- bool isEABI() const {
- switch (getTarget().getTriple().getEnvironment()) {
- case llvm::Triple::Android:
- case llvm::Triple::EABI:
- case llvm::Triple::EABIHF:
- case llvm::Triple::GNUEABI:
- case llvm::Triple::GNUEABIHF:
- case llvm::Triple::MuslEABI:
- case llvm::Triple::MuslEABIHF:
- return true;
- default:
- return false;
- }
- }
-
- bool isEABIHF() const {
- switch (getTarget().getTriple().getEnvironment()) {
- case llvm::Triple::EABIHF:
- case llvm::Triple::GNUEABIHF:
- case llvm::Triple::MuslEABIHF:
- return true;
- default:
- return false;
- }
- }
-
- ABIKind getABIKind() const { return Kind; }
-
- bool allowBFloatArgsAndRet() const override {
- return !IsFloatABISoftFP && getTarget().hasBFloat16Type();
- }
-
-private:
- ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
- unsigned functionCallConv) const;
- ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
- unsigned functionCallConv) const;
- ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
- uint64_t Members) const;
- ABIArgInfo coerceIllegalVector(QualType Ty) const;
- bool isIllegalVectorType(QualType Ty) const;
- bool containsAnyFP16Vectors(QualType Ty) const;
-
- bool isHomogeneousAggregateBaseType(QualType Ty) const override;
- bool isHomogeneousAggregateSmallEnough(const Type *Ty,
- uint64_t Members) const override;
-
- bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;
-
- void computeInfo(CGFunctionInfo &FI) const override;
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-
- llvm::CallingConv::ID getLLVMDefaultCC() const;
- llvm::CallingConv::ID getABIDefaultCC() const;
- void setCCs();
-
- bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
- bool asReturnValue) const override {
- return occupiesMoreThan(CGT, scalars, /*total*/ 4);
- }
- bool isSwiftErrorInRegister() const override {
- return true;
- }
- bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
- unsigned elts) const override;
-};
-
-class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
- : TargetCodeGenInfo(std::make_unique<ARMABIInfo>(CGT, K)) {}
-
- const ARMABIInfo &getABIInfo() const {
- return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
- }
-
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
- return 13;
- }
-
- StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
- return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
- }
-
- bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override {
- llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
-
- // 0-15 are the 16 integer registers.
- AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
- return false;
- }
-
- unsigned getSizeOfUnwindException() const override {
- if (getABIInfo().isEABI()) return 88;
- return TargetCodeGenInfo::getSizeOfUnwindException();
- }
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override {
- if (GV->isDeclaration())
- return;
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
- if (!FD)
- return;
-
- const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
- if (!Attr)
- return;
-
- const char *Kind;
- switch (Attr->getInterrupt()) {
- case ARMInterruptAttr::Generic: Kind = ""; break;
- case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
- case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
- case ARMInterruptAttr::SWI: Kind = "SWI"; break;
- case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
- case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
- }
-
- llvm::Function *Fn = cast<llvm::Function>(GV);
-
- Fn->addFnAttr("interrupt", Kind);
-
- ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
- if (ABI == ARMABIInfo::APCS)
- return;
-
- // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
- // however this is not necessarily true on taking any interrupt. Instruct
- // the backend to perform a realignment as part of the function prologue.
- llvm::AttrBuilder B;
- B.addStackAlignmentAttr(8);
- Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
- }
-};
-
-class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
-public:
- WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
- : ARMTargetCodeGenInfo(CGT, K) {}
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override;
-
- void getDependentLibraryOption(llvm::StringRef Lib,
- llvm::SmallString<24> &Opt) const override {
- Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
- }
-
- void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
- llvm::SmallString<32> &Opt) const override {
- Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
- }
-};
-
-void WindowsARMTargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
- ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
- if (GV->isDeclaration())
- return;
- addStackProbeTargetAttributes(D, GV, CGM);
-}
-}
-
-void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
- if (!::classifyReturnType(getCXXABI(), FI, *this))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(),
- FI.getCallingConvention());
-
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type, FI.isVariadic(),
- FI.getCallingConvention());
-
-
- // Always honor user-specified calling convention.
- if (FI.getCallingConvention() != llvm::CallingConv::C)
- return;
-
- llvm::CallingConv::ID cc = getRuntimeCC();
- if (cc != llvm::CallingConv::C)
- FI.setEffectiveCallingConvention(cc);
-}
-
-/// Return the default calling convention that LLVM will use.
-llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
- // The default calling convention that LLVM will infer.
- if (isEABIHF() || getTarget().getTriple().isWatchABI())
- return llvm::CallingConv::ARM_AAPCS_VFP;
- else if (isEABI())
- return llvm::CallingConv::ARM_AAPCS;
- else
- return llvm::CallingConv::ARM_APCS;
-}
-
-/// Return the calling convention that our ABI would like us to use
-/// as the C calling convention.
-llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
- switch (getABIKind()) {
- case APCS: return llvm::CallingConv::ARM_APCS;
- case AAPCS: return llvm::CallingConv::ARM_AAPCS;
- case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
- case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
- }
- llvm_unreachable("bad ABI kind");
-}
-
-void ARMABIInfo::setCCs() {
- assert(getRuntimeCC() == llvm::CallingConv::C);
-
- // Don't muddy up the IR with a ton of explicit annotations if
- // they'd just match what LLVM will infer from the triple.
- llvm::CallingConv::ID abiCC = getABIDefaultCC();
- if (abiCC != getLLVMDefaultCC())
- RuntimeCC = abiCC;
-}
-
-ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
- uint64_t Size = getContext().getTypeSize(Ty);
- if (Size <= 32) {
- llvm::Type *ResType =
- llvm::Type::getInt32Ty(getVMContext());
- return ABIArgInfo::getDirect(ResType);
- }
- if (Size == 64 || Size == 128) {
- auto *ResType = llvm::FixedVectorType::get(
- llvm::Type::getInt32Ty(getVMContext()), Size / 32);
- return ABIArgInfo::getDirect(ResType);
- }
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-}
-
-ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
- const Type *Base,
- uint64_t Members) const {
- assert(Base && "Base class should be set for homogeneous aggregate");
- // Base can be a floating-point or a vector.
- if (const VectorType *VT = Base->getAs<VectorType>()) {
- // FP16 vectors should be converted to integer vectors
- if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
- uint64_t Size = getContext().getTypeSize(VT);
- auto *NewVecTy = llvm::FixedVectorType::get(
- llvm::Type::getInt32Ty(getVMContext()), Size / 32);
- llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
- return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
- }
- }
- unsigned Align = 0;
- if (getABIKind() == ARMABIInfo::AAPCS ||
- getABIKind() == ARMABIInfo::AAPCS_VFP) {
- // For alignment adjusted HFAs, cap the argument alignment to 8, leave it
- // default otherwise.
- Align = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
- unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity();
- Align = (Align > BaseAlign && Align >= 8) ? 8 : 0;
- }
- return ABIArgInfo::getDirect(nullptr, 0, nullptr, false, Align);
-}
-
-ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
- unsigned functionCallConv) const {
- // 6.1.2.1 The following argument types are VFP CPRCs:
- // A single-precision floating-point type (including promoted
- // half-precision types); A double-precision floating-point type;
- // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
- // with a Base Type of a single- or double-precision floating-point type,
- // 64-bit containerized vectors or 128-bit containerized vectors with one
- // to four Elements.
- // Variadic functions should always marshal to the base standard.
- bool IsAAPCS_VFP =
- !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false);
-
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- // Handle illegal vector types here.
- if (isIllegalVectorType(Ty))
- return coerceIllegalVector(Ty);
-
- if (!isAggregateTypeForABI(Ty)) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
- Ty = EnumTy->getDecl()->getIntegerType();
- }
-
- if (const auto *EIT = Ty->getAs<ExtIntType>())
- if (EIT->getNumBits() > 64)
- return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
-
- return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
- }
-
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
- }
-
- // Ignore empty records.
- if (isEmptyRecord(getContext(), Ty, true))
- return ABIArgInfo::getIgnore();
-
- if (IsAAPCS_VFP) {
- // Homogeneous Aggregates need to be expanded when we can fit the aggregate
- // into VFP registers.
- const Type *Base = nullptr;
- uint64_t Members = 0;
- if (isHomogeneousAggregate(Ty, Base, Members))
- return classifyHomogeneousAggregate(Ty, Base, Members);
- } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
- // WatchOS does have homogeneous aggregates. Note that we intentionally use
- // this convention even for a variadic function: the backend will use GPRs
- // if needed.
- const Type *Base = nullptr;
- uint64_t Members = 0;
- if (isHomogeneousAggregate(Ty, Base, Members)) {
- assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
- llvm::Type *Ty =
- llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
- return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
- }
- }
-
- if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
- getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
- // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
- // bigger than 128-bits, they get placed in space allocated by the caller,
- // and a pointer is passed.
- return ABIArgInfo::getIndirect(
- CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
- }
-
- // Support byval for ARM.
- // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
- // most 8-byte. We realign the indirect argument if type alignment is bigger
- // than ABI alignment.
- uint64_t ABIAlign = 4;
- uint64_t TyAlign;
- if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
- getABIKind() == ARMABIInfo::AAPCS) {
- TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
- ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
- } else {
- TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
- }
- if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
- assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval");
- return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
- /*ByVal=*/true,
- /*Realign=*/TyAlign > ABIAlign);
- }
-
- // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of
- // same size and alignment.
- if (getTarget().isRenderScriptTarget()) {
- return coerceToIntArray(Ty, getContext(), getVMContext());
- }
-
- // Otherwise, pass by coercing to a structure of the appropriate size.
- llvm::Type* ElemTy;
- unsigned SizeRegs;
- // FIXME: Try to match the types of the arguments more accurately where
- // we can.
- if (TyAlign <= 4) {
- ElemTy = llvm::Type::getInt32Ty(getVMContext());
- SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
- } else {
- ElemTy = llvm::Type::getInt64Ty(getVMContext());
- SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
- }
-
- return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
-}
-
-static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
- llvm::LLVMContext &VMContext) {
- // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
- // is called integer-like if its size is less than or equal to one word, and
- // the offset of each of its addressable sub-fields is zero.
-
- uint64_t Size = Context.getTypeSize(Ty);
-
- // Check that the type fits in a word.
- if (Size > 32)
- return false;
-
- // FIXME: Handle vector types!
- if (Ty->isVectorType())
- return false;
-
- // Float types are never treated as "integer like".
- if (Ty->isRealFloatingType())
- return false;
-
- // If this is a builtin or pointer type then it is ok.
- if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
- return true;
-
- // Small complex integer types are "integer like".
- if (const ComplexType *CT = Ty->getAs<ComplexType>())
- return isIntegerLikeType(CT->getElementType(), Context, VMContext);
-
- // Single element and zero sized arrays should be allowed, by the definition
- // above, but they are not.
-
- // Otherwise, it must be a record type.
- const RecordType *RT = Ty->getAs<RecordType>();
- if (!RT) return false;
-
- // Ignore records with flexible arrays.
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return false;
-
- // Check that all sub-fields are at offset 0, and are themselves "integer
- // like".
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
-
- bool HadField = false;
- unsigned idx = 0;
- for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
- i != e; ++i, ++idx) {
- const FieldDecl *FD = *i;
-
- // Bit-fields are not addressable, we only need to verify they are "integer
- // like". We still have to disallow a subsequent non-bitfield, for example:
- // struct { int : 0; int x }
- // is non-integer like according to gcc.
- if (FD->isBitField()) {
- if (!RD->isUnion())
- HadField = true;
-
- if (!isIntegerLikeType(FD->getType(), Context, VMContext))
- return false;
-
- continue;
- }
-
- // Check if this field is at offset 0.
- if (Layout.getFieldOffset(idx) != 0)
- return false;
-
- if (!isIntegerLikeType(FD->getType(), Context, VMContext))
- return false;
-
- // Only allow at most one field in a structure. This doesn't match the
- // wording above, but follows gcc in situations with a field following an
- // empty structure.
- if (!RD->isUnion()) {
- if (HadField)
- return false;
-
- HadField = true;
- }
- }
-
- return true;
-}
-
-ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
- unsigned functionCallConv) const {
-
- // Variadic functions should always marshal to the base standard.
- bool IsAAPCS_VFP =
- !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true);
-
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- if (const VectorType *VT = RetTy->getAs<VectorType>()) {
- // Large vector types should be returned via memory.
- if (getContext().getTypeSize(RetTy) > 128)
- return getNaturalAlignIndirect(RetTy);
- // TODO: FP16/BF16 vectors should be converted to integer vectors
- // This check is similar to isIllegalVectorType - refactor?
- if ((!getTarget().hasLegalHalfType() &&
- (VT->getElementType()->isFloat16Type() ||
- VT->getElementType()->isHalfType())) ||
- (IsFloatABISoftFP &&
- VT->getElementType()->isBFloat16Type()))
- return coerceIllegalVector(RetTy);
- }
-
- if (!isAggregateTypeForABI(RetTy)) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
-
- if (const auto *EIT = RetTy->getAs<ExtIntType>())
- if (EIT->getNumBits() > 64)
- return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
-
- return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect();
- }
-
- // Are we following APCS?
- if (getABIKind() == APCS) {
- if (isEmptyRecord(getContext(), RetTy, false))
- return ABIArgInfo::getIgnore();
-
- // Complex types are all returned as packed integers.
- //
- // FIXME: Consider using 2 x vector types if the back end handles them
- // correctly.
- if (RetTy->isAnyComplexType())
- return ABIArgInfo::getDirect(llvm::IntegerType::get(
- getVMContext(), getContext().getTypeSize(RetTy)));
-
- // Integer like structures are returned in r0.
- if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
- // Return in the smallest viable integer type.
- uint64_t Size = getContext().getTypeSize(RetTy);
- if (Size <= 8)
- return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
- if (Size <= 16)
- return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
- return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
- }
-
- // Otherwise return in memory.
- return getNaturalAlignIndirect(RetTy);
- }
-
- // Otherwise this is an AAPCS variant.
-
- if (isEmptyRecord(getContext(), RetTy, true))
- return ABIArgInfo::getIgnore();
-
- // Check for homogeneous aggregates with AAPCS-VFP.
- if (IsAAPCS_VFP) {
- const Type *Base = nullptr;
- uint64_t Members = 0;
- if (isHomogeneousAggregate(RetTy, Base, Members))
- return classifyHomogeneousAggregate(RetTy, Base, Members);
- }
-
- // Aggregates <= 4 bytes are returned in r0; other aggregates
- // are returned indirectly.
- uint64_t Size = getContext().getTypeSize(RetTy);
- if (Size <= 32) {
- // On RenderScript, coerce Aggregates <= 4 bytes to an integer array of
- // same size and alignment.
- if (getTarget().isRenderScriptTarget()) {
- return coerceToIntArray(RetTy, getContext(), getVMContext());
- }
- if (getDataLayout().isBigEndian())
- // Return in 32 bit integer integer type (as if loaded by LDR, AAPCS 5.4)
- return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
-
- // Return in the smallest viable integer type.
- if (Size <= 8)
- return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
- if (Size <= 16)
- return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
- return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
- } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
- llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
- llvm::Type *CoerceTy =
- llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
- return ABIArgInfo::getDirect(CoerceTy);
- }
-
- return getNaturalAlignIndirect(RetTy);
-}
-
-/// isIllegalVector - check whether Ty is an illegal vector type.
-bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
- if (const VectorType *VT = Ty->getAs<VectorType> ()) {
- // On targets that don't support half, fp16 or bfloat, they are expanded
- // into float, and we don't want the ABI to depend on whether or not they
- // are supported in hardware. Thus return false to coerce vectors of these
- // types into integer vectors.
- // We do not depend on hasLegalHalfType for bfloat as it is a
- // separate IR type.
- if ((!getTarget().hasLegalHalfType() &&
- (VT->getElementType()->isFloat16Type() ||
- VT->getElementType()->isHalfType())) ||
- (IsFloatABISoftFP &&
- VT->getElementType()->isBFloat16Type()))
- return true;
- if (isAndroid()) {
- // Android shipped using Clang 3.1, which supported a slightly different
- // vector ABI. The primary differences were that 3-element vector types
- // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path
- // accepts that legacy behavior for Android only.
- // Check whether VT is legal.
- unsigned NumElements = VT->getNumElements();
- // NumElements should be power of 2 or equal to 3.
- if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
- return true;
- } else {
- // Check whether VT is legal.
- unsigned NumElements = VT->getNumElements();
- uint64_t Size = getContext().getTypeSize(VT);
- // NumElements should be power of 2.
- if (!llvm::isPowerOf2_32(NumElements))
- return true;
- // Size should be greater than 32 bits.
- return Size <= 32;
- }
- }
- return false;
-}
-
-/// Return true if a type contains any 16-bit floating point vectors
-bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
- if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
- uint64_t NElements = AT->getSize().getZExtValue();
- if (NElements == 0)
- return false;
- return containsAnyFP16Vectors(AT->getElementType());
- } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
-
- // If this is a C++ record, check the bases first.
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
- if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) {
- return containsAnyFP16Vectors(B.getType());
- }))
- return true;
-
- if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) {
- return FD && containsAnyFP16Vectors(FD->getType());
- }))
- return true;
-
- return false;
- } else {
- if (const VectorType *VT = Ty->getAs<VectorType>())
- return (VT->getElementType()->isFloat16Type() ||
- VT->getElementType()->isBFloat16Type() ||
- VT->getElementType()->isHalfType());
- return false;
- }
-}
-
-bool ARMABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
- llvm::Type *eltTy,
- unsigned numElts) const {
- if (!llvm::isPowerOf2_32(numElts))
- return false;
- unsigned size = getDataLayout().getTypeStoreSizeInBits(eltTy);
- if (size > 64)
- return false;
- if (vectorSize.getQuantity() != 8 &&
- (vectorSize.getQuantity() != 16 || numElts == 1))
- return false;
- return true;
-}
-
-bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
- // Homogeneous aggregates for AAPCS-VFP must have base types of float,
- // double, or 64-bit or 128-bit vectors.
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
- if (BT->getKind() == BuiltinType::Float ||
- BT->getKind() == BuiltinType::Double ||
- BT->getKind() == BuiltinType::LongDouble)
- return true;
- } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
- unsigned VecSize = getContext().getTypeSize(VT);
- if (VecSize == 64 || VecSize == 128)
- return true;
- }
- return false;
-}
-
-bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
- uint64_t Members) const {
- return Members <= 4;
-}
-
-bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
- bool acceptHalf) const {
- // Give precedence to user-specified calling conventions.
- if (callConvention != llvm::CallingConv::C)
- return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP);
- else
- return (getABIKind() == AAPCS_VFP) ||
- (acceptHalf && (getABIKind() == AAPCS16_VFP));
-}
-
-Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- CharUnits SlotSize = CharUnits::fromQuantity(4);
-
- // Empty records are ignored for parameter passing purposes.
- if (isEmptyRecord(getContext(), Ty, true)) {
- Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
- Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
- return Addr;
- }
-
- CharUnits TySize = getContext().getTypeSizeInChars(Ty);
- CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty);
-
- // Use indirect if size of the illegal vector is bigger than 16 bytes.
- bool IsIndirect = false;
- const Type *Base = nullptr;
- uint64_t Members = 0;
- if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
- IsIndirect = true;
-
- // ARMv7k passes structs bigger than 16 bytes indirectly, in space
- // allocated by the caller.
- } else if (TySize > CharUnits::fromQuantity(16) &&
- getABIKind() == ARMABIInfo::AAPCS16_VFP &&
- !isHomogeneousAggregate(Ty, Base, Members)) {
- IsIndirect = true;
-
- // Otherwise, bound the type's ABI alignment.
- // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
- // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
- // Our callers should be prepared to handle an under-aligned address.
- } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
- getABIKind() == ARMABIInfo::AAPCS) {
- TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
- TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
- } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
- // ARMv7k allows type alignment up to 16 bytes.
- TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
- TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
- } else {
- TyAlignForABI = CharUnits::fromQuantity(4);
- }
-
- TypeInfoChars TyInfo(TySize, TyAlignForABI, false);
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
- SlotSize, /*AllowHigherAlign*/ true);
-}
-
-//===----------------------------------------------------------------------===//
-// NVPTX ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class NVPTXTargetCodeGenInfo;
-
-class NVPTXABIInfo : public ABIInfo {
- NVPTXTargetCodeGenInfo &CGInfo;
-
-public:
- NVPTXABIInfo(CodeGenTypes &CGT, NVPTXTargetCodeGenInfo &Info)
- : ABIInfo(CGT), CGInfo(Info) {}
-
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType Ty) const;
-
- void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
- bool isUnsupportedType(QualType T) const;
- ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, unsigned MaxSize) const;
-};
-
-class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<NVPTXABIInfo>(CGT, *this)) {}
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M) const override;
- bool shouldEmitStaticExternCAliases() const override;
-
- llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const override {
- // On the device side, surface reference is represented as an object handle
- // in 64-bit integer.
- return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
- }
-
- llvm::Type *getCUDADeviceBuiltinTextureDeviceType() const override {
- // On the device side, texture reference is represented as an object handle
- // in 64-bit integer.
- return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
- }
-
- bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF, LValue Dst,
- LValue Src) const override {
- emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
- return true;
- }
-
- bool emitCUDADeviceBuiltinTextureDeviceCopy(CodeGenFunction &CGF, LValue Dst,
- LValue Src) const override {
- emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
- return true;
- }
-
-private:
- // Adds a NamedMDNode with GV, Name, and Operand as operands, and adds the
- // resulting MDNode to the nvvm.annotations MDNode.
- static void addNVVMMetadata(llvm::GlobalValue *GV, StringRef Name,
- int Operand);
-
- static void emitBuiltinSurfTexDeviceCopy(CodeGenFunction &CGF, LValue Dst,
- LValue Src) {
- llvm::Value *Handle = nullptr;
- llvm::Constant *C =
- llvm::dyn_cast<llvm::Constant>(Src.getAddress(CGF).getPointer());
- // Lookup `addrspacecast` through the constant pointer if any.
- if (auto *ASC = llvm::dyn_cast_or_null<llvm::AddrSpaceCastOperator>(C))
- C = llvm::cast<llvm::Constant>(ASC->getPointerOperand());
- if (auto *GV = llvm::dyn_cast_or_null<llvm::GlobalVariable>(C)) {
- // Load the handle from the specific global variable using
- // `nvvm.texsurf.handle.internal` intrinsic.
- Handle = CGF.EmitRuntimeCall(
- CGF.CGM.getIntrinsic(llvm::Intrinsic::nvvm_texsurf_handle_internal,
- {GV->getType()}),
- {GV}, "texsurf_handle");
- } else
- Handle = CGF.EmitLoadOfScalar(Src, SourceLocation());
- CGF.EmitStoreOfScalar(Handle, Dst);
- }
-};
-
-/// Checks if the type is unsupported directly by the current target.
-bool NVPTXABIInfo::isUnsupportedType(QualType T) const {
- ASTContext &Context = getContext();
- if (!Context.getTargetInfo().hasFloat16Type() && T->isFloat16Type())
- return true;
- if (!Context.getTargetInfo().hasFloat128Type() &&
- (T->isFloat128Type() ||
- (T->isRealFloatingType() && Context.getTypeSize(T) == 128)))
- return true;
- if (const auto *EIT = T->getAs<ExtIntType>())
- return EIT->getNumBits() >
- (Context.getTargetInfo().hasInt128Type() ? 128U : 64U);
- if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() &&
- Context.getTypeSize(T) > 64U)
- return true;
- if (const auto *AT = T->getAsArrayTypeUnsafe())
- return isUnsupportedType(AT->getElementType());
- const auto *RT = T->getAs<RecordType>();
- if (!RT)
- return false;
- const RecordDecl *RD = RT->getDecl();
-
- // If this is a C++ record, check the bases first.
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
- for (const CXXBaseSpecifier &I : CXXRD->bases())
- if (isUnsupportedType(I.getType()))
- return true;
-
- for (const FieldDecl *I : RD->fields())
- if (isUnsupportedType(I->getType()))
- return true;
- return false;
-}
-
-/// Coerce the given type into an array with maximum allowed size of elements.
-ABIArgInfo NVPTXABIInfo::coerceToIntArrayWithLimit(QualType Ty,
- unsigned MaxSize) const {
- // Alignment and Size are measured in bits.
- const uint64_t Size = getContext().getTypeSize(Ty);
- const uint64_t Alignment = getContext().getTypeAlign(Ty);
- const unsigned Div = std::min<unsigned>(MaxSize, Alignment);
- llvm::Type *IntType = llvm::Type::getIntNTy(getVMContext(), Div);
- const uint64_t NumElements = (Size + Div - 1) / Div;
- return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
-}
-
-ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- if (getContext().getLangOpts().OpenMP &&
- getContext().getLangOpts().OpenMPIsDevice && isUnsupportedType(RetTy))
- return coerceToIntArrayWithLimit(RetTy, 64);
-
- // note: this is different from default ABI
- if (!RetTy->isScalarType())
- return ABIArgInfo::getDirect();
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
-
- return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
-}
-
-ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- // Return aggregates type as indirect by value
- if (isAggregateTypeForABI(Ty)) {
- // Under CUDA device compilation, tex/surf builtin types are replaced with
- // object types and passed directly.
- if (getContext().getLangOpts().CUDAIsDevice) {
- if (Ty->isCUDADeviceBuiltinSurfaceType())
- return ABIArgInfo::getDirect(
- CGInfo.getCUDADeviceBuiltinSurfaceDeviceType());
- if (Ty->isCUDADeviceBuiltinTextureType())
- return ABIArgInfo::getDirect(
- CGInfo.getCUDADeviceBuiltinTextureDeviceType());
- }
- return getNaturalAlignIndirect(Ty, /* byval */ true);
- }
-
- if (const auto *EIT = Ty->getAs<ExtIntType>()) {
- if ((EIT->getNumBits() > 128) ||
- (!getContext().getTargetInfo().hasInt128Type() &&
- EIT->getNumBits() > 64))
- return getNaturalAlignIndirect(Ty, /* byval */ true);
- }
-
- return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect());
-}
-
-void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type);
-
- // Always honor user-specified calling convention.
- if (FI.getCallingConvention() != llvm::CallingConv::C)
- return;
-
- FI.setEffectiveCallingConvention(getRuntimeCC());
-}
-
-Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- llvm_unreachable("NVPTX does not support varargs");
-}
-
-void NVPTXTargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
- if (GV->isDeclaration())
- return;
- const VarDecl *VD = dyn_cast_or_null<VarDecl>(D);
- if (VD) {
- if (M.getLangOpts().CUDA) {
- if (VD->getType()->isCUDADeviceBuiltinSurfaceType())
- addNVVMMetadata(GV, "surface", 1);
- else if (VD->getType()->isCUDADeviceBuiltinTextureType())
- addNVVMMetadata(GV, "texture", 1);
- return;
- }
- }
-
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
- if (!FD) return;
-
- llvm::Function *F = cast<llvm::Function>(GV);
-
- // Perform special handling in OpenCL mode
- if (M.getLangOpts().OpenCL) {
- // Use OpenCL function attributes to check for kernel functions
- // By default, all functions are device functions
- if (FD->hasAttr<OpenCLKernelAttr>()) {
- // OpenCL __kernel functions get kernel metadata
- // Create !{<func-ref>, metadata !"kernel", i32 1} node
- addNVVMMetadata(F, "kernel", 1);
- // And kernel functions are not subject to inlining
- F->addFnAttr(llvm::Attribute::NoInline);
- }
- }
-
- // Perform special handling in CUDA mode.
- if (M.getLangOpts().CUDA) {
- // CUDA __global__ functions get a kernel metadata entry. Since
- // __global__ functions cannot be called from the device, we do not
- // need to set the noinline attribute.
- if (FD->hasAttr<CUDAGlobalAttr>()) {
- // Create !{<func-ref>, metadata !"kernel", i32 1} node
- addNVVMMetadata(F, "kernel", 1);
- }
- if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
- // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
- llvm::APSInt MaxThreads(32);
- MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
- if (MaxThreads > 0)
- addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());
-
- // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
- // not specified in __launch_bounds__ or if the user specified a 0 value,
- // we don't have to add a PTX directive.
- if (Attr->getMinBlocks()) {
- llvm::APSInt MinBlocks(32);
- MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
- if (MinBlocks > 0)
- // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
- addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
- }
- }
- }
-}
-
-void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::GlobalValue *GV,
- StringRef Name, int Operand) {
- llvm::Module *M = GV->getParent();
- llvm::LLVMContext &Ctx = M->getContext();
-
- // Get "nvvm.annotations" metadata node
- llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
-
- llvm::Metadata *MDVals[] = {
- llvm::ConstantAsMetadata::get(GV), llvm::MDString::get(Ctx, Name),
- llvm::ConstantAsMetadata::get(
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
- // Append metadata to nvvm.annotations
- MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
-}
-
-bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
- return false;
-}
-}
-
-//===----------------------------------------------------------------------===//
-// SystemZ ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class SystemZABIInfo : public SwiftABIInfo {
- bool HasVector;
- bool IsSoftFloatABI;
-
-public:
- SystemZABIInfo(CodeGenTypes &CGT, bool HV, bool SF)
- : SwiftABIInfo(CGT), HasVector(HV), IsSoftFloatABI(SF) {}
-
- bool isPromotableIntegerTypeForABI(QualType Ty) const;
- bool isCompoundType(QualType Ty) const;
- bool isVectorArgumentType(QualType Ty) const;
- bool isFPArgumentType(QualType Ty) const;
- QualType GetSingleElementType(QualType Ty) const;
-
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType ArgTy) const;
-
- void computeInfo(CGFunctionInfo &FI) const override {
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type);
- }
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-
- bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
- bool asReturnValue) const override {
- return occupiesMoreThan(CGT, scalars, /*total*/ 4);
- }
- bool isSwiftErrorInRegister() const override {
- return false;
- }
-};
-
-class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector, bool SoftFloatABI)
- : TargetCodeGenInfo(
- std::make_unique<SystemZABIInfo>(CGT, HasVector, SoftFloatABI)) {}
-
- llvm::Value *testFPKind(llvm::Value *V, unsigned BuiltinID,
- CGBuilderTy &Builder,
- CodeGenModule &CGM) const override {
- assert(V->getType()->isFloatingPointTy() && "V should have an FP type.");
- // Only use TDC in constrained FP mode.
- if (!Builder.getIsFPConstrained())
- return nullptr;
-
- llvm::Type *Ty = V->getType();
- if (Ty->isFloatTy() || Ty->isDoubleTy() || Ty->isFP128Ty()) {
- llvm::Module &M = CGM.getModule();
- auto &Ctx = M.getContext();
- llvm::Function *TDCFunc =
- llvm::Intrinsic::getDeclaration(&M, llvm::Intrinsic::s390_tdc, Ty);
- unsigned TDCBits = 0;
- switch (BuiltinID) {
- case Builtin::BI__builtin_isnan:
- TDCBits = 0xf;
- break;
- case Builtin::BIfinite:
- case Builtin::BI__finite:
- case Builtin::BIfinitef:
- case Builtin::BI__finitef:
- case Builtin::BIfinitel:
- case Builtin::BI__finitel:
- case Builtin::BI__builtin_isfinite:
- TDCBits = 0xfc0;
- break;
- case Builtin::BI__builtin_isinf:
- TDCBits = 0x30;
- break;
- default:
- break;
- }
- if (TDCBits)
- return Builder.CreateCall(
- TDCFunc,
- {V, llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), TDCBits)});
- }
- return nullptr;
- }
-};
-}
-
-bool SystemZABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- // Promotable integer types are required to be promoted by the ABI.
- if (ABIInfo::isPromotableIntegerTypeForABI(Ty))
- return true;
-
- if (const auto *EIT = Ty->getAs<ExtIntType>())
- if (EIT->getNumBits() < 64)
- return true;
-
- // 32-bit values must also be promoted.
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
- switch (BT->getKind()) {
- case BuiltinType::Int:
- case BuiltinType::UInt:
- return true;
- default:
- return false;
- }
- return false;
-}
-
-bool SystemZABIInfo::isCompoundType(QualType Ty) const {
- return (Ty->isAnyComplexType() ||
- Ty->isVectorType() ||
- isAggregateTypeForABI(Ty));
-}
-
-bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
- return (HasVector &&
- Ty->isVectorType() &&
- getContext().getTypeSize(Ty) <= 128);
-}
-
-bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
- if (IsSoftFloatABI)
- return false;
-
- if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
- switch (BT->getKind()) {
- case BuiltinType::Float:
- case BuiltinType::Double:
- return true;
- default:
- return false;
- }
-
- return false;
-}
-
-QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
- const RecordType *RT = Ty->getAs<RecordType>();
-
- if (RT && RT->isStructureOrClassType()) {
- const RecordDecl *RD = RT->getDecl();
- QualType Found;
-
- // If this is a C++ record, check the bases first.
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
- for (const auto &I : CXXRD->bases()) {
- QualType Base = I.getType();
-
- // Empty bases don't affect things either way.
- if (isEmptyRecord(getContext(), Base, true))
- continue;
-
- if (!Found.isNull())
- return Ty;
- Found = GetSingleElementType(Base);
- }
-
- // Check the fields.
- for (const auto *FD : RD->fields()) {
- // For compatibility with GCC, ignore empty bitfields in C++ mode.
- // Unlike isSingleElementStruct(), empty structure and array fields
- // do count. So do anonymous bitfields that aren't zero-sized.
- if (getContext().getLangOpts().CPlusPlus &&
- FD->isZeroLengthBitField(getContext()))
- continue;
- // Like isSingleElementStruct(), ignore C++20 empty data members.
- if (FD->hasAttr<NoUniqueAddressAttr>() &&
- isEmptyRecord(getContext(), FD->getType(), true))
- continue;
-
- // Unlike isSingleElementStruct(), arrays do not count.
- // Nested structures still do though.
- if (!Found.isNull())
- return Ty;
- Found = GetSingleElementType(FD->getType());
- }
-
- // Unlike isSingleElementStruct(), trailing padding is allowed.
- // An 8-byte aligned struct s { float f; } is passed as a double.
- if (!Found.isNull())
- return Found;
- }
-
- return Ty;
-}
-
-Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- // Assume that va_list type is correct; should be pointer to LLVM type:
- // struct {
- // i64 __gpr;
- // i64 __fpr;
- // i8 *__overflow_arg_area;
- // i8 *__reg_save_area;
- // };
-
- // Every non-vector argument occupies 8 bytes and is passed by preference
- // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
- // always passed on the stack.
- Ty = getContext().getCanonicalType(Ty);
- auto TyInfo = getContext().getTypeInfoInChars(Ty);
- llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
- llvm::Type *DirectTy = ArgTy;
- ABIArgInfo AI = classifyArgumentType(Ty);
- bool IsIndirect = AI.isIndirect();
- bool InFPRs = false;
- bool IsVector = false;
- CharUnits UnpaddedSize;
- CharUnits DirectAlign;
- if (IsIndirect) {
- DirectTy = llvm::PointerType::getUnqual(DirectTy);
- UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
- } else {
- if (AI.getCoerceToType())
- ArgTy = AI.getCoerceToType();
- InFPRs = (!IsSoftFloatABI && (ArgTy->isFloatTy() || ArgTy->isDoubleTy()));
- IsVector = ArgTy->isVectorTy();
- UnpaddedSize = TyInfo.Width;
- DirectAlign = TyInfo.Align;
- }
- CharUnits PaddedSize = CharUnits::fromQuantity(8);
- if (IsVector && UnpaddedSize > PaddedSize)
- PaddedSize = CharUnits::fromQuantity(16);
- assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");
-
- CharUnits Padding = (PaddedSize - UnpaddedSize);
-
- llvm::Type *IndexTy = CGF.Int64Ty;
- llvm::Value *PaddedSizeV =
- llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());
-
- if (IsVector) {
- // Work out the address of a vector argument on the stack.
- // Vector arguments are always passed in the high bits of a
- // single (8 byte) or double (16 byte) stack slot.
- Address OverflowArgAreaPtr =
- CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
- Address OverflowArgArea =
- Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
- TyInfo.Align);
- Address MemAddr =
- CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");
-
- // Update overflow_arg_area_ptr pointer
- llvm::Value *NewOverflowArgArea =
- CGF.Builder.CreateGEP(OverflowArgArea.getElementType(),
- OverflowArgArea.getPointer(), PaddedSizeV,
- "overflow_arg_area");
- CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
-
- return MemAddr;
- }
-
- assert(PaddedSize.getQuantity() == 8);
-
- unsigned MaxRegs, RegCountField, RegSaveIndex;
- CharUnits RegPadding;
- if (InFPRs) {
- MaxRegs = 4; // Maximum of 4 FPR arguments
- RegCountField = 1; // __fpr
- RegSaveIndex = 16; // save offset for f0
- RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
- } else {
- MaxRegs = 5; // Maximum of 5 GPR arguments
- RegCountField = 0; // __gpr
- RegSaveIndex = 2; // save offset for r2
- RegPadding = Padding; // values are passed in the low bits of a GPR
- }
-
- Address RegCountPtr =
- CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
- llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
- llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
- llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
- "fits_in_regs");
-
- llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
- llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
- llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
- CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
-
- // Emit code to load the value if it was passed in registers.
- CGF.EmitBlock(InRegBlock);
-
- // Work out the address of an argument register.
- llvm::Value *ScaledRegCount =
- CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
- llvm::Value *RegBase =
- llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
- + RegPadding.getQuantity());
- llvm::Value *RegOffset =
- CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
- Address RegSaveAreaPtr =
- CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
- llvm::Value *RegSaveArea =
- CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
- Address RawRegAddr(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, RegOffset,
- "raw_reg_addr"),
- PaddedSize);
- Address RegAddr =
- CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");
-
- // Update the register count
- llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
- llvm::Value *NewRegCount =
- CGF.Builder.CreateAdd(RegCount, One, "reg_count");
- CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
- CGF.EmitBranch(ContBlock);
-
- // Emit code to load the value if it was passed in memory.
- CGF.EmitBlock(InMemBlock);
-
- // Work out the address of a stack argument.
- Address OverflowArgAreaPtr =
- CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
- Address OverflowArgArea =
- Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
- PaddedSize);
- Address RawMemAddr =
- CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
- Address MemAddr =
- CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");
-
- // Update overflow_arg_area_ptr pointer
- llvm::Value *NewOverflowArgArea =
- CGF.Builder.CreateGEP(OverflowArgArea.getElementType(),
- OverflowArgArea.getPointer(), PaddedSizeV,
- "overflow_arg_area");
- CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
- CGF.EmitBranch(ContBlock);
-
- // Return the appropriate result.
- CGF.EmitBlock(ContBlock);
- Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
- MemAddr, InMemBlock, "va_arg.addr");
-
- if (IsIndirect)
- ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
- TyInfo.Align);
-
- return ResAddr;
-}
-
-ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
- if (isVectorArgumentType(RetTy))
- return ABIArgInfo::getDirect();
- if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
- return getNaturalAlignIndirect(RetTy);
- return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect());
-}
-
-ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
- // Handle the generic C++ ABI.
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
-
- // Integers and enums are extended to full register width.
- if (isPromotableIntegerTypeForABI(Ty))
- return ABIArgInfo::getExtend(Ty);
-
- // Handle vector types and vector-like structure types. Note that
- // as opposed to float-like structure types, we do not allow any
- // padding for vector-like structures, so verify the sizes match.
- uint64_t Size = getContext().getTypeSize(Ty);
- QualType SingleElementTy = GetSingleElementType(Ty);
- if (isVectorArgumentType(SingleElementTy) &&
- getContext().getTypeSize(SingleElementTy) == Size)
- return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));
-
- // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
- if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-
- // Handle small structures.
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
- // Structures with flexible arrays have variable length, so really
- // fail the size test above.
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-
- // The structure is passed as an unextended integer, a float, or a double.
- llvm::Type *PassTy;
- if (isFPArgumentType(SingleElementTy)) {
- assert(Size == 32 || Size == 64);
- if (Size == 32)
- PassTy = llvm::Type::getFloatTy(getVMContext());
- else
- PassTy = llvm::Type::getDoubleTy(getVMContext());
- } else
- PassTy = llvm::IntegerType::get(getVMContext(), Size);
- return ABIArgInfo::getDirect(PassTy);
- }
-
- // Non-structure compounds are passed indirectly.
- if (isCompoundType(Ty))
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-
- return ABIArgInfo::getDirect(nullptr);
-}
-
-//===----------------------------------------------------------------------===//
-// MSP430 ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class MSP430ABIInfo : public DefaultABIInfo {
- static ABIArgInfo complexArgInfo() {
- ABIArgInfo Info = ABIArgInfo::getDirect();
- Info.setCanBeFlattened(false);
- return Info;
- }
-
-public:
- MSP430ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
-
- ABIArgInfo classifyReturnType(QualType RetTy) const {
- if (RetTy->isAnyComplexType())
- return complexArgInfo();
-
- return DefaultABIInfo::classifyReturnType(RetTy);
- }
-
- ABIArgInfo classifyArgumentType(QualType RetTy) const {
- if (RetTy->isAnyComplexType())
- return complexArgInfo();
-
- return DefaultABIInfo::classifyArgumentType(RetTy);
- }
-
- // Just copy the original implementations because
- // DefaultABIInfo::classify{Return,Argument}Type() are not virtual
- void computeInfo(CGFunctionInfo &FI) const override {
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type);
- }
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override {
- return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
- }
-};
-
-class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<MSP430ABIInfo>(CGT)) {}
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M) const override;
-};
-
-}
-
-void MSP430TargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
- if (GV->isDeclaration())
- return;
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
- const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>();
- if (!InterruptAttr)
- return;
-
- // Handle 'interrupt' attribute:
- llvm::Function *F = cast<llvm::Function>(GV);
-
- // Step 1: Set ISR calling convention.
- F->setCallingConv(llvm::CallingConv::MSP430_INTR);
-
- // Step 2: Add attributes goodness.
- F->addFnAttr(llvm::Attribute::NoInline);
- F->addFnAttr("interrupt", llvm::utostr(InterruptAttr->getNumber()));
- }
-}
-
-//===----------------------------------------------------------------------===//
-// MIPS ABI Implementation. This works for both little-endian and
-// big-endian variants.
-//===----------------------------------------------------------------------===//
-
-namespace {
-class MipsABIInfo : public ABIInfo {
- bool IsO32;
- unsigned MinABIStackAlignInBytes, StackAlignInBytes;
- void CoerceToIntArgs(uint64_t TySize,
- SmallVectorImpl<llvm::Type *> &ArgList) const;
- llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
- llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
- llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
-public:
- MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
- ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
- StackAlignInBytes(IsO32 ? 8 : 16) {}
-
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
- void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
- ABIArgInfo extendType(QualType Ty) const;
-};
-
-class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
- unsigned SizeOfUnwindException;
-public:
- MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
- : TargetCodeGenInfo(std::make_unique<MipsABIInfo>(CGT, IsO32)),
- SizeOfUnwindException(IsO32 ? 24 : 32) {}
-
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
- return 29;
- }
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override {
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
- if (!FD) return;
- llvm::Function *Fn = cast<llvm::Function>(GV);
-
- if (FD->hasAttr<MipsLongCallAttr>())
- Fn->addFnAttr("long-call");
- else if (FD->hasAttr<MipsShortCallAttr>())
- Fn->addFnAttr("short-call");
-
- // Other attributes do not have a meaning for declarations.
- if (GV->isDeclaration())
- return;
-
- if (FD->hasAttr<Mips16Attr>()) {
- Fn->addFnAttr("mips16");
- }
- else if (FD->hasAttr<NoMips16Attr>()) {
- Fn->addFnAttr("nomips16");
- }
-
- if (FD->hasAttr<MicroMipsAttr>())
- Fn->addFnAttr("micromips");
- else if (FD->hasAttr<NoMicroMipsAttr>())
- Fn->addFnAttr("nomicromips");
-
- const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
- if (!Attr)
- return;
-
- const char *Kind;
- switch (Attr->getInterrupt()) {
- case MipsInterruptAttr::eic: Kind = "eic"; break;
- case MipsInterruptAttr::sw0: Kind = "sw0"; break;
- case MipsInterruptAttr::sw1: Kind = "sw1"; break;
- case MipsInterruptAttr::hw0: Kind = "hw0"; break;
- case MipsInterruptAttr::hw1: Kind = "hw1"; break;
- case MipsInterruptAttr::hw2: Kind = "hw2"; break;
- case MipsInterruptAttr::hw3: Kind = "hw3"; break;
- case MipsInterruptAttr::hw4: Kind = "hw4"; break;
- case MipsInterruptAttr::hw5: Kind = "hw5"; break;
- }
-
- Fn->addFnAttr("interrupt", Kind);
-
- }
-
- bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override;
-
- unsigned getSizeOfUnwindException() const override {
- return SizeOfUnwindException;
- }
-};
-}
-
-void MipsABIInfo::CoerceToIntArgs(
- uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
- llvm::IntegerType *IntTy =
- llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
-
- // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
- for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
- ArgList.push_back(IntTy);
-
- // If necessary, add one more integer type to ArgList.
- unsigned R = TySize % (MinABIStackAlignInBytes * 8);
-
- if (R)
- ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
-}
-
-// In N32/64, an aligned double precision floating point field is passed in
-// a register.
-llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
- SmallVector<llvm::Type*, 8> ArgList, IntArgList;
-
- if (IsO32) {
- CoerceToIntArgs(TySize, ArgList);
- return llvm::StructType::get(getVMContext(), ArgList);
- }
-
- if (Ty->isComplexType())
- return CGT.ConvertType(Ty);
-
- const RecordType *RT = Ty->getAs<RecordType>();
-
- // Unions/vectors are passed in integer registers.
- if (!RT || !RT->isStructureOrClassType()) {
- CoerceToIntArgs(TySize, ArgList);
- return llvm::StructType::get(getVMContext(), ArgList);
- }
-
- const RecordDecl *RD = RT->getDecl();
- const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
- assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
-
- uint64_t LastOffset = 0;
- unsigned idx = 0;
- llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
-
- // Iterate over fields in the struct/class and check if there are any aligned
- // double fields.
- for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
- i != e; ++i, ++idx) {
- const QualType Ty = i->getType();
- const BuiltinType *BT = Ty->getAs<BuiltinType>();
-
- if (!BT || BT->getKind() != BuiltinType::Double)
- continue;
-
- uint64_t Offset = Layout.getFieldOffset(idx);
- if (Offset % 64) // Ignore doubles that are not aligned.
- continue;
-
- // Add ((Offset - LastOffset) / 64) args of type i64.
- for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
- ArgList.push_back(I64);
-
- // Add double type.
- ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
- LastOffset = Offset + 64;
- }
-
- CoerceToIntArgs(TySize - LastOffset, IntArgList);
- ArgList.append(IntArgList.begin(), IntArgList.end());
-
- return llvm::StructType::get(getVMContext(), ArgList);
-}
-
-llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
- uint64_t Offset) const {
- if (OrigOffset + MinABIStackAlignInBytes > Offset)
- return nullptr;
-
- return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
-}
-
-ABIArgInfo
-MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- uint64_t OrigOffset = Offset;
- uint64_t TySize = getContext().getTypeSize(Ty);
- uint64_t Align = getContext().getTypeAlign(Ty) / 8;
-
- Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
- (uint64_t)StackAlignInBytes);
- unsigned CurrOffset = llvm::alignTo(Offset, Align);
- Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
-
- if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
- // Ignore empty aggregates.
- if (TySize == 0)
- return ABIArgInfo::getIgnore();
-
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
- Offset = OrigOffset + MinABIStackAlignInBytes;
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
- }
-
- // If we have reached here, aggregates are passed directly by coercing to
- // another structure type. Padding is inserted if the offset of the
- // aggregate is unaligned.
- ABIArgInfo ArgInfo =
- ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
- getPaddingType(OrigOffset, CurrOffset));
- ArgInfo.setInReg(true);
- return ArgInfo;
- }
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- // Make sure we pass indirectly things that are too large.
- if (const auto *EIT = Ty->getAs<ExtIntType>())
- if (EIT->getNumBits() > 128 ||
- (EIT->getNumBits() > 64 &&
- !getContext().getTargetInfo().hasInt128Type()))
- return getNaturalAlignIndirect(Ty);
-
- // All integral types are promoted to the GPR width.
- if (Ty->isIntegralOrEnumerationType())
- return extendType(Ty);
-
- return ABIArgInfo::getDirect(
- nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
-}
-
-llvm::Type*
-MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
- const RecordType *RT = RetTy->getAs<RecordType>();
- SmallVector<llvm::Type*, 8> RTList;
-
- if (RT && RT->isStructureOrClassType()) {
- const RecordDecl *RD = RT->getDecl();
- const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
- unsigned FieldCnt = Layout.getFieldCount();
-
- // N32/64 returns struct/classes in floating point registers if the
- // following conditions are met:
- // 1. The size of the struct/class is no larger than 128-bit.
- // 2. The struct/class has one or two fields all of which are floating
- // point types.
- // 3. The offset of the first field is zero (this follows what gcc does).
- //
- // Any other composite results are returned in integer registers.
- //
- if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
- RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
- for (; b != e; ++b) {
- const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
-
- if (!BT || !BT->isFloatingPoint())
- break;
-
- RTList.push_back(CGT.ConvertType(b->getType()));
- }
-
- if (b == e)
- return llvm::StructType::get(getVMContext(), RTList,
- RD->hasAttr<PackedAttr>());
-
- RTList.clear();
- }
- }
-
- CoerceToIntArgs(Size, RTList);
- return llvm::StructType::get(getVMContext(), RTList);
-}
-
-ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
- uint64_t Size = getContext().getTypeSize(RetTy);
-
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- // O32 doesn't treat zero-sized structs differently from other structs.
- // However, N32/N64 ignores zero sized return values.
- if (!IsO32 && Size == 0)
- return ABIArgInfo::getIgnore();
-
- if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
- if (Size <= 128) {
- if (RetTy->isAnyComplexType())
- return ABIArgInfo::getDirect();
-
- // O32 returns integer vectors in registers and N32/N64 returns all small
- // aggregates in registers.
- if (!IsO32 ||
- (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
- ABIArgInfo ArgInfo =
- ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
- ArgInfo.setInReg(true);
- return ArgInfo;
- }
- }
-
- return getNaturalAlignIndirect(RetTy);
- }
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
-
- // Make sure we pass indirectly things that are too large.
- if (const auto *EIT = RetTy->getAs<ExtIntType>())
- if (EIT->getNumBits() > 128 ||
- (EIT->getNumBits() > 64 &&
- !getContext().getTargetInfo().hasInt128Type()))
- return getNaturalAlignIndirect(RetTy);
-
- if (isPromotableIntegerTypeForABI(RetTy))
- return ABIArgInfo::getExtend(RetTy);
-
- if ((RetTy->isUnsignedIntegerOrEnumerationType() ||
- RetTy->isSignedIntegerOrEnumerationType()) && Size == 32 && !IsO32)
- return ABIArgInfo::getSignExtend(RetTy);
-
- return ABIArgInfo::getDirect();
-}
-
-void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
- ABIArgInfo &RetInfo = FI.getReturnInfo();
- if (!getCXXABI().classifyReturnType(FI))
- RetInfo = classifyReturnType(FI.getReturnType());
-
- // Check if a pointer to an aggregate is passed as a hidden argument.
- uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
-
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type, Offset);
-}
-
-Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType OrigTy) const {
- QualType Ty = OrigTy;
-
- // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
- // Pointers are also promoted in the same way but this only matters for N32.
- unsigned SlotSizeInBits = IsO32 ? 32 : 64;
- unsigned PtrWidth = getTarget().getPointerWidth(0);
- bool DidPromote = false;
- if ((Ty->isIntegerType() &&
- getContext().getIntWidth(Ty) < SlotSizeInBits) ||
- (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
- DidPromote = true;
- Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
- Ty->isSignedIntegerType());
- }
-
- auto TyInfo = getContext().getTypeInfoInChars(Ty);
-
- // The alignment of things in the argument area is never larger than
- // StackAlignInBytes.
- TyInfo.Align =
- std::min(TyInfo.Align, CharUnits::fromQuantity(StackAlignInBytes));
-
- // MinABIStackAlignInBytes is the size of argument slots on the stack.
- CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
-
- Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
- TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
-
-
- // If there was a promotion, "unpromote" into a temporary.
- // TODO: can we just use a pointer into a subset of the original slot?
- if (DidPromote) {
- Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
- llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);
-
- // Truncate down to the right width.
- llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
- : CGF.IntPtrTy);
- llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
- if (OrigTy->isPointerType())
- V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());
-
- CGF.Builder.CreateStore(V, Temp);
- Addr = Temp;
- }
-
- return Addr;
-}
-
-ABIArgInfo MipsABIInfo::extendType(QualType Ty) const {
- int TySize = getContext().getTypeSize(Ty);
-
- // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
- if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
- return ABIArgInfo::getSignExtend(Ty);
-
- return ABIArgInfo::getExtend(Ty);
-}
-
-bool
-MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const {
- // This information comes from gcc's implementation, which seems to
- // as canonical as it gets.
-
- // Everything on MIPS is 4 bytes. Double-precision FP registers
- // are aliased to pairs of single-precision FP registers.
- llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
-
- // 0-31 are the general purpose registers, $0 - $31.
- // 32-63 are the floating-point registers, $f0 - $f31.
- // 64 and 65 are the multiply/divide registers, $hi and $lo.
- // 66 is the (notional, I think) register for signal-handler return.
- AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
-
- // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
- // They are one bit wide and ignored here.
-
- // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
- // (coprocessor 1 is the FP unit)
- // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
- // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
- // 176-181 are the DSP accumulator registers.
- AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
- return false;
-}
-
-//===----------------------------------------------------------------------===//
-// M68k ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class M68kTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- M68kTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M) const override;
-};
-
-} // namespace
-
-void M68kTargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
- if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
- if (const auto *attr = FD->getAttr<M68kInterruptAttr>()) {
- // Handle 'interrupt' attribute:
- llvm::Function *F = cast<llvm::Function>(GV);
-
- // Step 1: Set ISR calling convention.
- F->setCallingConv(llvm::CallingConv::M68k_INTR);
-
- // Step 2: Add attributes goodness.
- F->addFnAttr(llvm::Attribute::NoInline);
-
- // Step 3: Emit ISR vector alias.
- unsigned Num = attr->getNumber() / 2;
- llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
- "__isr_" + Twine(Num), F);
- }
- }
-}
-
-//===----------------------------------------------------------------------===//
-// AVR ABI Implementation. Documented at
-// https://gcc.gnu.org/wiki/avr-gcc#Calling_Convention
-// https://gcc.gnu.org/wiki/avr-gcc#Reduced_Tiny
-//===----------------------------------------------------------------------===//
-
-namespace {
-class AVRABIInfo : public DefaultABIInfo {
-public:
- AVRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
-
- ABIArgInfo classifyReturnType(QualType Ty) const {
- // A return struct with size less than or equal to 8 bytes is returned
- // directly via registers R18-R25.
- if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) <= 64)
- return ABIArgInfo::getDirect();
- else
- return DefaultABIInfo::classifyReturnType(Ty);
- }
-
- // Just copy the original implementation of DefaultABIInfo::computeInfo(),
- // since DefaultABIInfo::classify{Return,Argument}Type() are not virtual.
- void computeInfo(CGFunctionInfo &FI) const override {
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type);
- }
-};
-
-class AVRTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- AVRTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<AVRABIInfo>(CGT)) {}
-
- LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
- const VarDecl *D) const override {
- // Check if a global/static variable is defined within address space 1
- // but not constant.
- LangAS AS = D->getType().getAddressSpace();
- if (isTargetAddressSpace(AS) && toTargetAddressSpace(AS) == 1 &&
- !D->getType().isConstQualified())
- CGM.getDiags().Report(D->getLocation(),
- diag::err_verify_nonconst_addrspace)
- << "__flash";
- return TargetCodeGenInfo::getGlobalVarAddressSpace(CGM, D);
- }
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override {
- if (GV->isDeclaration())
- return;
- const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
- if (!FD) return;
- auto *Fn = cast<llvm::Function>(GV);
-
- if (FD->getAttr<AVRInterruptAttr>())
- Fn->addFnAttr("interrupt");
-
- if (FD->getAttr<AVRSignalAttr>())
- Fn->addFnAttr("signal");
- }
-};
-}
-
-//===----------------------------------------------------------------------===//
-// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
-// Currently subclassed only to implement custom OpenCL C function attribute
-// handling.
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
-public:
- TCETargetCodeGenInfo(CodeGenTypes &CGT)
- : DefaultTargetCodeGenInfo(CGT) {}
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M) const override;
-};
-
-void TCETargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
- if (GV->isDeclaration())
- return;
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
- if (!FD) return;
-
- llvm::Function *F = cast<llvm::Function>(GV);
-
- if (M.getLangOpts().OpenCL) {
- if (FD->hasAttr<OpenCLKernelAttr>()) {
- // OpenCL C Kernel functions are not subject to inlining
- F->addFnAttr(llvm::Attribute::NoInline);
- const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
- if (Attr) {
- // Convert the reqd_work_group_size() attributes to metadata.
- llvm::LLVMContext &Context = F->getContext();
- llvm::NamedMDNode *OpenCLMetadata =
- M.getModule().getOrInsertNamedMetadata(
- "opencl.kernel_wg_size_info");
-
- SmallVector<llvm::Metadata *, 5> Operands;
- Operands.push_back(llvm::ConstantAsMetadata::get(F));
-
- Operands.push_back(
- llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
- M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
- Operands.push_back(
- llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
- M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
- Operands.push_back(
- llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
- M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));
-
- // Add a boolean constant operand for "required" (true) or "hint"
- // (false) for implementing the work_group_size_hint attr later.
- // Currently always true as the hint is not yet implemented.
- Operands.push_back(
- llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
- OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
- }
- }
- }
-}
-
-}
-
-//===----------------------------------------------------------------------===//
-// Hexagon ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class HexagonABIInfo : public DefaultABIInfo {
-public:
- HexagonABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
-
-private:
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType RetTy, unsigned *RegsLeft) const;
-
- void computeInfo(CGFunctionInfo &FI) const override;
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
- Address EmitVAArgFromMemory(CodeGenFunction &CFG, Address VAListAddr,
- QualType Ty) const;
- Address EmitVAArgForHexagon(CodeGenFunction &CFG, Address VAListAddr,
- QualType Ty) const;
- Address EmitVAArgForHexagonLinux(CodeGenFunction &CFG, Address VAListAddr,
- QualType Ty) const;
-};
-
-class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<HexagonABIInfo>(CGT)) {}
-
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
- return 29;
- }
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &GCM) const override {
- if (GV->isDeclaration())
- return;
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
- if (!FD)
- return;
- }
-};
-
-} // namespace
-
-void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
- unsigned RegsLeft = 6;
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type, &RegsLeft);
-}
-
-static bool HexagonAdjustRegsLeft(uint64_t Size, unsigned *RegsLeft) {
- assert(Size <= 64 && "Not expecting to pass arguments larger than 64 bits"
- " through registers");
-
- if (*RegsLeft == 0)
- return false;
-
- if (Size <= 32) {
- (*RegsLeft)--;
- return true;
- }
-
- if (2 <= (*RegsLeft & (~1U))) {
- *RegsLeft = (*RegsLeft & (~1U)) - 2;
- return true;
- }
-
- // Next available register was r5 but candidate was greater than 32-bits so it
- // has to go on the stack. However we still consume r5
- if (*RegsLeft == 1)
- *RegsLeft = 0;
-
- return false;
-}
-
-ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty,
- unsigned *RegsLeft) const {
- if (!isAggregateTypeForABI(Ty)) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- uint64_t Size = getContext().getTypeSize(Ty);
- if (Size <= 64)
- HexagonAdjustRegsLeft(Size, RegsLeft);
-
- if (Size > 64 && Ty->isExtIntType())
- return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
-
- return isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
- : ABIArgInfo::getDirect();
- }
-
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
-
- // Ignore empty records.
- if (isEmptyRecord(getContext(), Ty, true))
- return ABIArgInfo::getIgnore();
-
- uint64_t Size = getContext().getTypeSize(Ty);
- unsigned Align = getContext().getTypeAlign(Ty);
-
- if (Size > 64)
- return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
-
- if (HexagonAdjustRegsLeft(Size, RegsLeft))
- Align = Size <= 32 ? 32 : 64;
- if (Size <= Align) {
- // Pass in the smallest viable integer type.
- if (!llvm::isPowerOf2_64(Size))
- Size = llvm::NextPowerOf2(Size);
- return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
- }
- return DefaultABIInfo::classifyArgumentType(Ty);
-}
-
-ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- const TargetInfo &T = CGT.getTarget();
- uint64_t Size = getContext().getTypeSize(RetTy);
-
- if (RetTy->getAs<VectorType>()) {
- // HVX vectors are returned in vector registers or register pairs.
- if (T.hasFeature("hvx")) {
- assert(T.hasFeature("hvx-length64b") || T.hasFeature("hvx-length128b"));
- uint64_t VecSize = T.hasFeature("hvx-length64b") ? 64*8 : 128*8;
- if (Size == VecSize || Size == 2*VecSize)
- return ABIArgInfo::getDirectInReg();
- }
- // Large vector types should be returned via memory.
- if (Size > 64)
- return getNaturalAlignIndirect(RetTy);
- }
-
- if (!isAggregateTypeForABI(RetTy)) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
-
- if (Size > 64 && RetTy->isExtIntType())
- return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
-
- return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
- : ABIArgInfo::getDirect();
- }
-
- if (isEmptyRecord(getContext(), RetTy, true))
- return ABIArgInfo::getIgnore();
-
- // Aggregates <= 8 bytes are returned in registers, other aggregates
- // are returned indirectly.
- if (Size <= 64) {
- // Return in the smallest viable integer type.
- if (!llvm::isPowerOf2_64(Size))
- Size = llvm::NextPowerOf2(Size);
- return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
- }
- return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
-}
-
-Address HexagonABIInfo::EmitVAArgFromMemory(CodeGenFunction &CGF,
- Address VAListAddr,
- QualType Ty) const {
- // Load the overflow area pointer.
- Address __overflow_area_pointer_p =
- CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
- llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
- __overflow_area_pointer_p, "__overflow_area_pointer");
-
- uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
- if (Align > 4) {
- // Alignment should be a power of 2.
- assert((Align & (Align - 1)) == 0 && "Alignment is not power of 2!");
-
- // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
- llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
-
- // Add offset to the current pointer to access the argument.
- __overflow_area_pointer =
- CGF.Builder.CreateGEP(CGF.Int8Ty, __overflow_area_pointer, Offset);
- llvm::Value *AsInt =
- CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
-
- // Create a mask which should be "AND"ed
- // with (overflow_arg_area + align - 1)
- llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -(int)Align);
- __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
- CGF.Builder.CreateAnd(AsInt, Mask), __overflow_area_pointer->getType(),
- "__overflow_area_pointer.align");
- }
-
- // Get the type of the argument from memory and bitcast
- // overflow area pointer to the argument type.
- llvm::Type *PTy = CGF.ConvertTypeForMem(Ty);
- Address AddrTyped = CGF.Builder.CreateBitCast(
- Address(__overflow_area_pointer, CharUnits::fromQuantity(Align)),
- llvm::PointerType::getUnqual(PTy));
-
- // Round up to the minimum stack alignment for varargs which is 4 bytes.
- uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
-
- __overflow_area_pointer = CGF.Builder.CreateGEP(
- CGF.Int8Ty, __overflow_area_pointer,
- llvm::ConstantInt::get(CGF.Int32Ty, Offset),
- "__overflow_area_pointer.next");
- CGF.Builder.CreateStore(__overflow_area_pointer, __overflow_area_pointer_p);
-
- return AddrTyped;
-}
-
-Address HexagonABIInfo::EmitVAArgForHexagon(CodeGenFunction &CGF,
- Address VAListAddr,
- QualType Ty) const {
- // FIXME: Need to handle alignment
- llvm::Type *BP = CGF.Int8PtrTy;
- llvm::Type *BPP = CGF.Int8PtrPtrTy;
- CGBuilderTy &Builder = CGF.Builder;
- Address VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
- llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
- // Handle address alignment for type alignment > 32 bits
- uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
- if (TyAlign > 4) {
- assert((TyAlign & (TyAlign - 1)) == 0 && "Alignment is not power of 2!");
- llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
- AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
- AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
- Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
- }
- llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- Address AddrTyped = Builder.CreateBitCast(
- Address(Addr, CharUnits::fromQuantity(TyAlign)), PTy);
-
- uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
- llvm::Value *NextAddr = Builder.CreateGEP(
- CGF.Int8Ty, Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
- Builder.CreateStore(NextAddr, VAListAddrAsBPP);
-
- return AddrTyped;
-}
-
-Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
- Address VAListAddr,
- QualType Ty) const {
- int ArgSize = CGF.getContext().getTypeSize(Ty) / 8;
-
- if (ArgSize > 8)
- return EmitVAArgFromMemory(CGF, VAListAddr, Ty);
-
- // Here we have check if the argument is in register area or
- // in overflow area.
- // If the saved register area pointer + argsize rounded up to alignment >
- // saved register area end pointer, argument is in overflow area.
- unsigned RegsLeft = 6;
- Ty = CGF.getContext().getCanonicalType(Ty);
- (void)classifyArgumentType(Ty, &RegsLeft);
-
- llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
- llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
- llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
- llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
-
- // Get rounded size of the argument.GCC does not allow vararg of
- // size < 4 bytes. We follow the same logic here.
- ArgSize = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
- int ArgAlign = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
-
- // Argument may be in saved register area
- CGF.EmitBlock(MaybeRegBlock);
-
- // Load the current saved register area pointer.
- Address __current_saved_reg_area_pointer_p = CGF.Builder.CreateStructGEP(
- VAListAddr, 0, "__current_saved_reg_area_pointer_p");
- llvm::Value *__current_saved_reg_area_pointer = CGF.Builder.CreateLoad(
- __current_saved_reg_area_pointer_p, "__current_saved_reg_area_pointer");
-
- // Load the saved register area end pointer.
- Address __saved_reg_area_end_pointer_p = CGF.Builder.CreateStructGEP(
- VAListAddr, 1, "__saved_reg_area_end_pointer_p");
- llvm::Value *__saved_reg_area_end_pointer = CGF.Builder.CreateLoad(
- __saved_reg_area_end_pointer_p, "__saved_reg_area_end_pointer");
-
- // If the size of argument is > 4 bytes, check if the stack
- // location is aligned to 8 bytes
- if (ArgAlign > 4) {
-
- llvm::Value *__current_saved_reg_area_pointer_int =
- CGF.Builder.CreatePtrToInt(__current_saved_reg_area_pointer,
- CGF.Int32Ty);
-
- __current_saved_reg_area_pointer_int = CGF.Builder.CreateAdd(
- __current_saved_reg_area_pointer_int,
- llvm::ConstantInt::get(CGF.Int32Ty, (ArgAlign - 1)),
- "align_current_saved_reg_area_pointer");
-
- __current_saved_reg_area_pointer_int =
- CGF.Builder.CreateAnd(__current_saved_reg_area_pointer_int,
- llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
- "align_current_saved_reg_area_pointer");
-
- __current_saved_reg_area_pointer =
- CGF.Builder.CreateIntToPtr(__current_saved_reg_area_pointer_int,
- __current_saved_reg_area_pointer->getType(),
- "align_current_saved_reg_area_pointer");
- }
-
- llvm::Value *__new_saved_reg_area_pointer =
- CGF.Builder.CreateGEP(CGF.Int8Ty, __current_saved_reg_area_pointer,
- llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
- "__new_saved_reg_area_pointer");
-
- llvm::Value *UsingStack = 0;
- UsingStack = CGF.Builder.CreateICmpSGT(__new_saved_reg_area_pointer,
- __saved_reg_area_end_pointer);
-
- CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, InRegBlock);
-
- // Argument in saved register area
- // Implement the block where argument is in register saved area
- CGF.EmitBlock(InRegBlock);
-
- llvm::Type *PTy = CGF.ConvertType(Ty);
- llvm::Value *__saved_reg_area_p = CGF.Builder.CreateBitCast(
- __current_saved_reg_area_pointer, llvm::PointerType::getUnqual(PTy));
-
- CGF.Builder.CreateStore(__new_saved_reg_area_pointer,
- __current_saved_reg_area_pointer_p);
-
- CGF.EmitBranch(ContBlock);
-
- // Argument in overflow area
- // Implement the block where the argument is in overflow area.
- CGF.EmitBlock(OnStackBlock);
-
- // Load the overflow area pointer
- Address __overflow_area_pointer_p =
- CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
- llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
- __overflow_area_pointer_p, "__overflow_area_pointer");
-
- // Align the overflow area pointer according to the alignment of the argument
- if (ArgAlign > 4) {
- llvm::Value *__overflow_area_pointer_int =
- CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
-
- __overflow_area_pointer_int =
- CGF.Builder.CreateAdd(__overflow_area_pointer_int,
- llvm::ConstantInt::get(CGF.Int32Ty, ArgAlign - 1),
- "align_overflow_area_pointer");
-
- __overflow_area_pointer_int =
- CGF.Builder.CreateAnd(__overflow_area_pointer_int,
- llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
- "align_overflow_area_pointer");
-
- __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
- __overflow_area_pointer_int, __overflow_area_pointer->getType(),
- "align_overflow_area_pointer");
- }
-
- // Get the pointer for next argument in overflow area and store it
- // to overflow area pointer.
- llvm::Value *__new_overflow_area_pointer = CGF.Builder.CreateGEP(
- CGF.Int8Ty, __overflow_area_pointer,
- llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
- "__overflow_area_pointer.next");
-
- CGF.Builder.CreateStore(__new_overflow_area_pointer,
- __overflow_area_pointer_p);
-
- CGF.Builder.CreateStore(__new_overflow_area_pointer,
- __current_saved_reg_area_pointer_p);
-
- // Bitcast the overflow area pointer to the type of argument.
- llvm::Type *OverflowPTy = CGF.ConvertTypeForMem(Ty);
- llvm::Value *__overflow_area_p = CGF.Builder.CreateBitCast(
- __overflow_area_pointer, llvm::PointerType::getUnqual(OverflowPTy));
-
- CGF.EmitBranch(ContBlock);
-
- // Get the correct pointer to load the variable argument
- // Implement the ContBlock
- CGF.EmitBlock(ContBlock);
-
- llvm::Type *MemPTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
- llvm::PHINode *ArgAddr = CGF.Builder.CreatePHI(MemPTy, 2, "vaarg.addr");
- ArgAddr->addIncoming(__saved_reg_area_p, InRegBlock);
- ArgAddr->addIncoming(__overflow_area_p, OnStackBlock);
-
- return Address(ArgAddr, CharUnits::fromQuantity(ArgAlign));
-}
-
-Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
-
- if (getTarget().getTriple().isMusl())
- return EmitVAArgForHexagonLinux(CGF, VAListAddr, Ty);
-
- return EmitVAArgForHexagon(CGF, VAListAddr, Ty);
-}
-
-//===----------------------------------------------------------------------===//
-// Lanai ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-class LanaiABIInfo : public DefaultABIInfo {
-public:
- LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
-
- bool shouldUseInReg(QualType Ty, CCState &State) const;
-
- void computeInfo(CGFunctionInfo &FI) const override {
- CCState State(FI);
- // Lanai uses 4 registers to pass arguments unless the function has the
- // regparm attribute set.
- if (FI.getHasRegParm()) {
- State.FreeRegs = FI.getRegParm();
- } else {
- State.FreeRegs = 4;
- }
-
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &I : FI.arguments())
- I.info = classifyArgumentType(I.type, State);
- }
-
- ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
- ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
-};
-} // end anonymous namespace
-
-bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
- unsigned Size = getContext().getTypeSize(Ty);
- unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;
-
- if (SizeInRegs == 0)
- return false;
-
- if (SizeInRegs > State.FreeRegs) {
- State.FreeRegs = 0;
- return false;
- }
-
- State.FreeRegs -= SizeInRegs;
-
- return true;
-}
-
-ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
- CCState &State) const {
- if (!ByVal) {
- if (State.FreeRegs) {
- --State.FreeRegs; // Non-byval indirects just use one pointer.
- return getNaturalAlignIndirectInReg(Ty);
- }
- return getNaturalAlignIndirect(Ty, false);
- }
-
- // Compute the byval alignment.
- const unsigned MinABIStackAlignInBytes = 4;
- unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
- return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
- /*Realign=*/TypeAlign >
- MinABIStackAlignInBytes);
-}
-
-ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
- CCState &State) const {
- // Check with the C++ ABI first.
- const RecordType *RT = Ty->getAs<RecordType>();
- if (RT) {
- CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
- if (RAA == CGCXXABI::RAA_Indirect) {
- return getIndirectResult(Ty, /*ByVal=*/false, State);
- } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
- return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
- }
- }
-
- if (isAggregateTypeForABI(Ty)) {
- // Structures with flexible arrays are always indirect.
- if (RT && RT->getDecl()->hasFlexibleArrayMember())
- return getIndirectResult(Ty, /*ByVal=*/true, State);
-
- // Ignore empty structs/unions.
- if (isEmptyRecord(getContext(), Ty, true))
- return ABIArgInfo::getIgnore();
-
- llvm::LLVMContext &LLVMContext = getVMContext();
- unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
- if (SizeInRegs <= State.FreeRegs) {
- llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
- SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
- llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
- State.FreeRegs -= SizeInRegs;
- return ABIArgInfo::getDirectInReg(Result);
- } else {
- State.FreeRegs = 0;
- }
- return getIndirectResult(Ty, true, State);
- }
-
- // Treat an enum type as its underlying type.
- if (const auto *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- bool InReg = shouldUseInReg(Ty, State);
-
- // Don't pass >64 bit integers in registers.
- if (const auto *EIT = Ty->getAs<ExtIntType>())
- if (EIT->getNumBits() > 64)
- return getIndirectResult(Ty, /*ByVal=*/true, State);
-
- if (isPromotableIntegerTypeForABI(Ty)) {
- if (InReg)
- return ABIArgInfo::getDirectInReg();
- return ABIArgInfo::getExtend(Ty);
- }
- if (InReg)
- return ABIArgInfo::getDirectInReg();
- return ABIArgInfo::getDirect();
-}
-
-namespace {
-class LanaiTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<LanaiABIInfo>(CGT)) {}
-};
-}
-
-//===----------------------------------------------------------------------===//
-// AMDGPU ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-class AMDGPUABIInfo final : public DefaultABIInfo {
-private:
- static const unsigned MaxNumRegsForArgsRet = 16;
-
- unsigned numRegsForType(QualType Ty) const;
-
- bool isHomogeneousAggregateBaseType(QualType Ty) const override;
- bool isHomogeneousAggregateSmallEnough(const Type *Base,
- uint64_t Members) const override;
-
- // Coerce HIP scalar pointer arguments from generic pointers to global ones.
- llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
- unsigned ToAS) const {
- // Single value types.
- if (Ty->isPointerTy() && Ty->getPointerAddressSpace() == FromAS)
- return llvm::PointerType::get(
- cast<llvm::PointerType>(Ty)->getElementType(), ToAS);
- return Ty;
- }
-
-public:
- explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) :
- DefaultABIInfo(CGT) {}
-
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
- ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegsLeft) const;
-
- void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-};
-
-bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
- return true;
-}
-
-bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
- const Type *Base, uint64_t Members) const {
- uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;
-
- // Homogeneous Aggregates may occupy at most 16 registers.
- return Members * NumRegs <= MaxNumRegsForArgsRet;
-}
-
-/// Estimate number of registers the type will use when passed in registers.
-unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
- unsigned NumRegs = 0;
-
- if (const VectorType *VT = Ty->getAs<VectorType>()) {
- // Compute from the number of elements. The reported size is based on the
- // in-memory size, which includes the padding 4th element for 3-vectors.
- QualType EltTy = VT->getElementType();
- unsigned EltSize = getContext().getTypeSize(EltTy);
-
- // 16-bit element vectors should be passed as packed.
- if (EltSize == 16)
- return (VT->getNumElements() + 1) / 2;
-
- unsigned EltNumRegs = (EltSize + 31) / 32;
- return EltNumRegs * VT->getNumElements();
- }
-
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
- assert(!RD->hasFlexibleArrayMember());
-
- for (const FieldDecl *Field : RD->fields()) {
- QualType FieldTy = Field->getType();
- NumRegs += numRegsForType(FieldTy);
- }
-
- return NumRegs;
- }
-
- return (getContext().getTypeSize(Ty) + 31) / 32;
-}
-
-void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
- llvm::CallingConv::ID CC = FI.getCallingConvention();
-
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
-
- unsigned NumRegsLeft = MaxNumRegsForArgsRet;
- for (auto &Arg : FI.arguments()) {
- if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
- Arg.info = classifyKernelArgumentType(Arg.type);
- } else {
- Arg.info = classifyArgumentType(Arg.type, NumRegsLeft);
- }
- }
-}
-
-Address AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- llvm_unreachable("AMDGPU does not support varargs");
-}
-
-ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
- if (isAggregateTypeForABI(RetTy)) {
- // Records with non-trivial destructors/copy-constructors should not be
- // returned by value.
- if (!getRecordArgABI(RetTy, getCXXABI())) {
- // Ignore empty structs/unions.
- if (isEmptyRecord(getContext(), RetTy, true))
- return ABIArgInfo::getIgnore();
-
- // Lower single-element structs to just return a regular value.
- if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
- return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
-
- if (const RecordType *RT = RetTy->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return DefaultABIInfo::classifyReturnType(RetTy);
- }
-
- // Pack aggregates <= 4 bytes into single VGPR or pair.
- uint64_t Size = getContext().getTypeSize(RetTy);
- if (Size <= 16)
- return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
-
- if (Size <= 32)
- return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
-
- if (Size <= 64) {
- llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
- return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
- }
-
- if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
- return ABIArgInfo::getDirect();
- }
- }
-
- // Otherwise just do the default thing.
- return DefaultABIInfo::classifyReturnType(RetTy);
-}
-
-/// For kernels all parameters are really passed in a special buffer. It doesn't
-/// make sense to pass anything byval, so everything must be direct.
-ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- // TODO: Can we omit empty structs?
-
- if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
- Ty = QualType(SeltTy, 0);
-
- llvm::Type *OrigLTy = CGT.ConvertType(Ty);
- llvm::Type *LTy = OrigLTy;
- if (getContext().getLangOpts().HIP) {
- LTy = coerceKernelArgumentType(
- OrigLTy, /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default),
- /*ToAS=*/getContext().getTargetAddressSpace(LangAS::cuda_device));
- }
-
- // FIXME: Should also use this for OpenCL, but it requires addressing the
- // problem of kernels being called.
- //
- // FIXME: This doesn't apply the optimization of coercing pointers in structs
- // to global address space when using byref. This would require implementing a
- // new kind of coercion of the in-memory type when for indirect arguments.
- if (!getContext().getLangOpts().OpenCL && LTy == OrigLTy &&
- isAggregateTypeForABI(Ty)) {
- return ABIArgInfo::getIndirectAliased(
- getContext().getTypeAlignInChars(Ty),
- getContext().getTargetAddressSpace(LangAS::opencl_constant),
- false /*Realign*/, nullptr /*Padding*/);
- }
-
- // If we set CanBeFlattened to true, CodeGen will expand the struct to its
- // individual elements, which confuses the Clover OpenCL backend; therefore we
- // have to set it to false here. Other args of getDirect() are just defaults.
- return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
-}
-
-ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty,
- unsigned &NumRegsLeft) const {
- assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");
-
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- if (isAggregateTypeForABI(Ty)) {
- // Records with non-trivial destructors/copy-constructors should not be
- // passed by value.
- if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
-
- // Ignore empty structs/unions.
- if (isEmptyRecord(getContext(), Ty, true))
- return ABIArgInfo::getIgnore();
-
- // Lower single-element structs to just pass a regular value. TODO: We
- // could do reasonable-size multiple-element structs too, using getExpand(),
- // though watch out for things like bitfields.
- if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
- return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
-
- if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
- if (RD->hasFlexibleArrayMember())
- return DefaultABIInfo::classifyArgumentType(Ty);
- }
-
- // Pack aggregates <= 8 bytes into single VGPR or pair.
- uint64_t Size = getContext().getTypeSize(Ty);
- if (Size <= 64) {
- unsigned NumRegs = (Size + 31) / 32;
- NumRegsLeft -= std::min(NumRegsLeft, NumRegs);
-
- if (Size <= 16)
- return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
-
- if (Size <= 32)
- return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
-
- // XXX: Should this be i64 instead, and should the limit increase?
- llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
- return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
- }
-
- if (NumRegsLeft > 0) {
- unsigned NumRegs = numRegsForType(Ty);
- if (NumRegsLeft >= NumRegs) {
- NumRegsLeft -= NumRegs;
- return ABIArgInfo::getDirect();
- }
- }
- }
-
- // Otherwise just do the default thing.
- ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty);
- if (!ArgInfo.isIndirect()) {
- unsigned NumRegs = numRegsForType(Ty);
- NumRegsLeft -= std::min(NumRegs, NumRegsLeft);
- }
-
- return ArgInfo;
-}
-
-class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<AMDGPUABIInfo>(CGT)) {}
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M) const override;
- unsigned getOpenCLKernelCallingConv() const override;
-
- llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
- llvm::PointerType *T, QualType QT) const override;
-
- LangAS getASTAllocaAddressSpace() const override {
- return getLangASFromTargetAS(
- getABIInfo().getDataLayout().getAllocaAddrSpace());
- }
- LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
- const VarDecl *D) const override;
- llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
- SyncScope Scope,
- llvm::AtomicOrdering Ordering,
- llvm::LLVMContext &Ctx) const override;
- llvm::Function *
- createEnqueuedBlockKernel(CodeGenFunction &CGF,
- llvm::Function *BlockInvokeFunc,
- llvm::Value *BlockLiteral) const override;
- bool shouldEmitStaticExternCAliases() const override;
- void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
-};
-}
-
-static bool requiresAMDGPUProtectedVisibility(const Decl *D,
- llvm::GlobalValue *GV) {
- if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
- return false;
-
- return D->hasAttr<OpenCLKernelAttr>() ||
- (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) ||
- (isa<VarDecl>(D) &&
- (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
- cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinSurfaceType() ||
- cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType()));
-}
-
-void AMDGPUTargetCodeGenInfo::setTargetAttributes(
- const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
- if (requiresAMDGPUProtectedVisibility(D, GV)) {
- GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
- GV->setDSOLocal(true);
- }
-
- if (GV->isDeclaration())
- return;
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
- if (!FD)
- return;
-
- llvm::Function *F = cast<llvm::Function>(GV);
-
- const auto *ReqdWGS = M.getLangOpts().OpenCL ?
- FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
-
-
- const bool IsOpenCLKernel = M.getLangOpts().OpenCL &&
- FD->hasAttr<OpenCLKernelAttr>();
- const bool IsHIPKernel = M.getLangOpts().HIP &&
- FD->hasAttr<CUDAGlobalAttr>();
- if ((IsOpenCLKernel || IsHIPKernel) &&
- (M.getTriple().getOS() == llvm::Triple::AMDHSA))
- F->addFnAttr("amdgpu-implicitarg-num-bytes", "56");
-
- if (IsHIPKernel)
- F->addFnAttr("uniform-work-group-size", "true");
-
-
- const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
- if (ReqdWGS || FlatWGS) {
- unsigned Min = 0;
- unsigned Max = 0;
- if (FlatWGS) {
- Min = FlatWGS->getMin()
- ->EvaluateKnownConstInt(M.getContext())
- .getExtValue();
- Max = FlatWGS->getMax()
- ->EvaluateKnownConstInt(M.getContext())
- .getExtValue();
- }
- if (ReqdWGS && Min == 0 && Max == 0)
- Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();
-
- if (Min != 0) {
- assert(Min <= Max && "Min must be less than or equal Max");
-
- std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
- F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
- } else
- assert(Max == 0 && "Max must be zero");
- } else if (IsOpenCLKernel || IsHIPKernel) {
- // By default, restrict the maximum size to a value specified by
- // --gpu-max-threads-per-block=n or its default value for HIP.
- const unsigned OpenCLDefaultMaxWorkGroupSize = 256;
- const unsigned DefaultMaxWorkGroupSize =
- IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize
- : M.getLangOpts().GPUMaxThreadsPerBlock;
- std::string AttrVal =
- std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize);
- F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
- }
-
- if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) {
- unsigned Min =
- Attr->getMin()->EvaluateKnownConstInt(M.getContext()).getExtValue();
- unsigned Max = Attr->getMax() ? Attr->getMax()
- ->EvaluateKnownConstInt(M.getContext())
- .getExtValue()
- : 0;
-
- if (Min != 0) {
- assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");
-
- std::string AttrVal = llvm::utostr(Min);
- if (Max != 0)
- AttrVal = AttrVal + "," + llvm::utostr(Max);
- F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
- } else
- assert(Max == 0 && "Max must be zero");
- }
-
- if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
- unsigned NumSGPR = Attr->getNumSGPR();
-
- if (NumSGPR != 0)
- F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
- }
-
- if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
- uint32_t NumVGPR = Attr->getNumVGPR();
-
- if (NumVGPR != 0)
- F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
- }
-
- if (M.getContext().getTargetInfo().allowAMDGPUUnsafeFPAtomics())
- F->addFnAttr("amdgpu-unsafe-fp-atomics", "true");
-
- if (!getABIInfo().getCodeGenOpts().EmitIEEENaNCompliantInsts)
- F->addFnAttr("amdgpu-ieee", "false");
-}
-
-unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
- return llvm::CallingConv::AMDGPU_KERNEL;
-}
-
-// Currently LLVM assumes null pointers always have value 0,
-// which results in incorrectly transformed IR. Therefore, instead of
-// emitting null pointers in private and local address spaces, a null
-// pointer in generic address space is emitted which is casted to a
-// pointer in local or private address space.
-llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
- const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
- QualType QT) const {
- if (CGM.getContext().getTargetNullPointerValue(QT) == 0)
- return llvm::ConstantPointerNull::get(PT);
-
- auto &Ctx = CGM.getContext();
- auto NPT = llvm::PointerType::get(PT->getElementType(),
- Ctx.getTargetAddressSpace(LangAS::opencl_generic));
- return llvm::ConstantExpr::getAddrSpaceCast(
- llvm::ConstantPointerNull::get(NPT), PT);
-}
-
-LangAS
-AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
- const VarDecl *D) const {
- assert(!CGM.getLangOpts().OpenCL &&
- !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
- "Address space agnostic languages only");
- LangAS DefaultGlobalAS = getLangASFromTargetAS(
- CGM.getContext().getTargetAddressSpace(LangAS::opencl_global));
- if (!D)
- return DefaultGlobalAS;
-
- LangAS AddrSpace = D->getType().getAddressSpace();
- assert(AddrSpace == LangAS::Default || isTargetAddressSpace(AddrSpace));
- if (AddrSpace != LangAS::Default)
- return AddrSpace;
-
- if (CGM.isTypeConstant(D->getType(), false)) {
- if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
- return ConstAS.getValue();
- }
- return DefaultGlobalAS;
-}
-
-llvm::SyncScope::ID
-AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
- SyncScope Scope,
- llvm::AtomicOrdering Ordering,
- llvm::LLVMContext &Ctx) const {
- std::string Name;
- switch (Scope) {
- case SyncScope::OpenCLWorkGroup:
- Name = "workgroup";
- break;
- case SyncScope::OpenCLDevice:
- Name = "agent";
- break;
- case SyncScope::OpenCLAllSVMDevices:
- Name = "";
- break;
- case SyncScope::OpenCLSubGroup:
- Name = "wavefront";
- }
-
- if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
- if (!Name.empty())
- Name = Twine(Twine(Name) + Twine("-")).str();
-
- Name = Twine(Twine(Name) + Twine("one-as")).str();
- }
-
- return Ctx.getOrInsertSyncScopeID(Name);
-}
-
-bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
- return false;
-}
-
-void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(
- const FunctionType *&FT) const {
- FT = getABIInfo().getContext().adjustFunctionType(
- FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
-}
-
-//===----------------------------------------------------------------------===//
-// SPARC v8 ABI Implementation.
-// Based on the SPARC Compliance Definition version 2.4.1.
-//
-// Ensures that complex values are passed in registers.
-//
-namespace {
-class SparcV8ABIInfo : public DefaultABIInfo {
-public:
- SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
-
-private:
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- void computeInfo(CGFunctionInfo &FI) const override;
-};
-} // end anonymous namespace
-
-
-ABIArgInfo
-SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
- if (Ty->isAnyComplexType()) {
- return ABIArgInfo::getDirect();
- }
- else {
- return DefaultABIInfo::classifyReturnType(Ty);
- }
-}
-
-void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
-
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &Arg : FI.arguments())
- Arg.info = classifyArgumentType(Arg.type);
-}
-
-namespace {
-class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<SparcV8ABIInfo>(CGT)) {}
-};
-} // end anonymous namespace
-
-//===----------------------------------------------------------------------===//
-// SPARC v9 ABI Implementation.
-// Based on the SPARC Compliance Definition version 2.4.1.
-//
-// Function arguments a mapped to a nominal "parameter array" and promoted to
-// registers depending on their type. Each argument occupies 8 or 16 bytes in
-// the array, structs larger than 16 bytes are passed indirectly.
-//
-// One case requires special care:
-//
-// struct mixed {
-// int i;
-// float f;
-// };
-//
-// When a struct mixed is passed by value, it only occupies 8 bytes in the
-// parameter array, but the int is passed in an integer register, and the float
-// is passed in a floating point register. This is represented as two arguments
-// with the LLVM IR inreg attribute:
-//
-// declare void f(i32 inreg %i, float inreg %f)
-//
-// The code generator will only allocate 4 bytes from the parameter array for
-// the inreg arguments. All other arguments are allocated a multiple of 8
-// bytes.
-//
-namespace {
-class SparcV9ABIInfo : public ABIInfo {
-public:
- SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
-
-private:
- ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
- void computeInfo(CGFunctionInfo &FI) const override;
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-
- // Coercion type builder for structs passed in registers. The coercion type
- // serves two purposes:
- //
- // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
- // in registers.
- // 2. Expose aligned floating point elements as first-level elements, so the
- // code generator knows to pass them in floating point registers.
- //
- // We also compute the InReg flag which indicates that the struct contains
- // aligned 32-bit floats.
- //
- struct CoerceBuilder {
- llvm::LLVMContext &Context;
- const llvm::DataLayout &DL;
- SmallVector<llvm::Type*, 8> Elems;
- uint64_t Size;
- bool InReg;
-
- CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
- : Context(c), DL(dl), Size(0), InReg(false) {}
-
- // Pad Elems with integers until Size is ToSize.
- void pad(uint64_t ToSize) {
- assert(ToSize >= Size && "Cannot remove elements");
- if (ToSize == Size)
- return;
-
- // Finish the current 64-bit word.
- uint64_t Aligned = llvm::alignTo(Size, 64);
- if (Aligned > Size && Aligned <= ToSize) {
- Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
- Size = Aligned;
- }
-
- // Add whole 64-bit words.
- while (Size + 64 <= ToSize) {
- Elems.push_back(llvm::Type::getInt64Ty(Context));
- Size += 64;
- }
-
- // Final in-word padding.
- if (Size < ToSize) {
- Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
- Size = ToSize;
- }
- }
-
- // Add a floating point element at Offset.
- void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
- // Unaligned floats are treated as integers.
- if (Offset % Bits)
- return;
- // The InReg flag is only required if there are any floats < 64 bits.
- if (Bits < 64)
- InReg = true;
- pad(Offset);
- Elems.push_back(Ty);
- Size = Offset + Bits;
- }
-
- // Add a struct type to the coercion type, starting at Offset (in bits).
- void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
- const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
- for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
- llvm::Type *ElemTy = StrTy->getElementType(i);
- uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
- switch (ElemTy->getTypeID()) {
- case llvm::Type::StructTyID:
- addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
- break;
- case llvm::Type::FloatTyID:
- addFloat(ElemOffset, ElemTy, 32);
- break;
- case llvm::Type::DoubleTyID:
- addFloat(ElemOffset, ElemTy, 64);
- break;
- case llvm::Type::FP128TyID:
- addFloat(ElemOffset, ElemTy, 128);
- break;
- case llvm::Type::PointerTyID:
- if (ElemOffset % 64 == 0) {
- pad(ElemOffset);
- Elems.push_back(ElemTy);
- Size += 64;
- }
- break;
- default:
- break;
- }
- }
- }
-
- // Check if Ty is a usable substitute for the coercion type.
- bool isUsableType(llvm::StructType *Ty) const {
- return llvm::makeArrayRef(Elems) == Ty->elements();
- }
-
- // Get the coercion type as a literal struct type.
- llvm::Type *getType() const {
- if (Elems.size() == 1)
- return Elems.front();
- else
- return llvm::StructType::get(Context, Elems);
- }
- };
-};
-} // end anonymous namespace
-
-ABIArgInfo
-SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
- if (Ty->isVoidType())
- return ABIArgInfo::getIgnore();
-
- uint64_t Size = getContext().getTypeSize(Ty);
-
- // Anything too big to fit in registers is passed with an explicit indirect
- // pointer / sret pointer.
- if (Size > SizeLimit)
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- // Integer types smaller than a register are extended.
- if (Size < 64 && Ty->isIntegerType())
- return ABIArgInfo::getExtend(Ty);
-
- if (const auto *EIT = Ty->getAs<ExtIntType>())
- if (EIT->getNumBits() < 64)
- return ABIArgInfo::getExtend(Ty);
-
- // Other non-aggregates go in registers.
- if (!isAggregateTypeForABI(Ty))
- return ABIArgInfo::getDirect();
-
- // If a C++ object has either a non-trivial copy constructor or a non-trivial
- // destructor, it is passed with an explicit indirect pointer / sret pointer.
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
- return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
-
- // This is a small aggregate type that should be passed in registers.
- // Build a coercion type from the LLVM struct type.
- llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
- if (!StrTy)
- return ABIArgInfo::getDirect();
-
- CoerceBuilder CB(getVMContext(), getDataLayout());
- CB.addStruct(0, StrTy);
- CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));
-
- // Try to use the original type for coercion.
- llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
-
- if (CB.InReg)
- return ABIArgInfo::getDirectInReg(CoerceTy);
- else
- return ABIArgInfo::getDirect(CoerceTy);
-}
-
-Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- ABIArgInfo AI = classifyType(Ty, 16 * 8);
- llvm::Type *ArgTy = CGT.ConvertType(Ty);
- if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
- AI.setCoerceToType(ArgTy);
-
- CharUnits SlotSize = CharUnits::fromQuantity(8);
-
- CGBuilderTy &Builder = CGF.Builder;
- Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
- llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
-
- auto TypeInfo = getContext().getTypeInfoInChars(Ty);
-
- Address ArgAddr = Address::invalid();
- CharUnits Stride;
- switch (AI.getKind()) {
- case ABIArgInfo::Expand:
- case ABIArgInfo::CoerceAndExpand:
- case ABIArgInfo::InAlloca:
- llvm_unreachable("Unsupported ABI kind for va_arg");
-
- case ABIArgInfo::Extend: {
- Stride = SlotSize;
- CharUnits Offset = SlotSize - TypeInfo.Width;
- ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
- break;
- }
-
- case ABIArgInfo::Direct: {
- auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
- Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize);
- ArgAddr = Addr;
- break;
- }
-
- case ABIArgInfo::Indirect:
- case ABIArgInfo::IndirectAliased:
- Stride = SlotSize;
- ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
- ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
- TypeInfo.Align);
- break;
-
- case ABIArgInfo::Ignore:
- return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.Align);
- }
-
- // Update VAList.
- Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next");
- Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
-
- return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
-}
-
-void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
- FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
- for (auto &I : FI.arguments())
- I.info = classifyType(I.type, 16 * 8);
-}
-
-namespace {
-class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<SparcV9ABIInfo>(CGT)) {}
-
- int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
- return 14;
- }
-
- bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const override;
-};
-} // end anonymous namespace
-
-bool
-SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *Address) const {
- // This is calculated from the LLVM and GCC tables and verified
- // against gcc output. AFAIK all ABIs use the same encoding.
-
- CodeGen::CGBuilderTy &Builder = CGF.Builder;
-
- llvm::IntegerType *i8 = CGF.Int8Ty;
- llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
- llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
-
- // 0-31: the 8-byte general-purpose registers
- AssignToArrayRange(Builder, Address, Eight8, 0, 31);
-
- // 32-63: f0-31, the 4-byte floating-point registers
- AssignToArrayRange(Builder, Address, Four8, 32, 63);
-
- // Y = 64
- // PSR = 65
- // WIM = 66
- // TBR = 67
- // PC = 68
- // NPC = 69
- // FSR = 70
- // CSR = 71
- AssignToArrayRange(Builder, Address, Eight8, 64, 71);
-
- // 72-87: d0-15, the 8-byte floating-point registers
- AssignToArrayRange(Builder, Address, Eight8, 72, 87);
-
- return false;
-}
-
-// ARC ABI implementation.
-namespace {
-
-class ARCABIInfo : public DefaultABIInfo {
-public:
- using DefaultABIInfo::DefaultABIInfo;
-
-private:
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-
- void updateState(const ABIArgInfo &Info, QualType Ty, CCState &State) const {
- if (!State.FreeRegs)
- return;
- if (Info.isIndirect() && Info.getInReg())
- State.FreeRegs--;
- else if (Info.isDirect() && Info.getInReg()) {
- unsigned sz = (getContext().getTypeSize(Ty) + 31) / 32;
- if (sz < State.FreeRegs)
- State.FreeRegs -= sz;
- else
- State.FreeRegs = 0;
- }
- }
-
- void computeInfo(CGFunctionInfo &FI) const override {
- CCState State(FI);
- // ARC uses 8 registers to pass arguments.
- State.FreeRegs = 8;
-
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- updateState(FI.getReturnInfo(), FI.getReturnType(), State);
- for (auto &I : FI.arguments()) {
- I.info = classifyArgumentType(I.type, State.FreeRegs);
- updateState(I.info, I.type, State);
- }
- }
-
- ABIArgInfo getIndirectByRef(QualType Ty, bool HasFreeRegs) const;
- ABIArgInfo getIndirectByValue(QualType Ty) const;
- ABIArgInfo classifyArgumentType(QualType Ty, uint8_t FreeRegs) const;
- ABIArgInfo classifyReturnType(QualType RetTy) const;
-};
-
-class ARCTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- ARCTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<ARCABIInfo>(CGT)) {}
-};
-
-
-ABIArgInfo ARCABIInfo::getIndirectByRef(QualType Ty, bool HasFreeRegs) const {
- return HasFreeRegs ? getNaturalAlignIndirectInReg(Ty) :
- getNaturalAlignIndirect(Ty, false);
-}
-
-ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const {
- // Compute the byval alignment.
- const unsigned MinABIStackAlignInBytes = 4;
- unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
- return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
- TypeAlign > MinABIStackAlignInBytes);
-}
-
-Address ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
- getContext().getTypeInfoInChars(Ty),
- CharUnits::fromQuantity(4), true);
-}
-
-ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
- uint8_t FreeRegs) const {
- // Handle the generic C++ ABI.
- const RecordType *RT = Ty->getAs<RecordType>();
- if (RT) {
- CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
- if (RAA == CGCXXABI::RAA_Indirect)
- return getIndirectByRef(Ty, FreeRegs > 0);
-
- if (RAA == CGCXXABI::RAA_DirectInMemory)
- return getIndirectByValue(Ty);
- }
-
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32;
-
- if (isAggregateTypeForABI(Ty)) {
- // Structures with flexible arrays are always indirect.
- if (RT && RT->getDecl()->hasFlexibleArrayMember())
- return getIndirectByValue(Ty);
-
- // Ignore empty structs/unions.
- if (isEmptyRecord(getContext(), Ty, true))
- return ABIArgInfo::getIgnore();
-
- llvm::LLVMContext &LLVMContext = getVMContext();
-
- llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
- SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
- llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
-
- return FreeRegs >= SizeInRegs ?
- ABIArgInfo::getDirectInReg(Result) :
- ABIArgInfo::getDirect(Result, 0, nullptr, false);
- }
-
- if (const auto *EIT = Ty->getAs<ExtIntType>())
- if (EIT->getNumBits() > 64)
- return getIndirectByValue(Ty);
-
- return isPromotableIntegerTypeForABI(Ty)
- ? (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty)
- : ABIArgInfo::getExtend(Ty))
- : (FreeRegs >= SizeInRegs ? ABIArgInfo::getDirectInReg()
- : ABIArgInfo::getDirect());
-}
-
-ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isAnyComplexType())
- return ABIArgInfo::getDirectInReg();
-
- // Arguments of size > 4 registers are indirect.
- auto RetSize = llvm::alignTo(getContext().getTypeSize(RetTy), 32) / 32;
- if (RetSize > 4)
- return getIndirectByRef(RetTy, /*HasFreeRegs*/ true);
-
- return DefaultABIInfo::classifyReturnType(RetTy);
-}
-
-} // End anonymous namespace.
-
-//===----------------------------------------------------------------------===//
-// XCore ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-/// A SmallStringEnc instance is used to build up the TypeString by passing
-/// it by reference between functions that append to it.
-typedef llvm::SmallString<128> SmallStringEnc;
-
-/// TypeStringCache caches the meta encodings of Types.
-///
-/// The reason for caching TypeStrings is two fold:
-/// 1. To cache a type's encoding for later uses;
-/// 2. As a means to break recursive member type inclusion.
-///
-/// A cache Entry can have a Status of:
-/// NonRecursive: The type encoding is not recursive;
-/// Recursive: The type encoding is recursive;
-/// Incomplete: An incomplete TypeString;
-/// IncompleteUsed: An incomplete TypeString that has been used in a
-/// Recursive type encoding.
-///
-/// A NonRecursive entry will have all of its sub-members expanded as fully
-/// as possible. Whilst it may contain types which are recursive, the type
-/// itself is not recursive and thus its encoding may be safely used whenever
-/// the type is encountered.
-///
-/// A Recursive entry will have all of its sub-members expanded as fully as
-/// possible. The type itself is recursive and it may contain other types which
-/// are recursive. The Recursive encoding must not be used during the expansion
-/// of a recursive type's recursive branch. For simplicity the code uses
-/// IncompleteCount to reject all usage of Recursive encodings for member types.
-///
-/// An Incomplete entry is always a RecordType and only encodes its
-/// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
-/// are placed into the cache during type expansion as a means to identify and
-/// handle recursive inclusion of types as sub-members. If there is recursion
-/// the entry becomes IncompleteUsed.
-///
-/// During the expansion of a RecordType's members:
-///
-/// If the cache contains a NonRecursive encoding for the member type, the
-/// cached encoding is used;
-///
-/// If the cache contains a Recursive encoding for the member type, the
-/// cached encoding is 'Swapped' out, as it may be incorrect, and...
-///
-/// If the member is a RecordType, an Incomplete encoding is placed into the
-/// cache to break potential recursive inclusion of itself as a sub-member;
-///
-/// Once a member RecordType has been expanded, its temporary incomplete
-/// entry is removed from the cache. If a Recursive encoding was swapped out
-/// it is swapped back in;
-///
-/// If an incomplete entry is used to expand a sub-member, the incomplete
-/// entry is marked as IncompleteUsed. The cache keeps count of how many
-/// IncompleteUsed entries it currently contains in IncompleteUsedCount;
-///
-/// If a member's encoding is found to be a NonRecursive or Recursive viz:
-/// IncompleteUsedCount==0, the member's encoding is added to the cache.
-/// Else the member is part of a recursive type and thus the recursion has
-/// been exited too soon for the encoding to be correct for the member.
-///
-class TypeStringCache {
- enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
- struct Entry {
- std::string Str; // The encoded TypeString for the type.
- enum Status State; // Information about the encoding in 'Str'.
- std::string Swapped; // A temporary place holder for a Recursive encoding
- // during the expansion of RecordType's members.
- };
- std::map<const IdentifierInfo *, struct Entry> Map;
- unsigned IncompleteCount; // Number of Incomplete entries in the Map.
- unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
-public:
- TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
- void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
- bool removeIncomplete(const IdentifierInfo *ID);
- void addIfComplete(const IdentifierInfo *ID, StringRef Str,
- bool IsRecursive);
- StringRef lookupStr(const IdentifierInfo *ID);
-};
-
-/// TypeString encodings for enum & union fields must be order.
-/// FieldEncoding is a helper for this ordering process.
-class FieldEncoding {
- bool HasName;
- std::string Enc;
-public:
- FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
- StringRef str() { return Enc; }
- bool operator<(const FieldEncoding &rhs) const {
- if (HasName != rhs.HasName) return HasName;
- return Enc < rhs.Enc;
- }
-};
-
-class XCoreABIInfo : public DefaultABIInfo {
-public:
- XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-};
-
-class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
- mutable TypeStringCache TSC;
- void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
- const CodeGen::CodeGenModule &M) const;
-
-public:
- XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<XCoreABIInfo>(CGT)) {}
- void emitTargetMetadata(CodeGen::CodeGenModule &CGM,
- const llvm::MapVector<GlobalDecl, StringRef>
- &MangledDeclNames) const override;
-};
-
-} // End anonymous namespace.
-
-// TODO: this implementation is likely now redundant with the default
-// EmitVAArg.
-Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- CGBuilderTy &Builder = CGF.Builder;
-
- // Get the VAList.
- CharUnits SlotSize = CharUnits::fromQuantity(4);
- Address AP(Builder.CreateLoad(VAListAddr), SlotSize);
-
- // Handle the argument.
- ABIArgInfo AI = classifyArgumentType(Ty);
- CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
- llvm::Type *ArgTy = CGT.ConvertType(Ty);
- if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
- AI.setCoerceToType(ArgTy);
- llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
-
- Address Val = Address::invalid();
- CharUnits ArgSize = CharUnits::Zero();
- switch (AI.getKind()) {
- case ABIArgInfo::Expand:
- case ABIArgInfo::CoerceAndExpand:
- case ABIArgInfo::InAlloca:
- llvm_unreachable("Unsupported ABI kind for va_arg");
- case ABIArgInfo::Ignore:
- Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
- ArgSize = CharUnits::Zero();
- break;
- case ABIArgInfo::Extend:
- case ABIArgInfo::Direct:
- Val = Builder.CreateBitCast(AP, ArgPtrTy);
- ArgSize = CharUnits::fromQuantity(
- getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
- ArgSize = ArgSize.alignTo(SlotSize);
- break;
- case ABIArgInfo::Indirect:
- case ABIArgInfo::IndirectAliased:
- Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
- Val = Address(Builder.CreateLoad(Val), TypeAlign);
- ArgSize = SlotSize;
- break;
- }
-
- // Increment the VAList.
- if (!ArgSize.isZero()) {
- Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize);
- Builder.CreateStore(APN.getPointer(), VAListAddr);
- }
-
- return Val;
-}
-
-/// During the expansion of a RecordType, an incomplete TypeString is placed
-/// into the cache as a means to identify and break recursion.
-/// If there is a Recursive encoding in the cache, it is swapped out and will
-/// be reinserted by removeIncomplete().
-/// All other types of encoding should have been used rather than arriving here.
-void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
- std::string StubEnc) {
- if (!ID)
- return;
- Entry &E = Map[ID];
- assert( (E.Str.empty() || E.State == Recursive) &&
- "Incorrectly use of addIncomplete");
- assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
- E.Swapped.swap(E.Str); // swap out the Recursive
- E.Str.swap(StubEnc);
- E.State = Incomplete;
- ++IncompleteCount;
-}
-
-/// Once the RecordType has been expanded, the temporary incomplete TypeString
-/// must be removed from the cache.
-/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
-/// Returns true if the RecordType was defined recursively.
-bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
- if (!ID)
- return false;
- auto I = Map.find(ID);
- assert(I != Map.end() && "Entry not present");
- Entry &E = I->second;
- assert( (E.State == Incomplete ||
- E.State == IncompleteUsed) &&
- "Entry must be an incomplete type");
- bool IsRecursive = false;
- if (E.State == IncompleteUsed) {
- // We made use of our Incomplete encoding, thus we are recursive.
- IsRecursive = true;
- --IncompleteUsedCount;
- }
- if (E.Swapped.empty())
- Map.erase(I);
- else {
- // Swap the Recursive back.
- E.Swapped.swap(E.Str);
- E.Swapped.clear();
- E.State = Recursive;
- }
- --IncompleteCount;
- return IsRecursive;
-}
-
-/// Add the encoded TypeString to the cache only if it is NonRecursive or
-/// Recursive (viz: all sub-members were expanded as fully as possible).
-void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
- bool IsRecursive) {
- if (!ID || IncompleteUsedCount)
- return; // No key or it is is an incomplete sub-type so don't add.
- Entry &E = Map[ID];
- if (IsRecursive && !E.Str.empty()) {
- assert(E.State==Recursive && E.Str.size() == Str.size() &&
- "This is not the same Recursive entry");
- // The parent container was not recursive after all, so we could have used
- // this Recursive sub-member entry after all, but we assumed the worse when
- // we started viz: IncompleteCount!=0.
- return;
- }
- assert(E.Str.empty() && "Entry already present");
- E.Str = Str.str();
- E.State = IsRecursive? Recursive : NonRecursive;
-}
-
-/// Return a cached TypeString encoding for the ID. If there isn't one, or we
-/// are recursively expanding a type (IncompleteCount != 0) and the cached
-/// encoding is Recursive, return an empty StringRef.
-StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
- if (!ID)
- return StringRef(); // We have no key.
- auto I = Map.find(ID);
- if (I == Map.end())
- return StringRef(); // We have no encoding.
- Entry &E = I->second;
- if (E.State == Recursive && IncompleteCount)
- return StringRef(); // We don't use Recursive encodings for member types.
-
- if (E.State == Incomplete) {
- // The incomplete type is being used to break out of recursion.
- E.State = IncompleteUsed;
- ++IncompleteUsedCount;
- }
- return E.Str;
-}
-
-/// The XCore ABI includes a type information section that communicates symbol
-/// type information to the linker. The linker uses this information to verify
-/// safety/correctness of things such as array bound and pointers et al.
-/// The ABI only requires C (and XC) language modules to emit TypeStrings.
-/// This type information (TypeString) is emitted into meta data for all global
-/// symbols: definitions, declarations, functions & variables.
-///
-/// The TypeString carries type, qualifier, name, size & value details.
-/// Please see 'Tools Development Guide' section 2.16.2 for format details:
-/// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
-/// The output is tested by test/CodeGen/xcore-stringtype.c.
-///
-static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
- const CodeGen::CodeGenModule &CGM,
- TypeStringCache &TSC);
-
-/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
-void XCoreTargetCodeGenInfo::emitTargetMD(
- const Decl *D, llvm::GlobalValue *GV,
- const CodeGen::CodeGenModule &CGM) const {
- SmallStringEnc Enc;
- if (getTypeString(Enc, D, CGM, TSC)) {
- llvm::LLVMContext &Ctx = CGM.getModule().getContext();
- llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
- llvm::MDString::get(Ctx, Enc.str())};
- llvm::NamedMDNode *MD =
- CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
- MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
- }
-}
-
-void XCoreTargetCodeGenInfo::emitTargetMetadata(
- CodeGen::CodeGenModule &CGM,
- const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const {
- // Warning, new MangledDeclNames may be appended within this loop.
- // We rely on MapVector insertions adding new elements to the end
- // of the container.
- for (unsigned I = 0; I != MangledDeclNames.size(); ++I) {
- auto Val = *(MangledDeclNames.begin() + I);
- llvm::GlobalValue *GV = CGM.GetGlobalValue(Val.second);
- if (GV) {
- const Decl *D = Val.first.getDecl()->getMostRecentDecl();
- emitTargetMD(D, GV, CGM);
- }
- }
-}
-//===----------------------------------------------------------------------===//
-// SPIR ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-class SPIRABIInfo : public DefaultABIInfo {
-public:
- SPIRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) { setCCs(); }
-
-private:
- void setCCs();
-};
-} // end anonymous namespace
-namespace {
-class SPIRTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<SPIRABIInfo>(CGT)) {}
-
- LangAS getASTAllocaAddressSpace() const override {
- return getLangASFromTargetAS(
- getABIInfo().getDataLayout().getAllocaAddrSpace());
- }
-
- unsigned getOpenCLKernelCallingConv() const override;
-};
-
-} // End anonymous namespace.
-void SPIRABIInfo::setCCs() {
- assert(getRuntimeCC() == llvm::CallingConv::C);
- RuntimeCC = llvm::CallingConv::SPIR_FUNC;
-}
-
-namespace clang {
-namespace CodeGen {
-void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
- DefaultABIInfo SPIRABI(CGM.getTypes());
- SPIRABI.computeInfo(FI);
-}
-}
-}
-
-unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
- return llvm::CallingConv::SPIR_KERNEL;
-}
-
-static bool appendType(SmallStringEnc &Enc, QualType QType,
- const CodeGen::CodeGenModule &CGM,
- TypeStringCache &TSC);
-
-/// Helper function for appendRecordType().
-/// Builds a SmallVector containing the encoded field types in declaration
-/// order.
-static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
- const RecordDecl *RD,
- const CodeGen::CodeGenModule &CGM,
- TypeStringCache &TSC) {
- for (const auto *Field : RD->fields()) {
- SmallStringEnc Enc;
- Enc += "m(";
- Enc += Field->getName();
- Enc += "){";
- if (Field->isBitField()) {
- Enc += "b(";
- llvm::raw_svector_ostream OS(Enc);
- OS << Field->getBitWidthValue(CGM.getContext());
- Enc += ':';
- }
- if (!appendType(Enc, Field->getType(), CGM, TSC))
- return false;
- if (Field->isBitField())
- Enc += ')';
- Enc += '}';
- FE.emplace_back(!Field->getName().empty(), Enc);
- }
- return true;
-}
-
-/// Appends structure and union types to Enc and adds encoding to cache.
-/// Recursively calls appendType (via extractFieldType) for each field.
-/// Union types have their fields ordered according to the ABI.
-static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
- const CodeGen::CodeGenModule &CGM,
- TypeStringCache &TSC, const IdentifierInfo *ID) {
- // Append the cached TypeString if we have one.
- StringRef TypeString = TSC.lookupStr(ID);
- if (!TypeString.empty()) {
- Enc += TypeString;
- return true;
- }
-
- // Start to emit an incomplete TypeString.
- size_t Start = Enc.size();
- Enc += (RT->isUnionType()? 'u' : 's');
- Enc += '(';
- if (ID)
- Enc += ID->getName();
- Enc += "){";
-
- // We collect all encoded fields and order as necessary.
- bool IsRecursive = false;
- const RecordDecl *RD = RT->getDecl()->getDefinition();
- if (RD && !RD->field_empty()) {
- // An incomplete TypeString stub is placed in the cache for this RecordType
- // so that recursive calls to this RecordType will use it whilst building a
- // complete TypeString for this RecordType.
- SmallVector<FieldEncoding, 16> FE;
- std::string StubEnc(Enc.substr(Start).str());
- StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
- TSC.addIncomplete(ID, std::move(StubEnc));
- if (!extractFieldType(FE, RD, CGM, TSC)) {
- (void) TSC.removeIncomplete(ID);
- return false;
- }
- IsRecursive = TSC.removeIncomplete(ID);
- // The ABI requires unions to be sorted but not structures.
- // See FieldEncoding::operator< for sort algorithm.
- if (RT->isUnionType())
- llvm::sort(FE);
- // We can now complete the TypeString.
- unsigned E = FE.size();
- for (unsigned I = 0; I != E; ++I) {
- if (I)
- Enc += ',';
- Enc += FE[I].str();
- }
- }
- Enc += '}';
- TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
- return true;
-}
-
-/// Appends enum types to Enc and adds the encoding to the cache.
-static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
- TypeStringCache &TSC,
- const IdentifierInfo *ID) {
- // Append the cached TypeString if we have one.
- StringRef TypeString = TSC.lookupStr(ID);
- if (!TypeString.empty()) {
- Enc += TypeString;
- return true;
- }
-
- size_t Start = Enc.size();
- Enc += "e(";
- if (ID)
- Enc += ID->getName();
- Enc += "){";
-
- // We collect all encoded enumerations and order them alphanumerically.
- if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
- SmallVector<FieldEncoding, 16> FE;
- for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
- ++I) {
- SmallStringEnc EnumEnc;
- EnumEnc += "m(";
- EnumEnc += I->getName();
- EnumEnc += "){";
- I->getInitVal().toString(EnumEnc);
- EnumEnc += '}';
- FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
- }
- llvm::sort(FE);
- unsigned E = FE.size();
- for (unsigned I = 0; I != E; ++I) {
- if (I)
- Enc += ',';
- Enc += FE[I].str();
- }
- }
- Enc += '}';
- TSC.addIfComplete(ID, Enc.substr(Start), false);
- return true;
-}
-
-/// Appends type's qualifier to Enc.
-/// This is done prior to appending the type's encoding.
-static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
- // Qualifiers are emitted in alphabetical order.
- static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"};
- int Lookup = 0;
- if (QT.isConstQualified())
- Lookup += 1<<0;
- if (QT.isRestrictQualified())
- Lookup += 1<<1;
- if (QT.isVolatileQualified())
- Lookup += 1<<2;
- Enc += Table[Lookup];
-}
-
-/// Appends built-in types to Enc.
-static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
- const char *EncType;
- switch (BT->getKind()) {
- case BuiltinType::Void:
- EncType = "0";
- break;
- case BuiltinType::Bool:
- EncType = "b";
- break;
- case BuiltinType::Char_U:
- EncType = "uc";
- break;
- case BuiltinType::UChar:
- EncType = "uc";
- break;
- case BuiltinType::SChar:
- EncType = "sc";
- break;
- case BuiltinType::UShort:
- EncType = "us";
- break;
- case BuiltinType::Short:
- EncType = "ss";
- break;
- case BuiltinType::UInt:
- EncType = "ui";
- break;
- case BuiltinType::Int:
- EncType = "si";
- break;
- case BuiltinType::ULong:
- EncType = "ul";
- break;
- case BuiltinType::Long:
- EncType = "sl";
- break;
- case BuiltinType::ULongLong:
- EncType = "ull";
- break;
- case BuiltinType::LongLong:
- EncType = "sll";
- break;
- case BuiltinType::Float:
- EncType = "ft";
- break;
- case BuiltinType::Double:
- EncType = "d";
- break;
- case BuiltinType::LongDouble:
- EncType = "ld";
- break;
- default:
- return false;
- }
- Enc += EncType;
- return true;
-}
-
-/// Appends a pointer encoding to Enc before calling appendType for the pointee.
-static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
- const CodeGen::CodeGenModule &CGM,
- TypeStringCache &TSC) {
- Enc += "p(";
- if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
- return false;
- Enc += ')';
- return true;
-}
-
-/// Appends array encoding to Enc before calling appendType for the element.
-static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
- const ArrayType *AT,
- const CodeGen::CodeGenModule &CGM,
- TypeStringCache &TSC, StringRef NoSizeEnc) {
- if (AT->getSizeModifier() != ArrayType::Normal)
- return false;
- Enc += "a(";
- if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
- CAT->getSize().toStringUnsigned(Enc);
- else
- Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
- Enc += ':';
- // The Qualifiers should be attached to the type rather than the array.
- appendQualifier(Enc, QT);
- if (!appendType(Enc, AT->getElementType(), CGM, TSC))
- return false;
- Enc += ')';
- return true;
-}
-
-/// Appends a function encoding to Enc, calling appendType for the return type
-/// and the arguments.
-static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
- const CodeGen::CodeGenModule &CGM,
- TypeStringCache &TSC) {
- Enc += "f{";
- if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
- return false;
- Enc += "}(";
- if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
- // N.B. we are only interested in the adjusted param types.
- auto I = FPT->param_type_begin();
- auto E = FPT->param_type_end();
- if (I != E) {
- do {
- if (!appendType(Enc, *I, CGM, TSC))
- return false;
- ++I;
- if (I != E)
- Enc += ',';
- } while (I != E);
- if (FPT->isVariadic())
- Enc += ",va";
- } else {
- if (FPT->isVariadic())
- Enc += "va";
- else
- Enc += '0';
- }
- }
- Enc += ')';
- return true;
-}
-
-/// Handles the type's qualifier before dispatching a call to handle specific
-/// type encodings.
-static bool appendType(SmallStringEnc &Enc, QualType QType,
- const CodeGen::CodeGenModule &CGM,
- TypeStringCache &TSC) {
-
- QualType QT = QType.getCanonicalType();
-
- if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
- // The Qualifiers should be attached to the type rather than the array.
- // Thus we don't call appendQualifier() here.
- return appendArrayType(Enc, QT, AT, CGM, TSC, "");
-
- appendQualifier(Enc, QT);
-
- if (const BuiltinType *BT = QT->getAs<BuiltinType>())
- return appendBuiltinType(Enc, BT);
-
- if (const PointerType *PT = QT->getAs<PointerType>())
- return appendPointerType(Enc, PT, CGM, TSC);
-
- if (const EnumType *ET = QT->getAs<EnumType>())
- return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());
-
- if (const RecordType *RT = QT->getAsStructureType())
- return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
-
- if (const RecordType *RT = QT->getAsUnionType())
- return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
-
- if (const FunctionType *FT = QT->getAs<FunctionType>())
- return appendFunctionType(Enc, FT, CGM, TSC);
-
- return false;
-}
-
-static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
- const CodeGen::CodeGenModule &CGM,
- TypeStringCache &TSC) {
- if (!D)
- return false;
-
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- if (FD->getLanguageLinkage() != CLanguageLinkage)
- return false;
- return appendType(Enc, FD->getType(), CGM, TSC);
- }
-
- if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
- if (VD->getLanguageLinkage() != CLanguageLinkage)
- return false;
- QualType QT = VD->getType().getCanonicalType();
- if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
- // Global ArrayTypes are given a size of '*' if the size is unknown.
- // The Qualifiers should be attached to the type rather than the array.
- // Thus we don't call appendQualifier() here.
- return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
- }
- return appendType(Enc, QT, CGM, TSC);
- }
- return false;
-}
-
-//===----------------------------------------------------------------------===//
-// RISCV ABI Implementation
-//===----------------------------------------------------------------------===//
-
-namespace {
-class RISCVABIInfo : public DefaultABIInfo {
-private:
- // Size of the integer ('x') registers in bits.
- unsigned XLen;
- // Size of the floating point ('f') registers in bits. Note that the target
- // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target
- // with soft float ABI has FLen==0).
- unsigned FLen;
- static const int NumArgGPRs = 8;
- static const int NumArgFPRs = 8;
- bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
- llvm::Type *&Field1Ty,
- CharUnits &Field1Off,
- llvm::Type *&Field2Ty,
- CharUnits &Field2Off) const;
-
-public:
- RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen)
- : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen) {}
-
- // DefaultABIInfo's classifyReturnType and classifyArgumentType are
- // non-virtual, but computeInfo is virtual, so we overload it.
- void computeInfo(CGFunctionInfo &FI) const override;
-
- ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
- int &ArgFPRsLeft) const;
- ABIArgInfo classifyReturnType(QualType RetTy) const;
-
- Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const override;
-
- ABIArgInfo extendType(QualType Ty) const;
-
- bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
- CharUnits &Field1Off, llvm::Type *&Field2Ty,
- CharUnits &Field2Off, int &NeededArgGPRs,
- int &NeededArgFPRs) const;
- ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
- CharUnits Field1Off,
- llvm::Type *Field2Ty,
- CharUnits Field2Off) const;
-};
-} // end anonymous namespace
-
-void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
- QualType RetTy = FI.getReturnType();
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(RetTy);
-
- // IsRetIndirect is true if classifyArgumentType indicated the value should
- // be passed indirect, or if the type size is a scalar greater than 2*XLen
- // and not a complex type with elements <= FLen. e.g. fp128 is passed direct
- // in LLVM IR, relying on the backend lowering code to rewrite the argument
- // list and pass indirectly on RV32.
- bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
- if (!IsRetIndirect && RetTy->isScalarType() &&
- getContext().getTypeSize(RetTy) > (2 * XLen)) {
- if (RetTy->isComplexType() && FLen) {
- QualType EltTy = RetTy->castAs<ComplexType>()->getElementType();
- IsRetIndirect = getContext().getTypeSize(EltTy) > FLen;
- } else {
- // This is a normal scalar > 2*XLen, such as fp128 on RV32.
- IsRetIndirect = true;
- }
- }
-
- // We must track the number of GPRs used in order to conform to the RISC-V
- // ABI, as integer scalars passed in registers should have signext/zeroext
- // when promoted, but are anyext if passed on the stack. As GPR usage is
- // different for variadic arguments, we must also track whether we are
- // examining a vararg or not.
- int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
- int ArgFPRsLeft = FLen ? NumArgFPRs : 0;
- int NumFixedArgs = FI.getNumRequiredArgs();
-
- int ArgNum = 0;
- for (auto &ArgInfo : FI.arguments()) {
- bool IsFixed = ArgNum < NumFixedArgs;
- ArgInfo.info =
- classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
- ArgNum++;
- }
-}
-
-// Returns true if the struct is a potential candidate for the floating point
-// calling convention. If this function returns true, the caller is
-// responsible for checking that if there is only a single field then that
-// field is a float.
-bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
- llvm::Type *&Field1Ty,
- CharUnits &Field1Off,
- llvm::Type *&Field2Ty,
- CharUnits &Field2Off) const {
- bool IsInt = Ty->isIntegralOrEnumerationType();
- bool IsFloat = Ty->isRealFloatingType();
-
- if (IsInt || IsFloat) {
- uint64_t Size = getContext().getTypeSize(Ty);
- if (IsInt && Size > XLen)
- return false;
- // Can't be eligible if larger than the FP registers. Half precision isn't
- // currently supported on RISC-V and the ABI hasn't been confirmed, so
- // default to the integer ABI in that case.
- if (IsFloat && (Size > FLen || Size < 32))
- return false;
- // Can't be eligible if an integer type was already found (int+int pairs
- // are not eligible).
- if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
- return false;
- if (!Field1Ty) {
- Field1Ty = CGT.ConvertType(Ty);
- Field1Off = CurOff;
- return true;
- }
- if (!Field2Ty) {
- Field2Ty = CGT.ConvertType(Ty);
- Field2Off = CurOff;
- return true;
- }
- return false;
- }
-
- if (auto CTy = Ty->getAs<ComplexType>()) {
- if (Field1Ty)
- return false;
- QualType EltTy = CTy->getElementType();
- if (getContext().getTypeSize(EltTy) > FLen)
- return false;
- Field1Ty = CGT.ConvertType(EltTy);
- Field1Off = CurOff;
- Field2Ty = Field1Ty;
- Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
- return true;
- }
-
- if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
- uint64_t ArraySize = ATy->getSize().getZExtValue();
- QualType EltTy = ATy->getElementType();
- CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
- for (uint64_t i = 0; i < ArraySize; ++i) {
- bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
- Field1Off, Field2Ty, Field2Off);
- if (!Ret)
- return false;
- CurOff += EltSize;
- }
- return true;
- }
-
- if (const auto *RTy = Ty->getAs<RecordType>()) {
- // Structures with either a non-trivial destructor or a non-trivial
- // copy constructor are not eligible for the FP calling convention.
- if (getRecordArgABI(Ty, CGT.getCXXABI()))
- return false;
- if (isEmptyRecord(getContext(), Ty, true))
- return true;
- const RecordDecl *RD = RTy->getDecl();
- // Unions aren't eligible unless they're empty (which is caught above).
- if (RD->isUnion())
- return false;
- int ZeroWidthBitFieldCount = 0;
- for (const FieldDecl *FD : RD->fields()) {
- const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
- uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex());
- QualType QTy = FD->getType();
- if (FD->isBitField()) {
- unsigned BitWidth = FD->getBitWidthValue(getContext());
- // Allow a bitfield with a type greater than XLen as long as the
- // bitwidth is XLen or less.
- if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
- QTy = getContext().getIntTypeForBitwidth(XLen, false);
- if (BitWidth == 0) {
- ZeroWidthBitFieldCount++;
- continue;
- }
- }
-
- bool Ret = detectFPCCEligibleStructHelper(
- QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
- Field1Ty, Field1Off, Field2Ty, Field2Off);
- if (!Ret)
- return false;
-
- // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp
- // or int+fp structs, but are ignored for a struct with an fp field and
- // any number of zero-width bitfields.
- if (Field2Ty && ZeroWidthBitFieldCount > 0)
- return false;
- }
- return Field1Ty != nullptr;
- }
-
- return false;
-}
-
-// Determine if a struct is eligible for passing according to the floating
-// point calling convention (i.e., when flattened it contains a single fp
-// value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and
-// NeededArgGPRs are incremented appropriately.
-bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
- CharUnits &Field1Off,
- llvm::Type *&Field2Ty,
- CharUnits &Field2Off,
- int &NeededArgGPRs,
- int &NeededArgFPRs) const {
- Field1Ty = nullptr;
- Field2Ty = nullptr;
- NeededArgGPRs = 0;
- NeededArgFPRs = 0;
- bool IsCandidate = detectFPCCEligibleStructHelper(
- Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off);
- // Not really a candidate if we have a single int but no float.
- if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
- return false;
- if (!IsCandidate)
- return false;
- if (Field1Ty && Field1Ty->isFloatingPointTy())
- NeededArgFPRs++;
- else if (Field1Ty)
- NeededArgGPRs++;
- if (Field2Ty && Field2Ty->isFloatingPointTy())
- NeededArgFPRs++;
- else if (Field2Ty)
- NeededArgGPRs++;
- return true;
-}
-
-// Call getCoerceAndExpand for the two-element flattened struct described by
-// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
-// appropriate coerceToType and unpaddedCoerceToType.
-ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
- llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
- CharUnits Field2Off) const {
- SmallVector<llvm::Type *, 3> CoerceElts;
- SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
- if (!Field1Off.isZero())
- CoerceElts.push_back(llvm::ArrayType::get(
- llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));
-
- CoerceElts.push_back(Field1Ty);
- UnpaddedCoerceElts.push_back(Field1Ty);
-
- if (!Field2Ty) {
- return ABIArgInfo::getCoerceAndExpand(
- llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
- UnpaddedCoerceElts[0]);
- }
-
- CharUnits Field2Align =
- CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(Field2Ty));
- CharUnits Field1End = Field1Off +
- CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
- CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);
-
- CharUnits Padding = CharUnits::Zero();
- if (Field2Off > Field2OffNoPadNoPack)
- Padding = Field2Off - Field2OffNoPadNoPack;
- else if (Field2Off != Field2Align && Field2Off > Field1End)
- Padding = Field2Off - Field1End;
-
- bool IsPacked = !Field2Off.isMultipleOf(Field2Align);
-
- if (!Padding.isZero())
- CoerceElts.push_back(llvm::ArrayType::get(
- llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));
-
- CoerceElts.push_back(Field2Ty);
- UnpaddedCoerceElts.push_back(Field2Ty);
-
- auto CoerceToType =
- llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
- auto UnpaddedCoerceToType =
- llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);
-
- return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
-}
-
-ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
- int &ArgGPRsLeft,
- int &ArgFPRsLeft) const {
- assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
- Ty = useFirstFieldIfTransparentUnion(Ty);
-
- // Structures with either a non-trivial destructor or a non-trivial
- // copy constructor are always passed indirectly.
- if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
- if (ArgGPRsLeft)
- ArgGPRsLeft -= 1;
- return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
- CGCXXABI::RAA_DirectInMemory);
- }
-
- // Ignore empty structs/unions.
- if (isEmptyRecord(getContext(), Ty, true))
- return ABIArgInfo::getIgnore();
-
- uint64_t Size = getContext().getTypeSize(Ty);
-
- // Pass floating point values via FPRs if possible.
- if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
- FLen >= Size && ArgFPRsLeft) {
- ArgFPRsLeft--;
- return ABIArgInfo::getDirect();
- }
-
- // Complex types for the hard float ABI must be passed direct rather than
- // using CoerceAndExpand.
- if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) {
- QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
- if (getContext().getTypeSize(EltTy) <= FLen) {
- ArgFPRsLeft -= 2;
- return ABIArgInfo::getDirect();
- }
- }
-
- if (IsFixed && FLen && Ty->isStructureOrClassType()) {
- llvm::Type *Field1Ty = nullptr;
- llvm::Type *Field2Ty = nullptr;
- CharUnits Field1Off = CharUnits::Zero();
- CharUnits Field2Off = CharUnits::Zero();
- int NeededArgGPRs = 0;
- int NeededArgFPRs = 0;
- bool IsCandidate =
- detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
- NeededArgGPRs, NeededArgFPRs);
- if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
- NeededArgFPRs <= ArgFPRsLeft) {
- ArgGPRsLeft -= NeededArgGPRs;
- ArgFPRsLeft -= NeededArgFPRs;
- return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
- Field2Off);
- }
- }
-
- uint64_t NeededAlign = getContext().getTypeAlign(Ty);
- bool MustUseStack = false;
- // Determine the number of GPRs needed to pass the current argument
- // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
- // register pairs, so may consume 3 registers.
- int NeededArgGPRs = 1;
- if (!IsFixed && NeededAlign == 2 * XLen)
- NeededArgGPRs = 2 + (ArgGPRsLeft % 2);
- else if (Size > XLen && Size <= 2 * XLen)
- NeededArgGPRs = 2;
-
- if (NeededArgGPRs > ArgGPRsLeft) {
- MustUseStack = true;
- NeededArgGPRs = ArgGPRsLeft;
- }
-
- ArgGPRsLeft -= NeededArgGPRs;
-
- if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
- // Treat an enum type as its underlying type.
- if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
-
- // All integral types are promoted to XLen width, unless passed on the
- // stack.
- if (Size < XLen && Ty->isIntegralOrEnumerationType() && !MustUseStack) {
- return extendType(Ty);
- }
-
- if (const auto *EIT = Ty->getAs<ExtIntType>()) {
- if (EIT->getNumBits() < XLen && !MustUseStack)
- return extendType(Ty);
- if (EIT->getNumBits() > 128 ||
- (!getContext().getTargetInfo().hasInt128Type() &&
- EIT->getNumBits() > 64))
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
- }
-
- return ABIArgInfo::getDirect();
- }
-
- // Aggregates which are <= 2*XLen will be passed in registers if possible,
- // so coerce to integers.
- if (Size <= 2 * XLen) {
- unsigned Alignment = getContext().getTypeAlign(Ty);
-
- // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
- // required, and a 2-element XLen array if only XLen alignment is required.
- if (Size <= XLen) {
- return ABIArgInfo::getDirect(
- llvm::IntegerType::get(getVMContext(), XLen));
- } else if (Alignment == 2 * XLen) {
- return ABIArgInfo::getDirect(
- llvm::IntegerType::get(getVMContext(), 2 * XLen));
- } else {
- return ABIArgInfo::getDirect(llvm::ArrayType::get(
- llvm::IntegerType::get(getVMContext(), XLen), 2));
- }
- }
- return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
-}
-
-ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
- if (RetTy->isVoidType())
- return ABIArgInfo::getIgnore();
-
- int ArgGPRsLeft = 2;
- int ArgFPRsLeft = FLen ? 2 : 0;
-
- // The rules for return and argument types are the same, so defer to
- // classifyArgumentType.
- return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft,
- ArgFPRsLeft);
-}
-
-Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
- QualType Ty) const {
- CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);
-
- // Empty records are ignored for parameter passing purposes.
- if (isEmptyRecord(getContext(), Ty, true)) {
- Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
- Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
- return Addr;
- }
-
- auto TInfo = getContext().getTypeInfoInChars(Ty);
-
- // Arguments bigger than 2*Xlen bytes are passed indirectly.
- bool IsIndirect = TInfo.Width > 2 * SlotSize;
-
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TInfo,
- SlotSize, /*AllowHigherAlign=*/true);
-}
-
-ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
- int TySize = getContext().getTypeSize(Ty);
- // RV64 ABI requires unsigned 32 bit integers to be sign extended.
- if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
- return ABIArgInfo::getSignExtend(Ty);
- return ABIArgInfo::getExtend(Ty);
-}
-
-namespace {
-class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
- unsigned FLen)
- : TargetCodeGenInfo(std::make_unique<RISCVABIInfo>(CGT, XLen, FLen)) {}
-
- void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &CGM) const override {
- const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
- if (!FD) return;
-
- const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
- if (!Attr)
- return;
-
- const char *Kind;
- switch (Attr->getInterrupt()) {
- case RISCVInterruptAttr::user: Kind = "user"; break;
- case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
- case RISCVInterruptAttr::machine: Kind = "machine"; break;
- }
-
- auto *Fn = cast<llvm::Function>(GV);
-
- Fn->addFnAttr("interrupt", Kind);
- }
-};
-} // namespace
-
-//===----------------------------------------------------------------------===//
-// VE ABI Implementation.
-//
-namespace {
-class VEABIInfo : public DefaultABIInfo {
-public:
- VEABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
-
-private:
- ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType RetTy) const;
- void computeInfo(CGFunctionInfo &FI) const override;
-};
-} // end anonymous namespace
-
-ABIArgInfo VEABIInfo::classifyReturnType(QualType Ty) const {
- if (Ty->isAnyComplexType())
- return ABIArgInfo::getDirect();
- uint64_t Size = getContext().getTypeSize(Ty);
- if (Size < 64 && Ty->isIntegerType())
- return ABIArgInfo::getExtend(Ty);
- return DefaultABIInfo::classifyReturnType(Ty);
-}
-
-ABIArgInfo VEABIInfo::classifyArgumentType(QualType Ty) const {
- if (Ty->isAnyComplexType())
- return ABIArgInfo::getDirect();
- uint64_t Size = getContext().getTypeSize(Ty);
- if (Size < 64 && Ty->isIntegerType())
- return ABIArgInfo::getExtend(Ty);
- return DefaultABIInfo::classifyArgumentType(Ty);
-}
-
-void VEABIInfo::computeInfo(CGFunctionInfo &FI) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (auto &Arg : FI.arguments())
- Arg.info = classifyArgumentType(Arg.type);
-}
-
-namespace {
-class VETargetCodeGenInfo : public TargetCodeGenInfo {
-public:
- VETargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<VEABIInfo>(CGT)) {}
- // VE ABI requires the arguments of variadic and prototype-less functions
- // are passed in both registers and memory.
- bool isNoProtoCallVariadic(const CallArgList &args,
- const FunctionNoProtoType *fnType) const override {
- return true;
- }
-};
-} // end anonymous namespace
-
-//===----------------------------------------------------------------------===//
-// Driver code
-//===----------------------------------------------------------------------===//
-
-bool CodeGenModule::supportsCOMDAT() const {
- return getTriple().supportsCOMDAT();
-}
-
-const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
- if (TheTargetCodeGenInfo)
- return *TheTargetCodeGenInfo;
-
- // Helper to set the unique_ptr while still keeping the return value.
- auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & {
- this->TheTargetCodeGenInfo.reset(P);
- return *P;
- };
-
- const llvm::Triple &Triple = getTarget().getTriple();
- switch (Triple.getArch()) {
- default:
- return SetCGInfo(new DefaultTargetCodeGenInfo(Types));
-
- case llvm::Triple::le32:
- return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
- case llvm::Triple::m68k:
- return SetCGInfo(new M68kTargetCodeGenInfo(Types));
- case llvm::Triple::mips:
- case llvm::Triple::mipsel:
- if (Triple.getOS() == llvm::Triple::NaCl)
- return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
- return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true));
-
- case llvm::Triple::mips64:
- case llvm::Triple::mips64el:
- return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false));
-
- case llvm::Triple::avr:
- return SetCGInfo(new AVRTargetCodeGenInfo(Types));
-
- case llvm::Triple::aarch64:
- case llvm::Triple::aarch64_32:
- case llvm::Triple::aarch64_be: {
- AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
- if (getTarget().getABI() == "darwinpcs")
- Kind = AArch64ABIInfo::DarwinPCS;
- else if (Triple.isOSWindows())
- return SetCGInfo(
- new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64));
-
- return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind));
- }
-
- case llvm::Triple::wasm32:
- case llvm::Triple::wasm64: {
- WebAssemblyABIInfo::ABIKind Kind = WebAssemblyABIInfo::MVP;
- if (getTarget().getABI() == "experimental-mv")
- Kind = WebAssemblyABIInfo::ExperimentalMV;
- return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types, Kind));
- }
-
- case llvm::Triple::arm:
- case llvm::Triple::armeb:
- case llvm::Triple::thumb:
- case llvm::Triple::thumbeb: {
- if (Triple.getOS() == llvm::Triple::Win32) {
- return SetCGInfo(
- new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
- }
-
- ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
- StringRef ABIStr = getTarget().getABI();
- if (ABIStr == "apcs-gnu")
- Kind = ARMABIInfo::APCS;
- else if (ABIStr == "aapcs16")
- Kind = ARMABIInfo::AAPCS16_VFP;
- else if (CodeGenOpts.FloatABI == "hard" ||
- (CodeGenOpts.FloatABI != "soft" &&
- (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
- Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
- Triple.getEnvironment() == llvm::Triple::EABIHF)))
- Kind = ARMABIInfo::AAPCS_VFP;
-
- return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind));
- }
-
- case llvm::Triple::ppc: {
- if (Triple.isOSAIX())
- return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit*/ false));
-
- bool IsSoftFloat =
- CodeGenOpts.FloatABI == "soft" || getTarget().hasFeature("spe");
- bool RetSmallStructInRegABI =
- PPC32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
- return SetCGInfo(
- new PPC32TargetCodeGenInfo(Types, IsSoftFloat, RetSmallStructInRegABI));
- }
- case llvm::Triple::ppcle: {
- bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
- bool RetSmallStructInRegABI =
- PPC32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
- return SetCGInfo(
- new PPC32TargetCodeGenInfo(Types, IsSoftFloat, RetSmallStructInRegABI));
- }
- case llvm::Triple::ppc64:
- if (Triple.isOSAIX())
- return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit*/ true));
-
- if (Triple.isOSBinFormatELF()) {
- PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
- if (getTarget().getABI() == "elfv2")
- Kind = PPC64_SVR4_ABIInfo::ELFv2;
- bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
-
- return SetCGInfo(
- new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, IsSoftFloat));
- }
- return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
- case llvm::Triple::ppc64le: {
- assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
- PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
- if (getTarget().getABI() == "elfv1")
- Kind = PPC64_SVR4_ABIInfo::ELFv1;
- bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
-
- return SetCGInfo(
- new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, IsSoftFloat));
- }
-
- case llvm::Triple::nvptx:
- case llvm::Triple::nvptx64:
- return SetCGInfo(new NVPTXTargetCodeGenInfo(Types));
-
- case llvm::Triple::msp430:
- return SetCGInfo(new MSP430TargetCodeGenInfo(Types));
-
- case llvm::Triple::riscv32:
- case llvm::Triple::riscv64: {
- StringRef ABIStr = getTarget().getABI();
- unsigned XLen = getTarget().getPointerWidth(0);
- unsigned ABIFLen = 0;
- if (ABIStr.endswith("f"))
- ABIFLen = 32;
- else if (ABIStr.endswith("d"))
- ABIFLen = 64;
- return SetCGInfo(new RISCVTargetCodeGenInfo(Types, XLen, ABIFLen));
- }
-
- case llvm::Triple::systemz: {
- bool SoftFloat = CodeGenOpts.FloatABI == "soft";
- bool HasVector = !SoftFloat && getTarget().getABI() == "vector";
- return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector, SoftFloat));
- }
-
- case llvm::Triple::tce:
- case llvm::Triple::tcele:
- return SetCGInfo(new TCETargetCodeGenInfo(Types));
-
- case llvm::Triple::x86: {
- bool IsDarwinVectorABI = Triple.isOSDarwin();
- bool RetSmallStructInRegABI =
- X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
- bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();
-
- if (Triple.getOS() == llvm::Triple::Win32) {
- return SetCGInfo(new WinX86_32TargetCodeGenInfo(
- Types, IsDarwinVectorABI, RetSmallStructInRegABI,
- IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
- } else {
- return SetCGInfo(new X86_32TargetCodeGenInfo(
- Types, IsDarwinVectorABI, RetSmallStructInRegABI,
- IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
- CodeGenOpts.FloatABI == "soft"));
- }
- }
-
- case llvm::Triple::x86_64: {
- StringRef ABI = getTarget().getABI();
- X86AVXABILevel AVXLevel =
- (ABI == "avx512"
- ? X86AVXABILevel::AVX512
- : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None);
-
- switch (Triple.getOS()) {
- case llvm::Triple::Win32:
- return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
- default:
- return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel));
- }
- }
- case llvm::Triple::hexagon:
- return SetCGInfo(new HexagonTargetCodeGenInfo(Types));
- case llvm::Triple::lanai:
- return SetCGInfo(new LanaiTargetCodeGenInfo(Types));
- case llvm::Triple::r600:
- return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
- case llvm::Triple::amdgcn:
- return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
- case llvm::Triple::sparc:
- return SetCGInfo(new SparcV8TargetCodeGenInfo(Types));
- case llvm::Triple::sparcv9:
- return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
- case llvm::Triple::xcore:
- return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
- case llvm::Triple::arc:
- return SetCGInfo(new ARCTargetCodeGenInfo(Types));
- case llvm::Triple::spir:
- case llvm::Triple::spir64:
- return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
- case llvm::Triple::ve:
- return SetCGInfo(new VETargetCodeGenInfo(Types));
- }
-}
-
/// Create an OpenCL kernel for an enqueued block.
///
/// The kernel has the same function type as the block invoke function. Its
/// name is the name of the block invoke function postfixed with "_kernel".
/// It simply calls the block invoke function then returns.
-llvm::Function *
-TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF,
- llvm::Function *Invoke,
- llvm::Value *BlockLiteral) const {
+llvm::Value *TargetCodeGenInfo::createEnqueuedBlockKernel(
+ CodeGenFunction &CGF, llvm::Function *Invoke, llvm::Type *BlockTy) const {
auto *InvokeFT = Invoke->getFunctionType();
- llvm::SmallVector<llvm::Type *, 2> ArgTys;
- for (auto &P : InvokeFT->params())
- ArgTys.push_back(P);
auto &C = CGF.getLLVMContext();
std::string Name = Invoke->getName().str() + "_kernel";
- auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
- auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
+ auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C),
+ InvokeFT->params(), false);
+ auto *F = llvm::Function::Create(FT, llvm::GlobalValue::ExternalLinkage, Name,
&CGF.CGM.getModule());
- auto IP = CGF.Builder.saveIP();
- auto *BB = llvm::BasicBlock::Create(C, "entry", F);
- auto &Builder = CGF.Builder;
- Builder.SetInsertPoint(BB);
- llvm::SmallVector<llvm::Value *, 2> Args;
- for (auto &A : F->args())
- Args.push_back(&A);
- llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
- call->setCallingConv(Invoke->getCallingConv());
- Builder.CreateRetVoid();
- Builder.restoreIP(IP);
- return F;
-}
+ llvm::CallingConv::ID KernelCC =
+ CGF.getTypes().ClangCallConvToLLVMCallConv(CallingConv::CC_OpenCLKernel);
+ F->setCallingConv(KernelCC);
-/// Create an OpenCL kernel for an enqueued block.
-///
-/// The type of the first argument (the block literal) is the struct type
-/// of the block literal instead of a pointer type. The first argument
-/// (block literal) is passed directly by value to the kernel. The kernel
-/// allocates the same type of struct on stack and stores the block literal
-/// to it and passes its pointer to the block invoke function. The kernel
-/// has "enqueued-block" function attribute and kernel argument metadata.
-llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
- CodeGenFunction &CGF, llvm::Function *Invoke,
- llvm::Value *BlockLiteral) const {
- auto &Builder = CGF.Builder;
- auto &C = CGF.getLLVMContext();
+ llvm::AttrBuilder KernelAttrs(C);
- auto *BlockTy = BlockLiteral->getType()->getPointerElementType();
- auto *InvokeFT = Invoke->getFunctionType();
- llvm::SmallVector<llvm::Type *, 2> ArgTys;
- llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
- llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
- llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
- llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
- llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
- llvm::SmallVector<llvm::Metadata *, 8> ArgNames;
+ // FIXME: This is missing setTargetAttributes
+ CGF.CGM.addDefaultFunctionDefinitionAttributes(KernelAttrs);
+ F->addFnAttrs(KernelAttrs);
- ArgTys.push_back(BlockTy);
- ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
- AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
- ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
- ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
- AccessQuals.push_back(llvm::MDString::get(C, "none"));
- ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
- for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
- ArgTys.push_back(InvokeFT->getParamType(I));
- ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
- AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
- AccessQuals.push_back(llvm::MDString::get(C, "none"));
- ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
- ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
- ArgNames.push_back(
- llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
- }
- std::string Name = Invoke->getName().str() + "_kernel";
- auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
- auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
- &CGF.CGM.getModule());
- F->addFnAttr("enqueued-block");
auto IP = CGF.Builder.saveIP();
auto *BB = llvm::BasicBlock::Create(C, "entry", F);
+ auto &Builder = CGF.Builder;
Builder.SetInsertPoint(BB);
- const auto BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(BlockTy);
- auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
- BlockPtr->setAlignment(BlockAlign);
- Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
- auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
- llvm::SmallVector<llvm::Value *, 2> Args;
- Args.push_back(Cast);
- for (auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I)
- Args.push_back(I);
- llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
- call->setCallingConv(Invoke->getCallingConv());
+ llvm::SmallVector<llvm::Value *, 2> Args(llvm::make_pointer_range(F->args()));
+ llvm::CallInst *Call = Builder.CreateCall(Invoke, Args);
+ Call->setCallingConv(Invoke->getCallingConv());
+
Builder.CreateRetVoid();
Builder.restoreIP(IP);
+ return F;
+}
- F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
- F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
- F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
- F->setMetadata("kernel_arg_base_type",
- llvm::MDNode::get(C, ArgBaseTypeNames));
- F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
- if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
- F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));
+namespace {
+class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
+};
+} // namespace
- return F;
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createDefaultTargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<DefaultTargetCodeGenInfo>(CGM.getTypes());
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h
index aa8bbb60a75f..7682f197041c 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h
@@ -38,21 +38,38 @@ class ABIInfo;
class CallArgList;
class CodeGenFunction;
class CGBlockInfo;
-class CGFunctionInfo;
+class SwiftABIInfo;
/// TargetCodeGenInfo - This class organizes various target-specific
/// codegeneration issues, like target-specific attributes, builtins and so
/// on.
class TargetCodeGenInfo {
- std::unique_ptr<ABIInfo> Info = nullptr;
+ std::unique_ptr<ABIInfo> Info;
+
+protected:
+ // Target hooks supporting Swift calling conventions. The target must
+ // initialize this field if it claims to support these calling conventions
+ // by returning true from TargetInfo::checkCallingConvention for them.
+ std::unique_ptr<SwiftABIInfo> SwiftInfo;
+
+ // Returns ABI info helper for the target. This is for use by derived classes.
+ template <typename T> const T &getABIInfo() const {
+ return static_cast<const T &>(*Info);
+ }
public:
- TargetCodeGenInfo(std::unique_ptr<ABIInfo> Info) : Info(std::move(Info)) {}
+ TargetCodeGenInfo(std::unique_ptr<ABIInfo> Info);
virtual ~TargetCodeGenInfo();
/// getABIInfo() - Returns ABI info helper for the target.
const ABIInfo &getABIInfo() const { return *Info; }
+ /// Returns Swift ABI info helper for the target.
+ const SwiftABIInfo &getSwiftABIInfo() const {
+ assert(SwiftInfo && "Swift ABI info has not been initialized");
+ return *SwiftInfo;
+ }
+
/// setTargetAttributes - Provides a convenient hook to handle extra
/// target-specific attributes for the given global.
virtual void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
@@ -64,6 +81,9 @@ public:
CodeGen::CodeGenModule &CGM,
const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const {}
+ /// Provides a convenient hook to handle extra target-specific globals.
+ virtual void emitTargetGlobals(CodeGen::CodeGenModule &CGM) const {}
+
/// Any further codegen related checks that need to be done on a function call
/// in a target specific manner.
virtual void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
@@ -187,9 +207,10 @@ public:
/// Return a constant used by UBSan as a signature to identify functions
/// possessing type information, or 0 if the platform is unsupported.
+ /// This magic number is invalid instruction encoding in many targets.
virtual llvm::Constant *
getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const {
- return nullptr;
+ return llvm::ConstantInt::get(CGM.Int32Ty, 0xc105cafe);
}
/// Determine whether a call to an unprototyped functions under
@@ -327,16 +348,21 @@ public:
/// convention and ABI as an OpenCL kernel. The wrapper function accepts
/// block context and block arguments in target-specific way and calls
/// the original block invoke function.
- virtual llvm::Function *
+ virtual llvm::Value *
createEnqueuedBlockKernel(CodeGenFunction &CGF,
llvm::Function *BlockInvokeFunc,
- llvm::Value *BlockLiteral) const;
+ llvm::Type *BlockTy) const;
/// \return true if the target supports alias from the unmangled name to the
/// mangled name of functions declared within an extern "C" region and marked
/// as 'used', and having internal linkage.
virtual bool shouldEmitStaticExternCAliases() const { return true; }
+ /// \return true if annonymous zero-sized bitfields should be emitted to
+ /// correctly distinguish between struct types whose memory layout is the
+ /// same, but whose layout may differ when used as argument passed by value
+ virtual bool shouldEmitDWARFBitFieldSeparators() const { return false; }
+
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const {}
/// Return the device-side type for the CUDA device builtin surface type.
@@ -350,6 +376,12 @@ public:
return nullptr;
}
+ /// Return the WebAssembly externref reference type.
+ virtual llvm::Type *getWasmExternrefReferenceType() const { return nullptr; }
+
+ /// Return the WebAssembly funcref reference type.
+ virtual llvm::Type *getWasmFuncrefReferenceType() const { return nullptr; }
+
/// Emit the device-side copy of the builtin surface type.
virtual bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF,
LValue Dst,
@@ -364,8 +396,164 @@ public:
// DO NOTHING by default.
return false;
}
+
+ /// Return an LLVM type that corresponds to an OpenCL type.
+ virtual llvm::Type *getOpenCLType(CodeGenModule &CGM, const Type *T) const {
+ return nullptr;
+ }
+
+protected:
+ static std::string qualifyWindowsLibrary(StringRef Lib);
+
+ void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const;
+};
+
+std::unique_ptr<TargetCodeGenInfo>
+createDefaultTargetCodeGenInfo(CodeGenModule &CGM);
+
+enum class AArch64ABIKind {
+ AAPCS = 0,
+ DarwinPCS,
+ Win64,
+};
+
+std::unique_ptr<TargetCodeGenInfo>
+createAArch64TargetCodeGenInfo(CodeGenModule &CGM, AArch64ABIKind Kind);
+
+std::unique_ptr<TargetCodeGenInfo>
+createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM, AArch64ABIKind K);
+
+std::unique_ptr<TargetCodeGenInfo>
+createAMDGPUTargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createARCTargetCodeGenInfo(CodeGenModule &CGM);
+
+enum class ARMABIKind {
+ APCS = 0,
+ AAPCS = 1,
+ AAPCS_VFP = 2,
+ AAPCS16_VFP = 3,
+};
+
+std::unique_ptr<TargetCodeGenInfo>
+createARMTargetCodeGenInfo(CodeGenModule &CGM, ARMABIKind Kind);
+
+std::unique_ptr<TargetCodeGenInfo>
+createWindowsARMTargetCodeGenInfo(CodeGenModule &CGM, ARMABIKind K);
+
+std::unique_ptr<TargetCodeGenInfo>
+createAVRTargetCodeGenInfo(CodeGenModule &CGM, unsigned NPR, unsigned NRR);
+
+std::unique_ptr<TargetCodeGenInfo>
+createBPFTargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createCSKYTargetCodeGenInfo(CodeGenModule &CGM, unsigned FLen);
+
+std::unique_ptr<TargetCodeGenInfo>
+createHexagonTargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createLanaiTargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createLoongArchTargetCodeGenInfo(CodeGenModule &CGM, unsigned GRLen,
+ unsigned FLen);
+
+std::unique_ptr<TargetCodeGenInfo>
+createM68kTargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createMIPSTargetCodeGenInfo(CodeGenModule &CGM, bool IsOS32);
+
+std::unique_ptr<TargetCodeGenInfo>
+createMSP430TargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createNVPTXTargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createPNaClTargetCodeGenInfo(CodeGenModule &CGM);
+
+enum class PPC64_SVR4_ABIKind {
+ ELFv1 = 0,
+ ELFv2,
};
+std::unique_ptr<TargetCodeGenInfo>
+createAIXTargetCodeGenInfo(CodeGenModule &CGM, bool Is64Bit);
+
+std::unique_ptr<TargetCodeGenInfo>
+createPPC32TargetCodeGenInfo(CodeGenModule &CGM, bool SoftFloatABI);
+
+std::unique_ptr<TargetCodeGenInfo>
+createPPC64TargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createPPC64_SVR4_TargetCodeGenInfo(CodeGenModule &CGM, PPC64_SVR4_ABIKind Kind,
+ bool SoftFloatABI);
+
+std::unique_ptr<TargetCodeGenInfo>
+createRISCVTargetCodeGenInfo(CodeGenModule &CGM, unsigned XLen, unsigned FLen,
+ bool EABI);
+
+std::unique_ptr<TargetCodeGenInfo>
+createCommonSPIRTargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createSPIRVTargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createSparcV8TargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createSparcV9TargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createSystemZTargetCodeGenInfo(CodeGenModule &CGM, bool HasVector,
+ bool SoftFloatABI);
+
+std::unique_ptr<TargetCodeGenInfo>
+createTCETargetCodeGenInfo(CodeGenModule &CGM);
+
+std::unique_ptr<TargetCodeGenInfo>
+createVETargetCodeGenInfo(CodeGenModule &CGM);
+
+enum class WebAssemblyABIKind {
+ MVP = 0,
+ ExperimentalMV = 1,
+};
+
+std::unique_ptr<TargetCodeGenInfo>
+createWebAssemblyTargetCodeGenInfo(CodeGenModule &CGM, WebAssemblyABIKind K);
+
+/// The AVX ABI level for X86 targets.
+enum class X86AVXABILevel {
+ None,
+ AVX,
+ AVX512,
+};
+
+std::unique_ptr<TargetCodeGenInfo> createX86_32TargetCodeGenInfo(
+ CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI,
+ unsigned NumRegisterParameters, bool SoftFloatABI);
+
+std::unique_ptr<TargetCodeGenInfo>
+createWinX86_32TargetCodeGenInfo(CodeGenModule &CGM, bool DarwinVectorABI,
+ bool Win32StructABI,
+ unsigned NumRegisterParameters);
+
+std::unique_ptr<TargetCodeGenInfo>
+createX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel);
+
+std::unique_ptr<TargetCodeGenInfo>
+createWinX86_64TargetCodeGenInfo(CodeGenModule &CGM, X86AVXABILevel AVXLevel);
+
+std::unique_ptr<TargetCodeGenInfo>
+createXCoreTargetCodeGenInfo(CodeGenModule &CGM);
+
} // namespace CodeGen
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/AArch64.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/AArch64.cpp
new file mode 100644
index 000000000000..ee7f95084d2e
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/AArch64.cpp
@@ -0,0 +1,827 @@
+//===- AArch64.cpp --------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// AArch64 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class AArch64ABIInfo : public ABIInfo {
+ AArch64ABIKind Kind;
+
+public:
+ AArch64ABIInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
+ : ABIInfo(CGT), Kind(Kind) {}
+
+private:
+ AArch64ABIKind getABIKind() const { return Kind; }
+ bool isDarwinPCS() const { return Kind == AArch64ABIKind::DarwinPCS; }
+
+ ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, bool IsVariadic,
+ unsigned CallingConvention) const;
+ ABIArgInfo coerceIllegalVector(QualType Ty) const;
+ bool isHomogeneousAggregateBaseType(QualType Ty) const override;
+ bool isHomogeneousAggregateSmallEnough(const Type *Ty,
+ uint64_t Members) const override;
+ bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;
+
+ bool isIllegalVectorType(QualType Ty) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override {
+ if (!::classifyReturnType(getCXXABI(), FI, *this))
+ FI.getReturnInfo() =
+ classifyReturnType(FI.getReturnType(), FI.isVariadic());
+
+ for (auto &it : FI.arguments())
+ it.info = classifyArgumentType(it.type, FI.isVariadic(),
+ FI.getCallingConvention());
+ }
+
+ Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+
+ Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override {
+ llvm::Type *BaseTy = CGF.ConvertType(Ty);
+ if (isa<llvm::ScalableVectorType>(BaseTy))
+ llvm::report_fatal_error("Passing SVE types to variadic functions is "
+ "currently not supported");
+
+ return Kind == AArch64ABIKind::Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
+ : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
+ : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
+ }
+
+ Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+
+ bool allowBFloatArgsAndRet() const override {
+ return getTarget().hasBFloat16Type();
+ }
+};
+
+class AArch64SwiftABIInfo : public SwiftABIInfo {
+public:
+ explicit AArch64SwiftABIInfo(CodeGenTypes &CGT)
+ : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}
+
+ bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
+ unsigned NumElts) const override;
+};
+
+class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
+ : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {
+ SwiftInfo = std::make_unique<AArch64SwiftABIInfo>(CGT);
+ }
+
+ StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
+ return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
+ }
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ return 31;
+ }
+
+ bool doesReturnSlotInterfereWithArgs() const override { return false; }
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override {
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD)
+ return;
+
+ const auto *TA = FD->getAttr<TargetAttr>();
+ if (TA == nullptr)
+ return;
+
+ ParsedTargetAttr Attr =
+ CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
+ if (Attr.BranchProtection.empty())
+ return;
+
+ TargetInfo::BranchProtectionInfo BPI;
+ StringRef Error;
+ (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
+ Attr.CPU, BPI, Error);
+ assert(Error.empty());
+
+ auto *Fn = cast<llvm::Function>(GV);
+ static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
+ Fn->addFnAttr("sign-return-address", SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
+
+ if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
+ Fn->addFnAttr("sign-return-address-key",
+ BPI.SignKey == LangOptions::SignReturnAddressKeyKind::AKey
+ ? "a_key"
+ : "b_key");
+ }
+
+ Fn->addFnAttr("branch-target-enforcement",
+ BPI.BranchTargetEnforcement ? "true" : "false");
+ Fn->addFnAttr("branch-protection-pauth-lr",
+ BPI.BranchProtectionPAuthLR ? "true" : "false");
+ Fn->addFnAttr("guarded-control-stack",
+ BPI.GuardedControlStack ? "true" : "false");
+ }
+
+ bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,
+ llvm::Type *Ty) const override {
+ if (CGF.getTarget().hasFeature("ls64")) {
+ auto *ST = dyn_cast<llvm::StructType>(Ty);
+ if (ST && ST->getNumElements() == 1) {
+ auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
+ if (AT && AT->getNumElements() == 8 &&
+ AT->getElementType()->isIntegerTy(64))
+ return true;
+ }
+ }
+ return TargetCodeGenInfo::isScalarizableAsmOperand(CGF, Ty);
+ }
+};
+
+class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
+public:
+ WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind K)
+ : AArch64TargetCodeGenInfo(CGT, K) {}
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override;
+
+ void getDependentLibraryOption(llvm::StringRef Lib,
+ llvm::SmallString<24> &Opt) const override {
+ Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
+ }
+
+ void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
+ llvm::SmallString<32> &Opt) const override {
+ Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
+ }
+};
+
+void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
+ AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
+ if (GV->isDeclaration())
+ return;
+ addStackProbeTargetAttributes(D, GV, CGM);
+}
+}
+
+ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const {
+ assert(Ty->isVectorType() && "expected vector type!");
+
+ const auto *VT = Ty->castAs<VectorType>();
+ if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
+ assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
+ assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
+ BuiltinType::UChar &&
+ "unexpected builtin type for SVE predicate!");
+ return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
+ llvm::Type::getInt1Ty(getVMContext()), 16));
+ }
+
+ if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
+ assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
+
+ const auto *BT = VT->getElementType()->castAs<BuiltinType>();
+ llvm::ScalableVectorType *ResType = nullptr;
+ switch (BT->getKind()) {
+ default:
+ llvm_unreachable("unexpected builtin type for SVE vector!");
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getInt8Ty(getVMContext()), 16);
+ break;
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getInt16Ty(getVMContext()), 8);
+ break;
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getInt32Ty(getVMContext()), 4);
+ break;
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getInt64Ty(getVMContext()), 2);
+ break;
+ case BuiltinType::Half:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getHalfTy(getVMContext()), 8);
+ break;
+ case BuiltinType::Float:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getFloatTy(getVMContext()), 4);
+ break;
+ case BuiltinType::Double:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getDoubleTy(getVMContext()), 2);
+ break;
+ case BuiltinType::BFloat16:
+ ResType = llvm::ScalableVectorType::get(
+ llvm::Type::getBFloatTy(getVMContext()), 8);
+ break;
+ }
+ return ABIArgInfo::getDirect(ResType);
+ }
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+ // Android promotes <2 x i8> to i16, not i32
+ if ((isAndroid() || isOHOSFamily()) && (Size <= 16)) {
+ llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
+ return ABIArgInfo::getDirect(ResType);
+ }
+ if (Size <= 32) {
+ llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
+ return ABIArgInfo::getDirect(ResType);
+ }
+ if (Size == 64) {
+ auto *ResType =
+ llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
+ return ABIArgInfo::getDirect(ResType);
+ }
+ if (Size == 128) {
+ auto *ResType =
+ llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
+ return ABIArgInfo::getDirect(ResType);
+ }
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+}
+
+ABIArgInfo
+AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadic,
+ unsigned CallingConvention) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ // Handle illegal vector types here.
+ if (isIllegalVectorType(Ty))
+ return coerceIllegalVector(Ty);
+
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ if (const auto *EIT = Ty->getAs<BitIntType>())
+ if (EIT->getNumBits() > 128)
+ return getNaturalAlignIndirect(Ty);
+
+ return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
+ ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
+ }
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
+ CGCXXABI::RAA_DirectInMemory);
+ }
+
+ // Empty records are always ignored on Darwin, but actually passed in C++ mode
+ // elsewhere for GNU compatibility.
+ uint64_t Size = getContext().getTypeSize(Ty);
+ bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
+ if (IsEmpty || Size == 0) {
+ if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
+ return ABIArgInfo::getIgnore();
+
+ // GNU C mode. The only argument that gets ignored is an empty one with size
+ // 0.
+ if (IsEmpty && Size == 0)
+ return ABIArgInfo::getIgnore();
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+ }
+
+ // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ bool IsWin64 = Kind == AArch64ABIKind::Win64 ||
+ CallingConvention == llvm::CallingConv::Win64;
+ bool IsWinVariadic = IsWin64 && IsVariadic;
+ // In variadic functions on Windows, all composite types are treated alike,
+ // no special handling of HFAs/HVAs.
+ if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) {
+ if (Kind != AArch64ABIKind::AAPCS)
+ return ABIArgInfo::getDirect(
+ llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
+
+ // For HFAs/HVAs, cap the argument alignment to 16, otherwise
+ // set it to 8 according to the AAPCS64 document.
+ unsigned Align =
+ getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
+ Align = (Align >= 16) ? 16 : 8;
+ return ABIArgInfo::getDirect(
+ llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0,
+ nullptr, true, Align);
+ }
+
+ // Aggregates <= 16 bytes are passed directly in registers or on the stack.
+ if (Size <= 128) {
+ // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
+ // same size and alignment.
+ if (getTarget().isRenderScriptTarget()) {
+ return coerceToIntArray(Ty, getContext(), getVMContext());
+ }
+ unsigned Alignment;
+ if (Kind == AArch64ABIKind::AAPCS) {
+ Alignment = getContext().getTypeUnadjustedAlign(Ty);
+ Alignment = Alignment < 128 ? 64 : 128;
+ } else {
+ Alignment =
+ std::max(getContext().getTypeAlign(Ty),
+ (unsigned)getTarget().getPointerWidth(LangAS::Default));
+ }
+ Size = llvm::alignTo(Size, Alignment);
+
+ // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
+ // For aggregates with 16-byte alignment, we use i128.
+ llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
+ return ABIArgInfo::getDirect(
+ Size == Alignment ? BaseTy
+ : llvm::ArrayType::get(BaseTy, Size / Alignment));
+ }
+
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+}
+
+ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
+ bool IsVariadic) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (const auto *VT = RetTy->getAs<VectorType>()) {
+ if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
+ VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
+ return coerceIllegalVector(RetTy);
+ }
+
+ // Large vector types should be returned via memory.
+ if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
+ return getNaturalAlignIndirect(RetTy);
+
+ if (!isAggregateTypeForABI(RetTy)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
+ if (EIT->getNumBits() > 128)
+ return getNaturalAlignIndirect(RetTy);
+
+ return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
+ ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
+ }
+
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
+ return ABIArgInfo::getIgnore();
+
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (isHomogeneousAggregate(RetTy, Base, Members) &&
+ !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
+ IsVariadic))
+ // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
+ return ABIArgInfo::getDirect();
+
+ // Aggregates <= 16 bytes are returned directly in registers or on the stack.
+ if (Size <= 128) {
+ // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
+ // same size and alignment.
+ if (getTarget().isRenderScriptTarget()) {
+ return coerceToIntArray(RetTy, getContext(), getVMContext());
+ }
+
+ if (Size <= 64 && getDataLayout().isLittleEndian()) {
+ // Composite types are returned in lower bits of a 64-bit register for LE,
+ // and in higher bits for BE. However, integer types are always returned
+ // in lower bits for both LE and BE, and they are not rounded up to
+ // 64-bits. We can skip rounding up of composite types for LE, but not for
+ // BE, otherwise composite types will be indistinguishable from integer
+ // types.
+ return ABIArgInfo::getDirect(
+ llvm::IntegerType::get(getVMContext(), Size));
+ }
+
+ unsigned Alignment = getContext().getTypeAlign(RetTy);
+ Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes
+
+ // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
+ // For aggregates with 16-byte alignment, we use i128.
+ if (Alignment < 128 && Size == 128) {
+ llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
+ }
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
+ }
+
+ return getNaturalAlignIndirect(RetTy);
+}
+
+/// isIllegalVectorType - check whether the vector type is legal for AArch64.
+bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ // Check whether VT is a fixed-length SVE vector. These types are
+ // represented as scalable vectors in function args/return and must be
+ // coerced from fixed vectors.
+ if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
+ VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
+ return true;
+
+ // Check whether VT is legal.
+ unsigned NumElements = VT->getNumElements();
+ uint64_t Size = getContext().getTypeSize(VT);
+ // NumElements should be power of 2.
+ if (!llvm::isPowerOf2_32(NumElements))
+ return true;
+
+ // arm64_32 has to be compatible with the ARM logic here, which allows huge
+ // vectors for some reason.
+ llvm::Triple Triple = getTarget().getTriple();
+ if (Triple.getArch() == llvm::Triple::aarch64_32 &&
+ Triple.isOSBinFormatMachO())
+ return Size <= 32;
+
+ return Size != 64 && (Size != 128 || NumElements == 1);
+ }
+ return false;
+}
+
+bool AArch64SwiftABIInfo::isLegalVectorType(CharUnits VectorSize,
+ llvm::Type *EltTy,
+ unsigned NumElts) const {
+ if (!llvm::isPowerOf2_32(NumElts))
+ return false;
+ if (VectorSize.getQuantity() != 8 &&
+ (VectorSize.getQuantity() != 16 || NumElts == 1))
+ return false;
+ return true;
+}
+
+bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
+ // Homogeneous aggregates for AAPCS64 must have base types of a floating
+ // point type or a short-vector type. This is the same as the 32-bit ABI,
+ // but with the difference that any floating-point type is allowed,
+ // including __fp16.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ if (BT->isFloatingPoint())
+ return true;
+ } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ unsigned VecSize = getContext().getTypeSize(VT);
+ if (VecSize == 64 || VecSize == 128)
+ return true;
+ }
+ return false;
+}
+
+bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
+ uint64_t Members) const {
+ return Members <= 4;
+}
+
+bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate()
+ const {
+ // AAPCS64 says that the rule for whether something is a homogeneous
+ // aggregate is applied to the output of the data layout decision. So
+ // anything that doesn't affect the data layout also does not affect
+ // homogeneity. In particular, zero-length bitfields don't stop a struct
+ // being homogeneous.
+ return true;
+}
+
+Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ ABIArgInfo AI = classifyArgumentType(Ty, /*IsVariadic=*/true,
+ CGF.CurFnInfo->getCallingConvention());
+ // Empty records are ignored for parameter passing purposes.
+ if (AI.isIgnore()) {
+ uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
+ CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
+ VAListAddr = VAListAddr.withElementType(CGF.Int8PtrTy);
+ auto *Load = CGF.Builder.CreateLoad(VAListAddr);
+ return Address(Load, CGF.ConvertTypeForMem(Ty), SlotSize);
+ }
+
+ bool IsIndirect = AI.isIndirect();
+
+ llvm::Type *BaseTy = CGF.ConvertType(Ty);
+ if (IsIndirect)
+ BaseTy = llvm::PointerType::getUnqual(BaseTy);
+ else if (AI.getCoerceToType())
+ BaseTy = AI.getCoerceToType();
+
+ unsigned NumRegs = 1;
+ if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
+ BaseTy = ArrTy->getElementType();
+ NumRegs = ArrTy->getNumElements();
+ }
+ bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
+
+ // The AArch64 va_list type and handling is specified in the Procedure Call
+ // Standard, section B.4:
+ //
+ // struct {
+ // void *__stack;
+ // void *__gr_top;
+ // void *__vr_top;
+ // int __gr_offs;
+ // int __vr_offs;
+ // };
+
+ llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
+ llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+ llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+
+ CharUnits TySize = getContext().getTypeSizeInChars(Ty);
+ CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);
+
+ Address reg_offs_p = Address::invalid();
+ llvm::Value *reg_offs = nullptr;
+ int reg_top_index;
+ int RegSize = IsIndirect ? 8 : TySize.getQuantity();
+ if (!IsFPR) {
+ // 3 is the field number of __gr_offs
+ reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
+ reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
+ reg_top_index = 1; // field number for __gr_top
+ RegSize = llvm::alignTo(RegSize, 8);
+ } else {
+ // 4 is the field number of __vr_offs.
+ reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
+ reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
+ reg_top_index = 2; // field number for __vr_top
+ RegSize = 16 * NumRegs;
+ }
+
+ //=======================================
+ // Find out where argument was passed
+ //=======================================
+
+ // If reg_offs >= 0 we're already using the stack for this type of
+ // argument. We don't want to keep updating reg_offs (in case it overflows,
+ // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
+ // whatever they get).
+ llvm::Value *UsingStack = nullptr;
+ UsingStack = CGF.Builder.CreateICmpSGE(
+ reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
+
+ CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
+
+ // Otherwise, at least some kind of argument could go in these registers, the
+ // question is whether this particular type is too big.
+ CGF.EmitBlock(MaybeRegBlock);
+
+ // Integer arguments may need to correct register alignment (for example a
+ // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
+ // align __gr_offs to calculate the potential address.
+ if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
+ int Align = TyAlign.getQuantity();
+
+ reg_offs = CGF.Builder.CreateAdd(
+ reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
+ "align_regoffs");
+ reg_offs = CGF.Builder.CreateAnd(
+ reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
+ "aligned_regoffs");
+ }
+
+ // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
+ // The fact that this is done unconditionally reflects the fact that
+ // allocating an argument to the stack also uses up all the remaining
+ // registers of the appropriate kind.
+ llvm::Value *NewOffset = nullptr;
+ NewOffset = CGF.Builder.CreateAdd(
+ reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
+ CGF.Builder.CreateStore(NewOffset, reg_offs_p);
+
+ // Now we're in a position to decide whether this argument really was in
+ // registers or not.
+ llvm::Value *InRegs = nullptr;
+ InRegs = CGF.Builder.CreateICmpSLE(
+ NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
+
+ CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
+
+ //=======================================
+ // Argument was in registers
+ //=======================================
+
+ // Now we emit the code for if the argument was originally passed in
+ // registers. First start the appropriate block:
+ CGF.EmitBlock(InRegBlock);
+
+ llvm::Value *reg_top = nullptr;
+ Address reg_top_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
+ reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
+ Address BaseAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, reg_top, reg_offs),
+ CGF.Int8Ty, CharUnits::fromQuantity(IsFPR ? 16 : 8));
+ Address RegAddr = Address::invalid();
+ llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty), *ElementTy = MemTy;
+
+ if (IsIndirect) {
+ // If it's been passed indirectly (actually a struct), whatever we find from
+ // stored registers or on the stack will actually be a struct **.
+ MemTy = llvm::PointerType::getUnqual(MemTy);
+ }
+
+ const Type *Base = nullptr;
+ uint64_t NumMembers = 0;
+ bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
+ if (IsHFA && NumMembers > 1) {
+ // Homogeneous aggregates passed in registers will have their elements split
+ // and stored 16-bytes apart regardless of size (they're notionally in qN,
+ // qN+1, ...). We reload and store into a temporary local variable
+ // contiguously.
+ assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
+ auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
+ llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
+ llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
+ Address Tmp = CGF.CreateTempAlloca(HFATy,
+ std::max(TyAlign, BaseTyInfo.Align));
+
+ // On big-endian platforms, the value will be right-aligned in its slot.
+ int Offset = 0;
+ if (CGF.CGM.getDataLayout().isBigEndian() &&
+ BaseTyInfo.Width.getQuantity() < 16)
+ Offset = 16 - BaseTyInfo.Width.getQuantity();
+
+ for (unsigned i = 0; i < NumMembers; ++i) {
+ CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
+ Address LoadAddr =
+ CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
+ LoadAddr = LoadAddr.withElementType(BaseTy);
+
+ Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);
+
+ llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
+ CGF.Builder.CreateStore(Elem, StoreAddr);
+ }
+
+ RegAddr = Tmp.withElementType(MemTy);
+ } else {
+ // Otherwise the object is contiguous in memory.
+
+ // It might be right-aligned in its slot.
+ CharUnits SlotSize = BaseAddr.getAlignment();
+ if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
+ (IsHFA || !isAggregateTypeForABI(Ty)) &&
+ TySize < SlotSize) {
+ CharUnits Offset = SlotSize - TySize;
+ BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
+ }
+
+ RegAddr = BaseAddr.withElementType(MemTy);
+ }
+
+ CGF.EmitBranch(ContBlock);
+
+ //=======================================
+ // Argument was on the stack
+ //=======================================
+ CGF.EmitBlock(OnStackBlock);
+
+ Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
+ llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
+
+ // Again, stack arguments may need realignment. In this case both integer and
+ // floating-point ones might be affected.
+ if (!IsIndirect && TyAlign.getQuantity() > 8) {
+ int Align = TyAlign.getQuantity();
+
+ OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
+
+ OnStackPtr = CGF.Builder.CreateAdd(
+ OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
+ "align_stack");
+ OnStackPtr = CGF.Builder.CreateAnd(
+ OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
+ "align_stack");
+
+ OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
+ }
+ Address OnStackAddr = Address(OnStackPtr, CGF.Int8Ty,
+ std::max(CharUnits::fromQuantity(8), TyAlign));
+
+ // All stack slots are multiples of 8 bytes.
+ CharUnits StackSlotSize = CharUnits::fromQuantity(8);
+ CharUnits StackSize;
+ if (IsIndirect)
+ StackSize = StackSlotSize;
+ else
+ StackSize = TySize.alignTo(StackSlotSize);
+
+ llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
+ llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP(
+ CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack");
+
+ // Write the new value of __stack for the next call to va_arg
+ CGF.Builder.CreateStore(NewStack, stack_p);
+
+ if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
+ TySize < StackSlotSize) {
+ CharUnits Offset = StackSlotSize - TySize;
+ OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
+ }
+
+ OnStackAddr = OnStackAddr.withElementType(MemTy);
+
+ CGF.EmitBranch(ContBlock);
+
+ //=======================================
+ // Tidy up
+ //=======================================
+ CGF.EmitBlock(ContBlock);
+
+ Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, OnStackAddr,
+ OnStackBlock, "vaargs.addr");
+
+ if (IsIndirect)
+ return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), ElementTy,
+ TyAlign);
+
+ return ResAddr;
+}
+
+Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // The backend's lowering doesn't support va_arg for aggregates or
+ // illegal vector types. Lower VAArg here for these cases and use
+ // the LLVM va_arg instruction for everything else.
+ if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
+ return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
+
+ uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
+ CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
+
+ // Empty records are ignored for parameter passing purposes.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return Address(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"),
+ CGF.ConvertTypeForMem(Ty), SlotSize);
+
+ // The size of the actual thing passed, which might end up just
+ // being a pointer for indirect types.
+ auto TyInfo = getContext().getTypeInfoInChars(Ty);
+
+ // Arguments bigger than 16 bytes which aren't homogeneous
+ // aggregates should be passed indirectly.
+ bool IsIndirect = false;
+ if (TyInfo.Width.getQuantity() > 16) {
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
+ }
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
+ TyInfo, SlotSize, /*AllowHigherAlign*/ true);
+}
+
+Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ bool IsIndirect = false;
+
+ // Composites larger than 16 bytes are passed by reference.
+ if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) > 128)
+ IsIndirect = true;
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
+ CGF.getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(8),
+ /*allowHigherAlign*/ false);
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createAArch64TargetCodeGenInfo(CodeGenModule &CGM,
+ AArch64ABIKind Kind) {
+ return std::make_unique<AArch64TargetCodeGenInfo>(CGM.getTypes(), Kind);
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM,
+ AArch64ABIKind K) {
+ return std::make_unique<WindowsAArch64TargetCodeGenInfo>(CGM.getTypes(), K);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/AMDGPU.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/AMDGPU.cpp
new file mode 100644
index 000000000000..03ac6b78598f
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/AMDGPU.cpp
@@ -0,0 +1,654 @@
+//===- AMDGPU.cpp ---------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// AMDGPU ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class AMDGPUABIInfo final : public DefaultABIInfo {
+private:
+ static const unsigned MaxNumRegsForArgsRet = 16;
+
+ unsigned numRegsForType(QualType Ty) const;
+
+ bool isHomogeneousAggregateBaseType(QualType Ty) const override;
+ bool isHomogeneousAggregateSmallEnough(const Type *Base,
+ uint64_t Members) const override;
+
+ // Coerce HIP scalar pointer arguments from generic pointers to global ones.
+ llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
+ unsigned ToAS) const {
+ // Single value types.
+ auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(Ty);
+ if (PtrTy && PtrTy->getAddressSpace() == FromAS)
+ return llvm::PointerType::get(Ty->getContext(), ToAS);
+ return Ty;
+ }
+
+public:
+ explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) :
+ DefaultABIInfo(CGT) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
+ ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegsLeft) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
+ return true;
+}
+
+bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
+ const Type *Base, uint64_t Members) const {
+ uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;
+
+ // Homogeneous Aggregates may occupy at most 16 registers.
+ return Members * NumRegs <= MaxNumRegsForArgsRet;
+}
+
+/// Estimate number of registers the type will use when passed in registers.
+unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
+ unsigned NumRegs = 0;
+
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ // Compute from the number of elements. The reported size is based on the
+ // in-memory size, which includes the padding 4th element for 3-vectors.
+ QualType EltTy = VT->getElementType();
+ unsigned EltSize = getContext().getTypeSize(EltTy);
+
+ // 16-bit element vectors should be passed as packed.
+ if (EltSize == 16)
+ return (VT->getNumElements() + 1) / 2;
+
+ unsigned EltNumRegs = (EltSize + 31) / 32;
+ return EltNumRegs * VT->getNumElements();
+ }
+
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ assert(!RD->hasFlexibleArrayMember());
+
+ for (const FieldDecl *Field : RD->fields()) {
+ QualType FieldTy = Field->getType();
+ NumRegs += numRegsForType(FieldTy);
+ }
+
+ return NumRegs;
+ }
+
+ return (getContext().getTypeSize(Ty) + 31) / 32;
+}
+
+void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ llvm::CallingConv::ID CC = FI.getCallingConvention();
+
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+ unsigned NumRegsLeft = MaxNumRegsForArgsRet;
+ for (auto &Arg : FI.arguments()) {
+ if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
+ Arg.info = classifyKernelArgumentType(Arg.type);
+ } else {
+ Arg.info = classifyArgumentType(Arg.type, NumRegsLeft);
+ }
+ }
+}
+
+Address AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ llvm_unreachable("AMDGPU does not support varargs");
+}
+
+ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
+ if (isAggregateTypeForABI(RetTy)) {
+ // Records with non-trivial destructors/copy-constructors should not be
+ // returned by value.
+ if (!getRecordArgABI(RetTy, getCXXABI())) {
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), RetTy, true))
+ return ABIArgInfo::getIgnore();
+
+ // Lower single-element structs to just return a regular value.
+ if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
+ return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+
+ if (const RecordType *RT = RetTy->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return DefaultABIInfo::classifyReturnType(RetTy);
+ }
+
+ // Pack aggregates <= 4 bytes into single VGPR or pair.
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size <= 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+
+ if (Size <= 32)
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+
+ if (Size <= 64) {
+ llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
+ }
+
+ if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
+ return ABIArgInfo::getDirect();
+ }
+ }
+
+ // Otherwise just do the default thing.
+ return DefaultABIInfo::classifyReturnType(RetTy);
+}
+
+/// For kernels all parameters are really passed in a special buffer. It doesn't
+/// make sense to pass anything byval, so everything must be direct.
+ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ // TODO: Can we omit empty structs?
+
+ if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
+ Ty = QualType(SeltTy, 0);
+
+ llvm::Type *OrigLTy = CGT.ConvertType(Ty);
+ llvm::Type *LTy = OrigLTy;
+ if (getContext().getLangOpts().HIP) {
+ LTy = coerceKernelArgumentType(
+ OrigLTy, /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default),
+ /*ToAS=*/getContext().getTargetAddressSpace(LangAS::cuda_device));
+ }
+
+ // FIXME: Should also use this for OpenCL, but it requires addressing the
+ // problem of kernels being called.
+ //
+ // FIXME: This doesn't apply the optimization of coercing pointers in structs
+ // to global address space when using byref. This would require implementing a
+ // new kind of coercion of the in-memory type when for indirect arguments.
+ if (!getContext().getLangOpts().OpenCL && LTy == OrigLTy &&
+ isAggregateTypeForABI(Ty)) {
+ return ABIArgInfo::getIndirectAliased(
+ getContext().getTypeAlignInChars(Ty),
+ getContext().getTargetAddressSpace(LangAS::opencl_constant),
+ false /*Realign*/, nullptr /*Padding*/);
+ }
+
+ // If we set CanBeFlattened to true, CodeGen will expand the struct to its
+ // individual elements, which confuses the Clover OpenCL backend; therefore we
+ // have to set it to false here. Other args of getDirect() are just defaults.
+ return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
+}
+
+ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty,
+ unsigned &NumRegsLeft) const {
+ assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");
+
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ if (isAggregateTypeForABI(Ty)) {
+ // Records with non-trivial destructors/copy-constructors should not be
+ // passed by value.
+ if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ // Lower single-element structs to just pass a regular value. TODO: We
+ // could do reasonable-size multiple-element structs too, using getExpand(),
+ // though watch out for things like bitfields.
+ if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
+ return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return DefaultABIInfo::classifyArgumentType(Ty);
+ }
+
+ // Pack aggregates <= 8 bytes into single VGPR or pair.
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size <= 64) {
+ unsigned NumRegs = (Size + 31) / 32;
+ NumRegsLeft -= std::min(NumRegsLeft, NumRegs);
+
+ if (Size <= 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+
+ if (Size <= 32)
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+
+ // XXX: Should this be i64 instead, and should the limit increase?
+ llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
+ }
+
+ if (NumRegsLeft > 0) {
+ unsigned NumRegs = numRegsForType(Ty);
+ if (NumRegsLeft >= NumRegs) {
+ NumRegsLeft -= NumRegs;
+ return ABIArgInfo::getDirect();
+ }
+ }
+
+ // Use pass-by-reference in stead of pass-by-value for struct arguments in
+ // function ABI.
+ return ABIArgInfo::getIndirectAliased(
+ getContext().getTypeAlignInChars(Ty),
+ getContext().getTargetAddressSpace(LangAS::opencl_private));
+ }
+
+ // Otherwise just do the default thing.
+ ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty);
+ if (!ArgInfo.isIndirect()) {
+ unsigned NumRegs = numRegsForType(Ty);
+ NumRegsLeft -= std::min(NumRegs, NumRegsLeft);
+ }
+
+ return ArgInfo;
+}
+
+class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<AMDGPUABIInfo>(CGT)) {}
+
+ void setFunctionDeclAttributes(const FunctionDecl *FD, llvm::Function *F,
+ CodeGenModule &CGM) const;
+
+ void emitTargetGlobals(CodeGen::CodeGenModule &CGM) const override;
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const override;
+ unsigned getOpenCLKernelCallingConv() const override;
+
+ llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
+ llvm::PointerType *T, QualType QT) const override;
+
+ LangAS getASTAllocaAddressSpace() const override {
+ return getLangASFromTargetAS(
+ getABIInfo().getDataLayout().getAllocaAddrSpace());
+ }
+ LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
+ const VarDecl *D) const override;
+ llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
+ SyncScope Scope,
+ llvm::AtomicOrdering Ordering,
+ llvm::LLVMContext &Ctx) const override;
+ llvm::Value *createEnqueuedBlockKernel(CodeGenFunction &CGF,
+ llvm::Function *BlockInvokeFunc,
+ llvm::Type *BlockTy) const override;
+ bool shouldEmitStaticExternCAliases() const override;
+ bool shouldEmitDWARFBitFieldSeparators() const override;
+ void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
+};
+}
+
+static bool requiresAMDGPUProtectedVisibility(const Decl *D,
+ llvm::GlobalValue *GV) {
+ if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
+ return false;
+
+ return !D->hasAttr<OMPDeclareTargetDeclAttr>() &&
+ (D->hasAttr<OpenCLKernelAttr>() ||
+ (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) ||
+ (isa<VarDecl>(D) &&
+ (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
+ cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinSurfaceType() ||
+ cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType())));
+}
+
+void AMDGPUTargetCodeGenInfo::setFunctionDeclAttributes(
+ const FunctionDecl *FD, llvm::Function *F, CodeGenModule &M) const {
+ const auto *ReqdWGS =
+ M.getLangOpts().OpenCL ? FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
+ const bool IsOpenCLKernel =
+ M.getLangOpts().OpenCL && FD->hasAttr<OpenCLKernelAttr>();
+ const bool IsHIPKernel = M.getLangOpts().HIP && FD->hasAttr<CUDAGlobalAttr>();
+
+ const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
+ if (ReqdWGS || FlatWGS) {
+ M.handleAMDGPUFlatWorkGroupSizeAttr(F, FlatWGS, ReqdWGS);
+ } else if (IsOpenCLKernel || IsHIPKernel) {
+ // By default, restrict the maximum size to a value specified by
+ // --gpu-max-threads-per-block=n or its default value for HIP.
+ const unsigned OpenCLDefaultMaxWorkGroupSize = 256;
+ const unsigned DefaultMaxWorkGroupSize =
+ IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize
+ : M.getLangOpts().GPUMaxThreadsPerBlock;
+ std::string AttrVal =
+ std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize);
+ F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
+ }
+
+ if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>())
+ M.handleAMDGPUWavesPerEUAttr(F, Attr);
+
+ if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
+ unsigned NumSGPR = Attr->getNumSGPR();
+
+ if (NumSGPR != 0)
+ F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
+ }
+
+ if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
+ uint32_t NumVGPR = Attr->getNumVGPR();
+
+ if (NumVGPR != 0)
+ F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
+ }
+}
+
+/// Emits control constants used to change per-architecture behaviour in the
+/// AMDGPU ROCm device libraries.
+void AMDGPUTargetCodeGenInfo::emitTargetGlobals(
+ CodeGen::CodeGenModule &CGM) const {
+ StringRef Name = "__oclc_ABI_version";
+ llvm::GlobalVariable *OriginalGV = CGM.getModule().getNamedGlobal(Name);
+ if (OriginalGV && !llvm::GlobalVariable::isExternalLinkage(OriginalGV->getLinkage()))
+ return;
+
+ if (CGM.getTarget().getTargetOpts().CodeObjectVersion ==
+ llvm::CodeObjectVersionKind::COV_None)
+ return;
+
+ auto *Type = llvm::IntegerType::getIntNTy(CGM.getModule().getContext(), 32);
+ llvm::Constant *COV = llvm::ConstantInt::get(
+ Type, CGM.getTarget().getTargetOpts().CodeObjectVersion);
+
+ // It needs to be constant weak_odr without externally_initialized so that
+ // the load instuction can be eliminated by the IPSCCP.
+ auto *GV = new llvm::GlobalVariable(
+ CGM.getModule(), Type, true, llvm::GlobalValue::WeakODRLinkage, COV, Name,
+ nullptr, llvm::GlobalValue::ThreadLocalMode::NotThreadLocal,
+ CGM.getContext().getTargetAddressSpace(LangAS::opencl_constant));
+ GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Local);
+ GV->setVisibility(llvm::GlobalValue::VisibilityTypes::HiddenVisibility);
+
+ // Replace any external references to this variable with the new global.
+ if (OriginalGV) {
+ OriginalGV->replaceAllUsesWith(GV);
+ GV->takeName(OriginalGV);
+ OriginalGV->eraseFromParent();
+ }
+}
+
+void AMDGPUTargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
+ if (requiresAMDGPUProtectedVisibility(D, GV)) {
+ GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
+ GV->setDSOLocal(true);
+ }
+
+ if (GV->isDeclaration())
+ return;
+
+ llvm::Function *F = dyn_cast<llvm::Function>(GV);
+ if (!F)
+ return;
+
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (FD)
+ setFunctionDeclAttributes(FD, F, M);
+
+ if (M.getContext().getTargetInfo().allowAMDGPUUnsafeFPAtomics())
+ F->addFnAttr("amdgpu-unsafe-fp-atomics", "true");
+
+ if (!getABIInfo().getCodeGenOpts().EmitIEEENaNCompliantInsts)
+ F->addFnAttr("amdgpu-ieee", "false");
+}
+
+unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
+ return llvm::CallingConv::AMDGPU_KERNEL;
+}
+
+// Currently LLVM assumes null pointers always have value 0,
+// which results in incorrectly transformed IR. Therefore, instead of
+// emitting null pointers in private and local address spaces, a null
+// pointer in generic address space is emitted which is casted to a
+// pointer in local or private address space.
+llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
+ const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
+ QualType QT) const {
+ if (CGM.getContext().getTargetNullPointerValue(QT) == 0)
+ return llvm::ConstantPointerNull::get(PT);
+
+ auto &Ctx = CGM.getContext();
+ auto NPT = llvm::PointerType::get(
+ PT->getContext(), Ctx.getTargetAddressSpace(LangAS::opencl_generic));
+ return llvm::ConstantExpr::getAddrSpaceCast(
+ llvm::ConstantPointerNull::get(NPT), PT);
+}
+
+LangAS
+AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
+ const VarDecl *D) const {
+ assert(!CGM.getLangOpts().OpenCL &&
+ !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
+ "Address space agnostic languages only");
+ LangAS DefaultGlobalAS = getLangASFromTargetAS(
+ CGM.getContext().getTargetAddressSpace(LangAS::opencl_global));
+ if (!D)
+ return DefaultGlobalAS;
+
+ LangAS AddrSpace = D->getType().getAddressSpace();
+ if (AddrSpace != LangAS::Default)
+ return AddrSpace;
+
+ // Only promote to address space 4 if VarDecl has constant initialization.
+ if (D->getType().isConstantStorage(CGM.getContext(), false, false) &&
+ D->hasConstantInitialization()) {
+ if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
+ return *ConstAS;
+ }
+ return DefaultGlobalAS;
+}
+
+llvm::SyncScope::ID
+AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
+ SyncScope Scope,
+ llvm::AtomicOrdering Ordering,
+ llvm::LLVMContext &Ctx) const {
+ std::string Name;
+ switch (Scope) {
+ case SyncScope::HIPSingleThread:
+ case SyncScope::SingleScope:
+ Name = "singlethread";
+ break;
+ case SyncScope::HIPWavefront:
+ case SyncScope::OpenCLSubGroup:
+ case SyncScope::WavefrontScope:
+ Name = "wavefront";
+ break;
+ case SyncScope::HIPWorkgroup:
+ case SyncScope::OpenCLWorkGroup:
+ case SyncScope::WorkgroupScope:
+ Name = "workgroup";
+ break;
+ case SyncScope::HIPAgent:
+ case SyncScope::OpenCLDevice:
+ case SyncScope::DeviceScope:
+ Name = "agent";
+ break;
+ case SyncScope::SystemScope:
+ case SyncScope::HIPSystem:
+ case SyncScope::OpenCLAllSVMDevices:
+ Name = "";
+ break;
+ }
+
+ if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
+ if (!Name.empty())
+ Name = Twine(Twine(Name) + Twine("-")).str();
+
+ Name = Twine(Twine(Name) + Twine("one-as")).str();
+ }
+
+ return Ctx.getOrInsertSyncScopeID(Name);
+}
+
+bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
+ return false;
+}
+
+bool AMDGPUTargetCodeGenInfo::shouldEmitDWARFBitFieldSeparators() const {
+ return true;
+}
+
+void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(
+ const FunctionType *&FT) const {
+ FT = getABIInfo().getContext().adjustFunctionType(
+ FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
+}
+
+/// Create an OpenCL kernel for an enqueued block.
+///
+/// The type of the first argument (the block literal) is the struct type
+/// of the block literal instead of a pointer type. The first argument
+/// (block literal) is passed directly by value to the kernel. The kernel
+/// allocates the same type of struct on stack and stores the block literal
+/// to it and passes its pointer to the block invoke function. The kernel
+/// has "enqueued-block" function attribute and kernel argument metadata.
+llvm::Value *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
+ CodeGenFunction &CGF, llvm::Function *Invoke, llvm::Type *BlockTy) const {
+ auto &Builder = CGF.Builder;
+ auto &C = CGF.getLLVMContext();
+
+ auto *InvokeFT = Invoke->getFunctionType();
+ llvm::SmallVector<llvm::Type *, 2> ArgTys;
+ llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
+ llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
+ llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
+ llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
+ llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
+ llvm::SmallVector<llvm::Metadata *, 8> ArgNames;
+
+ ArgTys.push_back(BlockTy);
+ ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
+ AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
+ ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
+ ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
+ AccessQuals.push_back(llvm::MDString::get(C, "none"));
+ ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
+ for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
+ ArgTys.push_back(InvokeFT->getParamType(I));
+ ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
+ AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
+ AccessQuals.push_back(llvm::MDString::get(C, "none"));
+ ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
+ ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
+ ArgNames.push_back(
+ llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
+ }
+ std::string Name = Invoke->getName().str() + "_kernel";
+ auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
+ auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
+ &CGF.CGM.getModule());
+ F->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
+
+ llvm::AttrBuilder KernelAttrs(C);
+ // FIXME: The invoke isn't applying the right attributes either
+ // FIXME: This is missing setTargetAttributes
+ CGF.CGM.addDefaultFunctionDefinitionAttributes(KernelAttrs);
+ KernelAttrs.addAttribute("enqueued-block");
+ F->addFnAttrs(KernelAttrs);
+
+ auto IP = CGF.Builder.saveIP();
+ auto *BB = llvm::BasicBlock::Create(C, "entry", F);
+ Builder.SetInsertPoint(BB);
+ const auto BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(BlockTy);
+ auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
+ BlockPtr->setAlignment(BlockAlign);
+ Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
+ auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
+ llvm::SmallVector<llvm::Value *, 2> Args;
+ Args.push_back(Cast);
+ for (llvm::Argument &A : llvm::drop_begin(F->args()))
+ Args.push_back(&A);
+ llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
+ call->setCallingConv(Invoke->getCallingConv());
+ Builder.CreateRetVoid();
+ Builder.restoreIP(IP);
+
+ F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
+ F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
+ F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
+ F->setMetadata("kernel_arg_base_type",
+ llvm::MDNode::get(C, ArgBaseTypeNames));
+ F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
+ if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
+ F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));
+
+ return F;
+}
+
+void CodeGenModule::handleAMDGPUFlatWorkGroupSizeAttr(
+ llvm::Function *F, const AMDGPUFlatWorkGroupSizeAttr *FlatWGS,
+ const ReqdWorkGroupSizeAttr *ReqdWGS, int32_t *MinThreadsVal,
+ int32_t *MaxThreadsVal) {
+ unsigned Min = 0;
+ unsigned Max = 0;
+ if (FlatWGS) {
+ Min = FlatWGS->getMin()->EvaluateKnownConstInt(getContext()).getExtValue();
+ Max = FlatWGS->getMax()->EvaluateKnownConstInt(getContext()).getExtValue();
+ }
+ if (ReqdWGS && Min == 0 && Max == 0)
+ Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();
+
+ if (Min != 0) {
+ assert(Min <= Max && "Min must be less than or equal Max");
+
+ if (MinThreadsVal)
+ *MinThreadsVal = Min;
+ if (MaxThreadsVal)
+ *MaxThreadsVal = Max;
+ std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
+ if (F)
+ F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
+ } else
+ assert(Max == 0 && "Max must be zero");
+}
+
+void CodeGenModule::handleAMDGPUWavesPerEUAttr(
+ llvm::Function *F, const AMDGPUWavesPerEUAttr *Attr) {
+ unsigned Min =
+ Attr->getMin()->EvaluateKnownConstInt(getContext()).getExtValue();
+ unsigned Max =
+ Attr->getMax()
+ ? Attr->getMax()->EvaluateKnownConstInt(getContext()).getExtValue()
+ : 0;
+
+ if (Min != 0) {
+ assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");
+
+ std::string AttrVal = llvm::utostr(Min);
+ if (Max != 0)
+ AttrVal = AttrVal + "," + llvm::utostr(Max);
+ F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
+ } else
+ assert(Max == 0 && "Max must be zero");
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createAMDGPUTargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<AMDGPUTargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/ARC.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/ARC.cpp
new file mode 100644
index 000000000000..550eb4068f25
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/ARC.cpp
@@ -0,0 +1,158 @@
+//===- ARC.cpp ------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+// ARC ABI implementation.
+namespace {
+
+class ARCABIInfo : public DefaultABIInfo {
+ struct CCState {
+ unsigned FreeRegs;
+ };
+
+public:
+ using DefaultABIInfo::DefaultABIInfo;
+
+private:
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+
+ void updateState(const ABIArgInfo &Info, QualType Ty, CCState &State) const {
+ if (!State.FreeRegs)
+ return;
+ if (Info.isIndirect() && Info.getInReg())
+ State.FreeRegs--;
+ else if (Info.isDirect() && Info.getInReg()) {
+ unsigned sz = (getContext().getTypeSize(Ty) + 31) / 32;
+ if (sz < State.FreeRegs)
+ State.FreeRegs -= sz;
+ else
+ State.FreeRegs = 0;
+ }
+ }
+
+ void computeInfo(CGFunctionInfo &FI) const override {
+ CCState State;
+ // ARC uses 8 registers to pass arguments.
+ State.FreeRegs = 8;
+
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ updateState(FI.getReturnInfo(), FI.getReturnType(), State);
+ for (auto &I : FI.arguments()) {
+ I.info = classifyArgumentType(I.type, State.FreeRegs);
+ updateState(I.info, I.type, State);
+ }
+ }
+
+ ABIArgInfo getIndirectByRef(QualType Ty, bool HasFreeRegs) const;
+ ABIArgInfo getIndirectByValue(QualType Ty) const;
+ ABIArgInfo classifyArgumentType(QualType Ty, uint8_t FreeRegs) const;
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+};
+
+class ARCTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ ARCTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<ARCABIInfo>(CGT)) {}
+};
+
+
+ABIArgInfo ARCABIInfo::getIndirectByRef(QualType Ty, bool HasFreeRegs) const {
+ return HasFreeRegs ? getNaturalAlignIndirectInReg(Ty) :
+ getNaturalAlignIndirect(Ty, false);
+}
+
+ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const {
+ // Compute the byval alignment.
+ const unsigned MinABIStackAlignInBytes = 4;
+ unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
+ TypeAlign > MinABIStackAlignInBytes);
+}
+
+Address ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
+ getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(4), true);
+}
+
+ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
+ uint8_t FreeRegs) const {
+ // Handle the generic C++ ABI.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (RT) {
+ CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
+ if (RAA == CGCXXABI::RAA_Indirect)
+ return getIndirectByRef(Ty, FreeRegs > 0);
+
+ if (RAA == CGCXXABI::RAA_DirectInMemory)
+ return getIndirectByValue(Ty);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32;
+
+ if (isAggregateTypeForABI(Ty)) {
+ // Structures with flexible arrays are always indirect.
+ if (RT && RT->getDecl()->hasFlexibleArrayMember())
+ return getIndirectByValue(Ty);
+
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ llvm::LLVMContext &LLVMContext = getVMContext();
+
+ llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
+ SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
+ llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
+
+ return FreeRegs >= SizeInRegs ?
+ ABIArgInfo::getDirectInReg(Result) :
+ ABIArgInfo::getDirect(Result, 0, nullptr, false);
+ }
+
+ if (const auto *EIT = Ty->getAs<BitIntType>())
+ if (EIT->getNumBits() > 64)
+ return getIndirectByValue(Ty);
+
+ return isPromotableIntegerTypeForABI(Ty)
+ ? (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty)
+ : ABIArgInfo::getExtend(Ty))
+ : (FreeRegs >= SizeInRegs ? ABIArgInfo::getDirectInReg()
+ : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getDirectInReg();
+
+ // Arguments of size > 4 registers are indirect.
+ auto RetSize = llvm::alignTo(getContext().getTypeSize(RetTy), 32) / 32;
+ if (RetSize > 4)
+ return getIndirectByRef(RetTy, /*HasFreeRegs*/ true);
+
+ return DefaultABIInfo::classifyReturnType(RetTy);
+}
+
+} // End anonymous namespace.
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createARCTargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<ARCTargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/ARM.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/ARM.cpp
new file mode 100644
index 000000000000..d7d175ff1724
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/ARM.cpp
@@ -0,0 +1,819 @@
+//===- ARM.cpp ------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// ARM ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class ARMABIInfo : public ABIInfo {
+ ARMABIKind Kind;
+ bool IsFloatABISoftFP;
+
+public:
+ ARMABIInfo(CodeGenTypes &CGT, ARMABIKind Kind) : ABIInfo(CGT), Kind(Kind) {
+ setCCs();
+ IsFloatABISoftFP = CGT.getCodeGenOpts().FloatABI == "softfp" ||
+ CGT.getCodeGenOpts().FloatABI == ""; // default
+ }
+
+ bool isEABI() const {
+ switch (getTarget().getTriple().getEnvironment()) {
+ case llvm::Triple::Android:
+ case llvm::Triple::EABI:
+ case llvm::Triple::EABIHF:
+ case llvm::Triple::GNUEABI:
+ case llvm::Triple::GNUEABIHF:
+ case llvm::Triple::MuslEABI:
+ case llvm::Triple::MuslEABIHF:
+ return true;
+ default:
+ return getTarget().getTriple().isOHOSFamily();
+ }
+ }
+
+ bool isEABIHF() const {
+ switch (getTarget().getTriple().getEnvironment()) {
+ case llvm::Triple::EABIHF:
+ case llvm::Triple::GNUEABIHF:
+ case llvm::Triple::MuslEABIHF:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ ARMABIKind getABIKind() const { return Kind; }
+
+ bool allowBFloatArgsAndRet() const override {
+ return !IsFloatABISoftFP && getTarget().hasBFloat16Type();
+ }
+
+private:
+ ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
+ unsigned functionCallConv) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
+ unsigned functionCallConv) const;
+ ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
+ uint64_t Members) const;
+ ABIArgInfo coerceIllegalVector(QualType Ty) const;
+ bool isIllegalVectorType(QualType Ty) const;
+ bool containsAnyFP16Vectors(QualType Ty) const;
+
+ bool isHomogeneousAggregateBaseType(QualType Ty) const override;
+ bool isHomogeneousAggregateSmallEnough(const Type *Ty,
+ uint64_t Members) const override;
+ bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;
+
+ bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+
+ llvm::CallingConv::ID getLLVMDefaultCC() const;
+ llvm::CallingConv::ID getABIDefaultCC() const;
+ void setCCs();
+};
+
+class ARMSwiftABIInfo : public SwiftABIInfo {
+public:
+ explicit ARMSwiftABIInfo(CodeGenTypes &CGT)
+ : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}
+
+ bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
+ unsigned NumElts) const override;
+};
+
+class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIKind K)
+ : TargetCodeGenInfo(std::make_unique<ARMABIInfo>(CGT, K)) {
+ SwiftInfo = std::make_unique<ARMSwiftABIInfo>(CGT);
+ }
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ return 13;
+ }
+
+ StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
+ return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override {
+ llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
+
+ // 0-15 are the 16 integer registers.
+ AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
+ return false;
+ }
+
+ unsigned getSizeOfUnwindException() const override {
+ if (getABIInfo<ARMABIInfo>().isEABI())
+ return 88;
+ return TargetCodeGenInfo::getSizeOfUnwindException();
+ }
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override {
+ if (GV->isDeclaration())
+ return;
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD)
+ return;
+ auto *Fn = cast<llvm::Function>(GV);
+
+ if (const auto *TA = FD->getAttr<TargetAttr>()) {
+ ParsedTargetAttr Attr =
+ CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
+ if (!Attr.BranchProtection.empty()) {
+ TargetInfo::BranchProtectionInfo BPI;
+ StringRef DiagMsg;
+ StringRef Arch =
+ Attr.CPU.empty() ? CGM.getTarget().getTargetOpts().CPU : Attr.CPU;
+ if (!CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
+ Arch, BPI, DiagMsg)) {
+ CGM.getDiags().Report(
+ D->getLocation(),
+ diag::warn_target_unsupported_branch_protection_attribute)
+ << Arch;
+ } else {
+ static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
+ assert(static_cast<unsigned>(BPI.SignReturnAddr) <= 2 &&
+ "Unexpected SignReturnAddressScopeKind");
+ Fn->addFnAttr(
+ "sign-return-address",
+ SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
+
+ Fn->addFnAttr("branch-target-enforcement",
+ BPI.BranchTargetEnforcement ? "true" : "false");
+ }
+ } else if (CGM.getLangOpts().BranchTargetEnforcement ||
+ CGM.getLangOpts().hasSignReturnAddress()) {
+ // If the Branch Protection attribute is missing, validate the target
+ // Architecture attribute against Branch Protection command line
+ // settings.
+ if (!CGM.getTarget().isBranchProtectionSupportedArch(Attr.CPU))
+ CGM.getDiags().Report(
+ D->getLocation(),
+ diag::warn_target_unsupported_branch_protection_attribute)
+ << Attr.CPU;
+ }
+ }
+
+ const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
+ if (!Attr)
+ return;
+
+ const char *Kind;
+ switch (Attr->getInterrupt()) {
+ case ARMInterruptAttr::Generic: Kind = ""; break;
+ case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
+ case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
+ case ARMInterruptAttr::SWI: Kind = "SWI"; break;
+ case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
+ case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
+ }
+
+ Fn->addFnAttr("interrupt", Kind);
+
+ ARMABIKind ABI = getABIInfo<ARMABIInfo>().getABIKind();
+ if (ABI == ARMABIKind::APCS)
+ return;
+
+ // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
+ // however this is not necessarily true on taking any interrupt. Instruct
+ // the backend to perform a realignment as part of the function prologue.
+ llvm::AttrBuilder B(Fn->getContext());
+ B.addStackAlignmentAttr(8);
+ Fn->addFnAttrs(B);
+ }
+};
+
+class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
+public:
+ WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIKind K)
+ : ARMTargetCodeGenInfo(CGT, K) {}
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override;
+
+ void getDependentLibraryOption(llvm::StringRef Lib,
+ llvm::SmallString<24> &Opt) const override {
+ Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
+ }
+
+ void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
+ llvm::SmallString<32> &Opt) const override {
+ Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
+ }
+};
+
+void WindowsARMTargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
+ ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
+ if (GV->isDeclaration())
+ return;
+ addStackProbeTargetAttributes(D, GV, CGM);
+}
+}
+
+void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ if (!::classifyReturnType(getCXXABI(), FI, *this))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(),
+ FI.getCallingConvention());
+
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type, FI.isVariadic(),
+ FI.getCallingConvention());
+
+
+ // Always honor user-specified calling convention.
+ if (FI.getCallingConvention() != llvm::CallingConv::C)
+ return;
+
+ llvm::CallingConv::ID cc = getRuntimeCC();
+ if (cc != llvm::CallingConv::C)
+ FI.setEffectiveCallingConvention(cc);
+}
+
+/// Return the default calling convention that LLVM will use.
+llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
+ // The default calling convention that LLVM will infer.
+ if (isEABIHF() || getTarget().getTriple().isWatchABI())
+ return llvm::CallingConv::ARM_AAPCS_VFP;
+ else if (isEABI())
+ return llvm::CallingConv::ARM_AAPCS;
+ else
+ return llvm::CallingConv::ARM_APCS;
+}
+
+/// Return the calling convention that our ABI would like us to use
+/// as the C calling convention.
+llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
+ switch (getABIKind()) {
+ case ARMABIKind::APCS:
+ return llvm::CallingConv::ARM_APCS;
+ case ARMABIKind::AAPCS:
+ return llvm::CallingConv::ARM_AAPCS;
+ case ARMABIKind::AAPCS_VFP:
+ return llvm::CallingConv::ARM_AAPCS_VFP;
+ case ARMABIKind::AAPCS16_VFP:
+ return llvm::CallingConv::ARM_AAPCS_VFP;
+ }
+ llvm_unreachable("bad ABI kind");
+}
+
+void ARMABIInfo::setCCs() {
+ assert(getRuntimeCC() == llvm::CallingConv::C);
+
+ // Don't muddy up the IR with a ton of explicit annotations if
+ // they'd just match what LLVM will infer from the triple.
+ llvm::CallingConv::ID abiCC = getABIDefaultCC();
+ if (abiCC != getLLVMDefaultCC())
+ RuntimeCC = abiCC;
+}
+
+ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size <= 32) {
+ llvm::Type *ResType =
+ llvm::Type::getInt32Ty(getVMContext());
+ return ABIArgInfo::getDirect(ResType);
+ }
+ if (Size == 64 || Size == 128) {
+ auto *ResType = llvm::FixedVectorType::get(
+ llvm::Type::getInt32Ty(getVMContext()), Size / 32);
+ return ABIArgInfo::getDirect(ResType);
+ }
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+}
+
+ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
+ const Type *Base,
+ uint64_t Members) const {
+ assert(Base && "Base class should be set for homogeneous aggregate");
+ // Base can be a floating-point or a vector.
+ if (const VectorType *VT = Base->getAs<VectorType>()) {
+ // FP16 vectors should be converted to integer vectors
+ if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
+ uint64_t Size = getContext().getTypeSize(VT);
+ auto *NewVecTy = llvm::FixedVectorType::get(
+ llvm::Type::getInt32Ty(getVMContext()), Size / 32);
+ llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
+ return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
+ }
+ }
+ unsigned Align = 0;
+ if (getABIKind() == ARMABIKind::AAPCS ||
+ getABIKind() == ARMABIKind::AAPCS_VFP) {
+ // For alignment adjusted HFAs, cap the argument alignment to 8, leave it
+ // default otherwise.
+ Align = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
+ unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity();
+ Align = (Align > BaseAlign && Align >= 8) ? 8 : 0;
+ }
+ return ABIArgInfo::getDirect(nullptr, 0, nullptr, false, Align);
+}
+
+ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
+ unsigned functionCallConv) const {
+ // 6.1.2.1 The following argument types are VFP CPRCs:
+ // A single-precision floating-point type (including promoted
+ // half-precision types); A double-precision floating-point type;
+ // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
+ // with a Base Type of a single- or double-precision floating-point type,
+ // 64-bit containerized vectors or 128-bit containerized vectors with one
+ // to four Elements.
+ // Variadic functions should always marshal to the base standard.
+ bool IsAAPCS_VFP =
+ !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false);
+
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ // Handle illegal vector types here.
+ if (isIllegalVectorType(Ty))
+ return coerceIllegalVector(Ty);
+
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
+ Ty = EnumTy->getDecl()->getIntegerType();
+ }
+
+ if (const auto *EIT = Ty->getAs<BitIntType>())
+ if (EIT->getNumBits() > 64)
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
+
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
+ }
+
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+ }
+
+ // Ignore empty records.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ if (IsAAPCS_VFP) {
+ // Homogeneous Aggregates need to be expanded when we can fit the aggregate
+ // into VFP registers.
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (isHomogeneousAggregate(Ty, Base, Members))
+ return classifyHomogeneousAggregate(Ty, Base, Members);
+ } else if (getABIKind() == ARMABIKind::AAPCS16_VFP) {
+ // WatchOS does have homogeneous aggregates. Note that we intentionally use
+ // this convention even for a variadic function: the backend will use GPRs
+ // if needed.
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (isHomogeneousAggregate(Ty, Base, Members)) {
+ assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
+ llvm::Type *Ty =
+ llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
+ return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
+ }
+ }
+
+ if (getABIKind() == ARMABIKind::AAPCS16_VFP &&
+ getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
+ // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
+ // bigger than 128-bits, they get placed in space allocated by the caller,
+ // and a pointer is passed.
+ return ABIArgInfo::getIndirect(
+ CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
+ }
+
+ // Support byval for ARM.
+ // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
+ // most 8-byte. We realign the indirect argument if type alignment is bigger
+ // than ABI alignment.
+ uint64_t ABIAlign = 4;
+ uint64_t TyAlign;
+ if (getABIKind() == ARMABIKind::AAPCS_VFP ||
+ getABIKind() == ARMABIKind::AAPCS) {
+ TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
+ ABIAlign = std::clamp(TyAlign, (uint64_t)4, (uint64_t)8);
+ } else {
+ TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
+ }
+ if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
+ assert(getABIKind() != ARMABIKind::AAPCS16_VFP && "unexpected byval");
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
+ /*ByVal=*/true,
+ /*Realign=*/TyAlign > ABIAlign);
+ }
+
+ // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of
+ // same size and alignment.
+ if (getTarget().isRenderScriptTarget()) {
+ return coerceToIntArray(Ty, getContext(), getVMContext());
+ }
+
+ // Otherwise, pass by coercing to a structure of the appropriate size.
+ llvm::Type* ElemTy;
+ unsigned SizeRegs;
+ // FIXME: Try to match the types of the arguments more accurately where
+ // we can.
+ if (TyAlign <= 4) {
+ ElemTy = llvm::Type::getInt32Ty(getVMContext());
+ SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+ } else {
+ ElemTy = llvm::Type::getInt64Ty(getVMContext());
+ SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
+ }
+
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
+}
+
+static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
+ llvm::LLVMContext &VMContext) {
+ // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
+ // is called integer-like if its size is less than or equal to one word, and
+ // the offset of each of its addressable sub-fields is zero.
+
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // Check that the type fits in a word.
+ if (Size > 32)
+ return false;
+
+ // FIXME: Handle vector types!
+ if (Ty->isVectorType())
+ return false;
+
+ // Float types are never treated as "integer like".
+ if (Ty->isRealFloatingType())
+ return false;
+
+ // If this is a builtin or pointer type then it is ok.
+ if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
+ return true;
+
+ // Small complex integer types are "integer like".
+ if (const ComplexType *CT = Ty->getAs<ComplexType>())
+ return isIntegerLikeType(CT->getElementType(), Context, VMContext);
+
+ // Single element and zero sized arrays should be allowed, by the definition
+ // above, but they are not.
+
+ // Otherwise, it must be a record type.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT) return false;
+
+ // Ignore records with flexible arrays.
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ // Check that all sub-fields are at offset 0, and are themselves "integer
+ // like".
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ bool HadField = false;
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ const FieldDecl *FD = *i;
+
+ // Bit-fields are not addressable, we only need to verify they are "integer
+ // like". We still have to disallow a subsequent non-bitfield, for example:
+ // struct { int : 0; int x }
+ // is non-integer like according to gcc.
+ if (FD->isBitField()) {
+ if (!RD->isUnion())
+ HadField = true;
+
+ if (!isIntegerLikeType(FD->getType(), Context, VMContext))
+ return false;
+
+ continue;
+ }
+
+ // Check if this field is at offset 0.
+ if (Layout.getFieldOffset(idx) != 0)
+ return false;
+
+ if (!isIntegerLikeType(FD->getType(), Context, VMContext))
+ return false;
+
+ // Only allow at most one field in a structure. This doesn't match the
+ // wording above, but follows gcc in situations with a field following an
+ // empty structure.
+ if (!RD->isUnion()) {
+ if (HadField)
+ return false;
+
+ HadField = true;
+ }
+ }
+
+ return true;
+}
+
+ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
+ unsigned functionCallConv) const {
+
+ // Variadic functions should always marshal to the base standard.
+ bool IsAAPCS_VFP =
+ !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true);
+
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (const VectorType *VT = RetTy->getAs<VectorType>()) {
+ // Large vector types should be returned via memory.
+ if (getContext().getTypeSize(RetTy) > 128)
+ return getNaturalAlignIndirect(RetTy);
+ // TODO: FP16/BF16 vectors should be converted to integer vectors
+ // This check is similar to isIllegalVectorType - refactor?
+ if ((!getTarget().hasLegalHalfType() &&
+ (VT->getElementType()->isFloat16Type() ||
+ VT->getElementType()->isHalfType())) ||
+ (IsFloatABISoftFP &&
+ VT->getElementType()->isBFloat16Type()))
+ return coerceIllegalVector(RetTy);
+ }
+
+ if (!isAggregateTypeForABI(RetTy)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
+ if (EIT->getNumBits() > 64)
+ return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
+
+ return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect();
+ }
+
+ // Are we following APCS?
+ if (getABIKind() == ARMABIKind::APCS) {
+ if (isEmptyRecord(getContext(), RetTy, false))
+ return ABIArgInfo::getIgnore();
+
+ // Complex types are all returned as packed integers.
+ //
+ // FIXME: Consider using 2 x vector types if the back end handles them
+ // correctly.
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(
+ getVMContext(), getContext().getTypeSize(RetTy)));
+
+ // Integer like structures are returned in r0.
+ if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
+ // Return in the smallest viable integer type.
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size <= 8)
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+ if (Size <= 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ }
+
+ // Otherwise return in memory.
+ return getNaturalAlignIndirect(RetTy);
+ }
+
+ // Otherwise this is an AAPCS variant.
+
+ if (isEmptyRecord(getContext(), RetTy, true))
+ return ABIArgInfo::getIgnore();
+
+ // Check for homogeneous aggregates with AAPCS-VFP.
+ if (IsAAPCS_VFP) {
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (isHomogeneousAggregate(RetTy, Base, Members))
+ return classifyHomogeneousAggregate(RetTy, Base, Members);
+ }
+
+ // Aggregates <= 4 bytes are returned in r0; other aggregates
+ // are returned indirectly.
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size <= 32) {
+ // On RenderScript, coerce Aggregates <= 4 bytes to an integer array of
+ // same size and alignment.
+ if (getTarget().isRenderScriptTarget()) {
+ return coerceToIntArray(RetTy, getContext(), getVMContext());
+ }
+ if (getDataLayout().isBigEndian())
+ // Return in 32 bit integer integer type (as if loaded by LDR, AAPCS 5.4)
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+
+ // Return in the smallest viable integer type.
+ if (Size <= 8)
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+ if (Size <= 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ } else if (Size <= 128 && getABIKind() == ARMABIKind::AAPCS16_VFP) {
+ llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
+ llvm::Type *CoerceTy =
+ llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+
+ return getNaturalAlignIndirect(RetTy);
+}
+
+/// isIllegalVector - check whether Ty is an illegal vector type.
+bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
+ if (const VectorType *VT = Ty->getAs<VectorType> ()) {
+ // On targets that don't support half, fp16 or bfloat, they are expanded
+ // into float, and we don't want the ABI to depend on whether or not they
+ // are supported in hardware. Thus return false to coerce vectors of these
+ // types into integer vectors.
+ // We do not depend on hasLegalHalfType for bfloat as it is a
+ // separate IR type.
+ if ((!getTarget().hasLegalHalfType() &&
+ (VT->getElementType()->isFloat16Type() ||
+ VT->getElementType()->isHalfType())) ||
+ (IsFloatABISoftFP &&
+ VT->getElementType()->isBFloat16Type()))
+ return true;
+ if (isAndroid()) {
+ // Android shipped using Clang 3.1, which supported a slightly different
+ // vector ABI. The primary differences were that 3-element vector types
+ // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path
+ // accepts that legacy behavior for Android only.
+ // Check whether VT is legal.
+ unsigned NumElements = VT->getNumElements();
+ // NumElements should be power of 2 or equal to 3.
+ if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
+ return true;
+ } else {
+ // Check whether VT is legal.
+ unsigned NumElements = VT->getNumElements();
+ uint64_t Size = getContext().getTypeSize(VT);
+ // NumElements should be power of 2.
+ if (!llvm::isPowerOf2_32(NumElements))
+ return true;
+ // Size should be greater than 32 bits.
+ return Size <= 32;
+ }
+ }
+ return false;
+}
+
+/// Return true if a type contains any 16-bit floating point vectors
+bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
+ if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
+ uint64_t NElements = AT->getSize().getZExtValue();
+ if (NElements == 0)
+ return false;
+ return containsAnyFP16Vectors(AT->getElementType());
+ } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) {
+ return containsAnyFP16Vectors(B.getType());
+ }))
+ return true;
+
+ if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) {
+ return FD && containsAnyFP16Vectors(FD->getType());
+ }))
+ return true;
+
+ return false;
+ } else {
+ if (const VectorType *VT = Ty->getAs<VectorType>())
+ return (VT->getElementType()->isFloat16Type() ||
+ VT->getElementType()->isBFloat16Type() ||
+ VT->getElementType()->isHalfType());
+ return false;
+ }
+}
+
+bool ARMSwiftABIInfo::isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
+ unsigned NumElts) const {
+ if (!llvm::isPowerOf2_32(NumElts))
+ return false;
+ unsigned size = CGT.getDataLayout().getTypeStoreSizeInBits(EltTy);
+ if (size > 64)
+ return false;
+ if (VectorSize.getQuantity() != 8 &&
+ (VectorSize.getQuantity() != 16 || NumElts == 1))
+ return false;
+ return true;
+}
+
+bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
+ // Homogeneous aggregates for AAPCS-VFP must have base types of float,
+ // double, or 64-bit or 128-bit vectors.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ if (BT->getKind() == BuiltinType::Float ||
+ BT->getKind() == BuiltinType::Double ||
+ BT->getKind() == BuiltinType::LongDouble)
+ return true;
+ } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ unsigned VecSize = getContext().getTypeSize(VT);
+ if (VecSize == 64 || VecSize == 128)
+ return true;
+ }
+ return false;
+}
+
+bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
+ uint64_t Members) const {
+ return Members <= 4;
+}
+
+bool ARMABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const {
+ // AAPCS32 says that the rule for whether something is a homogeneous
+ // aggregate is applied to the output of the data layout decision. So
+ // anything that doesn't affect the data layout also does not affect
+ // homogeneity. In particular, zero-length bitfields don't stop a struct
+ // being homogeneous.
+ return true;
+}
+
+bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
+ bool acceptHalf) const {
+ // Give precedence to user-specified calling conventions.
+ if (callConvention != llvm::CallingConv::C)
+ return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP);
+ else
+ return (getABIKind() == ARMABIKind::AAPCS_VFP) ||
+ (acceptHalf && (getABIKind() == ARMABIKind::AAPCS16_VFP));
+}
+
+Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ CharUnits SlotSize = CharUnits::fromQuantity(4);
+
+ // Empty records are ignored for parameter passing purposes.
+ if (isEmptyRecord(getContext(), Ty, true)) {
+ VAListAddr = VAListAddr.withElementType(CGF.Int8PtrTy);
+ auto *Load = CGF.Builder.CreateLoad(VAListAddr);
+ return Address(Load, CGF.ConvertTypeForMem(Ty), SlotSize);
+ }
+
+ CharUnits TySize = getContext().getTypeSizeInChars(Ty);
+ CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty);
+
+ // Use indirect if size of the illegal vector is bigger than 16 bytes.
+ bool IsIndirect = false;
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
+ IsIndirect = true;
+
+ // ARMv7k passes structs bigger than 16 bytes indirectly, in space
+ // allocated by the caller.
+ } else if (TySize > CharUnits::fromQuantity(16) &&
+ getABIKind() == ARMABIKind::AAPCS16_VFP &&
+ !isHomogeneousAggregate(Ty, Base, Members)) {
+ IsIndirect = true;
+
+ // Otherwise, bound the type's ABI alignment.
+ // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
+ // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
+ // Our callers should be prepared to handle an under-aligned address.
+ } else if (getABIKind() == ARMABIKind::AAPCS_VFP ||
+ getABIKind() == ARMABIKind::AAPCS) {
+ TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
+ TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
+ } else if (getABIKind() == ARMABIKind::AAPCS16_VFP) {
+ // ARMv7k allows type alignment up to 16 bytes.
+ TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
+ TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
+ } else {
+ TyAlignForABI = CharUnits::fromQuantity(4);
+ }
+
+ TypeInfoChars TyInfo(TySize, TyAlignForABI, AlignRequirementKind::None);
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
+ SlotSize, /*AllowHigherAlign*/ true);
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createARMTargetCodeGenInfo(CodeGenModule &CGM, ARMABIKind Kind) {
+ return std::make_unique<ARMTargetCodeGenInfo>(CGM.getTypes(), Kind);
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createWindowsARMTargetCodeGenInfo(CodeGenModule &CGM, ARMABIKind K) {
+ return std::make_unique<WindowsARMTargetCodeGenInfo>(CGM.getTypes(), K);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/AVR.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/AVR.cpp
new file mode 100644
index 000000000000..50547dd6dec5
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/AVR.cpp
@@ -0,0 +1,154 @@
+//===- AVR.cpp ------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+#include "clang/Basic/DiagnosticFrontend.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// AVR ABI Implementation. Documented at
+// https://gcc.gnu.org/wiki/avr-gcc#Calling_Convention
+// https://gcc.gnu.org/wiki/avr-gcc#Reduced_Tiny
+//===----------------------------------------------------------------------===//
+
+namespace {
+class AVRABIInfo : public DefaultABIInfo {
+private:
+ // The total amount of registers can be used to pass parameters. It is 18 on
+ // AVR, or 6 on AVRTiny.
+ const unsigned ParamRegs;
+ // The total amount of registers can be used to pass return value. It is 8 on
+ // AVR, or 4 on AVRTiny.
+ const unsigned RetRegs;
+
+public:
+ AVRABIInfo(CodeGenTypes &CGT, unsigned NPR, unsigned NRR)
+ : DefaultABIInfo(CGT), ParamRegs(NPR), RetRegs(NRR) {}
+
+ ABIArgInfo classifyReturnType(QualType Ty, bool &LargeRet) const {
+ // On AVR, a return struct with size less than or equals to 8 bytes is
+ // returned directly via registers R18-R25. On AVRTiny, a return struct
+ // with size less than or equals to 4 bytes is returned directly via
+ // registers R22-R25.
+ if (isAggregateTypeForABI(Ty) &&
+ getContext().getTypeSize(Ty) <= RetRegs * 8)
+ return ABIArgInfo::getDirect();
+ // A return value (struct or scalar) with larger size is returned via a
+ // stack slot, along with a pointer as the function's implicit argument.
+ if (getContext().getTypeSize(Ty) > RetRegs * 8) {
+ LargeRet = true;
+ return getNaturalAlignIndirect(Ty);
+ }
+ // An i8 return value should not be extended to i16, since AVR has 8-bit
+ // registers.
+ if (Ty->isIntegralOrEnumerationType() && getContext().getTypeSize(Ty) <= 8)
+ return ABIArgInfo::getDirect();
+ // Otherwise we follow the default way which is compatible.
+ return DefaultABIInfo::classifyReturnType(Ty);
+ }
+
+ ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegs) const {
+ unsigned TySize = getContext().getTypeSize(Ty);
+
+ // An int8 type argument always costs two registers like an int16.
+ if (TySize == 8 && NumRegs >= 2) {
+ NumRegs -= 2;
+ return ABIArgInfo::getExtend(Ty);
+ }
+
+ // If the argument size is an odd number of bytes, round up the size
+ // to the next even number.
+ TySize = llvm::alignTo(TySize, 16);
+
+ // Any type including an array/struct type can be passed in rgisters,
+ // if there are enough registers left.
+ if (TySize <= NumRegs * 8) {
+ NumRegs -= TySize / 8;
+ return ABIArgInfo::getDirect();
+ }
+
+ // An argument is passed either completely in registers or completely in
+ // memory. Since there are not enough registers left, current argument
+ // and all other unprocessed arguments should be passed in memory.
+ // However we still need to return `ABIArgInfo::getDirect()` other than
+ // `ABIInfo::getNaturalAlignIndirect(Ty)`, otherwise an extra stack slot
+ // will be allocated, so the stack frame layout will be incompatible with
+ // avr-gcc.
+ NumRegs = 0;
+ return ABIArgInfo::getDirect();
+ }
+
+ void computeInfo(CGFunctionInfo &FI) const override {
+ // Decide the return type.
+ bool LargeRet = false;
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), LargeRet);
+
+ // Decide each argument type. The total number of registers can be used for
+ // arguments depends on several factors:
+ // 1. Arguments of varargs functions are passed on the stack. This applies
+ // even to the named arguments. So no register can be used.
+ // 2. Total 18 registers can be used on avr and 6 ones on avrtiny.
+ // 3. If the return type is a struct with too large size, two registers
+ // (out of 18/6) will be cost as an implicit pointer argument.
+ unsigned NumRegs = ParamRegs;
+ if (FI.isVariadic())
+ NumRegs = 0;
+ else if (LargeRet)
+ NumRegs -= 2;
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type, NumRegs);
+ }
+};
+
+class AVRTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ AVRTargetCodeGenInfo(CodeGenTypes &CGT, unsigned NPR, unsigned NRR)
+ : TargetCodeGenInfo(std::make_unique<AVRABIInfo>(CGT, NPR, NRR)) {}
+
+ LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
+ const VarDecl *D) const override {
+ // Check if global/static variable is defined in address space
+ // 1~6 (__flash, __flash1, __flash2, __flash3, __flash4, __flash5)
+ // but not constant.
+ if (D) {
+ LangAS AS = D->getType().getAddressSpace();
+ if (isTargetAddressSpace(AS) && 1 <= toTargetAddressSpace(AS) &&
+ toTargetAddressSpace(AS) <= 6 && !D->getType().isConstQualified())
+ CGM.getDiags().Report(D->getLocation(),
+ diag::err_verify_nonconst_addrspace)
+ << "__flash*";
+ }
+ return TargetCodeGenInfo::getGlobalVarAddressSpace(CGM, D);
+ }
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override {
+ if (GV->isDeclaration())
+ return;
+ const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD) return;
+ auto *Fn = cast<llvm::Function>(GV);
+
+ if (FD->getAttr<AVRInterruptAttr>())
+ Fn->addFnAttr("interrupt");
+
+ if (FD->getAttr<AVRSignalAttr>())
+ Fn->addFnAttr("signal");
+ }
+};
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createAVRTargetCodeGenInfo(CodeGenModule &CGM, unsigned NPR,
+ unsigned NRR) {
+ return std::make_unique<AVRTargetCodeGenInfo>(CGM.getTypes(), NPR, NRR);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/BPF.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/BPF.cpp
new file mode 100644
index 000000000000..2849222f7a18
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/BPF.cpp
@@ -0,0 +1,100 @@
+//===- BPF.cpp ------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// BPF ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class BPFABIInfo : public DefaultABIInfo {
+public:
+ BPFABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
+ ABIArgInfo classifyArgumentType(QualType Ty) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ if (isAggregateTypeForABI(Ty)) {
+ uint64_t Bits = getContext().getTypeSize(Ty);
+ if (Bits == 0)
+ return ABIArgInfo::getIgnore();
+
+ // If the aggregate needs 1 or 2 registers, do not use reference.
+ if (Bits <= 128) {
+ llvm::Type *CoerceTy;
+ if (Bits <= 64) {
+ CoerceTy =
+ llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
+ } else {
+ llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), 64);
+ CoerceTy = llvm::ArrayType::get(RegTy, 2);
+ }
+ return ABIArgInfo::getDirect(CoerceTy);
+ } else {
+ return getNaturalAlignIndirect(Ty);
+ }
+ }
+
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ ASTContext &Context = getContext();
+ if (const auto *EIT = Ty->getAs<BitIntType>())
+ if (EIT->getNumBits() > Context.getTypeSize(Context.Int128Ty))
+ return getNaturalAlignIndirect(Ty);
+
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
+ }
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (isAggregateTypeForABI(RetTy))
+ return getNaturalAlignIndirect(RetTy);
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ ASTContext &Context = getContext();
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
+ if (EIT->getNumBits() > Context.getTypeSize(Context.Int128Ty))
+ return getNaturalAlignIndirect(RetTy);
+
+ // Caller will do necessary sign/zero extension.
+ return ABIArgInfo::getDirect();
+ }
+
+ void computeInfo(CGFunctionInfo &FI) const override {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+ }
+
+};
+
+class BPFTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ BPFTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<BPFABIInfo>(CGT)) {}
+};
+
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createBPFTargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<BPFTargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/CSKY.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/CSKY.cpp
new file mode 100644
index 000000000000..924eced700e1
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/CSKY.cpp
@@ -0,0 +1,175 @@
+//===- CSKY.cpp -----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// CSKY ABI Implementation
+//===----------------------------------------------------------------------===//
+namespace {
+class CSKYABIInfo : public DefaultABIInfo {
+ static const int NumArgGPRs = 4;
+ static const int NumArgFPRs = 4;
+
+ static const unsigned XLen = 32;
+ unsigned FLen;
+
+public:
+ CSKYABIInfo(CodeGen::CodeGenTypes &CGT, unsigned FLen)
+ : DefaultABIInfo(CGT), FLen(FLen) {}
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+ ABIArgInfo classifyArgumentType(QualType Ty, int &ArgGPRsLeft,
+ int &ArgFPRsLeft,
+ bool isReturnType = false) const;
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+} // end anonymous namespace
+
+void CSKYABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ QualType RetTy = FI.getReturnType();
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(RetTy);
+
+ bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
+
+ // We must track the number of GPRs used in order to conform to the CSKY
+ // ABI, as integer scalars passed in registers should have signext/zeroext
+ // when promoted.
+ int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
+ int ArgFPRsLeft = FLen ? NumArgFPRs : 0;
+
+ for (auto &ArgInfo : FI.arguments()) {
+ ArgInfo.info = classifyArgumentType(ArgInfo.type, ArgGPRsLeft, ArgFPRsLeft);
+ }
+}
+
+Address CSKYABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);
+
+ // Empty records are ignored for parameter passing purposes.
+ if (isEmptyRecord(getContext(), Ty, true)) {
+ return Address(CGF.Builder.CreateLoad(VAListAddr),
+ CGF.ConvertTypeForMem(Ty), SlotSize);
+ }
+
+ auto TInfo = getContext().getTypeInfoInChars(Ty);
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, false, TInfo, SlotSize,
+ /*AllowHigherAlign=*/true);
+}
+
+ABIArgInfo CSKYABIInfo::classifyArgumentType(QualType Ty, int &ArgGPRsLeft,
+ int &ArgFPRsLeft,
+ bool isReturnType) const {
+ assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always passed indirectly.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
+ if (ArgGPRsLeft)
+ ArgGPRsLeft -= 1;
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
+ CGCXXABI::RAA_DirectInMemory);
+ }
+
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ if (!Ty->getAsUnionType())
+ if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
+ return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+ // Pass floating point values via FPRs if possible.
+ if (Ty->isFloatingType() && !Ty->isComplexType() && FLen >= Size &&
+ ArgFPRsLeft) {
+ ArgFPRsLeft--;
+ return ABIArgInfo::getDirect();
+ }
+
+ // Complex types for the hard float ABI must be passed direct rather than
+ // using CoerceAndExpand.
+ if (Ty->isComplexType() && FLen && !isReturnType) {
+ QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
+ if (getContext().getTypeSize(EltTy) <= FLen) {
+ ArgFPRsLeft -= 2;
+ return ABIArgInfo::getDirect();
+ }
+ }
+
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // All integral types are promoted to XLen width, unless passed on the
+ // stack.
+ if (Size < XLen && Ty->isIntegralOrEnumerationType())
+ return ABIArgInfo::getExtend(Ty);
+
+ if (const auto *EIT = Ty->getAs<BitIntType>()) {
+ if (EIT->getNumBits() < XLen)
+ return ABIArgInfo::getExtend(Ty);
+ }
+
+ return ABIArgInfo::getDirect();
+ }
+
+ // For argument type, the first 4*XLen parts of aggregate will be passed
+ // in registers, and the rest will be passed in stack.
+ // So we can coerce to integers directly and let backend handle it correctly.
+ // For return type, aggregate which <= 2*XLen will be returned in registers.
+ // Otherwise, aggregate will be returned indirectly.
+ if (!isReturnType || (isReturnType && Size <= 2 * XLen)) {
+ if (Size <= XLen) {
+ return ABIArgInfo::getDirect(
+ llvm::IntegerType::get(getVMContext(), XLen));
+ } else {
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(
+ llvm::IntegerType::get(getVMContext(), XLen), (Size + 31) / XLen));
+ }
+ }
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+}
+
+ABIArgInfo CSKYABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ int ArgGPRsLeft = 2;
+ int ArgFPRsLeft = FLen ? 1 : 0;
+
+ // The rules for return and argument types are the same, so defer to
+ // classifyArgumentType.
+ return classifyArgumentType(RetTy, ArgGPRsLeft, ArgFPRsLeft, true);
+}
+
+namespace {
+class CSKYTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ CSKYTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned FLen)
+ : TargetCodeGenInfo(std::make_unique<CSKYABIInfo>(CGT, FLen)) {}
+};
+} // end anonymous namespace
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createCSKYTargetCodeGenInfo(CodeGenModule &CGM, unsigned FLen) {
+ return std::make_unique<CSKYTargetCodeGenInfo>(CGM.getTypes(), FLen);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/Hexagon.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/Hexagon.cpp
new file mode 100644
index 000000000000..944a8d002ecf
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/Hexagon.cpp
@@ -0,0 +1,423 @@
+//===- Hexagon.cpp --------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Hexagon ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class HexagonABIInfo : public DefaultABIInfo {
+public:
+ HexagonABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
+private:
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, unsigned *RegsLeft) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+ Address EmitVAArgFromMemory(CodeGenFunction &CFG, Address VAListAddr,
+ QualType Ty) const;
+ Address EmitVAArgForHexagon(CodeGenFunction &CFG, Address VAListAddr,
+ QualType Ty) const;
+ Address EmitVAArgForHexagonLinux(CodeGenFunction &CFG, Address VAListAddr,
+ QualType Ty) const;
+};
+
+class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<HexagonABIInfo>(CGT)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ return 29;
+ }
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &GCM) const override {
+ if (GV->isDeclaration())
+ return;
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD)
+ return;
+ }
+};
+
+} // namespace
+
+void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ unsigned RegsLeft = 6;
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type, &RegsLeft);
+}
+
+static bool HexagonAdjustRegsLeft(uint64_t Size, unsigned *RegsLeft) {
+ assert(Size <= 64 && "Not expecting to pass arguments larger than 64 bits"
+ " through registers");
+
+ if (*RegsLeft == 0)
+ return false;
+
+ if (Size <= 32) {
+ (*RegsLeft)--;
+ return true;
+ }
+
+ if (2 <= (*RegsLeft & (~1U))) {
+ *RegsLeft = (*RegsLeft & (~1U)) - 2;
+ return true;
+ }
+
+ // Next available register was r5 but candidate was greater than 32-bits so it
+ // has to go on the stack. However we still consume r5
+ if (*RegsLeft == 1)
+ *RegsLeft = 0;
+
+ return false;
+}
+
+ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty,
+ unsigned *RegsLeft) const {
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size <= 64)
+ HexagonAdjustRegsLeft(Size, RegsLeft);
+
+ if (Size > 64 && Ty->isBitIntType())
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
+
+ return isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect();
+ }
+
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ // Ignore empty records.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+ unsigned Align = getContext().getTypeAlign(Ty);
+
+ if (Size > 64)
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
+
+ if (HexagonAdjustRegsLeft(Size, RegsLeft))
+ Align = Size <= 32 ? 32 : 64;
+ if (Size <= Align) {
+ // Pass in the smallest viable integer type.
+ Size = llvm::bit_ceil(Size);
+ return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
+ }
+ return DefaultABIInfo::classifyArgumentType(Ty);
+}
+
+ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ const TargetInfo &T = CGT.getTarget();
+ uint64_t Size = getContext().getTypeSize(RetTy);
+
+ if (RetTy->getAs<VectorType>()) {
+ // HVX vectors are returned in vector registers or register pairs.
+ if (T.hasFeature("hvx")) {
+ assert(T.hasFeature("hvx-length64b") || T.hasFeature("hvx-length128b"));
+ uint64_t VecSize = T.hasFeature("hvx-length64b") ? 64*8 : 128*8;
+ if (Size == VecSize || Size == 2*VecSize)
+ return ABIArgInfo::getDirectInReg();
+ }
+ // Large vector types should be returned via memory.
+ if (Size > 64)
+ return getNaturalAlignIndirect(RetTy);
+ }
+
+ if (!isAggregateTypeForABI(RetTy)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ if (Size > 64 && RetTy->isBitIntType())
+ return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
+
+ return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect();
+ }
+
+ if (isEmptyRecord(getContext(), RetTy, true))
+ return ABIArgInfo::getIgnore();
+
+ // Aggregates <= 8 bytes are returned in registers, other aggregates
+ // are returned indirectly.
+ if (Size <= 64) {
+ // Return in the smallest viable integer type.
+ Size = llvm::bit_ceil(Size);
+ return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
+ }
+ return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
+}
+
+Address HexagonABIInfo::EmitVAArgFromMemory(CodeGenFunction &CGF,
+ Address VAListAddr,
+ QualType Ty) const {
+ // Load the overflow area pointer.
+ Address __overflow_area_pointer_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
+ llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
+ __overflow_area_pointer_p, "__overflow_area_pointer");
+
+ uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
+ if (Align > 4) {
+ // Alignment should be a power of 2.
+ assert((Align & (Align - 1)) == 0 && "Alignment is not power of 2!");
+
+ // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
+ llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
+
+ // Add offset to the current pointer to access the argument.
+ __overflow_area_pointer =
+ CGF.Builder.CreateGEP(CGF.Int8Ty, __overflow_area_pointer, Offset);
+ llvm::Value *AsInt =
+ CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
+
+ // Create a mask which should be "AND"ed
+ // with (overflow_arg_area + align - 1)
+ llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -(int)Align);
+ __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
+ CGF.Builder.CreateAnd(AsInt, Mask), __overflow_area_pointer->getType(),
+ "__overflow_area_pointer.align");
+ }
+
+ // Get the type of the argument from memory and bitcast
+ // overflow area pointer to the argument type.
+ llvm::Type *PTy = CGF.ConvertTypeForMem(Ty);
+ Address AddrTyped =
+ Address(__overflow_area_pointer, PTy, CharUnits::fromQuantity(Align));
+
+ // Round up to the minimum stack alignment for varargs which is 4 bytes.
+ uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
+
+ __overflow_area_pointer = CGF.Builder.CreateGEP(
+ CGF.Int8Ty, __overflow_area_pointer,
+ llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+ "__overflow_area_pointer.next");
+ CGF.Builder.CreateStore(__overflow_area_pointer, __overflow_area_pointer_p);
+
+ return AddrTyped;
+}
+
+Address HexagonABIInfo::EmitVAArgForHexagon(CodeGenFunction &CGF,
+ Address VAListAddr,
+ QualType Ty) const {
+ // FIXME: Need to handle alignment
+ llvm::Type *BP = CGF.Int8PtrTy;
+ CGBuilderTy &Builder = CGF.Builder;
+ Address VAListAddrAsBPP = VAListAddr.withElementType(BP);
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ // Handle address alignment for type alignment > 32 bits
+ uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
+ if (TyAlign > 4) {
+ assert((TyAlign & (TyAlign - 1)) == 0 && "Alignment is not power of 2!");
+ llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
+ AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
+ AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
+ Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
+ }
+ Address AddrTyped =
+ Address(Addr, CGF.ConvertType(Ty), CharUnits::fromQuantity(TyAlign));
+
+ uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::Value *NextAddr = Builder.CreateGEP(
+ CGF.Int8Ty, Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
+
+Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
+ Address VAListAddr,
+ QualType Ty) const {
+ int ArgSize = CGF.getContext().getTypeSize(Ty) / 8;
+
+ if (ArgSize > 8)
+ return EmitVAArgFromMemory(CGF, VAListAddr, Ty);
+
+ // Here we have check if the argument is in register area or
+ // in overflow area.
+ // If the saved register area pointer + argsize rounded up to alignment >
+ // saved register area end pointer, argument is in overflow area.
+ unsigned RegsLeft = 6;
+ Ty = CGF.getContext().getCanonicalType(Ty);
+ (void)classifyArgumentType(Ty, &RegsLeft);
+
+ llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
+ llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+ llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+
+ // Get rounded size of the argument.GCC does not allow vararg of
+ // size < 4 bytes. We follow the same logic here.
+ ArgSize = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
+ int ArgAlign = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
+
+ // Argument may be in saved register area
+ CGF.EmitBlock(MaybeRegBlock);
+
+ // Load the current saved register area pointer.
+ Address __current_saved_reg_area_pointer_p = CGF.Builder.CreateStructGEP(
+ VAListAddr, 0, "__current_saved_reg_area_pointer_p");
+ llvm::Value *__current_saved_reg_area_pointer = CGF.Builder.CreateLoad(
+ __current_saved_reg_area_pointer_p, "__current_saved_reg_area_pointer");
+
+ // Load the saved register area end pointer.
+ Address __saved_reg_area_end_pointer_p = CGF.Builder.CreateStructGEP(
+ VAListAddr, 1, "__saved_reg_area_end_pointer_p");
+ llvm::Value *__saved_reg_area_end_pointer = CGF.Builder.CreateLoad(
+ __saved_reg_area_end_pointer_p, "__saved_reg_area_end_pointer");
+
+ // If the size of argument is > 4 bytes, check if the stack
+ // location is aligned to 8 bytes
+ if (ArgAlign > 4) {
+
+ llvm::Value *__current_saved_reg_area_pointer_int =
+ CGF.Builder.CreatePtrToInt(__current_saved_reg_area_pointer,
+ CGF.Int32Ty);
+
+ __current_saved_reg_area_pointer_int = CGF.Builder.CreateAdd(
+ __current_saved_reg_area_pointer_int,
+ llvm::ConstantInt::get(CGF.Int32Ty, (ArgAlign - 1)),
+ "align_current_saved_reg_area_pointer");
+
+ __current_saved_reg_area_pointer_int =
+ CGF.Builder.CreateAnd(__current_saved_reg_area_pointer_int,
+ llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
+ "align_current_saved_reg_area_pointer");
+
+ __current_saved_reg_area_pointer =
+ CGF.Builder.CreateIntToPtr(__current_saved_reg_area_pointer_int,
+ __current_saved_reg_area_pointer->getType(),
+ "align_current_saved_reg_area_pointer");
+ }
+
+ llvm::Value *__new_saved_reg_area_pointer =
+ CGF.Builder.CreateGEP(CGF.Int8Ty, __current_saved_reg_area_pointer,
+ llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
+ "__new_saved_reg_area_pointer");
+
+ llvm::Value *UsingStack = nullptr;
+ UsingStack = CGF.Builder.CreateICmpSGT(__new_saved_reg_area_pointer,
+ __saved_reg_area_end_pointer);
+
+ CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, InRegBlock);
+
+ // Argument in saved register area
+ // Implement the block where argument is in register saved area
+ CGF.EmitBlock(InRegBlock);
+
+ llvm::Type *PTy = CGF.ConvertType(Ty);
+ llvm::Value *__saved_reg_area_p = CGF.Builder.CreateBitCast(
+ __current_saved_reg_area_pointer, llvm::PointerType::getUnqual(PTy));
+
+ CGF.Builder.CreateStore(__new_saved_reg_area_pointer,
+ __current_saved_reg_area_pointer_p);
+
+ CGF.EmitBranch(ContBlock);
+
+ // Argument in overflow area
+ // Implement the block where the argument is in overflow area.
+ CGF.EmitBlock(OnStackBlock);
+
+ // Load the overflow area pointer
+ Address __overflow_area_pointer_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
+ llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
+ __overflow_area_pointer_p, "__overflow_area_pointer");
+
+ // Align the overflow area pointer according to the alignment of the argument
+ if (ArgAlign > 4) {
+ llvm::Value *__overflow_area_pointer_int =
+ CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
+
+ __overflow_area_pointer_int =
+ CGF.Builder.CreateAdd(__overflow_area_pointer_int,
+ llvm::ConstantInt::get(CGF.Int32Ty, ArgAlign - 1),
+ "align_overflow_area_pointer");
+
+ __overflow_area_pointer_int =
+ CGF.Builder.CreateAnd(__overflow_area_pointer_int,
+ llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
+ "align_overflow_area_pointer");
+
+ __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
+ __overflow_area_pointer_int, __overflow_area_pointer->getType(),
+ "align_overflow_area_pointer");
+ }
+
+ // Get the pointer for next argument in overflow area and store it
+ // to overflow area pointer.
+ llvm::Value *__new_overflow_area_pointer = CGF.Builder.CreateGEP(
+ CGF.Int8Ty, __overflow_area_pointer,
+ llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
+ "__overflow_area_pointer.next");
+
+ CGF.Builder.CreateStore(__new_overflow_area_pointer,
+ __overflow_area_pointer_p);
+
+ CGF.Builder.CreateStore(__new_overflow_area_pointer,
+ __current_saved_reg_area_pointer_p);
+
+ // Bitcast the overflow area pointer to the type of argument.
+ llvm::Type *OverflowPTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *__overflow_area_p = CGF.Builder.CreateBitCast(
+ __overflow_area_pointer, llvm::PointerType::getUnqual(OverflowPTy));
+
+ CGF.EmitBranch(ContBlock);
+
+ // Get the correct pointer to load the variable argument
+ // Implement the ContBlock
+ CGF.EmitBlock(ContBlock);
+
+ llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Type *MemPTy = llvm::PointerType::getUnqual(MemTy);
+ llvm::PHINode *ArgAddr = CGF.Builder.CreatePHI(MemPTy, 2, "vaarg.addr");
+ ArgAddr->addIncoming(__saved_reg_area_p, InRegBlock);
+ ArgAddr->addIncoming(__overflow_area_p, OnStackBlock);
+
+ return Address(ArgAddr, MemTy, CharUnits::fromQuantity(ArgAlign));
+}
+
+Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+
+ if (getTarget().getTriple().isMusl())
+ return EmitVAArgForHexagonLinux(CGF, VAListAddr, Ty);
+
+ return EmitVAArgForHexagon(CGF, VAListAddr, Ty);
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createHexagonTargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<HexagonTargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/Lanai.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/Lanai.cpp
new file mode 100644
index 000000000000..2578fc0291e7
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/Lanai.cpp
@@ -0,0 +1,154 @@
+//===- Lanai.cpp ----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Lanai ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+class LanaiABIInfo : public DefaultABIInfo {
+ struct CCState {
+ unsigned FreeRegs;
+ };
+
+public:
+ LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
+ bool shouldUseInReg(QualType Ty, CCState &State) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override {
+ CCState State;
+ // Lanai uses 4 registers to pass arguments unless the function has the
+ // regparm attribute set.
+ if (FI.getHasRegParm()) {
+ State.FreeRegs = FI.getRegParm();
+ } else {
+ State.FreeRegs = 4;
+ }
+
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type, State);
+ }
+
+ ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
+};
+} // end anonymous namespace
+
+bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
+ unsigned Size = getContext().getTypeSize(Ty);
+ unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;
+
+ if (SizeInRegs == 0)
+ return false;
+
+ if (SizeInRegs > State.FreeRegs) {
+ State.FreeRegs = 0;
+ return false;
+ }
+
+ State.FreeRegs -= SizeInRegs;
+
+ return true;
+}
+
+ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
+ CCState &State) const {
+ if (!ByVal) {
+ if (State.FreeRegs) {
+ --State.FreeRegs; // Non-byval indirects just use one pointer.
+ return getNaturalAlignIndirectInReg(Ty);
+ }
+ return getNaturalAlignIndirect(Ty, false);
+ }
+
+ // Compute the byval alignment.
+ const unsigned MinABIStackAlignInBytes = 4;
+ unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
+ /*Realign=*/TypeAlign >
+ MinABIStackAlignInBytes);
+}
+
+ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
+ CCState &State) const {
+ // Check with the C++ ABI first.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (RT) {
+ CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
+ if (RAA == CGCXXABI::RAA_Indirect) {
+ return getIndirectResult(Ty, /*ByVal=*/false, State);
+ } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
+ }
+ }
+
+ if (isAggregateTypeForABI(Ty)) {
+ // Structures with flexible arrays are always indirect.
+ if (RT && RT->getDecl()->hasFlexibleArrayMember())
+ return getIndirectResult(Ty, /*ByVal=*/true, State);
+
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ llvm::LLVMContext &LLVMContext = getVMContext();
+ unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+ if (SizeInRegs <= State.FreeRegs) {
+ llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
+ SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
+ llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
+ State.FreeRegs -= SizeInRegs;
+ return ABIArgInfo::getDirectInReg(Result);
+ } else {
+ State.FreeRegs = 0;
+ }
+ return getIndirectResult(Ty, true, State);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const auto *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ bool InReg = shouldUseInReg(Ty, State);
+
+ // Don't pass >64 bit integers in registers.
+ if (const auto *EIT = Ty->getAs<BitIntType>())
+ if (EIT->getNumBits() > 64)
+ return getIndirectResult(Ty, /*ByVal=*/true, State);
+
+ if (isPromotableIntegerTypeForABI(Ty)) {
+ if (InReg)
+ return ABIArgInfo::getDirectInReg();
+ return ABIArgInfo::getExtend(Ty);
+ }
+ if (InReg)
+ return ABIArgInfo::getDirectInReg();
+ return ABIArgInfo::getDirect();
+}
+
+namespace {
+class LanaiTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<LanaiABIInfo>(CGT)) {}
+};
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createLanaiTargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<LanaiTargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/LoongArch.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/LoongArch.cpp
new file mode 100644
index 000000000000..63b9a1fdb988
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/LoongArch.cpp
@@ -0,0 +1,461 @@
+//===- LoongArch.cpp ------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+// LoongArch ABI Implementation. Documented at
+// https://loongson.github.io/LoongArch-Documentation/LoongArch-ELF-ABI-EN.html
+//
+//===----------------------------------------------------------------------===//
+
+namespace {
+class LoongArchABIInfo : public DefaultABIInfo {
+private:
+ // Size of the integer ('r') registers in bits.
+ unsigned GRLen;
+ // Size of the floating point ('f') registers in bits.
+ unsigned FRLen;
+ // Number of general-purpose argument registers.
+ static const int NumGARs = 8;
+ // Number of floating-point argument registers.
+ static const int NumFARs = 8;
+ bool detectFARsEligibleStructHelper(QualType Ty, CharUnits CurOff,
+ llvm::Type *&Field1Ty,
+ CharUnits &Field1Off,
+ llvm::Type *&Field2Ty,
+ CharUnits &Field2Off) const;
+
+public:
+ LoongArchABIInfo(CodeGen::CodeGenTypes &CGT, unsigned GRLen, unsigned FRLen)
+ : DefaultABIInfo(CGT), GRLen(GRLen), FRLen(FRLen) {}
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+
+ ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &GARsLeft,
+ int &FARsLeft) const;
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+
+ ABIArgInfo extendType(QualType Ty) const;
+
+ bool detectFARsEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
+ CharUnits &Field1Off, llvm::Type *&Field2Ty,
+ CharUnits &Field2Off, int &NeededArgGPRs,
+ int &NeededArgFPRs) const;
+ ABIArgInfo coerceAndExpandFARsEligibleStruct(llvm::Type *Field1Ty,
+ CharUnits Field1Off,
+ llvm::Type *Field2Ty,
+ CharUnits Field2Off) const;
+};
+} // end anonymous namespace
+
+void LoongArchABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ QualType RetTy = FI.getReturnType();
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(RetTy);
+
+ // IsRetIndirect is true if classifyArgumentType indicated the value should
+ // be passed indirect, or if the type size is a scalar greater than 2*GRLen
+ // and not a complex type with elements <= FRLen. e.g. fp128 is passed direct
+ // in LLVM IR, relying on the backend lowering code to rewrite the argument
+ // list and pass indirectly on LA32.
+ bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
+ if (!IsRetIndirect && RetTy->isScalarType() &&
+ getContext().getTypeSize(RetTy) > (2 * GRLen)) {
+ if (RetTy->isComplexType() && FRLen) {
+ QualType EltTy = RetTy->castAs<ComplexType>()->getElementType();
+ IsRetIndirect = getContext().getTypeSize(EltTy) > FRLen;
+ } else {
+ // This is a normal scalar > 2*GRLen, such as fp128 on LA32.
+ IsRetIndirect = true;
+ }
+ }
+
+ // We must track the number of GARs and FARs used in order to conform to the
+ // LoongArch ABI. As GAR usage is different for variadic arguments, we must
+ // also track whether we are examining a vararg or not.
+ int GARsLeft = IsRetIndirect ? NumGARs - 1 : NumGARs;
+ int FARsLeft = FRLen ? NumFARs : 0;
+ int NumFixedArgs = FI.getNumRequiredArgs();
+
+ int ArgNum = 0;
+ for (auto &ArgInfo : FI.arguments()) {
+ ArgInfo.info = classifyArgumentType(
+ ArgInfo.type, /*IsFixed=*/ArgNum < NumFixedArgs, GARsLeft, FARsLeft);
+ ArgNum++;
+ }
+}
+
+// Returns true if the struct is a potential candidate to be passed in FARs (and
+// GARs). If this function returns true, the caller is responsible for checking
+// that if there is only a single field then that field is a float.
+bool LoongArchABIInfo::detectFARsEligibleStructHelper(
+ QualType Ty, CharUnits CurOff, llvm::Type *&Field1Ty, CharUnits &Field1Off,
+ llvm::Type *&Field2Ty, CharUnits &Field2Off) const {
+ bool IsInt = Ty->isIntegralOrEnumerationType();
+ bool IsFloat = Ty->isRealFloatingType();
+
+ if (IsInt || IsFloat) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (IsInt && Size > GRLen)
+ return false;
+ // Can't be eligible if larger than the FP registers. Half precision isn't
+ // currently supported on LoongArch and the ABI hasn't been confirmed, so
+ // default to the integer ABI in that case.
+ if (IsFloat && (Size > FRLen || Size < 32))
+ return false;
+ // Can't be eligible if an integer type was already found (int+int pairs
+ // are not eligible).
+ if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
+ return false;
+ if (!Field1Ty) {
+ Field1Ty = CGT.ConvertType(Ty);
+ Field1Off = CurOff;
+ return true;
+ }
+ if (!Field2Ty) {
+ Field2Ty = CGT.ConvertType(Ty);
+ Field2Off = CurOff;
+ return true;
+ }
+ return false;
+ }
+
+ if (auto CTy = Ty->getAs<ComplexType>()) {
+ if (Field1Ty)
+ return false;
+ QualType EltTy = CTy->getElementType();
+ if (getContext().getTypeSize(EltTy) > FRLen)
+ return false;
+ Field1Ty = CGT.ConvertType(EltTy);
+ Field1Off = CurOff;
+ Field2Ty = Field1Ty;
+ Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
+ return true;
+ }
+
+ if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
+ uint64_t ArraySize = ATy->getSize().getZExtValue();
+ QualType EltTy = ATy->getElementType();
+ // Non-zero-length arrays of empty records make the struct ineligible to be
+ // passed via FARs in C++.
+ if (const auto *RTy = EltTy->getAs<RecordType>()) {
+ if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getDecl()) &&
+ isEmptyRecord(getContext(), EltTy, true, true))
+ return false;
+ }
+ CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
+ for (uint64_t i = 0; i < ArraySize; ++i) {
+ if (!detectFARsEligibleStructHelper(EltTy, CurOff, Field1Ty, Field1Off,
+ Field2Ty, Field2Off))
+ return false;
+ CurOff += EltSize;
+ }
+ return true;
+ }
+
+ if (const auto *RTy = Ty->getAs<RecordType>()) {
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are not eligible for the FP calling convention.
+ if (getRecordArgABI(Ty, CGT.getCXXABI()))
+ return false;
+ const RecordDecl *RD = RTy->getDecl();
+ if (isEmptyRecord(getContext(), Ty, true, true) &&
+ (!RD->isUnion() || !isa<CXXRecordDecl>(RD)))
+ return true;
+ // Unions aren't eligible unless they're empty in C (which is caught above).
+ if (RD->isUnion())
+ return false;
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (const CXXBaseSpecifier &B : CXXRD->bases()) {
+ const auto *BDecl =
+ cast<CXXRecordDecl>(B.getType()->castAs<RecordType>()->getDecl());
+ if (!detectFARsEligibleStructHelper(
+ B.getType(), CurOff + Layout.getBaseClassOffset(BDecl),
+ Field1Ty, Field1Off, Field2Ty, Field2Off))
+ return false;
+ }
+ }
+ for (const FieldDecl *FD : RD->fields()) {
+ QualType QTy = FD->getType();
+ if (FD->isBitField()) {
+ unsigned BitWidth = FD->getBitWidthValue(getContext());
+ // Zero-width bitfields are ignored.
+ if (BitWidth == 0)
+ continue;
+ // Allow a bitfield with a type greater than GRLen as long as the
+ // bitwidth is GRLen or less.
+ if (getContext().getTypeSize(QTy) > GRLen && BitWidth <= GRLen) {
+ QTy = getContext().getIntTypeForBitwidth(GRLen, false);
+ }
+ }
+
+ if (!detectFARsEligibleStructHelper(
+ QTy,
+ CurOff + getContext().toCharUnitsFromBits(
+ Layout.getFieldOffset(FD->getFieldIndex())),
+ Field1Ty, Field1Off, Field2Ty, Field2Off))
+ return false;
+ }
+ return Field1Ty != nullptr;
+ }
+
+ return false;
+}
+
+// Determine if a struct is eligible to be passed in FARs (and GARs) (i.e., when
+// flattened it contains a single fp value, fp+fp, or int+fp of appropriate
+// size). If so, NeededFARs and NeededGARs are incremented appropriately.
+bool LoongArchABIInfo::detectFARsEligibleStruct(
+ QualType Ty, llvm::Type *&Field1Ty, CharUnits &Field1Off,
+ llvm::Type *&Field2Ty, CharUnits &Field2Off, int &NeededGARs,
+ int &NeededFARs) const {
+ Field1Ty = nullptr;
+ Field2Ty = nullptr;
+ NeededGARs = 0;
+ NeededFARs = 0;
+ if (!detectFARsEligibleStructHelper(Ty, CharUnits::Zero(), Field1Ty,
+ Field1Off, Field2Ty, Field2Off))
+ return false;
+ if (!Field1Ty)
+ return false;
+ // Not really a candidate if we have a single int but no float.
+ if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
+ return false;
+ if (Field1Ty && Field1Ty->isFloatingPointTy())
+ NeededFARs++;
+ else if (Field1Ty)
+ NeededGARs++;
+ if (Field2Ty && Field2Ty->isFloatingPointTy())
+ NeededFARs++;
+ else if (Field2Ty)
+ NeededGARs++;
+ return true;
+}
+
+// Call getCoerceAndExpand for the two-element flattened struct described by
+// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
+// appropriate coerceToType and unpaddedCoerceToType.
+ABIArgInfo LoongArchABIInfo::coerceAndExpandFARsEligibleStruct(
+ llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
+ CharUnits Field2Off) const {
+ SmallVector<llvm::Type *, 3> CoerceElts;
+ SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
+ if (!Field1Off.isZero())
+ CoerceElts.push_back(llvm::ArrayType::get(
+ llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));
+
+ CoerceElts.push_back(Field1Ty);
+ UnpaddedCoerceElts.push_back(Field1Ty);
+
+ if (!Field2Ty) {
+ return ABIArgInfo::getCoerceAndExpand(
+ llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
+ UnpaddedCoerceElts[0]);
+ }
+
+ CharUnits Field2Align =
+ CharUnits::fromQuantity(getDataLayout().getABITypeAlign(Field2Ty));
+ CharUnits Field1End =
+ Field1Off +
+ CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
+ CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);
+
+ CharUnits Padding = CharUnits::Zero();
+ if (Field2Off > Field2OffNoPadNoPack)
+ Padding = Field2Off - Field2OffNoPadNoPack;
+ else if (Field2Off != Field2Align && Field2Off > Field1End)
+ Padding = Field2Off - Field1End;
+
+ bool IsPacked = !Field2Off.isMultipleOf(Field2Align);
+
+ if (!Padding.isZero())
+ CoerceElts.push_back(llvm::ArrayType::get(
+ llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));
+
+ CoerceElts.push_back(Field2Ty);
+ UnpaddedCoerceElts.push_back(Field2Ty);
+
+ return ABIArgInfo::getCoerceAndExpand(
+ llvm::StructType::get(getVMContext(), CoerceElts, IsPacked),
+ llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked));
+}
+
+ABIArgInfo LoongArchABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
+ int &GARsLeft,
+ int &FARsLeft) const {
+ assert(GARsLeft <= NumGARs && "GAR tracking underflow");
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always passed indirectly.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
+ if (GARsLeft)
+ GARsLeft -= 1;
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
+ CGCXXABI::RAA_DirectInMemory);
+ }
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ // Ignore empty struct or union whose size is zero, e.g. `struct { }` in C or
+ // `struct { int a[0]; }` in C++. In C++, `struct { }` is empty but it's size
+ // is 1 byte and g++ doesn't ignore it; clang++ matches this behaviour.
+ if (isEmptyRecord(getContext(), Ty, true) && Size == 0)
+ return ABIArgInfo::getIgnore();
+
+ // Pass floating point values via FARs if possible.
+ if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
+ FRLen >= Size && FARsLeft) {
+ FARsLeft--;
+ return ABIArgInfo::getDirect();
+ }
+
+ // Complex types for the *f or *d ABI must be passed directly rather than
+ // using CoerceAndExpand.
+ if (IsFixed && Ty->isComplexType() && FRLen && FARsLeft >= 2) {
+ QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
+ if (getContext().getTypeSize(EltTy) <= FRLen) {
+ FARsLeft -= 2;
+ return ABIArgInfo::getDirect();
+ }
+ }
+
+ if (IsFixed && FRLen && Ty->isStructureOrClassType()) {
+ llvm::Type *Field1Ty = nullptr;
+ llvm::Type *Field2Ty = nullptr;
+ CharUnits Field1Off = CharUnits::Zero();
+ CharUnits Field2Off = CharUnits::Zero();
+ int NeededGARs = 0;
+ int NeededFARs = 0;
+ bool IsCandidate = detectFARsEligibleStruct(
+ Ty, Field1Ty, Field1Off, Field2Ty, Field2Off, NeededGARs, NeededFARs);
+ if (IsCandidate && NeededGARs <= GARsLeft && NeededFARs <= FARsLeft) {
+ GARsLeft -= NeededGARs;
+ FARsLeft -= NeededFARs;
+ return coerceAndExpandFARsEligibleStruct(Field1Ty, Field1Off, Field2Ty,
+ Field2Off);
+ }
+ }
+
+ uint64_t NeededAlign = getContext().getTypeAlign(Ty);
+ // Determine the number of GARs needed to pass the current argument
+ // according to the ABI. 2*GRLen-aligned varargs are passed in "aligned"
+ // register pairs, so may consume 3 registers.
+ int NeededGARs = 1;
+ if (!IsFixed && NeededAlign == 2 * GRLen)
+ NeededGARs = 2 + (GARsLeft % 2);
+ else if (Size > GRLen && Size <= 2 * GRLen)
+ NeededGARs = 2;
+
+ if (NeededGARs > GARsLeft)
+ NeededGARs = GARsLeft;
+
+ GARsLeft -= NeededGARs;
+
+ if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // All integral types are promoted to GRLen width.
+ if (Size < GRLen && Ty->isIntegralOrEnumerationType())
+ return extendType(Ty);
+
+ if (const auto *EIT = Ty->getAs<BitIntType>()) {
+ if (EIT->getNumBits() < GRLen)
+ return extendType(Ty);
+ if (EIT->getNumBits() > 128 ||
+ (!getContext().getTargetInfo().hasInt128Type() &&
+ EIT->getNumBits() > 64))
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+ }
+
+ return ABIArgInfo::getDirect();
+ }
+
+ // Aggregates which are <= 2*GRLen will be passed in registers if possible,
+ // so coerce to integers.
+ if (Size <= 2 * GRLen) {
+ // Use a single GRLen int if possible, 2*GRLen if 2*GRLen alignment is
+ // required, and a 2-element GRLen array if only GRLen alignment is
+ // required.
+ if (Size <= GRLen) {
+ return ABIArgInfo::getDirect(
+ llvm::IntegerType::get(getVMContext(), GRLen));
+ }
+ if (getContext().getTypeAlign(Ty) == 2 * GRLen) {
+ return ABIArgInfo::getDirect(
+ llvm::IntegerType::get(getVMContext(), 2 * GRLen));
+ }
+ return ABIArgInfo::getDirect(
+ llvm::ArrayType::get(llvm::IntegerType::get(getVMContext(), GRLen), 2));
+ }
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+}
+
+ABIArgInfo LoongArchABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+ // The rules for return and argument types are the same, so defer to
+ // classifyArgumentType.
+ int GARsLeft = 2;
+ int FARsLeft = FRLen ? 2 : 0;
+ return classifyArgumentType(RetTy, /*IsFixed=*/true, GARsLeft, FARsLeft);
+}
+
+Address LoongArchABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ CharUnits SlotSize = CharUnits::fromQuantity(GRLen / 8);
+
+ // Empty records are ignored for parameter passing purposes.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return Address(CGF.Builder.CreateLoad(VAListAddr),
+ CGF.ConvertTypeForMem(Ty), SlotSize);
+
+ auto TInfo = getContext().getTypeInfoInChars(Ty);
+
+ // Arguments bigger than 2*GRLen bytes are passed indirectly.
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty,
+ /*IsIndirect=*/TInfo.Width > 2 * SlotSize, TInfo,
+ SlotSize,
+ /*AllowHigherAlign=*/true);
+}
+
+ABIArgInfo LoongArchABIInfo::extendType(QualType Ty) const {
+ int TySize = getContext().getTypeSize(Ty);
+ // LA64 ABI requires unsigned 32 bit integers to be sign extended.
+ if (GRLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
+ return ABIArgInfo::getSignExtend(Ty);
+ return ABIArgInfo::getExtend(Ty);
+}
+
+namespace {
+class LoongArchTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ LoongArchTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned GRLen,
+ unsigned FRLen)
+ : TargetCodeGenInfo(
+ std::make_unique<LoongArchABIInfo>(CGT, GRLen, FRLen)) {}
+};
+} // namespace
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createLoongArchTargetCodeGenInfo(CodeGenModule &CGM, unsigned GRLen,
+ unsigned FLen) {
+ return std::make_unique<LoongArchTargetCodeGenInfo>(CGM.getTypes(), GRLen,
+ FLen);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/M68k.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/M68k.cpp
new file mode 100644
index 000000000000..120022105f34
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/M68k.cpp
@@ -0,0 +1,55 @@
+//===- M68k.cpp -----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// M68k ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class M68kTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ M68kTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const override;
+};
+
+} // namespace
+
+void M68kTargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
+ if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+ if (const auto *attr = FD->getAttr<M68kInterruptAttr>()) {
+ // Handle 'interrupt' attribute:
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ // Step 1: Set ISR calling convention.
+ F->setCallingConv(llvm::CallingConv::M68k_INTR);
+
+ // Step 2: Add attributes goodness.
+ F->addFnAttr(llvm::Attribute::NoInline);
+
+ // Step 3: Emit ISR vector alias.
+ unsigned Num = attr->getNumber() / 2;
+ llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
+ "__isr_" + Twine(Num), F);
+ }
+ }
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createM68kTargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<M68kTargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/MSP430.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/MSP430.cpp
new file mode 100644
index 000000000000..bb67d97f4421
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/MSP430.cpp
@@ -0,0 +1,94 @@
+//===- MSP430.cpp ---------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// MSP430 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class MSP430ABIInfo : public DefaultABIInfo {
+ static ABIArgInfo complexArgInfo() {
+ ABIArgInfo Info = ABIArgInfo::getDirect();
+ Info.setCanBeFlattened(false);
+ return Info;
+ }
+
+public:
+ MSP430ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const {
+ if (RetTy->isAnyComplexType())
+ return complexArgInfo();
+
+ return DefaultABIInfo::classifyReturnType(RetTy);
+ }
+
+ ABIArgInfo classifyArgumentType(QualType RetTy) const {
+ if (RetTy->isAnyComplexType())
+ return complexArgInfo();
+
+ return DefaultABIInfo::classifyArgumentType(RetTy);
+ }
+
+ // Just copy the original implementations because
+ // DefaultABIInfo::classify{Return,Argument}Type() are not virtual
+ void computeInfo(CGFunctionInfo &FI) const override {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+ }
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override {
+ return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
+ }
+};
+
+class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<MSP430ABIInfo>(CGT)) {}
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const override;
+};
+
+}
+
+void MSP430TargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
+ if (GV->isDeclaration())
+ return;
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+ const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>();
+ if (!InterruptAttr)
+ return;
+
+ // Handle 'interrupt' attribute:
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ // Step 1: Set ISR calling convention.
+ F->setCallingConv(llvm::CallingConv::MSP430_INTR);
+
+ // Step 2: Add attributes goodness.
+ F->addFnAttr(llvm::Attribute::NoInline);
+ F->addFnAttr("interrupt", llvm::utostr(InterruptAttr->getNumber()));
+ }
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createMSP430TargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<MSP430TargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/Mips.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/Mips.cpp
new file mode 100644
index 000000000000..8f11c63dcd85
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/Mips.cpp
@@ -0,0 +1,441 @@
+//===- Mips.cpp -----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// MIPS ABI Implementation. This works for both little-endian and
+// big-endian variants.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class MipsABIInfo : public ABIInfo {
+ bool IsO32;
+ const unsigned MinABIStackAlignInBytes, StackAlignInBytes;
+ void CoerceToIntArgs(uint64_t TySize,
+ SmallVectorImpl<llvm::Type *> &ArgList) const;
+ llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
+ llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
+ llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
+public:
+ MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
+ ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
+ StackAlignInBytes(IsO32 ? 8 : 16) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
+ void computeInfo(CGFunctionInfo &FI) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+ ABIArgInfo extendType(QualType Ty) const;
+};
+
+class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
+ unsigned SizeOfUnwindException;
+public:
+ MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
+ : TargetCodeGenInfo(std::make_unique<MipsABIInfo>(CGT, IsO32)),
+ SizeOfUnwindException(IsO32 ? 24 : 32) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
+ return 29;
+ }
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override {
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD) return;
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+
+ if (FD->hasAttr<MipsLongCallAttr>())
+ Fn->addFnAttr("long-call");
+ else if (FD->hasAttr<MipsShortCallAttr>())
+ Fn->addFnAttr("short-call");
+
+ // Other attributes do not have a meaning for declarations.
+ if (GV->isDeclaration())
+ return;
+
+ if (FD->hasAttr<Mips16Attr>()) {
+ Fn->addFnAttr("mips16");
+ }
+ else if (FD->hasAttr<NoMips16Attr>()) {
+ Fn->addFnAttr("nomips16");
+ }
+
+ if (FD->hasAttr<MicroMipsAttr>())
+ Fn->addFnAttr("micromips");
+ else if (FD->hasAttr<NoMicroMipsAttr>())
+ Fn->addFnAttr("nomicromips");
+
+ const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
+ if (!Attr)
+ return;
+
+ const char *Kind;
+ switch (Attr->getInterrupt()) {
+ case MipsInterruptAttr::eic: Kind = "eic"; break;
+ case MipsInterruptAttr::sw0: Kind = "sw0"; break;
+ case MipsInterruptAttr::sw1: Kind = "sw1"; break;
+ case MipsInterruptAttr::hw0: Kind = "hw0"; break;
+ case MipsInterruptAttr::hw1: Kind = "hw1"; break;
+ case MipsInterruptAttr::hw2: Kind = "hw2"; break;
+ case MipsInterruptAttr::hw3: Kind = "hw3"; break;
+ case MipsInterruptAttr::hw4: Kind = "hw4"; break;
+ case MipsInterruptAttr::hw5: Kind = "hw5"; break;
+ }
+
+ Fn->addFnAttr("interrupt", Kind);
+
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override;
+
+ unsigned getSizeOfUnwindException() const override {
+ return SizeOfUnwindException;
+ }
+};
+}
+
+void MipsABIInfo::CoerceToIntArgs(
+ uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
+ llvm::IntegerType *IntTy =
+ llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
+
+ // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
+ for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
+ ArgList.push_back(IntTy);
+
+ // If necessary, add one more integer type to ArgList.
+ unsigned R = TySize % (MinABIStackAlignInBytes * 8);
+
+ if (R)
+ ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
+}
+
+// In N32/64, an aligned double precision floating point field is passed in
+// a register.
+llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
+ SmallVector<llvm::Type*, 8> ArgList, IntArgList;
+
+ if (IsO32) {
+ CoerceToIntArgs(TySize, ArgList);
+ return llvm::StructType::get(getVMContext(), ArgList);
+ }
+
+ if (Ty->isComplexType())
+ return CGT.ConvertType(Ty);
+
+ const RecordType *RT = Ty->getAs<RecordType>();
+
+ // Unions/vectors are passed in integer registers.
+ if (!RT || !RT->isStructureOrClassType()) {
+ CoerceToIntArgs(TySize, ArgList);
+ return llvm::StructType::get(getVMContext(), ArgList);
+ }
+
+ const RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
+
+ uint64_t LastOffset = 0;
+ unsigned idx = 0;
+ llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
+
+ // Iterate over fields in the struct/class and check if there are any aligned
+ // double fields.
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ const QualType Ty = i->getType();
+ const BuiltinType *BT = Ty->getAs<BuiltinType>();
+
+ if (!BT || BT->getKind() != BuiltinType::Double)
+ continue;
+
+ uint64_t Offset = Layout.getFieldOffset(idx);
+ if (Offset % 64) // Ignore doubles that are not aligned.
+ continue;
+
+ // Add ((Offset - LastOffset) / 64) args of type i64.
+ for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
+ ArgList.push_back(I64);
+
+ // Add double type.
+ ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
+ LastOffset = Offset + 64;
+ }
+
+ CoerceToIntArgs(TySize - LastOffset, IntArgList);
+ ArgList.append(IntArgList.begin(), IntArgList.end());
+
+ return llvm::StructType::get(getVMContext(), ArgList);
+}
+
+llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
+ uint64_t Offset) const {
+ if (OrigOffset + MinABIStackAlignInBytes > Offset)
+ return nullptr;
+
+ return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
+}
+
+ABIArgInfo
+MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ uint64_t OrigOffset = Offset;
+ uint64_t TySize = getContext().getTypeSize(Ty);
+ uint64_t Align = getContext().getTypeAlign(Ty) / 8;
+
+ Align = std::clamp(Align, (uint64_t)MinABIStackAlignInBytes,
+ (uint64_t)StackAlignInBytes);
+ unsigned CurrOffset = llvm::alignTo(Offset, Align);
+ Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
+
+ if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
+ // Ignore empty aggregates.
+ if (TySize == 0)
+ return ABIArgInfo::getIgnore();
+
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
+ Offset = OrigOffset + MinABIStackAlignInBytes;
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+ }
+
+ // If we have reached here, aggregates are passed directly by coercing to
+ // another structure type. Padding is inserted if the offset of the
+ // aggregate is unaligned.
+ ABIArgInfo ArgInfo =
+ ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
+ getPaddingType(OrigOffset, CurrOffset));
+ ArgInfo.setInReg(true);
+ return ArgInfo;
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // Make sure we pass indirectly things that are too large.
+ if (const auto *EIT = Ty->getAs<BitIntType>())
+ if (EIT->getNumBits() > 128 ||
+ (EIT->getNumBits() > 64 &&
+ !getContext().getTargetInfo().hasInt128Type()))
+ return getNaturalAlignIndirect(Ty);
+
+ // All integral types are promoted to the GPR width.
+ if (Ty->isIntegralOrEnumerationType())
+ return extendType(Ty);
+
+ return ABIArgInfo::getDirect(
+ nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
+}
+
+llvm::Type*
+MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
+ const RecordType *RT = RetTy->getAs<RecordType>();
+ SmallVector<llvm::Type*, 8> RTList;
+
+ if (RT && RT->isStructureOrClassType()) {
+ const RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ unsigned FieldCnt = Layout.getFieldCount();
+
+ // N32/64 returns struct/classes in floating point registers if the
+ // following conditions are met:
+ // 1. The size of the struct/class is no larger than 128-bit.
+ // 2. The struct/class has one or two fields all of which are floating
+ // point types.
+ // 3. The offset of the first field is zero (this follows what gcc does).
+ //
+ // Any other composite results are returned in integer registers.
+ //
+ if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
+ RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
+ for (; b != e; ++b) {
+ const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
+
+ if (!BT || !BT->isFloatingPoint())
+ break;
+
+ RTList.push_back(CGT.ConvertType(b->getType()));
+ }
+
+ if (b == e)
+ return llvm::StructType::get(getVMContext(), RTList,
+ RD->hasAttr<PackedAttr>());
+
+ RTList.clear();
+ }
+ }
+
+ CoerceToIntArgs(Size, RTList);
+ return llvm::StructType::get(getVMContext(), RTList);
+}
+
+ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
+ uint64_t Size = getContext().getTypeSize(RetTy);
+
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ // O32 doesn't treat zero-sized structs differently from other structs.
+ // However, N32/N64 ignores zero sized return values.
+ if (!IsO32 && Size == 0)
+ return ABIArgInfo::getIgnore();
+
+ if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
+ if (Size <= 128) {
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getDirect();
+
+ // O32 returns integer vectors in registers and N32/N64 returns all small
+ // aggregates in registers.
+ if (!IsO32 ||
+ (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
+ ABIArgInfo ArgInfo =
+ ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
+ ArgInfo.setInReg(true);
+ return ArgInfo;
+ }
+ }
+
+ return getNaturalAlignIndirect(RetTy);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ // Make sure we pass indirectly things that are too large.
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
+ if (EIT->getNumBits() > 128 ||
+ (EIT->getNumBits() > 64 &&
+ !getContext().getTargetInfo().hasInt128Type()))
+ return getNaturalAlignIndirect(RetTy);
+
+ if (isPromotableIntegerTypeForABI(RetTy))
+ return ABIArgInfo::getExtend(RetTy);
+
+ if ((RetTy->isUnsignedIntegerOrEnumerationType() ||
+ RetTy->isSignedIntegerOrEnumerationType()) && Size == 32 && !IsO32)
+ return ABIArgInfo::getSignExtend(RetTy);
+
+ return ABIArgInfo::getDirect();
+}
+
+void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ ABIArgInfo &RetInfo = FI.getReturnInfo();
+ if (!getCXXABI().classifyReturnType(FI))
+ RetInfo = classifyReturnType(FI.getReturnType());
+
+ // Check if a pointer to an aggregate is passed as a hidden argument.
+ uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
+
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type, Offset);
+}
+
+Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType OrigTy) const {
+ QualType Ty = OrigTy;
+
+ // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
+ // Pointers are also promoted in the same way but this only matters for N32.
+ unsigned SlotSizeInBits = IsO32 ? 32 : 64;
+ unsigned PtrWidth = getTarget().getPointerWidth(LangAS::Default);
+ bool DidPromote = false;
+ if ((Ty->isIntegerType() &&
+ getContext().getIntWidth(Ty) < SlotSizeInBits) ||
+ (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
+ DidPromote = true;
+ Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
+ Ty->isSignedIntegerType());
+ }
+
+ auto TyInfo = getContext().getTypeInfoInChars(Ty);
+
+ // The alignment of things in the argument area is never larger than
+ // StackAlignInBytes.
+ TyInfo.Align =
+ std::min(TyInfo.Align, CharUnits::fromQuantity(StackAlignInBytes));
+
+ // MinABIStackAlignInBytes is the size of argument slots on the stack.
+ CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
+
+ Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
+ TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
+
+
+ // If there was a promotion, "unpromote" into a temporary.
+ // TODO: can we just use a pointer into a subset of the original slot?
+ if (DidPromote) {
+ Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
+ llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);
+
+ // Truncate down to the right width.
+ llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
+ : CGF.IntPtrTy);
+ llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
+ if (OrigTy->isPointerType())
+ V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());
+
+ CGF.Builder.CreateStore(V, Temp);
+ Addr = Temp;
+ }
+
+ return Addr;
+}
+
+ABIArgInfo MipsABIInfo::extendType(QualType Ty) const {
+ int TySize = getContext().getTypeSize(Ty);
+
+ // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
+ if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
+ return ABIArgInfo::getSignExtend(Ty);
+
+ return ABIArgInfo::getExtend(Ty);
+}
+
+bool
+MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ // This information comes from gcc's implementation, which seems to
+ // as canonical as it gets.
+
+ // Everything on MIPS is 4 bytes. Double-precision FP registers
+ // are aliased to pairs of single-precision FP registers.
+ llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
+
+ // 0-31 are the general purpose registers, $0 - $31.
+ // 32-63 are the floating-point registers, $f0 - $f31.
+ // 64 and 65 are the multiply/divide registers, $hi and $lo.
+ // 66 is the (notional, I think) register for signal-handler return.
+ AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
+
+ // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
+ // They are one bit wide and ignored here.
+
+ // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
+ // (coprocessor 1 is the FP unit)
+ // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
+ // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
+ // 176-181 are the DSP accumulator registers.
+ AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
+ return false;
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createMIPSTargetCodeGenInfo(CodeGenModule &CGM, bool IsOS32) {
+ return std::make_unique<MIPSTargetCodeGenInfo>(CGM.getTypes(), IsOS32);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/NVPTX.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/NVPTX.cpp
new file mode 100644
index 000000000000..d0dc7c258a03
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/NVPTX.cpp
@@ -0,0 +1,342 @@
+//===- NVPTX.cpp ----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+#include "llvm/IR/IntrinsicsNVPTX.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// NVPTX ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class NVPTXTargetCodeGenInfo;
+
+class NVPTXABIInfo : public ABIInfo {
+ NVPTXTargetCodeGenInfo &CGInfo;
+
+public:
+ NVPTXABIInfo(CodeGenTypes &CGT, NVPTXTargetCodeGenInfo &Info)
+ : ABIInfo(CGT), CGInfo(Info) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+ bool isUnsupportedType(QualType T) const;
+ ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, unsigned MaxSize) const;
+};
+
+class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<NVPTXABIInfo>(CGT, *this)) {}
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const override;
+ bool shouldEmitStaticExternCAliases() const override;
+
+ llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const override {
+ // On the device side, surface reference is represented as an object handle
+ // in 64-bit integer.
+ return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
+ }
+
+ llvm::Type *getCUDADeviceBuiltinTextureDeviceType() const override {
+ // On the device side, texture reference is represented as an object handle
+ // in 64-bit integer.
+ return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
+ }
+
+ bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF, LValue Dst,
+ LValue Src) const override {
+ emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
+ return true;
+ }
+
+ bool emitCUDADeviceBuiltinTextureDeviceCopy(CodeGenFunction &CGF, LValue Dst,
+ LValue Src) const override {
+ emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
+ return true;
+ }
+
+ // Adds a NamedMDNode with GV, Name, and Operand as operands, and adds the
+ // resulting MDNode to the nvvm.annotations MDNode.
+ static void addNVVMMetadata(llvm::GlobalValue *GV, StringRef Name,
+ int Operand);
+
+private:
+ static void emitBuiltinSurfTexDeviceCopy(CodeGenFunction &CGF, LValue Dst,
+ LValue Src) {
+ llvm::Value *Handle = nullptr;
+ llvm::Constant *C =
+ llvm::dyn_cast<llvm::Constant>(Src.getAddress(CGF).getPointer());
+ // Lookup `addrspacecast` through the constant pointer if any.
+ if (auto *ASC = llvm::dyn_cast_or_null<llvm::AddrSpaceCastOperator>(C))
+ C = llvm::cast<llvm::Constant>(ASC->getPointerOperand());
+ if (auto *GV = llvm::dyn_cast_or_null<llvm::GlobalVariable>(C)) {
+ // Load the handle from the specific global variable using
+ // `nvvm.texsurf.handle.internal` intrinsic.
+ Handle = CGF.EmitRuntimeCall(
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::nvvm_texsurf_handle_internal,
+ {GV->getType()}),
+ {GV}, "texsurf_handle");
+ } else
+ Handle = CGF.EmitLoadOfScalar(Src, SourceLocation());
+ CGF.EmitStoreOfScalar(Handle, Dst);
+ }
+};
+
+/// Checks if the type is unsupported directly by the current target.
+bool NVPTXABIInfo::isUnsupportedType(QualType T) const {
+ ASTContext &Context = getContext();
+ if (!Context.getTargetInfo().hasFloat16Type() && T->isFloat16Type())
+ return true;
+ if (!Context.getTargetInfo().hasFloat128Type() &&
+ (T->isFloat128Type() ||
+ (T->isRealFloatingType() && Context.getTypeSize(T) == 128)))
+ return true;
+ if (const auto *EIT = T->getAs<BitIntType>())
+ return EIT->getNumBits() >
+ (Context.getTargetInfo().hasInt128Type() ? 128U : 64U);
+ if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() &&
+ Context.getTypeSize(T) > 64U)
+ return true;
+ if (const auto *AT = T->getAsArrayTypeUnsafe())
+ return isUnsupportedType(AT->getElementType());
+ const auto *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+ const RecordDecl *RD = RT->getDecl();
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ for (const CXXBaseSpecifier &I : CXXRD->bases())
+ if (isUnsupportedType(I.getType()))
+ return true;
+
+ for (const FieldDecl *I : RD->fields())
+ if (isUnsupportedType(I->getType()))
+ return true;
+ return false;
+}
+
+/// Coerce the given type into an array with maximum allowed size of elements.
+ABIArgInfo NVPTXABIInfo::coerceToIntArrayWithLimit(QualType Ty,
+ unsigned MaxSize) const {
+ // Alignment and Size are measured in bits.
+ const uint64_t Size = getContext().getTypeSize(Ty);
+ const uint64_t Alignment = getContext().getTypeAlign(Ty);
+ const unsigned Div = std::min<unsigned>(MaxSize, Alignment);
+ llvm::Type *IntType = llvm::Type::getIntNTy(getVMContext(), Div);
+ const uint64_t NumElements = (Size + Div - 1) / Div;
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
+}
+
+ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (getContext().getLangOpts().OpenMP &&
+ getContext().getLangOpts().OpenMPIsTargetDevice &&
+ isUnsupportedType(RetTy))
+ return coerceToIntArrayWithLimit(RetTy, 64);
+
+ // note: this is different from default ABI
+ if (!RetTy->isScalarType())
+ return ABIArgInfo::getDirect();
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // Return aggregates type as indirect by value
+ if (isAggregateTypeForABI(Ty)) {
+ // Under CUDA device compilation, tex/surf builtin types are replaced with
+ // object types and passed directly.
+ if (getContext().getLangOpts().CUDAIsDevice) {
+ if (Ty->isCUDADeviceBuiltinSurfaceType())
+ return ABIArgInfo::getDirect(
+ CGInfo.getCUDADeviceBuiltinSurfaceDeviceType());
+ if (Ty->isCUDADeviceBuiltinTextureType())
+ return ABIArgInfo::getDirect(
+ CGInfo.getCUDADeviceBuiltinTextureDeviceType());
+ }
+ return getNaturalAlignIndirect(Ty, /* byval */ true);
+ }
+
+ if (const auto *EIT = Ty->getAs<BitIntType>()) {
+ if ((EIT->getNumBits() > 128) ||
+ (!getContext().getTargetInfo().hasInt128Type() &&
+ EIT->getNumBits() > 64))
+ return getNaturalAlignIndirect(Ty, /* byval */ true);
+ }
+
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
+}
+
+void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+
+ // Always honor user-specified calling convention.
+ if (FI.getCallingConvention() != llvm::CallingConv::C)
+ return;
+
+ FI.setEffectiveCallingConvention(getRuntimeCC());
+}
+
+Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ llvm_unreachable("NVPTX does not support varargs");
+}
+
+void NVPTXTargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
+ if (GV->isDeclaration())
+ return;
+ const VarDecl *VD = dyn_cast_or_null<VarDecl>(D);
+ if (VD) {
+ if (M.getLangOpts().CUDA) {
+ if (VD->getType()->isCUDADeviceBuiltinSurfaceType())
+ addNVVMMetadata(GV, "surface", 1);
+ else if (VD->getType()->isCUDADeviceBuiltinTextureType())
+ addNVVMMetadata(GV, "texture", 1);
+ return;
+ }
+ }
+
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD) return;
+
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ // Perform special handling in OpenCL mode
+ if (M.getLangOpts().OpenCL) {
+ // Use OpenCL function attributes to check for kernel functions
+ // By default, all functions are device functions
+ if (FD->hasAttr<OpenCLKernelAttr>()) {
+ // OpenCL __kernel functions get kernel metadata
+ // Create !{<func-ref>, metadata !"kernel", i32 1} node
+ addNVVMMetadata(F, "kernel", 1);
+ // And kernel functions are not subject to inlining
+ F->addFnAttr(llvm::Attribute::NoInline);
+ }
+ }
+
+ // Perform special handling in CUDA mode.
+ if (M.getLangOpts().CUDA) {
+ // CUDA __global__ functions get a kernel metadata entry. Since
+ // __global__ functions cannot be called from the device, we do not
+ // need to set the noinline attribute.
+ if (FD->hasAttr<CUDAGlobalAttr>()) {
+ // Create !{<func-ref>, metadata !"kernel", i32 1} node
+ addNVVMMetadata(F, "kernel", 1);
+ }
+ if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>())
+ M.handleCUDALaunchBoundsAttr(F, Attr);
+ }
+
+ // Attach kernel metadata directly if compiling for NVPTX.
+ if (FD->hasAttr<NVPTXKernelAttr>()) {
+ addNVVMMetadata(F, "kernel", 1);
+ }
+}
+
+void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::GlobalValue *GV,
+ StringRef Name, int Operand) {
+ llvm::Module *M = GV->getParent();
+ llvm::LLVMContext &Ctx = M->getContext();
+
+ // Get "nvvm.annotations" metadata node
+ llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
+
+ llvm::Metadata *MDVals[] = {
+ llvm::ConstantAsMetadata::get(GV), llvm::MDString::get(Ctx, Name),
+ llvm::ConstantAsMetadata::get(
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
+ // Append metadata to nvvm.annotations
+ MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
+}
+
+bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
+ return false;
+}
+}
+
+void CodeGenModule::handleCUDALaunchBoundsAttr(llvm::Function *F,
+ const CUDALaunchBoundsAttr *Attr,
+ int32_t *MaxThreadsVal,
+ int32_t *MinBlocksVal,
+ int32_t *MaxClusterRankVal) {
+ // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
+ llvm::APSInt MaxThreads(32);
+ MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(getContext());
+ if (MaxThreads > 0) {
+ if (MaxThreadsVal)
+ *MaxThreadsVal = MaxThreads.getExtValue();
+ if (F) {
+ // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
+ NVPTXTargetCodeGenInfo::addNVVMMetadata(F, "maxntidx",
+ MaxThreads.getExtValue());
+ }
+ }
+
+ // min and max blocks is an optional argument for CUDALaunchBoundsAttr. If it
+ // was not specified in __launch_bounds__ or if the user specified a 0 value,
+ // we don't have to add a PTX directive.
+ if (Attr->getMinBlocks()) {
+ llvm::APSInt MinBlocks(32);
+ MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(getContext());
+ if (MinBlocks > 0) {
+ if (MinBlocksVal)
+ *MinBlocksVal = MinBlocks.getExtValue();
+ if (F) {
+ // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
+ NVPTXTargetCodeGenInfo::addNVVMMetadata(F, "minctasm",
+ MinBlocks.getExtValue());
+ }
+ }
+ }
+ if (Attr->getMaxBlocks()) {
+ llvm::APSInt MaxBlocks(32);
+ MaxBlocks = Attr->getMaxBlocks()->EvaluateKnownConstInt(getContext());
+ if (MaxBlocks > 0) {
+ if (MaxClusterRankVal)
+ *MaxClusterRankVal = MaxBlocks.getExtValue();
+ if (F) {
+ // Create !{<func-ref>, metadata !"maxclusterrank", i32 <val>} node
+ NVPTXTargetCodeGenInfo::addNVVMMetadata(F, "maxclusterrank",
+ MaxBlocks.getExtValue());
+ }
+ }
+ }
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createNVPTXTargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<NVPTXTargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/PNaCl.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/PNaCl.cpp
new file mode 100644
index 000000000000..771aa7469da2
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/PNaCl.cpp
@@ -0,0 +1,109 @@
+//===- PNaCl.cpp ----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// le32/PNaCl bitcode ABI Implementation
+//
+// This is a simplified version of the x86_32 ABI. Arguments and return values
+// are always passed on the stack.
+//===----------------------------------------------------------------------===//
+
+class PNaClABIInfo : public ABIInfo {
+ public:
+ PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+ Address EmitVAArg(CodeGenFunction &CGF,
+ Address VAListAddr, QualType Ty) const override;
+};
+
+class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
+ public:
+ PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<PNaClABIInfo>(CGT)) {}
+};
+
+void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+}
+
+Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ // The PNaCL ABI is a bit odd, in that varargs don't use normal
+ // function classification. Structs get passed directly for varargs
+ // functions, through a rewriting transform in
+ // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
+ // this target to actually support a va_arg instructions with an
+ // aggregate type, unlike other targets.
+ return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
+}
+
+/// Classify argument of given type \p Ty.
+ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
+ if (isAggregateTypeForABI(Ty)) {
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+ return getNaturalAlignIndirect(Ty);
+ } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
+ // Treat an enum type as its underlying type.
+ Ty = EnumTy->getDecl()->getIntegerType();
+ } else if (Ty->isFloatingType()) {
+ // Floating-point types don't go inreg.
+ return ABIArgInfo::getDirect();
+ } else if (const auto *EIT = Ty->getAs<BitIntType>()) {
+ // Treat bit-precise integers as integers if <= 64, otherwise pass
+ // indirectly.
+ if (EIT->getNumBits() > 64)
+ return getNaturalAlignIndirect(Ty);
+ return ABIArgInfo::getDirect();
+ }
+
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ // In the PNaCl ABI we always return records/structures on the stack.
+ if (isAggregateTypeForABI(RetTy))
+ return getNaturalAlignIndirect(RetTy);
+
+ // Treat bit-precise integers as integers if <= 64, otherwise pass indirectly.
+ if (const auto *EIT = RetTy->getAs<BitIntType>()) {
+ if (EIT->getNumBits() > 64)
+ return getNaturalAlignIndirect(RetTy);
+ return ABIArgInfo::getDirect();
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createPNaClTargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<PNaClTargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/PPC.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/PPC.cpp
new file mode 100644
index 000000000000..40dddde508c1
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/PPC.cpp
@@ -0,0 +1,993 @@
+//===- PPC.cpp ------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+static Address complexTempStructure(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty, CharUnits SlotSize,
+ CharUnits EltSize, const ComplexType *CTy) {
+ Address Addr =
+ emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty, SlotSize * 2,
+ SlotSize, SlotSize, /*AllowHigher*/ true);
+
+ Address RealAddr = Addr;
+ Address ImagAddr = RealAddr;
+ if (CGF.CGM.getDataLayout().isBigEndian()) {
+ RealAddr =
+ CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize - EltSize);
+ ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
+ 2 * SlotSize - EltSize);
+ } else {
+ ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
+ }
+
+ llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
+ RealAddr = RealAddr.withElementType(EltTy);
+ ImagAddr = ImagAddr.withElementType(EltTy);
+ llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
+ llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");
+
+ Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
+ CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
+ /*init*/ true);
+ return Temp;
+}
+
+static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address, bool Is64Bit,
+ bool IsAIX) {
+ // This is calculated from the LLVM and GCC tables and verified
+ // against gcc output. AFAIK all PPC ABIs use the same encoding.
+
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::IntegerType *i8 = CGF.Int8Ty;
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+ llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
+
+ // 0-31: r0-31, the 4-byte or 8-byte general-purpose registers
+ AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 0, 31);
+
+ // 32-63: fp0-31, the 8-byte floating-point registers
+ AssignToArrayRange(Builder, Address, Eight8, 32, 63);
+
+ // 64-67 are various 4-byte or 8-byte special-purpose registers:
+ // 64: mq
+ // 65: lr
+ // 66: ctr
+ // 67: ap
+ AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 64, 67);
+
+ // 68-76 are various 4-byte special-purpose registers:
+ // 68-75 cr0-7
+ // 76: xer
+ AssignToArrayRange(Builder, Address, Four8, 68, 76);
+
+ // 77-108: v0-31, the 16-byte vector registers
+ AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
+
+ // 109: vrsave
+ // 110: vscr
+ AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 109, 110);
+
+ // AIX does not utilize the rest of the registers.
+ if (IsAIX)
+ return false;
+
+ // 111: spe_acc
+ // 112: spefscr
+ // 113: sfp
+ AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 111, 113);
+
+ if (!Is64Bit)
+ return false;
+
+ // TODO: Need to verify if these registers are used on 64 bit AIX with Power8
+ // or above CPU.
+ // 64-bit only registers:
+ // 114: tfhar
+ // 115: tfiar
+ // 116: texasr
+ AssignToArrayRange(Builder, Address, Eight8, 114, 116);
+
+ return false;
+}
+
+// AIX
+namespace {
+/// AIXABIInfo - The AIX XCOFF ABI information.
+class AIXABIInfo : public ABIInfo {
+ const bool Is64Bit;
+ const unsigned PtrByteSize;
+ CharUnits getParamTypeAlignment(QualType Ty) const;
+
+public:
+ AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
+ : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {}
+
+ bool isPromotableTypeForABI(QualType Ty) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+ }
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+class AIXTargetCodeGenInfo : public TargetCodeGenInfo {
+ const bool Is64Bit;
+
+public:
+ AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
+ : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(CGT, Is64Bit)),
+ Is64Bit(Is64Bit) {}
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override;
+};
+} // namespace
+
+// Return true if the ABI requires Ty to be passed sign- or zero-
+// extended to 32/64 bits.
+bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // Promotable integer types are required to be promoted by the ABI.
+ if (getContext().isPromotableIntegerType(Ty))
+ return true;
+
+ if (!Is64Bit)
+ return false;
+
+ // For 64 bit mode, in addition to the usual promotable integer types, we also
+ // need to extend all 32-bit types, since the ABI requires promotion to 64
+ // bits.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getDirect();
+
+ if (RetTy->isVectorType())
+ return ABIArgInfo::getDirect();
+
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (isAggregateTypeForABI(RetTy))
+ return getNaturalAlignIndirect(RetTy);
+
+ return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ if (Ty->isAnyComplexType())
+ return ABIArgInfo::getDirect();
+
+ if (Ty->isVectorType())
+ return ABIArgInfo::getDirect();
+
+ if (isAggregateTypeForABI(Ty)) {
+ // Records with non-trivial destructors/copy-constructors should not be
+ // passed by value.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ CharUnits CCAlign = getParamTypeAlignment(Ty);
+ CharUnits TyAlign = getContext().getTypeAlignInChars(Ty);
+
+ return ABIArgInfo::getIndirect(CCAlign, /*ByVal*/ true,
+ /*Realign*/ TyAlign > CCAlign);
+ }
+
+ return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
+}
+
+CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const {
+ // Complex types are passed just like their elements.
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>())
+ Ty = CTy->getElementType();
+
+ if (Ty->isVectorType())
+ return CharUnits::fromQuantity(16);
+
+ // If the structure contains a vector type, the alignment is 16.
+ if (isRecordWithSIMDVectorType(getContext(), Ty))
+ return CharUnits::fromQuantity(16);
+
+ return CharUnits::fromQuantity(PtrByteSize);
+}
+
+Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+
+ auto TypeInfo = getContext().getTypeInfoInChars(Ty);
+ TypeInfo.Align = getParamTypeAlignment(Ty);
+
+ CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize);
+
+ // If we have a complex type and the base type is smaller than the register
+ // size, the ABI calls for the real and imaginary parts to be right-adjusted
+ // in separate words in 32bit mode or doublewords in 64bit mode. However,
+ // Clang expects us to produce a pointer to a structure with the two parts
+ // packed tightly. So generate loads of the real and imaginary parts relative
+ // to the va_list pointer, and store them to a temporary structure. We do the
+ // same as the PPC64ABI here.
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
+ CharUnits EltSize = TypeInfo.Width / 2;
+ if (EltSize < SlotSize)
+ return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy);
+ }
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
+ SlotSize, /*AllowHigher*/ true);
+}
+
+bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable(
+ CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
+ return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, /*IsAIX*/ true);
+}
+
+// PowerPC-32
+namespace {
+/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
+class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
+ bool IsSoftFloatABI;
+ bool IsRetSmallStructInRegABI;
+
+ CharUnits getParamTypeAlignment(QualType Ty) const;
+
+public:
+ PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI,
+ bool RetSmallStructInRegABI)
+ : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI),
+ IsRetSmallStructInRegABI(RetSmallStructInRegABI) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+ }
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI,
+ bool RetSmallStructInRegABI)
+ : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>(
+ CGT, SoftFloatABI, RetSmallStructInRegABI)) {}
+
+ static bool isStructReturnInRegABI(const llvm::Triple &Triple,
+ const CodeGenOptions &Opts);
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ // This is recovered from gcc output.
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override;
+};
+}
+
+CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
+ // Complex types are passed just like their elements.
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>())
+ Ty = CTy->getElementType();
+
+ if (Ty->isVectorType())
+ return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16
+ : 4);
+
+ // For single-element float/vector structs, we consider the whole type
+ // to have the same alignment requirements as its single element.
+ const Type *AlignTy = nullptr;
+ if (const Type *EltType = isSingleElementStruct(Ty, getContext())) {
+ const BuiltinType *BT = EltType->getAs<BuiltinType>();
+ if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
+ (BT && BT->isFloatingPoint()))
+ AlignTy = EltType;
+ }
+
+ if (AlignTy)
+ return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4);
+ return CharUnits::fromQuantity(4);
+}
+
+ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
+ uint64_t Size;
+
+ // -msvr4-struct-return puts small aggregates in GPR3 and GPR4.
+ if (isAggregateTypeForABI(RetTy) && IsRetSmallStructInRegABI &&
+ (Size = getContext().getTypeSize(RetTy)) <= 64) {
+ // System V ABI (1995), page 3-22, specified:
+ // > A structure or union whose size is less than or equal to 8 bytes
+ // > shall be returned in r3 and r4, as if it were first stored in the
+ // > 8-byte aligned memory area and then the low addressed word were
+ // > loaded into r3 and the high-addressed word into r4. Bits beyond
+ // > the last member of the structure or union are not defined.
+ //
+ // GCC for big-endian PPC32 inserts the pad before the first member,
+ // not "beyond the last member" of the struct. To stay compatible
+ // with GCC, we coerce the struct to an integer of the same size.
+ // LLVM will extend it and return i32 in r3, or i64 in r3:r4.
+ if (Size == 0)
+ return ABIArgInfo::getIgnore();
+ else {
+ llvm::Type *CoerceTy = llvm::Type::getIntNTy(getVMContext(), Size);
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+ }
+
+ return DefaultABIInfo::classifyReturnType(RetTy);
+}
+
+// TODO: this implementation is now likely redundant with
+// DefaultABIInfo::EmitVAArg.
+Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
+ QualType Ty) const {
+ if (getTarget().getTriple().isOSDarwin()) {
+ auto TI = getContext().getTypeInfoInChars(Ty);
+ TI.Align = getParamTypeAlignment(Ty);
+
+ CharUnits SlotSize = CharUnits::fromQuantity(4);
+ return emitVoidPtrVAArg(CGF, VAList, Ty,
+ classifyArgumentType(Ty).isIndirect(), TI, SlotSize,
+ /*AllowHigherAlign=*/true);
+ }
+
+ const unsigned OverflowLimit = 8;
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
+ // TODO: Implement this. For now ignore.
+ (void)CTy;
+ return Address::invalid(); // FIXME?
+ }
+
+ // struct __va_list_tag {
+ // unsigned char gpr;
+ // unsigned char fpr;
+ // unsigned short reserved;
+ // void *overflow_arg_area;
+ // void *reg_save_area;
+ // };
+
+ bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
+ bool isInt = !Ty->isFloatingType();
+ bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
+
+ // All aggregates are passed indirectly? That doesn't seem consistent
+ // with the argument-lowering code.
+ bool isIndirect = isAggregateTypeForABI(Ty);
+
+ CGBuilderTy &Builder = CGF.Builder;
+
+ // The calling convention either uses 1-2 GPRs or 1 FPR.
+ Address NumRegsAddr = Address::invalid();
+ if (isInt || IsSoftFloatABI) {
+ NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr");
+ } else {
+ NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr");
+ }
+
+ llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
+
+ // "Align" the register count when TY is i64.
+ if (isI64 || (isF64 && IsSoftFloatABI)) {
+ NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
+ NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
+ }
+
+ llvm::Value *CC =
+ Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");
+
+ llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
+ llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
+
+ Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
+
+ llvm::Type *DirectTy = CGF.ConvertType(Ty), *ElementTy = DirectTy;
+ if (isIndirect)
+ DirectTy = CGF.UnqualPtrTy;
+
+ // Case 1: consume registers.
+ Address RegAddr = Address::invalid();
+ {
+ CGF.EmitBlock(UsingRegs);
+
+ Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
+ RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr), CGF.Int8Ty,
+ CharUnits::fromQuantity(8));
+ assert(RegAddr.getElementType() == CGF.Int8Ty);
+
+ // Floating-point registers start after the general-purpose registers.
+ if (!(isInt || IsSoftFloatABI)) {
+ RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
+ CharUnits::fromQuantity(32));
+ }
+
+ // Get the address of the saved value by scaling the number of
+ // registers we've used by the number of
+ CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
+ llvm::Value *RegOffset =
+ Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
+ RegAddr = Address(
+ Builder.CreateInBoundsGEP(CGF.Int8Ty, RegAddr.getPointer(), RegOffset),
+ DirectTy, RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
+
+ // Increase the used-register count.
+ NumRegs =
+ Builder.CreateAdd(NumRegs,
+ Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
+ Builder.CreateStore(NumRegs, NumRegsAddr);
+
+ CGF.EmitBranch(Cont);
+ }
+
+ // Case 2: consume space in the overflow area.
+ Address MemAddr = Address::invalid();
+ {
+ CGF.EmitBlock(UsingOverflow);
+
+ Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
+
+ // Everything in the overflow area is rounded up to a size of at least 4.
+ CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
+
+ CharUnits Size;
+ if (!isIndirect) {
+ auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
+ Size = TypeInfo.Width.alignTo(OverflowAreaAlign);
+ } else {
+ Size = CGF.getPointerSize();
+ }
+
+ Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
+ Address OverflowArea =
+ Address(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"), CGF.Int8Ty,
+ OverflowAreaAlign);
+ // Round up address of argument to alignment
+ CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
+ if (Align > OverflowAreaAlign) {
+ llvm::Value *Ptr = OverflowArea.getPointer();
+ OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
+ OverflowArea.getElementType(), Align);
+ }
+
+ MemAddr = OverflowArea.withElementType(DirectTy);
+
+ // Increase the overflow area.
+ OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
+ Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
+ CGF.EmitBranch(Cont);
+ }
+
+ CGF.EmitBlock(Cont);
+
+ // Merge the cases with a phi.
+ Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
+ "vaarg.addr");
+
+ // Load the pointer if the argument was passed indirectly.
+ if (isIndirect) {
+ Result = Address(Builder.CreateLoad(Result, "aggr"), ElementTy,
+ getContext().getTypeAlignInChars(Ty));
+ }
+
+ return Result;
+}
+
+bool PPC32TargetCodeGenInfo::isStructReturnInRegABI(
+ const llvm::Triple &Triple, const CodeGenOptions &Opts) {
+ assert(Triple.isPPC32());
+
+ switch (Opts.getStructReturnConvention()) {
+ case CodeGenOptions::SRCK_Default:
+ break;
+ case CodeGenOptions::SRCK_OnStack: // -maix-struct-return
+ return false;
+ case CodeGenOptions::SRCK_InRegs: // -msvr4-struct-return
+ return true;
+ }
+
+ if (Triple.isOSBinFormatELF() && !Triple.isOSLinux())
+ return true;
+
+ return false;
+}
+
+bool
+PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ false,
+ /*IsAIX*/ false);
+}
+
+// PowerPC-64
+
+namespace {
+
+/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
+class PPC64_SVR4_ABIInfo : public ABIInfo {
+ static const unsigned GPRBits = 64;
+ PPC64_SVR4_ABIKind Kind;
+ bool IsSoftFloatABI;
+
+public:
+ PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, PPC64_SVR4_ABIKind Kind,
+ bool SoftFloatABI)
+ : ABIInfo(CGT), Kind(Kind), IsSoftFloatABI(SoftFloatABI) {}
+
+ bool isPromotableTypeForABI(QualType Ty) const;
+ CharUnits getParamTypeAlignment(QualType Ty) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+ bool isHomogeneousAggregateBaseType(QualType Ty) const override;
+ bool isHomogeneousAggregateSmallEnough(const Type *Ty,
+ uint64_t Members) const override;
+
+ // TODO: We can add more logic to computeInfo to improve performance.
+ // Example: For aggregate arguments that fit in a register, we could
+ // use getDirectInReg (as is done below for structs containing a single
+ // floating-point value) to avoid pushing them to memory on function
+ // entry. This would require changing the logic in PPCISelLowering
+ // when lowering the parameters in the caller and args in the callee.
+ void computeInfo(CGFunctionInfo &FI) const override {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments()) {
+ // We rely on the default argument classification for the most part.
+ // One exception: An aggregate containing a single floating-point
+ // or vector item must be passed in a register if one is available.
+ const Type *T = isSingleElementStruct(I.type, getContext());
+ if (T) {
+ const BuiltinType *BT = T->getAs<BuiltinType>();
+ if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||
+ (BT && BT->isFloatingPoint())) {
+ QualType QT(T, 0);
+ I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
+ continue;
+ }
+ }
+ I.info = classifyArgumentType(I.type);
+ }
+ }
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
+
+public:
+ PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT, PPC64_SVR4_ABIKind Kind,
+ bool SoftFloatABI)
+ : TargetCodeGenInfo(
+ std::make_unique<PPC64_SVR4_ABIInfo>(CGT, Kind, SoftFloatABI)) {
+ SwiftInfo =
+ std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/false);
+ }
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ // This is recovered from gcc output.
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override;
+ void emitTargetMetadata(CodeGen::CodeGenModule &CGM,
+ const llvm::MapVector<GlobalDecl, StringRef>
+ &MangledDeclNames) const override;
+};
+
+class PPC64TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ PPC64TargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ // This is recovered from gcc output.
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override;
+};
+}
+
+// Return true if the ABI requires Ty to be passed sign- or zero-
+// extended to 64 bits.
+bool
+PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // Promotable integer types are required to be promoted by the ABI.
+ if (isPromotableIntegerTypeForABI(Ty))
+ return true;
+
+ // In addition to the usual promotable integer types, we also need to
+ // extend all 32-bit types, since the ABI requires promotion to 64 bits.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ return true;
+ default:
+ break;
+ }
+
+ if (const auto *EIT = Ty->getAs<BitIntType>())
+ if (EIT->getNumBits() < 64)
+ return true;
+
+ return false;
+}
+
+/// isAlignedParamType - Determine whether a type requires 16-byte or
+/// higher alignment in the parameter area. Always returns at least 8.
+CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
+ // Complex types are passed just like their elements.
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>())
+ Ty = CTy->getElementType();
+
+ auto FloatUsesVector = [this](QualType Ty){
+ return Ty->isRealFloatingType() && &getContext().getFloatTypeSemantics(
+ Ty) == &llvm::APFloat::IEEEquad();
+ };
+
+ // Only vector types of size 16 bytes need alignment (larger types are
+ // passed via reference, smaller types are not aligned).
+ if (Ty->isVectorType()) {
+ return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
+ } else if (FloatUsesVector(Ty)) {
+ // According to ABI document section 'Optional Save Areas': If extended
+ // precision floating-point values in IEEE BINARY 128 QUADRUPLE PRECISION
+ // format are supported, map them to a single quadword, quadword aligned.
+ return CharUnits::fromQuantity(16);
+ }
+
+ // For single-element float/vector structs, we consider the whole type
+ // to have the same alignment requirements as its single element.
+ const Type *AlignAsType = nullptr;
+ const Type *EltType = isSingleElementStruct(Ty, getContext());
+ if (EltType) {
+ const BuiltinType *BT = EltType->getAs<BuiltinType>();
+ if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
+ (BT && BT->isFloatingPoint()))
+ AlignAsType = EltType;
+ }
+
+ // Likewise for ELFv2 homogeneous aggregates.
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (!AlignAsType && Kind == PPC64_SVR4_ABIKind::ELFv2 &&
+ isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
+ AlignAsType = Base;
+
+ // With special case aggregates, only vector base types need alignment.
+ if (AlignAsType) {
+ bool UsesVector = AlignAsType->isVectorType() ||
+ FloatUsesVector(QualType(AlignAsType, 0));
+ return CharUnits::fromQuantity(UsesVector ? 16 : 8);
+ }
+
+ // Otherwise, we only need alignment for any aggregate type that
+ // has an alignment requirement of >= 16 bytes.
+ if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
+ return CharUnits::fromQuantity(16);
+ }
+
+ return CharUnits::fromQuantity(8);
+}
+
+bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
+ // Homogeneous aggregates for ELFv2 must have base types of float,
+ // double, long double, or 128-bit vectors.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ if (BT->getKind() == BuiltinType::Float ||
+ BT->getKind() == BuiltinType::Double ||
+ BT->getKind() == BuiltinType::LongDouble ||
+ BT->getKind() == BuiltinType::Ibm128 ||
+ (getContext().getTargetInfo().hasFloat128Type() &&
+ (BT->getKind() == BuiltinType::Float128))) {
+ if (IsSoftFloatABI)
+ return false;
+ return true;
+ }
+ }
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ if (getContext().getTypeSize(VT) == 128)
+ return true;
+ }
+ return false;
+}
+
+bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
+ const Type *Base, uint64_t Members) const {
+ // Vector and fp128 types require one register, other floating point types
+ // require one or two registers depending on their size.
+ uint32_t NumRegs =
+ ((getContext().getTargetInfo().hasFloat128Type() &&
+ Base->isFloat128Type()) ||
+ Base->isVectorType()) ? 1
+ : (getContext().getTypeSize(Base) + 63) / 64;
+
+ // Homogeneous Aggregates may occupy at most 8 registers.
+ return Members * NumRegs <= 8;
+}
+
+ABIArgInfo
+PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ if (Ty->isAnyComplexType())
+ return ABIArgInfo::getDirect();
+
+ // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
+ // or via reference (larger than 16 bytes).
+ if (Ty->isVectorType()) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size > 128)
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+ else if (Size < 128) {
+ llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+ }
+
+ if (const auto *EIT = Ty->getAs<BitIntType>())
+ if (EIT->getNumBits() > 128)
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
+
+ if (isAggregateTypeForABI(Ty)) {
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
+ uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
+
+ // ELFv2 homogeneous aggregates are passed as array types.
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (Kind == PPC64_SVR4_ABIKind::ELFv2 &&
+ isHomogeneousAggregate(Ty, Base, Members)) {
+ llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
+ llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+
+ // If an aggregate may end up fully in registers, we do not
+ // use the ByVal method, but pass the aggregate as array.
+ // This is usually beneficial since we avoid forcing the
+ // back-end to store the argument to memory.
+ uint64_t Bits = getContext().getTypeSize(Ty);
+ if (Bits > 0 && Bits <= 8 * GPRBits) {
+ llvm::Type *CoerceTy;
+
+ // Types up to 8 bytes are passed as integer type (which will be
+ // properly aligned in the argument save area doubleword).
+ if (Bits <= GPRBits)
+ CoerceTy =
+ llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
+ // Larger types are passed as arrays, with the base type selected
+ // according to the required alignment in the save area.
+ else {
+ uint64_t RegBits = ABIAlign * 8;
+ uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
+ llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
+ CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
+ }
+
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+
+ // All other aggregates are passed ByVal.
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
+ /*ByVal=*/true,
+ /*Realign=*/TyAlign > ABIAlign);
+ }
+
+ return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo
+PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getDirect();
+
+ // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
+ // or via reference (larger than 16 bytes).
+ if (RetTy->isVectorType()) {
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size > 128)
+ return getNaturalAlignIndirect(RetTy);
+ else if (Size < 128) {
+ llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+ }
+
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
+ if (EIT->getNumBits() > 128)
+ return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
+
+ if (isAggregateTypeForABI(RetTy)) {
+ // ELFv2 homogeneous aggregates are returned as array types.
+ const Type *Base = nullptr;
+ uint64_t Members = 0;
+ if (Kind == PPC64_SVR4_ABIKind::ELFv2 &&
+ isHomogeneousAggregate(RetTy, Base, Members)) {
+ llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
+ llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+
+ // ELFv2 small aggregates are returned in up to two registers.
+ uint64_t Bits = getContext().getTypeSize(RetTy);
+ if (Kind == PPC64_SVR4_ABIKind::ELFv2 && Bits <= 2 * GPRBits) {
+ if (Bits == 0)
+ return ABIArgInfo::getIgnore();
+
+ llvm::Type *CoerceTy;
+ if (Bits > GPRBits) {
+ CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
+ CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
+ } else
+ CoerceTy =
+ llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
+ return ABIArgInfo::getDirect(CoerceTy);
+ }
+
+ // All other aggregates are returned indirectly.
+ return getNaturalAlignIndirect(RetTy);
+ }
+
+ return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
+}
+
+// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
+Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ auto TypeInfo = getContext().getTypeInfoInChars(Ty);
+ TypeInfo.Align = getParamTypeAlignment(Ty);
+
+ CharUnits SlotSize = CharUnits::fromQuantity(8);
+
+ // If we have a complex type and the base type is smaller than 8 bytes,
+ // the ABI calls for the real and imaginary parts to be right-adjusted
+ // in separate doublewords. However, Clang expects us to produce a
+ // pointer to a structure with the two parts packed tightly. So generate
+ // loads of the real and imaginary parts relative to the va_list pointer,
+ // and store them to a temporary structure.
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
+ CharUnits EltSize = TypeInfo.Width / 2;
+ if (EltSize < SlotSize)
+ return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy);
+ }
+
+ // Otherwise, just use the general rule.
+ //
+ // The PPC64 ABI passes some arguments in integer registers, even to variadic
+ // functions. To allow va_list to use the simple "void*" representation,
+ // variadic calls allocate space in the argument area for the integer argument
+ // registers, and variadic functions spill their integer argument registers to
+ // this area in their prologues. When aggregates smaller than a register are
+ // passed this way, they are passed in the least significant bits of the
+ // register, which means that after spilling on big-endian targets they will
+ // be right-aligned in their argument slot. This is uncommon; for a variety of
+ // reasons, other big-endian targets don't end up right-aligning aggregate
+ // types this way, and so right-alignment only applies to fundamental types.
+ // So on PPC64, we must force the use of right-alignment even for aggregates.
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
+ SlotSize, /*AllowHigher*/ true,
+ /*ForceRightAdjust*/ true);
+}
+
+bool
+PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
+ /*IsAIX*/ false);
+}
+
+void PPC64_SVR4_TargetCodeGenInfo::emitTargetMetadata(
+ CodeGen::CodeGenModule &CGM,
+ const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const {
+ if (CGM.getTypes().isLongDoubleReferenced()) {
+ llvm::LLVMContext &Ctx = CGM.getLLVMContext();
+ const auto *flt = &CGM.getTarget().getLongDoubleFormat();
+ if (flt == &llvm::APFloat::PPCDoubleDouble())
+ CGM.getModule().addModuleFlag(llvm::Module::Error, "float-abi",
+ llvm::MDString::get(Ctx, "doubledouble"));
+ else if (flt == &llvm::APFloat::IEEEquad())
+ CGM.getModule().addModuleFlag(llvm::Module::Error, "float-abi",
+ llvm::MDString::get(Ctx, "ieeequad"));
+ else if (flt == &llvm::APFloat::IEEEdouble())
+ CGM.getModule().addModuleFlag(llvm::Module::Error, "float-abi",
+ llvm::MDString::get(Ctx, "ieeedouble"));
+ }
+}
+
+bool
+PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
+ /*IsAIX*/ false);
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createAIXTargetCodeGenInfo(CodeGenModule &CGM, bool Is64Bit) {
+ return std::make_unique<AIXTargetCodeGenInfo>(CGM.getTypes(), Is64Bit);
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createPPC32TargetCodeGenInfo(CodeGenModule &CGM, bool SoftFloatABI) {
+ bool RetSmallStructInRegABI = PPC32TargetCodeGenInfo::isStructReturnInRegABI(
+ CGM.getTriple(), CGM.getCodeGenOpts());
+ return std::make_unique<PPC32TargetCodeGenInfo>(CGM.getTypes(), SoftFloatABI,
+ RetSmallStructInRegABI);
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createPPC64TargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<PPC64TargetCodeGenInfo>(CGM.getTypes());
+}
+
+std::unique_ptr<TargetCodeGenInfo> CodeGen::createPPC64_SVR4_TargetCodeGenInfo(
+ CodeGenModule &CGM, PPC64_SVR4_ABIKind Kind, bool SoftFloatABI) {
+ return std::make_unique<PPC64_SVR4_TargetCodeGenInfo>(CGM.getTypes(), Kind,
+ SoftFloatABI);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/RISCV.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/RISCV.cpp
new file mode 100644
index 000000000000..02c86ad2e58c
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/RISCV.cpp
@@ -0,0 +1,555 @@
+//===- RISCV.cpp ----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// RISC-V ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+class RISCVABIInfo : public DefaultABIInfo {
+private:
+ // Size of the integer ('x') registers in bits.
+ unsigned XLen;
+ // Size of the floating point ('f') registers in bits. Note that the target
+ // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target
+ // with soft float ABI has FLen==0).
+ unsigned FLen;
+ const int NumArgGPRs;
+ const int NumArgFPRs;
+ const bool EABI;
+ bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
+ llvm::Type *&Field1Ty,
+ CharUnits &Field1Off,
+ llvm::Type *&Field2Ty,
+ CharUnits &Field2Off) const;
+
+public:
+ RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen,
+ bool EABI)
+ : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen), NumArgGPRs(EABI ? 6 : 8),
+ NumArgFPRs(FLen != 0 ? 8 : 0), EABI(EABI) {}
+
+ // DefaultABIInfo's classifyReturnType and classifyArgumentType are
+ // non-virtual, but computeInfo is virtual, so we overload it.
+ void computeInfo(CGFunctionInfo &FI) const override;
+
+ ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
+ int &ArgFPRsLeft) const;
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+
+ ABIArgInfo extendType(QualType Ty) const;
+
+ bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
+ CharUnits &Field1Off, llvm::Type *&Field2Ty,
+ CharUnits &Field2Off, int &NeededArgGPRs,
+ int &NeededArgFPRs) const;
+ ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
+ CharUnits Field1Off,
+ llvm::Type *Field2Ty,
+ CharUnits Field2Off) const;
+
+ ABIArgInfo coerceVLSVector(QualType Ty) const;
+};
+} // end anonymous namespace
+
+void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ QualType RetTy = FI.getReturnType();
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(RetTy);
+
+ // IsRetIndirect is true if classifyArgumentType indicated the value should
+ // be passed indirect, or if the type size is a scalar greater than 2*XLen
+ // and not a complex type with elements <= FLen. e.g. fp128 is passed direct
+ // in LLVM IR, relying on the backend lowering code to rewrite the argument
+ // list and pass indirectly on RV32.
+ bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
+ if (!IsRetIndirect && RetTy->isScalarType() &&
+ getContext().getTypeSize(RetTy) > (2 * XLen)) {
+ if (RetTy->isComplexType() && FLen) {
+ QualType EltTy = RetTy->castAs<ComplexType>()->getElementType();
+ IsRetIndirect = getContext().getTypeSize(EltTy) > FLen;
+ } else {
+ // This is a normal scalar > 2*XLen, such as fp128 on RV32.
+ IsRetIndirect = true;
+ }
+ }
+
+ int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
+ int ArgFPRsLeft = NumArgFPRs;
+ int NumFixedArgs = FI.getNumRequiredArgs();
+
+ int ArgNum = 0;
+ for (auto &ArgInfo : FI.arguments()) {
+ bool IsFixed = ArgNum < NumFixedArgs;
+ ArgInfo.info =
+ classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
+ ArgNum++;
+ }
+}
+
+// Returns true if the struct is a potential candidate for the floating point
+// calling convention. If this function returns true, the caller is
+// responsible for checking that if there is only a single field then that
+// field is a float.
+bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
+ llvm::Type *&Field1Ty,
+ CharUnits &Field1Off,
+ llvm::Type *&Field2Ty,
+ CharUnits &Field2Off) const {
+ bool IsInt = Ty->isIntegralOrEnumerationType();
+ bool IsFloat = Ty->isRealFloatingType();
+
+ if (IsInt || IsFloat) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (IsInt && Size > XLen)
+ return false;
+ // Can't be eligible if larger than the FP registers. Handling of half
+ // precision values has been specified in the ABI, so don't block those.
+ if (IsFloat && Size > FLen)
+ return false;
+ // Can't be eligible if an integer type was already found (int+int pairs
+ // are not eligible).
+ if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
+ return false;
+ if (!Field1Ty) {
+ Field1Ty = CGT.ConvertType(Ty);
+ Field1Off = CurOff;
+ return true;
+ }
+ if (!Field2Ty) {
+ Field2Ty = CGT.ConvertType(Ty);
+ Field2Off = CurOff;
+ return true;
+ }
+ return false;
+ }
+
+ if (auto CTy = Ty->getAs<ComplexType>()) {
+ if (Field1Ty)
+ return false;
+ QualType EltTy = CTy->getElementType();
+ if (getContext().getTypeSize(EltTy) > FLen)
+ return false;
+ Field1Ty = CGT.ConvertType(EltTy);
+ Field1Off = CurOff;
+ Field2Ty = Field1Ty;
+ Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
+ return true;
+ }
+
+ if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
+ uint64_t ArraySize = ATy->getSize().getZExtValue();
+ QualType EltTy = ATy->getElementType();
+ // Non-zero-length arrays of empty records make the struct ineligible for
+ // the FP calling convention in C++.
+ if (const auto *RTy = EltTy->getAs<RecordType>()) {
+ if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getDecl()) &&
+ isEmptyRecord(getContext(), EltTy, true, true))
+ return false;
+ }
+ CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
+ for (uint64_t i = 0; i < ArraySize; ++i) {
+ bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
+ Field1Off, Field2Ty, Field2Off);
+ if (!Ret)
+ return false;
+ CurOff += EltSize;
+ }
+ return true;
+ }
+
+ if (const auto *RTy = Ty->getAs<RecordType>()) {
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are not eligible for the FP calling convention.
+ if (getRecordArgABI(Ty, CGT.getCXXABI()))
+ return false;
+ if (isEmptyRecord(getContext(), Ty, true, true))
+ return true;
+ const RecordDecl *RD = RTy->getDecl();
+ // Unions aren't eligible unless they're empty (which is caught above).
+ if (RD->isUnion())
+ return false;
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (const CXXBaseSpecifier &B : CXXRD->bases()) {
+ const auto *BDecl =
+ cast<CXXRecordDecl>(B.getType()->castAs<RecordType>()->getDecl());
+ CharUnits BaseOff = Layout.getBaseClassOffset(BDecl);
+ bool Ret = detectFPCCEligibleStructHelper(B.getType(), CurOff + BaseOff,
+ Field1Ty, Field1Off, Field2Ty,
+ Field2Off);
+ if (!Ret)
+ return false;
+ }
+ }
+ int ZeroWidthBitFieldCount = 0;
+ for (const FieldDecl *FD : RD->fields()) {
+ uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex());
+ QualType QTy = FD->getType();
+ if (FD->isBitField()) {
+ unsigned BitWidth = FD->getBitWidthValue(getContext());
+ // Allow a bitfield with a type greater than XLen as long as the
+ // bitwidth is XLen or less.
+ if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
+ QTy = getContext().getIntTypeForBitwidth(XLen, false);
+ if (BitWidth == 0) {
+ ZeroWidthBitFieldCount++;
+ continue;
+ }
+ }
+
+ bool Ret = detectFPCCEligibleStructHelper(
+ QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
+ Field1Ty, Field1Off, Field2Ty, Field2Off);
+ if (!Ret)
+ return false;
+
+ // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp
+ // or int+fp structs, but are ignored for a struct with an fp field and
+ // any number of zero-width bitfields.
+ if (Field2Ty && ZeroWidthBitFieldCount > 0)
+ return false;
+ }
+ return Field1Ty != nullptr;
+ }
+
+ return false;
+}
+
+// Determine if a struct is eligible for passing according to the floating
+// point calling convention (i.e., when flattened it contains a single fp
+// value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and
+// NeededArgGPRs are incremented appropriately.
+bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
+ CharUnits &Field1Off,
+ llvm::Type *&Field2Ty,
+ CharUnits &Field2Off,
+ int &NeededArgGPRs,
+ int &NeededArgFPRs) const {
+ Field1Ty = nullptr;
+ Field2Ty = nullptr;
+ NeededArgGPRs = 0;
+ NeededArgFPRs = 0;
+ bool IsCandidate = detectFPCCEligibleStructHelper(
+ Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off);
+ if (!Field1Ty)
+ return false;
+ // Not really a candidate if we have a single int but no float.
+ if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
+ return false;
+ if (!IsCandidate)
+ return false;
+ if (Field1Ty && Field1Ty->isFloatingPointTy())
+ NeededArgFPRs++;
+ else if (Field1Ty)
+ NeededArgGPRs++;
+ if (Field2Ty && Field2Ty->isFloatingPointTy())
+ NeededArgFPRs++;
+ else if (Field2Ty)
+ NeededArgGPRs++;
+ return true;
+}
+
+// Call getCoerceAndExpand for the two-element flattened struct described by
+// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
+// appropriate coerceToType and unpaddedCoerceToType.
+ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
+ llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
+ CharUnits Field2Off) const {
+ SmallVector<llvm::Type *, 3> CoerceElts;
+ SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
+ if (!Field1Off.isZero())
+ CoerceElts.push_back(llvm::ArrayType::get(
+ llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));
+
+ CoerceElts.push_back(Field1Ty);
+ UnpaddedCoerceElts.push_back(Field1Ty);
+
+ if (!Field2Ty) {
+ return ABIArgInfo::getCoerceAndExpand(
+ llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
+ UnpaddedCoerceElts[0]);
+ }
+
+ CharUnits Field2Align =
+ CharUnits::fromQuantity(getDataLayout().getABITypeAlign(Field2Ty));
+ CharUnits Field1End = Field1Off +
+ CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
+ CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);
+
+ CharUnits Padding = CharUnits::Zero();
+ if (Field2Off > Field2OffNoPadNoPack)
+ Padding = Field2Off - Field2OffNoPadNoPack;
+ else if (Field2Off != Field2Align && Field2Off > Field1End)
+ Padding = Field2Off - Field1End;
+
+ bool IsPacked = !Field2Off.isMultipleOf(Field2Align);
+
+ if (!Padding.isZero())
+ CoerceElts.push_back(llvm::ArrayType::get(
+ llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));
+
+ CoerceElts.push_back(Field2Ty);
+ UnpaddedCoerceElts.push_back(Field2Ty);
+
+ auto CoerceToType =
+ llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
+ auto UnpaddedCoerceToType =
+ llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);
+
+ return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
+}
+
+// Fixed-length RVV vectors are represented as scalable vectors in function
+// args/return and must be coerced from fixed vectors.
+ABIArgInfo RISCVABIInfo::coerceVLSVector(QualType Ty) const {
+ assert(Ty->isVectorType() && "expected vector type!");
+
+ const auto *VT = Ty->castAs<VectorType>();
+ assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
+
+ auto VScale =
+ getContext().getTargetInfo().getVScaleRange(getContext().getLangOpts());
+
+ unsigned NumElts = VT->getNumElements();
+ llvm::Type *EltType;
+ if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask) {
+ NumElts *= 8;
+ EltType = llvm::Type::getInt1Ty(getVMContext());
+ } else {
+ assert(VT->getVectorKind() == VectorKind::RVVFixedLengthData &&
+ "Unexpected vector kind");
+ EltType = CGT.ConvertType(VT->getElementType());
+ }
+
+ // The MinNumElts is simplified from equation:
+ // NumElts / VScale =
+ // (EltSize * NumElts / (VScale * RVVBitsPerBlock))
+ // * (RVVBitsPerBlock / EltSize)
+ llvm::ScalableVectorType *ResType =
+ llvm::ScalableVectorType::get(EltType, NumElts / VScale->first);
+ return ABIArgInfo::getDirect(ResType);
+}
+
+ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
+ int &ArgGPRsLeft,
+ int &ArgFPRsLeft) const {
+ assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always passed indirectly.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
+ if (ArgGPRsLeft)
+ ArgGPRsLeft -= 1;
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
+ CGCXXABI::RAA_DirectInMemory);
+ }
+
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ // Pass floating point values via FPRs if possible.
+ if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
+ FLen >= Size && ArgFPRsLeft) {
+ ArgFPRsLeft--;
+ return ABIArgInfo::getDirect();
+ }
+
+ // Complex types for the hard float ABI must be passed direct rather than
+ // using CoerceAndExpand.
+ if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) {
+ QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
+ if (getContext().getTypeSize(EltTy) <= FLen) {
+ ArgFPRsLeft -= 2;
+ return ABIArgInfo::getDirect();
+ }
+ }
+
+ if (IsFixed && FLen && Ty->isStructureOrClassType()) {
+ llvm::Type *Field1Ty = nullptr;
+ llvm::Type *Field2Ty = nullptr;
+ CharUnits Field1Off = CharUnits::Zero();
+ CharUnits Field2Off = CharUnits::Zero();
+ int NeededArgGPRs = 0;
+ int NeededArgFPRs = 0;
+ bool IsCandidate =
+ detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
+ NeededArgGPRs, NeededArgFPRs);
+ if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
+ NeededArgFPRs <= ArgFPRsLeft) {
+ ArgGPRsLeft -= NeededArgGPRs;
+ ArgFPRsLeft -= NeededArgFPRs;
+ return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
+ Field2Off);
+ }
+ }
+
+ uint64_t NeededAlign = getContext().getTypeAlign(Ty);
+ // Determine the number of GPRs needed to pass the current argument
+ // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
+ // register pairs, so may consume 3 registers.
+ // TODO: To be compatible with GCC's behaviors, we don't align registers
+ // currently if we are using ILP32E calling convention. This behavior may be
+ // changed when RV32E/ILP32E is ratified.
+ int NeededArgGPRs = 1;
+ if (!IsFixed && NeededAlign == 2 * XLen)
+ NeededArgGPRs = 2 + (EABI && XLen == 32 ? 0 : (ArgGPRsLeft % 2));
+ else if (Size > XLen && Size <= 2 * XLen)
+ NeededArgGPRs = 2;
+
+ if (NeededArgGPRs > ArgGPRsLeft) {
+ NeededArgGPRs = ArgGPRsLeft;
+ }
+
+ ArgGPRsLeft -= NeededArgGPRs;
+
+ if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // All integral types are promoted to XLen width
+ if (Size < XLen && Ty->isIntegralOrEnumerationType()) {
+ return extendType(Ty);
+ }
+
+ if (const auto *EIT = Ty->getAs<BitIntType>()) {
+ if (EIT->getNumBits() < XLen)
+ return extendType(Ty);
+ if (EIT->getNumBits() > 128 ||
+ (!getContext().getTargetInfo().hasInt128Type() &&
+ EIT->getNumBits() > 64))
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+ }
+
+ return ABIArgInfo::getDirect();
+ }
+
+ if (const VectorType *VT = Ty->getAs<VectorType>())
+ if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
+ VT->getVectorKind() == VectorKind::RVVFixedLengthMask)
+ return coerceVLSVector(Ty);
+
+ // Aggregates which are <= 2*XLen will be passed in registers if possible,
+ // so coerce to integers.
+ if (Size <= 2 * XLen) {
+ unsigned Alignment = getContext().getTypeAlign(Ty);
+
+ // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
+ // required, and a 2-element XLen array if only XLen alignment is required.
+ if (Size <= XLen) {
+ return ABIArgInfo::getDirect(
+ llvm::IntegerType::get(getVMContext(), XLen));
+ } else if (Alignment == 2 * XLen) {
+ return ABIArgInfo::getDirect(
+ llvm::IntegerType::get(getVMContext(), 2 * XLen));
+ } else {
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(
+ llvm::IntegerType::get(getVMContext(), XLen), 2));
+ }
+ }
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+}
+
+ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ int ArgGPRsLeft = 2;
+ int ArgFPRsLeft = FLen ? 2 : 0;
+
+ // The rules for return and argument types are the same, so defer to
+ // classifyArgumentType.
+ return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft,
+ ArgFPRsLeft);
+}
+
+Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);
+
+ // Empty records are ignored for parameter passing purposes.
+ if (isEmptyRecord(getContext(), Ty, true)) {
+ return Address(CGF.Builder.CreateLoad(VAListAddr),
+ CGF.ConvertTypeForMem(Ty), SlotSize);
+ }
+
+ auto TInfo = getContext().getTypeInfoInChars(Ty);
+
+ // TODO: To be compatible with GCC's behaviors, we force arguments with
+ // 2×XLEN-bit alignment and size at most 2×XLEN bits like `long long`,
+ // `unsigned long long` and `double` to have 4-byte alignment. This
+ // behavior may be changed when RV32E/ILP32E is ratified.
+ if (EABI && XLen == 32)
+ TInfo.Align = std::min(TInfo.Align, CharUnits::fromQuantity(4));
+
+ // Arguments bigger than 2*Xlen bytes are passed indirectly.
+ bool IsIndirect = TInfo.Width > 2 * SlotSize;
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TInfo,
+ SlotSize, /*AllowHigherAlign=*/true);
+}
+
+ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
+ int TySize = getContext().getTypeSize(Ty);
+ // RV64 ABI requires unsigned 32 bit integers to be sign extended.
+ if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
+ return ABIArgInfo::getSignExtend(Ty);
+ return ABIArgInfo::getExtend(Ty);
+}
+
+namespace {
+class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
+ unsigned FLen, bool EABI)
+ : TargetCodeGenInfo(
+ std::make_unique<RISCVABIInfo>(CGT, XLen, FLen, EABI)) {}
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override {
+ const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD) return;
+
+ const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
+ if (!Attr)
+ return;
+
+ const char *Kind;
+ switch (Attr->getInterrupt()) {
+ case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
+ case RISCVInterruptAttr::machine: Kind = "machine"; break;
+ }
+
+ auto *Fn = cast<llvm::Function>(GV);
+
+ Fn->addFnAttr("interrupt", Kind);
+ }
+};
+} // namespace
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createRISCVTargetCodeGenInfo(CodeGenModule &CGM, unsigned XLen,
+ unsigned FLen, bool EABI) {
+ return std::make_unique<RISCVTargetCodeGenInfo>(CGM.getTypes(), XLen, FLen,
+ EABI);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/SPIR.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/SPIR.cpp
new file mode 100644
index 000000000000..cf068cbc4fcd
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/SPIR.cpp
@@ -0,0 +1,218 @@
+//===- SPIR.cpp -----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Base ABI and target codegen info implementation common between SPIR and
+// SPIR-V.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CommonSPIRABIInfo : public DefaultABIInfo {
+public:
+ CommonSPIRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) { setCCs(); }
+
+private:
+ void setCCs();
+};
+
+class SPIRVABIInfo : public CommonSPIRABIInfo {
+public:
+ SPIRVABIInfo(CodeGenTypes &CGT) : CommonSPIRABIInfo(CGT) {}
+ void computeInfo(CGFunctionInfo &FI) const override;
+
+private:
+ ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
+};
+} // end anonymous namespace
+namespace {
+class CommonSPIRTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ CommonSPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<CommonSPIRABIInfo>(CGT)) {}
+ CommonSPIRTargetCodeGenInfo(std::unique_ptr<ABIInfo> ABIInfo)
+ : TargetCodeGenInfo(std::move(ABIInfo)) {}
+
+ LangAS getASTAllocaAddressSpace() const override {
+ return getLangASFromTargetAS(
+ getABIInfo().getDataLayout().getAllocaAddrSpace());
+ }
+
+ unsigned getOpenCLKernelCallingConv() const override;
+ llvm::Type *getOpenCLType(CodeGenModule &CGM, const Type *T) const override;
+};
+class SPIRVTargetCodeGenInfo : public CommonSPIRTargetCodeGenInfo {
+public:
+ SPIRVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : CommonSPIRTargetCodeGenInfo(std::make_unique<SPIRVABIInfo>(CGT)) {}
+ void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
+};
+} // End anonymous namespace.
+
+void CommonSPIRABIInfo::setCCs() {
+ assert(getRuntimeCC() == llvm::CallingConv::C);
+ RuntimeCC = llvm::CallingConv::SPIR_FUNC;
+}
+
+ABIArgInfo SPIRVABIInfo::classifyKernelArgumentType(QualType Ty) const {
+ if (getContext().getLangOpts().CUDAIsDevice) {
+ // Coerce pointer arguments with default address space to CrossWorkGroup
+ // pointers for HIPSPV/CUDASPV. When the language mode is HIP/CUDA, the
+ // SPIRTargetInfo maps cuda_device to SPIR-V's CrossWorkGroup address space.
+ llvm::Type *LTy = CGT.ConvertType(Ty);
+ auto DefaultAS = getContext().getTargetAddressSpace(LangAS::Default);
+ auto GlobalAS = getContext().getTargetAddressSpace(LangAS::cuda_device);
+ auto *PtrTy = llvm::dyn_cast<llvm::PointerType>(LTy);
+ if (PtrTy && PtrTy->getAddressSpace() == DefaultAS) {
+ LTy = llvm::PointerType::get(PtrTy->getContext(), GlobalAS);
+ return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
+ }
+
+ // Force copying aggregate type in kernel arguments by value when
+ // compiling CUDA targeting SPIR-V. This is required for the object
+ // copied to be valid on the device.
+ // This behavior follows the CUDA spec
+ // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#global-function-argument-processing,
+ // and matches the NVPTX implementation.
+ if (isAggregateTypeForABI(Ty))
+ return getNaturalAlignIndirect(Ty, /* byval */ true);
+ }
+ return classifyArgumentType(Ty);
+}
+
+void SPIRVABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ // The logic is same as in DefaultABIInfo with an exception on the kernel
+ // arguments handling.
+ llvm::CallingConv::ID CC = FI.getCallingConvention();
+
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+ for (auto &I : FI.arguments()) {
+ if (CC == llvm::CallingConv::SPIR_KERNEL) {
+ I.info = classifyKernelArgumentType(I.type);
+ } else {
+ I.info = classifyArgumentType(I.type);
+ }
+ }
+}
+
+namespace clang {
+namespace CodeGen {
+void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
+ if (CGM.getTarget().getTriple().isSPIRV())
+ SPIRVABIInfo(CGM.getTypes()).computeInfo(FI);
+ else
+ CommonSPIRABIInfo(CGM.getTypes()).computeInfo(FI);
+}
+}
+}
+
+unsigned CommonSPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
+ return llvm::CallingConv::SPIR_KERNEL;
+}
+
+void SPIRVTargetCodeGenInfo::setCUDAKernelCallingConvention(
+ const FunctionType *&FT) const {
+ // Convert HIP kernels to SPIR-V kernels.
+ if (getABIInfo().getContext().getLangOpts().HIP) {
+ FT = getABIInfo().getContext().adjustFunctionType(
+ FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
+ return;
+ }
+}
+
+/// Construct a SPIR-V target extension type for the given OpenCL image type.
+static llvm::Type *getSPIRVImageType(llvm::LLVMContext &Ctx, StringRef BaseType,
+ StringRef OpenCLName,
+ unsigned AccessQualifier) {
+ // These parameters compare to the operands of OpTypeImage (see
+ // https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#OpTypeImage
+ // for more details). The first 6 integer parameters all default to 0, and
+ // will be changed to 1 only for the image type(s) that set the parameter to
+ // one. The 7th integer parameter is the access qualifier, which is tacked on
+ // at the end.
+ SmallVector<unsigned, 7> IntParams = {0, 0, 0, 0, 0, 0};
+
+ // Choose the dimension of the image--this corresponds to the Dim enum in
+ // SPIR-V (first integer parameter of OpTypeImage).
+ if (OpenCLName.starts_with("image2d"))
+ IntParams[0] = 1; // 1D
+ else if (OpenCLName.starts_with("image3d"))
+ IntParams[0] = 2; // 2D
+ else if (OpenCLName == "image1d_buffer")
+ IntParams[0] = 5; // Buffer
+ else
+ assert(OpenCLName.starts_with("image1d") && "Unknown image type");
+
+ // Set the other integer parameters of OpTypeImage if necessary. Note that the
+ // OpenCL image types don't provide any information for the Sampled or
+ // Image Format parameters.
+ if (OpenCLName.contains("_depth"))
+ IntParams[1] = 1;
+ if (OpenCLName.contains("_array"))
+ IntParams[2] = 1;
+ if (OpenCLName.contains("_msaa"))
+ IntParams[3] = 1;
+
+ // Access qualifier
+ IntParams.push_back(AccessQualifier);
+
+ return llvm::TargetExtType::get(Ctx, BaseType, {llvm::Type::getVoidTy(Ctx)},
+ IntParams);
+}
+
+llvm::Type *CommonSPIRTargetCodeGenInfo::getOpenCLType(CodeGenModule &CGM,
+ const Type *Ty) const {
+ llvm::LLVMContext &Ctx = CGM.getLLVMContext();
+ if (auto *PipeTy = dyn_cast<PipeType>(Ty))
+ return llvm::TargetExtType::get(Ctx, "spirv.Pipe", {},
+ {!PipeTy->isReadOnly()});
+ if (auto *BuiltinTy = dyn_cast<BuiltinType>(Ty)) {
+ enum AccessQualifier : unsigned { AQ_ro = 0, AQ_wo = 1, AQ_rw = 2 };
+ switch (BuiltinTy->getKind()) {
+#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
+ case BuiltinType::Id: \
+ return getSPIRVImageType(Ctx, "spirv.Image", #ImgType, AQ_##Suffix);
+#include "clang/Basic/OpenCLImageTypes.def"
+ case BuiltinType::OCLSampler:
+ return llvm::TargetExtType::get(Ctx, "spirv.Sampler");
+ case BuiltinType::OCLEvent:
+ return llvm::TargetExtType::get(Ctx, "spirv.Event");
+ case BuiltinType::OCLClkEvent:
+ return llvm::TargetExtType::get(Ctx, "spirv.DeviceEvent");
+ case BuiltinType::OCLQueue:
+ return llvm::TargetExtType::get(Ctx, "spirv.Queue");
+ case BuiltinType::OCLReserveID:
+ return llvm::TargetExtType::get(Ctx, "spirv.ReserveId");
+#define INTEL_SUBGROUP_AVC_TYPE(Name, Id) \
+ case BuiltinType::OCLIntelSubgroupAVC##Id: \
+ return llvm::TargetExtType::get(Ctx, "spirv.Avc" #Id "INTEL");
+#include "clang/Basic/OpenCLExtensionTypes.def"
+ default:
+ return nullptr;
+ }
+ }
+
+ return nullptr;
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createCommonSPIRTargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<CommonSPIRTargetCodeGenInfo>(CGM.getTypes());
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createSPIRVTargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<SPIRVTargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/Sparc.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/Sparc.cpp
new file mode 100644
index 000000000000..a337a52a94ec
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/Sparc.cpp
@@ -0,0 +1,409 @@
+//===- Sparc.cpp ----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// SPARC v8 ABI Implementation.
+// Based on the SPARC Compliance Definition version 2.4.1.
+//
+// Ensures that complex values are passed in registers.
+//
+namespace {
+class SparcV8ABIInfo : public DefaultABIInfo {
+public:
+ SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
+private:
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ void computeInfo(CGFunctionInfo &FI) const override;
+};
+} // end anonymous namespace
+
+
+ABIArgInfo
+SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
+ if (Ty->isAnyComplexType()) {
+ return ABIArgInfo::getDirect();
+ }
+ else {
+ return DefaultABIInfo::classifyReturnType(Ty);
+ }
+}
+
+void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &Arg : FI.arguments())
+ Arg.info = classifyArgumentType(Arg.type);
+}
+
+namespace {
+class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<SparcV8ABIInfo>(CGT)) {}
+
+ llvm::Value *decodeReturnAddress(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override {
+ int Offset;
+ if (isAggregateTypeForABI(CGF.CurFnInfo->getReturnType()))
+ Offset = 12;
+ else
+ Offset = 8;
+ return CGF.Builder.CreateGEP(CGF.Int8Ty, Address,
+ llvm::ConstantInt::get(CGF.Int32Ty, Offset));
+ }
+
+ llvm::Value *encodeReturnAddress(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override {
+ int Offset;
+ if (isAggregateTypeForABI(CGF.CurFnInfo->getReturnType()))
+ Offset = -12;
+ else
+ Offset = -8;
+ return CGF.Builder.CreateGEP(CGF.Int8Ty, Address,
+ llvm::ConstantInt::get(CGF.Int32Ty, Offset));
+ }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// SPARC v9 ABI Implementation.
+// Based on the SPARC Compliance Definition version 2.4.1.
+//
+// Function arguments a mapped to a nominal "parameter array" and promoted to
+// registers depending on their type. Each argument occupies 8 or 16 bytes in
+// the array, structs larger than 16 bytes are passed indirectly.
+//
+// One case requires special care:
+//
+// struct mixed {
+// int i;
+// float f;
+// };
+//
+// When a struct mixed is passed by value, it only occupies 8 bytes in the
+// parameter array, but the int is passed in an integer register, and the float
+// is passed in a floating point register. This is represented as two arguments
+// with the LLVM IR inreg attribute:
+//
+// declare void f(i32 inreg %i, float inreg %f)
+//
+// The code generator will only allocate 4 bytes from the parameter array for
+// the inreg arguments. All other arguments are allocated a multiple of 8
+// bytes.
+//
+namespace {
+class SparcV9ABIInfo : public ABIInfo {
+public:
+ SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+private:
+ ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
+ void computeInfo(CGFunctionInfo &FI) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+
+ // Coercion type builder for structs passed in registers. The coercion type
+ // serves two purposes:
+ //
+ // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
+ // in registers.
+ // 2. Expose aligned floating point elements as first-level elements, so the
+ // code generator knows to pass them in floating point registers.
+ //
+ // We also compute the InReg flag which indicates that the struct contains
+ // aligned 32-bit floats.
+ //
+ struct CoerceBuilder {
+ llvm::LLVMContext &Context;
+ const llvm::DataLayout &DL;
+ SmallVector<llvm::Type*, 8> Elems;
+ uint64_t Size;
+ bool InReg;
+
+ CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
+ : Context(c), DL(dl), Size(0), InReg(false) {}
+
+ // Pad Elems with integers until Size is ToSize.
+ void pad(uint64_t ToSize) {
+ assert(ToSize >= Size && "Cannot remove elements");
+ if (ToSize == Size)
+ return;
+
+ // Finish the current 64-bit word.
+ uint64_t Aligned = llvm::alignTo(Size, 64);
+ if (Aligned > Size && Aligned <= ToSize) {
+ Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
+ Size = Aligned;
+ }
+
+ // Add whole 64-bit words.
+ while (Size + 64 <= ToSize) {
+ Elems.push_back(llvm::Type::getInt64Ty(Context));
+ Size += 64;
+ }
+
+ // Final in-word padding.
+ if (Size < ToSize) {
+ Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
+ Size = ToSize;
+ }
+ }
+
+ // Add a floating point element at Offset.
+ void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
+ // Unaligned floats are treated as integers.
+ if (Offset % Bits)
+ return;
+ // The InReg flag is only required if there are any floats < 64 bits.
+ if (Bits < 64)
+ InReg = true;
+ pad(Offset);
+ Elems.push_back(Ty);
+ Size = Offset + Bits;
+ }
+
+ // Add a struct type to the coercion type, starting at Offset (in bits).
+ void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
+ const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
+ for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
+ llvm::Type *ElemTy = StrTy->getElementType(i);
+ uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
+ switch (ElemTy->getTypeID()) {
+ case llvm::Type::StructTyID:
+ addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
+ break;
+ case llvm::Type::FloatTyID:
+ addFloat(ElemOffset, ElemTy, 32);
+ break;
+ case llvm::Type::DoubleTyID:
+ addFloat(ElemOffset, ElemTy, 64);
+ break;
+ case llvm::Type::FP128TyID:
+ addFloat(ElemOffset, ElemTy, 128);
+ break;
+ case llvm::Type::PointerTyID:
+ if (ElemOffset % 64 == 0) {
+ pad(ElemOffset);
+ Elems.push_back(ElemTy);
+ Size += 64;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ // Check if Ty is a usable substitute for the coercion type.
+ bool isUsableType(llvm::StructType *Ty) const {
+ return llvm::ArrayRef(Elems) == Ty->elements();
+ }
+
+ // Get the coercion type as a literal struct type.
+ llvm::Type *getType() const {
+ if (Elems.size() == 1)
+ return Elems.front();
+ else
+ return llvm::StructType::get(Context, Elems);
+ }
+ };
+};
+} // end anonymous namespace
+
+ABIArgInfo
+SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
+ if (Ty->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ // Anything too big to fit in registers is passed with an explicit indirect
+ // pointer / sret pointer.
+ if (Size > SizeLimit)
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // Integer types smaller than a register are extended.
+ if (Size < 64 && Ty->isIntegerType())
+ return ABIArgInfo::getExtend(Ty);
+
+ if (const auto *EIT = Ty->getAs<BitIntType>())
+ if (EIT->getNumBits() < 64)
+ return ABIArgInfo::getExtend(Ty);
+
+ // Other non-aggregates go in registers.
+ if (!isAggregateTypeForABI(Ty))
+ return ABIArgInfo::getDirect();
+
+ // If a C++ object has either a non-trivial copy constructor or a non-trivial
+ // destructor, it is passed with an explicit indirect pointer / sret pointer.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ // This is a small aggregate type that should be passed in registers.
+ // Build a coercion type from the LLVM struct type.
+ llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
+ if (!StrTy)
+ return ABIArgInfo::getDirect();
+
+ CoerceBuilder CB(getVMContext(), getDataLayout());
+ CB.addStruct(0, StrTy);
+ CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));
+
+ // Try to use the original type for coercion.
+ llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
+
+ if (CB.InReg)
+ return ABIArgInfo::getDirectInReg(CoerceTy);
+ else
+ return ABIArgInfo::getDirect(CoerceTy);
+}
+
+Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ ABIArgInfo AI = classifyType(Ty, 16 * 8);
+ llvm::Type *ArgTy = CGT.ConvertType(Ty);
+ if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
+ AI.setCoerceToType(ArgTy);
+
+ CharUnits SlotSize = CharUnits::fromQuantity(8);
+
+ CGBuilderTy &Builder = CGF.Builder;
+ Address Addr = Address(Builder.CreateLoad(VAListAddr, "ap.cur"),
+ getVAListElementType(CGF), SlotSize);
+ llvm::Type *ArgPtrTy = CGF.UnqualPtrTy;
+
+ auto TypeInfo = getContext().getTypeInfoInChars(Ty);
+
+ Address ArgAddr = Address::invalid();
+ CharUnits Stride;
+ switch (AI.getKind()) {
+ case ABIArgInfo::Expand:
+ case ABIArgInfo::CoerceAndExpand:
+ case ABIArgInfo::InAlloca:
+ llvm_unreachable("Unsupported ABI kind for va_arg");
+
+ case ABIArgInfo::Extend: {
+ Stride = SlotSize;
+ CharUnits Offset = SlotSize - TypeInfo.Width;
+ ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
+ break;
+ }
+
+ case ABIArgInfo::Direct: {
+ auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
+ Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize);
+ ArgAddr = Addr;
+ break;
+ }
+
+ case ABIArgInfo::Indirect:
+ case ABIArgInfo::IndirectAliased:
+ Stride = SlotSize;
+ ArgAddr = Addr.withElementType(ArgPtrTy);
+ ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"), ArgTy,
+ TypeInfo.Align);
+ break;
+
+ case ABIArgInfo::Ignore:
+ return Address(llvm::UndefValue::get(ArgPtrTy), ArgTy, TypeInfo.Align);
+ }
+
+ // Update VAList.
+ Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next");
+ Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
+
+ return ArgAddr.withElementType(ArgTy);
+}
+
+void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
+ for (auto &I : FI.arguments())
+ I.info = classifyType(I.type, 16 * 8);
+}
+
+namespace {
+class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<SparcV9ABIInfo>(CGT)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
+ return 14;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override;
+
+ llvm::Value *decodeReturnAddress(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override {
+ return CGF.Builder.CreateGEP(CGF.Int8Ty, Address,
+ llvm::ConstantInt::get(CGF.Int32Ty, 8));
+ }
+
+ llvm::Value *encodeReturnAddress(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override {
+ return CGF.Builder.CreateGEP(CGF.Int8Ty, Address,
+ llvm::ConstantInt::get(CGF.Int32Ty, -8));
+ }
+};
+} // end anonymous namespace
+
+bool
+SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ // This is calculated from the LLVM and GCC tables and verified
+ // against gcc output. AFAIK all ABIs use the same encoding.
+
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::IntegerType *i8 = CGF.Int8Ty;
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+
+ // 0-31: the 8-byte general-purpose registers
+ AssignToArrayRange(Builder, Address, Eight8, 0, 31);
+
+ // 32-63: f0-31, the 4-byte floating-point registers
+ AssignToArrayRange(Builder, Address, Four8, 32, 63);
+
+ // Y = 64
+ // PSR = 65
+ // WIM = 66
+ // TBR = 67
+ // PC = 68
+ // NPC = 69
+ // FSR = 70
+ // CSR = 71
+ AssignToArrayRange(Builder, Address, Eight8, 64, 71);
+
+ // 72-87: d0-15, the 8-byte floating-point registers
+ AssignToArrayRange(Builder, Address, Eight8, 72, 87);
+
+ return false;
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createSparcV8TargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<SparcV8TargetCodeGenInfo>(CGM.getTypes());
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createSparcV9TargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<SparcV9TargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/SystemZ.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/SystemZ.cpp
new file mode 100644
index 000000000000..6eb0c6ef2f7d
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/SystemZ.cpp
@@ -0,0 +1,538 @@
+//===- SystemZ.cpp --------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+#include "clang/Basic/Builtins.h"
+#include "llvm/IR/IntrinsicsS390.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// SystemZ ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class SystemZABIInfo : public ABIInfo {
+ bool HasVector;
+ bool IsSoftFloatABI;
+
+public:
+ SystemZABIInfo(CodeGenTypes &CGT, bool HV, bool SF)
+ : ABIInfo(CGT), HasVector(HV), IsSoftFloatABI(SF) {}
+
+ bool isPromotableIntegerTypeForABI(QualType Ty) const;
+ bool isCompoundType(QualType Ty) const;
+ bool isVectorArgumentType(QualType Ty) const;
+ bool isFPArgumentType(QualType Ty) const;
+ QualType GetSingleElementType(QualType Ty) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType ArgTy) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
+ ASTContext &Ctx;
+
+ // These are used for speeding up the search for a visible vector ABI.
+ mutable bool HasVisibleVecABIFlag = false;
+ mutable std::set<const Type *> SeenTypes;
+
+ // Returns true (the first time) if Ty is, or is found to include, a vector
+ // type that exposes the vector ABI. This is any vector >=16 bytes which
+ // with vector support are aligned to only 8 bytes. When IsParam is true,
+ // the type belongs to a value as passed between functions. If it is a
+ // vector <=16 bytes it will be passed in a vector register (if supported).
+ bool isVectorTypeBased(const Type *Ty, bool IsParam) const;
+
+public:
+ SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector, bool SoftFloatABI)
+ : TargetCodeGenInfo(
+ std::make_unique<SystemZABIInfo>(CGT, HasVector, SoftFloatABI)),
+ Ctx(CGT.getContext()) {
+ SwiftInfo =
+ std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/false);
+ }
+
+ // The vector ABI is different when the vector facility is present and when
+ // a module e.g. defines an externally visible vector variable, a flag
+ // indicating a visible vector ABI is added. Eventually this will result in
+ // a GNU attribute indicating the vector ABI of the module. Ty is the type
+ // of a variable or function parameter that is globally visible.
+ void handleExternallyVisibleObjABI(const Type *Ty, CodeGen::CodeGenModule &M,
+ bool IsParam) const {
+ if (!HasVisibleVecABIFlag && isVectorTypeBased(Ty, IsParam)) {
+ M.getModule().addModuleFlag(llvm::Module::Warning,
+ "s390x-visible-vector-ABI", 1);
+ HasVisibleVecABIFlag = true;
+ }
+ }
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const override {
+ if (!D)
+ return;
+
+ // Check if the vector ABI becomes visible by an externally visible
+ // variable or function.
+ if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ if (VD->isExternallyVisible())
+ handleExternallyVisibleObjABI(VD->getType().getTypePtr(), M,
+ /*IsParam*/false);
+ }
+ else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->isExternallyVisible())
+ handleExternallyVisibleObjABI(FD->getType().getTypePtr(), M,
+ /*IsParam*/false);
+ }
+ }
+
+ llvm::Value *testFPKind(llvm::Value *V, unsigned BuiltinID,
+ CGBuilderTy &Builder,
+ CodeGenModule &CGM) const override {
+ assert(V->getType()->isFloatingPointTy() && "V should have an FP type.");
+ // Only use TDC in constrained FP mode.
+ if (!Builder.getIsFPConstrained())
+ return nullptr;
+
+ llvm::Type *Ty = V->getType();
+ if (Ty->isFloatTy() || Ty->isDoubleTy() || Ty->isFP128Ty()) {
+ llvm::Module &M = CGM.getModule();
+ auto &Ctx = M.getContext();
+ llvm::Function *TDCFunc =
+ llvm::Intrinsic::getDeclaration(&M, llvm::Intrinsic::s390_tdc, Ty);
+ unsigned TDCBits = 0;
+ switch (BuiltinID) {
+ case Builtin::BI__builtin_isnan:
+ TDCBits = 0xf;
+ break;
+ case Builtin::BIfinite:
+ case Builtin::BI__finite:
+ case Builtin::BIfinitef:
+ case Builtin::BI__finitef:
+ case Builtin::BIfinitel:
+ case Builtin::BI__finitel:
+ case Builtin::BI__builtin_isfinite:
+ TDCBits = 0xfc0;
+ break;
+ case Builtin::BI__builtin_isinf:
+ TDCBits = 0x30;
+ break;
+ default:
+ break;
+ }
+ if (TDCBits)
+ return Builder.CreateCall(
+ TDCFunc,
+ {V, llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), TDCBits)});
+ }
+ return nullptr;
+ }
+};
+}
+
+bool SystemZABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // Promotable integer types are required to be promoted by the ABI.
+ if (ABIInfo::isPromotableIntegerTypeForABI(Ty))
+ return true;
+
+ if (const auto *EIT = Ty->getAs<BitIntType>())
+ if (EIT->getNumBits() < 64)
+ return true;
+
+ // 32-bit values must also be promoted.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ return true;
+ default:
+ return false;
+ }
+ return false;
+}
+
+bool SystemZABIInfo::isCompoundType(QualType Ty) const {
+ return (Ty->isAnyComplexType() ||
+ Ty->isVectorType() ||
+ isAggregateTypeForABI(Ty));
+}
+
+bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
+ return (HasVector &&
+ Ty->isVectorType() &&
+ getContext().getTypeSize(Ty) <= 128);
+}
+
+bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
+ if (IsSoftFloatABI)
+ return false;
+
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ return true;
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
+ const RecordType *RT = Ty->getAs<RecordType>();
+
+ if (RT && RT->isStructureOrClassType()) {
+ const RecordDecl *RD = RT->getDecl();
+ QualType Found;
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ if (CXXRD->hasDefinition())
+ for (const auto &I : CXXRD->bases()) {
+ QualType Base = I.getType();
+
+ // Empty bases don't affect things either way.
+ if (isEmptyRecord(getContext(), Base, true))
+ continue;
+
+ if (!Found.isNull())
+ return Ty;
+ Found = GetSingleElementType(Base);
+ }
+
+ // Check the fields.
+ for (const auto *FD : RD->fields()) {
+ // Unlike isSingleElementStruct(), empty structure and array fields
+ // do count. So do anonymous bitfields that aren't zero-sized.
+
+ // Like isSingleElementStruct(), ignore C++20 empty data members.
+ if (FD->hasAttr<NoUniqueAddressAttr>() &&
+ isEmptyRecord(getContext(), FD->getType(), true))
+ continue;
+
+ // Unlike isSingleElementStruct(), arrays do not count.
+ // Nested structures still do though.
+ if (!Found.isNull())
+ return Ty;
+ Found = GetSingleElementType(FD->getType());
+ }
+
+ // Unlike isSingleElementStruct(), trailing padding is allowed.
+ // An 8-byte aligned struct s { float f; } is passed as a double.
+ if (!Found.isNull())
+ return Found;
+ }
+
+ return Ty;
+}
+
+Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ // Assume that va_list type is correct; should be pointer to LLVM type:
+ // struct {
+ // i64 __gpr;
+ // i64 __fpr;
+ // i8 *__overflow_arg_area;
+ // i8 *__reg_save_area;
+ // };
+
+ // Every non-vector argument occupies 8 bytes and is passed by preference
+ // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
+ // always passed on the stack.
+ const SystemZTargetCodeGenInfo &SZCGI =
+ static_cast<const SystemZTargetCodeGenInfo &>(
+ CGT.getCGM().getTargetCodeGenInfo());
+ Ty = getContext().getCanonicalType(Ty);
+ auto TyInfo = getContext().getTypeInfoInChars(Ty);
+ llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Type *DirectTy = ArgTy;
+ ABIArgInfo AI = classifyArgumentType(Ty);
+ bool IsIndirect = AI.isIndirect();
+ bool InFPRs = false;
+ bool IsVector = false;
+ CharUnits UnpaddedSize;
+ CharUnits DirectAlign;
+ SZCGI.handleExternallyVisibleObjABI(Ty.getTypePtr(), CGT.getCGM(),
+ /*IsParam*/true);
+ if (IsIndirect) {
+ DirectTy = llvm::PointerType::getUnqual(DirectTy);
+ UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
+ } else {
+ if (AI.getCoerceToType())
+ ArgTy = AI.getCoerceToType();
+ InFPRs = (!IsSoftFloatABI && (ArgTy->isFloatTy() || ArgTy->isDoubleTy()));
+ IsVector = ArgTy->isVectorTy();
+ UnpaddedSize = TyInfo.Width;
+ DirectAlign = TyInfo.Align;
+ }
+ CharUnits PaddedSize = CharUnits::fromQuantity(8);
+ if (IsVector && UnpaddedSize > PaddedSize)
+ PaddedSize = CharUnits::fromQuantity(16);
+ assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");
+
+ CharUnits Padding = (PaddedSize - UnpaddedSize);
+
+ llvm::Type *IndexTy = CGF.Int64Ty;
+ llvm::Value *PaddedSizeV =
+ llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());
+
+ if (IsVector) {
+ // Work out the address of a vector argument on the stack.
+ // Vector arguments are always passed in the high bits of a
+ // single (8 byte) or double (16 byte) stack slot.
+ Address OverflowArgAreaPtr =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
+ Address OverflowArgArea =
+ Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
+ CGF.Int8Ty, TyInfo.Align);
+ Address MemAddr = OverflowArgArea.withElementType(DirectTy);
+
+ // Update overflow_arg_area_ptr pointer
+ llvm::Value *NewOverflowArgArea = CGF.Builder.CreateGEP(
+ OverflowArgArea.getElementType(), OverflowArgArea.getPointer(),
+ PaddedSizeV, "overflow_arg_area");
+ CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
+
+ return MemAddr;
+ }
+
+ assert(PaddedSize.getQuantity() == 8);
+
+ unsigned MaxRegs, RegCountField, RegSaveIndex;
+ CharUnits RegPadding;
+ if (InFPRs) {
+ MaxRegs = 4; // Maximum of 4 FPR arguments
+ RegCountField = 1; // __fpr
+ RegSaveIndex = 16; // save offset for f0
+ RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
+ } else {
+ MaxRegs = 5; // Maximum of 5 GPR arguments
+ RegCountField = 0; // __gpr
+ RegSaveIndex = 2; // save offset for r2
+ RegPadding = Padding; // values are passed in the low bits of a GPR
+ }
+
+ Address RegCountPtr =
+ CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
+ llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
+ llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
+ llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
+ "fits_in_regs");
+
+ llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+ llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+ CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
+
+ // Emit code to load the value if it was passed in registers.
+ CGF.EmitBlock(InRegBlock);
+
+ // Work out the address of an argument register.
+ llvm::Value *ScaledRegCount =
+ CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
+ llvm::Value *RegBase =
+ llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
+ + RegPadding.getQuantity());
+ llvm::Value *RegOffset =
+ CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
+ Address RegSaveAreaPtr =
+ CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
+ llvm::Value *RegSaveArea =
+ CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
+ Address RawRegAddr(
+ CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, RegOffset, "raw_reg_addr"),
+ CGF.Int8Ty, PaddedSize);
+ Address RegAddr = RawRegAddr.withElementType(DirectTy);
+
+ // Update the register count
+ llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
+ llvm::Value *NewRegCount =
+ CGF.Builder.CreateAdd(RegCount, One, "reg_count");
+ CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
+ CGF.EmitBranch(ContBlock);
+
+ // Emit code to load the value if it was passed in memory.
+ CGF.EmitBlock(InMemBlock);
+
+ // Work out the address of a stack argument.
+ Address OverflowArgAreaPtr =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
+ Address OverflowArgArea =
+ Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
+ CGF.Int8Ty, PaddedSize);
+ Address RawMemAddr =
+ CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
+ Address MemAddr = RawMemAddr.withElementType(DirectTy);
+
+ // Update overflow_arg_area_ptr pointer
+ llvm::Value *NewOverflowArgArea =
+ CGF.Builder.CreateGEP(OverflowArgArea.getElementType(),
+ OverflowArgArea.getPointer(), PaddedSizeV,
+ "overflow_arg_area");
+ CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
+ CGF.EmitBranch(ContBlock);
+
+ // Return the appropriate result.
+ CGF.EmitBlock(ContBlock);
+ Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
+ "va_arg.addr");
+
+ if (IsIndirect)
+ ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"), ArgTy,
+ TyInfo.Align);
+
+ return ResAddr;
+}
+
+ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+ if (isVectorArgumentType(RetTy))
+ return ABIArgInfo::getDirect();
+ if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
+ return getNaturalAlignIndirect(RetTy);
+ return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
+ // Handle the generic C++ ABI.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ // Integers and enums are extended to full register width.
+ if (isPromotableIntegerTypeForABI(Ty))
+ return ABIArgInfo::getExtend(Ty);
+
+ // Handle vector types and vector-like structure types. Note that
+ // as opposed to float-like structure types, we do not allow any
+ // padding for vector-like structures, so verify the sizes match.
+ uint64_t Size = getContext().getTypeSize(Ty);
+ QualType SingleElementTy = GetSingleElementType(Ty);
+ if (isVectorArgumentType(SingleElementTy) &&
+ getContext().getTypeSize(SingleElementTy) == Size)
+ return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));
+
+ // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
+ if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+
+ // Handle small structures.
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ // Structures with flexible arrays have variable length, so really
+ // fail the size test above.
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+
+ // The structure is passed as an unextended integer, a float, or a double.
+ llvm::Type *PassTy;
+ if (isFPArgumentType(SingleElementTy)) {
+ assert(Size == 32 || Size == 64);
+ if (Size == 32)
+ PassTy = llvm::Type::getFloatTy(getVMContext());
+ else
+ PassTy = llvm::Type::getDoubleTy(getVMContext());
+ } else
+ PassTy = llvm::IntegerType::get(getVMContext(), Size);
+ return ABIArgInfo::getDirect(PassTy);
+ }
+
+ // Non-structure compounds are passed indirectly.
+ if (isCompoundType(Ty))
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+
+ return ABIArgInfo::getDirect(nullptr);
+}
+
+void SystemZABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ const SystemZTargetCodeGenInfo &SZCGI =
+ static_cast<const SystemZTargetCodeGenInfo &>(
+ CGT.getCGM().getTargetCodeGenInfo());
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ unsigned Idx = 0;
+ for (auto &I : FI.arguments()) {
+ I.info = classifyArgumentType(I.type);
+ if (FI.isVariadic() && Idx++ >= FI.getNumRequiredArgs())
+ // Check if a vararg vector argument is passed, in which case the
+ // vector ABI becomes visible as the va_list could be passed on to
+ // other functions.
+ SZCGI.handleExternallyVisibleObjABI(I.type.getTypePtr(), CGT.getCGM(),
+ /*IsParam*/true);
+ }
+}
+
+bool SystemZTargetCodeGenInfo::isVectorTypeBased(const Type *Ty,
+ bool IsParam) const {
+ if (!SeenTypes.insert(Ty).second)
+ return false;
+
+ if (IsParam) {
+ // A narrow (<16 bytes) vector will as a parameter also expose the ABI as
+ // it will be passed in a vector register. A wide (>16 bytes) vector will
+ // be passed via "hidden" pointer where any extra alignment is not
+ // required (per GCC).
+ const Type *SingleEltTy = getABIInfo<SystemZABIInfo>()
+ .GetSingleElementType(QualType(Ty, 0))
+ .getTypePtr();
+ bool SingleVecEltStruct = SingleEltTy != Ty && SingleEltTy->isVectorType() &&
+ Ctx.getTypeSize(SingleEltTy) == Ctx.getTypeSize(Ty);
+ if (Ty->isVectorType() || SingleVecEltStruct)
+ return Ctx.getTypeSize(Ty) / 8 <= 16;
+ }
+
+ // Assume pointers are dereferenced.
+ while (Ty->isPointerType() || Ty->isArrayType())
+ Ty = Ty->getPointeeOrArrayElementType();
+
+ // Vectors >= 16 bytes expose the ABI through alignment requirements.
+ if (Ty->isVectorType() && Ctx.getTypeSize(Ty) / 8 >= 16)
+ return true;
+
+ if (const auto *RecordTy = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RecordTy->getDecl();
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ if (CXXRD->hasDefinition())
+ for (const auto &I : CXXRD->bases())
+ if (isVectorTypeBased(I.getType().getTypePtr(), /*IsParam*/false))
+ return true;
+ for (const auto *FD : RD->fields())
+ if (isVectorTypeBased(FD->getType().getTypePtr(), /*IsParam*/false))
+ return true;
+ }
+
+ if (const auto *FT = Ty->getAs<FunctionType>())
+ if (isVectorTypeBased(FT->getReturnType().getTypePtr(), /*IsParam*/true))
+ return true;
+ if (const FunctionProtoType *Proto = Ty->getAs<FunctionProtoType>())
+ for (const auto &ParamType : Proto->getParamTypes())
+ if (isVectorTypeBased(ParamType.getTypePtr(), /*IsParam*/true))
+ return true;
+
+ return false;
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createSystemZTargetCodeGenInfo(CodeGenModule &CGM, bool HasVector,
+ bool SoftFloatABI) {
+ return std::make_unique<SystemZTargetCodeGenInfo>(CGM.getTypes(), HasVector,
+ SoftFloatABI);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/TCE.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/TCE.cpp
new file mode 100644
index 000000000000..d7178b4b8a94
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/TCE.cpp
@@ -0,0 +1,82 @@
+//===- TCE.cpp ------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
+// Currently subclassed only to implement custom OpenCL C function attribute
+// handling.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class TCETargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ TCETargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const override;
+};
+
+void TCETargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
+ if (GV->isDeclaration())
+ return;
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (!FD) return;
+
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ if (M.getLangOpts().OpenCL) {
+ if (FD->hasAttr<OpenCLKernelAttr>()) {
+ // OpenCL C Kernel functions are not subject to inlining
+ F->addFnAttr(llvm::Attribute::NoInline);
+ const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
+ if (Attr) {
+ // Convert the reqd_work_group_size() attributes to metadata.
+ llvm::LLVMContext &Context = F->getContext();
+ llvm::NamedMDNode *OpenCLMetadata =
+ M.getModule().getOrInsertNamedMetadata(
+ "opencl.kernel_wg_size_info");
+
+ SmallVector<llvm::Metadata *, 5> Operands;
+ Operands.push_back(llvm::ConstantAsMetadata::get(F));
+
+ Operands.push_back(
+ llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
+ M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
+ Operands.push_back(
+ llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
+ M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
+ Operands.push_back(
+ llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
+ M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));
+
+ // Add a boolean constant operand for "required" (true) or "hint"
+ // (false) for implementing the work_group_size_hint attr later.
+ // Currently always true as the hint is not yet implemented.
+ Operands.push_back(
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
+ OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
+ }
+ }
+ }
+}
+
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createTCETargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<TCETargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/VE.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/VE.cpp
new file mode 100644
index 000000000000..a7acc249cc2b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/VE.cpp
@@ -0,0 +1,71 @@
+//===- VE.cpp -------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// VE ABI Implementation.
+//
+namespace {
+class VEABIInfo : public DefaultABIInfo {
+public:
+ VEABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
+private:
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+ void computeInfo(CGFunctionInfo &FI) const override;
+};
+} // end anonymous namespace
+
+ABIArgInfo VEABIInfo::classifyReturnType(QualType Ty) const {
+ if (Ty->isAnyComplexType())
+ return ABIArgInfo::getDirect();
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size < 64 && Ty->isIntegerType())
+ return ABIArgInfo::getExtend(Ty);
+ return DefaultABIInfo::classifyReturnType(Ty);
+}
+
+ABIArgInfo VEABIInfo::classifyArgumentType(QualType Ty) const {
+ if (Ty->isAnyComplexType())
+ return ABIArgInfo::getDirect();
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size < 64 && Ty->isIntegerType())
+ return ABIArgInfo::getExtend(Ty);
+ return DefaultABIInfo::classifyArgumentType(Ty);
+}
+
+void VEABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &Arg : FI.arguments())
+ Arg.info = classifyArgumentType(Arg.type);
+}
+
+namespace {
+class VETargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ VETargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<VEABIInfo>(CGT)) {}
+ // VE ABI requires the arguments of variadic and prototype-less functions
+ // are passed in both registers and memory.
+ bool isNoProtoCallVariadic(const CallArgList &args,
+ const FunctionNoProtoType *fnType) const override {
+ return true;
+ }
+};
+} // end anonymous namespace
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createVETargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<VETargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/WebAssembly.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/WebAssembly.cpp
new file mode 100644
index 000000000000..bd332228ce5b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/WebAssembly.cpp
@@ -0,0 +1,173 @@
+//===- WebAssembly.cpp ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// WebAssembly ABI Implementation
+//
+// This is a very simple ABI that relies a lot on DefaultABIInfo.
+//===----------------------------------------------------------------------===//
+
+class WebAssemblyABIInfo final : public ABIInfo {
+ DefaultABIInfo defaultInfo;
+ WebAssemblyABIKind Kind;
+
+public:
+ explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT,
+ WebAssemblyABIKind Kind)
+ : ABIInfo(CGT), defaultInfo(CGT), Kind(Kind) {}
+
+private:
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+ // DefaultABIInfo's classifyReturnType and classifyArgumentType are
+ // non-virtual, but computeInfo and EmitVAArg are virtual, so we
+ // overload them.
+ void computeInfo(CGFunctionInfo &FI) const override {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &Arg : FI.arguments())
+ Arg.info = classifyArgumentType(Arg.type);
+ }
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
+public:
+ explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
+ WebAssemblyABIKind K)
+ : TargetCodeGenInfo(std::make_unique<WebAssemblyABIInfo>(CGT, K)) {
+ SwiftInfo =
+ std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/false);
+ }
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override {
+ TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
+ if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+ if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+ llvm::AttrBuilder B(GV->getContext());
+ B.addAttribute("wasm-import-module", Attr->getImportModule());
+ Fn->addFnAttrs(B);
+ }
+ if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+ llvm::AttrBuilder B(GV->getContext());
+ B.addAttribute("wasm-import-name", Attr->getImportName());
+ Fn->addFnAttrs(B);
+ }
+ if (const auto *Attr = FD->getAttr<WebAssemblyExportNameAttr>()) {
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+ llvm::AttrBuilder B(GV->getContext());
+ B.addAttribute("wasm-export-name", Attr->getExportName());
+ Fn->addFnAttrs(B);
+ }
+ }
+
+ if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+ if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
+ Fn->addFnAttr("no-prototype");
+ }
+ }
+
+ /// Return the WebAssembly externref reference type.
+ virtual llvm::Type *getWasmExternrefReferenceType() const override {
+ return llvm::Type::getWasm_ExternrefTy(getABIInfo().getVMContext());
+ }
+ /// Return the WebAssembly funcref reference type.
+ virtual llvm::Type *getWasmFuncrefReferenceType() const override {
+ return llvm::Type::getWasm_FuncrefTy(getABIInfo().getVMContext());
+ }
+};
+
+/// Classify argument of given type \p Ty.
+ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ if (isAggregateTypeForABI(Ty)) {
+ // Records with non-trivial destructors/copy-constructors should not be
+ // passed by value.
+ if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+ // Lower single-element structs to just pass a regular value. TODO: We
+ // could do reasonable-size multiple-element structs too, using getExpand(),
+ // though watch out for things like bitfields.
+ if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
+ return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+ // For the experimental multivalue ABI, fully expand all other aggregates
+ if (Kind == WebAssemblyABIKind::ExperimentalMV) {
+ const RecordType *RT = Ty->getAs<RecordType>();
+ assert(RT);
+ bool HasBitField = false;
+ for (auto *Field : RT->getDecl()->fields()) {
+ if (Field->isBitField()) {
+ HasBitField = true;
+ break;
+ }
+ }
+ if (!HasBitField)
+ return ABIArgInfo::getExpand();
+ }
+ }
+
+ // Otherwise just do the default thing.
+ return defaultInfo.classifyArgumentType(Ty);
+}
+
+ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
+ if (isAggregateTypeForABI(RetTy)) {
+ // Records with non-trivial destructors/copy-constructors should not be
+ // returned by value.
+ if (!getRecordArgABI(RetTy, getCXXABI())) {
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), RetTy, true))
+ return ABIArgInfo::getIgnore();
+ // Lower single-element structs to just return a regular value. TODO: We
+ // could do reasonable-size multiple-element structs too, using
+ // ABIArgInfo::getDirect().
+ if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
+ return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+ // For the experimental multivalue ABI, return all other aggregates
+ if (Kind == WebAssemblyABIKind::ExperimentalMV)
+ return ABIArgInfo::getDirect();
+ }
+ }
+
+ // Otherwise just do the default thing.
+ return defaultInfo.classifyReturnType(RetTy);
+}
+
+Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ bool IsIndirect = isAggregateTypeForABI(Ty) &&
+ !isEmptyRecord(getContext(), Ty, true) &&
+ !isSingleElementStruct(Ty, getContext());
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
+ getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(4),
+ /*AllowHigherAlign=*/true);
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createWebAssemblyTargetCodeGenInfo(CodeGenModule &CGM,
+ WebAssemblyABIKind K) {
+ return std::make_unique<WebAssemblyTargetCodeGenInfo>(CGM.getTypes(), K);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/X86.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/X86.cpp
new file mode 100644
index 000000000000..2291c991fb11
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/X86.cpp
@@ -0,0 +1,3436 @@
+//===- X86.cpp ------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+#include "clang/Basic/DiagnosticFrontend.h"
+#include "llvm/ADT/SmallBitVector.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+namespace {
+
+/// IsX86_MMXType - Return true if this is an MMX type.
+bool IsX86_MMXType(llvm::Type *IRType) {
+ // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
+ return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
+ cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
+ IRType->getScalarSizeInBits() != 64;
+}
+
+static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ StringRef Constraint,
+ llvm::Type* Ty) {
+ bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
+ .Cases("y", "&y", "^Ym", true)
+ .Default(false);
+ if (IsMMXCons && Ty->isVectorTy()) {
+ if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedValue() !=
+ 64) {
+ // Invalid MMX constraint
+ return nullptr;
+ }
+
+ return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
+ }
+
+ if (Constraint == "k") {
+ llvm::Type *Int1Ty = llvm::Type::getInt1Ty(CGF.getLLVMContext());
+ return llvm::FixedVectorType::get(Int1Ty, Ty->getScalarSizeInBits());
+ }
+
+ // No operation needed
+ return Ty;
+}
+
+/// Returns true if this type can be passed in SSE registers with the
+/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
+static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
+ if (BT->getKind() == BuiltinType::LongDouble) {
+ if (&Context.getTargetInfo().getLongDoubleFormat() ==
+ &llvm::APFloat::x87DoubleExtended())
+ return false;
+ }
+ return true;
+ }
+ } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
+ // registers specially.
+ unsigned VecSize = Context.getTypeSize(VT);
+ if (VecSize == 128 || VecSize == 256 || VecSize == 512)
+ return true;
+ }
+ return false;
+}
+
+/// Returns true if this aggregate is small enough to be passed in SSE registers
+/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
+static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
+ return NumMembers <= 4;
+}
+
+/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
+static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
+ auto AI = ABIArgInfo::getDirect(T);
+ AI.setInReg(true);
+ AI.setCanBeFlattened(false);
+ return AI;
+}
+
+//===----------------------------------------------------------------------===//
+// X86-32 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+/// Similar to llvm::CCState, but for Clang.
+struct CCState {
+ CCState(CGFunctionInfo &FI)
+ : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()),
+ Required(FI.getRequiredArgs()), IsDelegateCall(FI.isDelegateCall()) {}
+
+ llvm::SmallBitVector IsPreassigned;
+ unsigned CC = CallingConv::CC_C;
+ unsigned FreeRegs = 0;
+ unsigned FreeSSERegs = 0;
+ RequiredArgs Required;
+ bool IsDelegateCall = false;
+};
+
+/// X86_32ABIInfo - The X86-32 ABI information.
+class X86_32ABIInfo : public ABIInfo {
+ enum Class {
+ Integer,
+ Float
+ };
+
+ static const unsigned MinABIStackAlignInBytes = 4;
+
+ bool IsDarwinVectorABI;
+ bool IsRetSmallStructInRegABI;
+ bool IsWin32StructABI;
+ bool IsSoftFloatABI;
+ bool IsMCUABI;
+ bool IsLinuxABI;
+ unsigned DefaultNumRegisterParameters;
+
+ static bool isRegisterSize(unsigned Size) {
+ return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
+ }
+
+ bool isHomogeneousAggregateBaseType(QualType Ty) const override {
+ // FIXME: Assumes vectorcall is in use.
+ return isX86VectorTypeForVectorCall(getContext(), Ty);
+ }
+
+ bool isHomogeneousAggregateSmallEnough(const Type *Ty,
+ uint64_t NumMembers) const override {
+ // FIXME: Assumes vectorcall is in use.
+ return isX86VectorCallAggregateSmallEnough(NumMembers);
+ }
+
+ bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;
+
+ /// getIndirectResult - Give a source type \arg Ty, return a suitable result
+ /// such that the argument will be passed in memory.
+ ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
+
+ ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;
+
+ /// Return the alignment to use for the given type on the stack.
+ unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
+
+ Class classify(QualType Ty) const;
+ ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State,
+ unsigned ArgIndex) const;
+
+ /// Updates the number of available free registers, returns
+ /// true if any registers were allocated.
+ bool updateFreeRegs(QualType Ty, CCState &State) const;
+
+ bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
+ bool &NeedsPadding) const;
+ bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;
+
+ bool canExpandIndirectArgument(QualType Ty) const;
+
+ /// Rewrite the function info so that all memory arguments use
+ /// inalloca.
+ void rewriteWithInAlloca(CGFunctionInfo &FI) const;
+
+ void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
+ CharUnits &StackOffset, ABIArgInfo &Info,
+ QualType Type) const;
+ void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;
+
+public:
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+
+ X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
+ bool RetSmallStructInRegABI, bool Win32StructABI,
+ unsigned NumRegisterParameters, bool SoftFloatABI)
+ : ABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
+ IsRetSmallStructInRegABI(RetSmallStructInRegABI),
+ IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
+ IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
+ IsLinuxABI(CGT.getTarget().getTriple().isOSLinux() ||
+ CGT.getTarget().getTriple().isOSCygMing()),
+ DefaultNumRegisterParameters(NumRegisterParameters) {}
+};
+
+class X86_32SwiftABIInfo : public SwiftABIInfo {
+public:
+ explicit X86_32SwiftABIInfo(CodeGenTypes &CGT)
+ : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/false) {}
+
+ bool shouldPassIndirectly(ArrayRef<llvm::Type *> ComponentTys,
+ bool AsReturnValue) const override {
+ // LLVM's x86-32 lowering currently only assigns up to three
+ // integer registers and three fp registers. Oddly, it'll use up to
+ // four vector registers for vectors, but those can overlap with the
+ // scalar registers.
+ return occupiesMoreThan(ComponentTys, /*total=*/3);
+ }
+};
+
+class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
+ bool RetSmallStructInRegABI, bool Win32StructABI,
+ unsigned NumRegisterParameters, bool SoftFloatABI)
+ : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>(
+ CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
+ NumRegisterParameters, SoftFloatABI)) {
+ SwiftInfo = std::make_unique<X86_32SwiftABIInfo>(CGT);
+ }
+
+ static bool isStructReturnInRegABI(
+ const llvm::Triple &Triple, const CodeGenOptions &Opts);
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override;
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
+ // Darwin uses different dwarf register numbers for EH.
+ if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
+ return 4;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override;
+
+ llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ StringRef Constraint,
+ llvm::Type* Ty) const override {
+ return X86AdjustInlineAsmType(CGF, Constraint, Ty);
+ }
+
+ void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
+ std::string &Constraints,
+ std::vector<llvm::Type *> &ResultRegTypes,
+ std::vector<llvm::Type *> &ResultTruncRegTypes,
+ std::vector<LValue> &ResultRegDests,
+ std::string &AsmString,
+ unsigned NumOutputs) const override;
+
+ StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
+ return "movl\t%ebp, %ebp"
+ "\t\t// marker for objc_retainAutoreleaseReturnValue";
+ }
+};
+
+}
+
+/// Rewrite input constraint references after adding some output constraints.
+/// In the case where there is one output and one input and we add one output,
+/// we need to replace all operand references greater than or equal to 1:
+/// mov $0, $1
+/// mov eax, $1
+/// The result will be:
+/// mov $0, $2
+/// mov eax, $2
+static void rewriteInputConstraintReferences(unsigned FirstIn,
+ unsigned NumNewOuts,
+ std::string &AsmString) {
+ std::string Buf;
+ llvm::raw_string_ostream OS(Buf);
+ size_t Pos = 0;
+ while (Pos < AsmString.size()) {
+ size_t DollarStart = AsmString.find('$', Pos);
+ if (DollarStart == std::string::npos)
+ DollarStart = AsmString.size();
+ size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
+ if (DollarEnd == std::string::npos)
+ DollarEnd = AsmString.size();
+ OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
+ Pos = DollarEnd;
+ size_t NumDollars = DollarEnd - DollarStart;
+ if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
+ // We have an operand reference.
+ size_t DigitStart = Pos;
+ if (AsmString[DigitStart] == '{') {
+ OS << '{';
+ ++DigitStart;
+ }
+ size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
+ if (DigitEnd == std::string::npos)
+ DigitEnd = AsmString.size();
+ StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
+ unsigned OperandIndex;
+ if (!OperandStr.getAsInteger(10, OperandIndex)) {
+ if (OperandIndex >= FirstIn)
+ OperandIndex += NumNewOuts;
+ OS << OperandIndex;
+ } else {
+ OS << OperandStr;
+ }
+ Pos = DigitEnd;
+ }
+ }
+ AsmString = std::move(OS.str());
+}
+
+/// Add output constraints for EAX:EDX because they are return registers.
+void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
+ CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
+ std::vector<llvm::Type *> &ResultRegTypes,
+ std::vector<llvm::Type *> &ResultTruncRegTypes,
+ std::vector<LValue> &ResultRegDests, std::string &AsmString,
+ unsigned NumOutputs) const {
+ uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());
+
+ // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
+ // larger.
+ if (!Constraints.empty())
+ Constraints += ',';
+ if (RetWidth <= 32) {
+ Constraints += "={eax}";
+ ResultRegTypes.push_back(CGF.Int32Ty);
+ } else {
+ // Use the 'A' constraint for EAX:EDX.
+ Constraints += "=A";
+ ResultRegTypes.push_back(CGF.Int64Ty);
+ }
+
+ // Truncate EAX or EAX:EDX to an integer of the appropriate size.
+ llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
+ ResultTruncRegTypes.push_back(CoerceTy);
+
+ // Coerce the integer by bitcasting the return slot pointer.
+ ReturnSlot.setAddress(ReturnSlot.getAddress(CGF).withElementType(CoerceTy));
+ ResultRegDests.push_back(ReturnSlot);
+
+ rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
+}
+
+/// shouldReturnTypeInRegister - Determine if the given type should be
+/// returned in a register (for the Darwin and MCU ABI).
+bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
+ ASTContext &Context) const {
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // For i386, type must be register sized.
+ // For the MCU ABI, it only needs to be <= 8-byte
+ if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
+ return false;
+
+ if (Ty->isVectorType()) {
+ // 64- and 128- bit vectors inside structures are not returned in
+ // registers.
+ if (Size == 64 || Size == 128)
+ return false;
+
+ return true;
+ }
+
+ // If this is a builtin, pointer, enum, complex type, member pointer, or
+ // member function pointer it is ok.
+ if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
+ Ty->isAnyComplexType() || Ty->isEnumeralType() ||
+ Ty->isBlockPointerType() || Ty->isMemberPointerType())
+ return true;
+
+ // Arrays are treated like records.
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
+ return shouldReturnTypeInRegister(AT->getElementType(), Context);
+
+ // Otherwise, it must be a record type.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT) return false;
+
+ // FIXME: Traverse bases here too.
+
+ // Structure types are passed in register if all fields would be
+ // passed in a register.
+ for (const auto *FD : RT->getDecl()->fields()) {
+ // Empty fields are ignored.
+ if (isEmptyField(Context, FD, true))
+ continue;
+
+ // Check fields recursively.
+ if (!shouldReturnTypeInRegister(FD->getType(), Context))
+ return false;
+ }
+ return true;
+}
+
+static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
+ // Treat complex types as the element type.
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>())
+ Ty = CTy->getElementType();
+
+ // Check for a type which we know has a simple scalar argument-passing
+ // convention without any padding. (We're specifically looking for 32
+ // and 64-bit integer and integer-equivalents, float, and double.)
+ if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
+ !Ty->isEnumeralType() && !Ty->isBlockPointerType())
+ return false;
+
+ uint64_t Size = Context.getTypeSize(Ty);
+ return Size == 32 || Size == 64;
+}
+
+static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
+ uint64_t &Size) {
+ for (const auto *FD : RD->fields()) {
+ // Scalar arguments on the stack get 4 byte alignment on x86. If the
+ // argument is smaller than 32-bits, expanding the struct will create
+ // alignment padding.
+ if (!is32Or64BitBasicType(FD->getType(), Context))
+ return false;
+
+ // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
+ // how to expand them yet, and the predicate for telling if a bitfield still
+ // counts as "basic" is more complicated than what we were doing previously.
+ if (FD->isBitField())
+ return false;
+
+ Size += Context.getTypeSize(FD->getType());
+ }
+ return true;
+}
+
+static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
+ uint64_t &Size) {
+ // Don't do this if there are any non-empty bases.
+ for (const CXXBaseSpecifier &Base : RD->bases()) {
+ if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
+ Size))
+ return false;
+ }
+ if (!addFieldSizes(Context, RD, Size))
+ return false;
+ return true;
+}
+
+/// Test whether an argument type which is to be passed indirectly (on the
+/// stack) would have the equivalent layout if it was expanded into separate
+/// arguments. If so, we prefer to do the latter to avoid inhibiting
+/// optimizations.
+bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
+ // We can only expand structure types.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT)
+ return false;
+ const RecordDecl *RD = RT->getDecl();
+ uint64_t Size = 0;
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ if (!IsWin32StructABI) {
+ // On non-Windows, we have to conservatively match our old bitcode
+ // prototypes in order to be ABI-compatible at the bitcode level.
+ if (!CXXRD->isCLike())
+ return false;
+ } else {
+ // Don't do this for dynamic classes.
+ if (CXXRD->isDynamicClass())
+ return false;
+ }
+ if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
+ return false;
+ } else {
+ if (!addFieldSizes(getContext(), RD, Size))
+ return false;
+ }
+
+ // We can do this if there was no alignment padding.
+ return Size == getContext().getTypeSize(Ty);
+}
+
+ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const {
+ // If the return value is indirect, then the hidden argument is consuming one
+ // integer register.
+ if (State.FreeRegs) {
+ --State.FreeRegs;
+ if (!IsMCUABI)
+ return getNaturalAlignIndirectInReg(RetTy);
+ }
+ return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
+}
+
+ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
+ CCState &State) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ const Type *Base = nullptr;
+ uint64_t NumElts = 0;
+ if ((State.CC == llvm::CallingConv::X86_VectorCall ||
+ State.CC == llvm::CallingConv::X86_RegCall) &&
+ isHomogeneousAggregate(RetTy, Base, NumElts)) {
+ // The LLVM struct type for such an aggregate should lower properly.
+ return ABIArgInfo::getDirect();
+ }
+
+ if (const VectorType *VT = RetTy->getAs<VectorType>()) {
+ // On Darwin, some vectors are returned in registers.
+ if (IsDarwinVectorABI) {
+ uint64_t Size = getContext().getTypeSize(RetTy);
+
+ // 128-bit vectors are a special case; they are returned in
+ // registers and we need to make sure to pick a type the LLVM
+ // backend will like.
+ if (Size == 128)
+ return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
+ llvm::Type::getInt64Ty(getVMContext()), 2));
+
+ // Always return in register if it fits in a general purpose
+ // register, or if it is 64 bits and has a single element.
+ if ((Size == 8 || Size == 16 || Size == 32) ||
+ (Size == 64 && VT->getNumElements() == 1))
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
+
+ return getIndirectReturnResult(RetTy, State);
+ }
+
+ return ABIArgInfo::getDirect();
+ }
+
+ if (isAggregateTypeForABI(RetTy)) {
+ if (const RecordType *RT = RetTy->getAs<RecordType>()) {
+ // Structures with flexible arrays are always indirect.
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return getIndirectReturnResult(RetTy, State);
+ }
+
+ // If specified, structs and unions are always indirect.
+ if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
+ return getIndirectReturnResult(RetTy, State);
+
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), RetTy, true))
+ return ABIArgInfo::getIgnore();
+
+ // Return complex of _Float16 as <2 x half> so the backend will use xmm0.
+ if (const ComplexType *CT = RetTy->getAs<ComplexType>()) {
+ QualType ET = getContext().getCanonicalType(CT->getElementType());
+ if (ET->isFloat16Type())
+ return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
+ llvm::Type::getHalfTy(getVMContext()), 2));
+ }
+
+ // Small structures which are register sized are generally returned
+ // in a register.
+ if (shouldReturnTypeInRegister(RetTy, getContext())) {
+ uint64_t Size = getContext().getTypeSize(RetTy);
+
+ // As a special-case, if the struct is a "single-element" struct, and
+ // the field is of type "float" or "double", return it in a
+ // floating-point register. (MSVC does not apply this special case.)
+ // We apply a similar transformation for pointer types to improve the
+ // quality of the generated IR.
+ if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
+ if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
+ || SeltTy->hasPointerRepresentation())
+ return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+
+ // FIXME: We should be able to narrow this integer in cases with dead
+ // padding.
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
+ }
+
+ return getIndirectReturnResult(RetTy, State);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ if (const auto *EIT = RetTy->getAs<BitIntType>())
+ if (EIT->getNumBits() > 64)
+ return getIndirectReturnResult(RetTy, State);
+
+ return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
+ : ABIArgInfo::getDirect());
+}
+
+unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
+ unsigned Align) const {
+ // Otherwise, if the alignment is less than or equal to the minimum ABI
+ // alignment, just use the default; the backend will handle this.
+ if (Align <= MinABIStackAlignInBytes)
+ return 0; // Use default alignment.
+
+ if (IsLinuxABI) {
+ // Exclude other System V OS (e.g Darwin, PS4 and FreeBSD) since we don't
+ // want to spend any effort dealing with the ramifications of ABI breaks.
+ //
+ // If the vector type is __m128/__m256/__m512, return the default alignment.
+ if (Ty->isVectorType() && (Align == 16 || Align == 32 || Align == 64))
+ return Align;
+ }
+ // On non-Darwin, the stack type alignment is always 4.
+ if (!IsDarwinVectorABI) {
+ // Set explicit alignment, since we may need to realign the top.
+ return MinABIStackAlignInBytes;
+ }
+
+ // Otherwise, if the type contains an SSE vector type, the alignment is 16.
+ if (Align >= 16 && (isSIMDVectorType(getContext(), Ty) ||
+ isRecordWithSIMDVectorType(getContext(), Ty)))
+ return 16;
+
+ return MinABIStackAlignInBytes;
+}
+
+ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
+ CCState &State) const {
+ if (!ByVal) {
+ if (State.FreeRegs) {
+ --State.FreeRegs; // Non-byval indirects just use one pointer.
+ if (!IsMCUABI)
+ return getNaturalAlignIndirectInReg(Ty);
+ }
+ return getNaturalAlignIndirect(Ty, false);
+ }
+
+ // Compute the byval alignment.
+ unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
+ unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
+ if (StackAlign == 0)
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);
+
+ // If the stack alignment is less than the type alignment, realign the
+ // argument.
+ bool Realign = TypeAlign > StackAlign;
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
+ /*ByVal=*/true, Realign);
+}
+
+X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
+ const Type *T = isSingleElementStruct(Ty, getContext());
+ if (!T)
+ T = Ty.getTypePtr();
+
+ if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
+ BuiltinType::Kind K = BT->getKind();
+ if (K == BuiltinType::Float || K == BuiltinType::Double)
+ return Float;
+ }
+ return Integer;
+}
+
+bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
+ if (!IsSoftFloatABI) {
+ Class C = classify(Ty);
+ if (C == Float)
+ return false;
+ }
+
+ unsigned Size = getContext().getTypeSize(Ty);
+ unsigned SizeInRegs = (Size + 31) / 32;
+
+ if (SizeInRegs == 0)
+ return false;
+
+ if (!IsMCUABI) {
+ if (SizeInRegs > State.FreeRegs) {
+ State.FreeRegs = 0;
+ return false;
+ }
+ } else {
+ // The MCU psABI allows passing parameters in-reg even if there are
+ // earlier parameters that are passed on the stack. Also,
+ // it does not allow passing >8-byte structs in-register,
+ // even if there are 3 free registers available.
+ if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
+ return false;
+ }
+
+ State.FreeRegs -= SizeInRegs;
+ return true;
+}
+
+bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
+ bool &InReg,
+ bool &NeedsPadding) const {
+ // On Windows, aggregates other than HFAs are never passed in registers, and
+ // they do not consume register slots. Homogenous floating-point aggregates
+ // (HFAs) have already been dealt with at this point.
+ if (IsWin32StructABI && isAggregateTypeForABI(Ty))
+ return false;
+
+ NeedsPadding = false;
+ InReg = !IsMCUABI;
+
+ if (!updateFreeRegs(Ty, State))
+ return false;
+
+ if (IsMCUABI)
+ return true;
+
+ if (State.CC == llvm::CallingConv::X86_FastCall ||
+ State.CC == llvm::CallingConv::X86_VectorCall ||
+ State.CC == llvm::CallingConv::X86_RegCall) {
+ if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
+ NeedsPadding = true;
+
+ return false;
+ }
+
+ return true;
+}
+
+bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
+ bool IsPtrOrInt = (getContext().getTypeSize(Ty) <= 32) &&
+ (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
+ Ty->isReferenceType());
+
+ if (!IsPtrOrInt && (State.CC == llvm::CallingConv::X86_FastCall ||
+ State.CC == llvm::CallingConv::X86_VectorCall))
+ return false;
+
+ if (!updateFreeRegs(Ty, State))
+ return false;
+
+ if (!IsPtrOrInt && State.CC == llvm::CallingConv::X86_RegCall)
+ return false;
+
+ // Return true to apply inreg to all legal parameters except for MCU targets.
+ return !IsMCUABI;
+}
+
+void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const {
+ // Vectorcall x86 works subtly different than in x64, so the format is
+ // a bit different than the x64 version. First, all vector types (not HVAs)
+ // are assigned, with the first 6 ending up in the [XYZ]MM0-5 registers.
+ // This differs from the x64 implementation, where the first 6 by INDEX get
+ // registers.
+ // In the second pass over the arguments, HVAs are passed in the remaining
+ // vector registers if possible, or indirectly by address. The address will be
+ // passed in ECX/EDX if available. Any other arguments are passed according to
+ // the usual fastcall rules.
+ MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
+ for (int I = 0, E = Args.size(); I < E; ++I) {
+ const Type *Base = nullptr;
+ uint64_t NumElts = 0;
+ const QualType &Ty = Args[I].type;
+ if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
+ isHomogeneousAggregate(Ty, Base, NumElts)) {
+ if (State.FreeSSERegs >= NumElts) {
+ State.FreeSSERegs -= NumElts;
+ Args[I].info = ABIArgInfo::getDirectInReg();
+ State.IsPreassigned.set(I);
+ }
+ }
+ }
+}
+
+ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State,
+ unsigned ArgIndex) const {
+ // FIXME: Set alignment on indirect arguments.
+ bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
+ bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
+ bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;
+
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+ TypeInfo TI = getContext().getTypeInfo(Ty);
+
+ // Check with the C++ ABI first.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (RT) {
+ CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
+ if (RAA == CGCXXABI::RAA_Indirect) {
+ return getIndirectResult(Ty, false, State);
+ } else if (State.IsDelegateCall) {
+ // Avoid having different alignments on delegate call args by always
+ // setting the alignment to 4, which is what we do for inallocas.
+ ABIArgInfo Res = getIndirectResult(Ty, false, State);
+ Res.setIndirectAlign(CharUnits::fromQuantity(4));
+ return Res;
+ } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
+ // The field index doesn't matter, we'll fix it up later.
+ return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
+ }
+ }
+
+ // Regcall uses the concept of a homogenous vector aggregate, similar
+ // to other targets.
+ const Type *Base = nullptr;
+ uint64_t NumElts = 0;
+ if ((IsRegCall || IsVectorCall) &&
+ isHomogeneousAggregate(Ty, Base, NumElts)) {
+ if (State.FreeSSERegs >= NumElts) {
+ State.FreeSSERegs -= NumElts;
+
+ // Vectorcall passes HVAs directly and does not flatten them, but regcall
+ // does.
+ if (IsVectorCall)
+ return getDirectX86Hva();
+
+ if (Ty->isBuiltinType() || Ty->isVectorType())
+ return ABIArgInfo::getDirect();
+ return ABIArgInfo::getExpand();
+ }
+ return getIndirectResult(Ty, /*ByVal=*/false, State);
+ }
+
+ if (isAggregateTypeForABI(Ty)) {
+ // Structures with flexible arrays are always indirect.
+ // FIXME: This should not be byval!
+ if (RT && RT->getDecl()->hasFlexibleArrayMember())
+ return getIndirectResult(Ty, true, State);
+
+ // Ignore empty structs/unions on non-Windows.
+ if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ llvm::LLVMContext &LLVMContext = getVMContext();
+ llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
+ bool NeedsPadding = false;
+ bool InReg;
+ if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
+ unsigned SizeInRegs = (TI.Width + 31) / 32;
+ SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
+ llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
+ if (InReg)
+ return ABIArgInfo::getDirectInReg(Result);
+ else
+ return ABIArgInfo::getDirect(Result);
+ }
+ llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
+
+ // Pass over-aligned aggregates to non-variadic functions on Windows
+ // indirectly. This behavior was added in MSVC 2015. Use the required
+ // alignment from the record layout, since that may be less than the
+ // regular type alignment, and types with required alignment of less than 4
+ // bytes are not passed indirectly.
+ if (IsWin32StructABI && State.Required.isRequiredArg(ArgIndex)) {
+ unsigned AlignInBits = 0;
+ if (RT) {
+ const ASTRecordLayout &Layout =
+ getContext().getASTRecordLayout(RT->getDecl());
+ AlignInBits = getContext().toBits(Layout.getRequiredAlignment());
+ } else if (TI.isAlignRequired()) {
+ AlignInBits = TI.Align;
+ }
+ if (AlignInBits > 32)
+ return getIndirectResult(Ty, /*ByVal=*/false, State);
+ }
+
+ // Expand small (<= 128-bit) record types when we know that the stack layout
+ // of those arguments will match the struct. This is important because the
+ // LLVM backend isn't smart enough to remove byval, which inhibits many
+ // optimizations.
+ // Don't do this for the MCU if there are still free integer registers
+ // (see X86_64 ABI for full explanation).
+ if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
+ canExpandIndirectArgument(Ty))
+ return ABIArgInfo::getExpandWithPadding(
+ IsFastCall || IsVectorCall || IsRegCall, PaddingType);
+
+ return getIndirectResult(Ty, true, State);
+ }
+
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ // On Windows, vectors are passed directly if registers are available, or
+ // indirectly if not. This avoids the need to align argument memory. Pass
+ // user-defined vector types larger than 512 bits indirectly for simplicity.
+ if (IsWin32StructABI) {
+ if (TI.Width <= 512 && State.FreeSSERegs > 0) {
+ --State.FreeSSERegs;
+ return ABIArgInfo::getDirectInReg();
+ }
+ return getIndirectResult(Ty, /*ByVal=*/false, State);
+ }
+
+ // On Darwin, some vectors are passed in memory, we handle this by passing
+ // it as an i8/i16/i32/i64.
+ if (IsDarwinVectorABI) {
+ if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) ||
+ (TI.Width == 64 && VT->getNumElements() == 1))
+ return ABIArgInfo::getDirect(
+ llvm::IntegerType::get(getVMContext(), TI.Width));
+ }
+
+ if (IsX86_MMXType(CGT.ConvertType(Ty)))
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));
+
+ return ABIArgInfo::getDirect();
+ }
+
+
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ bool InReg = shouldPrimitiveUseInReg(Ty, State);
+
+ if (isPromotableIntegerTypeForABI(Ty)) {
+ if (InReg)
+ return ABIArgInfo::getExtendInReg(Ty);
+ return ABIArgInfo::getExtend(Ty);
+ }
+
+ if (const auto *EIT = Ty->getAs<BitIntType>()) {
+ if (EIT->getNumBits() <= 64) {
+ if (InReg)
+ return ABIArgInfo::getDirectInReg();
+ return ABIArgInfo::getDirect();
+ }
+ return getIndirectResult(Ty, /*ByVal=*/false, State);
+ }
+
+ if (InReg)
+ return ABIArgInfo::getDirectInReg();
+ return ABIArgInfo::getDirect();
+}
+
+void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ CCState State(FI);
+ if (IsMCUABI)
+ State.FreeRegs = 3;
+ else if (State.CC == llvm::CallingConv::X86_FastCall) {
+ State.FreeRegs = 2;
+ State.FreeSSERegs = 3;
+ } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
+ State.FreeRegs = 2;
+ State.FreeSSERegs = 6;
+ } else if (FI.getHasRegParm())
+ State.FreeRegs = FI.getRegParm();
+ else if (State.CC == llvm::CallingConv::X86_RegCall) {
+ State.FreeRegs = 5;
+ State.FreeSSERegs = 8;
+ } else if (IsWin32StructABI) {
+ // Since MSVC 2015, the first three SSE vectors have been passed in
+ // registers. The rest are passed indirectly.
+ State.FreeRegs = DefaultNumRegisterParameters;
+ State.FreeSSERegs = 3;
+ } else
+ State.FreeRegs = DefaultNumRegisterParameters;
+
+ if (!::classifyReturnType(getCXXABI(), FI, *this)) {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
+ } else if (FI.getReturnInfo().isIndirect()) {
+ // The C++ ABI is not aware of register usage, so we have to check if the
+ // return value was sret and put it in a register ourselves if appropriate.
+ if (State.FreeRegs) {
+ --State.FreeRegs; // The sret parameter consumes a register.
+ if (!IsMCUABI)
+ FI.getReturnInfo().setInReg(true);
+ }
+ }
+
+ // The chain argument effectively gives us another free register.
+ if (FI.isChainCall())
+ ++State.FreeRegs;
+
+ // For vectorcall, do a first pass over the arguments, assigning FP and vector
+ // arguments to XMM registers as available.
+ if (State.CC == llvm::CallingConv::X86_VectorCall)
+ runVectorCallFirstPass(FI, State);
+
+ bool UsedInAlloca = false;
+ MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
+ for (unsigned I = 0, E = Args.size(); I < E; ++I) {
+ // Skip arguments that have already been assigned.
+ if (State.IsPreassigned.test(I))
+ continue;
+
+ Args[I].info =
+ classifyArgumentType(Args[I].type, State, I);
+ UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca);
+ }
+
+ // If we needed to use inalloca for any argument, do a second pass and rewrite
+ // all the memory arguments to use inalloca.
+ if (UsedInAlloca)
+ rewriteWithInAlloca(FI);
+}
+
+void
+X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
+ CharUnits &StackOffset, ABIArgInfo &Info,
+ QualType Type) const {
+ // Arguments are always 4-byte-aligned.
+ CharUnits WordSize = CharUnits::fromQuantity(4);
+ assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");
+
+ // sret pointers and indirect things will require an extra pointer
+ // indirection, unless they are byval. Most things are byval, and will not
+ // require this indirection.
+ bool IsIndirect = false;
+ if (Info.isIndirect() && !Info.getIndirectByVal())
+ IsIndirect = true;
+ Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect);
+ llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);
+ if (IsIndirect)
+ LLTy = llvm::PointerType::getUnqual(getVMContext());
+ FrameFields.push_back(LLTy);
+ StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);
+
+ // Insert padding bytes to respect alignment.
+ CharUnits FieldEnd = StackOffset;
+ StackOffset = FieldEnd.alignTo(WordSize);
+ if (StackOffset != FieldEnd) {
+ CharUnits NumBytes = StackOffset - FieldEnd;
+ llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
+ Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
+ FrameFields.push_back(Ty);
+ }
+}
+
+static bool isArgInAlloca(const ABIArgInfo &Info) {
+ // Leave ignored and inreg arguments alone.
+ switch (Info.getKind()) {
+ case ABIArgInfo::InAlloca:
+ return true;
+ case ABIArgInfo::Ignore:
+ case ABIArgInfo::IndirectAliased:
+ return false;
+ case ABIArgInfo::Indirect:
+ case ABIArgInfo::Direct:
+ case ABIArgInfo::Extend:
+ return !Info.getInReg();
+ case ABIArgInfo::Expand:
+ case ABIArgInfo::CoerceAndExpand:
+ // These are aggregate types which are never passed in registers when
+ // inalloca is involved.
+ return true;
+ }
+ llvm_unreachable("invalid enum");
+}
+
+void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
+ assert(IsWin32StructABI && "inalloca only supported on win32");
+
+ // Build a packed struct type for all of the arguments in memory.
+ SmallVector<llvm::Type *, 6> FrameFields;
+
+ // The stack alignment is always 4.
+ CharUnits StackAlign = CharUnits::fromQuantity(4);
+
+ CharUnits StackOffset;
+ CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
+
+ // Put 'this' into the struct before 'sret', if necessary.
+ bool IsThisCall =
+ FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
+ ABIArgInfo &Ret = FI.getReturnInfo();
+ if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
+ isArgInAlloca(I->info)) {
+ addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
+ ++I;
+ }
+
+ // Put the sret parameter into the inalloca struct if it's in memory.
+ if (Ret.isIndirect() && !Ret.getInReg()) {
+ addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType());
+ // On Windows, the hidden sret parameter is always returned in eax.
+ Ret.setInAllocaSRet(IsWin32StructABI);
+ }
+
+ // Skip the 'this' parameter in ecx.
+ if (IsThisCall)
+ ++I;
+
+ // Put arguments passed in memory into the struct.
+ for (; I != E; ++I) {
+ if (isArgInAlloca(I->info))
+ addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
+ }
+
+ FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
+ /*isPacked=*/true),
+ StackAlign);
+}
+
+Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
+ Address VAListAddr, QualType Ty) const {
+
+ auto TypeInfo = getContext().getTypeInfoInChars(Ty);
+
+ // x86-32 changes the alignment of certain arguments on the stack.
+ //
+ // Just messing with TypeInfo like this works because we never pass
+ // anything indirectly.
+ TypeInfo.Align = CharUnits::fromQuantity(
+ getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
+ TypeInfo, CharUnits::fromQuantity(4),
+ /*AllowHigherAlign*/ true);
+}
+
+bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
+ const llvm::Triple &Triple, const CodeGenOptions &Opts) {
+ assert(Triple.getArch() == llvm::Triple::x86);
+
+ switch (Opts.getStructReturnConvention()) {
+ case CodeGenOptions::SRCK_Default:
+ break;
+ case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
+ return false;
+ case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
+ return true;
+ }
+
+ if (Triple.isOSDarwin() || Triple.isOSIAMCU())
+ return true;
+
+ switch (Triple.getOS()) {
+ case llvm::Triple::DragonFly:
+ case llvm::Triple::FreeBSD:
+ case llvm::Triple::OpenBSD:
+ case llvm::Triple::Win32:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) {
+ if (!FD->hasAttr<AnyX86InterruptAttr>())
+ return;
+
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+ Fn->setCallingConv(llvm::CallingConv::X86_INTR);
+ if (FD->getNumParams() == 0)
+ return;
+
+ auto PtrTy = cast<PointerType>(FD->getParamDecl(0)->getType());
+ llvm::Type *ByValTy = CGM.getTypes().ConvertType(PtrTy->getPointeeType());
+ llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(
+ Fn->getContext(), ByValTy);
+ Fn->addParamAttr(0, NewAttr);
+}
+
+void X86_32TargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
+ if (GV->isDeclaration())
+ return;
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+ if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+ Fn->addFnAttr("stackrealign");
+ }
+
+ addX86InterruptAttrs(FD, GV, CGM);
+ }
+}
+
+bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
+
+ // 0-7 are the eight integer registers; the order is different
+ // on Darwin (for EH), but the range is the same.
+ // 8 is %eip.
+ AssignToArrayRange(Builder, Address, Four8, 0, 8);
+
+ if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
+ // 12-16 are st(0..4). Not sure why we stop at 4.
+ // These have size 16, which is sizeof(long double) on
+ // platforms with 8-byte alignment for that type.
+ llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
+ AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
+
+ } else {
+ // 9 is %eflags, which doesn't get a size on Darwin for some
+ // reason.
+ Builder.CreateAlignedStore(
+ Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
+ CharUnits::One());
+
+ // 11-16 are st(0..5). Not sure why we stop at 5.
+ // These have size 12, which is sizeof(long double) on
+ // platforms with 4-byte alignment for that type.
+ llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
+ AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
+ }
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// X86-64 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+
+namespace {
+
+/// \p returns the size in bits of the largest (native) vector for \p AVXLevel.
+static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
+ switch (AVXLevel) {
+ case X86AVXABILevel::AVX512:
+ return 512;
+ case X86AVXABILevel::AVX:
+ return 256;
+ case X86AVXABILevel::None:
+ return 128;
+ }
+ llvm_unreachable("Unknown AVXLevel");
+}
+
+/// X86_64ABIInfo - The X86_64 ABI information.
+class X86_64ABIInfo : public ABIInfo {
+ enum Class {
+ Integer = 0,
+ SSE,
+ SSEUp,
+ X87,
+ X87Up,
+ ComplexX87,
+ NoClass,
+ Memory
+ };
+
+ /// merge - Implement the X86_64 ABI merging algorithm.
+ ///
+ /// Merge an accumulating classification \arg Accum with a field
+ /// classification \arg Field.
+ ///
+ /// \param Accum - The accumulating classification. This should
+ /// always be either NoClass or the result of a previous merge
+ /// call. In addition, this should never be Memory (the caller
+ /// should just return Memory for the aggregate).
+ static Class merge(Class Accum, Class Field);
+
+ /// postMerge - Implement the X86_64 ABI post merging algorithm.
+ ///
+ /// Post merger cleanup, reduces a malformed Hi and Lo pair to
+ /// final MEMORY or SSE classes when necessary.
+ ///
+ /// \param AggregateSize - The size of the current aggregate in
+ /// the classification process.
+ ///
+ /// \param Lo - The classification for the parts of the type
+ /// residing in the low word of the containing object.
+ ///
+ /// \param Hi - The classification for the parts of the type
+ /// residing in the higher words of the containing object.
+ ///
+ void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
+
+ /// classify - Determine the x86_64 register classes in which the
+ /// given type T should be passed.
+ ///
+ /// \param Lo - The classification for the parts of the type
+ /// residing in the low word of the containing object.
+ ///
+ /// \param Hi - The classification for the parts of the type
+ /// residing in the high word of the containing object.
+ ///
+ /// \param OffsetBase - The bit offset of this type in the
+ /// containing object. Some parameters are classified different
+ /// depending on whether they straddle an eightbyte boundary.
+ ///
+ /// \param isNamedArg - Whether the argument in question is a "named"
+ /// argument, as used in AMD64-ABI 3.5.7.
+ ///
+ /// \param IsRegCall - Whether the calling conversion is regcall.
+ ///
+ /// If a word is unused its result will be NoClass; if a type should
+ /// be passed in Memory then at least the classification of \arg Lo
+ /// will be Memory.
+ ///
+ /// The \arg Lo class will be NoClass iff the argument is ignored.
+ ///
+ /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
+ /// also be ComplexX87.
+ void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
+ bool isNamedArg, bool IsRegCall = false) const;
+
+ llvm::Type *GetByteVectorType(QualType Ty) const;
+ llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
+ unsigned IROffset, QualType SourceTy,
+ unsigned SourceOffset) const;
+ llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
+ unsigned IROffset, QualType SourceTy,
+ unsigned SourceOffset) const;
+
+ /// getIndirectResult - Give a source type \arg Ty, return a suitable result
+ /// such that the argument will be returned in memory.
+ ABIArgInfo getIndirectReturnResult(QualType Ty) const;
+
+ /// getIndirectResult - Give a source type \arg Ty, return a suitable result
+ /// such that the argument will be passed in memory.
+ ///
+ /// \param freeIntRegs - The number of free integer registers remaining
+ /// available.
+ ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+
+ ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
+ unsigned &neededInt, unsigned &neededSSE,
+ bool isNamedArg,
+ bool IsRegCall = false) const;
+
+ ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
+ unsigned &NeededSSE,
+ unsigned &MaxVectorWidth) const;
+
+ ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
+ unsigned &NeededSSE,
+ unsigned &MaxVectorWidth) const;
+
+ bool IsIllegalVectorType(QualType Ty) const;
+
+ /// The 0.98 ABI revision clarified a lot of ambiguities,
+ /// unfortunately in ways that were not always consistent with
+ /// certain previous compilers. In particular, platforms which
+ /// required strict binary compatibility with older versions of GCC
+ /// may need to exempt themselves.
+ bool honorsRevision0_98() const {
+ return !getTarget().getTriple().isOSDarwin();
+ }
+
+ /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to
+ /// classify it as INTEGER (for compatibility with older clang compilers).
+ bool classifyIntegerMMXAsSSE() const {
+ // Clang <= 3.8 did not do this.
+ if (getContext().getLangOpts().getClangABICompat() <=
+ LangOptions::ClangABI::Ver3_8)
+ return false;
+
+ const llvm::Triple &Triple = getTarget().getTriple();
+ if (Triple.isOSDarwin() || Triple.isPS() || Triple.isOSFreeBSD())
+ return false;
+ return true;
+ }
+
+ // GCC classifies vectors of __int128 as memory.
+ bool passInt128VectorsInMem() const {
+ // Clang <= 9.0 did not do this.
+ if (getContext().getLangOpts().getClangABICompat() <=
+ LangOptions::ClangABI::Ver9)
+ return false;
+
+ const llvm::Triple &T = getTarget().getTriple();
+ return T.isOSLinux() || T.isOSNetBSD();
+ }
+
+ X86AVXABILevel AVXLevel;
+ // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
+ // 64-bit hardware.
+ bool Has64BitPointers;
+
+public:
+ X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
+ : ABIInfo(CGT), AVXLevel(AVXLevel),
+ Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {}
+
+ bool isPassedUsingAVXType(QualType type) const {
+ unsigned neededInt, neededSSE;
+ // The freeIntRegs argument doesn't matter here.
+ ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
+ /*isNamedArg*/true);
+ if (info.isDirect()) {
+ llvm::Type *ty = info.getCoerceToType();
+ if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
+ return vectorTy->getPrimitiveSizeInBits().getFixedValue() > 128;
+ }
+ return false;
+ }
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+ Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+
+ bool has64BitPointers() const {
+ return Has64BitPointers;
+ }
+};
+
+/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
+class WinX86_64ABIInfo : public ABIInfo {
+public:
+ WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
+ : ABIInfo(CGT), AVXLevel(AVXLevel),
+ IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+
+ bool isHomogeneousAggregateBaseType(QualType Ty) const override {
+ // FIXME: Assumes vectorcall is in use.
+ return isX86VectorTypeForVectorCall(getContext(), Ty);
+ }
+
+ bool isHomogeneousAggregateSmallEnough(const Type *Ty,
+ uint64_t NumMembers) const override {
+ // FIXME: Assumes vectorcall is in use.
+ return isX86VectorCallAggregateSmallEnough(NumMembers);
+ }
+
+private:
+ ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
+ bool IsVectorCall, bool IsRegCall) const;
+ ABIArgInfo reclassifyHvaArgForVectorCall(QualType Ty, unsigned &FreeSSERegs,
+ const ABIArgInfo &current) const;
+
+ X86AVXABILevel AVXLevel;
+
+ bool IsMingw64;
+};
+
+class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
+ : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {
+ SwiftInfo =
+ std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/true);
+ }
+
+ /// Disable tail call on x86-64. The epilogue code before the tail jump blocks
+ /// autoreleaseRV/retainRV and autoreleaseRV/unsafeClaimRV optimizations.
+ bool markARCOptimizedReturnCallsAsNoTail() const override { return true; }
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
+ return 7;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override {
+ llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
+
+ // 0-15 are the 16 integer registers.
+ // 16 is %rip.
+ AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
+ return false;
+ }
+
+ llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ StringRef Constraint,
+ llvm::Type* Ty) const override {
+ return X86AdjustInlineAsmType(CGF, Constraint, Ty);
+ }
+
+ bool isNoProtoCallVariadic(const CallArgList &args,
+ const FunctionNoProtoType *fnType) const override {
+ // The default CC on x86-64 sets %al to the number of SSA
+ // registers used, and GCC sets this when calling an unprototyped
+ // function, so we override the default behavior. However, don't do
+ // that when AVX types are involved: the ABI explicitly states it is
+ // undefined, and it doesn't work in practice because of how the ABI
+ // defines varargs anyway.
+ if (fnType->getCallConv() == CC_C) {
+ bool HasAVXType = false;
+ for (CallArgList::const_iterator
+ it = args.begin(), ie = args.end(); it != ie; ++it) {
+ if (getABIInfo<X86_64ABIInfo>().isPassedUsingAVXType(it->Ty)) {
+ HasAVXType = true;
+ break;
+ }
+ }
+
+ if (!HasAVXType)
+ return true;
+ }
+
+ return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
+ }
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override {
+ if (GV->isDeclaration())
+ return;
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+ if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+ Fn->addFnAttr("stackrealign");
+ }
+
+ addX86InterruptAttrs(FD, GV, CGM);
+ }
+ }
+
+ void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
+ const FunctionDecl *Caller,
+ const FunctionDecl *Callee,
+ const CallArgList &Args) const override;
+};
+} // namespace
+
+static void initFeatureMaps(const ASTContext &Ctx,
+ llvm::StringMap<bool> &CallerMap,
+ const FunctionDecl *Caller,
+ llvm::StringMap<bool> &CalleeMap,
+ const FunctionDecl *Callee) {
+ if (CalleeMap.empty() && CallerMap.empty()) {
+ // The caller is potentially nullptr in the case where the call isn't in a
+ // function. In this case, the getFunctionFeatureMap ensures we just get
+ // the TU level setting (since it cannot be modified by 'target'..
+ Ctx.getFunctionFeatureMap(CallerMap, Caller);
+ Ctx.getFunctionFeatureMap(CalleeMap, Callee);
+ }
+}
+
+static bool checkAVXParamFeature(DiagnosticsEngine &Diag,
+ SourceLocation CallLoc,
+ const llvm::StringMap<bool> &CallerMap,
+ const llvm::StringMap<bool> &CalleeMap,
+ QualType Ty, StringRef Feature,
+ bool IsArgument) {
+ bool CallerHasFeat = CallerMap.lookup(Feature);
+ bool CalleeHasFeat = CalleeMap.lookup(Feature);
+ if (!CallerHasFeat && !CalleeHasFeat)
+ return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
+ << IsArgument << Ty << Feature;
+
+ // Mixing calling conventions here is very clearly an error.
+ if (!CallerHasFeat || !CalleeHasFeat)
+ return Diag.Report(CallLoc, diag::err_avx_calling_convention)
+ << IsArgument << Ty << Feature;
+
+ // Else, both caller and callee have the required feature, so there is no need
+ // to diagnose.
+ return false;
+}
+
+static bool checkAVX512ParamFeature(DiagnosticsEngine &Diag,
+ SourceLocation CallLoc,
+ const llvm::StringMap<bool> &CallerMap,
+ const llvm::StringMap<bool> &CalleeMap,
+ QualType Ty, bool IsArgument) {
+ bool Caller256 = CallerMap.lookup("avx512f") && !CallerMap.lookup("evex512");
+ bool Callee256 = CalleeMap.lookup("avx512f") && !CalleeMap.lookup("evex512");
+
+ // Forbid 512-bit or larger vector pass or return when we disabled ZMM
+ // instructions.
+ if (Caller256 || Callee256)
+ return Diag.Report(CallLoc, diag::err_avx_calling_convention)
+ << IsArgument << Ty << "evex512";
+
+ return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty,
+ "avx512f", IsArgument);
+}
+
+static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx,
+ SourceLocation CallLoc,
+ const llvm::StringMap<bool> &CallerMap,
+ const llvm::StringMap<bool> &CalleeMap, QualType Ty,
+ bool IsArgument) {
+ uint64_t Size = Ctx.getTypeSize(Ty);
+ if (Size > 256)
+ return checkAVX512ParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty,
+ IsArgument);
+
+ if (Size > 128)
+ return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, "avx",
+ IsArgument);
+
+ return false;
+}
+
+void X86_64TargetCodeGenInfo::checkFunctionCallABI(
+ CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
+ const FunctionDecl *Callee, const CallArgList &Args) const {
+ llvm::StringMap<bool> CallerMap;
+ llvm::StringMap<bool> CalleeMap;
+ unsigned ArgIndex = 0;
+
+ // We need to loop through the actual call arguments rather than the
+ // function's parameters, in case this variadic.
+ for (const CallArg &Arg : Args) {
+ // The "avx" feature changes how vectors >128 in size are passed. "avx512f"
+ // additionally changes how vectors >256 in size are passed. Like GCC, we
+ // warn when a function is called with an argument where this will change.
+ // Unlike GCC, we also error when it is an obvious ABI mismatch, that is,
+ // the caller and callee features are mismatched.
+ // Unfortunately, we cannot do this diagnostic in SEMA, since the callee can
+ // change its ABI with attribute-target after this call.
+ if (Arg.getType()->isVectorType() &&
+ CGM.getContext().getTypeSize(Arg.getType()) > 128) {
+ initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
+ QualType Ty = Arg.getType();
+ // The CallArg seems to have desugared the type already, so for clearer
+ // diagnostics, replace it with the type in the FunctionDecl if possible.
+ if (ArgIndex < Callee->getNumParams())
+ Ty = Callee->getParamDecl(ArgIndex)->getType();
+
+ if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
+ CalleeMap, Ty, /*IsArgument*/ true))
+ return;
+ }
+ ++ArgIndex;
+ }
+
+ // Check return always, as we don't have a good way of knowing in codegen
+ // whether this value is used, tail-called, etc.
+ if (Callee->getReturnType()->isVectorType() &&
+ CGM.getContext().getTypeSize(Callee->getReturnType()) > 128) {
+ initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
+ checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
+ CalleeMap, Callee->getReturnType(),
+ /*IsArgument*/ false);
+ }
+}
+
+std::string TargetCodeGenInfo::qualifyWindowsLibrary(StringRef Lib) {
+ // If the argument does not end in .lib, automatically add the suffix.
+ // If the argument contains a space, enclose it in quotes.
+ // This matches the behavior of MSVC.
+ bool Quote = Lib.contains(' ');
+ std::string ArgStr = Quote ? "\"" : "";
+ ArgStr += Lib;
+ if (!Lib.ends_with_insensitive(".lib") && !Lib.ends_with_insensitive(".a"))
+ ArgStr += ".lib";
+ ArgStr += Quote ? "\"" : "";
+ return ArgStr;
+}
+
+namespace {
+class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
+public:
+ WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
+ bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
+ unsigned NumRegisterParameters)
+ : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
+ Win32StructABI, NumRegisterParameters, false) {}
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override;
+
+ void getDependentLibraryOption(llvm::StringRef Lib,
+ llvm::SmallString<24> &Opt) const override {
+ Opt = "/DEFAULTLIB:";
+ Opt += qualifyWindowsLibrary(Lib);
+ }
+
+ void getDetectMismatchOption(llvm::StringRef Name,
+ llvm::StringRef Value,
+ llvm::SmallString<32> &Opt) const override {
+ Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
+ }
+};
+} // namespace
+
+void WinX86_32TargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
+ X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
+ if (GV->isDeclaration())
+ return;
+ addStackProbeTargetAttributes(D, GV, CGM);
+}
+
+namespace {
+class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
+ X86AVXABILevel AVXLevel)
+ : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {
+ SwiftInfo =
+ std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/true);
+ }
+
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const override;
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
+ return 7;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const override {
+ llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
+
+ // 0-15 are the 16 integer registers.
+ // 16 is %rip.
+ AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
+ return false;
+ }
+
+ void getDependentLibraryOption(llvm::StringRef Lib,
+ llvm::SmallString<24> &Opt) const override {
+ Opt = "/DEFAULTLIB:";
+ Opt += qualifyWindowsLibrary(Lib);
+ }
+
+ void getDetectMismatchOption(llvm::StringRef Name,
+ llvm::StringRef Value,
+ llvm::SmallString<32> &Opt) const override {
+ Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
+ }
+};
+} // namespace
+
+void WinX86_64TargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
+ TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
+ if (GV->isDeclaration())
+ return;
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+ if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+ Fn->addFnAttr("stackrealign");
+ }
+
+ addX86InterruptAttrs(FD, GV, CGM);
+ }
+
+ addStackProbeTargetAttributes(D, GV, CGM);
+}
+
+void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
+ Class &Hi) const {
+ // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
+ //
+ // (a) If one of the classes is Memory, the whole argument is passed in
+ // memory.
+ //
+ // (b) If X87UP is not preceded by X87, the whole argument is passed in
+ // memory.
+ //
+ // (c) If the size of the aggregate exceeds two eightbytes and the first
+ // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
+ // argument is passed in memory. NOTE: This is necessary to keep the
+ // ABI working for processors that don't support the __m256 type.
+ //
+ // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
+ //
+ // Some of these are enforced by the merging logic. Others can arise
+ // only with unions; for example:
+ // union { _Complex double; unsigned; }
+ //
+ // Note that clauses (b) and (c) were added in 0.98.
+ //
+ if (Hi == Memory)
+ Lo = Memory;
+ if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
+ Lo = Memory;
+ if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
+ Lo = Memory;
+ if (Hi == SSEUp && Lo != SSE)
+ Hi = SSE;
+}
+
+X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
+ // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
+ // classified recursively so that always two fields are
+ // considered. The resulting class is calculated according to
+ // the classes of the fields in the eightbyte:
+ //
+ // (a) If both classes are equal, this is the resulting class.
+ //
+ // (b) If one of the classes is NO_CLASS, the resulting class is
+ // the other class.
+ //
+ // (c) If one of the classes is MEMORY, the result is the MEMORY
+ // class.
+ //
+ // (d) If one of the classes is INTEGER, the result is the
+ // INTEGER.
+ //
+ // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
+ // MEMORY is used as class.
+ //
+ // (f) Otherwise class SSE is used.
+
+ // Accum should never be memory (we should have returned) or
+ // ComplexX87 (because this cannot be passed in a structure).
+ assert((Accum != Memory && Accum != ComplexX87) &&
+ "Invalid accumulated classification during merge.");
+ if (Accum == Field || Field == NoClass)
+ return Accum;
+ if (Field == Memory)
+ return Memory;
+ if (Accum == NoClass)
+ return Field;
+ if (Accum == Integer || Field == Integer)
+ return Integer;
+ if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
+ Accum == X87 || Accum == X87Up)
+ return Memory;
+ return SSE;
+}
+
+void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
+ Class &Hi, bool isNamedArg, bool IsRegCall) const {
+ // FIXME: This code can be simplified by introducing a simple value class for
+ // Class pairs with appropriate constructor methods for the various
+ // situations.
+
+ // FIXME: Some of the split computations are wrong; unaligned vectors
+ // shouldn't be passed in registers for example, so there is no chance they
+ // can straddle an eightbyte. Verify & simplify.
+
+ Lo = Hi = NoClass;
+
+ Class &Current = OffsetBase < 64 ? Lo : Hi;
+ Current = Memory;
+
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ BuiltinType::Kind k = BT->getKind();
+
+ if (k == BuiltinType::Void) {
+ Current = NoClass;
+ } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
+ Lo = Integer;
+ Hi = Integer;
+ } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
+ Current = Integer;
+ } else if (k == BuiltinType::Float || k == BuiltinType::Double ||
+ k == BuiltinType::Float16 || k == BuiltinType::BFloat16) {
+ Current = SSE;
+ } else if (k == BuiltinType::Float128) {
+ Lo = SSE;
+ Hi = SSEUp;
+ } else if (k == BuiltinType::LongDouble) {
+ const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
+ if (LDF == &llvm::APFloat::IEEEquad()) {
+ Lo = SSE;
+ Hi = SSEUp;
+ } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
+ Lo = X87;
+ Hi = X87Up;
+ } else if (LDF == &llvm::APFloat::IEEEdouble()) {
+ Current = SSE;
+ } else
+ llvm_unreachable("unexpected long double representation!");
+ }
+ // FIXME: _Decimal32 and _Decimal64 are SSE.
+ // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
+ return;
+ }
+
+ if (const EnumType *ET = Ty->getAs<EnumType>()) {
+ // Classify the underlying integer type.
+ classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
+ return;
+ }
+
+ if (Ty->hasPointerRepresentation()) {
+ Current = Integer;
+ return;
+ }
+
+ if (Ty->isMemberPointerType()) {
+ if (Ty->isMemberFunctionPointerType()) {
+ if (Has64BitPointers) {
+ // If Has64BitPointers, this is an {i64, i64}, so classify both
+ // Lo and Hi now.
+ Lo = Hi = Integer;
+ } else {
+ // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
+ // straddles an eightbyte boundary, Hi should be classified as well.
+ uint64_t EB_FuncPtr = (OffsetBase) / 64;
+ uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
+ if (EB_FuncPtr != EB_ThisAdj) {
+ Lo = Hi = Integer;
+ } else {
+ Current = Integer;
+ }
+ }
+ } else {
+ Current = Integer;
+ }
+ return;
+ }
+
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ uint64_t Size = getContext().getTypeSize(VT);
+ if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
+ // gcc passes the following as integer:
+ // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
+ // 2 bytes - <2 x char>, <1 x short>
+ // 1 byte - <1 x char>
+ Current = Integer;
+
+ // If this type crosses an eightbyte boundary, it should be
+ // split.
+ uint64_t EB_Lo = (OffsetBase) / 64;
+ uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
+ if (EB_Lo != EB_Hi)
+ Hi = Lo;
+ } else if (Size == 64) {
+ QualType ElementType = VT->getElementType();
+
+ // gcc passes <1 x double> in memory. :(
+ if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
+ return;
+
+ // gcc passes <1 x long long> as SSE but clang used to unconditionally
+ // pass them as integer. For platforms where clang is the de facto
+ // platform compiler, we must continue to use integer.
+ if (!classifyIntegerMMXAsSSE() &&
+ (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
+ ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
+ ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
+ ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
+ Current = Integer;
+ else
+ Current = SSE;
+
+ // If this type crosses an eightbyte boundary, it should be
+ // split.
+ if (OffsetBase && OffsetBase != 64)
+ Hi = Lo;
+ } else if (Size == 128 ||
+ (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
+ QualType ElementType = VT->getElementType();
+
+ // gcc passes 256 and 512 bit <X x __int128> vectors in memory. :(
+ if (passInt128VectorsInMem() && Size != 128 &&
+ (ElementType->isSpecificBuiltinType(BuiltinType::Int128) ||
+ ElementType->isSpecificBuiltinType(BuiltinType::UInt128)))
+ return;
+
+ // Arguments of 256-bits are split into four eightbyte chunks. The
+ // least significant one belongs to class SSE and all the others to class
+ // SSEUP. The original Lo and Hi design considers that types can't be
+ // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
+ // This design isn't correct for 256-bits, but since there're no cases
+ // where the upper parts would need to be inspected, avoid adding
+ // complexity and just consider Hi to match the 64-256 part.
+ //
+ // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
+ // registers if they are "named", i.e. not part of the "..." of a
+ // variadic function.
+ //
+ // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
+ // split into eight eightbyte chunks, one SSE and seven SSEUP.
+ Lo = SSE;
+ Hi = SSEUp;
+ }
+ return;
+ }
+
+ if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ QualType ET = getContext().getCanonicalType(CT->getElementType());
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (ET->isIntegralOrEnumerationType()) {
+ if (Size <= 64)
+ Current = Integer;
+ else if (Size <= 128)
+ Lo = Hi = Integer;
+ } else if (ET->isFloat16Type() || ET == getContext().FloatTy ||
+ ET->isBFloat16Type()) {
+ Current = SSE;
+ } else if (ET == getContext().DoubleTy) {
+ Lo = Hi = SSE;
+ } else if (ET == getContext().LongDoubleTy) {
+ const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
+ if (LDF == &llvm::APFloat::IEEEquad())
+ Current = Memory;
+ else if (LDF == &llvm::APFloat::x87DoubleExtended())
+ Current = ComplexX87;
+ else if (LDF == &llvm::APFloat::IEEEdouble())
+ Lo = Hi = SSE;
+ else
+ llvm_unreachable("unexpected long double representation!");
+ }
+
+ // If this complex type crosses an eightbyte boundary then it
+ // should be split.
+ uint64_t EB_Real = (OffsetBase) / 64;
+ uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
+ if (Hi == NoClass && EB_Real != EB_Imag)
+ Hi = Lo;
+
+ return;
+ }
+
+ if (const auto *EITy = Ty->getAs<BitIntType>()) {
+ if (EITy->getNumBits() <= 64)
+ Current = Integer;
+ else if (EITy->getNumBits() <= 128)
+ Lo = Hi = Integer;
+ // Larger values need to get passed in memory.
+ return;
+ }
+
+ if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
+ // Arrays are treated like structures.
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+ // than eight eightbytes, ..., it has class MEMORY.
+ // regcall ABI doesn't have limitation to an object. The only limitation
+ // is the free registers, which will be checked in computeInfo.
+ if (!IsRegCall && Size > 512)
+ return;
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
+ // fields, it has class MEMORY.
+ //
+ // Only need to check alignment of array base.
+ if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
+ return;
+
+ // Otherwise implement simplified merge. We could be smarter about
+ // this, but it isn't worth it and would be harder to verify.
+ Current = NoClass;
+ uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
+ uint64_t ArraySize = AT->getSize().getZExtValue();
+
+ // The only case a 256-bit wide vector could be used is when the array
+ // contains a single 256-bit element. Since Lo and Hi logic isn't extended
+ // to work for sizes wider than 128, early check and fallback to memory.
+ //
+ if (Size > 128 &&
+ (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
+ return;
+
+ for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
+ Class FieldLo, FieldHi;
+ classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+
+ postMerge(Size, Lo, Hi);
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
+ return;
+ }
+
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+ // than eight eightbytes, ..., it has class MEMORY.
+ if (Size > 512)
+ return;
+
+ // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
+ // copy constructor or a non-trivial destructor, it is passed by invisible
+ // reference.
+ if (getRecordArgABI(RT, getCXXABI()))
+ return;
+
+ const RecordDecl *RD = RT->getDecl();
+
+ // Assume variable sized types are passed in memory.
+ if (RD->hasFlexibleArrayMember())
+ return;
+
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+
+ // Reset Lo class, this will be recomputed.
+ Current = NoClass;
+
+ // If this is a C++ record, classify the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (const auto &I : CXXRD->bases()) {
+ assert(!I.isVirtual() && !I.getType()->isDependentType() &&
+ "Unexpected base class!");
+ const auto *Base =
+ cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+
+ // Classify this field.
+ //
+ // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
+ // single eightbyte, each is classified separately. Each eightbyte gets
+ // initialized to class NO_CLASS.
+ Class FieldLo, FieldHi;
+ uint64_t Offset =
+ OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
+ classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory) {
+ postMerge(Size, Lo, Hi);
+ return;
+ }
+ }
+ }
+
+ // Classify the fields one at a time, merging the results.
+ unsigned idx = 0;
+ bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
+ LangOptions::ClangABI::Ver11 ||
+ getContext().getTargetInfo().getTriple().isPS();
+ bool IsUnion = RT->isUnionType() && !UseClang11Compat;
+
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+ bool BitField = i->isBitField();
+
+ // Ignore padding bit-fields.
+ if (BitField && i->isUnnamedBitfield())
+ continue;
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
+ // eight eightbytes, or it contains unaligned fields, it has class MEMORY.
+ //
+ // The only case a 256-bit or a 512-bit wide vector could be used is when
+ // the struct contains a single 256-bit or 512-bit element. Early check
+ // and fallback to memory.
+ //
+ // FIXME: Extended the Lo and Hi logic properly to work for size wider
+ // than 128.
+ if (Size > 128 &&
+ ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
+ Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
+ Lo = Memory;
+ postMerge(Size, Lo, Hi);
+ return;
+ }
+ // Note, skip this test for bit-fields, see below.
+ if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
+ Lo = Memory;
+ postMerge(Size, Lo, Hi);
+ return;
+ }
+
+ // Classify this field.
+ //
+ // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
+ // exceeds a single eightbyte, each is classified
+ // separately. Each eightbyte gets initialized to class
+ // NO_CLASS.
+ Class FieldLo, FieldHi;
+
+ // Bit-fields require special handling, they do not force the
+ // structure to be passed in memory even if unaligned, and
+ // therefore they can straddle an eightbyte.
+ if (BitField) {
+ assert(!i->isUnnamedBitfield());
+ uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+ uint64_t Size = i->getBitWidthValue(getContext());
+
+ uint64_t EB_Lo = Offset / 64;
+ uint64_t EB_Hi = (Offset + Size - 1) / 64;
+
+ if (EB_Lo) {
+ assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
+ FieldLo = NoClass;
+ FieldHi = Integer;
+ } else {
+ FieldLo = Integer;
+ FieldHi = EB_Hi ? Integer : NoClass;
+ }
+ } else
+ classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+
+ postMerge(Size, Lo, Hi);
+ }
+}
+
+ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
+ // If this is a scalar LLVM value then assume LLVM will pass it in the right
+ // place naturally.
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ if (Ty->isBitIntType())
+ return getNaturalAlignIndirect(Ty);
+
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
+ }
+
+ return getNaturalAlignIndirect(Ty);
+}
+
+bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
+ if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
+ uint64_t Size = getContext().getTypeSize(VecTy);
+ unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
+ if (Size <= 64 || Size > LargestVector)
+ return true;
+ QualType EltTy = VecTy->getElementType();
+ if (passInt128VectorsInMem() &&
+ (EltTy->isSpecificBuiltinType(BuiltinType::Int128) ||
+ EltTy->isSpecificBuiltinType(BuiltinType::UInt128)))
+ return true;
+ }
+
+ return false;
+}
+
+ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
+ unsigned freeIntRegs) const {
+ // If this is a scalar LLVM value then assume LLVM will pass it in the right
+ // place naturally.
+ //
+ // This assumption is optimistic, as there could be free registers available
+ // when we need to pass this argument in memory, and LLVM could try to pass
+ // the argument in the free register. This does not seem to happen currently,
+ // but this code would be much safer if we could mark the argument with
+ // 'onstack'. See PR12193.
+ if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) &&
+ !Ty->isBitIntType()) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
+ : ABIArgInfo::getDirect());
+ }
+
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+
+ // Compute the byval alignment. We specify the alignment of the byval in all
+ // cases so that the mid-level optimizer knows the alignment of the byval.
+ unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
+
+ // Attempt to avoid passing indirect results using byval when possible. This
+ // is important for good codegen.
+ //
+ // We do this by coercing the value into a scalar type which the backend can
+ // handle naturally (i.e., without using byval).
+ //
+ // For simplicity, we currently only do this when we have exhausted all of the
+ // free integer registers. Doing this when there are free integer registers
+ // would require more care, as we would have to ensure that the coerced value
+ // did not claim the unused register. That would require either reording the
+ // arguments to the function (so that any subsequent inreg values came first),
+ // or only doing this optimization when there were no following arguments that
+ // might be inreg.
+ //
+ // We currently expect it to be rare (particularly in well written code) for
+ // arguments to be passed on the stack when there are still free integer
+ // registers available (this would typically imply large structs being passed
+ // by value), so this seems like a fair tradeoff for now.
+ //
+ // We can revisit this if the backend grows support for 'onstack' parameter
+ // attributes. See PR12193.
+ if (freeIntRegs == 0) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ // If this type fits in an eightbyte, coerce it into the matching integral
+ // type, which will end up on the stack (with alignment 8).
+ if (Align == 8 && Size <= 64)
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
+ }
+
+ return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
+}
+
+/// The ABI specifies that a value should be passed in a full vector XMM/YMM
+/// register. Pick an LLVM IR type that will be passed as a vector register.
+llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
+ // Wrapper structs/arrays that only contain vectors are passed just like
+ // vectors; strip them off if present.
+ if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
+ Ty = QualType(InnerTy, 0);
+
+ llvm::Type *IRType = CGT.ConvertType(Ty);
+ if (isa<llvm::VectorType>(IRType)) {
+ // Don't pass vXi128 vectors in their native type, the backend can't
+ // legalize them.
+ if (passInt128VectorsInMem() &&
+ cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
+ // Use a vXi64 vector.
+ uint64_t Size = getContext().getTypeSize(Ty);
+ return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
+ Size / 64);
+ }
+
+ return IRType;
+ }
+
+ if (IRType->getTypeID() == llvm::Type::FP128TyID)
+ return IRType;
+
+ // We couldn't find the preferred IR vector type for 'Ty'.
+ uint64_t Size = getContext().getTypeSize(Ty);
+ assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");
+
+
+ // Return a LLVM IR vector type based on the size of 'Ty'.
+ return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
+ Size / 64);
+}
+
+/// BitsContainNoUserData - Return true if the specified [start,end) bit range
+/// is known to either be off the end of the specified type or being in
+/// alignment padding. The user type specified is known to be at most 128 bits
+/// in size, and have passed through X86_64ABIInfo::classify with a successful
+/// classification that put one of the two halves in the INTEGER class.
+///
+/// It is conservatively correct to return false.
+static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
+ unsigned EndBit, ASTContext &Context) {
+ // If the bytes being queried are off the end of the type, there is no user
+ // data hiding here. This handles analysis of builtins, vectors and other
+ // types that don't contain interesting padding.
+ unsigned TySize = (unsigned)Context.getTypeSize(Ty);
+ if (TySize <= StartBit)
+ return true;
+
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+ unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
+ unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
+
+ // Check each element to see if the element overlaps with the queried range.
+ for (unsigned i = 0; i != NumElts; ++i) {
+ // If the element is after the span we care about, then we're done..
+ unsigned EltOffset = i*EltSize;
+ if (EltOffset >= EndBit) break;
+
+ unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
+ if (!BitsContainNoUserData(AT->getElementType(), EltStart,
+ EndBit-EltOffset, Context))
+ return false;
+ }
+ // If it overlaps no elements, then it is safe to process as padding.
+ return true;
+ }
+
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (const auto &I : CXXRD->bases()) {
+ assert(!I.isVirtual() && !I.getType()->isDependentType() &&
+ "Unexpected base class!");
+ const auto *Base =
+ cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+
+ // If the base is after the span we care about, ignore it.
+ unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
+ if (BaseOffset >= EndBit) continue;
+
+ unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
+ if (!BitsContainNoUserData(I.getType(), BaseStart,
+ EndBit-BaseOffset, Context))
+ return false;
+ }
+ }
+
+ // Verify that no field has data that overlaps the region of interest. Yes
+ // this could be sped up a lot by being smarter about queried fields,
+ // however we're only looking at structs up to 16 bytes, so we don't care
+ // much.
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
+
+ // If we found a field after the region we care about, then we're done.
+ if (FieldOffset >= EndBit) break;
+
+ unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
+ if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
+ Context))
+ return false;
+ }
+
+ // If nothing in this record overlapped the area of interest, then we're
+ // clean.
+ return true;
+ }
+
+ return false;
+}
+
+/// getFPTypeAtOffset - Return a floating point type at the specified offset.
+static llvm::Type *getFPTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
+ const llvm::DataLayout &TD) {
+ if (IROffset == 0 && IRType->isFloatingPointTy())
+ return IRType;
+
+ // If this is a struct, recurse into the field at the specified offset.
+ if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
+ if (!STy->getNumContainedTypes())
+ return nullptr;
+
+ const llvm::StructLayout *SL = TD.getStructLayout(STy);
+ unsigned Elt = SL->getElementContainingOffset(IROffset);
+ IROffset -= SL->getElementOffset(Elt);
+ return getFPTypeAtOffset(STy->getElementType(Elt), IROffset, TD);
+ }
+
+ // If this is an array, recurse into the field at the specified offset.
+ if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
+ llvm::Type *EltTy = ATy->getElementType();
+ unsigned EltSize = TD.getTypeAllocSize(EltTy);
+ IROffset -= IROffset / EltSize * EltSize;
+ return getFPTypeAtOffset(EltTy, IROffset, TD);
+ }
+
+ return nullptr;
+}
+
+/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
+/// low 8 bytes of an XMM register, corresponding to the SSE class.
+llvm::Type *X86_64ABIInfo::
+GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
+ QualType SourceTy, unsigned SourceOffset) const {
+ const llvm::DataLayout &TD = getDataLayout();
+ unsigned SourceSize =
+ (unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset;
+ llvm::Type *T0 = getFPTypeAtOffset(IRType, IROffset, TD);
+ if (!T0 || T0->isDoubleTy())
+ return llvm::Type::getDoubleTy(getVMContext());
+
+ // Get the adjacent FP type.
+ llvm::Type *T1 = nullptr;
+ unsigned T0Size = TD.getTypeAllocSize(T0);
+ if (SourceSize > T0Size)
+ T1 = getFPTypeAtOffset(IRType, IROffset + T0Size, TD);
+ if (T1 == nullptr) {
+ // Check if IRType is a half/bfloat + float. float type will be in IROffset+4 due
+ // to its alignment.
+ if (T0->is16bitFPTy() && SourceSize > 4)
+ T1 = getFPTypeAtOffset(IRType, IROffset + 4, TD);
+ // If we can't get a second FP type, return a simple half or float.
+ // avx512fp16-abi.c:pr51813_2 shows it works to return float for
+ // {float, i8} too.
+ if (T1 == nullptr)
+ return T0;
+ }
+
+ if (T0->isFloatTy() && T1->isFloatTy())
+ return llvm::FixedVectorType::get(T0, 2);
+
+ if (T0->is16bitFPTy() && T1->is16bitFPTy()) {
+ llvm::Type *T2 = nullptr;
+ if (SourceSize > 4)
+ T2 = getFPTypeAtOffset(IRType, IROffset + 4, TD);
+ if (T2 == nullptr)
+ return llvm::FixedVectorType::get(T0, 2);
+ return llvm::FixedVectorType::get(T0, 4);
+ }
+
+ if (T0->is16bitFPTy() || T1->is16bitFPTy())
+ return llvm::FixedVectorType::get(llvm::Type::getHalfTy(getVMContext()), 4);
+
+ return llvm::Type::getDoubleTy(getVMContext());
+}
+
+
+/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
+/// an 8-byte GPR. This means that we either have a scalar or we are talking
+/// about the high or low part of an up-to-16-byte struct. This routine picks
+/// the best LLVM IR type to represent this, which may be i64 or may be anything
+/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
+/// etc).
+///
+/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
+/// the source type. IROffset is an offset in bytes into the LLVM IR type that
+/// the 8-byte value references. PrefType may be null.
+///
+/// SourceTy is the source-level type for the entire argument. SourceOffset is
+/// an offset into this that we're processing (which is always either 0 or 8).
+///
+llvm::Type *X86_64ABIInfo::
+GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
+ QualType SourceTy, unsigned SourceOffset) const {
+ // If we're dealing with an un-offset LLVM IR type, then it means that we're
+ // returning an 8-byte unit starting with it. See if we can safely use it.
+ if (IROffset == 0) {
+ // Pointers and int64's always fill the 8-byte unit.
+ if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
+ IRType->isIntegerTy(64))
+ return IRType;
+
+ // If we have a 1/2/4-byte integer, we can use it only if the rest of the
+ // goodness in the source type is just tail padding. This is allowed to
+ // kick in for struct {double,int} on the int, but not on
+ // struct{double,int,int} because we wouldn't return the second int. We
+ // have to do this analysis on the source type because we can't depend on
+ // unions being lowered a specific way etc.
+ if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
+ IRType->isIntegerTy(32) ||
+ (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
+ unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
+ cast<llvm::IntegerType>(IRType)->getBitWidth();
+
+ if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
+ SourceOffset*8+64, getContext()))
+ return IRType;
+ }
+ }
+
+ if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
+ // If this is a struct, recurse into the field at the specified offset.
+ const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
+ if (IROffset < SL->getSizeInBytes()) {
+ unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
+ IROffset -= SL->getElementOffset(FieldIdx);
+
+ return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
+ SourceTy, SourceOffset);
+ }
+ }
+
+ if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
+ llvm::Type *EltTy = ATy->getElementType();
+ unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
+ unsigned EltOffset = IROffset/EltSize*EltSize;
+ return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
+ SourceOffset);
+ }
+
+ // Okay, we don't have any better idea of what to pass, so we pass this in an
+ // integer register that isn't too big to fit the rest of the struct.
+ unsigned TySizeInBytes =
+ (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
+
+ assert(TySizeInBytes != SourceOffset && "Empty field?");
+
+ // It is always safe to classify this as an integer type up to i64 that
+ // isn't larger than the structure.
+ return llvm::IntegerType::get(getVMContext(),
+ std::min(TySizeInBytes-SourceOffset, 8U)*8);
+}
+
+
+/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
+/// be used as elements of a two register pair to pass or return, return a
+/// first class aggregate to represent them. For example, if the low part of
+/// a by-value argument should be passed as i32* and the high part as float,
+/// return {i32*, float}.
+static llvm::Type *
+GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
+ const llvm::DataLayout &TD) {
+ // In order to correctly satisfy the ABI, we need to the high part to start
+ // at offset 8. If the high and low parts we inferred are both 4-byte types
+ // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
+ // the second element at offset 8. Check for this:
+ unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
+ llvm::Align HiAlign = TD.getABITypeAlign(Hi);
+ unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
+ assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
+
+ // To handle this, we have to increase the size of the low part so that the
+ // second element will start at an 8 byte offset. We can't increase the size
+ // of the second element because it might make us access off the end of the
+ // struct.
+ if (HiStart != 8) {
+ // There are usually two sorts of types the ABI generation code can produce
+ // for the low part of a pair that aren't 8 bytes in size: half, float or
+ // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
+ // NaCl).
+ // Promote these to a larger type.
+ if (Lo->isHalfTy() || Lo->isFloatTy())
+ Lo = llvm::Type::getDoubleTy(Lo->getContext());
+ else {
+ assert((Lo->isIntegerTy() || Lo->isPointerTy())
+ && "Invalid/unknown lo type");
+ Lo = llvm::Type::getInt64Ty(Lo->getContext());
+ }
+ }
+
+ llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
+
+ // Verify that the second element is at an 8-byte offset.
+ assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
+ "Invalid x86-64 argument pair!");
+ return Result;
+}
+
+ABIArgInfo X86_64ABIInfo::
+classifyReturnType(QualType RetTy) const {
+ // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
+ // classification algorithm.
+ X86_64ABIInfo::Class Lo, Hi;
+ classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
+
+ // Check some invariants.
+ assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+ llvm::Type *ResType = nullptr;
+ switch (Lo) {
+ case NoClass:
+ if (Hi == NoClass)
+ return ABIArgInfo::getIgnore();
+ // If the low part is just padding, it takes no register, leave ResType
+ // null.
+ assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
+ "Unknown missing lo part");
+ break;
+
+ case SSEUp:
+ case X87Up:
+ llvm_unreachable("Invalid classification for lo word.");
+
+ // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
+ // hidden argument.
+ case Memory:
+ return getIndirectReturnResult(RetTy);
+
+ // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
+ // available register of the sequence %rax, %rdx is used.
+ case Integer:
+ ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
+
+ // If we have a sign or zero extended integer, make sure to return Extend
+ // so that the parameter gets the right LLVM IR attributes.
+ if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ if (RetTy->isIntegralOrEnumerationType() &&
+ isPromotableIntegerTypeForABI(RetTy))
+ return ABIArgInfo::getExtend(RetTy);
+ }
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
+ // available SSE register of the sequence %xmm0, %xmm1 is used.
+ case SSE:
+ ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
+ // returned on the X87 stack in %st0 as 80-bit x87 number.
+ case X87:
+ ResType = llvm::Type::getX86_FP80Ty(getVMContext());
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
+ // part of the value is returned in %st0 and the imaginary part in
+ // %st1.
+ case ComplexX87:
+ assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
+ ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
+ llvm::Type::getX86_FP80Ty(getVMContext()));
+ break;
+ }
+
+ llvm::Type *HighPart = nullptr;
+ switch (Hi) {
+ // Memory was handled previously and X87 should
+ // never occur as a hi class.
+ case Memory:
+ case X87:
+ llvm_unreachable("Invalid classification for hi word.");
+
+ case ComplexX87: // Previously handled.
+ case NoClass:
+ break;
+
+ case Integer:
+ HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ break;
+ case SSE:
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
+ // is passed in the next available eightbyte chunk if the last used
+ // vector register.
+ //
+ // SSEUP should always be preceded by SSE, just widen.
+ case SSEUp:
+ assert(Lo == SSE && "Unexpected SSEUp classification.");
+ ResType = GetByteVectorType(RetTy);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
+ // returned together with the previous X87 value in %st0.
+ case X87Up:
+ // If X87Up is preceded by X87, we don't need to do
+ // anything. However, in some cases with unions it may not be
+ // preceded by X87. In such situations we follow gcc and pass the
+ // extra bits in an SSE reg.
+ if (Lo != X87) {
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ }
+ break;
+ }
+
+ // If a high part was specified, merge it together with the low part. It is
+ // known to pass in the high eightbyte of the result. We do this by forming a
+ // first class struct aggregate with the high and low part: {low, high}
+ if (HighPart)
+ ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
+
+ return ABIArgInfo::getDirect(ResType);
+}
+
+ABIArgInfo
+X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned freeIntRegs,
+ unsigned &neededInt, unsigned &neededSSE,
+ bool isNamedArg, bool IsRegCall) const {
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ X86_64ABIInfo::Class Lo, Hi;
+ classify(Ty, 0, Lo, Hi, isNamedArg, IsRegCall);
+
+ // Check some invariants.
+ // FIXME: Enforce these by construction.
+ assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+ neededInt = 0;
+ neededSSE = 0;
+ llvm::Type *ResType = nullptr;
+ switch (Lo) {
+ case NoClass:
+ if (Hi == NoClass)
+ return ABIArgInfo::getIgnore();
+ // If the low part is just padding, it takes no register, leave ResType
+ // null.
+ assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
+ "Unknown missing lo part");
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
+ // on the stack.
+ case Memory:
+
+ // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
+ // COMPLEX_X87, it is passed in memory.
+ case X87:
+ case ComplexX87:
+ if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
+ ++neededInt;
+ return getIndirectResult(Ty, freeIntRegs);
+
+ case SSEUp:
+ case X87Up:
+ llvm_unreachable("Invalid classification for lo word.");
+
+ // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
+ // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
+ // and %r9 is used.
+ case Integer:
+ ++neededInt;
+
+ // Pick an 8-byte type based on the preferred type.
+ ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
+
+ // If we have a sign or zero extended integer, make sure to return Extend
+ // so that the parameter gets the right LLVM IR attributes.
+ if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ if (Ty->isIntegralOrEnumerationType() &&
+ isPromotableIntegerTypeForABI(Ty))
+ return ABIArgInfo::getExtend(Ty);
+ }
+
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
+ // available SSE register is used, the registers are taken in the
+ // order from %xmm0 to %xmm7.
+ case SSE: {
+ llvm::Type *IRType = CGT.ConvertType(Ty);
+ ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
+ ++neededSSE;
+ break;
+ }
+ }
+
+ llvm::Type *HighPart = nullptr;
+ switch (Hi) {
+ // Memory was handled previously, ComplexX87 and X87 should
+ // never occur as hi classes, and X87Up must be preceded by X87,
+ // which is passed in memory.
+ case Memory:
+ case X87:
+ case ComplexX87:
+ llvm_unreachable("Invalid classification for hi word.");
+
+ case NoClass: break;
+
+ case Integer:
+ ++neededInt;
+ // Pick an 8-byte type based on the preferred type.
+ HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
+
+ if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ break;
+
+ // X87Up generally doesn't occur here (long double is passed in
+ // memory), except in situations involving unions.
+ case X87Up:
+ case SSE:
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
+
+ if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+
+ ++neededSSE;
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
+ // eightbyte is passed in the upper half of the last used SSE
+ // register. This only happens when 128-bit vectors are passed.
+ case SSEUp:
+ assert(Lo == SSE && "Unexpected SSEUp classification");
+ ResType = GetByteVectorType(Ty);
+ break;
+ }
+
+ // If a high part was specified, merge it together with the low part. It is
+ // known to pass in the high eightbyte of the result. We do this by forming a
+ // first class struct aggregate with the high and low part: {low, high}
+ if (HighPart)
+ ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
+
+ return ABIArgInfo::getDirect(ResType);
+}
+
+ABIArgInfo
+X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
+ unsigned &NeededSSE,
+ unsigned &MaxVectorWidth) const {
+ auto RT = Ty->getAs<RecordType>();
+ assert(RT && "classifyRegCallStructType only valid with struct types");
+
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return getIndirectReturnResult(Ty);
+
+ // Sum up bases
+ if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (CXXRD->isDynamicClass()) {
+ NeededInt = NeededSSE = 0;
+ return getIndirectReturnResult(Ty);
+ }
+
+ for (const auto &I : CXXRD->bases())
+ if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE,
+ MaxVectorWidth)
+ .isIndirect()) {
+ NeededInt = NeededSSE = 0;
+ return getIndirectReturnResult(Ty);
+ }
+ }
+
+ // Sum up members
+ for (const auto *FD : RT->getDecl()->fields()) {
+ QualType MTy = FD->getType();
+ if (MTy->isRecordType() && !MTy->isUnionType()) {
+ if (classifyRegCallStructTypeImpl(MTy, NeededInt, NeededSSE,
+ MaxVectorWidth)
+ .isIndirect()) {
+ NeededInt = NeededSSE = 0;
+ return getIndirectReturnResult(Ty);
+ }
+ } else {
+ unsigned LocalNeededInt, LocalNeededSSE;
+ if (classifyArgumentType(MTy, UINT_MAX, LocalNeededInt, LocalNeededSSE,
+ true, true)
+ .isIndirect()) {
+ NeededInt = NeededSSE = 0;
+ return getIndirectReturnResult(Ty);
+ }
+ if (const auto *AT = getContext().getAsConstantArrayType(MTy))
+ MTy = AT->getElementType();
+ if (const auto *VT = MTy->getAs<VectorType>())
+ if (getContext().getTypeSize(VT) > MaxVectorWidth)
+ MaxVectorWidth = getContext().getTypeSize(VT);
+ NeededInt += LocalNeededInt;
+ NeededSSE += LocalNeededSSE;
+ }
+ }
+
+ return ABIArgInfo::getDirect();
+}
+
+ABIArgInfo
+X86_64ABIInfo::classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
+ unsigned &NeededSSE,
+ unsigned &MaxVectorWidth) const {
+
+ NeededInt = 0;
+ NeededSSE = 0;
+ MaxVectorWidth = 0;
+
+ return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE,
+ MaxVectorWidth);
+}
+
+void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+
+ const unsigned CallingConv = FI.getCallingConvention();
+ // It is possible to force Win64 calling convention on any x86_64 target by
+ // using __attribute__((ms_abi)). In such case to correctly emit Win64
+ // compatible code delegate this call to WinX86_64ABIInfo::computeInfo.
+ if (CallingConv == llvm::CallingConv::Win64) {
+ WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
+ Win64ABIInfo.computeInfo(FI);
+ return;
+ }
+
+ bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;
+
+ // Keep track of the number of assigned registers.
+ unsigned FreeIntRegs = IsRegCall ? 11 : 6;
+ unsigned FreeSSERegs = IsRegCall ? 16 : 8;
+ unsigned NeededInt = 0, NeededSSE = 0, MaxVectorWidth = 0;
+
+ if (!::classifyReturnType(getCXXABI(), FI, *this)) {
+ if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
+ !FI.getReturnType()->getTypePtr()->isUnionType()) {
+ FI.getReturnInfo() = classifyRegCallStructType(
+ FI.getReturnType(), NeededInt, NeededSSE, MaxVectorWidth);
+ if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
+ FreeIntRegs -= NeededInt;
+ FreeSSERegs -= NeededSSE;
+ } else {
+ FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
+ }
+ } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>() &&
+ getContext().getCanonicalType(FI.getReturnType()
+ ->getAs<ComplexType>()
+ ->getElementType()) ==
+ getContext().LongDoubleTy)
+ // Complex Long Double Type is passed in Memory when Regcall
+ // calling convention is used.
+ FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
+ else
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ }
+
+ // If the return value is indirect, then the hidden argument is consuming one
+ // integer register.
+ if (FI.getReturnInfo().isIndirect())
+ --FreeIntRegs;
+ else if (NeededSSE && MaxVectorWidth > 0)
+ FI.setMaxVectorWidth(MaxVectorWidth);
+
+ // The chain argument effectively gives us another free register.
+ if (FI.isChainCall())
+ ++FreeIntRegs;
+
+ unsigned NumRequiredArgs = FI.getNumRequiredArgs();
+ // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
+ // get assigned (in left-to-right order) for passing as follows...
+ unsigned ArgNo = 0;
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it, ++ArgNo) {
+ bool IsNamedArg = ArgNo < NumRequiredArgs;
+
+ if (IsRegCall && it->type->isStructureOrClassType())
+ it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE,
+ MaxVectorWidth);
+ else
+ it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
+ NeededSSE, IsNamedArg);
+
+ // AMD64-ABI 3.2.3p3: If there are no registers available for any
+ // eightbyte of an argument, the whole argument is passed on the
+ // stack. If registers have already been assigned for some
+ // eightbytes of such an argument, the assignments get reverted.
+ if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
+ FreeIntRegs -= NeededInt;
+ FreeSSERegs -= NeededSSE;
+ if (MaxVectorWidth > FI.getMaxVectorWidth())
+ FI.setMaxVectorWidth(MaxVectorWidth);
+ } else {
+ it->info = getIndirectResult(it->type, FreeIntRegs);
+ }
+ }
+}
+
+static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
+ Address VAListAddr, QualType Ty) {
+ Address overflow_arg_area_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
+ llvm::Value *overflow_arg_area =
+ CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
+
+ // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
+ // byte boundary if alignment needed by type exceeds 8 byte boundary.
+ // It isn't stated explicitly in the standard, but in practice we use
+ // alignment greater than 16 where necessary.
+ CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
+ if (Align > CharUnits::fromQuantity(8)) {
+ overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
+ Align);
+ }
+
+ // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
+ llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *Res = overflow_arg_area;
+
+ // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
+ // l->overflow_arg_area + sizeof(type).
+ // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
+ // an 8 byte boundary.
+
+ uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
+ llvm::Value *Offset =
+ llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
+ overflow_arg_area = CGF.Builder.CreateGEP(CGF.Int8Ty, overflow_arg_area,
+ Offset, "overflow_arg_area.next");
+ CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
+
+ // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
+ return Address(Res, LTy, Align);
+}
+
+Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ // Assume that va_list type is correct; should be pointer to LLVM type:
+ // struct {
+ // i32 gp_offset;
+ // i32 fp_offset;
+ // i8* overflow_arg_area;
+ // i8* reg_save_area;
+ // };
+ unsigned neededInt, neededSSE;
+
+ Ty = getContext().getCanonicalType(Ty);
+ ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
+ /*isNamedArg*/false);
+
+ // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
+ // in the registers. If not go to step 7.
+ if (!neededInt && !neededSSE)
+ return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
+
+ // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
+ // general purpose registers needed to pass type and num_fp to hold
+ // the number of floating point registers needed.
+
+ // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
+ // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
+ // l->fp_offset > 304 - num_fp * 16 go to step 7.
+ //
+ // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
+ // register save space).
+
+ llvm::Value *InRegs = nullptr;
+ Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
+ llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
+ if (neededInt) {
+ gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
+ gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
+ InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
+ InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
+ }
+
+ if (neededSSE) {
+ fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
+ fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
+ llvm::Value *FitsInFP =
+ llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
+ FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
+ InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
+ }
+
+ llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+ llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+ CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
+
+ // Emit code to load the value if it was passed in registers.
+
+ CGF.EmitBlock(InRegBlock);
+
+ // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
+ // an offset of l->gp_offset and/or l->fp_offset. This may require
+ // copying to a temporary location in case the parameter is passed
+ // in different register classes or requires an alignment greater
+ // than 8 for general purpose registers and 16 for XMM registers.
+ //
+ // FIXME: This really results in shameful code when we end up needing to
+ // collect arguments from different places; often what should result in a
+ // simple assembling of a structure from scattered addresses has many more
+ // loads than necessary. Can we clean this up?
+ llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
+ CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area");
+
+ Address RegAddr = Address::invalid();
+ if (neededInt && neededSSE) {
+ // FIXME: Cleanup.
+ assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
+ llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
+ Address Tmp = CGF.CreateMemTemp(Ty);
+ Tmp = Tmp.withElementType(ST);
+ assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
+ llvm::Type *TyLo = ST->getElementType(0);
+ llvm::Type *TyHi = ST->getElementType(1);
+ assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
+ "Unexpected ABI info for mixed regs");
+ llvm::Value *GPAddr =
+ CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset);
+ llvm::Value *FPAddr =
+ CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset);
+ llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
+ llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
+
+ // Copy the first element.
+ // FIXME: Our choice of alignment here and below is probably pessimistic.
+ llvm::Value *V = CGF.Builder.CreateAlignedLoad(
+ TyLo, RegLoAddr,
+ CharUnits::fromQuantity(getDataLayout().getABITypeAlign(TyLo)));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+
+ // Copy the second element.
+ V = CGF.Builder.CreateAlignedLoad(
+ TyHi, RegHiAddr,
+ CharUnits::fromQuantity(getDataLayout().getABITypeAlign(TyHi)));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+
+ RegAddr = Tmp.withElementType(LTy);
+ } else if (neededInt) {
+ RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset),
+ LTy, CharUnits::fromQuantity(8));
+
+ // Copy to a temporary if necessary to ensure the appropriate alignment.
+ auto TInfo = getContext().getTypeInfoInChars(Ty);
+ uint64_t TySize = TInfo.Width.getQuantity();
+ CharUnits TyAlign = TInfo.Align;
+
+ // Copy into a temporary if the type is more aligned than the
+ // register save area.
+ if (TyAlign.getQuantity() > 8) {
+ Address Tmp = CGF.CreateMemTemp(Ty);
+ CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
+ RegAddr = Tmp;
+ }
+
+ } else if (neededSSE == 1) {
+ RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset),
+ LTy, CharUnits::fromQuantity(16));
+ } else {
+ assert(neededSSE == 2 && "Invalid number of needed registers!");
+ // SSE registers are spaced 16 bytes apart in the register save
+ // area, we need to collect the two eightbytes together.
+ // The ABI isn't explicit about this, but it seems reasonable
+ // to assume that the slots are 16-byte aligned, since the stack is
+ // naturally 16-byte aligned and the prologue is expected to store
+ // all the SSE registers to the RSA.
+ Address RegAddrLo = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea,
+ fp_offset),
+ CGF.Int8Ty, CharUnits::fromQuantity(16));
+ Address RegAddrHi =
+ CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
+ CharUnits::fromQuantity(16));
+ llvm::Type *ST = AI.canHaveCoerceToType()
+ ? AI.getCoerceToType()
+ : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy);
+ llvm::Value *V;
+ Address Tmp = CGF.CreateMemTemp(Ty);
+ Tmp = Tmp.withElementType(ST);
+ V = CGF.Builder.CreateLoad(
+ RegAddrLo.withElementType(ST->getStructElementType(0)));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+ V = CGF.Builder.CreateLoad(
+ RegAddrHi.withElementType(ST->getStructElementType(1)));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+
+ RegAddr = Tmp.withElementType(LTy);
+ }
+
+ // AMD64-ABI 3.5.7p5: Step 5. Set:
+ // l->gp_offset = l->gp_offset + num_gp * 8
+ // l->fp_offset = l->fp_offset + num_fp * 16.
+ if (neededInt) {
+ llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
+ CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
+ gp_offset_p);
+ }
+ if (neededSSE) {
+ llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
+ CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
+ fp_offset_p);
+ }
+ CGF.EmitBranch(ContBlock);
+
+ // Emit code to load the value if it was passed in memory.
+
+ CGF.EmitBlock(InMemBlock);
+ Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
+
+ // Return the appropriate result.
+
+ CGF.EmitBlock(ContBlock);
+ Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
+ "vaarg.addr");
+ return ResAddr;
+}
+
+Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
+ // not 1, 2, 4, or 8 bytes, must be passed by reference."
+ uint64_t Width = getContext().getTypeSize(Ty);
+ bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
+ CGF.getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(8),
+ /*allowHigherAlign*/ false);
+}
+
+ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
+ QualType Ty, unsigned &FreeSSERegs, const ABIArgInfo &current) const {
+ const Type *Base = nullptr;
+ uint64_t NumElts = 0;
+
+ if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
+ isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
+ FreeSSERegs -= NumElts;
+ return getDirectX86Hva();
+ }
+ return current;
+}
+
+ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
+ bool IsReturnType, bool IsVectorCall,
+ bool IsRegCall) const {
+
+ if (Ty->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ TypeInfo Info = getContext().getTypeInfo(Ty);
+ uint64_t Width = Info.Width;
+ CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
+
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (RT) {
+ if (!IsReturnType) {
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
+ return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
+ }
+
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+
+ }
+
+ const Type *Base = nullptr;
+ uint64_t NumElts = 0;
+ // vectorcall adds the concept of a homogenous vector aggregate, similar to
+ // other targets.
+ if ((IsVectorCall || IsRegCall) &&
+ isHomogeneousAggregate(Ty, Base, NumElts)) {
+ if (IsRegCall) {
+ if (FreeSSERegs >= NumElts) {
+ FreeSSERegs -= NumElts;
+ if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
+ return ABIArgInfo::getDirect();
+ return ABIArgInfo::getExpand();
+ }
+ return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ } else if (IsVectorCall) {
+ if (FreeSSERegs >= NumElts &&
+ (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
+ FreeSSERegs -= NumElts;
+ return ABIArgInfo::getDirect();
+ } else if (IsReturnType) {
+ return ABIArgInfo::getExpand();
+ } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
+ // HVAs are delayed and reclassified in the 2nd step.
+ return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ }
+ }
+ }
+
+ if (Ty->isMemberPointerType()) {
+ // If the member pointer is represented by an LLVM int or ptr, pass it
+ // directly.
+ llvm::Type *LLTy = CGT.ConvertType(Ty);
+ if (LLTy->isPointerTy() || LLTy->isIntegerTy())
+ return ABIArgInfo::getDirect();
+ }
+
+ if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
+ // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
+ // not 1, 2, 4, or 8 bytes, must be passed by reference."
+ if (Width > 64 || !llvm::isPowerOf2_64(Width))
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+
+ // Otherwise, coerce it to a small integer.
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
+ }
+
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ switch (BT->getKind()) {
+ case BuiltinType::Bool:
+ // Bool type is always extended to the ABI, other builtin types are not
+ // extended.
+ return ABIArgInfo::getExtend(Ty);
+
+ case BuiltinType::LongDouble:
+ // Mingw64 GCC uses the old 80 bit extended precision floating point
+ // unit. It passes them indirectly through memory.
+ if (IsMingw64) {
+ const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
+ if (LDF == &llvm::APFloat::x87DoubleExtended())
+ return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ }
+ break;
+
+ case BuiltinType::Int128:
+ case BuiltinType::UInt128:
+ // If it's a parameter type, the normal ABI rule is that arguments larger
+ // than 8 bytes are passed indirectly. GCC follows it. We follow it too,
+ // even though it isn't particularly efficient.
+ if (!IsReturnType)
+ return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+
+ // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that.
+ // Clang matches them for compatibility.
+ return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
+ llvm::Type::getInt64Ty(getVMContext()), 2));
+
+ default:
+ break;
+ }
+ }
+
+ if (Ty->isBitIntType()) {
+ // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
+ // not 1, 2, 4, or 8 bytes, must be passed by reference."
+ // However, non-power-of-two bit-precise integers will be passed as 1, 2, 4,
+ // or 8 bytes anyway as long is it fits in them, so we don't have to check
+ // the power of 2.
+ if (Width <= 64)
+ return ABIArgInfo::getDirect();
+ return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
+ }
+
+ return ABIArgInfo::getDirect();
+}
+
+void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ const unsigned CC = FI.getCallingConvention();
+ bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
+ bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;
+
+ // If __attribute__((sysv_abi)) is in use, use the SysV argument
+ // classification rules.
+ if (CC == llvm::CallingConv::X86_64_SysV) {
+ X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
+ SysVABIInfo.computeInfo(FI);
+ return;
+ }
+
+ unsigned FreeSSERegs = 0;
+ if (IsVectorCall) {
+ // We can use up to 4 SSE return registers with vectorcall.
+ FreeSSERegs = 4;
+ } else if (IsRegCall) {
+ // RegCall gives us 16 SSE registers.
+ FreeSSERegs = 16;
+ }
+
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
+ IsVectorCall, IsRegCall);
+
+ if (IsVectorCall) {
+ // We can use up to 6 SSE register parameters with vectorcall.
+ FreeSSERegs = 6;
+ } else if (IsRegCall) {
+ // RegCall gives us 16 SSE registers, we can reuse the return registers.
+ FreeSSERegs = 16;
+ }
+
+ unsigned ArgNum = 0;
+ unsigned ZeroSSERegs = 0;
+ for (auto &I : FI.arguments()) {
+ // Vectorcall in x64 only permits the first 6 arguments to be passed as
+ // XMM/YMM registers. After the sixth argument, pretend no vector
+ // registers are left.
+ unsigned *MaybeFreeSSERegs =
+ (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
+ I.info =
+ classify(I.type, *MaybeFreeSSERegs, false, IsVectorCall, IsRegCall);
+ ++ArgNum;
+ }
+
+ if (IsVectorCall) {
+ // For vectorcall, assign aggregate HVAs to any free vector registers in a
+ // second pass.
+ for (auto &I : FI.arguments())
+ I.info = reclassifyHvaArgForVectorCall(I.type, FreeSSERegs, I.info);
+ }
+}
+
+Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
+ // not 1, 2, 4, or 8 bytes, must be passed by reference."
+ uint64_t Width = getContext().getTypeSize(Ty);
+ bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
+ CGF.getContext().getTypeInfoInChars(Ty),
+ CharUnits::fromQuantity(8),
+ /*allowHigherAlign*/ false);
+}
+
+std::unique_ptr<TargetCodeGenInfo> CodeGen::createX86_32TargetCodeGenInfo(
+ CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI,
+ unsigned NumRegisterParameters, bool SoftFloatABI) {
+ bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
+ CGM.getTriple(), CGM.getCodeGenOpts());
+ return std::make_unique<X86_32TargetCodeGenInfo>(
+ CGM.getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
+ NumRegisterParameters, SoftFloatABI);
+}
+
+std::unique_ptr<TargetCodeGenInfo> CodeGen::createWinX86_32TargetCodeGenInfo(
+ CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI,
+ unsigned NumRegisterParameters) {
+ bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
+ CGM.getTriple(), CGM.getCodeGenOpts());
+ return std::make_unique<WinX86_32TargetCodeGenInfo>(
+ CGM.getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
+ NumRegisterParameters);
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createX86_64TargetCodeGenInfo(CodeGenModule &CGM,
+ X86AVXABILevel AVXLevel) {
+ return std::make_unique<X86_64TargetCodeGenInfo>(CGM.getTypes(), AVXLevel);
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createWinX86_64TargetCodeGenInfo(CodeGenModule &CGM,
+ X86AVXABILevel AVXLevel) {
+ return std::make_unique<WinX86_64TargetCodeGenInfo>(CGM.getTypes(), AVXLevel);
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Targets/XCore.cpp b/contrib/llvm-project/clang/lib/CodeGen/Targets/XCore.cpp
new file mode 100644
index 000000000000..aeb48f851e16
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/Targets/XCore.cpp
@@ -0,0 +1,662 @@
+//===- XCore.cpp ----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ABIInfoImpl.h"
+#include "TargetInfo.h"
+
+using namespace clang;
+using namespace clang::CodeGen;
+
+//===----------------------------------------------------------------------===//
+// XCore ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+/// A SmallStringEnc instance is used to build up the TypeString by passing
+/// it by reference between functions that append to it.
+typedef llvm::SmallString<128> SmallStringEnc;
+
+/// TypeStringCache caches the meta encodings of Types.
+///
+/// The reason for caching TypeStrings is two fold:
+/// 1. To cache a type's encoding for later uses;
+/// 2. As a means to break recursive member type inclusion.
+///
+/// A cache Entry can have a Status of:
+/// NonRecursive: The type encoding is not recursive;
+/// Recursive: The type encoding is recursive;
+/// Incomplete: An incomplete TypeString;
+/// IncompleteUsed: An incomplete TypeString that has been used in a
+/// Recursive type encoding.
+///
+/// A NonRecursive entry will have all of its sub-members expanded as fully
+/// as possible. Whilst it may contain types which are recursive, the type
+/// itself is not recursive and thus its encoding may be safely used whenever
+/// the type is encountered.
+///
+/// A Recursive entry will have all of its sub-members expanded as fully as
+/// possible. The type itself is recursive and it may contain other types which
+/// are recursive. The Recursive encoding must not be used during the expansion
+/// of a recursive type's recursive branch. For simplicity the code uses
+/// IncompleteCount to reject all usage of Recursive encodings for member types.
+///
+/// An Incomplete entry is always a RecordType and only encodes its
+/// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
+/// are placed into the cache during type expansion as a means to identify and
+/// handle recursive inclusion of types as sub-members. If there is recursion
+/// the entry becomes IncompleteUsed.
+///
+/// During the expansion of a RecordType's members:
+///
+/// If the cache contains a NonRecursive encoding for the member type, the
+/// cached encoding is used;
+///
+/// If the cache contains a Recursive encoding for the member type, the
+/// cached encoding is 'Swapped' out, as it may be incorrect, and...
+///
+/// If the member is a RecordType, an Incomplete encoding is placed into the
+/// cache to break potential recursive inclusion of itself as a sub-member;
+///
+/// Once a member RecordType has been expanded, its temporary incomplete
+/// entry is removed from the cache. If a Recursive encoding was swapped out
+/// it is swapped back in;
+///
+/// If an incomplete entry is used to expand a sub-member, the incomplete
+/// entry is marked as IncompleteUsed. The cache keeps count of how many
+/// IncompleteUsed entries it currently contains in IncompleteUsedCount;
+///
+/// If a member's encoding is found to be a NonRecursive or Recursive viz:
+/// IncompleteUsedCount==0, the member's encoding is added to the cache.
+/// Else the member is part of a recursive type and thus the recursion has
+/// been exited too soon for the encoding to be correct for the member.
+///
+class TypeStringCache {
+ enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
+ struct Entry {
+ std::string Str; // The encoded TypeString for the type.
+ enum Status State; // Information about the encoding in 'Str'.
+ std::string Swapped; // A temporary place holder for a Recursive encoding
+ // during the expansion of RecordType's members.
+ };
+ std::map<const IdentifierInfo *, struct Entry> Map;
+ unsigned IncompleteCount; // Number of Incomplete entries in the Map.
+ unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
+public:
+ TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
+ void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
+ bool removeIncomplete(const IdentifierInfo *ID);
+ void addIfComplete(const IdentifierInfo *ID, StringRef Str,
+ bool IsRecursive);
+ StringRef lookupStr(const IdentifierInfo *ID);
+};
+
+/// TypeString encodings for enum & union fields must be order.
+/// FieldEncoding is a helper for this ordering process.
+class FieldEncoding {
+ bool HasName;
+ std::string Enc;
+public:
+ FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
+ StringRef str() { return Enc; }
+ bool operator<(const FieldEncoding &rhs) const {
+ if (HasName != rhs.HasName) return HasName;
+ return Enc < rhs.Enc;
+ }
+};
+
+class XCoreABIInfo : public DefaultABIInfo {
+public:
+ XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
+ mutable TypeStringCache TSC;
+ void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
+ const CodeGen::CodeGenModule &M) const;
+
+public:
+ XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<XCoreABIInfo>(CGT)) {}
+ void emitTargetMetadata(CodeGen::CodeGenModule &CGM,
+ const llvm::MapVector<GlobalDecl, StringRef>
+ &MangledDeclNames) const override;
+};
+
+} // End anonymous namespace.
+
+// TODO: this implementation is likely now redundant with the default
+// EmitVAArg.
+Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ // Get the VAList.
+ CharUnits SlotSize = CharUnits::fromQuantity(4);
+ Address AP = Address(Builder.CreateLoad(VAListAddr),
+ getVAListElementType(CGF), SlotSize);
+
+ // Handle the argument.
+ ABIArgInfo AI = classifyArgumentType(Ty);
+ CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
+ llvm::Type *ArgTy = CGT.ConvertType(Ty);
+ if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
+ AI.setCoerceToType(ArgTy);
+ llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
+
+ Address Val = Address::invalid();
+ CharUnits ArgSize = CharUnits::Zero();
+ switch (AI.getKind()) {
+ case ABIArgInfo::Expand:
+ case ABIArgInfo::CoerceAndExpand:
+ case ABIArgInfo::InAlloca:
+ llvm_unreachable("Unsupported ABI kind for va_arg");
+ case ABIArgInfo::Ignore:
+ Val = Address(llvm::UndefValue::get(ArgPtrTy), ArgTy, TypeAlign);
+ ArgSize = CharUnits::Zero();
+ break;
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct:
+ Val = AP.withElementType(ArgTy);
+ ArgSize = CharUnits::fromQuantity(
+ getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
+ ArgSize = ArgSize.alignTo(SlotSize);
+ break;
+ case ABIArgInfo::Indirect:
+ case ABIArgInfo::IndirectAliased:
+ Val = AP.withElementType(ArgPtrTy);
+ Val = Address(Builder.CreateLoad(Val), ArgTy, TypeAlign);
+ ArgSize = SlotSize;
+ break;
+ }
+
+ // Increment the VAList.
+ if (!ArgSize.isZero()) {
+ Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize);
+ Builder.CreateStore(APN.getPointer(), VAListAddr);
+ }
+
+ return Val;
+}
+
+/// During the expansion of a RecordType, an incomplete TypeString is placed
+/// into the cache as a means to identify and break recursion.
+/// If there is a Recursive encoding in the cache, it is swapped out and will
+/// be reinserted by removeIncomplete().
+/// All other types of encoding should have been used rather than arriving here.
+void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
+ std::string StubEnc) {
+ if (!ID)
+ return;
+ Entry &E = Map[ID];
+ assert( (E.Str.empty() || E.State == Recursive) &&
+ "Incorrectly use of addIncomplete");
+ assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
+ E.Swapped.swap(E.Str); // swap out the Recursive
+ E.Str.swap(StubEnc);
+ E.State = Incomplete;
+ ++IncompleteCount;
+}
+
+/// Once the RecordType has been expanded, the temporary incomplete TypeString
+/// must be removed from the cache.
+/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
+/// Returns true if the RecordType was defined recursively.
+bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
+ if (!ID)
+ return false;
+ auto I = Map.find(ID);
+ assert(I != Map.end() && "Entry not present");
+ Entry &E = I->second;
+ assert( (E.State == Incomplete ||
+ E.State == IncompleteUsed) &&
+ "Entry must be an incomplete type");
+ bool IsRecursive = false;
+ if (E.State == IncompleteUsed) {
+ // We made use of our Incomplete encoding, thus we are recursive.
+ IsRecursive = true;
+ --IncompleteUsedCount;
+ }
+ if (E.Swapped.empty())
+ Map.erase(I);
+ else {
+ // Swap the Recursive back.
+ E.Swapped.swap(E.Str);
+ E.Swapped.clear();
+ E.State = Recursive;
+ }
+ --IncompleteCount;
+ return IsRecursive;
+}
+
+/// Add the encoded TypeString to the cache only if it is NonRecursive or
+/// Recursive (viz: all sub-members were expanded as fully as possible).
+void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
+ bool IsRecursive) {
+ if (!ID || IncompleteUsedCount)
+ return; // No key or it is an incomplete sub-type so don't add.
+ Entry &E = Map[ID];
+ if (IsRecursive && !E.Str.empty()) {
+ assert(E.State==Recursive && E.Str.size() == Str.size() &&
+ "This is not the same Recursive entry");
+ // The parent container was not recursive after all, so we could have used
+ // this Recursive sub-member entry after all, but we assumed the worse when
+ // we started viz: IncompleteCount!=0.
+ return;
+ }
+ assert(E.Str.empty() && "Entry already present");
+ E.Str = Str.str();
+ E.State = IsRecursive? Recursive : NonRecursive;
+}
+
+/// Return a cached TypeString encoding for the ID. If there isn't one, or we
+/// are recursively expanding a type (IncompleteCount != 0) and the cached
+/// encoding is Recursive, return an empty StringRef.
+StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
+ if (!ID)
+ return StringRef(); // We have no key.
+ auto I = Map.find(ID);
+ if (I == Map.end())
+ return StringRef(); // We have no encoding.
+ Entry &E = I->second;
+ if (E.State == Recursive && IncompleteCount)
+ return StringRef(); // We don't use Recursive encodings for member types.
+
+ if (E.State == Incomplete) {
+ // The incomplete type is being used to break out of recursion.
+ E.State = IncompleteUsed;
+ ++IncompleteUsedCount;
+ }
+ return E.Str;
+}
+
+/// The XCore ABI includes a type information section that communicates symbol
+/// type information to the linker. The linker uses this information to verify
+/// safety/correctness of things such as array bound and pointers et al.
+/// The ABI only requires C (and XC) language modules to emit TypeStrings.
+/// This type information (TypeString) is emitted into meta data for all global
+/// symbols: definitions, declarations, functions & variables.
+///
+/// The TypeString carries type, qualifier, name, size & value details.
+/// Please see 'Tools Development Guide' section 2.16.2 for format details:
+/// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
+/// The output is tested by test/CodeGen/xcore-stringtype.c.
+///
+static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC);
+
+/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
+void XCoreTargetCodeGenInfo::emitTargetMD(
+ const Decl *D, llvm::GlobalValue *GV,
+ const CodeGen::CodeGenModule &CGM) const {
+ SmallStringEnc Enc;
+ if (getTypeString(Enc, D, CGM, TSC)) {
+ llvm::LLVMContext &Ctx = CGM.getModule().getContext();
+ llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
+ llvm::MDString::get(Ctx, Enc.str())};
+ llvm::NamedMDNode *MD =
+ CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
+ MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
+ }
+}
+
+void XCoreTargetCodeGenInfo::emitTargetMetadata(
+ CodeGen::CodeGenModule &CGM,
+ const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const {
+ // Warning, new MangledDeclNames may be appended within this loop.
+ // We rely on MapVector insertions adding new elements to the end
+ // of the container.
+ for (unsigned I = 0; I != MangledDeclNames.size(); ++I) {
+ auto Val = *(MangledDeclNames.begin() + I);
+ llvm::GlobalValue *GV = CGM.GetGlobalValue(Val.second);
+ if (GV) {
+ const Decl *D = Val.first.getDecl()->getMostRecentDecl();
+ emitTargetMD(D, GV, CGM);
+ }
+ }
+}
+
+static bool appendType(SmallStringEnc &Enc, QualType QType,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC);
+
+/// Helper function for appendRecordType().
+/// Builds a SmallVector containing the encoded field types in declaration
+/// order.
+static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
+ const RecordDecl *RD,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC) {
+ for (const auto *Field : RD->fields()) {
+ SmallStringEnc Enc;
+ Enc += "m(";
+ Enc += Field->getName();
+ Enc += "){";
+ if (Field->isBitField()) {
+ Enc += "b(";
+ llvm::raw_svector_ostream OS(Enc);
+ OS << Field->getBitWidthValue(CGM.getContext());
+ Enc += ':';
+ }
+ if (!appendType(Enc, Field->getType(), CGM, TSC))
+ return false;
+ if (Field->isBitField())
+ Enc += ')';
+ Enc += '}';
+ FE.emplace_back(!Field->getName().empty(), Enc);
+ }
+ return true;
+}
+
+/// Appends structure and union types to Enc and adds encoding to cache.
+/// Recursively calls appendType (via extractFieldType) for each field.
+/// Union types have their fields ordered according to the ABI.
+static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC, const IdentifierInfo *ID) {
+ // Append the cached TypeString if we have one.
+ StringRef TypeString = TSC.lookupStr(ID);
+ if (!TypeString.empty()) {
+ Enc += TypeString;
+ return true;
+ }
+
+ // Start to emit an incomplete TypeString.
+ size_t Start = Enc.size();
+ Enc += (RT->isUnionType()? 'u' : 's');
+ Enc += '(';
+ if (ID)
+ Enc += ID->getName();
+ Enc += "){";
+
+ // We collect all encoded fields and order as necessary.
+ bool IsRecursive = false;
+ const RecordDecl *RD = RT->getDecl()->getDefinition();
+ if (RD && !RD->field_empty()) {
+ // An incomplete TypeString stub is placed in the cache for this RecordType
+ // so that recursive calls to this RecordType will use it whilst building a
+ // complete TypeString for this RecordType.
+ SmallVector<FieldEncoding, 16> FE;
+ std::string StubEnc(Enc.substr(Start).str());
+ StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
+ TSC.addIncomplete(ID, std::move(StubEnc));
+ if (!extractFieldType(FE, RD, CGM, TSC)) {
+ (void) TSC.removeIncomplete(ID);
+ return false;
+ }
+ IsRecursive = TSC.removeIncomplete(ID);
+ // The ABI requires unions to be sorted but not structures.
+ // See FieldEncoding::operator< for sort algorithm.
+ if (RT->isUnionType())
+ llvm::sort(FE);
+ // We can now complete the TypeString.
+ unsigned E = FE.size();
+ for (unsigned I = 0; I != E; ++I) {
+ if (I)
+ Enc += ',';
+ Enc += FE[I].str();
+ }
+ }
+ Enc += '}';
+ TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
+ return true;
+}
+
+/// Appends enum types to Enc and adds the encoding to the cache.
+static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
+ TypeStringCache &TSC,
+ const IdentifierInfo *ID) {
+ // Append the cached TypeString if we have one.
+ StringRef TypeString = TSC.lookupStr(ID);
+ if (!TypeString.empty()) {
+ Enc += TypeString;
+ return true;
+ }
+
+ size_t Start = Enc.size();
+ Enc += "e(";
+ if (ID)
+ Enc += ID->getName();
+ Enc += "){";
+
+ // We collect all encoded enumerations and order them alphanumerically.
+ if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
+ SmallVector<FieldEncoding, 16> FE;
+ for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
+ ++I) {
+ SmallStringEnc EnumEnc;
+ EnumEnc += "m(";
+ EnumEnc += I->getName();
+ EnumEnc += "){";
+ I->getInitVal().toString(EnumEnc);
+ EnumEnc += '}';
+ FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
+ }
+ llvm::sort(FE);
+ unsigned E = FE.size();
+ for (unsigned I = 0; I != E; ++I) {
+ if (I)
+ Enc += ',';
+ Enc += FE[I].str();
+ }
+ }
+ Enc += '}';
+ TSC.addIfComplete(ID, Enc.substr(Start), false);
+ return true;
+}
+
+/// Appends type's qualifier to Enc.
+/// This is done prior to appending the type's encoding.
+static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
+ // Qualifiers are emitted in alphabetical order.
+ static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"};
+ int Lookup = 0;
+ if (QT.isConstQualified())
+ Lookup += 1<<0;
+ if (QT.isRestrictQualified())
+ Lookup += 1<<1;
+ if (QT.isVolatileQualified())
+ Lookup += 1<<2;
+ Enc += Table[Lookup];
+}
+
+/// Appends built-in types to Enc.
+static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
+ const char *EncType;
+ switch (BT->getKind()) {
+ case BuiltinType::Void:
+ EncType = "0";
+ break;
+ case BuiltinType::Bool:
+ EncType = "b";
+ break;
+ case BuiltinType::Char_U:
+ EncType = "uc";
+ break;
+ case BuiltinType::UChar:
+ EncType = "uc";
+ break;
+ case BuiltinType::SChar:
+ EncType = "sc";
+ break;
+ case BuiltinType::UShort:
+ EncType = "us";
+ break;
+ case BuiltinType::Short:
+ EncType = "ss";
+ break;
+ case BuiltinType::UInt:
+ EncType = "ui";
+ break;
+ case BuiltinType::Int:
+ EncType = "si";
+ break;
+ case BuiltinType::ULong:
+ EncType = "ul";
+ break;
+ case BuiltinType::Long:
+ EncType = "sl";
+ break;
+ case BuiltinType::ULongLong:
+ EncType = "ull";
+ break;
+ case BuiltinType::LongLong:
+ EncType = "sll";
+ break;
+ case BuiltinType::Float:
+ EncType = "ft";
+ break;
+ case BuiltinType::Double:
+ EncType = "d";
+ break;
+ case BuiltinType::LongDouble:
+ EncType = "ld";
+ break;
+ default:
+ return false;
+ }
+ Enc += EncType;
+ return true;
+}
+
+/// Appends a pointer encoding to Enc before calling appendType for the pointee.
+static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC) {
+ Enc += "p(";
+ if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
+ return false;
+ Enc += ')';
+ return true;
+}
+
+/// Appends array encoding to Enc before calling appendType for the element.
+static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
+ const ArrayType *AT,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC, StringRef NoSizeEnc) {
+ if (AT->getSizeModifier() != ArraySizeModifier::Normal)
+ return false;
+ Enc += "a(";
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
+ CAT->getSize().toStringUnsigned(Enc);
+ else
+ Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
+ Enc += ':';
+ // The Qualifiers should be attached to the type rather than the array.
+ appendQualifier(Enc, QT);
+ if (!appendType(Enc, AT->getElementType(), CGM, TSC))
+ return false;
+ Enc += ')';
+ return true;
+}
+
+/// Appends a function encoding to Enc, calling appendType for the return type
+/// and the arguments.
+static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC) {
+ Enc += "f{";
+ if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
+ return false;
+ Enc += "}(";
+ if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
+ // N.B. we are only interested in the adjusted param types.
+ auto I = FPT->param_type_begin();
+ auto E = FPT->param_type_end();
+ if (I != E) {
+ do {
+ if (!appendType(Enc, *I, CGM, TSC))
+ return false;
+ ++I;
+ if (I != E)
+ Enc += ',';
+ } while (I != E);
+ if (FPT->isVariadic())
+ Enc += ",va";
+ } else {
+ if (FPT->isVariadic())
+ Enc += "va";
+ else
+ Enc += '0';
+ }
+ }
+ Enc += ')';
+ return true;
+}
+
+/// Handles the type's qualifier before dispatching a call to handle specific
+/// type encodings.
+static bool appendType(SmallStringEnc &Enc, QualType QType,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC) {
+
+ QualType QT = QType.getCanonicalType();
+
+ if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
+ // The Qualifiers should be attached to the type rather than the array.
+ // Thus we don't call appendQualifier() here.
+ return appendArrayType(Enc, QT, AT, CGM, TSC, "");
+
+ appendQualifier(Enc, QT);
+
+ if (const BuiltinType *BT = QT->getAs<BuiltinType>())
+ return appendBuiltinType(Enc, BT);
+
+ if (const PointerType *PT = QT->getAs<PointerType>())
+ return appendPointerType(Enc, PT, CGM, TSC);
+
+ if (const EnumType *ET = QT->getAs<EnumType>())
+ return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());
+
+ if (const RecordType *RT = QT->getAsStructureType())
+ return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
+
+ if (const RecordType *RT = QT->getAsUnionType())
+ return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
+
+ if (const FunctionType *FT = QT->getAs<FunctionType>())
+ return appendFunctionType(Enc, FT, CGM, TSC);
+
+ return false;
+}
+
+static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
+ const CodeGen::CodeGenModule &CGM,
+ TypeStringCache &TSC) {
+ if (!D)
+ return false;
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->getLanguageLinkage() != CLanguageLinkage)
+ return false;
+ return appendType(Enc, FD->getType(), CGM, TSC);
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (VD->getLanguageLinkage() != CLanguageLinkage)
+ return false;
+ QualType QT = VD->getType().getCanonicalType();
+ if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
+ // Global ArrayTypes are given a size of '*' if the size is unknown.
+ // The Qualifiers should be attached to the type rather than the array.
+ // Thus we don't call appendQualifier() here.
+ return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
+ }
+ return appendType(Enc, QT, CGM, TSC);
+ }
+ return false;
+}
+
+std::unique_ptr<TargetCodeGenInfo>
+CodeGen::createXCoreTargetCodeGenInfo(CodeGenModule &CGM) {
+ return std::make_unique<XCoreTargetCodeGenInfo>(CGM.getTypes());
+}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/VarBypassDetector.cpp b/contrib/llvm-project/clang/lib/CodeGen/VarBypassDetector.cpp
index e8717a61ce5e..6eda83dfdef2 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/VarBypassDetector.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/VarBypassDetector.cpp
@@ -77,7 +77,7 @@ bool VarBypassDetector::BuildScopeInformation(const Stmt *S,
return false;
++StmtsToSkip;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Stmt::GotoStmtClass:
FromScopes.push_back({S, ParentScope});
diff --git a/contrib/llvm-project/clang/lib/CodeGen/VarBypassDetector.h b/contrib/llvm-project/clang/lib/CodeGen/VarBypassDetector.h
index b654eefd963d..164e88c0b2f1 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/VarBypassDetector.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/VarBypassDetector.h
@@ -55,7 +55,7 @@ public:
/// Returns true if the variable declaration was by bypassed by any goto or
/// switch statement.
bool IsBypassed(const VarDecl *D) const {
- return AlwaysBypassed || Bypasses.find(D) != Bypasses.end();
+ return AlwaysBypassed || Bypasses.contains(D);
}
private:
diff --git a/contrib/llvm-project/clang/lib/CrossTU/CrossTranslationUnit.cpp b/contrib/llvm-project/clang/lib/CrossTU/CrossTranslationUnit.cpp
index 0aecad491ecc..986470042bd8 100644
--- a/contrib/llvm-project/clang/lib/CrossTU/CrossTranslationUnit.cpp
+++ b/contrib/llvm-project/clang/lib/CrossTU/CrossTranslationUnit.cpp
@@ -19,17 +19,17 @@
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Index/USRGeneration.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/YAMLParser.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <fstream>
+#include <optional>
#include <sstream>
#include <tuple>
@@ -149,6 +149,35 @@ std::error_code IndexError::convertToErrorCode() const {
return std::error_code(static_cast<int>(Code), *Category);
}
+/// Parse one line of the input CTU index file.
+///
+/// @param[in] LineRef The input CTU index item in format
+/// "<USR-Length>:<USR> <File-Path>".
+/// @param[out] LookupName The lookup name in format "<USR-Length>:<USR>".
+/// @param[out] FilePath The file path "<File-Path>".
+static bool parseCrossTUIndexItem(StringRef LineRef, StringRef &LookupName,
+ StringRef &FilePath) {
+ // `LineRef` is "<USR-Length>:<USR> <File-Path>" now.
+
+ size_t USRLength = 0;
+ if (LineRef.consumeInteger(10, USRLength))
+ return false;
+ assert(USRLength && "USRLength should be greater than zero.");
+
+ if (!LineRef.consume_front(":"))
+ return false;
+
+ // `LineRef` is now just "<USR> <File-Path>".
+
+ // Check LookupName length out of bound and incorrect delimiter.
+ if (USRLength >= LineRef.size() || ' ' != LineRef[USRLength])
+ return false;
+
+ LookupName = LineRef.substr(0, USRLength);
+ FilePath = LineRef.substr(USRLength + 1);
+ return true;
+}
+
llvm::Expected<llvm::StringMap<std::string>>
parseCrossTUIndex(StringRef IndexPath) {
std::ifstream ExternalMapFile{std::string(IndexPath)};
@@ -160,24 +189,23 @@ parseCrossTUIndex(StringRef IndexPath) {
std::string Line;
unsigned LineNo = 1;
while (std::getline(ExternalMapFile, Line)) {
- StringRef LineRef{Line};
- const size_t Delimiter = LineRef.find(' ');
- if (Delimiter > 0 && Delimiter != std::string::npos) {
- StringRef LookupName = LineRef.substr(0, Delimiter);
-
- // Store paths with posix-style directory separator.
- SmallString<32> FilePath(LineRef.substr(Delimiter + 1));
- llvm::sys::path::native(FilePath, llvm::sys::path::Style::posix);
-
- bool InsertionOccured;
- std::tie(std::ignore, InsertionOccured) =
- Result.try_emplace(LookupName, FilePath.begin(), FilePath.end());
- if (!InsertionOccured)
- return llvm::make_error<IndexError>(
- index_error_code::multiple_definitions, IndexPath.str(), LineNo);
- } else
+ // Split lookup name and file path
+ StringRef LookupName, FilePathInIndex;
+ if (!parseCrossTUIndexItem(Line, LookupName, FilePathInIndex))
return llvm::make_error<IndexError>(
index_error_code::invalid_index_format, IndexPath.str(), LineNo);
+
+ // Store paths with posix-style directory separator.
+ SmallString<32> FilePath(FilePathInIndex);
+ llvm::sys::path::native(FilePath, llvm::sys::path::Style::posix);
+
+ bool InsertionOccured;
+ std::tie(std::ignore, InsertionOccured) =
+ Result.try_emplace(LookupName, FilePath.begin(), FilePath.end());
+ if (!InsertionOccured)
+ return llvm::make_error<IndexError>(
+ index_error_code::multiple_definitions, IndexPath.str(), LineNo);
+
++LineNo;
}
return Result;
@@ -187,18 +215,14 @@ std::string
createCrossTUIndexString(const llvm::StringMap<std::string> &Index) {
std::ostringstream Result;
for (const auto &E : Index)
- Result << E.getKey().str() << " " << E.getValue() << '\n';
+ Result << E.getKey().size() << ':' << E.getKey().str() << ' '
+ << E.getValue() << '\n';
return Result.str();
}
-bool containsConst(const VarDecl *VD, const ASTContext &ACtx) {
+bool shouldImport(const VarDecl *VD, const ASTContext &ACtx) {
CanQualType CT = ACtx.getCanonicalType(VD->getType());
- if (!CT.isConstQualified()) {
- const RecordType *RTy = CT->getAs<RecordType>();
- if (!RTy || !RTy->hasConstFields())
- return false;
- }
- return true;
+ return CT.isConstQualified() && VD->getType().isTrivialType(ACtx);
}
static bool hasBodyOrInit(const FunctionDecl *D, const FunctionDecl *&DefD) {
@@ -217,13 +241,13 @@ CrossTranslationUnitContext::CrossTranslationUnitContext(CompilerInstance &CI)
CrossTranslationUnitContext::~CrossTranslationUnitContext() {}
-llvm::Optional<std::string>
+std::optional<std::string>
CrossTranslationUnitContext::getLookupName(const NamedDecl *ND) {
SmallString<128> DeclUSR;
bool Ret = index::generateUSRForDecl(ND, DeclUSR);
if (Ret)
return {};
- return std::string(DeclUSR.str());
+ return std::string(DeclUSR);
}
/// Recursively visits the decls of a DeclContext, and returns one with the
@@ -243,7 +267,7 @@ CrossTranslationUnitContext::findDefInDeclContext(const DeclContext *DC,
const T *ResultDecl;
if (!ND || !hasBodyOrInit(ND, ResultDecl))
continue;
- llvm::Optional<std::string> ResultLookupName = getLookupName(ResultDecl);
+ std::optional<std::string> ResultLookupName = getLookupName(ResultDecl);
if (!ResultLookupName || *ResultLookupName != LookupName)
continue;
return ResultDecl;
@@ -259,7 +283,7 @@ llvm::Expected<const T *> CrossTranslationUnitContext::getCrossTUDefinitionImpl(
assert(!hasBodyOrInit(D) &&
"D has a body or init in current translation unit!");
++NumGetCTUCalled;
- const llvm::Optional<std::string> LookupName = getLookupName(D);
+ const std::optional<std::string> LookupName = getLookupName(D);
if (!LookupName)
return llvm::make_error<IndexError>(
index_error_code::failed_to_generate_usr);
@@ -368,11 +392,11 @@ void CrossTranslationUnitContext::emitCrossTUDiagnostics(const IndexError &IE) {
CrossTranslationUnitContext::ASTUnitStorage::ASTUnitStorage(
CompilerInstance &CI)
- : Loader(CI, CI.getAnalyzerOpts()->CTUDir,
- CI.getAnalyzerOpts()->CTUInvocationList),
+ : Loader(CI, CI.getAnalyzerOpts().CTUDir,
+ CI.getAnalyzerOpts().CTUInvocationList),
LoadGuard(CI.getASTContext().getLangOpts().CPlusPlus
- ? CI.getAnalyzerOpts()->CTUImportCppThreshold
- : CI.getAnalyzerOpts()->CTUImportThreshold) {}
+ ? CI.getAnalyzerOpts().CTUImportCppThreshold
+ : CI.getAnalyzerOpts().CTUImportThreshold) {}
llvm::Expected<ASTUnit *>
CrossTranslationUnitContext::ASTUnitStorage::getASTUnitForFile(
@@ -428,13 +452,13 @@ CrossTranslationUnitContext::ASTUnitStorage::getASTUnitForFunction(
ensureCTUIndexLoaded(CrossTUDir, IndexName))
return std::move(IndexLoadError);
- // Check if there is and entry in the index for the function.
+ // Check if there is an entry in the index for the function.
if (!NameFileMap.count(FunctionName)) {
++NumNotInOtherTU;
return llvm::make_error<IndexError>(index_error_code::missing_definition);
}
- // Search in the index for the filename where the definition of FuncitonName
+ // Search in the index for the filename where the definition of FunctionName
// resides.
if (llvm::Expected<ASTUnit *> FoundForFile =
getASTUnitForFile(NameFileMap[FunctionName], DisplayCTUProgress)) {
@@ -527,7 +551,7 @@ CrossTranslationUnitContext::ASTLoader::load(StringRef Identifier) {
// Normalize by removing relative path components.
llvm::sys::path::remove_dots(Path, /*remove_dot_dot*/ true, PathStyle);
- if (Path.endswith(".ast"))
+ if (Path.ends_with(".ast"))
return loadFromDump(Path);
else
return loadFromSource(Path);
@@ -544,7 +568,7 @@ CrossTranslationUnitContext::ASTLoader::loadFromDump(StringRef ASTDumpPath) {
return ASTUnit::LoadFromASTFile(
std::string(ASTDumpPath.str()),
CI.getPCHContainerOperations()->getRawReader(), ASTUnit::LoadEverything,
- Diags, CI.getFileSystemOpts());
+ Diags, CI.getFileSystemOpts(), CI.getHeaderSearchOptsPtr());
}
/// Load the AST from a source-file, which is supposed to be located inside the
@@ -585,10 +609,10 @@ CrossTranslationUnitContext::ASTLoader::loadFromSource(
IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
new DiagnosticsEngine{DiagID, &*DiagOpts, DiagClient});
- return std::unique_ptr<ASTUnit>(ASTUnit::LoadFromCommandLine(
- CommandLineArgs.begin(), (CommandLineArgs.end()),
- CI.getPCHContainerOperations(), Diags,
- CI.getHeaderSearchOpts().ResourceDir));
+ return ASTUnit::LoadFromCommandLine(CommandLineArgs.begin(),
+ (CommandLineArgs.end()),
+ CI.getPCHContainerOperations(), Diags,
+ CI.getHeaderSearchOpts().ResourceDir);
}
llvm::Expected<InvocationListTy>
@@ -636,7 +660,7 @@ parseInvocationList(StringRef FileContent, llvm::sys::path::Style PathStyle) {
StringRef InvocationKey = NativeSourcePath;
- if (InvocationList.find(InvocationKey) != InvocationList.end())
+ if (InvocationList.contains(InvocationKey))
return llvm::make_error<IndexError>(
index_error_code::invocation_list_ambiguous);
@@ -711,20 +735,19 @@ CrossTranslationUnitContext::importDefinitionImpl(const T *D, ASTUnit *Unit) {
auto ToDeclOrError = Importer.Import(D);
if (!ToDeclOrError) {
- handleAllErrors(ToDeclOrError.takeError(),
- [&](const ImportError &IE) {
- switch (IE.Error) {
- case ImportError::NameConflict:
- ++NumNameConflicts;
- break;
- case ImportError::UnsupportedConstruct:
- ++NumUnsupportedNodeFound;
- break;
- case ImportError::Unknown:
- llvm_unreachable("Unknown import error happened.");
- break;
- }
- });
+ handleAllErrors(ToDeclOrError.takeError(), [&](const ASTImportError &IE) {
+ switch (IE.Error) {
+ case ASTImportError::NameConflict:
+ ++NumNameConflicts;
+ break;
+ case ASTImportError::UnsupportedConstruct:
+ ++NumUnsupportedNodeFound;
+ break;
+ case ASTImportError::Unknown:
+ llvm_unreachable("Unknown import error happened.");
+ break;
+ }
+ });
return llvm::make_error<IndexError>(index_error_code::failed_import);
}
auto *ToDecl = cast<T>(*ToDeclOrError);
@@ -770,11 +793,24 @@ CrossTranslationUnitContext::getOrCreateASTImporter(ASTUnit *Unit) {
return *NewImporter;
}
-llvm::Optional<clang::MacroExpansionContext>
+std::optional<clang::MacroExpansionContext>
CrossTranslationUnitContext::getMacroExpansionContextForSourceLocation(
const clang::SourceLocation &ToLoc) const {
// FIXME: Implement: Record such a context for every imported ASTUnit; lookup.
- return llvm::None;
+ return std::nullopt;
+}
+
+bool CrossTranslationUnitContext::isImportedAsNew(const Decl *ToDecl) const {
+ if (!ImporterSharedSt)
+ return false;
+ return ImporterSharedSt->isNewDecl(const_cast<Decl *>(ToDecl));
+}
+
+bool CrossTranslationUnitContext::hasError(const Decl *ToDecl) const {
+ if (!ImporterSharedSt)
+ return false;
+ return static_cast<bool>(
+ ImporterSharedSt->getImportDeclErrorIfAny(const_cast<Decl *>(ToDecl)));
}
} // namespace cross_tu
diff --git a/contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.cpp b/contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.cpp
index 1bc286236a0e..428f87eddc7b 100644
--- a/contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.cpp
+++ b/contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.cpp
@@ -9,16 +9,17 @@
#include "DirectoryScanner.h"
#include "llvm/Support/Path.h"
+#include <optional>
namespace clang {
using namespace llvm;
-Optional<sys::fs::file_status> getFileStatus(StringRef Path) {
+std::optional<sys::fs::file_status> getFileStatus(StringRef Path) {
sys::fs::file_status Status;
std::error_code EC = status(Path, Status);
if (EC)
- return None;
+ return std::nullopt;
return Status;
}
@@ -31,7 +32,7 @@ std::vector<std::string> scanDirectory(StringRef Path) {
End = fs::directory_iterator();
!EC && It != End; It.increment(EC)) {
auto status = getFileStatus(It->path());
- if (!status.hasValue())
+ if (!status)
continue;
Result.emplace_back(sys::path::filename(It->path()));
}
diff --git a/contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.h b/contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.h
index feb8b4ea861e..84cffa5704f7 100644
--- a/contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.h
+++ b/contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.h
@@ -8,6 +8,7 @@
#include "clang/DirectoryWatcher/DirectoryWatcher.h"
#include "llvm/Support/FileSystem.h"
+#include <optional>
#include <string>
#include <vector>
@@ -23,7 +24,7 @@ std::vector<DirectoryWatcher::Event>
getAsFileEvents(const std::vector<std::string> &Scan);
/// Gets status of file (or directory) at \p Path.
-/// \returns llvm::None if \p Path doesn't exist or can't get the status.
-llvm::Optional<llvm::sys::fs::file_status> getFileStatus(llvm::StringRef Path);
+/// \returns std::nullopt if \p Path doesn't exist or can't get the status.
+std::optional<llvm::sys::fs::file_status> getFileStatus(llvm::StringRef Path);
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp b/contrib/llvm-project/clang/lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp
index 963256f268bb..beca9586988b 100644
--- a/contrib/llvm-project/clang/lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp
+++ b/contrib/llvm-project/clang/lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp
@@ -14,7 +14,6 @@
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Errno.h"
#include "llvm/Support/Error.h"
-#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include <atomic>
#include <condition_variable>
@@ -25,6 +24,8 @@
#include <vector>
#include <fcntl.h>
+#include <limits.h>
+#include <optional>
#include <sys/epoll.h>
#include <sys/inotify.h>
#include <unistd.h>
@@ -72,10 +73,10 @@ struct SemaphorePipe {
const int FDWrite;
bool OwnsFDs;
- static llvm::Optional<SemaphorePipe> create() {
+ static std::optional<SemaphorePipe> create() {
int InotifyPollingStopperFDs[2];
if (pipe2(InotifyPollingStopperFDs, O_CLOEXEC) == -1)
- return llvm::None;
+ return std::nullopt;
return SemaphorePipe(InotifyPollingStopperFDs);
}
};
diff --git a/contrib/llvm-project/clang/lib/DirectoryWatcher/mac/DirectoryWatcher-mac.cpp b/contrib/llvm-project/clang/lib/DirectoryWatcher/mac/DirectoryWatcher-mac.cpp
index bdc389516289..b8788bae8171 100644
--- a/contrib/llvm-project/clang/lib/DirectoryWatcher/mac/DirectoryWatcher-mac.cpp
+++ b/contrib/llvm-project/clang/lib/DirectoryWatcher/mac/DirectoryWatcher-mac.cpp
@@ -136,7 +136,7 @@ static void eventStreamCallback(ConstFSEventStreamRef Stream,
llvm::sys::path::filename(Path));
continue;
} else if (Flags & ModifyingFileEvents) {
- if (!getFileStatus(Path).hasValue()) {
+ if (!getFileStatus(Path).has_value()) {
Events.emplace_back(DirectoryWatcher::Event::EventKind::Removed,
llvm::sys::path::filename(Path));
} else {
diff --git a/contrib/llvm-project/clang/lib/DirectoryWatcher/windows/DirectoryWatcher-windows.cpp b/contrib/llvm-project/clang/lib/DirectoryWatcher/windows/DirectoryWatcher-windows.cpp
index 1f040f60ff19..110d402436ee 100644
--- a/contrib/llvm-project/clang/lib/DirectoryWatcher/windows/DirectoryWatcher-windows.cpp
+++ b/contrib/llvm-project/clang/lib/DirectoryWatcher/windows/DirectoryWatcher-windows.cpp
@@ -88,10 +88,15 @@ DirectoryWatcherWindows::DirectoryWatcherWindows(
// handle to the watcher and performing synchronous operations.
{
DWORD Size = GetFinalPathNameByHandleW(DirectoryHandle, NULL, 0, 0);
- std::unique_ptr<WCHAR[]> Buffer{new WCHAR[Size]};
+ std::unique_ptr<WCHAR[]> Buffer{new WCHAR[Size + 1]};
Size = GetFinalPathNameByHandleW(DirectoryHandle, Buffer.get(), Size, 0);
Buffer[Size] = L'\0';
- llvm::sys::windows::UTF16ToUTF8(Buffer.get(), Size, Path);
+ WCHAR *Data = Buffer.get();
+ if (Size >= 4 && ::memcmp(Data, L"\\\\?\\", 8) == 0) {
+ Data += 4;
+ Size -= 4;
+ }
+ llvm::sys::windows::UTF16ToUTF8(Data, Size, Path);
}
size_t EntrySize = sizeof(FILE_NOTIFY_INFORMATION) + MAX_PATH * sizeof(WCHAR);
diff --git a/contrib/llvm-project/clang/lib/Driver/Action.cpp b/contrib/llvm-project/clang/lib/Driver/Action.cpp
index e2d2f6c22de0..849bf6035ebd 100644
--- a/contrib/llvm-project/clang/lib/Driver/Action.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Action.cpp
@@ -25,7 +25,8 @@ const char *Action::getClassName(ActionClass AC) {
return "offload";
case PreprocessJobClass: return "preprocessor";
case PrecompileJobClass: return "precompiler";
- case HeaderModulePrecompileJobClass: return "header-module-precompiler";
+ case ExtractAPIJobClass:
+ return "api-extractor";
case AnalyzeJobClass: return "analyzer";
case MigrateJobClass: return "migrator";
case CompileJobClass: return "compiler";
@@ -41,16 +42,21 @@ const char *Action::getClassName(ActionClass AC) {
return "clang-offload-bundler";
case OffloadUnbundlingJobClass:
return "clang-offload-unbundler";
- case OffloadWrapperJobClass:
- return "clang-offload-wrapper";
+ case OffloadPackagerJobClass:
+ return "clang-offload-packager";
+ case LinkerWrapperJobClass:
+ return "clang-linker-wrapper";
case StaticLibJobClass:
return "static-lib-linker";
+ case BinaryAnalyzeJobClass:
+ return "binary-analyzer";
}
llvm_unreachable("invalid class");
}
-void Action::propagateDeviceOffloadInfo(OffloadKind OKind, const char *OArch) {
+void Action::propagateDeviceOffloadInfo(OffloadKind OKind, const char *OArch,
+ const ToolChain *OToolChain) {
// Offload action set its own kinds on their dependences.
if (Kind == OffloadClass)
return;
@@ -63,9 +69,10 @@ void Action::propagateDeviceOffloadInfo(OffloadKind OKind, const char *OArch) {
assert(!ActiveOffloadKindMask && "Setting a device kind in a host action??");
OffloadingDeviceKind = OKind;
OffloadingArch = OArch;
+ OffloadingToolChain = OToolChain;
for (auto *A : Inputs)
- A->propagateDeviceOffloadInfo(OffloadingDeviceKind, OArch);
+ A->propagateDeviceOffloadInfo(OffloadingDeviceKind, OArch, OToolChain);
}
void Action::propagateHostOffloadInfo(unsigned OKinds, const char *OArch) {
@@ -87,7 +94,8 @@ void Action::propagateOffloadInfo(const Action *A) {
propagateHostOffloadInfo(HK, A->getOffloadingArch());
else
propagateDeviceOffloadInfo(A->getOffloadingDeviceKind(),
- A->getOffloadingArch());
+ A->getOffloadingArch(),
+ A->getOffloadingToolChain());
}
std::string Action::getOffloadingKindPrefix() const {
@@ -188,9 +196,10 @@ OffloadAction::OffloadAction(const DeviceDependences &DDeps, types::ID Ty)
DevToolChains(DDeps.getToolChains()) {
auto &OKinds = DDeps.getOffloadKinds();
auto &BArchs = DDeps.getBoundArchs();
+ auto &OTCs = DDeps.getToolChains();
// If all inputs agree on the same kind, use it also for this action.
- if (llvm::all_of(OKinds, [&](OffloadKind K) { return K == OKinds.front(); }))
+ if (llvm::all_equal(OKinds))
OffloadingDeviceKind = OKinds.front();
// If we have a single dependency, inherit the architecture from it.
@@ -199,7 +208,7 @@ OffloadAction::OffloadAction(const DeviceDependences &DDeps, types::ID Ty)
// Propagate info to the dependencies.
for (unsigned i = 0, e = getInputs().size(); i != e; ++i)
- getInputs()[i]->propagateDeviceOffloadInfo(OKinds[i], BArchs[i]);
+ getInputs()[i]->propagateDeviceOffloadInfo(OKinds[i], BArchs[i], OTCs[i]);
}
OffloadAction::OffloadAction(const HostDependence &HDep,
@@ -214,12 +223,17 @@ OffloadAction::OffloadAction(const HostDependence &HDep,
// Add device inputs and propagate info to the device actions. Do work only if
// we have dependencies.
- for (unsigned i = 0, e = DDeps.getActions().size(); i != e; ++i)
+ for (unsigned i = 0, e = DDeps.getActions().size(); i != e; ++i) {
if (auto *A = DDeps.getActions()[i]) {
getInputs().push_back(A);
A->propagateDeviceOffloadInfo(DDeps.getOffloadKinds()[i],
- DDeps.getBoundArchs()[i]);
+ DDeps.getBoundArchs()[i],
+ DDeps.getToolChains()[i]);
+ // If this action is used to forward single dependency, set the toolchain.
+ if (DDeps.getActions().size() == 1)
+ OffloadingToolChain = DDeps.getToolChains()[i];
}
+ }
}
void OffloadAction::doOnHostDependence(const OffloadActionWorkTy &Work) const {
@@ -298,6 +312,19 @@ void OffloadAction::DeviceDependences::add(Action &A, const ToolChain &TC,
DeviceOffloadKinds.push_back(OKind);
}
+void OffloadAction::DeviceDependences::add(Action &A, const ToolChain &TC,
+ const char *BoundArch,
+ unsigned OffloadKindMask) {
+ DeviceActions.push_back(&A);
+ DeviceToolChains.push_back(&TC);
+ DeviceBoundArchs.push_back(BoundArch);
+
+ // Add each active offloading kind from a mask.
+ for (OffloadKind OKind : {OFK_OpenMP, OFK_Cuda, OFK_HIP})
+ if (OKind & OffloadKindMask)
+ DeviceOffloadKinds.push_back(OKind);
+}
+
OffloadAction::HostDependence::HostDependence(Action &A, const ToolChain &TC,
const char *BoundArch,
const DeviceDependences &DDeps)
@@ -330,12 +357,10 @@ PrecompileJobAction::PrecompileJobAction(ActionClass Kind, Action *Input,
assert(isa<PrecompileJobAction>((Action*)this) && "invalid action kind");
}
-void HeaderModulePrecompileJobAction::anchor() {}
+void ExtractAPIJobAction::anchor() {}
-HeaderModulePrecompileJobAction::HeaderModulePrecompileJobAction(
- Action *Input, types::ID OutputType, const char *ModuleName)
- : PrecompileJobAction(HeaderModulePrecompileJobClass, Input, OutputType),
- ModuleName(ModuleName) {}
+ExtractAPIJobAction::ExtractAPIJobAction(Action *Inputs, types::ID OutputType)
+ : JobAction(ExtractAPIJobClass, Inputs, OutputType) {}
void AnalyzeJobAction::anchor() {}
@@ -412,13 +437,24 @@ void OffloadUnbundlingJobAction::anchor() {}
OffloadUnbundlingJobAction::OffloadUnbundlingJobAction(Action *Input)
: JobAction(OffloadUnbundlingJobClass, Input, Input->getType()) {}
-void OffloadWrapperJobAction::anchor() {}
+void OffloadPackagerJobAction::anchor() {}
+
+OffloadPackagerJobAction::OffloadPackagerJobAction(ActionList &Inputs,
+ types::ID Type)
+ : JobAction(OffloadPackagerJobClass, Inputs, Type) {}
-OffloadWrapperJobAction::OffloadWrapperJobAction(ActionList &Inputs,
- types::ID Type)
- : JobAction(OffloadWrapperJobClass, Inputs, Type) {}
+void LinkerWrapperJobAction::anchor() {}
+
+LinkerWrapperJobAction::LinkerWrapperJobAction(ActionList &Inputs,
+ types::ID Type)
+ : JobAction(LinkerWrapperJobClass, Inputs, Type) {}
void StaticLibJobAction::anchor() {}
StaticLibJobAction::StaticLibJobAction(ActionList &Inputs, types::ID Type)
: JobAction(StaticLibJobClass, Inputs, Type) {}
+
+void BinaryAnalyzeJobAction::anchor() {}
+
+BinaryAnalyzeJobAction::BinaryAnalyzeJobAction(Action *Input, types::ID Type)
+ : JobAction(BinaryAnalyzeJobClass, Input, Type) {}
diff --git a/contrib/llvm-project/clang/lib/Driver/Compilation.cpp b/contrib/llvm-project/clang/lib/Driver/Compilation.cpp
index 0144d808cf12..ad077d5bbfa6 100644
--- a/contrib/llvm-project/clang/lib/Driver/Compilation.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Compilation.cpp
@@ -15,15 +15,14 @@
#include "clang/Driver/Options.h"
#include "clang/Driver/ToolChain.h"
#include "clang/Driver/Util.h"
-#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Option/OptSpecifier.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Triple.h"
#include <cassert>
#include <string>
#include <system_error>
@@ -102,7 +101,7 @@ Compilation::getArgsForToolChain(const ToolChain *TC, StringRef BoundArch,
}
// Add allocated arguments to the final DAL.
- for (auto ArgPtr : AllocatedArgs)
+ for (auto *ArgPtr : AllocatedArgs)
Entry->AddSynthesizedArg(ArgPtr);
}
@@ -162,7 +161,8 @@ bool Compilation::CleanupFileMap(const ArgStringMap &Files,
}
int Compilation::ExecuteCommand(const Command &C,
- const Command *&FailingCommand) const {
+ const Command *&FailingCommand,
+ bool LogOnly) const {
if ((getDriver().CCPrintOptions ||
getArgs().hasArg(options::OPT_v)) && !getDriver().CCGenDiagnostics) {
raw_ostream *OS = &llvm::errs();
@@ -174,7 +174,7 @@ int Compilation::ExecuteCommand(const Command &C,
!getDriver().CCPrintOptionsFilename.empty()) {
std::error_code EC;
OwnedStream.reset(new llvm::raw_fd_ostream(
- getDriver().CCPrintOptionsFilename.c_str(), EC,
+ getDriver().CCPrintOptionsFilename, EC,
llvm::sys::fs::OF_Append | llvm::sys::fs::OF_TextWithCRLF));
if (EC) {
getDriver().Diag(diag::err_drv_cc_print_options_failure)
@@ -191,6 +191,9 @@ int Compilation::ExecuteCommand(const Command &C,
C.Print(*OS, "\n", /*Quote=*/getDriver().CCPrintOptions);
}
+ if (LogOnly)
+ return 0;
+
std::string Error;
bool ExecutionFailed;
int Res = C.Execute(Redirects, &Error, &ExecutionFailed);
@@ -237,7 +240,8 @@ static bool InputsOk(const Command &C,
}
void Compilation::ExecuteJobs(const JobList &Jobs,
- FailingCommandList &FailingCommands) const {
+ FailingCommandList &FailingCommands,
+ bool LogOnly) const {
// According to UNIX standard, driver need to continue compiling all the
// inputs on the command line even one of them failed.
// In all but CLMode, execute all the jobs unless the necessary inputs for the
@@ -246,7 +250,7 @@ void Compilation::ExecuteJobs(const JobList &Jobs,
if (!InputsOk(Job, FailingCommands))
continue;
const Command *FailingCommand = nullptr;
- if (int Res = ExecuteCommand(Job, FailingCommand)) {
+ if (int Res = ExecuteCommand(Job, FailingCommand, LogOnly)) {
FailingCommands.push_back(std::make_pair(Res, FailingCommand));
// Bail as soon as one command fails in cl driver mode.
if (TheDriver.IsCLMode())
@@ -278,9 +282,9 @@ void Compilation::initCompilationForDiagnostics() {
options::OPT_o, options::OPT_MD, options::OPT_MMD, options::OPT_M,
options::OPT_MM, options::OPT_MF, options::OPT_MG, options::OPT_MJ,
options::OPT_MQ, options::OPT_MT, options::OPT_MV};
- for (unsigned i = 0, e = llvm::array_lengthof(OutputOpts); i != e; ++i) {
- if (TranslatedArgs->hasArg(OutputOpts[i]))
- TranslatedArgs->eraseArg(OutputOpts[i]);
+ for (const auto &Opt : OutputOpts) {
+ if (TranslatedArgs->hasArg(Opt))
+ TranslatedArgs->eraseArg(Opt);
}
TranslatedArgs->ClaimAllArgs();
@@ -292,7 +296,7 @@ void Compilation::initCompilationForDiagnostics() {
TCArgs.clear();
// Redirect stdout/stderr to /dev/null.
- Redirects = {None, {""}, {""}};
+ Redirects = {std::nullopt, {""}, {""}};
// Temporary files added by diagnostics should be kept.
ForceKeepTempFiles = true;
@@ -302,6 +306,6 @@ StringRef Compilation::getSysRoot() const {
return getDriver().SysRoot;
}
-void Compilation::Redirect(ArrayRef<Optional<StringRef>> Redirects) {
+void Compilation::Redirect(ArrayRef<std::optional<StringRef>> Redirects) {
this->Redirects = Redirects;
}
diff --git a/contrib/llvm-project/clang/lib/Driver/Distro.cpp b/contrib/llvm-project/clang/lib/Driver/Distro.cpp
index c4cf4e48b5b8..a7e7f169dc14 100644
--- a/contrib/llvm-project/clang/lib/Driver/Distro.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Distro.cpp
@@ -11,11 +11,11 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/ErrorOr.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Threading.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/Triple.h"
using namespace clang::driver;
using namespace clang;
@@ -34,7 +34,7 @@ static Distro::DistroType DetectOsRelease(llvm::vfs::FileSystem &VFS) {
// Obviously this can be improved a lot.
for (StringRef Line : Lines)
- if (Version == Distro::UnknownDistro && Line.startswith("ID="))
+ if (Version == Distro::UnknownDistro && Line.starts_with("ID="))
Version = llvm::StringSwitch<Distro::DistroType>(Line.substr(3))
.Case("alpine", Distro::AlpineLinux)
.Case("fedora", Distro::Fedora)
@@ -60,7 +60,7 @@ static Distro::DistroType DetectLsbRelease(llvm::vfs::FileSystem &VFS) {
for (StringRef Line : Lines)
if (Version == Distro::UnknownDistro &&
- Line.startswith("DISTRIB_CODENAME="))
+ Line.starts_with("DISTRIB_CODENAME="))
Version = llvm::StringSwitch<Distro::DistroType>(Line.substr(17))
.Case("hardy", Distro::UbuntuHardy)
.Case("intrepid", Distro::UbuntuIntrepid)
@@ -90,6 +90,11 @@ static Distro::DistroType DetectLsbRelease(llvm::vfs::FileSystem &VFS) {
.Case("groovy", Distro::UbuntuGroovy)
.Case("hirsute", Distro::UbuntuHirsute)
.Case("impish", Distro::UbuntuImpish)
+ .Case("jammy", Distro::UbuntuJammy)
+ .Case("kinetic", Distro::UbuntuKinetic)
+ .Case("lunar", Distro::UbuntuLunar)
+ .Case("mantic", Distro::UbuntuMantic)
+ .Case("noble", Distro::UbuntuNoble)
.Default(Distro::UnknownDistro);
return Version;
}
@@ -108,21 +113,21 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS) {
if (Version != Distro::UnknownDistro)
return Version;
- // Otherwise try some distro-specific quirks for RedHat...
+ // Otherwise try some distro-specific quirks for Red Hat...
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> File =
VFS.getBufferForFile("/etc/redhat-release");
if (File) {
StringRef Data = File.get()->getBuffer();
- if (Data.startswith("Fedora release"))
+ if (Data.starts_with("Fedora release"))
return Distro::Fedora;
- if (Data.startswith("Red Hat Enterprise Linux") ||
- Data.startswith("CentOS") || Data.startswith("Scientific Linux")) {
- if (Data.find("release 7") != StringRef::npos)
+ if (Data.starts_with("Red Hat Enterprise Linux") ||
+ Data.starts_with("CentOS") || Data.starts_with("Scientific Linux")) {
+ if (Data.contains("release 7"))
return Distro::RHEL7;
- else if (Data.find("release 6") != StringRef::npos)
+ else if (Data.contains("release 6"))
return Distro::RHEL6;
- else if (Data.find("release 5") != StringRef::npos)
+ else if (Data.contains("release 5"))
return Distro::RHEL5;
}
return Distro::UnknownDistro;
@@ -150,6 +155,10 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS) {
return Distro::DebianBuster;
case 11:
return Distro::DebianBullseye;
+ case 12:
+ return Distro::DebianBookworm;
+ case 13:
+ return Distro::DebianTrixie;
default:
return Distro::UnknownDistro;
}
@@ -161,6 +170,8 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS) {
.Case("stretch/sid", Distro::DebianStretch)
.Case("buster/sid", Distro::DebianBuster)
.Case("bullseye/sid", Distro::DebianBullseye)
+ .Case("bookworm/sid", Distro::DebianBookworm)
+ .Case("trixie/sid", Distro::DebianTrixie)
.Default(Distro::UnknownDistro);
}
@@ -171,7 +182,7 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS) {
SmallVector<StringRef, 8> Lines;
Data.split(Lines, "\n");
for (const StringRef &Line : Lines) {
- if (!Line.trim().startswith("VERSION"))
+ if (!Line.trim().starts_with("VERSION"))
continue;
std::pair<StringRef, StringRef> SplitLine = Line.split('=');
// Old versions have split VERSION and PATCHLEVEL
diff --git a/contrib/llvm-project/clang/lib/Driver/Driver.cpp b/contrib/llvm-project/clang/lib/Driver/Driver.cpp
index 94a7553e273b..93cddf742d52 100644
--- a/contrib/llvm-project/clang/lib/Driver/Driver.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Driver.cpp
@@ -11,11 +11,10 @@
#include "ToolChains/AMDGPU.h"
#include "ToolChains/AMDGPUOpenMP.h"
#include "ToolChains/AVR.h"
-#include "ToolChains/Ananas.h"
+#include "ToolChains/Arch/RISCV.h"
#include "ToolChains/BareMetal.h"
+#include "ToolChains/CSKYToolChain.h"
#include "ToolChains/Clang.h"
-#include "ToolChains/CloudABI.h"
-#include "ToolChains/Contiki.h"
#include "ToolChains/CrossWindows.h"
#include "ToolChains/Cuda.h"
#include "ToolChains/Darwin.h"
@@ -23,7 +22,9 @@
#include "ToolChains/FreeBSD.h"
#include "ToolChains/Fuchsia.h"
#include "ToolChains/Gnu.h"
-#include "ToolChains/HIP.h"
+#include "ToolChains/HIPAMD.h"
+#include "ToolChains/HIPSPV.h"
+#include "ToolChains/HLSL.h"
#include "ToolChains/Haiku.h"
#include "ToolChains/Hexagon.h"
#include "ToolChains/Hurd.h"
@@ -32,15 +33,16 @@
#include "ToolChains/MSP430.h"
#include "ToolChains/MSVC.h"
#include "ToolChains/MinGW.h"
-#include "ToolChains/Minix.h"
#include "ToolChains/MipsLinux.h"
-#include "ToolChains/Myriad.h"
#include "ToolChains/NaCl.h"
#include "ToolChains/NetBSD.h"
+#include "ToolChains/OHOS.h"
#include "ToolChains/OpenBSD.h"
+#include "ToolChains/PPCFreeBSD.h"
#include "ToolChains/PPCLinux.h"
#include "ToolChains/PS4CPU.h"
#include "ToolChains/RISCVToolchain.h"
+#include "ToolChains/SPIRV.h"
#include "ToolChains/Solaris.h"
#include "ToolChains/TCE.h"
#include "ToolChains/VEToolchain.h"
@@ -56,17 +58,19 @@
#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Job.h"
#include "clang/Driver/Options.h"
+#include "clang/Driver/Phases.h"
#include "clang/Driver/SanitizerArgs.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
+#include "clang/Driver/Types.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Config/llvm-config.h"
+#include "llvm/MC/TargetRegistry.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Option/OptSpecifier.h"
@@ -77,18 +81,21 @@
#include "llvm/Support/ExitCodes.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FormatVariadic.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
+#include "llvm/Support/RISCVISAInfo.h"
#include "llvm/Support/StringSaver.h"
-#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Host.h"
+#include <cstdlib> // ::getenv
#include <map>
#include <memory>
+#include <optional>
+#include <set>
#include <utility>
#if LLVM_ON_UNIX
#include <unistd.h> // getpid
@@ -98,9 +105,59 @@ using namespace clang::driver;
using namespace clang;
using namespace llvm::opt;
-static llvm::Triple getHIPOffloadTargetTriple() {
- static const llvm::Triple T("amdgcn-amd-amdhsa");
- return T;
+static std::optional<llvm::Triple> getOffloadTargetTriple(const Driver &D,
+ const ArgList &Args) {
+ auto OffloadTargets = Args.getAllArgValues(options::OPT_offload_EQ);
+ // Offload compilation flow does not support multiple targets for now. We
+ // need the HIPActionBuilder (and possibly the CudaActionBuilder{,Base}too)
+ // to support multiple tool chains first.
+ switch (OffloadTargets.size()) {
+ default:
+ D.Diag(diag::err_drv_only_one_offload_target_supported);
+ return std::nullopt;
+ case 0:
+ D.Diag(diag::err_drv_invalid_or_unsupported_offload_target) << "";
+ return std::nullopt;
+ case 1:
+ break;
+ }
+ return llvm::Triple(OffloadTargets[0]);
+}
+
+static std::optional<llvm::Triple>
+getNVIDIAOffloadTargetTriple(const Driver &D, const ArgList &Args,
+ const llvm::Triple &HostTriple) {
+ if (!Args.hasArg(options::OPT_offload_EQ)) {
+ return llvm::Triple(HostTriple.isArch64Bit() ? "nvptx64-nvidia-cuda"
+ : "nvptx-nvidia-cuda");
+ }
+ auto TT = getOffloadTargetTriple(D, Args);
+ if (TT && (TT->getArch() == llvm::Triple::spirv32 ||
+ TT->getArch() == llvm::Triple::spirv64)) {
+ if (Args.hasArg(options::OPT_emit_llvm))
+ return TT;
+ D.Diag(diag::err_drv_cuda_offload_only_emit_bc);
+ return std::nullopt;
+ }
+ D.Diag(diag::err_drv_invalid_or_unsupported_offload_target) << TT->str();
+ return std::nullopt;
+}
+static std::optional<llvm::Triple>
+getHIPOffloadTargetTriple(const Driver &D, const ArgList &Args) {
+ if (!Args.hasArg(options::OPT_offload_EQ)) {
+ return llvm::Triple("amdgcn-amd-amdhsa"); // Default HIP triple.
+ }
+ auto TT = getOffloadTargetTriple(D, Args);
+ if (!TT)
+ return std::nullopt;
+ if (TT->getArch() == llvm::Triple::amdgcn &&
+ TT->getVendor() == llvm::Triple::AMD &&
+ TT->getOS() == llvm::Triple::AMDHSA)
+ return TT;
+ if (TT->getArch() == llvm::Triple::spirv64)
+ return TT;
+ D.Diag(diag::err_drv_invalid_or_unsupported_offload_target) << TT->str();
+ return std::nullopt;
}
// static
@@ -123,26 +180,29 @@ std::string Driver::GetResourcesPath(StringRef BinaryPath,
// path of the embedding binary, which for LLVM binaries will be in bin/.
// ../lib gets us to lib/ in both cases.
P = llvm::sys::path::parent_path(Dir);
- llvm::sys::path::append(P, Twine("lib") + CLANG_LIBDIR_SUFFIX, "clang",
- CLANG_VERSION_STRING);
+ // This search path is also created in the COFF driver of lld, so any
+ // changes here also needs to happen in lld/COFF/Driver.cpp
+ llvm::sys::path::append(P, CLANG_INSTALL_LIBDIR_BASENAME, "clang",
+ CLANG_VERSION_MAJOR_STRING);
}
- return std::string(P.str());
+ return std::string(P);
}
Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
DiagnosticsEngine &Diags, std::string Title,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS)
: Diags(Diags), VFS(std::move(VFS)), Mode(GCCMode),
- SaveTemps(SaveTempsNone), BitcodeEmbed(EmbedNone), LTOMode(LTOK_None),
+ SaveTemps(SaveTempsNone), BitcodeEmbed(EmbedNone),
+ Offload(OffloadHostDevice), CXX20HeaderType(HeaderMode_None),
+ ModulesModeCXX20(false), LTOMode(LTOK_None),
ClangExecutable(ClangExecutable), SysRoot(DEFAULT_SYSROOT),
- DriverTitle(Title), CCPrintStatReportFilename(), CCPrintOptionsFilename(),
- CCPrintHeadersFilename(), CCLogDiagnosticsFilename(),
- CCCPrintBindings(false), CCPrintOptions(false), CCPrintHeaders(false),
+ DriverTitle(Title), CCCPrintBindings(false), CCPrintOptions(false),
CCLogDiagnostics(false), CCGenDiagnostics(false),
- CCPrintProcessStats(false), TargetTriple(TargetTriple),
- CCCGenericGCCName(""), Saver(Alloc), CheckInputsExist(true),
- GenReproducer(false), SuppressMissingInputWarning(false) {
+ CCPrintProcessStats(false), CCPrintInternalStats(false),
+ TargetTriple(TargetTriple), Saver(Alloc), PrependArg(nullptr),
+ CheckInputsExist(true), ProbePrecompiled(true),
+ SuppressMissingInputWarning(false) {
// Provide a sane fallback if no VFS is specified.
if (!this->VFS)
this->VFS = llvm::vfs::getRealFileSystem();
@@ -162,7 +222,11 @@ Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
SystemConfigDir = CLANG_CONFIG_FILE_SYSTEM_DIR;
#endif
#if defined(CLANG_CONFIG_FILE_USER_DIR)
- UserConfigDir = CLANG_CONFIG_FILE_USER_DIR;
+ {
+ SmallString<128> P;
+ llvm::sys::fs::expand_tilde(CLANG_CONFIG_FILE_USER_DIR, P);
+ UserConfigDir = static_cast<std::string>(P);
+ }
#endif
// Compute the path to the resource directory.
@@ -170,40 +234,30 @@ Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
}
void Driver::setDriverMode(StringRef Value) {
- static const std::string OptName =
+ static StringRef OptName =
getOpts().getOption(options::OPT_driver_mode).getPrefixedName();
- if (auto M = llvm::StringSwitch<llvm::Optional<DriverMode>>(Value)
+ if (auto M = llvm::StringSwitch<std::optional<DriverMode>>(Value)
.Case("gcc", GCCMode)
.Case("g++", GXXMode)
.Case("cpp", CPPMode)
.Case("cl", CLMode)
.Case("flang", FlangMode)
- .Default(None))
+ .Case("dxc", DXCMode)
+ .Default(std::nullopt))
Mode = *M;
else
Diag(diag::err_drv_unsupported_option_argument) << OptName << Value;
}
InputArgList Driver::ParseArgStrings(ArrayRef<const char *> ArgStrings,
- bool IsClCompatMode,
- bool &ContainsError) {
+ bool UseDriverMode, bool &ContainsError) {
llvm::PrettyStackTraceString CrashInfo("Command line argument parsing");
ContainsError = false;
- unsigned IncludedFlagsBitmask;
- unsigned ExcludedFlagsBitmask;
- std::tie(IncludedFlagsBitmask, ExcludedFlagsBitmask) =
- getIncludeExcludeOptionFlagMasks(IsClCompatMode);
-
- // Make sure that Flang-only options don't pollute the Clang output
- // TODO: Make sure that Clang-only options don't pollute Flang output
- if (!IsFlangMode())
- ExcludedFlagsBitmask |= options::FlangOnlyOption;
-
+ llvm::opt::Visibility VisibilityMask = getOptionVisibilityMask(UseDriverMode);
unsigned MissingArgIndex, MissingArgCount;
- InputArgList Args =
- getOpts().ParseArgs(ArgStrings, MissingArgIndex, MissingArgCount,
- IncludedFlagsBitmask, ExcludedFlagsBitmask);
+ InputArgList Args = getOpts().ParseArgs(ArgStrings, MissingArgIndex,
+ MissingArgCount, VisibilityMask);
// Check for missing argument error.
if (MissingArgCount) {
@@ -217,19 +271,9 @@ InputArgList Driver::ParseArgStrings(ArrayRef<const char *> ArgStrings,
// Check for unsupported options.
for (const Arg *A : Args) {
if (A->getOption().hasFlag(options::Unsupported)) {
- unsigned DiagID;
- auto ArgString = A->getAsString(Args);
- std::string Nearest;
- if (getOpts().findNearest(
- ArgString, Nearest, IncludedFlagsBitmask,
- ExcludedFlagsBitmask | options::Unsupported) > 1) {
- DiagID = diag::err_drv_unsupported_opt;
- Diag(DiagID) << ArgString;
- } else {
- DiagID = diag::err_drv_unsupported_opt_with_suggestion;
- Diag(DiagID) << ArgString << Nearest;
- }
- ContainsError |= Diags.getDiagnosticLevel(DiagID, SourceLocation()) >
+ Diag(diag::err_drv_unsupported_opt) << A->getAsString(Args);
+ ContainsError |= Diags.getDiagnosticLevel(diag::err_drv_unsupported_opt,
+ SourceLocation()) >
DiagnosticsEngine::Warning;
continue;
}
@@ -247,11 +291,17 @@ InputArgList Driver::ParseArgStrings(ArrayRef<const char *> ArgStrings,
unsigned DiagID;
auto ArgString = A->getAsString(Args);
std::string Nearest;
- if (getOpts().findNearest(
- ArgString, Nearest, IncludedFlagsBitmask, ExcludedFlagsBitmask) > 1) {
- DiagID = IsCLMode() ? diag::warn_drv_unknown_argument_clang_cl
- : diag::err_drv_unknown_argument;
- Diags.Report(DiagID) << ArgString;
+ if (getOpts().findNearest(ArgString, Nearest, VisibilityMask) > 1) {
+ if (!IsCLMode() &&
+ getOpts().findExact(ArgString, Nearest,
+ llvm::opt::Visibility(options::CC1Option))) {
+ DiagID = diag::err_drv_unknown_argument_with_suggestion;
+ Diags.Report(DiagID) << ArgString << "-Xclang " + Nearest;
+ } else {
+ DiagID = IsCLMode() ? diag::warn_drv_unknown_argument_clang_cl
+ : diag::err_drv_unknown_argument;
+ Diags.Report(DiagID) << ArgString;
+ }
} else {
DiagID = IsCLMode()
? diag::warn_drv_unknown_argument_clang_cl_with_suggestion
@@ -262,6 +312,18 @@ InputArgList Driver::ParseArgStrings(ArrayRef<const char *> ArgStrings,
DiagnosticsEngine::Warning;
}
+ for (const Arg *A : Args.filtered(options::OPT_o)) {
+ if (ArgStrings[A->getIndex()] == A->getSpelling())
+ continue;
+
+ // Warn on joined arguments that are similar to a long argument.
+ std::string ArgString = ArgStrings[A->getIndex()];
+ std::string Nearest;
+ if (getOpts().findExact("-" + ArgString, Nearest, VisibilityMask))
+ Diags.Report(diag::warn_drv_potentially_misspelled_joined_argument)
+ << A->getAsString(Args) << Nearest;
+ }
+
return Args;
}
@@ -277,14 +339,19 @@ phases::ID Driver::getFinalPhase(const DerivedArgList &DAL,
if (CCCIsCPP() || (PhaseArg = DAL.getLastArg(options::OPT_E)) ||
(PhaseArg = DAL.getLastArg(options::OPT__SLASH_EP)) ||
(PhaseArg = DAL.getLastArg(options::OPT_M, options::OPT_MM)) ||
- (PhaseArg = DAL.getLastArg(options::OPT__SLASH_P))) {
+ (PhaseArg = DAL.getLastArg(options::OPT__SLASH_P)) ||
+ CCGenDiagnostics) {
FinalPhase = phases::Preprocess;
- // --precompile only runs up to precompilation.
- } else if ((PhaseArg = DAL.getLastArg(options::OPT__precompile))) {
+ // --precompile only runs up to precompilation.
+ // Options that cause the output of C++20 compiled module interfaces or
+ // header units have the same effect.
+ } else if ((PhaseArg = DAL.getLastArg(options::OPT__precompile)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT_extract_api)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT_fmodule_header,
+ options::OPT_fmodule_header_EQ))) {
FinalPhase = phases::Precompile;
-
- // -{fsyntax-only,-analyze,emit-ast} only run up to the compiler.
+ // -{fsyntax-only,-analyze,emit-ast} only run up to the compiler.
} else if ((PhaseArg = DAL.getLastArg(options::OPT_fsyntax_only)) ||
(PhaseArg = DAL.getLastArg(options::OPT_print_supported_cpus)) ||
(PhaseArg = DAL.getLastArg(options::OPT_module_file_info)) ||
@@ -304,6 +371,9 @@ phases::ID Driver::getFinalPhase(const DerivedArgList &DAL,
} else if ((PhaseArg = DAL.getLastArg(options::OPT_c))) {
FinalPhase = phases::Assemble;
+ } else if ((PhaseArg = DAL.getLastArg(options::OPT_emit_interface_stubs))) {
+ FinalPhase = phases::IfsMerge;
+
// Otherwise do everything.
} else
FinalPhase = phases::Link;
@@ -331,7 +401,20 @@ DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
bool HasNostdlib = Args.hasArg(options::OPT_nostdlib);
bool HasNostdlibxx = Args.hasArg(options::OPT_nostdlibxx);
bool HasNodefaultlib = Args.hasArg(options::OPT_nodefaultlibs);
+ bool IgnoreUnused = false;
for (Arg *A : Args) {
+ if (IgnoreUnused)
+ A->claim();
+
+ if (A->getOption().matches(options::OPT_start_no_unused_arguments)) {
+ IgnoreUnused = true;
+ continue;
+ }
+ if (A->getOption().matches(options::OPT_end_no_unused_arguments)) {
+ IgnoreUnused = false;
+ continue;
+ }
+
// Unfortunately, we have to parse some forwarding options (-Xassembler,
// -Xlinker, -Xpreprocessor) because we either integrate their functionality
// (assembler and preprocessor), or bypass a previous driver ('collect2').
@@ -397,9 +480,13 @@ DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
DAL->append(A);
}
+ // DXC mode quits before assembly if an output object file isn't specified.
+ if (IsDXCMode() && !Args.hasArg(options::OPT_dxc_Fo))
+ DAL->AddFlagArg(nullptr, Opts.getOption(options::OPT_S));
+
// Enforce -static if -miamcu is present.
if (Args.hasFlag(options::OPT_miamcu, options::OPT_mno_iamcu, false))
- DAL->AddFlagArg(0, Opts.getOption(options::OPT_static));
+ DAL->AddFlagArg(nullptr, Opts.getOption(options::OPT_static));
// Add a default value of -mlinker-version=, if one was given and the user
// didn't specify one.
@@ -432,48 +519,45 @@ static llvm::Triple computeTargetTriple(const Driver &D,
// GNU/Hurd's triples should have been -hurd-gnu*, but were historically made
// -gnu* only, and we can not change this, so we have to detect that case as
// being the Hurd OS.
- if (TargetTriple.find("-unknown-gnu") != StringRef::npos ||
- TargetTriple.find("-pc-gnu") != StringRef::npos)
+ if (TargetTriple.contains("-unknown-gnu") || TargetTriple.contains("-pc-gnu"))
Target.setOSName("hurd");
// Handle Apple-specific options available here.
if (Target.isOSBinFormatMachO()) {
// If an explicit Darwin arch name is given, that trumps all.
if (!DarwinArchName.empty()) {
- tools::darwin::setTripleTypeForMachOArchName(Target, DarwinArchName);
+ tools::darwin::setTripleTypeForMachOArchName(Target, DarwinArchName,
+ Args);
return Target;
}
// Handle the Darwin '-arch' flag.
if (Arg *A = Args.getLastArg(options::OPT_arch)) {
StringRef ArchName = A->getValue();
- tools::darwin::setTripleTypeForMachOArchName(Target, ArchName);
+ tools::darwin::setTripleTypeForMachOArchName(Target, ArchName, Args);
}
}
// Handle pseudo-target flags '-mlittle-endian'/'-EL' and
// '-mbig-endian'/'-EB'.
- if (Arg *A = Args.getLastArg(options::OPT_mlittle_endian,
- options::OPT_mbig_endian)) {
- if (A->getOption().matches(options::OPT_mlittle_endian)) {
- llvm::Triple LE = Target.getLittleEndianArchVariant();
- if (LE.getArch() != llvm::Triple::UnknownArch)
- Target = std::move(LE);
- } else {
- llvm::Triple BE = Target.getBigEndianArchVariant();
- if (BE.getArch() != llvm::Triple::UnknownArch)
- Target = std::move(BE);
+ if (Arg *A = Args.getLastArgNoClaim(options::OPT_mlittle_endian,
+ options::OPT_mbig_endian)) {
+ llvm::Triple T = A->getOption().matches(options::OPT_mlittle_endian)
+ ? Target.getLittleEndianArchVariant()
+ : Target.getBigEndianArchVariant();
+ if (T.getArch() != llvm::Triple::UnknownArch) {
+ Target = std::move(T);
+ Args.claimAllArgs(options::OPT_mlittle_endian, options::OPT_mbig_endian);
}
}
// Skip further flag support on OSes which don't support '-m32' or '-m64'.
- if (Target.getArch() == llvm::Triple::tce ||
- Target.getOS() == llvm::Triple::Minix)
+ if (Target.getArch() == llvm::Triple::tce)
return Target;
// On AIX, the env OBJECT_MODE may affect the resulting arch variant.
if (Target.isOSAIX()) {
- if (Optional<std::string> ObjectModeValue =
+ if (std::optional<std::string> ObjectModeValue =
llvm::sys::Process::GetEnv("OBJECT_MODE")) {
StringRef ObjectMode = *ObjectModeValue;
llvm::Triple::ArchType AT = llvm::Triple::UnknownArch;
@@ -491,13 +575,21 @@ static llvm::Triple computeTargetTriple(const Driver &D,
}
}
+ // The `-maix[32|64]` flags are only valid for AIX targets.
+ if (Arg *A = Args.getLastArgNoClaim(options::OPT_maix32, options::OPT_maix64);
+ A && !Target.isOSAIX())
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << Target.str();
+
// Handle pseudo-target flags '-m64', '-mx32', '-m32' and '-m16'.
Arg *A = Args.getLastArg(options::OPT_m64, options::OPT_mx32,
- options::OPT_m32, options::OPT_m16);
+ options::OPT_m32, options::OPT_m16,
+ options::OPT_maix32, options::OPT_maix64);
if (A) {
llvm::Triple::ArchType AT = llvm::Triple::UnknownArch;
- if (A->getOption().matches(options::OPT_m64)) {
+ if (A->getOption().matches(options::OPT_m64) ||
+ A->getOption().matches(options::OPT_maix64)) {
AT = Target.get64BitArchVariant().getArch();
if (Target.getEnvironment() == llvm::Triple::GNUX32)
Target.setEnvironment(llvm::Triple::GNU);
@@ -510,7 +602,8 @@ static llvm::Triple computeTargetTriple(const Driver &D,
Target.setEnvironment(llvm::Triple::MuslX32);
else
Target.setEnvironment(llvm::Triple::GNUX32);
- } else if (A->getOption().matches(options::OPT_m32)) {
+ } else if (A->getOption().matches(options::OPT_m32) ||
+ A->getOption().matches(options::OPT_maix32)) {
AT = Target.get32BitArchVariant().getArch();
if (Target.getEnvironment() == llvm::Triple::GNUX32)
Target.setEnvironment(llvm::Triple::GNU);
@@ -522,8 +615,11 @@ static llvm::Triple computeTargetTriple(const Driver &D,
Target.setEnvironment(llvm::Triple::CODE16);
}
- if (AT != llvm::Triple::UnknownArch && AT != Target.getArch())
+ if (AT != llvm::Triple::UnknownArch && AT != Target.getArch()) {
Target.setArch(AT);
+ if (Target.isWindowsGNUEnvironment())
+ toolchains::MinGW::fixTripleArch(D, Target, Args);
+ }
}
// Handle -miamcu flag.
@@ -547,36 +643,44 @@ static llvm::Triple computeTargetTriple(const Driver &D,
// If target is MIPS adjust the target triple
// accordingly to provided ABI name.
- A = Args.getLastArg(options::OPT_mabi_EQ);
- if (A && Target.isMIPS()) {
- StringRef ABIName = A->getValue();
- if (ABIName == "32") {
- Target = Target.get32BitArchVariant();
- if (Target.getEnvironment() == llvm::Triple::GNUABI64 ||
- Target.getEnvironment() == llvm::Triple::GNUABIN32)
- Target.setEnvironment(llvm::Triple::GNU);
- } else if (ABIName == "n32") {
- Target = Target.get64BitArchVariant();
- if (Target.getEnvironment() == llvm::Triple::GNU ||
- Target.getEnvironment() == llvm::Triple::GNUABI64)
- Target.setEnvironment(llvm::Triple::GNUABIN32);
- } else if (ABIName == "64") {
- Target = Target.get64BitArchVariant();
- if (Target.getEnvironment() == llvm::Triple::GNU ||
- Target.getEnvironment() == llvm::Triple::GNUABIN32)
- Target.setEnvironment(llvm::Triple::GNUABI64);
+ if (Target.isMIPS()) {
+ if ((A = Args.getLastArg(options::OPT_mabi_EQ))) {
+ StringRef ABIName = A->getValue();
+ if (ABIName == "32") {
+ Target = Target.get32BitArchVariant();
+ if (Target.getEnvironment() == llvm::Triple::GNUABI64 ||
+ Target.getEnvironment() == llvm::Triple::GNUABIN32)
+ Target.setEnvironment(llvm::Triple::GNU);
+ } else if (ABIName == "n32") {
+ Target = Target.get64BitArchVariant();
+ if (Target.getEnvironment() == llvm::Triple::GNU ||
+ Target.getEnvironment() == llvm::Triple::GNUABI64)
+ Target.setEnvironment(llvm::Triple::GNUABIN32);
+ } else if (ABIName == "64") {
+ Target = Target.get64BitArchVariant();
+ if (Target.getEnvironment() == llvm::Triple::GNU ||
+ Target.getEnvironment() == llvm::Triple::GNUABIN32)
+ Target.setEnvironment(llvm::Triple::GNUABI64);
+ }
}
}
// If target is RISC-V adjust the target triple according to
// provided architecture name
- A = Args.getLastArg(options::OPT_march_EQ);
- if (A && Target.isRISCV()) {
- StringRef ArchName = A->getValue();
- if (ArchName.startswith_insensitive("rv32"))
- Target.setArch(llvm::Triple::riscv32);
- else if (ArchName.startswith_insensitive("rv64"))
- Target.setArch(llvm::Triple::riscv64);
+ if (Target.isRISCV()) {
+ if (Args.hasArg(options::OPT_march_EQ) ||
+ Args.hasArg(options::OPT_mcpu_EQ)) {
+ StringRef ArchName = tools::riscv::getRISCVArch(Args, Target);
+ auto ISAInfo = llvm::RISCVISAInfo::parseArchString(
+ ArchName, /*EnableExperimentalExtensions=*/true);
+ if (!llvm::errorToBool(ISAInfo.takeError())) {
+ unsigned XLen = (*ISAInfo)->getXLen();
+ if (XLen == 32)
+ Target.setArch(llvm::Triple::riscv32);
+ else if (XLen == 64)
+ Target.setArch(llvm::Triple::riscv64);
+ }
+ }
}
return Target;
@@ -585,53 +689,45 @@ static llvm::Triple computeTargetTriple(const Driver &D,
// Parse the LTO options and record the type of LTO compilation
// based on which -f(no-)?lto(=.*)? or -f(no-)?offload-lto(=.*)?
// option occurs last.
-static llvm::Optional<driver::LTOKind>
-parseLTOMode(Driver &D, const llvm::opt::ArgList &Args, OptSpecifier OptPos,
- OptSpecifier OptNeg, OptSpecifier OptEq, bool IsOffload) {
- driver::LTOKind LTOMode = LTOK_None;
- // Non-offload LTO allows -flto=auto and -flto=jobserver. Offload LTO does
- // not support those options.
- if (!Args.hasFlag(OptPos, OptEq, OptNeg, false) &&
- (IsOffload ||
- (!Args.hasFlag(options::OPT_flto_EQ_auto, options::OPT_fno_lto, false) &&
- !Args.hasFlag(options::OPT_flto_EQ_jobserver, options::OPT_fno_lto,
- false))))
- return None;
-
- StringRef LTOName("full");
+static driver::LTOKind parseLTOMode(Driver &D, const llvm::opt::ArgList &Args,
+ OptSpecifier OptEq, OptSpecifier OptNeg) {
+ if (!Args.hasFlag(OptEq, OptNeg, false))
+ return LTOK_None;
const Arg *A = Args.getLastArg(OptEq);
- if (A)
- LTOName = A->getValue();
+ StringRef LTOName = A->getValue();
- LTOMode = llvm::StringSwitch<LTOKind>(LTOName)
- .Case("full", LTOK_Full)
- .Case("thin", LTOK_Thin)
- .Default(LTOK_Unknown);
+ driver::LTOKind LTOMode = llvm::StringSwitch<LTOKind>(LTOName)
+ .Case("full", LTOK_Full)
+ .Case("thin", LTOK_Thin)
+ .Default(LTOK_Unknown);
if (LTOMode == LTOK_Unknown) {
- assert(A);
D.Diag(diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << A->getValue();
- return None;
+ << A->getSpelling() << A->getValue();
+ return LTOK_None;
}
return LTOMode;
}
// Parse the LTO options.
void Driver::setLTOMode(const llvm::opt::ArgList &Args) {
- LTOMode = LTOK_None;
- if (auto M = parseLTOMode(*this, Args, options::OPT_flto,
- options::OPT_fno_lto, options::OPT_flto_EQ,
- /*IsOffload=*/false))
- LTOMode = M.getValue();
-
- OffloadLTOMode = LTOK_None;
- if (auto M = parseLTOMode(*this, Args, options::OPT_foffload_lto,
- options::OPT_fno_offload_lto,
- options::OPT_foffload_lto_EQ,
- /*IsOffload=*/true))
- OffloadLTOMode = M.getValue();
+ LTOMode =
+ parseLTOMode(*this, Args, options::OPT_flto_EQ, options::OPT_fno_lto);
+
+ OffloadLTOMode = parseLTOMode(*this, Args, options::OPT_foffload_lto_EQ,
+ options::OPT_fno_offload_lto);
+
+ // Try to enable `-foffload-lto=full` if `-fopenmp-target-jit` is on.
+ if (Args.hasFlag(options::OPT_fopenmp_target_jit,
+ options::OPT_fno_openmp_target_jit, false)) {
+ if (Arg *A = Args.getLastArg(options::OPT_foffload_lto_EQ,
+ options::OPT_fno_offload_lto))
+ if (OffloadLTOMode != LTOK_Full)
+ Diag(diag::err_drv_incompatible_options)
+ << A->getSpelling() << "-fopenmp-target-jit";
+ OffloadLTOMode = LTOK_Full;
+ }
}
/// Compute the desired OpenMP runtime from the flags provided.
@@ -651,7 +747,7 @@ Driver::OpenMPRuntimeKind Driver::getOpenMPRuntime(const ArgList &Args) const {
if (RT == OMPRT_Unknown) {
if (A)
Diag(diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << A->getValue();
+ << A->getSpelling() << A->getValue();
else
// FIXME: We could use a nicer diagnostic here.
Diag(diag::err_drv_unsupported_opt) << "-fopenmp";
@@ -677,7 +773,8 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
[](std::pair<types::ID, const llvm::opt::Arg *> &I) {
return types::isHIP(I.first);
}) ||
- C.getInputArgs().hasArg(options::OPT_hip_link);
+ C.getInputArgs().hasArg(options::OPT_hip_link) ||
+ C.getInputArgs().hasArg(options::OPT_hipstdpar);
if (IsCuda && IsHIP) {
Diag(clang::diag::err_drv_mix_cuda_hip);
return;
@@ -685,108 +782,187 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
if (IsCuda) {
const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
const llvm::Triple &HostTriple = HostTC->getTriple();
- StringRef DeviceTripleStr;
auto OFK = Action::OFK_Cuda;
- DeviceTripleStr =
- HostTriple.isArch64Bit() ? "nvptx64-nvidia-cuda" : "nvptx-nvidia-cuda";
- llvm::Triple CudaTriple(DeviceTripleStr);
+ auto CudaTriple =
+ getNVIDIAOffloadTargetTriple(*this, C.getInputArgs(), HostTriple);
+ if (!CudaTriple)
+ return;
// Use the CUDA and host triples as the key into the ToolChains map,
// because the device toolchain we create depends on both.
- auto &CudaTC = ToolChains[CudaTriple.str() + "/" + HostTriple.str()];
+ auto &CudaTC = ToolChains[CudaTriple->str() + "/" + HostTriple.str()];
if (!CudaTC) {
CudaTC = std::make_unique<toolchains::CudaToolChain>(
- *this, CudaTriple, *HostTC, C.getInputArgs(), OFK);
+ *this, *CudaTriple, *HostTC, C.getInputArgs());
+
+ // Emit a warning if the detected CUDA version is too new.
+ CudaInstallationDetector &CudaInstallation =
+ static_cast<toolchains::CudaToolChain &>(*CudaTC).CudaInstallation;
+ if (CudaInstallation.isValid())
+ CudaInstallation.WarnIfUnsupportedVersion();
}
C.addOffloadDeviceToolChain(CudaTC.get(), OFK);
} else if (IsHIP) {
+ if (auto *OMPTargetArg =
+ C.getInputArgs().getLastArg(options::OPT_fopenmp_targets_EQ)) {
+ Diag(clang::diag::err_drv_unsupported_opt_for_language_mode)
+ << OMPTargetArg->getSpelling() << "HIP";
+ return;
+ }
const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
- const llvm::Triple &HostTriple = HostTC->getTriple();
auto OFK = Action::OFK_HIP;
- llvm::Triple HIPTriple = getHIPOffloadTargetTriple();
- // Use the HIP and host triples as the key into the ToolChains map,
- // because the device toolchain we create depends on both.
- auto &HIPTC = ToolChains[HIPTriple.str() + "/" + HostTriple.str()];
- if (!HIPTC) {
- HIPTC = std::make_unique<toolchains::HIPToolChain>(
- *this, HIPTriple, *HostTC, C.getInputArgs());
- }
- C.addOffloadDeviceToolChain(HIPTC.get(), OFK);
+ auto HIPTriple = getHIPOffloadTargetTriple(*this, C.getInputArgs());
+ if (!HIPTriple)
+ return;
+ auto *HIPTC = &getOffloadingDeviceToolChain(C.getInputArgs(), *HIPTriple,
+ *HostTC, OFK);
+ assert(HIPTC && "Could not create offloading device tool chain.");
+ C.addOffloadDeviceToolChain(HIPTC, OFK);
}
//
// OpenMP
//
// We need to generate an OpenMP toolchain if the user specified targets with
- // the -fopenmp-targets option.
- if (Arg *OpenMPTargets =
- C.getInputArgs().getLastArg(options::OPT_fopenmp_targets_EQ)) {
- if (OpenMPTargets->getNumValues()) {
- // We expect that -fopenmp-targets is always used in conjunction with the
- // option -fopenmp specifying a valid runtime with offloading support,
- // i.e. libomp or libiomp.
- bool HasValidOpenMPRuntime = C.getInputArgs().hasFlag(
- options::OPT_fopenmp, options::OPT_fopenmp_EQ,
- options::OPT_fno_openmp, false);
- if (HasValidOpenMPRuntime) {
- OpenMPRuntimeKind OpenMPKind = getOpenMPRuntime(C.getInputArgs());
- HasValidOpenMPRuntime =
- OpenMPKind == OMPRT_OMP || OpenMPKind == OMPRT_IOMP5;
+ // the -fopenmp-targets option or used --offload-arch with OpenMP enabled.
+ bool IsOpenMPOffloading =
+ C.getInputArgs().hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
+ options::OPT_fno_openmp, false) &&
+ (C.getInputArgs().hasArg(options::OPT_fopenmp_targets_EQ) ||
+ C.getInputArgs().hasArg(options::OPT_offload_arch_EQ));
+ if (IsOpenMPOffloading) {
+ // We expect that -fopenmp-targets is always used in conjunction with the
+ // option -fopenmp specifying a valid runtime with offloading support, i.e.
+ // libomp or libiomp.
+ OpenMPRuntimeKind RuntimeKind = getOpenMPRuntime(C.getInputArgs());
+ if (RuntimeKind != OMPRT_OMP && RuntimeKind != OMPRT_IOMP5) {
+ Diag(clang::diag::err_drv_expecting_fopenmp_with_fopenmp_targets);
+ return;
+ }
+
+ llvm::StringMap<llvm::DenseSet<StringRef>> DerivedArchs;
+ llvm::StringMap<StringRef> FoundNormalizedTriples;
+ std::multiset<StringRef> OpenMPTriples;
+
+ // If the user specified -fopenmp-targets= we create a toolchain for each
+ // valid triple. Otherwise, if only --offload-arch= was specified we instead
+ // attempt to derive the appropriate toolchains from the arguments.
+ if (Arg *OpenMPTargets =
+ C.getInputArgs().getLastArg(options::OPT_fopenmp_targets_EQ)) {
+ if (OpenMPTargets && !OpenMPTargets->getNumValues()) {
+ Diag(clang::diag::warn_drv_empty_joined_argument)
+ << OpenMPTargets->getAsString(C.getInputArgs());
+ return;
+ }
+ for (StringRef T : OpenMPTargets->getValues())
+ OpenMPTriples.insert(T);
+ } else if (C.getInputArgs().hasArg(options::OPT_offload_arch_EQ) &&
+ !IsHIP && !IsCuda) {
+ const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
+ auto AMDTriple = getHIPOffloadTargetTriple(*this, C.getInputArgs());
+ auto NVPTXTriple = getNVIDIAOffloadTargetTriple(*this, C.getInputArgs(),
+ HostTC->getTriple());
+
+ // Attempt to deduce the offloading triple from the set of architectures.
+ // We can only correctly deduce NVPTX / AMDGPU triples currently. We need
+ // to temporarily create these toolchains so that we can access tools for
+ // inferring architectures.
+ llvm::DenseSet<StringRef> Archs;
+ if (NVPTXTriple) {
+ auto TempTC = std::make_unique<toolchains::CudaToolChain>(
+ *this, *NVPTXTriple, *HostTC, C.getInputArgs());
+ for (StringRef Arch : getOffloadArchs(
+ C, C.getArgs(), Action::OFK_OpenMP, &*TempTC, true))
+ Archs.insert(Arch);
+ }
+ if (AMDTriple) {
+ auto TempTC = std::make_unique<toolchains::AMDGPUOpenMPToolChain>(
+ *this, *AMDTriple, *HostTC, C.getInputArgs());
+ for (StringRef Arch : getOffloadArchs(
+ C, C.getArgs(), Action::OFK_OpenMP, &*TempTC, true))
+ Archs.insert(Arch);
+ }
+ if (!AMDTriple && !NVPTXTriple) {
+ for (StringRef Arch :
+ getOffloadArchs(C, C.getArgs(), Action::OFK_OpenMP, nullptr, true))
+ Archs.insert(Arch);
}
- if (HasValidOpenMPRuntime) {
- llvm::StringMap<const char *> FoundNormalizedTriples;
- for (const char *Val : OpenMPTargets->getValues()) {
- llvm::Triple TT(Val);
- std::string NormalizedName = TT.normalize();
-
- // Make sure we don't have a duplicate triple.
- auto Duplicate = FoundNormalizedTriples.find(NormalizedName);
- if (Duplicate != FoundNormalizedTriples.end()) {
- Diag(clang::diag::warn_drv_omp_offload_target_duplicate)
- << Val << Duplicate->second;
- continue;
- }
+ for (StringRef Arch : Archs) {
+ if (NVPTXTriple && IsNVIDIAGpuArch(StringToCudaArch(
+ getProcessorFromTargetID(*NVPTXTriple, Arch)))) {
+ DerivedArchs[NVPTXTriple->getTriple()].insert(Arch);
+ } else if (AMDTriple &&
+ IsAMDGpuArch(StringToCudaArch(
+ getProcessorFromTargetID(*AMDTriple, Arch)))) {
+ DerivedArchs[AMDTriple->getTriple()].insert(Arch);
+ } else {
+ Diag(clang::diag::err_drv_failed_to_deduce_target_from_arch) << Arch;
+ return;
+ }
+ }
- // Store the current triple so that we can check for duplicates in the
- // following iterations.
- FoundNormalizedTriples[NormalizedName] = Val;
-
- // If the specified target is invalid, emit a diagnostic.
- if (TT.getArch() == llvm::Triple::UnknownArch)
- Diag(clang::diag::err_drv_invalid_omp_target) << Val;
- else {
- const ToolChain *TC;
- // Device toolchains have to be selected differently. They pair host
- // and device in their implementation.
- if (TT.isNVPTX() || TT.isAMDGCN()) {
- const ToolChain *HostTC =
- C.getSingleOffloadToolChain<Action::OFK_Host>();
- assert(HostTC && "Host toolchain should be always defined.");
- auto &DeviceTC =
- ToolChains[TT.str() + "/" + HostTC->getTriple().normalize()];
- if (!DeviceTC) {
- if (TT.isNVPTX())
- DeviceTC = std::make_unique<toolchains::CudaToolChain>(
- *this, TT, *HostTC, C.getInputArgs(), Action::OFK_OpenMP);
- else if (TT.isAMDGCN())
- DeviceTC =
- std::make_unique<toolchains::AMDGPUOpenMPToolChain>(
- *this, TT, *HostTC, C.getInputArgs());
- else
- assert(DeviceTC && "Device toolchain not defined.");
- }
-
- TC = DeviceTC.get();
- } else
- TC = &getToolChain(C.getInputArgs(), TT);
- C.addOffloadDeviceToolChain(TC, Action::OFK_OpenMP);
+ // If the set is empty then we failed to find a native architecture.
+ if (Archs.empty()) {
+ Diag(clang::diag::err_drv_failed_to_deduce_target_from_arch)
+ << "native";
+ return;
+ }
+
+ for (const auto &TripleAndArchs : DerivedArchs)
+ OpenMPTriples.insert(TripleAndArchs.first());
+ }
+
+ for (StringRef Val : OpenMPTriples) {
+ llvm::Triple TT(ToolChain::getOpenMPTriple(Val));
+ std::string NormalizedName = TT.normalize();
+
+ // Make sure we don't have a duplicate triple.
+ auto Duplicate = FoundNormalizedTriples.find(NormalizedName);
+ if (Duplicate != FoundNormalizedTriples.end()) {
+ Diag(clang::diag::warn_drv_omp_offload_target_duplicate)
+ << Val << Duplicate->second;
+ continue;
+ }
+
+ // Store the current triple so that we can check for duplicates in the
+ // following iterations.
+ FoundNormalizedTriples[NormalizedName] = Val;
+
+ // If the specified target is invalid, emit a diagnostic.
+ if (TT.getArch() == llvm::Triple::UnknownArch)
+ Diag(clang::diag::err_drv_invalid_omp_target) << Val;
+ else {
+ const ToolChain *TC;
+ // Device toolchains have to be selected differently. They pair host
+ // and device in their implementation.
+ if (TT.isNVPTX() || TT.isAMDGCN()) {
+ const ToolChain *HostTC =
+ C.getSingleOffloadToolChain<Action::OFK_Host>();
+ assert(HostTC && "Host toolchain should be always defined.");
+ auto &DeviceTC =
+ ToolChains[TT.str() + "/" + HostTC->getTriple().normalize()];
+ if (!DeviceTC) {
+ if (TT.isNVPTX())
+ DeviceTC = std::make_unique<toolchains::CudaToolChain>(
+ *this, TT, *HostTC, C.getInputArgs());
+ else if (TT.isAMDGCN())
+ DeviceTC = std::make_unique<toolchains::AMDGPUOpenMPToolChain>(
+ *this, TT, *HostTC, C.getInputArgs());
+ else
+ assert(DeviceTC && "Device toolchain not defined.");
}
- }
- } else
- Diag(clang::diag::err_drv_expecting_fopenmp_with_fopenmp_targets);
- } else
- Diag(clang::diag::warn_drv_empty_joined_argument)
- << OpenMPTargets->getAsString(C.getInputArgs());
+
+ TC = DeviceTC.get();
+ } else
+ TC = &getToolChain(C.getInputArgs(), TT);
+ C.addOffloadDeviceToolChain(TC, Action::OFK_OpenMP);
+ if (DerivedArchs.contains(TT.getTriple()))
+ KnownArchs[TC] = DerivedArchs[TT.getTriple()];
+ }
+ }
+ } else if (C.getInputArgs().hasArg(options::OPT_fopenmp_targets_EQ)) {
+ Diag(clang::diag::err_drv_expecting_fopenmp_with_fopenmp_targets);
+ return;
}
//
@@ -794,69 +970,78 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
//
}
-/// Looks the given directories for the specified file.
-///
-/// \param[out] FilePath File path, if the file was found.
-/// \param[in] Dirs Directories used for the search.
-/// \param[in] FileName Name of the file to search for.
-/// \return True if file was found.
-///
-/// Looks for file specified by FileName sequentially in directories specified
-/// by Dirs.
-///
-static bool searchForFile(SmallVectorImpl<char> &FilePath,
- ArrayRef<StringRef> Dirs, StringRef FileName) {
- SmallString<128> WPath;
- for (const StringRef &Dir : Dirs) {
- if (Dir.empty())
- continue;
- WPath.clear();
- llvm::sys::path::append(WPath, Dir, FileName);
- llvm::sys::path::native(WPath);
- if (llvm::sys::fs::is_regular_file(WPath)) {
- FilePath = std::move(WPath);
- return true;
- }
- }
- return false;
+static void appendOneArg(InputArgList &Args, const Arg *Opt,
+ const Arg *BaseArg) {
+ // The args for config files or /clang: flags belong to different InputArgList
+ // objects than Args. This copies an Arg from one of those other InputArgLists
+ // to the ownership of Args.
+ unsigned Index = Args.MakeIndex(Opt->getSpelling());
+ Arg *Copy = new llvm::opt::Arg(Opt->getOption(), Args.getArgString(Index),
+ Index, BaseArg);
+ Copy->getValues() = Opt->getValues();
+ if (Opt->isClaimed())
+ Copy->claim();
+ Copy->setOwnsValues(Opt->getOwnsValues());
+ Opt->setOwnsValues(false);
+ Args.append(Copy);
}
-bool Driver::readConfigFile(StringRef FileName) {
+bool Driver::readConfigFile(StringRef FileName,
+ llvm::cl::ExpansionContext &ExpCtx) {
+ // Try opening the given file.
+ auto Status = getVFS().status(FileName);
+ if (!Status) {
+ Diag(diag::err_drv_cannot_open_config_file)
+ << FileName << Status.getError().message();
+ return true;
+ }
+ if (Status->getType() != llvm::sys::fs::file_type::regular_file) {
+ Diag(diag::err_drv_cannot_open_config_file)
+ << FileName << "not a regular file";
+ return true;
+ }
+
// Try reading the given file.
SmallVector<const char *, 32> NewCfgArgs;
- if (!llvm::cl::readConfigFile(FileName, Saver, NewCfgArgs)) {
- Diag(diag::err_drv_cannot_read_config_file) << FileName;
+ if (llvm::Error Err = ExpCtx.readConfigFile(FileName, NewCfgArgs)) {
+ Diag(diag::err_drv_cannot_read_config_file)
+ << FileName << toString(std::move(Err));
return true;
}
// Read options from config file.
llvm::SmallString<128> CfgFileName(FileName);
llvm::sys::path::native(CfgFileName);
- ConfigFile = std::string(CfgFileName);
bool ContainErrors;
- CfgOptions = std::make_unique<InputArgList>(
- ParseArgStrings(NewCfgArgs, IsCLMode(), ContainErrors));
- if (ContainErrors) {
- CfgOptions.reset();
+ std::unique_ptr<InputArgList> NewOptions = std::make_unique<InputArgList>(
+ ParseArgStrings(NewCfgArgs, /*UseDriverMode=*/true, ContainErrors));
+ if (ContainErrors)
return true;
- }
-
- if (CfgOptions->hasArg(options::OPT_config)) {
- CfgOptions.reset();
- Diag(diag::err_drv_nested_config_file);
- return true;
- }
// Claim all arguments that come from a configuration file so that the driver
// does not warn on any that is unused.
- for (Arg *A : *CfgOptions)
+ for (Arg *A : *NewOptions)
A->claim();
+
+ if (!CfgOptions)
+ CfgOptions = std::move(NewOptions);
+ else {
+ // If this is a subsequent config file, append options to the previous one.
+ for (auto *Opt : *NewOptions) {
+ const Arg *BaseArg = &Opt->getBaseArg();
+ if (BaseArg == Opt)
+ BaseArg = nullptr;
+ appendOneArg(*CfgOptions, Opt, BaseArg);
+ }
+ }
+ ConfigFiles.push_back(std::string(CfgFileName));
return false;
}
-bool Driver::loadConfigFile() {
- std::string CfgFileName;
- bool FileSpecifiedExplicitly = false;
+bool Driver::loadConfigFiles() {
+ llvm::cl::ExpansionContext ExpCtx(Saver.getAllocator(),
+ llvm::cl::tokenizeConfigFile);
+ ExpCtx.setVFS(&getVFS());
// Process options that change search path for config files.
if (CLOptions) {
@@ -864,144 +1049,141 @@ bool Driver::loadConfigFile() {
SmallString<128> CfgDir;
CfgDir.append(
CLOptions->getLastArgValue(options::OPT_config_system_dir_EQ));
- if (!CfgDir.empty()) {
- if (llvm::sys::fs::make_absolute(CfgDir).value() != 0)
- SystemConfigDir.clear();
- else
- SystemConfigDir = std::string(CfgDir.begin(), CfgDir.end());
- }
+ if (CfgDir.empty() || getVFS().makeAbsolute(CfgDir))
+ SystemConfigDir.clear();
+ else
+ SystemConfigDir = static_cast<std::string>(CfgDir);
}
if (CLOptions->hasArg(options::OPT_config_user_dir_EQ)) {
SmallString<128> CfgDir;
- CfgDir.append(
- CLOptions->getLastArgValue(options::OPT_config_user_dir_EQ));
- if (!CfgDir.empty()) {
- if (llvm::sys::fs::make_absolute(CfgDir).value() != 0)
- UserConfigDir.clear();
- else
- UserConfigDir = std::string(CfgDir.begin(), CfgDir.end());
- }
+ llvm::sys::fs::expand_tilde(
+ CLOptions->getLastArgValue(options::OPT_config_user_dir_EQ), CfgDir);
+ if (CfgDir.empty() || getVFS().makeAbsolute(CfgDir))
+ UserConfigDir.clear();
+ else
+ UserConfigDir = static_cast<std::string>(CfgDir);
}
}
- // First try to find config file specified in command line.
- if (CLOptions) {
- std::vector<std::string> ConfigFiles =
- CLOptions->getAllArgValues(options::OPT_config);
- if (ConfigFiles.size() > 1) {
- if (!std::all_of(ConfigFiles.begin(), ConfigFiles.end(),
- [ConfigFiles](const std::string &s) {
- return s == ConfigFiles[0];
- })) {
- Diag(diag::err_drv_duplicate_config);
- return true;
- }
- }
+ // Prepare list of directories where config file is searched for.
+ StringRef CfgFileSearchDirs[] = {UserConfigDir, SystemConfigDir, Dir};
+ ExpCtx.setSearchDirs(CfgFileSearchDirs);
- if (!ConfigFiles.empty()) {
- CfgFileName = ConfigFiles.front();
- assert(!CfgFileName.empty());
+ // First try to load configuration from the default files, return on error.
+ if (loadDefaultConfigFiles(ExpCtx))
+ return true;
+ // Then load configuration files specified explicitly.
+ SmallString<128> CfgFilePath;
+ if (CLOptions) {
+ for (auto CfgFileName : CLOptions->getAllArgValues(options::OPT_config)) {
// If argument contains directory separator, treat it as a path to
// configuration file.
if (llvm::sys::path::has_parent_path(CfgFileName)) {
- SmallString<128> CfgFilePath;
- if (llvm::sys::path::is_relative(CfgFileName))
- llvm::sys::fs::current_path(CfgFilePath);
- llvm::sys::path::append(CfgFilePath, CfgFileName);
- if (!llvm::sys::fs::is_regular_file(CfgFilePath)) {
- Diag(diag::err_drv_config_file_not_exist) << CfgFilePath;
- return true;
+ CfgFilePath.assign(CfgFileName);
+ if (llvm::sys::path::is_relative(CfgFilePath)) {
+ if (getVFS().makeAbsolute(CfgFilePath)) {
+ Diag(diag::err_drv_cannot_open_config_file)
+ << CfgFilePath << "cannot get absolute path";
+ return true;
+ }
}
- return readConfigFile(CfgFilePath);
+ } else if (!ExpCtx.findConfigFile(CfgFileName, CfgFilePath)) {
+ // Report an error that the config file could not be found.
+ Diag(diag::err_drv_config_file_not_found) << CfgFileName;
+ for (const StringRef &SearchDir : CfgFileSearchDirs)
+ if (!SearchDir.empty())
+ Diag(diag::note_drv_config_file_searched_in) << SearchDir;
+ return true;
}
- FileSpecifiedExplicitly = true;
+ // Try to read the config file, return on error.
+ if (readConfigFile(CfgFilePath, ExpCtx))
+ return true;
}
}
- // If config file is not specified explicitly, try to deduce configuration
- // from executable name. For instance, an executable 'armv7l-clang' will
- // search for config file 'armv7l-clang.cfg'.
- if (CfgFileName.empty() && !ClangNameParts.TargetPrefix.empty())
- CfgFileName = ClangNameParts.TargetPrefix + '-' + ClangNameParts.ModeSuffix;
+ // No error occurred.
+ return false;
+}
- if (CfgFileName.empty())
+bool Driver::loadDefaultConfigFiles(llvm::cl::ExpansionContext &ExpCtx) {
+ // Disable default config if CLANG_NO_DEFAULT_CONFIG is set to a non-empty
+ // value.
+ if (const char *NoConfigEnv = ::getenv("CLANG_NO_DEFAULT_CONFIG")) {
+ if (*NoConfigEnv)
+ return false;
+ }
+ if (CLOptions && CLOptions->hasArg(options::OPT_no_default_config))
return false;
- // Determine architecture part of the file name, if it is present.
- StringRef CfgFileArch = CfgFileName;
- size_t ArchPrefixLen = CfgFileArch.find('-');
- if (ArchPrefixLen == StringRef::npos)
- ArchPrefixLen = CfgFileArch.size();
- llvm::Triple CfgTriple;
- CfgFileArch = CfgFileArch.take_front(ArchPrefixLen);
- CfgTriple = llvm::Triple(llvm::Triple::normalize(CfgFileArch));
- if (CfgTriple.getArch() == llvm::Triple::ArchType::UnknownArch)
- ArchPrefixLen = 0;
-
- if (!StringRef(CfgFileName).endswith(".cfg"))
- CfgFileName += ".cfg";
-
- // If config file starts with architecture name and command line options
- // redefine architecture (with options like -m32 -LE etc), try finding new
- // config file with that architecture.
- SmallString<128> FixedConfigFile;
- size_t FixedArchPrefixLen = 0;
- if (ArchPrefixLen) {
- // Get architecture name from config file name like 'i386.cfg' or
- // 'armv7l-clang.cfg'.
- // Check if command line options changes effective triple.
- llvm::Triple EffectiveTriple = computeTargetTriple(*this,
- CfgTriple.getTriple(), *CLOptions);
- if (CfgTriple.getArch() != EffectiveTriple.getArch()) {
- FixedConfigFile = EffectiveTriple.getArchName();
- FixedArchPrefixLen = FixedConfigFile.size();
- // Append the rest of original file name so that file name transforms
- // like: i386-clang.cfg -> x86_64-clang.cfg.
- if (ArchPrefixLen < CfgFileName.size())
- FixedConfigFile += CfgFileName.substr(ArchPrefixLen);
- }
+ std::string RealMode = getExecutableForDriverMode(Mode);
+ std::string Triple;
+
+ // If name prefix is present, no --target= override was passed via CLOptions
+ // and the name prefix is not a valid triple, force it for backwards
+ // compatibility.
+ if (!ClangNameParts.TargetPrefix.empty() &&
+ computeTargetTriple(*this, "/invalid/", *CLOptions).str() ==
+ "/invalid/") {
+ llvm::Triple PrefixTriple{ClangNameParts.TargetPrefix};
+ if (PrefixTriple.getArch() == llvm::Triple::UnknownArch ||
+ PrefixTriple.isOSUnknown())
+ Triple = PrefixTriple.str();
+ }
+
+ // Otherwise, use the real triple as used by the driver.
+ if (Triple.empty()) {
+ llvm::Triple RealTriple =
+ computeTargetTriple(*this, TargetTriple, *CLOptions);
+ Triple = RealTriple.str();
+ assert(!Triple.empty());
+ }
+
+ // Search for config files in the following order:
+ // 1. <triple>-<mode>.cfg using real driver mode
+ // (e.g. i386-pc-linux-gnu-clang++.cfg).
+ // 2. <triple>-<mode>.cfg using executable suffix
+ // (e.g. i386-pc-linux-gnu-clang-g++.cfg for *clang-g++).
+ // 3. <triple>.cfg + <mode>.cfg using real driver mode
+ // (e.g. i386-pc-linux-gnu.cfg + clang++.cfg).
+ // 4. <triple>.cfg + <mode>.cfg using executable suffix
+ // (e.g. i386-pc-linux-gnu.cfg + clang-g++.cfg for *clang-g++).
+
+ // Try loading <triple>-<mode>.cfg, and return if we find a match.
+ SmallString<128> CfgFilePath;
+ std::string CfgFileName = Triple + '-' + RealMode + ".cfg";
+ if (ExpCtx.findConfigFile(CfgFileName, CfgFilePath))
+ return readConfigFile(CfgFilePath, ExpCtx);
+
+ bool TryModeSuffix = !ClangNameParts.ModeSuffix.empty() &&
+ ClangNameParts.ModeSuffix != RealMode;
+ if (TryModeSuffix) {
+ CfgFileName = Triple + '-' + ClangNameParts.ModeSuffix + ".cfg";
+ if (ExpCtx.findConfigFile(CfgFileName, CfgFilePath))
+ return readConfigFile(CfgFilePath, ExpCtx);
+ }
+
+ // Try loading <mode>.cfg, and return if loading failed. If a matching file
+ // was not found, still proceed on to try <triple>.cfg.
+ CfgFileName = RealMode + ".cfg";
+ if (ExpCtx.findConfigFile(CfgFileName, CfgFilePath)) {
+ if (readConfigFile(CfgFilePath, ExpCtx))
+ return true;
+ } else if (TryModeSuffix) {
+ CfgFileName = ClangNameParts.ModeSuffix + ".cfg";
+ if (ExpCtx.findConfigFile(CfgFileName, CfgFilePath) &&
+ readConfigFile(CfgFilePath, ExpCtx))
+ return true;
}
- // Prepare list of directories where config file is searched for.
- StringRef CfgFileSearchDirs[] = {UserConfigDir, SystemConfigDir, Dir};
-
- // Try to find config file. First try file with corrected architecture.
- llvm::SmallString<128> CfgFilePath;
- if (!FixedConfigFile.empty()) {
- if (searchForFile(CfgFilePath, CfgFileSearchDirs, FixedConfigFile))
- return readConfigFile(CfgFilePath);
- // If 'x86_64-clang.cfg' was not found, try 'x86_64.cfg'.
- FixedConfigFile.resize(FixedArchPrefixLen);
- FixedConfigFile.append(".cfg");
- if (searchForFile(CfgFilePath, CfgFileSearchDirs, FixedConfigFile))
- return readConfigFile(CfgFilePath);
- }
-
- // Then try original file name.
- if (searchForFile(CfgFilePath, CfgFileSearchDirs, CfgFileName))
- return readConfigFile(CfgFilePath);
-
- // Finally try removing driver mode part: 'x86_64-clang.cfg' -> 'x86_64.cfg'.
- if (!ClangNameParts.ModeSuffix.empty() &&
- !ClangNameParts.TargetPrefix.empty()) {
- CfgFileName.assign(ClangNameParts.TargetPrefix);
- CfgFileName.append(".cfg");
- if (searchForFile(CfgFilePath, CfgFileSearchDirs, CfgFileName))
- return readConfigFile(CfgFilePath);
- }
-
- // Report error but only if config file was specified explicitly, by option
- // --config. If it was deduced from executable name, it is not an error.
- if (FileSpecifiedExplicitly) {
- Diag(diag::err_drv_config_file_not_found) << CfgFileName;
- for (const StringRef &SearchDir : CfgFileSearchDirs)
- if (!SearchDir.empty())
- Diag(diag::note_drv_config_file_searched_in) << SearchDir;
- return true;
- }
+ // Try loading <triple>.cfg and return if we find a match.
+ CfgFileName = Triple + ".cfg";
+ if (ExpCtx.findConfigFile(CfgFileName, CfgFilePath))
+ return readConfigFile(CfgFilePath, ExpCtx);
+ // If we were unable to find a config file deduced from executable name,
+ // that is not an error.
return false;
}
@@ -1023,32 +1205,17 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
// Arguments specified in command line.
bool ContainsError;
CLOptions = std::make_unique<InputArgList>(
- ParseArgStrings(ArgList.slice(1), IsCLMode(), ContainsError));
+ ParseArgStrings(ArgList.slice(1), /*UseDriverMode=*/true, ContainsError));
// Try parsing configuration file.
if (!ContainsError)
- ContainsError = loadConfigFile();
+ ContainsError = loadConfigFiles();
bool HasConfigFile = !ContainsError && (CfgOptions.get() != nullptr);
// All arguments, from both config file and command line.
InputArgList Args = std::move(HasConfigFile ? std::move(*CfgOptions)
: std::move(*CLOptions));
- // The args for config files or /clang: flags belong to different InputArgList
- // objects than Args. This copies an Arg from one of those other InputArgLists
- // to the ownership of Args.
- auto appendOneArg = [&Args](const Arg *Opt, const Arg *BaseArg) {
- unsigned Index = Args.MakeIndex(Opt->getSpelling());
- Arg *Copy = new llvm::opt::Arg(Opt->getOption(), Args.getArgString(Index),
- Index, BaseArg);
- Copy->getValues() = Opt->getValues();
- if (Opt->isClaimed())
- Copy->claim();
- Copy->setOwnsValues(Opt->getOwnsValues());
- Opt->setOwnsValues(false);
- Args.append(Copy);
- };
-
if (HasConfigFile)
for (auto *Opt : *CLOptions) {
if (Opt->getOption().matches(options::OPT_config))
@@ -1056,7 +1223,7 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
const Arg *BaseArg = &Opt->getBaseArg();
if (BaseArg == Opt)
BaseArg = nullptr;
- appendOneArg(Opt, BaseArg);
+ appendOneArg(Args, Opt, BaseArg);
}
// In CL mode, look for any pass-through arguments
@@ -1071,11 +1238,12 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
// Parse any pass through args using default clang processing rather
// than clang-cl processing.
auto CLModePassThroughOptions = std::make_unique<InputArgList>(
- ParseArgStrings(CLModePassThroughArgList, false, ContainsError));
+ ParseArgStrings(CLModePassThroughArgList, /*UseDriverMode=*/false,
+ ContainsError));
if (!ContainsError)
for (auto *Opt : *CLModePassThroughOptions) {
- appendOneArg(Opt, nullptr);
+ appendOneArg(Args, Opt, nullptr);
}
}
}
@@ -1088,10 +1256,8 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
// FIXME: This stuff needs to go into the Compilation, not the driver.
bool CCCPrintPhases;
- // Silence driver warnings if requested
- Diags.setIgnoreAllWarnings(Args.hasArg(options::OPT_w));
-
- // -no-canonical-prefixes is used very early in main.
+ // -canonical-prefixes, -no-canonical-prefixes are used very early in main.
+ Args.ClaimAllArgs(options::OPT_canonical_prefixes);
Args.ClaimAllArgs(options::OPT_no_canonical_prefixes);
// f(no-)integated-cc1 is also used very early in main.
@@ -1111,9 +1277,6 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
CCCPrintBindings = Args.hasArg(options::OPT_ccc_print_bindings);
if (const Arg *A = Args.getLastArg(options::OPT_ccc_gcc_name))
CCCGenericGCCName = A->getValue();
- GenReproducer = Args.hasFlag(options::OPT_gen_reproducer,
- options::OPT_fno_crash_diagnostics,
- !!::getenv("FORCE_CLANG_DIAGNOSTICS_CRASH"));
// Process -fproc-stat-report options.
if (const Arg *A = Args.getLastArg(options::OPT_fproc_stat_report_EQ)) {
@@ -1132,8 +1295,45 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
T.setVendor(llvm::Triple::PC);
T.setEnvironment(llvm::Triple::MSVC);
T.setObjectFormat(llvm::Triple::COFF);
+ if (Args.hasArg(options::OPT__SLASH_arm64EC))
+ T.setArch(llvm::Triple::aarch64, llvm::Triple::AArch64SubArch_arm64ec);
TargetTriple = T.str();
+ } else if (IsDXCMode()) {
+ // Build TargetTriple from target_profile option for clang-dxc.
+ if (const Arg *A = Args.getLastArg(options::OPT_target_profile)) {
+ StringRef TargetProfile = A->getValue();
+ if (auto Triple =
+ toolchains::HLSLToolChain::parseTargetProfile(TargetProfile))
+ TargetTriple = *Triple;
+ else
+ Diag(diag::err_drv_invalid_directx_shader_module) << TargetProfile;
+
+ A->claim();
+
+ if (Args.hasArg(options::OPT_spirv)) {
+ llvm::Triple T(TargetTriple);
+ T.setArch(llvm::Triple::spirv);
+ T.setOS(llvm::Triple::Vulkan);
+
+ // Set specific Vulkan version if applicable.
+ if (const Arg *A = Args.getLastArg(options::OPT_fspv_target_env_EQ)) {
+ const llvm::StringSet<> ValidValues = {"vulkan1.2", "vulkan1.3"};
+ if (ValidValues.contains(A->getValue())) {
+ T.setOSName(A->getValue());
+ } else {
+ Diag(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ }
+ A->claim();
+ }
+
+ TargetTriple = T.str();
+ }
+ } else {
+ Diag(diag::err_drv_dxc_missing_target_profile);
+ }
}
+
if (const Arg *A = Args.getLastArg(options::OPT_target))
TargetTriple = A->getValue();
if (const Arg *A = Args.getLastArg(options::OPT_ccc_install_dir))
@@ -1142,7 +1342,7 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
A->claim();
PrefixDirs.push_back(A->getValue(0));
}
- if (Optional<std::string> CompilerPathValue =
+ if (std::optional<std::string> CompilerPathValue =
llvm::sys::Process::GetEnv("COMPILER_PATH")) {
StringRef CompilerPath = *CompilerPathValue;
while (!CompilerPath.empty()) {
@@ -1167,6 +1367,17 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
.Default(SaveTempsCwd);
}
+ if (const Arg *A = Args.getLastArg(options::OPT_offload_host_only,
+ options::OPT_offload_device_only,
+ options::OPT_offload_host_device)) {
+ if (A->getOption().matches(options::OPT_offload_host_only))
+ Offload = OffloadHost;
+ else if (A->getOption().matches(options::OPT_offload_device_only))
+ Offload = OffloadDevice;
+ else
+ Offload = OffloadHostDevice;
+ }
+
setLTOMode(Args);
// Process -fembed-bitcode= flags.
@@ -1185,6 +1396,43 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
BitcodeEmbed = static_cast<BitcodeEmbedMode>(Model);
}
+ // Remove existing compilation database so that each job can append to it.
+ if (Arg *A = Args.getLastArg(options::OPT_MJ))
+ llvm::sys::fs::remove(A->getValue());
+
+ // Setting up the jobs for some precompile cases depends on whether we are
+ // treating them as PCH, implicit modules or C++20 ones.
+ // TODO: inferring the mode like this seems fragile (it meets the objective
+ // of not requiring anything new for operation, however).
+ const Arg *Std = Args.getLastArg(options::OPT_std_EQ);
+ ModulesModeCXX20 =
+ !Args.hasArg(options::OPT_fmodules) && Std &&
+ (Std->containsValue("c++20") || Std->containsValue("c++2a") ||
+ Std->containsValue("c++23") || Std->containsValue("c++2b") ||
+ Std->containsValue("c++26") || Std->containsValue("c++2c") ||
+ Std->containsValue("c++latest"));
+
+ // Process -fmodule-header{=} flags.
+ if (Arg *A = Args.getLastArg(options::OPT_fmodule_header_EQ,
+ options::OPT_fmodule_header)) {
+ // These flags force C++20 handling of headers.
+ ModulesModeCXX20 = true;
+ if (A->getOption().matches(options::OPT_fmodule_header))
+ CXX20HeaderType = HeaderMode_Default;
+ else {
+ StringRef ArgName = A->getValue();
+ unsigned Kind = llvm::StringSwitch<unsigned>(ArgName)
+ .Case("user", HeaderMode_User)
+ .Case("system", HeaderMode_System)
+ .Default(~0U);
+ if (Kind == ~0U) {
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << ArgName;
+ } else
+ CXX20HeaderType = static_cast<ModuleHeaderMode>(Kind);
+ }
+ }
+
std::unique_ptr<llvm::opt::InputArgList> UArgs =
std::make_unique<InputArgList>(std::move(Args));
@@ -1195,6 +1443,55 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
const ToolChain &TC = getToolChain(
*UArgs, computeTargetTriple(*this, TargetTriple, *UArgs));
+ if (TC.getTriple().isAndroid()) {
+ llvm::Triple Triple = TC.getTriple();
+ StringRef TripleVersionName = Triple.getEnvironmentVersionString();
+
+ if (Triple.getEnvironmentVersion().empty() && TripleVersionName != "") {
+ Diags.Report(diag::err_drv_triple_version_invalid)
+ << TripleVersionName << TC.getTripleString();
+ ContainsError = true;
+ }
+ }
+
+ // Report warning when arm64EC option is overridden by specified target
+ if ((TC.getTriple().getArch() != llvm::Triple::aarch64 ||
+ TC.getTriple().getSubArch() != llvm::Triple::AArch64SubArch_arm64ec) &&
+ UArgs->hasArg(options::OPT__SLASH_arm64EC)) {
+ getDiags().Report(clang::diag::warn_target_override_arm64ec)
+ << TC.getTriple().str();
+ }
+
+ // A common user mistake is specifying a target of aarch64-none-eabi or
+ // arm-none-elf whereas the correct names are aarch64-none-elf &
+ // arm-none-eabi. Detect these cases and issue a warning.
+ if (TC.getTriple().getOS() == llvm::Triple::UnknownOS &&
+ TC.getTriple().getVendor() == llvm::Triple::UnknownVendor) {
+ switch (TC.getTriple().getArch()) {
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ if (TC.getTriple().getEnvironmentName() == "elf") {
+ Diag(diag::warn_target_unrecognized_env)
+ << TargetTriple
+ << (TC.getTriple().getArchName().str() + "-none-eabi");
+ }
+ break;
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_be:
+ case llvm::Triple::aarch64_32:
+ if (TC.getTriple().getEnvironmentName().starts_with("eabi")) {
+ Diag(diag::warn_target_unrecognized_env)
+ << TargetTriple
+ << (TC.getTriple().getArchName().str() + "-none-elf");
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
// The compilation takes ownership of Args.
Compilation *C = new Compilation(*this, TC, UArgs.release(), TranslatedArgs,
ContainsError);
@@ -1228,8 +1525,14 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
static void printArgList(raw_ostream &OS, const llvm::opt::ArgList &Args) {
llvm::opt::ArgStringList ASL;
- for (const auto *A : Args)
+ for (const auto *A : Args) {
+ // Use user's original spelling of flags. For example, use
+ // `/source-charset:utf-8` instead of `-finput-charset=utf-8` if the user
+ // wrote the former.
+ while (A->getAlias())
+ A = A->getAlias();
A->render(Args, ASL);
+ }
for (auto I = ASL.begin(), E = ASL.end(); I != E; ++I) {
if (I != ASL.begin())
@@ -1249,7 +1552,7 @@ bool Driver::getCrashDiagnosticFile(StringRef ReproCrashFilename,
// (or /Library/Logs/DiagnosticReports for root) and has the filename pattern
// clang-<VERSION>_<YYYY-MM-DD-HHMMSS>_<hostname>.crash.
path::home_directory(CrashDiagDir);
- if (CrashDiagDir.startswith("/var/root"))
+ if (CrashDiagDir.starts_with("/var/root"))
CrashDiagDir = "/";
path::append(CrashDiagDir, "Library/Logs/DiagnosticReports");
int PID =
@@ -1267,7 +1570,7 @@ bool Driver::getCrashDiagnosticFile(StringRef ReproCrashFilename,
for (fs::directory_iterator File(CrashDiagDir, EC), FileEnd;
File != FileEnd && !EC; File.increment(EC)) {
StringRef FileName = path::filename(File->path());
- if (!FileName.startswith(Name))
+ if (!FileName.starts_with(Name))
continue;
if (fs::status(File->path(), FileStatus))
continue;
@@ -1278,7 +1581,7 @@ bool Driver::getCrashDiagnosticFile(StringRef ReproCrashFilename,
// The first line should start with "Process:", otherwise this isn't a real
// .crash file.
StringRef Data = CrashFile.get()->getBuffer();
- if (!Data.startswith("Process:"))
+ if (!Data.starts_with("Process:"))
continue;
// Parse parent process pid line, e.g: "Parent Process: clang-4.0 [79141]"
size_t ParentProcPos = Data.find("Parent Process:");
@@ -1328,6 +1631,11 @@ bool Driver::getCrashDiagnosticFile(StringRef ReproCrashFilename,
return false;
}
+static const char BugReporMsg[] =
+ "\n********************\n\n"
+ "PLEASE ATTACH THE FOLLOWING FILES TO THE BUG REPORT:\n"
+ "Preprocessed source(s) and associated run script(s) are located at:";
+
// When clang crashes, produce diagnostic information including the fully
// preprocessed source file(s). Request that the developer attach the
// diagnostic information to a bug report.
@@ -1337,16 +1645,40 @@ void Driver::generateCompilationDiagnostics(
if (C.getArgs().hasArg(options::OPT_fno_crash_diagnostics))
return;
- // Don't try to generate diagnostics for link or dsymutil jobs.
- if (FailingCommand.getCreator().isLinkJob() ||
- FailingCommand.getCreator().isDsymutilJob())
+ unsigned Level = 1;
+ if (Arg *A = C.getArgs().getLastArg(options::OPT_fcrash_diagnostics_EQ)) {
+ Level = llvm::StringSwitch<unsigned>(A->getValue())
+ .Case("off", 0)
+ .Case("compiler", 1)
+ .Case("all", 2)
+ .Default(1);
+ }
+ if (!Level)
return;
+ // Don't try to generate diagnostics for dsymutil jobs.
+ if (FailingCommand.getCreator().isDsymutilJob())
+ return;
+
+ bool IsLLD = false;
+ ArgStringList SavedTemps;
+ if (FailingCommand.getCreator().isLinkJob()) {
+ C.getDefaultToolChain().GetLinkerPath(&IsLLD);
+ if (!IsLLD || Level < 2)
+ return;
+
+ // If lld crashed, we will re-run the same command with the input it used
+ // to have. In that case we should not remove temp files in
+ // initCompilationForDiagnostics yet. They will be added back and removed
+ // later.
+ SavedTemps = std::move(C.getTempFiles());
+ assert(!C.getTempFiles().size());
+ }
+
// Print the version of the compiler.
PrintVersion(C, llvm::errs());
// Suppress driver output and emit preprocessor output to temp file.
- Mode = CPPMode;
CCGenDiagnostics = true;
// Save the original job command(s).
@@ -1359,6 +1691,29 @@ void Driver::generateCompilationDiagnostics(
// Suppress tool output.
C.initCompilationForDiagnostics();
+ // If lld failed, rerun it again with --reproduce.
+ if (IsLLD) {
+ const char *TmpName = CreateTempFile(C, "linker-crash", "tar");
+ Command NewLLDInvocation = Cmd;
+ llvm::opt::ArgStringList ArgList = NewLLDInvocation.getArguments();
+ StringRef ReproduceOption =
+ C.getDefaultToolChain().getTriple().isWindowsMSVCEnvironment()
+ ? "/reproduce:"
+ : "--reproduce=";
+ ArgList.push_back(Saver.save(Twine(ReproduceOption) + TmpName).data());
+ NewLLDInvocation.replaceArguments(std::move(ArgList));
+
+ // Redirect stdout/stderr to /dev/null.
+ NewLLDInvocation.Execute({std::nullopt, {""}, {""}}, nullptr, nullptr);
+ Diag(clang::diag::note_drv_command_failed_diag_msg) << BugReporMsg;
+ Diag(clang::diag::note_drv_command_failed_diag_msg) << TmpName;
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "\n\n********************";
+ if (Report)
+ Report->TemporaryFiles.push_back(TmpName);
+ return;
+ }
+
// Construct the list of inputs.
InputList Inputs;
BuildInputs(C.getDefaultToolChain(), C.getArgs(), Inputs);
@@ -1443,10 +1798,7 @@ void Driver::generateCompilationDiagnostics(
return;
}
- Diag(clang::diag::note_drv_command_failed_diag_msg)
- << "\n********************\n\n"
- "PLEASE ATTACH THE FOLLOWING FILES TO THE BUG REPORT:\n"
- "Preprocessed source(s) and associated run script(s) are located at:";
+ Diag(clang::diag::note_drv_command_failed_diag_msg) << BugReporMsg;
SmallString<128> VFS;
SmallString<128> ReproCrashFilename;
@@ -1458,7 +1810,7 @@ void Driver::generateCompilationDiagnostics(
ReproCrashFilename = TempFile;
llvm::sys::path::replace_extension(ReproCrashFilename, ".crash");
}
- if (StringRef(TempFile).endswith(".cache")) {
+ if (StringRef(TempFile).ends_with(".cache")) {
// In some cases (modules) we'll dump extra data to help with reproducing
// the crash into a directory next to the output.
VFS = llvm::sys::path::filename(TempFile);
@@ -1466,6 +1818,9 @@ void Driver::generateCompilationDiagnostics(
}
}
+ for (const char *TempFile : SavedTemps)
+ C.addTempFile(TempFile);
+
// Assume associated files are based off of the first temporary file.
CrashReportInfo CrashInfo(TempFiles[0], VFS);
@@ -1489,7 +1844,7 @@ void Driver::generateCompilationDiagnostics(
ScriptOS << "\n# Additional information: " << AdditionalInformation
<< "\n";
if (Report)
- Report->TemporaryFiles.push_back(std::string(Script.str()));
+ Report->TemporaryFiles.push_back(std::string(Script));
Diag(clang::diag::note_drv_command_failed_diag_msg) << Script;
}
@@ -1511,9 +1866,6 @@ void Driver::generateCompilationDiagnostics(
}
}
- for (const auto &A : C.getArgs().filtered(options::OPT_frewrite_map_file_EQ))
- Diag(clang::diag::note_drv_command_failed_diag_msg) << A->getValue();
-
Diag(clang::diag::note_drv_command_failed_diag_msg)
<< "\n\n********************";
}
@@ -1536,17 +1888,30 @@ void Driver::setUpResponseFiles(Compilation &C, Command &Cmd) {
int Driver::ExecuteCompilation(
Compilation &C,
SmallVectorImpl<std::pair<int, const Command *>> &FailingCommands) {
+ if (C.getArgs().hasArg(options::OPT_fdriver_only)) {
+ if (C.getArgs().hasArg(options::OPT_v))
+ C.getJobs().Print(llvm::errs(), "\n", true);
+
+ C.ExecuteJobs(C.getJobs(), FailingCommands, /*LogOnly=*/true);
+
+ // If there were errors building the compilation, quit now.
+ if (!FailingCommands.empty() || Diags.hasErrorOccurred())
+ return 1;
+
+ return 0;
+ }
+
// Just print if -### was present.
if (C.getArgs().hasArg(options::OPT__HASH_HASH_HASH)) {
C.getJobs().Print(llvm::errs(), "\n", true);
- return 0;
+ return Diags.hasErrorOccurred() ? 1 : 0;
}
// If there were errors building the compilation, quit now.
if (Diags.hasErrorOccurred())
return 1;
- // Set up response file names for each command, if necessary
+ // Set up response file names for each command, if necessary.
for (auto &Job : C.getJobs())
setUpResponseFiles(C, Job);
@@ -1573,14 +1938,12 @@ int Driver::ExecuteCompilation(
C.CleanupFileMap(C.getFailureResultFiles(), JA, true);
}
-#if LLVM_ON_UNIX
- // llvm/lib/Support/Unix/Signals.inc will exit with a special return code
+ // llvm/lib/Support/*/Signals.inc will exit with a special return code
// for SIGPIPE. Do not print diagnostics for this case.
if (CommandRes == EX_IOERR) {
Res = CommandRes;
continue;
}
-#endif
// Print extra information about abnormal failures, if possible.
//
@@ -1605,24 +1968,12 @@ int Driver::ExecuteCompilation(
}
void Driver::PrintHelp(bool ShowHidden) const {
- unsigned IncludedFlagsBitmask;
- unsigned ExcludedFlagsBitmask;
- std::tie(IncludedFlagsBitmask, ExcludedFlagsBitmask) =
- getIncludeExcludeOptionFlagMasks(IsCLMode());
-
- ExcludedFlagsBitmask |= options::NoDriverOption;
- if (!ShowHidden)
- ExcludedFlagsBitmask |= HelpHidden;
-
- if (IsFlangMode())
- IncludedFlagsBitmask |= options::FlangOption;
- else
- ExcludedFlagsBitmask |= options::FlangOnlyOption;
+ llvm::opt::Visibility VisibilityMask = getOptionVisibilityMask();
std::string Usage = llvm::formatv("{0} [options] file...", Name).str();
getOpts().printHelp(llvm::outs(), Usage.c_str(), DriverTitle.c_str(),
- IncludedFlagsBitmask, ExcludedFlagsBitmask,
- /*ShowAllAliases=*/false);
+ ShowHidden, /*ShowAllAliases=*/false,
+ VisibilityMask);
}
void Driver::PrintVersion(const Compilation &C, raw_ostream &OS) const {
@@ -1648,8 +1999,8 @@ void Driver::PrintVersion(const Compilation &C, raw_ostream &OS) const {
// Print out the install directory.
OS << "InstalledDir: " << InstalledDir << '\n';
- // If configuration file was used, print its path.
- if (!ConfigFile.empty())
+ // If configuration files were used, print their paths.
+ for (auto ConfigFile : ConfigFiles)
OS << "Configuration file: " << ConfigFile << '\n';
}
@@ -1670,18 +2021,17 @@ void Driver::HandleAutocompletions(StringRef PassedFlags) const {
std::vector<std::string> SuggestedCompletions;
std::vector<std::string> Flags;
- unsigned int DisableFlags =
- options::NoDriverOption | options::Unsupported | options::Ignored;
+ llvm::opt::Visibility VisibilityMask(options::ClangOption);
// Make sure that Flang-only options don't pollute the Clang output
// TODO: Make sure that Clang-only options don't pollute Flang output
- if (!IsFlangMode())
- DisableFlags |= options::FlangOnlyOption;
+ if (IsFlangMode())
+ VisibilityMask = llvm::opt::Visibility(options::FlangOption);
// Distinguish "--autocomplete=-someflag" and "--autocomplete=-someflag,"
// because the latter indicates that the user put space before pushing tab
// which should end up in a file completion.
- const bool HasSpace = PassedFlags.endswith(",");
+ const bool HasSpace = PassedFlags.ends_with(",");
// Parse PassedFlags by "," as all the command-line flags are passed to this
// function separated by ","
@@ -1695,7 +2045,7 @@ void Driver::HandleAutocompletions(StringRef PassedFlags) const {
// We want to show cc1-only options only when clang is invoked with -cc1 or
// -Xclang.
if (llvm::is_contained(Flags, "-Xclang") || llvm::is_contained(Flags, "-cc1"))
- DisableFlags &= ~options::NoDriverOption;
+ VisibilityMask = llvm::opt::Visibility(options::CC1Option);
const llvm::opt::OptTable &Opts = getOpts();
StringRef Cur;
@@ -1721,17 +2071,19 @@ void Driver::HandleAutocompletions(StringRef PassedFlags) const {
// When flag ends with '=' and there was no value completion, return empty
// string and fall back to the file autocompletion.
- if (SuggestedCompletions.empty() && !Cur.endswith("=")) {
+ if (SuggestedCompletions.empty() && !Cur.ends_with("=")) {
// If the flag is in the form of "--autocomplete=-foo",
// we were requested to print out all option names that start with "-foo".
// For example, "--autocomplete=-fsyn" is expanded to "-fsyntax-only".
- SuggestedCompletions = Opts.findByPrefix(Cur, DisableFlags);
+ SuggestedCompletions = Opts.findByPrefix(
+ Cur, VisibilityMask,
+ /*DisableFlags=*/options::Unsupported | options::Ignored);
// We have to query the -W flags manually as they're not in the OptTable.
// TODO: Find a good way to add them to OptTable instead and them remove
// this code.
for (StringRef S : DiagnosticIDs::getDiagnosticFlags())
- if (S.startswith(Cur))
+ if (S.starts_with(Cur))
SuggestedCompletions.push_back(std::string(S));
}
@@ -1783,7 +2135,8 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
if (C.getArgs().hasArg(options::OPT_v) ||
C.getArgs().hasArg(options::OPT__HASH_HASH_HASH) ||
- C.getArgs().hasArg(options::OPT_print_supported_cpus)) {
+ C.getArgs().hasArg(options::OPT_print_supported_cpus) ||
+ C.getArgs().hasArg(options::OPT_print_supported_extensions)) {
PrintVersion(C, llvm::errs());
SuppressMissingInputWarning = true;
}
@@ -1842,14 +2195,20 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
}
if (C.getArgs().hasArg(options::OPT_print_runtime_dir)) {
- std::string CandidateRuntimePath = TC.getRuntimePath();
- if (getVFS().exists(CandidateRuntimePath))
- llvm::outs() << CandidateRuntimePath << '\n';
+ if (std::optional<std::string> RuntimePath = TC.getRuntimePath())
+ llvm::outs() << *RuntimePath << '\n';
else
llvm::outs() << TC.getCompilerRTPath() << '\n';
return false;
}
+ if (C.getArgs().hasArg(options::OPT_print_diagnostic_options)) {
+ std::vector<std::string> Flags = DiagnosticIDs::getDiagnosticFlags();
+ for (std::size_t I = 0; I != Flags.size(); I += 2)
+ llvm::outs() << " " << Flags[I] << "\n " << Flags[I + 1] << "\n\n";
+ return false;
+ }
+
// FIXME: The following handlers should use a callback mechanism, we don't
// know what the client would like to do.
if (Arg *A = C.getArgs().getLastArg(options::OPT_print_file_name_EQ)) {
@@ -1895,14 +2254,26 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
return false;
}
+ if (C.getArgs().hasArg(options::OPT_print_multi_flags)) {
+ Multilib::flags_list ArgFlags = TC.getMultilibFlags(C.getArgs());
+ llvm::StringSet<> ExpandedFlags = TC.getMultilibs().expandFlags(ArgFlags);
+ std::set<llvm::StringRef> SortedFlags;
+ for (const auto &FlagEntry : ExpandedFlags)
+ SortedFlags.insert(FlagEntry.getKey());
+ for (auto Flag : SortedFlags)
+ llvm::outs() << Flag << '\n';
+ return false;
+ }
+
if (C.getArgs().hasArg(options::OPT_print_multi_directory)) {
- const Multilib &Multilib = TC.getMultilib();
- if (Multilib.gccSuffix().empty())
- llvm::outs() << ".\n";
- else {
- StringRef Suffix(Multilib.gccSuffix());
- assert(Suffix.front() == '/');
- llvm::outs() << Suffix.substr(1) << "\n";
+ for (const Multilib &Multilib : TC.getSelectedMultilibs()) {
+ if (Multilib.gccSuffix().empty())
+ llvm::outs() << ".\n";
+ else {
+ StringRef Suffix(Multilib.gccSuffix());
+ assert(Suffix.front() == '/');
+ llvm::outs() << Suffix.substr(1) << "\n";
+ }
}
return false;
}
@@ -1918,12 +2289,6 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
return false;
}
- if (C.getArgs().hasArg(options::OPT_print_multiarch)) {
- llvm::outs() << TC.getMultiarchTriple(*this, TC.getTriple(), SysRoot)
- << "\n";
- return false;
- }
-
if (C.getArgs().hasArg(options::OPT_print_targets)) {
llvm::TargetRegistry::printRegisteredTargetsForVersion(llvm::outs());
return false;
@@ -2041,11 +2406,7 @@ static bool ContainsCompileOrAssembleAction(const Action *A) {
isa<AssembleJobAction>(A))
return true;
- for (const Action *Input : A->inputs())
- if (ContainsCompileOrAssembleAction(Input))
- return true;
-
- return false;
+ return llvm::any_of(A->inputs(), ContainsCompileOrAssembleAction);
}
void Driver::BuildUniversalActions(Compilation &C, const ToolChain &TC,
@@ -2145,21 +2506,16 @@ bool Driver::DiagnoseInputExistence(const DerivedArgList &Args, StringRef Value,
if (Value == "-")
return true;
- if (getVFS().exists(Value))
+ // If it's a header to be found in the system or user search path, then defer
+ // complaints about its absence until those searches can be done. When we
+ // are definitely processing headers for C++20 header units, extend this to
+ // allow the user to put "-fmodule-header -xc++-header vector" for example.
+ if (Ty == types::TY_CXXSHeader || Ty == types::TY_CXXUHeader ||
+ (ModulesModeCXX20 && Ty == types::TY_CXXHeader))
return true;
- if (IsCLMode()) {
- if (!llvm::sys::path::is_absolute(Twine(Value)) &&
- llvm::sys::Process::FindInEnvPath("LIB", Value, ';'))
- return true;
-
- if (Args.hasArg(options::OPT__SLASH_link) && Ty == types::TY_Object) {
- // Arguments to the /link flag might cause the linker to search for object
- // and library files in paths we don't know about. Don't error in such
- // cases.
- return true;
- }
- }
+ if (getVFS().exists(Value))
+ return true;
if (TypoCorrect) {
// Check if the filename is a typo for an option flag. OptTable thinks
@@ -2167,23 +2523,70 @@ bool Driver::DiagnoseInputExistence(const DerivedArgList &Args, StringRef Value,
// filenames, but e.g. `/diagnostic:caret` is more likely a typo for
// the option `/diagnostics:caret` than a reference to a file in the root
// directory.
- unsigned IncludedFlagsBitmask;
- unsigned ExcludedFlagsBitmask;
- std::tie(IncludedFlagsBitmask, ExcludedFlagsBitmask) =
- getIncludeExcludeOptionFlagMasks(IsCLMode());
std::string Nearest;
- if (getOpts().findNearest(Value, Nearest, IncludedFlagsBitmask,
- ExcludedFlagsBitmask) <= 1) {
+ if (getOpts().findNearest(Value, Nearest, getOptionVisibilityMask()) <= 1) {
Diag(clang::diag::err_drv_no_such_file_with_suggestion)
<< Value << Nearest;
return false;
}
}
+ // In CL mode, don't error on apparently non-existent linker inputs, because
+ // they can be influenced by linker flags the clang driver might not
+ // understand.
+ // Examples:
+ // - `clang-cl main.cc ole32.lib` in a non-MSVC shell will make the driver
+ // module look for an MSVC installation in the registry. (We could ask
+ // the MSVCToolChain object if it can find `ole32.lib`, but the logic to
+ // look in the registry might move into lld-link in the future so that
+ // lld-link invocations in non-MSVC shells just work too.)
+ // - `clang-cl ... /link ...` can pass arbitrary flags to the linker,
+ // including /libpath:, which is used to find .lib and .obj files.
+ // So do not diagnose this on the driver level. Rely on the linker diagnosing
+ // it. (If we don't end up invoking the linker, this means we'll emit a
+ // "'linker' input unused [-Wunused-command-line-argument]" warning instead
+ // of an error.)
+ //
+ // Only do this skip after the typo correction step above. `/Brepo` is treated
+ // as TY_Object, but it's clearly a typo for `/Brepro`. It seems fine to emit
+ // an error if we have a flag that's within an edit distance of 1 from a
+ // flag. (Users can use `-Wl,` or `/linker` to launder the flag past the
+ // driver in the unlikely case they run into this.)
+ //
+ // Don't do this for inputs that start with a '/', else we'd pass options
+ // like /libpath: through to the linker silently.
+ //
+ // Emitting an error for linker inputs can also cause incorrect diagnostics
+ // with the gcc driver. The command
+ // clang -fuse-ld=lld -Wl,--chroot,some/dir /file.o
+ // will make lld look for some/dir/file.o, while we will diagnose here that
+ // `/file.o` does not exist. However, configure scripts check if
+ // `clang /GR-` compiles without error to see if the compiler is cl.exe,
+ // so we can't downgrade diagnostics for `/GR-` from an error to a warning
+ // in cc mode. (We can in cl mode because cl.exe itself only warns on
+ // unknown flags.)
+ if (IsCLMode() && Ty == types::TY_Object && !Value.starts_with("/"))
+ return true;
+
Diag(clang::diag::err_drv_no_such_file) << Value;
return false;
}
+// Get the C++20 Header Unit type corresponding to the input type.
+static types::ID CXXHeaderUnitType(ModuleHeaderMode HM) {
+ switch (HM) {
+ case HeaderMode_User:
+ return types::TY_CXXUHeader;
+ case HeaderMode_System:
+ return types::TY_CXXSHeader;
+ case HeaderMode_Default:
+ break;
+ case HeaderMode_None:
+ llvm_unreachable("should not be called in this case");
+ }
+ return types::TY_CXXHUHeader;
+}
+
// Construct a the list of inputs and their types.
void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
InputList &Inputs) const {
@@ -2207,17 +2610,32 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
for (Arg *A :
Args.filtered(options::OPT__SLASH_TC, options::OPT__SLASH_TP)) {
if (Previous) {
- Diag(clang::diag::warn_drv_overriding_flag_option)
- << Previous->getSpelling() << A->getSpelling();
+ Diag(clang::diag::warn_drv_overriding_option)
+ << Previous->getSpelling() << A->getSpelling();
ShowNote = true;
}
Previous = A;
}
if (ShowNote)
Diag(clang::diag::note_drv_t_option_is_global);
+ }
- // No driver mode exposes -x and /TC or /TP; we don't support mixing them.
- assert(!Args.hasArg(options::OPT_x) && "-x and /TC or /TP is not allowed");
+ // CUDA/HIP and their preprocessor expansions can be accepted by CL mode.
+ // Warn -x after last input file has no effect
+ auto LastXArg = Args.getLastArgValue(options::OPT_x);
+ const llvm::StringSet<> ValidXArgs = {"cuda", "hip", "cui", "hipi"};
+ if (!IsCLMode() || ValidXArgs.contains(LastXArg)) {
+ Arg *LastXArg = Args.getLastArgNoClaim(options::OPT_x);
+ Arg *LastInputArg = Args.getLastArgNoClaim(options::OPT_INPUT);
+ if (LastXArg && LastInputArg &&
+ LastInputArg->getIndex() < LastXArg->getIndex())
+ Diag(clang::diag::warn_drv_unused_x) << LastXArg->getValue();
+ } else {
+ // In CL mode suggest /TC or /TP since -x doesn't make sense if passed via
+ // /clang:.
+ if (auto *A = Args.getLastArg(options::OPT_x))
+ Diag(diag::err_drv_unsupported_opt_with_suggestion)
+ << A->getAsString(Args) << "/TC' or '/TP";
}
for (Arg *A : Args) {
@@ -2235,6 +2653,8 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
if (memcmp(Value, "-", 2) == 0) {
if (IsFlangMode()) {
Ty = types::TY_Fortran;
+ } else if (IsDXCMode()) {
+ Ty = types::TY_HLSL;
} else {
// If running with -E, treat as a C input (this changes the
// builtin macros, for example). This may be overridden by -ObjC
@@ -2242,6 +2662,7 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
//
// Otherwise emit an error but still use a valid type to avoid
// spurious errors (e.g., no inputs).
+ assert(!CCGenDiagnostics && "stdin produces no crash reproducer");
if (!Args.hasArgNoClaim(options::OPT_E) && !CCCIsCPP())
Diag(IsCLMode() ? clang::diag::err_drv_unknown_stdin_type_clang_cl
: clang::diag::err_drv_unknown_stdin_type);
@@ -2257,10 +2678,10 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
Ty = TC.LookupTypeForExtension(Ext + 1);
if (Ty == types::TY_INVALID) {
- if (CCCIsCPP())
- Ty = types::TY_C;
- else if (IsCLMode() && Args.hasArgNoClaim(options::OPT_E))
+ if (IsCLMode() && (Args.hasArgNoClaim(options::OPT_E) || CCGenDiagnostics))
Ty = types::TY_CXX;
+ else if (CCCIsCPP() || CCGenDiagnostics)
+ Ty = types::TY_C;
else
Ty = types::TY_Object;
}
@@ -2271,7 +2692,9 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
types::ID OldTy = Ty;
Ty = types::lookupCXXTypeForCType(Ty);
- if (Ty != OldTy)
+ // Do not complain about foo.h, when we are known to be processing
+ // it as a C++20 header unit.
+ if (Ty != OldTy && !(OldTy == types::TY_CHeader && hasHeaderMode()))
Diag(clang::diag::warn_drv_treating_input_as_cxx)
<< getTypeName(OldTy) << getTypeName(Ty);
}
@@ -2294,6 +2717,14 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
else if (Args.hasArg(options::OPT_ObjCXX))
Ty = types::TY_ObjCXX;
}
+
+ // Disambiguate headers that are meant to be header units from those
+ // intended to be PCH. Avoid missing '.h' cases that are counted as
+ // C headers by default - we know we are in C++ mode and we do not
+ // want to issue a complaint about compiling things in the wrong mode.
+ if ((Ty == types::TY_CXXHeader || Ty == types::TY_CHeader) &&
+ hasHeaderMode())
+ Ty = CXXHeaderUnitType(CXX20HeaderType);
} else {
assert(InputTypeArg && "InputType set w/o InputTypeArg");
if (!InputTypeArg->getOption().matches(options::OPT_x)) {
@@ -2309,6 +2740,10 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
}
}
+ if ((Ty == types::TY_C || Ty == types::TY_CXX) &&
+ Args.hasArgNoClaim(options::OPT_hipstdpar))
+ Ty = types::TY_HIP;
+
if (DiagnoseInputExistence(Args, Value, Ty, /*TypoCorrect=*/true))
Inputs.push_back(std::make_pair(Ty, A));
@@ -2345,6 +2780,11 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
Diag(clang::diag::err_drv_unknown_language) << A->getValue();
InputType = types::TY_Object;
}
+
+ // If the user has put -fmodule-header{,=} then we treat C++ headers as
+ // header unit inputs. So we 'promote' -xc++-header appropriately.
+ if (InputType == types::TY_CXXHeader && hasHeaderMode())
+ InputType = CXXHeaderUnitType(CXX20HeaderType);
} else if (A->getOption().getID() == options::OPT_U) {
assert(A->getNumValues() == 1 && "The /U option has one value.");
StringRef Val = A->getValue(0);
@@ -2376,6 +2816,9 @@ class OffloadingActionBuilder final {
/// Map between an input argument and the offload kinds used to process it.
std::map<const Arg *, unsigned> InputArgToOffloadKindMap;
+ /// Map between a host action and its originating input argument.
+ std::map<Action *, const Arg *> HostActionToInputArgMap;
+
/// Builder interface. It doesn't build anything or keep any state.
class DeviceActionBuilder {
public:
@@ -2428,7 +2871,7 @@ class OffloadingActionBuilder final {
/// Update the state to include the provided host action \a HostAction as a
/// dependency of the current device action. By default it is inactive.
- virtual ActionBuilderReturnCode addDeviceDepences(Action *HostAction) {
+ virtual ActionBuilderReturnCode addDeviceDependences(Action *HostAction) {
return ABRT_Inactive;
}
@@ -2514,9 +2957,14 @@ class OffloadingActionBuilder final {
CudaActionBuilderBase(Compilation &C, DerivedArgList &Args,
const Driver::InputList &Inputs,
Action::OffloadKind OFKind)
- : DeviceActionBuilder(C, Args, Inputs, OFKind) {}
+ : DeviceActionBuilder(C, Args, Inputs, OFKind) {
- ActionBuilderReturnCode addDeviceDepences(Action *HostAction) override {
+ CompileDeviceOnly = C.getDriver().offloadDeviceOnly();
+ Relocatable = Args.hasFlag(options::OPT_fgpu_rdc,
+ options::OPT_fno_gpu_rdc, /*Default=*/false);
+ }
+
+ ActionBuilderReturnCode addDeviceDependences(Action *HostAction) override {
// While generating code for CUDA, we only depend on the host input action
// to trigger the creation of all the CUDA device actions.
@@ -2590,12 +3038,16 @@ class OffloadingActionBuilder final {
std::string FileName = IA->getInputArg().getAsString(Args);
// Check if the type of the file is the same as the action. Do not
// unbundle it if it is not. Do not unbundle .so files, for example,
- // which are not object files.
+ // which are not object files. Files with extension ".lib" is classified
+ // as TY_Object but they are actually archives, therefore should not be
+ // unbundled here as objects. They will be handled at other places.
+ const StringRef LibFileExt = ".lib";
if (IA->getType() == types::TY_Object &&
(!llvm::sys::path::has_extension(FileName) ||
types::lookupTypeForExtension(
llvm::sys::path::extension(FileName).drop_front()) !=
- types::TY_Object))
+ types::TY_Object ||
+ llvm::sys::path::extension(FileName) == LibFileExt))
return ABRT_Inactive;
for (auto Arch : GpuArchList) {
@@ -2603,6 +3055,7 @@ class OffloadingActionBuilder final {
UA->registerDependentActionInfo(ToolChains[0], Arch,
AssociatedOffloadKind);
}
+ IsActive = true;
return ABRT_Success;
}
@@ -2634,7 +3087,7 @@ class OffloadingActionBuilder final {
assert(CudaDeviceActions.size() == GpuArchList.size() &&
"Expecting one action per GPU architecture.");
assert(ToolChains.size() == 1 &&
- "Expecting to have a sing CUDA toolchain.");
+ "Expecting to have a single CUDA toolchain.");
for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I)
AddTopLevel(CudaDeviceActions[I], GpuArchList[I]);
@@ -2645,7 +3098,7 @@ class OffloadingActionBuilder final {
/// option is invalid.
virtual StringRef getCanonicalOffloadArch(StringRef Arch) = 0;
- virtual llvm::Optional<std::pair<llvm::StringRef, llvm::StringRef>>
+ virtual std::optional<std::pair<llvm::StringRef, llvm::StringRef>>
getConflictOffloadArchCombination(const std::set<StringRef> &GpuArchs) = 0;
bool initialize() override {
@@ -2662,9 +3115,6 @@ class OffloadingActionBuilder final {
!C.hasOffloadToolChain<Action::OFK_HIP>())
return false;
- Relocatable = Args.hasFlag(options::OPT_fgpu_rdc,
- options::OPT_fno_gpu_rdc, /*Default=*/false);
-
const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
assert(HostTC && "No toolchain for host compilation.");
if (HostTC->getTriple().isNVPTX() ||
@@ -2682,15 +3132,7 @@ class OffloadingActionBuilder final {
? C.getSingleOffloadToolChain<Action::OFK_Cuda>()
: C.getSingleOffloadToolChain<Action::OFK_HIP>());
- Arg *PartialCompilationArg = Args.getLastArg(
- options::OPT_cuda_host_only, options::OPT_cuda_device_only,
- options::OPT_cuda_compile_host_device);
- CompileHostOnly = PartialCompilationArg &&
- PartialCompilationArg->getOption().matches(
- options::OPT_cuda_host_only);
- CompileDeviceOnly = PartialCompilationArg &&
- PartialCompilationArg->getOption().matches(
- options::OPT_cuda_device_only);
+ CompileHostOnly = C.getDriver().offloadHostOnly();
EmitLLVM = Args.getLastArg(options::OPT_emit_llvm);
EmitAsm = Args.getLastArg(options::OPT_S);
FixedCUID = Args.getLastArgValue(options::OPT_cuid_EQ);
@@ -2709,7 +3151,15 @@ class OffloadingActionBuilder final {
}
}
- // Collect all cuda_gpu_arch parameters, removing duplicates.
+ // --offload and --offload-arch options are mutually exclusive.
+ if (Args.hasArgNoClaim(options::OPT_offload_EQ) &&
+ Args.hasArgNoClaim(options::OPT_offload_arch_EQ,
+ options::OPT_no_offload_arch_EQ)) {
+ C.getDriver().Diag(diag::err_opt_not_valid_with_opt) << "--offload-arch"
+ << "--offload";
+ }
+
+ // Collect all offload arch parameters, removing duplicates.
std::set<StringRef> GpuArchs;
bool Error = false;
for (Arg *A : Args) {
@@ -2718,28 +3168,41 @@ class OffloadingActionBuilder final {
continue;
A->claim();
- StringRef ArchStr = A->getValue();
- if (A->getOption().matches(options::OPT_no_offload_arch_EQ) &&
- ArchStr == "all") {
- GpuArchs.clear();
- continue;
+ for (StringRef ArchStr : llvm::split(A->getValue(), ",")) {
+ if (A->getOption().matches(options::OPT_no_offload_arch_EQ) &&
+ ArchStr == "all") {
+ GpuArchs.clear();
+ } else if (ArchStr == "native") {
+ const ToolChain &TC = *ToolChains.front();
+ auto GPUsOrErr = ToolChains.front()->getSystemGPUArchs(Args);
+ if (!GPUsOrErr) {
+ TC.getDriver().Diag(diag::err_drv_undetermined_gpu_arch)
+ << llvm::Triple::getArchTypeName(TC.getArch())
+ << llvm::toString(GPUsOrErr.takeError()) << "--offload-arch";
+ continue;
+ }
+
+ for (auto GPU : *GPUsOrErr) {
+ GpuArchs.insert(Args.MakeArgString(GPU));
+ }
+ } else {
+ ArchStr = getCanonicalOffloadArch(ArchStr);
+ if (ArchStr.empty()) {
+ Error = true;
+ } else if (A->getOption().matches(options::OPT_offload_arch_EQ))
+ GpuArchs.insert(ArchStr);
+ else if (A->getOption().matches(options::OPT_no_offload_arch_EQ))
+ GpuArchs.erase(ArchStr);
+ else
+ llvm_unreachable("Unexpected option.");
+ }
}
- ArchStr = getCanonicalOffloadArch(ArchStr);
- if (ArchStr.empty()) {
- Error = true;
- } else if (A->getOption().matches(options::OPT_offload_arch_EQ))
- GpuArchs.insert(ArchStr);
- else if (A->getOption().matches(options::OPT_no_offload_arch_EQ))
- GpuArchs.erase(ArchStr);
- else
- llvm_unreachable("Unexpected option.");
}
auto &&ConflictingArchs = getConflictOffloadArchCombination(GpuArchs);
if (ConflictingArchs) {
C.getDriver().Diag(clang::diag::err_drv_bad_offload_arch_combo)
- << ConflictingArchs.getValue().first
- << ConflictingArchs.getValue().second;
+ << ConflictingArchs->first << ConflictingArchs->second;
C.setContainsError();
return true;
}
@@ -2751,8 +3214,12 @@ class OffloadingActionBuilder final {
// Default to sm_20 which is the lowest common denominator for
// supported GPUs. sm_20 code should work correctly, if
// suboptimally, on all newer GPUs.
- if (GpuArchList.empty())
- GpuArchList.push_back(DefaultCudaArch);
+ if (GpuArchList.empty()) {
+ if (ToolChains.front()->getTriple().isSPIRV())
+ GpuArchList.push_back(CudaArch::Generic);
+ else
+ GpuArchList.push_back(DefaultCudaArch);
+ }
return Error;
}
@@ -2765,7 +3232,7 @@ class OffloadingActionBuilder final {
CudaActionBuilder(Compilation &C, DerivedArgList &Args,
const Driver::InputList &Inputs)
: CudaActionBuilderBase(C, Args, Inputs, Action::OFK_Cuda) {
- DefaultCudaArch = CudaArch::SM_20;
+ DefaultCudaArch = CudaArch::SM_35;
}
StringRef getCanonicalOffloadArch(StringRef ArchStr) override {
@@ -2777,10 +3244,10 @@ class OffloadingActionBuilder final {
return CudaArchToString(Arch);
}
- llvm::Optional<std::pair<llvm::StringRef, llvm::StringRef>>
+ std::optional<std::pair<llvm::StringRef, llvm::StringRef>>
getConflictOffloadArchCombination(
const std::set<StringRef> &GpuArchs) override {
- return llvm::None;
+ return std::nullopt;
}
ActionBuilderReturnCode
@@ -2891,43 +3358,67 @@ class OffloadingActionBuilder final {
class HIPActionBuilder final : public CudaActionBuilderBase {
/// The linker inputs obtained for each device arch.
SmallVector<ActionList, 8> DeviceLinkerInputs;
- bool GPUSanitize;
// The default bundling behavior depends on the type of output, therefore
// BundleOutput needs to be tri-value: None, true, or false.
// Bundle code objects except --no-gpu-output is specified for device
// only compilation. Bundle other type of output files only if
// --gpu-bundle-output is specified for device only compilation.
- Optional<bool> BundleOutput;
+ std::optional<bool> BundleOutput;
+ std::optional<bool> EmitReloc;
public:
HIPActionBuilder(Compilation &C, DerivedArgList &Args,
const Driver::InputList &Inputs)
: CudaActionBuilderBase(C, Args, Inputs, Action::OFK_HIP) {
- DefaultCudaArch = CudaArch::GFX803;
- GPUSanitize = Args.hasFlag(options::OPT_fgpu_sanitize,
- options::OPT_fno_gpu_sanitize, false);
+
+ DefaultCudaArch = CudaArch::GFX906;
+
+ if (Args.hasArg(options::OPT_fhip_emit_relocatable,
+ options::OPT_fno_hip_emit_relocatable)) {
+ EmitReloc = Args.hasFlag(options::OPT_fhip_emit_relocatable,
+ options::OPT_fno_hip_emit_relocatable, false);
+
+ if (*EmitReloc) {
+ if (Relocatable) {
+ C.getDriver().Diag(diag::err_opt_not_valid_with_opt)
+ << "-fhip-emit-relocatable"
+ << "-fgpu-rdc";
+ }
+
+ if (!CompileDeviceOnly) {
+ C.getDriver().Diag(diag::err_opt_not_valid_without_opt)
+ << "-fhip-emit-relocatable"
+ << "--cuda-device-only";
+ }
+ }
+ }
+
if (Args.hasArg(options::OPT_gpu_bundle_output,
options::OPT_no_gpu_bundle_output))
BundleOutput = Args.hasFlag(options::OPT_gpu_bundle_output,
- options::OPT_no_gpu_bundle_output);
+ options::OPT_no_gpu_bundle_output, true) &&
+ (!EmitReloc || !*EmitReloc);
}
bool canUseBundlerUnbundler() const override { return true; }
StringRef getCanonicalOffloadArch(StringRef IdStr) override {
llvm::StringMap<bool> Features;
- auto ArchStr =
- parseTargetID(getHIPOffloadTargetTriple(), IdStr, &Features);
+ // getHIPOffloadTargetTriple() is known to return valid value as it has
+ // been called successfully in the CreateOffloadingDeviceToolChains().
+ auto ArchStr = parseTargetID(
+ *getHIPOffloadTargetTriple(C.getDriver(), C.getInputArgs()), IdStr,
+ &Features);
if (!ArchStr) {
C.getDriver().Diag(clang::diag::err_drv_bad_target_id) << IdStr;
C.setContainsError();
return StringRef();
}
- auto CanId = getCanonicalTargetID(ArchStr.getValue(), Features);
+ auto CanId = getCanonicalTargetID(*ArchStr, Features);
return Args.MakeArgStringRef(CanId);
};
- llvm::Optional<std::pair<llvm::StringRef, llvm::StringRef>>
+ std::optional<std::pair<llvm::StringRef, llvm::StringRef>>
getConflictOffloadArchCombination(
const std::set<StringRef> &GpuArchs) override {
return getConflictTargetIDCombination(GpuArchs);
@@ -2937,9 +3428,12 @@ class OffloadingActionBuilder final {
getDeviceDependences(OffloadAction::DeviceDependences &DA,
phases::ID CurPhase, phases::ID FinalPhase,
PhasesTy &Phases) override {
+ if (!IsActive)
+ return ABRT_Inactive;
+
// amdgcn does not support linking of object files, therefore we skip
// backend and assemble phases to output LLVM IR. Except for generating
- // non-relocatable device coee, where we generate fat binary for device
+ // non-relocatable device code, where we generate fat binary for device
// code and pass to host in Backend phase.
if (CudaDeviceActions.empty())
return ABRT_Success;
@@ -2948,10 +3442,12 @@ class OffloadingActionBuilder final {
CudaDeviceActions.size() == GpuArchList.size()) &&
"Expecting one action per GPU architecture.");
assert(!CompileHostOnly &&
- "Not expecting CUDA actions in host-only compilation.");
+ "Not expecting HIP actions in host-only compilation.");
+
+ bool ShouldLink = !EmitReloc || !*EmitReloc;
if (!Relocatable && CurPhase == phases::Backend && !EmitLLVM &&
- !EmitAsm) {
+ !EmitAsm && ShouldLink) {
// If we are in backend phase, we attempt to generate the fat binary.
// We compile each arch to IR and use a link action to generate code
// object containing ISA. Then we use a special "link" action to create
@@ -2971,9 +3467,19 @@ class OffloadingActionBuilder final {
// When LTO is not enabled, we follow the conventional
// compiler phases, including backend and assemble phases.
ActionList AL;
- auto BackendAction = C.getDriver().ConstructPhaseAction(
- C, Args, phases::Backend, CudaDeviceActions[I],
- AssociatedOffloadKind);
+ Action *BackendAction = nullptr;
+ if (ToolChains.front()->getTriple().isSPIRV()) {
+ // Emit LLVM bitcode for SPIR-V targets. SPIR-V device tool chain
+ // (HIPSPVToolChain) runs post-link LLVM IR passes.
+ types::ID Output = Args.hasArg(options::OPT_S)
+ ? types::TY_LLVM_IR
+ : types::TY_LLVM_BC;
+ BackendAction =
+ C.MakeAction<BackendJobAction>(CudaDeviceActions[I], Output);
+ } else
+ BackendAction = C.getDriver().ConstructPhaseAction(
+ C, Args, phases::Backend, CudaDeviceActions[I],
+ AssociatedOffloadKind);
auto AssembleAction = C.getDriver().ConstructPhaseAction(
C, Args, phases::Assemble, BackendAction,
AssociatedOffloadKind);
@@ -2997,8 +3503,7 @@ class OffloadingActionBuilder final {
DDep, CudaDeviceActions[I]->getType());
}
- if (!CompileDeviceOnly || !BundleOutput.hasValue() ||
- BundleOutput.getValue()) {
+ if (!CompileDeviceOnly || !BundleOutput || *BundleOutput) {
// Create HIP fat binary with a special "link" action.
CudaFatBinary = C.MakeAction<LinkJobAction>(CudaDeviceActions,
types::TY_HIP_FATBIN);
@@ -3018,6 +3523,8 @@ class OffloadingActionBuilder final {
return CompileDeviceOnly ? ABRT_Ignore_Host : ABRT_Success;
} else if (CurPhase == phases::Link) {
+ if (!ShouldLink)
+ return ABRT_Success;
// Save CudaDeviceActions to DeviceLinkerInputs for each GPU subarch.
// This happens to each device action originated from each input file.
// Later on, device actions in DeviceLinkerInputs are used to create
@@ -3033,7 +3540,7 @@ class OffloadingActionBuilder final {
// We will pass the device action as a host dependence, so we don't
// need to do anything else with them.
CudaDeviceActions.clear();
- return ABRT_Success;
+ return CompileDeviceOnly ? ABRT_Ignore_Host : ABRT_Success;
}
// By default, we produce an action for each device arch.
@@ -3041,8 +3548,8 @@ class OffloadingActionBuilder final {
A = C.getDriver().ConstructPhaseAction(C, Args, CurPhase, A,
AssociatedOffloadKind);
- if (CompileDeviceOnly && CurPhase == FinalPhase &&
- BundleOutput.hasValue() && BundleOutput.getValue()) {
+ if (CompileDeviceOnly && CurPhase == FinalPhase && BundleOutput &&
+ *BundleOutput) {
for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I) {
OffloadAction::DeviceDependences DDep;
DDep.add(*CudaDeviceActions[I], *ToolChains.front(), GpuArchList[I],
@@ -3055,8 +3562,11 @@ class OffloadingActionBuilder final {
CudaDeviceActions.clear();
}
- return (CompileDeviceOnly && CurPhase == FinalPhase) ? ABRT_Ignore_Host
- : ABRT_Success;
+ return (CompileDeviceOnly &&
+ (CurPhase == FinalPhase ||
+ (!ShouldLink && CurPhase == phases::Assemble)))
+ ? ABRT_Ignore_Host
+ : ABRT_Success;
}
void appendLinkDeviceActions(ActionList &AL) override {
@@ -3066,210 +3576,55 @@ class OffloadingActionBuilder final {
assert(DeviceLinkerInputs.size() == GpuArchList.size() &&
"Linker inputs and GPU arch list sizes do not match.");
- // Append a new link action for each device.
+ ActionList Actions;
unsigned I = 0;
+ // Append a new link action for each device.
+ // Each entry in DeviceLinkerInputs corresponds to a GPU arch.
for (auto &LI : DeviceLinkerInputs) {
- // Each entry in DeviceLinkerInputs corresponds to a GPU arch.
- auto *DeviceLinkAction =
- C.MakeAction<LinkJobAction>(LI, types::TY_Image);
+
+ types::ID Output = Args.hasArg(options::OPT_emit_llvm)
+ ? types::TY_LLVM_BC
+ : types::TY_Image;
+
+ auto *DeviceLinkAction = C.MakeAction<LinkJobAction>(LI, Output);
// Linking all inputs for the current GPU arch.
// LI contains all the inputs for the linker.
OffloadAction::DeviceDependences DeviceLinkDeps;
DeviceLinkDeps.add(*DeviceLinkAction, *ToolChains[0],
GpuArchList[I], AssociatedOffloadKind);
- AL.push_back(C.MakeAction<OffloadAction>(DeviceLinkDeps,
- DeviceLinkAction->getType()));
+ Actions.push_back(C.MakeAction<OffloadAction>(
+ DeviceLinkDeps, DeviceLinkAction->getType()));
++I;
}
DeviceLinkerInputs.clear();
- // Create a host object from all the device images by embedding them
- // in a fat binary.
- OffloadAction::DeviceDependences DDeps;
- auto *TopDeviceLinkAction =
- C.MakeAction<LinkJobAction>(AL, types::TY_Object);
- DDeps.add(*TopDeviceLinkAction, *ToolChains[0],
- nullptr, AssociatedOffloadKind);
-
- // Offload the host object to the host linker.
- AL.push_back(C.MakeAction<OffloadAction>(DDeps, TopDeviceLinkAction->getType()));
- }
-
- Action* appendLinkHostActions(ActionList &AL) override { return AL.back(); }
-
- void appendLinkDependences(OffloadAction::DeviceDependences &DA) override {}
- };
-
- /// OpenMP action builder. The host bitcode is passed to the device frontend
- /// and all the device linked images are passed to the host link phase.
- class OpenMPActionBuilder final : public DeviceActionBuilder {
- /// The OpenMP actions for the current input.
- ActionList OpenMPDeviceActions;
-
- /// The linker inputs obtained for each toolchain.
- SmallVector<ActionList, 8> DeviceLinkerInputs;
-
- public:
- OpenMPActionBuilder(Compilation &C, DerivedArgList &Args,
- const Driver::InputList &Inputs)
- : DeviceActionBuilder(C, Args, Inputs, Action::OFK_OpenMP) {}
-
- ActionBuilderReturnCode
- getDeviceDependences(OffloadAction::DeviceDependences &DA,
- phases::ID CurPhase, phases::ID FinalPhase,
- PhasesTy &Phases) override {
- if (OpenMPDeviceActions.empty())
- return ABRT_Inactive;
-
- // We should always have an action for each input.
- assert(OpenMPDeviceActions.size() == ToolChains.size() &&
- "Number of OpenMP actions and toolchains do not match.");
-
- // The host only depends on device action in the linking phase, when all
- // the device images have to be embedded in the host image.
- if (CurPhase == phases::Link) {
- assert(ToolChains.size() == DeviceLinkerInputs.size() &&
- "Toolchains and linker inputs sizes do not match.");
- auto LI = DeviceLinkerInputs.begin();
- for (auto *A : OpenMPDeviceActions) {
- LI->push_back(A);
- ++LI;
- }
-
- // We passed the device action as a host dependence, so we don't need to
- // do anything else with them.
- OpenMPDeviceActions.clear();
- return ABRT_Success;
- }
-
- // By default, we produce an action for each device arch.
- for (Action *&A : OpenMPDeviceActions)
- A = C.getDriver().ConstructPhaseAction(C, Args, CurPhase, A);
-
- return ABRT_Success;
- }
-
- ActionBuilderReturnCode addDeviceDepences(Action *HostAction) override {
-
- // If this is an input action replicate it for each OpenMP toolchain.
- if (auto *IA = dyn_cast<InputAction>(HostAction)) {
- OpenMPDeviceActions.clear();
- for (unsigned I = 0; I < ToolChains.size(); ++I)
- OpenMPDeviceActions.push_back(
- C.MakeAction<InputAction>(IA->getInputArg(), IA->getType()));
- return ABRT_Success;
- }
-
- // If this is an unbundling action use it as is for each OpenMP toolchain.
- if (auto *UA = dyn_cast<OffloadUnbundlingJobAction>(HostAction)) {
- OpenMPDeviceActions.clear();
- auto *IA = cast<InputAction>(UA->getInputs().back());
- std::string FileName = IA->getInputArg().getAsString(Args);
- // Check if the type of the file is the same as the action. Do not
- // unbundle it if it is not. Do not unbundle .so files, for example,
- // which are not object files.
- if (IA->getType() == types::TY_Object &&
- (!llvm::sys::path::has_extension(FileName) ||
- types::lookupTypeForExtension(
- llvm::sys::path::extension(FileName).drop_front()) !=
- types::TY_Object))
- return ABRT_Inactive;
- for (unsigned I = 0; I < ToolChains.size(); ++I) {
- OpenMPDeviceActions.push_back(UA);
- UA->registerDependentActionInfo(
- ToolChains[I], /*BoundArch=*/StringRef(), Action::OFK_OpenMP);
- }
- return ABRT_Success;
- }
-
- // When generating code for OpenMP we use the host compile phase result as
- // a dependence to the device compile phase so that it can learn what
- // declarations should be emitted. However, this is not the only use for
- // the host action, so we prevent it from being collapsed.
- if (isa<CompileJobAction>(HostAction)) {
- HostAction->setCannotBeCollapsedWithNextDependentAction();
- assert(ToolChains.size() == OpenMPDeviceActions.size() &&
- "Toolchains and device action sizes do not match.");
- OffloadAction::HostDependence HDep(
- *HostAction, *C.getSingleOffloadToolChain<Action::OFK_Host>(),
- /*BoundArch=*/nullptr, Action::OFK_OpenMP);
- auto TC = ToolChains.begin();
- for (Action *&A : OpenMPDeviceActions) {
- assert(isa<CompileJobAction>(A));
- OffloadAction::DeviceDependences DDep;
- DDep.add(*A, **TC, /*BoundArch=*/nullptr, Action::OFK_OpenMP);
- A = C.MakeAction<OffloadAction>(HDep, DDep);
- ++TC;
- }
- }
- return ABRT_Success;
- }
-
- void appendTopLevelActions(ActionList &AL) override {
- if (OpenMPDeviceActions.empty())
- return;
-
- // We should always have an action for each input.
- assert(OpenMPDeviceActions.size() == ToolChains.size() &&
- "Number of OpenMP actions and toolchains do not match.");
-
- // Append all device actions followed by the proper offload action.
- auto TI = ToolChains.begin();
- for (auto *A : OpenMPDeviceActions) {
- OffloadAction::DeviceDependences Dep;
- Dep.add(*A, **TI, /*BoundArch=*/nullptr, Action::OFK_OpenMP);
- AL.push_back(C.MakeAction<OffloadAction>(Dep, A->getType()));
- ++TI;
+ // If emitting LLVM, do not generate final host/device compilation action
+ if (Args.hasArg(options::OPT_emit_llvm)) {
+ AL.append(Actions);
+ return;
}
- // We no longer need the action stored in this builder.
- OpenMPDeviceActions.clear();
- }
-
- void appendLinkDeviceActions(ActionList &AL) override {
- assert(ToolChains.size() == DeviceLinkerInputs.size() &&
- "Toolchains and linker inputs sizes do not match.");
- // Append a new link action for each device.
- auto TC = ToolChains.begin();
- for (auto &LI : DeviceLinkerInputs) {
- auto *DeviceLinkAction =
- C.MakeAction<LinkJobAction>(LI, types::TY_Image);
- OffloadAction::DeviceDependences DeviceLinkDeps;
- DeviceLinkDeps.add(*DeviceLinkAction, **TC, /*BoundArch=*/nullptr,
- Action::OFK_OpenMP);
- AL.push_back(C.MakeAction<OffloadAction>(DeviceLinkDeps,
- DeviceLinkAction->getType()));
- ++TC;
+ // Create a host object from all the device images by embedding them
+ // in a fat binary for mixed host-device compilation. For device-only
+ // compilation, creates a fat binary.
+ OffloadAction::DeviceDependences DDeps;
+ if (!CompileDeviceOnly || !BundleOutput || *BundleOutput) {
+ auto *TopDeviceLinkAction = C.MakeAction<LinkJobAction>(
+ Actions,
+ CompileDeviceOnly ? types::TY_HIP_FATBIN : types::TY_Object);
+ DDeps.add(*TopDeviceLinkAction, *ToolChains[0], nullptr,
+ AssociatedOffloadKind);
+ // Offload the host object to the host linker.
+ AL.push_back(
+ C.MakeAction<OffloadAction>(DDeps, TopDeviceLinkAction->getType()));
+ } else {
+ AL.append(Actions);
}
- DeviceLinkerInputs.clear();
}
- Action* appendLinkHostActions(ActionList &AL) override {
- // Create wrapper bitcode from the result of device link actions and compile
- // it to an object which will be added to the host link command.
- auto *BC = C.MakeAction<OffloadWrapperJobAction>(AL, types::TY_LLVM_BC);
- auto *ASM = C.MakeAction<BackendJobAction>(BC, types::TY_PP_Asm);
- return C.MakeAction<AssembleJobAction>(ASM, types::TY_Object);
- }
+ Action* appendLinkHostActions(ActionList &AL) override { return AL.back(); }
void appendLinkDependences(OffloadAction::DeviceDependences &DA) override {}
-
- bool initialize() override {
- // Get the OpenMP toolchains. If we don't get any, the action builder will
- // know there is nothing to do related to OpenMP offloading.
- auto OpenMPTCRange = C.getOffloadToolChains<Action::OFK_OpenMP>();
- for (auto TI = OpenMPTCRange.first, TE = OpenMPTCRange.second; TI != TE;
- ++TI)
- ToolChains.push_back(TI->second);
-
- DeviceLinkerInputs.resize(ToolChains.size());
- return false;
- }
-
- bool canUseBundlerUnbundler() const override {
- // OpenMP should use bundled files whenever possible.
- return true;
- }
};
///
@@ -3296,9 +3651,6 @@ public:
// Create a specialized builder for HIP.
SpecializedBuilders.push_back(new HIPActionBuilder(C, Args, Inputs));
- // Create a specialized builder for OpenMP.
- SpecializedBuilders.push_back(new OpenMPActionBuilder(C, Args, Inputs));
-
//
// TODO: Build other specialized builders here.
//
@@ -3326,6 +3678,17 @@ public:
delete SB;
}
+ /// Record a host action and its originating input argument.
+ void recordHostAction(Action *HostAction, const Arg *InputArg) {
+ assert(HostAction && "Invalid host action");
+ assert(InputArg && "Invalid input argument");
+ auto Loc = HostActionToInputArgMap.find(HostAction);
+ if (Loc == HostActionToInputArgMap.end())
+ HostActionToInputArgMap[HostAction] = InputArg;
+ assert(HostActionToInputArgMap[HostAction] == InputArg &&
+ "host action mapped to multiple input arguments");
+ }
+
/// Generate an action that adds device dependences (if any) to a host action.
/// If no device dependence actions exist, just return the host action \a
/// HostAction. If an error is found or if no builder requires the host action
@@ -3341,6 +3704,7 @@ public:
return HostAction;
assert(HostAction && "Invalid host action!");
+ recordHostAction(HostAction, InputArg);
OffloadAction::DeviceDependences DDeps;
// Check if all the programming models agree we should not emit the host
@@ -3353,7 +3717,6 @@ public:
++InactiveBuilders;
continue;
}
-
auto RetCode =
SB->getDeviceDependences(DDeps, CurPhase, FinalPhase, Phases);
@@ -3394,6 +3757,8 @@ public:
if (!IsValid)
return true;
+ recordHostAction(HostAction, InputArg);
+
// If we are supporting bundling/unbundling and the current action is an
// input action of non-source file, we replace the host action by the
// unbundling action. The bundler tool has the logic to detect if an input
@@ -3410,6 +3775,7 @@ public:
C.getSingleOffloadToolChain<Action::OFK_Host>(),
/*BoundArch=*/StringRef(), Action::OFK_Host);
HostAction = UnbundlingHostAction;
+ recordHostAction(HostAction, InputArg);
}
assert(HostAction && "Invalid host action!");
@@ -3420,7 +3786,7 @@ public:
if (!SB->isValid())
continue;
- auto RetCode = SB->addDeviceDepences(HostAction);
+ auto RetCode = SB->addDeviceDependences(HostAction);
// Host dependences for device actions are not compatible with that same
// action being ignored.
@@ -3446,6 +3812,9 @@ public:
/// programming models allow it.
bool appendTopLevelActions(ActionList &AL, Action *HostAction,
const Arg *InputArg) {
+ if (HostAction)
+ recordHostAction(HostAction, InputArg);
+
// Get the device actions to be appended.
ActionList OffloadAL;
for (auto *SB : SpecializedBuilders) {
@@ -3467,6 +3836,7 @@ public:
// before this method was called.
assert(HostAction == AL.back() && "Host action not in the list??");
HostAction = C.MakeAction<OffloadBundlingJobAction>(OffloadAL);
+ recordHostAction(HostAction, InputArg);
AL.back() = HostAction;
} else
AL.append(OffloadAL.begin(), OffloadAL.end());
@@ -3479,15 +3849,18 @@ public:
return false;
}
- Action* makeHostLinkAction() {
- // Build a list of device linking actions.
- ActionList DeviceAL;
+ void appendDeviceLinkActions(ActionList &AL) {
for (DeviceActionBuilder *SB : SpecializedBuilders) {
if (!SB->isValid())
continue;
- SB->appendLinkDeviceActions(DeviceAL);
+ SB->appendLinkDeviceActions(AL);
}
+ }
+ Action *makeHostLinkAction() {
+ // Build a list of device linking actions.
+ ActionList DeviceAL;
+ appendDeviceLinkActions(DeviceAL);
if (DeviceAL.empty())
return nullptr;
@@ -3497,6 +3870,11 @@ public:
if (!SB->isValid())
continue;
HA = SB->appendLinkHostActions(DeviceAL);
+ // This created host action has no originating input argument, therefore
+ // needs to set its offloading kind directly.
+ if (HA)
+ HA->propagateHostOffloadInfo(SB->getAssociatedOffloadKind(),
+ /*BoundArch=*/nullptr);
}
return HA;
}
@@ -3523,10 +3901,22 @@ public:
// If we don't have device dependencies, we don't have to create an offload
// action.
if (DDeps.getActions().empty()) {
- // Propagate all the active kinds to host action. Given that it is a link
- // action it is assumed to depend on all actions generated so far.
- HostAction->propagateHostOffloadInfo(ActiveOffloadKinds,
- /*BoundArch=*/nullptr);
+ // Set all the active offloading kinds to the link action. Given that it
+ // is a link action it is assumed to depend on all actions generated so
+ // far.
+ HostAction->setHostOffloadInfo(ActiveOffloadKinds,
+ /*BoundArch=*/nullptr);
+ // Propagate active offloading kinds for each input to the link action.
+ // Each input may have different active offloading kind.
+ for (auto *A : HostAction->inputs()) {
+ auto ArgLoc = HostActionToInputArgMap.find(A);
+ if (ArgLoc == HostActionToInputArgMap.end())
+ continue;
+ auto OFKLoc = InputArgToOffloadKindMap.find(ArgLoc->second);
+ if (OFKLoc == InputArgToOffloadKindMap.end())
+ continue;
+ A->propagateHostOffloadInfo(OFKLoc->second, /*BoundArch=*/nullptr);
+ }
return HostAction;
}
@@ -3564,12 +3954,34 @@ void Driver::handleArguments(Compilation &C, DerivedArgList &Args,
phases::ID FinalPhase = getFinalPhase(Args, &FinalPhaseArg);
if (FinalPhase == phases::Link) {
- if (Args.hasArg(options::OPT_emit_llvm))
+ if (Args.hasArgNoClaim(options::OPT_hipstdpar)) {
+ Args.AddFlagArg(nullptr, getOpts().getOption(options::OPT_hip_link));
+ Args.AddFlagArg(nullptr,
+ getOpts().getOption(options::OPT_frtlib_add_rpath));
+ }
+ // Emitting LLVM while linking disabled except in HIPAMD Toolchain
+ if (Args.hasArg(options::OPT_emit_llvm) && !Args.hasArg(options::OPT_hip_link))
Diag(clang::diag::err_drv_emit_llvm_link);
if (IsCLMode() && LTOMode != LTOK_None &&
!Args.getLastArgValue(options::OPT_fuse_ld_EQ)
.equals_insensitive("lld"))
Diag(clang::diag::err_drv_lto_without_lld);
+
+ // If -dumpdir is not specified, give a default prefix derived from the link
+ // output filename. For example, `clang -g -gsplit-dwarf a.c -o x` passes
+ // `-dumpdir x-` to cc1. If -o is unspecified, use
+ // stem(getDefaultImageName()) (usually stem("a.out") = "a").
+ if (!Args.hasArg(options::OPT_dumpdir)) {
+ Arg *FinalOutput = Args.getLastArg(options::OPT_o, options::OPT__SLASH_o);
+ Arg *Arg = Args.MakeSeparateArg(
+ nullptr, getOpts().getOption(options::OPT_dumpdir),
+ Args.MakeArgString(
+ (FinalOutput ? FinalOutput->getValue()
+ : llvm::sys::path::stem(getDefaultImageName())) +
+ "-"));
+ Arg->claim();
+ Args.append(Arg);
+ }
}
if (FinalPhase == phases::Preprocess || Args.hasArg(options::OPT__SLASH_Y_)) {
@@ -3665,11 +4077,6 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
return;
}
- // Reject -Z* at the top level, these options should never have been exposed
- // by gcc.
- if (Arg *A = Args.getLastArg(options::OPT_Z_Joined))
- Diag(clang::diag::err_drv_use_of_Z_option) << A->getAsString(Args);
-
// Diagnose misuse of /Fo.
if (Arg *A = Args.getLastArg(options::OPT__SLASH_Fo)) {
StringRef V = A->getValue();
@@ -3705,11 +4112,19 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
handleArguments(C, Args, Inputs, Actions);
+ bool UseNewOffloadingDriver =
+ C.isOffloadingHostKind(Action::OFK_OpenMP) ||
+ Args.hasFlag(options::OPT_offload_new_driver,
+ options::OPT_no_offload_new_driver, false);
+
// Builder to be used to build offloading actions.
- OffloadingActionBuilder OffloadBuilder(C, Args, Inputs);
+ std::unique_ptr<OffloadingActionBuilder> OffloadBuilder =
+ !UseNewOffloadingDriver
+ ? std::make_unique<OffloadingActionBuilder>(C, Args, Inputs)
+ : nullptr;
// Construct the actions to perform.
- HeaderModulePrecompileJobAction *HeaderModuleAction = nullptr;
+ ExtractAPIJobAction *ExtractAPIAction = nullptr;
ActionList LinkerInputs;
ActionList MergerInputs;
@@ -3728,21 +4143,28 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
// Use the current host action in any of the offloading actions, if
// required.
- if (OffloadBuilder.addHostDependenceToDeviceActions(Current, InputArg))
- break;
+ if (!UseNewOffloadingDriver)
+ if (OffloadBuilder->addHostDependenceToDeviceActions(Current, InputArg))
+ break;
for (phases::ID Phase : PL) {
// Add any offload action the host action depends on.
- Current = OffloadBuilder.addDeviceDependencesToHostAction(
- Current, InputArg, Phase, PL.back(), FullPL);
+ if (!UseNewOffloadingDriver)
+ Current = OffloadBuilder->addDeviceDependencesToHostAction(
+ Current, InputArg, Phase, PL.back(), FullPL);
if (!Current)
break;
// Queue linker inputs.
if (Phase == phases::Link) {
assert(Phase == PL.back() && "linking must be final compilation step.");
- LinkerInputs.push_back(Current);
+ // We don't need to generate additional link commands if emitting AMD
+ // bitcode or compiling only for the offload device
+ if (!(C.getInputArgs().hasArg(options::OPT_hip_link) &&
+ (C.getInputArgs().hasArg(options::OPT_emit_llvm))) &&
+ !offloadDeviceOnly())
+ LinkerInputs.push_back(Current);
Current = nullptr;
break;
}
@@ -3758,12 +4180,8 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
break;
}
- // Each precompiled header file after a module file action is a module
- // header of that same module file, rather than being compiled to a
- // separate PCH.
- if (Phase == phases::Precompile && HeaderModuleAction &&
- getPrecompiledType(InputType) == types::TY_PCH) {
- HeaderModuleAction->addModuleHeaderInput(Current);
+ if (Phase == phases::Precompile && ExtractAPIAction) {
+ ExtractAPIAction->addHeaderInput(Current);
Current = nullptr;
break;
}
@@ -3778,14 +4196,19 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
if (NewCurrent == Current)
continue;
- if (auto *HMA = dyn_cast<HeaderModulePrecompileJobAction>(NewCurrent))
- HeaderModuleAction = HMA;
+ if (auto *EAA = dyn_cast<ExtractAPIJobAction>(NewCurrent))
+ ExtractAPIAction = EAA;
Current = NewCurrent;
+ // Try to build the offloading actions and add the result as a dependency
+ // to the host.
+ if (UseNewOffloadingDriver)
+ Current = BuildOffloadingActions(C, Args, I, Current);
// Use the current host action in any of the offloading actions, if
// required.
- if (OffloadBuilder.addHostDependenceToDeviceActions(Current, InputArg))
+ else if (OffloadBuilder->addHostDependenceToDeviceActions(Current,
+ InputArg))
break;
if (Current->getType() == types::TY_Nothing)
@@ -3797,21 +4220,40 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
Actions.push_back(Current);
// Add any top level actions generated for offloading.
- OffloadBuilder.appendTopLevelActions(Actions, Current, InputArg);
+ if (!UseNewOffloadingDriver)
+ OffloadBuilder->appendTopLevelActions(Actions, Current, InputArg);
+ else if (Current)
+ Current->propagateHostOffloadInfo(C.getActiveOffloadKinds(),
+ /*BoundArch=*/nullptr);
}
// Add a link action if necessary.
+
+ if (LinkerInputs.empty()) {
+ Arg *FinalPhaseArg;
+ if (getFinalPhase(Args, &FinalPhaseArg) == phases::Link)
+ if (!UseNewOffloadingDriver)
+ OffloadBuilder->appendDeviceLinkActions(Actions);
+ }
+
if (!LinkerInputs.empty()) {
- if (Action *Wrapper = OffloadBuilder.makeHostLinkAction())
- LinkerInputs.push_back(Wrapper);
+ if (!UseNewOffloadingDriver)
+ if (Action *Wrapper = OffloadBuilder->makeHostLinkAction())
+ LinkerInputs.push_back(Wrapper);
Action *LA;
// Check if this Linker Job should emit a static library.
if (ShouldEmitStaticLibrary(Args)) {
LA = C.MakeAction<StaticLibJobAction>(LinkerInputs, types::TY_Image);
+ } else if (UseNewOffloadingDriver ||
+ Args.hasArg(options::OPT_offload_link)) {
+ LA = C.MakeAction<LinkerWrapperJobAction>(LinkerInputs, types::TY_Image);
+ LA->propagateHostOffloadInfo(C.getActiveOffloadKinds(),
+ /*BoundArch=*/nullptr);
} else {
LA = C.MakeAction<LinkJobAction>(LinkerInputs, types::TY_Image);
}
- LA = OffloadBuilder.processHostLinkAction(LA);
+ if (!UseNewOffloadingDriver)
+ LA = OffloadBuilder->processHostLinkAction(LA);
Actions.push_back(LA);
}
@@ -3823,7 +4265,7 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
if (Args.hasArg(options::OPT_emit_interface_stubs)) {
auto PhaseList = types::getCompilationPhases(
types::TY_IFS_CPP,
- Args.hasArg(options::OPT_c) ? phases::Compile : phases::LastPhase);
+ Args.hasArg(options::OPT_c) ? phases::Compile : phases::IfsMerge);
ActionList MergerInputs;
@@ -3876,25 +4318,358 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
C.MakeAction<IfsMergeJobAction>(MergerInputs, types::TY_Image));
}
- // If --print-supported-cpus, -mcpu=? or -mtune=? is specified, build a custom
- // Compile phase that prints out supported cpu models and quits.
- if (Arg *A = Args.getLastArg(options::OPT_print_supported_cpus)) {
- // Use the -mcpu=? flag as the dummy input to cc1.
- Actions.clear();
- Action *InputAc = C.MakeAction<InputAction>(*A, types::TY_C);
- Actions.push_back(
- C.MakeAction<PrecompileJobAction>(InputAc, types::TY_Nothing));
- for (auto &I : Inputs)
- I.second->claim();
+ for (auto Opt : {options::OPT_print_supported_cpus,
+ options::OPT_print_supported_extensions}) {
+ // If --print-supported-cpus, -mcpu=? or -mtune=? is specified, build a
+ // custom Compile phase that prints out supported cpu models and quits.
+ //
+ // If --print-supported-extensions is specified, call the helper function
+ // RISCVMarchHelp in RISCVISAInfo.cpp that prints out supported extensions
+ // and quits.
+ if (Arg *A = Args.getLastArg(Opt)) {
+ if (Opt == options::OPT_print_supported_extensions &&
+ !C.getDefaultToolChain().getTriple().isRISCV() &&
+ !C.getDefaultToolChain().getTriple().isAArch64() &&
+ !C.getDefaultToolChain().getTriple().isARM()) {
+ C.getDriver().Diag(diag::err_opt_not_valid_on_target)
+ << "--print-supported-extensions";
+ return;
+ }
+
+ // Use the -mcpu=? flag as the dummy input to cc1.
+ Actions.clear();
+ Action *InputAc = C.MakeAction<InputAction>(*A, types::TY_C);
+ Actions.push_back(
+ C.MakeAction<PrecompileJobAction>(InputAc, types::TY_Nothing));
+ for (auto &I : Inputs)
+ I.second->claim();
+ }
+ }
+
+ // Call validator for dxil when -Vd not in Args.
+ if (C.getDefaultToolChain().getTriple().isDXIL()) {
+ // Only add action when needValidation.
+ const auto &TC =
+ static_cast<const toolchains::HLSLToolChain &>(C.getDefaultToolChain());
+ if (TC.requiresValidation(Args)) {
+ Action *LastAction = Actions.back();
+ Actions.push_back(C.MakeAction<BinaryAnalyzeJobAction>(
+ LastAction, types::TY_DX_CONTAINER));
+ }
}
// Claim ignored clang-cl options.
Args.ClaimAllArgs(options::OPT_cl_ignored_Group);
+}
+
+/// Returns the canonical name for the offloading architecture when using a HIP
+/// or CUDA architecture.
+static StringRef getCanonicalArchString(Compilation &C,
+ const llvm::opt::DerivedArgList &Args,
+ StringRef ArchStr,
+ const llvm::Triple &Triple,
+ bool SuppressError = false) {
+ // Lookup the CUDA / HIP architecture string. Only report an error if we were
+ // expecting the triple to be only NVPTX / AMDGPU.
+ CudaArch Arch = StringToCudaArch(getProcessorFromTargetID(Triple, ArchStr));
+ if (!SuppressError && Triple.isNVPTX() &&
+ (Arch == CudaArch::UNKNOWN || !IsNVIDIAGpuArch(Arch))) {
+ C.getDriver().Diag(clang::diag::err_drv_offload_bad_gpu_arch)
+ << "CUDA" << ArchStr;
+ return StringRef();
+ } else if (!SuppressError && Triple.isAMDGPU() &&
+ (Arch == CudaArch::UNKNOWN || !IsAMDGpuArch(Arch))) {
+ C.getDriver().Diag(clang::diag::err_drv_offload_bad_gpu_arch)
+ << "HIP" << ArchStr;
+ return StringRef();
+ }
+
+ if (IsNVIDIAGpuArch(Arch))
+ return Args.MakeArgStringRef(CudaArchToString(Arch));
+
+ if (IsAMDGpuArch(Arch)) {
+ llvm::StringMap<bool> Features;
+ auto HIPTriple = getHIPOffloadTargetTriple(C.getDriver(), C.getInputArgs());
+ if (!HIPTriple)
+ return StringRef();
+ auto Arch = parseTargetID(*HIPTriple, ArchStr, &Features);
+ if (!Arch) {
+ C.getDriver().Diag(clang::diag::err_drv_bad_target_id) << ArchStr;
+ C.setContainsError();
+ return StringRef();
+ }
+ return Args.MakeArgStringRef(getCanonicalTargetID(*Arch, Features));
+ }
+
+ // If the input isn't CUDA or HIP just return the architecture.
+ return ArchStr;
+}
+
+/// Checks if the set offloading architectures does not conflict. Returns the
+/// incompatible pair if a conflict occurs.
+static std::optional<std::pair<llvm::StringRef, llvm::StringRef>>
+getConflictOffloadArchCombination(const llvm::DenseSet<StringRef> &Archs,
+ llvm::Triple Triple) {
+ if (!Triple.isAMDGPU())
+ return std::nullopt;
+
+ std::set<StringRef> ArchSet;
+ llvm::copy(Archs, std::inserter(ArchSet, ArchSet.begin()));
+ return getConflictTargetIDCombination(ArchSet);
+}
+
+llvm::DenseSet<StringRef>
+Driver::getOffloadArchs(Compilation &C, const llvm::opt::DerivedArgList &Args,
+ Action::OffloadKind Kind, const ToolChain *TC,
+ bool SuppressError) const {
+ if (!TC)
+ TC = &C.getDefaultToolChain();
+
+ // --offload and --offload-arch options are mutually exclusive.
+ if (Args.hasArgNoClaim(options::OPT_offload_EQ) &&
+ Args.hasArgNoClaim(options::OPT_offload_arch_EQ,
+ options::OPT_no_offload_arch_EQ)) {
+ C.getDriver().Diag(diag::err_opt_not_valid_with_opt)
+ << "--offload"
+ << (Args.hasArgNoClaim(options::OPT_offload_arch_EQ)
+ ? "--offload-arch"
+ : "--no-offload-arch");
+ }
+
+ if (KnownArchs.contains(TC))
+ return KnownArchs.lookup(TC);
+
+ llvm::DenseSet<StringRef> Archs;
+ for (auto *Arg : Args) {
+ // Extract any '--[no-]offload-arch' arguments intended for this toolchain.
+ std::unique_ptr<llvm::opt::Arg> ExtractedArg = nullptr;
+ if (Arg->getOption().matches(options::OPT_Xopenmp_target_EQ) &&
+ ToolChain::getOpenMPTriple(Arg->getValue(0)) == TC->getTriple()) {
+ Arg->claim();
+ unsigned Index = Args.getBaseArgs().MakeIndex(Arg->getValue(1));
+ ExtractedArg = getOpts().ParseOneArg(Args, Index);
+ Arg = ExtractedArg.get();
+ }
+
+ // Add or remove the seen architectures in order of appearance. If an
+ // invalid architecture is given we simply exit.
+ if (Arg->getOption().matches(options::OPT_offload_arch_EQ)) {
+ for (StringRef Arch : llvm::split(Arg->getValue(), ",")) {
+ if (Arch == "native" || Arch.empty()) {
+ auto GPUsOrErr = TC->getSystemGPUArchs(Args);
+ if (!GPUsOrErr) {
+ if (SuppressError)
+ llvm::consumeError(GPUsOrErr.takeError());
+ else
+ TC->getDriver().Diag(diag::err_drv_undetermined_gpu_arch)
+ << llvm::Triple::getArchTypeName(TC->getArch())
+ << llvm::toString(GPUsOrErr.takeError()) << "--offload-arch";
+ continue;
+ }
+
+ for (auto ArchStr : *GPUsOrErr) {
+ Archs.insert(
+ getCanonicalArchString(C, Args, Args.MakeArgString(ArchStr),
+ TC->getTriple(), SuppressError));
+ }
+ } else {
+ StringRef ArchStr = getCanonicalArchString(
+ C, Args, Arch, TC->getTriple(), SuppressError);
+ if (ArchStr.empty())
+ return Archs;
+ Archs.insert(ArchStr);
+ }
+ }
+ } else if (Arg->getOption().matches(options::OPT_no_offload_arch_EQ)) {
+ for (StringRef Arch : llvm::split(Arg->getValue(), ",")) {
+ if (Arch == "all") {
+ Archs.clear();
+ } else {
+ StringRef ArchStr = getCanonicalArchString(
+ C, Args, Arch, TC->getTriple(), SuppressError);
+ if (ArchStr.empty())
+ return Archs;
+ Archs.erase(ArchStr);
+ }
+ }
+ }
+ }
+
+ if (auto ConflictingArchs =
+ getConflictOffloadArchCombination(Archs, TC->getTriple())) {
+ C.getDriver().Diag(clang::diag::err_drv_bad_offload_arch_combo)
+ << ConflictingArchs->first << ConflictingArchs->second;
+ C.setContainsError();
+ }
+
+ // Skip filling defaults if we're just querying what is availible.
+ if (SuppressError)
+ return Archs;
+
+ if (Archs.empty()) {
+ if (Kind == Action::OFK_Cuda)
+ Archs.insert(CudaArchToString(CudaArch::CudaDefault));
+ else if (Kind == Action::OFK_HIP)
+ Archs.insert(CudaArchToString(CudaArch::HIPDefault));
+ else if (Kind == Action::OFK_OpenMP)
+ Archs.insert(StringRef());
+ } else {
+ Args.ClaimAllArgs(options::OPT_offload_arch_EQ);
+ Args.ClaimAllArgs(options::OPT_no_offload_arch_EQ);
+ }
- // Claim --cuda-host-only and --cuda-compile-host-device, which may be passed
- // to non-CUDA compilations and should not trigger warnings there.
- Args.ClaimAllArgs(options::OPT_cuda_host_only);
- Args.ClaimAllArgs(options::OPT_cuda_compile_host_device);
+ return Archs;
+}
+
+Action *Driver::BuildOffloadingActions(Compilation &C,
+ llvm::opt::DerivedArgList &Args,
+ const InputTy &Input,
+ Action *HostAction) const {
+ // Don't build offloading actions if explicitly disabled or we do not have a
+ // valid source input and compile action to embed it in. If preprocessing only
+ // ignore embedding.
+ if (offloadHostOnly() || !types::isSrcFile(Input.first) ||
+ !(isa<CompileJobAction>(HostAction) ||
+ getFinalPhase(Args) == phases::Preprocess))
+ return HostAction;
+
+ ActionList OffloadActions;
+ OffloadAction::DeviceDependences DDeps;
+
+ const Action::OffloadKind OffloadKinds[] = {
+ Action::OFK_OpenMP, Action::OFK_Cuda, Action::OFK_HIP};
+
+ for (Action::OffloadKind Kind : OffloadKinds) {
+ SmallVector<const ToolChain *, 2> ToolChains;
+ ActionList DeviceActions;
+
+ auto TCRange = C.getOffloadToolChains(Kind);
+ for (auto TI = TCRange.first, TE = TCRange.second; TI != TE; ++TI)
+ ToolChains.push_back(TI->second);
+
+ if (ToolChains.empty())
+ continue;
+
+ types::ID InputType = Input.first;
+ const Arg *InputArg = Input.second;
+
+ // The toolchain can be active for unsupported file types.
+ if ((Kind == Action::OFK_Cuda && !types::isCuda(InputType)) ||
+ (Kind == Action::OFK_HIP && !types::isHIP(InputType)))
+ continue;
+
+ // Get the product of all bound architectures and toolchains.
+ SmallVector<std::pair<const ToolChain *, StringRef>> TCAndArchs;
+ for (const ToolChain *TC : ToolChains)
+ for (StringRef Arch : getOffloadArchs(C, Args, Kind, TC))
+ TCAndArchs.push_back(std::make_pair(TC, Arch));
+
+ for (unsigned I = 0, E = TCAndArchs.size(); I != E; ++I)
+ DeviceActions.push_back(C.MakeAction<InputAction>(*InputArg, InputType));
+
+ if (DeviceActions.empty())
+ return HostAction;
+
+ auto PL = types::getCompilationPhases(*this, Args, InputType);
+
+ for (phases::ID Phase : PL) {
+ if (Phase == phases::Link) {
+ assert(Phase == PL.back() && "linking must be final compilation step.");
+ break;
+ }
+
+ auto TCAndArch = TCAndArchs.begin();
+ for (Action *&A : DeviceActions) {
+ if (A->getType() == types::TY_Nothing)
+ continue;
+
+ // Propagate the ToolChain so we can use it in ConstructPhaseAction.
+ A->propagateDeviceOffloadInfo(Kind, TCAndArch->second.data(),
+ TCAndArch->first);
+ A = ConstructPhaseAction(C, Args, Phase, A, Kind);
+
+ if (isa<CompileJobAction>(A) && isa<CompileJobAction>(HostAction) &&
+ Kind == Action::OFK_OpenMP &&
+ HostAction->getType() != types::TY_Nothing) {
+ // OpenMP offloading has a dependency on the host compile action to
+ // identify which declarations need to be emitted. This shouldn't be
+ // collapsed with any other actions so we can use it in the device.
+ HostAction->setCannotBeCollapsedWithNextDependentAction();
+ OffloadAction::HostDependence HDep(
+ *HostAction, *C.getSingleOffloadToolChain<Action::OFK_Host>(),
+ TCAndArch->second.data(), Kind);
+ OffloadAction::DeviceDependences DDep;
+ DDep.add(*A, *TCAndArch->first, TCAndArch->second.data(), Kind);
+ A = C.MakeAction<OffloadAction>(HDep, DDep);
+ }
+
+ ++TCAndArch;
+ }
+ }
+
+ // Compiling HIP in non-RDC mode requires linking each action individually.
+ for (Action *&A : DeviceActions) {
+ if ((A->getType() != types::TY_Object &&
+ A->getType() != types::TY_LTO_BC) ||
+ Kind != Action::OFK_HIP ||
+ Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc, false))
+ continue;
+ ActionList LinkerInput = {A};
+ A = C.MakeAction<LinkJobAction>(LinkerInput, types::TY_Image);
+ }
+
+ auto TCAndArch = TCAndArchs.begin();
+ for (Action *A : DeviceActions) {
+ DDeps.add(*A, *TCAndArch->first, TCAndArch->second.data(), Kind);
+ OffloadAction::DeviceDependences DDep;
+ DDep.add(*A, *TCAndArch->first, TCAndArch->second.data(), Kind);
+ OffloadActions.push_back(C.MakeAction<OffloadAction>(DDep, A->getType()));
+ ++TCAndArch;
+ }
+ }
+
+ if (offloadDeviceOnly())
+ return C.MakeAction<OffloadAction>(DDeps, types::TY_Nothing);
+
+ if (OffloadActions.empty())
+ return HostAction;
+
+ OffloadAction::DeviceDependences DDep;
+ if (C.isOffloadingHostKind(Action::OFK_Cuda) &&
+ !Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc, false)) {
+ // If we are not in RDC-mode we just emit the final CUDA fatbinary for
+ // each translation unit without requiring any linking.
+ Action *FatbinAction =
+ C.MakeAction<LinkJobAction>(OffloadActions, types::TY_CUDA_FATBIN);
+ DDep.add(*FatbinAction, *C.getSingleOffloadToolChain<Action::OFK_Cuda>(),
+ nullptr, Action::OFK_Cuda);
+ } else if (C.isOffloadingHostKind(Action::OFK_HIP) &&
+ !Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
+ false)) {
+ // If we are not in RDC-mode we just emit the final HIP fatbinary for each
+ // translation unit, linking each input individually.
+ Action *FatbinAction =
+ C.MakeAction<LinkJobAction>(OffloadActions, types::TY_HIP_FATBIN);
+ DDep.add(*FatbinAction, *C.getSingleOffloadToolChain<Action::OFK_HIP>(),
+ nullptr, Action::OFK_HIP);
+ } else {
+ // Package all the offloading actions into a single output that can be
+ // embedded in the host and linked.
+ Action *PackagerAction =
+ C.MakeAction<OffloadPackagerJobAction>(OffloadActions, types::TY_Image);
+ DDep.add(*PackagerAction, *C.getSingleOffloadToolChain<Action::OFK_Host>(),
+ nullptr, C.getActiveOffloadKinds());
+ }
+
+ // If we are unable to embed a single device output into the host, we need to
+ // add each device output as a host dependency to ensure they are still built.
+ bool SingleDeviceOutput = !llvm::any_of(OffloadActions, [](Action *A) {
+ return A->getType() == types::TY_Nothing;
+ }) && isa<CompileJobAction>(HostAction);
+ OffloadAction::HostDependence HDep(
+ *HostAction, *C.getSingleOffloadToolChain<Action::OFK_Host>(),
+ /*BoundArch=*/nullptr, SingleDeviceOutput ? DDep : DDeps);
+ return C.MakeAction<OffloadAction>(HDep, SingleDeviceOutput ? DDep : DDeps);
}
Action *Driver::ConstructPhaseAction(
@@ -3923,10 +4698,14 @@ Action *Driver::ConstructPhaseAction(
OutputTy = types::TY_Dependencies;
} else {
OutputTy = Input->getType();
+ // For these cases, the preprocessor is only translating forms, the Output
+ // still needs preprocessing.
if (!Args.hasFlag(options::OPT_frewrite_includes,
options::OPT_fno_rewrite_includes, false) &&
!Args.hasFlag(options::OPT_frewrite_imports,
options::OPT_fno_rewrite_imports, false) &&
+ !Args.hasFlag(options::OPT_fdirectives_only,
+ options::OPT_fno_directives_only, false) &&
!CCGenDiagnostics)
OutputTy = types::getPreprocessedType(OutputTy);
assert(OutputTy != types::TY_INVALID &&
@@ -3935,6 +4714,10 @@ Action *Driver::ConstructPhaseAction(
return C.MakeAction<PreprocessJobAction>(Input, OutputTy);
}
case phases::Precompile: {
+ // API extraction should not generate an actual precompilation action.
+ if (Args.hasArg(options::OPT_extract_api))
+ return C.MakeAction<ExtractAPIJobAction>(Input, types::TY_API_INFO);
+
types::ID OutputTy = getPrecompiledType(Input->getType());
assert(OutputTy != types::TY_INVALID &&
"Cannot precompile this input type!");
@@ -3954,9 +4737,6 @@ Action *Driver::ConstructPhaseAction(
OutputTy = types::TY_Nothing;
}
- if (ModName)
- return C.MakeAction<HeaderModulePrecompileJobAction>(Input, OutputTy,
- ModName);
return C.MakeAction<PrecompileJobAction>(Input, OutputTy);
}
case phases::Compile: {
@@ -3977,20 +4757,44 @@ Action *Driver::ConstructPhaseAction(
return C.MakeAction<CompileJobAction>(Input, types::TY_ModuleFile);
if (Args.hasArg(options::OPT_verify_pch))
return C.MakeAction<VerifyPCHJobAction>(Input, types::TY_Nothing);
+ if (Args.hasArg(options::OPT_extract_api))
+ return C.MakeAction<ExtractAPIJobAction>(Input, types::TY_API_INFO);
return C.MakeAction<CompileJobAction>(Input, types::TY_LLVM_BC);
}
case phases::Backend: {
if (isUsingLTO() && TargetDeviceOffloadKind == Action::OFK_None) {
+ types::ID Output;
+ if (Args.hasArg(options::OPT_ffat_lto_objects) &&
+ !Args.hasArg(options::OPT_emit_llvm))
+ Output = types::TY_PP_Asm;
+ else if (Args.hasArg(options::OPT_S))
+ Output = types::TY_LTO_IR;
+ else
+ Output = types::TY_LTO_BC;
+ return C.MakeAction<BackendJobAction>(Input, Output);
+ }
+ if (isUsingLTO(/* IsOffload */ true) &&
+ TargetDeviceOffloadKind != Action::OFK_None) {
types::ID Output =
Args.hasArg(options::OPT_S) ? types::TY_LTO_IR : types::TY_LTO_BC;
return C.MakeAction<BackendJobAction>(Input, Output);
}
if (Args.hasArg(options::OPT_emit_llvm) ||
- (TargetDeviceOffloadKind == Action::OFK_HIP &&
- Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
- false))) {
+ (((Input->getOffloadingToolChain() &&
+ Input->getOffloadingToolChain()->getTriple().isAMDGPU()) ||
+ TargetDeviceOffloadKind == Action::OFK_HIP) &&
+ (Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
+ false) ||
+ TargetDeviceOffloadKind == Action::OFK_OpenMP))) {
types::ID Output =
- Args.hasArg(options::OPT_S) ? types::TY_LLVM_IR : types::TY_LLVM_BC;
+ Args.hasArg(options::OPT_S) &&
+ (TargetDeviceOffloadKind == Action::OFK_None ||
+ offloadDeviceOnly() ||
+ (TargetDeviceOffloadKind == Action::OFK_HIP &&
+ !Args.hasFlag(options::OPT_offload_new_driver,
+ options::OPT_no_offload_new_driver, false)))
+ ? types::TY_LLVM_IR
+ : types::TY_LLVM_BC;
return C.MakeAction<BackendJobAction>(Input, Output);
}
return C.MakeAction<BackendJobAction>(Input, types::TY_PP_Asm);
@@ -4020,11 +4824,16 @@ void Driver::BuildJobs(Compilation &C) const {
// we are also generating .o files. So we allow more than one output file in
// this case as well.
//
+ // OffloadClass of type TY_Nothing: device-only output will place many outputs
+ // into a single offloading action. We should count all inputs to the action
+ // as outputs. Also ignore device-only outputs if we're compiling with
+ // -fsyntax-only.
if (FinalOutput) {
unsigned NumOutputs = 0;
unsigned NumIfsOutputs = 0;
- for (const Action *A : C.getActions())
+ for (const Action *A : C.getActions()) {
if (A->getType() != types::TY_Nothing &&
+ A->getType() != types::TY_DX_CONTAINER &&
!(A->getKind() == Action::IfsMergeJobClass ||
(A->getType() == clang::driver::types::TY_IFS_CPP &&
A->getKind() == clang::driver::Action::CompileJobClass &&
@@ -4032,6 +4841,11 @@ void Driver::BuildJobs(Compilation &C) const {
(A->getKind() == Action::BindArchClass && A->getInputs().size() &&
A->getInputs().front()->getKind() == Action::IfsMergeJobClass)))
++NumOutputs;
+ else if (A->getKind() == Action::OffloadClass &&
+ A->getType() == types::TY_Nothing &&
+ !C.getArgs().hasArg(options::OPT_fsyntax_only))
+ NumOutputs += A->size();
+ }
if (NumOutputs > 1) {
Diag(clang::diag::err_drv_output_argument_with_multiple_files);
@@ -4040,13 +4854,6 @@ void Driver::BuildJobs(Compilation &C) const {
}
const llvm::Triple &RawTriple = C.getDefaultToolChain().getTriple();
- if (RawTriple.isOSAIX()) {
- if (Arg *A = C.getArgs().getLastArg(options::OPT_G))
- Diag(diag::err_drv_unsupported_opt_for_target)
- << A->getSpelling() << RawTriple.str();
- if (LTOMode == LTOK_Thin)
- Diag(diag::err_drv_clang_unsupported) << "thinLTO on AIX";
- }
// Collect the list of architectures.
llvm::StringSet<> ArchNames;
@@ -4056,7 +4863,7 @@ void Driver::BuildJobs(Compilation &C) const {
ArchNames.insert(A->getValue());
// Set of (Action, canonical ToolChain triple) pairs we've built jobs for.
- std::map<std::pair<const Action *, std::string>, InputInfo> CachedResults;
+ std::map<std::pair<const Action *, std::string>, InputInfoList> CachedResults;
for (Action *A : C.getActions()) {
// If we are linking an image for multiple archs then the linker wants
// -arch_multiple and -final_output <final image name>. Unfortunately, this
@@ -4088,7 +4895,7 @@ void Driver::BuildJobs(Compilation &C) const {
if (CCPrintProcessStats) {
C.setPostCallback([=](const Command &Cmd, int Res) {
- Optional<llvm::sys::ProcessStatistics> ProcStat =
+ std::optional<llvm::sys::ProcessStatistics> ProcStat =
Cmd.getProcessStatistics();
if (!ProcStat)
return;
@@ -4124,7 +4931,7 @@ void Driver::BuildJobs(Compilation &C) const {
<< '\n';
Out.flush();
std::error_code EC;
- llvm::raw_fd_ostream OS(CCPrintStatReportFilename.c_str(), EC,
+ llvm::raw_fd_ostream OS(CCPrintStatReportFilename, EC,
llvm::sys::fs::OF_Append |
llvm::sys::fs::OF_Text);
if (EC)
@@ -4148,6 +4955,8 @@ void Driver::BuildJobs(Compilation &C) const {
C.getArgs().hasArg(options::OPT_Qunused_arguments))
return;
+ // Claim -fdriver-only here.
+ (void)C.getArgs().hasArg(options::OPT_fdriver_only);
// Claim -### here.
(void)C.getArgs().hasArg(options::OPT__HASH_HASH_HASH);
@@ -4155,6 +4964,12 @@ void Driver::BuildJobs(Compilation &C) const {
(void)C.getArgs().hasArg(options::OPT_driver_mode);
(void)C.getArgs().hasArg(options::OPT_rsp_quoting);
+ bool HasAssembleJob = llvm::any_of(C.getJobs(), [](auto &J) {
+ // Match ClangAs and other derived assemblers of Tool. ClangAs uses a
+ // longer ShortName "clang integrated assembler" while other assemblers just
+ // use "assembler".
+ return strstr(J.getCreator().getShortName(), "assembler");
+ });
for (Arg *A : C.getArgs()) {
// FIXME: It would be nice to be able to send the argument to the
// DiagnosticsEngine, so that extra values, position, and so on could be
@@ -4182,9 +4997,21 @@ void Driver::BuildJobs(Compilation &C) const {
// In clang-cl, don't mention unknown arguments here since they have
// already been warned about.
- if (!IsCLMode() || !A->getOption().matches(options::OPT_UNKNOWN))
- Diag(clang::diag::warn_drv_unused_argument)
- << A->getAsString(C.getArgs());
+ if (!IsCLMode() || !A->getOption().matches(options::OPT_UNKNOWN)) {
+ if (A->getOption().hasFlag(options::TargetSpecific) &&
+ !A->isIgnoredTargetSpecific() && !HasAssembleJob &&
+ // When for example -### or -v is used
+ // without a file, target specific options are not
+ // consumed/validated.
+ // Instead emitting an error emit a warning instead.
+ !C.getActions().empty()) {
+ Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << getTargetTriple();
+ } else {
+ Diag(clang::diag::warn_drv_unused_argument)
+ << A->getAsString(C.getArgs());
+ }
+ }
}
}
}
@@ -4259,7 +5086,8 @@ class ToolSelector final {
return TC.useIntegratedAs() && !SaveTemps &&
!C.getArgs().hasArg(options::OPT_via_file_asm) &&
!C.getArgs().hasArg(options::OPT__SLASH_FA) &&
- !C.getArgs().hasArg(options::OPT__SLASH_Fa);
+ !C.getArgs().hasArg(options::OPT__SLASH_Fa) &&
+ !C.getArgs().hasArg(options::OPT_dxc_Fc);
}
/// Return true if a preprocessor action can be collapsed.
@@ -4316,6 +5144,12 @@ class ToolSelector final {
if (!T)
return nullptr;
+ // Can't collapse if we don't have codegen support unless we are
+ // emitting LLVM IR.
+ bool OutputIsLLVM = types::isLLVMIR(ActionInfo[0].JA->getType());
+ if (!T->hasIntegratedBackend() && !(OutputIsLLVM && T->canEmitIR()))
+ return nullptr;
+
// When using -fembed-bitcode, it is required to have the same tool (clang)
// for both CompilerJA and BackendJA. Otherwise, combine two stages.
if (EmbedBitcode) {
@@ -4385,6 +5219,12 @@ class ToolSelector final {
if (!T)
return nullptr;
+ // Can't collapse if we don't have codegen support unless we are
+ // emitting LLVM IR.
+ bool OutputIsLLVM = types::isLLVMIR(ActionInfo[0].JA->getType());
+ if (!T->hasIntegratedBackend() && !(OutputIsLLVM && T->canEmitIR()))
+ return nullptr;
+
if (T->canEmitIR() && ((SaveTemps && !InputIsBitcode) || EmbedBitcode))
return nullptr;
@@ -4501,10 +5341,11 @@ static std::string GetTriplePlusArchString(const ToolChain *TC,
return TriplePlusArch;
}
-InputInfo Driver::BuildJobsForAction(
+InputInfoList Driver::BuildJobsForAction(
Compilation &C, const Action *A, const ToolChain *TC, StringRef BoundArch,
bool AtTopLevel, bool MultipleArchs, const char *LinkingOutput,
- std::map<std::pair<const Action *, std::string>, InputInfo> &CachedResults,
+ std::map<std::pair<const Action *, std::string>, InputInfoList>
+ &CachedResults,
Action::OffloadKind TargetDeviceOffloadKind) const {
std::pair<const Action *, std::string> ActionTC = {
A, GetTriplePlusArchString(TC, BoundArch, TargetDeviceOffloadKind)};
@@ -4512,17 +5353,49 @@ InputInfo Driver::BuildJobsForAction(
if (CachedResult != CachedResults.end()) {
return CachedResult->second;
}
- InputInfo Result = BuildJobsForActionNoCache(
+ InputInfoList Result = BuildJobsForActionNoCache(
C, A, TC, BoundArch, AtTopLevel, MultipleArchs, LinkingOutput,
CachedResults, TargetDeviceOffloadKind);
CachedResults[ActionTC] = Result;
return Result;
}
-InputInfo Driver::BuildJobsForActionNoCache(
+static void handleTimeTrace(Compilation &C, const ArgList &Args,
+ const JobAction *JA, const char *BaseInput,
+ const InputInfo &Result) {
+ Arg *A =
+ Args.getLastArg(options::OPT_ftime_trace, options::OPT_ftime_trace_EQ);
+ if (!A)
+ return;
+ SmallString<128> Path;
+ if (A->getOption().matches(options::OPT_ftime_trace_EQ)) {
+ Path = A->getValue();
+ if (llvm::sys::fs::is_directory(Path)) {
+ SmallString<128> Tmp(Result.getFilename());
+ llvm::sys::path::replace_extension(Tmp, "json");
+ llvm::sys::path::append(Path, llvm::sys::path::filename(Tmp));
+ }
+ } else {
+ if (Arg *DumpDir = Args.getLastArgNoClaim(options::OPT_dumpdir)) {
+ // The trace file is ${dumpdir}${basename}.json. Note that dumpdir may not
+ // end with a path separator.
+ Path = DumpDir->getValue();
+ Path += llvm::sys::path::filename(BaseInput);
+ } else {
+ Path = Result.getFilename();
+ }
+ llvm::sys::path::replace_extension(Path, "json");
+ }
+ const char *ResultFile = C.getArgs().MakeArgString(Path);
+ C.addTimeTraceFile(ResultFile, JA);
+ C.addResultFile(ResultFile, JA);
+}
+
+InputInfoList Driver::BuildJobsForActionNoCache(
Compilation &C, const Action *A, const ToolChain *TC, StringRef BoundArch,
bool AtTopLevel, bool MultipleArchs, const char *LinkingOutput,
- std::map<std::pair<const Action *, std::string>, InputInfo> &CachedResults,
+ std::map<std::pair<const Action *, std::string>, InputInfoList>
+ &CachedResults,
Action::OffloadKind TargetDeviceOffloadKind) const {
llvm::PrettyStackTraceString CrashInfo("Building compilation jobs");
@@ -4553,20 +5426,21 @@ InputInfo Driver::BuildJobsForActionNoCache(
// \
// Device Action 1 ---> OffloadAction -> Device Action 2
//
- // For a) and b), we just return the job generated for the dependence. For
+ // For a) and b), we just return the job generated for the dependences. For
// c) and d) we override the current action with the host/device dependence
// if the current toolchain is host/device and set the offload dependences
// info with the jobs obtained from the device/host dependence(s).
- // If there is a single device option, just generate the job for it.
- if (OA->hasSingleDeviceDependence()) {
- InputInfo DevA;
+ // If there is a single device option or has no host action, just generate
+ // the job for it.
+ if (OA->hasSingleDeviceDependence() || !OA->hasHostDependence()) {
+ InputInfoList DevA;
OA->doOnEachDeviceDependence([&](Action *DepA, const ToolChain *DepTC,
const char *DepBoundArch) {
- DevA =
- BuildJobsForAction(C, DepA, DepTC, DepBoundArch, AtTopLevel,
- /*MultipleArchs*/ !!DepBoundArch, LinkingOutput,
- CachedResults, DepA->getOffloadingDeviceKind());
+ DevA.append(BuildJobsForAction(C, DepA, DepTC, DepBoundArch, AtTopLevel,
+ /*MultipleArchs*/ !!DepBoundArch,
+ LinkingOutput, CachedResults,
+ DepA->getOffloadingDeviceKind()));
});
return DevA;
}
@@ -4578,7 +5452,7 @@ InputInfo Driver::BuildJobsForActionNoCache(
OA->doOnEachDependence(
/*IsHostDependence=*/BuildingForOffloadDevice,
[&](Action *DepA, const ToolChain *DepTC, const char *DepBoundArch) {
- OffloadDependencesInputInfo.push_back(BuildJobsForAction(
+ OffloadDependencesInputInfo.append(BuildJobsForAction(
C, DepA, DepTC, DepBoundArch, /*AtTopLevel=*/false,
/*MultipleArchs*/ !!DepBoundArch, LinkingOutput, CachedResults,
DepA->getOffloadingDeviceKind()));
@@ -4587,6 +5461,17 @@ InputInfo Driver::BuildJobsForActionNoCache(
A = BuildingForOffloadDevice
? OA->getSingleDeviceDependence(/*DoNotConsiderHostActions=*/true)
: OA->getHostDependence();
+
+ // We may have already built this action as a part of the offloading
+ // toolchain, return the cached input if so.
+ std::pair<const Action *, std::string> ActionTC = {
+ OA->getHostDependence(),
+ GetTriplePlusArchString(TC, BoundArch, TargetDeviceOffloadKind)};
+ if (CachedResults.find(ActionTC) != CachedResults.end()) {
+ InputInfoList Inputs = CachedResults[ActionTC];
+ Inputs.append(OffloadDependencesInputInfo);
+ return Inputs;
+ }
}
if (const InputAction *IA = dyn_cast<InputAction>(A)) {
@@ -4596,9 +5481,9 @@ InputInfo Driver::BuildJobsForActionNoCache(
Input.claim();
if (Input.getOption().matches(options::OPT_INPUT)) {
const char *Name = Input.getValue();
- return InputInfo(A, Name, /* _BaseInput = */ Name);
+ return {InputInfo(A, Name, /* _BaseInput = */ Name)};
}
- return InputInfo(A, &Input, /* _BaseInput = */ "");
+ return {InputInfo(A, &Input, /* _BaseInput = */ "")};
}
if (const BindArchAction *BAA = dyn_cast<BindArchAction>(A)) {
@@ -4628,26 +5513,7 @@ InputInfo Driver::BuildJobsForActionNoCache(
const Tool *T = TS.getTool(Inputs, CollapsedOffloadActions);
if (!T)
- return InputInfo();
-
- if (BuildingForOffloadDevice &&
- A->getOffloadingDeviceKind() == Action::OFK_OpenMP) {
- if (TC->getTriple().isAMDGCN()) {
- // AMDGCN treats backend and assemble actions as no-op because
- // linker does not support object files.
- if (const BackendJobAction *BA = dyn_cast<BackendJobAction>(A)) {
- return BuildJobsForAction(C, *BA->input_begin(), TC, BoundArch,
- AtTopLevel, MultipleArchs, LinkingOutput,
- CachedResults, TargetDeviceOffloadKind);
- }
-
- if (const AssembleJobAction *AA = dyn_cast<AssembleJobAction>(A)) {
- return BuildJobsForAction(C, *AA->input_begin(), TC, BoundArch,
- AtTopLevel, MultipleArchs, LinkingOutput,
- CachedResults, TargetDeviceOffloadKind);
- }
- }
- }
+ return {InputInfo()};
// If we've collapsed action list that contained OffloadAction we
// need to build jobs for host/device-side inputs it may have held.
@@ -4655,7 +5521,7 @@ InputInfo Driver::BuildJobsForActionNoCache(
cast<OffloadAction>(OA)->doOnEachDependence(
/*IsHostDependence=*/BuildingForOffloadDevice,
[&](Action *DepA, const ToolChain *DepTC, const char *DepBoundArch) {
- OffloadDependencesInputInfo.push_back(BuildJobsForAction(
+ OffloadDependencesInputInfo.append(BuildJobsForAction(
C, DepA, DepTC, DepBoundArch, /* AtTopLevel */ false,
/*MultipleArchs=*/!!DepBoundArch, LinkingOutput, CachedResults,
DepA->getOffloadingDeviceKind()));
@@ -4669,23 +5535,25 @@ InputInfo Driver::BuildJobsForActionNoCache(
// FIXME: Clean this up.
bool SubJobAtTopLevel =
AtTopLevel && (isa<DsymutilJobAction>(A) || isa<VerifyJobAction>(A));
- InputInfos.push_back(BuildJobsForAction(
+ InputInfos.append(BuildJobsForAction(
C, Input, TC, BoundArch, SubJobAtTopLevel, MultipleArchs, LinkingOutput,
CachedResults, A->getOffloadingDeviceKind()));
}
- // Always use the first input as the base input.
+ // Always use the first file input as the base input.
const char *BaseInput = InputInfos[0].getBaseInput();
+ for (auto &Info : InputInfos) {
+ if (Info.isFilename()) {
+ BaseInput = Info.getBaseInput();
+ break;
+ }
+ }
// ... except dsymutil actions, which use their actual input as the base
// input.
if (JA->getType() == types::TY_dSYM)
BaseInput = InputInfos[0].getFilename();
- // ... and in header module compilations, which use the module name.
- if (auto *ModuleJA = dyn_cast<HeaderModulePrecompileJobAction>(JA))
- BaseInput = ModuleJA->getModuleName();
-
// Append outputs of offload device jobs to the input list
if (!OffloadDependencesInputInfo.empty())
InputInfos.append(OffloadDependencesInputInfo.begin(),
@@ -4747,8 +5615,8 @@ InputInfo Driver::BuildJobsForActionNoCache(
Arch = BoundArch;
CachedResults[{A, GetTriplePlusArchString(UI.DependentToolChain, Arch,
- UI.DependentOffloadKind)}] =
- CurI;
+ UI.DependentOffloadKind)}] = {
+ CurI};
}
// Now that we have all the results generated, select the one that should be
@@ -4757,28 +5625,23 @@ InputInfo Driver::BuildJobsForActionNoCache(
A, GetTriplePlusArchString(TC, BoundArch, TargetDeviceOffloadKind)};
assert(CachedResults.find(ActionTC) != CachedResults.end() &&
"Result does not exist??");
- Result = CachedResults[ActionTC];
+ Result = CachedResults[ActionTC].front();
} else if (JA->getType() == types::TY_Nothing)
- Result = InputInfo(A, BaseInput);
+ Result = {InputInfo(A, BaseInput)};
else {
// We only have to generate a prefix for the host if this is not a top-level
// action.
std::string OffloadingPrefix = Action::GetOffloadingFileNamePrefix(
A->getOffloadingDeviceKind(), TC->getTriple().normalize(),
- /*CreatePrefixForHost=*/!!A->getOffloadingHostActiveKinds() &&
- !AtTopLevel);
- if (isa<OffloadWrapperJobAction>(JA)) {
- if (Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o))
- BaseInput = FinalOutput->getValue();
- else
- BaseInput = getDefaultImageName();
- BaseInput =
- C.getArgs().MakeArgString(std::string(BaseInput) + "-wrapper");
- }
+ /*CreatePrefixForHost=*/isa<OffloadPackagerJobAction>(A) ||
+ !(A->getOffloadingHostActiveKinds() == Action::OFK_None ||
+ AtTopLevel));
Result = InputInfo(A, GetNamedOutputPath(C, *JA, BaseInput, BoundArch,
AtTopLevel, MultipleArchs,
OffloadingPrefix),
BaseInput);
+ if (T->canEmitIR() && OffloadingPrefix.empty())
+ handleTimeTrace(C, Args, JA, BaseInput, Result);
}
if (CCCPrintBindings && !CCGenDiagnostics) {
@@ -4812,7 +5675,7 @@ InputInfo Driver::BuildJobsForActionNoCache(
C.getArgsForToolChain(TC, BoundArch, JA->getOffloadingDeviceKind()),
LinkingOutput);
}
- return Result;
+ return {Result};
}
const char *Driver::getDefaultImageName() const {
@@ -4864,17 +5727,89 @@ static bool HasPreprocessOutput(const Action &JA) {
return false;
}
+const char *Driver::CreateTempFile(Compilation &C, StringRef Prefix,
+ StringRef Suffix, bool MultipleArchs,
+ StringRef BoundArch,
+ bool NeedUniqueDirectory) const {
+ SmallString<128> TmpName;
+ Arg *A = C.getArgs().getLastArg(options::OPT_fcrash_diagnostics_dir);
+ std::optional<std::string> CrashDirectory =
+ CCGenDiagnostics && A
+ ? std::string(A->getValue())
+ : llvm::sys::Process::GetEnv("CLANG_CRASH_DIAGNOSTICS_DIR");
+ if (CrashDirectory) {
+ if (!getVFS().exists(*CrashDirectory))
+ llvm::sys::fs::create_directories(*CrashDirectory);
+ SmallString<128> Path(*CrashDirectory);
+ llvm::sys::path::append(Path, Prefix);
+ const char *Middle = !Suffix.empty() ? "-%%%%%%." : "-%%%%%%";
+ if (std::error_code EC =
+ llvm::sys::fs::createUniqueFile(Path + Middle + Suffix, TmpName)) {
+ Diag(clang::diag::err_unable_to_make_temp) << EC.message();
+ return "";
+ }
+ } else {
+ if (MultipleArchs && !BoundArch.empty()) {
+ if (NeedUniqueDirectory) {
+ TmpName = GetTemporaryDirectory(Prefix);
+ llvm::sys::path::append(TmpName,
+ Twine(Prefix) + "-" + BoundArch + "." + Suffix);
+ } else {
+ TmpName =
+ GetTemporaryPath((Twine(Prefix) + "-" + BoundArch).str(), Suffix);
+ }
+
+ } else {
+ TmpName = GetTemporaryPath(Prefix, Suffix);
+ }
+ }
+ return C.addTempFile(C.getArgs().MakeArgString(TmpName));
+}
+
+// Calculate the output path of the module file when compiling a module unit
+// with the `-fmodule-output` option or `-fmodule-output=` option specified.
+// The behavior is:
+// - If `-fmodule-output=` is specfied, then the module file is
+// writing to the value.
+// - Otherwise if the output object file of the module unit is specified, the
+// output path
+// of the module file should be the same with the output object file except
+// the corresponding suffix. This requires both `-o` and `-c` are specified.
+// - Otherwise, the output path of the module file will be the same with the
+// input with the corresponding suffix.
+static const char *GetModuleOutputPath(Compilation &C, const JobAction &JA,
+ const char *BaseInput) {
+ assert(isa<PrecompileJobAction>(JA) && JA.getType() == types::TY_ModuleFile &&
+ (C.getArgs().hasArg(options::OPT_fmodule_output) ||
+ C.getArgs().hasArg(options::OPT_fmodule_output_EQ)));
+
+ if (Arg *ModuleOutputEQ =
+ C.getArgs().getLastArg(options::OPT_fmodule_output_EQ))
+ return C.addResultFile(ModuleOutputEQ->getValue(), &JA);
+
+ SmallString<64> OutputPath;
+ Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o);
+ if (FinalOutput && C.getArgs().hasArg(options::OPT_c))
+ OutputPath = FinalOutput->getValue();
+ else
+ OutputPath = BaseInput;
+
+ const char *Extension = types::getTypeTempSuffix(JA.getType());
+ llvm::sys::path::replace_extension(OutputPath, Extension);
+ return C.addResultFile(C.getArgs().MakeArgString(OutputPath.c_str()), &JA);
+}
+
const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
const char *BaseInput,
StringRef OrigBoundArch, bool AtTopLevel,
bool MultipleArchs,
StringRef OffloadingPrefix) const {
std::string BoundArch = OrigBoundArch.str();
-#if defined(_WIN32)
- // BoundArch may contains ':', which is invalid in file names on Windows,
- // therefore replace it with '%'.
- std::replace(BoundArch.begin(), BoundArch.end(), ':', '@');
-#endif
+ if (is_style_windows(llvm::sys::path::Style::native)) {
+ // BoundArch may contains ':', which is invalid in file names on Windows,
+ // therefore replace it with '%'.
+ std::replace(BoundArch.begin(), BoundArch.end(), ':', '@');
+ }
llvm::PrettyStackTraceString CrashInfo("Computing output path");
// Output to a user requested destination?
@@ -4905,6 +5840,22 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
return "-";
}
+ if (JA.getType() == types::TY_PP_Asm &&
+ C.getArgs().hasArg(options::OPT_dxc_Fc)) {
+ StringRef FcValue = C.getArgs().getLastArgValue(options::OPT_dxc_Fc);
+ // TODO: Should we use `MakeCLOutputFilename` here? If so, we can probably
+ // handle this as part of the SLASH_Fa handling below.
+ return C.addResultFile(C.getArgs().MakeArgString(FcValue.str()), &JA);
+ }
+
+ if (JA.getType() == types::TY_Object &&
+ C.getArgs().hasArg(options::OPT_dxc_Fo)) {
+ StringRef FoValue = C.getArgs().getLastArgValue(options::OPT_dxc_Fo);
+ // TODO: Should we use `MakeCLOutputFilename` here? If so, we can probably
+ // handle this as part of the SLASH_Fo handling below.
+ return C.addResultFile(C.getArgs().MakeArgString(FoValue.str()), &JA);
+ }
+
// Is this the assembly listing for /FA?
if (JA.getType() == types::TY_PP_Asm &&
(C.getArgs().hasArg(options::OPT__SLASH_FA) ||
@@ -4917,31 +5868,41 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
&JA);
}
+ // DXC defaults to standard out when generating assembly. We check this after
+ // any DXC flags that might specify a file.
+ if (AtTopLevel && JA.getType() == types::TY_PP_Asm && IsDXCMode())
+ return "-";
+
+ bool SpecifiedModuleOutput =
+ C.getArgs().hasArg(options::OPT_fmodule_output) ||
+ C.getArgs().hasArg(options::OPT_fmodule_output_EQ);
+ if (MultipleArchs && SpecifiedModuleOutput)
+ Diag(clang::diag::err_drv_module_output_with_multiple_arch);
+
+ // If we're emitting a module output with the specified option
+ // `-fmodule-output`.
+ if (!AtTopLevel && isa<PrecompileJobAction>(JA) &&
+ JA.getType() == types::TY_ModuleFile && SpecifiedModuleOutput)
+ return GetModuleOutputPath(C, JA, BaseInput);
+
// Output to a temporary file?
if ((!AtTopLevel && !isSaveTempsEnabled() &&
!C.getArgs().hasArg(options::OPT__SLASH_Fo)) ||
CCGenDiagnostics) {
StringRef Name = llvm::sys::path::filename(BaseInput);
std::pair<StringRef, StringRef> Split = Name.split('.');
- SmallString<128> TmpName;
- const char *Suffix = types::getTypeTempSuffix(JA.getType(), IsCLMode());
- Arg *A = C.getArgs().getLastArg(options::OPT_fcrash_diagnostics_dir);
- if (CCGenDiagnostics && A) {
- SmallString<128> CrashDirectory(A->getValue());
- if (!getVFS().exists(CrashDirectory))
- llvm::sys::fs::create_directories(CrashDirectory);
- llvm::sys::path::append(CrashDirectory, Split.first);
- const char *Middle = Suffix ? "-%%%%%%." : "-%%%%%%";
- std::error_code EC = llvm::sys::fs::createUniqueFile(
- CrashDirectory + Middle + Suffix, TmpName);
- if (EC) {
- Diag(clang::diag::err_unable_to_make_temp) << EC.message();
- return "";
- }
- } else {
- TmpName = GetTemporaryPath(Split.first, Suffix);
- }
- return C.addTempFile(C.getArgs().MakeArgString(TmpName));
+ const char *Suffix =
+ types::getTypeTempSuffix(JA.getType(), IsCLMode() || IsDXCMode());
+ // The non-offloading toolchain on Darwin requires deterministic input
+ // file name for binaries to be deterministic, therefore it needs unique
+ // directory.
+ llvm::Triple Triple(C.getDriver().getTargetTriple());
+ bool NeedUniqueDirectory =
+ (JA.getOffloadingDeviceKind() == Action::OFK_None ||
+ JA.getOffloadingDeviceKind() == Action::OFK_Host) &&
+ Triple.isOSDarwin();
+ return CreateTempFile(C, Split.first, Suffix, MultipleArchs, BoundArch,
+ NeedUniqueDirectory);
}
SmallString<128> BasePath(BaseInput);
@@ -4997,7 +5958,8 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
bool IsHIPNoRDC = JA.getOffloadingDeviceKind() == Action::OFK_HIP &&
!C.getArgs().hasFlag(options::OPT_fgpu_rdc,
options::OPT_fno_gpu_rdc, false);
- if (IsHIPNoRDC) {
+ bool UseOutExtension = IsHIPNoRDC || isa<OffloadPackagerJobAction>(JA);
+ if (UseOutExtension) {
Output = BaseName;
llvm::sys::path::replace_extension(Output, "");
}
@@ -5006,14 +5968,23 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
Output += "-";
Output.append(BoundArch);
}
- if (IsHIPNoRDC)
+ if (UseOutExtension)
Output += ".out";
NamedOutput = C.getArgs().MakeArgString(Output.c_str());
}
} else if (JA.getType() == types::TY_PCH && IsCLMode()) {
NamedOutput = C.getArgs().MakeArgString(GetClPchPath(C, BaseName));
+ } else if ((JA.getType() == types::TY_Plist || JA.getType() == types::TY_AST) &&
+ C.getArgs().hasArg(options::OPT__SLASH_o)) {
+ StringRef Val =
+ C.getArgs()
+ .getLastArg(options::OPT__SLASH_o)
+ ->getValue();
+ NamedOutput =
+ MakeCLOutputFilename(C.getArgs(), Val, BaseName, types::TY_Object);
} else {
- const char *Suffix = types::getTypeTempSuffix(JA.getType(), IsCLMode());
+ const char *Suffix =
+ types::getTypeTempSuffix(JA.getType(), IsCLMode() || IsDXCMode());
assert(Suffix && "All types used for output should have a suffix.");
std::string::size_type End = std::string::npos;
@@ -5028,19 +5999,22 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
// When using both -save-temps and -emit-llvm, use a ".tmp.bc" suffix for
// the unoptimized bitcode so that it does not get overwritten by the ".bc"
// optimized bitcode output.
- auto IsHIPRDCInCompilePhase = [](const JobAction &JA,
+ auto IsAMDRDCInCompilePhase = [](const JobAction &JA,
const llvm::opt::DerivedArgList &Args) {
- // The relocatable compilation in HIP implies -emit-llvm. Similarly, use a
- // ".tmp.bc" suffix for the unoptimized bitcode (generated in the compile
- // phase.)
+ // The relocatable compilation in HIP and OpenMP implies -emit-llvm.
+ // Similarly, use a ".tmp.bc" suffix for the unoptimized bitcode
+ // (generated in the compile phase.)
+ const ToolChain *TC = JA.getOffloadingToolChain();
return isa<CompileJobAction>(JA) &&
- JA.getOffloadingDeviceKind() == Action::OFK_HIP &&
- Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
- false);
+ ((JA.getOffloadingDeviceKind() == Action::OFK_HIP &&
+ Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
+ false)) ||
+ (JA.getOffloadingDeviceKind() == Action::OFK_OpenMP && TC &&
+ TC->getTriple().isAMDGPU()));
};
if (!AtTopLevel && JA.getType() == types::TY_LLVM_BC &&
(C.getArgs().hasArg(options::OPT_emit_llvm) ||
- IsHIPRDCInCompilePhase(JA, C.getArgs())))
+ IsAMDRDCInCompilePhase(JA, C.getArgs())))
Suffixed += ".tmp";
Suffixed += '.';
Suffixed += Suffix;
@@ -5071,7 +6045,8 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
StringRef Name = llvm::sys::path::filename(BaseInput);
std::pair<StringRef, StringRef> Split = Name.split('.');
std::string TmpName = GetTemporaryPath(
- Split.first, types::getTypeTempSuffix(JA.getType(), IsCLMode()));
+ Split.first,
+ types::getTypeTempSuffix(JA.getType(), IsCLMode() || IsDXCMode()));
return C.addTempFile(C.getArgs().MakeArgString(TmpName));
}
}
@@ -5084,15 +6059,15 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
else
llvm::sys::path::append(BasePath, NamedOutput);
return C.addResultFile(C.getArgs().MakeArgString(BasePath.c_str()), &JA);
- } else {
- return C.addResultFile(NamedOutput, &JA);
}
+
+ return C.addResultFile(NamedOutput, &JA);
}
std::string Driver::GetFilePath(StringRef Name, const ToolChain &TC) const {
// Search for Name in a list of paths.
auto SearchPaths = [&](const llvm::SmallVectorImpl<std::string> &P)
- -> llvm::Optional<std::string> {
+ -> std::optional<std::string> {
// Respect a limited subset of the '-Bprefix' functionality in GCC by
// attempting to use this prefix when looking for file paths.
for (const auto &Dir : P) {
@@ -5103,7 +6078,7 @@ std::string Driver::GetFilePath(StringRef Name, const ToolChain &TC) const {
if (llvm::sys::fs::exists(Twine(P)))
return std::string(P);
}
- return None;
+ return std::nullopt;
};
if (auto P = SearchPaths(PrefixDirs))
@@ -5112,17 +6087,17 @@ std::string Driver::GetFilePath(StringRef Name, const ToolChain &TC) const {
SmallString<128> R(ResourceDir);
llvm::sys::path::append(R, Name);
if (llvm::sys::fs::exists(Twine(R)))
- return std::string(R.str());
+ return std::string(R);
SmallString<128> P(TC.getCompilerRTPath());
llvm::sys::path::append(P, Name);
if (llvm::sys::fs::exists(Twine(P)))
- return std::string(P.str());
+ return std::string(P);
SmallString<128> D(Dir);
llvm::sys::path::append(D, "..", Name);
if (llvm::sys::fs::exists(Twine(D)))
- return std::string(D.str());
+ return std::string(D);
if (auto P = SearchPaths(TC.getLibraryPaths()))
return *P;
@@ -5159,11 +6134,11 @@ std::string Driver::GetProgramPath(StringRef Name, const ToolChain &TC) const {
if (llvm::sys::fs::is_directory(PrefixDir)) {
SmallString<128> P(PrefixDir);
if (ScanDirForExecutable(P, Name))
- return std::string(P.str());
+ return std::string(P);
} else {
SmallString<128> P((PrefixDir + Name).str());
if (llvm::sys::fs::can_execute(Twine(P)))
- return std::string(P.str());
+ return std::string(P);
}
}
@@ -5179,7 +6154,7 @@ std::string Driver::GetProgramPath(StringRef Name, const ToolChain &TC) const {
for (const auto &Path : List) {
SmallString<128> P(Path);
if (ScanDirForExecutable(P, TargetSpecificExecutable))
- return std::string(P.str());
+ return std::string(P);
}
// Fall back to the path
@@ -5199,7 +6174,7 @@ std::string Driver::GetTemporaryPath(StringRef Prefix, StringRef Suffix) const {
return "";
}
- return std::string(Path.str());
+ return std::string(Path);
}
std::string Driver::GetTemporaryDirectory(StringRef Prefix) const {
@@ -5210,7 +6185,7 @@ std::string Driver::GetTemporaryDirectory(StringRef Prefix) const {
return "";
}
- return std::string(Path.str());
+ return std::string(Path);
}
std::string Driver::GetClPchPath(Compilation &C, StringRef BaseName) const {
@@ -5232,7 +6207,7 @@ std::string Driver::GetClPchPath(Compilation &C, StringRef BaseName) const {
Output = BaseName;
llvm::sys::path::replace_extension(Output, ".pch");
}
- return std::string(Output.str());
+ return std::string(Output);
}
const ToolChain &Driver::getToolChain(const ArgList &Args,
@@ -5247,17 +6222,13 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::Haiku:
TC = std::make_unique<toolchains::Haiku>(*this, Target, Args);
break;
- case llvm::Triple::Ananas:
- TC = std::make_unique<toolchains::Ananas>(*this, Target, Args);
- break;
- case llvm::Triple::CloudABI:
- TC = std::make_unique<toolchains::CloudABI>(*this, Target, Args);
- break;
case llvm::Triple::Darwin:
case llvm::Triple::MacOSX:
case llvm::Triple::IOS:
case llvm::Triple::TvOS:
case llvm::Triple::WatchOS:
+ case llvm::Triple::XROS:
+ case llvm::Triple::DriverKit:
TC = std::make_unique<toolchains::DarwinClang>(*this, Target, Args);
break;
case llvm::Triple::DragonFly:
@@ -5270,10 +6241,11 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
TC = std::make_unique<toolchains::NetBSD>(*this, Target, Args);
break;
case llvm::Triple::FreeBSD:
- TC = std::make_unique<toolchains::FreeBSD>(*this, Target, Args);
- break;
- case llvm::Triple::Minix:
- TC = std::make_unique<toolchains::Minix>(*this, Target, Args);
+ if (Target.isPPC())
+ TC = std::make_unique<toolchains::PPCFreeBSDToolChain>(*this, Target,
+ Args);
+ else
+ TC = std::make_unique<toolchains::FreeBSD>(*this, Target, Args);
break;
case llvm::Triple::Linux:
case llvm::Triple::ELFIAMCU:
@@ -5289,7 +6261,8 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
Args);
else if (Target.getArch() == llvm::Triple::ve)
TC = std::make_unique<toolchains::VEToolChain>(*this, Target, Args);
-
+ else if (Target.isOHOSFamily())
+ TC = std::make_unique<toolchains::OHOS>(*this, Target, Args);
else
TC = std::make_unique<toolchains::Linux>(*this, Target, Args);
break;
@@ -5302,6 +6275,9 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::Solaris:
TC = std::make_unique<toolchains::Solaris>(*this, Target, Args);
break;
+ case llvm::Triple::CUDA:
+ TC = std::make_unique<toolchains::NVPTXToolChain>(*this, Target, Args);
+ break;
case llvm::Triple::AMDHSA:
TC = std::make_unique<toolchains::ROCMToolChain>(*this, Target, Args);
break;
@@ -5329,7 +6305,7 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::MSVC:
case llvm::Triple::UnknownEnvironment:
if (Args.getLastArgValue(options::OPT_fuse_ld_EQ)
- .startswith_insensitive("bfd"))
+ .starts_with_insensitive("bfd"))
TC = std::make_unique<toolchains::CrossWindowsToolChain>(
*this, Target, Args);
else
@@ -5341,15 +6317,21 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::PS4:
TC = std::make_unique<toolchains::PS4CPU>(*this, Target, Args);
break;
- case llvm::Triple::Contiki:
- TC = std::make_unique<toolchains::Contiki>(*this, Target, Args);
+ case llvm::Triple::PS5:
+ TC = std::make_unique<toolchains::PS5CPU>(*this, Target, Args);
break;
case llvm::Triple::Hurd:
TC = std::make_unique<toolchains::Hurd>(*this, Target, Args);
break;
+ case llvm::Triple::LiteOS:
+ TC = std::make_unique<toolchains::OHOS>(*this, Target, Args);
+ break;
case llvm::Triple::ZOS:
TC = std::make_unique<toolchains::ZOS>(*this, Target, Args);
break;
+ case llvm::Triple::ShaderModel:
+ TC = std::make_unique<toolchains::HLSLToolChain>(*this, Target, Args);
+ break;
default:
// Of these targets, Hexagon is the only one that might have
// an OS of Linux, in which case it got handled above already.
@@ -5392,11 +6374,15 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::ve:
TC = std::make_unique<toolchains::VEToolChain>(*this, Target, Args);
break;
+ case llvm::Triple::spirv32:
+ case llvm::Triple::spirv64:
+ TC = std::make_unique<toolchains::SPIRVToolChain>(*this, Target, Args);
+ break;
+ case llvm::Triple::csky:
+ TC = std::make_unique<toolchains::CSKYToolChain>(*this, Target, Args);
+ break;
default:
- if (Target.getVendor() == llvm::Triple::Myriad)
- TC = std::make_unique<toolchains::MyriadToolChain>(*this, Target,
- Args);
- else if (toolchains::BareMetal::handlesTarget(Target))
+ if (toolchains::BareMetal::handlesTarget(Target))
TC = std::make_unique<toolchains::BareMetal>(*this, Target, Args);
else if (Target.isOSBinFormatELF())
TC = std::make_unique<toolchains::Generic_ELF>(*this, Target, Args);
@@ -5408,10 +6394,37 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
}
}
- // Intentionally omitted from the switch above: llvm::Triple::CUDA. CUDA
- // compiles always need two toolchains, the CUDA toolchain and the host
- // toolchain. So the only valid way to create a CUDA toolchain is via
- // CreateOffloadingDeviceToolChains.
+ return *TC;
+}
+
+const ToolChain &Driver::getOffloadingDeviceToolChain(
+ const ArgList &Args, const llvm::Triple &Target, const ToolChain &HostTC,
+ const Action::OffloadKind &TargetDeviceOffloadKind) const {
+ // Use device / host triples as the key into the ToolChains map because the
+ // device ToolChain we create depends on both.
+ auto &TC = ToolChains[Target.str() + "/" + HostTC.getTriple().str()];
+ if (!TC) {
+ // Categorized by offload kind > arch rather than OS > arch like
+ // the normal getToolChain call, as it seems a reasonable way to categorize
+ // things.
+ switch (TargetDeviceOffloadKind) {
+ case Action::OFK_HIP: {
+ if (Target.getArch() == llvm::Triple::amdgcn &&
+ Target.getVendor() == llvm::Triple::AMD &&
+ Target.getOS() == llvm::Triple::AMDHSA)
+ TC = std::make_unique<toolchains::HIPAMDToolChain>(*this, Target,
+ HostTC, Args);
+ else if (Target.getArch() == llvm::Triple::spirv64 &&
+ Target.getVendor() == llvm::Triple::UnknownVendor &&
+ Target.getOS() == llvm::Triple::UnknownOS)
+ TC = std::make_unique<toolchains::HIPSPVToolChain>(*this, Target,
+ HostTC, Args);
+ break;
+ }
+ default:
+ break;
+ }
+ }
return *TC;
}
@@ -5424,7 +6437,8 @@ bool Driver::ShouldUseClangCompiler(const JobAction &JA) const {
// And say "no" if this is not a kind of action clang understands.
if (!isa<PreprocessJobAction>(JA) && !isa<PrecompileJobAction>(JA) &&
- !isa<CompileJobAction>(JA) && !isa<BackendJobAction>(JA))
+ !isa<CompileJobAction>(JA) && !isa<BackendJobAction>(JA) &&
+ !isa<ExtractAPIJobAction>(JA))
return false;
return true;
@@ -5433,11 +6447,12 @@ bool Driver::ShouldUseClangCompiler(const JobAction &JA) const {
bool Driver::ShouldUseFlangCompiler(const JobAction &JA) const {
// Say "no" if there is not exactly one input of a type flang understands.
if (JA.size() != 1 ||
- !types::isFortran((*JA.input_begin())->getType()))
+ !types::isAcceptedByFlang((*JA.input_begin())->getType()))
return false;
// And say "no" if this is not a kind of action flang understands.
- if (!isa<PreprocessJobAction>(JA) && !isa<CompileJobAction>(JA) && !isa<BackendJobAction>(JA))
+ if (!isa<PreprocessJobAction>(JA) && !isa<CompileJobAction>(JA) &&
+ !isa<BackendJobAction>(JA))
return false;
return true;
@@ -5516,20 +6531,37 @@ bool Driver::GetReleaseVersion(StringRef Str,
return false;
}
-std::pair<unsigned, unsigned>
-Driver::getIncludeExcludeOptionFlagMasks(bool IsClCompatMode) const {
- unsigned IncludedFlagsBitmask = 0;
- unsigned ExcludedFlagsBitmask = options::NoDriverOption;
-
- if (IsClCompatMode) {
- // Include CL and Core options.
- IncludedFlagsBitmask |= options::CLOption;
- IncludedFlagsBitmask |= options::CoreOption;
- } else {
- ExcludedFlagsBitmask |= options::CLOption;
- }
+llvm::opt::Visibility
+Driver::getOptionVisibilityMask(bool UseDriverMode) const {
+ if (!UseDriverMode)
+ return llvm::opt::Visibility(options::ClangOption);
+ if (IsCLMode())
+ return llvm::opt::Visibility(options::CLOption);
+ if (IsDXCMode())
+ return llvm::opt::Visibility(options::DXCOption);
+ if (IsFlangMode()) {
+ return llvm::opt::Visibility(options::FlangOption);
+ }
+ return llvm::opt::Visibility(options::ClangOption);
+}
- return std::make_pair(IncludedFlagsBitmask, ExcludedFlagsBitmask);
+const char *Driver::getExecutableForDriverMode(DriverMode Mode) {
+ switch (Mode) {
+ case GCCMode:
+ return "clang";
+ case GXXMode:
+ return "clang++";
+ case CPPMode:
+ return "clang-cpp";
+ case CLMode:
+ return "clang-cl";
+ case FlangMode:
+ return "flang";
+ case DXCMode:
+ return "clang-dxc";
+ }
+
+ llvm_unreachable("Unhandled Mode");
}
bool clang::driver::isOptimizationLevelFast(const ArgList &Args) {
@@ -5561,11 +6593,11 @@ bool clang::driver::willEmitRemarks(const ArgList &Args) {
llvm::StringRef clang::driver::getDriverMode(StringRef ProgName,
ArrayRef<const char *> Args) {
- static const std::string OptName =
+ static StringRef OptName =
getDriverOptTable().getOption(options::OPT_driver_mode).getPrefixedName();
llvm::StringRef Opt;
for (StringRef Arg : Args) {
- if (!Arg.startswith(OptName))
+ if (!Arg.starts_with(OptName))
continue;
Opt = Arg;
}
@@ -5575,3 +6607,58 @@ llvm::StringRef clang::driver::getDriverMode(StringRef ProgName,
}
bool driver::IsClangCL(StringRef DriverMode) { return DriverMode.equals("cl"); }
+
+llvm::Error driver::expandResponseFiles(SmallVectorImpl<const char *> &Args,
+ bool ClangCLMode,
+ llvm::BumpPtrAllocator &Alloc,
+ llvm::vfs::FileSystem *FS) {
+ // Parse response files using the GNU syntax, unless we're in CL mode. There
+ // are two ways to put clang in CL compatibility mode: ProgName is either
+ // clang-cl or cl, or --driver-mode=cl is on the command line. The normal
+ // command line parsing can't happen until after response file parsing, so we
+ // have to manually search for a --driver-mode=cl argument the hard way.
+ // Finally, our -cc1 tools don't care which tokenization mode we use because
+ // response files written by clang will tokenize the same way in either mode.
+ enum { Default, POSIX, Windows } RSPQuoting = Default;
+ for (const char *F : Args) {
+ if (strcmp(F, "--rsp-quoting=posix") == 0)
+ RSPQuoting = POSIX;
+ else if (strcmp(F, "--rsp-quoting=windows") == 0)
+ RSPQuoting = Windows;
+ }
+
+ // Determines whether we want nullptr markers in Args to indicate response
+ // files end-of-lines. We only use this for the /LINK driver argument with
+ // clang-cl.exe on Windows.
+ bool MarkEOLs = ClangCLMode;
+
+ llvm::cl::TokenizerCallback Tokenizer;
+ if (RSPQuoting == Windows || (RSPQuoting == Default && ClangCLMode))
+ Tokenizer = &llvm::cl::TokenizeWindowsCommandLine;
+ else
+ Tokenizer = &llvm::cl::TokenizeGNUCommandLine;
+
+ if (MarkEOLs && Args.size() > 1 && StringRef(Args[1]).starts_with("-cc1"))
+ MarkEOLs = false;
+
+ llvm::cl::ExpansionContext ECtx(Alloc, Tokenizer);
+ ECtx.setMarkEOLs(MarkEOLs);
+ if (FS)
+ ECtx.setVFS(FS);
+
+ if (llvm::Error Err = ECtx.expandResponseFiles(Args))
+ return Err;
+
+ // If -cc1 came from a response file, remove the EOL sentinels.
+ auto FirstArg = llvm::find_if(llvm::drop_begin(Args),
+ [](const char *A) { return A != nullptr; });
+ if (FirstArg != Args.end() && StringRef(*FirstArg).starts_with("-cc1")) {
+ // If -cc1 came from a response file, remove the EOL sentinels.
+ if (MarkEOLs) {
+ auto newEnd = std::remove(Args.begin(), Args.end(), nullptr);
+ Args.resize(newEnd - Args.begin());
+ }
+ }
+
+ return llvm::Error::success();
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/DriverOptions.cpp b/contrib/llvm-project/clang/lib/Driver/DriverOptions.cpp
index 67d4198d222a..b25801a8f3f4 100644
--- a/contrib/llvm-project/clang/lib/Driver/DriverOptions.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/DriverOptions.cpp
@@ -16,40 +16,40 @@ using namespace clang::driver;
using namespace clang::driver::options;
using namespace llvm::opt;
-#define PREFIX(NAME, VALUE) static const char *const NAME[] = VALUE;
+#define OPTTABLE_VALUES_CODE
+#include "clang/Driver/Options.inc"
+#undef OPTTABLE_VALUES_CODE
+
+#define PREFIX(NAME, VALUE) \
+ static constexpr llvm::StringLiteral NAME##_init[] = VALUE; \
+ static constexpr llvm::ArrayRef<llvm::StringLiteral> NAME( \
+ NAME##_init, std::size(NAME##_init) - 1);
#include "clang/Driver/Options.inc"
#undef PREFIX
-static const OptTable::Info InfoTable[] = {
-#define OPTION(PREFIX, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES) \
- {PREFIX, NAME, HELPTEXT, METAVAR, OPT_##ID, Option::KIND##Class, \
- PARAM, FLAGS, OPT_##GROUP, OPT_##ALIAS, ALIASARGS, VALUES},
+static constexpr const llvm::StringLiteral PrefixTable_init[] =
+#define PREFIX_UNION(VALUES) VALUES
+#include "clang/Driver/Options.inc"
+#undef PREFIX_UNION
+ ;
+static constexpr const llvm::ArrayRef<llvm::StringLiteral>
+ PrefixTable(PrefixTable_init, std::size(PrefixTable_init) - 1);
+
+static constexpr OptTable::Info InfoTable[] = {
+#define OPTION(...) LLVM_CONSTRUCT_OPT_INFO(__VA_ARGS__),
#include "clang/Driver/Options.inc"
#undef OPTION
};
namespace {
-class DriverOptTable : public OptTable {
+class DriverOptTable : public PrecomputedOptTable {
public:
- DriverOptTable()
- : OptTable(InfoTable) {}
+ DriverOptTable() : PrecomputedOptTable(InfoTable, PrefixTable) {}
};
-
}
const llvm::opt::OptTable &clang::driver::getDriverOptTable() {
- static const DriverOptTable *Table = []() {
- auto Result = std::make_unique<DriverOptTable>();
- // Options.inc is included in DriverOptions.cpp, and calls OptTable's
- // addValues function.
- // Opt is a variable used in the code fragment in Options.inc.
- OptTable &Opt = *Result;
-#define OPTTABLE_ARG_INIT
-#include "clang/Driver/Options.inc"
-#undef OPTTABLE_ARG_INIT
- return Result.release();
- }();
- return *Table;
+ static DriverOptTable Table;
+ return Table;
}
diff --git a/contrib/llvm-project/clang/lib/Driver/Job.cpp b/contrib/llvm-project/clang/lib/Driver/Job.cpp
index 5b87106b6565..a6c1581be796 100644
--- a/contrib/llvm-project/clang/lib/Driver/Job.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Job.cpp
@@ -16,6 +16,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
@@ -38,9 +39,10 @@ using namespace driver;
Command::Command(const Action &Source, const Tool &Creator,
ResponseFileSupport ResponseSupport, const char *Executable,
const llvm::opt::ArgStringList &Arguments,
- ArrayRef<InputInfo> Inputs, ArrayRef<InputInfo> Outputs)
+ ArrayRef<InputInfo> Inputs, ArrayRef<InputInfo> Outputs,
+ const char *PrependArg)
: Source(Source), Creator(Creator), ResponseSupport(ResponseSupport),
- Executable(Executable), Arguments(Arguments) {
+ Executable(Executable), PrependArg(PrependArg), Arguments(Arguments) {
for (const auto &II : Inputs)
if (II.isFilename())
InputInfoList.push_back(II);
@@ -93,10 +95,10 @@ static bool skipArgs(const char *Flag, bool HaveCrashVFS, int &SkipNum,
// These flags are treated as a single argument (e.g., -F<Dir>).
StringRef FlagRef(Flag);
- IsInclude = FlagRef.startswith("-F") || FlagRef.startswith("-I");
+ IsInclude = FlagRef.starts_with("-F") || FlagRef.starts_with("-I");
if (IsInclude)
return !HaveCrashVFS;
- if (FlagRef.startswith("-fmodules-cache-path="))
+ if (FlagRef.starts_with("-fmodules-cache-path="))
return true;
SkipNum = 0;
@@ -144,6 +146,10 @@ void Command::buildArgvForResponseFile(
for (const auto *InputName : InputFileList)
Inputs.insert(InputName);
Out.push_back(Executable);
+
+ if (PrependArg)
+ Out.push_back(PrependArg);
+
// In a file list, build args vector ignoring parameters that will go in the
// response file (elements of the InputFileList vector)
bool FirstInput = true;
@@ -179,8 +185,8 @@ rewriteIncludes(const llvm::ArrayRef<const char *> &Args, size_t Idx,
SmallString<128> NewInc;
if (NumArgs == 1) {
StringRef FlagRef(Args[Idx + NumArgs - 1]);
- assert((FlagRef.startswith("-F") || FlagRef.startswith("-I")) &&
- "Expecting -I or -F");
+ assert((FlagRef.starts_with("-F") || FlagRef.starts_with("-I")) &&
+ "Expecting -I or -F");
StringRef Inc = FlagRef.slice(2, StringRef::npos);
if (getAbsPath(Inc, NewInc)) {
SmallString<128> NewArg(FlagRef.slice(0, 2));
@@ -209,6 +215,9 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
if (ResponseFile != nullptr) {
buildArgvForResponseFile(ArgsRespFile);
Args = ArrayRef<const char *>(ArgsRespFile).slice(1); // no executable name
+ } else if (PrependArg) {
+ OS << ' ';
+ llvm::sys::printArg(OS, PrependArg, /*Quote=*/true);
}
bool HaveCrashVFS = CrashInfo && !CrashInfo->VFSPath.empty();
@@ -301,6 +310,11 @@ void Command::setEnvironment(llvm::ArrayRef<const char *> NewEnvironment) {
Environment.push_back(nullptr);
}
+void Command::setRedirectFiles(
+ const std::vector<std::optional<std::string>> &Redirects) {
+ RedirectFiles = Redirects;
+}
+
void Command::PrintFileNames() const {
if (PrintInputFilenames) {
for (const auto &Arg : InputInfoList)
@@ -309,13 +323,15 @@ void Command::PrintFileNames() const {
}
}
-int Command::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
+int Command::Execute(ArrayRef<std::optional<StringRef>> Redirects,
std::string *ErrMsg, bool *ExecutionFailed) const {
PrintFileNames();
SmallVector<const char *, 128> Argv;
if (ResponseFile == nullptr) {
Argv.push_back(Executable);
+ if (PrependArg)
+ Argv.push_back(PrependArg);
Argv.append(Arguments.begin(), Arguments.end());
Argv.push_back(nullptr);
} else {
@@ -342,16 +358,32 @@ int Command::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
}
}
- Optional<ArrayRef<StringRef>> Env;
+ std::optional<ArrayRef<StringRef>> Env;
std::vector<StringRef> ArgvVectorStorage;
if (!Environment.empty()) {
assert(Environment.back() == nullptr &&
"Environment vector should be null-terminated by now");
ArgvVectorStorage = llvm::toStringRefArray(Environment.data());
- Env = makeArrayRef(ArgvVectorStorage);
+ Env = ArrayRef(ArgvVectorStorage);
}
auto Args = llvm::toStringRefArray(Argv.data());
+
+ // Use Job-specific redirect files if they are present.
+ if (!RedirectFiles.empty()) {
+ std::vector<std::optional<StringRef>> RedirectFilesOptional;
+ for (const auto &Ele : RedirectFiles)
+ if (Ele)
+ RedirectFilesOptional.push_back(std::optional<StringRef>(*Ele));
+ else
+ RedirectFilesOptional.push_back(std::nullopt);
+
+ return llvm::sys::ExecuteAndWait(Executable, Args, Env,
+ ArrayRef(RedirectFilesOptional),
+ /*secondsToWait=*/0, /*memoryLimit=*/0,
+ ErrMsg, ExecutionFailed, &ProcStat);
+ }
+
return llvm::sys::ExecuteAndWait(Executable, Args, Env, Redirects,
/*secondsToWait*/ 0, /*memoryLimit*/ 0,
ErrMsg, ExecutionFailed, &ProcStat);
@@ -361,9 +393,10 @@ CC1Command::CC1Command(const Action &Source, const Tool &Creator,
ResponseFileSupport ResponseSupport,
const char *Executable,
const llvm::opt::ArgStringList &Arguments,
- ArrayRef<InputInfo> Inputs, ArrayRef<InputInfo> Outputs)
+ ArrayRef<InputInfo> Inputs, ArrayRef<InputInfo> Outputs,
+ const char *PrependArg)
: Command(Source, Creator, ResponseSupport, Executable, Arguments, Inputs,
- Outputs) {
+ Outputs, PrependArg) {
InProcess = true;
}
@@ -374,7 +407,7 @@ void CC1Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
Command::Print(OS, Terminator, Quote, CrashInfo);
}
-int CC1Command::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
+int CC1Command::Execute(ArrayRef<std::optional<StringRef>> Redirects,
std::string *ErrMsg, bool *ExecutionFailed) const {
// FIXME: Currently, if there're more than one job, we disable
// -fintegrate-cc1. If we're no longer a integrated-cc1 job, fallback to
@@ -388,6 +421,8 @@ int CC1Command::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
Argv.push_back(getExecutable());
Argv.append(getArguments().begin(), getArguments().end());
Argv.push_back(nullptr);
+ Argv.pop_back(); // The terminating null element shall not be part of the
+ // slice (main() behavior).
// This flag simply indicates that the program couldn't start, which isn't
// applicable here.
@@ -415,30 +450,6 @@ void CC1Command::setEnvironment(llvm::ArrayRef<const char *> NewEnvironment) {
"The CC1Command doesn't support changing the environment vars!");
}
-ForceSuccessCommand::ForceSuccessCommand(
- const Action &Source_, const Tool &Creator_,
- ResponseFileSupport ResponseSupport, const char *Executable_,
- const llvm::opt::ArgStringList &Arguments_, ArrayRef<InputInfo> Inputs,
- ArrayRef<InputInfo> Outputs)
- : Command(Source_, Creator_, ResponseSupport, Executable_, Arguments_,
- Inputs, Outputs) {}
-
-void ForceSuccessCommand::Print(raw_ostream &OS, const char *Terminator,
- bool Quote, CrashReportInfo *CrashInfo) const {
- Command::Print(OS, "", Quote, CrashInfo);
- OS << " || (exit 0)" << Terminator;
-}
-
-int ForceSuccessCommand::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
- std::string *ErrMsg,
- bool *ExecutionFailed) const {
- int Status = Command::Execute(Redirects, ErrMsg, ExecutionFailed);
- (void)Status;
- if (ExecutionFailed)
- *ExecutionFailed = false;
- return 0;
-}
-
void JobList::Print(raw_ostream &OS, const char *Terminator, bool Quote,
CrashReportInfo *CrashInfo) const {
for (const auto &Job : *this)
diff --git a/contrib/llvm-project/clang/lib/Driver/Multilib.cpp b/contrib/llvm-project/clang/lib/Driver/Multilib.cpp
index 5dd55553bcb5..9c091bbfdaba 100644
--- a/contrib/llvm-project/clang/lib/Driver/Multilib.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Multilib.cpp
@@ -8,14 +8,18 @@
#include "clang/Driver/Multilib.h"
#include "clang/Basic/LLVM.h"
+#include "clang/Basic/Version.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Regex.h"
+#include "llvm/Support/VersionTuple.h"
+#include "llvm/Support/YAMLParser.h"
+#include "llvm/Support/YAMLTraits.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
@@ -25,56 +29,17 @@ using namespace clang;
using namespace driver;
using namespace llvm::sys;
-/// normalize Segment to "/foo/bar" or "".
-static void normalizePathSegment(std::string &Segment) {
- StringRef seg = Segment;
-
- // Prune trailing "/" or "./"
- while (true) {
- StringRef last = path::filename(seg);
- if (last != ".")
- break;
- seg = path::parent_path(seg);
- }
-
- if (seg.empty() || seg == "/") {
- Segment.clear();
- return;
- }
-
- // Add leading '/'
- if (seg.front() != '/') {
- Segment = "/" + seg.str();
- } else {
- Segment = std::string(seg);
- }
-}
-
Multilib::Multilib(StringRef GCCSuffix, StringRef OSSuffix,
- StringRef IncludeSuffix, int Priority)
+ StringRef IncludeSuffix, const flags_list &Flags,
+ StringRef ExclusiveGroup)
: GCCSuffix(GCCSuffix), OSSuffix(OSSuffix), IncludeSuffix(IncludeSuffix),
- Priority(Priority) {
- normalizePathSegment(this->GCCSuffix);
- normalizePathSegment(this->OSSuffix);
- normalizePathSegment(this->IncludeSuffix);
-}
-
-Multilib &Multilib::gccSuffix(StringRef S) {
- GCCSuffix = std::string(S);
- normalizePathSegment(GCCSuffix);
- return *this;
-}
-
-Multilib &Multilib::osSuffix(StringRef S) {
- OSSuffix = std::string(S);
- normalizePathSegment(OSSuffix);
- return *this;
-}
-
-Multilib &Multilib::includeSuffix(StringRef S) {
- IncludeSuffix = std::string(S);
- normalizePathSegment(IncludeSuffix);
- return *this;
+ Flags(Flags), ExclusiveGroup(ExclusiveGroup) {
+ assert(GCCSuffix.empty() ||
+ (StringRef(GCCSuffix).front() == '/' && GCCSuffix.size() > 1));
+ assert(OSSuffix.empty() ||
+ (StringRef(OSSuffix).front() == '/' && OSSuffix.size() > 1));
+ assert(IncludeSuffix.empty() ||
+ (StringRef(IncludeSuffix).front() == '/' && IncludeSuffix.size() > 1));
}
LLVM_DUMP_METHOD void Multilib::dump() const {
@@ -82,7 +47,6 @@ LLVM_DUMP_METHOD void Multilib::dump() const {
}
void Multilib::print(raw_ostream &OS) const {
- assert(GCCSuffix.empty() || (StringRef(GCCSuffix).front() == '/'));
if (GCCSuffix.empty())
OS << ".";
else {
@@ -90,27 +54,11 @@ void Multilib::print(raw_ostream &OS) const {
}
OS << ";";
for (StringRef Flag : Flags) {
- if (Flag.front() == '+')
+ if (Flag.front() == '-')
OS << "@" << Flag.substr(1);
}
}
-bool Multilib::isValid() const {
- llvm::StringMap<int> FlagSet;
- for (unsigned I = 0, N = Flags.size(); I != N; ++I) {
- StringRef Flag(Flags[I]);
- llvm::StringMap<int>::iterator SI = FlagSet.find(Flag.substr(1));
-
- assert(StringRef(Flag).front() == '+' || StringRef(Flag).front() == '-');
-
- if (SI == FlagSet.end())
- FlagSet[Flag.substr(1)] = I;
- else if (Flags[I] != Flags[SI->getValue()])
- return false;
- }
- return true;
-}
-
bool Multilib::operator==(const Multilib &Other) const {
// Check whether the flags sets match
// allowing for the match to be order invariant
@@ -119,7 +67,7 @@ bool Multilib::operator==(const Multilib &Other) const {
MyFlags.insert(Flag);
for (const auto &Flag : Other.Flags)
- if (MyFlags.find(Flag) == MyFlags.end())
+ if (!MyFlags.contains(Flag))
return false;
if (osSuffix() != Other.osSuffix())
@@ -139,147 +87,225 @@ raw_ostream &clang::driver::operator<<(raw_ostream &OS, const Multilib &M) {
return OS;
}
-MultilibSet &MultilibSet::Maybe(const Multilib &M) {
- Multilib Opposite;
- // Negate any '+' flags
- for (StringRef Flag : M.flags()) {
- if (Flag.front() == '+')
- Opposite.flags().push_back(("-" + Flag.substr(1)).str());
- }
- return Either(M, Opposite);
-}
-
-MultilibSet &MultilibSet::Either(const Multilib &M1, const Multilib &M2) {
- return Either({M1, M2});
-}
-
-MultilibSet &MultilibSet::Either(const Multilib &M1, const Multilib &M2,
- const Multilib &M3) {
- return Either({M1, M2, M3});
-}
-
-MultilibSet &MultilibSet::Either(const Multilib &M1, const Multilib &M2,
- const Multilib &M3, const Multilib &M4) {
- return Either({M1, M2, M3, M4});
-}
-
-MultilibSet &MultilibSet::Either(const Multilib &M1, const Multilib &M2,
- const Multilib &M3, const Multilib &M4,
- const Multilib &M5) {
- return Either({M1, M2, M3, M4, M5});
-}
-
-static Multilib compose(const Multilib &Base, const Multilib &New) {
- SmallString<128> GCCSuffix;
- llvm::sys::path::append(GCCSuffix, "/", Base.gccSuffix(), New.gccSuffix());
- SmallString<128> OSSuffix;
- llvm::sys::path::append(OSSuffix, "/", Base.osSuffix(), New.osSuffix());
- SmallString<128> IncludeSuffix;
- llvm::sys::path::append(IncludeSuffix, "/", Base.includeSuffix(),
- New.includeSuffix());
-
- Multilib Composed(GCCSuffix, OSSuffix, IncludeSuffix);
-
- Multilib::flags_list &Flags = Composed.flags();
-
- Flags.insert(Flags.end(), Base.flags().begin(), Base.flags().end());
- Flags.insert(Flags.end(), New.flags().begin(), New.flags().end());
-
- return Composed;
+MultilibSet &MultilibSet::FilterOut(FilterCallback F) {
+ llvm::erase_if(Multilibs, F);
+ return *this;
}
-MultilibSet &MultilibSet::Either(ArrayRef<Multilib> MultilibSegments) {
- multilib_list Composed;
+void MultilibSet::push_back(const Multilib &M) { Multilibs.push_back(M); }
- if (Multilibs.empty())
- Multilibs.insert(Multilibs.end(), MultilibSegments.begin(),
- MultilibSegments.end());
- else {
- for (const auto &New : MultilibSegments) {
- for (const auto &Base : *this) {
- Multilib MO = compose(Base, New);
- if (MO.isValid())
- Composed.push_back(MO);
- }
+bool MultilibSet::select(const Multilib::flags_list &Flags,
+ llvm::SmallVectorImpl<Multilib> &Selected) const {
+ llvm::StringSet<> FlagSet(expandFlags(Flags));
+ Selected.clear();
+
+ // Decide which multilibs we're going to select at all.
+ llvm::DenseSet<StringRef> ExclusiveGroupsSelected;
+ for (const Multilib &M : llvm::reverse(Multilibs)) {
+ // If this multilib doesn't match all our flags, don't select it.
+ if (!llvm::all_of(M.flags(), [&FlagSet](const std::string &F) {
+ return FlagSet.contains(F);
+ }))
+ continue;
+
+ const std::string &group = M.exclusiveGroup();
+ if (!group.empty()) {
+ // If this multilib has the same ExclusiveGroup as one we've already
+ // selected, skip it. We're iterating in reverse order, so the group
+ // member we've selected already is preferred.
+ //
+ // Otherwise, add the group name to the set of groups we've already
+ // selected a member of.
+ auto [It, Inserted] = ExclusiveGroupsSelected.insert(group);
+ if (!Inserted)
+ continue;
}
- Multilibs = Composed;
+ // Select this multilib.
+ Selected.push_back(M);
}
- return *this;
-}
+ // We iterated in reverse order, so now put Selected back the right way
+ // round.
+ std::reverse(Selected.begin(), Selected.end());
-MultilibSet &MultilibSet::FilterOut(FilterCallback F) {
- filterInPlace(F, Multilibs);
- return *this;
+ return !Selected.empty();
}
-MultilibSet &MultilibSet::FilterOut(const char *Regex) {
- llvm::Regex R(Regex);
-#ifndef NDEBUG
- std::string Error;
- if (!R.isValid(Error)) {
- llvm::errs() << Error;
- llvm_unreachable("Invalid regex!");
+llvm::StringSet<>
+MultilibSet::expandFlags(const Multilib::flags_list &InFlags) const {
+ llvm::StringSet<> Result;
+ for (const auto &F : InFlags)
+ Result.insert(F);
+ for (const FlagMatcher &M : FlagMatchers) {
+ std::string RegexString(M.Match);
+
+ // Make the regular expression match the whole string.
+ if (!StringRef(M.Match).starts_with("^"))
+ RegexString.insert(RegexString.begin(), '^');
+ if (!StringRef(M.Match).ends_with("$"))
+ RegexString.push_back('$');
+
+ const llvm::Regex Regex(RegexString);
+ assert(Regex.isValid());
+ if (llvm::any_of(InFlags,
+ [&Regex](StringRef F) { return Regex.match(F); })) {
+ Result.insert(M.Flags.begin(), M.Flags.end());
+ }
}
-#endif
-
- filterInPlace([&R](const Multilib &M) { return R.match(M.gccSuffix()); },
- Multilibs);
- return *this;
-}
-
-void MultilibSet::push_back(const Multilib &M) { Multilibs.push_back(M); }
-
-void MultilibSet::combineWith(const MultilibSet &Other) {
- Multilibs.insert(Multilibs.end(), Other.begin(), Other.end());
+ return Result;
}
-static bool isFlagEnabled(StringRef Flag) {
- char Indicator = Flag.front();
- assert(Indicator == '+' || Indicator == '-');
- return Indicator == '+';
-}
+namespace {
+
+// When updating this also update MULTILIB_VERSION in MultilibTest.cpp
+static const VersionTuple MultilibVersionCurrent(1, 0);
+
+struct MultilibSerialization {
+ std::string Dir;
+ std::vector<std::string> Flags;
+ std::string Group;
+};
+
+enum class MultilibGroupType {
+ /*
+ * The only group type currently supported is 'Exclusive', which indicates a
+ * group of multilibs of which at most one may be selected.
+ */
+ Exclusive,
+
+ /*
+ * Future possibility: a second group type indicating a set of library
+ * directories that are mutually _dependent_ rather than mutually exclusive:
+ * if you include one you must include them all.
+ *
+ * It might also be useful to allow groups to be members of other groups, so
+ * that a mutually exclusive group could contain a mutually dependent set of
+ * library directories, or vice versa.
+ *
+ * These additional features would need changes in the implementation, but
+ * the YAML schema is set up so they can be added without requiring changes
+ * in existing users' multilib.yaml files.
+ */
+};
+
+struct MultilibGroupSerialization {
+ std::string Name;
+ MultilibGroupType Type;
+};
+
+struct MultilibSetSerialization {
+ llvm::VersionTuple MultilibVersion;
+ std::vector<MultilibGroupSerialization> Groups;
+ std::vector<MultilibSerialization> Multilibs;
+ std::vector<MultilibSet::FlagMatcher> FlagMatchers;
+};
+
+} // end anonymous namespace
+
+template <> struct llvm::yaml::MappingTraits<MultilibSerialization> {
+ static void mapping(llvm::yaml::IO &io, MultilibSerialization &V) {
+ io.mapRequired("Dir", V.Dir);
+ io.mapRequired("Flags", V.Flags);
+ io.mapOptional("Group", V.Group);
+ }
+ static std::string validate(IO &io, MultilibSerialization &V) {
+ if (StringRef(V.Dir).starts_with("/"))
+ return "paths must be relative but \"" + V.Dir + "\" starts with \"/\"";
+ return std::string{};
+ }
+};
-bool MultilibSet::select(const Multilib::flags_list &Flags, Multilib &M) const {
- llvm::StringMap<bool> FlagSet;
+template <> struct llvm::yaml::ScalarEnumerationTraits<MultilibGroupType> {
+ static void enumeration(IO &io, MultilibGroupType &Val) {
+ io.enumCase(Val, "Exclusive", MultilibGroupType::Exclusive);
+ }
+};
- // Stuff all of the flags into the FlagSet such that a true mappend indicates
- // the flag was enabled, and a false mappend indicates the flag was disabled.
- for (StringRef Flag : Flags)
- FlagSet[Flag.substr(1)] = isFlagEnabled(Flag);
+template <> struct llvm::yaml::MappingTraits<MultilibGroupSerialization> {
+ static void mapping(llvm::yaml::IO &io, MultilibGroupSerialization &V) {
+ io.mapRequired("Name", V.Name);
+ io.mapRequired("Type", V.Type);
+ }
+};
- multilib_list Filtered = filterCopy([&FlagSet](const Multilib &M) {
- for (StringRef Flag : M.flags()) {
- llvm::StringMap<bool>::const_iterator SI = FlagSet.find(Flag.substr(1));
- if (SI != FlagSet.end())
- if (SI->getValue() != isFlagEnabled(Flag))
- return true;
+template <> struct llvm::yaml::MappingTraits<MultilibSet::FlagMatcher> {
+ static void mapping(llvm::yaml::IO &io, MultilibSet::FlagMatcher &M) {
+ io.mapRequired("Match", M.Match);
+ io.mapRequired("Flags", M.Flags);
+ }
+ static std::string validate(IO &io, MultilibSet::FlagMatcher &M) {
+ llvm::Regex Regex(M.Match);
+ std::string RegexError;
+ if (!Regex.isValid(RegexError))
+ return RegexError;
+ if (M.Flags.empty())
+ return "value required for 'Flags'";
+ return std::string{};
+ }
+};
+
+template <> struct llvm::yaml::MappingTraits<MultilibSetSerialization> {
+ static void mapping(llvm::yaml::IO &io, MultilibSetSerialization &M) {
+ io.mapRequired("MultilibVersion", M.MultilibVersion);
+ io.mapRequired("Variants", M.Multilibs);
+ io.mapOptional("Groups", M.Groups);
+ io.mapOptional("Mappings", M.FlagMatchers);
+ }
+ static std::string validate(IO &io, MultilibSetSerialization &M) {
+ if (M.MultilibVersion.empty())
+ return "missing required key 'MultilibVersion'";
+ if (M.MultilibVersion.getMajor() != MultilibVersionCurrent.getMajor())
+ return "multilib version " + M.MultilibVersion.getAsString() +
+ " is unsupported";
+ if (M.MultilibVersion.getMinor() > MultilibVersionCurrent.getMinor())
+ return "multilib version " + M.MultilibVersion.getAsString() +
+ " is unsupported";
+ for (const MultilibSerialization &Lib : M.Multilibs) {
+ if (!Lib.Group.empty()) {
+ bool Found = false;
+ for (const MultilibGroupSerialization &Group : M.Groups)
+ if (Group.Name == Lib.Group) {
+ Found = true;
+ break;
+ }
+ if (!Found)
+ return "multilib \"" + Lib.Dir +
+ "\" specifies undefined group name \"" + Lib.Group + "\"";
+ }
}
- return false;
- }, Multilibs);
-
- if (Filtered.empty())
- return false;
- if (Filtered.size() == 1) {
- M = Filtered[0];
- return true;
+ return std::string{};
}
-
- // Sort multilibs by priority and select the one with the highest priority.
- llvm::sort(Filtered.begin(), Filtered.end(),
- [](const Multilib &a, const Multilib &b) -> bool {
- return a.priority() > b.priority();
- });
-
- if (Filtered[0].priority() > Filtered[1].priority()) {
- M = Filtered[0];
- return true;
+};
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(MultilibSerialization)
+LLVM_YAML_IS_SEQUENCE_VECTOR(MultilibGroupSerialization)
+LLVM_YAML_IS_SEQUENCE_VECTOR(MultilibSet::FlagMatcher)
+
+llvm::ErrorOr<MultilibSet>
+MultilibSet::parseYaml(llvm::MemoryBufferRef Input,
+ llvm::SourceMgr::DiagHandlerTy DiagHandler,
+ void *DiagHandlerCtxt) {
+ MultilibSetSerialization MS;
+ llvm::yaml::Input YamlInput(Input, nullptr, DiagHandler, DiagHandlerCtxt);
+ YamlInput >> MS;
+ if (YamlInput.error())
+ return YamlInput.error();
+
+ multilib_list Multilibs;
+ Multilibs.reserve(MS.Multilibs.size());
+ for (const auto &M : MS.Multilibs) {
+ std::string Dir;
+ if (M.Dir != ".")
+ Dir = "/" + M.Dir;
+ // We transfer M.Group straight into the ExclusiveGroup parameter for the
+ // Multilib constructor. If we later support more than one type of group,
+ // we'll have to look up the group name in MS.Groups, check its type, and
+ // decide what to do here.
+ Multilibs.emplace_back(Dir, Dir, Dir, M.Flags, M.Group);
}
- // TODO: We should consider returning llvm::Error rather than aborting.
- assert(false && "More than one multilib with the same priority");
- return false;
+ return MultilibSet(std::move(Multilibs), std::move(MS.FlagMatchers));
}
LLVM_DUMP_METHOD void MultilibSet::dump() const {
@@ -291,17 +317,6 @@ void MultilibSet::print(raw_ostream &OS) const {
OS << M << "\n";
}
-MultilibSet::multilib_list MultilibSet::filterCopy(FilterCallback F,
- const multilib_list &Ms) {
- multilib_list Copy(Ms);
- filterInPlace(F, Copy);
- return Copy;
-}
-
-void MultilibSet::filterInPlace(FilterCallback F, multilib_list &Ms) {
- Ms.erase(std::remove_if(Ms.begin(), Ms.end(), F), Ms.end());
-}
-
raw_ostream &clang::driver::operator<<(raw_ostream &OS, const MultilibSet &MS) {
MS.print(OS);
return OS;
diff --git a/contrib/llvm-project/clang/lib/Driver/MultilibBuilder.cpp b/contrib/llvm-project/clang/lib/Driver/MultilibBuilder.cpp
new file mode 100644
index 000000000000..15adf5017780
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/MultilibBuilder.cpp
@@ -0,0 +1,197 @@
+//===- MultilibBuilder.cpp - MultilibBuilder Implementation -===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/MultilibBuilder.h"
+#include "ToolChains/CommonArgs.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace driver;
+
+/// normalize Segment to "/foo/bar" or "".
+static void normalizePathSegment(std::string &Segment) {
+ StringRef seg = Segment;
+
+ // Prune trailing "/" or "./"
+ while (true) {
+ StringRef last = llvm::sys::path::filename(seg);
+ if (last != ".")
+ break;
+ seg = llvm::sys::path::parent_path(seg);
+ }
+
+ if (seg.empty() || seg == "/") {
+ Segment.clear();
+ return;
+ }
+
+ // Add leading '/'
+ if (seg.front() != '/') {
+ Segment = "/" + seg.str();
+ } else {
+ Segment = std::string(seg);
+ }
+}
+
+MultilibBuilder::MultilibBuilder(StringRef GCC, StringRef OS, StringRef Include)
+ : GCCSuffix(GCC), OSSuffix(OS), IncludeSuffix(Include) {
+ normalizePathSegment(GCCSuffix);
+ normalizePathSegment(OSSuffix);
+ normalizePathSegment(IncludeSuffix);
+}
+
+MultilibBuilder::MultilibBuilder(StringRef Suffix)
+ : MultilibBuilder(Suffix, Suffix, Suffix) {}
+
+MultilibBuilder &MultilibBuilder::gccSuffix(StringRef S) {
+ GCCSuffix = std::string(S);
+ normalizePathSegment(GCCSuffix);
+ return *this;
+}
+
+MultilibBuilder &MultilibBuilder::osSuffix(StringRef S) {
+ OSSuffix = std::string(S);
+ normalizePathSegment(OSSuffix);
+ return *this;
+}
+
+MultilibBuilder &MultilibBuilder::includeSuffix(StringRef S) {
+ IncludeSuffix = std::string(S);
+ normalizePathSegment(IncludeSuffix);
+ return *this;
+}
+
+bool MultilibBuilder::isValid() const {
+ llvm::StringMap<int> FlagSet;
+ for (unsigned I = 0, N = Flags.size(); I != N; ++I) {
+ StringRef Flag(Flags[I]);
+ llvm::StringMap<int>::iterator SI = FlagSet.find(Flag.substr(1));
+
+ assert(StringRef(Flag).front() == '-' || StringRef(Flag).front() == '!');
+
+ if (SI == FlagSet.end())
+ FlagSet[Flag.substr(1)] = I;
+ else if (Flags[I] != Flags[SI->getValue()])
+ return false;
+ }
+ return true;
+}
+
+MultilibBuilder &MultilibBuilder::flag(StringRef Flag, bool Disallow) {
+ tools::addMultilibFlag(!Disallow, Flag, Flags);
+ return *this;
+}
+
+Multilib MultilibBuilder::makeMultilib() const {
+ return Multilib(GCCSuffix, OSSuffix, IncludeSuffix, Flags);
+}
+
+MultilibSetBuilder &MultilibSetBuilder::Maybe(const MultilibBuilder &M) {
+ MultilibBuilder Opposite;
+ // Negate positive flags
+ for (StringRef Flag : M.flags()) {
+ if (Flag.front() == '-')
+ Opposite.flag(Flag, /*Disallow=*/true);
+ }
+ return Either(M, Opposite);
+}
+
+MultilibSetBuilder &MultilibSetBuilder::Either(const MultilibBuilder &M1,
+ const MultilibBuilder &M2) {
+ return Either({M1, M2});
+}
+
+MultilibSetBuilder &MultilibSetBuilder::Either(const MultilibBuilder &M1,
+ const MultilibBuilder &M2,
+ const MultilibBuilder &M3) {
+ return Either({M1, M2, M3});
+}
+
+MultilibSetBuilder &MultilibSetBuilder::Either(const MultilibBuilder &M1,
+ const MultilibBuilder &M2,
+ const MultilibBuilder &M3,
+ const MultilibBuilder &M4) {
+ return Either({M1, M2, M3, M4});
+}
+
+MultilibSetBuilder &MultilibSetBuilder::Either(const MultilibBuilder &M1,
+ const MultilibBuilder &M2,
+ const MultilibBuilder &M3,
+ const MultilibBuilder &M4,
+ const MultilibBuilder &M5) {
+ return Either({M1, M2, M3, M4, M5});
+}
+
+static MultilibBuilder compose(const MultilibBuilder &Base,
+ const MultilibBuilder &New) {
+ SmallString<128> GCCSuffix;
+ llvm::sys::path::append(GCCSuffix, "/", Base.gccSuffix(), New.gccSuffix());
+ SmallString<128> OSSuffix;
+ llvm::sys::path::append(OSSuffix, "/", Base.osSuffix(), New.osSuffix());
+ SmallString<128> IncludeSuffix;
+ llvm::sys::path::append(IncludeSuffix, "/", Base.includeSuffix(),
+ New.includeSuffix());
+
+ MultilibBuilder Composed(GCCSuffix, OSSuffix, IncludeSuffix);
+
+ MultilibBuilder::flags_list &Flags = Composed.flags();
+
+ Flags.insert(Flags.end(), Base.flags().begin(), Base.flags().end());
+ Flags.insert(Flags.end(), New.flags().begin(), New.flags().end());
+
+ return Composed;
+}
+
+MultilibSetBuilder &
+MultilibSetBuilder::Either(ArrayRef<MultilibBuilder> MultilibSegments) {
+ multilib_list Composed;
+
+ if (Multilibs.empty())
+ Multilibs.insert(Multilibs.end(), MultilibSegments.begin(),
+ MultilibSegments.end());
+ else {
+ for (const auto &New : MultilibSegments) {
+ for (const auto &Base : Multilibs) {
+ MultilibBuilder MO = compose(Base, New);
+ if (MO.isValid())
+ Composed.push_back(MO);
+ }
+ }
+
+ Multilibs = Composed;
+ }
+
+ return *this;
+}
+
+MultilibSetBuilder &MultilibSetBuilder::FilterOut(const char *Regex) {
+ llvm::Regex R(Regex);
+#ifndef NDEBUG
+ std::string Error;
+ if (!R.isValid(Error)) {
+ llvm::errs() << Error;
+ llvm_unreachable("Invalid regex!");
+ }
+#endif
+ llvm::erase_if(Multilibs, [&R](const MultilibBuilder &M) {
+ return R.match(M.gccSuffix());
+ });
+ return *this;
+}
+
+MultilibSet MultilibSetBuilder::makeMultilibSet() const {
+ MultilibSet Result;
+ for (const auto &M : Multilibs) {
+ Result.push_back(M.makeMultilib());
+ }
+ return Result;
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/OffloadBundler.cpp b/contrib/llvm-project/clang/lib/Driver/OffloadBundler.cpp
new file mode 100644
index 000000000000..b1091aca5616
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/OffloadBundler.cpp
@@ -0,0 +1,1684 @@
+//===- OffloadBundler.cpp - File Bundling and Unbundling ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements an offload bundling API that bundles different files
+/// that relate with the same source code but different targets into a single
+/// one. Also the implements the opposite functionality, i.e. unbundle files
+/// previous created by this API.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/OffloadBundler.h"
+#include "clang/Basic/Cuda.h"
+#include "clang/Basic/TargetID.h"
+#include "clang/Basic/Version.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/Magic.h"
+#include "llvm/Object/Archive.h"
+#include "llvm/Object/ArchiveWriter.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compression.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MD5.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/StringSaver.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/WithColor.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/Triple.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <forward_list>
+#include <llvm/Support/Process.h>
+#include <memory>
+#include <set>
+#include <string>
+#include <system_error>
+#include <utility>
+
+using namespace llvm;
+using namespace llvm::object;
+using namespace clang;
+
+static llvm::TimerGroup
+ ClangOffloadBundlerTimerGroup("Clang Offload Bundler Timer Group",
+ "Timer group for clang offload bundler");
+
+/// Magic string that marks the existence of offloading data.
+#define OFFLOAD_BUNDLER_MAGIC_STR "__CLANG_OFFLOAD_BUNDLE__"
+
+OffloadTargetInfo::OffloadTargetInfo(const StringRef Target,
+ const OffloadBundlerConfig &BC)
+ : BundlerConfig(BC) {
+
+ // TODO: Add error checking from ClangOffloadBundler.cpp
+ auto TargetFeatures = Target.split(':');
+ auto TripleOrGPU = TargetFeatures.first.rsplit('-');
+
+ if (clang::StringToCudaArch(TripleOrGPU.second) != clang::CudaArch::UNKNOWN) {
+ auto KindTriple = TripleOrGPU.first.split('-');
+ this->OffloadKind = KindTriple.first;
+
+ // Enforce optional env field to standardize bundles
+ llvm::Triple t = llvm::Triple(KindTriple.second);
+ this->Triple = llvm::Triple(t.getArchName(), t.getVendorName(),
+ t.getOSName(), t.getEnvironmentName());
+
+ this->TargetID = Target.substr(Target.find(TripleOrGPU.second));
+ } else {
+ auto KindTriple = TargetFeatures.first.split('-');
+ this->OffloadKind = KindTriple.first;
+
+ // Enforce optional env field to standardize bundles
+ llvm::Triple t = llvm::Triple(KindTriple.second);
+ this->Triple = llvm::Triple(t.getArchName(), t.getVendorName(),
+ t.getOSName(), t.getEnvironmentName());
+
+ this->TargetID = "";
+ }
+}
+
+bool OffloadTargetInfo::hasHostKind() const {
+ return this->OffloadKind == "host";
+}
+
+bool OffloadTargetInfo::isOffloadKindValid() const {
+ return OffloadKind == "host" || OffloadKind == "openmp" ||
+ OffloadKind == "hip" || OffloadKind == "hipv4";
+}
+
+bool OffloadTargetInfo::isOffloadKindCompatible(
+ const StringRef TargetOffloadKind) const {
+ if (OffloadKind == TargetOffloadKind)
+ return true;
+ if (BundlerConfig.HipOpenmpCompatible) {
+ bool HIPCompatibleWithOpenMP = OffloadKind.starts_with_insensitive("hip") &&
+ TargetOffloadKind == "openmp";
+ bool OpenMPCompatibleWithHIP =
+ OffloadKind == "openmp" &&
+ TargetOffloadKind.starts_with_insensitive("hip");
+ return HIPCompatibleWithOpenMP || OpenMPCompatibleWithHIP;
+ }
+ return false;
+}
+
+bool OffloadTargetInfo::isTripleValid() const {
+ return !Triple.str().empty() && Triple.getArch() != Triple::UnknownArch;
+}
+
+bool OffloadTargetInfo::operator==(const OffloadTargetInfo &Target) const {
+ return OffloadKind == Target.OffloadKind &&
+ Triple.isCompatibleWith(Target.Triple) && TargetID == Target.TargetID;
+}
+
+std::string OffloadTargetInfo::str() const {
+ return Twine(OffloadKind + "-" + Triple.str() + "-" + TargetID).str();
+}
+
+static StringRef getDeviceFileExtension(StringRef Device,
+ StringRef BundleFileName) {
+ if (Device.contains("gfx"))
+ return ".bc";
+ if (Device.contains("sm_"))
+ return ".cubin";
+ return sys::path::extension(BundleFileName);
+}
+
+static std::string getDeviceLibraryFileName(StringRef BundleFileName,
+ StringRef Device) {
+ StringRef LibName = sys::path::stem(BundleFileName);
+ StringRef Extension = getDeviceFileExtension(Device, BundleFileName);
+
+ std::string Result;
+ Result += LibName;
+ Result += Extension;
+ return Result;
+}
+
+namespace {
+/// Generic file handler interface.
+class FileHandler {
+public:
+ struct BundleInfo {
+ StringRef BundleID;
+ };
+
+ FileHandler() {}
+
+ virtual ~FileHandler() {}
+
+ /// Update the file handler with information from the header of the bundled
+ /// file.
+ virtual Error ReadHeader(MemoryBuffer &Input) = 0;
+
+ /// Read the marker of the next bundled to be read in the file. The bundle
+ /// name is returned if there is one in the file, or `std::nullopt` if there
+ /// are no more bundles to be read.
+ virtual Expected<std::optional<StringRef>>
+ ReadBundleStart(MemoryBuffer &Input) = 0;
+
+ /// Read the marker that closes the current bundle.
+ virtual Error ReadBundleEnd(MemoryBuffer &Input) = 0;
+
+ /// Read the current bundle and write the result into the stream \a OS.
+ virtual Error ReadBundle(raw_ostream &OS, MemoryBuffer &Input) = 0;
+
+ /// Write the header of the bundled file to \a OS based on the information
+ /// gathered from \a Inputs.
+ virtual Error WriteHeader(raw_ostream &OS,
+ ArrayRef<std::unique_ptr<MemoryBuffer>> Inputs) = 0;
+
+ /// Write the marker that initiates a bundle for the triple \a TargetTriple to
+ /// \a OS.
+ virtual Error WriteBundleStart(raw_ostream &OS, StringRef TargetTriple) = 0;
+
+ /// Write the marker that closes a bundle for the triple \a TargetTriple to \a
+ /// OS.
+ virtual Error WriteBundleEnd(raw_ostream &OS, StringRef TargetTriple) = 0;
+
+ /// Write the bundle from \a Input into \a OS.
+ virtual Error WriteBundle(raw_ostream &OS, MemoryBuffer &Input) = 0;
+
+ /// Finalize output file.
+ virtual Error finalizeOutputFile() { return Error::success(); }
+
+ /// List bundle IDs in \a Input.
+ virtual Error listBundleIDs(MemoryBuffer &Input) {
+ if (Error Err = ReadHeader(Input))
+ return Err;
+ return forEachBundle(Input, [&](const BundleInfo &Info) -> Error {
+ llvm::outs() << Info.BundleID << '\n';
+ Error Err = listBundleIDsCallback(Input, Info);
+ if (Err)
+ return Err;
+ return Error::success();
+ });
+ }
+
+ /// Get bundle IDs in \a Input in \a BundleIds.
+ virtual Error getBundleIDs(MemoryBuffer &Input,
+ std::set<StringRef> &BundleIds) {
+ if (Error Err = ReadHeader(Input))
+ return Err;
+ return forEachBundle(Input, [&](const BundleInfo &Info) -> Error {
+ BundleIds.insert(Info.BundleID);
+ Error Err = listBundleIDsCallback(Input, Info);
+ if (Err)
+ return Err;
+ return Error::success();
+ });
+ }
+
+ /// For each bundle in \a Input, do \a Func.
+ Error forEachBundle(MemoryBuffer &Input,
+ std::function<Error(const BundleInfo &)> Func) {
+ while (true) {
+ Expected<std::optional<StringRef>> CurTripleOrErr =
+ ReadBundleStart(Input);
+ if (!CurTripleOrErr)
+ return CurTripleOrErr.takeError();
+
+ // No more bundles.
+ if (!*CurTripleOrErr)
+ break;
+
+ StringRef CurTriple = **CurTripleOrErr;
+ assert(!CurTriple.empty());
+
+ BundleInfo Info{CurTriple};
+ if (Error Err = Func(Info))
+ return Err;
+ }
+ return Error::success();
+ }
+
+protected:
+ virtual Error listBundleIDsCallback(MemoryBuffer &Input,
+ const BundleInfo &Info) {
+ return Error::success();
+ }
+};
+
+/// Handler for binary files. The bundled file will have the following format
+/// (all integers are stored in little-endian format):
+///
+/// "OFFLOAD_BUNDLER_MAGIC_STR" (ASCII encoding of the string)
+///
+/// NumberOfOffloadBundles (8-byte integer)
+///
+/// OffsetOfBundle1 (8-byte integer)
+/// SizeOfBundle1 (8-byte integer)
+/// NumberOfBytesInTripleOfBundle1 (8-byte integer)
+/// TripleOfBundle1 (byte length defined before)
+///
+/// ...
+///
+/// OffsetOfBundleN (8-byte integer)
+/// SizeOfBundleN (8-byte integer)
+/// NumberOfBytesInTripleOfBundleN (8-byte integer)
+/// TripleOfBundleN (byte length defined before)
+///
+/// Bundle1
+/// ...
+/// BundleN
+
+/// Read 8-byte integers from a buffer in little-endian format.
+static uint64_t Read8byteIntegerFromBuffer(StringRef Buffer, size_t pos) {
+ return llvm::support::endian::read64le(Buffer.data() + pos);
+}
+
+/// Write 8-byte integers to a buffer in little-endian format.
+static void Write8byteIntegerToBuffer(raw_ostream &OS, uint64_t Val) {
+ llvm::support::endian::write(OS, Val, llvm::endianness::little);
+}
+
+class BinaryFileHandler final : public FileHandler {
+ /// Information about the bundles extracted from the header.
+ struct BinaryBundleInfo final : public BundleInfo {
+ /// Size of the bundle.
+ uint64_t Size = 0u;
+ /// Offset at which the bundle starts in the bundled file.
+ uint64_t Offset = 0u;
+
+ BinaryBundleInfo() {}
+ BinaryBundleInfo(uint64_t Size, uint64_t Offset)
+ : Size(Size), Offset(Offset) {}
+ };
+
+ /// Map between a triple and the corresponding bundle information.
+ StringMap<BinaryBundleInfo> BundlesInfo;
+
+ /// Iterator for the bundle information that is being read.
+ StringMap<BinaryBundleInfo>::iterator CurBundleInfo;
+ StringMap<BinaryBundleInfo>::iterator NextBundleInfo;
+
+ /// Current bundle target to be written.
+ std::string CurWriteBundleTarget;
+
+ /// Configuration options and arrays for this bundler job
+ const OffloadBundlerConfig &BundlerConfig;
+
+public:
+ // TODO: Add error checking from ClangOffloadBundler.cpp
+ BinaryFileHandler(const OffloadBundlerConfig &BC) : BundlerConfig(BC) {}
+
+ ~BinaryFileHandler() final {}
+
+ Error ReadHeader(MemoryBuffer &Input) final {
+ StringRef FC = Input.getBuffer();
+
+ // Initialize the current bundle with the end of the container.
+ CurBundleInfo = BundlesInfo.end();
+
+ // Check if buffer is smaller than magic string.
+ size_t ReadChars = sizeof(OFFLOAD_BUNDLER_MAGIC_STR) - 1;
+ if (ReadChars > FC.size())
+ return Error::success();
+
+ // Check if no magic was found.
+ if (llvm::identify_magic(FC) != llvm::file_magic::offload_bundle)
+ return Error::success();
+
+ // Read number of bundles.
+ if (ReadChars + 8 > FC.size())
+ return Error::success();
+
+ uint64_t NumberOfBundles = Read8byteIntegerFromBuffer(FC, ReadChars);
+ ReadChars += 8;
+
+ // Read bundle offsets, sizes and triples.
+ for (uint64_t i = 0; i < NumberOfBundles; ++i) {
+
+ // Read offset.
+ if (ReadChars + 8 > FC.size())
+ return Error::success();
+
+ uint64_t Offset = Read8byteIntegerFromBuffer(FC, ReadChars);
+ ReadChars += 8;
+
+ // Read size.
+ if (ReadChars + 8 > FC.size())
+ return Error::success();
+
+ uint64_t Size = Read8byteIntegerFromBuffer(FC, ReadChars);
+ ReadChars += 8;
+
+ // Read triple size.
+ if (ReadChars + 8 > FC.size())
+ return Error::success();
+
+ uint64_t TripleSize = Read8byteIntegerFromBuffer(FC, ReadChars);
+ ReadChars += 8;
+
+ // Read triple.
+ if (ReadChars + TripleSize > FC.size())
+ return Error::success();
+
+ StringRef Triple(&FC.data()[ReadChars], TripleSize);
+ ReadChars += TripleSize;
+
+ // Check if the offset and size make sense.
+ if (!Offset || Offset + Size > FC.size())
+ return Error::success();
+
+ assert(!BundlesInfo.contains(Triple) && "Triple is duplicated??");
+ BundlesInfo[Triple] = BinaryBundleInfo(Size, Offset);
+ }
+ // Set the iterator to where we will start to read.
+ CurBundleInfo = BundlesInfo.end();
+ NextBundleInfo = BundlesInfo.begin();
+ return Error::success();
+ }
+
+ Expected<std::optional<StringRef>>
+ ReadBundleStart(MemoryBuffer &Input) final {
+ if (NextBundleInfo == BundlesInfo.end())
+ return std::nullopt;
+ CurBundleInfo = NextBundleInfo++;
+ return CurBundleInfo->first();
+ }
+
+ Error ReadBundleEnd(MemoryBuffer &Input) final {
+ assert(CurBundleInfo != BundlesInfo.end() && "Invalid reader info!");
+ return Error::success();
+ }
+
+ Error ReadBundle(raw_ostream &OS, MemoryBuffer &Input) final {
+ assert(CurBundleInfo != BundlesInfo.end() && "Invalid reader info!");
+ StringRef FC = Input.getBuffer();
+ OS.write(FC.data() + CurBundleInfo->second.Offset,
+ CurBundleInfo->second.Size);
+ return Error::success();
+ }
+
+ Error WriteHeader(raw_ostream &OS,
+ ArrayRef<std::unique_ptr<MemoryBuffer>> Inputs) final {
+
+ // Compute size of the header.
+ uint64_t HeaderSize = 0;
+
+ HeaderSize += sizeof(OFFLOAD_BUNDLER_MAGIC_STR) - 1;
+ HeaderSize += 8; // Number of Bundles
+
+ for (auto &T : BundlerConfig.TargetNames) {
+ HeaderSize += 3 * 8; // Bundle offset, Size of bundle and size of triple.
+ HeaderSize += T.size(); // The triple.
+ }
+
+ // Write to the buffer the header.
+ OS << OFFLOAD_BUNDLER_MAGIC_STR;
+
+ Write8byteIntegerToBuffer(OS, BundlerConfig.TargetNames.size());
+
+ unsigned Idx = 0;
+ for (auto &T : BundlerConfig.TargetNames) {
+ MemoryBuffer &MB = *Inputs[Idx++];
+ HeaderSize = alignTo(HeaderSize, BundlerConfig.BundleAlignment);
+ // Bundle offset.
+ Write8byteIntegerToBuffer(OS, HeaderSize);
+ // Size of the bundle (adds to the next bundle's offset)
+ Write8byteIntegerToBuffer(OS, MB.getBufferSize());
+ BundlesInfo[T] = BinaryBundleInfo(MB.getBufferSize(), HeaderSize);
+ HeaderSize += MB.getBufferSize();
+ // Size of the triple
+ Write8byteIntegerToBuffer(OS, T.size());
+ // Triple
+ OS << T;
+ }
+ return Error::success();
+ }
+
+ Error WriteBundleStart(raw_ostream &OS, StringRef TargetTriple) final {
+ CurWriteBundleTarget = TargetTriple.str();
+ return Error::success();
+ }
+
+ Error WriteBundleEnd(raw_ostream &OS, StringRef TargetTriple) final {
+ return Error::success();
+ }
+
+ Error WriteBundle(raw_ostream &OS, MemoryBuffer &Input) final {
+ auto BI = BundlesInfo[CurWriteBundleTarget];
+
+ // Pad with 0 to reach specified offset.
+ size_t CurrentPos = OS.tell();
+ size_t PaddingSize = BI.Offset > CurrentPos ? BI.Offset - CurrentPos : 0;
+ for (size_t I = 0; I < PaddingSize; ++I)
+ OS.write('\0');
+ assert(OS.tell() == BI.Offset);
+
+ OS.write(Input.getBufferStart(), Input.getBufferSize());
+
+ return Error::success();
+ }
+};
+
+// This class implements a list of temporary files that are removed upon
+// object destruction.
+class TempFileHandlerRAII {
+public:
+ ~TempFileHandlerRAII() {
+ for (const auto &File : Files)
+ sys::fs::remove(File);
+ }
+
+ // Creates temporary file with given contents.
+ Expected<StringRef> Create(std::optional<ArrayRef<char>> Contents) {
+ SmallString<128u> File;
+ if (std::error_code EC =
+ sys::fs::createTemporaryFile("clang-offload-bundler", "tmp", File))
+ return createFileError(File, EC);
+ Files.push_front(File);
+
+ if (Contents) {
+ std::error_code EC;
+ raw_fd_ostream OS(File, EC);
+ if (EC)
+ return createFileError(File, EC);
+ OS.write(Contents->data(), Contents->size());
+ }
+ return Files.front().str();
+ }
+
+private:
+ std::forward_list<SmallString<128u>> Files;
+};
+
+/// Handler for object files. The bundles are organized by sections with a
+/// designated name.
+///
+/// To unbundle, we just copy the contents of the designated section.
+class ObjectFileHandler final : public FileHandler {
+
+ /// The object file we are currently dealing with.
+ std::unique_ptr<ObjectFile> Obj;
+
+ /// Return the input file contents.
+ StringRef getInputFileContents() const { return Obj->getData(); }
+
+ /// Return bundle name (<kind>-<triple>) if the provided section is an offload
+ /// section.
+ static Expected<std::optional<StringRef>>
+ IsOffloadSection(SectionRef CurSection) {
+ Expected<StringRef> NameOrErr = CurSection.getName();
+ if (!NameOrErr)
+ return NameOrErr.takeError();
+
+ // If it does not start with the reserved suffix, just skip this section.
+ if (llvm::identify_magic(*NameOrErr) != llvm::file_magic::offload_bundle)
+ return std::nullopt;
+
+ // Return the triple that is right after the reserved prefix.
+ return NameOrErr->substr(sizeof(OFFLOAD_BUNDLER_MAGIC_STR) - 1);
+ }
+
+ /// Total number of inputs.
+ unsigned NumberOfInputs = 0;
+
+ /// Total number of processed inputs, i.e, inputs that were already
+ /// read from the buffers.
+ unsigned NumberOfProcessedInputs = 0;
+
+ /// Iterator of the current and next section.
+ section_iterator CurrentSection;
+ section_iterator NextSection;
+
+ /// Configuration options and arrays for this bundler job
+ const OffloadBundlerConfig &BundlerConfig;
+
+public:
+ // TODO: Add error checking from ClangOffloadBundler.cpp
+ ObjectFileHandler(std::unique_ptr<ObjectFile> ObjIn,
+ const OffloadBundlerConfig &BC)
+ : Obj(std::move(ObjIn)), CurrentSection(Obj->section_begin()),
+ NextSection(Obj->section_begin()), BundlerConfig(BC) {}
+
+ ~ObjectFileHandler() final {}
+
+ Error ReadHeader(MemoryBuffer &Input) final { return Error::success(); }
+
+ Expected<std::optional<StringRef>>
+ ReadBundleStart(MemoryBuffer &Input) final {
+ while (NextSection != Obj->section_end()) {
+ CurrentSection = NextSection;
+ ++NextSection;
+
+ // Check if the current section name starts with the reserved prefix. If
+ // so, return the triple.
+ Expected<std::optional<StringRef>> TripleOrErr =
+ IsOffloadSection(*CurrentSection);
+ if (!TripleOrErr)
+ return TripleOrErr.takeError();
+ if (*TripleOrErr)
+ return **TripleOrErr;
+ }
+ return std::nullopt;
+ }
+
+ Error ReadBundleEnd(MemoryBuffer &Input) final { return Error::success(); }
+
+ Error ReadBundle(raw_ostream &OS, MemoryBuffer &Input) final {
+ Expected<StringRef> ContentOrErr = CurrentSection->getContents();
+ if (!ContentOrErr)
+ return ContentOrErr.takeError();
+ StringRef Content = *ContentOrErr;
+
+ // Copy fat object contents to the output when extracting host bundle.
+ if (Content.size() == 1u && Content.front() == 0)
+ Content = StringRef(Input.getBufferStart(), Input.getBufferSize());
+
+ OS.write(Content.data(), Content.size());
+ return Error::success();
+ }
+
+ Error WriteHeader(raw_ostream &OS,
+ ArrayRef<std::unique_ptr<MemoryBuffer>> Inputs) final {
+ assert(BundlerConfig.HostInputIndex != ~0u &&
+ "Host input index not defined.");
+
+ // Record number of inputs.
+ NumberOfInputs = Inputs.size();
+ return Error::success();
+ }
+
+ Error WriteBundleStart(raw_ostream &OS, StringRef TargetTriple) final {
+ ++NumberOfProcessedInputs;
+ return Error::success();
+ }
+
+ Error WriteBundleEnd(raw_ostream &OS, StringRef TargetTriple) final {
+ return Error::success();
+ }
+
+ Error finalizeOutputFile() final {
+ assert(NumberOfProcessedInputs <= NumberOfInputs &&
+ "Processing more inputs that actually exist!");
+ assert(BundlerConfig.HostInputIndex != ~0u &&
+ "Host input index not defined.");
+
+ // If this is not the last output, we don't have to do anything.
+ if (NumberOfProcessedInputs != NumberOfInputs)
+ return Error::success();
+
+ // We will use llvm-objcopy to add target objects sections to the output
+ // fat object. These sections should have 'exclude' flag set which tells
+ // link editor to remove them from linker inputs when linking executable or
+ // shared library.
+
+ assert(BundlerConfig.ObjcopyPath != "" &&
+ "llvm-objcopy path not specified");
+
+ // Temporary files that need to be removed.
+ TempFileHandlerRAII TempFiles;
+
+ // Compose llvm-objcopy command line for add target objects' sections with
+ // appropriate flags.
+ BumpPtrAllocator Alloc;
+ StringSaver SS{Alloc};
+ SmallVector<StringRef, 8u> ObjcopyArgs{"llvm-objcopy"};
+
+ for (unsigned I = 0; I < NumberOfInputs; ++I) {
+ StringRef InputFile = BundlerConfig.InputFileNames[I];
+ if (I == BundlerConfig.HostInputIndex) {
+ // Special handling for the host bundle. We do not need to add a
+ // standard bundle for the host object since we are going to use fat
+ // object as a host object. Therefore use dummy contents (one zero byte)
+ // when creating section for the host bundle.
+ Expected<StringRef> TempFileOrErr = TempFiles.Create(ArrayRef<char>(0));
+ if (!TempFileOrErr)
+ return TempFileOrErr.takeError();
+ InputFile = *TempFileOrErr;
+ }
+
+ ObjcopyArgs.push_back(
+ SS.save(Twine("--add-section=") + OFFLOAD_BUNDLER_MAGIC_STR +
+ BundlerConfig.TargetNames[I] + "=" + InputFile));
+ ObjcopyArgs.push_back(
+ SS.save(Twine("--set-section-flags=") + OFFLOAD_BUNDLER_MAGIC_STR +
+ BundlerConfig.TargetNames[I] + "=readonly,exclude"));
+ }
+ ObjcopyArgs.push_back("--");
+ ObjcopyArgs.push_back(
+ BundlerConfig.InputFileNames[BundlerConfig.HostInputIndex]);
+ ObjcopyArgs.push_back(BundlerConfig.OutputFileNames.front());
+
+ if (Error Err = executeObjcopy(BundlerConfig.ObjcopyPath, ObjcopyArgs))
+ return Err;
+
+ return Error::success();
+ }
+
+ Error WriteBundle(raw_ostream &OS, MemoryBuffer &Input) final {
+ return Error::success();
+ }
+
+private:
+ Error executeObjcopy(StringRef Objcopy, ArrayRef<StringRef> Args) {
+ // If the user asked for the commands to be printed out, we do that
+ // instead of executing it.
+ if (BundlerConfig.PrintExternalCommands) {
+ errs() << "\"" << Objcopy << "\"";
+ for (StringRef Arg : drop_begin(Args, 1))
+ errs() << " \"" << Arg << "\"";
+ errs() << "\n";
+ } else {
+ if (sys::ExecuteAndWait(Objcopy, Args))
+ return createStringError(inconvertibleErrorCode(),
+ "'llvm-objcopy' tool failed");
+ }
+ return Error::success();
+ }
+};
+
+/// Handler for text files. The bundled file will have the following format.
+///
+/// "Comment OFFLOAD_BUNDLER_MAGIC_STR__START__ triple"
+/// Bundle 1
+/// "Comment OFFLOAD_BUNDLER_MAGIC_STR__END__ triple"
+/// ...
+/// "Comment OFFLOAD_BUNDLER_MAGIC_STR__START__ triple"
+/// Bundle N
+/// "Comment OFFLOAD_BUNDLER_MAGIC_STR__END__ triple"
+class TextFileHandler final : public FileHandler {
+ /// String that begins a line comment.
+ StringRef Comment;
+
+ /// String that initiates a bundle.
+ std::string BundleStartString;
+
+ /// String that closes a bundle.
+ std::string BundleEndString;
+
+ /// Number of chars read from input.
+ size_t ReadChars = 0u;
+
+protected:
+ Error ReadHeader(MemoryBuffer &Input) final { return Error::success(); }
+
+ Expected<std::optional<StringRef>>
+ ReadBundleStart(MemoryBuffer &Input) final {
+ StringRef FC = Input.getBuffer();
+
+ // Find start of the bundle.
+ ReadChars = FC.find(BundleStartString, ReadChars);
+ if (ReadChars == FC.npos)
+ return std::nullopt;
+
+ // Get position of the triple.
+ size_t TripleStart = ReadChars = ReadChars + BundleStartString.size();
+
+ // Get position that closes the triple.
+ size_t TripleEnd = ReadChars = FC.find("\n", ReadChars);
+ if (TripleEnd == FC.npos)
+ return std::nullopt;
+
+ // Next time we read after the new line.
+ ++ReadChars;
+
+ return StringRef(&FC.data()[TripleStart], TripleEnd - TripleStart);
+ }
+
+ Error ReadBundleEnd(MemoryBuffer &Input) final {
+ StringRef FC = Input.getBuffer();
+
+ // Read up to the next new line.
+ assert(FC[ReadChars] == '\n' && "The bundle should end with a new line.");
+
+ size_t TripleEnd = ReadChars = FC.find("\n", ReadChars + 1);
+ if (TripleEnd != FC.npos)
+ // Next time we read after the new line.
+ ++ReadChars;
+
+ return Error::success();
+ }
+
+ Error ReadBundle(raw_ostream &OS, MemoryBuffer &Input) final {
+ StringRef FC = Input.getBuffer();
+ size_t BundleStart = ReadChars;
+
+ // Find end of the bundle.
+ size_t BundleEnd = ReadChars = FC.find(BundleEndString, ReadChars);
+
+ StringRef Bundle(&FC.data()[BundleStart], BundleEnd - BundleStart);
+ OS << Bundle;
+
+ return Error::success();
+ }
+
+ Error WriteHeader(raw_ostream &OS,
+ ArrayRef<std::unique_ptr<MemoryBuffer>> Inputs) final {
+ return Error::success();
+ }
+
+ Error WriteBundleStart(raw_ostream &OS, StringRef TargetTriple) final {
+ OS << BundleStartString << TargetTriple << "\n";
+ return Error::success();
+ }
+
+ Error WriteBundleEnd(raw_ostream &OS, StringRef TargetTriple) final {
+ OS << BundleEndString << TargetTriple << "\n";
+ return Error::success();
+ }
+
+ Error WriteBundle(raw_ostream &OS, MemoryBuffer &Input) final {
+ OS << Input.getBuffer();
+ return Error::success();
+ }
+
+public:
+ TextFileHandler(StringRef Comment) : Comment(Comment), ReadChars(0) {
+ BundleStartString =
+ "\n" + Comment.str() + " " OFFLOAD_BUNDLER_MAGIC_STR "__START__ ";
+ BundleEndString =
+ "\n" + Comment.str() + " " OFFLOAD_BUNDLER_MAGIC_STR "__END__ ";
+ }
+
+ Error listBundleIDsCallback(MemoryBuffer &Input,
+ const BundleInfo &Info) final {
+ // TODO: To list bundle IDs in a bundled text file we need to go through
+ // all bundles. The format of bundled text file may need to include a
+ // header if the performance of listing bundle IDs of bundled text file is
+ // important.
+ ReadChars = Input.getBuffer().find(BundleEndString, ReadChars);
+ if (Error Err = ReadBundleEnd(Input))
+ return Err;
+ return Error::success();
+ }
+};
+} // namespace
+
+/// Return an appropriate object file handler. We use the specific object
+/// handler if we know how to deal with that format, otherwise we use a default
+/// binary file handler.
+static std::unique_ptr<FileHandler>
+CreateObjectFileHandler(MemoryBuffer &FirstInput,
+ const OffloadBundlerConfig &BundlerConfig) {
+ // Check if the input file format is one that we know how to deal with.
+ Expected<std::unique_ptr<Binary>> BinaryOrErr = createBinary(FirstInput);
+
+ // We only support regular object files. If failed to open the input as a
+ // known binary or this is not an object file use the default binary handler.
+ if (errorToBool(BinaryOrErr.takeError()) || !isa<ObjectFile>(*BinaryOrErr))
+ return std::make_unique<BinaryFileHandler>(BundlerConfig);
+
+ // Otherwise create an object file handler. The handler will be owned by the
+ // client of this function.
+ return std::make_unique<ObjectFileHandler>(
+ std::unique_ptr<ObjectFile>(cast<ObjectFile>(BinaryOrErr->release())),
+ BundlerConfig);
+}
+
+/// Return an appropriate handler given the input files and options.
+static Expected<std::unique_ptr<FileHandler>>
+CreateFileHandler(MemoryBuffer &FirstInput,
+ const OffloadBundlerConfig &BundlerConfig) {
+ std::string FilesType = BundlerConfig.FilesType;
+
+ if (FilesType == "i")
+ return std::make_unique<TextFileHandler>(/*Comment=*/"//");
+ if (FilesType == "ii")
+ return std::make_unique<TextFileHandler>(/*Comment=*/"//");
+ if (FilesType == "cui")
+ return std::make_unique<TextFileHandler>(/*Comment=*/"//");
+ if (FilesType == "hipi")
+ return std::make_unique<TextFileHandler>(/*Comment=*/"//");
+ // TODO: `.d` should be eventually removed once `-M` and its variants are
+ // handled properly in offload compilation.
+ if (FilesType == "d")
+ return std::make_unique<TextFileHandler>(/*Comment=*/"#");
+ if (FilesType == "ll")
+ return std::make_unique<TextFileHandler>(/*Comment=*/";");
+ if (FilesType == "bc")
+ return std::make_unique<BinaryFileHandler>(BundlerConfig);
+ if (FilesType == "s")
+ return std::make_unique<TextFileHandler>(/*Comment=*/"#");
+ if (FilesType == "o")
+ return CreateObjectFileHandler(FirstInput, BundlerConfig);
+ if (FilesType == "a")
+ return CreateObjectFileHandler(FirstInput, BundlerConfig);
+ if (FilesType == "gch")
+ return std::make_unique<BinaryFileHandler>(BundlerConfig);
+ if (FilesType == "ast")
+ return std::make_unique<BinaryFileHandler>(BundlerConfig);
+
+ return createStringError(errc::invalid_argument,
+ "'" + FilesType + "': invalid file type specified");
+}
+
+OffloadBundlerConfig::OffloadBundlerConfig() {
+ auto IgnoreEnvVarOpt =
+ llvm::sys::Process::GetEnv("OFFLOAD_BUNDLER_IGNORE_ENV_VAR");
+ if (IgnoreEnvVarOpt.has_value() && IgnoreEnvVarOpt.value() == "1")
+ return;
+
+ auto VerboseEnvVarOpt = llvm::sys::Process::GetEnv("OFFLOAD_BUNDLER_VERBOSE");
+ if (VerboseEnvVarOpt.has_value())
+ Verbose = VerboseEnvVarOpt.value() == "1";
+
+ auto CompressEnvVarOpt =
+ llvm::sys::Process::GetEnv("OFFLOAD_BUNDLER_COMPRESS");
+ if (CompressEnvVarOpt.has_value())
+ Compress = CompressEnvVarOpt.value() == "1";
+}
+
+llvm::Expected<std::unique_ptr<llvm::MemoryBuffer>>
+CompressedOffloadBundle::compress(const llvm::MemoryBuffer &Input,
+ bool Verbose) {
+ llvm::Timer HashTimer("Hash Calculation Timer", "Hash calculation time",
+ ClangOffloadBundlerTimerGroup);
+ if (Verbose)
+ HashTimer.startTimer();
+ llvm::MD5 Hash;
+ llvm::MD5::MD5Result Result;
+ Hash.update(Input.getBuffer());
+ Hash.final(Result);
+ uint64_t TruncatedHash = Result.low();
+ if (Verbose)
+ HashTimer.stopTimer();
+
+ SmallVector<uint8_t, 0> CompressedBuffer;
+ auto BufferUint8 = llvm::ArrayRef<uint8_t>(
+ reinterpret_cast<const uint8_t *>(Input.getBuffer().data()),
+ Input.getBuffer().size());
+
+ llvm::compression::Format CompressionFormat;
+
+ if (llvm::compression::zstd::isAvailable())
+ CompressionFormat = llvm::compression::Format::Zstd;
+ else if (llvm::compression::zlib::isAvailable())
+ CompressionFormat = llvm::compression::Format::Zlib;
+ else
+ return createStringError(llvm::inconvertibleErrorCode(),
+ "Compression not supported");
+
+ llvm::Timer CompressTimer("Compression Timer", "Compression time",
+ ClangOffloadBundlerTimerGroup);
+ if (Verbose)
+ CompressTimer.startTimer();
+ llvm::compression::compress(CompressionFormat, BufferUint8, CompressedBuffer);
+ if (Verbose)
+ CompressTimer.stopTimer();
+
+ uint16_t CompressionMethod = static_cast<uint16_t>(CompressionFormat);
+ uint32_t UncompressedSize = Input.getBuffer().size();
+
+ SmallVector<char, 0> FinalBuffer;
+ llvm::raw_svector_ostream OS(FinalBuffer);
+ OS << MagicNumber;
+ OS.write(reinterpret_cast<const char *>(&Version), sizeof(Version));
+ OS.write(reinterpret_cast<const char *>(&CompressionMethod),
+ sizeof(CompressionMethod));
+ OS.write(reinterpret_cast<const char *>(&UncompressedSize),
+ sizeof(UncompressedSize));
+ OS.write(reinterpret_cast<const char *>(&TruncatedHash),
+ sizeof(TruncatedHash));
+ OS.write(reinterpret_cast<const char *>(CompressedBuffer.data()),
+ CompressedBuffer.size());
+
+ if (Verbose) {
+ auto MethodUsed =
+ CompressionFormat == llvm::compression::Format::Zstd ? "zstd" : "zlib";
+ llvm::errs() << "Compressed bundle format version: " << Version << "\n"
+ << "Compression method used: " << MethodUsed << "\n"
+ << "Binary size before compression: " << UncompressedSize
+ << " bytes\n"
+ << "Binary size after compression: " << CompressedBuffer.size()
+ << " bytes\n"
+ << "Truncated MD5 hash: "
+ << llvm::format_hex(TruncatedHash, 16) << "\n";
+ }
+
+ return llvm::MemoryBuffer::getMemBufferCopy(
+ llvm::StringRef(FinalBuffer.data(), FinalBuffer.size()));
+}
+
+llvm::Expected<std::unique_ptr<llvm::MemoryBuffer>>
+CompressedOffloadBundle::decompress(const llvm::MemoryBuffer &Input,
+ bool Verbose) {
+
+ StringRef Blob = Input.getBuffer();
+
+ if (Blob.size() < HeaderSize) {
+ return llvm::MemoryBuffer::getMemBufferCopy(Blob);
+ }
+ if (llvm::identify_magic(Blob) !=
+ llvm::file_magic::offload_bundle_compressed) {
+ if (Verbose)
+ llvm::errs() << "Uncompressed bundle.\n";
+ return llvm::MemoryBuffer::getMemBufferCopy(Blob);
+ }
+
+ uint16_t ThisVersion;
+ uint16_t CompressionMethod;
+ uint32_t UncompressedSize;
+ uint64_t StoredHash;
+ memcpy(&ThisVersion, Input.getBuffer().data() + MagicNumber.size(),
+ sizeof(uint16_t));
+ memcpy(&CompressionMethod, Blob.data() + MagicSize + VersionFieldSize,
+ sizeof(uint16_t));
+ memcpy(&UncompressedSize,
+ Blob.data() + MagicSize + VersionFieldSize + MethodFieldSize,
+ sizeof(uint32_t));
+ memcpy(&StoredHash,
+ Blob.data() + MagicSize + VersionFieldSize + MethodFieldSize +
+ SizeFieldSize,
+ sizeof(uint64_t));
+
+ llvm::compression::Format CompressionFormat;
+ if (CompressionMethod ==
+ static_cast<uint16_t>(llvm::compression::Format::Zlib))
+ CompressionFormat = llvm::compression::Format::Zlib;
+ else if (CompressionMethod ==
+ static_cast<uint16_t>(llvm::compression::Format::Zstd))
+ CompressionFormat = llvm::compression::Format::Zstd;
+ else
+ return createStringError(inconvertibleErrorCode(),
+ "Unknown compressing method");
+
+ llvm::Timer DecompressTimer("Decompression Timer", "Decompression time",
+ ClangOffloadBundlerTimerGroup);
+ if (Verbose)
+ DecompressTimer.startTimer();
+
+ SmallVector<uint8_t, 0> DecompressedData;
+ StringRef CompressedData = Blob.substr(HeaderSize);
+ if (llvm::Error DecompressionError = llvm::compression::decompress(
+ CompressionFormat, llvm::arrayRefFromStringRef(CompressedData),
+ DecompressedData, UncompressedSize))
+ return createStringError(inconvertibleErrorCode(),
+ "Could not decompress embedded file contents: " +
+ llvm::toString(std::move(DecompressionError)));
+
+ if (Verbose) {
+ DecompressTimer.stopTimer();
+
+ // Recalculate MD5 hash
+ llvm::Timer HashRecalcTimer("Hash Recalculation Timer",
+ "Hash recalculation time",
+ ClangOffloadBundlerTimerGroup);
+ HashRecalcTimer.startTimer();
+ llvm::MD5 Hash;
+ llvm::MD5::MD5Result Result;
+ Hash.update(llvm::ArrayRef<uint8_t>(DecompressedData.data(),
+ DecompressedData.size()));
+ Hash.final(Result);
+ uint64_t RecalculatedHash = Result.low();
+ HashRecalcTimer.stopTimer();
+ bool HashMatch = (StoredHash == RecalculatedHash);
+
+ llvm::errs() << "Compressed bundle format version: " << ThisVersion << "\n"
+ << "Decompression method: "
+ << (CompressionFormat == llvm::compression::Format::Zlib
+ ? "zlib"
+ : "zstd")
+ << "\n"
+ << "Size before decompression: " << CompressedData.size()
+ << " bytes\n"
+ << "Size after decompression: " << UncompressedSize
+ << " bytes\n"
+ << "Stored hash: " << llvm::format_hex(StoredHash, 16) << "\n"
+ << "Recalculated hash: "
+ << llvm::format_hex(RecalculatedHash, 16) << "\n"
+ << "Hashes match: " << (HashMatch ? "Yes" : "No") << "\n";
+ }
+
+ return llvm::MemoryBuffer::getMemBufferCopy(
+ llvm::toStringRef(DecompressedData));
+}
+
+// List bundle IDs. Return true if an error was found.
+Error OffloadBundler::ListBundleIDsInFile(
+ StringRef InputFileName, const OffloadBundlerConfig &BundlerConfig) {
+ // Open Input file.
+ ErrorOr<std::unique_ptr<MemoryBuffer>> CodeOrErr =
+ MemoryBuffer::getFileOrSTDIN(InputFileName);
+ if (std::error_code EC = CodeOrErr.getError())
+ return createFileError(InputFileName, EC);
+
+ // Decompress the input if necessary.
+ Expected<std::unique_ptr<MemoryBuffer>> DecompressedBufferOrErr =
+ CompressedOffloadBundle::decompress(**CodeOrErr, BundlerConfig.Verbose);
+ if (!DecompressedBufferOrErr)
+ return createStringError(
+ inconvertibleErrorCode(),
+ "Failed to decompress input: " +
+ llvm::toString(DecompressedBufferOrErr.takeError()));
+
+ MemoryBuffer &DecompressedInput = **DecompressedBufferOrErr;
+
+ // Select the right files handler.
+ Expected<std::unique_ptr<FileHandler>> FileHandlerOrErr =
+ CreateFileHandler(DecompressedInput, BundlerConfig);
+ if (!FileHandlerOrErr)
+ return FileHandlerOrErr.takeError();
+
+ std::unique_ptr<FileHandler> &FH = *FileHandlerOrErr;
+ assert(FH);
+ return FH->listBundleIDs(DecompressedInput);
+}
+
+/// @brief Checks if a code object \p CodeObjectInfo is compatible with a given
+/// target \p TargetInfo.
+/// @link https://clang.llvm.org/docs/ClangOffloadBundler.html#bundle-entry-id
+bool isCodeObjectCompatible(const OffloadTargetInfo &CodeObjectInfo,
+ const OffloadTargetInfo &TargetInfo) {
+
+ // Compatible in case of exact match.
+ if (CodeObjectInfo == TargetInfo) {
+ DEBUG_WITH_TYPE("CodeObjectCompatibility",
+ dbgs() << "Compatible: Exact match: \t[CodeObject: "
+ << CodeObjectInfo.str()
+ << "]\t:\t[Target: " << TargetInfo.str() << "]\n");
+ return true;
+ }
+
+ // Incompatible if Kinds or Triples mismatch.
+ if (!CodeObjectInfo.isOffloadKindCompatible(TargetInfo.OffloadKind) ||
+ !CodeObjectInfo.Triple.isCompatibleWith(TargetInfo.Triple)) {
+ DEBUG_WITH_TYPE(
+ "CodeObjectCompatibility",
+ dbgs() << "Incompatible: Kind/Triple mismatch \t[CodeObject: "
+ << CodeObjectInfo.str() << "]\t:\t[Target: " << TargetInfo.str()
+ << "]\n");
+ return false;
+ }
+
+ // Incompatible if Processors mismatch.
+ llvm::StringMap<bool> CodeObjectFeatureMap, TargetFeatureMap;
+ std::optional<StringRef> CodeObjectProc = clang::parseTargetID(
+ CodeObjectInfo.Triple, CodeObjectInfo.TargetID, &CodeObjectFeatureMap);
+ std::optional<StringRef> TargetProc = clang::parseTargetID(
+ TargetInfo.Triple, TargetInfo.TargetID, &TargetFeatureMap);
+
+ // Both TargetProc and CodeObjectProc can't be empty here.
+ if (!TargetProc || !CodeObjectProc ||
+ CodeObjectProc.value() != TargetProc.value()) {
+ DEBUG_WITH_TYPE("CodeObjectCompatibility",
+ dbgs() << "Incompatible: Processor mismatch \t[CodeObject: "
+ << CodeObjectInfo.str()
+ << "]\t:\t[Target: " << TargetInfo.str() << "]\n");
+ return false;
+ }
+
+ // Incompatible if CodeObject has more features than Target, irrespective of
+ // type or sign of features.
+ if (CodeObjectFeatureMap.getNumItems() > TargetFeatureMap.getNumItems()) {
+ DEBUG_WITH_TYPE("CodeObjectCompatibility",
+ dbgs() << "Incompatible: CodeObject has more features "
+ "than target \t[CodeObject: "
+ << CodeObjectInfo.str()
+ << "]\t:\t[Target: " << TargetInfo.str() << "]\n");
+ return false;
+ }
+
+ // Compatible if each target feature specified by target is compatible with
+ // target feature of code object. The target feature is compatible if the
+ // code object does not specify it (meaning Any), or if it specifies it
+ // with the same value (meaning On or Off).
+ for (const auto &CodeObjectFeature : CodeObjectFeatureMap) {
+ auto TargetFeature = TargetFeatureMap.find(CodeObjectFeature.getKey());
+ if (TargetFeature == TargetFeatureMap.end()) {
+ DEBUG_WITH_TYPE(
+ "CodeObjectCompatibility",
+ dbgs()
+ << "Incompatible: Value of CodeObject's non-ANY feature is "
+ "not matching with Target feature's ANY value \t[CodeObject: "
+ << CodeObjectInfo.str() << "]\t:\t[Target: " << TargetInfo.str()
+ << "]\n");
+ return false;
+ } else if (TargetFeature->getValue() != CodeObjectFeature.getValue()) {
+ DEBUG_WITH_TYPE(
+ "CodeObjectCompatibility",
+ dbgs() << "Incompatible: Value of CodeObject's non-ANY feature is "
+ "not matching with Target feature's non-ANY value "
+ "\t[CodeObject: "
+ << CodeObjectInfo.str()
+ << "]\t:\t[Target: " << TargetInfo.str() << "]\n");
+ return false;
+ }
+ }
+
+ // CodeObject is compatible if all features of Target are:
+ // - either, present in the Code Object's features map with the same sign,
+ // - or, the feature is missing from CodeObjects's features map i.e. it is
+ // set to ANY
+ DEBUG_WITH_TYPE(
+ "CodeObjectCompatibility",
+ dbgs() << "Compatible: Target IDs are compatible \t[CodeObject: "
+ << CodeObjectInfo.str() << "]\t:\t[Target: " << TargetInfo.str()
+ << "]\n");
+ return true;
+}
+
+/// Bundle the files. Return true if an error was found.
+Error OffloadBundler::BundleFiles() {
+ std::error_code EC;
+
+ // Create a buffer to hold the content before compressing.
+ SmallVector<char, 0> Buffer;
+ llvm::raw_svector_ostream BufferStream(Buffer);
+
+ // Open input files.
+ SmallVector<std::unique_ptr<MemoryBuffer>, 8u> InputBuffers;
+ InputBuffers.reserve(BundlerConfig.InputFileNames.size());
+ for (auto &I : BundlerConfig.InputFileNames) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> CodeOrErr =
+ MemoryBuffer::getFileOrSTDIN(I);
+ if (std::error_code EC = CodeOrErr.getError())
+ return createFileError(I, EC);
+ InputBuffers.emplace_back(std::move(*CodeOrErr));
+ }
+
+ // Get the file handler. We use the host buffer as reference.
+ assert((BundlerConfig.HostInputIndex != ~0u || BundlerConfig.AllowNoHost) &&
+ "Host input index undefined??");
+ Expected<std::unique_ptr<FileHandler>> FileHandlerOrErr = CreateFileHandler(
+ *InputBuffers[BundlerConfig.AllowNoHost ? 0
+ : BundlerConfig.HostInputIndex],
+ BundlerConfig);
+ if (!FileHandlerOrErr)
+ return FileHandlerOrErr.takeError();
+
+ std::unique_ptr<FileHandler> &FH = *FileHandlerOrErr;
+ assert(FH);
+
+ // Write header.
+ if (Error Err = FH->WriteHeader(BufferStream, InputBuffers))
+ return Err;
+
+ // Write all bundles along with the start/end markers. If an error was found
+ // writing the end of the bundle component, abort the bundle writing.
+ auto Input = InputBuffers.begin();
+ for (auto &Triple : BundlerConfig.TargetNames) {
+ if (Error Err = FH->WriteBundleStart(BufferStream, Triple))
+ return Err;
+ if (Error Err = FH->WriteBundle(BufferStream, **Input))
+ return Err;
+ if (Error Err = FH->WriteBundleEnd(BufferStream, Triple))
+ return Err;
+ ++Input;
+ }
+
+ raw_fd_ostream OutputFile(BundlerConfig.OutputFileNames.front(), EC,
+ sys::fs::OF_None);
+ if (EC)
+ return createFileError(BundlerConfig.OutputFileNames.front(), EC);
+
+ SmallVector<char, 0> CompressedBuffer;
+ if (BundlerConfig.Compress) {
+ std::unique_ptr<llvm::MemoryBuffer> BufferMemory =
+ llvm::MemoryBuffer::getMemBufferCopy(
+ llvm::StringRef(Buffer.data(), Buffer.size()));
+ auto CompressionResult =
+ CompressedOffloadBundle::compress(*BufferMemory, BundlerConfig.Verbose);
+ if (auto Error = CompressionResult.takeError())
+ return Error;
+
+ auto CompressedMemBuffer = std::move(CompressionResult.get());
+ CompressedBuffer.assign(CompressedMemBuffer->getBufferStart(),
+ CompressedMemBuffer->getBufferEnd());
+ } else
+ CompressedBuffer = Buffer;
+
+ OutputFile.write(CompressedBuffer.data(), CompressedBuffer.size());
+
+ return FH->finalizeOutputFile();
+}
+
+// Unbundle the files. Return true if an error was found.
+Error OffloadBundler::UnbundleFiles() {
+ // Open Input file.
+ ErrorOr<std::unique_ptr<MemoryBuffer>> CodeOrErr =
+ MemoryBuffer::getFileOrSTDIN(BundlerConfig.InputFileNames.front());
+ if (std::error_code EC = CodeOrErr.getError())
+ return createFileError(BundlerConfig.InputFileNames.front(), EC);
+
+ // Decompress the input if necessary.
+ Expected<std::unique_ptr<MemoryBuffer>> DecompressedBufferOrErr =
+ CompressedOffloadBundle::decompress(**CodeOrErr, BundlerConfig.Verbose);
+ if (!DecompressedBufferOrErr)
+ return createStringError(
+ inconvertibleErrorCode(),
+ "Failed to decompress input: " +
+ llvm::toString(DecompressedBufferOrErr.takeError()));
+
+ MemoryBuffer &Input = **DecompressedBufferOrErr;
+
+ // Select the right files handler.
+ Expected<std::unique_ptr<FileHandler>> FileHandlerOrErr =
+ CreateFileHandler(Input, BundlerConfig);
+ if (!FileHandlerOrErr)
+ return FileHandlerOrErr.takeError();
+
+ std::unique_ptr<FileHandler> &FH = *FileHandlerOrErr;
+ assert(FH);
+
+ // Read the header of the bundled file.
+ if (Error Err = FH->ReadHeader(Input))
+ return Err;
+
+ // Create a work list that consist of the map triple/output file.
+ StringMap<StringRef> Worklist;
+ auto Output = BundlerConfig.OutputFileNames.begin();
+ for (auto &Triple : BundlerConfig.TargetNames) {
+ Worklist[Triple] = *Output;
+ ++Output;
+ }
+
+ // Read all the bundles that are in the work list. If we find no bundles we
+ // assume the file is meant for the host target.
+ bool FoundHostBundle = false;
+ while (!Worklist.empty()) {
+ Expected<std::optional<StringRef>> CurTripleOrErr =
+ FH->ReadBundleStart(Input);
+ if (!CurTripleOrErr)
+ return CurTripleOrErr.takeError();
+
+ // We don't have more bundles.
+ if (!*CurTripleOrErr)
+ break;
+
+ StringRef CurTriple = **CurTripleOrErr;
+ assert(!CurTriple.empty());
+
+ auto Output = Worklist.begin();
+ for (auto E = Worklist.end(); Output != E; Output++) {
+ if (isCodeObjectCompatible(
+ OffloadTargetInfo(CurTriple, BundlerConfig),
+ OffloadTargetInfo((*Output).first(), BundlerConfig))) {
+ break;
+ }
+ }
+
+ if (Output == Worklist.end())
+ continue;
+ // Check if the output file can be opened and copy the bundle to it.
+ std::error_code EC;
+ raw_fd_ostream OutputFile((*Output).second, EC, sys::fs::OF_None);
+ if (EC)
+ return createFileError((*Output).second, EC);
+ if (Error Err = FH->ReadBundle(OutputFile, Input))
+ return Err;
+ if (Error Err = FH->ReadBundleEnd(Input))
+ return Err;
+ Worklist.erase(Output);
+
+ // Record if we found the host bundle.
+ auto OffloadInfo = OffloadTargetInfo(CurTriple, BundlerConfig);
+ if (OffloadInfo.hasHostKind())
+ FoundHostBundle = true;
+ }
+
+ if (!BundlerConfig.AllowMissingBundles && !Worklist.empty()) {
+ std::string ErrMsg = "Can't find bundles for";
+ std::set<StringRef> Sorted;
+ for (auto &E : Worklist)
+ Sorted.insert(E.first());
+ unsigned I = 0;
+ unsigned Last = Sorted.size() - 1;
+ for (auto &E : Sorted) {
+ if (I != 0 && Last > 1)
+ ErrMsg += ",";
+ ErrMsg += " ";
+ if (I == Last && I != 0)
+ ErrMsg += "and ";
+ ErrMsg += E.str();
+ ++I;
+ }
+ return createStringError(inconvertibleErrorCode(), ErrMsg);
+ }
+
+ // If no bundles were found, assume the input file is the host bundle and
+ // create empty files for the remaining targets.
+ if (Worklist.size() == BundlerConfig.TargetNames.size()) {
+ for (auto &E : Worklist) {
+ std::error_code EC;
+ raw_fd_ostream OutputFile(E.second, EC, sys::fs::OF_None);
+ if (EC)
+ return createFileError(E.second, EC);
+
+ // If this entry has a host kind, copy the input file to the output file.
+ auto OffloadInfo = OffloadTargetInfo(E.getKey(), BundlerConfig);
+ if (OffloadInfo.hasHostKind())
+ OutputFile.write(Input.getBufferStart(), Input.getBufferSize());
+ }
+ return Error::success();
+ }
+
+ // If we found elements, we emit an error if none of those were for the host
+ // in case host bundle name was provided in command line.
+ if (!(FoundHostBundle || BundlerConfig.HostInputIndex == ~0u ||
+ BundlerConfig.AllowMissingBundles))
+ return createStringError(inconvertibleErrorCode(),
+ "Can't find bundle for the host target");
+
+ // If we still have any elements in the worklist, create empty files for them.
+ for (auto &E : Worklist) {
+ std::error_code EC;
+ raw_fd_ostream OutputFile(E.second, EC, sys::fs::OF_None);
+ if (EC)
+ return createFileError(E.second, EC);
+ }
+
+ return Error::success();
+}
+
+static Archive::Kind getDefaultArchiveKindForHost() {
+ return Triple(sys::getDefaultTargetTriple()).isOSDarwin() ? Archive::K_DARWIN
+ : Archive::K_GNU;
+}
+
+/// @brief Computes a list of targets among all given targets which are
+/// compatible with this code object
+/// @param [in] CodeObjectInfo Code Object
+/// @param [out] CompatibleTargets List of all compatible targets among all
+/// given targets
+/// @return false, if no compatible target is found.
+static bool
+getCompatibleOffloadTargets(OffloadTargetInfo &CodeObjectInfo,
+ SmallVectorImpl<StringRef> &CompatibleTargets,
+ const OffloadBundlerConfig &BundlerConfig) {
+ if (!CompatibleTargets.empty()) {
+ DEBUG_WITH_TYPE("CodeObjectCompatibility",
+ dbgs() << "CompatibleTargets list should be empty\n");
+ return false;
+ }
+ for (auto &Target : BundlerConfig.TargetNames) {
+ auto TargetInfo = OffloadTargetInfo(Target, BundlerConfig);
+ if (isCodeObjectCompatible(CodeObjectInfo, TargetInfo))
+ CompatibleTargets.push_back(Target);
+ }
+ return !CompatibleTargets.empty();
+}
+
+// Check that each code object file in the input archive conforms to following
+// rule: for a specific processor, a feature either shows up in all target IDs,
+// or does not show up in any target IDs. Otherwise the target ID combination is
+// invalid.
+static Error
+CheckHeterogeneousArchive(StringRef ArchiveName,
+ const OffloadBundlerConfig &BundlerConfig) {
+ std::vector<std::unique_ptr<MemoryBuffer>> ArchiveBuffers;
+ ErrorOr<std::unique_ptr<MemoryBuffer>> BufOrErr =
+ MemoryBuffer::getFileOrSTDIN(ArchiveName, true, false);
+ if (std::error_code EC = BufOrErr.getError())
+ return createFileError(ArchiveName, EC);
+
+ ArchiveBuffers.push_back(std::move(*BufOrErr));
+ Expected<std::unique_ptr<llvm::object::Archive>> LibOrErr =
+ Archive::create(ArchiveBuffers.back()->getMemBufferRef());
+ if (!LibOrErr)
+ return LibOrErr.takeError();
+
+ auto Archive = std::move(*LibOrErr);
+
+ Error ArchiveErr = Error::success();
+ auto ChildEnd = Archive->child_end();
+
+ /// Iterate over all bundled code object files in the input archive.
+ for (auto ArchiveIter = Archive->child_begin(ArchiveErr);
+ ArchiveIter != ChildEnd; ++ArchiveIter) {
+ if (ArchiveErr)
+ return ArchiveErr;
+ auto ArchiveChildNameOrErr = (*ArchiveIter).getName();
+ if (!ArchiveChildNameOrErr)
+ return ArchiveChildNameOrErr.takeError();
+
+ auto CodeObjectBufferRefOrErr = (*ArchiveIter).getMemoryBufferRef();
+ if (!CodeObjectBufferRefOrErr)
+ return CodeObjectBufferRefOrErr.takeError();
+
+ auto CodeObjectBuffer =
+ MemoryBuffer::getMemBuffer(*CodeObjectBufferRefOrErr, false);
+
+ Expected<std::unique_ptr<FileHandler>> FileHandlerOrErr =
+ CreateFileHandler(*CodeObjectBuffer, BundlerConfig);
+ if (!FileHandlerOrErr)
+ return FileHandlerOrErr.takeError();
+
+ std::unique_ptr<FileHandler> &FileHandler = *FileHandlerOrErr;
+ assert(FileHandler);
+
+ std::set<StringRef> BundleIds;
+ auto CodeObjectFileError =
+ FileHandler->getBundleIDs(*CodeObjectBuffer, BundleIds);
+ if (CodeObjectFileError)
+ return CodeObjectFileError;
+
+ auto &&ConflictingArchs = clang::getConflictTargetIDCombination(BundleIds);
+ if (ConflictingArchs) {
+ std::string ErrMsg =
+ Twine("conflicting TargetIDs [" + ConflictingArchs.value().first +
+ ", " + ConflictingArchs.value().second + "] found in " +
+ ArchiveChildNameOrErr.get() + " of " + ArchiveName)
+ .str();
+ return createStringError(inconvertibleErrorCode(), ErrMsg);
+ }
+ }
+
+ return ArchiveErr;
+}
+
+/// UnbundleArchive takes an archive file (".a") as input containing bundled
+/// code object files, and a list of offload targets (not host), and extracts
+/// the code objects into a new archive file for each offload target. Each
+/// resulting archive file contains all code object files corresponding to that
+/// particular offload target. The created archive file does not
+/// contain an index of the symbols and code object files are named as
+/// <<Parent Bundle Name>-<CodeObject's TargetID>>, with ':' replaced with '_'.
+Error OffloadBundler::UnbundleArchive() {
+ std::vector<std::unique_ptr<MemoryBuffer>> ArchiveBuffers;
+
+ /// Map of target names with list of object files that will form the device
+ /// specific archive for that target
+ StringMap<std::vector<NewArchiveMember>> OutputArchivesMap;
+
+ // Map of target names and output archive filenames
+ StringMap<StringRef> TargetOutputFileNameMap;
+
+ auto Output = BundlerConfig.OutputFileNames.begin();
+ for (auto &Target : BundlerConfig.TargetNames) {
+ TargetOutputFileNameMap[Target] = *Output;
+ ++Output;
+ }
+
+ StringRef IFName = BundlerConfig.InputFileNames.front();
+
+ if (BundlerConfig.CheckInputArchive) {
+ // For a specific processor, a feature either shows up in all target IDs, or
+ // does not show up in any target IDs. Otherwise the target ID combination
+ // is invalid.
+ auto ArchiveError = CheckHeterogeneousArchive(IFName, BundlerConfig);
+ if (ArchiveError) {
+ return ArchiveError;
+ }
+ }
+
+ ErrorOr<std::unique_ptr<MemoryBuffer>> BufOrErr =
+ MemoryBuffer::getFileOrSTDIN(IFName, true, false);
+ if (std::error_code EC = BufOrErr.getError())
+ return createFileError(BundlerConfig.InputFileNames.front(), EC);
+
+ ArchiveBuffers.push_back(std::move(*BufOrErr));
+ Expected<std::unique_ptr<llvm::object::Archive>> LibOrErr =
+ Archive::create(ArchiveBuffers.back()->getMemBufferRef());
+ if (!LibOrErr)
+ return LibOrErr.takeError();
+
+ auto Archive = std::move(*LibOrErr);
+
+ Error ArchiveErr = Error::success();
+ auto ChildEnd = Archive->child_end();
+
+ /// Iterate over all bundled code object files in the input archive.
+ for (auto ArchiveIter = Archive->child_begin(ArchiveErr);
+ ArchiveIter != ChildEnd; ++ArchiveIter) {
+ if (ArchiveErr)
+ return ArchiveErr;
+ auto ArchiveChildNameOrErr = (*ArchiveIter).getName();
+ if (!ArchiveChildNameOrErr)
+ return ArchiveChildNameOrErr.takeError();
+
+ StringRef BundledObjectFile = sys::path::filename(*ArchiveChildNameOrErr);
+
+ auto CodeObjectBufferRefOrErr = (*ArchiveIter).getMemoryBufferRef();
+ if (!CodeObjectBufferRefOrErr)
+ return CodeObjectBufferRefOrErr.takeError();
+
+ auto TempCodeObjectBuffer =
+ MemoryBuffer::getMemBuffer(*CodeObjectBufferRefOrErr, false);
+
+ // Decompress the buffer if necessary.
+ Expected<std::unique_ptr<MemoryBuffer>> DecompressedBufferOrErr =
+ CompressedOffloadBundle::decompress(*TempCodeObjectBuffer,
+ BundlerConfig.Verbose);
+ if (!DecompressedBufferOrErr)
+ return createStringError(
+ inconvertibleErrorCode(),
+ "Failed to decompress code object: " +
+ llvm::toString(DecompressedBufferOrErr.takeError()));
+
+ MemoryBuffer &CodeObjectBuffer = **DecompressedBufferOrErr;
+
+ Expected<std::unique_ptr<FileHandler>> FileHandlerOrErr =
+ CreateFileHandler(CodeObjectBuffer, BundlerConfig);
+ if (!FileHandlerOrErr)
+ return FileHandlerOrErr.takeError();
+
+ std::unique_ptr<FileHandler> &FileHandler = *FileHandlerOrErr;
+ assert(FileHandler &&
+ "FileHandle creation failed for file in the archive!");
+
+ if (Error ReadErr = FileHandler->ReadHeader(CodeObjectBuffer))
+ return ReadErr;
+
+ Expected<std::optional<StringRef>> CurBundleIDOrErr =
+ FileHandler->ReadBundleStart(CodeObjectBuffer);
+ if (!CurBundleIDOrErr)
+ return CurBundleIDOrErr.takeError();
+
+ std::optional<StringRef> OptionalCurBundleID = *CurBundleIDOrErr;
+ // No device code in this child, skip.
+ if (!OptionalCurBundleID)
+ continue;
+ StringRef CodeObject = *OptionalCurBundleID;
+
+ // Process all bundle entries (CodeObjects) found in this child of input
+ // archive.
+ while (!CodeObject.empty()) {
+ SmallVector<StringRef> CompatibleTargets;
+ auto CodeObjectInfo = OffloadTargetInfo(CodeObject, BundlerConfig);
+ if (CodeObjectInfo.hasHostKind()) {
+ // Do nothing, we don't extract host code yet.
+ } else if (getCompatibleOffloadTargets(CodeObjectInfo, CompatibleTargets,
+ BundlerConfig)) {
+ std::string BundleData;
+ raw_string_ostream DataStream(BundleData);
+ if (Error Err = FileHandler->ReadBundle(DataStream, CodeObjectBuffer))
+ return Err;
+
+ for (auto &CompatibleTarget : CompatibleTargets) {
+ SmallString<128> BundledObjectFileName;
+ BundledObjectFileName.assign(BundledObjectFile);
+ auto OutputBundleName =
+ Twine(llvm::sys::path::stem(BundledObjectFileName) + "-" +
+ CodeObject +
+ getDeviceLibraryFileName(BundledObjectFileName,
+ CodeObjectInfo.TargetID))
+ .str();
+ // Replace ':' in optional target feature list with '_' to ensure
+ // cross-platform validity.
+ std::replace(OutputBundleName.begin(), OutputBundleName.end(), ':',
+ '_');
+
+ std::unique_ptr<MemoryBuffer> MemBuf = MemoryBuffer::getMemBufferCopy(
+ DataStream.str(), OutputBundleName);
+ ArchiveBuffers.push_back(std::move(MemBuf));
+ llvm::MemoryBufferRef MemBufRef =
+ MemoryBufferRef(*(ArchiveBuffers.back()));
+
+ // For inserting <CompatibleTarget, list<CodeObject>> entry in
+ // OutputArchivesMap.
+ if (!OutputArchivesMap.contains(CompatibleTarget)) {
+
+ std::vector<NewArchiveMember> ArchiveMembers;
+ ArchiveMembers.push_back(NewArchiveMember(MemBufRef));
+ OutputArchivesMap.insert_or_assign(CompatibleTarget,
+ std::move(ArchiveMembers));
+ } else {
+ OutputArchivesMap[CompatibleTarget].push_back(
+ NewArchiveMember(MemBufRef));
+ }
+ }
+ }
+
+ if (Error Err = FileHandler->ReadBundleEnd(CodeObjectBuffer))
+ return Err;
+
+ Expected<std::optional<StringRef>> NextTripleOrErr =
+ FileHandler->ReadBundleStart(CodeObjectBuffer);
+ if (!NextTripleOrErr)
+ return NextTripleOrErr.takeError();
+
+ CodeObject = ((*NextTripleOrErr).has_value()) ? **NextTripleOrErr : "";
+ } // End of processing of all bundle entries of this child of input archive.
+ } // End of while over children of input archive.
+
+ assert(!ArchiveErr && "Error occurred while reading archive!");
+
+ /// Write out an archive for each target
+ for (auto &Target : BundlerConfig.TargetNames) {
+ StringRef FileName = TargetOutputFileNameMap[Target];
+ StringMapIterator<std::vector<llvm::NewArchiveMember>> CurArchiveMembers =
+ OutputArchivesMap.find(Target);
+ if (CurArchiveMembers != OutputArchivesMap.end()) {
+ if (Error WriteErr = writeArchive(FileName, CurArchiveMembers->getValue(),
+ SymtabWritingMode::NormalSymtab,
+ getDefaultArchiveKindForHost(), true,
+ false, nullptr))
+ return WriteErr;
+ } else if (!BundlerConfig.AllowMissingBundles) {
+ std::string ErrMsg =
+ Twine("no compatible code object found for the target '" + Target +
+ "' in heterogeneous archive library: " + IFName)
+ .str();
+ return createStringError(inconvertibleErrorCode(), ErrMsg);
+ } else { // Create an empty archive file if no compatible code object is
+ // found and "allow-missing-bundles" is enabled. It ensures that
+ // the linker using output of this step doesn't complain about
+ // the missing input file.
+ std::vector<llvm::NewArchiveMember> EmptyArchive;
+ EmptyArchive.clear();
+ if (Error WriteErr = writeArchive(
+ FileName, EmptyArchive, SymtabWritingMode::NormalSymtab,
+ getDefaultArchiveKindForHost(), true, false, nullptr))
+ return WriteErr;
+ }
+ }
+
+ return Error::success();
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp b/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp
index 8770fb1cf9fe..56d497eb4c32 100644
--- a/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp
@@ -13,11 +13,14 @@
#include "clang/Driver/Options.h"
#include "clang/Driver/ToolChain.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SpecialCaseList.h"
-#include "llvm/Support/TargetParser.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/TargetParser/AArch64TargetParser.h"
+#include "llvm/TargetParser/RISCVTargetParser.h"
+#include "llvm/TargetParser/TargetParser.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h"
#include <memory>
@@ -33,32 +36,33 @@ static const SanitizerMask NeedsUbsanRt =
static const SanitizerMask NeedsUbsanCxxRt =
SanitizerKind::Vptr | SanitizerKind::CFI;
static const SanitizerMask NotAllowedWithTrap = SanitizerKind::Vptr;
-static const SanitizerMask NotAllowedWithMinimalRuntime =
- SanitizerKind::Function | SanitizerKind::Vptr;
-static const SanitizerMask RequiresPIE =
- SanitizerKind::DataFlow | SanitizerKind::HWAddress | SanitizerKind::Scudo;
+static const SanitizerMask NotAllowedWithMinimalRuntime = SanitizerKind::Vptr;
+static const SanitizerMask NotAllowedWithExecuteOnly =
+ SanitizerKind::Function | SanitizerKind::KCFI;
static const SanitizerMask NeedsUnwindTables =
SanitizerKind::Address | SanitizerKind::HWAddress | SanitizerKind::Thread |
SanitizerKind::Memory | SanitizerKind::DataFlow;
static const SanitizerMask SupportsCoverage =
SanitizerKind::Address | SanitizerKind::HWAddress |
SanitizerKind::KernelAddress | SanitizerKind::KernelHWAddress |
- SanitizerKind::MemTag | SanitizerKind::Memory |
+ SanitizerKind::MemtagStack | SanitizerKind::MemtagHeap |
+ SanitizerKind::MemtagGlobals | SanitizerKind::Memory |
SanitizerKind::KernelMemory | SanitizerKind::Leak |
SanitizerKind::Undefined | SanitizerKind::Integer | SanitizerKind::Bounds |
SanitizerKind::ImplicitConversion | SanitizerKind::Nullability |
SanitizerKind::DataFlow | SanitizerKind::Fuzzer |
SanitizerKind::FuzzerNoLink | SanitizerKind::FloatDivideByZero |
SanitizerKind::SafeStack | SanitizerKind::ShadowCallStack |
- SanitizerKind::Thread | SanitizerKind::ObjCCast;
+ SanitizerKind::Thread | SanitizerKind::ObjCCast | SanitizerKind::KCFI;
static const SanitizerMask RecoverableByDefault =
SanitizerKind::Undefined | SanitizerKind::Integer |
SanitizerKind::ImplicitConversion | SanitizerKind::Nullability |
SanitizerKind::FloatDivideByZero | SanitizerKind::ObjCCast;
static const SanitizerMask Unrecoverable =
SanitizerKind::Unreachable | SanitizerKind::Return;
-static const SanitizerMask AlwaysRecoverable =
- SanitizerKind::KernelAddress | SanitizerKind::KernelHWAddress;
+static const SanitizerMask AlwaysRecoverable = SanitizerKind::KernelAddress |
+ SanitizerKind::KernelHWAddress |
+ SanitizerKind::KCFI;
static const SanitizerMask NeedsLTO = SanitizerKind::CFI;
static const SanitizerMask TrappingSupported =
(SanitizerKind::Undefined & ~SanitizerKind::Vptr) | SanitizerKind::Integer |
@@ -72,7 +76,8 @@ static const SanitizerMask CFIClasses =
SanitizerKind::CFIUnrelatedCast;
static const SanitizerMask CompatibleWithMinimalRuntime =
TrappingSupported | SanitizerKind::Scudo | SanitizerKind::ShadowCallStack |
- SanitizerKind::MemTag;
+ SanitizerKind::MemtagStack | SanitizerKind::MemtagHeap |
+ SanitizerKind::MemtagGlobals | SanitizerKind::KCFI;
enum CoverageFeature {
CoverageFunc = 1 << 0,
@@ -91,6 +96,15 @@ enum CoverageFeature {
CoveragePCTable = 1 << 13,
CoverageStackDepth = 1 << 14,
CoverageInlineBoolFlag = 1 << 15,
+ CoverageTraceLoads = 1 << 16,
+ CoverageTraceStores = 1 << 17,
+ CoverageControlFlow = 1 << 18,
+};
+
+enum BinaryMetadataFeature {
+ BinaryMetadataCovered = 1 << 0,
+ BinaryMetadataAtomics = 1 << 1,
+ BinaryMetadataUAR = 1 << 2,
};
/// Parse a -fsanitize= or -fno-sanitize= argument's values, diagnosing any
@@ -100,7 +114,13 @@ static SanitizerMask parseArgValues(const Driver &D, const llvm::opt::Arg *A,
/// Parse -f(no-)?sanitize-coverage= flag values, diagnosing any invalid
/// components. Returns OR of members of \c CoverageFeature enumeration.
-static int parseCoverageFeatures(const Driver &D, const llvm::opt::Arg *A);
+static int parseCoverageFeatures(const Driver &D, const llvm::opt::Arg *A,
+ bool DiagnoseErrors);
+
+/// Parse -f(no-)?sanitize-metadata= flag values, diagnosing any invalid
+/// components. Returns OR of members of \c BinaryMetadataFeature enumeration.
+static int parseBinaryMetadataFeatures(const Driver &D, const llvm::opt::Arg *A,
+ bool DiagnoseErrors);
/// Produce an argument string from ArgList \p Args, which shows how it
/// provides some sanitizer kind from \p Mask. For example, the argument list
@@ -121,21 +141,33 @@ static std::string describeSanitizeArg(const llvm::opt::Arg *A,
/// Sanitizers set.
static std::string toString(const clang::SanitizerSet &Sanitizers);
+/// Return true if an execute-only target disallows data access to code
+/// sections.
+static bool isExecuteOnlyTarget(const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args) {
+ if (Triple.isPS5())
+ return true;
+ return Args.hasFlagNoClaim(options::OPT_mexecute_only,
+ options::OPT_mno_execute_only, false);
+}
+
static void validateSpecialCaseListFormat(const Driver &D,
std::vector<std::string> &SCLFiles,
- unsigned MalformedSCLErrorDiagID) {
+ unsigned MalformedSCLErrorDiagID,
+ bool DiagnoseErrors) {
if (SCLFiles.empty())
return;
std::string BLError;
std::unique_ptr<llvm::SpecialCaseList> SCL(
llvm::SpecialCaseList::create(SCLFiles, D.getVFS(), BLError));
- if (!SCL.get())
+ if (!SCL.get() && DiagnoseErrors)
D.Diag(MalformedSCLErrorDiagID) << BLError;
}
static void addDefaultIgnorelists(const Driver &D, SanitizerMask Kinds,
- std::vector<std::string> &IgnorelistFiles) {
+ std::vector<std::string> &IgnorelistFiles,
+ bool DiagnoseErrors) {
struct Ignorelist {
const char *File;
SanitizerMask Mask;
@@ -158,32 +190,34 @@ static void addDefaultIgnorelists(const Driver &D, SanitizerMask Kinds,
clang::SmallString<64> Path(D.ResourceDir);
llvm::sys::path::append(Path, "share", BL.File);
if (D.getVFS().exists(Path))
- IgnorelistFiles.push_back(std::string(Path.str()));
- else if (BL.Mask == SanitizerKind::CFI)
+ IgnorelistFiles.push_back(std::string(Path));
+ else if (BL.Mask == SanitizerKind::CFI && DiagnoseErrors)
// If cfi_ignorelist.txt cannot be found in the resource dir, driver
// should fail.
- D.Diag(clang::diag::err_drv_no_such_file) << Path;
+ D.Diag(clang::diag::err_drv_missing_sanitizer_ignorelist) << Path;
}
validateSpecialCaseListFormat(
- D, IgnorelistFiles, clang::diag::err_drv_malformed_sanitizer_ignorelist);
+ D, IgnorelistFiles, clang::diag::err_drv_malformed_sanitizer_ignorelist,
+ DiagnoseErrors);
}
-/// Parse -f(no-)?sanitize-(coverage-)?(white|ignore)list argument's values,
+/// Parse -f(no-)?sanitize-(coverage-)?(allow|ignore)list argument's values,
/// diagnosing any invalid file paths and validating special case list format.
static void parseSpecialCaseListArg(const Driver &D,
const llvm::opt::ArgList &Args,
std::vector<std::string> &SCLFiles,
llvm::opt::OptSpecifier SCLOptionID,
llvm::opt::OptSpecifier NoSCLOptionID,
- unsigned MalformedSCLErrorDiagID) {
+ unsigned MalformedSCLErrorDiagID,
+ bool DiagnoseErrors) {
for (const auto *Arg : Args) {
- // Match -fsanitize-(coverage-)?(white|ignore)list.
+ // Match -fsanitize-(coverage-)?(allow|ignore)list.
if (Arg->getOption().matches(SCLOptionID)) {
Arg->claim();
std::string SCLPath = Arg->getValue();
if (D.getVFS().exists(SCLPath)) {
SCLFiles.push_back(SCLPath);
- } else {
+ } else if (DiagnoseErrors) {
D.Diag(clang::diag::err_drv_no_such_file) << SCLPath;
}
// Match -fno-sanitize-ignorelist.
@@ -192,7 +226,8 @@ static void parseSpecialCaseListArg(const Driver &D,
SCLFiles.clear();
}
}
- validateSpecialCaseListFormat(D, SCLFiles, MalformedSCLErrorDiagID);
+ validateSpecialCaseListFormat(D, SCLFiles, MalformedSCLErrorDiagID,
+ DiagnoseErrors);
}
/// Sets group bits for every group that has at least one representative already
@@ -207,30 +242,31 @@ static SanitizerMask setGroupBits(SanitizerMask Kinds) {
}
static SanitizerMask parseSanitizeTrapArgs(const Driver &D,
- const llvm::opt::ArgList &Args) {
- SanitizerMask TrapRemove; // During the loop below, the accumulated set of
- // sanitizers disabled by the current sanitizer
- // argument or any argument after it.
+ const llvm::opt::ArgList &Args,
+ bool DiagnoseErrors) {
+ SanitizerMask TrapRemove; // During the loop below, the accumulated set of
+ // sanitizers disabled by the current sanitizer
+ // argument or any argument after it.
SanitizerMask TrappingKinds;
SanitizerMask TrappingSupportedWithGroups = setGroupBits(TrappingSupported);
- for (ArgList::const_reverse_iterator I = Args.rbegin(), E = Args.rend();
- I != E; ++I) {
- const auto *Arg = *I;
+ for (const llvm::opt::Arg *Arg : llvm::reverse(Args)) {
if (Arg->getOption().matches(options::OPT_fsanitize_trap_EQ)) {
Arg->claim();
SanitizerMask Add = parseArgValues(D, Arg, true);
Add &= ~TrapRemove;
- if (SanitizerMask InvalidValues = Add & ~TrappingSupportedWithGroups) {
+ SanitizerMask InvalidValues = Add & ~TrappingSupportedWithGroups;
+ if (InvalidValues && DiagnoseErrors) {
SanitizerSet S;
S.Mask = InvalidValues;
- D.Diag(diag::err_drv_unsupported_option_argument) << "-fsanitize-trap"
- << toString(S);
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << Arg->getSpelling() << toString(S);
}
TrappingKinds |= expandSanitizerGroups(Add) & ~TrapRemove;
} else if (Arg->getOption().matches(options::OPT_fno_sanitize_trap_EQ)) {
Arg->claim();
- TrapRemove |= expandSanitizerGroups(parseArgValues(D, Arg, true));
+ TrapRemove |=
+ expandSanitizerGroups(parseArgValues(D, Arg, DiagnoseErrors));
}
}
@@ -265,9 +301,7 @@ bool SanitizerArgs::needsCfiDiagRt() const {
CfiCrossDso && !ImplicitCfiRuntime;
}
-bool SanitizerArgs::requiresPIE() const {
- return NeedPIE || (Sanitizers.Mask & RequiresPIE);
-}
+bool SanitizerArgs::requiresPIE() const { return NeedPIE; }
bool SanitizerArgs::needsUnwindTables() const {
return static_cast<bool>(Sanitizers.Mask & NeedsUnwindTables);
@@ -278,17 +312,18 @@ bool SanitizerArgs::needsLTO() const {
}
SanitizerArgs::SanitizerArgs(const ToolChain &TC,
- const llvm::opt::ArgList &Args) {
+ const llvm::opt::ArgList &Args,
+ bool DiagnoseErrors) {
SanitizerMask AllRemove; // During the loop below, the accumulated set of
// sanitizers disabled by the current sanitizer
// argument or any argument after it.
- SanitizerMask AllAddedKinds; // Mask of all sanitizers ever enabled by
- // -fsanitize= flags (directly or via group
- // expansion), some of which may be disabled
- // later. Used to carefully prune
- // unused-argument diagnostics.
- SanitizerMask DiagnosedKinds; // All Kinds we have diagnosed up to now.
- // Used to deduplicate diagnostics.
+ SanitizerMask AllAddedKinds; // Mask of all sanitizers ever enabled by
+ // -fsanitize= flags (directly or via group
+ // expansion), some of which may be disabled
+ // later. Used to carefully prune
+ // unused-argument diagnostics.
+ SanitizerMask DiagnosedKinds; // All Kinds we have diagnosed up to now.
+ // Used to deduplicate diagnostics.
SanitizerMask Kinds;
const SanitizerMask Supported = setGroupBits(TC.getSupportedSanitizers());
@@ -298,7 +333,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
ToolChain::RTTIMode RTTIMode = TC.getRTTIMode();
const Driver &D = TC.getDriver();
- SanitizerMask TrappingKinds = parseSanitizeTrapArgs(D, Args);
+ SanitizerMask TrappingKinds = parseSanitizeTrapArgs(D, Args, DiagnoseErrors);
SanitizerMask InvalidTrappingKinds = TrappingKinds & NotAllowedWithTrap;
MinimalRuntime =
@@ -310,19 +345,17 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
bool RemoveObjectSizeAtO0 =
!OptLevel || OptLevel->getOption().matches(options::OPT_O0);
- for (ArgList::const_reverse_iterator I = Args.rbegin(), E = Args.rend();
- I != E; ++I) {
- const auto *Arg = *I;
+ for (const llvm::opt::Arg *Arg : llvm::reverse(Args)) {
if (Arg->getOption().matches(options::OPT_fsanitize_EQ)) {
Arg->claim();
- SanitizerMask Add = parseArgValues(D, Arg, /*AllowGroups=*/true);
+ SanitizerMask Add = parseArgValues(D, Arg, DiagnoseErrors);
if (RemoveObjectSizeAtO0) {
AllRemove |= SanitizerKind::ObjectSize;
// The user explicitly enabled the object size sanitizer. Warn
// that this does nothing at -O0.
- if (Add & SanitizerKind::ObjectSize)
+ if ((Add & SanitizerKind::ObjectSize) && DiagnoseErrors)
D.Diag(diag::warn_drv_object_size_disabled_O0)
<< Arg->getAsString(Args);
}
@@ -336,9 +369,11 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
// Diagnose them.
if (SanitizerMask KindsToDiagnose =
Add & InvalidTrappingKinds & ~DiagnosedKinds) {
- std::string Desc = describeSanitizeArg(*I, KindsToDiagnose);
- D.Diag(diag::err_drv_argument_not_allowed_with)
- << Desc << "-fsanitize-trap=undefined";
+ if (DiagnoseErrors) {
+ std::string Desc = describeSanitizeArg(Arg, KindsToDiagnose);
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << Desc << "-fsanitize-trap=undefined";
+ }
DiagnosedKinds |= KindsToDiagnose;
}
Add &= ~InvalidTrappingKinds;
@@ -346,14 +381,45 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
if (MinimalRuntime) {
if (SanitizerMask KindsToDiagnose =
Add & NotAllowedWithMinimalRuntime & ~DiagnosedKinds) {
- std::string Desc = describeSanitizeArg(*I, KindsToDiagnose);
- D.Diag(diag::err_drv_argument_not_allowed_with)
- << Desc << "-fsanitize-minimal-runtime";
+ if (DiagnoseErrors) {
+ std::string Desc = describeSanitizeArg(Arg, KindsToDiagnose);
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << Desc << "-fsanitize-minimal-runtime";
+ }
DiagnosedKinds |= KindsToDiagnose;
}
Add &= ~NotAllowedWithMinimalRuntime;
}
+ if (llvm::opt::Arg *A = Args.getLastArg(options::OPT_mcmodel_EQ)) {
+ StringRef CM = A->getValue();
+ if (CM != "small" &&
+ (Add & SanitizerKind::Function & ~DiagnosedKinds)) {
+ if (DiagnoseErrors)
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << "-fsanitize=function"
+ << "-mcmodel=small";
+ Add &= ~SanitizerKind::Function;
+ DiagnosedKinds |= SanitizerKind::Function;
+ }
+ }
+ // -fsanitize=function and -fsanitize=kcfi instrument indirect function
+ // calls to load a type hash before the function label. Therefore, an
+ // execute-only target doesn't support the function and kcfi sanitizers.
+ const llvm::Triple &Triple = TC.getTriple();
+ if (isExecuteOnlyTarget(Triple, Args)) {
+ if (SanitizerMask KindsToDiagnose =
+ Add & NotAllowedWithExecuteOnly & ~DiagnosedKinds) {
+ if (DiagnoseErrors) {
+ std::string Desc = describeSanitizeArg(Arg, KindsToDiagnose);
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << Desc << Triple.str();
+ }
+ DiagnosedKinds |= KindsToDiagnose;
+ }
+ Add &= ~NotAllowedWithExecuteOnly;
+ }
+
// FIXME: Make CFI on member function calls compatible with cross-DSO CFI.
// There are currently two problems:
// - Virtual function call checks need to pass a pointer to the function
@@ -365,17 +431,20 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
// Fixing both of those may require changes to the cross-DSO CFI
// interface.
if (CfiCrossDso && (Add & SanitizerKind::CFIMFCall & ~DiagnosedKinds)) {
- D.Diag(diag::err_drv_argument_not_allowed_with)
- << "-fsanitize=cfi-mfcall"
- << "-fsanitize-cfi-cross-dso";
+ if (DiagnoseErrors)
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << "-fsanitize=cfi-mfcall"
+ << "-fsanitize-cfi-cross-dso";
Add &= ~SanitizerKind::CFIMFCall;
DiagnosedKinds |= SanitizerKind::CFIMFCall;
}
if (SanitizerMask KindsToDiagnose = Add & ~Supported & ~DiagnosedKinds) {
- std::string Desc = describeSanitizeArg(*I, KindsToDiagnose);
- D.Diag(diag::err_drv_unsupported_opt_for_target)
- << Desc << TC.getTriple().str();
+ if (DiagnoseErrors) {
+ std::string Desc = describeSanitizeArg(Arg, KindsToDiagnose);
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << Desc << TC.getTriple().str();
+ }
DiagnosedKinds |= KindsToDiagnose;
}
Add &= Supported;
@@ -386,15 +455,17 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
if ((Add & SanitizerKind::Vptr) && (RTTIMode == ToolChain::RM_Disabled)) {
if (const llvm::opt::Arg *NoRTTIArg = TC.getRTTIArg()) {
assert(NoRTTIArg->getOption().matches(options::OPT_fno_rtti) &&
- "RTTI disabled without -fno-rtti option?");
+ "RTTI disabled without -fno-rtti option?");
// The user explicitly passed -fno-rtti with -fsanitize=vptr, but
// the vptr sanitizer requires RTTI, so this is a user error.
- D.Diag(diag::err_drv_argument_not_allowed_with)
- << "-fsanitize=vptr" << NoRTTIArg->getAsString(Args);
+ if (DiagnoseErrors)
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << "-fsanitize=vptr" << NoRTTIArg->getAsString(Args);
} else {
// The vptr sanitizer requires RTTI, but RTTI is disabled (by
// default). Warn that the vptr sanitizer is being disabled.
- D.Diag(diag::warn_drv_disabling_vptr_no_rtti_default);
+ if (DiagnoseErrors)
+ D.Diag(diag::warn_drv_disabling_vptr_no_rtti_default);
}
// Take out the Vptr sanitizer from the enabled sanitizers
@@ -410,6 +481,10 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
if (MinimalRuntime) {
Add &= ~NotAllowedWithMinimalRuntime;
}
+ // NotAllowedWithExecuteOnly is silently discarded on an execute-only
+ // target if implicitly enabled through group expansion.
+ if (isExecuteOnlyTarget(Triple, Args))
+ Add &= ~NotAllowedWithExecuteOnly;
if (CfiCrossDso)
Add &= ~SanitizerKind::CFIMFCall;
Add &= Supported;
@@ -429,7 +504,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
Kinds |= Add;
} else if (Arg->getOption().matches(options::OPT_fno_sanitize_EQ)) {
Arg->claim();
- SanitizerMask Remove = parseArgValues(D, Arg, true);
+ SanitizerMask Remove = parseArgValues(D, Arg, DiagnoseErrors);
AllRemove |= expandSanitizerGroups(Remove);
}
}
@@ -469,7 +544,8 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
std::make_pair(SanitizerKind::MemTag,
SanitizerKind::Address | SanitizerKind::KernelAddress |
SanitizerKind::HWAddress |
- SanitizerKind::KernelHWAddress)};
+ SanitizerKind::KernelHWAddress),
+ std::make_pair(SanitizerKind::KCFI, SanitizerKind::Function)};
// Enable toolchain specific default sanitizers if not explicitly disabled.
SanitizerMask Default = TC.getDefaultSanitizers() & ~AllRemove;
@@ -490,16 +566,14 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
}
// Check that LTO is enabled if we need it.
- if ((Kinds & NeedsLTO) && !D.isUsingLTO()) {
+ if ((Kinds & NeedsLTO) && !D.isUsingLTO() && DiagnoseErrors) {
D.Diag(diag::err_drv_argument_only_allowed_with)
<< lastArgumentForMask(D, Args, Kinds & NeedsLTO) << "-flto";
}
- if ((Kinds & SanitizerKind::ShadowCallStack) &&
- ((TC.getTriple().isAArch64() &&
- !llvm::AArch64::isX18ReservedByDefault(TC.getTriple())) ||
- TC.getTriple().isRISCV()) &&
- !Args.hasArg(options::OPT_ffixed_x18)) {
+ if ((Kinds & SanitizerKind::ShadowCallStack) && TC.getTriple().isAArch64() &&
+ !llvm::AArch64::isX18ReservedByDefault(TC.getTriple()) &&
+ !Args.hasArg(options::OPT_ffixed_x18) && DiagnoseErrors) {
D.Diag(diag::err_drv_argument_only_allowed_with)
<< lastArgumentForMask(D, Args, Kinds & SanitizerKind::ShadowCallStack)
<< "-ffixed-x18";
@@ -518,8 +592,9 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
if (KindsToDiagnose) {
SanitizerSet S;
S.Mask = KindsToDiagnose;
- D.Diag(diag::err_drv_unsupported_opt_for_target)
- << ("-fno-sanitize-trap=" + toString(S)) << TC.getTriple().str();
+ if (DiagnoseErrors)
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << ("-fno-sanitize-trap=" + toString(S)) << TC.getTriple().str();
Kinds &= ~KindsToDiagnose;
}
}
@@ -529,9 +604,10 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
SanitizerMask Group = G.first;
if (Kinds & Group) {
if (SanitizerMask Incompatible = Kinds & G.second) {
- D.Diag(clang::diag::err_drv_argument_not_allowed_with)
- << lastArgumentForMask(D, Args, Group)
- << lastArgumentForMask(D, Args, Incompatible);
+ if (DiagnoseErrors)
+ D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << lastArgumentForMask(D, Args, Group)
+ << lastArgumentForMask(D, Args, Incompatible);
Kinds &= ~Incompatible;
}
}
@@ -547,29 +623,31 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
SanitizerMask DiagnosedAlwaysRecoverableKinds;
for (const auto *Arg : Args) {
if (Arg->getOption().matches(options::OPT_fsanitize_recover_EQ)) {
- SanitizerMask Add = parseArgValues(D, Arg, true);
+ SanitizerMask Add = parseArgValues(D, Arg, DiagnoseErrors);
// Report error if user explicitly tries to recover from unrecoverable
// sanitizer.
if (SanitizerMask KindsToDiagnose =
Add & Unrecoverable & ~DiagnosedUnrecoverableKinds) {
SanitizerSet SetToDiagnose;
SetToDiagnose.Mask |= KindsToDiagnose;
- D.Diag(diag::err_drv_unsupported_option_argument)
- << Arg->getOption().getName() << toString(SetToDiagnose);
+ if (DiagnoseErrors)
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << Arg->getSpelling() << toString(SetToDiagnose);
DiagnosedUnrecoverableKinds |= KindsToDiagnose;
}
RecoverableKinds |= expandSanitizerGroups(Add);
Arg->claim();
} else if (Arg->getOption().matches(options::OPT_fno_sanitize_recover_EQ)) {
- SanitizerMask Remove = parseArgValues(D, Arg, true);
+ SanitizerMask Remove = parseArgValues(D, Arg, DiagnoseErrors);
// Report error if user explicitly tries to disable recovery from
// always recoverable sanitizer.
if (SanitizerMask KindsToDiagnose =
Remove & AlwaysRecoverable & ~DiagnosedAlwaysRecoverableKinds) {
SanitizerSet SetToDiagnose;
SetToDiagnose.Mask |= KindsToDiagnose;
- D.Diag(diag::err_drv_unsupported_option_argument)
- << Arg->getOption().getName() << toString(SetToDiagnose);
+ if (DiagnoseErrors)
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << Arg->getSpelling() << toString(SetToDiagnose);
DiagnosedAlwaysRecoverableKinds |= KindsToDiagnose;
}
RecoverableKinds &= ~expandSanitizerGroups(Remove);
@@ -586,42 +664,57 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
// Add default ignorelist from resource directory for activated sanitizers,
// and validate special case lists format.
if (!Args.hasArgNoClaim(options::OPT_fno_sanitize_ignorelist))
- addDefaultIgnorelists(D, Kinds, SystemIgnorelistFiles);
+ addDefaultIgnorelists(D, Kinds, SystemIgnorelistFiles, DiagnoseErrors);
// Parse -f(no-)?sanitize-ignorelist options.
// This also validates special case lists format.
- parseSpecialCaseListArg(D, Args, UserIgnorelistFiles,
- options::OPT_fsanitize_ignorelist_EQ,
- options::OPT_fno_sanitize_ignorelist,
- clang::diag::err_drv_malformed_sanitizer_ignorelist);
+ parseSpecialCaseListArg(
+ D, Args, UserIgnorelistFiles, options::OPT_fsanitize_ignorelist_EQ,
+ options::OPT_fno_sanitize_ignorelist,
+ clang::diag::err_drv_malformed_sanitizer_ignorelist, DiagnoseErrors);
// Parse -f[no-]sanitize-memory-track-origins[=level] options.
if (AllAddedKinds & SanitizerKind::Memory) {
if (Arg *A =
Args.getLastArg(options::OPT_fsanitize_memory_track_origins_EQ,
- options::OPT_fsanitize_memory_track_origins,
options::OPT_fno_sanitize_memory_track_origins)) {
- if (A->getOption().matches(options::OPT_fsanitize_memory_track_origins)) {
- MsanTrackOrigins = 2;
- } else if (A->getOption().matches(
- options::OPT_fno_sanitize_memory_track_origins)) {
- MsanTrackOrigins = 0;
- } else {
+ if (!A->getOption().matches(
+ options::OPT_fno_sanitize_memory_track_origins)) {
StringRef S = A->getValue();
if (S.getAsInteger(0, MsanTrackOrigins) || MsanTrackOrigins < 0 ||
MsanTrackOrigins > 2) {
- D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+ if (DiagnoseErrors)
+ D.Diag(clang::diag::err_drv_invalid_value)
+ << A->getAsString(Args) << S;
}
}
}
- MsanUseAfterDtor =
- Args.hasFlag(options::OPT_fsanitize_memory_use_after_dtor,
- options::OPT_fno_sanitize_memory_use_after_dtor,
- MsanUseAfterDtor);
- NeedPIE |= !(TC.getTriple().isOSLinux() &&
- TC.getTriple().getArch() == llvm::Triple::x86_64);
+ MsanUseAfterDtor = Args.hasFlag(
+ options::OPT_fsanitize_memory_use_after_dtor,
+ options::OPT_fno_sanitize_memory_use_after_dtor, MsanUseAfterDtor);
+ MsanParamRetval = Args.hasFlag(
+ options::OPT_fsanitize_memory_param_retval,
+ options::OPT_fno_sanitize_memory_param_retval, MsanParamRetval);
+ } else if (AllAddedKinds & SanitizerKind::KernelMemory) {
+ MsanUseAfterDtor = false;
+ MsanParamRetval = Args.hasFlag(
+ options::OPT_fsanitize_memory_param_retval,
+ options::OPT_fno_sanitize_memory_param_retval, MsanParamRetval);
} else {
MsanUseAfterDtor = false;
+ MsanParamRetval = false;
+ }
+
+ if (AllAddedKinds & SanitizerKind::MemTag) {
+ StringRef S =
+ Args.getLastArgValue(options::OPT_fsanitize_memtag_mode_EQ, "sync");
+ if (S == "async" || S == "sync") {
+ MemtagMode = S.str();
+ } else {
+ D.Diag(clang::diag::err_drv_invalid_value_with_suggestion)
+ << "-fsanitize-memtag-mode=" << S << "{async, sync}";
+ MemtagMode = "sync";
+ }
}
if (AllAddedKinds & SanitizerKind::Thread) {
@@ -643,7 +736,10 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
CfiICallGeneralizePointers =
Args.hasArg(options::OPT_fsanitize_cfi_icall_generalize_pointers);
- if (CfiCrossDso && CfiICallGeneralizePointers)
+ CfiICallNormalizeIntegers =
+ Args.hasArg(options::OPT_fsanitize_cfi_icall_normalize_integers);
+
+ if (CfiCrossDso && CfiICallGeneralizePointers && DiagnoseErrors)
D.Diag(diag::err_drv_argument_not_allowed_with)
<< "-fsanitize-cfi-cross-dso"
<< "-fsanitize-cfi-icall-generalize-pointers";
@@ -653,19 +749,29 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
options::OPT_fno_sanitize_cfi_canonical_jump_tables, true);
}
+ if (AllAddedKinds & SanitizerKind::KCFI) {
+ CfiICallNormalizeIntegers =
+ Args.hasArg(options::OPT_fsanitize_cfi_icall_normalize_integers);
+
+ if (AllAddedKinds & SanitizerKind::CFI && DiagnoseErrors)
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << "-fsanitize=kcfi"
+ << lastArgumentForMask(D, Args, SanitizerKind::CFI);
+ }
+
Stats = Args.hasFlag(options::OPT_fsanitize_stats,
options::OPT_fno_sanitize_stats, false);
if (MinimalRuntime) {
SanitizerMask IncompatibleMask =
Kinds & ~setGroupBits(CompatibleWithMinimalRuntime);
- if (IncompatibleMask)
+ if (IncompatibleMask && DiagnoseErrors)
D.Diag(clang::diag::err_drv_argument_not_allowed_with)
<< "-fsanitize-minimal-runtime"
<< lastArgumentForMask(D, Args, IncompatibleMask);
SanitizerMask NonTrappingCfi = Kinds & SanitizerKind::CFI & ~TrappingKinds;
- if (NonTrappingCfi)
+ if (NonTrappingCfi && DiagnoseErrors)
D.Diag(clang::diag::err_drv_argument_only_allowed_with)
<< "fsanitize-minimal-runtime"
<< "fsanitize-trap=cfi";
@@ -681,13 +787,13 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
.getAsInteger(0, LegacySanitizeCoverage)) {
CoverageFeatures = 0;
Arg->claim();
- if (LegacySanitizeCoverage != 0) {
+ if (LegacySanitizeCoverage != 0 && DiagnoseErrors) {
D.Diag(diag::warn_drv_deprecated_arg)
<< Arg->getAsString(Args) << "-fsanitize-coverage=trace-pc-guard";
}
continue;
}
- CoverageFeatures |= parseCoverageFeatures(D, Arg);
+ CoverageFeatures |= parseCoverageFeatures(D, Arg, DiagnoseErrors);
// Disable coverage and not claim the flags if there is at least one
// non-supporting sanitizer.
@@ -698,56 +804,60 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
}
} else if (Arg->getOption().matches(options::OPT_fno_sanitize_coverage)) {
Arg->claim();
- CoverageFeatures &= ~parseCoverageFeatures(D, Arg);
+ CoverageFeatures &= ~parseCoverageFeatures(D, Arg, DiagnoseErrors);
}
}
// Choose at most one coverage type: function, bb, or edge.
- if ((CoverageFeatures & CoverageFunc) && (CoverageFeatures & CoverageBB))
- D.Diag(clang::diag::err_drv_argument_not_allowed_with)
- << "-fsanitize-coverage=func"
- << "-fsanitize-coverage=bb";
- if ((CoverageFeatures & CoverageFunc) && (CoverageFeatures & CoverageEdge))
- D.Diag(clang::diag::err_drv_argument_not_allowed_with)
- << "-fsanitize-coverage=func"
- << "-fsanitize-coverage=edge";
- if ((CoverageFeatures & CoverageBB) && (CoverageFeatures & CoverageEdge))
- D.Diag(clang::diag::err_drv_argument_not_allowed_with)
- << "-fsanitize-coverage=bb"
- << "-fsanitize-coverage=edge";
- // Basic block tracing and 8-bit counters require some type of coverage
- // enabled.
- if (CoverageFeatures & CoverageTraceBB)
- D.Diag(clang::diag::warn_drv_deprecated_arg)
- << "-fsanitize-coverage=trace-bb"
- << "-fsanitize-coverage=trace-pc-guard";
- if (CoverageFeatures & Coverage8bitCounters)
- D.Diag(clang::diag::warn_drv_deprecated_arg)
- << "-fsanitize-coverage=8bit-counters"
- << "-fsanitize-coverage=trace-pc-guard";
+ if (DiagnoseErrors) {
+ if ((CoverageFeatures & CoverageFunc) && (CoverageFeatures & CoverageBB))
+ D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << "-fsanitize-coverage=func"
+ << "-fsanitize-coverage=bb";
+ if ((CoverageFeatures & CoverageFunc) && (CoverageFeatures & CoverageEdge))
+ D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << "-fsanitize-coverage=func"
+ << "-fsanitize-coverage=edge";
+ if ((CoverageFeatures & CoverageBB) && (CoverageFeatures & CoverageEdge))
+ D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << "-fsanitize-coverage=bb"
+ << "-fsanitize-coverage=edge";
+ // Basic block tracing and 8-bit counters require some type of coverage
+ // enabled.
+ if (CoverageFeatures & CoverageTraceBB)
+ D.Diag(clang::diag::warn_drv_deprecated_arg)
+ << "-fsanitize-coverage=trace-bb"
+ << "-fsanitize-coverage=trace-pc-guard";
+ if (CoverageFeatures & Coverage8bitCounters)
+ D.Diag(clang::diag::warn_drv_deprecated_arg)
+ << "-fsanitize-coverage=8bit-counters"
+ << "-fsanitize-coverage=trace-pc-guard";
+ }
int InsertionPointTypes = CoverageFunc | CoverageBB | CoverageEdge;
int InstrumentationTypes = CoverageTracePC | CoverageTracePCGuard |
- CoverageInline8bitCounters |
- CoverageInlineBoolFlag;
+ CoverageInline8bitCounters | CoverageTraceLoads |
+ CoverageTraceStores | CoverageInlineBoolFlag |
+ CoverageControlFlow;
if ((CoverageFeatures & InsertionPointTypes) &&
- !(CoverageFeatures & InstrumentationTypes)) {
+ !(CoverageFeatures & InstrumentationTypes) && DiagnoseErrors) {
D.Diag(clang::diag::warn_drv_deprecated_arg)
<< "-fsanitize-coverage=[func|bb|edge]"
- << "-fsanitize-coverage=[func|bb|edge],[trace-pc-guard|trace-pc]";
+ << "-fsanitize-coverage=[func|bb|edge],[trace-pc-guard|trace-pc],["
+ "control-flow]";
}
// trace-pc w/o func/bb/edge implies edge.
if (!(CoverageFeatures & InsertionPointTypes)) {
if (CoverageFeatures &
(CoverageTracePC | CoverageTracePCGuard | CoverageInline8bitCounters |
- CoverageInlineBoolFlag))
+ CoverageInlineBoolFlag | CoverageControlFlow))
CoverageFeatures |= CoverageEdge;
if (CoverageFeatures & CoverageStackDepth)
CoverageFeatures |= CoverageFunc;
}
- // Parse -fsanitize-coverage-(ignore|white)list options if coverage enabled.
+ // Parse -fsanitize-coverage-(allow|ignore)list options if coverage enabled.
// This also validates special case lists format.
// Here, OptSpecifier() acts as a never-matching command-line argument.
// So, there is no way to clear coverage lists but you can append to them.
@@ -755,11 +865,39 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
parseSpecialCaseListArg(
D, Args, CoverageAllowlistFiles,
options::OPT_fsanitize_coverage_allowlist, OptSpecifier(),
- clang::diag::err_drv_malformed_sanitizer_coverage_whitelist);
+ clang::diag::err_drv_malformed_sanitizer_coverage_allowlist,
+ DiagnoseErrors);
parseSpecialCaseListArg(
D, Args, CoverageIgnorelistFiles,
options::OPT_fsanitize_coverage_ignorelist, OptSpecifier(),
- clang::diag::err_drv_malformed_sanitizer_coverage_ignorelist);
+ clang::diag::err_drv_malformed_sanitizer_coverage_ignorelist,
+ DiagnoseErrors);
+ }
+
+ // Parse -f(no-)?sanitize-metadata.
+ for (const auto *Arg :
+ Args.filtered(options::OPT_fexperimental_sanitize_metadata_EQ,
+ options::OPT_fno_experimental_sanitize_metadata_EQ)) {
+ if (Arg->getOption().matches(
+ options::OPT_fexperimental_sanitize_metadata_EQ)) {
+ Arg->claim();
+ BinaryMetadataFeatures |=
+ parseBinaryMetadataFeatures(D, Arg, DiagnoseErrors);
+ } else {
+ Arg->claim();
+ BinaryMetadataFeatures &=
+ ~parseBinaryMetadataFeatures(D, Arg, DiagnoseErrors);
+ }
+ }
+
+ // Parse -fsanitize-metadata-ignorelist option if enabled.
+ if (BinaryMetadataFeatures) {
+ parseSpecialCaseListArg(
+ D, Args, BinaryMetadataIgnorelistFiles,
+ options::OPT_fexperimental_sanitize_metadata_ignorelist_EQ,
+ OptSpecifier(), // Cannot clear ignore list, only append.
+ clang::diag::err_drv_malformed_sanitizer_metadata_ignorelist,
+ DiagnoseErrors);
}
SharedRuntime =
@@ -773,12 +911,13 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
NeedPIE |= TC.getTriple().isOSFuchsia();
if (Arg *A =
Args.getLastArg(options::OPT_fsanitize_address_field_padding)) {
- StringRef S = A->getValue();
- // Legal values are 0 and 1, 2, but in future we may add more levels.
- if (S.getAsInteger(0, AsanFieldPadding) || AsanFieldPadding < 0 ||
- AsanFieldPadding > 2) {
- D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
- }
+ StringRef S = A->getValue();
+ // Legal values are 0 and 1, 2, but in future we may add more levels.
+ if ((S.getAsInteger(0, AsanFieldPadding) || AsanFieldPadding < 0 ||
+ AsanFieldPadding > 2) &&
+ DiagnoseErrors) {
+ D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+ }
}
if (Arg *WindowsDebugRTArg =
@@ -789,13 +928,18 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
case options::OPT__SLASH_MTd:
case options::OPT__SLASH_MDd:
case options::OPT__SLASH_LDd:
- D.Diag(clang::diag::err_drv_argument_not_allowed_with)
- << WindowsDebugRTArg->getAsString(Args)
- << lastArgumentForMask(D, Args, SanitizerKind::Address);
- D.Diag(clang::diag::note_drv_address_sanitizer_debug_runtime);
+ if (DiagnoseErrors) {
+ D.Diag(clang::diag::err_drv_argument_not_allowed_with)
+ << WindowsDebugRTArg->getAsString(Args)
+ << lastArgumentForMask(D, Args, SanitizerKind::Address);
+ D.Diag(clang::diag::note_drv_address_sanitizer_debug_runtime);
+ }
}
}
+ StableABI = Args.hasFlag(options::OPT_fsanitize_stable_abi,
+ options::OPT_fno_sanitize_stable_abi, false);
+
AsanUseAfterScope = Args.hasFlag(
options::OPT_fsanitize_address_use_after_scope,
options::OPT_fno_sanitize_address_use_after_scope, AsanUseAfterScope);
@@ -810,18 +954,18 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
options::OPT_fno_sanitize_address_outline_instrumentation,
AsanOutlineInstrumentation);
- // As a workaround for a bug in gold 2.26 and earlier, dead stripping of
- // globals in ASan is disabled by default on ELF targets.
- // See https://sourceware.org/bugzilla/show_bug.cgi?id=19002
- AsanGlobalsDeadStripping =
- !TC.getTriple().isOSBinFormatELF() || TC.getTriple().isOSFuchsia() ||
- TC.getTriple().isPS4() ||
- Args.hasArg(options::OPT_fsanitize_address_globals_dead_stripping);
+ AsanGlobalsDeadStripping = Args.hasFlag(
+ options::OPT_fsanitize_address_globals_dead_stripping,
+ options::OPT_fno_sanitize_address_globals_dead_stripping, true);
+ // Enable ODR indicators which allow better handling of mixed instrumented
+ // and uninstrumented globals. Disable them for Windows where weak odr
+ // indicators (.weak.__odr_asan_gen*) may cause multiple definition linker
+ // errors in the absence of -lldmingw.
AsanUseOdrIndicator =
Args.hasFlag(options::OPT_fsanitize_address_use_odr_indicator,
options::OPT_fno_sanitize_address_use_odr_indicator,
- AsanUseOdrIndicator);
+ !TC.getTriple().isOSWindows());
if (AllAddedKinds & SanitizerKind::PointerCompare & ~AllRemove) {
AsanInvalidPointerCmp = true;
@@ -840,9 +984,9 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
if (const auto *Arg =
Args.getLastArg(options::OPT_sanitize_address_destructor_EQ)) {
auto parsedAsanDtorKind = AsanDtorKindFromString(Arg->getValue());
- if (parsedAsanDtorKind == llvm::AsanDtorKind::Invalid) {
+ if (parsedAsanDtorKind == llvm::AsanDtorKind::Invalid && DiagnoseErrors) {
TC.getDriver().Diag(clang::diag::err_drv_unsupported_option_argument)
- << Arg->getOption().getName() << Arg->getValue();
+ << Arg->getSpelling() << Arg->getValue();
}
AsanDtorKind = parsedAsanDtorKind;
}
@@ -852,9 +996,10 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
auto parsedAsanUseAfterReturn =
AsanDetectStackUseAfterReturnModeFromString(Arg->getValue());
if (parsedAsanUseAfterReturn ==
- llvm::AsanDetectStackUseAfterReturnMode::Invalid) {
+ llvm::AsanDetectStackUseAfterReturnMode::Invalid &&
+ DiagnoseErrors) {
TC.getDriver().Diag(clang::diag::err_drv_unsupported_option_argument)
- << Arg->getOption().getName() << Arg->getValue();
+ << Arg->getSpelling() << Arg->getValue();
}
AsanUseAfterReturn = parsedAsanUseAfterReturn;
}
@@ -864,7 +1009,8 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
// -fsanitize=pointer-compare/pointer-subtract requires -fsanitize=address.
SanitizerMask DetectInvalidPointerPairs =
SanitizerKind::PointerCompare | SanitizerKind::PointerSubtract;
- if (AllAddedKinds & DetectInvalidPointerPairs & ~AllRemove) {
+ if ((AllAddedKinds & DetectInvalidPointerPairs & ~AllRemove) &&
+ DiagnoseErrors) {
TC.getDriver().Diag(clang::diag::err_drv_argument_only_allowed_with)
<< lastArgumentForMask(D, Args,
SanitizerKind::PointerCompare |
@@ -877,7 +1023,8 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
if (Arg *HwasanAbiArg =
Args.getLastArg(options::OPT_fsanitize_hwaddress_abi_EQ)) {
HwasanAbi = HwasanAbiArg->getValue();
- if (HwasanAbi != "platform" && HwasanAbi != "interceptor")
+ if (HwasanAbi != "platform" && HwasanAbi != "interceptor" &&
+ DiagnoseErrors)
D.Diag(clang::diag::err_drv_invalid_value)
<< HwasanAbiArg->getAsString(Args) << HwasanAbi;
} else {
@@ -956,7 +1103,8 @@ static void addIncludeLinkerOption(const ToolChain &TC,
}
static bool hasTargetFeatureMTE(const llvm::opt::ArgStringList &CmdArgs) {
- for (auto Start = CmdArgs.begin(), End = CmdArgs.end(); Start != End; ++Start) {
+ for (auto Start = CmdArgs.begin(), End = CmdArgs.end(); Start != End;
+ ++Start) {
auto It = std::find(Start, End, StringRef("+mte"));
if (It == End)
break;
@@ -973,13 +1121,16 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
// NVPTX doesn't currently support sanitizers. Bailing out here means
// that e.g. -fsanitize=address applies only to host code, which is what we
// want for now.
- //
- // AMDGPU sanitizer support is experimental and controlled by -fgpu-sanitize.
- if (TC.getTriple().isNVPTX() ||
- (TC.getTriple().isAMDGPU() &&
- !Args.hasFlag(options::OPT_fgpu_sanitize, options::OPT_fno_gpu_sanitize,
- false)))
+ if (TC.getTriple().isNVPTX())
return;
+ // AMDGPU sanitizer support is experimental and controlled by -fgpu-sanitize.
+ bool GPUSanitize = false;
+ if (TC.getTriple().isAMDGPU()) {
+ if (!Args.hasFlag(options::OPT_fgpu_sanitize, options::OPT_fno_gpu_sanitize,
+ true))
+ return;
+ GPUSanitize = true;
+ }
// Translate available CoverageFeatures to corresponding clang-cc1 flags.
// Do it even if Sanitizers.empty() since some forms of coverage don't require
@@ -1003,7 +1154,10 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
"-fsanitize-coverage-inline-bool-flag"),
std::make_pair(CoveragePCTable, "-fsanitize-coverage-pc-table"),
std::make_pair(CoverageNoPrune, "-fsanitize-coverage-no-prune"),
- std::make_pair(CoverageStackDepth, "-fsanitize-coverage-stack-depth")};
+ std::make_pair(CoverageStackDepth, "-fsanitize-coverage-stack-depth"),
+ std::make_pair(CoverageTraceLoads, "-fsanitize-coverage-trace-loads"),
+ std::make_pair(CoverageTraceStores, "-fsanitize-coverage-trace-stores"),
+ std::make_pair(CoverageControlFlow, "-fsanitize-coverage-control-flow")};
for (auto F : CoverageFlags) {
if (CoverageFeatures & F.first)
CmdArgs.push_back(F.second);
@@ -1013,6 +1167,23 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
addSpecialCaseListOpt(Args, CmdArgs, "-fsanitize-coverage-ignorelist=",
CoverageIgnorelistFiles);
+ if (!GPUSanitize) {
+ // Translate available BinaryMetadataFeatures to corresponding clang-cc1
+ // flags. Does not depend on any other sanitizers. Unsupported on GPUs.
+ const std::pair<int, std::string> BinaryMetadataFlags[] = {
+ std::make_pair(BinaryMetadataCovered, "covered"),
+ std::make_pair(BinaryMetadataAtomics, "atomics"),
+ std::make_pair(BinaryMetadataUAR, "uar")};
+ for (const auto &F : BinaryMetadataFlags) {
+ if (BinaryMetadataFeatures & F.first)
+ CmdArgs.push_back(
+ Args.MakeArgString("-fexperimental-sanitize-metadata=" + F.second));
+ }
+ addSpecialCaseListOpt(Args, CmdArgs,
+ "-fexperimental-sanitize-metadata-ignorelist=",
+ BinaryMetadataIgnorelistFiles);
+ }
+
if (TC.getTriple().isOSWindows() && needsUbsanRt()) {
// Instruct the code generator to embed linker directives in the object file
// that cause the required runtime libraries to be linked.
@@ -1061,6 +1232,9 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
if (MsanUseAfterDtor)
CmdArgs.push_back("-fsanitize-memory-use-after-dtor");
+ if (!MsanParamRetval)
+ CmdArgs.push_back("-fno-sanitize-memory-param-retval");
+
// FIXME: Pass these parameters as function attributes, not as -llvm flags.
if (!TsanMemoryAccess) {
CmdArgs.push_back("-mllvm");
@@ -1088,6 +1262,9 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
if (CfiICallGeneralizePointers)
CmdArgs.push_back("-fsanitize-cfi-icall-generalize-pointers");
+ if (CfiICallNormalizeIntegers)
+ CmdArgs.push_back("-fsanitize-cfi-icall-experimental-normalize-integers");
+
if (CfiCanonicalJumpTables)
CmdArgs.push_back("-fsanitize-cfi-canonical-jump-tables");
@@ -1110,8 +1287,8 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
if (AsanGlobalsDeadStripping)
CmdArgs.push_back("-fsanitize-address-globals-dead-stripping");
- if (AsanUseOdrIndicator)
- CmdArgs.push_back("-fsanitize-address-use-odr-indicator");
+ if (!AsanUseOdrIndicator)
+ CmdArgs.push_back("-fno-sanitize-address-use-odr-indicator");
if (AsanInvalidPointerCmp) {
CmdArgs.push_back("-mllvm");
@@ -1128,6 +1305,18 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
CmdArgs.push_back("-asan-instrumentation-with-call-threshold=0");
}
+ // When emitting Stable ABI instrumentation, force outlining calls and avoid
+ // inlining shadow memory poisoning. While this is a big performance burden
+ // for now it allows full abstraction from implementation details.
+ if (StableABI) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-asan-instrumentation-with-call-threshold=0");
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-asan-max-inline-poisoning-size=0");
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-asan-guard-against-version-mismatch=0");
+ }
+
// Only pass the option to the frontend if the user requested,
// otherwise the frontend will just use the codegen default.
if (AsanDtorKind != llvm::AsanDtorKind::Invalid) {
@@ -1146,7 +1335,7 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
CmdArgs.push_back(Args.MakeArgString("hwasan-abi=" + HwasanAbi));
}
- if (Sanitizers.has(SanitizerKind::HWAddress) && TC.getTriple().isAArch64()) {
+ if (Sanitizers.has(SanitizerKind::HWAddress) && !HwasanUseAliases) {
CmdArgs.push_back("-target-feature");
CmdArgs.push_back("+tagged-globals");
}
@@ -1187,7 +1376,8 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
<< "-fvisibility=";
}
- if (Sanitizers.has(SanitizerKind::MemTag) && !hasTargetFeatureMTE(CmdArgs))
+ if (Sanitizers.has(SanitizerKind::MemtagStack) &&
+ !hasTargetFeatureMTE(CmdArgs))
TC.getDriver().Diag(diag::err_stack_tagging_requires_hardware_feature);
}
@@ -1215,12 +1405,13 @@ SanitizerMask parseArgValues(const Driver &D, const llvm::opt::Arg *A,
Kinds |= Kind;
else if (DiagnoseErrors)
D.Diag(clang::diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << Value;
+ << A->getSpelling() << Value;
}
return Kinds;
}
-int parseCoverageFeatures(const Driver &D, const llvm::opt::Arg *A) {
+int parseCoverageFeatures(const Driver &D, const llvm::opt::Arg *A,
+ bool DiagnoseErrors) {
assert(A->getOption().matches(options::OPT_fsanitize_coverage) ||
A->getOption().matches(options::OPT_fno_sanitize_coverage));
int Features = 0;
@@ -1243,10 +1434,36 @@ int parseCoverageFeatures(const Driver &D, const llvm::opt::Arg *A) {
.Case("inline-bool-flag", CoverageInlineBoolFlag)
.Case("pc-table", CoveragePCTable)
.Case("stack-depth", CoverageStackDepth)
+ .Case("trace-loads", CoverageTraceLoads)
+ .Case("trace-stores", CoverageTraceStores)
+ .Case("control-flow", CoverageControlFlow)
+ .Default(0);
+ if (F == 0 && DiagnoseErrors)
+ D.Diag(clang::diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << Value;
+ Features |= F;
+ }
+ return Features;
+}
+
+int parseBinaryMetadataFeatures(const Driver &D, const llvm::opt::Arg *A,
+ bool DiagnoseErrors) {
+ assert(
+ A->getOption().matches(options::OPT_fexperimental_sanitize_metadata_EQ) ||
+ A->getOption().matches(
+ options::OPT_fno_experimental_sanitize_metadata_EQ));
+ int Features = 0;
+ for (int i = 0, n = A->getNumValues(); i != n; ++i) {
+ const char *Value = A->getValue(i);
+ int F = llvm::StringSwitch<int>(Value)
+ .Case("covered", BinaryMetadataCovered)
+ .Case("atomics", BinaryMetadataAtomics)
+ .Case("uar", BinaryMetadataUAR)
+ .Case("all", ~0)
.Default(0);
- if (F == 0)
+ if (F == 0 && DiagnoseErrors)
D.Diag(clang::diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << Value;
+ << A->getSpelling() << Value;
Features |= F;
}
return Features;
@@ -1273,8 +1490,8 @@ std::string lastArgumentForMask(const Driver &D, const llvm::opt::ArgList &Args,
}
std::string describeSanitizeArg(const llvm::opt::Arg *A, SanitizerMask Mask) {
- assert(A->getOption().matches(options::OPT_fsanitize_EQ)
- && "Invalid argument in describeSanitizerArg!");
+ assert(A->getOption().matches(options::OPT_fsanitize_EQ) &&
+ "Invalid argument in describeSanitizerArg!");
std::string Sanitizers;
for (int i = 0, n = A->getNumValues(); i != n; ++i) {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp
index 6c1b88141c45..388030592b48 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp
@@ -7,10 +7,12 @@
//===----------------------------------------------------------------------===//
#include "clang/Driver/ToolChain.h"
+#include "ToolChains/Arch/AArch64.h"
#include "ToolChains/Arch/ARM.h"
#include "ToolChains/Clang.h"
-#include "ToolChains/InterfaceStubs.h"
+#include "ToolChains/CommonArgs.h"
#include "ToolChains/Flang.h"
+#include "ToolChains/InterfaceStubs.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/Sanitizers.h"
#include "clang/Config/config.h"
@@ -24,22 +26,25 @@
#include "clang/Driver/XRayArgs.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/MCTargetOptions.h"
+#include "llvm/MC/TargetRegistry.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Option/OptTable.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/FileUtilities.h"
#include "llvm/Support/Path.h"
-#include "llvm/Support/TargetParser.h"
-#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/VersionTuple.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/TargetParser/AArch64TargetParser.h"
+#include "llvm/TargetParser/TargetParser.h"
+#include "llvm/TargetParser/Triple.h"
#include <cassert>
#include <cstddef>
#include <cstring>
@@ -67,25 +72,53 @@ static ToolChain::RTTIMode CalculateRTTIMode(const ArgList &Args,
return ToolChain::RM_Disabled;
}
- // -frtti is default, except for the PS4 CPU.
- return (Triple.isPS4CPU()) ? ToolChain::RM_Disabled : ToolChain::RM_Enabled;
+ // -frtti is default, except for the PS4/PS5 and DriverKit.
+ bool NoRTTI = Triple.isPS() || Triple.isDriverKit();
+ return NoRTTI ? ToolChain::RM_Disabled : ToolChain::RM_Enabled;
}
ToolChain::ToolChain(const Driver &D, const llvm::Triple &T,
const ArgList &Args)
: D(D), Triple(T), Args(Args), CachedRTTIArg(GetRTTIArgument(Args)),
CachedRTTIMode(CalculateRTTIMode(Args, Triple, CachedRTTIArg)) {
- std::string RuntimePath = getRuntimePath();
- if (getVFS().exists(RuntimePath))
- getLibraryPaths().push_back(RuntimePath);
+ auto addIfExists = [this](path_list &List, const std::string &Path) {
+ if (getVFS().exists(Path))
+ List.push_back(Path);
+ };
- std::string StdlibPath = getStdlibPath();
- if (getVFS().exists(StdlibPath))
- getFilePaths().push_back(StdlibPath);
+ if (std::optional<std::string> Path = getRuntimePath())
+ getLibraryPaths().push_back(*Path);
+ if (std::optional<std::string> Path = getStdlibPath())
+ getFilePaths().push_back(*Path);
+ for (const auto &Path : getArchSpecificLibPaths())
+ addIfExists(getFilePaths(), Path);
+}
+
+llvm::Expected<std::unique_ptr<llvm::MemoryBuffer>>
+ToolChain::executeToolChainProgram(StringRef Executable) const {
+ llvm::SmallString<64> OutputFile;
+ llvm::sys::fs::createTemporaryFile("toolchain-program", "txt", OutputFile);
+ llvm::FileRemover OutputRemover(OutputFile.c_str());
+ std::optional<llvm::StringRef> Redirects[] = {
+ {""},
+ OutputFile.str(),
+ {""},
+ };
+
+ std::string ErrorMessage;
+ if (llvm::sys::ExecuteAndWait(Executable, {}, {}, Redirects,
+ /* SecondsToWait */ 0,
+ /*MemoryLimit*/ 0, &ErrorMessage))
+ return llvm::createStringError(std::error_code(),
+ Executable + ": " + ErrorMessage);
- std::string CandidateLibPath = getArchSpecificLibPath();
- if (getVFS().exists(CandidateLibPath))
- getFilePaths().push_back(CandidateLibPath);
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> OutputBuf =
+ llvm::MemoryBuffer::getFile(OutputFile.c_str());
+ if (!OutputBuf)
+ return llvm::createStringError(OutputBuf.getError(),
+ "Failed to read stdout of " + Executable +
+ ": " + OutputBuf.getError().message());
+ return std::move(*OutputBuf);
}
void ToolChain::setTripleEnvironment(llvm::Triple::EnvironmentType Env) {
@@ -106,24 +139,148 @@ bool ToolChain::useIntegratedAs() const {
IsIntegratedAssemblerDefault());
}
+bool ToolChain::useIntegratedBackend() const {
+ assert(
+ ((IsIntegratedBackendDefault() && IsIntegratedBackendSupported()) ||
+ (!IsIntegratedBackendDefault() || IsNonIntegratedBackendSupported())) &&
+ "(Non-)integrated backend set incorrectly!");
+
+ bool IBackend = Args.hasFlag(options::OPT_fintegrated_objemitter,
+ options::OPT_fno_integrated_objemitter,
+ IsIntegratedBackendDefault());
+
+ // Diagnose when integrated-objemitter options are not supported by this
+ // toolchain.
+ unsigned DiagID;
+ if ((IBackend && !IsIntegratedBackendSupported()) ||
+ (!IBackend && !IsNonIntegratedBackendSupported()))
+ DiagID = clang::diag::err_drv_unsupported_opt_for_target;
+ else
+ DiagID = clang::diag::warn_drv_unsupported_opt_for_target;
+ Arg *A = Args.getLastArg(options::OPT_fno_integrated_objemitter);
+ if (A && !IsNonIntegratedBackendSupported())
+ D.Diag(DiagID) << A->getAsString(Args) << Triple.getTriple();
+ A = Args.getLastArg(options::OPT_fintegrated_objemitter);
+ if (A && !IsIntegratedBackendSupported())
+ D.Diag(DiagID) << A->getAsString(Args) << Triple.getTriple();
+
+ return IBackend;
+}
+
bool ToolChain::useRelaxRelocations() const {
return ENABLE_X86_RELAX_RELOCATIONS;
}
-bool ToolChain::isNoExecStackDefault() const {
- return false;
+bool ToolChain::defaultToIEEELongDouble() const {
+ return PPC_LINUX_DEFAULT_IEEELONGDOUBLE && getTriple().isOSLinux();
+}
+
+static void getAArch64MultilibFlags(const Driver &D,
+ const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
+ Multilib::flags_list &Result) {
+ std::vector<StringRef> Features;
+ tools::aarch64::getAArch64TargetFeatures(D, Triple, Args, Features, false);
+ const auto UnifiedFeatures = tools::unifyTargetFeatures(Features);
+ llvm::DenseSet<StringRef> FeatureSet(UnifiedFeatures.begin(),
+ UnifiedFeatures.end());
+ std::vector<std::string> MArch;
+ for (const auto &Ext : AArch64::Extensions)
+ if (FeatureSet.contains(Ext.Feature))
+ MArch.push_back(Ext.Name.str());
+ for (const auto &Ext : AArch64::Extensions)
+ if (FeatureSet.contains(Ext.NegFeature))
+ MArch.push_back(("no" + Ext.Name).str());
+ MArch.insert(MArch.begin(), ("-march=" + Triple.getArchName()).str());
+ Result.push_back(llvm::join(MArch, "+"));
+}
+
+static void getARMMultilibFlags(const Driver &D,
+ const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
+ Multilib::flags_list &Result) {
+ std::vector<StringRef> Features;
+ llvm::ARM::FPUKind FPUKind = tools::arm::getARMTargetFeatures(
+ D, Triple, Args, Features, false /*ForAs*/, true /*ForMultilib*/);
+ const auto UnifiedFeatures = tools::unifyTargetFeatures(Features);
+ llvm::DenseSet<StringRef> FeatureSet(UnifiedFeatures.begin(),
+ UnifiedFeatures.end());
+ std::vector<std::string> MArch;
+ for (const auto &Ext : ARM::ARCHExtNames)
+ if (FeatureSet.contains(Ext.Feature))
+ MArch.push_back(Ext.Name.str());
+ for (const auto &Ext : ARM::ARCHExtNames)
+ if (FeatureSet.contains(Ext.NegFeature))
+ MArch.push_back(("no" + Ext.Name).str());
+ MArch.insert(MArch.begin(), ("-march=" + Triple.getArchName()).str());
+ Result.push_back(llvm::join(MArch, "+"));
+
+ switch (FPUKind) {
+#define ARM_FPU(NAME, KIND, VERSION, NEON_SUPPORT, RESTRICTION) \
+ case llvm::ARM::KIND: \
+ Result.push_back("-mfpu=" NAME); \
+ break;
+#include "llvm/TargetParser/ARMTargetParser.def"
+ default:
+ llvm_unreachable("Invalid FPUKind");
+ }
+
+ switch (arm::getARMFloatABI(D, Triple, Args)) {
+ case arm::FloatABI::Soft:
+ Result.push_back("-mfloat-abi=soft");
+ break;
+ case arm::FloatABI::SoftFP:
+ Result.push_back("-mfloat-abi=softfp");
+ break;
+ case arm::FloatABI::Hard:
+ Result.push_back("-mfloat-abi=hard");
+ break;
+ case arm::FloatABI::Invalid:
+ llvm_unreachable("Invalid float ABI");
+ }
+}
+
+Multilib::flags_list
+ToolChain::getMultilibFlags(const llvm::opt::ArgList &Args) const {
+ using namespace clang::driver::options;
+
+ std::vector<std::string> Result;
+ const llvm::Triple Triple(ComputeEffectiveClangTriple(Args));
+ Result.push_back("--target=" + Triple.str());
+
+ switch (Triple.getArch()) {
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_32:
+ case llvm::Triple::aarch64_be:
+ getAArch64MultilibFlags(D, Triple, Args, Result);
+ break;
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ getARMMultilibFlags(D, Triple, Args, Result);
+ break;
+ default:
+ break;
+ }
+
+ // Sort and remove duplicates.
+ std::sort(Result.begin(), Result.end());
+ Result.erase(std::unique(Result.begin(), Result.end()), Result.end());
+ return Result;
}
-const SanitizerArgs& ToolChain::getSanitizerArgs() const {
- if (!SanitizerArguments.get())
- SanitizerArguments.reset(new SanitizerArgs(*this, Args));
- return *SanitizerArguments.get();
+SanitizerArgs
+ToolChain::getSanitizerArgs(const llvm::opt::ArgList &JobArgs) const {
+ SanitizerArgs SanArgs(*this, JobArgs, !SanitizerArgsChecked);
+ SanitizerArgsChecked = true;
+ return SanArgs;
}
const XRayArgs& ToolChain::getXRayArgs() const {
- if (!XRayArguments.get())
+ if (!XRayArguments)
XRayArguments.reset(new XRayArgs(*this, Args));
- return *XRayArguments.get();
+ return *XRayArguments;
}
namespace {
@@ -153,13 +310,14 @@ static const DriverSuffix *FindDriverSuffix(StringRef ProgName, size_t &Pos) {
{"cl", "--driver-mode=cl"},
{"++", "--driver-mode=g++"},
{"flang", "--driver-mode=flang"},
+ {"clang-dxc", "--driver-mode=dxc"},
};
- for (size_t i = 0; i < llvm::array_lengthof(DriverSuffixes); ++i) {
- StringRef Suffix(DriverSuffixes[i].Suffix);
- if (ProgName.endswith(Suffix)) {
+ for (const auto &DS : DriverSuffixes) {
+ StringRef Suffix(DS.Suffix);
+ if (ProgName.ends_with(Suffix)) {
Pos = ProgName.size() - Suffix.size();
- return &DriverSuffixes[i];
+ return &DS;
}
}
return nullptr;
@@ -168,11 +326,12 @@ static const DriverSuffix *FindDriverSuffix(StringRef ProgName, size_t &Pos) {
/// Normalize the program name from argv[0] by stripping the file extension if
/// present and lower-casing the string on Windows.
static std::string normalizeProgramName(llvm::StringRef Argv0) {
- std::string ProgName = std::string(llvm::sys::path::stem(Argv0));
-#ifdef _WIN32
- // Transform to lowercase for case insensitive file systems.
- std::transform(ProgName.begin(), ProgName.end(), ProgName.begin(), ::tolower);
-#endif
+ std::string ProgName = std::string(llvm::sys::path::filename(Argv0));
+ if (is_style_windows(llvm::sys::path::Style::native)) {
+ // Transform to lowercase for case insensitive file systems.
+ std::transform(ProgName.begin(), ProgName.end(), ProgName.begin(),
+ ::tolower);
+ }
return ProgName;
}
@@ -186,6 +345,13 @@ static const DriverSuffix *parseDriverSuffix(StringRef ProgName, size_t &Pos) {
// added via -target as implicit first argument.
const DriverSuffix *DS = FindDriverSuffix(ProgName, Pos);
+ if (!DS && ProgName.ends_with(".exe")) {
+ // Try again after stripping the executable suffix:
+ // clang++.exe -> clang++
+ ProgName = ProgName.drop_back(StringRef(".exe").size());
+ DS = FindDriverSuffix(ProgName, Pos);
+ }
+
if (!DS) {
// Try again after stripping any trailing version number:
// clang++3.5 -> clang++
@@ -256,13 +422,20 @@ std::string ToolChain::getInputFilename(const InputInfo &Input) const {
return Input.getFilename();
}
-bool ToolChain::IsUnwindTablesDefault(const ArgList &Args) const {
- return false;
+ToolChain::UnwindTableLevel
+ToolChain::getDefaultUnwindTableLevel(const ArgList &Args) const {
+ return UnwindTableLevel::None;
+}
+
+unsigned ToolChain::GetDefaultDwarfVersion() const {
+ // TODO: Remove the RISC-V special case when R_RISCV_SET_ULEB128 linker
+ // support becomes more widely available.
+ return getTriple().isRISCV() ? 4 : 5;
}
Tool *ToolChain::getClang() const {
if (!Clang)
- Clang.reset(new tools::Clang(*this));
+ Clang.reset(new tools::Clang(*this, useIntegratedBackend()));
return Clang.get();
}
@@ -320,10 +493,16 @@ Tool *ToolChain::getOffloadBundler() const {
return OffloadBundler.get();
}
-Tool *ToolChain::getOffloadWrapper() const {
- if (!OffloadWrapper)
- OffloadWrapper.reset(new tools::OffloadWrapper(*this));
- return OffloadWrapper.get();
+Tool *ToolChain::getOffloadPackager() const {
+ if (!OffloadPackager)
+ OffloadPackager.reset(new tools::OffloadPackager(*this));
+ return OffloadPackager.get();
+}
+
+Tool *ToolChain::getLinkerWrapper() const {
+ if (!LinkerWrapper)
+ LinkerWrapper.reset(new tools::LinkerWrapper(*this, getLink()));
+ return LinkerWrapper.get();
}
Tool *ToolChain::getTool(Action::ActionClass AC) const {
@@ -346,12 +525,13 @@ Tool *ToolChain::getTool(Action::ActionClass AC) const {
case Action::LipoJobClass:
case Action::DsymutilJobClass:
case Action::VerifyDebugInfoJobClass:
+ case Action::BinaryAnalyzeJobClass:
llvm_unreachable("Invalid tool kind.");
case Action::CompileJobClass:
case Action::PrecompileJobClass:
- case Action::HeaderModulePrecompileJobClass:
case Action::PreprocessJobClass:
+ case Action::ExtractAPIJobClass:
case Action::AnalyzeJobClass:
case Action::MigrateJobClass:
case Action::VerifyPCHJobClass:
@@ -362,8 +542,10 @@ Tool *ToolChain::getTool(Action::ActionClass AC) const {
case Action::OffloadUnbundlingJobClass:
return getOffloadBundler();
- case Action::OffloadWrapperJobClass:
- return getOffloadWrapper();
+ case Action::OffloadPackagerJobClass:
+ return getOffloadPackager();
+ case Action::LinkerWrapperJobClass:
+ return getLinkerWrapper();
}
llvm_unreachable("Invalid tool kind.");
@@ -374,6 +556,9 @@ static StringRef getArchNameForCompilerRTLib(const ToolChain &TC,
const llvm::Triple &Triple = TC.getTriple();
bool IsWindows = Triple.isOSWindows();
+ if (TC.isBareMetal())
+ return Triple.getArchName();
+
if (TC.getArch() == llvm::Triple::arm || TC.getArch() == llvm::Triple::armeb)
return (arm::getARMFloatABI(TC, Args) == arm::FloatABI::Hard && !IsWindows)
? "armhf"
@@ -411,12 +596,17 @@ StringRef ToolChain::getOSLibName() const {
std::string ToolChain::getCompilerRTPath() const {
SmallString<128> Path(getDriver().ResourceDir);
- if (Triple.isOSUnknown()) {
+ if (isBareMetal()) {
+ llvm::sys::path::append(Path, "lib", getOSLibName());
+ if (!SelectedMultilibs.empty()) {
+ Path += SelectedMultilibs.back().gccSuffix();
+ }
+ } else if (Triple.isOSUnknown()) {
llvm::sys::path::append(Path, "lib");
} else {
llvm::sys::path::append(Path, "lib", getOSLibName());
}
- return std::string(Path.str());
+ return std::string(Path);
}
std::string ToolChain::getCompilerRTBasename(const ArgList &Args,
@@ -469,7 +659,7 @@ std::string ToolChain::getCompilerRT(const ArgList &Args, StringRef Component,
SmallString<128> P(LibPath);
llvm::sys::path::append(P, CRTBasename);
if (getVFS().exists(P))
- return std::string(P.str());
+ return std::string(P);
}
// Fall back to the old expected compiler-rt name if the new one does not
@@ -478,7 +668,7 @@ std::string ToolChain::getCompilerRT(const ArgList &Args, StringRef Component,
buildCompilerRTBasename(Args, Component, Type, /*AddArch=*/true);
SmallString<128> Path(getCompilerRTPath());
llvm::sys::path::append(Path, CRTBasename);
- return std::string(Path.str());
+ return std::string(Path);
}
const char *ToolChain::getCompilerRTArgString(const llvm::opt::ArgList &Args,
@@ -487,23 +677,118 @@ const char *ToolChain::getCompilerRTArgString(const llvm::opt::ArgList &Args,
return Args.MakeArgString(getCompilerRT(Args, Component, Type));
}
-std::string ToolChain::getRuntimePath() const {
+// Android target triples contain a target version. If we don't have libraries
+// for the exact target version, we should fall back to the next newest version
+// or a versionless path, if any.
+std::optional<std::string>
+ToolChain::getFallbackAndroidTargetPath(StringRef BaseDir) const {
+ llvm::Triple TripleWithoutLevel(getTriple());
+ TripleWithoutLevel.setEnvironmentName("android"); // remove any version number
+ const std::string &TripleWithoutLevelStr = TripleWithoutLevel.str();
+ unsigned TripleVersion = getTriple().getEnvironmentVersion().getMajor();
+ unsigned BestVersion = 0;
+
+ SmallString<32> TripleDir;
+ bool UsingUnversionedDir = false;
+ std::error_code EC;
+ for (llvm::vfs::directory_iterator LI = getVFS().dir_begin(BaseDir, EC), LE;
+ !EC && LI != LE; LI = LI.increment(EC)) {
+ StringRef DirName = llvm::sys::path::filename(LI->path());
+ StringRef DirNameSuffix = DirName;
+ if (DirNameSuffix.consume_front(TripleWithoutLevelStr)) {
+ if (DirNameSuffix.empty() && TripleDir.empty()) {
+ TripleDir = DirName;
+ UsingUnversionedDir = true;
+ } else {
+ unsigned Version;
+ if (!DirNameSuffix.getAsInteger(10, Version) && Version > BestVersion &&
+ Version < TripleVersion) {
+ BestVersion = Version;
+ TripleDir = DirName;
+ UsingUnversionedDir = false;
+ }
+ }
+ }
+ }
+
+ if (TripleDir.empty())
+ return {};
+
+ SmallString<128> P(BaseDir);
+ llvm::sys::path::append(P, TripleDir);
+ if (UsingUnversionedDir)
+ D.Diag(diag::warn_android_unversioned_fallback) << P << getTripleString();
+ return std::string(P);
+}
+
+std::optional<std::string>
+ToolChain::getTargetSubDirPath(StringRef BaseDir) const {
+ auto getPathForTriple =
+ [&](const llvm::Triple &Triple) -> std::optional<std::string> {
+ SmallString<128> P(BaseDir);
+ llvm::sys::path::append(P, Triple.str());
+ if (getVFS().exists(P))
+ return std::string(P);
+ return {};
+ };
+
+ if (auto Path = getPathForTriple(getTriple()))
+ return *Path;
+
+ // When building with per target runtime directories, various ways of naming
+ // the Arm architecture may have been normalised to simply "arm".
+ // For example "armv8l" (Armv8 AArch32 little endian) is replaced with "arm".
+ // Since an armv8l system can use libraries built for earlier architecture
+ // versions assuming endian and float ABI match.
+ //
+ // Original triple: armv8l-unknown-linux-gnueabihf
+ // Runtime triple: arm-unknown-linux-gnueabihf
+ //
+ // We do not do this for armeb (big endian) because doing so could make us
+ // select little endian libraries. In addition, all known armeb triples only
+ // use the "armeb" architecture name.
+ //
+ // M profile Arm is bare metal and we know they will not be using the per
+ // target runtime directory layout.
+ if (getTriple().getArch() == Triple::arm && !getTriple().isArmMClass()) {
+ llvm::Triple ArmTriple = getTriple();
+ ArmTriple.setArch(Triple::arm);
+ if (auto Path = getPathForTriple(ArmTriple))
+ return *Path;
+ }
+
+ if (getTriple().isAndroid())
+ return getFallbackAndroidTargetPath(BaseDir);
+
+ return {};
+}
+
+std::optional<std::string> ToolChain::getRuntimePath() const {
SmallString<128> P(D.ResourceDir);
- llvm::sys::path::append(P, "lib", getTripleString());
- return std::string(P.str());
+ llvm::sys::path::append(P, "lib");
+ return getTargetSubDirPath(P);
}
-std::string ToolChain::getStdlibPath() const {
+std::optional<std::string> ToolChain::getStdlibPath() const {
SmallString<128> P(D.Dir);
- llvm::sys::path::append(P, "..", "lib", getTripleString());
- return std::string(P.str());
+ llvm::sys::path::append(P, "..", "lib");
+ return getTargetSubDirPath(P);
}
-std::string ToolChain::getArchSpecificLibPath() const {
- SmallString<128> Path(getDriver().ResourceDir);
- llvm::sys::path::append(Path, "lib", getOSLibName(),
- llvm::Triple::getArchTypeName(getArch()));
- return std::string(Path.str());
+ToolChain::path_list ToolChain::getArchSpecificLibPaths() const {
+ path_list Paths;
+
+ auto AddPath = [&](const ArrayRef<StringRef> &SS) {
+ SmallString<128> Path(getDriver().ResourceDir);
+ llvm::sys::path::append(Path, "lib");
+ for (auto &S : SS)
+ llvm::sys::path::append(Path, S);
+ Paths.push_back(std::string(Path));
+ };
+
+ AddPath({getTriple().str()});
+ AddPath({getOSLibName(), llvm::Triple::getArchTypeName(getArch())});
+ return Paths;
}
bool ToolChain::needsProfileRT(const ArgList &Args) {
@@ -530,7 +815,8 @@ Tool *ToolChain::SelectTool(const JobAction &JA) const {
if (D.IsFlangMode() && getDriver().ShouldUseFlangCompiler(JA)) return getFlang();
if (getDriver().ShouldUseClangCompiler(JA)) return getClang();
Action::ActionClass AC = JA.getKind();
- if (AC == Action::AssembleJobClass && useIntegratedAs())
+ if (AC == Action::AssembleJobClass && useIntegratedAs() &&
+ !getTriple().isOSAIX())
return getClangAs();
return getTool(AC);
}
@@ -543,12 +829,9 @@ std::string ToolChain::GetProgramPath(const char *Name) const {
return D.GetProgramPath(Name, *this);
}
-std::string ToolChain::GetLinkerPath(bool *LinkerIsLLD,
- bool *LinkerIsLLDDarwinNew) const {
+std::string ToolChain::GetLinkerPath(bool *LinkerIsLLD) const {
if (LinkerIsLLD)
*LinkerIsLLD = false;
- if (LinkerIsLLDDarwinNew)
- *LinkerIsLLDDarwinNew = false;
// Get -fuse-ld= first to prevent -Wunused-command-line-argument. -fuse-ld= is
// considered as the linker flavor, e.g. "bfd", "gold", or "lld".
@@ -558,13 +841,18 @@ std::string ToolChain::GetLinkerPath(bool *LinkerIsLLD,
// --ld-path= takes precedence over -fuse-ld= and specifies the executable
// name. -B, COMPILER_PATH and PATH and consulted if the value does not
// contain a path component separator.
+ // -fuse-ld=lld can be used with --ld-path= to inform clang that the binary
+ // that --ld-path= points to is lld.
if (const Arg *A = Args.getLastArg(options::OPT_ld_path_EQ)) {
std::string Path(A->getValue());
if (!Path.empty()) {
if (llvm::sys::path::parent_path(Path).empty())
Path = GetProgramPath(A->getValue());
- if (llvm::sys::fs::can_execute(Path))
+ if (llvm::sys::fs::can_execute(Path)) {
+ if (LinkerIsLLD)
+ *LinkerIsLLD = UseLinker == "lld";
return std::string(Path);
+ }
}
getDriver().Diag(diag::err_drv_invalid_linker_name) << A->getAsString(Args);
return GetProgramPath(getDefaultLinker());
@@ -583,7 +871,7 @@ std::string ToolChain::GetLinkerPath(bool *LinkerIsLLD,
// for the linker flavor is brittle. In addition, prepending "ld." or "ld64."
// to a relative path is surprising. This is more complex due to priorities
// among -B, COMPILER_PATH and PATH. --ld-path= should be used instead.
- if (UseLinker.find('/') != StringRef::npos)
+ if (UseLinker.contains('/'))
getDriver().Diag(diag::warn_drv_fuse_ld_path);
if (llvm::sys::path::is_absolute(UseLinker)) {
@@ -601,11 +889,8 @@ std::string ToolChain::GetLinkerPath(bool *LinkerIsLLD,
std::string LinkerPath(GetProgramPath(LinkerName.c_str()));
if (llvm::sys::fs::can_execute(LinkerPath)) {
- // FIXME: Remove LinkerIsLLDDarwinNew once there's only one MachO lld.
if (LinkerIsLLD)
- *LinkerIsLLD = UseLinker == "lld" || UseLinker == "lld.darwinold";
- if (LinkerIsLLDDarwinNew)
- *LinkerIsLLDDarwinNew = UseLinker == "lld";
+ *LinkerIsLLD = UseLinker == "lld";
return LinkerPath;
}
}
@@ -618,6 +903,8 @@ std::string ToolChain::GetLinkerPath(bool *LinkerIsLLD,
std::string ToolChain::GetStaticLibToolPath() const {
// TODO: Add support for static lib archiving on Windows
+ if (Triple.isOSDarwin())
+ return GetProgramPath("libtool");
return GetProgramPath("llvm-ar");
}
@@ -742,6 +1029,9 @@ void ToolChain::addClangTargetOptions(
const ArgList &DriverArgs, ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadKind) const {}
+void ToolChain::addClangCC1ASTargetOptions(const ArgList &Args,
+ ArgStringList &CC1ASArgs) const {}
+
void ToolChain::addClangWarningOptions(ArgStringList &CC1Args) const {}
void ToolChain::addProfileRTLibs(const llvm::opt::ArgList &Args,
@@ -879,6 +1169,14 @@ void ToolChain::addExternCSystemIncludeIfExists(const ArgList &DriverArgs,
}
}
+/*static*/ std::string ToolChain::concat(StringRef Path, const Twine &A,
+ const Twine &B, const Twine &C,
+ const Twine &D) {
+ SmallString<128> Result(Path);
+ llvm::sys::path::append(Result, llvm::sys::path::Style::posix, A, B, C, D);
+ return std::string(Result);
+}
+
std::string ToolChain::detectLibcxxVersion(StringRef IncludePath) const {
std::error_code EC;
int MaxVersion = 0;
@@ -920,8 +1218,15 @@ void ToolChain::AddClangCXXStdlibIsystemArgs(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
DriverArgs.ClaimAllArgs(options::OPT_stdlibxx_isystem);
- if (!DriverArgs.hasArg(options::OPT_nostdinc, options::OPT_nostdincxx,
- options::OPT_nostdlibinc))
+ // This intentionally only looks at -nostdinc++, and not -nostdinc or
+ // -nostdlibinc. The purpose of -stdlib++-isystem is to support toolchain
+ // setups with non-standard search logic for the C++ headers, while still
+ // allowing users of the toolchain to bring their own C++ headers. Such a
+ // toolchain likely also has non-standard search logic for the C headers and
+ // uses -nostdinc to suppress the default logic, but -stdlib++-isystem should
+ // still work in that case and only be suppressed by an explicit -nostdinc++
+ // in a project using the toolchain.
+ if (!DriverArgs.hasArg(options::OPT_nostdincxx))
for (const auto &P :
DriverArgs.getAllArgValues(options::OPT_stdlibxx_isystem))
addSystemInclude(DriverArgs, CC1Args, P);
@@ -942,6 +1247,8 @@ void ToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
switch (Type) {
case ToolChain::CST_Libcxx:
CmdArgs.push_back("-lc++");
+ if (Args.hasArg(options::OPT_fexperimental_library))
+ CmdArgs.push_back("-lc++experimental");
break;
case ToolChain::CST_Libstdcxx:
@@ -993,22 +1300,27 @@ bool ToolChain::addFastMathRuntimeIfAvailable(const ArgList &Args,
return false;
}
+Expected<SmallVector<std::string>>
+ToolChain::getSystemGPUArchs(const llvm::opt::ArgList &Args) const {
+ return SmallVector<std::string>();
+}
+
SanitizerMask ToolChain::getSupportedSanitizers() const {
// Return sanitizers which don't require runtime support and are not
// platform dependent.
SanitizerMask Res =
- (SanitizerKind::Undefined & ~SanitizerKind::Vptr &
- ~SanitizerKind::Function) |
+ (SanitizerKind::Undefined & ~SanitizerKind::Vptr) |
(SanitizerKind::CFI & ~SanitizerKind::CFIICall) |
SanitizerKind::CFICastStrict | SanitizerKind::FloatDivideByZero |
- SanitizerKind::UnsignedIntegerOverflow |
+ SanitizerKind::KCFI | SanitizerKind::UnsignedIntegerOverflow |
SanitizerKind::UnsignedShiftBase | SanitizerKind::ImplicitConversion |
SanitizerKind::Nullability | SanitizerKind::LocalBounds;
if (getTriple().getArch() == llvm::Triple::x86 ||
getTriple().getArch() == llvm::Triple::x86_64 ||
getTriple().getArch() == llvm::Triple::arm || getTriple().isWasm() ||
- getTriple().isAArch64())
+ getTriple().isAArch64() || getTriple().isRISCV() ||
+ getTriple().isLoongArch64())
Res |= SanitizerKind::CFIICall;
if (getTriple().getArch() == llvm::Triple::x86_64 ||
getTriple().isAArch64(64) || getTriple().isRISCV())
@@ -1024,8 +1336,8 @@ void ToolChain::AddCudaIncludeArgs(const ArgList &DriverArgs,
void ToolChain::AddHIPIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {}
-llvm::SmallVector<std::string, 12>
-ToolChain::getHIPDeviceLibs(const ArgList &DriverArgs) const {
+llvm::SmallVector<ToolChain::BitCodeLibraryInfo, 12>
+ToolChain::getDeviceLibs(const ArgList &DriverArgs) const {
return {};
}
@@ -1100,7 +1412,10 @@ llvm::opt::DerivedArgList *ToolChain::TranslateOpenMPTargetArgs(
// matches the current toolchain triple. If it is not present
// at all, target and host share a toolchain.
if (A->getOption().matches(options::OPT_m_Group)) {
- if (SameTripleAsHost)
+ // Pass code object version to device toolchain
+ // to correctly set metadata in intermediate files.
+ if (SameTripleAsHost ||
+ A->getOption().matches(options::OPT_mcode_object_version_EQ))
DAL->append(A);
else
Modified = true;
@@ -1113,8 +1428,10 @@ llvm::opt::DerivedArgList *ToolChain::TranslateOpenMPTargetArgs(
A->getOption().matches(options::OPT_Xopenmp_target);
if (A->getOption().matches(options::OPT_Xopenmp_target_EQ)) {
+ llvm::Triple TT(getOpenMPTriple(A->getValue(0)));
+
// Passing device args: -Xopenmp-target=<triple> -opt=val.
- if (A->getValue(0) == getTripleString())
+ if (TT.getTriple() == getTripleString())
Index = Args.getBaseArgs().MakeIndex(A->getValue(1));
else
continue;
@@ -1205,17 +1522,17 @@ llvm::opt::DerivedArgList *ToolChain::TranslateXarchArgs(
DerivedArgList *DAL = new DerivedArgList(Args.getBaseArgs());
bool Modified = false;
- bool IsGPU = OFK == Action::OFK_Cuda || OFK == Action::OFK_HIP;
+ bool IsDevice = OFK != Action::OFK_None && OFK != Action::OFK_Host;
for (Arg *A : Args) {
bool NeedTrans = false;
bool Skip = false;
if (A->getOption().matches(options::OPT_Xarch_device)) {
- NeedTrans = IsGPU;
- Skip = !IsGPU;
+ NeedTrans = IsDevice;
+ Skip = !IsDevice;
} else if (A->getOption().matches(options::OPT_Xarch_host)) {
- NeedTrans = !IsGPU;
- Skip = IsGPU;
- } else if (A->getOption().matches(options::OPT_Xarch__) && IsGPU) {
+ NeedTrans = !IsDevice;
+ Skip = IsDevice;
+ } else if (A->getOption().matches(options::OPT_Xarch__) && IsDevice) {
// Do not translate -Xarch_ options for non CUDA/HIP toolchain since
// they may need special translation.
// Skip this argument unless the architecture matches BoundArch
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp
index 3000b8416adf..e6126ff62db3 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp
@@ -12,7 +12,9 @@
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/Path.h"
using AIX = clang::driver::toolchains::AIX;
@@ -28,6 +30,7 @@ void aix::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
ArgStringList CmdArgs;
const bool IsArch32Bit = getToolChain().getTriple().isArch32Bit();
@@ -36,6 +39,11 @@ void aix::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
if (!IsArch32Bit && !IsArch64Bit)
llvm_unreachable("Unsupported bit width value.");
+ if (Arg *A = C.getArgs().getLastArg(options::OPT_G)) {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << D.getTargetTriple();
+ }
+
// Specify the mode in which the as(1) command operates.
if (IsArch32Bit) {
CmdArgs.push_back("-a32");
@@ -74,6 +82,29 @@ void aix::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
Exec, CmdArgs, Inputs, Output));
}
+// Determine whether there are any linker options that supply an export list
+// (or equivalent information about what to export) being sent to the linker.
+static bool hasExportListLinkerOpts(const ArgStringList &CmdArgs) {
+ for (size_t i = 0, Size = CmdArgs.size(); i < Size; ++i) {
+ llvm::StringRef ArgString(CmdArgs[i]);
+
+ if (ArgString.starts_with("-bE:") || ArgString.starts_with("-bexport:") ||
+ ArgString == "-bexpall" || ArgString == "-bexpfull")
+ return true;
+
+ // If we split -b option, check the next opt.
+ if (ArgString == "-b" && i + 1 < Size) {
+ ++i;
+ llvm::StringRef ArgNextString(CmdArgs[i]);
+ if (ArgNextString.starts_with("E:") ||
+ ArgNextString.starts_with("export:") || ArgNextString == "expall" ||
+ ArgNextString == "expfull")
+ return true;
+ }
+ }
+ return false;
+}
+
void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs, const ArgList &Args,
@@ -88,6 +119,11 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (!(IsArch32Bit || IsArch64Bit))
llvm_unreachable("Unsupported bit width value.");
+ if (Arg *A = C.getArgs().getLastArg(options::OPT_G)) {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << D.getTargetTriple();
+ }
+
// Force static linking when "-static" is present.
if (Args.hasArg(options::OPT_static))
CmdArgs.push_back("-bnso");
@@ -98,6 +134,54 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-bnoentry");
}
+ if (Args.hasFlag(options::OPT_mxcoff_roptr, options::OPT_mno_xcoff_roptr,
+ false)) {
+ if (Args.hasArg(options::OPT_shared))
+ D.Diag(diag::err_roptr_cannot_build_shared);
+
+ // The `-mxcoff-roptr` option places constants in RO sections as much as
+ // possible. Then `-bforceimprw` changes such sections to RW if they contain
+ // imported symbols that need to be resolved.
+ CmdArgs.push_back("-bforceimprw");
+ }
+
+ // PGO instrumentation generates symbols belonging to special sections, and
+ // the linker needs to place all symbols in a particular section together in
+ // memory; the AIX linker does that under an option.
+ if (Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
+ false) ||
+ Args.hasFlag(options::OPT_fprofile_generate,
+ options::OPT_fno_profile_generate, false) ||
+ Args.hasFlag(options::OPT_fprofile_generate_EQ,
+ options::OPT_fno_profile_generate, false) ||
+ Args.hasFlag(options::OPT_fprofile_instr_generate,
+ options::OPT_fno_profile_instr_generate, false) ||
+ Args.hasFlag(options::OPT_fprofile_instr_generate_EQ,
+ options::OPT_fno_profile_instr_generate, false) ||
+ Args.hasFlag(options::OPT_fcs_profile_generate,
+ options::OPT_fno_profile_generate, false) ||
+ Args.hasFlag(options::OPT_fcs_profile_generate_EQ,
+ options::OPT_fno_profile_generate, false) ||
+ Args.hasArg(options::OPT_fcreate_profile) ||
+ Args.hasArg(options::OPT_coverage))
+ CmdArgs.push_back("-bdbg:namedsects:ss");
+
+ if (Arg *A =
+ Args.getLastArg(clang::driver::options::OPT_mxcoff_build_id_EQ)) {
+ StringRef BuildId = A->getValue();
+ if (BuildId[0] != '0' || BuildId[1] != 'x' ||
+ BuildId.find_if_not(llvm::isHexDigit, 2) != StringRef::npos)
+ ToolChain.getDriver().Diag(diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << BuildId;
+ else {
+ std::string LinkerFlag = "-bdbg:ldrinfo:xcoff_binary_id:0x";
+ if (BuildId.size() % 2) // Prepend a 0 if odd number of digits.
+ LinkerFlag += "0";
+ LinkerFlag += BuildId.drop_front(2).lower();
+ CmdArgs.push_back(Args.MakeArgString(LinkerFlag));
+ }
+ }
+
// Specify linker output file.
assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
if (Output.isFilename()) {
@@ -118,19 +202,19 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-bpD:0x110000000");
}
- auto getCrt0Basename = [&Args, IsArch32Bit] {
- // Enable gprofiling when "-pg" is specified.
- if (Args.hasArg(options::OPT_pg))
- return IsArch32Bit ? "gcrt0.o" : "gcrt0_64.o";
- // Enable profiling when "-p" is specified.
- else if (Args.hasArg(options::OPT_p))
- return IsArch32Bit ? "mcrt0.o" : "mcrt0_64.o";
- else
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_shared, options::OPT_r)) {
+ auto getCrt0Basename = [&Args, IsArch32Bit] {
+ if (Arg *A = Args.getLastArgNoClaim(options::OPT_p, options::OPT_pg)) {
+ // Enable gprofiling when "-pg" is specified.
+ if (A->getOption().matches(options::OPT_pg))
+ return IsArch32Bit ? "gcrt0.o" : "gcrt0_64.o";
+ // Enable profiling when "-p" is specified.
+ return IsArch32Bit ? "mcrt0.o" : "mcrt0_64.o";
+ }
return IsArch32Bit ? "crt0.o" : "crt0_64.o";
- };
+ };
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
- options::OPT_shared)) {
CmdArgs.push_back(
Args.MakeArgString(ToolChain.GetFilePath(getCrt0Basename())));
@@ -147,27 +231,109 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// Specify linker input file(s).
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
- // Add directory to library search path.
- Args.AddAllArgs(CmdArgs, options::OPT_L);
- ToolChain.AddFilePathLibArgs(Args, CmdArgs);
- ToolChain.addProfileRTLibs(Args, CmdArgs);
-
- if (getToolChain().ShouldLinkCXXStdlib(Args))
- getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
-
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
- AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
-
- // Support POSIX threads if "-pthreads" or "-pthread" is present.
- if (Args.hasArg(options::OPT_pthreads, options::OPT_pthread))
- CmdArgs.push_back("-lpthreads");
+ if (D.isUsingLTO()) {
+ assert(!Inputs.empty() && "Must have at least one input.");
+ // Find the first filename InputInfo object.
+ auto Input = llvm::find_if(
+ Inputs, [](const InputInfo &II) -> bool { return II.isFilename(); });
+ if (Input == Inputs.end())
+ // For a very rare case, all of the inputs to the linker are
+ // InputArg. If that happens, just use the first InputInfo.
+ Input = Inputs.begin();
+
+ addLTOOptions(ToolChain, Args, CmdArgs, Output, *Input,
+ D.getLTOMode() == LTOK_Thin);
+ }
- if (D.CCCIsCXX())
- CmdArgs.push_back("-lm");
+ if (Args.hasArg(options::OPT_shared) && !hasExportListLinkerOpts(CmdArgs)) {
+
+ const char *CreateExportListExec = Args.MakeArgString(
+ path::parent_path(ToolChain.getDriver().ClangExecutable) +
+ "/llvm-nm");
+ ArgStringList CreateExportCmdArgs;
+
+ std::string CreateExportListPath =
+ C.getDriver().GetTemporaryPath("CreateExportList", "exp");
+ const char *ExportList =
+ C.addTempFile(C.getArgs().MakeArgString(CreateExportListPath));
+
+ for (const auto &II : Inputs)
+ if (II.isFilename())
+ CreateExportCmdArgs.push_back(II.getFilename());
+
+ CreateExportCmdArgs.push_back("--export-symbols");
+ CreateExportCmdArgs.push_back("-X");
+ if (IsArch32Bit) {
+ CreateExportCmdArgs.push_back("32");
+ } else {
+ // Must be 64-bit, otherwise asserted already.
+ CreateExportCmdArgs.push_back("64");
+ }
+
+ auto ExpCommand = std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::None(), CreateExportListExec,
+ CreateExportCmdArgs, Inputs, Output);
+ ExpCommand->setRedirectFiles(
+ {std::nullopt, std::string(ExportList), std::nullopt});
+ C.addCommand(std::move(ExpCommand));
+ CmdArgs.push_back(Args.MakeArgString(llvm::Twine("-bE:") + ExportList));
+ }
- CmdArgs.push_back("-lc");
+ // Add directory to library search path.
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ if (!Args.hasArg(options::OPT_r)) {
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+ ToolChain.addProfileRTLibs(Args, CmdArgs);
+
+ if (getToolChain().ShouldLinkCXXStdlib(Args))
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
+
+ // Add OpenMP runtime if -fopenmp is specified.
+ if (Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
+ options::OPT_fno_openmp, false)) {
+ switch (ToolChain.getDriver().getOpenMPRuntime(Args)) {
+ case Driver::OMPRT_OMP:
+ CmdArgs.push_back("-lomp");
+ break;
+ case Driver::OMPRT_IOMP5:
+ CmdArgs.push_back("-liomp5");
+ break;
+ case Driver::OMPRT_GOMP:
+ CmdArgs.push_back("-lgomp");
+ break;
+ case Driver::OMPRT_Unknown:
+ // Already diagnosed.
+ break;
+ }
+ }
+
+ // Support POSIX threads if "-pthreads" or "-pthread" is present.
+ if (Args.hasArg(options::OPT_pthreads, options::OPT_pthread))
+ CmdArgs.push_back("-lpthreads");
+
+ if (D.CCCIsCXX())
+ CmdArgs.push_back("-lm");
+
+ CmdArgs.push_back("-lc");
+
+ if (Args.hasArgNoClaim(options::OPT_p, options::OPT_pg)) {
+ CmdArgs.push_back(Args.MakeArgString((llvm::Twine("-L") + D.SysRoot) +
+ "/lib/profiled"));
+ CmdArgs.push_back(Args.MakeArgString((llvm::Twine("-L") + D.SysRoot) +
+ "/usr/lib/profiled"));
+ }
+ }
}
+ if (D.IsFlangMode()) {
+ addFortranRuntimeLibraryPath(ToolChain, Args, CmdArgs);
+ addFortranRuntimeLibs(ToolChain, Args, CmdArgs);
+ CmdArgs.push_back("-lm");
+ CmdArgs.push_back("-lpthread");
+ }
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
Exec, CmdArgs, Inputs, Output));
@@ -176,6 +342,10 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
/// AIX - AIX tool chain which can call as(1) and ld(1) directly.
AIX::AIX(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: ToolChain(D, Triple, Args) {
+ getProgramPaths().push_back(getDriver().getInstalledDir());
+ if (getDriver().getInstalledDir() != getDriver().Dir)
+ getProgramPaths().push_back(getDriver().Dir);
+
ParseInlineAsmUsingAsmParser = Args.hasFlag(
options::OPT_fintegrated_as, options::OPT_fno_integrated_as, true);
getLibraryPaths().push_back(getDriver().SysRoot + "/usr/lib");
@@ -201,11 +371,13 @@ void AIX::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
llvm::StringRef Sysroot = GetHeaderSysroot(DriverArgs);
const Driver &D = getDriver();
- // Add the Clang builtin headers (<resource>/include).
if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
SmallString<128> P(D.ResourceDir);
- path::append(P, "/include");
- addSystemInclude(DriverArgs, CC1Args, P.str());
+ // Add the PowerPC intrinsic headers (<resource>/include/ppc_wrappers)
+ path::append(P, "include", "ppc_wrappers");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ // Add the Clang builtin headers (<resource>/include)
+ addSystemInclude(DriverArgs, CC1Args, path::parent_path(P.str()));
}
// Return if -nostdlibinc is specified as a driver option.
@@ -218,20 +390,82 @@ void AIX::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
addSystemInclude(DriverArgs, CC1Args, UP.str());
}
+void AIX::AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+
+ if (DriverArgs.hasArg(options::OPT_nostdinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx) ||
+ DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libstdcxx:
+ llvm::report_fatal_error(
+ "picking up libstdc++ headers is unimplemented on AIX");
+ case ToolChain::CST_Libcxx: {
+ llvm::StringRef Sysroot = GetHeaderSysroot(DriverArgs);
+ SmallString<128> PathCPP(Sysroot);
+ llvm::sys::path::append(PathCPP, "opt/IBM/openxlCSDK", "include", "c++",
+ "v1");
+ addSystemInclude(DriverArgs, CC1Args, PathCPP.str());
+ // Required in order to suppress conflicting C++ overloads in the system
+ // libc headers that were used by XL C++.
+ CC1Args.push_back("-D__LIBC_NO_CPP_MATH_OVERLOADS__");
+ return;
+ }
+ }
+
+ llvm_unreachable("Unexpected C++ library type; only libc++ is supported.");
+}
+
void AIX::AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const {
switch (GetCXXStdlibType(Args)) {
+ case ToolChain::CST_Libstdcxx:
+ llvm::report_fatal_error("linking libstdc++ unimplemented on AIX");
case ToolChain::CST_Libcxx:
CmdArgs.push_back("-lc++");
+ if (Args.hasArg(options::OPT_fexperimental_library))
+ CmdArgs.push_back("-lc++experimental");
CmdArgs.push_back("-lc++abi");
return;
- case ToolChain::CST_Libstdcxx:
- llvm::report_fatal_error("linking libstdc++ unimplemented on AIX");
}
llvm_unreachable("Unexpected C++ library type; only libc++ is supported.");
}
+void AIX::addClangTargetOptions(
+ const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadingKind) const {
+ Args.AddLastArg(CC1Args, options::OPT_mignore_xcoff_visibility);
+ Args.AddLastArg(CC1Args, options::OPT_mdefault_visibility_export_mapping_EQ);
+ Args.addOptInFlag(CC1Args, options::OPT_mxcoff_roptr, options::OPT_mno_xcoff_roptr);
+
+ if (Args.hasFlag(options::OPT_fxl_pragma_pack,
+ options::OPT_fno_xl_pragma_pack, true))
+ CC1Args.push_back("-fxl-pragma-pack");
+}
+
+void AIX::addProfileRTLibs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const {
+ if (needsProfileRT(Args)) {
+ // Add linker option -u__llvm_profile_runtime to cause runtime
+ // initialization to occur.
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine("-u", llvm::getInstrProfRuntimeHookVarName())));
+
+ if (const auto *A =
+ Args.getLastArgNoClaim(options::OPT_fprofile_update_EQ)) {
+ StringRef Val = A->getValue();
+ if (Val == "atomic" || Val == "prefer-atomic")
+ CmdArgs.push_back("-latomic");
+ }
+ }
+
+ ToolChain::addProfileRTLibs(Args, CmdArgs);
+}
+
ToolChain::CXXStdlibType AIX::GetDefaultCXXStdlibType() const {
return ToolChain::CST_Libcxx;
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.h
index d1ec6d10fb3a..755d87e07ec5 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.h
@@ -16,10 +16,10 @@ namespace clang {
namespace driver {
namespace tools {
-/// aix -- Directly call system default assembler and linker.
+/// Directly call system default assembler and linker.
namespace aix {
-class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
+class LLVM_LIBRARY_VISIBILITY Assembler final : public Tool {
public:
Assembler(const ToolChain &TC) : Tool("aix::Assembler", "assembler", TC) {}
@@ -31,7 +31,7 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
public:
Linker(const ToolChain &TC) : Tool("aix::Linker", "linker", TC) {}
@@ -63,16 +63,30 @@ public:
return ParseInlineAsmUsingAsmParser;
}
bool isPICDefault() const override { return true; }
- bool isPIEDefault() const override { return false; }
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override {
+ return false;
+ }
bool isPICDefaultForced() const override { return true; }
+ bool HasNativeLLVMSupport() const override { return true; }
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
+ void addClangTargetOptions(
+ const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadingKind) const override;
+
+ void addProfileRTLibs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
CXXStdlibType GetDefaultCXXStdlibType() const override;
RuntimeLibType GetDefaultRuntimeLibType() const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp
index 4a7413112b55..b3c9d5908654 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp
@@ -9,20 +9,22 @@
#include "AMDGPU.h"
#include "CommonArgs.h"
#include "clang/Basic/TargetID.h"
+#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/Error.h"
-#include "llvm/Support/FileUtilities.h"
#include "llvm/Support/LineIterator.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/Process.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/TargetParser/Host.h"
+#include <optional>
#include <system_error>
-#define AMDGPU_ARCH_PROGRAM_NAME "amdgpu-arch"
-
using namespace clang::driver;
using namespace clang::driver::tools;
using namespace clang::driver::toolchains;
@@ -47,7 +49,7 @@ RocmInstallationDetector::findSPACKPackage(const Candidate &Cand,
FileEnd;
File != FileEnd && !EC; File.increment(EC)) {
llvm::StringRef FileName = llvm::sys::path::filename(File->path());
- if (FileName.startswith(Prefix)) {
+ if (FileName.starts_with(Prefix)) {
SubDirs.push_back(FileName);
if (SubDirs.size() > 1)
break;
@@ -82,15 +84,16 @@ void RocmInstallationDetector::scanLibDevicePath(llvm::StringRef Path) {
!EC && LI != LE; LI = LI.increment(EC)) {
StringRef FilePath = LI->path();
StringRef FileName = llvm::sys::path::filename(FilePath);
- if (!FileName.endswith(Suffix))
+ if (!FileName.ends_with(Suffix))
continue;
StringRef BaseName;
- if (FileName.endswith(Suffix2))
+ if (FileName.ends_with(Suffix2))
BaseName = FileName.drop_back(Suffix2.size());
- else if (FileName.endswith(Suffix))
+ else if (FileName.ends_with(Suffix))
BaseName = FileName.drop_back(Suffix.size());
+ const StringRef ABIVersionPrefix = "oclc_abi_version_";
if (BaseName == "ocml") {
OCML = FilePath;
} else if (BaseName == "ockl") {
@@ -121,11 +124,17 @@ void RocmInstallationDetector::scanLibDevicePath(llvm::StringRef Path) {
WavefrontSize64.On = FilePath;
} else if (BaseName == "oclc_wavefrontsize64_off") {
WavefrontSize64.Off = FilePath;
+ } else if (BaseName.starts_with(ABIVersionPrefix)) {
+ unsigned ABIVersionNumber;
+ if (BaseName.drop_front(ABIVersionPrefix.size())
+ .getAsInteger(/*Redex=*/0, ABIVersionNumber))
+ continue;
+ ABIVersionMap[ABIVersionNumber] = FilePath.str();
} else {
// Process all bitcode filenames that look like
// ocl_isa_version_XXX.amdgcn.bc
const StringRef DeviceLibPrefix = "oclc_isa_version_";
- if (!BaseName.startswith(DeviceLibPrefix))
+ if (!BaseName.starts_with(DeviceLibPrefix))
continue;
StringRef IsaVersionNumber =
@@ -190,9 +199,10 @@ RocmInstallationDetector::getInstallationPathCandidates() {
ROCmSearchDirs.emplace_back(RocmPathArg.str());
DoPrintROCmSearchDirs();
return ROCmSearchDirs;
- } else if (const char *RocmPathEnv = ::getenv("ROCM_PATH")) {
- if (!StringRef(RocmPathEnv).empty()) {
- ROCmSearchDirs.emplace_back(RocmPathEnv);
+ } else if (std::optional<std::string> RocmPathEnv =
+ llvm::sys::Process::GetEnv("ROCM_PATH")) {
+ if (!RocmPathEnv->empty()) {
+ ROCmSearchDirs.emplace_back(std::move(*RocmPathEnv));
DoPrintROCmSearchDirs();
return ROCmSearchDirs;
}
@@ -220,7 +230,7 @@ RocmInstallationDetector::getInstallationPathCandidates() {
// <rocm_root>/llvm-amdgpu-<rocm_release_string>-<hash>/bin directory.
// We only consider the parent directory of llvm-amdgpu package as ROCm
// installation candidate for SPACK.
- if (ParentName.startswith("llvm-amdgpu-")) {
+ if (ParentName.starts_with("llvm-amdgpu-")) {
auto SPACKPostfix =
ParentName.drop_front(strlen("llvm-amdgpu-")).split('-');
auto SPACKReleaseStr = SPACKPostfix.first;
@@ -233,7 +243,7 @@ RocmInstallationDetector::getInstallationPathCandidates() {
// Some versions of the rocm llvm package install to /opt/rocm/llvm/bin
// Some versions of the aomp package install to /opt/rocm/aomp/bin
- if (ParentName == "llvm" || ParentName.startswith("aomp"))
+ if (ParentName == "llvm" || ParentName.starts_with("aomp"))
ParentDir = llvm::sys::path::parent_path(ParentDir);
return Candidate(ParentDir.str(), /*StrictChecking=*/true);
@@ -282,7 +292,7 @@ RocmInstallationDetector::getInstallationPathCandidates() {
FileEnd;
File != FileEnd && !EC; File.increment(EC)) {
llvm::StringRef FileName = llvm::sys::path::filename(File->path());
- if (!FileName.startswith("rocm-"))
+ if (!FileName.starts_with("rocm-"))
continue;
if (LatestROCm.empty()) {
LatestROCm = FileName.str();
@@ -299,6 +309,11 @@ RocmInstallationDetector::getInstallationPathCandidates() {
ROCmSearchDirs.emplace_back(D.SysRoot + "/opt/" + LatestROCm,
/*StrictChecking=*/true);
+ ROCmSearchDirs.emplace_back(D.SysRoot + "/usr/local",
+ /*StrictChecking=*/true);
+ ROCmSearchDirs.emplace_back(D.SysRoot + "/usr",
+ /*StrictChecking=*/true);
+
DoPrintROCmSearchDirs();
return ROCmSearchDirs;
}
@@ -314,6 +329,20 @@ RocmInstallationDetector::RocmInstallationDetector(
RocmDeviceLibPathArg =
Args.getAllArgValues(clang::driver::options::OPT_rocm_device_lib_path_EQ);
HIPPathArg = Args.getLastArgValue(clang::driver::options::OPT_hip_path_EQ);
+ HIPStdParPathArg =
+ Args.getLastArgValue(clang::driver::options::OPT_hipstdpar_path_EQ);
+ HasHIPStdParLibrary =
+ !HIPStdParPathArg.empty() && D.getVFS().exists(HIPStdParPathArg +
+ "/hipstdpar_lib.hpp");
+ HIPRocThrustPathArg =
+ Args.getLastArgValue(clang::driver::options::OPT_hipstdpar_thrust_path_EQ);
+ HasRocThrustLibrary = !HIPRocThrustPathArg.empty() &&
+ D.getVFS().exists(HIPRocThrustPathArg + "/thrust");
+ HIPRocPrimPathArg =
+ Args.getLastArgValue(clang::driver::options::OPT_hipstdpar_prim_path_EQ);
+ HasRocPrimLibrary = !HIPRocPrimPathArg.empty() &&
+ D.getVFS().exists(HIPRocPrimPathArg + "/rocprim");
+
if (auto *A = Args.getLastArg(clang::driver::options::OPT_hip_version_EQ)) {
HIPVersionArg = A->getValue();
unsigned Major = ~0U;
@@ -357,8 +386,9 @@ void RocmInstallationDetector::detectDeviceLibrary() {
if (!RocmDeviceLibPathArg.empty())
LibDevicePath = RocmDeviceLibPathArg[RocmDeviceLibPathArg.size() - 1];
- else if (const char *LibPathEnv = ::getenv("HIP_DEVICE_LIB_PATH"))
- LibDevicePath = LibPathEnv;
+ else if (std::optional<std::string> LibPathEnv =
+ llvm::sys::Process::GetEnv("HIP_DEVICE_LIB_PATH"))
+ LibDevicePath = std::move(*LibPathEnv);
auto &FS = D.getVFS();
if (!LibDevicePath.empty()) {
@@ -373,69 +403,57 @@ void RocmInstallationDetector::detectDeviceLibrary() {
return;
}
- // The install path situation in old versions of ROCm is a real mess, and
- // use a different install layout. Multiple copies of the device libraries
- // exist for each frontend project, and differ depending on which build
- // system produced the packages. Standalone OpenCL builds also have a
- // different directory structure from the ROCm OpenCL package.
- auto &ROCmDirs = getInstallationPathCandidates();
- for (const auto &Candidate : ROCmDirs) {
- auto CandidatePath = Candidate.Path;
-
- // Check device library exists at the given path.
- auto CheckDeviceLib = [&](StringRef Path) {
- bool CheckLibDevice = (!NoBuiltinLibs || Candidate.StrictChecking);
- if (CheckLibDevice && !FS.exists(Path))
- return false;
-
- scanLibDevicePath(Path);
+ // Check device library exists at the given path.
+ auto CheckDeviceLib = [&](StringRef Path, bool StrictChecking) {
+ bool CheckLibDevice = (!NoBuiltinLibs || StrictChecking);
+ if (CheckLibDevice && !FS.exists(Path))
+ return false;
- if (!NoBuiltinLibs) {
- // Check that the required non-target libraries are all available.
- if (!allGenericLibsValid())
- return false;
+ scanLibDevicePath(Path);
- // Check that we have found at least one libdevice that we can link in
- // if -nobuiltinlib hasn't been specified.
- if (LibDeviceMap.empty())
- return false;
- }
- return true;
- };
+ if (!NoBuiltinLibs) {
+ // Check that the required non-target libraries are all available.
+ if (!allGenericLibsValid())
+ return false;
- // The possible structures are:
- // - ${ROCM_ROOT}/amdgcn/bitcode/*
- // - ${ROCM_ROOT}/lib/*
- // - ${ROCM_ROOT}/lib/bitcode/*
- // so try to detect these layouts.
- static constexpr std::array<const char *, 2> SubDirsList[] = {
- {"amdgcn", "bitcode"},
- {"lib", ""},
- {"lib", "bitcode"},
- };
+ // Check that we have found at least one libdevice that we can link in
+ // if -nobuiltinlib hasn't been specified.
+ if (LibDeviceMap.empty())
+ return false;
+ }
+ return true;
+ };
- // Make a path by appending sub-directories to InstallPath.
- auto MakePath = [&](const llvm::ArrayRef<const char *> &SubDirs) {
- auto Path = CandidatePath;
- for (auto SubDir : SubDirs)
- llvm::sys::path::append(Path, SubDir);
- return Path;
- };
+ // Find device libraries in <LLVM_DIR>/lib/clang/<ver>/lib/amdgcn/bitcode
+ LibDevicePath = D.ResourceDir;
+ llvm::sys::path::append(LibDevicePath, CLANG_INSTALL_LIBDIR_BASENAME,
+ "amdgcn", "bitcode");
+ HasDeviceLibrary = CheckDeviceLib(LibDevicePath, true);
+ if (HasDeviceLibrary)
+ return;
- for (auto SubDirs : SubDirsList) {
- LibDevicePath = MakePath(SubDirs);
- HasDeviceLibrary = CheckDeviceLib(LibDevicePath);
- if (HasDeviceLibrary)
- return;
- }
+ // Find device libraries in a legacy ROCm directory structure
+ // ${ROCM_ROOT}/amdgcn/bitcode/*
+ auto &ROCmDirs = getInstallationPathCandidates();
+ for (const auto &Candidate : ROCmDirs) {
+ LibDevicePath = Candidate.Path;
+ llvm::sys::path::append(LibDevicePath, "amdgcn", "bitcode");
+ HasDeviceLibrary = CheckDeviceLib(LibDevicePath, Candidate.StrictChecking);
+ if (HasDeviceLibrary)
+ return;
}
}
void RocmInstallationDetector::detectHIPRuntime() {
SmallVector<Candidate, 4> HIPSearchDirs;
if (!HIPPathArg.empty())
- HIPSearchDirs.emplace_back(HIPPathArg.str(), /*StrictChecking=*/true);
- else
+ HIPSearchDirs.emplace_back(HIPPathArg.str());
+ else if (std::optional<std::string> HIPPathEnv =
+ llvm::sys::Process::GetEnv("HIP_PATH")) {
+ if (!HIPPathEnv->empty())
+ HIPSearchDirs.emplace_back(std::move(*HIPPathEnv));
+ }
+ if (HIPSearchDirs.empty())
HIPSearchDirs.append(getInstallationPathCandidates());
auto &FS = D.getVFS();
@@ -454,18 +472,41 @@ void RocmInstallationDetector::detectHIPRuntime() {
llvm::sys::path::append(IncludePath, "include");
LibPath = InstallPath;
llvm::sys::path::append(LibPath, "lib");
-
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> VersionFile =
- FS.getBufferForFile(BinPath + "/.hipVersion");
- if (!VersionFile && Candidate.StrictChecking)
- continue;
-
- if (HIPVersionArg.empty() && VersionFile)
- if (parseHIPVersionFile((*VersionFile)->getBuffer()))
+ SharePath = InstallPath;
+ llvm::sys::path::append(SharePath, "share");
+
+ // Get parent of InstallPath and append "share"
+ SmallString<0> ParentSharePath = llvm::sys::path::parent_path(InstallPath);
+ llvm::sys::path::append(ParentSharePath, "share");
+
+ auto Append = [](SmallString<0> &path, const Twine &a, const Twine &b = "",
+ const Twine &c = "", const Twine &d = "") {
+ SmallString<0> newpath = path;
+ llvm::sys::path::append(newpath, a, b, c, d);
+ return newpath;
+ };
+ // If HIP version file can be found and parsed, use HIP version from there.
+ for (const auto &VersionFilePath :
+ {Append(SharePath, "hip", "version"),
+ Append(ParentSharePath, "hip", "version"),
+ Append(BinPath, ".hipVersion")}) {
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> VersionFile =
+ FS.getBufferForFile(VersionFilePath);
+ if (!VersionFile)
continue;
+ if (HIPVersionArg.empty() && VersionFile)
+ if (parseHIPVersionFile((*VersionFile)->getBuffer()))
+ continue;
- HasHIPRuntime = true;
- return;
+ HasHIPRuntime = true;
+ return;
+ }
+ // Otherwise, if -rocm-path is specified (no strict checking), use the
+ // default HIP version or specified by --hip-version.
+ if (!Candidate.StrictChecking) {
+ HasHIPRuntime = true;
+ return;
+ }
}
HasHIPRuntime = false;
}
@@ -478,7 +519,9 @@ void RocmInstallationDetector::print(raw_ostream &OS) const {
void RocmInstallationDetector::AddHIPIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- bool UsesRuntimeWrapper = VersionMajorMinor > llvm::VersionTuple(3, 5);
+ bool UsesRuntimeWrapper = VersionMajorMinor > llvm::VersionTuple(3, 5) &&
+ !DriverArgs.hasArg(options::OPT_nohipwrapperinc);
+ bool HasHipStdPar = DriverArgs.hasArg(options::OPT_hipstdpar);
if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
// HIP header includes standard library wrapper headers under clang
@@ -501,18 +544,66 @@ void RocmInstallationDetector::AddHIPIncludeArgs(const ArgList &DriverArgs,
CC1Args.push_back(DriverArgs.MakeArgString(P));
}
- if (DriverArgs.hasArg(options::OPT_nogpuinc))
+ const auto HandleHipStdPar = [=, &DriverArgs, &CC1Args]() {
+ StringRef Inc = getIncludePath();
+ auto &FS = D.getVFS();
+
+ if (!hasHIPStdParLibrary())
+ if (!HIPStdParPathArg.empty() ||
+ !FS.exists(Inc + "/thrust/system/hip/hipstdpar/hipstdpar_lib.hpp")) {
+ D.Diag(diag::err_drv_no_hipstdpar_lib);
+ return;
+ }
+ if (!HasRocThrustLibrary && !FS.exists(Inc + "/thrust")) {
+ D.Diag(diag::err_drv_no_hipstdpar_thrust_lib);
+ return;
+ }
+ if (!HasRocPrimLibrary && !FS.exists(Inc + "/rocprim")) {
+ D.Diag(diag::err_drv_no_hipstdpar_prim_lib);
+ return;
+ }
+ const char *ThrustPath;
+ if (HasRocThrustLibrary)
+ ThrustPath = DriverArgs.MakeArgString(HIPRocThrustPathArg);
+ else
+ ThrustPath = DriverArgs.MakeArgString(Inc + "/thrust");
+
+ const char *HIPStdParPath;
+ if (hasHIPStdParLibrary())
+ HIPStdParPath = DriverArgs.MakeArgString(HIPStdParPathArg);
+ else
+ HIPStdParPath = DriverArgs.MakeArgString(StringRef(ThrustPath) +
+ "/system/hip/hipstdpar");
+
+ const char *PrimPath;
+ if (HasRocPrimLibrary)
+ PrimPath = DriverArgs.MakeArgString(HIPRocPrimPathArg);
+ else
+ PrimPath = DriverArgs.MakeArgString(getIncludePath() + "/rocprim");
+
+ CC1Args.append({"-idirafter", ThrustPath, "-idirafter", PrimPath,
+ "-idirafter", HIPStdParPath, "-include",
+ "hipstdpar_lib.hpp"});
+ };
+
+ if (DriverArgs.hasArg(options::OPT_nogpuinc)) {
+ if (HasHipStdPar)
+ HandleHipStdPar();
+
return;
+ }
if (!hasHIPRuntime()) {
D.Diag(diag::err_drv_no_hip_runtime);
return;
}
- CC1Args.push_back("-internal-isystem");
+ CC1Args.push_back("-idirafter");
CC1Args.push_back(DriverArgs.MakeArgString(getIncludePath()));
if (UsesRuntimeWrapper)
CC1Args.append({"-include", "__clang_hip_runtime_wrapper.h"});
+ if (HasHipStdPar)
+ HandleHipStdPar();
}
void amdgpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -523,9 +614,18 @@ void amdgpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
std::string Linker = getToolChain().GetProgramPath(getShortName());
ArgStringList CmdArgs;
+ CmdArgs.push_back("--no-undefined");
+ CmdArgs.push_back("-shared");
+
addLinkerCompressDebugSectionsOption(getToolChain(), Args, CmdArgs);
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
- CmdArgs.push_back("-shared");
+ if (C.getDriver().isUsingLTO())
+ addLTOOptions(getToolChain(), Args, CmdArgs, Output, Inputs[0],
+ C.getDriver().getLTOMode() == LTOK_Thin);
+ else if (Args.hasArg(options::OPT_mcpu_EQ))
+ CmdArgs.push_back(Args.MakeArgString(
+ "-plugin-opt=mcpu=" + Args.getLastArgValue(options::OPT_mcpu_EQ)));
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
C.addCommand(std::make_unique<Command>(
@@ -544,7 +644,7 @@ void amdgpu::getAMDGPUTargetFeatures(const Driver &D,
llvm::StringMap<bool> FeatureMap;
auto OptionalGpuArch = parseTargetID(Triple, TargetID, &FeatureMap);
if (OptionalGpuArch) {
- StringRef GpuArch = OptionalGpuArch.getValue();
+ StringRef GpuArch = *OptionalGpuArch;
// Iterate through all possible target ID features for the given GPU.
// If it is mapped to true, add +feature.
// If it is mapped to false, add -feature.
@@ -563,8 +663,8 @@ void amdgpu::getAMDGPUTargetFeatures(const Driver &D,
options::OPT_mno_wavefrontsize64, false))
Features.push_back("+wavefrontsize64");
- handleTargetFeaturesGroup(
- Args, Features, options::OPT_m_amdgpu_Features_Group);
+ handleTargetFeaturesGroup(D, Triple, Args, Features,
+ options::OPT_m_amdgpu_Features_Group);
}
/// AMDGPU Toolchain
@@ -596,9 +696,28 @@ AMDGPUToolChain::TranslateArgs(const DerivedArgList &Args, StringRef BoundArch,
if (!DAL)
DAL = new DerivedArgList(Args.getBaseArgs());
- for (Arg *A : Args) {
- if (!shouldSkipArgument(A))
- DAL->append(A);
+ for (Arg *A : Args)
+ DAL->append(A);
+
+ // Replace -mcpu=native with detected GPU.
+ Arg *LastMCPUArg = DAL->getLastArg(options::OPT_mcpu_EQ);
+ if (LastMCPUArg && StringRef(LastMCPUArg->getValue()) == "native") {
+ DAL->eraseArg(options::OPT_mcpu_EQ);
+ auto GPUsOrErr = getSystemGPUArchs(Args);
+ if (!GPUsOrErr) {
+ getDriver().Diag(diag::err_drv_undetermined_gpu_arch)
+ << llvm::Triple::getArchTypeName(getArch())
+ << llvm::toString(GPUsOrErr.takeError()) << "-mcpu";
+ } else {
+ auto &GPUs = *GPUsOrErr;
+ if (GPUs.size() > 1) {
+ getDriver().Diag(diag::warn_drv_multi_gpu_arch)
+ << llvm::Triple::getArchTypeName(getArch())
+ << llvm::join(GPUs, ", ") << "-mcpu";
+ }
+ DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_mcpu_EQ),
+ Args.MakeArgString(GPUs.front()));
+ }
}
checkTargetID(*DAL);
@@ -688,7 +807,7 @@ bool AMDGPUToolChain::isWave64(const llvm::opt::ArgList &DriverArgs,
ROCMToolChain::ROCMToolChain(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: AMDGPUToolChain(D, Triple, Args) {
- RocmInstallation.detectDeviceLibrary();
+ RocmInstallation->detectDeviceLibrary();
}
void AMDGPUToolChain::addClangTargetOptions(
@@ -699,8 +818,7 @@ void AMDGPUToolChain::addClangTargetOptions(
// supported for the foreseeable future.
if (!DriverArgs.hasArg(options::OPT_fvisibility_EQ,
options::OPT_fvisibility_ms_compat)) {
- CC1Args.push_back("-fvisibility");
- CC1Args.push_back("hidden");
+ CC1Args.push_back("-fvisibility=hidden");
CC1Args.push_back("-fapply-global-visibility-to-externs");
}
}
@@ -715,14 +833,14 @@ AMDGPUToolChain::ParsedTargetIDType
AMDGPUToolChain::getParsedTargetID(const llvm::opt::ArgList &DriverArgs) const {
StringRef TargetID = DriverArgs.getLastArgValue(options::OPT_mcpu_EQ);
if (TargetID.empty())
- return {None, None, None};
+ return {std::nullopt, std::nullopt, std::nullopt};
llvm::StringMap<bool> FeatureMap;
auto OptionalGpuArch = parseTargetID(getTriple(), TargetID, &FeatureMap);
if (!OptionalGpuArch)
- return {TargetID.str(), None, None};
+ return {TargetID.str(), std::nullopt, std::nullopt};
- return {TargetID.str(), OptionalGpuArch.getValue().str(), FeatureMap};
+ return {TargetID.str(), OptionalGpuArch->str(), FeatureMap};
}
void AMDGPUToolChain::checkTargetID(
@@ -730,80 +848,33 @@ void AMDGPUToolChain::checkTargetID(
auto PTID = getParsedTargetID(DriverArgs);
if (PTID.OptionalTargetID && !PTID.OptionalGPUArch) {
getDriver().Diag(clang::diag::err_drv_bad_target_id)
- << PTID.OptionalTargetID.getValue();
+ << *PTID.OptionalTargetID;
}
}
-llvm::Error
-AMDGPUToolChain::detectSystemGPUs(const ArgList &Args,
- SmallVector<std::string, 1> &GPUArchs) const {
+Expected<SmallVector<std::string>>
+AMDGPUToolChain::getSystemGPUArchs(const ArgList &Args) const {
+ // Detect AMD GPUs availible on the system.
std::string Program;
if (Arg *A = Args.getLastArg(options::OPT_amdgpu_arch_tool_EQ))
Program = A->getValue();
else
- Program = GetProgramPath(AMDGPU_ARCH_PROGRAM_NAME);
- llvm::SmallString<64> OutputFile;
- llvm::sys::fs::createTemporaryFile("print-system-gpus", "" /* No Suffix */,
- OutputFile);
- llvm::FileRemover OutputRemover(OutputFile.c_str());
- llvm::Optional<llvm::StringRef> Redirects[] = {
- {""},
- OutputFile.str(),
- {""},
- };
-
- std::string ErrorMessage;
- if (int Result = llvm::sys::ExecuteAndWait(
- Program.c_str(), {}, {}, Redirects, /* SecondsToWait */ 0,
- /*MemoryLimit*/ 0, &ErrorMessage)) {
- if (Result > 0) {
- ErrorMessage = "Exited with error code " + std::to_string(Result);
- } else if (Result == -1) {
- ErrorMessage = "Execute failed: " + ErrorMessage;
- } else {
- ErrorMessage = "Crashed: " + ErrorMessage;
- }
+ Program = GetProgramPath("amdgpu-arch");
- return llvm::createStringError(std::error_code(),
- Program + ": " + ErrorMessage);
- }
-
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> OutputBuf =
- llvm::MemoryBuffer::getFile(OutputFile.c_str());
- if (!OutputBuf) {
- return llvm::createStringError(OutputBuf.getError(),
- "Failed to read stdout of " + Program +
- ": " + OutputBuf.getError().message());
- }
-
- for (llvm::line_iterator LineIt(**OutputBuf); !LineIt.is_at_end(); ++LineIt) {
- GPUArchs.push_back(LineIt->str());
- }
- return llvm::Error::success();
-}
+ auto StdoutOrErr = executeToolChainProgram(Program);
+ if (!StdoutOrErr)
+ return StdoutOrErr.takeError();
-llvm::Error AMDGPUToolChain::getSystemGPUArch(const ArgList &Args,
- std::string &GPUArch) const {
- // detect the AMDGPU installed in system
SmallVector<std::string, 1> GPUArchs;
- auto Err = detectSystemGPUs(Args, GPUArchs);
- if (Err) {
- return Err;
- }
- if (GPUArchs.empty()) {
+ for (StringRef Arch : llvm::split((*StdoutOrErr)->getBuffer(), "\n"))
+ if (!Arch.empty())
+ GPUArchs.push_back(Arch.str());
+
+ if (GPUArchs.empty())
return llvm::createStringError(std::error_code(),
"No AMD GPU detected in the system");
- }
- GPUArch = GPUArchs[0];
- if (GPUArchs.size() > 1) {
- bool AllSame = std::all_of(
- GPUArchs.begin(), GPUArchs.end(),
- [&](const StringRef &GPUArch) { return GPUArch == GPUArchs.front(); });
- if (!AllSame)
- return llvm::createStringError(
- std::error_code(), "Multiple AMD GPUs found with different archs");
- }
- return llvm::Error::success();
+
+ return std::move(GPUArchs);
}
void ROCMToolChain::addClangTargetOptions(
@@ -821,20 +892,16 @@ void ROCMToolChain::addClangTargetOptions(
if (DriverArgs.hasArg(options::OPT_nogpulib))
return;
- if (!RocmInstallation.hasDeviceLibrary()) {
- getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 0;
- return;
- }
-
// Get the device name and canonicalize it
const StringRef GpuArch = getGPUArch(DriverArgs);
auto Kind = llvm::AMDGPU::parseArchAMDGCN(GpuArch);
const StringRef CanonArch = llvm::AMDGPU::getArchNameAMDGCN(Kind);
- std::string LibDeviceFile = RocmInstallation.getLibDeviceFile(CanonArch);
- if (LibDeviceFile.empty()) {
- getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 1 << GpuArch;
+ StringRef LibDeviceFile = RocmInstallation->getLibDeviceFile(CanonArch);
+ auto ABIVer = DeviceLibABIVersion::fromCodeObjectVersion(
+ getAMDGPUCodeObjectVersion(getDriver(), DriverArgs));
+ if (!RocmInstallation->checkCommonBitcodeLibs(CanonArch, LibDeviceFile,
+ ABIVer))
return;
- }
bool Wave64 = isWave64(DriverArgs, Kind);
@@ -852,59 +919,75 @@ void ROCMToolChain::addClangTargetOptions(
// Add the OpenCL specific bitcode library.
llvm::SmallVector<std::string, 12> BCLibs;
- BCLibs.push_back(RocmInstallation.getOpenCLPath().str());
+ BCLibs.push_back(RocmInstallation->getOpenCLPath().str());
// Add the generic set of libraries.
- BCLibs.append(RocmInstallation.getCommonBitcodeLibs(
+ BCLibs.append(RocmInstallation->getCommonBitcodeLibs(
DriverArgs, LibDeviceFile, Wave64, DAZ, FiniteOnly, UnsafeMathOpt,
- FastRelaxedMath, CorrectSqrt));
+ FastRelaxedMath, CorrectSqrt, ABIVer, false));
- llvm::for_each(BCLibs, [&](StringRef BCFile) {
+ for (StringRef BCFile : BCLibs) {
CC1Args.push_back("-mlink-builtin-bitcode");
CC1Args.push_back(DriverArgs.MakeArgString(BCFile));
- });
+ }
+}
+
+bool RocmInstallationDetector::checkCommonBitcodeLibs(
+ StringRef GPUArch, StringRef LibDeviceFile,
+ DeviceLibABIVersion ABIVer) const {
+ if (!hasDeviceLibrary()) {
+ D.Diag(diag::err_drv_no_rocm_device_lib) << 0;
+ return false;
+ }
+ if (LibDeviceFile.empty()) {
+ D.Diag(diag::err_drv_no_rocm_device_lib) << 1 << GPUArch;
+ return false;
+ }
+ if (ABIVer.requiresLibrary() && getABIVersionPath(ABIVer).empty()) {
+ D.Diag(diag::err_drv_no_rocm_device_lib) << 2 << ABIVer.toString();
+ return false;
+ }
+ return true;
}
llvm::SmallVector<std::string, 12>
RocmInstallationDetector::getCommonBitcodeLibs(
const llvm::opt::ArgList &DriverArgs, StringRef LibDeviceFile, bool Wave64,
bool DAZ, bool FiniteOnly, bool UnsafeMathOpt, bool FastRelaxedMath,
- bool CorrectSqrt) const {
-
+ bool CorrectSqrt, DeviceLibABIVersion ABIVer, bool isOpenMP = false) const {
llvm::SmallVector<std::string, 12> BCLibs;
auto AddBCLib = [&](StringRef BCFile) { BCLibs.push_back(BCFile.str()); };
AddBCLib(getOCMLPath());
- AddBCLib(getOCKLPath());
+ if (!isOpenMP)
+ AddBCLib(getOCKLPath());
AddBCLib(getDenormalsAreZeroPath(DAZ));
AddBCLib(getUnsafeMathPath(UnsafeMathOpt || FastRelaxedMath));
AddBCLib(getFiniteOnlyPath(FiniteOnly || FastRelaxedMath));
AddBCLib(getCorrectlyRoundedSqrtPath(CorrectSqrt));
AddBCLib(getWavefrontSize64Path(Wave64));
AddBCLib(LibDeviceFile);
+ auto ABIVerPath = getABIVersionPath(ABIVer);
+ if (!ABIVerPath.empty())
+ AddBCLib(ABIVerPath);
return BCLibs;
}
-bool AMDGPUToolChain::shouldSkipArgument(const llvm::opt::Arg *A) const {
- Option O = A->getOption();
- if (O.matches(options::OPT_fPIE) || O.matches(options::OPT_fpie))
- return true;
- return false;
-}
-
llvm::SmallVector<std::string, 12>
ROCMToolChain::getCommonDeviceLibNames(const llvm::opt::ArgList &DriverArgs,
- const std::string &GPUArch) const {
+ const std::string &GPUArch,
+ bool isOpenMP) const {
auto Kind = llvm::AMDGPU::parseArchAMDGCN(GPUArch);
const StringRef CanonArch = llvm::AMDGPU::getArchNameAMDGCN(Kind);
- std::string LibDeviceFile = RocmInstallation.getLibDeviceFile(CanonArch);
- if (LibDeviceFile.empty()) {
- getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 1 << GPUArch;
+ StringRef LibDeviceFile = RocmInstallation->getLibDeviceFile(CanonArch);
+ auto ABIVer = DeviceLibABIVersion::fromCodeObjectVersion(
+ getAMDGPUCodeObjectVersion(getDriver(), DriverArgs));
+ if (!RocmInstallation->checkCommonBitcodeLibs(CanonArch, LibDeviceFile,
+ ABIVer))
return {};
- }
// If --hip-device-lib is not set, add the default bitcode libraries.
// TODO: There are way too many flags that change this. Do we need to check
@@ -921,10 +1004,10 @@ ROCMToolChain::getCommonDeviceLibNames(const llvm::opt::ArgList &DriverArgs,
options::OPT_fno_fast_math, false);
bool CorrectSqrt = DriverArgs.hasFlag(
options::OPT_fhip_fp32_correctly_rounded_divide_sqrt,
- options::OPT_fno_hip_fp32_correctly_rounded_divide_sqrt);
+ options::OPT_fno_hip_fp32_correctly_rounded_divide_sqrt, true);
bool Wave64 = isWave64(DriverArgs, Kind);
- return RocmInstallation.getCommonBitcodeLibs(
+ return RocmInstallation->getCommonBitcodeLibs(
DriverArgs, LibDeviceFile, Wave64, DAZ, FiniteOnly, UnsafeMathOpt,
- FastRelaxedMath, CorrectSqrt);
-} \ No newline at end of file
+ FastRelaxedMath, CorrectSqrt, ABIVer, isOpenMP);
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h
index a4bcf315ca76..b3361b1e3607 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h
@@ -16,7 +16,7 @@
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/TargetParser/TargetParser.h"
#include <map>
@@ -26,7 +26,7 @@ namespace driver {
namespace tools {
namespace amdgpu {
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
public:
Linker(const ToolChain &TC) : Tool("amdgpu::Linker", "ld.lld", TC) {}
bool isLinkJob() const override { return true; }
@@ -51,7 +51,7 @@ protected:
const std::map<options::ID, const StringRef> OptionsDefault;
Tool *buildLinker() const override;
- const StringRef getOptionDefault(options::ID OptID) const {
+ StringRef getOptionDefault(options::ID OptID) const {
auto opt = OptionsDefault.find(OptID);
assert(opt != OptionsDefault.end() && "No Default for Option");
return opt->second;
@@ -60,15 +60,15 @@ protected:
public:
AMDGPUToolChain(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
- unsigned GetDefaultDwarfVersion() const override { return 4; }
- bool IsIntegratedAssemblerDefault() const override { return true; }
- bool IsMathErrnoDefault() const override { return false; }
+ unsigned GetDefaultDwarfVersion() const override { return 5; }
- bool useIntegratedAs() const override { return true; }
+ bool IsMathErrnoDefault() const override { return false; }
bool isCrossCompiling() const override { return true; }
- bool isPICDefault() const override { return false; }
- bool isPIEDefault() const override { return false; }
- bool isPICDefaultForced() const override { return false; }
+ bool isPICDefault() const override { return true; }
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override {
+ return false;
+ }
+ bool isPICDefaultForced() const override { return true; }
bool SupportsProfiling() const override { return false; }
llvm::opt::DerivedArgList *
@@ -97,13 +97,10 @@ public:
/// Needed for translating LTO options.
const char *getDefaultLinker() const override { return "ld.lld"; }
- /// Should skip argument.
- bool shouldSkipArgument(const llvm::opt::Arg *Arg) const;
-
- /// Uses amdgpu_arch tool to get arch of the system GPU. Will return error
+ /// Uses amdgpu-arch tool to get arch of the system GPU. Will return error
/// if unable to find one.
- llvm::Error getSystemGPUArch(const llvm::opt::ArgList &Args,
- std::string &GPUArch) const;
+ virtual Expected<SmallVector<std::string>>
+ getSystemGPUArchs(const llvm::opt::ArgList &Args) const override;
protected:
/// Check and diagnose invalid target ID specified by -mcpu.
@@ -111,9 +108,9 @@ protected:
/// The struct type returned by getParsedTargetID.
struct ParsedTargetIDType {
- Optional<std::string> OptionalTargetID;
- Optional<std::string> OptionalGPUArch;
- Optional<llvm::StringMap<bool>> OptionalFeatures;
+ std::optional<std::string> OptionalTargetID;
+ std::optional<std::string> OptionalGPUArch;
+ std::optional<llvm::StringMap<bool>> OptionalFeatures;
};
/// Get target ID, GPU arch, and target ID features if the target ID is
@@ -124,8 +121,6 @@ protected:
/// Get GPU arch from -mcpu without checking.
StringRef getGPUArch(const llvm::opt::ArgList &DriverArgs) const;
- llvm::Error detectSystemGPUs(const llvm::opt::ArgList &Args,
- SmallVector<std::string, 1> &GPUArchs) const;
};
class LLVM_LIBRARY_VISIBILITY ROCMToolChain : public AMDGPUToolChain {
@@ -140,7 +135,8 @@ public:
// Returns a list of device library names shared by different languages
llvm::SmallVector<std::string, 12>
getCommonDeviceLibNames(const llvm::opt::ArgList &DriverArgs,
- const std::string &GPUArch) const;
+ const std::string &GPUArch,
+ bool isOpenMP = false) const;
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
index 135e3694434d..b012b7cb7293 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
@@ -16,6 +16,7 @@
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
+#include "clang/Driver/Tool.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FormatAdapters.h"
@@ -28,199 +29,13 @@ using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
-namespace {
-
-static const char *getOutputFileName(Compilation &C, StringRef Base,
- const char *Postfix,
- const char *Extension) {
- const char *OutputFileName;
- if (C.getDriver().isSaveTempsEnabled()) {
- OutputFileName =
- C.getArgs().MakeArgString(Base.str() + Postfix + "." + Extension);
- } else {
- std::string TmpName =
- C.getDriver().GetTemporaryPath(Base.str() + Postfix, Extension);
- OutputFileName = C.addTempFile(C.getArgs().MakeArgString(TmpName));
- }
- return OutputFileName;
-}
-
-static void addLLCOptArg(const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs) {
- if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
- StringRef OOpt = "0";
- if (A->getOption().matches(options::OPT_O4) ||
- A->getOption().matches(options::OPT_Ofast))
- OOpt = "3";
- else if (A->getOption().matches(options::OPT_O0))
- OOpt = "0";
- else if (A->getOption().matches(options::OPT_O)) {
- // Clang and opt support -Os/-Oz; llc only supports -O0, -O1, -O2 and -O3
- // so we map -Os/-Oz to -O2.
- // Only clang supports -Og, and maps it to -O1.
- // We map anything else to -O2.
- OOpt = llvm::StringSwitch<const char *>(A->getValue())
- .Case("1", "1")
- .Case("2", "2")
- .Case("3", "3")
- .Case("s", "2")
- .Case("z", "2")
- .Case("g", "1")
- .Default("0");
- }
- CmdArgs.push_back(Args.MakeArgString("-O" + OOpt));
- }
-}
-
-static bool checkSystemForAMDGPU(const ArgList &Args, const AMDGPUToolChain &TC,
- std::string &GPUArch) {
- if (auto Err = TC.getSystemGPUArch(Args, GPUArch)) {
- std::string ErrMsg =
- llvm::formatv("{0}", llvm::fmt_consume(std::move(Err)));
- TC.getDriver().Diag(diag::err_drv_undetermined_amdgpu_arch) << ErrMsg;
- return false;
- }
-
- return true;
-}
-} // namespace
-
-const char *AMDGCN::OpenMPLinker::constructLLVMLinkCommand(
- const toolchains::AMDGPUOpenMPToolChain &AMDGPUOpenMPTC, Compilation &C,
- const JobAction &JA, const InputInfoList &Inputs, const ArgList &Args,
- StringRef SubArchName, StringRef OutputFilePrefix) const {
- ArgStringList CmdArgs;
-
- for (const auto &II : Inputs)
- if (II.isFilename())
- CmdArgs.push_back(II.getFilename());
-
- if (Args.hasArg(options::OPT_l)) {
- auto Lm = Args.getAllArgValues(options::OPT_l);
- bool HasLibm = false;
- for (auto &Lib : Lm) {
- if (Lib == "m") {
- HasLibm = true;
- break;
- }
- }
-
- if (HasLibm) {
- SmallVector<std::string, 12> BCLibs =
- AMDGPUOpenMPTC.getCommonDeviceLibNames(Args, SubArchName.str());
- llvm::for_each(BCLibs, [&](StringRef BCFile) {
- CmdArgs.push_back(Args.MakeArgString(BCFile));
- });
- }
- }
-
- // Add an intermediate output file.
- CmdArgs.push_back("-o");
- const char *OutputFileName =
- getOutputFileName(C, OutputFilePrefix, "-linked", "bc");
- CmdArgs.push_back(OutputFileName);
- const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath("llvm-link"));
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs,
- InputInfo(&JA, Args.MakeArgString(OutputFileName))));
- return OutputFileName;
-}
-
-const char *AMDGCN::OpenMPLinker::constructLlcCommand(
- Compilation &C, const JobAction &JA, const InputInfoList &Inputs,
- const llvm::opt::ArgList &Args, llvm::StringRef SubArchName,
- llvm::StringRef OutputFilePrefix, const char *InputFileName,
- bool OutputIsAsm) const {
- // Construct llc command.
- ArgStringList LlcArgs;
- // The input to llc is the output from opt.
- LlcArgs.push_back(InputFileName);
- // Pass optimization arg to llc.
- addLLCOptArg(Args, LlcArgs);
- LlcArgs.push_back("-mtriple=amdgcn-amd-amdhsa");
- LlcArgs.push_back(Args.MakeArgString("-mcpu=" + SubArchName));
- LlcArgs.push_back(
- Args.MakeArgString(Twine("-filetype=") + (OutputIsAsm ? "asm" : "obj")));
-
- for (const Arg *A : Args.filtered(options::OPT_mllvm)) {
- LlcArgs.push_back(A->getValue(0));
- }
-
- // Add output filename
- LlcArgs.push_back("-o");
- const char *LlcOutputFile =
- getOutputFileName(C, OutputFilePrefix, "", OutputIsAsm ? "s" : "o");
- LlcArgs.push_back(LlcOutputFile);
- const char *Llc = Args.MakeArgString(getToolChain().GetProgramPath("llc"));
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Llc, LlcArgs, Inputs,
- InputInfo(&JA, Args.MakeArgString(LlcOutputFile))));
- return LlcOutputFile;
-}
-
-void AMDGCN::OpenMPLinker::constructLldCommand(
- Compilation &C, const JobAction &JA, const InputInfoList &Inputs,
- const InputInfo &Output, const llvm::opt::ArgList &Args,
- const char *InputFileName) const {
- // Construct lld command.
- // The output from ld.lld is an HSA code object file.
- ArgStringList LldArgs{"-flavor", "gnu", "--no-undefined",
- "-shared", "-o", Output.getFilename(),
- InputFileName};
-
- const char *Lld = Args.MakeArgString(getToolChain().GetProgramPath("lld"));
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Lld, LldArgs, Inputs,
- InputInfo(&JA, Args.MakeArgString(Output.getFilename()))));
-}
-
-// For amdgcn the inputs of the linker job are device bitcode and output is
-// object file. It calls llvm-link, opt, llc, then lld steps.
-void AMDGCN::OpenMPLinker::ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
- const ToolChain &TC = getToolChain();
- assert(getToolChain().getTriple().isAMDGCN() && "Unsupported target");
-
- const toolchains::AMDGPUOpenMPToolChain &AMDGPUOpenMPTC =
- static_cast<const toolchains::AMDGPUOpenMPToolChain &>(TC);
-
- std::string GPUArch = Args.getLastArgValue(options::OPT_march_EQ).str();
- if (GPUArch.empty()) {
- if (!checkSystemForAMDGPU(Args, AMDGPUOpenMPTC, GPUArch))
- return;
- }
-
- // Prefix for temporary file name.
- std::string Prefix;
- for (const auto &II : Inputs)
- if (II.isFilename())
- Prefix = llvm::sys::path::stem(II.getFilename()).str() + "-" + GPUArch;
- assert(Prefix.length() && "no linker inputs are files ");
-
- // Each command outputs different files.
- const char *LLVMLinkCommand = constructLLVMLinkCommand(
- AMDGPUOpenMPTC, C, JA, Inputs, Args, GPUArch, Prefix);
-
- // Produce readable assembly if save-temps is enabled.
- if (C.getDriver().isSaveTempsEnabled())
- constructLlcCommand(C, JA, Inputs, Args, GPUArch, Prefix, LLVMLinkCommand,
- /*OutputIsAsm=*/true);
- const char *LlcCommand = constructLlcCommand(C, JA, Inputs, Args, GPUArch,
- Prefix, LLVMLinkCommand);
- constructLldCommand(C, JA, Inputs, Output, Args, LlcCommand);
-}
-
AMDGPUOpenMPToolChain::AMDGPUOpenMPToolChain(const Driver &D,
const llvm::Triple &Triple,
const ToolChain &HostTC,
const ArgList &Args)
: ROCMToolChain(D, Triple, Args), HostTC(HostTC) {
// Lookup binaries into the driver directory, this is used to
- // discover the clang-offload-bundler executable.
+ // discover the 'amdgpu-arch' executable.
getProgramPaths().push_back(getDriver().Dir);
}
@@ -229,11 +44,8 @@ void AMDGPUOpenMPToolChain::addClangTargetOptions(
Action::OffloadKind DeviceOffloadingKind) const {
HostTC.addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadingKind);
- std::string GPUArch = DriverArgs.getLastArgValue(options::OPT_march_EQ).str();
- if (GPUArch.empty()) {
- if (!checkSystemForAMDGPU(DriverArgs, *this, GPUArch))
- return;
- }
+ StringRef GPUArch = DriverArgs.getLastArgValue(options::OPT_march_EQ);
+ assert(!GPUArch.empty() && "Must have an explicit GPU arch.");
assert(DeviceOffloadingKind == Action::OFK_OpenMP &&
"Only OpenMP offloading kinds are supported.");
@@ -245,15 +57,15 @@ void AMDGPUOpenMPToolChain::addClangTargetOptions(
if (DriverArgs.hasArg(options::OPT_nogpulib))
return;
- std::string BitcodeSuffix;
- if (DriverArgs.hasFlag(options::OPT_fopenmp_target_new_runtime,
- options::OPT_fno_openmp_target_new_runtime, false))
- BitcodeSuffix = "new-amdgcn-" + GPUArch;
- else
- BitcodeSuffix = "amdgcn-" + GPUArch;
+ for (auto BCFile : getDeviceLibs(DriverArgs)) {
+ CC1Args.push_back(BCFile.ShouldInternalize ? "-mlink-builtin-bitcode"
+ : "-mlink-bitcode-file");
+ CC1Args.push_back(DriverArgs.MakeArgString(BCFile.Path));
+ }
- addOpenMPDeviceRTL(getDriver(), DriverArgs, CC1Args, BitcodeSuffix,
- getTriple());
+ // Link the bitcode library late if we're using device LTO.
+ if (getDriver().isUsingLTO(/* IsOffload */ true))
+ return;
}
llvm::opt::DerivedArgList *AMDGPUOpenMPToolChain::TranslateArgs(
@@ -266,10 +78,33 @@ llvm::opt::DerivedArgList *AMDGPUOpenMPToolChain::TranslateArgs(
const OptTable &Opts = getDriver().getOpts();
- if (DeviceOffloadKind != Action::OFK_OpenMP) {
- for (Arg *A : Args) {
- DAL->append(A);
+ if (DeviceOffloadKind == Action::OFK_OpenMP) {
+ for (Arg *A : Args)
+ if (!llvm::is_contained(*DAL, A))
+ DAL->append(A);
+
+ if (!DAL->hasArg(options::OPT_march_EQ)) {
+ StringRef Arch = BoundArch;
+ if (Arch.empty()) {
+ auto ArchsOrErr = getSystemGPUArchs(Args);
+ if (!ArchsOrErr) {
+ std::string ErrMsg =
+ llvm::formatv("{0}", llvm::fmt_consume(ArchsOrErr.takeError()));
+ getDriver().Diag(diag::err_drv_undetermined_gpu_arch)
+ << llvm::Triple::getArchTypeName(getArch()) << ErrMsg << "-march";
+ Arch = CudaArchToString(CudaArch::HIPDefault);
+ } else {
+ Arch = Args.MakeArgString(ArchsOrErr->front());
+ }
+ }
+ DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ), Arch);
}
+
+ return DAL;
+ }
+
+ for (Arg *A : Args) {
+ DAL->append(A);
}
if (!BoundArch.empty()) {
@@ -281,11 +116,6 @@ llvm::opt::DerivedArgList *AMDGPUOpenMPToolChain::TranslateArgs(
return DAL;
}
-Tool *AMDGPUOpenMPToolChain::buildLinker() const {
- assert(getTriple().isAMDGCN());
- return new tools::AMDGCN::OpenMPLinker(*this);
-}
-
void AMDGPUOpenMPToolChain::addClangWarningOptions(
ArgStringList &CC1Args) const {
HostTC.addClangWarningOptions(CC1Args);
@@ -324,3 +154,24 @@ AMDGPUOpenMPToolChain::computeMSVCVersion(const Driver *D,
const ArgList &Args) const {
return HostTC.computeMSVCVersion(D, Args);
}
+
+llvm::SmallVector<ToolChain::BitCodeLibraryInfo, 12>
+AMDGPUOpenMPToolChain::getDeviceLibs(const llvm::opt::ArgList &Args) const {
+ if (Args.hasArg(options::OPT_nogpulib))
+ return {};
+
+ if (!RocmInstallation->hasDeviceLibrary()) {
+ getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 0;
+ return {};
+ }
+
+ StringRef GpuArch = getProcessorFromTargetID(
+ getTriple(), Args.getLastArgValue(options::OPT_march_EQ));
+
+ SmallVector<BitCodeLibraryInfo, 12> BCLibs;
+ for (auto BCLib : getCommonDeviceLibNames(Args, GpuArch.str(),
+ /*IsOpenMP=*/true))
+ BCLibs.emplace_back(BCLib);
+
+ return BCLibs;
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.h
index 233256bf7378..2be444a42c55 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.h
@@ -20,49 +20,6 @@ namespace toolchains {
class AMDGPUOpenMPToolChain;
}
-namespace tools {
-
-namespace AMDGCN {
-// Runs llvm-link/opt/llc/lld, which links multiple LLVM bitcode, together with
-// device library, then compiles it to ISA in a shared object.
-class LLVM_LIBRARY_VISIBILITY OpenMPLinker : public Tool {
-public:
- OpenMPLinker(const ToolChain &TC)
- : Tool("AMDGCN::OpenMPLinker", "amdgcn-link", TC) {}
-
- bool hasIntegratedCPP() const override { return false; }
-
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
-
-private:
- /// \return llvm-link output file name.
- const char *constructLLVMLinkCommand(
- const toolchains::AMDGPUOpenMPToolChain &AMDGPUOpenMPTC, Compilation &C,
- const JobAction &JA, const InputInfoList &Inputs,
- const llvm::opt::ArgList &Args, llvm::StringRef SubArchName,
- llvm::StringRef OutputFilePrefix) const;
-
- /// \return llc output file name.
- const char *constructLlcCommand(Compilation &C, const JobAction &JA,
- const InputInfoList &Inputs,
- const llvm::opt::ArgList &Args,
- llvm::StringRef SubArchName,
- llvm::StringRef OutputFilePrefix,
- const char *InputFileName,
- bool OutputIsAsm = false) const;
-
- void constructLldCommand(Compilation &C, const JobAction &JA,
- const InputInfoList &Inputs, const InputInfo &Output,
- const llvm::opt::ArgList &Args,
- const char *InputFileName) const;
-};
-
-} // end namespace AMDGCN
-} // end namespace tools
-
namespace toolchains {
class LLVM_LIBRARY_VISIBILITY AMDGPUOpenMPToolChain final
@@ -97,10 +54,10 @@ public:
computeMSVCVersion(const Driver *D,
const llvm::opt::ArgList &Args) const override;
- const ToolChain &HostTC;
+ llvm::SmallVector<BitCodeLibraryInfo, 12>
+ getDeviceLibs(const llvm::opt::ArgList &Args) const override;
-protected:
- Tool *buildLinker() const override;
+ const ToolChain &HostTC;
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.cpp
index f147292038a8..bb5c0e6db997 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.cpp
@@ -12,13 +12,12 @@
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/TargetParser/SubtargetFeature.h"
using namespace clang::driver;
using namespace clang::driver::toolchains;
@@ -28,9 +27,10 @@ using namespace llvm::opt;
namespace {
-const struct {
+// NOTE: This list has been synchronized with gcc-avr 7.3.0 and avr-libc 2.0.0.
+constexpr struct {
StringRef Name;
- std::string SubPath;
+ StringRef SubPath;
StringRef Family;
unsigned DataAddr;
} MCUInfo[] = {
@@ -62,6 +62,7 @@ const struct {
{"attiny261a", "avr25/tiny-stack", "avr25", 0x800060},
{"at86rf401", "avr25", "avr25", 0x800060},
{"ata5272", "avr25", "avr25", 0x800100},
+ {"ata6616c", "avr25", "avr25", 0x800100},
{"attiny4313", "avr25", "avr25", 0x800060},
{"attiny44", "avr25", "avr25", 0x800060},
{"attiny44a", "avr25", "avr25", 0x800060},
@@ -88,6 +89,8 @@ const struct {
{"at90usb82", "avr35", "avr35", 0x800100},
{"at90usb162", "avr35", "avr35", 0x800100},
{"ata5505", "avr35", "avr35", 0x800100},
+ {"ata6617c", "avr35", "avr35", 0x800100},
+ {"ata664251", "avr35", "avr35", 0x800100},
{"atmega8u2", "avr35", "avr35", 0x800100},
{"atmega16u2", "avr35", "avr35", 0x800100},
{"atmega32u2", "avr35", "avr35", 0x800100},
@@ -97,6 +100,7 @@ const struct {
{"atmega8a", "avr4", "avr4", 0x800060},
{"ata6285", "avr4", "avr4", 0x800100},
{"ata6286", "avr4", "avr4", 0x800100},
+ {"ata6612c", "avr4", "avr4", 0x800100},
{"atmega48", "avr4", "avr4", 0x800100},
{"atmega48a", "avr4", "avr4", 0x800100},
{"atmega48pa", "avr4", "avr4", 0x800100},
@@ -116,8 +120,17 @@ const struct {
{"at90pwm3", "avr4", "avr4", 0x800100},
{"at90pwm3b", "avr4", "avr4", 0x800100},
{"at90pwm81", "avr4", "avr4", 0x800100},
+ {"ata5702m322", "avr5", "avr5", 0x800200},
+ {"ata5782", "avr5", "avr5", 0x800200},
{"ata5790", "avr5", "avr5", 0x800100},
+ {"ata5790n", "avr5", "avr5", 0x800100},
+ {"ata5791", "avr5", "avr5", 0x800100},
{"ata5795", "avr5", "avr5", 0x800100},
+ {"ata5831", "avr5", "avr5", 0x800200},
+ {"ata6613c", "avr5", "avr5", 0x800100},
+ {"ata6614q", "avr5", "avr5", 0x800100},
+ {"ata8210", "avr5", "avr5", 0x800200},
+ {"ata8510", "avr5", "avr5", 0x800200},
{"atmega16", "avr5", "avr5", 0x800060},
{"atmega16a", "avr5", "avr5", 0x800060},
{"atmega161", "avr5", "avr5", 0x800060},
@@ -145,6 +158,7 @@ const struct {
{"atmega324a", "avr5", "avr5", 0x800100},
{"atmega324p", "avr5", "avr5", 0x800100},
{"atmega324pa", "avr5", "avr5", 0x800100},
+ {"atmega324pb", "avr5", "avr5", 0x800100},
{"atmega325", "avr5", "avr5", 0x800100},
{"atmega325a", "avr5", "avr5", 0x800100},
{"atmega325p", "avr5", "avr5", 0x800100},
@@ -155,6 +169,7 @@ const struct {
{"atmega3250pa", "avr5", "avr5", 0x800100},
{"atmega328", "avr5", "avr5", 0x800100},
{"atmega328p", "avr5", "avr5", 0x800100},
+ {"atmega328pb", "avr5", "avr5", 0x800100},
{"atmega329", "avr5", "avr5", 0x800100},
{"atmega329a", "avr5", "avr5", 0x800100},
{"atmega329p", "avr5", "avr5", 0x800100},
@@ -192,6 +207,7 @@ const struct {
{"atmega32hvb", "avr5", "avr5", 0x800100},
{"atmega32hvbrevb", "avr5", "avr5", 0x800100},
{"atmega64hve", "avr5", "avr5", 0x800100},
+ {"atmega64hve2", "avr5", "avr5", 0x800100},
{"at90can32", "avr5", "avr5", 0x800100},
{"at90can64", "avr5", "avr5", 0x800100},
{"at90pwm161", "avr5", "avr5", 0x800100},
@@ -232,17 +248,22 @@ const struct {
{"attiny10", "avrtiny", "avrtiny", 0x800040},
{"attiny20", "avrtiny", "avrtiny", 0x800040},
{"attiny40", "avrtiny", "avrtiny", 0x800040},
+ {"attiny102", "avrtiny", "avrtiny", 0x800040},
+ {"attiny104", "avrtiny", "avrtiny", 0x800040},
{"atxmega16a4", "avrxmega2", "avrxmega2", 0x802000},
{"atxmega16a4u", "avrxmega2", "avrxmega2", 0x802000},
{"atxmega16c4", "avrxmega2", "avrxmega2", 0x802000},
{"atxmega16d4", "avrxmega2", "avrxmega2", 0x802000},
{"atxmega32a4", "avrxmega2", "avrxmega2", 0x802000},
{"atxmega32a4u", "avrxmega2", "avrxmega2", 0x802000},
+ {"atxmega32c3", "avrxmega2", "avrxmega2", 0x802000},
{"atxmega32c4", "avrxmega2", "avrxmega2", 0x802000},
+ {"atxmega32d3", "avrxmega2", "avrxmega2", 0x802000},
{"atxmega32d4", "avrxmega2", "avrxmega2", 0x802000},
{"atxmega32e5", "avrxmega2", "avrxmega2", 0x802000},
{"atxmega16e5", "avrxmega2", "avrxmega2", 0x802000},
{"atxmega8e5", "avrxmega2", "avrxmega2", 0x802000},
+ {"atxmega64a3", "avrxmega4", "avrxmega4", 0x802000},
{"atxmega64a3u", "avrxmega4", "avrxmega4", 0x802000},
{"atxmega64a4u", "avrxmega4", "avrxmega4", 0x802000},
{"atxmega64b1", "avrxmega4", "avrxmega4", 0x802000},
@@ -274,6 +295,42 @@ const struct {
{"atxmega128a1", "avrxmega7", "avrxmega7", 0x802000},
{"atxmega128a1u", "avrxmega7", "avrxmega7", 0x802000},
{"atxmega128a4u", "avrxmega7", "avrxmega7", 0x802000},
+ {"attiny202", "avrxmega3/short-calls", "avrxmega3", 0x803F80},
+ {"attiny204", "avrxmega3/short-calls", "avrxmega3", 0x803F80},
+ {"attiny212", "avrxmega3/short-calls", "avrxmega3", 0x803F80},
+ {"attiny214", "avrxmega3/short-calls", "avrxmega3", 0x803F80},
+ {"attiny402", "avrxmega3/short-calls", "avrxmega3", 0x803F00},
+ {"attiny404", "avrxmega3/short-calls", "avrxmega3", 0x803F00},
+ {"attiny406", "avrxmega3/short-calls", "avrxmega3", 0x803F00},
+ {"attiny412", "avrxmega3/short-calls", "avrxmega3", 0x803F00},
+ {"attiny414", "avrxmega3/short-calls", "avrxmega3", 0x803F00},
+ {"attiny416", "avrxmega3/short-calls", "avrxmega3", 0x803F00},
+ {"attiny417", "avrxmega3/short-calls", "avrxmega3", 0x803F00},
+ {"attiny804", "avrxmega3/short-calls", "avrxmega3", 0x803E00},
+ {"attiny806", "avrxmega3/short-calls", "avrxmega3", 0x803E00},
+ {"attiny807", "avrxmega3/short-calls", "avrxmega3", 0x803E00},
+ {"attiny814", "avrxmega3/short-calls", "avrxmega3", 0x803E00},
+ {"attiny816", "avrxmega3/short-calls", "avrxmega3", 0x803E00},
+ {"attiny817", "avrxmega3/short-calls", "avrxmega3", 0x803E00},
+ {"atmega808", "avrxmega3/short-calls", "avrxmega3", 0x803C00},
+ {"atmega809", "avrxmega3/short-calls", "avrxmega3", 0x803C00},
+ {"atmega1608", "avrxmega3", "avrxmega3", 0x803800},
+ {"atmega1609", "avrxmega3", "avrxmega3", 0x803800},
+ {"atmega3208", "avrxmega3", "avrxmega3", 0x803000},
+ {"atmega3209", "avrxmega3", "avrxmega3", 0x803000},
+ {"atmega4808", "avrxmega3", "avrxmega3", 0x802800},
+ {"atmega4809", "avrxmega3", "avrxmega3", 0x802800},
+ {"attiny1604", "avrxmega3", "avrxmega3", 0x803C00},
+ {"attiny1606", "avrxmega3", "avrxmega3", 0x803C00},
+ {"attiny1607", "avrxmega3", "avrxmega3", 0x803C00},
+ {"attiny1614", "avrxmega3", "avrxmega3", 0x803800},
+ {"attiny1616", "avrxmega3", "avrxmega3", 0x803800},
+ {"attiny1617", "avrxmega3", "avrxmega3", 0x803800},
+ {"attiny1624", "avrxmega3", "avrxmega3", 0x803800},
+ {"attiny1626", "avrxmega3", "avrxmega3", 0x803800},
+ {"attiny1627", "avrxmega3", "avrxmega3", 0x803800},
+ {"attiny3216", "avrxmega3", "avrxmega3", 0x803800},
+ {"attiny3217", "avrxmega3", "avrxmega3", 0x803800},
};
std::string GetMCUSubPath(StringRef MCUName) {
@@ -283,21 +340,22 @@ std::string GetMCUSubPath(StringRef MCUName) {
return "";
}
-llvm::Optional<StringRef> GetMCUFamilyName(StringRef MCUName) {
+std::optional<StringRef> GetMCUFamilyName(StringRef MCUName) {
for (const auto &MCU : MCUInfo)
if (MCU.Name == MCUName)
- return Optional<StringRef>(MCU.Family);
- return Optional<StringRef>();
+ return std::optional<StringRef>(MCU.Family);
+ return std::nullopt;
}
-llvm::Optional<unsigned> GetMCUSectionAddressData(StringRef MCUName) {
+std::optional<unsigned> GetMCUSectionAddressData(StringRef MCUName) {
for (const auto &MCU : MCUInfo)
if (MCU.Name == MCUName && MCU.DataAddr > 0)
- return Optional<unsigned>(MCU.DataAddr);
- return Optional<unsigned>();
+ return std::optional<unsigned>(MCU.DataAddr);
+ return std::nullopt;
}
const StringRef PossibleAVRLibcLocations[] = {
+ "/avr",
"/usr/avr",
"/usr/lib/avr",
};
@@ -307,49 +365,18 @@ const StringRef PossibleAVRLibcLocations[] = {
/// AVR Toolchain
AVRToolChain::AVRToolChain(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
- : Generic_ELF(D, Triple, Args), LinkStdlib(false) {
+ : Generic_ELF(D, Triple, Args) {
GCCInstallation.init(Triple, Args);
+ if (getCPUName(D, Args, Triple).empty())
+ D.Diag(diag::warn_drv_avr_mcu_not_specified);
+
// Only add default libraries if the user hasn't explicitly opted out.
if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nodefaultlibs) &&
- !Args.hasArg(options::OPT_c /* does not apply when not linking */)) {
- std::string CPU = getCPUName(Args, Triple);
-
- if (CPU.empty()) {
- // We cannot link any standard libraries without an MCU specified.
- D.Diag(diag::warn_drv_avr_mcu_not_specified);
- } else {
- Optional<StringRef> FamilyName = GetMCUFamilyName(CPU);
- Optional<std::string> AVRLibcRoot = findAVRLibcInstallation();
-
- if (!FamilyName.hasValue()) {
- // We do not have an entry for this CPU in the family
- // mapping table yet.
- D.Diag(diag::warn_drv_avr_family_linking_stdlibs_not_implemented)
- << CPU;
- } else if (!GCCInstallation.isValid()) {
- // No avr-gcc found and so no runtime linked.
- D.Diag(diag::warn_drv_avr_gcc_not_found);
- } else if (!AVRLibcRoot.hasValue()) {
- // No avr-libc found and so no runtime linked.
- D.Diag(diag::warn_drv_avr_libc_not_found);
- } else { // We have enough information to link stdlibs
- std::string GCCRoot(GCCInstallation.getInstallPath());
- std::string GCCParentPath(GCCInstallation.getParentLibPath());
- std::string LibcRoot = AVRLibcRoot.getValue();
- std::string SubPath = GetMCUSubPath(CPU);
-
- getProgramPaths().push_back(GCCParentPath + "/../bin");
- getFilePaths().push_back(LibcRoot + std::string("/lib/") + SubPath);
- getFilePaths().push_back(GCCRoot + std::string("/") + SubPath);
-
- LinkStdlib = true;
- }
- }
-
- if (!LinkStdlib)
- D.Diag(diag::warn_drv_avr_stdlib_not_linked);
+ !Args.hasArg(options::OPT_nodefaultlibs) && GCCInstallation.isValid()) {
+ GCCInstallPath = GCCInstallation.getInstallPath();
+ std::string GCCParentPath(GCCInstallation.getParentLibPath());
+ getProgramPaths().push_back(GCCParentPath + "/../bin");
}
}
@@ -360,52 +387,142 @@ void AVRToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
return;
// Omit if there is no avr-libc installed.
- Optional<std::string> AVRLibcRoot = findAVRLibcInstallation();
- if (!AVRLibcRoot.hasValue())
+ std::optional<std::string> AVRLibcRoot = findAVRLibcInstallation();
+ if (!AVRLibcRoot)
return;
// Add 'avr-libc/include' to clang system include paths if applicable.
- std::string AVRInc = AVRLibcRoot.getValue() + "/include";
+ std::string AVRInc = *AVRLibcRoot + "/include";
if (llvm::sys::fs::is_directory(AVRInc))
addSystemInclude(DriverArgs, CC1Args, AVRInc);
}
+void AVRToolChain::addClangTargetOptions(
+ const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const {
+ // By default, use `.ctors` (not `.init_array`), as required by libgcc, which
+ // runs constructors/destructors on AVR.
+ if (!DriverArgs.hasFlag(options::OPT_fuse_init_array,
+ options::OPT_fno_use_init_array, false))
+ CC1Args.push_back("-fno-use-init-array");
+ // Use `-fno-use-cxa-atexit` as default, since avr-libc does not support
+ // `__cxa_atexit()`.
+ if (!DriverArgs.hasFlag(options::OPT_fuse_cxa_atexit,
+ options::OPT_fno_use_cxa_atexit, false))
+ CC1Args.push_back("-fno-use-cxa-atexit");
+}
+
Tool *AVRToolChain::buildLinker() const {
- return new tools::AVR::Linker(getTriple(), *this, LinkStdlib);
+ return new tools::AVR::Linker(getTriple(), *this);
+}
+
+std::string
+AVRToolChain::getCompilerRT(const llvm::opt::ArgList &Args, StringRef Component,
+ FileType Type = ToolChain::FT_Static) const {
+ assert(Type == ToolChain::FT_Static && "AVR only supports static libraries");
+ // Since AVR can never be a host environment, its compiler-rt library files
+ // should always have ".a" suffix, even on windows.
+ SmallString<32> File("/libclang_rt.");
+ File += Component.str();
+ File += ".a";
+ // Return the default compiler-rt path appended with
+ // "avr/libclang_rt.$COMPONENT.a".
+ SmallString<256> Path(ToolChain::getCompilerRTPath());
+ llvm::sys::path::append(Path, "avr");
+ llvm::sys::path::append(Path, File.str());
+ return std::string(Path);
}
void AVR::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs, const ArgList &Args,
const char *LinkingOutput) const {
+ const auto &TC = static_cast<const AVRToolChain &>(getToolChain());
+ const Driver &D = getToolChain().getDriver();
+
// Compute information about the target AVR.
- std::string CPU = getCPUName(Args, getToolChain().getTriple());
- llvm::Optional<StringRef> FamilyName = GetMCUFamilyName(CPU);
- llvm::Optional<unsigned> SectionAddressData = GetMCUSectionAddressData(CPU);
+ std::string CPU = getCPUName(D, Args, getToolChain().getTriple());
+ std::optional<StringRef> FamilyName = GetMCUFamilyName(CPU);
+ std::optional<std::string> AVRLibcRoot = TC.findAVRLibcInstallation();
+ std::optional<unsigned> SectionAddressData = GetMCUSectionAddressData(CPU);
+
+ // Compute the linker program path, and use GNU "avr-ld" as default.
+ const Arg *A = Args.getLastArg(options::OPT_fuse_ld_EQ);
+ std::string Linker = A ? getToolChain().GetLinkerPath(nullptr)
+ : getToolChain().GetProgramPath(getShortName());
- std::string Linker = getToolChain().GetProgramPath(getShortName());
ArgStringList CmdArgs;
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
// Enable garbage collection of unused sections.
- CmdArgs.push_back("--gc-sections");
+ if (!Args.hasArg(options::OPT_r))
+ CmdArgs.push_back("--gc-sections");
// Add library search paths before we specify libraries.
Args.AddAllArgs(CmdArgs, options::OPT_L);
getToolChain().AddFilePathLibArgs(Args, CmdArgs);
- if (SectionAddressData.hasValue()) {
- std::string DataSectionArg = std::string("-Tdata=0x") +
- llvm::utohexstr(SectionAddressData.getValue());
- CmdArgs.push_back(Args.MakeArgString(DataSectionArg));
- } else {
- // We do not have an entry for this CPU in the address mapping table yet.
- getToolChain().getDriver().Diag(
- diag::warn_drv_avr_linker_section_addresses_not_implemented)
- << CPU;
+ // Currently we only support libgcc and compiler-rt.
+ auto RtLib = TC.GetRuntimeLibType(Args);
+ assert(
+ (RtLib == ToolChain::RLT_Libgcc || RtLib == ToolChain::RLT_CompilerRT) &&
+ "unknown runtime library");
+
+ // Only add default libraries if the user hasn't explicitly opted out.
+ bool LinkStdlib = false;
+ if (!Args.hasArg(options::OPT_nostdlib) && !Args.hasArg(options::OPT_r) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (!CPU.empty()) {
+ if (!FamilyName) {
+ // We do not have an entry for this CPU in the family
+ // mapping table yet.
+ D.Diag(diag::warn_drv_avr_family_linking_stdlibs_not_implemented)
+ << CPU;
+ } else if (!AVRLibcRoot) {
+ // No avr-libc found and so no runtime linked.
+ D.Diag(diag::warn_drv_avr_libc_not_found);
+ } else {
+ std::string SubPath = GetMCUSubPath(CPU);
+ // Add path of avr-libc.
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-L") + *AVRLibcRoot + "/lib/" + SubPath));
+ if (RtLib == ToolChain::RLT_Libgcc)
+ CmdArgs.push_back(Args.MakeArgString("-L" + TC.getGCCInstallPath() +
+ "/" + SubPath));
+ LinkStdlib = true;
+ }
+ }
+ if (!LinkStdlib)
+ D.Diag(diag::warn_drv_avr_stdlib_not_linked);
+ }
+
+ if (!Args.hasArg(options::OPT_r)) {
+ if (SectionAddressData) {
+ CmdArgs.push_back(
+ Args.MakeArgString("--defsym=__DATA_REGION_ORIGIN__=0x" +
+ Twine::utohexstr(*SectionAddressData)));
+ } else {
+ // We do not have an entry for this CPU in the address mapping table
+ // yet.
+ D.Diag(diag::warn_drv_avr_linker_section_addresses_not_implemented)
+ << CPU;
+ }
+ }
+
+ if (D.isUsingLTO()) {
+ assert(!Inputs.empty() && "Must have at least one input.");
+ // Find the first filename InputInfo object.
+ auto Input = llvm::find_if(
+ Inputs, [](const InputInfo &II) -> bool { return II.isFilename(); });
+ if (Input == Inputs.end())
+ // For a very rare case, all of the inputs to the linker are
+ // InputArg. If that happens, just use the first InputInfo.
+ Input = Inputs.begin();
+
+ addLTOOptions(TC, Args, CmdArgs, Output, *Input,
+ D.getLTOMode() == LTOK_Thin);
}
// If the family name is known, we can link with the device-specific libgcc.
@@ -414,36 +531,85 @@ void AVR::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (LinkStdlib) {
assert(!CPU.empty() && "CPU name must be known in order to link stdlibs");
+ CmdArgs.push_back("--start-group");
+
// Add the object file for the CRT.
std::string CrtFileName = std::string("-l:crt") + CPU + std::string(".o");
CmdArgs.push_back(Args.MakeArgString(CrtFileName));
- CmdArgs.push_back("-lgcc");
+ // Link to libgcc.
+ if (RtLib == ToolChain::RLT_Libgcc)
+ CmdArgs.push_back("-lgcc");
+
+ // Link to generic libraries of avr-libc.
CmdArgs.push_back("-lm");
CmdArgs.push_back("-lc");
// Add the link library specific to the MCU.
CmdArgs.push_back(Args.MakeArgString(std::string("-l") + CPU));
- // Specify the family name as the emulation mode to use.
- // This is almost always required because otherwise avr-ld
- // will assume 'avr2' and warn about the program being larger
- // than the bare minimum supports.
- CmdArgs.push_back(Args.MakeArgString(std::string("-m") + *FamilyName));
+ // Add the relocatable inputs.
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
+
+ // We directly use libclang_rt.builtins.a as input file, instead of using
+ // '-lclang_rt.builtins'.
+ if (RtLib == ToolChain::RLT_CompilerRT) {
+ std::string RtLib =
+ getToolChain().getCompilerRT(Args, "builtins", ToolChain::FT_Static);
+ if (llvm::sys::fs::exists(RtLib))
+ CmdArgs.push_back(Args.MakeArgString(RtLib));
+ }
+
+ CmdArgs.push_back("--end-group");
+
+ // Add avr-libc's linker script to lld by default, if it exists.
+ if (!Args.hasArg(options::OPT_T) &&
+ Linker.find("avr-ld") == std::string::npos) {
+ std::string Path(*AVRLibcRoot + "/lib/ldscripts/");
+ Path += *FamilyName;
+ Path += ".x";
+ if (llvm::sys::fs::exists(Path))
+ CmdArgs.push_back(Args.MakeArgString("-T" + Path));
+ }
+ // Otherwise add user specified linker script to either avr-ld or lld.
+ else
+ Args.AddAllArgs(CmdArgs, options::OPT_T);
+
+ if (Args.hasFlag(options::OPT_mrelax, options::OPT_mno_relax, true))
+ CmdArgs.push_back("--relax");
+ } else {
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
}
+ // Specify the family name as the emulation mode to use.
+ // This is almost always required because otherwise avr-ld
+ // will assume 'avr2' and warn about the program being larger
+ // than the bare minimum supports.
+ if (Linker.find("avr-ld") != std::string::npos && FamilyName)
+ CmdArgs.push_back(Args.MakeArgString(std::string("-m") + *FamilyName));
+
C.addCommand(std::make_unique<Command>(
JA, *this, ResponseFileSupport::AtFileCurCP(), Args.MakeArgString(Linker),
CmdArgs, Inputs, Output));
}
-llvm::Optional<std::string> AVRToolChain::findAVRLibcInstallation() const {
+std::optional<std::string> AVRToolChain::findAVRLibcInstallation() const {
+ // Search avr-libc installation according to avr-gcc installation.
+ std::string GCCParent(GCCInstallation.getParentLibPath());
+ std::string Path(GCCParent + "/avr");
+ if (llvm::sys::fs::is_directory(Path))
+ return Path;
+ Path = GCCParent + "/../avr";
+ if (llvm::sys::fs::is_directory(Path))
+ return Path;
+
+ // Search avr-libc installation from possible locations, and return the first
+ // one that exists, if there is no avr-gcc installed.
for (StringRef PossiblePath : PossibleAVRLibcLocations) {
std::string Path = getDriver().SysRoot + PossiblePath.str();
- // Return the first avr-libc installation that exists.
if (llvm::sys::fs::is_directory(Path))
- return Optional<std::string>(Path);
+ return Path;
}
- return llvm::None;
+ return std::nullopt;
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.h
index f612aa691182..247188b7eaad 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.h
@@ -11,8 +11,8 @@
#include "Gnu.h"
#include "clang/Driver/InputInfo.h"
-#include "clang/Driver/ToolChain.h"
#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
namespace clang {
namespace driver {
@@ -26,28 +26,33 @@ public:
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void
+ addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
+
+ std::optional<std::string> findAVRLibcInstallation() const;
+ StringRef getGCCInstallPath() const { return GCCInstallPath; }
+ std::string getCompilerRT(const llvm::opt::ArgList &Args, StringRef Component,
+ FileType Type) const override;
+
+ bool HasNativeLLVMSupport() const override { return true; }
+
protected:
Tool *buildLinker() const override;
private:
- /// Whether libgcc, libct, and friends should be linked.
- ///
- /// This is not done if the user does not specify a
- /// microcontroller on the command line.
- bool LinkStdlib;
-
- llvm::Optional<std::string> findAVRLibcInstallation() const;
+ StringRef GCCInstallPath;
};
} // end namespace toolchains
namespace tools {
namespace AVR {
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
public:
- Linker(const llvm::Triple &Triple, const ToolChain &TC, bool LinkStdlib)
- : Tool("AVR::Linker", "avr-ld", TC), Triple(Triple),
- LinkStdlib(LinkStdlib) {}
+ Linker(const llvm::Triple &Triple, const ToolChain &TC)
+ : Tool("AVR::Linker", "avr-ld", TC), Triple(Triple) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -58,7 +63,6 @@ public:
protected:
const llvm::Triple &Triple;
- bool LinkStdlib;
};
} // end namespace AVR
} // end namespace tools
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.cpp
deleted file mode 100644
index be1476a7636c..000000000000
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-//===--- Ananas.cpp - Ananas ToolChain Implementations ------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "Ananas.h"
-#include "CommonArgs.h"
-#include "clang/Driver/Compilation.h"
-#include "clang/Driver/Driver.h"
-#include "clang/Driver/InputInfo.h"
-#include "clang/Driver/Options.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/Option/ArgList.h"
-#include "llvm/Support/Path.h"
-
-using namespace clang::driver;
-using namespace clang::driver::tools;
-using namespace clang::driver::toolchains;
-using namespace clang;
-using namespace llvm::opt;
-
-void ananas::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
- claimNoWarnArgs(Args);
- ArgStringList CmdArgs;
-
- Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
-
- CmdArgs.push_back("-o");
- CmdArgs.push_back(Output.getFilename());
-
- for (const auto &II : Inputs)
- CmdArgs.push_back(II.getFilename());
-
- const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this,
- ResponseFileSupport::AtFileCurCP(),
- Exec, CmdArgs, Inputs, Output));
-}
-
-void ananas::Linker::ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
- const ToolChain &ToolChain = getToolChain();
- const Driver &D = ToolChain.getDriver();
- ArgStringList CmdArgs;
-
- // Silence warning for "clang -g foo.o -o foo"
- Args.ClaimAllArgs(options::OPT_g_Group);
- // and "clang -emit-llvm foo.o -o foo"
- Args.ClaimAllArgs(options::OPT_emit_llvm);
- // and for "clang -w foo.o -o foo". Other warning options are already
- // handled somewhere else.
- Args.ClaimAllArgs(options::OPT_w);
-
- if (!D.SysRoot.empty())
- CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
-
- if (Args.hasArg(options::OPT_static)) {
- CmdArgs.push_back("-Bstatic");
- } else {
- if (Args.hasArg(options::OPT_rdynamic))
- CmdArgs.push_back("-export-dynamic");
- if (Args.hasArg(options::OPT_shared)) {
- CmdArgs.push_back("-Bshareable");
- } else {
- Args.AddAllArgs(CmdArgs, options::OPT_pie);
- CmdArgs.push_back("-dynamic-linker");
- CmdArgs.push_back("/lib/ld-ananas.so");
- }
- }
-
- if (Output.isFilename()) {
- CmdArgs.push_back("-o");
- CmdArgs.push_back(Output.getFilename());
- } else {
- assert(Output.isNothing() && "Invalid output.");
- }
-
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
- if (!Args.hasArg(options::OPT_shared)) {
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o")));
- }
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
- if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie)) {
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtbeginS.o")));
- } else {
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtbegin.o")));
- }
- }
-
- Args.AddAllArgs(CmdArgs, options::OPT_L);
- ToolChain.AddFilePathLibArgs(Args, CmdArgs);
- Args.AddAllArgs(CmdArgs,
- {options::OPT_T_Group, options::OPT_e, options::OPT_s,
- options::OPT_t, options::OPT_Z_Flag, options::OPT_r});
-
- if (D.isUsingLTO()) {
- assert(!Inputs.empty() && "Must have at least one input.");
- addLTOOptions(ToolChain, Args, CmdArgs, Output, Inputs[0],
- D.getLTOMode() == LTOK_Thin);
- }
-
- AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
-
- if (ToolChain.ShouldLinkCXXStdlib(Args))
- ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs))
- CmdArgs.push_back("-lc");
-
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
- if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtendS.o")));
- else
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtend.o")));
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
- }
-
- const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this,
- ResponseFileSupport::AtFileCurCP(),
- Exec, CmdArgs, Inputs, Output));
-}
-
-// Ananas - Ananas tool chain which can call as(1) and ld(1) directly.
-
-Ananas::Ananas(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
- : Generic_ELF(D, Triple, Args) {
- getFilePaths().push_back(getDriver().SysRoot + "/usr/lib");
-}
-
-Tool *Ananas::buildAssembler() const {
- return new tools::ananas::Assembler(*this);
-}
-
-Tool *Ananas::buildLinker() const { return new tools::ananas::Linker(*this); }
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.h
deleted file mode 100644
index 72ad3edcf056..000000000000
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Ananas.h
+++ /dev/null
@@ -1,65 +0,0 @@
-//===--- Ananas.h - Ananas ToolChain Implementations --------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ANANAS_H
-#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ANANAS_H
-
-#include "Gnu.h"
-#include "clang/Driver/Tool.h"
-#include "clang/Driver/ToolChain.h"
-
-namespace clang {
-namespace driver {
-namespace tools {
-
-/// ananas -- Directly call GNU Binutils assembler and linker
-namespace ananas {
-class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
-public:
- Assembler(const ToolChain &TC) : Tool("ananas::Assembler", "assembler", TC) {}
-
- bool hasIntegratedCPP() const override { return false; }
-
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
-};
-
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
-public:
- Linker(const ToolChain &TC) : Tool("ananas::Linker", "linker", TC) {}
-
- bool hasIntegratedCPP() const override { return false; }
- bool isLinkJob() const override { return true; }
-
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
-};
-} // end namespace ananas
-} // end namespace tools
-
-namespace toolchains {
-
-class LLVM_LIBRARY_VISIBILITY Ananas : public Generic_ELF {
-public:
- Ananas(const Driver &D, const llvm::Triple &Triple,
- const llvm::opt::ArgList &Args);
-
-protected:
- Tool *buildAssembler() const override;
- Tool *buildLinker() const override;
-};
-
-} // end namespace toolchains
-} // end namespace driver
-} // end namespace clang
-
-#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ANANAS_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
index 0e354a49b59a..0cf96bb5c9cb 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
@@ -7,12 +7,13 @@
//===----------------------------------------------------------------------===//
#include "AArch64.h"
+#include "../CommonArgs.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
-#include "llvm/Support/TargetParser.h"
-#include "llvm/Support/Host.h"
+#include "llvm/TargetParser/AArch64TargetParser.h"
+#include "llvm/TargetParser/Host.h"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -37,6 +38,8 @@ std::string aarch64::getAArch64TargetCPU(const ArgList &Args,
CPU = Mcpu.split("+").first.lower();
}
+ CPU = llvm::AArch64::resolveCPUAlias(CPU);
+
// Handle CPU name is 'native'.
if (CPU == "native")
return std::string(llvm::sys::getHostCPUName());
@@ -50,13 +53,17 @@ std::string aarch64::getAArch64TargetCPU(const ArgList &Args,
return "apple-m1";
}
+ if (Triple.isXROS()) {
+ // The xrOS simulator runs on M1 as well, it should have been covered above.
+ assert(!Triple.isSimulatorEnvironment() && "xrossim should be mac-like");
+ return "apple-a12";
+ }
// arm64e requires v8.3a and only runs on apple-a12 and later CPUs.
if (Triple.isArm64e())
return "apple-a12";
- // Make sure we pick the appropriate Apple CPU if -arch is used or when
- // targetting a Darwin OS.
- if (Args.getLastArg(options::OPT_arch) || Triple.isOSDarwin())
+ // Make sure we pick the appropriate Apple CPU when targetting a Darwin OS.
+ if (Triple.isOSDarwin())
return Triple.getArch() == llvm::Triple::aarch64_32 ? "apple-s4"
: "apple-a7";
@@ -65,71 +72,68 @@ std::string aarch64::getAArch64TargetCPU(const ArgList &Args,
// Decode AArch64 features from string like +[no]featureA+[no]featureB+...
static bool DecodeAArch64Features(const Driver &D, StringRef text,
- std::vector<StringRef> &Features,
- llvm::AArch64::ArchKind ArchKind) {
+ llvm::AArch64::ExtensionSet &Extensions) {
SmallVector<StringRef, 8> Split;
text.split(Split, StringRef("+"), -1, false);
for (StringRef Feature : Split) {
- StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
- if (!FeatureName.empty())
- Features.push_back(FeatureName);
- else if (Feature == "neon" || Feature == "noneon")
+ if (Feature == "neon" || Feature == "noneon") {
D.Diag(clang::diag::err_drv_no_neon_modifier);
- else
+ continue;
+ }
+ if (!Extensions.parseModifier(Feature))
return false;
-
- // +sve implies +f32mm if the base architecture is v8.6A or v8.7A
- // it isn't the case in general that sve implies both f64mm and f32mm
- if ((ArchKind == llvm::AArch64::ArchKind::ARMV8_6A ||
- ArchKind == llvm::AArch64::ArchKind::ARMV8_7A) && Feature == "sve")
- Features.push_back("+f32mm");
}
+
return true;
}
// Check if the CPU name and feature modifiers in -mcpu are legal. If yes,
// decode CPU and feature.
static bool DecodeAArch64Mcpu(const Driver &D, StringRef Mcpu, StringRef &CPU,
- std::vector<StringRef> &Features) {
+ llvm::AArch64::ExtensionSet &Extensions) {
std::pair<StringRef, StringRef> Split = Mcpu.split("+");
CPU = Split.first;
- llvm::AArch64::ArchKind ArchKind = llvm::AArch64::ArchKind::ARMV8A;
if (CPU == "native")
CPU = llvm::sys::getHostCPUName();
if (CPU == "generic") {
- Features.push_back("+neon");
+ Extensions.enable(llvm::AArch64::AEK_SIMD);
} else {
- ArchKind = llvm::AArch64::parseCPUArch(CPU);
- if (!llvm::AArch64::getArchFeatures(ArchKind, Features))
+ const std::optional<llvm::AArch64::CpuInfo> CpuInfo =
+ llvm::AArch64::parseCpu(CPU);
+ if (!CpuInfo)
return false;
- uint64_t Extension = llvm::AArch64::getDefaultExtensions(CPU, ArchKind);
- if (!llvm::AArch64::getExtensionFeatures(Extension, Features))
- return false;
- }
+ Extensions.addCPUDefaults(*CpuInfo);
+ }
- if (Split.second.size() &&
- !DecodeAArch64Features(D, Split.second, Features, ArchKind))
- return false;
+ if (Split.second.size() &&
+ !DecodeAArch64Features(D, Split.second, Extensions))
+ return false;
- return true;
+ return true;
}
static bool
getAArch64ArchFeaturesFromMarch(const Driver &D, StringRef March,
const ArgList &Args,
- std::vector<StringRef> &Features) {
+ llvm::AArch64::ExtensionSet &Extensions) {
std::string MarchLowerCase = March.lower();
std::pair<StringRef, StringRef> Split = StringRef(MarchLowerCase).split("+");
- llvm::AArch64::ArchKind ArchKind = llvm::AArch64::parseArch(Split.first);
- if (ArchKind == llvm::AArch64::ArchKind::INVALID ||
- !llvm::AArch64::getArchFeatures(ArchKind, Features) ||
- (Split.second.size() &&
- !DecodeAArch64Features(D, Split.second, Features, ArchKind)))
+ const llvm::AArch64::ArchInfo *ArchInfo =
+ llvm::AArch64::parseArch(Split.first);
+ if (Split.first == "native")
+ ArchInfo = llvm::AArch64::getArchForCpu(llvm::sys::getHostCPUName().str());
+ if (!ArchInfo)
+ return false;
+
+ Extensions.addArchDefaults(*ArchInfo);
+
+ if ((Split.second.size() &&
+ !DecodeAArch64Features(D, Split.second, Extensions)))
return false;
return true;
@@ -138,10 +142,10 @@ getAArch64ArchFeaturesFromMarch(const Driver &D, StringRef March,
static bool
getAArch64ArchFeaturesFromMcpu(const Driver &D, StringRef Mcpu,
const ArgList &Args,
- std::vector<StringRef> &Features) {
+ llvm::AArch64::ExtensionSet &Extensions) {
StringRef CPU;
std::string McpuLowerCase = Mcpu.lower();
- if (!DecodeAArch64Mcpu(D, McpuLowerCase, CPU, Features))
+ if (!DecodeAArch64Mcpu(D, McpuLowerCase, CPU, Extensions))
return false;
return true;
@@ -152,17 +156,17 @@ getAArch64MicroArchFeaturesFromMtune(const Driver &D, StringRef Mtune,
const ArgList &Args,
std::vector<StringRef> &Features) {
std::string MtuneLowerCase = Mtune.lower();
- // Check CPU name is valid
- std::vector<StringRef> MtuneFeatures;
+ // Check CPU name is valid, but ignore any extensions on it.
+ llvm::AArch64::ExtensionSet Extensions;
StringRef Tune;
- if (!DecodeAArch64Mcpu(D, MtuneLowerCase, Tune, MtuneFeatures))
+ if (!DecodeAArch64Mcpu(D, MtuneLowerCase, Tune, Extensions))
return false;
// Handle CPU name is 'native'.
if (MtuneLowerCase == "native")
MtuneLowerCase = std::string(llvm::sys::getHostCPUName());
if (MtuneLowerCase == "cyclone" ||
- StringRef(MtuneLowerCase).startswith("apple")) {
+ StringRef(MtuneLowerCase).starts_with("apple")) {
Features.push_back("+zcm");
Features.push_back("+zcz");
}
@@ -174,7 +178,8 @@ getAArch64MicroArchFeaturesFromMcpu(const Driver &D, StringRef Mcpu,
const ArgList &Args,
std::vector<StringRef> &Features) {
StringRef CPU;
- std::vector<StringRef> DecodedFeature;
+ // Check CPU name is valid, but ignore any extensions on it.
+ llvm::AArch64::ExtensionSet DecodedFeature;
std::string McpuLowerCase = Mcpu.lower();
if (!DecodeAArch64Mcpu(D, McpuLowerCase, CPU, DecodedFeature))
return false;
@@ -189,27 +194,31 @@ void aarch64::getAArch64TargetFeatures(const Driver &D,
bool ForAS) {
Arg *A;
bool success = true;
- // Enable NEON by default.
- Features.push_back("+neon");
llvm::StringRef WaMArch;
+ llvm::AArch64::ExtensionSet Extensions;
if (ForAS)
for (const auto *A :
Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler))
for (StringRef Value : A->getValues())
- if (Value.startswith("-march="))
+ if (Value.starts_with("-march="))
WaMArch = Value.substr(7);
// Call getAArch64ArchFeaturesFromMarch only if "-Wa,-march=" or
// "-Xassembler -march" is detected. Otherwise it may return false
// and causes Clang to error out.
if (!WaMArch.empty())
- success = getAArch64ArchFeaturesFromMarch(D, WaMArch, Args, Features);
+ success = getAArch64ArchFeaturesFromMarch(D, WaMArch, Args, Extensions);
else if ((A = Args.getLastArg(options::OPT_march_EQ)))
- success = getAArch64ArchFeaturesFromMarch(D, A->getValue(), Args, Features);
+ success =
+ getAArch64ArchFeaturesFromMarch(D, A->getValue(), Args, Extensions);
else if ((A = Args.getLastArg(options::OPT_mcpu_EQ)))
- success = getAArch64ArchFeaturesFromMcpu(D, A->getValue(), Args, Features);
- else if (Args.hasArg(options::OPT_arch) || isCPUDeterminedByTriple(Triple))
+ success =
+ getAArch64ArchFeaturesFromMcpu(D, A->getValue(), Args, Extensions);
+ else if (isCPUDeterminedByTriple(Triple))
success = getAArch64ArchFeaturesFromMcpu(
- D, getAArch64TargetCPU(Args, Triple, A), Args, Features);
+ D, getAArch64TargetCPU(Args, Triple, A), Args, Extensions);
+ else
+ // Default to 'A' profile if the architecture is not specified.
+ success = getAArch64ArchFeaturesFromMarch(D, "armv8-a", Args, Extensions);
if (success && (A = Args.getLastArg(clang::driver::options::OPT_mtune_EQ)))
success =
@@ -217,36 +226,48 @@ void aarch64::getAArch64TargetFeatures(const Driver &D,
else if (success && (A = Args.getLastArg(options::OPT_mcpu_EQ)))
success =
getAArch64MicroArchFeaturesFromMcpu(D, A->getValue(), Args, Features);
- else if (success &&
- (Args.hasArg(options::OPT_arch) || isCPUDeterminedByTriple(Triple)))
+ else if (success && isCPUDeterminedByTriple(Triple))
success = getAArch64MicroArchFeaturesFromMcpu(
D, getAArch64TargetCPU(Args, Triple, A), Args, Features);
if (!success) {
- auto Diag = D.Diag(diag::err_drv_clang_unsupported);
+ auto Diag = D.Diag(diag::err_drv_unsupported_option_argument);
// If "-Wa,-march=" is used, 'WaMArch' will contain the argument's value,
// while 'A' is uninitialized. Only dereference 'A' in the other case.
if (!WaMArch.empty())
- Diag << "-march=" + WaMArch.str();
+ Diag << "-march=" << WaMArch;
else
- Diag << A->getAsString(Args);
+ Diag << A->getSpelling() << A->getValue();
}
+ // -mgeneral-regs-only disables all floating-point features.
if (Args.getLastArg(options::OPT_mgeneral_regs_only)) {
- Features.push_back("-fp-armv8");
- Features.push_back("-crypto");
- Features.push_back("-neon");
+ Extensions.disable(llvm::AArch64::AEK_FP);
+ }
+
+ // En/disable crc
+ if (Arg *A = Args.getLastArg(options::OPT_mcrc, options::OPT_mnocrc)) {
+ if (A->getOption().matches(options::OPT_mcrc))
+ Extensions.enable(llvm::AArch64::AEK_CRC);
+ else
+ Extensions.disable(llvm::AArch64::AEK_CRC);
}
+ // At this point all hardware features are decided, so convert the extensions
+ // set to a feature list.
+ Extensions.toLLVMFeatureList(Features);
+
if (Arg *A = Args.getLastArg(options::OPT_mtp_mode_EQ)) {
StringRef Mtp = A->getValue();
- if (Mtp == "el3")
+ if (Mtp == "el3" || Mtp == "tpidr_el3")
Features.push_back("+tpidr-el3");
- else if (Mtp == "el2")
+ else if (Mtp == "el2" || Mtp == "tpidr_el2")
Features.push_back("+tpidr-el2");
- else if (Mtp == "el1")
+ else if (Mtp == "el1" || Mtp == "tpidr_el1")
Features.push_back("+tpidr-el1");
- else if (Mtp != "el0")
+ else if (Mtp == "tpidrro_el0")
+ Features.push_back("+tpidrro-el0");
+ else if (Mtp != "el0" && Mtp != "tpidr_el0")
D.Diag(diag::err_drv_invalid_mtp) << A->getAsString(Args);
}
@@ -282,8 +303,8 @@ void aarch64::getAArch64TargetFeatures(const Driver &D,
DisableComdat = true;
continue;
}
- D.Diag(diag::err_invalid_sls_hardening)
- << Scope << A->getAsString(Args);
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << Scope;
break;
}
}
@@ -297,124 +318,6 @@ void aarch64::getAArch64TargetFeatures(const Driver &D,
}
}
- // En/disable crc
- if (Arg *A = Args.getLastArg(options::OPT_mcrc, options::OPT_mnocrc)) {
- if (A->getOption().matches(options::OPT_mcrc))
- Features.push_back("+crc");
- else
- Features.push_back("-crc");
- }
-
- // Handle (arch-dependent) fp16fml/fullfp16 relationship.
- // FIXME: this fp16fml option handling will be reimplemented after the
- // TargetParser rewrite.
- const auto ItRNoFullFP16 = std::find(Features.rbegin(), Features.rend(), "-fullfp16");
- const auto ItRFP16FML = std::find(Features.rbegin(), Features.rend(), "+fp16fml");
- if (llvm::is_contained(Features, "+v8.4a")) {
- const auto ItRFullFP16 = std::find(Features.rbegin(), Features.rend(), "+fullfp16");
- if (ItRFullFP16 < ItRNoFullFP16 && ItRFullFP16 < ItRFP16FML) {
- // Only entangled feature that can be to the right of this +fullfp16 is -fp16fml.
- // Only append the +fp16fml if there is no -fp16fml after the +fullfp16.
- if (std::find(Features.rbegin(), ItRFullFP16, "-fp16fml") == ItRFullFP16)
- Features.push_back("+fp16fml");
- }
- else
- goto fp16_fml_fallthrough;
- } else {
-fp16_fml_fallthrough:
- // In both of these cases, putting the 'other' feature on the end of the vector will
- // result in the same effect as placing it immediately after the current feature.
- if (ItRNoFullFP16 < ItRFP16FML)
- Features.push_back("-fp16fml");
- else if (ItRNoFullFP16 > ItRFP16FML)
- Features.push_back("+fullfp16");
- }
-
- // FIXME: this needs reimplementation too after the TargetParser rewrite
- //
- // Context sensitive meaning of Crypto:
- // 1) For Arch >= ARMv8.4a: crypto = sm4 + sha3 + sha2 + aes
- // 2) For Arch <= ARMv8.3a: crypto = sha2 + aes
- const auto ItBegin = Features.begin();
- const auto ItEnd = Features.end();
- const auto ItRBegin = Features.rbegin();
- const auto ItREnd = Features.rend();
- const auto ItRCrypto = std::find(ItRBegin, ItREnd, "+crypto");
- const auto ItRNoCrypto = std::find(ItRBegin, ItREnd, "-crypto");
- const auto HasCrypto = ItRCrypto != ItREnd;
- const auto HasNoCrypto = ItRNoCrypto != ItREnd;
- const ptrdiff_t PosCrypto = ItRCrypto - ItRBegin;
- const ptrdiff_t PosNoCrypto = ItRNoCrypto - ItRBegin;
-
- bool NoCrypto = false;
- if (HasCrypto && HasNoCrypto) {
- if (PosNoCrypto < PosCrypto)
- NoCrypto = true;
- }
-
- if (std::find(ItBegin, ItEnd, "+v8.4a") != ItEnd) {
- if (HasCrypto && !NoCrypto) {
- // Check if we have NOT disabled an algorithm with something like:
- // +crypto, -algorithm
- // And if "-algorithm" does not occur, we enable that crypto algorithm.
- const bool HasSM4 = (std::find(ItBegin, ItEnd, "-sm4") == ItEnd);
- const bool HasSHA3 = (std::find(ItBegin, ItEnd, "-sha3") == ItEnd);
- const bool HasSHA2 = (std::find(ItBegin, ItEnd, "-sha2") == ItEnd);
- const bool HasAES = (std::find(ItBegin, ItEnd, "-aes") == ItEnd);
- if (HasSM4)
- Features.push_back("+sm4");
- if (HasSHA3)
- Features.push_back("+sha3");
- if (HasSHA2)
- Features.push_back("+sha2");
- if (HasAES)
- Features.push_back("+aes");
- } else if (HasNoCrypto) {
- // Check if we have NOT enabled a crypto algorithm with something like:
- // -crypto, +algorithm
- // And if "+algorithm" does not occur, we disable that crypto algorithm.
- const bool HasSM4 = (std::find(ItBegin, ItEnd, "+sm4") != ItEnd);
- const bool HasSHA3 = (std::find(ItBegin, ItEnd, "+sha3") != ItEnd);
- const bool HasSHA2 = (std::find(ItBegin, ItEnd, "+sha2") != ItEnd);
- const bool HasAES = (std::find(ItBegin, ItEnd, "+aes") != ItEnd);
- if (!HasSM4)
- Features.push_back("-sm4");
- if (!HasSHA3)
- Features.push_back("-sha3");
- if (!HasSHA2)
- Features.push_back("-sha2");
- if (!HasAES)
- Features.push_back("-aes");
- }
- } else {
- if (HasCrypto && !NoCrypto) {
- const bool HasSHA2 = (std::find(ItBegin, ItEnd, "-sha2") == ItEnd);
- const bool HasAES = (std::find(ItBegin, ItEnd, "-aes") == ItEnd);
- if (HasSHA2)
- Features.push_back("+sha2");
- if (HasAES)
- Features.push_back("+aes");
- } else if (HasNoCrypto) {
- const bool HasSHA2 = (std::find(ItBegin, ItEnd, "+sha2") != ItEnd);
- const bool HasAES = (std::find(ItBegin, ItEnd, "+aes") != ItEnd);
- const bool HasV82a = (std::find(ItBegin, ItEnd, "+v8.2a") != ItEnd);
- const bool HasV83a = (std::find(ItBegin, ItEnd, "+v8.3a") != ItEnd);
- const bool HasV84a = (std::find(ItBegin, ItEnd, "+v8.4a") != ItEnd);
- if (!HasSHA2)
- Features.push_back("-sha2");
- if (!HasAES)
- Features.push_back("-aes");
- if (HasV82a || HasV83a || HasV84a) {
- Features.push_back("-sm4");
- Features.push_back("-sha3");
- }
- }
- }
-
- auto V8_6Pos = llvm::find(Features, "+v8.6a");
- if (V8_6Pos != std::end(Features))
- V8_6Pos = Features.insert(std::next(V8_6Pos), {"+i8mm", "+bf16"});
-
if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
options::OPT_munaligned_access)) {
if (A->getOption().matches(options::OPT_mno_unaligned_access))
@@ -526,4 +429,22 @@ fp16_fml_fallthrough:
if (Args.hasArg(options::OPT_mno_neg_immediates))
Features.push_back("+no-neg-immediates");
+
+ if (Arg *A = Args.getLastArg(options::OPT_mfix_cortex_a53_835769,
+ options::OPT_mno_fix_cortex_a53_835769)) {
+ if (A->getOption().matches(options::OPT_mfix_cortex_a53_835769))
+ Features.push_back("+fix-cortex-a53-835769");
+ else
+ Features.push_back("-fix-cortex-a53-835769");
+ } else if (Triple.isAndroid() || Triple.isOHOSFamily()) {
+ // Enabled A53 errata (835769) workaround by default on android
+ Features.push_back("+fix-cortex-a53-835769");
+ } else if (Triple.isOSFuchsia()) {
+ std::string CPU = getCPUName(D, Args, Triple);
+ if (CPU.empty() || CPU == "generic" || CPU == "cortex-a53")
+ Features.push_back("+fix-cortex-a53-835769");
+ }
+
+ if (Args.getLastArg(options::OPT_mno_bti_at_return_twice))
+ Features.push_back("+no-bti-at-return-twice");
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
index 4ab547fabe43..e6ee2f88a84e 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
@@ -12,8 +12,8 @@
#include "clang/Driver/Options.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Option/ArgList.h"
-#include "llvm/Support/TargetParser.h"
-#include "llvm/Support/Host.h"
+#include "llvm/TargetParser/ARMTargetParser.h"
+#include "llvm/TargetParser/Host.h"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -32,6 +32,20 @@ bool arm::isARMMProfile(const llvm::Triple &Triple) {
return llvm::ARM::parseArchProfile(Arch) == llvm::ARM::ProfileKind::M;
}
+// On Arm the endianness of the output file is determined by the target and
+// can be overridden by the pseudo-target flags '-mlittle-endian'/'-EL' and
+// '-mbig-endian'/'-EB'. Unlike other targets the flag does not result in a
+// normalized triple so we must handle the flag here.
+bool arm::isARMBigEndian(const llvm::Triple &Triple, const ArgList &Args) {
+ if (Arg *A = Args.getLastArg(options::OPT_mlittle_endian,
+ options::OPT_mbig_endian)) {
+ return !A->getOption().matches(options::OPT_mlittle_endian);
+ }
+
+ return Triple.getArch() == llvm::Triple::armeb ||
+ Triple.getArch() == llvm::Triple::thumbeb;
+}
+
// True if A-profile.
bool arm::isARMAProfile(const llvm::Triple &Triple) {
llvm::StringRef Arch = Triple.getArchName();
@@ -53,9 +67,9 @@ void arm::getARMArchCPUFromArgs(const ArgList &Args, llvm::StringRef &Arch,
// Use getValues because -Wa can have multiple arguments
// e.g. -Wa,-mcpu=foo,-mcpu=bar
for (StringRef Value : A->getValues()) {
- if (Value.startswith("-mcpu="))
+ if (Value.starts_with("-mcpu="))
CPU = Value.substr(6);
- if (Value.startswith("-march="))
+ if (Value.starts_with("-march="))
Arch = Value.substr(7);
}
}
@@ -72,25 +86,25 @@ static void getARMHWDivFeatures(const Driver &D, const Arg *A,
}
// Handle -mfpu=.
-static unsigned getARMFPUFeatures(const Driver &D, const Arg *A,
- const ArgList &Args, StringRef FPU,
- std::vector<StringRef> &Features) {
- unsigned FPUID = llvm::ARM::parseFPU(FPU);
- if (!llvm::ARM::getFPUFeatures(FPUID, Features))
+static llvm::ARM::FPUKind getARMFPUFeatures(const Driver &D, const Arg *A,
+ const ArgList &Args, StringRef FPU,
+ std::vector<StringRef> &Features) {
+ llvm::ARM::FPUKind FPUKind = llvm::ARM::parseFPU(FPU);
+ if (!llvm::ARM::getFPUFeatures(FPUKind, Features))
D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
- return FPUID;
+ return FPUKind;
}
// Decode ARM features from string like +[no]featureA+[no]featureB+...
static bool DecodeARMFeatures(const Driver &D, StringRef text, StringRef CPU,
llvm::ARM::ArchKind ArchKind,
std::vector<StringRef> &Features,
- unsigned &ArgFPUID) {
+ llvm::ARM::FPUKind &ArgFPUKind) {
SmallVector<StringRef, 8> Split;
text.split(Split, StringRef("+"), -1, false);
for (StringRef Feature : Split) {
- if (!appendArchExtFeatures(CPU, ArchKind, Feature, Features, ArgFPUID))
+ if (!appendArchExtFeatures(CPU, ArchKind, Feature, Features, ArgFPUKind))
return false;
}
return true;
@@ -112,31 +126,52 @@ static void DecodeARMFeaturesFromCPU(const Driver &D, StringRef CPU,
static void checkARMArchName(const Driver &D, const Arg *A, const ArgList &Args,
llvm::StringRef ArchName, llvm::StringRef CPUName,
std::vector<StringRef> &Features,
- const llvm::Triple &Triple, unsigned &ArgFPUID) {
+ const llvm::Triple &Triple,
+ llvm::ARM::FPUKind &ArgFPUKind) {
std::pair<StringRef, StringRef> Split = ArchName.split("+");
std::string MArch = arm::getARMArch(ArchName, Triple);
llvm::ARM::ArchKind ArchKind = llvm::ARM::parseArch(MArch);
if (ArchKind == llvm::ARM::ArchKind::INVALID ||
- (Split.second.size() && !DecodeARMFeatures(D, Split.second, CPUName,
- ArchKind, Features, ArgFPUID)))
- D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
+ (Split.second.size() &&
+ !DecodeARMFeatures(D, Split.second, CPUName, ArchKind, Features,
+ ArgFPUKind)))
+ D.Diag(clang::diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << A->getValue();
}
// Check -mcpu=. Needs ArchName to handle -mcpu=generic.
static void checkARMCPUName(const Driver &D, const Arg *A, const ArgList &Args,
llvm::StringRef CPUName, llvm::StringRef ArchName,
std::vector<StringRef> &Features,
- const llvm::Triple &Triple, unsigned &ArgFPUID) {
+ const llvm::Triple &Triple,
+ llvm::ARM::FPUKind &ArgFPUKind) {
std::pair<StringRef, StringRef> Split = CPUName.split("+");
std::string CPU = arm::getARMTargetCPU(CPUName, ArchName, Triple);
llvm::ARM::ArchKind ArchKind =
arm::getLLVMArchKindForARM(CPU, ArchName, Triple);
if (ArchKind == llvm::ARM::ArchKind::INVALID ||
- (Split.second.size() &&
- !DecodeARMFeatures(D, Split.second, CPU, ArchKind, Features, ArgFPUID)))
- D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
+ (Split.second.size() && !DecodeARMFeatures(D, Split.second, CPU, ArchKind,
+ Features, ArgFPUKind)))
+ D.Diag(clang::diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << A->getValue();
+}
+
+// If -mfloat-abi=hard or -mhard-float are specified explicitly then check that
+// floating point registers are available on the target CPU.
+static void checkARMFloatABI(const Driver &D, const ArgList &Args,
+ bool HasFPRegs) {
+ if (HasFPRegs)
+ return;
+ const Arg *A =
+ Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float,
+ options::OPT_mfloat_abi_EQ);
+ if (A && (A->getOption().matches(options::OPT_mhard_float) ||
+ (A->getOption().matches(options::OPT_mfloat_abi_EQ) &&
+ A->getValue() == StringRef("hard"))))
+ D.Diag(clang::diag::warn_drv_no_floating_point_registers)
+ << A->getAsString(Args);
}
bool arm::useAAPCSForMachO(const llvm::Triple &T) {
@@ -147,14 +182,35 @@ bool arm::useAAPCSForMachO(const llvm::Triple &T) {
T.getOS() == llvm::Triple::UnknownOS || isARMMProfile(T);
}
+// We follow GCC and support when the backend has support for the MRC/MCR
+// instructions that are used to set the hard thread pointer ("CP15 C13
+// Thread id").
+bool arm::isHardTPSupported(const llvm::Triple &Triple) {
+ int Ver = getARMSubArchVersionNumber(Triple);
+ llvm::ARM::ArchKind AK = llvm::ARM::parseArch(Triple.getArchName());
+ return Triple.isARM() || AK == llvm::ARM::ArchKind::ARMV6T2 ||
+ (Ver >= 7 && AK != llvm::ARM::ArchKind::ARMV8MBaseline);
+}
+
// Select mode for reading thread pointer (-mtp=soft/cp15).
-arm::ReadTPMode arm::getReadTPMode(const Driver &D, const ArgList &Args) {
+arm::ReadTPMode arm::getReadTPMode(const Driver &D, const ArgList &Args,
+ const llvm::Triple &Triple, bool ForAS) {
if (Arg *A = Args.getLastArg(options::OPT_mtp_mode_EQ)) {
arm::ReadTPMode ThreadPointer =
llvm::StringSwitch<arm::ReadTPMode>(A->getValue())
- .Case("cp15", ReadTPMode::Cp15)
+ .Case("cp15", ReadTPMode::TPIDRURO)
+ .Case("tpidrurw", ReadTPMode::TPIDRURW)
+ .Case("tpidruro", ReadTPMode::TPIDRURO)
+ .Case("tpidrprw", ReadTPMode::TPIDRPRW)
.Case("soft", ReadTPMode::Soft)
.Default(ReadTPMode::Invalid);
+ if ((ThreadPointer == ReadTPMode::TPIDRURW ||
+ ThreadPointer == ReadTPMode::TPIDRURO ||
+ ThreadPointer == ReadTPMode::TPIDRPRW) &&
+ !isHardTPSupported(Triple) && !ForAS) {
+ D.Diag(diag::err_target_unsupported_tp_hard) << Triple.getArchName();
+ return ReadTPMode::Invalid;
+ }
if (ThreadPointer != ReadTPMode::Invalid)
return ThreadPointer;
if (StringRef(A->getValue()).empty())
@@ -229,9 +285,9 @@ void arm::setArchNameInTriple(const Driver &D, const ArgList &Args,
// There is no assembler equivalent of -mno-thumb, -marm, or -mno-arm.
if (Value == "-mthumb")
IsThumb = true;
- else if (Value.startswith("-march="))
+ else if (Value.starts_with("-march="))
WaMArch = Value.substr(7);
- else if (Value.startswith("-mcpu="))
+ else if (Value.starts_with("-mcpu="))
WaMCPU = Value.substr(6);
}
}
@@ -257,6 +313,11 @@ void arm::setArchNameInTriple(const Driver &D, const ArgList &Args,
void arm::setFloatABIInTriple(const Driver &D, const ArgList &Args,
llvm::Triple &Triple) {
+ if (Triple.isOSLiteOS()) {
+ Triple.setEnvironment(llvm::Triple::OpenHOS);
+ return;
+ }
+
bool isHardFloat =
(arm::getARMFloatABI(D, Triple, Args) == arm::FloatABI::Hard);
@@ -276,6 +337,8 @@ void arm::setFloatABIInTriple(const Driver &D, const ArgList &Args,
Triple.setEnvironment(isHardFloat ? llvm::Triple::MuslEABIHF
: llvm::Triple::MuslEABI);
break;
+ case llvm::Triple::OpenHOS:
+ break;
default: {
arm::FloatABI DefaultABI = arm::getDefaultFloatABI(Triple);
if (DefaultABI != arm::FloatABI::Invalid &&
@@ -303,6 +366,8 @@ arm::FloatABI arm::getDefaultFloatABI(const llvm::Triple &Triple) {
case llvm::Triple::MacOSX:
case llvm::Triple::IOS:
case llvm::Triple::TvOS:
+ case llvm::Triple::DriverKit:
+ case llvm::Triple::XROS:
// Darwin defaults to "softfp" for v6 and v7.
if (Triple.isWatchABI())
return FloatABI::Hard;
@@ -314,6 +379,10 @@ arm::FloatABI arm::getDefaultFloatABI(const llvm::Triple &Triple) {
// FIXME: this is invalid for WindowsCE
case llvm::Triple::Win32:
+ // It is incorrect to select hard float ABI on MachO platforms if the ABI is
+ // "apcs-gnu".
+ if (Triple.isOSBinFormatMachO() && !useAAPCSForMachO(Triple))
+ return FloatABI::Soft;
return FloatABI::Hard;
case llvm::Triple::NetBSD:
@@ -336,10 +405,13 @@ arm::FloatABI arm::getDefaultFloatABI(const llvm::Triple &Triple) {
}
break;
+ case llvm::Triple::Haiku:
case llvm::Triple::OpenBSD:
return FloatABI::SoftFP;
default:
+ if (Triple.isOHOSFamily())
+ return FloatABI::Soft;
switch (Triple.getEnvironment()) {
case llvm::Triple::GNUEABIHF:
case llvm::Triple::MuslEABIHF:
@@ -412,15 +484,15 @@ static bool hasIntegerMVE(const std::vector<StringRef> &F) {
(NoMVE == F.rend() || std::distance(MVE, NoMVE) > 0);
}
-void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
- const ArgList &Args, ArgStringList &CmdArgs,
- std::vector<StringRef> &Features, bool ForAS) {
+llvm::ARM::FPUKind arm::getARMTargetFeatures(const Driver &D,
+ const llvm::Triple &Triple,
+ const ArgList &Args,
+ std::vector<StringRef> &Features,
+ bool ForAS, bool ForMultilib) {
bool KernelOrKext =
Args.hasArg(options::OPT_mkernel, options::OPT_fapple_kext);
arm::FloatABI ABI = arm::getARMFloatABI(D, Triple, Args);
- arm::ReadTPMode ThreadPointer = arm::getReadTPMode(D, Args);
- llvm::Optional<std::pair<const Arg *, StringRef>> WaCPU, WaFPU, WaHDiv,
- WaArch;
+ std::optional<std::pair<const Arg *, StringRef>> WaCPU, WaFPU, WaHDiv, WaArch;
// This vector will accumulate features from the architecture
// extension suffixes on -mcpu and -march (e.g. the 'bar' in
@@ -457,28 +529,38 @@ void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
// We use getValues here because you can have many options per -Wa
// We will keep the last one we find for each of these
for (StringRef Value : A->getValues()) {
- if (Value.startswith("-mfpu=")) {
+ if (Value.starts_with("-mfpu=")) {
WaFPU = std::make_pair(A, Value.substr(6));
- } else if (Value.startswith("-mcpu=")) {
+ } else if (Value.starts_with("-mcpu=")) {
WaCPU = std::make_pair(A, Value.substr(6));
- } else if (Value.startswith("-mhwdiv=")) {
+ } else if (Value.starts_with("-mhwdiv=")) {
WaHDiv = std::make_pair(A, Value.substr(8));
- } else if (Value.startswith("-march=")) {
+ } else if (Value.starts_with("-march=")) {
WaArch = std::make_pair(A, Value.substr(7));
}
}
}
+
+ // The integrated assembler doesn't implement e_flags setting behavior for
+ // -meabi=gnu (gcc -mabi={apcs-gnu,atpcs} passes -meabi=gnu to gas). For
+ // compatibility we accept but warn.
+ if (Arg *A = Args.getLastArgNoClaim(options::OPT_mabi_EQ))
+ A->ignoreTargetSpecific();
}
- if (ThreadPointer == arm::ReadTPMode::Cp15)
- Features.push_back("+read-tp-hard");
+ if (getReadTPMode(D, Args, Triple, ForAS) == ReadTPMode::TPIDRURW)
+ Features.push_back("+read-tp-tpidrurw");
+ if (getReadTPMode(D, Args, Triple, ForAS) == ReadTPMode::TPIDRURO)
+ Features.push_back("+read-tp-tpidruro");
+ if (getReadTPMode(D, Args, Triple, ForAS) == ReadTPMode::TPIDRPRW)
+ Features.push_back("+read-tp-tpidrprw");
const Arg *ArchArg = Args.getLastArg(options::OPT_march_EQ);
const Arg *CPUArg = Args.getLastArg(options::OPT_mcpu_EQ);
StringRef ArchName;
StringRef CPUName;
- unsigned ArchArgFPUID = llvm::ARM::FK_INVALID;
- unsigned CPUArgFPUID = llvm::ARM::FK_INVALID;
+ llvm::ARM::FPUKind ArchArgFPUKind = llvm::ARM::FK_INVALID;
+ llvm::ARM::FPUKind CPUArgFPUKind = llvm::ARM::FK_INVALID;
// Check -mcpu. ClangAs gives preference to -Wa,-mcpu=.
if (WaCPU) {
@@ -498,13 +580,13 @@ void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
ArchName = WaArch->second;
// This will set any features after the base architecture.
checkARMArchName(D, WaArch->first, Args, ArchName, CPUName,
- ExtensionFeatures, Triple, ArchArgFPUID);
+ ExtensionFeatures, Triple, ArchArgFPUKind);
// The base architecture was handled in ToolChain::ComputeLLVMTriple because
// triple is read only by this point.
} else if (ArchArg) {
ArchName = ArchArg->getValue();
checkARMArchName(D, ArchArg, Args, ArchName, CPUName, ExtensionFeatures,
- Triple, ArchArgFPUID);
+ Triple, ArchArgFPUKind);
}
// Add CPU features for generic CPUs
@@ -524,9 +606,14 @@ void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
if (CPUArg)
checkARMCPUName(D, CPUArg, Args, CPUName, ArchName, ExtensionFeatures,
- Triple, CPUArgFPUID);
+ Triple, CPUArgFPUKind);
+
+ // TODO Handle -mtune=. Suppress -Wunused-command-line-argument as a
+ // longstanding behavior.
+ (void)Args.getLastArg(options::OPT_mtune_EQ);
+
// Honor -mfpu=. ClangAs gives preference to -Wa,-mfpu=.
- unsigned FPUID = llvm::ARM::FK_INVALID;
+ llvm::ARM::FPUKind FPUKind = llvm::ARM::FK_INVALID;
const Arg *FPUArg = Args.getLastArg(options::OPT_mfpu_EQ);
if (WaFPU) {
if (FPUArg)
@@ -534,20 +621,25 @@ void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
<< FPUArg->getAsString(Args);
(void)getARMFPUFeatures(D, WaFPU->first, Args, WaFPU->second, Features);
} else if (FPUArg) {
- FPUID = getARMFPUFeatures(D, FPUArg, Args, FPUArg->getValue(), Features);
+ FPUKind = getARMFPUFeatures(D, FPUArg, Args, FPUArg->getValue(), Features);
} else if (Triple.isAndroid() && getARMSubArchVersionNumber(Triple) >= 7) {
const char *AndroidFPU = "neon";
- FPUID = llvm::ARM::parseFPU(AndroidFPU);
- if (!llvm::ARM::getFPUFeatures(FPUID, Features))
+ FPUKind = llvm::ARM::parseFPU(AndroidFPU);
+ if (!llvm::ARM::getFPUFeatures(FPUKind, Features))
D.Diag(clang::diag::err_drv_clang_unsupported)
<< std::string("-mfpu=") + AndroidFPU;
+ } else if (ArchArgFPUKind != llvm::ARM::FK_INVALID ||
+ CPUArgFPUKind != llvm::ARM::FK_INVALID) {
+ FPUKind =
+ CPUArgFPUKind != llvm::ARM::FK_INVALID ? CPUArgFPUKind : ArchArgFPUKind;
+ (void)llvm::ARM::getFPUFeatures(FPUKind, Features);
} else {
if (!ForAS) {
std::string CPU = arm::getARMTargetCPU(CPUName, ArchName, Triple);
llvm::ARM::ArchKind ArchKind =
arm::getLLVMArchKindForARM(CPU, ArchName, Triple);
- FPUID = llvm::ARM::getDefaultFPU(CPU, ArchKind);
- (void)llvm::ARM::getFPUFeatures(FPUID, Features);
+ FPUKind = llvm::ARM::getDefaultFPU(CPU, ArchKind);
+ (void)llvm::ARM::getFPUFeatures(FPUKind, Features);
}
}
@@ -598,25 +690,30 @@ fp16_fml_fallthrough:
// -march/-mcpu effectively disables the FPU (GCC ignores the -mfpu options in
// this case). Note that the ABI can also be set implicitly by the target
// selected.
+ bool HasFPRegs = true;
if (ABI == arm::FloatABI::Soft) {
llvm::ARM::getFPUFeatures(llvm::ARM::FK_NONE, Features);
// Disable all features relating to hardware FP, not already disabled by the
// above call.
- Features.insert(Features.end(), {"-dotprod", "-fp16fml", "-bf16", "-mve",
- "-mve.fp", "-fpregs"});
- } else if (FPUID == llvm::ARM::FK_NONE ||
- ArchArgFPUID == llvm::ARM::FK_NONE ||
- CPUArgFPUID == llvm::ARM::FK_NONE) {
+ Features.insert(Features.end(),
+ {"-dotprod", "-fp16fml", "-bf16", "-mve", "-mve.fp"});
+ HasFPRegs = false;
+ FPUKind = llvm::ARM::FK_NONE;
+ } else if (FPUKind == llvm::ARM::FK_NONE ||
+ ArchArgFPUKind == llvm::ARM::FK_NONE ||
+ CPUArgFPUKind == llvm::ARM::FK_NONE) {
// -mfpu=none, -march=armvX+nofp or -mcpu=X+nofp is *very* similar to
// -mfloat-abi=soft, only that it should not disable MVE-I. They disable the
// FPU, but not the FPU registers, thus MVE-I, which depends only on the
// latter, is still supported.
Features.insert(Features.end(),
{"-dotprod", "-fp16fml", "-bf16", "-mve.fp"});
- if (!hasIntegerMVE(Features))
- Features.emplace_back("-fpregs");
+ HasFPRegs = hasIntegerMVE(Features);
+ FPUKind = llvm::ARM::FK_NONE;
}
+ if (!HasFPRegs)
+ Features.emplace_back("-fpregs");
// En/disable crc code generation.
if (Arg *A = Args.getLastArg(options::OPT_mcrc, options::OPT_mnocrc)) {
@@ -697,10 +794,41 @@ fp16_fml_fallthrough:
}
}
+ // Propagate frame-chain model selection
+ if (Arg *A = Args.getLastArg(options::OPT_mframe_chain)) {
+ StringRef FrameChainOption = A->getValue();
+ if (FrameChainOption.starts_with("aapcs"))
+ Features.push_back("+aapcs-frame-chain");
+ if (FrameChainOption == "aapcs+leaf")
+ Features.push_back("+aapcs-frame-chain-leaf");
+ }
+
// CMSE: Check for target 8M (for -mcmse to be applicable) is performed later.
if (Args.getLastArg(options::OPT_mcmse))
Features.push_back("+8msecext");
+ if (Arg *A = Args.getLastArg(options::OPT_mfix_cmse_cve_2021_35465,
+ options::OPT_mno_fix_cmse_cve_2021_35465)) {
+ if (!Args.getLastArg(options::OPT_mcmse))
+ D.Diag(diag::err_opt_not_valid_without_opt)
+ << A->getOption().getName() << "-mcmse";
+
+ if (A->getOption().matches(options::OPT_mfix_cmse_cve_2021_35465))
+ Features.push_back("+fix-cmse-cve-2021-35465");
+ else
+ Features.push_back("-fix-cmse-cve-2021-35465");
+ }
+
+ // This also handles the -m(no-)fix-cortex-a72-1655431 arguments via aliases.
+ if (Arg *A = Args.getLastArg(options::OPT_mfix_cortex_a57_aes_1742098,
+ options::OPT_mno_fix_cortex_a57_aes_1742098)) {
+ if (A->getOption().matches(options::OPT_mfix_cortex_a57_aes_1742098)) {
+ Features.push_back("+fix-cortex-a57-aes-1742098");
+ } else {
+ Features.push_back("-fix-cortex-a57-aes-1742098");
+ }
+ }
+
// Look for the last occurrence of -mlong-calls or -mno-long-calls. If
// neither options are specified, see if we are compiling for kernel/kext and
// decide whether to pass "+long-calls" based on the OS and its version.
@@ -709,39 +837,42 @@ fp16_fml_fallthrough:
if (A->getOption().matches(options::OPT_mlong_calls))
Features.push_back("+long-calls");
} else if (KernelOrKext && (!Triple.isiOS() || Triple.isOSVersionLT(6)) &&
- !Triple.isWatchOS()) {
- Features.push_back("+long-calls");
+ !Triple.isWatchOS() && !Triple.isXROS()) {
+ Features.push_back("+long-calls");
}
// Generate execute-only output (no data access to code sections).
// This only makes sense for the compiler, not for the assembler.
- if (!ForAS) {
+ // It's not needed for multilib selection and may hide an unused
+ // argument diagnostic if the code is always run.
+ if (!ForAS && !ForMultilib) {
// Supported only on ARMv6T2 and ARMv7 and above.
- // Cannot be combined with -mno-movt or -mlong-calls
+ // Cannot be combined with -mno-movt.
if (Arg *A = Args.getLastArg(options::OPT_mexecute_only, options::OPT_mno_execute_only)) {
if (A->getOption().matches(options::OPT_mexecute_only)) {
if (getARMSubArchVersionNumber(Triple) < 7 &&
- llvm::ARM::parseArch(Triple.getArchName()) != llvm::ARM::ArchKind::ARMV6T2)
+ llvm::ARM::parseArch(Triple.getArchName()) != llvm::ARM::ArchKind::ARMV6T2 &&
+ llvm::ARM::parseArch(Triple.getArchName()) != llvm::ARM::ArchKind::ARMV6M)
D.Diag(diag::err_target_unsupported_execute_only) << Triple.getArchName();
- else if (Arg *B = Args.getLastArg(options::OPT_mno_movt))
- D.Diag(diag::err_opt_not_valid_with_opt) << A->getAsString(Args) << B->getAsString(Args);
- // Long calls create constant pool entries and have not yet been fixed up
- // to play nicely with execute-only. Hence, they cannot be used in
- // execute-only code for now
- else if (Arg *B = Args.getLastArg(options::OPT_mlong_calls, options::OPT_mno_long_calls)) {
- if (B->getOption().matches(options::OPT_mlong_calls))
- D.Diag(diag::err_opt_not_valid_with_opt) << A->getAsString(Args) << B->getAsString(Args);
- }
+ else if (llvm::ARM::parseArch(Triple.getArchName()) == llvm::ARM::ArchKind::ARMV6M) {
+ if (Arg *PIArg = Args.getLastArg(options::OPT_fropi, options::OPT_frwpi,
+ options::OPT_fpic, options::OPT_fpie,
+ options::OPT_fPIC, options::OPT_fPIE))
+ D.Diag(diag::err_opt_not_valid_with_opt_on_target)
+ << A->getAsString(Args) << PIArg->getAsString(Args) << Triple.getArchName();
+ } else if (Arg *B = Args.getLastArg(options::OPT_mno_movt))
+ D.Diag(diag::err_opt_not_valid_with_opt)
+ << A->getAsString(Args) << B->getAsString(Args);
Features.push_back("+execute-only");
}
}
}
// Kernel code has more strict alignment requirements.
- if (KernelOrKext)
+ if (KernelOrKext) {
Features.push_back("+strict-align");
- else if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
- options::OPT_munaligned_access)) {
+ } else if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
+ options::OPT_munaligned_access)) {
if (A->getOption().matches(options::OPT_munaligned_access)) {
// No v6M core supports unaligned memory access (v6M ARM ARM A3.2).
if (Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m)
@@ -763,7 +894,8 @@ fp16_fml_fallthrough:
// which raises an alignment fault on unaligned accesses. Linux
// defaults this bit to 0 and handles it as a system-wide (not
// per-process) setting. It is therefore safe to assume that ARMv7+
- // Linux targets support unaligned accesses. The same goes for NaCl.
+ // Linux targets support unaligned accesses. The same goes for NaCl
+ // and Windows.
//
// The above behavior is consistent with GCC.
int VersionNum = getARMSubArchVersionNumber(Triple);
@@ -771,7 +903,8 @@ fp16_fml_fallthrough:
if (VersionNum < 6 ||
Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m)
Features.push_back("+strict-align");
- } else if (Triple.isOSLinux() || Triple.isOSNaCl()) {
+ } else if (Triple.isOSLinux() || Triple.isOSNaCl() ||
+ Triple.isOSWindows()) {
if (VersionNum < 7)
Features.push_back("+strict-align");
} else
@@ -823,8 +956,8 @@ fp16_fml_fallthrough:
DisableComdat = true;
continue;
}
- D.Diag(diag::err_invalid_sls_hardening)
- << Scope << A->getAsString(Args);
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << Scope;
break;
}
}
@@ -843,9 +976,15 @@ fp16_fml_fallthrough:
}
}
+ if (Args.getLastArg(options::OPT_mno_bti_at_return_twice))
+ Features.push_back("+no-bti-at-return-twice");
+
+ checkARMFloatABI(D, Args, HasFPRegs);
+
+ return FPUKind;
}
-const std::string arm::getARMArch(StringRef Arch, const llvm::Triple &Triple) {
+std::string arm::getARMArch(StringRef Arch, const llvm::Triple &Triple) {
std::string MArch;
if (!Arch.empty())
MArch = std::string(Arch);
@@ -881,7 +1020,7 @@ StringRef arm::getARMCPUForMArch(StringRef Arch, const llvm::Triple &Triple) {
// We need to return an empty string here on invalid MArch values as the
// various places that call this function can't cope with a null result.
- return Triple.getARMCPUForArch(MArch);
+ return llvm::ARM::getARMCPUForArch(Triple, MArch);
}
/// getARMTargetCPU - Get the (LLVM) name of the ARM cpu we are targeting.
@@ -914,7 +1053,8 @@ llvm::ARM::ArchKind arm::getLLVMArchKindForARM(StringRef CPU, StringRef Arch,
if (ArchKind == llvm::ARM::ArchKind::INVALID)
// In case of generic Arch, i.e. "arm",
// extract arch from default cpu of the Triple
- ArchKind = llvm::ARM::parseCPUArch(Triple.getARMCPUForArch(ARMArch));
+ ArchKind =
+ llvm::ARM::parseCPUArch(llvm::ARM::getARMCPUForArch(Triple, ARMArch));
} else {
// FIXME: horrible hack to get around the fact that Cortex-A7 is only an
// armv7k triple if it's actually been specified via "-arch armv7k".
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h
index 8e7c10ecd5d6..fa62ac89e3a1 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h
@@ -11,9 +11,10 @@
#include "clang/Driver/ToolChain.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/Option/ArgList.h"
#include "llvm/Option/Option.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/TargetParser/ARMTargetParser.h"
+#include "llvm/TargetParser/Triple.h"
#include <string>
#include <vector>
@@ -24,7 +25,7 @@ namespace arm {
std::string getARMTargetCPU(StringRef CPU, llvm::StringRef Arch,
const llvm::Triple &Triple);
-const std::string getARMArch(llvm::StringRef Arch, const llvm::Triple &Triple);
+std::string getARMArch(llvm::StringRef Arch, const llvm::Triple &Triple);
StringRef getARMCPUForMArch(llvm::StringRef Arch, const llvm::Triple &Triple);
llvm::ARM::ArchKind getLLVMArchKindForARM(StringRef CPU, StringRef Arch,
const llvm::Triple &Triple);
@@ -37,7 +38,9 @@ void appendBE8LinkFlag(const llvm::opt::ArgList &Args,
enum class ReadTPMode {
Invalid,
Soft,
- Cp15,
+ TPIDRURW,
+ TPIDRURO,
+ TPIDRPRW,
};
enum class FloatABI {
@@ -53,7 +56,9 @@ FloatABI getARMFloatABI(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
void setFloatABIInTriple(const Driver &D, const llvm::opt::ArgList &Args,
llvm::Triple &triple);
-ReadTPMode getReadTPMode(const Driver &D, const llvm::opt::ArgList &Args);
+bool isHardTPSupported(const llvm::Triple &Triple);
+ReadTPMode getReadTPMode(const Driver &D, const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple, bool ForAS);
void setArchNameInTriple(const Driver &D, const llvm::opt::ArgList &Args,
types::ID InputType, llvm::Triple &Triple);
@@ -61,13 +66,15 @@ bool useAAPCSForMachO(const llvm::Triple &T);
void getARMArchCPUFromArgs(const llvm::opt::ArgList &Args,
llvm::StringRef &Arch, llvm::StringRef &CPU,
bool FromAs = false);
-void getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
- const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs,
- std::vector<llvm::StringRef> &Features, bool ForAS);
+llvm::ARM::FPUKind getARMTargetFeatures(const Driver &D,
+ const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
+ std::vector<llvm::StringRef> &Features,
+ bool ForAS, bool ForMultilib = false);
int getARMSubArchVersionNumber(const llvm::Triple &Triple);
bool isARMMProfile(const llvm::Triple &Triple);
bool isARMAProfile(const llvm::Triple &Triple);
+bool isARMBigEndian(const llvm::Triple &Triple, const llvm::opt::ArgList &Args);
} // end namespace arm
} // end namespace tools
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/CSKY.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/CSKY.cpp
new file mode 100644
index 000000000000..e94ea12f46dc
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/CSKY.cpp
@@ -0,0 +1,169 @@
+//===--- CSKY.cpp - CSKY Helpers for Tools --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "CSKY.h"
+#include "ToolChains/CommonArgs.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Options.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/CSKYTargetParser.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/TargetParser.h"
+
+using namespace clang::driver;
+using namespace clang::driver::tools;
+using namespace clang;
+using namespace llvm::opt;
+
+std::optional<llvm::StringRef>
+csky::getCSKYArchName(const Driver &D, const ArgList &Args,
+ const llvm::Triple &Triple) {
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
+ llvm::CSKY::ArchKind ArchKind = llvm::CSKY::parseArch(A->getValue());
+
+ if (ArchKind == llvm::CSKY::ArchKind::INVALID) {
+ D.Diag(clang::diag::err_drv_invalid_arch_name) << A->getAsString(Args);
+ return std::nullopt;
+ }
+ return std::optional<llvm::StringRef>(A->getValue());
+ }
+
+ if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_mcpu_EQ)) {
+ llvm::CSKY::ArchKind ArchKind = llvm::CSKY::parseCPUArch(A->getValue());
+ if (ArchKind == llvm::CSKY::ArchKind::INVALID) {
+ D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
+ return std::nullopt;
+ }
+ return std::optional<llvm::StringRef>(llvm::CSKY::getArchName(ArchKind));
+ }
+
+ return std::optional<llvm::StringRef>("ck810");
+}
+
+csky::FloatABI csky::getCSKYFloatABI(const Driver &D, const ArgList &Args) {
+ csky::FloatABI ABI = FloatABI::Soft;
+ if (Arg *A =
+ Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float,
+ options::OPT_mfloat_abi_EQ)) {
+ if (A->getOption().matches(options::OPT_msoft_float)) {
+ ABI = FloatABI::Soft;
+ } else if (A->getOption().matches(options::OPT_mhard_float)) {
+ ABI = FloatABI::Hard;
+ } else {
+ ABI = llvm::StringSwitch<csky::FloatABI>(A->getValue())
+ .Case("soft", FloatABI::Soft)
+ .Case("softfp", FloatABI::SoftFP)
+ .Case("hard", FloatABI::Hard)
+ .Default(FloatABI::Invalid);
+ if (ABI == FloatABI::Invalid) {
+ D.Diag(diag::err_drv_invalid_mfloat_abi) << A->getAsString(Args);
+ ABI = FloatABI::Soft;
+ }
+ }
+ }
+
+ return ABI;
+}
+
+// Handle -mfpu=.
+static llvm::CSKY::CSKYFPUKind
+getCSKYFPUFeatures(const Driver &D, const Arg *A, const ArgList &Args,
+ StringRef FPU, std::vector<StringRef> &Features) {
+
+ llvm::CSKY::CSKYFPUKind FPUID =
+ llvm::StringSwitch<llvm::CSKY::CSKYFPUKind>(FPU)
+ .Case("auto", llvm::CSKY::FK_AUTO)
+ .Case("fpv2", llvm::CSKY::FK_FPV2)
+ .Case("fpv2_divd", llvm::CSKY::FK_FPV2_DIVD)
+ .Case("fpv2_sf", llvm::CSKY::FK_FPV2_SF)
+ .Case("fpv3", llvm::CSKY::FK_FPV3)
+ .Case("fpv3_hf", llvm::CSKY::FK_FPV3_HF)
+ .Case("fpv3_hsf", llvm::CSKY::FK_FPV3_HSF)
+ .Case("fpv3_sdf", llvm::CSKY::FK_FPV3_SDF)
+ .Default(llvm::CSKY::FK_INVALID);
+ if (FPUID == llvm::CSKY::FK_INVALID) {
+ D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
+ return llvm::CSKY::FK_INVALID;
+ }
+
+ auto RemoveTargetFPUFeature =
+ [&Features](ArrayRef<const char *> FPUFeatures) {
+ for (auto FPUFeature : FPUFeatures) {
+ auto it = llvm::find(Features, FPUFeature);
+ if (it != Features.end())
+ Features.erase(it);
+ }
+ };
+
+ RemoveTargetFPUFeature({"+fpuv2_sf", "+fpuv2_df", "+fdivdu", "+fpuv3_hi",
+ "+fpuv3_hf", "+fpuv3_sf", "+fpuv3_df"});
+
+ if (!llvm::CSKY::getFPUFeatures(FPUID, Features)) {
+ D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
+ return llvm::CSKY::FK_INVALID;
+ }
+
+ return FPUID;
+}
+
+void csky::getCSKYTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args, ArgStringList &CmdArgs,
+ std::vector<llvm::StringRef> &Features) {
+ llvm::StringRef archName;
+ llvm::StringRef cpuName;
+ llvm::CSKY::ArchKind ArchKind = llvm::CSKY::ArchKind::INVALID;
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
+ ArchKind = llvm::CSKY::parseArch(A->getValue());
+ if (ArchKind == llvm::CSKY::ArchKind::INVALID) {
+ D.Diag(clang::diag::err_drv_invalid_arch_name) << A->getAsString(Args);
+ return;
+ }
+ archName = A->getValue();
+ }
+
+ if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_mcpu_EQ)) {
+ llvm::CSKY::ArchKind Kind = llvm::CSKY::parseCPUArch(A->getValue());
+ if (Kind == llvm::CSKY::ArchKind::INVALID) {
+ D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
+ return;
+ }
+ if (!archName.empty() && Kind != ArchKind) {
+ D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
+ return;
+ }
+ cpuName = A->getValue();
+ if (archName.empty())
+ archName = llvm::CSKY::getArchName(Kind);
+ }
+
+ if (archName.empty() && cpuName.empty()) {
+ archName = "ck810";
+ cpuName = "ck810";
+ } else if (!archName.empty() && cpuName.empty()) {
+ cpuName = archName;
+ }
+
+ csky::FloatABI FloatABI = csky::getCSKYFloatABI(D, Args);
+
+ if (FloatABI == csky::FloatABI::Hard) {
+ Features.push_back("+hard-float-abi");
+ Features.push_back("+hard-float");
+ } else if (FloatABI == csky::FloatABI::SoftFP) {
+ Features.push_back("+hard-float");
+ }
+
+ uint64_t Extension = llvm::CSKY::getDefaultExtensions(cpuName);
+ llvm::CSKY::getExtensionFeatures(Extension, Features);
+
+ if (const Arg *FPUArg = Args.getLastArg(options::OPT_mfpu_EQ))
+ getCSKYFPUFeatures(D, FPUArg, Args, FPUArg->getValue(), Features);
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/CSKY.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/CSKY.h
new file mode 100644
index 000000000000..f3730d2cf4a1
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/CSKY.h
@@ -0,0 +1,47 @@
+//===--- CSKY.h - CSKY-specific Tool Helpers ------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_CSKY_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_CSKY_H
+
+#include "clang/Driver/Driver.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Option/Option.h"
+#include <string>
+#include <vector>
+
+namespace clang {
+namespace driver {
+namespace tools {
+namespace csky {
+
+enum class FloatABI {
+ Invalid,
+ Soft,
+ SoftFP,
+ Hard,
+};
+
+FloatABI getCSKYFloatABI(const Driver &D, const llvm::opt::ArgList &Args);
+
+void getCSKYTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
+ std::vector<llvm::StringRef> &Features);
+
+std::optional<llvm::StringRef> getCSKYArchName(const Driver &D,
+ const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple);
+
+} // end namespace csky
+} // namespace tools
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_CSKY_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/LoongArch.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/LoongArch.cpp
new file mode 100644
index 000000000000..31153a67ad28
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/LoongArch.cpp
@@ -0,0 +1,232 @@
+//===--- LoongArch.cpp - LoongArch Helpers for Tools ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "LoongArch.h"
+#include "ToolChains/CommonArgs.h"
+#include "clang/Basic/DiagnosticDriver.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Options.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/LoongArchTargetParser.h"
+
+using namespace clang::driver;
+using namespace clang::driver::tools;
+using namespace clang;
+using namespace llvm::opt;
+
+StringRef loongarch::getLoongArchABI(const Driver &D, const ArgList &Args,
+ const llvm::Triple &Triple) {
+ assert((Triple.getArch() == llvm::Triple::loongarch32 ||
+ Triple.getArch() == llvm::Triple::loongarch64) &&
+ "Unexpected triple");
+ bool IsLA32 = Triple.getArch() == llvm::Triple::loongarch32;
+
+ // Record -mabi value for later use.
+ const Arg *MABIArg = Args.getLastArg(options::OPT_mabi_EQ);
+ StringRef MABIValue;
+ if (MABIArg) {
+ MABIValue = MABIArg->getValue();
+ }
+
+ // Parse -mfpu value for later use.
+ const Arg *MFPUArg = Args.getLastArg(options::OPT_mfpu_EQ);
+ int FPU = -1;
+ if (MFPUArg) {
+ StringRef V = MFPUArg->getValue();
+ if (V == "64")
+ FPU = 64;
+ else if (V == "32")
+ FPU = 32;
+ else if (V == "0" || V == "none")
+ FPU = 0;
+ else
+ D.Diag(diag::err_drv_loongarch_invalid_mfpu_EQ) << V;
+ }
+
+ // Check -m*-float firstly since they have highest priority.
+ if (const Arg *A = Args.getLastArg(options::OPT_mdouble_float,
+ options::OPT_msingle_float,
+ options::OPT_msoft_float)) {
+ StringRef ImpliedABI;
+ int ImpliedFPU = -1;
+ if (A->getOption().matches(options::OPT_mdouble_float)) {
+ ImpliedABI = IsLA32 ? "ilp32d" : "lp64d";
+ ImpliedFPU = 64;
+ }
+ if (A->getOption().matches(options::OPT_msingle_float)) {
+ ImpliedABI = IsLA32 ? "ilp32f" : "lp64f";
+ ImpliedFPU = 32;
+ }
+ if (A->getOption().matches(options::OPT_msoft_float)) {
+ ImpliedABI = IsLA32 ? "ilp32s" : "lp64s";
+ ImpliedFPU = 0;
+ }
+
+ // Check `-mabi=` and `-mfpu=` settings and report if they conflict with
+ // the higher-priority settings implied by -m*-float.
+ //
+ // ImpliedABI and ImpliedFPU are guaranteed to have valid values because
+ // one of the match arms must match if execution can arrive here at all.
+ if (!MABIValue.empty() && ImpliedABI != MABIValue)
+ D.Diag(diag::warn_drv_loongarch_conflicting_implied_val)
+ << MABIArg->getAsString(Args) << A->getAsString(Args) << ImpliedABI;
+
+ if (FPU != -1 && ImpliedFPU != FPU)
+ D.Diag(diag::warn_drv_loongarch_conflicting_implied_val)
+ << MFPUArg->getAsString(Args) << A->getAsString(Args) << ImpliedFPU;
+
+ return ImpliedABI;
+ }
+
+ // If `-mabi=` is specified, use it.
+ if (!MABIValue.empty())
+ return MABIValue;
+
+ // Select abi based on -mfpu=xx.
+ switch (FPU) {
+ case 64:
+ return IsLA32 ? "ilp32d" : "lp64d";
+ case 32:
+ return IsLA32 ? "ilp32f" : "lp64f";
+ case 0:
+ return IsLA32 ? "ilp32s" : "lp64s";
+ }
+
+ // Choose a default based on the triple.
+ // Honor the explicit ABI modifier suffix in triple's environment part if
+ // present, falling back to {ILP32,LP64}D otherwise.
+ switch (Triple.getEnvironment()) {
+ case llvm::Triple::GNUSF:
+ return IsLA32 ? "ilp32s" : "lp64s";
+ case llvm::Triple::GNUF32:
+ return IsLA32 ? "ilp32f" : "lp64f";
+ case llvm::Triple::GNUF64:
+ // This was originally permitted (and indeed the canonical way) to
+ // represent the {ILP32,LP64}D ABIs, but in Feb 2023 Loongson decided to
+ // drop the explicit suffix in favor of unmarked `-gnu` for the
+ // "general-purpose" ABIs, among other non-technical reasons.
+ //
+ // The spec change did not mention whether existing usages of "gnuf64"
+ // shall remain valid or not, so we are going to continue recognizing it
+ // for some time, until it is clear that everyone else has migrated away
+ // from it.
+ [[fallthrough]];
+ case llvm::Triple::GNU:
+ default:
+ return IsLA32 ? "ilp32d" : "lp64d";
+ }
+}
+
+void loongarch::getLoongArchTargetFeatures(const Driver &D,
+ const llvm::Triple &Triple,
+ const ArgList &Args,
+ std::vector<StringRef> &Features) {
+ std::string ArchName;
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
+ ArchName = A->getValue();
+ ArchName = postProcessTargetCPUString(ArchName, Triple);
+ llvm::LoongArch::getArchFeatures(ArchName, Features);
+
+ // Select floating-point features determined by -mdouble-float,
+ // -msingle-float, -msoft-float and -mfpu.
+ // Note: -m*-float wins any other options.
+ if (const Arg *A = Args.getLastArg(options::OPT_mdouble_float,
+ options::OPT_msingle_float,
+ options::OPT_msoft_float)) {
+ if (A->getOption().matches(options::OPT_mdouble_float)) {
+ Features.push_back("+f");
+ Features.push_back("+d");
+ } else if (A->getOption().matches(options::OPT_msingle_float)) {
+ Features.push_back("+f");
+ Features.push_back("-d");
+ } else /*Soft-float*/ {
+ Features.push_back("-f");
+ Features.push_back("-d");
+ }
+ } else if (const Arg *A = Args.getLastArg(options::OPT_mfpu_EQ)) {
+ StringRef FPU = A->getValue();
+ if (FPU == "64") {
+ Features.push_back("+f");
+ Features.push_back("+d");
+ } else if (FPU == "32") {
+ Features.push_back("+f");
+ Features.push_back("-d");
+ } else if (FPU == "0" || FPU == "none") {
+ Features.push_back("-f");
+ Features.push_back("-d");
+ } else {
+ D.Diag(diag::err_drv_loongarch_invalid_mfpu_EQ) << FPU;
+ }
+ }
+
+ // Select the `ual` feature determined by -m[no-]unaligned-access
+ // or the alias -m[no-]strict-align.
+ AddTargetFeature(Args, Features, options::OPT_munaligned_access,
+ options::OPT_mno_unaligned_access, "ual");
+
+ // Accept but warn about these TargetSpecific options.
+ if (Arg *A = Args.getLastArgNoClaim(options::OPT_mabi_EQ))
+ A->ignoreTargetSpecific();
+ if (Arg *A = Args.getLastArgNoClaim(options::OPT_mfpu_EQ))
+ A->ignoreTargetSpecific();
+
+ // Select lsx feature determined by -m[no-]lsx.
+ if (const Arg *A = Args.getLastArg(options::OPT_mlsx, options::OPT_mno_lsx)) {
+ // LSX depends on 64-bit FPU.
+ // -m*-float and -mfpu=none/0/32 conflict with -mlsx.
+ if (A->getOption().matches(options::OPT_mlsx)) {
+ if (llvm::find(Features, "-d") != Features.end())
+ D.Diag(diag::err_drv_loongarch_wrong_fpu_width_for_lsx);
+ else /*-mlsx*/
+ Features.push_back("+lsx");
+ } else /*-mno-lsx*/ {
+ Features.push_back("-lsx");
+ }
+ }
+
+ // Select lasx feature determined by -m[no-]lasx.
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_mlasx, options::OPT_mno_lasx)) {
+ // LASX depends on 64-bit FPU and LSX.
+ // -mno-lsx conflicts with -mlasx.
+ if (A->getOption().matches(options::OPT_mlasx)) {
+ if (llvm::find(Features, "-d") != Features.end())
+ D.Diag(diag::err_drv_loongarch_wrong_fpu_width_for_lasx);
+ else if (llvm::find(Features, "-lsx") != Features.end())
+ D.Diag(diag::err_drv_loongarch_invalid_simd_option_combination);
+ else { /*-mlasx*/
+ Features.push_back("+lsx");
+ Features.push_back("+lasx");
+ }
+ } else /*-mno-lasx*/
+ Features.push_back("-lasx");
+ }
+}
+
+std::string loongarch::postProcessTargetCPUString(const std::string &CPU,
+ const llvm::Triple &Triple) {
+ std::string CPUString = CPU;
+ if (CPUString == "native") {
+ CPUString = llvm::sys::getHostCPUName();
+ if (CPUString == "generic")
+ CPUString = llvm::LoongArch::getDefaultArch(Triple.isLoongArch64());
+ }
+ if (CPUString.empty())
+ CPUString = llvm::LoongArch::getDefaultArch(Triple.isLoongArch64());
+ return CPUString;
+}
+
+std::string loongarch::getLoongArchTargetCPU(const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple) {
+ std::string CPU;
+ // If we have -march, use that.
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
+ CPU = A->getValue();
+ return postProcessTargetCPUString(CPU, Triple);
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/LoongArch.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/LoongArch.h
new file mode 100644
index 000000000000..d8280cd836f8
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/LoongArch.h
@@ -0,0 +1,37 @@
+//===--- LoongArch.h - LoongArch-specific Tool Helpers ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_LOONGARCH_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_LOONGARCH_H
+
+#include "clang/Driver/Driver.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Option/Option.h"
+
+namespace clang {
+namespace driver {
+namespace tools {
+namespace loongarch {
+void getLoongArchTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
+ std::vector<llvm::StringRef> &Features);
+
+StringRef getLoongArchABI(const Driver &D, const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple);
+
+std::string postProcessTargetCPUString(const std::string &CPU,
+ const llvm::Triple &Triple);
+
+std::string getLoongArchTargetCPU(const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple);
+} // end namespace loongarch
+} // end namespace tools
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_LOONGARCH_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/M68k.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/M68k.cpp
index 119e24cedbab..963f7a187d63 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/M68k.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/M68k.cpp
@@ -14,8 +14,8 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Option/ArgList.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/Regex.h"
+#include "llvm/TargetParser/Host.h"
#include <sstream>
using namespace clang::driver;
@@ -65,13 +65,35 @@ std::string m68k::getM68kTargetCPU(const ArgList &Args) {
return "";
}
+static void addFloatABIFeatures(const llvm::opt::ArgList &Args,
+ std::vector<llvm::StringRef> &Features) {
+ Arg *A = Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float,
+ options::OPT_m68881);
+ // Opt out FPU even for newer CPUs.
+ if (A && A->getOption().matches(options::OPT_msoft_float)) {
+ Features.push_back("-isa-68881");
+ Features.push_back("-isa-68882");
+ return;
+ }
+
+ std::string CPU = m68k::getM68kTargetCPU(Args);
+ // Only enable M68881 for CPU < 68020 if the related flags are present.
+ if ((A && (CPU == "M68000" || CPU == "M68010")) ||
+ // Otherwise, by default we assume newer CPUs have M68881/2.
+ CPU == "M68020")
+ Features.push_back("+isa-68881");
+ else if (CPU == "M68030" || CPU == "M68040" || CPU == "M68060")
+ // Note that although CPU >= M68040 imply M68882, we still add `isa-68882`
+ // anyway so that it's easier to add or not add the corresponding macro
+ // definitions later, in case we want to disable 68881/2 in newer CPUs
+ // (with -msoft-float, for instance).
+ Features.push_back("+isa-68882");
+}
+
void m68k::getM68kTargetFeatures(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args,
std::vector<StringRef> &Features) {
-
- m68k::FloatABI FloatABI = m68k::getM68kFloatABI(D, Args);
- if (FloatABI == m68k::FloatABI::Soft)
- Features.push_back("-hard-float");
+ addFloatABIFeatures(Args, Features);
// Handle '-ffixed-<register>' flags
if (Args.hasArg(options::OPT_ffixed_a0))
@@ -105,21 +127,3 @@ void m68k::getM68kTargetFeatures(const Driver &D, const llvm::Triple &Triple,
if (Args.hasArg(options::OPT_ffixed_d7))
Features.push_back("+reserve-d7");
}
-
-m68k::FloatABI m68k::getM68kFloatABI(const Driver &D, const ArgList &Args) {
- m68k::FloatABI ABI = m68k::FloatABI::Invalid;
- if (Arg *A =
- Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float)) {
-
- if (A->getOption().matches(options::OPT_msoft_float))
- ABI = m68k::FloatABI::Soft;
- else if (A->getOption().matches(options::OPT_mhard_float))
- ABI = m68k::FloatABI::Hard;
- }
-
- // If unspecified, choose the default based on the platform.
- if (ABI == m68k::FloatABI::Invalid)
- ABI = m68k::FloatABI::Hard;
-
- return ABI;
-}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/M68k.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/M68k.h
index 41d53efb940b..051e7e1af103 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/M68k.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/M68k.h
@@ -20,14 +20,6 @@ namespace driver {
namespace tools {
namespace m68k {
-enum class FloatABI {
- Invalid,
- Soft,
- Hard,
-};
-
-FloatABI getM68kFloatABI(const Driver &D, const llvm::opt::ArgList &Args);
-
std::string getM68kTargetCPU(const llvm::opt::ArgList &Args);
void getM68kTargetFeatures(const Driver &D, const llvm::Triple &Triple,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.cpp
index 5a509dbb2bd3..fe9d112b8800 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.cpp
@@ -39,12 +39,6 @@ void mips::getMipsCPUAndABI(const ArgList &Args, const llvm::Triple &Triple,
DefMips64CPU = "mips64r6";
}
- // MIPS64r6 is the default for Android MIPS64 (mips64el-linux-android).
- if (Triple.isAndroid()) {
- DefMips32CPU = "mips32";
- DefMips64CPU = "mips64r6";
- }
-
// MIPS3 is the default for mips64*-unknown-openbsd.
if (Triple.isOSOpenBSD())
DefMips64CPU = "mips3";
@@ -227,6 +221,7 @@ void mips::getMIPSTargetFeatures(const Driver &D, const llvm::Triple &Triple,
bool IsN64 = ABIName == "64";
bool IsPIC = false;
bool NonPIC = false;
+ bool HasNaN2008Opt = false;
Arg *LastPICArg = Args.getLastArg(options::OPT_fPIC, options::OPT_fno_PIC,
options::OPT_fpic, options::OPT_fno_pic,
@@ -291,9 +286,10 @@ void mips::getMIPSTargetFeatures(const Driver &D, const llvm::Triple &Triple,
if (Arg *A = Args.getLastArg(options::OPT_mnan_EQ)) {
StringRef Val = StringRef(A->getValue());
if (Val == "2008") {
- if (mips::getIEEE754Standard(CPUName) & mips::Std2008)
+ if (mips::getIEEE754Standard(CPUName) & mips::Std2008) {
Features.push_back("+nan2008");
- else {
+ HasNaN2008Opt = true;
+ } else {
Features.push_back("-nan2008");
D.Diag(diag::warn_target_unsupported_nan2008) << CPUName;
}
@@ -306,7 +302,7 @@ void mips::getMIPSTargetFeatures(const Driver &D, const llvm::Triple &Triple,
}
} else
D.Diag(diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << Val;
+ << A->getSpelling() << Val;
}
if (Arg *A = Args.getLastArg(options::OPT_mabs_EQ)) {
@@ -327,8 +323,10 @@ void mips::getMIPSTargetFeatures(const Driver &D, const llvm::Triple &Triple,
}
} else {
D.Diag(diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << Val;
+ << A->getSpelling() << Val;
}
+ } else if (HasNaN2008Opt) {
+ Features.push_back("+abs2008");
}
AddTargetFeature(Args, Features, options::OPT_msingle_float,
@@ -441,7 +439,8 @@ bool mips::isUCLibc(const ArgList &Args) {
return A && A->getOption().matches(options::OPT_muclibc);
}
-bool mips::isNaN2008(const ArgList &Args, const llvm::Triple &Triple) {
+bool mips::isNaN2008(const Driver &D, const ArgList &Args,
+ const llvm::Triple &Triple) {
if (Arg *NaNArg = Args.getLastArg(options::OPT_mnan_EQ))
return llvm::StringSwitch<bool>(NaNArg->getValue())
.Case("2008", true)
@@ -449,7 +448,7 @@ bool mips::isNaN2008(const ArgList &Args, const llvm::Triple &Triple) {
.Default(false);
// NaN2008 is the default for MIPS32r6/MIPS64r6.
- return llvm::StringSwitch<bool>(getCPUName(Args, Triple))
+ return llvm::StringSwitch<bool>(getCPUName(D, Args, Triple))
.Cases("mips32r6", "mips64r6", true)
.Default(false);
}
@@ -466,11 +465,6 @@ bool mips::isFP64ADefault(const llvm::Triple &Triple, StringRef CPUName) {
bool mips::isFPXXDefault(const llvm::Triple &Triple, StringRef CPUName,
StringRef ABIName, mips::FloatABI FloatABI) {
- if (Triple.getVendor() != llvm::Triple::ImaginationTechnologies &&
- Triple.getVendor() != llvm::Triple::MipsTechnologies &&
- !Triple.isAndroid())
- return false;
-
if (ABIName != "32")
return false;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.h
index 074012f40fe5..62211c711420 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Mips.h
@@ -11,8 +11,8 @@
#include "clang/Driver/Driver.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Option/Option.h"
+#include "llvm/TargetParser/Triple.h"
#include <string>
#include <vector>
@@ -44,7 +44,8 @@ std::string getMipsABILibSuffix(const llvm::opt::ArgList &Args,
const llvm::Triple &Triple);
bool hasMipsAbiArg(const llvm::opt::ArgList &Args, const char *Value);
bool isUCLibc(const llvm::opt::ArgList &Args);
-bool isNaN2008(const llvm::opt::ArgList &Args, const llvm::Triple &Triple);
+bool isNaN2008(const Driver &D, const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple);
bool isFP64ADefault(const llvm::Triple &Triple, StringRef CPUName);
bool isFPXXDefault(const llvm::Triple &Triple, StringRef CPUName,
StringRef ABIName, mips::FloatABI FloatABI);
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp
index bcaecf4b2d98..ab24d14992cd 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.cpp
@@ -13,81 +13,83 @@
#include "clang/Driver/Options.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Option/ArgList.h"
-#include "llvm/Support/Host.h"
+#include "llvm/TargetParser/Host.h"
using namespace clang::driver;
using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
-/// getPPCTargetCPU - Get the (LLVM) name of the PowerPC cpu we are targeting.
-std::string ppc::getPPCTargetCPU(const ArgList &Args) {
- if (Arg *A = Args.getLastArg(clang::driver::options::OPT_mcpu_EQ)) {
- StringRef CPUName = A->getValue();
-
- if (CPUName == "native") {
- std::string CPU = std::string(llvm::sys::getHostCPUName());
- if (!CPU.empty() && CPU != "generic")
- return CPU;
- else
- return "";
- }
+static std::string getPPCGenericTargetCPU(const llvm::Triple &T) {
+ // LLVM may default to generating code for the native CPU,
+ // but, like gcc, we default to a more generic option for
+ // each architecture. (except on AIX)
+ if (T.isOSAIX())
+ return "pwr7";
+ else if (T.getArch() == llvm::Triple::ppc64le)
+ return "ppc64le";
+ else if (T.getArch() == llvm::Triple::ppc64)
+ return "ppc64";
+ else
+ return "ppc";
+}
- return llvm::StringSwitch<const char *>(CPUName)
- .Case("common", "generic")
- .Case("440", "440")
- .Case("440fp", "440")
- .Case("450", "450")
- .Case("601", "601")
- .Case("602", "602")
- .Case("603", "603")
- .Case("603e", "603e")
- .Case("603ev", "603ev")
- .Case("604", "604")
- .Case("604e", "604e")
- .Case("620", "620")
- .Case("630", "pwr3")
- .Case("G3", "g3")
- .Case("7400", "7400")
- .Case("G4", "g4")
- .Case("7450", "7450")
- .Case("G4+", "g4+")
- .Case("750", "750")
- .Case("8548", "e500")
- .Case("970", "970")
- .Case("G5", "g5")
- .Case("a2", "a2")
- .Case("e500", "e500")
- .Case("e500mc", "e500mc")
- .Case("e5500", "e5500")
- .Case("power3", "pwr3")
- .Case("power4", "pwr4")
- .Case("power5", "pwr5")
- .Case("power5x", "pwr5x")
- .Case("power6", "pwr6")
- .Case("power6x", "pwr6x")
- .Case("power7", "pwr7")
- .Case("power8", "pwr8")
- .Case("power9", "pwr9")
- .Case("power10", "pwr10")
- .Case("future", "future")
- .Case("pwr3", "pwr3")
- .Case("pwr4", "pwr4")
- .Case("pwr5", "pwr5")
- .Case("pwr5x", "pwr5x")
- .Case("pwr6", "pwr6")
- .Case("pwr6x", "pwr6x")
- .Case("pwr7", "pwr7")
- .Case("pwr8", "pwr8")
- .Case("pwr9", "pwr9")
- .Case("pwr10", "pwr10")
- .Case("powerpc", "ppc")
- .Case("powerpc64", "ppc64")
- .Case("powerpc64le", "ppc64le")
- .Default("");
+static std::string normalizeCPUName(StringRef CPUName, const llvm::Triple &T) {
+ // Clang/LLVM does not actually support code generation
+ // for the 405 CPU. However, there are uses of this CPU ID
+ // in projects that previously used GCC and rely on Clang
+ // accepting it. Clang has always ignored it and passed the
+ // generic CPU ID to the back end.
+ if (CPUName == "generic" || CPUName == "405")
+ return getPPCGenericTargetCPU(T);
+
+ if (CPUName == "native") {
+ std::string CPU = std::string(llvm::sys::getHostCPUName());
+ if (!CPU.empty() && CPU != "generic")
+ return CPU;
+ else
+ return getPPCGenericTargetCPU(T);
}
- return "";
+ return llvm::StringSwitch<const char *>(CPUName)
+ .Case("common", "generic")
+ .Case("440fp", "440")
+ .Case("630", "pwr3")
+ .Case("G3", "g3")
+ .Case("G4", "g4")
+ .Case("G4+", "g4+")
+ .Case("8548", "e500")
+ .Case("G5", "g5")
+ .Case("power3", "pwr3")
+ .Case("power4", "pwr4")
+ .Case("power5", "pwr5")
+ .Case("power5x", "pwr5x")
+ .Case("power6", "pwr6")
+ .Case("power6x", "pwr6x")
+ .Case("power7", "pwr7")
+ .Case("power8", "pwr8")
+ .Case("power9", "pwr9")
+ .Case("power10", "pwr10")
+ .Case("future", "future")
+ .Case("powerpc", "ppc")
+ .Case("powerpc64", "ppc64")
+ .Case("powerpc64le", "ppc64le")
+ .Default(CPUName.data());
+}
+
+/// Get the (LLVM) name of the PowerPC cpu we are tuning for.
+std::string ppc::getPPCTuneCPU(const ArgList &Args, const llvm::Triple &T) {
+ if (Arg *A = Args.getLastArg(clang::driver::options::OPT_mtune_EQ))
+ return normalizeCPUName(A->getValue(), T);
+ return getPPCGenericTargetCPU(T);
+}
+
+/// Get the (LLVM) name of the PowerPC cpu we are targeting.
+std::string ppc::getPPCTargetCPU(const Driver &D, const ArgList &Args,
+ const llvm::Triple &T) {
+ if (Arg *A = Args.getLastArg(clang::driver::options::OPT_mcpu_EQ))
+ return normalizeCPUName(A->getValue(), T);
+ return getPPCGenericTargetCPU(T);
}
const char *ppc::getPPCAsmModeForCPU(StringRef Name) {
@@ -110,7 +112,8 @@ void ppc::getPPCTargetFeatures(const Driver &D, const llvm::Triple &Triple,
if (Triple.getSubArch() == llvm::Triple::PPCSubArch_spe)
Features.push_back("+spe");
- handleTargetFeaturesGroup(Args, Features, options::OPT_m_ppc_Features_Group);
+ handleTargetFeaturesGroup(D, Triple, Args, Features,
+ options::OPT_m_ppc_Features_Group);
ppc::FloatABI FloatABI = ppc::getPPCFloatABI(D, Args);
if (FloatABI == ppc::FloatABI::Soft)
@@ -125,8 +128,7 @@ ppc::ReadGOTPtrMode ppc::getPPCReadGOTPtrMode(const Driver &D, const llvm::Tripl
const ArgList &Args) {
if (Args.getLastArg(options::OPT_msecure_plt))
return ppc::ReadGOTPtrMode::SecurePlt;
- if ((Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 13) ||
- Triple.isOSNetBSD() || Triple.isOSOpenBSD() || Triple.isMusl())
+ if (Triple.isPPC32SecurePlt())
return ppc::ReadGOTPtrMode::SecurePlt;
else
return ppc::ReadGOTPtrMode::Bss;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.h
index e1c943955e81..ec5b3c8140b6 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/PPC.h
@@ -35,7 +35,10 @@ enum class ReadGOTPtrMode {
FloatABI getPPCFloatABI(const Driver &D, const llvm::opt::ArgList &Args);
-std::string getPPCTargetCPU(const llvm::opt::ArgList &Args);
+std::string getPPCTargetCPU(const Driver &D, const llvm::opt::ArgList &Args,
+ const llvm::Triple &T);
+std::string getPPCTuneCPU(const llvm::opt::ArgList &Args,
+ const llvm::Triple &T);
const char *getPPCAsmModeForCPU(StringRef Name);
ReadGOTPtrMode getPPCReadGOTPtrMode(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
index 2c2404acc54d..47b29e1577c2 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
@@ -1,4 +1,4 @@
-//===--- RISCV.cpp - RISCV Helpers for Tools --------------------*- C++ -*-===//
+//===--- RISCV.cpp - RISC-V Helpers for Tools -------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -7,476 +7,69 @@
//===----------------------------------------------------------------------===//
#include "RISCV.h"
+#include "../Clang.h"
+#include "ToolChains/CommonArgs.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
-#include "llvm/ADT/Optional.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/RISCVISAInfo.h"
#include "llvm/Support/raw_ostream.h"
-#include "ToolChains/CommonArgs.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/RISCVTargetParser.h"
using namespace clang::driver;
using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
-namespace {
-// Represents the major and version number components of a RISC-V extension
-struct RISCVExtensionVersion {
- StringRef Major;
- StringRef Minor;
-};
-} // end anonymous namespace
-
-static StringRef getExtensionTypeDesc(StringRef Ext) {
- if (Ext.startswith("sx"))
- return "non-standard supervisor-level extension";
- if (Ext.startswith("s"))
- return "standard supervisor-level extension";
- if (Ext.startswith("x"))
- return "non-standard user-level extension";
- if (Ext.startswith("z"))
- return "standard user-level extension";
- return StringRef();
-}
-
-static StringRef getExtensionType(StringRef Ext) {
- if (Ext.startswith("sx"))
- return "sx";
- if (Ext.startswith("s"))
- return "s";
- if (Ext.startswith("x"))
- return "x";
- if (Ext.startswith("z"))
- return "z";
- return StringRef();
-}
-
-// If the extension is supported as experimental, return the version of that
-// extension that the compiler currently supports.
-static Optional<RISCVExtensionVersion>
-isExperimentalExtension(StringRef Ext) {
- if (Ext == "b" || Ext == "zba" || Ext == "zbb" || Ext == "zbc" ||
- Ext == "zbe" || Ext == "zbf" || Ext == "zbm" || Ext == "zbp" ||
- Ext == "zbr" || Ext == "zbs" || Ext == "zbt" || Ext == "zbproposedc")
- return RISCVExtensionVersion{"0", "93"};
- if (Ext == "v" || Ext == "zvamo" || Ext == "zvlsseg")
- return RISCVExtensionVersion{"0", "10"};
- if (Ext == "zfh")
- return RISCVExtensionVersion{"0", "1"};
- return None;
-}
-
-static bool isSupportedExtension(StringRef Ext) {
- // LLVM supports "z" extensions which are marked as experimental.
- if (isExperimentalExtension(Ext))
- return true;
-
- // LLVM does not support "sx", "s" nor "x" extensions.
- return false;
-}
-
-// Extensions may have a version number, and may be separated by
-// an underscore '_' e.g.: rv32i2_m2.
-// Version number is divided into major and minor version numbers,
-// separated by a 'p'. If the minor version is 0 then 'p0' can be
-// omitted from the version string. E.g., rv32i2p0, rv32i2, rv32i2p1.
-static bool getExtensionVersion(const Driver &D, const ArgList &Args,
- StringRef MArch, StringRef Ext, StringRef In,
- std::string &Major, std::string &Minor) {
- Major = std::string(In.take_while(isDigit));
- In = In.substr(Major.size());
-
- if (Major.size() && In.consume_front("p")) {
- Minor = std::string(In.take_while(isDigit));
- In = In.substr(Major.size() + 1);
-
- // Expected 'p' to be followed by minor version number.
- if (Minor.empty()) {
- std::string Error =
- "minor version number missing after 'p' for extension";
- D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
- << MArch << Error << Ext;
- return false;
- }
- }
-
- // Expected multi-character extension with version number to have no
- // subsequent characters (i.e. must either end string or be followed by
- // an underscore).
- if (Ext.size() > 1 && In.size()) {
- std::string Error =
- "multi-character extensions must be separated by underscores";
- D.Diag(diag::err_drv_invalid_riscv_ext_arch_name) << MArch << Error << In;
- return false;
- }
-
- // If experimental extension, require use of current version number number
- if (auto ExperimentalExtension = isExperimentalExtension(Ext)) {
- if (!Args.hasArg(options::OPT_menable_experimental_extensions)) {
- std::string Error =
- "requires '-menable-experimental-extensions' for experimental extension";
- D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
- << MArch << Error << Ext;
- return false;
- } else if (Major.empty() && Minor.empty()) {
- std::string Error =
- "experimental extension requires explicit version number";
- D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
- << MArch << Error << Ext;
- return false;
- }
- auto SupportedVers = *ExperimentalExtension;
- if (Major != SupportedVers.Major || Minor != SupportedVers.Minor) {
- std::string Error =
- "unsupported version number " + Major;
- if (!Minor.empty())
- Error += "." + Minor;
- Error += " for experimental extension (this compiler supports "
- + SupportedVers.Major.str() + "."
- + SupportedVers.Minor.str() + ")";
-
- D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
- << MArch << Error << Ext;
- return false;
- }
- return true;
- }
-
- // Allow extensions to declare no version number
- if (Major.empty() && Minor.empty())
- return true;
-
- // TODO: Handle supported extensions with version number.
- std::string Error = "unsupported version number " + Major;
- if (!Minor.empty())
- Error += "." + Minor;
- Error += " for extension";
- D.Diag(diag::err_drv_invalid_riscv_ext_arch_name) << MArch << Error << Ext;
-
- return false;
-}
-
-// Handle other types of extensions other than the standard
-// general purpose and standard user-level extensions.
-// Parse the ISA string containing non-standard user-level
-// extensions, standard supervisor-level extensions and
-// non-standard supervisor-level extensions.
-// These extensions start with 'z', 'x', 's', 'sx' prefixes, follow a
-// canonical order, might have a version number (major, minor)
-// and are separated by a single underscore '_'.
-// Set the hardware features for the extensions that are supported.
-static void getExtensionFeatures(const Driver &D,
- const ArgList &Args,
- std::vector<StringRef> &Features,
- StringRef &MArch, StringRef &Exts) {
- if (Exts.empty())
- return;
-
- // Multi-letter extensions are seperated by a single underscore
- // as described in RISC-V User-Level ISA V2.2.
- SmallVector<StringRef, 8> Split;
- Exts.split(Split, StringRef("_"));
-
- SmallVector<StringRef, 4> Prefix{"z", "x", "s", "sx"};
- auto I = Prefix.begin();
- auto E = Prefix.end();
-
- SmallVector<StringRef, 8> AllExts;
-
- for (StringRef Ext : Split) {
- if (Ext.empty()) {
- D.Diag(diag::err_drv_invalid_riscv_arch_name) << MArch
- << "extension name missing after separator '_'";
- return;
- }
-
- StringRef Type = getExtensionType(Ext);
- StringRef Desc = getExtensionTypeDesc(Ext);
- auto Pos = Ext.find_if(isDigit);
- StringRef Name(Ext.substr(0, Pos));
- StringRef Vers(Ext.substr(Pos));
-
- if (Type.empty()) {
- D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
- << MArch << "invalid extension prefix" << Ext;
- return;
- }
-
- // Check ISA extensions are specified in the canonical order.
- while (I != E && *I != Type)
- ++I;
-
- if (I == E) {
- std::string Error = std::string(Desc);
- Error += " not given in canonical order";
- D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
- << MArch << Error << Ext;
- return;
- }
-
- // The order is OK, do not advance I to the next prefix
- // to allow repeated extension type, e.g.: rv32ixabc_xdef.
-
- if (Name.size() == Type.size()) {
- std::string Error = std::string(Desc);
- Error += " name missing after";
- D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
- << MArch << Error << Type;
- return;
- }
-
- std::string Major, Minor;
- if (!getExtensionVersion(D, Args, MArch, Name, Vers, Major, Minor))
- return;
-
- // Check if duplicated extension.
- if (llvm::is_contained(AllExts, Name)) {
- std::string Error = "duplicated ";
- Error += Desc;
- D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
- << MArch << Error << Name;
- return;
- }
-
- // Extension format is correct, keep parsing the extensions.
- // TODO: Save Type, Name, Major, Minor to avoid parsing them later.
- AllExts.push_back(Name);
- }
-
- // Set target features.
- // TODO: Hardware features to be handled in Support/TargetParser.cpp.
- // TODO: Use version number when setting target features.
- for (auto Ext : AllExts) {
- if (!isSupportedExtension(Ext)) {
- StringRef Desc = getExtensionTypeDesc(getExtensionType(Ext));
- std::string Error = "unsupported ";
- Error += Desc;
- D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
- << MArch << Error << Ext;
- return;
- }
- if (Ext == "zvlsseg") {
- Features.push_back("+experimental-v");
- Features.push_back("+experimental-zvlsseg");
- } else if (Ext == "zvamo") {
- Features.push_back("+experimental-v");
- Features.push_back("+experimental-zvlsseg");
- Features.push_back("+experimental-zvamo");
- } else if (isExperimentalExtension(Ext))
- Features.push_back(Args.MakeArgString("+experimental-" + Ext));
- else
- Features.push_back(Args.MakeArgString("+" + Ext));
- }
-}
-
// Returns false if an error is diagnosed.
-static bool getArchFeatures(const Driver &D, StringRef MArch,
+static bool getArchFeatures(const Driver &D, StringRef Arch,
std::vector<StringRef> &Features,
const ArgList &Args) {
- // RISC-V ISA strings must be lowercase.
- if (llvm::any_of(MArch, [](char c) { return isupper(c); })) {
- D.Diag(diag::err_drv_invalid_riscv_arch_name)
- << MArch << "string must be lowercase";
- return false;
- }
-
- // ISA string must begin with rv32 or rv64.
- if (!(MArch.startswith("rv32") || MArch.startswith("rv64")) ||
- (MArch.size() < 5)) {
- D.Diag(diag::err_drv_invalid_riscv_arch_name)
- << MArch << "string must begin with rv32{i,e,g} or rv64{i,g}";
- return false;
- }
-
- bool HasRV64 = MArch.startswith("rv64");
+ bool EnableExperimentalExtensions =
+ Args.hasArg(options::OPT_menable_experimental_extensions);
+ auto ISAInfo =
+ llvm::RISCVISAInfo::parseArchString(Arch, EnableExperimentalExtensions);
+ if (!ISAInfo) {
+ handleAllErrors(ISAInfo.takeError(), [&](llvm::StringError &ErrMsg) {
+ D.Diag(diag::err_drv_invalid_riscv_arch_name)
+ << Arch << ErrMsg.getMessage();
+ });
- // The canonical order specified in ISA manual.
- // Ref: Table 22.1 in RISC-V User-Level ISA V2.2
- StringRef StdExts = "mafdqlcbjtpvn";
- bool HasF = false, HasD = false;
- char Baseline = MArch[4];
-
- // First letter should be 'e', 'i' or 'g'.
- switch (Baseline) {
- default:
- D.Diag(diag::err_drv_invalid_riscv_arch_name)
- << MArch << "first letter should be 'e', 'i' or 'g'";
- return false;
- case 'e': {
- StringRef Error;
- // Currently LLVM does not support 'e'.
- // Extension 'e' is not allowed in rv64.
- if (HasRV64)
- Error = "standard user-level extension 'e' requires 'rv32'";
- else
- Error = "unsupported standard user-level extension 'e'";
- D.Diag(diag::err_drv_invalid_riscv_arch_name) << MArch << Error;
return false;
}
- case 'i':
- break;
- case 'g':
- // g = imafd
- StdExts = StdExts.drop_front(4);
- Features.push_back("+m");
- Features.push_back("+a");
- Features.push_back("+f");
- Features.push_back("+d");
- HasF = true;
- HasD = true;
- break;
- }
-
- // Skip rvxxx
- StringRef Exts = MArch.substr(5);
-
- // Remove multi-letter standard extensions, non-standard extensions and
- // supervisor-level extensions. They have 'z', 'x', 's', 'sx' prefixes.
- // Parse them at the end.
- // Find the very first occurrence of 's', 'x' or 'z'.
- StringRef OtherExts;
- size_t Pos = Exts.find_first_of("zsx");
- if (Pos != StringRef::npos) {
- OtherExts = Exts.substr(Pos);
- Exts = Exts.substr(0, Pos);
- }
-
- std::string Major, Minor;
- if (!getExtensionVersion(D, Args, MArch, std::string(1, Baseline), Exts,
- Major, Minor))
- return false;
-
- // Consume the base ISA version number and any '_' between rvxxx and the
- // first extension
- Exts = Exts.drop_front(Major.size());
- if (!Minor.empty())
- Exts = Exts.drop_front(Minor.size() + 1 /*'p'*/);
- Exts.consume_front("_");
-
- // TODO: Use version number when setting target features
-
- auto StdExtsItr = StdExts.begin();
- auto StdExtsEnd = StdExts.end();
-
- for (auto I = Exts.begin(), E = Exts.end(); I != E; ) {
- char c = *I;
-
- // Check ISA extensions are specified in the canonical order.
- while (StdExtsItr != StdExtsEnd && *StdExtsItr != c)
- ++StdExtsItr;
-
- if (StdExtsItr == StdExtsEnd) {
- // Either c contains a valid extension but it was not given in
- // canonical order or it is an invalid extension.
- StringRef Error;
- if (StdExts.contains(c))
- Error = "standard user-level extension not given in canonical order";
- else
- Error = "invalid standard user-level extension";
- D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
- << MArch << Error << std::string(1, c);
- return false;
- }
-
- // Move to next char to prevent repeated letter.
- ++StdExtsItr;
-
- std::string Next, Major, Minor;
- if (std::next(I) != E)
- Next = std::string(std::next(I), E);
- if (!getExtensionVersion(D, Args, MArch, std::string(1, c), Next, Major,
- Minor))
- return false;
-
- // The order is OK, then push it into features.
- // TODO: Use version number when setting target features
- switch (c) {
- default:
- // Currently LLVM supports only "mafdc".
- D.Diag(diag::err_drv_invalid_riscv_ext_arch_name)
- << MArch << "unsupported standard user-level extension"
- << std::string(1, c);
- return false;
- case 'm':
- Features.push_back("+m");
- break;
- case 'a':
- Features.push_back("+a");
- break;
- case 'f':
- Features.push_back("+f");
- HasF = true;
- break;
- case 'd':
- Features.push_back("+d");
- HasD = true;
- break;
- case 'c':
- Features.push_back("+c");
- break;
- case 'b':
- Features.push_back("+experimental-b");
- Features.push_back("+experimental-zba");
- Features.push_back("+experimental-zbb");
- Features.push_back("+experimental-zbc");
- Features.push_back("+experimental-zbe");
- Features.push_back("+experimental-zbf");
- Features.push_back("+experimental-zbm");
- Features.push_back("+experimental-zbp");
- Features.push_back("+experimental-zbr");
- Features.push_back("+experimental-zbs");
- Features.push_back("+experimental-zbt");
- break;
- case 'v':
- Features.push_back("+experimental-v");
- Features.push_back("+experimental-zvlsseg");
- break;
- }
- // Consume full extension name and version, including any optional '_'
- // between this extension and the next
- ++I;
- I += Major.size();
- if (Minor.size())
- I += Minor.size() + 1 /*'p'*/;
- if (*I == '_')
- ++I;
- }
+ for (const std::string &Str : (*ISAInfo)->toFeatures(/*AddAllExtension=*/true,
+ /*IgnoreUnknown=*/false))
+ Features.push_back(Args.MakeArgString(Str));
- // Dependency check.
- // It's illegal to specify the 'd' (double-precision floating point)
- // extension without also specifying the 'f' (single precision
- // floating-point) extension.
- if (HasD && !HasF) {
- D.Diag(diag::err_drv_invalid_riscv_arch_name)
- << MArch << "d requires f extension to also be specified";
- return false;
- }
-
- // Additional dependency checks.
- // TODO: The 'q' extension requires rv64.
- // TODO: It is illegal to specify 'e' extensions with 'f' and 'd'.
-
- // Handle all other types of extensions.
- getExtensionFeatures(D, Args, Features, MArch, OtherExts);
+ if (EnableExperimentalExtensions)
+ Features.push_back(Args.MakeArgString("+experimental"));
return true;
}
// Get features except standard extension feature
-static void getRISCFeaturesFromMcpu(const Driver &D, const llvm::Triple &Triple,
- const llvm::opt::ArgList &Args,
- const llvm::opt::Arg *A, StringRef Mcpu,
+static void getRISCFeaturesFromMcpu(const Driver &D, const Arg *A,
+ const llvm::Triple &Triple,
+ StringRef Mcpu,
std::vector<StringRef> &Features) {
- bool Is64Bit = (Triple.getArch() == llvm::Triple::riscv64);
- llvm::RISCV::CPUKind CPUKind = llvm::RISCV::parseCPUKind(Mcpu);
- if (!llvm::RISCV::checkCPUKind(CPUKind, Is64Bit) ||
- !llvm::RISCV::getCPUFeaturesExceptStdExt(CPUKind, Features)) {
- D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
+ bool Is64Bit = Triple.isRISCV64();
+ if (!llvm::RISCV::parseCPU(Mcpu, Is64Bit)) {
+ // Try inverting Is64Bit in case the CPU is valid, but for the wrong target.
+ if (llvm::RISCV::parseCPU(Mcpu, !Is64Bit))
+ D.Diag(clang::diag::err_drv_invalid_riscv_cpu_name_for_target)
+ << Mcpu << Is64Bit;
+ else
+ D.Diag(clang::diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << Mcpu;
}
+
+ if (llvm::RISCV::hasFastUnalignedAccess(Mcpu))
+ Features.push_back("+fast-unaligned-access");
}
void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple,
@@ -489,8 +82,13 @@ void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple,
// If users give march and mcpu, get std extension feature from MArch
// and other features (ex. mirco architecture feature) from mcpu
- if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
- getRISCFeaturesFromMcpu(D, Triple, Args, A, A->getValue(), Features);
+ if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
+ StringRef CPU = A->getValue();
+ if (CPU == "native")
+ CPU = llvm::sys::getHostCPUName();
+
+ getRISCFeaturesFromMcpu(D, A, Triple, CPU, Features);
+ }
// Handle features corresponding to "-ffixed-X" options
if (Args.hasArg(options::OPT_ffixed_x1))
@@ -558,27 +156,30 @@ void riscv::getRISCVTargetFeatures(const Driver &D, const llvm::Triple &Triple,
// FreeBSD local, because ld.lld doesn't support relaxations
// -mno-relax is default, unless -mrelax is specified.
- if (Args.hasFlag(options::OPT_mrelax, options::OPT_mno_relax, false))
+ if (Args.hasFlag(options::OPT_mrelax, options::OPT_mno_relax, false)) {
Features.push_back("+relax");
- else
+ // -gsplit-dwarf -mrelax requires DW_AT_high_pc/DW_AT_ranges/... indexing
+ // into .debug_addr, which is currently not implemented.
+ Arg *A;
+ if (getDebugFissionKind(D, Args, A) != DwarfFissionKind::None)
+ D.Diag(clang::diag::err_drv_riscv_unsupported_with_linker_relaxation)
+ << A->getAsString(Args);
+ } else {
Features.push_back("-relax");
+ }
- // GCC Compatibility: -mno-save-restore is default, unless -msave-restore is
- // specified.
- if (Args.hasFlag(options::OPT_msave_restore, options::OPT_mno_save_restore, false))
- Features.push_back("+save-restore");
- else
- Features.push_back("-save-restore");
+ // -mno-unaligned-access is default, unless -munaligned-access is specified.
+ AddTargetFeature(Args, Features, options::OPT_munaligned_access,
+ options::OPT_mno_unaligned_access, "fast-unaligned-access");
// Now add any that the user explicitly requested on the command line,
// which may override the defaults.
- handleTargetFeaturesGroup(Args, Features, options::OPT_m_riscv_Features_Group);
+ handleTargetFeaturesGroup(D, Triple, Args, Features,
+ options::OPT_m_riscv_Features_Group);
}
StringRef riscv::getRISCVABI(const ArgList &Args, const llvm::Triple &Triple) {
- assert((Triple.getArch() == llvm::Triple::riscv32 ||
- Triple.getArch() == llvm::Triple::riscv64) &&
- "Unexpected triple");
+ assert(Triple.isRISCV() && "Unexpected triple");
// GCC's logic around choosing a default `-mabi=` is complex. If GCC is not
// configured using `--with-abi=`, then the logic for the default choice is
@@ -610,33 +211,22 @@ StringRef riscv::getRISCVABI(const ArgList &Args, const llvm::Triple &Triple) {
// rv32e -> ilp32e
// rv32* -> ilp32
// rv64g | rv64*d -> lp64d
+ // rv64e -> lp64e
// rv64* -> lp64
- StringRef MArch = getRISCVArch(Args, Triple);
+ StringRef Arch = getRISCVArch(Args, Triple);
- if (MArch.startswith_insensitive("rv32")) {
- // FIXME: parse `March` to find `D` extension properly
- if (MArch.substr(4).contains_insensitive("d") ||
- MArch.startswith_insensitive("rv32g"))
- return "ilp32d";
- else if (MArch.startswith_insensitive("rv32e"))
- return "ilp32e";
- else
- return "ilp32";
- } else if (MArch.startswith_insensitive("rv64")) {
- // FIXME: parse `March` to find `D` extension properly
- if (MArch.substr(4).contains_insensitive("d") ||
- MArch.startswith_insensitive("rv64g"))
- return "lp64d";
- else
- return "lp64";
- }
+ auto ParseResult = llvm::RISCVISAInfo::parseArchString(
+ Arch, /* EnableExperimentalExtension */ true);
+ // Ignore parsing error, just go 3rd step.
+ if (!llvm::errorToBool(ParseResult.takeError()))
+ return (*ParseResult)->computeDefaultABI();
// 3. Choose a default based on the triple
//
// We deviate from GCC's defaults here:
// - On `riscv{XLEN}-unknown-elf` we use the integer calling convention only.
// - On all other OSs we use the double floating point calling convention.
- if (Triple.getArch() == llvm::Triple::riscv32) {
+ if (Triple.isRISCV32()) {
if (Triple.getOS() == llvm::Triple::UnknownOS)
return "ilp32";
else
@@ -651,9 +241,7 @@ StringRef riscv::getRISCVABI(const ArgList &Args, const llvm::Triple &Triple) {
StringRef riscv::getRISCVArch(const llvm::opt::ArgList &Args,
const llvm::Triple &Triple) {
- assert((Triple.getArch() == llvm::Triple::riscv32 ||
- Triple.getArch() == llvm::Triple::riscv64) &&
- "Unexpected triple");
+ assert(Triple.isRISCV() && "Unexpected triple");
// GCC's logic around choosing a default `-march=` is complex. If GCC is not
// configured using `--with-arch=`, then the logic for the default choice is
@@ -687,7 +275,10 @@ StringRef riscv::getRISCVArch(const llvm::opt::ArgList &Args,
// 2. Get march (isa string) based on `-mcpu=`
if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
- StringRef MArch = llvm::RISCV::getMArchFromMcpu(A->getValue());
+ StringRef CPU = A->getValue();
+ if (CPU == "native")
+ CPU = llvm::sys::getHostCPUName();
+ StringRef MArch = llvm::RISCV::getMArchFromMcpu(CPU);
// Bypass if target cpu's default march is empty.
if (MArch != "")
return MArch;
@@ -696,6 +287,7 @@ StringRef riscv::getRISCVArch(const llvm::opt::ArgList &Args,
// 3. Choose a default based on `-mabi=`
//
// ilp32e -> rv32e
+ // lp64e -> rv64e
// ilp32 | ilp32f | ilp32d -> rv32imafdc
// lp64 | lp64f | lp64d -> rv64imafdc
if (const Arg *A = Args.getLastArg(options::OPT_mabi_EQ)) {
@@ -703,10 +295,16 @@ StringRef riscv::getRISCVArch(const llvm::opt::ArgList &Args,
if (MABI.equals_insensitive("ilp32e"))
return "rv32e";
- else if (MABI.startswith_insensitive("ilp32"))
+ else if (MABI.equals_insensitive("lp64e"))
+ return "rv64e";
+ else if (MABI.starts_with_insensitive("ilp32"))
return "rv32imafdc";
- else if (MABI.startswith_insensitive("lp64"))
+ else if (MABI.starts_with_insensitive("lp64")) {
+ if (Triple.isAndroid())
+ return "rv64imafdcv_zba_zbb_zbs";
+
return "rv64imafdc";
+ }
}
// 4. Choose a default based on the triple
@@ -714,7 +312,7 @@ StringRef riscv::getRISCVArch(const llvm::opt::ArgList &Args,
// We deviate from GCC's defaults here:
// - On `riscv{XLEN}-unknown-elf` we default to `rv{XLEN}imac`
// - On all other OSs we use `rv{XLEN}imafdc` (equivalent to `rv{XLEN}gc`)
- if (Triple.getArch() == llvm::Triple::riscv32) {
+ if (Triple.isRISCV32()) {
if (Triple.getOS() == llvm::Triple::UnknownOS)
return "rv32imac";
else
@@ -722,7 +320,26 @@ StringRef riscv::getRISCVArch(const llvm::opt::ArgList &Args,
} else {
if (Triple.getOS() == llvm::Triple::UnknownOS)
return "rv64imac";
+ else if (Triple.isAndroid())
+ return "rv64imafdcv_zba_zbb_zbs";
else
return "rv64imafdc";
}
}
+
+std::string riscv::getRISCVTargetCPU(const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple) {
+ std::string CPU;
+ // If we have -mcpu, use that.
+ if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
+ CPU = A->getValue();
+
+ // Handle CPU name is 'native'.
+ if (CPU == "native")
+ CPU = llvm::sys::getHostCPUName();
+
+ if (!CPU.empty())
+ return CPU;
+
+ return Triple.isRISCV64() ? "generic-rv64" : "generic-rv32";
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.h
index d4a519cdab34..fcaf9d57ad13 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.h
@@ -1,4 +1,4 @@
-//===--- RISCV.h - RISCV-specific Tool Helpers ------------------*- C++ -*-===//
+//===--- RISCV.h - RISC-V-specific Tool Helpers -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -26,6 +26,8 @@ StringRef getRISCVABI(const llvm::opt::ArgList &Args,
const llvm::Triple &Triple);
StringRef getRISCVArch(const llvm::opt::ArgList &Args,
const llvm::Triple &Triple);
+std::string getRISCVTargetCPU(const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple);
} // end namespace riscv
} // namespace tools
} // end namespace driver
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Sparc.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Sparc.cpp
index 70ba8eb2a7d0..ae1a4ba78826 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Sparc.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Sparc.cpp
@@ -12,6 +12,7 @@
#include "clang/Driver/Options.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/TargetParser/Host.h"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -62,10 +63,6 @@ const char *sparc::getSparcAsmModeForCPU(StringRef Name,
.Case("ma2480", "-Aleon")
.Case("ma2485", "-Aleon")
.Case("ma2x8x", "-Aleon")
- .Case("myriad2", "-Aleon")
- .Case("myriad2.1", "-Aleon")
- .Case("myriad2.2", "-Aleon")
- .Case("myriad2.3", "-Aleon")
.Case("leon2", "-Av8")
.Case("at697e", "-Av8")
.Case("at697f", "-Av8")
@@ -81,12 +78,14 @@ const char *sparc::getSparcAsmModeForCPU(StringRef Name,
sparc::FloatABI sparc::getSparcFloatABI(const Driver &D,
const ArgList &Args) {
sparc::FloatABI ABI = sparc::FloatABI::Invalid;
- if (Arg *A = Args.getLastArg(clang::driver::options::OPT_msoft_float,
- options::OPT_mhard_float,
+ if (Arg *A = Args.getLastArg(options::OPT_msoft_float, options::OPT_mno_fpu,
+ options::OPT_mhard_float, options::OPT_mfpu,
options::OPT_mfloat_abi_EQ)) {
- if (A->getOption().matches(clang::driver::options::OPT_msoft_float))
+ if (A->getOption().matches(options::OPT_msoft_float) ||
+ A->getOption().matches(options::OPT_mno_fpu))
ABI = sparc::FloatABI::Soft;
- else if (A->getOption().matches(options::OPT_mhard_float))
+ else if (A->getOption().matches(options::OPT_mhard_float) ||
+ A->getOption().matches(options::OPT_mfpu))
ABI = sparc::FloatABI::Hard;
else {
ABI = llvm::StringSwitch<sparc::FloatABI>(A->getValue())
@@ -113,9 +112,151 @@ sparc::FloatABI sparc::getSparcFloatABI(const Driver &D,
return ABI;
}
+std::string sparc::getSparcTargetCPU(const Driver &D, const ArgList &Args,
+ const llvm::Triple &Triple) {
+ if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_mcpu_EQ)) {
+ StringRef CPUName = A->getValue();
+ if (CPUName == "native") {
+ std::string CPU = std::string(llvm::sys::getHostCPUName());
+ if (!CPU.empty() && CPU != "generic")
+ return CPU;
+ return "";
+ }
+ return std::string(CPUName);
+ }
+
+ if (Triple.getArch() == llvm::Triple::sparc && Triple.isOSSolaris())
+ return "v9";
+ return "";
+}
+
void sparc::getSparcTargetFeatures(const Driver &D, const ArgList &Args,
std::vector<StringRef> &Features) {
sparc::FloatABI FloatABI = sparc::getSparcFloatABI(D, Args);
if (FloatABI == sparc::FloatABI::Soft)
Features.push_back("+soft-float");
+
+ if (Arg *A = Args.getLastArg(options::OPT_mfsmuld, options::OPT_mno_fsmuld)) {
+ if (A->getOption().matches(options::OPT_mfsmuld))
+ Features.push_back("+fsmuld");
+ else
+ Features.push_back("-fsmuld");
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_mpopc, options::OPT_mno_popc)) {
+ if (A->getOption().matches(options::OPT_mpopc))
+ Features.push_back("+popc");
+ else
+ Features.push_back("-popc");
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_mvis, options::OPT_mno_vis)) {
+ if (A->getOption().matches(options::OPT_mvis))
+ Features.push_back("+vis");
+ else
+ Features.push_back("-vis");
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_mvis2, options::OPT_mno_vis2)) {
+ if (A->getOption().matches(options::OPT_mvis2))
+ Features.push_back("+vis2");
+ else
+ Features.push_back("-vis2");
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_mvis3, options::OPT_mno_vis3)) {
+ if (A->getOption().matches(options::OPT_mvis3))
+ Features.push_back("+vis3");
+ else
+ Features.push_back("-vis3");
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_mhard_quad_float,
+ options::OPT_msoft_quad_float)) {
+ if (A->getOption().matches(options::OPT_mhard_quad_float))
+ Features.push_back("+hard-quad-float");
+ else
+ Features.push_back("-hard-quad-float");
+ }
+
+ if (Args.hasArg(options::OPT_ffixed_g1))
+ Features.push_back("+reserve-g1");
+
+ if (Args.hasArg(options::OPT_ffixed_g2))
+ Features.push_back("+reserve-g2");
+
+ if (Args.hasArg(options::OPT_ffixed_g3))
+ Features.push_back("+reserve-g3");
+
+ if (Args.hasArg(options::OPT_ffixed_g4))
+ Features.push_back("+reserve-g4");
+
+ if (Args.hasArg(options::OPT_ffixed_g5))
+ Features.push_back("+reserve-g5");
+
+ if (Args.hasArg(options::OPT_ffixed_g6))
+ Features.push_back("+reserve-g6");
+
+ if (Args.hasArg(options::OPT_ffixed_g7))
+ Features.push_back("+reserve-g7");
+
+ if (Args.hasArg(options::OPT_ffixed_o0))
+ Features.push_back("+reserve-o0");
+
+ if (Args.hasArg(options::OPT_ffixed_o1))
+ Features.push_back("+reserve-o1");
+
+ if (Args.hasArg(options::OPT_ffixed_o2))
+ Features.push_back("+reserve-o2");
+
+ if (Args.hasArg(options::OPT_ffixed_o3))
+ Features.push_back("+reserve-o3");
+
+ if (Args.hasArg(options::OPT_ffixed_o4))
+ Features.push_back("+reserve-o4");
+
+ if (Args.hasArg(options::OPT_ffixed_o5))
+ Features.push_back("+reserve-o5");
+
+ if (Args.hasArg(options::OPT_ffixed_l0))
+ Features.push_back("+reserve-l0");
+
+ if (Args.hasArg(options::OPT_ffixed_l1))
+ Features.push_back("+reserve-l1");
+
+ if (Args.hasArg(options::OPT_ffixed_l2))
+ Features.push_back("+reserve-l2");
+
+ if (Args.hasArg(options::OPT_ffixed_l3))
+ Features.push_back("+reserve-l3");
+
+ if (Args.hasArg(options::OPT_ffixed_l4))
+ Features.push_back("+reserve-l4");
+
+ if (Args.hasArg(options::OPT_ffixed_l5))
+ Features.push_back("+reserve-l5");
+
+ if (Args.hasArg(options::OPT_ffixed_l6))
+ Features.push_back("+reserve-l6");
+
+ if (Args.hasArg(options::OPT_ffixed_l7))
+ Features.push_back("+reserve-l7");
+
+ if (Args.hasArg(options::OPT_ffixed_i0))
+ Features.push_back("+reserve-i0");
+
+ if (Args.hasArg(options::OPT_ffixed_i1))
+ Features.push_back("+reserve-i1");
+
+ if (Args.hasArg(options::OPT_ffixed_i2))
+ Features.push_back("+reserve-i2");
+
+ if (Args.hasArg(options::OPT_ffixed_i3))
+ Features.push_back("+reserve-i3");
+
+ if (Args.hasArg(options::OPT_ffixed_i4))
+ Features.push_back("+reserve-i4");
+
+ if (Args.hasArg(options::OPT_ffixed_i5))
+ Features.push_back("+reserve-i5");
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Sparc.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Sparc.h
index d12a9a70e264..44658c4259c6 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Sparc.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/Sparc.h
@@ -28,6 +28,9 @@ enum class FloatABI {
FloatABI getSparcFloatABI(const Driver &D, const llvm::opt::ArgList &Args);
+std::string getSparcTargetCPU(const Driver &D, const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple);
+
void getSparcTargetFeatures(const Driver &D, const llvm::opt::ArgList &Args,
std::vector<llvm::StringRef> &Features);
const char *getSparcAsmModeForCPU(llvm::StringRef Name,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp
index f81bf68172de..588bc3176d73 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/SystemZ.cpp
@@ -11,7 +11,7 @@
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
-#include "llvm/Support/Host.h"
+#include "llvm/TargetParser/Host.h"
using namespace clang::driver;
using namespace clang::driver::tools;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.cpp
index 9dfd37c2106d..b19760898c64 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.cpp
@@ -10,7 +10,6 @@
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/Option/ArgList.h"
using namespace clang::driver;
@@ -19,4 +18,9 @@ using namespace clang;
using namespace llvm::opt;
void ve::getVETargetFeatures(const Driver &D, const ArgList &Args,
- std::vector<StringRef> &Features) {}
+ std::vector<StringRef> &Features) {
+ if (Args.hasFlag(options::OPT_mvevpu, options::OPT_mno_vevpu, true))
+ Features.push_back("+vpu");
+ else
+ Features.push_back("-vpu");
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.h
index 531433534914..c47a41df25bc 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.h
@@ -24,7 +24,7 @@ void getVETargetFeatures(const Driver &D, const llvm::opt::ArgList &Args,
std::vector<llvm::StringRef> &Features);
} // end namespace ve
-} // namespace tools
+} // end namespace tools
} // end namespace driver
} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp
index 12749c7ec871..53e26a9f8e22 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp
@@ -11,16 +11,17 @@
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
-#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
#include "llvm/Option/ArgList.h"
-#include "llvm/Support/Host.h"
+#include "llvm/TargetParser/Host.h"
using namespace clang::driver;
using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
-std::string x86::getX86TargetCPU(const ArgList &Args,
+std::string x86::getX86TargetCPU(const Driver &D, const ArgList &Args,
const llvm::Triple &Triple) {
if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_march_EQ)) {
StringRef CPU = A->getValue();
@@ -29,37 +30,39 @@ std::string x86::getX86TargetCPU(const ArgList &Args,
// FIXME: Reject attempts to use -march=native unless the target matches
// the host.
- //
- // FIXME: We should also incorporate the detected target features for use
- // with -native.
CPU = llvm::sys::getHostCPUName();
if (!CPU.empty() && CPU != "generic")
return std::string(CPU);
}
- if (const Arg *A = Args.getLastArgNoClaim(options::OPT__SLASH_arch)) {
+ if (const Arg *A = Args.getLastArg(options::OPT__SLASH_arch)) {
// Mapping built by looking at lib/Basic's X86TargetInfo::initFeatureMap().
- StringRef Arch = A->getValue();
- StringRef CPU;
- if (Triple.getArch() == llvm::Triple::x86) { // 32-bit-only /arch: flags.
- CPU = llvm::StringSwitch<StringRef>(Arch)
- .Case("IA32", "i386")
- .Case("SSE", "pentium3")
- .Case("SSE2", "pentium4")
- .Default("");
+ // The keys are case-sensitive; this matches link.exe.
+ // 32-bit and 64-bit /arch: flags.
+ llvm::StringMap<StringRef> ArchMap({
+ {"AVX", "sandybridge"},
+ {"AVX2", "haswell"},
+ {"AVX512F", "knl"},
+ {"AVX512", "skylake-avx512"},
+ });
+ if (Triple.getArch() == llvm::Triple::x86) {
+ // 32-bit-only /arch: flags.
+ ArchMap.insert({
+ {"IA32", "i386"},
+ {"SSE", "pentium3"},
+ {"SSE2", "pentium4"},
+ });
}
- if (CPU.empty()) { // 32-bit and 64-bit /arch: flags.
- CPU = llvm::StringSwitch<StringRef>(Arch)
- .Case("AVX", "sandybridge")
- .Case("AVX2", "haswell")
- .Case("AVX512F", "knl")
- .Case("AVX512", "skylake-avx512")
- .Default("");
- }
- if (!CPU.empty()) {
- A->claim();
- return std::string(CPU);
+ StringRef CPU = ArchMap.lookup(A->getValue());
+ if (CPU.empty()) {
+ std::vector<StringRef> ValidArchs{ArchMap.keys().begin(),
+ ArchMap.keys().end()};
+ sort(ValidArchs);
+ D.Diag(diag::warn_drv_invalid_arch_name_with_suggestion)
+ << A->getValue() << (Triple.getArch() == llvm::Triple::x86)
+ << join(ValidArchs, ", ");
}
+ return std::string(CPU);
}
// Select the default CPU if none was given (or detection failed).
@@ -77,13 +80,19 @@ std::string x86::getX86TargetCPU(const ArgList &Args,
// Simulators can still run on 10.11 though, like Xcode.
if (Triple.isMacOSX() && !Triple.isOSVersionLT(10, 12))
return "penryn";
+
+ if (Triple.isDriverKit())
+ return "nehalem";
+
// The oldest x86_64 Macs have core2/Merom; the oldest x86 Macs have Yonah.
return Is64Bit ? "core2" : "yonah";
}
- // Set up default CPU name for PS4 compilers.
- if (Triple.isPS4CPU())
+ // Set up default CPU name for PS4/PS5 compilers.
+ if (Triple.isPS4())
return "btver2";
+ if (Triple.isPS5())
+ return "znver2";
// On Android use targets compatible with gcc
if (Triple.isAndroid())
@@ -110,6 +119,15 @@ std::string x86::getX86TargetCPU(const ArgList &Args,
void x86::getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args,
std::vector<StringRef> &Features) {
+ // Claim and report unsupported -mabi=. Note: we don't support "sysv_abi" or
+ // "ms_abi" as default function attributes.
+ if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_mabi_EQ)) {
+ StringRef DefaultAbi = Triple.isOSWindows() ? "ms" : "sysv";
+ if (A->getValue() != DefaultAbi)
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << Triple.getTriple();
+ }
+
// If -march=native, autodetect the feature list.
if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_march_EQ)) {
if (StringRef(A->getValue()) == "native") {
@@ -211,6 +229,27 @@ void x86::getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
<< D.getOpts().getOptionName(LVIOpt);
}
+ for (const Arg *A : Args.filtered(options::OPT_m_x86_AVX10_Features_Group)) {
+ StringRef Name = A->getOption().getName();
+ A->claim();
+
+ // Skip over "-m".
+ assert(Name.starts_with("m") && "Invalid feature name.");
+ Name = Name.substr(1);
+
+ bool IsNegative = Name.consume_front("no-");
+
+#ifndef NDEBUG
+ assert(Name.starts_with("avx10.") && "Invalid AVX10 feature name.");
+ StringRef Version, Width;
+ std::tie(Version, Width) = Name.substr(6).split('-');
+ assert(Version == "1" && "Invalid AVX10 feature name.");
+ assert((Width == "256" || Width == "512") && "Invalid AVX10 feature name.");
+#endif
+
+ Features.push_back(Args.MakeArgString((IsNegative ? "-" : "+") + Name));
+ }
+
// Now add any that the user explicitly requested on the command line,
// which may override the defaults.
for (const Arg *A : Args.filtered(options::OPT_m_x86_Features_Group,
@@ -219,7 +258,7 @@ void x86::getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
A->claim();
// Skip over "-m".
- assert(Name.startswith("m") && "Invalid feature name.");
+ assert(Name.starts_with("m") && "Invalid feature name.");
Name = Name.substr(1);
// Replace -mgeneral-regs-only with -x87, -mmx, -sse
@@ -228,9 +267,46 @@ void x86::getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
continue;
}
- bool IsNegative = Name.startswith("no-");
+ bool IsNegative = Name.starts_with("no-");
+ if (A->getOption().matches(options::OPT_mapx_features_EQ) ||
+ A->getOption().matches(options::OPT_mno_apx_features_EQ)) {
+
+ for (StringRef Value : A->getValues()) {
+ if (Value == "egpr" || Value == "push2pop2" || Value == "ppx" ||
+ Value == "ndd" || Value == "ccmp" || Value == "cf") {
+ Features.push_back(
+ Args.MakeArgString((IsNegative ? "-" : "+") + Value));
+ continue;
+ }
+ D.Diag(clang::diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << Value;
+ }
+ continue;
+ }
if (IsNegative)
Name = Name.substr(3);
Features.push_back(Args.MakeArgString((IsNegative ? "-" : "+") + Name));
}
+
+ // Enable/disable straight line speculation hardening.
+ if (Arg *A = Args.getLastArg(options::OPT_mharden_sls_EQ)) {
+ StringRef Scope = A->getValue();
+ if (Scope == "all") {
+ Features.push_back("+harden-sls-ijmp");
+ Features.push_back("+harden-sls-ret");
+ } else if (Scope == "return") {
+ Features.push_back("+harden-sls-ret");
+ } else if (Scope == "indirect-jmp") {
+ Features.push_back("+harden-sls-ijmp");
+ } else if (Scope != "none") {
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << Scope;
+ }
+ }
+
+ // -mno-gather, -mno-scatter support
+ if (Args.hasArg(options::OPT_mno_gather))
+ Features.push_back("+prefer-no-gather");
+ if (Args.hasArg(options::OPT_mno_scatter))
+ Features.push_back("+prefer-no-scatter");
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.h
index 14f0a26c8be4..e07387f3ece3 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.h
@@ -11,8 +11,8 @@
#include "clang/Driver/Driver.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Option/Option.h"
+#include "llvm/TargetParser/Triple.h"
#include <string>
#include <vector>
@@ -21,7 +21,7 @@ namespace driver {
namespace tools {
namespace x86 {
-std::string getX86TargetCPU(const llvm::opt::ArgList &Args,
+std::string getX86TargetCPU(const Driver &D, const llvm::opt::ArgList &Args,
const llvm::Triple &Triple);
void getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.cpp
index ce73e39d1456..391c47f88bde 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.cpp
@@ -12,26 +12,27 @@
#include "Gnu.h"
#include "clang/Driver/InputInfo.h"
+#include "Arch/ARM.h"
#include "Arch/RISCV.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/MultilibBuilder.h"
#include "clang/Driver/Options.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
+#include <sstream>
+
using namespace llvm::opt;
using namespace clang;
using namespace clang::driver;
using namespace clang::driver::tools;
using namespace clang::driver::toolchains;
-static Multilib makeMultilib(StringRef commonSuffix) {
- return Multilib(commonSuffix, commonSuffix, commonSuffix);
-}
-
static bool findRISCVMultilibs(const Driver &D,
const llvm::Triple &TargetTriple,
const ArgList &Args, DetectedMultilibs &Result) {
@@ -39,37 +40,41 @@ static bool findRISCVMultilibs(const Driver &D,
StringRef Arch = riscv::getRISCVArch(Args, TargetTriple);
StringRef Abi = tools::riscv::getRISCVABI(Args, TargetTriple);
- if (TargetTriple.getArch() == llvm::Triple::riscv64) {
- Multilib Imac = makeMultilib("").flag("+march=rv64imac").flag("+mabi=lp64");
- Multilib Imafdc = makeMultilib("/rv64imafdc/lp64d")
- .flag("+march=rv64imafdc")
- .flag("+mabi=lp64d");
+ if (TargetTriple.isRISCV64()) {
+ MultilibBuilder Imac =
+ MultilibBuilder().flag("-march=rv64imac").flag("-mabi=lp64");
+ MultilibBuilder Imafdc = MultilibBuilder("/rv64imafdc/lp64d")
+ .flag("-march=rv64imafdc")
+ .flag("-mabi=lp64d");
// Multilib reuse
bool UseImafdc =
(Arch == "rv64imafdc") || (Arch == "rv64gc"); // gc => imafdc
- addMultilibFlag((Arch == "rv64imac"), "march=rv64imac", Flags);
- addMultilibFlag(UseImafdc, "march=rv64imafdc", Flags);
- addMultilibFlag(Abi == "lp64", "mabi=lp64", Flags);
- addMultilibFlag(Abi == "lp64d", "mabi=lp64d", Flags);
+ addMultilibFlag((Arch == "rv64imac"), "-march=rv64imac", Flags);
+ addMultilibFlag(UseImafdc, "-march=rv64imafdc", Flags);
+ addMultilibFlag(Abi == "lp64", "-mabi=lp64", Flags);
+ addMultilibFlag(Abi == "lp64d", "-mabi=lp64d", Flags);
- Result.Multilibs = MultilibSet().Either(Imac, Imafdc);
- return Result.Multilibs.select(Flags, Result.SelectedMultilib);
+ Result.Multilibs =
+ MultilibSetBuilder().Either(Imac, Imafdc).makeMultilibSet();
+ return Result.Multilibs.select(Flags, Result.SelectedMultilibs);
}
- if (TargetTriple.getArch() == llvm::Triple::riscv32) {
- Multilib Imac =
- makeMultilib("").flag("+march=rv32imac").flag("+mabi=ilp32");
- Multilib I =
- makeMultilib("/rv32i/ilp32").flag("+march=rv32i").flag("+mabi=ilp32");
- Multilib Im =
- makeMultilib("/rv32im/ilp32").flag("+march=rv32im").flag("+mabi=ilp32");
- Multilib Iac = makeMultilib("/rv32iac/ilp32")
- .flag("+march=rv32iac")
- .flag("+mabi=ilp32");
- Multilib Imafc = makeMultilib("/rv32imafc/ilp32f")
- .flag("+march=rv32imafc")
- .flag("+mabi=ilp32f");
+ if (TargetTriple.isRISCV32()) {
+ MultilibBuilder Imac =
+ MultilibBuilder().flag("-march=rv32imac").flag("-mabi=ilp32");
+ MultilibBuilder I = MultilibBuilder("/rv32i/ilp32")
+ .flag("-march=rv32i")
+ .flag("-mabi=ilp32");
+ MultilibBuilder Im = MultilibBuilder("/rv32im/ilp32")
+ .flag("-march=rv32im")
+ .flag("-mabi=ilp32");
+ MultilibBuilder Iac = MultilibBuilder("/rv32iac/ilp32")
+ .flag("-march=rv32iac")
+ .flag("-mabi=ilp32");
+ MultilibBuilder Imafc = MultilibBuilder("/rv32imafc/ilp32f")
+ .flag("-march=rv32imafc")
+ .flag("-mabi=ilp32f");
// Multilib reuse
bool UseI = (Arch == "rv32i") || (Arch == "rv32ic"); // ic => i
@@ -77,22 +82,23 @@ static bool findRISCVMultilibs(const Driver &D,
bool UseImafc = (Arch == "rv32imafc") || (Arch == "rv32imafdc") ||
(Arch == "rv32gc"); // imafdc,gc => imafc
- addMultilibFlag(UseI, "march=rv32i", Flags);
- addMultilibFlag(UseIm, "march=rv32im", Flags);
- addMultilibFlag((Arch == "rv32iac"), "march=rv32iac", Flags);
- addMultilibFlag((Arch == "rv32imac"), "march=rv32imac", Flags);
- addMultilibFlag(UseImafc, "march=rv32imafc", Flags);
- addMultilibFlag(Abi == "ilp32", "mabi=ilp32", Flags);
- addMultilibFlag(Abi == "ilp32f", "mabi=ilp32f", Flags);
-
- Result.Multilibs = MultilibSet().Either(I, Im, Iac, Imac, Imafc);
- return Result.Multilibs.select(Flags, Result.SelectedMultilib);
+ addMultilibFlag(UseI, "-march=rv32i", Flags);
+ addMultilibFlag(UseIm, "-march=rv32im", Flags);
+ addMultilibFlag((Arch == "rv32iac"), "-march=rv32iac", Flags);
+ addMultilibFlag((Arch == "rv32imac"), "-march=rv32imac", Flags);
+ addMultilibFlag(UseImafc, "-march=rv32imafc", Flags);
+ addMultilibFlag(Abi == "ilp32", "-mabi=ilp32", Flags);
+ addMultilibFlag(Abi == "ilp32f", "-mabi=ilp32f", Flags);
+
+ Result.Multilibs =
+ MultilibSetBuilder().Either(I, Im, Iac, Imac, Imafc).makeMultilibSet();
+ return Result.Multilibs.select(Flags, Result.SelectedMultilibs);
}
return false;
}
BareMetal::BareMetal(const Driver &D, const llvm::Triple &Triple,
- const ArgList &Args)
+ const ArgList &Args)
: ToolChain(D, Triple, Args) {
getProgramPaths().push_back(getDriver().getInstalledDir());
if (getDriver().getInstalledDir() != getDriver().Dir)
@@ -101,15 +107,21 @@ BareMetal::BareMetal(const Driver &D, const llvm::Triple &Triple,
findMultilibs(D, Triple, Args);
SmallString<128> SysRoot(computeSysRoot());
if (!SysRoot.empty()) {
- llvm::sys::path::append(SysRoot, "lib");
- getFilePaths().push_back(std::string(SysRoot));
+ for (const Multilib &M : getOrderedMultilibs()) {
+ SmallString<128> Dir(SysRoot);
+ llvm::sys::path::append(Dir, M.osSuffix(), "lib");
+ getFilePaths().push_back(std::string(Dir));
+ getLibraryPaths().push_back(std::string(Dir));
+ }
}
}
-/// Is the triple {arm,thumb}-none-none-{eabi,eabihf} ?
+/// Is the triple {arm,armeb,thumb,thumbeb}-none-none-{eabi,eabihf} ?
static bool isARMBareMetal(const llvm::Triple &Triple) {
if (Triple.getArch() != llvm::Triple::arm &&
- Triple.getArch() != llvm::Triple::thumb)
+ Triple.getArch() != llvm::Triple::thumb &&
+ Triple.getArch() != llvm::Triple::armeb &&
+ Triple.getArch() != llvm::Triple::thumbeb)
return false;
if (Triple.getVendor() != llvm::Triple::UnknownVendor)
@@ -125,9 +137,23 @@ static bool isARMBareMetal(const llvm::Triple &Triple) {
return true;
}
+/// Is the triple {aarch64.aarch64_be}-none-elf?
+static bool isAArch64BareMetal(const llvm::Triple &Triple) {
+ if (Triple.getArch() != llvm::Triple::aarch64 &&
+ Triple.getArch() != llvm::Triple::aarch64_be)
+ return false;
+
+ if (Triple.getVendor() != llvm::Triple::UnknownVendor)
+ return false;
+
+ if (Triple.getOS() != llvm::Triple::UnknownOS)
+ return false;
+
+ return Triple.getEnvironmentName() == "elf";
+}
+
static bool isRISCVBareMetal(const llvm::Triple &Triple) {
- if (Triple.getArch() != llvm::Triple::riscv32 &&
- Triple.getArch() != llvm::Triple::riscv64)
+ if (!Triple.isRISCV())
return false;
if (Triple.getVendor() != llvm::Triple::UnknownVendor)
@@ -139,50 +165,100 @@ static bool isRISCVBareMetal(const llvm::Triple &Triple) {
return Triple.getEnvironmentName() == "elf";
}
+/// Is the triple powerpc[64][le]-*-none-eabi?
+static bool isPPCBareMetal(const llvm::Triple &Triple) {
+ return Triple.isPPC() && Triple.getOS() == llvm::Triple::UnknownOS &&
+ Triple.getEnvironment() == llvm::Triple::EABI;
+}
+
+static void findMultilibsFromYAML(const ToolChain &TC, const Driver &D,
+ StringRef MultilibPath, const ArgList &Args,
+ DetectedMultilibs &Result) {
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> MB =
+ D.getVFS().getBufferForFile(MultilibPath);
+ if (!MB)
+ return;
+ Multilib::flags_list Flags = TC.getMultilibFlags(Args);
+ llvm::ErrorOr<MultilibSet> ErrorOrMultilibSet =
+ MultilibSet::parseYaml(*MB.get());
+ if (ErrorOrMultilibSet.getError())
+ return;
+ Result.Multilibs = ErrorOrMultilibSet.get();
+ if (Result.Multilibs.select(Flags, Result.SelectedMultilibs))
+ return;
+ D.Diag(clang::diag::warn_drv_missing_multilib) << llvm::join(Flags, " ");
+ std::stringstream ss;
+ for (const Multilib &Multilib : Result.Multilibs)
+ ss << "\n" << llvm::join(Multilib.flags(), " ");
+ D.Diag(clang::diag::note_drv_available_multilibs) << ss.str();
+}
+
+static constexpr llvm::StringLiteral MultilibFilename = "multilib.yaml";
+
+// Get the sysroot, before multilib takes effect.
+static std::string computeBaseSysRoot(const Driver &D,
+ const llvm::Triple &Triple) {
+ if (!D.SysRoot.empty())
+ return D.SysRoot;
+
+ SmallString<128> SysRootDir(D.Dir);
+ llvm::sys::path::append(SysRootDir, "..", "lib", "clang-runtimes");
+
+ SmallString<128> MultilibPath(SysRootDir);
+ llvm::sys::path::append(MultilibPath, MultilibFilename);
+
+ // New behaviour: if multilib.yaml is found then use clang-runtimes as the
+ // sysroot.
+ if (D.getVFS().exists(MultilibPath))
+ return std::string(SysRootDir);
+
+ // Otherwise fall back to the old behaviour of appending the target triple.
+ llvm::sys::path::append(SysRootDir, D.getTargetTriple());
+ return std::string(SysRootDir);
+}
+
void BareMetal::findMultilibs(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args) {
DetectedMultilibs Result;
if (isRISCVBareMetal(Triple)) {
if (findRISCVMultilibs(D, Triple, Args, Result)) {
- SelectedMultilib = Result.SelectedMultilib;
+ SelectedMultilibs = Result.SelectedMultilibs;
Multilibs = Result.Multilibs;
}
+ } else {
+ llvm::SmallString<128> MultilibPath(computeBaseSysRoot(D, Triple));
+ llvm::sys::path::append(MultilibPath, MultilibFilename);
+ findMultilibsFromYAML(*this, D, MultilibPath, Args, Result);
+ SelectedMultilibs = Result.SelectedMultilibs;
+ Multilibs = Result.Multilibs;
}
}
bool BareMetal::handlesTarget(const llvm::Triple &Triple) {
- return isARMBareMetal(Triple) || isRISCVBareMetal(Triple);
+ return isARMBareMetal(Triple) || isAArch64BareMetal(Triple) ||
+ isRISCVBareMetal(Triple) || isPPCBareMetal(Triple);
}
Tool *BareMetal::buildLinker() const {
return new tools::baremetal::Linker(*this);
}
-std::string BareMetal::getCompilerRTPath() const { return getRuntimesDir(); }
-
-std::string BareMetal::buildCompilerRTBasename(const llvm::opt::ArgList &,
- StringRef, FileType,
- bool) const {
- return ("libclang_rt.builtins-" + getTriple().getArchName() + ".a").str();
-}
-
-std::string BareMetal::getRuntimesDir() const {
- SmallString<128> Dir(getDriver().ResourceDir);
- llvm::sys::path::append(Dir, "lib", "baremetal");
- Dir += SelectedMultilib.gccSuffix();
- return std::string(Dir.str());
+Tool *BareMetal::buildStaticLibTool() const {
+ return new tools::baremetal::StaticLibTool(*this);
}
std::string BareMetal::computeSysRoot() const {
- if (!getDriver().SysRoot.empty())
- return getDriver().SysRoot + SelectedMultilib.osSuffix();
+ return computeBaseSysRoot(getDriver(), getTriple());
+}
- SmallString<128> SysRootDir;
- llvm::sys::path::append(SysRootDir, getDriver().Dir, "../lib/clang-runtimes",
- getDriver().getTargetTriple());
+BareMetal::OrderedMultilibs BareMetal::getOrderedMultilibs() const {
+ // Get multilibs in reverse order because they're ordered most-specific last.
+ if (!SelectedMultilibs.empty())
+ return llvm::reverse(SelectedMultilibs);
- SysRootDir += SelectedMultilib.osSuffix();
- return std::string(SysRootDir);
+ // No multilibs selected so return a single default multilib.
+ static const llvm::SmallVector<Multilib> Default = {Multilib()};
+ return llvm::reverse(Default);
}
void BareMetal::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
@@ -197,10 +273,14 @@ void BareMetal::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
}
if (!DriverArgs.hasArg(options::OPT_nostdlibinc)) {
- SmallString<128> Dir(computeSysRoot());
- if (!Dir.empty()) {
- llvm::sys::path::append(Dir, "include");
- addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ const SmallString<128> SysRoot(computeSysRoot());
+ if (!SysRoot.empty()) {
+ for (const Multilib &M : getOrderedMultilibs()) {
+ SmallString<128> Dir(SysRoot);
+ llvm::sys::path::append(Dir, M.includeSuffix());
+ llvm::sys::path::append(Dir, "include");
+ addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ }
}
}
}
@@ -211,48 +291,58 @@ void BareMetal::addClangTargetOptions(const ArgList &DriverArgs,
CC1Args.push_back("-nostdsysteminc");
}
-void BareMetal::AddClangCXXStdlibIncludeArgs(
- const ArgList &DriverArgs, ArgStringList &CC1Args) const {
- if (DriverArgs.hasArg(options::OPT_nostdinc) ||
- DriverArgs.hasArg(options::OPT_nostdlibinc) ||
- DriverArgs.hasArg(options::OPT_nostdincxx))
+void BareMetal::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdinc, options::OPT_nostdlibinc,
+ options::OPT_nostdincxx))
return;
+ const Driver &D = getDriver();
std::string SysRoot(computeSysRoot());
if (SysRoot.empty())
return;
- switch (GetCXXStdlibType(DriverArgs)) {
- case ToolChain::CST_Libcxx: {
+ for (const Multilib &M : getOrderedMultilibs()) {
SmallString<128> Dir(SysRoot);
- llvm::sys::path::append(Dir, "include", "c++", "v1");
- addSystemInclude(DriverArgs, CC1Args, Dir.str());
- break;
- }
- case ToolChain::CST_Libstdcxx: {
- SmallString<128> Dir(SysRoot);
- llvm::sys::path::append(Dir, "include", "c++");
- std::error_code EC;
- Generic_GCC::GCCVersion Version = {"", -1, -1, -1, "", "", ""};
- // Walk the subdirs, and find the one with the newest gcc version:
- for (llvm::vfs::directory_iterator
- LI = getDriver().getVFS().dir_begin(Dir.str(), EC),
- LE;
- !EC && LI != LE; LI = LI.increment(EC)) {
- StringRef VersionText = llvm::sys::path::filename(LI->path());
- auto CandidateVersion = Generic_GCC::GCCVersion::Parse(VersionText);
- if (CandidateVersion.Major == -1)
- continue;
- if (CandidateVersion <= Version)
- continue;
- Version = CandidateVersion;
+ llvm::sys::path::append(Dir, M.gccSuffix());
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx: {
+ // First check sysroot/usr/include/c++/v1 if it exists.
+ SmallString<128> TargetDir(Dir);
+ llvm::sys::path::append(TargetDir, "usr", "include", "c++", "v1");
+ if (D.getVFS().exists(TargetDir)) {
+ addSystemInclude(DriverArgs, CC1Args, TargetDir.str());
+ break;
+ }
+ // Add generic path if nothing else succeeded so far.
+ llvm::sys::path::append(Dir, "include", "c++", "v1");
+ addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ break;
+ }
+ case ToolChain::CST_Libstdcxx: {
+ llvm::sys::path::append(Dir, "include", "c++");
+ std::error_code EC;
+ Generic_GCC::GCCVersion Version = {"", -1, -1, -1, "", "", ""};
+ // Walk the subdirs, and find the one with the newest gcc version:
+ for (llvm::vfs::directory_iterator
+ LI = D.getVFS().dir_begin(Dir.str(), EC),
+ LE;
+ !EC && LI != LE; LI = LI.increment(EC)) {
+ StringRef VersionText = llvm::sys::path::filename(LI->path());
+ auto CandidateVersion = Generic_GCC::GCCVersion::Parse(VersionText);
+ if (CandidateVersion.Major == -1)
+ continue;
+ if (CandidateVersion <= Version)
+ continue;
+ Version = CandidateVersion;
+ }
+ if (Version.Major != -1) {
+ llvm::sys::path::append(Dir, Version.Text);
+ addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ }
+ break;
+ }
}
- if (Version.Major == -1)
- return;
- llvm::sys::path::append(Dir, Version.Text);
- addSystemInclude(DriverArgs, CC1Args, Dir.str());
- break;
- }
}
}
@@ -261,6 +351,8 @@ void BareMetal::AddCXXStdlibLibArgs(const ArgList &Args,
switch (GetCXXStdlibType(Args)) {
case ToolChain::CST_Libcxx:
CmdArgs.push_back("-lc++");
+ if (Args.hasArg(options::OPT_fexperimental_library))
+ CmdArgs.push_back("-lc++experimental");
CmdArgs.push_back("-lc++abi");
break;
case ToolChain::CST_Libstdcxx:
@@ -275,10 +367,14 @@ void BareMetal::AddLinkRuntimeLib(const ArgList &Args,
ArgStringList &CmdArgs) const {
ToolChain::RuntimeLibType RLT = GetRuntimeLibType(Args);
switch (RLT) {
- case ToolChain::RLT_CompilerRT:
- CmdArgs.push_back(
- Args.MakeArgString("-lclang_rt.builtins-" + getTriple().getArchName()));
+ case ToolChain::RLT_CompilerRT: {
+ const std::string FileName = getCompilerRT(Args, "builtins");
+ llvm::StringRef BaseName = llvm::sys::path::filename(FileName);
+ BaseName.consume_front("lib");
+ BaseName.consume_back(".a");
+ CmdArgs.push_back(Args.MakeArgString("-l" + BaseName));
return;
+ }
case ToolChain::RLT_Libgcc:
CmdArgs.push_back("-lgcc");
return;
@@ -286,6 +382,51 @@ void BareMetal::AddLinkRuntimeLib(const ArgList &Args,
llvm_unreachable("Unhandled RuntimeLibType.");
}
+void baremetal::StaticLibTool::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and for "clang -w foo.o -o foo". Other warning options are already
+ // handled somewhere else.
+ Args.ClaimAllArgs(options::OPT_w);
+ // Silence warnings when linking C code with a C++ '-stdlib' argument.
+ Args.ClaimAllArgs(options::OPT_stdlib_EQ);
+
+ // ar tool command "llvm-ar <options> <output_file> <input_files>".
+ ArgStringList CmdArgs;
+ // Create and insert file members with a deterministic index.
+ CmdArgs.push_back("rcsD");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (const auto &II : Inputs) {
+ if (II.isFilename()) {
+ CmdArgs.push_back(II.getFilename());
+ }
+ }
+
+ // Delete old output archive file if it already exists before generating a new
+ // archive file.
+ const char *OutputFileName = Output.getFilename();
+ if (Output.isFilename() && llvm::sys::fs::exists(OutputFileName)) {
+ if (std::error_code EC = llvm::sys::fs::remove(OutputFileName)) {
+ D.Diag(diag::err_drv_unable_to_remove_file) << EC.message();
+ return;
+ }
+ }
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetStaticLibToolPath());
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
+}
+
void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -293,22 +434,39 @@ void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *LinkingOutput) const {
ArgStringList CmdArgs;
- auto &TC = static_cast<const toolchains::BareMetal&>(getToolChain());
+ auto &TC = static_cast<const toolchains::BareMetal &>(getToolChain());
+ const llvm::Triple::ArchType Arch = TC.getArch();
+ const llvm::Triple &Triple = getToolChain().getEffectiveTriple();
AddLinkerInputs(TC, Inputs, Args, CmdArgs, JA);
CmdArgs.push_back("-Bstatic");
- Args.AddAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
- options::OPT_e, options::OPT_s, options::OPT_t,
- options::OPT_Z_Flag, options::OPT_r});
+ if (Triple.isARM() || Triple.isThumb()) {
+ bool IsBigEndian = arm::isARMBigEndian(Triple, Args);
+ if (IsBigEndian)
+ arm::appendBE8LinkFlag(Args, CmdArgs, Triple);
+ CmdArgs.push_back(IsBigEndian ? "-EB" : "-EL");
+ } else if (Triple.isAArch64()) {
+ CmdArgs.push_back(Arch == llvm::Triple::aarch64_be ? "-EB" : "-EL");
+ }
+
+ Args.addAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
+ options::OPT_s, options::OPT_t, options::OPT_r});
TC.AddFilePathLibArgs(Args, CmdArgs);
- CmdArgs.push_back(Args.MakeArgString("-L" + TC.getRuntimesDir()));
+ for (const auto &LibPath : TC.getLibraryPaths())
+ CmdArgs.push_back(Args.MakeArgString(llvm::Twine("-L", LibPath)));
+
+ const std::string FileName = TC.getCompilerRT(Args, "builtins");
+ llvm::SmallString<128> PathBuf{FileName};
+ llvm::sys::path::remove_filename(PathBuf);
+ CmdArgs.push_back(Args.MakeArgString("-L" + PathBuf));
if (TC.ShouldLinkCXXStdlib(Args))
TC.AddCXXStdlibLibArgs(Args, CmdArgs);
+
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
CmdArgs.push_back("-lc");
CmdArgs.push_back("-lm");
@@ -316,10 +474,45 @@ void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA,
TC.AddLinkRuntimeLib(Args, CmdArgs);
}
+ if (TC.getTriple().isRISCV())
+ CmdArgs.push_back("-X");
+
+ // The R_ARM_TARGET2 relocation must be treated as R_ARM_REL32 on arm*-*-elf
+ // and arm*-*-eabi (the default is R_ARM_GOT_PREL, used on arm*-*-linux and
+ // arm*-*-*bsd).
+ if (isARMBareMetal(TC.getTriple()))
+ CmdArgs.push_back("--target2=rel");
+
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Args.MakeArgString(TC.GetLinkerPath()),
- CmdArgs, Inputs, Output));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(),
+ Args.MakeArgString(TC.GetLinkerPath()), CmdArgs, Inputs, Output));
+}
+
+// BareMetal toolchain allows all sanitizers where the compiler generates valid
+// code, ignoring all runtime library support issues on the assumption that
+// baremetal targets typically implement their own runtime support.
+SanitizerMask BareMetal::getSupportedSanitizers() const {
+ const bool IsX86_64 = getTriple().getArch() == llvm::Triple::x86_64;
+ const bool IsAArch64 = getTriple().getArch() == llvm::Triple::aarch64 ||
+ getTriple().getArch() == llvm::Triple::aarch64_be;
+ const bool IsRISCV64 = getTriple().getArch() == llvm::Triple::riscv64;
+ SanitizerMask Res = ToolChain::getSupportedSanitizers();
+ Res |= SanitizerKind::Address;
+ Res |= SanitizerKind::KernelAddress;
+ Res |= SanitizerKind::PointerCompare;
+ Res |= SanitizerKind::PointerSubtract;
+ Res |= SanitizerKind::Fuzzer;
+ Res |= SanitizerKind::FuzzerNoLink;
+ Res |= SanitizerKind::Vptr;
+ Res |= SanitizerKind::SafeStack;
+ Res |= SanitizerKind::Thread;
+ Res |= SanitizerKind::Scudo;
+ if (IsX86_64 || IsAArch64 || IsRISCV64) {
+ Res |= SanitizerKind::HWAddress;
+ Res |= SanitizerKind::KernelHWAddress;
+ }
+ return Res;
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.h
index d68c43c64c97..67b5aa5998fc 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/BareMetal.h
@@ -1,4 +1,4 @@
-//===--- BareMetal.h - Bare Metal Tool and ToolChain -------------*- C++ -*-===//
+//===--- BareMetal.h - Bare Metal Tool and ToolChain ------------*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -32,24 +32,22 @@ public:
protected:
Tool *buildLinker() const override;
-
- std::string buildCompilerRTBasename(const llvm::opt::ArgList &Args,
- StringRef Component,
- FileType Type = ToolChain::FT_Static,
- bool AddArch = true) const override;
+ Tool *buildStaticLibTool() const override;
public:
bool useIntegratedAs() const override { return true; }
+ bool isBareMetal() const override { return true; }
bool isCrossCompiling() const override { return true; }
+ bool HasNativeLLVMSupport() const override { return true; }
bool isPICDefault() const override { return false; }
- bool isPIEDefault() const override { return false; }
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override {
+ return false;
+ }
bool isPICDefaultForced() const override { return false; }
bool SupportsProfiling() const override { return false; }
StringRef getOSLibName() const override { return "baremetal"; }
- std::string getCompilerRTPath() const override;
-
RuntimeLibType GetDefaultRuntimeLibType() const override {
return ToolChain::RLT_CompilerRT;
}
@@ -59,12 +57,13 @@ public:
const char *getDefaultLinker() const override { return "ld.lld"; }
- std::string getRuntimesDir() const;
- void AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const override;
- void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args,
- Action::OffloadKind DeviceOffloadKind) const override;
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void
+ addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
void AddClangCXXStdlibIncludeArgs(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
@@ -73,6 +72,12 @@ public:
void AddLinkRuntimeLib(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
std::string computeSysRoot() const override;
+ SanitizerMask getSupportedSanitizers() const override;
+
+private:
+ using OrderedMultilibs =
+ llvm::iterator_range<llvm::SmallVector<Multilib>::const_reverse_iterator>;
+ OrderedMultilibs getOrderedMultilibs() const;
};
} // namespace toolchains
@@ -80,7 +85,21 @@ public:
namespace tools {
namespace baremetal {
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+class LLVM_LIBRARY_VISIBILITY StaticLibTool : public Tool {
+public:
+ StaticLibTool(const ToolChain &TC)
+ : Tool("baremetal::StaticLibTool", "llvm-ar", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
public:
Linker(const ToolChain &TC) : Tool("baremetal::Linker", "ld.lld", TC) {}
bool isLinkJob() const override { return true; }
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CSKYToolChain.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/CSKYToolChain.cpp
new file mode 100644
index 000000000000..feb3bc922920
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CSKYToolChain.cpp
@@ -0,0 +1,204 @@
+//===--- CSKYToolchain.cpp - CSKY ToolChain Implementations ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "CSKYToolChain.h"
+#include "CommonArgs.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/InputInfo.h"
+#include "clang/Driver/Options.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang::driver::tools;
+using namespace clang;
+using namespace llvm::opt;
+
+static void addMultilibsFilePaths(const Driver &D, const MultilibSet &Multilibs,
+ const Multilib &Multilib,
+ StringRef InstallPath,
+ ToolChain::path_list &Paths) {
+ if (const auto &PathsCallback = Multilibs.filePathsCallback())
+ for (const auto &Path : PathsCallback(Multilib))
+ addPathIfExists(D, InstallPath + Path, Paths);
+}
+
+/// CSKY Toolchain
+CSKYToolChain::CSKYToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+ GCCInstallation.init(Triple, Args);
+ if (GCCInstallation.isValid()) {
+ Multilibs = GCCInstallation.getMultilibs();
+ SelectedMultilibs.assign({GCCInstallation.getMultilib()});
+ path_list &Paths = getFilePaths();
+ // Add toolchain/multilib specific file paths.
+ addMultilibsFilePaths(D, Multilibs, SelectedMultilibs.back(),
+ GCCInstallation.getInstallPath(), Paths);
+ getFilePaths().push_back(GCCInstallation.getInstallPath().str() +
+ SelectedMultilibs.back().osSuffix());
+ ToolChain::path_list &PPaths = getProgramPaths();
+ // Multilib cross-compiler GCC installations put ld in a triple-prefixed
+ // directory off of the parent of the GCC installation.
+ PPaths.push_back(Twine(GCCInstallation.getParentLibPath() + "/../" +
+ GCCInstallation.getTriple().str() + "/bin")
+ .str());
+ PPaths.push_back((GCCInstallation.getParentLibPath() + "/../bin").str());
+ getFilePaths().push_back(computeSysRoot() + "/lib" +
+ SelectedMultilibs.back().osSuffix());
+ } else {
+ getProgramPaths().push_back(D.Dir);
+ getFilePaths().push_back(computeSysRoot() + "/lib");
+ }
+}
+
+Tool *CSKYToolChain::buildLinker() const {
+ return new tools::CSKY::Linker(*this);
+}
+
+ToolChain::RuntimeLibType CSKYToolChain::GetDefaultRuntimeLibType() const {
+ return GCCInstallation.isValid() ? ToolChain::RLT_Libgcc
+ : ToolChain::RLT_CompilerRT;
+}
+
+ToolChain::UnwindLibType
+CSKYToolChain::GetUnwindLibType(const llvm::opt::ArgList &Args) const {
+ return ToolChain::UNW_None;
+}
+
+void CSKYToolChain::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind) const {
+ CC1Args.push_back("-nostdsysteminc");
+}
+
+void CSKYToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nostdlibinc)) {
+ SmallString<128> Dir(computeSysRoot());
+ llvm::sys::path::append(Dir, "include");
+ addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ SmallString<128> Dir2(computeSysRoot());
+ llvm::sys::path::append(Dir2, "sys-include");
+ addSystemInclude(DriverArgs, CC1Args, Dir2.str());
+ }
+}
+
+void CSKYToolChain::addLibStdCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ const GCCVersion &Version = GCCInstallation.getVersion();
+ StringRef TripleStr = GCCInstallation.getTriple().str();
+ const Multilib &Multilib = GCCInstallation.getMultilib();
+ addLibStdCXXIncludePaths(computeSysRoot() + "/include/c++/" + Version.Text,
+ TripleStr, Multilib.includeSuffix(), DriverArgs,
+ CC1Args);
+}
+
+std::string CSKYToolChain::computeSysRoot() const {
+ if (!getDriver().SysRoot.empty())
+ return getDriver().SysRoot;
+
+ SmallString<128> SysRootDir;
+ if (GCCInstallation.isValid()) {
+ StringRef LibDir = GCCInstallation.getParentLibPath();
+ StringRef TripleStr = GCCInstallation.getTriple().str();
+ llvm::sys::path::append(SysRootDir, LibDir, "..", TripleStr);
+ } else {
+ // Use the triple as provided to the driver. Unlike the parsed triple
+ // this has not been normalized to always contain every field.
+ llvm::sys::path::append(SysRootDir, getDriver().Dir, "..",
+ getDriver().getTargetTriple());
+ }
+
+ if (!llvm::sys::fs::exists(SysRootDir))
+ return std::string();
+
+ return std::string(SysRootDir);
+}
+
+void CSKY::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const ToolChain &ToolChain = getToolChain();
+ const Driver &D = ToolChain.getDriver();
+ ArgStringList CmdArgs;
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("cskyelf");
+
+ std::string Linker = getToolChain().GetLinkerPath();
+
+ bool WantCRTs =
+ !Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles);
+
+ const char *crtbegin, *crtend;
+ auto RuntimeLib = ToolChain.GetRuntimeLibType(Args);
+ if (RuntimeLib == ToolChain::RLT_Libgcc) {
+ crtbegin = "crtbegin.o";
+ crtend = "crtend.o";
+ } else {
+ assert(RuntimeLib == ToolChain::RLT_CompilerRT);
+ crtbegin = ToolChain.getCompilerRTArgString(Args, "crtbegin",
+ ToolChain::FT_Object);
+ crtend =
+ ToolChain.getCompilerRTArgString(Args, "crtend", ToolChain::FT_Object);
+ }
+
+ if (WantCRTs) {
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+ Args.addAllArgs(CmdArgs, {options::OPT_T_Group, options::OPT_s,
+ options::OPT_t, options::OPT_r});
+
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
+
+ // TODO: add C++ includes and libs if compiling C++.
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (ToolChain.ShouldLinkCXXStdlib(Args))
+ ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
+ CmdArgs.push_back("--start-group");
+ CmdArgs.push_back("-lc");
+ if (Args.hasArg(options::OPT_msim))
+ CmdArgs.push_back("-lsemi");
+ else
+ CmdArgs.push_back("-lnosys");
+ CmdArgs.push_back("--end-group");
+ AddRunTimeLibs(ToolChain, ToolChain.getDriver(), CmdArgs, Args);
+ }
+
+ if (WantCRTs) {
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend)));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
+ }
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Args.MakeArgString(Linker),
+ CmdArgs, Inputs, Output));
+}
+// CSKY tools end.
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CSKYToolChain.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/CSKYToolChain.h
new file mode 100644
index 000000000000..a57324a42641
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CSKYToolChain.h
@@ -0,0 +1,63 @@
+//===--- CSKYToolchain.h - CSKY ToolChain Implementations -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_CSKYTOOLCHAIN_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_CSKYTOOLCHAIN_H
+
+#include "Gnu.h"
+#include "clang/Driver/ToolChain.h"
+
+namespace clang {
+namespace driver {
+namespace toolchains {
+
+class LLVM_LIBRARY_VISIBILITY CSKYToolChain : public Generic_ELF {
+public:
+ CSKYToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind) const override;
+ RuntimeLibType GetDefaultRuntimeLibType() const override;
+ UnwindLibType GetUnwindLibType(const llvm::opt::ArgList &Args) const override;
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void
+ addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+protected:
+ Tool *buildLinker() const override;
+
+private:
+ std::string computeSysRoot() const override;
+};
+
+} // end namespace toolchains
+
+namespace tools {
+namespace CSKY {
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
+public:
+ Linker(const ToolChain &TC) : Tool("CSKY::Linker", "ld", TC) {}
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace CSKY
+} // end namespace tools
+
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_CSKYTOOLCHAIN_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
index cb38ab51327c..aa344b3465ab 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
@@ -10,6 +10,8 @@
#include "AMDGPU.h"
#include "Arch/AArch64.h"
#include "Arch/ARM.h"
+#include "Arch/CSKY.h"
+#include "Arch/LoongArch.h"
#include "Arch/M68k.h"
#include "Arch/Mips.h"
#include "Arch/PPC.h"
@@ -22,29 +24,43 @@
#include "Hexagon.h"
#include "MSP430.h"
#include "PS4CPU.h"
+#include "clang/Basic/CLWarnings.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/CodeGenOptions.h"
+#include "clang/Basic/HeaderInclude.h"
#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/MakeSupport.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/Version.h"
+#include "clang/Config/config.h"
+#include "clang/Driver/Action.h"
#include "clang/Driver/Distro.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
+#include "clang/Driver/Types.h"
#include "clang/Driver/XRayArgs.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/BinaryFormat/Magic.h"
#include "llvm/Config/llvm-config.h"
+#include "llvm/Object/ObjectFile.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Compression.h"
+#include "llvm/Support/Error.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/RISCVISAInfo.h"
#include "llvm/Support/YAMLParser.h"
+#include "llvm/TargetParser/ARMTargetParserCommon.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/LoongArchTargetParser.h"
+#include "llvm/TargetParser/RISCVTargetParser.h"
+#include <cctype>
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -52,8 +68,11 @@ using namespace clang;
using namespace llvm::opt;
static void CheckPreprocessingOptions(const Driver &D, const ArgList &Args) {
- if (Arg *A =
- Args.getLastArg(clang::driver::options::OPT_C, options::OPT_CC)) {
+ if (Arg *A = Args.getLastArg(clang::driver::options::OPT_C, options::OPT_CC,
+ options::OPT_fminimize_whitespace,
+ options::OPT_fno_minimize_whitespace,
+ options::OPT_fkeep_system_includes,
+ options::OPT_fno_keep_system_includes)) {
if (!Args.hasArg(options::OPT_E) && !Args.hasArg(options::OPT__SLASH_P) &&
!Args.hasArg(options::OPT__SLASH_EP) && !D.CCCIsCPP()) {
D.Diag(clang::diag::err_drv_argument_only_allowed_with)
@@ -90,34 +109,6 @@ static void EscapeSpacesAndBackslashes(const char *Arg,
}
}
-// Quote target names for inclusion in GNU Make dependency files.
-// Only the characters '$', '#', ' ', '\t' are quoted.
-static void QuoteTarget(StringRef Target, SmallVectorImpl<char> &Res) {
- for (unsigned i = 0, e = Target.size(); i != e; ++i) {
- switch (Target[i]) {
- case ' ':
- case '\t':
- // Escape the preceding backslashes
- for (int j = i - 1; j >= 0 && Target[j] == '\\'; --j)
- Res.push_back('\\');
-
- // Escape the space/tab
- Res.push_back('\\');
- break;
- case '$':
- Res.push_back('$');
- break;
- case '#':
- Res.push_back('\\');
- break;
- default:
- break;
- }
-
- Res.push_back(Target[i]);
- }
-}
-
/// Apply \a Work on the current tool chain \a RegularToolChain and any other
/// offloading tool chain that is associated with the current action \a JA.
static void
@@ -220,17 +211,21 @@ static void ParseMRecip(const Driver &D, const ArgList &Args,
llvm::StringMap<bool> OptionStrings;
OptionStrings.insert(std::make_pair("divd", false));
OptionStrings.insert(std::make_pair("divf", false));
+ OptionStrings.insert(std::make_pair("divh", false));
OptionStrings.insert(std::make_pair("vec-divd", false));
OptionStrings.insert(std::make_pair("vec-divf", false));
+ OptionStrings.insert(std::make_pair("vec-divh", false));
OptionStrings.insert(std::make_pair("sqrtd", false));
OptionStrings.insert(std::make_pair("sqrtf", false));
+ OptionStrings.insert(std::make_pair("sqrth", false));
OptionStrings.insert(std::make_pair("vec-sqrtd", false));
OptionStrings.insert(std::make_pair("vec-sqrtf", false));
+ OptionStrings.insert(std::make_pair("vec-sqrth", false));
for (unsigned i = 0; i != NumOptions; ++i) {
StringRef Val = A->getValue(i);
- bool IsDisabled = Val.startswith(DisabledPrefixIn);
+ bool IsDisabled = Val.starts_with(DisabledPrefixIn);
// Ignore the disablement token for string matching.
if (IsDisabled)
Val = Val.substr(1);
@@ -249,10 +244,11 @@ static void ParseMRecip(const Driver &D, const ArgList &Args,
D.Diag(diag::err_drv_unknown_argument) << Val;
return;
}
- // The option was specified without a float or double suffix.
- // Make sure that the double entry was not already specified.
+ // The option was specified without a half or float or double suffix.
+ // Make sure that the double or half entry was not already specified.
// The float entry will be checked below.
- if (OptionStrings[ValBase.str() + 'd']) {
+ if (OptionStrings[ValBase.str() + 'd'] ||
+ OptionStrings[ValBase.str() + 'h']) {
D.Diag(diag::err_drv_invalid_value) << A->getOption().getName() << Val;
return;
}
@@ -267,9 +263,12 @@ static void ParseMRecip(const Driver &D, const ArgList &Args,
// Mark the matched option as found. Do not allow duplicate specifiers.
OptionIter->second = true;
- // If the precision was not specified, also mark the double entry as found.
- if (ValBase.back() != 'f' && ValBase.back() != 'd')
+ // If the precision was not specified, also mark the double and half entry
+ // as found.
+ if (ValBase.back() != 'f' && ValBase.back() != 'd' && ValBase.back() != 'h') {
OptionStrings[ValBase.str() + 'd'] = true;
+ OptionStrings[ValBase.str() + 'h'] = true;
+ }
// Build the output string.
StringRef Prefix = IsDisabled ? DisabledPrefixOut : EnabledPrefixOut;
@@ -302,87 +301,6 @@ static void ParseMPreferVectorWidth(const Driver &D, const ArgList &Args,
}
}
-static void getWebAssemblyTargetFeatures(const ArgList &Args,
- std::vector<StringRef> &Features) {
- handleTargetFeaturesGroup(Args, Features, options::OPT_m_wasm_Features_Group);
-}
-
-static void getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
- const ArgList &Args, ArgStringList &CmdArgs,
- bool ForAS, bool IsAux = false) {
- std::vector<StringRef> Features;
- switch (Triple.getArch()) {
- default:
- break;
- case llvm::Triple::mips:
- case llvm::Triple::mipsel:
- case llvm::Triple::mips64:
- case llvm::Triple::mips64el:
- mips::getMIPSTargetFeatures(D, Triple, Args, Features);
- break;
-
- case llvm::Triple::arm:
- case llvm::Triple::armeb:
- case llvm::Triple::thumb:
- case llvm::Triple::thumbeb:
- arm::getARMTargetFeatures(D, Triple, Args, CmdArgs, Features, ForAS);
- break;
-
- case llvm::Triple::ppc:
- case llvm::Triple::ppcle:
- case llvm::Triple::ppc64:
- case llvm::Triple::ppc64le:
- ppc::getPPCTargetFeatures(D, Triple, Args, Features);
- break;
- case llvm::Triple::riscv32:
- case llvm::Triple::riscv64:
- riscv::getRISCVTargetFeatures(D, Triple, Args, Features);
- break;
- case llvm::Triple::systemz:
- systemz::getSystemZTargetFeatures(D, Args, Features);
- break;
- case llvm::Triple::aarch64:
- case llvm::Triple::aarch64_32:
- case llvm::Triple::aarch64_be:
- aarch64::getAArch64TargetFeatures(D, Triple, Args, Features, ForAS);
- break;
- case llvm::Triple::x86:
- case llvm::Triple::x86_64:
- x86::getX86TargetFeatures(D, Triple, Args, Features);
- break;
- case llvm::Triple::hexagon:
- hexagon::getHexagonTargetFeatures(D, Args, Features);
- break;
- case llvm::Triple::wasm32:
- case llvm::Triple::wasm64:
- getWebAssemblyTargetFeatures(Args, Features);
- break;
- case llvm::Triple::sparc:
- case llvm::Triple::sparcel:
- case llvm::Triple::sparcv9:
- sparc::getSparcTargetFeatures(D, Args, Features);
- break;
- case llvm::Triple::r600:
- case llvm::Triple::amdgcn:
- amdgpu::getAMDGPUTargetFeatures(D, Triple, Args, Features);
- break;
- case llvm::Triple::m68k:
- m68k::getM68kTargetFeatures(D, Triple, Args, Features);
- break;
- case llvm::Triple::msp430:
- msp430::getMSP430TargetFeatures(D, Args, Features);
- break;
- case llvm::Triple::ve:
- ve::getVETargetFeatures(D, Args, Features);
- break;
- }
-
- for (auto Feature : unifyTargetFeatures(Features)) {
- CmdArgs.push_back(IsAux ? "-aux-target-feature" : "-target-feature");
- CmdArgs.push_back(Feature.data());
- }
-}
-
static bool
shouldUseExceptionTablesForObjCExceptions(const ObjCRuntime &runtime,
const llvm::Triple &Triple) {
@@ -401,7 +319,7 @@ shouldUseExceptionTablesForObjCExceptions(const ObjCRuntime &runtime,
}
/// Adds exception related arguments to the driver command arguments. There's a
-/// master flag, -fexceptions and also language specific flags to enable/disable
+/// main flag, -fexceptions and also language specific flags to enable/disable
/// C++ and Objective-C exceptions. This makes it possible to for example
/// disable C++ exceptions but enable Objective-C exceptions.
static bool addExceptionArgs(const ArgList &Args, types::ID InputType,
@@ -446,9 +364,9 @@ static bool addExceptionArgs(const ArgList &Args, types::ID InputType,
}
if (types::isCXX(InputType)) {
- // Disable C++ EH by default on XCore and PS4.
- bool CXXExceptionsEnabled =
- Triple.getArch() != llvm::Triple::xcore && !Triple.isPS4CPU();
+ // Disable C++ EH by default on XCore and PS4/PS5.
+ bool CXXExceptionsEnabled = Triple.getArch() != llvm::Triple::xcore &&
+ !Triple.isPS() && !Triple.isDriverKit();
Arg *ExceptionArg = Args.getLastArg(
options::OPT_fcxx_exceptions, options::OPT_fno_cxx_exceptions,
options::OPT_fexceptions, options::OPT_fno_exceptions);
@@ -469,6 +387,9 @@ static bool addExceptionArgs(const ArgList &Args, types::ID InputType,
// So we do not set EH to false.
Args.AddLastArg(CmdArgs, options::OPT_fignore_exceptions);
+ Args.addOptInFlag(CmdArgs, options::OPT_fassume_nothrow_exception_dtor,
+ options::OPT_fno_assume_nothrow_exception_dtor);
+
if (EH)
CmdArgs.push_back("-fexceptions");
return EH;
@@ -490,140 +411,10 @@ static bool ShouldEnableAutolink(const ArgList &Args, const ToolChain &TC,
Default);
}
-// Convert an arg of the form "-gN" or "-ggdbN" or one of their aliases
-// to the corresponding DebugInfoKind.
-static codegenoptions::DebugInfoKind DebugLevelToInfoKind(const Arg &A) {
- assert(A.getOption().matches(options::OPT_gN_Group) &&
- "Not a -g option that specifies a debug-info level");
- if (A.getOption().matches(options::OPT_g0) ||
- A.getOption().matches(options::OPT_ggdb0))
- return codegenoptions::NoDebugInfo;
- if (A.getOption().matches(options::OPT_gline_tables_only) ||
- A.getOption().matches(options::OPT_ggdb1))
- return codegenoptions::DebugLineTablesOnly;
- if (A.getOption().matches(options::OPT_gline_directives_only))
- return codegenoptions::DebugDirectivesOnly;
- return codegenoptions::DebugInfoConstructor;
-}
-
-static bool mustUseNonLeafFramePointerForTarget(const llvm::Triple &Triple) {
- switch (Triple.getArch()){
- default:
- return false;
- case llvm::Triple::arm:
- case llvm::Triple::thumb:
- // ARM Darwin targets require a frame pointer to be always present to aid
- // offline debugging via backtraces.
- return Triple.isOSDarwin();
- }
-}
-
-static bool useFramePointerForTargetByDefault(const ArgList &Args,
- const llvm::Triple &Triple) {
- if (Args.hasArg(options::OPT_pg) && !Args.hasArg(options::OPT_mfentry))
- return true;
-
- switch (Triple.getArch()) {
- case llvm::Triple::xcore:
- case llvm::Triple::wasm32:
- case llvm::Triple::wasm64:
- case llvm::Triple::msp430:
- // XCore never wants frame pointers, regardless of OS.
- // WebAssembly never wants frame pointers.
- return false;
- case llvm::Triple::ppc:
- case llvm::Triple::ppcle:
- case llvm::Triple::ppc64:
- case llvm::Triple::ppc64le:
- case llvm::Triple::riscv32:
- case llvm::Triple::riscv64:
- case llvm::Triple::amdgcn:
- case llvm::Triple::r600:
- return !areOptimizationsEnabled(Args);
- default:
- break;
- }
-
- if (Triple.isOSNetBSD()) {
- return !areOptimizationsEnabled(Args);
- }
-
- if (Triple.isOSLinux() || Triple.getOS() == llvm::Triple::CloudABI ||
- Triple.isOSHurd()) {
- switch (Triple.getArch()) {
- // Don't use a frame pointer on linux if optimizing for certain targets.
- case llvm::Triple::arm:
- case llvm::Triple::armeb:
- case llvm::Triple::thumb:
- case llvm::Triple::thumbeb:
- if (Triple.isAndroid())
- return true;
- LLVM_FALLTHROUGH;
- case llvm::Triple::mips64:
- case llvm::Triple::mips64el:
- case llvm::Triple::mips:
- case llvm::Triple::mipsel:
- case llvm::Triple::systemz:
- case llvm::Triple::x86:
- case llvm::Triple::x86_64:
- return !areOptimizationsEnabled(Args);
- default:
- return true;
- }
- }
-
- if (Triple.isOSWindows()) {
- switch (Triple.getArch()) {
- case llvm::Triple::x86:
- return !areOptimizationsEnabled(Args);
- case llvm::Triple::x86_64:
- return Triple.isOSBinFormatMachO();
- case llvm::Triple::arm:
- case llvm::Triple::thumb:
- // Windows on ARM builds with FPO disabled to aid fast stack walking
- return true;
- default:
- // All other supported Windows ISAs use xdata unwind information, so frame
- // pointers are not generally useful.
- return false;
- }
- }
-
- return true;
-}
-
-static CodeGenOptions::FramePointerKind
-getFramePointerKind(const ArgList &Args, const llvm::Triple &Triple) {
- // We have 4 states:
- //
- // 00) leaf retained, non-leaf retained
- // 01) leaf retained, non-leaf omitted (this is invalid)
- // 10) leaf omitted, non-leaf retained
- // (what -momit-leaf-frame-pointer was designed for)
- // 11) leaf omitted, non-leaf omitted
- //
- // "omit" options taking precedence over "no-omit" options is the only way
- // to make 3 valid states representable
- Arg *A = Args.getLastArg(options::OPT_fomit_frame_pointer,
- options::OPT_fno_omit_frame_pointer);
- bool OmitFP = A && A->getOption().matches(options::OPT_fomit_frame_pointer);
- bool NoOmitFP =
- A && A->getOption().matches(options::OPT_fno_omit_frame_pointer);
- bool OmitLeafFP = Args.hasFlag(options::OPT_momit_leaf_frame_pointer,
- options::OPT_mno_omit_leaf_frame_pointer,
- Triple.isAArch64() || Triple.isPS4CPU());
- if (NoOmitFP || mustUseNonLeafFramePointerForTarget(Triple) ||
- (!OmitFP && useFramePointerForTargetByDefault(Args, Triple))) {
- if (OmitLeafFP)
- return CodeGenOptions::FramePointerKind::NonLeaf;
- return CodeGenOptions::FramePointerKind::All;
- }
- return CodeGenOptions::FramePointerKind::None;
-}
-
/// Add a CC1 option to specify the debug compilation directory.
-static void addDebugCompDirArg(const ArgList &Args, ArgStringList &CmdArgs,
- const llvm::vfs::FileSystem &VFS) {
+static const char *addDebugCompDirArg(const ArgList &Args,
+ ArgStringList &CmdArgs,
+ const llvm::vfs::FileSystem &VFS) {
if (Arg *A = Args.getLastArg(options::OPT_ffile_compilation_dir_EQ,
options::OPT_fdebug_compilation_dir_EQ)) {
if (A->getOption().matches(options::OPT_ffile_compilation_dir_EQ))
@@ -635,20 +426,62 @@ static void addDebugCompDirArg(const ArgList &Args, ArgStringList &CmdArgs,
VFS.getCurrentWorkingDirectory()) {
CmdArgs.push_back(Args.MakeArgString("-fdebug-compilation-dir=" + *CWD));
}
+ StringRef Path(CmdArgs.back());
+ return Path.substr(Path.find('=') + 1).data();
+}
+
+static void addDebugObjectName(const ArgList &Args, ArgStringList &CmdArgs,
+ const char *DebugCompilationDir,
+ const char *OutputFileName) {
+ // No need to generate a value for -object-file-name if it was provided.
+ for (auto *Arg : Args.filtered(options::OPT_Xclang))
+ if (StringRef(Arg->getValue()).starts_with("-object-file-name"))
+ return;
+
+ if (Args.hasArg(options::OPT_object_file_name_EQ))
+ return;
+
+ SmallString<128> ObjFileNameForDebug(OutputFileName);
+ if (ObjFileNameForDebug != "-" &&
+ !llvm::sys::path::is_absolute(ObjFileNameForDebug) &&
+ (!DebugCompilationDir ||
+ llvm::sys::path::is_absolute(DebugCompilationDir))) {
+ // Make the path absolute in the debug infos like MSVC does.
+ llvm::sys::fs::make_absolute(ObjFileNameForDebug);
+ }
+ // If the object file name is a relative path, then always use Windows
+ // backslash style as -object-file-name is used for embedding object file path
+ // in codeview and it can only be generated when targeting on Windows.
+ // Otherwise, just use native absolute path.
+ llvm::sys::path::Style Style =
+ llvm::sys::path::is_absolute(ObjFileNameForDebug)
+ ? llvm::sys::path::Style::native
+ : llvm::sys::path::Style::windows_backslash;
+ llvm::sys::path::remove_dots(ObjFileNameForDebug, /*remove_dot_dot=*/true,
+ Style);
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-object-file-name=") + ObjFileNameForDebug));
}
/// Add a CC1 and CC1AS option to specify the debug file path prefix map.
-static void addDebugPrefixMapArg(const Driver &D, const ArgList &Args, ArgStringList &CmdArgs) {
- for (const Arg *A : Args.filtered(options::OPT_ffile_prefix_map_EQ,
- options::OPT_fdebug_prefix_map_EQ)) {
- StringRef Map = A->getValue();
- if (Map.find('=') == StringRef::npos)
- D.Diag(diag::err_drv_invalid_argument_to_option)
- << Map << A->getOption().getName();
+static void addDebugPrefixMapArg(const Driver &D, const ToolChain &TC,
+ const ArgList &Args, ArgStringList &CmdArgs) {
+ auto AddOneArg = [&](StringRef Map, StringRef Name) {
+ if (!Map.contains('='))
+ D.Diag(diag::err_drv_invalid_argument_to_option) << Map << Name;
else
CmdArgs.push_back(Args.MakeArgString("-fdebug-prefix-map=" + Map));
+ };
+
+ for (const Arg *A : Args.filtered(options::OPT_ffile_prefix_map_EQ,
+ options::OPT_fdebug_prefix_map_EQ)) {
+ AddOneArg(A->getValue(), A->getOption().getName());
A->claim();
}
+ std::string GlobalRemapEntry = TC.GetGlobalDebugPathRemapping();
+ if (GlobalRemapEntry.empty())
+ return;
+ AddOneArg(GlobalRemapEntry, "environment");
}
/// Add a CC1 and CC1AS option to specify the macro file path prefix map.
@@ -657,7 +490,7 @@ static void addMacroPrefixMapArg(const Driver &D, const ArgList &Args,
for (const Arg *A : Args.filtered(options::OPT_ffile_prefix_map_EQ,
options::OPT_fmacro_prefix_map_EQ)) {
StringRef Map = A->getValue();
- if (Map.find('=') == StringRef::npos)
+ if (!Map.contains('='))
D.Diag(diag::err_drv_invalid_argument_to_option)
<< Map << A->getOption().getName();
else
@@ -672,7 +505,7 @@ static void addCoveragePrefixMapArg(const Driver &D, const ArgList &Args,
for (const Arg *A : Args.filtered(options::OPT_ffile_prefix_map_EQ,
options::OPT_fcoverage_prefix_map_EQ)) {
StringRef Map = A->getValue();
- if (Map.find('=') == StringRef::npos)
+ if (!Map.contains('='))
D.Diag(diag::err_drv_invalid_argument_to_option)
<< Map << A->getOption().getName();
else
@@ -746,10 +579,10 @@ static void addDashXForInput(const ArgList &Args, const InputInfo &Input,
}
static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
- const Driver &D, const InputInfo &Output,
- const ArgList &Args,
+ const JobAction &JA, const InputInfo &Output,
+ const ArgList &Args, SanitizerArgs &SanArgs,
ArgStringList &CmdArgs) {
-
+ const Driver &D = TC.getDriver();
auto *PGOGenerateArg = Args.getLastArg(options::OPT_fprofile_generate,
options::OPT_fprofile_generate_EQ,
options::OPT_fno_profile_generate);
@@ -757,12 +590,7 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
PGOGenerateArg->getOption().matches(options::OPT_fno_profile_generate))
PGOGenerateArg = nullptr;
- auto *CSPGOGenerateArg = Args.getLastArg(options::OPT_fcs_profile_generate,
- options::OPT_fcs_profile_generate_EQ,
- options::OPT_fno_profile_generate);
- if (CSPGOGenerateArg &&
- CSPGOGenerateArg->getOption().matches(options::OPT_fno_profile_generate))
- CSPGOGenerateArg = nullptr;
+ auto *CSPGOGenerateArg = getLastCSProfileGenerateArg(Args);
auto *ProfileGenerateArg = Args.getLastArg(
options::OPT_fprofile_instr_generate,
@@ -794,14 +622,6 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
}
if (TC.getTriple().isOSAIX()) {
- if (PGOGenerateArg)
- if (!D.isUsingLTO(false /*IsDeviceOffloadAction */) ||
- D.getLTOMode() != LTOK_Full)
- D.Diag(clang::diag::err_drv_argument_only_allowed_with)
- << PGOGenerateArg->getSpelling() << "-flto";
- if (ProfileGenerateArg)
- D.Diag(diag::err_drv_unsupported_opt_for_target)
- << ProfileGenerateArg->getSpelling() << TC.getTriple().str();
if (Arg *ProfileSampleUseArg = getLastProfileSampleUseArg(Args))
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< ProfileSampleUseArg->getSpelling() << TC.getTriple().str();
@@ -869,10 +689,6 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
options::OPT_fno_test_coverage, false) ||
Args.hasArg(options::OPT_coverage);
bool EmitCovData = TC.needsGCovInstrumentation(Args);
- if (EmitCovNotes)
- CmdArgs.push_back("-ftest-coverage");
- if (EmitCovData)
- CmdArgs.push_back("-fprofile-arcs");
if (Args.hasFlag(options::OPT_fcoverage_mapping,
options::OPT_fno_coverage_mapping, false)) {
@@ -884,6 +700,17 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
CmdArgs.push_back("-fcoverage-mapping");
}
+ if (Args.hasFlag(options::OPT_fmcdc_coverage, options::OPT_fno_mcdc_coverage,
+ false)) {
+ if (!Args.hasFlag(options::OPT_fcoverage_mapping,
+ options::OPT_fno_coverage_mapping, false))
+ D.Diag(clang::diag::err_drv_argument_only_allowed_with)
+ << "-fcoverage-mcdc"
+ << "-fcoverage-mapping";
+
+ CmdArgs.push_back("-fcoverage-mcdc");
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_ffile_compilation_dir_EQ,
options::OPT_fcoverage_compilation_dir_EQ)) {
if (A->getOption().matches(options::OPT_ffile_compilation_dir_EQ))
@@ -925,11 +752,30 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
CmdArgs.push_back("-fprofile-update=atomic");
else if (Val != "single")
D.Diag(diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << Val;
- } else if (TC.getSanitizerArgs().needsTsanRt()) {
- CmdArgs.push_back("-fprofile-update=atomic");
+ << A->getSpelling() << Val;
}
+ int FunctionGroups = 1;
+ int SelectedFunctionGroup = 0;
+ if (const auto *A = Args.getLastArg(options::OPT_fprofile_function_groups)) {
+ StringRef Val = A->getValue();
+ if (Val.getAsInteger(0, FunctionGroups) || FunctionGroups < 1)
+ D.Diag(diag::err_drv_invalid_int_value) << A->getAsString(Args) << Val;
+ }
+ if (const auto *A =
+ Args.getLastArg(options::OPT_fprofile_selected_function_group)) {
+ StringRef Val = A->getValue();
+ if (Val.getAsInteger(0, SelectedFunctionGroup) ||
+ SelectedFunctionGroup < 0 || SelectedFunctionGroup >= FunctionGroups)
+ D.Diag(diag::err_drv_invalid_int_value) << A->getAsString(Args) << Val;
+ }
+ if (FunctionGroups != 1)
+ CmdArgs.push_back(Args.MakeArgString("-fprofile-function-groups=" +
+ Twine(FunctionGroups)));
+ if (SelectedFunctionGroup != 0)
+ CmdArgs.push_back(Args.MakeArgString("-fprofile-selected-function-group=" +
+ Twine(SelectedFunctionGroup)));
+
// Leave -fprofile-dir= an unused argument unless .gcda emission is
// enabled. To be polite, with '-fprofile-arcs -fno-profile-arcs' consider
// the flag used. There is no -fno-profile-dir, so the user has no
@@ -939,36 +785,45 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
Args.hasArg(options::OPT_coverage))
FProfileDir = Args.getLastArg(options::OPT_fprofile_dir);
- // Put the .gcno and .gcda files (if needed) next to the object file or
- // bitcode file in the case of LTO.
- // FIXME: There should be a simpler way to find the object file for this
- // input, and this code probably does the wrong thing for commands that
- // compile and link all at once.
- if ((Args.hasArg(options::OPT_c) || Args.hasArg(options::OPT_S)) &&
- (EmitCovNotes || EmitCovData) && Output.isFilename()) {
- SmallString<128> OutputFilename;
- if (Arg *FinalOutput = C.getArgs().getLastArg(options::OPT__SLASH_Fo))
- OutputFilename = FinalOutput->getValue();
- else if (Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o))
- OutputFilename = FinalOutput->getValue();
- else
- OutputFilename = llvm::sys::path::filename(Output.getBaseInput());
- SmallString<128> CoverageFilename = OutputFilename;
+ // TODO: Don't claim -c/-S to warn about -fsyntax-only -c/-S, -E -c/-S,
+ // like we warn about -fsyntax-only -E.
+ (void)(Args.hasArg(options::OPT_c) || Args.hasArg(options::OPT_S));
+
+ // Put the .gcno and .gcda files (if needed) next to the primary output file,
+ // or fall back to a file in the current directory for `clang -c --coverage
+ // d/a.c` in the absence of -o.
+ if (EmitCovNotes || EmitCovData) {
+ SmallString<128> CoverageFilename;
+ if (Arg *DumpDir = Args.getLastArgNoClaim(options::OPT_dumpdir)) {
+ // Form ${dumpdir}${basename}.gcno. Note that dumpdir may not end with a
+ // path separator.
+ CoverageFilename = DumpDir->getValue();
+ CoverageFilename += llvm::sys::path::filename(Output.getBaseInput());
+ } else if (Arg *FinalOutput =
+ C.getArgs().getLastArg(options::OPT__SLASH_Fo)) {
+ CoverageFilename = FinalOutput->getValue();
+ } else if (Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o)) {
+ CoverageFilename = FinalOutput->getValue();
+ } else {
+ CoverageFilename = llvm::sys::path::filename(Output.getBaseInput());
+ }
if (llvm::sys::path::is_relative(CoverageFilename))
(void)D.getVFS().makeAbsolute(CoverageFilename);
llvm::sys::path::replace_extension(CoverageFilename, "gcno");
-
- CmdArgs.push_back("-coverage-notes-file");
- CmdArgs.push_back(Args.MakeArgString(CoverageFilename));
+ if (EmitCovNotes) {
+ CmdArgs.push_back(
+ Args.MakeArgString("-coverage-notes-file=" + CoverageFilename));
+ }
if (EmitCovData) {
if (FProfileDir) {
+ SmallString<128> Gcno = std::move(CoverageFilename);
CoverageFilename = FProfileDir->getValue();
- llvm::sys::path::append(CoverageFilename, OutputFilename);
+ llvm::sys::path::append(CoverageFilename, Gcno);
}
llvm::sys::path::replace_extension(CoverageFilename, "gcda");
- CmdArgs.push_back("-coverage-data-file");
- CmdArgs.push_back(Args.MakeArgString(CoverageFilename));
+ CmdArgs.push_back(
+ Args.MakeArgString("-coverage-data-file=" + CoverageFilename));
}
}
}
@@ -978,11 +833,7 @@ static bool ContainsCompileAction(const Action *A) {
if (isa<CompileJobAction>(A) || isa<BackendJobAction>(A))
return true;
- for (const auto &AI : A->inputs())
- if (ContainsCompileAction(AI))
- return true;
-
- return false;
+ return llvm::any_of(A->inputs(), ContainsCompileAction);
}
/// Check if -relax-all should be passed to the internal assembler.
@@ -1007,52 +858,12 @@ static bool UseRelaxAll(Compilation &C, const ArgList &Args) {
RelaxDefault);
}
-// Extract the integer N from a string spelled "-dwarf-N", returning 0
-// on mismatch. The StringRef input (rather than an Arg) allows
-// for use by the "-Xassembler" option parser.
-static unsigned DwarfVersionNum(StringRef ArgValue) {
- return llvm::StringSwitch<unsigned>(ArgValue)
- .Case("-gdwarf-2", 2)
- .Case("-gdwarf-3", 3)
- .Case("-gdwarf-4", 4)
- .Case("-gdwarf-5", 5)
- .Default(0);
-}
-
-// Find a DWARF format version option.
-// This function is a complementary for DwarfVersionNum().
-static const Arg *getDwarfNArg(const ArgList &Args) {
- return Args.getLastArg(options::OPT_gdwarf_2, options::OPT_gdwarf_3,
- options::OPT_gdwarf_4, options::OPT_gdwarf_5,
- options::OPT_gdwarf);
-}
-
-static void RenderDebugEnablingArgs(const ArgList &Args, ArgStringList &CmdArgs,
- codegenoptions::DebugInfoKind DebugInfoKind,
- unsigned DwarfVersion,
- llvm::DebuggerKind DebuggerTuning) {
- switch (DebugInfoKind) {
- case codegenoptions::DebugDirectivesOnly:
- CmdArgs.push_back("-debug-info-kind=line-directives-only");
- break;
- case codegenoptions::DebugLineTablesOnly:
- CmdArgs.push_back("-debug-info-kind=line-tables-only");
- break;
- case codegenoptions::DebugInfoConstructor:
- CmdArgs.push_back("-debug-info-kind=constructor");
- break;
- case codegenoptions::LimitedDebugInfo:
- CmdArgs.push_back("-debug-info-kind=limited");
- break;
- case codegenoptions::FullDebugInfo:
- CmdArgs.push_back("-debug-info-kind=standalone");
- break;
- case codegenoptions::UnusedTypeInfo:
- CmdArgs.push_back("-debug-info-kind=unused-types");
- break;
- default:
- break;
- }
+static void
+RenderDebugEnablingArgs(const ArgList &Args, ArgStringList &CmdArgs,
+ llvm::codegenoptions::DebugInfoKind DebugInfoKind,
+ unsigned DwarfVersion,
+ llvm::DebuggerKind DebuggerTuning) {
+ addDebugInfoKind(CmdArgs, DebugInfoKind);
if (DwarfVersion > 0)
CmdArgs.push_back(
Args.MakeArgString("-dwarf-version=" + Twine(DwarfVersion)));
@@ -1095,40 +906,31 @@ static void RenderDebugInfoCompressionArgs(const ArgList &Args,
StringRef Value = A->getValue();
if (Value == "none") {
CmdArgs.push_back("--compress-debug-sections=none");
- } else if (Value == "zlib" || Value == "zlib-gnu") {
- if (llvm::zlib::isAvailable()) {
+ } else if (Value == "zlib") {
+ if (llvm::compression::zlib::isAvailable()) {
+ CmdArgs.push_back(
+ Args.MakeArgString("--compress-debug-sections=" + Twine(Value)));
+ } else {
+ D.Diag(diag::warn_debug_compression_unavailable) << "zlib";
+ }
+ } else if (Value == "zstd") {
+ if (llvm::compression::zstd::isAvailable()) {
CmdArgs.push_back(
Args.MakeArgString("--compress-debug-sections=" + Twine(Value)));
} else {
- D.Diag(diag::warn_debug_compression_unavailable);
+ D.Diag(diag::warn_debug_compression_unavailable) << "zstd";
}
} else {
D.Diag(diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << Value;
+ << A->getSpelling() << Value;
}
}
}
-static const char *RelocationModelName(llvm::Reloc::Model Model) {
- switch (Model) {
- case llvm::Reloc::Static:
- return "static";
- case llvm::Reloc::PIC_:
- return "pic";
- case llvm::Reloc::DynamicNoPIC:
- return "dynamic-no-pic";
- case llvm::Reloc::ROPI:
- return "ropi";
- case llvm::Reloc::RWPI:
- return "rwpi";
- case llvm::Reloc::ROPI_RWPI:
- return "ropi-rwpi";
- }
- llvm_unreachable("Unknown Reloc::Model kind");
-}
static void handleAMDGPUCodeObjectVersionOptions(const Driver &D,
const ArgList &Args,
- ArgStringList &CmdArgs) {
+ ArgStringList &CmdArgs,
+ bool IsCC1As = false) {
// If no version was requested by the user, use the default value from the
// back end. This is consistent with the value returned from
// getAMDGPUCodeObjectVersion. This lets clang emit IR for amdgpu without
@@ -1140,9 +942,53 @@ static void handleAMDGPUCodeObjectVersionOptions(const Driver &D,
Args.MakeArgString(Twine("--amdhsa-code-object-version=") +
Twine(CodeObjVer)));
CmdArgs.insert(CmdArgs.begin() + 1, "-mllvm");
+ // -cc1as does not accept -mcode-object-version option.
+ if (!IsCC1As)
+ CmdArgs.insert(CmdArgs.begin() + 1,
+ Args.MakeArgString(Twine("-mcode-object-version=") +
+ Twine(CodeObjVer)));
}
}
+static bool maybeHasClangPchSignature(const Driver &D, StringRef Path) {
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> MemBuf =
+ D.getVFS().getBufferForFile(Path);
+ if (!MemBuf)
+ return false;
+ llvm::file_magic Magic = llvm::identify_magic((*MemBuf)->getBuffer());
+ if (Magic == llvm::file_magic::unknown)
+ return false;
+ // Return true for both raw Clang AST files and object files which may
+ // contain a __clangast section.
+ if (Magic == llvm::file_magic::clang_ast)
+ return true;
+ Expected<std::unique_ptr<llvm::object::ObjectFile>> Obj =
+ llvm::object::ObjectFile::createObjectFile(**MemBuf, Magic);
+ return !Obj.takeError();
+}
+
+static bool gchProbe(const Driver &D, StringRef Path) {
+ llvm::ErrorOr<llvm::vfs::Status> Status = D.getVFS().status(Path);
+ if (!Status)
+ return false;
+
+ if (Status->isDirectory()) {
+ std::error_code EC;
+ for (llvm::vfs::directory_iterator DI = D.getVFS().dir_begin(Path, EC), DE;
+ !EC && DI != DE; DI = DI.increment(EC)) {
+ if (maybeHasClangPchSignature(D, DI->path()))
+ return true;
+ }
+ D.Diag(diag::warn_drv_pch_ignoring_gch_dir) << Path;
+ return false;
+ }
+
+ if (maybeHasClangPchSignature(D, Path))
+ return true;
+ D.Diag(diag::warn_drv_pch_ignoring_gch_file) << Path;
+ return false;
+}
+
void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
const Driver &D, const ArgList &Args,
ArgStringList &CmdArgs,
@@ -1195,7 +1041,7 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
} else {
CmdArgs.push_back("-MT");
SmallString<128> Quoted;
- QuoteTarget(A->getValue(), Quoted);
+ quoteMakeTarget(A->getValue(), Quoted);
CmdArgs.push_back(Args.MakeArgString(Quoted));
}
}
@@ -1220,7 +1066,7 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-MT");
SmallString<128> Quoted;
- QuoteTarget(DepTarget, Quoted);
+ quoteMakeTarget(DepTarget, Quoted);
CmdArgs.push_back(Args.MakeArgString(Quoted));
}
@@ -1252,9 +1098,38 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
if (JA.isOffloading(Action::OFK_HIP))
getToolChain().AddHIPIncludeArgs(Args, CmdArgs);
+ // If we are compiling for a GPU target we want to override the system headers
+ // with ones created by the 'libc' project if present.
+ if (!Args.hasArg(options::OPT_nostdinc) &&
+ !Args.hasArg(options::OPT_nogpuinc) &&
+ !Args.hasArg(options::OPT_nobuiltininc)) {
+ // Without an offloading language we will include these headers directly.
+ // Offloading languages will instead only use the declarations stored in
+ // the resource directory at clang/lib/Headers/llvm_libc_wrappers.
+ if ((getToolChain().getTriple().isNVPTX() ||
+ getToolChain().getTriple().isAMDGCN()) &&
+ C.getActiveOffloadKinds() == Action::OFK_None) {
+ SmallString<128> P(llvm::sys::path::parent_path(D.InstalledDir));
+ llvm::sys::path::append(P, "include");
+ llvm::sys::path::append(P, "gpu-none-llvm");
+ CmdArgs.push_back("-c-isystem");
+ CmdArgs.push_back(Args.MakeArgString(P));
+ } else if (C.getActiveOffloadKinds() == Action::OFK_OpenMP) {
+ // TODO: CUDA / HIP include their own headers for some common functions
+ // implemented here. We'll need to clean those up so they do not conflict.
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "include");
+ llvm::sys::path::append(P, "llvm_libc_wrappers");
+ CmdArgs.push_back("-internal-isystem");
+ CmdArgs.push_back(Args.MakeArgString(P));
+ }
+ }
+
// If we are offloading to a target via OpenMP we need to include the
// openmp_wrappers folder which contains alternative system headers.
if (JA.isDeviceOffloading(Action::OFK_OpenMP) &&
+ !Args.hasArg(options::OPT_nostdinc) &&
+ !Args.hasArg(options::OPT_nogpuinc) &&
(getToolChain().getTriple().isNVPTX() ||
getToolChain().getTriple().isAMDGCN())) {
if (!Args.hasArg(options::OPT_nobuiltininc)) {
@@ -1311,7 +1186,8 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
bool RenderedImplicitInclude = false;
for (const Arg *A : Args.filtered(options::OPT_clang_i_Group)) {
- if (A->getOption().matches(options::OPT_include)) {
+ if (A->getOption().matches(options::OPT_include) &&
+ D.getProbePrecompiled()) {
// Handling of gcc-style gch precompiled headers.
bool IsFirstImplicitInclude = !RenderedImplicitInclude;
RenderedImplicitInclude = true;
@@ -1322,14 +1198,13 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
// so that replace_extension does the right thing.
P += ".dummy";
llvm::sys::path::replace_extension(P, "pch");
- if (llvm::sys::fs::exists(P))
+ if (D.getVFS().exists(P))
FoundPCH = true;
if (!FoundPCH) {
+ // For GCC compat, probe for a file or directory ending in .gch instead.
llvm::sys::path::replace_extension(P, "gch");
- if (llvm::sys::fs::exists(P)) {
- FoundPCH = true;
- }
+ FoundPCH = gchProbe(D, P.str());
}
if (FoundPCH) {
@@ -1354,6 +1229,9 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
} else if (A->getOption().matches(options::OPT_stdlibxx_isystem)) {
// Translated to -internal-isystem by the driver, no need to pass to cc1.
continue;
+ } else if (A->getOption().matches(options::OPT_ibuiltininc)) {
+ // This is used only by the driver. No need to pass to cc1.
+ continue;
}
// Not translated, render as usual.
@@ -1361,7 +1239,7 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
A->render(Args, CmdArgs);
}
- Args.AddAllArgs(CmdArgs,
+ Args.addAllArgs(CmdArgs,
{options::OPT_D, options::OPT_U, options::OPT_I_Group,
options::OPT_F, options::OPT_index_header_map});
@@ -1431,6 +1309,17 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
addMacroPrefixMapArg(D, Args, CmdArgs);
addCoveragePrefixMapArg(D, Args, CmdArgs);
+
+ Args.AddLastArg(CmdArgs, options::OPT_ffile_reproducible,
+ options::OPT_fno_file_reproducible);
+
+ if (const char *Epoch = std::getenv("SOURCE_DATE_EPOCH")) {
+ CmdArgs.push_back("-source-date-epoch");
+ CmdArgs.push_back(Args.MakeArgString(Epoch));
+ }
+
+ Args.addOptInFlag(CmdArgs, options::OPT_fdefine_target_os_macros,
+ options::OPT_fno_define_target_os_macros);
}
// FIXME: Move to target hook.
@@ -1587,8 +1476,8 @@ void AddAAPCSVolatileBitfieldArgs(const ArgList &Args, ArgStringList &CmdArgs) {
}
namespace {
-void RenderARMABI(const llvm::Triple &Triple, const ArgList &Args,
- ArgStringList &CmdArgs) {
+void RenderARMABI(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args, ArgStringList &CmdArgs) {
// Select the ABI to use.
// FIXME: Support -meabi.
// FIXME: Parts of this are duplicated in the backend, unify this somehow.
@@ -1596,18 +1485,85 @@ void RenderARMABI(const llvm::Triple &Triple, const ArgList &Args,
if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ)) {
ABIName = A->getValue();
} else {
- std::string CPU = getCPUName(Args, Triple, /*FromAs*/ false);
+ std::string CPU = getCPUName(D, Args, Triple, /*FromAs*/ false);
ABIName = llvm::ARM::computeDefaultTargetABI(Triple, CPU).data();
}
CmdArgs.push_back("-target-abi");
CmdArgs.push_back(ABIName);
}
+
+void AddUnalignedAccessWarning(ArgStringList &CmdArgs) {
+ auto StrictAlignIter =
+ llvm::find_if(llvm::reverse(CmdArgs), [](StringRef Arg) {
+ return Arg == "+strict-align" || Arg == "-strict-align";
+ });
+ if (StrictAlignIter != CmdArgs.rend() &&
+ StringRef(*StrictAlignIter) == "+strict-align")
+ CmdArgs.push_back("-Wunaligned-access");
+}
+}
+
+static void CollectARMPACBTIOptions(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs, bool isAArch64) {
+ const Arg *A = isAArch64
+ ? Args.getLastArg(options::OPT_msign_return_address_EQ,
+ options::OPT_mbranch_protection_EQ)
+ : Args.getLastArg(options::OPT_mbranch_protection_EQ);
+ if (!A)
+ return;
+
+ const Driver &D = TC.getDriver();
+ const llvm::Triple &Triple = TC.getEffectiveTriple();
+ if (!(isAArch64 || (Triple.isArmT32() && Triple.isArmMClass())))
+ D.Diag(diag::warn_incompatible_branch_protection_option)
+ << Triple.getArchName();
+
+ StringRef Scope, Key;
+ bool IndirectBranches, BranchProtectionPAuthLR, GuardedControlStack;
+
+ if (A->getOption().matches(options::OPT_msign_return_address_EQ)) {
+ Scope = A->getValue();
+ if (Scope != "none" && Scope != "non-leaf" && Scope != "all")
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << Scope;
+ Key = "a_key";
+ IndirectBranches = false;
+ BranchProtectionPAuthLR = false;
+ GuardedControlStack = false;
+ } else {
+ StringRef DiagMsg;
+ llvm::ARM::ParsedBranchProtection PBP;
+ if (!llvm::ARM::parseBranchProtection(A->getValue(), PBP, DiagMsg))
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << DiagMsg;
+ if (!isAArch64 && PBP.Key == "b_key")
+ D.Diag(diag::warn_unsupported_branch_protection)
+ << "b-key" << A->getAsString(Args);
+ Scope = PBP.Scope;
+ Key = PBP.Key;
+ BranchProtectionPAuthLR = PBP.BranchProtectionPAuthLR;
+ IndirectBranches = PBP.BranchTargetEnforcement;
+ GuardedControlStack = PBP.GuardedControlStack;
+ }
+
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-msign-return-address=") + Scope));
+ if (!Scope.equals("none"))
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-msign-return-address-key=") + Key));
+ if (BranchProtectionPAuthLR)
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-mbranch-protection-pauth-lr")));
+ if (IndirectBranches)
+ CmdArgs.push_back("-mbranch-target-enforce");
+ if (GuardedControlStack)
+ CmdArgs.push_back("-mguarded-control-stack");
}
void Clang::AddARMTargetArgs(const llvm::Triple &Triple, const ArgList &Args,
ArgStringList &CmdArgs, bool KernelOrKext) const {
- RenderARMABI(Triple, Args, CmdArgs);
+ RenderARMABI(getToolChain().getDriver(), Triple, Args, CmdArgs);
// Determine floating point ABI from the options & target defaults.
arm::FloatABI ABI = arm::getARMFloatABI(getToolChain(), Args);
@@ -1646,6 +1602,11 @@ void Clang::AddARMTargetArgs(const llvm::Triple &Triple, const ArgList &Args,
CmdArgs.push_back("-mcmse");
AddAAPCSVolatileBitfieldArgs(Args, CmdArgs);
+
+ // Enable/disable return address signing and indirect branch targets.
+ CollectARMPACBTIOptions(getToolChain(), Args, CmdArgs, false /*isAArch64*/);
+
+ AddUnalignedAccessWarning(CmdArgs);
}
void Clang::RenderTargetOptions(const llvm::Triple &EffectiveTriple,
@@ -1667,14 +1628,17 @@ void Clang::RenderTargetOptions(const llvm::Triple &EffectiveTriple,
case llvm::Triple::thumbeb:
// Use the effective triple, which takes into account the deployment target.
AddARMTargetArgs(EffectiveTriple, Args, CmdArgs, KernelOrKext);
- CmdArgs.push_back("-fallow-half-arguments-and-returns");
break;
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_32:
case llvm::Triple::aarch64_be:
AddAArch64TargetArgs(Args, CmdArgs);
- CmdArgs.push_back("-fallow-half-arguments-and-returns");
+ break;
+
+ case llvm::Triple::loongarch32:
+ case llvm::Triple::loongarch64:
+ AddLoongArchTargetArgs(Args, CmdArgs);
break;
case llvm::Triple::mips:
@@ -1761,19 +1725,6 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
RenderAArch64ABI(Triple, Args, CmdArgs);
- if (Arg *A = Args.getLastArg(options::OPT_mfix_cortex_a53_835769,
- options::OPT_mno_fix_cortex_a53_835769)) {
- CmdArgs.push_back("-mllvm");
- if (A->getOption().matches(options::OPT_mfix_cortex_a53_835769))
- CmdArgs.push_back("-aarch64-fix-cortex-a53-835769=1");
- else
- CmdArgs.push_back("-aarch64-fix-cortex-a53-835769=0");
- } else if (Triple.isAndroid()) {
- // Enabled A53 errata (835769) workaround by default on android
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back("-aarch64-fix-cortex-a53-835769=1");
- }
-
// Forward the -mglobal-merge option for explicit control over the pass.
if (Arg *A = Args.getLastArg(options::OPT_mglobal_merge,
options::OPT_mno_global_merge)) {
@@ -1785,57 +1736,66 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
}
// Enable/disable return address signing and indirect branch targets.
- if (Arg *A = Args.getLastArg(options::OPT_msign_return_address_EQ,
- options::OPT_mbranch_protection_EQ)) {
-
- const Driver &D = getToolChain().getDriver();
-
- StringRef Scope, Key;
- bool IndirectBranches;
-
- if (A->getOption().matches(options::OPT_msign_return_address_EQ)) {
- Scope = A->getValue();
- if (!Scope.equals("none") && !Scope.equals("non-leaf") &&
- !Scope.equals("all"))
- D.Diag(diag::err_invalid_branch_protection)
- << Scope << A->getAsString(Args);
- Key = "a_key";
- IndirectBranches = false;
- } else {
- StringRef Err;
- llvm::AArch64::ParsedBranchProtection PBP;
- if (!llvm::AArch64::parseBranchProtection(A->getValue(), PBP, Err))
- D.Diag(diag::err_invalid_branch_protection)
- << Err << A->getAsString(Args);
- Scope = PBP.Scope;
- Key = PBP.Key;
- IndirectBranches = PBP.BranchTargetEnforcement;
- }
-
- CmdArgs.push_back(
- Args.MakeArgString(Twine("-msign-return-address=") + Scope));
- CmdArgs.push_back(
- Args.MakeArgString(Twine("-msign-return-address-key=") + Key));
- if (IndirectBranches)
- CmdArgs.push_back("-mbranch-target-enforce");
- }
+ CollectARMPACBTIOptions(getToolChain(), Args, CmdArgs, true /*isAArch64*/);
// Handle -msve_vector_bits=<bits>
if (Arg *A = Args.getLastArg(options::OPT_msve_vector_bits_EQ)) {
StringRef Val = A->getValue();
const Driver &D = getToolChain().getDriver();
if (Val.equals("128") || Val.equals("256") || Val.equals("512") ||
- Val.equals("1024") || Val.equals("2048"))
+ Val.equals("1024") || Val.equals("2048") || Val.equals("128+") ||
+ Val.equals("256+") || Val.equals("512+") || Val.equals("1024+") ||
+ Val.equals("2048+")) {
+ unsigned Bits = 0;
+ if (Val.ends_with("+"))
+ Val = Val.substr(0, Val.size() - 1);
+ else {
+ bool Invalid = Val.getAsInteger(10, Bits); (void)Invalid;
+ assert(!Invalid && "Failed to parse value");
+ CmdArgs.push_back(
+ Args.MakeArgString("-mvscale-max=" + llvm::Twine(Bits / 128)));
+ }
+
+ bool Invalid = Val.getAsInteger(10, Bits); (void)Invalid;
+ assert(!Invalid && "Failed to parse value");
CmdArgs.push_back(
- Args.MakeArgString(llvm::Twine("-msve-vector-bits=") + Val));
+ Args.MakeArgString("-mvscale-min=" + llvm::Twine(Bits / 128)));
// Silently drop requests for vector-length agnostic code as it's implied.
- else if (!Val.equals("scalable"))
+ } else if (!Val.equals("scalable"))
// Handle the unsupported values passed to msve-vector-bits.
D.Diag(diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << Val;
+ << A->getSpelling() << Val;
}
AddAAPCSVolatileBitfieldArgs(Args, CmdArgs);
+
+ if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_mtune_EQ)) {
+ CmdArgs.push_back("-tune-cpu");
+ if (strcmp(A->getValue(), "native") == 0)
+ CmdArgs.push_back(Args.MakeArgString(llvm::sys::getHostCPUName()));
+ else
+ CmdArgs.push_back(A->getValue());
+ }
+
+ AddUnalignedAccessWarning(CmdArgs);
+}
+
+void Clang::AddLoongArchTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ const llvm::Triple &Triple = getToolChain().getTriple();
+
+ CmdArgs.push_back("-target-abi");
+ CmdArgs.push_back(
+ loongarch::getLoongArchABI(getToolChain().getDriver(), Args, Triple)
+ .data());
+
+ // Handle -mtune.
+ if (const Arg *A = Args.getLastArg(options::OPT_mtune_EQ)) {
+ std::string TuneCPU = A->getValue();
+ TuneCPU = loongarch::postProcessTargetCPUString(TuneCPU, Triple);
+ CmdArgs.push_back("-tune-cpu");
+ CmdArgs.push_back(Args.MakeArgString(TuneCPU));
+ }
}
void Clang::AddMIPSTargetArgs(const ArgList &Args,
@@ -1878,6 +1838,11 @@ void Clang::AddMIPSTargetArgs(const ArgList &Args,
}
}
+ if (Args.getLastArg(options::OPT_mfix4300)) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-mfix4300");
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_G)) {
StringRef v = A->getValue();
CmdArgs.push_back("-mllvm");
@@ -1967,7 +1932,7 @@ void Clang::AddMIPSTargetArgs(const ArgList &Args,
CmdArgs.push_back(Args.MakeArgString("-mips-compact-branches=" + Val));
} else
D.Diag(diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << Val;
+ << A->getSpelling() << Val;
} else
D.Diag(diag::warn_target_unsupported_compact_branches) << CPUName;
}
@@ -1983,14 +1948,20 @@ void Clang::AddMIPSTargetArgs(const ArgList &Args,
void Clang::AddPPCTargetArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
+ const Driver &D = getToolChain().getDriver();
+ const llvm::Triple &T = getToolChain().getTriple();
+ if (Args.getLastArg(options::OPT_mtune_EQ)) {
+ CmdArgs.push_back("-tune-cpu");
+ std::string CPU = ppc::getPPCTuneCPU(Args, T);
+ CmdArgs.push_back(Args.MakeArgString(CPU));
+ }
+
// Select the ABI to use.
const char *ABIName = nullptr;
- const llvm::Triple &T = getToolChain().getTriple();
if (T.isOSBinFormatELF()) {
switch (getToolChain().getArch()) {
case llvm::Triple::ppc64: {
- if ((T.isOSFreeBSD() && T.getOSMajorVersion() >= 13) ||
- T.isOSOpenBSD() || T.isMusl())
+ if (T.isPPC64ELFv2ABI())
ABIName = "elfv2";
else
ABIName = "elfv1";
@@ -2004,14 +1975,29 @@ void Clang::AddPPCTargetArgs(const ArgList &Args,
}
}
- bool IEEELongDouble = false;
+ bool IEEELongDouble = getToolChain().defaultToIEEELongDouble();
+ bool VecExtabi = false;
for (const Arg *A : Args.filtered(options::OPT_mabi_EQ)) {
StringRef V = A->getValue();
- if (V == "ieeelongdouble")
+ if (V == "ieeelongdouble") {
IEEELongDouble = true;
- else if (V == "ibmlongdouble")
+ A->claim();
+ } else if (V == "ibmlongdouble") {
IEEELongDouble = false;
- else if (V != "altivec")
+ A->claim();
+ } else if (V == "vec-default") {
+ VecExtabi = false;
+ A->claim();
+ } else if (V == "vec-extabi") {
+ VecExtabi = true;
+ A->claim();
+ } else if (V == "elfv1") {
+ ABIName = "elfv1";
+ A->claim();
+ } else if (V == "elfv2") {
+ ABIName = "elfv2";
+ A->claim();
+ } else if (V != "altivec")
// The ppc64 linux abis are all "altivec" abis by default. Accept and ignore
// the option if given as we don't have backend support for any targets
// that don't use the altivec abi.
@@ -2019,10 +2005,14 @@ void Clang::AddPPCTargetArgs(const ArgList &Args,
}
if (IEEELongDouble)
CmdArgs.push_back("-mabi=ieeelongdouble");
+ if (VecExtabi) {
+ if (!T.isOSAIX())
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << "-mabi=vec-extabi" << T.str();
+ CmdArgs.push_back("-mabi=vec-extabi");
+ }
- ppc::FloatABI FloatABI =
- ppc::getPPCFloatABI(getToolChain().getDriver(), Args);
-
+ ppc::FloatABI FloatABI = ppc::getPPCFloatABI(D, Args);
if (FloatABI == ppc::FloatABI::Soft) {
// Floating point operations and argument passing are soft.
CmdArgs.push_back("-msoft-float");
@@ -2063,6 +2053,12 @@ static void SetRISCVSmallDataLimit(const ToolChain &TC, const ArgList &Args,
if (Args.hasArg(options::OPT_G)) {
D.Diag(diag::warn_drv_unsupported_sdata);
}
+ } else if (Triple.isAndroid()) {
+ // GP relaxation is not supported on Android.
+ SmallDataLimit = "0";
+ if (Args.hasArg(options::OPT_G)) {
+ D.Diag(diag::warn_drv_unsupported_sdata);
+ }
} else if (Arg *A = Args.getLastArg(options::OPT_G)) {
SmallDataLimit = A->getValue();
}
@@ -2081,18 +2077,57 @@ void Clang::AddRISCVTargetArgs(const ArgList &Args,
SetRISCVSmallDataLimit(getToolChain(), Args, CmdArgs);
- std::string TuneCPU;
-
- if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_mtune_EQ)) {
- StringRef Name = A->getValue();
+ if (!Args.hasFlag(options::OPT_mimplicit_float,
+ options::OPT_mno_implicit_float, true))
+ CmdArgs.push_back("-no-implicit-float");
- Name = llvm::RISCV::resolveTuneCPUAlias(Name, Triple.isArch64Bit());
- TuneCPU = std::string(Name);
+ if (const Arg *A = Args.getLastArg(options::OPT_mtune_EQ)) {
+ CmdArgs.push_back("-tune-cpu");
+ if (strcmp(A->getValue(), "native") == 0)
+ CmdArgs.push_back(Args.MakeArgString(llvm::sys::getHostCPUName()));
+ else
+ CmdArgs.push_back(A->getValue());
}
- if (!TuneCPU.empty()) {
- CmdArgs.push_back("-tune-cpu");
- CmdArgs.push_back(Args.MakeArgString(TuneCPU));
+ // Handle -mrvv-vector-bits=<bits>
+ if (Arg *A = Args.getLastArg(options::OPT_mrvv_vector_bits_EQ)) {
+ StringRef Val = A->getValue();
+ const Driver &D = getToolChain().getDriver();
+
+ // Get minimum VLen from march.
+ unsigned MinVLen = 0;
+ StringRef Arch = riscv::getRISCVArch(Args, Triple);
+ auto ISAInfo = llvm::RISCVISAInfo::parseArchString(
+ Arch, /*EnableExperimentalExtensions*/ true);
+ // Ignore parsing error.
+ if (!errorToBool(ISAInfo.takeError()))
+ MinVLen = (*ISAInfo)->getMinVLen();
+
+ // If the value is "zvl", use MinVLen from march. Otherwise, try to parse
+ // as integer as long as we have a MinVLen.
+ unsigned Bits = 0;
+ if (Val.equals("zvl") && MinVLen >= llvm::RISCV::RVVBitsPerBlock) {
+ Bits = MinVLen;
+ } else if (!Val.getAsInteger(10, Bits)) {
+ // Only accept power of 2 values beteen RVVBitsPerBlock and 65536 that
+ // at least MinVLen.
+ if (Bits < MinVLen || Bits < llvm::RISCV::RVVBitsPerBlock ||
+ Bits > 65536 || !llvm::isPowerOf2_32(Bits))
+ Bits = 0;
+ }
+
+ // If we got a valid value try to use it.
+ if (Bits != 0) {
+ unsigned VScaleMin = Bits / llvm::RISCV::RVVBitsPerBlock;
+ CmdArgs.push_back(
+ Args.MakeArgString("-mvscale-max=" + llvm::Twine(VScaleMin)));
+ CmdArgs.push_back(
+ Args.MakeArgString("-mvscale-min=" + llvm::Twine(VScaleMin)));
+ } else if (!Val.equals("scalable")) {
+ // Handle the unsupported values passed to mrvv-vector-bits.
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << Val;
+ }
}
}
@@ -2112,12 +2147,32 @@ void Clang::AddSparcTargetArgs(const ArgList &Args,
CmdArgs.push_back("-mfloat-abi");
CmdArgs.push_back("hard");
}
+
+ if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_mtune_EQ)) {
+ StringRef Name = A->getValue();
+ std::string TuneCPU;
+ if (Name == "native")
+ TuneCPU = std::string(llvm::sys::getHostCPUName());
+ else
+ TuneCPU = std::string(Name);
+
+ CmdArgs.push_back("-tune-cpu");
+ CmdArgs.push_back(Args.MakeArgString(TuneCPU));
+ }
}
void Clang::AddSystemZTargetArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
- bool HasBackchain = Args.hasFlag(options::OPT_mbackchain,
- options::OPT_mno_backchain, false);
+ if (const Arg *A = Args.getLastArg(options::OPT_mtune_EQ)) {
+ CmdArgs.push_back("-tune-cpu");
+ if (strcmp(A->getValue(), "native") == 0)
+ CmdArgs.push_back(Args.MakeArgString(llvm::sys::getHostCPUName()));
+ else
+ CmdArgs.push_back(A->getValue());
+ }
+
+ bool HasBackchain =
+ Args.hasFlag(options::OPT_mbackchain, options::OPT_mno_backchain, false);
bool HasPackedStack = Args.hasFlag(options::OPT_mpacked_stack,
options::OPT_mno_packed_stack, false);
systemz::FloatABI FloatABI =
@@ -2173,15 +2228,21 @@ void Clang::AddX86TargetArgs(const ArgList &Args,
if (Value == "intel" || Value == "att") {
CmdArgs.push_back("-mllvm");
CmdArgs.push_back(Args.MakeArgString("-x86-asm-syntax=" + Value));
+ CmdArgs.push_back(Args.MakeArgString("-inline-asm=" + Value));
} else {
D.Diag(diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << Value;
+ << A->getSpelling() << Value;
}
} else if (D.IsCLMode()) {
CmdArgs.push_back("-mllvm");
CmdArgs.push_back("-x86-asm-syntax=intel");
}
+ if (Arg *A = Args.getLastArg(options::OPT_mskip_rax_setup,
+ options::OPT_mno_skip_rax_setup))
+ if (A->getOption().matches(options::OPT_mskip_rax_setup))
+ CmdArgs.push_back(Args.MakeArgString("-mskip-rax-setup"));
+
// Set flags to support MCU ABI.
if (Args.hasFlag(options::OPT_miamcu, options::OPT_mno_iamcu, false)) {
CmdArgs.push_back("-mfloat-abi");
@@ -2191,10 +2252,10 @@ void Clang::AddX86TargetArgs(const ArgList &Args,
// Handle -mtune.
- // Default to "generic" unless -march is present or targetting the PS4.
+ // Default to "generic" unless -march is present or targetting the PS4/PS5.
std::string TuneCPU;
if (!Args.hasArg(clang::driver::options::OPT_march_EQ) &&
- !getToolChain().getTriple().isPS4CPU())
+ !getToolChain().getTriple().isPS())
TuneCPU = "generic";
// Override based on -mtune.
@@ -2222,8 +2283,8 @@ void Clang::AddHexagonTargetArgs(const ArgList &Args,
if (auto G = toolchains::HexagonToolChain::getSmallDataThreshold(Args)) {
CmdArgs.push_back("-mllvm");
- CmdArgs.push_back(Args.MakeArgString("-hexagon-small-data-threshold=" +
- Twine(G.getValue())));
+ CmdArgs.push_back(
+ Args.MakeArgString("-hexagon-small-data-threshold=" + Twine(*G)));
}
if (!Args.hasArg(options::OPT_fno_short_enums))
@@ -2253,7 +2314,7 @@ void Clang::AddLanaiTargetArgs(const ArgList &Args,
if (Mregparm != 4) {
getToolChain().getDriver().Diag(
diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << Value;
+ << A->getSpelling() << Value;
}
}
}
@@ -2263,10 +2324,8 @@ void Clang::AddWebAssemblyTargetArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
// Default to "hidden" visibility.
if (!Args.hasArg(options::OPT_fvisibility_EQ,
- options::OPT_fvisibility_ms_compat)) {
- CmdArgs.push_back("-fvisibility");
- CmdArgs.push_back("hidden");
- }
+ options::OPT_fvisibility_ms_compat))
+ CmdArgs.push_back("-fvisibility=hidden");
}
void Clang::AddVETargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const {
@@ -2288,7 +2347,8 @@ void Clang::DumpCompilationDatabase(Compilation &C, StringRef Filename,
if (!CompilationDatabase) {
std::error_code EC;
auto File = std::make_unique<llvm::raw_fd_ostream>(
- Filename, EC, llvm::sys::fs::OF_TextWithCRLF);
+ Filename, EC,
+ llvm::sys::fs::OF_TextWithCRLF | llvm::sys::fs::OF_Append);
if (EC) {
D.Diag(clang::diag::err_drv_compilationdatabase) << Filename
<< EC.message();
@@ -2302,7 +2362,8 @@ void Clang::DumpCompilationDatabase(Compilation &C, StringRef Filename,
CWD = ".";
CDB << "{ \"directory\": \"" << escape(*CWD) << "\"";
CDB << ", \"file\": \"" << escape(Input.getFilename()) << "\"";
- CDB << ", \"output\": \"" << escape(Output.getFilename()) << "\"";
+ if (Output.isFilename())
+ CDB << ", \"output\": \"" << escape(Output.getFilename()) << "\"";
CDB << ", \"arguments\": [\"" << escape(D.ClangExecutable) << "\"";
SmallString<128> Buf;
Buf = "-x";
@@ -2314,6 +2375,8 @@ void Clang::DumpCompilationDatabase(Compilation &C, StringRef Filename,
CDB << ", \"" << escape(Buf) << "\"";
}
CDB << ", \"" << escape(Input.getFilename()) << "\"";
+ if (Output.isFilename())
+ CDB << ", \"-o\", \"" << escape(Output.getFilename()) << "\"";
for (auto &A: Args) {
auto &O = A->getOption();
// Skip language selection, which is positional.
@@ -2327,6 +2390,9 @@ void Clang::DumpCompilationDatabase(Compilation &C, StringRef Filename,
// Skip inputs.
if (O.getKind() == Option::InputClass)
continue;
+ // Skip output.
+ if (O.getID() == options::OPT_o)
+ continue;
// All other arguments are quoted and appended.
ArgStringList ASL;
A->render(Args, ASL);
@@ -2400,6 +2466,11 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
DefaultIncrementalLinkerCompatible))
CmdArgs.push_back("-mincremental-linker-compatible");
+ Args.AddLastArg(CmdArgs, options::OPT_femit_dwarf_unwind_EQ);
+
+ Args.addOptInFlag(CmdArgs, options::OPT_femit_compact_unwind_non_canonical,
+ options::OPT_fno_emit_compact_unwind_non_canonical);
+
// If you add more args here, also add them to the block below that
// starts with "// If CollectArgsForIntegratedAssembler() isn't called below".
@@ -2411,7 +2482,7 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
bool TakeNextArg = false;
bool UseRelaxRelocations = C.getDefaultToolChain().useRelaxRelocations();
- bool UseNoExecStack = C.getDefaultToolChain().isNoExecStackDefault();
+ bool UseNoExecStack = false;
const char *MipsTargetFeature = nullptr;
StringRef ImplicitIt;
for (const Arg *A :
@@ -2429,7 +2500,7 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
ImplicitIt = A->getValue();
if (!CheckARMImplicitITArg(ImplicitIt))
D.Diag(diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << ImplicitIt;
+ << A->getSpelling() << ImplicitIt;
continue;
default:
break;
@@ -2450,11 +2521,18 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
switch (C.getDefaultToolChain().getArch()) {
default:
break;
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64:
+ if (Value == "--no-type-check") {
+ CmdArgs.push_back("-mno-type-check");
+ continue;
+ }
+ break;
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
case llvm::Triple::arm:
case llvm::Triple::armeb:
- if (Value.startswith("-mimplicit-it=")) {
+ if (Value.starts_with("-mimplicit-it=")) {
// Only store the value; the last value set takes effect.
ImplicitIt = Value.split("=").second;
if (CheckARMImplicitITArg(ImplicitIt))
@@ -2479,12 +2557,12 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
CmdArgs.push_back("-use-tcc-in-div");
continue;
}
- if (Value.startswith("-msoft-float")) {
+ if (Value.starts_with("-msoft-float")) {
CmdArgs.push_back("-target-feature");
CmdArgs.push_back("+soft-float");
continue;
}
- if (Value.startswith("-mhard-float")) {
+ if (Value.starts_with("-mhard-float")) {
CmdArgs.push_back("-target-feature");
CmdArgs.push_back("-soft-float");
continue;
@@ -2521,8 +2599,8 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
CmdArgs.push_back("-massembler-no-warn");
} else if (Value == "--noexecstack") {
UseNoExecStack = true;
- } else if (Value.startswith("-compress-debug-sections") ||
- Value.startswith("--compress-debug-sections") ||
+ } else if (Value.starts_with("-compress-debug-sections") ||
+ Value.starts_with("--compress-debug-sections") ||
Value == "-nocompress-debug-sections" ||
Value == "--nocompress-debug-sections") {
CmdArgs.push_back(Value.data());
@@ -2532,46 +2610,46 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
} else if (Value == "-mrelax-relocations=no" ||
Value == "--mrelax-relocations=no") {
UseRelaxRelocations = false;
- } else if (Value.startswith("-I")) {
+ } else if (Value.starts_with("-I")) {
CmdArgs.push_back(Value.data());
// We need to consume the next argument if the current arg is a plain
// -I. The next arg will be the include directory.
if (Value == "-I")
TakeNextArg = true;
- } else if (Value.startswith("-gdwarf-")) {
+ } else if (Value.starts_with("-gdwarf-")) {
// "-gdwarf-N" options are not cc1as options.
unsigned DwarfVersion = DwarfVersionNum(Value);
if (DwarfVersion == 0) { // Send it onward, and let cc1as complain.
CmdArgs.push_back(Value.data());
} else {
RenderDebugEnablingArgs(Args, CmdArgs,
- codegenoptions::DebugInfoConstructor,
+ llvm::codegenoptions::DebugInfoConstructor,
DwarfVersion, llvm::DebuggerKind::Default);
}
- } else if (Value.startswith("-mcpu") || Value.startswith("-mfpu") ||
- Value.startswith("-mhwdiv") || Value.startswith("-march")) {
+ } else if (Value.starts_with("-mcpu") || Value.starts_with("-mfpu") ||
+ Value.starts_with("-mhwdiv") || Value.starts_with("-march")) {
// Do nothing, we'll validate it later.
} else if (Value == "-defsym") {
- if (A->getNumValues() != 2) {
- D.Diag(diag::err_drv_defsym_invalid_format) << Value;
- break;
- }
- const char *S = A->getValue(1);
- auto Pair = StringRef(S).split('=');
- auto Sym = Pair.first;
- auto SVal = Pair.second;
-
- if (Sym.empty() || SVal.empty()) {
- D.Diag(diag::err_drv_defsym_invalid_format) << S;
- break;
- }
- int64_t IVal;
- if (SVal.getAsInteger(0, IVal)) {
- D.Diag(diag::err_drv_defsym_invalid_symval) << SVal;
- break;
- }
- CmdArgs.push_back(Value.data());
- TakeNextArg = true;
+ if (A->getNumValues() != 2) {
+ D.Diag(diag::err_drv_defsym_invalid_format) << Value;
+ break;
+ }
+ const char *S = A->getValue(1);
+ auto Pair = StringRef(S).split('=');
+ auto Sym = Pair.first;
+ auto SVal = Pair.second;
+
+ if (Sym.empty() || SVal.empty()) {
+ D.Diag(diag::err_drv_defsym_invalid_format) << S;
+ break;
+ }
+ int64_t IVal;
+ if (SVal.getAsInteger(0, IVal)) {
+ D.Diag(diag::err_drv_defsym_invalid_symval) << SVal;
+ break;
+ }
+ CmdArgs.push_back(Value.data());
+ TakeNextArg = true;
} else if (Value == "-fdebug-compilation-dir") {
CmdArgs.push_back("-fdebug-compilation-dir");
TakeNextArg = true;
@@ -2585,14 +2663,14 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
D.PrintVersion(C, llvm::outs());
} else {
D.Diag(diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << Value;
+ << A->getSpelling() << Value;
}
}
}
if (ImplicitIt.size())
AddARMImplicitITArgs(Args, CmdArgs, ImplicitIt);
- if (UseRelaxRelocations)
- CmdArgs.push_back("--mrelax-relocations");
+ if (!UseRelaxRelocations)
+ CmdArgs.push_back("-mrelax-relocations=no");
if (UseNoExecStack)
CmdArgs.push_back("-mnoexecstack");
if (MipsTargetFeature != nullptr) {
@@ -2604,6 +2682,53 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
if (C.getDriver().embedBitcodeEnabled() ||
C.getDriver().embedBitcodeMarkerOnly())
Args.AddLastArg(CmdArgs, options::OPT_fembed_bitcode_EQ);
+
+ if (const char *AsSecureLogFile = getenv("AS_SECURE_LOG_FILE")) {
+ CmdArgs.push_back("-as-secure-log-file");
+ CmdArgs.push_back(Args.MakeArgString(AsSecureLogFile));
+ }
+}
+
+static StringRef EnumComplexRangeToStr(LangOptions::ComplexRangeKind Range) {
+ StringRef RangeStr = "";
+ switch (Range) {
+ case LangOptions::ComplexRangeKind::CX_Limited:
+ return "-fcx-limited-range";
+ break;
+ case LangOptions::ComplexRangeKind::CX_Fortran:
+ return "-fcx-fortran-rules";
+ break;
+ default:
+ return RangeStr;
+ break;
+ }
+}
+
+static void EmitComplexRangeDiag(const Driver &D,
+ LangOptions::ComplexRangeKind Range1,
+ LangOptions::ComplexRangeKind Range2) {
+ if (Range1 != LangOptions::ComplexRangeKind::CX_Full)
+ D.Diag(clang::diag::warn_drv_overriding_option)
+ << EnumComplexRangeToStr(Range1) << EnumComplexRangeToStr(Range2);
+}
+
+static std::string
+RenderComplexRangeOption(LangOptions::ComplexRangeKind Range) {
+ std::string ComplexRangeStr = "-complex-range=";
+ switch (Range) {
+ case LangOptions::ComplexRangeKind::CX_Full:
+ ComplexRangeStr += "full";
+ break;
+ case LangOptions::ComplexRangeKind::CX_Limited:
+ ComplexRangeStr += "limited";
+ break;
+ case LangOptions::ComplexRangeKind::CX_Fortran:
+ ComplexRangeStr += "fortran";
+ break;
+ default:
+ assert(0 && "Unexpected range option");
+ }
+ return ComplexRangeStr;
}
static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
@@ -2617,6 +2742,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
// LLVM flags based on the final state.
bool HonorINFs = true;
bool HonorNaNs = true;
+ bool ApproxFunc = false;
// -fmath-errno is the default on some platforms, e.g. BSD-derived OSes.
bool MathErrno = TC.IsMathErrnoDefault();
bool AssociativeMath = false;
@@ -2631,6 +2757,8 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
StringRef FPModel = "";
// -ffp-exception-behavior options: strict, maytrap, ignore
StringRef FPExceptionBehavior = "";
+ // -ffp-eval-method options: double, extended, source
+ StringRef FPEvalMethod = "";
const llvm::DenormalMode DefaultDenormalFPMath =
TC.getDefaultDenormalModeForType(Args, JA);
const llvm::DenormalMode DefaultDenormalFP32Math =
@@ -2638,9 +2766,19 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
llvm::DenormalMode DenormalFPMath = DefaultDenormalFPMath;
llvm::DenormalMode DenormalFP32Math = DefaultDenormalFP32Math;
- StringRef FPContract = "";
+ // CUDA and HIP don't rely on the frontend to pass an ffp-contract option.
+ // If one wasn't given by the user, don't pass it here.
+ StringRef FPContract;
+ StringRef LastSeenFfpContractOption;
+ bool SeenUnsafeMathModeOption = false;
+ if (!JA.isDeviceOffloading(Action::OFK_Cuda) &&
+ !JA.isOffloading(Action::OFK_HIP))
+ FPContract = "on";
bool StrictFPModel = false;
-
+ StringRef Float16ExcessPrecision = "";
+ StringRef BFloat16ExcessPrecision = "";
+ LangOptions::ComplexRangeKind Range = LangOptions::ComplexRangeKind::CX_None;
+ std::string ComplexRangeStr = "";
if (const Arg *A = Args.getLastArg(options::OPT_flimited_precision_EQ)) {
CmdArgs.push_back("-mlimit-float-precision");
@@ -2653,17 +2791,36 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
switch (optID) {
default:
break;
+ case options::OPT_fcx_limited_range: {
+ EmitComplexRangeDiag(D, Range, LangOptions::ComplexRangeKind::CX_Limited);
+ Range = LangOptions::ComplexRangeKind::CX_Limited;
+ break;
+ }
+ case options::OPT_fno_cx_limited_range:
+ EmitComplexRangeDiag(D, Range, LangOptions::ComplexRangeKind::CX_Full);
+ Range = LangOptions::ComplexRangeKind::CX_Full;
+ break;
+ case options::OPT_fcx_fortran_rules: {
+ EmitComplexRangeDiag(D, Range, LangOptions::ComplexRangeKind::CX_Fortran);
+ Range = LangOptions::ComplexRangeKind::CX_Fortran;
+ break;
+ }
+ case options::OPT_fno_cx_fortran_rules:
+ EmitComplexRangeDiag(D, Range, LangOptions::ComplexRangeKind::CX_Full);
+ Range = LangOptions::ComplexRangeKind::CX_Full;
+ break;
case options::OPT_ffp_model_EQ: {
// If -ffp-model= is seen, reset to fno-fast-math
HonorINFs = true;
HonorNaNs = true;
+ ApproxFunc = false;
// Turning *off* -ffast-math restores the toolchain default.
MathErrno = TC.IsMathErrnoDefault();
AssociativeMath = false;
ReciprocalMath = false;
SignedZeros = true;
// -fno_fast_math restores default denormal and fpcontract handling
- FPContract = "";
+ FPContract = "on";
DenormalFPMath = llvm::DenormalMode::getIEEE();
// FIXME: The target may have picked a non-IEEE default mode here based on
@@ -2673,9 +2830,8 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
StringRef Val = A->getValue();
if (OFastEnabled && !Val.equals("fast")) {
// Only -ffp-model=fast is compatible with OFast, ignore.
- D.Diag(clang::diag::warn_drv_overriding_flag_option)
- << Args.MakeArgString("-ffp-model=" + Val)
- << "-Ofast";
+ D.Diag(clang::diag::warn_drv_overriding_option)
+ << Args.MakeArgString("-ffp-model=" + Val) << "-Ofast";
break;
}
StrictFPModel = false;
@@ -2683,12 +2839,10 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
// ffp-model= is a Driver option, it is entirely rewritten into more
// granular options before being passed into cc1.
// Use the gcc option in the switch below.
- if (!FPModel.empty() && !FPModel.equals(Val)) {
- D.Diag(clang::diag::warn_drv_overriding_flag_option)
- << Args.MakeArgString("-ffp-model=" + FPModel)
- << Args.MakeArgString("-ffp-model=" + Val);
- FPContract = "";
- }
+ if (!FPModel.empty() && !FPModel.equals(Val))
+ D.Diag(clang::diag::warn_drv_overriding_option)
+ << Args.MakeArgString("-ffp-model=" + FPModel)
+ << Args.MakeArgString("-ffp-model=" + Val);
if (Val.equals("fast")) {
optID = options::OPT_ffast_math;
FPModel = Val;
@@ -2696,7 +2850,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
} else if (Val.equals("precise")) {
optID = options::OPT_ffp_contract;
FPModel = Val;
- FPContract = "fast";
+ FPContract = "on";
PreciseFPModel = true;
} else if (Val.equals("strict")) {
StrictFPModel = true;
@@ -2707,9 +2861,9 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
TrappingMath = true;
} else
D.Diag(diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << Val;
+ << A->getSpelling() << Val;
break;
- }
+ }
}
switch (optID) {
@@ -2721,6 +2875,8 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
case options::OPT_fno_honor_infinities: HonorINFs = false; break;
case options::OPT_fhonor_nans: HonorNaNs = true; break;
case options::OPT_fno_honor_nans: HonorNaNs = false; break;
+ case options::OPT_fapprox_func: ApproxFunc = true; break;
+ case options::OPT_fno_approx_func: ApproxFunc = false; break;
case options::OPT_fmath_errno: MathErrno = true; break;
case options::OPT_fno_math_errno: MathErrno = false; break;
case options::OPT_fassociative_math: AssociativeMath = true; break;
@@ -2733,9 +2889,10 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
if (!TrappingMathPresent && !FPExceptionBehavior.empty() &&
!FPExceptionBehavior.equals("strict"))
// Warn that previous value of option is overridden.
- D.Diag(clang::diag::warn_drv_overriding_flag_option)
- << Args.MakeArgString("-ffp-exception-behavior=" + FPExceptionBehavior)
- << "-ftrapping-math";
+ D.Diag(clang::diag::warn_drv_overriding_option)
+ << Args.MakeArgString("-ffp-exception-behavior=" +
+ FPExceptionBehavior)
+ << "-ftrapping-math";
TrappingMath = true;
TrappingMathPresent = true;
FPExceptionBehavior = "strict";
@@ -2744,9 +2901,10 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
if (!TrappingMathPresent && !FPExceptionBehavior.empty() &&
!FPExceptionBehavior.equals("ignore"))
// Warn that previous value of option is overridden.
- D.Diag(clang::diag::warn_drv_overriding_flag_option)
- << Args.MakeArgString("-ffp-exception-behavior=" + FPExceptionBehavior)
- << "-fno-trapping-math";
+ D.Diag(clang::diag::warn_drv_overriding_option)
+ << Args.MakeArgString("-ffp-exception-behavior=" +
+ FPExceptionBehavior)
+ << "-fno-trapping-math";
TrappingMath = false;
TrappingMathPresent = true;
FPExceptionBehavior = "ignore";
@@ -2764,6 +2922,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
case options::OPT_fdenormal_fp_math_EQ:
DenormalFPMath = llvm::parseDenormalFPAttribute(A->getValue());
+ DenormalFP32Math = DenormalFPMath;
if (!DenormalFPMath.isValid()) {
D.Diag(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue();
@@ -2782,15 +2941,17 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
case options::OPT_ffp_contract: {
StringRef Val = A->getValue();
if (PreciseFPModel) {
- // -ffp-model=precise enables ffp-contract=fast as a side effect
- // the FPContract value has already been set to a string literal
- // and the Val string isn't a pertinent value.
+ // -ffp-model=precise enables ffp-contract=on.
+ // -ffp-model=precise sets PreciseFPModel to on and Val to
+ // "precise". FPContract is set.
;
- } else if (Val.equals("fast") || Val.equals("on") || Val.equals("off"))
+ } else if (Val.equals("fast") || Val.equals("on") || Val.equals("off") ||
+ Val.equals("fast-honor-pragmas")) {
FPContract = Val;
- else
+ LastSeenFfpContractOption = Val;
+ } else
D.Diag(diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << Val;
+ << A->getSpelling() << Val;
break;
}
@@ -2807,9 +2968,10 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
if (!TrappingMathPresent && !FPExceptionBehavior.empty() &&
!FPExceptionBehavior.equals(Val))
// Warn that previous value of option is overridden.
- D.Diag(clang::diag::warn_drv_overriding_flag_option)
- << Args.MakeArgString("-ffp-exception-behavior=" + FPExceptionBehavior)
- << Args.MakeArgString("-ffp-exception-behavior=" + Val);
+ D.Diag(clang::diag::warn_drv_overriding_option)
+ << Args.MakeArgString("-ffp-exception-behavior=" +
+ FPExceptionBehavior)
+ << Args.MakeArgString("-ffp-exception-behavior=" + Val);
TrappingMath = TrappingMathPresent = false;
if (Val.equals("ignore") || Val.equals("maytrap"))
FPExceptionBehavior = Val;
@@ -2818,10 +2980,44 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
TrappingMath = TrappingMathPresent = true;
} else
D.Diag(diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << Val;
+ << A->getSpelling() << Val;
+ break;
+ }
+
+ // Validate and pass through -ffp-eval-method option.
+ case options::OPT_ffp_eval_method_EQ: {
+ StringRef Val = A->getValue();
+ if (Val.equals("double") || Val.equals("extended") ||
+ Val.equals("source"))
+ FPEvalMethod = Val;
+ else
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << Val;
break;
}
+ case options::OPT_fexcess_precision_EQ: {
+ StringRef Val = A->getValue();
+ const llvm::Triple::ArchType Arch = TC.getArch();
+ if (Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64) {
+ if (Val.equals("standard") || Val.equals("fast"))
+ Float16ExcessPrecision = Val;
+ // To make it GCC compatible, allow the value of "16" which
+ // means disable excess precision, the same meaning than clang's
+ // equivalent value "none".
+ else if (Val.equals("16"))
+ Float16ExcessPrecision = "none";
+ else
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << Val;
+ } else {
+ if (!(Val.equals("standard") || Val.equals("fast")))
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << Val;
+ }
+ BFloat16ExcessPrecision = Float16ExcessPrecision;
+ break;
+ }
case options::OPT_ffinite_math_only:
HonorINFs = false;
HonorNaNs = false;
@@ -2835,38 +3031,56 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
AssociativeMath = true;
ReciprocalMath = true;
SignedZeros = false;
+ ApproxFunc = true;
TrappingMath = false;
FPExceptionBehavior = "";
+ FPContract = "fast";
+ SeenUnsafeMathModeOption = true;
break;
case options::OPT_fno_unsafe_math_optimizations:
AssociativeMath = false;
ReciprocalMath = false;
SignedZeros = true;
+ ApproxFunc = false;
TrappingMath = true;
FPExceptionBehavior = "strict";
// The target may have opted to flush by default, so force IEEE.
DenormalFPMath = llvm::DenormalMode::getIEEE();
DenormalFP32Math = llvm::DenormalMode::getIEEE();
+ if (!JA.isDeviceOffloading(Action::OFK_Cuda) &&
+ !JA.isOffloading(Action::OFK_HIP)) {
+ if (LastSeenFfpContractOption != "") {
+ FPContract = LastSeenFfpContractOption;
+ } else if (SeenUnsafeMathModeOption)
+ FPContract = "on";
+ }
break;
case options::OPT_Ofast:
// If -Ofast is the optimization level, then -ffast-math should be enabled
if (!OFastEnabled)
continue;
- LLVM_FALLTHROUGH;
- case options::OPT_ffast_math:
+ [[fallthrough]];
+ case options::OPT_ffast_math: {
HonorINFs = false;
HonorNaNs = false;
MathErrno = false;
AssociativeMath = true;
ReciprocalMath = true;
+ ApproxFunc = true;
SignedZeros = false;
TrappingMath = false;
RoundingFPMath = false;
+ FPExceptionBehavior = "";
// If fast-math is set then set the fp-contract mode to fast.
FPContract = "fast";
+ SeenUnsafeMathModeOption = true;
+ // ffast-math enables fortran rules for complex multiplication and
+ // division.
+ Range = LangOptions::ComplexRangeKind::CX_Limited;
break;
+ }
case options::OPT_fno_fast_math:
HonorINFs = true;
HonorNaNs = true;
@@ -2876,33 +3090,39 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
MathErrno = TC.IsMathErrnoDefault();
AssociativeMath = false;
ReciprocalMath = false;
+ ApproxFunc = false;
SignedZeros = true;
- TrappingMath = false;
- RoundingFPMath = false;
// -fno_fast_math restores default denormal and fpcontract handling
DenormalFPMath = DefaultDenormalFPMath;
DenormalFP32Math = llvm::DenormalMode::getIEEE();
- FPContract = "";
+ if (!JA.isDeviceOffloading(Action::OFK_Cuda) &&
+ !JA.isOffloading(Action::OFK_HIP)) {
+ if (LastSeenFfpContractOption != "") {
+ FPContract = LastSeenFfpContractOption;
+ } else if (SeenUnsafeMathModeOption)
+ FPContract = "on";
+ }
break;
}
if (StrictFPModel) {
// If -ffp-model=strict has been specified on command line but
// subsequent options conflict then emit warning diagnostic.
- if (HonorINFs && HonorNaNs &&
- !AssociativeMath && !ReciprocalMath &&
- SignedZeros && TrappingMath && RoundingFPMath &&
- (FPContract.equals("off") || FPContract.empty()) &&
- DenormalFPMath == llvm::DenormalMode::getIEEE() &&
- DenormalFP32Math == llvm::DenormalMode::getIEEE())
+ if (HonorINFs && HonorNaNs && !AssociativeMath && !ReciprocalMath &&
+ SignedZeros && TrappingMath && RoundingFPMath && !ApproxFunc &&
+ DenormalFPMath == llvm::DenormalMode::getIEEE() &&
+ DenormalFP32Math == llvm::DenormalMode::getIEEE() &&
+ FPContract.equals("off"))
// OK: Current Arg doesn't conflict with -ffp-model=strict
;
else {
StrictFPModel = false;
FPModel = "";
- D.Diag(clang::diag::warn_drv_overriding_flag_option)
- << "-ffp-model=strict" <<
- ((A->getNumValues() == 0) ? A->getSpelling()
- : Args.MakeArgString(A->getSpelling() + A->getValue()));
+ auto RHS = (A->getNumValues() == 0)
+ ? A->getSpelling()
+ : Args.MakeArgString(A->getSpelling() + A->getValue());
+ if (RHS != "-ffp-model=strict")
+ D.Diag(clang::diag::warn_drv_overriding_option)
+ << "-ffp-model=strict" << RHS;
}
}
@@ -2916,12 +3136,15 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
if (!HonorNaNs)
CmdArgs.push_back("-menable-no-nans");
+ if (ApproxFunc)
+ CmdArgs.push_back("-fapprox-func");
+
if (MathErrno)
CmdArgs.push_back("-fmath-errno");
- if (!MathErrno && AssociativeMath && ReciprocalMath && !SignedZeros &&
- !TrappingMath)
- CmdArgs.push_back("-menable-unsafe-fp-math");
+ if (AssociativeMath && ReciprocalMath && !SignedZeros && ApproxFunc &&
+ !TrappingMath)
+ CmdArgs.push_back("-funsafe-math-optimizations");
if (!SignedZeros)
CmdArgs.push_back("-fno-signed-zeros");
@@ -2966,12 +3189,22 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
CmdArgs.push_back(Args.MakeArgString("-ffp-exception-behavior=" +
FPExceptionBehavior));
+ if (!FPEvalMethod.empty())
+ CmdArgs.push_back(Args.MakeArgString("-ffp-eval-method=" + FPEvalMethod));
+
+ if (!Float16ExcessPrecision.empty())
+ CmdArgs.push_back(Args.MakeArgString("-ffloat16-excess-precision=" +
+ Float16ExcessPrecision));
+ if (!BFloat16ExcessPrecision.empty())
+ CmdArgs.push_back(Args.MakeArgString("-fbfloat16-excess-precision=" +
+ BFloat16ExcessPrecision));
+
ParseMRecip(D, Args, CmdArgs);
// -ffast-math enables the __FAST_MATH__ preprocessor macro, but check for the
// individual features enabled by -ffast-math instead of the option itself as
// that's consistent with gcc's behaviour.
- if (!HonorINFs && !HonorNaNs && !MathErrno && AssociativeMath &&
+ if (!HonorINFs && !HonorNaNs && !MathErrno && AssociativeMath && ApproxFunc &&
ReciprocalMath && !SignedZeros && !TrappingMath && !RoundingFPMath) {
CmdArgs.push_back("-ffast-math");
if (FPModel.equals("fast")) {
@@ -2982,9 +3215,9 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
// Enable -ffp-contract=fast
CmdArgs.push_back(Args.MakeArgString("-ffp-contract=fast"));
else
- D.Diag(clang::diag::warn_drv_overriding_flag_option)
- << "-ffp-model=fast"
- << Args.MakeArgString("-ffp-contract=" + FPContract);
+ D.Diag(clang::diag::warn_drv_overriding_option)
+ << "-ffp-model=fast"
+ << Args.MakeArgString("-ffp-contract=" + FPContract);
}
}
@@ -3001,17 +3234,24 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
if (Args.hasFlag(options::OPT_fno_strict_float_cast_overflow,
options::OPT_fstrict_float_cast_overflow, false))
CmdArgs.push_back("-fno-strict-float-cast-overflow");
+
+ if (Range != LangOptions::ComplexRangeKind::CX_None)
+ ComplexRangeStr = RenderComplexRangeOption(Range);
+ if (!ComplexRangeStr.empty())
+ CmdArgs.push_back(Args.MakeArgString(ComplexRangeStr));
+ if (Args.hasArg(options::OPT_fcx_limited_range))
+ CmdArgs.push_back("-fcx-limited-range");
+ if (Args.hasArg(options::OPT_fcx_fortran_rules))
+ CmdArgs.push_back("-fcx-fortran-rules");
+ if (Args.hasArg(options::OPT_fno_cx_limited_range))
+ CmdArgs.push_back("-fno-cx-limited-range");
+ if (Args.hasArg(options::OPT_fno_cx_fortran_rules))
+ CmdArgs.push_back("-fno-cx-fortran-rules");
}
static void RenderAnalyzerOptions(const ArgList &Args, ArgStringList &CmdArgs,
const llvm::Triple &Triple,
const InputInfo &Input) {
- // Enable region store model by default.
- CmdArgs.push_back("-analyzer-store=region");
-
- // Treat blocks as analysis entry points.
- CmdArgs.push_back("-analyzer-opt-analyze-nested-blocks");
-
// Add default argument set.
if (!Args.hasArg(options::OPT__analyzer_no_default_checks)) {
CmdArgs.push_back("-analyzer-checker=core");
@@ -3029,8 +3269,8 @@ static void RenderAnalyzerOptions(const ArgList &Args, ArgStringList &CmdArgs,
CmdArgs.push_back("-analyzer-checker=unix.cstring.NullArg");
}
- // Disable some unix checkers for PS4.
- if (Triple.isPS4CPU()) {
+ // Disable some unix checkers for PS4/PS5.
+ if (Triple.isPS()) {
CmdArgs.push_back("-analyzer-disable-checker=unix.API");
CmdArgs.push_back("-analyzer-disable-checker=unix.Vfork");
}
@@ -3048,7 +3288,7 @@ static void RenderAnalyzerOptions(const ArgList &Args, ArgStringList &CmdArgs,
if (types::isCXX(Input.getType()))
CmdArgs.push_back("-analyzer-checker=cplusplus");
- if (!Triple.isPS4CPU()) {
+ if (!Triple.isPS()) {
CmdArgs.push_back("-analyzer-checker=security.insecureAPI.UncheckedReturn");
CmdArgs.push_back("-analyzer-checker=security.insecureAPI.getpw");
CmdArgs.push_back("-analyzer-checker=security.insecureAPI.gets");
@@ -3078,6 +3318,16 @@ static void RenderAnalyzerOptions(const ArgList &Args, ArgStringList &CmdArgs,
Args.AddAllArgValues(CmdArgs, options::OPT_Xanalyzer);
}
+static bool isValidSymbolName(StringRef S) {
+ if (S.empty())
+ return false;
+
+ if (std::isdigit(S[0]))
+ return false;
+
+ return llvm::all_of(S, [](char C) { return std::isalnum(C) || C == '_'; });
+}
+
static void RenderSSPOptions(const Driver &D, const ToolChain &TC,
const ArgList &Args, ArgStringList &CmdArgs,
bool KernelOrKext) {
@@ -3104,6 +3354,12 @@ static void RenderSSPOptions(const Driver &D, const ToolChain &TC,
StackProtectorLevel = LangOptions::SSPStrong;
else if (A->getOption().matches(options::OPT_fstack_protector_all))
StackProtectorLevel = LangOptions::SSPReq;
+
+ if (EffectiveTriple.isBPF() && StackProtectorLevel != LangOptions::SSPOff) {
+ D.Diag(diag::warn_drv_unsupported_option_for_target)
+ << A->getSpelling() << EffectiveTriple.getTriple();
+ StackProtectorLevel = DefaultStackProtectorLevel;
+ }
} else {
StackProtectorLevel = DefaultStackProtectorLevel;
}
@@ -3116,7 +3372,7 @@ static void RenderSSPOptions(const Driver &D, const ToolChain &TC,
// --param ssp-buffer-size=
for (const Arg *A : Args.filtered(options::OPT__param)) {
StringRef Str(A->getValue());
- if (Str.startswith("ssp-buffer-size=")) {
+ if (Str.starts_with("ssp-buffer-size=")) {
if (StackProtectorLevel) {
CmdArgs.push_back("-stack-protector-buffer-size");
// FIXME: Verify the argument is a valid integer.
@@ -3129,14 +3385,42 @@ static void RenderSSPOptions(const Driver &D, const ToolChain &TC,
const std::string &TripleStr = EffectiveTriple.getTriple();
if (Arg *A = Args.getLastArg(options::OPT_mstack_protector_guard_EQ)) {
StringRef Value = A->getValue();
- if (!EffectiveTriple.isX86() && !EffectiveTriple.isAArch64())
+ if (!EffectiveTriple.isX86() && !EffectiveTriple.isAArch64() &&
+ !EffectiveTriple.isARM() && !EffectiveTriple.isThumb())
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getAsString(Args) << TripleStr;
- if (EffectiveTriple.isX86() && Value != "tls" && Value != "global") {
+ if ((EffectiveTriple.isX86() || EffectiveTriple.isARM() ||
+ EffectiveTriple.isThumb()) &&
+ Value != "tls" && Value != "global") {
D.Diag(diag::err_drv_invalid_value_with_suggestion)
<< A->getOption().getName() << Value << "tls global";
return;
}
+ if ((EffectiveTriple.isARM() || EffectiveTriple.isThumb()) &&
+ Value == "tls") {
+ if (!Args.hasArg(options::OPT_mstack_protector_guard_offset_EQ)) {
+ D.Diag(diag::err_drv_ssp_missing_offset_argument)
+ << A->getAsString(Args);
+ return;
+ }
+ // Check whether the target subarch supports the hardware TLS register
+ if (!arm::isHardTPSupported(EffectiveTriple)) {
+ D.Diag(diag::err_target_unsupported_tp_hard)
+ << EffectiveTriple.getArchName();
+ return;
+ }
+ // Check whether the user asked for something other than -mtp=cp15
+ if (Arg *A = Args.getLastArg(options::OPT_mtp_mode_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value != "cp15") {
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args) << "-mstack-protector-guard=tls";
+ return;
+ }
+ }
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+read-tp-tpidruro");
+ }
if (EffectiveTriple.isAArch64() && Value != "sysreg" && Value != "global") {
D.Diag(diag::err_drv_invalid_value_with_suggestion)
<< A->getOption().getName() << Value << "sysreg global";
@@ -3147,7 +3431,8 @@ static void RenderSSPOptions(const Driver &D, const ToolChain &TC,
if (Arg *A = Args.getLastArg(options::OPT_mstack_protector_guard_offset_EQ)) {
StringRef Value = A->getValue();
- if (!EffectiveTriple.isX86() && !EffectiveTriple.isAArch64())
+ if (!EffectiveTriple.isX86() && !EffectiveTriple.isAArch64() &&
+ !EffectiveTriple.isARM() && !EffectiveTriple.isThumb())
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getAsString(Args) << TripleStr;
int Offset;
@@ -3155,6 +3440,12 @@ static void RenderSSPOptions(const Driver &D, const ToolChain &TC,
D.Diag(diag::err_drv_invalid_value) << A->getOption().getName() << Value;
return;
}
+ if ((EffectiveTriple.isARM() || EffectiveTriple.isThumb()) &&
+ (Offset < 0 || Offset > 0xfffff)) {
+ D.Diag(diag::err_drv_invalid_int_value)
+ << A->getOption().getName() << Value;
+ return;
+ }
A->render(Args, CmdArgs);
}
@@ -3174,6 +3465,16 @@ static void RenderSSPOptions(const Driver &D, const ToolChain &TC,
}
A->render(Args, CmdArgs);
}
+
+ if (Arg *A = Args.getLastArg(options::OPT_mstack_protector_guard_symbol_EQ)) {
+ StringRef Value = A->getValue();
+ if (!isValidSymbolName(Value)) {
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << A->getOption().getName() << "legal symbol name";
+ return;
+ }
+ A->render(Args, CmdArgs);
+ }
}
static void RenderSCPOptions(const ToolChain &TC, const ArgList &Args,
@@ -3184,12 +3485,11 @@ static void RenderSCPOptions(const ToolChain &TC, const ArgList &Args,
return;
if (!EffectiveTriple.isX86() && !EffectiveTriple.isSystemZ() &&
- !EffectiveTriple.isPPC64())
+ !EffectiveTriple.isPPC64() && !EffectiveTriple.isAArch64())
return;
- if (Args.hasFlag(options::OPT_fstack_clash_protection,
- options::OPT_fno_stack_clash_protection, false))
- CmdArgs.push_back("-fstack-clash-protection");
+ Args.addOptInFlag(CmdArgs, options::OPT_fstack_clash_protection,
+ options::OPT_fno_stack_clash_protection);
}
static void RenderTrivialAutoVarInitOptions(const Driver &D,
@@ -3210,7 +3510,7 @@ static void RenderTrivialAutoVarInitOptions(const Driver &D,
TrivialAutoVarInit = Val;
else
D.Diag(diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << Val;
+ << A->getSpelling() << Val;
break;
}
}
@@ -3229,8 +3529,6 @@ static void RenderTrivialAutoVarInitOptions(const Driver &D,
}
if (!TrivialAutoVarInit.empty()) {
- if (TrivialAutoVarInit == "zero" && !Args.hasArg(options::OPT_enable_trivial_var_init_zero))
- D.Diag(diag::err_drv_trivial_auto_var_init_zero_disabled);
CmdArgs.push_back(
Args.MakeArgString("-ftrivial-auto-var-init=" + TrivialAutoVarInit));
}
@@ -3249,6 +3547,20 @@ static void RenderTrivialAutoVarInitOptions(const Driver &D,
CmdArgs.push_back(
Args.MakeArgString("-ftrivial-auto-var-init-stop-after=" + Val));
}
+
+ if (Arg *A = Args.getLastArg(options::OPT_ftrivial_auto_var_init_max_size)) {
+ if (!Args.hasArg(options::OPT_ftrivial_auto_var_init) ||
+ StringRef(
+ Args.getLastArg(options::OPT_ftrivial_auto_var_init)->getValue()) ==
+ "uninitialized")
+ D.Diag(diag::err_drv_trivial_auto_var_init_max_size_missing_dependency);
+ A->claim();
+ StringRef Val = A->getValue();
+ if (std::stoi(Val.str()) <= 0)
+ D.Diag(diag::err_drv_trivial_auto_var_init_max_size_invalid_value);
+ CmdArgs.push_back(
+ Args.MakeArgString("-ftrivial-auto-var-init-max-size=" + Val));
+ }
}
static void RenderOpenCLOptions(const ArgList &Args, ArgStringList &CmdArgs,
@@ -3272,6 +3584,9 @@ static void RenderOpenCLOptions(const ArgList &Args, ArgStringList &CmdArgs,
if (Arg *A = Args.getLastArg(options::OPT_cl_std_EQ)) {
std::string CLStdStr = std::string("-cl-std=") + A->getValue();
CmdArgs.push_back(Args.MakeArgString(CLStdStr));
+ } else if (Arg *A = Args.getLastArg(options::OPT_cl_ext_EQ)) {
+ std::string CLExtStr = std::string("-cl-ext=") + A->getValue();
+ CmdArgs.push_back(Args.MakeArgString(CLExtStr));
}
for (const auto &Arg : ForwardedArguments)
@@ -3287,6 +3602,46 @@ static void RenderOpenCLOptions(const ArgList &Args, ArgStringList &CmdArgs,
}
}
+static void RenderHLSLOptions(const ArgList &Args, ArgStringList &CmdArgs,
+ types::ID InputType) {
+ const unsigned ForwardedArguments[] = {options::OPT_dxil_validator_version,
+ options::OPT_D,
+ options::OPT_I,
+ options::OPT_S,
+ options::OPT_O,
+ options::OPT_emit_llvm,
+ options::OPT_emit_obj,
+ options::OPT_disable_llvm_passes,
+ options::OPT_fnative_half_type,
+ options::OPT_hlsl_entrypoint};
+ if (!types::isHLSL(InputType))
+ return;
+ for (const auto &Arg : ForwardedArguments)
+ if (const auto *A = Args.getLastArg(Arg))
+ A->renderAsInput(Args, CmdArgs);
+ // Add the default headers if dxc_no_stdinc is not set.
+ if (!Args.hasArg(options::OPT_dxc_no_stdinc) &&
+ !Args.hasArg(options::OPT_nostdinc))
+ CmdArgs.push_back("-finclude-default-header");
+}
+
+static void RenderOpenACCOptions(const Driver &D, const ArgList &Args,
+ ArgStringList &CmdArgs, types::ID InputType) {
+ if (!Args.hasArg(options::OPT_fopenacc))
+ return;
+
+ CmdArgs.push_back("-fopenacc");
+
+ if (Arg *A = Args.getLastArg(options::OPT_openacc_macro_override)) {
+ StringRef Value = A->getValue();
+ int Version;
+ if (!Value.getAsInteger(10, Version))
+ A->renderAsInput(Args, CmdArgs);
+ else
+ D.Diag(diag::err_drv_clang_unsupported) << Value;
+ }
+}
+
static void RenderARCMigrateToolOptions(const Driver &D, const ArgList &Args,
ArgStringList &CmdArgs) {
bool ARCMTEnabled = false;
@@ -3355,7 +3710,7 @@ static void RenderARCMigrateToolOptions(const Driver &D, const ArgList &Args,
Args.AddLastArg(CmdArgs, options::OPT_objcmt_returns_innerpointer_property);
Args.AddLastArg(CmdArgs, options::OPT_objcmt_ns_nonatomic_iosonly);
Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_designated_init);
- Args.AddLastArg(CmdArgs, options::OPT_objcmt_whitelist_dir_path);
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_allowlist_dir_path);
}
}
@@ -3373,20 +3728,13 @@ static void RenderBuiltinOptions(const ToolChain &TC, const llvm::Triple &T,
UseBuiltins = false;
// Process the -fno-builtin-* options.
- for (const auto &Arg : Args) {
- const Option &O = Arg->getOption();
- if (!O.matches(options::OPT_fno_builtin_))
- continue;
-
- Arg->claim();
+ for (const Arg *A : Args.filtered(options::OPT_fno_builtin_)) {
+ A->claim();
// If -fno-builtin is specified, then there's no need to pass the option to
// the frontend.
- if (!UseBuiltins)
- continue;
-
- StringRef FuncName = Arg->getValue();
- CmdArgs.push_back(Args.MakeArgString("-fno-builtin-" + FuncName));
+ if (UseBuiltins)
+ A->render(Args, CmdArgs);
}
// le32-specific flags:
@@ -3397,6 +3745,11 @@ static void RenderBuiltinOptions(const ToolChain &TC, const llvm::Triple &T,
}
bool Driver::getDefaultModuleCachePath(SmallVectorImpl<char> &Result) {
+ if (const char *Str = std::getenv("CLANG_MODULE_CACHE_PATH")) {
+ Twine Path{Str};
+ Path.toVector(Result);
+ return Path.getSingleStringRef() != "";
+ }
if (llvm::sys::path::cache_directory(Result)) {
llvm::sys::path::append(Result, "clang");
llvm::sys::path::append(Result, "ModuleCache");
@@ -3405,10 +3758,14 @@ bool Driver::getDefaultModuleCachePath(SmallVectorImpl<char> &Result) {
return false;
}
-static void RenderModulesOptions(Compilation &C, const Driver &D,
+static bool RenderModulesOptions(Compilation &C, const Driver &D,
const ArgList &Args, const InputInfo &Input,
- const InputInfo &Output,
- ArgStringList &CmdArgs, bool &HaveModules) {
+ const InputInfo &Output, bool HaveStd20,
+ ArgStringList &CmdArgs) {
+ bool IsCXX = types::isCXX(Input.getType());
+ bool HaveStdCXXModules = IsCXX && HaveStd20;
+ bool HaveModules = HaveStdCXXModules;
+
// -fmodules enables the use of precompiled modules (off by default).
// Users can pass -fno-cxx-modules to turn off modules support for
// C++/Objective-C++ programs.
@@ -3416,17 +3773,13 @@ static void RenderModulesOptions(Compilation &C, const Driver &D,
if (Args.hasFlag(options::OPT_fmodules, options::OPT_fno_modules, false)) {
bool AllowedInCXX = Args.hasFlag(options::OPT_fcxx_modules,
options::OPT_fno_cxx_modules, true);
- if (AllowedInCXX || !types::isCXX(Input.getType())) {
+ if (AllowedInCXX || !IsCXX) {
CmdArgs.push_back("-fmodules");
HaveClangModules = true;
}
}
HaveModules |= HaveClangModules;
- if (Args.hasArg(options::OPT_fmodules_ts)) {
- CmdArgs.push_back("-fmodules-ts");
- HaveModules = true;
- }
// -fmodule-maps enables implicit reading of module map files. By default,
// this is enabled if we are using Clang's flavor of precompiled modules.
@@ -3435,9 +3788,8 @@ static void RenderModulesOptions(Compilation &C, const Driver &D,
CmdArgs.push_back("-fimplicit-module-maps");
// -fmodules-decluse checks that modules used are declared so (off by default)
- if (Args.hasFlag(options::OPT_fmodules_decluse,
- options::OPT_fno_modules_decluse, false))
- CmdArgs.push_back("-fmodules-decluse");
+ Args.addOptInFlag(CmdArgs, options::OPT_fmodules_decluse,
+ options::OPT_fno_modules_decluse);
// -fmodules-strict-decluse is like -fmodule-decluse, but also checks that
// all #included headers are part of modules.
@@ -3481,12 +3833,6 @@ static void RenderModulesOptions(Compilation &C, const Driver &D,
}
if (HaveModules) {
- // -fprebuilt-module-path specifies where to load the prebuilt module files.
- for (const Arg *A : Args.filtered(options::OPT_fprebuilt_module_path)) {
- CmdArgs.push_back(Args.MakeArgString(
- std::string("-fprebuilt-module-path=") + A->getValue()));
- A->claim();
- }
if (Args.hasFlag(options::OPT_fprebuilt_implicit_modules,
options::OPT_fno_prebuilt_implicit_modules, false))
CmdArgs.push_back("-fprebuilt-implicit-modules");
@@ -3519,9 +3865,16 @@ static void RenderModulesOptions(Compilation &C, const Driver &D,
// names to precompiled module files (the module is loaded only if used).
// The -fmodule-file=<file> form can be used to unconditionally load
// precompiled module files (whether used or not).
- if (HaveModules)
+ if (HaveModules || Input.getType() == clang::driver::types::TY_ModuleFile) {
Args.AddAllArgs(CmdArgs, options::OPT_fmodule_file);
- else
+
+ // -fprebuilt-module-path specifies where to load the prebuilt module files.
+ for (const Arg *A : Args.filtered(options::OPT_fprebuilt_module_path)) {
+ CmdArgs.push_back(Args.MakeArgString(
+ std::string("-fprebuilt-module-path=") + A->getValue()));
+ A->claim();
+ }
+ } else
Args.ClaimAllArgs(options::OPT_fmodule_file);
// When building modules and generating crashdumps, we need to dump a module
@@ -3545,38 +3898,59 @@ static void RenderModulesOptions(Compilation &C, const Driver &D,
Args.AddLastArg(CmdArgs, options::OPT_fmodules_prune_interval);
Args.AddLastArg(CmdArgs, options::OPT_fmodules_prune_after);
- Args.AddLastArg(CmdArgs, options::OPT_fbuild_session_timestamp);
+ if (HaveClangModules) {
+ Args.AddLastArg(CmdArgs, options::OPT_fbuild_session_timestamp);
- if (Arg *A = Args.getLastArg(options::OPT_fbuild_session_file)) {
- if (Args.hasArg(options::OPT_fbuild_session_timestamp))
- D.Diag(diag::err_drv_argument_not_allowed_with)
- << A->getAsString(Args) << "-fbuild-session-timestamp";
+ if (Arg *A = Args.getLastArg(options::OPT_fbuild_session_file)) {
+ if (Args.hasArg(options::OPT_fbuild_session_timestamp))
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args) << "-fbuild-session-timestamp";
- llvm::sys::fs::file_status Status;
- if (llvm::sys::fs::status(A->getValue(), Status))
- D.Diag(diag::err_drv_no_such_file) << A->getValue();
- CmdArgs.push_back(
- Args.MakeArgString("-fbuild-session-timestamp=" +
- Twine((uint64_t)Status.getLastModificationTime()
- .time_since_epoch()
- .count())));
- }
+ llvm::sys::fs::file_status Status;
+ if (llvm::sys::fs::status(A->getValue(), Status))
+ D.Diag(diag::err_drv_no_such_file) << A->getValue();
+ CmdArgs.push_back(Args.MakeArgString(
+ "-fbuild-session-timestamp=" +
+ Twine((uint64_t)std::chrono::duration_cast<std::chrono::seconds>(
+ Status.getLastModificationTime().time_since_epoch())
+ .count())));
+ }
- if (Args.getLastArg(options::OPT_fmodules_validate_once_per_build_session)) {
- if (!Args.getLastArg(options::OPT_fbuild_session_timestamp,
- options::OPT_fbuild_session_file))
- D.Diag(diag::err_drv_modules_validate_once_requires_timestamp);
+ if (Args.getLastArg(
+ options::OPT_fmodules_validate_once_per_build_session)) {
+ if (!Args.getLastArg(options::OPT_fbuild_session_timestamp,
+ options::OPT_fbuild_session_file))
+ D.Diag(diag::err_drv_modules_validate_once_requires_timestamp);
+
+ Args.AddLastArg(CmdArgs,
+ options::OPT_fmodules_validate_once_per_build_session);
+ }
+
+ if (Args.hasFlag(options::OPT_fmodules_validate_system_headers,
+ options::OPT_fno_modules_validate_system_headers,
+ ImplicitModules))
+ CmdArgs.push_back("-fmodules-validate-system-headers");
Args.AddLastArg(CmdArgs,
- options::OPT_fmodules_validate_once_per_build_session);
+ options::OPT_fmodules_disable_diagnostic_validation);
+ } else {
+ Args.ClaimAllArgs(options::OPT_fbuild_session_timestamp);
+ Args.ClaimAllArgs(options::OPT_fbuild_session_file);
+ Args.ClaimAllArgs(options::OPT_fmodules_validate_once_per_build_session);
+ Args.ClaimAllArgs(options::OPT_fmodules_validate_system_headers);
+ Args.ClaimAllArgs(options::OPT_fno_modules_validate_system_headers);
+ Args.ClaimAllArgs(options::OPT_fmodules_disable_diagnostic_validation);
}
- if (Args.hasFlag(options::OPT_fmodules_validate_system_headers,
- options::OPT_fno_modules_validate_system_headers,
- ImplicitModules))
- CmdArgs.push_back("-fmodules-validate-system-headers");
+ // FIXME: We provisionally don't check ODR violations for decls in the global
+ // module fragment.
+ CmdArgs.push_back("-fskip-odr-check-in-gmf");
- Args.AddLastArg(CmdArgs, options::OPT_fmodules_disable_diagnostic_validation);
+ // Claim `-fmodule-output` and `-fmodule-output=` to avoid unused warnings.
+ Args.ClaimAllArgs(options::OPT_fmodule_output);
+ Args.ClaimAllArgs(options::OPT_fmodule_output_EQ);
+
+ return HaveModules;
}
static void RenderCharacterOptions(const ArgList &Args, const llvm::Triple &T,
@@ -3611,7 +3985,8 @@ static void RenderCharacterOptions(const ArgList &Args, const llvm::Triple &T,
else
CmdArgs.push_back("-fsigned-wchar");
}
- }
+ } else if (T.isOSzOS())
+ CmdArgs.push_back("-fno-signed-wchar");
}
static void RenderObjCOptions(const ToolChain &TC, const Driver &D,
@@ -3736,15 +4111,10 @@ static void RenderDiagnosticsOptions(const Driver &D, const ArgList &Args,
options::OPT_fno_caret_diagnostics, CaretDefault))
CmdArgs.push_back("-fno-caret-diagnostics");
- // -fdiagnostics-fixit-info is default, only pass non-default.
- if (!Args.hasFlag(options::OPT_fdiagnostics_fixit_info,
- options::OPT_fno_diagnostics_fixit_info))
- CmdArgs.push_back("-fno-diagnostics-fixit-info");
-
- // Enable -fdiagnostics-show-option by default.
- if (!Args.hasFlag(options::OPT_fdiagnostics_show_option,
- options::OPT_fno_diagnostics_show_option, true))
- CmdArgs.push_back("-fno-diagnostics-show-option");
+ Args.addOptOutFlag(CmdArgs, options::OPT_fdiagnostics_fixit_info,
+ options::OPT_fno_diagnostics_fixit_info);
+ Args.addOptOutFlag(CmdArgs, options::OPT_fdiagnostics_show_option,
+ options::OPT_fno_diagnostics_show_option);
if (const Arg *A =
Args.getLastArg(options::OPT_fdiagnostics_show_category_EQ)) {
@@ -3752,9 +4122,8 @@ static void RenderDiagnosticsOptions(const Driver &D, const ArgList &Args,
CmdArgs.push_back(A->getValue());
}
- if (Args.hasFlag(options::OPT_fdiagnostics_show_hotness,
- options::OPT_fno_diagnostics_show_hotness, false))
- CmdArgs.push_back("-fdiagnostics-show-hotness");
+ Args.addOptInFlag(CmdArgs, options::OPT_fdiagnostics_show_hotness,
+ options::OPT_fno_diagnostics_show_hotness);
if (const Arg *A =
Args.getLastArg(options::OPT_fdiagnostics_hotness_threshold_EQ)) {
@@ -3763,9 +4132,19 @@ static void RenderDiagnosticsOptions(const Driver &D, const ArgList &Args,
CmdArgs.push_back(Args.MakeArgString(Opt));
}
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fdiagnostics_misexpect_tolerance_EQ)) {
+ std::string Opt =
+ std::string("-fdiagnostics-misexpect-tolerance=") + A->getValue();
+ CmdArgs.push_back(Args.MakeArgString(Opt));
+ }
+
if (const Arg *A = Args.getLastArg(options::OPT_fdiagnostics_format_EQ)) {
CmdArgs.push_back("-fdiagnostics-format");
CmdArgs.push_back(A->getValue());
+ if (StringRef(A->getValue()) == "sarif" ||
+ StringRef(A->getValue()) == "SARIF")
+ D.Diag(diag::warn_drv_sarif_format_unstable);
}
if (const Arg *A = Args.getLastArg(
@@ -3782,22 +4161,13 @@ static void RenderDiagnosticsOptions(const Driver &D, const ArgList &Args,
// re-parsed to construct this job; claim any possible color diagnostic here
// to avoid warn_drv_unused_argument and diagnose bad
// OPT_fdiagnostics_color_EQ values.
- for (const Arg *A : Args) {
- const Option &O = A->getOption();
- if (!O.matches(options::OPT_fcolor_diagnostics) &&
- !O.matches(options::OPT_fdiagnostics_color) &&
- !O.matches(options::OPT_fno_color_diagnostics) &&
- !O.matches(options::OPT_fno_diagnostics_color) &&
- !O.matches(options::OPT_fdiagnostics_color_EQ))
- continue;
-
- if (O.matches(options::OPT_fdiagnostics_color_EQ)) {
- StringRef Value(A->getValue());
- if (Value != "always" && Value != "never" && Value != "auto")
- D.Diag(diag::err_drv_clang_unsupported)
- << ("-fdiagnostics-color=" + Value).str();
- }
- A->claim();
+ Args.getLastArg(options::OPT_fcolor_diagnostics,
+ options::OPT_fno_color_diagnostics);
+ if (const Arg *A = Args.getLastArg(options::OPT_fdiagnostics_color_EQ)) {
+ StringRef Value(A->getValue());
+ if (Value != "always" && Value != "never" && Value != "auto")
+ D.Diag(diag::err_drv_invalid_argument_to_option)
+ << Value << A->getOption().getName();
}
if (D.getDiags().getDiagnosticOptions().ShowColors)
@@ -3806,9 +4176,11 @@ static void RenderDiagnosticsOptions(const Driver &D, const ArgList &Args,
if (Args.hasArg(options::OPT_fansi_escape_codes))
CmdArgs.push_back("-fansi-escape-codes");
- if (!Args.hasFlag(options::OPT_fshow_source_location,
- options::OPT_fno_show_source_location))
- CmdArgs.push_back("-fno-show-source-location");
+ Args.addOptOutFlag(CmdArgs, options::OPT_fshow_source_location,
+ options::OPT_fno_show_source_location);
+
+ Args.addOptOutFlag(CmdArgs, options::OPT_fdiagnostics_show_line_numbers,
+ options::OPT_fno_diagnostics_show_line_numbers);
if (Args.hasArg(options::OPT_fdiagnostics_absolute_paths))
CmdArgs.push_back("-fdiagnostics-absolute-paths");
@@ -3817,14 +4189,11 @@ static void RenderDiagnosticsOptions(const Driver &D, const ArgList &Args,
ColumnDefault))
CmdArgs.push_back("-fno-show-column");
- if (!Args.hasFlag(options::OPT_fspell_checking,
- options::OPT_fno_spell_checking))
- CmdArgs.push_back("-fno-spell-checking");
+ Args.addOptOutFlag(CmdArgs, options::OPT_fspell_checking,
+ options::OPT_fno_spell_checking);
}
-enum class DwarfFissionKind { None, Split, Single };
-
-static DwarfFissionKind getDebugFissionKind(const Driver &D,
+DwarfFissionKind tools::getDebugFissionKind(const Driver &D,
const ArgList &Args, Arg *&Arg) {
Arg = Args.getLastArg(options::OPT_gsplit_dwarf, options::OPT_gsplit_dwarf_EQ,
options::OPT_gno_split_dwarf);
@@ -3841,7 +4210,7 @@ static DwarfFissionKind getDebugFissionKind(const Driver &D,
return DwarfFissionKind::Single;
D.Diag(diag::err_drv_unsupported_option_argument)
- << Arg->getOption().getName() << Arg->getValue();
+ << Arg->getSpelling() << Arg->getValue();
return DwarfFissionKind::None;
}
@@ -3868,18 +4237,12 @@ static void renderDwarfFormat(const Driver &D, const llvm::Triple &T,
DwarfFormatArg->render(Args, CmdArgs);
}
-static void renderDebugOptions(const ToolChain &TC, const Driver &D,
- const llvm::Triple &T, const ArgList &Args,
- bool EmitCodeView, bool IRInput,
- ArgStringList &CmdArgs,
- codegenoptions::DebugInfoKind &DebugInfoKind,
- DwarfFissionKind &DwarfFission) {
- // These two forms of profiling info can't be used together.
- if (const Arg *A1 = Args.getLastArg(options::OPT_fpseudo_probe_for_profiling))
- if (const Arg *A2 = Args.getLastArg(options::OPT_fdebug_info_for_profiling))
- D.Diag(diag::err_drv_argument_not_allowed_with)
- << A1->getAsString(Args) << A2->getAsString(Args);
-
+static void
+renderDebugOptions(const ToolChain &TC, const Driver &D, const llvm::Triple &T,
+ const ArgList &Args, bool IRInput, ArgStringList &CmdArgs,
+ const InputInfo &Output,
+ llvm::codegenoptions::DebugInfoKind &DebugInfoKind,
+ DwarfFissionKind &DwarfFission) {
if (Args.hasFlag(options::OPT_fdebug_info_for_profiling,
options::OPT_fno_debug_info_for_profiling, false) &&
checkDebugInfoOption(
@@ -3914,27 +4277,29 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
}
}
if (const Arg *A = Args.getLastArg(options::OPT_g_Group)) {
- DebugInfoKind = codegenoptions::DebugInfoConstructor;
+ DebugInfoKind = llvm::codegenoptions::DebugInfoConstructor;
// If the last option explicitly specified a debug-info level, use it.
if (checkDebugInfoOption(A, Args, D, TC) &&
A->getOption().matches(options::OPT_gN_Group)) {
- DebugInfoKind = DebugLevelToInfoKind(*A);
+ DebugInfoKind = debugLevelToInfoKind(*A);
// For -g0 or -gline-tables-only, drop -gsplit-dwarf. This gets a bit more
// complicated if you've disabled inline info in the skeleton CUs
// (SplitDWARFInlining) - then there's value in composing split-dwarf and
// line-tables-only, so let those compose naturally in that case.
- if (DebugInfoKind == codegenoptions::NoDebugInfo ||
- DebugInfoKind == codegenoptions::DebugDirectivesOnly ||
- (DebugInfoKind == codegenoptions::DebugLineTablesOnly &&
+ if (DebugInfoKind == llvm::codegenoptions::NoDebugInfo ||
+ DebugInfoKind == llvm::codegenoptions::DebugDirectivesOnly ||
+ (DebugInfoKind == llvm::codegenoptions::DebugLineTablesOnly &&
SplitDWARFInlining))
DwarfFission = DwarfFissionKind::None;
}
}
// If a debugger tuning argument appeared, remember it.
+ bool HasDebuggerTuning = false;
if (const Arg *A =
Args.getLastArg(options::OPT_gTune_Group, options::OPT_ggdbN_Group)) {
+ HasDebuggerTuning = true;
if (checkDebugInfoOption(A, Args, D, TC)) {
if (A->getOption().matches(options::OPT_glldb))
DebuggerTuning = llvm::DebuggerKind::LLDB;
@@ -3948,29 +4313,23 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
}
// If a -gdwarf argument appeared, remember it.
- const Arg *GDwarfN = getDwarfNArg(Args);
bool EmitDwarf = false;
- if (GDwarfN) {
- if (checkDebugInfoOption(GDwarfN, Args, D, TC))
- EmitDwarf = true;
- else
- GDwarfN = nullptr;
- }
+ if (const Arg *A = getDwarfNArg(Args))
+ EmitDwarf = checkDebugInfoOption(A, Args, D, TC);
- if (const Arg *A = Args.getLastArg(options::OPT_gcodeview)) {
- if (checkDebugInfoOption(A, Args, D, TC))
- EmitCodeView = true;
- }
+ bool EmitCodeView = false;
+ if (const Arg *A = Args.getLastArg(options::OPT_gcodeview))
+ EmitCodeView = checkDebugInfoOption(A, Args, D, TC);
// If the user asked for debug info but did not explicitly specify -gcodeview
// or -gdwarf, ask the toolchain for the default format.
if (!EmitCodeView && !EmitDwarf &&
- DebugInfoKind != codegenoptions::NoDebugInfo) {
+ DebugInfoKind != llvm::codegenoptions::NoDebugInfo) {
switch (TC.getDefaultDebugFormat()) {
- case codegenoptions::DIF_CodeView:
+ case llvm::codegenoptions::DIF_CodeView:
EmitCodeView = true;
break;
- case codegenoptions::DIF_DWARF:
+ case llvm::codegenoptions::DIF_DWARF:
EmitDwarf = true;
break;
}
@@ -3979,31 +4338,19 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
unsigned RequestedDWARFVersion = 0; // DWARF version requested by the user
unsigned EffectiveDWARFVersion = 0; // DWARF version TC can generate. It may
// be lower than what the user wanted.
- unsigned DefaultDWARFVersion = ParseDebugDefaultVersion(TC, Args);
if (EmitDwarf) {
- // Start with the platform default DWARF version
- RequestedDWARFVersion = TC.GetDefaultDwarfVersion();
- assert(RequestedDWARFVersion &&
- "toolchain default DWARF version must be nonzero");
-
- // If the user specified a default DWARF version, that takes precedence
- // over the platform default.
- if (DefaultDWARFVersion)
- RequestedDWARFVersion = DefaultDWARFVersion;
-
- // Override with a user-specified DWARF version
- if (GDwarfN)
- if (auto ExplicitVersion = DwarfVersionNum(GDwarfN->getSpelling()))
- RequestedDWARFVersion = ExplicitVersion;
+ RequestedDWARFVersion = getDwarfVersion(TC, Args);
// Clamp effective DWARF version to the max supported by the toolchain.
EffectiveDWARFVersion =
std::min(RequestedDWARFVersion, TC.getMaxDwarfVersion());
+ } else {
+ Args.ClaimAllArgs(options::OPT_fdebug_default_version);
}
// -gline-directives-only supported only for the DWARF debug info.
if (RequestedDWARFVersion == 0 &&
- DebugInfoKind == codegenoptions::DebugDirectivesOnly)
- DebugInfoKind = codegenoptions::NoDebugInfo;
+ DebugInfoKind == llvm::codegenoptions::DebugDirectivesOnly)
+ DebugInfoKind = llvm::codegenoptions::NoDebugInfo;
// strict DWARF is set to false by default. But for DBX, we need it to be set
// as true by default.
@@ -4031,16 +4378,19 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
CmdArgs.push_back("-gno-column-info");
// FIXME: Move backend command line options to the module.
- // If -gline-tables-only or -gline-directives-only is the last option it wins.
- if (const Arg *A = Args.getLastArg(options::OPT_gmodules))
- if (checkDebugInfoOption(A, Args, D, TC)) {
- if (DebugInfoKind != codegenoptions::DebugLineTablesOnly &&
- DebugInfoKind != codegenoptions::DebugDirectivesOnly) {
- DebugInfoKind = codegenoptions::DebugInfoConstructor;
+ if (Args.hasFlag(options::OPT_gmodules, options::OPT_gno_modules, false)) {
+ // If -gline-tables-only or -gline-directives-only is the last option it
+ // wins.
+ if (checkDebugInfoOption(Args.getLastArg(options::OPT_gmodules), Args, D,
+ TC)) {
+ if (DebugInfoKind != llvm::codegenoptions::DebugLineTablesOnly &&
+ DebugInfoKind != llvm::codegenoptions::DebugDirectivesOnly) {
+ DebugInfoKind = llvm::codegenoptions::DebugInfoConstructor;
CmdArgs.push_back("-dwarf-ext-refs");
CmdArgs.push_back("-fmodule-format=obj");
}
}
+ }
if (T.isOSBinFormatELF() && SplitDWARFInlining)
CmdArgs.push_back("-fsplit-dwarf-inlining");
@@ -4057,13 +4407,13 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
if (const Arg *A = Args.getLastArg(options::OPT_fstandalone_debug))
(void)checkDebugInfoOption(A, Args, D, TC);
- if (DebugInfoKind == codegenoptions::LimitedDebugInfo ||
- DebugInfoKind == codegenoptions::DebugInfoConstructor) {
+ if (DebugInfoKind == llvm::codegenoptions::LimitedDebugInfo ||
+ DebugInfoKind == llvm::codegenoptions::DebugInfoConstructor) {
if (Args.hasFlag(options::OPT_fno_eliminate_unused_debug_types,
options::OPT_feliminate_unused_debug_types, false))
- DebugInfoKind = codegenoptions::UnusedTypeInfo;
+ DebugInfoKind = llvm::codegenoptions::UnusedTypeInfo;
else if (NeedFullDebug)
- DebugInfoKind = codegenoptions::FullDebugInfo;
+ DebugInfoKind = llvm::codegenoptions::FullDebugInfo;
}
if (Args.hasFlag(options::OPT_gembed_source, options::OPT_gno_embed_source,
@@ -4089,29 +4439,30 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
if (EmitCodeView) {
CmdArgs.push_back("-gcodeview");
- // Emit codeview type hashes if requested.
- if (Args.hasFlag(options::OPT_gcodeview_ghash,
- options::OPT_gno_codeview_ghash, false)) {
- CmdArgs.push_back("-gcodeview-ghash");
- }
- }
+ Args.addOptInFlag(CmdArgs, options::OPT_gcodeview_ghash,
+ options::OPT_gno_codeview_ghash);
- // Omit inline line tables if requested.
- if (Args.hasFlag(options::OPT_gno_inline_line_tables,
- options::OPT_ginline_line_tables, false)) {
- CmdArgs.push_back("-gno-inline-line-tables");
+ Args.addOptOutFlag(CmdArgs, options::OPT_gcodeview_command_line,
+ options::OPT_gno_codeview_command_line);
}
+ Args.addOptOutFlag(CmdArgs, options::OPT_ginline_line_tables,
+ options::OPT_gno_inline_line_tables);
+
// When emitting remarks, we need at least debug lines in the output.
if (willEmitRemarks(Args) &&
- DebugInfoKind <= codegenoptions::DebugDirectivesOnly)
- DebugInfoKind = codegenoptions::DebugLineTablesOnly;
+ DebugInfoKind <= llvm::codegenoptions::DebugDirectivesOnly)
+ DebugInfoKind = llvm::codegenoptions::DebugLineTablesOnly;
// Adjust the debug info kind for the given toolchain.
TC.adjustDebugInfoKind(DebugInfoKind, Args);
+ // On AIX, the debugger tuning option can be omitted if it is not explicitly
+ // set.
RenderDebugEnablingArgs(Args, CmdArgs, DebugInfoKind, EffectiveDWARFVersion,
- DebuggerTuning);
+ T.isOSAIX() && !HasDebuggerTuning
+ ? llvm::DebuggerKind::Default
+ : DebuggerTuning);
// -fdebug-macro turns on macro debug info generation.
if (Args.hasFlag(options::OPT_fdebug_macro, options::OPT_fno_debug_macro,
@@ -4133,12 +4484,27 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
options::OPT_gpubnames)
? "-gpubnames"
: "-ggnu-pubnames");
+ const auto *SimpleTemplateNamesArg =
+ Args.getLastArg(options::OPT_gsimple_template_names,
+ options::OPT_gno_simple_template_names);
+ bool ForwardTemplateParams = DebuggerTuning == llvm::DebuggerKind::SCE;
+ if (SimpleTemplateNamesArg &&
+ checkDebugInfoOption(SimpleTemplateNamesArg, Args, D, TC)) {
+ const auto &Opt = SimpleTemplateNamesArg->getOption();
+ if (Opt.matches(options::OPT_gsimple_template_names)) {
+ ForwardTemplateParams = true;
+ CmdArgs.push_back("-gsimple-template-names=simple");
+ }
+ }
- if (Args.hasFlag(options::OPT_fdebug_ranges_base_address,
- options::OPT_fno_debug_ranges_base_address, false)) {
- CmdArgs.push_back("-fdebug-ranges-base-address");
+ if (const Arg *A = Args.getLastArg(options::OPT_gsrc_hash_EQ)) {
+ StringRef v = A->getValue();
+ CmdArgs.push_back(Args.MakeArgString("-gsrc-hash=" + v));
}
+ Args.addOptInFlag(CmdArgs, options::OPT_fdebug_ranges_base_address,
+ options::OPT_fno_debug_ranges_base_address);
+
// -gdwarf-aranges turns on the emission of the aranges section in the
// backend.
// Always enabled for SCE tuning.
@@ -4150,9 +4516,8 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
CmdArgs.push_back("-generate-arange-section");
}
- if (Args.hasFlag(options::OPT_fforce_dwarf_frame,
- options::OPT_fno_force_dwarf_frame, false))
- CmdArgs.push_back("-fforce-dwarf-frame");
+ Args.addOptInFlag(CmdArgs, options::OPT_fforce_dwarf_frame,
+ options::OPT_fno_force_dwarf_frame);
if (Args.hasFlag(options::OPT_fdebug_types_section,
options::OPT_fno_debug_types_section, false)) {
@@ -4179,7 +4544,7 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
// Decide how to render forward declarations of template instantiations.
// SCE wants full descriptions, others just get them in the name.
- if (DebuggerTuning == llvm::DebuggerKind::SCE)
+ if (ForwardTemplateParams)
CmdArgs.push_back("-debug-forward-template-params");
// Do we need to explicitly import anonymous namespaces into the parent
@@ -4189,6 +4554,98 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
renderDwarfFormat(D, T, Args, CmdArgs, EffectiveDWARFVersion);
RenderDebugInfoCompressionArgs(Args, CmdArgs, D, TC);
+
+ // This controls whether or not we perform JustMyCode instrumentation.
+ if (Args.hasFlag(options::OPT_fjmc, options::OPT_fno_jmc, false)) {
+ if (TC.getTriple().isOSBinFormatELF() || D.IsCLMode()) {
+ if (DebugInfoKind >= llvm::codegenoptions::DebugInfoConstructor)
+ CmdArgs.push_back("-fjmc");
+ else if (D.IsCLMode())
+ D.Diag(clang::diag::warn_drv_jmc_requires_debuginfo) << "/JMC"
+ << "'/Zi', '/Z7'";
+ else
+ D.Diag(clang::diag::warn_drv_jmc_requires_debuginfo) << "-fjmc"
+ << "-g";
+ } else {
+ D.Diag(clang::diag::warn_drv_fjmc_for_elf_only);
+ }
+ }
+
+ // Add in -fdebug-compilation-dir if necessary.
+ const char *DebugCompilationDir =
+ addDebugCompDirArg(Args, CmdArgs, D.getVFS());
+
+ addDebugPrefixMapArg(D, TC, Args, CmdArgs);
+
+ // Add the output path to the object file for CodeView debug infos.
+ if (EmitCodeView && Output.isFilename())
+ addDebugObjectName(Args, CmdArgs, DebugCompilationDir,
+ Output.getFilename());
+}
+
+static void ProcessVSRuntimeLibrary(const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ unsigned RTOptionID = options::OPT__SLASH_MT;
+
+ if (Args.hasArg(options::OPT__SLASH_LDd))
+ // The /LDd option implies /MTd. The dependent lib part can be overridden,
+ // but defining _DEBUG is sticky.
+ RTOptionID = options::OPT__SLASH_MTd;
+
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_M_Group))
+ RTOptionID = A->getOption().getID();
+
+ if (Arg *A = Args.getLastArg(options::OPT_fms_runtime_lib_EQ)) {
+ RTOptionID = llvm::StringSwitch<unsigned>(A->getValue())
+ .Case("static", options::OPT__SLASH_MT)
+ .Case("static_dbg", options::OPT__SLASH_MTd)
+ .Case("dll", options::OPT__SLASH_MD)
+ .Case("dll_dbg", options::OPT__SLASH_MDd)
+ .Default(options::OPT__SLASH_MT);
+ }
+
+ StringRef FlagForCRT;
+ switch (RTOptionID) {
+ case options::OPT__SLASH_MD:
+ if (Args.hasArg(options::OPT__SLASH_LDd))
+ CmdArgs.push_back("-D_DEBUG");
+ CmdArgs.push_back("-D_MT");
+ CmdArgs.push_back("-D_DLL");
+ FlagForCRT = "--dependent-lib=msvcrt";
+ break;
+ case options::OPT__SLASH_MDd:
+ CmdArgs.push_back("-D_DEBUG");
+ CmdArgs.push_back("-D_MT");
+ CmdArgs.push_back("-D_DLL");
+ FlagForCRT = "--dependent-lib=msvcrtd";
+ break;
+ case options::OPT__SLASH_MT:
+ if (Args.hasArg(options::OPT__SLASH_LDd))
+ CmdArgs.push_back("-D_DEBUG");
+ CmdArgs.push_back("-D_MT");
+ CmdArgs.push_back("-flto-visibility-public-std");
+ FlagForCRT = "--dependent-lib=libcmt";
+ break;
+ case options::OPT__SLASH_MTd:
+ CmdArgs.push_back("-D_DEBUG");
+ CmdArgs.push_back("-D_MT");
+ CmdArgs.push_back("-flto-visibility-public-std");
+ FlagForCRT = "--dependent-lib=libcmtd";
+ break;
+ default:
+ llvm_unreachable("Unexpected option ID.");
+ }
+
+ if (Args.hasArg(options::OPT_fms_omit_default_lib)) {
+ CmdArgs.push_back("-D_VC_NODEFAULTLIB");
+ } else {
+ CmdArgs.push_back(FlagForCRT.data());
+
+ // This provides POSIX compatibility (maps 'open' to '_open'), which most
+ // users want. The /Za flag to cl.exe turns this off, but it's not
+ // implemented in clang.
+ CmdArgs.push_back("--dependent-lib=oldnames");
+ }
}
void Clang::ConstructJob(Compilation &C, const JobAction &JA,
@@ -4204,50 +4661,57 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
const Driver &D = TC.getDriver();
ArgStringList CmdArgs;
- // Check number of inputs for sanity. We need at least one input.
assert(Inputs.size() >= 1 && "Must have at least one input.");
// CUDA/HIP compilation may have multiple inputs (source file + results of
// device-side compilations). OpenMP device jobs also take the host IR as a
// second input. Module precompilation accepts a list of header files to
- // include as part of the module. All other jobs are expected to have exactly
- // one input.
+ // include as part of the module. API extraction accepts a list of header
+ // files whose API information is emitted in the output. All other jobs are
+ // expected to have exactly one input.
bool IsCuda = JA.isOffloading(Action::OFK_Cuda);
bool IsCudaDevice = JA.isDeviceOffloading(Action::OFK_Cuda);
bool IsHIP = JA.isOffloading(Action::OFK_HIP);
bool IsHIPDevice = JA.isDeviceOffloading(Action::OFK_HIP);
bool IsOpenMPDevice = JA.isDeviceOffloading(Action::OFK_OpenMP);
- bool IsHeaderModulePrecompile = isa<HeaderModulePrecompileJobAction>(JA);
+ bool IsExtractAPI = isa<ExtractAPIJobAction>(JA);
bool IsDeviceOffloadAction = !(JA.isDeviceOffloading(Action::OFK_None) ||
JA.isDeviceOffloading(Action::OFK_Host));
+ bool IsHostOffloadingAction =
+ JA.isHostOffloading(Action::OFK_OpenMP) ||
+ (JA.isHostOffloading(C.getActiveOffloadKinds()) &&
+ Args.hasFlag(options::OPT_offload_new_driver,
+ options::OPT_no_offload_new_driver, false));
+
+ bool IsRDCMode =
+ Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc, false);
bool IsUsingLTO = D.isUsingLTO(IsDeviceOffloadAction);
auto LTOMode = D.getLTOMode(IsDeviceOffloadAction);
- // A header module compilation doesn't have a main input file, so invent a
- // fake one as a placeholder.
- const char *ModuleName = [&]{
- auto *ModuleNameArg = Args.getLastArg(options::OPT_fmodule_name_EQ);
- return ModuleNameArg ? ModuleNameArg->getValue() : "";
- }();
- InputInfo HeaderModuleInput(Inputs[0].getType(), ModuleName, ModuleName);
+ // Extract API doesn't have a main input file, so invent a fake one as a
+ // placeholder.
+ InputInfo ExtractAPIPlaceholderInput(Inputs[0].getType(), "extract-api",
+ "extract-api");
const InputInfo &Input =
- IsHeaderModulePrecompile ? HeaderModuleInput : Inputs[0];
+ IsExtractAPI ? ExtractAPIPlaceholderInput : Inputs[0];
- InputInfoList ModuleHeaderInputs;
+ InputInfoList ExtractAPIInputs;
+ InputInfoList HostOffloadingInputs;
const InputInfo *CudaDeviceInput = nullptr;
const InputInfo *OpenMPDeviceInput = nullptr;
for (const InputInfo &I : Inputs) {
- if (&I == &Input) {
- // This is the primary input.
- } else if (IsHeaderModulePrecompile &&
- types::getPrecompiledType(I.getType()) == types::TY_PCH) {
- types::ID Expected = HeaderModuleInput.getType();
- if (I.getType() != Expected) {
- D.Diag(diag::err_drv_module_header_wrong_kind)
+ if (&I == &Input || I.getType() == types::TY_Nothing) {
+ // This is the primary input or contains nothing.
+ } else if (IsExtractAPI) {
+ auto ExpectedInputType = ExtractAPIPlaceholderInput.getType();
+ if (I.getType() != ExpectedInputType) {
+ D.Diag(diag::err_drv_extract_api_wrong_kind)
<< I.getFilename() << types::getTypeName(I.getType())
- << types::getTypeName(Expected);
+ << types::getTypeName(ExpectedInputType);
}
- ModuleHeaderInputs.push_back(I);
+ ExtractAPIInputs.push_back(I);
+ } else if (IsHostOffloadingAction) {
+ HostOffloadingInputs.push_back(I);
} else if ((IsCuda || IsHIP) && !CudaDeviceInput) {
CudaDeviceInput = &I;
} else if (IsOpenMPDevice && !OpenMPDeviceInput) {
@@ -4317,12 +4781,36 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(
Twine("-target-sdk-version=") +
CudaVersionToString(CTC->CudaInstallation.version())));
+ // Unsized function arguments used for variadics were introduced in
+ // CUDA-9.0. We still do not support generating code that actually uses
+ // variadic arguments yet, but we do need to allow parsing them as
+ // recent CUDA headers rely on that.
+ // https://github.com/llvm/llvm-project/issues/58410
+ if (CTC->CudaInstallation.version() >= CudaVersion::CUDA_90)
+ CmdArgs.push_back("-fcuda-allow-variadic-functions");
}
}
CmdArgs.push_back("-aux-triple");
CmdArgs.push_back(Args.MakeArgString(NormalizedTriple));
+
+ if (JA.isDeviceOffloading(Action::OFK_HIP) &&
+ getToolChain().getTriple().isAMDGPU()) {
+ // Device side compilation printf
+ if (Args.getLastArg(options::OPT_mprintf_kind_EQ)) {
+ CmdArgs.push_back(Args.MakeArgString(
+ "-mprintf-kind=" +
+ Args.getLastArgValue(options::OPT_mprintf_kind_EQ)));
+ // Force compiler error on invalid conversion specifiers
+ CmdArgs.push_back(
+ Args.MakeArgString("-Werror=format-invalid-specifier"));
+ }
+ }
}
+ // Unconditionally claim the printf option now to avoid unused diagnostic.
+ if (const Arg *PF = Args.getLastArg(options::OPT_mprintf_kind_EQ))
+ PF->claim();
+
if (Args.hasFlag(options::OPT_fsycl, options::OPT_fno_sycl, false)) {
CmdArgs.push_back("-fsycl-is-device");
@@ -4361,12 +4849,20 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
TC.addClangWarningOptions(CmdArgs);
// FIXME: Subclass ToolChain for SPIR and move this to addClangWarningOptions.
- if (Triple.isSPIR())
+ if (Triple.isSPIR() || Triple.isSPIRV())
CmdArgs.push_back("-Wspir-compat");
// Select the appropriate action.
RewriteKind rewriteKind = RK_None;
+ bool UnifiedLTO = false;
+ if (IsUsingLTO) {
+ UnifiedLTO = Args.hasFlag(options::OPT_funified_lto,
+ options::OPT_fno_unified_lto, Triple.isPS());
+ if (UnifiedLTO)
+ CmdArgs.push_back("-funified-lto");
+ }
+
// If CollectArgsForIntegratedAssembler() isn't called below, claim the args
// it claims when not running an assembler. Otherwise, clang would emit
// "argument unused" warnings for assembler flags when e.g. adding "-E" to
@@ -4395,6 +4891,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
Args.ClaimAllArgs(options::OPT_Wa_COMMA);
Args.ClaimAllArgs(options::OPT_Xassembler);
+ Args.ClaimAllArgs(options::OPT_femit_dwarf_unwind_EQ);
}
if (isa<AnalyzeJobAction>(JA)) {
@@ -4410,6 +4907,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_rewrite_objc) &&
!Args.hasArg(options::OPT_g_Group))
CmdArgs.push_back("-P");
+ else if (JA.getType() == types::TY_PP_CXXHeaderUnit)
+ CmdArgs.push_back("-fdirectives-only");
}
} else if (isa<AssembleJobAction>(JA)) {
CmdArgs.push_back("-emit-obj");
@@ -4422,13 +4921,22 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (JA.getType() == types::TY_Nothing)
CmdArgs.push_back("-fsyntax-only");
else if (JA.getType() == types::TY_ModuleFile)
- CmdArgs.push_back(IsHeaderModulePrecompile
- ? "-emit-header-module"
- : "-emit-module-interface");
+ CmdArgs.push_back("-emit-module-interface");
+ else if (JA.getType() == types::TY_HeaderUnit)
+ CmdArgs.push_back("-emit-header-unit");
else
CmdArgs.push_back("-emit-pch");
} else if (isa<VerifyPCHJobAction>(JA)) {
CmdArgs.push_back("-verify-pch");
+ } else if (isa<ExtractAPIJobAction>(JA)) {
+ assert(JA.getType() == types::TY_API_INFO &&
+ "Extract API actions must generate a API information.");
+ CmdArgs.push_back("-extract-api");
+ if (Arg *ProductNameArg = Args.getLastArg(options::OPT_product_name_EQ))
+ ProductNameArg->render(Args, CmdArgs);
+ if (Arg *ExtractAPIIgnoresFileArg =
+ Args.getLastArg(options::OPT_extract_api_ignores_EQ))
+ ExtractAPIIgnoresFileArg->render(Args, CmdArgs);
} else {
assert((isa<CompileJobAction>(JA) || isa<BackendJobAction>(JA)) &&
"Invalid action for clang tool.");
@@ -4479,38 +4987,47 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-emit-llvm-uselists");
if (IsUsingLTO) {
- if (!IsDeviceOffloadAction) {
- if (Args.hasArg(options::OPT_flto))
- CmdArgs.push_back("-flto");
- else {
- if (D.getLTOMode() == LTOK_Thin)
- CmdArgs.push_back("-flto=thin");
- else
- CmdArgs.push_back("-flto=full");
- }
- CmdArgs.push_back("-flto-unit");
- } else if (Triple.isAMDGPU()) {
- // Only AMDGPU supports device-side LTO
- assert(LTOMode == LTOK_Full || LTOMode == LTOK_Thin);
- CmdArgs.push_back(Args.MakeArgString(
- Twine("-flto=") + (LTOMode == LTOK_Thin ? "thin" : "full")));
- CmdArgs.push_back("-flto-unit");
- } else {
+ if (IsDeviceOffloadAction && !JA.isDeviceOffloading(Action::OFK_OpenMP) &&
+ !Args.hasFlag(options::OPT_offload_new_driver,
+ options::OPT_no_offload_new_driver, false) &&
+ !Triple.isAMDGPU()) {
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< Args.getLastArg(options::OPT_foffload_lto,
options::OPT_foffload_lto_EQ)
->getAsString(Args)
<< Triple.getTriple();
+ } else if (Triple.isNVPTX() && !IsRDCMode &&
+ JA.isDeviceOffloading(Action::OFK_Cuda)) {
+ D.Diag(diag::err_drv_unsupported_opt_for_language_mode)
+ << Args.getLastArg(options::OPT_foffload_lto,
+ options::OPT_foffload_lto_EQ)
+ ->getAsString(Args)
+ << "-fno-gpu-rdc";
+ } else {
+ assert(LTOMode == LTOK_Full || LTOMode == LTOK_Thin);
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine("-flto=") + (LTOMode == LTOK_Thin ? "thin" : "full")));
+ // PS4 uses the legacy LTO API, which does not support some of the
+ // features enabled by -flto-unit.
+ if (!RawTriple.isPS4() ||
+ (D.getLTOMode() == LTOK_Full) || !UnifiedLTO)
+ CmdArgs.push_back("-flto-unit");
}
}
}
+ Args.AddLastArg(CmdArgs, options::OPT_dumpdir);
+
if (const Arg *A = Args.getLastArg(options::OPT_fthinlto_index_EQ)) {
if (!types::isLLVMIR(Input.getType()))
D.Diag(diag::err_drv_arg_requires_bitcode_input) << A->getAsString(Args);
Args.AddLastArg(CmdArgs, options::OPT_fthinlto_index_EQ);
}
+ if (Triple.isPPC())
+ Args.addOptInFlag(CmdArgs, options::OPT_mregnames,
+ options::OPT_mno_regnames);
+
if (Args.getLastArg(options::OPT_fthin_link_bitcode_EQ))
Args.AddLastArg(CmdArgs, options::OPT_fthin_link_bitcode_EQ);
@@ -4524,6 +5041,18 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
!MemProfArg->getOption().matches(options::OPT_fno_memory_profile))
MemProfArg->render(Args, CmdArgs);
+ if (auto *MemProfUseArg =
+ Args.getLastArg(options::OPT_fmemory_profile_use_EQ)) {
+ if (MemProfArg)
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << MemProfUseArg->getAsString(Args) << MemProfArg->getAsString(Args);
+ if (auto *PGOInstrArg = Args.getLastArg(options::OPT_fprofile_generate,
+ options::OPT_fprofile_generate_EQ))
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << MemProfUseArg->getAsString(Args) << PGOInstrArg->getAsString(Args);
+ MemProfUseArg->render(Args, CmdArgs);
+ }
+
// Embed-bitcode option.
// Only white-listed flags below are allowed to be embedded.
if (C.getDriver().embedBitcodeInObject() && !IsUsingLTO &&
@@ -4538,7 +5067,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// reject options that shouldn't be supported in bitcode
// also reject kernel/kext
- static const constexpr unsigned kBitcodeOptionBlacklist[] = {
+ static const constexpr unsigned kBitcodeOptionIgnorelist[] = {
options::OPT_mkernel,
options::OPT_fapple_kext,
options::OPT_ffunction_sections,
@@ -4582,14 +5111,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_mllvm,
};
for (const auto &A : Args)
- if (llvm::find(kBitcodeOptionBlacklist, A->getOption().getID()) !=
- std::end(kBitcodeOptionBlacklist))
+ if (llvm::is_contained(kBitcodeOptionIgnorelist, A->getOption().getID()))
D.Diag(diag::err_drv_unsupported_embed_bitcode) << A->getSpelling();
// Render the CodeGen options that need to be passed.
- if (!Args.hasFlag(options::OPT_foptimize_sibling_calls,
- options::OPT_fno_optimize_sibling_calls))
- CmdArgs.push_back("-mdisable-tail-calls");
+ Args.addOptOutFlag(CmdArgs, options::OPT_foptimize_sibling_calls,
+ options::OPT_fno_optimize_sibling_calls);
RenderFloatingPointOptions(TC, D, isOptimizationLevelFast(Args), Args,
CmdArgs, JA);
@@ -4600,7 +5127,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::arm:
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
- RenderARMABI(Triple, Args, CmdArgs);
+ RenderARMABI(D, Triple, Args, CmdArgs);
break;
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_32:
@@ -4639,7 +5166,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
C.addCommand(std::make_unique<Command>(
JA, *this, ResponseFileSupport::AtFileUTF8(), D.getClangProgramPath(),
- CmdArgs, Inputs, Output));
+ CmdArgs, Inputs, Output, D.getPrependArg()));
return;
}
@@ -4651,6 +5178,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// cleanup.
if (!C.isForDiagnostics())
CmdArgs.push_back("-disable-free");
+ CmdArgs.push_back("-clear-ast-before-backend");
#ifdef NDEBUG
const bool IsAssertBuild = false;
@@ -4658,18 +5186,19 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
const bool IsAssertBuild = true;
#endif
- // Disable the verification pass in -asserts builds.
- if (!IsAssertBuild)
+ // Disable the verification pass in asserts builds unless otherwise specified.
+ if (Args.hasFlag(options::OPT_fno_verify_intermediate_code,
+ options::OPT_fverify_intermediate_code, !IsAssertBuild)) {
CmdArgs.push_back("-disable-llvm-verifier");
+ }
// Discard value names in assert builds unless otherwise specified.
if (Args.hasFlag(options::OPT_fdiscard_value_names,
options::OPT_fno_discard_value_names, !IsAssertBuild)) {
if (Args.hasArg(options::OPT_fdiscard_value_names) &&
- (std::any_of(Inputs.begin(), Inputs.end(),
- [](const clang::driver::InputInfo &II) {
- return types::isLLVMIR(II.getType());
- }))) {
+ llvm::any_of(Inputs, [](const clang::driver::InputInfo &II) {
+ return types::isLLVMIR(II.getType());
+ })) {
D.Diag(diag::warn_ignoring_fdiscard_for_bitcode);
}
CmdArgs.push_back("-discard-value-names");
@@ -4699,13 +5228,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Since we can't access frontend flags through hasArg, let's manually iterate
// through them.
bool FoundAnalyzerConfig = false;
- for (auto Arg : Args.filtered(options::OPT_Xclang))
+ for (auto *Arg : Args.filtered(options::OPT_Xclang))
if (StringRef(Arg->getValue()) == "-analyzer-config") {
FoundAnalyzerConfig = true;
break;
}
if (!FoundAnalyzerConfig)
- for (auto Arg : Args.filtered(options::OPT_Xanalyzer))
+ for (auto *Arg : Args.filtered(options::OPT_Xanalyzer))
if (StringRef(Arg->getValue()) == "-analyzer-config") {
FoundAnalyzerConfig = true;
break;
@@ -4722,10 +5251,74 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(std::to_string(FunctionAlignment)));
}
+ // We support -falign-loops=N where N is a power of 2. GCC supports more
+ // forms.
+ if (const Arg *A = Args.getLastArg(options::OPT_falign_loops_EQ)) {
+ unsigned Value = 0;
+ if (StringRef(A->getValue()).getAsInteger(10, Value) || Value > 65536)
+ TC.getDriver().Diag(diag::err_drv_invalid_int_value)
+ << A->getAsString(Args) << A->getValue();
+ else if (Value & (Value - 1))
+ TC.getDriver().Diag(diag::err_drv_alignment_not_power_of_two)
+ << A->getAsString(Args) << A->getValue();
+ // Treat =0 as unspecified (use the target preference).
+ if (Value)
+ CmdArgs.push_back(Args.MakeArgString("-falign-loops=" +
+ Twine(std::min(Value, 65536u))));
+ }
+
+ if (Triple.isOSzOS()) {
+ // On z/OS some of the system header feature macros need to
+ // be defined to enable most cross platform projects to build
+ // successfully. Ths include the libc++ library. A
+ // complicating factor is that users can define these
+ // macros to the same or different values. We need to add
+ // the definition for these macros to the compilation command
+ // if the user hasn't already defined them.
+
+ auto findMacroDefinition = [&](const std::string &Macro) {
+ auto MacroDefs = Args.getAllArgValues(options::OPT_D);
+ return llvm::any_of(MacroDefs, [&](const std::string &M) {
+ return M == Macro || M.find(Macro + '=') != std::string::npos;
+ });
+ };
+
+ // _UNIX03_WITHDRAWN is required for libcxx & porting.
+ if (!findMacroDefinition("_UNIX03_WITHDRAWN"))
+ CmdArgs.push_back("-D_UNIX03_WITHDRAWN");
+ // _OPEN_DEFAULT is required for XL compat
+ if (!findMacroDefinition("_OPEN_DEFAULT"))
+ CmdArgs.push_back("-D_OPEN_DEFAULT");
+ if (D.CCCIsCXX() || types::isCXX(Input.getType())) {
+ // _XOPEN_SOURCE=600 is required for libcxx.
+ if (!findMacroDefinition("_XOPEN_SOURCE"))
+ CmdArgs.push_back("-D_XOPEN_SOURCE=600");
+ }
+ }
+
llvm::Reloc::Model RelocationModel;
unsigned PICLevel;
bool IsPIE;
std::tie(RelocationModel, PICLevel, IsPIE) = ParsePICArgs(TC, Args);
+ Arg *LastPICDataRelArg =
+ Args.getLastArg(options::OPT_mno_pic_data_is_text_relative,
+ options::OPT_mpic_data_is_text_relative);
+ bool NoPICDataIsTextRelative = false;
+ if (LastPICDataRelArg) {
+ if (LastPICDataRelArg->getOption().matches(
+ options::OPT_mno_pic_data_is_text_relative)) {
+ NoPICDataIsTextRelative = true;
+ if (!PICLevel)
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << "-mno-pic-data-is-text-relative"
+ << "-fpic/-fpie";
+ }
+ if (!Triple.isSystemZ())
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << (NoPICDataIsTextRelative ? "-mno-pic-data-is-text-relative"
+ : "-mpic-data-is-text-relative")
+ << RawTriple.str();
+ }
bool IsROPI = RelocationModel == llvm::Reloc::ROPI ||
RelocationModel == llvm::Reloc::ROPI_RWPI;
@@ -4754,6 +5347,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(PICLevel == 1 ? "1" : "2");
if (IsPIE)
CmdArgs.push_back("-pic-is-pie");
+ if (NoPICDataIsTextRelative)
+ CmdArgs.push_back("-mcmodel=medium");
}
if (RelocationModel == llvm::Reloc::ROPI ||
@@ -4810,37 +5405,41 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- Args.AddLastArg(CmdArgs, options::OPT_fveclib);
+ if (Arg *A = Args.getLastArg(options::OPT_fveclib)) {
+ StringRef Name = A->getValue();
+ if (Name == "SVML") {
+ if (Triple.getArch() != llvm::Triple::x86 &&
+ Triple.getArch() != llvm::Triple::x86_64)
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << Name << Triple.getArchName();
+ } else if (Name == "LIBMVEC-X86") {
+ if (Triple.getArch() != llvm::Triple::x86 &&
+ Triple.getArch() != llvm::Triple::x86_64)
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << Name << Triple.getArchName();
+ } else if (Name == "SLEEF" || Name == "ArmPL") {
+ if (Triple.getArch() != llvm::Triple::aarch64 &&
+ Triple.getArch() != llvm::Triple::aarch64_be)
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << Name << Triple.getArchName();
+ }
+ A->render(Args, CmdArgs);
+ }
if (Args.hasFlag(options::OPT_fmerge_all_constants,
options::OPT_fno_merge_all_constants, false))
CmdArgs.push_back("-fmerge-all-constants");
- if (Args.hasFlag(options::OPT_fno_delete_null_pointer_checks,
- options::OPT_fdelete_null_pointer_checks, false))
- CmdArgs.push_back("-fno-delete-null-pointer-checks");
+ Args.addOptOutFlag(CmdArgs, options::OPT_fdelete_null_pointer_checks,
+ options::OPT_fno_delete_null_pointer_checks);
// LLVM Code Generator Options.
- for (const Arg *A : Args.filtered(options::OPT_frewrite_map_file_EQ)) {
- StringRef Map = A->getValue();
- if (!llvm::sys::fs::exists(Map)) {
- D.Diag(diag::err_drv_no_such_file) << Map;
- } else {
- A->render(Args, CmdArgs);
- A->claim();
- }
- }
-
- if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ_vec_extabi,
- options::OPT_mabi_EQ_vec_default)) {
- if (!Triple.isOSAIX())
+ if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ_quadword_atomics)) {
+ if (!Triple.isOSAIX() || Triple.isPPC32())
D.Diag(diag::err_drv_unsupported_opt_for_target)
- << A->getSpelling() << RawTriple.str();
- if (A->getOption().getID() == options::OPT_mabi_EQ_vec_extabi)
- CmdArgs.push_back("-mabi=vec-extabi");
- else
- CmdArgs.push_back("-mabi=vec-default");
+ << A->getSpelling() << RawTriple.str();
+ CmdArgs.push_back("-mabi=quadword-atomics");
}
if (Arg *A = Args.getLastArg(options::OPT_mlong_double_128)) {
@@ -4852,27 +5451,21 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
if (Arg *A = Args.getLastArg(options::OPT_Wframe_larger_than_EQ)) {
- StringRef v = A->getValue();
- // FIXME: Validate the argument here so we don't produce meaningless errors
- // about -fwarn-stack-size=.
- if (v.empty())
- D.Diag(diag::err_drv_missing_argument) << A->getSpelling() << 1;
+ StringRef V = A->getValue(), V1 = V;
+ unsigned Size;
+ if (V1.consumeInteger(10, Size) || !V1.empty())
+ D.Diag(diag::err_drv_invalid_argument_to_option)
+ << V << A->getOption().getName();
else
- CmdArgs.push_back(Args.MakeArgString("-fwarn-stack-size=" + v));
- A->claim();
+ CmdArgs.push_back(Args.MakeArgString("-fwarn-stack-size=" + V));
}
- if (!Args.hasFlag(options::OPT_fjump_tables, options::OPT_fno_jump_tables,
- true))
- CmdArgs.push_back("-fno-jump-tables");
-
- if (Args.hasFlag(options::OPT_fprofile_sample_accurate,
- options::OPT_fno_profile_sample_accurate, false))
- CmdArgs.push_back("-fprofile-sample-accurate");
-
- if (!Args.hasFlag(options::OPT_fpreserve_as_comments,
- options::OPT_fno_preserve_as_comments, true))
- CmdArgs.push_back("-fno-preserve-as-comments");
+ Args.addOptOutFlag(CmdArgs, options::OPT_fjump_tables,
+ options::OPT_fno_jump_tables);
+ Args.addOptInFlag(CmdArgs, options::OPT_fprofile_sample_accurate,
+ options::OPT_fno_profile_sample_accurate);
+ Args.addOptOutFlag(CmdArgs, options::OPT_fpreserve_as_comments,
+ options::OPT_fno_preserve_as_comments);
if (Arg *A = Args.getLastArg(options::OPT_mregparm_EQ)) {
CmdArgs.push_back("-mregparm");
@@ -4905,8 +5498,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- if (Args.hasFlag(options::OPT_mrtd, options::OPT_mno_rtd, false))
- CmdArgs.push_back("-fdefault-calling-conv=stdcall");
+ if (Args.hasFlag(options::OPT_mrtd, options::OPT_mno_rtd, false)) {
+ if (Triple.getArch() == llvm::Triple::m68k)
+ CmdArgs.push_back("-fdefault-calling-conv=rtdcall");
+ else
+ CmdArgs.push_back("-fdefault-calling-conv=stdcall");
+ }
if (Args.hasArg(options::OPT_fenable_matrix)) {
// enable-matrix is needed by both the LangOpts and by LLVM.
@@ -4932,9 +5529,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
assert(FPKeepKindStr && "unknown FramePointerKind");
CmdArgs.push_back(FPKeepKindStr);
- if (!Args.hasFlag(options::OPT_fzero_initialized_in_bss,
- options::OPT_fno_zero_initialized_in_bss, true))
- CmdArgs.push_back("-fno-zero-initialized-in-bss");
+ Args.addOptOutFlag(CmdArgs, options::OPT_fzero_initialized_in_bss,
+ options::OPT_fno_zero_initialized_in_bss);
bool OFastEnabled = isOptimizationLevelFast(Args);
// If -Ofast is the optimization level, then -fstrict-aliasing should be
@@ -4948,31 +5544,22 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_strict_aliasing, TBAAOnByDefault))
CmdArgs.push_back("-relaxed-aliasing");
if (!Args.hasFlag(options::OPT_fstruct_path_tbaa,
- options::OPT_fno_struct_path_tbaa))
+ options::OPT_fno_struct_path_tbaa, true))
CmdArgs.push_back("-no-struct-path-tbaa");
- if (Args.hasFlag(options::OPT_fstrict_enums, options::OPT_fno_strict_enums,
- false))
- CmdArgs.push_back("-fstrict-enums");
- if (!Args.hasFlag(options::OPT_fstrict_return, options::OPT_fno_strict_return,
- true))
- CmdArgs.push_back("-fno-strict-return");
- if (Args.hasFlag(options::OPT_fallow_editor_placeholders,
- options::OPT_fno_allow_editor_placeholders, false))
- CmdArgs.push_back("-fallow-editor-placeholders");
- if (Args.hasFlag(options::OPT_fstrict_vtable_pointers,
- options::OPT_fno_strict_vtable_pointers,
- false))
- CmdArgs.push_back("-fstrict-vtable-pointers");
- if (Args.hasFlag(options::OPT_fforce_emit_vtables,
- options::OPT_fno_force_emit_vtables,
- false))
- CmdArgs.push_back("-fforce-emit-vtables");
- if (!Args.hasFlag(options::OPT_foptimize_sibling_calls,
- options::OPT_fno_optimize_sibling_calls))
- CmdArgs.push_back("-mdisable-tail-calls");
- if (Args.hasFlag(options::OPT_fno_escaping_block_tail_calls,
- options::OPT_fescaping_block_tail_calls, false))
- CmdArgs.push_back("-fno-escaping-block-tail-calls");
+ Args.addOptInFlag(CmdArgs, options::OPT_fstrict_enums,
+ options::OPT_fno_strict_enums);
+ Args.addOptOutFlag(CmdArgs, options::OPT_fstrict_return,
+ options::OPT_fno_strict_return);
+ Args.addOptInFlag(CmdArgs, options::OPT_fallow_editor_placeholders,
+ options::OPT_fno_allow_editor_placeholders);
+ Args.addOptInFlag(CmdArgs, options::OPT_fstrict_vtable_pointers,
+ options::OPT_fno_strict_vtable_pointers);
+ Args.addOptInFlag(CmdArgs, options::OPT_fforce_emit_vtables,
+ options::OPT_fno_force_emit_vtables);
+ Args.addOptOutFlag(CmdArgs, options::OPT_foptimize_sibling_calls,
+ options::OPT_fno_optimize_sibling_calls);
+ Args.addOptOutFlag(CmdArgs, options::OPT_fescaping_block_tail_calls,
+ options::OPT_fno_escaping_block_tail_calls);
Args.AddLastArg(CmdArgs, options::OPT_ffine_grained_bitfield_accesses,
options::OPT_fno_fine_grained_bitfield_accesses);
@@ -4980,10 +5567,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fexperimental_relative_cxx_abi_vtables,
options::OPT_fno_experimental_relative_cxx_abi_vtables);
+ Args.AddLastArg(CmdArgs, options::OPT_fexperimental_omit_vtable_rtti,
+ options::OPT_fno_experimental_omit_vtable_rtti);
+
// Handle segmented stacks.
- if (Args.hasFlag(options::OPT_fsplit_stack, options::OPT_fno_split_stack,
- false))
- CmdArgs.push_back("-fsplit-stack");
+ Args.addOptInFlag(CmdArgs, options::OPT_fsplit_stack,
+ options::OPT_fno_split_stack);
// -fprotect-parens=0 is default.
if (Args.hasFlag(options::OPT_fprotect_parens,
@@ -5063,9 +5652,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
// Enable -mconstructor-aliases except on darwin, where we have to work around
- // a linker bug (see <rdar://problem/7651567>), and CUDA/AMDGPU device code,
- // where aliases aren't supported.
- if (!RawTriple.isOSDarwin() && !RawTriple.isNVPTX() && !RawTriple.isAMDGPU())
+ // a linker bug (see https://openradar.appspot.com/7198997), and CUDA device
+ // code, where aliases aren't supported.
+ if (!RawTriple.isOSDarwin() && !RawTriple.isNVPTX())
CmdArgs.push_back("-mconstructor-aliases");
// Darwin's kernel doesn't support guard variables; just die if we
@@ -5078,14 +5667,28 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-mms-bitfields");
}
+ if (Triple.isWindowsGNUEnvironment()) {
+ Args.addOptOutFlag(CmdArgs, options::OPT_fauto_import,
+ options::OPT_fno_auto_import);
+ }
+
+ if (Args.hasFlag(options::OPT_fms_volatile, options::OPT_fno_ms_volatile,
+ Triple.isX86() && D.IsCLMode()))
+ CmdArgs.push_back("-fms-volatile");
+
// Non-PIC code defaults to -fdirect-access-external-data while PIC code
// defaults to -fno-direct-access-external-data. Pass the option if different
// from the default.
if (Arg *A = Args.getLastArg(options::OPT_fdirect_access_external_data,
- options::OPT_fno_direct_access_external_data))
+ options::OPT_fno_direct_access_external_data)) {
if (A->getOption().matches(options::OPT_fdirect_access_external_data) !=
(PICLevel == 0))
A->render(Args, CmdArgs);
+ } else if (PICLevel == 0 && Triple.isLoongArch()) {
+ // Some targets default to -fno-direct-access-external-data even for
+ // -fno-pic.
+ CmdArgs.push_back("-fno-direct-access-external-data");
+ }
if (Args.hasFlag(options::OPT_fno_plt, options::OPT_fplt, false)) {
CmdArgs.push_back("-fno-plt");
@@ -5100,19 +5703,30 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Freestanding)
CmdArgs.push_back("-ffreestanding");
+ Args.AddLastArg(CmdArgs, options::OPT_fno_knr_functions);
+
// This is a coarse approximation of what llvm-gcc actually does, both
// -fasynchronous-unwind-tables and -fnon-call-exceptions interact in more
// complicated ways.
+ auto SanitizeArgs = TC.getSanitizerArgs(Args);
+
+ bool IsAsyncUnwindTablesDefault =
+ TC.getDefaultUnwindTableLevel(Args) == ToolChain::UnwindTableLevel::Asynchronous;
+ bool IsSyncUnwindTablesDefault =
+ TC.getDefaultUnwindTableLevel(Args) == ToolChain::UnwindTableLevel::Synchronous;
+
+ bool AsyncUnwindTables = Args.hasFlag(
+ options::OPT_fasynchronous_unwind_tables,
+ options::OPT_fno_asynchronous_unwind_tables,
+ (IsAsyncUnwindTablesDefault || SanitizeArgs.needsUnwindTables()) &&
+ !Freestanding);
bool UnwindTables =
- Args.hasFlag(options::OPT_fasynchronous_unwind_tables,
- options::OPT_fno_asynchronous_unwind_tables,
- (TC.IsUnwindTablesDefault(Args) ||
- TC.getSanitizerArgs().needsUnwindTables()) &&
- !Freestanding);
- UnwindTables = Args.hasFlag(options::OPT_funwind_tables,
- options::OPT_fno_unwind_tables, UnwindTables);
- if (UnwindTables)
- CmdArgs.push_back("-munwind-tables");
+ Args.hasFlag(options::OPT_funwind_tables, options::OPT_fno_unwind_tables,
+ IsSyncUnwindTablesDefault && !Freestanding);
+ if (AsyncUnwindTables)
+ CmdArgs.push_back("-funwind-tables=2");
+ else if (UnwindTables)
+ CmdArgs.push_back("-funwind-tables=1");
// Prepare `-aux-target-cpu` and `-aux-target-feature` unless
// `--gpu-use-aux-triple-only` is specified.
@@ -5121,7 +5735,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
const ArgList &HostArgs =
C.getArgsForToolChain(nullptr, StringRef(), Action::OFK_None);
std::string HostCPU =
- getCPUName(HostArgs, *TC.getAuxTriple(), /*FromAs*/ false);
+ getCPUName(D, HostArgs, *TC.getAuxTriple(), /*FromAs*/ false);
if (!HostCPU.empty()) {
CmdArgs.push_back("-aux-target-cpu");
CmdArgs.push_back(Args.MakeArgString(HostCPU));
@@ -5132,20 +5746,78 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
TC.addClangTargetOptions(Args, CmdArgs, JA.getOffloadingDeviceKind());
- // FIXME: Handle -mtune=.
- (void)Args.hasArg(options::OPT_mtune_EQ);
-
if (Arg *A = Args.getLastArg(options::OPT_mcmodel_EQ)) {
StringRef CM = A->getValue();
- if (CM == "small" || CM == "kernel" || CM == "medium" || CM == "large" ||
- CM == "tiny") {
- if (Triple.isOSAIX() && CM == "medium")
- CmdArgs.push_back("-mcmodel=large");
- else
- A->render(Args, CmdArgs);
+ bool Ok = false;
+ if (Triple.isOSAIX() && CM == "medium")
+ CM = "large";
+ if (Triple.isAArch64(64)) {
+ Ok = CM == "tiny" || CM == "small" || CM == "large";
+ if (CM == "large" && RelocationModel != llvm::Reloc::Static)
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << A->getAsString(Args) << "-fno-pic";
+ } else if (Triple.isLoongArch()) {
+ if (CM == "extreme" &&
+ Args.hasFlagNoClaim(options::OPT_fplt, options::OPT_fno_plt, false))
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args) << "-fplt";
+ Ok = CM == "normal" || CM == "medium" || CM == "extreme";
+ // Convert to LLVM recognizable names.
+ if (Ok)
+ CM = llvm::StringSwitch<StringRef>(CM)
+ .Case("normal", "small")
+ .Case("extreme", "large")
+ .Default(CM);
+ } else if (Triple.isPPC64() || Triple.isOSAIX()) {
+ Ok = CM == "small" || CM == "medium" || CM == "large";
+ } else if (Triple.isRISCV()) {
+ if (CM == "medlow")
+ CM = "small";
+ else if (CM == "medany")
+ CM = "medium";
+ Ok = CM == "small" || CM == "medium";
+ } else if (Triple.getArch() == llvm::Triple::x86_64) {
+ Ok = llvm::is_contained({"small", "kernel", "medium", "large", "tiny"},
+ CM);
+ } else if (Triple.isNVPTX() || Triple.isAMDGPU()) {
+ // NVPTX/AMDGPU does not care about the code model and will accept
+ // whatever works for the host.
+ Ok = true;
+ } else if (Triple.isSPARC64()) {
+ if (CM == "medlow")
+ CM = "small";
+ else if (CM == "medmid")
+ CM = "medium";
+ else if (CM == "medany")
+ CM = "large";
+ Ok = CM == "small" || CM == "medium" || CM == "large";
+ }
+ if (Ok) {
+ CmdArgs.push_back(Args.MakeArgString("-mcmodel=" + CM));
} else {
- D.Diag(diag::err_drv_invalid_argument_to_option)
- << CM << A->getOption().getName();
+ D.Diag(diag::err_drv_unsupported_option_argument_for_target)
+ << A->getSpelling() << CM << TripleStr;
+ }
+ }
+
+ if (Triple.getArch() == llvm::Triple::x86_64) {
+ bool IsMediumCM = false;
+ bool IsLargeCM = false;
+ if (Arg *A = Args.getLastArg(options::OPT_mcmodel_EQ)) {
+ IsMediumCM = StringRef(A->getValue()) == "medium";
+ IsLargeCM = StringRef(A->getValue()) == "large";
+ }
+ if (Arg *A = Args.getLastArg(options::OPT_mlarge_data_threshold_EQ)) {
+ if (!IsMediumCM && !IsLargeCM) {
+ D.Diag(diag::warn_drv_large_data_threshold_invalid_code_model)
+ << A->getOption().getRenderName();
+ } else {
+ A->render(Args, CmdArgs);
+ }
+ } else if (IsMediumCM) {
+ CmdArgs.push_back("-mlarge-data-threshold=65536");
+ } else if (IsLargeCM) {
+ CmdArgs.push_back("-mlarge-data-threshold=0");
}
}
@@ -5162,8 +5834,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_mtls_size_EQ);
}
+ if (isTLSDESCEnabled(TC, Args))
+ CmdArgs.push_back("-enable-tlsdesc");
+
// Add the target cpu
- std::string CPU = getCPUName(Args, Triple, /*FromAs*/ false);
+ std::string CPU = getCPUName(D, Args, Triple, /*FromAs*/ false);
if (!CPU.empty()) {
CmdArgs.push_back("-target-cpu");
CmdArgs.push_back(Args.MakeArgString(CPU));
@@ -5171,40 +5846,23 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
RenderTargetOptions(Triple, Args, KernelOrKext, CmdArgs);
- // FIXME: For now we want to demote any errors to warnings, when they have
- // been raised for asking the wrong question of scalable vectors, such as
- // asking for the fixed number of elements. This may happen because code that
- // is not yet ported to work for scalable vectors uses the wrong interfaces,
- // whereas the behaviour is actually correct. Emitting a warning helps bring
- // up scalable vector support in an incremental way. When scalable vector
- // support is stable enough, all uses of wrong interfaces should be considered
- // as errors, but until then, we can live with a warning being emitted by the
- // compiler. This way, Clang can be used to compile code with scalable vectors
- // and identify possible issues.
- if (isa<BackendJobAction>(JA)) {
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back("-treat-scalable-fixed-error-as-warning");
- }
-
- // These two are potentially updated by AddClangCLArgs.
- codegenoptions::DebugInfoKind DebugInfoKind = codegenoptions::NoDebugInfo;
- bool EmitCodeView = false;
-
// Add clang-cl arguments.
types::ID InputType = Input.getType();
if (D.IsCLMode())
- AddClangCLArgs(Args, InputType, CmdArgs, &DebugInfoKind, &EmitCodeView);
+ AddClangCLArgs(Args, InputType, CmdArgs);
+ llvm::codegenoptions::DebugInfoKind DebugInfoKind =
+ llvm::codegenoptions::NoDebugInfo;
DwarfFissionKind DwarfFission = DwarfFissionKind::None;
- renderDebugOptions(TC, D, RawTriple, Args, EmitCodeView,
- types::isLLVMIR(InputType), CmdArgs, DebugInfoKind,
- DwarfFission);
+ renderDebugOptions(TC, D, RawTriple, Args, types::isLLVMIR(InputType),
+ CmdArgs, Output, DebugInfoKind, DwarfFission);
// Add the split debug info name to the command lines here so we
// can propagate it to the backend.
bool SplitDWARF = (DwarfFission != DwarfFissionKind::None) &&
(TC.getTriple().isOSBinFormatELF() ||
- TC.getTriple().isOSBinFormatWasm()) &&
+ TC.getTriple().isOSBinFormatWasm() ||
+ TC.getTriple().isOSBinFormatCOFF()) &&
(isa<AssembleJobAction>(JA) || isa<CompileJobAction>(JA) ||
isa<BackendJobAction>(JA));
if (SplitDWARF) {
@@ -5252,12 +5910,19 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
Args.AddAllArgs(CmdArgs, options::OPT_fshow_skipped_includes);
- if (D.CCPrintHeaders && !D.CCGenDiagnostics) {
+ if (D.CCPrintHeadersFormat && !D.CCGenDiagnostics) {
CmdArgs.push_back("-header-include-file");
CmdArgs.push_back(!D.CCPrintHeadersFilename.empty()
? D.CCPrintHeadersFilename.c_str()
: "-");
CmdArgs.push_back("-sys-header-deps");
+ CmdArgs.push_back(Args.MakeArgString(
+ "-header-include-format=" +
+ std::string(headerIncludeFormatKindToString(D.CCPrintHeadersFormat))));
+ CmdArgs.push_back(
+ Args.MakeArgString("-header-include-filtering=" +
+ std::string(headerIncludeFilteringKindToString(
+ D.CCPrintHeadersFiltering))));
}
Args.AddLastArg(CmdArgs, options::OPT_P);
Args.AddLastArg(CmdArgs, options::OPT_print_ivar_layout);
@@ -5293,7 +5958,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
StringRef Val = A->getValue();
if (Triple.isX86() && Triple.isOSBinFormatELF()) {
if (Val != "all" && Val != "labels" && Val != "none" &&
- !Val.startswith("list="))
+ !Val.starts_with("list="))
D.Diag(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue();
else
@@ -5315,27 +5980,24 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fdata-sections");
}
- if (!Args.hasFlag(options::OPT_funique_section_names,
- options::OPT_fno_unique_section_names, true))
- CmdArgs.push_back("-fno-unique-section-names");
-
- if (Args.hasFlag(options::OPT_funique_internal_linkage_names,
- options::OPT_fno_unique_internal_linkage_names, false))
- CmdArgs.push_back("-funique-internal-linkage-names");
-
- if (Args.hasFlag(options::OPT_funique_basic_block_section_names,
- options::OPT_fno_unique_basic_block_section_names, false))
- CmdArgs.push_back("-funique-basic-block-section-names");
+ Args.addOptOutFlag(CmdArgs, options::OPT_funique_section_names,
+ options::OPT_fno_unique_section_names);
+ Args.addOptInFlag(CmdArgs, options::OPT_funique_internal_linkage_names,
+ options::OPT_fno_unique_internal_linkage_names);
+ Args.addOptInFlag(CmdArgs, options::OPT_funique_basic_block_section_names,
+ options::OPT_fno_unique_basic_block_section_names);
+ Args.addOptInFlag(CmdArgs, options::OPT_fconvergent_functions,
+ options::OPT_fno_convergent_functions);
if (Arg *A = Args.getLastArg(options::OPT_fsplit_machine_functions,
options::OPT_fno_split_machine_functions)) {
- // This codegen pass is only available on x86-elf targets.
- if (Triple.isX86() && Triple.isOSBinFormatELF()) {
- if (A->getOption().matches(options::OPT_fsplit_machine_functions))
+ if (!A->getOption().matches(options::OPT_fno_split_machine_functions)) {
+ // This codegen pass is only available on x86 and AArch64 ELF targets.
+ if ((Triple.isX86() || Triple.isAArch64()) && Triple.isOSBinFormatELF())
A->render(Args, CmdArgs);
- } else {
- D.Diag(diag::err_drv_unsupported_opt_for_target)
- << A->getAsString(Args) << TripleStr;
+ else
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << TripleStr;
}
}
@@ -5347,15 +6009,21 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// for sampling, overhead of call arc collection is way too high and there's
// no way to collect the output.
if (!Triple.isNVPTX() && !Triple.isAMDGCN())
- addPGOAndCoverageFlags(TC, C, D, Output, Args, CmdArgs);
+ addPGOAndCoverageFlags(TC, C, JA, Output, Args, SanitizeArgs, CmdArgs);
Args.AddLastArg(CmdArgs, options::OPT_fclang_abi_compat_EQ);
- // Add runtime flag for PS4 when PGO, coverage, or sanitizers are enabled.
- if (RawTriple.isPS4CPU() &&
+ if (getLastProfileSampleUseArg(Args) &&
+ Args.hasArg(options::OPT_fsample_profile_use_profi)) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-sample-profile-use-profi");
+ }
+
+ // Add runtime flag for PS4/PS5 when PGO, coverage, or sanitizers are enabled.
+ if (RawTriple.isPS() &&
!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
- PS4cpu::addProfileRTArgs(TC, Args, CmdArgs);
- PS4cpu::addSanitizerArgs(TC, CmdArgs);
+ PScpu::addProfileRTArgs(TC, Args, CmdArgs);
+ PScpu::addSanitizerArgs(TC, Args, CmdArgs);
}
// Pass options for controlling the default header search paths.
@@ -5417,16 +6085,35 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_R_Group);
- Args.AddAllArgs(CmdArgs, options::OPT_W_Group);
+ for (const Arg *A :
+ Args.filtered(options::OPT_W_Group, options::OPT__SLASH_wd)) {
+ A->claim();
+ if (A->getOption().getID() == options::OPT__SLASH_wd) {
+ unsigned WarningNumber;
+ if (StringRef(A->getValue()).getAsInteger(10, WarningNumber)) {
+ D.Diag(diag::err_drv_invalid_int_value)
+ << A->getAsString(Args) << A->getValue();
+ continue;
+ }
+
+ if (auto Group = diagGroupFromCLWarningID(WarningNumber)) {
+ CmdArgs.push_back(Args.MakeArgString(
+ "-Wno-" + DiagnosticIDs::getWarningOptionForGroup(*Group)));
+ }
+ continue;
+ }
+ A->render(Args, CmdArgs);
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_Wsystem_headers_in_module_EQ);
+
if (Args.hasFlag(options::OPT_pedantic, options::OPT_no_pedantic, false))
CmdArgs.push_back("-pedantic");
Args.AddLastArg(CmdArgs, options::OPT_pedantic_errors);
Args.AddLastArg(CmdArgs, options::OPT_w);
- // Fixed point flags
- if (Args.hasFlag(options::OPT_ffixed_point, options::OPT_fno_fixed_point,
- /*Default=*/false))
- Args.AddLastArg(CmdArgs, options::OPT_ffixed_point);
+ Args.addOptInFlag(CmdArgs, options::OPT_ffixed_point,
+ options::OPT_fno_fixed_point);
if (Arg *A = Args.getLastArg(options::OPT_fcxx_abi_EQ))
A->render(Args, CmdArgs);
@@ -5434,6 +6121,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fexperimental_relative_cxx_abi_vtables,
options::OPT_fno_experimental_relative_cxx_abi_vtables);
+ Args.AddLastArg(CmdArgs, options::OPT_fexperimental_omit_vtable_rtti,
+ options::OPT_fno_experimental_omit_vtable_rtti);
+
+ if (Arg *A = Args.getLastArg(options::OPT_ffuchsia_api_level_EQ))
+ A->render(Args, CmdArgs);
+
// Handle -{std, ansi, trigraphs} -- take the last of -{std, ansi}
// (-ansi is equivalent to -std=c89 or -std=c++98).
//
@@ -5476,11 +6169,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_ftrigraphs,
options::OPT_fno_trigraphs);
-
- // HIP headers has minimum C++ standard requirements. Therefore set the
- // default language standard.
- if (IsHIP)
- CmdArgs.push_back(IsWindowsMSVC ? "-std=c++14" : "-std=c++11");
}
// GCC's behavior for -Wwrite-strings is a bit strange:
@@ -5523,31 +6211,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (!ShouldEnableAutolink(Args, TC, JA))
CmdArgs.push_back("-fno-autolink");
- // Add in -fdebug-compilation-dir if necessary.
- addDebugCompDirArg(Args, CmdArgs, D.getVFS());
+ Args.AddLastArg(CmdArgs, options::OPT_ftemplate_depth_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_foperator_arrow_depth_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_fconstexpr_depth_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_fconstexpr_steps_EQ);
- addDebugPrefixMapArg(D, Args, CmdArgs);
-
- if (Arg *A = Args.getLastArg(options::OPT_ftemplate_depth_,
- options::OPT_ftemplate_depth_EQ)) {
- CmdArgs.push_back("-ftemplate-depth");
- CmdArgs.push_back(A->getValue());
- }
-
- if (Arg *A = Args.getLastArg(options::OPT_foperator_arrow_depth_EQ)) {
- CmdArgs.push_back("-foperator-arrow-depth");
- CmdArgs.push_back(A->getValue());
- }
-
- if (Arg *A = Args.getLastArg(options::OPT_fconstexpr_depth_EQ)) {
- CmdArgs.push_back("-fconstexpr-depth");
- CmdArgs.push_back(A->getValue());
- }
-
- if (Arg *A = Args.getLastArg(options::OPT_fconstexpr_steps_EQ)) {
- CmdArgs.push_back("-fconstexpr-steps");
- CmdArgs.push_back(A->getValue());
- }
+ Args.AddLastArg(CmdArgs, options::OPT_fexperimental_library);
if (Args.hasArg(options::OPT_fexperimental_new_constant_interpreter))
CmdArgs.push_back("-fexperimental-new-constant-interpreter");
@@ -5574,7 +6243,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
"standalone", "objc", "swift", "swift-5.0", "swift-4.2", "swift-4.1",
};
- if (find(kCFABIs, StringRef(A->getValue())) == std::end(kCFABIs))
+ if (!llvm::is_contained(kCFABIs, StringRef(A->getValue())))
D.Diag(diag::err_drv_invalid_cf_runtime_abi) << A->getValue();
else
A->render(Args, CmdArgs);
@@ -5590,9 +6259,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(A->getValue());
}
- if (Args.hasFlag(options::OPT_fstack_size_section,
- options::OPT_fno_stack_size_section, RawTriple.isPS4()))
- CmdArgs.push_back("-fstack-size-section");
+ Args.addOptInFlag(CmdArgs, options::OPT_fstack_size_section,
+ options::OPT_fno_stack_size_section);
if (Args.hasArg(options::OPT_fstack_usage)) {
CmdArgs.push_back("-stack-usage-file");
@@ -5612,25 +6280,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
else
CmdArgs.push_back("19");
- if (Arg *A = Args.getLastArg(options::OPT_fmacro_backtrace_limit_EQ)) {
- CmdArgs.push_back("-fmacro-backtrace-limit");
- CmdArgs.push_back(A->getValue());
- }
-
- if (Arg *A = Args.getLastArg(options::OPT_ftemplate_backtrace_limit_EQ)) {
- CmdArgs.push_back("-ftemplate-backtrace-limit");
- CmdArgs.push_back(A->getValue());
- }
-
- if (Arg *A = Args.getLastArg(options::OPT_fconstexpr_backtrace_limit_EQ)) {
- CmdArgs.push_back("-fconstexpr-backtrace-limit");
- CmdArgs.push_back(A->getValue());
- }
-
- if (Arg *A = Args.getLastArg(options::OPT_fspell_checking_limit_EQ)) {
- CmdArgs.push_back("-fspell-checking-limit");
- CmdArgs.push_back(A->getValue());
- }
+ Args.AddLastArg(CmdArgs, options::OPT_fconstexpr_backtrace_limit_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_fmacro_backtrace_limit_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_ftemplate_backtrace_limit_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_fspell_checking_limit_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_fcaret_diagnostics_max_lines_EQ);
// Pass -fmessage-length=.
unsigned MessageLength = 0;
@@ -5648,22 +6302,33 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(
Args.MakeArgString("-fmessage-length=" + Twine(MessageLength)));
+ if (Arg *A = Args.getLastArg(options::OPT_frandomize_layout_seed_EQ))
+ CmdArgs.push_back(
+ Args.MakeArgString("-frandomize-layout-seed=" + Twine(A->getValue(0))));
+
+ if (Arg *A = Args.getLastArg(options::OPT_frandomize_layout_seed_file_EQ))
+ CmdArgs.push_back(Args.MakeArgString("-frandomize-layout-seed-file=" +
+ Twine(A->getValue(0))));
+
// -fvisibility= and -fvisibility-ms-compat are of a piece.
if (const Arg *A = Args.getLastArg(options::OPT_fvisibility_EQ,
options::OPT_fvisibility_ms_compat)) {
if (A->getOption().matches(options::OPT_fvisibility_EQ)) {
- CmdArgs.push_back("-fvisibility");
- CmdArgs.push_back(A->getValue());
+ A->render(Args, CmdArgs);
} else {
assert(A->getOption().matches(options::OPT_fvisibility_ms_compat));
- CmdArgs.push_back("-fvisibility");
- CmdArgs.push_back("hidden");
- CmdArgs.push_back("-ftype-visibility");
- CmdArgs.push_back("default");
+ CmdArgs.push_back("-fvisibility=hidden");
+ CmdArgs.push_back("-ftype-visibility=default");
}
+ } else if (IsOpenMPDevice) {
+ // When compiling for the OpenMP device we want protected visibility by
+ // default. This prevents the device from accidentally preempting code on
+ // the host, makes the system more robust, and improves performance.
+ CmdArgs.push_back("-fvisibility=protected");
}
- if (!RawTriple.isPS4())
+ // PS4/PS5 process these options in addClangTargetOptions.
+ if (!RawTriple.isPS()) {
if (const Arg *A =
Args.getLastArg(options::OPT_fvisibility_from_dllstorageclass,
options::OPT_fno_visibility_from_dllstorageclass)) {
@@ -5677,26 +6342,42 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fvisibility_externs_nodllstorageclass_EQ);
}
}
-
- if (const Arg *A = Args.getLastArg(options::OPT_mignore_xcoff_visibility)) {
- if (Triple.isOSAIX())
- CmdArgs.push_back("-mignore-xcoff-visibility");
- else
- D.Diag(diag::err_drv_unsupported_opt_for_target)
- << A->getAsString(Args) << TripleStr;
}
-
if (Args.hasFlag(options::OPT_fvisibility_inlines_hidden,
options::OPT_fno_visibility_inlines_hidden, false))
CmdArgs.push_back("-fvisibility-inlines-hidden");
Args.AddLastArg(CmdArgs, options::OPT_fvisibility_inlines_hidden_static_local_var,
options::OPT_fno_visibility_inlines_hidden_static_local_var);
- Args.AddLastArg(CmdArgs, options::OPT_fvisibility_global_new_delete_hidden);
+
+ // -fvisibility-global-new-delete-hidden is a deprecated spelling of
+ // -fvisibility-global-new-delete=force-hidden.
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fvisibility_global_new_delete_hidden)) {
+ D.Diag(diag::warn_drv_deprecated_arg)
+ << A->getAsString(Args)
+ << "-fvisibility-global-new-delete=force-hidden";
+ }
+
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fvisibility_global_new_delete_EQ,
+ options::OPT_fvisibility_global_new_delete_hidden)) {
+ if (A->getOption().matches(options::OPT_fvisibility_global_new_delete_EQ)) {
+ A->render(Args, CmdArgs);
+ } else {
+ assert(A->getOption().matches(
+ options::OPT_fvisibility_global_new_delete_hidden));
+ CmdArgs.push_back("-fvisibility-global-new-delete=force-hidden");
+ }
+ }
Args.AddLastArg(CmdArgs, options::OPT_ftlsmodel_EQ);
+ if (Args.hasFlag(options::OPT_fnew_infallible,
+ options::OPT_fno_new_infallible, false))
+ CmdArgs.push_back("-fnew-infallible");
+
if (Args.hasFlag(options::OPT_fno_operator_names,
options::OPT_foperator_names, false))
CmdArgs.push_back("-fno-operator-names");
@@ -5705,8 +6386,23 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_femit_all_decls);
Args.AddLastArg(CmdArgs, options::OPT_fheinous_gnu_extensions);
Args.AddLastArg(CmdArgs, options::OPT_fdigraphs, options::OPT_fno_digraphs);
- Args.AddLastArg(CmdArgs, options::OPT_femulated_tls,
- options::OPT_fno_emulated_tls);
+ Args.AddLastArg(CmdArgs, options::OPT_fzero_call_used_regs_EQ);
+
+ if (Args.hasFlag(options::OPT_femulated_tls, options::OPT_fno_emulated_tls,
+ Triple.hasDefaultEmulatedTLS()))
+ CmdArgs.push_back("-femulated-tls");
+
+ Args.addOptInFlag(CmdArgs, options::OPT_fcheck_new,
+ options::OPT_fno_check_new);
+
+ if (Arg *A = Args.getLastArg(options::OPT_fzero_call_used_regs_EQ)) {
+ // FIXME: There's no reason for this to be restricted to X86. The backend
+ // code needs to be changed to include the appropriate function calls
+ // automatically.
+ if (!Triple.isX86() && !Triple.isAArch64())
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << TripleStr;
+ }
// AltiVec-like language extensions aren't relevant for assembling.
if (!isa<PreprocessJobAction>(JA) || Output.getType() != types::TY_PP_Asm)
@@ -5737,6 +6433,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_openmp_simd);
Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_enable_irbuilder);
Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_version_EQ);
+ if (!Args.hasFlag(options::OPT_fopenmp_extensions,
+ options::OPT_fno_openmp_extensions, /*Default=*/true))
+ CmdArgs.push_back("-fno-openmp-extensions");
Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_cuda_number_of_sm_EQ);
Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_cuda_blocks_per_sm_EQ);
Args.AddAllArgs(CmdArgs,
@@ -5752,12 +6451,30 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_openmp_cuda_mode, /*Default=*/false))
CmdArgs.push_back("-fopenmp-cuda-mode");
- // When in OpenMP offloading mode with NVPTX target, check if full runtime
- // is required.
- if (Args.hasFlag(options::OPT_fopenmp_cuda_force_full_runtime,
- options::OPT_fno_openmp_cuda_force_full_runtime,
+ // When in OpenMP offloading mode, enable debugging on the device.
+ Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_target_debug_EQ);
+ if (Args.hasFlag(options::OPT_fopenmp_target_debug,
+ options::OPT_fno_openmp_target_debug, /*Default=*/false))
+ CmdArgs.push_back("-fopenmp-target-debug");
+
+ // When in OpenMP offloading mode, forward assumptions information about
+ // thread and team counts in the device.
+ if (Args.hasFlag(options::OPT_fopenmp_assume_teams_oversubscription,
+ options::OPT_fno_openmp_assume_teams_oversubscription,
+ /*Default=*/false))
+ CmdArgs.push_back("-fopenmp-assume-teams-oversubscription");
+ if (Args.hasFlag(options::OPT_fopenmp_assume_threads_oversubscription,
+ options::OPT_fno_openmp_assume_threads_oversubscription,
/*Default=*/false))
- CmdArgs.push_back("-fopenmp-cuda-force-full-runtime");
+ CmdArgs.push_back("-fopenmp-assume-threads-oversubscription");
+ if (Args.hasArg(options::OPT_fopenmp_assume_no_thread_state))
+ CmdArgs.push_back("-fopenmp-assume-no-thread-state");
+ if (Args.hasArg(options::OPT_fopenmp_assume_no_nested_parallelism))
+ CmdArgs.push_back("-fopenmp-assume-no-nested-parallelism");
+ if (Args.hasArg(options::OPT_fopenmp_offload_mandatory))
+ CmdArgs.push_back("-fopenmp-offload-mandatory");
+ if (Args.hasArg(options::OPT_fopenmp_force_usm))
+ CmdArgs.push_back("-fopenmp-force-usm");
break;
default:
// By default, if Clang doesn't know how to generate useful OpenMP code
@@ -5772,10 +6489,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fopenmp_simd,
options::OPT_fno_openmp_simd);
Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_version_EQ);
+ Args.addOptOutFlag(CmdArgs, options::OPT_fopenmp_extensions,
+ options::OPT_fno_openmp_extensions);
}
- const SanitizerArgs &Sanitize = TC.getSanitizerArgs();
- Sanitize.addArgs(TC, Args, CmdArgs, InputType);
+ // Forward the new driver to change offloading code generation.
+ if (Args.hasFlag(options::OPT_offload_new_driver,
+ options::OPT_no_offload_new_driver, false))
+ CmdArgs.push_back("--offload-new-driver");
+
+ SanitizeArgs.addArgs(TC, Args, CmdArgs, InputType);
const XRayArgs &XRay = TC.getXRayArgs();
XRay.addArgs(TC, Args, CmdArgs, InputType);
@@ -5791,7 +6514,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Arg *A = Args.getLastArg(options::OPT_fpatchable_function_entry_EQ)) {
StringRef S0 = A->getValue(), S = S0;
unsigned Size, Offset = 0;
- if (!Triple.isAArch64() && !Triple.isRISCV() && !Triple.isX86())
+ if (!Triple.isAArch64() && !Triple.isLoongArch() && !Triple.isRISCV() &&
+ !Triple.isX86())
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getAsString(Args) << TripleStr;
else if (S.consumeInteger(10, Size) ||
@@ -5808,6 +6532,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ Args.AddLastArg(CmdArgs, options::OPT_fms_hotpatch);
+
if (TC.SupportsProfiling()) {
Args.AddLastArg(CmdArgs, options::OPT_pg);
@@ -5835,6 +6561,35 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ if (Arg *A = Args.getLastArgNoClaim(options::OPT_pg)) {
+ if (TC.getTriple().isOSzOS()) {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << TripleStr;
+ }
+ }
+ if (Arg *A = Args.getLastArgNoClaim(options::OPT_p)) {
+ if (!(TC.getTriple().isOSAIX() || TC.getTriple().isOSOpenBSD())) {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << TripleStr;
+ }
+ }
+ if (Arg *A = Args.getLastArgNoClaim(options::OPT_p, options::OPT_pg)) {
+ if (A->getOption().matches(options::OPT_p)) {
+ A->claim();
+ if (TC.getTriple().isOSAIX() && !Args.hasArgNoClaim(options::OPT_pg))
+ CmdArgs.push_back("-pg");
+ }
+ }
+
+ // Reject AIX-specific link options on other targets.
+ if (!TC.getTriple().isOSAIX()) {
+ for (const Arg *A : Args.filtered(options::OPT_b, options::OPT_K,
+ options::OPT_mxcoff_build_id_EQ)) {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << TripleStr;
+ }
+ }
+
if (Args.getLastArg(options::OPT_fapple_kext) ||
(Args.hasArg(options::OPT_mkernel) && types::isCXX(InputType)))
CmdArgs.push_back("-fapple-kext");
@@ -5846,12 +6601,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_parseable_fixits);
Args.AddLastArg(CmdArgs, options::OPT_ftime_report);
Args.AddLastArg(CmdArgs, options::OPT_ftime_report_EQ);
- Args.AddLastArg(CmdArgs, options::OPT_ftime_trace);
- Args.AddLastArg(CmdArgs, options::OPT_ftime_trace_granularity_EQ);
Args.AddLastArg(CmdArgs, options::OPT_ftrapv);
Args.AddLastArg(CmdArgs, options::OPT_malign_double);
Args.AddLastArg(CmdArgs, options::OPT_fno_temp_file);
+ if (const char *Name = C.getTimeTraceFile(&JA)) {
+ CmdArgs.push_back(Args.MakeArgString("-ftime-trace=" + Twine(Name)));
+ Args.AddLastArg(CmdArgs, options::OPT_ftime_trace_granularity_EQ);
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_ftrapv_handler_EQ)) {
CmdArgs.push_back("-ftrapv-handler");
CmdArgs.push_back(A->getValue());
@@ -5882,20 +6640,21 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_funroll_loops,
options::OPT_fno_unroll_loops);
+ Args.AddLastArg(CmdArgs, options::OPT_fstrict_flex_arrays_EQ);
+
Args.AddLastArg(CmdArgs, options::OPT_pthread);
- if (Args.hasFlag(options::OPT_mspeculative_load_hardening,
- options::OPT_mno_speculative_load_hardening, false))
- CmdArgs.push_back(Args.MakeArgString("-mspeculative-load-hardening"));
+ Args.addOptInFlag(CmdArgs, options::OPT_mspeculative_load_hardening,
+ options::OPT_mno_speculative_load_hardening);
RenderSSPOptions(D, TC, Args, CmdArgs, KernelOrKext);
RenderSCPOptions(TC, Args, CmdArgs);
RenderTrivialAutoVarInitOptions(D, TC, Args, CmdArgs);
- // Translate -mstackrealign
- if (Args.hasFlag(options::OPT_mstackrealign, options::OPT_mno_stackrealign,
- false))
- CmdArgs.push_back(Args.MakeArgString("-mstackrealign"));
+ Args.AddLastArg(CmdArgs, options::OPT_fswift_async_fp_EQ);
+
+ Args.addOptInFlag(CmdArgs, options::OPT_mstackrealign,
+ options::OPT_mno_stackrealign);
if (Args.hasArg(options::OPT_mstack_alignment)) {
StringRef alignment = Args.getLastArgValue(options::OPT_mstack_alignment);
@@ -5911,9 +6670,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-mstack-probe-size=0");
}
- if (!Args.hasFlag(options::OPT_mstack_arg_probe,
- options::OPT_mno_stack_arg_probe, true))
- CmdArgs.push_back(Args.MakeArgString("-mno-stack-arg-probe"));
+ Args.addOptOutFlag(CmdArgs, options::OPT_mstack_arg_probe,
+ options::OPT_mno_stack_arg_probe);
if (Arg *A = Args.getLastArg(options::OPT_mrestrict_it,
options::OPT_mno_restrict_it)) {
@@ -5922,34 +6680,36 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-arm-restrict-it");
} else {
CmdArgs.push_back("-mllvm");
- CmdArgs.push_back("-arm-no-restrict-it");
+ CmdArgs.push_back("-arm-default-it");
}
- } else if (Triple.isOSWindows() &&
- (Triple.getArch() == llvm::Triple::arm ||
- Triple.getArch() == llvm::Triple::thumb)) {
- // Windows on ARM expects restricted IT blocks
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back("-arm-restrict-it");
}
// Forward -cl options to -cc1
RenderOpenCLOptions(Args, CmdArgs, InputType);
+ // Forward hlsl options to -cc1
+ RenderHLSLOptions(Args, CmdArgs, InputType);
+
+ // Forward OpenACC options to -cc1
+ RenderOpenACCOptions(D, Args, CmdArgs, InputType);
+
if (IsHIP) {
if (Args.hasFlag(options::OPT_fhip_new_launch_api,
options::OPT_fno_hip_new_launch_api, true))
CmdArgs.push_back("-fhip-new-launch-api");
- if (Args.hasFlag(options::OPT_fgpu_allow_device_init,
- options::OPT_fno_gpu_allow_device_init, false))
- CmdArgs.push_back("-fgpu-allow-device-init");
+ Args.addOptInFlag(CmdArgs, options::OPT_fgpu_allow_device_init,
+ options::OPT_fno_gpu_allow_device_init);
+ Args.AddLastArg(CmdArgs, options::OPT_hipstdpar);
+ Args.AddLastArg(CmdArgs, options::OPT_hipstdpar_interpose_alloc);
+ Args.addOptInFlag(CmdArgs, options::OPT_fhip_kernel_arg_name,
+ options::OPT_fno_hip_kernel_arg_name);
}
if (IsCuda || IsHIP) {
- if (Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc, false))
+ if (IsRDCMode)
CmdArgs.push_back("-fgpu-rdc");
- if (Args.hasFlag(options::OPT_fgpu_defer_diag,
- options::OPT_fno_gpu_defer_diag, false))
- CmdArgs.push_back("-fgpu-defer-diag");
+ Args.addOptInFlag(CmdArgs, options::OPT_fgpu_defer_diag,
+ options::OPT_fno_gpu_defer_diag);
if (Args.hasFlag(options::OPT_fgpu_exclude_wrong_side_overloads,
options::OPT_fno_gpu_exclude_wrong_side_overloads,
false)) {
@@ -5958,11 +6718,21 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ // Forward -nogpulib to -cc1.
+ if (Args.hasArg(options::OPT_nogpulib))
+ CmdArgs.push_back("-nogpulib");
+
if (Arg *A = Args.getLastArg(options::OPT_fcf_protection_EQ)) {
CmdArgs.push_back(
Args.MakeArgString(Twine("-fcf-protection=") + A->getValue()));
}
+ if (Arg *A = Args.getLastArg(options::OPT_mfunction_return_EQ))
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-mfunction-return=") + A->getValue()));
+
+ Args.AddLastArg(CmdArgs, options::OPT_mindirect_branch_cs_prefix);
+
// Forward -f options with positive and negative forms; we translate these by
// hand. Do not propagate PGO options to the GPU-side compilations as the
// profile info is for the host-side compilation only.
@@ -5997,9 +6767,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
RenderBuiltinOptions(TC, RawTriple, Args, CmdArgs);
- if (!Args.hasFlag(options::OPT_fassume_sane_operator_new,
- options::OPT_fno_assume_sane_operator_new))
- CmdArgs.push_back("-fno-assume-sane-operator-new");
+ Args.addOptOutFlag(CmdArgs, options::OPT_fassume_sane_operator_new,
+ options::OPT_fno_assume_sane_operator_new);
+
+ if (Args.hasFlag(options::OPT_fapinotes, options::OPT_fno_apinotes, false))
+ CmdArgs.push_back("-fapinotes");
+ if (Args.hasFlag(options::OPT_fapinotes_modules,
+ options::OPT_fno_apinotes_modules, false))
+ CmdArgs.push_back("-fapinotes-modules");
+ Args.AddLastArg(CmdArgs, options::OPT_fapinotes_swift_version);
// -fblocks=0 is default.
if (Args.hasFlag(options::OPT_fblocks, options::OPT_fno_blocks,
@@ -6017,24 +6793,18 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (TC.IsEncodeExtendedBlockSignatureDefault())
CmdArgs.push_back("-fencode-extended-block-signature");
- if (Args.hasFlag(options::OPT_fcoroutines_ts, options::OPT_fno_coroutines_ts,
- false) &&
- types::isCXX(InputType)) {
- CmdArgs.push_back("-fcoroutines-ts");
- }
+ if (Args.hasFlag(options::OPT_fcoro_aligned_allocation,
+ options::OPT_fno_coro_aligned_allocation, false) &&
+ types::isCXX(InputType))
+ CmdArgs.push_back("-fcoro-aligned-allocation");
Args.AddLastArg(CmdArgs, options::OPT_fdouble_square_bracket_attributes,
options::OPT_fno_double_square_bracket_attributes);
- // -faccess-control is default.
- if (Args.hasFlag(options::OPT_fno_access_control,
- options::OPT_faccess_control, false))
- CmdArgs.push_back("-fno-access-control");
-
- // -felide-constructors is the default.
- if (Args.hasFlag(options::OPT_fno_elide_constructors,
- options::OPT_felide_constructors, false))
- CmdArgs.push_back("-fno-elide-constructors");
+ Args.addOptOutFlag(CmdArgs, options::OPT_faccess_control,
+ options::OPT_fno_access_control);
+ Args.addOptOutFlag(CmdArgs, options::OPT_felide_constructors,
+ options::OPT_fno_elide_constructors);
ToolChain::RTTIMode RTTIMode = TC.getRTTIMode();
@@ -6053,7 +6823,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasFlag(
options::OPT_fuse_cxa_atexit, options::OPT_fno_use_cxa_atexit,
!RawTriple.isOSAIX() && !RawTriple.isOSWindows() &&
- TC.getArch() != llvm::Triple::xcore &&
((RawTriple.getVendor() != llvm::Triple::MipsTechnologies) ||
RawTriple.hasEnvironment())) ||
KernelOrKext)
@@ -6064,10 +6833,28 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
RawTriple.isOSDarwin() && !KernelOrKext))
CmdArgs.push_back("-fregister-global-dtors-with-atexit");
- // -fno-use-line-directives is default.
- if (Args.hasFlag(options::OPT_fuse_line_directives,
- options::OPT_fno_use_line_directives, false))
- CmdArgs.push_back("-fuse-line-directives");
+ Args.addOptInFlag(CmdArgs, options::OPT_fuse_line_directives,
+ options::OPT_fno_use_line_directives);
+
+ // -fno-minimize-whitespace is default.
+ if (Args.hasFlag(options::OPT_fminimize_whitespace,
+ options::OPT_fno_minimize_whitespace, false)) {
+ types::ID InputType = Inputs[0].getType();
+ if (!isDerivedFromC(InputType))
+ D.Diag(diag::err_drv_opt_unsupported_input_type)
+ << "-fminimize-whitespace" << types::getTypeName(InputType);
+ CmdArgs.push_back("-fminimize-whitespace");
+ }
+
+ // -fno-keep-system-includes is default.
+ if (Args.hasFlag(options::OPT_fkeep_system_includes,
+ options::OPT_fno_keep_system_includes, false)) {
+ types::ID InputType = Inputs[0].getType();
+ if (!isDerivedFromC(InputType))
+ D.Diag(diag::err_drv_opt_unsupported_input_type)
+ << "-fkeep-system-includes" << types::getTypeName(InputType);
+ CmdArgs.push_back("-fkeep-system-includes");
+ }
// -fms-extensions=0 is default.
if (Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions,
@@ -6082,6 +6869,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (IsMSVCCompat)
CmdArgs.push_back("-fms-compatibility");
+ if (Triple.isWindowsMSVCEnvironment() && !D.IsCLMode() &&
+ Args.hasArg(options::OPT_fms_runtime_lib_EQ))
+ ProcessVSRuntimeLibrary(Args, CmdArgs);
+
// Handle -fgcc-version, if present.
VersionTuple GNUCVer;
if (Arg *A = Args.getLastArg(options::OPT_fgnuc_version_EQ)) {
@@ -6090,8 +6881,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
StringRef Val = A->getValue();
Val = Val.empty() ? "0" : Val; // Treat "" as 0 or disable.
bool Invalid = GNUCVer.tryParse(Val);
- unsigned Minor = GNUCVer.getMinor().getValueOr(0);
- unsigned Patch = GNUCVer.getSubminor().getValueOr(0);
+ unsigned Minor = GNUCVer.getMinor().value_or(0);
+ unsigned Patch = GNUCVer.getSubminor().value_or(0);
if (Invalid || GNUCVer.getBuild() || Minor >= 100 || Patch >= 100) {
D.Diag(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue();
@@ -6133,7 +6924,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
.Case("c++14", "-std=c++14")
.Case("c++17", "-std=c++17")
.Case("c++20", "-std=c++20")
- .Case("c++latest", "-std=c++2b")
+ // TODO add c++23 and c++26 when MSVC supports it.
+ .Case("c++latest", "-std=c++26")
.Default("");
if (LanguageStandard.empty())
D.Diag(clang::diag::warn_drv_unused_argument)
@@ -6150,14 +6942,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(LanguageStandard.data());
}
- // -fno-borland-extensions is default.
- if (Args.hasFlag(options::OPT_fborland_extensions,
- options::OPT_fno_borland_extensions, false))
- CmdArgs.push_back("-fborland-extensions");
+ Args.addOptInFlag(CmdArgs, options::OPT_fborland_extensions,
+ options::OPT_fno_borland_extensions);
- // -fno-declspec is default, except for PS4.
+ // -fno-declspec is default, except for PS4/PS5.
if (Args.hasFlag(options::OPT_fdeclspec, options::OPT_fno_declspec,
- RawTriple.isPS4()))
+ RawTriple.isPS()))
CmdArgs.push_back("-fdeclspec");
else if (Args.hasArg(options::OPT_fno_declspec))
CmdArgs.push_back("-fno-declspec"); // Explicitly disabling __declspec.
@@ -6166,39 +6956,62 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// than 19.
if (!Args.hasFlag(options::OPT_fthreadsafe_statics,
options::OPT_fno_threadsafe_statics,
- !IsWindowsMSVC || IsMSVC2015Compatible))
+ !types::isOpenCL(InputType) &&
+ (!IsWindowsMSVC || IsMSVC2015Compatible)))
CmdArgs.push_back("-fno-threadsafe-statics");
- // -fno-delayed-template-parsing is default, except when targeting MSVC.
- // Many old Windows SDK versions require this to parse.
- // FIXME: MSVC introduced /Zc:twoPhase- to disable this behavior in their
- // compiler. We should be able to disable this by default at some point.
- if (Args.hasFlag(options::OPT_fdelayed_template_parsing,
- options::OPT_fno_delayed_template_parsing, IsWindowsMSVC))
- CmdArgs.push_back("-fdelayed-template-parsing");
-
// -fgnu-keywords default varies depending on language; only pass if
// specified.
Args.AddLastArg(CmdArgs, options::OPT_fgnu_keywords,
options::OPT_fno_gnu_keywords);
- if (Args.hasFlag(options::OPT_fgnu89_inline, options::OPT_fno_gnu89_inline,
- false))
- CmdArgs.push_back("-fgnu89-inline");
+ Args.addOptInFlag(CmdArgs, options::OPT_fgnu89_inline,
+ options::OPT_fno_gnu89_inline);
- if (Args.hasArg(options::OPT_fno_inline))
- CmdArgs.push_back("-fno-inline");
+ const Arg *InlineArg = Args.getLastArg(options::OPT_finline_functions,
+ options::OPT_finline_hint_functions,
+ options::OPT_fno_inline_functions);
+ if (Arg *A = Args.getLastArg(options::OPT_finline, options::OPT_fno_inline)) {
+ if (A->getOption().matches(options::OPT_fno_inline))
+ A->render(Args, CmdArgs);
+ } else if (InlineArg) {
+ InlineArg->render(Args, CmdArgs);
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_finline_max_stacksize_EQ);
+
+ // FIXME: Find a better way to determine whether we are in C++20.
+ bool HaveCxx20 =
+ Std &&
+ (Std->containsValue("c++2a") || Std->containsValue("gnu++2a") ||
+ Std->containsValue("c++20") || Std->containsValue("gnu++20") ||
+ Std->containsValue("c++2b") || Std->containsValue("gnu++2b") ||
+ Std->containsValue("c++23") || Std->containsValue("gnu++23") ||
+ Std->containsValue("c++2c") || Std->containsValue("gnu++2c") ||
+ Std->containsValue("c++26") || Std->containsValue("gnu++26") ||
+ Std->containsValue("c++latest") || Std->containsValue("gnu++latest"));
+ bool HaveModules =
+ RenderModulesOptions(C, D, Args, Input, Output, HaveCxx20, CmdArgs);
- Args.AddLastArg(CmdArgs, options::OPT_finline_functions,
- options::OPT_finline_hint_functions,
- options::OPT_fno_inline_functions);
+ // -fdelayed-template-parsing is default when targeting MSVC.
+ // Many old Windows SDK versions require this to parse.
+ //
+ // According to
+ // https://learn.microsoft.com/en-us/cpp/build/reference/permissive-standards-conformance?view=msvc-170,
+ // MSVC actually defaults to -fno-delayed-template-parsing (/Zc:twoPhase-
+ // with MSVC CLI) if using C++20. So we match the behavior with MSVC here to
+ // not enable -fdelayed-template-parsing by default after C++20.
+ //
+ // FIXME: Given -fdelayed-template-parsing is a source of bugs, we should be
+ // able to disable this by default at some point.
+ if (Args.hasFlag(options::OPT_fdelayed_template_parsing,
+ options::OPT_fno_delayed_template_parsing,
+ IsWindowsMSVC && !HaveCxx20)) {
+ if (HaveCxx20)
+ D.Diag(clang::diag::warn_drv_delayed_template_parsing_after_cxx20);
- // FIXME: Find a better way to determine whether the language has modules
- // support by default, or just assume that all languages do.
- bool HaveModules =
- Std && (Std->containsValue("c++2a") || Std->containsValue("c++20") ||
- Std->containsValue("c++latest"));
- RenderModulesOptions(C, D, Args, Input, Output, CmdArgs, HaveModules);
+ CmdArgs.push_back("-fdelayed-template-parsing");
+ }
if (Args.hasFlag(options::OPT_fpch_validate_input_files_content,
options::OPT_fno_pch_validate_input_files_content, false))
@@ -6213,9 +7026,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
false))
CmdArgs.push_back("-fmodules-debuginfo");
- Args.AddLastArg(CmdArgs, options::OPT_flegacy_pass_manager,
- options::OPT_fno_legacy_pass_manager);
-
ObjCRuntime Runtime = AddObjCRuntimeArgs(Args, Inputs, CmdArgs, rewriteKind);
RenderObjCOptions(TC, D, RawTriple, Args, Runtime, rewriteKind != RK_None,
Input, CmdArgs);
@@ -6266,22 +7076,23 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
// C++ "sane" operator new.
- if (!Args.hasFlag(options::OPT_fassume_sane_operator_new,
- options::OPT_fno_assume_sane_operator_new))
- CmdArgs.push_back("-fno-assume-sane-operator-new");
+ Args.addOptOutFlag(CmdArgs, options::OPT_fassume_sane_operator_new,
+ options::OPT_fno_assume_sane_operator_new);
+
+ // -fassume-unique-vtables is on by default.
+ Args.addOptOutFlag(CmdArgs, options::OPT_fassume_unique_vtables,
+ options::OPT_fno_assume_unique_vtables);
// -frelaxed-template-template-args is off by default, as it is a severe
// breaking change until a corresponding change to template partial ordering
// is provided.
- if (Args.hasFlag(options::OPT_frelaxed_template_template_args,
- options::OPT_fno_relaxed_template_template_args, false))
- CmdArgs.push_back("-frelaxed-template-template-args");
+ Args.addOptInFlag(CmdArgs, options::OPT_frelaxed_template_template_args,
+ options::OPT_fno_relaxed_template_template_args);
// -fsized-deallocation is off by default, as it is an ABI-breaking change for
// most platforms.
- if (Args.hasFlag(options::OPT_fsized_deallocation,
- options::OPT_fno_sized_deallocation, false))
- CmdArgs.push_back("-fsized-deallocation");
+ Args.addOptInFlag(CmdArgs, options::OPT_fsized_deallocation,
+ options::OPT_fno_sized_deallocation);
// -faligned-allocation is on by default in C++17 onwards and otherwise off
// by default.
@@ -6304,15 +7115,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// -fconstant-cfstrings is default, and may be subject to argument translation
// on Darwin.
if (!Args.hasFlag(options::OPT_fconstant_cfstrings,
- options::OPT_fno_constant_cfstrings) ||
+ options::OPT_fno_constant_cfstrings, true) ||
!Args.hasFlag(options::OPT_mconstant_cfstrings,
- options::OPT_mno_constant_cfstrings))
+ options::OPT_mno_constant_cfstrings, true))
CmdArgs.push_back("-fno-constant-cfstrings");
- // -fno-pascal-strings is default, only pass non-default.
- if (Args.hasFlag(options::OPT_fpascal_strings,
- options::OPT_fno_pascal_strings, false))
- CmdArgs.push_back("-fpascal-strings");
+ Args.addOptInFlag(CmdArgs, options::OPT_fpascal_strings,
+ options::OPT_fno_pascal_strings);
// Honor -fpack-struct= and -fpack-struct, if given. Note that
// -fno-pack-struct doesn't apply to -fpack-struct=.
@@ -6344,18 +7153,17 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-Qn");
// -fno-common is the default, set -fcommon only when that flag is set.
- if (Args.hasFlag(options::OPT_fcommon, options::OPT_fno_common, false))
- CmdArgs.push_back("-fcommon");
+ Args.addOptInFlag(CmdArgs, options::OPT_fcommon, options::OPT_fno_common);
// -fsigned-bitfields is default, and clang doesn't yet support
// -funsigned-bitfields.
if (!Args.hasFlag(options::OPT_fsigned_bitfields,
- options::OPT_funsigned_bitfields))
+ options::OPT_funsigned_bitfields, true))
D.Diag(diag::warn_drv_clang_unsupported)
<< Args.getLastArg(options::OPT_funsigned_bitfields)->getAsString(Args);
// -fsigned-bitfields is default, and clang doesn't support -fno-for-scope.
- if (!Args.hasFlag(options::OPT_ffor_scope, options::OPT_fno_for_scope))
+ if (!Args.hasFlag(options::OPT_ffor_scope, options::OPT_fno_for_scope, true))
D.Diag(diag::err_drv_clang_unsupported)
<< Args.getLastArg(options::OPT_fno_for_scope)->getAsString(Args);
@@ -6377,15 +7185,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
RenderDiagnosticsOptions(D, Args, CmdArgs);
- // -fno-asm-blocks is default.
- if (Args.hasFlag(options::OPT_fasm_blocks, options::OPT_fno_asm_blocks,
- false))
- CmdArgs.push_back("-fasm-blocks");
+ Args.addOptInFlag(CmdArgs, options::OPT_fasm_blocks,
+ options::OPT_fno_asm_blocks);
- // -fgnu-inline-asm is default.
- if (!Args.hasFlag(options::OPT_fgnu_inline_asm,
- options::OPT_fno_gnu_inline_asm, true))
- CmdArgs.push_back("-fno-gnu-inline-asm");
+ Args.addOptOutFlag(CmdArgs, options::OPT_fgnu_inline_asm,
+ options::OPT_fno_gnu_inline_asm);
// Enable vectorization per default according to the optimization level
// selected. For optimization levels that want vectorization we use the alias
@@ -6421,21 +7225,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fno-dollars-in-identifiers");
}
- // -funit-at-a-time is default, and we don't support -fno-unit-at-a-time for
- // practical purposes.
- if (Arg *A = Args.getLastArg(options::OPT_funit_at_a_time,
- options::OPT_fno_unit_at_a_time)) {
- if (A->getOption().matches(options::OPT_fno_unit_at_a_time))
- D.Diag(diag::warn_drv_clang_unsupported) << A->getAsString(Args);
- }
-
- if (Args.hasFlag(options::OPT_fapple_pragma_pack,
- options::OPT_fno_apple_pragma_pack, false))
- CmdArgs.push_back("-fapple-pragma-pack");
-
- if (Args.hasFlag(options::OPT_fxl_pragma_pack,
- options::OPT_fno_xl_pragma_pack, RawTriple.isOSAIX()))
- CmdArgs.push_back("-fxl-pragma-pack");
+ Args.addOptInFlag(CmdArgs, options::OPT_fapple_pragma_pack,
+ options::OPT_fno_apple_pragma_pack);
// Remarks can be enabled with any of the `-f.*optimization-record.*` flags.
if (willEmitRemarks(Args) && checkRemarksOptions(D, Args, Triple))
@@ -6446,6 +7237,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (RewriteImports)
CmdArgs.push_back("-frewrite-imports");
+ Args.addOptInFlag(CmdArgs, options::OPT_fdirectives_only,
+ options::OPT_fno_directives_only);
+
// Enable rewrite includes if the user's asked for it or if we're generating
// diagnostics.
// TODO: Once -module-dependency-dir works with -frewrite-includes it'd be
@@ -6466,6 +7260,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_dM);
Args.AddLastArg(CmdArgs, options::OPT_dD);
+ Args.AddLastArg(CmdArgs, options::OPT_dI);
Args.AddLastArg(CmdArgs, options::OPT_fmax_tokens_EQ);
@@ -6490,6 +7285,35 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
A->claim();
}
+ // Turn -fplugin-arg-pluginname-key=value into
+ // -plugin-arg-pluginname key=value
+ // GCC has an actual plugin_argument struct with key/value pairs that it
+ // passes to its plugins, but we don't, so just pass it on as-is.
+ //
+ // The syntax for -fplugin-arg- is ambiguous if both plugin name and
+ // argument key are allowed to contain dashes. GCC therefore only
+ // allows dashes in the key. We do the same.
+ for (const Arg *A : Args.filtered(options::OPT_fplugin_arg)) {
+ auto ArgValue = StringRef(A->getValue());
+ auto FirstDashIndex = ArgValue.find('-');
+ StringRef PluginName = ArgValue.substr(0, FirstDashIndex);
+ StringRef Arg = ArgValue.substr(FirstDashIndex + 1);
+
+ A->claim();
+ if (FirstDashIndex == StringRef::npos || Arg.empty()) {
+ if (PluginName.empty()) {
+ D.Diag(diag::warn_drv_missing_plugin_name) << A->getAsString(Args);
+ } else {
+ D.Diag(diag::warn_drv_missing_plugin_arg)
+ << PluginName << A->getAsString(Args);
+ }
+ continue;
+ }
+
+ CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-arg-") + PluginName));
+ CmdArgs.push_back(Args.MakeArgString(Arg));
+ }
+
// Forward -fpass-plugin=name.so to -cc1.
for (const Arg *A : Args.filtered(options::OPT_fpass_plugin_EQ)) {
CmdArgs.push_back(
@@ -6497,25 +7321,36 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
A->claim();
}
+ // Forward --vfsoverlay to -cc1.
+ for (const Arg *A : Args.filtered(options::OPT_vfsoverlay)) {
+ CmdArgs.push_back("--vfsoverlay");
+ CmdArgs.push_back(A->getValue());
+ A->claim();
+ }
+
+ Args.addOptInFlag(CmdArgs, options::OPT_fsafe_buffer_usage_suggestions,
+ options::OPT_fno_safe_buffer_usage_suggestions);
+
// Setup statistics file output.
SmallString<128> StatsFile = getStatsFileName(Args, Output, Input, D);
- if (!StatsFile.empty())
+ if (!StatsFile.empty()) {
CmdArgs.push_back(Args.MakeArgString(Twine("-stats-file=") + StatsFile));
+ if (D.CCPrintInternalStats)
+ CmdArgs.push_back("-stats-file-append");
+ }
// Forward -Xclang arguments to -cc1, and -mllvm arguments to the LLVM option
// parser.
- // -finclude-default-header flag is for preprocessor,
- // do not pass it to other cc1 commands when save-temps is enabled
- if (C.getDriver().isSaveTempsEnabled() &&
- !isa<PreprocessJobAction>(JA)) {
- for (auto Arg : Args.filtered(options::OPT_Xclang)) {
- Arg->claim();
- if (StringRef(Arg->getValue()) != "-finclude-default-header")
- CmdArgs.push_back(Arg->getValue());
+ for (auto Arg : Args.filtered(options::OPT_Xclang)) {
+ Arg->claim();
+ // -finclude-default-header flag is for preprocessor,
+ // do not pass it to other cc1 commands when save-temps is enabled
+ if (C.getDriver().isSaveTempsEnabled() &&
+ !isa<PreprocessJobAction>(JA)) {
+ if (StringRef(Arg->getValue()) == "-finclude-default-header")
+ continue;
}
- }
- else {
- Args.AddAllArgValues(CmdArgs, options::OPT_Xclang);
+ CmdArgs.push_back(Arg->getValue());
}
for (const Arg *A : Args.filtered(options::OPT_mllvm)) {
A->claim();
@@ -6559,7 +7394,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
auto FRecordSwitches =
Args.hasFlag(options::OPT_frecord_command_line,
options::OPT_fno_record_command_line, false);
- if (FRecordSwitches && !Triple.isOSBinFormatELF())
+ if (FRecordSwitches && !Triple.isOSBinFormatELF() &&
+ !Triple.isOSBinFormatXCOFF() && !Triple.isOSBinFormatMachO())
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< Args.getLastArg(options::OPT_frecord_command_line)->getAsString(Args)
<< TripleStr;
@@ -6587,13 +7423,22 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- // Host-side cuda compilation receives all device-side outputs in a single
- // fatbin as Inputs[1]. Include the binary with -fcuda-include-gpubinary.
+ // Host-side offloading compilation receives all device-side outputs. Include
+ // them in the host compilation depending on the target. If the host inputs
+ // are not empty we use the new-driver scheme, otherwise use the old scheme.
if ((IsCuda || IsHIP) && CudaDeviceInput) {
+ CmdArgs.push_back("-fcuda-include-gpubinary");
+ CmdArgs.push_back(CudaDeviceInput->getFilename());
+ } else if (!HostOffloadingInputs.empty()) {
+ if ((IsCuda || IsHIP) && !IsRDCMode) {
+ assert(HostOffloadingInputs.size() == 1 && "Only one input expected");
CmdArgs.push_back("-fcuda-include-gpubinary");
- CmdArgs.push_back(CudaDeviceInput->getFilename());
- if (Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc, false))
- CmdArgs.push_back("-fgpu-rdc");
+ CmdArgs.push_back(HostOffloadingInputs.front().getFilename());
+ } else {
+ for (const InputInfo Input : HostOffloadingInputs)
+ CmdArgs.push_back(Args.MakeArgString("-fembed-offload-object=" +
+ TC.getInputFilename(Input)));
+ }
}
if (IsCuda) {
@@ -6612,10 +7457,30 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
auto CUID = cast<InputAction>(SourceAction)->getId();
if (!CUID.empty())
CmdArgs.push_back(Args.MakeArgString(Twine("-cuid=") + Twine(CUID)));
+
+ // -ffast-math turns on -fgpu-approx-transcendentals implicitly, but will
+ // be overriden by -fno-gpu-approx-transcendentals.
+ bool UseApproxTranscendentals = Args.hasFlag(
+ options::OPT_ffast_math, options::OPT_fno_fast_math, false);
+ if (Args.hasFlag(options::OPT_fgpu_approx_transcendentals,
+ options::OPT_fno_gpu_approx_transcendentals,
+ UseApproxTranscendentals))
+ CmdArgs.push_back("-fgpu-approx-transcendentals");
+ } else {
+ Args.claimAllArgs(options::OPT_fgpu_approx_transcendentals,
+ options::OPT_fno_gpu_approx_transcendentals);
}
- if (IsHIP)
+ if (IsHIP) {
CmdArgs.push_back("-fcuda-allow-variadic-functions");
+ Args.AddLastArg(CmdArgs, options::OPT_fgpu_default_stream_EQ);
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_foffload_uniform_block,
+ options::OPT_fno_offload_uniform_block);
+
+ Args.AddLastArg(CmdArgs, options::OPT_foffload_implicit_host_device_templates,
+ options::OPT_fno_offload_implicit_host_device_templates);
if (IsCudaDevice || IsHIPDevice) {
StringRef InlineThresh =
@@ -6627,13 +7492,18 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ if (IsHIPDevice)
+ Args.addOptOutFlag(CmdArgs,
+ options::OPT_fhip_fp32_correctly_rounded_divide_sqrt,
+ options::OPT_fno_hip_fp32_correctly_rounded_divide_sqrt);
+
// OpenMP offloading device jobs take the argument -fopenmp-host-ir-file-path
// to specify the result of the compile phase on the host, so the meaningful
- // device declarations can be identified. Also, -fopenmp-is-device is passed
- // along to tell the frontend that it is generating code for a device, so that
- // only the relevant declarations are emitted.
+ // device declarations can be identified. Also, -fopenmp-is-target-device is
+ // passed along to tell the frontend that it is generating code for a device,
+ // so that only the relevant declarations are emitted.
if (IsOpenMPDevice) {
- CmdArgs.push_back("-fopenmp-is-device");
+ CmdArgs.push_back("-fopenmp-is-target-device");
if (OpenMPDeviceInput) {
CmdArgs.push_back("-fopenmp-host-ir-file-path");
CmdArgs.push_back(Args.MakeArgString(OpenMPDeviceInput->getFilename()));
@@ -6643,28 +7513,22 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Triple.isAMDGPU()) {
handleAMDGPUCodeObjectVersionOptions(D, Args, CmdArgs);
- if (Args.hasFlag(options::OPT_munsafe_fp_atomics,
- options::OPT_mno_unsafe_fp_atomics, /*Default=*/false))
- CmdArgs.push_back("-munsafe-fp-atomics");
+ Args.addOptInFlag(CmdArgs, options::OPT_munsafe_fp_atomics,
+ options::OPT_mno_unsafe_fp_atomics);
+ Args.addOptOutFlag(CmdArgs, options::OPT_mamdgpu_ieee,
+ options::OPT_mno_amdgpu_ieee);
}
// For all the host OpenMP offloading compile jobs we need to pass the targets
// information using -fopenmp-targets= option.
if (JA.isHostOffloading(Action::OFK_OpenMP)) {
- SmallString<128> TargetInfo("-fopenmp-targets=");
+ SmallString<128> Targets("-fopenmp-targets=");
- Arg *Tgts = Args.getLastArg(options::OPT_fopenmp_targets_EQ);
- assert(Tgts && Tgts->getNumValues() &&
- "OpenMP offloading has to have targets specified.");
- for (unsigned i = 0; i < Tgts->getNumValues(); ++i) {
- if (i)
- TargetInfo += ',';
- // We need to get the string from the triple because it may be not exactly
- // the same as the one we get directly from the arguments.
- llvm::Triple T(Tgts->getValue(i));
- TargetInfo += T.getTriple();
- }
- CmdArgs.push_back(Args.MakeArgString(TargetInfo.str()));
+ SmallVector<std::string, 4> Triples;
+ auto TCRange = C.getOffloadToolChains<Action::OFK_OpenMP>();
+ std::transform(TCRange.first, TCRange.second, std::back_inserter(Triples),
+ [](auto TC) { return TC.second->getTripleString(); });
+ CmdArgs.push_back(Args.MakeArgString(Targets + llvm::join(Triples, ",")));
}
bool VirtualFunctionElimination =
@@ -6692,31 +7556,55 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
if (WholeProgramVTables) {
- // Propagate -fwhole-program-vtables if this is an LTO compile.
- if (IsUsingLTO)
- CmdArgs.push_back("-fwhole-program-vtables");
+ // PS4 uses the legacy LTO API, which does not support this feature in
+ // ThinLTO mode.
+ bool IsPS4 = getToolChain().getTriple().isPS4();
+
// Check if we passed LTO options but they were suppressed because this is a
// device offloading action, or we passed device offload LTO options which
// were suppressed because this is not the device offload action.
+ // Check if we are using PS4 in regular LTO mode.
// Otherwise, issue an error.
- else if (!D.isUsingLTO(!IsDeviceOffloadAction))
+ if ((!IsUsingLTO && !D.isUsingLTO(!IsDeviceOffloadAction)) ||
+ (IsPS4 && !UnifiedLTO && (D.getLTOMode() != LTOK_Full)))
D.Diag(diag::err_drv_argument_only_allowed_with)
<< "-fwhole-program-vtables"
- << "-flto";
+ << ((IsPS4 && !UnifiedLTO) ? "-flto=full" : "-flto");
+
+ // Propagate -fwhole-program-vtables if this is an LTO compile.
+ if (IsUsingLTO)
+ CmdArgs.push_back("-fwhole-program-vtables");
}
bool DefaultsSplitLTOUnit =
- (WholeProgramVTables || Sanitize.needsLTO()) &&
- (LTOMode == LTOK_Full || TC.canSplitThinLTOUnit());
+ ((WholeProgramVTables || SanitizeArgs.needsLTO()) &&
+ (LTOMode == LTOK_Full || TC.canSplitThinLTOUnit())) ||
+ (!Triple.isPS4() && UnifiedLTO);
bool SplitLTOUnit =
Args.hasFlag(options::OPT_fsplit_lto_unit,
options::OPT_fno_split_lto_unit, DefaultsSplitLTOUnit);
- if (Sanitize.needsLTO() && !SplitLTOUnit)
+ if (SanitizeArgs.needsLTO() && !SplitLTOUnit)
D.Diag(diag::err_drv_argument_not_allowed_with) << "-fno-split-lto-unit"
<< "-fsanitize=cfi";
if (SplitLTOUnit)
CmdArgs.push_back("-fsplit-lto-unit");
+ if (Arg *A = Args.getLastArg(options::OPT_ffat_lto_objects,
+ options::OPT_fno_fat_lto_objects)) {
+ if (IsUsingLTO && A->getOption().matches(options::OPT_ffat_lto_objects)) {
+ assert(LTOMode == LTOK_Full || LTOMode == LTOK_Thin);
+ if (!Triple.isOSBinFormatELF()) {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << TC.getTripleString();
+ }
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine("-flto=") + (LTOMode == LTOK_Thin ? "thin" : "full")));
+ CmdArgs.push_back("-flto-unit");
+ CmdArgs.push_back("-ffat-lto-objects");
+ A->render(Args, CmdArgs);
+ }
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_fglobal_isel,
options::OPT_fno_global_isel)) {
CmdArgs.push_back("-mllvm");
@@ -6765,34 +7653,31 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fforce-enable-int128");
}
- if (Args.hasFlag(options::OPT_fkeep_static_consts,
- options::OPT_fno_keep_static_consts, false))
- CmdArgs.push_back("-fkeep-static-consts");
-
- if (Args.hasFlag(options::OPT_fcomplete_member_pointers,
- options::OPT_fno_complete_member_pointers, false))
- CmdArgs.push_back("-fcomplete-member-pointers");
-
- if (!Args.hasFlag(options::OPT_fcxx_static_destructors,
- options::OPT_fno_cxx_static_destructors, true))
- CmdArgs.push_back("-fno-c++-static-destructors");
+ Args.addOptInFlag(CmdArgs, options::OPT_fkeep_static_consts,
+ options::OPT_fno_keep_static_consts);
+ Args.addOptInFlag(CmdArgs, options::OPT_fkeep_persistent_storage_variables,
+ options::OPT_fno_keep_persistent_storage_variables);
+ Args.addOptInFlag(CmdArgs, options::OPT_fcomplete_member_pointers,
+ options::OPT_fno_complete_member_pointers);
+ Args.addOptOutFlag(CmdArgs, options::OPT_fcxx_static_destructors,
+ options::OPT_fno_cxx_static_destructors);
addMachineOutlinerArgs(D, Args, CmdArgs, Triple, /*IsLTO=*/false);
if (Arg *A = Args.getLastArg(options::OPT_moutline_atomics,
options::OPT_mno_outline_atomics)) {
- if (A->getOption().matches(options::OPT_moutline_atomics)) {
- // Option -moutline-atomics supported for AArch64 target only.
- if (!Triple.isAArch64()) {
- D.Diag(diag::warn_drv_moutline_atomics_unsupported_opt)
- << Triple.getArchName();
- } else {
+ // Option -moutline-atomics supported for AArch64 target only.
+ if (!Triple.isAArch64()) {
+ D.Diag(diag::warn_drv_moutline_atomics_unsupported_opt)
+ << Triple.getArchName() << A->getOption().getName();
+ } else {
+ if (A->getOption().matches(options::OPT_moutline_atomics)) {
CmdArgs.push_back("-target-feature");
CmdArgs.push_back("+outline-atomics");
+ } else {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-outline-atomics");
}
- } else {
- CmdArgs.push_back("-target-feature");
- CmdArgs.push_back("-outline-atomics");
}
} else if (Triple.isAArch64() &&
getToolChain().IsAArch64OutlineAtomicsDefault(Args)) {
@@ -6800,6 +7685,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("+outline-atomics");
}
+ if (Triple.isAArch64() &&
+ (Args.hasArg(options::OPT_mno_fmv) ||
+ (Triple.isAndroid() && Triple.isAndroidVersionLT(23)) ||
+ getToolChain().GetRuntimeLibType(Args) != ToolChain::RLT_CompilerRT)) {
+ // Disable Function Multiversioning on AArch64 target.
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-fmv");
+ }
+
if (Args.hasFlag(options::OPT_faddrsig, options::OPT_fno_addrsig,
(TC.getTriple().isOSBinFormatELF() ||
TC.getTriple().isOSBinFormatCOFF()) &&
@@ -6810,7 +7704,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-faddrsig");
if ((Triple.isOSBinFormatELF() || Triple.isOSBinFormatMachO()) &&
- (EH || UnwindTables || DebugInfoKind != codegenoptions::NoDebugInfo))
+ (EH || UnwindTables || AsyncUnwindTables ||
+ DebugInfoKind != llvm::codegenoptions::NoDebugInfo))
CmdArgs.push_back("-D__GCC_HAVE_DWARF2_CFI_ASM=1");
if (Arg *A = Args.getLastArg(options::OPT_fsymbol_partition_EQ)) {
@@ -6843,8 +7738,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
addDashXForInput(Args, Input, CmdArgs);
ArrayRef<InputInfo> FrontendInputs = Input;
- if (IsHeaderModulePrecompile)
- FrontendInputs = ModuleHeaderInputs;
+ if (IsExtractAPI)
+ FrontendInputs = ExtractAPIInputs;
else if (Input.isNothing())
FrontendInputs = {};
@@ -6857,13 +7752,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (D.CC1Main && !D.CCGenDiagnostics) {
// Invoke the CC1 directly in this process
- C.addCommand(std::make_unique<CC1Command>(JA, *this,
- ResponseFileSupport::AtFileUTF8(),
- Exec, CmdArgs, Inputs, Output));
+ C.addCommand(std::make_unique<CC1Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs,
+ Output, D.getPrependArg()));
} else {
- C.addCommand(std::make_unique<Command>(JA, *this,
- ResponseFileSupport::AtFileUTF8(),
- Exec, CmdArgs, Inputs, Output));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs,
+ Output, D.getPrependArg()));
}
// Make the compile command echo its inputs for /showFilenames.
@@ -6894,11 +7789,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.ClaimAllArgs(options::OPT_emit_llvm);
}
-Clang::Clang(const ToolChain &TC)
+Clang::Clang(const ToolChain &TC, bool HasIntegratedBackend)
// CAUTION! The first constructor argument ("clang") is not arbitrary,
// as it is for other tools. Some operations on a Tool actually test
// whether that tool is Clang based on the Tool's Name as a string.
- : Tool("clang", "clang frontend", TC) {}
+ : Tool("clang", "clang frontend", TC), HasBackend(HasIntegratedBackend) {}
Clang::~Clang() {}
@@ -7096,66 +7991,20 @@ static EHFlags parseClangCLEHFlags(const Driver &D, const ArgList &Args) {
EH.NoUnwindC = true;
}
+ if (Args.hasArg(options::OPT__SLASH_kernel)) {
+ EH.Synch = false;
+ EH.NoUnwindC = false;
+ EH.Asynch = false;
+ }
+
return EH;
}
void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
- ArgStringList &CmdArgs,
- codegenoptions::DebugInfoKind *DebugInfoKind,
- bool *EmitCodeView) const {
- unsigned RTOptionID = options::OPT__SLASH_MT;
+ ArgStringList &CmdArgs) const {
bool isNVPTX = getToolChain().getTriple().isNVPTX();
- if (Args.hasArg(options::OPT__SLASH_LDd))
- // The /LDd option implies /MTd. The dependent lib part can be overridden,
- // but defining _DEBUG is sticky.
- RTOptionID = options::OPT__SLASH_MTd;
-
- if (Arg *A = Args.getLastArg(options::OPT__SLASH_M_Group))
- RTOptionID = A->getOption().getID();
-
- StringRef FlagForCRT;
- switch (RTOptionID) {
- case options::OPT__SLASH_MD:
- if (Args.hasArg(options::OPT__SLASH_LDd))
- CmdArgs.push_back("-D_DEBUG");
- CmdArgs.push_back("-D_MT");
- CmdArgs.push_back("-D_DLL");
- FlagForCRT = "--dependent-lib=msvcrt";
- break;
- case options::OPT__SLASH_MDd:
- CmdArgs.push_back("-D_DEBUG");
- CmdArgs.push_back("-D_MT");
- CmdArgs.push_back("-D_DLL");
- FlagForCRT = "--dependent-lib=msvcrtd";
- break;
- case options::OPT__SLASH_MT:
- if (Args.hasArg(options::OPT__SLASH_LDd))
- CmdArgs.push_back("-D_DEBUG");
- CmdArgs.push_back("-D_MT");
- CmdArgs.push_back("-flto-visibility-public-std");
- FlagForCRT = "--dependent-lib=libcmt";
- break;
- case options::OPT__SLASH_MTd:
- CmdArgs.push_back("-D_DEBUG");
- CmdArgs.push_back("-D_MT");
- CmdArgs.push_back("-flto-visibility-public-std");
- FlagForCRT = "--dependent-lib=libcmtd";
- break;
- default:
- llvm_unreachable("Unexpected option ID.");
- }
-
- if (Args.hasArg(options::OPT__SLASH_Zl)) {
- CmdArgs.push_back("-D_VC_NODEFAULTLIB");
- } else {
- CmdArgs.push_back(FlagForCRT.data());
-
- // This provides POSIX compatibility (maps 'open' to '_open'), which most
- // users want. The /Za flag to cl.exe turns this off, but it's not
- // implemented in clang.
- CmdArgs.push_back("--dependent-lib=oldnames");
- }
+ ProcessVSRuntimeLibrary(Args, CmdArgs);
if (Arg *ShowIncludes =
Args.getLastArg(options::OPT__SLASH_showIncludes,
@@ -7178,24 +8027,15 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
CmdArgs.push_back(Args.MakeArgString(Twine(LangOptions::SSPStrong)));
}
- // Emit CodeView if -Z7 or -gline-tables-only are present.
- if (Arg *DebugInfoArg = Args.getLastArg(options::OPT__SLASH_Z7,
- options::OPT_gline_tables_only)) {
- *EmitCodeView = true;
- if (DebugInfoArg->getOption().matches(options::OPT__SLASH_Z7))
- *DebugInfoKind = codegenoptions::DebugInfoConstructor;
- else
- *DebugInfoKind = codegenoptions::DebugLineTablesOnly;
- } else {
- *EmitCodeView = false;
- }
-
const Driver &D = getToolChain().getDriver();
+
EHFlags EH = parseClangCLEHFlags(D, Args);
if (!isNVPTX && (EH.Synch || EH.Asynch)) {
if (types::isCXX(InputType))
CmdArgs.push_back("-fcxx-exceptions");
CmdArgs.push_back("-fexceptions");
+ if (EH.Asynch)
+ CmdArgs.push_back("-fasync-exceptions");
}
if (types::isCXX(InputType) && EH.Synch && EH.NoUnwindC)
CmdArgs.push_back("-fexternc-nounwind");
@@ -7206,24 +8046,38 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
CmdArgs.push_back("-P");
}
- unsigned VolatileOptionID;
- if (getToolChain().getTriple().isX86())
- VolatileOptionID = options::OPT__SLASH_volatile_ms;
- else
- VolatileOptionID = options::OPT__SLASH_volatile_iso;
-
- if (Arg *A = Args.getLastArg(options::OPT__SLASH_volatile_Group))
- VolatileOptionID = A->getOption().getID();
-
- if (VolatileOptionID == options::OPT__SLASH_volatile_ms)
- CmdArgs.push_back("-fms-volatile");
-
if (Args.hasFlag(options::OPT__SLASH_Zc_dllexportInlines_,
options::OPT__SLASH_Zc_dllexportInlines,
false)) {
CmdArgs.push_back("-fno-dllexport-inlines");
}
+ if (Args.hasFlag(options::OPT__SLASH_Zc_wchar_t_,
+ options::OPT__SLASH_Zc_wchar_t, false)) {
+ CmdArgs.push_back("-fno-wchar");
+ }
+
+ if (Args.hasArg(options::OPT__SLASH_kernel)) {
+ llvm::Triple::ArchType Arch = getToolChain().getArch();
+ std::vector<std::string> Values =
+ Args.getAllArgValues(options::OPT__SLASH_arch);
+ if (!Values.empty()) {
+ llvm::SmallSet<std::string, 4> SupportedArches;
+ if (Arch == llvm::Triple::x86)
+ SupportedArches.insert("IA32");
+
+ for (auto &V : Values)
+ if (!SupportedArches.contains(V))
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << std::string("/arch:").append(V) << "/kernel";
+ }
+
+ CmdArgs.push_back("-fno-rtti");
+ if (Args.hasFlag(options::OPT__SLASH_GR, options::OPT__SLASH_GR_, false))
+ D.Diag(diag::err_drv_argument_not_allowed_with) << "/GR"
+ << "/kernel";
+ }
+
Arg *MostGeneralArg = Args.getLastArg(options::OPT__SLASH_vmg);
Arg *BestCaseArg = Args.getLastArg(options::OPT__SLASH_vmb);
if (MostGeneralArg && BestCaseArg)
@@ -7250,6 +8104,9 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
CmdArgs.push_back("-fms-memptr-rep=virtual");
}
+ if (Args.hasArg(options::OPT_regcall4))
+ CmdArgs.push_back("-regcall4");
+
// Parse the default calling convention options.
if (Arg *CCArg =
Args.getLastArg(options::OPT__SLASH_Gd, options::OPT__SLASH_Gr,
@@ -7286,6 +8143,9 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
CmdArgs.push_back(DCCFlag);
}
+ if (Args.hasArg(options::OPT__SLASH_Gregcall4))
+ CmdArgs.push_back("-regcall4");
+
Args.AddLastArg(CmdArgs, options::OPT_vtordisp_mode_EQ);
if (!Args.hasArg(options::OPT_fdiagnostics_format_EQ)) {
@@ -7293,7 +8153,10 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
CmdArgs.push_back("msvc");
}
- if (Arg *A = Args.getLastArg(options::OPT__SLASH_guard)) {
+ if (Args.hasArg(options::OPT__SLASH_kernel))
+ CmdArgs.push_back("-fms-kernel");
+
+ for (const Arg *A : Args.filtered(options::OPT__SLASH_guard)) {
StringRef GuardArgs = A->getValue();
// The only valid options are "cf", "cf,nochecks", "cf-", "ehcont" and
// "ehcont-".
@@ -7312,6 +8175,7 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
} else {
D.Diag(diag::err_drv_invalid_value) << A->getSpelling() << GuardArgs;
}
+ A->claim();
}
}
@@ -7368,11 +8232,19 @@ void ClangAs::AddX86TargetArgs(const ArgList &Args,
CmdArgs.push_back(Args.MakeArgString("-x86-asm-syntax=" + Value));
} else {
getToolChain().getDriver().Diag(diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << Value;
+ << A->getSpelling() << Value;
}
}
}
+void ClangAs::AddLoongArchTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ CmdArgs.push_back("-target-abi");
+ CmdArgs.push_back(loongarch::getLoongArchABI(getToolChain().getDriver(), Args,
+ getToolChain().getTriple())
+ .data());
+}
+
void ClangAs::AddRISCVTargetArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
const llvm::Triple &Triple = getToolChain().getTriple();
@@ -7380,6 +8252,12 @@ void ClangAs::AddRISCVTargetArgs(const ArgList &Args,
CmdArgs.push_back("-target-abi");
CmdArgs.push_back(ABIName.data());
+
+ if (Args.hasFlag(options::OPT_mdefault_build_attributes,
+ options::OPT_mno_default_build_attributes, true)) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-riscv-add-build-attributes");
+ }
}
void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
@@ -7411,6 +8289,8 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-triple");
CmdArgs.push_back(Args.MakeArgString(TripleStr));
+ getToolChain().addClangCC1ASTargetOptions(Args, CmdArgs);
+
// Set the output mode, we currently only expect to be used as a real
// assembler.
CmdArgs.push_back("-filetype");
@@ -7422,7 +8302,7 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Clang::getBaseInputName(Args, Input));
// Add the target cpu
- std::string CPU = getCPUName(Args, Triple, /*FromAs*/ true);
+ std::string CPU = getCPUName(D, Args, Triple, /*FromAs*/ true);
if (!CPU.empty()) {
CmdArgs.push_back("-target-cpu");
CmdArgs.push_back(Args.MakeArgString(CPU));
@@ -7438,11 +8318,14 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_I_Group);
// Determine the original source input.
- const Action *SourceAction = &JA;
- while (SourceAction->getKind() != Action::InputClass) {
- assert(!SourceAction->getInputs().empty() && "unexpected root action!");
- SourceAction = SourceAction->getInputs()[0];
- }
+ auto FindSource = [](const Action *S) -> const Action * {
+ while (S->getKind() != Action::InputClass) {
+ assert(!S->getInputs().empty() && "unexpected root action!");
+ S = S->getInputs()[0];
+ }
+ return S;
+ };
+ const Action *SourceAction = FindSource(&JA);
// Forward -g and handle debug info related flags, assuming we are dealing
// with an actual assembly file.
@@ -7452,14 +8335,12 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
WantDebug = !A->getOption().matches(options::OPT_g0) &&
!A->getOption().matches(options::OPT_ggdb0);
- unsigned DwarfVersion = ParseDebugDefaultVersion(getToolChain(), Args);
- if (const Arg *GDwarfN = getDwarfNArg(Args))
- DwarfVersion = DwarfVersionNum(GDwarfN->getSpelling());
+ llvm::codegenoptions::DebugInfoKind DebugInfoKind =
+ llvm::codegenoptions::NoDebugInfo;
- if (DwarfVersion == 0)
- DwarfVersion = getToolChain().GetDefaultDwarfVersion();
-
- codegenoptions::DebugInfoKind DebugInfoKind = codegenoptions::NoDebugInfo;
+ // Add the -fdebug-compilation-dir flag if needed.
+ const char *DebugCompilationDir =
+ addDebugCompDirArg(Args, CmdArgs, C.getDriver().getVFS());
if (SourceAction->getType() == types::TY_Asm ||
SourceAction->getType() == types::TY_PP_Asm) {
@@ -7467,12 +8348,11 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
// the guard for source type, however there is a test which asserts
// that some assembler invocation receives no -debug-info-kind,
// and it's not clear whether that test is just overly restrictive.
- DebugInfoKind = (WantDebug ? codegenoptions::DebugInfoConstructor
- : codegenoptions::NoDebugInfo);
- // Add the -fdebug-compilation-dir flag if needed.
- addDebugCompDirArg(Args, CmdArgs, C.getDriver().getVFS());
+ DebugInfoKind = (WantDebug ? llvm::codegenoptions::DebugInfoConstructor
+ : llvm::codegenoptions::NoDebugInfo);
- addDebugPrefixMapArg(getToolChain().getDriver(), Args, CmdArgs);
+ addDebugPrefixMapArg(getToolChain().getDriver(), getToolChain(), Args,
+ CmdArgs);
// Set the AT_producer to the clang version when using the integrated
// assembler on assembly source files.
@@ -7482,12 +8362,12 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
// And pass along -I options
Args.AddAllArgs(CmdArgs, options::OPT_I);
}
+ const unsigned DwarfVersion = getDwarfVersion(getToolChain(), Args);
RenderDebugEnablingArgs(Args, CmdArgs, DebugInfoKind, DwarfVersion,
llvm::DebuggerKind::Default);
renderDwarfFormat(D, Triple, Args, CmdArgs, DwarfVersion);
RenderDebugInfoCompressionArgs(Args, CmdArgs, D, getToolChain());
-
// Handle -fPIC et al -- the relocation-model affects the assembler
// for some targets.
llvm::Reloc::Model RelocationModel;
@@ -7563,6 +8443,11 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
}
break;
+ case llvm::Triple::loongarch32:
+ case llvm::Triple::loongarch64:
+ AddLoongArchTargetArgs(Args, CmdArgs);
+ break;
+
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
AddRISCVTargetArgs(Args, CmdArgs);
@@ -7581,6 +8466,29 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_mllvm);
+ if (DebugInfoKind > llvm::codegenoptions::NoDebugInfo && Output.isFilename())
+ addDebugObjectName(Args, CmdArgs, DebugCompilationDir,
+ Output.getFilename());
+
+ // Fixup any previous commands that use -object-file-name because when we
+ // generated them, the final .obj name wasn't yet known.
+ for (Command &J : C.getJobs()) {
+ if (SourceAction != FindSource(&J.getSource()))
+ continue;
+ auto &JArgs = J.getArguments();
+ for (unsigned I = 0; I < JArgs.size(); ++I) {
+ if (StringRef(JArgs[I]).starts_with("-object-file-name=") &&
+ Output.isFilename()) {
+ ArgStringList NewArgs(JArgs.begin(), JArgs.begin() + I);
+ addDebugObjectName(Args, NewArgs, DebugCompilationDir,
+ Output.getFilename());
+ NewArgs.append(JArgs.begin() + I + 1, JArgs.end());
+ J.replaceArguments(NewArgs);
+ break;
+ }
+ }
+ }
+
assert(Output.isFilename() && "Unexpected lipo output.");
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
@@ -7594,7 +8502,7 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
}
if (Triple.isAMDGPU())
- handleAMDGPUCodeObjectVersionOptions(D, Args, CmdArgs);
+ handleAMDGPUCodeObjectVersionOptions(D, Args, CmdArgs, /*IsCC1As=*/true);
assert(Input.isFilename() && "Invalid input.");
CmdArgs.push_back(Input.getFilename());
@@ -7602,13 +8510,13 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec = getToolChain().getDriver().getClangProgramPath();
if (D.CC1Main && !D.CCGenDiagnostics) {
// Invoke cc1as directly in this process.
- C.addCommand(std::make_unique<CC1Command>(JA, *this,
- ResponseFileSupport::AtFileUTF8(),
- Exec, CmdArgs, Inputs, Output));
+ C.addCommand(std::make_unique<CC1Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs,
+ Output, D.getPrependArg()));
} else {
- C.addCommand(std::make_unique<Command>(JA, *this,
- ResponseFileSupport::AtFileUTF8(),
- Exec, CmdArgs, Inputs, Output));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs,
+ Output, D.getPrependArg()));
}
}
@@ -7625,8 +8533,10 @@ void OffloadBundler::ConstructJob(Compilation &C, const JobAction &JA,
// The bundling command looks like this:
// clang-offload-bundler -type=bc
// -targets=host-triple,openmp-triple1,openmp-triple2
- // -outputs=input_file
- // -inputs=unbundle_file_host,unbundle_file_tgt1,unbundle_file_tgt2"
+ // -output=output_file
+ // -input=unbundle_file_host
+ // -input=unbundle_file_tgt1
+ // -input=unbundle_file_tgt2
ArgStringList CmdArgs;
@@ -7658,31 +8568,41 @@ void OffloadBundler::ConstructJob(Compilation &C, const JobAction &JA,
});
}
Triples += Action::GetOffloadKindName(CurKind);
- Triples += "-";
- std::string NormalizedTriple = CurTC->getTriple().normalize();
- Triples += NormalizedTriple;
-
- if (CurDep->getOffloadingArch() != nullptr) {
- // If OffloadArch is present it can only appear as the 6th hypen
- // sepearated field of Bundle Entry ID. So, pad required number of
- // hyphens in Triple.
- for (int i = 4 - StringRef(NormalizedTriple).count("-"); i > 0; i--)
- Triples += "-";
+ Triples += '-';
+ Triples += CurTC->getTriple().normalize();
+ if ((CurKind == Action::OFK_HIP || CurKind == Action::OFK_Cuda) &&
+ !StringRef(CurDep->getOffloadingArch()).empty()) {
+ Triples += '-';
Triples += CurDep->getOffloadingArch();
}
+
+ // TODO: Replace parsing of -march flag. Can be done by storing GPUArch
+ // with each toolchain.
+ StringRef GPUArchName;
+ if (CurKind == Action::OFK_OpenMP) {
+ // Extract GPUArch from -march argument in TC argument list.
+ for (unsigned ArgIndex = 0; ArgIndex < TCArgs.size(); ArgIndex++) {
+ auto ArchStr = StringRef(TCArgs.getArgString(ArgIndex));
+ auto Arch = ArchStr.starts_with_insensitive("-march=");
+ if (Arch) {
+ GPUArchName = ArchStr.substr(7);
+ Triples += "-";
+ break;
+ }
+ }
+ Triples += GPUArchName.str();
+ }
}
CmdArgs.push_back(TCArgs.MakeArgString(Triples));
// Get bundled file command.
CmdArgs.push_back(
- TCArgs.MakeArgString(Twine("-outputs=") + Output.getFilename()));
+ TCArgs.MakeArgString(Twine("-output=") + Output.getFilename()));
// Get unbundled files command.
- SmallString<128> UB;
- UB += "-inputs=";
for (unsigned I = 0; I < Inputs.size(); ++I) {
- if (I)
- UB += ',';
+ SmallString<128> UB;
+ UB += "-input=";
// Find ToolChain for this input.
const ToolChain *CurTC = &getToolChain();
@@ -7697,14 +8617,18 @@ void OffloadBundler::ConstructJob(Compilation &C, const JobAction &JA,
} else {
UB += CurTC->getInputFilename(Inputs[I]);
}
+ CmdArgs.push_back(TCArgs.MakeArgString(UB));
}
- CmdArgs.push_back(TCArgs.MakeArgString(UB));
-
+ if (TCArgs.hasFlag(options::OPT_offload_compress,
+ options::OPT_no_offload_compress, false))
+ CmdArgs.push_back("-compress");
+ if (TCArgs.hasArg(options::OPT_v))
+ CmdArgs.push_back("-verbose");
// All the inputs are encoded as commands.
C.addCommand(std::make_unique<Command>(
JA, *this, ResponseFileSupport::None(),
TCArgs.MakeArgString(getToolChain().GetProgramPath(getShortName())),
- CmdArgs, None, Output));
+ CmdArgs, std::nullopt, Output));
}
void OffloadBundler::ConstructJobMultipleOutputs(
@@ -7717,8 +8641,10 @@ void OffloadBundler::ConstructJobMultipleOutputs(
// The unbundling command looks like this:
// clang-offload-bundler -type=bc
// -targets=host-triple,openmp-triple1,openmp-triple2
- // -inputs=input_file
- // -outputs=unbundle_file_host,unbundle_file_tgt1,unbundle_file_tgt2"
+ // -input=input_file
+ // -output=unbundle_file_host
+ // -output=unbundle_file_tgt1
+ // -output=unbundle_file_tgt2
// -unbundle
ArgStringList CmdArgs;
@@ -7740,68 +8666,112 @@ void OffloadBundler::ConstructJobMultipleOutputs(
auto &Dep = DepInfo[I];
Triples += Action::GetOffloadKindName(Dep.DependentOffloadKind);
- Triples += "-";
- std::string NormalizedTriple =
- Dep.DependentToolChain->getTriple().normalize();
- Triples += NormalizedTriple;
-
- if (!Dep.DependentBoundArch.empty()) {
- // If OffloadArch is present it can only appear as the 6th hypen
- // sepearated field of Bundle Entry ID. So, pad required number of
- // hyphens in Triple.
- for (int i = 4 - StringRef(NormalizedTriple).count("-"); i > 0; i--)
- Triples += "-";
+ Triples += '-';
+ Triples += Dep.DependentToolChain->getTriple().normalize();
+ if ((Dep.DependentOffloadKind == Action::OFK_HIP ||
+ Dep.DependentOffloadKind == Action::OFK_Cuda) &&
+ !Dep.DependentBoundArch.empty()) {
+ Triples += '-';
Triples += Dep.DependentBoundArch;
}
+ // TODO: Replace parsing of -march flag. Can be done by storing GPUArch
+ // with each toolchain.
+ StringRef GPUArchName;
+ if (Dep.DependentOffloadKind == Action::OFK_OpenMP) {
+ // Extract GPUArch from -march argument in TC argument list.
+ for (unsigned ArgIndex = 0; ArgIndex < TCArgs.size(); ArgIndex++) {
+ StringRef ArchStr = StringRef(TCArgs.getArgString(ArgIndex));
+ auto Arch = ArchStr.starts_with_insensitive("-march=");
+ if (Arch) {
+ GPUArchName = ArchStr.substr(7);
+ Triples += "-";
+ break;
+ }
+ }
+ Triples += GPUArchName.str();
+ }
}
CmdArgs.push_back(TCArgs.MakeArgString(Triples));
// Get bundled file command.
CmdArgs.push_back(
- TCArgs.MakeArgString(Twine("-inputs=") + Input.getFilename()));
+ TCArgs.MakeArgString(Twine("-input=") + Input.getFilename()));
// Get unbundled files command.
- SmallString<128> UB;
- UB += "-outputs=";
for (unsigned I = 0; I < Outputs.size(); ++I) {
- if (I)
- UB += ',';
+ SmallString<128> UB;
+ UB += "-output=";
UB += DepInfo[I].DependentToolChain->getInputFilename(Outputs[I]);
+ CmdArgs.push_back(TCArgs.MakeArgString(UB));
}
- CmdArgs.push_back(TCArgs.MakeArgString(UB));
CmdArgs.push_back("-unbundle");
CmdArgs.push_back("-allow-missing-bundles");
+ if (TCArgs.hasArg(options::OPT_v))
+ CmdArgs.push_back("-verbose");
// All the inputs are encoded as commands.
C.addCommand(std::make_unique<Command>(
JA, *this, ResponseFileSupport::None(),
TCArgs.MakeArgString(getToolChain().GetProgramPath(getShortName())),
- CmdArgs, None, Outputs));
+ CmdArgs, std::nullopt, Outputs));
}
-void OffloadWrapper::ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
+void OffloadPackager::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &Args,
+ const char *LinkingOutput) const {
ArgStringList CmdArgs;
- const llvm::Triple &Triple = getToolChain().getEffectiveTriple();
-
- // Add the "effective" target triple.
- CmdArgs.push_back("-target");
- CmdArgs.push_back(Args.MakeArgString(Triple.getTriple()));
-
// Add the output file name.
assert(Output.isFilename() && "Invalid output.");
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- // Add inputs.
- for (const InputInfo &I : Inputs) {
- assert(I.isFilename() && "Invalid input.");
- CmdArgs.push_back(I.getFilename());
+ // Create the inputs to bundle the needed metadata.
+ for (const InputInfo &Input : Inputs) {
+ const Action *OffloadAction = Input.getAction();
+ const ToolChain *TC = OffloadAction->getOffloadingToolChain();
+ const ArgList &TCArgs =
+ C.getArgsForToolChain(TC, OffloadAction->getOffloadingArch(),
+ OffloadAction->getOffloadingDeviceKind());
+ StringRef File = C.getArgs().MakeArgString(TC->getInputFilename(Input));
+ StringRef Arch = OffloadAction->getOffloadingArch()
+ ? OffloadAction->getOffloadingArch()
+ : TCArgs.getLastArgValue(options::OPT_march_EQ);
+ StringRef Kind =
+ Action::GetOffloadKindName(OffloadAction->getOffloadingDeviceKind());
+
+ ArgStringList Features;
+ SmallVector<StringRef> FeatureArgs;
+ getTargetFeatures(TC->getDriver(), TC->getTriple(), TCArgs, Features,
+ false);
+ llvm::copy_if(Features, std::back_inserter(FeatureArgs),
+ [](StringRef Arg) { return !Arg.starts_with("-target"); });
+
+ if (TC->getTriple().isAMDGPU()) {
+ for (StringRef Feature : llvm::split(Arch.split(':').second, ':')) {
+ FeatureArgs.emplace_back(
+ Args.MakeArgString(Feature.take_back() + Feature.drop_back()));
+ }
+ }
+
+ // TODO: We need to pass in the full target-id and handle it properly in the
+ // linker wrapper.
+ SmallVector<std::string> Parts{
+ "file=" + File.str(),
+ "triple=" + TC->getTripleString(),
+ "arch=" + Arch.str(),
+ "kind=" + Kind.str(),
+ };
+
+ if (TC->getDriver().isUsingLTO(/* IsOffload */ true) ||
+ TC->getTriple().isAMDGPU())
+ for (StringRef Feature : FeatureArgs)
+ Parts.emplace_back("feature=" + Feature.str());
+
+ CmdArgs.push_back(Args.MakeArgString("--image=" + llvm::join(Parts, ",")));
}
C.addCommand(std::make_unique<Command>(
@@ -7809,3 +8779,125 @@ void OffloadWrapper::ConstructJob(Compilation &C, const JobAction &JA,
Args.MakeArgString(getToolChain().GetProgramPath(getShortName())),
CmdArgs, Inputs, Output));
}
+
+void LinkerWrapper::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+ const llvm::Triple TheTriple = getToolChain().getTriple();
+ ArgStringList CmdArgs;
+
+ // Pass the CUDA path to the linker wrapper tool.
+ for (Action::OffloadKind Kind : {Action::OFK_Cuda, Action::OFK_OpenMP}) {
+ auto TCRange = C.getOffloadToolChains(Kind);
+ for (auto &I : llvm::make_range(TCRange.first, TCRange.second)) {
+ const ToolChain *TC = I.second;
+ if (TC->getTriple().isNVPTX()) {
+ CudaInstallationDetector CudaInstallation(D, TheTriple, Args);
+ if (CudaInstallation.isValid())
+ CmdArgs.push_back(Args.MakeArgString(
+ "--cuda-path=" + CudaInstallation.getInstallPath()));
+ break;
+ }
+ }
+ }
+
+ // Pass in the optimization level to use for LTO.
+ if (const Arg *A = Args.getLastArg(options::OPT_O_Group)) {
+ StringRef OOpt;
+ if (A->getOption().matches(options::OPT_O4) ||
+ A->getOption().matches(options::OPT_Ofast))
+ OOpt = "3";
+ else if (A->getOption().matches(options::OPT_O)) {
+ OOpt = A->getValue();
+ if (OOpt == "g")
+ OOpt = "1";
+ else if (OOpt == "s" || OOpt == "z")
+ OOpt = "2";
+ } else if (A->getOption().matches(options::OPT_O0))
+ OOpt = "0";
+ if (!OOpt.empty())
+ CmdArgs.push_back(Args.MakeArgString(Twine("--opt-level=O") + OOpt));
+ }
+
+ CmdArgs.push_back(
+ Args.MakeArgString("--host-triple=" + TheTriple.getTriple()));
+ if (Args.hasArg(options::OPT_v))
+ CmdArgs.push_back("--wrapper-verbose");
+
+ if (const Arg *A = Args.getLastArg(options::OPT_g_Group)) {
+ if (!A->getOption().matches(options::OPT_g0))
+ CmdArgs.push_back("--device-debug");
+ }
+
+ // code-object-version=X needs to be passed to clang-linker-wrapper to ensure
+ // that it is used by lld.
+ if (const Arg *A = Args.getLastArg(options::OPT_mcode_object_version_EQ)) {
+ CmdArgs.push_back(Args.MakeArgString("-mllvm"));
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine("--amdhsa-code-object-version=") + A->getValue()));
+ }
+
+ for (const auto &A : Args.getAllArgValues(options::OPT_Xcuda_ptxas))
+ CmdArgs.push_back(Args.MakeArgString("--ptxas-arg=" + A));
+
+ // Forward remarks passes to the LLVM backend in the wrapper.
+ if (const Arg *A = Args.getLastArg(options::OPT_Rpass_EQ))
+ CmdArgs.push_back(Args.MakeArgString(Twine("--offload-opt=-pass-remarks=") +
+ A->getValue()));
+ if (const Arg *A = Args.getLastArg(options::OPT_Rpass_missed_EQ))
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine("--offload-opt=-pass-remarks-missed=") + A->getValue()));
+ if (const Arg *A = Args.getLastArg(options::OPT_Rpass_analysis_EQ))
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine("--offload-opt=-pass-remarks-analysis=") + A->getValue()));
+ if (Args.getLastArg(options::OPT_save_temps_EQ))
+ CmdArgs.push_back("--save-temps");
+
+ // Construct the link job so we can wrap around it.
+ Linker->ConstructJob(C, JA, Output, Inputs, Args, LinkingOutput);
+ const auto &LinkCommand = C.getJobs().getJobs().back();
+
+ // Forward -Xoffload-linker<-triple> arguments to the device link job.
+ for (Arg *A : Args.filtered(options::OPT_Xoffload_linker)) {
+ StringRef Val = A->getValue(0);
+ if (Val.empty())
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("--device-linker=") + A->getValue(1)));
+ else
+ CmdArgs.push_back(Args.MakeArgString(
+ "--device-linker=" +
+ ToolChain::getOpenMPTriple(Val.drop_front()).getTriple() + "=" +
+ A->getValue(1)));
+ }
+ Args.ClaimAllArgs(options::OPT_Xoffload_linker);
+
+ // Embed bitcode instead of an object in JIT mode.
+ if (Args.hasFlag(options::OPT_fopenmp_target_jit,
+ options::OPT_fno_openmp_target_jit, false))
+ CmdArgs.push_back("--embed-bitcode");
+
+ // Forward `-mllvm` arguments to the LLVM invocations if present.
+ for (Arg *A : Args.filtered(options::OPT_mllvm)) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back(A->getValue());
+ A->claim();
+ }
+
+ // Add the linker arguments to be forwarded by the wrapper.
+ CmdArgs.push_back(Args.MakeArgString(Twine("--linker-path=") +
+ LinkCommand->getExecutable()));
+ CmdArgs.push_back("--");
+ for (const char *LinkArg : LinkCommand->getArguments())
+ CmdArgs.push_back(LinkArg);
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("clang-linker-wrapper"));
+
+ // Replace the executable and arguments of the link job with the
+ // wrapper.
+ LinkCommand->replaceExecutable(Exec);
+ LinkCommand->replaceArguments(CmdArgs);
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h
index d4b4988b4a8c..0f503c4bd1c4 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h
@@ -6,17 +6,17 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_Clang_H
-#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_Clang_H
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_CLANG_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_CLANG_H
#include "MSVC.h"
-#include "clang/Basic/DebugInfoOptions.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/Types.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/Frontend/Debug/Options.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
class ObjCRuntime;
@@ -26,6 +26,10 @@ namespace tools {
/// Clang compiler tool.
class LLVM_LIBRARY_VISIBILITY Clang : public Tool {
+ // Indicates whether this instance has integrated backend using
+ // internal LLVM infrastructure.
+ bool HasBackend;
+
public:
static const char *getBaseInputName(const llvm::opt::ArgList &Args,
const InputInfo &Input);
@@ -53,6 +57,8 @@ private:
bool KernelOrKext) const;
void AddARM64TargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
+ void AddLoongArchTargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
void AddMIPSTargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
void AddPPCTargetArgs(const llvm::opt::ArgList &Args,
@@ -84,9 +90,7 @@ private:
RewriteKind rewrite) const;
void AddClangCLArgs(const llvm::opt::ArgList &Args, types::ID InputType,
- llvm::opt::ArgStringList &CmdArgs,
- codegenoptions::DebugInfoKind *DebugInfoKind,
- bool *EmitCodeView) const;
+ llvm::opt::ArgStringList &CmdArgs) const;
mutable std::unique_ptr<llvm::raw_fd_ostream> CompilationDatabase = nullptr;
void DumpCompilationDatabase(Compilation &C, StringRef Filename,
@@ -99,11 +103,12 @@ private:
const InputInfo &Input, const llvm::opt::ArgList &Args) const;
public:
- Clang(const ToolChain &TC);
+ Clang(const ToolChain &TC, bool HasIntegratedBackend = true);
~Clang() override;
bool hasGoodDiagnostics() const override { return true; }
bool hasIntegratedAssembler() const override { return true; }
+ bool hasIntegratedBackend() const override { return HasBackend; }
bool hasIntegratedCPP() const override { return true; }
bool canEmitIR() const override { return true; }
@@ -118,6 +123,8 @@ class LLVM_LIBRARY_VISIBILITY ClangAs : public Tool {
public:
ClangAs(const ToolChain &TC)
: Tool("clang::as", "clang integrated assembler", TC) {}
+ void AddLoongArchTargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
void AddMIPSTargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
void AddX86TargetArgs(const llvm::opt::ArgList &Args,
@@ -152,11 +159,11 @@ public:
const char *LinkingOutput) const override;
};
-/// Offload wrapper tool.
-class LLVM_LIBRARY_VISIBILITY OffloadWrapper final : public Tool {
+/// Offload binary tool.
+class LLVM_LIBRARY_VISIBILITY OffloadPackager final : public Tool {
public:
- OffloadWrapper(const ToolChain &TC)
- : Tool("offload wrapper", "clang-offload-wrapper", TC) {}
+ OffloadPackager(const ToolChain &TC)
+ : Tool("Offload::Packager", "clang-offload-packager", TC) {}
bool hasIntegratedCPP() const override { return false; }
void ConstructJob(Compilation &C, const JobAction &JA,
@@ -165,6 +172,27 @@ public:
const char *LinkingOutput) const override;
};
+/// Linker wrapper tool.
+class LLVM_LIBRARY_VISIBILITY LinkerWrapper final : public Tool {
+ const Tool *Linker;
+
+public:
+ LinkerWrapper(const ToolChain &TC, const Tool *Linker)
+ : Tool("Offload::Linker", "linker", TC), Linker(Linker) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+enum class DwarfFissionKind { None, Split, Single };
+
+DwarfFissionKind getDebugFissionKind(const Driver &D,
+ const llvm::opt::ArgList &Args,
+ llvm::opt::Arg *&Arg);
+
} // end namespace tools
} // end namespace driver
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.cpp
deleted file mode 100644
index 9ee46ac857f0..000000000000
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.cpp
+++ /dev/null
@@ -1,149 +0,0 @@
-//===--- CloudABI.cpp - CloudABI ToolChain Implementations ------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "CloudABI.h"
-#include "CommonArgs.h"
-#include "clang/Driver/Compilation.h"
-#include "clang/Driver/Driver.h"
-#include "clang/Driver/InputInfo.h"
-#include "clang/Driver/Options.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/Option/ArgList.h"
-#include "llvm/Support/Path.h"
-
-using namespace clang::driver;
-using namespace clang::driver::tools;
-using namespace clang::driver::toolchains;
-using namespace clang;
-using namespace llvm::opt;
-
-void cloudabi::Linker::ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
- const ToolChain &ToolChain = getToolChain();
- const Driver &D = ToolChain.getDriver();
- ArgStringList CmdArgs;
-
- // Silence warning for "clang -g foo.o -o foo"
- Args.ClaimAllArgs(options::OPT_g_Group);
- // and "clang -emit-llvm foo.o -o foo"
- Args.ClaimAllArgs(options::OPT_emit_llvm);
- // and for "clang -w foo.o -o foo". Other warning options are already
- // handled somewhere else.
- Args.ClaimAllArgs(options::OPT_w);
-
- if (!D.SysRoot.empty())
- CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
-
- // CloudABI only supports static linkage.
- CmdArgs.push_back("-Bstatic");
- CmdArgs.push_back("--no-dynamic-linker");
-
- // Provide PIE linker flags in case PIE is default for the architecture.
- if (ToolChain.isPIEDefault()) {
- CmdArgs.push_back("-pie");
- CmdArgs.push_back("-zrelro");
- }
-
- CmdArgs.push_back("--eh-frame-hdr");
- CmdArgs.push_back("--gc-sections");
-
- if (Output.isFilename()) {
- CmdArgs.push_back("-o");
- CmdArgs.push_back(Output.getFilename());
- } else {
- assert(Output.isNothing() && "Invalid output.");
- }
-
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o")));
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtbegin.o")));
- }
-
- Args.AddAllArgs(CmdArgs, options::OPT_L);
- ToolChain.AddFilePathLibArgs(Args, CmdArgs);
- Args.AddAllArgs(CmdArgs,
- {options::OPT_T_Group, options::OPT_e, options::OPT_s,
- options::OPT_t, options::OPT_Z_Flag, options::OPT_r});
-
- if (D.isUsingLTO()) {
- assert(!Inputs.empty() && "Must have at least one input.");
- addLTOOptions(ToolChain, Args, CmdArgs, Output, Inputs[0],
- D.getLTOMode() == LTOK_Thin);
- }
-
- AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
-
- if (ToolChain.ShouldLinkCXXStdlib(Args))
- ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
- CmdArgs.push_back("-lc");
- CmdArgs.push_back("-lcompiler_rt");
- }
-
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles))
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtend.o")));
-
- const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this,
- ResponseFileSupport::AtFileCurCP(),
- Exec, CmdArgs, Inputs, Output));
-}
-
-// CloudABI - CloudABI tool chain which can call ld(1) directly.
-
-CloudABI::CloudABI(const Driver &D, const llvm::Triple &Triple,
- const ArgList &Args)
- : Generic_ELF(D, Triple, Args) {
- SmallString<128> P(getDriver().Dir);
- llvm::sys::path::append(P, "..", getTriple().str(), "lib");
- getFilePaths().push_back(std::string(P.str()));
-}
-
-void CloudABI::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const {
- SmallString<128> P(getDriver().Dir);
- llvm::sys::path::append(P, "..", getTriple().str(), "include/c++/v1");
- addSystemInclude(DriverArgs, CC1Args, P.str());
-}
-
-void CloudABI::AddCXXStdlibLibArgs(const ArgList &Args,
- ArgStringList &CmdArgs) const {
- CmdArgs.push_back("-lc++");
- CmdArgs.push_back("-lc++abi");
- CmdArgs.push_back("-lunwind");
-}
-
-Tool *CloudABI::buildLinker() const {
- return new tools::cloudabi::Linker(*this);
-}
-
-bool CloudABI::isPIEDefault() const {
- // Only enable PIE on architectures that support PC-relative
- // addressing. PC-relative addressing is required, as the process
- // startup code must be able to relocate itself.
- switch (getTriple().getArch()) {
- case llvm::Triple::aarch64:
- case llvm::Triple::x86_64:
- return true;
- default:
- return false;
- }
-}
-
-SanitizerMask CloudABI::getSupportedSanitizers() const {
- SanitizerMask Res = ToolChain::getSupportedSanitizers();
- Res |= SanitizerKind::SafeStack;
- return Res;
-}
-
-SanitizerMask CloudABI::getDefaultSanitizers() const {
- return SanitizerKind::SafeStack;
-}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.h
deleted file mode 100644
index 98bf23127706..000000000000
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CloudABI.h
+++ /dev/null
@@ -1,70 +0,0 @@
-//===--- CloudABI.h - CloudABI ToolChain Implementations --------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_CLOUDABI_H
-#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_CLOUDABI_H
-
-#include "Gnu.h"
-#include "clang/Driver/Tool.h"
-#include "clang/Driver/ToolChain.h"
-
-namespace clang {
-namespace driver {
-namespace tools {
-
-/// cloudabi -- Directly call GNU Binutils linker
-namespace cloudabi {
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
-public:
- Linker(const ToolChain &TC) : Tool("cloudabi::Linker", "linker", TC) {}
-
- bool hasIntegratedCPP() const override { return false; }
- bool isLinkJob() const override { return true; }
-
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
-};
-} // end namespace cloudabi
-} // end namespace tools
-
-namespace toolchains {
-
-class LLVM_LIBRARY_VISIBILITY CloudABI : public Generic_ELF {
-public:
- CloudABI(const Driver &D, const llvm::Triple &Triple,
- const llvm::opt::ArgList &Args);
- bool HasNativeLLVMSupport() const override { return true; }
-
- bool IsMathErrnoDefault() const override { return false; }
- bool IsObjCNonFragileABIDefault() const override { return true; }
-
- CXXStdlibType
- GetCXXStdlibType(const llvm::opt::ArgList &Args) const override {
- return ToolChain::CST_Libcxx;
- }
- void addLibCxxIncludePaths(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const override;
- void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs) const override;
-
- bool isPIEDefault() const override;
- SanitizerMask getSupportedSanitizers() const override;
- SanitizerMask getDefaultSanitizers() const override;
-
-protected:
- Tool *buildLinker() const override;
-};
-
-} // end namespace toolchains
-} // end namespace driver
-} // end namespace clang
-
-#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_CLOUDABI_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp
index 0ffe95795381..2b916f000336 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -9,15 +9,22 @@
#include "CommonArgs.h"
#include "Arch/AArch64.h"
#include "Arch/ARM.h"
+#include "Arch/CSKY.h"
+#include "Arch/LoongArch.h"
#include "Arch/M68k.h"
#include "Arch/Mips.h"
#include "Arch/PPC.h"
+#include "Arch/RISCV.h"
+#include "Arch/Sparc.h"
#include "Arch/SystemZ.h"
#include "Arch/VE.h"
#include "Arch/X86.h"
-#include "HIP.h"
+#include "HIPAMD.h"
#include "Hexagon.h"
+#include "MSP430.h"
+#include "Solaris.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/Version.h"
@@ -34,10 +41,12 @@
#include "clang/Driver/Util.h"
#include "clang/Driver/XRayArgs.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/BinaryFormat/Magic.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
@@ -47,39 +56,180 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/ScopedPrinter.h"
-#include "llvm/Support/TargetParser.h"
#include "llvm/Support/Threading.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/YAMLParser.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/TargetParser.h"
+#include <optional>
using namespace clang::driver;
using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
-static void renderRpassOptions(const ArgList &Args, ArgStringList &CmdArgs) {
+static bool useFramePointerForTargetByDefault(const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple) {
+ if (Args.hasArg(clang::driver::options::OPT_pg) &&
+ !Args.hasArg(clang::driver::options::OPT_mfentry))
+ return true;
+
+ if (Triple.isAndroid()) {
+ switch (Triple.getArch()) {
+ case llvm::Triple::aarch64:
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ case llvm::Triple::riscv64:
+ return true;
+ default:
+ break;
+ }
+ }
+
+ switch (Triple.getArch()) {
+ case llvm::Triple::xcore:
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64:
+ case llvm::Triple::msp430:
+ // XCore never wants frame pointers, regardless of OS.
+ // WebAssembly never wants frame pointers.
+ return false;
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppcle:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
+ case llvm::Triple::sparc:
+ case llvm::Triple::sparcel:
+ case llvm::Triple::sparcv9:
+ case llvm::Triple::amdgcn:
+ case llvm::Triple::r600:
+ case llvm::Triple::csky:
+ case llvm::Triple::loongarch32:
+ case llvm::Triple::loongarch64:
+ return !clang::driver::tools::areOptimizationsEnabled(Args);
+ default:
+ break;
+ }
+
+ if (Triple.isOSFuchsia() || Triple.isOSNetBSD()) {
+ return !clang::driver::tools::areOptimizationsEnabled(Args);
+ }
+
+ if (Triple.isOSLinux() || Triple.isOSHurd()) {
+ switch (Triple.getArch()) {
+ // Don't use a frame pointer on linux if optimizing for certain targets.
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::systemz:
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ return !clang::driver::tools::areOptimizationsEnabled(Args);
+ default:
+ return true;
+ }
+ }
+
+ if (Triple.isOSWindows()) {
+ switch (Triple.getArch()) {
+ case llvm::Triple::x86:
+ return !clang::driver::tools::areOptimizationsEnabled(Args);
+ case llvm::Triple::x86_64:
+ return Triple.isOSBinFormatMachO();
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ // Windows on ARM builds with FPO disabled to aid fast stack walking
+ return true;
+ default:
+ // All other supported Windows ISAs use xdata unwind information, so frame
+ // pointers are not generally useful.
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool mustUseNonLeafFramePointerForTarget(const llvm::Triple &Triple) {
+ switch (Triple.getArch()) {
+ default:
+ return false;
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ // ARM Darwin targets require a frame pointer to be always present to aid
+ // offline debugging via backtraces.
+ return Triple.isOSDarwin();
+ }
+}
+
+clang::CodeGenOptions::FramePointerKind
+getFramePointerKind(const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple) {
+ // We have 4 states:
+ //
+ // 00) leaf retained, non-leaf retained
+ // 01) leaf retained, non-leaf omitted (this is invalid)
+ // 10) leaf omitted, non-leaf retained
+ // (what -momit-leaf-frame-pointer was designed for)
+ // 11) leaf omitted, non-leaf omitted
+ //
+ // "omit" options taking precedence over "no-omit" options is the only way
+ // to make 3 valid states representable
+ llvm::opt::Arg *A =
+ Args.getLastArg(clang::driver::options::OPT_fomit_frame_pointer,
+ clang::driver::options::OPT_fno_omit_frame_pointer);
+
+ bool OmitFP = A && A->getOption().matches(
+ clang::driver::options::OPT_fomit_frame_pointer);
+ bool NoOmitFP = A && A->getOption().matches(
+ clang::driver::options::OPT_fno_omit_frame_pointer);
+ bool OmitLeafFP =
+ Args.hasFlag(clang::driver::options::OPT_momit_leaf_frame_pointer,
+ clang::driver::options::OPT_mno_omit_leaf_frame_pointer,
+ Triple.isAArch64() || Triple.isPS() || Triple.isVE() ||
+ (Triple.isAndroid() && Triple.isRISCV64()));
+ if (NoOmitFP || mustUseNonLeafFramePointerForTarget(Triple) ||
+ (!OmitFP && useFramePointerForTargetByDefault(Args, Triple))) {
+ if (OmitLeafFP)
+ return clang::CodeGenOptions::FramePointerKind::NonLeaf;
+ return clang::CodeGenOptions::FramePointerKind::All;
+ }
+ return clang::CodeGenOptions::FramePointerKind::None;
+}
+
+static void renderRpassOptions(const ArgList &Args, ArgStringList &CmdArgs,
+ const StringRef PluginOptPrefix) {
if (const Arg *A = Args.getLastArg(options::OPT_Rpass_EQ))
- CmdArgs.push_back(Args.MakeArgString(Twine("--plugin-opt=-pass-remarks=") +
- A->getValue()));
+ CmdArgs.push_back(Args.MakeArgString(Twine(PluginOptPrefix) +
+ "-pass-remarks=" + A->getValue()));
if (const Arg *A = Args.getLastArg(options::OPT_Rpass_missed_EQ))
CmdArgs.push_back(Args.MakeArgString(
- Twine("--plugin-opt=-pass-remarks-missed=") + A->getValue()));
+ Twine(PluginOptPrefix) + "-pass-remarks-missed=" + A->getValue()));
if (const Arg *A = Args.getLastArg(options::OPT_Rpass_analysis_EQ))
CmdArgs.push_back(Args.MakeArgString(
- Twine("--plugin-opt=-pass-remarks-analysis=") + A->getValue()));
+ Twine(PluginOptPrefix) + "-pass-remarks-analysis=" + A->getValue()));
}
static void renderRemarksOptions(const ArgList &Args, ArgStringList &CmdArgs,
const llvm::Triple &Triple,
const InputInfo &Input,
- const InputInfo &Output) {
+ const InputInfo &Output,
+ const StringRef PluginOptPrefix) {
StringRef Format = "yaml";
if (const Arg *A = Args.getLastArg(options::OPT_fsave_optimization_record_EQ))
Format = A->getValue();
@@ -93,29 +243,47 @@ static void renderRemarksOptions(const ArgList &Args, ArgStringList &CmdArgs,
assert(!F.empty() && "Cannot determine remarks output name.");
// Append "opt.ld.<format>" to the end of the file name.
- CmdArgs.push_back(
- Args.MakeArgString(Twine("--plugin-opt=opt-remarks-filename=") + F +
- Twine(".opt.ld.") + Format));
+ CmdArgs.push_back(Args.MakeArgString(Twine(PluginOptPrefix) +
+ "opt-remarks-filename=" + F +
+ ".opt.ld." + Format));
if (const Arg *A =
Args.getLastArg(options::OPT_foptimization_record_passes_EQ))
CmdArgs.push_back(Args.MakeArgString(
- Twine("--plugin-opt=opt-remarks-passes=") + A->getValue()));
+ Twine(PluginOptPrefix) + "opt-remarks-passes=" + A->getValue()));
- CmdArgs.push_back(Args.MakeArgString(
- Twine("--plugin-opt=opt-remarks-format=") + Format.data()));
+ CmdArgs.push_back(Args.MakeArgString(Twine(PluginOptPrefix) +
+ "opt-remarks-format=" + Format.data()));
}
static void renderRemarksHotnessOptions(const ArgList &Args,
- ArgStringList &CmdArgs) {
+ ArgStringList &CmdArgs,
+ const StringRef PluginOptPrefix) {
if (Args.hasFlag(options::OPT_fdiagnostics_show_hotness,
options::OPT_fno_diagnostics_show_hotness, false))
- CmdArgs.push_back("--plugin-opt=opt-remarks-with-hotness");
+ CmdArgs.push_back(Args.MakeArgString(Twine(PluginOptPrefix) +
+ "opt-remarks-with-hotness"));
if (const Arg *A =
Args.getLastArg(options::OPT_fdiagnostics_hotness_threshold_EQ))
- CmdArgs.push_back(Args.MakeArgString(
- Twine("--plugin-opt=opt-remarks-hotness-threshold=") + A->getValue()));
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) +
+ "opt-remarks-hotness-threshold=" + A->getValue()));
+}
+
+static bool shouldIgnoreUnsupportedTargetFeature(const Arg &TargetFeatureArg,
+ llvm::Triple T,
+ StringRef Processor) {
+ // Warn no-cumode for AMDGCN processors not supporing WGP mode.
+ if (!T.isAMDGPU())
+ return false;
+ auto GPUKind = T.isAMDGCN() ? llvm::AMDGPU::parseArchAMDGCN(Processor)
+ : llvm::AMDGPU::parseArchR600(Processor);
+ auto GPUFeatures = T.isAMDGCN() ? llvm::AMDGPU::getArchAttrAMDGCN(GPUKind)
+ : llvm::AMDGPU::getArchAttrR600(GPUKind);
+ if (GPUFeatures & llvm::AMDGPU::FEATURE_WGP)
+ return false;
+ return TargetFeatureArg.getOption().matches(options::OPT_mno_cumode);
}
void tools::addPathIfExists(const Driver &D, const Twine &Path,
@@ -124,46 +292,49 @@ void tools::addPathIfExists(const Driver &D, const Twine &Path,
Paths.push_back(Path.str());
}
-void tools::handleTargetFeaturesGroup(const ArgList &Args,
+void tools::handleTargetFeaturesGroup(const Driver &D,
+ const llvm::Triple &Triple,
+ const ArgList &Args,
std::vector<StringRef> &Features,
OptSpecifier Group) {
+ std::set<StringRef> Warned;
for (const Arg *A : Args.filtered(Group)) {
StringRef Name = A->getOption().getName();
A->claim();
// Skip over "-m".
- assert(Name.startswith("m") && "Invalid feature name.");
+ assert(Name.starts_with("m") && "Invalid feature name.");
Name = Name.substr(1);
- bool IsNegative = Name.startswith("no-");
+ auto Proc = getCPUName(D, Args, Triple);
+ if (shouldIgnoreUnsupportedTargetFeature(*A, Triple, Proc)) {
+ if (Warned.count(Name) == 0) {
+ D.getDiags().Report(
+ clang::diag::warn_drv_unsupported_option_for_processor)
+ << A->getAsString(Args) << Proc;
+ Warned.insert(Name);
+ }
+ continue;
+ }
+
+ bool IsNegative = Name.starts_with("no-");
if (IsNegative)
Name = Name.substr(3);
+
Features.push_back(Args.MakeArgString((IsNegative ? "-" : "+") + Name));
}
}
-std::vector<StringRef>
-tools::unifyTargetFeatures(const std::vector<StringRef> &Features) {
- std::vector<StringRef> UnifiedFeatures;
- // Find the last of each feature.
- llvm::StringMap<unsigned> LastOpt;
- for (unsigned I = 0, N = Features.size(); I < N; ++I) {
- StringRef Name = Features[I];
- assert(Name[0] == '-' || Name[0] == '+');
- LastOpt[Name.drop_front(1)] = I;
+SmallVector<StringRef>
+tools::unifyTargetFeatures(ArrayRef<StringRef> Features) {
+ // Only add a feature if it hasn't been seen before starting from the end.
+ SmallVector<StringRef> UnifiedFeatures;
+ llvm::DenseSet<StringRef> UsedFeatures;
+ for (StringRef Feature : llvm::reverse(Features)) {
+ if (UsedFeatures.insert(Feature.drop_front()).second)
+ UnifiedFeatures.insert(UnifiedFeatures.begin(), Feature);
}
- for (unsigned I = 0, N = Features.size(); I < N; ++I) {
- // If this feature was overridden, ignore it.
- StringRef Name = Features[I];
- llvm::StringMap<unsigned>::iterator LastI = LastOpt.find(Name.drop_front(1));
- assert(LastI != LastOpt.end());
- unsigned Last = LastI->second;
- if (Last != I)
- continue;
-
- UnifiedFeatures.push_back(Name);
- }
return UnifiedFeatures;
}
@@ -254,6 +425,10 @@ void tools::AddLinkerInputs(const ToolChain &TC, const InputInfoList &Inputs,
continue;
}
+ // In some error cases, the input could be Nothing; skip those.
+ if (II.isNothing())
+ continue;
+
// Otherwise, this is a linker input argument.
const Arg &A = II.getInputArg();
@@ -262,13 +437,8 @@ void tools::AddLinkerInputs(const ToolChain &TC, const InputInfoList &Inputs,
TC.AddCXXStdlibLibArgs(Args, CmdArgs);
else if (A.getOption().matches(options::OPT_Z_reserved_lib_cckext))
TC.AddCCKextLibArgs(Args, CmdArgs);
- else if (A.getOption().matches(options::OPT_z)) {
- // Pass -z prefix for gcc linker compatibility.
- A.claim();
- A.render(Args, CmdArgs);
- } else {
+ else
A.renderAsInput(Args, CmdArgs);
- }
}
}
@@ -276,17 +446,17 @@ void tools::addLinkerCompressDebugSectionsOption(
const ToolChain &TC, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) {
// GNU ld supports --compress-debug-sections=none|zlib|zlib-gnu|zlib-gabi
- // whereas zlib is an alias to zlib-gabi. Therefore -gz=none|zlib|zlib-gnu
- // are translated to --compress-debug-sections=none|zlib|zlib-gnu.
- // -gz is not translated since ld --compress-debug-sections option requires an
+ // whereas zlib is an alias to zlib-gabi and zlib-gnu is obsoleted. Therefore
+ // -gz=none|zlib are translated to --compress-debug-sections=none|zlib. -gz
+ // is not translated since ld --compress-debug-sections option requires an
// argument.
if (const Arg *A = Args.getLastArg(options::OPT_gz_EQ)) {
StringRef V = A->getValue();
- if (V == "none" || V == "zlib" || V == "zlib-gnu")
+ if (V == "none" || V == "zlib" || V == "zstd")
CmdArgs.push_back(Args.MakeArgString("--compress-debug-sections=" + V));
else
TC.getDriver().Diag(diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << V;
+ << A->getSpelling() << V;
}
}
@@ -305,6 +475,7 @@ void tools::AddTargetFeature(const ArgList &Args,
/// Get the (LLVM) name of the AMDGPU gpu we are targeting.
static std::string getAMDGPUTargetGPU(const llvm::Triple &T,
const ArgList &Args) {
+ Arg *MArch = Args.getLastArg(options::OPT_march_EQ);
if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
auto GPUName = getProcessorFromTargetID(T, A->getValue());
return llvm::StringSwitch<std::string>(GPUName)
@@ -317,6 +488,8 @@ static std::string getAMDGPUTargetGPU(const llvm::Triple &T,
.Case("aruba", "cayman")
.Default(GPUName.str());
}
+ if (MArch)
+ return getProcessorFromTargetID(T, MArch->getValue()).str();
return "";
}
@@ -346,8 +519,8 @@ static StringRef getWebAssemblyTargetCPU(const ArgList &Args) {
return "generic";
}
-std::string tools::getCPUName(const ArgList &Args, const llvm::Triple &T,
- bool FromAs) {
+std::string tools::getCPUName(const Driver &D, const ArgList &Args,
+ const llvm::Triple &T, bool FromAs) {
Arg *A;
switch (T.getArch()) {
@@ -395,50 +568,34 @@ std::string tools::getCPUName(const ArgList &Args, const llvm::Triple &T,
case llvm::Triple::ppc:
case llvm::Triple::ppcle:
case llvm::Triple::ppc64:
- case llvm::Triple::ppc64le: {
- std::string TargetCPUName = ppc::getPPCTargetCPU(Args);
- // LLVM may default to generating code for the native CPU,
- // but, like gcc, we default to a more generic option for
- // each architecture. (except on AIX)
- if (!TargetCPUName.empty())
- return TargetCPUName;
-
- if (T.isOSAIX()) {
- unsigned major, minor, unused_micro;
- T.getOSVersion(major, minor, unused_micro);
- // The minimal arch level moved from pwr4 for AIX7.1 to
- // pwr7 for AIX7.2.
- TargetCPUName =
- (major < 7 || (major == 7 && minor < 2)) ? "pwr4" : "pwr7";
- } else if (T.getArch() == llvm::Triple::ppc64le)
- TargetCPUName = "ppc64le";
- else if (T.getArch() == llvm::Triple::ppc64)
- TargetCPUName = "ppc64";
- else
- TargetCPUName = "ppc";
+ case llvm::Triple::ppc64le:
+ return ppc::getPPCTargetCPU(D, Args, T);
- return TargetCPUName;
- }
+ case llvm::Triple::csky:
+ if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
+ return A->getValue();
+ else if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
+ return A->getValue();
+ else
+ return "ck810";
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
+ return riscv::getRISCVTargetCPU(Args, T);
+
+ case llvm::Triple::bpfel:
+ case llvm::Triple::bpfeb:
if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
return A->getValue();
return "";
- case llvm::Triple::bpfel:
- case llvm::Triple::bpfeb:
case llvm::Triple::sparc:
case llvm::Triple::sparcel:
case llvm::Triple::sparcv9:
- if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
- return A->getValue();
- if (T.getArch() == llvm::Triple::sparc && T.isOSSolaris())
- return "v9";
- return "";
+ return sparc::getSparcTargetCPU(D, Args, T);
case llvm::Triple::x86:
case llvm::Triple::x86_64:
- return x86::getX86TargetCPU(Args, T);
+ return x86::getX86TargetCPU(D, Args, T);
case llvm::Triple::hexagon:
return "hexagon" +
@@ -457,6 +614,103 @@ std::string tools::getCPUName(const ArgList &Args, const llvm::Triple &T,
case llvm::Triple::wasm32:
case llvm::Triple::wasm64:
return std::string(getWebAssemblyTargetCPU(Args));
+
+ case llvm::Triple::loongarch32:
+ case llvm::Triple::loongarch64:
+ return loongarch::getLoongArchTargetCPU(Args, T);
+ }
+}
+
+static void getWebAssemblyTargetFeatures(const Driver &D,
+ const llvm::Triple &Triple,
+ const ArgList &Args,
+ std::vector<StringRef> &Features) {
+ handleTargetFeaturesGroup(D, Triple, Args, Features,
+ options::OPT_m_wasm_Features_Group);
+}
+
+void tools::getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args, ArgStringList &CmdArgs,
+ bool ForAS, bool IsAux) {
+ std::vector<StringRef> Features;
+ switch (Triple.getArch()) {
+ default:
+ break;
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ mips::getMIPSTargetFeatures(D, Triple, Args, Features);
+ break;
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ arm::getARMTargetFeatures(D, Triple, Args, Features, ForAS);
+ break;
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppcle:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ ppc::getPPCTargetFeatures(D, Triple, Args, Features);
+ break;
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
+ riscv::getRISCVTargetFeatures(D, Triple, Args, Features);
+ break;
+ case llvm::Triple::systemz:
+ systemz::getSystemZTargetFeatures(D, Args, Features);
+ break;
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_32:
+ case llvm::Triple::aarch64_be:
+ aarch64::getAArch64TargetFeatures(D, Triple, Args, Features, ForAS);
+ break;
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ x86::getX86TargetFeatures(D, Triple, Args, Features);
+ break;
+ case llvm::Triple::hexagon:
+ hexagon::getHexagonTargetFeatures(D, Triple, Args, Features);
+ break;
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64:
+ getWebAssemblyTargetFeatures(D, Triple, Args, Features);
+ break;
+ case llvm::Triple::sparc:
+ case llvm::Triple::sparcel:
+ case llvm::Triple::sparcv9:
+ sparc::getSparcTargetFeatures(D, Args, Features);
+ break;
+ case llvm::Triple::r600:
+ case llvm::Triple::amdgcn:
+ amdgpu::getAMDGPUTargetFeatures(D, Triple, Args, Features);
+ break;
+ case llvm::Triple::nvptx:
+ case llvm::Triple::nvptx64:
+ NVPTX::getNVPTXTargetFeatures(D, Triple, Args, Features);
+ break;
+ case llvm::Triple::m68k:
+ m68k::getM68kTargetFeatures(D, Triple, Args, Features);
+ break;
+ case llvm::Triple::msp430:
+ msp430::getMSP430TargetFeatures(D, Args, Features);
+ break;
+ case llvm::Triple::ve:
+ ve::getVETargetFeatures(D, Args, Features);
+ break;
+ case llvm::Triple::csky:
+ csky::getCSKYTargetFeatures(D, Triple, Args, CmdArgs, Features);
+ break;
+ case llvm::Triple::loongarch32:
+ case llvm::Triple::loongarch64:
+ loongarch::getLoongArchTargetFeatures(D, Triple, Args, Features);
+ break;
+ }
+
+ for (auto Feature : unifyTargetFeatures(Features)) {
+ CmdArgs.push_back(IsAux ? "-aux-target-feature" : "-target-feature");
+ CmdArgs.push_back(Feature.data());
}
}
@@ -470,22 +724,58 @@ llvm::StringRef tools::getLTOParallelism(const ArgList &Args, const Driver &D) {
return LtoJobsArg->getValue();
}
-// CloudABI uses -ffunction-sections and -fdata-sections by default.
+// PS4/PS5 uses -ffunction-sections and -fdata-sections by default.
bool tools::isUseSeparateSections(const llvm::Triple &Triple) {
- return Triple.getOS() == llvm::Triple::CloudABI;
+ return Triple.isPS();
+}
+
+bool tools::isTLSDESCEnabled(const ToolChain &TC,
+ const llvm::opt::ArgList &Args) {
+ const llvm::Triple &Triple = TC.getEffectiveTriple();
+ Arg *A = Args.getLastArg(options::OPT_mtls_dialect_EQ);
+ if (!A)
+ return Triple.hasDefaultTLSDESC();
+ StringRef V = A->getValue();
+ bool SupportedArgument = false, EnableTLSDESC = false;
+ bool Unsupported = !Triple.isOSBinFormatELF();
+ if (Triple.isRISCV()) {
+ SupportedArgument = V == "desc" || V == "trad";
+ EnableTLSDESC = V == "desc";
+ } else if (Triple.isX86()) {
+ SupportedArgument = V == "gnu";
+ } else {
+ Unsupported = true;
+ }
+ if (Unsupported) {
+ TC.getDriver().Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << Triple.getTriple();
+ } else if (!SupportedArgument) {
+ TC.getDriver().Diag(diag::err_drv_unsupported_option_argument_for_target)
+ << A->getSpelling() << V << Triple.getTriple();
+ }
+ return EnableTLSDESC;
}
void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
ArgStringList &CmdArgs, const InputInfo &Output,
const InputInfo &Input, bool IsThinLTO) {
+ const bool IsOSAIX = ToolChain.getTriple().isOSAIX();
+ const bool IsAMDGCN = ToolChain.getTriple().isAMDGCN();
const char *Linker = Args.MakeArgString(ToolChain.GetLinkerPath());
const Driver &D = ToolChain.getDriver();
+ const bool IsFatLTO = Args.hasArg(options::OPT_ffat_lto_objects);
+ const bool IsUnifiedLTO = Args.hasArg(options::OPT_funified_lto);
if (llvm::sys::path::filename(Linker) != "ld.lld" &&
- llvm::sys::path::stem(Linker) != "ld.lld") {
+ llvm::sys::path::stem(Linker) != "ld.lld" &&
+ !ToolChain.getTriple().isOSOpenBSD()) {
// Tell the linker to load the plugin. This has to come before
- // AddLinkerInputs as gold requires -plugin to come before any -plugin-opt
- // that -Wl might forward.
- CmdArgs.push_back("-plugin");
+ // AddLinkerInputs as gold requires -plugin and AIX ld requires -bplugin to
+ // come before any -plugin-opt/-bplugin_opt that -Wl might forward.
+ const char *PluginPrefix = IsOSAIX ? "-bplugin:" : "";
+ const char *PluginName = IsOSAIX ? "/libLTO" : "/LLVMgold";
+
+ if (!IsOSAIX)
+ CmdArgs.push_back("-plugin");
#if defined(_WIN32)
const char *Suffix = ".dll";
@@ -496,19 +786,60 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
#endif
SmallString<1024> Plugin;
- llvm::sys::path::native(
- Twine(D.Dir) + "/../lib" CLANG_LIBDIR_SUFFIX "/LLVMgold" + Suffix,
- Plugin);
- CmdArgs.push_back(Args.MakeArgString(Plugin));
+ llvm::sys::path::native(Twine(D.Dir) +
+ "/../" CLANG_INSTALL_LIBDIR_BASENAME +
+ PluginName + Suffix,
+ Plugin);
+ CmdArgs.push_back(Args.MakeArgString(Twine(PluginPrefix) + Plugin));
+ } else {
+ // Tell LLD to find and use .llvm.lto section in regular relocatable object
+ // files
+ if (IsFatLTO)
+ CmdArgs.push_back("--fat-lto-objects");
+ }
+
+ const char *PluginOptPrefix = IsOSAIX ? "-bplugin_opt:" : "-plugin-opt=";
+ const char *ExtraDash = IsOSAIX ? "-" : "";
+ const char *ParallelismOpt = IsOSAIX ? "-threads=" : "jobs=";
+
+ // Note, this solution is far from perfect, better to encode it into IR
+ // metadata, but this may not be worth it, since it looks like aranges is on
+ // the way out.
+ if (Args.hasArg(options::OPT_gdwarf_aranges)) {
+ CmdArgs.push_back(Args.MakeArgString(Twine(PluginOptPrefix) +
+ "-generate-arange-section"));
+ }
+
+ // Pass vector library arguments to LTO.
+ Arg *ArgVecLib = Args.getLastArg(options::OPT_fveclib);
+ if (ArgVecLib && ArgVecLib->getNumValues() == 1) {
+ // Map the vector library names from clang front-end to opt front-end. The
+ // values are taken from the TargetLibraryInfo class command line options.
+ std::optional<StringRef> OptVal =
+ llvm::StringSwitch<std::optional<StringRef>>(ArgVecLib->getValue())
+ .Case("Accelerate", "Accelerate")
+ .Case("LIBMVEC", "LIBMVEC-X86")
+ .Case("MASSV", "MASSV")
+ .Case("SVML", "SVML")
+ .Case("SLEEF", "sleefgnuabi")
+ .Case("Darwin_libsystem_m", "Darwin_libsystem_m")
+ .Case("ArmPL", "ArmPL")
+ .Case("none", "none")
+ .Default(std::nullopt);
+
+ if (OptVal)
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine(PluginOptPrefix) + "-vector-library=" + OptVal.value()));
}
// Try to pass driver level flags relevant to LTO code generation down to
// the plugin.
// Handle flags for selecting CPU variants.
- std::string CPU = getCPUName(Args, ToolChain.getTriple());
+ std::string CPU = getCPUName(D, Args, ToolChain.getTriple());
if (!CPU.empty())
- CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=mcpu=") + CPU));
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) + ExtraDash + "mcpu=" + CPU));
if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
// The optimization level matches
@@ -525,48 +856,140 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
OOpt = "2";
} else if (A->getOption().matches(options::OPT_O0))
OOpt = "0";
- if (!OOpt.empty())
- CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=O") + OOpt));
+ if (!OOpt.empty()) {
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) + ExtraDash + "O" + OOpt));
+ if (IsAMDGCN)
+ CmdArgs.push_back(Args.MakeArgString(Twine("--lto-CGO") + OOpt));
+ }
}
- if (Args.hasArg(options::OPT_gsplit_dwarf)) {
+ if (Args.hasArg(options::OPT_gsplit_dwarf))
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine(PluginOptPrefix) + "dwo_dir=" + Output.getFilename() + "_dwo"));
+
+ if (IsThinLTO && !IsOSAIX)
+ CmdArgs.push_back(Args.MakeArgString(Twine(PluginOptPrefix) + "thinlto"));
+ else if (IsThinLTO && IsOSAIX)
+ CmdArgs.push_back(Args.MakeArgString(Twine("-bdbg:thinlto")));
+
+ // Matrix intrinsic lowering happens at link time with ThinLTO. Enable
+ // LowerMatrixIntrinsicsPass, which is transitively called by
+ // buildThinLTODefaultPipeline under EnableMatrix.
+ if ((IsThinLTO || IsFatLTO || IsUnifiedLTO) &&
+ Args.hasArg(options::OPT_fenable_matrix))
CmdArgs.push_back(
- Args.MakeArgString(Twine("-plugin-opt=dwo_dir=") +
- Output.getFilename() + "_dwo"));
- }
-
- if (IsThinLTO)
- CmdArgs.push_back("-plugin-opt=thinlto");
+ Args.MakeArgString(Twine(PluginOptPrefix) + "-enable-matrix"));
StringRef Parallelism = getLTOParallelism(Args, D);
if (!Parallelism.empty())
- CmdArgs.push_back(
- Args.MakeArgString("-plugin-opt=jobs=" + Twine(Parallelism)));
+ CmdArgs.push_back(Args.MakeArgString(Twine(PluginOptPrefix) +
+ ParallelismOpt + Parallelism));
+
+ // Pass down GlobalISel options.
+ if (Arg *A = Args.getLastArg(options::OPT_fglobal_isel,
+ options::OPT_fno_global_isel)) {
+ // Parsing -fno-global-isel explicitly gives architectures that enable GISel
+ // by default a chance to disable it.
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine(PluginOptPrefix) + "-global-isel=" +
+ (A->getOption().matches(options::OPT_fglobal_isel) ? "1" : "0")));
+ }
// If an explicit debugger tuning argument appeared, pass it along.
- if (Arg *A = Args.getLastArg(options::OPT_gTune_Group,
- options::OPT_ggdbN_Group)) {
+ if (Arg *A =
+ Args.getLastArg(options::OPT_gTune_Group, options::OPT_ggdbN_Group)) {
if (A->getOption().matches(options::OPT_glldb))
- CmdArgs.push_back("-plugin-opt=-debugger-tune=lldb");
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) + "-debugger-tune=lldb"));
else if (A->getOption().matches(options::OPT_gsce))
- CmdArgs.push_back("-plugin-opt=-debugger-tune=sce");
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) + "-debugger-tune=sce"));
else if (A->getOption().matches(options::OPT_gdbx))
- CmdArgs.push_back("-plugin-opt=-debugger-tune=dbx");
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) + "-debugger-tune=dbx"));
else
- CmdArgs.push_back("-plugin-opt=-debugger-tune=gdb");
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) + "-debugger-tune=gdb"));
+ }
+
+ if (IsOSAIX) {
+ if (!ToolChain.useIntegratedAs())
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) + "-no-integrated-as=1"));
+
+ // On AIX, clang assumes strict-dwarf is true if any debug option is
+ // specified, unless it is told explicitly not to assume so.
+ Arg *A = Args.getLastArg(options::OPT_g_Group);
+ bool EnableDebugInfo = A && !A->getOption().matches(options::OPT_g0) &&
+ !A->getOption().matches(options::OPT_ggdb0);
+ if (EnableDebugInfo && Args.hasFlag(options::OPT_gstrict_dwarf,
+ options::OPT_gno_strict_dwarf, true))
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) + "-strict-dwarf=true"));
+
+ for (const Arg *A : Args.filtered_reverse(options::OPT_mabi_EQ)) {
+ StringRef V = A->getValue();
+ if (V == "vec-default")
+ break;
+ if (V == "vec-extabi") {
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) + "-vec-extabi"));
+ break;
+ }
+ }
}
bool UseSeparateSections =
isUseSeparateSections(ToolChain.getEffectiveTriple());
if (Args.hasFlag(options::OPT_ffunction_sections,
- options::OPT_fno_function_sections, UseSeparateSections)) {
- CmdArgs.push_back("-plugin-opt=-function-sections");
- }
+ options::OPT_fno_function_sections, UseSeparateSections))
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) + "-function-sections=1"));
+ else if (Args.hasArg(options::OPT_fno_function_sections))
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) + "-function-sections=0"));
+ bool DataSectionsTurnedOff = false;
if (Args.hasFlag(options::OPT_fdata_sections, options::OPT_fno_data_sections,
UseSeparateSections)) {
- CmdArgs.push_back("-plugin-opt=-data-sections");
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) + "-data-sections=1"));
+ } else if (Args.hasArg(options::OPT_fno_data_sections)) {
+ DataSectionsTurnedOff = true;
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) + "-data-sections=0"));
+ }
+
+ if (Args.hasArg(options::OPT_mxcoff_roptr) ||
+ Args.hasArg(options::OPT_mno_xcoff_roptr)) {
+ bool HasRoptr = Args.hasFlag(options::OPT_mxcoff_roptr,
+ options::OPT_mno_xcoff_roptr, false);
+ StringRef OptStr = HasRoptr ? "-mxcoff-roptr" : "-mno-xcoff-roptr";
+
+ if (!IsOSAIX)
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << OptStr << ToolChain.getTriple().str();
+
+ if (HasRoptr) {
+ // The data sections option is on by default on AIX. We only need to error
+ // out when -fno-data-sections is specified explicitly to turn off data
+ // sections.
+ if (DataSectionsTurnedOff)
+ D.Diag(diag::err_roptr_requires_data_sections);
+
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) + "-mxcoff-roptr"));
+ }
+ }
+
+ // Pass an option to enable split machine functions.
+ if (auto *A = Args.getLastArg(options::OPT_fsplit_machine_functions,
+ options::OPT_fno_split_machine_functions)) {
+ if (A->getOption().matches(options::OPT_fsplit_machine_functions))
+ CmdArgs.push_back(Args.MakeArgString(Twine(PluginOptPrefix) +
+ "-split-machine-functions"));
}
if (Arg *A = getLastProfileSampleUseArg(Args)) {
@@ -574,90 +997,126 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
if (!llvm::sys::fs::exists(FName))
D.Diag(diag::err_drv_no_such_file) << FName;
else
- CmdArgs.push_back(
- Args.MakeArgString(Twine("-plugin-opt=sample-profile=") + FName));
+ CmdArgs.push_back(Args.MakeArgString(Twine(PluginOptPrefix) +
+ "sample-profile=" + FName));
}
- auto *CSPGOGenerateArg = Args.getLastArg(options::OPT_fcs_profile_generate,
- options::OPT_fcs_profile_generate_EQ,
- options::OPT_fno_profile_generate);
- if (CSPGOGenerateArg &&
- CSPGOGenerateArg->getOption().matches(options::OPT_fno_profile_generate))
- CSPGOGenerateArg = nullptr;
-
- auto *ProfileUseArg = getLastProfileUseArg(Args);
-
- if (CSPGOGenerateArg) {
- CmdArgs.push_back(Args.MakeArgString("-plugin-opt=cs-profile-generate"));
+ if (auto *CSPGOGenerateArg = getLastCSProfileGenerateArg(Args)) {
+ CmdArgs.push_back(Args.MakeArgString(Twine(PluginOptPrefix) + ExtraDash +
+ "cs-profile-generate"));
if (CSPGOGenerateArg->getOption().matches(
options::OPT_fcs_profile_generate_EQ)) {
SmallString<128> Path(CSPGOGenerateArg->getValue());
llvm::sys::path::append(Path, "default_%m.profraw");
- CmdArgs.push_back(
- Args.MakeArgString(Twine("-plugin-opt=cs-profile-path=") + Path));
+ CmdArgs.push_back(Args.MakeArgString(Twine(PluginOptPrefix) + ExtraDash +
+ "cs-profile-path=" + Path));
} else
CmdArgs.push_back(
- Args.MakeArgString("-plugin-opt=cs-profile-path=default_%m.profraw"));
- } else if (ProfileUseArg) {
+ Args.MakeArgString(Twine(PluginOptPrefix) + ExtraDash +
+ "cs-profile-path=default_%m.profraw"));
+ } else if (auto *ProfileUseArg = getLastProfileUseArg(Args)) {
SmallString<128> Path(
ProfileUseArg->getNumValues() == 0 ? "" : ProfileUseArg->getValue());
if (Path.empty() || llvm::sys::fs::is_directory(Path))
llvm::sys::path::append(Path, "default.profdata");
- CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=cs-profile-path=") +
- Path));
+ CmdArgs.push_back(Args.MakeArgString(Twine(PluginOptPrefix) + ExtraDash +
+ "cs-profile-path=" + Path));
}
- // Pass an option to enable/disable the new pass manager.
- if (auto *A = Args.getLastArg(options::OPT_flegacy_pass_manager,
- options::OPT_fno_legacy_pass_manager)) {
- if (A->getOption().matches(options::OPT_flegacy_pass_manager))
- CmdArgs.push_back("-plugin-opt=legacy-pass-manager");
+ // This controls whether or not we perform JustMyCode instrumentation.
+ if (Args.hasFlag(options::OPT_fjmc, options::OPT_fno_jmc, false)) {
+ if (ToolChain.getEffectiveTriple().isOSBinFormatELF())
+ CmdArgs.push_back(Args.MakeArgString(Twine(PluginOptPrefix) +
+ "-enable-jmc-instrument"));
else
- CmdArgs.push_back("-plugin-opt=new-pass-manager");
+ D.Diag(clang::diag::warn_drv_fjmc_for_elf_only);
+ }
+
+ if (Args.hasFlag(options::OPT_femulated_tls, options::OPT_fno_emulated_tls,
+ ToolChain.getTriple().hasDefaultEmulatedTLS())) {
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) + "-emulated-tls"));
}
+ if (isTLSDESCEnabled(ToolChain, Args))
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) + "-enable-tlsdesc"));
- // Pass an option to enable pseudo probe emission.
- if (Args.hasFlag(options::OPT_fpseudo_probe_for_profiling,
- options::OPT_fno_pseudo_probe_for_profiling, false))
- CmdArgs.push_back("-plugin-opt=pseudo-probe-for-profiling");
+ if (Args.hasFlag(options::OPT_fstack_size_section,
+ options::OPT_fno_stack_size_section, false))
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) + "-stack-size-section"));
// Setup statistics file output.
SmallString<128> StatsFile = getStatsFileName(Args, Output, Input, D);
if (!StatsFile.empty())
CmdArgs.push_back(
- Args.MakeArgString(Twine("-plugin-opt=stats-file=") + StatsFile));
+ Args.MakeArgString(Twine(PluginOptPrefix) + "stats-file=" + StatsFile));
+
+ // Setup crash diagnostics dir.
+ if (Arg *A = Args.getLastArg(options::OPT_fcrash_diagnostics_dir))
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine(PluginOptPrefix) + "-crash-diagnostics-dir=" + A->getValue()));
- addX86AlignBranchArgs(D, Args, CmdArgs, /*IsLTO=*/true);
+ addX86AlignBranchArgs(D, Args, CmdArgs, /*IsLTO=*/true, PluginOptPrefix);
// Handle remark diagnostics on screen options: '-Rpass-*'.
- renderRpassOptions(Args, CmdArgs);
+ renderRpassOptions(Args, CmdArgs, PluginOptPrefix);
// Handle serialized remarks options: '-fsave-optimization-record'
// and '-foptimization-record-*'.
if (willEmitRemarks(Args))
renderRemarksOptions(Args, CmdArgs, ToolChain.getEffectiveTriple(), Input,
- Output);
+ Output, PluginOptPrefix);
// Handle remarks hotness/threshold related options.
- renderRemarksHotnessOptions(Args, CmdArgs);
+ renderRemarksHotnessOptions(Args, CmdArgs, PluginOptPrefix);
addMachineOutlinerArgs(D, Args, CmdArgs, ToolChain.getEffectiveTriple(),
- /*IsLTO=*/true);
+ /*IsLTO=*/true, PluginOptPrefix);
+}
+
+/// Adds the '-lcgpu' and '-lmgpu' libraries to the compilation to include the
+/// LLVM C library for GPUs.
+static void addOpenMPDeviceLibC(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ if (Args.hasArg(options::OPT_nogpulib) || Args.hasArg(options::OPT_nolibc))
+ return;
+
+ // Check the resource directory for the LLVM libc GPU declarations. If it's
+ // found we can assume that LLVM was built with support for the GPU libc.
+ SmallString<256> LibCDecls(TC.getDriver().ResourceDir);
+ llvm::sys::path::append(LibCDecls, "include", "llvm_libc_wrappers",
+ "llvm-libc-decls");
+ bool HasLibC = llvm::sys::fs::exists(LibCDecls) &&
+ llvm::sys::fs::is_directory(LibCDecls);
+ if (Args.hasFlag(options::OPT_gpulibc, options::OPT_nogpulibc, HasLibC)) {
+ CmdArgs.push_back("-lcgpu");
+ CmdArgs.push_back("-lmgpu");
+ }
+}
+
+void tools::addOpenMPRuntimeLibraryPath(const ToolChain &TC,
+ const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ // Default to clang lib / lib64 folder, i.e. the same location as device
+ // runtime.
+ SmallString<256> DefaultLibPath =
+ llvm::sys::path::parent_path(TC.getDriver().Dir);
+ llvm::sys::path::append(DefaultLibPath, CLANG_INSTALL_LIBDIR_BASENAME);
+ CmdArgs.push_back(Args.MakeArgString("-L" + DefaultLibPath));
}
void tools::addArchSpecificRPath(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs) {
- // Enable -frtlib-add-rpath by default for the case of VE.
- const bool IsVE = TC.getTriple().isVE();
- bool DefaultValue = IsVE;
if (!Args.hasFlag(options::OPT_frtlib_add_rpath,
- options::OPT_fno_rtlib_add_rpath, DefaultValue))
+ options::OPT_fno_rtlib_add_rpath, false))
return;
- std::string CandidateRPath = TC.getArchSpecificLibPath();
- if (TC.getVFS().exists(CandidateRPath)) {
- CmdArgs.push_back("-rpath");
- CmdArgs.push_back(Args.MakeArgString(CandidateRPath.c_str()));
+ for (const auto &CandidateRPath : TC.getArchSpecificLibPaths()) {
+ if (TC.getVFS().exists(CandidateRPath)) {
+ CmdArgs.push_back("-rpath");
+ CmdArgs.push_back(Args.MakeArgString(CandidateRPath));
+ }
}
}
@@ -700,11 +1159,155 @@ bool tools::addOpenMPRuntime(ArgStringList &CmdArgs, const ToolChain &TC,
if (IsOffloadingHost)
CmdArgs.push_back("-lomptarget");
+ if (IsOffloadingHost && !Args.hasArg(options::OPT_nogpulib))
+ CmdArgs.push_back("-lomptarget.devicertl");
+
+ if (IsOffloadingHost)
+ addOpenMPDeviceLibC(TC, Args, CmdArgs);
+
addArchSpecificRPath(TC, Args, CmdArgs);
+ addOpenMPRuntimeLibraryPath(TC, Args, CmdArgs);
return true;
}
+/// Determines if --whole-archive is active in the list of arguments.
+static bool isWholeArchivePresent(const ArgList &Args) {
+ bool WholeArchiveActive = false;
+ for (auto *Arg : Args.filtered(options::OPT_Wl_COMMA)) {
+ if (Arg) {
+ for (StringRef ArgValue : Arg->getValues()) {
+ if (ArgValue == "--whole-archive")
+ WholeArchiveActive = true;
+ if (ArgValue == "--no-whole-archive")
+ WholeArchiveActive = false;
+ }
+ }
+ }
+
+ return WholeArchiveActive;
+}
+
+/// Determine if driver is invoked to create a shared object library (-static)
+static bool isSharedLinkage(const ArgList &Args) {
+ return Args.hasArg(options::OPT_shared);
+}
+
+/// Determine if driver is invoked to create a static object library (-shared)
+static bool isStaticLinkage(const ArgList &Args) {
+ return Args.hasArg(options::OPT_static);
+}
+
+/// Add Fortran runtime libs for MSVC
+static void addFortranRuntimeLibsMSVC(const ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) {
+ unsigned RTOptionID = options::OPT__SLASH_MT;
+ if (auto *rtl = Args.getLastArg(options::OPT_fms_runtime_lib_EQ)) {
+ RTOptionID = llvm::StringSwitch<unsigned>(rtl->getValue())
+ .Case("static", options::OPT__SLASH_MT)
+ .Case("static_dbg", options::OPT__SLASH_MTd)
+ .Case("dll", options::OPT__SLASH_MD)
+ .Case("dll_dbg", options::OPT__SLASH_MDd)
+ .Default(options::OPT__SLASH_MT);
+ }
+ switch (RTOptionID) {
+ case options::OPT__SLASH_MT:
+ CmdArgs.push_back("/WHOLEARCHIVE:Fortran_main.static.lib");
+ break;
+ case options::OPT__SLASH_MTd:
+ CmdArgs.push_back("/WHOLEARCHIVE:Fortran_main.static_dbg.lib");
+ break;
+ case options::OPT__SLASH_MD:
+ CmdArgs.push_back("/WHOLEARCHIVE:Fortran_main.dynamic.lib");
+ break;
+ case options::OPT__SLASH_MDd:
+ CmdArgs.push_back("/WHOLEARCHIVE:Fortran_main.dynamic_dbg.lib");
+ break;
+ }
+}
+
+// Add FortranMain runtime lib
+static void addFortranMain(const ToolChain &TC, const ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) {
+ // 0. Shared-library linkage
+ // If we are attempting to link a library, we should not add
+ // -lFortran_main.a to the link line, as the `main` symbol is not
+ // required for a library and should also be provided by one of
+ // the translation units of the code that this shared library
+ // will be linked against eventually.
+ if (isSharedLinkage(Args) || isStaticLinkage(Args)) {
+ return;
+ }
+
+ // 1. MSVC
+ if (TC.getTriple().isKnownWindowsMSVCEnvironment()) {
+ addFortranRuntimeLibsMSVC(Args, CmdArgs);
+ return;
+ }
+
+ // 2. GNU and similar
+ const Driver &D = TC.getDriver();
+ const char *FortranMainLinkFlag = "-lFortran_main";
+
+ // Warn if the user added `-lFortran_main` - this library is an implementation
+ // detail of Flang and should be handled automaticaly by the driver.
+ for (const char *arg : CmdArgs) {
+ if (strncmp(arg, FortranMainLinkFlag, strlen(FortranMainLinkFlag)) == 0)
+ D.Diag(diag::warn_drv_deprecated_custom)
+ << FortranMainLinkFlag
+ << "see the Flang driver documentation for correct usage";
+ }
+
+ // The --whole-archive option needs to be part of the link line to make
+ // sure that the main() function from Fortran_main.a is pulled in by the
+ // linker. However, it shouldn't be used if it's already active.
+ // TODO: Find an equivalent of `--whole-archive` for Darwin and AIX.
+ if (!isWholeArchivePresent(Args) && !TC.getTriple().isMacOSX() &&
+ !TC.getTriple().isOSAIX()) {
+ CmdArgs.push_back("--whole-archive");
+ CmdArgs.push_back(FortranMainLinkFlag);
+ CmdArgs.push_back("--no-whole-archive");
+ return;
+ }
+
+ CmdArgs.push_back(FortranMainLinkFlag);
+}
+
+/// Add Fortran runtime libs
+void tools::addFortranRuntimeLibs(const ToolChain &TC, const ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) {
+ // 1. Link FortranMain
+ // FortranMain depends on FortranRuntime, so needs to be listed first. If
+ // -fno-fortran-main has been passed, skip linking Fortran_main.a
+ if (!Args.hasArg(options::OPT_no_fortran_main))
+ addFortranMain(TC, Args, CmdArgs);
+
+ // 2. Link FortranRuntime and FortranDecimal
+ // These are handled earlier on Windows by telling the frontend driver to
+ // add the correct libraries to link against as dependents in the object
+ // file.
+ if (!TC.getTriple().isKnownWindowsMSVCEnvironment()) {
+ CmdArgs.push_back("-lFortranRuntime");
+ CmdArgs.push_back("-lFortranDecimal");
+ }
+}
+
+void tools::addFortranRuntimeLibraryPath(const ToolChain &TC,
+ const llvm::opt::ArgList &Args,
+ ArgStringList &CmdArgs) {
+ // Default to the <driver-path>/../lib directory. This works fine on the
+ // platforms that we have tested so far. We will probably have to re-fine
+ // this in the future. In particular, on some platforms, we may need to use
+ // lib64 instead of lib.
+ SmallString<256> DefaultLibPath =
+ llvm::sys::path::parent_path(TC.getDriver().Dir);
+ llvm::sys::path::append(DefaultLibPath, "lib");
+ if (TC.getTriple().isKnownWindowsMSVCEnvironment())
+ CmdArgs.push_back(Args.MakeArgString("-libpath:" + DefaultLibPath));
+ else
+ CmdArgs.push_back(Args.MakeArgString("-L" + DefaultLibPath));
+}
+
static void addSanitizerRuntime(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs, StringRef Sanitizer,
bool IsShared, bool IsWhole) {
@@ -725,9 +1328,11 @@ static void addSanitizerRuntime(const ToolChain &TC, const ArgList &Args,
static bool addSanitizerDynamicList(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs,
StringRef Sanitizer) {
+ bool LinkerIsGnuLd = solaris::isLinkerGnuLd(TC, Args);
+
// Solaris ld defaults to --export-dynamic behaviour but doesn't support
// the option, so don't try to pass it.
- if (TC.getTriple().getOS() == llvm::Triple::Solaris)
+ if (TC.getTriple().isOSSolaris() && !LinkerIsGnuLd)
return true;
SmallString<128> SanRT(TC.getCompilerRT(Args, Sanitizer));
if (llvm::sys::fs::exists(SanRT + ".syms")) {
@@ -737,32 +1342,36 @@ static bool addSanitizerDynamicList(const ToolChain &TC, const ArgList &Args,
return false;
}
-static const char *getAsNeededOption(const ToolChain &TC, bool as_needed) {
+void tools::addAsNeededOption(const ToolChain &TC,
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
+ bool as_needed) {
assert(!TC.getTriple().isOSAIX() &&
"AIX linker does not support any form of --as-needed option yet.");
+ bool LinkerIsGnuLd = solaris::isLinkerGnuLd(TC, Args);
// While the Solaris 11.2 ld added --as-needed/--no-as-needed as aliases
// for the native forms -z ignore/-z record, they are missing in Illumos,
// so always use the native form.
- if (TC.getTriple().isOSSolaris())
- return as_needed ? "-zignore" : "-zrecord";
- else
- return as_needed ? "--as-needed" : "--no-as-needed";
+ // GNU ld doesn't support -z ignore/-z record, so don't use them even on
+ // Solaris.
+ if (TC.getTriple().isOSSolaris() && !LinkerIsGnuLd) {
+ CmdArgs.push_back("-z");
+ CmdArgs.push_back(as_needed ? "ignore" : "record");
+ } else {
+ CmdArgs.push_back(as_needed ? "--as-needed" : "--no-as-needed");
+ }
}
void tools::linkSanitizerRuntimeDeps(const ToolChain &TC,
+ const llvm::opt::ArgList &Args,
ArgStringList &CmdArgs) {
- // Fuchsia never needs these. Any sanitizer runtimes with system
- // dependencies use the `.deplibs` feature instead.
- if (TC.getTriple().isOSFuchsia())
- return;
-
// Force linking against the system libraries sanitizers depends on
// (see PR15823 why this is necessary).
- CmdArgs.push_back(getAsNeededOption(TC, false));
+ addAsNeededOption(TC, Args, CmdArgs, false);
// There's no libpthread or librt on RTEMS & Android.
if (TC.getTriple().getOS() != llvm::Triple::RTEMS &&
- !TC.getTriple().isAndroid()) {
+ !TC.getTriple().isAndroid() && !TC.getTriple().isOHOSFamily()) {
CmdArgs.push_back("-lpthread");
if (!TC.getTriple().isOSOpenBSD())
CmdArgs.push_back("-lrt");
@@ -778,6 +1387,12 @@ void tools::linkSanitizerRuntimeDeps(const ToolChain &TC,
TC.getTriple().isOSNetBSD() ||
TC.getTriple().isOSOpenBSD())
CmdArgs.push_back("-lexecinfo");
+ // There is no libresolv on Android, FreeBSD, OpenBSD, etc. On musl
+ // libresolv.a, even if exists, is an empty archive to satisfy POSIX -lresolv
+ // requirement.
+ if (TC.getTriple().isOSLinux() && !TC.getTriple().isAndroid() &&
+ !TC.getTriple().isMusl())
+ CmdArgs.push_back("-lresolv");
}
static void
@@ -787,45 +1402,48 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
SmallVectorImpl<StringRef> &NonWholeStaticRuntimes,
SmallVectorImpl<StringRef> &HelperStaticRuntimes,
SmallVectorImpl<StringRef> &RequiredSymbols) {
- const SanitizerArgs &SanArgs = TC.getSanitizerArgs();
+ const SanitizerArgs &SanArgs = TC.getSanitizerArgs(Args);
// Collect shared runtimes.
if (SanArgs.needsSharedRt()) {
- if (SanArgs.needsAsanRt() && SanArgs.linkRuntimes()) {
+ if (SanArgs.needsAsanRt()) {
SharedRuntimes.push_back("asan");
if (!Args.hasArg(options::OPT_shared) && !TC.getTriple().isAndroid())
HelperStaticRuntimes.push_back("asan-preinit");
}
- if (SanArgs.needsMemProfRt() && SanArgs.linkRuntimes()) {
+ if (SanArgs.needsMemProfRt()) {
SharedRuntimes.push_back("memprof");
if (!Args.hasArg(options::OPT_shared) && !TC.getTriple().isAndroid())
HelperStaticRuntimes.push_back("memprof-preinit");
}
- if (SanArgs.needsUbsanRt() && SanArgs.linkRuntimes()) {
+ if (SanArgs.needsUbsanRt()) {
if (SanArgs.requiresMinimalRuntime())
SharedRuntimes.push_back("ubsan_minimal");
else
SharedRuntimes.push_back("ubsan_standalone");
}
- if (SanArgs.needsScudoRt() && SanArgs.linkRuntimes()) {
- if (SanArgs.requiresMinimalRuntime())
- SharedRuntimes.push_back("scudo_minimal");
- else
- SharedRuntimes.push_back("scudo");
+ if (SanArgs.needsScudoRt()) {
+ SharedRuntimes.push_back("scudo_standalone");
}
- if (SanArgs.needsTsanRt() && SanArgs.linkRuntimes())
+ if (SanArgs.needsTsanRt())
SharedRuntimes.push_back("tsan");
- if (SanArgs.needsHwasanRt() && SanArgs.linkRuntimes()) {
+ if (SanArgs.needsHwasanRt()) {
if (SanArgs.needsHwasanAliasesRt())
SharedRuntimes.push_back("hwasan_aliases");
else
SharedRuntimes.push_back("hwasan");
+ if (!Args.hasArg(options::OPT_shared))
+ HelperStaticRuntimes.push_back("hwasan-preinit");
}
}
// The stats_client library is also statically linked into DSOs.
- if (SanArgs.needsStatsRt() && SanArgs.linkRuntimes())
+ if (SanArgs.needsStatsRt())
StaticRuntimes.push_back("stats_client");
+ // Always link the static runtime regardless of DSO or executable.
+ if (SanArgs.needsAsanRt())
+ HelperStaticRuntimes.push_back("asan_static");
+
// Collect static runtimes.
if (Args.hasArg(options::OPT_shared)) {
// Don't link static runtimes into DSOs.
@@ -835,20 +1453,19 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
// Each static runtime that has a DSO counterpart above is excluded below,
// but runtimes that exist only as static are not affected by needsSharedRt.
- if (!SanArgs.needsSharedRt() && SanArgs.needsAsanRt() && SanArgs.linkRuntimes()) {
+ if (!SanArgs.needsSharedRt() && SanArgs.needsAsanRt()) {
StaticRuntimes.push_back("asan");
if (SanArgs.linkCXXRuntimes())
StaticRuntimes.push_back("asan_cxx");
}
- if (!SanArgs.needsSharedRt() && SanArgs.needsMemProfRt() &&
- SanArgs.linkRuntimes()) {
+ if (!SanArgs.needsSharedRt() && SanArgs.needsMemProfRt()) {
StaticRuntimes.push_back("memprof");
if (SanArgs.linkCXXRuntimes())
StaticRuntimes.push_back("memprof_cxx");
}
- if (!SanArgs.needsSharedRt() && SanArgs.needsHwasanRt() && SanArgs.linkRuntimes()) {
+ if (!SanArgs.needsSharedRt() && SanArgs.needsHwasanRt()) {
if (SanArgs.needsHwasanAliasesRt()) {
StaticRuntimes.push_back("hwasan_aliases");
if (SanArgs.linkCXXRuntimes())
@@ -859,22 +1476,21 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
StaticRuntimes.push_back("hwasan_cxx");
}
}
- if (SanArgs.needsDfsanRt() && SanArgs.linkRuntimes())
+ if (SanArgs.needsDfsanRt())
StaticRuntimes.push_back("dfsan");
- if (SanArgs.needsLsanRt() && SanArgs.linkRuntimes())
+ if (SanArgs.needsLsanRt())
StaticRuntimes.push_back("lsan");
- if (SanArgs.needsMsanRt() && SanArgs.linkRuntimes()) {
+ if (SanArgs.needsMsanRt()) {
StaticRuntimes.push_back("msan");
if (SanArgs.linkCXXRuntimes())
StaticRuntimes.push_back("msan_cxx");
}
- if (!SanArgs.needsSharedRt() && SanArgs.needsTsanRt() &&
- SanArgs.linkRuntimes()) {
+ if (!SanArgs.needsSharedRt() && SanArgs.needsTsanRt()) {
StaticRuntimes.push_back("tsan");
if (SanArgs.linkCXXRuntimes())
StaticRuntimes.push_back("tsan_cxx");
}
- if (!SanArgs.needsSharedRt() && SanArgs.needsUbsanRt() && SanArgs.linkRuntimes()) {
+ if (!SanArgs.needsSharedRt() && SanArgs.needsUbsanRt()) {
if (SanArgs.requiresMinimalRuntime()) {
StaticRuntimes.push_back("ubsan_minimal");
} else {
@@ -883,33 +1499,27 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
StaticRuntimes.push_back("ubsan_standalone_cxx");
}
}
- if (SanArgs.needsSafeStackRt() && SanArgs.linkRuntimes()) {
+ if (SanArgs.needsSafeStackRt()) {
NonWholeStaticRuntimes.push_back("safestack");
RequiredSymbols.push_back("__safestack_init");
}
- if (!(SanArgs.needsSharedRt() && SanArgs.needsUbsanRt() && SanArgs.linkRuntimes())) {
- if (SanArgs.needsCfiRt() && SanArgs.linkRuntimes())
+ if (!(SanArgs.needsSharedRt() && SanArgs.needsUbsanRt())) {
+ if (SanArgs.needsCfiRt())
StaticRuntimes.push_back("cfi");
- if (SanArgs.needsCfiDiagRt() && SanArgs.linkRuntimes()) {
+ if (SanArgs.needsCfiDiagRt()) {
StaticRuntimes.push_back("cfi_diag");
if (SanArgs.linkCXXRuntimes())
StaticRuntimes.push_back("ubsan_standalone_cxx");
}
}
- if (SanArgs.needsStatsRt() && SanArgs.linkRuntimes()) {
+ if (SanArgs.needsStatsRt()) {
NonWholeStaticRuntimes.push_back("stats");
RequiredSymbols.push_back("__sanitizer_stats_register");
}
- if (!SanArgs.needsSharedRt() && SanArgs.needsScudoRt() && SanArgs.linkRuntimes()) {
- if (SanArgs.requiresMinimalRuntime()) {
- StaticRuntimes.push_back("scudo_minimal");
- if (SanArgs.linkCXXRuntimes())
- StaticRuntimes.push_back("scudo_cxx_minimal");
- } else {
- StaticRuntimes.push_back("scudo");
- if (SanArgs.linkCXXRuntimes())
- StaticRuntimes.push_back("scudo_cxx");
- }
+ if (!SanArgs.needsSharedRt() && SanArgs.needsScudoRt()) {
+ StaticRuntimes.push_back("scudo_standalone");
+ if (SanArgs.linkCXXRuntimes())
+ StaticRuntimes.push_back("scudo_standalone_cxx");
}
}
@@ -917,13 +1527,15 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
// C runtime, etc). Returns true if sanitizer system deps need to be linked in.
bool tools::addSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs) {
+ const SanitizerArgs &SanArgs = TC.getSanitizerArgs(Args);
SmallVector<StringRef, 4> SharedRuntimes, StaticRuntimes,
NonWholeStaticRuntimes, HelperStaticRuntimes, RequiredSymbols;
- collectSanitizerRuntimes(TC, Args, SharedRuntimes, StaticRuntimes,
- NonWholeStaticRuntimes, HelperStaticRuntimes,
- RequiredSymbols);
+ if (SanArgs.linkRuntimes()) {
+ collectSanitizerRuntimes(TC, Args, SharedRuntimes, StaticRuntimes,
+ NonWholeStaticRuntimes, HelperStaticRuntimes,
+ RequiredSymbols);
+ }
- const SanitizerArgs &SanArgs = TC.getSanitizerArgs();
// Inject libfuzzer dependencies.
if (SanArgs.needsFuzzer() && SanArgs.linkRuntimes() &&
!Args.hasArg(options::OPT_shared)) {
@@ -968,6 +1580,19 @@ bool tools::addSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
if (SanArgs.hasCrossDsoCfi() && !AddExportDynamic)
CmdArgs.push_back("--export-dynamic-symbol=__cfi_check");
+ if (SanArgs.hasMemTag()) {
+ if (!TC.getTriple().isAndroid()) {
+ TC.getDriver().Diag(diag::err_drv_unsupported_opt_for_target)
+ << "-fsanitize=memtag*" << TC.getTriple().str();
+ }
+ CmdArgs.push_back(
+ Args.MakeArgString("--android-memtag-mode=" + SanArgs.getMemtagMode()));
+ if (SanArgs.hasMemtagHeap())
+ CmdArgs.push_back("--android-memtag-heap");
+ if (SanArgs.hasMemtagStack())
+ CmdArgs.push_back("--android-memtag-stack");
+ }
+
return !StaticRuntimes.empty() || !NonWholeStaticRuntimes.empty();
}
@@ -976,19 +1601,21 @@ bool tools::addXRayRuntime(const ToolChain&TC, const ArgList &Args, ArgStringLis
return false;
if (TC.getXRayArgs().needsXRayRt()) {
- CmdArgs.push_back("-whole-archive");
+ CmdArgs.push_back("--whole-archive");
CmdArgs.push_back(TC.getCompilerRTArgString(Args, "xray"));
for (const auto &Mode : TC.getXRayArgs().modeList())
CmdArgs.push_back(TC.getCompilerRTArgString(Args, Mode));
- CmdArgs.push_back("-no-whole-archive");
+ CmdArgs.push_back("--no-whole-archive");
return true;
}
return false;
}
-void tools::linkXRayRuntimeDeps(const ToolChain &TC, ArgStringList &CmdArgs) {
- CmdArgs.push_back(getAsNeededOption(TC, false));
+void tools::linkXRayRuntimeDeps(const ToolChain &TC,
+ const llvm::opt::ArgList &Args,
+ ArgStringList &CmdArgs) {
+ addAsNeededOption(TC, Args, CmdArgs, false);
CmdArgs.push_back("-lpthread");
if (!TC.getTriple().isOSOpenBSD())
CmdArgs.push_back("-lrt");
@@ -1017,26 +1644,27 @@ const char *tools::SplitDebugName(const JobAction &JA, const ArgList &Args,
F += ".dwo";
};
if (Arg *A = Args.getLastArg(options::OPT_gsplit_dwarf_EQ))
- if (StringRef(A->getValue()) == "single")
+ if (StringRef(A->getValue()) == "single" && Output.isFilename())
return Args.MakeArgString(Output.getFilename());
- Arg *FinalOutput = Args.getLastArg(options::OPT_o);
- if (FinalOutput && Args.hasArg(options::OPT_c)) {
- SmallString<128> T(FinalOutput->getValue());
- llvm::sys::path::remove_filename(T);
- llvm::sys::path::append(T, llvm::sys::path::stem(FinalOutput->getValue()));
- AddPostfix(T);
- return Args.MakeArgString(T);
+ SmallString<128> T;
+ if (const Arg *A = Args.getLastArg(options::OPT_dumpdir)) {
+ T = A->getValue();
} else {
- // Use the compilation dir.
- Arg *A = Args.getLastArg(options::OPT_ffile_compilation_dir_EQ,
- options::OPT_fdebug_compilation_dir_EQ);
- SmallString<128> T(A ? A->getValue() : "");
- SmallString<128> F(llvm::sys::path::stem(Input.getBaseInput()));
- AddPostfix(F);
- T += F;
- return Args.MakeArgString(T);
+ Arg *FinalOutput = Args.getLastArg(options::OPT_o, options::OPT__SLASH_o);
+ if (FinalOutput && Args.hasArg(options::OPT_c)) {
+ T = FinalOutput->getValue();
+ llvm::sys::path::remove_filename(T);
+ llvm::sys::path::append(T,
+ llvm::sys::path::stem(FinalOutput->getValue()));
+ AddPostfix(T);
+ return Args.MakeArgString(T);
+ }
}
+
+ T += llvm::sys::path::stem(Input.getBaseInput());
+ AddPostfix(T);
+ return Args.MakeArgString(T);
}
void tools::SplitDebugInfo(const ToolChain &TC, Compilation &C, const Tool &T,
@@ -1078,6 +1706,17 @@ void tools::claimNoWarnArgs(const ArgList &Args) {
Args.ClaimAllArgs(options::OPT_fno_lto);
}
+Arg *tools::getLastCSProfileGenerateArg(const ArgList &Args) {
+ auto *CSPGOGenerateArg = Args.getLastArg(options::OPT_fcs_profile_generate,
+ options::OPT_fcs_profile_generate_EQ,
+ options::OPT_fno_profile_generate);
+ if (CSPGOGenerateArg &&
+ CSPGOGenerateArg->getOption().matches(options::OPT_fno_profile_generate))
+ CSPGOGenerateArg = nullptr;
+
+ return CSPGOGenerateArg;
+}
+
Arg *tools::getLastProfileUseArg(const ArgList &Args) {
auto *ProfileUseArg = Args.getLastArg(
options::OPT_fprofile_instr_use, options::OPT_fprofile_instr_use_EQ,
@@ -1107,6 +1746,24 @@ Arg *tools::getLastProfileSampleUseArg(const ArgList &Args) {
options::OPT_fauto_profile_EQ);
}
+const char *tools::RelocationModelName(llvm::Reloc::Model Model) {
+ switch (Model) {
+ case llvm::Reloc::Static:
+ return "static";
+ case llvm::Reloc::PIC_:
+ return "pic";
+ case llvm::Reloc::DynamicNoPIC:
+ return "dynamic-no-pic";
+ case llvm::Reloc::ROPI:
+ return "ropi";
+ case llvm::Reloc::RWPI:
+ return "rwpi";
+ case llvm::Reloc::ROPI_RWPI:
+ return "ropi-rwpi";
+ }
+ llvm_unreachable("Unknown Reloc::Model kind");
+}
+
/// Parses the various -fpic/-fPIC/-fpie/-fPIE arguments. Then,
/// smooshes them together with platform defaults, to decide whether
/// this compile should be using PIC mode or not. Returns a tuple of
@@ -1116,7 +1773,7 @@ tools::ParsePICArgs(const ToolChain &ToolChain, const ArgList &Args) {
const llvm::Triple &EffectiveTriple = ToolChain.getEffectiveTriple();
const llvm::Triple &Triple = ToolChain.getTriple();
- bool PIE = ToolChain.isPIEDefault();
+ bool PIE = ToolChain.isPIEDefault(Args);
bool PIC = PIE || ToolChain.isPICDefault();
// The Darwin/MachO default to use PIC does not apply when using -static.
if (Triple.isOSBinFormatMachO() && Args.hasArg(options::OPT_static))
@@ -1152,6 +1809,10 @@ tools::ParsePICArgs(const ToolChain &ToolChain, const ArgList &Args) {
}
}
+ // OHOS-specific defaults for PIC/PIE
+ if (Triple.isOHOSFamily() && Triple.getArch() == llvm::Triple::aarch64)
+ PIC = true;
+
// OpenBSD-specific defaults for PIE
if (Triple.isOSOpenBSD()) {
switch (ToolChain.getArch()) {
@@ -1174,10 +1835,6 @@ tools::ParsePICArgs(const ToolChain &ToolChain, const ArgList &Args) {
}
}
- // AMDGPU-specific defaults for PIC.
- if (Triple.getArch() == llvm::Triple::amdgcn)
- PIC = true;
-
// The last argument relating to either PIC or PIE wins, and no
// other argument is used. If the last argument is any flavor of the
// '-fno-...' arguments, both PIC and PIE are disabled. Any PIE
@@ -1186,10 +1843,9 @@ tools::ParsePICArgs(const ToolChain &ToolChain, const ArgList &Args) {
options::OPT_fpic, options::OPT_fno_pic,
options::OPT_fPIE, options::OPT_fno_PIE,
options::OPT_fpie, options::OPT_fno_pie);
- if (Triple.isOSWindows() && LastPICArg &&
- LastPICArg ==
- Args.getLastArg(options::OPT_fPIC, options::OPT_fpic,
- options::OPT_fPIE, options::OPT_fpie)) {
+ if (Triple.isOSWindows() && !Triple.isOSCygMing() && LastPICArg &&
+ LastPICArg == Args.getLastArg(options::OPT_fPIC, options::OPT_fpic,
+ options::OPT_fPIE, options::OPT_fpie)) {
ToolChain.getDriver().Diag(diag::err_drv_unsupported_opt_for_target)
<< LastPICArg->getSpelling() << Triple.str();
if (Triple.getArch() == llvm::Triple::x86_64)
@@ -1211,30 +1867,31 @@ tools::ParsePICArgs(const ToolChain &ToolChain, const ArgList &Args) {
O.matches(options::OPT_fPIE) || O.matches(options::OPT_fPIC);
} else {
PIE = PIC = false;
- if (EffectiveTriple.isPS4CPU()) {
+ if (EffectiveTriple.isPS()) {
Arg *ModelArg = Args.getLastArg(options::OPT_mcmodel_EQ);
StringRef Model = ModelArg ? ModelArg->getValue() : "";
if (Model != "kernel") {
PIC = true;
- ToolChain.getDriver().Diag(diag::warn_drv_ps4_force_pic)
- << LastPICArg->getSpelling();
+ ToolChain.getDriver().Diag(diag::warn_drv_ps_force_pic)
+ << LastPICArg->getSpelling()
+ << (EffectiveTriple.isPS4() ? "PS4" : "PS5");
}
}
}
}
}
- // Introduce a Darwin and PS4-specific hack. If the default is PIC, but the
- // PIC level would've been set to level 1, force it back to level 2 PIC
+ // Introduce a Darwin and PS4/PS5-specific hack. If the default is PIC, but
+ // the PIC level would've been set to level 1, force it back to level 2 PIC
// instead.
- if (PIC && (Triple.isOSDarwin() || EffectiveTriple.isPS4CPU()))
+ if (PIC && (Triple.isOSDarwin() || EffectiveTriple.isPS()))
IsPICLevelTwo |= ToolChain.isPICDefault();
// This kernel flags are a trump-card: they will disable PIC/PIE
// generation, independent of the argument order.
if (KernelOrKext &&
((!EffectiveTriple.isiOS() || EffectiveTriple.isOSVersionLT(6)) &&
- !EffectiveTriple.isWatchOS()))
+ !EffectiveTriple.isWatchOS() && !EffectiveTriple.isDriverKit()))
PIC = PIE = false;
if (Arg *A = Args.getLastArg(options::OPT_mdynamic_no_pic)) {
@@ -1352,7 +2009,49 @@ unsigned tools::ParseFunctionAlignment(const ToolChain &TC,
return Value ? llvm::Log2_32_Ceil(std::min(Value, 65536u)) : Value;
}
-unsigned tools::ParseDebugDefaultVersion(const ToolChain &TC,
+void tools::addDebugInfoKind(
+ ArgStringList &CmdArgs, llvm::codegenoptions::DebugInfoKind DebugInfoKind) {
+ switch (DebugInfoKind) {
+ case llvm::codegenoptions::DebugDirectivesOnly:
+ CmdArgs.push_back("-debug-info-kind=line-directives-only");
+ break;
+ case llvm::codegenoptions::DebugLineTablesOnly:
+ CmdArgs.push_back("-debug-info-kind=line-tables-only");
+ break;
+ case llvm::codegenoptions::DebugInfoConstructor:
+ CmdArgs.push_back("-debug-info-kind=constructor");
+ break;
+ case llvm::codegenoptions::LimitedDebugInfo:
+ CmdArgs.push_back("-debug-info-kind=limited");
+ break;
+ case llvm::codegenoptions::FullDebugInfo:
+ CmdArgs.push_back("-debug-info-kind=standalone");
+ break;
+ case llvm::codegenoptions::UnusedTypeInfo:
+ CmdArgs.push_back("-debug-info-kind=unused-types");
+ break;
+ default:
+ break;
+ }
+}
+
+// Convert an arg of the form "-gN" or "-ggdbN" or one of their aliases
+// to the corresponding DebugInfoKind.
+llvm::codegenoptions::DebugInfoKind tools::debugLevelToInfoKind(const Arg &A) {
+ assert(A.getOption().matches(options::OPT_gN_Group) &&
+ "Not a -g option that specifies a debug-info level");
+ if (A.getOption().matches(options::OPT_g0) ||
+ A.getOption().matches(options::OPT_ggdb0))
+ return llvm::codegenoptions::NoDebugInfo;
+ if (A.getOption().matches(options::OPT_gline_tables_only) ||
+ A.getOption().matches(options::OPT_ggdb1))
+ return llvm::codegenoptions::DebugLineTablesOnly;
+ if (A.getOption().matches(options::OPT_gline_directives_only))
+ return llvm::codegenoptions::DebugDirectivesOnly;
+ return llvm::codegenoptions::DebugInfoConstructor;
+}
+
+static unsigned ParseDebugDefaultVersion(const ToolChain &TC,
const ArgList &Args) {
const Arg *A = Args.getLastArg(options::OPT_fdebug_default_version);
@@ -1367,6 +2066,34 @@ unsigned tools::ParseDebugDefaultVersion(const ToolChain &TC,
return Value;
}
+unsigned tools::DwarfVersionNum(StringRef ArgValue) {
+ return llvm::StringSwitch<unsigned>(ArgValue)
+ .Case("-gdwarf-2", 2)
+ .Case("-gdwarf-3", 3)
+ .Case("-gdwarf-4", 4)
+ .Case("-gdwarf-5", 5)
+ .Default(0);
+}
+
+const Arg *tools::getDwarfNArg(const ArgList &Args) {
+ return Args.getLastArg(options::OPT_gdwarf_2, options::OPT_gdwarf_3,
+ options::OPT_gdwarf_4, options::OPT_gdwarf_5,
+ options::OPT_gdwarf);
+}
+
+unsigned tools::getDwarfVersion(const ToolChain &TC,
+ const llvm::opt::ArgList &Args) {
+ unsigned DwarfVersion = ParseDebugDefaultVersion(TC, Args);
+ if (const Arg *GDwarfN = getDwarfNArg(Args))
+ if (int N = DwarfVersionNum(GDwarfN->getSpelling()))
+ DwarfVersion = N;
+ if (DwarfVersion == 0) {
+ DwarfVersion = TC.GetDefaultDwarfVersion();
+ assert(DwarfVersion && "toolchain default DWARF version must be nonzero");
+ }
+ return DwarfVersion;
+}
+
void tools::AddAssemblerKPIC(const ToolChain &ToolChain, const ArgList &Args,
ArgStringList &CmdArgs) {
llvm::Reloc::Model RelocationModel;
@@ -1389,17 +2116,12 @@ enum class LibGccType { UnspecifiedLibGcc, StaticLibGcc, SharedLibGcc };
static LibGccType getLibGccType(const ToolChain &TC, const Driver &D,
const ArgList &Args) {
if (Args.hasArg(options::OPT_static_libgcc) ||
- Args.hasArg(options::OPT_static) || Args.hasArg(options::OPT_static_pie))
+ Args.hasArg(options::OPT_static) || Args.hasArg(options::OPT_static_pie) ||
+ // The Android NDK only provides libunwind.a, not libunwind.so.
+ TC.getTriple().isAndroid())
return LibGccType::StaticLibGcc;
if (Args.hasArg(options::OPT_shared_libgcc))
return LibGccType::SharedLibGcc;
- // The Android NDK only provides libunwind.a, not libunwind.so.
- if (TC.getTriple().isAndroid())
- return LibGccType::StaticLibGcc;
- // For MinGW, don't imply a shared libgcc here, we only want to return
- // SharedLibGcc if that was explicitly requested.
- if (D.CCCIsCXX() && !TC.getTriple().isOSCygMing())
- return LibGccType::SharedLibGcc;
return LibGccType::UnspecifiedLibGcc;
}
@@ -1419,18 +2141,25 @@ static LibGccType getLibGccType(const ToolChain &TC, const Driver &D,
static void AddUnwindLibrary(const ToolChain &TC, const Driver &D,
ArgStringList &CmdArgs, const ArgList &Args) {
ToolChain::UnwindLibType UNW = TC.GetUnwindLibType(Args);
+ // By default OHOS binaries are linked statically to libunwind.
+ if (TC.getTriple().isOHOSFamily() && UNW == ToolChain::UNW_CompilerRT) {
+ CmdArgs.push_back("-l:libunwind.a");
+ return;
+ }
+
// Targets that don't use unwind libraries.
if ((TC.getTriple().isAndroid() && UNW == ToolChain::UNW_Libgcc) ||
TC.getTriple().isOSIAMCU() || TC.getTriple().isOSBinFormatWasm() ||
- UNW == ToolChain::UNW_None)
+ TC.getTriple().isWindowsMSVCEnvironment() || UNW == ToolChain::UNW_None)
return;
LibGccType LGT = getLibGccType(TC, D, Args);
bool AsNeeded = LGT == LibGccType::UnspecifiedLibGcc &&
+ (UNW == ToolChain::UNW_CompilerRT || !D.CCCIsCXX()) &&
!TC.getTriple().isAndroid() &&
!TC.getTriple().isOSCygMing() && !TC.getTriple().isOSAIX();
if (AsNeeded)
- CmdArgs.push_back(getAsNeededOption(TC, true));
+ addAsNeededOption(TC, Args, CmdArgs, true);
switch (UNW) {
case ToolChain::UNW_None:
@@ -1450,30 +2179,32 @@ static void AddUnwindLibrary(const ToolChain &TC, const Driver &D,
CmdArgs.push_back("-lunwind");
} else if (LGT == LibGccType::StaticLibGcc) {
CmdArgs.push_back("-l:libunwind.a");
- } else if (TC.getTriple().isOSCygMing()) {
- if (LGT == LibGccType::SharedLibGcc)
+ } else if (LGT == LibGccType::SharedLibGcc) {
+ if (TC.getTriple().isOSCygMing())
CmdArgs.push_back("-l:libunwind.dll.a");
else
- // Let the linker choose between libunwind.dll.a and libunwind.a
- // depending on what's available, and depending on the -static flag
- CmdArgs.push_back("-lunwind");
+ CmdArgs.push_back("-l:libunwind.so");
} else {
- CmdArgs.push_back("-l:libunwind.so");
+ // Let the linker choose between libunwind.so and libunwind.a
+ // depending on what's available, and depending on the -static flag
+ CmdArgs.push_back("-lunwind");
}
break;
}
if (AsNeeded)
- CmdArgs.push_back(getAsNeededOption(TC, false));
+ addAsNeededOption(TC, Args, CmdArgs, false);
}
static void AddLibgcc(const ToolChain &TC, const Driver &D,
ArgStringList &CmdArgs, const ArgList &Args) {
LibGccType LGT = getLibGccType(TC, D, Args);
- if (LGT != LibGccType::SharedLibGcc)
+ if (LGT == LibGccType::StaticLibGcc ||
+ (LGT == LibGccType::UnspecifiedLibGcc && !D.CCCIsCXX()))
CmdArgs.push_back("-lgcc");
AddUnwindLibrary(TC, D, CmdArgs, Args);
- if (LGT == LibGccType::SharedLibGcc)
+ if (LGT == LibGccType::SharedLibGcc ||
+ (LGT == LibGccType::UnspecifiedLibGcc && D.CCCIsCXX()))
CmdArgs.push_back("-lgcc");
}
@@ -1492,9 +2223,10 @@ void tools::AddRunTimeLibs(const ToolChain &TC, const Driver &D,
if (TC.getTriple().isKnownWindowsMSVCEnvironment()) {
// Issue error diagnostic if libgcc is explicitly specified
// through command line as --rtlib option argument.
- if (Args.hasArg(options::OPT_rtlib_EQ)) {
+ Arg *A = Args.getLastArg(options::OPT_rtlib_EQ);
+ if (A && A->getValue() != StringRef("platform")) {
TC.getDriver().Diag(diag::err_drv_unsupported_rtlib_for_platform)
- << Args.getLastArg(options::OPT_rtlib_EQ)->getValue() << "MSVC";
+ << A->getValue() << "MSVC";
}
} else
AddLibgcc(TC, D, CmdArgs, Args);
@@ -1514,35 +2246,49 @@ SmallString<128> tools::getStatsFileName(const llvm::opt::ArgList &Args,
const InputInfo &Input,
const Driver &D) {
const Arg *A = Args.getLastArg(options::OPT_save_stats_EQ);
- if (!A)
+ if (!A && !D.CCPrintInternalStats)
return {};
- StringRef SaveStats = A->getValue();
SmallString<128> StatsFile;
- if (SaveStats == "obj" && Output.isFilename()) {
- StatsFile.assign(Output.getFilename());
- llvm::sys::path::remove_filename(StatsFile);
- } else if (SaveStats != "cwd") {
- D.Diag(diag::err_drv_invalid_value) << A->getAsString(Args) << SaveStats;
- return {};
- }
+ if (A) {
+ StringRef SaveStats = A->getValue();
+ if (SaveStats == "obj" && Output.isFilename()) {
+ StatsFile.assign(Output.getFilename());
+ llvm::sys::path::remove_filename(StatsFile);
+ } else if (SaveStats != "cwd") {
+ D.Diag(diag::err_drv_invalid_value) << A->getAsString(Args) << SaveStats;
+ return {};
+ }
- StringRef BaseName = llvm::sys::path::filename(Input.getBaseInput());
- llvm::sys::path::append(StatsFile, BaseName);
- llvm::sys::path::replace_extension(StatsFile, "stats");
+ StringRef BaseName = llvm::sys::path::filename(Input.getBaseInput());
+ llvm::sys::path::append(StatsFile, BaseName);
+ llvm::sys::path::replace_extension(StatsFile, "stats");
+ } else {
+ assert(D.CCPrintInternalStats);
+ StatsFile.assign(D.CCPrintInternalStatReportFilename.empty()
+ ? "-"
+ : D.CCPrintInternalStatReportFilename);
+ }
return StatsFile;
}
-void tools::addMultilibFlag(bool Enabled, const char *const Flag,
+void tools::addMultilibFlag(bool Enabled, const StringRef Flag,
Multilib::flags_list &Flags) {
- Flags.push_back(std::string(Enabled ? "+" : "-") + Flag);
+ assert(Flag.front() == '-');
+ if (Enabled) {
+ Flags.push_back(Flag.str());
+ } else {
+ Flags.push_back(("!" + Flag.substr(1)).str());
+ }
}
void tools::addX86AlignBranchArgs(const Driver &D, const ArgList &Args,
- ArgStringList &CmdArgs, bool IsLTO) {
+ ArgStringList &CmdArgs, bool IsLTO,
+ const StringRef PluginOptPrefix) {
auto addArg = [&, IsLTO](const Twine &Arg) {
if (IsLTO) {
- CmdArgs.push_back(Args.MakeArgString("-plugin-opt=" + Arg));
+ assert(!PluginOptPrefix.empty() && "Cannot have empty PluginOptPrefix!");
+ CmdArgs.push_back(Args.MakeArgString(Twine(PluginOptPrefix) + Arg));
} else {
CmdArgs.push_back("-mllvm");
CmdArgs.push_back(Args.MakeArgString(Arg));
@@ -1588,28 +2334,325 @@ void tools::addX86AlignBranchArgs(const Driver &D, const ArgList &Args,
}
}
+/// SDLSearch: Search for Static Device Library
+/// The search for SDL bitcode files is consistent with how static host
+/// libraries are discovered. That is, the -l option triggers a search for
+/// files in a set of directories called the LINKPATH. The host library search
+/// procedure looks for a specific filename in the LINKPATH. The filename for
+/// a host library is lib<libname>.a or lib<libname>.so. For SDLs, there is an
+/// ordered-set of filenames that are searched. We call this ordered-set of
+/// filenames as SEARCH-ORDER. Since an SDL can either be device-type specific,
+/// architecture specific, or generic across all architectures, a naming
+/// convention and search order is used where the file name embeds the
+/// architecture name <arch-name> (nvptx or amdgcn) and the GPU device type
+/// <device-name> such as sm_30 and gfx906. <device-name> is absent in case of
+/// device-independent SDLs. To reduce congestion in host library directories,
+/// the search first looks for files in the “libdevice” subdirectory. SDLs that
+/// are bc files begin with the prefix “lib”.
+///
+/// Machine-code SDLs can also be managed as an archive (*.a file). The
+/// convention has been to use the prefix “lib”. To avoid confusion with host
+/// archive libraries, we use prefix "libbc-" for the bitcode SDL archives.
+///
+static bool SDLSearch(const Driver &D, const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ const SmallVectorImpl<std::string> &LibraryPaths,
+ StringRef Lib, StringRef Arch, StringRef Target,
+ bool isBitCodeSDL) {
+ SmallVector<std::string, 12> SDLs;
+
+ std::string LibDeviceLoc = "/libdevice";
+ std::string LibBcPrefix = "/libbc-";
+ std::string LibPrefix = "/lib";
+
+ if (isBitCodeSDL) {
+ // SEARCH-ORDER for Bitcode SDLs:
+ // libdevice/libbc-<libname>-<arch-name>-<device-type>.a
+ // libbc-<libname>-<arch-name>-<device-type>.a
+ // libdevice/libbc-<libname>-<arch-name>.a
+ // libbc-<libname>-<arch-name>.a
+ // libdevice/libbc-<libname>.a
+ // libbc-<libname>.a
+ // libdevice/lib<libname>-<arch-name>-<device-type>.bc
+ // lib<libname>-<arch-name>-<device-type>.bc
+ // libdevice/lib<libname>-<arch-name>.bc
+ // lib<libname>-<arch-name>.bc
+ // libdevice/lib<libname>.bc
+ // lib<libname>.bc
+
+ for (StringRef Base : {LibBcPrefix, LibPrefix}) {
+ const auto *Ext = Base.contains(LibBcPrefix) ? ".a" : ".bc";
+
+ for (auto Suffix : {Twine(Lib + "-" + Arch + "-" + Target).str(),
+ Twine(Lib + "-" + Arch).str(), Twine(Lib).str()}) {
+ SDLs.push_back(Twine(LibDeviceLoc + Base + Suffix + Ext).str());
+ SDLs.push_back(Twine(Base + Suffix + Ext).str());
+ }
+ }
+ } else {
+ // SEARCH-ORDER for Machine-code SDLs:
+ // libdevice/lib<libname>-<arch-name>-<device-type>.a
+ // lib<libname>-<arch-name>-<device-type>.a
+ // libdevice/lib<libname>-<arch-name>.a
+ // lib<libname>-<arch-name>.a
+
+ const auto *Ext = ".a";
+
+ for (auto Suffix : {Twine(Lib + "-" + Arch + "-" + Target).str(),
+ Twine(Lib + "-" + Arch).str()}) {
+ SDLs.push_back(Twine(LibDeviceLoc + LibPrefix + Suffix + Ext).str());
+ SDLs.push_back(Twine(LibPrefix + Suffix + Ext).str());
+ }
+ }
+
+ // The CUDA toolchain does not use a global device llvm-link before the LLVM
+ // backend generates ptx. So currently, the use of bitcode SDL for nvptx is
+ // only possible with post-clang-cc1 linking. Clang cc1 has a feature that
+ // will link libraries after clang compilation while the LLVM IR is still in
+ // memory. This utilizes a clang cc1 option called “-mlink-builtin-bitcode”.
+ // This is a clang -cc1 option that is generated by the clang driver. The
+ // option value must a full path to an existing file.
+ bool FoundSDL = false;
+ for (auto LPath : LibraryPaths) {
+ for (auto SDL : SDLs) {
+ auto FullName = Twine(LPath + SDL).str();
+ if (llvm::sys::fs::exists(FullName)) {
+ CC1Args.push_back(DriverArgs.MakeArgString(FullName));
+ FoundSDL = true;
+ break;
+ }
+ }
+ if (FoundSDL)
+ break;
+ }
+ return FoundSDL;
+}
+
+/// Search if a user provided archive file lib<libname>.a exists in any of
+/// the library paths. If so, add a new command to clang-offload-bundler to
+/// unbundle this archive and create a temporary device specific archive. Name
+/// of this SDL is passed to the llvm-link tool.
+static void GetSDLFromOffloadArchive(
+ Compilation &C, const Driver &D, const Tool &T, const JobAction &JA,
+ const InputInfoList &Inputs, const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ const SmallVectorImpl<std::string> &LibraryPaths, StringRef Lib,
+ StringRef Arch, StringRef Target, bool isBitCodeSDL) {
+
+ // We don't support bitcode archive bundles for nvptx
+ if (isBitCodeSDL && Arch.contains("nvptx"))
+ return;
+
+ bool FoundAOB = false;
+ std::string ArchiveOfBundles;
+
+ llvm::Triple Triple(D.getTargetTriple());
+ bool IsMSVC = Triple.isWindowsMSVCEnvironment();
+ auto Ext = IsMSVC ? ".lib" : ".a";
+ if (!Lib.starts_with(":") && !Lib.starts_with("-l")) {
+ if (llvm::sys::fs::exists(Lib)) {
+ ArchiveOfBundles = Lib;
+ FoundAOB = true;
+ }
+ } else {
+ Lib.consume_front("-l");
+ for (auto LPath : LibraryPaths) {
+ ArchiveOfBundles.clear();
+ auto LibFile = (Lib.starts_with(":") ? Lib.drop_front()
+ : IsMSVC ? Lib + Ext
+ : "lib" + Lib + Ext)
+ .str();
+ for (auto Prefix : {"/libdevice/", "/"}) {
+ auto AOB = Twine(LPath + Prefix + LibFile).str();
+ if (llvm::sys::fs::exists(AOB)) {
+ ArchiveOfBundles = AOB;
+ FoundAOB = true;
+ break;
+ }
+ }
+ if (FoundAOB)
+ break;
+ }
+ }
+
+ if (!FoundAOB)
+ return;
+
+ llvm::file_magic Magic;
+ auto EC = llvm::identify_magic(ArchiveOfBundles, Magic);
+ if (EC || Magic != llvm::file_magic::archive)
+ return;
+
+ StringRef Prefix = isBitCodeSDL ? "libbc-" : "lib";
+ std::string OutputLib =
+ D.GetTemporaryPath(Twine(Prefix + llvm::sys::path::filename(Lib) + "-" +
+ Arch + "-" + Target)
+ .str(),
+ "a");
+
+ C.addTempFile(C.getArgs().MakeArgString(OutputLib));
+
+ ArgStringList CmdArgs;
+ SmallString<128> DeviceTriple;
+ DeviceTriple += Action::GetOffloadKindName(JA.getOffloadingDeviceKind());
+ DeviceTriple += '-';
+ std::string NormalizedTriple = T.getToolChain().getTriple().normalize();
+ DeviceTriple += NormalizedTriple;
+ if (!Target.empty()) {
+ DeviceTriple += '-';
+ DeviceTriple += Target;
+ }
+
+ std::string UnbundleArg("-unbundle");
+ std::string TypeArg("-type=a");
+ std::string InputArg("-input=" + ArchiveOfBundles);
+ std::string OffloadArg("-targets=" + std::string(DeviceTriple));
+ std::string OutputArg("-output=" + OutputLib);
+
+ const char *UBProgram = DriverArgs.MakeArgString(
+ T.getToolChain().GetProgramPath("clang-offload-bundler"));
+
+ ArgStringList UBArgs;
+ UBArgs.push_back(C.getArgs().MakeArgString(UnbundleArg));
+ UBArgs.push_back(C.getArgs().MakeArgString(TypeArg));
+ UBArgs.push_back(C.getArgs().MakeArgString(InputArg));
+ UBArgs.push_back(C.getArgs().MakeArgString(OffloadArg));
+ UBArgs.push_back(C.getArgs().MakeArgString(OutputArg));
+
+ // Add this flag to not exit from clang-offload-bundler if no compatible
+ // code object is found in heterogenous archive library.
+ std::string AdditionalArgs("-allow-missing-bundles");
+ UBArgs.push_back(C.getArgs().MakeArgString(AdditionalArgs));
+
+ // Add this flag to treat hip and hipv4 offload kinds as compatible with
+ // openmp offload kind while extracting code objects from a heterogenous
+ // archive library. Vice versa is also considered compatible.
+ std::string HipCompatibleArgs("-hip-openmp-compatible");
+ UBArgs.push_back(C.getArgs().MakeArgString(HipCompatibleArgs));
+
+ C.addCommand(std::make_unique<Command>(
+ JA, T, ResponseFileSupport::AtFileCurCP(), UBProgram, UBArgs, Inputs,
+ InputInfo(&JA, C.getArgs().MakeArgString(OutputLib))));
+
+ CC1Args.push_back(DriverArgs.MakeArgString(OutputLib));
+
+ return;
+}
+
+// Wrapper function used by driver for adding SDLs during link phase.
+void tools::AddStaticDeviceLibsLinking(Compilation &C, const Tool &T,
+ const JobAction &JA,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ StringRef Arch, StringRef Target,
+ bool isBitCodeSDL) {
+ AddStaticDeviceLibs(&C, &T, &JA, &Inputs, C.getDriver(), DriverArgs, CC1Args,
+ Arch, Target, isBitCodeSDL);
+}
+
+// User defined Static Device Libraries(SDLs) can be passed to clang for
+// offloading GPU compilers. Like static host libraries, the use of a SDL is
+// specified with the -l command line option. The primary difference between
+// host and SDLs is the filenames for SDLs (refer SEARCH-ORDER for Bitcode SDLs
+// and SEARCH-ORDER for Machine-code SDLs for the naming convention).
+// SDLs are of following types:
+//
+// * Bitcode SDLs: They can either be a *.bc file or an archive of *.bc files.
+// For NVPTX, these libraries are post-clang linked following each
+// compilation. For AMDGPU, these libraries are linked one time
+// during the application link phase.
+//
+// * Machine-code SDLs: They are archive files. For AMDGPU, the process for
+// machine code SDLs is still in development. But they will be linked
+// by the LLVM tool lld.
+//
+// * Bundled objects that contain both host and device codes: Bundled objects
+// may also contain library code compiled from source. For NVPTX, the
+// bundle contains cubin. For AMDGPU, the bundle contains bitcode.
+//
+// For Bitcode and Machine-code SDLs, current compiler toolchains hardcode the
+// inclusion of specific SDLs such as math libraries and the OpenMP device
+// library libomptarget.
+void tools::AddStaticDeviceLibs(Compilation *C, const Tool *T,
+ const JobAction *JA,
+ const InputInfoList *Inputs, const Driver &D,
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ StringRef Arch, StringRef Target,
+ bool isBitCodeSDL) {
+
+ SmallVector<std::string, 8> LibraryPaths;
+ // Add search directories from LIBRARY_PATH env variable
+ std::optional<std::string> LibPath =
+ llvm::sys::Process::GetEnv("LIBRARY_PATH");
+ if (LibPath) {
+ SmallVector<StringRef, 8> Frags;
+ const char EnvPathSeparatorStr[] = {llvm::sys::EnvPathSeparator, '\0'};
+ llvm::SplitString(*LibPath, Frags, EnvPathSeparatorStr);
+ for (StringRef Path : Frags)
+ LibraryPaths.emplace_back(Path.trim());
+ }
+
+ // Add directories from user-specified -L options
+ for (std::string Search_Dir : DriverArgs.getAllArgValues(options::OPT_L))
+ LibraryPaths.emplace_back(Search_Dir);
+
+ // Add path to lib-debug folders
+ SmallString<256> DefaultLibPath = llvm::sys::path::parent_path(D.Dir);
+ llvm::sys::path::append(DefaultLibPath, CLANG_INSTALL_LIBDIR_BASENAME);
+ LibraryPaths.emplace_back(DefaultLibPath.c_str());
+
+ // Build list of Static Device Libraries SDLs specified by -l option
+ llvm::SmallSet<std::string, 16> SDLNames;
+ static const StringRef HostOnlyArchives[] = {
+ "omp", "cudart", "m", "gcc", "gcc_s", "pthread", "hip_hcc"};
+ for (auto SDLName : DriverArgs.getAllArgValues(options::OPT_l)) {
+ if (!llvm::is_contained(HostOnlyArchives, SDLName)) {
+ SDLNames.insert(std::string("-l") + SDLName);
+ }
+ }
+
+ for (auto Input : DriverArgs.getAllArgValues(options::OPT_INPUT)) {
+ auto FileName = StringRef(Input);
+ // Clang treats any unknown file types as archives and passes them to the
+ // linker. Files with extension 'lib' are classified as TY_Object by clang
+ // but they are usually archives. It is OK if the file is not really an
+ // archive since GetSDLFromOffloadArchive will check the magic of the file
+ // and only unbundle it if it is really an archive.
+ const StringRef LibFileExt = ".lib";
+ if (!llvm::sys::path::has_extension(FileName) ||
+ types::lookupTypeForExtension(
+ llvm::sys::path::extension(FileName).drop_front()) ==
+ types::TY_INVALID ||
+ llvm::sys::path::extension(FileName) == LibFileExt)
+ SDLNames.insert(Input);
+ }
+
+ // The search stops as soon as an SDL file is found. The driver then provides
+ // the full filename of the SDL to the llvm-link command. If no SDL is found
+ // after searching each LINKPATH with SEARCH-ORDER, it is possible that an
+ // archive file lib<libname>.a exists and may contain bundled object files.
+ for (auto SDLName : SDLNames) {
+ // This is the only call to SDLSearch
+ if (!SDLSearch(D, DriverArgs, CC1Args, LibraryPaths, SDLName, Arch, Target,
+ isBitCodeSDL)) {
+ GetSDLFromOffloadArchive(*C, D, *T, *JA, *Inputs, DriverArgs, CC1Args,
+ LibraryPaths, SDLName, Arch, Target,
+ isBitCodeSDL);
+ }
+ }
+}
+
static llvm::opt::Arg *
getAMDGPUCodeObjectArgument(const Driver &D, const llvm::opt::ArgList &Args) {
- // The last of -mcode-object-v3, -mno-code-object-v3 and
- // -mcode-object-version=<version> wins.
- return Args.getLastArg(options::OPT_mcode_object_v3_legacy,
- options::OPT_mno_code_object_v3_legacy,
- options::OPT_mcode_object_version_EQ);
+ return Args.getLastArg(options::OPT_mcode_object_version_EQ);
}
void tools::checkAMDGPUCodeObjectVersion(const Driver &D,
const llvm::opt::ArgList &Args) {
- const unsigned MinCodeObjVer = 2;
- const unsigned MaxCodeObjVer = 4;
-
- // Emit warnings for legacy options even if they are overridden.
- if (Args.hasArg(options::OPT_mno_code_object_v3_legacy))
- D.Diag(diag::warn_drv_deprecated_arg) << "-mno-code-object-v3"
- << "-mcode-object-version=2";
-
- if (Args.hasArg(options::OPT_mcode_object_v3_legacy))
- D.Diag(diag::warn_drv_deprecated_arg) << "-mcode-object-v3"
- << "-mcode-object-version=3";
+ const unsigned MinCodeObjVer = 4;
+ const unsigned MaxCodeObjVer = 5;
if (auto *CodeObjArg = getAMDGPUCodeObjectArgument(D, Args)) {
if (CodeObjArg->getOption().getID() ==
@@ -1627,17 +2670,8 @@ void tools::checkAMDGPUCodeObjectVersion(const Driver &D,
unsigned tools::getAMDGPUCodeObjectVersion(const Driver &D,
const llvm::opt::ArgList &Args) {
unsigned CodeObjVer = 4; // default
- if (auto *CodeObjArg = getAMDGPUCodeObjectArgument(D, Args)) {
- if (CodeObjArg->getOption().getID() ==
- options::OPT_mno_code_object_v3_legacy) {
- CodeObjVer = 2;
- } else if (CodeObjArg->getOption().getID() ==
- options::OPT_mcode_object_v3_legacy) {
- CodeObjVer = 3;
- } else {
- StringRef(CodeObjArg->getValue()).getAsInteger(0, CodeObjVer);
- }
- }
+ if (auto *CodeObjArg = getAMDGPUCodeObjectArgument(D, Args))
+ StringRef(CodeObjArg->getValue()).getAsInteger(0, CodeObjVer);
return CodeObjVer;
}
@@ -1649,10 +2683,12 @@ bool tools::haveAMDGPUCodeObjectVersionArgument(
void tools::addMachineOutlinerArgs(const Driver &D,
const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs,
- const llvm::Triple &Triple, bool IsLTO) {
+ const llvm::Triple &Triple, bool IsLTO,
+ const StringRef PluginOptPrefix) {
auto addArg = [&, IsLTO](const Twine &Arg) {
if (IsLTO) {
- CmdArgs.push_back(Args.MakeArgString("-plugin-opt=" + Arg));
+ assert(!PluginOptPrefix.empty() && "Cannot have empty PluginOptPrefix!");
+ CmdArgs.push_back(Args.MakeArgString(Twine(PluginOptPrefix) + Arg));
} else {
CmdArgs.push_back("-mllvm");
CmdArgs.push_back(Args.MakeArgString(Arg));
@@ -1665,9 +2701,7 @@ void tools::addMachineOutlinerArgs(const Driver &D,
// We only support -moutline in AArch64 and ARM targets right now. If
// we're not compiling for these, emit a warning and ignore the flag.
// Otherwise, add the proper mllvm flags.
- if (!(Triple.isARM() || Triple.isThumb() ||
- Triple.getArch() == llvm::Triple::aarch64 ||
- Triple.getArch() == llvm::Triple::aarch64_32)) {
+ if (!(Triple.isARM() || Triple.isThumb() || Triple.isAArch64())) {
D.Diag(diag::warn_drv_moutline_unsupported_opt) << Triple.getArchName();
} else {
addArg(Twine("-enable-machine-outliner"));
@@ -1685,8 +2719,14 @@ void tools::addOpenMPDeviceRTL(const Driver &D,
StringRef BitcodeSuffix,
const llvm::Triple &Triple) {
SmallVector<StringRef, 8> LibraryPaths;
+
+ // Add path to clang lib / lib64 folder.
+ SmallString<256> DefaultLibPath = llvm::sys::path::parent_path(D.Dir);
+ llvm::sys::path::append(DefaultLibPath, CLANG_INSTALL_LIBDIR_BASENAME);
+ LibraryPaths.emplace_back(DefaultLibPath.c_str());
+
// Add user defined library paths from LIBRARY_PATH.
- llvm::Optional<std::string> LibPath =
+ std::optional<std::string> LibPath =
llvm::sys::Process::GetEnv("LIBRARY_PATH");
if (LibPath) {
SmallVector<StringRef, 8> Frags;
@@ -1696,32 +2736,32 @@ void tools::addOpenMPDeviceRTL(const Driver &D,
LibraryPaths.emplace_back(Path.trim());
}
- // Add path to lib / lib64 folder.
- SmallString<256> DefaultLibPath = llvm::sys::path::parent_path(D.Dir);
- llvm::sys::path::append(DefaultLibPath, Twine("lib") + CLANG_LIBDIR_SUFFIX);
- LibraryPaths.emplace_back(DefaultLibPath.c_str());
-
OptSpecifier LibomptargetBCPathOpt =
- Triple.isAMDGCN() ? options::OPT_libomptarget_amdgcn_bc_path_EQ
+ Triple.isAMDGCN() ? options::OPT_libomptarget_amdgpu_bc_path_EQ
: options::OPT_libomptarget_nvptx_bc_path_EQ;
- StringRef ArchPrefix = Triple.isAMDGCN() ? "amdgcn" : "nvptx";
+ StringRef ArchPrefix = Triple.isAMDGCN() ? "amdgpu" : "nvptx";
+ std::string LibOmpTargetName =
+ ("libomptarget-" + ArchPrefix + "-" + BitcodeSuffix + ".bc").str();
+
// First check whether user specifies bc library
if (const Arg *A = DriverArgs.getLastArg(LibomptargetBCPathOpt)) {
- std::string LibOmpTargetName(A->getValue());
- if (llvm::sys::fs::exists(LibOmpTargetName)) {
+ SmallString<128> LibOmpTargetFile(A->getValue());
+ if (llvm::sys::fs::exists(LibOmpTargetFile) &&
+ llvm::sys::fs::is_directory(LibOmpTargetFile)) {
+ llvm::sys::path::append(LibOmpTargetFile, LibOmpTargetName);
+ }
+
+ if (llvm::sys::fs::exists(LibOmpTargetFile)) {
CC1Args.push_back("-mlink-builtin-bitcode");
- CC1Args.push_back(DriverArgs.MakeArgString(LibOmpTargetName));
+ CC1Args.push_back(DriverArgs.MakeArgString(LibOmpTargetFile));
} else {
D.Diag(diag::err_drv_omp_offload_target_bcruntime_not_found)
- << LibOmpTargetName;
+ << LibOmpTargetFile;
}
} else {
bool FoundBCLibrary = false;
- std::string LibOmpTargetName =
- "libomptarget-" + BitcodeSuffix.str() + ".bc";
-
for (StringRef LibraryPath : LibraryPaths) {
SmallString<128> LibOmpTargetFile(LibraryPath);
llvm::sys::path::append(LibOmpTargetFile, LibOmpTargetName);
@@ -1738,3 +2778,17 @@ void tools::addOpenMPDeviceRTL(const Driver &D,
<< LibOmpTargetName << ArchPrefix;
}
}
+void tools::addHIPRuntimeLibArgs(const ToolChain &TC, Compilation &C,
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) {
+ if ((C.getActiveOffloadKinds() & Action::OFK_HIP) &&
+ !Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_no_hip_rt)) {
+ TC.AddHIPRuntimeLibArgs(Args, CmdArgs);
+ } else {
+ // Claim "no HIP libraries" arguments if any
+ for (auto *Arg : Args.filtered(options::OPT_no_hip_rt)) {
+ Arg->claim();
+ }
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h
index c94c15864661..807867f13a5c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h
@@ -9,11 +9,15 @@
#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_COMMONARGS_H
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_COMMONARGS_H
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Multilib.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Option/ArgList.h"
#include "llvm/Support/CodeGen.h"
namespace clang {
@@ -37,18 +41,32 @@ bool addSanitizerRuntimes(const ToolChain &TC, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs);
void linkSanitizerRuntimeDeps(const ToolChain &TC,
+ const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs);
bool addXRayRuntime(const ToolChain &TC, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs);
-void linkXRayRuntimeDeps(const ToolChain &TC,
+void linkXRayRuntimeDeps(const ToolChain &TC, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs);
void AddRunTimeLibs(const ToolChain &TC, const Driver &D,
llvm::opt::ArgStringList &CmdArgs,
const llvm::opt::ArgList &Args);
+void AddStaticDeviceLibsLinking(Compilation &C, const Tool &T,
+ const JobAction &JA,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CmdArgs,
+ StringRef Arch, StringRef Target,
+ bool isBitCodeSDL);
+void AddStaticDeviceLibs(Compilation *C, const Tool *T, const JobAction *JA,
+ const InputInfoList *Inputs, const Driver &D,
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CmdArgs, StringRef Arch,
+ StringRef Target, bool isBitCodeSDL);
+
const char *SplitDebugName(const JobAction &JA, const llvm::opt::ArgList &Args,
const InputInfo &Input, const InputInfo &Output);
@@ -60,14 +78,28 @@ void addLTOOptions(const ToolChain &ToolChain, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs, const InputInfo &Output,
const InputInfo &Input, bool IsThinLTO);
+const char *RelocationModelName(llvm::Reloc::Model Model);
+
std::tuple<llvm::Reloc::Model, unsigned, bool>
ParsePICArgs(const ToolChain &ToolChain, const llvm::opt::ArgList &Args);
unsigned ParseFunctionAlignment(const ToolChain &TC,
const llvm::opt::ArgList &Args);
-unsigned ParseDebugDefaultVersion(const ToolChain &TC,
- const llvm::opt::ArgList &Args);
+void addDebugInfoKind(llvm::opt::ArgStringList &CmdArgs,
+ llvm::codegenoptions::DebugInfoKind DebugInfoKind);
+
+llvm::codegenoptions::DebugInfoKind
+debugLevelToInfoKind(const llvm::opt::Arg &A);
+
+// Extract the integer N from a string spelled "-dwarf-N", returning 0
+// on mismatch. The StringRef input (rather than an Arg) allows
+// for use by the "-Xassembler" option parser.
+unsigned DwarfVersionNum(StringRef ArgValue);
+// Find a DWARF format version option.
+// This function is a complementary for DwarfVersionNum().
+const llvm::opt::Arg *getDwarfNArg(const llvm::opt::ArgList &Args);
+unsigned getDwarfVersion(const ToolChain &TC, const llvm::opt::ArgList &Args);
void AddAssemblerKPIC(const ToolChain &ToolChain,
const llvm::opt::ArgList &Args,
@@ -75,12 +107,32 @@ void AddAssemblerKPIC(const ToolChain &ToolChain,
void addArchSpecificRPath(const ToolChain &TC, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs);
+void addOpenMPRuntimeLibraryPath(const ToolChain &TC,
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs);
/// Returns true, if an OpenMP runtime has been added.
bool addOpenMPRuntime(llvm::opt::ArgStringList &CmdArgs, const ToolChain &TC,
const llvm::opt::ArgList &Args,
bool ForceStaticHostRuntime = false,
bool IsOffloadingHost = false, bool GompNeedsRT = false);
+/// Adds Fortran runtime libraries to \p CmdArgs.
+void addFortranRuntimeLibs(const ToolChain &TC, const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs);
+
+/// Adds the path for the Fortran runtime libraries to \p CmdArgs.
+void addFortranRuntimeLibraryPath(const ToolChain &TC,
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs);
+
+void addHIPRuntimeLibArgs(const ToolChain &TC, Compilation &C,
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs);
+
+void addAsNeededOption(const ToolChain &TC, const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs, bool as_needed);
+
+llvm::opt::Arg *getLastCSProfileGenerateArg(const llvm::opt::ArgList &Args);
llvm::opt::Arg *getLastProfileUseArg(const llvm::opt::ArgList &Args);
llvm::opt::Arg *getLastProfileSampleUseArg(const llvm::opt::ArgList &Args);
@@ -92,6 +144,9 @@ llvm::StringRef getLTOParallelism(const llvm::opt::ArgList &Args,
bool areOptimizationsEnabled(const llvm::opt::ArgList &Args);
bool isUseSeparateSections(const llvm::Triple &Triple);
+// Parse -mtls-dialect=. Return true if the target supports both general-dynamic
+// and TLSDESC, and TLSDESC is requested.
+bool isTLSDESCEnabled(const ToolChain &TC, const llvm::opt::ArgList &Args);
/// \p EnvVar is split by system delimiter for environment variables.
/// If \p ArgName is "-I", "-L", or an empty string, each entry from \p EnvVar
@@ -107,8 +162,13 @@ void AddTargetFeature(const llvm::opt::ArgList &Args,
llvm::opt::OptSpecifier OnOpt,
llvm::opt::OptSpecifier OffOpt, StringRef FeatureName);
-std::string getCPUName(const llvm::opt::ArgList &Args, const llvm::Triple &T,
- bool FromAs = false);
+std::string getCPUName(const Driver &D, const llvm::opt::ArgList &Args,
+ const llvm::Triple &T, bool FromAs = false);
+
+void getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs, bool ForAS,
+ bool IsAux = false);
/// Iterate \p Args and convert -mxxx to +xxx and -mno-xxx to -xxx and
/// append it to \p Features.
@@ -116,13 +176,13 @@ std::string getCPUName(const llvm::opt::ArgList &Args, const llvm::Triple &T,
/// Note: Since \p Features may contain default values before calling
/// this function, or may be appended with entries to override arguments,
/// entries in \p Features are not unique.
-void handleTargetFeaturesGroup(const llvm::opt::ArgList &Args,
+void handleTargetFeaturesGroup(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
std::vector<StringRef> &Features,
llvm::opt::OptSpecifier Group);
/// If there are multiple +xxx or -xxx features, keep the last one.
-std::vector<StringRef>
-unifyTargetFeatures(const std::vector<StringRef> &Features);
+SmallVector<StringRef> unifyTargetFeatures(ArrayRef<StringRef> Features);
/// Handles the -save-stats option and returns the filename to save statistics
/// to.
@@ -130,13 +190,13 @@ SmallString<128> getStatsFileName(const llvm::opt::ArgList &Args,
const InputInfo &Output,
const InputInfo &Input, const Driver &D);
-/// \p Flag must be a flag accepted by the driver with its leading '-' removed,
-// otherwise '-print-multi-lib' will not emit them correctly.
-void addMultilibFlag(bool Enabled, const char *const Flag,
+/// \p Flag must be a flag accepted by the driver.
+void addMultilibFlag(bool Enabled, const StringRef Flag,
Multilib::flags_list &Flags);
void addX86AlignBranchArgs(const Driver &D, const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs, bool IsLTO);
+ llvm::opt::ArgStringList &CmdArgs, bool IsLTO,
+ const StringRef PluginOptPrefix = "");
void checkAMDGPUCodeObjectVersion(const Driver &D,
const llvm::opt::ArgList &Args);
@@ -149,7 +209,8 @@ bool haveAMDGPUCodeObjectVersionArgument(const Driver &D,
void addMachineOutlinerArgs(const Driver &D, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs,
- const llvm::Triple &Triple, bool IsLTO);
+ const llvm::Triple &Triple, bool IsLTO,
+ const StringRef PluginOptPrefix = "");
void addOpenMPDeviceRTL(const Driver &D, const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
@@ -158,4 +219,7 @@ void addOpenMPDeviceRTL(const Driver &D, const llvm::opt::ArgList &DriverArgs,
} // end namespace driver
} // end namespace clang
+clang::CodeGenOptions::FramePointerKind
+getFramePointerKind(const llvm::opt::ArgList &Args, const llvm::Triple &Triple);
+
#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_COMMONARGS_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Contiki.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Contiki.cpp
deleted file mode 100644
index 5dda1b1b09fb..000000000000
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Contiki.cpp
+++ /dev/null
@@ -1,27 +0,0 @@
-//===--- Contiki.cpp - Contiki ToolChain Implementations --------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "Contiki.h"
-#include "CommonArgs.h"
-
-using namespace clang::driver;
-using namespace clang::driver::toolchains;
-using namespace clang;
-using namespace llvm::opt;
-
-Contiki::Contiki(const Driver &D, const llvm::Triple &Triple,
- const ArgList &Args)
- : Generic_ELF(D, Triple, Args) {}
-
-SanitizerMask Contiki::getSupportedSanitizers() const {
- const bool IsX86 = getTriple().getArch() == llvm::Triple::x86;
- SanitizerMask Res = ToolChain::getSupportedSanitizers();
- if (IsX86)
- Res |= SanitizerKind::SafeStack;
- return Res;
-}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Contiki.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Contiki.h
deleted file mode 100644
index 627d80bdda09..000000000000
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Contiki.h
+++ /dev/null
@@ -1,39 +0,0 @@
-//===--- Contiki.h - Contiki ToolChain Implementations ----------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_CONTIKI_H
-#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_CONTIKI_H
-
-#include "Gnu.h"
-#include "clang/Driver/ToolChain.h"
-
-namespace clang {
-namespace driver {
-namespace toolchains {
-
-class LLVM_LIBRARY_VISIBILITY Contiki : public Generic_ELF {
-public:
- Contiki(const Driver &D, const llvm::Triple &Triple,
- const llvm::opt::ArgList &Args);
-
- // No support for finding a C++ standard library yet.
- void addLibCxxIncludePaths(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const override {}
- void addLibStdCxxIncludePaths(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const override {}
-
- SanitizerMask getSupportedSanitizers() const override;
-};
-
-} // end namespace toolchains
-} // end namespace driver
-} // end namespace clang
-
-#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_CONTIKI_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.cpp
index 07abf4f83f7d..3c5dfba329cf 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.cpp
@@ -94,7 +94,8 @@ void tools::CrossWindows::Linker::ConstructJob(
CmdArgs.push_back("-m");
switch (TC.getArch()) {
default:
- llvm_unreachable("unsupported architecture");
+ D.Diag(diag::err_target_unknown_triple) << TC.getEffectiveTriple().str();
+ break;
case llvm::Triple::arm:
case llvm::Triple::thumb:
// FIXME: this is incorrect for WinCE
@@ -185,7 +186,7 @@ void tools::CrossWindows::Linker::ConstructJob(
}
}
- if (TC.getSanitizerArgs().needsAsanRt()) {
+ if (TC.getSanitizerArgs(Args).needsAsanRt()) {
// TODO handle /MT[d] /MD[d]
if (Args.hasArg(options::OPT_shared)) {
CmdArgs.push_back(TC.getCompilerRTArgString(Args, "asan_dll_thunk"));
@@ -213,17 +214,18 @@ CrossWindowsToolChain::CrossWindowsToolChain(const Driver &D,
const llvm::opt::ArgList &Args)
: Generic_GCC(D, T, Args) {}
-bool CrossWindowsToolChain::IsUnwindTablesDefault(const ArgList &Args) const {
+ToolChain::UnwindTableLevel
+CrossWindowsToolChain::getDefaultUnwindTableLevel(const ArgList &Args) const {
// FIXME: all non-x86 targets need unwind tables, however, LLVM currently does
// not know how to emit them.
- return getArch() == llvm::Triple::x86_64;
+ return getArch() == llvm::Triple::x86_64 ? UnwindTableLevel::Asynchronous : UnwindTableLevel::None;
}
bool CrossWindowsToolChain::isPICDefault() const {
return getArch() == llvm::Triple::x86_64;
}
-bool CrossWindowsToolChain::isPIEDefault() const {
+bool CrossWindowsToolChain::isPIEDefault(const llvm::opt::ArgList &Args) const {
return getArch() == llvm::Triple::x86_64;
}
@@ -273,8 +275,11 @@ AddClangCXXStdlibIncludeArgs(const llvm::opt::ArgList &DriverArgs,
void CrossWindowsToolChain::
AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const {
- if (GetCXXStdlibType(Args) == ToolChain::CST_Libcxx)
+ if (GetCXXStdlibType(Args) == ToolChain::CST_Libcxx) {
CmdArgs.push_back("-lc++");
+ if (Args.hasArg(options::OPT_fexperimental_library))
+ CmdArgs.push_back("-lc++experimental");
+ }
}
clang::SanitizerMask CrossWindowsToolChain::getSupportedSanitizers() const {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.h
index ffe75332c2e8..c5df55a24296 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CrossWindows.h
@@ -20,7 +20,7 @@ namespace driver {
namespace tools {
namespace CrossWindows {
-class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
+class LLVM_LIBRARY_VISIBILITY Assembler final : public Tool {
public:
Assembler(const ToolChain &TC) : Tool("CrossWindows::Assembler", "as", TC) {}
@@ -32,7 +32,7 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
public:
Linker(const ToolChain &TC) : Tool("CrossWindows::Linker", "ld", TC) {}
@@ -54,10 +54,10 @@ public:
CrossWindowsToolChain(const Driver &D, const llvm::Triple &T,
const llvm::opt::ArgList &Args);
- bool IsIntegratedAssemblerDefault() const override { return true; }
- bool IsUnwindTablesDefault(const llvm::opt::ArgList &Args) const override;
+ UnwindTableLevel
+ getDefaultUnwindTableLevel(const llvm::opt::ArgList &Args) const override;
bool isPICDefault() const override;
- bool isPIEDefault() const override;
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override;
bool isPICDefaultForced() const override;
LangOptions::StackProtectorMode
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
index 769eae14df51..1462576ca870 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
@@ -16,15 +16,17 @@
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
-#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/Host.h"
+#include "llvm/Support/FormatAdapters.h"
+#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
-#include "llvm/Support/TargetParser.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/TargetParser.h"
#include <system_error>
using namespace clang::driver;
@@ -34,25 +36,6 @@ using namespace clang;
using namespace llvm::opt;
namespace {
-struct CudaVersionInfo {
- std::string DetectedVersion;
- CudaVersion Version;
-};
-// Parses the contents of version.txt in an CUDA installation. It should
-// contain one line of the from e.g. "CUDA Version 7.5.2".
-CudaVersionInfo parseCudaVersionFile(llvm::StringRef V) {
- V = V.trim();
- if (!V.startswith("CUDA Version "))
- return {V.str(), CudaVersion::UNKNOWN};
- V = V.substr(strlen("CUDA Version "));
- SmallVector<StringRef,4> VersionParts;
- V.split(VersionParts, '.');
- return {"version.txt: " + V.str() + ".",
- VersionParts.size() < 2
- ? CudaVersion::UNKNOWN
- : CudaStringToVersion(
- join_items(".", VersionParts[0], VersionParts[1]))};
-}
CudaVersion getCudaVersion(uint32_t raw_version) {
if (raw_version < 7050)
@@ -77,15 +60,37 @@ CudaVersion getCudaVersion(uint32_t raw_version) {
return CudaVersion::CUDA_110;
if (raw_version < 11020)
return CudaVersion::CUDA_111;
- return CudaVersion::LATEST;
+ if (raw_version < 11030)
+ return CudaVersion::CUDA_112;
+ if (raw_version < 11040)
+ return CudaVersion::CUDA_113;
+ if (raw_version < 11050)
+ return CudaVersion::CUDA_114;
+ if (raw_version < 11060)
+ return CudaVersion::CUDA_115;
+ if (raw_version < 11070)
+ return CudaVersion::CUDA_116;
+ if (raw_version < 11080)
+ return CudaVersion::CUDA_117;
+ if (raw_version < 11090)
+ return CudaVersion::CUDA_118;
+ if (raw_version < 12010)
+ return CudaVersion::CUDA_120;
+ if (raw_version < 12020)
+ return CudaVersion::CUDA_121;
+ if (raw_version < 12030)
+ return CudaVersion::CUDA_122;
+ if (raw_version < 12040)
+ return CudaVersion::CUDA_123;
+ return CudaVersion::NEW;
}
-CudaVersionInfo parseCudaHFile(llvm::StringRef Input) {
+CudaVersion parseCudaHFile(llvm::StringRef Input) {
// Helper lambda which skips the words if the line starts with them or returns
- // None otherwise.
+ // std::nullopt otherwise.
auto StartsWithWords =
[](llvm::StringRef Line,
- const SmallVector<StringRef, 3> words) -> llvm::Optional<StringRef> {
+ const SmallVector<StringRef, 3> words) -> std::optional<StringRef> {
for (StringRef word : words) {
if (!Line.consume_front(word))
return {};
@@ -100,21 +105,27 @@ CudaVersionInfo parseCudaHFile(llvm::StringRef Input) {
StartsWithWords(Input.ltrim(), {"#", "define", "CUDA_VERSION"})) {
uint32_t RawVersion;
Line->consumeInteger(10, RawVersion);
- return {"cuda.h: CUDA_VERSION=" + Twine(RawVersion).str() + ".",
- getCudaVersion(RawVersion)};
+ return getCudaVersion(RawVersion);
}
// Find next non-empty line.
Input = Input.drop_front(Input.find_first_of("\n\r")).ltrim();
}
- return {"cuda.h: CUDA_VERSION not found.", CudaVersion::UNKNOWN};
+ return CudaVersion::UNKNOWN;
}
} // namespace
void CudaInstallationDetector::WarnIfUnsupportedVersion() {
- if (DetectedVersionIsNotSupported)
- D.Diag(diag::warn_drv_unknown_cuda_version)
- << DetectedVersion
- << CudaVersionToString(CudaVersion::LATEST_SUPPORTED);
+ if (Version > CudaVersion::PARTIALLY_SUPPORTED) {
+ std::string VersionString = CudaVersionToString(Version);
+ if (!VersionString.empty())
+ VersionString.insert(0, " ");
+ D.Diag(diag::warn_drv_new_cuda_version)
+ << VersionString
+ << (CudaVersion::PARTIALLY_SUPPORTED != CudaVersion::FULLY_SUPPORTED)
+ << CudaVersionToString(CudaVersion::PARTIALLY_SUPPORTED);
+ } else if (Version > CudaVersion::FULLY_SUPPORTED)
+ D.Diag(diag::warn_drv_partially_supported_cuda_version)
+ << CudaVersionToString(Version);
}
CudaInstallationDetector::CudaInstallationDetector(
@@ -193,44 +204,17 @@ CudaInstallationDetector::CudaInstallationDetector(
if (CheckLibDevice && !FS.exists(LibDevicePath))
continue;
- // On Linux, we have both lib and lib64 directories, and we need to choose
- // based on our triple. On MacOS, we have only a lib directory.
- //
- // It's sufficient for our purposes to be flexible: If both lib and lib64
- // exist, we choose whichever one matches our triple. Otherwise, if only
- // lib exists, we use it.
- if (HostTriple.isArch64Bit() && FS.exists(InstallPath + "/lib64"))
- LibPath = InstallPath + "/lib64";
- else if (FS.exists(InstallPath + "/lib"))
- LibPath = InstallPath + "/lib";
- else
- continue;
-
- CudaVersionInfo VersionInfo = {"", CudaVersion::UNKNOWN};
- if (auto VersionFile = FS.getBufferForFile(InstallPath + "/version.txt"))
- VersionInfo = parseCudaVersionFile((*VersionFile)->getBuffer());
- // If version file didn't give us the version, try to find it in cuda.h
- if (VersionInfo.Version == CudaVersion::UNKNOWN)
- if (auto CudaHFile = FS.getBufferForFile(InstallPath + "/include/cuda.h"))
- VersionInfo = parseCudaHFile((*CudaHFile)->getBuffer());
- // As the last resort, make an educated guess between CUDA-7.0, (which had
- // no version.txt file and had old-style libdevice bitcode ) and an unknown
- // recent CUDA version (no version.txt, new style bitcode).
- if (VersionInfo.Version == CudaVersion::UNKNOWN) {
- VersionInfo.Version = (FS.exists(LibDevicePath + "/libdevice.10.bc"))
- ? Version = CudaVersion::LATEST
- : Version = CudaVersion::CUDA_70;
- VersionInfo.DetectedVersion =
- "No version found in version.txt or cuda.h.";
+ Version = CudaVersion::UNKNOWN;
+ if (auto CudaHFile = FS.getBufferForFile(InstallPath + "/include/cuda.h"))
+ Version = parseCudaHFile((*CudaHFile)->getBuffer());
+ // As the last resort, make an educated guess between CUDA-7.0, which had
+ // old-style libdevice bitcode, and an unknown recent CUDA version.
+ if (Version == CudaVersion::UNKNOWN) {
+ Version = FS.exists(LibDevicePath + "/libdevice.10.bc")
+ ? CudaVersion::NEW
+ : CudaVersion::CUDA_70;
}
- Version = VersionInfo.Version;
- DetectedVersion = VersionInfo.DetectedVersion;
-
- // TODO(tra): remove the warning once we have all features of 10.2
- // and 11.0 implemented.
- DetectedVersionIsNotSupported = Version > CudaVersion::LATEST_SUPPORTED;
-
if (Version >= CudaVersion::CUDA_90) {
// CUDA-9+ uses single libdevice file for all GPU variants.
std::string FilePath = LibDevicePath + "/libdevice.10.bc";
@@ -254,7 +238,7 @@ CudaInstallationDetector::CudaInstallationDetector(
// Process all bitcode filenames that look like
// libdevice.compute_XX.YY.bc
const StringRef LibDeviceName = "libdevice.";
- if (!(FileName.startswith(LibDeviceName) && FileName.endswith(".bc")))
+ if (!(FileName.starts_with(LibDeviceName) && FileName.ends_with(".bc")))
continue;
StringRef GpuArch = FileName.slice(
LibDeviceName.size(), FileName.find('.', LibDeviceName.size()));
@@ -319,8 +303,6 @@ void CudaInstallationDetector::AddCudaIncludeArgs(
return;
}
- CC1Args.push_back("-internal-isystem");
- CC1Args.push_back(DriverArgs.MakeArgString(getIncludePath()));
CC1Args.push_back("-include");
CC1Args.push_back("__clang_cuda_runtime_wrapper.h");
}
@@ -395,18 +377,20 @@ void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const ArgList &Args,
const char *LinkingOutput) const {
const auto &TC =
- static_cast<const toolchains::CudaToolChain &>(getToolChain());
+ static_cast<const toolchains::NVPTXToolChain &>(getToolChain());
assert(TC.getTriple().isNVPTX() && "Wrong platform");
StringRef GPUArchName;
- // If this is an OpenMP action we need to extract the device architecture
- // from the -march=arch option. This option may come from -Xopenmp-target
- // flag or the default value.
- if (JA.isDeviceOffloading(Action::OFK_OpenMP)) {
+ // If this is a CUDA action we need to extract the device architecture
+ // from the Job's associated architecture, otherwise use the -march=arch
+ // option. This option may come from -Xopenmp-target flag or the default
+ // value.
+ if (JA.isDeviceOffloading(Action::OFK_Cuda)) {
+ GPUArchName = JA.getOffloadingArch();
+ } else {
GPUArchName = Args.getLastArgValue(options::OPT_march_EQ);
assert(!GPUArchName.empty() && "Must have an architecture passed in.");
- } else
- GPUArchName = JA.getOffloadingArch();
+ }
// Obtain architecture from the action.
CudaArch gpu_arch = StringToCudaArch(GPUArchName);
@@ -467,22 +451,38 @@ void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("--gpu-name");
CmdArgs.push_back(Args.MakeArgString(CudaArchToString(gpu_arch)));
CmdArgs.push_back("--output-file");
- CmdArgs.push_back(Args.MakeArgString(TC.getInputFilename(Output)));
- for (const auto& II : Inputs)
+ std::string OutputFileName = TC.getInputFilename(Output);
+
+ // If we are invoking `nvlink` internally we need to output a `.cubin` file.
+ // FIXME: This should hopefully be removed if NVIDIA updates their tooling.
+ if (!C.getInputArgs().getLastArg(options::OPT_c)) {
+ SmallString<256> Filename(Output.getFilename());
+ llvm::sys::path::replace_extension(Filename, "cubin");
+ OutputFileName = Filename.str();
+ }
+ if (Output.isFilename() && OutputFileName != Output.getFilename())
+ C.addTempFile(Args.MakeArgString(OutputFileName));
+
+ CmdArgs.push_back(Args.MakeArgString(OutputFileName));
+ for (const auto &II : Inputs)
CmdArgs.push_back(Args.MakeArgString(II.getFilename()));
- for (const auto& A : Args.getAllArgValues(options::OPT_Xcuda_ptxas))
+ for (const auto &A : Args.getAllArgValues(options::OPT_Xcuda_ptxas))
CmdArgs.push_back(Args.MakeArgString(A));
- bool Relocatable = false;
+ bool Relocatable;
if (JA.isOffloading(Action::OFK_OpenMP))
// In OpenMP we need to generate relocatable code.
Relocatable = Args.hasFlag(options::OPT_fopenmp_relocatable_target,
options::OPT_fnoopenmp_relocatable_target,
/*Default=*/true);
else if (JA.isOffloading(Action::OFK_Cuda))
- Relocatable = Args.hasFlag(options::OPT_fgpu_rdc,
- options::OPT_fno_gpu_rdc, /*Default=*/false);
+ // In CUDA we generate relocatable code by default.
+ Relocatable = Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
+ /*Default=*/false);
+ else
+ // Otherwise, we are compiling directly and should create linkable output.
+ Relocatable = true;
if (Relocatable)
CmdArgs.push_back("-c");
@@ -518,11 +518,11 @@ static bool shouldIncludePTX(const ArgList &Args, const char *gpu_arch) {
// All inputs to this linker must be from CudaDeviceActions, as we need to look
// at the Inputs' Actions in order to figure out which GPU architecture they
// correspond to.
-void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
+void NVPTX::FatBinary::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
const auto &TC =
static_cast<const toolchains::CudaToolChain &>(getToolChain());
assert(TC.getTriple().isNVPTX() && "Wrong platform");
@@ -536,7 +536,7 @@ void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (mustEmitDebugInfo(Args) == EmitSameDebugInfoAsHost)
CmdArgs.push_back("-g");
- for (const auto& II : Inputs) {
+ for (const auto &II : Inputs) {
auto *A = II.getAction();
assert(A->getInputs().size() == 1 &&
"Device offload action is expected to have a single input");
@@ -553,11 +553,12 @@ void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *Arch = (II.getType() == types::TY_PP_Asm)
? CudaArchToVirtualArchString(gpu_arch)
: gpu_arch_str;
- CmdArgs.push_back(Args.MakeArgString(llvm::Twine("--image=profile=") +
- Arch + ",file=" + II.getFilename()));
+ CmdArgs.push_back(
+ Args.MakeArgString(llvm::Twine("--image=profile=") + Arch +
+ ",file=" + getToolChain().getInputFilename(II)));
}
- for (const auto& A : Args.getAllArgValues(options::OPT_Xcuda_fatbinary))
+ for (const auto &A : Args.getAllArgValues(options::OPT_Xcuda_fatbinary))
CmdArgs.push_back(Args.MakeArgString(A));
const char *Exec = Args.MakeArgString(TC.GetProgramPath("fatbinary"));
@@ -568,36 +569,31 @@ void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Exec, CmdArgs, Inputs, Output));
}
-void NVPTX::OpenMPLinker::ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
+void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
const auto &TC =
- static_cast<const toolchains::CudaToolChain &>(getToolChain());
- assert(TC.getTriple().isNVPTX() && "Wrong platform");
-
+ static_cast<const toolchains::NVPTXToolChain &>(getToolChain());
ArgStringList CmdArgs;
- // OpenMP uses nvlink to link cubin files. The result will be embedded in the
- // host binary by the host linker.
- assert(!JA.isHostOffloading(Action::OFK_OpenMP) &&
- "CUDA toolchain not expected for an OpenMP host device.");
+ assert(TC.getTriple().isNVPTX() && "Wrong platform");
+ assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
if (Output.isFilename()) {
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- } else
- assert(Output.isNothing() && "Invalid output.");
+ }
+
if (mustEmitDebugInfo(Args) == EmitSameDebugInfoAsHost)
CmdArgs.push_back("-g");
if (Args.hasArg(options::OPT_v))
CmdArgs.push_back("-v");
- StringRef GPUArch =
- Args.getLastArgValue(options::OPT_march_EQ);
- assert(!GPUArch.empty() && "At least one GPU Arch required for ptxas.");
+ StringRef GPUArch = Args.getLastArgValue(options::OPT_march_EQ);
+ assert(!GPUArch.empty() && "At least one GPU Arch required for nvlink.");
CmdArgs.push_back("-arch");
CmdArgs.push_back(Args.MakeArgString(GPUArch));
@@ -608,14 +604,12 @@ void NVPTX::OpenMPLinker::ConstructJob(Compilation &C, const JobAction &JA,
// Add paths for the default clang library path.
SmallString<256> DefaultLibPath =
llvm::sys::path::parent_path(TC.getDriver().Dir);
- llvm::sys::path::append(DefaultLibPath, "lib" CLANG_LIBDIR_SUFFIX);
+ llvm::sys::path::append(DefaultLibPath, CLANG_INSTALL_LIBDIR_BASENAME);
CmdArgs.push_back(Args.MakeArgString(Twine("-L") + DefaultLibPath));
for (const auto &II : Inputs) {
- if (II.getType() == types::TY_LLVM_IR ||
- II.getType() == types::TY_LTO_IR ||
- II.getType() == types::TY_LTO_BC ||
- II.getType() == types::TY_LLVM_BC) {
+ if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
+ II.getType() == types::TY_LTO_BC || II.getType() == types::TY_LLVM_BC) {
C.getDriver().Diag(diag::err_drv_no_linker_llvm_support)
<< getToolChain().getTripleString();
continue;
@@ -626,56 +620,179 @@ void NVPTX::OpenMPLinker::ConstructJob(Compilation &C, const JobAction &JA,
if (!II.isFilename())
continue;
- const char *CubinF = C.addTempFile(
- C.getArgs().MakeArgString(getToolChain().getInputFilename(II)));
+ // The 'nvlink' application performs RDC-mode linking when given a '.o'
+ // file and device linking when given a '.cubin' file. We always want to
+ // perform device linking, so just rename any '.o' files.
+ // FIXME: This should hopefully be removed if NVIDIA updates their tooling.
+ auto InputFile = getToolChain().getInputFilename(II);
+ if (llvm::sys::path::extension(InputFile) != ".cubin") {
+ // If there are no actions above this one then this is direct input and we
+ // can copy it. Otherwise the input is internal so a `.cubin` file should
+ // exist.
+ if (II.getAction() && II.getAction()->getInputs().size() == 0) {
+ const char *CubinF =
+ Args.MakeArgString(getToolChain().getDriver().GetTemporaryPath(
+ llvm::sys::path::stem(InputFile), "cubin"));
+ if (llvm::sys::fs::copy_file(InputFile, C.addTempFile(CubinF)))
+ continue;
- CmdArgs.push_back(CubinF);
+ CmdArgs.push_back(CubinF);
+ } else {
+ SmallString<256> Filename(InputFile);
+ llvm::sys::path::replace_extension(Filename, "cubin");
+ CmdArgs.push_back(Args.MakeArgString(Filename));
+ }
+ } else {
+ CmdArgs.push_back(Args.MakeArgString(InputFile));
+ }
}
- const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath("nvlink"));
C.addCommand(std::make_unique<Command>(
JA, *this,
ResponseFileSupport{ResponseFileSupport::RF_Full, llvm::sys::WEM_UTF8,
"--options-file"},
- Exec, CmdArgs, Inputs, Output));
+ Args.MakeArgString(getToolChain().GetProgramPath("nvlink")), CmdArgs,
+ Inputs, Output));
}
-/// CUDA toolchain. Our assembler is ptxas, and our "linker" is fatbinary,
-/// which isn't properly a linker but nonetheless performs the step of stitching
-/// together object files from the assembler into a single blob.
+void NVPTX::getNVPTXTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
+ std::vector<StringRef> &Features) {
+ if (Args.hasArg(options::OPT_cuda_feature_EQ)) {
+ StringRef PtxFeature =
+ Args.getLastArgValue(options::OPT_cuda_feature_EQ, "+ptx42");
+ Features.push_back(Args.MakeArgString(PtxFeature));
+ return;
+ }
+ CudaInstallationDetector CudaInstallation(D, Triple, Args);
-CudaToolChain::CudaToolChain(const Driver &D, const llvm::Triple &Triple,
- const ToolChain &HostTC, const ArgList &Args,
- const Action::OffloadKind OK)
- : ToolChain(D, Triple, Args), HostTC(HostTC),
- CudaInstallation(D, HostTC.getTriple(), Args), OK(OK) {
- if (CudaInstallation.isValid()) {
- CudaInstallation.WarnIfUnsupportedVersion();
- getProgramPaths().push_back(std::string(CudaInstallation.getBinPath()));
+ // New CUDA versions often introduce new instructions that are only supported
+ // by new PTX version, so we need to raise PTX level to enable them in NVPTX
+ // back-end.
+ const char *PtxFeature = nullptr;
+ switch (CudaInstallation.version()) {
+#define CASE_CUDA_VERSION(CUDA_VER, PTX_VER) \
+ case CudaVersion::CUDA_##CUDA_VER: \
+ PtxFeature = "+ptx" #PTX_VER; \
+ break;
+ CASE_CUDA_VERSION(123, 83);
+ CASE_CUDA_VERSION(122, 82);
+ CASE_CUDA_VERSION(121, 81);
+ CASE_CUDA_VERSION(120, 80);
+ CASE_CUDA_VERSION(118, 78);
+ CASE_CUDA_VERSION(117, 77);
+ CASE_CUDA_VERSION(116, 76);
+ CASE_CUDA_VERSION(115, 75);
+ CASE_CUDA_VERSION(114, 74);
+ CASE_CUDA_VERSION(113, 73);
+ CASE_CUDA_VERSION(112, 72);
+ CASE_CUDA_VERSION(111, 71);
+ CASE_CUDA_VERSION(110, 70);
+ CASE_CUDA_VERSION(102, 65);
+ CASE_CUDA_VERSION(101, 64);
+ CASE_CUDA_VERSION(100, 63);
+ CASE_CUDA_VERSION(92, 61);
+ CASE_CUDA_VERSION(91, 61);
+ CASE_CUDA_VERSION(90, 60);
+#undef CASE_CUDA_VERSION
+ default:
+ PtxFeature = "+ptx42";
}
+ Features.push_back(PtxFeature);
+}
+
+/// NVPTX toolchain. Our assembler is ptxas, and our linker is nvlink. This
+/// operates as a stand-alone version of the NVPTX tools without the host
+/// toolchain.
+NVPTXToolChain::NVPTXToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::Triple &HostTriple,
+ const ArgList &Args, bool Freestanding = false)
+ : ToolChain(D, Triple, Args), CudaInstallation(D, HostTriple, Args),
+ Freestanding(Freestanding) {
+ if (CudaInstallation.isValid())
+ getProgramPaths().push_back(std::string(CudaInstallation.getBinPath()));
// Lookup binaries into the driver directory, this is used to
- // discover the clang-offload-bundler executable.
+ // discover the 'nvptx-arch' executable.
getProgramPaths().push_back(getDriver().Dir);
}
-std::string CudaToolChain::getInputFilename(const InputInfo &Input) const {
- // Only object files are changed, for example assembly files keep their .s
- // extensions. CUDA also continues to use .o as they don't use nvlink but
- // fatbinary.
- if (!(OK == Action::OFK_OpenMP && Input.getType() == types::TY_Object))
- return ToolChain::getInputFilename(Input);
+/// We only need the host triple to locate the CUDA binary utilities, use the
+/// system's default triple if not provided.
+NVPTXToolChain::NVPTXToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : NVPTXToolChain(D, Triple, llvm::Triple(LLVM_HOST_TRIPLE), Args,
+ /*Freestanding=*/true) {}
- // Replace extension for object files with cubin because nvlink relies on
- // these particular file names.
- SmallString<256> Filename(ToolChain::getInputFilename(Input));
- llvm::sys::path::replace_extension(Filename, "cubin");
- return std::string(Filename.str());
+llvm::opt::DerivedArgList *
+NVPTXToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
+ StringRef BoundArch,
+ Action::OffloadKind DeviceOffloadKind) const {
+ DerivedArgList *DAL =
+ ToolChain::TranslateArgs(Args, BoundArch, DeviceOffloadKind);
+ if (!DAL)
+ DAL = new DerivedArgList(Args.getBaseArgs());
+
+ const OptTable &Opts = getDriver().getOpts();
+
+ for (Arg *A : Args)
+ if (!llvm::is_contained(*DAL, A))
+ DAL->append(A);
+
+ if (!DAL->hasArg(options::OPT_march_EQ))
+ DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ),
+ CudaArchToString(CudaArch::CudaDefault));
+
+ return DAL;
+}
+
+void NVPTXToolChain::addClangTargetOptions(
+ const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadingKind) const {
+ // If we are compiling with a standalone NVPTX toolchain we want to try to
+ // mimic a standard environment as much as possible. So we enable lowering
+ // ctor / dtor functions to global symbols that can be registered.
+ if (Freestanding)
+ CC1Args.append({"-mllvm", "--nvptx-lower-global-ctor-dtor"});
+}
+
+bool NVPTXToolChain::supportsDebugInfoOption(const llvm::opt::Arg *A) const {
+ const Option &O = A->getOption();
+ return (O.matches(options::OPT_gN_Group) &&
+ !O.matches(options::OPT_gmodules)) ||
+ O.matches(options::OPT_g_Flag) ||
+ O.matches(options::OPT_ggdbN_Group) || O.matches(options::OPT_ggdb) ||
+ O.matches(options::OPT_gdwarf) || O.matches(options::OPT_gdwarf_2) ||
+ O.matches(options::OPT_gdwarf_3) || O.matches(options::OPT_gdwarf_4) ||
+ O.matches(options::OPT_gdwarf_5) ||
+ O.matches(options::OPT_gcolumn_info);
}
+void NVPTXToolChain::adjustDebugInfoKind(
+ llvm::codegenoptions::DebugInfoKind &DebugInfoKind,
+ const ArgList &Args) const {
+ switch (mustEmitDebugInfo(Args)) {
+ case DisableDebugInfo:
+ DebugInfoKind = llvm::codegenoptions::NoDebugInfo;
+ break;
+ case DebugDirectivesOnly:
+ DebugInfoKind = llvm::codegenoptions::DebugDirectivesOnly;
+ break;
+ case EmitSameDebugInfoAsHost:
+ // Use same debug info level as the host.
+ break;
+ }
+}
+
+/// CUDA toolchain. Our assembler is ptxas, and our "linker" is fatbinary,
+/// which isn't properly a linker but nonetheless performs the step of stitching
+/// together object files from the assembler into a single blob.
+
+CudaToolChain::CudaToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ToolChain &HostTC, const ArgList &Args)
+ : NVPTXToolChain(D, Triple, HostTC.getTriple(), Args), HostTC(HostTC) {}
+
void CudaToolChain::addClangTargetOptions(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args,
+ const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadingKind) const {
HostTC.addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadingKind);
@@ -686,11 +803,15 @@ void CudaToolChain::addClangTargetOptions(
"Only OpenMP or CUDA offloading kinds are supported for NVIDIA GPUs.");
if (DeviceOffloadingKind == Action::OFK_Cuda) {
- CC1Args.push_back("-fcuda-is-device");
-
- if (DriverArgs.hasFlag(options::OPT_fcuda_approx_transcendentals,
- options::OPT_fno_cuda_approx_transcendentals, false))
- CC1Args.push_back("-fcuda-approx-transcendentals");
+ CC1Args.append(
+ {"-fcuda-is-device", "-mllvm", "-enable-memcpyopt-without-libcalls"});
+
+ // Unsized function arguments used for variadics were introduced in CUDA-9.0
+ // We still do not support generating code that actually uses variadic
+ // arguments yet, but we do need to allow parsing them as recent CUDA
+ // headers rely on that. https://github.com/llvm/llvm-project/issues/58410
+ if (CudaInstallation.version() >= CudaVersion::CUDA_90)
+ CC1Args.push_back("-fcuda-allow-variadic-functions");
}
if (DriverArgs.hasArg(options::OPT_nogpulib))
@@ -711,29 +832,6 @@ void CudaToolChain::addClangTargetOptions(
clang::CudaVersion CudaInstallationVersion = CudaInstallation.version();
- // New CUDA versions often introduce new instructions that are only supported
- // by new PTX version, so we need to raise PTX level to enable them in NVPTX
- // back-end.
- const char *PtxFeature = nullptr;
- switch (CudaInstallationVersion) {
-#define CASE_CUDA_VERSION(CUDA_VER, PTX_VER) \
- case CudaVersion::CUDA_##CUDA_VER: \
- PtxFeature = "+ptx" #PTX_VER; \
- break;
- CASE_CUDA_VERSION(112, 72);
- CASE_CUDA_VERSION(111, 71);
- CASE_CUDA_VERSION(110, 70);
- CASE_CUDA_VERSION(102, 65);
- CASE_CUDA_VERSION(101, 64);
- CASE_CUDA_VERSION(100, 63);
- CASE_CUDA_VERSION(92, 61);
- CASE_CUDA_VERSION(91, 61);
- CASE_CUDA_VERSION(90, 60);
-#undef CASE_CUDA_VERSION
- default:
- PtxFeature = "+ptx42";
- }
- CC1Args.append({"-target-feature", PtxFeature});
if (DriverArgs.hasFlag(options::OPT_fcuda_short_ptr,
options::OPT_fno_cuda_short_ptr, false))
CC1Args.append({"-mllvm", "--nvptx-short-ptr"});
@@ -751,14 +849,11 @@ void CudaToolChain::addClangTargetOptions(
return;
}
- std::string BitcodeSuffix;
- if (DriverArgs.hasFlag(options::OPT_fopenmp_target_new_runtime,
- options::OPT_fno_openmp_target_new_runtime, false))
- BitcodeSuffix = "new-nvptx-" + GpuArch.str();
- else
- BitcodeSuffix = "nvptx-" + GpuArch.str();
+ // Link the bitcode library late if we're using device LTO.
+ if (getDriver().isUsingLTO(/* IsOffload */ true))
+ return;
- addOpenMPDeviceRTL(getDriver(), DriverArgs, CC1Args, BitcodeSuffix,
+ addOpenMPDeviceRTL(getDriver(), DriverArgs, CC1Args, GpuArch.str(),
getTriple());
}
}
@@ -777,33 +872,6 @@ llvm::DenormalMode CudaToolChain::getDefaultDenormalModeForType(
return llvm::DenormalMode::getIEEE();
}
-bool CudaToolChain::supportsDebugInfoOption(const llvm::opt::Arg *A) const {
- const Option &O = A->getOption();
- return (O.matches(options::OPT_gN_Group) &&
- !O.matches(options::OPT_gmodules)) ||
- O.matches(options::OPT_g_Flag) ||
- O.matches(options::OPT_ggdbN_Group) || O.matches(options::OPT_ggdb) ||
- O.matches(options::OPT_gdwarf) || O.matches(options::OPT_gdwarf_2) ||
- O.matches(options::OPT_gdwarf_3) || O.matches(options::OPT_gdwarf_4) ||
- O.matches(options::OPT_gdwarf_5) ||
- O.matches(options::OPT_gcolumn_info);
-}
-
-void CudaToolChain::adjustDebugInfoKind(
- codegenoptions::DebugInfoKind &DebugInfoKind, const ArgList &Args) const {
- switch (mustEmitDebugInfo(Args)) {
- case DisableDebugInfo:
- DebugInfoKind = codegenoptions::NoDebugInfo;
- break;
- case DebugDirectivesOnly:
- DebugInfoKind = codegenoptions::DebugDirectivesOnly;
- break;
- case EmitSameDebugInfoAsHost:
- // Use same debug info level as the host.
- break;
- }
-}
-
void CudaToolChain::AddCudaIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
// Check our CUDA version if we're going to include the CUDA headers.
@@ -816,6 +884,19 @@ void CudaToolChain::AddCudaIncludeArgs(const ArgList &DriverArgs,
CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
}
+std::string CudaToolChain::getInputFilename(const InputInfo &Input) const {
+ // Only object files are changed, for example assembly files keep their .s
+ // extensions. If the user requested device-only compilation don't change it.
+ if (Input.getType() != types::TY_Object || getDriver().offloadDeviceOnly())
+ return ToolChain::getInputFilename(Input);
+
+ // Replace extension for object files with cubin because nvlink relies on
+ // these particular file names.
+ SmallString<256> Filename(ToolChain::getInputFilename(Input));
+ llvm::sys::path::replace_extension(Filename, "cubin");
+ return std::string(Filename);
+}
+
llvm::opt::DerivedArgList *
CudaToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
StringRef BoundArch,
@@ -831,23 +912,27 @@ CudaToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
// flags are not duplicated.
// Also append the compute capability.
if (DeviceOffloadKind == Action::OFK_OpenMP) {
- for (Arg *A : Args) {
- bool IsDuplicate = false;
- for (Arg *DALArg : *DAL) {
- if (A == DALArg) {
- IsDuplicate = true;
- break;
+ for (Arg *A : Args)
+ if (!llvm::is_contained(*DAL, A))
+ DAL->append(A);
+
+ if (!DAL->hasArg(options::OPT_march_EQ)) {
+ StringRef Arch = BoundArch;
+ if (Arch.empty()) {
+ auto ArchsOrErr = getSystemGPUArchs(Args);
+ if (!ArchsOrErr) {
+ std::string ErrMsg =
+ llvm::formatv("{0}", llvm::fmt_consume(ArchsOrErr.takeError()));
+ getDriver().Diag(diag::err_drv_undetermined_gpu_arch)
+ << llvm::Triple::getArchTypeName(getArch()) << ErrMsg << "-march";
+ Arch = CudaArchToString(CudaArch::CudaDefault);
+ } else {
+ Arch = Args.MakeArgString(ArchsOrErr->front());
}
}
- if (!IsDuplicate)
- DAL->append(A);
+ DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ), Arch);
}
- StringRef Arch = DAL->getLastArgValue(options::OPT_march_EQ);
- if (Arch.empty())
- DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ),
- CLANG_OPENMP_NVPTX_DEFAULT_ARCH);
-
return DAL;
}
@@ -857,19 +942,51 @@ CudaToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
if (!BoundArch.empty()) {
DAL->eraseArg(options::OPT_march_EQ);
- DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ), BoundArch);
+ DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ),
+ BoundArch);
}
return DAL;
}
+Expected<SmallVector<std::string>>
+CudaToolChain::getSystemGPUArchs(const ArgList &Args) const {
+ // Detect NVIDIA GPUs availible on the system.
+ std::string Program;
+ if (Arg *A = Args.getLastArg(options::OPT_nvptx_arch_tool_EQ))
+ Program = A->getValue();
+ else
+ Program = GetProgramPath("nvptx-arch");
+
+ auto StdoutOrErr = executeToolChainProgram(Program);
+ if (!StdoutOrErr)
+ return StdoutOrErr.takeError();
+
+ SmallVector<std::string, 1> GPUArchs;
+ for (StringRef Arch : llvm::split((*StdoutOrErr)->getBuffer(), "\n"))
+ if (!Arch.empty())
+ GPUArchs.push_back(Arch.str());
+
+ if (GPUArchs.empty())
+ return llvm::createStringError(std::error_code(),
+ "No NVIDIA GPU detected in the system");
+
+ return std::move(GPUArchs);
+}
+
+Tool *NVPTXToolChain::buildAssembler() const {
+ return new tools::NVPTX::Assembler(*this);
+}
+
+Tool *NVPTXToolChain::buildLinker() const {
+ return new tools::NVPTX::Linker(*this);
+}
+
Tool *CudaToolChain::buildAssembler() const {
return new tools::NVPTX::Assembler(*this);
}
Tool *CudaToolChain::buildLinker() const {
- if (OK == Action::OFK_OpenMP)
- return new tools::NVPTX::OpenMPLinker(*this);
- return new tools::NVPTX::Linker(*this);
+ return new tools::NVPTX::FatBinary(*this);
}
void CudaToolChain::addClangWarningOptions(ArgStringList &CC1Args) const {
@@ -884,6 +1001,11 @@ CudaToolChain::GetCXXStdlibType(const ArgList &Args) const {
void CudaToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
HostTC.AddClangSystemIncludeArgs(DriverArgs, CC1Args);
+
+ if (!DriverArgs.hasArg(options::OPT_nogpuinc) && CudaInstallation.isValid())
+ CC1Args.append(
+ {"-internal-isystem",
+ DriverArgs.MakeArgString(CudaInstallation.getIncludePath())});
}
void CudaToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &Args,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h
index 6ae4415a563a..8a053f3393e1 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h
@@ -14,7 +14,6 @@
#include "clang/Driver/Multilib.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/VersionTuple.h"
#include <bitset>
@@ -30,11 +29,8 @@ private:
const Driver &D;
bool IsValid = false;
CudaVersion Version = CudaVersion::UNKNOWN;
- std::string DetectedVersion;
- bool DetectedVersionIsNotSupported = false;
std::string InstallPath;
std::string BinPath;
- std::string LibPath;
std::string LibDevicePath;
std::string IncludePath;
llvm::StringMap<std::string> LibDeviceMap;
@@ -62,15 +58,16 @@ public:
void print(raw_ostream &OS) const;
/// Get the detected Cuda install's version.
- CudaVersion version() const { return Version; }
+ CudaVersion version() const {
+ return Version == CudaVersion::NEW ? CudaVersion::PARTIALLY_SUPPORTED
+ : Version;
+ }
/// Get the detected Cuda installation path.
StringRef getInstallPath() const { return InstallPath; }
/// Get the detected path to Cuda's bin directory.
StringRef getBinPath() const { return BinPath; }
/// Get the detected Cuda Include path.
StringRef getIncludePath() const { return IncludePath; }
- /// Get the detected Cuda library path.
- StringRef getLibPath() const { return LibPath; }
/// Get the detected Cuda device library path.
StringRef getLibDevicePath() const { return LibDevicePath; }
/// Get libdevice file for given architecture
@@ -84,85 +81,125 @@ namespace tools {
namespace NVPTX {
// Run ptxas, the NVPTX assembler.
-class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
- public:
- Assembler(const ToolChain &TC) : Tool("NVPTX::Assembler", "ptxas", TC) {}
+class LLVM_LIBRARY_VISIBILITY Assembler final : public Tool {
+public:
+ Assembler(const ToolChain &TC) : Tool("NVPTX::Assembler", "ptxas", TC) {}
- bool hasIntegratedCPP() const override { return false; }
+ bool hasIntegratedCPP() const override { return false; }
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
};
// Runs fatbinary, which combines GPU object files ("cubin" files) and/or PTX
// assembly into a single output file.
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
- public:
- Linker(const ToolChain &TC) : Tool("NVPTX::Linker", "fatbinary", TC) {}
+class LLVM_LIBRARY_VISIBILITY FatBinary : public Tool {
+public:
+ FatBinary(const ToolChain &TC) : Tool("NVPTX::Linker", "fatbinary", TC) {}
- bool hasIntegratedCPP() const override { return false; }
+ bool hasIntegratedCPP() const override { return false; }
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY OpenMPLinker : public Tool {
- public:
- OpenMPLinker(const ToolChain &TC)
- : Tool("NVPTX::OpenMPLinker", "nvlink", TC) {}
+// Runs nvlink, which links GPU object files ("cubin" files) into a single file.
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
+public:
+ Linker(const ToolChain &TC) : Tool("NVPTX::Linker", "nvlink", TC) {}
- bool hasIntegratedCPP() const override { return false; }
+ bool hasIntegratedCPP() const override { return false; }
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
};
+void getNVPTXTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
+ std::vector<StringRef> &Features);
+
} // end namespace NVPTX
} // end namespace tools
namespace toolchains {
-class LLVM_LIBRARY_VISIBILITY CudaToolChain : public ToolChain {
+class LLVM_LIBRARY_VISIBILITY NVPTXToolChain : public ToolChain {
public:
- CudaToolChain(const Driver &D, const llvm::Triple &Triple,
- const ToolChain &HostTC, const llvm::opt::ArgList &Args,
- const Action::OffloadKind OK);
+ NVPTXToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::Triple &HostTriple, const llvm::opt::ArgList &Args,
+ bool Freestanding);
- const llvm::Triple *getAuxTriple() const override {
- return &HostTC.getTriple();
- }
-
- std::string getInputFilename(const InputInfo &Input) const override;
+ NVPTXToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
llvm::opt::DerivedArgList *
TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
Action::OffloadKind DeviceOffloadKind) const override;
- void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args,
- Action::OffloadKind DeviceOffloadKind) const override;
- llvm::DenormalMode getDefaultDenormalModeForType(
- const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
- const llvm::fltSemantics *FPType = nullptr) const override;
+ void
+ addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
// Never try to use the integrated assembler with CUDA; always fork out to
// ptxas.
bool useIntegratedAs() const override { return false; }
bool isCrossCompiling() const override { return true; }
bool isPICDefault() const override { return false; }
- bool isPIEDefault() const override { return false; }
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override {
+ return false;
+ }
bool isPICDefaultForced() const override { return false; }
bool SupportsProfiling() const override { return false; }
+
+ bool IsMathErrnoDefault() const override { return false; }
+
bool supportsDebugInfoOption(const llvm::opt::Arg *A) const override;
- void adjustDebugInfoKind(codegenoptions::DebugInfoKind &DebugInfoKind,
+ void adjustDebugInfoKind(llvm::codegenoptions::DebugInfoKind &DebugInfoKind,
const llvm::opt::ArgList &Args) const override;
- bool IsMathErrnoDefault() const override { return false; }
+
+ // NVPTX supports only DWARF2.
+ unsigned GetDefaultDwarfVersion() const override { return 2; }
+ unsigned getMaxDwarfVersion() const override { return 2; }
+
+ CudaInstallationDetector CudaInstallation;
+
+protected:
+ Tool *buildAssembler() const override; // ptxas.
+ Tool *buildLinker() const override; // nvlink.
+
+private:
+ bool Freestanding = false;
+};
+
+class LLVM_LIBRARY_VISIBILITY CudaToolChain : public NVPTXToolChain {
+public:
+ CudaToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ToolChain &HostTC, const llvm::opt::ArgList &Args);
+
+ const llvm::Triple *getAuxTriple() const override {
+ return &HostTC.getTriple();
+ }
+
+ std::string getInputFilename(const InputInfo &Input) const override;
+
+ llvm::opt::DerivedArgList *
+ TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
+ Action::OffloadKind DeviceOffloadKind) const override;
+ void
+ addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
+
+ llvm::DenormalMode getDefaultDenormalModeForType(
+ const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
+ const llvm::fltSemantics *FPType = nullptr) const override;
void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
@@ -184,19 +221,16 @@ public:
computeMSVCVersion(const Driver *D,
const llvm::opt::ArgList &Args) const override;
- unsigned GetDefaultDwarfVersion() const override { return 2; }
- // NVPTX supports only DWARF2.
- unsigned getMaxDwarfVersion() const override { return 2; }
-
const ToolChain &HostTC;
- CudaInstallationDetector CudaInstallation;
-protected:
- Tool *buildAssembler() const override; // ptxas
- Tool *buildLinker() const override; // fatbinary (ok, not really a linker)
+ /// Uses nvptx-arch tool to get arch of the system GPU. Will return error
+ /// if unable to find one.
+ virtual Expected<SmallVector<std::string>>
+ getSystemGPUArchs(const llvm::opt::ArgList &Args) const override;
-private:
- const Action::OffloadKind OK;
+protected:
+ Tool *buildAssembler() const override; // ptxas
+ Tool *buildLinker() const override; // fatbinary (ok, not really a linker)
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp
index 261f522f6c49..fae8ad1a958a 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp
@@ -23,9 +23,10 @@
#include "llvm/ProfileData/InstrProf.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/ScopedPrinter.h"
-#include "llvm/Support/TargetParser.h"
#include "llvm/Support/Threading.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/TargetParser/TargetParser.h"
+#include "llvm/TargetParser/Triple.h"
#include <cstdlib> // ::getenv
using namespace clang::driver;
@@ -34,7 +35,7 @@ using namespace clang::driver::toolchains;
using namespace clang;
using namespace llvm::opt;
-static const VersionTuple minimumMacCatalystDeploymentTarget() {
+static VersionTuple minimumMacCatalystDeploymentTarget() {
return VersionTuple(13, 1);
}
@@ -44,7 +45,7 @@ llvm::Triple::ArchType darwin::getArchTypeForMachOArchName(StringRef Str) {
// The matching this routine does is fairly pointless, since it is neither the
// complete architecture list, nor a reasonable subset. The problem is that
- // historically the driver driver accepts this and also ties its -march=
+ // historically the driver accepts this and also ties its -march=
// handling to the architecture name, so we need to be careful before removing
// support for it.
@@ -52,14 +53,11 @@ llvm::Triple::ArchType darwin::getArchTypeForMachOArchName(StringRef Str) {
// translation.
return llvm::StringSwitch<llvm::Triple::ArchType>(Str)
- .Cases("ppc", "ppc601", "ppc603", "ppc604", "ppc604e", llvm::Triple::ppc)
- .Cases("ppc750", "ppc7400", "ppc7450", "ppc970", llvm::Triple::ppc)
- .Case("ppc64", llvm::Triple::ppc64)
.Cases("i386", "i486", "i486SX", "i586", "i686", llvm::Triple::x86)
.Cases("pentium", "pentpro", "pentIIm3", "pentIIm5", "pentium4",
llvm::Triple::x86)
.Cases("x86_64", "x86_64h", llvm::Triple::x86_64)
- // This is derived from the driver driver.
+ // This is derived from the driver.
.Cases("arm", "armv4t", "armv5", "armv6", "armv6m", llvm::Triple::arm)
.Cases("armv7", "armv7em", "armv7k", "armv7m", llvm::Triple::arm)
.Cases("armv7s", "xscale", llvm::Triple::arm)
@@ -74,7 +72,8 @@ llvm::Triple::ArchType darwin::getArchTypeForMachOArchName(StringRef Str) {
.Default(llvm::Triple::UnknownArch);
}
-void darwin::setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str) {
+void darwin::setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str,
+ const ArgList &Args) {
const llvm::Triple::ArchType Arch = getArchTypeForMachOArchName(Str);
llvm::ARM::ArchKind ArchKind = llvm::ARM::parseArch(Str);
T.setArch(Arch);
@@ -84,6 +83,17 @@ void darwin::setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str) {
if (ArchKind == llvm::ARM::ArchKind::ARMV6M ||
ArchKind == llvm::ARM::ArchKind::ARMV7M ||
ArchKind == llvm::ARM::ArchKind::ARMV7EM) {
+ // Don't reject these -version-min= if we have the appropriate triple.
+ if (T.getOS() == llvm::Triple::IOS)
+ for (Arg *A : Args.filtered(options::OPT_mios_version_min_EQ))
+ A->ignoreTargetSpecific();
+ if (T.getOS() == llvm::Triple::WatchOS)
+ for (Arg *A : Args.filtered(options::OPT_mwatchos_version_min_EQ))
+ A->ignoreTargetSpecific();
+ if (T.getOS() == llvm::Triple::TvOS)
+ for (Arg *A : Args.filtered(options::OPT_mtvos_version_min_EQ))
+ A->ignoreTargetSpecific();
+
T.setOS(llvm::Triple::UnknownOS);
T.setObjectFormat(llvm::Triple::MachO);
}
@@ -94,6 +104,8 @@ void darwin::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
+ const llvm::Triple &T(getToolChain().getTriple());
+
ArgStringList CmdArgs;
assert(Inputs.size() == 1 && "Unexpected number of inputs.");
@@ -112,7 +124,6 @@ void darwin::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
// FIXME: at run-time detect assembler capabilities or rely on version
// information forwarded by -target-assembler-version.
if (Args.hasArg(options::OPT_fno_integrated_as)) {
- const llvm::Triple &T(getToolChain().getTriple());
if (!(T.isMacOSX() && T.isMacOSXVersionLT(10, 7)))
CmdArgs.push_back("-Q");
}
@@ -130,8 +141,7 @@ void darwin::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
AddMachOArch(Args, CmdArgs);
// Use -force_cpusubtype_ALL on x86 by default.
- if (getToolChain().getTriple().isX86() ||
- Args.hasArg(options::OPT_force__cpusubtype__ALL))
+ if (T.isX86() || Args.hasArg(options::OPT_force__cpusubtype__ALL))
CmdArgs.push_back("-force_cpusubtype_ALL");
if (getToolChain().getArch() != llvm::Triple::x86_64 &&
@@ -209,20 +219,19 @@ static bool shouldLinkerNotDedup(bool IsLinkerOnlyAction, const ArgList &Args) {
void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
ArgStringList &CmdArgs,
const InputInfoList &Inputs,
- unsigned Version[5], bool LinkerIsLLD,
- bool LinkerIsLLDDarwinNew) const {
+ VersionTuple Version, bool LinkerIsLLD,
+ bool UsePlatformVersion) const {
const Driver &D = getToolChain().getDriver();
const toolchains::MachO &MachOTC = getMachOToolChain();
// Newer linkers support -demangle. Pass it if supported and not disabled by
// the user.
- if ((Version[0] >= 100 || LinkerIsLLD) &&
+ if ((Version >= VersionTuple(100) || LinkerIsLLD) &&
!Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
CmdArgs.push_back("-demangle");
- // FIXME: Pass most of the flags below that check Version if LinkerIsLLD too.
-
- if (Args.hasArg(options::OPT_rdynamic) && Version[0] >= 137)
+ if (Args.hasArg(options::OPT_rdynamic) &&
+ (Version >= VersionTuple(137) || LinkerIsLLD))
CmdArgs.push_back("-export_dynamic");
// If we are using App Extension restrictions, pass a flag to the linker
@@ -231,7 +240,8 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
options::OPT_fno_application_extension, false))
CmdArgs.push_back("-application_extension");
- if (D.isUsingLTO() && Version[0] >= 116 && NeedsTempPath(Inputs)) {
+ if (D.isUsingLTO() && (Version >= VersionTuple(116) || LinkerIsLLD) &&
+ NeedsTempPath(Inputs)) {
std::string TmpPathName;
if (D.getLTOMode() == LTOK_Full) {
// If we are using full LTO, then automatically create a temporary file
@@ -260,7 +270,7 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
// clang version won't work anyways.
// lld is built at the same revision as clang and statically links in
// LLVM libraries, so it doesn't need libLTO.dylib.
- if (Version[0] >= 133 && !LinkerIsLLD) {
+ if (Version >= VersionTuple(133) && !LinkerIsLLD) {
// Search for libLTO in <InstalledDir>/../lib/libLTO.dylib
StringRef P = llvm::sys::path::parent_path(D.Dir);
SmallString<128> LibLTOPath(P);
@@ -270,8 +280,11 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
CmdArgs.push_back(C.getArgs().MakeArgString(LibLTOPath));
}
- // ld64 version 262 and above run the deduplicate pass by default.
- if (Version[0] >= 262 && shouldLinkerNotDedup(C.getJobs().empty(), Args))
+ // ld64 version 262 and above runs the deduplicate pass by default.
+ // FIXME: lld doesn't dedup by default. Should we pass `--icf=safe`
+ // if `!shouldLinkerNotDedup()` if LinkerIsLLD here?
+ if (Version >= VersionTuple(262) &&
+ shouldLinkerNotDedup(C.getJobs().empty(), Args))
CmdArgs.push_back("-no_deduplicate");
// Derived from the "link" spec.
@@ -343,7 +356,7 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
Args.AddAllArgs(CmdArgs, options::OPT_init);
// Add the deployment target.
- if (Version[0] >= 520 || LinkerIsLLDDarwinNew)
+ if (Version >= VersionTuple(520) || LinkerIsLLD || UsePlatformVersion)
MachOTC.addPlatformVersionArgs(Args, CmdArgs);
else
MachOTC.addMinVersionArgs(Args, CmdArgs);
@@ -369,7 +382,9 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
// Check if the toolchain supports bitcode build flow.
if (MachOTC.SupportsEmbeddedBitcode()) {
CmdArgs.push_back("-bitcode_bundle");
- if (C.getDriver().embedBitcodeMarkerOnly() && Version[0] >= 278) {
+ // FIXME: Pass this if LinkerIsLLD too, once it implements this flag.
+ if (C.getDriver().embedBitcodeMarkerOnly() &&
+ Version >= VersionTuple(278)) {
CmdArgs.push_back("-bitcode_process_mode");
CmdArgs.push_back("marker");
}
@@ -389,6 +404,13 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
}
}
+ if (Args.hasArg(options::OPT_mkernel) ||
+ Args.hasArg(options::OPT_fapple_kext) ||
+ Args.hasArg(options::OPT_ffreestanding)) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-disable-atexit-based-global-dtor-lowering");
+ }
+
Args.AddLastArg(CmdArgs, options::OPT_prebind);
Args.AddLastArg(CmdArgs, options::OPT_noprebind);
Args.AddLastArg(CmdArgs, options::OPT_nofixprebinding);
@@ -438,6 +460,23 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
Args.AddAllArgs(CmdArgs, options::OPT_dylinker__install__name);
Args.AddLastArg(CmdArgs, options::OPT_dylinker);
Args.AddLastArg(CmdArgs, options::OPT_Mach);
+
+ if (LinkerIsLLD) {
+ if (auto *CSPGOGenerateArg = getLastCSProfileGenerateArg(Args)) {
+ SmallString<128> Path(CSPGOGenerateArg->getNumValues() == 0
+ ? ""
+ : CSPGOGenerateArg->getValue());
+ llvm::sys::path::append(Path, "default_%m.profraw");
+ CmdArgs.push_back("--cs-profile-generate");
+ CmdArgs.push_back(Args.MakeArgString(Twine("--cs-profile-path=") + Path));
+ } else if (auto *ProfileUseArg = getLastProfileUseArg(Args)) {
+ SmallString<128> Path(
+ ProfileUseArg->getNumValues() == 0 ? "" : ProfileUseArg->getValue());
+ if (Path.empty() || llvm::sys::fs::is_directory(Path))
+ llvm::sys::path::append(Path, "default.profdata");
+ CmdArgs.push_back(Args.MakeArgString(Twine("--cs-profile-path=") + Path));
+ }
+ }
}
/// Determine whether we are linking the ObjC runtime.
@@ -518,6 +557,8 @@ static void renderRemarksOptions(const ArgList &Args, ArgStringList &CmdArgs,
}
}
+static void AppendPlatformPrefix(SmallString<128> &Path, const llvm::Triple &T);
+
void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -544,26 +585,25 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("touch"));
CmdArgs.push_back(Output.getFilename());
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::None(), Exec, CmdArgs, None, Output));
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::None(), Exec,
+ CmdArgs, std::nullopt, Output));
return;
}
- unsigned Version[5] = {0, 0, 0, 0, 0};
- if (Arg *A = Args.getLastArg(options::OPT_mlinker_version_EQ)) {
- if (!Driver::GetReleaseVersion(A->getValue(), Version))
- getToolChain().getDriver().Diag(diag::err_drv_invalid_version_number)
- << A->getAsString(Args);
- }
+ VersionTuple Version = getMachOToolChain().getLinkerVersion(Args);
- bool LinkerIsLLD, LinkerIsLLDDarwinNew;
- const char *Exec = Args.MakeArgString(
- getToolChain().GetLinkerPath(&LinkerIsLLD, &LinkerIsLLDDarwinNew));
+ bool LinkerIsLLD;
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetLinkerPath(&LinkerIsLLD));
+
+ // xrOS always uses -platform-version.
+ bool UsePlatformVersion = getToolChain().getTriple().isXROS();
// I'm not sure why this particular decomposition exists in gcc, but
// we follow suite for ease of comparison.
AddLinkArgs(C, Args, CmdArgs, Inputs, Version, LinkerIsLLD,
- LinkerIsLLDDarwinNew);
+ UsePlatformVersion);
if (willEmitRemarks(Args) &&
checkRemarksOptions(getToolChain().getDriver(), Args,
@@ -577,10 +617,6 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (getMachOToolChain().getMachOArchName(Args) == "arm64") {
CmdArgs.push_back("-mllvm");
CmdArgs.push_back("-enable-machine-outliner");
-
- // Outline from linkonceodr functions by default in LTO.
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back("-enable-linkonceodr-outlining");
}
} else {
// Disable all outlining behaviour if we have mno-outline. We need to do
@@ -591,6 +627,12 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ // Outline from linkonceodr functions by default in LTO, whenever the outliner
+ // is enabled. Note that the target may enable the machine outliner
+ // independently of -moutline.
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-enable-linkonceodr-outlining");
+
// Setup statistics file output.
SmallString<128> StatsFile =
getStatsFileName(Args, Output, Inputs[0], getToolChain().getDriver());
@@ -601,9 +643,9 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// It seems that the 'e' option is completely ignored for dynamic executables
// (the default), and with static executables, the last one wins, as expected.
- Args.AddAllArgs(CmdArgs, {options::OPT_d_Flag, options::OPT_s, options::OPT_t,
- options::OPT_Z_Flag, options::OPT_u_Group,
- options::OPT_e, options::OPT_r});
+ Args.addAllArgs(CmdArgs,
+ {options::OPT_d_Flag, options::OPT_s, options::OPT_t,
+ options::OPT_Z_Flag, options::OPT_u_Group, options::OPT_r});
// Forward -ObjC when either -ObjC or -ObjC++ is used, to force loading
// members of static archive libraries which implement Objective-C classes or
@@ -637,6 +679,13 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
InputFileList.push_back(II.getFilename());
}
+ // Additional linker set-up and flags for Fortran. This is required in order
+ // to generate executables.
+ if (getToolChain().getDriver().IsFlangMode()) {
+ addFortranRuntimeLibraryPath(getToolChain(), Args, CmdArgs);
+ addFortranRuntimeLibs(getToolChain(), Args, CmdArgs);
+ }
+
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs))
addOpenMPRuntime(CmdArgs, getToolChain(), Args);
@@ -714,8 +763,37 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ // Add non-standard, platform-specific search paths, e.g., for DriverKit:
+ // -L<sysroot>/System/DriverKit/usr/lib
+ // -F<sysroot>/System/DriverKit/System/Library/Framework
+ {
+ bool NonStandardSearchPath = false;
+ const auto &Triple = getToolChain().getTriple();
+ if (Triple.isDriverKit()) {
+ // ld64 fixed the implicit -F and -L paths in ld64-605.1+.
+ NonStandardSearchPath =
+ Version.getMajor() < 605 ||
+ (Version.getMajor() == 605 && Version.getMinor().value_or(0) < 1);
+ }
+
+ if (NonStandardSearchPath) {
+ if (auto *Sysroot = Args.getLastArg(options::OPT_isysroot)) {
+ auto AddSearchPath = [&](StringRef Flag, StringRef SearchPath) {
+ SmallString<128> P(Sysroot->getValue());
+ AppendPlatformPrefix(P, Triple);
+ llvm::sys::path::append(P, SearchPath);
+ if (getToolChain().getVFS().exists(P)) {
+ CmdArgs.push_back(Args.MakeArgString(Flag + P));
+ }
+ };
+ AddSearchPath("-L", "/usr/lib");
+ AddSearchPath("-F", "/System/Library/Frameworks");
+ }
+ }
+ }
+
ResponseFileSupport ResponseSupport;
- if (Version[0] >= 705 || LinkerIsLLDDarwinNew) {
+ if (Version >= VersionTuple(705) || LinkerIsLLD) {
ResponseSupport = ResponseFileSupport::AtFileUTF8();
} else {
// For older versions of the linker, use the legacy filelist method instead.
@@ -729,6 +807,54 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
C.addCommand(std::move(Cmd));
}
+void darwin::StaticLibTool::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and for "clang -w foo.o -o foo". Other warning options are already
+ // handled somewhere else.
+ Args.ClaimAllArgs(options::OPT_w);
+ // Silence warnings when linking C code with a C++ '-stdlib' argument.
+ Args.ClaimAllArgs(options::OPT_stdlib_EQ);
+
+ // libtool <options> <output_file> <input_files>
+ ArgStringList CmdArgs;
+ // Create and insert file members with a deterministic index.
+ CmdArgs.push_back("-static");
+ CmdArgs.push_back("-D");
+ CmdArgs.push_back("-no_warning_for_no_symbols");
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (const auto &II : Inputs) {
+ if (II.isFilename()) {
+ CmdArgs.push_back(II.getFilename());
+ }
+ }
+
+ // Delete old output archive file if it already exists before generating a new
+ // archive file.
+ const auto *OutputFileName = Output.getFilename();
+ if (Output.isFilename() && llvm::sys::fs::exists(OutputFileName)) {
+ if (std::error_code EC = llvm::sys::fs::remove(OutputFileName)) {
+ D.Diag(diag::err_drv_unable_to_remove_file) << EC.message();
+ return;
+ }
+ }
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetStaticLibToolPath());
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileUTF8(),
+ Exec, CmdArgs, Inputs, Output));
+}
+
void darwin::Lipo::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -823,13 +949,8 @@ types::ID MachO::LookupTypeForExtension(StringRef Ext) const {
bool MachO::HasNativeLLVMSupport() const { return true; }
ToolChain::CXXStdlibType Darwin::GetDefaultCXXStdlibType() const {
- // Default to use libc++ on OS X 10.9+ and iOS 7+.
- if ((isTargetMacOSBased() && !isMacosxVersionLT(10, 9)) ||
- (isTargetIOSBased() && !isIPhoneOSVersionLT(7, 0)) ||
- isTargetWatchOSBased())
- return ToolChain::CST_Libcxx;
-
- return ToolChain::CST_Libstdcxx;
+ // Always use libc++ by default
+ return ToolChain::CST_Libcxx;
}
/// Darwin provides an ARC runtime starting in MacOS X 10.7 and iOS 5.0.
@@ -838,6 +959,13 @@ ObjCRuntime Darwin::getDefaultObjCRuntime(bool isNonFragile) const {
return ObjCRuntime(ObjCRuntime::WatchOS, TargetVersion);
if (isTargetIOSBased())
return ObjCRuntime(ObjCRuntime::iOS, TargetVersion);
+ if (isTargetXROS()) {
+ // XROS uses the iOS runtime.
+ auto T = llvm::Triple(Twine("arm64-apple-") +
+ llvm::Triple::getOSTypeName(llvm::Triple::XROS) +
+ TargetVersion.getAsString());
+ return ObjCRuntime(ObjCRuntime::iOS, T.getiOSVersion());
+ }
if (isNonFragile)
return ObjCRuntime(ObjCRuntime::MacOSX, TargetVersion);
return ObjCRuntime(ObjCRuntime::FragileMacOSX, TargetVersion);
@@ -845,7 +973,7 @@ ObjCRuntime Darwin::getDefaultObjCRuntime(bool isNonFragile) const {
/// Darwin provides a blocks runtime starting in MacOS X 10.6 and iOS 3.2.
bool Darwin::hasBlocksRuntime() const {
- if (isTargetWatchOSBased())
+ if (isTargetWatchOSBased() || isTargetDriverKit() || isTargetXROS())
return true;
else if (isTargetIOSBased())
return !isIPhoneOSVersionLT(3, 2);
@@ -857,12 +985,12 @@ bool Darwin::hasBlocksRuntime() const {
void Darwin::AddCudaIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
+ CudaInstallation->AddCudaIncludeArgs(DriverArgs, CC1Args);
}
void Darwin::AddHIPIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
+ RocmInstallation->AddHIPIncludeArgs(DriverArgs, CC1Args);
}
// This is just a MachO name translation routine and there's no
@@ -894,13 +1022,13 @@ static const char *ArmMachOArchNameCPU(StringRef CPU) {
// FIXME: Make sure this MachO triple mangling is really necessary.
// ARMv5* normalises to ARMv5.
- if (Arch.startswith("armv5"))
+ if (Arch.starts_with("armv5"))
Arch = Arch.substr(0, 5);
// ARMv6*, except ARMv6M, normalises to ARMv6.
- else if (Arch.startswith("armv6") && !Arch.endswith("6m"))
+ else if (Arch.starts_with("armv6") && !Arch.ends_with("6m"))
Arch = Arch.substr(0, 5);
// ARMv7A normalises to ARMv7.
- else if (Arch.endswith("v7a"))
+ else if (Arch.ends_with("v7a"))
Arch = Arch.substr(0, 5);
return Arch.data();
}
@@ -933,6 +1061,27 @@ StringRef MachO::getMachOArchName(const ArgList &Args) const {
}
}
+VersionTuple MachO::getLinkerVersion(const llvm::opt::ArgList &Args) const {
+ if (LinkerVersion) {
+#ifndef NDEBUG
+ VersionTuple NewLinkerVersion;
+ if (Arg *A = Args.getLastArg(options::OPT_mlinker_version_EQ))
+ (void)NewLinkerVersion.tryParse(A->getValue());
+ assert(NewLinkerVersion == LinkerVersion);
+#endif
+ return *LinkerVersion;
+ }
+
+ VersionTuple NewLinkerVersion;
+ if (Arg *A = Args.getLastArg(options::OPT_mlinker_version_EQ))
+ if (NewLinkerVersion.tryParse(A->getValue()))
+ getDriver().Diag(diag::err_drv_invalid_version_number)
+ << A->getAsString(Args);
+
+ LinkerVersion = NewLinkerVersion;
+ return *LinkerVersion;
+}
+
Darwin::~Darwin() {}
MachO::~MachO() {}
@@ -951,8 +1100,12 @@ std::string Darwin::ComputeEffectiveClangTriple(const ArgList &Args,
Str += "watchos";
else if (isTargetTvOSBased())
Str += "tvos";
+ else if (isTargetDriverKit())
+ Str += "driverkit";
else if (isTargetIOSBased() || isTargetMacCatalyst())
Str += "ios";
+ else if (isTargetXROS())
+ Str += llvm::Triple::getOSTypeName(llvm::Triple::XROS);
else
Str += "macosx";
Str += getTripleTargetVersion().getAsString();
@@ -982,6 +1135,10 @@ Tool *MachO::getTool(Action::ActionClass AC) const {
Tool *MachO::buildLinker() const { return new tools::darwin::Linker(*this); }
+Tool *MachO::buildStaticLibTool() const {
+ return new tools::darwin::StaticLibTool(*this);
+}
+
Tool *MachO::buildAssembler() const {
return new tools::darwin::Assembler(*this);
}
@@ -1031,6 +1188,8 @@ void DarwinClang::AddLinkARCArgs(const ArgList &Args,
// ARC runtime is supported everywhere on arm64e.
if (getTriple().isArm64e())
return;
+ if (isTargetXROS())
+ return;
ObjCRuntime runtime = getDefaultObjCRuntime(/*nonfragile*/ true);
@@ -1041,25 +1200,38 @@ void DarwinClang::AddLinkARCArgs(const ArgList &Args,
SmallString<128> P(getDriver().ClangExecutable);
llvm::sys::path::remove_filename(P); // 'clang'
llvm::sys::path::remove_filename(P); // 'bin'
+ llvm::sys::path::append(P, "lib", "arc");
// 'libarclite' usually lives in the same toolchain as 'clang'. However, the
// Swift open source toolchains for macOS distribute Clang without libarclite.
// In that case, to allow the linker to find 'libarclite', we point to the
// 'libarclite' in the XcodeDefault toolchain instead.
- if (getXcodeDeveloperPath(P).empty()) {
- if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
+ if (!getVFS().exists(P)) {
+ auto updatePath = [&](const Arg *A) {
// Try to infer the path to 'libarclite' in the toolchain from the
// specified SDK path.
StringRef XcodePathForSDK = getXcodeDeveloperPath(A->getValue());
- if (!XcodePathForSDK.empty()) {
- P = XcodePathForSDK;
- llvm::sys::path::append(P, "Toolchains/XcodeDefault.xctoolchain/usr");
- }
+ if (XcodePathForSDK.empty())
+ return false;
+
+ P = XcodePathForSDK;
+ llvm::sys::path::append(P, "Toolchains/XcodeDefault.xctoolchain/usr",
+ "lib", "arc");
+ return getVFS().exists(P);
+ };
+
+ bool updated = false;
+ if (const Arg *A = Args.getLastArg(options::OPT_isysroot))
+ updated = updatePath(A);
+
+ if (!updated) {
+ if (const Arg *A = Args.getLastArg(options::OPT__sysroot_EQ))
+ updatePath(A);
}
}
CmdArgs.push_back("-force_load");
- llvm::sys::path::append(P, "lib", "arc", "libarclite_");
+ llvm::sys::path::append(P, "libarclite_");
// Mash in the platform.
if (isTargetWatchOSSimulator())
P += "watchsimulator";
@@ -1077,6 +1249,9 @@ void DarwinClang::AddLinkARCArgs(const ArgList &Args,
P += "macosx";
P += ".a";
+ if (!getVFS().exists(P))
+ getDriver().Diag(clang::diag::err_drv_darwin_sdk_missing_arclite) << P;
+
CmdArgs.push_back(Args.MakeArgString(P));
}
@@ -1102,8 +1277,9 @@ void MachO::AddLinkRuntimeLib(const ArgList &Args, ArgStringList &CmdArgs,
DarwinLibName += getOSLibraryNameSuffix();
DarwinLibName += IsShared ? "_dynamic.dylib" : ".a";
SmallString<128> Dir(getDriver().ResourceDir);
- llvm::sys::path::append(
- Dir, "lib", (Opts & RLO_IsEmbedded) ? "macho_embedded" : "darwin");
+ llvm::sys::path::append(Dir, "lib", "darwin");
+ if (Opts & RLO_IsEmbedded)
+ llvm::sys::path::append(Dir, "macho_embedded");
SmallString<128> P(Dir);
llvm::sys::path::append(P, DarwinLibName);
@@ -1121,7 +1297,7 @@ void MachO::AddLinkRuntimeLib(const ArgList &Args, ArgStringList &CmdArgs,
// rpaths. This is currently true from this place, but we need to be
// careful if this function is ever called before user's rpaths are emitted.
if (Opts & RLO_AddRPath) {
- assert(DarwinLibName.endswith(".dylib") && "must be a dynamic library");
+ assert(DarwinLibName.ends_with(".dylib") && "must be a dynamic library");
// Add @executable_path to rpath to support having the dylib copied with
// the executable.
@@ -1147,6 +1323,10 @@ StringRef Darwin::getPlatformFamily() const {
return "AppleTV";
case DarwinPlatformKind::WatchOS:
return "Watch";
+ case DarwinPlatformKind::DriverKit:
+ return "DriverKit";
+ case DarwinPlatformKind::XROS:
+ return "XR";
}
llvm_unreachable("Unsupported platform");
}
@@ -1157,8 +1337,8 @@ StringRef Darwin::getSDKName(StringRef isysroot) {
auto EndSDK = llvm::sys::path::rend(isysroot);
for (auto IT = BeginSDK; IT != EndSDK; ++IT) {
StringRef SDK = *IT;
- if (SDK.endswith(".sdk"))
- return SDK.slice(0, SDK.size() - 4);
+ if (SDK.ends_with(".sdk"))
+ return SDK.slice(0, SDK.size() - 4);
}
return "";
}
@@ -1178,6 +1358,11 @@ StringRef Darwin::getOSLibraryNameSuffix(bool IgnoreSim) const {
case DarwinPlatformKind::WatchOS:
return TargetEnvironment == NativeEnvironment || IgnoreSim ? "watchos"
: "watchossim";
+ case DarwinPlatformKind::XROS:
+ return TargetEnvironment == NativeEnvironment || IgnoreSim ? "xros"
+ : "xrossim";
+ case DarwinPlatformKind::DriverKit:
+ return "driverkit";
}
llvm_unreachable("Unsupported platform");
}
@@ -1229,20 +1414,14 @@ void Darwin::addProfileRTLibs(const ArgList &Args,
// If we have a symbol export directive and we're linking in the profile
// runtime, automatically export symbols necessary to implement some of the
// runtime's functionality.
- if (hasExportSymbolDirective(Args)) {
- if (ForGCOV) {
- addExportedSymbol(CmdArgs, "___gcov_dump");
- addExportedSymbol(CmdArgs, "___gcov_reset");
- addExportedSymbol(CmdArgs, "_writeout_fn_list");
- addExportedSymbol(CmdArgs, "_reset_fn_list");
- } else {
- addExportedSymbol(CmdArgs, "___llvm_profile_filename");
- addExportedSymbol(CmdArgs, "___llvm_profile_raw_version");
- }
- addExportedSymbol(CmdArgs, "_lprofDirMode");
+ if (hasExportSymbolDirective(Args) && ForGCOV) {
+ addExportedSymbol(CmdArgs, "___gcov_dump");
+ addExportedSymbol(CmdArgs, "___gcov_reset");
+ addExportedSymbol(CmdArgs, "_writeout_fn_list");
+ addExportedSymbol(CmdArgs, "_reset_fn_list");
}
- // Align __llvm_prf_{cnts,data} sections to the maximum expected page
+ // Align __llvm_prf_{cnts,bits,data} sections to the maximum expected page
// alignment. This allows profile counters to be mmap()'d to disk. Note that
// it's not enough to just page-align __llvm_prf_cnts: the following section
// must also be page-aligned so that its data is not clobbered by mmap().
@@ -1252,7 +1431,7 @@ void Darwin::addProfileRTLibs(const ArgList &Args,
// extra alignment also allows the same binary to be used with/without sync
// enabled.
if (!ForGCOV) {
- for (auto IPSK : {llvm::IPSK_cnts, llvm::IPSK_data}) {
+ for (auto IPSK : {llvm::IPSK_cnts, llvm::IPSK_bitmap, llvm::IPSK_data}) {
addSectalignToPage(
Args, CmdArgs, "__DATA",
llvm::getInstrProfSectionName(IPSK, llvm::Triple::MachO,
@@ -1273,7 +1452,7 @@ ToolChain::RuntimeLibType DarwinClang::GetRuntimeLibType(
const ArgList &Args) const {
if (Arg* A = Args.getLastArg(options::OPT_rtlib_EQ)) {
StringRef Value = A->getValue();
- if (Value != "compiler-rt")
+ if (Value != "compiler-rt" && Value != "platform")
getDriver().Diag(clang::diag::err_drv_unsupported_rtlib_for_platform)
<< Value << "darwin";
}
@@ -1305,27 +1484,58 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
return;
}
- const SanitizerArgs &Sanitize = getSanitizerArgs();
- if (Sanitize.needsAsanRt())
- AddLinkSanitizerLibArgs(Args, CmdArgs, "asan");
- if (Sanitize.needsLsanRt())
- AddLinkSanitizerLibArgs(Args, CmdArgs, "lsan");
- if (Sanitize.needsUbsanRt())
- AddLinkSanitizerLibArgs(Args, CmdArgs,
- Sanitize.requiresMinimalRuntime() ? "ubsan_minimal"
- : "ubsan",
- Sanitize.needsSharedRt());
- if (Sanitize.needsTsanRt())
- AddLinkSanitizerLibArgs(Args, CmdArgs, "tsan");
- if (Sanitize.needsFuzzer() && !Args.hasArg(options::OPT_dynamiclib)) {
- AddLinkSanitizerLibArgs(Args, CmdArgs, "fuzzer", /*shared=*/false);
+ const SanitizerArgs &Sanitize = getSanitizerArgs(Args);
- // Libfuzzer is written in C++ and requires libcxx.
- AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (!Sanitize.needsSharedRt()) {
+ const char *sanitizer = nullptr;
+ if (Sanitize.needsUbsanRt()) {
+ sanitizer = "UndefinedBehaviorSanitizer";
+ } else if (Sanitize.needsAsanRt()) {
+ sanitizer = "AddressSanitizer";
+ } else if (Sanitize.needsTsanRt()) {
+ sanitizer = "ThreadSanitizer";
+ }
+ if (sanitizer) {
+ getDriver().Diag(diag::err_drv_unsupported_static_sanitizer_darwin)
+ << sanitizer;
+ return;
+ }
}
- if (Sanitize.needsStatsRt()) {
- AddLinkRuntimeLib(Args, CmdArgs, "stats_client", RLO_AlwaysLink);
- AddLinkSanitizerLibArgs(Args, CmdArgs, "stats");
+
+ if (Sanitize.linkRuntimes()) {
+ if (Sanitize.needsAsanRt()) {
+ if (Sanitize.needsStableAbi()) {
+ AddLinkSanitizerLibArgs(Args, CmdArgs, "asan_abi", /*shared=*/false);
+ } else {
+ assert(Sanitize.needsSharedRt() &&
+ "Static sanitizer runtimes not supported");
+ AddLinkSanitizerLibArgs(Args, CmdArgs, "asan");
+ }
+ }
+ if (Sanitize.needsLsanRt())
+ AddLinkSanitizerLibArgs(Args, CmdArgs, "lsan");
+ if (Sanitize.needsUbsanRt()) {
+ assert(Sanitize.needsSharedRt() &&
+ "Static sanitizer runtimes not supported");
+ AddLinkSanitizerLibArgs(
+ Args, CmdArgs,
+ Sanitize.requiresMinimalRuntime() ? "ubsan_minimal" : "ubsan");
+ }
+ if (Sanitize.needsTsanRt()) {
+ assert(Sanitize.needsSharedRt() &&
+ "Static sanitizer runtimes not supported");
+ AddLinkSanitizerLibArgs(Args, CmdArgs, "tsan");
+ }
+ if (Sanitize.needsFuzzer() && !Args.hasArg(options::OPT_dynamiclib)) {
+ AddLinkSanitizerLibArgs(Args, CmdArgs, "fuzzer", /*shared=*/false);
+
+ // Libfuzzer is written in C++ and requires libcxx.
+ AddCXXStdlibLibArgs(Args, CmdArgs);
+ }
+ if (Sanitize.needsStatsRt()) {
+ AddLinkRuntimeLib(Args, CmdArgs, "stats_client", RLO_AlwaysLink);
+ AddLinkSanitizerLibArgs(Args, CmdArgs, "stats");
+ }
}
const XRayArgs &XRay = getXRayArgs();
@@ -1335,9 +1545,15 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
AddLinkRuntimeLib(Args, CmdArgs, "xray-fdr");
}
+ if (isTargetDriverKit() && !Args.hasArg(options::OPT_nodriverkitlib)) {
+ CmdArgs.push_back("-framework");
+ CmdArgs.push_back("DriverKit");
+ }
+
// Otherwise link libSystem, then the dynamic runtime library, and finally any
// target specific static runtime library.
- CmdArgs.push_back("-lSystem");
+ if (!isTargetDriverKit())
+ CmdArgs.push_back("-lSystem");
// Select the dynamic runtime library and the target specific static library.
if (isTargetIOSBased()) {
@@ -1356,17 +1572,19 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
/// If the macOS SDK version is the same or earlier than the system version,
/// then the SDK version is returned. Otherwise the system version is returned.
static std::string getSystemOrSDKMacOSVersion(StringRef MacOSSDKVersion) {
- unsigned Major, Minor, Micro;
llvm::Triple SystemTriple(llvm::sys::getProcessTriple());
if (!SystemTriple.isMacOSX())
return std::string(MacOSSDKVersion);
- SystemTriple.getMacOSXVersion(Major, Minor, Micro);
- VersionTuple SystemVersion(Major, Minor, Micro);
+ VersionTuple SystemVersion;
+ SystemTriple.getMacOSXVersion(SystemVersion);
+
+ unsigned Major, Minor, Micro;
bool HadExtra;
if (!Driver::GetReleaseVersion(MacOSSDKVersion, Major, Minor, Micro,
HadExtra))
return std::string(MacOSSDKVersion);
VersionTuple SDKVersion(Major, Minor, Micro);
+
if (SDKVersion > SystemVersion)
return SystemVersion.getAsString();
return std::string(MacOSSDKVersion);
@@ -1379,6 +1597,8 @@ struct DarwinPlatform {
enum SourceKind {
/// The OS was specified using the -target argument.
TargetArg,
+ /// The OS was specified using the -mtargetos= argument.
+ MTargetOSArg,
/// The OS was specified using the -m<os>-version-min argument.
OSVersionArg,
/// The OS was specified using the OS_DEPLOYMENT_TARGET environment.
@@ -1426,18 +1646,23 @@ struct DarwinPlatform {
/// Returns true if the simulator environment can be inferred from the arch.
bool canInferSimulatorFromArch() const { return InferSimulatorFromArch; }
+ const std::optional<llvm::Triple> &getTargetVariantTriple() const {
+ return TargetVariantTriple;
+ }
+
/// Adds the -m<os>-version-min argument to the compiler invocation.
void addOSVersionMinArgument(DerivedArgList &Args, const OptTable &Opts) {
if (Argument)
return;
- assert(Kind != TargetArg && Kind != OSVersionArg && "Invalid kind");
+ assert(Kind != TargetArg && Kind != MTargetOSArg && Kind != OSVersionArg &&
+ "Invalid kind");
options::ID Opt;
switch (Platform) {
case DarwinPlatformKind::MacOS:
- Opt = options::OPT_mmacosx_version_min_EQ;
+ Opt = options::OPT_mmacos_version_min_EQ;
break;
case DarwinPlatformKind::IPhoneOS:
- Opt = options::OPT_miphoneos_version_min_EQ;
+ Opt = options::OPT_mios_version_min_EQ;
break;
case DarwinPlatformKind::TvOS:
Opt = options::OPT_mtvos_version_min_EQ;
@@ -1445,6 +1670,12 @@ struct DarwinPlatform {
case DarwinPlatformKind::WatchOS:
Opt = options::OPT_mwatchos_version_min_EQ;
break;
+ case DarwinPlatformKind::XROS:
+ // xrOS always explicitly provides a version in the triple.
+ return;
+ case DarwinPlatformKind::DriverKit:
+ // DriverKit always explicitly provides a version in the triple.
+ return;
}
Argument = Args.MakeJoinedArg(nullptr, Opts.getOption(Opt), OSVersion);
Args.append(Argument);
@@ -1455,6 +1686,7 @@ struct DarwinPlatform {
std::string getAsString(DerivedArgList &Args, const OptTable &Opts) {
switch (Kind) {
case TargetArg:
+ case MTargetOSArg:
case OSVersionArg:
case InferredFromSDK:
case InferredFromArch:
@@ -1466,45 +1698,72 @@ struct DarwinPlatform {
llvm_unreachable("Unsupported Darwin Source Kind");
}
- static DarwinPlatform
- createFromTarget(const llvm::Triple &TT, StringRef OSVersion, Arg *A,
- const Optional<DarwinSDKInfo> &SDKInfo) {
- DarwinPlatform Result(TargetArg, getPlatformFromOS(TT.getOS()), OSVersion,
- A);
- unsigned Major, Minor, Micro;
- TT.getOSVersion(Major, Minor, Micro);
- if (Major == 0)
- Result.HasOSVersion = false;
-
- switch (TT.getEnvironment()) {
+ void setEnvironment(llvm::Triple::EnvironmentType EnvType,
+ const VersionTuple &OSVersion,
+ const std::optional<DarwinSDKInfo> &SDKInfo) {
+ switch (EnvType) {
case llvm::Triple::Simulator:
- Result.Environment = DarwinEnvironmentKind::Simulator;
+ Environment = DarwinEnvironmentKind::Simulator;
break;
case llvm::Triple::MacABI: {
+ Environment = DarwinEnvironmentKind::MacCatalyst;
// The minimum native macOS target for MacCatalyst is macOS 10.15.
- auto NativeTargetVersion = VersionTuple(10, 15);
- if (Result.HasOSVersion && SDKInfo) {
+ NativeTargetVersion = VersionTuple(10, 15);
+ if (HasOSVersion && SDKInfo) {
if (const auto *MacCatalystToMacOSMapping = SDKInfo->getVersionMapping(
DarwinSDKInfo::OSEnvPair::macCatalystToMacOSPair())) {
if (auto MacOSVersion = MacCatalystToMacOSMapping->map(
- VersionTuple(Major, Minor, Micro), NativeTargetVersion,
- None)) {
+ OSVersion, NativeTargetVersion, std::nullopt)) {
NativeTargetVersion = *MacOSVersion;
}
}
}
- Result.Environment = DarwinEnvironmentKind::MacCatalyst;
- Result.NativeTargetVersion = NativeTargetVersion;
+ // In a zippered build, we could be building for a macOS target that's
+ // lower than the version that's implied by the OS version. In that case
+ // we need to use the minimum version as the native target version.
+ if (TargetVariantTriple) {
+ auto TargetVariantVersion = TargetVariantTriple->getOSVersion();
+ if (TargetVariantVersion.getMajor()) {
+ if (TargetVariantVersion < NativeTargetVersion)
+ NativeTargetVersion = TargetVariantVersion;
+ }
+ }
break;
}
default:
break;
}
+ }
+
+ static DarwinPlatform
+ createFromTarget(const llvm::Triple &TT, StringRef OSVersion, Arg *A,
+ std::optional<llvm::Triple> TargetVariantTriple,
+ const std::optional<DarwinSDKInfo> &SDKInfo) {
+ DarwinPlatform Result(TargetArg, getPlatformFromOS(TT.getOS()), OSVersion,
+ A);
+ VersionTuple OsVersion = TT.getOSVersion();
+ if (OsVersion.getMajor() == 0)
+ Result.HasOSVersion = false;
+ Result.TargetVariantTriple = TargetVariantTriple;
+ Result.setEnvironment(TT.getEnvironment(), OsVersion, SDKInfo);
return Result;
}
- static DarwinPlatform createOSVersionArg(DarwinPlatformKind Platform,
- Arg *A) {
- return DarwinPlatform(OSVersionArg, Platform, A);
+ static DarwinPlatform
+ createFromMTargetOS(llvm::Triple::OSType OS, VersionTuple OSVersion,
+ llvm::Triple::EnvironmentType Environment, Arg *A,
+ const std::optional<DarwinSDKInfo> &SDKInfo) {
+ DarwinPlatform Result(MTargetOSArg, getPlatformFromOS(OS),
+ OSVersion.getAsString(), A);
+ Result.InferSimulatorFromArch = false;
+ Result.setEnvironment(Environment, OSVersion, SDKInfo);
+ return Result;
+ }
+ static DarwinPlatform createOSVersionArg(DarwinPlatformKind Platform, Arg *A,
+ bool IsSimulator) {
+ DarwinPlatform Result{OSVersionArg, Platform, A};
+ if (IsSimulator)
+ Result.Environment = DarwinEnvironmentKind::Simulator;
+ return Result;
}
static DarwinPlatform createDeploymentTargetEnv(DarwinPlatformKind Platform,
StringRef EnvVarName,
@@ -1559,6 +1818,10 @@ private:
return DarwinPlatformKind::TvOS;
case llvm::Triple::WatchOS:
return DarwinPlatformKind::WatchOS;
+ case llvm::Triple::XROS:
+ return DarwinPlatformKind::XROS;
+ case llvm::Triple::DriverKit:
+ return DarwinPlatformKind::DriverKit;
default:
llvm_unreachable("Unable to infer Darwin variant");
}
@@ -1572,15 +1835,16 @@ private:
bool HasOSVersion = true, InferSimulatorFromArch = true;
Arg *Argument;
StringRef EnvVarName;
+ std::optional<llvm::Triple> TargetVariantTriple;
};
/// Returns the deployment target that's specified using the -m<os>-version-min
/// argument.
-Optional<DarwinPlatform>
+std::optional<DarwinPlatform>
getDeploymentTargetFromOSVersionArg(DerivedArgList &Args,
const Driver &TheDriver) {
- Arg *OSXVersion = Args.getLastArg(options::OPT_mmacosx_version_min_EQ);
- Arg *iOSVersion = Args.getLastArg(options::OPT_miphoneos_version_min_EQ,
+ Arg *macOSVersion = Args.getLastArg(options::OPT_mmacos_version_min_EQ);
+ Arg *iOSVersion = Args.getLastArg(options::OPT_mios_version_min_EQ,
options::OPT_mios_simulator_version_min_EQ);
Arg *TvOSVersion =
Args.getLastArg(options::OPT_mtvos_version_min_EQ,
@@ -1588,37 +1852,47 @@ getDeploymentTargetFromOSVersionArg(DerivedArgList &Args,
Arg *WatchOSVersion =
Args.getLastArg(options::OPT_mwatchos_version_min_EQ,
options::OPT_mwatchos_simulator_version_min_EQ);
- if (OSXVersion) {
+ if (macOSVersion) {
if (iOSVersion || TvOSVersion || WatchOSVersion) {
TheDriver.Diag(diag::err_drv_argument_not_allowed_with)
- << OSXVersion->getAsString(Args)
+ << macOSVersion->getAsString(Args)
<< (iOSVersion ? iOSVersion
: TvOSVersion ? TvOSVersion : WatchOSVersion)
->getAsString(Args);
}
- return DarwinPlatform::createOSVersionArg(Darwin::MacOS, OSXVersion);
+ return DarwinPlatform::createOSVersionArg(Darwin::MacOS, macOSVersion,
+ /*IsSimulator=*/false);
} else if (iOSVersion) {
if (TvOSVersion || WatchOSVersion) {
TheDriver.Diag(diag::err_drv_argument_not_allowed_with)
<< iOSVersion->getAsString(Args)
<< (TvOSVersion ? TvOSVersion : WatchOSVersion)->getAsString(Args);
}
- return DarwinPlatform::createOSVersionArg(Darwin::IPhoneOS, iOSVersion);
+ return DarwinPlatform::createOSVersionArg(
+ Darwin::IPhoneOS, iOSVersion,
+ iOSVersion->getOption().getID() ==
+ options::OPT_mios_simulator_version_min_EQ);
} else if (TvOSVersion) {
if (WatchOSVersion) {
TheDriver.Diag(diag::err_drv_argument_not_allowed_with)
<< TvOSVersion->getAsString(Args)
<< WatchOSVersion->getAsString(Args);
}
- return DarwinPlatform::createOSVersionArg(Darwin::TvOS, TvOSVersion);
+ return DarwinPlatform::createOSVersionArg(
+ Darwin::TvOS, TvOSVersion,
+ TvOSVersion->getOption().getID() ==
+ options::OPT_mtvos_simulator_version_min_EQ);
} else if (WatchOSVersion)
- return DarwinPlatform::createOSVersionArg(Darwin::WatchOS, WatchOSVersion);
- return None;
+ return DarwinPlatform::createOSVersionArg(
+ Darwin::WatchOS, WatchOSVersion,
+ WatchOSVersion->getOption().getID() ==
+ options::OPT_mwatchos_simulator_version_min_EQ);
+ return std::nullopt;
}
/// Returns the deployment target that's specified using the
/// OS_DEPLOYMENT_TARGET environment variable.
-Optional<DarwinPlatform>
+std::optional<DarwinPlatform>
getDeploymentTargetFromEnvironmentVariables(const Driver &TheDriver,
const llvm::Triple &Triple) {
std::string Targets[Darwin::LastDarwinPlatform + 1];
@@ -1627,10 +1901,11 @@ getDeploymentTargetFromEnvironmentVariables(const Driver &TheDriver,
"IPHONEOS_DEPLOYMENT_TARGET",
"TVOS_DEPLOYMENT_TARGET",
"WATCHOS_DEPLOYMENT_TARGET",
+ "DRIVERKIT_DEPLOYMENT_TARGET",
};
- static_assert(llvm::array_lengthof(EnvVars) == Darwin::LastDarwinPlatform + 1,
+ static_assert(std::size(EnvVars) == Darwin::LastDarwinPlatform + 1,
"Missing platform");
- for (const auto &I : llvm::enumerate(llvm::makeArrayRef(EnvVars))) {
+ for (const auto &I : llvm::enumerate(llvm::ArrayRef(EnvVars))) {
if (char *Env = ::getenv(I.value()))
Targets[I.index()] = Env;
}
@@ -1649,11 +1924,11 @@ getDeploymentTargetFromEnvironmentVariables(const Driver &TheDriver,
Targets[Darwin::TvOS] = "";
} else {
// Don't allow conflicts in any other platform.
- unsigned FirstTarget = llvm::array_lengthof(Targets);
- for (unsigned I = 0; I != llvm::array_lengthof(Targets); ++I) {
+ unsigned FirstTarget = std::size(Targets);
+ for (unsigned I = 0; I != std::size(Targets); ++I) {
if (Targets[I].empty())
continue;
- if (FirstTarget == llvm::array_lengthof(Targets))
+ if (FirstTarget == std::size(Targets))
FirstTarget = I;
else
TheDriver.Diag(diag::err_drv_conflicting_deployment_targets)
@@ -1661,13 +1936,13 @@ getDeploymentTargetFromEnvironmentVariables(const Driver &TheDriver,
}
}
- for (const auto &Target : llvm::enumerate(llvm::makeArrayRef(Targets))) {
+ for (const auto &Target : llvm::enumerate(llvm::ArrayRef(Targets))) {
if (!Target.value().empty())
return DarwinPlatform::createDeploymentTargetEnv(
(Darwin::DarwinPlatformKind)Target.index(), EnvVars[Target.index()],
Target.value());
}
- return None;
+ return std::nullopt;
}
/// Returns the SDK name without the optional prefix that ends with a '.' or an
@@ -1682,16 +1957,16 @@ static StringRef dropSDKNamePrefix(StringRef SDKName) {
/// Tries to infer the deployment target from the SDK specified by -isysroot
/// (or SDKROOT). Uses the version specified in the SDKSettings.json file if
/// it's available.
-Optional<DarwinPlatform>
+std::optional<DarwinPlatform>
inferDeploymentTargetFromSDK(DerivedArgList &Args,
- const Optional<DarwinSDKInfo> &SDKInfo) {
+ const std::optional<DarwinSDKInfo> &SDKInfo) {
const Arg *A = Args.getLastArg(options::OPT_isysroot);
if (!A)
- return None;
+ return std::nullopt;
StringRef isysroot = A->getValue();
StringRef SDK = Darwin::getSDKName(isysroot);
if (!SDK.size())
- return None;
+ return std::nullopt;
std::string Version;
if (SDKInfo) {
@@ -1706,26 +1981,33 @@ inferDeploymentTargetFromSDK(DerivedArgList &Args,
Version = std::string(SDK.slice(StartVer, EndVer + 1));
}
if (Version.empty())
- return None;
+ return std::nullopt;
auto CreatePlatformFromSDKName =
- [&](StringRef SDK) -> Optional<DarwinPlatform> {
- if (SDK.startswith("iPhoneOS") || SDK.startswith("iPhoneSimulator"))
+ [&](StringRef SDK) -> std::optional<DarwinPlatform> {
+ if (SDK.starts_with("iPhoneOS") || SDK.starts_with("iPhoneSimulator"))
return DarwinPlatform::createFromSDK(
Darwin::IPhoneOS, Version,
- /*IsSimulator=*/SDK.startswith("iPhoneSimulator"));
- else if (SDK.startswith("MacOSX"))
+ /*IsSimulator=*/SDK.starts_with("iPhoneSimulator"));
+ else if (SDK.starts_with("MacOSX"))
return DarwinPlatform::createFromSDK(Darwin::MacOS,
getSystemOrSDKMacOSVersion(Version));
- else if (SDK.startswith("WatchOS") || SDK.startswith("WatchSimulator"))
+ else if (SDK.starts_with("WatchOS") || SDK.starts_with("WatchSimulator"))
return DarwinPlatform::createFromSDK(
Darwin::WatchOS, Version,
- /*IsSimulator=*/SDK.startswith("WatchSimulator"));
- else if (SDK.startswith("AppleTVOS") || SDK.startswith("AppleTVSimulator"))
+ /*IsSimulator=*/SDK.starts_with("WatchSimulator"));
+ else if (SDK.starts_with("AppleTVOS") ||
+ SDK.starts_with("AppleTVSimulator"))
return DarwinPlatform::createFromSDK(
Darwin::TvOS, Version,
- /*IsSimulator=*/SDK.startswith("AppleTVSimulator"));
- return None;
+ /*IsSimulator=*/SDK.starts_with("AppleTVSimulator"));
+ else if (SDK.starts_with("XR"))
+ return DarwinPlatform::createFromSDK(
+ Darwin::XROS, Version,
+ /*IsSimulator=*/SDK.contains("Simulator"));
+ else if (SDK.starts_with("DriverKit"))
+ return DarwinPlatform::createFromSDK(Darwin::DriverKit, Version);
+ return std::nullopt;
};
if (auto Result = CreatePlatformFromSDKName(SDK))
return Result;
@@ -1735,7 +2017,7 @@ inferDeploymentTargetFromSDK(DerivedArgList &Args,
std::string getOSVersion(llvm::Triple::OSType OS, const llvm::Triple &Triple,
const Driver &TheDriver) {
- unsigned Major, Minor, Micro;
+ VersionTuple OsVersion;
llvm::Triple SystemTriple(llvm::sys::getProcessTriple());
switch (OS) {
case llvm::Triple::Darwin:
@@ -1744,19 +2026,30 @@ std::string getOSVersion(llvm::Triple::OSType OS, const llvm::Triple &Triple,
// macos, use the host triple to infer OS version.
if (Triple.isMacOSX() && SystemTriple.isMacOSX() &&
!Triple.getOSMajorVersion())
- SystemTriple.getMacOSXVersion(Major, Minor, Micro);
- else if (!Triple.getMacOSXVersion(Major, Minor, Micro))
+ SystemTriple.getMacOSXVersion(OsVersion);
+ else if (!Triple.getMacOSXVersion(OsVersion))
TheDriver.Diag(diag::err_drv_invalid_darwin_version)
<< Triple.getOSName();
break;
case llvm::Triple::IOS:
- Triple.getiOSVersion(Major, Minor, Micro);
+ if (Triple.isMacCatalystEnvironment() && !Triple.getOSMajorVersion()) {
+ OsVersion = VersionTuple(13, 1);
+ } else
+ OsVersion = Triple.getiOSVersion();
break;
case llvm::Triple::TvOS:
- Triple.getOSVersion(Major, Minor, Micro);
+ OsVersion = Triple.getOSVersion();
break;
case llvm::Triple::WatchOS:
- Triple.getWatchOSVersion(Major, Minor, Micro);
+ OsVersion = Triple.getWatchOSVersion();
+ break;
+ case llvm::Triple::XROS:
+ OsVersion = Triple.getOSVersion();
+ if (!OsVersion.getMajor())
+ OsVersion = OsVersion.withMajorReplaced(1);
+ break;
+ case llvm::Triple::DriverKit:
+ OsVersion = Triple.getDriverKitVersion();
break;
default:
llvm_unreachable("Unexpected OS type");
@@ -1764,67 +2057,118 @@ std::string getOSVersion(llvm::Triple::OSType OS, const llvm::Triple &Triple,
}
std::string OSVersion;
- llvm::raw_string_ostream(OSVersion) << Major << '.' << Minor << '.' << Micro;
+ llvm::raw_string_ostream(OSVersion)
+ << OsVersion.getMajor() << '.' << OsVersion.getMinor().value_or(0) << '.'
+ << OsVersion.getSubminor().value_or(0);
return OSVersion;
}
/// Tries to infer the target OS from the -arch.
-Optional<DarwinPlatform>
+std::optional<DarwinPlatform>
inferDeploymentTargetFromArch(DerivedArgList &Args, const Darwin &Toolchain,
const llvm::Triple &Triple,
const Driver &TheDriver) {
llvm::Triple::OSType OSTy = llvm::Triple::UnknownOS;
StringRef MachOArchName = Toolchain.getMachOArchName(Args);
- if (MachOArchName == "arm64" || MachOArchName == "arm64e") {
-#if __arm64__
- // A clang running on an Apple Silicon mac defaults
- // to building for mac when building for arm64 rather than
- // defaulting to iOS.
+ if (MachOArchName == "arm64" || MachOArchName == "arm64e")
OSTy = llvm::Triple::MacOSX;
-#else
- OSTy = llvm::Triple::IOS;
-#endif
- } else if (MachOArchName == "armv7" || MachOArchName == "armv7s")
+ else if (MachOArchName == "armv7" || MachOArchName == "armv7s")
OSTy = llvm::Triple::IOS;
else if (MachOArchName == "armv7k" || MachOArchName == "arm64_32")
OSTy = llvm::Triple::WatchOS;
else if (MachOArchName != "armv6m" && MachOArchName != "armv7m" &&
MachOArchName != "armv7em")
OSTy = llvm::Triple::MacOSX;
-
if (OSTy == llvm::Triple::UnknownOS)
- return None;
+ return std::nullopt;
return DarwinPlatform::createFromArch(OSTy,
getOSVersion(OSTy, Triple, TheDriver));
}
/// Returns the deployment target that's specified using the -target option.
-Optional<DarwinPlatform> getDeploymentTargetFromTargetArg(
+std::optional<DarwinPlatform> getDeploymentTargetFromTargetArg(
DerivedArgList &Args, const llvm::Triple &Triple, const Driver &TheDriver,
- const Optional<DarwinSDKInfo> &SDKInfo) {
+ const std::optional<DarwinSDKInfo> &SDKInfo) {
if (!Args.hasArg(options::OPT_target))
- return None;
+ return std::nullopt;
if (Triple.getOS() == llvm::Triple::Darwin ||
Triple.getOS() == llvm::Triple::UnknownOS)
- return None;
+ return std::nullopt;
std::string OSVersion = getOSVersion(Triple.getOS(), Triple, TheDriver);
- return DarwinPlatform::createFromTarget(
- Triple, OSVersion, Args.getLastArg(options::OPT_target), SDKInfo);
+ std::optional<llvm::Triple> TargetVariantTriple;
+ for (const Arg *A : Args.filtered(options::OPT_darwin_target_variant)) {
+ llvm::Triple TVT(A->getValue());
+ // Find a matching <arch>-<vendor> target variant triple that can be used.
+ if ((Triple.getArch() == llvm::Triple::aarch64 ||
+ TVT.getArchName() == Triple.getArchName()) &&
+ TVT.getArch() == Triple.getArch() &&
+ TVT.getSubArch() == Triple.getSubArch() &&
+ TVT.getVendor() == Triple.getVendor()) {
+ if (TargetVariantTriple)
+ continue;
+ A->claim();
+ // Accept a -target-variant triple when compiling code that may run on
+ // macOS or Mac Catalyst.
+ if ((Triple.isMacOSX() && TVT.getOS() == llvm::Triple::IOS &&
+ TVT.isMacCatalystEnvironment()) ||
+ (TVT.isMacOSX() && Triple.getOS() == llvm::Triple::IOS &&
+ Triple.isMacCatalystEnvironment())) {
+ TargetVariantTriple = TVT;
+ continue;
+ }
+ TheDriver.Diag(diag::err_drv_target_variant_invalid)
+ << A->getSpelling() << A->getValue();
+ }
+ }
+ return DarwinPlatform::createFromTarget(Triple, OSVersion,
+ Args.getLastArg(options::OPT_target),
+ TargetVariantTriple, SDKInfo);
}
-Optional<DarwinSDKInfo> parseSDKSettings(llvm::vfs::FileSystem &VFS,
- const ArgList &Args,
- const Driver &TheDriver) {
+/// Returns the deployment target that's specified using the -mtargetos option.
+std::optional<DarwinPlatform> getDeploymentTargetFromMTargetOSArg(
+ DerivedArgList &Args, const Driver &TheDriver,
+ const std::optional<DarwinSDKInfo> &SDKInfo) {
+ auto *A = Args.getLastArg(options::OPT_mtargetos_EQ);
+ if (!A)
+ return std::nullopt;
+ llvm::Triple TT(llvm::Twine("unknown-apple-") + A->getValue());
+ switch (TT.getOS()) {
+ case llvm::Triple::MacOSX:
+ case llvm::Triple::IOS:
+ case llvm::Triple::TvOS:
+ case llvm::Triple::WatchOS:
+ case llvm::Triple::XROS:
+ break;
+ default:
+ TheDriver.Diag(diag::err_drv_invalid_os_in_arg)
+ << TT.getOSName() << A->getAsString(Args);
+ return std::nullopt;
+ }
+
+ VersionTuple Version = TT.getOSVersion();
+ if (!Version.getMajor()) {
+ TheDriver.Diag(diag::err_drv_invalid_version_number)
+ << A->getAsString(Args);
+ return std::nullopt;
+ }
+ return DarwinPlatform::createFromMTargetOS(TT.getOS(), Version,
+ TT.getEnvironment(), A, SDKInfo);
+}
+
+std::optional<DarwinSDKInfo> parseSDKSettings(llvm::vfs::FileSystem &VFS,
+ const ArgList &Args,
+ const Driver &TheDriver) {
const Arg *A = Args.getLastArg(options::OPT_isysroot);
if (!A)
- return None;
+ return std::nullopt;
StringRef isysroot = A->getValue();
auto SDKInfoOrErr = parseDarwinSDKInfo(VFS, isysroot);
if (!SDKInfoOrErr) {
llvm::consumeError(SDKInfoOrErr.takeError());
TheDriver.Diag(diag::warn_drv_darwin_sdk_invalid_settings);
- return None;
+ return std::nullopt;
}
return *SDKInfoOrErr;
}
@@ -1858,10 +2202,17 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
SDKInfo = parseSDKSettings(getVFS(), Args, getDriver());
// The OS and the version can be specified using the -target argument.
- Optional<DarwinPlatform> OSTarget =
+ std::optional<DarwinPlatform> OSTarget =
getDeploymentTargetFromTargetArg(Args, getTriple(), getDriver(), SDKInfo);
if (OSTarget) {
- Optional<DarwinPlatform> OSVersionArgTarget =
+ // Disallow mixing -target and -mtargetos=.
+ if (const auto *MTargetOSArg = Args.getLastArg(options::OPT_mtargetos_EQ)) {
+ std::string TargetArgStr = OSTarget->getAsString(Args, Opts);
+ std::string MTargetOSArgStr = MTargetOSArg->getAsString(Args);
+ getDriver().Diag(diag::err_drv_cannot_mix_options)
+ << TargetArgStr << MTargetOSArgStr;
+ }
+ std::optional<DarwinPlatform> OSVersionArgTarget =
getDeploymentTargetFromOSVersionArg(Args, getDriver());
if (OSVersionArgTarget) {
unsigned TargetMajor, TargetMinor, TargetMicro;
@@ -1887,11 +2238,23 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
std::string OSVersionArg =
OSVersionArgTarget->getAsString(Args, Opts);
std::string TargetArg = OSTarget->getAsString(Args, Opts);
- getDriver().Diag(clang::diag::warn_drv_overriding_flag_option)
+ getDriver().Diag(clang::diag::warn_drv_overriding_option)
<< OSVersionArg << TargetArg;
}
}
}
+ } else if ((OSTarget = getDeploymentTargetFromMTargetOSArg(Args, getDriver(),
+ SDKInfo))) {
+ // The OS target can be specified using the -mtargetos= argument.
+ // Disallow mixing -mtargetos= and -m<os>version-min=.
+ std::optional<DarwinPlatform> OSVersionArgTarget =
+ getDeploymentTargetFromOSVersionArg(Args, getDriver());
+ if (OSVersionArgTarget) {
+ std::string MTargetOSArgStr = OSTarget->getAsString(Args, Opts);
+ std::string OSVersionArgStr = OSVersionArgTarget->getAsString(Args, Opts);
+ getDriver().Diag(diag::err_drv_cannot_mix_options)
+ << MTargetOSArgStr << OSVersionArgStr;
+ }
} else {
// The OS target can be specified using the -m<os>version-min argument.
OSTarget = getDeploymentTargetFromOSVersionArg(Args, getDriver());
@@ -1902,7 +2265,7 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
getDeploymentTargetFromEnvironmentVariables(getDriver(), getTriple());
if (OSTarget) {
// Don't infer simulator from the arch when the SDK is also specified.
- Optional<DarwinPlatform> SDKTarget =
+ std::optional<DarwinPlatform> SDKTarget =
inferDeploymentTargetFromSDK(Args, SDKInfo);
if (SDKTarget)
OSTarget->setEnvironment(SDKTarget->getEnvironment());
@@ -1931,17 +2294,20 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
unsigned Major, Minor, Micro;
bool HadExtra;
+ // The major version should not be over this number.
+ const unsigned MajorVersionLimit = 1000;
// Set the tool chain target information.
if (Platform == MacOS) {
if (!Driver::GetReleaseVersion(OSTarget->getOSVersion(), Major, Minor,
Micro, HadExtra) ||
- HadExtra || Major < 10 || Major >= 100 || Minor >= 100 || Micro >= 100)
+ HadExtra || Major < 10 || Major >= MajorVersionLimit || Minor >= 100 ||
+ Micro >= 100)
getDriver().Diag(diag::err_drv_invalid_version_number)
<< OSTarget->getAsString(Args, Opts);
} else if (Platform == IPhoneOS) {
if (!Driver::GetReleaseVersion(OSTarget->getOSVersion(), Major, Minor,
Micro, HadExtra) ||
- HadExtra || Major >= 100 || Minor >= 100 || Micro >= 100)
+ HadExtra || Major >= MajorVersionLimit || Minor >= 100 || Micro >= 100)
getDriver().Diag(diag::err_drv_invalid_version_number)
<< OSTarget->getAsString(Args, Opts);
;
@@ -1973,13 +2339,27 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
} else if (Platform == TvOS) {
if (!Driver::GetReleaseVersion(OSTarget->getOSVersion(), Major, Minor,
Micro, HadExtra) ||
- HadExtra || Major >= 100 || Minor >= 100 || Micro >= 100)
+ HadExtra || Major >= MajorVersionLimit || Minor >= 100 || Micro >= 100)
getDriver().Diag(diag::err_drv_invalid_version_number)
<< OSTarget->getAsString(Args, Opts);
} else if (Platform == WatchOS) {
if (!Driver::GetReleaseVersion(OSTarget->getOSVersion(), Major, Minor,
Micro, HadExtra) ||
- HadExtra || Major >= 10 || Minor >= 100 || Micro >= 100)
+ HadExtra || Major >= MajorVersionLimit || Minor >= 100 || Micro >= 100)
+ getDriver().Diag(diag::err_drv_invalid_version_number)
+ << OSTarget->getAsString(Args, Opts);
+ } else if (Platform == DriverKit) {
+ if (!Driver::GetReleaseVersion(OSTarget->getOSVersion(), Major, Minor,
+ Micro, HadExtra) ||
+ HadExtra || Major < 19 || Major >= MajorVersionLimit || Minor >= 100 ||
+ Micro >= 100)
+ getDriver().Diag(diag::err_drv_invalid_version_number)
+ << OSTarget->getAsString(Args, Opts);
+ } else if (Platform == XROS) {
+ if (!Driver::GetReleaseVersion(OSTarget->getOSVersion(), Major, Minor,
+ Micro, HadExtra) ||
+ HadExtra || Major < 1 || Major >= MajorVersionLimit || Minor >= 100 ||
+ Micro >= 100)
getDriver().Diag(diag::err_drv_invalid_version_number)
<< OSTarget->getAsString(Args, Opts);
} else
@@ -1988,42 +2368,60 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
DarwinEnvironmentKind Environment = OSTarget->getEnvironment();
// Recognize iOS targets with an x86 architecture as the iOS simulator.
if (Environment == NativeEnvironment && Platform != MacOS &&
- OSTarget->canInferSimulatorFromArch() && getTriple().isX86())
+ Platform != DriverKit && OSTarget->canInferSimulatorFromArch() &&
+ getTriple().isX86())
Environment = Simulator;
VersionTuple NativeTargetVersion;
if (Environment == MacCatalyst)
NativeTargetVersion = OSTarget->getNativeTargetVersion();
setTarget(Platform, Environment, Major, Minor, Micro, NativeTargetVersion);
+ TargetVariantTriple = OSTarget->getTargetVariantTriple();
if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
StringRef SDK = getSDKName(A->getValue());
if (SDK.size() > 0) {
size_t StartVer = SDK.find_first_of("0123456789");
StringRef SDKName = SDK.slice(0, StartVer);
- if (!SDKName.startswith(getPlatformFamily()) &&
- !dropSDKNamePrefix(SDKName).startswith(getPlatformFamily()))
+ if (!SDKName.starts_with(getPlatformFamily()) &&
+ !dropSDKNamePrefix(SDKName).starts_with(getPlatformFamily()))
getDriver().Diag(diag::warn_incompatible_sysroot)
<< SDKName << getPlatformFamily();
}
}
}
-// Returns the effective header sysroot path to use. This comes either from
-// -isysroot or --sysroot.
-llvm::StringRef DarwinClang::GetHeaderSysroot(const llvm::opt::ArgList &DriverArgs) const {
- if(DriverArgs.hasArg(options::OPT_isysroot))
- return DriverArgs.getLastArgValue(options::OPT_isysroot);
- if (!getDriver().SysRoot.empty())
- return getDriver().SysRoot;
- return "/";
+// For certain platforms/environments almost all resources (e.g., headers) are
+// located in sub-directories, e.g., for DriverKit they live in
+// <SYSROOT>/System/DriverKit/usr/include (instead of <SYSROOT>/usr/include).
+static void AppendPlatformPrefix(SmallString<128> &Path,
+ const llvm::Triple &T) {
+ if (T.isDriverKit()) {
+ llvm::sys::path::append(Path, "System", "DriverKit");
+ }
+}
+
+// Returns the effective sysroot from either -isysroot or --sysroot, plus the
+// platform prefix (if any).
+llvm::SmallString<128>
+DarwinClang::GetEffectiveSysroot(const llvm::opt::ArgList &DriverArgs) const {
+ llvm::SmallString<128> Path("/");
+ if (DriverArgs.hasArg(options::OPT_isysroot))
+ Path = DriverArgs.getLastArgValue(options::OPT_isysroot);
+ else if (!getDriver().SysRoot.empty())
+ Path = getDriver().SysRoot;
+
+ if (hasEffectiveTriple()) {
+ AppendPlatformPrefix(Path, getEffectiveTriple());
+ }
+ return Path;
}
void DarwinClang::AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
const Driver &D = getDriver();
- llvm::StringRef Sysroot = GetHeaderSysroot(DriverArgs);
+ llvm::SmallString<128> Sysroot = GetEffectiveSysroot(DriverArgs);
bool NoStdInc = DriverArgs.hasArg(options::OPT_nostdinc);
bool NoStdlibInc = DriverArgs.hasArg(options::OPT_nostdlibinc);
@@ -2108,22 +2506,27 @@ void DarwinClang::AddClangCXXStdlibIncludeArgs(
// Also check whether this is used for setting library search paths.
ToolChain::AddClangCXXStdlibIncludeArgs(DriverArgs, CC1Args);
- if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
- DriverArgs.hasArg(options::OPT_nostdincxx))
+ if (DriverArgs.hasArg(options::OPT_nostdinc, options::OPT_nostdlibinc,
+ options::OPT_nostdincxx))
return;
- llvm::StringRef Sysroot = GetHeaderSysroot(DriverArgs);
+ llvm::SmallString<128> Sysroot = GetEffectiveSysroot(DriverArgs);
switch (GetCXXStdlibType(DriverArgs)) {
case ToolChain::CST_Libcxx: {
- // On Darwin, libc++ can be installed in one of the following two places:
- // 1. Alongside the compiler in <install>/include/c++/v1
- // 2. In a SDK (or a custom sysroot) in <sysroot>/usr/include/c++/v1
+ // On Darwin, libc++ can be installed in one of the following places:
+ // 1. Alongside the compiler in <install>/include/c++/v1
+ // 2. Alongside the compiler in <clang-executable-folder>/../include/c++/v1
+ // 3. In a SDK (or a custom sysroot) in <sysroot>/usr/include/c++/v1
//
- // The precendence of paths is as listed above, i.e. we take the first path
- // that exists. Also note that we never include libc++ twice -- we take the
- // first path that exists and don't send the other paths to CC1 (otherwise
+ // The precedence of paths is as listed above, i.e. we take the first path
+ // that exists. Note that we never include libc++ twice -- we take the first
+ // path that exists and don't send the other paths to CC1 (otherwise
// include_next could break).
+ //
+ // Also note that in most cases, (1) and (2) are exactly the same path.
+ // Those two paths will differ only when the `clang` program being run
+ // is actually a symlink to the real executable.
// Check for (1)
// Get from '<install>/bin' to '<install>/include/c++/v1'.
@@ -2140,7 +2543,20 @@ void DarwinClang::AddClangCXXStdlibIncludeArgs(
<< "\"\n";
}
- // Otherwise, check for (2)
+ // (2) Check for the folder where the executable is located, if different.
+ if (getDriver().getInstalledDir() != getDriver().Dir) {
+ InstallBin = llvm::StringRef(getDriver().Dir);
+ llvm::sys::path::append(InstallBin, "..", "include", "c++", "v1");
+ if (getVFS().exists(InstallBin)) {
+ addSystemInclude(DriverArgs, CC1Args, InstallBin);
+ return;
+ } else if (DriverArgs.hasArg(options::OPT_v)) {
+ llvm::errs() << "ignoring nonexistent directory \"" << InstallBin
+ << "\"\n";
+ }
+ }
+
+ // Otherwise, check for (3)
llvm::SmallString<128> SysrootUsr = Sysroot;
llvm::sys::path::append(SysrootUsr, "usr", "include", "c++", "v1");
if (getVFS().exists(SysrootUsr)) {
@@ -2164,17 +2580,6 @@ void DarwinClang::AddClangCXXStdlibIncludeArgs(
switch (arch) {
default: break;
- case llvm::Triple::ppc:
- case llvm::Triple::ppc64:
- IsBaseFound = AddGnuCPlusPlusIncludePaths(DriverArgs, CC1Args, UsrIncludeCxx,
- "4.2.1",
- "powerpc-apple-darwin10",
- arch == llvm::Triple::ppc64 ? "ppc64" : "");
- IsBaseFound |= AddGnuCPlusPlusIncludePaths(DriverArgs, CC1Args, UsrIncludeCxx,
- "4.0.0", "powerpc-apple-darwin10",
- arch == llvm::Triple::ppc64 ? "ppc64" : "");
- break;
-
case llvm::Triple::x86:
case llvm::Triple::x86_64:
IsBaseFound = AddGnuCPlusPlusIncludePaths(DriverArgs, CC1Args, UsrIncludeCxx,
@@ -2213,6 +2618,7 @@ void DarwinClang::AddClangCXXStdlibIncludeArgs(
break;
}
}
+
void DarwinClang::AddCXXStdlibLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
CXXStdlibType Type = GetCXXStdlibType(Args);
@@ -2220,6 +2626,8 @@ void DarwinClang::AddCXXStdlibLibArgs(const ArgList &Args,
switch (Type) {
case ToolChain::CST_Libcxx:
CmdArgs.push_back("-lc++");
+ if (Args.hasArg(options::OPT_fexperimental_library))
+ CmdArgs.push_back("-lc++experimental");
break;
case ToolChain::CST_Libstdcxx:
@@ -2274,6 +2682,12 @@ void DarwinClang::AddCCKextLibArgs(const ArgList &Args,
llvm::sys::path::append(P, "libclang_rt.cc_kext_tvos.a");
} else if (isTargetIPhoneOS()) {
llvm::sys::path::append(P, "libclang_rt.cc_kext_ios.a");
+ } else if (isTargetDriverKit()) {
+ // DriverKit doesn't want extra runtime support.
+ } else if (isTargetXROSDevice()) {
+ llvm::sys::path::append(
+ P, llvm::Twine("libclang_rt.cc_kext_") +
+ llvm::Triple::getOSTypeName(llvm::Triple::XROS) + ".a");
} else {
llvm::sys::path::append(P, "libclang_rt.cc_kext.a");
}
@@ -2301,12 +2715,9 @@ DerivedArgList *MachO::TranslateArgs(const DerivedArgList &Args,
if (A->getOption().matches(options::OPT_Xarch__)) {
// Skip this argument unless the architecture matches either the toolchain
// triple arch, or the arch being bound.
- llvm::Triple::ArchType XarchArch =
- tools::darwin::getArchTypeForMachOArchName(A->getValue(0));
- if (!(XarchArch == getArch() ||
- (!BoundArch.empty() &&
- XarchArch ==
- tools::darwin::getArchTypeForMachOArchName(BoundArch))))
+ StringRef XarchArch = A->getValue(0);
+ if (!(XarchArch == getArchName() ||
+ (!BoundArch.empty() && XarchArch == BoundArch)))
continue;
Arg *OriginalArg = A;
@@ -2376,19 +2787,11 @@ DerivedArgList *MachO::TranslateArgs(const DerivedArgList &Args,
DAL->AddFlagArg(
A, Opts.getOption(options::OPT_mno_warn_nonportable_cfstrings));
break;
-
- case options::OPT_fpascal_strings:
- DAL->AddFlagArg(A, Opts.getOption(options::OPT_mpascal_strings));
- break;
-
- case options::OPT_fno_pascal_strings:
- DAL->AddFlagArg(A, Opts.getOption(options::OPT_mno_pascal_strings));
- break;
}
}
// Add the arch options based on the particular spelling of -arch, to match
- // how the driver driver works.
+ // how the driver works.
if (!BoundArch.empty()) {
StringRef Name = BoundArch;
const Option MCpu = Opts.getOption(options::OPT_mcpu_EQ);
@@ -2499,11 +2902,36 @@ bool Darwin::isAlignedAllocationUnavailable() const {
case WatchOS: // Earlier than 4.0.
OS = llvm::Triple::WatchOS;
break;
+ case XROS: // Always available.
+ return false;
+ case DriverKit: // Always available.
+ return false;
}
return TargetVersion < alignedAllocMinVersion(OS);
}
+static bool sdkSupportsBuiltinModules(const Darwin::DarwinPlatformKind &TargetPlatform, const std::optional<DarwinSDKInfo> &SDKInfo) {
+ if (!SDKInfo)
+ return false;
+
+ VersionTuple SDKVersion = SDKInfo->getVersion();
+ switch (TargetPlatform) {
+ case Darwin::MacOS:
+ return SDKVersion >= VersionTuple(99U);
+ case Darwin::IPhoneOS:
+ return SDKVersion >= VersionTuple(99U);
+ case Darwin::TvOS:
+ return SDKVersion >= VersionTuple(99U);
+ case Darwin::WatchOS:
+ return SDKVersion >= VersionTuple(99U);
+ case Darwin::XROS:
+ return SDKVersion >= VersionTuple(99U);
+ default:
+ return true;
+ }
+}
+
void Darwin::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadKind) const {
@@ -2514,6 +2942,45 @@ void Darwin::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
isAlignedAllocationUnavailable())
CC1Args.push_back("-faligned-alloc-unavailable");
+ addClangCC1ASTargetOptions(DriverArgs, CC1Args);
+
+ // Enable compatibility mode for NSItemProviderCompletionHandler in
+ // Foundation/NSItemProvider.h.
+ CC1Args.push_back("-fcompatibility-qualified-id-block-type-checking");
+
+ // Give static local variables in inline functions hidden visibility when
+ // -fvisibility-inlines-hidden is enabled.
+ if (!DriverArgs.getLastArgNoClaim(
+ options::OPT_fvisibility_inlines_hidden_static_local_var,
+ options::OPT_fno_visibility_inlines_hidden_static_local_var))
+ CC1Args.push_back("-fvisibility-inlines-hidden-static-local-var");
+
+ // Earlier versions of the darwin SDK have the C standard library headers
+ // all together in the Darwin module. That leads to module cycles with
+ // the _Builtin_ modules. e.g. <inttypes.h> on darwin includes <stdint.h>.
+ // The builtin <stdint.h> include-nexts <stdint.h>. When both of those
+ // darwin headers are in the Darwin module, there's a module cycle Darwin ->
+ // _Builtin_stdint -> Darwin (i.e. inttypes.h (darwin) -> stdint.h (builtin) ->
+ // stdint.h (darwin)). This is fixed in later versions of the darwin SDK,
+ // but until then, the builtin headers need to join the system modules.
+ // i.e. when the builtin stdint.h is in the Darwin module too, the cycle
+ // goes away. Note that -fbuiltin-headers-in-system-modules does nothing
+ // to fix the same problem with C++ headers, and is generally fragile.
+ if (!sdkSupportsBuiltinModules(TargetPlatform, SDKInfo))
+ CC1Args.push_back("-fbuiltin-headers-in-system-modules");
+
+ if (!DriverArgs.hasArgNoClaim(options::OPT_fdefine_target_os_macros,
+ options::OPT_fno_define_target_os_macros))
+ CC1Args.push_back("-fdefine-target-os-macros");
+}
+
+void Darwin::addClangCC1ASTargetOptions(
+ const llvm::opt::ArgList &Args, llvm::opt::ArgStringList &CC1ASArgs) const {
+ if (TargetVariantTriple) {
+ CC1ASArgs.push_back("-darwin-target-variant-triple");
+ CC1ASArgs.push_back(Args.MakeArgString(TargetVariantTriple->getTriple()));
+ }
+
if (SDKInfo) {
/// Pass the SDK version to the compiler when the SDK information is
/// available.
@@ -2521,32 +2988,45 @@ void Darwin::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
std::string Arg;
llvm::raw_string_ostream OS(Arg);
OS << "-target-sdk-version=" << V;
- CC1Args.push_back(DriverArgs.MakeArgString(OS.str()));
+ CC1ASArgs.push_back(Args.MakeArgString(OS.str()));
};
if (isTargetMacCatalyst()) {
if (const auto *MacOStoMacCatalystMapping = SDKInfo->getVersionMapping(
DarwinSDKInfo::OSEnvPair::macOStoMacCatalystPair())) {
- Optional<VersionTuple> SDKVersion = MacOStoMacCatalystMapping->map(
- SDKInfo->getVersion(), minimumMacCatalystDeploymentTarget(), None);
+ std::optional<VersionTuple> SDKVersion = MacOStoMacCatalystMapping->map(
+ SDKInfo->getVersion(), minimumMacCatalystDeploymentTarget(),
+ std::nullopt);
EmitTargetSDKVersionArg(
SDKVersion ? *SDKVersion : minimumMacCatalystDeploymentTarget());
}
} else {
EmitTargetSDKVersionArg(SDKInfo->getVersion());
}
- }
-
- // Enable compatibility mode for NSItemProviderCompletionHandler in
- // Foundation/NSItemProvider.h.
- CC1Args.push_back("-fcompatibility-qualified-id-block-type-checking");
- // Give static local variables in inline functions hidden visibility when
- // -fvisibility-inlines-hidden is enabled.
- if (!DriverArgs.getLastArgNoClaim(
- options::OPT_fvisibility_inlines_hidden_static_local_var,
- options::OPT_fno_visibility_inlines_hidden_static_local_var))
- CC1Args.push_back("-fvisibility-inlines-hidden-static-local-var");
+ /// Pass the target variant SDK version to the compiler when the SDK
+ /// information is available and is required for target variant.
+ if (TargetVariantTriple) {
+ if (isTargetMacCatalyst()) {
+ std::string Arg;
+ llvm::raw_string_ostream OS(Arg);
+ OS << "-darwin-target-variant-sdk-version=" << SDKInfo->getVersion();
+ CC1ASArgs.push_back(Args.MakeArgString(OS.str()));
+ } else if (const auto *MacOStoMacCatalystMapping =
+ SDKInfo->getVersionMapping(
+ DarwinSDKInfo::OSEnvPair::macOStoMacCatalystPair())) {
+ if (std::optional<VersionTuple> SDKVersion =
+ MacOStoMacCatalystMapping->map(
+ SDKInfo->getVersion(), minimumMacCatalystDeploymentTarget(),
+ std::nullopt)) {
+ std::string Arg;
+ llvm::raw_string_ostream OS(Arg);
+ OS << "-darwin-target-variant-sdk-version=" << *SDKVersion;
+ CC1ASArgs.push_back(Args.MakeArgString(OS.str()));
+ }
+ }
+ }
+ }
}
DerivedArgList *
@@ -2555,7 +3035,6 @@ Darwin::TranslateArgs(const DerivedArgList &Args, StringRef BoundArch,
// First get the generic Apple args, before moving onto Darwin-specific ones.
DerivedArgList *DAL =
MachO::TranslateArgs(Args, BoundArch, DeviceOffloadKind);
- const OptTable &Opts = getDriver().getOpts();
// If no architecture is bound, none of the translations here are relevant.
if (BoundArch.empty())
@@ -2570,7 +3049,7 @@ Darwin::TranslateArgs(const DerivedArgList &Args, StringRef BoundArch,
// FIXME: It would be far better to avoid inserting those -static arguments,
// but we can't check the deployment target in the translation code until
// it is set here.
- if (isTargetWatchOSBased() ||
+ if (isTargetWatchOSBased() || isTargetDriverKit() || isTargetXROS() ||
(isTargetIOSBased() && !isIPhoneOSVersionLT(6, 0))) {
for (ArgList::iterator it = DAL->begin(), ie = DAL->end(); it != ie; ) {
Arg *A = *it;
@@ -2587,26 +3066,6 @@ Darwin::TranslateArgs(const DerivedArgList &Args, StringRef BoundArch,
}
}
- if (!Args.getLastArg(options::OPT_stdlib_EQ) &&
- GetCXXStdlibType(Args) == ToolChain::CST_Libcxx)
- DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_stdlib_EQ),
- "libc++");
-
- // Validate the C++ standard library choice.
- CXXStdlibType Type = GetCXXStdlibType(*DAL);
- if (Type == ToolChain::CST_Libcxx) {
- // Check whether the target provides libc++.
- StringRef where;
-
- // Complain about targeting iOS < 5.0 in any way.
- if (isTargetIOSBased() && isIPhoneOSVersionLT(5, 0))
- where = "iOS 5.0";
-
- if (where != StringRef()) {
- getDriver().Diag(clang::diag::err_drv_invalid_libcxx_deployment) << where;
- }
- }
-
auto Arch = tools::darwin::getArchTypeForMachOArchName(BoundArch);
if ((Arch == llvm::Triple::arm || Arch == llvm::Triple::thumb)) {
if (Args.hasFlag(options::OPT_fomit_frame_pointer,
@@ -2618,13 +3077,19 @@ Darwin::TranslateArgs(const DerivedArgList &Args, StringRef BoundArch,
return DAL;
}
-bool MachO::IsUnwindTablesDefault(const ArgList &Args) const {
+ToolChain::UnwindTableLevel MachO::getDefaultUnwindTableLevel(const ArgList &Args) const {
// Unwind tables are not emitted if -fno-exceptions is supplied (except when
// targeting x86_64).
- return getArch() == llvm::Triple::x86_64 ||
- (GetExceptionModel(Args) != llvm::ExceptionHandling::SjLj &&
- Args.hasFlag(options::OPT_fexceptions, options::OPT_fno_exceptions,
- true));
+ if (getArch() == llvm::Triple::x86_64 ||
+ (GetExceptionModel(Args) != llvm::ExceptionHandling::SjLj &&
+ Args.hasFlag(options::OPT_fexceptions, options::OPT_fno_exceptions,
+ true)))
+ return (getArch() == llvm::Triple::aarch64 ||
+ getArch() == llvm::Triple::aarch64_32)
+ ? UnwindTableLevel::Synchronous
+ : UnwindTableLevel::Asynchronous;
+
+ return UnwindTableLevel::None;
}
bool MachO::UseDwarfDebugFlags() const {
@@ -2633,6 +3098,12 @@ bool MachO::UseDwarfDebugFlags() const {
return false;
}
+std::string MachO::GetGlobalDebugPathRemapping() const {
+ if (const char *S = ::getenv("RC_DEBUG_PREFIX_MAP"))
+ return S;
+ return {};
+}
+
llvm::ExceptionHandling Darwin::GetExceptionModel(const ArgList &Args) const {
// Darwin uses SjLj exceptions on ARM.
if (getTriple().getArch() != llvm::Triple::arm &&
@@ -2656,7 +3127,7 @@ bool Darwin::SupportsEmbeddedBitcode() const {
bool MachO::isPICDefault() const { return true; }
-bool MachO::isPIEDefault() const { return false; }
+bool MachO::isPIEDefault(const llvm::opt::ArgList &Args) const { return false; }
bool MachO::isPICDefaultForced() const {
return (getArch() == llvm::Triple::x86_64 ||
@@ -2672,6 +3143,8 @@ void Darwin::addMinVersionArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
VersionTuple TargetVersion = getTripleTargetVersion();
+ assert(!isTargetXROS() && "xrOS always uses -platform-version");
+
if (isTargetWatchOS())
CmdArgs.push_back("-watchos_version_min");
else if (isTargetWatchOSSimulator())
@@ -2680,6 +3153,8 @@ void Darwin::addMinVersionArgs(const ArgList &Args,
CmdArgs.push_back("-tvos_version_min");
else if (isTargetTvOSSimulator())
CmdArgs.push_back("-tvos_simulator_version_min");
+ else if (isTargetDriverKit())
+ CmdArgs.push_back("-driverkit_version_min");
else if (isTargetIOSSimulator())
CmdArgs.push_back("-ios_simulator_version_min");
else if (isTargetIOSBased())
@@ -2695,6 +3170,25 @@ void Darwin::addMinVersionArgs(const ArgList &Args,
if (!MinTgtVers.empty() && MinTgtVers > TargetVersion)
TargetVersion = MinTgtVers;
CmdArgs.push_back(Args.MakeArgString(TargetVersion.getAsString()));
+ if (TargetVariantTriple) {
+ assert(isTargetMacOSBased() && "unexpected target");
+ VersionTuple VariantTargetVersion;
+ if (TargetVariantTriple->isMacOSX()) {
+ CmdArgs.push_back("-macosx_version_min");
+ TargetVariantTriple->getMacOSXVersion(VariantTargetVersion);
+ } else {
+ assert(TargetVariantTriple->isiOS() &&
+ TargetVariantTriple->isMacCatalystEnvironment() &&
+ "unexpected target variant triple");
+ CmdArgs.push_back("-maccatalyst_version_min");
+ VariantTargetVersion = TargetVariantTriple->getiOSVersion();
+ }
+ VersionTuple MinTgtVers =
+ TargetVariantTriple->getMinimumSupportedOSVersion();
+ if (MinTgtVers.getMajor() && MinTgtVers > VariantTargetVersion)
+ VariantTargetVersion = MinTgtVers;
+ CmdArgs.push_back(Args.MakeArgString(VariantTargetVersion.getAsString()));
+ }
}
static const char *getPlatformName(Darwin::DarwinPlatformKind Platform,
@@ -2710,58 +3204,100 @@ static const char *getPlatformName(Darwin::DarwinPlatformKind Platform,
return "tvos";
case Darwin::WatchOS:
return "watchos";
+ case Darwin::XROS:
+ return "xros";
+ case Darwin::DriverKit:
+ return "driverkit";
}
llvm_unreachable("invalid platform");
}
void Darwin::addPlatformVersionArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const {
- // -platform_version <platform> <target_version> <sdk_version>
- // Both the target and SDK version support only up to 3 components.
- CmdArgs.push_back("-platform_version");
- std::string PlatformName = getPlatformName(TargetPlatform, TargetEnvironment);
- if (TargetEnvironment == Darwin::Simulator)
- PlatformName += "-simulator";
- CmdArgs.push_back(Args.MakeArgString(PlatformName));
- VersionTuple TargetVersion = getTripleTargetVersion().withoutBuild();
- VersionTuple MinTgtVers = getEffectiveTriple().getMinimumSupportedOSVersion();
- if (!MinTgtVers.empty() && MinTgtVers > TargetVersion)
- TargetVersion = MinTgtVers;
- CmdArgs.push_back(Args.MakeArgString(TargetVersion.getAsString()));
+ auto EmitPlatformVersionArg =
+ [&](const VersionTuple &TV, Darwin::DarwinPlatformKind TargetPlatform,
+ Darwin::DarwinEnvironmentKind TargetEnvironment,
+ const llvm::Triple &TT) {
+ // -platform_version <platform> <target_version> <sdk_version>
+ // Both the target and SDK version support only up to 3 components.
+ CmdArgs.push_back("-platform_version");
+ std::string PlatformName =
+ getPlatformName(TargetPlatform, TargetEnvironment);
+ if (TargetEnvironment == Darwin::Simulator)
+ PlatformName += "-simulator";
+ CmdArgs.push_back(Args.MakeArgString(PlatformName));
+ VersionTuple TargetVersion = TV.withoutBuild();
+ if ((TargetPlatform == Darwin::IPhoneOS ||
+ TargetPlatform == Darwin::TvOS) &&
+ getTriple().getArchName() == "arm64e" &&
+ TargetVersion.getMajor() < 14) {
+ // arm64e slice is supported on iOS/tvOS 14+ only.
+ TargetVersion = VersionTuple(14, 0);
+ }
+ VersionTuple MinTgtVers = TT.getMinimumSupportedOSVersion();
+ if (!MinTgtVers.empty() && MinTgtVers > TargetVersion)
+ TargetVersion = MinTgtVers;
+ CmdArgs.push_back(Args.MakeArgString(TargetVersion.getAsString()));
+
+ if (TargetPlatform == IPhoneOS && TargetEnvironment == MacCatalyst) {
+ // Mac Catalyst programs must use the appropriate iOS SDK version
+ // that corresponds to the macOS SDK version used for the compilation.
+ std::optional<VersionTuple> iOSSDKVersion;
+ if (SDKInfo) {
+ if (const auto *MacOStoMacCatalystMapping =
+ SDKInfo->getVersionMapping(
+ DarwinSDKInfo::OSEnvPair::macOStoMacCatalystPair())) {
+ iOSSDKVersion = MacOStoMacCatalystMapping->map(
+ SDKInfo->getVersion().withoutBuild(),
+ minimumMacCatalystDeploymentTarget(), std::nullopt);
+ }
+ }
+ CmdArgs.push_back(Args.MakeArgString(
+ (iOSSDKVersion ? *iOSSDKVersion
+ : minimumMacCatalystDeploymentTarget())
+ .getAsString()));
+ return;
+ }
- if (isTargetMacCatalyst()) {
- // Mac Catalyst programs must use the appropriate iOS SDK version
- // that corresponds to the macOS SDK version used for the compilation.
- Optional<VersionTuple> iOSSDKVersion;
- if (SDKInfo) {
- if (const auto *MacOStoMacCatalystMapping = SDKInfo->getVersionMapping(
- DarwinSDKInfo::OSEnvPair::macOStoMacCatalystPair())) {
- iOSSDKVersion = MacOStoMacCatalystMapping->map(
- SDKInfo->getVersion().withoutBuild(),
- minimumMacCatalystDeploymentTarget(), None);
- }
- }
- CmdArgs.push_back(Args.MakeArgString(
- (iOSSDKVersion ? *iOSSDKVersion : minimumMacCatalystDeploymentTarget())
- .getAsString()));
+ if (SDKInfo) {
+ VersionTuple SDKVersion = SDKInfo->getVersion().withoutBuild();
+ if (!SDKVersion.getMinor())
+ SDKVersion = VersionTuple(SDKVersion.getMajor(), 0);
+ CmdArgs.push_back(Args.MakeArgString(SDKVersion.getAsString()));
+ } else {
+ // Use an SDK version that's matching the deployment target if the SDK
+ // version is missing. This is preferred over an empty SDK version
+ // (0.0.0) as the system's runtime might expect the linked binary to
+ // contain a valid SDK version in order for the binary to work
+ // correctly. It's reasonable to use the deployment target version as
+ // a proxy for the SDK version because older SDKs don't guarantee
+ // support for deployment targets newer than the SDK versions, so that
+ // rules out using some predetermined older SDK version, which leaves
+ // the deployment target version as the only reasonable choice.
+ CmdArgs.push_back(Args.MakeArgString(TargetVersion.getAsString()));
+ }
+ };
+ EmitPlatformVersionArg(getTripleTargetVersion(), TargetPlatform,
+ TargetEnvironment, getEffectiveTriple());
+ if (!TargetVariantTriple)
return;
- }
-
- if (SDKInfo) {
- VersionTuple SDKVersion = SDKInfo->getVersion().withoutBuild();
- CmdArgs.push_back(Args.MakeArgString(SDKVersion.getAsString()));
+ Darwin::DarwinPlatformKind Platform;
+ Darwin::DarwinEnvironmentKind Environment;
+ VersionTuple TargetVariantVersion;
+ if (TargetVariantTriple->isMacOSX()) {
+ TargetVariantTriple->getMacOSXVersion(TargetVariantVersion);
+ Platform = Darwin::MacOS;
+ Environment = Darwin::NativeEnvironment;
} else {
- // Use an SDK version that's matching the deployment target if the SDK
- // version is missing. This is preferred over an empty SDK version (0.0.0)
- // as the system's runtime might expect the linked binary to contain a
- // valid SDK version in order for the binary to work correctly. It's
- // reasonable to use the deployment target version as a proxy for the
- // SDK version because older SDKs don't guarantee support for deployment
- // targets newer than the SDK versions, so that rules out using some
- // predetermined older SDK version, which leaves the deployment target
- // version as the only reasonable choice.
- CmdArgs.push_back(Args.MakeArgString(TargetVersion.getAsString()));
+ assert(TargetVariantTriple->isiOS() &&
+ TargetVariantTriple->isMacCatalystEnvironment() &&
+ "unexpected target variant triple");
+ TargetVariantVersion = TargetVariantTriple->getiOSVersion();
+ Platform = Darwin::IPhoneOS;
+ Environment = Darwin::MacCatalyst;
}
+ EmitPlatformVersionArg(TargetVariantVersion, Platform, Environment,
+ *TargetVariantTriple);
}
// Add additional link args for the -dynamiclib option.
@@ -2866,7 +3402,7 @@ void Darwin::addStartObjectFileArgs(const ArgList &Args,
}
void Darwin::CheckObjCARC() const {
- if (isTargetIOSBased() || isTargetWatchOSBased() ||
+ if (isTargetIOSBased() || isTargetWatchOSBased() || isTargetXROS() ||
(isTargetMacOSBased() && !isMacosxVersionLT(10, 6)))
return;
getDriver().Diag(diag::err_arc_unsupported_on_toolchain);
@@ -2882,7 +3418,6 @@ SanitizerMask Darwin::getSupportedSanitizers() const {
Res |= SanitizerKind::Leak;
Res |= SanitizerKind::Fuzzer;
Res |= SanitizerKind::FuzzerNoLink;
- Res |= SanitizerKind::Function;
Res |= SanitizerKind::ObjCCast;
// Prior to 10.9, macOS shipped a version of the C++ standard library without
@@ -2892,16 +3427,15 @@ SanitizerMask Darwin::getSupportedSanitizers() const {
!(isTargetIPhoneOS() && isIPhoneOSVersionLT(5, 0)))
Res |= SanitizerKind::Vptr;
- if ((IsX86_64 || IsAArch64) && isTargetMacOSBased()) {
+ if ((IsX86_64 || IsAArch64) &&
+ (isTargetMacOSBased() || isTargetIOSSimulator() ||
+ isTargetTvOSSimulator() || isTargetWatchOSSimulator())) {
Res |= SanitizerKind::Thread;
- } else if (isTargetIOSSimulator() || isTargetTvOSSimulator()) {
- if (IsX86_64)
- Res |= SanitizerKind::Thread;
}
return Res;
}
void Darwin::printVerboseInfo(raw_ostream &OS) const {
- CudaInstallation.print(OS);
- RocmInstallation.print(OS);
+ CudaInstallation->print(OS);
+ RocmInstallation->print(OS);
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h
index 4de122c8d513..5e60b0841d6d 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h
@@ -10,6 +10,7 @@
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_DARWIN_H
#include "Cuda.h"
+#include "LazyDetector.h"
#include "ROCm.h"
#include "clang/Basic/DarwinSDKInfo.h"
#include "clang/Basic/LangOptions.h"
@@ -28,7 +29,8 @@ namespace tools {
namespace darwin {
llvm::Triple::ArchType getArchTypeForMachOArchName(StringRef Str);
-void setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str);
+void setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str,
+ const llvm::opt::ArgList &Args);
class LLVM_LIBRARY_VISIBILITY MachOTool : public Tool {
virtual void anchor();
@@ -63,8 +65,8 @@ class LLVM_LIBRARY_VISIBILITY Linker : public MachOTool {
bool NeedsTempPath(const InputInfoList &Inputs) const;
void AddLinkArgs(Compilation &C, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs,
- const InputInfoList &Inputs, unsigned Version[5],
- bool LinkerIsLLD, bool LinkerIsLLDDarwinNew) const;
+ const InputInfoList &Inputs, VersionTuple Version,
+ bool LinkerIsLLD, bool UsePlatformVersion) const;
public:
Linker(const ToolChain &TC) : MachOTool("darwin::Linker", "linker", TC) {}
@@ -78,6 +80,20 @@ public:
const char *LinkingOutput) const override;
};
+class LLVM_LIBRARY_VISIBILITY StaticLibTool : public MachOTool {
+public:
+ StaticLibTool(const ToolChain &TC)
+ : MachOTool("darwin::StaticLibTool", "static-lib-linker", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
class LLVM_LIBRARY_VISIBILITY Lipo : public MachOTool {
public:
Lipo(const ToolChain &TC) : MachOTool("darwin::Lipo", "lipo", TC) {}
@@ -125,6 +141,7 @@ class LLVM_LIBRARY_VISIBILITY MachO : public ToolChain {
protected:
Tool *buildAssembler() const override;
Tool *buildLinker() const override;
+ Tool *buildStaticLibTool() const override;
Tool *getTool(Action::ActionClass AC) const override;
private:
@@ -132,6 +149,9 @@ private:
mutable std::unique_ptr<tools::darwin::Dsymutil> Dsymutil;
mutable std::unique_ptr<tools::darwin::VerifyDebug> VerifyDebug;
+ /// The version of the linker known to be available in the tool chain.
+ mutable std::optional<VersionTuple> LinkerVersion;
+
public:
MachO(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
@@ -144,6 +164,10 @@ public:
/// example, Apple treats different ARM variations as distinct architectures.
StringRef getMachOArchName(const llvm::opt::ArgList &Args) const;
+ /// Get the version of the linker known to be available for a particular
+ /// compiler invocation (via the `-mlinker-version=` arg).
+ VersionTuple getLinkerVersion(const llvm::opt::ArgList &Args) const;
+
/// Add the linker arguments to link the ARC runtime library.
virtual void AddLinkARCArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const {}
@@ -216,10 +240,6 @@ public:
// expected to use /usr/include/Block.h.
return true;
}
- bool IsIntegratedAssemblerDefault() const override {
- // Default integrated assembler to on for Apple's MachO targets.
- return true;
- }
bool IsMathErrnoDefault() const override { return false; }
@@ -232,19 +252,21 @@ public:
bool UseObjCMixedDispatch() const override { return true; }
- bool IsUnwindTablesDefault(const llvm::opt::ArgList &Args) const override;
+ UnwindTableLevel
+ getDefaultUnwindTableLevel(const llvm::opt::ArgList &Args) const override;
RuntimeLibType GetDefaultRuntimeLibType() const override {
return ToolChain::RLT_CompilerRT;
}
bool isPICDefault() const override;
- bool isPIEDefault() const override;
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override;
bool isPICDefaultForced() const override;
bool SupportsProfiling() const override;
bool UseDwarfDebugFlags() const override;
+ std::string GetGlobalDebugPathRemapping() const override;
llvm::ExceptionHandling
GetExceptionModel(const llvm::opt::ArgList &Args) const override {
@@ -276,7 +298,9 @@ public:
IPhoneOS,
TvOS,
WatchOS,
- LastDarwinPlatform = WatchOS
+ DriverKit,
+ XROS,
+ LastDarwinPlatform = DriverKit
};
enum DarwinEnvironmentKind {
NativeEnvironment,
@@ -293,10 +317,13 @@ public:
mutable VersionTuple OSTargetVersion;
/// The information about the darwin SDK that was used.
- mutable Optional<DarwinSDKInfo> SDKInfo;
+ mutable std::optional<DarwinSDKInfo> SDKInfo;
+
+ /// The target variant triple that was specified (if any).
+ mutable std::optional<llvm::Triple> TargetVariantTriple;
- CudaInstallationDetector CudaInstallation;
- RocmInstallationDetector RocmInstallation;
+ LazyDetector<CudaInstallationDetector> CudaInstallation;
+ LazyDetector<RocmInstallationDetector> RocmInstallation;
private:
void AddDeploymentTarget(llvm::opt::DerivedArgList &Args) const;
@@ -323,7 +350,7 @@ public:
bool isKernelStatic() const override {
return (!(isTargetIPhoneOS() && !isIPhoneOSVersionLT(6, 0)) &&
- !isTargetWatchOS());
+ !isTargetWatchOS() && !isTargetDriverKit());
}
void addProfileRTLibs(const llvm::opt::ArgList &Args,
@@ -379,6 +406,16 @@ public:
return isTargetIPhoneOS() || isTargetIOSSimulator();
}
+ bool isTargetXROSDevice() const {
+ return TargetPlatform == XROS && TargetEnvironment == NativeEnvironment;
+ }
+
+ bool isTargetXROSSimulator() const {
+ return TargetPlatform == XROS && TargetEnvironment == Simulator;
+ }
+
+ bool isTargetXROS() const { return TargetPlatform == XROS; }
+
bool isTargetTvOS() const {
assert(TargetInitialized && "Target not initialized!");
return TargetPlatform == TvOS && TargetEnvironment == NativeEnvironment;
@@ -409,6 +446,11 @@ public:
return TargetPlatform == WatchOS;
}
+ bool isTargetDriverKit() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return TargetPlatform == DriverKit;
+ }
+
bool isTargetMacCatalyst() const {
return TargetPlatform == IPhoneOS && TargetEnvironment == MacCatalyst;
}
@@ -473,6 +515,10 @@ protected:
llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadKind) const override;
+ void addClangCC1ASTargetOptions(
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CC1ASArgs) const override;
+
StringRef getPlatformFamily() const;
StringRef getOSLibraryNameSuffix(bool IgnoreSim = false) const override;
@@ -512,7 +558,8 @@ public:
GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
// Stack protectors default to on for user code on 10.5,
// and for everything in 10.6 and beyond
- if (isTargetIOSBased() || isTargetWatchOSBased())
+ if (isTargetIOSBased() || isTargetWatchOSBased() || isTargetDriverKit() ||
+ isTargetXROS())
return LangOptions::SSPOn;
else if (isTargetMacOSBased() && !isMacosxVersionLT(10, 6))
return LangOptions::SSPOn;
@@ -590,7 +637,8 @@ private:
llvm::StringRef ArchDir,
llvm::StringRef BitDir) const;
- llvm::StringRef GetHeaderSysroot(const llvm::opt::ArgList &DriverArgs) const;
+ llvm::SmallString<128>
+ GetEffectiveSysroot(const llvm::opt::ArgList &DriverArgs) const;
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.cpp
index 9568b47e89e6..9942fc632e0a 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.cpp
@@ -12,6 +12,7 @@
#include "clang/Driver/Driver.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Support/Path.h"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -19,21 +20,19 @@ using namespace clang::driver::toolchains;
using namespace clang;
using namespace llvm::opt;
-/// DragonFly Tools
-
-// For now, DragonFly Assemble does just about the same as for
-// FreeBSD, but this may change soon.
void dragonfly::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
- claimNoWarnArgs(Args);
+ const auto &ToolChain = static_cast<const DragonFly &>(getToolChain());
ArgStringList CmdArgs;
+ claimNoWarnArgs(Args);
+
// When building 32-bit code on DragonFly/pc64, we have to explicitly
// instruct as in the base system to assemble 32-bit code.
- if (getToolChain().getArch() == llvm::Triple::x86)
+ if (ToolChain.getArch() == llvm::Triple::x86)
CmdArgs.push_back("--32");
Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
@@ -44,7 +43,7 @@ void dragonfly::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
for (const auto &II : Inputs)
CmdArgs.push_back(II.getFilename());
- const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ const char *Exec = Args.MakeArgString(ToolChain.GetProgramPath("as"));
C.addCommand(std::make_unique<Command>(JA, *this,
ResponseFileSupport::AtFileCurCP(),
Exec, CmdArgs, Inputs, Output));
@@ -55,21 +54,27 @@ void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
- const Driver &D = getToolChain().getDriver();
+ const auto &ToolChain = static_cast<const DragonFly &>(getToolChain());
+ const Driver &D = ToolChain.getDriver();
+ const llvm::Triple::ArchType Arch = ToolChain.getArch();
+ const bool Static = Args.hasArg(options::OPT_static);
+ const bool Shared = Args.hasArg(options::OPT_shared);
+ const bool Profiling = Args.hasArg(options::OPT_pg);
+ const bool Pie = Args.hasArg(options::OPT_pie);
ArgStringList CmdArgs;
if (!D.SysRoot.empty())
CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
CmdArgs.push_back("--eh-frame-hdr");
- if (Args.hasArg(options::OPT_static)) {
+ if (Static) {
CmdArgs.push_back("-Bstatic");
} else {
if (Args.hasArg(options::OPT_rdynamic))
CmdArgs.push_back("-export-dynamic");
- if (Args.hasArg(options::OPT_shared))
- CmdArgs.push_back("-Bshareable");
- else {
+ if (Shared)
+ CmdArgs.push_back("-shared");
+ else if (!Args.hasArg(options::OPT_r)) {
CmdArgs.push_back("-dynamic-linker");
CmdArgs.push_back("/usr/libexec/ld-elf.so.2");
}
@@ -79,75 +84,92 @@ void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// When building 32-bit code on DragonFly/pc64, we have to explicitly
// instruct ld in the base system to link 32-bit code.
- if (getToolChain().getArch() == llvm::Triple::x86) {
+ if (Arch == llvm::Triple::x86) {
CmdArgs.push_back("-m");
CmdArgs.push_back("elf_i386");
}
+ assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
if (Output.isFilename()) {
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- } else {
- assert(Output.isNothing() && "Invalid output.");
}
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
- if (!Args.hasArg(options::OPT_shared)) {
- if (Args.hasArg(options::OPT_pg))
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("gcrt1.o")));
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_r)) {
+ const char *crt1 = nullptr;
+ const char *crtbegin = nullptr;
+ if (!Shared) {
+ if (Profiling)
+ crt1 = "gcrt1.o";
else {
- if (Args.hasArg(options::OPT_pie))
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("Scrt1.o")));
+ if (Pie)
+ crt1 = "Scrt1.o";
else
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crt1.o")));
+ crt1 = "crt1.o";
}
}
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crti.o")));
- if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crtbeginS.o")));
+
+ if (Shared || Pie)
+ crtbegin = "crtbeginS.o";
else
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o")));
- }
+ crtbegin = "crtbegin.o";
- Args.AddAllArgs(CmdArgs,
- {options::OPT_L, options::OPT_T_Group, options::OPT_e});
+ if (crt1)
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crt1)));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
+ }
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
+ Args.addAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
+ options::OPT_s, options::OPT_t, options::OPT_r});
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
- CmdArgs.push_back("-L/usr/lib/gcc80");
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
- if (!Args.hasArg(options::OPT_static)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs,
+ options::OPT_r)) {
+ if (!Static) {
CmdArgs.push_back("-rpath");
CmdArgs.push_back("/usr/lib/gcc80");
}
+ // Use the static OpenMP runtime with -static-openmp
+ bool StaticOpenMP = Args.hasArg(options::OPT_static_openmp) && !Static;
+ addOpenMPRuntime(CmdArgs, ToolChain, Args, StaticOpenMP);
+
if (D.CCCIsCXX()) {
- if (getToolChain().ShouldLinkCXXStdlib(Args))
- getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (ToolChain.ShouldLinkCXXStdlib(Args))
+ ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
+ CmdArgs.push_back("-lm");
+ }
+
+ // Silence warnings when linking C code with a C++ '-stdlib' argument.
+ Args.ClaimAllArgs(options::OPT_stdlib_EQ);
+
+ // Additional linker set-up and flags for Fortran. This is required in order
+ // to generate executables. As Fortran runtime depends on the C runtime,
+ // these dependencies need to be listed before the C runtime below (i.e.
+ // AddRunTimeLibs).
+ if (D.IsFlangMode()) {
+ addFortranRuntimeLibraryPath(ToolChain, Args, CmdArgs);
+ addFortranRuntimeLibs(ToolChain, Args, CmdArgs);
CmdArgs.push_back("-lm");
}
if (Args.hasArg(options::OPT_pthread))
CmdArgs.push_back("-lpthread");
- if (!Args.hasArg(options::OPT_nolibc)) {
+ if (!Args.hasArg(options::OPT_nolibc))
CmdArgs.push_back("-lc");
- }
- if (Args.hasArg(options::OPT_static) ||
- Args.hasArg(options::OPT_static_libgcc)) {
+ if (Static || Args.hasArg(options::OPT_static_libgcc)) {
CmdArgs.push_back("-lgcc");
CmdArgs.push_back("-lgcc_eh");
} else {
if (Args.hasArg(options::OPT_shared_libgcc)) {
CmdArgs.push_back("-lgcc_pic");
- if (!Args.hasArg(options::OPT_shared))
+ if (!Shared)
CmdArgs.push_back("-lgcc");
} else {
CmdArgs.push_back("-lgcc");
@@ -158,19 +180,21 @@ void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
- if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crtendS.o")));
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_r)) {
+ const char *crtend = nullptr;
+ if (Shared || Pie)
+ crtend ="crtendS.o";
else
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crtend.o")));
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crtn.o")));
+ crtend = "crtend.o";
+
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend)));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
}
- getToolChain().addProfileRTLibs(Args, CmdArgs);
+ ToolChain.addProfileRTLibs(Args, CmdArgs);
- const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
+ const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
C.addCommand(std::make_unique<Command>(JA, *this,
ResponseFileSupport::AtFileCurCP(),
Exec, CmdArgs, Inputs, Output));
@@ -188,8 +212,35 @@ DragonFly::DragonFly(const Driver &D, const llvm::Triple &Triple,
getProgramPaths().push_back(getDriver().Dir);
getFilePaths().push_back(getDriver().Dir + "/../lib");
- getFilePaths().push_back("/usr/lib");
- getFilePaths().push_back("/usr/lib/gcc80");
+ getFilePaths().push_back(concat(getDriver().SysRoot, "/usr/lib"));
+ getFilePaths().push_back(concat(getDriver().SysRoot, "/usr/lib/gcc80"));
+}
+
+void DragonFly::AddClangSystemIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+
+ if (DriverArgs.hasArg(clang::driver::options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> Dir(D.ResourceDir);
+ llvm::sys::path::append(Dir, "include");
+ addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ addExternCSystemInclude(DriverArgs, CC1Args,
+ concat(D.SysRoot, "/usr/include"));
+}
+
+void DragonFly::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ addLibStdCXXIncludePaths(concat(getDriver().SysRoot, "/usr/include/c++/8.0"), "", "",
+ DriverArgs, CC1Args);
}
Tool *DragonFly::buildAssembler() const {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.h
index 3ed5acefaefb..715f6b45519b 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/DragonFly.h
@@ -16,9 +16,10 @@
namespace clang {
namespace driver {
namespace tools {
-/// dragonfly -- Directly call GNU Binutils assembler and linker
+
+/// Directly call GNU Binutils assembler and linker
namespace dragonfly {
-class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
+class LLVM_LIBRARY_VISIBILITY Assembler final : public Tool {
public:
Assembler(const ToolChain &TC)
: Tool("dragonfly::Assembler", "assembler", TC) {}
@@ -31,7 +32,7 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
public:
Linker(const ToolChain &TC) : Tool("dragonfly::Linker", "linker", TC) {}
@@ -55,6 +56,13 @@ public:
bool IsMathErrnoDefault() const override { return false; }
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void addLibStdCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
protected:
Tool *buildAssembler() const override;
Tool *buildLinker() const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp
index 1bfad6115d51..03d68c3df7fb 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp
@@ -6,11 +6,17 @@
//
//===----------------------------------------------------------------------===//
-
#include "Flang.h"
+#include "Arch/RISCV.h"
#include "CommonArgs.h"
+#include "clang/Basic/CodeGenOptions.h"
#include "clang/Driver/Options.h"
+#include "llvm/Frontend/Debug/Options.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/RISCVISAInfo.h"
+#include "llvm/TargetParser/RISCVTargetParser.h"
#include <cassert>
@@ -19,54 +25,653 @@ using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
-void Flang::AddFortranDialectOptions(const ArgList &Args,
+/// Add -x lang to \p CmdArgs for \p Input.
+static void addDashXForInput(const ArgList &Args, const InputInfo &Input,
+ ArgStringList &CmdArgs) {
+ CmdArgs.push_back("-x");
+ // Map the driver type to the frontend type.
+ CmdArgs.push_back(types::getTypeName(Input.getType()));
+}
+
+void Flang::addFortranDialectOptions(const ArgList &Args,
ArgStringList &CmdArgs) const {
- Args.AddAllArgs(
- CmdArgs, {options::OPT_ffixed_form, options::OPT_ffree_form,
- options::OPT_ffixed_line_length_EQ, options::OPT_fopenmp,
- options::OPT_fopenacc, options::OPT_finput_charset_EQ,
- options::OPT_fimplicit_none, options::OPT_fno_implicit_none,
- options::OPT_fbackslash, options::OPT_fno_backslash,
- options::OPT_flogical_abbreviations,
- options::OPT_fno_logical_abbreviations,
- options::OPT_fxor_operator, options::OPT_fno_xor_operator,
- options::OPT_falternative_parameter_statement,
- options::OPT_fdefault_real_8, options::OPT_fdefault_integer_8,
- options::OPT_fdefault_double_8, options::OPT_flarge_sizes});
-}
-
-void Flang::AddPreprocessingOptions(const ArgList &Args,
+ Args.addAllArgs(CmdArgs, {options::OPT_ffixed_form,
+ options::OPT_ffree_form,
+ options::OPT_ffixed_line_length_EQ,
+ options::OPT_fopenmp,
+ options::OPT_fopenmp_version_EQ,
+ options::OPT_fopenacc,
+ options::OPT_finput_charset_EQ,
+ options::OPT_fimplicit_none,
+ options::OPT_fno_implicit_none,
+ options::OPT_fbackslash,
+ options::OPT_fno_backslash,
+ options::OPT_flogical_abbreviations,
+ options::OPT_fno_logical_abbreviations,
+ options::OPT_fxor_operator,
+ options::OPT_fno_xor_operator,
+ options::OPT_falternative_parameter_statement,
+ options::OPT_fdefault_real_8,
+ options::OPT_fdefault_integer_8,
+ options::OPT_fdefault_double_8,
+ options::OPT_flarge_sizes,
+ options::OPT_fno_automatic});
+}
+
+void Flang::addPreprocessingOptions(const ArgList &Args,
ArgStringList &CmdArgs) const {
- Args.AddAllArgs(CmdArgs, {options::OPT_D, options::OPT_U, options::OPT_I,
- options::OPT_cpp, options::OPT_nocpp});
+ Args.addAllArgs(CmdArgs,
+ {options::OPT_P, options::OPT_D, options::OPT_U,
+ options::OPT_I, options::OPT_cpp, options::OPT_nocpp});
+}
+
+/// @C shouldLoopVersion
+///
+/// Check if Loop Versioning should be enabled.
+/// We look for the last of one of the following:
+/// -Ofast, -O4, -O<number> and -f[no-]version-loops-for-stride.
+/// Loop versioning is disabled if the last option is
+/// -fno-version-loops-for-stride.
+/// Loop versioning is enabled if the last option is one of:
+/// -floop-versioning
+/// -Ofast
+/// -O4
+/// -O3
+/// For all other cases, loop versioning is is disabled.
+///
+/// The gfortran compiler automatically enables the option for -O3 or -Ofast.
+///
+/// @return true if loop-versioning should be enabled, otherwise false.
+static bool shouldLoopVersion(const ArgList &Args) {
+ const Arg *LoopVersioningArg = Args.getLastArg(
+ options::OPT_Ofast, options::OPT_O, options::OPT_O4,
+ options::OPT_floop_versioning, options::OPT_fno_loop_versioning);
+ if (!LoopVersioningArg)
+ return false;
+
+ if (LoopVersioningArg->getOption().matches(options::OPT_fno_loop_versioning))
+ return false;
+
+ if (LoopVersioningArg->getOption().matches(options::OPT_floop_versioning))
+ return true;
+
+ if (LoopVersioningArg->getOption().matches(options::OPT_Ofast) ||
+ LoopVersioningArg->getOption().matches(options::OPT_O4))
+ return true;
+
+ if (LoopVersioningArg->getOption().matches(options::OPT_O)) {
+ StringRef S(LoopVersioningArg->getValue());
+ unsigned OptLevel = 0;
+ // Note -Os or Oz woould "fail" here, so return false. Which is the
+ // desiered behavior.
+ if (S.getAsInteger(10, OptLevel))
+ return false;
+
+ return OptLevel > 2;
+ }
+
+ llvm_unreachable("We should not end up here");
+ return false;
}
-void Flang::AddOtherOptions(const ArgList &Args, ArgStringList &CmdArgs) const {
- Args.AddAllArgs(CmdArgs,
+void Flang::addOtherOptions(const ArgList &Args, ArgStringList &CmdArgs) const {
+ Args.addAllArgs(CmdArgs,
{options::OPT_module_dir, options::OPT_fdebug_module_writer,
options::OPT_fintrinsic_modules_path, options::OPT_pedantic,
- options::OPT_std_EQ, options::OPT_W_Joined});
+ options::OPT_std_EQ, options::OPT_W_Joined,
+ options::OPT_fconvert_EQ, options::OPT_fpass_plugin_EQ,
+ options::OPT_funderscoring, options::OPT_fno_underscoring});
+
+ llvm::codegenoptions::DebugInfoKind DebugInfoKind;
+ if (Args.hasArg(options::OPT_gN_Group)) {
+ Arg *gNArg = Args.getLastArg(options::OPT_gN_Group);
+ DebugInfoKind = debugLevelToInfoKind(*gNArg);
+ } else if (Args.hasArg(options::OPT_g_Flag)) {
+ DebugInfoKind = llvm::codegenoptions::DebugLineTablesOnly;
+ } else {
+ DebugInfoKind = llvm::codegenoptions::NoDebugInfo;
+ }
+ addDebugInfoKind(CmdArgs, DebugInfoKind);
+}
+
+void Flang::addCodegenOptions(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ Arg *stackArrays =
+ Args.getLastArg(options::OPT_Ofast, options::OPT_fstack_arrays,
+ options::OPT_fno_stack_arrays);
+ if (stackArrays &&
+ !stackArrays->getOption().matches(options::OPT_fno_stack_arrays))
+ CmdArgs.push_back("-fstack-arrays");
+
+ if (shouldLoopVersion(Args))
+ CmdArgs.push_back("-fversion-loops-for-stride");
+
+ Args.addAllArgs(CmdArgs, {options::OPT_flang_experimental_hlfir,
+ options::OPT_flang_deprecated_no_hlfir,
+ options::OPT_flang_experimental_polymorphism,
+ options::OPT_fno_ppc_native_vec_elem_order,
+ options::OPT_fppc_native_vec_elem_order});
+}
+
+void Flang::addPicOptions(const ArgList &Args, ArgStringList &CmdArgs) const {
+ // ParsePICArgs parses -fPIC/-fPIE and their variants and returns a tuple of
+ // (RelocationModel, PICLevel, IsPIE).
+ llvm::Reloc::Model RelocationModel;
+ unsigned PICLevel;
+ bool IsPIE;
+ std::tie(RelocationModel, PICLevel, IsPIE) =
+ ParsePICArgs(getToolChain(), Args);
+
+ if (auto *RMName = RelocationModelName(RelocationModel)) {
+ CmdArgs.push_back("-mrelocation-model");
+ CmdArgs.push_back(RMName);
+ }
+ if (PICLevel > 0) {
+ CmdArgs.push_back("-pic-level");
+ CmdArgs.push_back(PICLevel == 1 ? "1" : "2");
+ if (IsPIE)
+ CmdArgs.push_back("-pic-is-pie");
+ }
+}
+
+void Flang::AddAArch64TargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ // Handle -msve_vector_bits=<bits>
+ if (Arg *A = Args.getLastArg(options::OPT_msve_vector_bits_EQ)) {
+ StringRef Val = A->getValue();
+ const Driver &D = getToolChain().getDriver();
+ if (Val.equals("128") || Val.equals("256") || Val.equals("512") ||
+ Val.equals("1024") || Val.equals("2048") || Val.equals("128+") ||
+ Val.equals("256+") || Val.equals("512+") || Val.equals("1024+") ||
+ Val.equals("2048+")) {
+ unsigned Bits = 0;
+ if (Val.ends_with("+"))
+ Val = Val.substr(0, Val.size() - 1);
+ else {
+ [[maybe_unused]] bool Invalid = Val.getAsInteger(10, Bits);
+ assert(!Invalid && "Failed to parse value");
+ CmdArgs.push_back(
+ Args.MakeArgString("-mvscale-max=" + llvm::Twine(Bits / 128)));
+ }
+
+ [[maybe_unused]] bool Invalid = Val.getAsInteger(10, Bits);
+ assert(!Invalid && "Failed to parse value");
+ CmdArgs.push_back(
+ Args.MakeArgString("-mvscale-min=" + llvm::Twine(Bits / 128)));
+ // Silently drop requests for vector-length agnostic code as it's implied.
+ } else if (!Val.equals("scalable"))
+ // Handle the unsupported values passed to msve-vector-bits.
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << Val;
+ }
+}
+
+void Flang::AddRISCVTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ const llvm::Triple &Triple = getToolChain().getTriple();
+ // Handle -mrvv-vector-bits=<bits>
+ if (Arg *A = Args.getLastArg(options::OPT_mrvv_vector_bits_EQ)) {
+ StringRef Val = A->getValue();
+ const Driver &D = getToolChain().getDriver();
+
+ // Get minimum VLen from march.
+ unsigned MinVLen = 0;
+ StringRef Arch = riscv::getRISCVArch(Args, Triple);
+ auto ISAInfo = llvm::RISCVISAInfo::parseArchString(
+ Arch, /*EnableExperimentalExtensions*/ true);
+ // Ignore parsing error.
+ if (!errorToBool(ISAInfo.takeError()))
+ MinVLen = (*ISAInfo)->getMinVLen();
+
+ // If the value is "zvl", use MinVLen from march. Otherwise, try to parse
+ // as integer as long as we have a MinVLen.
+ unsigned Bits = 0;
+ if (Val.equals("zvl") && MinVLen >= llvm::RISCV::RVVBitsPerBlock) {
+ Bits = MinVLen;
+ } else if (!Val.getAsInteger(10, Bits)) {
+ // Only accept power of 2 values beteen RVVBitsPerBlock and 65536 that
+ // at least MinVLen.
+ if (Bits < MinVLen || Bits < llvm::RISCV::RVVBitsPerBlock ||
+ Bits > 65536 || !llvm::isPowerOf2_32(Bits))
+ Bits = 0;
+ }
+
+ // If we got a valid value try to use it.
+ if (Bits != 0) {
+ unsigned VScaleMin = Bits / llvm::RISCV::RVVBitsPerBlock;
+ CmdArgs.push_back(
+ Args.MakeArgString("-mvscale-max=" + llvm::Twine(VScaleMin)));
+ CmdArgs.push_back(
+ Args.MakeArgString("-mvscale-min=" + llvm::Twine(VScaleMin)));
+ } else if (!Val.equals("scalable")) {
+ // Handle the unsupported values passed to mrvv-vector-bits.
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << Val;
+ }
+ }
+}
+
+static void addVSDefines(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+
+ unsigned ver = 0;
+ const VersionTuple vt = TC.computeMSVCVersion(nullptr, Args);
+ ver = vt.getMajor() * 10000000 + vt.getMinor().value_or(0) * 100000 +
+ vt.getSubminor().value_or(0);
+ CmdArgs.push_back(Args.MakeArgString("-D_MSC_VER=" + Twine(ver / 100000)));
+ CmdArgs.push_back(Args.MakeArgString("-D_MSC_FULL_VER=" + Twine(ver)));
+ CmdArgs.push_back(Args.MakeArgString("-D_WIN32"));
+
+ llvm::Triple triple = TC.getTriple();
+ if (triple.isAArch64()) {
+ CmdArgs.push_back("-D_M_ARM64=1");
+ } else if (triple.isX86() && triple.isArch32Bit()) {
+ CmdArgs.push_back("-D_M_IX86=600");
+ } else if (triple.isX86() && triple.isArch64Bit()) {
+ CmdArgs.push_back("-D_M_X64=100");
+ } else {
+ llvm_unreachable(
+ "Flang on Windows only supports X86_32, X86_64 and AArch64");
+ }
+}
+
+static void processVSRuntimeLibrary(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ assert(TC.getTriple().isKnownWindowsMSVCEnvironment() &&
+ "can only add VS runtime library on Windows!");
+ // if -fno-fortran-main has been passed, skip linking Fortran_main.a
+ bool LinkFortranMain = !Args.hasArg(options::OPT_no_fortran_main);
+ if (TC.getTriple().isKnownWindowsMSVCEnvironment()) {
+ CmdArgs.push_back(Args.MakeArgString(
+ "--dependent-lib=" + TC.getCompilerRTBasename(Args, "builtins")));
+ }
+ unsigned RTOptionID = options::OPT__SLASH_MT;
+ if (auto *rtl = Args.getLastArg(options::OPT_fms_runtime_lib_EQ)) {
+ RTOptionID = llvm::StringSwitch<unsigned>(rtl->getValue())
+ .Case("static", options::OPT__SLASH_MT)
+ .Case("static_dbg", options::OPT__SLASH_MTd)
+ .Case("dll", options::OPT__SLASH_MD)
+ .Case("dll_dbg", options::OPT__SLASH_MDd)
+ .Default(options::OPT__SLASH_MT);
+ }
+ switch (RTOptionID) {
+ case options::OPT__SLASH_MT:
+ CmdArgs.push_back("-D_MT");
+ CmdArgs.push_back("--dependent-lib=libcmt");
+ if (LinkFortranMain)
+ CmdArgs.push_back("--dependent-lib=Fortran_main.static.lib");
+ CmdArgs.push_back("--dependent-lib=FortranRuntime.static.lib");
+ CmdArgs.push_back("--dependent-lib=FortranDecimal.static.lib");
+ break;
+ case options::OPT__SLASH_MTd:
+ CmdArgs.push_back("-D_MT");
+ CmdArgs.push_back("-D_DEBUG");
+ CmdArgs.push_back("--dependent-lib=libcmtd");
+ if (LinkFortranMain)
+ CmdArgs.push_back("--dependent-lib=Fortran_main.static_dbg.lib");
+ CmdArgs.push_back("--dependent-lib=FortranRuntime.static_dbg.lib");
+ CmdArgs.push_back("--dependent-lib=FortranDecimal.static_dbg.lib");
+ break;
+ case options::OPT__SLASH_MD:
+ CmdArgs.push_back("-D_MT");
+ CmdArgs.push_back("-D_DLL");
+ CmdArgs.push_back("--dependent-lib=msvcrt");
+ if (LinkFortranMain)
+ CmdArgs.push_back("--dependent-lib=Fortran_main.dynamic.lib");
+ CmdArgs.push_back("--dependent-lib=FortranRuntime.dynamic.lib");
+ CmdArgs.push_back("--dependent-lib=FortranDecimal.dynamic.lib");
+ break;
+ case options::OPT__SLASH_MDd:
+ CmdArgs.push_back("-D_MT");
+ CmdArgs.push_back("-D_DEBUG");
+ CmdArgs.push_back("-D_DLL");
+ CmdArgs.push_back("--dependent-lib=msvcrtd");
+ if (LinkFortranMain)
+ CmdArgs.push_back("--dependent-lib=Fortran_main.dynamic_dbg.lib");
+ CmdArgs.push_back("--dependent-lib=FortranRuntime.dynamic_dbg.lib");
+ CmdArgs.push_back("--dependent-lib=FortranDecimal.dynamic_dbg.lib");
+ break;
+ }
+}
+
+void Flang::AddAMDGPUTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ if (Arg *A = Args.getLastArg(options::OPT_mcode_object_version_EQ)) {
+ StringRef Val = A->getValue();
+ CmdArgs.push_back(Args.MakeArgString("-mcode-object-version=" + Val));
+ }
+}
+
+void Flang::addTargetOptions(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ const ToolChain &TC = getToolChain();
+ const llvm::Triple &Triple = TC.getEffectiveTriple();
+ const Driver &D = TC.getDriver();
+
+ std::string CPU = getCPUName(D, Args, Triple);
+ if (!CPU.empty()) {
+ CmdArgs.push_back("-target-cpu");
+ CmdArgs.push_back(Args.MakeArgString(CPU));
+ }
+
+ // Add the target features.
+ switch (TC.getArch()) {
+ default:
+ break;
+ case llvm::Triple::aarch64:
+ getTargetFeatures(D, Triple, Args, CmdArgs, /*ForAs*/ false);
+ AddAArch64TargetArgs(Args, CmdArgs);
+ break;
+
+ case llvm::Triple::r600:
+ case llvm::Triple::amdgcn:
+ getTargetFeatures(D, Triple, Args, CmdArgs, /*ForAs*/ false);
+ AddAMDGPUTargetArgs(Args, CmdArgs);
+ break;
+ case llvm::Triple::riscv64:
+ getTargetFeatures(D, Triple, Args, CmdArgs, /*ForAs*/ false);
+ AddRISCVTargetArgs(Args, CmdArgs);
+ break;
+ case llvm::Triple::x86_64:
+ getTargetFeatures(D, Triple, Args, CmdArgs, /*ForAs*/ false);
+ break;
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_fveclib)) {
+ StringRef Name = A->getValue();
+ if (Name == "SVML") {
+ if (Triple.getArch() != llvm::Triple::x86 &&
+ Triple.getArch() != llvm::Triple::x86_64)
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << Name << Triple.getArchName();
+ } else if (Name == "LIBMVEC-X86") {
+ if (Triple.getArch() != llvm::Triple::x86 &&
+ Triple.getArch() != llvm::Triple::x86_64)
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << Name << Triple.getArchName();
+ } else if (Name == "SLEEF" || Name == "ArmPL") {
+ if (Triple.getArch() != llvm::Triple::aarch64 &&
+ Triple.getArch() != llvm::Triple::aarch64_be)
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << Name << Triple.getArchName();
+ }
+
+ if (Triple.isOSDarwin()) {
+ // flang doesn't currently suport nostdlib, nodefaultlibs. Adding these
+ // here incase they are added someday
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ if (A->getValue() == StringRef{"Accelerate"}) {
+ CmdArgs.push_back("-framework");
+ CmdArgs.push_back("Accelerate");
+ }
+ }
+ }
+ A->render(Args, CmdArgs);
+ }
+
+ if (Triple.isKnownWindowsMSVCEnvironment()) {
+ processVSRuntimeLibrary(TC, Args, CmdArgs);
+ addVSDefines(TC, Args, CmdArgs);
+ }
+
+ // TODO: Add target specific flags, ABI, mtune option etc.
+}
+
+void Flang::addOffloadOptions(Compilation &C, const InputInfoList &Inputs,
+ const JobAction &JA, const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ bool IsOpenMPDevice = JA.isDeviceOffloading(Action::OFK_OpenMP);
+ bool IsHostOffloadingAction = JA.isHostOffloading(Action::OFK_OpenMP) ||
+ JA.isHostOffloading(C.getActiveOffloadKinds());
+
+ // Skips the primary input file, which is the input file that the compilation
+ // proccess will be executed upon (e.g. the host bitcode file) and
+ // adds other secondary input (e.g. device bitcode files for embedding to the
+ // -fembed-offload-object argument or the host IR file for proccessing
+ // during device compilation to the fopenmp-host-ir-file-path argument via
+ // OpenMPDeviceInput). This is condensed logic from the ConstructJob
+ // function inside of the Clang driver for pushing on further input arguments
+ // needed for offloading during various phases of compilation.
+ for (size_t i = 1; i < Inputs.size(); ++i) {
+ if (Inputs[i].getType() == types::TY_Nothing) {
+ // contains nothing, so it's skippable
+ } else if (IsHostOffloadingAction) {
+ CmdArgs.push_back(
+ Args.MakeArgString("-fembed-offload-object=" +
+ getToolChain().getInputFilename(Inputs[i])));
+ } else if (IsOpenMPDevice) {
+ if (Inputs[i].getFilename()) {
+ CmdArgs.push_back("-fopenmp-host-ir-file-path");
+ CmdArgs.push_back(Args.MakeArgString(Inputs[i].getFilename()));
+ } else {
+ llvm_unreachable("missing openmp host-ir file for device offloading");
+ }
+ } else {
+ llvm_unreachable(
+ "unexpectedly given multiple inputs or given unknown input");
+ }
+ }
+
+ if (IsOpenMPDevice) {
+ // -fopenmp-is-target-device is passed along to tell the frontend that it is
+ // generating code for a device, so that only the relevant code is emitted.
+ CmdArgs.push_back("-fopenmp-is-target-device");
+
+ // When in OpenMP offloading mode, enable debugging on the device.
+ Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_target_debug_EQ);
+ if (Args.hasFlag(options::OPT_fopenmp_target_debug,
+ options::OPT_fno_openmp_target_debug, /*Default=*/false))
+ CmdArgs.push_back("-fopenmp-target-debug");
+
+ // When in OpenMP offloading mode, forward assumptions information about
+ // thread and team counts in the device.
+ if (Args.hasFlag(options::OPT_fopenmp_assume_teams_oversubscription,
+ options::OPT_fno_openmp_assume_teams_oversubscription,
+ /*Default=*/false))
+ CmdArgs.push_back("-fopenmp-assume-teams-oversubscription");
+ if (Args.hasFlag(options::OPT_fopenmp_assume_threads_oversubscription,
+ options::OPT_fno_openmp_assume_threads_oversubscription,
+ /*Default=*/false))
+ CmdArgs.push_back("-fopenmp-assume-threads-oversubscription");
+ if (Args.hasArg(options::OPT_fopenmp_assume_no_thread_state))
+ CmdArgs.push_back("-fopenmp-assume-no-thread-state");
+ if (Args.hasArg(options::OPT_fopenmp_assume_no_nested_parallelism))
+ CmdArgs.push_back("-fopenmp-assume-no-nested-parallelism");
+ if (Args.hasArg(options::OPT_nogpulib))
+ CmdArgs.push_back("-nogpulib");
+ }
+}
+
+static void addFloatingPointOptions(const Driver &D, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ StringRef FPContract;
+ bool HonorINFs = true;
+ bool HonorNaNs = true;
+ bool ApproxFunc = false;
+ bool SignedZeros = true;
+ bool AssociativeMath = false;
+ bool ReciprocalMath = false;
+
+ if (const Arg *A = Args.getLastArg(options::OPT_ffp_contract)) {
+ const StringRef Val = A->getValue();
+ if (Val == "fast" || Val == "off") {
+ FPContract = Val;
+ } else if (Val == "on") {
+ // Warn instead of error because users might have makefiles written for
+ // gfortran (which accepts -ffp-contract=on)
+ D.Diag(diag::warn_drv_unsupported_option_for_flang)
+ << Val << A->getOption().getName() << "off";
+ FPContract = "off";
+ } else
+ // Clang's "fast-honor-pragmas" option is not supported because it is
+ // non-standard
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << Val;
+ }
+
+ for (const Arg *A : Args) {
+ auto optId = A->getOption().getID();
+ switch (optId) {
+ // if this isn't an FP option, skip the claim below
+ default:
+ continue;
+
+ case options::OPT_fhonor_infinities:
+ HonorINFs = true;
+ break;
+ case options::OPT_fno_honor_infinities:
+ HonorINFs = false;
+ break;
+ case options::OPT_fhonor_nans:
+ HonorNaNs = true;
+ break;
+ case options::OPT_fno_honor_nans:
+ HonorNaNs = false;
+ break;
+ case options::OPT_fapprox_func:
+ ApproxFunc = true;
+ break;
+ case options::OPT_fno_approx_func:
+ ApproxFunc = false;
+ break;
+ case options::OPT_fsigned_zeros:
+ SignedZeros = true;
+ break;
+ case options::OPT_fno_signed_zeros:
+ SignedZeros = false;
+ break;
+ case options::OPT_fassociative_math:
+ AssociativeMath = true;
+ break;
+ case options::OPT_fno_associative_math:
+ AssociativeMath = false;
+ break;
+ case options::OPT_freciprocal_math:
+ ReciprocalMath = true;
+ break;
+ case options::OPT_fno_reciprocal_math:
+ ReciprocalMath = false;
+ break;
+ case options::OPT_Ofast:
+ [[fallthrough]];
+ case options::OPT_ffast_math:
+ HonorINFs = false;
+ HonorNaNs = false;
+ AssociativeMath = true;
+ ReciprocalMath = true;
+ ApproxFunc = true;
+ SignedZeros = false;
+ FPContract = "fast";
+ break;
+ case options::OPT_fno_fast_math:
+ HonorINFs = true;
+ HonorNaNs = true;
+ AssociativeMath = false;
+ ReciprocalMath = false;
+ ApproxFunc = false;
+ SignedZeros = true;
+ // -fno-fast-math should undo -ffast-math so I return FPContract to the
+ // default. It is important to check it is "fast" (the default) so that
+ // --ffp-contract=off -fno-fast-math --> -ffp-contract=off
+ if (FPContract == "fast")
+ FPContract = "";
+ break;
+ }
+
+ // If we handled this option claim it
+ A->claim();
+ }
+
+ if (!HonorINFs && !HonorNaNs && AssociativeMath && ReciprocalMath &&
+ ApproxFunc && !SignedZeros &&
+ (FPContract == "fast" || FPContract == "")) {
+ CmdArgs.push_back("-ffast-math");
+ return;
+ }
+
+ if (!FPContract.empty())
+ CmdArgs.push_back(Args.MakeArgString("-ffp-contract=" + FPContract));
+
+ if (!HonorINFs)
+ CmdArgs.push_back("-menable-no-infs");
+
+ if (!HonorNaNs)
+ CmdArgs.push_back("-menable-no-nans");
+
+ if (ApproxFunc)
+ CmdArgs.push_back("-fapprox-func");
+
+ if (!SignedZeros)
+ CmdArgs.push_back("-fno-signed-zeros");
+
+ if (AssociativeMath && !SignedZeros)
+ CmdArgs.push_back("-mreassociate");
+
+ if (ReciprocalMath)
+ CmdArgs.push_back("-freciprocal-math");
+}
+
+static void renderRemarksOptions(const ArgList &Args, ArgStringList &CmdArgs,
+ const InputInfo &Input) {
+ StringRef Format = "yaml";
+ if (const Arg *A = Args.getLastArg(options::OPT_fsave_optimization_record_EQ))
+ Format = A->getValue();
+
+ CmdArgs.push_back("-opt-record-file");
+
+ const Arg *A = Args.getLastArg(options::OPT_foptimization_record_file_EQ);
+ if (A) {
+ CmdArgs.push_back(A->getValue());
+ } else {
+ SmallString<128> F;
+
+ if (Args.hasArg(options::OPT_c) || Args.hasArg(options::OPT_S)) {
+ if (Arg *FinalOutput = Args.getLastArg(options::OPT_o))
+ F = FinalOutput->getValue();
+ }
+
+ if (F.empty()) {
+ // Use the input filename.
+ F = llvm::sys::path::stem(Input.getBaseInput());
+ }
+
+ SmallString<32> Extension;
+ Extension += "opt.";
+ Extension += Format;
+
+ llvm::sys::path::replace_extension(F, Extension);
+ CmdArgs.push_back(Args.MakeArgString(F));
+ }
+
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_foptimization_record_passes_EQ)) {
+ CmdArgs.push_back("-opt-record-passes");
+ CmdArgs.push_back(A->getValue());
+ }
+
+ if (!Format.empty()) {
+ CmdArgs.push_back("-opt-record-format");
+ CmdArgs.push_back(Format.data());
+ }
}
void Flang::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output, const InputInfoList &Inputs,
const ArgList &Args, const char *LinkingOutput) const {
const auto &TC = getToolChain();
- // TODO: Once code-generation is available, this will need to be commented
- // out.
- // const llvm::Triple &Triple = TC.getEffectiveTriple();
- // const std::string &TripleStr = Triple.getTriple();
+ const llvm::Triple &Triple = TC.getEffectiveTriple();
+ const std::string &TripleStr = Triple.getTriple();
+ const Driver &D = TC.getDriver();
ArgStringList CmdArgs;
+ DiagnosticsEngine &Diags = D.getDiags();
// Invoke ourselves in -fc1 mode.
CmdArgs.push_back("-fc1");
- // TODO: Once code-generation is available, this will need to be commented
- // out.
// Add the "effective" target triple.
- // CmdArgs.push_back("-triple");
- // CmdArgs.push_back(Args.MakeArgString(TripleStr));
+ CmdArgs.push_back("-triple");
+ CmdArgs.push_back(Args.MakeArgString(TripleStr));
if (isa<PreprocessJobAction>(JA)) {
CmdArgs.push_back("-E");
@@ -98,27 +703,122 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
// Add preprocessing options like -I, -D, etc. if we are using the
// preprocessor (i.e. skip when dealing with e.g. binary files).
if (types::getPreprocessedType(InputType) != types::TY_INVALID)
- AddPreprocessingOptions(Args, CmdArgs);
+ addPreprocessingOptions(Args, CmdArgs);
- AddFortranDialectOptions(Args, CmdArgs);
+ addFortranDialectOptions(Args, CmdArgs);
+
+ // Color diagnostics are parsed by the driver directly from argv and later
+ // re-parsed to construct this job; claim any possible color diagnostic here
+ // to avoid warn_drv_unused_argument.
+ Args.getLastArg(options::OPT_fcolor_diagnostics,
+ options::OPT_fno_color_diagnostics);
+ if (Diags.getDiagnosticOptions().ShowColors)
+ CmdArgs.push_back("-fcolor-diagnostics");
+
+ // LTO mode is parsed by the Clang driver library.
+ LTOKind LTOMode = D.getLTOMode(/* IsOffload */ false);
+ assert(LTOMode != LTOK_Unknown && "Unknown LTO mode.");
+ if (LTOMode == LTOK_Full)
+ CmdArgs.push_back("-flto=full");
+ else if (LTOMode == LTOK_Thin) {
+ Diags.Report(
+ Diags.getCustomDiagID(DiagnosticsEngine::Warning,
+ "the option '-flto=thin' is a work in progress"));
+ CmdArgs.push_back("-flto=thin");
+ }
+
+ // -fPIC and related options.
+ addPicOptions(Args, CmdArgs);
+
+ // Floating point related options
+ addFloatingPointOptions(D, Args, CmdArgs);
+
+ // Add target args, features, etc.
+ addTargetOptions(Args, CmdArgs);
+
+ // Add Codegen options
+ addCodegenOptions(Args, CmdArgs);
+
+ // Add R Group options
+ Args.AddAllArgs(CmdArgs, options::OPT_R_Group);
+
+ // Remarks can be enabled with any of the `-f.*optimization-record.*` flags.
+ if (willEmitRemarks(Args))
+ renderRemarksOptions(Args, CmdArgs, Input);
// Add other compile options
- AddOtherOptions(Args, CmdArgs);
+ addOtherOptions(Args, CmdArgs);
+
+ // Offloading related options
+ addOffloadOptions(C, Inputs, JA, Args, CmdArgs);
// Forward -Xflang arguments to -fc1
Args.AddAllArgValues(CmdArgs, options::OPT_Xflang);
+ CodeGenOptions::FramePointerKind FPKeepKind =
+ getFramePointerKind(Args, Triple);
+
+ const char *FPKeepKindStr = nullptr;
+ switch (FPKeepKind) {
+ case CodeGenOptions::FramePointerKind::None:
+ FPKeepKindStr = "-mframe-pointer=none";
+ break;
+ case CodeGenOptions::FramePointerKind::NonLeaf:
+ FPKeepKindStr = "-mframe-pointer=non-leaf";
+ break;
+ case CodeGenOptions::FramePointerKind::All:
+ FPKeepKindStr = "-mframe-pointer=all";
+ break;
+ }
+ assert(FPKeepKindStr && "unknown FramePointerKind");
+ CmdArgs.push_back(FPKeepKindStr);
+
+ // Forward -mllvm options to the LLVM option parser. In practice, this means
+ // forwarding to `-fc1` as that's where the LLVM parser is run.
+ for (const Arg *A : Args.filtered(options::OPT_mllvm)) {
+ A->claim();
+ A->render(Args, CmdArgs);
+ }
+
+ for (const Arg *A : Args.filtered(options::OPT_mmlir)) {
+ A->claim();
+ A->render(Args, CmdArgs);
+ }
+
+ // Remove any unsupported gfortran diagnostic options
+ for (const Arg *A : Args.filtered(options::OPT_flang_ignored_w_Group)) {
+ A->claim();
+ D.Diag(diag::warn_drv_unsupported_diag_option_for_flang)
+ << A->getOption().getName();
+ }
+
+ // Optimization level for CodeGen.
+ if (const Arg *A = Args.getLastArg(options::OPT_O_Group)) {
+ if (A->getOption().matches(options::OPT_O4)) {
+ CmdArgs.push_back("-O3");
+ D.Diag(diag::warn_O4_is_O3);
+ } else if (A->getOption().matches(options::OPT_Ofast)) {
+ CmdArgs.push_back("-O3");
+ } else {
+ A->render(Args, CmdArgs);
+ }
+ }
+
+ assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
if (Output.isFilename()) {
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- } else {
- assert(Output.isNothing() && "Invalid output.");
}
assert(Input.isFilename() && "Invalid input.");
+
+ if (Args.getLastArg(options::OPT_save_temps_EQ))
+ Args.AddLastArg(CmdArgs, options::OPT_save_temps_EQ);
+
+ addDashXForInput(Args, Input, CmdArgs);
+
CmdArgs.push_back(Input.getFilename());
- const auto& D = C.getDriver();
// TODO: Replace flang-new with flang once the new driver replaces the
// throwaway driver
const char *Exec = Args.MakeArgString(D.GetProgramPath("flang-new", TC));
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.h
index efbdbe854e24..ec2e545a1d0b 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.h
@@ -29,7 +29,7 @@ private:
///
/// \param [in] Args The list of input driver arguments
/// \param [out] CmdArgs The list of output command arguments
- void AddFortranDialectOptions(const llvm::opt::ArgList &Args,
+ void addFortranDialectOptions(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
/// Extract preprocessing options from the driver arguments and add them to
@@ -37,14 +37,71 @@ private:
///
/// \param [in] Args The list of input driver arguments
/// \param [out] CmdArgs The list of output command arguments
- void AddPreprocessingOptions(const llvm::opt::ArgList &Args,
+ void addPreprocessingOptions(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
+
+ /// Extract PIC options from the driver arguments and add them to
+ /// the command arguments.
+ ///
+ /// \param [in] Args The list of input driver arguments
+ /// \param [out] CmdArgs The list of output command arguments
+ void addPicOptions(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+
+ /// Extract target options from the driver arguments and add them to
+ /// the command arguments.
+ ///
+ /// \param [in] Args The list of input driver arguments
+ /// \param [out] CmdArgs The list of output command arguments
+ void addTargetOptions(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+
+ /// Add specific options for AArch64 target.
+ ///
+ /// \param [in] Args The list of input driver arguments
+ /// \param [out] CmdArgs The list of output command arguments
+ void AddAArch64TargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+
+ /// Add specific options for AMDGPU target.
+ ///
+ /// \param [in] Args The list of input driver arguments
+ /// \param [out] CmdArgs The list of output command arguments
+ void AddAMDGPUTargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+
+ /// Add specific options for RISC-V target.
+ ///
+ /// \param [in] Args The list of input driver arguments
+ /// \param [out] CmdArgs The list of output command arguments
+ void AddRISCVTargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+
+ /// Extract offload options from the driver arguments and add them to
+ /// the command arguments.
+ /// \param [in] C The current compilation for the driver invocation
+ /// \param [in] Inputs The input infomration on the current file inputs
+ /// \param [in] JA The job action
+ /// \param [in] Args The list of input driver arguments
+ /// \param [out] CmdArgs The list of output command arguments
+ void addOffloadOptions(Compilation &C, const InputInfoList &Inputs,
+ const JobAction &JA, const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+
+ /// Extract options for code generation from the driver arguments and add them
+ /// to the command arguments.
+ ///
+ /// \param [in] Args The list of input driver arguments
+ /// \param [out] CmdArgs The list of output command arguments
+ void addCodegenOptions(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+
/// Extract other compilation options from the driver arguments and add them
/// to the command arguments.
///
/// \param [in] Args The list of input driver arguments
/// \param [out] CmdArgs The list of output command arguments
- void AddOtherOptions(const llvm::opt::ArgList &Args,
+ void addOtherOptions(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
public:
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
index 5dcf74dabf4f..b7c9e0e51cdb 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
@@ -11,6 +11,7 @@
#include "Arch/Mips.h"
#include "Arch/Sparc.h"
#include "CommonArgs.h"
+#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
@@ -29,13 +30,16 @@ void freebsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
- claimNoWarnArgs(Args);
- ArgStringList CmdArgs;
+ const auto &ToolChain = static_cast<const FreeBSD &>(getToolChain());
const auto &D = getToolChain().getDriver();
+ const llvm::Triple &Triple = ToolChain.getTriple();
+ ArgStringList CmdArgs;
+
+ claimNoWarnArgs(Args);
// When building 32-bit code on FreeBSD/amd64, we have to explicitly
// instruct as in the base system to assemble 32-bit code.
- switch (getToolChain().getArch()) {
+ switch (ToolChain.getArch()) {
default:
break;
case llvm::Triple::x86:
@@ -51,7 +55,7 @@ void freebsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::mips64el: {
StringRef CPUName;
StringRef ABIName;
- mips::getMipsCPUAndABI(Args, getToolChain().getTriple(), CPUName, ABIName);
+ mips::getMipsCPUAndABI(Args, Triple, CPUName, ABIName);
CmdArgs.push_back("-march");
CmdArgs.push_back(CPUName.data());
@@ -59,7 +63,7 @@ void freebsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-mabi");
CmdArgs.push_back(mips::getGnuCompatibleMipsABIName(ABIName).data());
- if (getToolChain().getTriple().isLittleEndian())
+ if (Triple.isLittleEndian())
CmdArgs.push_back("-EL");
else
CmdArgs.push_back("-EB");
@@ -70,39 +74,27 @@ void freebsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
A->claim();
}
- AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
+ AddAssemblerKPIC(ToolChain, Args, CmdArgs);
break;
}
case llvm::Triple::arm:
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb: {
- arm::FloatABI ABI = arm::getARMFloatABI(getToolChain(), Args);
+ arm::FloatABI ABI = arm::getARMFloatABI(ToolChain, Args);
if (ABI == arm::FloatABI::Hard)
CmdArgs.push_back("-mfpu=vfp");
else
CmdArgs.push_back("-mfpu=softvfp");
- switch (getToolChain().getTriple().getEnvironment()) {
- case llvm::Triple::GNUEABIHF:
- case llvm::Triple::GNUEABI:
- case llvm::Triple::EABI:
- CmdArgs.push_back("-meabi=5");
- break;
-
- default:
- CmdArgs.push_back("-matpcs");
- }
+ CmdArgs.push_back("-meabi=5");
break;
}
- case llvm::Triple::sparc:
- case llvm::Triple::sparcel:
case llvm::Triple::sparcv9: {
- std::string CPU = getCPUName(Args, getToolChain().getTriple());
- CmdArgs.push_back(
- sparc::getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
- AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
+ std::string CPU = getCPUName(D, Args, Triple);
+ CmdArgs.push_back(sparc::getSparcAsmModeForCPU(CPU, Triple));
+ AddAssemblerKPIC(ToolChain, Args, CmdArgs);
break;
}
}
@@ -110,7 +102,7 @@ void freebsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
for (const Arg *A : Args.filtered(options::OPT_ffile_prefix_map_EQ,
options::OPT_fdebug_prefix_map_EQ)) {
StringRef Map = A->getValue();
- if (Map.find('=') == StringRef::npos)
+ if (!Map.contains('='))
D.Diag(diag::err_drv_invalid_argument_to_option)
<< Map << A->getOption().getName();
else {
@@ -128,7 +120,7 @@ void freebsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
for (const auto &II : Inputs)
CmdArgs.push_back(II.getFilename());
- const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ const char *Exec = Args.MakeArgString(ToolChain.GetProgramPath("as"));
C.addCommand(std::make_unique<Command>(JA, *this,
ResponseFileSupport::AtFileCurCP(),
Exec, CmdArgs, Inputs, Output));
@@ -139,13 +131,12 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
- const toolchains::FreeBSD &ToolChain =
- static_cast<const toolchains::FreeBSD &>(getToolChain());
+ const auto &ToolChain = static_cast<const FreeBSD &>(getToolChain());
const Driver &D = ToolChain.getDriver();
const llvm::Triple::ArchType Arch = ToolChain.getArch();
const bool IsPIE =
!Args.hasArg(options::OPT_shared) &&
- (Args.hasArg(options::OPT_pie) || ToolChain.isPIEDefault());
+ (Args.hasArg(options::OPT_pie) || ToolChain.isPIEDefault(Args));
ArgStringList CmdArgs;
// Silence warning for "clang -g foo.o -o foo"
@@ -169,16 +160,14 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_rdynamic))
CmdArgs.push_back("-export-dynamic");
if (Args.hasArg(options::OPT_shared)) {
- CmdArgs.push_back("-Bshareable");
- } else {
+ CmdArgs.push_back("-shared");
+ } else if (!Args.hasArg(options::OPT_r)) {
CmdArgs.push_back("-dynamic-linker");
CmdArgs.push_back("/libexec/ld-elf.so.1");
}
const llvm::Triple &T = ToolChain.getTriple();
- if (T.getOSMajorVersion() >= 9) {
- if (Arch == llvm::Triple::arm || Arch == llvm::Triple::sparc || T.isX86())
- CmdArgs.push_back("--hash-style=both");
- }
+ if (Arch == llvm::Triple::arm || T.isX86())
+ CmdArgs.push_back("--hash-style=both");
CmdArgs.push_back("--enable-new-dtags");
}
@@ -220,13 +209,10 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
else
CmdArgs.push_back("elf64ltsmip_fbsd");
break;
- case llvm::Triple::riscv32:
- CmdArgs.push_back("-m");
- CmdArgs.push_back("elf32lriscv");
- break;
case llvm::Triple::riscv64:
CmdArgs.push_back("-m");
CmdArgs.push_back("elf64lriscv");
+ CmdArgs.push_back("-X");
break;
default:
break;
@@ -240,14 +226,14 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
if (Output.isFilename()) {
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- } else {
- assert(Output.isNothing() && "Invalid output.");
}
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_r)) {
const char *crt1 = nullptr;
if (!Args.hasArg(options::OPT_shared)) {
if (Args.hasArg(options::OPT_pg))
@@ -275,16 +261,20 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
- Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
- Args.AddAllArgs(CmdArgs, options::OPT_e);
- Args.AddAllArgs(CmdArgs, options::OPT_s);
- Args.AddAllArgs(CmdArgs, options::OPT_t);
- Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag);
- Args.AddAllArgs(CmdArgs, options::OPT_r);
+ Args.addAllArgs(CmdArgs, {options::OPT_T_Group, options::OPT_s,
+ options::OPT_t, options::OPT_r});
if (D.isUsingLTO()) {
assert(!Inputs.empty() && "Must have at least one input.");
- addLTOOptions(ToolChain, Args, CmdArgs, Output, Inputs[0],
+ // Find the first filename InputInfo object.
+ auto Input = llvm::find_if(
+ Inputs, [](const InputInfo &II) -> bool { return II.isFilename(); });
+ if (Input == Inputs.end())
+ // For a very rare case, all of the inputs to the linker are
+ // InputArg. If that happens, just use the first InputInfo.
+ Input = Inputs.begin();
+
+ addLTOOptions(ToolChain, Args, CmdArgs, Output, *Input,
D.getLTOMode() == LTOK_Thin);
}
@@ -293,9 +283,10 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
addLinkerCompressDebugSectionsOption(ToolChain, Args, CmdArgs);
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
- bool Profiling = Args.hasArg(options::OPT_pg) &&
- ToolChain.getTriple().getOSMajorVersion() < 14;
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ unsigned Major = ToolChain.getTriple().getOSMajorVersion();
+ bool Profiling = Args.hasArg(options::OPT_pg) && Major != 0 && Major < 14;
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs,
+ options::OPT_r)) {
// Use the static OpenMP runtime with -static-openmp
bool StaticOpenMP = Args.hasArg(options::OPT_static_openmp) &&
!Args.hasArg(options::OPT_static);
@@ -309,10 +300,27 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
else
CmdArgs.push_back("-lm");
}
+
+ // Silence warnings when linking C code with a C++ '-stdlib' argument.
+ Args.ClaimAllArgs(options::OPT_stdlib_EQ);
+
+ // Additional linker set-up and flags for Fortran. This is required in order
+ // to generate executables. As Fortran runtime depends on the C runtime,
+ // these dependencies need to be listed before the C runtime below (i.e.
+ // AddRunTimeLibs).
+ if (D.IsFlangMode()) {
+ addFortranRuntimeLibraryPath(ToolChain, Args, CmdArgs);
+ addFortranRuntimeLibs(ToolChain, Args, CmdArgs);
+ if (Profiling)
+ CmdArgs.push_back("-lm_p");
+ else
+ CmdArgs.push_back("-lm");
+ }
+
if (NeedsSanitizerDeps)
- linkSanitizerRuntimeDeps(ToolChain, CmdArgs);
+ linkSanitizerRuntimeDeps(ToolChain, Args, CmdArgs);
if (NeedsXRayDeps)
- linkXRayRuntimeDeps(ToolChain, CmdArgs);
+ linkXRayRuntimeDeps(ToolChain, Args, CmdArgs);
// FIXME: For some reason GCC passes -lgcc and -lgcc_s before adding
// the default system libraries. Just mimic this for now.
if (Profiling)
@@ -358,11 +366,14 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_r)) {
+ const char *crtend = nullptr;
if (Args.hasArg(options::OPT_shared) || IsPIE)
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtendS.o")));
+ crtend = "crtendS.o";
else
- CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtend.o")));
+ crtend = "crtend.o";
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend)));
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
}
@@ -382,64 +393,71 @@ FreeBSD::FreeBSD(const Driver &D, const llvm::Triple &Triple,
// When targeting 32-bit platforms, look for '/usr/lib32/crt1.o' and fall
// back to '/usr/lib' if it doesn't exist.
- if ((Triple.getArch() == llvm::Triple::x86 || Triple.isMIPS32() ||
- Triple.isPPC32()) &&
- D.getVFS().exists(getDriver().SysRoot + "/usr/lib32/crt1.o"))
- getFilePaths().push_back(getDriver().SysRoot + "/usr/lib32");
+ if (Triple.isArch32Bit() &&
+ D.getVFS().exists(concat(getDriver().SysRoot, "/usr/lib32/crt1.o")))
+ getFilePaths().push_back(concat(getDriver().SysRoot, "/usr/lib32"));
else
- getFilePaths().push_back(getDriver().SysRoot + "/usr/lib");
+ getFilePaths().push_back(concat(getDriver().SysRoot, "/usr/lib"));
}
-ToolChain::CXXStdlibType FreeBSD::GetDefaultCXXStdlibType() const {
- if (getTriple().getOSMajorVersion() >= 10)
- return ToolChain::CST_Libcxx;
- return ToolChain::CST_Libstdcxx;
-}
+void FreeBSD::AddClangSystemIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
-unsigned FreeBSD::GetDefaultDwarfVersion() const {
- if (getTriple().getOSMajorVersion() < 12)
- return 2;
- return 4;
+ if (DriverArgs.hasArg(clang::driver::options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> Dir(D.ResourceDir);
+ llvm::sys::path::append(Dir, "include");
+ addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ // Check for configure-time C include directories.
+ StringRef CIncludeDirs(C_INCLUDE_DIRS);
+ if (CIncludeDirs != "") {
+ SmallVector<StringRef, 5> dirs;
+ CIncludeDirs.split(dirs, ":");
+ for (StringRef dir : dirs) {
+ StringRef Prefix =
+ llvm::sys::path::is_absolute(dir) ? StringRef(D.SysRoot) : "";
+ addExternCSystemInclude(DriverArgs, CC1Args, Prefix + dir);
+ }
+ return;
+ }
+
+ addExternCSystemInclude(DriverArgs, CC1Args,
+ concat(D.SysRoot, "/usr/include"));
}
void FreeBSD::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/usr/include/c++/v1");
-}
-
-void FreeBSD::addLibStdCxxIncludePaths(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const {
- addLibStdCXXIncludePaths(getDriver().SysRoot + "/usr/include/c++/4.2", "", "",
- DriverArgs, CC1Args);
+ concat(getDriver().SysRoot, "/usr/include/c++/v1"));
}
void FreeBSD::AddCXXStdlibLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
- CXXStdlibType Type = GetCXXStdlibType(Args);
- bool Profiling =
- Args.hasArg(options::OPT_pg) && getTriple().getOSMajorVersion() < 14;
-
- switch (Type) {
- case ToolChain::CST_Libcxx:
- CmdArgs.push_back(Profiling ? "-lc++_p" : "-lc++");
- break;
+ unsigned Major = getTriple().getOSMajorVersion();
+ bool Profiling = Args.hasArg(options::OPT_pg) && Major != 0 && Major < 14;
- case ToolChain::CST_Libstdcxx:
- CmdArgs.push_back(Profiling ? "-lstdc++_p" : "-lstdc++");
- break;
- }
+ CmdArgs.push_back(Profiling ? "-lc++_p" : "-lc++");
+ if (Args.hasArg(options::OPT_fexperimental_library))
+ CmdArgs.push_back("-lc++experimental");
}
void FreeBSD::AddCudaIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
+ CudaInstallation->AddCudaIncludeArgs(DriverArgs, CC1Args);
}
void FreeBSD::AddHIPIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
+ RocmInstallation->AddHIPIncludeArgs(DriverArgs, CC1Args);
}
Tool *FreeBSD::buildAssembler() const {
@@ -448,26 +466,16 @@ Tool *FreeBSD::buildAssembler() const {
Tool *FreeBSD::buildLinker() const { return new tools::freebsd::Linker(*this); }
-llvm::ExceptionHandling FreeBSD::GetExceptionModel(const ArgList &Args) const {
- // FreeBSD uses SjLj exceptions on ARM oabi.
- switch (getTriple().getEnvironment()) {
- case llvm::Triple::GNUEABIHF:
- case llvm::Triple::GNUEABI:
- case llvm::Triple::EABI:
- return llvm::ExceptionHandling::None;
- default:
- if (getTriple().getArch() == llvm::Triple::arm ||
- getTriple().getArch() == llvm::Triple::thumb)
- return llvm::ExceptionHandling::SjLj;
- return llvm::ExceptionHandling::None;
- }
-}
-
bool FreeBSD::HasNativeLLVMSupport() const { return true; }
-bool FreeBSD::IsUnwindTablesDefault(const ArgList &Args) const { return true; }
+ToolChain::UnwindTableLevel
+FreeBSD::getDefaultUnwindTableLevel(const ArgList &Args) const {
+ return UnwindTableLevel::Asynchronous;
+}
-bool FreeBSD::isPIEDefault() const { return getSanitizerArgs().requiresPIE(); }
+bool FreeBSD::isPIEDefault(const llvm::opt::ArgList &Args) const {
+ return getSanitizerArgs(Args).requiresPIE();
+}
SanitizerMask FreeBSD::getSupportedSanitizers() const {
const bool IsAArch64 = getTriple().getArch() == llvm::Triple::aarch64;
@@ -479,12 +487,11 @@ SanitizerMask FreeBSD::getSupportedSanitizers() const {
Res |= SanitizerKind::PointerCompare;
Res |= SanitizerKind::PointerSubtract;
Res |= SanitizerKind::Vptr;
- if (IsX86_64 || IsMIPS64) {
+ if (IsAArch64 || IsX86_64 || IsMIPS64) {
Res |= SanitizerKind::Leak;
Res |= SanitizerKind::Thread;
}
- if (IsX86 || IsX86_64) {
- Res |= SanitizerKind::Function;
+ if (IsAArch64 || IsX86 || IsX86_64) {
Res |= SanitizerKind::SafeStack;
Res |= SanitizerKind::Fuzzer;
Res |= SanitizerKind::FuzzerNoLink;
@@ -492,18 +499,7 @@ SanitizerMask FreeBSD::getSupportedSanitizers() const {
if (IsAArch64 || IsX86_64) {
Res |= SanitizerKind::KernelAddress;
Res |= SanitizerKind::KernelMemory;
- }
- if (IsX86_64) {
Res |= SanitizerKind::Memory;
}
return Res;
}
-
-void FreeBSD::addClangTargetOptions(const ArgList &DriverArgs,
- ArgStringList &CC1Args,
- Action::OffloadKind) const {
- if (!DriverArgs.hasFlag(options::OPT_fuse_init_array,
- options::OPT_fno_use_init_array,
- getTriple().getOSMajorVersion() >= 12))
- CC1Args.push_back("-fno-use-init-array");
-}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.h
index abc0876cef26..7ab63905ed4f 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.h
@@ -17,9 +17,9 @@ namespace clang {
namespace driver {
namespace tools {
-/// freebsd -- Directly call GNU Binutils assembler and linker
+/// Directly call GNU Binutils assembler and linker
namespace freebsd {
-class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
+class LLVM_LIBRARY_VISIBILITY Assembler final : public Tool {
public:
Assembler(const ToolChain &TC)
: Tool("freebsd::Assembler", "assembler", TC) {}
@@ -32,7 +32,7 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
public:
Linker(const ToolChain &TC) : Tool("freebsd::Linker", "linker", TC) {}
@@ -58,12 +58,19 @@ public:
bool IsMathErrnoDefault() const override { return false; }
bool IsObjCNonFragileABIDefault() const override { return true; }
- CXXStdlibType GetDefaultCXXStdlibType() const override;
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ RuntimeLibType GetDefaultRuntimeLibType() const override {
+ return ToolChain::RLT_CompilerRT;
+ }
+ CXXStdlibType GetDefaultCXXStdlibType() const override {
+ return ToolChain::CST_Libcxx;
+ }
+
void addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- void
- addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const override;
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
@@ -71,19 +78,14 @@ public:
void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- llvm::ExceptionHandling
- GetExceptionModel(const llvm::opt::ArgList &Args) const override;
- bool IsUnwindTablesDefault(const llvm::opt::ArgList &Args) const override;
- bool isPIEDefault() const override;
+ UnwindTableLevel
+ getDefaultUnwindTableLevel(const llvm::opt::ArgList &Args) const override;
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override;
SanitizerMask getSupportedSanitizers() const override;
- unsigned GetDefaultDwarfVersion() const override;
+ unsigned GetDefaultDwarfVersion() const override { return 4; }
// Until dtrace (via CTF) and LLDB can deal with distributed debug info,
// FreeBSD defaults to standalone/full debug info.
bool GetDefaultStandaloneDebug() const override { return true; }
- void
- addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args,
- Action::OffloadKind DeviceOffloadKind) const override;
protected:
Tool *buildAssembler() const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp
index fd9804a7f353..14b838500bec 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp
@@ -12,6 +12,7 @@
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/MultilibBuilder.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
#include "llvm/Option/ArgList.h"
@@ -33,10 +34,11 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
- const toolchains::Fuchsia &ToolChain =
- static_cast<const toolchains::Fuchsia &>(getToolChain());
+ const auto &ToolChain = static_cast<const Fuchsia &>(getToolChain());
const Driver &D = ToolChain.getDriver();
+ const llvm::Triple &Triple = ToolChain.getEffectiveTriple();
+
ArgStringList CmdArgs;
// Silence warning for "clang -g foo.o -o foo"
@@ -53,6 +55,9 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-z");
CmdArgs.push_back("now");
+ CmdArgs.push_back("-z");
+ CmdArgs.push_back("start-stop-visibility=hidden");
+
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
if (llvm::sys::path::filename(Exec).equals_insensitive("ld.lld") ||
llvm::sys::path::stem(Exec).equals_insensitive("ld.lld")) {
@@ -60,6 +65,8 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("rodynamic");
CmdArgs.push_back("-z");
CmdArgs.push_back("separate-loadable-segments");
+ CmdArgs.push_back("-z");
+ CmdArgs.push_back("rel");
CmdArgs.push_back("--pack-dyn-relocs=relr");
}
@@ -82,6 +89,14 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("--hash-style=gnu");
}
+ if (ToolChain.getArch() == llvm::Triple::aarch64) {
+ CmdArgs.push_back("--execute-only");
+
+ std::string CPU = getCPUName(D, Args, Triple);
+ if (CPU.empty() || CPU == "generic" || CPU == "cortex-a53")
+ CmdArgs.push_back("--fix-cortex-a53-843419");
+ }
+
CmdArgs.push_back("--eh-frame-hdr");
if (Args.hasArg(options::OPT_static))
@@ -89,9 +104,9 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
else if (Args.hasArg(options::OPT_shared))
CmdArgs.push_back("-shared");
- const SanitizerArgs &SanArgs = ToolChain.getSanitizerArgs();
+ const SanitizerArgs &SanArgs = ToolChain.getSanitizerArgs(Args);
- if (!Args.hasArg(options::OPT_shared)) {
+ if (!Args.hasArg(options::OPT_shared) && !Args.hasArg(options::OPT_r)) {
std::string Dyld = D.DyldPrefix;
if (SanArgs.needsAsanRt() && SanArgs.needsSharedRt())
Dyld += "asan/";
@@ -104,32 +119,42 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(Dyld));
}
+ if (ToolChain.getArch() == llvm::Triple::riscv64)
+ CmdArgs.push_back("-X");
+
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_r)) {
if (!Args.hasArg(options::OPT_shared)) {
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("Scrt1.o")));
}
}
- Args.AddAllArgs(CmdArgs, options::OPT_L);
- Args.AddAllArgs(CmdArgs, options::OPT_u);
+ Args.addAllArgs(CmdArgs, {options::OPT_L, options::OPT_u});
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
if (D.isUsingLTO()) {
assert(!Inputs.empty() && "Must have at least one input.");
- addLTOOptions(ToolChain, Args, CmdArgs, Output, Inputs[0],
+ // Find the first filename InputInfo object.
+ auto Input = llvm::find_if(
+ Inputs, [](const InputInfo &II) -> bool { return II.isFilename(); });
+ if (Input == Inputs.end())
+ // For a very rare case, all of the inputs to the linker are
+ // InputArg. If that happens, just use the first InputInfo.
+ Input = Inputs.begin();
+
+ addLTOOptions(ToolChain, Args, CmdArgs, Output, *Input,
D.getLTOMode() == LTOK_Thin);
}
- bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
- bool NeedsXRayDeps = addXRayRuntime(ToolChain, Args, CmdArgs);
+ addLinkerCompressDebugSectionsOption(ToolChain, Args, CmdArgs);
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
- ToolChain.addProfileRTLibs(Args, CmdArgs);
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs,
+ options::OPT_r)) {
if (Args.hasArg(options::OPT_static))
CmdArgs.push_back("-Bdynamic");
@@ -149,11 +174,14 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- if (NeedsSanitizerDeps)
- linkSanitizerRuntimeDeps(ToolChain, CmdArgs);
+ // Note that Fuchsia never needs to link in sanitizer runtime deps. Any
+ // sanitizer runtimes with system dependencies use the `.deplibs` feature
+ // instead.
+ addSanitizerRuntimes(ToolChain, Args, CmdArgs);
- if (NeedsXRayDeps)
- linkXRayRuntimeDeps(ToolChain, CmdArgs);
+ addXRayRuntime(ToolChain, Args, CmdArgs);
+
+ ToolChain.addProfileRTLibs(Args, CmdArgs);
AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
@@ -168,7 +196,53 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lc");
}
- C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
+}
+
+void fuchsia::StaticLibTool::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and for "clang -w foo.o -o foo". Other warning options are already
+ // handled somewhere else.
+ Args.ClaimAllArgs(options::OPT_w);
+ // Silence warnings when linking C code with a C++ '-stdlib' argument.
+ Args.ClaimAllArgs(options::OPT_stdlib_EQ);
+
+ // ar tool command "llvm-ar <options> <output_file> <input_files>".
+ ArgStringList CmdArgs;
+ // Create and insert file members with a deterministic index.
+ CmdArgs.push_back("rcsD");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (const auto &II : Inputs) {
+ if (II.isFilename()) {
+ CmdArgs.push_back(II.getFilename());
+ }
+ }
+
+ // Delete old output archive file if it already exists before generating a new
+ // archive file.
+ const char *OutputFileName = Output.getFilename();
+ if (Output.isFilename() && llvm::sys::fs::exists(OutputFileName)) {
+ if (std::error_code EC = llvm::sys::fs::remove(OutputFileName)) {
+ D.Diag(diag::err_drv_unable_to_remove_file) << EC.message();
+ return;
+ }
+ }
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetStaticLibToolPath());
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
Exec, CmdArgs, Inputs, Output));
}
@@ -184,98 +258,82 @@ Fuchsia::Fuchsia(const Driver &D, const llvm::Triple &Triple,
if (!D.SysRoot.empty()) {
SmallString<128> P(D.SysRoot);
llvm::sys::path::append(P, "lib");
- getFilePaths().push_back(std::string(P.str()));
+ getFilePaths().push_back(std::string(P));
}
auto FilePaths = [&](const Multilib &M) -> std::vector<std::string> {
std::vector<std::string> FP;
- SmallString<128> P(getStdlibPath());
- llvm::sys::path::append(P, M.gccSuffix());
- FP.push_back(std::string(P.str()));
+ if (std::optional<std::string> Path = getStdlibPath()) {
+ SmallString<128> P(*Path);
+ llvm::sys::path::append(P, M.gccSuffix());
+ FP.push_back(std::string(P));
+ }
return FP;
};
Multilibs.push_back(Multilib());
// Use the noexcept variant with -fno-exceptions to avoid the extra overhead.
- Multilibs.push_back(Multilib("noexcept", {}, {}, 1)
- .flag("-fexceptions")
- .flag("+fno-exceptions"));
+ Multilibs.push_back(MultilibBuilder("noexcept", {}, {})
+ .flag("-fexceptions", /*Disallow=*/true)
+ .flag("-fno-exceptions")
+ .makeMultilib());
// ASan has higher priority because we always want the instrumentated version.
- Multilibs.push_back(Multilib("asan", {}, {}, 2)
- .flag("+fsanitize=address"));
+ Multilibs.push_back(MultilibBuilder("asan", {}, {})
+ .flag("-fsanitize=address")
+ .makeMultilib());
// Use the asan+noexcept variant with ASan and -fno-exceptions.
- Multilibs.push_back(Multilib("asan+noexcept", {}, {}, 3)
- .flag("+fsanitize=address")
- .flag("-fexceptions")
- .flag("+fno-exceptions"));
+ Multilibs.push_back(MultilibBuilder("asan+noexcept", {}, {})
+ .flag("-fsanitize=address")
+ .flag("-fexceptions", /*Disallow=*/true)
+ .flag("-fno-exceptions")
+ .makeMultilib());
// HWASan has higher priority because we always want the instrumentated
// version.
- Multilibs.push_back(
- Multilib("hwasan", {}, {}, 4).flag("+fsanitize=hwaddress"));
+ Multilibs.push_back(MultilibBuilder("hwasan", {}, {})
+ .flag("-fsanitize=hwaddress")
+ .makeMultilib());
// Use the hwasan+noexcept variant with HWASan and -fno-exceptions.
- Multilibs.push_back(Multilib("hwasan+noexcept", {}, {}, 5)
- .flag("+fsanitize=hwaddress")
- .flag("-fexceptions")
- .flag("+fno-exceptions"));
- // Use the relative vtables ABI.
- // TODO: Remove these multilibs once relative vtables are enabled by default
- // for Fuchsia.
- Multilibs.push_back(Multilib("relative-vtables", {}, {}, 6)
- .flag("+fexperimental-relative-c++-abi-vtables"));
- Multilibs.push_back(Multilib("relative-vtables+noexcept", {}, {}, 7)
- .flag("+fexperimental-relative-c++-abi-vtables")
- .flag("-fexceptions")
- .flag("+fno-exceptions"));
- Multilibs.push_back(Multilib("relative-vtables+asan", {}, {}, 8)
- .flag("+fexperimental-relative-c++-abi-vtables")
- .flag("+fsanitize=address"));
- Multilibs.push_back(Multilib("relative-vtables+asan+noexcept", {}, {}, 9)
- .flag("+fexperimental-relative-c++-abi-vtables")
- .flag("+fsanitize=address")
- .flag("-fexceptions")
- .flag("+fno-exceptions"));
- Multilibs.push_back(Multilib("relative-vtables+hwasan", {}, {}, 10)
- .flag("+fexperimental-relative-c++-abi-vtables")
- .flag("+fsanitize=hwaddress"));
- Multilibs.push_back(Multilib("relative-vtables+hwasan+noexcept", {}, {}, 11)
- .flag("+fexperimental-relative-c++-abi-vtables")
- .flag("+fsanitize=hwaddress")
- .flag("-fexceptions")
- .flag("+fno-exceptions"));
+ Multilibs.push_back(MultilibBuilder("hwasan+noexcept", {}, {})
+ .flag("-fsanitize=hwaddress")
+ .flag("-fexceptions", /*Disallow=*/true)
+ .flag("-fno-exceptions")
+ .makeMultilib());
// Use Itanium C++ ABI for the compat multilib.
- Multilibs.push_back(Multilib("compat", {}, {}, 12).flag("+fc++-abi=itanium"));
+ Multilibs.push_back(MultilibBuilder("compat", {}, {})
+ .flag("-fc++-abi=itanium")
+ .makeMultilib());
Multilibs.FilterOut([&](const Multilib &M) {
std::vector<std::string> RD = FilePaths(M);
- return std::all_of(RD.begin(), RD.end(), [&](std::string P) {
- return !getVFS().exists(P);
- });
+ return llvm::all_of(RD, [&](std::string P) { return !getVFS().exists(P); });
});
Multilib::flags_list Flags;
- addMultilibFlag(
- Args.hasFlag(options::OPT_fexceptions, options::OPT_fno_exceptions, true),
- "fexceptions", Flags);
- addMultilibFlag(getSanitizerArgs().needsAsanRt(), "fsanitize=address", Flags);
- addMultilibFlag(getSanitizerArgs().needsHwasanRt(), "fsanitize=hwaddress",
+ bool Exceptions =
+ Args.hasFlag(options::OPT_fexceptions, options::OPT_fno_exceptions, true);
+ addMultilibFlag(Exceptions, "-fexceptions", Flags);
+ addMultilibFlag(!Exceptions, "-fno-exceptions", Flags);
+ addMultilibFlag(getSanitizerArgs(Args).needsAsanRt(), "-fsanitize=address",
Flags);
+ addMultilibFlag(getSanitizerArgs(Args).needsHwasanRt(),
+ "-fsanitize=hwaddress", Flags);
- addMultilibFlag(
- Args.hasFlag(options::OPT_fexperimental_relative_cxx_abi_vtables,
- options::OPT_fno_experimental_relative_cxx_abi_vtables,
- /*default=*/false),
- "fexperimental-relative-c++-abi-vtables", Flags);
addMultilibFlag(Args.getLastArgValue(options::OPT_fcxx_abi_EQ) == "itanium",
- "fc++-abi=itanium", Flags);
+ "-fc++-abi=itanium", Flags);
Multilibs.setFilePathsCallback(FilePaths);
- if (Multilibs.select(Flags, SelectedMultilib))
- if (!SelectedMultilib.isDefault())
+ if (Multilibs.select(Flags, SelectedMultilibs)) {
+ // Ensure that -print-multi-directory only outputs one multilib directory.
+ Multilib LastSelected = SelectedMultilibs.back();
+ SelectedMultilibs = {LastSelected};
+
+ if (!SelectedMultilibs.back().isDefault())
if (const auto &PathsCallback = Multilibs.filePathsCallback())
- for (const auto &Path : PathsCallback(SelectedMultilib))
+ for (const auto &Path : PathsCallback(SelectedMultilibs.back()))
// Prepend the multilib path to ensure it takes the precedence.
getFilePaths().insert(getFilePaths().begin(), Path);
+ }
}
std::string Fuchsia::ComputeEffectiveClangTriple(const ArgList &Args,
@@ -288,6 +346,10 @@ Tool *Fuchsia::buildLinker() const {
return new tools::fuchsia::Linker(*this);
}
+Tool *Fuchsia::buildStaticLibTool() const {
+ return new tools::fuchsia::StaticLibTool(*this);
+}
+
ToolChain::RuntimeLibType Fuchsia::GetRuntimeLibType(
const ArgList &Args) const {
if (Arg *A = Args.getLastArg(clang::driver::options::OPT_rtlib_EQ)) {
@@ -358,8 +420,8 @@ void Fuchsia::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
void Fuchsia::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
- DriverArgs.hasArg(options::OPT_nostdincxx))
+ if (DriverArgs.hasArg(options::OPT_nostdinc, options::OPT_nostdlibinc,
+ options::OPT_nostdincxx))
return;
const Driver &D = getDriver();
@@ -400,6 +462,8 @@ void Fuchsia::AddCXXStdlibLibArgs(const ArgList &Args,
switch (GetCXXStdlibType(Args)) {
case ToolChain::CST_Libcxx:
CmdArgs.push_back("-lc++");
+ if (Args.hasArg(options::OPT_fexperimental_library))
+ CmdArgs.push_back("-lc++experimental");
break;
case ToolChain::CST_Libstdcxx:
@@ -426,24 +490,14 @@ SanitizerMask Fuchsia::getDefaultSanitizers() const {
SanitizerMask Res;
switch (getTriple().getArch()) {
case llvm::Triple::aarch64:
+ case llvm::Triple::riscv64:
Res |= SanitizerKind::ShadowCallStack;
break;
case llvm::Triple::x86_64:
Res |= SanitizerKind::SafeStack;
break;
default:
- // TODO: Enable SafeStack on RISC-V once tested.
break;
}
return Res;
}
-
-void Fuchsia::addProfileRTLibs(const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs) const {
- // Add linker option -u__llvm_profile_runtime to cause runtime
- // initialization module to be linked in.
- if (needsProfileRT(Args))
- CmdArgs.push_back(Args.MakeArgString(
- Twine("-u", llvm::getInstrProfRuntimeHookVarName())));
- ToolChain::addProfileRTLibs(Args, CmdArgs);
-}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.h
index 07adf9b7101d..619968f58502 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.h
@@ -18,7 +18,21 @@ namespace clang {
namespace driver {
namespace tools {
namespace fuchsia {
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+class LLVM_LIBRARY_VISIBILITY StaticLibTool : public Tool {
+public:
+ StaticLibTool(const ToolChain &TC)
+ : Tool("fuchsia::StaticLibTool", "llvm-ar", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
public:
Linker(const ToolChain &TC) : Tool("fuchsia::Linker", "ld.lld", TC) {}
@@ -41,20 +55,21 @@ public:
const llvm::opt::ArgList &Args);
bool HasNativeLLVMSupport() const override { return true; }
- bool IsIntegratedAssemblerDefault() const override { return true; }
bool IsMathErrnoDefault() const override { return false; }
- bool useRelaxRelocations() const override { return true; };
RuntimeLibType GetDefaultRuntimeLibType() const override {
return ToolChain::RLT_CompilerRT;
}
CXXStdlibType GetDefaultCXXStdlibType() const override {
return ToolChain::CST_Libcxx;
}
- bool IsUnwindTablesDefault(const llvm::opt::ArgList &Args) const override {
- return true;
+ UnwindTableLevel
+ getDefaultUnwindTableLevel(const llvm::opt::ArgList &Args) const override {
+ return UnwindTableLevel::Asynchronous;
}
bool isPICDefault() const override { return false; }
- bool isPIEDefault() const override { return true; }
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override {
+ return true;
+ }
bool isPICDefaultForced() const override { return false; }
llvm::DebuggerKind getDefaultDebuggerTuning() const override {
return llvm::DebuggerKind::GDB;
@@ -71,32 +86,33 @@ public:
SanitizerMask getSupportedSanitizers() const override;
SanitizerMask getDefaultSanitizers() const override;
- void addProfileRTLibs(const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs) const override;
-
RuntimeLibType
GetRuntimeLibType(const llvm::opt::ArgList &Args) const override;
- CXXStdlibType
- GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+ CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+
+ bool IsAArch64OutlineAtomicsDefault(
+ const llvm::opt::ArgList &Args) const override {
+ return true;
+ }
- void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args,
- Action::OffloadKind DeviceOffloadKind) const override;
+ void
+ addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- void
- AddClangCXXStdlibIncludeArgs(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
- const char *getDefaultLinker() const override {
- return "ld.lld";
- }
+ const char *getDefaultLinker() const override { return "ld.lld"; }
protected:
Tool *buildLinker() const override;
+ Tool *buildStaticLibTool() const override;
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp
index da39f29e4619..e5e1b1d77269 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp
@@ -8,6 +8,8 @@
#include "Gnu.h"
#include "Arch/ARM.h"
+#include "Arch/CSKY.h"
+#include "Arch/LoongArch.h"
#include "Arch/Mips.h"
#include "Arch/PPC.h"
#include "Arch/RISCV.h"
@@ -19,14 +21,18 @@
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/MultilibBuilder.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Path.h"
-#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/RISCVISAInfo.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/TargetParser/TargetParser.h"
#include <system_error>
using namespace clang::driver;
@@ -82,7 +88,7 @@ void tools::gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
RenderExtraToolArgs(JA, CmdArgs);
- // If using a driver driver, force the arch.
+ // If using a driver, force the arch.
if (getToolChain().getTriple().isOSDarwin()) {
CmdArgs.push_back("-arch");
CmdArgs.push_back(
@@ -112,11 +118,11 @@ void tools::gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
break;
}
+ assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
if (Output.isFilename()) {
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
} else {
- assert(Output.isNothing() && "Unexpected output");
CmdArgs.push_back("-fsyntax-only");
}
@@ -215,30 +221,6 @@ void tools::gcc::Linker::RenderExtraToolArgs(const JobAction &JA,
// The types are (hopefully) good enough.
}
-// On Arm the endianness of the output file is determined by the target and
-// can be overridden by the pseudo-target flags '-mlittle-endian'/'-EL' and
-// '-mbig-endian'/'-EB'. Unlike other targets the flag does not result in a
-// normalized triple so we must handle the flag here.
-static bool isArmBigEndian(const llvm::Triple &Triple,
- const ArgList &Args) {
- bool IsBigEndian = false;
- switch (Triple.getArch()) {
- case llvm::Triple::armeb:
- case llvm::Triple::thumbeb:
- IsBigEndian = true;
- LLVM_FALLTHROUGH;
- case llvm::Triple::arm:
- case llvm::Triple::thumb:
- if (Arg *A = Args.getLastArg(options::OPT_mlittle_endian,
- options::OPT_mbig_endian))
- IsBigEndian = !A->getOption().matches(options::OPT_mlittle_endian);
- break;
- default:
- break;
- }
- return IsBigEndian;
-}
-
static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
switch (T.getArch()) {
case llvm::Triple::x86:
@@ -253,7 +235,8 @@ static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
case llvm::Triple::thumb:
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
- return isArmBigEndian(T, Args) ? "armelfb_linux_eabi" : "armelf_linux_eabi";
+ return tools::arm::isARMBigEndian(T, Args) ? "armelfb_linux_eabi"
+ : "armelf_linux_eabi";
case llvm::Triple::m68k:
return "m68kelf";
case llvm::Triple::ppc:
@@ -277,6 +260,10 @@ static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
return "elf32_sparc";
case llvm::Triple::sparcv9:
return "elf64_sparc";
+ case llvm::Triple::loongarch32:
+ return "elf32loongarch";
+ case llvm::Triple::loongarch64:
+ return "elf64loongarch";
case llvm::Triple::mips:
return "elf32btsmip";
case llvm::Triple::mipsel:
@@ -299,32 +286,20 @@ static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
return "elf_x86_64";
case llvm::Triple::ve:
return "elf64ve";
+ case llvm::Triple::csky:
+ return "cskyelf_linux";
default:
return nullptr;
}
}
-static bool getPIE(const ArgList &Args, const ToolChain &TC) {
- if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_static) ||
- Args.hasArg(options::OPT_r) || Args.hasArg(options::OPT_static_pie))
- return false;
-
- Arg *A = Args.getLastArg(options::OPT_pie, options::OPT_no_pie,
- options::OPT_nopie);
- if (!A)
- return TC.isPIEDefault();
- return A->getOption().matches(options::OPT_pie);
-}
-
static bool getStaticPIE(const ArgList &Args, const ToolChain &TC) {
bool HasStaticPIE = Args.hasArg(options::OPT_static_pie);
- // -no-pie is an alias for -nopie. So, handling -nopie takes care of
- // -no-pie as well.
- if (HasStaticPIE && Args.hasArg(options::OPT_nopie)) {
+ if (HasStaticPIE && Args.hasArg(options::OPT_no_pie)) {
const Driver &D = TC.getDriver();
const llvm::opt::OptTable &Opts = D.getOpts();
- const char *StaticPIEName = Opts.getOptionName(options::OPT_static_pie);
- const char *NoPIEName = Opts.getOptionName(options::OPT_nopie);
+ StringRef StaticPIEName = Opts.getOptionName(options::OPT_static_pie);
+ StringRef NoPIEName = Opts.getOptionName(options::OPT_nopie);
D.Diag(diag::err_drv_cannot_mix_options) << StaticPIEName << NoPIEName;
}
return HasStaticPIE;
@@ -388,17 +363,16 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// Generic_ELF, so the static_cast might return a reference to a invalid
// instance (see PR45061). Ideally, the Linker constructor needs to take a
// Generic_ELF instead.
- const toolchains::Generic_ELF &ToolChain =
- static_cast<const toolchains::Generic_ELF &>(getToolChain());
+ const auto &ToolChain = static_cast<const Generic_ELF &>(getToolChain());
const Driver &D = ToolChain.getDriver();
const llvm::Triple &Triple = getToolChain().getEffectiveTriple();
const llvm::Triple::ArchType Arch = ToolChain.getArch();
+ const bool isOHOSFamily = ToolChain.getTriple().isOHOSFamily();
const bool isAndroid = ToolChain.getTriple().isAndroid();
const bool IsIAMCU = ToolChain.getTriple().isOSIAMCU();
const bool IsVE = ToolChain.getTriple().isVE();
- const bool IsPIE = getPIE(Args, ToolChain);
const bool IsStaticPIE = getStaticPIE(Args, ToolChain);
const bool IsStatic = getStatic(Args);
const bool HasCRTBeginEndFiles =
@@ -418,49 +392,26 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (!D.SysRoot.empty())
CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
- if (IsPIE)
- CmdArgs.push_back("-pie");
-
- if (IsStaticPIE) {
- CmdArgs.push_back("-static");
- CmdArgs.push_back("-pie");
- CmdArgs.push_back("--no-dynamic-linker");
- CmdArgs.push_back("-z");
- CmdArgs.push_back("text");
- }
-
- if (ToolChain.isNoExecStackDefault()) {
- CmdArgs.push_back("-z");
- CmdArgs.push_back("noexecstack");
- }
-
- if (Args.hasArg(options::OPT_rdynamic))
- CmdArgs.push_back("-export-dynamic");
-
if (Args.hasArg(options::OPT_s))
CmdArgs.push_back("-s");
- if (Triple.isARM() || Triple.isThumb() || Triple.isAArch64()) {
- bool IsBigEndian = isArmBigEndian(Triple, Args);
+ if (Triple.isARM() || Triple.isThumb()) {
+ bool IsBigEndian = arm::isARMBigEndian(Triple, Args);
if (IsBigEndian)
arm::appendBE8LinkFlag(Args, CmdArgs, Triple);
- IsBigEndian = IsBigEndian || Arch == llvm::Triple::aarch64_be;
CmdArgs.push_back(IsBigEndian ? "-EB" : "-EL");
+ } else if (Triple.isAArch64()) {
+ CmdArgs.push_back(Arch == llvm::Triple::aarch64_be ? "-EB" : "-EL");
}
// Most Android ARM64 targets should enable the linker fix for erratum
// 843419. Only non-Cortex-A53 devices are allowed to skip this flag.
- if (Arch == llvm::Triple::aarch64 && isAndroid) {
- std::string CPU = getCPUName(Args, Triple);
+ if (Arch == llvm::Triple::aarch64 && (isAndroid || isOHOSFamily)) {
+ std::string CPU = getCPUName(D, Args, Triple);
if (CPU.empty() || CPU == "generic" || CPU == "cortex-a53")
CmdArgs.push_back("--fix-cortex-a53-843419");
}
- // Android does not allow shared text relocations. Emit a warning if the
- // user's code contains any.
- if (isAndroid)
- CmdArgs.push_back("--warn-shared-textrel");
-
ToolChain.addExtraOpts(CmdArgs);
CmdArgs.push_back("--eh-frame-hdr");
@@ -472,22 +423,29 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
D.Diag(diag::err_target_unknown_triple) << Triple.str();
return;
}
+ if (Triple.isRISCV())
+ CmdArgs.push_back("-X");
- if (IsStatic) {
- if (Arch == llvm::Triple::arm || Arch == llvm::Triple::armeb ||
- Arch == llvm::Triple::thumb || Arch == llvm::Triple::thumbeb)
- CmdArgs.push_back("-Bstatic");
- else
- CmdArgs.push_back("-static");
- } else if (Args.hasArg(options::OPT_shared)) {
+ const bool IsShared = Args.hasArg(options::OPT_shared);
+ if (IsShared)
CmdArgs.push_back("-shared");
- }
-
- if (!IsStatic) {
+ bool IsPIE = false;
+ if (IsStaticPIE) {
+ CmdArgs.push_back("-static");
+ CmdArgs.push_back("-pie");
+ CmdArgs.push_back("--no-dynamic-linker");
+ CmdArgs.push_back("-z");
+ CmdArgs.push_back("text");
+ } else if (IsStatic) {
+ CmdArgs.push_back("-static");
+ } else if (!Args.hasArg(options::OPT_r)) {
if (Args.hasArg(options::OPT_rdynamic))
CmdArgs.push_back("-export-dynamic");
-
- if (!Args.hasArg(options::OPT_shared) && !IsStaticPIE) {
+ if (!IsShared) {
+ IsPIE = Args.hasFlag(options::OPT_pie, options::OPT_no_pie,
+ ToolChain.isPIEDefault(Args));
+ if (IsPIE)
+ CmdArgs.push_back("-pie");
CmdArgs.push_back("-dynamic-linker");
CmdArgs.push_back(Args.MakeArgString(Twine(D.DyldPrefix) +
ToolChain.getDynamicLinker(Args)));
@@ -497,7 +455,8 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_r)) {
if (!isAndroid && !IsIAMCU) {
const char *crt1 = nullptr;
if (!Args.hasArg(options::OPT_shared)) {
@@ -534,10 +493,10 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
if (P.empty()) {
const char *crtbegin;
- if (IsStatic)
- crtbegin = isAndroid ? "crtbegin_static.o" : "crtbeginT.o";
- else if (Args.hasArg(options::OPT_shared))
+ if (Args.hasArg(options::OPT_shared))
crtbegin = isAndroid ? "crtbegin_so.o" : "crtbeginS.o";
+ else if (IsStatic)
+ crtbegin = isAndroid ? "crtbegin_static.o" : "crtbeginT.o";
else if (IsPIE || IsStaticPIE)
crtbegin = isAndroid ? "crtbegin_dynamic.o" : "crtbeginS.o";
else
@@ -549,16 +508,28 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// Add crtfastmath.o if available and fast math is enabled.
ToolChain.addFastMathRuntimeIfAvailable(Args, CmdArgs);
+
+ if (isAndroid && Args.hasFlag(options::OPT_fandroid_pad_segment,
+ options::OPT_fno_android_pad_segment, false))
+ CmdArgs.push_back(
+ Args.MakeArgString(ToolChain.GetFilePath("crt_pad_segment.o")));
}
- Args.AddAllArgs(CmdArgs, options::OPT_L);
- Args.AddAllArgs(CmdArgs, options::OPT_u);
+ Args.addAllArgs(CmdArgs, {options::OPT_L, options::OPT_u});
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
if (D.isUsingLTO()) {
assert(!Inputs.empty() && "Must have at least one input.");
- addLTOOptions(ToolChain, Args, CmdArgs, Output, Inputs[0],
+ // Find the first filename InputInfo object.
+ auto Input = llvm::find_if(
+ Inputs, [](const InputInfo &II) -> bool { return II.isFilename(); });
+ if (Input == Inputs.end())
+ // For a very rare case, all of the inputs to the linker are
+ // InputArg. If that happens, just use the first InputInfo.
+ Input = Inputs.begin();
+
+ addLTOOptions(ToolChain, Args, CmdArgs, Output, *Input,
D.getLTOMode() == LTOK_Thin);
}
@@ -569,11 +540,15 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
bool NeedsXRayDeps = addXRayRuntime(ToolChain, Args, CmdArgs);
addLinkerCompressDebugSectionsOption(ToolChain, Args, CmdArgs);
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
+
+ addHIPRuntimeLibArgs(ToolChain, C, Args, CmdArgs);
+
// The profile runtime also needs access to system libraries.
getToolChain().addProfileRTLibs(Args, CmdArgs);
if (D.CCCIsCXX() &&
- !Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ !Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs,
+ options::OPT_r)) {
if (ToolChain.ShouldLinkCXXStdlib(Args)) {
bool OnlyLibstdcxxStatic = Args.hasArg(options::OPT_static_libstdcxx) &&
!Args.hasArg(options::OPT_static);
@@ -585,19 +560,30 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
CmdArgs.push_back("-lm");
}
+
// Silence warnings when linking C code with a C++ '-stdlib' argument.
Args.ClaimAllArgs(options::OPT_stdlib_EQ);
- if (!Args.hasArg(options::OPT_nostdlib)) {
+ // Additional linker set-up and flags for Fortran. This is required in order
+ // to generate executables. As Fortran runtime depends on the C runtime,
+ // these dependencies need to be listed before the C runtime below (i.e.
+ // AddRunTimeLibs).
+ if (D.IsFlangMode()) {
+ addFortranRuntimeLibraryPath(ToolChain, Args, CmdArgs);
+ addFortranRuntimeLibs(ToolChain, Args, CmdArgs);
+ CmdArgs.push_back("-lm");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_r)) {
if (!Args.hasArg(options::OPT_nodefaultlibs)) {
if (IsStatic || IsStaticPIE)
CmdArgs.push_back("--start-group");
if (NeedsSanitizerDeps)
- linkSanitizerRuntimeDeps(ToolChain, CmdArgs);
+ linkSanitizerRuntimeDeps(ToolChain, Args, CmdArgs);
if (NeedsXRayDeps)
- linkXRayRuntimeDeps(ToolChain, CmdArgs);
+ linkXRayRuntimeDeps(ToolChain, Args, CmdArgs);
bool WantPthread = Args.hasArg(options::OPT_pthread) ||
Args.hasArg(options::OPT_pthreads);
@@ -617,7 +603,19 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
- if (WantPthread && !isAndroid)
+ // LLVM support for atomics on 32-bit SPARC V8+ is incomplete, so
+ // forcibly link with libatomic as a workaround.
+ // TODO: Issue #41880 and D118021.
+ if (getToolChain().getTriple().getArch() == llvm::Triple::sparc) {
+ CmdArgs.push_back("--push-state");
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-latomic");
+ CmdArgs.push_back("--pop-state");
+ }
+
+ // We don't need libpthread neither for bionic (Android) nor for musl,
+ // (used by OHOS as runtime library).
+ if (WantPthread && !isAndroid && !isOHOSFamily)
CmdArgs.push_back("-lpthread");
if (Args.hasArg(options::OPT_fsplit_stack))
@@ -694,6 +692,10 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
unsigned PICLevel;
bool IsPIE;
const char *DefaultAssembler = "as";
+ // Enforce GNU as on Solaris; the native assembler's input syntax isn't fully
+ // compatible.
+ if (getToolChain().getTriple().isOSSolaris())
+ DefaultAssembler = "gas";
std::tie(RelocationModel, PICLevel, IsPIE) =
ParsePICArgs(getToolChain(), Args);
@@ -702,20 +704,16 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
CmdArgs.push_back("--compress-debug-sections");
} else {
StringRef Value = A->getValue();
- if (Value == "none" || Value == "zlib" || Value == "zlib-gnu") {
+ if (Value == "none" || Value == "zlib" || Value == "zstd") {
CmdArgs.push_back(
Args.MakeArgString("--compress-debug-sections=" + Twine(Value)));
} else {
D.Diag(diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << Value;
+ << A->getSpelling() << Value;
}
}
}
- if (getToolChain().isNoExecStackDefault()) {
- CmdArgs.push_back("--noexecstack");
- }
-
switch (getToolChain().getArch()) {
default:
break;
@@ -734,32 +732,32 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
CmdArgs.push_back("-a32");
CmdArgs.push_back("-mppc");
CmdArgs.push_back("-mbig-endian");
- CmdArgs.push_back(
- ppc::getPPCAsmModeForCPU(getCPUName(Args, getToolChain().getTriple())));
+ CmdArgs.push_back(ppc::getPPCAsmModeForCPU(
+ getCPUName(D, Args, getToolChain().getTriple())));
break;
}
case llvm::Triple::ppcle: {
CmdArgs.push_back("-a32");
CmdArgs.push_back("-mppc");
CmdArgs.push_back("-mlittle-endian");
- CmdArgs.push_back(
- ppc::getPPCAsmModeForCPU(getCPUName(Args, getToolChain().getTriple())));
+ CmdArgs.push_back(ppc::getPPCAsmModeForCPU(
+ getCPUName(D, Args, getToolChain().getTriple())));
break;
}
case llvm::Triple::ppc64: {
CmdArgs.push_back("-a64");
CmdArgs.push_back("-mppc64");
CmdArgs.push_back("-mbig-endian");
- CmdArgs.push_back(
- ppc::getPPCAsmModeForCPU(getCPUName(Args, getToolChain().getTriple())));
+ CmdArgs.push_back(ppc::getPPCAsmModeForCPU(
+ getCPUName(D, Args, getToolChain().getTriple())));
break;
}
case llvm::Triple::ppc64le: {
CmdArgs.push_back("-a64");
CmdArgs.push_back("-mppc64");
CmdArgs.push_back("-mlittle-endian");
- CmdArgs.push_back(
- ppc::getPPCAsmModeForCPU(getCPUName(Args, getToolChain().getTriple())));
+ CmdArgs.push_back(ppc::getPPCAsmModeForCPU(
+ getCPUName(D, Args, getToolChain().getTriple())));
break;
}
case llvm::Triple::riscv32:
@@ -770,12 +768,14 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
StringRef MArchName = riscv::getRISCVArch(Args, getToolChain().getTriple());
CmdArgs.push_back("-march");
CmdArgs.push_back(MArchName.data());
+ if (!Args.hasFlag(options::OPT_mrelax, options::OPT_mno_relax, true))
+ Args.addOptOutFlag(CmdArgs, options::OPT_mrelax, options::OPT_mno_relax);
break;
}
case llvm::Triple::sparc:
case llvm::Triple::sparcel: {
CmdArgs.push_back("-32");
- std::string CPU = getCPUName(Args, getToolChain().getTriple());
+ std::string CPU = getCPUName(D, Args, getToolChain().getTriple());
CmdArgs.push_back(
sparc::getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
@@ -783,7 +783,7 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
}
case llvm::Triple::sparcv9: {
CmdArgs.push_back("-64");
- std::string CPU = getCPUName(Args, getToolChain().getTriple());
+ std::string CPU = getCPUName(D, Args, getToolChain().getTriple());
CmdArgs.push_back(
sparc::getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
@@ -794,7 +794,7 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
case llvm::Triple::thumb:
case llvm::Triple::thumbeb: {
const llvm::Triple &Triple2 = getToolChain().getTriple();
- CmdArgs.push_back(isArmBigEndian(Triple2, Args) ? "-EB" : "-EL");
+ CmdArgs.push_back(arm::isARMBigEndian(Triple2, Args) ? "-EB" : "-EL");
switch (Triple2.getSubArch()) {
case llvm::Triple::ARMSubArch_v7:
CmdArgs.push_back("-mfpu=neon");
@@ -823,6 +823,11 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
normalizeCPUNamesForAssembler(Args, CmdArgs);
Args.AddLastArg(CmdArgs, options::OPT_mfpu_EQ);
+ // The integrated assembler doesn't implement e_flags setting behavior for
+ // -meabi=gnu (gcc -mabi={apcs-gnu,atpcs} passes -meabi=gnu to gas). For
+ // compatibility we accept but warn.
+ if (Arg *A = Args.getLastArgNoClaim(options::OPT_mabi_EQ))
+ A->ignoreTargetSpecific();
break;
}
case llvm::Triple::aarch64:
@@ -834,6 +839,13 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
break;
}
+ // TODO: handle loongarch32.
+ case llvm::Triple::loongarch64: {
+ StringRef ABIName =
+ loongarch::getLoongArchABI(D, Args, getToolChain().getTriple());
+ CmdArgs.push_back(Args.MakeArgString("-mabi=" + ABIName));
+ break;
+ }
case llvm::Triple::mips:
case llvm::Triple::mipsel:
case llvm::Triple::mips64:
@@ -931,7 +943,7 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
for (const Arg *A : Args.filtered(options::OPT_ffile_prefix_map_EQ,
options::OPT_fdebug_prefix_map_EQ)) {
StringRef Map = A->getValue();
- if (Map.find('=') == StringRef::npos)
+ if (!Map.contains('='))
D.Diag(diag::err_drv_invalid_argument_to_option)
<< Map << A->getOption().getName();
else {
@@ -950,6 +962,17 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
for (const auto &II : Inputs)
CmdArgs.push_back(II.getFilename());
+ if (Arg *A = Args.getLastArg(options::OPT_g_Flag, options::OPT_gN_Group,
+ options::OPT_gdwarf_2, options::OPT_gdwarf_3,
+ options::OPT_gdwarf_4, options::OPT_gdwarf_5,
+ options::OPT_gdwarf))
+ if (!A->getOption().matches(options::OPT_g0)) {
+ Args.AddLastArg(CmdArgs, options::OPT_g_Flag);
+
+ unsigned DwarfVersion = getDwarfVersion(getToolChain(), Args);
+ CmdArgs.push_back(Args.MakeArgString("-gdwarf-" + Twine(DwarfVersion)));
+ }
+
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath(DefaultAssembler));
C.addCommand(std::make_unique<Command>(JA, *this,
@@ -1013,46 +1036,47 @@ static bool isMSP430(llvm::Triple::ArchType Arch) {
return Arch == llvm::Triple::msp430;
}
-static Multilib makeMultilib(StringRef commonSuffix) {
- return Multilib(commonSuffix, commonSuffix, commonSuffix);
-}
-
static bool findMipsCsMultilibs(const Multilib::flags_list &Flags,
FilterNonExistent &NonExistent,
DetectedMultilibs &Result) {
// Check for Code Sourcery toolchain multilibs
MultilibSet CSMipsMultilibs;
{
- auto MArchMips16 = makeMultilib("/mips16").flag("+m32").flag("+mips16");
+ auto MArchMips16 = MultilibBuilder("/mips16").flag("-m32").flag("-mips16");
auto MArchMicroMips =
- makeMultilib("/micromips").flag("+m32").flag("+mmicromips");
+ MultilibBuilder("/micromips").flag("-m32").flag("-mmicromips");
- auto MArchDefault = makeMultilib("").flag("-mips16").flag("-mmicromips");
+ auto MArchDefault = MultilibBuilder("")
+ .flag("-mips16", /*Disallow=*/true)
+ .flag("-mmicromips", /*Disallow=*/true);
- auto UCLibc = makeMultilib("/uclibc").flag("+muclibc");
+ auto UCLibc = MultilibBuilder("/uclibc").flag("-muclibc");
- auto SoftFloat = makeMultilib("/soft-float").flag("+msoft-float");
+ auto SoftFloat = MultilibBuilder("/soft-float").flag("-msoft-float");
- auto Nan2008 = makeMultilib("/nan2008").flag("+mnan=2008");
+ auto Nan2008 = MultilibBuilder("/nan2008").flag("-mnan=2008");
- auto DefaultFloat =
- makeMultilib("").flag("-msoft-float").flag("-mnan=2008");
+ auto DefaultFloat = MultilibBuilder("")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mnan=2008", /*Disallow=*/true);
- auto BigEndian = makeMultilib("").flag("+EB").flag("-EL");
+ auto BigEndian =
+ MultilibBuilder("").flag("-EB").flag("-EL", /*Disallow=*/true);
- auto LittleEndian = makeMultilib("/el").flag("+EL").flag("-EB");
+ auto LittleEndian =
+ MultilibBuilder("/el").flag("-EL").flag("-EB", /*Disallow=*/true);
// Note that this one's osSuffix is ""
- auto MAbi64 = makeMultilib("")
+ auto MAbi64 = MultilibBuilder("")
.gccSuffix("/64")
.includeSuffix("/64")
- .flag("+mabi=n64")
- .flag("-mabi=n32")
- .flag("-m32");
+ .flag("-mabi=n64")
+ .flag("-mabi=n32", /*Disallow=*/true)
+ .flag("-m32", /*Disallow=*/true);
CSMipsMultilibs =
- MultilibSet()
+ MultilibSetBuilder()
.Either(MArchMips16, MArchMicroMips, MArchDefault)
.Maybe(UCLibc)
.Either(SoftFloat, Nan2008, DefaultFloat)
@@ -1062,10 +1086,11 @@ static bool findMipsCsMultilibs(const Multilib::flags_list &Flags,
.Maybe(MAbi64)
.FilterOut("/mips16.*/64")
.FilterOut("/micromips.*/64")
+ .makeMultilibSet()
.FilterOut(NonExistent)
.setIncludeDirsCallback([](const Multilib &M) {
std::vector<std::string> Dirs({"/include"});
- if (StringRef(M.includeSuffix()).startswith("/uclibc"))
+ if (StringRef(M.includeSuffix()).starts_with("/uclibc"))
Dirs.push_back(
"/../../../../mips-linux-gnu/libc/uclibc/usr/include");
else
@@ -1076,20 +1101,27 @@ static bool findMipsCsMultilibs(const Multilib::flags_list &Flags,
MultilibSet DebianMipsMultilibs;
{
- Multilib MAbiN32 =
- Multilib().gccSuffix("/n32").includeSuffix("/n32").flag("+mabi=n32");
+ MultilibBuilder MAbiN32 =
+ MultilibBuilder().gccSuffix("/n32").includeSuffix("/n32").flag(
+ "-mabi=n32");
- Multilib M64 = Multilib()
- .gccSuffix("/64")
- .includeSuffix("/64")
- .flag("+m64")
- .flag("-m32")
- .flag("-mabi=n32");
+ MultilibBuilder M64 = MultilibBuilder()
+ .gccSuffix("/64")
+ .includeSuffix("/64")
+ .flag("-m64")
+ .flag("-m32", /*Disallow=*/true)
+ .flag("-mabi=n32", /*Disallow=*/true);
- Multilib M32 = Multilib().flag("-m64").flag("+m32").flag("-mabi=n32");
+ MultilibBuilder M32 = MultilibBuilder()
+ .gccSuffix("/32")
+ .flag("-m64", /*Disallow=*/true)
+ .flag("-m32")
+ .flag("-mabi=n32", /*Disallow=*/true);
- DebianMipsMultilibs =
- MultilibSet().Either(M32, M64, MAbiN32).FilterOut(NonExistent);
+ DebianMipsMultilibs = MultilibSetBuilder()
+ .Either(M32, M64, MAbiN32)
+ .makeMultilibSet()
+ .FilterOut(NonExistent);
}
// Sort candidates. Toolchain that best meets the directories tree goes first.
@@ -1098,7 +1130,7 @@ static bool findMipsCsMultilibs(const Multilib::flags_list &Flags,
if (CSMipsMultilibs.size() < DebianMipsMultilibs.size())
std::iter_swap(Candidates, Candidates + 1);
for (const MultilibSet *Candidate : Candidates) {
- if (Candidate->select(Flags, Result.SelectedMultilib)) {
+ if (Candidate->select(Flags, Result.SelectedMultilibs)) {
if (Candidate == &DebianMipsMultilibs)
Result.BiarchSibling = Multilib();
Result.Multilibs = *Candidate;
@@ -1114,25 +1146,32 @@ static bool findMipsAndroidMultilibs(llvm::vfs::FileSystem &VFS, StringRef Path,
DetectedMultilibs &Result) {
MultilibSet AndroidMipsMultilibs =
- MultilibSet()
- .Maybe(Multilib("/mips-r2").flag("+march=mips32r2"))
- .Maybe(Multilib("/mips-r6").flag("+march=mips32r6"))
+ MultilibSetBuilder()
+ .Maybe(MultilibBuilder("/mips-r2", {}, {}).flag("-march=mips32r2"))
+ .Maybe(MultilibBuilder("/mips-r6", {}, {}).flag("-march=mips32r6"))
+ .makeMultilibSet()
.FilterOut(NonExistent);
MultilibSet AndroidMipselMultilibs =
- MultilibSet()
- .Either(Multilib().flag("+march=mips32"),
- Multilib("/mips-r2", "", "/mips-r2").flag("+march=mips32r2"),
- Multilib("/mips-r6", "", "/mips-r6").flag("+march=mips32r6"))
+ MultilibSetBuilder()
+ .Either(MultilibBuilder().flag("-march=mips32"),
+ MultilibBuilder("/mips-r2", "", "/mips-r2")
+ .flag("-march=mips32r2"),
+ MultilibBuilder("/mips-r6", "", "/mips-r6")
+ .flag("-march=mips32r6"))
+ .makeMultilibSet()
.FilterOut(NonExistent);
MultilibSet AndroidMips64elMultilibs =
- MultilibSet()
- .Either(
- Multilib().flag("+march=mips64r6"),
- Multilib("/32/mips-r1", "", "/mips-r1").flag("+march=mips32"),
- Multilib("/32/mips-r2", "", "/mips-r2").flag("+march=mips32r2"),
- Multilib("/32/mips-r6", "", "/mips-r6").flag("+march=mips32r6"))
+ MultilibSetBuilder()
+ .Either(MultilibBuilder().flag("-march=mips64r6"),
+ MultilibBuilder("/32/mips-r1", "", "/mips-r1")
+ .flag("-march=mips32"),
+ MultilibBuilder("/32/mips-r2", "", "/mips-r2")
+ .flag("-march=mips32r2"),
+ MultilibBuilder("/32/mips-r6", "", "/mips-r6")
+ .flag("-march=mips32r6"))
+ .makeMultilibSet()
.FilterOut(NonExistent);
MultilibSet *MS = &AndroidMipsMultilibs;
@@ -1140,7 +1179,7 @@ static bool findMipsAndroidMultilibs(llvm::vfs::FileSystem &VFS, StringRef Path,
MS = &AndroidMipselMultilibs;
else if (VFS.exists(Path + "/32"))
MS = &AndroidMips64elMultilibs;
- if (MS->select(Flags, Result.SelectedMultilib)) {
+ if (MS->select(Flags, Result.SelectedMultilibs)) {
Result.Multilibs = *MS;
return true;
}
@@ -1153,18 +1192,20 @@ static bool findMipsMuslMultilibs(const Multilib::flags_list &Flags,
// Musl toolchain multilibs
MultilibSet MuslMipsMultilibs;
{
- auto MArchMipsR2 = makeMultilib("")
+ auto MArchMipsR2 = MultilibBuilder("")
.osSuffix("/mips-r2-hard-musl")
- .flag("+EB")
- .flag("-EL")
- .flag("+march=mips32r2");
+ .flag("-EB")
+ .flag("-EL", /*Disallow=*/true)
+ .flag("-march=mips32r2");
- auto MArchMipselR2 = makeMultilib("/mipsel-r2-hard-musl")
- .flag("-EB")
- .flag("+EL")
- .flag("+march=mips32r2");
+ auto MArchMipselR2 = MultilibBuilder("/mipsel-r2-hard-musl")
+ .flag("-EB", /*Disallow=*/true)
+ .flag("-EL")
+ .flag("-march=mips32r2");
- MuslMipsMultilibs = MultilibSet().Either(MArchMipsR2, MArchMipselR2);
+ MuslMipsMultilibs = MultilibSetBuilder()
+ .Either(MArchMipsR2, MArchMipselR2)
+ .makeMultilibSet();
// Specify the callback that computes the include directories.
MuslMipsMultilibs.setIncludeDirsCallback([](const Multilib &M) {
@@ -1172,7 +1213,7 @@ static bool findMipsMuslMultilibs(const Multilib::flags_list &Flags,
{"/../sysroot" + M.osSuffix() + "/usr/include"});
});
}
- if (MuslMipsMultilibs.select(Flags, Result.SelectedMultilib)) {
+ if (MuslMipsMultilibs.select(Flags, Result.SelectedMultilibs)) {
Result.Multilibs = MuslMipsMultilibs;
return true;
}
@@ -1185,48 +1226,54 @@ static bool findMipsMtiMultilibs(const Multilib::flags_list &Flags,
// CodeScape MTI toolchain v1.2 and early.
MultilibSet MtiMipsMultilibsV1;
{
- auto MArchMips32 = makeMultilib("/mips32")
- .flag("+m32")
+ auto MArchMips32 = MultilibBuilder("/mips32")
+ .flag("-m32")
+ .flag("-m64", /*Disallow=*/true)
+ .flag("-mmicromips", /*Disallow=*/true)
+ .flag("-march=mips32");
+
+ auto MArchMicroMips = MultilibBuilder("/micromips")
+ .flag("-m32")
+ .flag("-m64", /*Disallow=*/true)
+ .flag("-mmicromips");
+
+ auto MArchMips64r2 = MultilibBuilder("/mips64r2")
+ .flag("-m32", /*Disallow=*/true)
+ .flag("-m64")
+ .flag("-march=mips64r2");
+
+ auto MArchMips64 = MultilibBuilder("/mips64")
+ .flag("-m32", /*Disallow=*/true)
.flag("-m64")
- .flag("-mmicromips")
- .flag("+march=mips32");
-
- auto MArchMicroMips = makeMultilib("/micromips")
- .flag("+m32")
- .flag("-m64")
- .flag("+mmicromips");
+ .flag("-march=mips64r2", /*Disallow=*/true);
- auto MArchMips64r2 = makeMultilib("/mips64r2")
- .flag("-m32")
- .flag("+m64")
- .flag("+march=mips64r2");
+ auto MArchDefault = MultilibBuilder("")
+ .flag("-m32")
+ .flag("-m64", /*Disallow=*/true)
+ .flag("-mmicromips", /*Disallow=*/true)
+ .flag("-march=mips32r2");
- auto MArchMips64 = makeMultilib("/mips64").flag("-m32").flag("+m64").flag(
- "-march=mips64r2");
+ auto Mips16 = MultilibBuilder("/mips16").flag("-mips16");
- auto MArchDefault = makeMultilib("")
- .flag("+m32")
- .flag("-m64")
- .flag("-mmicromips")
- .flag("+march=mips32r2");
+ auto UCLibc = MultilibBuilder("/uclibc").flag("-muclibc");
- auto Mips16 = makeMultilib("/mips16").flag("+mips16");
+ auto MAbi64 = MultilibBuilder("/64")
+ .flag("-mabi=n64")
+ .flag("-mabi=n32", /*Disallow=*/true)
+ .flag("-m32", /*Disallow=*/true);
- auto UCLibc = makeMultilib("/uclibc").flag("+muclibc");
+ auto BigEndian =
+ MultilibBuilder("").flag("-EB").flag("-EL", /*Disallow=*/true);
- auto MAbi64 =
- makeMultilib("/64").flag("+mabi=n64").flag("-mabi=n32").flag("-m32");
+ auto LittleEndian =
+ MultilibBuilder("/el").flag("-EL").flag("-EB", /*Disallow=*/true);
- auto BigEndian = makeMultilib("").flag("+EB").flag("-EL");
+ auto SoftFloat = MultilibBuilder("/sof").flag("-msoft-float");
- auto LittleEndian = makeMultilib("/el").flag("+EL").flag("-EB");
-
- auto SoftFloat = makeMultilib("/sof").flag("+msoft-float");
-
- auto Nan2008 = makeMultilib("/nan2008").flag("+mnan=2008");
+ auto Nan2008 = MultilibBuilder("/nan2008").flag("-mnan=2008");
MtiMipsMultilibsV1 =
- MultilibSet()
+ MultilibSetBuilder()
.Either(MArchMips32, MArchMicroMips, MArchMips64r2, MArchMips64,
MArchDefault)
.Maybe(UCLibc)
@@ -1243,10 +1290,11 @@ static bool findMipsMtiMultilibs(const Multilib::flags_list &Flags,
.Maybe(SoftFloat)
.Maybe(Nan2008)
.FilterOut(".*sof/nan2008")
+ .makeMultilibSet()
.FilterOut(NonExistent)
.setIncludeDirsCallback([](const Multilib &M) {
std::vector<std::string> Dirs({"/include"});
- if (StringRef(M.includeSuffix()).startswith("/uclibc"))
+ if (StringRef(M.includeSuffix()).starts_with("/uclibc"))
Dirs.push_back("/../../../../sysroot/uclibc/usr/include");
else
Dirs.push_back("/../../../../sysroot/usr/include");
@@ -1257,80 +1305,87 @@ static bool findMipsMtiMultilibs(const Multilib::flags_list &Flags,
// CodeScape IMG toolchain starting from v1.3.
MultilibSet MtiMipsMultilibsV2;
{
- auto BeHard = makeMultilib("/mips-r2-hard")
- .flag("+EB")
+ auto BeHard = MultilibBuilder("/mips-r2-hard")
+ .flag("-EB")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mnan=2008", /*Disallow=*/true)
+ .flag("-muclibc", /*Disallow=*/true);
+ auto BeSoft = MultilibBuilder("/mips-r2-soft")
+ .flag("-EB")
.flag("-msoft-float")
- .flag("-mnan=2008")
- .flag("-muclibc");
- auto BeSoft = makeMultilib("/mips-r2-soft")
- .flag("+EB")
- .flag("+msoft-float")
- .flag("-mnan=2008");
- auto ElHard = makeMultilib("/mipsel-r2-hard")
- .flag("+EL")
+ .flag("-mnan=2008", /*Disallow=*/true);
+ auto ElHard = MultilibBuilder("/mipsel-r2-hard")
+ .flag("-EL")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mnan=2008", /*Disallow=*/true)
+ .flag("-muclibc", /*Disallow=*/true);
+ auto ElSoft = MultilibBuilder("/mipsel-r2-soft")
+ .flag("-EL")
.flag("-msoft-float")
- .flag("-mnan=2008")
- .flag("-muclibc");
- auto ElSoft = makeMultilib("/mipsel-r2-soft")
- .flag("+EL")
- .flag("+msoft-float")
- .flag("-mnan=2008")
- .flag("-mmicromips");
- auto BeHardNan = makeMultilib("/mips-r2-hard-nan2008")
- .flag("+EB")
- .flag("-msoft-float")
- .flag("+mnan=2008")
- .flag("-muclibc");
- auto ElHardNan = makeMultilib("/mipsel-r2-hard-nan2008")
- .flag("+EL")
- .flag("-msoft-float")
- .flag("+mnan=2008")
- .flag("-muclibc")
- .flag("-mmicromips");
- auto BeHardNanUclibc = makeMultilib("/mips-r2-hard-nan2008-uclibc")
- .flag("+EB")
- .flag("-msoft-float")
- .flag("+mnan=2008")
- .flag("+muclibc");
- auto ElHardNanUclibc = makeMultilib("/mipsel-r2-hard-nan2008-uclibc")
- .flag("+EL")
- .flag("-msoft-float")
- .flag("+mnan=2008")
- .flag("+muclibc");
- auto BeHardUclibc = makeMultilib("/mips-r2-hard-uclibc")
- .flag("+EB")
- .flag("-msoft-float")
- .flag("-mnan=2008")
- .flag("+muclibc");
- auto ElHardUclibc = makeMultilib("/mipsel-r2-hard-uclibc")
- .flag("+EL")
- .flag("-msoft-float")
- .flag("-mnan=2008")
- .flag("+muclibc");
- auto ElMicroHardNan = makeMultilib("/micromipsel-r2-hard-nan2008")
- .flag("+EL")
- .flag("-msoft-float")
- .flag("+mnan=2008")
- .flag("+mmicromips");
- auto ElMicroSoft = makeMultilib("/micromipsel-r2-soft")
- .flag("+EL")
- .flag("+msoft-float")
- .flag("-mnan=2008")
- .flag("+mmicromips");
-
- auto O32 =
- makeMultilib("/lib").osSuffix("").flag("-mabi=n32").flag("-mabi=n64");
- auto N32 =
- makeMultilib("/lib32").osSuffix("").flag("+mabi=n32").flag("-mabi=n64");
- auto N64 =
- makeMultilib("/lib64").osSuffix("").flag("-mabi=n32").flag("+mabi=n64");
+ .flag("-mnan=2008", /*Disallow=*/true)
+ .flag("-mmicromips", /*Disallow=*/true);
+ auto BeHardNan = MultilibBuilder("/mips-r2-hard-nan2008")
+ .flag("-EB")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mnan=2008")
+ .flag("-muclibc", /*Disallow=*/true);
+ auto ElHardNan = MultilibBuilder("/mipsel-r2-hard-nan2008")
+ .flag("-EL")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mnan=2008")
+ .flag("-muclibc", /*Disallow=*/true)
+ .flag("-mmicromips", /*Disallow=*/true);
+ auto BeHardNanUclibc = MultilibBuilder("/mips-r2-hard-nan2008-uclibc")
+ .flag("-EB")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mnan=2008")
+ .flag("-muclibc");
+ auto ElHardNanUclibc = MultilibBuilder("/mipsel-r2-hard-nan2008-uclibc")
+ .flag("-EL")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mnan=2008")
+ .flag("-muclibc");
+ auto BeHardUclibc = MultilibBuilder("/mips-r2-hard-uclibc")
+ .flag("-EB")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mnan=2008", /*Disallow=*/true)
+ .flag("-muclibc");
+ auto ElHardUclibc = MultilibBuilder("/mipsel-r2-hard-uclibc")
+ .flag("-EL")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mnan=2008", /*Disallow=*/true)
+ .flag("-muclibc");
+ auto ElMicroHardNan = MultilibBuilder("/micromipsel-r2-hard-nan2008")
+ .flag("-EL")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mnan=2008")
+ .flag("-mmicromips");
+ auto ElMicroSoft = MultilibBuilder("/micromipsel-r2-soft")
+ .flag("-EL")
+ .flag("-msoft-float")
+ .flag("-mnan=2008", /*Disallow=*/true)
+ .flag("-mmicromips");
+
+ auto O32 = MultilibBuilder("/lib")
+ .osSuffix("")
+ .flag("-mabi=n32", /*Disallow=*/true)
+ .flag("-mabi=n64", /*Disallow=*/true);
+ auto N32 = MultilibBuilder("/lib32")
+ .osSuffix("")
+ .flag("-mabi=n32")
+ .flag("-mabi=n64", /*Disallow=*/true);
+ auto N64 = MultilibBuilder("/lib64")
+ .osSuffix("")
+ .flag("-mabi=n32", /*Disallow=*/true)
+ .flag("-mabi=n64");
MtiMipsMultilibsV2 =
- MultilibSet()
+ MultilibSetBuilder()
.Either({BeHard, BeSoft, ElHard, ElSoft, BeHardNan, ElHardNan,
BeHardNanUclibc, ElHardNanUclibc, BeHardUclibc,
ElHardUclibc, ElMicroHardNan, ElMicroSoft})
.Either(O32, N32, N64)
+ .makeMultilibSet()
.FilterOut(NonExistent)
.setIncludeDirsCallback([](const Multilib &M) {
return std::vector<std::string>({"/../../../../sysroot" +
@@ -1342,8 +1397,8 @@ static bool findMipsMtiMultilibs(const Multilib::flags_list &Flags,
{"/../../../../mips-mti-linux-gnu/lib" + M.gccSuffix()});
});
}
- for (auto Candidate : {&MtiMipsMultilibsV1, &MtiMipsMultilibsV2}) {
- if (Candidate->select(Flags, Result.SelectedMultilib)) {
+ for (auto *Candidate : {&MtiMipsMultilibsV1, &MtiMipsMultilibsV2}) {
+ if (Candidate->select(Flags, Result.SelectedMultilibs)) {
Result.Multilibs = *Candidate;
return true;
}
@@ -1357,18 +1412,24 @@ static bool findMipsImgMultilibs(const Multilib::flags_list &Flags,
// CodeScape IMG toolchain v1.2 and early.
MultilibSet ImgMultilibsV1;
{
- auto Mips64r6 = makeMultilib("/mips64r6").flag("+m64").flag("-m32");
+ auto Mips64r6 = MultilibBuilder("/mips64r6")
+ .flag("-m64")
+ .flag("-m32", /*Disallow=*/true);
- auto LittleEndian = makeMultilib("/el").flag("+EL").flag("-EB");
+ auto LittleEndian =
+ MultilibBuilder("/el").flag("-EL").flag("-EB", /*Disallow=*/true);
- auto MAbi64 =
- makeMultilib("/64").flag("+mabi=n64").flag("-mabi=n32").flag("-m32");
+ auto MAbi64 = MultilibBuilder("/64")
+ .flag("-mabi=n64")
+ .flag("-mabi=n32", /*Disallow=*/true)
+ .flag("-m32", /*Disallow=*/true);
ImgMultilibsV1 =
- MultilibSet()
+ MultilibSetBuilder()
.Maybe(Mips64r6)
.Maybe(MAbi64)
.Maybe(LittleEndian)
+ .makeMultilibSet()
.FilterOut(NonExistent)
.setIncludeDirsCallback([](const Multilib &M) {
return std::vector<std::string>(
@@ -1379,51 +1440,58 @@ static bool findMipsImgMultilibs(const Multilib::flags_list &Flags,
// CodeScape IMG toolchain starting from v1.3.
MultilibSet ImgMultilibsV2;
{
- auto BeHard = makeMultilib("/mips-r6-hard")
- .flag("+EB")
+ auto BeHard = MultilibBuilder("/mips-r6-hard")
+ .flag("-EB")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mmicromips", /*Disallow=*/true);
+ auto BeSoft = MultilibBuilder("/mips-r6-soft")
+ .flag("-EB")
.flag("-msoft-float")
- .flag("-mmicromips");
- auto BeSoft = makeMultilib("/mips-r6-soft")
- .flag("+EB")
- .flag("+msoft-float")
- .flag("-mmicromips");
- auto ElHard = makeMultilib("/mipsel-r6-hard")
- .flag("+EL")
+ .flag("-mmicromips", /*Disallow=*/true);
+ auto ElHard = MultilibBuilder("/mipsel-r6-hard")
+ .flag("-EL")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mmicromips", /*Disallow=*/true);
+ auto ElSoft = MultilibBuilder("/mipsel-r6-soft")
+ .flag("-EL")
.flag("-msoft-float")
- .flag("-mmicromips");
- auto ElSoft = makeMultilib("/mipsel-r6-soft")
- .flag("+EL")
- .flag("+msoft-float")
- .flag("-mmicromips");
- auto BeMicroHard = makeMultilib("/micromips-r6-hard")
- .flag("+EB")
+ .flag("-mmicromips", /*Disallow=*/true);
+ auto BeMicroHard = MultilibBuilder("/micromips-r6-hard")
+ .flag("-EB")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mmicromips");
+ auto BeMicroSoft = MultilibBuilder("/micromips-r6-soft")
+ .flag("-EB")
.flag("-msoft-float")
- .flag("+mmicromips");
- auto BeMicroSoft = makeMultilib("/micromips-r6-soft")
- .flag("+EB")
- .flag("+msoft-float")
- .flag("+mmicromips");
- auto ElMicroHard = makeMultilib("/micromipsel-r6-hard")
- .flag("+EL")
+ .flag("-mmicromips");
+ auto ElMicroHard = MultilibBuilder("/micromipsel-r6-hard")
+ .flag("-EL")
+ .flag("-msoft-float", /*Disallow=*/true)
+ .flag("-mmicromips");
+ auto ElMicroSoft = MultilibBuilder("/micromipsel-r6-soft")
+ .flag("-EL")
.flag("-msoft-float")
- .flag("+mmicromips");
- auto ElMicroSoft = makeMultilib("/micromipsel-r6-soft")
- .flag("+EL")
- .flag("+msoft-float")
- .flag("+mmicromips");
-
- auto O32 =
- makeMultilib("/lib").osSuffix("").flag("-mabi=n32").flag("-mabi=n64");
- auto N32 =
- makeMultilib("/lib32").osSuffix("").flag("+mabi=n32").flag("-mabi=n64");
- auto N64 =
- makeMultilib("/lib64").osSuffix("").flag("-mabi=n32").flag("+mabi=n64");
+ .flag("-mmicromips");
+
+ auto O32 = MultilibBuilder("/lib")
+ .osSuffix("")
+ .flag("-mabi=n32", /*Disallow=*/true)
+ .flag("-mabi=n64", /*Disallow=*/true);
+ auto N32 = MultilibBuilder("/lib32")
+ .osSuffix("")
+ .flag("-mabi=n32")
+ .flag("-mabi=n64", /*Disallow=*/true);
+ auto N64 = MultilibBuilder("/lib64")
+ .osSuffix("")
+ .flag("-mabi=n32", /*Disallow=*/true)
+ .flag("-mabi=n64");
ImgMultilibsV2 =
- MultilibSet()
+ MultilibSetBuilder()
.Either({BeHard, BeSoft, ElHard, ElSoft, BeMicroHard, BeMicroSoft,
ElMicroHard, ElMicroSoft})
.Either(O32, N32, N64)
+ .makeMultilibSet()
.FilterOut(NonExistent)
.setIncludeDirsCallback([](const Multilib &M) {
return std::vector<std::string>({"/../../../../sysroot" +
@@ -1435,8 +1503,8 @@ static bool findMipsImgMultilibs(const Multilib::flags_list &Flags,
{"/../../../../mips-img-linux-gnu/lib" + M.gccSuffix()});
});
}
- for (auto Candidate : {&ImgMultilibsV1, &ImgMultilibsV2}) {
- if (Candidate->select(Flags, Result.SelectedMultilib)) {
+ for (auto *Candidate : {&ImgMultilibsV1, &ImgMultilibsV2}) {
+ if (Candidate->select(Flags, Result.SelectedMultilibs)) {
Result.Multilibs = *Candidate;
return true;
}
@@ -1457,30 +1525,30 @@ bool clang::driver::findMIPSMultilibs(const Driver &D,
llvm::Triple::ArchType TargetArch = TargetTriple.getArch();
Multilib::flags_list Flags;
- addMultilibFlag(TargetTriple.isMIPS32(), "m32", Flags);
- addMultilibFlag(TargetTriple.isMIPS64(), "m64", Flags);
- addMultilibFlag(isMips16(Args), "mips16", Flags);
- addMultilibFlag(CPUName == "mips32", "march=mips32", Flags);
+ addMultilibFlag(TargetTriple.isMIPS32(), "-m32", Flags);
+ addMultilibFlag(TargetTriple.isMIPS64(), "-m64", Flags);
+ addMultilibFlag(isMips16(Args), "-mips16", Flags);
+ addMultilibFlag(CPUName == "mips32", "-march=mips32", Flags);
addMultilibFlag(CPUName == "mips32r2" || CPUName == "mips32r3" ||
CPUName == "mips32r5" || CPUName == "p5600",
- "march=mips32r2", Flags);
- addMultilibFlag(CPUName == "mips32r6", "march=mips32r6", Flags);
- addMultilibFlag(CPUName == "mips64", "march=mips64", Flags);
+ "-march=mips32r2", Flags);
+ addMultilibFlag(CPUName == "mips32r6", "-march=mips32r6", Flags);
+ addMultilibFlag(CPUName == "mips64", "-march=mips64", Flags);
addMultilibFlag(CPUName == "mips64r2" || CPUName == "mips64r3" ||
CPUName == "mips64r5" || CPUName == "octeon" ||
CPUName == "octeon+",
- "march=mips64r2", Flags);
- addMultilibFlag(CPUName == "mips64r6", "march=mips64r6", Flags);
- addMultilibFlag(isMicroMips(Args), "mmicromips", Flags);
- addMultilibFlag(tools::mips::isUCLibc(Args), "muclibc", Flags);
- addMultilibFlag(tools::mips::isNaN2008(Args, TargetTriple), "mnan=2008",
+ "-march=mips64r2", Flags);
+ addMultilibFlag(CPUName == "mips64r6", "-march=mips64r6", Flags);
+ addMultilibFlag(isMicroMips(Args), "-mmicromips", Flags);
+ addMultilibFlag(tools::mips::isUCLibc(Args), "-muclibc", Flags);
+ addMultilibFlag(tools::mips::isNaN2008(D, Args, TargetTriple), "-mnan=2008",
Flags);
- addMultilibFlag(ABIName == "n32", "mabi=n32", Flags);
- addMultilibFlag(ABIName == "n64", "mabi=n64", Flags);
- addMultilibFlag(isSoftFloatABI(Args), "msoft-float", Flags);
- addMultilibFlag(!isSoftFloatABI(Args), "mhard-float", Flags);
- addMultilibFlag(isMipsEL(TargetArch), "EL", Flags);
- addMultilibFlag(!isMipsEL(TargetArch), "EB", Flags);
+ addMultilibFlag(ABIName == "n32", "-mabi=n32", Flags);
+ addMultilibFlag(ABIName == "n64", "-mabi=n64", Flags);
+ addMultilibFlag(isSoftFloatABI(Args), "-msoft-float", Flags);
+ addMultilibFlag(!isSoftFloatABI(Args), "-mhard-float", Flags);
+ addMultilibFlag(isMipsEL(TargetArch), "-EL", Flags);
+ addMultilibFlag(!isMipsEL(TargetArch), "-EB", Flags);
if (TargetTriple.isAndroid())
return findMipsAndroidMultilibs(D.getVFS(), Path, Flags, NonExistent,
@@ -1509,7 +1577,7 @@ bool clang::driver::findMIPSMultilibs(const Driver &D,
Result.Multilibs.push_back(Default);
Result.Multilibs.FilterOut(NonExistent);
- if (Result.Multilibs.select(Flags, Result.SelectedMultilib)) {
+ if (Result.Multilibs.select(Flags, Result.SelectedMultilibs)) {
Result.BiarchSibling = Multilib();
return true;
}
@@ -1523,22 +1591,23 @@ static void findAndroidArmMultilibs(const Driver &D,
DetectedMultilibs &Result) {
// Find multilibs with subdirectories like armv7-a, thumb, armv7-a/thumb.
FilterNonExistent NonExistent(Path, "/crtbegin.o", D.getVFS());
- Multilib ArmV7Multilib = makeMultilib("/armv7-a")
- .flag("+march=armv7-a")
- .flag("-mthumb");
- Multilib ThumbMultilib = makeMultilib("/thumb")
- .flag("-march=armv7-a")
- .flag("+mthumb");
- Multilib ArmV7ThumbMultilib = makeMultilib("/armv7-a/thumb")
- .flag("+march=armv7-a")
- .flag("+mthumb");
- Multilib DefaultMultilib = makeMultilib("")
- .flag("-march=armv7-a")
- .flag("-mthumb");
+ MultilibBuilder ArmV7Multilib = MultilibBuilder("/armv7-a")
+ .flag("-march=armv7-a")
+ .flag("-mthumb", /*Disallow=*/true);
+ MultilibBuilder ThumbMultilib = MultilibBuilder("/thumb")
+ .flag("-march=armv7-a", /*Disallow=*/true)
+ .flag("-mthumb");
+ MultilibBuilder ArmV7ThumbMultilib =
+ MultilibBuilder("/armv7-a/thumb").flag("-march=armv7-a").flag("-mthumb");
+ MultilibBuilder DefaultMultilib =
+ MultilibBuilder("")
+ .flag("-march=armv7-a", /*Disallow=*/true)
+ .flag("-mthumb", /*Disallow=*/true);
MultilibSet AndroidArmMultilibs =
- MultilibSet()
- .Either(ThumbMultilib, ArmV7Multilib,
- ArmV7ThumbMultilib, DefaultMultilib)
+ MultilibSetBuilder()
+ .Either(ThumbMultilib, ArmV7Multilib, ArmV7ThumbMultilib,
+ DefaultMultilib)
+ .makeMultilibSet()
.FilterOut(NonExistent);
Multilib::flags_list Flags;
@@ -1552,10 +1621,10 @@ static void findAndroidArmMultilibs(const Driver &D,
bool IsArmV7Mode = (IsArmArch || IsThumbArch) &&
(llvm::ARM::parseArchVersion(Arch) == 7 ||
(IsArmArch && Arch == "" && IsV7SubArch));
- addMultilibFlag(IsArmV7Mode, "march=armv7-a", Flags);
- addMultilibFlag(IsThumbMode, "mthumb", Flags);
+ addMultilibFlag(IsArmV7Mode, "-march=armv7-a", Flags);
+ addMultilibFlag(IsThumbMode, "-mthumb", Flags);
- if (AndroidArmMultilibs.select(Flags, Result.SelectedMultilib))
+ if (AndroidArmMultilibs.select(Flags, Result.SelectedMultilibs))
Result.Multilibs = AndroidArmMultilibs;
}
@@ -1564,24 +1633,210 @@ static bool findMSP430Multilibs(const Driver &D,
StringRef Path, const ArgList &Args,
DetectedMultilibs &Result) {
FilterNonExistent NonExistent(Path, "/crtbegin.o", D.getVFS());
- Multilib WithoutExceptions = makeMultilib("/430").flag("-exceptions");
- Multilib WithExceptions = makeMultilib("/430/exceptions").flag("+exceptions");
+ MultilibBuilder WithoutExceptions =
+ MultilibBuilder("/430").flag("-exceptions", /*Disallow=*/true);
+ MultilibBuilder WithExceptions =
+ MultilibBuilder("/430/exceptions").flag("-exceptions");
// FIXME: when clang starts to support msp430x ISA additional logic
// to select between multilib must be implemented
- // Multilib MSP430xMultilib = makeMultilib("/large");
+ // MultilibBuilder MSP430xMultilib = MultilibBuilder("/large");
- Result.Multilibs.push_back(WithoutExceptions);
- Result.Multilibs.push_back(WithExceptions);
+ Result.Multilibs.push_back(WithoutExceptions.makeMultilib());
+ Result.Multilibs.push_back(WithExceptions.makeMultilib());
Result.Multilibs.FilterOut(NonExistent);
Multilib::flags_list Flags;
addMultilibFlag(Args.hasFlag(options::OPT_fexceptions,
options::OPT_fno_exceptions, false),
- "exceptions", Flags);
- if (Result.Multilibs.select(Flags, Result.SelectedMultilib))
+ "-exceptions", Flags);
+ if (Result.Multilibs.select(Flags, Result.SelectedMultilibs))
+ return true;
+
+ return false;
+}
+
+static void findCSKYMultilibs(const Driver &D, const llvm::Triple &TargetTriple,
+ StringRef Path, const ArgList &Args,
+ DetectedMultilibs &Result) {
+ FilterNonExistent NonExistent(Path, "/crtbegin.o", D.getVFS());
+
+ tools::csky::FloatABI TheFloatABI = tools::csky::getCSKYFloatABI(D, Args);
+ std::optional<llvm::StringRef> Res =
+ tools::csky::getCSKYArchName(D, Args, TargetTriple);
+
+ if (!Res)
+ return;
+ auto ARCHName = *Res;
+
+ Multilib::flags_list Flags;
+ addMultilibFlag(TheFloatABI == tools::csky::FloatABI::Hard, "-hard-fp",
+ Flags);
+ addMultilibFlag(TheFloatABI == tools::csky::FloatABI::SoftFP, "-soft-fp",
+ Flags);
+ addMultilibFlag(TheFloatABI == tools::csky::FloatABI::Soft, "-soft", Flags);
+ addMultilibFlag(ARCHName == "ck801", "-march=ck801", Flags);
+ addMultilibFlag(ARCHName == "ck802", "-march=ck802", Flags);
+ addMultilibFlag(ARCHName == "ck803", "-march=ck803", Flags);
+ addMultilibFlag(ARCHName == "ck804", "-march=ck804", Flags);
+ addMultilibFlag(ARCHName == "ck805", "-march=ck805", Flags);
+ addMultilibFlag(ARCHName == "ck807", "-march=ck807", Flags);
+ addMultilibFlag(ARCHName == "ck810", "-march=ck810", Flags);
+ addMultilibFlag(ARCHName == "ck810v", "-march=ck810v", Flags);
+ addMultilibFlag(ARCHName == "ck860", "-march=ck860", Flags);
+ addMultilibFlag(ARCHName == "ck860v", "-march=ck860v", Flags);
+
+ bool isBigEndian = false;
+ if (Arg *A = Args.getLastArg(options::OPT_mlittle_endian,
+ options::OPT_mbig_endian))
+ isBigEndian = !A->getOption().matches(options::OPT_mlittle_endian);
+ addMultilibFlag(isBigEndian, "-EB", Flags);
+
+ auto HardFloat = MultilibBuilder("/hard-fp").flag("-hard-fp");
+ auto SoftFpFloat = MultilibBuilder("/soft-fp").flag("-soft-fp");
+ auto SoftFloat = MultilibBuilder("").flag("-soft");
+ auto Arch801 = MultilibBuilder("/ck801").flag("-march=ck801");
+ auto Arch802 = MultilibBuilder("/ck802").flag("-march=ck802");
+ auto Arch803 = MultilibBuilder("/ck803").flag("-march=ck803");
+ // CK804 use the same library as CK803
+ auto Arch804 = MultilibBuilder("/ck803").flag("-march=ck804");
+ auto Arch805 = MultilibBuilder("/ck805").flag("-march=ck805");
+ auto Arch807 = MultilibBuilder("/ck807").flag("-march=ck807");
+ auto Arch810 = MultilibBuilder("").flag("-march=ck810");
+ auto Arch810v = MultilibBuilder("/ck810v").flag("-march=ck810v");
+ auto Arch860 = MultilibBuilder("/ck860").flag("-march=ck860");
+ auto Arch860v = MultilibBuilder("/ck860v").flag("-march=ck860v");
+ auto BigEndian = MultilibBuilder("/big").flag("-EB");
+
+ MultilibSet CSKYMultilibs =
+ MultilibSetBuilder()
+ .Maybe(BigEndian)
+ .Either({Arch801, Arch802, Arch803, Arch804, Arch805, Arch807,
+ Arch810, Arch810v, Arch860, Arch860v})
+ .Either(HardFloat, SoftFpFloat, SoftFloat)
+ .makeMultilibSet()
+ .FilterOut(NonExistent);
+
+ if (CSKYMultilibs.select(Flags, Result.SelectedMultilibs))
+ Result.Multilibs = CSKYMultilibs;
+}
+
+/// Extend the multi-lib re-use selection mechanism for RISC-V.
+/// This function will try to re-use multi-lib if they are compatible.
+/// Definition of compatible:
+/// - ABI must be the same.
+/// - multi-lib is a subset of current arch, e.g. multi-lib=march=rv32im
+/// is a subset of march=rv32imc.
+/// - march that contains atomic extension can't reuse multi-lib that
+/// doesn't have atomic, vice versa. e.g. multi-lib=march=rv32im and
+/// march=rv32ima are not compatible, because software and hardware
+/// atomic operation can't work together correctly.
+static bool
+selectRISCVMultilib(const MultilibSet &RISCVMultilibSet, StringRef Arch,
+ const Multilib::flags_list &Flags,
+ llvm::SmallVectorImpl<Multilib> &SelectedMultilibs) {
+ // Try to find the perfect matching multi-lib first.
+ if (RISCVMultilibSet.select(Flags, SelectedMultilibs))
return true;
+ Multilib::flags_list NewFlags;
+ std::vector<MultilibBuilder> NewMultilibs;
+
+ llvm::Expected<std::unique_ptr<llvm::RISCVISAInfo>> ParseResult =
+ llvm::RISCVISAInfo::parseArchString(
+ Arch, /*EnableExperimentalExtension=*/true,
+ /*ExperimentalExtensionVersionCheck=*/false);
+ // Ignore any error here, we assume it will be handled in another place.
+ if (llvm::errorToBool(ParseResult.takeError()))
+ return false;
+
+ auto &ISAInfo = *ParseResult;
+
+ addMultilibFlag(ISAInfo->getXLen() == 32, "-m32", NewFlags);
+ addMultilibFlag(ISAInfo->getXLen() == 64, "-m64", NewFlags);
+
+ // Collect all flags except march=*
+ for (StringRef Flag : Flags) {
+ if (Flag.starts_with("!march=") || Flag.starts_with("-march="))
+ continue;
+
+ NewFlags.push_back(Flag.str());
+ }
+
+ llvm::StringSet<> AllArchExts;
+ // Reconstruct multi-lib list, and break march option into separated
+ // extension. e.g. march=rv32im -> +i +m
+ for (const auto &M : RISCVMultilibSet) {
+ bool Skip = false;
+
+ MultilibBuilder NewMultilib =
+ MultilibBuilder(M.gccSuffix(), M.osSuffix(), M.includeSuffix());
+ for (StringRef Flag : M.flags()) {
+ // Add back all flags except -march.
+ if (!Flag.consume_front("-march=")) {
+ NewMultilib.flag(Flag);
+ continue;
+ }
+
+ // Break down -march into individual extension.
+ llvm::Expected<std::unique_ptr<llvm::RISCVISAInfo>> MLConfigParseResult =
+ llvm::RISCVISAInfo::parseArchString(
+ Flag, /*EnableExperimentalExtension=*/true,
+ /*ExperimentalExtensionVersionCheck=*/false);
+ // Ignore any error here, we assume it will handled in another place.
+ if (llvm::errorToBool(MLConfigParseResult.takeError())) {
+ // We might get a parsing error if rv32e in the list, we could just skip
+ // that and process the rest of multi-lib configs.
+ Skip = true;
+ continue;
+ }
+ auto &MLConfigISAInfo = *MLConfigParseResult;
+
+ const llvm::RISCVISAInfo::OrderedExtensionMap &MLConfigArchExts =
+ MLConfigISAInfo->getExtensions();
+ for (auto MLConfigArchExt : MLConfigArchExts) {
+ auto ExtName = MLConfigArchExt.first;
+ NewMultilib.flag(Twine("-", ExtName).str());
+
+ if (AllArchExts.insert(ExtName).second) {
+ addMultilibFlag(ISAInfo->hasExtension(ExtName),
+ Twine("-", ExtName).str(), NewFlags);
+ }
+ }
+
+ // Check the XLEN explicitly.
+ if (MLConfigISAInfo->getXLen() == 32) {
+ NewMultilib.flag("-m32");
+ NewMultilib.flag("-m64", /*Disallow*/ true);
+ } else {
+ NewMultilib.flag("-m32", /*Disallow*/ true);
+ NewMultilib.flag("-m64");
+ }
+
+ // Atomic extension must be explicitly checked, soft and hard atomic
+ // operation never co-work correctly.
+ if (!MLConfigISAInfo->hasExtension("a"))
+ NewMultilib.flag("-a", /*Disallow*/ true);
+ }
+
+ if (Skip)
+ continue;
+
+ NewMultilibs.emplace_back(NewMultilib);
+ }
+
+ // Build an internal used only multi-lib list, used for checking any
+ // compatible multi-lib.
+ MultilibSet NewRISCVMultilibs =
+ MultilibSetBuilder().Either(NewMultilibs).makeMultilibSet();
+
+ if (NewRISCVMultilibs.select(NewFlags, SelectedMultilibs))
+ for (const Multilib &NewSelectedM : SelectedMultilibs)
+ for (const auto &M : RISCVMultilibSet)
+ // Look up the corresponding multi-lib entry in original multi-lib set.
+ if (M.gccSuffix() == NewSelectedM.gccSuffix())
+ return true;
+
return false;
}
@@ -1601,17 +1856,19 @@ static void findRISCVBareMetalMultilibs(const Driver &D,
{"rv32imac", "ilp32"}, {"rv32imafc", "ilp32f"}, {"rv64imac", "lp64"},
{"rv64imafdc", "lp64d"}};
- std::vector<Multilib> Ms;
+ std::vector<MultilibBuilder> Ms;
for (auto Element : RISCVMultilibSet) {
// multilib path rule is ${march}/${mabi}
Ms.emplace_back(
- makeMultilib((Twine(Element.march) + "/" + Twine(Element.mabi)).str())
- .flag(Twine("+march=", Element.march).str())
- .flag(Twine("+mabi=", Element.mabi).str()));
+ MultilibBuilder(
+ (Twine(Element.march) + "/" + Twine(Element.mabi)).str())
+ .flag(Twine("-march=", Element.march).str())
+ .flag(Twine("-mabi=", Element.mabi).str()));
}
MultilibSet RISCVMultilibs =
- MultilibSet()
- .Either(ArrayRef<Multilib>(Ms))
+ MultilibSetBuilder()
+ .Either(Ms)
+ .makeMultilibSet()
.FilterOut(NonExistent)
.setFilePathsCallback([](const Multilib &M) {
return std::vector<std::string>(
@@ -1620,22 +1877,22 @@ static void findRISCVBareMetalMultilibs(const Driver &D,
"/../../../../riscv32-unknown-elf/lib" + M.gccSuffix()});
});
-
Multilib::flags_list Flags;
llvm::StringSet<> Added_ABIs;
StringRef ABIName = tools::riscv::getRISCVABI(Args, TargetTriple);
StringRef MArch = tools::riscv::getRISCVArch(Args, TargetTriple);
for (auto Element : RISCVMultilibSet) {
addMultilibFlag(MArch == Element.march,
- Twine("march=", Element.march).str().c_str(), Flags);
+ Twine("-march=", Element.march).str().c_str(), Flags);
if (!Added_ABIs.count(Element.mabi)) {
Added_ABIs.insert(Element.mabi);
addMultilibFlag(ABIName == Element.mabi,
- Twine("mabi=", Element.mabi).str().c_str(), Flags);
+ Twine("-mabi=", Element.mabi).str().c_str(), Flags);
}
}
- if (RISCVMultilibs.select(Flags, Result.SelectedMultilib))
+ if (selectRISCVMultilib(RISCVMultilibs, MArch, Flags,
+ Result.SelectedMultilibs))
Result.Multilibs = RISCVMultilibs;
}
@@ -1646,33 +1903,38 @@ static void findRISCVMultilibs(const Driver &D,
return findRISCVBareMetalMultilibs(D, TargetTriple, Path, Args, Result);
FilterNonExistent NonExistent(Path, "/crtbegin.o", D.getVFS());
- Multilib Ilp32 = makeMultilib("lib32/ilp32").flag("+m32").flag("+mabi=ilp32");
- Multilib Ilp32f =
- makeMultilib("lib32/ilp32f").flag("+m32").flag("+mabi=ilp32f");
- Multilib Ilp32d =
- makeMultilib("lib32/ilp32d").flag("+m32").flag("+mabi=ilp32d");
- Multilib Lp64 = makeMultilib("lib64/lp64").flag("+m64").flag("+mabi=lp64");
- Multilib Lp64f = makeMultilib("lib64/lp64f").flag("+m64").flag("+mabi=lp64f");
- Multilib Lp64d = makeMultilib("lib64/lp64d").flag("+m64").flag("+mabi=lp64d");
+ MultilibBuilder Ilp32 =
+ MultilibBuilder("lib32/ilp32").flag("-m32").flag("-mabi=ilp32");
+ MultilibBuilder Ilp32f =
+ MultilibBuilder("lib32/ilp32f").flag("-m32").flag("-mabi=ilp32f");
+ MultilibBuilder Ilp32d =
+ MultilibBuilder("lib32/ilp32d").flag("-m32").flag("-mabi=ilp32d");
+ MultilibBuilder Lp64 =
+ MultilibBuilder("lib64/lp64").flag("-m64").flag("-mabi=lp64");
+ MultilibBuilder Lp64f =
+ MultilibBuilder("lib64/lp64f").flag("-m64").flag("-mabi=lp64f");
+ MultilibBuilder Lp64d =
+ MultilibBuilder("lib64/lp64d").flag("-m64").flag("-mabi=lp64d");
MultilibSet RISCVMultilibs =
- MultilibSet()
+ MultilibSetBuilder()
.Either({Ilp32, Ilp32f, Ilp32d, Lp64, Lp64f, Lp64d})
+ .makeMultilibSet()
.FilterOut(NonExistent);
Multilib::flags_list Flags;
bool IsRV64 = TargetTriple.getArch() == llvm::Triple::riscv64;
StringRef ABIName = tools::riscv::getRISCVABI(Args, TargetTriple);
- addMultilibFlag(!IsRV64, "m32", Flags);
- addMultilibFlag(IsRV64, "m64", Flags);
- addMultilibFlag(ABIName == "ilp32", "mabi=ilp32", Flags);
- addMultilibFlag(ABIName == "ilp32f", "mabi=ilp32f", Flags);
- addMultilibFlag(ABIName == "ilp32d", "mabi=ilp32d", Flags);
- addMultilibFlag(ABIName == "lp64", "mabi=lp64", Flags);
- addMultilibFlag(ABIName == "lp64f", "mabi=lp64f", Flags);
- addMultilibFlag(ABIName == "lp64d", "mabi=lp64d", Flags);
+ addMultilibFlag(!IsRV64, "-m32", Flags);
+ addMultilibFlag(IsRV64, "-m64", Flags);
+ addMultilibFlag(ABIName == "ilp32", "-mabi=ilp32", Flags);
+ addMultilibFlag(ABIName == "ilp32f", "-mabi=ilp32f", Flags);
+ addMultilibFlag(ABIName == "ilp32d", "-mabi=ilp32d", Flags);
+ addMultilibFlag(ABIName == "lp64", "-mabi=lp64", Flags);
+ addMultilibFlag(ABIName == "lp64f", "-mabi=lp64f", Flags);
+ addMultilibFlag(ABIName == "lp64d", "-mabi=lp64d", Flags);
- if (RISCVMultilibs.select(Flags, Result.SelectedMultilib))
+ if (RISCVMultilibs.select(Flags, Result.SelectedMultilibs))
Result.Multilibs = RISCVMultilibs;
}
@@ -1681,7 +1943,7 @@ static bool findBiarchMultilibs(const Driver &D,
StringRef Path, const ArgList &Args,
bool NeedsBiarchSuffix,
DetectedMultilibs &Result) {
- Multilib Default;
+ MultilibBuilder DefaultBuilder;
// Some versions of SUSE and Fedora on ppc64 put 32-bit libs
// in what would normally be GCCInstallPath and put the 64-bit
@@ -1692,7 +1954,7 @@ static bool findBiarchMultilibs(const Driver &D,
StringRef Suff64 = "/64";
// Solaris uses platform-specific suffixes instead of /64.
- if (TargetTriple.getOS() == llvm::Triple::Solaris) {
+ if (TargetTriple.isOSSolaris()) {
switch (TargetTriple.getArch()) {
case llvm::Triple::x86:
case llvm::Triple::x86_64:
@@ -1707,24 +1969,33 @@ static bool findBiarchMultilibs(const Driver &D,
}
}
- Multilib Alt64 = Multilib()
+ Multilib Alt64 = MultilibBuilder()
.gccSuffix(Suff64)
.includeSuffix(Suff64)
- .flag("-m32")
- .flag("+m64")
- .flag("-mx32");
- Multilib Alt32 = Multilib()
+ .flag("-m32", /*Disallow=*/true)
+ .flag("-m64")
+ .flag("-mx32", /*Disallow=*/true)
+ .makeMultilib();
+ Multilib Alt32 = MultilibBuilder()
.gccSuffix("/32")
.includeSuffix("/32")
- .flag("+m32")
- .flag("-m64")
- .flag("-mx32");
- Multilib Altx32 = Multilib()
+ .flag("-m32")
+ .flag("-m64", /*Disallow=*/true)
+ .flag("-mx32", /*Disallow=*/true)
+ .makeMultilib();
+ Multilib Altx32 = MultilibBuilder()
.gccSuffix("/x32")
.includeSuffix("/x32")
- .flag("-m32")
- .flag("-m64")
- .flag("+mx32");
+ .flag("-m32", /*Disallow=*/true)
+ .flag("-m64", /*Disallow=*/true)
+ .flag("-mx32")
+ .makeMultilib();
+ Multilib Alt32sparc = MultilibBuilder()
+ .gccSuffix("/sparcv8plus")
+ .includeSuffix("/sparcv8plus")
+ .flag("-m32")
+ .flag("-m64", /*Disallow=*/true)
+ .makeMultilib();
// GCC toolchain for IAMCU doesn't have crtbegin.o, so look for libgcc.a.
FilterNonExistent NonExistent(
@@ -1736,10 +2007,14 @@ static bool findBiarchMultilibs(const Driver &D,
const bool IsX32 = TargetTriple.isX32();
if (TargetTriple.isArch32Bit() && !NonExistent(Alt32))
Want = WANT64;
+ if (TargetTriple.isArch32Bit() && !NonExistent(Alt32sparc))
+ Want = WANT64;
else if (TargetTriple.isArch64Bit() && IsX32 && !NonExistent(Altx32))
Want = WANT64;
else if (TargetTriple.isArch64Bit() && !IsX32 && !NonExistent(Alt64))
Want = WANT32;
+ else if (TargetTriple.isArch64Bit() && !NonExistent(Alt32sparc))
+ Want = WANT64;
else {
if (TargetTriple.isArch32Bit())
Want = NeedsBiarchSuffix ? WANT64 : WANT32;
@@ -1750,31 +2025,42 @@ static bool findBiarchMultilibs(const Driver &D,
}
if (Want == WANT32)
- Default.flag("+m32").flag("-m64").flag("-mx32");
+ DefaultBuilder.flag("-m32")
+ .flag("-m64", /*Disallow=*/true)
+ .flag("-mx32", /*Disallow=*/true);
else if (Want == WANT64)
- Default.flag("-m32").flag("+m64").flag("-mx32");
+ DefaultBuilder.flag("-m32", /*Disallow=*/true)
+ .flag("-m64")
+ .flag("-mx32", /*Disallow=*/true);
else if (Want == WANTX32)
- Default.flag("-m32").flag("-m64").flag("+mx32");
+ DefaultBuilder.flag("-m32", /*Disallow=*/true)
+ .flag("-m64", /*Disallow=*/true)
+ .flag("-mx32");
else
return false;
+ Multilib Default = DefaultBuilder.makeMultilib();
+
Result.Multilibs.push_back(Default);
Result.Multilibs.push_back(Alt64);
Result.Multilibs.push_back(Alt32);
Result.Multilibs.push_back(Altx32);
+ Result.Multilibs.push_back(Alt32sparc);
Result.Multilibs.FilterOut(NonExistent);
Multilib::flags_list Flags;
- addMultilibFlag(TargetTriple.isArch64Bit() && !IsX32, "m64", Flags);
- addMultilibFlag(TargetTriple.isArch32Bit(), "m32", Flags);
- addMultilibFlag(TargetTriple.isArch64Bit() && IsX32, "mx32", Flags);
+ addMultilibFlag(TargetTriple.isArch64Bit() && !IsX32, "-m64", Flags);
+ addMultilibFlag(TargetTriple.isArch32Bit(), "-m32", Flags);
+ addMultilibFlag(TargetTriple.isArch64Bit() && IsX32, "-mx32", Flags);
- if (!Result.Multilibs.select(Flags, Result.SelectedMultilib))
+ if (!Result.Multilibs.select(Flags, Result.SelectedMultilibs))
return false;
- if (Result.SelectedMultilib == Alt64 || Result.SelectedMultilib == Alt32 ||
- Result.SelectedMultilib == Altx32)
+ if (Result.SelectedMultilibs.back() == Alt64 ||
+ Result.SelectedMultilibs.back() == Alt32 ||
+ Result.SelectedMultilibs.back() == Altx32 ||
+ Result.SelectedMultilibs.back() == Alt32sparc)
Result.BiarchSibling = Default;
return true;
@@ -1790,8 +2076,15 @@ bool Generic_GCC::GCCVersion::isOlderThan(int RHSMajor, int RHSMinor,
StringRef RHSPatchSuffix) const {
if (Major != RHSMajor)
return Major < RHSMajor;
- if (Minor != RHSMinor)
+ if (Minor != RHSMinor) {
+ // Note that versions without a specified minor sort higher than those with
+ // a minor.
+ if (RHSMinor == -1)
+ return true;
+ if (Minor == -1)
+ return false;
return Minor < RHSMinor;
+ }
if (Patch != RHSPatch) {
// Note that versions without a specified patch sort higher than those with
// a patch.
@@ -1827,45 +2120,72 @@ Generic_GCC::GCCVersion Generic_GCC::GCCVersion::Parse(StringRef VersionText) {
std::pair<StringRef, StringRef> First = VersionText.split('.');
std::pair<StringRef, StringRef> Second = First.second.split('.');
- GCCVersion GoodVersion = {VersionText.str(), -1, -1, -1, "", "", ""};
- if (First.first.getAsInteger(10, GoodVersion.Major) || GoodVersion.Major < 0)
- return BadVersion;
- GoodVersion.MajorStr = First.first.str();
- if (First.second.empty())
- return GoodVersion;
+ StringRef MajorStr = First.first;
StringRef MinorStr = Second.first;
- if (Second.second.empty()) {
- if (size_t EndNumber = MinorStr.find_first_not_of("0123456789")) {
- GoodVersion.PatchSuffix = std::string(MinorStr.substr(EndNumber));
- MinorStr = MinorStr.slice(0, EndNumber);
- }
- }
- if (MinorStr.getAsInteger(10, GoodVersion.Minor) || GoodVersion.Minor < 0)
- return BadVersion;
- GoodVersion.MinorStr = MinorStr.str();
+ StringRef PatchStr = Second.second;
- // First look for a number prefix and parse that if present. Otherwise just
- // stash the entire patch string in the suffix, and leave the number
- // unspecified. This covers versions strings such as:
- // 5 (handled above)
+ GCCVersion GoodVersion = {VersionText.str(), -1, -1, -1, "", "", ""};
+
+ // Parse version number strings such as:
+ // 5
// 4.4
// 4.4-patched
// 4.4.0
// 4.4.x
// 4.4.2-rc4
// 4.4.x-patched
- // And retains any patch number it finds.
- StringRef PatchText = Second.second;
- if (!PatchText.empty()) {
- if (size_t EndNumber = PatchText.find_first_not_of("0123456789")) {
- // Try to parse the number and any suffix.
- if (PatchText.slice(0, EndNumber).getAsInteger(10, GoodVersion.Patch) ||
- GoodVersion.Patch < 0)
- return BadVersion;
- GoodVersion.PatchSuffix = std::string(PatchText.substr(EndNumber));
+ // 10-win32
+ // Split on '.', handle 1, 2 or 3 such segments. Each segment must contain
+ // purely a number, except for the last one, where a non-number suffix
+ // is stored in PatchSuffix. The third segment is allowed to not contain
+ // a number at all.
+
+ auto TryParseLastNumber = [&](StringRef Segment, int &Number,
+ std::string &OutStr) -> bool {
+ // Look for a number prefix and parse that, and split out any trailing
+ // string into GoodVersion.PatchSuffix.
+
+ if (size_t EndNumber = Segment.find_first_not_of("0123456789")) {
+ StringRef NumberStr = Segment.slice(0, EndNumber);
+ if (NumberStr.getAsInteger(10, Number) || Number < 0)
+ return false;
+ OutStr = NumberStr;
+ GoodVersion.PatchSuffix = Segment.substr(EndNumber);
+ return true;
}
+ return false;
+ };
+ auto TryParseNumber = [](StringRef Segment, int &Number) -> bool {
+ if (Segment.getAsInteger(10, Number) || Number < 0)
+ return false;
+ return true;
+ };
+
+ if (MinorStr.empty()) {
+ // If no minor string, major is the last segment
+ if (!TryParseLastNumber(MajorStr, GoodVersion.Major, GoodVersion.MajorStr))
+ return BadVersion;
+ return GoodVersion;
+ }
+
+ if (!TryParseNumber(MajorStr, GoodVersion.Major))
+ return BadVersion;
+ GoodVersion.MajorStr = MajorStr;
+
+ if (PatchStr.empty()) {
+ // If no patch string, minor is the last segment
+ if (!TryParseLastNumber(MinorStr, GoodVersion.Minor, GoodVersion.MinorStr))
+ return BadVersion;
+ return GoodVersion;
}
+ if (!TryParseNumber(MinorStr, GoodVersion.Minor))
+ return BadVersion;
+ GoodVersion.MinorStr = MinorStr;
+
+ // For the last segment, tolerate a missing number.
+ std::string DummyStr;
+ TryParseLastNumber(PatchStr, GoodVersion.Patch, DummyStr);
return GoodVersion;
}
@@ -1894,8 +2214,7 @@ static llvm::StringRef getGCCToolchainDir(const ArgList &Args,
/// necessary because the driver doesn't store the final version of the target
/// triple.
void Generic_GCC::GCCInstallationDetector::init(
- const llvm::Triple &TargetTriple, const ArgList &Args,
- ArrayRef<std::string> ExtraTripleAliases) {
+ const llvm::Triple &TargetTriple, const ArgList &Args) {
llvm::Triple BiarchVariantTriple = TargetTriple.isArch32Bit()
? TargetTriple.get64BitArchVariant()
: TargetTriple.get32BitArchVariant();
@@ -1904,10 +2223,48 @@ void Generic_GCC::GCCInstallationDetector::init(
// The compatible GCC triples for this particular architecture.
SmallVector<StringRef, 16> CandidateTripleAliases;
SmallVector<StringRef, 16> CandidateBiarchTripleAliases;
+ // Add some triples that we want to check first.
+ CandidateTripleAliases.push_back(TargetTriple.str());
+ std::string TripleNoVendor = TargetTriple.getArchName().str() + "-" +
+ TargetTriple.getOSAndEnvironmentName().str();
+ if (TargetTriple.getVendor() == llvm::Triple::UnknownVendor)
+ CandidateTripleAliases.push_back(TripleNoVendor);
+
CollectLibDirsAndTriples(TargetTriple, BiarchVariantTriple, CandidateLibDirs,
CandidateTripleAliases, CandidateBiarchLibDirs,
CandidateBiarchTripleAliases);
+ // If --gcc-install-dir= is specified, skip filesystem detection.
+ if (const Arg *A =
+ Args.getLastArg(clang::driver::options::OPT_gcc_install_dir_EQ);
+ A && A->getValue()[0]) {
+ StringRef InstallDir = A->getValue();
+ if (!ScanGCCForMultilibs(TargetTriple, Args, InstallDir, false)) {
+ D.Diag(diag::err_drv_invalid_gcc_install_dir) << InstallDir;
+ } else {
+ (void)InstallDir.consume_back("/");
+ StringRef VersionText = llvm::sys::path::filename(InstallDir);
+ StringRef TripleText =
+ llvm::sys::path::filename(llvm::sys::path::parent_path(InstallDir));
+
+ Version = GCCVersion::Parse(VersionText);
+ GCCTriple.setTriple(TripleText);
+ GCCInstallPath = std::string(InstallDir);
+ GCCParentLibPath = GCCInstallPath + "/../../..";
+ IsValid = true;
+ }
+ return;
+ }
+
+ // If --gcc-triple is specified use this instead of trying to
+ // auto-detect a triple.
+ if (const Arg *A =
+ Args.getLastArg(clang::driver::options::OPT_gcc_triple_EQ)) {
+ StringRef GCCTriple = A->getValue();
+ CandidateTripleAliases.clear();
+ CandidateTripleAliases.push_back(GCCTriple);
+ }
+
// Compute the set of prefixes for our search.
SmallVector<std::string, 8> Prefixes;
StringRef GCCToolchainDir = getGCCToolchainDir(Args, D.SysRoot);
@@ -1943,9 +2300,6 @@ void Generic_GCC::GCCInstallationDetector::init(
// may pick the libraries for x86_64-pc-linux-gnu even when exact matching
// triple x86_64-gentoo-linux-gnu is present.
GentooTestTriples.push_back(TargetTriple.str());
- // Check rest of triples.
- GentooTestTriples.append(ExtraTripleAliases.begin(),
- ExtraTripleAliases.end());
GentooTestTriples.append(CandidateTripleAliases.begin(),
CandidateTripleAliases.end());
if (ScanGentooConfigs(TargetTriple, Args, GentooTestTriples,
@@ -1962,19 +2316,12 @@ void Generic_GCC::GCCInstallationDetector::init(
if (!VFS.exists(Prefix))
continue;
for (StringRef Suffix : CandidateLibDirs) {
- const std::string LibDir = Prefix + Suffix.str();
+ const std::string LibDir = concat(Prefix, Suffix);
if (!VFS.exists(LibDir))
continue;
// Maybe filter out <libdir>/gcc and <libdir>/gcc-cross.
bool GCCDirExists = VFS.exists(LibDir + "/gcc");
bool GCCCrossDirExists = VFS.exists(LibDir + "/gcc-cross");
- // Try to match the exact target triple first.
- ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, TargetTriple.str(),
- false, GCCDirExists, GCCCrossDirExists);
- // Try rest of possible triples.
- for (StringRef Candidate : ExtraTripleAliases) // Try these first.
- ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, Candidate, false,
- GCCDirExists, GCCCrossDirExists);
for (StringRef Candidate : CandidateTripleAliases)
ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, Candidate, false,
GCCDirExists, GCCCrossDirExists);
@@ -2011,8 +2358,8 @@ void Generic_GCC::GCCInstallationDetector::print(raw_ostream &OS) const {
}
bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
- if (BiarchSibling.hasValue()) {
- M = BiarchSibling.getValue();
+ if (BiarchSibling) {
+ M = *BiarchSibling;
return true;
}
return false;
@@ -2021,14 +2368,21 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
const llvm::Triple &TargetTriple, SmallVectorImpl<std::string> &Prefixes,
StringRef SysRoot) {
- if (TargetTriple.getOS() == llvm::Triple::Solaris) {
+
+ if (TargetTriple.isOSHaiku()) {
+ Prefixes.push_back(concat(SysRoot, "/boot/system/develop/tools"));
+ return;
+ }
+
+ if (TargetTriple.isOSSolaris()) {
// Solaris is a special case.
// The GCC installation is under
// /usr/gcc/<major>.<minor>/lib/gcc/<triple>/<major>.<minor>.<patch>/
// so we need to find those /usr/gcc/*/lib/gcc libdirs and go with
// /usr/gcc/<version> as a prefix.
- std::string PrefixDir = SysRoot.str() + "/usr/gcc";
+ SmallVector<std::pair<GCCVersion, std::string>, 8> SolarisPrefixes;
+ std::string PrefixDir = concat(SysRoot, "/usr/gcc");
std::error_code EC;
for (llvm::vfs::directory_iterator LI = D.getVFS().dir_begin(PrefixDir, EC),
LE;
@@ -2045,14 +2399,27 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
if (!D.getVFS().exists(CandidateLibPath))
continue;
- Prefixes.push_back(CandidatePrefix);
+ SolarisPrefixes.emplace_back(
+ std::make_pair(CandidateVersion, CandidatePrefix));
}
+ // Sort in reverse order so GCCInstallationDetector::init picks the latest.
+ std::sort(SolarisPrefixes.rbegin(), SolarisPrefixes.rend());
+ for (auto p : SolarisPrefixes)
+ Prefixes.emplace_back(p.second);
return;
}
- // Non-Solaris is much simpler - most systems just go with "/usr".
- if (SysRoot.empty() && TargetTriple.getOS() == llvm::Triple::Linux) {
- // Yet, still look for RHEL devtoolsets.
+ // For Linux, if --sysroot is not specified, look for RHEL/CentOS devtoolsets
+ // and gcc-toolsets.
+ if (SysRoot.empty() && TargetTriple.getOS() == llvm::Triple::Linux &&
+ D.getVFS().exists("/opt/rh")) {
+ // TODO: We may want to remove this, since the functionality
+ // can be achieved using config files.
+ Prefixes.push_back("/opt/rh/gcc-toolset-12/root/usr");
+ Prefixes.push_back("/opt/rh/gcc-toolset-11/root/usr");
+ Prefixes.push_back("/opt/rh/gcc-toolset-10/root/usr");
+ Prefixes.push_back("/opt/rh/devtoolset-12/root/usr");
+ Prefixes.push_back("/opt/rh/devtoolset-11/root/usr");
Prefixes.push_back("/opt/rh/devtoolset-10/root/usr");
Prefixes.push_back("/opt/rh/devtoolset-9/root/usr");
Prefixes.push_back("/opt/rh/devtoolset-8/root/usr");
@@ -2062,7 +2429,9 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
Prefixes.push_back("/opt/rh/devtoolset-3/root/usr");
Prefixes.push_back("/opt/rh/devtoolset-2/root/usr");
}
- Prefixes.push_back(SysRoot.str() + "/usr");
+
+ // Fall back to /usr which is used by most non-Solaris systems.
+ Prefixes.push_back(concat(SysRoot, "/usr"));
}
/*static*/ void Generic_GCC::GCCInstallationDetector::CollectLibDirsAndTriples(
@@ -2074,30 +2443,38 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
// Declare a bunch of static data sets that we'll select between below. These
// are specifically designed to always refer to string literals to avoid any
// lifetime or initialization issues.
+ //
+ // The *Triples variables hard code some triples so that, for example,
+ // --target=aarch64 (incomplete triple) can detect lib/aarch64-linux-gnu.
+ // They are not needed when the user has correct LLVM_DEFAULT_TARGET_TRIPLE
+ // and always uses the full --target (e.g. --target=aarch64-linux-gnu). The
+ // lists should shrink over time. Please don't add more elements to *Triples.
static const char *const AArch64LibDirs[] = {"/lib64", "/lib"};
static const char *const AArch64Triples[] = {
"aarch64-none-linux-gnu", "aarch64-linux-gnu", "aarch64-redhat-linux",
- "aarch64-suse-linux", "aarch64-linux-android"};
+ "aarch64-suse-linux"};
static const char *const AArch64beLibDirs[] = {"/lib"};
static const char *const AArch64beTriples[] = {"aarch64_be-none-linux-gnu",
"aarch64_be-linux-gnu"};
static const char *const ARMLibDirs[] = {"/lib"};
- static const char *const ARMTriples[] = {"arm-linux-gnueabi",
- "arm-linux-androideabi"};
+ static const char *const ARMTriples[] = {"arm-linux-gnueabi"};
static const char *const ARMHFTriples[] = {"arm-linux-gnueabihf",
"armv7hl-redhat-linux-gnueabi",
"armv6hl-suse-linux-gnueabi",
"armv7hl-suse-linux-gnueabi"};
static const char *const ARMebLibDirs[] = {"/lib"};
- static const char *const ARMebTriples[] = {"armeb-linux-gnueabi",
- "armeb-linux-androideabi"};
+ static const char *const ARMebTriples[] = {"armeb-linux-gnueabi"};
static const char *const ARMebHFTriples[] = {
"armeb-linux-gnueabihf", "armebv7hl-redhat-linux-gnueabi"};
static const char *const AVRLibDirs[] = {"/lib"};
static const char *const AVRTriples[] = {"avr"};
+ static const char *const CSKYLibDirs[] = {"/lib"};
+ static const char *const CSKYTriples[] = {
+ "csky-linux-gnuabiv2", "csky-linux-uclibcabiv2", "csky-elf-noneabiv2"};
+
static const char *const X86_64LibDirs[] = {"/lib64", "/lib"};
static const char *const X86_64Triples[] = {
"x86_64-linux-gnu", "x86_64-unknown-linux-gnu",
@@ -2105,31 +2482,32 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
"x86_64-redhat-linux", "x86_64-suse-linux",
"x86_64-manbo-linux-gnu", "x86_64-linux-gnu",
"x86_64-slackware-linux", "x86_64-unknown-linux",
- "x86_64-amazon-linux", "x86_64-linux-android"};
+ "x86_64-amazon-linux"};
static const char *const X32Triples[] = {"x86_64-linux-gnux32",
"x86_64-pc-linux-gnux32"};
static const char *const X32LibDirs[] = {"/libx32", "/lib"};
static const char *const X86LibDirs[] = {"/lib32", "/lib"};
static const char *const X86Triples[] = {
- "i586-linux-gnu", "i686-linux-gnu",
- "i686-pc-linux-gnu", "i386-redhat-linux6E",
- "i686-redhat-linux", "i386-redhat-linux",
- "i586-suse-linux", "i686-montavista-linux",
- "i686-linux-android", "i686-gnu",
+ "i586-linux-gnu", "i686-linux-gnu", "i686-pc-linux-gnu",
+ "i386-redhat-linux6E", "i686-redhat-linux", "i386-redhat-linux",
+ "i586-suse-linux", "i686-montavista-linux",
};
+ static const char *const LoongArch64LibDirs[] = {"/lib64", "/lib"};
+ static const char *const LoongArch64Triples[] = {
+ "loongarch64-linux-gnu", "loongarch64-unknown-linux-gnu"};
+
static const char *const M68kLibDirs[] = {"/lib"};
static const char *const M68kTriples[] = {
"m68k-linux-gnu", "m68k-unknown-linux-gnu", "m68k-suse-linux"};
- static const char *const MIPSLibDirs[] = {"/lib"};
+ static const char *const MIPSLibDirs[] = {"/libo32", "/lib"};
static const char *const MIPSTriples[] = {
"mips-linux-gnu", "mips-mti-linux", "mips-mti-linux-gnu",
"mips-img-linux-gnu", "mipsisa32r6-linux-gnu"};
- static const char *const MIPSELLibDirs[] = {"/lib"};
+ static const char *const MIPSELLibDirs[] = {"/libo32", "/lib"};
static const char *const MIPSELTriples[] = {
- "mipsel-linux-gnu", "mips-img-linux-gnu", "mipsisa32r6el-linux-gnu",
- "mipsel-linux-android"};
+ "mipsel-linux-gnu", "mips-img-linux-gnu", "mipsisa32r6el-linux-gnu"};
static const char *const MIPS64LibDirs[] = {"/lib64", "/lib"};
static const char *const MIPS64Triples[] = {
@@ -2140,8 +2518,7 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
static const char *const MIPS64ELTriples[] = {
"mips64el-linux-gnu", "mips-mti-linux-gnu",
"mips-img-linux-gnu", "mips64el-linux-gnuabi64",
- "mipsisa64r6el-linux-gnu", "mipsisa64r6el-linux-gnuabi64",
- "mips64el-linux-android"};
+ "mipsisa64r6el-linux-gnu", "mipsisa64r6el-linux-gnuabi64"};
static const char *const MIPSN32LibDirs[] = {"/lib32"};
static const char *const MIPSN32Triples[] = {"mips64-linux-gnuabin32",
@@ -2181,9 +2558,7 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
static const char *const RISCV64LibDirs[] = {"/lib64", "/lib"};
static const char *const RISCV64Triples[] = {"riscv64-unknown-linux-gnu",
"riscv64-linux-gnu",
- "riscv64-unknown-elf",
- "riscv64-redhat-linux",
- "riscv64-suse-linux"};
+ "riscv64-unknown-elf"};
static const char *const SPARCv8LibDirs[] = {"/lib32", "/lib"};
static const char *const SPARCv8Triples[] = {"sparc-linux-gnu",
@@ -2201,16 +2576,14 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
using std::begin;
using std::end;
- if (TargetTriple.getOS() == llvm::Triple::Solaris) {
+ if (TargetTriple.isOSSolaris()) {
static const char *const SolarisLibDirs[] = {"/lib"};
static const char *const SolarisSparcV8Triples[] = {
- "sparc-sun-solaris2.11", "sparc-sun-solaris2.12"};
+ "sparc-sun-solaris2.11"};
static const char *const SolarisSparcV9Triples[] = {
- "sparcv9-sun-solaris2.11", "sparcv9-sun-solaris2.12"};
- static const char *const SolarisX86Triples[] = {"i386-pc-solaris2.11",
- "i386-pc-solaris2.12"};
- static const char *const SolarisX86_64Triples[] = {"x86_64-pc-solaris2.11",
- "x86_64-pc-solaris2.12"};
+ "sparcv9-sun-solaris2.11"};
+ static const char *const SolarisX86Triples[] = {"i386-pc-solaris2.11"};
+ static const char *const SolarisX86_64Triples[] = {"x86_64-pc-solaris2.11"};
LibDirs.append(begin(SolarisLibDirs), end(SolarisLibDirs));
BiarchLibDirs.append(begin(SolarisLibDirs), end(SolarisLibDirs));
switch (TargetTriple.getArch()) {
@@ -2248,9 +2621,6 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
static const char *const AArch64AndroidTriples[] = {
"aarch64-linux-android"};
static const char *const ARMAndroidTriples[] = {"arm-linux-androideabi"};
- static const char *const MIPSELAndroidTriples[] = {"mipsel-linux-android"};
- static const char *const MIPS64ELAndroidTriples[] = {
- "mips64el-linux-android"};
static const char *const X86AndroidTriples[] = {"i686-linux-android"};
static const char *const X86_64AndroidTriples[] = {"x86_64-linux-android"};
@@ -2265,22 +2635,6 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
LibDirs.append(begin(ARMLibDirs), end(ARMLibDirs));
TripleAliases.append(begin(ARMAndroidTriples), end(ARMAndroidTriples));
break;
- case llvm::Triple::mipsel:
- LibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
- TripleAliases.append(begin(MIPSELAndroidTriples),
- end(MIPSELAndroidTriples));
- BiarchLibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
- BiarchTripleAliases.append(begin(MIPS64ELAndroidTriples),
- end(MIPS64ELAndroidTriples));
- break;
- case llvm::Triple::mips64el:
- LibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
- TripleAliases.append(begin(MIPS64ELAndroidTriples),
- end(MIPS64ELAndroidTriples));
- BiarchLibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
- BiarchTripleAliases.append(begin(MIPSELAndroidTriples),
- end(MIPSELAndroidTriples));
- break;
case llvm::Triple::x86_64:
LibDirs.append(begin(X86_64LibDirs), end(X86_64LibDirs));
TripleAliases.append(begin(X86_64AndroidTriples),
@@ -2303,6 +2657,23 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
return;
}
+ if (TargetTriple.isOSHurd()) {
+ switch (TargetTriple.getArch()) {
+ case llvm::Triple::x86_64:
+ LibDirs.append(begin(X86_64LibDirs), end(X86_64LibDirs));
+ TripleAliases.push_back("x86_64-gnu");
+ break;
+ case llvm::Triple::x86:
+ LibDirs.append(begin(X86LibDirs), end(X86LibDirs));
+ TripleAliases.push_back("i686-gnu");
+ break;
+ default:
+ break;
+ }
+
+ return;
+ }
+
switch (TargetTriple.getArch()) {
case llvm::Triple::aarch64:
LibDirs.append(begin(AArch64LibDirs), end(AArch64LibDirs));
@@ -2319,7 +2690,9 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
case llvm::Triple::arm:
case llvm::Triple::thumb:
LibDirs.append(begin(ARMLibDirs), end(ARMLibDirs));
- if (TargetTriple.getEnvironment() == llvm::Triple::GNUEABIHF) {
+ if (TargetTriple.getEnvironment() == llvm::Triple::GNUEABIHF ||
+ TargetTriple.getEnvironment() == llvm::Triple::MuslEABIHF ||
+ TargetTriple.getEnvironment() == llvm::Triple::EABIHF) {
TripleAliases.append(begin(ARMHFTriples), end(ARMHFTriples));
} else {
TripleAliases.append(begin(ARMTriples), end(ARMTriples));
@@ -2328,7 +2701,9 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
LibDirs.append(begin(ARMebLibDirs), end(ARMebLibDirs));
- if (TargetTriple.getEnvironment() == llvm::Triple::GNUEABIHF) {
+ if (TargetTriple.getEnvironment() == llvm::Triple::GNUEABIHF ||
+ TargetTriple.getEnvironment() == llvm::Triple::MuslEABIHF ||
+ TargetTriple.getEnvironment() == llvm::Triple::EABIHF) {
TripleAliases.append(begin(ARMebHFTriples), end(ARMebHFTriples));
} else {
TripleAliases.append(begin(ARMebTriples), end(ARMebTriples));
@@ -2338,6 +2713,10 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
LibDirs.append(begin(AVRLibDirs), end(AVRLibDirs));
TripleAliases.append(begin(AVRTriples), end(AVRTriples));
break;
+ case llvm::Triple::csky:
+ LibDirs.append(begin(CSKYLibDirs), end(CSKYLibDirs));
+ TripleAliases.append(begin(CSKYTriples), end(CSKYTriples));
+ break;
case llvm::Triple::x86_64:
if (TargetTriple.isX32()) {
LibDirs.append(begin(X32LibDirs), end(X32LibDirs));
@@ -2365,6 +2744,11 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
BiarchTripleAliases.append(begin(X32Triples), end(X32Triples));
}
break;
+ // TODO: Handle loongarch32.
+ case llvm::Triple::loongarch64:
+ LibDirs.append(begin(LoongArch64LibDirs), end(LoongArch64LibDirs));
+ TripleAliases.append(begin(LoongArch64Triples), end(LoongArch64Triples));
+ break;
case llvm::Triple::m68k:
LibDirs.append(begin(M68kLibDirs), end(M68kLibDirs));
TripleAliases.append(begin(M68kTriples), end(M68kTriples));
@@ -2466,10 +2850,6 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
break;
}
- // Always append the drivers target triple to the end, in case it doesn't
- // match any of our aliases.
- TripleAliases.push_back(TargetTriple.str());
-
// Also include the multiarch variant if it's different.
if (TargetTriple.str() != BiarchTriple.str())
BiarchTripleAliases.push_back(BiarchTriple.str());
@@ -2487,6 +2867,8 @@ bool Generic_GCC::GCCInstallationDetector::ScanGCCForMultilibs(
if (isArmOrThumbArch(TargetArch) && TargetTriple.isAndroid()) {
// It should also work without multilibs in a simplified toolchain.
findAndroidArmMultilibs(D, TargetTriple, Path, Args, Detected);
+ } else if (TargetTriple.isCSKY()) {
+ findCSKYMultilibs(D, TargetTriple, Path, Args, Detected);
} else if (TargetTriple.isMIPS()) {
if (!findMIPSMultilibs(D, TargetTriple, Path, Args, Detected))
return false;
@@ -2502,7 +2884,9 @@ bool Generic_GCC::GCCInstallationDetector::ScanGCCForMultilibs(
}
Multilibs = Detected.Multilibs;
- SelectedMultilib = Detected.SelectedMultilib;
+ SelectedMultilib = Detected.SelectedMultilibs.empty()
+ ? Multilib()
+ : Detected.SelectedMultilibs.back();
BiarchSibling = Detected.BiarchSibling;
return true;
@@ -2577,7 +2961,7 @@ bool Generic_GCC::GCCInstallationDetector::ScanGentooConfigs(
const llvm::Triple &TargetTriple, const ArgList &Args,
const SmallVectorImpl<StringRef> &CandidateTriples,
const SmallVectorImpl<StringRef> &CandidateBiarchTriples) {
- if (!D.getVFS().exists(D.SysRoot + GentooConfigDir))
+ if (!D.getVFS().exists(concat(D.SysRoot, GentooConfigDir)))
return false;
for (StringRef CandidateTriple : CandidateTriples) {
@@ -2596,8 +2980,8 @@ bool Generic_GCC::GCCInstallationDetector::ScanGentooGccConfig(
const llvm::Triple &TargetTriple, const ArgList &Args,
StringRef CandidateTriple, bool NeedsBiarchSuffix) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> File =
- D.getVFS().getBufferForFile(D.SysRoot + GentooConfigDir + "/config-" +
- CandidateTriple.str());
+ D.getVFS().getBufferForFile(concat(D.SysRoot, GentooConfigDir,
+ "/config-" + CandidateTriple.str()));
if (File) {
SmallVector<StringRef, 2> Lines;
File.get()->getBuffer().split(Lines, "\n");
@@ -2608,8 +2992,8 @@ bool Generic_GCC::GCCInstallationDetector::ScanGentooGccConfig(
continue;
// Process the config file pointed to by CURRENT.
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> ConfigFile =
- D.getVFS().getBufferForFile(D.SysRoot + GentooConfigDir + "/" +
- Line.str());
+ D.getVFS().getBufferForFile(
+ concat(D.SysRoot, GentooConfigDir, "/" + Line));
std::pair<StringRef, StringRef> ActiveVersion = Line.rsplit('-');
// List of paths to scan for libraries.
SmallVector<StringRef, 4> GentooScanPaths;
@@ -2642,7 +3026,7 @@ bool Generic_GCC::GCCInstallationDetector::ScanGentooGccConfig(
// Scan all paths for GCC libraries.
for (const auto &GentooScanPath : GentooScanPaths) {
- std::string GentooPath = D.SysRoot + std::string(GentooScanPath);
+ std::string GentooPath = concat(D.SysRoot, GentooScanPath);
if (D.getVFS().exists(GentooPath + "/crtbegin.o")) {
if (!ScanGCCForMultilibs(TargetTriple, Args, GentooPath,
NeedsBiarchSuffix))
@@ -2697,21 +3081,26 @@ Tool *Generic_GCC::buildLinker() const { return new tools::gcc::Linker(*this); }
void Generic_GCC::printVerboseInfo(raw_ostream &OS) const {
// Print the information about how we detected the GCC installation.
GCCInstallation.print(OS);
- CudaInstallation.print(OS);
- RocmInstallation.print(OS);
+ CudaInstallation->print(OS);
+ RocmInstallation->print(OS);
}
-bool Generic_GCC::IsUnwindTablesDefault(const ArgList &Args) const {
+ToolChain::UnwindTableLevel
+Generic_GCC::getDefaultUnwindTableLevel(const ArgList &Args) const {
switch (getArch()) {
case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_be:
case llvm::Triple::ppc:
case llvm::Triple::ppcle:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
+ case llvm::Triple::x86:
case llvm::Triple::x86_64:
- return true;
+ return UnwindTableLevel::Asynchronous;
default:
- return false;
+ return UnwindTableLevel::None;
}
}
@@ -2727,7 +3116,9 @@ bool Generic_GCC::isPICDefault() const {
}
}
-bool Generic_GCC::isPIEDefault() const { return false; }
+bool Generic_GCC::isPIEDefault(const llvm::opt::ArgList &Args) const {
+ return false;
+}
bool Generic_GCC::isPICDefaultForced() const {
return getArch() == llvm::Triple::x86_64 && getTriple().isOSWindows();
@@ -2735,40 +3126,12 @@ bool Generic_GCC::isPICDefaultForced() const {
bool Generic_GCC::IsIntegratedAssemblerDefault() const {
switch (getTriple().getArch()) {
- case llvm::Triple::x86:
- case llvm::Triple::x86_64:
- case llvm::Triple::aarch64:
- case llvm::Triple::aarch64_be:
- case llvm::Triple::arm:
- case llvm::Triple::armeb:
- case llvm::Triple::avr:
- case llvm::Triple::bpfel:
- case llvm::Triple::bpfeb:
- case llvm::Triple::thumb:
- case llvm::Triple::thumbeb:
- case llvm::Triple::ppc:
- case llvm::Triple::ppcle:
- case llvm::Triple::ppc64:
- case llvm::Triple::ppc64le:
- case llvm::Triple::riscv32:
- case llvm::Triple::riscv64:
- case llvm::Triple::systemz:
- case llvm::Triple::mips:
- case llvm::Triple::mipsel:
- case llvm::Triple::mips64:
- case llvm::Triple::mips64el:
- case llvm::Triple::msp430:
- case llvm::Triple::m68k:
- return true;
- case llvm::Triple::sparc:
- case llvm::Triple::sparcel:
- case llvm::Triple::sparcv9:
- if (getTriple().isOSFreeBSD() || getTriple().isOSOpenBSD() ||
- getTriple().isOSSolaris())
- return true;
+ case llvm::Triple::nvptx:
+ case llvm::Triple::nvptx64:
+ case llvm::Triple::xcore:
return false;
default:
- return false;
+ return true;
}
}
@@ -2794,6 +3157,7 @@ void Generic_GCC::AddMultilibPaths(const Driver &D,
path_list &Paths) {
// Add the multilib suffixed paths where they are available.
if (GCCInstallation.isValid()) {
+ assert(!SelectedMultilibs.empty());
const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
const std::string &LibPath =
std::string(GCCInstallation.getParentLibPath());
@@ -2801,13 +3165,19 @@ void Generic_GCC::AddMultilibPaths(const Driver &D,
// Sourcery CodeBench MIPS toolchain holds some libraries under
// a biarch-like suffix of the GCC installation.
if (const auto &PathsCallback = Multilibs.filePathsCallback())
- for (const auto &Path : PathsCallback(SelectedMultilib))
+ for (const auto &Path : PathsCallback(SelectedMultilibs.back()))
addPathIfExists(D, GCCInstallation.getInstallPath() + Path, Paths);
// Add lib/gcc/$triple/$version, with an optional /multilib suffix.
- addPathIfExists(
- D, GCCInstallation.getInstallPath() + SelectedMultilib.gccSuffix(),
- Paths);
+ addPathIfExists(D,
+ GCCInstallation.getInstallPath() +
+ SelectedMultilibs.back().gccSuffix(),
+ Paths);
+
+ // Add lib/gcc/$triple/$libdir
+ // For GCC built with --enable-version-specific-runtime-libs.
+ addPathIfExists(D, GCCInstallation.getInstallPath() + "/../" + OSLibDir,
+ Paths);
// GCC cross compiling toolchains will install target libraries which ship
// as part of the toolchain under <prefix>/<triple>/<libdir> rather than as
@@ -2829,7 +3199,7 @@ void Generic_GCC::AddMultilibPaths(const Driver &D,
// Clang diverges from GCC's behavior.
addPathIfExists(D,
LibPath + "/../" + GCCTriple.str() + "/lib/../" + OSLibDir +
- SelectedMultilib.osSuffix(),
+ SelectedMultilibs.back().osSuffix(),
Paths);
// If the GCC installation we found is inside of the sysroot, we want to
@@ -2841,7 +3211,7 @@ void Generic_GCC::AddMultilibPaths(const Driver &D,
// the cross. Note that GCC does include some of these directories in some
// configurations but this seems somewhere between questionable and simply
// a bug.
- if (StringRef(LibPath).startswith(SysRoot))
+ if (StringRef(LibPath).starts_with(SysRoot))
addPathIfExists(D, LibPath + "/../" + OSLibDir, Paths);
}
}
@@ -2902,34 +3272,53 @@ Generic_GCC::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
const Driver &D = getDriver();
std::string SysRoot = computeSysRoot();
- std::string Target = getTripleString();
+ if (SysRoot.empty())
+ SysRoot = llvm::sys::path::get_separator();
- auto AddIncludePath = [&](std::string Path) {
+ auto AddIncludePath = [&](StringRef Path, bool TargetDirRequired = false) {
std::string Version = detectLibcxxVersion(Path);
if (Version.empty())
return false;
// First add the per-target include path if it exists.
- std::string TargetDir = Path + "/" + Target + "/c++/" + Version;
- if (D.getVFS().exists(TargetDir))
- addSystemInclude(DriverArgs, CC1Args, TargetDir);
+ bool TargetDirExists = false;
+ std::optional<std::string> TargetIncludeDir = getTargetSubDirPath(Path);
+ if (TargetIncludeDir) {
+ SmallString<128> TargetDir(*TargetIncludeDir);
+ llvm::sys::path::append(TargetDir, "c++", Version);
+ if (D.getVFS().exists(TargetDir)) {
+ addSystemInclude(DriverArgs, CC1Args, TargetDir);
+ TargetDirExists = true;
+ }
+ }
+ if (TargetDirRequired && !TargetDirExists)
+ return false;
// Second add the generic one.
- addSystemInclude(DriverArgs, CC1Args, Path + "/c++/" + Version);
+ SmallString<128> GenericDir(Path);
+ llvm::sys::path::append(GenericDir, "c++", Version);
+ addSystemInclude(DriverArgs, CC1Args, GenericDir);
return true;
};
- // Android never uses the libc++ headers installed alongside the toolchain,
- // which are generally incompatible with the NDK libraries anyway.
- if (!getTriple().isAndroid())
- if (AddIncludePath(getDriver().Dir + "/../include"))
- return;
+ // Android only uses the libc++ headers installed alongside the toolchain if
+ // they contain an Android-specific target include path, otherwise they're
+ // incompatible with the NDK libraries.
+ SmallString<128> DriverIncludeDir(getDriver().Dir);
+ llvm::sys::path::append(DriverIncludeDir, "..", "include");
+ if (AddIncludePath(DriverIncludeDir,
+ /*TargetDirRequired=*/getTriple().isAndroid()))
+ return;
// If this is a development, non-installed, clang, libcxx will
// not be found at ../include/c++ but it likely to be found at
// one of the following two locations:
- if (AddIncludePath(SysRoot + "/usr/local/include"))
+ SmallString<128> UsrLocalIncludeDir(SysRoot);
+ llvm::sys::path::append(UsrLocalIncludeDir, "usr", "local", "include");
+ if (AddIncludePath(UsrLocalIncludeDir))
return;
- if (AddIncludePath(SysRoot + "/usr/include"))
+ SmallString<128> UsrIncludeDir(SysRoot);
+ llvm::sys::path::append(UsrIncludeDir, "usr", "include");
+ if (AddIncludePath(UsrIncludeDir))
return;
}
@@ -2987,6 +3376,15 @@ bool Generic_GCC::addGCCLibStdCxxIncludePaths(
TripleStr, Multilib.includeSuffix(), DriverArgs, CC1Args))
return true;
+ // Try /gcc/$triple/$version/include/c++/ (gcc --print-multiarch is not
+ // empty). Like above but for GCC built with
+ // --enable-version-specific-runtime-libs.
+ if (addLibStdCXXIncludePaths(LibDir.str() + "/gcc/" + TripleStr + "/" +
+ Version.Text + "/include/c++/",
+ TripleStr, Multilib.includeSuffix(), DriverArgs,
+ CC1Args))
+ return true;
+
// Detect Debian g++-multiarch-incdir.diff.
if (addLibStdCXXIncludePaths(LibDir.str() + "/../include/c++/" + Version.Text,
DebianMultiarch, Multilib.includeSuffix(),
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.h
index 40fd756a5653..0b664a182d75 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.h
@@ -10,6 +10,7 @@
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_GNU_H
#include "Cuda.h"
+#include "LazyDetector.h"
#include "ROCm.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
@@ -22,12 +23,12 @@ struct DetectedMultilibs {
/// The set of multilibs that the detected installation supports.
MultilibSet Multilibs;
- /// The primary multilib appropriate for the given flags.
- Multilib SelectedMultilib;
+ /// The multilibs appropriate for the given flags.
+ llvm::SmallVector<Multilib> SelectedMultilibs;
/// On Biarch systems, this corresponds to the default multilib when
/// targeting the non-default multilib. Otherwise, it is empty.
- llvm::Optional<Multilib> BiarchSibling;
+ std::optional<Multilib> BiarchSibling;
};
bool findMIPSMultilibs(const Driver &D, const llvm::Triple &TargetTriple,
@@ -201,7 +202,7 @@ public:
Multilib SelectedMultilib;
/// On Biarch systems, this corresponds to the default multilib when
/// targeting the non-default multilib. Otherwise, it is empty.
- llvm::Optional<Multilib> BiarchSibling;
+ std::optional<Multilib> BiarchSibling;
GCCVersion Version;
@@ -217,8 +218,7 @@ public:
public:
explicit GCCInstallationDetector(const Driver &D) : IsValid(false), D(D) {}
- void init(const llvm::Triple &TargetTriple, const llvm::opt::ArgList &Args,
- ArrayRef<std::string> ExtraTripleAliases = None);
+ void init(const llvm::Triple &TargetTriple, const llvm::opt::ArgList &Args);
/// Check whether we detected a valid GCC install.
bool isValid() const { return IsValid; }
@@ -286,8 +286,8 @@ public:
protected:
GCCInstallationDetector GCCInstallation;
- CudaInstallationDetector CudaInstallation;
- RocmInstallationDetector RocmInstallation;
+ LazyDetector<CudaInstallationDetector> CudaInstallation;
+ LazyDetector<RocmInstallationDetector> RocmInstallation;
public:
Generic_GCC(const Driver &D, const llvm::Triple &Triple,
@@ -296,9 +296,10 @@ public:
void printVerboseInfo(raw_ostream &OS) const override;
- bool IsUnwindTablesDefault(const llvm::opt::ArgList &Args) const override;
+ UnwindTableLevel
+ getDefaultUnwindTableLevel(const llvm::opt::ArgList &Args) const override;
bool isPICDefault() const override;
- bool isPIEDefault() const override;
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override;
bool isPICDefaultForced() const override;
bool IsIntegratedAssemblerDefault() const override;
llvm::opt::DerivedArgList *
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.cpp
deleted file mode 100644
index c4e840de86e1..000000000000
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.cpp
+++ /dev/null
@@ -1,458 +0,0 @@
-//===--- HIP.cpp - HIP Tool and ToolChain Implementations -------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "HIP.h"
-#include "AMDGPU.h"
-#include "CommonArgs.h"
-#include "clang/Basic/Cuda.h"
-#include "clang/Basic/TargetID.h"
-#include "clang/Driver/Compilation.h"
-#include "clang/Driver/Driver.h"
-#include "clang/Driver/DriverDiagnostic.h"
-#include "clang/Driver/InputInfo.h"
-#include "clang/Driver/Options.h"
-#include "llvm/Support/Alignment.h"
-#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/Path.h"
-#include "llvm/Support/TargetParser.h"
-
-using namespace clang::driver;
-using namespace clang::driver::toolchains;
-using namespace clang::driver::tools;
-using namespace clang;
-using namespace llvm::opt;
-
-#if defined(_WIN32) || defined(_WIN64)
-#define NULL_FILE "nul"
-#else
-#define NULL_FILE "/dev/null"
-#endif
-
-namespace {
-const unsigned HIPCodeObjectAlign = 4096;
-} // namespace
-
-void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
- const InputInfoList &Inputs,
- const InputInfo &Output,
- const llvm::opt::ArgList &Args) const {
- // Construct lld command.
- // The output from ld.lld is an HSA code object file.
- ArgStringList LldArgs{"-flavor", "gnu", "--no-undefined", "-shared",
- "-plugin-opt=-amdgpu-internalize-symbols"};
-
- auto &TC = getToolChain();
- auto &D = TC.getDriver();
- assert(!Inputs.empty() && "Must have at least one input.");
- bool IsThinLTO = D.getLTOMode(/*IsOffload=*/true) == LTOK_Thin;
- addLTOOptions(TC, Args, LldArgs, Output, Inputs[0], IsThinLTO);
-
- // Extract all the -m options
- std::vector<llvm::StringRef> Features;
- amdgpu::getAMDGPUTargetFeatures(D, TC.getTriple(), Args, Features);
-
- // Add features to mattr such as cumode
- std::string MAttrString = "-plugin-opt=-mattr=";
- for (auto OneFeature : unifyTargetFeatures(Features)) {
- MAttrString.append(Args.MakeArgString(OneFeature));
- if (OneFeature != Features.back())
- MAttrString.append(",");
- }
- if (!Features.empty())
- LldArgs.push_back(Args.MakeArgString(MAttrString));
-
- // ToDo: Remove this option after AMDGPU backend supports ISA-level linking.
- // Since AMDGPU backend currently does not support ISA-level linking, all
- // called functions need to be imported.
- if (IsThinLTO)
- LldArgs.push_back(Args.MakeArgString("-plugin-opt=-force-import-all"));
-
- for (const Arg *A : Args.filtered(options::OPT_mllvm)) {
- LldArgs.push_back(
- Args.MakeArgString(Twine("-plugin-opt=") + A->getValue(0)));
- }
-
- if (C.getDriver().isSaveTempsEnabled())
- LldArgs.push_back("-save-temps");
-
- addLinkerCompressDebugSectionsOption(TC, Args, LldArgs);
-
- LldArgs.append({"-o", Output.getFilename()});
- for (auto Input : Inputs)
- LldArgs.push_back(Input.getFilename());
-
- if (Args.hasFlag(options::OPT_fgpu_sanitize, options::OPT_fno_gpu_sanitize,
- false))
- llvm::for_each(TC.getHIPDeviceLibs(Args), [&](StringRef BCFile) {
- LldArgs.push_back(Args.MakeArgString(BCFile));
- });
-
- const char *Lld = Args.MakeArgString(getToolChain().GetProgramPath("lld"));
- C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Lld, LldArgs, Inputs, Output));
-}
-
-// Construct a clang-offload-bundler command to bundle code objects for
-// different GPU's into a HIP fat binary.
-void AMDGCN::constructHIPFatbinCommand(Compilation &C, const JobAction &JA,
- StringRef OutputFileName, const InputInfoList &Inputs,
- const llvm::opt::ArgList &Args, const Tool& T) {
- // Construct clang-offload-bundler command to bundle object files for
- // for different GPU archs.
- ArgStringList BundlerArgs;
- BundlerArgs.push_back(Args.MakeArgString("-type=o"));
- BundlerArgs.push_back(
- Args.MakeArgString("-bundle-align=" + Twine(HIPCodeObjectAlign)));
-
- // ToDo: Remove the dummy host binary entry which is required by
- // clang-offload-bundler.
- std::string BundlerTargetArg = "-targets=host-x86_64-unknown-linux";
- std::string BundlerInputArg = "-inputs=" NULL_FILE;
-
- // For code object version 2 and 3, the offload kind in bundle ID is 'hip'
- // for backward compatibility. For code object version 4 and greater, the
- // offload kind in bundle ID is 'hipv4'.
- std::string OffloadKind = "hip";
- if (getAMDGPUCodeObjectVersion(C.getDriver(), Args) >= 4)
- OffloadKind = OffloadKind + "v4";
- for (const auto &II : Inputs) {
- const auto* A = II.getAction();
- BundlerTargetArg = BundlerTargetArg + "," + OffloadKind +
- "-amdgcn-amd-amdhsa--" +
- StringRef(A->getOffloadingArch()).str();
- BundlerInputArg = BundlerInputArg + "," + II.getFilename();
- }
- BundlerArgs.push_back(Args.MakeArgString(BundlerTargetArg));
- BundlerArgs.push_back(Args.MakeArgString(BundlerInputArg));
-
- std::string Output = std::string(OutputFileName);
- auto BundlerOutputArg =
- Args.MakeArgString(std::string("-outputs=").append(Output));
- BundlerArgs.push_back(BundlerOutputArg);
-
- const char *Bundler = Args.MakeArgString(
- T.getToolChain().GetProgramPath("clang-offload-bundler"));
- C.addCommand(std::make_unique<Command>(
- JA, T, ResponseFileSupport::None(), Bundler, BundlerArgs, Inputs,
- InputInfo(&JA, Args.MakeArgString(Output))));
-}
-
-/// Add Generated HIP Object File which has device images embedded into the
-/// host to the argument list for linking. Using MC directives, embed the
-/// device code and also define symbols required by the code generation so that
-/// the image can be retrieved at runtime.
-void AMDGCN::Linker::constructGenerateObjFileFromHIPFatBinary(
- Compilation &C, const InputInfo &Output,
- const InputInfoList &Inputs, const ArgList &Args,
- const JobAction &JA) const {
- const ToolChain &TC = getToolChain();
- std::string Name =
- std::string(llvm::sys::path::stem(Output.getFilename()));
-
- // Create Temp Object File Generator,
- // Offload Bundled file and Bundled Object file.
- // Keep them if save-temps is enabled.
- const char *McinFile;
- const char *BundleFile;
- if (C.getDriver().isSaveTempsEnabled()) {
- McinFile = C.getArgs().MakeArgString(Name + ".mcin");
- BundleFile = C.getArgs().MakeArgString(Name + ".hipfb");
- } else {
- auto TmpNameMcin = C.getDriver().GetTemporaryPath(Name, "mcin");
- McinFile = C.addTempFile(C.getArgs().MakeArgString(TmpNameMcin));
- auto TmpNameFb = C.getDriver().GetTemporaryPath(Name, "hipfb");
- BundleFile = C.addTempFile(C.getArgs().MakeArgString(TmpNameFb));
- }
- constructHIPFatbinCommand(C, JA, BundleFile, Inputs, Args, *this);
-
- // Create a buffer to write the contents of the temp obj generator.
- std::string ObjBuffer;
- llvm::raw_string_ostream ObjStream(ObjBuffer);
-
- // Add MC directives to embed target binaries. We ensure that each
- // section and image is 16-byte aligned. This is not mandatory, but
- // increases the likelihood of data to be aligned with a cache block
- // in several main host machines.
- ObjStream << "# HIP Object Generator\n";
- ObjStream << "# *** Automatically generated by Clang ***\n";
- ObjStream << " .protected __hip_fatbin\n";
- ObjStream << " .type __hip_fatbin,@object\n";
- ObjStream << " .section .hip_fatbin,\"a\",@progbits\n";
- ObjStream << " .globl __hip_fatbin\n";
- ObjStream << " .p2align " << llvm::Log2(llvm::Align(HIPCodeObjectAlign))
- << "\n";
- ObjStream << "__hip_fatbin:\n";
- ObjStream << " .incbin \"" << BundleFile << "\"\n";
- ObjStream.flush();
-
- // Dump the contents of the temp object file gen if the user requested that.
- // We support this option to enable testing of behavior with -###.
- if (C.getArgs().hasArg(options::OPT_fhip_dump_offload_linker_script))
- llvm::errs() << ObjBuffer;
-
- // Open script file and write the contents.
- std::error_code EC;
- llvm::raw_fd_ostream Objf(McinFile, EC, llvm::sys::fs::OF_None);
-
- if (EC) {
- C.getDriver().Diag(clang::diag::err_unable_to_make_temp) << EC.message();
- return;
- }
-
- Objf << ObjBuffer;
-
- ArgStringList McArgs{"-o", Output.getFilename(),
- McinFile, "--filetype=obj"};
- const char *Mc = Args.MakeArgString(TC.GetProgramPath("llvm-mc"));
- C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Mc, McArgs, Inputs, Output));
-}
-
-// For amdgcn the inputs of the linker job are device bitcode and output is
-// object file. It calls llvm-link, opt, llc, then lld steps.
-void AMDGCN::Linker::ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
- if (Inputs.size() > 0 &&
- Inputs[0].getType() == types::TY_Image &&
- JA.getType() == types::TY_Object)
- return constructGenerateObjFileFromHIPFatBinary(C, Output, Inputs, Args, JA);
-
- if (JA.getType() == types::TY_HIP_FATBIN)
- return constructHIPFatbinCommand(C, JA, Output.getFilename(), Inputs, Args, *this);
-
- return constructLldCommand(C, JA, Inputs, Output, Args);
-}
-
-HIPToolChain::HIPToolChain(const Driver &D, const llvm::Triple &Triple,
- const ToolChain &HostTC, const ArgList &Args)
- : ROCMToolChain(D, Triple, Args), HostTC(HostTC) {
- // Lookup binaries into the driver directory, this is used to
- // discover the clang-offload-bundler executable.
- getProgramPaths().push_back(getDriver().Dir);
-}
-
-void HIPToolChain::addClangTargetOptions(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args,
- Action::OffloadKind DeviceOffloadingKind) const {
- HostTC.addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadingKind);
-
- assert(DeviceOffloadingKind == Action::OFK_HIP &&
- "Only HIP offloading kinds are supported for GPUs.");
-
- CC1Args.push_back("-fcuda-is-device");
-
- if (DriverArgs.hasFlag(options::OPT_fcuda_approx_transcendentals,
- options::OPT_fno_cuda_approx_transcendentals, false))
- CC1Args.push_back("-fcuda-approx-transcendentals");
-
- if (!DriverArgs.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
- false))
- CC1Args.append({"-mllvm", "-amdgpu-internalize-symbols"});
-
- StringRef MaxThreadsPerBlock =
- DriverArgs.getLastArgValue(options::OPT_gpu_max_threads_per_block_EQ);
- if (!MaxThreadsPerBlock.empty()) {
- std::string ArgStr =
- std::string("--gpu-max-threads-per-block=") + MaxThreadsPerBlock.str();
- CC1Args.push_back(DriverArgs.MakeArgStringRef(ArgStr));
- }
-
- CC1Args.push_back("-fcuda-allow-variadic-functions");
-
- // Default to "hidden" visibility, as object level linking will not be
- // supported for the foreseeable future.
- if (!DriverArgs.hasArg(options::OPT_fvisibility_EQ,
- options::OPT_fvisibility_ms_compat)) {
- CC1Args.append({"-fvisibility", "hidden"});
- CC1Args.push_back("-fapply-global-visibility-to-externs");
- }
-
- llvm::for_each(getHIPDeviceLibs(DriverArgs), [&](StringRef BCFile) {
- CC1Args.push_back("-mlink-builtin-bitcode");
- CC1Args.push_back(DriverArgs.MakeArgString(BCFile));
- });
-}
-
-llvm::opt::DerivedArgList *
-HIPToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
- StringRef BoundArch,
- Action::OffloadKind DeviceOffloadKind) const {
- DerivedArgList *DAL =
- HostTC.TranslateArgs(Args, BoundArch, DeviceOffloadKind);
- if (!DAL)
- DAL = new DerivedArgList(Args.getBaseArgs());
-
- const OptTable &Opts = getDriver().getOpts();
-
- for (Arg *A : Args) {
- if (!shouldSkipArgument(A))
- DAL->append(A);
- }
-
- if (!BoundArch.empty()) {
- DAL->eraseArg(options::OPT_mcpu_EQ);
- DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_mcpu_EQ), BoundArch);
- checkTargetID(*DAL);
- }
-
- return DAL;
-}
-
-Tool *HIPToolChain::buildLinker() const {
- assert(getTriple().getArch() == llvm::Triple::amdgcn);
- return new tools::AMDGCN::Linker(*this);
-}
-
-void HIPToolChain::addClangWarningOptions(ArgStringList &CC1Args) const {
- HostTC.addClangWarningOptions(CC1Args);
-}
-
-ToolChain::CXXStdlibType
-HIPToolChain::GetCXXStdlibType(const ArgList &Args) const {
- return HostTC.GetCXXStdlibType(Args);
-}
-
-void HIPToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
- HostTC.AddClangSystemIncludeArgs(DriverArgs, CC1Args);
-}
-
-void HIPToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &Args,
- ArgStringList &CC1Args) const {
- HostTC.AddClangCXXStdlibIncludeArgs(Args, CC1Args);
-}
-
-void HIPToolChain::AddIAMCUIncludeArgs(const ArgList &Args,
- ArgStringList &CC1Args) const {
- HostTC.AddIAMCUIncludeArgs(Args, CC1Args);
-}
-
-void HIPToolChain::AddHIPIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
- RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
-}
-
-SanitizerMask HIPToolChain::getSupportedSanitizers() const {
- // The HIPToolChain only supports sanitizers in the sense that it allows
- // sanitizer arguments on the command line if they are supported by the host
- // toolchain. The HIPToolChain will actually ignore any command line
- // arguments for any of these "supported" sanitizers. That means that no
- // sanitization of device code is actually supported at this time.
- //
- // This behavior is necessary because the host and device toolchains
- // invocations often share the command line, so the device toolchain must
- // tolerate flags meant only for the host toolchain.
- return HostTC.getSupportedSanitizers();
-}
-
-VersionTuple HIPToolChain::computeMSVCVersion(const Driver *D,
- const ArgList &Args) const {
- return HostTC.computeMSVCVersion(D, Args);
-}
-
-llvm::SmallVector<std::string, 12>
-HIPToolChain::getHIPDeviceLibs(const llvm::opt::ArgList &DriverArgs) const {
- llvm::SmallVector<std::string, 12> BCLibs;
- if (DriverArgs.hasArg(options::OPT_nogpulib))
- return {};
- ArgStringList LibraryPaths;
-
- // Find in --hip-device-lib-path and HIP_LIBRARY_PATH.
- for (auto Path : RocmInstallation.getRocmDeviceLibPathArg())
- LibraryPaths.push_back(DriverArgs.MakeArgString(Path));
-
- addDirectoryList(DriverArgs, LibraryPaths, "", "HIP_DEVICE_LIB_PATH");
-
- // Maintain compatability with --hip-device-lib.
- auto BCLibArgs = DriverArgs.getAllArgValues(options::OPT_hip_device_lib_EQ);
- if (!BCLibArgs.empty()) {
- llvm::for_each(BCLibArgs, [&](StringRef BCName) {
- StringRef FullName;
- for (std::string LibraryPath : LibraryPaths) {
- SmallString<128> Path(LibraryPath);
- llvm::sys::path::append(Path, BCName);
- FullName = Path;
- if (llvm::sys::fs::exists(FullName)) {
- BCLibs.push_back(FullName.str());
- return;
- }
- }
- getDriver().Diag(diag::err_drv_no_such_file) << BCName;
- });
- } else {
- if (!RocmInstallation.hasDeviceLibrary()) {
- getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 0;
- return {};
- }
- StringRef GpuArch = getGPUArch(DriverArgs);
- assert(!GpuArch.empty() && "Must have an explicit GPU arch.");
-
- // If --hip-device-lib is not set, add the default bitcode libraries.
- if (DriverArgs.hasFlag(options::OPT_fgpu_sanitize,
- options::OPT_fno_gpu_sanitize, false)) {
- auto AsanRTL = RocmInstallation.getAsanRTLPath();
- if (AsanRTL.empty()) {
- unsigned DiagID = getDriver().getDiags().getCustomDiagID(
- DiagnosticsEngine::Error,
- "AMDGPU address sanitizer runtime library (asanrtl) is not found. "
- "Please install ROCm device library which supports address "
- "sanitizer");
- getDriver().Diag(DiagID);
- return {};
- } else
- BCLibs.push_back(AsanRTL.str());
- }
-
- // Add the HIP specific bitcode library.
- BCLibs.push_back(RocmInstallation.getHIPPath().str());
-
- // Add common device libraries like ocml etc.
- BCLibs.append(getCommonDeviceLibNames(DriverArgs, GpuArch.str()));
-
- // Add instrument lib.
- auto InstLib =
- DriverArgs.getLastArgValue(options::OPT_gpu_instrument_lib_EQ);
- if (InstLib.empty())
- return BCLibs;
- if (llvm::sys::fs::exists(InstLib))
- BCLibs.push_back(InstLib.str());
- else
- getDriver().Diag(diag::err_drv_no_such_file) << InstLib;
- }
-
- return BCLibs;
-}
-
-void HIPToolChain::checkTargetID(const llvm::opt::ArgList &DriverArgs) const {
- auto PTID = getParsedTargetID(DriverArgs);
- if (PTID.OptionalTargetID && !PTID.OptionalGPUArch) {
- getDriver().Diag(clang::diag::err_drv_bad_target_id)
- << PTID.OptionalTargetID.getValue();
- return;
- }
-
- assert(PTID.OptionalFeatures && "Invalid return from getParsedTargetID");
- auto &FeatureMap = PTID.OptionalFeatures.getValue();
- // Sanitizer is not supported with xnack-.
- if (DriverArgs.hasFlag(options::OPT_fgpu_sanitize,
- options::OPT_fno_gpu_sanitize, false)) {
- auto Loc = FeatureMap.find("xnack");
- if (Loc != FeatureMap.end() && !Loc->second) {
- auto &Diags = getDriver().getDiags();
- auto DiagID = Diags.getCustomDiagID(
- DiagnosticsEngine::Error,
- "'-fgpu-sanitize' is not compatible with offload arch '%0'. "
- "Use an offload arch without 'xnack-' instead");
- Diags.Report(DiagID) << PTID.OptionalTargetID.getValue();
- }
- }
-}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.cpp
new file mode 100644
index 000000000000..ccb36a6c846c
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.cpp
@@ -0,0 +1,434 @@
+//===--- HIPAMD.cpp - HIP Tool and ToolChain Implementations ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "HIPAMD.h"
+#include "AMDGPU.h"
+#include "CommonArgs.h"
+#include "HIPUtility.h"
+#include "clang/Basic/Cuda.h"
+#include "clang/Basic/TargetID.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/InputInfo.h"
+#include "clang/Driver/Options.h"
+#include "clang/Driver/SanitizerArgs.h"
+#include "llvm/Support/Alignment.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/TargetParser/TargetParser.h"
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang::driver::tools;
+using namespace clang;
+using namespace llvm::opt;
+
+#if defined(_WIN32) || defined(_WIN64)
+#define NULL_FILE "nul"
+#else
+#define NULL_FILE "/dev/null"
+#endif
+
+static bool shouldSkipSanitizeOption(const ToolChain &TC,
+ const llvm::opt::ArgList &DriverArgs,
+ StringRef TargetID,
+ const llvm::opt::Arg *A) {
+ // For actions without targetID, do nothing.
+ if (TargetID.empty())
+ return false;
+ Option O = A->getOption();
+ if (!O.matches(options::OPT_fsanitize_EQ))
+ return false;
+
+ if (!DriverArgs.hasFlag(options::OPT_fgpu_sanitize,
+ options::OPT_fno_gpu_sanitize, true))
+ return true;
+
+ auto &Diags = TC.getDriver().getDiags();
+
+ // For simplicity, we only allow -fsanitize=address
+ SanitizerMask K = parseSanitizerValue(A->getValue(), /*AllowGroups=*/false);
+ if (K != SanitizerKind::Address)
+ return true;
+
+ llvm::StringMap<bool> FeatureMap;
+ auto OptionalGpuArch = parseTargetID(TC.getTriple(), TargetID, &FeatureMap);
+
+ assert(OptionalGpuArch && "Invalid Target ID");
+ (void)OptionalGpuArch;
+ auto Loc = FeatureMap.find("xnack");
+ if (Loc == FeatureMap.end() || !Loc->second) {
+ Diags.Report(
+ clang::diag::warn_drv_unsupported_option_for_offload_arch_req_feature)
+ << A->getAsString(DriverArgs) << TargetID << "xnack+";
+ return true;
+ }
+ return false;
+}
+
+void AMDGCN::Linker::constructLlvmLinkCommand(Compilation &C,
+ const JobAction &JA,
+ const InputInfoList &Inputs,
+ const InputInfo &Output,
+ const llvm::opt::ArgList &Args) const {
+ // Construct llvm-link command.
+ // The output from llvm-link is a bitcode file.
+ ArgStringList LlvmLinkArgs;
+
+ assert(!Inputs.empty() && "Must have at least one input.");
+
+ LlvmLinkArgs.append({"-o", Output.getFilename()});
+ for (auto Input : Inputs)
+ LlvmLinkArgs.push_back(Input.getFilename());
+
+ // Look for archive of bundled bitcode in arguments, and add temporary files
+ // for the extracted archive of bitcode to inputs.
+ auto TargetID = Args.getLastArgValue(options::OPT_mcpu_EQ);
+ AddStaticDeviceLibsLinking(C, *this, JA, Inputs, Args, LlvmLinkArgs, "amdgcn",
+ TargetID, /*IsBitCodeSDL=*/true);
+
+ const char *LlvmLink =
+ Args.MakeArgString(getToolChain().GetProgramPath("llvm-link"));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ LlvmLink, LlvmLinkArgs, Inputs,
+ Output));
+}
+
+void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
+ const InputInfoList &Inputs,
+ const InputInfo &Output,
+ const llvm::opt::ArgList &Args) const {
+ // Construct lld command.
+ // The output from ld.lld is an HSA code object file.
+ ArgStringList LldArgs{"-flavor",
+ "gnu",
+ "-m",
+ "elf64_amdgpu",
+ "--no-undefined",
+ "-shared",
+ "-plugin-opt=-amdgpu-internalize-symbols"};
+ if (Args.hasArg(options::OPT_hipstdpar))
+ LldArgs.push_back("-plugin-opt=-amdgpu-enable-hipstdpar");
+
+ auto &TC = getToolChain();
+ auto &D = TC.getDriver();
+ assert(!Inputs.empty() && "Must have at least one input.");
+ bool IsThinLTO = D.getLTOMode(/*IsOffload=*/true) == LTOK_Thin;
+ addLTOOptions(TC, Args, LldArgs, Output, Inputs[0], IsThinLTO);
+
+ // Extract all the -m options
+ std::vector<llvm::StringRef> Features;
+ amdgpu::getAMDGPUTargetFeatures(D, TC.getTriple(), Args, Features);
+
+ // Add features to mattr such as cumode
+ std::string MAttrString = "-plugin-opt=-mattr=";
+ for (auto OneFeature : unifyTargetFeatures(Features)) {
+ MAttrString.append(Args.MakeArgString(OneFeature));
+ if (OneFeature != Features.back())
+ MAttrString.append(",");
+ }
+ if (!Features.empty())
+ LldArgs.push_back(Args.MakeArgString(MAttrString));
+
+ // ToDo: Remove this option after AMDGPU backend supports ISA-level linking.
+ // Since AMDGPU backend currently does not support ISA-level linking, all
+ // called functions need to be imported.
+ if (IsThinLTO)
+ LldArgs.push_back(Args.MakeArgString("-plugin-opt=-force-import-all"));
+
+ if (C.getDriver().isSaveTempsEnabled())
+ LldArgs.push_back("-save-temps");
+
+ addLinkerCompressDebugSectionsOption(TC, Args, LldArgs);
+
+ // Given that host and device linking happen in separate processes, the device
+ // linker doesn't always have the visibility as to which device symbols are
+ // needed by a program, especially for the device symbol dependencies that are
+ // introduced through the host symbol resolution.
+ // For example: host_A() (A.obj) --> host_B(B.obj) --> device_kernel_B()
+ // (B.obj) In this case, the device linker doesn't know that A.obj actually
+ // depends on the kernel functions in B.obj. When linking to static device
+ // library, the device linker may drop some of the device global symbols if
+ // they aren't referenced. As a workaround, we are adding to the
+ // --whole-archive flag such that all global symbols would be linked in.
+ LldArgs.push_back("--whole-archive");
+
+ for (auto *Arg : Args.filtered(options::OPT_Xoffload_linker)) {
+ StringRef ArgVal = Arg->getValue(1);
+ auto SplitArg = ArgVal.split("-mllvm=");
+ if (!SplitArg.second.empty()) {
+ LldArgs.push_back(
+ Args.MakeArgString(Twine("-plugin-opt=") + SplitArg.second));
+ } else {
+ LldArgs.push_back(Args.MakeArgString(ArgVal));
+ }
+ Arg->claim();
+ }
+
+ LldArgs.append({"-o", Output.getFilename()});
+ for (auto Input : Inputs)
+ LldArgs.push_back(Input.getFilename());
+
+ // Look for archive of bundled bitcode in arguments, and add temporary files
+ // for the extracted archive of bitcode to inputs.
+ auto TargetID = Args.getLastArgValue(options::OPT_mcpu_EQ);
+ AddStaticDeviceLibsLinking(C, *this, JA, Inputs, Args, LldArgs, "amdgcn",
+ TargetID, /*IsBitCodeSDL=*/true);
+
+ LldArgs.push_back("--no-whole-archive");
+
+ const char *Lld = Args.MakeArgString(getToolChain().GetProgramPath("lld"));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Lld, LldArgs, Inputs, Output));
+}
+
+// For amdgcn the inputs of the linker job are device bitcode and output is
+// either an object file or bitcode (-emit-llvm). It calls llvm-link, opt,
+// llc, then lld steps.
+void AMDGCN::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ if (Inputs.size() > 0 &&
+ Inputs[0].getType() == types::TY_Image &&
+ JA.getType() == types::TY_Object)
+ return HIP::constructGenerateObjFileFromHIPFatBinary(C, Output, Inputs,
+ Args, JA, *this);
+
+ if (JA.getType() == types::TY_HIP_FATBIN)
+ return HIP::constructHIPFatbinCommand(C, JA, Output.getFilename(), Inputs,
+ Args, *this);
+
+ if (JA.getType() == types::TY_LLVM_BC)
+ return constructLlvmLinkCommand(C, JA, Inputs, Output, Args);
+
+ return constructLldCommand(C, JA, Inputs, Output, Args);
+}
+
+HIPAMDToolChain::HIPAMDToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ToolChain &HostTC, const ArgList &Args)
+ : ROCMToolChain(D, Triple, Args), HostTC(HostTC) {
+ // Lookup binaries into the driver directory, this is used to
+ // discover the clang-offload-bundler executable.
+ getProgramPaths().push_back(getDriver().Dir);
+
+ // Diagnose unsupported sanitizer options only once.
+ if (!Args.hasFlag(options::OPT_fgpu_sanitize, options::OPT_fno_gpu_sanitize,
+ true))
+ return;
+ for (auto *A : Args.filtered(options::OPT_fsanitize_EQ)) {
+ SanitizerMask K = parseSanitizerValue(A->getValue(), /*AllowGroups=*/false);
+ if (K != SanitizerKind::Address)
+ D.getDiags().Report(clang::diag::warn_drv_unsupported_option_for_target)
+ << A->getAsString(Args) << getTriple().str();
+ }
+}
+
+void HIPAMDToolChain::addClangTargetOptions(
+ const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadingKind) const {
+ HostTC.addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadingKind);
+
+ assert(DeviceOffloadingKind == Action::OFK_HIP &&
+ "Only HIP offloading kinds are supported for GPUs.");
+
+ CC1Args.push_back("-fcuda-is-device");
+
+ if (!DriverArgs.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
+ false))
+ CC1Args.append({"-mllvm", "-amdgpu-internalize-symbols"});
+ if (DriverArgs.hasArgNoClaim(options::OPT_hipstdpar))
+ CC1Args.append({"-mllvm", "-amdgpu-enable-hipstdpar"});
+
+ StringRef MaxThreadsPerBlock =
+ DriverArgs.getLastArgValue(options::OPT_gpu_max_threads_per_block_EQ);
+ if (!MaxThreadsPerBlock.empty()) {
+ std::string ArgStr =
+ (Twine("--gpu-max-threads-per-block=") + MaxThreadsPerBlock).str();
+ CC1Args.push_back(DriverArgs.MakeArgStringRef(ArgStr));
+ }
+
+ CC1Args.push_back("-fcuda-allow-variadic-functions");
+
+ // Default to "hidden" visibility, as object level linking will not be
+ // supported for the foreseeable future.
+ if (!DriverArgs.hasArg(options::OPT_fvisibility_EQ,
+ options::OPT_fvisibility_ms_compat)) {
+ CC1Args.append({"-fvisibility=hidden"});
+ CC1Args.push_back("-fapply-global-visibility-to-externs");
+ }
+
+ for (auto BCFile : getDeviceLibs(DriverArgs)) {
+ CC1Args.push_back(BCFile.ShouldInternalize ? "-mlink-builtin-bitcode"
+ : "-mlink-bitcode-file");
+ CC1Args.push_back(DriverArgs.MakeArgString(BCFile.Path));
+ }
+}
+
+llvm::opt::DerivedArgList *
+HIPAMDToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
+ StringRef BoundArch,
+ Action::OffloadKind DeviceOffloadKind) const {
+ DerivedArgList *DAL =
+ HostTC.TranslateArgs(Args, BoundArch, DeviceOffloadKind);
+ if (!DAL)
+ DAL = new DerivedArgList(Args.getBaseArgs());
+
+ const OptTable &Opts = getDriver().getOpts();
+
+ for (Arg *A : Args) {
+ if (!shouldSkipSanitizeOption(*this, Args, BoundArch, A))
+ DAL->append(A);
+ }
+
+ if (!BoundArch.empty()) {
+ DAL->eraseArg(options::OPT_mcpu_EQ);
+ DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_mcpu_EQ), BoundArch);
+ checkTargetID(*DAL);
+ }
+
+ return DAL;
+}
+
+Tool *HIPAMDToolChain::buildLinker() const {
+ assert(getTriple().getArch() == llvm::Triple::amdgcn);
+ return new tools::AMDGCN::Linker(*this);
+}
+
+void HIPAMDToolChain::addClangWarningOptions(ArgStringList &CC1Args) const {
+ HostTC.addClangWarningOptions(CC1Args);
+}
+
+ToolChain::CXXStdlibType
+HIPAMDToolChain::GetCXXStdlibType(const ArgList &Args) const {
+ return HostTC.GetCXXStdlibType(Args);
+}
+
+void HIPAMDToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ HostTC.AddClangSystemIncludeArgs(DriverArgs, CC1Args);
+}
+
+void HIPAMDToolChain::AddClangCXXStdlibIncludeArgs(
+ const ArgList &Args, ArgStringList &CC1Args) const {
+ HostTC.AddClangCXXStdlibIncludeArgs(Args, CC1Args);
+}
+
+void HIPAMDToolChain::AddIAMCUIncludeArgs(const ArgList &Args,
+ ArgStringList &CC1Args) const {
+ HostTC.AddIAMCUIncludeArgs(Args, CC1Args);
+}
+
+void HIPAMDToolChain::AddHIPIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ RocmInstallation->AddHIPIncludeArgs(DriverArgs, CC1Args);
+}
+
+SanitizerMask HIPAMDToolChain::getSupportedSanitizers() const {
+ // The HIPAMDToolChain only supports sanitizers in the sense that it allows
+ // sanitizer arguments on the command line if they are supported by the host
+ // toolchain. The HIPAMDToolChain will actually ignore any command line
+ // arguments for any of these "supported" sanitizers. That means that no
+ // sanitization of device code is actually supported at this time.
+ //
+ // This behavior is necessary because the host and device toolchains
+ // invocations often share the command line, so the device toolchain must
+ // tolerate flags meant only for the host toolchain.
+ return HostTC.getSupportedSanitizers();
+}
+
+VersionTuple HIPAMDToolChain::computeMSVCVersion(const Driver *D,
+ const ArgList &Args) const {
+ return HostTC.computeMSVCVersion(D, Args);
+}
+
+llvm::SmallVector<ToolChain::BitCodeLibraryInfo, 12>
+HIPAMDToolChain::getDeviceLibs(const llvm::opt::ArgList &DriverArgs) const {
+ llvm::SmallVector<BitCodeLibraryInfo, 12> BCLibs;
+ if (DriverArgs.hasArg(options::OPT_nogpulib))
+ return {};
+ ArgStringList LibraryPaths;
+
+ // Find in --hip-device-lib-path and HIP_LIBRARY_PATH.
+ for (StringRef Path : RocmInstallation->getRocmDeviceLibPathArg())
+ LibraryPaths.push_back(DriverArgs.MakeArgString(Path));
+
+ addDirectoryList(DriverArgs, LibraryPaths, "", "HIP_DEVICE_LIB_PATH");
+
+ // Maintain compatability with --hip-device-lib.
+ auto BCLibArgs = DriverArgs.getAllArgValues(options::OPT_hip_device_lib_EQ);
+ if (!BCLibArgs.empty()) {
+ llvm::for_each(BCLibArgs, [&](StringRef BCName) {
+ StringRef FullName;
+ for (StringRef LibraryPath : LibraryPaths) {
+ SmallString<128> Path(LibraryPath);
+ llvm::sys::path::append(Path, BCName);
+ FullName = Path;
+ if (llvm::sys::fs::exists(FullName)) {
+ BCLibs.push_back(FullName);
+ return;
+ }
+ }
+ getDriver().Diag(diag::err_drv_no_such_file) << BCName;
+ });
+ } else {
+ if (!RocmInstallation->hasDeviceLibrary()) {
+ getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 0;
+ return {};
+ }
+ StringRef GpuArch = getGPUArch(DriverArgs);
+ assert(!GpuArch.empty() && "Must have an explicit GPU arch.");
+
+ // If --hip-device-lib is not set, add the default bitcode libraries.
+ if (DriverArgs.hasFlag(options::OPT_fgpu_sanitize,
+ options::OPT_fno_gpu_sanitize, true) &&
+ getSanitizerArgs(DriverArgs).needsAsanRt()) {
+ auto AsanRTL = RocmInstallation->getAsanRTLPath();
+ if (AsanRTL.empty()) {
+ unsigned DiagID = getDriver().getDiags().getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "AMDGPU address sanitizer runtime library (asanrtl) is not found. "
+ "Please install ROCm device library which supports address "
+ "sanitizer");
+ getDriver().Diag(DiagID);
+ return {};
+ } else
+ BCLibs.emplace_back(AsanRTL, /*ShouldInternalize=*/false);
+ }
+
+ // Add the HIP specific bitcode library.
+ BCLibs.push_back(RocmInstallation->getHIPPath());
+
+ // Add common device libraries like ocml etc.
+ for (StringRef N : getCommonDeviceLibNames(DriverArgs, GpuArch.str()))
+ BCLibs.emplace_back(N);
+
+ // Add instrument lib.
+ auto InstLib =
+ DriverArgs.getLastArgValue(options::OPT_gpu_instrument_lib_EQ);
+ if (InstLib.empty())
+ return BCLibs;
+ if (llvm::sys::fs::exists(InstLib))
+ BCLibs.push_back(InstLib);
+ else
+ getDriver().Diag(diag::err_drv_no_such_file) << InstLib;
+ }
+
+ return BCLibs;
+}
+
+void HIPAMDToolChain::checkTargetID(
+ const llvm::opt::ArgList &DriverArgs) const {
+ auto PTID = getParsedTargetID(DriverArgs);
+ if (PTID.OptionalTargetID && !PTID.OptionalGPUArch) {
+ getDriver().Diag(clang::diag::err_drv_bad_target_id)
+ << *PTID.OptionalTargetID;
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.h
index 3cced0a320dc..d81a9733014c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIP.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.h
@@ -1,4 +1,4 @@
-//===--- HIP.h - HIP ToolChain Implementations ------------------*- C++ -*-===//
+//===--- HIPAMD.h - HIP ToolChain Implementations ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,12 +6,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIP_H
-#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIP_H
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIPAMD_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIPAMD_H
-#include "clang/Driver/ToolChain.h"
-#include "clang/Driver/Tool.h"
#include "AMDGPU.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
namespace clang {
namespace driver {
@@ -19,14 +19,9 @@ namespace driver {
namespace tools {
namespace AMDGCN {
- // Construct command for creating HIP fatbin.
- void constructHIPFatbinCommand(Compilation &C, const JobAction &JA,
- StringRef OutputFileName, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs, const Tool& T);
-
// Runs llvm-link/opt/llc/lld, which links multiple LLVM bitcode, together with
// device library, then compiles it to ISA in a shared object.
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
public:
Linker(const ToolChain &TC) : Tool("AMDGCN::Linker", "amdgcn-link", TC) {}
@@ -38,17 +33,13 @@ public:
const char *LinkingOutput) const override;
private:
-
void constructLldCommand(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs, const InputInfo &Output,
const llvm::opt::ArgList &Args) const;
-
- // Construct command for creating Object from HIP fatbin.
- void constructGenerateObjFileFromHIPFatBinary(Compilation &C,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const llvm::opt::ArgList &Args,
- const JobAction &JA) const;
+ void constructLlvmLinkCommand(Compilation &C, const JobAction &JA,
+ const InputInfoList &Inputs,
+ const InputInfo &Output,
+ const llvm::opt::ArgList &Args) const;
};
} // end namespace AMDGCN
@@ -56,10 +47,10 @@ private:
namespace toolchains {
-class LLVM_LIBRARY_VISIBILITY HIPToolChain final : public ROCMToolChain {
+class LLVM_LIBRARY_VISIBILITY HIPAMDToolChain final : public ROCMToolChain {
public:
- HIPToolChain(const Driver &D, const llvm::Triple &Triple,
- const ToolChain &HostTC, const llvm::opt::ArgList &Args);
+ HIPAMDToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ToolChain &HostTC, const llvm::opt::ArgList &Args);
const llvm::Triple *getAuxTriple() const override {
return &HostTC.getTriple();
@@ -68,9 +59,10 @@ public:
llvm::opt::DerivedArgList *
TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
Action::OffloadKind DeviceOffloadKind) const override;
- void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args,
- Action::OffloadKind DeviceOffloadKind) const override;
+ void
+ addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
void addClangWarningOptions(llvm::opt::ArgStringList &CC1Args) const override;
CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
void
@@ -83,8 +75,8 @@ public:
llvm::opt::ArgStringList &CC1Args) const override;
void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- llvm::SmallVector<std::string, 12>
- getHIPDeviceLibs(const llvm::opt::ArgList &Args) const override;
+ llvm::SmallVector<BitCodeLibraryInfo, 12>
+ getDeviceLibs(const llvm::opt::ArgList &Args) const override;
SanitizerMask getSupportedSanitizers() const override;
@@ -92,7 +84,7 @@ public:
computeMSVCVersion(const Driver *D,
const llvm::opt::ArgList &Args) const override;
- unsigned GetDefaultDwarfVersion() const override { return 4; }
+ unsigned GetDefaultDwarfVersion() const override { return 5; }
const ToolChain &HostTC;
void checkTargetID(const llvm::opt::ArgList &DriverArgs) const override;
@@ -105,4 +97,4 @@ protected:
} // end namespace driver
} // end namespace clang
-#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIP_H
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIPAMD_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.cpp
new file mode 100644
index 000000000000..a144b28057f4
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.cpp
@@ -0,0 +1,288 @@
+//===--- HIPSPV.cpp - HIPSPV ToolChain Implementation -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "HIPSPV.h"
+#include "CommonArgs.h"
+#include "HIPUtility.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/InputInfo.h"
+#include "clang/Driver/Options.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang::driver::tools;
+using namespace clang;
+using namespace llvm::opt;
+
+// Convenience function for creating temporary file for both modes of
+// isSaveTempsEnabled().
+static const char *getTempFile(Compilation &C, StringRef Prefix,
+ StringRef Extension) {
+ if (C.getDriver().isSaveTempsEnabled()) {
+ return C.getArgs().MakeArgString(Prefix + "." + Extension);
+ }
+ auto TmpFile = C.getDriver().GetTemporaryPath(Prefix, Extension);
+ return C.addTempFile(C.getArgs().MakeArgString(TmpFile));
+}
+
+// Locates HIP pass plugin.
+static std::string findPassPlugin(const Driver &D,
+ const llvm::opt::ArgList &Args) {
+ StringRef Path = Args.getLastArgValue(options::OPT_hipspv_pass_plugin_EQ);
+ if (!Path.empty()) {
+ if (llvm::sys::fs::exists(Path))
+ return Path.str();
+ D.Diag(diag::err_drv_no_such_file) << Path;
+ }
+
+ StringRef hipPath = Args.getLastArgValue(options::OPT_hip_path_EQ);
+ if (!hipPath.empty()) {
+ SmallString<128> PluginPath(hipPath);
+ llvm::sys::path::append(PluginPath, "lib", "libLLVMHipSpvPasses.so");
+ if (llvm::sys::fs::exists(PluginPath))
+ return PluginPath.str().str();
+ PluginPath.assign(hipPath);
+ llvm::sys::path::append(PluginPath, "lib", "llvm",
+ "libLLVMHipSpvPasses.so");
+ if (llvm::sys::fs::exists(PluginPath))
+ return PluginPath.str().str();
+ }
+
+ return std::string();
+}
+
+void HIPSPV::Linker::constructLinkAndEmitSpirvCommand(
+ Compilation &C, const JobAction &JA, const InputInfoList &Inputs,
+ const InputInfo &Output, const llvm::opt::ArgList &Args) const {
+
+ assert(!Inputs.empty() && "Must have at least one input.");
+ std::string Name = std::string(llvm::sys::path::stem(Output.getFilename()));
+ const char *TempFile = getTempFile(C, Name + "-link", "bc");
+
+ // Link LLVM bitcode.
+ ArgStringList LinkArgs{};
+ for (auto Input : Inputs)
+ LinkArgs.push_back(Input.getFilename());
+ LinkArgs.append({"-o", TempFile});
+ const char *LlvmLink =
+ Args.MakeArgString(getToolChain().GetProgramPath("llvm-link"));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ LlvmLink, LinkArgs, Inputs, Output));
+
+ // Post-link HIP lowering.
+
+ // Run LLVM IR passes to lower/expand/emulate HIP code that does not translate
+ // to SPIR-V (E.g. dynamic shared memory).
+ auto PassPluginPath = findPassPlugin(C.getDriver(), Args);
+ if (!PassPluginPath.empty()) {
+ const char *PassPathCStr = C.getArgs().MakeArgString(PassPluginPath);
+ const char *OptOutput = getTempFile(C, Name + "-lower", "bc");
+ ArgStringList OptArgs{TempFile, "-load-pass-plugin",
+ PassPathCStr, "-passes=hip-post-link-passes",
+ "-o", OptOutput};
+ const char *Opt = Args.MakeArgString(getToolChain().GetProgramPath("opt"));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::None(), Opt, OptArgs, Inputs, Output));
+ TempFile = OptOutput;
+ }
+
+ // Emit SPIR-V binary.
+
+ llvm::opt::ArgStringList TrArgs{"--spirv-max-version=1.1",
+ "--spirv-ext=+all"};
+ InputInfo TrInput = InputInfo(types::TY_LLVM_BC, TempFile, "");
+ SPIRV::constructTranslateCommand(C, *this, JA, Output, TrInput, TrArgs);
+}
+
+void HIPSPV::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ if (Inputs.size() > 0 && Inputs[0].getType() == types::TY_Image &&
+ JA.getType() == types::TY_Object)
+ return HIP::constructGenerateObjFileFromHIPFatBinary(C, Output, Inputs,
+ Args, JA, *this);
+
+ if (JA.getType() == types::TY_HIP_FATBIN)
+ return HIP::constructHIPFatbinCommand(C, JA, Output.getFilename(), Inputs,
+ Args, *this);
+
+ constructLinkAndEmitSpirvCommand(C, JA, Inputs, Output, Args);
+}
+
+HIPSPVToolChain::HIPSPVToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ToolChain &HostTC, const ArgList &Args)
+ : ToolChain(D, Triple, Args), HostTC(HostTC) {
+ // Lookup binaries into the driver directory, this is used to
+ // discover the clang-offload-bundler executable.
+ getProgramPaths().push_back(getDriver().Dir);
+}
+
+void HIPSPVToolChain::addClangTargetOptions(
+ const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadingKind) const {
+ HostTC.addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadingKind);
+
+ assert(DeviceOffloadingKind == Action::OFK_HIP &&
+ "Only HIP offloading kinds are supported for GPUs.");
+
+ CC1Args.append(
+ {"-fcuda-is-device", "-fcuda-allow-variadic-functions",
+ // A crude workaround for llvm-spirv which does not handle the
+ // autovectorized code well (vector reductions, non-i{8,16,32,64} types).
+ // TODO: Allow autovectorization when SPIR-V backend arrives.
+ "-mllvm", "-vectorize-loops=false", "-mllvm", "-vectorize-slp=false"});
+
+ // Default to "hidden" visibility, as object level linking will not be
+ // supported for the foreseeable future.
+ if (!DriverArgs.hasArg(options::OPT_fvisibility_EQ,
+ options::OPT_fvisibility_ms_compat))
+ CC1Args.append(
+ {"-fvisibility=hidden", "-fapply-global-visibility-to-externs"});
+
+ llvm::for_each(getDeviceLibs(DriverArgs),
+ [&](const BitCodeLibraryInfo &BCFile) {
+ CC1Args.append({"-mlink-builtin-bitcode",
+ DriverArgs.MakeArgString(BCFile.Path)});
+ });
+}
+
+Tool *HIPSPVToolChain::buildLinker() const {
+ assert(getTriple().getArch() == llvm::Triple::spirv64);
+ return new tools::HIPSPV::Linker(*this);
+}
+
+void HIPSPVToolChain::addClangWarningOptions(ArgStringList &CC1Args) const {
+ HostTC.addClangWarningOptions(CC1Args);
+}
+
+ToolChain::CXXStdlibType
+HIPSPVToolChain::GetCXXStdlibType(const ArgList &Args) const {
+ return HostTC.GetCXXStdlibType(Args);
+}
+
+void HIPSPVToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ HostTC.AddClangSystemIncludeArgs(DriverArgs, CC1Args);
+}
+
+void HIPSPVToolChain::AddClangCXXStdlibIncludeArgs(
+ const ArgList &Args, ArgStringList &CC1Args) const {
+ HostTC.AddClangCXXStdlibIncludeArgs(Args, CC1Args);
+}
+
+void HIPSPVToolChain::AddIAMCUIncludeArgs(const ArgList &Args,
+ ArgStringList &CC1Args) const {
+ HostTC.AddIAMCUIncludeArgs(Args, CC1Args);
+}
+
+void HIPSPVToolChain::AddHIPIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nogpuinc))
+ return;
+
+ StringRef hipPath = DriverArgs.getLastArgValue(options::OPT_hip_path_EQ);
+ if (hipPath.empty()) {
+ getDriver().Diag(diag::err_drv_hipspv_no_hip_path) << 1 << "'-nogpuinc'";
+ return;
+ }
+ SmallString<128> P(hipPath);
+ llvm::sys::path::append(P, "include");
+ CC1Args.append({"-isystem", DriverArgs.MakeArgString(P)});
+}
+
+llvm::SmallVector<ToolChain::BitCodeLibraryInfo, 12>
+HIPSPVToolChain::getDeviceLibs(const llvm::opt::ArgList &DriverArgs) const {
+ llvm::SmallVector<ToolChain::BitCodeLibraryInfo, 12> BCLibs;
+ if (DriverArgs.hasArg(options::OPT_nogpulib))
+ return {};
+
+ ArgStringList LibraryPaths;
+ // Find device libraries in --hip-device-lib-path and HIP_DEVICE_LIB_PATH.
+ auto HipDeviceLibPathArgs = DriverArgs.getAllArgValues(
+ // --hip-device-lib-path is alias to this option.
+ clang::driver::options::OPT_rocm_device_lib_path_EQ);
+ for (auto Path : HipDeviceLibPathArgs)
+ LibraryPaths.push_back(DriverArgs.MakeArgString(Path));
+
+ StringRef HipPath = DriverArgs.getLastArgValue(options::OPT_hip_path_EQ);
+ if (!HipPath.empty()) {
+ SmallString<128> Path(HipPath);
+ llvm::sys::path::append(Path, "lib", "hip-device-lib");
+ LibraryPaths.push_back(DriverArgs.MakeArgString(Path));
+ }
+
+ addDirectoryList(DriverArgs, LibraryPaths, "", "HIP_DEVICE_LIB_PATH");
+
+ // Maintain compatability with --hip-device-lib.
+ auto BCLibArgs = DriverArgs.getAllArgValues(options::OPT_hip_device_lib_EQ);
+ if (!BCLibArgs.empty()) {
+ llvm::for_each(BCLibArgs, [&](StringRef BCName) {
+ StringRef FullName;
+ for (std::string LibraryPath : LibraryPaths) {
+ SmallString<128> Path(LibraryPath);
+ llvm::sys::path::append(Path, BCName);
+ FullName = Path;
+ if (llvm::sys::fs::exists(FullName)) {
+ BCLibs.emplace_back(FullName.str());
+ return;
+ }
+ }
+ getDriver().Diag(diag::err_drv_no_such_file) << BCName;
+ });
+ } else {
+ // Search device library named as 'hipspv-<triple>.bc'.
+ auto TT = getTriple().normalize();
+ std::string BCName = "hipspv-" + TT + ".bc";
+ for (auto *LibPath : LibraryPaths) {
+ SmallString<128> Path(LibPath);
+ llvm::sys::path::append(Path, BCName);
+ if (llvm::sys::fs::exists(Path)) {
+ BCLibs.emplace_back(Path.str().str());
+ return BCLibs;
+ }
+ }
+ getDriver().Diag(diag::err_drv_no_hipspv_device_lib)
+ << 1 << ("'" + TT + "' target");
+ return {};
+ }
+
+ return BCLibs;
+}
+
+SanitizerMask HIPSPVToolChain::getSupportedSanitizers() const {
+ // The HIPSPVToolChain only supports sanitizers in the sense that it allows
+ // sanitizer arguments on the command line if they are supported by the host
+ // toolchain. The HIPSPVToolChain will actually ignore any command line
+ // arguments for any of these "supported" sanitizers. That means that no
+ // sanitization of device code is actually supported at this time.
+ //
+ // This behavior is necessary because the host and device toolchains
+ // invocations often share the command line, so the device toolchain must
+ // tolerate flags meant only for the host toolchain.
+ return HostTC.getSupportedSanitizers();
+}
+
+VersionTuple HIPSPVToolChain::computeMSVCVersion(const Driver *D,
+ const ArgList &Args) const {
+ return HostTC.computeMSVCVersion(D, Args);
+}
+
+void HIPSPVToolChain::adjustDebugInfoKind(
+ llvm::codegenoptions::DebugInfoKind &DebugInfoKind,
+ const llvm::opt::ArgList &Args) const {
+ // Debug info generation is disabled for SPIRV-LLVM-Translator
+ // which currently aborts on the presence of DW_OP_LLVM_convert.
+ // TODO: Enable debug info when the SPIR-V backend arrives.
+ DebugInfoKind = llvm::codegenoptions::NoDebugInfo;
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.h
new file mode 100644
index 000000000000..ecd82e7052e4
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPSPV.h
@@ -0,0 +1,102 @@
+//===--- HIPSPV.h - HIP ToolChain Implementations ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIPSPV_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIPSPV_H
+
+#include "SPIRV.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
+
+namespace clang {
+namespace driver {
+namespace tools {
+namespace HIPSPV {
+
+// Runs llvm-link/opt/llc/lld, which links multiple LLVM bitcode, together with
+// device library, then compiles it to SPIR-V in a shared object.
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
+public:
+ Linker(const ToolChain &TC) : Tool("HIPSPV::Linker", "hipspv-link", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+
+private:
+ void constructLinkAndEmitSpirvCommand(Compilation &C, const JobAction &JA,
+ const InputInfoList &Inputs,
+ const InputInfo &Output,
+ const llvm::opt::ArgList &Args) const;
+};
+
+} // namespace HIPSPV
+} // namespace tools
+
+namespace toolchains {
+
+class LLVM_LIBRARY_VISIBILITY HIPSPVToolChain final : public ToolChain {
+public:
+ HIPSPVToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ToolChain &HostTC, const llvm::opt::ArgList &Args);
+
+ const llvm::Triple *getAuxTriple() const override {
+ return &HostTC.getTriple();
+ }
+
+ void
+ addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
+ void addClangWarningOptions(llvm::opt::ArgStringList &CC1Args) const override;
+ CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddIAMCUIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ llvm::SmallVector<BitCodeLibraryInfo, 12>
+ getDeviceLibs(const llvm::opt::ArgList &Args) const override;
+
+ SanitizerMask getSupportedSanitizers() const override;
+
+ VersionTuple
+ computeMSVCVersion(const Driver *D,
+ const llvm::opt::ArgList &Args) const override;
+
+ void adjustDebugInfoKind(llvm::codegenoptions::DebugInfoKind &DebugInfoKind,
+ const llvm::opt::ArgList &Args) const override;
+ bool IsMathErrnoDefault() const override { return false; }
+ bool useIntegratedAs() const override { return true; }
+ bool isCrossCompiling() const override { return true; }
+ bool isPICDefault() const override { return false; }
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override {
+ return false;
+ }
+ bool isPICDefaultForced() const override { return false; }
+ bool SupportsProfiling() const override { return false; }
+
+ const ToolChain &HostTC;
+
+protected:
+ Tool *buildLinker() const override;
+};
+
+} // end namespace toolchains
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIPSPV_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp
new file mode 100644
index 000000000000..f692458b775d
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp
@@ -0,0 +1,179 @@
+//===--- HIPUtility.cpp - Common HIP Tool Chain Utilities -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "HIPUtility.h"
+#include "CommonArgs.h"
+#include "clang/Driver/Compilation.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Path.h"
+#include "llvm/TargetParser/Triple.h"
+
+using namespace clang::driver;
+using namespace clang::driver::tools;
+using namespace llvm::opt;
+
+#if defined(_WIN32) || defined(_WIN64)
+#define NULL_FILE "nul"
+#else
+#define NULL_FILE "/dev/null"
+#endif
+
+namespace {
+const unsigned HIPCodeObjectAlign = 4096;
+} // namespace
+
+// Constructs a triple string for clang offload bundler.
+static std::string normalizeForBundler(const llvm::Triple &T,
+ bool HasTargetID) {
+ return HasTargetID ? (T.getArchName() + "-" + T.getVendorName() + "-" +
+ T.getOSName() + "-" + T.getEnvironmentName())
+ .str()
+ : T.normalize();
+}
+
+// Construct a clang-offload-bundler command to bundle code objects for
+// different devices into a HIP fat binary.
+void HIP::constructHIPFatbinCommand(Compilation &C, const JobAction &JA,
+ llvm::StringRef OutputFileName,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &Args,
+ const Tool &T) {
+ // Construct clang-offload-bundler command to bundle object files for
+ // for different GPU archs.
+ ArgStringList BundlerArgs;
+ BundlerArgs.push_back(Args.MakeArgString("-type=o"));
+ BundlerArgs.push_back(
+ Args.MakeArgString("-bundle-align=" + Twine(HIPCodeObjectAlign)));
+
+ // ToDo: Remove the dummy host binary entry which is required by
+ // clang-offload-bundler.
+ std::string BundlerTargetArg = "-targets=host-x86_64-unknown-linux";
+ // AMDGCN:
+ // For code object version 2 and 3, the offload kind in bundle ID is 'hip'
+ // for backward compatibility. For code object version 4 and greater, the
+ // offload kind in bundle ID is 'hipv4'.
+ std::string OffloadKind = "hip";
+ auto &TT = T.getToolChain().getTriple();
+ if (TT.isAMDGCN() && getAMDGPUCodeObjectVersion(C.getDriver(), Args) >= 4)
+ OffloadKind = OffloadKind + "v4";
+ for (const auto &II : Inputs) {
+ const auto *A = II.getAction();
+ auto ArchStr = llvm::StringRef(A->getOffloadingArch());
+ BundlerTargetArg +=
+ "," + OffloadKind + "-" + normalizeForBundler(TT, !ArchStr.empty());
+ if (!ArchStr.empty())
+ BundlerTargetArg += "-" + ArchStr.str();
+ }
+ BundlerArgs.push_back(Args.MakeArgString(BundlerTargetArg));
+
+ // Use a NULL file as input for the dummy host binary entry
+ std::string BundlerInputArg = "-input=" NULL_FILE;
+ BundlerArgs.push_back(Args.MakeArgString(BundlerInputArg));
+ for (const auto &II : Inputs) {
+ BundlerInputArg = std::string("-input=") + II.getFilename();
+ BundlerArgs.push_back(Args.MakeArgString(BundlerInputArg));
+ }
+
+ std::string Output = std::string(OutputFileName);
+ auto *BundlerOutputArg =
+ Args.MakeArgString(std::string("-output=").append(Output));
+ BundlerArgs.push_back(BundlerOutputArg);
+
+ if (Args.hasFlag(options::OPT_offload_compress,
+ options::OPT_no_offload_compress, false))
+ BundlerArgs.push_back("-compress");
+ if (Args.hasArg(options::OPT_v))
+ BundlerArgs.push_back("-verbose");
+
+ const char *Bundler = Args.MakeArgString(
+ T.getToolChain().GetProgramPath("clang-offload-bundler"));
+ C.addCommand(std::make_unique<Command>(
+ JA, T, ResponseFileSupport::None(), Bundler, BundlerArgs, Inputs,
+ InputInfo(&JA, Args.MakeArgString(Output))));
+}
+
+/// Add Generated HIP Object File which has device images embedded into the
+/// host to the argument list for linking. Using MC directives, embed the
+/// device code and also define symbols required by the code generation so that
+/// the image can be retrieved at runtime.
+void HIP::constructGenerateObjFileFromHIPFatBinary(
+ Compilation &C, const InputInfo &Output, const InputInfoList &Inputs,
+ const ArgList &Args, const JobAction &JA, const Tool &T) {
+ const ToolChain &TC = T.getToolChain();
+ std::string Name = std::string(llvm::sys::path::stem(Output.getFilename()));
+
+ // Create Temp Object File Generator,
+ // Offload Bundled file and Bundled Object file.
+ // Keep them if save-temps is enabled.
+ const char *McinFile;
+ const char *BundleFile;
+ if (C.getDriver().isSaveTempsEnabled()) {
+ McinFile = C.getArgs().MakeArgString(Name + ".mcin");
+ BundleFile = C.getArgs().MakeArgString(Name + ".hipfb");
+ } else {
+ auto TmpNameMcin = C.getDriver().GetTemporaryPath(Name, "mcin");
+ McinFile = C.addTempFile(C.getArgs().MakeArgString(TmpNameMcin));
+ auto TmpNameFb = C.getDriver().GetTemporaryPath(Name, "hipfb");
+ BundleFile = C.addTempFile(C.getArgs().MakeArgString(TmpNameFb));
+ }
+ HIP::constructHIPFatbinCommand(C, JA, BundleFile, Inputs, Args, T);
+
+ // Create a buffer to write the contents of the temp obj generator.
+ std::string ObjBuffer;
+ llvm::raw_string_ostream ObjStream(ObjBuffer);
+
+ auto HostTriple =
+ C.getSingleOffloadToolChain<Action::OFK_Host>()->getTriple();
+
+ // Add MC directives to embed target binaries. We ensure that each
+ // section and image is 16-byte aligned. This is not mandatory, but
+ // increases the likelihood of data to be aligned with a cache block
+ // in several main host machines.
+ ObjStream << "# HIP Object Generator\n";
+ ObjStream << "# *** Automatically generated by Clang ***\n";
+ if (HostTriple.isWindowsMSVCEnvironment()) {
+ ObjStream << " .section .hip_fatbin, \"dw\"\n";
+ } else {
+ ObjStream << " .protected __hip_fatbin\n";
+ ObjStream << " .type __hip_fatbin,@object\n";
+ ObjStream << " .section .hip_fatbin,\"a\",@progbits\n";
+ }
+ ObjStream << " .globl __hip_fatbin\n";
+ ObjStream << " .p2align " << llvm::Log2(llvm::Align(HIPCodeObjectAlign))
+ << "\n";
+ ObjStream << "__hip_fatbin:\n";
+ ObjStream << " .incbin ";
+ llvm::sys::printArg(ObjStream, BundleFile, /*Quote=*/true);
+ ObjStream << "\n";
+ if (HostTriple.isOSLinux() && HostTriple.isOSBinFormatELF())
+ ObjStream << " .section .note.GNU-stack, \"\", @progbits\n";
+ ObjStream.flush();
+
+ // Dump the contents of the temp object file gen if the user requested that.
+ // We support this option to enable testing of behavior with -###.
+ if (C.getArgs().hasArg(options::OPT_fhip_dump_offload_linker_script))
+ llvm::errs() << ObjBuffer;
+
+ // Open script file and write the contents.
+ std::error_code EC;
+ llvm::raw_fd_ostream Objf(McinFile, EC, llvm::sys::fs::OF_None);
+
+ if (EC) {
+ C.getDriver().Diag(clang::diag::err_unable_to_make_temp) << EC.message();
+ return;
+ }
+
+ Objf << ObjBuffer;
+
+ ArgStringList McArgs{"-triple", Args.MakeArgString(HostTriple.normalize()),
+ "-o", Output.getFilename(),
+ McinFile, "--filetype=obj"};
+ const char *Mc = Args.MakeArgString(TC.GetProgramPath("llvm-mc"));
+ C.addCommand(std::make_unique<Command>(JA, T, ResponseFileSupport::None(), Mc,
+ McArgs, Inputs, Output));
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.h
new file mode 100644
index 000000000000..29e5a922024a
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.h
@@ -0,0 +1,35 @@
+//===--- HIPUtility.h - Common HIP Tool Chain Utilities ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIPUTILITY_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIPUTILITY_H
+
+#include "clang/Driver/Tool.h"
+
+namespace clang {
+namespace driver {
+namespace tools {
+namespace HIP {
+
+// Construct command for creating HIP fatbin.
+void constructHIPFatbinCommand(Compilation &C, const JobAction &JA,
+ StringRef OutputFileName,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs, const Tool &T);
+
+// Construct command for creating Object from HIP fatbin.
+void constructGenerateObjFileFromHIPFatBinary(
+ Compilation &C, const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &Args, const JobAction &JA, const Tool &T);
+
+} // namespace HIP
+} // namespace tools
+} // namespace driver
+} // namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIPUTILITY_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.cpp
new file mode 100644
index 000000000000..c6ad862b2294
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.cpp
@@ -0,0 +1,259 @@
+//===--- HLSL.cpp - HLSL ToolChain Implementations --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "HLSL.h"
+#include "CommonArgs.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Job.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/TargetParser/Triple.h"
+
+using namespace clang::driver;
+using namespace clang::driver::tools;
+using namespace clang::driver::toolchains;
+using namespace clang;
+using namespace llvm::opt;
+using namespace llvm;
+
+namespace {
+
+const unsigned OfflineLibMinor = 0xF;
+
+bool isLegalShaderModel(Triple &T) {
+ if (T.getOS() != Triple::OSType::ShaderModel)
+ return false;
+
+ auto Version = T.getOSVersion();
+ if (Version.getBuild())
+ return false;
+ if (Version.getSubminor())
+ return false;
+
+ auto Kind = T.getEnvironment();
+
+ switch (Kind) {
+ default:
+ return false;
+ case Triple::EnvironmentType::Vertex:
+ case Triple::EnvironmentType::Hull:
+ case Triple::EnvironmentType::Domain:
+ case Triple::EnvironmentType::Geometry:
+ case Triple::EnvironmentType::Pixel:
+ case Triple::EnvironmentType::Compute: {
+ VersionTuple MinVer(4, 0);
+ return MinVer <= Version;
+ } break;
+ case Triple::EnvironmentType::Library: {
+ VersionTuple SM6x(6, OfflineLibMinor);
+ if (Version == SM6x)
+ return true;
+
+ VersionTuple MinVer(6, 3);
+ return MinVer <= Version;
+ } break;
+ case Triple::EnvironmentType::Amplification:
+ case Triple::EnvironmentType::Mesh: {
+ VersionTuple MinVer(6, 5);
+ return MinVer <= Version;
+ } break;
+ }
+ return false;
+}
+
+std::optional<std::string> tryParseProfile(StringRef Profile) {
+ // [ps|vs|gs|hs|ds|cs|ms|as]_[major]_[minor]
+ SmallVector<StringRef, 3> Parts;
+ Profile.split(Parts, "_");
+ if (Parts.size() != 3)
+ return std::nullopt;
+
+ Triple::EnvironmentType Kind =
+ StringSwitch<Triple::EnvironmentType>(Parts[0])
+ .Case("ps", Triple::EnvironmentType::Pixel)
+ .Case("vs", Triple::EnvironmentType::Vertex)
+ .Case("gs", Triple::EnvironmentType::Geometry)
+ .Case("hs", Triple::EnvironmentType::Hull)
+ .Case("ds", Triple::EnvironmentType::Domain)
+ .Case("cs", Triple::EnvironmentType::Compute)
+ .Case("lib", Triple::EnvironmentType::Library)
+ .Case("ms", Triple::EnvironmentType::Mesh)
+ .Case("as", Triple::EnvironmentType::Amplification)
+ .Default(Triple::EnvironmentType::UnknownEnvironment);
+ if (Kind == Triple::EnvironmentType::UnknownEnvironment)
+ return std::nullopt;
+
+ unsigned long long Major = 0;
+ if (llvm::getAsUnsignedInteger(Parts[1], 0, Major))
+ return std::nullopt;
+
+ unsigned long long Minor = 0;
+ if (Parts[2] == "x" && Kind == Triple::EnvironmentType::Library)
+ Minor = OfflineLibMinor;
+ else if (llvm::getAsUnsignedInteger(Parts[2], 0, Minor))
+ return std::nullopt;
+
+ // dxil-unknown-shadermodel-hull
+ llvm::Triple T;
+ T.setArch(Triple::ArchType::dxil);
+ T.setOSName(Triple::getOSTypeName(Triple::OSType::ShaderModel).str() +
+ VersionTuple(Major, Minor).getAsString());
+ T.setEnvironment(Kind);
+ if (isLegalShaderModel(T))
+ return T.getTriple();
+ else
+ return std::nullopt;
+}
+
+bool isLegalValidatorVersion(StringRef ValVersionStr, const Driver &D) {
+ VersionTuple Version;
+ if (Version.tryParse(ValVersionStr) || Version.getBuild() ||
+ Version.getSubminor() || !Version.getMinor()) {
+ D.Diag(diag::err_drv_invalid_format_dxil_validator_version)
+ << ValVersionStr;
+ return false;
+ }
+
+ uint64_t Major = Version.getMajor();
+ uint64_t Minor = *Version.getMinor();
+ if (Major == 0 && Minor != 0) {
+ D.Diag(diag::err_drv_invalid_empty_dxil_validator_version) << ValVersionStr;
+ return false;
+ }
+ VersionTuple MinVer(1, 0);
+ if (Version < MinVer) {
+ D.Diag(diag::err_drv_invalid_range_dxil_validator_version) << ValVersionStr;
+ return false;
+ }
+ return true;
+}
+
+} // namespace
+
+void tools::hlsl::Validator::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ std::string DxvPath = getToolChain().GetProgramPath("dxv");
+ assert(DxvPath != "dxv" && "cannot find dxv");
+
+ ArgStringList CmdArgs;
+ assert(Inputs.size() == 1 && "Unable to handle multiple inputs.");
+ const InputInfo &Input = Inputs[0];
+ assert(Input.isFilename() && "Unexpected verify input");
+ // Grabbing the output of the earlier cc1 run.
+ CmdArgs.push_back(Input.getFilename());
+ // Use the same name as output.
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Input.getFilename());
+
+ const char *Exec = Args.MakeArgString(DxvPath);
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs, Input));
+}
+
+/// DirectX Toolchain
+HLSLToolChain::HLSLToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : ToolChain(D, Triple, Args) {
+ if (Args.hasArg(options::OPT_dxc_validator_path_EQ))
+ getProgramPaths().push_back(
+ Args.getLastArgValue(options::OPT_dxc_validator_path_EQ).str());
+}
+
+Tool *clang::driver::toolchains::HLSLToolChain::getTool(
+ Action::ActionClass AC) const {
+ switch (AC) {
+ case Action::BinaryAnalyzeJobClass:
+ if (!Validator)
+ Validator.reset(new tools::hlsl::Validator(*this));
+ return Validator.get();
+ default:
+ return ToolChain::getTool(AC);
+ }
+}
+
+std::optional<std::string>
+clang::driver::toolchains::HLSLToolChain::parseTargetProfile(
+ StringRef TargetProfile) {
+ return tryParseProfile(TargetProfile);
+}
+
+DerivedArgList *
+HLSLToolChain::TranslateArgs(const DerivedArgList &Args, StringRef BoundArch,
+ Action::OffloadKind DeviceOffloadKind) const {
+ DerivedArgList *DAL = new DerivedArgList(Args.getBaseArgs());
+
+ const OptTable &Opts = getDriver().getOpts();
+
+ for (Arg *A : Args) {
+ if (A->getOption().getID() == options::OPT_dxil_validator_version) {
+ StringRef ValVerStr = A->getValue();
+ std::string ErrorMsg;
+ if (!isLegalValidatorVersion(ValVerStr, getDriver()))
+ continue;
+ }
+ if (A->getOption().getID() == options::OPT_dxc_entrypoint) {
+ DAL->AddSeparateArg(nullptr, Opts.getOption(options::OPT_hlsl_entrypoint),
+ A->getValue());
+ A->claim();
+ continue;
+ }
+ if (A->getOption().getID() == options::OPT__SLASH_O) {
+ StringRef OStr = A->getValue();
+ if (OStr == "d") {
+ DAL->AddFlagArg(nullptr, Opts.getOption(options::OPT_O0));
+ A->claim();
+ continue;
+ } else {
+ DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_O), OStr);
+ A->claim();
+ continue;
+ }
+ }
+ if (A->getOption().getID() == options::OPT_emit_pristine_llvm) {
+ // Translate fcgl into -S -emit-llvm and -disable-llvm-passes.
+ DAL->AddFlagArg(nullptr, Opts.getOption(options::OPT_S));
+ DAL->AddFlagArg(nullptr, Opts.getOption(options::OPT_emit_llvm));
+ DAL->AddFlagArg(nullptr,
+ Opts.getOption(options::OPT_disable_llvm_passes));
+ A->claim();
+ continue;
+ }
+ DAL->append(A);
+ }
+
+ // Add default validator version if not set.
+ // TODO: remove this once read validator version from validator.
+ if (!DAL->hasArg(options::OPT_dxil_validator_version)) {
+ const StringRef DefaultValidatorVer = "1.7";
+ DAL->AddSeparateArg(nullptr,
+ Opts.getOption(options::OPT_dxil_validator_version),
+ DefaultValidatorVer);
+ }
+ if (!DAL->hasArg(options::OPT_O_Group)) {
+ DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_O), "3");
+ }
+ // FIXME: add validation for enable_16bit_types should be after HLSL 2018 and
+ // shader model 6.2.
+ // See: https://github.com/llvm/llvm-project/issues/57876
+ return DAL;
+}
+
+bool HLSLToolChain::requiresValidation(DerivedArgList &Args) const {
+ if (Args.getLastArg(options::OPT_dxc_disable_validation))
+ return false;
+
+ std::string DxvPath = GetProgramPath("dxv");
+ if (DxvPath != "dxv")
+ return true;
+
+ getDriver().Diag(diag::warn_drv_dxc_missing_dxv);
+ return false;
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.h
new file mode 100644
index 000000000000..7b775b897431
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.h
@@ -0,0 +1,63 @@
+//===--- HLSL.h - HLSL ToolChain Implementations ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HLSL_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HLSL_H
+
+#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
+
+namespace clang {
+namespace driver {
+
+namespace tools {
+
+namespace hlsl {
+class LLVM_LIBRARY_VISIBILITY Validator : public Tool {
+public:
+ Validator(const ToolChain &TC) : Tool("hlsl::Validator", "dxv", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // namespace hlsl
+} // namespace tools
+
+namespace toolchains {
+
+class LLVM_LIBRARY_VISIBILITY HLSLToolChain : public ToolChain {
+public:
+ HLSLToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+ Tool *getTool(Action::ActionClass AC) const override;
+
+ bool isPICDefault() const override { return false; }
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override {
+ return false;
+ }
+ bool isPICDefaultForced() const override { return false; }
+
+ llvm::opt::DerivedArgList *
+ TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
+ Action::OffloadKind DeviceOffloadKind) const override;
+ static std::optional<std::string> parseTargetProfile(StringRef TargetProfile);
+ bool requiresValidation(llvm::opt::DerivedArgList &Args) const;
+
+private:
+ mutable std::unique_ptr<tools::hlsl::Validator> Validator;
+};
+
+} // end namespace toolchains
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HLSL_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Haiku.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Haiku.cpp
index a79f0f7622ad..e0d94035823f 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Haiku.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Haiku.cpp
@@ -8,27 +8,272 @@
#include "Haiku.h"
#include "CommonArgs.h"
+#include "clang/Config/config.h"
+#include "clang/Driver/Compilation.h"
+#include "llvm/Support/Path.h"
using namespace clang::driver;
+using namespace clang::driver::tools;
using namespace clang::driver::toolchains;
using namespace clang;
using namespace llvm::opt;
+void haiku::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const auto &ToolChain = static_cast<const Haiku &>(getToolChain());
+ const Driver &D = ToolChain.getDriver();
+ const llvm::Triple::ArchType Arch = ToolChain.getArch();
+ const bool Static = Args.hasArg(options::OPT_static);
+ const bool Shared = Args.hasArg(options::OPT_shared);
+ ArgStringList CmdArgs;
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and for "clang -w foo.o -o foo". Other warning options are already
+ // handled somewhere else.
+ Args.ClaimAllArgs(options::OPT_w);
+
+ // Silence warning for "clang -pie foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_pie);
+
+ // -rdynamic is a no-op with Haiku. Claim argument to avoid warning.
+ Args.ClaimAllArgs(options::OPT_rdynamic);
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ CmdArgs.push_back("--eh-frame-hdr");
+ if (Static) {
+ CmdArgs.push_back("-Bstatic");
+ } else {
+ if (Shared)
+ CmdArgs.push_back("-shared");
+ CmdArgs.push_back("--enable-new-dtags");
+ }
+
+ CmdArgs.push_back("-shared");
+
+ if (!Shared)
+ CmdArgs.push_back("--no-undefined");
+
+ if (Arch == llvm::Triple::riscv64)
+ CmdArgs.push_back("-X");
+
+ assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_r)) {
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtbeginS.o")));
+ if (!Shared)
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("start_dyn.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("init_term_dyn.o")));
+ }
+
+ Args.addAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
+ options::OPT_s, options::OPT_t, options::OPT_r});
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+
+ if (D.isUsingLTO()) {
+ assert(!Inputs.empty() && "Must have at least one input.");
+ // Find the first filename InputInfo object.
+ auto Input = llvm::find_if(
+ Inputs, [](const InputInfo &II) -> bool { return II.isFilename(); });
+ if (Input == Inputs.end())
+ // For a very rare case, all of the inputs to the linker are
+ // InputArg. If that happens, just use the first InputInfo.
+ Input = Inputs.begin();
+
+ addLTOOptions(ToolChain, Args, CmdArgs, Output, *Input,
+ D.getLTOMode() == LTOK_Thin);
+ }
+
+ addLinkerCompressDebugSectionsOption(ToolChain, Args, CmdArgs);
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs,
+ options::OPT_r)) {
+ // Use the static OpenMP runtime with -static-openmp
+ bool StaticOpenMP = Args.hasArg(options::OPT_static_openmp) && !Static;
+ addOpenMPRuntime(CmdArgs, ToolChain, Args, StaticOpenMP);
+
+ if (D.CCCIsCXX() && ToolChain.ShouldLinkCXXStdlib(Args))
+ ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
+
+ // Silence warnings when linking C code with a C++ '-stdlib' argument.
+ Args.ClaimAllArgs(options::OPT_stdlib_EQ);
+
+ // Additional linker set-up and flags for Fortran. This is required in order
+ // to generate executables. As Fortran runtime depends on the C runtime,
+ // these dependencies need to be listed before the C runtime below (i.e.
+ // AddRunTimeLibs).
+ if (D.IsFlangMode()) {
+ addFortranRuntimeLibraryPath(ToolChain, Args, CmdArgs);
+ addFortranRuntimeLibs(ToolChain, Args, CmdArgs);
+ }
+
+ CmdArgs.push_back("-lgcc");
+
+ CmdArgs.push_back("--push-state");
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lgcc_s");
+ CmdArgs.push_back("--no-as-needed");
+ CmdArgs.push_back("--pop-state");
+
+ CmdArgs.push_back("-lroot");
+
+ CmdArgs.push_back("-lgcc");
+
+ CmdArgs.push_back("--push-state");
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lgcc_s");
+ CmdArgs.push_back("--no-as-needed");
+ CmdArgs.push_back("--pop-state");
+ }
+
+ // No need to do anything for pthreads. Claim argument to avoid warning.
+ Args.claimAllArgs(options::OPT_pthread, options::OPT_pthreads);
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_r)) {
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtendS.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
+ }
+
+ ToolChain.addProfileRTLibs(Args, CmdArgs);
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
+ C.addCommand(std::make_unique<Command>(JA, *this,
+ ResponseFileSupport::AtFileCurCP(),
+ Exec, CmdArgs, Inputs, Output));
+}
+
/// Haiku - Haiku tool chain which can call as(1) and ld(1) directly.
Haiku::Haiku(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
+ GCCInstallation.init(Triple, Args);
+
+ getFilePaths().push_back(concat(getDriver().SysRoot, "/boot/system/lib"));
+ getFilePaths().push_back(concat(getDriver().SysRoot, "/boot/system/develop/lib"));
+
+ if (GCCInstallation.isValid())
+ getFilePaths().push_back(GCCInstallation.getInstallPath().str());
+}
+
+void Haiku::AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+
+ if (DriverArgs.hasArg(options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> Dir(D.ResourceDir);
+ llvm::sys::path::append(Dir, "include");
+ addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ // Add dirs specified via 'configure --with-c-include-dirs'.
+ StringRef CIncludeDirs(C_INCLUDE_DIRS);
+ if (!CIncludeDirs.empty()) {
+ SmallVector<StringRef, 5> dirs;
+ CIncludeDirs.split(dirs, ":");
+ for (StringRef dir : dirs) {
+ StringRef Prefix =
+ llvm::sys::path::is_absolute(dir) ? StringRef(D.SysRoot) : "";
+ addExternCSystemInclude(DriverArgs, CC1Args, Prefix + dir);
+ }
+ return;
+ }
+
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/non-packaged/develop/headers"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/app"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/device"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/drivers"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/game"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/interface"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/kernel"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/locale"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/mail"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/media"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/midi"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/midi2"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/net"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/opengl"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/storage"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/support"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/translation"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/add-ons/graphics"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/add-ons/input_server"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/add-ons/mail_daemon"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/add-ons/registrar"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/add-ons/screen_saver"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/add-ons/tracker"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/be_apps/Deskbar"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/be_apps/NetPositive"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/os/be_apps/Tracker"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/3rdparty"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/bsd"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/glibc"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/gnu"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers/posix"));
+ addSystemInclude(DriverArgs, CC1Args, concat(D.SysRoot,
+ "/boot/system/develop/headers"));
}
void Haiku::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/system/develop/headers/c++/v1");
+ concat(getDriver().SysRoot, "/boot/system/develop/headers/c++/v1"));
}
-void Haiku::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const {
- addLibStdCXXIncludePaths(getDriver().SysRoot + "/system/develop/headers/c++",
- getTriple().str(), "", DriverArgs, CC1Args);
-}
+Tool *Haiku::buildLinker() const { return new tools::haiku::Linker(*this); }
+
+bool Haiku::HasNativeLLVMSupport() const { return true; }
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Haiku.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Haiku.h
index 2bc98322bebf..a34f76e22284 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Haiku.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Haiku.h
@@ -15,6 +15,25 @@
namespace clang {
namespace driver {
+namespace tools {
+
+/// Directly call GNU Binutils assembler and linker
+namespace haiku {
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
+public:
+ Linker(const ToolChain &TC) : Tool("haiku::Linker", "linker", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace haiku
+} // end namespace tools
+
namespace toolchains {
class LLVM_LIBRARY_VISIBILITY Haiku : public Generic_ELF {
@@ -22,16 +41,27 @@ public:
Haiku(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
- bool isPIEDefault() const override {
- return getTriple().getArch() == llvm::Triple::x86_64;
- }
+ bool HasNativeLLVMSupport() const override;
- void addLibCxxIncludePaths(
+ bool IsMathErrnoDefault() const override { return false; }
+ bool IsObjCNonFragileABIDefault() const override { return true; }
+ bool isPICDefault() const override { return true; }
+
+ const char *getDefaultLinker() const override { return "ld.lld"; }
+
+ void AddClangSystemIncludeArgs(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- void addLibStdCxxIncludePaths(
+ void addLibCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+
+ unsigned GetDefaultDwarfVersion() const override { return 4; }
+
+ bool GetDefaultStandaloneDebug() const override { return true; }
+
+protected:
+ Tool *buildLinker() const override;
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp
index 314d0efce441..d1eed931be5f 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp
@@ -26,8 +26,8 @@ using namespace clang;
using namespace llvm::opt;
// Default hvx-length for various versions.
-static StringRef getDefaultHvxLength(StringRef Cpu) {
- return llvm::StringSwitch<StringRef>(Cpu)
+static StringRef getDefaultHvxLength(StringRef HvxVer) {
+ return llvm::StringSwitch<StringRef>(HvxVer)
.Case("v60", "64b")
.Case("v62", "64b")
.Case("v65", "64b")
@@ -40,7 +40,7 @@ static void handleHVXWarnings(const Driver &D, const ArgList &Args) {
StringRef Val = A->getValue();
if (!Val.equals_insensitive("64b") && !Val.equals_insensitive("128b"))
D.Diag(diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << Val;
+ << A->getSpelling() << Val;
}
}
@@ -51,49 +51,119 @@ static void handleHVXTargetFeatures(const Driver &D, const ArgList &Args,
// Handle HVX warnings.
handleHVXWarnings(D, Args);
- // Add the +hvx* features based on commandline flags.
- StringRef HVXFeature, HVXLength;
-
- // Handle -mhvx, -mhvx=, -mno-hvx.
- if (Arg *A = Args.getLastArg(options::OPT_mno_hexagon_hvx,
- options::OPT_mhexagon_hvx,
- options::OPT_mhexagon_hvx_EQ)) {
- if (A->getOption().matches(options::OPT_mno_hexagon_hvx))
- return;
- if (A->getOption().matches(options::OPT_mhexagon_hvx_EQ)) {
- HasHVX = true;
- HVXFeature = Cpu = A->getValue();
- HVXFeature = Args.MakeArgString(llvm::Twine("+hvx") + HVXFeature.lower());
- } else if (A->getOption().matches(options::OPT_mhexagon_hvx)) {
- HasHVX = true;
- HVXFeature = Args.MakeArgString(llvm::Twine("+hvx") + Cpu);
+ auto makeFeature = [&Args](Twine T, bool Enable) -> StringRef {
+ const std::string &S = T.str();
+ StringRef Opt(S);
+ if (Opt.ends_with("="))
+ Opt = Opt.drop_back(1);
+ if (Opt.starts_with("mno-"))
+ Opt = Opt.drop_front(4);
+ else if (Opt.starts_with("m"))
+ Opt = Opt.drop_front(1);
+ return Args.MakeArgString(Twine(Enable ? "+" : "-") + Twine(Opt));
+ };
+
+ auto withMinus = [](StringRef S) -> std::string {
+ return "-" + S.str();
+ };
+
+ // Drop tiny core suffix for HVX version.
+ std::string HvxVer =
+ (Cpu.back() == 'T' || Cpu.back() == 't' ? Cpu.drop_back(1) : Cpu).str();
+ HasHVX = false;
+
+ // Handle -mhvx, -mhvx=, -mno-hvx. If versioned and versionless flags
+ // are both present, the last one wins.
+ Arg *HvxEnablingArg =
+ Args.getLastArg(options::OPT_mhexagon_hvx, options::OPT_mhexagon_hvx_EQ,
+ options::OPT_mno_hexagon_hvx);
+ if (HvxEnablingArg) {
+ if (HvxEnablingArg->getOption().matches(options::OPT_mno_hexagon_hvx))
+ HvxEnablingArg = nullptr;
+ }
+
+ if (HvxEnablingArg) {
+ // If -mhvx[=] was given, it takes precedence.
+ if (Arg *A = Args.getLastArg(options::OPT_mhexagon_hvx,
+ options::OPT_mhexagon_hvx_EQ)) {
+ // If the version was given, set HvxVer. Otherwise HvxVer
+ // will remain equal to the CPU version.
+ if (A->getOption().matches(options::OPT_mhexagon_hvx_EQ))
+ HvxVer = StringRef(A->getValue()).lower();
}
- Features.push_back(HVXFeature);
+ HasHVX = true;
+ Features.push_back(makeFeature(Twine("hvx") + HvxVer, true));
+ } else if (Arg *A = Args.getLastArg(options::OPT_mno_hexagon_hvx)) {
+ // If there was an explicit -mno-hvx, add -hvx to target features.
+ Features.push_back(makeFeature(A->getOption().getName(), false));
}
+ StringRef HvxLen = getDefaultHvxLength(HvxVer);
+
// Handle -mhvx-length=.
if (Arg *A = Args.getLastArg(options::OPT_mhexagon_hvx_length_EQ)) {
// These flags are valid only if HVX in enabled.
if (!HasHVX)
- D.Diag(diag::err_drv_invalid_hvx_length);
+ D.Diag(diag::err_drv_needs_hvx) << withMinus(A->getOption().getName());
else if (A->getOption().matches(options::OPT_mhexagon_hvx_length_EQ))
- HVXLength = A->getValue();
+ HvxLen = A->getValue();
}
- // Default hvx-length based on Cpu.
- else if (HasHVX)
- HVXLength = getDefaultHvxLength(Cpu);
-
- if (!HVXLength.empty()) {
- HVXFeature =
- Args.MakeArgString(llvm::Twine("+hvx-length") + HVXLength.lower());
- Features.push_back(HVXFeature);
+
+ if (HasHVX) {
+ StringRef L = makeFeature(Twine("hvx-length") + HvxLen.lower(), true);
+ Features.push_back(L);
+ }
+
+ unsigned HvxVerNum;
+ // getAsInteger returns 'true' on error.
+ if (StringRef(HvxVer).drop_front(1).getAsInteger(10, HvxVerNum))
+ HvxVerNum = 0;
+
+ // Handle HVX floating point flags.
+ auto checkFlagHvxVersion =
+ [&](auto FlagOn, auto FlagOff,
+ unsigned MinVerNum) -> std::optional<StringRef> {
+ // Return an std::optional<StringRef>:
+ // - std::nullopt indicates a verification failure, or that the flag was not
+ // present in Args.
+ // - Otherwise the returned value is that name of the feature to add
+ // to Features.
+ Arg *A = Args.getLastArg(FlagOn, FlagOff);
+ if (!A)
+ return std::nullopt;
+
+ StringRef OptName = A->getOption().getName();
+ if (A->getOption().matches(FlagOff))
+ return makeFeature(OptName, false);
+
+ if (!HasHVX) {
+ D.Diag(diag::err_drv_needs_hvx) << withMinus(OptName);
+ return std::nullopt;
+ }
+ if (HvxVerNum < MinVerNum) {
+ D.Diag(diag::err_drv_needs_hvx_version)
+ << withMinus(OptName) << ("v" + std::to_string(HvxVerNum));
+ return std::nullopt;
+ }
+ return makeFeature(OptName, true);
+ };
+
+ if (auto F = checkFlagHvxVersion(options::OPT_mhexagon_hvx_qfloat,
+ options::OPT_mno_hexagon_hvx_qfloat, 68)) {
+ Features.push_back(*F);
+ }
+ if (auto F = checkFlagHvxVersion(options::OPT_mhexagon_hvx_ieee_fp,
+ options::OPT_mno_hexagon_hvx_ieee_fp, 68)) {
+ Features.push_back(*F);
}
}
// Hexagon target features.
-void hexagon::getHexagonTargetFeatures(const Driver &D, const ArgList &Args,
+void hexagon::getHexagonTargetFeatures(const Driver &D,
+ const llvm::Triple &Triple,
+ const ArgList &Args,
std::vector<StringRef> &Features) {
- handleTargetFeaturesGroup(Args, Features,
+ handleTargetFeaturesGroup(D, Triple, Args, Features,
options::OPT_m_hexagon_Features_Group);
bool UseLongCalls = false;
@@ -117,7 +187,7 @@ void hexagon::getHexagonTargetFeatures(const Driver &D, const ArgList &Args,
handleHVXTargetFeatures(D, Args, Features, Cpu, HasHVX);
if (HexagonToolChain::isAutoHVXEnabled(Args) && !HasHVX)
- D.Diag(diag::warn_drv_vectorize_needs_hvx);
+ D.Diag(diag::warn_drv_needs_hvx) << "auto-vectorization";
}
// Hexagon tools start.
@@ -146,16 +216,24 @@ void hexagon::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
"-mcpu=hexagon" +
toolchains::HexagonToolChain::GetTargetCPUVersion(Args)));
+ addSanitizerRuntimes(HTC, Args, CmdArgs);
+
+ assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
if (Output.isFilename()) {
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
} else {
- assert(Output.isNothing() && "Unexpected output");
CmdArgs.push_back("-fsyntax-only");
}
+ if (Arg *A = Args.getLastArg(options::OPT_mhexagon_hvx_ieee_fp,
+ options::OPT_mno_hexagon_hvx_ieee_fp)) {
+ if (A->getOption().matches(options::OPT_mhexagon_hvx_ieee_fp))
+ CmdArgs.push_back("-mhvx-ieee-fp");
+ }
+
if (auto G = toolchains::HexagonToolChain::getSmallDataThreshold(Args)) {
- CmdArgs.push_back(Args.MakeArgString("-gpsize=" + Twine(G.getValue())));
+ CmdArgs.push_back(Args.MakeArgString("-gpsize=" + Twine(*G)));
}
Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
@@ -223,6 +301,9 @@ constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
bool UseShared = IsShared && !IsStatic;
StringRef CpuVer = toolchains::HexagonToolChain::GetTargetCPUVersion(Args);
+ bool NeedsSanitizerDeps = addSanitizerRuntimes(HTC, Args, CmdArgs);
+ bool NeedsXRayDeps = addXRayRuntime(HTC, Args, CmdArgs);
+
//----------------------------------------------------------------------------
// Silence warnings for various options
//----------------------------------------------------------------------------
@@ -262,8 +343,8 @@ constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-pie");
if (auto G = toolchains::HexagonToolChain::getSmallDataThreshold(Args)) {
- CmdArgs.push_back(Args.MakeArgString("-G" + Twine(G.getValue())));
- UseG0 = G.getValue() == 0;
+ CmdArgs.push_back(Args.MakeArgString("-G" + Twine(*G)));
+ UseG0 = *G == 0;
}
CmdArgs.push_back("-o");
@@ -282,19 +363,31 @@ constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
CmdArgs.push_back(
Args.MakeArgString(StringRef("-L") + D.SysRoot + "/usr/lib"));
- Args.AddAllArgs(CmdArgs,
- {options::OPT_T_Group, options::OPT_e, options::OPT_s,
- options::OPT_t, options::OPT_u_Group});
+ Args.addAllArgs(CmdArgs, {options::OPT_T_Group, options::OPT_s,
+ options::OPT_t, options::OPT_u_Group});
AddLinkerInputs(HTC, Inputs, Args, CmdArgs, JA);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ if (NeedsSanitizerDeps) {
+ linkSanitizerRuntimeDeps(HTC, Args, CmdArgs);
+
+ CmdArgs.push_back("-lunwind");
+ }
+ if (NeedsXRayDeps)
+ linkXRayRuntimeDeps(HTC, Args, CmdArgs);
+
CmdArgs.push_back("-lclang_rt.builtins-hexagon");
- CmdArgs.push_back("-lc");
+ if (!Args.hasArg(options::OPT_nolibc))
+ CmdArgs.push_back("-lc");
}
if (D.CCCIsCXX()) {
if (HTC.ShouldLinkCXXStdlib(Args))
HTC.AddCXXStdlibLibArgs(Args, CmdArgs);
}
+ const ToolChain::path_list &LibPaths = HTC.getFilePaths();
+ for (const auto &LibPath : LibPaths)
+ CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + LibPath));
+ Args.ClaimAllArgs(options::OPT_L);
return;
}
@@ -353,13 +446,13 @@ constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
const ToolChain::path_list &LibPaths = HTC.getFilePaths();
for (const auto &LibPath : LibPaths)
CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + LibPath));
+ Args.ClaimAllArgs(options::OPT_L);
//----------------------------------------------------------------------------
//
//----------------------------------------------------------------------------
- Args.AddAllArgs(CmdArgs,
- {options::OPT_T_Group, options::OPT_e, options::OPT_s,
- options::OPT_t, options::OPT_u_Group});
+ Args.addAllArgs(CmdArgs, {options::OPT_T_Group, options::OPT_s,
+ options::OPT_t, options::OPT_u_Group});
AddLinkerInputs(HTC, Inputs, Args, CmdArgs, JA);
@@ -378,7 +471,8 @@ constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
if (!IsShared) {
for (StringRef Lib : OsLibs)
CmdArgs.push_back(Args.MakeArgString("-l" + Lib));
- CmdArgs.push_back("-lc");
+ if (!Args.hasArg(options::OPT_nolibc))
+ CmdArgs.push_back("-lc");
}
CmdArgs.push_back("-lgcc");
@@ -433,8 +527,8 @@ std::string HexagonToolChain::getHexagonTargetDir(
return InstalledDir;
}
-Optional<unsigned> HexagonToolChain::getSmallDataThreshold(
- const ArgList &Args) {
+std::optional<unsigned>
+HexagonToolChain::getSmallDataThreshold(const ArgList &Args) {
StringRef Gn = "";
if (Arg *A = Args.getLastArg(options::OPT_G)) {
Gn = A->getValue();
@@ -447,7 +541,16 @@ Optional<unsigned> HexagonToolChain::getSmallDataThreshold(
if (!Gn.getAsInteger(10, G))
return G;
- return None;
+ return std::nullopt;
+}
+
+std::string HexagonToolChain::getCompilerRTPath() const {
+ SmallString<128> Dir(getDriver().SysRoot);
+ llvm::sys::path::append(Dir, "usr", "lib");
+ if (!SelectedMultilibs.empty()) {
+ Dir += SelectedMultilibs.back().gccSuffix();
+ }
+ return std::string(Dir);
}
void HexagonToolChain::getHexagonLibraryPaths(const ArgList &Args,
@@ -458,8 +561,7 @@ void HexagonToolChain::getHexagonLibraryPaths(const ArgList &Args,
// -L Args
//----------------------------------------------------------------------------
for (Arg *A : Args.filtered(options::OPT_L))
- for (const char *Value : A->getValues())
- LibPaths.push_back(Value);
+ llvm::append_range(LibPaths, A->getValues());
//----------------------------------------------------------------------------
// Other standard paths
@@ -470,14 +572,14 @@ void HexagonToolChain::getHexagonLibraryPaths(const ArgList &Args,
std::string TargetDir = getHexagonTargetDir(D.getInstalledDir(),
D.PrefixDirs);
- if (llvm::find(RootDirs, TargetDir) == RootDirs.end())
+ if (!llvm::is_contained(RootDirs, TargetDir))
RootDirs.push_back(TargetDir);
bool HasPIC = Args.hasArg(options::OPT_fpic, options::OPT_fPIC);
// Assume G0 with -shared.
bool HasG0 = Args.hasArg(options::OPT_shared);
if (auto G = getSmallDataThreshold(Args))
- HasG0 = G.getValue() == 0;
+ HasG0 = *G == 0;
const std::string CpuVer = GetTargetCPUVersion(Args).str();
for (auto &Dir : RootDirs) {
@@ -522,6 +624,8 @@ void HexagonToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
switch (Type) {
case ToolChain::CST_Libcxx:
CmdArgs.push_back("-lc++");
+ if (Args.hasArg(options::OPT_fexperimental_library))
+ CmdArgs.push_back("-lc++experimental");
CmdArgs.push_back("-lc++abi");
CmdArgs.push_back("-lunwind");
break;
@@ -687,17 +791,17 @@ bool HexagonToolChain::isAutoHVXEnabled(const llvm::opt::ArgList &Args) {
// Returns the default CPU for Hexagon. This is the default compilation target
// if no Hexagon processor is selected at the command-line.
//
-const StringRef HexagonToolChain::GetDefaultCPU() {
+StringRef HexagonToolChain::GetDefaultCPU() {
return "hexagonv60";
}
-const StringRef HexagonToolChain::GetTargetCPUVersion(const ArgList &Args) {
+StringRef HexagonToolChain::GetTargetCPUVersion(const ArgList &Args) {
Arg *CpuArg = nullptr;
if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
CpuArg = A;
StringRef CPU = CpuArg ? CpuArg->getValue() : GetDefaultCPU();
- if (CPU.startswith("hexagon"))
+ if (CPU.starts_with("hexagon"))
return CPU.substr(sizeof("hexagon") - 1);
return CPU;
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.h
index c32cb7f09591..e35a224dced4 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.h
@@ -20,7 +20,7 @@ namespace hexagon {
// For Hexagon, we do not need to instantiate tools for PreProcess, PreCompile
// and Compile.
// We simply use "clang -cc1" for those actions.
-class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
+class LLVM_LIBRARY_VISIBILITY Assembler final : public Tool {
public:
Assembler(const ToolChain &TC)
: Tool("hexagon::Assembler", "hexagon-as", TC) {}
@@ -35,7 +35,7 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
public:
Linker(const ToolChain &TC) : Tool("hexagon::Linker", "hexagon-ld", TC) {}
@@ -50,7 +50,8 @@ public:
const char *LinkingOutput) const override;
};
-void getHexagonTargetFeatures(const Driver &D, const llvm::opt::ArgList &Args,
+void getHexagonTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
std::vector<StringRef> &Features);
} // end namespace hexagon.
@@ -94,9 +95,6 @@ public:
llvm::opt::ArgStringList &CmdArgs) const override;
StringRef GetGCCLibAndIncVersion() const { return GCCLibAndIncVersion.Text; }
- bool IsIntegratedAssemblerDefault() const override {
- return true;
- }
std::string getHexagonTargetDir(
const std::string &InstalledDir,
@@ -104,12 +102,14 @@ public:
void getHexagonLibraryPaths(const llvm::opt::ArgList &Args,
ToolChain::path_list &LibPaths) const;
+ std::string getCompilerRTPath() const override;
+
static bool isAutoHVXEnabled(const llvm::opt::ArgList &Args);
- static const StringRef GetDefaultCPU();
- static const StringRef GetTargetCPUVersion(const llvm::opt::ArgList &Args);
+ static StringRef GetDefaultCPU();
+ static StringRef GetTargetCPUVersion(const llvm::opt::ArgList &Args);
- static Optional<unsigned> getSmallDataThreshold(
- const llvm::opt::ArgList &Args);
+ static std::optional<unsigned>
+ getSmallDataThreshold(const llvm::opt::ArgList &Args);
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hurd.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hurd.cpp
index 48b9ccadf36f..0bc114b90ffc 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hurd.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hurd.cpp
@@ -30,13 +30,21 @@ using tools::addPathIfExists;
std::string Hurd::getMultiarchTriple(const Driver &D,
const llvm::Triple &TargetTriple,
StringRef SysRoot) const {
- if (TargetTriple.getArch() == llvm::Triple::x86) {
+ switch (TargetTriple.getArch()) {
+ default:
+ break;
+
+ case llvm::Triple::x86:
// We use the existence of '/lib/<triple>' as a directory to detect some
// common hurd triples that don't quite match the Clang triple for both
// 32-bit and 64-bit targets. Multiarch fixes its install triples to these
// regardless of what the actual target triple is.
if (D.getVFS().exists(SysRoot + "/lib/i386-gnu"))
return "i386-gnu";
+ break;
+
+ case llvm::Triple::x86_64:
+ return "x86_64-gnu";
}
// For most architectures, just use whatever we have rather than trying to be
@@ -65,7 +73,7 @@ Hurd::Hurd(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
GCCInstallation.init(Triple, Args);
Multilibs = GCCInstallation.getMultilibs();
- SelectedMultilib = GCCInstallation.getMultilib();
+ SelectedMultilibs.assign({GCCInstallation.getMultilib()});
std::string SysRoot = computeSysRoot();
ToolChain::path_list &PPaths = getProgramPaths();
@@ -92,7 +100,7 @@ Hurd::Hurd(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
// those searched.
// FIXME: It's not clear whether we should use the driver's installed
// directory ('Dir' below) or the ResourceDir.
- if (StringRef(D.Dir).startswith(SysRoot)) {
+ if (StringRef(D.Dir).starts_with(SysRoot)) {
addPathIfExists(D, D.Dir + "/../lib/" + MultiarchTriple, Paths);
addPathIfExists(D, D.Dir + "/../" + OSLibDir, Paths);
}
@@ -110,7 +118,7 @@ Hurd::Hurd(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
// searched.
// FIXME: It's not clear whether we should use the driver's installed
// directory ('Dir' below) or the ResourceDir.
- if (StringRef(D.Dir).startswith(SysRoot))
+ if (StringRef(D.Dir).starts_with(SysRoot))
addPathIfExists(D, D.Dir + "/../lib", Paths);
addPathIfExists(D, SysRoot + "/lib", Paths);
@@ -126,8 +134,14 @@ Tool *Hurd::buildAssembler() const {
}
std::string Hurd::getDynamicLinker(const ArgList &Args) const {
- if (getArch() == llvm::Triple::x86)
+ switch (getArch()) {
+ case llvm::Triple::x86:
return "/lib/ld.so";
+ case llvm::Triple::x86_64:
+ return "/lib/ld-x86-64.so.1";
+ default:
+ break;
+ }
llvm_unreachable("unsupported architecture");
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Lanai.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Lanai.h
index dc04b0cfe2ee..33701f7cc045 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Lanai.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Lanai.h
@@ -29,8 +29,6 @@ public:
void addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override {}
-
- bool IsIntegratedAssemblerDefault() const override { return true; }
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/LazyDetector.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/LazyDetector.h
new file mode 100644
index 000000000000..813d00a87bb8
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/LazyDetector.h
@@ -0,0 +1,45 @@
+//===--- LazyDetector.h - Lazy ToolChain Detection --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_LAZYDETECTOR_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_LAZYDETECTOR_H
+
+#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
+#include <optional>
+
+namespace clang {
+
+/// Simple wrapper for toolchain detector with costly initialization. This
+/// delays the creation of the actual detector until its first usage.
+
+template <class T> class LazyDetector {
+ const driver::Driver &D;
+ llvm::Triple Triple;
+ const llvm::opt::ArgList &Args;
+
+ std::optional<T> Detector;
+
+public:
+ LazyDetector(const driver::Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args)
+ : D(D), Triple(Triple), Args(Args) {}
+ T *operator->() {
+ if (!Detector)
+ Detector.emplace(D, Triple, Args);
+ return &*Detector;
+ }
+ const T *operator->() const {
+ return const_cast<T const *>(
+ const_cast<LazyDetector &>(*this).operator->());
+ }
+};
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_LAZYDETECTOR_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp
index c9360fc67165..4300a2bdff17 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp
@@ -8,6 +8,7 @@
#include "Linux.h"
#include "Arch/ARM.h"
+#include "Arch/LoongArch.h"
#include "Arch/Mips.h"
#include "Arch/PPC.h"
#include "Arch/RISCV.h"
@@ -60,12 +61,16 @@ std::string Linux::getMultiarchTriple(const Driver &D,
case llvm::Triple::thumb:
if (IsAndroid)
return "arm-linux-androideabi";
- if (TargetEnvironment == llvm::Triple::GNUEABIHF)
+ if (TargetEnvironment == llvm::Triple::GNUEABIHF ||
+ TargetEnvironment == llvm::Triple::MuslEABIHF ||
+ TargetEnvironment == llvm::Triple::EABIHF)
return "arm-linux-gnueabihf";
return "arm-linux-gnueabi";
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
- if (TargetEnvironment == llvm::Triple::GNUEABIHF)
+ if (TargetEnvironment == llvm::Triple::GNUEABIHF ||
+ TargetEnvironment == llvm::Triple::MuslEABIHF ||
+ TargetEnvironment == llvm::Triple::EABIHF)
return "armeb-linux-gnueabihf";
return "armeb-linux-gnueabi";
case llvm::Triple::x86:
@@ -85,37 +90,66 @@ std::string Linux::getMultiarchTriple(const Driver &D,
case llvm::Triple::aarch64_be:
return "aarch64_be-linux-gnu";
+ case llvm::Triple::loongarch64: {
+ const char *Libc;
+ const char *FPFlavor;
+
+ if (TargetTriple.isGNUEnvironment()) {
+ Libc = "gnu";
+ } else if (TargetTriple.isMusl()) {
+ Libc = "musl";
+ } else {
+ return TargetTriple.str();
+ }
+
+ switch (TargetEnvironment) {
+ default:
+ return TargetTriple.str();
+ case llvm::Triple::GNUSF:
+ FPFlavor = "sf";
+ break;
+ case llvm::Triple::GNUF32:
+ FPFlavor = "f32";
+ break;
+ case llvm::Triple::GNU:
+ case llvm::Triple::GNUF64:
+ // This was going to be "f64" in an earlier Toolchain Conventions
+ // revision, but starting from Feb 2023 the F64 ABI variants are
+ // unmarked in their canonical forms.
+ FPFlavor = "";
+ break;
+ }
+
+ return (Twine("loongarch64-linux-") + Libc + FPFlavor).str();
+ }
+
case llvm::Triple::m68k:
return "m68k-linux-gnu";
case llvm::Triple::mips:
return IsMipsR6 ? "mipsisa32r6-linux-gnu" : "mips-linux-gnu";
case llvm::Triple::mipsel:
- if (IsAndroid)
- return "mipsel-linux-android";
return IsMipsR6 ? "mipsisa32r6el-linux-gnu" : "mipsel-linux-gnu";
case llvm::Triple::mips64: {
std::string MT = std::string(IsMipsR6 ? "mipsisa64r6" : "mips64") +
"-linux-" + (IsMipsN32Abi ? "gnuabin32" : "gnuabi64");
- if (D.getVFS().exists(SysRoot + "/lib/" + MT))
+ if (D.getVFS().exists(concat(SysRoot, "/lib", MT)))
return MT;
- if (D.getVFS().exists(SysRoot + "/lib/mips64-linux-gnu"))
+ if (D.getVFS().exists(concat(SysRoot, "/lib/mips64-linux-gnu")))
return "mips64-linux-gnu";
break;
}
case llvm::Triple::mips64el: {
- if (IsAndroid)
- return "mips64el-linux-android";
std::string MT = std::string(IsMipsR6 ? "mipsisa64r6el" : "mips64el") +
"-linux-" + (IsMipsN32Abi ? "gnuabin32" : "gnuabi64");
- if (D.getVFS().exists(SysRoot + "/lib/" + MT))
+ if (D.getVFS().exists(concat(SysRoot, "/lib", MT)))
return MT;
- if (D.getVFS().exists(SysRoot + "/lib/mips64el-linux-gnu"))
+ if (D.getVFS().exists(concat(SysRoot, "/lib/mips64el-linux-gnu")))
return "mips64el-linux-gnu";
break;
}
case llvm::Triple::ppc:
- if (D.getVFS().exists(SysRoot + "/lib/powerpc-linux-gnuspe"))
+ if (D.getVFS().exists(concat(SysRoot, "/lib/powerpc-linux-gnuspe")))
return "powerpc-linux-gnuspe";
return "powerpc-linux-gnu";
case llvm::Triple::ppcle:
@@ -124,6 +158,10 @@ std::string Linux::getMultiarchTriple(const Driver &D,
return "powerpc64-linux-gnu";
case llvm::Triple::ppc64le:
return "powerpc64le-linux-gnu";
+ case llvm::Triple::riscv64:
+ if (IsAndroid)
+ return "riscv64-linux-android";
+ return "riscv64-linux-gnu";
case llvm::Triple::sparc:
return "sparc-linux-gnu";
case llvm::Triple::sparcv9:
@@ -179,7 +217,7 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
GCCInstallation.init(Triple, Args);
Multilibs = GCCInstallation.getMultilibs();
- SelectedMultilib = GCCInstallation.getMultilib();
+ SelectedMultilibs.assign({GCCInstallation.getMultilib()});
llvm::Triple::ArchType Arch = Triple.getArch();
std::string SysRoot = computeSysRoot();
ToolChain::path_list &PPaths = getProgramPaths();
@@ -206,8 +244,7 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
ExtraOpts.push_back("max-page-size=4096");
}
- if (GCCInstallation.getParentLibPath().find("opt/rh/devtoolset") !=
- StringRef::npos)
+ if (GCCInstallation.getParentLibPath().contains("opt/rh/"))
// With devtoolset on RHEL, we want to add a bin directory that is relative
// to the detected gcc install, because if we are using devtoolset gcc then
// we want to use other tools from devtoolset (e.g. ld) instead of the
@@ -222,8 +259,12 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
const bool IsMips = Triple.isMIPS();
const bool IsHexagon = Arch == llvm::Triple::hexagon;
const bool IsRISCV = Triple.isRISCV();
+ const bool IsCSKY = Triple.isCSKY();
- if (IsMips && !SysRoot.empty())
+ if (IsCSKY && !SelectedMultilibs.empty())
+ SysRoot = SysRoot + SelectedMultilibs.back().osSuffix();
+
+ if ((IsMips || IsCSKY) && !SysRoot.empty())
ExtraOpts.push_back("--sysroot=" + SysRoot);
// Do not use 'gnu' hash style for Mips targets because .gnu.hash
@@ -233,25 +274,18 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
// Android loader does not support .gnu.hash until API 23.
// Hexagon linker/loader does not support .gnu.hash
if (!IsMips && !IsHexagon) {
- if (Distro.IsRedhat() || Distro.IsOpenSUSE() || Distro.IsAlpineLinux() ||
- (Distro.IsUbuntu() && Distro >= Distro::UbuntuMaverick) ||
- (IsAndroid && !Triple.isAndroidVersionLT(23)))
- ExtraOpts.push_back("--hash-style=gnu");
-
- if (Distro.IsDebian() || Distro.IsOpenSUSE() ||
- Distro == Distro::UbuntuLucid || Distro == Distro::UbuntuJaunty ||
- Distro == Distro::UbuntuKarmic ||
+ if (Distro.IsOpenSUSE() || Distro == Distro::UbuntuLucid ||
+ Distro == Distro::UbuntuJaunty || Distro == Distro::UbuntuKarmic ||
(IsAndroid && Triple.isAndroidVersionLT(23)))
ExtraOpts.push_back("--hash-style=both");
+ else
+ ExtraOpts.push_back("--hash-style=gnu");
}
#ifdef ENABLE_LINKER_BUILD_ID
ExtraOpts.push_back("--build-id");
#endif
- if (IsAndroid || Distro.IsOpenSUSE())
- ExtraOpts.push_back("--enable-new-dtags");
-
// The selection of paths to try here is designed to match the patterns which
// the GCC driver itself uses, as this is part of the GCC-compatible driver.
// This was determined by running GCC in a fake filesystem, creating all
@@ -262,52 +296,48 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
const std::string OSLibDir = std::string(getOSLibDir(Triple, Args));
const std::string MultiarchTriple = getMultiarchTriple(D, Triple, SysRoot);
+ // mips32: Debian multilib, we use /libo32, while in other case, /lib is
+ // used. We need add both libo32 and /lib.
+ if (Arch == llvm::Triple::mips || Arch == llvm::Triple::mipsel) {
+ Generic_GCC::AddMultilibPaths(D, SysRoot, "libo32", MultiarchTriple, Paths);
+ addPathIfExists(D, concat(SysRoot, "/libo32"), Paths);
+ addPathIfExists(D, concat(SysRoot, "/usr/libo32"), Paths);
+ }
Generic_GCC::AddMultilibPaths(D, SysRoot, OSLibDir, MultiarchTriple, Paths);
- addPathIfExists(D, SysRoot + "/lib/" + MultiarchTriple, Paths);
- addPathIfExists(D, SysRoot + "/lib/../" + OSLibDir, Paths);
+ addPathIfExists(D, concat(SysRoot, "/lib", MultiarchTriple), Paths);
+ addPathIfExists(D, concat(SysRoot, "/lib/..", OSLibDir), Paths);
if (IsAndroid) {
// Android sysroots contain a library directory for each supported OS
// version as well as some unversioned libraries in the usual multiarch
// directory.
- unsigned Major;
- unsigned Minor;
- unsigned Micro;
- Triple.getEnvironmentVersion(Major, Minor, Micro);
- addPathIfExists(D,
- SysRoot + "/usr/lib/" + MultiarchTriple + "/" +
- llvm::to_string(Major),
- Paths);
+ addPathIfExists(
+ D,
+ concat(SysRoot, "/usr/lib", MultiarchTriple,
+ llvm::to_string(Triple.getEnvironmentVersion().getMajor())),
+ Paths);
}
- addPathIfExists(D, SysRoot + "/usr/lib/" + MultiarchTriple, Paths);
+ addPathIfExists(D, concat(SysRoot, "/usr/lib", MultiarchTriple), Paths);
// 64-bit OpenEmbedded sysroots may not have a /usr/lib dir. So they cannot
// find /usr/lib64 as it is referenced as /usr/lib/../lib64. So we handle
// this here.
if (Triple.getVendor() == llvm::Triple::OpenEmbedded &&
Triple.isArch64Bit())
- addPathIfExists(D, SysRoot + "/usr/" + OSLibDir, Paths);
+ addPathIfExists(D, concat(SysRoot, "/usr", OSLibDir), Paths);
else
- addPathIfExists(D, SysRoot + "/usr/lib/../" + OSLibDir, Paths);
+ addPathIfExists(D, concat(SysRoot, "/usr/lib/..", OSLibDir), Paths);
if (IsRISCV) {
StringRef ABIName = tools::riscv::getRISCVABI(Args, Triple);
- addPathIfExists(D, SysRoot + "/" + OSLibDir + "/" + ABIName, Paths);
- addPathIfExists(D, SysRoot + "/usr/" + OSLibDir + "/" + ABIName, Paths);
+ addPathIfExists(D, concat(SysRoot, "/", OSLibDir, ABIName), Paths);
+ addPathIfExists(D, concat(SysRoot, "/usr", OSLibDir, ABIName), Paths);
}
Generic_GCC::AddMultiarchPaths(D, SysRoot, OSLibDir, Paths);
- // Similar to the logic for GCC above, if we are currently running Clang
- // inside of the requested system root, add its parent library path to those
- // searched.
- // FIXME: It's not clear whether we should use the driver's installed
- // directory ('Dir' below) or the ResourceDir.
- if (StringRef(D.Dir).startswith(SysRoot))
- addPathIfExists(D, D.Dir + "/../lib", Paths);
-
- addPathIfExists(D, SysRoot + "/lib", Paths);
- addPathIfExists(D, SysRoot + "/usr/lib", Paths);
+ addPathIfExists(D, concat(SysRoot, "/lib"), Paths);
+ addPathIfExists(D, concat(SysRoot, "/usr/lib"), Paths);
}
ToolChain::RuntimeLibType Linux::GetDefaultRuntimeLibType() const {
@@ -316,6 +346,12 @@ ToolChain::RuntimeLibType Linux::GetDefaultRuntimeLibType() const {
return Generic_ELF::GetDefaultRuntimeLibType();
}
+unsigned Linux::GetDefaultDwarfVersion() const {
+ if (getTriple().isAndroid())
+ return 4;
+ return ToolChain::GetDefaultDwarfVersion();
+}
+
ToolChain::CXXStdlibType Linux::GetDefaultCXXStdlibType() const {
if (getTriple().isAndroid())
return ToolChain::CST_Libcxx;
@@ -347,6 +383,21 @@ std::string Linux::computeSysRoot() const {
return AndroidSysRootPath;
}
+ if (getTriple().isCSKY()) {
+ // CSKY toolchains use different names for sysroot folder.
+ if (!GCCInstallation.isValid())
+ return std::string();
+ // GCCInstallation.getInstallPath() =
+ // $GCCToolchainPath/lib/gcc/csky-linux-gnuabiv2/6.3.0
+ // Path = $GCCToolchainPath/csky-linux-gnuabiv2/libc
+ std::string Path = (GCCInstallation.getInstallPath() + "/../../../../" +
+ GCCInstallation.getTriple().str() + "/libc")
+ .str();
+ if (getVFS().exists(Path))
+ return Path;
+ return std::string();
+ }
+
if (!GCCInstallation.isValid() || !getTriple().isMIPS())
return std::string();
@@ -379,9 +430,17 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
const Distro Distro(getDriver().getVFS(), Triple);
- if (Triple.isAndroid())
+ if (Triple.isAndroid()) {
+ if (getSanitizerArgs(Args).needsHwasanRt() &&
+ !Triple.isAndroidVersionLT(34) && Triple.isArch64Bit()) {
+ // On Android 14 and newer, there is a special linker_hwasan64 that
+ // allows to run HWASan binaries on non-HWASan system images. This
+ // is also available on HWASan system images, so we can just always
+ // use that instead.
+ return "/system/bin/linker_hwasan64";
+ }
return Triple.isArch64Bit() ? "/system/bin/linker64" : "/system/bin/linker";
-
+ }
if (Triple.isMusl()) {
std::string ArchName;
bool IsArm = false;
@@ -410,6 +469,9 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
(Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
tools::arm::getARMFloatABI(*this, Args) == tools::arm::FloatABI::Hard))
ArchName += "hf";
+ if (Arch == llvm::Triple::ppc &&
+ Triple.getSubArch() == llvm::Triple::PPCSubArch_spe)
+ ArchName = "powerpc-sf";
return "/lib/ld-musl-" + ArchName + ".so.1";
}
@@ -441,6 +503,22 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
Loader = HF ? "ld-linux-armhf.so.3" : "ld-linux.so.3";
break;
}
+ case llvm::Triple::loongarch32: {
+ LibDir = "lib32";
+ Loader =
+ ("ld-linux-loongarch-" +
+ tools::loongarch::getLoongArchABI(getDriver(), Args, Triple) + ".so.1")
+ .str();
+ break;
+ }
+ case llvm::Triple::loongarch64: {
+ LibDir = "lib64";
+ Loader =
+ ("ld-linux-loongarch-" +
+ tools::loongarch::getLoongArchABI(getDriver(), Args, Triple) + ".so.1")
+ .str();
+ break;
+ }
case llvm::Triple::m68k:
LibDir = "lib";
Loader = "ld.so.1";
@@ -449,7 +527,7 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
case llvm::Triple::mipsel:
case llvm::Triple::mips64:
case llvm::Triple::mips64el: {
- bool IsNaN2008 = tools::mips::isNaN2008(Args, Triple);
+ bool IsNaN2008 = tools::mips::isNaN2008(getDriver(), Args, Triple);
LibDir = "lib" + tools::mips::getMipsABILibSuffix(Args, Triple);
@@ -520,6 +598,11 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
}
case llvm::Triple::ve:
return "/opt/nec/ve/lib/ld-linux-ve.so.1";
+ case llvm::Triple::csky: {
+ LibDir = "lib";
+ Loader = "ld.so.1";
+ break;
+ }
}
if (Distro == Distro::Exherbo &&
@@ -551,7 +634,7 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
return;
// LOCAL_INCLUDE_DIR
- addSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/local/include");
+ addSystemInclude(DriverArgs, CC1Args, concat(SysRoot, "/usr/local/include"));
// TOOL_INCLUDE_DIR
AddMultilibIncludeArgs(DriverArgs, CC1Args);
@@ -572,9 +655,10 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
// /usr/include.
std::string MultiarchIncludeDir = getMultiarchTriple(D, getTriple(), SysRoot);
if (!MultiarchIncludeDir.empty() &&
- D.getVFS().exists(SysRoot + "/usr/include/" + MultiarchIncludeDir))
- addExternCSystemInclude(DriverArgs, CC1Args,
- SysRoot + "/usr/include/" + MultiarchIncludeDir);
+ D.getVFS().exists(concat(SysRoot, "/usr/include", MultiarchIncludeDir)))
+ addExternCSystemInclude(
+ DriverArgs, CC1Args,
+ concat(SysRoot, "/usr/include", MultiarchIncludeDir));
if (getTriple().getOS() == llvm::Triple::RTEMS)
return;
@@ -582,9 +666,9 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
// Add an include of '/include' directly. This isn't provided by default by
// system GCCs, but is often used with cross-compiling GCCs, and harmless to
// add even when Clang is acting as-if it were a system compiler.
- addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/include");
+ addExternCSystemInclude(DriverArgs, CC1Args, concat(SysRoot, "/include"));
- addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/include");
+ addExternCSystemInclude(DriverArgs, CC1Args, concat(SysRoot, "/usr/include"));
if (!DriverArgs.hasArg(options::OPT_nobuiltininc) && getTriple().isMusl())
addSystemInclude(DriverArgs, CC1Args, ResourceDirInclude);
@@ -633,12 +717,25 @@ void Linux::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
void Linux::AddCudaIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
+ CudaInstallation->AddCudaIncludeArgs(DriverArgs, CC1Args);
}
void Linux::AddHIPIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
+ RocmInstallation->AddHIPIncludeArgs(DriverArgs, CC1Args);
+}
+
+void Linux::AddHIPRuntimeLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ CmdArgs.push_back(
+ Args.MakeArgString(StringRef("-L") + RocmInstallation->getLibPath()));
+
+ if (Args.hasFlag(options::OPT_frtlib_add_rpath,
+ options::OPT_fno_rtlib_add_rpath, false))
+ CmdArgs.append(
+ {"-rpath", Args.MakeArgString(RocmInstallation->getLibPath())});
+
+ CmdArgs.push_back("-lamdhip64");
}
void Linux::AddIAMCUIncludeArgs(const ArgList &DriverArgs,
@@ -651,9 +748,9 @@ void Linux::AddIAMCUIncludeArgs(const ArgList &DriverArgs,
}
}
-bool Linux::isPIEDefault() const {
- return (getTriple().isAndroid() && !getTriple().isAndroidVersionLT(16)) ||
- getTriple().isMusl() || getSanitizerArgs().requiresPIE();
+bool Linux::isPIEDefault(const llvm::opt::ArgList &Args) const {
+ return CLANG_DEFAULT_PIE_ON_LINUX || getTriple().isAndroid() ||
+ getTriple().isMusl() || getSanitizerArgs(Args).requiresPIE();
}
bool Linux::IsAArch64OutlineAtomicsDefault(const ArgList &Args) const {
@@ -669,12 +766,8 @@ bool Linux::IsAArch64OutlineAtomicsDefault(const ArgList &Args) const {
return true;
}
-bool Linux::isNoExecStackDefault() const {
- return getTriple().isAndroid();
-}
-
bool Linux::IsMathErrnoDefault() const {
- if (getTriple().isAndroid())
+ if (getTriple().isAndroid() || getTriple().isMusl())
return false;
return Generic_ELF::IsMathErrnoDefault();
}
@@ -692,8 +785,10 @@ SanitizerMask Linux::getSupportedSanitizers() const {
getTriple().getArch() == llvm::Triple::thumb ||
getTriple().getArch() == llvm::Triple::armeb ||
getTriple().getArch() == llvm::Triple::thumbeb;
+ const bool IsLoongArch64 = getTriple().getArch() == llvm::Triple::loongarch64;
const bool IsRISCV64 = getTriple().getArch() == llvm::Triple::riscv64;
const bool IsSystemZ = getTriple().getArch() == llvm::Triple::systemz;
+ const bool IsHexagon = getTriple().getArch() == llvm::Triple::hexagon;
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
Res |= SanitizerKind::PointerCompare;
@@ -704,24 +799,28 @@ SanitizerMask Linux::getSupportedSanitizers() const {
Res |= SanitizerKind::Memory;
Res |= SanitizerKind::Vptr;
Res |= SanitizerKind::SafeStack;
- if (IsX86_64 || IsMIPS64 || IsAArch64)
+ if (IsX86_64 || IsMIPS64 || IsAArch64 || IsLoongArch64)
Res |= SanitizerKind::DataFlow;
if (IsX86_64 || IsMIPS64 || IsAArch64 || IsX86 || IsArmArch || IsPowerPC64 ||
- IsRISCV64 || IsSystemZ)
+ IsRISCV64 || IsSystemZ || IsHexagon || IsLoongArch64)
Res |= SanitizerKind::Leak;
- if (IsX86_64 || IsMIPS64 || IsAArch64 || IsPowerPC64 || IsSystemZ)
+ if (IsX86_64 || IsMIPS64 || IsAArch64 || IsPowerPC64 || IsSystemZ ||
+ IsLoongArch64 || IsRISCV64)
Res |= SanitizerKind::Thread;
- if (IsX86_64)
+ if (IsX86_64 || IsSystemZ)
Res |= SanitizerKind::KernelMemory;
- if (IsX86 || IsX86_64)
- Res |= SanitizerKind::Function;
if (IsX86_64 || IsMIPS64 || IsAArch64 || IsX86 || IsMIPS || IsArmArch ||
- IsPowerPC64)
+ IsPowerPC64 || IsHexagon || IsLoongArch64 || IsRISCV64)
Res |= SanitizerKind::Scudo;
- if (IsX86_64 || IsAArch64) {
+ if (IsX86_64 || IsAArch64 || IsRISCV64) {
Res |= SanitizerKind::HWAddress;
+ }
+ if (IsX86_64 || IsAArch64) {
Res |= SanitizerKind::KernelHWAddress;
}
+ // Work around "Cannot represent a difference across sections".
+ if (getTriple().getArch() == llvm::Triple::ppc64)
+ Res &= ~SanitizerKind::Function;
return Res;
}
@@ -758,3 +857,9 @@ void Linux::addExtraOpts(llvm::opt::ArgStringList &CmdArgs) const {
for (const auto &Opt : ExtraOpts)
CmdArgs.push_back(Opt.c_str());
}
+
+const char *Linux::getDefaultLinker() const {
+ if (getTriple().isAndroid())
+ return "ld.lld";
+ return Generic_ELF::getDefaultLinker();
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h
index 169a37c44072..524391743090 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h
@@ -37,14 +37,16 @@ public:
llvm::opt::ArgStringList &CC1Args) const override;
void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void AddHIPRuntimeLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
void AddIAMCUIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
RuntimeLibType GetDefaultRuntimeLibType() const override;
+ unsigned GetDefaultDwarfVersion() const override;
CXXStdlibType GetDefaultCXXStdlibType() const override;
bool
IsAArch64OutlineAtomicsDefault(const llvm::opt::ArgList &Args) const override;
- bool isPIEDefault() const override;
- bool isNoExecStackDefault() const override;
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override;
bool IsMathErrnoDefault() const override;
SanitizerMask getSupportedSanitizers() const override;
void addProfileRTLibs(const llvm::opt::ArgList &Args,
@@ -61,6 +63,8 @@ public:
const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
const llvm::fltSemantics *FPType = nullptr) const override;
+ const char *getDefaultLinker() const override;
+
protected:
Tool *buildAssembler() const override;
Tool *buildLinker() const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.cpp
index 96994ba77fac..07e875c64960 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.cpp
@@ -101,7 +101,7 @@ void msp430::getMSP430TargetFeatures(const Driver &D, const ArgList &Args,
Features.push_back("+hwmultf5");
} else {
D.Diag(clang::diag::err_drv_unsupported_option_argument)
- << HWMultArg->getAsString(Args) << HWMult;
+ << HWMultArg->getSpelling() << HWMult;
}
}
@@ -142,7 +142,7 @@ std::string MSP430ToolChain::computeSysRoot() const {
else
llvm::sys::path::append(Dir, getDriver().Dir, "..");
- return std::string(Dir.str());
+ return std::string(Dir);
}
void MSP430ToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
@@ -166,7 +166,7 @@ void MSP430ToolChain::addClangTargetOptions(const ArgList &DriverArgs,
return;
const StringRef MCU = MCUArg->getValue();
- if (MCU.startswith("msp430i")) {
+ if (MCU.starts_with("msp430i")) {
// 'i' should be in lower case as it's defined in TI MSP430-GCC headers
CC1Args.push_back(DriverArgs.MakeArgString(
"-D__MSP430i" + MCU.drop_front(7).upper() + "__"));
@@ -279,8 +279,7 @@ void msp430::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_r, options::OPT_g_Group))
CmdArgs.push_back("--gc-sections");
- Args.AddAllArgs(CmdArgs, {
- options::OPT_e,
+ Args.addAllArgs(CmdArgs, {
options::OPT_n,
options::OPT_s,
options::OPT_t,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.h
index 9d247ca3a896..a224c6375411 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.h
@@ -37,7 +37,9 @@ public:
Action::OffloadKind) const override;
bool isPICDefault() const override { return false; }
- bool isPIEDefault() const override { return false; }
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override {
+ return false;
+ }
bool isPICDefaultForced() const override { return true; }
UnwindLibType
@@ -57,7 +59,7 @@ private:
namespace tools {
namespace msp430 {
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
public:
Linker(const ToolChain &TC) : Tool("MSP430::Linker", "msp430-elf-ld", TC) {}
bool hasIntegratedCPP() const override { return false; }
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp
index 0dc94a4c6c7d..396522225158 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp
@@ -18,17 +18,16 @@
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/TargetParser/Host.h"
#include <cstdio>
#ifdef _WIN32
@@ -40,23 +39,6 @@
#include <windows.h>
#endif
-#ifdef _MSC_VER
-// Don't support SetupApi on MinGW.
-#define USE_MSVC_SETUP_API
-
-// Make sure this comes before MSVCSetupApi.h
-#include <comdef.h>
-
-#include "MSVCSetupApi.h"
-#include "llvm/Support/COM.h"
-_COM_SMARTPTR_TYPEDEF(ISetupConfiguration, __uuidof(ISetupConfiguration));
-_COM_SMARTPTR_TYPEDEF(ISetupConfiguration2, __uuidof(ISetupConfiguration2));
-_COM_SMARTPTR_TYPEDEF(ISetupHelper, __uuidof(ISetupHelper));
-_COM_SMARTPTR_TYPEDEF(IEnumSetupInstances, __uuidof(IEnumSetupInstances));
-_COM_SMARTPTR_TYPEDEF(ISetupInstance, __uuidof(ISetupInstance));
-_COM_SMARTPTR_TYPEDEF(ISetupInstance2, __uuidof(ISetupInstance2));
-#endif
-
using namespace clang::driver;
using namespace clang::driver::toolchains;
using namespace clang::driver::tools;
@@ -70,294 +52,6 @@ static bool canExecute(llvm::vfs::FileSystem &VFS, StringRef Path) {
return (Status->getPermissions() & llvm::sys::fs::perms::all_exe) != 0;
}
-// Defined below.
-// Forward declare this so there aren't too many things above the constructor.
-static bool getSystemRegistryString(const char *keyPath, const char *valueName,
- std::string &value, std::string *phValue);
-
-static std::string getHighestNumericTupleInDirectory(llvm::vfs::FileSystem &VFS,
- StringRef Directory) {
- std::string Highest;
- llvm::VersionTuple HighestTuple;
-
- std::error_code EC;
- for (llvm::vfs::directory_iterator DirIt = VFS.dir_begin(Directory, EC),
- DirEnd;
- !EC && DirIt != DirEnd; DirIt.increment(EC)) {
- auto Status = VFS.status(DirIt->path());
- if (!Status || !Status->isDirectory())
- continue;
- StringRef CandidateName = llvm::sys::path::filename(DirIt->path());
- llvm::VersionTuple Tuple;
- if (Tuple.tryParse(CandidateName)) // tryParse() returns true on error.
- continue;
- if (Tuple > HighestTuple) {
- HighestTuple = Tuple;
- Highest = CandidateName.str();
- }
- }
-
- return Highest;
-}
-
-// Check command line arguments to try and find a toolchain.
-static bool
-findVCToolChainViaCommandLine(llvm::vfs::FileSystem &VFS, const ArgList &Args,
- std::string &Path,
- MSVCToolChain::ToolsetLayout &VSLayout) {
- // Don't validate the input; trust the value supplied by the user.
- // The primary motivation is to prevent unnecessary file and registry access.
- if (Arg *A = Args.getLastArg(options::OPT__SLASH_vctoolsdir,
- options::OPT__SLASH_winsysroot)) {
- if (A->getOption().getID() == options::OPT__SLASH_winsysroot) {
- llvm::SmallString<128> ToolsPath(A->getValue());
- llvm::sys::path::append(ToolsPath, "VC", "Tools", "MSVC");
- std::string VCToolsVersion;
- if (Arg *A = Args.getLastArg(options::OPT__SLASH_vctoolsversion))
- VCToolsVersion = A->getValue();
- else
- VCToolsVersion = getHighestNumericTupleInDirectory(VFS, ToolsPath);
- llvm::sys::path::append(ToolsPath, VCToolsVersion);
- Path = std::string(ToolsPath.str());
- } else {
- Path = A->getValue();
- }
- VSLayout = MSVCToolChain::ToolsetLayout::VS2017OrNewer;
- return true;
- }
- return false;
-}
-
-// Check various environment variables to try and find a toolchain.
-static bool
-findVCToolChainViaEnvironment(llvm::vfs::FileSystem &VFS, std::string &Path,
- MSVCToolChain::ToolsetLayout &VSLayout) {
- // These variables are typically set by vcvarsall.bat
- // when launching a developer command prompt.
- if (llvm::Optional<std::string> VCToolsInstallDir =
- llvm::sys::Process::GetEnv("VCToolsInstallDir")) {
- // This is only set by newer Visual Studios, and it leads straight to
- // the toolchain directory.
- Path = std::move(*VCToolsInstallDir);
- VSLayout = MSVCToolChain::ToolsetLayout::VS2017OrNewer;
- return true;
- }
- if (llvm::Optional<std::string> VCInstallDir =
- llvm::sys::Process::GetEnv("VCINSTALLDIR")) {
- // If the previous variable isn't set but this one is, then we've found
- // an older Visual Studio. This variable is set by newer Visual Studios too,
- // so this check has to appear second.
- // In older Visual Studios, the VC directory is the toolchain.
- Path = std::move(*VCInstallDir);
- VSLayout = MSVCToolChain::ToolsetLayout::OlderVS;
- return true;
- }
-
- // We couldn't find any VC environment variables. Let's walk through PATH and
- // see if it leads us to a VC toolchain bin directory. If it does, pick the
- // first one that we find.
- if (llvm::Optional<std::string> PathEnv =
- llvm::sys::Process::GetEnv("PATH")) {
- llvm::SmallVector<llvm::StringRef, 8> PathEntries;
- llvm::StringRef(*PathEnv).split(PathEntries, llvm::sys::EnvPathSeparator);
- for (llvm::StringRef PathEntry : PathEntries) {
- if (PathEntry.empty())
- continue;
-
- llvm::SmallString<256> ExeTestPath;
-
- // If cl.exe doesn't exist, then this definitely isn't a VC toolchain.
- ExeTestPath = PathEntry;
- llvm::sys::path::append(ExeTestPath, "cl.exe");
- if (!VFS.exists(ExeTestPath))
- continue;
-
- // cl.exe existing isn't a conclusive test for a VC toolchain; clang also
- // has a cl.exe. So let's check for link.exe too.
- ExeTestPath = PathEntry;
- llvm::sys::path::append(ExeTestPath, "link.exe");
- if (!VFS.exists(ExeTestPath))
- continue;
-
- // whatever/VC/bin --> old toolchain, VC dir is toolchain dir.
- llvm::StringRef TestPath = PathEntry;
- bool IsBin =
- llvm::sys::path::filename(TestPath).equals_insensitive("bin");
- if (!IsBin) {
- // Strip any architecture subdir like "amd64".
- TestPath = llvm::sys::path::parent_path(TestPath);
- IsBin = llvm::sys::path::filename(TestPath).equals_insensitive("bin");
- }
- if (IsBin) {
- llvm::StringRef ParentPath = llvm::sys::path::parent_path(TestPath);
- llvm::StringRef ParentFilename = llvm::sys::path::filename(ParentPath);
- if (ParentFilename.equals_insensitive("VC")) {
- Path = std::string(ParentPath);
- VSLayout = MSVCToolChain::ToolsetLayout::OlderVS;
- return true;
- }
- if (ParentFilename.equals_insensitive("x86ret") ||
- ParentFilename.equals_insensitive("x86chk") ||
- ParentFilename.equals_insensitive("amd64ret") ||
- ParentFilename.equals_insensitive("amd64chk")) {
- Path = std::string(ParentPath);
- VSLayout = MSVCToolChain::ToolsetLayout::DevDivInternal;
- return true;
- }
-
- } else {
- // This could be a new (>=VS2017) toolchain. If it is, we should find
- // path components with these prefixes when walking backwards through
- // the path.
- // Note: empty strings match anything.
- llvm::StringRef ExpectedPrefixes[] = {"", "Host", "bin", "",
- "MSVC", "Tools", "VC"};
-
- auto It = llvm::sys::path::rbegin(PathEntry);
- auto End = llvm::sys::path::rend(PathEntry);
- for (llvm::StringRef Prefix : ExpectedPrefixes) {
- if (It == End)
- goto NotAToolChain;
- if (!It->startswith_insensitive(Prefix))
- goto NotAToolChain;
- ++It;
- }
-
- // We've found a new toolchain!
- // Back up 3 times (/bin/Host/arch) to get the root path.
- llvm::StringRef ToolChainPath(PathEntry);
- for (int i = 0; i < 3; ++i)
- ToolChainPath = llvm::sys::path::parent_path(ToolChainPath);
-
- Path = std::string(ToolChainPath);
- VSLayout = MSVCToolChain::ToolsetLayout::VS2017OrNewer;
- return true;
- }
-
- NotAToolChain:
- continue;
- }
- }
- return false;
-}
-
-// Query the Setup Config server for installs, then pick the newest version
-// and find its default VC toolchain.
-// This is the preferred way to discover new Visual Studios, as they're no
-// longer listed in the registry.
-static bool
-findVCToolChainViaSetupConfig(llvm::vfs::FileSystem &VFS, std::string &Path,
- MSVCToolChain::ToolsetLayout &VSLayout) {
-#if !defined(USE_MSVC_SETUP_API)
- return false;
-#else
- // FIXME: This really should be done once in the top-level program's main
- // function, as it may have already been initialized with a different
- // threading model otherwise.
- llvm::sys::InitializeCOMRAII COM(llvm::sys::COMThreadingMode::SingleThreaded);
- HRESULT HR;
-
- // _com_ptr_t will throw a _com_error if a COM calls fail.
- // The LLVM coding standards forbid exception handling, so we'll have to
- // stop them from being thrown in the first place.
- // The destructor will put the regular error handler back when we leave
- // this scope.
- struct SuppressCOMErrorsRAII {
- static void __stdcall handler(HRESULT hr, IErrorInfo *perrinfo) {}
-
- SuppressCOMErrorsRAII() { _set_com_error_handler(handler); }
-
- ~SuppressCOMErrorsRAII() { _set_com_error_handler(_com_raise_error); }
-
- } COMErrorSuppressor;
-
- ISetupConfigurationPtr Query;
- HR = Query.CreateInstance(__uuidof(SetupConfiguration));
- if (FAILED(HR))
- return false;
-
- IEnumSetupInstancesPtr EnumInstances;
- HR = ISetupConfiguration2Ptr(Query)->EnumAllInstances(&EnumInstances);
- if (FAILED(HR))
- return false;
-
- ISetupInstancePtr Instance;
- HR = EnumInstances->Next(1, &Instance, nullptr);
- if (HR != S_OK)
- return false;
-
- ISetupInstancePtr NewestInstance;
- Optional<uint64_t> NewestVersionNum;
- do {
- bstr_t VersionString;
- uint64_t VersionNum;
- HR = Instance->GetInstallationVersion(VersionString.GetAddress());
- if (FAILED(HR))
- continue;
- HR = ISetupHelperPtr(Query)->ParseVersion(VersionString, &VersionNum);
- if (FAILED(HR))
- continue;
- if (!NewestVersionNum || (VersionNum > NewestVersionNum)) {
- NewestInstance = Instance;
- NewestVersionNum = VersionNum;
- }
- } while ((HR = EnumInstances->Next(1, &Instance, nullptr)) == S_OK);
-
- if (!NewestInstance)
- return false;
-
- bstr_t VCPathWide;
- HR = NewestInstance->ResolvePath(L"VC", VCPathWide.GetAddress());
- if (FAILED(HR))
- return false;
-
- std::string VCRootPath;
- llvm::convertWideToUTF8(std::wstring(VCPathWide), VCRootPath);
-
- llvm::SmallString<256> ToolsVersionFilePath(VCRootPath);
- llvm::sys::path::append(ToolsVersionFilePath, "Auxiliary", "Build",
- "Microsoft.VCToolsVersion.default.txt");
-
- auto ToolsVersionFile = llvm::MemoryBuffer::getFile(ToolsVersionFilePath);
- if (!ToolsVersionFile)
- return false;
-
- llvm::SmallString<256> ToolchainPath(VCRootPath);
- llvm::sys::path::append(ToolchainPath, "Tools", "MSVC",
- ToolsVersionFile->get()->getBuffer().rtrim());
- auto Status = VFS.status(ToolchainPath);
- if (!Status || !Status->isDirectory())
- return false;
-
- Path = std::string(ToolchainPath.str());
- VSLayout = MSVCToolChain::ToolsetLayout::VS2017OrNewer;
- return true;
-#endif
-}
-
-// Look in the registry for Visual Studio installs, and use that to get
-// a toolchain path. VS2017 and newer don't get added to the registry.
-// So if we find something here, we know that it's an older version.
-static bool findVCToolChainViaRegistry(std::string &Path,
- MSVCToolChain::ToolsetLayout &VSLayout) {
- std::string VSInstallPath;
- if (getSystemRegistryString(R"(SOFTWARE\Microsoft\VisualStudio\$VERSION)",
- "InstallDir", VSInstallPath, nullptr) ||
- getSystemRegistryString(R"(SOFTWARE\Microsoft\VCExpress\$VERSION)",
- "InstallDir", VSInstallPath, nullptr)) {
- if (!VSInstallPath.empty()) {
- llvm::SmallString<256> VCPath(llvm::StringRef(
- VSInstallPath.c_str(), VSInstallPath.find(R"(\Common7\IDE)")));
- llvm::sys::path::append(VCPath, "VC");
-
- Path = std::string(VCPath.str());
- VSLayout = MSVCToolChain::ToolsetLayout::OlderVS;
- return true;
- }
- }
- return false;
-}
-
// Try to find Exe from a Visual Studio distribution. This first tries to find
// an installed copy of Visual Studio and, failing that, looks in the PATH,
// making sure that whatever executable that's found is not a same-named exe
@@ -365,8 +59,8 @@ static bool findVCToolChainViaRegistry(std::string &Path,
static std::string FindVisualStudioExecutable(const ToolChain &TC,
const char *Exe) {
const auto &MSVC = static_cast<const toolchains::MSVCToolChain &>(TC);
- SmallString<128> FilePath(MSVC.getSubDirectoryPath(
- toolchains::MSVCToolChain::SubDirectoryType::Bin));
+ SmallString<128> FilePath(
+ MSVC.getSubDirectoryPath(llvm::SubDirectoryType::Bin));
llvm::sys::path::append(FilePath, Exe);
return std::string(canExecute(TC.getVFS(), FilePath) ? FilePath.str() : Exe);
}
@@ -386,7 +80,7 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.MakeArgString(std::string("-out:") + Output.getFilename()));
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles) &&
- !C.getDriver().IsCLMode()) {
+ !C.getDriver().IsCLMode() && !C.getDriver().IsFlangMode()) {
CmdArgs.push_back("-defaultlib:libcmt");
CmdArgs.push_back("-defaultlib:oldnames");
}
@@ -396,17 +90,29 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// the environment variable is set however, assume the user knows what
// they're doing. If the user passes /vctoolsdir or /winsdkdir, trust that
// over env vars.
+ if (const Arg *A = Args.getLastArg(options::OPT__SLASH_diasdkdir,
+ options::OPT__SLASH_winsysroot)) {
+ // cl.exe doesn't find the DIA SDK automatically, so this too requires
+ // explicit flags and doesn't automatically look in "DIA SDK" relative
+ // to the path we found for VCToolChainPath.
+ llvm::SmallString<128> DIAPath(A->getValue());
+ if (A->getOption().getID() == options::OPT__SLASH_winsysroot)
+ llvm::sys::path::append(DIAPath, "DIA SDK");
+
+ // The DIA SDK always uses the legacy vc arch, even in new MSVC versions.
+ llvm::sys::path::append(DIAPath, "lib",
+ llvm::archToLegacyVCArch(TC.getArch()));
+ CmdArgs.push_back(Args.MakeArgString(Twine("-libpath:") + DIAPath));
+ }
if (!llvm::sys::Process::GetEnv("LIB") ||
Args.getLastArg(options::OPT__SLASH_vctoolsdir,
options::OPT__SLASH_winsysroot)) {
CmdArgs.push_back(Args.MakeArgString(
Twine("-libpath:") +
- TC.getSubDirectoryPath(
- toolchains::MSVCToolChain::SubDirectoryType::Lib)));
+ TC.getSubDirectoryPath(llvm::SubDirectoryType::Lib)));
CmdArgs.push_back(Args.MakeArgString(
Twine("-libpath:") +
- TC.getSubDirectoryPath(toolchains::MSVCToolChain::SubDirectoryType::Lib,
- "atlmfc")));
+ TC.getSubDirectoryPath(llvm::SubDirectoryType::Lib, "atlmfc")));
}
if (!llvm::sys::Process::GetEnv("LIB") ||
Args.getLastArg(options::OPT__SLASH_winsdkdir,
@@ -423,6 +129,16 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.MakeArgString(std::string("-libpath:") + WindowsSdkLibPath));
}
+ if (C.getDriver().IsFlangMode()) {
+ addFortranRuntimeLibraryPath(TC, Args, CmdArgs);
+ addFortranRuntimeLibs(TC, Args, CmdArgs);
+
+ // Inform the MSVC linker that we're generating a console application, i.e.
+ // one with `main` as the "user-defined" entry point. The `main` function is
+ // defined in flang's runtime libraries.
+ CmdArgs.push_back("/subsystem:console");
+ }
+
// Add the compiler-rt library directories to libpath if they exist to help
// the linker find the various sanitizer, builtin, and profiling runtimes.
for (const auto &LibPath : TC.getLibraryPaths()) {
@@ -442,6 +158,11 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_g_Group, options::OPT__SLASH_Z7))
CmdArgs.push_back("-debug");
+ // If we specify /hotpatch, let the linker add padding in front of each
+ // function, like MSVC does.
+ if (Args.hasArg(options::OPT_fms_hotpatch, options::OPT__SLASH_hotpatch))
+ CmdArgs.push_back("-functionpadmin");
+
// Pass on /Brepro if it was passed to the compiler.
// Note that /Brepro maps to -mno-incremental-linker-compatible.
bool DefaultIncrementalLinkerCompatible =
@@ -461,7 +182,7 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(std::string("-implib:") + ImplibName));
}
- if (TC.getSanitizerArgs().needsFuzzer()) {
+ if (TC.getSanitizerArgs(Args).needsFuzzer()) {
if (!Args.hasArg(options::OPT_shared))
CmdArgs.push_back(
Args.MakeArgString(std::string("-wholearchive:") +
@@ -472,10 +193,10 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString("-incremental:no"));
}
- if (TC.getSanitizerArgs().needsAsanRt()) {
+ if (TC.getSanitizerArgs(Args).needsAsanRt()) {
CmdArgs.push_back(Args.MakeArgString("-debug"));
CmdArgs.push_back(Args.MakeArgString("-incremental:no"));
- if (TC.getSanitizerArgs().needsSharedRt() ||
+ if (TC.getSanitizerArgs(Args).needsSharedRt() ||
Args.hasArg(options::OPT__SLASH_MD, options::OPT__SLASH_MDd)) {
for (const auto &Lib : {"asan_dynamic", "asan_dynamic_runtime_thunk"})
CmdArgs.push_back(TC.getCompilerRTArgString(Args, Lib));
@@ -506,7 +227,7 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgValues(CmdArgs, options::OPT__SLASH_link);
// Control Flow Guard checks
- if (Arg *A = Args.getLastArg(options::OPT__SLASH_guard)) {
+ for (const Arg *A : Args.filtered(options::OPT__SLASH_guard)) {
StringRef GuardArgs = A->getValue();
if (GuardArgs.equals_insensitive("cf") ||
GuardArgs.equals_insensitive("cf,nochecks")) {
@@ -548,6 +269,26 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
AddRunTimeLibs(TC, TC.getDriver(), CmdArgs, Args);
}
+ StringRef Linker =
+ Args.getLastArgValue(options::OPT_fuse_ld_EQ, CLANG_DEFAULT_LINKER);
+ if (Linker.empty())
+ Linker = "link";
+ // We need to translate 'lld' into 'lld-link'.
+ else if (Linker.equals_insensitive("lld"))
+ Linker = "lld-link";
+
+ if (Linker == "lld-link") {
+ for (Arg *A : Args.filtered(options::OPT_vfsoverlay))
+ CmdArgs.push_back(
+ Args.MakeArgString(std::string("/vfsoverlay:") + A->getValue()));
+
+ if (C.getDriver().isUsingLTO() &&
+ Args.hasFlag(options::OPT_gsplit_dwarf, options::OPT_gno_split_dwarf,
+ false))
+ CmdArgs.push_back(Args.MakeArgString(Twine("/dwodir:") +
+ Output.getFilename() + "_dwo"));
+ }
+
// Add filenames, libraries, and other linker inputs.
for (const auto &Input : Inputs) {
if (Input.isFilename()) {
@@ -561,7 +302,7 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (A.getOption().matches(options::OPT_l)) {
StringRef Lib = A.getValue();
const char *LinkLibArg;
- if (Lib.endswith(".lib"))
+ if (Lib.ends_with(".lib"))
LinkLibArg = Args.MakeArgString(Lib);
else
LinkLibArg = Args.MakeArgString(Lib + ".lib");
@@ -574,21 +315,15 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
A.renderAsInput(Args, CmdArgs);
}
+ addHIPRuntimeLibArgs(TC, C, Args, CmdArgs);
+
TC.addProfileRTLibs(Args, CmdArgs);
std::vector<const char *> Environment;
- // We need to special case some linker paths. In the case of lld, we need to
- // translate 'lld' into 'lld-link', and in the case of the regular msvc
+ // We need to special case some linker paths. In the case of the regular msvc
// linker, we need to use a special search algorithm.
llvm::SmallString<128> linkPath;
- StringRef Linker
- = Args.getLastArgValue(options::OPT_fuse_ld_EQ, CLANG_DEFAULT_LINKER);
- if (Linker.empty())
- Linker = "link";
- if (Linker.equals_insensitive("lld"))
- Linker = "lld-link";
-
if (Linker.equals_insensitive("link")) {
// If we're using the MSVC linker, it's not sufficient to just use link
// from the program PATH, because other environments like GnuWin32 install
@@ -608,13 +343,18 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ // Clang handles passing the proper asan libs to the linker, which goes
+ // against link.exe's /INFERASANLIBS which automatically finds asan libs.
+ if (TC.getSanitizerArgs(Args).needsAsanRt())
+ CmdArgs.push_back("/INFERASANLIBS:NO");
+
#ifdef _WIN32
// When cross-compiling with VS2017 or newer, link.exe expects to have
// its containing bin directory at the top of PATH, followed by the
// native target bin directory.
// e.g. when compiling for x86 on an x64 host, PATH should start with:
// /bin/Hostx64/x86;/bin/Hostx64/x64
- // This doesn't attempt to handle ToolsetLayout::DevDivInternal.
+ // This doesn't attempt to handle llvm::ToolsetLayout::DevDivInternal.
if (TC.getIsVS2017OrNewer() &&
llvm::Triple(llvm::sys::getProcessTriple()).getArch() != TC.getArch()) {
auto HostArch = llvm::Triple(llvm::sys::getProcessTriple()).getArch();
@@ -648,14 +388,13 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// find it.
for (const char *Cursor = EnvBlock.data(); *Cursor != '\0';) {
llvm::StringRef EnvVar(Cursor);
- if (EnvVar.startswith_insensitive("path=")) {
- using SubDirectoryType = toolchains::MSVCToolChain::SubDirectoryType;
+ if (EnvVar.starts_with_insensitive("path=")) {
constexpr size_t PrefixLen = 5; // strlen("path=")
Environment.push_back(Args.MakeArgString(
EnvVar.substr(0, PrefixLen) +
- TC.getSubDirectoryPath(SubDirectoryType::Bin) +
+ TC.getSubDirectoryPath(llvm::SubDirectoryType::Bin) +
llvm::Twine(llvm::sys::EnvPathSeparator) +
- TC.getSubDirectoryPath(SubDirectoryType::Bin, "", HostArch) +
+ TC.getSubDirectoryPath(llvm::SubDirectoryType::Bin, HostArch) +
(EnvVar.size() > PrefixLen
? llvm::Twine(llvm::sys::EnvPathSeparator) +
EnvVar.substr(PrefixLen)
@@ -688,14 +427,29 @@ MSVCToolChain::MSVCToolChain(const Driver &D, const llvm::Triple &Triple,
if (getDriver().getInstalledDir() != getDriver().Dir)
getProgramPaths().push_back(getDriver().Dir);
+ std::optional<llvm::StringRef> VCToolsDir, VCToolsVersion;
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_vctoolsdir))
+ VCToolsDir = A->getValue();
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_vctoolsversion))
+ VCToolsVersion = A->getValue();
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_winsdkdir))
+ WinSdkDir = A->getValue();
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_winsdkversion))
+ WinSdkVersion = A->getValue();
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_winsysroot))
+ WinSysRoot = A->getValue();
+
// Check the command line first, that's the user explicitly telling us what to
// use. Check the environment next, in case we're being invoked from a VS
// command prompt. Failing that, just try to find the newest Visual Studio
// version we can and use its default VC toolchain.
- findVCToolChainViaCommandLine(getVFS(), Args, VCToolChainPath, VSLayout) ||
- findVCToolChainViaEnvironment(getVFS(), VCToolChainPath, VSLayout) ||
- findVCToolChainViaSetupConfig(getVFS(), VCToolChainPath, VSLayout) ||
- findVCToolChainViaRegistry(VCToolChainPath, VSLayout);
+ llvm::findVCToolChainViaCommandLine(getVFS(), VCToolsDir, VCToolsVersion,
+ WinSysRoot, VCToolChainPath, VSLayout) ||
+ llvm::findVCToolChainViaEnvironment(getVFS(), VCToolChainPath,
+ VSLayout) ||
+ llvm::findVCToolChainViaSetupConfig(getVFS(), VCToolsVersion,
+ VCToolChainPath, VSLayout) ||
+ llvm::findVCToolChainViaRegistry(VCToolChainPath, VSLayout);
}
Tool *MSVCToolChain::buildLinker() const {
@@ -709,453 +463,104 @@ Tool *MSVCToolChain::buildAssembler() const {
return nullptr;
}
-bool MSVCToolChain::IsIntegratedAssemblerDefault() const {
- return true;
-}
-
-bool MSVCToolChain::IsUnwindTablesDefault(const ArgList &Args) const {
+ToolChain::UnwindTableLevel
+MSVCToolChain::getDefaultUnwindTableLevel(const ArgList &Args) const {
// Don't emit unwind tables by default for MachO targets.
if (getTriple().isOSBinFormatMachO())
- return false;
+ return UnwindTableLevel::None;
// All non-x86_32 Windows targets require unwind tables. However, LLVM
// doesn't know how to generate them for all targets, so only enable
// the ones that are actually implemented.
- return getArch() == llvm::Triple::x86_64 ||
- getArch() == llvm::Triple::aarch64;
+ if (getArch() == llvm::Triple::x86_64 || getArch() == llvm::Triple::arm ||
+ getArch() == llvm::Triple::thumb || getArch() == llvm::Triple::aarch64)
+ return UnwindTableLevel::Asynchronous;
+
+ return UnwindTableLevel::None;
}
bool MSVCToolChain::isPICDefault() const {
- return getArch() == llvm::Triple::x86_64;
+ return getArch() == llvm::Triple::x86_64 ||
+ getArch() == llvm::Triple::aarch64;
}
-bool MSVCToolChain::isPIEDefault() const {
+bool MSVCToolChain::isPIEDefault(const llvm::opt::ArgList &Args) const {
return false;
}
bool MSVCToolChain::isPICDefaultForced() const {
- return getArch() == llvm::Triple::x86_64;
+ return getArch() == llvm::Triple::x86_64 ||
+ getArch() == llvm::Triple::aarch64;
}
void MSVCToolChain::AddCudaIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
+ CudaInstallation->AddCudaIncludeArgs(DriverArgs, CC1Args);
}
void MSVCToolChain::AddHIPIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
-}
-
-void MSVCToolChain::printVerboseInfo(raw_ostream &OS) const {
- CudaInstallation.print(OS);
- RocmInstallation.print(OS);
+ RocmInstallation->AddHIPIncludeArgs(DriverArgs, CC1Args);
}
-// Windows SDKs and VC Toolchains group their contents into subdirectories based
-// on the target architecture. This function converts an llvm::Triple::ArchType
-// to the corresponding subdirectory name.
-static const char *llvmArchToWindowsSDKArch(llvm::Triple::ArchType Arch) {
- using ArchType = llvm::Triple::ArchType;
- switch (Arch) {
- case ArchType::x86:
- return "x86";
- case ArchType::x86_64:
- return "x64";
- case ArchType::arm:
- return "arm";
- case ArchType::aarch64:
- return "arm64";
- default:
- return "";
- }
+void MSVCToolChain::AddHIPRuntimeLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ CmdArgs.append({Args.MakeArgString(StringRef("-libpath:") +
+ RocmInstallation->getLibPath()),
+ "amdhip64.lib"});
}
-// Similar to the above function, but for Visual Studios before VS2017.
-static const char *llvmArchToLegacyVCArch(llvm::Triple::ArchType Arch) {
- using ArchType = llvm::Triple::ArchType;
- switch (Arch) {
- case ArchType::x86:
- // x86 is default in legacy VC toolchains.
- // e.g. x86 libs are directly in /lib as opposed to /lib/x86.
- return "";
- case ArchType::x86_64:
- return "amd64";
- case ArchType::arm:
- return "arm";
- case ArchType::aarch64:
- return "arm64";
- default:
- return "";
- }
+void MSVCToolChain::printVerboseInfo(raw_ostream &OS) const {
+ CudaInstallation->print(OS);
+ RocmInstallation->print(OS);
}
-// Similar to the above function, but for DevDiv internal builds.
-static const char *llvmArchToDevDivInternalArch(llvm::Triple::ArchType Arch) {
- using ArchType = llvm::Triple::ArchType;
- switch (Arch) {
- case ArchType::x86:
- return "i386";
- case ArchType::x86_64:
- return "amd64";
- case ArchType::arm:
- return "arm";
- case ArchType::aarch64:
- return "arm64";
- default:
- return "";
- }
+std::string
+MSVCToolChain::getSubDirectoryPath(llvm::SubDirectoryType Type,
+ llvm::StringRef SubdirParent) const {
+ return llvm::getSubDirectoryPath(Type, VSLayout, VCToolChainPath, getArch(),
+ SubdirParent);
}
-// Get the path to a specific subdirectory in the current toolchain for
-// a given target architecture.
-// VS2017 changed the VC toolchain layout, so this should be used instead
-// of hardcoding paths.
std::string
-MSVCToolChain::getSubDirectoryPath(SubDirectoryType Type,
- llvm::StringRef SubdirParent,
+MSVCToolChain::getSubDirectoryPath(llvm::SubDirectoryType Type,
llvm::Triple::ArchType TargetArch) const {
- const char *SubdirName;
- const char *IncludeName;
- switch (VSLayout) {
- case ToolsetLayout::OlderVS:
- SubdirName = llvmArchToLegacyVCArch(TargetArch);
- IncludeName = "include";
- break;
- case ToolsetLayout::VS2017OrNewer:
- SubdirName = llvmArchToWindowsSDKArch(TargetArch);
- IncludeName = "include";
- break;
- case ToolsetLayout::DevDivInternal:
- SubdirName = llvmArchToDevDivInternalArch(TargetArch);
- IncludeName = "inc";
- break;
- }
-
- llvm::SmallString<256> Path(VCToolChainPath);
- if (!SubdirParent.empty())
- llvm::sys::path::append(Path, SubdirParent);
-
- switch (Type) {
- case SubDirectoryType::Bin:
- if (VSLayout == ToolsetLayout::VS2017OrNewer) {
- const bool HostIsX64 =
- llvm::Triple(llvm::sys::getProcessTriple()).isArch64Bit();
- const char *const HostName = HostIsX64 ? "Hostx64" : "Hostx86";
- llvm::sys::path::append(Path, "bin", HostName, SubdirName);
- } else { // OlderVS or DevDivInternal
- llvm::sys::path::append(Path, "bin", SubdirName);
- }
- break;
- case SubDirectoryType::Include:
- llvm::sys::path::append(Path, IncludeName);
- break;
- case SubDirectoryType::Lib:
- llvm::sys::path::append(Path, "lib", SubdirName);
- break;
- }
- return std::string(Path.str());
-}
-
-#ifdef _WIN32
-static bool readFullStringValue(HKEY hkey, const char *valueName,
- std::string &value) {
- std::wstring WideValueName;
- if (!llvm::ConvertUTF8toWide(valueName, WideValueName))
- return false;
-
- DWORD result = 0;
- DWORD valueSize = 0;
- DWORD type = 0;
- // First just query for the required size.
- result = RegQueryValueExW(hkey, WideValueName.c_str(), NULL, &type, NULL,
- &valueSize);
- if (result != ERROR_SUCCESS || type != REG_SZ || !valueSize)
- return false;
- std::vector<BYTE> buffer(valueSize);
- result = RegQueryValueExW(hkey, WideValueName.c_str(), NULL, NULL, &buffer[0],
- &valueSize);
- if (result == ERROR_SUCCESS) {
- std::wstring WideValue(reinterpret_cast<const wchar_t *>(buffer.data()),
- valueSize / sizeof(wchar_t));
- if (valueSize && WideValue.back() == L'\0') {
- WideValue.pop_back();
- }
- // The destination buffer must be empty as an invariant of the conversion
- // function; but this function is sometimes called in a loop that passes in
- // the same buffer, however. Simply clear it out so we can overwrite it.
- value.clear();
- return llvm::convertWideToUTF8(WideValue, value);
- }
- return false;
-}
-#endif
-
-/// Read registry string.
-/// This also supports a means to look for high-versioned keys by use
-/// of a $VERSION placeholder in the key path.
-/// $VERSION in the key path is a placeholder for the version number,
-/// causing the highest value path to be searched for and used.
-/// I.e. "SOFTWARE\\Microsoft\\VisualStudio\\$VERSION".
-/// There can be additional characters in the component. Only the numeric
-/// characters are compared. This function only searches HKLM.
-static bool getSystemRegistryString(const char *keyPath, const char *valueName,
- std::string &value, std::string *phValue) {
-#ifndef _WIN32
- return false;
-#else
- HKEY hRootKey = HKEY_LOCAL_MACHINE;
- HKEY hKey = NULL;
- long lResult;
- bool returnValue = false;
-
- const char *placeHolder = strstr(keyPath, "$VERSION");
- std::string bestName;
- // If we have a $VERSION placeholder, do the highest-version search.
- if (placeHolder) {
- const char *keyEnd = placeHolder - 1;
- const char *nextKey = placeHolder;
- // Find end of previous key.
- while ((keyEnd > keyPath) && (*keyEnd != '\\'))
- keyEnd--;
- // Find end of key containing $VERSION.
- while (*nextKey && (*nextKey != '\\'))
- nextKey++;
- size_t partialKeyLength = keyEnd - keyPath;
- char partialKey[256];
- if (partialKeyLength >= sizeof(partialKey))
- partialKeyLength = sizeof(partialKey) - 1;
- strncpy(partialKey, keyPath, partialKeyLength);
- partialKey[partialKeyLength] = '\0';
- HKEY hTopKey = NULL;
- lResult = RegOpenKeyExA(hRootKey, partialKey, 0, KEY_READ | KEY_WOW64_32KEY,
- &hTopKey);
- if (lResult == ERROR_SUCCESS) {
- char keyName[256];
- double bestValue = 0.0;
- DWORD index, size = sizeof(keyName) - 1;
- for (index = 0; RegEnumKeyExA(hTopKey, index, keyName, &size, NULL, NULL,
- NULL, NULL) == ERROR_SUCCESS;
- index++) {
- const char *sp = keyName;
- while (*sp && !isDigit(*sp))
- sp++;
- if (!*sp)
- continue;
- const char *ep = sp + 1;
- while (*ep && (isDigit(*ep) || (*ep == '.')))
- ep++;
- char numBuf[32];
- strncpy(numBuf, sp, sizeof(numBuf) - 1);
- numBuf[sizeof(numBuf) - 1] = '\0';
- double dvalue = strtod(numBuf, NULL);
- if (dvalue > bestValue) {
- // Test that InstallDir is indeed there before keeping this index.
- // Open the chosen key path remainder.
- bestName = keyName;
- // Append rest of key.
- bestName.append(nextKey);
- lResult = RegOpenKeyExA(hTopKey, bestName.c_str(), 0,
- KEY_READ | KEY_WOW64_32KEY, &hKey);
- if (lResult == ERROR_SUCCESS) {
- if (readFullStringValue(hKey, valueName, value)) {
- bestValue = dvalue;
- if (phValue)
- *phValue = bestName;
- returnValue = true;
- }
- RegCloseKey(hKey);
- }
- }
- size = sizeof(keyName) - 1;
- }
- RegCloseKey(hTopKey);
- }
- } else {
- lResult =
- RegOpenKeyExA(hRootKey, keyPath, 0, KEY_READ | KEY_WOW64_32KEY, &hKey);
- if (lResult == ERROR_SUCCESS) {
- if (readFullStringValue(hKey, valueName, value))
- returnValue = true;
- if (phValue)
- phValue->clear();
- RegCloseKey(hKey);
- }
- }
- return returnValue;
-#endif // _WIN32
+ return llvm::getSubDirectoryPath(Type, VSLayout, VCToolChainPath, TargetArch,
+ "");
}
// Find the most recent version of Universal CRT or Windows 10 SDK.
// vcvarsqueryregistry.bat from Visual Studio 2015 sorts entries in the include
// directory by name and uses the last one of the list.
// So we compare entry names lexicographically to find the greatest one.
-static bool getWindows10SDKVersionFromPath(llvm::vfs::FileSystem &VFS,
- const std::string &SDKPath,
- std::string &SDKVersion) {
- llvm::SmallString<128> IncludePath(SDKPath);
- llvm::sys::path::append(IncludePath, "Include");
- SDKVersion = getHighestNumericTupleInDirectory(VFS, IncludePath);
- return !SDKVersion.empty();
-}
-
-static bool getWindowsSDKDirViaCommandLine(llvm::vfs::FileSystem &VFS,
- const ArgList &Args,
- std::string &Path, int &Major,
- std::string &Version) {
- if (Arg *A = Args.getLastArg(options::OPT__SLASH_winsdkdir,
- options::OPT__SLASH_winsysroot)) {
- // Don't validate the input; trust the value supplied by the user.
- // The motivation is to prevent unnecessary file and registry access.
- llvm::VersionTuple SDKVersion;
- if (Arg *A = Args.getLastArg(options::OPT__SLASH_winsdkversion))
- SDKVersion.tryParse(A->getValue());
-
- if (A->getOption().getID() == options::OPT__SLASH_winsysroot) {
- llvm::SmallString<128> SDKPath(A->getValue());
- llvm::sys::path::append(SDKPath, "Windows Kits");
- if (!SDKVersion.empty())
- llvm::sys::path::append(SDKPath, Twine(SDKVersion.getMajor()));
- else
- llvm::sys::path::append(
- SDKPath, getHighestNumericTupleInDirectory(VFS, SDKPath));
- Path = std::string(SDKPath.str());
- } else {
- Path = A->getValue();
- }
-
- if (!SDKVersion.empty()) {
- Major = SDKVersion.getMajor();
- Version = SDKVersion.getAsString();
- } else if (getWindows10SDKVersionFromPath(VFS, Path, Version)) {
- Major = 10;
- }
- return true;
- }
- return false;
-}
-
-/// Get Windows SDK installation directory.
-static bool getWindowsSDKDir(llvm::vfs::FileSystem &VFS, const ArgList &Args,
- std::string &Path, int &Major,
- std::string &WindowsSDKIncludeVersion,
- std::string &WindowsSDKLibVersion) {
- // Trust /winsdkdir and /winsdkversion if present.
- if (getWindowsSDKDirViaCommandLine(VFS, Args, Path, Major,
- WindowsSDKIncludeVersion)) {
- WindowsSDKLibVersion = WindowsSDKIncludeVersion;
- return true;
- }
-
- // FIXME: Try env vars (%WindowsSdkDir%, %UCRTVersion%) before going to registry.
-
- // Try the Windows registry.
- std::string RegistrySDKVersion;
- if (!getSystemRegistryString(
- "SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\$VERSION",
- "InstallationFolder", Path, &RegistrySDKVersion))
- return false;
- if (Path.empty() || RegistrySDKVersion.empty())
- return false;
-
- WindowsSDKIncludeVersion.clear();
- WindowsSDKLibVersion.clear();
- Major = 0;
- std::sscanf(RegistrySDKVersion.c_str(), "v%d.", &Major);
- if (Major <= 7)
- return true;
- if (Major == 8) {
- // Windows SDK 8.x installs libraries in a folder whose names depend on the
- // version of the OS you're targeting. By default choose the newest, which
- // usually corresponds to the version of the OS you've installed the SDK on.
- const char *Tests[] = {"winv6.3", "win8", "win7"};
- for (const char *Test : Tests) {
- llvm::SmallString<128> TestPath(Path);
- llvm::sys::path::append(TestPath, "Lib", Test);
- if (VFS.exists(TestPath)) {
- WindowsSDKLibVersion = Test;
- break;
- }
- }
- return !WindowsSDKLibVersion.empty();
- }
- if (Major == 10) {
- if (!getWindows10SDKVersionFromPath(VFS, Path, WindowsSDKIncludeVersion))
- return false;
- WindowsSDKLibVersion = WindowsSDKIncludeVersion;
- return true;
- }
- // Unsupported SDK version
- return false;
-}
-
// Gets the library path required to link against the Windows SDK.
-bool MSVCToolChain::getWindowsSDKLibraryPath(
- const ArgList &Args, std::string &path) const {
+bool MSVCToolChain::getWindowsSDKLibraryPath(const ArgList &Args,
+ std::string &path) const {
std::string sdkPath;
int sdkMajor = 0;
std::string windowsSDKIncludeVersion;
std::string windowsSDKLibVersion;
path.clear();
- if (!getWindowsSDKDir(getVFS(), Args, sdkPath, sdkMajor,
- windowsSDKIncludeVersion, windowsSDKLibVersion))
+ if (!llvm::getWindowsSDKDir(getVFS(), WinSdkDir, WinSdkVersion, WinSysRoot,
+ sdkPath, sdkMajor, windowsSDKIncludeVersion,
+ windowsSDKLibVersion))
return false;
llvm::SmallString<128> libPath(sdkPath);
llvm::sys::path::append(libPath, "Lib");
- if (sdkMajor >= 8) {
- llvm::sys::path::append(libPath, windowsSDKLibVersion, "um",
- llvmArchToWindowsSDKArch(getArch()));
- } else {
- switch (getArch()) {
- // In Windows SDK 7.x, x86 libraries are directly in the Lib folder.
- case llvm::Triple::x86:
- break;
- case llvm::Triple::x86_64:
- llvm::sys::path::append(libPath, "x64");
- break;
- case llvm::Triple::arm:
- // It is not necessary to link against Windows SDK 7.x when targeting ARM.
- return false;
- default:
- return false;
- }
- }
-
- path = std::string(libPath.str());
- return true;
+ if (sdkMajor >= 10)
+ if (!(WinSdkDir.has_value() || WinSysRoot.has_value()) &&
+ WinSdkVersion.has_value())
+ windowsSDKLibVersion = *WinSdkVersion;
+ if (sdkMajor >= 8)
+ llvm::sys::path::append(libPath, windowsSDKLibVersion, "um");
+ return llvm::appendArchToWindowsSDKLibPath(sdkMajor, libPath, getArch(),
+ path);
}
-// Check if the Include path of a specified version of Visual Studio contains
-// specific header files. If not, they are probably shipped with Universal CRT.
bool MSVCToolChain::useUniversalCRT() const {
- llvm::SmallString<128> TestPath(
- getSubDirectoryPath(SubDirectoryType::Include));
- llvm::sys::path::append(TestPath, "stdlib.h");
- return !getVFS().exists(TestPath);
-}
-
-static bool getUniversalCRTSdkDir(llvm::vfs::FileSystem &VFS,
- const ArgList &Args, std::string &Path,
- std::string &UCRTVersion) {
- // If /winsdkdir is passed, use it as location for the UCRT too.
- // FIXME: Should there be a dedicated /ucrtdir to override /winsdkdir?
- int Major;
- if (getWindowsSDKDirViaCommandLine(VFS, Args, Path, Major, UCRTVersion))
- return true;
-
- // FIXME: Try env vars (%UniversalCRTSdkDir%, %UCRTVersion%) before going to
- // registry.
-
- // vcvarsqueryregistry.bat for Visual Studio 2015 queries the registry
- // for the specific key "KitsRoot10". So do we.
- if (!getSystemRegistryString(
- "SOFTWARE\\Microsoft\\Windows Kits\\Installed Roots", "KitsRoot10",
- Path, nullptr))
- return false;
-
- return getWindows10SDKVersionFromPath(VFS, Path, UCRTVersion);
+ return llvm::useUniversalCRT(VSLayout, VCToolChainPath, getArch(), getVFS());
}
bool MSVCToolChain::getUniversalCRTLibraryPath(const ArgList &Args,
@@ -1164,28 +569,26 @@ bool MSVCToolChain::getUniversalCRTLibraryPath(const ArgList &Args,
std::string UCRTVersion;
Path.clear();
- if (!getUniversalCRTSdkDir(getVFS(), Args, UniversalCRTSdkPath, UCRTVersion))
+ if (!llvm::getUniversalCRTSdkDir(getVFS(), WinSdkDir, WinSdkVersion,
+ WinSysRoot, UniversalCRTSdkPath,
+ UCRTVersion))
return false;
- StringRef ArchName = llvmArchToWindowsSDKArch(getArch());
+ if (!(WinSdkDir.has_value() || WinSysRoot.has_value()) &&
+ WinSdkVersion.has_value())
+ UCRTVersion = *WinSdkVersion;
+
+ StringRef ArchName = llvm::archToWindowsSDKArch(getArch());
if (ArchName.empty())
return false;
llvm::SmallString<128> LibPath(UniversalCRTSdkPath);
llvm::sys::path::append(LibPath, "Lib", UCRTVersion, "ucrt", ArchName);
- Path = std::string(LibPath.str());
+ Path = std::string(LibPath);
return true;
}
-static VersionTuple getMSVCVersionFromTriple(const llvm::Triple &Triple) {
- unsigned Major, Minor, Micro;
- Triple.getEnvironmentVersion(Major, Minor, Micro);
- if (Major || Minor || Micro)
- return VersionTuple(Major, Minor, Micro);
- return VersionTuple();
-}
-
static VersionTuple getMSVCVersionFromExe(const std::string &BinDir) {
VersionTuple Version;
#ifdef _WIN32
@@ -1263,6 +666,19 @@ void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
AddSystemIncludesFromEnv(Var);
}
+ // Add DIA SDK include if requested.
+ if (const Arg *A = DriverArgs.getLastArg(options::OPT__SLASH_diasdkdir,
+ options::OPT__SLASH_winsysroot)) {
+ // cl.exe doesn't find the DIA SDK automatically, so this too requires
+ // explicit flags and doesn't automatically look in "DIA SDK" relative
+ // to the path we found for VCToolChainPath.
+ llvm::SmallString<128> DIASDKPath(A->getValue());
+ if (A->getOption().getID() == options::OPT__SLASH_winsysroot)
+ llvm::sys::path::append(DIASDKPath, "DIA SDK");
+ AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, std::string(DIASDKPath),
+ "include");
+ }
+
if (DriverArgs.hasArg(options::OPT_nostdlibinc))
return;
@@ -1280,15 +696,20 @@ void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
// the correct include paths first.
if (!VCToolChainPath.empty()) {
addSystemInclude(DriverArgs, CC1Args,
- getSubDirectoryPath(SubDirectoryType::Include));
- addSystemInclude(DriverArgs, CC1Args,
- getSubDirectoryPath(SubDirectoryType::Include, "atlmfc"));
+ getSubDirectoryPath(llvm::SubDirectoryType::Include));
+ addSystemInclude(
+ DriverArgs, CC1Args,
+ getSubDirectoryPath(llvm::SubDirectoryType::Include, "atlmfc"));
if (useUniversalCRT()) {
std::string UniversalCRTSdkPath;
std::string UCRTVersion;
- if (getUniversalCRTSdkDir(getVFS(), DriverArgs, UniversalCRTSdkPath,
- UCRTVersion)) {
+ if (llvm::getUniversalCRTSdkDir(getVFS(), WinSdkDir, WinSdkVersion,
+ WinSysRoot, UniversalCRTSdkPath,
+ UCRTVersion)) {
+ if (!(WinSdkDir.has_value() || WinSysRoot.has_value()) &&
+ WinSdkVersion.has_value())
+ UCRTVersion = *WinSdkVersion;
AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, UniversalCRTSdkPath,
"Include", UCRTVersion, "ucrt");
}
@@ -1298,8 +719,13 @@ void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
int major = 0;
std::string windowsSDKIncludeVersion;
std::string windowsSDKLibVersion;
- if (getWindowsSDKDir(getVFS(), DriverArgs, WindowsSDKDir, major,
- windowsSDKIncludeVersion, windowsSDKLibVersion)) {
+ if (llvm::getWindowsSDKDir(getVFS(), WinSdkDir, WinSdkVersion, WinSysRoot,
+ WindowsSDKDir, major, windowsSDKIncludeVersion,
+ windowsSDKLibVersion)) {
+ if (major >= 10)
+ if (!(WinSdkDir.has_value() || WinSysRoot.has_value()) &&
+ WinSdkVersion.has_value())
+ windowsSDKIncludeVersion = windowsSDKLibVersion = *WinSdkVersion;
if (major >= 8) {
// Note: windowsSDKIncludeVersion is empty for SDKs prior to v10.
// Anyway, llvm::sys::path::append is able to manage it.
@@ -1312,6 +738,15 @@ void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
"Include", windowsSDKIncludeVersion,
"winrt");
+ if (major >= 10) {
+ llvm::VersionTuple Tuple;
+ if (!Tuple.tryParse(windowsSDKIncludeVersion) &&
+ Tuple.getSubminor().value_or(0) >= 17134) {
+ AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
+ "Include", windowsSDKIncludeVersion,
+ "cppwinrt");
+ }
+ }
} else {
AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
"Include");
@@ -1345,14 +780,18 @@ VersionTuple MSVCToolChain::computeMSVCVersion(const Driver *D,
bool IsWindowsMSVC = getTriple().isWindowsMSVCEnvironment();
VersionTuple MSVT = ToolChain::computeMSVCVersion(D, Args);
if (MSVT.empty())
- MSVT = getMSVCVersionFromTriple(getTriple());
+ MSVT = getTriple().getEnvironmentVersion();
if (MSVT.empty() && IsWindowsMSVC)
- MSVT = getMSVCVersionFromExe(getSubDirectoryPath(SubDirectoryType::Bin));
+ MSVT =
+ getMSVCVersionFromExe(getSubDirectoryPath(llvm::SubDirectoryType::Bin));
if (MSVT.empty() &&
Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions,
IsWindowsMSVC)) {
- // -fms-compatibility-version=19.14 is default, aka 2017, 15.7
- MSVT = VersionTuple(19, 14);
+ // -fms-compatibility-version=19.33 is default, aka 2022, 17.3
+ // NOTE: when changing this value, also update
+ // clang/docs/CommandGuide/clang.rst and clang/docs/UsersManual.rst
+ // accordingly.
+ MSVT = VersionTuple(19, 33);
}
return MSVT;
}
@@ -1363,8 +802,8 @@ MSVCToolChain::ComputeEffectiveClangTriple(const ArgList &Args,
// The MSVC version doesn't care about the architecture, even though it
// may look at the triple internally.
VersionTuple MSVT = computeMSVCVersion(/*D=*/nullptr, Args);
- MSVT = VersionTuple(MSVT.getMajor(), MSVT.getMinor().getValueOr(0),
- MSVT.getSubminor().getValueOr(0));
+ MSVT = VersionTuple(MSVT.getMajor(), MSVT.getMinor().value_or(0),
+ MSVT.getSubminor().value_or(0));
// For the rest of the triple, however, a computed architecture name may
// be needed.
@@ -1577,7 +1016,7 @@ void MSVCToolChain::addClangTargetOptions(
Action::OffloadKind DeviceOffloadKind) const {
// MSVC STL kindly allows removing all usages of typeid by defining
// _HAS_STATIC_RTTI to 0. Do so, when compiling with -fno-rtti
- if (DriverArgs.hasArg(options::OPT_fno_rtti, options::OPT_frtti,
- /*Default=*/false))
+ if (DriverArgs.hasFlag(options::OPT_fno_rtti, options::OPT_frtti,
+ /*Default=*/false))
CC1Args.push_back("-D_HAS_STATIC_RTTI=0");
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h
index 19d94c5c606e..48369e030aad 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h
@@ -11,10 +11,12 @@
#include "AMDGPU.h"
#include "Cuda.h"
-#include "clang/Basic/DebugInfoOptions.h"
+#include "LazyDetector.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
+#include "llvm/Frontend/Debug/Options.h"
+#include "llvm/WindowsDriver/MSVCPaths.h"
namespace clang {
namespace driver {
@@ -22,7 +24,7 @@ namespace tools {
/// Visual studio tools.
namespace visualstudio {
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
public:
Linker(const ToolChain &TC) : Tool("visualstudio::Linker", "linker", TC) {}
@@ -49,18 +51,19 @@ public:
TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
Action::OffloadKind DeviceOffloadKind) const override;
- bool IsIntegratedAssemblerDefault() const override;
- bool IsUnwindTablesDefault(const llvm::opt::ArgList &Args) const override;
+ UnwindTableLevel
+ getDefaultUnwindTableLevel(const llvm::opt::ArgList &Args) const override;
bool isPICDefault() const override;
- bool isPIEDefault() const override;
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override;
bool isPICDefaultForced() const override;
/// Set CodeView as the default debug info format for non-MachO binary
/// formats, and to DWARF otherwise. Users can use -gcodeview and -gdwarf to
/// override the default.
- codegenoptions::DebugInfoFormat getDefaultDebugFormat() const override {
- return getTriple().isOSBinFormatMachO() ? codegenoptions::DIF_DWARF
- : codegenoptions::DIF_CodeView;
+ llvm::codegenoptions::DebugInfoFormat getDefaultDebugFormat() const override {
+ return getTriple().isOSBinFormatMachO()
+ ? llvm::codegenoptions::DIF_DWARF
+ : llvm::codegenoptions::DIF_CodeView;
}
/// Set the debugger tuning to "default", since we're definitely not tuning
@@ -69,29 +72,19 @@ public:
return llvm::DebuggerKind::Default;
}
- enum class SubDirectoryType {
- Bin,
- Include,
- Lib,
- };
- std::string getSubDirectoryPath(SubDirectoryType Type,
- llvm::StringRef SubdirParent,
+ unsigned GetDefaultDwarfVersion() const override {
+ return 4;
+ }
+
+ std::string getSubDirectoryPath(llvm::SubDirectoryType Type,
+ llvm::StringRef SubdirParent = "") const;
+ std::string getSubDirectoryPath(llvm::SubDirectoryType Type,
llvm::Triple::ArchType TargetArch) const;
- // Convenience overload.
- // Uses the current target arch.
- std::string getSubDirectoryPath(SubDirectoryType Type,
- llvm::StringRef SubdirParent = "") const {
- return getSubDirectoryPath(Type, SubdirParent, getArch());
+ bool getIsVS2017OrNewer() const {
+ return VSLayout == llvm::ToolsetLayout::VS2017OrNewer;
}
- enum class ToolsetLayout {
- OlderVS,
- VS2017OrNewer,
- DevDivInternal,
- };
- bool getIsVS2017OrNewer() const { return VSLayout == ToolsetLayout::VS2017OrNewer; }
-
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
@@ -105,6 +98,9 @@ public:
void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void AddHIPRuntimeLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
bool getWindowsSDKLibraryPath(
const llvm::opt::ArgList &Args, std::string &path) const;
bool getUniversalCRTLibraryPath(const llvm::opt::ArgList &Args,
@@ -138,10 +134,11 @@ protected:
Tool *buildLinker() const override;
Tool *buildAssembler() const override;
private:
+ std::optional<llvm::StringRef> WinSdkDir, WinSdkVersion, WinSysRoot;
std::string VCToolChainPath;
- ToolsetLayout VSLayout = ToolsetLayout::OlderVS;
- CudaInstallationDetector CudaInstallation;
- RocmInstallationDetector RocmInstallation;
+ llvm::ToolsetLayout VSLayout = llvm::ToolsetLayout::OlderVS;
+ LazyDetector<CudaInstallationDetector> CudaInstallation;
+ LazyDetector<RocmInstallationDetector> RocmInstallation;
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVCSetupApi.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVCSetupApi.h
deleted file mode 100644
index a890b85fd5e9..000000000000
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVCSetupApi.h
+++ /dev/null
@@ -1,514 +0,0 @@
-// <copyright file="Program.cpp" company="Microsoft Corporation">
-// Copyright (C) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT license.
-// </copyright>
-// <license>
-// The MIT License (MIT)
-//
-// Copyright (C) Microsoft Corporation. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the "Software"),
-// to deal in the Software without restriction, including without limitation the
-// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-// sell copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-// </license>
-
-#pragma once
-
-// Constants
-//
-#ifndef E_NOTFOUND
-#define E_NOTFOUND HRESULT_FROM_WIN32(ERROR_NOT_FOUND)
-#endif
-
-#ifndef E_FILENOTFOUND
-#define E_FILENOTFOUND HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND)
-#endif
-
-// Enumerations
-//
-/// <summary>
-/// The state of an instance.
-/// </summary>
-enum InstanceState : unsigned {
- /// <summary>
- /// The instance state has not been determined.
- /// </summary>
- eNone = 0,
-
- /// <summary>
- /// The instance installation path exists.
- /// </summary>
- eLocal = 1,
-
- /// <summary>
- /// A product is registered to the instance.
- /// </summary>
- eRegistered = 2,
-
- /// <summary>
- /// No reboot is required for the instance.
- /// </summary>
- eNoRebootRequired = 4,
-
- /// <summary>
- /// The instance represents a complete install.
- /// </summary>
- eComplete = MAXUINT,
-};
-
-// Forward interface declarations
-//
-#ifndef __ISetupInstance_FWD_DEFINED__
-#define __ISetupInstance_FWD_DEFINED__
-typedef struct ISetupInstance ISetupInstance;
-#endif
-
-#ifndef __ISetupInstance2_FWD_DEFINED__
-#define __ISetupInstance2_FWD_DEFINED__
-typedef struct ISetupInstance2 ISetupInstance2;
-#endif
-
-#ifndef __IEnumSetupInstances_FWD_DEFINED__
-#define __IEnumSetupInstances_FWD_DEFINED__
-typedef struct IEnumSetupInstances IEnumSetupInstances;
-#endif
-
-#ifndef __ISetupConfiguration_FWD_DEFINED__
-#define __ISetupConfiguration_FWD_DEFINED__
-typedef struct ISetupConfiguration ISetupConfiguration;
-#endif
-
-#ifndef __ISetupConfiguration2_FWD_DEFINED__
-#define __ISetupConfiguration2_FWD_DEFINED__
-typedef struct ISetupConfiguration2 ISetupConfiguration2;
-#endif
-
-#ifndef __ISetupPackageReference_FWD_DEFINED__
-#define __ISetupPackageReference_FWD_DEFINED__
-typedef struct ISetupPackageReference ISetupPackageReference;
-#endif
-
-#ifndef __ISetupHelper_FWD_DEFINED__
-#define __ISetupHelper_FWD_DEFINED__
-typedef struct ISetupHelper ISetupHelper;
-#endif
-
-// Forward class declarations
-//
-#ifndef __SetupConfiguration_FWD_DEFINED__
-#define __SetupConfiguration_FWD_DEFINED__
-
-#ifdef __cplusplus
-typedef class SetupConfiguration SetupConfiguration;
-#endif
-
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// Interface definitions
-//
-EXTERN_C const IID IID_ISetupInstance;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-/// <summary>
-/// Information about an instance of a product.
-/// </summary>
-struct DECLSPEC_UUID("B41463C3-8866-43B5-BC33-2B0676F7F42E")
- DECLSPEC_NOVTABLE ISetupInstance : public IUnknown {
- /// <summary>
- /// Gets the instance identifier (should match the name of the parent instance
- /// directory).
- /// </summary>
- /// <param name="pbstrInstanceId">The instance identifier.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist.</returns>
- STDMETHOD(GetInstanceId)(_Out_ BSTR *pbstrInstanceId) = 0;
-
- /// <summary>
- /// Gets the local date and time when the installation was originally
- /// installed.
- /// </summary>
- /// <param name="pInstallDate">The local date and time when the installation
- /// was originally installed.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
- /// property is not defined.</returns>
- STDMETHOD(GetInstallDate)(_Out_ LPFILETIME pInstallDate) = 0;
-
- /// <summary>
- /// Gets the unique name of the installation, often indicating the branch and
- /// other information used for telemetry.
- /// </summary>
- /// <param name="pbstrInstallationName">The unique name of the installation,
- /// often indicating the branch and other information used for
- /// telemetry.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
- /// property is not defined.</returns>
- STDMETHOD(GetInstallationName)(_Out_ BSTR *pbstrInstallationName) = 0;
-
- /// <summary>
- /// Gets the path to the installation root of the product.
- /// </summary>
- /// <param name="pbstrInstallationPath">The path to the installation root of
- /// the product.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
- /// property is not defined.</returns>
- STDMETHOD(GetInstallationPath)(_Out_ BSTR *pbstrInstallationPath) = 0;
-
- /// <summary>
- /// Gets the version of the product installed in this instance.
- /// </summary>
- /// <param name="pbstrInstallationVersion">The version of the product
- /// installed in this instance.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
- /// property is not defined.</returns>
- STDMETHOD(GetInstallationVersion)(_Out_ BSTR *pbstrInstallationVersion) = 0;
-
- /// <summary>
- /// Gets the display name (title) of the product installed in this instance.
- /// </summary>
- /// <param name="lcid">The LCID for the display name.</param>
- /// <param name="pbstrDisplayName">The display name (title) of the product
- /// installed in this instance.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
- /// property is not defined.</returns>
- STDMETHOD(GetDisplayName)(_In_ LCID lcid, _Out_ BSTR *pbstrDisplayName) = 0;
-
- /// <summary>
- /// Gets the description of the product installed in this instance.
- /// </summary>
- /// <param name="lcid">The LCID for the description.</param>
- /// <param name="pbstrDescription">The description of the product installed in
- /// this instance.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
- /// property is not defined.</returns>
- STDMETHOD(GetDescription)(_In_ LCID lcid, _Out_ BSTR *pbstrDescription) = 0;
-
- /// <summary>
- /// Resolves the optional relative path to the root path of the instance.
- /// </summary>
- /// <param name="pwszRelativePath">A relative path within the instance to
- /// resolve, or NULL to get the root path.</param>
- /// <param name="pbstrAbsolutePath">The full path to the optional relative
- /// path within the instance. If the relative path is NULL, the root path will
- /// always terminate in a backslash.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
- /// property is not defined.</returns>
- STDMETHOD(ResolvePath)
- (_In_opt_z_ LPCOLESTR pwszRelativePath, _Out_ BSTR *pbstrAbsolutePath) = 0;
-};
-#endif
-
-EXTERN_C const IID IID_ISetupInstance2;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-/// <summary>
-/// Information about an instance of a product.
-/// </summary>
-struct DECLSPEC_UUID("89143C9A-05AF-49B0-B717-72E218A2185C")
- DECLSPEC_NOVTABLE ISetupInstance2 : public ISetupInstance {
- /// <summary>
- /// Gets the state of the instance.
- /// </summary>
- /// <param name="pState">The state of the instance.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist.</returns>
- STDMETHOD(GetState)(_Out_ InstanceState *pState) = 0;
-
- /// <summary>
- /// Gets an array of package references registered to the instance.
- /// </summary>
- /// <param name="ppsaPackages">Pointer to an array of <see
- /// cref="ISetupPackageReference"/>.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
- /// packages property is not defined.</returns>
- STDMETHOD(GetPackages)(_Out_ LPSAFEARRAY *ppsaPackages) = 0;
-
- /// <summary>
- /// Gets a pointer to the <see cref="ISetupPackageReference"/> that represents
- /// the registered product.
- /// </summary>
- /// <param name="ppPackage">Pointer to an instance of <see
- /// cref="ISetupPackageReference"/>. This may be NULL if <see
- /// cref="GetState"/> does not return <see cref="eComplete"/>.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
- /// packages property is not defined.</returns>
- STDMETHOD(GetProduct)
- (_Outptr_result_maybenull_ ISetupPackageReference **ppPackage) = 0;
-
- /// <summary>
- /// Gets the relative path to the product application, if available.
- /// </summary>
- /// <param name="pbstrProductPath">The relative path to the product
- /// application, if available.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist.</returns>
- STDMETHOD(GetProductPath)
- (_Outptr_result_maybenull_ BSTR *pbstrProductPath) = 0;
-};
-#endif
-
-EXTERN_C const IID IID_IEnumSetupInstances;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-/// <summary>
-/// A enumerator of installed <see cref="ISetupInstance"/> objects.
-/// </summary>
-struct DECLSPEC_UUID("6380BCFF-41D3-4B2E-8B2E-BF8A6810C848")
- DECLSPEC_NOVTABLE IEnumSetupInstances : public IUnknown {
- /// <summary>
- /// Retrieves the next set of product instances in the enumeration sequence.
- /// </summary>
- /// <param name="celt">The number of product instances to retrieve.</param>
- /// <param name="rgelt">A pointer to an array of <see
- /// cref="ISetupInstance"/>.</param>
- /// <param name="pceltFetched">A pointer to the number of product instances
- /// retrieved. If celt is 1 this parameter may be NULL.</param>
- /// <returns>S_OK if the number of elements were fetched, S_FALSE if nothing
- /// was fetched (at end of enumeration), E_INVALIDARG if celt is greater than
- /// 1 and pceltFetched is NULL, or E_OUTOFMEMORY if an <see
- /// cref="ISetupInstance"/> could not be allocated.</returns>
- STDMETHOD(Next)
- (_In_ ULONG celt, _Out_writes_to_(celt, *pceltFetched) ISetupInstance **rgelt,
- _Out_opt_ _Deref_out_range_(0, celt) ULONG *pceltFetched) = 0;
-
- /// <summary>
- /// Skips the next set of product instances in the enumeration sequence.
- /// </summary>
- /// <param name="celt">The number of product instances to skip.</param>
- /// <returns>S_OK if the number of elements could be skipped; otherwise,
- /// S_FALSE;</returns>
- STDMETHOD(Skip)(_In_ ULONG celt) = 0;
-
- /// <summary>
- /// Resets the enumeration sequence to the beginning.
- /// </summary>
- /// <returns>Always returns S_OK;</returns>
- STDMETHOD(Reset)(void) = 0;
-
- /// <summary>
- /// Creates a new enumeration object in the same state as the current
- /// enumeration object: the new object points to the same place in the
- /// enumeration sequence.
- /// </summary>
- /// <param name="ppenum">A pointer to a pointer to a new <see
- /// cref="IEnumSetupInstances"/> interface. If the method fails, this
- /// parameter is undefined.</param>
- /// <returns>S_OK if a clone was returned; otherwise, E_OUTOFMEMORY.</returns>
- STDMETHOD(Clone)(_Deref_out_opt_ IEnumSetupInstances **ppenum) = 0;
-};
-#endif
-
-EXTERN_C const IID IID_ISetupConfiguration;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-/// <summary>
-/// Gets information about product instances set up on the machine.
-/// </summary>
-struct DECLSPEC_UUID("42843719-DB4C-46C2-8E7C-64F1816EFD5B")
- DECLSPEC_NOVTABLE ISetupConfiguration : public IUnknown {
- /// <summary>
- /// Enumerates all completed product instances installed.
- /// </summary>
- /// <param name="ppEnumInstances">An enumeration of completed, installed
- /// product instances.</param>
- /// <returns>Standard HRESULT indicating success or failure.</returns>
- STDMETHOD(EnumInstances)(_Out_ IEnumSetupInstances **ppEnumInstances) = 0;
-
- /// <summary>
- /// Gets the instance for the current process path.
- /// </summary>
- /// <param name="ppInstance">The instance for the current process
- /// path.</param>
- /// <returns>The instance for the current process path, or E_NOTFOUND if not
- /// found.</returns>
- STDMETHOD(GetInstanceForCurrentProcess)
- (_Out_ ISetupInstance **ppInstance) = 0;
-
- /// <summary>
- /// Gets the instance for the given path.
- /// </summary>
- /// <param name="ppInstance">The instance for the given path.</param>
- /// <returns>The instance for the given path, or E_NOTFOUND if not
- /// found.</returns>
- STDMETHOD(GetInstanceForPath)
- (_In_z_ LPCWSTR wzPath, _Out_ ISetupInstance **ppInstance) = 0;
-};
-#endif
-
-EXTERN_C const IID IID_ISetupConfiguration2;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-/// <summary>
-/// Gets information about product instances.
-/// </summary>
-struct DECLSPEC_UUID("26AAB78C-4A60-49D6-AF3B-3C35BC93365D")
- DECLSPEC_NOVTABLE ISetupConfiguration2 : public ISetupConfiguration {
- /// <summary>
- /// Enumerates all product instances.
- /// </summary>
- /// <param name="ppEnumInstances">An enumeration of all product
- /// instances.</param>
- /// <returns>Standard HRESULT indicating success or failure.</returns>
- STDMETHOD(EnumAllInstances)(_Out_ IEnumSetupInstances **ppEnumInstances) = 0;
-};
-#endif
-
-EXTERN_C const IID IID_ISetupPackageReference;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-/// <summary>
-/// A reference to a package.
-/// </summary>
-struct DECLSPEC_UUID("da8d8a16-b2b6-4487-a2f1-594ccccd6bf5")
- DECLSPEC_NOVTABLE ISetupPackageReference : public IUnknown {
- /// <summary>
- /// Gets the general package identifier.
- /// </summary>
- /// <param name="pbstrId">The general package identifier.</param>
- /// <returns>Standard HRESULT indicating success or failure.</returns>
- STDMETHOD(GetId)(_Out_ BSTR *pbstrId) = 0;
-
- /// <summary>
- /// Gets the version of the package.
- /// </summary>
- /// <param name="pbstrVersion">The version of the package.</param>
- /// <returns>Standard HRESULT indicating success or failure.</returns>
- STDMETHOD(GetVersion)(_Out_ BSTR *pbstrVersion) = 0;
-
- /// <summary>
- /// Gets the target process architecture of the package.
- /// </summary>
- /// <param name="pbstrChip">The target process architecture of the
- /// package.</param>
- /// <returns>Standard HRESULT indicating success or failure.</returns>
- STDMETHOD(GetChip)(_Out_ BSTR *pbstrChip) = 0;
-
- /// <summary>
- /// Gets the language and optional region identifier.
- /// </summary>
- /// <param name="pbstrLanguage">The language and optional region
- /// identifier.</param>
- /// <returns>Standard HRESULT indicating success or failure.</returns>
- STDMETHOD(GetLanguage)(_Out_ BSTR *pbstrLanguage) = 0;
-
- /// <summary>
- /// Gets the build branch of the package.
- /// </summary>
- /// <param name="pbstrBranch">The build branch of the package.</param>
- /// <returns>Standard HRESULT indicating success or failure.</returns>
- STDMETHOD(GetBranch)(_Out_ BSTR *pbstrBranch) = 0;
-
- /// <summary>
- /// Gets the type of the package.
- /// </summary>
- /// <param name="pbstrType">The type of the package.</param>
- /// <returns>Standard HRESULT indicating success or failure.</returns>
- STDMETHOD(GetType)(_Out_ BSTR *pbstrType) = 0;
-
- /// <summary>
- /// Gets the unique identifier consisting of all defined tokens.
- /// </summary>
- /// <param name="pbstrUniqueId">The unique identifier consisting of all
- /// defined tokens.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_UNEXPECTED if no Id was defined (required).</returns>
- STDMETHOD(GetUniqueId)(_Out_ BSTR *pbstrUniqueId) = 0;
-};
-#endif
-
-EXTERN_C const IID IID_ISetupHelper;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-/// <summary>
-/// Helper functions.
-/// </summary>
-/// <remarks>
-/// You can query for this interface from the <see cref="SetupConfiguration"/>
-/// class.
-/// </remarks>
-struct DECLSPEC_UUID("42b21b78-6192-463e-87bf-d577838f1d5c")
- DECLSPEC_NOVTABLE ISetupHelper : public IUnknown {
- /// <summary>
- /// Parses a dotted quad version string into a 64-bit unsigned integer.
- /// </summary>
- /// <param name="pwszVersion">The dotted quad version string to parse, e.g.
- /// 1.2.3.4.</param>
- /// <param name="pullVersion">A 64-bit unsigned integer representing the
- /// version. You can compare this to other versions.</param>
- /// <returns>Standard HRESULT indicating success or failure.</returns>
- STDMETHOD(ParseVersion)
- (_In_ LPCOLESTR pwszVersion, _Out_ PULONGLONG pullVersion) = 0;
-
- /// <summary>
- /// Parses a dotted quad version string into a 64-bit unsigned integer.
- /// </summary>
- /// <param name="pwszVersionRange">The string containing 1 or 2 dotted quad
- /// version strings to parse, e.g. [1.0,) that means 1.0.0.0 or newer.</param>
- /// <param name="pullMinVersion">A 64-bit unsigned integer representing the
- /// minimum version, which may be 0. You can compare this to other
- /// versions.</param>
- /// <param name="pullMaxVersion">A 64-bit unsigned integer representing the
- /// maximum version, which may be MAXULONGLONG. You can compare this to other
- /// versions.</param>
- /// <returns>Standard HRESULT indicating success or failure.</returns>
- STDMETHOD(ParseVersionRange)
- (_In_ LPCOLESTR pwszVersionRange, _Out_ PULONGLONG pullMinVersion,
- _Out_ PULONGLONG pullMaxVersion) = 0;
-};
-#endif
-
-// Class declarations
-//
-EXTERN_C const CLSID CLSID_SetupConfiguration;
-
-#ifdef __cplusplus
-/// <summary>
-/// This class implements <see cref="ISetupConfiguration"/>, <see
-/// cref="ISetupConfiguration2"/>, and <see cref="ISetupHelper"/>.
-/// </summary>
-class DECLSPEC_UUID("177F0C4A-1CD3-4DE7-A32C-71DBBB9FA36D") SetupConfiguration;
-#endif
-
-// Function declarations
-//
-/// <summary>
-/// Gets an <see cref="ISetupConfiguration"/> that provides information about
-/// product instances installed on the machine.
-/// </summary>
-/// <param name="ppConfiguration">The <see cref="ISetupConfiguration"/> that
-/// provides information about product instances installed on the
-/// machine.</param>
-/// <param name="pReserved">Reserved for future use.</param>
-/// <returns>Standard HRESULT indicating success or failure.</returns>
-STDMETHODIMP GetSetupConfiguration(_Out_ ISetupConfiguration **ppConfiguration,
- _Reserved_ LPVOID pReserved);
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp
index 7ba729f36bd8..067758c05e97 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp
@@ -86,7 +86,9 @@ void tools::MinGW::Linker::AddLibGCC(const ArgList &Args,
CmdArgs.push_back("-lmoldname");
CmdArgs.push_back("-lmingwex");
for (auto Lib : Args.getAllArgValues(options::OPT_l))
- if (StringRef(Lib).startswith("msvcr") || StringRef(Lib).startswith("ucrt"))
+ if (StringRef(Lib).starts_with("msvcr") ||
+ StringRef(Lib).starts_with("ucrt") ||
+ StringRef(Lib).starts_with("crtdll"))
return;
CmdArgs.push_back("-lmsvcrt");
}
@@ -98,7 +100,7 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *LinkingOutput) const {
const ToolChain &TC = getToolChain();
const Driver &D = TC.getDriver();
- const SanitizerArgs &Sanitize = TC.getSanitizerArgs();
+ const SanitizerArgs &Sanitize = TC.getSanitizerArgs(Args);
ArgStringList CmdArgs;
@@ -133,7 +135,7 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("arm64pe");
break;
default:
- llvm_unreachable("Unsupported target architecture.");
+ D.Diag(diag::err_target_unknown_triple) << TC.getEffectiveTriple().str();
}
Arg *SubsysArg =
@@ -164,6 +166,24 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("--enable-auto-image-base");
}
+ if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
+ CmdArgs.push_back("--no-demangle");
+
+ if (!Args.hasFlag(options::OPT_fauto_import, options::OPT_fno_auto_import,
+ true))
+ CmdArgs.push_back("--disable-auto-import");
+
+ if (Arg *A = Args.getLastArg(options::OPT_mguard_EQ)) {
+ StringRef GuardArgs = A->getValue();
+ if (GuardArgs == "none")
+ CmdArgs.push_back("--no-guard-cf");
+ else if (GuardArgs == "cf" || GuardArgs == "cf-nochecks")
+ CmdArgs.push_back("--guard-cf");
+ else
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << GuardArgs;
+ }
+
CmdArgs.push_back("-o");
const char *OutputFile = Output.getFilename();
// GCC implicitly adds an .exe extension if it is given an output file name
@@ -176,13 +196,21 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
} else
CmdArgs.push_back(OutputFile);
- Args.AddAllArgs(CmdArgs, options::OPT_e);
// FIXME: add -N, -n flags
Args.AddLastArg(CmdArgs, options::OPT_r);
Args.AddLastArg(CmdArgs, options::OPT_s);
Args.AddLastArg(CmdArgs, options::OPT_t);
Args.AddAllArgs(CmdArgs, options::OPT_u_Group);
- Args.AddLastArg(CmdArgs, options::OPT_Z_Flag);
+
+ // Add asan_dynamic as the first import lib before other libs. This allows
+ // asan to be initialized as early as possible to increase its instrumentation
+ // coverage to include other user DLLs which has not been built with asan.
+ if (Sanitize.needsAsanRt() && !Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
+ // MinGW always links against a shared MSVCRT.
+ CmdArgs.push_back(
+ TC.getCompilerRTArgString(Args, "asan_dynamic", ToolChain::FT_Shared));
+ }
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_mdll)) {
@@ -213,6 +241,17 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
AddLinkerInputs(TC, Inputs, Args, CmdArgs, JA);
+ if (D.isUsingLTO()) {
+ assert(!Inputs.empty() && "Must have at least one input.");
+ addLTOOptions(TC, Args, CmdArgs, Output, Inputs[0],
+ D.getLTOMode() == LTOK_Thin);
+ }
+
+ if (C.getDriver().IsFlangMode()) {
+ addFortranRuntimeLibraryPath(TC, Args, CmdArgs);
+ addFortranRuntimeLibs(TC, Args, CmdArgs);
+ }
+
// TODO: Add profile stuff here
if (TC.ShouldLinkCXXStdlib(Args)) {
@@ -327,10 +366,20 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Exec, CmdArgs, Inputs, Output));
}
+static bool isCrossCompiling(const llvm::Triple &T, bool RequireArchMatch) {
+ llvm::Triple HostTriple(llvm::Triple::normalize(LLVM_HOST_TRIPLE));
+ if (HostTriple.getOS() != llvm::Triple::Win32)
+ return true;
+ if (RequireArchMatch && HostTriple.getArch() != T.getArch())
+ return true;
+ return false;
+}
+
// Simplified from Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple.
static bool findGccVersion(StringRef LibDir, std::string &GccLibDir,
- std::string &Ver) {
- auto Version = toolchains::Generic_GCC::GCCVersion::Parse("0.0.0");
+ std::string &Ver,
+ toolchains::Generic_GCC::GCCVersion &Version) {
+ Version = toolchains::Generic_GCC::GCCVersion::Parse("0.0.0");
std::error_code EC;
for (llvm::sys::fs::directory_iterator LI(LibDir, EC), LE; !EC && LI != LE;
LI = LI.increment(EC)) {
@@ -348,31 +397,51 @@ static bool findGccVersion(StringRef LibDir, std::string &GccLibDir,
return Ver.size();
}
-void toolchains::MinGW::findGccLibDir() {
- llvm::SmallVector<llvm::SmallString<32>, 2> Archs;
- Archs.emplace_back(getTriple().getArchName());
- Archs[0] += "-w64-mingw32";
- Archs.emplace_back("mingw32");
- if (Arch.empty())
- Arch = std::string(Archs[0].str());
+static llvm::Triple getLiteralTriple(const Driver &D, const llvm::Triple &T) {
+ llvm::Triple LiteralTriple(D.getTargetTriple());
+ // The arch portion of the triple may be overridden by -m32/-m64.
+ LiteralTriple.setArchName(T.getArchName());
+ return LiteralTriple;
+}
+
+void toolchains::MinGW::findGccLibDir(const llvm::Triple &LiteralTriple) {
+ llvm::SmallVector<llvm::SmallString<32>, 5> SubdirNames;
+ SubdirNames.emplace_back(LiteralTriple.str());
+ SubdirNames.emplace_back(getTriple().str());
+ SubdirNames.emplace_back(getTriple().getArchName());
+ SubdirNames.back() += "-w64-mingw32";
+ SubdirNames.emplace_back(getTriple().getArchName());
+ SubdirNames.back() += "-w64-mingw32ucrt";
+ SubdirNames.emplace_back("mingw32");
+ if (SubdirName.empty()) {
+ SubdirName = getTriple().getArchName();
+ SubdirName += "-w64-mingw32";
+ }
// lib: Arch Linux, Ubuntu, Windows
// lib64: openSUSE Linux
for (StringRef CandidateLib : {"lib", "lib64"}) {
- for (StringRef CandidateArch : Archs) {
+ for (StringRef CandidateSysroot : SubdirNames) {
llvm::SmallString<1024> LibDir(Base);
- llvm::sys::path::append(LibDir, CandidateLib, "gcc", CandidateArch);
- if (findGccVersion(LibDir, GccLibDir, Ver)) {
- Arch = std::string(CandidateArch);
+ llvm::sys::path::append(LibDir, CandidateLib, "gcc", CandidateSysroot);
+ if (findGccVersion(LibDir, GccLibDir, Ver, GccVer)) {
+ SubdirName = std::string(CandidateSysroot);
return;
}
}
}
}
-llvm::ErrorOr<std::string> toolchains::MinGW::findGcc() {
- llvm::SmallVector<llvm::SmallString<32>, 2> Gccs;
- Gccs.emplace_back(getTriple().getArchName());
- Gccs[0] += "-w64-mingw32-gcc";
+static llvm::ErrorOr<std::string> findGcc(const llvm::Triple &LiteralTriple,
+ const llvm::Triple &T) {
+ llvm::SmallVector<llvm::SmallString<32>, 5> Gccs;
+ Gccs.emplace_back(LiteralTriple.str());
+ Gccs.back() += "-gcc";
+ Gccs.emplace_back(T.str());
+ Gccs.back() += "-gcc";
+ Gccs.emplace_back(T.getArchName());
+ Gccs.back() += "-w64-mingw32-gcc";
+ Gccs.emplace_back(T.getArchName());
+ Gccs.back() += "-w64-mingw32ucrt-gcc";
Gccs.emplace_back("mingw32-gcc");
// Please do not add "gcc" here
for (StringRef CandidateGcc : Gccs)
@@ -381,60 +450,98 @@ llvm::ErrorOr<std::string> toolchains::MinGW::findGcc() {
return make_error_code(std::errc::no_such_file_or_directory);
}
-llvm::ErrorOr<std::string> toolchains::MinGW::findClangRelativeSysroot() {
- llvm::SmallVector<llvm::SmallString<32>, 2> Subdirs;
- Subdirs.emplace_back(getTriple().str());
- Subdirs.emplace_back(getTriple().getArchName());
- Subdirs[1] += "-w64-mingw32";
- StringRef ClangRoot =
- llvm::sys::path::parent_path(getDriver().getInstalledDir());
+static llvm::ErrorOr<std::string>
+findClangRelativeSysroot(const Driver &D, const llvm::Triple &LiteralTriple,
+ const llvm::Triple &T, std::string &SubdirName) {
+ llvm::SmallVector<llvm::SmallString<32>, 4> Subdirs;
+ Subdirs.emplace_back(LiteralTriple.str());
+ Subdirs.emplace_back(T.str());
+ Subdirs.emplace_back(T.getArchName());
+ Subdirs.back() += "-w64-mingw32";
+ Subdirs.emplace_back(T.getArchName());
+ Subdirs.back() += "-w64-mingw32ucrt";
+ StringRef ClangRoot = llvm::sys::path::parent_path(D.getInstalledDir());
StringRef Sep = llvm::sys::path::get_separator();
for (StringRef CandidateSubdir : Subdirs) {
if (llvm::sys::fs::is_directory(ClangRoot + Sep + CandidateSubdir)) {
- Arch = std::string(CandidateSubdir);
+ SubdirName = std::string(CandidateSubdir);
return (ClangRoot + Sep + CandidateSubdir).str();
}
}
return make_error_code(std::errc::no_such_file_or_directory);
}
+static bool looksLikeMinGWSysroot(const std::string &Directory) {
+ StringRef Sep = llvm::sys::path::get_separator();
+ if (!llvm::sys::fs::exists(Directory + Sep + "include" + Sep + "_mingw.h"))
+ return false;
+ if (!llvm::sys::fs::exists(Directory + Sep + "lib" + Sep + "libkernel32.a"))
+ return false;
+ return true;
+}
+
toolchains::MinGW::MinGW(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: ToolChain(D, Triple, Args), CudaInstallation(D, Triple, Args),
RocmInstallation(D, Triple, Args) {
getProgramPaths().push_back(getDriver().getInstalledDir());
+ std::string InstallBase =
+ std::string(llvm::sys::path::parent_path(getDriver().getInstalledDir()));
+ // The sequence for detecting a sysroot here should be kept in sync with
+ // the testTriple function below.
+ llvm::Triple LiteralTriple = getLiteralTriple(D, getTriple());
if (getDriver().SysRoot.size())
Base = getDriver().SysRoot;
// Look for <clang-bin>/../<triplet>; if found, use <clang-bin>/.. as the
// base as it could still be a base for a gcc setup with libgcc.
- else if (llvm::ErrorOr<std::string> TargetSubdir = findClangRelativeSysroot())
+ else if (llvm::ErrorOr<std::string> TargetSubdir = findClangRelativeSysroot(
+ getDriver(), LiteralTriple, getTriple(), SubdirName))
Base = std::string(llvm::sys::path::parent_path(TargetSubdir.get()));
- else if (llvm::ErrorOr<std::string> GPPName = findGcc())
+ // If the install base of Clang seems to have mingw sysroot files directly
+ // in the toplevel include and lib directories, use this as base instead of
+ // looking for a triple prefixed GCC in the path.
+ else if (looksLikeMinGWSysroot(InstallBase))
+ Base = InstallBase;
+ else if (llvm::ErrorOr<std::string> GPPName =
+ findGcc(LiteralTriple, getTriple()))
Base = std::string(llvm::sys::path::parent_path(
llvm::sys::path::parent_path(GPPName.get())));
else
- Base = std::string(
- llvm::sys::path::parent_path(getDriver().getInstalledDir()));
+ Base = InstallBase;
Base += llvm::sys::path::get_separator();
- findGccLibDir();
+ findGccLibDir(LiteralTriple);
+ TripleDirName = SubdirName;
// GccLibDir must precede Base/lib so that the
// correct crtbegin.o ,cetend.o would be found.
getFilePaths().push_back(GccLibDir);
+
+ // openSUSE/Fedora
+ std::string CandidateSubdir = SubdirName + "/sys-root/mingw";
+ if (getDriver().getVFS().exists(Base + CandidateSubdir))
+ SubdirName = CandidateSubdir;
+
+ getFilePaths().push_back(
+ (Base + SubdirName + llvm::sys::path::get_separator() + "lib").str());
+
+ // Gentoo
getFilePaths().push_back(
- (Base + Arch + llvm::sys::path::get_separator() + "lib").str());
- getFilePaths().push_back(Base + "lib");
- // openSUSE
- getFilePaths().push_back(Base + Arch + "/sys-root/mingw/lib");
+ (Base + SubdirName + llvm::sys::path::get_separator() + "mingw/lib").str());
+
+ // Only include <base>/lib if we're not cross compiling (not even for
+ // windows->windows to a different arch), or if the sysroot has been set
+ // (where we presume the user has pointed it at an arch specific
+ // subdirectory).
+ if (!::isCrossCompiling(getTriple(), /*RequireArchMatch=*/true) ||
+ getDriver().SysRoot.size())
+ getFilePaths().push_back(Base + "lib");
NativeLLVMSupport =
Args.getLastArgValue(options::OPT_fuse_ld_EQ, CLANG_DEFAULT_LINKER)
.equals_insensitive("lld");
}
-bool toolchains::MinGW::IsIntegratedAssemblerDefault() const { return true; }
-
Tool *toolchains::MinGW::getTool(Action::ActionClass AC) const {
switch (AC) {
case Action::PreprocessJobClass:
@@ -462,30 +569,36 @@ bool toolchains::MinGW::HasNativeLLVMSupport() const {
return NativeLLVMSupport;
}
-bool toolchains::MinGW::IsUnwindTablesDefault(const ArgList &Args) const {
+ToolChain::UnwindTableLevel
+toolchains::MinGW::getDefaultUnwindTableLevel(const ArgList &Args) const {
Arg *ExceptionArg = Args.getLastArg(options::OPT_fsjlj_exceptions,
options::OPT_fseh_exceptions,
options::OPT_fdwarf_exceptions);
if (ExceptionArg &&
ExceptionArg->getOption().matches(options::OPT_fseh_exceptions))
- return true;
- return getArch() == llvm::Triple::x86_64 ||
- getArch() == llvm::Triple::aarch64;
+ return UnwindTableLevel::Asynchronous;
+
+ if (getArch() == llvm::Triple::x86_64 || getArch() == llvm::Triple::arm ||
+ getArch() == llvm::Triple::thumb || getArch() == llvm::Triple::aarch64)
+ return UnwindTableLevel::Asynchronous;
+ return UnwindTableLevel::None;
}
bool toolchains::MinGW::isPICDefault() const {
- return getArch() == llvm::Triple::x86_64;
+ return getArch() == llvm::Triple::x86_64 ||
+ getArch() == llvm::Triple::aarch64;
}
-bool toolchains::MinGW::isPIEDefault() const { return false; }
-
-bool toolchains::MinGW::isPICDefaultForced() const {
- return getArch() == llvm::Triple::x86_64;
+bool toolchains::MinGW::isPIEDefault(const llvm::opt::ArgList &Args) const {
+ return false;
}
+bool toolchains::MinGW::isPICDefaultForced() const { return true; }
+
llvm::ExceptionHandling
toolchains::MinGW::GetExceptionModel(const ArgList &Args) const {
- if (getArch() == llvm::Triple::x86_64 || getArch() == llvm::Triple::aarch64)
+ if (getArch() == llvm::Triple::x86_64 || getArch() == llvm::Triple::aarch64 ||
+ getArch() == llvm::Triple::arm || getArch() == llvm::Triple::thumb)
return llvm::ExceptionHandling::WinEH;
return llvm::ExceptionHandling::DwarfCFI;
}
@@ -501,17 +614,17 @@ SanitizerMask toolchains::MinGW::getSupportedSanitizers() const {
void toolchains::MinGW::AddCudaIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
+ CudaInstallation->AddCudaIncludeArgs(DriverArgs, CC1Args);
}
void toolchains::MinGW::AddHIPIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
+ RocmInstallation->AddHIPIncludeArgs(DriverArgs, CC1Args);
}
void toolchains::MinGW::printVerboseInfo(raw_ostream &OS) const {
- CudaInstallation.print(OS);
- RocmInstallation.print(OS);
+ CudaInstallation->print(OS);
+ RocmInstallation->print(OS);
}
// Include directories for various hosts:
@@ -554,6 +667,12 @@ void toolchains::MinGW::printVerboseInfo(raw_ostream &OS) const {
// /usr/include/c++/4.8/backward
// /usr/x86_64-w64-mingw32/include
+// Fedora
+// /usr/x86_64-w64-mingw32ucrt/sys-root/mingw/include/c++/x86_64-w64-mingw32ucrt
+// /usr/x86_64-w64-mingw32ucrt/sys-root/mingw/include/c++/backward
+// /usr/x86_64-w64-mingw32ucrt/sys-root/mingw/include
+// /usr/lib/gcc/x86_64-w64-mingw32ucrt/12.2.1/include-fixed
+
void toolchains::MinGW::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
if (DriverArgs.hasArg(options::OPT_nostdinc))
@@ -568,49 +687,159 @@ void toolchains::MinGW::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
if (DriverArgs.hasArg(options::OPT_nostdlibinc))
return;
- if (GetRuntimeLibType(DriverArgs) == ToolChain::RLT_Libgcc) {
- // openSUSE
- addSystemInclude(DriverArgs, CC1Args,
- Base + Arch + "/sys-root/mingw/include");
- }
+ addSystemInclude(DriverArgs, CC1Args,
+ Base + SubdirName + llvm::sys::path::get_separator() +
+ "include");
+ // Gentoo
addSystemInclude(DriverArgs, CC1Args,
- Base + Arch + llvm::sys::path::get_separator() + "include");
- addSystemInclude(DriverArgs, CC1Args, Base + "include");
+ Base + SubdirName + llvm::sys::path::get_separator() + "usr/include");
+
+ // Only include <base>/include if we're not cross compiling (but do allow it
+ // if we're on Windows and building for Windows on another architecture),
+ // or if the sysroot has been set (where we presume the user has pointed it
+ // at an arch specific subdirectory).
+ if (!::isCrossCompiling(getTriple(), /*RequireArchMatch=*/false) ||
+ getDriver().SysRoot.size())
+ addSystemInclude(DriverArgs, CC1Args, Base + "include");
+}
+
+void toolchains::MinGW::addClangTargetOptions(
+ const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const {
+ if (Arg *A = DriverArgs.getLastArg(options::OPT_mguard_EQ)) {
+ StringRef GuardArgs = A->getValue();
+ if (GuardArgs == "none") {
+ // Do nothing.
+ } else if (GuardArgs == "cf") {
+ // Emit CFG instrumentation and the table of address-taken functions.
+ CC1Args.push_back("-cfguard");
+ } else if (GuardArgs == "cf-nochecks") {
+ // Emit only the table of address-taken functions.
+ CC1Args.push_back("-cfguard-no-checks");
+ } else {
+ getDriver().Diag(diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << GuardArgs;
+ }
+ }
+
+ CC1Args.push_back("-fno-use-init-array");
+
+ for (auto Opt : {options::OPT_mthreads, options::OPT_mwindows,
+ options::OPT_mconsole, options::OPT_mdll}) {
+ if (Arg *A = DriverArgs.getLastArgNoClaim(Opt))
+ A->ignoreTargetSpecific();
+ }
}
void toolchains::MinGW::AddClangCXXStdlibIncludeArgs(
const ArgList &DriverArgs, ArgStringList &CC1Args) const {
- if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
- DriverArgs.hasArg(options::OPT_nostdincxx))
+ if (DriverArgs.hasArg(options::OPT_nostdinc, options::OPT_nostdlibinc,
+ options::OPT_nostdincxx))
return;
StringRef Slash = llvm::sys::path::get_separator();
switch (GetCXXStdlibType(DriverArgs)) {
- case ToolChain::CST_Libcxx:
- addSystemInclude(DriverArgs, CC1Args, Base + Arch + Slash + "include" +
- Slash + "c++" + Slash + "v1");
+ case ToolChain::CST_Libcxx: {
+ std::string TargetDir = (Base + "include" + Slash + getTripleString() +
+ Slash + "c++" + Slash + "v1")
+ .str();
+ if (getDriver().getVFS().exists(TargetDir))
+ addSystemInclude(DriverArgs, CC1Args, TargetDir);
+ addSystemInclude(DriverArgs, CC1Args,
+ Base + SubdirName + Slash + "include" + Slash + "c++" +
+ Slash + "v1");
addSystemInclude(DriverArgs, CC1Args,
Base + "include" + Slash + "c++" + Slash + "v1");
break;
+ }
case ToolChain::CST_Libstdcxx:
- llvm::SmallVector<llvm::SmallString<1024>, 4> CppIncludeBases;
+ llvm::SmallVector<llvm::SmallString<1024>, 7> CppIncludeBases;
CppIncludeBases.emplace_back(Base);
- llvm::sys::path::append(CppIncludeBases[0], Arch, "include", "c++");
+ llvm::sys::path::append(CppIncludeBases[0], SubdirName, "include", "c++");
CppIncludeBases.emplace_back(Base);
- llvm::sys::path::append(CppIncludeBases[1], Arch, "include", "c++", Ver);
+ llvm::sys::path::append(CppIncludeBases[1], SubdirName, "include", "c++",
+ Ver);
CppIncludeBases.emplace_back(Base);
llvm::sys::path::append(CppIncludeBases[2], "include", "c++", Ver);
CppIncludeBases.emplace_back(GccLibDir);
llvm::sys::path::append(CppIncludeBases[3], "include", "c++");
+ CppIncludeBases.emplace_back(GccLibDir);
+ llvm::sys::path::append(CppIncludeBases[4], "include",
+ "g++-v" + GccVer.Text);
+ CppIncludeBases.emplace_back(GccLibDir);
+ llvm::sys::path::append(CppIncludeBases[5], "include",
+ "g++-v" + GccVer.MajorStr + "." + GccVer.MinorStr);
+ CppIncludeBases.emplace_back(GccLibDir);
+ llvm::sys::path::append(CppIncludeBases[6], "include",
+ "g++-v" + GccVer.MajorStr);
for (auto &CppIncludeBase : CppIncludeBases) {
addSystemInclude(DriverArgs, CC1Args, CppIncludeBase);
CppIncludeBase += Slash;
- addSystemInclude(DriverArgs, CC1Args, CppIncludeBase + Arch);
+ addSystemInclude(DriverArgs, CC1Args, CppIncludeBase + TripleDirName);
addSystemInclude(DriverArgs, CC1Args, CppIncludeBase + "backward");
}
break;
}
}
+
+static bool testTriple(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args) {
+ // If an explicit sysroot is set, that will be used and we shouldn't try to
+ // detect anything else.
+ std::string SubdirName;
+ if (D.SysRoot.size())
+ return true;
+ llvm::Triple LiteralTriple = getLiteralTriple(D, Triple);
+ std::string InstallBase =
+ std::string(llvm::sys::path::parent_path(D.getInstalledDir()));
+ if (llvm::ErrorOr<std::string> TargetSubdir =
+ findClangRelativeSysroot(D, LiteralTriple, Triple, SubdirName))
+ return true;
+ // If the install base itself looks like a mingw sysroot, we'll use that
+ // - don't use any potentially unrelated gcc to influence what triple to use.
+ if (looksLikeMinGWSysroot(InstallBase))
+ return false;
+ if (llvm::ErrorOr<std::string> GPPName = findGcc(LiteralTriple, Triple))
+ return true;
+ // If we neither found a colocated sysroot or a matching gcc executable,
+ // conclude that we can't know if this is the correct spelling of the triple.
+ return false;
+}
+
+static llvm::Triple adjustTriple(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args) {
+ // First test if the original triple can find a sysroot with the triple
+ // name.
+ if (testTriple(D, Triple, Args))
+ return Triple;
+ llvm::SmallVector<llvm::StringRef, 3> Archs;
+ // If not, test a couple other possible arch names that might be what was
+ // intended.
+ if (Triple.getArch() == llvm::Triple::x86) {
+ Archs.emplace_back("i386");
+ Archs.emplace_back("i586");
+ Archs.emplace_back("i686");
+ } else if (Triple.getArch() == llvm::Triple::arm ||
+ Triple.getArch() == llvm::Triple::thumb) {
+ Archs.emplace_back("armv7");
+ }
+ for (auto A : Archs) {
+ llvm::Triple TestTriple(Triple);
+ TestTriple.setArchName(A);
+ if (testTriple(D, TestTriple, Args))
+ return TestTriple;
+ }
+ // If none was found, just proceed with the original value.
+ return Triple;
+}
+
+void toolchains::MinGW::fixTripleArch(const Driver &D, llvm::Triple &Triple,
+ const ArgList &Args) {
+ if (Triple.getArch() == llvm::Triple::x86 ||
+ Triple.getArch() == llvm::Triple::arm ||
+ Triple.getArch() == llvm::Triple::thumb)
+ Triple = adjustTriple(D, Triple, Args);
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.h
index 2f1559fcf34c..a9963d8d06c2 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.h
@@ -11,6 +11,7 @@
#include "Cuda.h"
#include "Gnu.h"
+#include "LazyDetector.h"
#include "ROCm.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
@@ -20,7 +21,7 @@ namespace clang {
namespace driver {
namespace tools {
-/// MinGW -- Directly call GNU Binutils assembler and linker
+/// Directly call GNU Binutils assembler and linker
namespace MinGW {
class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
public:
@@ -34,7 +35,7 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
public:
Linker(const ToolChain &TC) : Tool("MinGW::Linker", "linker", TC) {}
@@ -60,12 +61,15 @@ public:
MinGW(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
+ static void fixTripleArch(const Driver &D, llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
bool HasNativeLLVMSupport() const override;
- bool IsIntegratedAssemblerDefault() const override;
- bool IsUnwindTablesDefault(const llvm::opt::ArgList &Args) const override;
+ UnwindTableLevel
+ getDefaultUnwindTableLevel(const llvm::opt::ArgList &Args) const override;
bool isPICDefault() const override;
- bool isPIEDefault() const override;
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override;
bool isPICDefaultForced() const override;
SanitizerMask getSupportedSanitizers() const override;
@@ -76,6 +80,10 @@ public:
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void
+ addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
void AddClangCXXStdlibIncludeArgs(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
@@ -87,24 +95,26 @@ public:
void printVerboseInfo(raw_ostream &OS) const override;
+ unsigned GetDefaultDwarfVersion() const override { return 4; }
+
protected:
Tool *getTool(Action::ActionClass AC) const override;
Tool *buildLinker() const override;
Tool *buildAssembler() const override;
private:
- CudaInstallationDetector CudaInstallation;
- RocmInstallationDetector RocmInstallation;
+ LazyDetector<CudaInstallationDetector> CudaInstallation;
+ LazyDetector<RocmInstallationDetector> RocmInstallation;
std::string Base;
std::string GccLibDir;
+ clang::driver::toolchains::Generic_GCC::GCCVersion GccVer;
std::string Ver;
- std::string Arch;
+ std::string SubdirName;
+ std::string TripleDirName;
mutable std::unique_ptr<tools::gcc::Preprocessor> Preprocessor;
mutable std::unique_ptr<tools::gcc::Compiler> Compiler;
- void findGccLibDir();
- llvm::ErrorOr<std::string> findGcc();
- llvm::ErrorOr<std::string> findClangRelativeSysroot();
+ void findGccLibDir(const llvm::Triple &LiteralTriple);
bool NativeLLVMSupport;
};
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.cpp
deleted file mode 100644
index 5bceb9aba3e9..000000000000
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.cpp
+++ /dev/null
@@ -1,113 +0,0 @@
-//===--- Minix.cpp - Minix ToolChain Implementations ------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "Minix.h"
-#include "CommonArgs.h"
-#include "clang/Driver/Compilation.h"
-#include "clang/Driver/Driver.h"
-#include "clang/Driver/InputInfo.h"
-#include "clang/Driver/Options.h"
-#include "llvm/Option/ArgList.h"
-#include "llvm/Support/VirtualFileSystem.h"
-
-using namespace clang::driver;
-using namespace clang;
-using namespace llvm::opt;
-
-void tools::minix::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
- claimNoWarnArgs(Args);
- ArgStringList CmdArgs;
-
- Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
-
- CmdArgs.push_back("-o");
- CmdArgs.push_back(Output.getFilename());
-
- for (const auto &II : Inputs)
- CmdArgs.push_back(II.getFilename());
-
- const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this,
- ResponseFileSupport::AtFileCurCP(),
- Exec, CmdArgs, Inputs, Output));
-}
-
-void tools::minix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
- const Driver &D = getToolChain().getDriver();
- ArgStringList CmdArgs;
-
- if (Output.isFilename()) {
- CmdArgs.push_back("-o");
- CmdArgs.push_back(Output.getFilename());
- } else {
- assert(Output.isNothing() && "Invalid output.");
- }
-
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crt1.o")));
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crti.o")));
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o")));
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crtn.o")));
- }
-
- Args.AddAllArgs(CmdArgs,
- {options::OPT_L, options::OPT_T_Group, options::OPT_e});
-
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
-
- getToolChain().addProfileRTLibs(Args, CmdArgs);
-
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
- if (D.CCCIsCXX()) {
- if (getToolChain().ShouldLinkCXXStdlib(Args))
- getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
- CmdArgs.push_back("-lm");
- }
- }
-
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
- if (Args.hasArg(options::OPT_pthread))
- CmdArgs.push_back("-lpthread");
- CmdArgs.push_back("-lc");
- CmdArgs.push_back("-lCompilerRT-Generic");
- CmdArgs.push_back("-L/usr/pkg/compiler-rt/lib");
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crtend.o")));
- }
-
- const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
- C.addCommand(std::make_unique<Command>(JA, *this,
- ResponseFileSupport::AtFileCurCP(),
- Exec, CmdArgs, Inputs, Output));
-}
-
-/// Minix - Minix tool chain which can call as(1) and ld(1) directly.
-
-toolchains::Minix::Minix(const Driver &D, const llvm::Triple &Triple,
- const ArgList &Args)
- : Generic_ELF(D, Triple, Args) {
- getFilePaths().push_back(getDriver().Dir + "/../lib");
- getFilePaths().push_back("/usr/lib");
-}
-
-Tool *toolchains::Minix::buildAssembler() const {
- return new tools::minix::Assembler(*this);
-}
-
-Tool *toolchains::Minix::buildLinker() const {
- return new tools::minix::Linker(*this);
-}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.h
deleted file mode 100644
index af8d59c5085a..000000000000
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.h
+++ /dev/null
@@ -1,64 +0,0 @@
-//===--- Minix.h - Minix ToolChain Implementations --------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_MINIX_H
-#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_MINIX_H
-
-#include "Gnu.h"
-#include "clang/Driver/Tool.h"
-#include "clang/Driver/ToolChain.h"
-
-namespace clang {
-namespace driver {
-namespace tools {
-/// minix -- Directly call GNU Binutils assembler and linker
-namespace minix {
-class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
-public:
- Assembler(const ToolChain &TC) : Tool("minix::Assembler", "assembler", TC) {}
-
- bool hasIntegratedCPP() const override { return false; }
-
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
-};
-
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
-public:
- Linker(const ToolChain &TC) : Tool("minix::Linker", "linker", TC) {}
-
- bool hasIntegratedCPP() const override { return false; }
- bool isLinkJob() const override { return true; }
-
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
-};
-} // end namespace minix
-} // end namespace tools
-
-namespace toolchains {
-
-class LLVM_LIBRARY_VISIBILITY Minix : public Generic_ELF {
-public:
- Minix(const Driver &D, const llvm::Triple &Triple,
- const llvm::opt::ArgList &Args);
-
-protected:
- Tool *buildAssembler() const override;
- Tool *buildLinker() const override;
-};
-
-} // end namespace toolchains
-} // end namespace driver
-} // end namespace clang
-
-#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_MINIX_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.cpp
index 41b7b839f3b3..4183eccceedb 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.cpp
@@ -30,7 +30,7 @@ MipsLLVMToolChain::MipsLLVMToolChain(const Driver &D,
DetectedMultilibs Result;
findMIPSMultilibs(D, Triple, "", Args, Result);
Multilibs = Result.Multilibs;
- SelectedMultilib = Result.SelectedMultilib;
+ SelectedMultilibs = Result.SelectedMultilibs;
// Find out the library suffix based on the ABI.
LibSuffix = tools::mips::getMipsABILibSuffix(Args, Triple);
@@ -56,7 +56,7 @@ void MipsLLVMToolChain::AddClangSystemIncludeArgs(
const auto &Callback = Multilibs.includeDirsCallback();
if (Callback) {
- for (const auto &Path : Callback(SelectedMultilib))
+ for (const auto &Path : Callback(SelectedMultilibs.back()))
addExternCSystemIncludeIfExists(DriverArgs, CC1Args,
D.getInstalledDir() + Path);
}
@@ -68,11 +68,11 @@ Tool *MipsLLVMToolChain::buildLinker() const {
std::string MipsLLVMToolChain::computeSysRoot() const {
if (!getDriver().SysRoot.empty())
- return getDriver().SysRoot + SelectedMultilib.osSuffix();
+ return getDriver().SysRoot + SelectedMultilibs.back().osSuffix();
const std::string InstalledDir(getDriver().getInstalledDir());
std::string SysRootPath =
- InstalledDir + "/../sysroot" + SelectedMultilib.osSuffix();
+ InstalledDir + "/../sysroot" + SelectedMultilibs.back().osSuffix();
if (llvm::sys::fs::exists(SysRootPath))
return SysRootPath;
@@ -96,7 +96,7 @@ void MipsLLVMToolChain::addLibCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
if (const auto &Callback = Multilibs.includeDirsCallback()) {
- for (std::string Path : Callback(SelectedMultilib)) {
+ for (std::string Path : Callback(SelectedMultilibs.back())) {
Path = getDriver().getInstalledDir() + Path + "/c++/v1";
if (llvm::sys::fs::exists(Path)) {
addSystemInclude(DriverArgs, CC1Args, Path);
@@ -112,6 +112,8 @@ void MipsLLVMToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
"Only -lc++ (aka libxx) is supported in this toolchain.");
CmdArgs.push_back("-lc++");
+ if (Args.hasArg(options::OPT_fexperimental_library))
+ CmdArgs.push_back("-lc++experimental");
CmdArgs.push_back("-lc++abi");
CmdArgs.push_back("-lunwind");
}
@@ -120,7 +122,7 @@ std::string MipsLLVMToolChain::getCompilerRT(const ArgList &Args,
StringRef Component,
FileType Type) const {
SmallString<128> Path(getDriver().ResourceDir);
- llvm::sys::path::append(Path, SelectedMultilib.osSuffix(), "lib" + LibSuffix,
+ llvm::sys::path::append(Path, SelectedMultilibs.back().osSuffix(), "lib" + LibSuffix,
getOS());
const char *Suffix;
switch (Type) {
@@ -136,5 +138,5 @@ std::string MipsLLVMToolChain::getCompilerRT(const ArgList &Args,
}
llvm::sys::path::append(
Path, Twine("libclang_rt." + Component + "-" + "mips" + Suffix));
- return std::string(Path.str());
+ return std::string(Path);
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.h
index 31b547c0063c..a968804f2a6e 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MipsLinux.h
@@ -53,7 +53,6 @@ public:
}
private:
- Multilib SelectedMultilib;
std::string LibSuffix;
};
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.cpp
deleted file mode 100644
index f31466633104..000000000000
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.cpp
+++ /dev/null
@@ -1,293 +0,0 @@
-//===--- Myriad.cpp - Myriad ToolChain Implementations ----------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "Myriad.h"
-#include "CommonArgs.h"
-#include "clang/Driver/Compilation.h"
-#include "clang/Driver/Driver.h"
-#include "clang/Driver/DriverDiagnostic.h"
-#include "clang/Driver/Options.h"
-#include "llvm/Option/ArgList.h"
-
-using namespace clang::driver;
-using namespace clang::driver::toolchains;
-using namespace clang;
-using namespace llvm::opt;
-
-using tools::addPathIfExists;
-
-void tools::SHAVE::Compiler::ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
- ArgStringList CmdArgs;
- assert(Inputs.size() == 1);
- const InputInfo &II = Inputs[0];
- assert(II.getType() == types::TY_C || II.getType() == types::TY_CXX ||
- II.getType() == types::TY_PP_CXX);
-
- if (JA.getKind() == Action::PreprocessJobClass) {
- Args.ClaimAllArgs();
- CmdArgs.push_back("-E");
- } else {
- assert(Output.getType() == types::TY_PP_Asm); // Require preprocessed asm.
- CmdArgs.push_back("-S");
- CmdArgs.push_back("-fno-exceptions"); // Always do this even if unspecified.
- }
- CmdArgs.push_back("-DMYRIAD2");
-
- // Append all -I, -iquote, -isystem paths, defines/undefines, 'f'
- // flags, 'g' flags, 'M' flags, optimize flags, warning options,
- // mcpu flags, mllvm flags, and Xclang flags.
- // These are spelled the same way in clang and moviCompile.
- Args.AddAllArgsExcept(
- CmdArgs,
- {options::OPT_I_Group, options::OPT_clang_i_Group, options::OPT_std_EQ,
- options::OPT_D, options::OPT_U, options::OPT_f_Group,
- options::OPT_f_clang_Group, options::OPT_g_Group, options::OPT_M_Group,
- options::OPT_O_Group, options::OPT_W_Group, options::OPT_mcpu_EQ,
- options::OPT_mllvm, options::OPT_Xclang},
- {options::OPT_fno_split_dwarf_inlining});
- Args.hasArg(options::OPT_fno_split_dwarf_inlining); // Claim it if present.
-
- // If we're producing a dependency file, and assembly is the final action,
- // then the name of the target in the dependency file should be the '.o'
- // file, not the '.s' file produced by this step. For example, instead of
- // /tmp/mumble.s: mumble.c .../someheader.h
- // the filename on the lefthand side should be "mumble.o"
- if (Args.getLastArg(options::OPT_MF) && !Args.getLastArg(options::OPT_MT) &&
- C.getActions().size() == 1 &&
- C.getActions()[0]->getKind() == Action::AssembleJobClass) {
- Arg *A = Args.getLastArg(options::OPT_o);
- if (A) {
- CmdArgs.push_back("-MT");
- CmdArgs.push_back(Args.MakeArgString(A->getValue()));
- }
- }
-
- CmdArgs.push_back(II.getFilename());
- CmdArgs.push_back("-o");
- CmdArgs.push_back(Output.getFilename());
-
- std::string Exec =
- Args.MakeArgString(getToolChain().GetProgramPath("moviCompile"));
- C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Args.MakeArgString(Exec), CmdArgs,
- Inputs, Output));
-}
-
-void tools::SHAVE::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
- ArgStringList CmdArgs;
-
- assert(Inputs.size() == 1);
- const InputInfo &II = Inputs[0];
- assert(II.getType() == types::TY_PP_Asm); // Require preprocessed asm input.
- assert(Output.getType() == types::TY_Object);
-
- CmdArgs.push_back("-no6thSlotCompression");
- const Arg *CPUArg = Args.getLastArg(options::OPT_mcpu_EQ);
- if (CPUArg)
- CmdArgs.push_back(
- Args.MakeArgString("-cv:" + StringRef(CPUArg->getValue())));
- CmdArgs.push_back("-noSPrefixing");
- CmdArgs.push_back("-a"); // Mystery option.
- Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
- for (const Arg *A : Args.filtered(options::OPT_I, options::OPT_isystem)) {
- A->claim();
- CmdArgs.push_back(
- Args.MakeArgString(std::string("-i:") + A->getValue(0)));
- }
- CmdArgs.push_back(II.getFilename());
- CmdArgs.push_back(
- Args.MakeArgString(std::string("-o:") + Output.getFilename()));
-
- std::string Exec =
- Args.MakeArgString(getToolChain().GetProgramPath("moviAsm"));
- C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Args.MakeArgString(Exec), CmdArgs,
- Inputs, Output));
-}
-
-void tools::Myriad::Linker::ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
- const auto &TC =
- static_cast<const toolchains::MyriadToolChain &>(getToolChain());
- const llvm::Triple &T = TC.getTriple();
- ArgStringList CmdArgs;
- bool UseStartfiles =
- !Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles);
- bool UseDefaultLibs =
- !Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs);
- // Silence warning if the args contain both -nostdlib and -stdlib=.
- Args.getLastArg(options::OPT_stdlib_EQ);
-
- if (T.getArch() == llvm::Triple::sparc)
- CmdArgs.push_back("-EB");
- else // SHAVE assumes little-endian, and sparcel is expressly so.
- CmdArgs.push_back("-EL");
-
- // The remaining logic is mostly like gnutools::Linker::ConstructJob,
- // but we never pass through a --sysroot option and various other bits.
- // For example, there are no sanitizers (yet) nor gold linker.
-
- // Eat some arguments that may be present but have no effect.
- Args.ClaimAllArgs(options::OPT_g_Group);
- Args.ClaimAllArgs(options::OPT_w);
- Args.ClaimAllArgs(options::OPT_static_libgcc);
-
- if (Args.hasArg(options::OPT_s)) // Pass the 'strip' option.
- CmdArgs.push_back("-s");
-
- CmdArgs.push_back("-o");
- CmdArgs.push_back(Output.getFilename());
-
- if (UseStartfiles) {
- // If you want startfiles, it means you want the builtin crti and crtbegin,
- // but not crt0. Myriad link commands provide their own crt0.o as needed.
- CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crti.o")));
- CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crtbegin.o")));
- }
-
- Args.AddAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
- options::OPT_e, options::OPT_s, options::OPT_t,
- options::OPT_Z_Flag, options::OPT_r});
-
- TC.AddFilePathLibArgs(Args, CmdArgs);
-
- bool NeedsSanitizerDeps = addSanitizerRuntimes(TC, Args, CmdArgs);
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
-
- if (UseDefaultLibs) {
- if (NeedsSanitizerDeps)
- linkSanitizerRuntimeDeps(TC, CmdArgs);
- if (C.getDriver().CCCIsCXX()) {
- if (TC.GetCXXStdlibType(Args) == ToolChain::CST_Libcxx) {
- CmdArgs.push_back("-lc++");
- CmdArgs.push_back("-lc++abi");
- } else
- CmdArgs.push_back("-lstdc++");
- }
- if (T.getOS() == llvm::Triple::RTEMS) {
- CmdArgs.push_back("--start-group");
- CmdArgs.push_back("-lc");
- CmdArgs.push_back("-lgcc"); // circularly dependent on rtems
- // You must provide your own "-L" option to enable finding these.
- CmdArgs.push_back("-lrtemscpu");
- CmdArgs.push_back("-lrtemsbsp");
- CmdArgs.push_back("--end-group");
- } else {
- CmdArgs.push_back("-lc");
- CmdArgs.push_back("-lgcc");
- }
- }
- if (UseStartfiles) {
- CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crtend.o")));
- CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crtn.o")));
- }
-
- std::string Exec =
- Args.MakeArgString(TC.GetProgramPath("sparc-myriad-rtems-ld"));
- C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), Args.MakeArgString(Exec),
- CmdArgs, Inputs, Output));
-}
-
-MyriadToolChain::MyriadToolChain(const Driver &D, const llvm::Triple &Triple,
- const ArgList &Args)
- : Generic_ELF(D, Triple, Args) {
- // If a target of 'sparc-myriad-elf' is specified to clang, it wants to use
- // 'sparc-myriad--elf' (note the unknown OS) as the canonical triple.
- // This won't work to find gcc. Instead we give the installation detector an
- // extra triple, which is preferable to further hacks of the logic that at
- // present is based solely on getArch(). In particular, it would be wrong to
- // choose the myriad installation when targeting a non-myriad sparc install.
- switch (Triple.getArch()) {
- default:
- D.Diag(clang::diag::err_target_unsupported_arch)
- << Triple.getArchName() << "myriad";
- LLVM_FALLTHROUGH;
- case llvm::Triple::shave:
- return;
- case llvm::Triple::sparc:
- case llvm::Triple::sparcel:
- GCCInstallation.init(Triple, Args, {"sparc-myriad-rtems"});
- }
-
- if (GCCInstallation.isValid()) {
- // This directory contains crt{i,n,begin,end}.o as well as libgcc.
- // These files are tied to a particular version of gcc.
- SmallString<128> CompilerSupportDir(GCCInstallation.getInstallPath());
- addPathIfExists(D, CompilerSupportDir, getFilePaths());
- }
- // libstd++ and libc++ must both be found in this one place.
- addPathIfExists(D, D.Dir + "/../sparc-myriad-rtems/lib", getFilePaths());
-}
-
-MyriadToolChain::~MyriadToolChain() {}
-
-void MyriadToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
- if (!DriverArgs.hasArg(clang::driver::options::OPT_nostdinc))
- addSystemInclude(DriverArgs, CC1Args, getDriver().SysRoot + "/include");
-}
-
-void MyriadToolChain::addLibCxxIncludePaths(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const {
- std::string Path(getDriver().getInstalledDir());
- addSystemInclude(DriverArgs, CC1Args, Path + "/../include/c++/v1");
-}
-
-void MyriadToolChain::addLibStdCxxIncludePaths(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const {
- StringRef LibDir = GCCInstallation.getParentLibPath();
- const GCCVersion &Version = GCCInstallation.getVersion();
- StringRef TripleStr = GCCInstallation.getTriple().str();
- const Multilib &Multilib = GCCInstallation.getMultilib();
- addLibStdCXXIncludePaths(
- LibDir.str() + "/../" + TripleStr.str() + "/include/c++/" + Version.Text,
- TripleStr, Multilib.includeSuffix(), DriverArgs, CC1Args);
-}
-
-// MyriadToolChain handles several triples:
-// {shave,sparc{,el}}-myriad-{rtems,unknown}-elf
-Tool *MyriadToolChain::SelectTool(const JobAction &JA) const {
- // The inherited method works fine if not targeting the SHAVE.
- if (!isShaveCompilation(getTriple()))
- return ToolChain::SelectTool(JA);
- switch (JA.getKind()) {
- case Action::PreprocessJobClass:
- case Action::CompileJobClass:
- if (!Compiler)
- Compiler.reset(new tools::SHAVE::Compiler(*this));
- return Compiler.get();
- case Action::AssembleJobClass:
- if (!Assembler)
- Assembler.reset(new tools::SHAVE::Assembler(*this));
- return Assembler.get();
- default:
- return ToolChain::getTool(JA.getKind());
- }
-}
-
-Tool *MyriadToolChain::buildLinker() const {
- return new tools::Myriad::Linker(*this);
-}
-
-SanitizerMask MyriadToolChain::getSupportedSanitizers() const {
- return SanitizerKind::Address;
-}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.h
deleted file mode 100644
index cae574bdcfea..000000000000
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Myriad.h
+++ /dev/null
@@ -1,103 +0,0 @@
-//===--- Myriad.h - Myriad ToolChain Implementations ------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_MYRIAD_H
-#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_MYRIAD_H
-
-#include "Gnu.h"
-#include "clang/Driver/Tool.h"
-#include "clang/Driver/ToolChain.h"
-
-namespace clang {
-namespace driver {
-namespace tools {
-
-/// SHAVE tools -- Directly call moviCompile and moviAsm
-namespace SHAVE {
-class LLVM_LIBRARY_VISIBILITY Compiler : public Tool {
-public:
- Compiler(const ToolChain &TC) : Tool("moviCompile", "movicompile", TC) {}
-
- bool hasIntegratedCPP() const override { return true; }
-
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
-};
-
-class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
-public:
- Assembler(const ToolChain &TC) : Tool("moviAsm", "moviAsm", TC) {}
-
- bool hasIntegratedCPP() const override { return false; } // not sure.
-
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
-};
-} // end namespace SHAVE
-
-/// The Myriad toolchain uses tools that are in two different namespaces.
-/// The Compiler and Assembler as defined above are in the SHAVE namespace,
-/// whereas the linker, which accepts code for a mixture of Sparc and SHAVE,
-/// is in the Myriad namespace.
-namespace Myriad {
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
-public:
- Linker(const ToolChain &TC) : Tool("shave::Linker", "ld", TC) {}
- bool hasIntegratedCPP() const override { return false; }
- bool isLinkJob() const override { return true; }
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
-};
-} // end namespace Myriad
-} // end namespace tools
-
-namespace toolchains {
-
-/// MyriadToolChain - A tool chain using either clang or the external compiler
-/// installed by the Movidius SDK to perform all subcommands.
-class LLVM_LIBRARY_VISIBILITY MyriadToolChain : public Generic_ELF {
-public:
- MyriadToolChain(const Driver &D, const llvm::Triple &Triple,
- const llvm::opt::ArgList &Args);
- ~MyriadToolChain() override;
-
- void
- AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const override;
- void addLibCxxIncludePaths(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const override;
- void addLibStdCxxIncludePaths(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const override;
- Tool *SelectTool(const JobAction &JA) const override;
- unsigned GetDefaultDwarfVersion() const override { return 2; }
- SanitizerMask getSupportedSanitizers() const override;
-
-protected:
- Tool *buildLinker() const override;
- bool isShaveCompilation(const llvm::Triple &T) const {
- return T.getArch() == llvm::Triple::shave;
- }
-
-private:
- mutable std::unique_ptr<Tool> Compiler;
- mutable std::unique_ptr<Tool> Assembler;
-};
-
-} // end namespace toolchains
-} // end namespace driver
-} // end namespace clang
-
-#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_MYRIAD_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/NaCl.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/NaCl.cpp
index 753459cb230b..22f038e5152f 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/NaCl.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/NaCl.cpp
@@ -31,8 +31,7 @@ void nacltools::AssemblerARM::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
- const toolchains::NaClToolChain &ToolChain =
- static_cast<const toolchains::NaClToolChain &>(getToolChain());
+ const auto &ToolChain = static_cast<const NaClToolChain &>(getToolChain());
InputInfo NaClMacros(types::TY_PP_Asm, ToolChain.GetNaClArmMacrosPath(),
"nacl-arm-macros.s");
InputInfoList NewInputs;
@@ -52,8 +51,7 @@ void nacltools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const ArgList &Args,
const char *LinkingOutput) const {
- const toolchains::NaClToolChain &ToolChain =
- static_cast<const toolchains::NaClToolChain &>(getToolChain());
+ const auto &ToolChain = static_cast<const NaClToolChain &>(getToolChain());
const Driver &D = ToolChain.getDriver();
const llvm::Triple::ArchType Arch = ToolChain.getArch();
const bool IsStatic =
@@ -120,8 +118,7 @@ void nacltools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
}
- Args.AddAllArgs(CmdArgs, options::OPT_L);
- Args.AddAllArgs(CmdArgs, options::OPT_u);
+ Args.addAllArgs(CmdArgs, {options::OPT_L, options::OPT_u});
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
@@ -308,6 +305,8 @@ void NaClToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
// if the value is libc++, and emits an error for other values.
GetCXXStdlibType(Args);
CmdArgs.push_back("-lc++");
+ if (Args.hasArg(options::OPT_fexperimental_library))
+ CmdArgs.push_back("-lc++experimental");
}
void NaClToolChain::addLibCxxIncludePaths(
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/NaCl.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/NaCl.h
index 5e5fdb583bb6..01d4719e7b92 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/NaCl.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/NaCl.h
@@ -27,7 +27,7 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
public:
Linker(const ToolChain &TC) : Tool("NaCl::Linker", "linker", TC) {}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp
index 1ce5a2a203c2..240bf5764b9c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.cpp
@@ -11,6 +11,7 @@
#include "Arch/Mips.h"
#include "Arch/Sparc.h"
#include "CommonArgs.h"
+#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/Options.h"
@@ -29,12 +30,16 @@ void netbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
- claimNoWarnArgs(Args);
+ const auto &ToolChain = static_cast<const NetBSD &>(getToolChain());
+ const Driver &D = ToolChain.getDriver();
+ const llvm::Triple &Triple = ToolChain.getTriple();
ArgStringList CmdArgs;
+ claimNoWarnArgs(Args);
+
// GNU as needs different flags for creating the correct output format
// on architectures with different ABIs or optional feature sets.
- switch (getToolChain().getArch()) {
+ switch (ToolChain.getArch()) {
case llvm::Triple::x86:
CmdArgs.push_back("--32");
break;
@@ -44,8 +49,7 @@ void netbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::thumbeb: {
StringRef MArch, MCPU;
arm::getARMArchCPUFromArgs(Args, MArch, MCPU, /*FromAs*/ true);
- std::string Arch =
- arm::getARMTargetCPU(MCPU, MArch, getToolChain().getTriple());
+ std::string Arch = arm::getARMTargetCPU(MCPU, MArch, Triple);
CmdArgs.push_back(Args.MakeArgString("-mcpu=" + Arch));
break;
}
@@ -56,7 +60,7 @@ void netbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::mips64el: {
StringRef CPUName;
StringRef ABIName;
- mips::getMipsCPUAndABI(Args, getToolChain().getTriple(), CPUName, ABIName);
+ mips::getMipsCPUAndABI(Args, Triple, CPUName, ABIName);
CmdArgs.push_back("-march");
CmdArgs.push_back(CPUName.data());
@@ -64,29 +68,28 @@ void netbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-mabi");
CmdArgs.push_back(mips::getGnuCompatibleMipsABIName(ABIName).data());
- if (getToolChain().getTriple().isLittleEndian())
+ if (Triple.isLittleEndian())
CmdArgs.push_back("-EL");
else
CmdArgs.push_back("-EB");
- AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
+ AddAssemblerKPIC(ToolChain, Args, CmdArgs);
break;
}
- case llvm::Triple::sparc:
- case llvm::Triple::sparcel: {
+ case llvm::Triple::sparc: {
CmdArgs.push_back("-32");
- std::string CPU = getCPUName(Args, getToolChain().getTriple());
- CmdArgs.push_back(sparc::getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
- AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
+ std::string CPU = getCPUName(D, Args, Triple);
+ CmdArgs.push_back(sparc::getSparcAsmModeForCPU(CPU, Triple));
+ AddAssemblerKPIC(ToolChain, Args, CmdArgs);
break;
}
case llvm::Triple::sparcv9: {
CmdArgs.push_back("-64");
- std::string CPU = getCPUName(Args, getToolChain().getTriple());
- CmdArgs.push_back(sparc::getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
- AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
+ std::string CPU = getCPUName(D, Args, Triple);
+ CmdArgs.push_back(sparc::getSparcAsmModeForCPU(CPU, Triple));
+ AddAssemblerKPIC(ToolChain, Args, CmdArgs);
break;
}
@@ -102,7 +105,7 @@ void netbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
for (const auto &II : Inputs)
CmdArgs.push_back(II.getFilename());
- const char *Exec = Args.MakeArgString((getToolChain().GetProgramPath("as")));
+ const char *Exec = Args.MakeArgString((ToolChain.GetProgramPath("as")));
C.addCommand(std::make_unique<Command>(JA, *this,
ResponseFileSupport::AtFileCurCP(),
Exec, CmdArgs, Inputs, Output));
@@ -113,27 +116,31 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
- const toolchains::NetBSD &ToolChain =
- static_cast<const toolchains::NetBSD &>(getToolChain());
+ const auto &ToolChain = static_cast<const NetBSD &>(getToolChain());
const Driver &D = ToolChain.getDriver();
+ const llvm::Triple &Triple = ToolChain.getTriple();
+ const llvm::Triple::ArchType Arch = ToolChain.getArch();
+ const bool Static = Args.hasArg(options::OPT_static);
+ const bool Shared = Args.hasArg(options::OPT_shared);
+ const bool Pie = Args.hasArg(options::OPT_pie);
ArgStringList CmdArgs;
if (!D.SysRoot.empty())
CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
CmdArgs.push_back("--eh-frame-hdr");
- if (Args.hasArg(options::OPT_static)) {
+ if (Static) {
CmdArgs.push_back("-Bstatic");
- if (Args.hasArg(options::OPT_pie)) {
+ if (Pie) {
Args.AddAllArgs(CmdArgs, options::OPT_pie);
CmdArgs.push_back("--no-dynamic-linker");
}
} else {
if (Args.hasArg(options::OPT_rdynamic))
CmdArgs.push_back("-export-dynamic");
- if (Args.hasArg(options::OPT_shared)) {
- CmdArgs.push_back("-Bshareable");
- } else {
+ if (Shared) {
+ CmdArgs.push_back("-shared");
+ } else if (!Args.hasArg(options::OPT_r)) {
Args.AddAllArgs(CmdArgs, options::OPT_pie);
CmdArgs.push_back("-dynamic-linker");
CmdArgs.push_back("/libexec/ld.elf_so");
@@ -142,7 +149,7 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// Many NetBSD architectures support more than one ABI.
// Determine the correct emulation for ld.
- switch (ToolChain.getArch()) {
+ switch (Arch) {
case llvm::Triple::x86:
CmdArgs.push_back("-m");
CmdArgs.push_back("elf_i386");
@@ -150,7 +157,7 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::arm:
case llvm::Triple::thumb:
CmdArgs.push_back("-m");
- switch (ToolChain.getTriple().getEnvironment()) {
+ switch (Triple.getEnvironment()) {
case llvm::Triple::EABI:
case llvm::Triple::GNUEABI:
CmdArgs.push_back("armelf_nbsd_eabi");
@@ -168,7 +175,7 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::thumbeb:
arm::appendBE8LinkFlag(Args, CmdArgs, ToolChain.getEffectiveTriple());
CmdArgs.push_back("-m");
- switch (ToolChain.getTriple().getEnvironment()) {
+ switch (Triple.getEnvironment()) {
case llvm::Triple::EABI:
case llvm::Triple::GNUEABI:
CmdArgs.push_back("armelfb_nbsd_eabi");
@@ -186,13 +193,13 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::mips64el:
if (mips::hasMipsAbiArg(Args, "32")) {
CmdArgs.push_back("-m");
- if (ToolChain.getArch() == llvm::Triple::mips64)
+ if (Arch == llvm::Triple::mips64)
CmdArgs.push_back("elf32btsmip");
else
CmdArgs.push_back("elf32ltsmip");
} else if (mips::hasMipsAbiArg(Args, "64")) {
CmdArgs.push_back("-m");
- if (ToolChain.getArch() == llvm::Triple::mips64)
+ if (Arch == llvm::Triple::mips64)
CmdArgs.push_back("elf64btsmip");
else
CmdArgs.push_back("elf64ltsmip");
@@ -209,6 +216,16 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("elf64ppc");
break;
+ case llvm::Triple::riscv32:
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf32lriscv");
+ break;
+
+ case llvm::Triple::riscv64:
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf64lriscv");
+ break;
+
case llvm::Triple::sparc:
CmdArgs.push_back("-m");
CmdArgs.push_back("elf32_sparc");
@@ -223,94 +240,105 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
break;
}
+ if (Triple.isRISCV())
+ CmdArgs.push_back("-X");
+
+ assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
if (Output.isFilename()) {
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- } else {
- assert(Output.isNothing() && "Invalid output.");
}
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
- if (!Args.hasArg(options::OPT_shared)) {
- CmdArgs.push_back(
- Args.MakeArgString(ToolChain.GetFilePath("crt0.o")));
- }
- CmdArgs.push_back(
- Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
- if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie)) {
- CmdArgs.push_back(
- Args.MakeArgString(ToolChain.GetFilePath("crtbeginS.o")));
- } else {
- CmdArgs.push_back(
- Args.MakeArgString(ToolChain.GetFilePath("crtbegin.o")));
- }
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_r)) {
+ const char *crt0 = nullptr;
+ const char *crtbegin = nullptr;
+ if (!Shared)
+ crt0 = "crt0.o";
+
+ if (Shared || Pie)
+ crtbegin = "crtbeginS.o";
+ else
+ crtbegin = "crtbegin.o";
+
+ if (crt0)
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crt0)));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
}
- Args.AddAllArgs(CmdArgs, options::OPT_L);
- Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
- Args.AddAllArgs(CmdArgs, options::OPT_e);
- Args.AddAllArgs(CmdArgs, options::OPT_s);
- Args.AddAllArgs(CmdArgs, options::OPT_t);
- Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag);
- Args.AddAllArgs(CmdArgs, options::OPT_r);
+ Args.addAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
+ options::OPT_s, options::OPT_t, options::OPT_r});
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
- bool NeedsSanitizerDeps = addSanitizerRuntimes(getToolChain(), Args, CmdArgs);
+ bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
bool NeedsXRayDeps = addXRayRuntime(ToolChain, Args, CmdArgs);
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
- const SanitizerArgs &SanArgs = ToolChain.getSanitizerArgs();
+ const SanitizerArgs &SanArgs = ToolChain.getSanitizerArgs(Args);
if (SanArgs.needsSharedRt()) {
CmdArgs.push_back("-rpath");
- CmdArgs.push_back(Args.MakeArgString(
- ToolChain.getCompilerRTPath().c_str()));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.getCompilerRTPath()));
}
- unsigned Major, Minor, Micro;
- ToolChain.getTriple().getOSVersion(Major, Minor, Micro);
bool useLibgcc = true;
- if (Major >= 7 || Major == 0) {
- switch (ToolChain.getArch()) {
- case llvm::Triple::aarch64:
- case llvm::Triple::aarch64_be:
- case llvm::Triple::arm:
- case llvm::Triple::armeb:
- case llvm::Triple::thumb:
- case llvm::Triple::thumbeb:
- case llvm::Triple::ppc:
- case llvm::Triple::ppc64:
- case llvm::Triple::ppc64le:
- case llvm::Triple::sparc:
- case llvm::Triple::sparcv9:
- case llvm::Triple::x86:
- case llvm::Triple::x86_64:
- useLibgcc = false;
- break;
- default:
- break;
- }
+ switch (ToolChain.getArch()) {
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_be:
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
+ case llvm::Triple::sparc:
+ case llvm::Triple::sparcv9:
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ useLibgcc = false;
+ break;
+ default:
+ break;
}
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs,
+ options::OPT_r)) {
// Use the static OpenMP runtime with -static-openmp
- bool StaticOpenMP = Args.hasArg(options::OPT_static_openmp) &&
- !Args.hasArg(options::OPT_static);
- addOpenMPRuntime(CmdArgs, getToolChain(), Args, StaticOpenMP);
+ bool StaticOpenMP = Args.hasArg(options::OPT_static_openmp) && !Static;
+ addOpenMPRuntime(CmdArgs, ToolChain, Args, StaticOpenMP);
if (D.CCCIsCXX()) {
if (ToolChain.ShouldLinkCXXStdlib(Args))
ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
CmdArgs.push_back("-lm");
}
+
+ // Silence warnings when linking C code with a C++ '-stdlib' argument.
+ Args.ClaimAllArgs(options::OPT_stdlib_EQ);
+
+ // Additional linker set-up and flags for Fortran. This is required in order
+ // to generate executables. As Fortran runtime depends on the C runtime,
+ // these dependencies need to be listed before the C runtime below (i.e.
+ // AddRunTimeLibs).
+ if (D.IsFlangMode()) {
+ addFortranRuntimeLibraryPath(ToolChain, Args, CmdArgs);
+ addFortranRuntimeLibs(ToolChain, Args, CmdArgs);
+ CmdArgs.push_back("-lm");
+ }
+
if (NeedsSanitizerDeps)
- linkSanitizerRuntimeDeps(getToolChain(), CmdArgs);
+ linkSanitizerRuntimeDeps(ToolChain, Args, CmdArgs);
if (NeedsXRayDeps)
- linkXRayRuntimeDeps(ToolChain, CmdArgs);
+ linkXRayRuntimeDeps(ToolChain, Args, CmdArgs);
if (Args.hasArg(options::OPT_pthread))
CmdArgs.push_back("-lpthread");
CmdArgs.push_back("-lc");
if (useLibgcc) {
- if (Args.hasArg(options::OPT_static)) {
+ if (Static) {
// libgcc_eh depends on libc, so resolve as much as possible,
// pull in any new requirements from libc and then get the rest
// of libgcc.
@@ -326,13 +354,15 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
- if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
- CmdArgs.push_back(
- Args.MakeArgString(ToolChain.GetFilePath("crtendS.o")));
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_r)) {
+ const char *crtend = nullptr;
+ if (Shared || Pie)
+ crtend = "crtendS.o";
else
- CmdArgs.push_back(
- Args.MakeArgString(ToolChain.GetFilePath("crtend.o")));
+ crtend = "crtend.o";
+
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend)));
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
}
@@ -356,7 +386,7 @@ NetBSD::NetBSD(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
// what all logic is needed to emulate the '=' prefix here.
switch (Triple.getArch()) {
case llvm::Triple::x86:
- getFilePaths().push_back("=/usr/lib/i386");
+ getFilePaths().push_back(concat(getDriver().SysRoot, "/usr/lib/i386"));
break;
case llvm::Triple::arm:
case llvm::Triple::armeb:
@@ -365,35 +395,35 @@ NetBSD::NetBSD(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
switch (Triple.getEnvironment()) {
case llvm::Triple::EABI:
case llvm::Triple::GNUEABI:
- getFilePaths().push_back("=/usr/lib/eabi");
+ getFilePaths().push_back(concat(getDriver().SysRoot, "/usr/lib/eabi"));
break;
case llvm::Triple::EABIHF:
case llvm::Triple::GNUEABIHF:
- getFilePaths().push_back("=/usr/lib/eabihf");
+ getFilePaths().push_back(concat(getDriver().SysRoot, "/usr/lib/eabihf"));
break;
default:
- getFilePaths().push_back("=/usr/lib/oabi");
+ getFilePaths().push_back(concat(getDriver().SysRoot, "/usr/lib/oabi"));
break;
}
break;
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
if (tools::mips::hasMipsAbiArg(Args, "o32"))
- getFilePaths().push_back("=/usr/lib/o32");
+ getFilePaths().push_back(concat(getDriver().SysRoot, "/usr/lib/o32"));
else if (tools::mips::hasMipsAbiArg(Args, "64"))
- getFilePaths().push_back("=/usr/lib/64");
+ getFilePaths().push_back(concat(getDriver().SysRoot, "/usr/lib/64"));
break;
case llvm::Triple::ppc:
- getFilePaths().push_back("=/usr/lib/powerpc");
+ getFilePaths().push_back(concat(getDriver().SysRoot, "/usr/lib/powerpc"));
break;
case llvm::Triple::sparc:
- getFilePaths().push_back("=/usr/lib/sparc");
+ getFilePaths().push_back(concat(getDriver().SysRoot, "/usr/lib/sparc"));
break;
default:
break;
}
- getFilePaths().push_back("=/usr/lib");
+ getFilePaths().push_back(concat(getDriver().SysRoot, "/usr/lib"));
}
}
@@ -404,40 +434,72 @@ Tool *NetBSD::buildAssembler() const {
Tool *NetBSD::buildLinker() const { return new tools::netbsd::Linker(*this); }
ToolChain::CXXStdlibType NetBSD::GetDefaultCXXStdlibType() const {
- unsigned Major, Minor, Micro;
- getTriple().getOSVersion(Major, Minor, Micro);
- if (Major >= 7 || Major == 0) {
- switch (getArch()) {
- case llvm::Triple::aarch64:
- case llvm::Triple::aarch64_be:
- case llvm::Triple::arm:
- case llvm::Triple::armeb:
- case llvm::Triple::thumb:
- case llvm::Triple::thumbeb:
- case llvm::Triple::ppc:
- case llvm::Triple::ppc64:
- case llvm::Triple::ppc64le:
- case llvm::Triple::sparc:
- case llvm::Triple::sparcv9:
- case llvm::Triple::x86:
- case llvm::Triple::x86_64:
- return ToolChain::CST_Libcxx;
- default:
- break;
- }
+ switch (getArch()) {
+ case llvm::Triple::aarch64:
+ case llvm::Triple::aarch64_be:
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
+ case llvm::Triple::sparc:
+ case llvm::Triple::sparcv9:
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ return ToolChain::CST_Libcxx;
+ default:
+ break;
}
return ToolChain::CST_Libstdcxx;
}
+void NetBSD::AddClangSystemIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+
+ if (DriverArgs.hasArg(clang::driver::options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> Dir(D.ResourceDir);
+ llvm::sys::path::append(Dir, "include");
+ addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ // Check for configure-time C include directories.
+ StringRef CIncludeDirs(C_INCLUDE_DIRS);
+ if (CIncludeDirs != "") {
+ SmallVector<StringRef, 5> dirs;
+ CIncludeDirs.split(dirs, ":");
+ for (StringRef dir : dirs) {
+ StringRef Prefix =
+ llvm::sys::path::is_absolute(dir) ? StringRef(D.SysRoot) : "";
+ addExternCSystemInclude(DriverArgs, CC1Args, Prefix + dir);
+ }
+ return;
+ }
+
+ addExternCSystemInclude(DriverArgs, CC1Args,
+ concat(D.SysRoot, "/usr/include"));
+}
+
void NetBSD::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
const std::string Candidates[] = {
// directory relative to build tree
- getDriver().Dir + "/../include/c++/v1",
+ concat(getDriver().Dir, "/../include/c++/v1"),
// system install with full upstream path
- getDriver().SysRoot + "/usr/include/c++/v1",
+ concat(getDriver().SysRoot, "/usr/include/c++/v1"),
// system install from src
- getDriver().SysRoot + "/usr/include/c++",
+ concat(getDriver().SysRoot, "/usr/include/c++"),
};
for (const auto &IncludePath : Candidates) {
@@ -452,7 +514,7 @@ void NetBSD::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
void NetBSD::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
- addLibStdCXXIncludePaths(getDriver().SysRoot + "/usr/include/g++", "", "",
+ addLibStdCXXIncludePaths(concat(getDriver().SysRoot, "/usr/include/g++"), "", "",
DriverArgs, CC1Args);
}
@@ -473,7 +535,6 @@ SanitizerMask NetBSD::getSupportedSanitizers() const {
Res |= SanitizerKind::Address;
Res |= SanitizerKind::PointerCompare;
Res |= SanitizerKind::PointerSubtract;
- Res |= SanitizerKind::Function;
Res |= SanitizerKind::Leak;
Res |= SanitizerKind::SafeStack;
Res |= SanitizerKind::Scudo;
@@ -496,18 +557,19 @@ SanitizerMask NetBSD::getSupportedSanitizers() const {
void NetBSD::addClangTargetOptions(const ArgList &DriverArgs,
ArgStringList &CC1Args,
Action::OffloadKind) const {
- const SanitizerArgs &SanArgs = getSanitizerArgs();
+ const SanitizerArgs &SanArgs = getSanitizerArgs(DriverArgs);
if (SanArgs.hasAnySanitizer())
CC1Args.push_back("-D_REENTRANT");
- unsigned Major, Minor, Micro;
- getTriple().getOSVersion(Major, Minor, Micro);
+ VersionTuple OsVersion = getTriple().getOSVersion();
bool UseInitArrayDefault =
- Major >= 9 || Major == 0 ||
- getTriple().getArch() == llvm::Triple::aarch64 ||
- getTriple().getArch() == llvm::Triple::aarch64_be ||
- getTriple().getArch() == llvm::Triple::arm ||
- getTriple().getArch() == llvm::Triple::armeb;
+ OsVersion >= VersionTuple(9) || OsVersion.getMajor() == 0 ||
+ getTriple().getArch() == llvm::Triple::aarch64 ||
+ getTriple().getArch() == llvm::Triple::aarch64_be ||
+ getTriple().getArch() == llvm::Triple::arm ||
+ getTriple().getArch() == llvm::Triple::armeb ||
+ getTriple().getArch() == llvm::Triple::riscv32 ||
+ getTriple().getArch() == llvm::Triple::riscv64;
if (!DriverArgs.hasFlag(options::OPT_fuse_init_array,
options::OPT_fno_use_init_array, UseInitArrayDefault))
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.h
index 8348554fd149..96303acaa009 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/NetBSD.h
@@ -17,9 +17,9 @@ namespace clang {
namespace driver {
namespace tools {
-/// netbsd -- Directly call GNU Binutils assembler and linker
+/// Directly call GNU Binutils assembler and linker
namespace netbsd {
-class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
+class LLVM_LIBRARY_VISIBILITY Assembler final : public Tool {
public:
Assembler(const ToolChain &TC) : Tool("netbsd::Assembler", "assembler", TC) {}
@@ -31,7 +31,7 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
public:
Linker(const ToolChain &TC) : Tool("netbsd::Linker", "linker", TC) {}
@@ -58,6 +58,9 @@ public:
CXXStdlibType GetDefaultCXXStdlibType() const override;
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
void addLibCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
@@ -65,8 +68,9 @@ public:
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- bool IsUnwindTablesDefault(const llvm::opt::ArgList &Args) const override {
- return true;
+ UnwindTableLevel
+ getDefaultUnwindTableLevel(const llvm::opt::ArgList &Args) const override {
+ return UnwindTableLevel::Asynchronous;
}
llvm::ExceptionHandling GetExceptionModel(
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/OHOS.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/OHOS.cpp
new file mode 100644
index 000000000000..1e50c9d71d59
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/OHOS.cpp
@@ -0,0 +1,419 @@
+//===--- OHOS.cpp - OHOS ToolChain Implementations --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "OHOS.h"
+#include "Arch/ARM.h"
+#include "CommonArgs.h"
+#include "clang/Config/config.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Options.h"
+#include "clang/Driver/SanitizerArgs.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/ProfileData/InstrProf.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/Support/ScopedPrinter.h"
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang::driver::tools;
+using namespace clang;
+using namespace llvm::opt;
+using namespace clang::driver::tools::arm;
+
+using tools::addMultilibFlag;
+using tools::addPathIfExists;
+
+static bool findOHOSMuslMultilibs(const Multilib::flags_list &Flags,
+ DetectedMultilibs &Result) {
+ MultilibSet Multilibs;
+ Multilibs.push_back(Multilib());
+ // -mcpu=cortex-a7
+ // -mfloat-abi=soft -mfloat-abi=softfp -mfloat-abi=hard
+ // -mfpu=neon-vfpv4
+ Multilibs.push_back(
+ Multilib("/a7_soft", {}, {}, {"-mcpu=cortex-a7", "-mfloat-abi=soft"}));
+
+ Multilibs.push_back(
+ Multilib("/a7_softfp_neon-vfpv4", {}, {},
+ {"-mcpu=cortex-a7", "-mfloat-abi=softfp", "-mfpu=neon-vfpv4"}));
+
+ Multilibs.push_back(
+ Multilib("/a7_hard_neon-vfpv4", {}, {},
+ {"-mcpu=cortex-a7", "-mfloat-abi=hard", "-mfpu=neon-vfpv4"}));
+
+ if (Multilibs.select(Flags, Result.SelectedMultilibs)) {
+ Result.Multilibs = Multilibs;
+ return true;
+ }
+ return false;
+}
+
+static bool findOHOSMultilibs(const Driver &D,
+ const ToolChain &TC,
+ const llvm::Triple &TargetTriple,
+ StringRef Path, const ArgList &Args,
+ DetectedMultilibs &Result) {
+ Multilib::flags_list Flags;
+ bool IsA7 = false;
+ if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
+ IsA7 = A->getValue() == StringRef("cortex-a7");
+ addMultilibFlag(IsA7, "-mcpu=cortex-a7", Flags);
+
+ bool IsMFPU = false;
+ if (const Arg *A = Args.getLastArg(options::OPT_mfpu_EQ))
+ IsMFPU = A->getValue() == StringRef("neon-vfpv4");
+ addMultilibFlag(IsMFPU, "-mfpu=neon-vfpv4", Flags);
+
+ tools::arm::FloatABI ARMFloatABI = getARMFloatABI(D, TargetTriple, Args);
+ addMultilibFlag((ARMFloatABI == tools::arm::FloatABI::Soft),
+ "-mfloat-abi=soft", Flags);
+ addMultilibFlag((ARMFloatABI == tools::arm::FloatABI::SoftFP),
+ "-mfloat-abi=softfp", Flags);
+ addMultilibFlag((ARMFloatABI == tools::arm::FloatABI::Hard),
+ "-mfloat-abi=hard", Flags);
+
+ return findOHOSMuslMultilibs(Flags, Result);
+}
+
+std::string OHOS::getMultiarchTriple(const llvm::Triple &T) const {
+ // For most architectures, just use whatever we have rather than trying to be
+ // clever.
+ switch (T.getArch()) {
+ default:
+ break;
+
+ // We use the existence of '/lib/<triple>' as a directory to detect some
+ // common linux triples that don't quite match the Clang triple for both
+ // 32-bit and 64-bit targets. Multiarch fixes its install triples to these
+ // regardless of what the actual target triple is.
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ return T.isOSLiteOS() ? "arm-liteos-ohos" : "arm-linux-ohos";
+ case llvm::Triple::riscv32:
+ return "riscv32-linux-ohos";
+ case llvm::Triple::riscv64:
+ return "riscv64-linux-ohos";
+ case llvm::Triple::mipsel:
+ return "mipsel-linux-ohos";
+ case llvm::Triple::x86:
+ return "i686-linux-ohos";
+ case llvm::Triple::x86_64:
+ return "x86_64-linux-ohos";
+ case llvm::Triple::aarch64:
+ return "aarch64-linux-ohos";
+ }
+ return T.str();
+}
+
+std::string OHOS::getMultiarchTriple(const Driver &D,
+ const llvm::Triple &TargetTriple,
+ StringRef SysRoot) const {
+ return getMultiarchTriple(TargetTriple);
+}
+
+static std::string makePath(const std::initializer_list<std::string> &IL) {
+ SmallString<128> P;
+ for (const auto &S : IL)
+ llvm::sys::path::append(P, S);
+ return static_cast<std::string>(P.str());
+}
+
+/// OHOS Toolchain
+OHOS::OHOS(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+ std::string SysRoot = computeSysRoot();
+
+ // Select the correct multilib according to the given arguments.
+ DetectedMultilibs Result;
+ findOHOSMultilibs(D, *this, Triple, "", Args, Result);
+ Multilibs = Result.Multilibs;
+ SelectedMultilibs = Result.SelectedMultilibs;
+ if (!SelectedMultilibs.empty()) {
+ SelectedMultilib = SelectedMultilibs.back();
+ }
+
+ getFilePaths().clear();
+ for (const auto &CandidateLibPath : getArchSpecificLibPaths())
+ if (getVFS().exists(CandidateLibPath))
+ getFilePaths().push_back(CandidateLibPath);
+
+ getLibraryPaths().clear();
+ for (auto &Path : getRuntimePaths())
+ if (getVFS().exists(Path))
+ getLibraryPaths().push_back(Path);
+
+ // OHOS sysroots contain a library directory for each supported OS
+ // version as well as some unversioned libraries in the usual multiarch
+ // directory. Support --target=aarch64-linux-ohosX.Y.Z or
+ // --target=aarch64-linux-ohosX.Y or --target=aarch64-linux-ohosX
+ path_list &Paths = getFilePaths();
+ std::string SysRootLibPath = makePath({SysRoot, "usr", "lib"});
+ std::string MultiarchTriple = getMultiarchTriple(getTriple());
+ addPathIfExists(D, makePath({SysRootLibPath, SelectedMultilib.gccSuffix()}),
+ Paths);
+ addPathIfExists(D,
+ makePath({D.Dir, "..", "lib", MultiarchTriple,
+ SelectedMultilib.gccSuffix()}),
+ Paths);
+
+ addPathIfExists(
+ D,
+ makePath({SysRootLibPath, MultiarchTriple, SelectedMultilib.gccSuffix()}),
+ Paths);
+}
+
+ToolChain::RuntimeLibType OHOS::GetRuntimeLibType(
+ const ArgList &Args) const {
+ if (Arg *A = Args.getLastArg(clang::driver::options::OPT_rtlib_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value != "compiler-rt")
+ getDriver().Diag(clang::diag::err_drv_invalid_rtlib_name)
+ << A->getAsString(Args);
+ }
+
+ return ToolChain::RLT_CompilerRT;
+}
+
+ToolChain::CXXStdlibType
+OHOS::GetCXXStdlibType(const ArgList &Args) const {
+ if (Arg *A = Args.getLastArg(options::OPT_stdlib_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value != "libc++")
+ getDriver().Diag(diag::err_drv_invalid_stdlib_name)
+ << A->getAsString(Args);
+ }
+
+ return ToolChain::CST_Libcxx;
+}
+
+void OHOS::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+ const llvm::Triple &Triple = getTriple();
+ std::string SysRoot = computeSysRoot();
+
+ if (DriverArgs.hasArg(options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "include");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ // Check for configure-time C include directories.
+ StringRef CIncludeDirs(C_INCLUDE_DIRS);
+ if (CIncludeDirs != "") {
+ SmallVector<StringRef, 5> dirs;
+ CIncludeDirs.split(dirs, ":");
+ for (StringRef dir : dirs) {
+ StringRef Prefix =
+ llvm::sys::path::is_absolute(dir) ? StringRef(SysRoot) : "";
+ addExternCSystemInclude(DriverArgs, CC1Args, Prefix + dir);
+ }
+ return;
+ }
+
+ addExternCSystemInclude(DriverArgs, CC1Args,
+ SysRoot + "/usr/include/" +
+ getMultiarchTriple(Triple));
+ addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/include");
+ addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/include");
+}
+
+void OHOS::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx: {
+ std::string IncPath = makePath({getDriver().Dir, "..", "include"});
+ std::string IncTargetPath =
+ makePath({IncPath, getMultiarchTriple(getTriple()), "c++", "v1"});
+ if (getVFS().exists(IncTargetPath)) {
+ addSystemInclude(DriverArgs, CC1Args, makePath({IncPath, "c++", "v1"}));
+ addSystemInclude(DriverArgs, CC1Args, IncTargetPath);
+ }
+ break;
+ }
+
+ default:
+ llvm_unreachable("invalid stdlib name");
+ }
+}
+
+void OHOS::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ switch (GetCXXStdlibType(Args)) {
+ case ToolChain::CST_Libcxx:
+ CmdArgs.push_back("-lc++");
+ CmdArgs.push_back("-lc++abi");
+ CmdArgs.push_back("-lunwind");
+ break;
+
+ case ToolChain::CST_Libstdcxx:
+ llvm_unreachable("invalid stdlib name");
+ }
+}
+
+std::string OHOS::computeSysRoot() const {
+ std::string SysRoot =
+ !getDriver().SysRoot.empty()
+ ? getDriver().SysRoot
+ : makePath({getDriver().getInstalledDir(), "..", "..", "sysroot"});
+ if (!llvm::sys::fs::exists(SysRoot))
+ return std::string();
+
+ std::string ArchRoot = makePath({SysRoot, getMultiarchTriple(getTriple())});
+ return llvm::sys::fs::exists(ArchRoot) ? ArchRoot : SysRoot;
+}
+
+ToolChain::path_list OHOS::getRuntimePaths() const {
+ SmallString<128> P;
+ path_list Paths;
+ const Driver &D = getDriver();
+ const llvm::Triple &Triple = getTriple();
+
+ // First try the triple passed to driver as --target=<triple>.
+ P.assign(D.ResourceDir);
+ llvm::sys::path::append(P, "lib", D.getTargetTriple(), SelectedMultilib.gccSuffix());
+ Paths.push_back(P.c_str());
+
+ // Second try the normalized triple.
+ P.assign(D.ResourceDir);
+ llvm::sys::path::append(P, "lib", Triple.str(), SelectedMultilib.gccSuffix());
+ Paths.push_back(P.c_str());
+
+ // Third try the effective triple.
+ P.assign(D.ResourceDir);
+ std::string SysRoot = computeSysRoot();
+ llvm::sys::path::append(P, "lib", getMultiarchTriple(Triple),
+ SelectedMultilib.gccSuffix());
+ Paths.push_back(P.c_str());
+
+ return Paths;
+}
+
+std::string OHOS::getDynamicLinker(const ArgList &Args) const {
+ const llvm::Triple &Triple = getTriple();
+ const llvm::Triple::ArchType Arch = getArch();
+
+ assert(Triple.isMusl());
+ std::string ArchName;
+ bool IsArm = false;
+
+ switch (Arch) {
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ ArchName = "arm";
+ IsArm = true;
+ break;
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumbeb:
+ ArchName = "armeb";
+ IsArm = true;
+ break;
+ default:
+ ArchName = Triple.getArchName().str();
+ }
+ if (IsArm &&
+ (tools::arm::getARMFloatABI(*this, Args) == tools::arm::FloatABI::Hard))
+ ArchName += "hf";
+
+ return "/lib/ld-musl-" + ArchName + ".so.1";
+}
+
+std::string OHOS::getCompilerRT(const ArgList &Args, StringRef Component,
+ FileType Type) const {
+ SmallString<128> Path(getDriver().ResourceDir);
+ llvm::sys::path::append(Path, "lib", getMultiarchTriple(getTriple()),
+ SelectedMultilib.gccSuffix());
+ const char *Prefix =
+ Type == ToolChain::FT_Object ? "" : "lib";
+ const char *Suffix;
+ switch (Type) {
+ case ToolChain::FT_Object:
+ Suffix = ".o";
+ break;
+ case ToolChain::FT_Static:
+ Suffix = ".a";
+ break;
+ case ToolChain::FT_Shared:
+ Suffix = ".so";
+ break;
+ }
+ llvm::sys::path::append(
+ Path, Prefix + Twine("clang_rt.") + Component + Suffix);
+ return static_cast<std::string>(Path.str());
+}
+
+void OHOS::addExtraOpts(llvm::opt::ArgStringList &CmdArgs) const {
+ CmdArgs.push_back("-z");
+ CmdArgs.push_back("now");
+ CmdArgs.push_back("-z");
+ CmdArgs.push_back("relro");
+ CmdArgs.push_back("-z");
+ CmdArgs.push_back("max-page-size=4096");
+ // .gnu.hash section is not compatible with the MIPS target
+ if (getArch() != llvm::Triple::mipsel)
+ CmdArgs.push_back("--hash-style=both");
+#ifdef ENABLE_LINKER_BUILD_ID
+ CmdArgs.push_back("--build-id");
+#endif
+ CmdArgs.push_back("--enable-new-dtags");
+}
+
+SanitizerMask OHOS::getSupportedSanitizers() const {
+ SanitizerMask Res = ToolChain::getSupportedSanitizers();
+ Res |= SanitizerKind::Address;
+ Res |= SanitizerKind::PointerCompare;
+ Res |= SanitizerKind::PointerSubtract;
+ Res |= SanitizerKind::Fuzzer;
+ Res |= SanitizerKind::FuzzerNoLink;
+ Res |= SanitizerKind::Memory;
+ Res |= SanitizerKind::Vptr;
+ Res |= SanitizerKind::SafeStack;
+ Res |= SanitizerKind::Scudo;
+ // TODO: kASAN for liteos ??
+ // TODO: Support TSAN and HWASAN and update mask.
+ return Res;
+}
+
+// TODO: Make a base class for Linux and OHOS and move this there.
+void OHOS::addProfileRTLibs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const {
+ // Add linker option -u__llvm_profile_runtime to cause runtime
+ // initialization module to be linked in.
+ if (needsProfileRT(Args))
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine("-u", llvm::getInstrProfRuntimeHookVarName())));
+ ToolChain::addProfileRTLibs(Args, CmdArgs);
+}
+
+ToolChain::path_list OHOS::getArchSpecificLibPaths() const {
+ ToolChain::path_list Paths;
+ llvm::Triple Triple = getTriple();
+ Paths.push_back(
+ makePath({getDriver().ResourceDir, "lib", getMultiarchTriple(Triple)}));
+ return Paths;
+}
+
+ToolChain::UnwindLibType OHOS::GetUnwindLibType(const llvm::opt::ArgList &Args) const {
+ if (Args.getLastArg(options::OPT_unwindlib_EQ))
+ return Generic_ELF::GetUnwindLibType(Args);
+ return GetDefaultUnwindLibType();
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/OHOS.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/OHOS.h
new file mode 100644
index 000000000000..2a380420922d
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/OHOS.h
@@ -0,0 +1,95 @@
+//===--- OHOS.h - OHOS ToolChain Implementations ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_OHOS_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_OHOS_H
+
+#include "Linux.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
+
+namespace clang {
+namespace driver {
+namespace toolchains {
+
+class LLVM_LIBRARY_VISIBILITY OHOS : public Generic_ELF {
+public:
+ OHOS(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ bool HasNativeLLVMSupport() const override { return true; }
+
+ bool IsMathErrnoDefault() const override { return false; }
+
+ RuntimeLibType GetDefaultRuntimeLibType() const override {
+ return ToolChain::RLT_CompilerRT;
+ }
+ CXXStdlibType GetDefaultCXXStdlibType() const override {
+ return ToolChain::CST_Libcxx;
+ }
+ // Not add -funwind-tables by default
+ bool isPICDefault() const override { return false; }
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override { return true; }
+ bool isPICDefaultForced() const override { return false; }
+ UnwindLibType GetUnwindLibType(const llvm::opt::ArgList &Args) const override;
+ UnwindLibType GetDefaultUnwindLibType() const override { return UNW_CompilerRT; }
+
+ RuntimeLibType
+ GetRuntimeLibType(const llvm::opt::ArgList &Args) const override;
+ CXXStdlibType
+ GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void
+ AddClangCXXStdlibIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
+ std::string computeSysRoot() const override;
+ std::string getDynamicLinker(const llvm::opt::ArgList &Args) const override;
+
+ std::string
+ getCompilerRT(const llvm::opt::ArgList &Args, StringRef Component,
+ FileType Type = ToolChain::FT_Static) const override;
+
+ const char *getDefaultLinker() const override {
+ return "ld.lld";
+ }
+
+ Tool *buildLinker() const override {
+ return new tools::gnutools::Linker(*this);
+ }
+ Tool *buildAssembler() const override {
+ return new tools::gnutools::Assembler(*this);
+ }
+
+ path_list getRuntimePaths() const;
+
+protected:
+ std::string getMultiarchTriple(const llvm::Triple &T) const;
+ std::string getMultiarchTriple(const Driver &D,
+ const llvm::Triple &TargetTriple,
+ StringRef SysRoot) const override;
+ void addExtraOpts(llvm::opt::ArgStringList &CmdArgs) const override;
+ SanitizerMask getSupportedSanitizers() const override;
+ void addProfileRTLibs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+ path_list getArchSpecificLibPaths() const override;
+
+private:
+ Multilib SelectedMultilib;
+};
+
+} // end namespace toolchains
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_OHOS_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp
index 89828fbb6f5f..fd6aa4d7e684 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "OpenBSD.h"
+#include "Arch/ARM.h"
#include "Arch/Mips.h"
#include "Arch/Sparc.h"
#include "CommonArgs.h"
@@ -16,6 +17,7 @@
#include "clang/Driver/SanitizerArgs.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -28,16 +30,28 @@ void openbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
- claimNoWarnArgs(Args);
+ const auto &ToolChain = static_cast<const OpenBSD &>(getToolChain());
+ const Driver &D = ToolChain.getDriver();
+ const llvm::Triple &Triple = ToolChain.getTriple();
ArgStringList CmdArgs;
- switch (getToolChain().getArch()) {
+ claimNoWarnArgs(Args);
+
+ switch (ToolChain.getArch()) {
case llvm::Triple::x86:
// When building 32-bit code on OpenBSD/amd64, we have to explicitly
// instruct as in the base system to assemble 32-bit code.
CmdArgs.push_back("--32");
break;
+ case llvm::Triple::arm: {
+ StringRef MArch, MCPU;
+ arm::getARMArchCPUFromArgs(Args, MArch, MCPU, /*FromAs*/ true);
+ std::string Arch = arm::getARMTargetCPU(MCPU, MArch, Triple);
+ CmdArgs.push_back(Args.MakeArgString("-mcpu=" + Arch));
+ break;
+ }
+
case llvm::Triple::ppc:
CmdArgs.push_back("-mppc");
CmdArgs.push_back("-many");
@@ -45,9 +59,9 @@ void openbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::sparcv9: {
CmdArgs.push_back("-64");
- std::string CPU = getCPUName(Args, getToolChain().getTriple());
- CmdArgs.push_back(sparc::getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
- AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
+ std::string CPU = getCPUName(D, Args, Triple);
+ CmdArgs.push_back(sparc::getSparcAsmModeForCPU(CPU, Triple));
+ AddAssemblerKPIC(ToolChain, Args, CmdArgs);
break;
}
@@ -55,17 +69,20 @@ void openbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::mips64el: {
StringRef CPUName;
StringRef ABIName;
- mips::getMipsCPUAndABI(Args, getToolChain().getTriple(), CPUName, ABIName);
+ mips::getMipsCPUAndABI(Args, Triple, CPUName, ABIName);
+
+ CmdArgs.push_back("-march");
+ CmdArgs.push_back(CPUName.data());
CmdArgs.push_back("-mabi");
CmdArgs.push_back(mips::getGnuCompatibleMipsABIName(ABIName).data());
- if (getToolChain().getTriple().isLittleEndian())
+ if (Triple.isLittleEndian())
CmdArgs.push_back("-EL");
else
CmdArgs.push_back("-EB");
- AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
+ AddAssemblerKPIC(ToolChain, Args, CmdArgs);
break;
}
@@ -81,7 +98,7 @@ void openbsd::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
for (const auto &II : Inputs)
CmdArgs.push_back(II.getFilename());
- const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ const char *Exec = Args.MakeArgString(ToolChain.GetProgramPath("as"));
C.addCommand(std::make_unique<Command>(JA, *this,
ResponseFileSupport::AtFileCurCP(),
Exec, CmdArgs, Inputs, Output));
@@ -92,9 +109,15 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
- const toolchains::OpenBSD &ToolChain =
- static_cast<const toolchains::OpenBSD &>(getToolChain());
- const Driver &D = getToolChain().getDriver();
+ const auto &ToolChain = static_cast<const OpenBSD &>(getToolChain());
+ const Driver &D = ToolChain.getDriver();
+ const llvm::Triple::ArchType Arch = ToolChain.getArch();
+ const bool Static = Args.hasArg(options::OPT_static);
+ const bool Shared = Args.hasArg(options::OPT_shared);
+ const bool Profiling = Args.hasArg(options::OPT_pg);
+ const bool Pie = Args.hasArg(options::OPT_pie);
+ const bool Nopie = Args.hasArg(options::OPT_no_pie, options::OPT_nopie);
+ const bool Relocatable = Args.hasArg(options::OPT_r);
ArgStringList CmdArgs;
// Silence warning for "clang -g foo.o -o foo"
@@ -105,51 +128,55 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// handled somewhere else.
Args.ClaimAllArgs(options::OPT_w);
- if (ToolChain.getArch() == llvm::Triple::mips64)
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ if (Arch == llvm::Triple::mips64)
CmdArgs.push_back("-EB");
- else if (ToolChain.getArch() == llvm::Triple::mips64el)
+ else if (Arch == llvm::Triple::mips64el)
CmdArgs.push_back("-EL");
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_shared)) {
+ if (!Args.hasArg(options::OPT_nostdlib) && !Shared && !Relocatable) {
CmdArgs.push_back("-e");
CmdArgs.push_back("__start");
}
CmdArgs.push_back("--eh-frame-hdr");
- if (Args.hasArg(options::OPT_static)) {
+ if (Static) {
CmdArgs.push_back("-Bstatic");
} else {
if (Args.hasArg(options::OPT_rdynamic))
CmdArgs.push_back("-export-dynamic");
- CmdArgs.push_back("-Bdynamic");
- if (Args.hasArg(options::OPT_shared)) {
+ if (Shared) {
CmdArgs.push_back("-shared");
- } else {
+ } else if (!Relocatable) {
CmdArgs.push_back("-dynamic-linker");
CmdArgs.push_back("/usr/libexec/ld.so");
}
}
- if (Args.hasArg(options::OPT_pie))
+ if (Pie)
CmdArgs.push_back("-pie");
- if (Args.hasArg(options::OPT_nopie) || Args.hasArg(options::OPT_pg))
+ if (Nopie || Profiling)
CmdArgs.push_back("-nopie");
+ if (Arch == llvm::Triple::riscv64)
+ CmdArgs.push_back("-X");
+
+ assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
if (Output.isFilename()) {
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- } else {
- assert(Output.isNothing() && "Invalid output.");
}
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_r)) {
const char *crt0 = nullptr;
const char *crtbegin = nullptr;
- if (!Args.hasArg(options::OPT_shared)) {
- if (Args.hasArg(options::OPT_pg))
+ if (!Shared) {
+ if (Profiling)
crt0 = "gcrt0.o";
- else if (Args.hasArg(options::OPT_static) &&
- !Args.hasArg(options::OPT_nopie))
+ else if (Static && !Nopie)
crt0 = "rcrt0.o";
else
crt0 = "crt0.o";
@@ -165,49 +192,79 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
- Args.AddAllArgs(CmdArgs, {options::OPT_T_Group, options::OPT_e,
- options::OPT_s, options::OPT_t,
- options::OPT_Z_Flag, options::OPT_r});
+ Args.addAllArgs(CmdArgs, {options::OPT_T_Group, options::OPT_s,
+ options::OPT_t, options::OPT_r});
+
+ if (D.isUsingLTO()) {
+ assert(!Inputs.empty() && "Must have at least one input.");
+ // Find the first filename InputInfo object.
+ auto Input = llvm::find_if(
+ Inputs, [](const InputInfo &II) -> bool { return II.isFilename(); });
+ if (Input == Inputs.end())
+ // For a very rare case, all of the inputs to the linker are
+ // InputArg. If that happens, just use the first InputInfo.
+ Input = Inputs.begin();
+
+ addLTOOptions(ToolChain, Args, CmdArgs, Output, *Input,
+ D.getLTOMode() == LTOK_Thin);
+ }
bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
bool NeedsXRayDeps = addXRayRuntime(ToolChain, Args, CmdArgs);
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs,
+ options::OPT_r)) {
// Use the static OpenMP runtime with -static-openmp
- bool StaticOpenMP = Args.hasArg(options::OPT_static_openmp) &&
- !Args.hasArg(options::OPT_static);
+ bool StaticOpenMP = Args.hasArg(options::OPT_static_openmp) && !Static;
addOpenMPRuntime(CmdArgs, ToolChain, Args, StaticOpenMP);
if (D.CCCIsCXX()) {
if (ToolChain.ShouldLinkCXXStdlib(Args))
ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
- if (Args.hasArg(options::OPT_pg))
+ if (Profiling)
CmdArgs.push_back("-lm_p");
else
CmdArgs.push_back("-lm");
}
+
+ // Silence warnings when linking C code with a C++ '-stdlib' argument.
+ Args.ClaimAllArgs(options::OPT_stdlib_EQ);
+
+ // Additional linker set-up and flags for Fortran. This is required in order
+ // to generate executables. As Fortran runtime depends on the C runtime,
+ // these dependencies need to be listed before the C runtime below (i.e.
+ // AddRunTimeLibs).
+ if (D.IsFlangMode()) {
+ addFortranRuntimeLibraryPath(ToolChain, Args, CmdArgs);
+ addFortranRuntimeLibs(ToolChain, Args, CmdArgs);
+ if (Profiling)
+ CmdArgs.push_back("-lm_p");
+ else
+ CmdArgs.push_back("-lm");
+ }
+
if (NeedsSanitizerDeps) {
CmdArgs.push_back(ToolChain.getCompilerRTArgString(Args, "builtins"));
- linkSanitizerRuntimeDeps(ToolChain, CmdArgs);
+ linkSanitizerRuntimeDeps(ToolChain, Args, CmdArgs);
}
if (NeedsXRayDeps) {
CmdArgs.push_back(ToolChain.getCompilerRTArgString(Args, "builtins"));
- linkXRayRuntimeDeps(ToolChain, CmdArgs);
+ linkXRayRuntimeDeps(ToolChain, Args, CmdArgs);
}
// FIXME: For some reason GCC passes -lgcc before adding
// the default system libraries. Just mimic this for now.
CmdArgs.push_back("-lcompiler_rt");
if (Args.hasArg(options::OPT_pthread)) {
- if (!Args.hasArg(options::OPT_shared) && Args.hasArg(options::OPT_pg))
+ if (!Shared && Profiling)
CmdArgs.push_back("-lpthread_p");
else
CmdArgs.push_back("-lpthread");
}
- if (!Args.hasArg(options::OPT_shared)) {
- if (Args.hasArg(options::OPT_pg))
+ if (!Shared) {
+ if (Profiling)
CmdArgs.push_back("-lc_p");
else
CmdArgs.push_back("-lc");
@@ -216,9 +273,10 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lcompiler_rt");
}
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_r)) {
const char *crtend = nullptr;
- if (!Args.hasArg(options::OPT_shared))
+ if (!Shared)
crtend = "crtend.o";
else
crtend = "crtendS.o";
@@ -237,16 +295,15 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
SanitizerMask OpenBSD::getSupportedSanitizers() const {
const bool IsX86 = getTriple().getArch() == llvm::Triple::x86;
const bool IsX86_64 = getTriple().getArch() == llvm::Triple::x86_64;
-
- // For future use, only UBsan at the moment
SanitizerMask Res = ToolChain::getSupportedSanitizers();
-
if (IsX86 || IsX86_64) {
Res |= SanitizerKind::Vptr;
Res |= SanitizerKind::Fuzzer;
Res |= SanitizerKind::FuzzerNoLink;
}
-
+ if (IsX86_64) {
+ Res |= SanitizerKind::KernelAddress;
+ }
return Res;
}
@@ -255,7 +312,7 @@ SanitizerMask OpenBSD::getSupportedSanitizers() const {
OpenBSD::OpenBSD(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
- getFilePaths().push_back(getDriver().SysRoot + "/usr/lib");
+ getFilePaths().push_back(concat(getDriver().SysRoot, "/usr/lib"));
}
void OpenBSD::AddClangSystemIncludeArgs(
@@ -288,13 +345,14 @@ void OpenBSD::AddClangSystemIncludeArgs(
return;
}
- addExternCSystemInclude(DriverArgs, CC1Args, D.SysRoot + "/usr/include");
+ addExternCSystemInclude(DriverArgs, CC1Args,
+ concat(D.SysRoot, "/usr/include"));
}
void OpenBSD::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/usr/include/c++/v1");
+ concat(getDriver().SysRoot, "/usr/include/c++/v1"));
}
void OpenBSD::AddCXXStdlibLibArgs(const ArgList &Args,
@@ -302,16 +360,27 @@ void OpenBSD::AddCXXStdlibLibArgs(const ArgList &Args,
bool Profiling = Args.hasArg(options::OPT_pg);
CmdArgs.push_back(Profiling ? "-lc++_p" : "-lc++");
+ if (Args.hasArg(options::OPT_fexperimental_library))
+ CmdArgs.push_back("-lc++experimental");
CmdArgs.push_back(Profiling ? "-lc++abi_p" : "-lc++abi");
CmdArgs.push_back(Profiling ? "-lpthread_p" : "-lpthread");
}
-std::string OpenBSD::getCompilerRT(const ArgList &Args,
- StringRef Component,
+std::string OpenBSD::getCompilerRT(const ArgList &Args, StringRef Component,
FileType Type) const {
- SmallString<128> Path(getDriver().SysRoot);
- llvm::sys::path::append(Path, "/usr/lib/libcompiler_rt.a");
- return std::string(Path.str());
+ if (Component == "builtins") {
+ SmallString<128> Path(getDriver().SysRoot);
+ llvm::sys::path::append(Path, "/usr/lib/libcompiler_rt.a");
+ return std::string(Path);
+ }
+ SmallString<128> P(getDriver().ResourceDir);
+ std::string CRTBasename =
+ buildCompilerRTBasename(Args, Component, Type, /*AddArch=*/false);
+ llvm::sys::path::append(P, "lib", CRTBasename);
+ // Checks if this is the base system case which uses a different location.
+ if (getVFS().exists(P))
+ return std::string(P);
+ return ToolChain::getCompilerRT(Args, Component, Type);
}
Tool *OpenBSD::buildAssembler() const {
@@ -321,3 +390,13 @@ Tool *OpenBSD::buildAssembler() const {
Tool *OpenBSD::buildLinker() const { return new tools::openbsd::Linker(*this); }
bool OpenBSD::HasNativeLLVMSupport() const { return true; }
+
+ToolChain::UnwindTableLevel
+OpenBSD::getDefaultUnwindTableLevel(const ArgList &Args) const {
+ switch (getArch()) {
+ case llvm::Triple::arm:
+ return UnwindTableLevel::None;
+ default:
+ return UnwindTableLevel::Asynchronous;
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.h
index 4932ed5c609c..b4350e72d5d2 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.h
@@ -18,9 +18,9 @@ namespace clang {
namespace driver {
namespace tools {
-/// openbsd -- Directly call GNU Binutils assembler and linker
+/// Directly call GNU Binutils assembler and linker
namespace openbsd {
-class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
+class LLVM_LIBRARY_VISIBILITY Assembler final : public Tool {
public:
Assembler(const ToolChain &TC)
: Tool("openbsd::Assembler", "assembler", TC) {}
@@ -33,7 +33,7 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
public:
Linker(const ToolChain &TC) : Tool("openbsd::Linker", "linker", TC) {}
@@ -59,7 +59,9 @@ public:
bool IsMathErrnoDefault() const override { return false; }
bool IsObjCNonFragileABIDefault() const override { return true; }
- bool isPIEDefault() const override { return true; }
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override {
+ return true;
+ }
RuntimeLibType GetDefaultRuntimeLibType() const override {
return ToolChain::RLT_CompilerRT;
@@ -80,6 +82,9 @@ public:
std::string getCompilerRT(const llvm::opt::ArgList &Args, StringRef Component,
FileType Type = ToolChain::FT_Static) const override;
+ UnwindTableLevel
+ getDefaultUnwindTableLevel(const llvm::opt::ArgList &Args) const override;
+
LangOptions::StackProtectorMode
GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
return LangOptions::SSPStrong;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/PPCFreeBSD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/PPCFreeBSD.cpp
new file mode 100644
index 000000000000..8d381c4f1437
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/PPCFreeBSD.cpp
@@ -0,0 +1,28 @@
+//===-- PPCFreeBSD.cpp - PowerPC ToolChain Implementations ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "PPCFreeBSD.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Options.h"
+#include "llvm/Support/Path.h"
+
+using namespace clang::driver::toolchains;
+using namespace llvm::opt;
+
+void PPCFreeBSDToolChain::AddClangSystemIncludeArgs(
+ const ArgList &DriverArgs, ArgStringList &CC1Args) const {
+ if (!DriverArgs.hasArg(clang::driver::options::OPT_nostdinc) &&
+ !DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ const Driver &D = getDriver();
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "include", "ppc_wrappers");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ }
+
+ FreeBSD::AddClangSystemIncludeArgs(DriverArgs, CC1Args);
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/PPCFreeBSD.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/PPCFreeBSD.h
new file mode 100644
index 000000000000..d5d9cf4e83a0
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/PPCFreeBSD.h
@@ -0,0 +1,33 @@
+//===--- PPCFreeBSD.h - PowerPC ToolChain Implementations -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_PPC_FREEBSD_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_PPC_FREEBSD_H
+
+#include "FreeBSD.h"
+
+namespace clang {
+namespace driver {
+namespace toolchains {
+
+class LLVM_LIBRARY_VISIBILITY PPCFreeBSDToolChain : public FreeBSD {
+public:
+ PPCFreeBSDToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args)
+ : FreeBSD(D, Triple, Args) {}
+
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+};
+
+} // end namespace toolchains
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_PPC_FREEBSD_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/PPCLinux.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/PPCLinux.cpp
index af2e3a21a0af..0ed0f91ad166 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/PPCLinux.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/PPCLinux.cpp
@@ -8,11 +8,54 @@
#include "PPCLinux.h"
#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
+#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
+using namespace clang::driver;
using namespace clang::driver::toolchains;
using namespace llvm::opt;
+using namespace llvm::sys;
+
+// Glibc older than 2.32 doesn't fully support IEEE float128. Here we check
+// glibc version by looking at dynamic linker name.
+static bool GlibcSupportsFloat128(const std::string &Linker) {
+ llvm::SmallVector<char, 16> Path;
+
+ // Resolve potential symlinks to linker.
+ if (fs::real_path(Linker, Path))
+ return false;
+ llvm::StringRef LinkerName =
+ path::filename(llvm::StringRef(Path.data(), Path.size()));
+
+ // Since glibc 2.34, the installed .so file is not symlink anymore. But we can
+ // still safely assume it's newer than 2.32.
+ if (LinkerName.starts_with("ld64.so"))
+ return true;
+
+ if (!LinkerName.starts_with("ld-2."))
+ return false;
+ unsigned Minor = (LinkerName[5] - '0') * 10 + (LinkerName[6] - '0');
+ if (Minor < 32)
+ return false;
+
+ return true;
+}
+
+PPCLinuxToolChain::PPCLinuxToolChain(const Driver &D,
+ const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args)
+ : Linux(D, Triple, Args) {
+ if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ)) {
+ StringRef ABIName = A->getValue();
+
+ if ((ABIName == "ieeelongdouble" &&
+ !SupportIEEEFloat128(D, Triple, Args)) ||
+ (ABIName == "ibmlongdouble" && !supportIBMLongDouble(D, Args)))
+ D.Diag(diag::warn_drv_unsupported_float_abi_by_lib) << ABIName;
+ }
+}
void PPCLinuxToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
@@ -26,3 +69,35 @@ void PPCLinuxToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
Linux::AddClangSystemIncludeArgs(DriverArgs, CC1Args);
}
+
+bool PPCLinuxToolChain::supportIBMLongDouble(
+ const Driver &D, const llvm::opt::ArgList &Args) const {
+ if (Args.hasArg(options::OPT_nostdlib, options::OPT_nostdlibxx))
+ return true;
+
+ CXXStdlibType StdLib = ToolChain::GetCXXStdlibType(Args);
+ if (StdLib == CST_Libstdcxx)
+ return true;
+
+ return StdLib == CST_Libcxx && !defaultToIEEELongDouble();
+}
+
+bool PPCLinuxToolChain::SupportIEEEFloat128(
+ const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args) const {
+ if (!Triple.isLittleEndian() || !Triple.isPPC64())
+ return false;
+
+ if (Args.hasArg(options::OPT_nostdlib, options::OPT_nostdlibxx))
+ return true;
+
+ CXXStdlibType StdLib = ToolChain::GetCXXStdlibType(Args);
+ bool HasUnsupportedCXXLib =
+ (StdLib == CST_Libcxx && !defaultToIEEELongDouble()) ||
+ (StdLib == CST_Libstdcxx &&
+ GCCInstallation.getVersion().isOlderThan(12, 1, 0));
+
+ std::string Linker = Linux::getDynamicLinker(Args);
+ return GlibcSupportsFloat128((Twine(D.DyldPrefix) + Linker).str()) &&
+ !(D.CCCIsCXX() && HasUnsupportedCXXLib);
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/PPCLinux.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/PPCLinux.h
index b3ef7b61dc3a..63adaff6be9c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/PPCLinux.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/PPCLinux.h
@@ -18,12 +18,17 @@ namespace toolchains {
class LLVM_LIBRARY_VISIBILITY PPCLinuxToolChain : public Linux {
public:
PPCLinuxToolChain(const Driver &D, const llvm::Triple &Triple,
- const llvm::opt::ArgList &Args)
- : Linux(D, Triple, Args) {}
+ const llvm::opt::ArgList &Args);
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+
+private:
+ bool SupportIEEEFloat128(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args) const;
+ bool supportIBMLongDouble(const Driver &D,
+ const llvm::opt::ArgList &Args) const;
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp
index 383b0c50d410..8ba8b80cfec7 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp
@@ -7,8 +7,8 @@
//===----------------------------------------------------------------------===//
#include "PS4CPU.h"
-#include "FreeBSD.h"
#include "CommonArgs.h"
+#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
@@ -23,10 +23,18 @@ using namespace clang::driver;
using namespace clang;
using namespace llvm::opt;
-using clang::driver::tools::AddLinkerInputs;
+// Helper to paste bits of an option together and return a saved string.
+static const char *makeArgString(const ArgList &Args, const char *Prefix,
+ const char *Base, const char *Suffix) {
+ // Basically "Prefix + Base + Suffix" all converted to Twine then saved.
+ return Args.MakeArgString(Twine(StringRef(Prefix), Base) + Suffix);
+}
+
+void tools::PScpu::addProfileRTArgs(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ assert(TC.getTriple().isPS());
+ auto &PSTC = static_cast<const toolchains::PS4PS5Base &>(TC);
-void tools::PS4cpu::addProfileRTArgs(const ToolChain &TC, const ArgList &Args,
- ArgStringList &CmdArgs) {
if ((Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
false) ||
Args.hasFlag(options::OPT_fprofile_generate,
@@ -43,14 +51,16 @@ void tools::PS4cpu::addProfileRTArgs(const ToolChain &TC, const ArgList &Args,
options::OPT_fno_profile_generate, false) ||
Args.hasArg(options::OPT_fcreate_profile) ||
Args.hasArg(options::OPT_coverage)))
- CmdArgs.push_back("--dependent-lib=libclang_rt.profile-x86_64.a");
+ CmdArgs.push_back(makeArgString(
+ Args, "--dependent-lib=", PSTC.getProfileRTLibName(), ""));
}
-void tools::PS4cpu::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+void tools::PScpu::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
+ auto &TC = static_cast<const toolchains::PS4PS5Base &>(getToolChain());
claimNoWarnArgs(Args);
ArgStringList CmdArgs;
@@ -64,40 +74,57 @@ void tools::PS4cpu::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
assert(Input.isFilename() && "Invalid input.");
CmdArgs.push_back(Input.getFilename());
- const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath("orbis-as"));
+ std::string AsName = TC.qualifyPSCmdName("as");
+ const char *Exec = Args.MakeArgString(TC.GetProgramPath(AsName.c_str()));
C.addCommand(std::make_unique<Command>(JA, *this,
ResponseFileSupport::AtFileUTF8(),
Exec, CmdArgs, Inputs, Output));
}
-static void AddPS4SanitizerArgs(const ToolChain &TC, ArgStringList &CmdArgs) {
- const SanitizerArgs &SanArgs = TC.getSanitizerArgs();
- if (SanArgs.needsUbsanRt()) {
- CmdArgs.push_back("-lSceDbgUBSanitizer_stub_weak");
- }
- if (SanArgs.needsAsanRt()) {
- CmdArgs.push_back("-lSceDbgAddressSanitizer_stub_weak");
- }
+void tools::PScpu::addSanitizerArgs(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ assert(TC.getTriple().isPS());
+ auto &PSTC = static_cast<const toolchains::PS4PS5Base &>(TC);
+ PSTC.addSanitizerArgs(Args, CmdArgs, "--dependent-lib=lib", ".a");
}
-void tools::PS4cpu::addSanitizerArgs(const ToolChain &TC,
- ArgStringList &CmdArgs) {
- const SanitizerArgs &SanArgs = TC.getSanitizerArgs();
+void toolchains::PS4CPU::addSanitizerArgs(const ArgList &Args,
+ ArgStringList &CmdArgs,
+ const char *Prefix,
+ const char *Suffix) const {
+ auto arg = [&](const char *Name) -> const char * {
+ return makeArgString(Args, Prefix, Name, Suffix);
+ };
+ const SanitizerArgs &SanArgs = getSanitizerArgs(Args);
if (SanArgs.needsUbsanRt())
- CmdArgs.push_back("--dependent-lib=libSceDbgUBSanitizer_stub_weak.a");
+ CmdArgs.push_back(arg("SceDbgUBSanitizer_stub_weak"));
if (SanArgs.needsAsanRt())
- CmdArgs.push_back("--dependent-lib=libSceDbgAddressSanitizer_stub_weak.a");
+ CmdArgs.push_back(arg("SceDbgAddressSanitizer_stub_weak"));
}
-void tools::PS4cpu::Link::ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
- const toolchains::FreeBSD &ToolChain =
- static_cast<const toolchains::FreeBSD &>(getToolChain());
- const Driver &D = ToolChain.getDriver();
+void toolchains::PS5CPU::addSanitizerArgs(const ArgList &Args,
+ ArgStringList &CmdArgs,
+ const char *Prefix,
+ const char *Suffix) const {
+ auto arg = [&](const char *Name) -> const char * {
+ return makeArgString(Args, Prefix, Name, Suffix);
+ };
+ const SanitizerArgs &SanArgs = getSanitizerArgs(Args);
+ if (SanArgs.needsUbsanRt())
+ CmdArgs.push_back(arg("SceUBSanitizer_nosubmission_stub_weak"));
+ if (SanArgs.needsAsanRt())
+ CmdArgs.push_back(arg("SceAddressSanitizer_nosubmission_stub_weak"));
+ if (SanArgs.needsTsanRt())
+ CmdArgs.push_back(arg("SceThreadSanitizer_nosubmission_stub_weak"));
+}
+
+void tools::PScpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ auto &TC = static_cast<const toolchains::PS4PS5Base &>(getToolChain());
+ const Driver &D = TC.getDriver();
ArgStringList CmdArgs;
// Silence warning for "clang -g foo.o -o foo"
@@ -117,119 +144,195 @@ void tools::PS4cpu::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_rdynamic))
CmdArgs.push_back("-export-dynamic");
if (Args.hasArg(options::OPT_shared))
- CmdArgs.push_back("--oformat=so");
+ CmdArgs.push_back("--shared");
+ assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
if (Output.isFilename()) {
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- } else {
- assert(Output.isNothing() && "Invalid output.");
}
- if(!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs))
- AddPS4SanitizerArgs(ToolChain, CmdArgs);
+ const bool UseLTO = D.isUsingLTO();
+ const bool UseJMC =
+ Args.hasFlag(options::OPT_fjmc, options::OPT_fno_jmc, false);
+ const bool IsPS4 = TC.getTriple().isPS4();
- Args.AddAllArgs(CmdArgs, options::OPT_L);
- Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
- Args.AddAllArgs(CmdArgs, options::OPT_e);
- Args.AddAllArgs(CmdArgs, options::OPT_s);
- Args.AddAllArgs(CmdArgs, options::OPT_t);
- Args.AddAllArgs(CmdArgs, options::OPT_r);
+ const char *PS4LTOArgs = "";
+ auto AddCodeGenFlag = [&](Twine Flag) {
+ if (IsPS4)
+ PS4LTOArgs = Args.MakeArgString(Twine(PS4LTOArgs) + " " + Flag);
+ else
+ CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=") + Flag));
+ };
+
+ if (UseLTO) {
+ // We default to creating the arange section, but LTO does not. Enable it
+ // here.
+ AddCodeGenFlag("-generate-arange-section");
+
+ // This tells LTO to perform JustMyCode instrumentation.
+ if (UseJMC)
+ AddCodeGenFlag("-enable-jmc-instrument");
+
+ if (Arg *A = Args.getLastArg(options::OPT_fcrash_diagnostics_dir))
+ AddCodeGenFlag(Twine("-crash-diagnostics-dir=") + A->getValue());
+
+ StringRef Parallelism = getLTOParallelism(Args, D);
+ if (!Parallelism.empty()) {
+ if (IsPS4)
+ AddCodeGenFlag(Twine("-threads=") + Parallelism);
+ else
+ CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=jobs=") + Parallelism));
+ }
+
+ if (IsPS4) {
+ const char *Prefix = nullptr;
+ if (D.getLTOMode() == LTOK_Thin)
+ Prefix = "-lto-thin-debug-options=";
+ else if (D.getLTOMode() == LTOK_Full)
+ Prefix = "-lto-debug-options=";
+ else
+ llvm_unreachable("new LTO mode?");
+
+ CmdArgs.push_back(Args.MakeArgString(Twine(Prefix) + PS4LTOArgs));
+ }
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs))
+ TC.addSanitizerArgs(Args, CmdArgs, "-l", "");
+
+ if (D.isUsingLTO() && Args.hasArg(options::OPT_funified_lto)) {
+ if (D.getLTOMode() == LTOK_Thin)
+ CmdArgs.push_back("--lto=thin");
+ else if (D.getLTOMode() == LTOK_Full)
+ CmdArgs.push_back("--lto=full");
+ }
+
+ Args.addAllArgs(CmdArgs,
+ {options::OPT_L, options::OPT_T_Group, options::OPT_s,
+ options::OPT_t, options::OPT_r});
if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
CmdArgs.push_back("--no-demangle");
- AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
+ AddLinkerInputs(TC, Inputs, Args, CmdArgs, JA);
if (Args.hasArg(options::OPT_pthread)) {
CmdArgs.push_back("-lpthread");
}
+ if (UseJMC) {
+ CmdArgs.push_back("--whole-archive");
+ if (IsPS4)
+ CmdArgs.push_back("-lSceDbgJmc");
+ else
+ CmdArgs.push_back("-lSceJmc_nosubmission");
+ CmdArgs.push_back("--no-whole-archive");
+ }
+
if (Args.hasArg(options::OPT_fuse_ld_EQ)) {
D.Diag(diag::err_drv_unsupported_opt_for_target)
- << "-fuse-ld" << getToolChain().getTriple().str();
+ << "-fuse-ld" << TC.getTriple().str();
}
- const char *Exec =
- Args.MakeArgString(ToolChain.GetProgramPath("orbis-ld"));
+ std::string LdName = TC.qualifyPSCmdName(TC.getLinkerBaseName());
+ const char *Exec = Args.MakeArgString(TC.GetProgramPath(LdName.c_str()));
C.addCommand(std::make_unique<Command>(JA, *this,
ResponseFileSupport::AtFileUTF8(),
Exec, CmdArgs, Inputs, Output));
}
-toolchains::PS4CPU::PS4CPU(const Driver &D, const llvm::Triple &Triple,
- const ArgList &Args)
+toolchains::PS4PS5Base::PS4PS5Base(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args, StringRef Platform,
+ const char *EnvVar)
: Generic_ELF(D, Triple, Args) {
if (Args.hasArg(clang::driver::options::OPT_static))
- D.Diag(clang::diag::err_drv_unsupported_opt_for_target) << "-static"
- << "PS4";
-
- // Determine where to find the PS4 libraries. We use SCE_ORBIS_SDK_DIR
- // if it exists; otherwise use the driver's installation path, which
- // should be <SDK_DIR>/host_tools/bin.
-
- SmallString<512> PS4SDKDir;
- if (const char *EnvValue = getenv("SCE_ORBIS_SDK_DIR")) {
- if (!llvm::sys::fs::exists(EnvValue))
- getDriver().Diag(clang::diag::warn_drv_ps4_sdk_dir) << EnvValue;
- PS4SDKDir = EnvValue;
- } else {
- PS4SDKDir = getDriver().Dir;
- llvm::sys::path::append(PS4SDKDir, "/../../");
- }
+ D.Diag(clang::diag::err_drv_unsupported_opt_for_target)
+ << "-static" << Platform;
- // By default, the driver won't report a warning if it can't find
- // PS4's include or lib directories. This behavior could be changed if
- // -Weverything or -Winvalid-or-nonexistent-directory options are passed.
+ // Determine where to find the PS4/PS5 libraries.
// If -isysroot was passed, use that as the SDK base path.
- std::string PrefixDir;
+ // If not, we use the EnvVar if it exists; otherwise use the driver's
+ // installation path, which should be <SDK_DIR>/host_tools/bin.
+ SmallString<80> Whence;
if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
- PrefixDir = A->getValue();
- if (!llvm::sys::fs::exists(PrefixDir))
- getDriver().Diag(clang::diag::warn_missing_sysroot) << PrefixDir;
- } else
- PrefixDir = std::string(PS4SDKDir.str());
-
- SmallString<512> PS4SDKIncludeDir(PrefixDir);
- llvm::sys::path::append(PS4SDKIncludeDir, "target/include");
+ SDKRootDir = A->getValue();
+ if (!llvm::sys::fs::exists(SDKRootDir))
+ D.Diag(clang::diag::warn_missing_sysroot) << SDKRootDir;
+ Whence = A->getSpelling();
+ } else if (const char *EnvValue = getenv(EnvVar)) {
+ SDKRootDir = EnvValue;
+ Whence = { "environment variable '", EnvVar, "'" };
+ } else {
+ SDKRootDir = D.Dir + "/../../";
+ Whence = "compiler's location";
+ }
+
+ SmallString<512> SDKIncludeDir(SDKRootDir);
+ llvm::sys::path::append(SDKIncludeDir, "target/include");
if (!Args.hasArg(options::OPT_nostdinc) &&
!Args.hasArg(options::OPT_nostdlibinc) &&
!Args.hasArg(options::OPT_isysroot) &&
!Args.hasArg(options::OPT__sysroot_EQ) &&
- !llvm::sys::fs::exists(PS4SDKIncludeDir)) {
- getDriver().Diag(clang::diag::warn_drv_unable_to_find_directory_expected)
- << "PS4 system headers" << PS4SDKIncludeDir;
+ !llvm::sys::fs::exists(SDKIncludeDir)) {
+ D.Diag(clang::diag::warn_drv_unable_to_find_directory_expected)
+ << Twine(Platform, " system headers").str() << SDKIncludeDir << Whence;
}
- SmallString<512> PS4SDKLibDir(PS4SDKDir);
- llvm::sys::path::append(PS4SDKLibDir, "target/lib");
+ SmallString<512> SDKLibDir(SDKRootDir);
+ llvm::sys::path::append(SDKLibDir, "target/lib");
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nodefaultlibs) &&
!Args.hasArg(options::OPT__sysroot_EQ) && !Args.hasArg(options::OPT_E) &&
!Args.hasArg(options::OPT_c) && !Args.hasArg(options::OPT_S) &&
!Args.hasArg(options::OPT_emit_ast) &&
- !llvm::sys::fs::exists(PS4SDKLibDir)) {
- getDriver().Diag(clang::diag::warn_drv_unable_to_find_directory_expected)
- << "PS4 system libraries" << PS4SDKLibDir;
+ !llvm::sys::fs::exists(SDKLibDir)) {
+ D.Diag(clang::diag::warn_drv_unable_to_find_directory_expected)
+ << Twine(Platform, " system libraries").str() << SDKLibDir << Whence;
return;
}
- getFilePaths().push_back(std::string(PS4SDKLibDir.str()));
+ getFilePaths().push_back(std::string(SDKLibDir));
}
-Tool *toolchains::PS4CPU::buildAssembler() const {
- return new tools::PS4cpu::Assemble(*this);
+void toolchains::PS4PS5Base::AddClangSystemIncludeArgs(
+ const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+
+ if (DriverArgs.hasArg(options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> Dir(D.ResourceDir);
+ llvm::sys::path::append(Dir, "include");
+ addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ addExternCSystemInclude(DriverArgs, CC1Args,
+ SDKRootDir + "/target/include");
+ addExternCSystemInclude(DriverArgs, CC1Args,
+ SDKRootDir + "/target/include_common");
}
-Tool *toolchains::PS4CPU::buildLinker() const {
- return new tools::PS4cpu::Link(*this);
+Tool *toolchains::PS4CPU::buildAssembler() const {
+ return new tools::PScpu::Assembler(*this);
}
-bool toolchains::PS4CPU::isPICDefault() const { return true; }
+Tool *toolchains::PS5CPU::buildAssembler() const {
+ // PS5 does not support an external assembler.
+ getDriver().Diag(clang::diag::err_no_external_assembler);
+ return nullptr;
+}
-bool toolchains::PS4CPU::HasNativeLLVMSupport() const { return true; }
+Tool *toolchains::PS4PS5Base::buildLinker() const {
+ return new tools::PScpu::Linker(*this);
+}
-SanitizerMask toolchains::PS4CPU::getSupportedSanitizers() const {
+SanitizerMask toolchains::PS4PS5Base::getSupportedSanitizers() const {
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
Res |= SanitizerKind::PointerCompare;
@@ -238,10 +341,16 @@ SanitizerMask toolchains::PS4CPU::getSupportedSanitizers() const {
return Res;
}
-void toolchains::PS4CPU::addClangTargetOptions(
+SanitizerMask toolchains::PS5CPU::getSupportedSanitizers() const {
+ SanitizerMask Res = PS4PS5Base::getSupportedSanitizers();
+ Res |= SanitizerKind::Thread;
+ return Res;
+}
+
+void toolchains::PS4PS5Base::addClangTargetOptions(
const ArgList &DriverArgs, ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadingKind) const {
- // PS4 does not use init arrays.
+ // PS4/PS5 do not use init arrays.
if (DriverArgs.hasArg(options::OPT_fuse_init_array)) {
Arg *A = DriverArgs.getLastArg(options::OPT_fuse_init_array);
getDriver().Diag(clang::diag::err_drv_unsupported_opt_for_target)
@@ -250,6 +359,12 @@ void toolchains::PS4CPU::addClangTargetOptions(
CC1Args.push_back("-fno-use-init-array");
+ // Default to -fvisibility-global-new-delete=source for PS5.
+ if (getTriple().isPS5() &&
+ !DriverArgs.hasArg(options::OPT_fvisibility_global_new_delete_EQ,
+ options::OPT_fvisibility_global_new_delete_hidden))
+ CC1Args.push_back("-fvisibility-global-new-delete=source");
+
const Arg *A =
DriverArgs.getLastArg(options::OPT_fvisibility_from_dllstorageclass,
options::OPT_fno_visibility_from_dllstorageclass);
@@ -282,3 +397,13 @@ void toolchains::PS4CPU::addClangTargetOptions(
CC1Args.push_back("-fvisibility-externs-nodllstorageclass=default");
}
}
+
+// PS4 toolchain.
+toolchains::PS4CPU::PS4CPU(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args)
+ : PS4PS5Base(D, Triple, Args, "PS4", "SCE_ORBIS_SDK_DIR") {}
+
+// PS5 toolchain.
+toolchains::PS5CPU::PS5CPU(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args)
+ : PS4PS5Base(D, Triple, Args, "PS5", "SCE_PROSPERO_SDK_DIR") {}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h
index 5f5d0e57d4ea..fee80e77462f 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h
@@ -18,61 +18,66 @@ namespace clang {
namespace driver {
namespace tools {
-namespace PS4cpu {
+namespace PScpu {
+// Functions/classes in this namespace support both PS4 and PS5.
void addProfileRTArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs);
-void addSanitizerArgs(const ToolChain &TC, llvm::opt::ArgStringList &CmdArgs);
+void addSanitizerArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs);
-class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+class LLVM_LIBRARY_VISIBILITY Assembler final : public Tool {
public:
- Assemble(const ToolChain &TC) : Tool("PS4cpu::Assemble", "assembler", TC) {}
+ Assembler(const ToolChain &TC) : Tool("PScpu::Assembler", "assembler", TC) {}
bool hasIntegratedCPP() const override { return false; }
void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
+ const InputInfo &Output, const InputInfoList &Inputs,
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
public:
- Link(const ToolChain &TC) : Tool("PS4cpu::Link", "linker", TC) {}
+ Linker(const ToolChain &TC) : Tool("PScpu::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
+ const InputInfo &Output, const InputInfoList &Inputs,
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
-} // end namespace PS4cpu
+} // namespace PScpu
} // namespace tools
namespace toolchains {
-class LLVM_LIBRARY_VISIBILITY PS4CPU : public Generic_ELF {
+// Common Toolchain base class for PS4 and PS5.
+class LLVM_LIBRARY_VISIBILITY PS4PS5Base : public Generic_ELF {
public:
- PS4CPU(const Driver &D, const llvm::Triple &Triple,
- const llvm::opt::ArgList &Args);
+ PS4PS5Base(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args, StringRef Platform,
+ const char *EnvVar);
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
// No support for finding a C++ standard library yet.
- void addLibCxxIncludePaths(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const override {}
- void addLibStdCxxIncludePaths(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const override {}
+ void addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override {
+ }
+ void
+ addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override {}
bool IsMathErrnoDefault() const override { return false; }
bool IsObjCNonFragileABIDefault() const override { return true; }
- bool HasNativeLLVMSupport() const override;
- bool isPICDefault() const override;
+ bool HasNativeLLVMSupport() const override { return true; }
+ bool isPICDefault() const override { return true; }
LangOptions::StackProtectorMode
GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
@@ -85,14 +90,9 @@ public:
SanitizerMask getSupportedSanitizers() const override;
- // PS4 toolchain uses legacy thin LTO API, which is not
- // capable of unit splitting.
- bool canSplitThinLTOUnit() const override { return false; }
-
void addClangTargetOptions(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args,
- Action::OffloadKind DeviceOffloadingKind) const override;
+ const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadingKind) const override;
llvm::DenormalMode getDefaultDenormalModeForType(
const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
@@ -101,11 +101,73 @@ public:
return llvm::DenormalMode::getPreserveSign();
}
- bool useRelaxRelocations() const override { return true; }
+ // Helper methods for PS4/PS5.
+ virtual const char *getLinkerBaseName() const = 0;
+ virtual std::string qualifyPSCmdName(StringRef CmdName) const = 0;
+ virtual void addSanitizerArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
+ const char *Prefix,
+ const char *Suffix) const = 0;
+ virtual const char *getProfileRTLibName() const = 0;
protected:
- Tool *buildAssembler() const override;
Tool *buildLinker() const override;
+
+private:
+ // We compute the SDK root dir in the ctor, and use it later.
+ std::string SDKRootDir;
+};
+
+// PS4-specific Toolchain class.
+class LLVM_LIBRARY_VISIBILITY PS4CPU : public PS4PS5Base {
+public:
+ PS4CPU(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ unsigned GetDefaultDwarfVersion() const override { return 4; }
+
+ // PS4 toolchain uses legacy thin LTO API, which is not
+ // capable of unit splitting.
+ bool canSplitThinLTOUnit() const override { return false; }
+
+ const char *getLinkerBaseName() const override { return "ld"; }
+ std::string qualifyPSCmdName(StringRef CmdName) const override {
+ return Twine("orbis-", CmdName).str();
+ }
+ void addSanitizerArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs, const char *Prefix,
+ const char *Suffix) const override;
+ const char *getProfileRTLibName() const override {
+ return "libclang_rt.profile-x86_64.a";
+ }
+
+protected:
+ Tool *buildAssembler() const override;
+};
+
+// PS5-specific Toolchain class.
+class LLVM_LIBRARY_VISIBILITY PS5CPU : public PS4PS5Base {
+public:
+ PS5CPU(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ unsigned GetDefaultDwarfVersion() const override { return 5; }
+
+ SanitizerMask getSupportedSanitizers() const override;
+
+ const char *getLinkerBaseName() const override { return "lld"; }
+ std::string qualifyPSCmdName(StringRef CmdName) const override {
+ return Twine("prospero-", CmdName).str();
+ }
+ void addSanitizerArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs, const char *Prefix,
+ const char *Suffix) const override;
+ const char *getProfileRTLibName() const override {
+ return "libclang_rt.profile-x86_64_nosubmission.a";
+ }
+
+protected:
+ Tool *buildAssembler() const override;
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
index 714325a2db39..5e4fa4d5331f 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
@@ -1,4 +1,4 @@
-//===--- RISCVToolchain.cpp - RISCV ToolChain Implementations ---*- C++ -*-===//
+//===--- RISCVToolchain.cpp - RISC-V ToolChain Implementations --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -46,17 +46,17 @@ bool RISCVToolChain::hasGCCToolchain(const Driver &D,
return llvm::sys::fs::exists(GCCDir);
}
-/// RISCV Toolchain
+/// RISC-V Toolchain
RISCVToolChain::RISCVToolChain(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
GCCInstallation.init(Triple, Args);
if (GCCInstallation.isValid()) {
Multilibs = GCCInstallation.getMultilibs();
- SelectedMultilib = GCCInstallation.getMultilib();
+ SelectedMultilibs.assign({GCCInstallation.getMultilib()});
path_list &Paths = getFilePaths();
// Add toolchain/multilib specific file paths.
- addMultilibsFilePaths(D, Multilibs, SelectedMultilib,
+ addMultilibsFilePaths(D, Multilibs, SelectedMultilibs.back(),
GCCInstallation.getInstallPath(), Paths);
getFilePaths().push_back(GCCInstallation.getInstallPath().str());
ToolChain::path_list &PPaths = getProgramPaths();
@@ -98,6 +98,12 @@ void RISCVToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
if (DriverArgs.hasArg(options::OPT_nostdinc))
return;
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> Dir(getDriver().ResourceDir);
+ llvm::sys::path::append(Dir, "include");
+ addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ }
+
if (!DriverArgs.hasArg(options::OPT_nostdlibinc)) {
SmallString<128> Dir(computeSysRoot());
llvm::sys::path::append(Dir, "include");
@@ -135,7 +141,7 @@ std::string RISCVToolChain::computeSysRoot() const {
if (!llvm::sys::fs::exists(SysRootDir))
return std::string();
- return std::string(SysRootDir.str());
+ return std::string(SysRootDir);
}
void RISCV::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -157,6 +163,7 @@ void RISCV::Linker::ConstructJob(Compilation &C, const JobAction &JA,
} else {
CmdArgs.push_back("elf32lriscv");
}
+ CmdArgs.push_back("-X");
std::string Linker = getToolChain().GetLinkerPath();
@@ -183,19 +190,21 @@ void RISCV::Linker::ConstructJob(Compilation &C, const JobAction &JA,
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
- Args.AddAllArgs(CmdArgs, options::OPT_L);
- Args.AddAllArgs(CmdArgs, options::OPT_u);
+ Args.addAllArgs(CmdArgs, {options::OPT_L, options::OPT_u});
+
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
- Args.AddAllArgs(CmdArgs,
- {options::OPT_T_Group, options::OPT_e, options::OPT_s,
- options::OPT_t, options::OPT_Z_Flag, options::OPT_r});
+ Args.addAllArgs(CmdArgs, {options::OPT_T_Group, options::OPT_s,
+ options::OPT_t, options::OPT_r});
// TODO: add C++ includes and libs if compiling C++.
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nodefaultlibs)) {
- if (ToolChain.ShouldLinkCXXStdlib(Args))
- ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (D.CCCIsCXX()) {
+ if (ToolChain.ShouldLinkCXXStdlib(Args))
+ ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
+ CmdArgs.push_back("-lm");
+ }
CmdArgs.push_back("--start-group");
CmdArgs.push_back("-lc");
CmdArgs.push_back("-lgloss");
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h
index 62099bee0404..cec817ef7190 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h
@@ -1,4 +1,4 @@
-//===--- RISCVToolchain.h - RISCV ToolChain Implementations -----*- C++ -*-===//
+//===--- RISCVToolchain.h - RISC-V ToolChain Implementations ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -22,7 +22,6 @@ public:
const llvm::opt::ArgList &Args);
static bool hasGCCToolchain(const Driver &D, const llvm::opt::ArgList &Args);
- bool IsIntegratedAssemblerDefault() const override { return true; }
void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind) const override;
@@ -47,7 +46,7 @@ private:
namespace tools {
namespace RISCV {
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
public:
Linker(const ToolChain &TC) : Tool("RISCV::Linker", "ld", TC) {}
bool hasIntegratedCPP() const override { return false; }
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/ROCm.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/ROCm.h
index bb482be68260..dceb0ab03669 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/ROCm.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/ROCm.h
@@ -15,13 +15,33 @@
#include "clang/Driver/Options.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/VersionTuple.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace driver {
+/// ABI version of device library.
+struct DeviceLibABIVersion {
+ unsigned ABIVersion = 0;
+ DeviceLibABIVersion(unsigned V) : ABIVersion(V) {}
+ static DeviceLibABIVersion fromCodeObjectVersion(unsigned CodeObjectVersion) {
+ if (CodeObjectVersion < 4)
+ CodeObjectVersion = 4;
+ return DeviceLibABIVersion(CodeObjectVersion * 100);
+ }
+ /// Whether ABI version bc file is requested.
+ /// ABIVersion is code object version multiplied by 100. Code object v4
+ /// and below works with ROCm 5.0 and below which does not have
+ /// abi_version_*.bc. Code object v5 requires abi_version_500.bc.
+ bool requiresLibrary() { return ABIVersion >= 500; }
+ std::string toString() {
+ assert(ABIVersion % 100 == 0 && "Not supported");
+ return Twine(ABIVersion / 100).str();
+ }
+};
+
/// A class to find a viable ROCM installation
/// TODO: Generalize to handle libclc.
class RocmInstallationDetector {
@@ -57,6 +77,9 @@ private:
const Driver &D;
bool HasHIPRuntime = false;
bool HasDeviceLibrary = false;
+ bool HasHIPStdParLibrary = false;
+ bool HasRocThrustLibrary = false;
+ bool HasRocPrimLibrary = false;
// Default version if not detected or specified.
const unsigned DefaultVersionMajor = 3;
@@ -76,6 +99,13 @@ private:
std::vector<std::string> RocmDeviceLibPathArg;
// HIP runtime path specified by --hip-path.
StringRef HIPPathArg;
+ // HIP Standard Parallel Algorithm acceleration library specified by
+ // --hipstdpar-path
+ StringRef HIPStdParPathArg;
+ // rocThrust algorithm library specified by --hipstdpar-thrust-path
+ StringRef HIPRocThrustPathArg;
+ // rocPrim algorithm library specified by --hipstdpar-prim-path
+ StringRef HIPRocPrimPathArg;
// HIP version specified by --hip-version.
StringRef HIPVersionArg;
// Wheter -nogpulib is specified.
@@ -87,6 +117,7 @@ private:
SmallString<0> LibPath;
SmallString<0> LibDevicePath;
SmallString<0> IncludePath;
+ SmallString<0> SharePath;
llvm::StringMap<std::string> LibDeviceMap;
// Libraries that are always linked.
@@ -107,6 +138,10 @@ private:
ConditionalLibrary DenormalsAreZero;
ConditionalLibrary CorrectlyRoundedSqrt;
+ // Maps ABI version to library path. The version number is in the format of
+ // three digits as used in the ABI version library name.
+ std::map<unsigned, std::string> ABIVersionMap;
+
// Cache ROCm installation search paths.
SmallVector<Candidate, 4> ROCmSearchDirs;
bool PrintROCmSearchDirs;
@@ -142,7 +177,12 @@ public:
getCommonBitcodeLibs(const llvm::opt::ArgList &DriverArgs,
StringRef LibDeviceFile, bool Wave64, bool DAZ,
bool FiniteOnly, bool UnsafeMathOpt,
- bool FastRelaxedMath, bool CorrectSqrt) const;
+ bool FastRelaxedMath, bool CorrectSqrt,
+ DeviceLibABIVersion ABIVer, bool isOpenMP) const;
+ /// Check file paths of default bitcode libraries common to AMDGPU based
+ /// toolchains. \returns false if there are invalid or missing files.
+ bool checkCommonBitcodeLibs(StringRef GPUArch, StringRef LibDeviceFile,
+ DeviceLibABIVersion ABIVer) const;
/// Check whether we detected a valid HIP runtime.
bool hasHIPRuntime() const { return HasHIPRuntime; }
@@ -150,6 +190,9 @@ public:
/// Check whether we detected a valid ROCm device library.
bool hasDeviceLibrary() const { return HasDeviceLibrary; }
+ /// Check whether we detected a valid HIP STDPAR Acceleration library.
+ bool hasHIPStdParLibrary() const { return HasHIPStdParLibrary; }
+
/// Print information about the detected ROCm installation.
void print(raw_ostream &OS) const;
@@ -214,9 +257,19 @@ public:
return CorrectlyRoundedSqrt.get(Enabled);
}
+ StringRef getABIVersionPath(DeviceLibABIVersion ABIVer) const {
+ auto Loc = ABIVersionMap.find(ABIVer.ABIVersion);
+ if (Loc == ABIVersionMap.end())
+ return StringRef();
+ return Loc->second;
+ }
+
/// Get libdevice file for given architecture
- std::string getLibDeviceFile(StringRef Gpu) const {
- return LibDeviceMap.lookup(Gpu);
+ StringRef getLibDeviceFile(StringRef Gpu) const {
+ auto Loc = LibDeviceMap.find(Gpu);
+ if (Loc == LibDeviceMap.end())
+ return "";
+ return Loc->second;
}
void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
@@ -226,7 +279,7 @@ public:
void detectHIPRuntime();
/// Get the values for --rocm-device-lib-path arguments
- std::vector<std::string> getRocmDeviceLibPathArg() const {
+ ArrayRef<std::string> getRocmDeviceLibPathArg() const {
return RocmDeviceLibPathArg;
}
@@ -236,7 +289,7 @@ public:
/// Get the value for --hip-version argument
StringRef getHIPVersionArg() const { return HIPVersionArg; }
- std::string getHIPVersion() const { return DetectedVersion; }
+ StringRef getHIPVersion() const { return DetectedVersion; }
};
} // end namespace driver
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.cpp
new file mode 100644
index 000000000000..27de69550853
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.cpp
@@ -0,0 +1,93 @@
+//===--- SPIRV.cpp - SPIR-V Tool Implementations ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "SPIRV.h"
+#include "CommonArgs.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/InputInfo.h"
+#include "clang/Driver/Options.h"
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang::driver::tools;
+using namespace llvm::opt;
+
+void SPIRV::constructTranslateCommand(Compilation &C, const Tool &T,
+ const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfo &Input,
+ const llvm::opt::ArgStringList &Args) {
+ llvm::opt::ArgStringList CmdArgs(Args);
+ CmdArgs.push_back(Input.getFilename());
+
+ if (Input.getType() == types::TY_PP_Asm)
+ CmdArgs.push_back("-to-binary");
+ if (Output.getType() == types::TY_PP_Asm)
+ CmdArgs.push_back("--spirv-tools-dis");
+
+ CmdArgs.append({"-o", Output.getFilename()});
+
+ const char *Exec =
+ C.getArgs().MakeArgString(T.getToolChain().GetProgramPath("llvm-spirv"));
+ C.addCommand(std::make_unique<Command>(JA, T, ResponseFileSupport::None(),
+ Exec, CmdArgs, Input, Output));
+}
+
+void SPIRV::Translator::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ claimNoWarnArgs(Args);
+ if (Inputs.size() != 1)
+ llvm_unreachable("Invalid number of input files.");
+ constructTranslateCommand(C, *this, JA, Output, Inputs[0], {});
+}
+
+clang::driver::Tool *SPIRVToolChain::getTranslator() const {
+ if (!Translator)
+ Translator = std::make_unique<SPIRV::Translator>(*this);
+ return Translator.get();
+}
+
+clang::driver::Tool *SPIRVToolChain::SelectTool(const JobAction &JA) const {
+ Action::ActionClass AC = JA.getKind();
+ return SPIRVToolChain::getTool(AC);
+}
+
+clang::driver::Tool *SPIRVToolChain::getTool(Action::ActionClass AC) const {
+ switch (AC) {
+ default:
+ break;
+ case Action::BackendJobClass:
+ case Action::AssembleJobClass:
+ return SPIRVToolChain::getTranslator();
+ }
+ return ToolChain::getTool(AC);
+}
+clang::driver::Tool *SPIRVToolChain::buildLinker() const {
+ return new tools::SPIRV::Linker(*this);
+}
+
+void SPIRV::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const ToolChain &ToolChain = getToolChain();
+ std::string Linker = ToolChain.GetProgramPath(getShortName());
+ ArgStringList CmdArgs;
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Args.MakeArgString(Linker), CmdArgs,
+ Inputs, Output));
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h
new file mode 100644
index 000000000000..d4247ee0557f
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h
@@ -0,0 +1,89 @@
+//===--- SPIRV.h - SPIR-V Tool Implementations ------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_SPIRV_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_SPIRV_H
+
+#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
+
+namespace clang {
+namespace driver {
+namespace tools {
+namespace SPIRV {
+
+void constructTranslateCommand(Compilation &C, const Tool &T,
+ const JobAction &JA, const InputInfo &Output,
+ const InputInfo &Input,
+ const llvm::opt::ArgStringList &Args);
+
+class LLVM_LIBRARY_VISIBILITY Translator : public Tool {
+public:
+ Translator(const ToolChain &TC)
+ : Tool("SPIR-V::Translator", "llvm-spirv", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool hasIntegratedAssembler() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
+public:
+ Linker(const ToolChain &TC) : Tool("SPIRV::Linker", "spirv-link", TC) {}
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+} // namespace SPIRV
+} // namespace tools
+
+namespace toolchains {
+
+class LLVM_LIBRARY_VISIBILITY SPIRVToolChain final : public ToolChain {
+ mutable std::unique_ptr<Tool> Translator;
+
+public:
+ SPIRVToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args)
+ : ToolChain(D, Triple, Args) {}
+
+ bool useIntegratedAs() const override { return true; }
+
+ bool IsIntegratedBackendDefault() const override { return false; }
+ bool IsNonIntegratedBackendSupported() const override { return true; }
+ bool IsMathErrnoDefault() const override { return false; }
+ bool isCrossCompiling() const override { return true; }
+ bool isPICDefault() const override { return false; }
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override {
+ return false;
+ }
+ bool isPICDefaultForced() const override { return false; }
+ bool SupportsProfiling() const override { return false; }
+
+ clang::driver::Tool *SelectTool(const JobAction &JA) const override;
+
+protected:
+ clang::driver::Tool *getTool(Action::ActionClass AC) const override;
+ Tool *buildLinker() const override;
+
+private:
+ clang::driver::Tool *getTranslator() const;
+};
+
+} // namespace toolchains
+} // namespace driver
+} // namespace clang
+#endif
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp
index 4d1af094f481..200ac46aa534 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp
@@ -8,12 +8,16 @@
#include "Solaris.h"
#include "CommonArgs.h"
+#include "Gnu.h"
#include "clang/Basic/LangStandard.h"
#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
+#include "clang/Driver/SanitizerArgs.h"
+#include "clang/Driver/ToolChain.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
@@ -29,20 +33,50 @@ void solaris::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
- claimNoWarnArgs(Args);
- ArgStringList CmdArgs;
+ // Just call the Gnu version, which enforces gas on Solaris.
+ gnutools::Assembler::ConstructJob(C, JA, Output, Inputs, Args, LinkingOutput);
+}
- Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
+bool solaris::isLinkerGnuLd(const ToolChain &TC, const ArgList &Args) {
+ // Only used if targetting Solaris.
+ const Arg *A = Args.getLastArg(options::OPT_fuse_ld_EQ);
+ StringRef UseLinker = A ? A->getValue() : CLANG_DEFAULT_LINKER;
+ return UseLinker == "bfd" || UseLinker == "gld";
+}
- CmdArgs.push_back("-o");
- CmdArgs.push_back(Output.getFilename());
+static bool getPIE(const ArgList &Args, const ToolChain &TC) {
+ if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_static) ||
+ Args.hasArg(options::OPT_r))
+ return false;
- for (const auto &II : Inputs)
- CmdArgs.push_back(II.getFilename());
+ return Args.hasFlag(options::OPT_pie, options::OPT_no_pie,
+ TC.isPIEDefault(Args));
+}
- const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
- C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
- Exec, CmdArgs, Inputs, Output));
+// FIXME: Need to handle CLANG_DEFAULT_LINKER here?
+std::string solaris::Linker::getLinkerPath(const ArgList &Args) const {
+ const ToolChain &ToolChain = getToolChain();
+ if (const Arg *A = Args.getLastArg(options::OPT_fuse_ld_EQ)) {
+ StringRef UseLinker = A->getValue();
+ if (!UseLinker.empty()) {
+ if (llvm::sys::path::is_absolute(UseLinker) &&
+ llvm::sys::fs::can_execute(UseLinker))
+ return std::string(UseLinker);
+
+ // Accept 'bfd' and 'gld' as aliases for the GNU linker.
+ if (UseLinker == "bfd" || UseLinker == "gld")
+ // FIXME: Could also use /usr/bin/gld here.
+ return "/usr/gnu/bin/ld";
+
+ // Accept 'ld' as alias for the default linker
+ if (UseLinker != "ld")
+ ToolChain.getDriver().Diag(diag::err_drv_invalid_linker_name)
+ << A->getAsString(Args);
+ }
+ }
+
+ // getDefaultLinker() always returns an absolute path.
+ return ToolChain.getDefaultLinker();
}
void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -50,24 +84,38 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
+ const auto &ToolChain = static_cast<const Solaris &>(getToolChain());
+ const Driver &D = ToolChain.getDriver();
+ const llvm::Triple::ArchType Arch = ToolChain.getArch();
+ const bool IsPIE = getPIE(Args, ToolChain);
+ const bool LinkerIsGnuLd = isLinkerGnuLd(ToolChain, Args);
ArgStringList CmdArgs;
- // Demangle C++ names in errors
- CmdArgs.push_back("-C");
+ // Demangle C++ names in errors. GNU ld already defaults to --demangle.
+ if (!LinkerIsGnuLd)
+ CmdArgs.push_back("-C");
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_shared)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_shared,
+ options::OPT_r)) {
CmdArgs.push_back("-e");
CmdArgs.push_back("_start");
}
+ if (IsPIE) {
+ if (LinkerIsGnuLd) {
+ CmdArgs.push_back("-pie");
+ } else {
+ CmdArgs.push_back("-z");
+ CmdArgs.push_back("type=pie");
+ }
+ }
+
if (Args.hasArg(options::OPT_static)) {
CmdArgs.push_back("-Bstatic");
CmdArgs.push_back("-dn");
} else {
- CmdArgs.push_back("-Bdynamic");
- if (Args.hasArg(options::OPT_shared)) {
+ if (!Args.hasArg(options::OPT_r) && Args.hasArg(options::OPT_shared))
CmdArgs.push_back("-shared");
- }
// libpthread has been folded into libc since Solaris 10, no need to do
// anything for pthreads. Claim argument to avoid warning.
@@ -75,19 +123,50 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.ClaimAllArgs(options::OPT_pthreads);
}
+ if (LinkerIsGnuLd) {
+ // Set the correct linker emulation for 32- and 64-bit Solaris.
+ switch (Arch) {
+ case llvm::Triple::x86:
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf_i386_sol2");
+ break;
+ case llvm::Triple::x86_64:
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf_x86_64_sol2");
+ break;
+ case llvm::Triple::sparc:
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf32_sparc_sol2");
+ break;
+ case llvm::Triple::sparcv9:
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf64_sparc_sol2");
+ break;
+ default:
+ break;
+ }
+
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+
+ CmdArgs.push_back("--eh-frame-hdr");
+ } else {
+ // -rdynamic is a no-op with Solaris ld. Claim argument to avoid warning.
+ Args.ClaimAllArgs(options::OPT_rdynamic);
+ }
+
+ assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
if (Output.isFilename()) {
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- } else {
- assert(Output.isNothing() && "Invalid output.");
}
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_r)) {
if (!Args.hasArg(options::OPT_shared))
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crt1.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt1.o")));
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crti.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
const Arg *Std = Args.getLastArg(options::OPT_std_EQ, options::OPT_ansi);
bool HaveAnsi = false;
@@ -102,29 +181,54 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// Use values-Xc.o for -ansi, -std=c*, -std=iso9899:199409.
if (HaveAnsi || (LangStd && !LangStd->isGNUMode()))
values_X = "values-Xc.o";
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(values_X)));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(values_X)));
const char *values_xpg = "values-xpg6.o";
// Use values-xpg4.o for -std=c90, -std=gnu90, -std=iso9899:199409.
if (LangStd && LangStd->getLanguage() == Language::C && !LangStd->isC99())
values_xpg = "values-xpg4.o";
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath(values_xpg)));
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(values_xpg)));
+
+ const char *crtbegin = nullptr;
+ if (Args.hasArg(options::OPT_shared) || IsPIE)
+ crtbegin = "crtbeginS.o";
+ else
+ crtbegin = "crtbegin.o";
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
+ // Add crtfastmath.o if available and fast math is enabled.
+ ToolChain.addFastMathRuntimeIfAvailable(Args, CmdArgs);
}
- getToolChain().AddFilePathLibArgs(Args, CmdArgs);
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+
+ Args.addAllArgs(CmdArgs,
+ {options::OPT_L, options::OPT_T_Group, options::OPT_r});
- Args.AddAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
- options::OPT_e, options::OPT_r});
+ bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
- bool NeedsSanitizerDeps = addSanitizerRuntimes(getToolChain(), Args, CmdArgs);
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs,
+ options::OPT_r)) {
+ // Use the static OpenMP runtime with -static-openmp
+ bool StaticOpenMP = Args.hasArg(options::OPT_static_openmp) &&
+ !Args.hasArg(options::OPT_static);
+ addOpenMPRuntime(CmdArgs, ToolChain, Args, StaticOpenMP);
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
- if (getToolChain().ShouldLinkCXXStdlib(Args))
- getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (D.CCCIsCXX()) {
+ if (ToolChain.ShouldLinkCXXStdlib(Args))
+ ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
+ CmdArgs.push_back("-lm");
+ }
+ // Silence warnings when linking C code with a C++ '-stdlib' argument.
+ Args.ClaimAllArgs(options::OPT_stdlib_EQ);
+ // Additional linker set-up and flags for Fortran. This is required in order
+ // to generate executables. As Fortran runtime depends on the C runtime,
+ // these dependencies need to be listed before the C runtime below.
+ if (D.IsFlangMode()) {
+ addFortranRuntimeLibraryPath(getToolChain(), Args, CmdArgs);
+ addFortranRuntimeLibs(getToolChain(), Args, CmdArgs);
+ CmdArgs.push_back("-lm");
+ }
if (Args.hasArg(options::OPT_fstack_protector) ||
Args.hasArg(options::OPT_fstack_protector_strong) ||
Args.hasArg(options::OPT_fstack_protector_all)) {
@@ -132,25 +236,57 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lssp_nonshared");
CmdArgs.push_back("-lssp");
}
+ // LLVM support for atomics on 32-bit SPARC V8+ is incomplete, so
+ // forcibly link with libatomic as a workaround.
+ if (Arch == llvm::Triple::sparc) {
+ addAsNeededOption(ToolChain, Args, CmdArgs, true);
+ CmdArgs.push_back("-latomic");
+ addAsNeededOption(ToolChain, Args, CmdArgs, false);
+ }
+ addAsNeededOption(ToolChain, Args, CmdArgs, true);
CmdArgs.push_back("-lgcc_s");
+ addAsNeededOption(ToolChain, Args, CmdArgs, false);
CmdArgs.push_back("-lc");
if (!Args.hasArg(options::OPT_shared)) {
CmdArgs.push_back("-lgcc");
- CmdArgs.push_back("-lm");
}
- if (NeedsSanitizerDeps)
- linkSanitizerRuntimeDeps(getToolChain(), CmdArgs);
+ const SanitizerArgs &SA = ToolChain.getSanitizerArgs(Args);
+ if (NeedsSanitizerDeps) {
+ linkSanitizerRuntimeDeps(ToolChain, Args, CmdArgs);
+
+ // Work around Solaris/amd64 ld bug when calling __tls_get_addr directly.
+ // However, ld -z relax=transtls is available since Solaris 11.2, but not
+ // in Illumos.
+ if (Arch == llvm::Triple::x86_64 &&
+ (SA.needsAsanRt() || SA.needsStatsRt() ||
+ (SA.needsUbsanRt() && !SA.requiresMinimalRuntime())) &&
+ !LinkerIsGnuLd) {
+ CmdArgs.push_back("-z");
+ CmdArgs.push_back("relax=transtls");
+ }
+ }
+ // Avoid AsanInitInternal cycle, Issue #64126.
+ if (ToolChain.getTriple().isX86() && SA.needsSharedRt() &&
+ SA.needsAsanRt()) {
+ CmdArgs.push_back("-z");
+ CmdArgs.push_back("now");
+ }
}
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
- CmdArgs.push_back(
- Args.MakeArgString(getToolChain().GetFilePath("crtend.o")));
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_r)) {
+ const char *crtend = nullptr;
+ if (Args.hasArg(options::OPT_shared) || IsPIE)
+ crtend = "crtendS.o";
+ else
+ crtend = "crtend.o";
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend)));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
}
- CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crtn.o")));
- getToolChain().addProfileRTLibs(Args, CmdArgs);
+ ToolChain.addProfileRTLibs(Args, CmdArgs);
- const char *Exec = Args.MakeArgString(getToolChain().GetLinkerPath());
+ const char *Exec = Args.MakeArgString(getLinkerPath(Args));
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
Exec, CmdArgs, Inputs, Output));
}
@@ -159,13 +295,12 @@ static StringRef getSolarisLibSuffix(const llvm::Triple &Triple) {
switch (Triple.getArch()) {
case llvm::Triple::x86:
case llvm::Triple::sparc:
+ default:
break;
case llvm::Triple::x86_64:
return "/amd64";
case llvm::Triple::sparcv9:
return "/sparcv9";
- default:
- llvm_unreachable("Unsupported architecture");
}
return "";
}
@@ -192,7 +327,7 @@ Solaris::Solaris(const Driver &D, const llvm::Triple &Triple,
// If we are currently running Clang inside of the requested system root,
// add its parent library path to those searched.
- if (StringRef(D.Dir).startswith(D.SysRoot))
+ if (StringRef(D.Dir).starts_with(D.SysRoot))
addPathIfExists(D, D.Dir + "/../lib", Paths);
addPathIfExists(D, D.SysRoot + "/usr/lib" + LibSuffix, Paths);
@@ -200,7 +335,6 @@ Solaris::Solaris(const Driver &D, const llvm::Triple &Triple,
SanitizerMask Solaris::getSupportedSanitizers() const {
const bool IsX86 = getTriple().getArch() == llvm::Triple::x86;
- const bool IsX86_64 = getTriple().getArch() == llvm::Triple::x86_64;
SanitizerMask Res = ToolChain::getSupportedSanitizers();
// FIXME: Omit X86_64 until 64-bit support is figured out.
if (IsX86) {
@@ -208,12 +342,17 @@ SanitizerMask Solaris::getSupportedSanitizers() const {
Res |= SanitizerKind::PointerCompare;
Res |= SanitizerKind::PointerSubtract;
}
- if (IsX86 || IsX86_64)
- Res |= SanitizerKind::Function;
Res |= SanitizerKind::Vptr;
return Res;
}
+const char *Solaris::getDefaultLinker() const {
+ // FIXME: Only handle Solaris ld and GNU ld here.
+ return llvm::StringSwitch<const char *>(CLANG_DEFAULT_LINKER)
+ .Cases("bfd", "gld", "/usr/gnu/bin/ld")
+ .Default("/usr/bin/ld");
+}
+
Tool *Solaris::buildAssembler() const {
return new tools::solaris::Assembler(*this);
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.h
index fbac92c2c0f3..9ec83b773da4 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.h
@@ -17,12 +17,11 @@ namespace clang {
namespace driver {
namespace tools {
-/// solaris -- Directly call Solaris assembler and linker
+/// Directly call Solaris assembler and linker
namespace solaris {
-class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
+class LLVM_LIBRARY_VISIBILITY Assembler final : public gnutools::Assembler {
public:
- Assembler(const ToolChain &TC)
- : Tool("solaris::Assembler", "assembler", TC) {}
+ Assembler(const ToolChain &TC) : gnutools::Assembler(TC) {}
bool hasIntegratedCPP() const override { return false; }
@@ -32,12 +31,15 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+bool isLinkerGnuLd(const ToolChain &TC, const llvm::opt::ArgList &Args);
+
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
public:
Linker(const ToolChain &TC) : Tool("solaris::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
+ std::string getLinkerPath(const llvm::opt::ArgList &Args) const;
void ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output, const InputInfoList &Inputs,
@@ -63,12 +65,8 @@ public:
llvm::opt::ArgStringList &CC1Args) const override;
SanitizerMask getSupportedSanitizers() const override;
- unsigned GetDefaultDwarfVersion() const override { return 2; }
- const char *getDefaultLinker() const override {
- // clang currently uses Solaris ld-only options.
- return "/usr/bin/ld";
- }
+ const char *getDefaultLinker() const override;
protected:
Tool *buildAssembler() const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/TCE.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/TCE.cpp
index 33a81c54bd42..5f4051d31168 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/TCE.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/TCE.cpp
@@ -34,7 +34,9 @@ bool TCEToolChain::IsMathErrnoDefault() const { return true; }
bool TCEToolChain::isPICDefault() const { return false; }
-bool TCEToolChain::isPIEDefault() const { return false; }
+bool TCEToolChain::isPIEDefault(const llvm::opt::ArgList &Args) const {
+ return false;
+}
bool TCEToolChain::isPICDefaultForced() const { return false; }
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/TCE.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/TCE.h
index 72933dae965e..31a64cfe878a 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/TCE.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/TCE.h
@@ -27,7 +27,7 @@ public:
bool IsMathErrnoDefault() const override;
bool isPICDefault() const override;
- bool isPIEDefault() const override;
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override;
bool isPICDefaultForced() const override;
};
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp
index e28f340f9aad..39529e0b6b35 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.cpp
@@ -28,18 +28,32 @@ VEToolChain::VEToolChain(const Driver &D, const llvm::Triple &Triple,
getProgramPaths().push_back("/opt/nec/ve/bin");
// ProgramPaths are found via 'PATH' environment variable.
- // default file paths are:
- // ${RESOURCEDIR}/lib/linux/ve (== getArchSpecificLibPath)
- // /lib/../lib64
- // /usr/lib/../lib64
- // ${BINPATH}/../lib
- // /lib
- // /usr/lib
- //
- // These are OK for host, but no go for VE. So, defines them all
- // from scratch here.
+ // Default library paths are following:
+ // ${RESOURCEDIR}/lib/ve-unknown-linux-gnu,
+ // These are OK.
+
+ // Default file paths are following:
+ // ${RESOURCEDIR}/lib/ve-unknown-linux-gnu, (== getArchSpecificLibPaths)
+ // ${RESOURCEDIR}/lib/linux/ve, (== getArchSpecificLibPaths)
+ // /lib/../lib64,
+ // /usr/lib/../lib64,
+ // ${BINPATH}/../lib,
+ // /lib,
+ // /usr/lib,
+ // These are OK for host, but no go for VE.
+
+ // Define file paths from scratch here.
getFilePaths().clear();
- getFilePaths().push_back(getArchSpecificLibPath());
+
+ // Add library directories:
+ // ${BINPATH}/../lib/ve-unknown-linux-gnu, (== getStdlibPath)
+ // ${RESOURCEDIR}/lib/ve-unknown-linux-gnu, (== getArchSpecificLibPaths)
+ // ${RESOURCEDIR}/lib/linux/ve, (== getArchSpecificLibPaths)
+ // ${SYSROOT}/opt/nec/ve/lib,
+ if (std::optional<std::string> Path = getStdlibPath())
+ getFilePaths().push_back(std::move(*Path));
+ for (const auto &Path : getArchSpecificLibPaths())
+ getFilePaths().push_back(Path);
getFilePaths().push_back(computeSysRoot() + "/opt/nec/ve/lib");
}
@@ -53,7 +67,9 @@ Tool *VEToolChain::buildLinker() const {
bool VEToolChain::isPICDefault() const { return false; }
-bool VEToolChain::isPIEDefault() const { return false; }
+bool VEToolChain::isPIEDefault(const llvm::opt::ArgList &Args) const {
+ return false;
+}
bool VEToolChain::isPICDefaultForced() const { return false; }
@@ -113,9 +129,10 @@ void VEToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
ArrayRef<StringRef> DirVec(Dirs);
addSystemIncludes(DriverArgs, CC1Args, DirVec);
} else {
- SmallString<128> P(getDriver().ResourceDir);
- llvm::sys::path::append(P, "include/c++/v1");
- addSystemInclude(DriverArgs, CC1Args, P);
+ // Add following paths for multiple target installation.
+ // ${INSTALLDIR}/include/ve-unknown-linux-gnu/c++/v1,
+ // ${INSTALLDIR}/include/c++/v1,
+ addLibCxxIncludePaths(DriverArgs, CC1Args);
}
}
@@ -126,7 +143,15 @@ void VEToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
tools::addArchSpecificRPath(*this, Args, CmdArgs);
+ // Add paths for libc++.so and other shared libraries.
+ if (std::optional<std::string> Path = getStdlibPath()) {
+ CmdArgs.push_back("-rpath");
+ CmdArgs.push_back(Args.MakeArgString(*Path));
+ }
+
CmdArgs.push_back("-lc++");
+ if (Args.hasArg(options::OPT_fexperimental_library))
+ CmdArgs.push_back("-lc++experimental");
CmdArgs.push_back("-lc++abi");
CmdArgs.push_back("-lunwind");
// libc++ requires -lpthread under glibc environment
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.h
index b330331ca84e..8b9ccaa7fada 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.h
@@ -26,9 +26,8 @@ protected:
Tool *buildLinker() const override;
public:
- bool IsIntegratedAssemblerDefault() const override { return true; }
bool isPICDefault() const override;
- bool isPIEDefault() const override;
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override;
bool isPICDefaultForced() const override;
bool SupportsProfiling() const override;
bool hasBlocksRuntime() const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp
index 19f3571e6b38..0b16b660364f 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp
@@ -8,15 +8,17 @@
#include "WebAssembly.h"
#include "CommonArgs.h"
+#include "Gnu.h"
#include "clang/Basic/Version.h"
#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
+#include "llvm/Option/ArgList.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
-#include "llvm/Option/ArgList.h"
+#include "llvm/Support/VirtualFileSystem.h"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -42,8 +44,15 @@ std::string wasm::Linker::getLinkerPath(const ArgList &Args) const {
llvm::sys::fs::can_execute(UseLinker))
return std::string(UseLinker);
- // Accept 'lld', and 'ld' as aliases for the default linker
- if (UseLinker != "lld" && UseLinker != "ld")
+ // Interpret 'lld' as explicitly requesting `wasm-ld`, so look for that
+ // linker. Note that for `wasm32-wasip2` this overrides the default linker
+ // of `wasm-component-ld`.
+ if (UseLinker == "lld") {
+ return ToolChain.GetProgramPath("wasm-ld");
+ }
+
+ // Allow 'ld' as an alias for the default linker
+ if (UseLinker != "ld")
ToolChain.getDriver().Diag(diag::err_drv_invalid_linker_name)
<< A->getAsString(Args);
}
@@ -63,7 +72,7 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
ArgStringList CmdArgs;
CmdArgs.push_back("-m");
- if (getToolChain().getTriple().isArch64Bit())
+ if (ToolChain.getTriple().isArch64Bit())
CmdArgs.push_back("wasm64");
else
CmdArgs.push_back("wasm32");
@@ -71,34 +80,56 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_s))
CmdArgs.push_back("--strip-all");
- Args.AddAllArgs(CmdArgs, options::OPT_L);
- Args.AddAllArgs(CmdArgs, options::OPT_u);
+ // On `wasip2` the default linker is `wasm-component-ld` which wraps the
+ // execution of `wasm-ld`. Find `wasm-ld` and pass it as an argument of where
+ // to find it to avoid it needing to hunt and rediscover or search `PATH` for
+ // where it is.
+ if (llvm::sys::path::stem(Linker).ends_with_insensitive(
+ "wasm-component-ld")) {
+ CmdArgs.push_back("--wasm-ld-path");
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetProgramPath("wasm-ld")));
+ }
+
+ Args.addAllArgs(CmdArgs, {options::OPT_L, options::OPT_u});
+
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
- const char *Crt1 = "crt1.o";
- const char *Entry = NULL;
+ bool IsCommand = true;
+ const char *Crt1;
+ const char *Entry = nullptr;
- // If crt1-command.o exists, it supports new-style commands, so use it.
- // Otherwise, use the old crt1.o. This is a temporary transition measure.
- // Once WASI libc no longer needs to support LLVM versions which lack
- // support for new-style command, it can make crt1.o the same as
- // crt1-command.o. And once LLVM no longer needs to support WASI libc
- // versions before that, it can switch to using crt1-command.o.
- if (ToolChain.GetFilePath("crt1-command.o") != "crt1-command.o")
- Crt1 = "crt1-command.o";
+ // When -shared is specified, use the reactor exec model unless
+ // specified otherwise.
+ if (Args.hasArg(options::OPT_shared))
+ IsCommand = false;
if (const Arg *A = Args.getLastArg(options::OPT_mexec_model_EQ)) {
StringRef CM = A->getValue();
if (CM == "command") {
- // Use default values.
+ IsCommand = true;
} else if (CM == "reactor") {
- Crt1 = "crt1-reactor.o";
- Entry = "_initialize";
+ IsCommand = false;
} else {
ToolChain.getDriver().Diag(diag::err_drv_invalid_argument_to_option)
<< CM << A->getOption().getName();
}
}
+
+ if (IsCommand) {
+ // If crt1-command.o exists, it supports new-style commands, so use it.
+ // Otherwise, use the old crt1.o. This is a temporary transition measure.
+ // Once WASI libc no longer needs to support LLVM versions which lack
+ // support for new-style command, it can make crt1.o the same as
+ // crt1-command.o. And once LLVM no longer needs to support WASI libc
+ // versions before that, it can switch to using crt1-command.o.
+ Crt1 = "crt1.o";
+ if (ToolChain.GetFilePath("crt1-command.o") != "crt1-command.o")
+ Crt1 = "crt1-command.o";
+ } else {
+ Crt1 = "crt1-reactor.o";
+ Entry = "_initialize";
+ }
+
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles))
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(Crt1)));
if (Entry) {
@@ -106,6 +137,9 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(Entry));
}
+ if (Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back(Args.MakeArgString("-shared"));
+
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
@@ -124,14 +158,25 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
+ // When optimizing, if wasm-opt is available, run it.
+ std::string WasmOptPath;
+ if (Args.getLastArg(options::OPT_O_Group)) {
+ WasmOptPath = ToolChain.GetProgramPath("wasm-opt");
+ if (WasmOptPath == "wasm-opt") {
+ WasmOptPath = {};
+ }
+ }
+
+ if (!WasmOptPath.empty()) {
+ CmdArgs.push_back("--keep-section=target_features");
+ }
+
C.addCommand(std::make_unique<Command>(JA, *this,
ResponseFileSupport::AtFileCurCP(),
Linker, CmdArgs, Inputs, Output));
- // When optimizing, if wasm-opt is available, run it.
if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
- auto WasmOptPath = getToolChain().GetProgramPath("wasm-opt");
- if (WasmOptPath != "wasm-opt") {
+ if (!WasmOptPath.empty()) {
StringRef OOpt = "s";
if (A->getOption().matches(options::OPT_O4) ||
A->getOption().matches(options::OPT_Ofast))
@@ -143,13 +188,13 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (OOpt != "0") {
const char *WasmOpt = Args.MakeArgString(WasmOptPath);
- ArgStringList CmdArgs;
- CmdArgs.push_back(Output.getFilename());
- CmdArgs.push_back(Args.MakeArgString(llvm::Twine("-O") + OOpt));
- CmdArgs.push_back("-o");
- CmdArgs.push_back(Output.getFilename());
+ ArgStringList OptArgs;
+ OptArgs.push_back(Output.getFilename());
+ OptArgs.push_back(Args.MakeArgString(llvm::Twine("-O") + OOpt));
+ OptArgs.push_back("-o");
+ OptArgs.push_back(Output.getFilename());
C.addCommand(std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileCurCP(), WasmOpt, CmdArgs,
+ JA, *this, ResponseFileSupport::AtFileCurCP(), WasmOpt, OptArgs,
Inputs, Output));
}
}
@@ -193,6 +238,12 @@ WebAssembly::WebAssembly(const Driver &D, const llvm::Triple &Triple,
}
}
+const char *WebAssembly::getDefaultLinker() const {
+ if (getOS() == "wasip2")
+ return "wasm-component-ld";
+ return "wasm-ld";
+}
+
bool WebAssembly::IsMathErrnoDefault() const { return false; }
bool WebAssembly::IsObjCNonFragileABIDefault() const { return true; }
@@ -201,12 +252,12 @@ bool WebAssembly::UseObjCMixedDispatch() const { return true; }
bool WebAssembly::isPICDefault() const { return false; }
-bool WebAssembly::isPIEDefault() const { return false; }
+bool WebAssembly::isPIEDefault(const llvm::opt::ArgList &Args) const {
+ return false;
+}
bool WebAssembly::isPICDefaultForced() const { return false; }
-bool WebAssembly::IsIntegratedAssemblerDefault() const { return true; }
-
bool WebAssembly::hasBlocksRuntime() const { return false; }
// TODO: Support profiling.
@@ -293,21 +344,24 @@ void WebAssembly::addClangTargetOptions(const ArgList &DriverArgs,
// '-fwasm-exceptions' implies exception-handling feature
CC1Args.push_back("-target-feature");
CC1Args.push_back("+exception-handling");
+ // Backend needs -wasm-enable-eh to enable Wasm EH
+ CC1Args.push_back("-mllvm");
+ CC1Args.push_back("-wasm-enable-eh");
}
for (const Arg *A : DriverArgs.filtered(options::OPT_mllvm)) {
StringRef Opt = A->getValue(0);
- if (Opt.startswith("-emscripten-cxx-exceptions-allowed")) {
+ if (Opt.starts_with("-emscripten-cxx-exceptions-allowed")) {
// '-mllvm -emscripten-cxx-exceptions-allowed' should be used with
// '-mllvm -enable-emscripten-cxx-exceptions'
- bool EmExceptionArgExists = false;
+ bool EmEHArgExists = false;
for (const Arg *A : DriverArgs.filtered(options::OPT_mllvm)) {
if (StringRef(A->getValue(0)) == "-enable-emscripten-cxx-exceptions") {
- EmExceptionArgExists = true;
+ EmEHArgExists = true;
break;
}
}
- if (!EmExceptionArgExists)
+ if (!EmEHArgExists)
getDriver().Diag(diag::err_drv_argument_only_allowed_with)
<< "-mllvm -emscripten-cxx-exceptions-allowed"
<< "-mllvm -enable-emscripten-cxx-exceptions";
@@ -323,6 +377,38 @@ void WebAssembly::addClangTargetOptions(const ArgList &DriverArgs,
":noinline"));
}
}
+
+ if (Opt.starts_with("-wasm-enable-sjlj")) {
+ // '-mllvm -wasm-enable-sjlj' is not compatible with
+ // '-mno-exception-handling'
+ if (DriverArgs.hasFlag(options::OPT_mno_exception_handing,
+ options::OPT_mexception_handing, false))
+ getDriver().Diag(diag::err_drv_argument_not_allowed_with)
+ << "-mllvm -wasm-enable-sjlj"
+ << "-mno-exception-handling";
+ // '-mllvm -wasm-enable-sjlj' is not compatible with
+ // '-mllvm -enable-emscripten-cxx-exceptions'
+ // because we don't allow Emscripten EH + Wasm SjLj
+ for (const Arg *A : DriverArgs.filtered(options::OPT_mllvm)) {
+ if (StringRef(A->getValue(0)) == "-enable-emscripten-cxx-exceptions")
+ getDriver().Diag(diag::err_drv_argument_not_allowed_with)
+ << "-mllvm -wasm-enable-sjlj"
+ << "-mllvm -enable-emscripten-cxx-exceptions";
+ }
+ // '-mllvm -wasm-enable-sjlj' is not compatible with
+ // '-mllvm -enable-emscripten-sjlj'
+ for (const Arg *A : DriverArgs.filtered(options::OPT_mllvm)) {
+ if (StringRef(A->getValue(0)) == "-enable-emscripten-sjlj")
+ getDriver().Diag(diag::err_drv_argument_not_allowed_with)
+ << "-mllvm -wasm-enable-sjlj"
+ << "-mllvm -enable-emscripten-sjlj";
+ }
+ // '-mllvm -wasm-enable-sjlj' implies exception-handling feature
+ CC1Args.push_back("-target-feature");
+ CC1Args.push_back("+exception-handling");
+ // Backend needs '-exception-model=wasm' to use Wasm EH instructions
+ CC1Args.push_back("-exception-model=wasm");
+ }
}
}
@@ -334,7 +420,11 @@ ToolChain::CXXStdlibType
WebAssembly::GetCXXStdlibType(const ArgList &Args) const {
if (Arg *A = Args.getLastArg(options::OPT_stdlib_EQ)) {
StringRef Value = A->getValue();
- if (Value != "libc++")
+ if (Value == "libc++")
+ return ToolChain::CST_Libcxx;
+ else if (Value == "libstdc++")
+ return ToolChain::CST_Libstdcxx;
+ else
getDriver().Diag(diag::err_drv_invalid_stdlib_name)
<< A->getAsString(Args);
}
@@ -380,17 +470,18 @@ void WebAssembly::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
void WebAssembly::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- if (!DriverArgs.hasArg(options::OPT_nostdlibinc) &&
- !DriverArgs.hasArg(options::OPT_nostdincxx)) {
- if (getTriple().getOS() != llvm::Triple::UnknownOS) {
- const std::string MultiarchTriple =
- getMultiarchTriple(getDriver(), getTriple(), getDriver().SysRoot);
- addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/include/" + MultiarchTriple +
- "/c++/v1");
- }
- addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/include/c++/v1");
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc, options::OPT_nostdinc,
+ options::OPT_nostdincxx))
+ return;
+
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx:
+ addLibCxxIncludePaths(DriverArgs, CC1Args);
+ break;
+ case ToolChain::CST_Libstdcxx:
+ addLibStdCXXIncludePaths(DriverArgs, CC1Args);
+ break;
}
}
@@ -400,10 +491,13 @@ void WebAssembly::AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
switch (GetCXXStdlibType(Args)) {
case ToolChain::CST_Libcxx:
CmdArgs.push_back("-lc++");
+ if (Args.hasArg(options::OPT_fexperimental_library))
+ CmdArgs.push_back("-lc++experimental");
CmdArgs.push_back("-lc++abi");
break;
case ToolChain::CST_Libstdcxx:
- llvm_unreachable("invalid stdlib name");
+ CmdArgs.push_back("-lstdc++");
+ break;
}
}
@@ -412,9 +506,86 @@ SanitizerMask WebAssembly::getSupportedSanitizers() const {
if (getTriple().isOSEmscripten()) {
Res |= SanitizerKind::Vptr | SanitizerKind::Leak | SanitizerKind::Address;
}
+ // -fsanitize=function places two words before the function label, which are
+ // -unsupported.
+ Res &= ~SanitizerKind::Function;
return Res;
}
Tool *WebAssembly::buildLinker() const {
return new tools::wasm::Linker(*this);
}
+
+void WebAssembly::addLibCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+ std::string SysRoot = computeSysRoot();
+ std::string LibPath = SysRoot + "/include";
+ const std::string MultiarchTriple =
+ getMultiarchTriple(D, getTriple(), SysRoot);
+ bool IsKnownOs = (getTriple().getOS() != llvm::Triple::UnknownOS);
+
+ std::string Version = detectLibcxxVersion(LibPath);
+ if (Version.empty())
+ return;
+
+ // First add the per-target include path if the OS is known.
+ if (IsKnownOs) {
+ std::string TargetDir = LibPath + "/" + MultiarchTriple + "/c++/" + Version;
+ addSystemInclude(DriverArgs, CC1Args, TargetDir);
+ }
+
+ // Second add the generic one.
+ addSystemInclude(DriverArgs, CC1Args, LibPath + "/c++/" + Version);
+}
+
+void WebAssembly::addLibStdCXXIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ // We cannot use GCCInstallationDetector here as the sysroot usually does
+ // not contain a full GCC installation.
+ // Instead, we search the given sysroot for /usr/include/xx, similar
+ // to how we do it for libc++.
+ const Driver &D = getDriver();
+ std::string SysRoot = computeSysRoot();
+ std::string LibPath = SysRoot + "/include";
+ const std::string MultiarchTriple =
+ getMultiarchTriple(D, getTriple(), SysRoot);
+ bool IsKnownOs = (getTriple().getOS() != llvm::Triple::UnknownOS);
+
+ // This is similar to detectLibcxxVersion()
+ std::string Version;
+ {
+ std::error_code EC;
+ Generic_GCC::GCCVersion MaxVersion =
+ Generic_GCC::GCCVersion::Parse("0.0.0");
+ SmallString<128> Path(LibPath);
+ llvm::sys::path::append(Path, "c++");
+ for (llvm::vfs::directory_iterator LI = getVFS().dir_begin(Path, EC), LE;
+ !EC && LI != LE; LI = LI.increment(EC)) {
+ StringRef VersionText = llvm::sys::path::filename(LI->path());
+ if (VersionText[0] != 'v') {
+ auto Version = Generic_GCC::GCCVersion::Parse(VersionText);
+ if (Version > MaxVersion)
+ MaxVersion = Version;
+ }
+ }
+ if (MaxVersion.Major > 0)
+ Version = MaxVersion.Text;
+ }
+
+ if (Version.empty())
+ return;
+
+ // First add the per-target include path if the OS is known.
+ if (IsKnownOs) {
+ std::string TargetDir = LibPath + "/c++/" + Version + "/" + MultiarchTriple;
+ addSystemInclude(DriverArgs, CC1Args, TargetDir);
+ }
+
+ // Second add the generic one.
+ addSystemInclude(DriverArgs, CC1Args, LibPath + "/c++/" + Version);
+ // Third the backward one.
+ addSystemInclude(DriverArgs, CC1Args, LibPath + "/c++/" + Version + "/backward");
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h
index 8a3f82d9efdf..76e0ca39bd74 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h
@@ -18,7 +18,7 @@ namespace driver {
namespace tools {
namespace wasm {
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
public:
explicit Linker(const ToolChain &TC) : Tool("wasm::Linker", "linker", TC) {}
bool isLinkJob() const override { return true; }
@@ -45,12 +45,12 @@ private:
bool IsObjCNonFragileABIDefault() const override;
bool UseObjCMixedDispatch() const override;
bool isPICDefault() const override;
- bool isPIEDefault() const override;
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override;
bool isPICDefaultForced() const override;
- bool IsIntegratedAssemblerDefault() const override;
bool hasBlocksRuntime() const override;
bool SupportsProfiling() const override;
bool HasNativeLLVMSupport() const override;
+ unsigned GetDefaultDwarfVersion() const override { return 4; }
void
addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
@@ -67,13 +67,22 @@ private:
llvm::opt::ArgStringList &CmdArgs) const override;
SanitizerMask getSupportedSanitizers() const override;
- const char *getDefaultLinker() const override { return "wasm-ld"; }
+ const char *getDefaultLinker() const override;
+
+ CXXStdlibType GetDefaultCXXStdlibType() const override {
+ return ToolChain::CST_Libcxx;
+ }
Tool *buildLinker() const override;
std::string getMultiarchTriple(const Driver &D,
const llvm::Triple &TargetTriple,
StringRef SysRoot) const override;
+
+ void addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const;
+ void addLibStdCXXIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const;
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/XCore.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/XCore.cpp
index 5f94f83d3691..c95ebabdd30c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/XCore.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/XCore.cpp
@@ -63,11 +63,10 @@ void tools::XCore::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *LinkingOutput) const {
ArgStringList CmdArgs;
+ assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
if (Output.isFilename()) {
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
- } else {
- assert(Output.isNothing() && "Invalid output.");
}
if (Args.hasArg(options::OPT_v))
@@ -102,7 +101,9 @@ Tool *XCoreToolChain::buildLinker() const {
bool XCoreToolChain::isPICDefault() const { return false; }
-bool XCoreToolChain::isPIEDefault() const { return false; }
+bool XCoreToolChain::isPIEDefault(const llvm::opt::ArgList &Args) const {
+ return false;
+}
bool XCoreToolChain::isPICDefaultForced() const { return false; }
@@ -128,6 +129,10 @@ void XCoreToolChain::addClangTargetOptions(const ArgList &DriverArgs,
ArgStringList &CC1Args,
Action::OffloadKind) const {
CC1Args.push_back("-nostdsysteminc");
+ // Set `-fno-use-cxa-atexit` to default.
+ if (!DriverArgs.hasFlag(options::OPT_fuse_cxa_atexit,
+ options::OPT_fno_use_cxa_atexit, false))
+ CC1Args.push_back("-fno-use-cxa-atexit");
}
void XCoreToolChain::AddClangCXXStdlibIncludeArgs(
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/XCore.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/XCore.h
index 41dce08454c0..95359a6e2542 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/XCore.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/XCore.h
@@ -20,7 +20,7 @@ namespace XCore {
// For XCore, we do not need to instantiate tools for PreProcess, PreCompile and
// Compile.
// We simply use "clang -cc1" for those actions.
-class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
+class LLVM_LIBRARY_VISIBILITY Assembler final : public Tool {
public:
Assembler(const ToolChain &TC) : Tool("XCore::Assembler", "XCore-as", TC) {}
@@ -31,7 +31,7 @@ public:
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
public:
Linker(const ToolChain &TC) : Tool("XCore::Linker", "XCore-ld", TC) {}
@@ -57,8 +57,9 @@ protected:
Tool *buildLinker() const override;
public:
+ bool IsIntegratedAssemblerDefault() const override { return false; }
bool isPICDefault() const override;
- bool isPIEDefault() const override;
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override;
bool isPICDefaultForced() const override;
bool SupportsProfiling() const override;
bool hasBlocksRuntime() const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.cpp
index f921227076a5..96dbf602e7c1 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.cpp
@@ -11,11 +11,17 @@
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/Support/WithColor.h"
+using namespace clang;
using namespace clang::driver;
+using namespace clang::driver::tools;
using namespace clang::driver::toolchains;
+using namespace llvm;
using namespace llvm::opt;
-using namespace clang;
+using namespace llvm::sys;
ZOS::ZOS(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: ToolChain(D, Triple, Args) {}
@@ -31,3 +37,303 @@ void ZOS::addClangTargetOptions(const ArgList &DriverArgs,
options::OPT_fno_aligned_allocation))
CC1Args.push_back("-faligned-alloc-unavailable");
}
+
+void zos::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
+
+ // Specify assembler output file.
+ assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ }
+
+ // Specify assembler input file.
+ // The system assembler on z/OS takes exactly one input file. The driver is
+ // expected to invoke as(1) separately for each assembler source input file.
+ if (Inputs.size() != 1)
+ llvm_unreachable("Invalid number of input files.");
+ const InputInfo &II = Inputs[0];
+ assert((II.isFilename() || II.isNothing()) && "Invalid input.");
+ if (II.isFilename())
+ CmdArgs.push_back(II.getFilename());
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
+}
+
+static std::string getLEHLQ(const ArgList &Args) {
+ if (Args.hasArg(options::OPT_mzos_hlq_le_EQ)) {
+ Arg *LEHLQArg = Args.getLastArg(options::OPT_mzos_hlq_le_EQ);
+ StringRef HLQ = LEHLQArg->getValue();
+ if (!HLQ.empty())
+ return HLQ.str();
+ }
+ return "CEE";
+}
+
+static std::string getClangHLQ(const ArgList &Args) {
+ if (Args.hasArg(options::OPT_mzos_hlq_clang_EQ)) {
+ Arg *ClangHLQArg = Args.getLastArg(options::OPT_mzos_hlq_clang_EQ);
+ StringRef HLQ = ClangHLQArg->getValue();
+ if (!HLQ.empty())
+ return HLQ.str();
+ }
+ return getLEHLQ(Args);
+}
+
+static std::string getCSSHLQ(const ArgList &Args) {
+ if (Args.hasArg(options::OPT_mzos_hlq_csslib_EQ)) {
+ Arg *CsslibHLQArg = Args.getLastArg(options::OPT_mzos_hlq_csslib_EQ);
+ StringRef HLQ = CsslibHLQArg->getValue();
+ if (!HLQ.empty())
+ return HLQ.str();
+ }
+ return "SYS1";
+}
+
+void zos::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs, const ArgList &Args,
+ const char *LinkingOutput) const {
+ const ZOS &ToolChain = static_cast<const ZOS &>(getToolChain());
+ ArgStringList CmdArgs;
+
+ const bool IsSharedLib =
+ Args.hasFlag(options::OPT_shared, options::OPT_static, false);
+
+ assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ }
+
+ SmallString<128> LinkerOptions;
+ LinkerOptions = "AMODE=";
+ LinkerOptions += "64";
+ LinkerOptions += ",LIST";
+ LinkerOptions += ",DYNAM=DLL";
+ LinkerOptions += ",MSGLEVEL=4";
+ LinkerOptions += ",CASE=MIXED";
+ LinkerOptions += ",REUS=RENT";
+
+ CmdArgs.push_back("-b");
+ CmdArgs.push_back(Args.MakeArgString(LinkerOptions));
+
+ if (!IsSharedLib) {
+ CmdArgs.push_back("-e");
+ CmdArgs.push_back("CELQSTRT");
+
+ CmdArgs.push_back("-O");
+ CmdArgs.push_back("CELQSTRT");
+
+ CmdArgs.push_back("-u");
+ CmdArgs.push_back("CELQMAIN");
+ }
+
+ // Generate side file if -shared option is present.
+ if (IsSharedLib) {
+ StringRef OutputName = Output.getFilename();
+ // Strip away the last file suffix in presence from output name and add
+ // a new .x suffix.
+ size_t Suffix = OutputName.find_last_of('.');
+ const char *SideDeckName =
+ Args.MakeArgString(OutputName.substr(0, Suffix) + ".x");
+ CmdArgs.push_back("-x");
+ CmdArgs.push_back(SideDeckName);
+ } else {
+ // We need to direct side file to /dev/null to suppress linker warning when
+ // the object file contains exported symbols, and -shared or
+ // -Wl,-x<sidedeck>.x is not specified.
+ CmdArgs.push_back("-x");
+ CmdArgs.push_back("/dev/null");
+ }
+
+ // Add archive library search paths.
+ Args.addAllArgs(CmdArgs, {options::OPT_L, options::OPT_u});
+
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+
+ // Specify linker input file(s)
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
+
+ // z/OS tool chain depends on LE data sets and the CSSLIB data set.
+ // These data sets can have different high level qualifiers (HLQs)
+ // as each installation can define them differently.
+
+ std::string LEHLQ = getLEHLQ(Args);
+ std::string CsslibHLQ = getCSSHLQ(Args);
+
+ StringRef ld_env_var = StringRef(getenv("_LD_SYSLIB")).trim();
+ if (ld_env_var.empty()) {
+ CmdArgs.push_back("-S");
+ CmdArgs.push_back(Args.MakeArgString("//'" + LEHLQ + ".SCEEBND2'"));
+ CmdArgs.push_back("-S");
+ CmdArgs.push_back(Args.MakeArgString("//'" + CsslibHLQ + ".CSSLIB'"));
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ ld_env_var = StringRef(getenv("_LD_SIDE_DECKS")).trim();
+ if (ld_env_var.empty()) {
+ CmdArgs.push_back(
+ Args.MakeArgString("//'" + LEHLQ + ".SCEELIB(CELQS001)'"));
+ CmdArgs.push_back(
+ Args.MakeArgString("//'" + LEHLQ + ".SCEELIB(CELQS003)'"));
+ } else {
+ SmallVector<StringRef> ld_side_deck;
+ ld_env_var.split(ld_side_deck, ":");
+ for (StringRef ld_loc : ld_side_deck) {
+ CmdArgs.push_back((ld_loc.str()).c_str());
+ }
+ }
+ }
+ // Link libc++ library
+ if (ToolChain.ShouldLinkCXXStdlib(Args)) {
+ ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
+ }
+
+ // Specify compiler-rt library path for linker
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs))
+ AddRunTimeLibs(ToolChain, ToolChain.getDriver(), CmdArgs, Args);
+
+ const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ Exec, CmdArgs, Inputs));
+}
+
+ToolChain::RuntimeLibType ZOS::GetDefaultRuntimeLibType() const {
+ return ToolChain::RLT_CompilerRT;
+}
+
+ToolChain::CXXStdlibType ZOS::GetDefaultCXXStdlibType() const {
+ return ToolChain::CST_Libcxx;
+}
+
+void ZOS::AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const {
+ switch (GetCXXStdlibType(Args)) {
+ case ToolChain::CST_Libstdcxx:
+ llvm::report_fatal_error("linking libstdc++ is unimplemented on z/OS");
+ break;
+ case ToolChain::CST_Libcxx: {
+ std::string ClangHLQ = getClangHLQ(Args);
+ CmdArgs.push_back(
+ Args.MakeArgString("//'" + ClangHLQ + ".SCEELIB(CRTDQCXE)'"));
+ CmdArgs.push_back(
+ Args.MakeArgString("//'" + ClangHLQ + ".SCEELIB(CRTDQCXS)'"));
+ CmdArgs.push_back(
+ Args.MakeArgString("//'" + ClangHLQ + ".SCEELIB(CRTDQCXP)'"));
+ CmdArgs.push_back(
+ Args.MakeArgString("//'" + ClangHLQ + ".SCEELIB(CRTDQCXA)'"));
+ CmdArgs.push_back(
+ Args.MakeArgString("//'" + ClangHLQ + ".SCEELIB(CRTDQXLA)'"));
+ CmdArgs.push_back(
+ Args.MakeArgString("//'" + ClangHLQ + ".SCEELIB(CRTDQUNW)'"));
+ } break;
+ }
+}
+
+auto ZOS::buildAssembler() const -> Tool * { return new zos::Assembler(*this); }
+
+auto ZOS::buildLinker() const -> Tool * { return new zos::Linker(*this); }
+
+void ZOS::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdinc))
+ return;
+
+ const Driver &D = getDriver();
+
+ // resolve ResourceDir
+ std::string ResourceDir(D.ResourceDir);
+
+ // zos_wrappers must take highest precedence
+
+ // - <clang>/lib/clang/<ver>/include/zos_wrappers
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> P(ResourceDir);
+ path::append(P, "include", "zos_wrappers");
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+
+ // - <clang>/lib/clang/<ver>/include
+ SmallString<128> P2(ResourceDir);
+ path::append(P2, "include");
+ addSystemInclude(DriverArgs, CC1Args, P2.str());
+ }
+
+ // - /usr/include
+ if (Arg *SysIncludeArg =
+ DriverArgs.getLastArg(options::OPT_mzos_sys_include_EQ)) {
+ StringRef SysInclude = SysIncludeArg->getValue();
+
+ // fall back to the default include path
+ if (!SysInclude.empty()) {
+
+ // -mzos-sys-include opton can have colon separated
+ // list of paths, so we need to parse the value.
+ StringRef PathLE(SysInclude);
+ size_t Colon = PathLE.find(':');
+ if (Colon == StringRef::npos) {
+ addSystemInclude(DriverArgs, CC1Args, PathLE.str());
+ return;
+ }
+
+ while (Colon != StringRef::npos) {
+ SmallString<128> P = PathLE.substr(0, Colon);
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ PathLE = PathLE.substr(Colon + 1);
+ Colon = PathLE.find(':');
+ }
+ if (PathLE.size())
+ addSystemInclude(DriverArgs, CC1Args, PathLE.str());
+
+ return;
+ }
+ }
+
+ addSystemInclude(DriverArgs, CC1Args, "/usr/include");
+}
+
+void ZOS::TryAddIncludeFromPath(llvm::SmallString<128> Path,
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ if (!getVFS().exists(Path)) {
+ if (DriverArgs.hasArg(options::OPT_v))
+ WithColor::warning(errs(), "Clang")
+ << "ignoring nonexistent directory \"" << Path << "\"\n";
+ if (!DriverArgs.hasArg(options::OPT__HASH_HASH_HASH))
+ return;
+ }
+ addSystemInclude(DriverArgs, CC1Args, Path);
+}
+
+void ZOS::AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx) ||
+ DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx: {
+ // <install>/bin/../include/c++/v1
+ llvm::SmallString<128> InstallBin =
+ llvm::StringRef(getDriver().getInstalledDir());
+ llvm::sys::path::append(InstallBin, "..", "include", "c++", "v1");
+ TryAddIncludeFromPath(InstallBin, DriverArgs, CC1Args);
+ break;
+ }
+ case ToolChain::CST_Libstdcxx:
+ llvm::report_fatal_error(
+ "picking up libstdc++ headers is unimplemented on z/OS");
+ break;
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.h
index cace85d6da77..45204ba0a543 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.h
@@ -14,6 +14,39 @@
namespace clang {
namespace driver {
+namespace tools {
+
+/// Directly call system default assembler and linker.
+namespace zos {
+
+class LLVM_LIBRARY_VISIBILITY Assembler final : public Tool {
+public:
+ Assembler(const ToolChain &TC) : Tool("zos::Assembler", "assembler", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linker final : public Tool {
+public:
+ Linker(const ToolChain &TC) : Tool("zos::Linker", "linker", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
+} // end namespace zos
+} // end namespace tools
+
namespace toolchains {
class LLVM_LIBRARY_VISIBILITY ZOS : public ToolChain {
@@ -23,14 +56,39 @@ public:
~ZOS() override;
bool isPICDefault() const override { return false; }
- bool isPIEDefault() const override { return false; }
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override {
+ return false;
+ }
bool isPICDefaultForced() const override { return false; }
- bool IsIntegratedAssemblerDefault() const override { return true; }
+ void TryAddIncludeFromPath(llvm::SmallString<128> Path,
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const;
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ unsigned GetDefaultDwarfVersion() const override { return 4; }
+ CXXStdlibType GetDefaultCXXStdlibType() const override;
+
+ void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
+ RuntimeLibType GetDefaultRuntimeLibType() const override;
void addClangTargetOptions(
const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadingKind) const override;
+
+ const char *getDefaultLinker() const override { return "/bin/ld"; }
+
+protected:
+ Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/Types.cpp b/contrib/llvm-project/clang/lib/Driver/Types.cpp
index b7ccdf23cbaa..a7b6b9000e1d 100644
--- a/contrib/llvm-project/clang/lib/Driver/Types.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Types.cpp
@@ -42,7 +42,7 @@ static constexpr TypeInfo TypeInfos[] = {
#include "clang/Driver/Types.def"
#undef TYPE
};
-static const unsigned numTypes = llvm::array_lengthof(TypeInfos);
+static const unsigned numTypes = std::size(TypeInfos);
static const TypeInfo &getInfo(unsigned id) {
assert(id > 0 && id - 1 < numTypes && "Invalid Type ID.");
@@ -65,16 +65,23 @@ static bool isPreprocessedModuleType(ID Id) {
return Id == TY_CXXModule || Id == TY_PP_CXXModule;
}
+static bool isPreprocessedHeaderUnitType(ID Id) {
+ return Id == TY_CXXSHeader || Id == TY_CXXUHeader || Id == TY_CXXHUHeader ||
+ Id == TY_PP_CXXHeaderUnit;
+}
+
types::ID types::getPrecompiledType(ID Id) {
if (isPreprocessedModuleType(Id))
return TY_ModuleFile;
+ if (isPreprocessedHeaderUnitType(Id))
+ return TY_HeaderUnit;
if (onlyPrecompileType(Id))
return TY_PCH;
return TY_INVALID;
}
-const char *types::getTypeTempSuffix(ID Id, bool CLMode) {
- if (CLMode) {
+const char *types::getTypeTempSuffix(ID Id, bool CLStyle) {
+ if (CLStyle) {
switch (Id) {
case TY_Object:
case TY_LTO_BC:
@@ -126,7 +133,7 @@ bool types::isAcceptedByClang(ID Id) {
case TY_Asm:
case TY_C: case TY_PP_C:
- case TY_CL: case TY_CLCXX:
+ case TY_CL: case TY_PP_CL: case TY_CLCXX: case TY_PP_CLCXX:
case TY_CUDA: case TY_PP_CUDA:
case TY_CUDA_DEVICE:
case TY_HIP:
@@ -139,10 +146,70 @@ bool types::isAcceptedByClang(ID Id) {
case TY_CLHeader:
case TY_ObjCHeader: case TY_PP_ObjCHeader:
case TY_CXXHeader: case TY_PP_CXXHeader:
+ case TY_CXXSHeader:
+ case TY_CXXUHeader:
+ case TY_CXXHUHeader:
+ case TY_PP_CXXHeaderUnit:
case TY_ObjCXXHeader: case TY_PP_ObjCXXHeader:
case TY_CXXModule: case TY_PP_CXXModule:
case TY_AST: case TY_ModuleFile: case TY_PCH:
case TY_LLVM_IR: case TY_LLVM_BC:
+ case TY_API_INFO:
+ return true;
+ }
+}
+
+bool types::isAcceptedByFlang(ID Id) {
+ switch (Id) {
+ default:
+ return false;
+
+ case TY_Fortran:
+ case TY_PP_Fortran:
+ return true;
+ case TY_LLVM_IR:
+ case TY_LLVM_BC:
+ return true;
+ }
+}
+
+bool types::isDerivedFromC(ID Id) {
+ switch (Id) {
+ default:
+ return false;
+
+ case TY_PP_C:
+ case TY_C:
+ case TY_CL:
+ case TY_PP_CL:
+ case TY_CLCXX:
+ case TY_PP_CLCXX:
+ case TY_PP_CUDA:
+ case TY_CUDA:
+ case TY_CUDA_DEVICE:
+ case TY_PP_HIP:
+ case TY_HIP:
+ case TY_HIP_DEVICE:
+ case TY_PP_ObjC:
+ case TY_PP_ObjC_Alias:
+ case TY_ObjC:
+ case TY_PP_CXX:
+ case TY_CXX:
+ case TY_PP_ObjCXX:
+ case TY_PP_ObjCXX_Alias:
+ case TY_ObjCXX:
+ case TY_RenderScript:
+ case TY_PP_CHeader:
+ case TY_CHeader:
+ case TY_CLHeader:
+ case TY_PP_ObjCHeader:
+ case TY_ObjCHeader:
+ case TY_PP_CXXHeader:
+ case TY_CXXHeader:
+ case TY_PP_ObjCXXHeader:
+ case TY_ObjCXXHeader:
+ case TY_CXXModule:
+ case TY_PP_CXXModule:
return true;
}
}
@@ -170,8 +237,13 @@ bool types::isCXX(ID Id) {
case TY_CXX: case TY_PP_CXX:
case TY_ObjCXX: case TY_PP_ObjCXX: case TY_PP_ObjCXX_Alias:
case TY_CXXHeader: case TY_PP_CXXHeader:
+ case TY_CXXSHeader:
+ case TY_CXXUHeader:
+ case TY_CXXHUHeader:
+ case TY_PP_CXXHeaderUnit:
case TY_ObjCXXHeader: case TY_PP_ObjCXXHeader:
case TY_CXXModule: case TY_PP_CXXModule:
+ case TY_PP_CLCXX:
case TY_CUDA: case TY_PP_CUDA: case TY_CUDA_DEVICE:
case TY_HIP:
case TY_PP_HIP:
@@ -217,15 +289,7 @@ bool types::isHIP(ID Id) {
}
}
-bool types::isFortran(ID Id) {
- switch (Id) {
- default:
- return false;
-
- case TY_Fortran: case TY_PP_Fortran:
- return true;
- }
-}
+bool types::isHLSL(ID Id) { return Id == TY_HLSL; }
bool types::isSrcFile(ID Id) {
return Id != TY_Object && getPreprocessedType(Id) != TY_INVALID;
@@ -233,66 +297,75 @@ bool types::isSrcFile(ID Id) {
types::ID types::lookupTypeForExtension(llvm::StringRef Ext) {
return llvm::StringSwitch<types::ID>(Ext)
- .Case("c", TY_C)
- .Case("C", TY_CXX)
- .Case("F", TY_Fortran)
- .Case("f", TY_PP_Fortran)
- .Case("h", TY_CHeader)
- .Case("H", TY_CXXHeader)
- .Case("i", TY_PP_C)
- .Case("m", TY_ObjC)
- .Case("M", TY_ObjCXX)
- .Case("o", TY_Object)
- .Case("S", TY_Asm)
- .Case("s", TY_PP_Asm)
- .Case("bc", TY_LLVM_BC)
- .Case("cc", TY_CXX)
- .Case("CC", TY_CXX)
- .Case("cl", TY_CL)
- .Case("clcpp", TY_CLCXX)
- .Case("cp", TY_CXX)
- .Case("cu", TY_CUDA)
- .Case("hh", TY_CXXHeader)
- .Case("ii", TY_PP_CXX)
- .Case("ll", TY_LLVM_IR)
- .Case("mi", TY_PP_ObjC)
- .Case("mm", TY_ObjCXX)
- .Case("rs", TY_RenderScript)
- .Case("adb", TY_Ada)
- .Case("ads", TY_Ada)
- .Case("asm", TY_PP_Asm)
- .Case("ast", TY_AST)
- .Case("ccm", TY_CXXModule)
- .Case("cpp", TY_CXX)
- .Case("CPP", TY_CXX)
- .Case("c++", TY_CXX)
- .Case("C++", TY_CXX)
- .Case("cui", TY_PP_CUDA)
- .Case("cxx", TY_CXX)
- .Case("CXX", TY_CXX)
- .Case("F90", TY_Fortran)
- .Case("f90", TY_PP_Fortran)
- .Case("F95", TY_Fortran)
- .Case("f95", TY_PP_Fortran)
- .Case("for", TY_PP_Fortran)
- .Case("FOR", TY_PP_Fortran)
- .Case("fpp", TY_Fortran)
- .Case("FPP", TY_Fortran)
- .Case("gch", TY_PCH)
- .Case("hip", TY_HIP)
- .Case("hpp", TY_CXXHeader)
- .Case("hxx", TY_CXXHeader)
- .Case("iim", TY_PP_CXXModule)
- .Case("lib", TY_Object)
- .Case("mii", TY_PP_ObjCXX)
- .Case("obj", TY_Object)
- .Case("ifs", TY_IFS)
- .Case("pch", TY_PCH)
- .Case("pcm", TY_ModuleFile)
- .Case("c++m", TY_CXXModule)
- .Case("cppm", TY_CXXModule)
- .Case("cxxm", TY_CXXModule)
- .Default(TY_INVALID);
+ .Case("c", TY_C)
+ .Case("C", TY_CXX)
+ .Case("F", TY_Fortran)
+ .Case("f", TY_PP_Fortran)
+ .Case("h", TY_CHeader)
+ .Case("H", TY_CXXHeader)
+ .Case("i", TY_PP_C)
+ .Case("m", TY_ObjC)
+ .Case("M", TY_ObjCXX)
+ .Case("o", TY_Object)
+ .Case("S", TY_Asm)
+ .Case("s", TY_PP_Asm)
+ .Case("bc", TY_LLVM_BC)
+ .Case("cc", TY_CXX)
+ .Case("CC", TY_CXX)
+ .Case("cl", TY_CL)
+ .Case("cli", TY_PP_CL)
+ .Case("clcpp", TY_CLCXX)
+ .Case("clii", TY_PP_CLCXX)
+ .Case("cp", TY_CXX)
+ .Case("cu", TY_CUDA)
+ .Case("hh", TY_CXXHeader)
+ .Case("ii", TY_PP_CXX)
+ .Case("ll", TY_LLVM_IR)
+ .Case("mi", TY_PP_ObjC)
+ .Case("mm", TY_ObjCXX)
+ .Case("rs", TY_RenderScript)
+ .Case("adb", TY_Ada)
+ .Case("ads", TY_Ada)
+ .Case("asm", TY_PP_Asm)
+ .Case("ast", TY_AST)
+ .Case("ccm", TY_CXXModule)
+ .Case("cpp", TY_CXX)
+ .Case("CPP", TY_CXX)
+ .Case("c++", TY_CXX)
+ .Case("C++", TY_CXX)
+ .Case("cui", TY_PP_CUDA)
+ .Case("cxx", TY_CXX)
+ .Case("CXX", TY_CXX)
+ .Case("F03", TY_Fortran)
+ .Case("f03", TY_PP_Fortran)
+ .Case("F08", TY_Fortran)
+ .Case("f08", TY_PP_Fortran)
+ .Case("F90", TY_Fortran)
+ .Case("f90", TY_PP_Fortran)
+ .Case("F95", TY_Fortran)
+ .Case("f95", TY_PP_Fortran)
+ .Case("for", TY_PP_Fortran)
+ .Case("FOR", TY_PP_Fortran)
+ .Case("fpp", TY_Fortran)
+ .Case("FPP", TY_Fortran)
+ .Case("gch", TY_PCH)
+ .Case("hip", TY_HIP)
+ .Case("hipi", TY_PP_HIP)
+ .Case("hpp", TY_CXXHeader)
+ .Case("hxx", TY_CXXHeader)
+ .Case("iim", TY_PP_CXXModule)
+ .Case("iih", TY_PP_CXXHeaderUnit)
+ .Case("lib", TY_Object)
+ .Case("mii", TY_PP_ObjCXX)
+ .Case("obj", TY_Object)
+ .Case("ifs", TY_IFS)
+ .Case("pch", TY_PCH)
+ .Case("pcm", TY_ModuleFile)
+ .Case("c++m", TY_CXXModule)
+ .Case("cppm", TY_CXXModule)
+ .Case("cxxm", TY_CXXModule)
+ .Case("hlsl", TY_HLSL)
+ .Default(TY_INVALID);
}
types::ID types::lookupTypeForTypeSpecifier(const char *Name) {
@@ -323,46 +396,7 @@ types::getCompilationPhases(ID Id, phases::ID LastPhase) {
llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases>
types::getCompilationPhases(const clang::driver::Driver &Driver,
llvm::opt::DerivedArgList &DAL, ID Id) {
- phases::ID LastPhase;
-
- // Filter to compiler mode. When the compiler is run as a preprocessor then
- // compilation is not an option.
- // -S runs the compiler in Assembly listing mode.
- if (Driver.CCCIsCPP() || DAL.getLastArg(options::OPT_E) ||
- DAL.getLastArg(options::OPT__SLASH_EP) ||
- DAL.getLastArg(options::OPT_M, options::OPT_MM) ||
- DAL.getLastArg(options::OPT__SLASH_P))
- LastPhase = phases::Preprocess;
-
- // --precompile only runs up to precompilation.
- // This is a clang extension and is not compatible with GCC.
- else if (DAL.getLastArg(options::OPT__precompile))
- LastPhase = phases::Precompile;
-
- // -{fsyntax-only,-analyze,emit-ast} only run up to the compiler.
- else if (DAL.getLastArg(options::OPT_fsyntax_only) ||
- DAL.getLastArg(options::OPT_print_supported_cpus) ||
- DAL.getLastArg(options::OPT_module_file_info) ||
- DAL.getLastArg(options::OPT_verify_pch) ||
- DAL.getLastArg(options::OPT_rewrite_objc) ||
- DAL.getLastArg(options::OPT_rewrite_legacy_objc) ||
- DAL.getLastArg(options::OPT__migrate) ||
- DAL.getLastArg(options::OPT__analyze) ||
- DAL.getLastArg(options::OPT_emit_ast))
- LastPhase = phases::Compile;
-
- else if (DAL.getLastArg(options::OPT_S) ||
- DAL.getLastArg(options::OPT_emit_llvm))
- LastPhase = phases::Backend;
-
- else if (DAL.getLastArg(options::OPT_c))
- LastPhase = phases::Assemble;
-
- // Generally means, do every phase until Link.
- else
- LastPhase = phases::LastPhase;
-
- return types::getCompilationPhases(Id, LastPhase);
+ return types::getCompilationPhases(Id, Driver.getFinalPhase(DAL));
}
ID types::lookupCXXTypeForCType(ID Id) {
diff --git a/contrib/llvm-project/clang/lib/Driver/XRayArgs.cpp b/contrib/llvm-project/clang/lib/Driver/XRayArgs.cpp
index b44509ad3b88..8c5134e25013 100644
--- a/contrib/llvm-project/clang/lib/Driver/XRayArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/XRayArgs.cpp
@@ -22,12 +22,7 @@ using namespace clang;
using namespace clang::driver;
using namespace llvm::opt;
-namespace {
-constexpr char XRayInstrumentOption[] = "-fxray-instrument";
-constexpr char XRayInstructionThresholdOption[] =
- "-fxray-instruction-threshold=";
-constexpr const char *const XRaySupportedModes[] = {"xray-fdr", "xray-basic"};
-} // namespace
+constexpr const char *XRaySupportedModes[] = {"xray-fdr", "xray-basic"};
XRayArgs::XRayArgs(const ToolChain &TC, const ArgList &Args) {
const Driver &D = TC.getDriver();
@@ -35,79 +30,49 @@ XRayArgs::XRayArgs(const ToolChain &TC, const ArgList &Args) {
if (!Args.hasFlag(options::OPT_fxray_instrument,
options::OPT_fno_xray_instrument, false))
return;
- if (Triple.getOS() == llvm::Triple::Linux) {
+ XRayInstrument = Args.getLastArg(options::OPT_fxray_instrument);
+ if (Triple.isMacOSX()) {
+ switch (Triple.getArch()) {
+ case llvm::Triple::aarch64:
+ case llvm::Triple::x86_64:
+ break;
+ default:
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << XRayInstrument->getSpelling() << Triple.str();
+ break;
+ }
+ } else if (Triple.isOSBinFormatELF()) {
switch (Triple.getArch()) {
case llvm::Triple::x86_64:
case llvm::Triple::arm:
case llvm::Triple::aarch64:
+ case llvm::Triple::hexagon:
case llvm::Triple::ppc64le:
+ case llvm::Triple::loongarch64:
case llvm::Triple::mips:
case llvm::Triple::mipsel:
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
break;
default:
- D.Diag(diag::err_drv_clang_unsupported)
- << (std::string(XRayInstrumentOption) + " on " + Triple.str());
- }
- } else if (Triple.isOSFreeBSD() || Triple.isOSOpenBSD() ||
- Triple.isOSNetBSD() || Triple.isMacOSX()) {
- if (Triple.getArch() != llvm::Triple::x86_64) {
- D.Diag(diag::err_drv_clang_unsupported)
- << (std::string(XRayInstrumentOption) + " on " + Triple.str());
- }
- } else if (Triple.getOS() == llvm::Triple::Fuchsia) {
- switch (Triple.getArch()) {
- case llvm::Triple::x86_64:
- case llvm::Triple::aarch64:
- break;
- default:
- D.Diag(diag::err_drv_clang_unsupported)
- << (std::string(XRayInstrumentOption) + " on " + Triple.str());
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << XRayInstrument->getSpelling() << Triple.str();
}
} else {
- D.Diag(diag::err_drv_clang_unsupported)
- << (std::string(XRayInstrumentOption) + " on " + Triple.str());
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << XRayInstrument->getSpelling() << Triple.str();
}
// Both XRay and -fpatchable-function-entry use
// TargetOpcode::PATCHABLE_FUNCTION_ENTER.
if (Arg *A = Args.getLastArg(options::OPT_fpatchable_function_entry_EQ))
D.Diag(diag::err_drv_argument_not_allowed_with)
- << "-fxray-instrument" << A->getSpelling();
-
- XRayInstrument = true;
- if (const Arg *A =
- Args.getLastArg(options::OPT_fxray_instruction_threshold_,
- options::OPT_fxray_instruction_threshold_EQ)) {
- StringRef S = A->getValue();
- if (S.getAsInteger(0, InstructionThreshold) || InstructionThreshold < 0)
- D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
- }
-
- // By default, the back-end will not emit the lowering for XRay customevent
- // calls if the function is not instrumented. In the future we will change
- // this default to be the reverse, but in the meantime we're going to
- // introduce the new functionality behind a flag.
- if (Args.hasFlag(options::OPT_fxray_always_emit_customevents,
- options::OPT_fno_xray_always_emit_customevents, false))
- XRayAlwaysEmitCustomEvents = true;
-
- if (Args.hasFlag(options::OPT_fxray_always_emit_typedevents,
- options::OPT_fno_xray_always_emit_typedevents, false))
- XRayAlwaysEmitTypedEvents = true;
+ << XRayInstrument->getSpelling() << A->getSpelling();
if (!Args.hasFlag(options::OPT_fxray_link_deps,
- options::OPT_fnoxray_link_deps, true))
+ options::OPT_fno_xray_link_deps, true))
XRayRT = false;
- if (Args.hasFlag(options::OPT_fxray_ignore_loops,
- options::OPT_fno_xray_ignore_loops, false))
- XRayIgnoreLoops = true;
-
- XRayFunctionIndex = Args.hasFlag(options::OPT_fxray_function_index,
- options::OPT_fno_xray_function_index, true);
-
auto Bundles =
Args.getAllArgValues(options::OPT_fxray_instrumentation_bundle);
if (Bundles.empty())
@@ -186,21 +151,6 @@ XRayArgs::XRayArgs(const ToolChain &TC, const ArgList &Args) {
Modes.push_back(std::string(M));
}
- if (const Arg *A = Args.getLastArg(options::OPT_fxray_function_groups)) {
- StringRef S = A->getValue();
- if (S.getAsInteger(0, XRayFunctionGroups) || XRayFunctionGroups < 1)
- D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
- }
-
- if (const Arg *A =
- Args.getLastArg(options::OPT_fxray_selected_function_group)) {
- StringRef S = A->getValue();
- if (S.getAsInteger(0, XRaySelectedFunctionGroup) ||
- XRaySelectedFunctionGroup < 0 ||
- XRaySelectedFunctionGroup >= XRayFunctionGroups)
- D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
- }
-
// Then we want to sort and unique the modes we've collected.
llvm::sort(Modes);
Modes.erase(std::unique(Modes.begin(), Modes.end()), Modes.end());
@@ -210,34 +160,52 @@ void XRayArgs::addArgs(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs, types::ID InputType) const {
if (!XRayInstrument)
return;
+ const Driver &D = TC.getDriver();
+ XRayInstrument->render(Args, CmdArgs);
- CmdArgs.push_back(XRayInstrumentOption);
-
- if (XRayAlwaysEmitCustomEvents)
- CmdArgs.push_back("-fxray-always-emit-customevents");
-
- if (XRayAlwaysEmitTypedEvents)
- CmdArgs.push_back("-fxray-always-emit-typedevents");
-
- if (XRayIgnoreLoops)
- CmdArgs.push_back("-fxray-ignore-loops");
+ // By default, the back-end will not emit the lowering for XRay customevent
+ // calls if the function is not instrumented. In the future we will change
+ // this default to be the reverse, but in the meantime we're going to
+ // introduce the new functionality behind a flag.
+ Args.addOptInFlag(CmdArgs, options::OPT_fxray_always_emit_customevents,
+ options::OPT_fno_xray_always_emit_customevents);
- if (!XRayFunctionIndex)
- CmdArgs.push_back("-fno-xray-function-index");
+ Args.addOptInFlag(CmdArgs, options::OPT_fxray_always_emit_typedevents,
+ options::OPT_fno_xray_always_emit_typedevents);
+ Args.addOptInFlag(CmdArgs, options::OPT_fxray_ignore_loops,
+ options::OPT_fno_xray_ignore_loops);
+ Args.addOptOutFlag(CmdArgs, options::OPT_fxray_function_index,
+ options::OPT_fno_xray_function_index);
- if (XRayFunctionGroups > 1) {
- CmdArgs.push_back(Args.MakeArgString(Twine("-fxray-function-groups=") +
- Twine(XRayFunctionGroups)));
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fxray_instruction_threshold_EQ)) {
+ int Value;
+ StringRef S = A->getValue();
+ if (S.getAsInteger(0, Value) || Value < 0)
+ D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+ else
+ A->render(Args, CmdArgs);
}
- if (XRaySelectedFunctionGroup != 0) {
- CmdArgs.push_back(
- Args.MakeArgString(Twine("-fxray-selected-function-group=") +
- Twine(XRaySelectedFunctionGroup)));
+ int XRayFunctionGroups = 1;
+ int XRaySelectedFunctionGroup = 0;
+ if (const Arg *A = Args.getLastArg(options::OPT_fxray_function_groups)) {
+ StringRef S = A->getValue();
+ if (S.getAsInteger(0, XRayFunctionGroups) || XRayFunctionGroups < 1)
+ D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+ if (XRayFunctionGroups > 1)
+ A->render(Args, CmdArgs);
+ }
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fxray_selected_function_group)) {
+ StringRef S = A->getValue();
+ if (S.getAsInteger(0, XRaySelectedFunctionGroup) ||
+ XRaySelectedFunctionGroup < 0 ||
+ XRaySelectedFunctionGroup >= XRayFunctionGroups)
+ D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+ if (XRaySelectedFunctionGroup != 0)
+ A->render(Args, CmdArgs);
}
-
- CmdArgs.push_back(Args.MakeArgString(Twine(XRayInstructionThresholdOption) +
- Twine(InstructionThreshold)));
for (const auto &Always : AlwaysInstrumentFiles) {
SmallString<64> AlwaysInstrumentOpt("-fxray-always-instrument=");
diff --git a/contrib/llvm-project/clang/lib/Edit/Commit.cpp b/contrib/llvm-project/clang/lib/Edit/Commit.cpp
index 7c5aea6e5069..6e785e866666 100644
--- a/contrib/llvm-project/clang/lib/Edit/Commit.cpp
+++ b/contrib/llvm-project/clang/lib/Edit/Commit.cpp
@@ -334,7 +334,7 @@ bool Commit::canReplaceText(SourceLocation loc, StringRef text,
return false;
Len = text.size();
- return file.substr(Offs.getOffset()).startswith(text);
+ return file.substr(Offs.getOffset()).starts_with(text);
}
bool Commit::isAtStartOfMacroExpansion(SourceLocation loc,
diff --git a/contrib/llvm-project/clang/lib/Edit/EditedSource.cpp b/contrib/llvm-project/clang/lib/Edit/EditedSource.cpp
index 74e6005faeb0..a3386b2489b0 100644
--- a/contrib/llvm-project/clang/lib/Edit/EditedSource.cpp
+++ b/contrib/llvm-project/clang/lib/Edit/EditedSource.cpp
@@ -60,7 +60,7 @@ void EditedSource::finishedCommit() {
MacroArgUse ArgUse;
std::tie(ExpLoc, ArgUse) = ExpArg;
auto &ArgUses = ExpansionToArgMap[ExpLoc];
- if (llvm::find(ArgUses, ArgUse) == ArgUses.end())
+ if (!llvm::is_contained(ArgUses, ArgUse))
ArgUses.push_back(ArgUse);
}
CurrCommitMacroArgExps.clear();
@@ -84,11 +84,11 @@ bool EditedSource::canInsertInOffset(SourceLocation OrigLoc, FileOffset Offs) {
deconstructMacroArgLoc(OrigLoc, ExpLoc, ArgUse);
auto I = ExpansionToArgMap.find(ExpLoc);
if (I != ExpansionToArgMap.end() &&
- find_if(I->second, [&](const MacroArgUse &U) {
+ llvm::any_of(I->second, [&](const MacroArgUse &U) {
return ArgUse.Identifier == U.Identifier &&
std::tie(ArgUse.ImmediateExpansionLoc, ArgUse.UseLoc) !=
std::tie(U.ImmediateExpansionLoc, U.UseLoc);
- }) != I->second.end()) {
+ })) {
// Trying to write in a macro argument input that has already been
// written by a previous commit for another expansion of the same macro
// argument name. For example:
@@ -314,8 +314,8 @@ bool EditedSource::commit(const Commit &commit) {
static bool canBeJoined(char left, char right, const LangOptions &LangOpts) {
// FIXME: Should use TokenConcatenation to make sure we don't allow stuff like
// making two '<' adjacent.
- return !(Lexer::isIdentifierBodyChar(left, LangOpts) &&
- Lexer::isIdentifierBodyChar(right, LangOpts));
+ return !(Lexer::isAsciiIdentifierContinueChar(left, LangOpts) &&
+ Lexer::isAsciiIdentifierContinueChar(right, LangOpts));
}
/// Returns true if it is ok to eliminate the trailing whitespace between
diff --git a/contrib/llvm-project/clang/lib/Edit/RewriteObjCFoundationAPI.cpp b/contrib/llvm-project/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
index a3d388a5ae44..d5bf553e2412 100644
--- a/contrib/llvm-project/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
+++ b/contrib/llvm-project/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
@@ -18,6 +18,7 @@
#include "clang/AST/ParentMap.h"
#include "clang/Edit/Commit.h"
#include "clang/Lex/Lexer.h"
+#include <optional>
using namespace clang;
using namespace edit;
@@ -642,7 +643,7 @@ static bool shouldNotRewriteImmediateMessageArgs(const ObjCMessageExpr *Msg,
static bool rewriteToCharLiteral(const ObjCMessageExpr *Msg,
const CharacterLiteral *Arg,
const NSAPI &NS, Commit &commit) {
- if (Arg->getKind() != CharacterLiteral::Ascii)
+ if (Arg->getKind() != CharacterLiteralKind::Ascii)
return false;
if (NS.isNSNumberLiteralSelector(NSAPI::NSNumberWithChar,
Msg->getSelector())) {
@@ -691,12 +692,12 @@ static bool getLiteralInfo(SourceRange literalRange,
if (text.empty())
return false;
- Optional<bool> UpperU, UpperL;
+ std::optional<bool> UpperU, UpperL;
bool UpperF = false;
struct Suff {
static bool has(StringRef suff, StringRef &text) {
- if (text.endswith(suff)) {
+ if (text.ends_with(suff)) {
text = text.substr(0, text.size()-suff.size());
return true;
}
@@ -704,7 +705,7 @@ static bool getLiteralInfo(SourceRange literalRange,
}
};
- while (1) {
+ while (true) {
if (Suff::has("u", text)) {
UpperU = false;
} else if (Suff::has("U", text)) {
@@ -725,11 +726,11 @@ static bool getLiteralInfo(SourceRange literalRange,
break;
}
- if (!UpperU.hasValue() && !UpperL.hasValue())
+ if (!UpperU && !UpperL)
UpperU = UpperL = true;
- else if (UpperU.hasValue() && !UpperL.hasValue())
+ else if (UpperU && !UpperL)
UpperL = UpperU;
- else if (UpperL.hasValue() && !UpperU.hasValue())
+ else if (UpperL && !UpperU)
UpperU = UpperL;
Info.U = *UpperU ? "U" : "u";
@@ -738,9 +739,9 @@ static bool getLiteralInfo(SourceRange literalRange,
Info.F = UpperF ? "F" : "f";
Info.Hex = Info.Octal = false;
- if (text.startswith("0x"))
+ if (text.starts_with("0x"))
Info.Hex = true;
- else if (!isFloat && !isIntZero && text.startswith("0"))
+ else if (!isFloat && !isIntZero && text.starts_with("0"))
Info.Octal = true;
SourceLocation B = literalRange.getBegin();
@@ -775,8 +776,8 @@ static bool rewriteToNumberLiteral(const ObjCMessageExpr *Msg,
ASTContext &Ctx = NS.getASTContext();
Selector Sel = Msg->getSelector();
- Optional<NSAPI::NSNumberLiteralMethodKind>
- MKOpt = NS.getNSNumberLiteralMethodKind(Sel);
+ std::optional<NSAPI::NSNumberLiteralMethodKind> MKOpt =
+ NS.getNSNumberLiteralMethodKind(Sel);
if (!MKOpt)
return false;
NSAPI::NSNumberLiteralMethodKind MK = *MKOpt;
@@ -796,28 +797,28 @@ static bool rewriteToNumberLiteral(const ObjCMessageExpr *Msg,
case NSAPI::NSNumberWithUnsignedInt:
case NSAPI::NSNumberWithUnsignedInteger:
CallIsUnsigned = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case NSAPI::NSNumberWithInt:
case NSAPI::NSNumberWithInteger:
break;
case NSAPI::NSNumberWithUnsignedLong:
CallIsUnsigned = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case NSAPI::NSNumberWithLong:
CallIsLong = true;
break;
case NSAPI::NSNumberWithUnsignedLongLong:
CallIsUnsigned = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case NSAPI::NSNumberWithLongLong:
CallIsLongLong = true;
break;
case NSAPI::NSNumberWithDouble:
CallIsDouble = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case NSAPI::NSNumberWithFloat:
CallIsFloating = true;
break;
@@ -983,8 +984,8 @@ static bool rewriteToNumericBoxedExpression(const ObjCMessageExpr *Msg,
ASTContext &Ctx = NS.getASTContext();
Selector Sel = Msg->getSelector();
- Optional<NSAPI::NSNumberLiteralMethodKind>
- MKOpt = NS.getNSNumberLiteralMethodKind(Sel);
+ std::optional<NSAPI::NSNumberLiteralMethodKind> MKOpt =
+ NS.getNSNumberLiteralMethodKind(Sel);
if (!MKOpt)
return false;
NSAPI::NSNumberLiteralMethodKind MK = *MKOpt;
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/API.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/API.cpp
new file mode 100644
index 000000000000..aa7a1e9360f4
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/API.cpp
@@ -0,0 +1,566 @@
+//===- ExtractAPI/API.cpp ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the APIRecord and derived record structs,
+/// and the APISet class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/ExtractAPI/API.h"
+#include "clang/AST/CommentCommandTraits.h"
+#include "clang/AST/CommentLexer.h"
+#include "clang/AST/RawCommentList.h"
+#include "clang/Index/USRGeneration.h"
+#include "llvm/ADT/StringRef.h"
+#include <memory>
+
+using namespace clang::extractapi;
+using namespace llvm;
+
+namespace {
+
+template <typename RecordTy, typename... CtorArgsTy>
+RecordTy *addTopLevelRecord(DenseMap<StringRef, APIRecord *> &USRLookupTable,
+ APISet::RecordMap<RecordTy> &RecordMap,
+ StringRef USR, CtorArgsTy &&...CtorArgs) {
+ auto Result = RecordMap.insert({USR, nullptr});
+
+ // Create the record if it does not already exist
+ if (Result.second)
+ Result.first->second =
+ std::make_unique<RecordTy>(USR, std::forward<CtorArgsTy>(CtorArgs)...);
+
+ auto *Record = Result.first->second.get();
+ USRLookupTable.insert({USR, Record});
+ return Record;
+}
+
+} // namespace
+
+NamespaceRecord *
+APISet::addNamespace(APIRecord *Parent, StringRef Name, StringRef USR,
+ PresumedLoc Loc, AvailabilityInfo Availability,
+ LinkageInfo Linkage, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, bool IsFromSystemHeader) {
+ auto *Record = addTopLevelRecord(
+ USRBasedLookupTable, Namespaces, USR, Name, Loc, std::move(Availability),
+ Linkage, Comment, Declaration, SubHeading, IsFromSystemHeader);
+
+ if (Parent)
+ Record->ParentInformation = APIRecord::HierarchyInformation(
+ Parent->USR, Parent->Name, Parent->getKind(), Parent);
+ return Record;
+}
+
+GlobalVariableRecord *
+APISet::addGlobalVar(StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Fragments,
+ DeclarationFragments SubHeading, bool IsFromSystemHeader) {
+ return addTopLevelRecord(USRBasedLookupTable, GlobalVariables, USR, Name, Loc,
+ std::move(Availability), Linkage, Comment, Fragments,
+ SubHeading, IsFromSystemHeader);
+}
+
+GlobalVariableTemplateRecord *APISet::addGlobalVariableTemplate(
+ StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, Template Template,
+ bool IsFromSystemHeader) {
+ return addTopLevelRecord(USRBasedLookupTable, GlobalVariableTemplates, USR,
+ Name, Loc, std::move(Availability), Linkage, Comment,
+ Declaration, SubHeading, Template,
+ IsFromSystemHeader);
+}
+
+GlobalFunctionRecord *APISet::addGlobalFunction(
+ StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Fragments,
+ DeclarationFragments SubHeading, FunctionSignature Signature,
+ bool IsFromSystemHeader) {
+ return addTopLevelRecord(USRBasedLookupTable, GlobalFunctions, USR, Name, Loc,
+ std::move(Availability), Linkage, Comment, Fragments,
+ SubHeading, Signature, IsFromSystemHeader);
+}
+
+GlobalFunctionTemplateRecord *APISet::addGlobalFunctionTemplate(
+ StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, FunctionSignature Signature,
+ Template Template, bool IsFromSystemHeader) {
+ return addTopLevelRecord(USRBasedLookupTable, GlobalFunctionTemplates, USR,
+ Name, Loc, std::move(Availability), Linkage, Comment,
+ Declaration, SubHeading, Signature, Template,
+ IsFromSystemHeader);
+}
+
+GlobalFunctionTemplateSpecializationRecord *
+APISet::addGlobalFunctionTemplateSpecialization(
+ StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, FunctionSignature Signature,
+ bool IsFromSystemHeader) {
+ return addTopLevelRecord(
+ USRBasedLookupTable, GlobalFunctionTemplateSpecializations, USR, Name,
+ Loc, std::move(Availability), Linkage, Comment, Declaration, SubHeading,
+ Signature, IsFromSystemHeader);
+}
+
+EnumConstantRecord *APISet::addEnumConstant(EnumRecord *Enum, StringRef Name,
+ StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ bool IsFromSystemHeader) {
+ auto Record = std::make_unique<EnumConstantRecord>(
+ USR, Name, Loc, std::move(Availability), Comment, Declaration, SubHeading,
+ IsFromSystemHeader);
+ Record->ParentInformation = APIRecord::HierarchyInformation(
+ Enum->USR, Enum->Name, Enum->getKind(), Enum);
+ USRBasedLookupTable.insert({USR, Record.get()});
+ return Enum->Constants.emplace_back(std::move(Record)).get();
+}
+
+EnumRecord *APISet::addEnum(StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ bool IsFromSystemHeader) {
+ return addTopLevelRecord(USRBasedLookupTable, Enums, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration,
+ SubHeading, IsFromSystemHeader);
+}
+
+RecordFieldRecord *APISet::addRecordField(
+ RecordRecord *Record, StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ APIRecord::RecordKind Kind, bool IsFromSystemHeader) {
+ auto RecordField = std::make_unique<RecordFieldRecord>(
+ USR, Name, Loc, std::move(Availability), Comment, Declaration, SubHeading,
+ Kind, IsFromSystemHeader);
+ RecordField->ParentInformation = APIRecord::HierarchyInformation(
+ Record->USR, Record->Name, Record->getKind(), Record);
+ USRBasedLookupTable.insert({USR, RecordField.get()});
+ return Record->Fields.emplace_back(std::move(RecordField)).get();
+}
+
+RecordRecord *APISet::addRecord(StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ APIRecord::RecordKind Kind,
+ bool IsFromSystemHeader) {
+ return addTopLevelRecord(USRBasedLookupTable, Records, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration,
+ SubHeading, Kind, IsFromSystemHeader);
+}
+
+StaticFieldRecord *
+APISet::addStaticField(StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, SymbolReference Context,
+ AccessControl Access, bool IsFromSystemHeader) {
+ return addTopLevelRecord(USRBasedLookupTable, StaticFields, USR, Name, Loc,
+ std::move(Availability), Linkage, Comment,
+ Declaration, SubHeading, Context, Access,
+ IsFromSystemHeader);
+}
+
+CXXFieldRecord *
+APISet::addCXXField(APIRecord *CXXClass, StringRef Name, StringRef USR,
+ PresumedLoc Loc, AvailabilityInfo Availability,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, AccessControl Access,
+ bool IsFromSystemHeader) {
+ auto *Record = addTopLevelRecord(
+ USRBasedLookupTable, CXXFields, USR, Name, Loc, std::move(Availability),
+ Comment, Declaration, SubHeading, Access, IsFromSystemHeader);
+ Record->ParentInformation = APIRecord::HierarchyInformation(
+ CXXClass->USR, CXXClass->Name, CXXClass->getKind(), CXXClass);
+ return Record;
+}
+
+CXXFieldTemplateRecord *APISet::addCXXFieldTemplate(
+ APIRecord *Parent, StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ AccessControl Access, Template Template, bool IsFromSystemHeader) {
+ auto *Record =
+ addTopLevelRecord(USRBasedLookupTable, CXXFieldTemplates, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration,
+ SubHeading, Access, Template, IsFromSystemHeader);
+ Record->ParentInformation = APIRecord::HierarchyInformation(
+ Parent->USR, Parent->Name, Parent->getKind(), Parent);
+
+ return Record;
+}
+
+CXXClassRecord *
+APISet::addCXXClass(APIRecord *Parent, StringRef Name, StringRef USR,
+ PresumedLoc Loc, AvailabilityInfo Availability,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, APIRecord::RecordKind Kind,
+ AccessControl Access, bool IsFromSystemHeader) {
+ auto *Record = addTopLevelRecord(
+ USRBasedLookupTable, CXXClasses, USR, Name, Loc, std::move(Availability),
+ Comment, Declaration, SubHeading, Kind, Access, IsFromSystemHeader);
+ if (Parent)
+ Record->ParentInformation = APIRecord::HierarchyInformation(
+ Parent->USR, Parent->Name, Parent->getKind(), Parent);
+ return Record;
+}
+
+ClassTemplateRecord *APISet::addClassTemplate(
+ APIRecord *Parent, StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ Template Template, AccessControl Access, bool IsFromSystemHeader) {
+ auto *Record =
+ addTopLevelRecord(USRBasedLookupTable, ClassTemplates, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration,
+ SubHeading, Template, Access, IsFromSystemHeader);
+ if (Parent)
+ Record->ParentInformation = APIRecord::HierarchyInformation(
+ Parent->USR, Parent->Name, Parent->getKind(), Parent);
+ return Record;
+}
+
+ClassTemplateSpecializationRecord *APISet::addClassTemplateSpecialization(
+ APIRecord *Parent, StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ AccessControl Access, bool IsFromSystemHeader) {
+ auto *Record =
+ addTopLevelRecord(USRBasedLookupTable, ClassTemplateSpecializations, USR,
+ Name, Loc, std::move(Availability), Comment,
+ Declaration, SubHeading, Access, IsFromSystemHeader);
+ if (Parent)
+ Record->ParentInformation = APIRecord::HierarchyInformation(
+ Parent->USR, Parent->Name, Parent->getKind(), Parent);
+ return Record;
+}
+
+ClassTemplatePartialSpecializationRecord *
+APISet::addClassTemplatePartialSpecialization(
+ APIRecord *Parent, StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ Template Template, AccessControl Access, bool IsFromSystemHeader) {
+ auto *Record = addTopLevelRecord(
+ USRBasedLookupTable, ClassTemplatePartialSpecializations, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration, SubHeading, Template,
+ Access, IsFromSystemHeader);
+ if (Parent)
+ Record->ParentInformation = APIRecord::HierarchyInformation(
+ Parent->USR, Parent->Name, Parent->getKind(), Parent);
+ return Record;
+}
+
+GlobalVariableTemplateSpecializationRecord *
+APISet::addGlobalVariableTemplateSpecialization(
+ StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, bool IsFromSystemHeader) {
+ return addTopLevelRecord(USRBasedLookupTable,
+ GlobalVariableTemplateSpecializations, USR, Name,
+ Loc, std::move(Availability), Linkage, Comment,
+ Declaration, SubHeading, IsFromSystemHeader);
+}
+
+GlobalVariableTemplatePartialSpecializationRecord *
+APISet::addGlobalVariableTemplatePartialSpecialization(
+ StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, Template Template,
+ bool IsFromSystemHeader) {
+ return addTopLevelRecord(
+ USRBasedLookupTable, GlobalVariableTemplatePartialSpecializations, USR,
+ Name, Loc, std::move(Availability), Linkage, Comment, Declaration,
+ SubHeading, Template, IsFromSystemHeader);
+}
+
+ConceptRecord *APISet::addConcept(StringRef Name, StringRef USR,
+ PresumedLoc Loc,
+ AvailabilityInfo Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ Template Template, bool IsFromSystemHeader) {
+ return addTopLevelRecord(USRBasedLookupTable, Concepts, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration,
+ SubHeading, Template, IsFromSystemHeader);
+}
+
+CXXMethodRecord *APISet::addCXXInstanceMethod(
+ APIRecord *CXXClassRecord, StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ FunctionSignature Signature, AccessControl Access,
+ bool IsFromSystemHeader) {
+ CXXMethodRecord *Record =
+ addTopLevelRecord(USRBasedLookupTable, CXXInstanceMethods, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration,
+ SubHeading, Signature, Access, IsFromSystemHeader);
+
+ Record->ParentInformation = APIRecord::HierarchyInformation(
+ CXXClassRecord->USR, CXXClassRecord->Name, CXXClassRecord->getKind(),
+ CXXClassRecord);
+ return Record;
+}
+
+CXXMethodRecord *APISet::addCXXStaticMethod(
+ APIRecord *CXXClassRecord, StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ FunctionSignature Signature, AccessControl Access,
+ bool IsFromSystemHeader) {
+ CXXMethodRecord *Record =
+ addTopLevelRecord(USRBasedLookupTable, CXXStaticMethods, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration,
+ SubHeading, Signature, Access, IsFromSystemHeader);
+
+ Record->ParentInformation = APIRecord::HierarchyInformation(
+ CXXClassRecord->USR, CXXClassRecord->Name, CXXClassRecord->getKind(),
+ CXXClassRecord);
+ return Record;
+}
+
+CXXMethodTemplateRecord *APISet::addCXXMethodTemplate(
+ APIRecord *Parent, StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ FunctionSignature Signature, AccessControl Access, Template Template,
+ bool IsFromSystemHeader) {
+ auto *Record = addTopLevelRecord(USRBasedLookupTable, CXXMethodTemplates, USR,
+ Name, Loc, std::move(Availability), Comment,
+ Declaration, SubHeading, Signature, Access,
+ Template, IsFromSystemHeader);
+ Record->ParentInformation = APIRecord::HierarchyInformation(
+ Parent->USR, Parent->Name, Parent->getKind(), Parent);
+
+ return Record;
+}
+
+CXXMethodTemplateSpecializationRecord *APISet::addCXXMethodTemplateSpec(
+ APIRecord *Parent, StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ FunctionSignature Signature, AccessControl Access,
+ bool IsFromSystemHeader) {
+
+ auto *Record = addTopLevelRecord(
+ USRBasedLookupTable, CXXMethodTemplateSpecializations, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration, SubHeading, Signature,
+ Access, IsFromSystemHeader);
+ Record->ParentInformation = APIRecord::HierarchyInformation(
+ Parent->USR, Parent->Name, Parent->getKind(), Parent);
+
+ return Record;
+}
+
+ObjCCategoryRecord *APISet::addObjCCategory(
+ StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ SymbolReference Interface, bool IsFromSystemHeader,
+ bool IsFromExternalModule) {
+ // Create the category record.
+ auto *Record =
+ addTopLevelRecord(USRBasedLookupTable, ObjCCategories, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration,
+ SubHeading, Interface, IsFromSystemHeader);
+
+ Record->IsFromExternalModule = IsFromExternalModule;
+
+ auto It = ObjCInterfaces.find(Interface.USR);
+ if (It != ObjCInterfaces.end())
+ It->second->Categories.push_back(Record);
+
+ return Record;
+}
+
+ObjCInterfaceRecord *
+APISet::addObjCInterface(StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, LinkageInfo Linkage,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ SymbolReference SuperClass, bool IsFromSystemHeader) {
+ return addTopLevelRecord(USRBasedLookupTable, ObjCInterfaces, USR, Name, Loc,
+ std::move(Availability), Linkage, Comment,
+ Declaration, SubHeading, SuperClass,
+ IsFromSystemHeader);
+}
+
+ObjCMethodRecord *APISet::addObjCMethod(
+ ObjCContainerRecord *Container, StringRef Name, StringRef USR,
+ PresumedLoc Loc, AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ FunctionSignature Signature, bool IsInstanceMethod,
+ bool IsFromSystemHeader) {
+ std::unique_ptr<ObjCMethodRecord> Record;
+ if (IsInstanceMethod)
+ Record = std::make_unique<ObjCInstanceMethodRecord>(
+ USR, Name, Loc, std::move(Availability), Comment, Declaration,
+ SubHeading, Signature, IsFromSystemHeader);
+ else
+ Record = std::make_unique<ObjCClassMethodRecord>(
+ USR, Name, Loc, std::move(Availability), Comment, Declaration,
+ SubHeading, Signature, IsFromSystemHeader);
+
+ Record->ParentInformation = APIRecord::HierarchyInformation(
+ Container->USR, Container->Name, Container->getKind(), Container);
+ USRBasedLookupTable.insert({USR, Record.get()});
+ return Container->Methods.emplace_back(std::move(Record)).get();
+}
+
+ObjCPropertyRecord *APISet::addObjCProperty(
+ ObjCContainerRecord *Container, StringRef Name, StringRef USR,
+ PresumedLoc Loc, AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ ObjCPropertyRecord::AttributeKind Attributes, StringRef GetterName,
+ StringRef SetterName, bool IsOptional, bool IsInstanceProperty,
+ bool IsFromSystemHeader) {
+ std::unique_ptr<ObjCPropertyRecord> Record;
+ if (IsInstanceProperty)
+ Record = std::make_unique<ObjCInstancePropertyRecord>(
+ USR, Name, Loc, std::move(Availability), Comment, Declaration,
+ SubHeading, Attributes, GetterName, SetterName, IsOptional,
+ IsFromSystemHeader);
+ else
+ Record = std::make_unique<ObjCClassPropertyRecord>(
+ USR, Name, Loc, std::move(Availability), Comment, Declaration,
+ SubHeading, Attributes, GetterName, SetterName, IsOptional,
+ IsFromSystemHeader);
+ Record->ParentInformation = APIRecord::HierarchyInformation(
+ Container->USR, Container->Name, Container->getKind(), Container);
+ USRBasedLookupTable.insert({USR, Record.get()});
+ return Container->Properties.emplace_back(std::move(Record)).get();
+}
+
+ObjCInstanceVariableRecord *APISet::addObjCInstanceVariable(
+ ObjCContainerRecord *Container, StringRef Name, StringRef USR,
+ PresumedLoc Loc, AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ ObjCInstanceVariableRecord::AccessControl Access, bool IsFromSystemHeader) {
+ auto Record = std::make_unique<ObjCInstanceVariableRecord>(
+ USR, Name, Loc, std::move(Availability), Comment, Declaration, SubHeading,
+ Access, IsFromSystemHeader);
+ Record->ParentInformation = APIRecord::HierarchyInformation(
+ Container->USR, Container->Name, Container->getKind(), Container);
+ USRBasedLookupTable.insert({USR, Record.get()});
+ return Container->Ivars.emplace_back(std::move(Record)).get();
+}
+
+ObjCProtocolRecord *APISet::addObjCProtocol(StringRef Name, StringRef USR,
+ PresumedLoc Loc,
+ AvailabilityInfo Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ bool IsFromSystemHeader) {
+ return addTopLevelRecord(USRBasedLookupTable, ObjCProtocols, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration,
+ SubHeading, IsFromSystemHeader);
+}
+
+MacroDefinitionRecord *
+APISet::addMacroDefinition(StringRef Name, StringRef USR, PresumedLoc Loc,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ bool IsFromSystemHeader) {
+ return addTopLevelRecord(USRBasedLookupTable, Macros, USR, Name, Loc,
+ Declaration, SubHeading, IsFromSystemHeader);
+}
+
+TypedefRecord *
+APISet::addTypedef(StringRef Name, StringRef USR, PresumedLoc Loc,
+ AvailabilityInfo Availability, const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ SymbolReference UnderlyingType, bool IsFromSystemHeader) {
+ return addTopLevelRecord(USRBasedLookupTable, Typedefs, USR, Name, Loc,
+ std::move(Availability), Comment, Declaration,
+ SubHeading, UnderlyingType, IsFromSystemHeader);
+}
+
+APIRecord *APISet::findRecordForUSR(StringRef USR) const {
+ if (USR.empty())
+ return nullptr;
+
+ return USRBasedLookupTable.lookup(USR);
+}
+
+StringRef APISet::recordUSR(const Decl *D) {
+ SmallString<128> USR;
+ index::generateUSRForDecl(D, USR);
+ return copyString(USR);
+}
+
+StringRef APISet::recordUSRForMacro(StringRef Name, SourceLocation SL,
+ const SourceManager &SM) {
+ SmallString<128> USR;
+ index::generateUSRForMacro(Name, SL, SM, USR);
+ return copyString(USR);
+}
+
+StringRef APISet::copyString(StringRef String) {
+ if (String.empty())
+ return {};
+
+ // No need to allocate memory and copy if the string has already been stored.
+ if (StringAllocator.identifyObject(String.data()))
+ return String;
+
+ void *Ptr = StringAllocator.Allocate(String.size(), 1);
+ memcpy(Ptr, String.data(), String.size());
+ return StringRef(reinterpret_cast<const char *>(Ptr), String.size());
+}
+
+APIRecord::~APIRecord() {}
+ObjCContainerRecord::~ObjCContainerRecord() {}
+ObjCMethodRecord::~ObjCMethodRecord() {}
+ObjCPropertyRecord::~ObjCPropertyRecord() {}
+CXXMethodRecord::~CXXMethodRecord() {}
+
+void GlobalFunctionRecord::anchor() {}
+void GlobalVariableRecord::anchor() {}
+void EnumConstantRecord::anchor() {}
+void EnumRecord::anchor() {}
+void RecordFieldRecord::anchor() {}
+void RecordRecord::anchor() {}
+void CXXFieldRecord::anchor() {}
+void CXXClassRecord::anchor() {}
+void CXXConstructorRecord::anchor() {}
+void CXXDestructorRecord::anchor() {}
+void CXXInstanceMethodRecord::anchor() {}
+void CXXStaticMethodRecord::anchor() {}
+void ObjCInstancePropertyRecord::anchor() {}
+void ObjCClassPropertyRecord::anchor() {}
+void ObjCInstanceVariableRecord::anchor() {}
+void ObjCInstanceMethodRecord::anchor() {}
+void ObjCClassMethodRecord::anchor() {}
+void ObjCCategoryRecord::anchor() {}
+void ObjCInterfaceRecord::anchor() {}
+void ObjCProtocolRecord::anchor() {}
+void MacroDefinitionRecord::anchor() {}
+void TypedefRecord::anchor() {}
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/APIIgnoresList.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/APIIgnoresList.cpp
new file mode 100644
index 000000000000..d6bbc6692d2b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/APIIgnoresList.cpp
@@ -0,0 +1,62 @@
+//===- ExtractAPI/APIIgnoresList.cpp -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements APIIgnoresList that allows users to specifiy a file
+/// containing symbols to ignore during API extraction.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/ExtractAPI/APIIgnoresList.h"
+#include "clang/Basic/FileManager.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Error.h"
+
+using namespace clang;
+using namespace clang::extractapi;
+using namespace llvm;
+
+char IgnoresFileNotFound::ID;
+
+void IgnoresFileNotFound::log(llvm::raw_ostream &os) const {
+ os << "Could not find API ignores file " << Path;
+}
+
+std::error_code IgnoresFileNotFound::convertToErrorCode() const {
+ return llvm::inconvertibleErrorCode();
+}
+
+Expected<APIIgnoresList>
+APIIgnoresList::create(const FilePathList &IgnoresFilePathList,
+ FileManager &FM) {
+ SmallVector<StringRef, 32> Lines;
+ BufferList symbolBufferList;
+
+ for (const auto &CurrentIgnoresFilePath : IgnoresFilePathList) {
+ auto BufferOrErr = FM.getBufferForFile(CurrentIgnoresFilePath);
+
+ if (!BufferOrErr)
+ return make_error<IgnoresFileNotFound>(CurrentIgnoresFilePath);
+
+ auto Buffer = std::move(BufferOrErr.get());
+ Buffer->getBuffer().split(Lines, '\n', /*MaxSplit*/ -1,
+ /*KeepEmpty*/ false);
+ symbolBufferList.push_back(std::move(Buffer));
+ }
+
+ // Symbol names don't have spaces in them, let's just remove these in case
+ // the input is slighlty malformed.
+ transform(Lines, Lines.begin(), [](StringRef Line) { return Line.trim(); });
+ sort(Lines);
+ return APIIgnoresList(std::move(Lines), std::move(symbolBufferList));
+}
+
+bool APIIgnoresList::shouldIgnore(StringRef SymbolName) const {
+ auto It = lower_bound(SymbolsToIgnore, SymbolName);
+ return (It != SymbolsToIgnore.end()) && (*It == SymbolName);
+}
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/AvailabilityInfo.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/AvailabilityInfo.cpp
new file mode 100644
index 000000000000..18e4d16b45bb
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/AvailabilityInfo.cpp
@@ -0,0 +1,35 @@
+#include "clang/ExtractAPI/AvailabilityInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/STLExtras.h"
+
+using namespace clang::extractapi;
+using namespace llvm;
+
+AvailabilityInfo AvailabilityInfo::createFromDecl(const Decl *Decl) {
+ ASTContext &Context = Decl->getASTContext();
+ StringRef PlatformName = Context.getTargetInfo().getPlatformName();
+ AvailabilityInfo Availability;
+
+ // Collect availability attributes from all redeclarations.
+ for (const auto *RD : Decl->redecls()) {
+ for (const auto *A : RD->specific_attrs<AvailabilityAttr>()) {
+ if (A->getPlatform()->getName() != PlatformName)
+ continue;
+ Availability =
+ AvailabilityInfo(A->getPlatform()->getName(), A->getIntroduced(),
+ A->getDeprecated(), A->getObsoleted(), false, false);
+ break;
+ }
+
+ if (const auto *A = RD->getAttr<UnavailableAttr>())
+ if (!A->isImplicit())
+ Availability.UnconditionallyUnavailable = true;
+
+ if (const auto *A = RD->getAttr<DeprecatedAttr>())
+ if (!A->isImplicit())
+ Availability.UnconditionallyDeprecated = true;
+ }
+ return Availability;
+}
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/DeclarationFragments.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/DeclarationFragments.cpp
new file mode 100644
index 000000000000..56c1f5bf5eab
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/DeclarationFragments.cpp
@@ -0,0 +1,1477 @@
+//===- ExtractAPI/DeclarationFragments.cpp ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements Declaration Fragments related classes.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/ExtractAPI/DeclarationFragments.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/QualTypeNames.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/OperatorKinds.h"
+#include "clang/ExtractAPI/TypedefUnderlyingTypeResolver.h"
+#include "clang/Index/USRGeneration.h"
+#include "llvm/ADT/StringSwitch.h"
+#include <typeinfo>
+
+using namespace clang::extractapi;
+using namespace llvm;
+
+namespace {
+
+void findTypeLocForBlockDecl(const clang::TypeSourceInfo *TSInfo,
+ clang::FunctionTypeLoc &Block,
+ clang::FunctionProtoTypeLoc &BlockProto) {
+ if (!TSInfo)
+ return;
+
+ clang::TypeLoc TL = TSInfo->getTypeLoc().getUnqualifiedLoc();
+ while (true) {
+ // Look through qualified types
+ if (auto QualifiedTL = TL.getAs<clang::QualifiedTypeLoc>()) {
+ TL = QualifiedTL.getUnqualifiedLoc();
+ continue;
+ }
+
+ if (auto AttrTL = TL.getAs<clang::AttributedTypeLoc>()) {
+ TL = AttrTL.getModifiedLoc();
+ continue;
+ }
+
+ // Try to get the function prototype behind the block pointer type,
+ // then we're done.
+ if (auto BlockPtr = TL.getAs<clang::BlockPointerTypeLoc>()) {
+ TL = BlockPtr.getPointeeLoc().IgnoreParens();
+ Block = TL.getAs<clang::FunctionTypeLoc>();
+ BlockProto = TL.getAs<clang::FunctionProtoTypeLoc>();
+ }
+ break;
+ }
+}
+
+} // namespace
+
+DeclarationFragments &DeclarationFragments::appendSpace() {
+ if (!Fragments.empty()) {
+ Fragment &Last = Fragments.back();
+ if (Last.Kind == FragmentKind::Text) {
+ // Merge the extra space into the last fragment if the last fragment is
+ // also text.
+ if (Last.Spelling.back() != ' ') { // avoid extra trailing spaces.
+ Last.Spelling.push_back(' ');
+ }
+ } else {
+ append(" ", FragmentKind::Text);
+ }
+ }
+
+ return *this;
+}
+
+StringRef DeclarationFragments::getFragmentKindString(
+ DeclarationFragments::FragmentKind Kind) {
+ switch (Kind) {
+ case DeclarationFragments::FragmentKind::None:
+ return "none";
+ case DeclarationFragments::FragmentKind::Keyword:
+ return "keyword";
+ case DeclarationFragments::FragmentKind::Attribute:
+ return "attribute";
+ case DeclarationFragments::FragmentKind::NumberLiteral:
+ return "number";
+ case DeclarationFragments::FragmentKind::StringLiteral:
+ return "string";
+ case DeclarationFragments::FragmentKind::Identifier:
+ return "identifier";
+ case DeclarationFragments::FragmentKind::TypeIdentifier:
+ return "typeIdentifier";
+ case DeclarationFragments::FragmentKind::GenericParameter:
+ return "genericParameter";
+ case DeclarationFragments::FragmentKind::ExternalParam:
+ return "externalParam";
+ case DeclarationFragments::FragmentKind::InternalParam:
+ return "internalParam";
+ case DeclarationFragments::FragmentKind::Text:
+ return "text";
+ }
+
+ llvm_unreachable("Unhandled FragmentKind");
+}
+
+DeclarationFragments::FragmentKind
+DeclarationFragments::parseFragmentKindFromString(StringRef S) {
+ return llvm::StringSwitch<FragmentKind>(S)
+ .Case("keyword", DeclarationFragments::FragmentKind::Keyword)
+ .Case("attribute", DeclarationFragments::FragmentKind::Attribute)
+ .Case("number", DeclarationFragments::FragmentKind::NumberLiteral)
+ .Case("string", DeclarationFragments::FragmentKind::StringLiteral)
+ .Case("identifier", DeclarationFragments::FragmentKind::Identifier)
+ .Case("typeIdentifier",
+ DeclarationFragments::FragmentKind::TypeIdentifier)
+ .Case("genericParameter",
+ DeclarationFragments::FragmentKind::GenericParameter)
+ .Case("internalParam", DeclarationFragments::FragmentKind::InternalParam)
+ .Case("externalParam", DeclarationFragments::FragmentKind::ExternalParam)
+ .Case("text", DeclarationFragments::FragmentKind::Text)
+ .Default(DeclarationFragments::FragmentKind::None);
+}
+
+DeclarationFragments DeclarationFragments::getExceptionSpecificationString(
+ ExceptionSpecificationType ExceptionSpec) {
+ DeclarationFragments Fragments;
+ switch (ExceptionSpec) {
+ case ExceptionSpecificationType::EST_None:
+ return Fragments;
+ case ExceptionSpecificationType::EST_DynamicNone:
+ return Fragments.append(" ", DeclarationFragments::FragmentKind::Text)
+ .append("throw", DeclarationFragments::FragmentKind::Keyword)
+ .append("(", DeclarationFragments::FragmentKind::Text)
+ .append(")", DeclarationFragments::FragmentKind::Text);
+ case ExceptionSpecificationType::EST_Dynamic:
+ // FIXME: throw(int), get types of inner expression
+ return Fragments;
+ case ExceptionSpecificationType::EST_BasicNoexcept:
+ return Fragments.append(" ", DeclarationFragments::FragmentKind::Text)
+ .append("noexcept", DeclarationFragments::FragmentKind::Keyword);
+ case ExceptionSpecificationType::EST_DependentNoexcept:
+ // FIXME: throw(conditional-expression), get expression
+ break;
+ case ExceptionSpecificationType::EST_NoexceptFalse:
+ return Fragments.append(" ", DeclarationFragments::FragmentKind::Text)
+ .append("noexcept", DeclarationFragments::FragmentKind::Keyword)
+ .append("(", DeclarationFragments::FragmentKind::Text)
+ .append("false", DeclarationFragments::FragmentKind::Keyword)
+ .append(")", DeclarationFragments::FragmentKind::Text);
+ case ExceptionSpecificationType::EST_NoexceptTrue:
+ return Fragments.append(" ", DeclarationFragments::FragmentKind::Text)
+ .append("noexcept", DeclarationFragments::FragmentKind::Keyword)
+ .append("(", DeclarationFragments::FragmentKind::Text)
+ .append("true", DeclarationFragments::FragmentKind::Keyword)
+ .append(")", DeclarationFragments::FragmentKind::Text);
+ default:
+ return Fragments;
+ }
+
+ llvm_unreachable("Unhandled exception specification");
+}
+
+DeclarationFragments
+DeclarationFragments::getStructureTypeFragment(const RecordDecl *Record) {
+ DeclarationFragments Fragments;
+ if (Record->isStruct())
+ Fragments.append("struct", DeclarationFragments::FragmentKind::Keyword);
+ else if (Record->isUnion())
+ Fragments.append("union", DeclarationFragments::FragmentKind::Keyword);
+ else
+ Fragments.append("class", DeclarationFragments::FragmentKind::Keyword);
+
+ return Fragments;
+}
+
+// NNS stores C++ nested name specifiers, which are prefixes to qualified names.
+// Build declaration fragments for NNS recursively so that we have the USR for
+// every part in a qualified name, and also leaves the actual underlying type
+// cleaner for its own fragment.
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForNNS(const NestedNameSpecifier *NNS,
+ ASTContext &Context,
+ DeclarationFragments &After) {
+ DeclarationFragments Fragments;
+ if (NNS->getPrefix())
+ Fragments.append(getFragmentsForNNS(NNS->getPrefix(), Context, After));
+
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ Fragments.append(NNS->getAsIdentifier()->getName(),
+ DeclarationFragments::FragmentKind::Identifier);
+ break;
+
+ case NestedNameSpecifier::Namespace: {
+ const NamespaceDecl *NS = NNS->getAsNamespace();
+ if (NS->isAnonymousNamespace())
+ return Fragments;
+ SmallString<128> USR;
+ index::generateUSRForDecl(NS, USR);
+ Fragments.append(NS->getName(),
+ DeclarationFragments::FragmentKind::Identifier, USR, NS);
+ break;
+ }
+
+ case NestedNameSpecifier::NamespaceAlias: {
+ const NamespaceAliasDecl *Alias = NNS->getAsNamespaceAlias();
+ SmallString<128> USR;
+ index::generateUSRForDecl(Alias, USR);
+ Fragments.append(Alias->getName(),
+ DeclarationFragments::FragmentKind::Identifier, USR,
+ Alias);
+ break;
+ }
+
+ case NestedNameSpecifier::Global:
+ // The global specifier `::` at the beginning. No stored value.
+ break;
+
+ case NestedNameSpecifier::Super:
+ // Microsoft's `__super` specifier.
+ Fragments.append("__super", DeclarationFragments::FragmentKind::Keyword);
+ break;
+
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ // A type prefixed by the `template` keyword.
+ Fragments.append("template", DeclarationFragments::FragmentKind::Keyword);
+ Fragments.appendSpace();
+ // Fallthrough after adding the keyword to handle the actual type.
+ [[fallthrough]];
+
+ case NestedNameSpecifier::TypeSpec: {
+ const Type *T = NNS->getAsType();
+ // FIXME: Handle C++ template specialization type
+ Fragments.append(getFragmentsForType(T, Context, After));
+ break;
+ }
+ }
+
+ // Add the separator text `::` for this segment.
+ return Fragments.append("::", DeclarationFragments::FragmentKind::Text);
+}
+
+// Recursively build the declaration fragments for an underlying `Type` with
+// qualifiers removed.
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForType(
+ const Type *T, ASTContext &Context, DeclarationFragments &After) {
+ assert(T && "invalid type");
+
+ DeclarationFragments Fragments;
+
+ // An ElaboratedType is a sugar for types that are referred to using an
+ // elaborated keyword, e.g., `struct S`, `enum E`, or (in C++) via a
+ // qualified name, e.g., `N::M::type`, or both.
+ if (const ElaboratedType *ET = dyn_cast<ElaboratedType>(T)) {
+ ElaboratedTypeKeyword Keyword = ET->getKeyword();
+ if (Keyword != ElaboratedTypeKeyword::None) {
+ Fragments
+ .append(ElaboratedType::getKeywordName(Keyword),
+ DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+ }
+
+ if (const NestedNameSpecifier *NNS = ET->getQualifier())
+ Fragments.append(getFragmentsForNNS(NNS, Context, After));
+
+ // After handling the elaborated keyword or qualified name, build
+ // declaration fragments for the desugared underlying type.
+ return Fragments.append(getFragmentsForType(ET->desugar(), Context, After));
+ }
+
+ // If the type is a typedefed type, get the underlying TypedefNameDecl for a
+ // direct reference to the typedef instead of the wrapped type.
+
+ // 'id' type is a typedef for an ObjCObjectPointerType
+ // we treat it as a typedef
+ if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(T)) {
+ const TypedefNameDecl *Decl = TypedefTy->getDecl();
+ TypedefUnderlyingTypeResolver TypedefResolver(Context);
+ std::string USR = TypedefResolver.getUSRForType(QualType(T, 0));
+
+ if (T->isObjCIdType()) {
+ return Fragments.append(Decl->getName(),
+ DeclarationFragments::FragmentKind::Keyword);
+ }
+
+ return Fragments.append(
+ Decl->getName(), DeclarationFragments::FragmentKind::TypeIdentifier,
+ USR, TypedefResolver.getUnderlyingTypeDecl(QualType(T, 0)));
+ }
+
+ // Declaration fragments of a pointer type is the declaration fragments of
+ // the pointee type followed by a `*`,
+ if (T->isPointerType() && !T->isFunctionPointerType())
+ return Fragments
+ .append(getFragmentsForType(T->getPointeeType(), Context, After))
+ .append(" *", DeclarationFragments::FragmentKind::Text);
+
+ // For Objective-C `id` and `Class` pointers
+ // we do not spell out the `*`.
+ if (T->isObjCObjectPointerType() &&
+ !T->getAs<ObjCObjectPointerType>()->isObjCIdOrClassType()) {
+
+ Fragments.append(getFragmentsForType(T->getPointeeType(), Context, After));
+
+ // id<protocol> is an qualified id type
+ // id<protocol>* is not an qualified id type
+ if (!T->getAs<ObjCObjectPointerType>()->isObjCQualifiedIdType()) {
+ Fragments.append(" *", DeclarationFragments::FragmentKind::Text);
+ }
+
+ return Fragments;
+ }
+
+ // Declaration fragments of a lvalue reference type is the declaration
+ // fragments of the underlying type followed by a `&`.
+ if (const LValueReferenceType *LRT = dyn_cast<LValueReferenceType>(T))
+ return Fragments
+ .append(
+ getFragmentsForType(LRT->getPointeeTypeAsWritten(), Context, After))
+ .append(" &", DeclarationFragments::FragmentKind::Text);
+
+ // Declaration fragments of a rvalue reference type is the declaration
+ // fragments of the underlying type followed by a `&&`.
+ if (const RValueReferenceType *RRT = dyn_cast<RValueReferenceType>(T))
+ return Fragments
+ .append(
+ getFragmentsForType(RRT->getPointeeTypeAsWritten(), Context, After))
+ .append(" &&", DeclarationFragments::FragmentKind::Text);
+
+ // Declaration fragments of an array-typed variable have two parts:
+ // 1. the element type of the array that appears before the variable name;
+ // 2. array brackets `[(0-9)?]` that appear after the variable name.
+ if (const ArrayType *AT = T->getAsArrayTypeUnsafe()) {
+ // Build the "after" part first because the inner element type might also
+ // be an array-type. For example `int matrix[3][4]` which has a type of
+ // "(array 3 of (array 4 of ints))."
+ // Push the array size part first to make sure they are in the right order.
+ After.append("[", DeclarationFragments::FragmentKind::Text);
+
+ switch (AT->getSizeModifier()) {
+ case ArraySizeModifier::Normal:
+ break;
+ case ArraySizeModifier::Static:
+ Fragments.append("static", DeclarationFragments::FragmentKind::Keyword);
+ break;
+ case ArraySizeModifier::Star:
+ Fragments.append("*", DeclarationFragments::FragmentKind::Text);
+ break;
+ }
+
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) {
+ // FIXME: right now this would evaluate any expressions/macros written in
+ // the original source to concrete values. For example
+ // `int nums[MAX]` -> `int nums[100]`
+ // `char *str[5 + 1]` -> `char *str[6]`
+ SmallString<128> Size;
+ CAT->getSize().toStringUnsigned(Size);
+ After.append(Size, DeclarationFragments::FragmentKind::NumberLiteral);
+ }
+
+ After.append("]", DeclarationFragments::FragmentKind::Text);
+
+ return Fragments.append(
+ getFragmentsForType(AT->getElementType(), Context, After));
+ }
+
+ // Everything we care about has been handled now, reduce to the canonical
+ // unqualified base type.
+ QualType Base = T->getCanonicalTypeUnqualified();
+
+ // If the base type is a TagType (struct/interface/union/class/enum), let's
+ // get the underlying Decl for better names and USRs.
+ if (const TagType *TagTy = dyn_cast<TagType>(Base)) {
+ const TagDecl *Decl = TagTy->getDecl();
+ // Anonymous decl, skip this fragment.
+ if (Decl->getName().empty())
+ return Fragments;
+ SmallString<128> TagUSR;
+ clang::index::generateUSRForDecl(Decl, TagUSR);
+ return Fragments.append(Decl->getName(),
+ DeclarationFragments::FragmentKind::TypeIdentifier,
+ TagUSR, Decl);
+ }
+
+ // If the base type is an ObjCInterfaceType, use the underlying
+ // ObjCInterfaceDecl for the true USR.
+ if (const auto *ObjCIT = dyn_cast<ObjCInterfaceType>(Base)) {
+ const auto *Decl = ObjCIT->getDecl();
+ SmallString<128> USR;
+ index::generateUSRForDecl(Decl, USR);
+ return Fragments.append(Decl->getName(),
+ DeclarationFragments::FragmentKind::TypeIdentifier,
+ USR, Decl);
+ }
+
+ // Default fragment builder for other kinds of types (BuiltinType etc.)
+ SmallString<128> USR;
+ clang::index::generateUSRForType(Base, Context, USR);
+ Fragments.append(Base.getAsString(),
+ DeclarationFragments::FragmentKind::TypeIdentifier, USR);
+
+ return Fragments;
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForQualifiers(const Qualifiers Quals) {
+ DeclarationFragments Fragments;
+ if (Quals.hasConst())
+ Fragments.append("const", DeclarationFragments::FragmentKind::Keyword);
+ if (Quals.hasVolatile())
+ Fragments.append("volatile", DeclarationFragments::FragmentKind::Keyword);
+ if (Quals.hasRestrict())
+ Fragments.append("restrict", DeclarationFragments::FragmentKind::Keyword);
+
+ return Fragments;
+}
+
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForType(
+ const QualType QT, ASTContext &Context, DeclarationFragments &After) {
+ assert(!QT.isNull() && "invalid type");
+
+ if (const ParenType *PT = dyn_cast<ParenType>(QT)) {
+ After.append(")", DeclarationFragments::FragmentKind::Text);
+ return getFragmentsForType(PT->getInnerType(), Context, After)
+ .append("(", DeclarationFragments::FragmentKind::Text);
+ }
+
+ const SplitQualType SQT = QT.split();
+ DeclarationFragments QualsFragments = getFragmentsForQualifiers(SQT.Quals),
+ TypeFragments =
+ getFragmentsForType(SQT.Ty, Context, After);
+ if (QT.getAsString() == "_Bool")
+ TypeFragments.replace("bool", 0);
+
+ if (QualsFragments.getFragments().empty())
+ return TypeFragments;
+
+ // Use east qualifier for pointer types
+ // For example:
+ // ```
+ // int * const
+ // ^---- ^----
+ // type qualifier
+ // ^-----------------
+ // const pointer to int
+ // ```
+ // should not be reconstructed as
+ // ```
+ // const int *
+ // ^---- ^--
+ // qualifier type
+ // ^---------------- ^
+ // pointer to const int
+ // ```
+ if (SQT.Ty->isAnyPointerType())
+ return TypeFragments.appendSpace().append(std::move(QualsFragments));
+
+ return QualsFragments.appendSpace().append(std::move(TypeFragments));
+}
+
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForNamespace(
+ const NamespaceDecl *Decl) {
+ DeclarationFragments Fragments;
+ Fragments.append("namespace", DeclarationFragments::FragmentKind::Keyword);
+ if (!Decl->isAnonymousNamespace())
+ Fragments.appendSpace().append(
+ Decl->getName(), DeclarationFragments::FragmentKind::Identifier);
+ return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForVar(const VarDecl *Var) {
+ DeclarationFragments Fragments;
+ if (Var->isConstexpr())
+ Fragments.append("constexpr", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+
+ StorageClass SC = Var->getStorageClass();
+ if (SC != SC_None)
+ Fragments
+ .append(VarDecl::getStorageClassSpecifierString(SC),
+ DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+
+ // Capture potential fragments that needs to be placed after the variable name
+ // ```
+ // int nums[5];
+ // char (*ptr_to_array)[6];
+ // ```
+ DeclarationFragments After;
+ FunctionTypeLoc BlockLoc;
+ FunctionProtoTypeLoc BlockProtoLoc;
+ findTypeLocForBlockDecl(Var->getTypeSourceInfo(), BlockLoc, BlockProtoLoc);
+
+ if (!BlockLoc) {
+ QualType T = Var->getTypeSourceInfo()
+ ? Var->getTypeSourceInfo()->getType()
+ : Var->getASTContext().getUnqualifiedObjCPointerType(
+ Var->getType());
+
+ Fragments.append(getFragmentsForType(T, Var->getASTContext(), After))
+ .appendSpace();
+ } else {
+ Fragments.append(getFragmentsForBlock(Var, BlockLoc, BlockProtoLoc, After));
+ }
+
+ return Fragments
+ .append(Var->getName(), DeclarationFragments::FragmentKind::Identifier)
+ .append(std::move(After))
+ .append(";", DeclarationFragments::FragmentKind::Text);
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForVarTemplate(const VarDecl *Var) {
+ DeclarationFragments Fragments;
+ if (Var->isConstexpr())
+ Fragments.append("constexpr", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+ QualType T =
+ Var->getTypeSourceInfo()
+ ? Var->getTypeSourceInfo()->getType()
+ : Var->getASTContext().getUnqualifiedObjCPointerType(Var->getType());
+
+ // Might be a member, so might be static.
+ if (Var->isStaticDataMember())
+ Fragments.append("static", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+
+ DeclarationFragments After;
+ DeclarationFragments ArgumentFragment =
+ getFragmentsForType(T, Var->getASTContext(), After);
+ if (ArgumentFragment.begin()->Spelling.substr(0, 14).compare(
+ "type-parameter") == 0) {
+ std::string ProperArgName = getNameForTemplateArgument(
+ Var->getDescribedVarTemplate()->getTemplateParameters()->asArray(),
+ ArgumentFragment.begin()->Spelling);
+ ArgumentFragment.begin()->Spelling.swap(ProperArgName);
+ }
+ Fragments.append(std::move(ArgumentFragment))
+ .appendSpace()
+ .append(Var->getName(), DeclarationFragments::FragmentKind::Identifier)
+ .append(";", DeclarationFragments::FragmentKind::Text);
+ return Fragments;
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForParam(const ParmVarDecl *Param) {
+ DeclarationFragments Fragments, After;
+
+ auto *TSInfo = Param->getTypeSourceInfo();
+
+ QualType T = TSInfo ? TSInfo->getType()
+ : Param->getASTContext().getUnqualifiedObjCPointerType(
+ Param->getType());
+
+ FunctionTypeLoc BlockLoc;
+ FunctionProtoTypeLoc BlockProtoLoc;
+ findTypeLocForBlockDecl(TSInfo, BlockLoc, BlockProtoLoc);
+
+ DeclarationFragments TypeFragments;
+ if (BlockLoc)
+ TypeFragments.append(
+ getFragmentsForBlock(Param, BlockLoc, BlockProtoLoc, After));
+ else
+ TypeFragments.append(getFragmentsForType(T, Param->getASTContext(), After));
+
+ if (TypeFragments.begin()->Spelling.substr(0, 14).compare("type-parameter") ==
+ 0) {
+ std::string ProperArgName = getNameForTemplateArgument(
+ dyn_cast<FunctionDecl>(Param->getDeclContext())
+ ->getDescribedFunctionTemplate()
+ ->getTemplateParameters()
+ ->asArray(),
+ TypeFragments.begin()->Spelling);
+ TypeFragments.begin()->Spelling.swap(ProperArgName);
+ }
+
+ if (Param->isObjCMethodParameter()) {
+ Fragments.append("(", DeclarationFragments::FragmentKind::Text)
+ .append(std::move(TypeFragments))
+ .append(std::move(After))
+ .append(") ", DeclarationFragments::FragmentKind::Text)
+ .append(Param->getName(),
+ DeclarationFragments::FragmentKind::InternalParam);
+ } else {
+ Fragments.append(std::move(TypeFragments));
+ if (!T->isBlockPointerType())
+ Fragments.appendSpace();
+ Fragments
+ .append(Param->getName(),
+ DeclarationFragments::FragmentKind::InternalParam)
+ .append(std::move(After));
+ }
+ return Fragments;
+}
+
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForBlock(
+ const NamedDecl *BlockDecl, FunctionTypeLoc &Block,
+ FunctionProtoTypeLoc &BlockProto, DeclarationFragments &After) {
+ DeclarationFragments Fragments;
+
+ DeclarationFragments RetTyAfter;
+ auto ReturnValueFragment = getFragmentsForType(
+ Block.getTypePtr()->getReturnType(), BlockDecl->getASTContext(), After);
+
+ Fragments.append(std::move(ReturnValueFragment))
+ .append(std::move(RetTyAfter))
+ .appendSpace()
+ .append("(^", DeclarationFragments::FragmentKind::Text);
+
+ After.append(")", DeclarationFragments::FragmentKind::Text);
+ unsigned NumParams = Block.getNumParams();
+
+ if (!BlockProto || NumParams == 0) {
+ if (BlockProto && BlockProto.getTypePtr()->isVariadic())
+ After.append("(...)", DeclarationFragments::FragmentKind::Text);
+ else
+ After.append("()", DeclarationFragments::FragmentKind::Text);
+ } else {
+ After.append("(", DeclarationFragments::FragmentKind::Text);
+ for (unsigned I = 0; I != NumParams; ++I) {
+ if (I)
+ After.append(", ", DeclarationFragments::FragmentKind::Text);
+ After.append(getFragmentsForParam(Block.getParam(I)));
+ if (I == NumParams - 1 && BlockProto.getTypePtr()->isVariadic())
+ After.append(", ...", DeclarationFragments::FragmentKind::Text);
+ }
+ After.append(")", DeclarationFragments::FragmentKind::Text);
+ }
+
+ return Fragments;
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForFunction(const FunctionDecl *Func) {
+ DeclarationFragments Fragments;
+ // FIXME: Handle template specialization
+ switch (Func->getStorageClass()) {
+ case SC_None:
+ case SC_PrivateExtern:
+ break;
+ case SC_Extern:
+ Fragments.append("extern", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+ break;
+ case SC_Static:
+ Fragments.append("static", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+ break;
+ case SC_Auto:
+ case SC_Register:
+ llvm_unreachable("invalid for functions");
+ }
+ if (Func->isConsteval()) // if consteval, it is also constexpr
+ Fragments.append("consteval", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+ else if (Func->isConstexpr())
+ Fragments.append("constexpr", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+
+ // FIXME: Is `after` actually needed here?
+ DeclarationFragments After;
+ auto ReturnValueFragment =
+ getFragmentsForType(Func->getReturnType(), Func->getASTContext(), After);
+ if (ReturnValueFragment.begin()->Spelling.substr(0, 14).compare(
+ "type-parameter") == 0) {
+ std::string ProperArgName =
+ getNameForTemplateArgument(Func->getDescribedFunctionTemplate()
+ ->getTemplateParameters()
+ ->asArray(),
+ ReturnValueFragment.begin()->Spelling);
+ ReturnValueFragment.begin()->Spelling.swap(ProperArgName);
+ }
+
+ Fragments.append(std::move(ReturnValueFragment))
+ .appendSpace()
+ .append(Func->getName(), DeclarationFragments::FragmentKind::Identifier);
+
+ if (Func->getTemplateSpecializationInfo()) {
+ Fragments.append("<", DeclarationFragments::FragmentKind::Text);
+
+ for (unsigned i = 0, end = Func->getNumParams(); i != end; ++i) {
+ if (i)
+ Fragments.append(", ", DeclarationFragments::FragmentKind::Text);
+ Fragments.append(
+ getFragmentsForType(Func->getParamDecl(i)->getType(),
+ Func->getParamDecl(i)->getASTContext(), After));
+ }
+ Fragments.append(">", DeclarationFragments::FragmentKind::Text);
+ }
+ Fragments.append(std::move(After));
+
+ Fragments.append("(", DeclarationFragments::FragmentKind::Text);
+ unsigned NumParams = Func->getNumParams();
+ for (unsigned i = 0; i != NumParams; ++i) {
+ if (i)
+ Fragments.append(", ", DeclarationFragments::FragmentKind::Text);
+ Fragments.append(getFragmentsForParam(Func->getParamDecl(i)));
+ }
+
+ if (Func->isVariadic()) {
+ if (NumParams > 0)
+ Fragments.append(", ", DeclarationFragments::FragmentKind::Text);
+ Fragments.append("...", DeclarationFragments::FragmentKind::Text);
+ }
+ Fragments.append(")", DeclarationFragments::FragmentKind::Text);
+
+ Fragments.append(DeclarationFragments::getExceptionSpecificationString(
+ Func->getExceptionSpecType()));
+
+ return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+}
+
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForEnumConstant(
+ const EnumConstantDecl *EnumConstDecl) {
+ DeclarationFragments Fragments;
+ return Fragments.append(EnumConstDecl->getName(),
+ DeclarationFragments::FragmentKind::Identifier);
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForEnum(const EnumDecl *EnumDecl) {
+ if (const auto *TypedefNameDecl = EnumDecl->getTypedefNameForAnonDecl())
+ return getFragmentsForTypedef(TypedefNameDecl);
+
+ DeclarationFragments Fragments, After;
+ Fragments.append("enum", DeclarationFragments::FragmentKind::Keyword);
+
+ if (!EnumDecl->getName().empty())
+ Fragments.appendSpace().append(
+ EnumDecl->getName(), DeclarationFragments::FragmentKind::Identifier);
+
+ QualType IntegerType = EnumDecl->getIntegerType();
+ if (!IntegerType.isNull())
+ Fragments.append(": ", DeclarationFragments::FragmentKind::Text)
+ .append(
+ getFragmentsForType(IntegerType, EnumDecl->getASTContext(), After))
+ .append(std::move(After));
+
+ return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForField(const FieldDecl *Field) {
+ DeclarationFragments After;
+ DeclarationFragments Fragments;
+ if (Field->isMutable())
+ Fragments.append("mutable", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+ return Fragments
+ .append(
+ getFragmentsForType(Field->getType(), Field->getASTContext(), After))
+ .appendSpace()
+ .append(Field->getName(), DeclarationFragments::FragmentKind::Identifier)
+ .append(std::move(After))
+ .append(";", DeclarationFragments::FragmentKind::Text);
+}
+
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForRecordDecl(
+ const RecordDecl *Record) {
+ if (const auto *TypedefNameDecl = Record->getTypedefNameForAnonDecl())
+ return getFragmentsForTypedef(TypedefNameDecl);
+
+ DeclarationFragments Fragments;
+ if (Record->isUnion())
+ Fragments.append("union", DeclarationFragments::FragmentKind::Keyword);
+ else
+ Fragments.append("struct", DeclarationFragments::FragmentKind::Keyword);
+
+ if (!Record->getName().empty())
+ Fragments.appendSpace().append(
+ Record->getName(), DeclarationFragments::FragmentKind::Identifier);
+
+ return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+}
+
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForCXXClass(
+ const CXXRecordDecl *Record) {
+ if (const auto *TypedefNameDecl = Record->getTypedefNameForAnonDecl())
+ return getFragmentsForTypedef(TypedefNameDecl);
+
+ DeclarationFragments Fragments;
+ Fragments.append(DeclarationFragments::getStructureTypeFragment(Record));
+
+ if (!Record->getName().empty())
+ Fragments.appendSpace().append(
+ Record->getName(), DeclarationFragments::FragmentKind::Identifier);
+
+ return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForSpecialCXXMethod(
+ const CXXMethodDecl *Method) {
+ DeclarationFragments Fragments;
+ std::string Name;
+ if (const auto *Constructor = dyn_cast<CXXConstructorDecl>(Method)) {
+ Name = Method->getNameAsString();
+ if (Constructor->isExplicit())
+ Fragments.append("explicit", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+ } else if (isa<CXXDestructorDecl>(Method))
+ Name = Method->getNameAsString();
+
+ DeclarationFragments After;
+ Fragments.append(Name, DeclarationFragments::FragmentKind::Identifier)
+ .append(std::move(After));
+ Fragments.append("(", DeclarationFragments::FragmentKind::Text);
+ for (unsigned i = 0, end = Method->getNumParams(); i != end; ++i) {
+ if (i)
+ Fragments.append(", ", DeclarationFragments::FragmentKind::Text);
+ Fragments.append(getFragmentsForParam(Method->getParamDecl(i)));
+ }
+ Fragments.append(")", DeclarationFragments::FragmentKind::Text);
+
+ Fragments.append(DeclarationFragments::getExceptionSpecificationString(
+ Method->getExceptionSpecType()));
+
+ return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+}
+
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForCXXMethod(
+ const CXXMethodDecl *Method) {
+ DeclarationFragments Fragments;
+ StringRef Name = Method->getName();
+ if (Method->isStatic())
+ Fragments.append("static", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+ if (Method->isConstexpr())
+ Fragments.append("constexpr", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+ if (Method->isVolatile())
+ Fragments.append("volatile", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+
+ // Build return type
+ DeclarationFragments After;
+ Fragments
+ .append(getFragmentsForType(Method->getReturnType(),
+ Method->getASTContext(), After))
+ .appendSpace()
+ .append(Name, DeclarationFragments::FragmentKind::Identifier)
+ .append(std::move(After));
+ Fragments.append("(", DeclarationFragments::FragmentKind::Text);
+ for (unsigned i = 0, end = Method->getNumParams(); i != end; ++i) {
+ if (i)
+ Fragments.append(", ", DeclarationFragments::FragmentKind::Text);
+ Fragments.append(getFragmentsForParam(Method->getParamDecl(i)));
+ }
+ Fragments.append(")", DeclarationFragments::FragmentKind::Text);
+
+ if (Method->isConst())
+ Fragments.appendSpace().append("const",
+ DeclarationFragments::FragmentKind::Keyword);
+
+ Fragments.append(DeclarationFragments::getExceptionSpecificationString(
+ Method->getExceptionSpecType()));
+
+ return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForConversionFunction(
+ const CXXConversionDecl *ConversionFunction) {
+ DeclarationFragments Fragments;
+
+ if (ConversionFunction->isExplicit())
+ Fragments.append("explicit", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+
+ Fragments.append("operator", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+
+ Fragments
+ .append(ConversionFunction->getConversionType().getAsString(),
+ DeclarationFragments::FragmentKind::TypeIdentifier)
+ .append("(", DeclarationFragments::FragmentKind::Text);
+ for (unsigned i = 0, end = ConversionFunction->getNumParams(); i != end;
+ ++i) {
+ if (i)
+ Fragments.append(", ", DeclarationFragments::FragmentKind::Text);
+ Fragments.append(getFragmentsForParam(ConversionFunction->getParamDecl(i)));
+ }
+ Fragments.append(")", DeclarationFragments::FragmentKind::Text);
+
+ if (ConversionFunction->isConst())
+ Fragments.appendSpace().append("const",
+ DeclarationFragments::FragmentKind::Keyword);
+
+ return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForOverloadedOperator(
+ const CXXMethodDecl *Method) {
+ DeclarationFragments Fragments;
+
+ // Build return type
+ DeclarationFragments After;
+ Fragments
+ .append(getFragmentsForType(Method->getReturnType(),
+ Method->getASTContext(), After))
+ .appendSpace()
+ .append(Method->getNameAsString(),
+ DeclarationFragments::FragmentKind::Identifier)
+ .append(std::move(After));
+ Fragments.append("(", DeclarationFragments::FragmentKind::Text);
+ for (unsigned i = 0, end = Method->getNumParams(); i != end; ++i) {
+ if (i)
+ Fragments.append(", ", DeclarationFragments::FragmentKind::Text);
+ Fragments.append(getFragmentsForParam(Method->getParamDecl(i)));
+ }
+ Fragments.append(")", DeclarationFragments::FragmentKind::Text);
+
+ if (Method->isConst())
+ Fragments.appendSpace().append("const",
+ DeclarationFragments::FragmentKind::Keyword);
+
+ Fragments.append(DeclarationFragments::getExceptionSpecificationString(
+ Method->getExceptionSpecType()));
+
+ return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+}
+
+// Get fragments for template parameters, e.g. T in tempalte<typename T> ...
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForTemplateParameters(
+ ArrayRef<NamedDecl *> ParameterArray) {
+ DeclarationFragments Fragments;
+ for (unsigned i = 0, end = ParameterArray.size(); i != end; ++i) {
+ if (i)
+ Fragments.append(",", DeclarationFragments::FragmentKind::Text)
+ .appendSpace();
+
+ const auto *TemplateParam =
+ dyn_cast<TemplateTypeParmDecl>(ParameterArray[i]);
+ if (!TemplateParam)
+ continue;
+ if (TemplateParam->hasTypeConstraint())
+ Fragments.append(TemplateParam->getTypeConstraint()
+ ->getNamedConcept()
+ ->getName()
+ .str(),
+ DeclarationFragments::FragmentKind::TypeIdentifier);
+ else if (TemplateParam->wasDeclaredWithTypename())
+ Fragments.append("typename", DeclarationFragments::FragmentKind::Keyword);
+ else
+ Fragments.append("class", DeclarationFragments::FragmentKind::Keyword);
+
+ if (TemplateParam->isParameterPack())
+ Fragments.append("...", DeclarationFragments::FragmentKind::Text);
+
+ Fragments.appendSpace().append(
+ TemplateParam->getName(),
+ DeclarationFragments::FragmentKind::GenericParameter);
+ }
+ return Fragments;
+}
+
+// Find the name of a template argument from the template's parameters.
+std::string DeclarationFragmentsBuilder::getNameForTemplateArgument(
+ const ArrayRef<NamedDecl *> TemplateParameters, std::string TypeParameter) {
+ // The arg is a generic parameter from a partial spec, e.g.
+ // T in template<typename T> Foo<T, int>.
+ //
+ // Those names appear as "type-parameter-<index>-<depth>", so we must find its
+ // name from the template's parameter list.
+ for (unsigned i = 0; i < TemplateParameters.size(); ++i) {
+ const auto *Parameter =
+ dyn_cast<TemplateTypeParmDecl>(TemplateParameters[i]);
+ if (TypeParameter.compare("type-parameter-" +
+ std::to_string(Parameter->getDepth()) + "-" +
+ std::to_string(Parameter->getIndex())) == 0)
+ return std::string(TemplateParameters[i]->getName());
+ }
+ llvm_unreachable("Could not find the name of a template argument.");
+}
+
+// Get fragments for template arguments, e.g. int in template<typename T>
+// Foo<int>;
+//
+// Note: TemplateParameters is only necessary if the Decl is a
+// PartialSpecialization, where we need the parameters to deduce the name of the
+// generic arguments.
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForTemplateArguments(
+ const ArrayRef<TemplateArgument> TemplateArguments, ASTContext &Context,
+ const std::optional<ArrayRef<NamedDecl *>> TemplateParameters) {
+ DeclarationFragments Fragments;
+ for (unsigned i = 0, end = TemplateArguments.size(); i != end; ++i) {
+ if (i)
+ Fragments.append(",", DeclarationFragments::FragmentKind::Text)
+ .appendSpace();
+
+ std::string Type = TemplateArguments[i].getAsType().getAsString();
+ DeclarationFragments After;
+ DeclarationFragments ArgumentFragment =
+ getFragmentsForType(TemplateArguments[i].getAsType(), Context, After);
+
+ if (ArgumentFragment.begin()->Spelling.substr(0, 14).compare(
+ "type-parameter") == 0) {
+ std::string ProperArgName = getNameForTemplateArgument(
+ TemplateParameters.value(), ArgumentFragment.begin()->Spelling);
+ ArgumentFragment.begin()->Spelling.swap(ProperArgName);
+ }
+ Fragments.append(std::move(ArgumentFragment));
+
+ if (TemplateArguments[i].isPackExpansion())
+ Fragments.append("...", DeclarationFragments::FragmentKind::Text);
+ }
+ return Fragments;
+}
+
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForConcept(
+ const ConceptDecl *Concept) {
+ DeclarationFragments Fragments;
+ return Fragments
+ .append("template", DeclarationFragments::FragmentKind::Keyword)
+ .append("<", DeclarationFragments::FragmentKind::Text)
+ .append(getFragmentsForTemplateParameters(
+ Concept->getTemplateParameters()->asArray()))
+ .append("> ", DeclarationFragments::FragmentKind::Text)
+ .append("concept", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace()
+ .append(Concept->getName().str(),
+ DeclarationFragments::FragmentKind::Identifier)
+ .append(";", DeclarationFragments::FragmentKind::Text);
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForRedeclarableTemplate(
+ const RedeclarableTemplateDecl *RedeclarableTemplate) {
+ DeclarationFragments Fragments;
+ Fragments.append("template", DeclarationFragments::FragmentKind::Keyword)
+ .append("<", DeclarationFragments::FragmentKind::Text)
+ .append(getFragmentsForTemplateParameters(
+ RedeclarableTemplate->getTemplateParameters()->asArray()))
+ .append(">", DeclarationFragments::FragmentKind::Text)
+ .appendSpace();
+
+ if (isa<TypeAliasTemplateDecl>(RedeclarableTemplate))
+ Fragments.appendSpace()
+ .append("using", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace()
+ .append(RedeclarableTemplate->getName(),
+ DeclarationFragments::FragmentKind::Identifier);
+ // the templated records will be resposbible for injecting their templates
+ return Fragments.appendSpace();
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForClassTemplateSpecialization(
+ const ClassTemplateSpecializationDecl *Decl) {
+ DeclarationFragments Fragments;
+ return Fragments
+ .append("template", DeclarationFragments::FragmentKind::Keyword)
+ .append("<", DeclarationFragments::FragmentKind::Text)
+ .append(">", DeclarationFragments::FragmentKind::Text)
+ .appendSpace()
+ .append(DeclarationFragmentsBuilder::getFragmentsForCXXClass(
+ cast<CXXRecordDecl>(Decl)))
+ .pop_back() // there is an extra semicolon now
+ .append("<", DeclarationFragments::FragmentKind::Text)
+ .append(
+ getFragmentsForTemplateArguments(Decl->getTemplateArgs().asArray(),
+ Decl->getASTContext(), std::nullopt))
+ .append(">", DeclarationFragments::FragmentKind::Text)
+ .append(";", DeclarationFragments::FragmentKind::Text);
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForClassTemplatePartialSpecialization(
+ const ClassTemplatePartialSpecializationDecl *Decl) {
+ DeclarationFragments Fragments;
+ return Fragments
+ .append("template", DeclarationFragments::FragmentKind::Keyword)
+ .append("<", DeclarationFragments::FragmentKind::Text)
+ .append(getFragmentsForTemplateParameters(
+ Decl->getTemplateParameters()->asArray()))
+ .append(">", DeclarationFragments::FragmentKind::Text)
+ .appendSpace()
+ .append(DeclarationFragmentsBuilder::getFragmentsForCXXClass(
+ cast<CXXRecordDecl>(Decl)))
+ .pop_back() // there is an extra semicolon now
+ .append("<", DeclarationFragments::FragmentKind::Text)
+ .append(getFragmentsForTemplateArguments(
+ Decl->getTemplateArgs().asArray(), Decl->getASTContext(),
+ Decl->getTemplateParameters()->asArray()))
+ .append(">", DeclarationFragments::FragmentKind::Text)
+ .append(";", DeclarationFragments::FragmentKind::Text);
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForVarTemplateSpecialization(
+ const VarTemplateSpecializationDecl *Decl) {
+ DeclarationFragments Fragments;
+ return Fragments
+ .append("template", DeclarationFragments::FragmentKind::Keyword)
+ .append("<", DeclarationFragments::FragmentKind::Text)
+ .append(">", DeclarationFragments::FragmentKind::Text)
+ .appendSpace()
+ .append(DeclarationFragmentsBuilder::getFragmentsForVarTemplate(Decl))
+ .pop_back() // there is an extra semicolon now
+ .append("<", DeclarationFragments::FragmentKind::Text)
+ .append(
+ getFragmentsForTemplateArguments(Decl->getTemplateArgs().asArray(),
+ Decl->getASTContext(), std::nullopt))
+ .append(">", DeclarationFragments::FragmentKind::Text)
+ .append(";", DeclarationFragments::FragmentKind::Text);
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForVarTemplatePartialSpecialization(
+ const VarTemplatePartialSpecializationDecl *Decl) {
+ DeclarationFragments Fragments;
+ return Fragments
+ .append("template", DeclarationFragments::FragmentKind::Keyword)
+ .append("<", DeclarationFragments::FragmentKind::Text)
+ // Partial specs may have new params.
+ .append(getFragmentsForTemplateParameters(
+ Decl->getTemplateParameters()->asArray()))
+ .append(">", DeclarationFragments::FragmentKind::Text)
+ .appendSpace()
+ .append(DeclarationFragmentsBuilder::getFragmentsForVarTemplate(Decl))
+ .pop_back() // there is an extra semicolon now
+ .append("<", DeclarationFragments::FragmentKind::Text)
+ .append(getFragmentsForTemplateArguments(
+ Decl->getTemplateArgs().asArray(), Decl->getASTContext(),
+ Decl->getTemplateParameters()->asArray()))
+ .append(">", DeclarationFragments::FragmentKind::Text)
+ .append(";", DeclarationFragments::FragmentKind::Text);
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForFunctionTemplate(
+ const FunctionTemplateDecl *Decl) {
+ DeclarationFragments Fragments;
+ return Fragments
+ .append("template", DeclarationFragments::FragmentKind::Keyword)
+ .append("<", DeclarationFragments::FragmentKind::Text)
+ // Partial specs may have new params.
+ .append(getFragmentsForTemplateParameters(
+ Decl->getTemplateParameters()->asArray()))
+ .append(">", DeclarationFragments::FragmentKind::Text)
+ .appendSpace()
+ .append(DeclarationFragmentsBuilder::getFragmentsForFunction(
+ Decl->getAsFunction()));
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForFunctionTemplateSpecialization(
+ const FunctionDecl *Decl) {
+ DeclarationFragments Fragments;
+ return Fragments
+ .append("template", DeclarationFragments::FragmentKind::Keyword)
+ .append("<>", DeclarationFragments::FragmentKind::Text)
+ .appendSpace()
+ .append(DeclarationFragmentsBuilder::getFragmentsForFunction(Decl));
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForMacro(StringRef Name,
+ const MacroDirective *MD) {
+ DeclarationFragments Fragments;
+ Fragments.append("#define", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+ Fragments.append(Name, DeclarationFragments::FragmentKind::Identifier);
+
+ auto *MI = MD->getMacroInfo();
+
+ if (MI->isFunctionLike()) {
+ Fragments.append("(", DeclarationFragments::FragmentKind::Text);
+ unsigned numParameters = MI->getNumParams();
+ if (MI->isC99Varargs())
+ --numParameters;
+ for (unsigned i = 0; i < numParameters; ++i) {
+ if (i)
+ Fragments.append(", ", DeclarationFragments::FragmentKind::Text);
+ Fragments.append(MI->params()[i]->getName(),
+ DeclarationFragments::FragmentKind::InternalParam);
+ }
+ if (MI->isVariadic()) {
+ if (numParameters && MI->isC99Varargs())
+ Fragments.append(", ", DeclarationFragments::FragmentKind::Text);
+ Fragments.append("...", DeclarationFragments::FragmentKind::Text);
+ }
+ Fragments.append(")", DeclarationFragments::FragmentKind::Text);
+ }
+ return Fragments;
+}
+
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForObjCCategory(
+ const ObjCCategoryDecl *Category) {
+ DeclarationFragments Fragments;
+
+ auto *Interface = Category->getClassInterface();
+ SmallString<128> InterfaceUSR;
+ index::generateUSRForDecl(Interface, InterfaceUSR);
+
+ Fragments.append("@interface", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace()
+ .append(Category->getClassInterface()->getName(),
+ DeclarationFragments::FragmentKind::TypeIdentifier, InterfaceUSR,
+ Interface)
+ .append(" (", DeclarationFragments::FragmentKind::Text)
+ .append(Category->getName(),
+ DeclarationFragments::FragmentKind::Identifier)
+ .append(")", DeclarationFragments::FragmentKind::Text);
+
+ return Fragments;
+}
+
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForObjCInterface(
+ const ObjCInterfaceDecl *Interface) {
+ DeclarationFragments Fragments;
+ // Build the base of the Objective-C interface declaration.
+ Fragments.append("@interface", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace()
+ .append(Interface->getName(),
+ DeclarationFragments::FragmentKind::Identifier);
+
+ // Build the inheritance part of the declaration.
+ if (const ObjCInterfaceDecl *SuperClass = Interface->getSuperClass()) {
+ SmallString<128> SuperUSR;
+ index::generateUSRForDecl(SuperClass, SuperUSR);
+ Fragments.append(" : ", DeclarationFragments::FragmentKind::Text)
+ .append(SuperClass->getName(),
+ DeclarationFragments::FragmentKind::TypeIdentifier, SuperUSR,
+ SuperClass);
+ }
+
+ return Fragments;
+}
+
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForObjCMethod(
+ const ObjCMethodDecl *Method) {
+ DeclarationFragments Fragments, After;
+ // Build the instance/class method indicator.
+ if (Method->isClassMethod())
+ Fragments.append("+ ", DeclarationFragments::FragmentKind::Text);
+ else if (Method->isInstanceMethod())
+ Fragments.append("- ", DeclarationFragments::FragmentKind::Text);
+
+ // Build the return type.
+ Fragments.append("(", DeclarationFragments::FragmentKind::Text)
+ .append(getFragmentsForType(Method->getReturnType(),
+ Method->getASTContext(), After))
+ .append(std::move(After))
+ .append(")", DeclarationFragments::FragmentKind::Text);
+
+ // Build the selector part.
+ Selector Selector = Method->getSelector();
+ if (Selector.getNumArgs() == 0)
+ // For Objective-C methods that don't take arguments, the first (and only)
+ // slot of the selector is the method name.
+ Fragments.appendSpace().append(
+ Selector.getNameForSlot(0),
+ DeclarationFragments::FragmentKind::Identifier);
+
+ // For Objective-C methods that take arguments, build the selector slots.
+ for (unsigned i = 0, end = Method->param_size(); i != end; ++i) {
+ // Objective-C method selector parts are considered as identifiers instead
+ // of "external parameters" as in Swift. This is because Objective-C method
+ // symbols are referenced with the entire selector, instead of just the
+ // method name in Swift.
+ SmallString<32> ParamID(Selector.getNameForSlot(i));
+ ParamID.append(":");
+ Fragments.appendSpace().append(
+ ParamID, DeclarationFragments::FragmentKind::Identifier);
+
+ // Build the internal parameter.
+ const ParmVarDecl *Param = Method->getParamDecl(i);
+ Fragments.append(getFragmentsForParam(Param));
+ }
+
+ return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+}
+
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForObjCProperty(
+ const ObjCPropertyDecl *Property) {
+ DeclarationFragments Fragments, After;
+
+ // Build the Objective-C property keyword.
+ Fragments.append("@property", DeclarationFragments::FragmentKind::Keyword);
+
+ const auto Attributes = Property->getPropertyAttributesAsWritten();
+ // Build the attributes if there is any associated with the property.
+ if (Attributes != ObjCPropertyAttribute::kind_noattr) {
+ // No leading comma for the first attribute.
+ bool First = true;
+ Fragments.append(" (", DeclarationFragments::FragmentKind::Text);
+ // Helper function to render the attribute.
+ auto RenderAttribute =
+ [&](ObjCPropertyAttribute::Kind Kind, StringRef Spelling,
+ StringRef Arg = "",
+ DeclarationFragments::FragmentKind ArgKind =
+ DeclarationFragments::FragmentKind::Identifier) {
+ // Check if the `Kind` attribute is set for this property.
+ if ((Attributes & Kind) && !Spelling.empty()) {
+ // Add a leading comma if this is not the first attribute rendered.
+ if (!First)
+ Fragments.append(", ", DeclarationFragments::FragmentKind::Text);
+ // Render the spelling of this attribute `Kind` as a keyword.
+ Fragments.append(Spelling,
+ DeclarationFragments::FragmentKind::Keyword);
+ // If this attribute takes in arguments (e.g. `getter=getterName`),
+ // render the arguments.
+ if (!Arg.empty())
+ Fragments.append("=", DeclarationFragments::FragmentKind::Text)
+ .append(Arg, ArgKind);
+ First = false;
+ }
+ };
+
+ // Go through all possible Objective-C property attributes and render set
+ // ones.
+ RenderAttribute(ObjCPropertyAttribute::kind_class, "class");
+ RenderAttribute(ObjCPropertyAttribute::kind_direct, "direct");
+ RenderAttribute(ObjCPropertyAttribute::kind_nonatomic, "nonatomic");
+ RenderAttribute(ObjCPropertyAttribute::kind_atomic, "atomic");
+ RenderAttribute(ObjCPropertyAttribute::kind_assign, "assign");
+ RenderAttribute(ObjCPropertyAttribute::kind_retain, "retain");
+ RenderAttribute(ObjCPropertyAttribute::kind_strong, "strong");
+ RenderAttribute(ObjCPropertyAttribute::kind_copy, "copy");
+ RenderAttribute(ObjCPropertyAttribute::kind_weak, "weak");
+ RenderAttribute(ObjCPropertyAttribute::kind_unsafe_unretained,
+ "unsafe_unretained");
+ RenderAttribute(ObjCPropertyAttribute::kind_readwrite, "readwrite");
+ RenderAttribute(ObjCPropertyAttribute::kind_readonly, "readonly");
+ RenderAttribute(ObjCPropertyAttribute::kind_getter, "getter",
+ Property->getGetterName().getAsString());
+ RenderAttribute(ObjCPropertyAttribute::kind_setter, "setter",
+ Property->getSetterName().getAsString());
+
+ // Render nullability attributes.
+ if (Attributes & ObjCPropertyAttribute::kind_nullability) {
+ QualType Type = Property->getType();
+ if (const auto Nullability =
+ AttributedType::stripOuterNullability(Type)) {
+ if (!First)
+ Fragments.append(", ", DeclarationFragments::FragmentKind::Text);
+ if (*Nullability == NullabilityKind::Unspecified &&
+ (Attributes & ObjCPropertyAttribute::kind_null_resettable))
+ Fragments.append("null_resettable",
+ DeclarationFragments::FragmentKind::Keyword);
+ else
+ Fragments.append(
+ getNullabilitySpelling(*Nullability, /*isContextSensitive=*/true),
+ DeclarationFragments::FragmentKind::Keyword);
+ First = false;
+ }
+ }
+
+ Fragments.append(")", DeclarationFragments::FragmentKind::Text);
+ }
+
+ Fragments.appendSpace();
+
+ FunctionTypeLoc BlockLoc;
+ FunctionProtoTypeLoc BlockProtoLoc;
+ findTypeLocForBlockDecl(Property->getTypeSourceInfo(), BlockLoc,
+ BlockProtoLoc);
+
+ auto PropType = Property->getType();
+ if (!BlockLoc)
+ Fragments
+ .append(getFragmentsForType(PropType, Property->getASTContext(), After))
+ .appendSpace();
+ else
+ Fragments.append(
+ getFragmentsForBlock(Property, BlockLoc, BlockProtoLoc, After));
+
+ return Fragments
+ .append(Property->getName(),
+ DeclarationFragments::FragmentKind::Identifier)
+ .append(std::move(After))
+ .append(";", DeclarationFragments::FragmentKind::Text);
+}
+
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForObjCProtocol(
+ const ObjCProtocolDecl *Protocol) {
+ DeclarationFragments Fragments;
+ // Build basic protocol declaration.
+ Fragments.append("@protocol", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace()
+ .append(Protocol->getName(),
+ DeclarationFragments::FragmentKind::Identifier);
+
+ // If this protocol conforms to other protocols, build the conformance list.
+ if (!Protocol->protocols().empty()) {
+ Fragments.append(" <", DeclarationFragments::FragmentKind::Text);
+ for (ObjCProtocolDecl::protocol_iterator It = Protocol->protocol_begin();
+ It != Protocol->protocol_end(); It++) {
+ // Add a leading comma if this is not the first protocol rendered.
+ if (It != Protocol->protocol_begin())
+ Fragments.append(", ", DeclarationFragments::FragmentKind::Text);
+
+ SmallString<128> USR;
+ index::generateUSRForDecl(*It, USR);
+ Fragments.append((*It)->getName(),
+ DeclarationFragments::FragmentKind::TypeIdentifier, USR,
+ *It);
+ }
+ Fragments.append(">", DeclarationFragments::FragmentKind::Text);
+ }
+
+ return Fragments;
+}
+
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForTypedef(
+ const TypedefNameDecl *Decl) {
+ DeclarationFragments Fragments, After;
+ Fragments.append("typedef", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace()
+ .append(getFragmentsForType(Decl->getUnderlyingType(),
+ Decl->getASTContext(), After))
+ .append(std::move(After))
+ .appendSpace()
+ .append(Decl->getName(), DeclarationFragments::FragmentKind::Identifier);
+
+ return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+}
+
+// Instantiate template for FunctionDecl.
+template FunctionSignature
+DeclarationFragmentsBuilder::getFunctionSignature(const FunctionDecl *);
+
+// Instantiate template for ObjCMethodDecl.
+template FunctionSignature
+DeclarationFragmentsBuilder::getFunctionSignature(const ObjCMethodDecl *);
+
+// Subheading of a symbol defaults to its name.
+DeclarationFragments
+DeclarationFragmentsBuilder::getSubHeading(const NamedDecl *Decl) {
+ DeclarationFragments Fragments;
+ if (isa<CXXConstructorDecl>(Decl) || isa<CXXDestructorDecl>(Decl))
+ Fragments.append(cast<CXXRecordDecl>(Decl->getDeclContext())->getName(),
+ DeclarationFragments::FragmentKind::Identifier);
+ else if (isa<CXXConversionDecl>(Decl)) {
+ Fragments.append(
+ cast<CXXConversionDecl>(Decl)->getConversionType().getAsString(),
+ DeclarationFragments::FragmentKind::Identifier);
+ } else if (isa<CXXMethodDecl>(Decl) &&
+ cast<CXXMethodDecl>(Decl)->isOverloadedOperator()) {
+ Fragments.append(Decl->getNameAsString(),
+ DeclarationFragments::FragmentKind::Identifier);
+ } else if (!Decl->getName().empty())
+ Fragments.append(Decl->getName(),
+ DeclarationFragments::FragmentKind::Identifier);
+ return Fragments;
+}
+
+// Subheading of an Objective-C method is a `+` or `-` sign indicating whether
+// it's a class method or an instance method, followed by the selector name.
+DeclarationFragments
+DeclarationFragmentsBuilder::getSubHeading(const ObjCMethodDecl *Method) {
+ DeclarationFragments Fragments;
+ if (Method->isClassMethod())
+ Fragments.append("+ ", DeclarationFragments::FragmentKind::Text);
+ else if (Method->isInstanceMethod())
+ Fragments.append("- ", DeclarationFragments::FragmentKind::Text);
+
+ return Fragments.append(Method->getNameAsString(),
+ DeclarationFragments::FragmentKind::Identifier);
+}
+
+// Subheading of a symbol defaults to its name.
+DeclarationFragments
+DeclarationFragmentsBuilder::getSubHeadingForMacro(StringRef Name) {
+ DeclarationFragments Fragments;
+ Fragments.append(Name, DeclarationFragments::FragmentKind::Identifier);
+ return Fragments;
+}
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp
new file mode 100644
index 000000000000..fd62d841197d
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp
@@ -0,0 +1,584 @@
+//===- ExtractAPI/ExtractAPIConsumer.cpp ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the ExtractAPIAction, and ASTConsumer to collect API
+/// information.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTConcept.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/DiagnosticFrontend.h"
+#include "clang/Basic/FileEntry.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/ExtractAPI/API.h"
+#include "clang/ExtractAPI/APIIgnoresList.h"
+#include "clang/ExtractAPI/ExtractAPIVisitor.h"
+#include "clang/ExtractAPI/FrontendActions.h"
+#include "clang/ExtractAPI/Serialization/SymbolGraphSerializer.h"
+#include "clang/Frontend/ASTConsumers.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendOptions.h"
+#include "clang/Frontend/MultiplexConsumer.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/PPCallbacks.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PreprocessorOptions.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Support/raw_ostream.h"
+#include <memory>
+#include <optional>
+#include <utility>
+
+using namespace clang;
+using namespace extractapi;
+
+namespace {
+
+std::optional<std::string> getRelativeIncludeName(const CompilerInstance &CI,
+ StringRef File,
+ bool *IsQuoted = nullptr) {
+ assert(CI.hasFileManager() &&
+ "CompilerInstance does not have a FileNamager!");
+
+ using namespace llvm::sys;
+ // Matches framework include patterns
+ const llvm::Regex Rule("/(.+)\\.framework/(.+)?Headers/(.+)");
+
+ const auto &FS = CI.getVirtualFileSystem();
+
+ SmallString<128> FilePath(File.begin(), File.end());
+ FS.makeAbsolute(FilePath);
+ path::remove_dots(FilePath, true);
+ FilePath = path::convert_to_slash(FilePath);
+ File = FilePath;
+
+ // Checks whether `Dir` is a strict path prefix of `File`. If so returns
+ // the prefix length. Otherwise return 0.
+ auto CheckDir = [&](llvm::StringRef Dir) -> unsigned {
+ llvm::SmallString<32> DirPath(Dir.begin(), Dir.end());
+ FS.makeAbsolute(DirPath);
+ path::remove_dots(DirPath, true);
+ Dir = DirPath;
+ for (auto NI = path::begin(File), NE = path::end(File),
+ DI = path::begin(Dir), DE = path::end(Dir);
+ /*termination condition in loop*/; ++NI, ++DI) {
+ // '.' components in File are ignored.
+ while (NI != NE && *NI == ".")
+ ++NI;
+ if (NI == NE)
+ break;
+
+ // '.' components in Dir are ignored.
+ while (DI != DE && *DI == ".")
+ ++DI;
+
+ // Dir is a prefix of File, up to '.' components and choice of path
+ // separators.
+ if (DI == DE)
+ return NI - path::begin(File);
+
+ // Consider all path separators equal.
+ if (NI->size() == 1 && DI->size() == 1 &&
+ path::is_separator(NI->front()) && path::is_separator(DI->front()))
+ continue;
+
+ // Special case Apple .sdk folders since the search path is typically a
+ // symlink like `iPhoneSimulator14.5.sdk` while the file is instead
+ // located in `iPhoneSimulator.sdk` (the real folder).
+ if (NI->ends_with(".sdk") && DI->ends_with(".sdk")) {
+ StringRef NBasename = path::stem(*NI);
+ StringRef DBasename = path::stem(*DI);
+ if (DBasename.starts_with(NBasename))
+ continue;
+ }
+
+ if (*NI != *DI)
+ break;
+ }
+ return 0;
+ };
+
+ unsigned PrefixLength = 0;
+
+ // Go through the search paths and find the first one that is a prefix of
+ // the header.
+ for (const auto &Entry : CI.getHeaderSearchOpts().UserEntries) {
+ // Note whether the match is found in a quoted entry.
+ if (IsQuoted)
+ *IsQuoted = Entry.Group == frontend::Quoted;
+
+ if (auto EntryFile = CI.getFileManager().getOptionalFileRef(Entry.Path)) {
+ if (auto HMap = HeaderMap::Create(*EntryFile, CI.getFileManager())) {
+ // If this is a headermap entry, try to reverse lookup the full path
+ // for a spelled name before mapping.
+ StringRef SpelledFilename = HMap->reverseLookupFilename(File);
+ if (!SpelledFilename.empty())
+ return SpelledFilename.str();
+
+ // No matching mapping in this headermap, try next search entry.
+ continue;
+ }
+ }
+
+ // Entry is a directory search entry, try to check if it's a prefix of File.
+ PrefixLength = CheckDir(Entry.Path);
+ if (PrefixLength > 0) {
+ // The header is found in a framework path, construct the framework-style
+ // include name `<Framework/Header.h>`
+ if (Entry.IsFramework) {
+ SmallVector<StringRef, 4> Matches;
+ Rule.match(File, &Matches);
+ // Returned matches are always in stable order.
+ if (Matches.size() != 4)
+ return std::nullopt;
+
+ return path::convert_to_slash(
+ (Matches[1].drop_front(Matches[1].rfind('/') + 1) + "/" +
+ Matches[3])
+ .str());
+ }
+
+ // The header is found in a normal search path, strip the search path
+ // prefix to get an include name.
+ return path::convert_to_slash(File.drop_front(PrefixLength));
+ }
+ }
+
+ // Couldn't determine a include name, use full path instead.
+ return std::nullopt;
+}
+
+std::optional<std::string> getRelativeIncludeName(const CompilerInstance &CI,
+ FileEntryRef FE,
+ bool *IsQuoted = nullptr) {
+ return getRelativeIncludeName(CI, FE.getNameAsRequested(), IsQuoted);
+}
+
+struct LocationFileChecker {
+ bool operator()(SourceLocation Loc) {
+ // If the loc refers to a macro expansion we need to first get the file
+ // location of the expansion.
+ auto &SM = CI.getSourceManager();
+ auto FileLoc = SM.getFileLoc(Loc);
+ FileID FID = SM.getFileID(FileLoc);
+ if (FID.isInvalid())
+ return false;
+
+ OptionalFileEntryRef File = SM.getFileEntryRefForID(FID);
+ if (!File)
+ return false;
+
+ if (KnownFileEntries.count(*File))
+ return true;
+
+ if (ExternalFileEntries.count(*File))
+ return false;
+
+ // Try to reduce the include name the same way we tried to include it.
+ bool IsQuoted = false;
+ if (auto IncludeName = getRelativeIncludeName(CI, *File, &IsQuoted))
+ if (llvm::any_of(KnownFiles,
+ [&IsQuoted, &IncludeName](const auto &KnownFile) {
+ return KnownFile.first.equals(*IncludeName) &&
+ KnownFile.second == IsQuoted;
+ })) {
+ KnownFileEntries.insert(*File);
+ return true;
+ }
+
+ // Record that the file was not found to avoid future reverse lookup for
+ // the same file.
+ ExternalFileEntries.insert(*File);
+ return false;
+ }
+
+ LocationFileChecker(const CompilerInstance &CI,
+ SmallVector<std::pair<SmallString<32>, bool>> &KnownFiles)
+ : CI(CI), KnownFiles(KnownFiles), ExternalFileEntries() {
+ for (const auto &KnownFile : KnownFiles)
+ if (auto FileEntry = CI.getFileManager().getFile(KnownFile.first))
+ KnownFileEntries.insert(*FileEntry);
+ }
+
+private:
+ const CompilerInstance &CI;
+ SmallVector<std::pair<SmallString<32>, bool>> &KnownFiles;
+ llvm::DenseSet<const FileEntry *> KnownFileEntries;
+ llvm::DenseSet<const FileEntry *> ExternalFileEntries;
+};
+
+struct BatchExtractAPIVisitor : ExtractAPIVisitor<BatchExtractAPIVisitor> {
+ bool shouldDeclBeIncluded(const Decl *D) const {
+ bool ShouldBeIncluded = true;
+ // Check that we have the definition for redeclarable types.
+ if (auto *TD = llvm::dyn_cast<TagDecl>(D))
+ ShouldBeIncluded = TD->isThisDeclarationADefinition();
+ else if (auto *Interface = llvm::dyn_cast<ObjCInterfaceDecl>(D))
+ ShouldBeIncluded = Interface->isThisDeclarationADefinition();
+ else if (auto *Protocol = llvm::dyn_cast<ObjCProtocolDecl>(D))
+ ShouldBeIncluded = Protocol->isThisDeclarationADefinition();
+
+ ShouldBeIncluded = ShouldBeIncluded && LCF(D->getLocation());
+ return ShouldBeIncluded;
+ }
+
+ BatchExtractAPIVisitor(LocationFileChecker &LCF, ASTContext &Context,
+ APISet &API)
+ : ExtractAPIVisitor<BatchExtractAPIVisitor>(Context, API), LCF(LCF) {}
+
+private:
+ LocationFileChecker &LCF;
+};
+
+class WrappingExtractAPIConsumer : public ASTConsumer {
+public:
+ WrappingExtractAPIConsumer(ASTContext &Context, APISet &API)
+ : Visitor(Context, API) {}
+
+ void HandleTranslationUnit(ASTContext &Context) override {
+ // Use ExtractAPIVisitor to traverse symbol declarations in the context.
+ Visitor.TraverseDecl(Context.getTranslationUnitDecl());
+ }
+
+private:
+ ExtractAPIVisitor<> Visitor;
+};
+
+class ExtractAPIConsumer : public ASTConsumer {
+public:
+ ExtractAPIConsumer(ASTContext &Context,
+ std::unique_ptr<LocationFileChecker> LCF, APISet &API)
+ : Visitor(*LCF, Context, API), LCF(std::move(LCF)) {}
+
+ void HandleTranslationUnit(ASTContext &Context) override {
+ // Use ExtractAPIVisitor to traverse symbol declarations in the context.
+ Visitor.TraverseDecl(Context.getTranslationUnitDecl());
+ }
+
+private:
+ BatchExtractAPIVisitor Visitor;
+ std::unique_ptr<LocationFileChecker> LCF;
+};
+
+class MacroCallback : public PPCallbacks {
+public:
+ MacroCallback(const SourceManager &SM, APISet &API, Preprocessor &PP)
+ : SM(SM), API(API), PP(PP) {}
+
+ void MacroDefined(const Token &MacroNameToken,
+ const MacroDirective *MD) override {
+ auto *MacroInfo = MD->getMacroInfo();
+
+ if (MacroInfo->isBuiltinMacro())
+ return;
+
+ auto SourceLoc = MacroNameToken.getLocation();
+ if (SM.isWrittenInBuiltinFile(SourceLoc) ||
+ SM.isWrittenInCommandLineFile(SourceLoc))
+ return;
+
+ PendingMacros.emplace_back(MacroNameToken, MD);
+ }
+
+ // If a macro gets undefined at some point during preprocessing of the inputs
+ // it means that it isn't an exposed API and we should therefore not add a
+ // macro definition for it.
+ void MacroUndefined(const Token &MacroNameToken, const MacroDefinition &MD,
+ const MacroDirective *Undef) override {
+ // If this macro wasn't previously defined we don't need to do anything
+ // here.
+ if (!Undef)
+ return;
+
+ llvm::erase_if(PendingMacros, [&MD, this](const PendingMacro &PM) {
+ return MD.getMacroInfo()->isIdenticalTo(*PM.MD->getMacroInfo(), PP,
+ /*Syntactically*/ false);
+ });
+ }
+
+ void EndOfMainFile() override {
+ for (auto &PM : PendingMacros) {
+ // `isUsedForHeaderGuard` is only set when the preprocessor leaves the
+ // file so check for it here.
+ if (PM.MD->getMacroInfo()->isUsedForHeaderGuard())
+ continue;
+
+ if (!shouldMacroBeIncluded(PM))
+ continue;
+
+ StringRef Name = PM.MacroNameToken.getIdentifierInfo()->getName();
+ PresumedLoc Loc = SM.getPresumedLoc(PM.MacroNameToken.getLocation());
+ StringRef USR =
+ API.recordUSRForMacro(Name, PM.MacroNameToken.getLocation(), SM);
+
+ API.addMacroDefinition(
+ Name, USR, Loc,
+ DeclarationFragmentsBuilder::getFragmentsForMacro(Name, PM.MD),
+ DeclarationFragmentsBuilder::getSubHeadingForMacro(Name),
+ SM.isInSystemHeader(PM.MacroNameToken.getLocation()));
+ }
+
+ PendingMacros.clear();
+ }
+
+protected:
+ struct PendingMacro {
+ Token MacroNameToken;
+ const MacroDirective *MD;
+
+ PendingMacro(const Token &MacroNameToken, const MacroDirective *MD)
+ : MacroNameToken(MacroNameToken), MD(MD) {}
+ };
+
+ virtual bool shouldMacroBeIncluded(const PendingMacro &PM) { return true; }
+
+ const SourceManager &SM;
+ APISet &API;
+ Preprocessor &PP;
+ llvm::SmallVector<PendingMacro> PendingMacros;
+};
+
+class APIMacroCallback : public MacroCallback {
+public:
+ APIMacroCallback(const SourceManager &SM, APISet &API, Preprocessor &PP,
+ LocationFileChecker &LCF)
+ : MacroCallback(SM, API, PP), LCF(LCF) {}
+
+ bool shouldMacroBeIncluded(const PendingMacro &PM) override {
+ // Do not include macros from external files
+ return LCF(PM.MacroNameToken.getLocation());
+ }
+
+private:
+ LocationFileChecker &LCF;
+};
+
+} // namespace
+
+void ExtractAPIActionBase::ImplEndSourceFileAction() {
+ if (!OS)
+ return;
+
+ // Setup a SymbolGraphSerializer to write out collected API information in
+ // the Symbol Graph format.
+ // FIXME: Make the kind of APISerializer configurable.
+ SymbolGraphSerializer SGSerializer(*API, IgnoresList);
+ SGSerializer.serialize(*OS);
+ OS.reset();
+}
+
+std::unique_ptr<raw_pwrite_stream>
+ExtractAPIAction::CreateOutputFile(CompilerInstance &CI, StringRef InFile) {
+ std::unique_ptr<raw_pwrite_stream> OS;
+ OS = CI.createDefaultOutputFile(/*Binary=*/false, InFile,
+ /*Extension=*/"json",
+ /*RemoveFileOnSignal=*/false);
+ if (!OS)
+ return nullptr;
+ return OS;
+}
+
+std::unique_ptr<ASTConsumer>
+ExtractAPIAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ OS = CreateOutputFile(CI, InFile);
+
+ if (!OS)
+ return nullptr;
+
+ auto ProductName = CI.getFrontendOpts().ProductName;
+
+ // Now that we have enough information about the language options and the
+ // target triple, let's create the APISet before anyone uses it.
+ API = std::make_unique<APISet>(
+ CI.getTarget().getTriple(),
+ CI.getFrontendOpts().Inputs.back().getKind().getLanguage(), ProductName);
+
+ auto LCF = std::make_unique<LocationFileChecker>(CI, KnownInputFiles);
+
+ CI.getPreprocessor().addPPCallbacks(std::make_unique<APIMacroCallback>(
+ CI.getSourceManager(), *API, CI.getPreprocessor(), *LCF));
+
+ // Do not include location in anonymous decls.
+ PrintingPolicy Policy = CI.getASTContext().getPrintingPolicy();
+ Policy.AnonymousTagLocations = false;
+ CI.getASTContext().setPrintingPolicy(Policy);
+
+ if (!CI.getFrontendOpts().ExtractAPIIgnoresFileList.empty()) {
+ llvm::handleAllErrors(
+ APIIgnoresList::create(CI.getFrontendOpts().ExtractAPIIgnoresFileList,
+ CI.getFileManager())
+ .moveInto(IgnoresList),
+ [&CI](const IgnoresFileNotFound &Err) {
+ CI.getDiagnostics().Report(
+ diag::err_extract_api_ignores_file_not_found)
+ << Err.Path;
+ });
+ }
+
+ return std::make_unique<ExtractAPIConsumer>(CI.getASTContext(),
+ std::move(LCF), *API);
+}
+
+bool ExtractAPIAction::PrepareToExecuteAction(CompilerInstance &CI) {
+ auto &Inputs = CI.getFrontendOpts().Inputs;
+ if (Inputs.empty())
+ return true;
+
+ if (!CI.hasFileManager())
+ if (!CI.createFileManager())
+ return false;
+
+ auto Kind = Inputs[0].getKind();
+
+ // Convert the header file inputs into a single input buffer.
+ SmallString<256> HeaderContents;
+ bool IsQuoted = false;
+ for (const FrontendInputFile &FIF : Inputs) {
+ if (Kind.isObjectiveC())
+ HeaderContents += "#import";
+ else
+ HeaderContents += "#include";
+
+ StringRef FilePath = FIF.getFile();
+ if (auto RelativeName = getRelativeIncludeName(CI, FilePath, &IsQuoted)) {
+ if (IsQuoted)
+ HeaderContents += " \"";
+ else
+ HeaderContents += " <";
+
+ HeaderContents += *RelativeName;
+
+ if (IsQuoted)
+ HeaderContents += "\"\n";
+ else
+ HeaderContents += ">\n";
+ KnownInputFiles.emplace_back(static_cast<SmallString<32>>(*RelativeName),
+ IsQuoted);
+ } else {
+ HeaderContents += " \"";
+ HeaderContents += FilePath;
+ HeaderContents += "\"\n";
+ KnownInputFiles.emplace_back(FilePath, true);
+ }
+ }
+
+ if (CI.getHeaderSearchOpts().Verbose)
+ CI.getVerboseOutputStream() << getInputBufferName() << ":\n"
+ << HeaderContents << "\n";
+
+ Buffer = llvm::MemoryBuffer::getMemBufferCopy(HeaderContents,
+ getInputBufferName());
+
+ // Set that buffer up as our "real" input in the CompilerInstance.
+ Inputs.clear();
+ Inputs.emplace_back(Buffer->getMemBufferRef(), Kind, /*IsSystem*/ false);
+
+ return true;
+}
+
+void ExtractAPIAction::EndSourceFileAction() { ImplEndSourceFileAction(); }
+
+std::unique_ptr<ASTConsumer>
+WrappingExtractAPIAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ auto OtherConsumer = WrapperFrontendAction::CreateASTConsumer(CI, InFile);
+ if (!OtherConsumer)
+ return nullptr;
+
+ CreatedASTConsumer = true;
+
+ OS = CreateOutputFile(CI, InFile);
+ if (!OS)
+ return nullptr;
+
+ auto ProductName = CI.getFrontendOpts().ProductName;
+
+ // Now that we have enough information about the language options and the
+ // target triple, let's create the APISet before anyone uses it.
+ API = std::make_unique<APISet>(
+ CI.getTarget().getTriple(),
+ CI.getFrontendOpts().Inputs.back().getKind().getLanguage(), ProductName);
+
+ CI.getPreprocessor().addPPCallbacks(std::make_unique<MacroCallback>(
+ CI.getSourceManager(), *API, CI.getPreprocessor()));
+
+ // Do not include location in anonymous decls.
+ PrintingPolicy Policy = CI.getASTContext().getPrintingPolicy();
+ Policy.AnonymousTagLocations = false;
+ CI.getASTContext().setPrintingPolicy(Policy);
+
+ if (!CI.getFrontendOpts().ExtractAPIIgnoresFileList.empty()) {
+ llvm::handleAllErrors(
+ APIIgnoresList::create(CI.getFrontendOpts().ExtractAPIIgnoresFileList,
+ CI.getFileManager())
+ .moveInto(IgnoresList),
+ [&CI](const IgnoresFileNotFound &Err) {
+ CI.getDiagnostics().Report(
+ diag::err_extract_api_ignores_file_not_found)
+ << Err.Path;
+ });
+ }
+
+ auto WrappingConsumer =
+ std::make_unique<WrappingExtractAPIConsumer>(CI.getASTContext(), *API);
+ std::vector<std::unique_ptr<ASTConsumer>> Consumers;
+ Consumers.push_back(std::move(OtherConsumer));
+ Consumers.push_back(std::move(WrappingConsumer));
+
+ return std::make_unique<MultiplexConsumer>(std::move(Consumers));
+}
+
+void WrappingExtractAPIAction::EndSourceFileAction() {
+ // Invoke wrapped action's method.
+ WrapperFrontendAction::EndSourceFileAction();
+
+ if (CreatedASTConsumer) {
+ ImplEndSourceFileAction();
+ }
+}
+
+std::unique_ptr<raw_pwrite_stream>
+WrappingExtractAPIAction::CreateOutputFile(CompilerInstance &CI,
+ StringRef InFile) {
+ std::unique_ptr<raw_pwrite_stream> OS;
+ std::string OutputDir = CI.getFrontendOpts().SymbolGraphOutputDir;
+
+ // The symbol graphs need to be generated as a side effect of regular
+ // compilation so the output should be dumped in the directory provided with
+ // the command line option.
+ llvm::SmallString<128> OutFilePath(OutputDir);
+ auto Seperator = llvm::sys::path::get_separator();
+ auto Infilename = llvm::sys::path::filename(InFile);
+ OutFilePath.append({Seperator, Infilename});
+ llvm::sys::path::replace_extension(OutFilePath, "json");
+ // StringRef outputFilePathref = *OutFilePath;
+
+ // don't use the default output file
+ OS = CI.createOutputFile(/*OutputPath=*/OutFilePath, /*Binary=*/false,
+ /*RemoveFileOnSignal=*/true,
+ /*UseTemporary=*/true,
+ /*CreateMissingDirectories=*/true);
+ if (!OS)
+ return nullptr;
+ return OS;
+}
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp
new file mode 100644
index 000000000000..349b93e2a232
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp
@@ -0,0 +1,1302 @@
+//===- ExtractAPI/Serialization/SymbolGraphSerializer.cpp -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the SymbolGraphSerializer.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/ExtractAPI/Serialization/SymbolGraphSerializer.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/Version.h"
+#include "clang/ExtractAPI/DeclarationFragments.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/VersionTuple.h"
+#include <optional>
+#include <type_traits>
+
+using namespace clang;
+using namespace clang::extractapi;
+using namespace llvm;
+using namespace llvm::json;
+
+namespace {
+
+/// Helper function to inject a JSON object \p Obj into another object \p Paren
+/// at position \p Key.
+void serializeObject(Object &Paren, StringRef Key, std::optional<Object> Obj) {
+ if (Obj)
+ Paren[Key] = std::move(*Obj);
+}
+
+/// Helper function to inject a StringRef \p String into an object \p Paren at
+/// position \p Key
+void serializeString(Object &Paren, StringRef Key,
+ std::optional<std::string> String) {
+ if (String)
+ Paren[Key] = std::move(*String);
+}
+
+/// Helper function to inject a JSON array \p Array into object \p Paren at
+/// position \p Key.
+void serializeArray(Object &Paren, StringRef Key, std::optional<Array> Array) {
+ if (Array)
+ Paren[Key] = std::move(*Array);
+}
+
+/// Serialize a \c VersionTuple \p V with the Symbol Graph semantic version
+/// format.
+///
+/// A semantic version object contains three numeric fields, representing the
+/// \c major, \c minor, and \c patch parts of the version tuple.
+/// For example version tuple 1.0.3 is serialized as:
+/// \code
+/// {
+/// "major" : 1,
+/// "minor" : 0,
+/// "patch" : 3
+/// }
+/// \endcode
+///
+/// \returns \c std::nullopt if the version \p V is empty, or an \c Object
+/// containing the semantic version representation of \p V.
+std::optional<Object> serializeSemanticVersion(const VersionTuple &V) {
+ if (V.empty())
+ return std::nullopt;
+
+ Object Version;
+ Version["major"] = V.getMajor();
+ Version["minor"] = V.getMinor().value_or(0);
+ Version["patch"] = V.getSubminor().value_or(0);
+ return Version;
+}
+
+/// Serialize the OS information in the Symbol Graph platform property.
+///
+/// The OS information in Symbol Graph contains the \c name of the OS, and an
+/// optional \c minimumVersion semantic version field.
+Object serializeOperatingSystem(const Triple &T) {
+ Object OS;
+ OS["name"] = T.getOSTypeName(T.getOS());
+ serializeObject(OS, "minimumVersion",
+ serializeSemanticVersion(T.getMinimumSupportedOSVersion()));
+ return OS;
+}
+
+/// Serialize the platform information in the Symbol Graph module section.
+///
+/// The platform object describes a target platform triple in corresponding
+/// three fields: \c architecture, \c vendor, and \c operatingSystem.
+Object serializePlatform(const Triple &T) {
+ Object Platform;
+ Platform["architecture"] = T.getArchName();
+ Platform["vendor"] = T.getVendorName();
+ Platform["operatingSystem"] = serializeOperatingSystem(T);
+ return Platform;
+}
+
+/// Serialize a source position.
+Object serializeSourcePosition(const PresumedLoc &Loc) {
+ assert(Loc.isValid() && "invalid source position");
+
+ Object SourcePosition;
+ SourcePosition["line"] = Loc.getLine() - 1;
+ SourcePosition["character"] = Loc.getColumn() - 1;
+
+ return SourcePosition;
+}
+
+/// Serialize a source location in file.
+///
+/// \param Loc The presumed location to serialize.
+/// \param IncludeFileURI If true, include the file path of \p Loc as a URI.
+/// Defaults to false.
+Object serializeSourceLocation(const PresumedLoc &Loc,
+ bool IncludeFileURI = false) {
+ Object SourceLocation;
+ serializeObject(SourceLocation, "position", serializeSourcePosition(Loc));
+
+ if (IncludeFileURI) {
+ std::string FileURI = "file://";
+ // Normalize file path to use forward slashes for the URI.
+ FileURI += sys::path::convert_to_slash(Loc.getFilename());
+ SourceLocation["uri"] = FileURI;
+ }
+
+ return SourceLocation;
+}
+
+/// Serialize a source range with begin and end locations.
+Object serializeSourceRange(const PresumedLoc &BeginLoc,
+ const PresumedLoc &EndLoc) {
+ Object SourceRange;
+ serializeObject(SourceRange, "start", serializeSourcePosition(BeginLoc));
+ serializeObject(SourceRange, "end", serializeSourcePosition(EndLoc));
+ return SourceRange;
+}
+
+/// Serialize the availability attributes of a symbol.
+///
+/// Availability information contains the introduced, deprecated, and obsoleted
+/// versions of the symbol as semantic versions, if not default.
+/// Availability information also contains flags to indicate if the symbol is
+/// unconditionally unavailable or deprecated,
+/// i.e. \c __attribute__((unavailable)) and \c __attribute__((deprecated)).
+///
+/// \returns \c std::nullopt if the symbol has default availability attributes,
+/// or an \c Array containing an object with the formatted availability
+/// information.
+std::optional<Array> serializeAvailability(const AvailabilityInfo &Avail) {
+ if (Avail.isDefault())
+ return std::nullopt;
+
+ Object Availability;
+ Array AvailabilityArray;
+ Availability["domain"] = Avail.Domain;
+ serializeObject(Availability, "introduced",
+ serializeSemanticVersion(Avail.Introduced));
+ serializeObject(Availability, "deprecated",
+ serializeSemanticVersion(Avail.Deprecated));
+ serializeObject(Availability, "obsoleted",
+ serializeSemanticVersion(Avail.Obsoleted));
+ if (Avail.isUnconditionallyDeprecated()) {
+ Object UnconditionallyDeprecated;
+ UnconditionallyDeprecated["domain"] = "*";
+ UnconditionallyDeprecated["isUnconditionallyDeprecated"] = true;
+ AvailabilityArray.emplace_back(std::move(UnconditionallyDeprecated));
+ }
+ if (Avail.isUnconditionallyUnavailable()) {
+ Object UnconditionallyUnavailable;
+ UnconditionallyUnavailable["domain"] = "*";
+ UnconditionallyUnavailable["isUnconditionallyUnavailable"] = true;
+ AvailabilityArray.emplace_back(std::move(UnconditionallyUnavailable));
+ }
+ AvailabilityArray.emplace_back(std::move(Availability));
+ return AvailabilityArray;
+}
+
+/// Get the language name string for interface language references.
+StringRef getLanguageName(Language Lang) {
+ switch (Lang) {
+ case Language::C:
+ return "c";
+ case Language::ObjC:
+ return "objective-c";
+ case Language::CXX:
+ return "c++";
+ case Language::ObjCXX:
+ return "objective-c++";
+
+ // Unsupported language currently
+ case Language::OpenCL:
+ case Language::OpenCLCXX:
+ case Language::CUDA:
+ case Language::RenderScript:
+ case Language::HIP:
+ case Language::HLSL:
+
+ // Languages that the frontend cannot parse and compile
+ case Language::Unknown:
+ case Language::Asm:
+ case Language::LLVM_IR:
+ llvm_unreachable("Unsupported language kind");
+ }
+
+ llvm_unreachable("Unhandled language kind");
+}
+
+/// Serialize the identifier object as specified by the Symbol Graph format.
+///
+/// The identifier property of a symbol contains the USR for precise and unique
+/// references, and the interface language name.
+Object serializeIdentifier(const APIRecord &Record, Language Lang) {
+ Object Identifier;
+ Identifier["precise"] = Record.USR;
+ Identifier["interfaceLanguage"] = getLanguageName(Lang);
+
+ return Identifier;
+}
+
+/// Serialize the documentation comments attached to a symbol, as specified by
+/// the Symbol Graph format.
+///
+/// The Symbol Graph \c docComment object contains an array of lines. Each line
+/// represents one line of striped documentation comment, with source range
+/// information.
+/// e.g.
+/// \code
+/// /// This is a documentation comment
+/// ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' First line.
+/// /// with multiple lines.
+/// ^~~~~~~~~~~~~~~~~~~~~~~' Second line.
+/// \endcode
+///
+/// \returns \c std::nullopt if \p Comment is empty, or an \c Object containing
+/// the formatted lines.
+std::optional<Object> serializeDocComment(const DocComment &Comment) {
+ if (Comment.empty())
+ return std::nullopt;
+
+ Object DocComment;
+ Array LinesArray;
+ for (const auto &CommentLine : Comment) {
+ Object Line;
+ Line["text"] = CommentLine.Text;
+ serializeObject(Line, "range",
+ serializeSourceRange(CommentLine.Begin, CommentLine.End));
+ LinesArray.emplace_back(std::move(Line));
+ }
+ serializeArray(DocComment, "lines", LinesArray);
+
+ return DocComment;
+}
+
+/// Serialize the declaration fragments of a symbol.
+///
+/// The Symbol Graph declaration fragments is an array of tagged important
+/// parts of a symbol's declaration. The fragments sequence can be joined to
+/// form spans of declaration text, with attached information useful for
+/// purposes like syntax-highlighting etc. For example:
+/// \code
+/// const int pi; -> "declarationFragments" : [
+/// {
+/// "kind" : "keyword",
+/// "spelling" : "const"
+/// },
+/// {
+/// "kind" : "text",
+/// "spelling" : " "
+/// },
+/// {
+/// "kind" : "typeIdentifier",
+/// "preciseIdentifier" : "c:I",
+/// "spelling" : "int"
+/// },
+/// {
+/// "kind" : "text",
+/// "spelling" : " "
+/// },
+/// {
+/// "kind" : "identifier",
+/// "spelling" : "pi"
+/// }
+/// ]
+/// \endcode
+///
+/// \returns \c std::nullopt if \p DF is empty, or an \c Array containing the
+/// formatted declaration fragments array.
+std::optional<Array>
+serializeDeclarationFragments(const DeclarationFragments &DF) {
+ if (DF.getFragments().empty())
+ return std::nullopt;
+
+ Array Fragments;
+ for (const auto &F : DF.getFragments()) {
+ Object Fragment;
+ Fragment["spelling"] = F.Spelling;
+ Fragment["kind"] = DeclarationFragments::getFragmentKindString(F.Kind);
+ if (!F.PreciseIdentifier.empty())
+ Fragment["preciseIdentifier"] = F.PreciseIdentifier;
+ Fragments.emplace_back(std::move(Fragment));
+ }
+
+ return Fragments;
+}
+
+/// Serialize the \c names field of a symbol as specified by the Symbol Graph
+/// format.
+///
+/// The Symbol Graph names field contains multiple representations of a symbol
+/// that can be used for different applications:
+/// - \c title : The simple declared name of the symbol;
+/// - \c subHeading : An array of declaration fragments that provides tags,
+/// and potentially more tokens (for example the \c +/- symbol for
+/// Objective-C methods). Can be used as sub-headings for documentation.
+Object serializeNames(const APIRecord &Record) {
+ Object Names;
+ if (auto *CategoryRecord =
+ dyn_cast_or_null<const ObjCCategoryRecord>(&Record))
+ Names["title"] =
+ (CategoryRecord->Interface.Name + " (" + Record.Name + ")").str();
+ else
+ Names["title"] = Record.Name;
+
+ serializeArray(Names, "subHeading",
+ serializeDeclarationFragments(Record.SubHeading));
+ DeclarationFragments NavigatorFragments;
+ NavigatorFragments.append(Record.Name,
+ DeclarationFragments::FragmentKind::Identifier,
+ /*PreciseIdentifier*/ "");
+ serializeArray(Names, "navigator",
+ serializeDeclarationFragments(NavigatorFragments));
+
+ return Names;
+}
+
+Object serializeSymbolKind(APIRecord::RecordKind RK, Language Lang) {
+ auto AddLangPrefix = [&Lang](StringRef S) -> std::string {
+ return (getLanguageName(Lang) + "." + S).str();
+ };
+
+ Object Kind;
+ switch (RK) {
+ case APIRecord::RK_Unknown:
+ llvm_unreachable("Records should have an explicit kind");
+ break;
+ case APIRecord::RK_Namespace:
+ Kind["identifier"] = AddLangPrefix("namespace");
+ Kind["displayName"] = "Namespace";
+ break;
+ case APIRecord::RK_GlobalFunction:
+ Kind["identifier"] = AddLangPrefix("func");
+ Kind["displayName"] = "Function";
+ break;
+ case APIRecord::RK_GlobalFunctionTemplate:
+ Kind["identifier"] = AddLangPrefix("func");
+ Kind["displayName"] = "Function Template";
+ break;
+ case APIRecord::RK_GlobalFunctionTemplateSpecialization:
+ Kind["identifier"] = AddLangPrefix("func");
+ Kind["displayName"] = "Function Template Specialization";
+ break;
+ case APIRecord::RK_GlobalVariableTemplate:
+ Kind["identifier"] = AddLangPrefix("var");
+ Kind["displayName"] = "Global Variable Template";
+ break;
+ case APIRecord::RK_GlobalVariableTemplateSpecialization:
+ Kind["identifier"] = AddLangPrefix("var");
+ Kind["displayName"] = "Global Variable Template Specialization";
+ break;
+ case APIRecord::RK_GlobalVariableTemplatePartialSpecialization:
+ Kind["identifier"] = AddLangPrefix("var");
+ Kind["displayName"] = "Global Variable Template Partial Specialization";
+ break;
+ case APIRecord::RK_GlobalVariable:
+ Kind["identifier"] = AddLangPrefix("var");
+ Kind["displayName"] = "Global Variable";
+ break;
+ case APIRecord::RK_EnumConstant:
+ Kind["identifier"] = AddLangPrefix("enum.case");
+ Kind["displayName"] = "Enumeration Case";
+ break;
+ case APIRecord::RK_Enum:
+ Kind["identifier"] = AddLangPrefix("enum");
+ Kind["displayName"] = "Enumeration";
+ break;
+ case APIRecord::RK_StructField:
+ Kind["identifier"] = AddLangPrefix("property");
+ Kind["displayName"] = "Instance Property";
+ break;
+ case APIRecord::RK_Struct:
+ Kind["identifier"] = AddLangPrefix("struct");
+ Kind["displayName"] = "Structure";
+ break;
+ case APIRecord::RK_UnionField:
+ Kind["identifier"] = AddLangPrefix("property");
+ Kind["displayName"] = "Instance Property";
+ break;
+ case APIRecord::RK_Union:
+ Kind["identifier"] = AddLangPrefix("union");
+ Kind["displayName"] = "Union";
+ break;
+ case APIRecord::RK_CXXField:
+ Kind["identifier"] = AddLangPrefix("property");
+ Kind["displayName"] = "Instance Property";
+ break;
+ case APIRecord::RK_StaticField:
+ Kind["identifier"] = AddLangPrefix("type.property");
+ Kind["displayName"] = "Type Property";
+ break;
+ case APIRecord::RK_ClassTemplate:
+ case APIRecord::RK_ClassTemplateSpecialization:
+ case APIRecord::RK_ClassTemplatePartialSpecialization:
+ case APIRecord::RK_CXXClass:
+ Kind["identifier"] = AddLangPrefix("class");
+ Kind["displayName"] = "Class";
+ break;
+ case APIRecord::RK_CXXMethodTemplate:
+ Kind["identifier"] = AddLangPrefix("method");
+ Kind["displayName"] = "Method Template";
+ break;
+ case APIRecord::RK_CXXMethodTemplateSpecialization:
+ Kind["identifier"] = AddLangPrefix("method");
+ Kind["displayName"] = "Method Template Specialization";
+ break;
+ case APIRecord::RK_CXXFieldTemplate:
+ Kind["identifier"] = AddLangPrefix("property");
+ Kind["displayName"] = "Template Property";
+ break;
+ case APIRecord::RK_Concept:
+ Kind["identifier"] = AddLangPrefix("concept");
+ Kind["displayName"] = "Concept";
+ break;
+ case APIRecord::RK_CXXStaticMethod:
+ Kind["identifier"] = AddLangPrefix("type.method");
+ Kind["displayName"] = "Static Method";
+ break;
+ case APIRecord::RK_CXXInstanceMethod:
+ Kind["identifier"] = AddLangPrefix("method");
+ Kind["displayName"] = "Instance Method";
+ break;
+ case APIRecord::RK_CXXConstructorMethod:
+ Kind["identifier"] = AddLangPrefix("method");
+ Kind["displayName"] = "Constructor";
+ break;
+ case APIRecord::RK_CXXDestructorMethod:
+ Kind["identifier"] = AddLangPrefix("method");
+ Kind["displayName"] = "Destructor";
+ break;
+ case APIRecord::RK_ObjCIvar:
+ Kind["identifier"] = AddLangPrefix("ivar");
+ Kind["displayName"] = "Instance Variable";
+ break;
+ case APIRecord::RK_ObjCInstanceMethod:
+ Kind["identifier"] = AddLangPrefix("method");
+ Kind["displayName"] = "Instance Method";
+ break;
+ case APIRecord::RK_ObjCClassMethod:
+ Kind["identifier"] = AddLangPrefix("type.method");
+ Kind["displayName"] = "Type Method";
+ break;
+ case APIRecord::RK_ObjCInstanceProperty:
+ Kind["identifier"] = AddLangPrefix("property");
+ Kind["displayName"] = "Instance Property";
+ break;
+ case APIRecord::RK_ObjCClassProperty:
+ Kind["identifier"] = AddLangPrefix("type.property");
+ Kind["displayName"] = "Type Property";
+ break;
+ case APIRecord::RK_ObjCInterface:
+ Kind["identifier"] = AddLangPrefix("class");
+ Kind["displayName"] = "Class";
+ break;
+ case APIRecord::RK_ObjCCategory:
+ Kind["identifier"] = AddLangPrefix("class.extension");
+ Kind["displayName"] = "Class Extension";
+ break;
+ case APIRecord::RK_ObjCCategoryModule:
+ Kind["identifier"] = AddLangPrefix("module.extension");
+ Kind["displayName"] = "Module Extension";
+ break;
+ case APIRecord::RK_ObjCProtocol:
+ Kind["identifier"] = AddLangPrefix("protocol");
+ Kind["displayName"] = "Protocol";
+ break;
+ case APIRecord::RK_MacroDefinition:
+ Kind["identifier"] = AddLangPrefix("macro");
+ Kind["displayName"] = "Macro";
+ break;
+ case APIRecord::RK_Typedef:
+ Kind["identifier"] = AddLangPrefix("typealias");
+ Kind["displayName"] = "Type Alias";
+ break;
+ }
+
+ return Kind;
+}
+
+/// Serialize the symbol kind information.
+///
+/// The Symbol Graph symbol kind property contains a shorthand \c identifier
+/// which is prefixed by the source language name, useful for tooling to parse
+/// the kind, and a \c displayName for rendering human-readable names.
+Object serializeSymbolKind(const APIRecord &Record, Language Lang) {
+ return serializeSymbolKind(Record.getKind(), Lang);
+}
+
+template <typename RecordTy>
+std::optional<Object>
+serializeFunctionSignatureMixinImpl(const RecordTy &Record, std::true_type) {
+ const auto &FS = Record.Signature;
+ if (FS.empty())
+ return std::nullopt;
+
+ Object Signature;
+ serializeArray(Signature, "returns",
+ serializeDeclarationFragments(FS.getReturnType()));
+
+ Array Parameters;
+ for (const auto &P : FS.getParameters()) {
+ Object Parameter;
+ Parameter["name"] = P.Name;
+ serializeArray(Parameter, "declarationFragments",
+ serializeDeclarationFragments(P.Fragments));
+ Parameters.emplace_back(std::move(Parameter));
+ }
+
+ if (!Parameters.empty())
+ Signature["parameters"] = std::move(Parameters);
+
+ return Signature;
+}
+
+template <typename RecordTy>
+std::optional<Object>
+serializeFunctionSignatureMixinImpl(const RecordTy &Record, std::false_type) {
+ return std::nullopt;
+}
+
+/// Serialize the function signature field, as specified by the
+/// Symbol Graph format.
+///
+/// The Symbol Graph function signature property contains two arrays.
+/// - The \c returns array is the declaration fragments of the return type;
+/// - The \c parameters array contains names and declaration fragments of the
+/// parameters.
+///
+/// \returns \c std::nullopt if \p FS is empty, or an \c Object containing the
+/// formatted function signature.
+template <typename RecordTy>
+void serializeFunctionSignatureMixin(Object &Paren, const RecordTy &Record) {
+ serializeObject(Paren, "functionSignature",
+ serializeFunctionSignatureMixinImpl(
+ Record, has_function_signature<RecordTy>()));
+}
+
+template <typename RecordTy>
+std::optional<std::string> serializeAccessMixinImpl(const RecordTy &Record,
+ std::true_type) {
+ const auto &AccessControl = Record.Access;
+ std::string Access;
+ if (AccessControl.empty())
+ return std::nullopt;
+ Access = AccessControl.getAccess();
+ return Access;
+}
+
+template <typename RecordTy>
+std::optional<std::string> serializeAccessMixinImpl(const RecordTy &Record,
+ std::false_type) {
+ return std::nullopt;
+}
+
+template <typename RecordTy>
+void serializeAccessMixin(Object &Paren, const RecordTy &Record) {
+ auto accessLevel = serializeAccessMixinImpl(Record, has_access<RecordTy>());
+ if (!accessLevel.has_value())
+ accessLevel = "public";
+ serializeString(Paren, "accessLevel", accessLevel);
+}
+
+template <typename RecordTy>
+std::optional<Object> serializeTemplateMixinImpl(const RecordTy &Record,
+ std::true_type) {
+ const auto &Template = Record.Templ;
+ if (Template.empty())
+ return std::nullopt;
+
+ Object Generics;
+ Array GenericParameters;
+ for (const auto &Param : Template.getParameters()) {
+ Object Parameter;
+ Parameter["name"] = Param.Name;
+ Parameter["index"] = Param.Index;
+ Parameter["depth"] = Param.Depth;
+ GenericParameters.emplace_back(std::move(Parameter));
+ }
+ if (!GenericParameters.empty())
+ Generics["parameters"] = std::move(GenericParameters);
+
+ Array GenericConstraints;
+ for (const auto &Constr : Template.getConstraints()) {
+ Object Constraint;
+ Constraint["kind"] = Constr.Kind;
+ Constraint["lhs"] = Constr.LHS;
+ Constraint["rhs"] = Constr.RHS;
+ GenericConstraints.emplace_back(std::move(Constraint));
+ }
+
+ if (!GenericConstraints.empty())
+ Generics["constraints"] = std::move(GenericConstraints);
+
+ return Generics;
+}
+
+template <typename RecordTy>
+std::optional<Object> serializeTemplateMixinImpl(const RecordTy &Record,
+ std::false_type) {
+ return std::nullopt;
+}
+
+template <typename RecordTy>
+void serializeTemplateMixin(Object &Paren, const RecordTy &Record) {
+ serializeObject(Paren, "swiftGenerics",
+ serializeTemplateMixinImpl(Record, has_template<RecordTy>()));
+}
+
+struct PathComponent {
+ StringRef USR;
+ StringRef Name;
+ APIRecord::RecordKind Kind;
+
+ PathComponent(StringRef USR, StringRef Name, APIRecord::RecordKind Kind)
+ : USR(USR), Name(Name), Kind(Kind) {}
+};
+
+template <typename RecordTy>
+bool generatePathComponents(
+ const RecordTy &Record, const APISet &API,
+ function_ref<void(const PathComponent &)> ComponentTransformer) {
+ SmallVector<PathComponent, 4> ReverseComponenents;
+ ReverseComponenents.emplace_back(Record.USR, Record.Name, Record.getKind());
+ const auto *CurrentParent = &Record.ParentInformation;
+ bool FailedToFindParent = false;
+ while (CurrentParent && !CurrentParent->empty()) {
+ PathComponent CurrentParentComponent(CurrentParent->ParentUSR,
+ CurrentParent->ParentName,
+ CurrentParent->ParentKind);
+
+ auto *ParentRecord = CurrentParent->ParentRecord;
+ // Slow path if we don't have a direct reference to the ParentRecord
+ if (!ParentRecord)
+ ParentRecord = API.findRecordForUSR(CurrentParent->ParentUSR);
+
+ // If the parent is a category extended from internal module then we need to
+ // pretend this belongs to the associated interface.
+ if (auto *CategoryRecord =
+ dyn_cast_or_null<ObjCCategoryRecord>(ParentRecord)) {
+ if (!CategoryRecord->IsFromExternalModule) {
+ ParentRecord = API.findRecordForUSR(CategoryRecord->Interface.USR);
+ CurrentParentComponent = PathComponent(CategoryRecord->Interface.USR,
+ CategoryRecord->Interface.Name,
+ APIRecord::RK_ObjCInterface);
+ }
+ }
+
+ // The parent record doesn't exist which means the symbol shouldn't be
+ // treated as part of the current product.
+ if (!ParentRecord) {
+ FailedToFindParent = true;
+ break;
+ }
+
+ ReverseComponenents.push_back(std::move(CurrentParentComponent));
+ CurrentParent = &ParentRecord->ParentInformation;
+ }
+
+ for (const auto &PC : reverse(ReverseComponenents))
+ ComponentTransformer(PC);
+
+ return FailedToFindParent;
+}
+
+Object serializeParentContext(const PathComponent &PC, Language Lang) {
+ Object ParentContextElem;
+ ParentContextElem["usr"] = PC.USR;
+ ParentContextElem["name"] = PC.Name;
+ ParentContextElem["kind"] = serializeSymbolKind(PC.Kind, Lang)["identifier"];
+ return ParentContextElem;
+}
+
+template <typename RecordTy>
+Array generateParentContexts(const RecordTy &Record, const APISet &API,
+ Language Lang) {
+ Array ParentContexts;
+ generatePathComponents(
+ Record, API, [Lang, &ParentContexts](const PathComponent &PC) {
+ ParentContexts.push_back(serializeParentContext(PC, Lang));
+ });
+
+ return ParentContexts;
+}
+} // namespace
+
+/// Defines the format version emitted by SymbolGraphSerializer.
+const VersionTuple SymbolGraphSerializer::FormatVersion{0, 5, 3};
+
+Object SymbolGraphSerializer::serializeMetadata() const {
+ Object Metadata;
+ serializeObject(Metadata, "formatVersion",
+ serializeSemanticVersion(FormatVersion));
+ Metadata["generator"] = clang::getClangFullVersion();
+ return Metadata;
+}
+
+Object SymbolGraphSerializer::serializeModule() const {
+ Object Module;
+ // The user is expected to always pass `--product-name=` on the command line
+ // to populate this field.
+ Module["name"] = API.ProductName;
+ serializeObject(Module, "platform", serializePlatform(API.getTarget()));
+ return Module;
+}
+
+bool SymbolGraphSerializer::shouldSkip(const APIRecord &Record) const {
+ // Skip explicitly ignored symbols.
+ if (IgnoresList.shouldIgnore(Record.Name))
+ return true;
+
+ // Skip unconditionally unavailable symbols
+ if (Record.Availability.isUnconditionallyUnavailable())
+ return true;
+
+ // Filter out symbols prefixed with an underscored as they are understood to
+ // be symbols clients should not use.
+ if (Record.Name.starts_with("_"))
+ return true;
+
+ return false;
+}
+
+template <typename RecordTy>
+std::optional<Object>
+SymbolGraphSerializer::serializeAPIRecord(const RecordTy &Record) const {
+ if (shouldSkip(Record))
+ return std::nullopt;
+
+ Object Obj;
+ serializeObject(Obj, "identifier",
+ serializeIdentifier(Record, API.getLanguage()));
+ serializeObject(Obj, "kind", serializeSymbolKind(Record, API.getLanguage()));
+ serializeObject(Obj, "names", serializeNames(Record));
+ serializeObject(
+ Obj, "location",
+ serializeSourceLocation(Record.Location, /*IncludeFileURI=*/true));
+ serializeArray(Obj, "availability",
+ serializeAvailability(Record.Availability));
+ serializeObject(Obj, "docComment", serializeDocComment(Record.Comment));
+ serializeArray(Obj, "declarationFragments",
+ serializeDeclarationFragments(Record.Declaration));
+ SmallVector<StringRef, 4> PathComponentsNames;
+ // If this returns true it indicates that we couldn't find a symbol in the
+ // hierarchy.
+ if (generatePathComponents(Record, API,
+ [&PathComponentsNames](const PathComponent &PC) {
+ PathComponentsNames.push_back(PC.Name);
+ }))
+ return {};
+
+ serializeArray(Obj, "pathComponents", Array(PathComponentsNames));
+
+ serializeFunctionSignatureMixin(Obj, Record);
+ serializeAccessMixin(Obj, Record);
+ serializeTemplateMixin(Obj, Record);
+
+ return Obj;
+}
+
+template <typename MemberTy>
+void SymbolGraphSerializer::serializeMembers(
+ const APIRecord &Record,
+ const SmallVector<std::unique_ptr<MemberTy>> &Members) {
+ // Members should not be serialized if we aren't recursing.
+ if (!ShouldRecurse)
+ return;
+ for (const auto &Member : Members) {
+ auto MemberRecord = serializeAPIRecord(*Member);
+ if (!MemberRecord)
+ continue;
+
+ Symbols.emplace_back(std::move(*MemberRecord));
+ serializeRelationship(RelationshipKind::MemberOf, *Member, Record);
+ }
+}
+
+StringRef SymbolGraphSerializer::getRelationshipString(RelationshipKind Kind) {
+ switch (Kind) {
+ case RelationshipKind::MemberOf:
+ return "memberOf";
+ case RelationshipKind::InheritsFrom:
+ return "inheritsFrom";
+ case RelationshipKind::ConformsTo:
+ return "conformsTo";
+ case RelationshipKind::ExtensionTo:
+ return "extensionTo";
+ }
+ llvm_unreachable("Unhandled relationship kind");
+}
+
+StringRef SymbolGraphSerializer::getConstraintString(ConstraintKind Kind) {
+ switch (Kind) {
+ case ConstraintKind::Conformance:
+ return "conformance";
+ case ConstraintKind::ConditionalConformance:
+ return "conditionalConformance";
+ }
+ llvm_unreachable("Unhandled constraint kind");
+}
+
+void SymbolGraphSerializer::serializeRelationship(RelationshipKind Kind,
+ SymbolReference Source,
+ SymbolReference Target) {
+ Object Relationship;
+ Relationship["source"] = Source.USR;
+ Relationship["target"] = Target.USR;
+ Relationship["targetFallback"] = Target.Name;
+ Relationship["kind"] = getRelationshipString(Kind);
+
+ Relationships.emplace_back(std::move(Relationship));
+}
+
+void SymbolGraphSerializer::visitNamespaceRecord(
+ const NamespaceRecord &Record) {
+ auto Namespace = serializeAPIRecord(Record);
+ if (!Namespace)
+ return;
+ Symbols.emplace_back(std::move(*Namespace));
+ if (!Record.ParentInformation.empty())
+ serializeRelationship(RelationshipKind::MemberOf, Record,
+ Record.ParentInformation.ParentRecord);
+}
+
+void SymbolGraphSerializer::visitGlobalFunctionRecord(
+ const GlobalFunctionRecord &Record) {
+ auto Obj = serializeAPIRecord(Record);
+ if (!Obj)
+ return;
+
+ Symbols.emplace_back(std::move(*Obj));
+}
+
+void SymbolGraphSerializer::visitGlobalVariableRecord(
+ const GlobalVariableRecord &Record) {
+ auto Obj = serializeAPIRecord(Record);
+ if (!Obj)
+ return;
+
+ Symbols.emplace_back(std::move(*Obj));
+}
+
+void SymbolGraphSerializer::visitEnumRecord(const EnumRecord &Record) {
+ auto Enum = serializeAPIRecord(Record);
+ if (!Enum)
+ return;
+
+ Symbols.emplace_back(std::move(*Enum));
+ serializeMembers(Record, Record.Constants);
+}
+
+void SymbolGraphSerializer::visitRecordRecord(const RecordRecord &Record) {
+ auto SerializedRecord = serializeAPIRecord(Record);
+ if (!SerializedRecord)
+ return;
+
+ Symbols.emplace_back(std::move(*SerializedRecord));
+ serializeMembers(Record, Record.Fields);
+}
+
+void SymbolGraphSerializer::visitStaticFieldRecord(
+ const StaticFieldRecord &Record) {
+ auto StaticField = serializeAPIRecord(Record);
+ if (!StaticField)
+ return;
+ Symbols.emplace_back(std::move(*StaticField));
+ serializeRelationship(RelationshipKind::MemberOf, Record, Record.Context);
+}
+
+void SymbolGraphSerializer::visitCXXClassRecord(const CXXClassRecord &Record) {
+ auto Class = serializeAPIRecord(Record);
+ if (!Class)
+ return;
+
+ Symbols.emplace_back(std::move(*Class));
+ for (const auto &Base : Record.Bases)
+ serializeRelationship(RelationshipKind::InheritsFrom, Record, Base);
+ if (!Record.ParentInformation.empty())
+ serializeRelationship(RelationshipKind::MemberOf, Record,
+ Record.ParentInformation.ParentRecord);
+}
+
+void SymbolGraphSerializer::visitClassTemplateRecord(
+ const ClassTemplateRecord &Record) {
+ auto Class = serializeAPIRecord(Record);
+ if (!Class)
+ return;
+
+ Symbols.emplace_back(std::move(*Class));
+ for (const auto &Base : Record.Bases)
+ serializeRelationship(RelationshipKind::InheritsFrom, Record, Base);
+ if (!Record.ParentInformation.empty())
+ serializeRelationship(RelationshipKind::MemberOf, Record,
+ Record.ParentInformation.ParentRecord);
+}
+
+void SymbolGraphSerializer::visitClassTemplateSpecializationRecord(
+ const ClassTemplateSpecializationRecord &Record) {
+ auto Class = serializeAPIRecord(Record);
+ if (!Class)
+ return;
+
+ Symbols.emplace_back(std::move(*Class));
+
+ for (const auto &Base : Record.Bases)
+ serializeRelationship(RelationshipKind::InheritsFrom, Record, Base);
+ if (!Record.ParentInformation.empty())
+ serializeRelationship(RelationshipKind::MemberOf, Record,
+ Record.ParentInformation.ParentRecord);
+}
+
+void SymbolGraphSerializer::visitClassTemplatePartialSpecializationRecord(
+ const ClassTemplatePartialSpecializationRecord &Record) {
+ auto Class = serializeAPIRecord(Record);
+ if (!Class)
+ return;
+
+ Symbols.emplace_back(std::move(*Class));
+
+ for (const auto &Base : Record.Bases)
+ serializeRelationship(RelationshipKind::InheritsFrom, Record, Base);
+ if (!Record.ParentInformation.empty())
+ serializeRelationship(RelationshipKind::MemberOf, Record,
+ Record.ParentInformation.ParentRecord);
+}
+
+void SymbolGraphSerializer::visitCXXInstanceMethodRecord(
+ const CXXInstanceMethodRecord &Record) {
+ auto InstanceMethod = serializeAPIRecord(Record);
+ if (!InstanceMethod)
+ return;
+
+ Symbols.emplace_back(std::move(*InstanceMethod));
+ serializeRelationship(RelationshipKind::MemberOf, Record,
+ Record.ParentInformation.ParentRecord);
+}
+
+void SymbolGraphSerializer::visitCXXStaticMethodRecord(
+ const CXXStaticMethodRecord &Record) {
+ auto StaticMethod = serializeAPIRecord(Record);
+ if (!StaticMethod)
+ return;
+
+ Symbols.emplace_back(std::move(*StaticMethod));
+ serializeRelationship(RelationshipKind::MemberOf, Record,
+ Record.ParentInformation.ParentRecord);
+}
+
+void SymbolGraphSerializer::visitMethodTemplateRecord(
+ const CXXMethodTemplateRecord &Record) {
+ if (!ShouldRecurse)
+ // Ignore child symbols
+ return;
+ auto MethodTemplate = serializeAPIRecord(Record);
+ if (!MethodTemplate)
+ return;
+ Symbols.emplace_back(std::move(*MethodTemplate));
+ serializeRelationship(RelationshipKind::MemberOf, Record,
+ Record.ParentInformation.ParentRecord);
+}
+
+void SymbolGraphSerializer::visitMethodTemplateSpecializationRecord(
+ const CXXMethodTemplateSpecializationRecord &Record) {
+ if (!ShouldRecurse)
+ // Ignore child symbols
+ return;
+ auto MethodTemplateSpecialization = serializeAPIRecord(Record);
+ if (!MethodTemplateSpecialization)
+ return;
+ Symbols.emplace_back(std::move(*MethodTemplateSpecialization));
+ serializeRelationship(RelationshipKind::MemberOf, Record,
+ Record.ParentInformation.ParentRecord);
+}
+
+void SymbolGraphSerializer::visitCXXFieldRecord(const CXXFieldRecord &Record) {
+ if (!ShouldRecurse)
+ return;
+ auto CXXField = serializeAPIRecord(Record);
+ if (!CXXField)
+ return;
+ Symbols.emplace_back(std::move(*CXXField));
+ serializeRelationship(RelationshipKind::MemberOf, Record,
+ Record.ParentInformation.ParentRecord);
+}
+
+void SymbolGraphSerializer::visitCXXFieldTemplateRecord(
+ const CXXFieldTemplateRecord &Record) {
+ if (!ShouldRecurse)
+ // Ignore child symbols
+ return;
+ auto CXXFieldTemplate = serializeAPIRecord(Record);
+ if (!CXXFieldTemplate)
+ return;
+ Symbols.emplace_back(std::move(*CXXFieldTemplate));
+ serializeRelationship(RelationshipKind::MemberOf, Record,
+ Record.ParentInformation.ParentRecord);
+}
+
+void SymbolGraphSerializer::visitConceptRecord(const ConceptRecord &Record) {
+ auto Concept = serializeAPIRecord(Record);
+ if (!Concept)
+ return;
+
+ Symbols.emplace_back(std::move(*Concept));
+}
+
+void SymbolGraphSerializer::visitGlobalVariableTemplateRecord(
+ const GlobalVariableTemplateRecord &Record) {
+ auto GlobalVariableTemplate = serializeAPIRecord(Record);
+ if (!GlobalVariableTemplate)
+ return;
+ Symbols.emplace_back(std::move(*GlobalVariableTemplate));
+}
+
+void SymbolGraphSerializer::visitGlobalVariableTemplateSpecializationRecord(
+ const GlobalVariableTemplateSpecializationRecord &Record) {
+ auto GlobalVariableTemplateSpecialization = serializeAPIRecord(Record);
+ if (!GlobalVariableTemplateSpecialization)
+ return;
+ Symbols.emplace_back(std::move(*GlobalVariableTemplateSpecialization));
+}
+
+void SymbolGraphSerializer::
+ visitGlobalVariableTemplatePartialSpecializationRecord(
+ const GlobalVariableTemplatePartialSpecializationRecord &Record) {
+ auto GlobalVariableTemplatePartialSpecialization = serializeAPIRecord(Record);
+ if (!GlobalVariableTemplatePartialSpecialization)
+ return;
+ Symbols.emplace_back(std::move(*GlobalVariableTemplatePartialSpecialization));
+}
+
+void SymbolGraphSerializer::visitGlobalFunctionTemplateRecord(
+ const GlobalFunctionTemplateRecord &Record) {
+ auto GlobalFunctionTemplate = serializeAPIRecord(Record);
+ if (!GlobalFunctionTemplate)
+ return;
+ Symbols.emplace_back(std::move(*GlobalFunctionTemplate));
+}
+
+void SymbolGraphSerializer::visitGlobalFunctionTemplateSpecializationRecord(
+ const GlobalFunctionTemplateSpecializationRecord &Record) {
+ auto GlobalFunctionTemplateSpecialization = serializeAPIRecord(Record);
+ if (!GlobalFunctionTemplateSpecialization)
+ return;
+ Symbols.emplace_back(std::move(*GlobalFunctionTemplateSpecialization));
+}
+
+void SymbolGraphSerializer::visitObjCContainerRecord(
+ const ObjCContainerRecord &Record) {
+ auto ObjCContainer = serializeAPIRecord(Record);
+ if (!ObjCContainer)
+ return;
+
+ Symbols.emplace_back(std::move(*ObjCContainer));
+
+ serializeMembers(Record, Record.Ivars);
+ serializeMembers(Record, Record.Methods);
+ serializeMembers(Record, Record.Properties);
+
+ for (const auto &Protocol : Record.Protocols)
+ // Record that Record conforms to Protocol.
+ serializeRelationship(RelationshipKind::ConformsTo, Record, Protocol);
+
+ if (auto *ObjCInterface = dyn_cast<ObjCInterfaceRecord>(&Record)) {
+ if (!ObjCInterface->SuperClass.empty())
+ // If Record is an Objective-C interface record and it has a super class,
+ // record that Record is inherited from SuperClass.
+ serializeRelationship(RelationshipKind::InheritsFrom, Record,
+ ObjCInterface->SuperClass);
+
+ // Members of categories extending an interface are serialized as members of
+ // the interface.
+ for (const auto *Category : ObjCInterface->Categories) {
+ serializeMembers(Record, Category->Ivars);
+ serializeMembers(Record, Category->Methods);
+ serializeMembers(Record, Category->Properties);
+
+ // Surface the protocols of the category to the interface.
+ for (const auto &Protocol : Category->Protocols)
+ serializeRelationship(RelationshipKind::ConformsTo, Record, Protocol);
+ }
+ }
+}
+
+void SymbolGraphSerializer::visitObjCCategoryRecord(
+ const ObjCCategoryRecord &Record) {
+ if (!Record.IsFromExternalModule)
+ return;
+
+ // Check if the current Category' parent has been visited before, if so skip.
+ if (!visitedCategories.contains(Record.Interface.Name)) {
+ visitedCategories.insert(Record.Interface.Name);
+ Object Obj;
+ serializeObject(Obj, "identifier",
+ serializeIdentifier(Record, API.getLanguage()));
+ serializeObject(Obj, "kind",
+ serializeSymbolKind(APIRecord::RK_ObjCCategoryModule,
+ API.getLanguage()));
+ Obj["accessLevel"] = "public";
+ Symbols.emplace_back(std::move(Obj));
+ }
+
+ Object Relationship;
+ Relationship["source"] = Record.USR;
+ Relationship["target"] = Record.Interface.USR;
+ Relationship["targetFallback"] = Record.Interface.Name;
+ Relationship["kind"] = getRelationshipString(RelationshipKind::ExtensionTo);
+ Relationships.emplace_back(std::move(Relationship));
+
+ auto ObjCCategory = serializeAPIRecord(Record);
+
+ if (!ObjCCategory)
+ return;
+
+ Symbols.emplace_back(std::move(*ObjCCategory));
+ serializeMembers(Record, Record.Methods);
+ serializeMembers(Record, Record.Properties);
+
+ // Surface the protocols of the category to the interface.
+ for (const auto &Protocol : Record.Protocols)
+ serializeRelationship(RelationshipKind::ConformsTo, Record, Protocol);
+}
+
+void SymbolGraphSerializer::visitMacroDefinitionRecord(
+ const MacroDefinitionRecord &Record) {
+ auto Macro = serializeAPIRecord(Record);
+
+ if (!Macro)
+ return;
+
+ Symbols.emplace_back(std::move(*Macro));
+}
+
+void SymbolGraphSerializer::serializeSingleRecord(const APIRecord *Record) {
+ switch (Record->getKind()) {
+ case APIRecord::RK_Unknown:
+ llvm_unreachable("Records should have a known kind!");
+ case APIRecord::RK_GlobalFunction:
+ visitGlobalFunctionRecord(*cast<GlobalFunctionRecord>(Record));
+ break;
+ case APIRecord::RK_GlobalVariable:
+ visitGlobalVariableRecord(*cast<GlobalVariableRecord>(Record));
+ break;
+ case APIRecord::RK_Enum:
+ visitEnumRecord(*cast<EnumRecord>(Record));
+ break;
+ case APIRecord::RK_Struct:
+ LLVM_FALLTHROUGH;
+ case APIRecord::RK_Union:
+ visitRecordRecord(*cast<RecordRecord>(Record));
+ break;
+ case APIRecord::RK_StaticField:
+ visitStaticFieldRecord(*cast<StaticFieldRecord>(Record));
+ break;
+ case APIRecord::RK_CXXClass:
+ visitCXXClassRecord(*cast<CXXClassRecord>(Record));
+ break;
+ case APIRecord::RK_ObjCInterface:
+ visitObjCContainerRecord(*cast<ObjCInterfaceRecord>(Record));
+ break;
+ case APIRecord::RK_ObjCProtocol:
+ visitObjCContainerRecord(*cast<ObjCProtocolRecord>(Record));
+ break;
+ case APIRecord::RK_ObjCCategory:
+ visitObjCCategoryRecord(*cast<ObjCCategoryRecord>(Record));
+ break;
+ case APIRecord::RK_MacroDefinition:
+ visitMacroDefinitionRecord(*cast<MacroDefinitionRecord>(Record));
+ break;
+ case APIRecord::RK_Typedef:
+ visitTypedefRecord(*cast<TypedefRecord>(Record));
+ break;
+ default:
+ if (auto Obj = serializeAPIRecord(*Record)) {
+ Symbols.emplace_back(std::move(*Obj));
+ auto &ParentInformation = Record->ParentInformation;
+ if (!ParentInformation.empty())
+ serializeRelationship(RelationshipKind::MemberOf, *Record,
+ *ParentInformation.ParentRecord);
+ }
+ break;
+ }
+}
+
+void SymbolGraphSerializer::visitTypedefRecord(const TypedefRecord &Record) {
+ // Typedefs of anonymous types have their entries unified with the underlying
+ // type.
+ bool ShouldDrop = Record.UnderlyingType.Name.empty();
+ // enums declared with `NS_OPTION` have a named enum and a named typedef, with
+ // the same name
+ ShouldDrop |= (Record.UnderlyingType.Name == Record.Name);
+ if (ShouldDrop)
+ return;
+
+ auto Typedef = serializeAPIRecord(Record);
+ if (!Typedef)
+ return;
+
+ (*Typedef)["type"] = Record.UnderlyingType.USR;
+
+ Symbols.emplace_back(std::move(*Typedef));
+}
+
+Object SymbolGraphSerializer::serialize() {
+ traverseAPISet();
+ return serializeCurrentGraph();
+}
+
+Object SymbolGraphSerializer::serializeCurrentGraph() {
+ Object Root;
+ serializeObject(Root, "metadata", serializeMetadata());
+ serializeObject(Root, "module", serializeModule());
+
+ Root["symbols"] = std::move(Symbols);
+ Root["relationships"] = std::move(Relationships);
+
+ return Root;
+}
+
+void SymbolGraphSerializer::serialize(raw_ostream &os) {
+ Object root = serialize();
+ if (Options.Compact)
+ os << formatv("{0}", Value(std::move(root))) << "\n";
+ else
+ os << formatv("{0:2}", Value(std::move(root))) << "\n";
+}
+
+std::optional<Object>
+SymbolGraphSerializer::serializeSingleSymbolSGF(StringRef USR,
+ const APISet &API) {
+ APIRecord *Record = API.findRecordForUSR(USR);
+ if (!Record)
+ return {};
+
+ Object Root;
+ APIIgnoresList EmptyIgnores;
+ SymbolGraphSerializer Serializer(API, EmptyIgnores,
+ /*Options.Compact*/ {true},
+ /*ShouldRecurse*/ false);
+ Serializer.serializeSingleRecord(Record);
+ serializeObject(Root, "symbolGraph", Serializer.serializeCurrentGraph());
+
+ Language Lang = API.getLanguage();
+ serializeArray(Root, "parentContexts",
+ generateParentContexts(*Record, API, Lang));
+
+ Array RelatedSymbols;
+
+ for (const auto &Fragment : Record->Declaration.getFragments()) {
+ // If we don't have a USR there isn't much we can do.
+ if (Fragment.PreciseIdentifier.empty())
+ continue;
+
+ APIRecord *RelatedRecord = API.findRecordForUSR(Fragment.PreciseIdentifier);
+
+ // If we can't find the record let's skip.
+ if (!RelatedRecord)
+ continue;
+
+ Object RelatedSymbol;
+ RelatedSymbol["usr"] = RelatedRecord->USR;
+ RelatedSymbol["declarationLanguage"] = getLanguageName(Lang);
+ // TODO: once we record this properly let's serialize it right.
+ RelatedSymbol["accessLevel"] = "public";
+ RelatedSymbol["filePath"] = RelatedRecord->Location.getFilename();
+ RelatedSymbol["moduleName"] = API.ProductName;
+ RelatedSymbol["isSystem"] = RelatedRecord->IsFromSystemHeader;
+
+ serializeArray(RelatedSymbol, "parentContexts",
+ generateParentContexts(*RelatedRecord, API, Lang));
+ RelatedSymbols.push_back(std::move(RelatedSymbol));
+ }
+
+ serializeArray(Root, "relatedSymbols", RelatedSymbols);
+ return Root;
+}
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp
new file mode 100644
index 000000000000..3a5f62c9b2e6
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp
@@ -0,0 +1,76 @@
+//===- ExtractAPI/TypedefUnderlyingTypeResolver.cpp -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements UnderlyingTypeResolver.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/ExtractAPI/TypedefUnderlyingTypeResolver.h"
+#include "clang/Index/USRGeneration.h"
+
+using namespace clang;
+using namespace extractapi;
+
+const NamedDecl *
+TypedefUnderlyingTypeResolver::getUnderlyingTypeDecl(QualType Type) const {
+ const NamedDecl *TypeDecl = nullptr;
+
+ const TypedefType *TypedefTy = Type->getAs<TypedefType>();
+ if (TypedefTy)
+ TypeDecl = TypedefTy->getDecl();
+ if (const TagType *TagTy = Type->getAs<TagType>()) {
+ TypeDecl = TagTy->getDecl();
+ } else if (const ObjCInterfaceType *ObjCITy =
+ Type->getAs<ObjCInterfaceType>()) {
+ TypeDecl = ObjCITy->getDecl();
+ }
+
+ if (TypeDecl && TypedefTy) {
+ // if this is a typedef to another typedef, use the typedef's decl for the
+ // USR - this will actually be in the output, unlike a typedef to an
+ // anonymous decl
+ const TypedefNameDecl *TypedefDecl = TypedefTy->getDecl();
+ if (TypedefDecl->getUnderlyingType()->isTypedefNameType())
+ TypeDecl = TypedefDecl;
+ }
+
+ return TypeDecl;
+}
+
+SymbolReference
+TypedefUnderlyingTypeResolver::getSymbolReferenceForType(QualType Type,
+ APISet &API) const {
+ std::string TypeName = Type.getAsString();
+ SmallString<128> TypeUSR;
+ const NamedDecl *TypeDecl = getUnderlyingTypeDecl(Type);
+ const TypedefType *TypedefTy = Type->getAs<TypedefType>();
+
+ if (TypeDecl) {
+ if (!TypedefTy)
+ TypeName = TypeDecl->getName().str();
+
+ clang::index::generateUSRForDecl(TypeDecl, TypeUSR);
+ } else {
+ clang::index::generateUSRForType(Type, Context, TypeUSR);
+ }
+
+ return {API.copyString(TypeName), API.copyString(TypeUSR)};
+}
+
+std::string TypedefUnderlyingTypeResolver::getUSRForType(QualType Type) const {
+ SmallString<128> TypeUSR;
+ const NamedDecl *TypeDecl = getUnderlyingTypeDecl(Type);
+
+ if (TypeDecl)
+ clang::index::generateUSRForDecl(TypeDecl, TypeUSR);
+ else
+ clang::index::generateUSRForType(Type, Context, TypeUSR);
+
+ return std::string(TypeUSR);
+}
diff --git a/contrib/llvm-project/clang/lib/Format/AffectedRangeManager.cpp b/contrib/llvm-project/clang/lib/Format/AffectedRangeManager.cpp
index 7ad1f7070d0a..bf124d73e89e 100644
--- a/contrib/llvm-project/clang/lib/Format/AffectedRangeManager.cpp
+++ b/contrib/llvm-project/clang/lib/Format/AffectedRangeManager.cpp
@@ -27,6 +27,7 @@ bool AffectedRangeManager::computeAffectedLines(
const AnnotatedLine *PreviousLine = nullptr;
while (I != E) {
AnnotatedLine *Line = *I;
+ assert(Line->First);
Line->LeadingEmptyLinesAffected = affectsLeadingEmptyLines(*Line->First);
// If a line is part of a preprocessor directive, it needs to be formatted
@@ -59,12 +60,11 @@ bool AffectedRangeManager::computeAffectedLines(
bool AffectedRangeManager::affectsCharSourceRange(
const CharSourceRange &Range) {
- for (SmallVectorImpl<CharSourceRange>::const_iterator I = Ranges.begin(),
- E = Ranges.end();
- I != E; ++I) {
- if (!SourceMgr.isBeforeInTranslationUnit(Range.getEnd(), I->getBegin()) &&
- !SourceMgr.isBeforeInTranslationUnit(I->getEnd(), Range.getBegin()))
+ for (const CharSourceRange &R : Ranges) {
+ if (!SourceMgr.isBeforeInTranslationUnit(Range.getEnd(), R.getBegin()) &&
+ !SourceMgr.isBeforeInTranslationUnit(R.getEnd(), Range.getBegin())) {
return true;
+ }
}
return false;
}
@@ -116,6 +116,7 @@ bool AffectedRangeManager::nonPPLineAffected(
// affected.
bool SomeFirstChildAffected = false;
+ assert(Line->First);
for (FormatToken *Tok = Line->First; Tok; Tok = Tok->Next) {
// Determine whether 'Tok' was affected.
if (affectsTokenRange(*Tok, *Tok, IncludeLeadingNewlines))
@@ -134,7 +135,7 @@ bool AffectedRangeManager::nonPPLineAffected(
Line->First->NewlinesBefore == 0;
bool IsContinuedComment =
- Line->First->is(tok::comment) && Line->First->Next == nullptr &&
+ Line->First->is(tok::comment) && !Line->First->Next &&
Line->First->NewlinesBefore < 2 && PreviousLine &&
PreviousLine->Affected && PreviousLine->Last->is(tok::comment);
diff --git a/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp b/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp
index 455904895848..473908e8fee3 100644
--- a/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp
+++ b/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp
@@ -49,13 +49,13 @@ static StringRef getLineCommentIndentPrefix(StringRef Comment,
if (Style.Language == FormatStyle::LK_TextProto)
KnownPrefixes = KnownTextProtoPrefixes;
- assert(std::is_sorted(KnownPrefixes.begin(), KnownPrefixes.end(),
- [](StringRef Lhs, StringRef Rhs) noexcept {
- return Lhs.size() > Rhs.size();
- }));
+ assert(
+ llvm::is_sorted(KnownPrefixes, [](StringRef Lhs, StringRef Rhs) noexcept {
+ return Lhs.size() > Rhs.size();
+ }));
for (StringRef KnownPrefix : KnownPrefixes) {
- if (Comment.startswith(KnownPrefix)) {
+ if (Comment.starts_with(KnownPrefix)) {
const auto PrefixLength =
Comment.find_first_not_of(' ', KnownPrefix.size());
return Comment.substr(0, PrefixLength);
@@ -82,16 +82,16 @@ getCommentSplit(StringRef Text, unsigned ContentStartColumn,
NumChars < MaxSplit && MaxSplitBytes < Text.size();) {
unsigned BytesInChar =
encoding::getCodePointNumBytes(Text[MaxSplitBytes], Encoding);
- NumChars +=
- encoding::columnWidthWithTabs(Text.substr(MaxSplitBytes, BytesInChar),
- ContentStartColumn, TabWidth, Encoding);
+ NumChars += encoding::columnWidthWithTabs(
+ Text.substr(MaxSplitBytes, BytesInChar), ContentStartColumn + NumChars,
+ TabWidth, Encoding);
MaxSplitBytes += BytesInChar;
}
// In JavaScript, some @tags can be followed by {, and machinery that parses
// these comments will fail to understand the comment if followed by a line
// break. So avoid ever breaking before a {.
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isJavaScript()) {
StringRef::size_type SpaceOffset =
Text.find_first_of(Blanks, MaxSplitBytes);
if (SpaceOffset != StringRef::npos && SpaceOffset + 1 < Text.size() &&
@@ -127,8 +127,7 @@ getCommentSplit(StringRef Text, unsigned ContentStartColumn,
}
// Avoid ever breaking before a @tag or a { in JavaScript.
- if (Style.Language == FormatStyle::LK_JavaScript &&
- SpaceOffset + 1 < Text.size() &&
+ if (Style.isJavaScript() && SpaceOffset + 1 < Text.size() &&
(Text[SpaceOffset + 1] == '{' || Text[SpaceOffset + 1] == '@')) {
SpaceOffset = Text.find_last_of(Blanks, SpaceOffset);
continue;
@@ -143,9 +142,10 @@ getCommentSplit(StringRef Text, unsigned ContentStartColumn,
// Make sure that we don't break at leading whitespace that
// reaches past MaxSplit.
StringRef::size_type FirstNonWhitespace = Text.find_first_not_of(Blanks);
- if (FirstNonWhitespace == StringRef::npos)
+ if (FirstNonWhitespace == StringRef::npos) {
// If the comment is only whitespace, we cannot split.
return BreakableToken::Split(StringRef::npos, 0);
+ }
SpaceOffset = Text.find_first_of(
Blanks, std::max<unsigned>(MaxSplitBytes, FirstNonWhitespace));
}
@@ -220,8 +220,8 @@ bool switchesFormatting(const FormatToken &Token) {
assert((Token.is(TT_BlockComment) || Token.is(TT_LineComment)) &&
"formatting regions are switched by comment tokens");
StringRef Content = Token.TokenText.substr(2).ltrim();
- return Content.startswith("clang-format on") ||
- Content.startswith("clang-format off");
+ return Content.starts_with("clang-format on") ||
+ Content.starts_with("clang-format off");
}
unsigned
@@ -255,8 +255,8 @@ unsigned
BreakableStringLiteral::getRemainingLength(unsigned LineIndex, unsigned Offset,
unsigned StartColumn) const {
return UnbreakableTailLength + Postfix.size() +
- encoding::columnWidthWithTabs(Line.substr(Offset, StringRef::npos),
- StartColumn, Style.TabWidth, Encoding);
+ encoding::columnWidthWithTabs(Line.substr(Offset), StartColumn,
+ Style.TabWidth, Encoding);
}
unsigned BreakableStringLiteral::getContentStartColumn(unsigned LineIndex,
@@ -271,7 +271,7 @@ BreakableStringLiteral::BreakableStringLiteral(
: BreakableToken(Tok, InPPDirective, Encoding, Style),
StartColumn(StartColumn), Prefix(Prefix), Postfix(Postfix),
UnbreakableTailLength(UnbreakableTailLength) {
- assert(Tok.TokenText.startswith(Prefix) && Tok.TokenText.endswith(Postfix));
+ assert(Tok.TokenText.starts_with(Prefix) && Tok.TokenText.ends_with(Postfix));
Line = Tok.TokenText.substr(
Prefix.size(), Tok.TokenText.size() - Prefix.size() - Postfix.size());
}
@@ -292,6 +292,120 @@ void BreakableStringLiteral::insertBreak(unsigned LineIndex,
Prefix, InPPDirective, 1, StartColumn);
}
+BreakableStringLiteralUsingOperators::BreakableStringLiteralUsingOperators(
+ const FormatToken &Tok, QuoteStyleType QuoteStyle, bool UnindentPlus,
+ unsigned StartColumn, unsigned UnbreakableTailLength, bool InPPDirective,
+ encoding::Encoding Encoding, const FormatStyle &Style)
+ : BreakableStringLiteral(
+ Tok, StartColumn, /*Prefix=*/QuoteStyle == SingleQuotes ? "'"
+ : QuoteStyle == AtDoubleQuotes ? "@\""
+ : "\"",
+ /*Postfix=*/QuoteStyle == SingleQuotes ? "'" : "\"",
+ UnbreakableTailLength, InPPDirective, Encoding, Style),
+ BracesNeeded(Tok.isNot(TT_StringInConcatenation)),
+ QuoteStyle(QuoteStyle) {
+ // Find the replacement text for inserting braces and quotes and line breaks.
+ // We don't create an allocated string concatenated from parts here because it
+ // has to outlive the BreakableStringliteral object. The brace replacements
+ // include a quote so that WhitespaceManager can tell it apart from whitespace
+ // replacements between the string and surrounding tokens.
+
+ // The option is not implemented in JavaScript.
+ bool SignOnNewLine =
+ !Style.isJavaScript() &&
+ Style.BreakBeforeBinaryOperators != FormatStyle::BOS_None;
+
+ if (Style.isVerilog()) {
+ // In Verilog, all strings are quoted by double quotes, joined by commas,
+ // and wrapped in braces. The comma is always before the newline.
+ assert(QuoteStyle == DoubleQuotes);
+ LeftBraceQuote = Style.Cpp11BracedListStyle ? "{\"" : "{ \"";
+ RightBraceQuote = Style.Cpp11BracedListStyle ? "\"}" : "\" }";
+ Postfix = "\",";
+ Prefix = "\"";
+ } else {
+ // The plus sign may be on either line. And also C# and JavaScript have
+ // several quoting styles.
+ if (QuoteStyle == SingleQuotes) {
+ LeftBraceQuote = Style.SpacesInParensOptions.Other ? "( '" : "('";
+ RightBraceQuote = Style.SpacesInParensOptions.Other ? "' )" : "')";
+ Postfix = SignOnNewLine ? "'" : "' +";
+ Prefix = SignOnNewLine ? "+ '" : "'";
+ } else {
+ if (QuoteStyle == AtDoubleQuotes) {
+ LeftBraceQuote = Style.SpacesInParensOptions.Other ? "( @" : "(@";
+ Prefix = SignOnNewLine ? "+ @\"" : "@\"";
+ } else {
+ LeftBraceQuote = Style.SpacesInParensOptions.Other ? "( \"" : "(\"";
+ Prefix = SignOnNewLine ? "+ \"" : "\"";
+ }
+ RightBraceQuote = Style.SpacesInParensOptions.Other ? "\" )" : "\")";
+ Postfix = SignOnNewLine ? "\"" : "\" +";
+ }
+ }
+
+ // Following lines are indented by the width of the brace and space if any.
+ ContinuationIndent = BracesNeeded ? LeftBraceQuote.size() - 1 : 0;
+ // The plus sign may need to be unindented depending on the style.
+ // FIXME: Add support for DontAlign.
+ if (!Style.isVerilog() && SignOnNewLine && !BracesNeeded && UnindentPlus &&
+ Style.AlignOperands == FormatStyle::OAS_AlignAfterOperator) {
+ ContinuationIndent -= 2;
+ }
+}
+
+unsigned BreakableStringLiteralUsingOperators::getRemainingLength(
+ unsigned LineIndex, unsigned Offset, unsigned StartColumn) const {
+ return UnbreakableTailLength + (BracesNeeded ? RightBraceQuote.size() : 1) +
+ encoding::columnWidthWithTabs(Line.substr(Offset), StartColumn,
+ Style.TabWidth, Encoding);
+}
+
+unsigned
+BreakableStringLiteralUsingOperators::getContentStartColumn(unsigned LineIndex,
+ bool Break) const {
+ return std::max(
+ 0,
+ static_cast<int>(StartColumn) +
+ (Break ? ContinuationIndent + static_cast<int>(Prefix.size())
+ : (BracesNeeded ? static_cast<int>(LeftBraceQuote.size()) - 1
+ : 0) +
+ (QuoteStyle == AtDoubleQuotes ? 2 : 1)));
+}
+
+void BreakableStringLiteralUsingOperators::insertBreak(
+ unsigned LineIndex, unsigned TailOffset, Split Split,
+ unsigned ContentIndent, WhitespaceManager &Whitespaces) const {
+ Whitespaces.replaceWhitespaceInToken(
+ Tok, /*Offset=*/(QuoteStyle == AtDoubleQuotes ? 2 : 1) + TailOffset +
+ Split.first,
+ /*ReplaceChars=*/Split.second, /*PreviousPostfix=*/Postfix,
+ /*CurrentPrefix=*/Prefix, InPPDirective, /*NewLines=*/1,
+ /*Spaces=*/
+ std::max(0, static_cast<int>(StartColumn) + ContinuationIndent));
+}
+
+void BreakableStringLiteralUsingOperators::updateAfterBroken(
+ WhitespaceManager &Whitespaces) const {
+ // Add the braces required for breaking the token if they are needed.
+ if (!BracesNeeded)
+ return;
+
+ // To add a brace or parenthesis, we replace the quote (or the at sign) with a
+ // brace and another quote. This is because the rest of the program requires
+ // one replacement for each source range. If we replace the empty strings
+ // around the string, it may conflict with whitespace replacements between the
+ // string and adjacent tokens.
+ Whitespaces.replaceWhitespaceInToken(
+ Tok, /*Offset=*/0, /*ReplaceChars=*/1, /*PreviousPostfix=*/"",
+ /*CurrentPrefix=*/LeftBraceQuote, InPPDirective, /*NewLines=*/0,
+ /*Spaces=*/0);
+ Whitespaces.replaceWhitespaceInToken(
+ Tok, /*Offset=*/Tok.TokenText.size() - 1, /*ReplaceChars=*/1,
+ /*PreviousPostfix=*/RightBraceQuote,
+ /*CurrentPrefix=*/"", InPPDirective, /*NewLines=*/0, /*Spaces=*/0);
+}
+
BreakableComment::BreakableComment(const FormatToken &Token,
unsigned StartColumn, bool InPPDirective,
encoding::Encoding Encoding,
@@ -340,7 +454,7 @@ static bool mayReflowContent(StringRef Content) {
bool hasSpecialMeaningPrefix = false;
for (StringRef Prefix :
{"@", "TODO", "FIXME", "XXX", "-# ", "- ", "+ ", "* "}) {
- if (Content.startswith(Prefix)) {
+ if (Content.starts_with(Prefix)) {
hasSpecialMeaningPrefix = true;
break;
}
@@ -357,7 +471,7 @@ static bool mayReflowContent(StringRef Content) {
// characters and either the first or second character must be
// non-punctuation.
return Content.size() >= 2 && !hasSpecialMeaningPrefix &&
- !Content.endswith("\\") &&
+ !Content.ends_with("\\") &&
// Note that this is UTF-8 safe, since if isPunctuation(Content[0]) is
// true, then the first code point must be 1 byte long.
(!isPunctuation(Content[0]) || !isPunctuation(Content[1]));
@@ -374,7 +488,7 @@ BreakableBlockComment::BreakableBlockComment(
"block comment section must start with a block comment");
StringRef TokenText(Tok.TokenText);
- assert(TokenText.startswith("/*") && TokenText.endswith("*/"));
+ assert(TokenText.starts_with("/*") && TokenText.ends_with("*/"));
TokenText.substr(2, TokenText.size() - 4)
.split(Lines, UseCRLF ? "\r\n" : "\n");
@@ -397,7 +511,7 @@ BreakableBlockComment::BreakableBlockComment(
// /*
// ** blah blah blah
// */
- if (Lines.size() >= 2 && Content[1].startswith("**") &&
+ if (Lines.size() >= 2 && Content[1].starts_with("**") &&
static_cast<unsigned>(ContentColumn[1]) == StartColumn) {
DecorationColumn = StartColumn;
}
@@ -411,14 +525,17 @@ BreakableBlockComment::BreakableBlockComment(
// now we just wrap them without stars.
Decoration = "";
}
- for (size_t i = 1, e = Lines.size(); i < e && !Decoration.empty(); ++i) {
- // If the last line is empty, the closing "*/" will have a star.
- if (i + 1 == e && Content[i].empty())
- break;
- if (!Content[i].empty() && i + 1 != e && Decoration.startswith(Content[i]))
+ for (size_t i = 1, e = Content.size(); i < e && !Decoration.empty(); ++i) {
+ const StringRef &Text = Content[i];
+ if (i + 1 == e) {
+ // If the last line is empty, the closing "*/" will have a star.
+ if (Text.empty())
+ break;
+ } else if (!Text.empty() && Decoration.starts_with(Text)) {
continue;
- while (!Content[i].startswith(Decoration))
- Decoration = Decoration.substr(0, Decoration.size() - 1);
+ }
+ while (!Text.starts_with(Decoration))
+ Decoration = Decoration.drop_back(1);
}
LastLineNeedsDecoration = true;
@@ -431,9 +548,8 @@ BreakableBlockComment::BreakableBlockComment(
// correctly indented.
LastLineNeedsDecoration = false;
// Align the star in the last '*/' with the stars on the previous lines.
- if (e >= 2 && !Decoration.empty()) {
+ if (e >= 2 && !Decoration.empty())
ContentColumn[i] = DecorationColumn;
- }
} else if (Decoration.empty()) {
// For all other lines, set the start column to 0 if they're empty, so
// we do not insert trailing whitespace anywhere.
@@ -446,26 +562,25 @@ BreakableBlockComment::BreakableBlockComment(
// The last line excludes the star if LastLineNeedsDecoration is false.
// For all other lines, adjust the line to exclude the star and
// (optionally) the first whitespace.
- unsigned DecorationSize = Decoration.startswith(Content[i])
+ unsigned DecorationSize = Decoration.starts_with(Content[i])
? Content[i].size()
: Decoration.size();
- if (DecorationSize) {
+ if (DecorationSize)
ContentColumn[i] = DecorationColumn + DecorationSize;
- }
Content[i] = Content[i].substr(DecorationSize);
- if (!Decoration.startswith(Content[i]))
+ if (!Decoration.starts_with(Content[i])) {
IndentAtLineBreak =
std::min<int>(IndentAtLineBreak, std::max(0, ContentColumn[i]));
+ }
}
IndentAtLineBreak = std::max<unsigned>(IndentAtLineBreak, Decoration.size());
// Detect a multiline jsdoc comment and set DelimitersOnNewline in that case.
- if (Style.Language == FormatStyle::LK_JavaScript ||
- Style.Language == FormatStyle::LK_Java) {
- if ((Lines[0] == "*" || Lines[0].startswith("* ")) && Lines.size() > 1) {
+ if (Style.isJavaScript() || Style.Language == FormatStyle::LK_Java) {
+ if ((Lines[0] == "*" || Lines[0].starts_with("* ")) && Lines.size() > 1) {
// This is a multiline jsdoc comment.
DelimitersOnNewline = true;
- } else if (Lines[0].startswith("* ") && Lines.size() == 1) {
+ } else if (Lines[0].starts_with("* ") && Lines.size() == 1) {
// Detect a long single-line comment, like:
// /** long long long */
// Below, '2' is the width of '*/'.
@@ -497,7 +612,7 @@ BreakableToken::Split BreakableBlockComment::getSplit(
return Split(StringRef::npos, 0);
return getCommentSplit(Content[LineIndex].substr(TailOffset),
ContentStartColumn, ColumnLimit, Style.TabWidth,
- Encoding, Style, Decoration.endswith("*"));
+ Encoding, Style, Decoration.ends_with("*"));
}
void BreakableBlockComment::adjustWhitespace(unsigned LineIndex,
@@ -508,7 +623,7 @@ void BreakableBlockComment::adjustWhitespace(unsigned LineIndex,
// trimming the trailing whitespace. The backslash will be re-added later when
// inserting a line break.
size_t EndOfPreviousLine = Lines[LineIndex - 1].size();
- if (InPPDirective && Lines[LineIndex - 1].endswith("\\"))
+ if (InPPDirective && Lines[LineIndex - 1].ends_with("\\"))
--EndOfPreviousLine;
// Calculate the end of the non-whitespace text in the previous line.
@@ -541,31 +656,30 @@ unsigned BreakableBlockComment::getRangeLength(unsigned LineIndex,
unsigned Offset,
StringRef::size_type Length,
unsigned StartColumn) const {
+ return encoding::columnWidthWithTabs(
+ Content[LineIndex].substr(Offset, Length), StartColumn, Style.TabWidth,
+ Encoding);
+}
+
+unsigned BreakableBlockComment::getRemainingLength(unsigned LineIndex,
+ unsigned Offset,
+ unsigned StartColumn) const {
unsigned LineLength =
- encoding::columnWidthWithTabs(Content[LineIndex].substr(Offset, Length),
- StartColumn, Style.TabWidth, Encoding);
- // FIXME: This should go into getRemainingLength instead, but we currently
- // break tests when putting it there. Investigate how to fix those tests.
- // The last line gets a "*/" postfix.
+ UnbreakableTailLength +
+ getRangeLength(LineIndex, Offset, StringRef::npos, StartColumn);
if (LineIndex + 1 == Lines.size()) {
LineLength += 2;
// We never need a decoration when breaking just the trailing "*/" postfix.
- // Note that checking that Length == 0 is not enough, since Length could
- // also be StringRef::npos.
- if (Content[LineIndex].substr(Offset, StringRef::npos).empty()) {
- LineLength -= Decoration.size();
+ bool HasRemainingText = Offset < Content[LineIndex].size();
+ if (!HasRemainingText) {
+ bool HasDecoration = Lines[LineIndex].ltrim().starts_with(Decoration);
+ if (HasDecoration)
+ LineLength -= Decoration.size();
}
}
return LineLength;
}
-unsigned BreakableBlockComment::getRemainingLength(unsigned LineIndex,
- unsigned Offset,
- unsigned StartColumn) const {
- return UnbreakableTailLength +
- getRangeLength(LineIndex, Offset, StringRef::npos, StartColumn);
-}
-
unsigned BreakableBlockComment::getContentStartColumn(unsigned LineIndex,
bool Break) const {
if (Break)
@@ -580,20 +694,17 @@ const llvm::StringSet<>
};
unsigned BreakableBlockComment::getContentIndent(unsigned LineIndex) const {
- if (Style.Language != FormatStyle::LK_Java &&
- Style.Language != FormatStyle::LK_JavaScript)
+ if (Style.Language != FormatStyle::LK_Java && !Style.isJavaScript())
return 0;
// The content at LineIndex 0 of a comment like:
// /** line 0 */
// is "* line 0", so we need to skip over the decoration in that case.
StringRef ContentWithNoDecoration = Content[LineIndex];
- if (LineIndex == 0 && ContentWithNoDecoration.startswith("*")) {
+ if (LineIndex == 0 && ContentWithNoDecoration.starts_with("*"))
ContentWithNoDecoration = ContentWithNoDecoration.substr(1).ltrim(Blanks);
- }
StringRef FirstWord = ContentWithNoDecoration.substr(
0, ContentWithNoDecoration.find_first_of(Blanks));
- if (ContentIndentingJavadocAnnotations.find(FirstWord) !=
- ContentIndentingJavadocAnnotations.end())
+ if (ContentIndentingJavadocAnnotations.contains(FirstWord))
return Style.ContinuationIndentWidth;
return 0;
}
@@ -640,8 +751,9 @@ BreakableToken::Split BreakableBlockComment::getReflowSplit(
if (LineIndex) {
unsigned PreviousContentIndent = getContentIndent(LineIndex - 1);
if (PreviousContentIndent && Trimmed != StringRef::npos &&
- Trimmed != PreviousContentIndent)
+ Trimmed != PreviousContentIndent) {
return Split(StringRef::npos, 0);
+ }
}
return Split(0, Trimmed != StringRef::npos ? Trimmed : 0);
@@ -683,9 +795,10 @@ void BreakableBlockComment::adaptStartOfLine(
// Note: this works because getCommentSplit is careful never to split at
// the beginning of a line.
size_t BreakLength = Lines[0].substr(1).find_first_not_of(Blanks);
- if (BreakLength != StringRef::npos)
+ if (BreakLength != StringRef::npos) {
insertBreak(LineIndex, 0, Split(1, BreakLength), /*ContentIndent=*/0,
Whitespaces);
+ }
}
return;
}
@@ -704,11 +817,9 @@ void BreakableBlockComment::adaptStartOfLine(
// contain a trailing whitespace.
Prefix = Prefix.substr(0, 1);
}
- } else {
- if (ContentColumn[LineIndex] == 1) {
- // This line starts immediately after the decorating *.
- Prefix = Prefix.substr(0, 1);
- }
+ } else if (ContentColumn[LineIndex] == 1) {
+ // This line starts immediately after the decorating *.
+ Prefix = Prefix.substr(0, 1);
}
// This is the offset of the end of the last line relative to the start of the
// token text in the token.
@@ -742,9 +853,8 @@ bool BreakableBlockComment::mayReflow(
// Content[LineIndex] may exclude the indent after the '*' decoration. In that
// case, we compute the start of the comment pragma manually.
StringRef IndentContent = Content[LineIndex];
- if (Lines[LineIndex].ltrim(Blanks).startswith("*")) {
+ if (Lines[LineIndex].ltrim(Blanks).starts_with("*"))
IndentContent = Lines[LineIndex].ltrim(Blanks).substr(1);
- }
return LineIndex > 0 && !CommentPragmasRegex.match(IndentContent) &&
mayReflowContent(Content[LineIndex]) && !Tok.Finalized &&
!switchesFormatting(tokenAt(LineIndex));
@@ -757,6 +867,7 @@ BreakableLineCommentSection::BreakableLineCommentSection(
assert(Tok.is(TT_LineComment) &&
"line comment section must start with a line comment");
FormatToken *LineTok = nullptr;
+ const int Minimum = Style.SpacesInLineCommentPrefix.Minimum;
// How many spaces we changed in the first line of the section, this will be
// applied in all following lines
int FirstLineSpaceChange = 0;
@@ -765,7 +876,7 @@ BreakableLineCommentSection::BreakableLineCommentSection(
CurrentTok = CurrentTok->Next) {
LastLineTok = LineTok;
StringRef TokenText(CurrentTok->TokenText);
- assert((TokenText.startswith("//") || TokenText.startswith("#")) &&
+ assert((TokenText.starts_with("//") || TokenText.starts_with("#")) &&
"unsupported line comment prefix, '//' and '#' are supported");
size_t FirstLineIndex = Lines.size();
TokenText.split(Lines, "\n");
@@ -779,8 +890,34 @@ BreakableLineCommentSection::BreakableLineCommentSection(
Lines[i] = Lines[i].ltrim(Blanks);
StringRef IndentPrefix = getLineCommentIndentPrefix(Lines[i], Style);
OriginalPrefix[i] = IndentPrefix;
- const unsigned SpacesInPrefix =
- std::count(IndentPrefix.begin(), IndentPrefix.end(), ' ');
+ const int SpacesInPrefix = llvm::count(IndentPrefix, ' ');
+
+ // This lambda also considers multibyte character that is not handled in
+ // functions like isPunctuation provided by CharInfo.
+ const auto NoSpaceBeforeFirstCommentChar = [&]() {
+ assert(Lines[i].size() > IndentPrefix.size());
+ const char FirstCommentChar = Lines[i][IndentPrefix.size()];
+ const unsigned FirstCharByteSize =
+ encoding::getCodePointNumBytes(FirstCommentChar, Encoding);
+ if (encoding::columnWidth(
+ Lines[i].substr(IndentPrefix.size(), FirstCharByteSize),
+ Encoding) != 1) {
+ return false;
+ }
+ // In C-like comments, add a space before #. For example this is useful
+ // to preserve the relative indentation when commenting out code with
+ // #includes.
+ //
+ // In languages using # as the comment leader such as proto, don't
+ // add a space to support patterns like:
+ // #########
+ // # section
+ // #########
+ if (FirstCommentChar == '#' && !TokenText.starts_with("#"))
+ return false;
+ return FirstCommentChar == '\\' || isPunctuation(FirstCommentChar) ||
+ isHorizontalWhitespace(FirstCommentChar);
+ };
// On the first line of the comment section we calculate how many spaces
// are to be added or removed, all lines after that just get only the
@@ -789,12 +926,11 @@ BreakableLineCommentSection::BreakableLineCommentSection(
// e.g. from "///" to "//".
if (i == 0 || OriginalPrefix[i].rtrim(Blanks) !=
OriginalPrefix[i - 1].rtrim(Blanks)) {
- if (SpacesInPrefix < Style.SpacesInLineCommentPrefix.Minimum &&
- Lines[i].size() > IndentPrefix.size() &&
- isAlphanumeric(Lines[i][IndentPrefix.size()])) {
- FirstLineSpaceChange =
- Style.SpacesInLineCommentPrefix.Minimum - SpacesInPrefix;
- } else if (SpacesInPrefix > Style.SpacesInLineCommentPrefix.Maximum) {
+ if (SpacesInPrefix < Minimum && Lines[i].size() > IndentPrefix.size() &&
+ !NoSpaceBeforeFirstCommentChar()) {
+ FirstLineSpaceChange = Minimum - SpacesInPrefix;
+ } else if (static_cast<unsigned>(SpacesInPrefix) >
+ Style.SpacesInLineCommentPrefix.Maximum) {
FirstLineSpaceChange =
Style.SpacesInLineCommentPrefix.Maximum - SpacesInPrefix;
} else {
@@ -805,18 +941,20 @@ BreakableLineCommentSection::BreakableLineCommentSection(
if (Lines[i].size() != IndentPrefix.size()) {
PrefixSpaceChange[i] = FirstLineSpaceChange;
- if (SpacesInPrefix + PrefixSpaceChange[i] <
- Style.SpacesInLineCommentPrefix.Minimum) {
- PrefixSpaceChange[i] += Style.SpacesInLineCommentPrefix.Minimum -
- (SpacesInPrefix + PrefixSpaceChange[i]);
+ if (SpacesInPrefix + PrefixSpaceChange[i] < Minimum) {
+ PrefixSpaceChange[i] +=
+ Minimum - (SpacesInPrefix + PrefixSpaceChange[i]);
}
assert(Lines[i].size() > IndentPrefix.size());
const auto FirstNonSpace = Lines[i][IndentPrefix.size()];
- const auto AllowsSpaceChange =
- SpacesInPrefix != 0 ||
- (isAlphanumeric(FirstNonSpace) ||
- (FirstNonSpace == '}' && FirstLineSpaceChange != 0));
+ const bool IsFormatComment = LineTok && switchesFormatting(*LineTok);
+ const bool LineRequiresLeadingSpace =
+ !NoSpaceBeforeFirstCommentChar() ||
+ (FirstNonSpace == '}' && FirstLineSpaceChange != 0);
+ const bool AllowsSpaceChange =
+ !IsFormatComment &&
+ (SpacesInPrefix != 0 || LineRequiresLeadingSpace);
if (PrefixSpaceChange[i] > 0 && AllowsSpaceChange) {
Prefix[i] = IndentPrefix.str();
@@ -1005,9 +1143,8 @@ void BreakableLineCommentSection::adaptStartOfLine(
}
void BreakableLineCommentSection::updateNextToken(LineState &State) const {
- if (LastLineTok) {
+ if (LastLineTok)
State.NextToken = LastLineTok->Next;
- }
}
bool BreakableLineCommentSection::mayReflow(
@@ -1015,9 +1152,8 @@ bool BreakableLineCommentSection::mayReflow(
// Line comments have the indent as part of the prefix, so we need to
// recompute the start of the line.
StringRef IndentContent = Content[LineIndex];
- if (Lines[LineIndex].startswith("//")) {
+ if (Lines[LineIndex].starts_with("//"))
IndentContent = Lines[LineIndex].substr(2);
- }
// FIXME: Decide whether we want to reflow non-regular indents:
// Currently, we only reflow when the OriginalPrefix[LineIndex] matches the
// OriginalPrefix[LineIndex-1]. That means we don't reflow
diff --git a/contrib/llvm-project/clang/lib/Format/BreakableToken.h b/contrib/llvm-project/clang/lib/Format/BreakableToken.h
index 190144ad1be9..e7c0680641e2 100644
--- a/contrib/llvm-project/clang/lib/Format/BreakableToken.h
+++ b/contrib/llvm-project/clang/lib/Format/BreakableToken.h
@@ -230,6 +230,11 @@ public:
/// as a unit and is responsible for the formatting of the them.
virtual void updateNextToken(LineState &State) const {}
+ /// Adds replacements that are needed when the token is broken. Such as
+ /// wrapping a JavaScript string in parentheses after it gets broken with plus
+ /// signs.
+ virtual void updateAfterBroken(WhitespaceManager &Whitespaces) const {}
+
protected:
BreakableToken(const FormatToken &Tok, bool InPPDirective,
encoding::Encoding Encoding, const FormatStyle &Style)
@@ -283,6 +288,45 @@ protected:
unsigned UnbreakableTailLength;
};
+class BreakableStringLiteralUsingOperators : public BreakableStringLiteral {
+public:
+ enum QuoteStyleType {
+ DoubleQuotes, // The string is quoted with double quotes.
+ SingleQuotes, // The JavaScript string is quoted with single quotes.
+ AtDoubleQuotes, // The C# verbatim string is quoted with the at sign and
+ // double quotes.
+ };
+ /// Creates a breakable token for a single line string literal for C#, Java,
+ /// JavaScript, or Verilog.
+ ///
+ /// \p StartColumn specifies the column in which the token will start
+ /// after formatting.
+ BreakableStringLiteralUsingOperators(
+ const FormatToken &Tok, QuoteStyleType QuoteStyle, bool UnindentPlus,
+ unsigned StartColumn, unsigned UnbreakableTailLength, bool InPPDirective,
+ encoding::Encoding Encoding, const FormatStyle &Style);
+ unsigned getRemainingLength(unsigned LineIndex, unsigned Offset,
+ unsigned StartColumn) const override;
+ unsigned getContentStartColumn(unsigned LineIndex, bool Break) const override;
+ void insertBreak(unsigned LineIndex, unsigned TailOffset, Split Split,
+ unsigned ContentIndent,
+ WhitespaceManager &Whitespaces) const override;
+ void updateAfterBroken(WhitespaceManager &Whitespaces) const override;
+
+protected:
+ // Whether braces or parentheses should be inserted around the string to form
+ // a concatenation.
+ bool BracesNeeded;
+ QuoteStyleType QuoteStyle;
+ // The braces or parentheses along with the first character which they
+ // replace, either a quote or at sign.
+ StringRef LeftBraceQuote;
+ StringRef RightBraceQuote;
+ // Width added to the left due to the added brace or parenthesis. Does not
+ // apply to the first line.
+ int ContinuationIndent;
+};
+
class BreakableComment : public BreakableToken {
protected:
/// Creates a breakable token for a comment.
diff --git a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp
index 8fbc15f27922..a3eb9138b218 100644
--- a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp
+++ b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp
@@ -14,11 +14,15 @@
#include "ContinuationIndenter.h"
#include "BreakableToken.h"
#include "FormatInternal.h"
+#include "FormatToken.h"
#include "WhitespaceManager.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TokenKinds.h"
#include "clang/Format/Format.h"
+#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Debug.h"
+#include <optional>
#define DEBUG_TYPE "format-indenter"
@@ -32,10 +36,18 @@ static bool shouldIndentWrappedSelectorName(const FormatStyle &Style,
return Style.IndentWrappedFunctionNames || LineType == LT_ObjCMethodDecl;
}
+// Returns true if a binary operator following \p Tok should be unindented when
+// the style permits it.
+static bool shouldUnindentNextOperator(const FormatToken &Tok) {
+ const FormatToken *Previous = Tok.getPreviousNonComment();
+ return Previous && (Previous->getPrecedence() == prec::Assignment ||
+ Previous->isOneOf(tok::kw_return, TT_RequiresClause));
+}
+
// Returns the length of everything up to the first possible line break after
// the ), ], } or > matching \c Tok.
static unsigned getLengthToMatchingParen(const FormatToken &Tok,
- const std::vector<ParenState> &Stack) {
+ ArrayRef<ParenState> Stack) {
// Normally whether or not a break before T is possible is calculated and
// stored in T.CanBreakBefore. Braces, array initializers and text proto
// messages like `key: < ... >` are an exception: a break is possible
@@ -121,8 +133,9 @@ static bool startsNextParameter(const FormatToken &Current,
const FormatStyle &Style) {
const FormatToken &Previous = *Current.Previous;
if (Current.is(TT_CtorInitializerComma) &&
- Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma)
+ Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma) {
return true;
+ }
if (Style.Language == FormatStyle::LK_Proto && Current.is(TT_SelectorName))
return true;
return Previous.is(tok::comma) && !Current.isTrailingComment() &&
@@ -143,28 +156,29 @@ static bool opensProtoMessageField(const FormatToken &LessTok,
(LessTok.Previous && LessTok.Previous->is(tok::equal))));
}
-// Returns the delimiter of a raw string literal, or None if TokenText is not
-// the text of a raw string literal. The delimiter could be the empty string.
-// For example, the delimiter of R"deli(cont)deli" is deli.
-static llvm::Optional<StringRef> getRawStringDelimiter(StringRef TokenText) {
+// Returns the delimiter of a raw string literal, or std::nullopt if TokenText
+// is not the text of a raw string literal. The delimiter could be the empty
+// string. For example, the delimiter of R"deli(cont)deli" is deli.
+static std::optional<StringRef> getRawStringDelimiter(StringRef TokenText) {
if (TokenText.size() < 5 // The smallest raw string possible is 'R"()"'.
- || !TokenText.startswith("R\"") || !TokenText.endswith("\""))
- return None;
+ || !TokenText.starts_with("R\"") || !TokenText.ends_with("\"")) {
+ return std::nullopt;
+ }
// A raw string starts with 'R"<delimiter>(' and delimiter is ascii and has
// size at most 16 by the standard, so the first '(' must be among the first
// 19 bytes.
size_t LParenPos = TokenText.substr(0, 19).find_first_of('(');
if (LParenPos == StringRef::npos)
- return None;
+ return std::nullopt;
StringRef Delimiter = TokenText.substr(2, LParenPos - 2);
// Check that the string ends in ')Delimiter"'.
size_t RParenPos = TokenText.size() - Delimiter.size() - 2;
if (TokenText[RParenPos] != ')')
- return None;
- if (!TokenText.substr(RParenPos + 1).startswith(Delimiter))
- return None;
+ return std::nullopt;
+ if (!TokenText.substr(RParenPos + 1).starts_with(Delimiter))
+ return std::nullopt;
return Delimiter;
}
@@ -173,17 +187,16 @@ static llvm::Optional<StringRef> getRawStringDelimiter(StringRef TokenText) {
static StringRef
getCanonicalRawStringDelimiter(const FormatStyle &Style,
FormatStyle::LanguageKind Language) {
- for (const auto &Format : Style.RawStringFormats) {
+ for (const auto &Format : Style.RawStringFormats)
if (Format.Language == Language)
return StringRef(Format.CanonicalDelimiter);
- }
return "";
}
RawStringFormatStyleManager::RawStringFormatStyleManager(
const FormatStyle &CodeStyle) {
for (const auto &RawStringFormat : CodeStyle.RawStringFormats) {
- llvm::Optional<FormatStyle> LanguageStyle =
+ std::optional<FormatStyle> LanguageStyle =
CodeStyle.GetLanguageStyle(RawStringFormat.Language);
if (!LanguageStyle) {
FormatStyle PredefinedStyle;
@@ -195,29 +208,27 @@ RawStringFormatStyleManager::RawStringFormatStyleManager(
LanguageStyle = PredefinedStyle;
}
LanguageStyle->ColumnLimit = CodeStyle.ColumnLimit;
- for (StringRef Delimiter : RawStringFormat.Delimiters) {
+ for (StringRef Delimiter : RawStringFormat.Delimiters)
DelimiterStyle.insert({Delimiter, *LanguageStyle});
- }
- for (StringRef EnclosingFunction : RawStringFormat.EnclosingFunctions) {
+ for (StringRef EnclosingFunction : RawStringFormat.EnclosingFunctions)
EnclosingFunctionStyle.insert({EnclosingFunction, *LanguageStyle});
- }
}
}
-llvm::Optional<FormatStyle>
+std::optional<FormatStyle>
RawStringFormatStyleManager::getDelimiterStyle(StringRef Delimiter) const {
auto It = DelimiterStyle.find(Delimiter);
if (It == DelimiterStyle.end())
- return None;
+ return std::nullopt;
return It->second;
}
-llvm::Optional<FormatStyle>
+std::optional<FormatStyle>
RawStringFormatStyleManager::getEnclosingFunctionStyle(
StringRef EnclosingFunction) const {
auto It = EnclosingFunctionStyle.find(EnclosingFunction);
if (It == EnclosingFunctionStyle.end())
- return None;
+ return std::nullopt;
return It->second;
}
@@ -247,16 +258,17 @@ LineState ContinuationIndenter::getInitialState(unsigned FirstIndent,
// preprocessor indent.
if (Style.IndentPPDirectives == FormatStyle::PPDIS_AfterHash &&
(Line->Type == LT_PreprocessorDirective ||
- Line->Type == LT_ImportStatement))
+ Line->Type == LT_ImportStatement)) {
State.Column = 0;
+ }
State.Line = Line;
State.NextToken = Line->First;
State.Stack.push_back(ParenState(/*Tok=*/nullptr, FirstIndent, FirstIndent,
/*AvoidBinPacking=*/false,
/*NoLineBreak=*/false));
- State.LineContainsContinuedForLoopSection = false;
State.NoContinuation = false;
State.StartOfStringLiteral = 0;
+ State.NoLineBreak = false;
State.StartOfLineLevel = 0;
State.LowestLevelOnLine = 0;
State.IgnoreStackForComparison = false;
@@ -264,9 +276,10 @@ LineState ContinuationIndenter::getInitialState(unsigned FirstIndent,
if (Style.Language == FormatStyle::LK_TextProto) {
// We need this in order to deal with the bin packing of text fields at
// global scope.
- State.Stack.back().AvoidBinPacking = true;
- State.Stack.back().BreakBeforeParameter = true;
- State.Stack.back().AlignColons = false;
+ auto &CurrentState = State.Stack.back();
+ CurrentState.AvoidBinPacking = true;
+ CurrentState.BreakBeforeParameter = true;
+ CurrentState.AlignColons = false;
}
// The first token has already been indented and thus consumed.
@@ -277,17 +290,20 @@ LineState ContinuationIndenter::getInitialState(unsigned FirstIndent,
bool ContinuationIndenter::canBreak(const LineState &State) {
const FormatToken &Current = *State.NextToken;
const FormatToken &Previous = *Current.Previous;
+ const auto &CurrentState = State.Stack.back();
assert(&Previous == Current.Previous);
- if (!Current.CanBreakBefore && !(State.Stack.back().BreakBeforeClosingBrace &&
- Current.closesBlockOrBlockTypeList(Style)))
+ if (!Current.CanBreakBefore && !(CurrentState.BreakBeforeClosingBrace &&
+ Current.closesBlockOrBlockTypeList(Style))) {
return false;
+ }
// The opening "{" of a braced list has to be on the same line as the first
// element if it is nested in another braced init list or function call.
if (!Current.MustBreakBefore && Previous.is(tok::l_brace) &&
Previous.isNot(TT_DictLiteral) && Previous.is(BK_BracedInit) &&
Previous.Previous &&
- Previous.Previous->isOneOf(tok::l_brace, tok::l_paren, tok::comma))
+ Previous.Previous->isOneOf(tok::l_brace, tok::l_paren, tok::comma)) {
return false;
+ }
// This prevents breaks like:
// ...
// SomeParameter, OtherParameter).DoSomething(
@@ -295,17 +311,20 @@ bool ContinuationIndenter::canBreak(const LineState &State) {
// As they hide "DoSomething" and are generally bad for readability.
if (Previous.opensScope() && Previous.isNot(tok::l_brace) &&
State.LowestLevelOnLine < State.StartOfLineLevel &&
- State.LowestLevelOnLine < Current.NestingLevel)
+ State.LowestLevelOnLine < Current.NestingLevel) {
return false;
- if (Current.isMemberAccess() && State.Stack.back().ContainsUnwrappedBuilder)
+ }
+ if (Current.isMemberAccess() && CurrentState.ContainsUnwrappedBuilder)
return false;
// Don't create a 'hanging' indent if there are multiple blocks in a single
- // statement.
+ // statement and we are aligning lambda blocks to their signatures.
if (Previous.is(tok::l_brace) && State.Stack.size() > 1 &&
State.Stack[State.Stack.size() - 2].NestedBlockInlined &&
- State.Stack[State.Stack.size() - 2].HasMultipleNestedBlocks)
+ State.Stack[State.Stack.size() - 2].HasMultipleNestedBlocks &&
+ Style.LambdaBodyIndentation == FormatStyle::LBI_Signature) {
return false;
+ }
// Don't break after very short return types (e.g. "void") as that is often
// unexpected.
@@ -317,29 +336,53 @@ bool ContinuationIndenter::canBreak(const LineState &State) {
// If binary operators are moved to the next line (including commas for some
// styles of constructor initializers), that's always ok.
if (!Current.isOneOf(TT_BinaryOperator, tok::comma) &&
- State.Stack.back().NoLineBreakInOperand)
+ // Allow breaking opening brace of lambdas (when passed as function
+ // arguments) to a new line when BeforeLambdaBody brace wrapping is
+ // enabled.
+ (!Style.BraceWrapping.BeforeLambdaBody ||
+ Current.isNot(TT_LambdaLBrace)) &&
+ CurrentState.NoLineBreakInOperand) {
return false;
+ }
if (Previous.is(tok::l_square) && Previous.is(TT_ObjCMethodExpr))
return false;
- return !State.Stack.back().NoLineBreak;
+ if (Current.is(TT_ConditionalExpr) && Previous.is(tok::r_paren) &&
+ Previous.MatchingParen && Previous.MatchingParen->Previous &&
+ Previous.MatchingParen->Previous->MatchingParen &&
+ Previous.MatchingParen->Previous->MatchingParen->is(TT_LambdaLBrace)) {
+ // We have a lambda within a conditional expression, allow breaking here.
+ assert(Previous.MatchingParen->Previous->is(tok::r_brace));
+ return true;
+ }
+
+ return !State.NoLineBreak && !CurrentState.NoLineBreak;
}
bool ContinuationIndenter::mustBreak(const LineState &State) {
const FormatToken &Current = *State.NextToken;
const FormatToken &Previous = *Current.Previous;
+ const auto &CurrentState = State.Stack.back();
if (Style.BraceWrapping.BeforeLambdaBody && Current.CanBreakBefore &&
Current.is(TT_LambdaLBrace) && Previous.isNot(TT_LineComment)) {
auto LambdaBodyLength = getLengthToMatchingParen(Current, State.Stack);
- return (LambdaBodyLength > getColumnLimit(State));
+ return LambdaBodyLength > getColumnLimit(State);
}
- if (Current.MustBreakBefore || Current.is(TT_InlineASMColon))
+ if (Current.MustBreakBefore ||
+ (Current.is(TT_InlineASMColon) &&
+ (Style.BreakBeforeInlineASMColon == FormatStyle::BBIAS_Always ||
+ (Style.BreakBeforeInlineASMColon == FormatStyle::BBIAS_OnlyMultiline &&
+ Style.ColumnLimit > 0)))) {
return true;
- if (State.Stack.back().BreakBeforeClosingBrace &&
- Current.closesBlockOrBlockTypeList(Style))
+ }
+ if (CurrentState.BreakBeforeClosingBrace &&
+ (Current.closesBlockOrBlockTypeList(Style) ||
+ (Current.is(tok::r_brace) &&
+ Current.isBlockIndentedInitRBrace(Style)))) {
return true;
- if (Previous.is(tok::semi) && State.LineContainsContinuedForLoopSection)
+ }
+ if (CurrentState.BreakBeforeClosingParen && Current.is(tok::r_paren))
return true;
if (Style.Language == FormatStyle::LK_ObjC &&
Style.ObjCBreakBeforeNestedBlockParam &&
@@ -349,12 +392,13 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
}
// Avoid producing inconsistent states by requiring breaks where they are not
// permitted for C# generic type constraints.
- if (State.Stack.back().IsCSharpGenericTypeConstraint &&
- Previous.isNot(TT_CSharpGenericTypeConstraintComma))
+ if (CurrentState.IsCSharpGenericTypeConstraint &&
+ Previous.isNot(TT_CSharpGenericTypeConstraintComma)) {
return false;
+ }
if ((startsNextParameter(Current, Style) || Previous.is(tok::semi) ||
(Previous.is(TT_TemplateCloser) && Current.is(TT_StartOfName) &&
- Style.isCpp() &&
+ State.Line->First->isNot(TT_AttributeSquare) && Style.isCpp() &&
// FIXME: This is a temporary workaround for the case where clang-format
// sets BreakBeforeParameter to avoid bin packing and this creates a
// completely unnecessary line break after a template type that isn't
@@ -364,23 +408,26 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
Previous.isNot(tok::question)) ||
(!Style.BreakBeforeTernaryOperators &&
Previous.is(TT_ConditionalExpr))) &&
- State.Stack.back().BreakBeforeParameter && !Current.isTrailingComment() &&
- !Current.isOneOf(tok::r_paren, tok::r_brace))
+ CurrentState.BreakBeforeParameter && !Current.isTrailingComment() &&
+ !Current.isOneOf(tok::r_paren, tok::r_brace)) {
return true;
- if (State.Stack.back().IsChainedConditional &&
+ }
+ if (CurrentState.IsChainedConditional &&
((Style.BreakBeforeTernaryOperators && Current.is(TT_ConditionalExpr) &&
Current.is(tok::colon)) ||
(!Style.BreakBeforeTernaryOperators && Previous.is(TT_ConditionalExpr) &&
- Previous.is(tok::colon))))
+ Previous.is(tok::colon)))) {
return true;
+ }
if (((Previous.is(TT_DictLiteral) && Previous.is(tok::l_brace)) ||
(Previous.is(TT_ArrayInitializerLSquare) &&
Previous.ParameterCount > 1) ||
opensProtoMessageField(Previous, Style)) &&
Style.ColumnLimit > 0 &&
getLengthToMatchingParen(Previous, State.Stack) + State.Column - 1 >
- getColumnLimit(State))
+ getColumnLimit(State)) {
return true;
+ }
const FormatToken &BreakConstructorInitializersToken =
Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon
@@ -389,49 +436,54 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
if (BreakConstructorInitializersToken.is(TT_CtorInitializerColon) &&
(State.Column + State.Line->Last->TotalLength - Previous.TotalLength >
getColumnLimit(State) ||
- State.Stack.back().BreakBeforeParameter) &&
+ CurrentState.BreakBeforeParameter) &&
+ (!Current.isTrailingComment() || Current.NewlinesBefore > 0) &&
(Style.AllowShortFunctionsOnASingleLine != FormatStyle::SFS_All ||
Style.BreakConstructorInitializers != FormatStyle::BCIS_BeforeColon ||
- Style.ColumnLimit != 0))
+ Style.ColumnLimit != 0)) {
return true;
+ }
- if (Current.is(TT_ObjCMethodExpr) && !Previous.is(TT_SelectorName) &&
- State.Line->startsWith(TT_ObjCMethodSpecifier))
+ if (Current.is(TT_ObjCMethodExpr) && Previous.isNot(TT_SelectorName) &&
+ State.Line->startsWith(TT_ObjCMethodSpecifier)) {
return true;
- if (Current.is(TT_SelectorName) && !Previous.is(tok::at) &&
- State.Stack.back().ObjCSelectorNameFound &&
- State.Stack.back().BreakBeforeParameter &&
+ }
+ if (Current.is(TT_SelectorName) && Previous.isNot(tok::at) &&
+ CurrentState.ObjCSelectorNameFound && CurrentState.BreakBeforeParameter &&
(Style.ObjCBreakBeforeNestedBlockParam ||
- !Current.startsSequence(TT_SelectorName, tok::colon, tok::caret)))
+ !Current.startsSequence(TT_SelectorName, tok::colon, tok::caret))) {
return true;
+ }
unsigned NewLineColumn = getNewLineColumn(State);
if (Current.isMemberAccess() && Style.ColumnLimit != 0 &&
State.Column + getLengthToNextOperator(Current) > Style.ColumnLimit &&
(State.Column > NewLineColumn ||
- Current.NestingLevel < State.StartOfLineLevel))
+ Current.NestingLevel < State.StartOfLineLevel)) {
return true;
+ }
if (startsSegmentOfBuilderTypeCall(Current) &&
- (State.Stack.back().CallContinuation != 0 ||
- State.Stack.back().BreakBeforeParameter) &&
+ (CurrentState.CallContinuation != 0 ||
+ CurrentState.BreakBeforeParameter) &&
// JavaScript is treated different here as there is a frequent pattern:
// SomeFunction(function() {
// ...
// }.bind(...));
// FIXME: We should find a more generic solution to this problem.
- !(State.Column <= NewLineColumn &&
- Style.Language == FormatStyle::LK_JavaScript) &&
- !(Previous.closesScopeAfterBlock() && State.Column <= NewLineColumn))
+ !(State.Column <= NewLineColumn && Style.isJavaScript()) &&
+ !(Previous.closesScopeAfterBlock() && State.Column <= NewLineColumn)) {
return true;
+ }
// If the template declaration spans multiple lines, force wrap before the
- // function/class declaration
- if (Previous.ClosesTemplateDeclaration &&
- State.Stack.back().BreakBeforeParameter && Current.CanBreakBefore)
+ // function/class declaration.
+ if (Previous.ClosesTemplateDeclaration && CurrentState.BreakBeforeParameter &&
+ Current.CanBreakBefore) {
return true;
+ }
- if (!State.Line->First->is(tok::kw_enum) && State.Column <= NewLineColumn)
+ if (State.Line->First->isNot(tok::kw_enum) && State.Column <= NewLineColumn)
return false;
if (Style.AlwaysBreakBeforeMultilineStrings &&
@@ -440,78 +492,124 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
!Previous.isOneOf(tok::kw_return, tok::lessless, tok::at,
Keywords.kw_dollar) &&
!Previous.isOneOf(TT_InlineASMColon, TT_ConditionalExpr) &&
- nextIsMultilineString(State))
+ nextIsMultilineString(State)) {
return true;
+ }
// Using CanBreakBefore here and below takes care of the decision whether the
// current style uses wrapping before or after operators for the given
// operator.
if (Previous.is(TT_BinaryOperator) && Current.CanBreakBefore) {
- // If we need to break somewhere inside the LHS of a binary expression, we
- // should also break after the operator. Otherwise, the formatting would
- // hide the operator precedence, e.g. in:
- // if (aaaaaaaaaaaaaa ==
- // bbbbbbbbbbbbbb && c) {..
- // For comparisons, we only apply this rule, if the LHS is a binary
- // expression itself as otherwise, the line breaks seem superfluous.
- // We need special cases for ">>" which we have split into two ">" while
- // lexing in order to make template parsing easier.
- bool IsComparison = (Previous.getPrecedence() == prec::Relational ||
- Previous.getPrecedence() == prec::Equality ||
- Previous.getPrecedence() == prec::Spaceship) &&
- Previous.Previous &&
- Previous.Previous->isNot(TT_BinaryOperator); // For >>.
- bool LHSIsBinaryExpr =
- Previous.Previous && Previous.Previous->EndsBinaryExpression;
- if ((!IsComparison || LHSIsBinaryExpr) && !Current.isTrailingComment() &&
- Previous.getPrecedence() != prec::Assignment &&
- State.Stack.back().BreakBeforeParameter)
- return true;
+ const auto PreviousPrecedence = Previous.getPrecedence();
+ if (PreviousPrecedence != prec::Assignment &&
+ CurrentState.BreakBeforeParameter && !Current.isTrailingComment()) {
+ const bool LHSIsBinaryExpr =
+ Previous.Previous && Previous.Previous->EndsBinaryExpression;
+ if (LHSIsBinaryExpr)
+ return true;
+ // If we need to break somewhere inside the LHS of a binary expression, we
+ // should also break after the operator. Otherwise, the formatting would
+ // hide the operator precedence, e.g. in:
+ // if (aaaaaaaaaaaaaa ==
+ // bbbbbbbbbbbbbb && c) {..
+ // For comparisons, we only apply this rule, if the LHS is a binary
+ // expression itself as otherwise, the line breaks seem superfluous.
+ // We need special cases for ">>" which we have split into two ">" while
+ // lexing in order to make template parsing easier.
+ const bool IsComparison =
+ (PreviousPrecedence == prec::Relational ||
+ PreviousPrecedence == prec::Equality ||
+ PreviousPrecedence == prec::Spaceship) &&
+ Previous.Previous &&
+ Previous.Previous->isNot(TT_BinaryOperator); // For >>.
+ if (!IsComparison)
+ return true;
+ }
} else if (Current.is(TT_BinaryOperator) && Current.CanBreakBefore &&
- State.Stack.back().BreakBeforeParameter) {
+ CurrentState.BreakBeforeParameter) {
return true;
}
// Same as above, but for the first "<<" operator.
if (Current.is(tok::lessless) && Current.isNot(TT_OverloadedOperator) &&
- State.Stack.back().BreakBeforeParameter &&
- State.Stack.back().FirstLessLess == 0)
+ CurrentState.BreakBeforeParameter && CurrentState.FirstLessLess == 0) {
return true;
+ }
if (Current.NestingLevel == 0 && !Current.isTrailingComment()) {
- // Always break after "template <...>" and leading annotations. This is only
- // for cases where the entire line does not fit on a single line as a
+ // Always break after "template <...>"(*) and leading annotations. This is
+ // only for cases where the entire line does not fit on a single line as a
// different LineFormatter would be used otherwise.
- if (Previous.ClosesTemplateDeclaration)
+ // *: Except when another option interferes with that, like concepts.
+ if (Previous.ClosesTemplateDeclaration) {
+ if (Current.is(tok::kw_concept)) {
+ switch (Style.BreakBeforeConceptDeclarations) {
+ case FormatStyle::BBCDS_Allowed:
+ break;
+ case FormatStyle::BBCDS_Always:
+ return true;
+ case FormatStyle::BBCDS_Never:
+ return false;
+ }
+ }
+ if (Current.is(TT_RequiresClause)) {
+ switch (Style.RequiresClausePosition) {
+ case FormatStyle::RCPS_SingleLine:
+ case FormatStyle::RCPS_WithPreceding:
+ return false;
+ default:
+ return true;
+ }
+ }
return Style.AlwaysBreakTemplateDeclarations != FormatStyle::BTDS_No;
- if (Previous.is(TT_FunctionAnnotationRParen))
+ }
+ if (Previous.is(TT_FunctionAnnotationRParen) &&
+ State.Line->Type != LT_PreprocessorDirective) {
return true;
+ }
if (Previous.is(TT_LeadingJavaAnnotation) && Current.isNot(tok::l_paren) &&
- Current.isNot(TT_LeadingJavaAnnotation))
+ Current.isNot(TT_LeadingJavaAnnotation)) {
return true;
+ }
}
- // If the return type spans multiple lines, wrap before the function name.
- if (((Current.is(TT_FunctionDeclarationName) &&
- // Don't break before a C# function when no break after return type
- (!Style.isCSharp() ||
- Style.AlwaysBreakAfterReturnType != FormatStyle::RTBS_None)) ||
- (Current.is(tok::kw_operator) && !Previous.is(tok::coloncolon))) &&
- !Previous.is(tok::kw_template) && State.Stack.back().BreakBeforeParameter)
+ if (Style.isJavaScript() && Previous.is(tok::r_paren) &&
+ Previous.is(TT_JavaAnnotation)) {
+ // Break after the closing parenthesis of TypeScript decorators before
+ // functions, getters and setters.
+ static const llvm::StringSet<> BreakBeforeDecoratedTokens = {"get", "set",
+ "function"};
+ if (BreakBeforeDecoratedTokens.contains(Current.TokenText))
+ return true;
+ }
+
+ if (Current.is(TT_FunctionDeclarationName) &&
+ !State.Line->ReturnTypeWrapped &&
+ // Don't break before a C# function when no break after return type.
+ (!Style.isCSharp() ||
+ Style.AlwaysBreakAfterReturnType != FormatStyle::RTBS_None) &&
+ // Don't always break between a JavaScript `function` and the function
+ // name.
+ !Style.isJavaScript() && Previous.isNot(tok::kw_template) &&
+ CurrentState.BreakBeforeParameter) {
return true;
+ }
// The following could be precomputed as they do not depend on the state.
// However, as they should take effect only if the UnwrappedLine does not fit
// into the ColumnLimit, they are checked here in the ContinuationIndenter.
if (Style.ColumnLimit != 0 && Previous.is(BK_Block) &&
- Previous.is(tok::l_brace) && !Current.isOneOf(tok::r_brace, tok::comment))
+ Previous.is(tok::l_brace) &&
+ !Current.isOneOf(tok::r_brace, tok::comment)) {
return true;
+ }
if (Current.is(tok::lessless) &&
((Previous.is(tok::identifier) && Previous.TokenText == "endl") ||
- (Previous.Tok.isLiteral() && (Previous.TokenText.endswith("\\n\"") ||
- Previous.TokenText == "\'\\n\'"))))
+ (Previous.Tok.isLiteral() && (Previous.TokenText.ends_with("\\n\"") ||
+ Previous.TokenText == "\'\\n\'")))) {
return true;
+ }
if (Previous.is(TT_BlockComment) && Previous.IsMultiline)
return true;
@@ -526,14 +624,16 @@ unsigned ContinuationIndenter::addTokenToState(LineState &State, bool Newline,
bool DryRun,
unsigned ExtraSpaces) {
const FormatToken &Current = *State.NextToken;
+ assert(State.NextToken->Previous);
+ const FormatToken &Previous = *State.NextToken->Previous;
assert(!State.Stack.empty());
State.NoContinuation = false;
- if ((Current.is(TT_ImplicitStringLiteral) &&
- (Current.Previous->Tok.getIdentifierInfo() == nullptr ||
- Current.Previous->Tok.getIdentifierInfo()->getPPKeywordID() ==
- tok::pp_not_keyword))) {
+ if (Current.is(TT_ImplicitStringLiteral) &&
+ (!Previous.Tok.getIdentifierInfo() ||
+ Previous.Tok.getIdentifierInfo()->getPPKeywordID() ==
+ tok::pp_not_keyword)) {
unsigned EndColumn =
SourceMgr.getSpellingColumnNumber(Current.WhitespaceRange.getEnd());
if (Current.LastNewlineOffset != 0) {
@@ -562,21 +662,61 @@ unsigned ContinuationIndenter::addTokenToState(LineState &State, bool Newline,
void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
unsigned ExtraSpaces) {
FormatToken &Current = *State.NextToken;
+ assert(State.NextToken->Previous);
const FormatToken &Previous = *State.NextToken->Previous;
+ auto &CurrentState = State.Stack.back();
+
+ bool DisallowLineBreaksOnThisLine =
+ Style.LambdaBodyIndentation == FormatStyle::LBI_Signature &&
+ Style.isCpp() && [&Current] {
+ // Deal with lambda arguments in C++. The aim here is to ensure that we
+ // don't over-indent lambda function bodies when lambdas are passed as
+ // arguments to function calls. We do this by ensuring that either all
+ // arguments (including any lambdas) go on the same line as the function
+ // call, or we break before the first argument.
+ auto PrevNonComment = Current.getPreviousNonComment();
+ if (!PrevNonComment || PrevNonComment->isNot(tok::l_paren))
+ return false;
+ if (Current.isOneOf(tok::comment, tok::l_paren, TT_LambdaLSquare))
+ return false;
+ auto BlockParameterCount = PrevNonComment->BlockParameterCount;
+ if (BlockParameterCount == 0)
+ return false;
+
+ // Multiple lambdas in the same function call.
+ if (BlockParameterCount > 1)
+ return true;
+
+ // A lambda followed by another arg.
+ if (!PrevNonComment->Role)
+ return false;
+ auto Comma = PrevNonComment->Role->lastComma();
+ if (!Comma)
+ return false;
+ auto Next = Comma->getNextNonComment();
+ return Next &&
+ !Next->isOneOf(TT_LambdaLSquare, tok::l_brace, tok::caret);
+ }();
+
+ if (DisallowLineBreaksOnThisLine)
+ State.NoLineBreak = true;
+
if (Current.is(tok::equal) &&
(State.Line->First->is(tok::kw_for) || Current.NestingLevel == 0) &&
- State.Stack.back().VariablePos == 0) {
- State.Stack.back().VariablePos = State.Column;
+ CurrentState.VariablePos == 0 &&
+ (!Previous.Previous ||
+ Previous.Previous->isNot(TT_DesignatedInitializerPeriod))) {
+ CurrentState.VariablePos = State.Column;
// Move over * and & if they are bound to the variable name.
const FormatToken *Tok = &Previous;
- while (Tok && State.Stack.back().VariablePos >= Tok->ColumnWidth) {
- State.Stack.back().VariablePos -= Tok->ColumnWidth;
+ while (Tok && CurrentState.VariablePos >= Tok->ColumnWidth) {
+ CurrentState.VariablePos -= Tok->ColumnWidth;
if (Tok->SpacesRequiredBefore != 0)
break;
Tok = Tok->Previous;
}
if (Previous.PartOfMultiVariableDeclStmt)
- State.Stack.back().LastSpace = State.Stack.back().VariablePos;
+ CurrentState.LastSpace = CurrentState.VariablePos;
}
unsigned Spaces = Current.SpacesRequiredBefore + ExtraSpaces;
@@ -585,6 +725,7 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
int PPColumnCorrection = 0;
if (Style.IndentPPDirectives == FormatStyle::PPDIS_AfterHash &&
Previous.is(tok::hash) && State.FirstIndent > 0 &&
+ &Previous == State.Line->First &&
(State.Line->Type == LT_PreprocessorDirective ||
State.Line->Type == LT_ImportStatement)) {
Spaces += State.FirstIndent;
@@ -597,43 +738,58 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
PPColumnCorrection = -1;
}
- if (!DryRun)
+ if (!DryRun) {
Whitespaces.replaceWhitespace(Current, /*Newlines=*/0, Spaces,
- State.Column + Spaces + PPColumnCorrection);
+ State.Column + Spaces + PPColumnCorrection,
+ /*IsAligned=*/false, State.Line->InMacroBody);
+ }
// If "BreakBeforeInheritanceComma" mode, don't break within the inheritance
// declaration unless there is multiple inheritance.
if (Style.BreakInheritanceList == FormatStyle::BILS_BeforeComma &&
- Current.is(TT_InheritanceColon))
- State.Stack.back().NoLineBreak = true;
+ Current.is(TT_InheritanceColon)) {
+ CurrentState.NoLineBreak = true;
+ }
if (Style.BreakInheritanceList == FormatStyle::BILS_AfterColon &&
- Previous.is(TT_InheritanceColon))
- State.Stack.back().NoLineBreak = true;
-
- if (Current.is(TT_SelectorName) &&
- !State.Stack.back().ObjCSelectorNameFound) {
- unsigned MinIndent =
- std::max(State.FirstIndent + Style.ContinuationIndentWidth,
- State.Stack.back().Indent);
+ Previous.is(TT_InheritanceColon)) {
+ CurrentState.NoLineBreak = true;
+ }
+
+ if (Current.is(TT_SelectorName) && !CurrentState.ObjCSelectorNameFound) {
+ unsigned MinIndent = std::max(
+ State.FirstIndent + Style.ContinuationIndentWidth, CurrentState.Indent);
unsigned FirstColonPos = State.Column + Spaces + Current.ColumnWidth;
if (Current.LongestObjCSelectorName == 0)
- State.Stack.back().AlignColons = false;
+ CurrentState.AlignColons = false;
else if (MinIndent + Current.LongestObjCSelectorName > FirstColonPos)
- State.Stack.back().ColonPos = MinIndent + Current.LongestObjCSelectorName;
+ CurrentState.ColonPos = MinIndent + Current.LongestObjCSelectorName;
else
- State.Stack.back().ColonPos = FirstColonPos;
- }
-
- // In "AlwaysBreak" mode, enforce wrapping directly after the parenthesis by
- // disallowing any further line breaks if there is no line break after the
- // opening parenthesis. Don't break if it doesn't conserve columns.
- if (Style.AlignAfterOpenBracket == FormatStyle::BAS_AlwaysBreak &&
- (Previous.isOneOf(tok::l_paren, TT_TemplateOpener, tok::l_square) ||
- (Previous.is(tok::l_brace) && Previous.isNot(BK_Block) &&
- Style.Cpp11BracedListStyle)) &&
- State.Column > getNewLineColumn(State) &&
- (!Previous.Previous || !Previous.Previous->isOneOf(
- tok::kw_for, tok::kw_while, tok::kw_switch)) &&
+ CurrentState.ColonPos = FirstColonPos;
+ }
+
+ // In "AlwaysBreak" or "BlockIndent" mode, enforce wrapping directly after the
+ // parenthesis by disallowing any further line breaks if there is no line
+ // break after the opening parenthesis. Don't break if it doesn't conserve
+ // columns.
+ auto IsOpeningBracket = [&](const FormatToken &Tok) {
+ auto IsStartOfBracedList = [&]() {
+ return Tok.is(tok::l_brace) && Tok.isNot(BK_Block) &&
+ Style.Cpp11BracedListStyle;
+ };
+ if (!Tok.isOneOf(tok::l_paren, TT_TemplateOpener, tok::l_square) &&
+ !IsStartOfBracedList()) {
+ return false;
+ }
+ if (!Tok.Previous)
+ return true;
+ if (Tok.Previous->isIf())
+ return Style.AlignAfterOpenBracket == FormatStyle::BAS_AlwaysBreak;
+ return !Tok.Previous->isOneOf(TT_CastRParen, tok::kw_for, tok::kw_while,
+ tok::kw_switch);
+ };
+ if ((Style.AlignAfterOpenBracket == FormatStyle::BAS_AlwaysBreak ||
+ Style.AlignAfterOpenBracket == FormatStyle::BAS_BlockIndent) &&
+ IsOpeningBracket(Previous) && State.Column > getNewLineColumn(State) &&
// Don't do this for simple (no expressions) one-argument function calls
// as that feels like needlessly wasting whitespace, e.g.:
//
@@ -642,43 +798,54 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
// caaaaaaaaaaaall(
// caaaaaaaaaaaaaaaaaaaaaaall(aaaaaaaaaaaaaa, aaaaaaaaa))));
Current.FakeLParens.size() > 0 &&
- Current.FakeLParens.back() > prec::Unknown)
- State.Stack.back().NoLineBreak = true;
+ Current.FakeLParens.back() > prec::Unknown) {
+ CurrentState.NoLineBreak = true;
+ }
if (Previous.is(TT_TemplateString) && Previous.opensScope())
- State.Stack.back().NoLineBreak = true;
+ CurrentState.NoLineBreak = true;
+ // Align following lines within parentheses / brackets if configured.
+ // Note: This doesn't apply to macro expansion lines, which are MACRO( , , )
+ // with args as children of the '(' and ',' tokens. It does not make sense to
+ // align the commas with the opening paren.
if (Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign &&
- !State.Stack.back().IsCSharpGenericTypeConstraint &&
- Previous.opensScope() && Previous.isNot(TT_ObjCMethodExpr) &&
- (Current.isNot(TT_LineComment) || Previous.is(BK_BracedInit))) {
- State.Stack.back().Indent = State.Column + Spaces;
- State.Stack.back().IsAligned = true;
- }
- if (State.Stack.back().AvoidBinPacking && startsNextParameter(Current, Style))
- State.Stack.back().NoLineBreak = true;
+ !CurrentState.IsCSharpGenericTypeConstraint && Previous.opensScope() &&
+ Previous.isNot(TT_ObjCMethodExpr) && Previous.isNot(TT_RequiresClause) &&
+ !(Current.MacroParent && Previous.MacroParent) &&
+ (Current.isNot(TT_LineComment) ||
+ Previous.isOneOf(BK_BracedInit, TT_VerilogMultiLineListLParen))) {
+ CurrentState.Indent = State.Column + Spaces;
+ CurrentState.IsAligned = true;
+ }
+ if (CurrentState.AvoidBinPacking && startsNextParameter(Current, Style))
+ CurrentState.NoLineBreak = true;
if (startsSegmentOfBuilderTypeCall(Current) &&
- State.Column > getNewLineColumn(State))
- State.Stack.back().ContainsUnwrappedBuilder = true;
+ State.Column > getNewLineColumn(State)) {
+ CurrentState.ContainsUnwrappedBuilder = true;
+ }
- if (Current.is(TT_LambdaArrow) && Style.Language == FormatStyle::LK_Java)
- State.Stack.back().NoLineBreak = true;
+ if (Current.is(TT_TrailingReturnArrow) &&
+ Style.Language == FormatStyle::LK_Java) {
+ CurrentState.NoLineBreak = true;
+ }
if (Current.isMemberAccess() && Previous.is(tok::r_paren) &&
(Previous.MatchingParen &&
- (Previous.TotalLength - Previous.MatchingParen->TotalLength > 10)))
+ (Previous.TotalLength - Previous.MatchingParen->TotalLength > 10))) {
// If there is a function call with long parameters, break before trailing
// calls. This prevents things like:
// EXPECT_CALL(SomeLongParameter).Times(
// 2);
// We don't want to do this for short parameters as they can just be
// indexes.
- State.Stack.back().NoLineBreak = true;
+ CurrentState.NoLineBreak = true;
+ }
// Don't allow the RHS of an operator to be split over multiple lines unless
// there is a line-break right after the operator.
// Exclude relational operators, as there, it is always more desirable to
// have the LHS 'left' of the RHS.
const FormatToken *P = Current.getPreviousNonComment();
- if (!Current.is(tok::comment) && P &&
+ if (Current.isNot(tok::comment) && P &&
(P->isOneOf(TT_BinaryOperator, tok::comma) ||
(P->is(TT_ConditionalExpr) && P->is(tok::colon))) &&
!P->isOneOf(TT_OverloadedOperator, TT_CtorInitializerComma) &&
@@ -693,13 +860,14 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
// Don't do this if there are only two operands. In these cases, there is
// always a nice vertical separation between them and the extra line break
// does not help.
- bool HasTwoOperands =
- P->OperatorIndex == 0 && !P->NextOperator && !P->is(TT_ConditionalExpr);
+ bool HasTwoOperands = P->OperatorIndex == 0 && !P->NextOperator &&
+ P->isNot(TT_ConditionalExpr);
if ((!BreakBeforeOperator &&
!(HasTwoOperands &&
Style.AlignOperands != FormatStyle::OAS_DontAlign)) ||
- (!State.Stack.back().LastOperatorWrapped && BreakBeforeOperator))
- State.Stack.back().NoLineBreakInOperand = true;
+ (!CurrentState.LastOperatorWrapped && BreakBeforeOperator)) {
+ CurrentState.NoLineBreakInOperand = true;
+ }
}
State.Column += Spaces;
@@ -708,55 +876,57 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
(Previous.Previous->is(tok::kw_for) || Previous.Previous->isIf())) {
// Treat the condition inside an if as if it was a second function
// parameter, i.e. let nested calls have a continuation indent.
- State.Stack.back().LastSpace = State.Column;
- State.Stack.back().NestedBlockIndent = State.Column;
+ CurrentState.LastSpace = State.Column;
+ CurrentState.NestedBlockIndent = State.Column;
} else if (!Current.isOneOf(tok::comment, tok::caret) &&
((Previous.is(tok::comma) &&
- !Previous.is(TT_OverloadedOperator)) ||
+ Previous.isNot(TT_OverloadedOperator)) ||
(Previous.is(tok::colon) && Previous.is(TT_ObjCMethodExpr)))) {
- State.Stack.back().LastSpace = State.Column;
+ CurrentState.LastSpace = State.Column;
} else if (Previous.is(TT_CtorInitializerColon) &&
+ (!Current.isTrailingComment() || Current.NewlinesBefore > 0) &&
Style.BreakConstructorInitializers ==
FormatStyle::BCIS_AfterColon) {
- State.Stack.back().Indent = State.Column;
- State.Stack.back().LastSpace = State.Column;
- } else if ((Previous.isOneOf(TT_BinaryOperator, TT_ConditionalExpr,
- TT_CtorInitializerColon)) &&
+ CurrentState.Indent = State.Column;
+ CurrentState.LastSpace = State.Column;
+ } else if (Previous.isOneOf(TT_ConditionalExpr, TT_CtorInitializerColon)) {
+ CurrentState.LastSpace = State.Column;
+ } else if (Previous.is(TT_BinaryOperator) &&
((Previous.getPrecedence() != prec::Assignment &&
(Previous.isNot(tok::lessless) || Previous.OperatorIndex != 0 ||
Previous.NextOperator)) ||
Current.StartsBinaryExpression)) {
// Indent relative to the RHS of the expression unless this is a simple
- // assignment without binary expression on the RHS. Also indent relative to
- // unary operators and the colons of constructor initializers.
+ // assignment without binary expression on the RHS.
if (Style.BreakBeforeBinaryOperators == FormatStyle::BOS_None)
- State.Stack.back().LastSpace = State.Column;
+ CurrentState.LastSpace = State.Column;
} else if (Previous.is(TT_InheritanceColon)) {
- State.Stack.back().Indent = State.Column;
- State.Stack.back().LastSpace = State.Column;
+ CurrentState.Indent = State.Column;
+ CurrentState.LastSpace = State.Column;
} else if (Current.is(TT_CSharpGenericTypeConstraintColon)) {
- State.Stack.back().ColonPos = State.Column;
+ CurrentState.ColonPos = State.Column;
} else if (Previous.opensScope()) {
// If a function has a trailing call, indent all parameters from the
// opening parenthesis. This avoids confusing indents like:
// OuterFunction(InnerFunctionCall( // break
// ParameterToInnerFunction)) // break
// .SecondInnerFunctionCall();
- bool HasTrailingCall = false;
if (Previous.MatchingParen) {
const FormatToken *Next = Previous.MatchingParen->getNextNonComment();
- HasTrailingCall = Next && Next->isMemberAccess();
+ if (Next && Next->isMemberAccess() && State.Stack.size() > 1 &&
+ State.Stack[State.Stack.size() - 2].CallContinuation == 0) {
+ CurrentState.LastSpace = State.Column;
+ }
}
- if (HasTrailingCall && State.Stack.size() > 1 &&
- State.Stack[State.Stack.size() - 2].CallContinuation == 0)
- State.Stack.back().LastSpace = State.Column;
}
}
unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
bool DryRun) {
FormatToken &Current = *State.NextToken;
+ assert(State.NextToken->Previous);
const FormatToken &Previous = *State.NextToken->Previous;
+ auto &CurrentState = State.Stack.back();
// Extra penalty that needs to be added because of the way certain line
// breaks are chosen.
@@ -768,20 +938,20 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
NextNonComment = &Current;
// The first line break on any NestingLevel causes an extra penalty in order
// prefer similar line breaks.
- if (!State.Stack.back().ContainsLineBreak)
+ if (!CurrentState.ContainsLineBreak)
Penalty += 15;
- State.Stack.back().ContainsLineBreak = true;
+ CurrentState.ContainsLineBreak = true;
Penalty += State.NextToken->SplitPenalty;
// Breaking before the first "<<" is generally not desirable if the LHS is
// short. Also always add the penalty if the LHS is split over multiple lines
// to avoid unnecessary line breaks that just work around this penalty.
- if (NextNonComment->is(tok::lessless) &&
- State.Stack.back().FirstLessLess == 0 &&
+ if (NextNonComment->is(tok::lessless) && CurrentState.FirstLessLess == 0 &&
(State.Column <= Style.ColumnLimit / 3 ||
- State.Stack.back().BreakBeforeParameter))
+ CurrentState.BreakBeforeParameter)) {
Penalty += Style.PenaltyBreakFirstLessLess;
+ }
State.Column = getNewLineColumn(State);
@@ -797,9 +967,10 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
// member(value),
// looooooooooooooooong_member(
// looooooooooong_call(param_1, param_2, param_3))
- if (State.Column > State.FirstIndent)
+ if (State.Column > State.FirstIndent) {
Penalty +=
Style.PenaltyIndentedWhitespace * (State.Column - State.FirstIndent);
+ }
// Indent nested blocks relative to this column, unless in a very specific
// JavaScript special case where:
@@ -811,32 +982,32 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
//
// is common and should be formatted like a free-standing function. The same
// goes for wrapping before the lambda return type arrow.
- if (!Current.is(TT_LambdaArrow) &&
- (Style.Language != FormatStyle::LK_JavaScript ||
- Current.NestingLevel != 0 || !PreviousNonComment ||
- !PreviousNonComment->is(tok::equal) ||
- !Current.isOneOf(Keywords.kw_async, Keywords.kw_function)))
- State.Stack.back().NestedBlockIndent = State.Column;
+ if (Current.isNot(TT_TrailingReturnArrow) &&
+ (!Style.isJavaScript() || Current.NestingLevel != 0 ||
+ !PreviousNonComment || PreviousNonComment->isNot(tok::equal) ||
+ !Current.isOneOf(Keywords.kw_async, Keywords.kw_function))) {
+ CurrentState.NestedBlockIndent = State.Column;
+ }
if (NextNonComment->isMemberAccess()) {
- if (State.Stack.back().CallContinuation == 0)
- State.Stack.back().CallContinuation = State.Column;
+ if (CurrentState.CallContinuation == 0)
+ CurrentState.CallContinuation = State.Column;
} else if (NextNonComment->is(TT_SelectorName)) {
- if (!State.Stack.back().ObjCSelectorNameFound) {
+ if (!CurrentState.ObjCSelectorNameFound) {
if (NextNonComment->LongestObjCSelectorName == 0) {
- State.Stack.back().AlignColons = false;
+ CurrentState.AlignColons = false;
} else {
- State.Stack.back().ColonPos =
+ CurrentState.ColonPos =
(shouldIndentWrappedSelectorName(Style, State.Line->Type)
- ? std::max(State.Stack.back().Indent,
+ ? std::max(CurrentState.Indent,
State.FirstIndent + Style.ContinuationIndentWidth)
- : State.Stack.back().Indent) +
+ : CurrentState.Indent) +
std::max(NextNonComment->LongestObjCSelectorName,
NextNonComment->ColumnWidth);
}
- } else if (State.Stack.back().AlignColons &&
- State.Stack.back().ColonPos <= NextNonComment->ColumnWidth) {
- State.Stack.back().ColonPos = State.Column + NextNonComment->ColumnWidth;
+ } else if (CurrentState.AlignColons &&
+ CurrentState.ColonPos <= NextNonComment->ColumnWidth) {
+ CurrentState.ColonPos = State.Column + NextNonComment->ColumnWidth;
}
} else if (PreviousNonComment && PreviousNonComment->is(tok::colon) &&
PreviousNonComment->isOneOf(TT_ObjCMethodExpr, TT_DictLiteral)) {
@@ -849,26 +1020,31 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
// }];
// Thus, we set LastSpace of the next higher NestingLevel, to which we move
// when we consume all of the "}"'s FakeRParens at the "{".
- if (State.Stack.size() > 1)
+ if (State.Stack.size() > 1) {
State.Stack[State.Stack.size() - 2].LastSpace =
- std::max(State.Stack.back().LastSpace, State.Stack.back().Indent) +
+ std::max(CurrentState.LastSpace, CurrentState.Indent) +
Style.ContinuationIndentWidth;
+ }
}
if ((PreviousNonComment &&
PreviousNonComment->isOneOf(tok::comma, tok::semi) &&
- !State.Stack.back().AvoidBinPacking) ||
- Previous.is(TT_BinaryOperator))
- State.Stack.back().BreakBeforeParameter = false;
+ !CurrentState.AvoidBinPacking) ||
+ Previous.is(TT_BinaryOperator)) {
+ CurrentState.BreakBeforeParameter = false;
+ }
if (PreviousNonComment &&
- PreviousNonComment->isOneOf(TT_TemplateCloser, TT_JavaAnnotation) &&
- Current.NestingLevel == 0)
- State.Stack.back().BreakBeforeParameter = false;
+ (PreviousNonComment->isOneOf(TT_TemplateCloser, TT_JavaAnnotation) ||
+ PreviousNonComment->ClosesRequiresClause) &&
+ Current.NestingLevel == 0) {
+ CurrentState.BreakBeforeParameter = false;
+ }
if (NextNonComment->is(tok::question) ||
- (PreviousNonComment && PreviousNonComment->is(tok::question)))
- State.Stack.back().BreakBeforeParameter = true;
+ (PreviousNonComment && PreviousNonComment->is(tok::question))) {
+ CurrentState.BreakBeforeParameter = true;
+ }
if (Current.is(TT_BinaryOperator) && Current.CanBreakBefore)
- State.Stack.back().BreakBeforeParameter = false;
+ CurrentState.BreakBeforeParameter = false;
if (!DryRun) {
unsigned MaxEmptyLinesToKeep = Style.MaxEmptyLinesToKeep + 1;
@@ -886,17 +1062,17 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
bool ContinuePPDirective =
State.Line->InPPDirective && State.Line->Type != LT_ImportStatement;
Whitespaces.replaceWhitespace(Current, Newlines, State.Column, State.Column,
- State.Stack.back().IsAligned,
- ContinuePPDirective);
+ CurrentState.IsAligned, ContinuePPDirective);
}
if (!Current.isTrailingComment())
- State.Stack.back().LastSpace = State.Column;
- if (Current.is(tok::lessless))
+ CurrentState.LastSpace = State.Column;
+ if (Current.is(tok::lessless)) {
// If we are breaking before a "<<", we always want to indent relative to
// RHS. This is necessary only for "<<", as we special-case it and don't
// always indent relative to the RHS.
- State.Stack.back().LastSpace += 3; // 3 -> width of "<< ".
+ CurrentState.LastSpace += 3; // 3 -> width of "<< ".
+ }
State.StartOfLineLevel = Current.NestingLevel;
State.LowestLevelOnLine = Current.NestingLevel;
@@ -908,52 +1084,105 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
State.Stack[State.Stack.size() - 2].NestedBlockInlined) ||
(Style.Language == FormatStyle::LK_ObjC && Current.is(tok::r_brace) &&
State.Stack.size() > 1 && !Style.ObjCBreakBeforeNestedBlockParam);
- if (!NestedBlockSpecialCase)
- for (unsigned i = 0, e = State.Stack.size() - 1; i != e; ++i)
- State.Stack[i].BreakBeforeParameter = true;
+ // Do not force parameter break for statements with requires expressions.
+ NestedBlockSpecialCase =
+ NestedBlockSpecialCase ||
+ (Current.MatchingParen &&
+ Current.MatchingParen->is(TT_RequiresExpressionLBrace));
+ if (!NestedBlockSpecialCase) {
+ auto ParentLevelIt = std::next(State.Stack.rbegin());
+ if (Style.LambdaBodyIndentation == FormatStyle::LBI_OuterScope &&
+ Current.MatchingParen && Current.MatchingParen->is(TT_LambdaLBrace)) {
+ // If the first character on the new line is a lambda's closing brace, the
+ // stack still contains that lambda's parenthesis. As such, we need to
+ // recurse further down the stack than usual to find the parenthesis level
+ // containing the lambda, which is where we want to set
+ // BreakBeforeParameter.
+ //
+ // We specifically special case "OuterScope"-formatted lambdas here
+ // because, when using that setting, breaking before the parameter
+ // directly following the lambda is particularly unsightly. However, when
+ // "OuterScope" is not set, the logic to find the parent parenthesis level
+ // still appears to be sometimes incorrect. It has not been fixed yet
+ // because it would lead to significant changes in existing behaviour.
+ //
+ // TODO: fix the non-"OuterScope" case too.
+ auto FindCurrentLevel = [&](const auto &It) {
+ return std::find_if(It, State.Stack.rend(), [](const auto &PState) {
+ return PState.Tok != nullptr; // Ignore fake parens.
+ });
+ };
+ auto MaybeIncrement = [&](const auto &It) {
+ return It != State.Stack.rend() ? std::next(It) : It;
+ };
+ auto LambdaLevelIt = FindCurrentLevel(State.Stack.rbegin());
+ auto LevelContainingLambdaIt =
+ FindCurrentLevel(MaybeIncrement(LambdaLevelIt));
+ ParentLevelIt = MaybeIncrement(LevelContainingLambdaIt);
+ }
+ for (auto I = ParentLevelIt, E = State.Stack.rend(); I != E; ++I)
+ I->BreakBeforeParameter = true;
+ }
if (PreviousNonComment &&
!PreviousNonComment->isOneOf(tok::comma, tok::colon, tok::semi) &&
- (PreviousNonComment->isNot(TT_TemplateCloser) ||
+ ((PreviousNonComment->isNot(TT_TemplateCloser) &&
+ !PreviousNonComment->ClosesRequiresClause) ||
Current.NestingLevel != 0) &&
!PreviousNonComment->isOneOf(
TT_BinaryOperator, TT_FunctionAnnotationRParen, TT_JavaAnnotation,
TT_LeadingJavaAnnotation) &&
- Current.isNot(TT_BinaryOperator) && !PreviousNonComment->opensScope())
- State.Stack.back().BreakBeforeParameter = true;
+ Current.isNot(TT_BinaryOperator) && !PreviousNonComment->opensScope() &&
+ // We don't want to enforce line breaks for subsequent arguments just
+ // because we have been forced to break before a lambda body.
+ (!Style.BraceWrapping.BeforeLambdaBody ||
+ Current.isNot(TT_LambdaLBrace))) {
+ CurrentState.BreakBeforeParameter = true;
+ }
// If we break after { or the [ of an array initializer, we should also break
// before the corresponding } or ].
if (PreviousNonComment &&
(PreviousNonComment->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) ||
- opensProtoMessageField(*PreviousNonComment, Style)))
- State.Stack.back().BreakBeforeClosingBrace = true;
+ opensProtoMessageField(*PreviousNonComment, Style))) {
+ CurrentState.BreakBeforeClosingBrace = true;
+ }
+
+ if (PreviousNonComment && PreviousNonComment->is(tok::l_paren)) {
+ CurrentState.BreakBeforeClosingParen =
+ Style.AlignAfterOpenBracket == FormatStyle::BAS_BlockIndent;
+ }
- if (State.Stack.back().AvoidBinPacking) {
+ if (CurrentState.AvoidBinPacking) {
// If we are breaking after '(', '{', '<', or this is the break after a ':'
- // to start a member initializater list in a constructor, this should not
+ // to start a member initializer list in a constructor, this should not
// be considered bin packing unless the relevant AllowAll option is false or
// this is a dict/object literal.
bool PreviousIsBreakingCtorInitializerColon =
- Previous.is(TT_CtorInitializerColon) &&
+ PreviousNonComment && PreviousNonComment->is(TT_CtorInitializerColon) &&
Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon;
+ bool AllowAllConstructorInitializersOnNextLine =
+ Style.PackConstructorInitializers == FormatStyle::PCIS_NextLine ||
+ Style.PackConstructorInitializers == FormatStyle::PCIS_NextLineOnly;
if (!(Previous.isOneOf(tok::l_paren, tok::l_brace, TT_BinaryOperator) ||
PreviousIsBreakingCtorInitializerColon) ||
(!Style.AllowAllParametersOfDeclarationOnNextLine &&
State.Line->MustBeDeclaration) ||
(!Style.AllowAllArgumentsOnNextLine &&
!State.Line->MustBeDeclaration) ||
- (!Style.AllowAllConstructorInitializersOnNextLine &&
+ (!AllowAllConstructorInitializersOnNextLine &&
PreviousIsBreakingCtorInitializerColon) ||
- Previous.is(TT_DictLiteral))
- State.Stack.back().BreakBeforeParameter = true;
+ Previous.is(TT_DictLiteral)) {
+ CurrentState.BreakBeforeParameter = true;
+ }
// If we are breaking after a ':' to start a member initializer list,
// and we allow all arguments on the next line, we should not break
// before the next parameter.
if (PreviousIsBreakingCtorInitializerColon &&
- Style.AllowAllConstructorInitializersOnNextLine)
- State.Stack.back().BreakBeforeParameter = false;
+ AllowAllConstructorInitializersOnNextLine) {
+ CurrentState.BreakBeforeParameter = false;
+ }
}
return Penalty;
@@ -964,15 +1193,17 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
return 0;
FormatToken &Current = *State.NextToken;
+ const auto &CurrentState = State.Stack.back();
- if (State.Stack.back().IsCSharpGenericTypeConstraint &&
- Current.isNot(TT_CSharpGenericTypeConstraint))
- return State.Stack.back().ColonPos + 2;
+ if (CurrentState.IsCSharpGenericTypeConstraint &&
+ Current.isNot(TT_CSharpGenericTypeConstraint)) {
+ return CurrentState.ColonPos + 2;
+ }
const FormatToken &Previous = *Current.Previous;
// If we are continuing an expression, we want to use the continuation indent.
unsigned ContinuationIndent =
- std::max(State.Stack.back().LastSpace, State.Stack.back().Indent) +
+ std::max(CurrentState.LastSpace, CurrentState.Indent) +
Style.ContinuationIndentWidth;
const FormatToken *PreviousNonComment = Current.getPreviousNonComment();
const FormatToken *NextNonComment = Previous.getNextNonComment();
@@ -981,22 +1212,35 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
// Java specific bits.
if (Style.Language == FormatStyle::LK_Java &&
- Current.isOneOf(Keywords.kw_implements, Keywords.kw_extends))
- return std::max(State.Stack.back().LastSpace,
- State.Stack.back().Indent + Style.ContinuationIndentWidth);
+ Current.isOneOf(Keywords.kw_implements, Keywords.kw_extends)) {
+ return std::max(CurrentState.LastSpace,
+ CurrentState.Indent + Style.ContinuationIndentWidth);
+ }
+
+ // Indentation of the statement following a Verilog case label is taken care
+ // of in moveStateToNextToken.
+ if (Style.isVerilog() && PreviousNonComment &&
+ Keywords.isVerilogEndOfLabel(*PreviousNonComment)) {
+ return State.FirstIndent;
+ }
if (Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths &&
- State.Line->First->is(tok::kw_enum))
+ State.Line->First->is(tok::kw_enum)) {
return (Style.IndentWidth * State.Line->First->IndentLevel) +
Style.IndentWidth;
+ }
- if (NextNonComment->is(tok::l_brace) && NextNonComment->is(BK_Block))
- return Current.NestingLevel == 0 ? State.FirstIndent
- : State.Stack.back().Indent;
+ if ((NextNonComment->is(tok::l_brace) && NextNonComment->is(BK_Block)) ||
+ (Style.isVerilog() && Keywords.isVerilogBegin(*NextNonComment))) {
+ if (Current.NestingLevel == 0 ||
+ (Style.LambdaBodyIndentation == FormatStyle::LBI_OuterScope &&
+ State.NextToken->is(TT_LambdaLBrace))) {
+ return State.FirstIndent;
+ }
+ return CurrentState.Indent;
+ }
if ((Current.isOneOf(tok::r_brace, tok::r_square) ||
- (Current.is(tok::greater) &&
- (Style.Language == FormatStyle::LK_Proto ||
- Style.Language == FormatStyle::LK_TextProto))) &&
+ (Current.is(tok::greater) && Style.isProto())) &&
State.Stack.size() > 1) {
if (Current.closesBlockOrBlockTypeList(Style))
return State.Stack[State.Stack.size() - 2].NestedBlockIndent;
@@ -1021,30 +1265,45 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
// }
if (Current.is(tok::r_paren) && State.Stack.size() > 1 &&
(!Current.Next ||
- Current.Next->isOneOf(tok::semi, tok::kw_const, tok::l_brace)))
+ Current.Next->isOneOf(tok::semi, tok::kw_const, tok::l_brace))) {
+ return State.Stack[State.Stack.size() - 2].LastSpace;
+ }
+ if (Style.AlignAfterOpenBracket == FormatStyle::BAS_BlockIndent &&
+ (Current.is(tok::r_paren) ||
+ (Current.is(tok::r_brace) && Current.MatchingParen &&
+ Current.MatchingParen->is(BK_BracedInit))) &&
+ State.Stack.size() > 1) {
return State.Stack[State.Stack.size() - 2].LastSpace;
+ }
if (NextNonComment->is(TT_TemplateString) && NextNonComment->closesScope())
return State.Stack[State.Stack.size() - 2].LastSpace;
+ // Field labels in a nested type should be aligned to the brace. For example
+ // in ProtoBuf:
+ // optional int32 b = 2 [(foo_options) = {aaaaaaaaaaaaaaaaaaa: 123,
+ // bbbbbbbbbbbbbbbbbbbbbbbb:"baz"}];
+ // For Verilog, a quote following a brace is treated as an identifier. And
+ // Both braces and colons get annotated as TT_DictLiteral. So we have to
+ // check.
if (Current.is(tok::identifier) && Current.Next &&
+ (!Style.isVerilog() || Current.Next->is(tok::colon)) &&
(Current.Next->is(TT_DictLiteral) ||
- ((Style.Language == FormatStyle::LK_Proto ||
- Style.Language == FormatStyle::LK_TextProto) &&
- Current.Next->isOneOf(tok::less, tok::l_brace))))
- return State.Stack.back().Indent;
+ (Style.isProto() && Current.Next->isOneOf(tok::less, tok::l_brace)))) {
+ return CurrentState.Indent;
+ }
if (NextNonComment->is(TT_ObjCStringLiteral) &&
- State.StartOfStringLiteral != 0)
+ State.StartOfStringLiteral != 0) {
return State.StartOfStringLiteral - 1;
+ }
if (NextNonComment->isStringLiteral() && State.StartOfStringLiteral != 0)
return State.StartOfStringLiteral;
- if (NextNonComment->is(tok::lessless) &&
- State.Stack.back().FirstLessLess != 0)
- return State.Stack.back().FirstLessLess;
+ if (NextNonComment->is(tok::lessless) && CurrentState.FirstLessLess != 0)
+ return CurrentState.FirstLessLess;
if (NextNonComment->isMemberAccess()) {
- if (State.Stack.back().CallContinuation == 0)
+ if (CurrentState.CallContinuation == 0)
return ContinuationIndent;
- return State.Stack.back().CallContinuation;
+ return CurrentState.CallContinuation;
}
- if (State.Stack.back().QuestionColumn != 0 &&
+ if (CurrentState.QuestionColumn != 0 &&
((NextNonComment->is(tok::colon) &&
NextNonComment->is(TT_ConditionalExpr)) ||
Previous.is(TT_ConditionalExpr))) {
@@ -1053,38 +1312,56 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
NextNonComment->Next->FakeLParens.back() == prec::Conditional) ||
(Previous.is(tok::colon) && !Current.FakeLParens.empty() &&
Current.FakeLParens.back() == prec::Conditional)) &&
- !State.Stack.back().IsWrappedConditional) {
+ !CurrentState.IsWrappedConditional) {
// NOTE: we may tweak this slightly:
// * not remove the 'lead' ContinuationIndentWidth
// * always un-indent by the operator when
// BreakBeforeTernaryOperators=true
- unsigned Indent = State.Stack.back().Indent;
- if (Style.AlignOperands != FormatStyle::OAS_DontAlign) {
+ unsigned Indent = CurrentState.Indent;
+ if (Style.AlignOperands != FormatStyle::OAS_DontAlign)
Indent -= Style.ContinuationIndentWidth;
- }
- if (Style.BreakBeforeTernaryOperators &&
- State.Stack.back().UnindentOperator)
+ if (Style.BreakBeforeTernaryOperators && CurrentState.UnindentOperator)
Indent -= 2;
return Indent;
}
- return State.Stack.back().QuestionColumn;
+ return CurrentState.QuestionColumn;
+ }
+ if (Previous.is(tok::comma) && CurrentState.VariablePos != 0)
+ return CurrentState.VariablePos;
+ if (Current.is(TT_RequiresClause)) {
+ if (Style.IndentRequiresClause)
+ return CurrentState.Indent + Style.IndentWidth;
+ switch (Style.RequiresClausePosition) {
+ case FormatStyle::RCPS_OwnLine:
+ case FormatStyle::RCPS_WithFollowing:
+ return CurrentState.Indent;
+ default:
+ break;
+ }
+ }
+ if (NextNonComment->isOneOf(TT_CtorInitializerColon, TT_InheritanceColon,
+ TT_InheritanceComma)) {
+ return State.FirstIndent + Style.ConstructorInitializerIndentWidth;
}
- if (Previous.is(tok::comma) && State.Stack.back().VariablePos != 0)
- return State.Stack.back().VariablePos;
if ((PreviousNonComment &&
(PreviousNonComment->ClosesTemplateDeclaration ||
+ PreviousNonComment->ClosesRequiresClause ||
+ (PreviousNonComment->is(TT_AttributeMacro) &&
+ Current.isNot(tok::l_paren)) ||
PreviousNonComment->isOneOf(
- TT_AttributeParen, TT_AttributeSquare, TT_FunctionAnnotationRParen,
+ TT_AttributeRParen, TT_AttributeSquare, TT_FunctionAnnotationRParen,
TT_JavaAnnotation, TT_LeadingJavaAnnotation))) ||
(!Style.IndentWrappedFunctionNames &&
- NextNonComment->isOneOf(tok::kw_operator, TT_FunctionDeclarationName)))
- return std::max(State.Stack.back().LastSpace, State.Stack.back().Indent);
+ NextNonComment->isOneOf(tok::kw_operator, TT_FunctionDeclarationName))) {
+ return std::max(CurrentState.LastSpace, CurrentState.Indent);
+ }
if (NextNonComment->is(TT_SelectorName)) {
- if (!State.Stack.back().ObjCSelectorNameFound) {
- unsigned MinIndent = State.Stack.back().Indent;
- if (shouldIndentWrappedSelectorName(Style, State.Line->Type))
+ if (!CurrentState.ObjCSelectorNameFound) {
+ unsigned MinIndent = CurrentState.Indent;
+ if (shouldIndentWrappedSelectorName(Style, State.Line->Type)) {
MinIndent = std::max(MinIndent,
State.FirstIndent + Style.ContinuationIndentWidth);
+ }
// If LongestObjCSelectorName is 0, we are indenting the first
// part of an ObjC selector (or a selector component which is
// not colon-aligned due to block formatting).
@@ -1099,64 +1376,79 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
NextNonComment->ColumnWidth) -
NextNonComment->ColumnWidth;
}
- if (!State.Stack.back().AlignColons)
- return State.Stack.back().Indent;
- if (State.Stack.back().ColonPos > NextNonComment->ColumnWidth)
- return State.Stack.back().ColonPos - NextNonComment->ColumnWidth;
- return State.Stack.back().Indent;
+ if (!CurrentState.AlignColons)
+ return CurrentState.Indent;
+ if (CurrentState.ColonPos > NextNonComment->ColumnWidth)
+ return CurrentState.ColonPos - NextNonComment->ColumnWidth;
+ return CurrentState.Indent;
}
if (NextNonComment->is(tok::colon) && NextNonComment->is(TT_ObjCMethodExpr))
- return State.Stack.back().ColonPos;
+ return CurrentState.ColonPos;
if (NextNonComment->is(TT_ArraySubscriptLSquare)) {
- if (State.Stack.back().StartOfArraySubscripts != 0)
- return State.Stack.back().StartOfArraySubscripts;
- else if (Style.isCSharp()) // C# allows `["key"] = value` inside object
- // initializers.
- return State.Stack.back().Indent;
+ if (CurrentState.StartOfArraySubscripts != 0) {
+ return CurrentState.StartOfArraySubscripts;
+ } else if (Style.isCSharp()) { // C# allows `["key"] = value` inside object
+ // initializers.
+ return CurrentState.Indent;
+ }
return ContinuationIndent;
}
+ // OpenMP clauses want to get additional indentation when they are pushed onto
+ // the next line.
+ if (State.Line->InPragmaDirective) {
+ FormatToken *PragmaType = State.Line->First->Next->Next;
+ if (PragmaType && PragmaType->TokenText.equals("omp"))
+ return CurrentState.Indent + Style.ContinuationIndentWidth;
+ }
+
// This ensure that we correctly format ObjC methods calls without inputs,
// i.e. where the last element isn't selector like: [callee method];
if (NextNonComment->is(tok::identifier) && NextNonComment->FakeRParens == 0 &&
- NextNonComment->Next && NextNonComment->Next->is(TT_ObjCMethodExpr))
- return State.Stack.back().Indent;
+ NextNonComment->Next && NextNonComment->Next->is(TT_ObjCMethodExpr)) {
+ return CurrentState.Indent;
+ }
if (NextNonComment->isOneOf(TT_StartOfName, TT_PointerOrReference) ||
- Previous.isOneOf(tok::coloncolon, tok::equal, TT_JsTypeColon))
+ Previous.isOneOf(tok::coloncolon, tok::equal, TT_JsTypeColon)) {
return ContinuationIndent;
+ }
if (PreviousNonComment && PreviousNonComment->is(tok::colon) &&
- PreviousNonComment->isOneOf(TT_ObjCMethodExpr, TT_DictLiteral))
+ PreviousNonComment->isOneOf(TT_ObjCMethodExpr, TT_DictLiteral)) {
return ContinuationIndent;
+ }
if (NextNonComment->is(TT_CtorInitializerComma))
- return State.Stack.back().Indent;
+ return CurrentState.Indent;
if (PreviousNonComment && PreviousNonComment->is(TT_CtorInitializerColon) &&
- Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon)
- return State.Stack.back().Indent;
+ Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon) {
+ return CurrentState.Indent;
+ }
if (PreviousNonComment && PreviousNonComment->is(TT_InheritanceColon) &&
- Style.BreakInheritanceList == FormatStyle::BILS_AfterColon)
- return State.Stack.back().Indent;
- if (NextNonComment->isOneOf(TT_CtorInitializerColon, TT_InheritanceColon,
- TT_InheritanceComma))
- return State.FirstIndent + Style.ConstructorInitializerIndentWidth;
+ Style.BreakInheritanceList == FormatStyle::BILS_AfterColon) {
+ return CurrentState.Indent;
+ }
if (Previous.is(tok::r_paren) && !Current.isBinaryOperator() &&
- !Current.isOneOf(tok::colon, tok::comment))
+ !Current.isOneOf(tok::colon, tok::comment)) {
return ContinuationIndent;
+ }
if (Current.is(TT_ProtoExtensionLSquare))
- return State.Stack.back().Indent;
- if (Current.isBinaryOperator() && State.Stack.back().UnindentOperator)
- return State.Stack.back().Indent - Current.Tok.getLength() -
+ return CurrentState.Indent;
+ if (Current.isBinaryOperator() && CurrentState.UnindentOperator) {
+ return CurrentState.Indent - Current.Tok.getLength() -
Current.SpacesRequiredBefore;
- if (Current.isOneOf(tok::comment, TT_BlockComment, TT_LineComment) &&
- NextNonComment->isBinaryOperator() && State.Stack.back().UnindentOperator)
- return State.Stack.back().Indent - NextNonComment->Tok.getLength() -
+ }
+ if (Current.is(tok::comment) && NextNonComment->isBinaryOperator() &&
+ CurrentState.UnindentOperator) {
+ return CurrentState.Indent - NextNonComment->Tok.getLength() -
NextNonComment->SpacesRequiredBefore;
- if (State.Stack.back().Indent == State.FirstIndent && PreviousNonComment &&
- !PreviousNonComment->isOneOf(tok::r_brace, TT_CtorInitializerComma))
+ }
+ if (CurrentState.Indent == State.FirstIndent && PreviousNonComment &&
+ !PreviousNonComment->isOneOf(tok::r_brace, TT_CtorInitializerComma)) {
// Ensure that we fall back to the continuation indent width instead of
// just flushing continuations left.
- return State.Stack.back().Indent + Style.ContinuationIndentWidth;
- return State.Stack.back().Indent;
+ return CurrentState.Indent + Style.ContinuationIndentWidth;
+ }
+ return CurrentState.Indent;
}
static bool hasNestedBlockInlined(const FormatToken *Previous,
@@ -1167,58 +1459,68 @@ static bool hasNestedBlockInlined(const FormatToken *Previous,
if (Previous->ParameterCount > 1)
return true;
- // Also a nested block if contains a lambda inside function with 1 parameter
- return (Style.BraceWrapping.BeforeLambdaBody && Current.is(TT_LambdaLSquare));
+ // Also a nested block if contains a lambda inside function with 1 parameter.
+ return Style.BraceWrapping.BeforeLambdaBody && Current.is(TT_LambdaLSquare);
}
unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
bool DryRun, bool Newline) {
assert(State.Stack.size());
const FormatToken &Current = *State.NextToken;
+ auto &CurrentState = State.Stack.back();
if (Current.is(TT_CSharpGenericTypeConstraint))
- State.Stack.back().IsCSharpGenericTypeConstraint = true;
+ CurrentState.IsCSharpGenericTypeConstraint = true;
if (Current.isOneOf(tok::comma, TT_BinaryOperator))
- State.Stack.back().NoLineBreakInOperand = false;
+ CurrentState.NoLineBreakInOperand = false;
if (Current.isOneOf(TT_InheritanceColon, TT_CSharpGenericTypeConstraintColon))
- State.Stack.back().AvoidBinPacking = true;
+ CurrentState.AvoidBinPacking = true;
if (Current.is(tok::lessless) && Current.isNot(TT_OverloadedOperator)) {
- if (State.Stack.back().FirstLessLess == 0)
- State.Stack.back().FirstLessLess = State.Column;
+ if (CurrentState.FirstLessLess == 0)
+ CurrentState.FirstLessLess = State.Column;
else
- State.Stack.back().LastOperatorWrapped = Newline;
+ CurrentState.LastOperatorWrapped = Newline;
}
if (Current.is(TT_BinaryOperator) && Current.isNot(tok::lessless))
- State.Stack.back().LastOperatorWrapped = Newline;
+ CurrentState.LastOperatorWrapped = Newline;
if (Current.is(TT_ConditionalExpr) && Current.Previous &&
- !Current.Previous->is(TT_ConditionalExpr))
- State.Stack.back().LastOperatorWrapped = Newline;
+ Current.Previous->isNot(TT_ConditionalExpr)) {
+ CurrentState.LastOperatorWrapped = Newline;
+ }
if (Current.is(TT_ArraySubscriptLSquare) &&
- State.Stack.back().StartOfArraySubscripts == 0)
- State.Stack.back().StartOfArraySubscripts = State.Column;
- if (Current.is(TT_ConditionalExpr) && Current.is(tok::question) &&
- ((Current.MustBreakBefore) ||
- (Current.getNextNonComment() &&
- Current.getNextNonComment()->MustBreakBefore)))
- State.Stack.back().IsWrappedConditional = true;
+ CurrentState.StartOfArraySubscripts == 0) {
+ CurrentState.StartOfArraySubscripts = State.Column;
+ }
+
+ auto IsWrappedConditional = [](const FormatToken &Tok) {
+ if (!(Tok.is(TT_ConditionalExpr) && Tok.is(tok::question)))
+ return false;
+ if (Tok.MustBreakBefore)
+ return true;
+
+ const FormatToken *Next = Tok.getNextNonComment();
+ return Next && Next->MustBreakBefore;
+ };
+ if (IsWrappedConditional(Current))
+ CurrentState.IsWrappedConditional = true;
if (Style.BreakBeforeTernaryOperators && Current.is(tok::question))
- State.Stack.back().QuestionColumn = State.Column;
+ CurrentState.QuestionColumn = State.Column;
if (!Style.BreakBeforeTernaryOperators && Current.isNot(tok::colon)) {
const FormatToken *Previous = Current.Previous;
while (Previous && Previous->isTrailingComment())
Previous = Previous->Previous;
if (Previous && Previous->is(tok::question))
- State.Stack.back().QuestionColumn = State.Column;
+ CurrentState.QuestionColumn = State.Column;
}
if (!Current.opensScope() && !Current.closesScope() &&
- !Current.is(TT_PointerOrReference))
+ Current.isNot(TT_PointerOrReference)) {
State.LowestLevelOnLine =
std::min(State.LowestLevelOnLine, Current.NestingLevel);
+ }
if (Current.isMemberAccess())
- State.Stack.back().StartOfFunctionCall =
- !Current.NextOperator ? 0 : State.Column;
+ CurrentState.StartOfFunctionCall = !Current.NextOperator ? 0 : State.Column;
if (Current.is(TT_SelectorName))
- State.Stack.back().ObjCSelectorNameFound = true;
+ CurrentState.ObjCSelectorNameFound = true;
if (Current.is(TT_CtorInitializerColon) &&
Style.BreakConstructorInitializers != FormatStyle::BCIS_AfterColon) {
// Indent 2 from the column, so:
@@ -1226,36 +1528,43 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
// : First(...), ...
// Next(...)
// ^ line up here.
- State.Stack.back().Indent =
- State.Column +
- (Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma
- ? 0
- : 2);
- State.Stack.back().NestedBlockIndent = State.Stack.back().Indent;
- if (Style.ConstructorInitializerAllOnOneLineOrOnePerLine) {
- State.Stack.back().AvoidBinPacking = true;
- State.Stack.back().BreakBeforeParameter =
- !Style.AllowAllConstructorInitializersOnNextLine;
+ CurrentState.Indent = State.Column + (Style.BreakConstructorInitializers ==
+ FormatStyle::BCIS_BeforeComma
+ ? 0
+ : 2);
+ CurrentState.NestedBlockIndent = CurrentState.Indent;
+ if (Style.PackConstructorInitializers > FormatStyle::PCIS_BinPack) {
+ CurrentState.AvoidBinPacking = true;
+ CurrentState.BreakBeforeParameter =
+ Style.ColumnLimit > 0 &&
+ Style.PackConstructorInitializers != FormatStyle::PCIS_NextLine &&
+ Style.PackConstructorInitializers != FormatStyle::PCIS_NextLineOnly;
} else {
- State.Stack.back().BreakBeforeParameter = false;
+ CurrentState.BreakBeforeParameter = false;
}
}
if (Current.is(TT_CtorInitializerColon) &&
Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon) {
- State.Stack.back().Indent =
+ CurrentState.Indent =
State.FirstIndent + Style.ConstructorInitializerIndentWidth;
- State.Stack.back().NestedBlockIndent = State.Stack.back().Indent;
- if (Style.ConstructorInitializerAllOnOneLineOrOnePerLine)
- State.Stack.back().AvoidBinPacking = true;
+ CurrentState.NestedBlockIndent = CurrentState.Indent;
+ if (Style.PackConstructorInitializers > FormatStyle::PCIS_BinPack)
+ CurrentState.AvoidBinPacking = true;
+ else
+ CurrentState.BreakBeforeParameter = false;
}
- if (Current.is(TT_InheritanceColon))
- State.Stack.back().Indent =
+ if (Current.is(TT_InheritanceColon)) {
+ CurrentState.Indent =
State.FirstIndent + Style.ConstructorInitializerIndentWidth;
+ }
if (Current.isOneOf(TT_BinaryOperator, TT_ConditionalExpr) && Newline)
- State.Stack.back().NestedBlockIndent =
- State.Column + Current.ColumnWidth + 1;
- if (Current.isOneOf(TT_LambdaLSquare, TT_LambdaArrow))
- State.Stack.back().LastSpace = State.Column;
+ CurrentState.NestedBlockIndent = State.Column + Current.ColumnWidth + 1;
+ if (Current.isOneOf(TT_LambdaLSquare, TT_TrailingReturnArrow))
+ CurrentState.LastSpace = State.Column;
+ if (Current.is(TT_RequiresExpression) &&
+ Style.RequiresExpressionIndentation == FormatStyle::REI_Keyword) {
+ CurrentState.NestedBlockIndent = State.Column;
+ }
// Insert scopes created by fake parenthesis.
const FormatToken *Previous = Current.getPreviousNonComment();
@@ -1266,25 +1575,26 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
// foo();
// bar();
// }, a, b, c);
- if (Current.isNot(tok::comment) && Previous &&
- Previous->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) &&
- !Previous->is(TT_DictLiteral) && State.Stack.size() > 1 &&
- !State.Stack.back().HasMultipleNestedBlocks) {
+ if (Current.isNot(tok::comment) && !Current.ClosesRequiresClause &&
+ Previous && Previous->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) &&
+ Previous->isNot(TT_DictLiteral) && State.Stack.size() > 1 &&
+ !CurrentState.HasMultipleNestedBlocks) {
if (State.Stack[State.Stack.size() - 2].NestedBlockInlined && Newline)
- for (unsigned i = 0, e = State.Stack.size() - 1; i != e; ++i)
- State.Stack[i].NoLineBreak = true;
+ for (ParenState &PState : llvm::drop_end(State.Stack))
+ PState.NoLineBreak = true;
State.Stack[State.Stack.size() - 2].NestedBlockInlined = false;
}
- if (Previous &&
- (Previous->isOneOf(tok::l_paren, tok::comma, tok::colon) ||
- Previous->isOneOf(TT_BinaryOperator, TT_ConditionalExpr)) &&
- !Previous->isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)) {
- State.Stack.back().NestedBlockInlined =
+ if (Previous && (Previous->isOneOf(TT_BinaryOperator, TT_ConditionalExpr) ||
+ (Previous->isOneOf(tok::l_paren, tok::comma, tok::colon) &&
+ !Previous->isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)))) {
+ CurrentState.NestedBlockInlined =
!Newline && hasNestedBlockInlined(Previous, Current, Style);
}
moveStatePastFakeLParens(State, Newline);
moveStatePastScopeCloser(State);
+ // Do not use CurrentState here, since the two functions before may change the
+ // Stack.
bool AllowBreak = !State.Stack.back().NoLineBreak &&
!State.Stack.back().NoLineBreakInOperand;
moveStatePastScopeOpener(State, Newline);
@@ -1292,16 +1602,30 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
if (Current.is(TT_ObjCStringLiteral) && State.StartOfStringLiteral == 0)
State.StartOfStringLiteral = State.Column + 1;
- if (Current.is(TT_CSharpStringLiteral) && State.StartOfStringLiteral == 0)
+ if (Current.is(TT_CSharpStringLiteral) && State.StartOfStringLiteral == 0) {
+ State.StartOfStringLiteral = State.Column + 1;
+ } else if (Current.is(TT_TableGenMultiLineString) &&
+ State.StartOfStringLiteral == 0) {
State.StartOfStringLiteral = State.Column + 1;
- else if (Current.isStringLiteral() && State.StartOfStringLiteral == 0)
+ } else if (Current.isStringLiteral() && State.StartOfStringLiteral == 0) {
State.StartOfStringLiteral = State.Column;
- else if (!Current.isOneOf(tok::comment, tok::identifier, tok::hash) &&
- !Current.isStringLiteral())
+ } else if (!Current.isOneOf(tok::comment, tok::identifier, tok::hash) &&
+ !Current.isStringLiteral()) {
State.StartOfStringLiteral = 0;
+ }
State.Column += Current.ColumnWidth;
State.NextToken = State.NextToken->Next;
+ // Verilog case labels are on the same unwrapped lines as the statements that
+ // follow. TokenAnnotator identifies them and sets MustBreakBefore.
+ // Indentation is taken care of here. A case label can only have 1 statement
+ // in Verilog, so we don't have to worry about lines that follow.
+ if (Style.isVerilog() && State.NextToken &&
+ State.NextToken->MustBreakBefore &&
+ Keywords.isVerilogEndOfLabel(Current)) {
+ State.FirstIndent += Style.IndentWidth;
+ CurrentState.Indent = State.FirstIndent;
+ }
unsigned Penalty =
handleEndOfLine(Current, State, DryRun, AllowBreak, Newline);
@@ -1322,22 +1646,24 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
bool Newline) {
const FormatToken &Current = *State.NextToken;
+ if (Current.FakeLParens.empty())
+ return;
+
const FormatToken *Previous = Current.getPreviousNonComment();
// Don't add extra indentation for the first fake parenthesis after
- // 'return', assignments or opening <({[. The indentation for these cases
- // is special cased.
+ // 'return', assignments, opening <({[, or requires clauses. The indentation
+ // for these cases is special cased.
bool SkipFirstExtraIndent =
- (Previous && (Previous->opensScope() ||
- Previous->isOneOf(tok::semi, tok::kw_return) ||
- (Previous->getPrecedence() == prec::Assignment &&
- Style.AlignOperands != FormatStyle::OAS_DontAlign) ||
- Previous->is(TT_ObjCMethodExpr)));
- for (SmallVectorImpl<prec::Level>::const_reverse_iterator
- I = Current.FakeLParens.rbegin(),
- E = Current.FakeLParens.rend();
- I != E; ++I) {
- ParenState NewParenState = State.Stack.back();
+ Previous &&
+ (Previous->opensScope() ||
+ Previous->isOneOf(tok::semi, tok::kw_return, TT_RequiresClause) ||
+ (Previous->getPrecedence() == prec::Assignment &&
+ Style.AlignOperands != FormatStyle::OAS_DontAlign) ||
+ Previous->is(TT_ObjCMethodExpr));
+ for (const auto &PrecedenceLevel : llvm::reverse(Current.FakeLParens)) {
+ const auto &CurrentState = State.Stack.back();
+ ParenState NewParenState = CurrentState;
NewParenState.Tok = nullptr;
NewParenState.ContainsLineBreak = false;
NewParenState.LastOperatorWrapped = true;
@@ -1345,10 +1671,10 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
NewParenState.IsWrappedConditional = false;
NewParenState.UnindentOperator = false;
NewParenState.NoLineBreak =
- NewParenState.NoLineBreak || State.Stack.back().NoLineBreakInOperand;
+ NewParenState.NoLineBreak || CurrentState.NoLineBreakInOperand;
// Don't propagate AvoidBinPacking into subexpressions of arg/param lists.
- if (*I > prec::Comma)
+ if (PrecedenceLevel > prec::Comma)
NewParenState.AvoidBinPacking = false;
// Indent from 'LastSpace' unless these are fake parentheses encapsulating
@@ -1356,24 +1682,28 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
// brackets is disabled.
if (!Current.isTrailingComment() &&
(Style.AlignOperands != FormatStyle::OAS_DontAlign ||
- *I < prec::Assignment) &&
+ PrecedenceLevel < prec::Assignment) &&
(!Previous || Previous->isNot(tok::kw_return) ||
- (Style.Language != FormatStyle::LK_Java && *I > 0)) &&
+ (Style.Language != FormatStyle::LK_Java && PrecedenceLevel > 0)) &&
(Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign ||
- *I != prec::Comma || Current.NestingLevel == 0)) {
- NewParenState.Indent =
- std::max(std::max(State.Column, NewParenState.Indent),
- State.Stack.back().LastSpace);
+ PrecedenceLevel != prec::Comma || Current.NestingLevel == 0)) {
+ NewParenState.Indent = std::max(
+ std::max(State.Column, NewParenState.Indent), CurrentState.LastSpace);
}
- if (Previous &&
- (Previous->getPrecedence() == prec::Assignment ||
- Previous->is(tok::kw_return) ||
- (*I == prec::Conditional && Previous->is(tok::question) &&
- Previous->is(TT_ConditionalExpr))) &&
+ // Special case for generic selection expressions, its comma-separated
+ // expressions are not aligned to the opening paren like regular calls, but
+ // rather continuation-indented relative to the _Generic keyword.
+ if (Previous && Previous->endsSequence(tok::l_paren, tok::kw__Generic))
+ NewParenState.Indent = CurrentState.LastSpace;
+
+ if ((shouldUnindentNextOperator(Current) ||
+ (Previous &&
+ (PrecedenceLevel == prec::Conditional &&
+ Previous->is(tok::question) && Previous->is(TT_ConditionalExpr)))) &&
!Newline) {
// If BreakBeforeBinaryOperators is set, un-indent a bit to account for
- // the operator and keep the operands aligned
+ // the operator and keep the operands aligned.
if (Style.AlignOperands == FormatStyle::OAS_AlignAfterOperator)
NewParenState.UnindentOperator = true;
// Mark indentation as alignment if the expression is aligned.
@@ -1387,28 +1717,31 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
// ParameterToInnerFunction));
// OuterFunction(SomeObject.InnerFunctionCall( // break
// ParameterToInnerFunction));
- if (*I > prec::Unknown)
+ if (PrecedenceLevel > prec::Unknown)
NewParenState.LastSpace = std::max(NewParenState.LastSpace, State.Column);
- if (*I != prec::Conditional && !Current.is(TT_UnaryOperator) &&
- Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign)
+ if (PrecedenceLevel != prec::Conditional &&
+ Current.isNot(TT_UnaryOperator) &&
+ Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign) {
NewParenState.StartOfFunctionCall = State.Column;
+ }
// Indent conditional expressions, unless they are chained "else-if"
// conditionals. Never indent expression where the 'operator' is ',', ';' or
// an assignment (i.e. *I <= prec::Assignment) as those have different
// indentation rules. Indent other expression, unless the indentation needs
// to be skipped.
- if (*I == prec::Conditional && Previous && Previous->is(tok::colon) &&
- Previous->is(TT_ConditionalExpr) && I == Current.FakeLParens.rbegin() &&
- !State.Stack.back().IsWrappedConditional) {
+ if (PrecedenceLevel == prec::Conditional && Previous &&
+ Previous->is(tok::colon) && Previous->is(TT_ConditionalExpr) &&
+ &PrecedenceLevel == &Current.FakeLParens.back() &&
+ !CurrentState.IsWrappedConditional) {
NewParenState.IsChainedConditional = true;
NewParenState.UnindentOperator = State.Stack.back().UnindentOperator;
- } else if (*I == prec::Conditional ||
- (!SkipFirstExtraIndent && *I > prec::Assignment &&
+ } else if (PrecedenceLevel == prec::Conditional ||
+ (!SkipFirstExtraIndent && PrecedenceLevel > prec::Assignment &&
!Current.isTrailingComment())) {
NewParenState.Indent += Style.ContinuationIndentWidth;
}
- if ((Previous && !Previous->opensScope()) || *I != prec::Comma)
+ if ((Previous && !Previous->opensScope()) || PrecedenceLevel != prec::Comma)
NewParenState.BreakBeforeParameter = false;
State.Stack.push_back(NewParenState);
SkipFirstExtraIndent = false;
@@ -1425,6 +1758,12 @@ void ContinuationIndenter::moveStatePastFakeRParens(LineState &State) {
State.Stack.pop_back();
State.Stack.back().VariablePos = VariablePos;
}
+
+ if (State.NextToken->ClosesRequiresClause && Style.IndentRequiresClause) {
+ // Remove the indentation of the requires clauses (which is not in Indent,
+ // but in LastSpace).
+ State.Stack.back().LastSpace -= Style.IndentWidth;
+ }
}
void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
@@ -1433,10 +1772,13 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
if (!Current.opensScope())
return;
+ const auto &CurrentState = State.Stack.back();
+
// Don't allow '<' or '(' in C# generic type constraints to start new scopes.
if (Current.isOneOf(tok::less, tok::l_paren) &&
- State.Stack.back().IsCSharpGenericTypeConstraint)
+ CurrentState.IsCSharpGenericTypeConstraint) {
return;
+ }
if (Current.MatchingParen && Current.is(BK_Block)) {
moveStateToNewBlock(State);
@@ -1444,45 +1786,47 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
}
unsigned NewIndent;
- unsigned LastSpace = State.Stack.back().LastSpace;
+ unsigned LastSpace = CurrentState.LastSpace;
bool AvoidBinPacking;
bool BreakBeforeParameter = false;
- unsigned NestedBlockIndent = std::max(State.Stack.back().StartOfFunctionCall,
- State.Stack.back().NestedBlockIndent);
+ unsigned NestedBlockIndent = std::max(CurrentState.StartOfFunctionCall,
+ CurrentState.NestedBlockIndent);
if (Current.isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) ||
opensProtoMessageField(Current, Style)) {
if (Current.opensBlockOrBlockTypeList(Style)) {
NewIndent = Style.IndentWidth +
- std::min(State.Column, State.Stack.back().NestedBlockIndent);
+ std::min(State.Column, CurrentState.NestedBlockIndent);
+ } else if (Current.is(tok::l_brace)) {
+ NewIndent =
+ CurrentState.LastSpace + Style.BracedInitializerIndentWidth.value_or(
+ Style.ContinuationIndentWidth);
} else {
- NewIndent = State.Stack.back().LastSpace + Style.ContinuationIndentWidth;
+ NewIndent = CurrentState.LastSpace + Style.ContinuationIndentWidth;
}
- const FormatToken *NextNoComment = Current.getNextNonComment();
+ const FormatToken *NextNonComment = Current.getNextNonComment();
bool EndsInComma = Current.MatchingParen &&
Current.MatchingParen->Previous &&
Current.MatchingParen->Previous->is(tok::comma);
AvoidBinPacking = EndsInComma || Current.is(TT_DictLiteral) ||
- Style.Language == FormatStyle::LK_Proto ||
- Style.Language == FormatStyle::LK_TextProto ||
- !Style.BinPackArguments ||
- (NextNoComment &&
- NextNoComment->isOneOf(TT_DesignatedInitializerPeriod,
- TT_DesignatedInitializerLSquare));
+ Style.isProto() || !Style.BinPackArguments ||
+ (NextNonComment && NextNonComment->isOneOf(
+ TT_DesignatedInitializerPeriod,
+ TT_DesignatedInitializerLSquare));
BreakBeforeParameter = EndsInComma;
if (Current.ParameterCount > 1)
NestedBlockIndent = std::max(NestedBlockIndent, State.Column + 1);
} else {
- NewIndent = Style.ContinuationIndentWidth +
- std::max(State.Stack.back().LastSpace,
- State.Stack.back().StartOfFunctionCall);
+ NewIndent =
+ Style.ContinuationIndentWidth +
+ std::max(CurrentState.LastSpace, CurrentState.StartOfFunctionCall);
// Ensure that different different brackets force relative alignment, e.g.:
// void SomeFunction(vector< // break
// int> v);
// FIXME: We likely want to do this for more combinations of brackets.
if (Current.is(tok::less) && Current.ParentBracket == tok::l_paren) {
- NewIndent = std::max(NewIndent, State.Stack.back().Indent);
- LastSpace = std::max(LastSpace, State.Stack.back().Indent);
+ NewIndent = std::max(NewIndent, CurrentState.Indent);
+ LastSpace = std::max(LastSpace, CurrentState.Indent);
}
bool EndsInComma =
@@ -1501,9 +1845,13 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
(State.Line->Type != LT_ObjCDecl && Style.BinPackParameters) ||
(State.Line->Type == LT_ObjCDecl && ObjCBinPackProtocolList);
+ bool GenericSelection =
+ Current.getPreviousNonComment() &&
+ Current.getPreviousNonComment()->is(tok::kw__Generic);
+
AvoidBinPacking =
- (State.Stack.back().IsCSharpGenericTypeConstraint) ||
- (Style.Language == FormatStyle::LK_JavaScript && EndsInComma) ||
+ (CurrentState.IsCSharpGenericTypeConstraint) || GenericSelection ||
+ (Style.isJavaScript() && EndsInComma) ||
(State.Line->MustBeDeclaration && !BinPackDeclaration) ||
(!State.Line->MustBeDeclaration && !Style.BinPackArguments) ||
(Style.ExperimentalAutoDetectBinPacking &&
@@ -1516,8 +1864,9 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
// If this '[' opens an ObjC call, determine whether all parameters fit
// into one line and put one per line if they don't.
if (getLengthToMatchingParen(Current, State.Stack) + State.Column >
- getColumnLimit(State))
+ getColumnLimit(State)) {
BreakBeforeParameter = true;
+ }
} else {
// For ColumnLimit = 0, we have to figure out whether there is or has to
// be a line break within this call.
@@ -1532,7 +1881,7 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
}
}
- if (Style.Language == FormatStyle::LK_JavaScript && EndsInComma)
+ if (Style.isJavaScript() && EndsInComma)
BreakBeforeParameter = true;
}
// Generally inherit NoLineBreak from the current scope to nested scope.
@@ -1541,33 +1890,32 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
bool NoLineBreak =
Current.Children.empty() &&
!Current.isOneOf(TT_DictLiteral, TT_ArrayInitializerLSquare) &&
- (State.Stack.back().NoLineBreak ||
- State.Stack.back().NoLineBreakInOperand ||
+ (CurrentState.NoLineBreak || CurrentState.NoLineBreakInOperand ||
(Current.is(TT_TemplateOpener) &&
- State.Stack.back().ContainsUnwrappedBuilder));
+ CurrentState.ContainsUnwrappedBuilder));
State.Stack.push_back(
ParenState(&Current, NewIndent, LastSpace, AvoidBinPacking, NoLineBreak));
- State.Stack.back().NestedBlockIndent = NestedBlockIndent;
- State.Stack.back().BreakBeforeParameter = BreakBeforeParameter;
- State.Stack.back().HasMultipleNestedBlocks =
- (Current.BlockParameterCount > 1);
-
- if (Style.BraceWrapping.BeforeLambdaBody && Current.Next != nullptr &&
- Current.Tok.is(tok::l_paren)) {
- // Search for any parameter that is a lambda
+ auto &NewState = State.Stack.back();
+ NewState.NestedBlockIndent = NestedBlockIndent;
+ NewState.BreakBeforeParameter = BreakBeforeParameter;
+ NewState.HasMultipleNestedBlocks = (Current.BlockParameterCount > 1);
+
+ if (Style.BraceWrapping.BeforeLambdaBody && Current.Next &&
+ Current.is(tok::l_paren)) {
+ // Search for any parameter that is a lambda.
FormatToken const *next = Current.Next;
- while (next != nullptr) {
+ while (next) {
if (next->is(TT_LambdaLSquare)) {
- State.Stack.back().HasMultipleNestedBlocks = true;
+ NewState.HasMultipleNestedBlocks = true;
break;
}
next = next->Next;
}
}
- State.Stack.back().IsInsideObjCArrayLiteral =
- Current.is(TT_ArrayInitializerLSquare) && Current.Previous &&
- Current.Previous->is(tok::at);
+ NewState.IsInsideObjCArrayLiteral = Current.is(TT_ArrayInitializerLSquare) &&
+ Current.Previous &&
+ Current.Previous->is(tok::at);
}
void ContinuationIndenter::moveStatePastScopeCloser(LineState &State) {
@@ -1581,8 +1929,11 @@ void ContinuationIndenter::moveStatePastScopeCloser(LineState &State) {
(Current.isOneOf(tok::r_paren, tok::r_square, TT_TemplateString) ||
(Current.is(tok::r_brace) && State.NextToken != State.Line->First) ||
State.NextToken->is(TT_TemplateCloser) ||
- (Current.is(tok::greater) && Current.is(TT_DictLiteral))))
+ (Current.is(tok::greater) && Current.is(TT_DictLiteral)))) {
State.Stack.pop_back();
+ }
+
+ auto &CurrentState = State.Stack.back();
// Reevaluate whether ObjC message arguments fit into one line.
// If a receiver spans multiple lines, e.g.:
@@ -1592,9 +1943,9 @@ void ContinuationIndenter::moveStatePastScopeCloser(LineState &State) {
// BreakBeforeParameter is calculated based on an incorrect assumption
// (it is checked whether the whole expression fits into one line without
// considering a line break inside a message receiver).
- // We check whether arguements fit after receiver scope closer (into the same
+ // We check whether arguments fit after receiver scope closer (into the same
// line).
- if (State.Stack.back().BreakBeforeParameter && Current.MatchingParen &&
+ if (CurrentState.BreakBeforeParameter && Current.MatchingParen &&
Current.MatchingParen->Previous) {
const FormatToken &CurrentScopeOpener = *Current.MatchingParen->Previous;
if (CurrentScopeOpener.is(TT_ObjCMethodExpr) &&
@@ -1603,8 +1954,9 @@ void ContinuationIndenter::moveStatePastScopeCloser(LineState &State) {
getLengthToMatchingParen(CurrentScopeOpener, State.Stack) +
CurrentScopeOpener.TotalLength - Current.TotalLength - 1;
if (State.Column + Current.ColumnWidth + NecessarySpaceInLine <=
- Style.ColumnLimit)
- State.Stack.back().BreakBeforeParameter = false;
+ Style.ColumnLimit) {
+ CurrentState.BreakBeforeParameter = false;
+ }
}
}
@@ -1612,11 +1964,16 @@ void ContinuationIndenter::moveStatePastScopeCloser(LineState &State) {
// If this ends the array subscript expr, reset the corresponding value.
const FormatToken *NextNonComment = Current.getNextNonComment();
if (NextNonComment && NextNonComment->isNot(tok::l_square))
- State.Stack.back().StartOfArraySubscripts = 0;
+ CurrentState.StartOfArraySubscripts = 0;
}
}
void ContinuationIndenter::moveStateToNewBlock(LineState &State) {
+ if (Style.LambdaBodyIndentation == FormatStyle::LBI_OuterScope &&
+ State.NextToken->is(TT_LambdaLBrace) &&
+ !State.Line->MightBeFunctionDecl) {
+ State.Stack.back().NestedBlockIndent = State.FirstIndent;
+ }
unsigned NestedBlockIndent = State.Stack.back().NestedBlockIndent;
// ObjC block sometimes follow special indentation rules.
unsigned NewIndent =
@@ -1732,9 +2089,8 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
auto NewCode = applyAllReplacements(RawText, Fixes.first);
tooling::Replacements NoFixes;
- if (!NewCode) {
+ if (!NewCode)
return addMultilineToken(Current, State);
- }
if (!DryRun) {
if (NewDelimiter != OldDelimiter) {
// In 'R"delimiter(...', the delimiter starts 2 characters after the start
@@ -1787,8 +2143,8 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
ContentStartsOnNewline || (NewCode->find('\n') != std::string::npos);
if (IsMultiline) {
// Break before further function parameters on all levels.
- for (unsigned i = 0, e = State.Stack.size(); i != e; ++i)
- State.Stack[i].BreakBeforeParameter = true;
+ for (ParenState &Paren : State.Stack)
+ Paren.BreakBeforeParameter = true;
}
return Fixes.second + PrefixExcessCharacters * Style.PenaltyExcessCharacter;
}
@@ -1796,8 +2152,8 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
unsigned ContinuationIndenter::addMultilineToken(const FormatToken &Current,
LineState &State) {
// Break before further function parameters on all levels.
- for (unsigned i = 0, e = State.Stack.size(); i != e; ++i)
- State.Stack[i].BreakBeforeParameter = true;
+ for (ParenState &Paren : State.Stack)
+ Paren.BreakBeforeParameter = true;
unsigned ColumnsUsed = State.Column;
// We can only affect layout of the first and the last line, so the penalty
@@ -1868,7 +2224,7 @@ unsigned ContinuationIndenter::handleEndOfLine(const FormatToken &Current,
static StringRef getEnclosingFunctionName(const FormatToken &Current) {
// Look for: 'function(' or 'function<templates>(' before Current.
auto Tok = Current.getPreviousNonComment();
- if (!Tok || !Tok->is(tok::l_paren))
+ if (!Tok || Tok->isNot(tok::l_paren))
return "";
Tok = Tok->getPreviousNonComment();
if (!Tok)
@@ -1878,25 +2234,26 @@ static StringRef getEnclosingFunctionName(const FormatToken &Current) {
if (Tok)
Tok = Tok->getPreviousNonComment();
}
- if (!Tok || !Tok->is(tok::identifier))
+ if (!Tok || Tok->isNot(tok::identifier))
return "";
return Tok->TokenText;
}
-llvm::Optional<FormatStyle>
+std::optional<FormatStyle>
ContinuationIndenter::getRawStringStyle(const FormatToken &Current,
const LineState &State) {
if (!Current.isStringLiteral())
- return None;
+ return std::nullopt;
auto Delimiter = getRawStringDelimiter(Current.TokenText);
if (!Delimiter)
- return None;
+ return std::nullopt;
auto RawStringStyle = RawStringFormats.getDelimiterStyle(*Delimiter);
- if (!RawStringStyle && Delimiter->empty())
+ if (!RawStringStyle && Delimiter->empty()) {
RawStringStyle = RawStringFormats.getEnclosingFunctionStyle(
getEnclosingFunctionName(Current));
+ }
if (!RawStringStyle)
- return None;
+ return std::nullopt;
RawStringStyle->ColumnLimit = getColumnLimit(State);
return RawStringStyle;
}
@@ -1906,13 +2263,12 @@ ContinuationIndenter::createBreakableToken(const FormatToken &Current,
LineState &State, bool AllowBreak) {
unsigned StartColumn = State.Column - Current.ColumnWidth;
if (Current.isStringLiteral()) {
- // FIXME: String literal breaking is currently disabled for C#, Java, Json
- // and JavaScript, as it requires strings to be merged using "+" which we
- // don't support.
- if (Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp() ||
- Style.isJson() || !Style.BreakStringLiterals || !AllowBreak)
+ // Strings in JSON cannot be broken. Breaking strings in JavaScript is
+ // disabled for now.
+ if (Style.isJson() || Style.isJavaScript() || !Style.BreakStringLiterals ||
+ !AllowBreak) {
return nullptr;
+ }
// Don't break string literals inside preprocessor directives (except for
// #define directives, as their contents are stored in separate lines and
@@ -1927,30 +2283,61 @@ ContinuationIndenter::createBreakableToken(const FormatToken &Current,
return nullptr;
// Don't break string literals inside Objective-C array literals (doing so
// raises the warning -Wobjc-string-concatenation).
- if (State.Stack.back().IsInsideObjCArrayLiteral) {
+ if (State.Stack.back().IsInsideObjCArrayLiteral)
return nullptr;
- }
+ // The "DPI"/"DPI-C" in SystemVerilog direct programming interface
+ // imports/exports cannot be split, e.g.
+ // `import "DPI" function foo();`
+ // FIXME: make this use same infra as C++ import checks
+ if (Style.isVerilog() && Current.Previous &&
+ Current.Previous->isOneOf(tok::kw_export, Keywords.kw_import)) {
+ return nullptr;
+ }
StringRef Text = Current.TokenText;
+
+ // We need this to address the case where there is an unbreakable tail only
+ // if certain other formatting decisions have been taken. The
+ // UnbreakableTailLength of Current is an overapproximation in that case and
+ // we need to be correct here.
+ unsigned UnbreakableTailLength = (State.NextToken && canBreak(State))
+ ? 0
+ : Current.UnbreakableTailLength;
+
+ if (Style.isVerilog() || Style.Language == FormatStyle::LK_Java ||
+ Style.isJavaScript() || Style.isCSharp()) {
+ BreakableStringLiteralUsingOperators::QuoteStyleType QuoteStyle;
+ if (Style.isJavaScript() && Text.starts_with("'") &&
+ Text.ends_with("'")) {
+ QuoteStyle = BreakableStringLiteralUsingOperators::SingleQuotes;
+ } else if (Style.isCSharp() && Text.starts_with("@\"") &&
+ Text.ends_with("\"")) {
+ QuoteStyle = BreakableStringLiteralUsingOperators::AtDoubleQuotes;
+ } else if (Text.starts_with("\"") && Text.ends_with("\"")) {
+ QuoteStyle = BreakableStringLiteralUsingOperators::DoubleQuotes;
+ } else {
+ return nullptr;
+ }
+ return std::make_unique<BreakableStringLiteralUsingOperators>(
+ Current, QuoteStyle,
+ /*UnindentPlus=*/shouldUnindentNextOperator(Current), StartColumn,
+ UnbreakableTailLength, State.Line->InPPDirective, Encoding, Style);
+ }
+
StringRef Prefix;
StringRef Postfix;
// FIXME: Handle whitespace between '_T', '(', '"..."', and ')'.
// FIXME: Store Prefix and Suffix (or PrefixLength and SuffixLength to
// reduce the overhead) for each FormatToken, which is a string, so that we
// don't run multiple checks here on the hot path.
- if ((Text.endswith(Postfix = "\"") &&
- (Text.startswith(Prefix = "@\"") || Text.startswith(Prefix = "\"") ||
- Text.startswith(Prefix = "u\"") || Text.startswith(Prefix = "U\"") ||
- Text.startswith(Prefix = "u8\"") ||
- Text.startswith(Prefix = "L\""))) ||
- (Text.startswith(Prefix = "_T(\"") && Text.endswith(Postfix = "\")"))) {
- // We need this to address the case where there is an unbreakable tail
- // only if certain other formatting decisions have been taken. The
- // UnbreakableTailLength of Current is an overapproximation is that case
- // and we need to be correct here.
- unsigned UnbreakableTailLength = (State.NextToken && canBreak(State))
- ? 0
- : Current.UnbreakableTailLength;
+ if ((Text.ends_with(Postfix = "\"") &&
+ (Text.starts_with(Prefix = "@\"") || Text.starts_with(Prefix = "\"") ||
+ Text.starts_with(Prefix = "u\"") ||
+ Text.starts_with(Prefix = "U\"") ||
+ Text.starts_with(Prefix = "u8\"") ||
+ Text.starts_with(Prefix = "L\""))) ||
+ (Text.starts_with(Prefix = "_T(\"") &&
+ Text.ends_with(Postfix = "\")"))) {
return std::make_unique<BreakableStringLiteral>(
Current, StartColumn, Prefix, Postfix, UnbreakableTailLength,
State.Line->InPPDirective, Encoding, Style);
@@ -1967,12 +2354,21 @@ ContinuationIndenter::createBreakableToken(const FormatToken &Current,
Current, StartColumn, Current.OriginalColumn, !Current.Previous,
State.Line->InPPDirective, Encoding, Style, Whitespaces.useCRLF());
} else if (Current.is(TT_LineComment) &&
- (Current.Previous == nullptr ||
+ (!Current.Previous ||
Current.Previous->isNot(TT_ImplicitStringLiteral))) {
+ bool RegularComments = [&]() {
+ for (const FormatToken *T = &Current; T && T->is(TT_LineComment);
+ T = T->Next) {
+ if (!(T->TokenText.starts_with("//") || T->TokenText.starts_with("#")))
+ return false;
+ }
+ return true;
+ }();
if (!Style.ReflowComments ||
CommentPragmasRegex.match(Current.TokenText.substr(2)) ||
- switchesFormatting(Current))
+ switchesFormatting(Current) || !RegularComments) {
return nullptr;
+ }
return std::make_unique<BreakableLineCommentSection>(
Current, StartColumn, /*InPPDirective=*/false, Encoding, Style);
}
@@ -2061,11 +2457,12 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
if (Split.first == StringRef::npos) {
// No break opportunity - update the penalty and continue with the next
// logical line.
- if (LineIndex < EndIndex - 1)
+ if (LineIndex < EndIndex - 1) {
// The last line's penalty is handled in addNextStateToQueue() or when
// calling replaceWhitespaceAfterLastLine below.
Penalty += Style.PenaltyExcessCharacter *
(ContentStartColumn + RemainingTokenColumns - ColumnLimit);
+ }
LLVM_DEBUG(llvm::dbgs() << " No break opportunity.\n");
break;
}
@@ -2141,9 +2538,10 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
// The current line fits after compressing the whitespace - reflow
// the next line into it if possible.
TryReflow = true;
- if (!DryRun)
+ if (!DryRun) {
Token->compressWhitespace(LineIndex, TailOffset, Split,
Whitespaces);
+ }
// When we continue on the same line, leave one space between content.
ContentStartColumn += ToSplitColumns + 1;
Penalty += ExcessCharactersPenalty;
@@ -2180,17 +2578,17 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
// When breaking before a tab character, it may be moved by a few columns,
// but will still be expanded to the next tab stop, so we don't save any
// columns.
- if (NewRemainingTokenColumns == RemainingTokenColumns) {
+ if (NewRemainingTokenColumns >= RemainingTokenColumns) {
// FIXME: Do we need to adjust the penalty?
break;
}
- assert(NewRemainingTokenColumns < RemainingTokenColumns);
LLVM_DEBUG(llvm::dbgs() << " Breaking at: " << TailOffset + Split.first
<< ", " << Split.second << "\n");
- if (!DryRun)
+ if (!DryRun) {
Token->insertBreak(LineIndex, TailOffset, Split, ContentIndent,
Whitespaces);
+ }
Penalty += NewBreakPenalty;
TailOffset += Split.first + Split.second;
@@ -2202,10 +2600,11 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
// line.
if (LineIndex + 1 != EndIndex) {
unsigned NextLineIndex = LineIndex + 1;
- if (NewBreakBefore)
+ if (NewBreakBefore) {
// After breaking a line, try to reflow the next line into the current
// one once RemainingTokenColumns fits.
TryReflow = true;
+ }
if (TryReflow) {
// We decided that we want to try reflowing the next line into the
// current one.
@@ -2268,9 +2667,8 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
unsigned ExcessCharactersPenalty =
(ContentStartColumn + ToSplitColumns - ColumnLimit) *
Style.PenaltyExcessCharacter;
- if (NewBreakPenalty < ExcessCharactersPenalty) {
+ if (NewBreakPenalty < ExcessCharactersPenalty)
Reflow = false;
- }
}
}
}
@@ -2324,9 +2722,10 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
Penalty += Style.PenaltyExcessCharacter *
(ContentStartColumn + RemainingTokenColumns - ColumnLimit);
- if (!DryRun)
+ if (!DryRun) {
Token->replaceWhitespaceAfterLastLine(TailOffset, SplitAfterLastLine,
Whitespaces);
+ }
ContentStartColumn =
Token->getContentStartColumn(Token->getLineCount() - 1, /*Break=*/true);
RemainingTokenColumns = Token->getRemainingLength(
@@ -2339,13 +2738,15 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
Current.UnbreakableTailLength;
if (BreakInserted) {
+ if (!DryRun)
+ Token->updateAfterBroken(Whitespaces);
+
// If we break the token inside a parameter list, we need to break before
// the next parameter on all levels, so that the next parameter is clearly
// visible. Line comments already introduce a break.
- if (Current.isNot(TT_LineComment)) {
- for (unsigned i = 0, e = State.Stack.size(); i != e; ++i)
- State.Stack[i].BreakBeforeParameter = true;
- }
+ if (Current.isNot(TT_LineComment))
+ for (ParenState &Paren : State.Stack)
+ Paren.BreakBeforeParameter = true;
if (Current.is(TT_BlockComment))
State.NoContinuation = true;
@@ -2359,7 +2760,7 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
}
unsigned ContinuationIndenter::getColumnLimit(const LineState &State) const {
- // In preprocessor directives reserve two chars for trailing " \"
+ // In preprocessor directives reserve two chars for trailing " \".
return Style.ColumnLimit - (State.Line->InPPDirective ? 2 : 0);
}
@@ -2370,17 +2771,19 @@ bool ContinuationIndenter::nextIsMultilineString(const LineState &State) {
// We never consider raw string literals "multiline" for the purpose of
// AlwaysBreakBeforeMultilineStrings implementation as they are special-cased
// (see TokenAnnotator::mustBreakBefore().
- if (Current.TokenText.startswith("R\""))
+ if (Current.TokenText.starts_with("R\""))
return false;
if (Current.IsMultiline)
return true;
if (Current.getNextNonComment() &&
- Current.getNextNonComment()->isStringLiteral())
+ Current.getNextNonComment()->isStringLiteral()) {
return true; // Implicit concatenation.
+ }
if (Style.ColumnLimit != 0 && Style.BreakStringLiterals &&
State.Column + Current.ColumnWidth + Current.UnbreakableTailLength >
- Style.ColumnLimit)
+ Style.ColumnLimit) {
return true; // String will be split.
+ }
return false;
}
diff --git a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h
index b1b2611263a9..057b85bd32d5 100644
--- a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h
+++ b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h
@@ -20,6 +20,7 @@
#include "clang/Format/Format.h"
#include "llvm/Support/Regex.h"
#include <map>
+#include <optional>
#include <tuple>
namespace clang {
@@ -41,9 +42,9 @@ struct RawStringFormatStyleManager {
RawStringFormatStyleManager(const FormatStyle &CodeStyle);
- llvm::Optional<FormatStyle> getDelimiterStyle(StringRef Delimiter) const;
+ std::optional<FormatStyle> getDelimiterStyle(StringRef Delimiter) const;
- llvm::Optional<FormatStyle>
+ std::optional<FormatStyle>
getEnclosingFunctionStyle(StringRef EnclosingFunction) const;
};
@@ -120,8 +121,8 @@ private:
/// If \p Current is a raw string that is configured to be reformatted,
/// return the style to be used.
- llvm::Optional<FormatStyle> getRawStringStyle(const FormatToken &Current,
- const LineState &State);
+ std::optional<FormatStyle> getRawStringStyle(const FormatToken &Current,
+ const LineState &State);
/// If the current token sticks out over the end of the line, break
/// it if possible.
@@ -203,15 +204,15 @@ struct ParenState {
bool AvoidBinPacking, bool NoLineBreak)
: Tok(Tok), Indent(Indent), LastSpace(LastSpace),
NestedBlockIndent(Indent), IsAligned(false),
- BreakBeforeClosingBrace(false), AvoidBinPacking(AvoidBinPacking),
- BreakBeforeParameter(false), NoLineBreak(NoLineBreak),
- NoLineBreakInOperand(false), LastOperatorWrapped(true),
- ContainsLineBreak(false), ContainsUnwrappedBuilder(false),
- AlignColons(true), ObjCSelectorNameFound(false),
- HasMultipleNestedBlocks(false), NestedBlockInlined(false),
- IsInsideObjCArrayLiteral(false), IsCSharpGenericTypeConstraint(false),
- IsChainedConditional(false), IsWrappedConditional(false),
- UnindentOperator(false) {}
+ BreakBeforeClosingBrace(false), BreakBeforeClosingParen(false),
+ AvoidBinPacking(AvoidBinPacking), BreakBeforeParameter(false),
+ NoLineBreak(NoLineBreak), NoLineBreakInOperand(false),
+ LastOperatorWrapped(true), ContainsLineBreak(false),
+ ContainsUnwrappedBuilder(false), AlignColons(true),
+ ObjCSelectorNameFound(false), HasMultipleNestedBlocks(false),
+ NestedBlockInlined(false), IsInsideObjCArrayLiteral(false),
+ IsCSharpGenericTypeConstraint(false), IsChainedConditional(false),
+ IsWrappedConditional(false), UnindentOperator(false) {}
/// \brief The token opening this parenthesis level, or nullptr if this level
/// is opened by fake parenthesis.
@@ -277,6 +278,13 @@ struct ParenState {
/// was a newline after the beginning left brace.
bool BreakBeforeClosingBrace : 1;
+ /// Whether a newline needs to be inserted before the block's closing
+ /// paren.
+ ///
+ /// We only want to insert a newline before the closing paren if there also
+ /// was a newline after the beginning left paren.
+ bool BreakBeforeClosingParen : 1;
+
/// Avoid bin packing, i.e. multiple parameters/elements on multiple
/// lines, in this context.
bool AvoidBinPacking : 1;
@@ -362,6 +370,8 @@ struct ParenState {
return IsAligned;
if (BreakBeforeClosingBrace != Other.BreakBeforeClosingBrace)
return BreakBeforeClosingBrace;
+ if (BreakBeforeClosingParen != Other.BreakBeforeClosingParen)
+ return BreakBeforeClosingParen;
if (QuestionColumn != Other.QuestionColumn)
return QuestionColumn < Other.QuestionColumn;
if (AvoidBinPacking != Other.AvoidBinPacking)
@@ -410,9 +420,6 @@ struct LineState {
/// The token that needs to be next formatted.
FormatToken *NextToken;
- /// \c true if this line contains a continued for-loop section.
- bool LineContainsContinuedForLoopSection;
-
/// \c true if \p NextToken should not continue this line.
bool NoContinuation;
@@ -426,9 +433,12 @@ struct LineState {
/// literal sequence, 0 otherwise.
unsigned StartOfStringLiteral;
+ /// Disallow line breaks for this line.
+ bool NoLineBreak;
+
/// A stack keeping track of properties applying to parenthesis
/// levels.
- std::vector<ParenState> Stack;
+ SmallVector<ParenState> Stack;
/// Ignore the stack of \c ParenStates for state comparison.
///
@@ -459,9 +469,6 @@ struct LineState {
return NextToken < Other.NextToken;
if (Column != Other.Column)
return Column < Other.Column;
- if (LineContainsContinuedForLoopSection !=
- Other.LineContainsContinuedForLoopSection)
- return LineContainsContinuedForLoopSection;
if (NoContinuation != Other.NoContinuation)
return NoContinuation;
if (StartOfLineLevel != Other.StartOfLineLevel)
diff --git a/contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.cpp b/contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.cpp
new file mode 100644
index 000000000000..319236d3bd61
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.cpp
@@ -0,0 +1,261 @@
+//===--- DefinitionBlockSeparator.cpp ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements DefinitionBlockSeparator, a TokenAnalyzer that inserts
+/// or removes empty lines separating definition blocks like classes, structs,
+/// functions, enums, and namespaces in between.
+///
+//===----------------------------------------------------------------------===//
+
+#include "DefinitionBlockSeparator.h"
+#include "llvm/Support/Debug.h"
+#define DEBUG_TYPE "definition-block-separator"
+
+namespace clang {
+namespace format {
+std::pair<tooling::Replacements, unsigned> DefinitionBlockSeparator::analyze(
+ TokenAnnotator &Annotator, SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ FormatTokenLexer &Tokens) {
+ assert(Style.SeparateDefinitionBlocks != FormatStyle::SDS_Leave);
+ AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
+ tooling::Replacements Result;
+ separateBlocks(AnnotatedLines, Result, Tokens);
+ return {Result, 0};
+}
+
+void DefinitionBlockSeparator::separateBlocks(
+ SmallVectorImpl<AnnotatedLine *> &Lines, tooling::Replacements &Result,
+ FormatTokenLexer &Tokens) {
+ const bool IsNeverStyle =
+ Style.SeparateDefinitionBlocks == FormatStyle::SDS_Never;
+ const AdditionalKeywords &ExtraKeywords = Tokens.getKeywords();
+ auto GetBracketLevelChange = [](const FormatToken *Tok) {
+ if (Tok->isOneOf(tok::l_brace, tok::l_paren, tok::l_square))
+ return 1;
+ if (Tok->isOneOf(tok::r_brace, tok::r_paren, tok::r_square))
+ return -1;
+ return 0;
+ };
+ auto LikelyDefinition = [&](const AnnotatedLine *Line,
+ bool ExcludeEnum = false) {
+ if ((Line->MightBeFunctionDecl && Line->mightBeFunctionDefinition()) ||
+ Line->startsWithNamespace()) {
+ return true;
+ }
+ int BracketLevel = 0;
+ for (const FormatToken *CurrentToken = Line->First; CurrentToken;
+ CurrentToken = CurrentToken->Next) {
+ if (BracketLevel == 0) {
+ if (CurrentToken->isOneOf(tok::kw_class, tok::kw_struct,
+ tok::kw_union) ||
+ (Style.isJavaScript() &&
+ CurrentToken->is(ExtraKeywords.kw_function))) {
+ return true;
+ }
+ if (!ExcludeEnum && CurrentToken->is(tok::kw_enum))
+ return true;
+ }
+ BracketLevel += GetBracketLevelChange(CurrentToken);
+ }
+ return false;
+ };
+ unsigned NewlineCount =
+ (Style.SeparateDefinitionBlocks == FormatStyle::SDS_Always ? 1 : 0) + 1;
+ WhitespaceManager Whitespaces(
+ Env.getSourceManager(), Style,
+ Style.LineEnding > FormatStyle::LE_CRLF
+ ? WhitespaceManager::inputUsesCRLF(
+ Env.getSourceManager().getBufferData(Env.getFileID()),
+ Style.LineEnding == FormatStyle::LE_DeriveCRLF)
+ : Style.LineEnding == FormatStyle::LE_CRLF);
+ for (unsigned I = 0; I < Lines.size(); ++I) {
+ const auto &CurrentLine = Lines[I];
+ if (CurrentLine->InPPDirective)
+ continue;
+ FormatToken *TargetToken = nullptr;
+ AnnotatedLine *TargetLine;
+ auto OpeningLineIndex = CurrentLine->MatchingOpeningBlockLineIndex;
+ AnnotatedLine *OpeningLine = nullptr;
+ const auto IsAccessSpecifierToken = [](const FormatToken *Token) {
+ return Token->isAccessSpecifier() || Token->isObjCAccessSpecifier();
+ };
+ const auto InsertReplacement = [&](const int NewlineToInsert) {
+ assert(TargetLine);
+ assert(TargetToken);
+
+ // Do not handle EOF newlines.
+ if (TargetToken->is(tok::eof))
+ return;
+ if (IsAccessSpecifierToken(TargetToken) ||
+ (OpeningLineIndex > 0 &&
+ IsAccessSpecifierToken(Lines[OpeningLineIndex - 1]->First))) {
+ return;
+ }
+ if (!TargetLine->Affected)
+ return;
+ Whitespaces.replaceWhitespace(*TargetToken, NewlineToInsert,
+ TargetToken->OriginalColumn,
+ TargetToken->OriginalColumn);
+ };
+ const auto IsPPConditional = [&](const size_t LineIndex) {
+ const auto &Line = Lines[LineIndex];
+ return Line->First->is(tok::hash) && Line->First->Next &&
+ Line->First->Next->isOneOf(tok::pp_if, tok::pp_ifdef, tok::pp_else,
+ tok::pp_ifndef, tok::pp_elifndef,
+ tok::pp_elifdef, tok::pp_elif,
+ tok::pp_endif);
+ };
+ const auto FollowingOtherOpening = [&]() {
+ return OpeningLineIndex == 0 ||
+ Lines[OpeningLineIndex - 1]->Last->opensScope() ||
+ IsPPConditional(OpeningLineIndex - 1);
+ };
+ const auto HasEnumOnLine = [&]() {
+ bool FoundEnumKeyword = false;
+ int BracketLevel = 0;
+ for (const FormatToken *CurrentToken = CurrentLine->First; CurrentToken;
+ CurrentToken = CurrentToken->Next) {
+ if (BracketLevel == 0) {
+ if (CurrentToken->is(tok::kw_enum))
+ FoundEnumKeyword = true;
+ else if (FoundEnumKeyword && CurrentToken->is(tok::l_brace))
+ return true;
+ }
+ BracketLevel += GetBracketLevelChange(CurrentToken);
+ }
+ return FoundEnumKeyword && I + 1 < Lines.size() &&
+ Lines[I + 1]->First->is(tok::l_brace);
+ };
+
+ bool IsDefBlock = false;
+ const auto MayPrecedeDefinition = [&](const int Direction = -1) {
+ assert(Direction >= -1);
+ assert(Direction <= 1);
+ const size_t OperateIndex = OpeningLineIndex + Direction;
+ assert(OperateIndex < Lines.size());
+ const auto &OperateLine = Lines[OperateIndex];
+ if (LikelyDefinition(OperateLine))
+ return false;
+
+ if (const auto *Tok = OperateLine->First;
+ Tok->is(tok::comment) && !isClangFormatOn(Tok->TokenText)) {
+ return true;
+ }
+
+ // A single line identifier that is not in the last line.
+ if (OperateLine->First->is(tok::identifier) &&
+ OperateLine->First == OperateLine->Last &&
+ OperateIndex + 1 < Lines.size()) {
+ // UnwrappedLineParser's recognition of free-standing macro like
+ // Q_OBJECT may also recognize some uppercased type names that may be
+ // used as return type as that kind of macros, which is a bit hard to
+ // distinguish one from another purely from token patterns. Here, we
+ // try not to add new lines below those identifiers.
+ AnnotatedLine *NextLine = Lines[OperateIndex + 1];
+ if (NextLine->MightBeFunctionDecl &&
+ NextLine->mightBeFunctionDefinition() &&
+ NextLine->First->NewlinesBefore == 1 &&
+ OperateLine->First->is(TT_FunctionLikeOrFreestandingMacro)) {
+ return true;
+ }
+ }
+
+ if (Style.isCSharp() && OperateLine->First->is(TT_AttributeSquare))
+ return true;
+ return false;
+ };
+
+ if (HasEnumOnLine() &&
+ !LikelyDefinition(CurrentLine, /*ExcludeEnum=*/true)) {
+ // We have no scope opening/closing information for enum.
+ IsDefBlock = true;
+ OpeningLineIndex = I;
+ while (OpeningLineIndex > 0 && MayPrecedeDefinition())
+ --OpeningLineIndex;
+ OpeningLine = Lines[OpeningLineIndex];
+ TargetLine = OpeningLine;
+ TargetToken = TargetLine->First;
+ if (!FollowingOtherOpening())
+ InsertReplacement(NewlineCount);
+ else if (IsNeverStyle)
+ InsertReplacement(OpeningLineIndex != 0);
+ TargetLine = CurrentLine;
+ TargetToken = TargetLine->First;
+ while (TargetToken && TargetToken->isNot(tok::r_brace))
+ TargetToken = TargetToken->Next;
+ if (!TargetToken)
+ while (I < Lines.size() && Lines[I]->First->isNot(tok::r_brace))
+ ++I;
+ } else if (CurrentLine->First->closesScope()) {
+ if (OpeningLineIndex > Lines.size())
+ continue;
+ // Handling the case that opening brace has its own line, with checking
+ // whether the last line already had an opening brace to guard against
+ // misrecognition.
+ if (OpeningLineIndex > 0 &&
+ Lines[OpeningLineIndex]->First->is(tok::l_brace) &&
+ Lines[OpeningLineIndex - 1]->Last->isNot(tok::l_brace)) {
+ --OpeningLineIndex;
+ }
+ OpeningLine = Lines[OpeningLineIndex];
+ // Closing a function definition.
+ if (LikelyDefinition(OpeningLine)) {
+ IsDefBlock = true;
+ while (OpeningLineIndex > 0 && MayPrecedeDefinition())
+ --OpeningLineIndex;
+ OpeningLine = Lines[OpeningLineIndex];
+ TargetLine = OpeningLine;
+ TargetToken = TargetLine->First;
+ if (!FollowingOtherOpening()) {
+ // Avoid duplicated replacement.
+ if (TargetToken->isNot(tok::l_brace))
+ InsertReplacement(NewlineCount);
+ } else if (IsNeverStyle) {
+ InsertReplacement(OpeningLineIndex != 0);
+ }
+ }
+ }
+
+ // Not the last token.
+ if (IsDefBlock && I + 1 < Lines.size()) {
+ OpeningLineIndex = I + 1;
+ TargetLine = Lines[OpeningLineIndex];
+ TargetToken = TargetLine->First;
+
+ // No empty line for continuously closing scopes. The token will be
+ // handled in another case if the line following is opening a
+ // definition.
+ if (!TargetToken->closesScope() && !IsPPConditional(OpeningLineIndex)) {
+ // Check whether current line may precede a definition line.
+ while (OpeningLineIndex + 1 < Lines.size() &&
+ MayPrecedeDefinition(/*Direction=*/0)) {
+ ++OpeningLineIndex;
+ }
+ TargetLine = Lines[OpeningLineIndex];
+ if (!LikelyDefinition(TargetLine)) {
+ OpeningLineIndex = I + 1;
+ TargetLine = Lines[I + 1];
+ TargetToken = TargetLine->First;
+ InsertReplacement(NewlineCount);
+ }
+ } else if (IsNeverStyle) {
+ InsertReplacement(/*NewlineToInsert=*/1);
+ }
+ }
+ }
+ for (const auto &R : Whitespaces.generateReplacements()) {
+ // The add method returns an Error instance which simulates program exit
+ // code through overloading boolean operator, thus false here indicates
+ // success.
+ if (Result.add(R))
+ return;
+ }
+}
+} // namespace format
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.h b/contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.h
new file mode 100644
index 000000000000..31c0f34d6e19
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.h
@@ -0,0 +1,41 @@
+//===--- DefinitionBlockSeparator.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file declares DefinitionBlockSeparator, a TokenAnalyzer that inserts or
+/// removes empty lines separating definition blocks like classes, structs,
+/// functions, enums, and namespaces in between.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_FORMAT_DEFINITIONBLOCKSEPARATOR_H
+#define LLVM_CLANG_LIB_FORMAT_DEFINITIONBLOCKSEPARATOR_H
+
+#include "TokenAnalyzer.h"
+#include "WhitespaceManager.h"
+
+namespace clang {
+namespace format {
+class DefinitionBlockSeparator : public TokenAnalyzer {
+public:
+ DefinitionBlockSeparator(const Environment &Env, const FormatStyle &Style)
+ : TokenAnalyzer(Env, Style) {}
+
+ std::pair<tooling::Replacements, unsigned>
+ analyze(TokenAnnotator &Annotator,
+ SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ FormatTokenLexer &Tokens) override;
+
+private:
+ void separateBlocks(SmallVectorImpl<AnnotatedLine *> &Lines,
+ tooling::Replacements &Result, FormatTokenLexer &Tokens);
+};
+} // namespace format
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Format/Format.cpp b/contrib/llvm-project/clang/lib/Format/Format.cpp
index 2b860d2a25f7..10fe35c79a4f 100644
--- a/contrib/llvm-project/clang/lib/Format/Format.cpp
+++ b/contrib/llvm-project/clang/lib/Format/Format.cpp
@@ -16,9 +16,14 @@
#include "AffectedRangeManager.h"
#include "BreakableToken.h"
#include "ContinuationIndenter.h"
+#include "DefinitionBlockSeparator.h"
#include "FormatInternal.h"
+#include "FormatToken.h"
#include "FormatTokenLexer.h"
+#include "IntegerLiteralSeparatorFixer.h"
#include "NamespaceEndCommentsFixer.h"
+#include "ObjCPropertyAttributeOrderFixer.h"
+#include "QualifierAlignmentFixer.h"
#include "SortJavaScriptImports.h"
#include "TokenAnalyzer.h"
#include "TokenAnnotator.h"
@@ -32,6 +37,7 @@
#include "clang/Lex/Lexer.h"
#include "clang/Tooling/Inclusions/HeaderIncludes.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Debug.h"
@@ -42,6 +48,7 @@
#include <algorithm>
#include <memory>
#include <mutex>
+#include <optional>
#include <string>
#include <unordered_map>
@@ -53,103 +60,84 @@ LLVM_YAML_IS_SEQUENCE_VECTOR(clang::format::FormatStyle::RawStringFormat)
namespace llvm {
namespace yaml {
-template <> struct ScalarEnumerationTraits<FormatStyle::LanguageKind> {
- static void enumeration(IO &IO, FormatStyle::LanguageKind &Value) {
- IO.enumCase(Value, "Cpp", FormatStyle::LK_Cpp);
- IO.enumCase(Value, "Java", FormatStyle::LK_Java);
- IO.enumCase(Value, "JavaScript", FormatStyle::LK_JavaScript);
- IO.enumCase(Value, "ObjC", FormatStyle::LK_ObjC);
- IO.enumCase(Value, "Proto", FormatStyle::LK_Proto);
- IO.enumCase(Value, "TableGen", FormatStyle::LK_TableGen);
- IO.enumCase(Value, "TextProto", FormatStyle::LK_TextProto);
- IO.enumCase(Value, "CSharp", FormatStyle::LK_CSharp);
- IO.enumCase(Value, "Json", FormatStyle::LK_Json);
- }
-};
-
-template <> struct ScalarEnumerationTraits<FormatStyle::LanguageStandard> {
- static void enumeration(IO &IO, FormatStyle::LanguageStandard &Value) {
- IO.enumCase(Value, "c++03", FormatStyle::LS_Cpp03);
- IO.enumCase(Value, "C++03", FormatStyle::LS_Cpp03); // Legacy alias
- IO.enumCase(Value, "Cpp03", FormatStyle::LS_Cpp03); // Legacy alias
-
- IO.enumCase(Value, "c++11", FormatStyle::LS_Cpp11);
- IO.enumCase(Value, "C++11", FormatStyle::LS_Cpp11); // Legacy alias
-
- IO.enumCase(Value, "c++14", FormatStyle::LS_Cpp14);
- IO.enumCase(Value, "c++17", FormatStyle::LS_Cpp17);
- IO.enumCase(Value, "c++20", FormatStyle::LS_Cpp20);
-
- IO.enumCase(Value, "Latest", FormatStyle::LS_Latest);
- IO.enumCase(Value, "Cpp11", FormatStyle::LS_Latest); // Legacy alias
- IO.enumCase(Value, "Auto", FormatStyle::LS_Auto);
- }
-};
-
template <>
-struct ScalarEnumerationTraits<FormatStyle::LambdaBodyIndentationKind> {
- static void enumeration(IO &IO,
- FormatStyle::LambdaBodyIndentationKind &Value) {
- IO.enumCase(Value, "Signature", FormatStyle::LBI_Signature);
- IO.enumCase(Value, "OuterScope", FormatStyle::LBI_OuterScope);
+struct ScalarEnumerationTraits<FormatStyle::BreakBeforeNoexceptSpecifierStyle> {
+ static void
+ enumeration(IO &IO, FormatStyle::BreakBeforeNoexceptSpecifierStyle &Value) {
+ IO.enumCase(Value, "Never", FormatStyle::BBNSS_Never);
+ IO.enumCase(Value, "OnlyWithParen", FormatStyle::BBNSS_OnlyWithParen);
+ IO.enumCase(Value, "Always", FormatStyle::BBNSS_Always);
}
};
-template <> struct ScalarEnumerationTraits<FormatStyle::UseTabStyle> {
- static void enumeration(IO &IO, FormatStyle::UseTabStyle &Value) {
- IO.enumCase(Value, "Never", FormatStyle::UT_Never);
- IO.enumCase(Value, "false", FormatStyle::UT_Never);
- IO.enumCase(Value, "Always", FormatStyle::UT_Always);
- IO.enumCase(Value, "true", FormatStyle::UT_Always);
- IO.enumCase(Value, "ForIndentation", FormatStyle::UT_ForIndentation);
- IO.enumCase(Value, "ForContinuationAndIndentation",
- FormatStyle::UT_ForContinuationAndIndentation);
- IO.enumCase(Value, "AlignWithSpaces", FormatStyle::UT_AlignWithSpaces);
- }
-};
+template <> struct MappingTraits<FormatStyle::AlignConsecutiveStyle> {
+ static void enumInput(IO &IO, FormatStyle::AlignConsecutiveStyle &Value) {
+ IO.enumCase(Value, "None",
+ FormatStyle::AlignConsecutiveStyle(
+ {/*Enabled=*/false, /*AcrossEmptyLines=*/false,
+ /*AcrossComments=*/false, /*AlignCompound=*/false,
+ /*AlignFunctionPointers=*/false, /*PadOperators=*/true}));
+ IO.enumCase(Value, "Consecutive",
+ FormatStyle::AlignConsecutiveStyle(
+ {/*Enabled=*/true, /*AcrossEmptyLines=*/false,
+ /*AcrossComments=*/false, /*AlignCompound=*/false,
+ /*AlignFunctionPointers=*/false, /*PadOperators=*/true}));
+ IO.enumCase(Value, "AcrossEmptyLines",
+ FormatStyle::AlignConsecutiveStyle(
+ {/*Enabled=*/true, /*AcrossEmptyLines=*/true,
+ /*AcrossComments=*/false, /*AlignCompound=*/false,
+ /*AlignFunctionPointers=*/false, /*PadOperators=*/true}));
+ IO.enumCase(Value, "AcrossComments",
+ FormatStyle::AlignConsecutiveStyle(
+ {/*Enabled=*/true, /*AcrossEmptyLines=*/false,
+ /*AcrossComments=*/true, /*AlignCompound=*/false,
+ /*AlignFunctionPointers=*/false, /*PadOperators=*/true}));
+ IO.enumCase(Value, "AcrossEmptyLinesAndComments",
+ FormatStyle::AlignConsecutiveStyle(
+ {/*Enabled=*/true, /*AcrossEmptyLines=*/true,
+ /*AcrossComments=*/true, /*AlignCompound=*/false,
+ /*AlignFunctionPointers=*/false, /*PadOperators=*/true}));
-template <> struct ScalarEnumerationTraits<FormatStyle::JavaScriptQuoteStyle> {
- static void enumeration(IO &IO, FormatStyle::JavaScriptQuoteStyle &Value) {
- IO.enumCase(Value, "Leave", FormatStyle::JSQS_Leave);
- IO.enumCase(Value, "Single", FormatStyle::JSQS_Single);
- IO.enumCase(Value, "Double", FormatStyle::JSQS_Double);
+ // For backward compatibility.
+ IO.enumCase(Value, "true",
+ FormatStyle::AlignConsecutiveStyle(
+ {/*Enabled=*/true, /*AcrossEmptyLines=*/false,
+ /*AcrossComments=*/false, /*AlignCompound=*/false,
+ /*AlignFunctionPointers=*/false, /*PadOperators=*/true}));
+ IO.enumCase(Value, "false",
+ FormatStyle::AlignConsecutiveStyle(
+ {/*Enabled=*/false, /*AcrossEmptyLines=*/false,
+ /*AcrossComments=*/false, /*AlignCompound=*/false,
+ /*AlignFunctionPointers=*/false, /*PadOperators=*/true}));
}
-};
-template <> struct ScalarEnumerationTraits<FormatStyle::ShortBlockStyle> {
- static void enumeration(IO &IO, FormatStyle::ShortBlockStyle &Value) {
- IO.enumCase(Value, "Never", FormatStyle::SBS_Never);
- IO.enumCase(Value, "false", FormatStyle::SBS_Never);
- IO.enumCase(Value, "Always", FormatStyle::SBS_Always);
- IO.enumCase(Value, "true", FormatStyle::SBS_Always);
- IO.enumCase(Value, "Empty", FormatStyle::SBS_Empty);
+ static void mapping(IO &IO, FormatStyle::AlignConsecutiveStyle &Value) {
+ IO.mapOptional("Enabled", Value.Enabled);
+ IO.mapOptional("AcrossEmptyLines", Value.AcrossEmptyLines);
+ IO.mapOptional("AcrossComments", Value.AcrossComments);
+ IO.mapOptional("AlignCompound", Value.AlignCompound);
+ IO.mapOptional("AlignFunctionPointers", Value.AlignFunctionPointers);
+ IO.mapOptional("PadOperators", Value.PadOperators);
}
};
-template <> struct ScalarEnumerationTraits<FormatStyle::ShortFunctionStyle> {
- static void enumeration(IO &IO, FormatStyle::ShortFunctionStyle &Value) {
- IO.enumCase(Value, "None", FormatStyle::SFS_None);
- IO.enumCase(Value, "false", FormatStyle::SFS_None);
- IO.enumCase(Value, "All", FormatStyle::SFS_All);
- IO.enumCase(Value, "true", FormatStyle::SFS_All);
- IO.enumCase(Value, "Inline", FormatStyle::SFS_Inline);
- IO.enumCase(Value, "InlineOnly", FormatStyle::SFS_InlineOnly);
- IO.enumCase(Value, "Empty", FormatStyle::SFS_Empty);
+template <>
+struct MappingTraits<FormatStyle::ShortCaseStatementsAlignmentStyle> {
+ static void mapping(IO &IO,
+ FormatStyle::ShortCaseStatementsAlignmentStyle &Value) {
+ IO.mapOptional("Enabled", Value.Enabled);
+ IO.mapOptional("AcrossEmptyLines", Value.AcrossEmptyLines);
+ IO.mapOptional("AcrossComments", Value.AcrossComments);
+ IO.mapOptional("AlignCaseColons", Value.AlignCaseColons);
}
};
-template <> struct ScalarEnumerationTraits<FormatStyle::AlignConsecutiveStyle> {
- static void enumeration(IO &IO, FormatStyle::AlignConsecutiveStyle &Value) {
- IO.enumCase(Value, "None", FormatStyle::ACS_None);
- IO.enumCase(Value, "Consecutive", FormatStyle::ACS_Consecutive);
- IO.enumCase(Value, "AcrossEmptyLines", FormatStyle::ACS_AcrossEmptyLines);
- IO.enumCase(Value, "AcrossComments", FormatStyle::ACS_AcrossComments);
- IO.enumCase(Value, "AcrossEmptyLinesAndComments",
- FormatStyle::ACS_AcrossEmptyLinesAndComments);
-
- // For backward compability.
- IO.enumCase(Value, "true", FormatStyle::ACS_Consecutive);
- IO.enumCase(Value, "false", FormatStyle::ACS_None);
+template <>
+struct ScalarEnumerationTraits<FormatStyle::AttributeBreakingStyle> {
+ static void enumeration(IO &IO, FormatStyle::AttributeBreakingStyle &Value) {
+ IO.enumCase(Value, "Always", FormatStyle::ABS_Always);
+ IO.enumCase(Value, "Leave", FormatStyle::ABS_Leave);
+ IO.enumCase(Value, "Never", FormatStyle::ABS_Never);
}
};
@@ -163,28 +151,13 @@ struct ScalarEnumerationTraits<FormatStyle::ArrayInitializerAlignmentStyle> {
}
};
-template <> struct ScalarEnumerationTraits<FormatStyle::ShortIfStyle> {
- static void enumeration(IO &IO, FormatStyle::ShortIfStyle &Value) {
- IO.enumCase(Value, "Never", FormatStyle::SIS_Never);
- IO.enumCase(Value, "WithoutElse", FormatStyle::SIS_WithoutElse);
- IO.enumCase(Value, "OnlyFirstIf", FormatStyle::SIS_OnlyFirstIf);
- IO.enumCase(Value, "AllIfsAndElse", FormatStyle::SIS_AllIfsAndElse);
-
- // For backward compatibility.
- IO.enumCase(Value, "Always", FormatStyle::SIS_OnlyFirstIf);
- IO.enumCase(Value, "false", FormatStyle::SIS_Never);
- IO.enumCase(Value, "true", FormatStyle::SIS_WithoutElse);
- }
-};
-
-template <> struct ScalarEnumerationTraits<FormatStyle::ShortLambdaStyle> {
- static void enumeration(IO &IO, FormatStyle::ShortLambdaStyle &Value) {
- IO.enumCase(Value, "None", FormatStyle::SLS_None);
- IO.enumCase(Value, "false", FormatStyle::SLS_None);
- IO.enumCase(Value, "Empty", FormatStyle::SLS_Empty);
- IO.enumCase(Value, "Inline", FormatStyle::SLS_Inline);
- IO.enumCase(Value, "All", FormatStyle::SLS_All);
- IO.enumCase(Value, "true", FormatStyle::SLS_All);
+template <> struct ScalarEnumerationTraits<FormatStyle::BinaryOperatorStyle> {
+ static void enumeration(IO &IO, FormatStyle::BinaryOperatorStyle &Value) {
+ IO.enumCase(Value, "All", FormatStyle::BOS_All);
+ IO.enumCase(Value, "true", FormatStyle::BOS_All);
+ IO.enumCase(Value, "None", FormatStyle::BOS_None);
+ IO.enumCase(Value, "false", FormatStyle::BOS_None);
+ IO.enumCase(Value, "NonAssignment", FormatStyle::BOS_NonAssignment);
}
};
@@ -196,20 +169,14 @@ template <> struct ScalarEnumerationTraits<FormatStyle::BinPackStyle> {
}
};
-template <> struct ScalarEnumerationTraits<FormatStyle::TrailingCommaStyle> {
- static void enumeration(IO &IO, FormatStyle::TrailingCommaStyle &Value) {
- IO.enumCase(Value, "None", FormatStyle::TCS_None);
- IO.enumCase(Value, "Wrapped", FormatStyle::TCS_Wrapped);
- }
-};
-
-template <> struct ScalarEnumerationTraits<FormatStyle::BinaryOperatorStyle> {
- static void enumeration(IO &IO, FormatStyle::BinaryOperatorStyle &Value) {
- IO.enumCase(Value, "All", FormatStyle::BOS_All);
- IO.enumCase(Value, "true", FormatStyle::BOS_All);
- IO.enumCase(Value, "None", FormatStyle::BOS_None);
- IO.enumCase(Value, "false", FormatStyle::BOS_None);
- IO.enumCase(Value, "NonAssignment", FormatStyle::BOS_NonAssignment);
+template <>
+struct ScalarEnumerationTraits<FormatStyle::BitFieldColonSpacingStyle> {
+ static void enumeration(IO &IO,
+ FormatStyle::BitFieldColonSpacingStyle &Value) {
+ IO.enumCase(Value, "Both", FormatStyle::BFCS_Both);
+ IO.enumCase(Value, "None", FormatStyle::BFCS_None);
+ IO.enumCase(Value, "Before", FormatStyle::BFCS_Before);
+ IO.enumCase(Value, "After", FormatStyle::BFCS_After);
}
};
@@ -227,6 +194,42 @@ template <> struct ScalarEnumerationTraits<FormatStyle::BraceBreakingStyle> {
}
};
+template <> struct MappingTraits<FormatStyle::BraceWrappingFlags> {
+ static void mapping(IO &IO, FormatStyle::BraceWrappingFlags &Wrapping) {
+ IO.mapOptional("AfterCaseLabel", Wrapping.AfterCaseLabel);
+ IO.mapOptional("AfterClass", Wrapping.AfterClass);
+ IO.mapOptional("AfterControlStatement", Wrapping.AfterControlStatement);
+ IO.mapOptional("AfterEnum", Wrapping.AfterEnum);
+ IO.mapOptional("AfterExternBlock", Wrapping.AfterExternBlock);
+ IO.mapOptional("AfterFunction", Wrapping.AfterFunction);
+ IO.mapOptional("AfterNamespace", Wrapping.AfterNamespace);
+ IO.mapOptional("AfterObjCDeclaration", Wrapping.AfterObjCDeclaration);
+ IO.mapOptional("AfterStruct", Wrapping.AfterStruct);
+ IO.mapOptional("AfterUnion", Wrapping.AfterUnion);
+ IO.mapOptional("BeforeCatch", Wrapping.BeforeCatch);
+ IO.mapOptional("BeforeElse", Wrapping.BeforeElse);
+ IO.mapOptional("BeforeLambdaBody", Wrapping.BeforeLambdaBody);
+ IO.mapOptional("BeforeWhile", Wrapping.BeforeWhile);
+ IO.mapOptional("IndentBraces", Wrapping.IndentBraces);
+ IO.mapOptional("SplitEmptyFunction", Wrapping.SplitEmptyFunction);
+ IO.mapOptional("SplitEmptyRecord", Wrapping.SplitEmptyRecord);
+ IO.mapOptional("SplitEmptyNamespace", Wrapping.SplitEmptyNamespace);
+ }
+};
+
+template <> struct ScalarEnumerationTraits<FormatStyle::BracketAlignmentStyle> {
+ static void enumeration(IO &IO, FormatStyle::BracketAlignmentStyle &Value) {
+ IO.enumCase(Value, "Align", FormatStyle::BAS_Align);
+ IO.enumCase(Value, "DontAlign", FormatStyle::BAS_DontAlign);
+ IO.enumCase(Value, "AlwaysBreak", FormatStyle::BAS_AlwaysBreak);
+ IO.enumCase(Value, "BlockIndent", FormatStyle::BAS_BlockIndent);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "true", FormatStyle::BAS_Align);
+ IO.enumCase(Value, "false", FormatStyle::BAS_DontAlign);
+ }
+};
+
template <>
struct ScalarEnumerationTraits<
FormatStyle::BraceWrappingAfterControlStatementStyle> {
@@ -244,6 +247,31 @@ struct ScalarEnumerationTraits<
};
template <>
+struct ScalarEnumerationTraits<
+ FormatStyle::BreakBeforeConceptDeclarationsStyle> {
+ static void
+ enumeration(IO &IO, FormatStyle::BreakBeforeConceptDeclarationsStyle &Value) {
+ IO.enumCase(Value, "Never", FormatStyle::BBCDS_Never);
+ IO.enumCase(Value, "Allowed", FormatStyle::BBCDS_Allowed);
+ IO.enumCase(Value, "Always", FormatStyle::BBCDS_Always);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "true", FormatStyle::BBCDS_Always);
+ IO.enumCase(Value, "false", FormatStyle::BBCDS_Allowed);
+ }
+};
+
+template <>
+struct ScalarEnumerationTraits<FormatStyle::BreakBeforeInlineASMColonStyle> {
+ static void enumeration(IO &IO,
+ FormatStyle::BreakBeforeInlineASMColonStyle &Value) {
+ IO.enumCase(Value, "Never", FormatStyle::BBIAS_Never);
+ IO.enumCase(Value, "OnlyMultiline", FormatStyle::BBIAS_OnlyMultiline);
+ IO.enumCase(Value, "Always", FormatStyle::BBIAS_Always);
+ }
+};
+
+template <>
struct ScalarEnumerationTraits<FormatStyle::BreakConstructorInitializersStyle> {
static void
enumeration(IO &IO, FormatStyle::BreakConstructorInitializersStyle &Value) {
@@ -265,6 +293,48 @@ struct ScalarEnumerationTraits<FormatStyle::BreakInheritanceListStyle> {
};
template <>
+struct ScalarEnumerationTraits<FormatStyle::BreakTemplateDeclarationsStyle> {
+ static void enumeration(IO &IO,
+ FormatStyle::BreakTemplateDeclarationsStyle &Value) {
+ IO.enumCase(Value, "No", FormatStyle::BTDS_No);
+ IO.enumCase(Value, "MultiLine", FormatStyle::BTDS_MultiLine);
+ IO.enumCase(Value, "Yes", FormatStyle::BTDS_Yes);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "false", FormatStyle::BTDS_MultiLine);
+ IO.enumCase(Value, "true", FormatStyle::BTDS_Yes);
+ }
+};
+
+template <>
+struct ScalarEnumerationTraits<FormatStyle::DefinitionReturnTypeBreakingStyle> {
+ static void
+ enumeration(IO &IO, FormatStyle::DefinitionReturnTypeBreakingStyle &Value) {
+ IO.enumCase(Value, "None", FormatStyle::DRTBS_None);
+ IO.enumCase(Value, "All", FormatStyle::DRTBS_All);
+ IO.enumCase(Value, "TopLevel", FormatStyle::DRTBS_TopLevel);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "false", FormatStyle::DRTBS_None);
+ IO.enumCase(Value, "true", FormatStyle::DRTBS_All);
+ }
+};
+
+template <>
+struct ScalarEnumerationTraits<FormatStyle::EscapedNewlineAlignmentStyle> {
+ static void enumeration(IO &IO,
+ FormatStyle::EscapedNewlineAlignmentStyle &Value) {
+ IO.enumCase(Value, "DontAlign", FormatStyle::ENAS_DontAlign);
+ IO.enumCase(Value, "Left", FormatStyle::ENAS_Left);
+ IO.enumCase(Value, "Right", FormatStyle::ENAS_Right);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "true", FormatStyle::ENAS_Left);
+ IO.enumCase(Value, "false", FormatStyle::ENAS_Right);
+ }
+};
+
+template <>
struct ScalarEnumerationTraits<FormatStyle::EmptyLineAfterAccessModifierStyle> {
static void
enumeration(IO &IO, FormatStyle::EmptyLineAfterAccessModifierStyle &Value) {
@@ -287,15 +357,6 @@ struct ScalarEnumerationTraits<
};
template <>
-struct ScalarEnumerationTraits<FormatStyle::PPDirectiveIndentStyle> {
- static void enumeration(IO &IO, FormatStyle::PPDirectiveIndentStyle &Value) {
- IO.enumCase(Value, "None", FormatStyle::PPDIS_None);
- IO.enumCase(Value, "AfterHash", FormatStyle::PPDIS_AfterHash);
- IO.enumCase(Value, "BeforeHash", FormatStyle::PPDIS_BeforeHash);
- }
-};
-
-template <>
struct ScalarEnumerationTraits<FormatStyle::IndentExternBlockStyle> {
static void enumeration(IO &IO, FormatStyle::IndentExternBlockStyle &Value) {
IO.enumCase(Value, "AfterExternBlock", FormatStyle::IEBS_AfterExternBlock);
@@ -306,79 +367,84 @@ struct ScalarEnumerationTraits<FormatStyle::IndentExternBlockStyle> {
}
};
-template <>
-struct ScalarEnumerationTraits<FormatStyle::ReturnTypeBreakingStyle> {
- static void enumeration(IO &IO, FormatStyle::ReturnTypeBreakingStyle &Value) {
- IO.enumCase(Value, "None", FormatStyle::RTBS_None);
- IO.enumCase(Value, "All", FormatStyle::RTBS_All);
- IO.enumCase(Value, "TopLevel", FormatStyle::RTBS_TopLevel);
- IO.enumCase(Value, "TopLevelDefinitions",
- FormatStyle::RTBS_TopLevelDefinitions);
- IO.enumCase(Value, "AllDefinitions", FormatStyle::RTBS_AllDefinitions);
+template <> struct MappingTraits<FormatStyle::IntegerLiteralSeparatorStyle> {
+ static void mapping(IO &IO, FormatStyle::IntegerLiteralSeparatorStyle &Base) {
+ IO.mapOptional("Binary", Base.Binary);
+ IO.mapOptional("BinaryMinDigits", Base.BinaryMinDigits);
+ IO.mapOptional("Decimal", Base.Decimal);
+ IO.mapOptional("DecimalMinDigits", Base.DecimalMinDigits);
+ IO.mapOptional("Hex", Base.Hex);
+ IO.mapOptional("HexMinDigits", Base.HexMinDigits);
}
};
-template <>
-struct ScalarEnumerationTraits<FormatStyle::BreakTemplateDeclarationsStyle> {
- static void enumeration(IO &IO,
- FormatStyle::BreakTemplateDeclarationsStyle &Value) {
- IO.enumCase(Value, "No", FormatStyle::BTDS_No);
- IO.enumCase(Value, "MultiLine", FormatStyle::BTDS_MultiLine);
- IO.enumCase(Value, "Yes", FormatStyle::BTDS_Yes);
+template <> struct ScalarEnumerationTraits<FormatStyle::JavaScriptQuoteStyle> {
+ static void enumeration(IO &IO, FormatStyle::JavaScriptQuoteStyle &Value) {
+ IO.enumCase(Value, "Leave", FormatStyle::JSQS_Leave);
+ IO.enumCase(Value, "Single", FormatStyle::JSQS_Single);
+ IO.enumCase(Value, "Double", FormatStyle::JSQS_Double);
+ }
+};
- // For backward compatibility.
- IO.enumCase(Value, "false", FormatStyle::BTDS_MultiLine);
- IO.enumCase(Value, "true", FormatStyle::BTDS_Yes);
+template <> struct ScalarEnumerationTraits<FormatStyle::LanguageKind> {
+ static void enumeration(IO &IO, FormatStyle::LanguageKind &Value) {
+ IO.enumCase(Value, "Cpp", FormatStyle::LK_Cpp);
+ IO.enumCase(Value, "Java", FormatStyle::LK_Java);
+ IO.enumCase(Value, "JavaScript", FormatStyle::LK_JavaScript);
+ IO.enumCase(Value, "ObjC", FormatStyle::LK_ObjC);
+ IO.enumCase(Value, "Proto", FormatStyle::LK_Proto);
+ IO.enumCase(Value, "TableGen", FormatStyle::LK_TableGen);
+ IO.enumCase(Value, "TextProto", FormatStyle::LK_TextProto);
+ IO.enumCase(Value, "CSharp", FormatStyle::LK_CSharp);
+ IO.enumCase(Value, "Json", FormatStyle::LK_Json);
+ IO.enumCase(Value, "Verilog", FormatStyle::LK_Verilog);
}
};
-template <>
-struct ScalarEnumerationTraits<FormatStyle::DefinitionReturnTypeBreakingStyle> {
- static void
- enumeration(IO &IO, FormatStyle::DefinitionReturnTypeBreakingStyle &Value) {
- IO.enumCase(Value, "None", FormatStyle::DRTBS_None);
- IO.enumCase(Value, "All", FormatStyle::DRTBS_All);
- IO.enumCase(Value, "TopLevel", FormatStyle::DRTBS_TopLevel);
+template <> struct ScalarEnumerationTraits<FormatStyle::LanguageStandard> {
+ static void enumeration(IO &IO, FormatStyle::LanguageStandard &Value) {
+ IO.enumCase(Value, "c++03", FormatStyle::LS_Cpp03);
+ IO.enumCase(Value, "C++03", FormatStyle::LS_Cpp03); // Legacy alias
+ IO.enumCase(Value, "Cpp03", FormatStyle::LS_Cpp03); // Legacy alias
- // For backward compatibility.
- IO.enumCase(Value, "false", FormatStyle::DRTBS_None);
- IO.enumCase(Value, "true", FormatStyle::DRTBS_All);
+ IO.enumCase(Value, "c++11", FormatStyle::LS_Cpp11);
+ IO.enumCase(Value, "C++11", FormatStyle::LS_Cpp11); // Legacy alias
+
+ IO.enumCase(Value, "c++14", FormatStyle::LS_Cpp14);
+ IO.enumCase(Value, "c++17", FormatStyle::LS_Cpp17);
+ IO.enumCase(Value, "c++20", FormatStyle::LS_Cpp20);
+
+ IO.enumCase(Value, "Latest", FormatStyle::LS_Latest);
+ IO.enumCase(Value, "Cpp11", FormatStyle::LS_Latest); // Legacy alias
+ IO.enumCase(Value, "Auto", FormatStyle::LS_Auto);
}
};
template <>
-struct ScalarEnumerationTraits<FormatStyle::NamespaceIndentationKind> {
+struct ScalarEnumerationTraits<FormatStyle::LambdaBodyIndentationKind> {
static void enumeration(IO &IO,
- FormatStyle::NamespaceIndentationKind &Value) {
- IO.enumCase(Value, "None", FormatStyle::NI_None);
- IO.enumCase(Value, "Inner", FormatStyle::NI_Inner);
- IO.enumCase(Value, "All", FormatStyle::NI_All);
+ FormatStyle::LambdaBodyIndentationKind &Value) {
+ IO.enumCase(Value, "Signature", FormatStyle::LBI_Signature);
+ IO.enumCase(Value, "OuterScope", FormatStyle::LBI_OuterScope);
}
};
-template <> struct ScalarEnumerationTraits<FormatStyle::BracketAlignmentStyle> {
- static void enumeration(IO &IO, FormatStyle::BracketAlignmentStyle &Value) {
- IO.enumCase(Value, "Align", FormatStyle::BAS_Align);
- IO.enumCase(Value, "DontAlign", FormatStyle::BAS_DontAlign);
- IO.enumCase(Value, "AlwaysBreak", FormatStyle::BAS_AlwaysBreak);
-
- // For backward compatibility.
- IO.enumCase(Value, "true", FormatStyle::BAS_Align);
- IO.enumCase(Value, "false", FormatStyle::BAS_DontAlign);
+template <> struct ScalarEnumerationTraits<FormatStyle::LineEndingStyle> {
+ static void enumeration(IO &IO, FormatStyle::LineEndingStyle &Value) {
+ IO.enumCase(Value, "LF", FormatStyle::LE_LF);
+ IO.enumCase(Value, "CRLF", FormatStyle::LE_CRLF);
+ IO.enumCase(Value, "DeriveLF", FormatStyle::LE_DeriveLF);
+ IO.enumCase(Value, "DeriveCRLF", FormatStyle::LE_DeriveCRLF);
}
};
template <>
-struct ScalarEnumerationTraits<FormatStyle::EscapedNewlineAlignmentStyle> {
+struct ScalarEnumerationTraits<FormatStyle::NamespaceIndentationKind> {
static void enumeration(IO &IO,
- FormatStyle::EscapedNewlineAlignmentStyle &Value) {
- IO.enumCase(Value, "DontAlign", FormatStyle::ENAS_DontAlign);
- IO.enumCase(Value, "Left", FormatStyle::ENAS_Left);
- IO.enumCase(Value, "Right", FormatStyle::ENAS_Right);
-
- // For backward compatibility.
- IO.enumCase(Value, "true", FormatStyle::ENAS_Left);
- IO.enumCase(Value, "false", FormatStyle::ENAS_Right);
+ FormatStyle::NamespaceIndentationKind &Value) {
+ IO.enumCase(Value, "None", FormatStyle::NI_None);
+ IO.enumCase(Value, "Inner", FormatStyle::NI_Inner);
+ IO.enumCase(Value, "All", FormatStyle::NI_All);
}
};
@@ -395,6 +461,18 @@ template <> struct ScalarEnumerationTraits<FormatStyle::OperandAlignmentStyle> {
}
};
+template <>
+struct ScalarEnumerationTraits<FormatStyle::PackConstructorInitializersStyle> {
+ static void
+ enumeration(IO &IO, FormatStyle::PackConstructorInitializersStyle &Value) {
+ IO.enumCase(Value, "Never", FormatStyle::PCIS_Never);
+ IO.enumCase(Value, "BinPack", FormatStyle::PCIS_BinPack);
+ IO.enumCase(Value, "CurrentLine", FormatStyle::PCIS_CurrentLine);
+ IO.enumCase(Value, "NextLine", FormatStyle::PCIS_NextLine);
+ IO.enumCase(Value, "NextLineOnly", FormatStyle::PCIS_NextLineOnly);
+ }
+};
+
template <> struct ScalarEnumerationTraits<FormatStyle::PointerAlignmentStyle> {
static void enumeration(IO &IO, FormatStyle::PointerAlignmentStyle &Value) {
IO.enumCase(Value, "Middle", FormatStyle::PAS_Middle);
@@ -408,13 +486,31 @@ template <> struct ScalarEnumerationTraits<FormatStyle::PointerAlignmentStyle> {
};
template <>
-struct ScalarEnumerationTraits<FormatStyle::SpaceAroundPointerQualifiersStyle> {
- static void
- enumeration(IO &IO, FormatStyle::SpaceAroundPointerQualifiersStyle &Value) {
- IO.enumCase(Value, "Default", FormatStyle::SAPQ_Default);
- IO.enumCase(Value, "Before", FormatStyle::SAPQ_Before);
- IO.enumCase(Value, "After", FormatStyle::SAPQ_After);
- IO.enumCase(Value, "Both", FormatStyle::SAPQ_Both);
+struct ScalarEnumerationTraits<FormatStyle::PPDirectiveIndentStyle> {
+ static void enumeration(IO &IO, FormatStyle::PPDirectiveIndentStyle &Value) {
+ IO.enumCase(Value, "None", FormatStyle::PPDIS_None);
+ IO.enumCase(Value, "AfterHash", FormatStyle::PPDIS_AfterHash);
+ IO.enumCase(Value, "BeforeHash", FormatStyle::PPDIS_BeforeHash);
+ }
+};
+
+template <>
+struct ScalarEnumerationTraits<FormatStyle::QualifierAlignmentStyle> {
+ static void enumeration(IO &IO, FormatStyle::QualifierAlignmentStyle &Value) {
+ IO.enumCase(Value, "Leave", FormatStyle::QAS_Leave);
+ IO.enumCase(Value, "Left", FormatStyle::QAS_Left);
+ IO.enumCase(Value, "Right", FormatStyle::QAS_Right);
+ IO.enumCase(Value, "Custom", FormatStyle::QAS_Custom);
+ }
+};
+
+template <> struct MappingTraits<FormatStyle::RawStringFormat> {
+ static void mapping(IO &IO, FormatStyle::RawStringFormat &Format) {
+ IO.mapOptional("Language", Format.Language);
+ IO.mapOptional("Delimiters", Format.Delimiters);
+ IO.mapOptional("EnclosingFunctions", Format.EnclosingFunctions);
+ IO.mapOptional("CanonicalDelimiter", Format.CanonicalDelimiter);
+ IO.mapOptional("BasedOnStyle", Format.BasedOnStyle);
}
};
@@ -429,34 +525,100 @@ struct ScalarEnumerationTraits<FormatStyle::ReferenceAlignmentStyle> {
};
template <>
-struct ScalarEnumerationTraits<FormatStyle::SpaceBeforeParensOptions> {
+struct ScalarEnumerationTraits<FormatStyle::RemoveParenthesesStyle> {
+ static void enumeration(IO &IO, FormatStyle::RemoveParenthesesStyle &Value) {
+ IO.enumCase(Value, "Leave", FormatStyle::RPS_Leave);
+ IO.enumCase(Value, "MultipleParentheses",
+ FormatStyle::RPS_MultipleParentheses);
+ IO.enumCase(Value, "ReturnStatement", FormatStyle::RPS_ReturnStatement);
+ }
+};
+
+template <>
+struct ScalarEnumerationTraits<FormatStyle::RequiresClausePositionStyle> {
static void enumeration(IO &IO,
- FormatStyle::SpaceBeforeParensOptions &Value) {
- IO.enumCase(Value, "Never", FormatStyle::SBPO_Never);
- IO.enumCase(Value, "ControlStatements",
- FormatStyle::SBPO_ControlStatements);
- IO.enumCase(Value, "ControlStatementsExceptControlMacros",
- FormatStyle::SBPO_ControlStatementsExceptControlMacros);
- IO.enumCase(Value, "NonEmptyParentheses",
- FormatStyle::SBPO_NonEmptyParentheses);
- IO.enumCase(Value, "Always", FormatStyle::SBPO_Always);
+ FormatStyle::RequiresClausePositionStyle &Value) {
+ IO.enumCase(Value, "OwnLine", FormatStyle::RCPS_OwnLine);
+ IO.enumCase(Value, "WithPreceding", FormatStyle::RCPS_WithPreceding);
+ IO.enumCase(Value, "WithFollowing", FormatStyle::RCPS_WithFollowing);
+ IO.enumCase(Value, "SingleLine", FormatStyle::RCPS_SingleLine);
+ }
+};
- // For backward compatibility.
- IO.enumCase(Value, "false", FormatStyle::SBPO_Never);
- IO.enumCase(Value, "true", FormatStyle::SBPO_ControlStatements);
- IO.enumCase(Value, "ControlStatementsExceptForEachMacros",
- FormatStyle::SBPO_ControlStatementsExceptControlMacros);
+template <>
+struct ScalarEnumerationTraits<FormatStyle::RequiresExpressionIndentationKind> {
+ static void
+ enumeration(IO &IO, FormatStyle::RequiresExpressionIndentationKind &Value) {
+ IO.enumCase(Value, "Keyword", FormatStyle::REI_Keyword);
+ IO.enumCase(Value, "OuterScope", FormatStyle::REI_OuterScope);
}
};
template <>
-struct ScalarEnumerationTraits<FormatStyle::BitFieldColonSpacingStyle> {
- static void enumeration(IO &IO,
- FormatStyle::BitFieldColonSpacingStyle &Value) {
- IO.enumCase(Value, "Both", FormatStyle::BFCS_Both);
- IO.enumCase(Value, "None", FormatStyle::BFCS_None);
- IO.enumCase(Value, "Before", FormatStyle::BFCS_Before);
- IO.enumCase(Value, "After", FormatStyle::BFCS_After);
+struct ScalarEnumerationTraits<FormatStyle::ReturnTypeBreakingStyle> {
+ static void enumeration(IO &IO, FormatStyle::ReturnTypeBreakingStyle &Value) {
+ IO.enumCase(Value, "None", FormatStyle::RTBS_None);
+ IO.enumCase(Value, "All", FormatStyle::RTBS_All);
+ IO.enumCase(Value, "TopLevel", FormatStyle::RTBS_TopLevel);
+ IO.enumCase(Value, "TopLevelDefinitions",
+ FormatStyle::RTBS_TopLevelDefinitions);
+ IO.enumCase(Value, "AllDefinitions", FormatStyle::RTBS_AllDefinitions);
+ }
+};
+
+template <>
+struct ScalarEnumerationTraits<FormatStyle::SeparateDefinitionStyle> {
+ static void enumeration(IO &IO, FormatStyle::SeparateDefinitionStyle &Value) {
+ IO.enumCase(Value, "Leave", FormatStyle::SDS_Leave);
+ IO.enumCase(Value, "Always", FormatStyle::SDS_Always);
+ IO.enumCase(Value, "Never", FormatStyle::SDS_Never);
+ }
+};
+
+template <> struct ScalarEnumerationTraits<FormatStyle::ShortBlockStyle> {
+ static void enumeration(IO &IO, FormatStyle::ShortBlockStyle &Value) {
+ IO.enumCase(Value, "Never", FormatStyle::SBS_Never);
+ IO.enumCase(Value, "false", FormatStyle::SBS_Never);
+ IO.enumCase(Value, "Always", FormatStyle::SBS_Always);
+ IO.enumCase(Value, "true", FormatStyle::SBS_Always);
+ IO.enumCase(Value, "Empty", FormatStyle::SBS_Empty);
+ }
+};
+
+template <> struct ScalarEnumerationTraits<FormatStyle::ShortFunctionStyle> {
+ static void enumeration(IO &IO, FormatStyle::ShortFunctionStyle &Value) {
+ IO.enumCase(Value, "None", FormatStyle::SFS_None);
+ IO.enumCase(Value, "false", FormatStyle::SFS_None);
+ IO.enumCase(Value, "All", FormatStyle::SFS_All);
+ IO.enumCase(Value, "true", FormatStyle::SFS_All);
+ IO.enumCase(Value, "Inline", FormatStyle::SFS_Inline);
+ IO.enumCase(Value, "InlineOnly", FormatStyle::SFS_InlineOnly);
+ IO.enumCase(Value, "Empty", FormatStyle::SFS_Empty);
+ }
+};
+
+template <> struct ScalarEnumerationTraits<FormatStyle::ShortIfStyle> {
+ static void enumeration(IO &IO, FormatStyle::ShortIfStyle &Value) {
+ IO.enumCase(Value, "Never", FormatStyle::SIS_Never);
+ IO.enumCase(Value, "WithoutElse", FormatStyle::SIS_WithoutElse);
+ IO.enumCase(Value, "OnlyFirstIf", FormatStyle::SIS_OnlyFirstIf);
+ IO.enumCase(Value, "AllIfsAndElse", FormatStyle::SIS_AllIfsAndElse);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "Always", FormatStyle::SIS_OnlyFirstIf);
+ IO.enumCase(Value, "false", FormatStyle::SIS_Never);
+ IO.enumCase(Value, "true", FormatStyle::SIS_WithoutElse);
+ }
+};
+
+template <> struct ScalarEnumerationTraits<FormatStyle::ShortLambdaStyle> {
+ static void enumeration(IO &IO, FormatStyle::ShortLambdaStyle &Value) {
+ IO.enumCase(Value, "None", FormatStyle::SLS_None);
+ IO.enumCase(Value, "false", FormatStyle::SLS_None);
+ IO.enumCase(Value, "Empty", FormatStyle::SLS_Empty);
+ IO.enumCase(Value, "Inline", FormatStyle::SLS_Inline);
+ IO.enumCase(Value, "All", FormatStyle::SLS_All);
+ IO.enumCase(Value, "true", FormatStyle::SLS_All);
}
};
@@ -481,6 +643,72 @@ struct ScalarEnumerationTraits<FormatStyle::SortJavaStaticImportOptions> {
}
};
+template <>
+struct ScalarEnumerationTraits<FormatStyle::SortUsingDeclarationsOptions> {
+ static void enumeration(IO &IO,
+ FormatStyle::SortUsingDeclarationsOptions &Value) {
+ IO.enumCase(Value, "Never", FormatStyle::SUD_Never);
+ IO.enumCase(Value, "Lexicographic", FormatStyle::SUD_Lexicographic);
+ IO.enumCase(Value, "LexicographicNumeric",
+ FormatStyle::SUD_LexicographicNumeric);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "false", FormatStyle::SUD_Never);
+ IO.enumCase(Value, "true", FormatStyle::SUD_LexicographicNumeric);
+ }
+};
+
+template <>
+struct ScalarEnumerationTraits<FormatStyle::SpaceAroundPointerQualifiersStyle> {
+ static void
+ enumeration(IO &IO, FormatStyle::SpaceAroundPointerQualifiersStyle &Value) {
+ IO.enumCase(Value, "Default", FormatStyle::SAPQ_Default);
+ IO.enumCase(Value, "Before", FormatStyle::SAPQ_Before);
+ IO.enumCase(Value, "After", FormatStyle::SAPQ_After);
+ IO.enumCase(Value, "Both", FormatStyle::SAPQ_Both);
+ }
+};
+
+template <> struct MappingTraits<FormatStyle::SpaceBeforeParensCustom> {
+ static void mapping(IO &IO, FormatStyle::SpaceBeforeParensCustom &Spacing) {
+ IO.mapOptional("AfterControlStatements", Spacing.AfterControlStatements);
+ IO.mapOptional("AfterForeachMacros", Spacing.AfterForeachMacros);
+ IO.mapOptional("AfterFunctionDefinitionName",
+ Spacing.AfterFunctionDefinitionName);
+ IO.mapOptional("AfterFunctionDeclarationName",
+ Spacing.AfterFunctionDeclarationName);
+ IO.mapOptional("AfterIfMacros", Spacing.AfterIfMacros);
+ IO.mapOptional("AfterOverloadedOperator", Spacing.AfterOverloadedOperator);
+ IO.mapOptional("AfterPlacementOperator", Spacing.AfterPlacementOperator);
+ IO.mapOptional("AfterRequiresInClause", Spacing.AfterRequiresInClause);
+ IO.mapOptional("AfterRequiresInExpression",
+ Spacing.AfterRequiresInExpression);
+ IO.mapOptional("BeforeNonEmptyParentheses",
+ Spacing.BeforeNonEmptyParentheses);
+ }
+};
+
+template <>
+struct ScalarEnumerationTraits<FormatStyle::SpaceBeforeParensStyle> {
+ static void enumeration(IO &IO, FormatStyle::SpaceBeforeParensStyle &Value) {
+ IO.enumCase(Value, "Never", FormatStyle::SBPO_Never);
+ IO.enumCase(Value, "ControlStatements",
+ FormatStyle::SBPO_ControlStatements);
+ IO.enumCase(Value, "ControlStatementsExceptControlMacros",
+ FormatStyle::SBPO_ControlStatementsExceptControlMacros);
+ IO.enumCase(Value, "NonEmptyParentheses",
+ FormatStyle::SBPO_NonEmptyParentheses);
+ IO.enumCase(Value, "Always", FormatStyle::SBPO_Always);
+ IO.enumCase(Value, "Custom", FormatStyle::SBPO_Custom);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "false", FormatStyle::SBPO_Never);
+ IO.enumCase(Value, "true", FormatStyle::SBPO_ControlStatements);
+ IO.enumCase(Value, "ControlStatementsExceptForEachMacros",
+ FormatStyle::SBPO_ControlStatementsExceptControlMacros);
+ }
+};
+
template <> struct ScalarEnumerationTraits<FormatStyle::SpacesInAnglesStyle> {
static void enumeration(IO &IO, FormatStyle::SpacesInAnglesStyle &Value) {
IO.enumCase(Value, "Never", FormatStyle::SIAS_Never);
@@ -493,26 +721,115 @@ template <> struct ScalarEnumerationTraits<FormatStyle::SpacesInAnglesStyle> {
}
};
+template <> struct MappingTraits<FormatStyle::SpacesInLineComment> {
+ static void mapping(IO &IO, FormatStyle::SpacesInLineComment &Space) {
+ // Transform the maximum to signed, to parse "-1" correctly
+ int signedMaximum = static_cast<int>(Space.Maximum);
+ IO.mapOptional("Minimum", Space.Minimum);
+ IO.mapOptional("Maximum", signedMaximum);
+ Space.Maximum = static_cast<unsigned>(signedMaximum);
+
+ if (Space.Maximum != -1u)
+ Space.Minimum = std::min(Space.Minimum, Space.Maximum);
+ }
+};
+
+template <> struct MappingTraits<FormatStyle::SpacesInParensCustom> {
+ static void mapping(IO &IO, FormatStyle::SpacesInParensCustom &Spaces) {
+ IO.mapOptional("InCStyleCasts", Spaces.InCStyleCasts);
+ IO.mapOptional("InConditionalStatements", Spaces.InConditionalStatements);
+ IO.mapOptional("InEmptyParentheses", Spaces.InEmptyParentheses);
+ IO.mapOptional("Other", Spaces.Other);
+ }
+};
+
+template <> struct ScalarEnumerationTraits<FormatStyle::SpacesInParensStyle> {
+ static void enumeration(IO &IO, FormatStyle::SpacesInParensStyle &Value) {
+ IO.enumCase(Value, "Never", FormatStyle::SIPO_Never);
+ IO.enumCase(Value, "Custom", FormatStyle::SIPO_Custom);
+ }
+};
+
+template <> struct ScalarEnumerationTraits<FormatStyle::TrailingCommaStyle> {
+ static void enumeration(IO &IO, FormatStyle::TrailingCommaStyle &Value) {
+ IO.enumCase(Value, "None", FormatStyle::TCS_None);
+ IO.enumCase(Value, "Wrapped", FormatStyle::TCS_Wrapped);
+ }
+};
+
+template <>
+struct ScalarEnumerationTraits<FormatStyle::TrailingCommentsAlignmentKinds> {
+ static void enumeration(IO &IO,
+ FormatStyle::TrailingCommentsAlignmentKinds &Value) {
+ IO.enumCase(Value, "Leave", FormatStyle::TCAS_Leave);
+ IO.enumCase(Value, "Always", FormatStyle::TCAS_Always);
+ IO.enumCase(Value, "Never", FormatStyle::TCAS_Never);
+ }
+};
+
+template <> struct MappingTraits<FormatStyle::TrailingCommentsAlignmentStyle> {
+ static void enumInput(IO &IO,
+ FormatStyle::TrailingCommentsAlignmentStyle &Value) {
+ IO.enumCase(Value, "Leave",
+ FormatStyle::TrailingCommentsAlignmentStyle(
+ {FormatStyle::TCAS_Leave, 0}));
+
+ IO.enumCase(Value, "Always",
+ FormatStyle::TrailingCommentsAlignmentStyle(
+ {FormatStyle::TCAS_Always, 0}));
+
+ IO.enumCase(Value, "Never",
+ FormatStyle::TrailingCommentsAlignmentStyle(
+ {FormatStyle::TCAS_Never, 0}));
+
+ // For backwards compatibility
+ IO.enumCase(Value, "true",
+ FormatStyle::TrailingCommentsAlignmentStyle(
+ {FormatStyle::TCAS_Always, 0}));
+ IO.enumCase(Value, "false",
+ FormatStyle::TrailingCommentsAlignmentStyle(
+ {FormatStyle::TCAS_Never, 0}));
+ }
+
+ static void mapping(IO &IO,
+ FormatStyle::TrailingCommentsAlignmentStyle &Value) {
+ IO.mapOptional("Kind", Value.Kind);
+ IO.mapOptional("OverEmptyLines", Value.OverEmptyLines);
+ }
+};
+
+template <> struct ScalarEnumerationTraits<FormatStyle::UseTabStyle> {
+ static void enumeration(IO &IO, FormatStyle::UseTabStyle &Value) {
+ IO.enumCase(Value, "Never", FormatStyle::UT_Never);
+ IO.enumCase(Value, "false", FormatStyle::UT_Never);
+ IO.enumCase(Value, "Always", FormatStyle::UT_Always);
+ IO.enumCase(Value, "true", FormatStyle::UT_Always);
+ IO.enumCase(Value, "ForIndentation", FormatStyle::UT_ForIndentation);
+ IO.enumCase(Value, "ForContinuationAndIndentation",
+ FormatStyle::UT_ForContinuationAndIndentation);
+ IO.enumCase(Value, "AlignWithSpaces", FormatStyle::UT_AlignWithSpaces);
+ }
+};
+
template <> struct MappingTraits<FormatStyle> {
static void mapping(IO &IO, FormatStyle &Style) {
// When reading, read the language first, we need it for getPredefinedStyle.
IO.mapOptional("Language", Style.Language);
+ StringRef BasedOnStyle;
if (IO.outputting()) {
- StringRef StylesArray[] = {"LLVM", "Google", "Chromium", "Mozilla",
- "WebKit", "GNU", "Microsoft"};
- ArrayRef<StringRef> Styles(StylesArray);
- for (size_t i = 0, e = Styles.size(); i < e; ++i) {
- StringRef StyleName(Styles[i]);
+ StringRef Styles[] = {"LLVM", "Google", "Chromium", "Mozilla",
+ "WebKit", "GNU", "Microsoft", "clang-format"};
+ for (StringRef StyleName : Styles) {
FormatStyle PredefinedStyle;
if (getPredefinedStyle(StyleName, Style.Language, &PredefinedStyle) &&
Style == PredefinedStyle) {
IO.mapOptional("# BasedOnStyle", StyleName);
+ BasedOnStyle = StyleName;
break;
}
}
} else {
- StringRef BasedOnStyle;
IO.mapOptional("BasedOnStyle", BasedOnStyle);
if (!BasedOnStyle.empty()) {
FormatStyle::LanguageKind OldLanguage = Style.Language;
@@ -526,68 +843,101 @@ template <> struct MappingTraits<FormatStyle> {
}
}
+ // Initialize some variables used in the parsing. The using logic is at the
+ // end.
+
+ // For backward compatibility:
+ // The default value of ConstructorInitializerAllOnOneLineOrOnePerLine was
+ // false unless BasedOnStyle was Google or Chromium whereas that of
+ // AllowAllConstructorInitializersOnNextLine was always true, so the
+ // equivalent default value of PackConstructorInitializers is PCIS_NextLine
+ // for Google/Chromium or PCIS_BinPack otherwise. If the deprecated options
+ // had a non-default value while PackConstructorInitializers has a default
+ // value, set the latter to an equivalent non-default value if needed.
+ const bool IsGoogleOrChromium = BasedOnStyle.equals_insensitive("google") ||
+ BasedOnStyle.equals_insensitive("chromium");
+ bool OnCurrentLine = IsGoogleOrChromium;
+ bool OnNextLine = true;
+
+ bool BreakBeforeInheritanceComma = false;
+ bool BreakConstructorInitializersBeforeComma = false;
+
+ bool DeriveLineEnding = true;
+ bool UseCRLF = false;
+
+ bool SpaceInEmptyParentheses = false;
+ bool SpacesInConditionalStatement = false;
+ bool SpacesInCStyleCastParentheses = false;
+ bool SpacesInParentheses = false;
+
// For backward compatibility.
if (!IO.outputting()) {
IO.mapOptional("AlignEscapedNewlinesLeft", Style.AlignEscapedNewlines);
+ IO.mapOptional("AllowAllConstructorInitializersOnNextLine", OnNextLine);
+ IO.mapOptional("BreakBeforeInheritanceComma",
+ BreakBeforeInheritanceComma);
+ IO.mapOptional("BreakConstructorInitializersBeforeComma",
+ BreakConstructorInitializersBeforeComma);
+ IO.mapOptional("ConstructorInitializerAllOnOneLineOrOnePerLine",
+ OnCurrentLine);
+ IO.mapOptional("DeriveLineEnding", DeriveLineEnding);
IO.mapOptional("DerivePointerBinding", Style.DerivePointerAlignment);
IO.mapOptional("IndentFunctionDeclarationAfterType",
Style.IndentWrappedFunctionNames);
+ IO.mapOptional("IndentRequires", Style.IndentRequiresClause);
IO.mapOptional("PointerBindsToType", Style.PointerAlignment);
IO.mapOptional("SpaceAfterControlStatementKeyword",
Style.SpaceBeforeParens);
+ IO.mapOptional("SpaceInEmptyParentheses", SpaceInEmptyParentheses);
+ IO.mapOptional("SpacesInConditionalStatement",
+ SpacesInConditionalStatement);
+ IO.mapOptional("SpacesInCStyleCastParentheses",
+ SpacesInCStyleCastParentheses);
+ IO.mapOptional("SpacesInParentheses", SpacesInParentheses);
+ IO.mapOptional("UseCRLF", UseCRLF);
}
IO.mapOptional("AccessModifierOffset", Style.AccessModifierOffset);
IO.mapOptional("AlignAfterOpenBracket", Style.AlignAfterOpenBracket);
IO.mapOptional("AlignArrayOfStructures", Style.AlignArrayOfStructures);
- IO.mapOptional("AlignConsecutiveMacros", Style.AlignConsecutiveMacros);
IO.mapOptional("AlignConsecutiveAssignments",
Style.AlignConsecutiveAssignments);
IO.mapOptional("AlignConsecutiveBitFields",
Style.AlignConsecutiveBitFields);
IO.mapOptional("AlignConsecutiveDeclarations",
Style.AlignConsecutiveDeclarations);
+ IO.mapOptional("AlignConsecutiveMacros", Style.AlignConsecutiveMacros);
+ IO.mapOptional("AlignConsecutiveShortCaseStatements",
+ Style.AlignConsecutiveShortCaseStatements);
IO.mapOptional("AlignEscapedNewlines", Style.AlignEscapedNewlines);
IO.mapOptional("AlignOperands", Style.AlignOperands);
IO.mapOptional("AlignTrailingComments", Style.AlignTrailingComments);
IO.mapOptional("AllowAllArgumentsOnNextLine",
Style.AllowAllArgumentsOnNextLine);
- IO.mapOptional("AllowAllConstructorInitializersOnNextLine",
- Style.AllowAllConstructorInitializersOnNextLine);
IO.mapOptional("AllowAllParametersOfDeclarationOnNextLine",
Style.AllowAllParametersOfDeclarationOnNextLine);
- IO.mapOptional("AllowShortEnumsOnASingleLine",
- Style.AllowShortEnumsOnASingleLine);
+ IO.mapOptional("AllowBreakBeforeNoexceptSpecifier",
+ Style.AllowBreakBeforeNoexceptSpecifier);
IO.mapOptional("AllowShortBlocksOnASingleLine",
Style.AllowShortBlocksOnASingleLine);
IO.mapOptional("AllowShortCaseLabelsOnASingleLine",
Style.AllowShortCaseLabelsOnASingleLine);
+ IO.mapOptional("AllowShortCompoundRequirementOnASingleLine",
+ Style.AllowShortCompoundRequirementOnASingleLine);
+ IO.mapOptional("AllowShortEnumsOnASingleLine",
+ Style.AllowShortEnumsOnASingleLine);
IO.mapOptional("AllowShortFunctionsOnASingleLine",
Style.AllowShortFunctionsOnASingleLine);
- IO.mapOptional("AllowShortLambdasOnASingleLine",
- Style.AllowShortLambdasOnASingleLine);
IO.mapOptional("AllowShortIfStatementsOnASingleLine",
Style.AllowShortIfStatementsOnASingleLine);
+ IO.mapOptional("AllowShortLambdasOnASingleLine",
+ Style.AllowShortLambdasOnASingleLine);
IO.mapOptional("AllowShortLoopsOnASingleLine",
Style.AllowShortLoopsOnASingleLine);
IO.mapOptional("AlwaysBreakAfterDefinitionReturnType",
Style.AlwaysBreakAfterDefinitionReturnType);
IO.mapOptional("AlwaysBreakAfterReturnType",
Style.AlwaysBreakAfterReturnType);
-
- // If AlwaysBreakAfterDefinitionReturnType was specified but
- // AlwaysBreakAfterReturnType was not, initialize the latter from the
- // former for backwards compatibility.
- if (Style.AlwaysBreakAfterDefinitionReturnType != FormatStyle::DRTBS_None &&
- Style.AlwaysBreakAfterReturnType == FormatStyle::RTBS_None) {
- if (Style.AlwaysBreakAfterDefinitionReturnType == FormatStyle::DRTBS_All)
- Style.AlwaysBreakAfterReturnType = FormatStyle::RTBS_AllDefinitions;
- else if (Style.AlwaysBreakAfterDefinitionReturnType ==
- FormatStyle::DRTBS_TopLevel)
- Style.AlwaysBreakAfterReturnType =
- FormatStyle::RTBS_TopLevelDefinitions;
- }
-
IO.mapOptional("AlwaysBreakBeforeMultilineStrings",
Style.AlwaysBreakBeforeMultilineStrings);
IO.mapOptional("AlwaysBreakTemplateDeclarations",
@@ -595,51 +945,36 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("AttributeMacros", Style.AttributeMacros);
IO.mapOptional("BinPackArguments", Style.BinPackArguments);
IO.mapOptional("BinPackParameters", Style.BinPackParameters);
+ IO.mapOptional("BitFieldColonSpacing", Style.BitFieldColonSpacing);
+ IO.mapOptional("BracedInitializerIndentWidth",
+ Style.BracedInitializerIndentWidth);
IO.mapOptional("BraceWrapping", Style.BraceWrapping);
+ IO.mapOptional("BreakAdjacentStringLiterals",
+ Style.BreakAdjacentStringLiterals);
+ IO.mapOptional("BreakAfterAttributes", Style.BreakAfterAttributes);
+ IO.mapOptional("BreakAfterJavaFieldAnnotations",
+ Style.BreakAfterJavaFieldAnnotations);
+ IO.mapOptional("BreakArrays", Style.BreakArrays);
IO.mapOptional("BreakBeforeBinaryOperators",
Style.BreakBeforeBinaryOperators);
IO.mapOptional("BreakBeforeConceptDeclarations",
Style.BreakBeforeConceptDeclarations);
IO.mapOptional("BreakBeforeBraces", Style.BreakBeforeBraces);
-
- bool BreakBeforeInheritanceComma = false;
- IO.mapOptional("BreakBeforeInheritanceComma", BreakBeforeInheritanceComma);
- IO.mapOptional("BreakInheritanceList", Style.BreakInheritanceList);
- // If BreakBeforeInheritanceComma was specified but
- // BreakInheritance was not, initialize the latter from the
- // former for backwards compatibility.
- if (BreakBeforeInheritanceComma &&
- Style.BreakInheritanceList == FormatStyle::BILS_BeforeColon)
- Style.BreakInheritanceList = FormatStyle::BILS_BeforeComma;
-
+ IO.mapOptional("BreakBeforeInlineASMColon",
+ Style.BreakBeforeInlineASMColon);
IO.mapOptional("BreakBeforeTernaryOperators",
Style.BreakBeforeTernaryOperators);
-
- bool BreakConstructorInitializersBeforeComma = false;
- IO.mapOptional("BreakConstructorInitializersBeforeComma",
- BreakConstructorInitializersBeforeComma);
IO.mapOptional("BreakConstructorInitializers",
Style.BreakConstructorInitializers);
- // If BreakConstructorInitializersBeforeComma was specified but
- // BreakConstructorInitializers was not, initialize the latter from the
- // former for backwards compatibility.
- if (BreakConstructorInitializersBeforeComma &&
- Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeColon)
- Style.BreakConstructorInitializers = FormatStyle::BCIS_BeforeComma;
-
- IO.mapOptional("BreakAfterJavaFieldAnnotations",
- Style.BreakAfterJavaFieldAnnotations);
+ IO.mapOptional("BreakInheritanceList", Style.BreakInheritanceList);
IO.mapOptional("BreakStringLiterals", Style.BreakStringLiterals);
IO.mapOptional("ColumnLimit", Style.ColumnLimit);
IO.mapOptional("CommentPragmas", Style.CommentPragmas);
IO.mapOptional("CompactNamespaces", Style.CompactNamespaces);
- IO.mapOptional("ConstructorInitializerAllOnOneLineOrOnePerLine",
- Style.ConstructorInitializerAllOnOneLineOrOnePerLine);
IO.mapOptional("ConstructorInitializerIndentWidth",
Style.ConstructorInitializerIndentWidth);
IO.mapOptional("ContinuationIndentWidth", Style.ContinuationIndentWidth);
IO.mapOptional("Cpp11BracedListStyle", Style.Cpp11BracedListStyle);
- IO.mapOptional("DeriveLineEnding", Style.DeriveLineEnding);
IO.mapOptional("DerivePointerAlignment", Style.DerivePointerAlignment);
IO.mapOptional("DisableFormat", Style.DisableFormat);
IO.mapOptional("EmptyLineAfterAccessModifier",
@@ -651,31 +986,36 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("FixNamespaceComments", Style.FixNamespaceComments);
IO.mapOptional("ForEachMacros", Style.ForEachMacros);
IO.mapOptional("IfMacros", Style.IfMacros);
-
IO.mapOptional("IncludeBlocks", Style.IncludeStyle.IncludeBlocks);
IO.mapOptional("IncludeCategories", Style.IncludeStyle.IncludeCategories);
IO.mapOptional("IncludeIsMainRegex", Style.IncludeStyle.IncludeIsMainRegex);
IO.mapOptional("IncludeIsMainSourceRegex",
Style.IncludeStyle.IncludeIsMainSourceRegex);
IO.mapOptional("IndentAccessModifiers", Style.IndentAccessModifiers);
- IO.mapOptional("IndentCaseLabels", Style.IndentCaseLabels);
IO.mapOptional("IndentCaseBlocks", Style.IndentCaseBlocks);
+ IO.mapOptional("IndentCaseLabels", Style.IndentCaseLabels);
+ IO.mapOptional("IndentExternBlock", Style.IndentExternBlock);
IO.mapOptional("IndentGotoLabels", Style.IndentGotoLabels);
IO.mapOptional("IndentPPDirectives", Style.IndentPPDirectives);
- IO.mapOptional("IndentExternBlock", Style.IndentExternBlock);
- IO.mapOptional("IndentRequires", Style.IndentRequires);
+ IO.mapOptional("IndentRequiresClause", Style.IndentRequiresClause);
IO.mapOptional("IndentWidth", Style.IndentWidth);
IO.mapOptional("IndentWrappedFunctionNames",
Style.IndentWrappedFunctionNames);
+ IO.mapOptional("InsertBraces", Style.InsertBraces);
+ IO.mapOptional("InsertNewlineAtEOF", Style.InsertNewlineAtEOF);
IO.mapOptional("InsertTrailingCommas", Style.InsertTrailingCommas);
+ IO.mapOptional("IntegerLiteralSeparator", Style.IntegerLiteralSeparator);
IO.mapOptional("JavaImportGroups", Style.JavaImportGroups);
IO.mapOptional("JavaScriptQuotes", Style.JavaScriptQuotes);
IO.mapOptional("JavaScriptWrapImports", Style.JavaScriptWrapImports);
IO.mapOptional("KeepEmptyLinesAtTheStartOfBlocks",
Style.KeepEmptyLinesAtTheStartOfBlocks);
+ IO.mapOptional("KeepEmptyLinesAtEOF", Style.KeepEmptyLinesAtEOF);
IO.mapOptional("LambdaBodyIndentation", Style.LambdaBodyIndentation);
+ IO.mapOptional("LineEnding", Style.LineEnding);
IO.mapOptional("MacroBlockBegin", Style.MacroBlockBegin);
IO.mapOptional("MacroBlockEnd", Style.MacroBlockEnd);
+ IO.mapOptional("Macros", Style.Macros);
IO.mapOptional("MaxEmptyLinesToKeep", Style.MaxEmptyLinesToKeep);
IO.mapOptional("NamespaceIndentation", Style.NamespaceIndentation);
IO.mapOptional("NamespaceMacros", Style.NamespaceMacros);
@@ -683,29 +1023,53 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("ObjCBlockIndentWidth", Style.ObjCBlockIndentWidth);
IO.mapOptional("ObjCBreakBeforeNestedBlockParam",
Style.ObjCBreakBeforeNestedBlockParam);
+ IO.mapOptional("ObjCPropertyAttributeOrder",
+ Style.ObjCPropertyAttributeOrder);
IO.mapOptional("ObjCSpaceAfterProperty", Style.ObjCSpaceAfterProperty);
IO.mapOptional("ObjCSpaceBeforeProtocolList",
Style.ObjCSpaceBeforeProtocolList);
+ IO.mapOptional("PackConstructorInitializers",
+ Style.PackConstructorInitializers);
IO.mapOptional("PenaltyBreakAssignment", Style.PenaltyBreakAssignment);
IO.mapOptional("PenaltyBreakBeforeFirstCallParameter",
Style.PenaltyBreakBeforeFirstCallParameter);
IO.mapOptional("PenaltyBreakComment", Style.PenaltyBreakComment);
IO.mapOptional("PenaltyBreakFirstLessLess",
Style.PenaltyBreakFirstLessLess);
+ IO.mapOptional("PenaltyBreakOpenParenthesis",
+ Style.PenaltyBreakOpenParenthesis);
+ IO.mapOptional("PenaltyBreakScopeResolution",
+ Style.PenaltyBreakScopeResolution);
IO.mapOptional("PenaltyBreakString", Style.PenaltyBreakString);
IO.mapOptional("PenaltyBreakTemplateDeclaration",
Style.PenaltyBreakTemplateDeclaration);
IO.mapOptional("PenaltyExcessCharacter", Style.PenaltyExcessCharacter);
- IO.mapOptional("PenaltyReturnTypeOnItsOwnLine",
- Style.PenaltyReturnTypeOnItsOwnLine);
IO.mapOptional("PenaltyIndentedWhitespace",
Style.PenaltyIndentedWhitespace);
+ IO.mapOptional("PenaltyReturnTypeOnItsOwnLine",
+ Style.PenaltyReturnTypeOnItsOwnLine);
IO.mapOptional("PointerAlignment", Style.PointerAlignment);
IO.mapOptional("PPIndentWidth", Style.PPIndentWidth);
+ IO.mapOptional("QualifierAlignment", Style.QualifierAlignment);
+ // Default Order for Left/Right based Qualifier alignment.
+ if (Style.QualifierAlignment == FormatStyle::QAS_Right)
+ Style.QualifierOrder = {"type", "const", "volatile"};
+ else if (Style.QualifierAlignment == FormatStyle::QAS_Left)
+ Style.QualifierOrder = {"const", "volatile", "type"};
+ else if (Style.QualifierAlignment == FormatStyle::QAS_Custom)
+ IO.mapOptional("QualifierOrder", Style.QualifierOrder);
IO.mapOptional("RawStringFormats", Style.RawStringFormats);
IO.mapOptional("ReferenceAlignment", Style.ReferenceAlignment);
IO.mapOptional("ReflowComments", Style.ReflowComments);
+ IO.mapOptional("RemoveBracesLLVM", Style.RemoveBracesLLVM);
+ IO.mapOptional("RemoveParentheses", Style.RemoveParentheses);
+ IO.mapOptional("RemoveSemicolon", Style.RemoveSemicolon);
+ IO.mapOptional("RequiresClausePosition", Style.RequiresClausePosition);
+ IO.mapOptional("RequiresExpressionIndentation",
+ Style.RequiresExpressionIndentation);
+ IO.mapOptional("SeparateDefinitionBlocks", Style.SeparateDefinitionBlocks);
IO.mapOptional("ShortNamespaceLines", Style.ShortNamespaceLines);
+ IO.mapOptional("SkipMacroDefinitionBody", Style.SkipMacroDefinitionBody);
IO.mapOptional("SortIncludes", Style.SortIncludes);
IO.mapOptional("SortJavaStaticImport", Style.SortJavaStaticImport);
IO.mapOptional("SortUsingDeclarations", Style.SortUsingDeclarations);
@@ -713,6 +1077,8 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("SpaceAfterLogicalNot", Style.SpaceAfterLogicalNot);
IO.mapOptional("SpaceAfterTemplateKeyword",
Style.SpaceAfterTemplateKeyword);
+ IO.mapOptional("SpaceAroundPointerQualifiers",
+ Style.SpaceAroundPointerQualifiers);
IO.mapOptional("SpaceBeforeAssignmentOperators",
Style.SpaceBeforeAssignmentOperators);
IO.mapOptional("SpaceBeforeCaseColon", Style.SpaceBeforeCaseColon);
@@ -722,85 +1088,111 @@ template <> struct MappingTraits<FormatStyle> {
Style.SpaceBeforeCtorInitializerColon);
IO.mapOptional("SpaceBeforeInheritanceColon",
Style.SpaceBeforeInheritanceColon);
+ IO.mapOptional("SpaceBeforeJsonColon", Style.SpaceBeforeJsonColon);
IO.mapOptional("SpaceBeforeParens", Style.SpaceBeforeParens);
- IO.mapOptional("SpaceAroundPointerQualifiers",
- Style.SpaceAroundPointerQualifiers);
+ IO.mapOptional("SpaceBeforeParensOptions", Style.SpaceBeforeParensOptions);
IO.mapOptional("SpaceBeforeRangeBasedForLoopColon",
Style.SpaceBeforeRangeBasedForLoopColon);
+ IO.mapOptional("SpaceBeforeSquareBrackets",
+ Style.SpaceBeforeSquareBrackets);
IO.mapOptional("SpaceInEmptyBlock", Style.SpaceInEmptyBlock);
- IO.mapOptional("SpaceInEmptyParentheses", Style.SpaceInEmptyParentheses);
IO.mapOptional("SpacesBeforeTrailingComments",
Style.SpacesBeforeTrailingComments);
IO.mapOptional("SpacesInAngles", Style.SpacesInAngles);
- IO.mapOptional("SpacesInConditionalStatement",
- Style.SpacesInConditionalStatement);
IO.mapOptional("SpacesInContainerLiterals",
Style.SpacesInContainerLiterals);
- IO.mapOptional("SpacesInCStyleCastParentheses",
- Style.SpacesInCStyleCastParentheses);
IO.mapOptional("SpacesInLineCommentPrefix",
Style.SpacesInLineCommentPrefix);
- IO.mapOptional("SpacesInParentheses", Style.SpacesInParentheses);
+ IO.mapOptional("SpacesInParens", Style.SpacesInParens);
+ IO.mapOptional("SpacesInParensOptions", Style.SpacesInParensOptions);
IO.mapOptional("SpacesInSquareBrackets", Style.SpacesInSquareBrackets);
- IO.mapOptional("SpaceBeforeSquareBrackets",
- Style.SpaceBeforeSquareBrackets);
- IO.mapOptional("BitFieldColonSpacing", Style.BitFieldColonSpacing);
IO.mapOptional("Standard", Style.Standard);
IO.mapOptional("StatementAttributeLikeMacros",
Style.StatementAttributeLikeMacros);
IO.mapOptional("StatementMacros", Style.StatementMacros);
IO.mapOptional("TabWidth", Style.TabWidth);
+ IO.mapOptional("TypeNames", Style.TypeNames);
IO.mapOptional("TypenameMacros", Style.TypenameMacros);
- IO.mapOptional("UseCRLF", Style.UseCRLF);
IO.mapOptional("UseTab", Style.UseTab);
+ IO.mapOptional("VerilogBreakBetweenInstancePorts",
+ Style.VerilogBreakBetweenInstancePorts);
IO.mapOptional("WhitespaceSensitiveMacros",
Style.WhitespaceSensitiveMacros);
- }
-};
-template <> struct MappingTraits<FormatStyle::BraceWrappingFlags> {
- static void mapping(IO &IO, FormatStyle::BraceWrappingFlags &Wrapping) {
- IO.mapOptional("AfterCaseLabel", Wrapping.AfterCaseLabel);
- IO.mapOptional("AfterClass", Wrapping.AfterClass);
- IO.mapOptional("AfterControlStatement", Wrapping.AfterControlStatement);
- IO.mapOptional("AfterEnum", Wrapping.AfterEnum);
- IO.mapOptional("AfterFunction", Wrapping.AfterFunction);
- IO.mapOptional("AfterNamespace", Wrapping.AfterNamespace);
- IO.mapOptional("AfterObjCDeclaration", Wrapping.AfterObjCDeclaration);
- IO.mapOptional("AfterStruct", Wrapping.AfterStruct);
- IO.mapOptional("AfterUnion", Wrapping.AfterUnion);
- IO.mapOptional("AfterExternBlock", Wrapping.AfterExternBlock);
- IO.mapOptional("BeforeCatch", Wrapping.BeforeCatch);
- IO.mapOptional("BeforeElse", Wrapping.BeforeElse);
- IO.mapOptional("BeforeLambdaBody", Wrapping.BeforeLambdaBody);
- IO.mapOptional("BeforeWhile", Wrapping.BeforeWhile);
- IO.mapOptional("IndentBraces", Wrapping.IndentBraces);
- IO.mapOptional("SplitEmptyFunction", Wrapping.SplitEmptyFunction);
- IO.mapOptional("SplitEmptyRecord", Wrapping.SplitEmptyRecord);
- IO.mapOptional("SplitEmptyNamespace", Wrapping.SplitEmptyNamespace);
- }
-};
+ // If AlwaysBreakAfterDefinitionReturnType was specified but
+ // AlwaysBreakAfterReturnType was not, initialize the latter from the
+ // former for backwards compatibility.
+ if (Style.AlwaysBreakAfterDefinitionReturnType != FormatStyle::DRTBS_None &&
+ Style.AlwaysBreakAfterReturnType == FormatStyle::RTBS_None) {
+ if (Style.AlwaysBreakAfterDefinitionReturnType ==
+ FormatStyle::DRTBS_All) {
+ Style.AlwaysBreakAfterReturnType = FormatStyle::RTBS_AllDefinitions;
+ } else if (Style.AlwaysBreakAfterDefinitionReturnType ==
+ FormatStyle::DRTBS_TopLevel) {
+ Style.AlwaysBreakAfterReturnType =
+ FormatStyle::RTBS_TopLevelDefinitions;
+ }
+ }
-template <> struct MappingTraits<FormatStyle::RawStringFormat> {
- static void mapping(IO &IO, FormatStyle::RawStringFormat &Format) {
- IO.mapOptional("Language", Format.Language);
- IO.mapOptional("Delimiters", Format.Delimiters);
- IO.mapOptional("EnclosingFunctions", Format.EnclosingFunctions);
- IO.mapOptional("CanonicalDelimiter", Format.CanonicalDelimiter);
- IO.mapOptional("BasedOnStyle", Format.BasedOnStyle);
- }
-};
+ // If BreakBeforeInheritanceComma was specified but BreakInheritance was
+ // not, initialize the latter from the former for backwards compatibility.
+ if (BreakBeforeInheritanceComma &&
+ Style.BreakInheritanceList == FormatStyle::BILS_BeforeColon) {
+ Style.BreakInheritanceList = FormatStyle::BILS_BeforeComma;
+ }
-template <> struct MappingTraits<FormatStyle::SpacesInLineComment> {
- static void mapping(IO &IO, FormatStyle::SpacesInLineComment &Space) {
- // Transform the maximum to signed, to parse "-1" correctly
- int signedMaximum = static_cast<int>(Space.Maximum);
- IO.mapOptional("Minimum", Space.Minimum);
- IO.mapOptional("Maximum", signedMaximum);
- Space.Maximum = static_cast<unsigned>(signedMaximum);
+ // If BreakConstructorInitializersBeforeComma was specified but
+ // BreakConstructorInitializers was not, initialize the latter from the
+ // former for backwards compatibility.
+ if (BreakConstructorInitializersBeforeComma &&
+ Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeColon) {
+ Style.BreakConstructorInitializers = FormatStyle::BCIS_BeforeComma;
+ }
- if (Space.Maximum != -1u) {
- Space.Minimum = std::min(Space.Minimum, Space.Maximum);
+ if (!IsGoogleOrChromium) {
+ if (Style.PackConstructorInitializers == FormatStyle::PCIS_BinPack &&
+ OnCurrentLine) {
+ Style.PackConstructorInitializers = OnNextLine
+ ? FormatStyle::PCIS_NextLine
+ : FormatStyle::PCIS_CurrentLine;
+ }
+ } else if (Style.PackConstructorInitializers ==
+ FormatStyle::PCIS_NextLine) {
+ if (!OnCurrentLine)
+ Style.PackConstructorInitializers = FormatStyle::PCIS_BinPack;
+ else if (!OnNextLine)
+ Style.PackConstructorInitializers = FormatStyle::PCIS_CurrentLine;
+ }
+
+ if (Style.LineEnding == FormatStyle::LE_DeriveLF) {
+ if (!DeriveLineEnding)
+ Style.LineEnding = UseCRLF ? FormatStyle::LE_CRLF : FormatStyle::LE_LF;
+ else if (UseCRLF)
+ Style.LineEnding = FormatStyle::LE_DeriveCRLF;
+ }
+
+ if (Style.SpacesInParens != FormatStyle::SIPO_Custom &&
+ (SpacesInParentheses || SpaceInEmptyParentheses ||
+ SpacesInConditionalStatement || SpacesInCStyleCastParentheses)) {
+ if (SpacesInParentheses) {
+ // set all options except InCStyleCasts and InEmptyParentheses
+ // to true for backward compatibility.
+ Style.SpacesInParensOptions.InConditionalStatements = true;
+ Style.SpacesInParensOptions.InCStyleCasts =
+ SpacesInCStyleCastParentheses;
+ Style.SpacesInParensOptions.InEmptyParentheses =
+ SpaceInEmptyParentheses;
+ Style.SpacesInParensOptions.Other = true;
+ } else {
+ Style.SpacesInParensOptions = {};
+ Style.SpacesInParensOptions.InConditionalStatements =
+ SpacesInConditionalStatement;
+ Style.SpacesInParensOptions.InCStyleCasts =
+ SpacesInCStyleCastParentheses;
+ Style.SpacesInParensOptions.InEmptyParentheses =
+ SpaceInEmptyParentheses;
+ }
+ Style.SpacesInParens = FormatStyle::SIPO_Custom;
}
}
};
@@ -863,14 +1255,21 @@ std::string ParseErrorCategory::message(int EV) const {
return "Unsuitable";
case ParseError::BinPackTrailingCommaConflict:
return "trailing comma insertion cannot be used with bin packing";
+ case ParseError::InvalidQualifierSpecified:
+ return "Invalid qualifier specified in QualifierOrder";
+ case ParseError::DuplicateQualifierSpecified:
+ return "Duplicate qualifier specified in QualifierOrder";
+ case ParseError::MissingQualifierType:
+ return "Missing type in QualifierOrder";
+ case ParseError::MissingQualifierOrder:
+ return "Missing QualifierOrder";
}
llvm_unreachable("unexpected parse error");
}
-static FormatStyle expandPresets(const FormatStyle &Style) {
- if (Style.BreakBeforeBraces == FormatStyle::BS_Custom)
- return Style;
- FormatStyle Expanded = Style;
+static void expandPresetsBraceWrapping(FormatStyle &Expanded) {
+ if (Expanded.BreakBeforeBraces == FormatStyle::BS_Custom)
+ return;
Expanded.BraceWrapping = {/*AfterCaseLabel=*/false,
/*AfterClass=*/false,
/*AfterControlStatement=*/FormatStyle::BWACS_Never,
@@ -889,7 +1288,7 @@ static FormatStyle expandPresets(const FormatStyle &Style) {
/*SplitEmptyFunction=*/true,
/*SplitEmptyRecord=*/true,
/*SplitEmptyNamespace=*/true};
- switch (Style.BreakBeforeBraces) {
+ switch (Expanded.BreakBeforeBraces) {
case FormatStyle::BS_Linux:
Expanded.BraceWrapping.AfterClass = true;
Expanded.BraceWrapping.AfterFunction = true;
@@ -902,7 +1301,6 @@ static FormatStyle expandPresets(const FormatStyle &Style) {
Expanded.BraceWrapping.AfterStruct = true;
Expanded.BraceWrapping.AfterUnion = true;
Expanded.BraceWrapping.AfterExternBlock = true;
- Expanded.IndentExternBlock = FormatStyle::IEBS_AfterExternBlock;
Expanded.BraceWrapping.SplitEmptyFunction = true;
Expanded.BraceWrapping.SplitEmptyRecord = false;
break;
@@ -922,7 +1320,6 @@ static FormatStyle expandPresets(const FormatStyle &Style) {
Expanded.BraceWrapping.AfterStruct = true;
Expanded.BraceWrapping.AfterUnion = true;
Expanded.BraceWrapping.AfterExternBlock = true;
- Expanded.IndentExternBlock = FormatStyle::IEBS_AfterExternBlock;
Expanded.BraceWrapping.BeforeCatch = true;
Expanded.BraceWrapping.BeforeElse = true;
Expanded.BraceWrapping.BeforeLambdaBody = true;
@@ -937,7 +1334,6 @@ static FormatStyle expandPresets(const FormatStyle &Style) {
Expanded.BraceWrapping.AfterObjCDeclaration = true;
Expanded.BraceWrapping.AfterStruct = true;
Expanded.BraceWrapping.AfterExternBlock = true;
- Expanded.IndentExternBlock = FormatStyle::IEBS_AfterExternBlock;
Expanded.BraceWrapping.BeforeCatch = true;
Expanded.BraceWrapping.BeforeElse = true;
Expanded.BraceWrapping.BeforeLambdaBody = true;
@@ -962,7 +1358,6 @@ static FormatStyle expandPresets(const FormatStyle &Style) {
/*SplitEmptyFunction=*/true,
/*SplitEmptyRecord=*/true,
/*SplitEmptyNamespace=*/true};
- Expanded.IndentExternBlock = FormatStyle::IEBS_AfterExternBlock;
break;
case FormatStyle::BS_WebKit:
Expanded.BraceWrapping.AfterFunction = true;
@@ -970,7 +1365,38 @@ static FormatStyle expandPresets(const FormatStyle &Style) {
default:
break;
}
- return Expanded;
+}
+
+static void expandPresetsSpaceBeforeParens(FormatStyle &Expanded) {
+ if (Expanded.SpaceBeforeParens == FormatStyle::SBPO_Custom)
+ return;
+ // Reset all flags
+ Expanded.SpaceBeforeParensOptions = {};
+ Expanded.SpaceBeforeParensOptions.AfterPlacementOperator = true;
+
+ switch (Expanded.SpaceBeforeParens) {
+ case FormatStyle::SBPO_ControlStatements:
+ Expanded.SpaceBeforeParensOptions.AfterControlStatements = true;
+ Expanded.SpaceBeforeParensOptions.AfterForeachMacros = true;
+ Expanded.SpaceBeforeParensOptions.AfterIfMacros = true;
+ break;
+ case FormatStyle::SBPO_ControlStatementsExceptControlMacros:
+ Expanded.SpaceBeforeParensOptions.AfterControlStatements = true;
+ break;
+ case FormatStyle::SBPO_NonEmptyParentheses:
+ Expanded.SpaceBeforeParensOptions.BeforeNonEmptyParentheses = true;
+ break;
+ default:
+ break;
+ }
+}
+
+static void expandPresetsSpacesInParens(FormatStyle &Expanded) {
+ if (Expanded.SpacesInParens == FormatStyle::SIPO_Custom)
+ return;
+ assert(Expanded.SpacesInParens == FormatStyle::SIPO_Never);
+ // Reset all flags
+ Expanded.SpacesInParensOptions = {};
}
FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
@@ -982,18 +1408,27 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.AlignAfterOpenBracket = FormatStyle::BAS_Align;
LLVMStyle.AlignArrayOfStructures = FormatStyle::AIAS_None;
LLVMStyle.AlignOperands = FormatStyle::OAS_Align;
- LLVMStyle.AlignTrailingComments = true;
- LLVMStyle.AlignConsecutiveAssignments = FormatStyle::ACS_None;
- LLVMStyle.AlignConsecutiveBitFields = FormatStyle::ACS_None;
- LLVMStyle.AlignConsecutiveDeclarations = FormatStyle::ACS_None;
- LLVMStyle.AlignConsecutiveMacros = FormatStyle::ACS_None;
+ LLVMStyle.AlignConsecutiveAssignments = {};
+ LLVMStyle.AlignConsecutiveAssignments.Enabled = false;
+ LLVMStyle.AlignConsecutiveAssignments.AcrossEmptyLines = false;
+ LLVMStyle.AlignConsecutiveAssignments.AcrossComments = false;
+ LLVMStyle.AlignConsecutiveAssignments.AlignCompound = false;
+ LLVMStyle.AlignConsecutiveAssignments.AlignFunctionPointers = false;
+ LLVMStyle.AlignConsecutiveAssignments.PadOperators = true;
+ LLVMStyle.AlignConsecutiveBitFields = {};
+ LLVMStyle.AlignConsecutiveDeclarations = {};
+ LLVMStyle.AlignConsecutiveMacros = {};
+ LLVMStyle.AlignConsecutiveShortCaseStatements = {};
+ LLVMStyle.AlignTrailingComments = {};
+ LLVMStyle.AlignTrailingComments.Kind = FormatStyle::TCAS_Always;
+ LLVMStyle.AlignTrailingComments.OverEmptyLines = 0;
LLVMStyle.AllowAllArgumentsOnNextLine = true;
- LLVMStyle.AllowAllConstructorInitializersOnNextLine = true;
LLVMStyle.AllowAllParametersOfDeclarationOnNextLine = true;
- LLVMStyle.AllowShortEnumsOnASingleLine = true;
- LLVMStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_All;
LLVMStyle.AllowShortBlocksOnASingleLine = FormatStyle::SBS_Never;
LLVMStyle.AllowShortCaseLabelsOnASingleLine = false;
+ LLVMStyle.AllowShortCompoundRequirementOnASingleLine = true;
+ LLVMStyle.AllowShortEnumsOnASingleLine = true;
+ LLVMStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_All;
LLVMStyle.AllowShortIfStatementsOnASingleLine = FormatStyle::SIS_Never;
LLVMStyle.AllowShortLambdasOnASingleLine = FormatStyle::SLS_All;
LLVMStyle.AllowShortLoopsOnASingleLine = false;
@@ -1002,12 +1437,10 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.AlwaysBreakBeforeMultilineStrings = false;
LLVMStyle.AlwaysBreakTemplateDeclarations = FormatStyle::BTDS_MultiLine;
LLVMStyle.AttributeMacros.push_back("__capability");
+ LLVMStyle.BitFieldColonSpacing = FormatStyle::BFCS_Both;
LLVMStyle.BinPackArguments = true;
LLVMStyle.BinPackParameters = true;
- LLVMStyle.BreakBeforeBinaryOperators = FormatStyle::BOS_None;
- LLVMStyle.BreakBeforeConceptDeclarations = true;
- LLVMStyle.BreakBeforeTernaryOperators = true;
- LLVMStyle.BreakBeforeBraces = FormatStyle::BS_Attach;
+ LLVMStyle.BracedInitializerIndentWidth = std::nullopt;
LLVMStyle.BraceWrapping = {/*AfterCaseLabel=*/false,
/*AfterClass=*/false,
/*AfterControlStatement=*/FormatStyle::BWACS_Never,
@@ -1026,20 +1459,27 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
/*SplitEmptyFunction=*/true,
/*SplitEmptyRecord=*/true,
/*SplitEmptyNamespace=*/true};
- LLVMStyle.IndentExternBlock = FormatStyle::IEBS_AfterExternBlock;
+ LLVMStyle.BreakAdjacentStringLiterals = true;
+ LLVMStyle.BreakAfterAttributes = FormatStyle::ABS_Leave;
LLVMStyle.BreakAfterJavaFieldAnnotations = false;
+ LLVMStyle.BreakArrays = true;
+ LLVMStyle.BreakBeforeBinaryOperators = FormatStyle::BOS_None;
+ LLVMStyle.BreakBeforeBraces = FormatStyle::BS_Attach;
+ LLVMStyle.BreakBeforeConceptDeclarations = FormatStyle::BBCDS_Always;
+ LLVMStyle.BreakBeforeInlineASMColon = FormatStyle::BBIAS_OnlyMultiline;
+ LLVMStyle.AllowBreakBeforeNoexceptSpecifier = FormatStyle::BBNSS_Never;
+ LLVMStyle.BreakBeforeTernaryOperators = true;
LLVMStyle.BreakConstructorInitializers = FormatStyle::BCIS_BeforeColon;
LLVMStyle.BreakInheritanceList = FormatStyle::BILS_BeforeColon;
LLVMStyle.BreakStringLiterals = true;
LLVMStyle.ColumnLimit = 80;
LLVMStyle.CommentPragmas = "^ IWYU pragma:";
LLVMStyle.CompactNamespaces = false;
- LLVMStyle.ConstructorInitializerAllOnOneLineOrOnePerLine = false;
LLVMStyle.ConstructorInitializerIndentWidth = 4;
LLVMStyle.ContinuationIndentWidth = 4;
LLVMStyle.Cpp11BracedListStyle = true;
- LLVMStyle.DeriveLineEnding = true;
LLVMStyle.DerivePointerAlignment = false;
+ LLVMStyle.DisableFormat = false;
LLVMStyle.EmptyLineAfterAccessModifier = FormatStyle::ELAAMS_Never;
LLVMStyle.EmptyLineBeforeAccessModifier = FormatStyle::ELBAMS_LogicalBlock;
LLVMStyle.ExperimentalAutoDetectBinPacking = false;
@@ -1057,40 +1497,49 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.IndentAccessModifiers = false;
LLVMStyle.IndentCaseLabels = false;
LLVMStyle.IndentCaseBlocks = false;
+ LLVMStyle.IndentExternBlock = FormatStyle::IEBS_AfterExternBlock;
LLVMStyle.IndentGotoLabels = true;
LLVMStyle.IndentPPDirectives = FormatStyle::PPDIS_None;
- LLVMStyle.IndentRequires = false;
- LLVMStyle.IndentWrappedFunctionNames = false;
+ LLVMStyle.IndentRequiresClause = true;
LLVMStyle.IndentWidth = 2;
- LLVMStyle.PPIndentWidth = -1;
+ LLVMStyle.IndentWrappedFunctionNames = false;
+ LLVMStyle.InsertBraces = false;
+ LLVMStyle.InsertNewlineAtEOF = false;
LLVMStyle.InsertTrailingCommas = FormatStyle::TCS_None;
+ LLVMStyle.IntegerLiteralSeparator = {
+ /*Binary=*/0, /*BinaryMinDigits=*/0,
+ /*Decimal=*/0, /*DecimalMinDigits=*/0,
+ /*Hex=*/0, /*HexMinDigits=*/0};
LLVMStyle.JavaScriptQuotes = FormatStyle::JSQS_Leave;
LLVMStyle.JavaScriptWrapImports = true;
- LLVMStyle.TabWidth = 8;
+ LLVMStyle.KeepEmptyLinesAtEOF = false;
+ LLVMStyle.KeepEmptyLinesAtTheStartOfBlocks = true;
LLVMStyle.LambdaBodyIndentation = FormatStyle::LBI_Signature;
+ LLVMStyle.LineEnding = FormatStyle::LE_DeriveLF;
LLVMStyle.MaxEmptyLinesToKeep = 1;
- LLVMStyle.KeepEmptyLinesAtTheStartOfBlocks = true;
LLVMStyle.NamespaceIndentation = FormatStyle::NI_None;
LLVMStyle.ObjCBinPackProtocolList = FormatStyle::BPS_Auto;
LLVMStyle.ObjCBlockIndentWidth = 2;
LLVMStyle.ObjCBreakBeforeNestedBlockParam = true;
LLVMStyle.ObjCSpaceAfterProperty = false;
LLVMStyle.ObjCSpaceBeforeProtocolList = true;
+ LLVMStyle.PackConstructorInitializers = FormatStyle::PCIS_BinPack;
LLVMStyle.PointerAlignment = FormatStyle::PAS_Right;
+ LLVMStyle.PPIndentWidth = -1;
+ LLVMStyle.QualifierAlignment = FormatStyle::QAS_Leave;
LLVMStyle.ReferenceAlignment = FormatStyle::RAS_Pointer;
- LLVMStyle.ShortNamespaceLines = 1;
- LLVMStyle.SpacesBeforeTrailingComments = 1;
- LLVMStyle.Standard = FormatStyle::LS_Latest;
- LLVMStyle.UseCRLF = false;
- LLVMStyle.UseTab = FormatStyle::UT_Never;
LLVMStyle.ReflowComments = true;
- LLVMStyle.SpacesInParentheses = false;
- LLVMStyle.SpacesInSquareBrackets = false;
- LLVMStyle.SpaceInEmptyBlock = false;
- LLVMStyle.SpaceInEmptyParentheses = false;
- LLVMStyle.SpacesInContainerLiterals = true;
- LLVMStyle.SpacesInCStyleCastParentheses = false;
- LLVMStyle.SpacesInLineCommentPrefix = {/*Minimum=*/1, /*Maximum=*/-1u};
+ LLVMStyle.RemoveBracesLLVM = false;
+ LLVMStyle.RemoveParentheses = FormatStyle::RPS_Leave;
+ LLVMStyle.RemoveSemicolon = false;
+ LLVMStyle.RequiresClausePosition = FormatStyle::RCPS_OwnLine;
+ LLVMStyle.RequiresExpressionIndentation = FormatStyle::REI_OuterScope;
+ LLVMStyle.SeparateDefinitionBlocks = FormatStyle::SDS_Leave;
+ LLVMStyle.ShortNamespaceLines = 1;
+ LLVMStyle.SkipMacroDefinitionBody = false;
+ LLVMStyle.SortIncludes = FormatStyle::SI_CaseSensitive;
+ LLVMStyle.SortJavaStaticImport = FormatStyle::SJSIO_Before;
+ LLVMStyle.SortUsingDeclarations = FormatStyle::SUD_LexicographicNumeric;
LLVMStyle.SpaceAfterCStyleCast = false;
LLVMStyle.SpaceAfterLogicalNot = false;
LLVMStyle.SpaceAfterTemplateKeyword = true;
@@ -1098,14 +1547,35 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.SpaceBeforeCaseColon = false;
LLVMStyle.SpaceBeforeCtorInitializerColon = true;
LLVMStyle.SpaceBeforeInheritanceColon = true;
+ LLVMStyle.SpaceBeforeJsonColon = false;
LLVMStyle.SpaceBeforeParens = FormatStyle::SBPO_ControlStatements;
+ LLVMStyle.SpaceBeforeParensOptions = {};
+ LLVMStyle.SpaceBeforeParensOptions.AfterControlStatements = true;
+ LLVMStyle.SpaceBeforeParensOptions.AfterForeachMacros = true;
+ LLVMStyle.SpaceBeforeParensOptions.AfterIfMacros = true;
LLVMStyle.SpaceBeforeRangeBasedForLoopColon = true;
LLVMStyle.SpaceBeforeAssignmentOperators = true;
LLVMStyle.SpaceBeforeCpp11BracedList = false;
LLVMStyle.SpaceBeforeSquareBrackets = false;
- LLVMStyle.BitFieldColonSpacing = FormatStyle::BFCS_Both;
+ LLVMStyle.SpaceInEmptyBlock = false;
+ LLVMStyle.SpacesBeforeTrailingComments = 1;
LLVMStyle.SpacesInAngles = FormatStyle::SIAS_Never;
- LLVMStyle.SpacesInConditionalStatement = false;
+ LLVMStyle.SpacesInContainerLiterals = true;
+ LLVMStyle.SpacesInLineCommentPrefix = {/*Minimum=*/1, /*Maximum=*/-1u};
+ LLVMStyle.SpacesInParens = FormatStyle::SIPO_Never;
+ LLVMStyle.SpacesInSquareBrackets = false;
+ LLVMStyle.Standard = FormatStyle::LS_Latest;
+ LLVMStyle.StatementAttributeLikeMacros.push_back("Q_EMIT");
+ LLVMStyle.StatementMacros.push_back("Q_UNUSED");
+ LLVMStyle.StatementMacros.push_back("QT_REQUIRE_VERSION");
+ LLVMStyle.TabWidth = 8;
+ LLVMStyle.UseTab = FormatStyle::UT_Never;
+ LLVMStyle.VerilogBreakBetweenInstancePorts = true;
+ LLVMStyle.WhitespaceSensitiveMacros.push_back("BOOST_PP_STRINGIZE");
+ LLVMStyle.WhitespaceSensitiveMacros.push_back("CF_SWIFT_NAME");
+ LLVMStyle.WhitespaceSensitiveMacros.push_back("NS_SWIFT_NAME");
+ LLVMStyle.WhitespaceSensitiveMacros.push_back("PP_STRINGIZE");
+ LLVMStyle.WhitespaceSensitiveMacros.push_back("STRINGIZE");
LLVMStyle.PenaltyBreakAssignment = prec::Assignment;
LLVMStyle.PenaltyBreakComment = 300;
@@ -1114,28 +1584,25 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.PenaltyExcessCharacter = 1000000;
LLVMStyle.PenaltyReturnTypeOnItsOwnLine = 60;
LLVMStyle.PenaltyBreakBeforeFirstCallParameter = 19;
+ LLVMStyle.PenaltyBreakOpenParenthesis = 0;
+ LLVMStyle.PenaltyBreakScopeResolution = 500;
LLVMStyle.PenaltyBreakTemplateDeclaration = prec::Relational;
LLVMStyle.PenaltyIndentedWhitespace = 0;
- LLVMStyle.DisableFormat = false;
- LLVMStyle.SortIncludes = FormatStyle::SI_CaseSensitive;
- LLVMStyle.SortJavaStaticImport = FormatStyle::SJSIO_Before;
- LLVMStyle.SortUsingDeclarations = true;
- LLVMStyle.StatementAttributeLikeMacros.push_back("Q_EMIT");
- LLVMStyle.StatementMacros.push_back("Q_UNUSED");
- LLVMStyle.StatementMacros.push_back("QT_REQUIRE_VERSION");
- LLVMStyle.WhitespaceSensitiveMacros.push_back("STRINGIZE");
- LLVMStyle.WhitespaceSensitiveMacros.push_back("PP_STRINGIZE");
- LLVMStyle.WhitespaceSensitiveMacros.push_back("BOOST_PP_STRINGIZE");
- LLVMStyle.WhitespaceSensitiveMacros.push_back("NS_SWIFT_NAME");
- LLVMStyle.WhitespaceSensitiveMacros.push_back("CF_SWIFT_NAME");
-
// Defaults that differ when not C++.
- if (Language == FormatStyle::LK_TableGen) {
+ switch (Language) {
+ case FormatStyle::LK_TableGen:
LLVMStyle.SpacesInContainerLiterals = false;
- }
- if (LLVMStyle.isJson()) {
+ break;
+ case FormatStyle::LK_Json:
LLVMStyle.ColumnLimit = 0;
+ break;
+ case FormatStyle::LK_Verilog:
+ LLVMStyle.IndentCaseLabels = true;
+ LLVMStyle.SpacesInContainerLiterals = false;
+ break;
+ default:
+ break;
}
return LLVMStyle;
@@ -1158,7 +1625,6 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
GoogleStyle.AllowShortLoopsOnASingleLine = true;
GoogleStyle.AlwaysBreakBeforeMultilineStrings = true;
GoogleStyle.AlwaysBreakTemplateDeclarations = FormatStyle::BTDS_Yes;
- GoogleStyle.ConstructorInitializerAllOnOneLineOrOnePerLine = true;
GoogleStyle.DerivePointerAlignment = true;
GoogleStyle.IncludeStyle.IncludeCategories = {{"^<ext/.*\\.h>", 2, 0, false},
{"^<.*\\.h>", 1, 0, false},
@@ -1171,6 +1637,7 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
GoogleStyle.ObjCBinPackProtocolList = FormatStyle::BPS_Never;
GoogleStyle.ObjCSpaceAfterProperty = false;
GoogleStyle.ObjCSpaceBeforeProtocolList = true;
+ GoogleStyle.PackConstructorInitializers = FormatStyle::PCIS_NextLine;
GoogleStyle.PointerAlignment = FormatStyle::PAS_Left;
GoogleStyle.RawStringFormats = {
{
@@ -1215,6 +1682,7 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
/*BasedOnStyle=*/"google",
},
};
+
GoogleStyle.SpacesBeforeTrailingComments = 2;
GoogleStyle.Standard = FormatStyle::LS_Auto;
@@ -1224,7 +1692,8 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
if (Language == FormatStyle::LK_Java) {
GoogleStyle.AlignAfterOpenBracket = FormatStyle::BAS_DontAlign;
GoogleStyle.AlignOperands = FormatStyle::OAS_DontAlign;
- GoogleStyle.AlignTrailingComments = false;
+ GoogleStyle.AlignTrailingComments = {};
+ GoogleStyle.AlignTrailingComments.Kind = FormatStyle::TCAS_Never;
GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Empty;
GoogleStyle.AllowShortIfStatementsOnASingleLine = FormatStyle::SIS_Never;
GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
@@ -1311,7 +1780,7 @@ FormatStyle getChromiumStyle(FormatStyle::LanguageKind Language) {
ChromiumStyle.ContinuationIndentWidth = 8;
ChromiumStyle.IndentWidth = 4;
// See styleguide for import groups:
- // https://chromium.googlesource.com/chromium/src/+/master/styleguide/java/java.md#Import-Order
+ // https://chromium.googlesource.com/chromium/src/+/refs/heads/main/styleguide/java/java.md#Import-Order
ChromiumStyle.JavaImportGroups = {
"android",
"androidx",
@@ -1372,7 +1841,8 @@ FormatStyle getWebKitStyle() {
Style.AccessModifierOffset = -4;
Style.AlignAfterOpenBracket = FormatStyle::BAS_DontAlign;
Style.AlignOperands = FormatStyle::OAS_DontAlign;
- Style.AlignTrailingComments = false;
+ Style.AlignTrailingComments = {};
+ Style.AlignTrailingComments.Kind = FormatStyle::TCAS_Never;
Style.AllowShortBlocksOnASingleLine = FormatStyle::SBS_Empty;
Style.BreakBeforeBinaryOperators = FormatStyle::BOS_All;
Style.BreakBeforeBraces = FormatStyle::BS_WebKit;
@@ -1420,7 +1890,6 @@ FormatStyle getMicrosoftStyle(FormatStyle::LanguageKind Language) {
Style.BraceWrapping.AfterObjCDeclaration = true;
Style.BraceWrapping.AfterStruct = true;
Style.BraceWrapping.AfterExternBlock = true;
- Style.IndentExternBlock = FormatStyle::IEBS_AfterExternBlock;
Style.BraceWrapping.BeforeCatch = true;
Style.BraceWrapping.BeforeElse = true;
Style.BraceWrapping.BeforeWhile = false;
@@ -1435,42 +1904,85 @@ FormatStyle getMicrosoftStyle(FormatStyle::LanguageKind Language) {
return Style;
}
+FormatStyle getClangFormatStyle() {
+ FormatStyle Style = getLLVMStyle();
+ Style.InsertBraces = true;
+ Style.InsertNewlineAtEOF = true;
+ Style.LineEnding = FormatStyle::LE_LF;
+ Style.RemoveBracesLLVM = true;
+ Style.RemoveParentheses = FormatStyle::RPS_ReturnStatement;
+ return Style;
+}
+
FormatStyle getNoStyle() {
FormatStyle NoStyle = getLLVMStyle();
NoStyle.DisableFormat = true;
NoStyle.SortIncludes = FormatStyle::SI_Never;
- NoStyle.SortUsingDeclarations = false;
+ NoStyle.SortUsingDeclarations = FormatStyle::SUD_Never;
return NoStyle;
}
bool getPredefinedStyle(StringRef Name, FormatStyle::LanguageKind Language,
FormatStyle *Style) {
- if (Name.equals_insensitive("llvm")) {
+ if (Name.equals_insensitive("llvm"))
*Style = getLLVMStyle(Language);
- } else if (Name.equals_insensitive("chromium")) {
+ else if (Name.equals_insensitive("chromium"))
*Style = getChromiumStyle(Language);
- } else if (Name.equals_insensitive("mozilla")) {
+ else if (Name.equals_insensitive("mozilla"))
*Style = getMozillaStyle();
- } else if (Name.equals_insensitive("google")) {
+ else if (Name.equals_insensitive("google"))
*Style = getGoogleStyle(Language);
- } else if (Name.equals_insensitive("webkit")) {
+ else if (Name.equals_insensitive("webkit"))
*Style = getWebKitStyle();
- } else if (Name.equals_insensitive("gnu")) {
+ else if (Name.equals_insensitive("gnu"))
*Style = getGNUStyle();
- } else if (Name.equals_insensitive("microsoft")) {
+ else if (Name.equals_insensitive("microsoft"))
*Style = getMicrosoftStyle(Language);
- } else if (Name.equals_insensitive("none")) {
+ else if (Name.equals_insensitive("clang-format"))
+ *Style = getClangFormatStyle();
+ else if (Name.equals_insensitive("none"))
*Style = getNoStyle();
- } else if (Name.equals_insensitive("inheritparentconfig")) {
+ else if (Name.equals_insensitive("inheritparentconfig"))
Style->InheritsParentConfig = true;
- } else {
+ else
return false;
- }
Style->Language = Language;
return true;
}
+ParseError validateQualifierOrder(FormatStyle *Style) {
+ // If its empty then it means don't do anything.
+ if (Style->QualifierOrder.empty())
+ return ParseError::MissingQualifierOrder;
+
+ // Ensure the list contains only currently valid qualifiers.
+ for (const auto &Qualifier : Style->QualifierOrder) {
+ if (Qualifier == "type")
+ continue;
+ auto token =
+ LeftRightQualifierAlignmentFixer::getTokenFromQualifier(Qualifier);
+ if (token == tok::identifier)
+ return ParseError::InvalidQualifierSpecified;
+ }
+
+ // Ensure the list is unique (no duplicates).
+ std::set<std::string> UniqueQualifiers(Style->QualifierOrder.begin(),
+ Style->QualifierOrder.end());
+ if (Style->QualifierOrder.size() != UniqueQualifiers.size()) {
+ LLVM_DEBUG(llvm::dbgs()
+ << "Duplicate Qualifiers " << Style->QualifierOrder.size()
+ << " vs " << UniqueQualifiers.size() << "\n");
+ return ParseError::DuplicateQualifierSpecified;
+ }
+
+ // Ensure the list has 'type' in it.
+ if (!llvm::is_contained(Style->QualifierOrder, "type"))
+ return ParseError::MissingQualifierType;
+
+ return ParseError::Success;
+}
+
std::error_code parseConfiguration(llvm::MemoryBufferRef Config,
FormatStyle *Style, bool AllowUnknownOptions,
llvm::SourceMgr::DiagHandlerTy DiagHandler,
@@ -1479,7 +1991,7 @@ std::error_code parseConfiguration(llvm::MemoryBufferRef Config,
FormatStyle::LanguageKind Language = Style->Language;
assert(Language != FormatStyle::LK_None);
if (Config.getBuffer().trim().empty())
- return make_error_code(ParseError::Error);
+ return make_error_code(ParseError::Success);
Style->StyleSet.Clear();
std::vector<FormatStyle> Styles;
llvm::yaml::Input Input(Config, /*Ctxt=*/nullptr, DiagHandler,
@@ -1513,10 +2025,10 @@ std::error_code parseConfiguration(llvm::MemoryBufferRef Config,
// configuration (which can only be at slot 0) after it.
FormatStyle::FormatStyleSet StyleSet;
bool LanguageFound = false;
- for (int i = Styles.size() - 1; i >= 0; --i) {
- if (Styles[i].Language != FormatStyle::LK_None)
- StyleSet.Add(Styles[i]);
- if (Styles[i].Language == Language)
+ for (const FormatStyle &Style : llvm::reverse(Styles)) {
+ if (Style.Language != FormatStyle::LK_None)
+ StyleSet.Add(Style);
+ if (Style.Language == Language)
LanguageFound = true;
}
if (!LanguageFound) {
@@ -1532,6 +2044,8 @@ std::error_code parseConfiguration(llvm::MemoryBufferRef Config,
// See comment on FormatStyle::TSC_Wrapped.
return make_error_code(ParseError::BinPackTrailingCommaConflict);
}
+ if (Style->QualifierAlignment != FormatStyle::QAS_Leave)
+ return make_error_code(validateQualifierOrder(Style));
return make_error_code(ParseError::Success);
}
@@ -1541,18 +2055,22 @@ std::string configurationAsText(const FormatStyle &Style) {
llvm::yaml::Output Output(Stream);
// We use the same mapping method for input and output, so we need a non-const
// reference here.
- FormatStyle NonConstStyle = expandPresets(Style);
+ FormatStyle NonConstStyle = Style;
+ expandPresetsBraceWrapping(NonConstStyle);
+ expandPresetsSpaceBeforeParens(NonConstStyle);
+ expandPresetsSpacesInParens(NonConstStyle);
Output << NonConstStyle;
+
return Stream.str();
}
-llvm::Optional<FormatStyle>
+std::optional<FormatStyle>
FormatStyle::FormatStyleSet::Get(FormatStyle::LanguageKind Language) const {
if (!Styles)
- return None;
+ return std::nullopt;
auto It = Styles->find(Language);
if (It == Styles->end())
- return None;
+ return std::nullopt;
FormatStyle Style = It->second;
Style.StyleSet = *this;
return Style;
@@ -1571,13 +2089,212 @@ void FormatStyle::FormatStyleSet::Add(FormatStyle Style) {
void FormatStyle::FormatStyleSet::Clear() { Styles.reset(); }
-llvm::Optional<FormatStyle>
+std::optional<FormatStyle>
FormatStyle::GetLanguageStyle(FormatStyle::LanguageKind Language) const {
return StyleSet.Get(Language);
}
namespace {
+class ParensRemover : public TokenAnalyzer {
+public:
+ ParensRemover(const Environment &Env, const FormatStyle &Style)
+ : TokenAnalyzer(Env, Style) {}
+
+ std::pair<tooling::Replacements, unsigned>
+ analyze(TokenAnnotator &Annotator,
+ SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ FormatTokenLexer &Tokens) override {
+ AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
+ tooling::Replacements Result;
+ removeParens(AnnotatedLines, Result);
+ return {Result, 0};
+ }
+
+private:
+ void removeParens(SmallVectorImpl<AnnotatedLine *> &Lines,
+ tooling::Replacements &Result) {
+ const auto &SourceMgr = Env.getSourceManager();
+ for (auto *Line : Lines) {
+ removeParens(Line->Children, Result);
+ if (!Line->Affected)
+ continue;
+ for (const auto *Token = Line->First; Token && !Token->Finalized;
+ Token = Token->Next) {
+ if (!Token->Optional || !Token->isOneOf(tok::l_paren, tok::r_paren))
+ continue;
+ auto *Next = Token->Next;
+ assert(Next && Next->isNot(tok::eof));
+ SourceLocation Start;
+ if (Next->NewlinesBefore == 0) {
+ Start = Token->Tok.getLocation();
+ Next->WhitespaceRange = Token->WhitespaceRange;
+ } else {
+ Start = Token->WhitespaceRange.getBegin();
+ }
+ const auto &Range =
+ CharSourceRange::getCharRange(Start, Token->Tok.getEndLoc());
+ cantFail(Result.add(tooling::Replacement(SourceMgr, Range, " ")));
+ }
+ }
+ }
+};
+
+class BracesInserter : public TokenAnalyzer {
+public:
+ BracesInserter(const Environment &Env, const FormatStyle &Style)
+ : TokenAnalyzer(Env, Style) {}
+
+ std::pair<tooling::Replacements, unsigned>
+ analyze(TokenAnnotator &Annotator,
+ SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ FormatTokenLexer &Tokens) override {
+ AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
+ tooling::Replacements Result;
+ insertBraces(AnnotatedLines, Result);
+ return {Result, 0};
+ }
+
+private:
+ void insertBraces(SmallVectorImpl<AnnotatedLine *> &Lines,
+ tooling::Replacements &Result) {
+ const auto &SourceMgr = Env.getSourceManager();
+ int OpeningBraceSurplus = 0;
+ for (AnnotatedLine *Line : Lines) {
+ insertBraces(Line->Children, Result);
+ if (!Line->Affected && OpeningBraceSurplus == 0)
+ continue;
+ for (FormatToken *Token = Line->First; Token && !Token->Finalized;
+ Token = Token->Next) {
+ int BraceCount = Token->BraceCount;
+ if (BraceCount == 0)
+ continue;
+ std::string Brace;
+ if (BraceCount < 0) {
+ assert(BraceCount == -1);
+ if (!Line->Affected)
+ break;
+ Brace = Token->is(tok::comment) ? "\n{" : "{";
+ ++OpeningBraceSurplus;
+ } else {
+ if (OpeningBraceSurplus == 0)
+ break;
+ if (OpeningBraceSurplus < BraceCount)
+ BraceCount = OpeningBraceSurplus;
+ Brace = '\n' + std::string(BraceCount, '}');
+ OpeningBraceSurplus -= BraceCount;
+ }
+ Token->BraceCount = 0;
+ const auto Start = Token->Tok.getEndLoc();
+ cantFail(Result.add(tooling::Replacement(SourceMgr, Start, 0, Brace)));
+ }
+ }
+ assert(OpeningBraceSurplus == 0);
+ }
+};
+
+class BracesRemover : public TokenAnalyzer {
+public:
+ BracesRemover(const Environment &Env, const FormatStyle &Style)
+ : TokenAnalyzer(Env, Style) {}
+
+ std::pair<tooling::Replacements, unsigned>
+ analyze(TokenAnnotator &Annotator,
+ SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ FormatTokenLexer &Tokens) override {
+ AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
+ tooling::Replacements Result;
+ removeBraces(AnnotatedLines, Result);
+ return {Result, 0};
+ }
+
+private:
+ void removeBraces(SmallVectorImpl<AnnotatedLine *> &Lines,
+ tooling::Replacements &Result) {
+ const auto &SourceMgr = Env.getSourceManager();
+ const auto End = Lines.end();
+ for (auto I = Lines.begin(); I != End; ++I) {
+ const auto Line = *I;
+ removeBraces(Line->Children, Result);
+ if (!Line->Affected)
+ continue;
+ const auto NextLine = I + 1 == End ? nullptr : I[1];
+ for (auto Token = Line->First; Token && !Token->Finalized;
+ Token = Token->Next) {
+ if (!Token->Optional)
+ continue;
+ if (!Token->isOneOf(tok::l_brace, tok::r_brace))
+ continue;
+ auto Next = Token->Next;
+ assert(Next || Token == Line->Last);
+ if (!Next && NextLine)
+ Next = NextLine->First;
+ SourceLocation Start;
+ if (Next && Next->NewlinesBefore == 0 && Next->isNot(tok::eof)) {
+ Start = Token->Tok.getLocation();
+ Next->WhitespaceRange = Token->WhitespaceRange;
+ } else {
+ Start = Token->WhitespaceRange.getBegin();
+ }
+ const auto Range =
+ CharSourceRange::getCharRange(Start, Token->Tok.getEndLoc());
+ cantFail(Result.add(tooling::Replacement(SourceMgr, Range, "")));
+ }
+ }
+ }
+};
+
+class SemiRemover : public TokenAnalyzer {
+public:
+ SemiRemover(const Environment &Env, const FormatStyle &Style)
+ : TokenAnalyzer(Env, Style) {}
+
+ std::pair<tooling::Replacements, unsigned>
+ analyze(TokenAnnotator &Annotator,
+ SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ FormatTokenLexer &Tokens) override {
+ AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
+ tooling::Replacements Result;
+ removeSemi(AnnotatedLines, Result);
+ return {Result, 0};
+ }
+
+private:
+ void removeSemi(SmallVectorImpl<AnnotatedLine *> &Lines,
+ tooling::Replacements &Result) {
+ const auto &SourceMgr = Env.getSourceManager();
+ const auto End = Lines.end();
+ for (auto I = Lines.begin(); I != End; ++I) {
+ const auto Line = *I;
+ removeSemi(Line->Children, Result);
+ if (!Line->Affected)
+ continue;
+ const auto NextLine = I + 1 == End ? nullptr : I[1];
+ for (auto Token = Line->First; Token && !Token->Finalized;
+ Token = Token->Next) {
+ if (!Token->Optional)
+ continue;
+ if (Token->isNot(tok::semi))
+ continue;
+ auto Next = Token->Next;
+ assert(Next || Token == Line->Last);
+ if (!Next && NextLine)
+ Next = NextLine->First;
+ SourceLocation Start;
+ if (Next && Next->NewlinesBefore == 0 && Next->isNot(tok::eof)) {
+ Start = Token->Tok.getLocation();
+ Next->WhitespaceRange = Token->WhitespaceRange;
+ } else {
+ Start = Token->WhitespaceRange.getBegin();
+ }
+ const auto Range =
+ CharSourceRange::getCharRange(Start, Token->Tok.getEndLoc());
+ cantFail(Result.add(tooling::Replacement(SourceMgr, Range, "")));
+ }
+ }
+ }
+};
+
class JavaScriptRequoter : public TokenAnalyzer {
public:
JavaScriptRequoter(const Environment &Env, const FormatStyle &Style)
@@ -1609,10 +2326,11 @@ private:
// NB: testing for not starting with a double quote to avoid
// breaking `template strings`.
(Style.JavaScriptQuotes == FormatStyle::JSQS_Single &&
- !Input.startswith("\"")) ||
+ !Input.starts_with("\"")) ||
(Style.JavaScriptQuotes == FormatStyle::JSQS_Double &&
- !Input.startswith("\'")))
+ !Input.starts_with("\'"))) {
continue;
+ }
// Change start and end quote.
bool IsSingle = Style.JavaScriptQuotes == FormatStyle::JSQS_Single;
@@ -1678,18 +2396,17 @@ public:
tooling::Replacements Result;
deriveLocalStyle(AnnotatedLines);
AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
- for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
- Annotator.calculateFormattingInformation(*AnnotatedLines[i]);
- }
+ for (AnnotatedLine *Line : AnnotatedLines)
+ Annotator.calculateFormattingInformation(*Line);
Annotator.setCommentLineLevels(AnnotatedLines);
WhitespaceManager Whitespaces(
Env.getSourceManager(), Style,
- Style.DeriveLineEnding
- ? inputUsesCRLF(
+ Style.LineEnding > FormatStyle::LE_CRLF
+ ? WhitespaceManager::inputUsesCRLF(
Env.getSourceManager().getBufferData(Env.getFileID()),
- Style.UseCRLF)
- : Style.UseCRLF);
+ Style.LineEnding == FormatStyle::LE_DeriveCRLF)
+ : Style.LineEnding == FormatStyle::LE_CRLF);
ContinuationIndenter Indenter(Style, Tokens.getKeywords(),
Env.getSourceManager(), Whitespaces, Encoding,
BinPackInconclusiveFunctions);
@@ -1710,24 +2427,19 @@ public:
}
private:
- static bool inputUsesCRLF(StringRef Text, bool DefaultToCRLF) {
- size_t LF = Text.count('\n');
- size_t CR = Text.count('\r') * 2;
- return LF == CR ? DefaultToCRLF : CR > LF;
- }
-
bool
hasCpp03IncompatibleFormat(const SmallVectorImpl<AnnotatedLine *> &Lines) {
for (const AnnotatedLine *Line : Lines) {
if (hasCpp03IncompatibleFormat(Line->Children))
return true;
for (FormatToken *Tok = Line->First->Next; Tok; Tok = Tok->Next) {
- if (Tok->WhitespaceRange.getBegin() == Tok->WhitespaceRange.getEnd()) {
+ if (!Tok->hasWhitespaceBefore()) {
if (Tok->is(tok::coloncolon) && Tok->Previous->is(TT_TemplateOpener))
return true;
if (Tok->is(TT_TemplateCloser) &&
- Tok->Previous->is(TT_TemplateCloser))
+ Tok->Previous->is(TT_TemplateCloser)) {
return true;
+ }
}
}
}
@@ -1739,12 +2451,22 @@ private:
for (const AnnotatedLine *Line : Lines) {
AlignmentDiff += countVariableAlignments(Line->Children);
for (FormatToken *Tok = Line->First; Tok && Tok->Next; Tok = Tok->Next) {
- if (!Tok->is(TT_PointerOrReference))
+ if (Tok->isNot(TT_PointerOrReference))
continue;
- bool SpaceBefore =
- Tok->WhitespaceRange.getBegin() != Tok->WhitespaceRange.getEnd();
- bool SpaceAfter = Tok->Next->WhitespaceRange.getBegin() !=
- Tok->Next->WhitespaceRange.getEnd();
+ // Don't treat space in `void foo() &&` as evidence.
+ if (const auto *Prev = Tok->getPreviousNonComment()) {
+ if (Prev->is(tok::r_paren) && Prev->MatchingParen) {
+ if (const auto *Func =
+ Prev->MatchingParen->getPreviousNonComment()) {
+ if (Func->isOneOf(TT_FunctionDeclarationName, TT_StartOfName,
+ TT_OverloadedOperator)) {
+ continue;
+ }
+ }
+ }
+ }
+ bool SpaceBefore = Tok->hasWhitespaceBefore();
+ bool SpaceAfter = Tok->Next->hasWhitespaceBefore();
if (SpaceBefore && !SpaceAfter)
++AlignmentDiff;
if (!SpaceBefore && SpaceAfter)
@@ -1758,10 +2480,10 @@ private:
deriveLocalStyle(const SmallVectorImpl<AnnotatedLine *> &AnnotatedLines) {
bool HasBinPackedFunction = false;
bool HasOnePerLineFunction = false;
- for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
- if (!AnnotatedLines[i]->First->Next)
+ for (AnnotatedLine *Line : AnnotatedLines) {
+ if (!Line->First->Next)
continue;
- FormatToken *Tok = AnnotatedLines[i]->First->Next;
+ FormatToken *Tok = Line->First->Next;
while (Tok->Next) {
if (Tok->is(PPK_BinPacked))
HasBinPackedFunction = true;
@@ -1772,15 +2494,18 @@ private:
}
}
if (Style.DerivePointerAlignment) {
- Style.PointerAlignment = countVariableAlignments(AnnotatedLines) <= 0
- ? FormatStyle::PAS_Left
- : FormatStyle::PAS_Right;
+ const auto NetRightCount = countVariableAlignments(AnnotatedLines);
+ if (NetRightCount > 0)
+ Style.PointerAlignment = FormatStyle::PAS_Right;
+ else if (NetRightCount < 0)
+ Style.PointerAlignment = FormatStyle::PAS_Left;
Style.ReferenceAlignment = FormatStyle::RAS_Pointer;
}
- if (Style.Standard == FormatStyle::LS_Auto)
+ if (Style.Standard == FormatStyle::LS_Auto) {
Style.Standard = hasCpp03IncompatibleFormat(AnnotatedLines)
? FormatStyle::LS_Latest
: FormatStyle::LS_Cpp03;
+ }
BinPackInconclusiveFunctions =
HasBinPackedFunction || !HasOnePerLineFunction;
}
@@ -1833,8 +2558,9 @@ private:
continue;
if (!(FormatTok->is(tok::r_square) &&
Matching->is(TT_ArrayInitializerLSquare)) &&
- !(FormatTok->is(tok::r_brace) && Matching->is(TT_DictLiteral)))
+ !(FormatTok->is(tok::r_brace) && Matching->is(TT_DictLiteral))) {
continue;
+ }
FormatToken *Prev = FormatTok->getPreviousNonComment();
if (Prev->is(tok::comma) || Prev->is(tok::semi))
continue;
@@ -1890,9 +2616,8 @@ public:
private:
void cleanupLine(AnnotatedLine *Line) {
- for (auto *Child : Line->Children) {
+ for (auto *Child : Line->Children)
cleanupLine(Child);
- }
if (Line->Affected) {
cleanupRight(Line->First, tok::comma, tok::comma);
@@ -1906,10 +2631,9 @@ private:
}
bool containsOnlyComments(const AnnotatedLine &Line) {
- for (FormatToken *Tok = Line.First; Tok != nullptr; Tok = Tok->Next) {
+ for (FormatToken *Tok = Line.First; Tok; Tok = Tok->Next)
if (Tok->isNot(tok::comment))
return false;
- }
return true;
}
@@ -1918,9 +2642,8 @@ private:
std::set<unsigned> DeletedLines;
for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
auto &Line = *AnnotatedLines[i];
- if (Line.startsWithNamespace()) {
+ if (Line.startsWithNamespace())
checkEmptyNamespace(AnnotatedLines, i, i, DeletedLines);
- }
}
for (auto Line : DeletedLines) {
@@ -1957,8 +2680,9 @@ private:
if (AnnotatedLines[CurrentLine]->startsWithNamespace()) {
if (!checkEmptyNamespace(AnnotatedLines, CurrentLine, NewLine,
- DeletedLines))
+ DeletedLines)) {
return false;
+ }
CurrentLine = NewLine;
continue;
}
@@ -1979,12 +2703,12 @@ private:
// Check if the empty namespace is actually affected by changed ranges.
if (!AffectedRangeMgr.affectsCharSourceRange(CharSourceRange::getCharRange(
AnnotatedLines[InitLine]->First->Tok.getLocation(),
- AnnotatedLines[CurrentLine]->Last->Tok.getEndLoc())))
+ AnnotatedLines[CurrentLine]->Last->Tok.getEndLoc()))) {
return false;
+ }
- for (unsigned i = InitLine; i <= CurrentLine; ++i) {
+ for (unsigned i = InitLine; i <= CurrentLine; ++i)
DeletedLines.insert(i);
- }
return true;
}
@@ -1997,10 +2721,12 @@ private:
void cleanupPair(FormatToken *Start, LeftKind LK, RightKind RK,
bool DeleteLeft) {
auto NextNotDeleted = [this](const FormatToken &Tok) -> FormatToken * {
- for (auto *Res = Tok.Next; Res; Res = Res->Next)
- if (!Res->is(tok::comment) &&
- DeletedTokens.find(Res) == DeletedTokens.end())
+ for (auto *Res = Tok.Next; Res; Res = Res->Next) {
+ if (Res->isNot(tok::comment) &&
+ DeletedTokens.find(Res) == DeletedTokens.end()) {
return Res;
+ }
+ }
return nullptr;
};
for (auto *Left = Start; Left;) {
@@ -2038,7 +2764,7 @@ private:
tooling::Replacements generateFixes() {
tooling::Replacements Fixes;
- std::vector<FormatToken *> Tokens;
+ SmallVector<FormatToken *> Tokens;
std::copy(DeletedTokens.begin(), DeletedTokens.end(),
std::back_inserter(Tokens));
@@ -2048,10 +2774,8 @@ private:
unsigned Idx = 0;
while (Idx < Tokens.size()) {
unsigned St = Idx, End = Idx;
- while ((End + 1) < Tokens.size() &&
- Tokens[End]->Next == Tokens[End + 1]) {
- End++;
- }
+ while ((End + 1) < Tokens.size() && Tokens[End]->Next == Tokens[End + 1])
+ ++End;
auto SR = CharSourceRange::getCharRange(Tokens[St]->Tok.getLocation(),
Tokens[End]->Tok.getEndLoc());
auto Err =
@@ -2124,6 +2848,8 @@ private:
"CGSizeMake",
"CGVector",
"CGVectorMake",
+ "FOUNDATION_EXPORT", // This is an alias for FOUNDATION_EXTERN.
+ "FOUNDATION_EXTERN",
"NSAffineTransform",
"NSArray",
"NSAttributedString",
@@ -2139,6 +2865,8 @@ private:
"NSDecimalNumber",
"NSDictionary",
"NSEdgeInsets",
+ "NSError",
+ "NSErrorDomain",
"NSHashTable",
"NSIndexPath",
"NSIndexSet",
@@ -2178,15 +2906,17 @@ private:
"NSURLQueryItem",
"NSUUID",
"NSValue",
+ "NS_ASSUME_NONNULL_BEGIN",
"UIImage",
"UIView",
};
- for (auto Line : AnnotatedLines) {
- if (Line->First && (Line->First->TokenText.startswith("#") ||
+ for (auto *Line : AnnotatedLines) {
+ if (Line->First && (Line->First->TokenText.starts_with("#") ||
Line->First->TokenText == "__pragma" ||
- Line->First->TokenText == "_Pragma"))
+ Line->First->TokenText == "_Pragma")) {
continue;
+ }
for (const FormatToken *FormatTok = Line->First; FormatTok;
FormatTok = FormatTok->Next) {
if ((FormatTok->Previous && FormatTok->Previous->is(tok::at) &&
@@ -2199,6 +2929,7 @@ private:
FormatTok->TokenText)) ||
FormatTok->is(TT_ObjCStringLiteral) ||
FormatTok->isOneOf(Keywords.kw_NS_CLOSED_ENUM, Keywords.kw_NS_ENUM,
+ Keywords.kw_NS_ERROR_ENUM,
Keywords.kw_NS_OPTIONS, TT_ObjCBlockLBrace,
TT_ObjCBlockLParen, TT_ObjCDecl, TT_ObjCForIn,
TT_ObjCMethodExpr, TT_ObjCMethodSpecifier,
@@ -2233,7 +2964,7 @@ struct JavaImportDirective {
StringRef Identifier;
StringRef Text;
unsigned Offset;
- std::vector<StringRef> AssociatedCommentLines;
+ SmallVector<StringRef> AssociatedCommentLines;
bool IsStatic;
};
@@ -2242,10 +2973,11 @@ struct JavaImportDirective {
// Determines whether 'Ranges' intersects with ('Start', 'End').
static bool affectsRange(ArrayRef<tooling::Range> Ranges, unsigned Start,
unsigned End) {
- for (auto Range : Ranges) {
+ for (const auto &Range : Ranges) {
if (Range.getOffset() < End &&
- Range.getOffset() + Range.getLength() > Start)
+ Range.getOffset() + Range.getLength() > Start) {
return true;
+ }
}
return false;
}
@@ -2286,7 +3018,7 @@ std::string replaceCRLF(const std::string &Code) {
do {
Pos = Code.find("\r\n", LastPos);
if (Pos == LastPos) {
- LastPos++;
+ ++LastPos;
continue;
}
if (Pos == std::string::npos) {
@@ -2313,16 +3045,14 @@ static void sortCppIncludes(const FormatStyle &Style,
StringRef Code, tooling::Replacements &Replaces,
unsigned *Cursor) {
tooling::IncludeCategoryManager Categories(Style.IncludeStyle, FileName);
- unsigned IncludesBeginOffset = Includes.front().Offset;
- unsigned IncludesEndOffset =
+ const unsigned IncludesBeginOffset = Includes.front().Offset;
+ const unsigned IncludesEndOffset =
Includes.back().Offset + Includes.back().Text.size();
- unsigned IncludesBlockSize = IncludesEndOffset - IncludesBeginOffset;
+ const unsigned IncludesBlockSize = IncludesEndOffset - IncludesBeginOffset;
if (!affectsRange(Ranges, IncludesBeginOffset, IncludesEndOffset))
return;
- SmallVector<unsigned, 16> Indices;
- for (unsigned i = 0, e = Includes.size(); i != e; ++i) {
- Indices.push_back(i);
- }
+ SmallVector<unsigned, 16> Indices =
+ llvm::to_vector<16>(llvm::seq<unsigned>(0, Includes.size()));
if (Style.SortIncludes == FormatStyle::SI_CaseInsensitive) {
llvm::stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
@@ -2345,9 +3075,10 @@ static void sortCppIncludes(const FormatStyle &Style,
unsigned CursorIndex;
// The offset from cursor to the end of line.
unsigned CursorToEOLOffset;
- if (Cursor)
+ if (Cursor) {
std::tie(CursorIndex, CursorToEOLOffset) =
FindCursorIndex(Includes, Indices, *Cursor);
+ }
// Deduplicate #includes.
Indices.erase(std::unique(Indices.begin(), Indices.end(),
@@ -2363,11 +3094,12 @@ static void sortCppIncludes(const FormatStyle &Style,
// the entire block. Otherwise, no replacement is generated.
// In case Style.IncldueStyle.IncludeBlocks != IBS_Preserve, this check is not
// enough as additional newlines might be added or removed across #include
- // blocks. This we handle below by generating the updated #imclude blocks and
+ // blocks. This we handle below by generating the updated #include blocks and
// comparing it to the original.
if (Indices.size() == Includes.size() && llvm::is_sorted(Indices) &&
- Style.IncludeStyle.IncludeBlocks == tooling::IncludeStyle::IBS_Preserve)
+ Style.IncludeStyle.IncludeBlocks == tooling::IncludeStyle::IBS_Preserve) {
return;
+ }
std::string result;
for (unsigned Index : Indices) {
@@ -2375,8 +3107,9 @@ static void sortCppIncludes(const FormatStyle &Style,
result += "\n";
if (Style.IncludeStyle.IncludeBlocks ==
tooling::IncludeStyle::IBS_Regroup &&
- CurrentCategory != Includes[Index].Category)
+ CurrentCategory != Includes[Index].Category) {
result += "\n";
+ }
}
result += Includes[Index].Text;
if (Cursor && CursorIndex == Index)
@@ -2384,11 +3117,15 @@ static void sortCppIncludes(const FormatStyle &Style,
CurrentCategory = Includes[Index].Category;
}
+ if (Cursor && *Cursor >= IncludesEndOffset)
+ *Cursor += result.size() - IncludesBlockSize;
+
// If the #includes are out of order, we generate a single replacement fixing
// the entire range of blocks. Otherwise, no replacement is generated.
if (replaceCRLF(result) == replaceCRLF(std::string(Code.substr(
- IncludesBeginOffset, IncludesBlockSize))))
+ IncludesBeginOffset, IncludesBlockSize)))) {
return;
+ }
auto Err = Replaces.add(tooling::Replacement(
FileName, Includes.front().Offset, IncludesBlockSize, result));
@@ -2400,13 +3137,6 @@ static void sortCppIncludes(const FormatStyle &Style,
}
}
-namespace {
-
-const char CppIncludeRegexPattern[] =
- R"(^[\t\ ]*#[\t\ ]*(import|include)[^"<]*(["<][^">]*[">]))";
-
-} // anonymous namespace
-
tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
ArrayRef<tooling::Range> Ranges,
StringRef FileName,
@@ -2416,7 +3146,6 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
.StartsWith("\xEF\xBB\xBF", 3) // UTF-8 BOM
.Default(0);
unsigned SearchFrom = 0;
- llvm::Regex IncludeRegex(CppIncludeRegexPattern);
SmallVector<StringRef, 4> Matches;
SmallVector<IncludeDirective, 16> IncludesInBlock;
@@ -2425,23 +3154,41 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
// doesn't have hidden dependencies
// (http://llvm.org/docs/CodingStandards.html#include-style).
//
- // FIXME: Do some sanity checking, e.g. edit distance of the base name, to fix
+ // FIXME: Do some validation, e.g. edit distance of the base name, to fix
// cases where the first #include is unlikely to be the main header.
tooling::IncludeCategoryManager Categories(Style.IncludeStyle, FileName);
bool FirstIncludeBlock = true;
bool MainIncludeFound = false;
bool FormattingOff = false;
+ // '[' must be the first and '-' the last character inside [...].
+ llvm::Regex RawStringRegex(
+ "R\"([][A-Za-z0-9_{}#<>%:;.?*+/^&\\$|~!=,'-]*)\\(");
+ SmallVector<StringRef, 2> RawStringMatches;
+ std::string RawStringTermination = ")\"";
+
for (;;) {
auto Pos = Code.find('\n', SearchFrom);
StringRef Line =
Code.substr(Prev, (Pos != StringRef::npos ? Pos : Code.size()) - Prev);
StringRef Trimmed = Line.trim();
- if (Trimmed == "// clang-format off" || Trimmed == "/* clang-format off */")
+
+ // #includes inside raw string literals need to be ignored.
+ // or we will sort the contents of the string.
+ // Skip past until we think we are at the rawstring literal close.
+ if (RawStringRegex.match(Trimmed, &RawStringMatches)) {
+ std::string CharSequence = RawStringMatches[1].str();
+ RawStringTermination = ")" + CharSequence + "\"";
FormattingOff = true;
- else if (Trimmed == "// clang-format on" ||
- Trimmed == "/* clang-format on */")
+ }
+
+ if (Trimmed.contains(RawStringTermination))
+ FormattingOff = false;
+
+ if (isClangFormatOff(Trimmed))
+ FormattingOff = true;
+ else if (isClangFormatOn(Trimmed))
FormattingOff = false;
const bool EmptyLineSkipped =
@@ -2450,10 +3197,19 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
Style.IncludeStyle.IncludeBlocks ==
tooling::IncludeStyle::IBS_Regroup);
- bool MergeWithNextLine = Trimmed.endswith("\\");
+ bool MergeWithNextLine = Trimmed.ends_with("\\");
if (!FormattingOff && !MergeWithNextLine) {
- if (IncludeRegex.match(Line, &Matches)) {
+ if (tooling::HeaderIncludes::IncludeRegex.match(Line, &Matches)) {
StringRef IncludeName = Matches[2];
+ if (Line.contains("/*") && !Line.contains("*/")) {
+ // #include with a start of a block comment, but without the end.
+ // Need to keep all the lines until the end of the comment together.
+ // FIXME: This is somehow simplified check that probably does not work
+ // correctly if there are multiple comments on a line.
+ Pos = Code.find("*/", SearchFrom);
+ Line = Code.substr(
+ Prev, (Pos != StringRef::npos ? Pos + 2 : Code.size()) - Prev);
+ }
int Category = Categories.getIncludePriority(
IncludeName,
/*CheckMainHeader=*/!MainIncludeFound && FirstIncludeBlock);
@@ -2467,7 +3223,7 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
sortCppIncludes(Style, IncludesInBlock, Ranges, FileName, Code,
Replaces, Cursor);
IncludesInBlock.clear();
- if (Trimmed.startswith("#pragma hdrstop")) // Precompiled headers.
+ if (Trimmed.starts_with("#pragma hdrstop")) // Precompiled headers.
FirstIncludeBlock = true;
else
FirstIncludeBlock = false;
@@ -2494,8 +3250,8 @@ static unsigned findJavaImportGroup(const FormatStyle &Style,
unsigned LongestMatchIndex = UINT_MAX;
unsigned LongestMatchLength = 0;
for (unsigned I = 0; I < Style.JavaImportGroups.size(); I++) {
- std::string GroupPrefix = Style.JavaImportGroups[I];
- if (ImportIdentifier.startswith(GroupPrefix) &&
+ const std::string &GroupPrefix = Style.JavaImportGroups[I];
+ if (ImportIdentifier.starts_with(GroupPrefix) &&
GroupPrefix.length() > LongestMatchLength) {
LongestMatchIndex = I;
LongestMatchLength = GroupPrefix.length();
@@ -2519,13 +3275,14 @@ static void sortJavaImports(const FormatStyle &Style,
unsigned ImportsBlockSize = ImportsEndOffset - ImportsBeginOffset;
if (!affectsRange(Ranges, ImportsBeginOffset, ImportsEndOffset))
return;
- SmallVector<unsigned, 16> Indices;
+
+ SmallVector<unsigned, 16> Indices =
+ llvm::to_vector<16>(llvm::seq<unsigned>(0, Imports.size()));
SmallVector<unsigned, 16> JavaImportGroups;
- for (unsigned i = 0, e = Imports.size(); i != e; ++i) {
- Indices.push_back(i);
- JavaImportGroups.push_back(
- findJavaImportGroup(Style, Imports[i].Identifier));
- }
+ JavaImportGroups.reserve(Imports.size());
+ for (const JavaImportDirective &Import : Imports)
+ JavaImportGroups.push_back(findJavaImportGroup(Style, Import.Identifier));
+
bool StaticImportAfterNormalImport =
Style.SortJavaStaticImport == FormatStyle::SJSIO_After;
llvm::sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
@@ -2553,8 +3310,9 @@ static void sortJavaImports(const FormatStyle &Style,
if (!result.empty()) {
result += "\n";
if (CurrentIsStatic != Imports[Index].IsStatic ||
- CurrentImportGroup != JavaImportGroups[Index])
+ CurrentImportGroup != JavaImportGroups[Index]) {
result += "\n";
+ }
}
for (StringRef CommentLine : Imports[Index].AssociatedCommentLines) {
result += CommentLine;
@@ -2568,8 +3326,9 @@ static void sortJavaImports(const FormatStyle &Style,
// If the imports are out of order, we generate a single replacement fixing
// the entire block. Otherwise, no replacement is generated.
if (replaceCRLF(result) == replaceCRLF(std::string(Code.substr(
- Imports.front().Offset, ImportsBlockSize))))
+ Imports.front().Offset, ImportsBlockSize)))) {
return;
+ }
auto Err = Replaces.add(tooling::Replacement(FileName, Imports.front().Offset,
ImportsBlockSize, result));
@@ -2597,7 +3356,7 @@ tooling::Replacements sortJavaImports(const FormatStyle &Style, StringRef Code,
llvm::Regex ImportRegex(JavaImportRegexPattern);
SmallVector<StringRef, 4> Matches;
SmallVector<JavaImportDirective, 16> ImportsInBlock;
- std::vector<StringRef> AssociatedCommentLines;
+ SmallVector<StringRef> AssociatedCommentLines;
bool FormattingOff = false;
@@ -2607,9 +3366,9 @@ tooling::Replacements sortJavaImports(const FormatStyle &Style, StringRef Code,
Code.substr(Prev, (Pos != StringRef::npos ? Pos : Code.size()) - Prev);
StringRef Trimmed = Line.trim();
- if (Trimmed == "// clang-format off")
+ if (isClangFormatOff(Trimmed))
FormattingOff = true;
- else if (Trimmed == "// clang-format on")
+ else if (isClangFormatOn(Trimmed))
FormattingOff = false;
if (ImportRegex.match(Line, &Matches)) {
@@ -2621,9 +3380,8 @@ tooling::Replacements sortJavaImports(const FormatStyle &Style, StringRef Code,
StringRef Static = Matches[1];
StringRef Identifier = Matches[2];
bool IsStatic = false;
- if (Static.contains("static")) {
+ if (Static.contains("static"))
IsStatic = true;
- }
ImportsInBlock.push_back(
{Identifier, Line, Prev, AssociatedCommentLines, IsStatic});
AssociatedCommentLines.clear();
@@ -2648,7 +3406,7 @@ bool isMpegTS(StringRef Code) {
return Code.size() > 188 && Code[0] == 0x47 && Code[188] == 0x47;
}
-bool isLikelyXml(StringRef Code) { return Code.ltrim().startswith("<"); }
+bool isLikelyXml(StringRef Code) { return Code.ltrim().starts_with("<"); }
tooling::Replacements sortIncludes(const FormatStyle &Style, StringRef Code,
ArrayRef<tooling::Range> Ranges,
@@ -2659,8 +3417,9 @@ tooling::Replacements sortIncludes(const FormatStyle &Style, StringRef Code,
if (isLikelyXml(Code))
return Replaces;
if (Style.Language == FormatStyle::LanguageKind::LK_JavaScript &&
- isMpegTS(Code))
+ isMpegTS(Code)) {
return Replaces;
+ }
if (Style.Language == FormatStyle::LanguageKind::LK_JavaScript)
return sortJavaScriptImports(Style, Code, Ranges, FileName);
if (Style.Language == FormatStyle::LanguageKind::LK_Java)
@@ -2718,8 +3477,8 @@ namespace {
inline bool isHeaderInsertion(const tooling::Replacement &Replace) {
return Replace.getOffset() == UINT_MAX && Replace.getLength() == 0 &&
- llvm::Regex(CppIncludeRegexPattern)
- .match(Replace.getReplacementText());
+ tooling::HeaderIncludes::IncludeRegex.match(
+ Replace.getReplacementText());
}
inline bool isHeaderDeletion(const tooling::Replacement &Replace) {
@@ -2759,7 +3518,7 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
for (const auto &Header : HeadersToDelete) {
tooling::Replacements Replaces =
- Includes.remove(Header.trim("\"<>"), Header.startswith("<"));
+ Includes.remove(Header.trim("\"<>"), Header.starts_with("<"));
for (const auto &R : Replaces) {
auto Err = Result.add(R);
if (Err) {
@@ -2771,17 +3530,18 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
}
}
- llvm::Regex IncludeRegex = llvm::Regex(CppIncludeRegexPattern);
llvm::SmallVector<StringRef, 4> Matches;
for (const auto &R : HeaderInsertions) {
auto IncludeDirective = R.getReplacementText();
- bool Matched = IncludeRegex.match(IncludeDirective, &Matches);
+ bool Matched =
+ tooling::HeaderIncludes::IncludeRegex.match(IncludeDirective, &Matches);
assert(Matched && "Header insertion replacement must have replacement text "
"'#include ...'");
(void)Matched;
auto IncludeName = Matches[2];
auto Replace =
- Includes.insert(IncludeName.trim("\"<>"), IncludeName.startswith("<"));
+ Includes.insert(IncludeName.trim("\"<>"), IncludeName.starts_with("<"),
+ tooling::IncludeDirective::Include);
if (Replace) {
auto Err = Result.add(*Replace);
if (Err) {
@@ -2812,7 +3572,7 @@ cleanupAroundReplacements(StringRef Code, const tooling::Replacements &Replaces,
// Make header insertion replacements insert new headers into correct blocks.
tooling::Replacements NewReplaces =
fixCppIncludeInsertions(Code, Replaces, Style);
- return processReplacements(Cleanup, Code, NewReplaces, Style);
+ return cantFail(processReplacements(Cleanup, Code, NewReplaces, Style));
}
namespace internal {
@@ -2821,7 +3581,23 @@ reformat(const FormatStyle &Style, StringRef Code,
ArrayRef<tooling::Range> Ranges, unsigned FirstStartColumn,
unsigned NextStartColumn, unsigned LastStartColumn, StringRef FileName,
FormattingAttemptStatus *Status) {
- FormatStyle Expanded = expandPresets(Style);
+ FormatStyle Expanded = Style;
+ expandPresetsBraceWrapping(Expanded);
+ expandPresetsSpaceBeforeParens(Expanded);
+ expandPresetsSpacesInParens(Expanded);
+ Expanded.InsertBraces = false;
+ Expanded.RemoveBracesLLVM = false;
+ Expanded.RemoveParentheses = FormatStyle::RPS_Leave;
+ Expanded.RemoveSemicolon = false;
+ switch (Expanded.RequiresClausePosition) {
+ case FormatStyle::RCPS_SingleLine:
+ case FormatStyle::RCPS_WithPreceding:
+ Expanded.IndentRequiresClause = false;
+ break;
+ default:
+ break;
+ }
+
if (Expanded.DisableFormat)
return {tooling::Replacements(), 0};
if (isLikelyXml(Code))
@@ -2832,59 +3608,118 @@ reformat(const FormatStyle &Style, StringRef Code,
// JSON only needs the formatting passing.
if (Style.isJson()) {
std::vector<tooling::Range> Ranges(1, tooling::Range(0, Code.size()));
- auto Env =
- std::make_unique<Environment>(Code, FileName, Ranges, FirstStartColumn,
- NextStartColumn, LastStartColumn);
+ auto Env = Environment::make(Code, FileName, Ranges, FirstStartColumn,
+ NextStartColumn, LastStartColumn);
+ if (!Env)
+ return {};
// Perform the actual formatting pass.
tooling::Replacements Replaces =
Formatter(*Env, Style, Status).process().first;
// add a replacement to remove the "x = " from the result.
- if (!Replaces.add(tooling::Replacement(FileName, 0, 4, ""))) {
- // apply the reformatting changes and the removal of "x = ".
- if (applyAllReplacements(Code, Replaces)) {
- return {Replaces, 0};
- }
- }
+ Replaces = Replaces.merge(
+ tooling::Replacements(tooling::Replacement(FileName, 0, 4, "")));
+ // apply the reformatting changes and the removal of "x = ".
+ if (applyAllReplacements(Code, Replaces))
+ return {Replaces, 0};
return {tooling::Replacements(), 0};
}
+ auto Env = Environment::make(Code, FileName, Ranges, FirstStartColumn,
+ NextStartColumn, LastStartColumn);
+ if (!Env)
+ return {};
+
typedef std::function<std::pair<tooling::Replacements, unsigned>(
const Environment &)>
AnalyzerPass;
- SmallVector<AnalyzerPass, 4> Passes;
- if (Style.Language == FormatStyle::LK_Cpp) {
- if (Style.FixNamespaceComments)
+ SmallVector<AnalyzerPass, 16> Passes;
+
+ Passes.emplace_back([&](const Environment &Env) {
+ return IntegerLiteralSeparatorFixer().process(Env, Expanded);
+ });
+
+ if (Style.isCpp()) {
+ if (Style.QualifierAlignment != FormatStyle::QAS_Leave)
+ addQualifierAlignmentFixerPasses(Expanded, Passes);
+
+ if (Style.RemoveParentheses != FormatStyle::RPS_Leave) {
+ FormatStyle S = Expanded;
+ S.RemoveParentheses = Style.RemoveParentheses;
+ Passes.emplace_back([&, S = std::move(S)](const Environment &Env) {
+ return ParensRemover(Env, S).process(/*SkipAnnotation=*/true);
+ });
+ }
+
+ if (Style.InsertBraces) {
+ FormatStyle S = Expanded;
+ S.InsertBraces = true;
+ Passes.emplace_back([&, S = std::move(S)](const Environment &Env) {
+ return BracesInserter(Env, S).process(/*SkipAnnotation=*/true);
+ });
+ }
+
+ if (Style.RemoveBracesLLVM) {
+ FormatStyle S = Expanded;
+ S.RemoveBracesLLVM = true;
+ Passes.emplace_back([&, S = std::move(S)](const Environment &Env) {
+ return BracesRemover(Env, S).process(/*SkipAnnotation=*/true);
+ });
+ }
+
+ if (Style.RemoveSemicolon) {
+ FormatStyle S = Expanded;
+ S.RemoveSemicolon = true;
+ Passes.emplace_back([&, S = std::move(S)](const Environment &Env) {
+ return SemiRemover(Env, S).process(/*SkipAnnotation=*/true);
+ });
+ }
+
+ if (Style.FixNamespaceComments) {
Passes.emplace_back([&](const Environment &Env) {
return NamespaceEndCommentsFixer(Env, Expanded).process();
});
+ }
- if (Style.SortUsingDeclarations)
+ if (Style.SortUsingDeclarations != FormatStyle::SUD_Never) {
Passes.emplace_back([&](const Environment &Env) {
return UsingDeclarationsSorter(Env, Expanded).process();
});
+ }
+ }
+
+ if (Style.SeparateDefinitionBlocks != FormatStyle::SDS_Leave) {
+ Passes.emplace_back([&](const Environment &Env) {
+ return DefinitionBlockSeparator(Env, Expanded).process();
+ });
}
- if (Style.Language == FormatStyle::LK_JavaScript &&
- Style.JavaScriptQuotes != FormatStyle::JSQS_Leave)
+ if (Style.Language == FormatStyle::LK_ObjC &&
+ !Style.ObjCPropertyAttributeOrder.empty()) {
Passes.emplace_back([&](const Environment &Env) {
- return JavaScriptRequoter(Env, Expanded).process();
+ return ObjCPropertyAttributeOrderFixer(Env, Expanded).process();
});
+ }
+
+ if (Style.isJavaScript() &&
+ Style.JavaScriptQuotes != FormatStyle::JSQS_Leave) {
+ Passes.emplace_back([&](const Environment &Env) {
+ return JavaScriptRequoter(Env, Expanded).process(/*SkipAnnotation=*/true);
+ });
+ }
Passes.emplace_back([&](const Environment &Env) {
return Formatter(Env, Expanded, Status).process();
});
- if (Style.Language == FormatStyle::LK_JavaScript &&
- Style.InsertTrailingCommas == FormatStyle::TCS_Wrapped)
+ if (Style.isJavaScript() &&
+ Style.InsertTrailingCommas == FormatStyle::TCS_Wrapped) {
Passes.emplace_back([&](const Environment &Env) {
return TrailingCommaInserter(Env, Expanded).process();
});
+ }
- auto Env =
- std::make_unique<Environment>(Code, FileName, Ranges, FirstStartColumn,
- NextStartColumn, LastStartColumn);
- llvm::Optional<std::string> CurrentCode = None;
+ std::optional<std::string> CurrentCode;
tooling::Replacements Fixes;
unsigned Penalty = 0;
for (size_t I = 0, E = Passes.size(); I < E; ++I) {
@@ -2896,12 +3731,32 @@ reformat(const FormatStyle &Style, StringRef Code,
Penalty += PassFixes.second;
if (I + 1 < E) {
CurrentCode = std::move(*NewCode);
- Env = std::make_unique<Environment>(
+ Env = Environment::make(
*CurrentCode, FileName,
tooling::calculateRangesAfterReplacements(Fixes, Ranges),
FirstStartColumn, NextStartColumn, LastStartColumn);
+ if (!Env)
+ return {};
+ }
+ }
+ }
+
+ if (Style.QualifierAlignment != FormatStyle::QAS_Leave) {
+ // Don't make replacements that replace nothing. QualifierAlignment can
+ // produce them if one of its early passes changes e.g. `const volatile` to
+ // `volatile const` and then a later pass changes it back again.
+ tooling::Replacements NonNoOpFixes;
+ for (const tooling::Replacement &Fix : Fixes) {
+ StringRef OriginalCode = Code.substr(Fix.getOffset(), Fix.getLength());
+ if (!OriginalCode.equals(Fix.getReplacementText())) {
+ auto Err = NonNoOpFixes.add(Fix);
+ if (Err) {
+ llvm::errs() << "Error adding replacements : "
+ << llvm::toString(std::move(Err)) << "\n";
+ }
}
}
+ Fixes = std::move(NonNoOpFixes);
}
return {Fixes, Penalty};
@@ -2925,7 +3780,10 @@ tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code,
// cleanups only apply to C++ (they mostly concern ctor commas etc.)
if (Style.Language != FormatStyle::LK_Cpp)
return tooling::Replacements();
- return Cleaner(Environment(Code, FileName, Ranges), Style).process().first;
+ auto Env = Environment::make(Code, FileName, Ranges);
+ if (!Env)
+ return {};
+ return Cleaner(*Env, Style).process().first;
}
tooling::Replacements reformat(const FormatStyle &Style, StringRef Code,
@@ -2942,18 +3800,20 @@ tooling::Replacements fixNamespaceEndComments(const FormatStyle &Style,
StringRef Code,
ArrayRef<tooling::Range> Ranges,
StringRef FileName) {
- return NamespaceEndCommentsFixer(Environment(Code, FileName, Ranges), Style)
- .process()
- .first;
+ auto Env = Environment::make(Code, FileName, Ranges);
+ if (!Env)
+ return {};
+ return NamespaceEndCommentsFixer(*Env, Style).process().first;
}
tooling::Replacements sortUsingDeclarations(const FormatStyle &Style,
StringRef Code,
ArrayRef<tooling::Range> Ranges,
StringRef FileName) {
- return UsingDeclarationsSorter(Environment(Code, FileName, Ranges), Style)
- .process()
- .first;
+ auto Env = Environment::make(Code, FileName, Ranges);
+ if (!Env)
+ return {};
+ return UsingDeclarationsSorter(*Env, Style).process().first;
}
LangOptions getFormattingLangOpts(const FormatStyle &Style) {
@@ -2970,6 +3830,10 @@ LangOptions getFormattingLangOpts(const FormatStyle &Style) {
LangOpts.CPlusPlus17 = LexingStd >= FormatStyle::LS_Cpp17;
LangOpts.CPlusPlus20 = LexingStd >= FormatStyle::LS_Cpp20;
LangOpts.Char8 = LexingStd >= FormatStyle::LS_Cpp20;
+ // Turning on digraphs in standards before C++0x is error-prone, because e.g.
+ // the sequence "<::" will be unconditionally treated as "[:".
+ // Cf. Lexer::LexTokenInternal.
+ LangOpts.Digraphs = LexingStd >= FormatStyle::LS_Cpp11;
LangOpts.LineComment = 1;
bool AlternativeOperators = Style.isCpp();
@@ -2983,39 +3847,52 @@ LangOptions getFormattingLangOpts(const FormatStyle &Style) {
}
const char *StyleOptionHelpDescription =
- "Coding style, currently supports:\n"
- " LLVM, GNU, Google, Chromium, Microsoft, Mozilla, WebKit.\n"
- "Use -style=file to load style configuration from\n"
- ".clang-format file located in one of the parent\n"
- "directories of the source file (or current\n"
- "directory for stdin).\n"
- "Use -style=\"{key: value, ...}\" to set specific\n"
- "parameters, e.g.:\n"
- " -style=\"{BasedOnStyle: llvm, IndentWidth: 8}\"";
+ "Set coding style. <string> can be:\n"
+ "1. A preset: LLVM, GNU, Google, Chromium, Microsoft,\n"
+ " Mozilla, WebKit.\n"
+ "2. 'file' to load style configuration from a\n"
+ " .clang-format file in one of the parent directories\n"
+ " of the source file (for stdin, see --assume-filename).\n"
+ " If no .clang-format file is found, falls back to\n"
+ " --fallback-style.\n"
+ " --style=file is the default.\n"
+ "3. 'file:<format_file_path>' to explicitly specify\n"
+ " the configuration file.\n"
+ "4. \"{key: value, ...}\" to set specific parameters, e.g.:\n"
+ " --style=\"{BasedOnStyle: llvm, IndentWidth: 8}\"";
static FormatStyle::LanguageKind getLanguageByFileName(StringRef FileName) {
- if (FileName.endswith(".java"))
+ if (FileName.ends_with(".java"))
return FormatStyle::LK_Java;
- if (FileName.endswith_insensitive(".js") ||
- FileName.endswith_insensitive(".mjs") ||
- FileName.endswith_insensitive(".ts"))
+ if (FileName.ends_with_insensitive(".js") ||
+ FileName.ends_with_insensitive(".mjs") ||
+ FileName.ends_with_insensitive(".ts")) {
return FormatStyle::LK_JavaScript; // (module) JavaScript or TypeScript.
- if (FileName.endswith(".m") || FileName.endswith(".mm"))
+ }
+ if (FileName.ends_with(".m") || FileName.ends_with(".mm"))
return FormatStyle::LK_ObjC;
- if (FileName.endswith_insensitive(".proto") ||
- FileName.endswith_insensitive(".protodevel"))
+ if (FileName.ends_with_insensitive(".proto") ||
+ FileName.ends_with_insensitive(".protodevel")) {
return FormatStyle::LK_Proto;
- if (FileName.endswith_insensitive(".textpb") ||
- FileName.endswith_insensitive(".pb.txt") ||
- FileName.endswith_insensitive(".textproto") ||
- FileName.endswith_insensitive(".asciipb"))
+ }
+ if (FileName.ends_with_insensitive(".textpb") ||
+ FileName.ends_with_insensitive(".pb.txt") ||
+ FileName.ends_with_insensitive(".textproto") ||
+ FileName.ends_with_insensitive(".asciipb")) {
return FormatStyle::LK_TextProto;
- if (FileName.endswith_insensitive(".td"))
+ }
+ if (FileName.ends_with_insensitive(".td"))
return FormatStyle::LK_TableGen;
- if (FileName.endswith_insensitive(".cs"))
+ if (FileName.ends_with_insensitive(".cs"))
return FormatStyle::LK_CSharp;
- if (FileName.endswith_insensitive(".json"))
+ if (FileName.ends_with_insensitive(".json"))
return FormatStyle::LK_Json;
+ if (FileName.ends_with_insensitive(".sv") ||
+ FileName.ends_with_insensitive(".svh") ||
+ FileName.ends_with_insensitive(".v") ||
+ FileName.ends_with_insensitive(".vh")) {
+ return FormatStyle::LK_Verilog;
+ }
return FormatStyle::LK_Cpp;
}
@@ -3037,38 +3914,76 @@ FormatStyle::LanguageKind guessLanguage(StringRef FileName, StringRef Code) {
return GuessedLanguage;
}
+// Update StyleOptionHelpDescription above when changing this.
const char *DefaultFormatStyle = "file";
const char *DefaultFallbackStyle = "LLVM";
+llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>>
+loadAndParseConfigFile(StringRef ConfigFile, llvm::vfs::FileSystem *FS,
+ FormatStyle *Style, bool AllowUnknownOptions) {
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Text =
+ FS->getBufferForFile(ConfigFile.str());
+ if (auto EC = Text.getError())
+ return EC;
+ if (auto EC = parseConfiguration(*Text.get(), Style, AllowUnknownOptions))
+ return EC;
+ return Text;
+}
+
llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
StringRef FallbackStyleName,
StringRef Code, llvm::vfs::FileSystem *FS,
bool AllowUnknownOptions) {
- if (!FS) {
- FS = llvm::vfs::getRealFileSystem().get();
- }
FormatStyle Style = getLLVMStyle(guessLanguage(FileName, Code));
-
FormatStyle FallbackStyle = getNoStyle();
if (!getPredefinedStyle(FallbackStyleName, Style.Language, &FallbackStyle))
- return make_string_error("Invalid fallback style \"" + FallbackStyleName);
+ return make_string_error("Invalid fallback style: " + FallbackStyleName);
llvm::SmallVector<std::unique_ptr<llvm::MemoryBuffer>, 1>
ChildFormatTextToApply;
- if (StyleName.startswith("{")) {
+ if (StyleName.starts_with("{")) {
// Parse YAML/JSON style from the command line.
StringRef Source = "<command-line>";
if (std::error_code ec =
parseConfiguration(llvm::MemoryBufferRef(StyleName, Source), &Style,
- AllowUnknownOptions))
+ AllowUnknownOptions)) {
return make_string_error("Error parsing -style: " + ec.message());
- if (Style.InheritsParentConfig)
- ChildFormatTextToApply.emplace_back(
- llvm::MemoryBuffer::getMemBuffer(StyleName, Source, false));
- else
+ }
+
+ if (!Style.InheritsParentConfig)
+ return Style;
+
+ ChildFormatTextToApply.emplace_back(
+ llvm::MemoryBuffer::getMemBuffer(StyleName, Source, false));
+ }
+
+ if (!FS)
+ FS = llvm::vfs::getRealFileSystem().get();
+ assert(FS);
+
+ // User provided clang-format file using -style=file:path/to/format/file.
+ if (!Style.InheritsParentConfig &&
+ StyleName.starts_with_insensitive("file:")) {
+ auto ConfigFile = StyleName.substr(5);
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Text =
+ loadAndParseConfigFile(ConfigFile, FS, &Style, AllowUnknownOptions);
+ if (auto EC = Text.getError()) {
+ return make_string_error("Error reading " + ConfigFile + ": " +
+ EC.message());
+ }
+
+ LLVM_DEBUG(llvm::dbgs()
+ << "Using configuration file " << ConfigFile << "\n");
+
+ if (!Style.InheritsParentConfig)
return Style;
+
+ // Search for parent configs starting from the parent directory of
+ // ConfigFile.
+ FileName = ConfigFile;
+ ChildFormatTextToApply.emplace_back(std::move(*Text));
}
// If the style inherits the parent configuration it is a command line
@@ -3081,24 +3996,33 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
return Style;
}
- // Reset possible inheritance
- Style.InheritsParentConfig = false;
-
- // Look for .clang-format/_clang-format file in the file's parent directories.
- SmallString<128> UnsuitableConfigFiles;
SmallString<128> Path(FileName);
if (std::error_code EC = FS->makeAbsolute(Path))
return make_string_error(EC.message());
+ // Reset possible inheritance
+ Style.InheritsParentConfig = false;
+
+ auto dropDiagnosticHandler = [](const llvm::SMDiagnostic &, void *) {};
+
+ auto applyChildFormatTexts = [&](FormatStyle *Style) {
+ for (const auto &MemBuf : llvm::reverse(ChildFormatTextToApply)) {
+ auto EC = parseConfiguration(*MemBuf, Style, AllowUnknownOptions,
+ dropDiagnosticHandler);
+ // It was already correctly parsed.
+ assert(!EC);
+ static_cast<void>(EC);
+ }
+ };
+
+ // Look for .clang-format/_clang-format file in the file's parent directories.
llvm::SmallVector<std::string, 2> FilesToLookFor;
FilesToLookFor.push_back(".clang-format");
FilesToLookFor.push_back("_clang-format");
- auto dropDiagnosticHandler = [](const llvm::SMDiagnostic &, void *) {};
-
+ SmallString<128> UnsuitableConfigFiles;
for (StringRef Directory = Path; !Directory.empty();
Directory = llvm::sys::path::parent_path(Directory)) {
-
auto Status = FS->status(Directory);
if (!Status ||
Status->getType() != llvm::sys::fs::file_type::directory_file) {
@@ -3111,81 +4035,85 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
llvm::sys::path::append(ConfigFile, F);
LLVM_DEBUG(llvm::dbgs() << "Trying " << ConfigFile << "...\n");
- Status = FS->status(ConfigFile.str());
-
- if (Status &&
- (Status->getType() == llvm::sys::fs::file_type::regular_file)) {
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Text =
- FS->getBufferForFile(ConfigFile.str());
- if (std::error_code EC = Text.getError())
- return make_string_error(EC.message());
- if (std::error_code ec =
- parseConfiguration(*Text.get(), &Style, AllowUnknownOptions)) {
- if (ec == ParseError::Unsuitable) {
- if (!UnsuitableConfigFiles.empty())
- UnsuitableConfigFiles.append(", ");
- UnsuitableConfigFiles.append(ConfigFile);
- continue;
- }
+ Status = FS->status(ConfigFile);
+ if (!Status ||
+ Status->getType() != llvm::sys::fs::file_type::regular_file) {
+ continue;
+ }
+
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Text =
+ loadAndParseConfigFile(ConfigFile, FS, &Style, AllowUnknownOptions);
+ if (auto EC = Text.getError()) {
+ if (EC != ParseError::Unsuitable) {
return make_string_error("Error reading " + ConfigFile + ": " +
- ec.message());
+ EC.message());
}
- LLVM_DEBUG(llvm::dbgs()
- << "Using configuration file " << ConfigFile << "\n");
+ if (!UnsuitableConfigFiles.empty())
+ UnsuitableConfigFiles.append(", ");
+ UnsuitableConfigFiles.append(ConfigFile);
+ continue;
+ }
- if (!Style.InheritsParentConfig) {
- if (ChildFormatTextToApply.empty())
- return Style;
+ LLVM_DEBUG(llvm::dbgs()
+ << "Using configuration file " << ConfigFile << "\n");
+ if (!Style.InheritsParentConfig) {
+ if (!ChildFormatTextToApply.empty()) {
LLVM_DEBUG(llvm::dbgs() << "Applying child configurations\n");
-
- for (const auto &MemBuf : llvm::reverse(ChildFormatTextToApply)) {
- auto Ec = parseConfiguration(*MemBuf, &Style, AllowUnknownOptions,
- dropDiagnosticHandler);
- // It was already correctly parsed.
- assert(!Ec);
- static_cast<void>(Ec);
- }
-
- return Style;
+ applyChildFormatTexts(&Style);
}
+ return Style;
+ }
- LLVM_DEBUG(llvm::dbgs() << "Inherits parent configuration\n");
+ LLVM_DEBUG(llvm::dbgs() << "Inherits parent configuration\n");
- // Reset inheritance of style
- Style.InheritsParentConfig = false;
+ // Reset inheritance of style
+ Style.InheritsParentConfig = false;
- ChildFormatTextToApply.emplace_back(std::move(*Text));
+ ChildFormatTextToApply.emplace_back(std::move(*Text));
- // Breaking out of the inner loop, since we don't want to parse
- // .clang-format AND _clang-format, if both exist. Then we continue the
- // inner loop (parent directories) in search for the parent
- // configuration.
- break;
- }
+ // Breaking out of the inner loop, since we don't want to parse
+ // .clang-format AND _clang-format, if both exist. Then we continue the
+ // outer loop (parent directories) in search for the parent
+ // configuration.
+ break;
}
}
- if (!UnsuitableConfigFiles.empty())
+
+ if (!UnsuitableConfigFiles.empty()) {
return make_string_error("Configuration file(s) do(es) not support " +
getLanguageName(Style.Language) + ": " +
UnsuitableConfigFiles);
+ }
if (!ChildFormatTextToApply.empty()) {
- assert(ChildFormatTextToApply.size() == 1);
-
LLVM_DEBUG(llvm::dbgs()
- << "Applying child configuration on fallback style\n");
-
- auto Ec =
- parseConfiguration(*ChildFormatTextToApply.front(), &FallbackStyle,
- AllowUnknownOptions, dropDiagnosticHandler);
- // It was already correctly parsed.
- assert(!Ec);
- static_cast<void>(Ec);
+ << "Applying child configurations on fallback style\n");
+ applyChildFormatTexts(&FallbackStyle);
}
return FallbackStyle;
}
+static bool isClangFormatOnOff(StringRef Comment, bool On) {
+ if (Comment == (On ? "/* clang-format on */" : "/* clang-format off */"))
+ return true;
+
+ static const char ClangFormatOn[] = "// clang-format on";
+ static const char ClangFormatOff[] = "// clang-format off";
+ const unsigned Size = (On ? sizeof ClangFormatOn : sizeof ClangFormatOff) - 1;
+
+ return Comment.starts_with(On ? ClangFormatOn : ClangFormatOff) &&
+ (Comment.size() == Size || Comment[Size] == ':');
+}
+
+bool isClangFormatOn(StringRef Comment) {
+ return isClangFormatOnOff(Comment, /*On=*/true);
+}
+
+bool isClangFormatOff(StringRef Comment) {
+ return isClangFormatOnOff(Comment, /*On=*/false);
+}
+
} // namespace format
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Format/FormatToken.cpp b/contrib/llvm-project/clang/lib/Format/FormatToken.cpp
index 8e4994f4c0d5..b791c5a26bbe 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatToken.cpp
+++ b/contrib/llvm-project/clang/lib/Format/FormatToken.cpp
@@ -53,9 +53,11 @@ bool FormatToken::isSimpleTypeSpecifier() const {
case tok::kw___bf16:
case tok::kw__Float16:
case tok::kw___float128:
+ case tok::kw___ibm128:
case tok::kw_wchar_t:
case tok::kw_bool:
- case tok::kw___underlying_type:
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) case tok::kw___##Trait:
+#include "clang/Basic/TransformTypeTraits.def"
case tok::annot_typename:
case tok::kw_char8_t:
case tok::kw_char16_t:
@@ -69,6 +71,38 @@ bool FormatToken::isSimpleTypeSpecifier() const {
}
}
+bool FormatToken::isTypeOrIdentifier() const {
+ return isSimpleTypeSpecifier() || Tok.isOneOf(tok::kw_auto, tok::identifier);
+}
+
+bool FormatToken::isBlockIndentedInitRBrace(const FormatStyle &Style) const {
+ assert(is(tok::r_brace));
+ if (!Style.Cpp11BracedListStyle ||
+ Style.AlignAfterOpenBracket != FormatStyle::BAS_BlockIndent) {
+ return false;
+ }
+ const auto *LBrace = MatchingParen;
+ assert(LBrace && LBrace->is(tok::l_brace));
+ if (LBrace->is(BK_BracedInit))
+ return true;
+ if (LBrace->Previous && LBrace->Previous->is(tok::equal))
+ return true;
+ return false;
+}
+
+bool FormatToken::opensBlockOrBlockTypeList(const FormatStyle &Style) const {
+ // C# Does not indent object initialisers as continuations.
+ if (is(tok::l_brace) && getBlockKind() == BK_BracedInit && Style.isCSharp())
+ return true;
+ if (is(TT_TemplateString) && opensScope())
+ return true;
+ return is(TT_ArrayInitializerLSquare) || is(TT_ProtoExtensionLSquare) ||
+ (is(tok::l_brace) &&
+ (getBlockKind() == BK_Block || is(TT_DictLiteral) ||
+ (!Style.Cpp11BracedListStyle && NestingLevel == 0))) ||
+ (is(tok::less) && Style.isProto());
+}
+
TokenRole::~TokenRole() {}
void TokenRole::precomputeFormattingInfos(const FormatToken *Token) {}
@@ -76,19 +110,20 @@ void TokenRole::precomputeFormattingInfos(const FormatToken *Token) {}
unsigned CommaSeparatedList::formatAfterToken(LineState &State,
ContinuationIndenter *Indenter,
bool DryRun) {
- if (State.NextToken == nullptr || !State.NextToken->Previous)
+ if (!State.NextToken || !State.NextToken->Previous)
return 0;
- if (Formats.size() == 1)
- return 0; // Handled by formatFromToken
+ if (Formats.size() <= 1)
+ return 0; // Handled by formatFromToken (1) or avoid severe penalty (0).
// Ensure that we start on the opening brace.
const FormatToken *LBrace =
State.NextToken->Previous->getPreviousNonComment();
if (!LBrace || !LBrace->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) ||
LBrace->is(BK_Block) || LBrace->is(TT_DictLiteral) ||
- LBrace->Next->is(TT_DesignatedInitializerPeriod))
+ LBrace->Next->is(TT_DesignatedInitializerPeriod)) {
return 0;
+ }
// Calculate the number of code points we have to format this list. As the
// first token is already placed, we have to subtract it.
@@ -153,15 +188,17 @@ static unsigned CodePointsBetween(const FormatToken *Begin,
void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
// FIXME: At some point we might want to do this for other lists, too.
if (!Token->MatchingParen ||
- !Token->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare))
+ !Token->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare)) {
return;
+ }
// In C++11 braced list style, we should not format in columns unless they
// have many items (20 or more) or we allow bin-packing of function call
// arguments.
if (Style.Cpp11BracedListStyle && !Style.BinPackArguments &&
- Commas.size() < 19)
+ Commas.size() < 19) {
return;
+ }
// Limit column layout for JavaScript array initializers to 20 or more items
// for now to introduce it carefully. We can become more aggressive if this
@@ -181,9 +218,13 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
// The lengths of an item if it is put at the end of the line. This includes
// trailing comments which are otherwise ignored for column alignment.
SmallVector<unsigned, 8> EndOfLineItemLength;
+ MustBreakBeforeItem.reserve(Commas.size() + 1);
+ EndOfLineItemLength.reserve(Commas.size() + 1);
+ ItemLengths.reserve(Commas.size() + 1);
bool HasSeparatingComment = false;
for (unsigned i = 0, e = Commas.size() + 1; i != e; ++i) {
+ assert(ItemBegin);
// Skip comments on their own line.
while (ItemBegin->HasUnescapedNewline && ItemBegin->isTrailingComment()) {
ItemBegin = ItemBegin->Next;
@@ -215,8 +256,9 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
// Consume trailing comments so the are included in EndOfLineItemLength.
if (ItemEnd->Next && !ItemEnd->Next->HasUnescapedNewline &&
- ItemEnd->Next->isTrailingComment())
+ ItemEnd->Next->isTrailingComment()) {
ItemEnd = ItemEnd->Next;
+ }
}
EndOfLineItemLength.push_back(CodePointsBetween(ItemBegin, ItemEnd));
// If there is a trailing comma in the list, the next item will start at the
@@ -237,7 +279,7 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
// We can never place more than ColumnLimit / 3 items in a row (because of the
// spaces and the comma).
unsigned MaxItems = Style.ColumnLimit / 3;
- std::vector<unsigned> MinSizeInColumn;
+ SmallVector<unsigned> MinSizeInColumn;
MinSizeInColumn.reserve(MaxItems);
for (unsigned Columns = 1; Columns <= MaxItems; ++Columns) {
ColumnFormat Format;
@@ -277,8 +319,9 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
if (Format.ColumnSizes[i] - MinSizeInColumn[i] > 10)
return true;
return false;
- }())
+ }()) {
continue;
+ }
// Ignore layouts that are bound to violate the column limit.
if (Format.TotalWidth > Style.ColumnLimit && Columns > 1)
@@ -291,14 +334,11 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
const CommaSeparatedList::ColumnFormat *
CommaSeparatedList::getColumnFormat(unsigned RemainingCharacters) const {
const ColumnFormat *BestFormat = nullptr;
- for (SmallVector<ColumnFormat, 4>::const_reverse_iterator
- I = Formats.rbegin(),
- E = Formats.rend();
- I != E; ++I) {
- if (I->TotalWidth <= RemainingCharacters || I->Columns == 1) {
- if (BestFormat && I->LineCount > BestFormat->LineCount)
+ for (const ColumnFormat &Format : llvm::reverse(Formats)) {
+ if (Format.TotalWidth <= RemainingCharacters || Format.Columns == 1) {
+ if (BestFormat && Format.LineCount > BestFormat->LineCount)
break;
- BestFormat = &*I;
+ BestFormat = &Format;
}
}
return BestFormat;
diff --git a/contrib/llvm-project/clang/lib/Format/FormatToken.h b/contrib/llvm-project/clang/lib/Format/FormatToken.h
index 0506cd554bcb..dede89f26001 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatToken.h
+++ b/contrib/llvm-project/clang/lib/Format/FormatToken.h
@@ -20,6 +20,7 @@
#include "clang/Format/Format.h"
#include "clang/Lex/Lexer.h"
#include <memory>
+#include <optional>
#include <unordered_set>
namespace clang {
@@ -29,29 +30,59 @@ namespace format {
TYPE(ArrayInitializerLSquare) \
TYPE(ArraySubscriptLSquare) \
TYPE(AttributeColon) \
+ TYPE(AttributeLParen) \
TYPE(AttributeMacro) \
- TYPE(AttributeParen) \
+ TYPE(AttributeRParen) \
TYPE(AttributeSquare) \
TYPE(BinaryOperator) \
TYPE(BitFieldColon) \
TYPE(BlockComment) \
+ TYPE(BracedListLBrace) \
+ /* The colon at the end of a case label. */ \
+ TYPE(CaseLabelColon) \
TYPE(CastRParen) \
+ TYPE(ClassLBrace) \
+ TYPE(ClassRBrace) \
+ /* ternary ?: expression */ \
TYPE(ConditionalExpr) \
+ /* the condition in an if statement */ \
+ TYPE(ConditionLParen) \
TYPE(ConflictAlternative) \
TYPE(ConflictEnd) \
TYPE(ConflictStart) \
- TYPE(ConstraintJunctions) \
+ /* l_brace of if/for/while */ \
+ TYPE(ControlStatementLBrace) \
+ TYPE(ControlStatementRBrace) \
+ TYPE(CppCastLParen) \
+ TYPE(CSharpGenericTypeConstraint) \
+ TYPE(CSharpGenericTypeConstraintColon) \
+ TYPE(CSharpGenericTypeConstraintComma) \
+ TYPE(CSharpNamedArgumentColon) \
+ TYPE(CSharpNullable) \
+ TYPE(CSharpNullConditionalLSquare) \
+ TYPE(CSharpStringLiteral) \
TYPE(CtorInitializerColon) \
TYPE(CtorInitializerComma) \
+ TYPE(CtorDtorDeclName) \
TYPE(DesignatedInitializerLSquare) \
TYPE(DesignatedInitializerPeriod) \
TYPE(DictLiteral) \
+ TYPE(DoWhile) \
+ TYPE(ElseLBrace) \
+ TYPE(ElseRBrace) \
+ TYPE(EnumLBrace) \
+ TYPE(EnumRBrace) \
TYPE(FatArrow) \
TYPE(ForEachMacro) \
TYPE(FunctionAnnotationRParen) \
TYPE(FunctionDeclarationName) \
TYPE(FunctionLBrace) \
+ TYPE(FunctionLikeOrFreestandingMacro) \
TYPE(FunctionTypeLParen) \
+ /* The colons as part of a C11 _Generic selection */ \
+ TYPE(GenericSelectionColon) \
+ /* The colon at the end of a goto label. */ \
+ TYPE(GotoLabelColon) \
TYPE(IfMacro) \
TYPE(ImplicitStringLiteral) \
TYPE(InheritanceColon) \
@@ -60,6 +91,7 @@ namespace format {
TYPE(InlineASMColon) \
TYPE(InlineASMSymbolicNameLSquare) \
TYPE(JavaAnnotation) \
+ TYPE(JsAndAndEqual) \
TYPE(JsComputedPropertyName) \
TYPE(JsExponentiation) \
TYPE(JsExponentiationEqual) \
@@ -68,15 +100,16 @@ namespace format {
TYPE(JsTypeColon) \
TYPE(JsTypeOperator) \
TYPE(JsTypeOptionalQuestion) \
- TYPE(JsAndAndEqual) \
- TYPE(LambdaArrow) \
TYPE(LambdaLBrace) \
TYPE(LambdaLSquare) \
TYPE(LeadingJavaAnnotation) \
TYPE(LineComment) \
TYPE(MacroBlockBegin) \
TYPE(MacroBlockEnd) \
+ TYPE(ModulePartitionColon) \
+ TYPE(NamespaceLBrace) \
TYPE(NamespaceMacro) \
+ TYPE(NamespaceRBrace) \
TYPE(NonNullAssertion) \
TYPE(NullCoalescingEqual) \
TYPE(NullCoalescingOperator) \
@@ -92,32 +125,65 @@ namespace format {
TYPE(OverloadedOperator) \
TYPE(OverloadedOperatorLParen) \
TYPE(PointerOrReference) \
+ TYPE(ProtoExtensionLSquare) \
TYPE(PureVirtualSpecifier) \
TYPE(RangeBasedForLoopColon) \
+ TYPE(RecordLBrace) \
+ TYPE(RecordRBrace) \
TYPE(RegexLiteral) \
+ TYPE(RequiresClause) \
+ TYPE(RequiresClauseInARequiresExpression) \
+ TYPE(RequiresExpression) \
+ TYPE(RequiresExpressionLBrace) \
+ TYPE(RequiresExpressionLParen) \
TYPE(SelectorName) \
TYPE(StartOfName) \
TYPE(StatementAttributeLikeMacro) \
TYPE(StatementMacro) \
+ /* A string that is part of a string concatenation. For C#, JavaScript, and \
+ * Java, it is used for marking whether a string needs parentheses around it \
+ * if it is to be split into parts joined by `+`. For Verilog, whether \
+ * braces need to be added to split it. Not used for other languages. */ \
+ TYPE(StringInConcatenation) \
+ TYPE(StructLBrace) \
+ TYPE(StructRBrace) \
TYPE(StructuredBindingLSquare) \
+ TYPE(TableGenMultiLineString) \
TYPE(TemplateCloser) \
TYPE(TemplateOpener) \
TYPE(TemplateString) \
- TYPE(ProtoExtensionLSquare) \
TYPE(TrailingAnnotation) \
TYPE(TrailingReturnArrow) \
TYPE(TrailingUnaryOperator) \
TYPE(TypeDeclarationParen) \
+ TYPE(TypeName) \
TYPE(TypenameMacro) \
TYPE(UnaryOperator) \
+ TYPE(UnionLBrace) \
+ TYPE(UnionRBrace) \
TYPE(UntouchableMacroFunc) \
- TYPE(CSharpStringLiteral) \
- TYPE(CSharpNamedArgumentColon) \
- TYPE(CSharpNullable) \
- TYPE(CSharpNullConditionalLSquare) \
- TYPE(CSharpGenericTypeConstraint) \
- TYPE(CSharpGenericTypeConstraintColon) \
- TYPE(CSharpGenericTypeConstraintComma) \
+ /* Like in 'assign x = 0, y = 1;' . */ \
+ TYPE(VerilogAssignComma) \
+ /* like in begin : block */ \
+ TYPE(VerilogBlockLabelColon) \
+ /* The square bracket for the dimension part of the type name. \
+ * In 'logic [1:0] x[1:0]', only the first '['. This way we can have space \
+ * before the first bracket but not the second. */ \
+ TYPE(VerilogDimensionedTypeName) \
+ /* list of port connections or parameters in a module instantiation */ \
+ TYPE(VerilogInstancePortComma) \
+ TYPE(VerilogInstancePortLParen) \
+ /* A parenthesized list within which line breaks are inserted by the \
+ * formatter, for example the list of ports in a module header. */ \
+ TYPE(VerilogMultiLineListLParen) \
+ /* for the base in a number literal, not including the quote */ \
+ TYPE(VerilogNumberBase) \
+ /* like `(strong1, pull0)` */ \
+ TYPE(VerilogStrength) \
+ /* Things inside the table in user-defined primitives. */ \
+ TYPE(VerilogTableItem) \
+ /* those that separate ports of different types */ \
+ TYPE(VerilogTypeComma) \
TYPE(Unknown)
/// Determines the semantic type of a syntactic token, e.g. whether "<" is a
@@ -210,12 +276,15 @@ class AnnotatedLine;
struct FormatToken {
FormatToken()
: HasUnescapedNewline(false), IsMultiline(false), IsFirst(false),
- MustBreakBefore(false), IsUnterminatedLiteral(false),
- CanBreakBefore(false), ClosesTemplateDeclaration(false),
- StartsBinaryExpression(false), EndsBinaryExpression(false),
- PartOfMultiVariableDeclStmt(false), ContinuesLineCommentSection(false),
- Finalized(false), BlockKind(BK_Unknown), Decision(FD_Unformatted),
- PackingKind(PPK_Inconclusive), Type(TT_Unknown) {}
+ MustBreakBefore(false), MustBreakBeforeFinalized(false),
+ IsUnterminatedLiteral(false), CanBreakBefore(false),
+ ClosesTemplateDeclaration(false), StartsBinaryExpression(false),
+ EndsBinaryExpression(false), PartOfMultiVariableDeclStmt(false),
+ ContinuesLineCommentSection(false), Finalized(false),
+ ClosesRequiresClause(false), EndsCppAttributeGroup(false),
+ BlockKind(BK_Unknown), Decision(FD_Unformatted),
+ PackingKind(PPK_Inconclusive), TypeIsFinalized(false),
+ Type(TT_Unknown) {}
/// The \c Token.
Token Tok;
@@ -251,6 +320,10 @@ struct FormatToken {
/// before the token.
unsigned MustBreakBefore : 1;
+ /// Whether MustBreakBefore is finalized during parsing and must not
+ /// be reset between runs.
+ unsigned MustBreakBeforeFinalized : 1;
+
/// Set to \c true if this token is an unterminated literal.
unsigned IsUnterminatedLiteral : 1;
@@ -281,6 +354,12 @@ struct FormatToken {
/// changes.
unsigned Finalized : 1;
+ /// \c true if this is the last token within requires clause.
+ unsigned ClosesRequiresClause : 1;
+
+ /// \c true if this token ends a group of C++ attributes.
+ unsigned EndsCppAttributeGroup : 1;
+
private:
/// Contains the kind of block if this token is a brace.
unsigned BlockKind : 2;
@@ -321,13 +400,43 @@ public:
}
private:
+ unsigned TypeIsFinalized : 1;
TokenType Type;
public:
/// Returns the token's type, e.g. whether "<" is a template opener or
/// binary operator.
TokenType getType() const { return Type; }
- void setType(TokenType T) { Type = T; }
+ void setType(TokenType T) {
+ // If this token is a macro argument while formatting an unexpanded macro
+ // call, we do not change its type any more - the type was deduced from
+ // formatting the expanded macro stream already.
+ if (MacroCtx && MacroCtx->Role == MR_UnexpandedArg)
+ return;
+ assert((!TypeIsFinalized || T == Type) &&
+ "Please use overwriteFixedType to change a fixed type.");
+ Type = T;
+ }
+ /// Sets the type and also the finalized flag. This prevents the type to be
+ /// reset in TokenAnnotator::resetTokenMetadata(). If the type needs to be set
+ /// to another one please use overwriteFixedType, or even better remove the
+ /// need to reassign the type.
+ void setFinalizedType(TokenType T) {
+ if (MacroCtx && MacroCtx->Role == MR_UnexpandedArg)
+ return;
+ Type = T;
+ TypeIsFinalized = true;
+ }
+ void overwriteFixedType(TokenType T) {
+ if (MacroCtx && MacroCtx->Role == MR_UnexpandedArg)
+ return;
+ TypeIsFinalized = false;
+ setType(T);
+ }
+ bool isTypeFinalized() const { return TypeIsFinalized; }
+
+ /// Used to set an operator precedence explicitly.
+ prec::Level ForcedPrecedence = prec::Unknown;
/// The number of newlines immediately before the \c Token.
///
@@ -335,6 +444,12 @@ public:
/// and thereby e.g. leave an empty line between two function definitions.
unsigned NewlinesBefore = 0;
+ /// The number of newlines immediately before the \c Token after formatting.
+ ///
+ /// This is used to avoid overlapping whitespace replacements when \c Newlines
+ /// is recomputed for a finalized preprocessor branching directive.
+ int Newlines = -1;
+
/// The offset just past the last '\n' in this token's leading
/// whitespace (relative to \c WhiteSpaceStart). 0 if there is no '\n'.
unsigned LastNewlineOffset = 0;
@@ -441,13 +556,31 @@ public:
/// This starts an array initializer.
bool IsArrayInitializer = false;
+ /// Is optional and can be removed.
+ bool Optional = false;
+
+ /// Number of optional braces to be inserted after this token:
+ /// -1: a single left brace
+ /// 0: no braces
+ /// >0: number of right braces
+ int8_t BraceCount = 0;
+
/// If this token starts a block, this contains all the unwrapped lines
/// in it.
SmallVector<AnnotatedLine *, 1> Children;
// Contains all attributes related to how this token takes part
// in a configured macro expansion.
- llvm::Optional<MacroExpansion> MacroCtx;
+ std::optional<MacroExpansion> MacroCtx;
+
+ /// When macro expansion introduces nodes with children, those are marked as
+ /// \c MacroParent.
+ /// FIXME: The formatting code currently hard-codes the assumption that
+ /// child nodes are introduced by blocks following an opening brace.
+ /// This is deeply baked into the code and disentangling this will require
+ /// signficant refactorings. \c MacroParent allows us to special-case the
+ /// cases in which we treat parents as block-openers for now.
+ bool MacroParent = false;
bool is(tok::TokenKind Kind) const { return Tok.is(Kind); }
bool is(TokenType TT) const { return getType() == TT; }
@@ -503,24 +636,35 @@ public:
bool isStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); }
+ bool isAttribute() const {
+ return isOneOf(tok::kw___attribute, tok::kw___declspec, TT_AttributeMacro);
+ }
+
bool isObjCAtKeyword(tok::ObjCKeywordKind Kind) const {
return Tok.isObjCAtKeyword(Kind);
}
bool isAccessSpecifier(bool ColonRequired = true) const {
- return isOneOf(tok::kw_public, tok::kw_protected, tok::kw_private) &&
- (!ColonRequired || (Next && Next->is(tok::colon)));
+ if (!isOneOf(tok::kw_public, tok::kw_protected, tok::kw_private))
+ return false;
+ if (!ColonRequired)
+ return true;
+ const auto NextNonComment = getNextNonComment();
+ return NextNonComment && NextNonComment->is(tok::colon);
}
bool canBePointerOrReferenceQualifier() const {
return isOneOf(tok::kw_const, tok::kw_restrict, tok::kw_volatile,
- tok::kw___attribute, tok::kw__Nonnull, tok::kw__Nullable,
+ tok::kw__Nonnull, tok::kw__Nullable,
tok::kw__Null_unspecified, tok::kw___ptr32, tok::kw___ptr64,
- TT_AttributeMacro);
+ tok::kw___funcref) ||
+ isAttribute();
}
/// Determine whether the token is a simple-type-specifier.
- bool isSimpleTypeSpecifier() const;
+ [[nodiscard]] bool isSimpleTypeSpecifier() const;
+
+ [[nodiscard]] bool isTypeOrIdentifier() const;
bool isObjCAccessSpecifier() const {
return is(tok::at) && Next &&
@@ -533,7 +677,7 @@ public:
/// Returns whether \p Tok is ([{ or an opening < of a template or in
/// protos.
bool opensScope() const {
- if (is(TT_TemplateString) && TokenText.endswith("${"))
+ if (is(TT_TemplateString) && TokenText.ends_with("${"))
return true;
if (is(TT_DictLiteral) && is(tok::less))
return true;
@@ -543,7 +687,7 @@ public:
/// Returns whether \p Tok is )]} or a closing > of a template or in
/// protos.
bool closesScope() const {
- if (is(TT_TemplateString) && TokenText.startswith("}"))
+ if (is(TT_TemplateString) && TokenText.starts_with("}"))
return true;
if (is(TT_DictLiteral) && is(tok::greater))
return true;
@@ -555,7 +699,11 @@ public:
bool isMemberAccess() const {
return isOneOf(tok::arrow, tok::period, tok::arrowstar) &&
!isOneOf(TT_DesignatedInitializerPeriod, TT_TrailingReturnArrow,
- TT_LambdaArrow, TT_LeadingJavaAnnotation);
+ TT_LeadingJavaAnnotation);
+ }
+
+ bool isPointerOrReference() const {
+ return isOneOf(tok::star, tok::amp, tok::ampamp);
}
bool isUnaryOperator() const {
@@ -587,35 +735,27 @@ public:
/// Returns \c true if this is a keyword that can be used
/// like a function call (e.g. sizeof, typeid, ...).
bool isFunctionLikeKeyword() const {
- switch (Tok.getKind()) {
- case tok::kw_throw:
- case tok::kw_typeid:
- case tok::kw_return:
- case tok::kw_sizeof:
- case tok::kw_alignof:
- case tok::kw_alignas:
- case tok::kw_decltype:
- case tok::kw_noexcept:
- case tok::kw_static_assert:
- case tok::kw__Atomic:
- case tok::kw___attribute:
- case tok::kw___underlying_type:
- case tok::kw_requires:
+ if (isAttribute())
return true;
- default:
- return false;
- }
+
+ return isOneOf(tok::kw_throw, tok::kw_typeid, tok::kw_return,
+ tok::kw_sizeof, tok::kw_alignof, tok::kw_alignas,
+ tok::kw_decltype, tok::kw_noexcept, tok::kw_static_assert,
+ tok::kw__Atomic,
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) tok::kw___##Trait,
+#include "clang/Basic/TransformTypeTraits.def"
+ tok::kw_requires);
}
/// Returns \c true if this is a string literal that's like a label,
/// e.g. ends with "=" or ":".
bool isLabelString() const {
- if (!is(tok::string_literal))
+ if (isNot(tok::string_literal))
return false;
StringRef Content = TokenText;
- if (Content.startswith("\"") || Content.startswith("'"))
+ if (Content.starts_with("\"") || Content.starts_with("'"))
Content = Content.drop_front(1);
- if (Content.endswith("\"") || Content.endswith("'"))
+ if (Content.ends_with("\"") || Content.ends_with("'"))
Content = Content.drop_back(1);
Content = Content.trim();
return Content.size() > 1 &&
@@ -631,13 +771,21 @@ public:
return WhitespaceRange.getEnd();
}
+ /// Returns \c true if the range of whitespace immediately preceding the \c
+ /// Token is not empty.
+ bool hasWhitespaceBefore() const {
+ return WhitespaceRange.getBegin() != WhitespaceRange.getEnd();
+ }
+
prec::Level getPrecedence() const {
+ if (ForcedPrecedence != prec::Unknown)
+ return ForcedPrecedence;
return getBinOpPrecedence(Tok.getKind(), /*GreaterThanIsOperator=*/true,
/*CPlusPlus11=*/true);
}
/// Returns the previous token ignoring comments.
- FormatToken *getPreviousNonComment() const {
+ [[nodiscard]] FormatToken *getPreviousNonComment() const {
FormatToken *Tok = Previous;
while (Tok && Tok->is(tok::comment))
Tok = Tok->Previous;
@@ -645,28 +793,19 @@ public:
}
/// Returns the next token ignoring comments.
- const FormatToken *getNextNonComment() const {
- const FormatToken *Tok = Next;
+ [[nodiscard]] FormatToken *getNextNonComment() const {
+ FormatToken *Tok = Next;
while (Tok && Tok->is(tok::comment))
Tok = Tok->Next;
return Tok;
}
+ /// Returns \c true if this token ends a block indented initializer list.
+ [[nodiscard]] bool isBlockIndentedInitRBrace(const FormatStyle &Style) const;
+
/// Returns \c true if this tokens starts a block-type list, i.e. a
/// list that should be indented with a block indent.
- bool opensBlockOrBlockTypeList(const FormatStyle &Style) const {
- // C# Does not indent object initialisers as continuations.
- if (is(tok::l_brace) && getBlockKind() == BK_BracedInit && Style.isCSharp())
- return true;
- if (is(TT_TemplateString) && opensScope())
- return true;
- return is(TT_ArrayInitializerLSquare) || is(TT_ProtoExtensionLSquare) ||
- (is(tok::l_brace) &&
- (getBlockKind() == BK_Block || is(TT_DictLiteral) ||
- (!Style.Cpp11BracedListStyle && NestingLevel == 0))) ||
- (is(tok::less) && (Style.Language == FormatStyle::LK_Proto ||
- Style.Language == FormatStyle::LK_TextProto));
- }
+ [[nodiscard]] bool opensBlockOrBlockTypeList(const FormatStyle &Style) const;
/// Returns whether the token is the left square bracket of a C++
/// structured binding declaration.
@@ -849,6 +988,7 @@ struct AdditionalKeywords {
kw_CF_OPTIONS = &IdentTable.get("CF_OPTIONS");
kw_NS_CLOSED_ENUM = &IdentTable.get("NS_CLOSED_ENUM");
kw_NS_ENUM = &IdentTable.get("NS_ENUM");
+ kw_NS_ERROR_ENUM = &IdentTable.get("NS_ERROR_ENUM");
kw_NS_OPTIONS = &IdentTable.get("NS_OPTIONS");
kw_as = &IdentTable.get("as");
@@ -886,6 +1026,7 @@ struct AdditionalKeywords {
kw___has_include_next = &IdentTable.get("__has_include_next");
kw_mark = &IdentTable.get("mark");
+ kw_region = &IdentTable.get("region");
kw_extend = &IdentTable.get("extend");
kw_option = &IdentTable.get("option");
@@ -899,6 +1040,10 @@ struct AdditionalKeywords {
kw_slots = &IdentTable.get("slots");
kw_qslots = &IdentTable.get("Q_SLOTS");
+ // For internal clang-format use.
+ kw_internal_ident_after_define =
+ &IdentTable.get("__CLANG_FORMAT_INTERNAL_IDENT_AFTER_DEFINE__");
+
// C# keywords
kw_dollar = &IdentTable.get("dollar");
kw_base = &IdentTable.get("base");
@@ -909,6 +1054,7 @@ struct AdditionalKeywords {
kw_event = &IdentTable.get("event");
kw_fixed = &IdentTable.get("fixed");
kw_foreach = &IdentTable.get("foreach");
+ kw_init = &IdentTable.get("init");
kw_implicit = &IdentTable.get("implicit");
kw_internal = &IdentTable.get("internal");
kw_lock = &IdentTable.get("lock");
@@ -929,29 +1075,271 @@ struct AdditionalKeywords {
kw_when = &IdentTable.get("when");
kw_where = &IdentTable.get("where");
+ // Verilog keywords
+ kw_always = &IdentTable.get("always");
+ kw_always_comb = &IdentTable.get("always_comb");
+ kw_always_ff = &IdentTable.get("always_ff");
+ kw_always_latch = &IdentTable.get("always_latch");
+ kw_assign = &IdentTable.get("assign");
+ kw_assume = &IdentTable.get("assume");
+ kw_automatic = &IdentTable.get("automatic");
+ kw_before = &IdentTable.get("before");
+ kw_begin = &IdentTable.get("begin");
+ kw_begin_keywords = &IdentTable.get("begin_keywords");
+ kw_bins = &IdentTable.get("bins");
+ kw_binsof = &IdentTable.get("binsof");
+ kw_casex = &IdentTable.get("casex");
+ kw_casez = &IdentTable.get("casez");
+ kw_celldefine = &IdentTable.get("celldefine");
+ kw_checker = &IdentTable.get("checker");
+ kw_clocking = &IdentTable.get("clocking");
+ kw_constraint = &IdentTable.get("constraint");
+ kw_cover = &IdentTable.get("cover");
+ kw_covergroup = &IdentTable.get("covergroup");
+ kw_coverpoint = &IdentTable.get("coverpoint");
+ kw_default_decay_time = &IdentTable.get("default_decay_time");
+ kw_default_nettype = &IdentTable.get("default_nettype");
+ kw_default_trireg_strength = &IdentTable.get("default_trireg_strength");
+ kw_delay_mode_distributed = &IdentTable.get("delay_mode_distributed");
+ kw_delay_mode_path = &IdentTable.get("delay_mode_path");
+ kw_delay_mode_unit = &IdentTable.get("delay_mode_unit");
+ kw_delay_mode_zero = &IdentTable.get("delay_mode_zero");
+ kw_disable = &IdentTable.get("disable");
+ kw_dist = &IdentTable.get("dist");
+ kw_edge = &IdentTable.get("edge");
+ kw_elsif = &IdentTable.get("elsif");
+ kw_end = &IdentTable.get("end");
+ kw_end_keywords = &IdentTable.get("end_keywords");
+ kw_endcase = &IdentTable.get("endcase");
+ kw_endcelldefine = &IdentTable.get("endcelldefine");
+ kw_endchecker = &IdentTable.get("endchecker");
+ kw_endclass = &IdentTable.get("endclass");
+ kw_endclocking = &IdentTable.get("endclocking");
+ kw_endfunction = &IdentTable.get("endfunction");
+ kw_endgenerate = &IdentTable.get("endgenerate");
+ kw_endgroup = &IdentTable.get("endgroup");
+ kw_endinterface = &IdentTable.get("endinterface");
+ kw_endmodule = &IdentTable.get("endmodule");
+ kw_endpackage = &IdentTable.get("endpackage");
+ kw_endprimitive = &IdentTable.get("endprimitive");
+ kw_endprogram = &IdentTable.get("endprogram");
+ kw_endproperty = &IdentTable.get("endproperty");
+ kw_endsequence = &IdentTable.get("endsequence");
+ kw_endspecify = &IdentTable.get("endspecify");
+ kw_endtable = &IdentTable.get("endtable");
+ kw_endtask = &IdentTable.get("endtask");
+ kw_forever = &IdentTable.get("forever");
+ kw_fork = &IdentTable.get("fork");
+ kw_generate = &IdentTable.get("generate");
+ kw_highz0 = &IdentTable.get("highz0");
+ kw_highz1 = &IdentTable.get("highz1");
+ kw_iff = &IdentTable.get("iff");
+ kw_ifnone = &IdentTable.get("ifnone");
+ kw_ignore_bins = &IdentTable.get("ignore_bins");
+ kw_illegal_bins = &IdentTable.get("illegal_bins");
+ kw_initial = &IdentTable.get("initial");
+ kw_inout = &IdentTable.get("inout");
+ kw_input = &IdentTable.get("input");
+ kw_inside = &IdentTable.get("inside");
+ kw_interconnect = &IdentTable.get("interconnect");
+ kw_intersect = &IdentTable.get("intersect");
+ kw_join = &IdentTable.get("join");
+ kw_join_any = &IdentTable.get("join_any");
+ kw_join_none = &IdentTable.get("join_none");
+ kw_large = &IdentTable.get("large");
+ kw_local = &IdentTable.get("local");
+ kw_localparam = &IdentTable.get("localparam");
+ kw_macromodule = &IdentTable.get("macromodule");
+ kw_matches = &IdentTable.get("matches");
+ kw_medium = &IdentTable.get("medium");
+ kw_negedge = &IdentTable.get("negedge");
+ kw_nounconnected_drive = &IdentTable.get("nounconnected_drive");
+ kw_output = &IdentTable.get("output");
+ kw_packed = &IdentTable.get("packed");
+ kw_parameter = &IdentTable.get("parameter");
+ kw_posedge = &IdentTable.get("posedge");
+ kw_primitive = &IdentTable.get("primitive");
+ kw_priority = &IdentTable.get("priority");
+ kw_program = &IdentTable.get("program");
+ kw_property = &IdentTable.get("property");
+ kw_pull0 = &IdentTable.get("pull0");
+ kw_pull1 = &IdentTable.get("pull1");
+ kw_pure = &IdentTable.get("pure");
+ kw_rand = &IdentTable.get("rand");
+ kw_randc = &IdentTable.get("randc");
+ kw_randcase = &IdentTable.get("randcase");
+ kw_randsequence = &IdentTable.get("randsequence");
+ kw_repeat = &IdentTable.get("repeat");
+ kw_resetall = &IdentTable.get("resetall");
+ kw_sample = &IdentTable.get("sample");
+ kw_scalared = &IdentTable.get("scalared");
+ kw_sequence = &IdentTable.get("sequence");
+ kw_small = &IdentTable.get("small");
+ kw_soft = &IdentTable.get("soft");
+ kw_solve = &IdentTable.get("solve");
+ kw_specify = &IdentTable.get("specify");
+ kw_specparam = &IdentTable.get("specparam");
+ kw_strong0 = &IdentTable.get("strong0");
+ kw_strong1 = &IdentTable.get("strong1");
+ kw_supply0 = &IdentTable.get("supply0");
+ kw_supply1 = &IdentTable.get("supply1");
+ kw_table = &IdentTable.get("table");
+ kw_tagged = &IdentTable.get("tagged");
+ kw_task = &IdentTable.get("task");
+ kw_timescale = &IdentTable.get("timescale");
+ kw_tri = &IdentTable.get("tri");
+ kw_tri0 = &IdentTable.get("tri0");
+ kw_tri1 = &IdentTable.get("tri1");
+ kw_triand = &IdentTable.get("triand");
+ kw_trior = &IdentTable.get("trior");
+ kw_trireg = &IdentTable.get("trireg");
+ kw_unconnected_drive = &IdentTable.get("unconnected_drive");
+ kw_undefineall = &IdentTable.get("undefineall");
+ kw_unique = &IdentTable.get("unique");
+ kw_unique0 = &IdentTable.get("unique0");
+ kw_uwire = &IdentTable.get("uwire");
+ kw_vectored = &IdentTable.get("vectored");
+ kw_wand = &IdentTable.get("wand");
+ kw_weak0 = &IdentTable.get("weak0");
+ kw_weak1 = &IdentTable.get("weak1");
+ kw_wildcard = &IdentTable.get("wildcard");
+ kw_wire = &IdentTable.get("wire");
+ kw_with = &IdentTable.get("with");
+ kw_wor = &IdentTable.get("wor");
+
+ // Symbols that are treated as keywords.
+ kw_verilogHash = &IdentTable.get("#");
+ kw_verilogHashHash = &IdentTable.get("##");
+ kw_apostrophe = &IdentTable.get("\'");
+
+ // TableGen keywords
+ kw_bit = &IdentTable.get("bit");
+ kw_bits = &IdentTable.get("bits");
+ kw_code = &IdentTable.get("code");
+ kw_dag = &IdentTable.get("dag");
+ kw_def = &IdentTable.get("def");
+ kw_defm = &IdentTable.get("defm");
+ kw_defset = &IdentTable.get("defset");
+ kw_defvar = &IdentTable.get("defvar");
+ kw_dump = &IdentTable.get("dump");
+ kw_include = &IdentTable.get("include");
+ kw_list = &IdentTable.get("list");
+ kw_multiclass = &IdentTable.get("multiclass");
+ kw_then = &IdentTable.get("then");
+
// Keep this at the end of the constructor to make sure everything here
// is
// already initialized.
JsExtraKeywords = std::unordered_set<IdentifierInfo *>(
{kw_as, kw_async, kw_await, kw_declare, kw_finally, kw_from,
- kw_function, kw_get, kw_import, kw_is, kw_let, kw_module, kw_readonly,
- kw_set, kw_type, kw_typeof, kw_var, kw_yield,
+ kw_function, kw_get, kw_import, kw_is, kw_let, kw_module, kw_override,
+ kw_readonly, kw_set, kw_type, kw_typeof, kw_var, kw_yield,
// Keywords from the Java section.
kw_abstract, kw_extends, kw_implements, kw_instanceof, kw_interface});
CSharpExtraKeywords = std::unordered_set<IdentifierInfo *>(
{kw_base, kw_byte, kw_checked, kw_decimal, kw_delegate, kw_event,
- kw_fixed, kw_foreach, kw_implicit, kw_in, kw_interface, kw_internal,
- kw_is, kw_lock, kw_null, kw_object, kw_out, kw_override, kw_params,
- kw_readonly, kw_ref, kw_string, kw_stackalloc, kw_sbyte, kw_sealed,
- kw_uint, kw_ulong, kw_unchecked, kw_unsafe, kw_ushort, kw_when,
- kw_where,
+ kw_fixed, kw_foreach, kw_implicit, kw_in, kw_init, kw_interface,
+ kw_internal, kw_is, kw_lock, kw_null, kw_object, kw_out, kw_override,
+ kw_params, kw_readonly, kw_ref, kw_string, kw_stackalloc, kw_sbyte,
+ kw_sealed, kw_uint, kw_ulong, kw_unchecked, kw_unsafe, kw_ushort,
+ kw_when, kw_where,
// Keywords from the JavaScript section.
kw_as, kw_async, kw_await, kw_declare, kw_finally, kw_from,
kw_function, kw_get, kw_import, kw_is, kw_let, kw_module, kw_readonly,
kw_set, kw_type, kw_typeof, kw_var, kw_yield,
// Keywords from the Java section.
kw_abstract, kw_extends, kw_implements, kw_instanceof, kw_interface});
+
+ // Some keywords are not included here because they don't need special
+ // treatment like `showcancelled` or they should be treated as identifiers
+ // like `int` and `logic`.
+ VerilogExtraKeywords = std::unordered_set<IdentifierInfo *>(
+ {kw_always, kw_always_comb,
+ kw_always_ff, kw_always_latch,
+ kw_assert, kw_assign,
+ kw_assume, kw_automatic,
+ kw_before, kw_begin,
+ kw_bins, kw_binsof,
+ kw_casex, kw_casez,
+ kw_celldefine, kw_checker,
+ kw_clocking, kw_constraint,
+ kw_cover, kw_covergroup,
+ kw_coverpoint, kw_disable,
+ kw_dist, kw_edge,
+ kw_end, kw_endcase,
+ kw_endchecker, kw_endclass,
+ kw_endclocking, kw_endfunction,
+ kw_endgenerate, kw_endgroup,
+ kw_endinterface, kw_endmodule,
+ kw_endpackage, kw_endprimitive,
+ kw_endprogram, kw_endproperty,
+ kw_endsequence, kw_endspecify,
+ kw_endtable, kw_endtask,
+ kw_extends, kw_final,
+ kw_foreach, kw_forever,
+ kw_fork, kw_function,
+ kw_generate, kw_highz0,
+ kw_highz1, kw_iff,
+ kw_ifnone, kw_ignore_bins,
+ kw_illegal_bins, kw_implements,
+ kw_import, kw_initial,
+ kw_inout, kw_input,
+ kw_inside, kw_interconnect,
+ kw_interface, kw_intersect,
+ kw_join, kw_join_any,
+ kw_join_none, kw_large,
+ kw_let, kw_local,
+ kw_localparam, kw_macromodule,
+ kw_matches, kw_medium,
+ kw_negedge, kw_output,
+ kw_package, kw_packed,
+ kw_parameter, kw_posedge,
+ kw_primitive, kw_priority,
+ kw_program, kw_property,
+ kw_pull0, kw_pull1,
+ kw_pure, kw_rand,
+ kw_randc, kw_randcase,
+ kw_randsequence, kw_ref,
+ kw_repeat, kw_sample,
+ kw_scalared, kw_sequence,
+ kw_small, kw_soft,
+ kw_solve, kw_specify,
+ kw_specparam, kw_strong0,
+ kw_strong1, kw_supply0,
+ kw_supply1, kw_table,
+ kw_tagged, kw_task,
+ kw_tri, kw_tri0,
+ kw_tri1, kw_triand,
+ kw_trior, kw_trireg,
+ kw_unique, kw_unique0,
+ kw_uwire, kw_var,
+ kw_vectored, kw_wand,
+ kw_weak0, kw_weak1,
+ kw_wildcard, kw_wire,
+ kw_with, kw_wor,
+ kw_verilogHash, kw_verilogHashHash});
+
+ TableGenExtraKeywords = std::unordered_set<IdentifierInfo *>({
+ kw_assert,
+ kw_bit,
+ kw_bits,
+ kw_code,
+ kw_dag,
+ kw_def,
+ kw_defm,
+ kw_defset,
+ kw_defvar,
+ kw_dump,
+ kw_foreach,
+ kw_in,
+ kw_include,
+ kw_let,
+ kw_list,
+ kw_multiclass,
+ kw_string,
+ kw_then,
+ });
}
// Context sensitive keywords.
@@ -964,6 +1352,7 @@ struct AdditionalKeywords {
IdentifierInfo *kw_CF_OPTIONS;
IdentifierInfo *kw_NS_CLOSED_ENUM;
IdentifierInfo *kw_NS_ENUM;
+ IdentifierInfo *kw_NS_ERROR_ENUM;
IdentifierInfo *kw_NS_OPTIONS;
IdentifierInfo *kw___except;
IdentifierInfo *kw___has_include;
@@ -1004,6 +1393,7 @@ struct AdditionalKeywords {
// Pragma keywords.
IdentifierInfo *kw_mark;
+ IdentifierInfo *kw_region;
// Proto keywords.
IdentifierInfo *kw_extend;
@@ -1019,6 +1409,9 @@ struct AdditionalKeywords {
IdentifierInfo *kw_slots;
IdentifierInfo *kw_qslots;
+ // For internal use by clang-format.
+ IdentifierInfo *kw_internal_ident_after_define;
+
// C# keywords
IdentifierInfo *kw_dollar;
IdentifierInfo *kw_base;
@@ -1030,6 +1423,7 @@ struct AdditionalKeywords {
IdentifierInfo *kw_fixed;
IdentifierInfo *kw_foreach;
IdentifierInfo *kw_implicit;
+ IdentifierInfo *kw_init;
IdentifierInfo *kw_internal;
IdentifierInfo *kw_lock;
@@ -1052,6 +1446,167 @@ struct AdditionalKeywords {
IdentifierInfo *kw_when;
IdentifierInfo *kw_where;
+ // Verilog keywords
+ IdentifierInfo *kw_always;
+ IdentifierInfo *kw_always_comb;
+ IdentifierInfo *kw_always_ff;
+ IdentifierInfo *kw_always_latch;
+ IdentifierInfo *kw_assign;
+ IdentifierInfo *kw_assume;
+ IdentifierInfo *kw_automatic;
+ IdentifierInfo *kw_before;
+ IdentifierInfo *kw_begin;
+ IdentifierInfo *kw_begin_keywords;
+ IdentifierInfo *kw_bins;
+ IdentifierInfo *kw_binsof;
+ IdentifierInfo *kw_casex;
+ IdentifierInfo *kw_casez;
+ IdentifierInfo *kw_celldefine;
+ IdentifierInfo *kw_checker;
+ IdentifierInfo *kw_clocking;
+ IdentifierInfo *kw_constraint;
+ IdentifierInfo *kw_cover;
+ IdentifierInfo *kw_covergroup;
+ IdentifierInfo *kw_coverpoint;
+ IdentifierInfo *kw_default_decay_time;
+ IdentifierInfo *kw_default_nettype;
+ IdentifierInfo *kw_default_trireg_strength;
+ IdentifierInfo *kw_delay_mode_distributed;
+ IdentifierInfo *kw_delay_mode_path;
+ IdentifierInfo *kw_delay_mode_unit;
+ IdentifierInfo *kw_delay_mode_zero;
+ IdentifierInfo *kw_disable;
+ IdentifierInfo *kw_dist;
+ IdentifierInfo *kw_elsif;
+ IdentifierInfo *kw_edge;
+ IdentifierInfo *kw_end;
+ IdentifierInfo *kw_end_keywords;
+ IdentifierInfo *kw_endcase;
+ IdentifierInfo *kw_endcelldefine;
+ IdentifierInfo *kw_endchecker;
+ IdentifierInfo *kw_endclass;
+ IdentifierInfo *kw_endclocking;
+ IdentifierInfo *kw_endfunction;
+ IdentifierInfo *kw_endgenerate;
+ IdentifierInfo *kw_endgroup;
+ IdentifierInfo *kw_endinterface;
+ IdentifierInfo *kw_endmodule;
+ IdentifierInfo *kw_endpackage;
+ IdentifierInfo *kw_endprimitive;
+ IdentifierInfo *kw_endprogram;
+ IdentifierInfo *kw_endproperty;
+ IdentifierInfo *kw_endsequence;
+ IdentifierInfo *kw_endspecify;
+ IdentifierInfo *kw_endtable;
+ IdentifierInfo *kw_endtask;
+ IdentifierInfo *kw_forever;
+ IdentifierInfo *kw_fork;
+ IdentifierInfo *kw_generate;
+ IdentifierInfo *kw_highz0;
+ IdentifierInfo *kw_highz1;
+ IdentifierInfo *kw_iff;
+ IdentifierInfo *kw_ifnone;
+ IdentifierInfo *kw_ignore_bins;
+ IdentifierInfo *kw_illegal_bins;
+ IdentifierInfo *kw_initial;
+ IdentifierInfo *kw_inout;
+ IdentifierInfo *kw_input;
+ IdentifierInfo *kw_inside;
+ IdentifierInfo *kw_interconnect;
+ IdentifierInfo *kw_intersect;
+ IdentifierInfo *kw_join;
+ IdentifierInfo *kw_join_any;
+ IdentifierInfo *kw_join_none;
+ IdentifierInfo *kw_large;
+ IdentifierInfo *kw_local;
+ IdentifierInfo *kw_localparam;
+ IdentifierInfo *kw_macromodule;
+ IdentifierInfo *kw_matches;
+ IdentifierInfo *kw_medium;
+ IdentifierInfo *kw_negedge;
+ IdentifierInfo *kw_nounconnected_drive;
+ IdentifierInfo *kw_output;
+ IdentifierInfo *kw_packed;
+ IdentifierInfo *kw_parameter;
+ IdentifierInfo *kw_posedge;
+ IdentifierInfo *kw_primitive;
+ IdentifierInfo *kw_priority;
+ IdentifierInfo *kw_program;
+ IdentifierInfo *kw_property;
+ IdentifierInfo *kw_pull0;
+ IdentifierInfo *kw_pull1;
+ IdentifierInfo *kw_pure;
+ IdentifierInfo *kw_rand;
+ IdentifierInfo *kw_randc;
+ IdentifierInfo *kw_randcase;
+ IdentifierInfo *kw_randsequence;
+ IdentifierInfo *kw_repeat;
+ IdentifierInfo *kw_resetall;
+ IdentifierInfo *kw_sample;
+ IdentifierInfo *kw_scalared;
+ IdentifierInfo *kw_sequence;
+ IdentifierInfo *kw_small;
+ IdentifierInfo *kw_soft;
+ IdentifierInfo *kw_solve;
+ IdentifierInfo *kw_specify;
+ IdentifierInfo *kw_specparam;
+ IdentifierInfo *kw_strong0;
+ IdentifierInfo *kw_strong1;
+ IdentifierInfo *kw_supply0;
+ IdentifierInfo *kw_supply1;
+ IdentifierInfo *kw_table;
+ IdentifierInfo *kw_tagged;
+ IdentifierInfo *kw_task;
+ IdentifierInfo *kw_timescale;
+ IdentifierInfo *kw_tri0;
+ IdentifierInfo *kw_tri1;
+ IdentifierInfo *kw_tri;
+ IdentifierInfo *kw_triand;
+ IdentifierInfo *kw_trior;
+ IdentifierInfo *kw_trireg;
+ IdentifierInfo *kw_unconnected_drive;
+ IdentifierInfo *kw_undefineall;
+ IdentifierInfo *kw_unique;
+ IdentifierInfo *kw_unique0;
+ IdentifierInfo *kw_uwire;
+ IdentifierInfo *kw_vectored;
+ IdentifierInfo *kw_wand;
+ IdentifierInfo *kw_weak0;
+ IdentifierInfo *kw_weak1;
+ IdentifierInfo *kw_wildcard;
+ IdentifierInfo *kw_wire;
+ IdentifierInfo *kw_with;
+ IdentifierInfo *kw_wor;
+
+ // Workaround for hashes and backticks in Verilog.
+ IdentifierInfo *kw_verilogHash;
+ IdentifierInfo *kw_verilogHashHash;
+
+ // Symbols in Verilog that don't exist in C++.
+ IdentifierInfo *kw_apostrophe;
+
+ // TableGen keywords
+ IdentifierInfo *kw_bit;
+ IdentifierInfo *kw_bits;
+ IdentifierInfo *kw_code;
+ IdentifierInfo *kw_dag;
+ IdentifierInfo *kw_def;
+ IdentifierInfo *kw_defm;
+ IdentifierInfo *kw_defset;
+ IdentifierInfo *kw_defvar;
+ IdentifierInfo *kw_dump;
+ IdentifierInfo *kw_include;
+ IdentifierInfo *kw_list;
+ IdentifierInfo *kw_multiclass;
+ IdentifierInfo *kw_then;
+
+ /// Returns \c true if \p Tok is a keyword or an identifier.
+ bool isWordLike(const FormatToken &Tok) const {
+ // getIdentifierinfo returns non-null for keywords as well as identifiers.
+ return Tok.Tok.getIdentifierInfo() &&
+ !Tok.isOneOf(kw_verilogHash, kw_verilogHashHash, kw_apostrophe);
+ }
+
/// Returns \c true if \p Tok is a true JavaScript identifier, returns
/// \c false if it is a keyword or a pseudo keyword.
/// If \c AcceptIdentifierName is true, returns true not only for keywords,
@@ -1060,7 +1615,7 @@ struct AdditionalKeywords {
bool IsJavaScriptIdentifier(const FormatToken &Tok,
bool AcceptIdentifierName = true) const {
// Based on the list of JavaScript & TypeScript keywords here:
- // https://github.com/microsoft/TypeScript/blob/master/src/compiler/scanner.ts#L74
+ // https://github.com/microsoft/TypeScript/blob/main/src/compiler/scanner.ts#L74
switch (Tok.Tok.getKind()) {
case tok::kw_break:
case tok::kw_case:
@@ -1178,14 +1733,199 @@ struct AdditionalKeywords {
}
}
+ bool isVerilogWordOperator(const FormatToken &Tok) const {
+ return Tok.isOneOf(kw_before, kw_intersect, kw_dist, kw_iff, kw_inside,
+ kw_with);
+ }
+
+ bool isVerilogIdentifier(const FormatToken &Tok) const {
+ switch (Tok.Tok.getKind()) {
+ case tok::kw_case:
+ case tok::kw_class:
+ case tok::kw_const:
+ case tok::kw_continue:
+ case tok::kw_default:
+ case tok::kw_do:
+ case tok::kw_extern:
+ case tok::kw_else:
+ case tok::kw_enum:
+ case tok::kw_for:
+ case tok::kw_if:
+ case tok::kw_restrict:
+ case tok::kw_signed:
+ case tok::kw_static:
+ case tok::kw_struct:
+ case tok::kw_typedef:
+ case tok::kw_union:
+ case tok::kw_unsigned:
+ case tok::kw_virtual:
+ case tok::kw_while:
+ return false;
+ case tok::identifier:
+ return isWordLike(Tok) &&
+ VerilogExtraKeywords.find(Tok.Tok.getIdentifierInfo()) ==
+ VerilogExtraKeywords.end();
+ default:
+ // getIdentifierInfo returns non-null for both identifiers and keywords.
+ return Tok.Tok.getIdentifierInfo();
+ }
+ }
+
+ /// Returns whether \p Tok is a Verilog preprocessor directive. This is
+ /// needed because macro expansions start with a backtick as well and they
+ /// need to be treated differently.
+ bool isVerilogPPDirective(const FormatToken &Tok) const {
+ auto Info = Tok.Tok.getIdentifierInfo();
+ if (!Info)
+ return false;
+ switch (Info->getPPKeywordID()) {
+ case tok::pp_define:
+ case tok::pp_else:
+ case tok::pp_endif:
+ case tok::pp_ifdef:
+ case tok::pp_ifndef:
+ case tok::pp_include:
+ case tok::pp_line:
+ case tok::pp_pragma:
+ case tok::pp_undef:
+ return true;
+ default:
+ return Tok.isOneOf(kw_begin_keywords, kw_celldefine,
+ kw_default_decay_time, kw_default_nettype,
+ kw_default_trireg_strength, kw_delay_mode_distributed,
+ kw_delay_mode_path, kw_delay_mode_unit,
+ kw_delay_mode_zero, kw_elsif, kw_end_keywords,
+ kw_endcelldefine, kw_nounconnected_drive, kw_resetall,
+ kw_timescale, kw_unconnected_drive, kw_undefineall);
+ }
+ }
+
+ /// Returns whether \p Tok is a Verilog keyword that opens a block.
+ bool isVerilogBegin(const FormatToken &Tok) const {
+ // `table` is not included since it needs to be treated specially.
+ return !Tok.endsSequence(kw_fork, kw_disable) &&
+ Tok.isOneOf(kw_begin, kw_fork, kw_generate, kw_specify);
+ }
+
+ /// Returns whether \p Tok is a Verilog keyword that closes a block.
+ bool isVerilogEnd(const FormatToken &Tok) const {
+ return !Tok.endsSequence(kw_join, kw_rand) &&
+ Tok.isOneOf(TT_MacroBlockEnd, kw_end, kw_endcase, kw_endclass,
+ kw_endclocking, kw_endchecker, kw_endfunction,
+ kw_endgenerate, kw_endgroup, kw_endinterface,
+ kw_endmodule, kw_endpackage, kw_endprimitive,
+ kw_endprogram, kw_endproperty, kw_endsequence,
+ kw_endspecify, kw_endtable, kw_endtask, kw_join,
+ kw_join_any, kw_join_none);
+ }
+
+ /// Returns whether \p Tok is a Verilog keyword that opens a module, etc.
+ bool isVerilogHierarchy(const FormatToken &Tok) const {
+ if (Tok.endsSequence(kw_function, kw_with))
+ return false;
+ if (Tok.is(kw_property)) {
+ const FormatToken *Prev = Tok.getPreviousNonComment();
+ return !(Prev &&
+ Prev->isOneOf(tok::kw_restrict, kw_assert, kw_assume, kw_cover));
+ }
+ return Tok.isOneOf(tok::kw_case, tok::kw_class, kw_function, kw_module,
+ kw_interface, kw_package, kw_casex, kw_casez, kw_checker,
+ kw_clocking, kw_covergroup, kw_macromodule, kw_primitive,
+ kw_program, kw_property, kw_randcase, kw_randsequence,
+ kw_task);
+ }
+
+ bool isVerilogEndOfLabel(const FormatToken &Tok) const {
+ const FormatToken *Next = Tok.getNextNonComment();
+ // In Verilog the colon in a default label is optional.
+ return Tok.is(TT_CaseLabelColon) ||
+ (Tok.is(tok::kw_default) &&
+ !(Next && Next->isOneOf(tok::colon, tok::semi, kw_clocking, kw_iff,
+ kw_input, kw_output, kw_sequence)));
+ }
+
+ /// Returns whether \p Tok is a Verilog keyword that starts a
+ /// structured procedure like 'always'.
+ bool isVerilogStructuredProcedure(const FormatToken &Tok) const {
+ return Tok.isOneOf(kw_always, kw_always_comb, kw_always_ff, kw_always_latch,
+ kw_final, kw_forever, kw_initial);
+ }
+
+ bool isVerilogQualifier(const FormatToken &Tok) const {
+ switch (Tok.Tok.getKind()) {
+ case tok::kw_extern:
+ case tok::kw_signed:
+ case tok::kw_static:
+ case tok::kw_unsigned:
+ case tok::kw_virtual:
+ return true;
+ case tok::identifier:
+ return Tok.isOneOf(
+ kw_let, kw_var, kw_ref, kw_automatic, kw_bins, kw_coverpoint,
+ kw_ignore_bins, kw_illegal_bins, kw_inout, kw_input, kw_interconnect,
+ kw_local, kw_localparam, kw_output, kw_parameter, kw_pure, kw_rand,
+ kw_randc, kw_scalared, kw_specparam, kw_tri, kw_tri0, kw_tri1,
+ kw_triand, kw_trior, kw_trireg, kw_uwire, kw_vectored, kw_wand,
+ kw_wildcard, kw_wire, kw_wor);
+ default:
+ return false;
+ }
+ }
+
+ bool isTableGenDefinition(const FormatToken &Tok) const {
+ return Tok.isOneOf(kw_def, kw_defm, kw_defset, kw_defvar, kw_multiclass,
+ kw_let, tok::kw_class);
+ }
+
+ bool isTableGenKeyword(const FormatToken &Tok) const {
+ switch (Tok.Tok.getKind()) {
+ case tok::kw_class:
+ case tok::kw_else:
+ case tok::kw_false:
+ case tok::kw_if:
+ case tok::kw_int:
+ case tok::kw_true:
+ return true;
+ default:
+ return Tok.is(tok::identifier) &&
+ TableGenExtraKeywords.find(Tok.Tok.getIdentifierInfo()) !=
+ TableGenExtraKeywords.end();
+ }
+ }
+
private:
/// The JavaScript keywords beyond the C++ keyword set.
std::unordered_set<IdentifierInfo *> JsExtraKeywords;
/// The C# keywords beyond the C++ keyword set
std::unordered_set<IdentifierInfo *> CSharpExtraKeywords;
+
+ /// The Verilog keywords beyond the C++ keyword set.
+ std::unordered_set<IdentifierInfo *> VerilogExtraKeywords;
+
+ /// The TableGen keywords beyond the C++ keyword set.
+ std::unordered_set<IdentifierInfo *> TableGenExtraKeywords;
};
+inline bool isLineComment(const FormatToken &FormatTok) {
+ return FormatTok.is(tok::comment) && !FormatTok.TokenText.starts_with("/*");
+}
+
+// Checks if \p FormatTok is a line comment that continues the line comment
+// \p Previous. The original column of \p MinColumnToken is used to determine
+// whether \p FormatTok is indented enough to the right to continue \p Previous.
+inline bool continuesLineComment(const FormatToken &FormatTok,
+ const FormatToken *Previous,
+ const FormatToken *MinColumnToken) {
+ if (!Previous || !MinColumnToken)
+ return false;
+ unsigned MinContinueColumn =
+ MinColumnToken->OriginalColumn + (isLineComment(*MinColumnToken) ? 0 : 1);
+ return isLineComment(FormatTok) && FormatTok.NewlinesBefore == 1 &&
+ isLineComment(*Previous) &&
+ FormatTok.OriginalColumn >= MinContinueColumn;
+}
+
} // namespace format
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
index a9cfb4a247f0..52a55ea23b5f 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
@@ -28,36 +28,52 @@ FormatTokenLexer::FormatTokenLexer(
llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
IdentifierTable &IdentTable)
: FormatTok(nullptr), IsFirstToken(true), StateStack({LexerState::NORMAL}),
- Column(Column), TrailingWhitespace(0), SourceMgr(SourceMgr), ID(ID),
+ Column(Column), TrailingWhitespace(0),
+ LangOpts(getFormattingLangOpts(Style)), SourceMgr(SourceMgr), ID(ID),
Style(Style), IdentTable(IdentTable), Keywords(IdentTable),
Encoding(Encoding), Allocator(Allocator), FirstInLineIndex(0),
FormattingDisabled(false), MacroBlockBeginRegex(Style.MacroBlockBegin),
MacroBlockEndRegex(Style.MacroBlockEnd) {
- Lex.reset(new Lexer(ID, SourceMgr.getBufferOrFake(ID), SourceMgr,
- getFormattingLangOpts(Style)));
+ Lex.reset(new Lexer(ID, SourceMgr.getBufferOrFake(ID), SourceMgr, LangOpts));
Lex->SetKeepWhitespaceMode(true);
- for (const std::string &ForEachMacro : Style.ForEachMacros)
- Macros.insert({&IdentTable.get(ForEachMacro), TT_ForEachMacro});
- for (const std::string &IfMacro : Style.IfMacros)
- Macros.insert({&IdentTable.get(IfMacro), TT_IfMacro});
- for (const std::string &AttributeMacro : Style.AttributeMacros)
- Macros.insert({&IdentTable.get(AttributeMacro), TT_AttributeMacro});
- for (const std::string &StatementMacro : Style.StatementMacros)
- Macros.insert({&IdentTable.get(StatementMacro), TT_StatementMacro});
- for (const std::string &TypenameMacro : Style.TypenameMacros)
- Macros.insert({&IdentTable.get(TypenameMacro), TT_TypenameMacro});
- for (const std::string &NamespaceMacro : Style.NamespaceMacros)
- Macros.insert({&IdentTable.get(NamespaceMacro), TT_NamespaceMacro});
+ for (const std::string &ForEachMacro : Style.ForEachMacros) {
+ auto Identifier = &IdentTable.get(ForEachMacro);
+ Macros.insert({Identifier, TT_ForEachMacro});
+ }
+ for (const std::string &IfMacro : Style.IfMacros) {
+ auto Identifier = &IdentTable.get(IfMacro);
+ Macros.insert({Identifier, TT_IfMacro});
+ }
+ for (const std::string &AttributeMacro : Style.AttributeMacros) {
+ auto Identifier = &IdentTable.get(AttributeMacro);
+ Macros.insert({Identifier, TT_AttributeMacro});
+ }
+ for (const std::string &StatementMacro : Style.StatementMacros) {
+ auto Identifier = &IdentTable.get(StatementMacro);
+ Macros.insert({Identifier, TT_StatementMacro});
+ }
+ for (const std::string &TypenameMacro : Style.TypenameMacros) {
+ auto Identifier = &IdentTable.get(TypenameMacro);
+ Macros.insert({Identifier, TT_TypenameMacro});
+ }
+ for (const std::string &NamespaceMacro : Style.NamespaceMacros) {
+ auto Identifier = &IdentTable.get(NamespaceMacro);
+ Macros.insert({Identifier, TT_NamespaceMacro});
+ }
for (const std::string &WhitespaceSensitiveMacro :
Style.WhitespaceSensitiveMacros) {
- Macros.insert(
- {&IdentTable.get(WhitespaceSensitiveMacro), TT_UntouchableMacroFunc});
+ auto Identifier = &IdentTable.get(WhitespaceSensitiveMacro);
+ Macros.insert({Identifier, TT_UntouchableMacroFunc});
}
for (const std::string &StatementAttributeLikeMacro :
- Style.StatementAttributeLikeMacros)
- Macros.insert({&IdentTable.get(StatementAttributeLikeMacro),
- TT_StatementAttributeLikeMacro});
+ Style.StatementAttributeLikeMacros) {
+ auto Identifier = &IdentTable.get(StatementAttributeLikeMacro);
+ Macros.insert({Identifier, TT_StatementAttributeLikeMacro});
+ }
+
+ for (const auto &TypeName : Style.TypeNames)
+ TypeNames.insert(&IdentTable.get(TypeName));
}
ArrayRef<FormatToken *> FormatTokenLexer::lex() {
@@ -65,20 +81,25 @@ ArrayRef<FormatToken *> FormatTokenLexer::lex() {
assert(FirstInLineIndex == 0);
do {
Tokens.push_back(getNextToken());
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isJavaScript()) {
tryParseJSRegexLiteral();
handleTemplateStrings();
}
if (Style.Language == FormatStyle::LK_TextProto)
tryParsePythonComment();
tryMergePreviousTokens();
- if (Style.isCSharp())
+ if (Style.isCSharp()) {
// This needs to come after tokens have been merged so that C#
// string literals are correctly identified.
handleCSharpVerbatimAndInterpolatedStrings();
+ }
+ if (Style.isTableGen()) {
+ handleTableGenMultilineString();
+ handleTableGenNumericLikeIdentifier();
+ }
if (Tokens.back()->NewlinesBefore > 0 || Tokens.back()->IsMultiline)
FirstInLineIndex = Tokens.size() - 1;
- } while (Tokens.back()->Tok.isNot(tok::eof));
+ } while (Tokens.back()->isNot(tok::eof));
return Tokens;
}
@@ -89,12 +110,14 @@ void FormatTokenLexer::tryMergePreviousTokens() {
return;
if (tryMergeLessLess())
return;
+ if (tryMergeGreaterGreater())
+ return;
if (tryMergeForEach())
return;
if (Style.isCpp() && tryTransformTryUsageForC())
return;
- if (Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp()) {
+ if (Style.isJavaScript() || Style.isCSharp()) {
static const tok::TokenKind NullishCoalescingOperator[] = {tok::question,
tok::question};
static const tok::TokenKind NullPropagatingOperator[] = {tok::question,
@@ -113,9 +136,8 @@ void FormatTokenLexer::tryMergePreviousTokens() {
Tokens.back()->Tok.setKind(tok::period);
return;
}
- if (tryMergeNullishCoalescingEqual()) {
+ if (tryMergeNullishCoalescingEqual())
return;
- }
}
if (Style.isCSharp()) {
@@ -139,7 +161,7 @@ void FormatTokenLexer::tryMergePreviousTokens() {
if (tryMergeNSStringLiteral())
return;
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isJavaScript()) {
static const tok::TokenKind JSIdentity[] = {tok::equalequal, tok::equal};
static const tok::TokenKind JSNotIdentity[] = {tok::exclaimequal,
tok::equal};
@@ -180,6 +202,88 @@ void FormatTokenLexer::tryMergePreviousTokens() {
if (tryMergeTokens(JavaRightLogicalShiftAssign, TT_BinaryOperator))
return;
}
+
+ if (Style.isVerilog()) {
+ // Merge the number following a base like `'h?a0`.
+ if (Tokens.size() >= 3 && Tokens.end()[-3]->is(TT_VerilogNumberBase) &&
+ Tokens.end()[-2]->is(tok::numeric_constant) &&
+ Tokens.back()->isOneOf(tok::numeric_constant, tok::identifier,
+ tok::question) &&
+ tryMergeTokens(2, TT_Unknown)) {
+ return;
+ }
+ // Part select.
+ if (tryMergeTokensAny({{tok::minus, tok::colon}, {tok::plus, tok::colon}},
+ TT_BitFieldColon)) {
+ return;
+ }
+ // Xnor. The combined token is treated as a caret which can also be either a
+ // unary or binary operator. The actual type is determined in
+ // TokenAnnotator. We also check the token length so we know it is not
+ // already a merged token.
+ if (Tokens.back()->TokenText.size() == 1 &&
+ tryMergeTokensAny({{tok::caret, tok::tilde}, {tok::tilde, tok::caret}},
+ TT_BinaryOperator)) {
+ Tokens.back()->Tok.setKind(tok::caret);
+ return;
+ }
+ // Signed shift and distribution weight.
+ if (tryMergeTokens({tok::less, tok::less}, TT_BinaryOperator)) {
+ Tokens.back()->Tok.setKind(tok::lessless);
+ return;
+ }
+ if (tryMergeTokens({tok::greater, tok::greater}, TT_BinaryOperator)) {
+ Tokens.back()->Tok.setKind(tok::greatergreater);
+ return;
+ }
+ if (tryMergeTokensAny({{tok::lessless, tok::equal},
+ {tok::lessless, tok::lessequal},
+ {tok::greatergreater, tok::equal},
+ {tok::greatergreater, tok::greaterequal},
+ {tok::colon, tok::equal},
+ {tok::colon, tok::slash}},
+ TT_BinaryOperator)) {
+ Tokens.back()->ForcedPrecedence = prec::Assignment;
+ return;
+ }
+ // Exponentiation, signed shift, case equality, and wildcard equality.
+ if (tryMergeTokensAny({{tok::star, tok::star},
+ {tok::lessless, tok::less},
+ {tok::greatergreater, tok::greater},
+ {tok::exclaimequal, tok::equal},
+ {tok::exclaimequal, tok::question},
+ {tok::equalequal, tok::equal},
+ {tok::equalequal, tok::question}},
+ TT_BinaryOperator)) {
+ return;
+ }
+ // Module paths in specify blocks and the implication and boolean equality
+ // operators.
+ if (tryMergeTokensAny({{tok::plusequal, tok::greater},
+ {tok::plus, tok::star, tok::greater},
+ {tok::minusequal, tok::greater},
+ {tok::minus, tok::star, tok::greater},
+ {tok::less, tok::arrow},
+ {tok::equal, tok::greater},
+ {tok::star, tok::greater},
+ {tok::pipeequal, tok::greater},
+ {tok::pipe, tok::arrow},
+ {tok::hash, tok::minus, tok::hash},
+ {tok::hash, tok::equal, tok::hash}},
+ TT_BinaryOperator) ||
+ Tokens.back()->is(tok::arrow)) {
+ Tokens.back()->ForcedPrecedence = prec::Comma;
+ return;
+ }
+ }
+ // TableGen's Multi line string starts with [{
+ if (Style.isTableGen() && tryMergeTokens({tok::l_square, tok::l_brace},
+ TT_TableGenMultiLineString)) {
+ // Set again with finalizing. This must never be annotated as other types.
+ Tokens.back()->setFinalizedType(TT_TableGenMultiLineString);
+ Tokens.back()->Tok.setKind(tok::string_literal);
+ return;
+ }
}
bool FormatTokenLexer::tryMergeNSStringLiteral() {
@@ -187,7 +291,7 @@ bool FormatTokenLexer::tryMergeNSStringLiteral() {
return false;
auto &At = *(Tokens.end() - 2);
auto &String = *(Tokens.end() - 1);
- if (!At->is(tok::at) || !String->is(tok::string_literal))
+ if (At->isNot(tok::at) || String->isNot(tok::string_literal))
return false;
At->Tok.setKind(tok::string_literal);
At->TokenText = StringRef(At->TokenText.begin(),
@@ -205,7 +309,7 @@ bool FormatTokenLexer::tryMergeJSPrivateIdentifier() {
return false;
auto &Hash = *(Tokens.end() - 2);
auto &Identifier = *(Tokens.end() - 1);
- if (!Hash->is(tok::hash) || !Identifier->is(tok::identifier))
+ if (Hash->isNot(tok::hash) || Identifier->isNot(tok::identifier))
return false;
Hash->Tok.setKind(tok::identifier);
Hash->TokenText =
@@ -226,86 +330,33 @@ bool FormatTokenLexer::tryMergeCSharpStringLiteral() {
if (Tokens.size() < 2)
return false;
- // Interpolated strings could contain { } with " characters inside.
- // $"{x ?? "null"}"
- // should not be split into $"{x ?? ", null, "}" but should treated as a
- // single string-literal.
- //
- // We opt not to try and format expressions inside {} within a C#
- // interpolated string. Formatting expressions within an interpolated string
- // would require similar work as that done for JavaScript template strings
- // in `handleTemplateStrings()`.
- auto &CSharpInterpolatedString = *(Tokens.end() - 2);
- if (CSharpInterpolatedString->getType() == TT_CSharpStringLiteral &&
- (CSharpInterpolatedString->TokenText.startswith(R"($")") ||
- CSharpInterpolatedString->TokenText.startswith(R"($@")"))) {
- int UnmatchedOpeningBraceCount = 0;
-
- auto TokenTextSize = CSharpInterpolatedString->TokenText.size();
- for (size_t Index = 0; Index < TokenTextSize; ++Index) {
- char C = CSharpInterpolatedString->TokenText[Index];
- if (C == '{') {
- // "{{" inside an interpolated string is an escaped '{' so skip it.
- if (Index + 1 < TokenTextSize &&
- CSharpInterpolatedString->TokenText[Index + 1] == '{') {
- ++Index;
- continue;
- }
- ++UnmatchedOpeningBraceCount;
- } else if (C == '}') {
- // "}}" inside an interpolated string is an escaped '}' so skip it.
- if (Index + 1 < TokenTextSize &&
- CSharpInterpolatedString->TokenText[Index + 1] == '}') {
- ++Index;
- continue;
- }
- --UnmatchedOpeningBraceCount;
- }
- }
-
- if (UnmatchedOpeningBraceCount > 0) {
- auto &NextToken = *(Tokens.end() - 1);
- CSharpInterpolatedString->TokenText =
- StringRef(CSharpInterpolatedString->TokenText.begin(),
- NextToken->TokenText.end() -
- CSharpInterpolatedString->TokenText.begin());
- CSharpInterpolatedString->ColumnWidth += NextToken->ColumnWidth;
- Tokens.erase(Tokens.end() - 1);
- return true;
- }
- }
-
// Look for @"aaaaaa" or $"aaaaaa".
- auto &String = *(Tokens.end() - 1);
- if (!String->is(tok::string_literal))
+ const auto String = *(Tokens.end() - 1);
+ if (String->isNot(tok::string_literal))
return false;
- auto &At = *(Tokens.end() - 2);
- if (!(At->is(tok::at) || At->TokenText == "$"))
+ auto Prefix = *(Tokens.end() - 2);
+ if (Prefix->isNot(tok::at) && Prefix->TokenText != "$")
return false;
- if (Tokens.size() > 2 && At->is(tok::at)) {
- auto &Dollar = *(Tokens.end() - 3);
- if (Dollar->TokenText == "$") {
- // This looks like $@"aaaaa" so we need to combine all 3 tokens.
- Dollar->Tok.setKind(tok::string_literal);
- Dollar->TokenText =
- StringRef(Dollar->TokenText.begin(),
- String->TokenText.end() - Dollar->TokenText.begin());
- Dollar->ColumnWidth += (At->ColumnWidth + String->ColumnWidth);
- Dollar->setType(TT_CSharpStringLiteral);
+ if (Tokens.size() > 2) {
+ const auto Tok = *(Tokens.end() - 3);
+ if ((Tok->TokenText == "$" && Prefix->is(tok::at)) ||
+ (Tok->is(tok::at) && Prefix->TokenText == "$")) {
+ // This looks like $@"aaa" or @$"aaa" so we need to combine all 3 tokens.
+ Tok->ColumnWidth += Prefix->ColumnWidth;
Tokens.erase(Tokens.end() - 2);
- Tokens.erase(Tokens.end() - 1);
- return true;
+ Prefix = Tok;
}
}
// Convert back into just a string_literal.
- At->Tok.setKind(tok::string_literal);
- At->TokenText = StringRef(At->TokenText.begin(),
- String->TokenText.end() - At->TokenText.begin());
- At->ColumnWidth += String->ColumnWidth;
- At->setType(TT_CSharpStringLiteral);
+ Prefix->Tok.setKind(tok::string_literal);
+ Prefix->TokenText =
+ StringRef(Prefix->TokenText.begin(),
+ String->TokenText.end() - Prefix->TokenText.begin());
+ Prefix->ColumnWidth += String->ColumnWidth;
+ Prefix->setType(TT_CSharpStringLiteral);
Tokens.erase(Tokens.end() - 1);
return true;
}
@@ -323,8 +374,9 @@ bool FormatTokenLexer::tryMergeNullishCoalescingEqual() {
auto &NullishCoalescing = *(Tokens.end() - 2);
auto &Equal = *(Tokens.end() - 1);
if (NullishCoalescing->getType() != TT_NullCoalescingOperator ||
- !Equal->is(tok::equal))
+ Equal->isNot(tok::equal)) {
return false;
+ }
NullishCoalescing->Tok.setKind(tok::equal); // no '??=' in clang tokens.
NullishCoalescing->TokenText =
StringRef(NullishCoalescing->TokenText.begin(),
@@ -338,9 +390,11 @@ bool FormatTokenLexer::tryMergeNullishCoalescingEqual() {
bool FormatTokenLexer::tryMergeCSharpKeywordVariables() {
if (Tokens.size() < 2)
return false;
- auto &At = *(Tokens.end() - 2);
- auto &Keyword = *(Tokens.end() - 1);
- if (!At->is(tok::at))
+ const auto At = *(Tokens.end() - 2);
+ if (At->isNot(tok::at))
+ return false;
+ const auto Keyword = *(Tokens.end() - 1);
+ if (Keyword->TokenText == "$")
return false;
if (!Keywords.isCSharpKeyword(*Keyword))
return false;
@@ -359,7 +413,7 @@ bool FormatTokenLexer::tryTransformCSharpForEach() {
if (Tokens.size() < 1)
return false;
auto &Identifier = *(Tokens.end() - 1);
- if (!Identifier->is(tok::identifier))
+ if (Identifier->isNot(tok::identifier))
return false;
if (Identifier->TokenText != "foreach")
return false;
@@ -374,9 +428,9 @@ bool FormatTokenLexer::tryMergeForEach() {
return false;
auto &For = *(Tokens.end() - 2);
auto &Each = *(Tokens.end() - 1);
- if (!For->is(tok::kw_for))
+ if (For->isNot(tok::kw_for))
return false;
- if (!Each->is(tok::identifier))
+ if (Each->isNot(tok::identifier))
return false;
if (Each->TokenText != "each")
return false;
@@ -395,7 +449,7 @@ bool FormatTokenLexer::tryTransformTryUsageForC() {
if (Tokens.size() < 2)
return false;
auto &Try = *(Tokens.end() - 2);
- if (!Try->is(tok::kw_try))
+ if (Try->isNot(tok::kw_try))
return false;
auto &Next = *(Tokens.end() - 1);
if (Next->isOneOf(tok::l_brace, tok::colon, tok::hash, tok::comment))
@@ -416,18 +470,20 @@ bool FormatTokenLexer::tryMergeLessLess() {
if (Tokens.size() < 3)
return false;
- bool FourthTokenIsLess = false;
- if (Tokens.size() > 3)
- FourthTokenIsLess = (Tokens.end() - 4)[0]->is(tok::less);
-
auto First = Tokens.end() - 3;
- if (First[2]->is(tok::less) || First[1]->isNot(tok::less) ||
- First[0]->isNot(tok::less) || FourthTokenIsLess)
+ if (First[0]->isNot(tok::less) || First[1]->isNot(tok::less))
return false;
// Only merge if there currently is no whitespace between the two "<".
- if (First[1]->WhitespaceRange.getBegin() !=
- First[1]->WhitespaceRange.getEnd())
+ if (First[1]->hasWhitespaceBefore())
+ return false;
+
+ auto X = Tokens.size() > 3 ? First[-1] : nullptr;
+ if (X && X->is(tok::less))
+ return false;
+
+ auto Y = First[2];
+ if ((!X || X->isNot(tok::kw_operator)) && Y->is(tok::less))
return false;
First[0]->Tok.setKind(tok::lessless);
@@ -437,6 +493,30 @@ bool FormatTokenLexer::tryMergeLessLess() {
return true;
}
+bool FormatTokenLexer::tryMergeGreaterGreater() {
+ // Merge kw_operator,greater,greater into kw_operator,greatergreater.
+ if (Tokens.size() < 2)
+ return false;
+
+ auto First = Tokens.end() - 2;
+ if (First[0]->isNot(tok::greater) || First[1]->isNot(tok::greater))
+ return false;
+
+ // Only merge if there currently is no whitespace between the first two ">".
+ if (First[1]->hasWhitespaceBefore())
+ return false;
+
+ auto Tok = Tokens.size() > 2 ? First[-1] : nullptr;
+ if (Tok && Tok->isNot(tok::kw_operator))
+ return false;
+
+ First[0]->Tok.setKind(tok::greatergreater);
+ First[0]->TokenText = ">>";
+ First[0]->ColumnWidth += 1;
+ Tokens.erase(Tokens.end() - 1);
+ return true;
+}
+
bool FormatTokenLexer::tryMergeTokens(ArrayRef<tok::TokenKind> Kinds,
TokenType NewType) {
if (Tokens.size() < Kinds.size())
@@ -444,16 +524,28 @@ bool FormatTokenLexer::tryMergeTokens(ArrayRef<tok::TokenKind> Kinds,
SmallVectorImpl<FormatToken *>::const_iterator First =
Tokens.end() - Kinds.size();
- if (!First[0]->is(Kinds[0]))
+ for (unsigned i = 0; i < Kinds.size(); ++i)
+ if (First[i]->isNot(Kinds[i]))
+ return false;
+
+ return tryMergeTokens(Kinds.size(), NewType);
+}
+
+bool FormatTokenLexer::tryMergeTokens(size_t Count, TokenType NewType) {
+ if (Tokens.size() < Count)
return false;
+
+ SmallVectorImpl<FormatToken *>::const_iterator First = Tokens.end() - Count;
unsigned AddLength = 0;
- for (unsigned i = 1; i < Kinds.size(); ++i) {
- if (!First[i]->is(Kinds[i]) || First[i]->WhitespaceRange.getBegin() !=
- First[i]->WhitespaceRange.getEnd())
+ for (size_t i = 1; i < Count; ++i) {
+ // If there is whitespace separating the token and the previous one,
+ // they should not be merged.
+ if (First[i]->hasWhitespaceBefore())
return false;
AddLength += First[i]->TokenText.size();
}
- Tokens.resize(Tokens.size() - Kinds.size() + 1);
+
+ Tokens.resize(Tokens.size() - Count + 1);
First[0]->TokenText = StringRef(First[0]->TokenText.data(),
First[0]->TokenText.size() + AddLength);
First[0]->ColumnWidth += AddLength;
@@ -461,6 +553,13 @@ bool FormatTokenLexer::tryMergeTokens(ArrayRef<tok::TokenKind> Kinds,
return true;
}
+bool FormatTokenLexer::tryMergeTokensAny(
+ ArrayRef<ArrayRef<tok::TokenKind>> Kinds, TokenType NewType) {
+ return llvm::any_of(Kinds, [this, NewType](ArrayRef<tok::TokenKind> Kinds) {
+ return tryMergeTokens(Kinds, NewType);
+ });
+}
+
// Returns \c true if \p Tok can only be followed by an operand in JavaScript.
bool FormatTokenLexer::precedesOperand(FormatToken *Tok) {
// NB: This is not entirely correct, as an r_paren can introduce an operand
@@ -486,7 +585,7 @@ bool FormatTokenLexer::canPrecedeRegexLiteral(FormatToken *Prev) {
// `!` is an unary prefix operator, but also a post-fix operator that casts
// away nullability, so the same check applies.
if (Prev->isOneOf(tok::plusplus, tok::minusminus, tok::exclaim))
- return (Tokens.size() < 3 || precedesOperand(Tokens[Tokens.size() - 3]));
+ return Tokens.size() < 3 || precedesOperand(Tokens[Tokens.size() - 3]);
// The previous token must introduce an operand location where regex
// literals can occur.
@@ -506,11 +605,11 @@ void FormatTokenLexer::tryParseJSRegexLiteral() {
return;
FormatToken *Prev = nullptr;
- for (auto I = Tokens.rbegin() + 1, E = Tokens.rend(); I != E; ++I) {
+ for (FormatToken *FT : llvm::drop_begin(llvm::reverse(Tokens))) {
// NB: Because previous pointers are not initialized yet, this cannot use
// Token.getPreviousNonComment.
- if ((*I)->isNot(tok::comment)) {
- Prev = *I;
+ if (FT->isNot(tok::comment)) {
+ Prev = FT;
break;
}
}
@@ -555,44 +654,105 @@ void FormatTokenLexer::tryParseJSRegexLiteral() {
resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(Offset)));
}
+static auto lexCSharpString(const char *Begin, const char *End, bool Verbatim,
+ bool Interpolated) {
+ auto Repeated = [&Begin, End]() {
+ return Begin + 1 < End && Begin[1] == Begin[0];
+ };
+
+ // Look for a terminating '"' in the current file buffer.
+ // Make no effort to format code within an interpolated or verbatim string.
+ //
+ // Interpolated strings could contain { } with " characters inside.
+ // $"{x ?? "null"}"
+ // should not be split into $"{x ?? ", null, "}" but should be treated as a
+ // single string-literal.
+ //
+ // We opt not to try and format expressions inside {} within a C#
+ // interpolated string. Formatting expressions within an interpolated string
+ // would require similar work as that done for JavaScript template strings
+ // in `handleTemplateStrings()`.
+ for (int UnmatchedOpeningBraceCount = 0; Begin < End; ++Begin) {
+ switch (*Begin) {
+ case '\\':
+ if (!Verbatim)
+ ++Begin;
+ break;
+ case '{':
+ if (Interpolated) {
+ // {{ inside an interpolated string is escaped, so skip it.
+ if (Repeated())
+ ++Begin;
+ else
+ ++UnmatchedOpeningBraceCount;
+ }
+ break;
+ case '}':
+ if (Interpolated) {
+ // }} inside an interpolated string is escaped, so skip it.
+ if (Repeated())
+ ++Begin;
+ else if (UnmatchedOpeningBraceCount > 0)
+ --UnmatchedOpeningBraceCount;
+ else
+ return End;
+ }
+ break;
+ case '"':
+ if (UnmatchedOpeningBraceCount > 0)
+ break;
+ // "" within a verbatim string is an escaped double quote: skip it.
+ if (Verbatim && Repeated()) {
+ ++Begin;
+ break;
+ }
+ return Begin;
+ }
+ }
+
+ return End;
+}
+
void FormatTokenLexer::handleCSharpVerbatimAndInterpolatedStrings() {
FormatToken *CSharpStringLiteral = Tokens.back();
- if (CSharpStringLiteral->getType() != TT_CSharpStringLiteral)
+ if (CSharpStringLiteral->isNot(TT_CSharpStringLiteral))
return;
+ auto &TokenText = CSharpStringLiteral->TokenText;
+
+ bool Verbatim = false;
+ bool Interpolated = false;
+ if (TokenText.starts_with(R"($@")") || TokenText.starts_with(R"(@$")")) {
+ Verbatim = true;
+ Interpolated = true;
+ } else if (TokenText.starts_with(R"(@")")) {
+ Verbatim = true;
+ } else if (TokenText.starts_with(R"($")")) {
+ Interpolated = true;
+ }
+
// Deal with multiline strings.
- if (!(CSharpStringLiteral->TokenText.startswith(R"(@")") ||
- CSharpStringLiteral->TokenText.startswith(R"($@")")))
+ if (!Verbatim && !Interpolated)
return;
- const char *StrBegin =
- Lex->getBufferLocation() - CSharpStringLiteral->TokenText.size();
+ const char *StrBegin = Lex->getBufferLocation() - TokenText.size();
const char *Offset = StrBegin;
- if (CSharpStringLiteral->TokenText.startswith(R"(@")"))
- Offset += 2;
- else // CSharpStringLiteral->TokenText.startswith(R"($@")")
+ if (Verbatim && Interpolated)
Offset += 3;
+ else
+ Offset += 2;
- // Look for a terminating '"' in the current file buffer.
- // Make no effort to format code within an interpolated or verbatim string.
- for (; Offset != Lex->getBuffer().end(); ++Offset) {
- if (Offset[0] == '"') {
- // "" within a verbatim string is an escaped double quote: skip it.
- if (Offset + 1 < Lex->getBuffer().end() && Offset[1] == '"')
- ++Offset;
- else
- break;
- }
- }
+ const auto End = Lex->getBuffer().end();
+ Offset = lexCSharpString(Offset, End, Verbatim, Interpolated);
// Make no attempt to format code properly if a verbatim string is
// unterminated.
- if (Offset == Lex->getBuffer().end())
+ if (Offset >= End)
return;
StringRef LiteralText(StrBegin, Offset - StrBegin + 1);
- CSharpStringLiteral->TokenText = LiteralText;
+ TokenText = LiteralText;
// Adjust width for potentially multiline string literals.
size_t FirstBreak = LiteralText.find('\n');
@@ -606,15 +766,82 @@ void FormatTokenLexer::handleCSharpVerbatimAndInterpolatedStrings() {
if (LastBreak != StringRef::npos) {
CSharpStringLiteral->IsMultiline = true;
unsigned StartColumn = 0;
- CSharpStringLiteral->LastLineColumnWidth = encoding::columnWidthWithTabs(
- LiteralText.substr(LastBreak + 1, LiteralText.size()), StartColumn,
+ CSharpStringLiteral->LastLineColumnWidth =
+ encoding::columnWidthWithTabs(LiteralText.substr(LastBreak + 1),
+ StartColumn, Style.TabWidth, Encoding);
+ }
+
+ assert(Offset < End);
+ resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(Offset + 1)));
+}
+
+void FormatTokenLexer::handleTableGenMultilineString() {
+ FormatToken *MultiLineString = Tokens.back();
+ if (MultiLineString->isNot(TT_TableGenMultiLineString))
+ return;
+
+ auto OpenOffset = Lex->getCurrentBufferOffset() - 2 /* "[{" */;
+ // "}]" is the end of multi line string.
+ auto CloseOffset = Lex->getBuffer().find("}]", OpenOffset);
+ if (CloseOffset == StringRef::npos)
+ return;
+ auto Text = Lex->getBuffer().substr(OpenOffset, CloseOffset + 2);
+ MultiLineString->TokenText = Text;
+ resetLexer(SourceMgr.getFileOffset(
+ Lex->getSourceLocation(Lex->getBufferLocation() - 2 + Text.size())));
+ auto FirstLineText = Text;
+ auto FirstBreak = Text.find('\n');
+ // Set ColumnWidth and LastLineColumnWidth when it has multiple lines.
+ if (FirstBreak != StringRef::npos) {
+ MultiLineString->IsMultiline = true;
+ FirstLineText = Text.substr(0, FirstBreak + 1);
+ // LastLineColumnWidth holds the width of the last line.
+ auto LastBreak = Text.rfind('\n');
+ MultiLineString->LastLineColumnWidth = encoding::columnWidthWithTabs(
+ Text.substr(LastBreak + 1), MultiLineString->OriginalColumn,
Style.TabWidth, Encoding);
}
+ // ColumnWidth holds only the width of the first line.
+ MultiLineString->ColumnWidth = encoding::columnWidthWithTabs(
+ FirstLineText, MultiLineString->OriginalColumn, Style.TabWidth, Encoding);
+}
- SourceLocation loc = Offset < Lex->getBuffer().end()
- ? Lex->getSourceLocation(Offset + 1)
- : SourceMgr.getLocForEndOfFile(ID);
- resetLexer(SourceMgr.getFileOffset(loc));
+void FormatTokenLexer::handleTableGenNumericLikeIdentifier() {
+ FormatToken *Tok = Tokens.back();
+ // TableGen identifiers can begin with digits. Such tokens are lexed as
+ // numeric_constant now.
+ if (Tok->isNot(tok::numeric_constant))
+ return;
+ StringRef Text = Tok->TokenText;
+ // The following check is based on llvm::TGLexer::LexToken.
+ // That lexes the token as a number if any of the following holds:
+ // 1. It starts with '+', '-'.
+ // 2. All the characters are digits.
+ // 3. The first non-digit character is 'b', and the next is '0' or '1'.
+ // 4. The first non-digit character is 'x', and the next is a hex digit.
+ // Note that in the case 3 and 4, if the next character does not exists in
+ // this token, the token is an identifier.
+ if (Text.size() < 1 || Text[0] == '+' || Text[0] == '-')
+ return;
+ const auto NonDigitPos = Text.find_if([](char C) { return !isdigit(C); });
+ // All the characters are digits
+ if (NonDigitPos == StringRef::npos)
+ return;
+ char FirstNonDigit = Text[NonDigitPos];
+ if (NonDigitPos < Text.size() - 1) {
+ char TheNext = Text[NonDigitPos + 1];
+ // Regarded as a binary number.
+ if (FirstNonDigit == 'b' && (TheNext == '0' || TheNext == '1'))
+ return;
+ // Regarded as hex number.
+ if (FirstNonDigit == 'x' && isxdigit(TheNext))
+ return;
+ }
+ if (isalpha(FirstNonDigit) || FirstNonDigit == '_') {
+ // This is actually an identifier in TableGen.
+ Tok->Tok.setKind(tok::identifier);
+ Tok->Tok.setIdentifierInfo(nullptr);
+ }
}
void FormatTokenLexer::handleTemplateStrings() {
@@ -644,6 +871,7 @@ void FormatTokenLexer::handleTemplateStrings() {
for (; Offset != Lex->getBuffer().end(); ++Offset) {
if (Offset[0] == '`') {
StateStack.pop();
+ ++Offset;
break;
}
if (Offset[0] == '\\') {
@@ -652,12 +880,12 @@ void FormatTokenLexer::handleTemplateStrings() {
Offset[1] == '{') {
// '${' introduces an expression interpolation in the template string.
StateStack.push(LexerState::NORMAL);
- ++Offset;
+ Offset += 2;
break;
}
}
- StringRef LiteralText(TmplBegin, Offset - TmplBegin + 1);
+ StringRef LiteralText(TmplBegin, Offset - TmplBegin);
BacktickToken->setType(TT_TemplateString);
BacktickToken->Tok.setKind(tok::string_literal);
BacktickToken->TokenText = LiteralText;
@@ -673,14 +901,12 @@ void FormatTokenLexer::handleTemplateStrings() {
if (LastBreak != StringRef::npos) {
BacktickToken->IsMultiline = true;
unsigned StartColumn = 0; // The template tail spans the entire line.
- BacktickToken->LastLineColumnWidth = encoding::columnWidthWithTabs(
- LiteralText.substr(LastBreak + 1, LiteralText.size()), StartColumn,
- Style.TabWidth, Encoding);
+ BacktickToken->LastLineColumnWidth =
+ encoding::columnWidthWithTabs(LiteralText.substr(LastBreak + 1),
+ StartColumn, Style.TabWidth, Encoding);
}
- SourceLocation loc = Offset < Lex->getBuffer().end()
- ? Lex->getSourceLocation(Offset + 1)
- : SourceMgr.getLocForEndOfFile(ID);
+ SourceLocation loc = Lex->getSourceLocation(Offset);
resetLexer(SourceMgr.getFileOffset(loc));
}
@@ -709,14 +935,14 @@ bool FormatTokenLexer::tryMerge_TMacro() {
if (Tokens.size() < 4)
return false;
FormatToken *Last = Tokens.back();
- if (!Last->is(tok::r_paren))
+ if (Last->isNot(tok::r_paren))
return false;
FormatToken *String = Tokens[Tokens.size() - 2];
- if (!String->is(tok::string_literal) || String->IsMultiline)
+ if (String->isNot(tok::string_literal) || String->IsMultiline)
return false;
- if (!Tokens[Tokens.size() - 3]->is(tok::l_paren))
+ if (Tokens[Tokens.size() - 3]->isNot(tok::l_paren))
return false;
FormatToken *Macro = Tokens[Tokens.size() - 4];
@@ -739,6 +965,8 @@ bool FormatTokenLexer::tryMerge_TMacro() {
Tokens.pop_back();
Tokens.pop_back();
Tokens.back() = String;
+ if (FirstInLineIndex >= Tokens.size())
+ FirstInLineIndex = Tokens.size() - 1;
return true;
}
@@ -763,19 +991,17 @@ bool FormatTokenLexer::tryMergeConflictMarkers() {
StringRef Buffer = SourceMgr.getBufferOrFake(ID).getBuffer();
// Calculate the offset of the start of the current line.
auto LineOffset = Buffer.rfind('\n', FirstInLineOffset);
- if (LineOffset == StringRef::npos) {
+ if (LineOffset == StringRef::npos)
LineOffset = 0;
- } else {
+ else
++LineOffset;
- }
auto FirstSpace = Buffer.find_first_of(" \n", LineOffset);
StringRef LineStart;
- if (FirstSpace == StringRef::npos) {
+ if (FirstSpace == StringRef::npos)
LineStart = Buffer.substr(LineOffset);
- } else {
+ else
LineStart = Buffer.substr(LineOffset, FirstSpace - LineOffset);
- }
TokenType Type = TT_Unknown;
if (LineStart == "<<<<<<<" || LineStart == ">>>>") {
@@ -823,6 +1049,58 @@ FormatToken *FormatTokenLexer::getStashedToken() {
return FormatTok;
}
+/// Truncate the current token to the new length and make the lexer continue
+/// from the end of the truncated token. Used for other languages that have
+/// different token boundaries, like JavaScript in which a comment ends at a
+/// line break regardless of whether the line break follows a backslash. Also
+/// used to set the lexer to the end of whitespace if the lexer regards
+/// whitespace and an unrecognized symbol as one token.
+void FormatTokenLexer::truncateToken(size_t NewLen) {
+ assert(NewLen <= FormatTok->TokenText.size());
+ resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(
+ Lex->getBufferLocation() - FormatTok->TokenText.size() + NewLen)));
+ FormatTok->TokenText = FormatTok->TokenText.substr(0, NewLen);
+ FormatTok->ColumnWidth = encoding::columnWidthWithTabs(
+ FormatTok->TokenText, FormatTok->OriginalColumn, Style.TabWidth,
+ Encoding);
+ FormatTok->Tok.setLength(NewLen);
+}
+
+/// Count the length of leading whitespace in a token.
+static size_t countLeadingWhitespace(StringRef Text) {
+ // Basically counting the length matched by this regex.
+ // "^([\n\r\f\v \t]|(\\\\|\\?\\?/)[\n\r])+"
+ // Directly using the regex turned out to be slow. With the regex
+ // version formatting all files in this directory took about 1.25
+ // seconds. This version took about 0.5 seconds.
+ const unsigned char *const Begin = Text.bytes_begin();
+ const unsigned char *const End = Text.bytes_end();
+ const unsigned char *Cur = Begin;
+ while (Cur < End) {
+ if (isspace(Cur[0])) {
+ ++Cur;
+ } else if (Cur[0] == '\\' && (Cur[1] == '\n' || Cur[1] == '\r')) {
+ // A '\' followed by a newline always escapes the newline, regardless
+ // of whether there is another '\' before it.
+ // The source has a null byte at the end. So the end of the entire input
+ // isn't reached yet. Also the lexer doesn't break apart an escaped
+ // newline.
+ assert(End - Cur >= 2);
+ Cur += 2;
+ } else if (Cur[0] == '?' && Cur[1] == '?' && Cur[2] == '/' &&
+ (Cur[3] == '\n' || Cur[3] == '\r')) {
+ // Newlines can also be escaped by a '?' '?' '/' trigraph. By the way, the
+ // characters are quoted individually in this comment because if we write
+ // them together some compilers warn that we have a trigraph in the code.
+ assert(End - Cur >= 4);
+ Cur += 4;
+ } else {
+ break;
+ }
+ }
+ return Cur - Begin;
+}
+
FormatToken *FormatTokenLexer::getNextToken() {
if (StateStack.top() == LexerState::TOKEN_STASHED) {
StateStack.pop();
@@ -837,34 +1115,33 @@ FormatToken *FormatTokenLexer::getNextToken() {
IsFirstToken = false;
// Consume and record whitespace until we find a significant token.
+ // Some tok::unknown tokens are not just whitespace, e.g. whitespace
+ // followed by a symbol such as backtick. Those symbols may be
+ // significant in other languages.
unsigned WhitespaceLength = TrailingWhitespace;
- while (FormatTok->Tok.is(tok::unknown)) {
+ while (FormatTok->isNot(tok::eof)) {
+ auto LeadingWhitespace = countLeadingWhitespace(FormatTok->TokenText);
+ if (LeadingWhitespace == 0)
+ break;
+ if (LeadingWhitespace < FormatTok->TokenText.size())
+ truncateToken(LeadingWhitespace);
StringRef Text = FormatTok->TokenText;
- auto EscapesNewline = [&](int pos) {
- // A '\r' here is just part of '\r\n'. Skip it.
- if (pos >= 0 && Text[pos] == '\r')
- --pos;
- // See whether there is an odd number of '\' before this.
- // FIXME: This is wrong. A '\' followed by a newline is always removed,
- // regardless of whether there is another '\' before it.
- // FIXME: Newlines can also be escaped by a '?' '?' '/' trigraph.
- unsigned count = 0;
- for (; pos >= 0; --pos, ++count)
- if (Text[pos] != '\\')
- break;
- return count & 1;
- };
- // FIXME: This miscounts tok:unknown tokens that are not just
- // whitespace, e.g. a '`' character.
+ bool InEscape = false;
for (int i = 0, e = Text.size(); i != e; ++i) {
switch (Text[i]) {
+ case '\r':
+ // If this is a CRLF sequence, break here and the LF will be handled on
+ // the next loop iteration. Otherwise, this is a single Mac CR, treat it
+ // the same as a single LF.
+ if (i + 1 < e && Text[i + 1] == '\n')
+ break;
+ [[fallthrough]];
case '\n':
++FormatTok->NewlinesBefore;
- FormatTok->HasUnescapedNewline = !EscapesNewline(i - 1);
- FormatTok->LastNewlineOffset = WhitespaceLength + i + 1;
- Column = 0;
- break;
- case '\r':
+ if (!InEscape)
+ FormatTok->HasUnescapedNewline = true;
+ else
+ InEscape = false;
FormatTok->LastNewlineOffset = WhitespaceLength + i + 1;
Column = 0;
break;
@@ -880,24 +1157,32 @@ FormatToken *FormatTokenLexer::getNextToken() {
Style.TabWidth - (Style.TabWidth ? Column % Style.TabWidth : 0);
break;
case '\\':
- if (i + 1 == e || (Text[i + 1] != '\r' && Text[i + 1] != '\n'))
- FormatTok->setType(TT_ImplicitStringLiteral);
+ case '?':
+ case '/':
+ // The text was entirely whitespace when this loop was entered. Thus
+ // this has to be an escape sequence.
+ assert(Text.substr(i, 2) == "\\\r" || Text.substr(i, 2) == "\\\n" ||
+ Text.substr(i, 4) == "\?\?/\r" ||
+ Text.substr(i, 4) == "\?\?/\n" ||
+ (i >= 1 && (Text.substr(i - 1, 4) == "\?\?/\r" ||
+ Text.substr(i - 1, 4) == "\?\?/\n")) ||
+ (i >= 2 && (Text.substr(i - 2, 4) == "\?\?/\r" ||
+ Text.substr(i - 2, 4) == "\?\?/\n")));
+ InEscape = true;
break;
default:
- FormatTok->setType(TT_ImplicitStringLiteral);
+ // This shouldn't happen.
+ assert(false);
break;
}
- if (FormatTok->getType() == TT_ImplicitStringLiteral)
- break;
}
-
- if (FormatTok->is(TT_ImplicitStringLiteral))
- break;
- WhitespaceLength += FormatTok->Tok.getLength();
-
+ WhitespaceLength += Text.size();
readRawToken(*FormatTok);
}
+ if (FormatTok->is(tok::unknown))
+ FormatTok->setType(TT_ImplicitStringLiteral);
+
// JavaScript and Java do not allow to escape the end of the line with a
// backslash. Backslashes are syntax errors in plain source, but can occur in
// comments. When a single line comment ends with a \, it'll cause the next
@@ -905,46 +1190,52 @@ FormatToken *FormatTokenLexer::getNextToken() {
// finds comments that contain a backslash followed by a line break, truncates
// the comment token at the backslash, and resets the lexer to restart behind
// the backslash.
- if ((Style.Language == FormatStyle::LK_JavaScript ||
- Style.Language == FormatStyle::LK_Java) &&
- FormatTok->is(tok::comment) && FormatTok->TokenText.startswith("//")) {
+ if ((Style.isJavaScript() || Style.Language == FormatStyle::LK_Java) &&
+ FormatTok->is(tok::comment) && FormatTok->TokenText.starts_with("//")) {
size_t BackslashPos = FormatTok->TokenText.find('\\');
while (BackslashPos != StringRef::npos) {
if (BackslashPos + 1 < FormatTok->TokenText.size() &&
FormatTok->TokenText[BackslashPos + 1] == '\n') {
- const char *Offset = Lex->getBufferLocation();
- Offset -= FormatTok->TokenText.size();
- Offset += BackslashPos + 1;
- resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(Offset)));
- FormatTok->TokenText = FormatTok->TokenText.substr(0, BackslashPos + 1);
- FormatTok->ColumnWidth = encoding::columnWidthWithTabs(
- FormatTok->TokenText, FormatTok->OriginalColumn, Style.TabWidth,
- Encoding);
+ truncateToken(BackslashPos + 1);
break;
}
BackslashPos = FormatTok->TokenText.find('\\', BackslashPos + 1);
}
}
- // In case the token starts with escaped newlines, we want to
- // take them into account as whitespace - this pattern is quite frequent
- // in macro definitions.
- // FIXME: Add a more explicit test.
- while (FormatTok->TokenText.size() > 1 && FormatTok->TokenText[0] == '\\') {
- unsigned SkippedWhitespace = 0;
- if (FormatTok->TokenText.size() > 2 &&
- (FormatTok->TokenText[1] == '\r' && FormatTok->TokenText[2] == '\n'))
- SkippedWhitespace = 3;
- else if (FormatTok->TokenText[1] == '\n')
- SkippedWhitespace = 2;
- else
- break;
-
- ++FormatTok->NewlinesBefore;
- WhitespaceLength += SkippedWhitespace;
- FormatTok->LastNewlineOffset = SkippedWhitespace;
- Column = 0;
- FormatTok->TokenText = FormatTok->TokenText.substr(SkippedWhitespace);
+ if (Style.isVerilog()) {
+ static const llvm::Regex NumberBase("^s?[bdho]", llvm::Regex::IgnoreCase);
+ SmallVector<StringRef, 1> Matches;
+ // Verilog uses the backtick instead of the hash for preprocessor stuff.
+ // And it uses the hash for delays and parameter lists. In order to continue
+ // using `tok::hash` in other places, the backtick gets marked as the hash
+ // here. And in order to tell the backtick and hash apart for
+ // Verilog-specific stuff, the hash becomes an identifier.
+ if (FormatTok->is(tok::numeric_constant)) {
+ // In Verilog the quote is not part of a number.
+ auto Quote = FormatTok->TokenText.find('\'');
+ if (Quote != StringRef::npos)
+ truncateToken(Quote);
+ } else if (FormatTok->isOneOf(tok::hash, tok::hashhash)) {
+ FormatTok->Tok.setKind(tok::raw_identifier);
+ } else if (FormatTok->is(tok::raw_identifier)) {
+ if (FormatTok->TokenText == "`") {
+ FormatTok->Tok.setIdentifierInfo(nullptr);
+ FormatTok->Tok.setKind(tok::hash);
+ } else if (FormatTok->TokenText == "``") {
+ FormatTok->Tok.setIdentifierInfo(nullptr);
+ FormatTok->Tok.setKind(tok::hashhash);
+ } else if (Tokens.size() > 0 &&
+ Tokens.back()->is(Keywords.kw_apostrophe) &&
+ NumberBase.match(FormatTok->TokenText, &Matches)) {
+ // In Verilog in a based number literal like `'b10`, there may be
+ // whitespace between `'b` and `10`. Therefore we handle the base and
+ // the rest of the number literal as two tokens. But if there is no
+ // space in the input code, we need to manually separate the two parts.
+ truncateToken(Matches[0].size());
+ FormatTok->setFinalizedType(TT_VerilogNumberBase);
+ }
+ }
}
FormatTok->WhitespaceRange = SourceRange(
@@ -953,12 +1244,12 @@ FormatToken *FormatTokenLexer::getNextToken() {
FormatTok->OriginalColumn = Column;
TrailingWhitespace = 0;
- if (FormatTok->Tok.is(tok::comment)) {
+ if (FormatTok->is(tok::comment)) {
// FIXME: Add the trimmed whitespace to Column.
StringRef UntrimmedText = FormatTok->TokenText;
FormatTok->TokenText = FormatTok->TokenText.rtrim(" \t\v\f");
TrailingWhitespace = UntrimmedText.size() - FormatTok->TokenText.size();
- } else if (FormatTok->Tok.is(tok::raw_identifier)) {
+ } else if (FormatTok->is(tok::raw_identifier)) {
IdentifierInfo &Info = IdentTable.get(FormatTok->TokenText);
FormatTok->Tok.setIdentifierInfo(&Info);
FormatTok->Tok.setKind(Info.getTokenID());
@@ -967,24 +1258,34 @@ FormatToken *FormatTokenLexer::getNextToken() {
tok::kw_operator)) {
FormatTok->Tok.setKind(tok::identifier);
FormatTok->Tok.setIdentifierInfo(nullptr);
- } else if (Style.Language == FormatStyle::LK_JavaScript &&
+ } else if (Style.isJavaScript() &&
FormatTok->isOneOf(tok::kw_struct, tok::kw_union,
tok::kw_operator)) {
FormatTok->Tok.setKind(tok::identifier);
FormatTok->Tok.setIdentifierInfo(nullptr);
+ } else if (Style.isTableGen() && !Keywords.isTableGenKeyword(*FormatTok)) {
+ FormatTok->Tok.setKind(tok::identifier);
+ FormatTok->Tok.setIdentifierInfo(nullptr);
}
- } else if (FormatTok->Tok.is(tok::greatergreater)) {
+ } else if (FormatTok->is(tok::greatergreater)) {
FormatTok->Tok.setKind(tok::greater);
FormatTok->TokenText = FormatTok->TokenText.substr(0, 1);
++Column;
StateStack.push(LexerState::TOKEN_STASHED);
- } else if (FormatTok->Tok.is(tok::lessless)) {
+ } else if (FormatTok->is(tok::lessless)) {
FormatTok->Tok.setKind(tok::less);
FormatTok->TokenText = FormatTok->TokenText.substr(0, 1);
++Column;
StateStack.push(LexerState::TOKEN_STASHED);
}
+ if (Style.isVerilog() && Tokens.size() > 0 &&
+ Tokens.back()->is(TT_VerilogNumberBase) &&
+ FormatTok->Tok.isOneOf(tok::identifier, tok::question)) {
+ // Mark the number following a base like `'h?a0` as a number.
+ FormatTok->Tok.setKind(tok::numeric_constant);
+ }
+
// Now FormatTok is the next non-whitespace token.
StringRef Text = FormatTok->TokenText;
@@ -1010,7 +1311,8 @@ FormatToken *FormatTokenLexer::getNextToken() {
}
if (Style.isCpp()) {
- auto it = Macros.find(FormatTok->Tok.getIdentifierInfo());
+ auto *Identifier = FormatTok->Tok.getIdentifierInfo();
+ auto it = Macros.find(Identifier);
if (!(Tokens.size() > 0 && Tokens.back()->Tok.getIdentifierInfo() &&
Tokens.back()->Tok.getIdentifierInfo()->getPPKeywordID() ==
tok::pp_define) &&
@@ -1024,19 +1326,64 @@ FormatToken *FormatTokenLexer::getNextToken() {
FormatTok->Tok.setKind(tok::kw_if);
}
} else if (FormatTok->is(tok::identifier)) {
- if (MacroBlockBeginRegex.match(Text)) {
+ if (MacroBlockBeginRegex.match(Text))
FormatTok->setType(TT_MacroBlockBegin);
- } else if (MacroBlockEndRegex.match(Text)) {
+ else if (MacroBlockEndRegex.match(Text))
FormatTok->setType(TT_MacroBlockEnd);
- }
+ else if (TypeNames.contains(Identifier))
+ FormatTok->setFinalizedType(TT_TypeName);
}
}
return FormatTok;
}
+bool FormatTokenLexer::readRawTokenVerilogSpecific(Token &Tok) {
+ // In Verilog the quote is not a character literal.
+ //
+ // Make the backtick and double backtick identifiers to match against them
+ // more easily.
+ //
+ // In Verilog an escaped identifier starts with backslash and ends with
+ // whitespace. Unless that whitespace is an escaped newline. A backslash can
+ // also begin an escaped newline outside of an escaped identifier. We check
+ // for that outside of the Regex since we can't use negative lookhead
+ // assertions. Simply changing the '*' to '+' breaks stuff as the escaped
+ // identifier may have a length of 0 according to Section A.9.3.
+ // FIXME: If there is an escaped newline in the middle of an escaped
+ // identifier, allow for pasting the two lines together, But escaped
+ // identifiers usually occur only in generated code anyway.
+ static const llvm::Regex VerilogToken(R"re(^('|``?|\\(\\)re"
+ "(\r?\n|\r)|[^[:space:]])*)");
+
+ SmallVector<StringRef, 4> Matches;
+ const char *Start = Lex->getBufferLocation();
+ if (!VerilogToken.match(StringRef(Start, Lex->getBuffer().end() - Start),
+ &Matches)) {
+ return false;
+ }
+ // There is a null byte at the end of the buffer, so we don't have to check
+ // Start[1] is within the buffer.
+ if (Start[0] == '\\' && (Start[1] == '\r' || Start[1] == '\n'))
+ return false;
+ size_t Len = Matches[0].size();
+
+ // The kind has to be an identifier so we can match it against those defined
+ // in Keywords. The kind has to be set before the length because the setLength
+ // function checks that the kind is not an annotation.
+ Tok.setKind(tok::raw_identifier);
+ Tok.setLength(Len);
+ Tok.setLocation(Lex->getSourceLocation(Start, Len));
+ Tok.setRawIdentifierData(Start);
+ Lex->seek(Lex->getCurrentBufferOffset() + Len, /*IsAtStartofline=*/false);
+ return true;
+}
+
void FormatTokenLexer::readRawToken(FormatToken &Tok) {
- Lex->LexFromRawLexer(Tok.Tok);
+ // For Verilog, first see if there is a special token, and fall back to the
+ // normal lexer if there isn't one.
+ if (!Style.isVerilog() || !readRawTokenVerilogSpecific(Tok.Tok))
+ Lex->LexFromRawLexer(Tok.Tok);
Tok.TokenText = StringRef(SourceMgr.getCharacterData(Tok.Tok.getLocation()),
Tok.Tok.getLength());
// For formatting, treat unterminated string literals like normal string
@@ -1045,37 +1392,28 @@ void FormatTokenLexer::readRawToken(FormatToken &Tok) {
if (!Tok.TokenText.empty() && Tok.TokenText[0] == '"') {
Tok.Tok.setKind(tok::string_literal);
Tok.IsUnterminatedLiteral = true;
- } else if (Style.Language == FormatStyle::LK_JavaScript &&
- Tok.TokenText == "''") {
+ } else if (Style.isJavaScript() && Tok.TokenText == "''") {
Tok.Tok.setKind(tok::string_literal);
}
}
- if ((Style.Language == FormatStyle::LK_JavaScript ||
- Style.Language == FormatStyle::LK_Proto ||
- Style.Language == FormatStyle::LK_TextProto) &&
- Tok.is(tok::char_constant)) {
+ if ((Style.isJavaScript() || Style.isProto()) && Tok.is(tok::char_constant))
Tok.Tok.setKind(tok::string_literal);
- }
- if (Tok.is(tok::comment) && (Tok.TokenText == "// clang-format on" ||
- Tok.TokenText == "/* clang-format on */")) {
+ if (Tok.is(tok::comment) && isClangFormatOn(Tok.TokenText))
FormattingDisabled = false;
- }
Tok.Finalized = FormattingDisabled;
- if (Tok.is(tok::comment) && (Tok.TokenText == "// clang-format off" ||
- Tok.TokenText == "/* clang-format off */")) {
+ if (Tok.is(tok::comment) && isClangFormatOff(Tok.TokenText))
FormattingDisabled = true;
- }
}
void FormatTokenLexer::resetLexer(unsigned Offset) {
StringRef Buffer = SourceMgr.getBufferData(ID);
- Lex.reset(new Lexer(SourceMgr.getLocForStartOfFile(ID),
- getFormattingLangOpts(Style), Buffer.begin(),
- Buffer.begin() + Offset, Buffer.end()));
+ LangOpts = getFormattingLangOpts(Style);
+ Lex.reset(new Lexer(SourceMgr.getLocForStartOfFile(ID), LangOpts,
+ Buffer.begin(), Buffer.begin() + Offset, Buffer.end()));
Lex->SetKeepWhitespaceMode(true);
TrailingWhitespace = 0;
}
diff --git a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h
index a9e3b2fd498a..65dd733bd533 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h
+++ b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h
@@ -17,10 +17,12 @@
#include "Encoding.h"
#include "FormatToken.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Support/Regex.h"
@@ -50,6 +52,7 @@ private:
void tryMergePreviousTokens();
bool tryMergeLessLess();
+ bool tryMergeGreaterGreater();
bool tryMergeNSStringLiteral();
bool tryMergeJSPrivateIdentifier();
bool tryMergeCSharpStringLiteral();
@@ -59,7 +62,14 @@ private:
bool tryMergeForEach();
bool tryTransformTryUsageForC();
+ // Merge the most recently lexed tokens into a single token if their kinds are
+ // correct.
bool tryMergeTokens(ArrayRef<tok::TokenKind> Kinds, TokenType NewType);
+ // Merge without checking their kinds.
+ bool tryMergeTokens(size_t Count, TokenType NewType);
+ // Merge if their kinds match any one of Kinds.
+ bool tryMergeTokensAny(ArrayRef<ArrayRef<tok::TokenKind>> Kinds,
+ TokenType NewType);
// Returns \c true if \p Tok can only be followed by an operand in JavaScript.
bool precedesOperand(FormatToken *Tok);
@@ -85,12 +95,21 @@ private:
void handleCSharpVerbatimAndInterpolatedStrings();
+ // Handles TableGen multiline strings. It has the form [{ ... }].
+ void handleTableGenMultilineString();
+ // Handles TableGen numeric like identifiers.
+ // They have a forms of [0-9]*[_a-zA-Z]([_a-zA-Z0-9]*). But limited to the
+ // case it is not lexed as an integer.
+ void handleTableGenNumericLikeIdentifier();
+
void tryParsePythonComment();
bool tryMerge_TMacro();
bool tryMergeConflictMarkers();
+ void truncateToken(size_t NewLen);
+
FormatToken *getStashedToken();
FormatToken *getNextToken();
@@ -101,6 +120,7 @@ private:
unsigned Column;
unsigned TrailingWhitespace;
std::unique_ptr<Lexer> Lex;
+ LangOptions LangOpts;
const SourceManager &SourceMgr;
FileID ID;
const FormatStyle &Style;
@@ -114,6 +134,8 @@ private:
llvm::SmallMapVector<IdentifierInfo *, TokenType, 8> Macros;
+ llvm::SmallPtrSet<IdentifierInfo *, 8> TypeNames;
+
bool FormattingDisabled;
llvm::Regex MacroBlockBeginRegex;
@@ -122,6 +144,9 @@ private:
// Targets that may appear inside a C# attribute.
static const llvm::StringSet<> CSharpAttributeTargets;
+ /// Handle Verilog-specific tokens.
+ bool readRawTokenVerilogSpecific(Token &Tok);
+
void readRawToken(FormatToken &Tok);
void resetLexer(unsigned Offset);
diff --git a/contrib/llvm-project/clang/lib/Format/FormatTokenSource.h b/contrib/llvm-project/clang/lib/Format/FormatTokenSource.h
new file mode 100644
index 000000000000..7819244eb7d1
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Format/FormatTokenSource.h
@@ -0,0 +1,267 @@
+//===--- FormatTokenSource.h - Format C++ code ------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the \c FormatTokenSource interface, which provides a token
+/// stream as well as the ability to manipulate the token stream.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_FORMAT_FORMATTOKENSOURCE_H
+#define LLVM_CLANG_LIB_FORMAT_FORMATTOKENSOURCE_H
+
+#include "FormatToken.h"
+#include "UnwrappedLineParser.h"
+#include "llvm/ADT/DenseMap.h"
+#include <cstddef>
+
+#define DEBUG_TYPE "format-token-source"
+
+namespace clang {
+namespace format {
+
+// Navigate a token stream.
+//
+// Enables traversal of a token stream, resetting the position in a token
+// stream, as well as inserting new tokens.
+class FormatTokenSource {
+public:
+ virtual ~FormatTokenSource() {}
+
+ // Returns the next token in the token stream.
+ virtual FormatToken *getNextToken() = 0;
+
+ // Returns the token preceding the token returned by the last call to
+ // getNextToken() in the token stream, or nullptr if no such token exists.
+ //
+ // Must not be called directly at the position directly after insertTokens()
+ // is called.
+ virtual FormatToken *getPreviousToken() = 0;
+
+ // Returns the token that would be returned by the next call to
+ // getNextToken().
+ virtual FormatToken *peekNextToken(bool SkipComment = false) = 0;
+
+ // Returns whether we are at the end of the file.
+ // This can be different from whether getNextToken() returned an eof token
+ // when the FormatTokenSource is a view on a part of the token stream.
+ virtual bool isEOF() = 0;
+
+ // Gets the current position in the token stream, to be used by setPosition().
+ //
+ // Note that the value of the position is not meaningful, and specifically
+ // should not be used to get relative token positions.
+ virtual unsigned getPosition() = 0;
+
+ // Resets the token stream to the state it was in when getPosition() returned
+ // Position, and return the token at that position in the stream.
+ virtual FormatToken *setPosition(unsigned Position) = 0;
+
+ // Insert the given tokens before the current position.
+ // Returns the first token in \c Tokens.
+ // The next returned token will be the second token in \c Tokens.
+ // Requires the last token in Tokens to be EOF; once the EOF token is reached,
+ // the next token will be the last token returned by getNextToken();
+ //
+ // For example, given the token sequence 'a1 a2':
+ // getNextToken() -> a1
+ // insertTokens('b1 b2') -> b1
+ // getNextToken() -> b2
+ // getNextToken() -> a1
+ // getNextToken() -> a2
+ virtual FormatToken *insertTokens(ArrayRef<FormatToken *> Tokens) = 0;
+};
+
+class IndexedTokenSource : public FormatTokenSource {
+public:
+ IndexedTokenSource(ArrayRef<FormatToken *> Tokens)
+ : Tokens(Tokens), Position(-1) {}
+
+ FormatToken *getNextToken() override {
+ if (Position >= 0 && isEOF()) {
+ LLVM_DEBUG({
+ llvm::dbgs() << "Next ";
+ dbgToken(Position);
+ });
+ return Tokens[Position];
+ }
+ Position = successor(Position);
+ LLVM_DEBUG({
+ llvm::dbgs() << "Next ";
+ dbgToken(Position);
+ });
+ return Tokens[Position];
+ }
+
+ FormatToken *getPreviousToken() override {
+ assert(Position <= 0 || Tokens[Position - 1]->isNot(tok::eof));
+ return Position > 0 ? Tokens[Position - 1] : nullptr;
+ }
+
+ FormatToken *peekNextToken(bool SkipComment = false) override {
+ if (isEOF())
+ return Tokens[Position];
+ int Next = successor(Position);
+ if (SkipComment)
+ while (Tokens[Next]->is(tok::comment))
+ Next = successor(Next);
+ LLVM_DEBUG({
+ llvm::dbgs() << "Peeking ";
+ dbgToken(Next);
+ });
+ return Tokens[Next];
+ }
+
+ bool isEOF() override {
+ return Position == -1 ? false : Tokens[Position]->is(tok::eof);
+ }
+
+ unsigned getPosition() override {
+ LLVM_DEBUG(llvm::dbgs() << "Getting Position: " << Position << "\n");
+ assert(Position >= 0);
+ return Position;
+ }
+
+ FormatToken *setPosition(unsigned P) override {
+ LLVM_DEBUG(llvm::dbgs() << "Setting Position: " << P << "\n");
+ Position = P;
+ return Tokens[Position];
+ }
+
+ FormatToken *insertTokens(ArrayRef<FormatToken *> New) override {
+ assert(Position != -1);
+ assert((*New.rbegin())->Tok.is(tok::eof));
+ int Next = Tokens.size();
+ Tokens.append(New.begin(), New.end());
+ LLVM_DEBUG({
+ llvm::dbgs() << "Inserting:\n";
+ for (int I = Next, E = Tokens.size(); I != E; ++I)
+ dbgToken(I, " ");
+ llvm::dbgs() << " Jump from: " << (Tokens.size() - 1) << " -> "
+ << Position << "\n";
+ });
+ Jumps[Tokens.size() - 1] = Position;
+ Position = Next;
+ LLVM_DEBUG({
+ llvm::dbgs() << "At inserted token ";
+ dbgToken(Position);
+ });
+ return Tokens[Position];
+ }
+
+ void reset() { Position = -1; }
+
+private:
+ int successor(int Current) const {
+ int Next = Current + 1;
+ auto it = Jumps.find(Next);
+ if (it != Jumps.end()) {
+ Next = it->second;
+ assert(!Jumps.contains(Next));
+ }
+ return Next;
+ }
+
+ void dbgToken(int Position, llvm::StringRef Indent = "") {
+ FormatToken *Tok = Tokens[Position];
+ llvm::dbgs() << Indent << "[" << Position
+ << "] Token: " << Tok->Tok.getName() << " / " << Tok->TokenText
+ << ", Macro: " << !!Tok->MacroCtx << "\n";
+ }
+
+ SmallVector<FormatToken *> Tokens;
+ int Position;
+
+ // Maps from position a to position b, so that when we reach a, the token
+ // stream continues at position b instead.
+ llvm::DenseMap<int, int> Jumps;
+};
+
+class ScopedMacroState : public FormatTokenSource {
+public:
+ ScopedMacroState(UnwrappedLine &Line, FormatTokenSource *&TokenSource,
+ FormatToken *&ResetToken)
+ : Line(Line), TokenSource(TokenSource), ResetToken(ResetToken),
+ PreviousLineLevel(Line.Level), PreviousTokenSource(TokenSource),
+ Token(nullptr), PreviousToken(nullptr) {
+ FakeEOF.Tok.startToken();
+ FakeEOF.Tok.setKind(tok::eof);
+ TokenSource = this;
+ Line.Level = 0;
+ Line.InPPDirective = true;
+ // InMacroBody gets set after the `#define x` part.
+ }
+
+ ~ScopedMacroState() override {
+ TokenSource = PreviousTokenSource;
+ ResetToken = Token;
+ Line.InPPDirective = false;
+ Line.InMacroBody = false;
+ Line.Level = PreviousLineLevel;
+ }
+
+ FormatToken *getNextToken() override {
+ // The \c UnwrappedLineParser guards against this by never calling
+ // \c getNextToken() after it has encountered the first eof token.
+ assert(!eof());
+ PreviousToken = Token;
+ Token = PreviousTokenSource->getNextToken();
+ if (eof())
+ return &FakeEOF;
+ return Token;
+ }
+
+ FormatToken *getPreviousToken() override {
+ return PreviousTokenSource->getPreviousToken();
+ }
+
+ FormatToken *peekNextToken(bool SkipComment) override {
+ if (eof())
+ return &FakeEOF;
+ return PreviousTokenSource->peekNextToken(SkipComment);
+ }
+
+ bool isEOF() override { return PreviousTokenSource->isEOF(); }
+
+ unsigned getPosition() override { return PreviousTokenSource->getPosition(); }
+
+ FormatToken *setPosition(unsigned Position) override {
+ PreviousToken = nullptr;
+ Token = PreviousTokenSource->setPosition(Position);
+ return Token;
+ }
+
+ FormatToken *insertTokens(ArrayRef<FormatToken *> Tokens) override {
+ llvm_unreachable("Cannot insert tokens while parsing a macro.");
+ return nullptr;
+ }
+
+private:
+ bool eof() {
+ return Token && Token->HasUnescapedNewline &&
+ !continuesLineComment(*Token, PreviousToken,
+ /*MinColumnToken=*/PreviousToken);
+ }
+
+ FormatToken FakeEOF;
+ UnwrappedLine &Line;
+ FormatTokenSource *&TokenSource;
+ FormatToken *&ResetToken;
+ unsigned PreviousLineLevel;
+ FormatTokenSource *PreviousTokenSource;
+
+ FormatToken *Token;
+ FormatToken *PreviousToken;
+};
+
+} // namespace format
+} // namespace clang
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp b/contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp
new file mode 100644
index 000000000000..87823ae32b11
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp
@@ -0,0 +1,221 @@
+//===--- IntegerLiteralSeparatorFixer.cpp -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements IntegerLiteralSeparatorFixer that fixes C++ integer
+/// literal separators.
+///
+//===----------------------------------------------------------------------===//
+
+#include "IntegerLiteralSeparatorFixer.h"
+
+namespace clang {
+namespace format {
+
+enum class Base { Binary, Decimal, Hex, Other };
+
+static Base getBase(const StringRef IntegerLiteral) {
+ assert(IntegerLiteral.size() > 1);
+
+ if (IntegerLiteral[0] > '0') {
+ assert(IntegerLiteral[0] <= '9');
+ return Base::Decimal;
+ }
+
+ assert(IntegerLiteral[0] == '0');
+
+ switch (IntegerLiteral[1]) {
+ case 'b':
+ case 'B':
+ return Base::Binary;
+ case 'x':
+ case 'X':
+ return Base::Hex;
+ default:
+ return Base::Other;
+ }
+}
+
+std::pair<tooling::Replacements, unsigned>
+IntegerLiteralSeparatorFixer::process(const Environment &Env,
+ const FormatStyle &Style) {
+ switch (Style.Language) {
+ case FormatStyle::LK_Cpp:
+ case FormatStyle::LK_ObjC:
+ Separator = '\'';
+ break;
+ case FormatStyle::LK_CSharp:
+ case FormatStyle::LK_Java:
+ case FormatStyle::LK_JavaScript:
+ Separator = '_';
+ break;
+ default:
+ return {};
+ }
+
+ const auto &Option = Style.IntegerLiteralSeparator;
+ const auto Binary = Option.Binary;
+ const auto Decimal = Option.Decimal;
+ const auto Hex = Option.Hex;
+ const bool SkipBinary = Binary == 0;
+ const bool SkipDecimal = Decimal == 0;
+ const bool SkipHex = Hex == 0;
+
+ if (SkipBinary && SkipDecimal && SkipHex)
+ return {};
+
+ const auto BinaryMinDigits =
+ std::max((int)Option.BinaryMinDigits, Binary + 1);
+ const auto DecimalMinDigits =
+ std::max((int)Option.DecimalMinDigits, Decimal + 1);
+ const auto HexMinDigits = std::max((int)Option.HexMinDigits, Hex + 1);
+
+ const auto &SourceMgr = Env.getSourceManager();
+ AffectedRangeManager AffectedRangeMgr(SourceMgr, Env.getCharRanges());
+
+ const auto ID = Env.getFileID();
+ const auto LangOpts = getFormattingLangOpts(Style);
+ Lexer Lex(ID, SourceMgr.getBufferOrFake(ID), SourceMgr, LangOpts);
+ Lex.SetCommentRetentionState(true);
+
+ Token Tok;
+ tooling::Replacements Result;
+
+ for (bool Skip = false; !Lex.LexFromRawLexer(Tok);) {
+ auto Length = Tok.getLength();
+ if (Length < 2)
+ continue;
+ auto Location = Tok.getLocation();
+ auto Text = StringRef(SourceMgr.getCharacterData(Location), Length);
+ if (Tok.is(tok::comment)) {
+ if (isClangFormatOff(Text))
+ Skip = true;
+ else if (isClangFormatOn(Text))
+ Skip = false;
+ continue;
+ }
+ if (Skip || Tok.isNot(tok::numeric_constant) || Text[0] == '.' ||
+ !AffectedRangeMgr.affectsCharSourceRange(
+ CharSourceRange::getCharRange(Location, Tok.getEndLoc()))) {
+ continue;
+ }
+ const auto B = getBase(Text);
+ const bool IsBase2 = B == Base::Binary;
+ const bool IsBase10 = B == Base::Decimal;
+ const bool IsBase16 = B == Base::Hex;
+ if ((IsBase2 && SkipBinary) || (IsBase10 && SkipDecimal) ||
+ (IsBase16 && SkipHex) || B == Base::Other) {
+ continue;
+ }
+ if (Style.isCpp()) {
+ // Hex alpha digits a-f/A-F must be at the end of the string literal.
+ StringRef Suffixes = "_himnsuyd";
+ if (const auto Pos =
+ Text.find_first_of(IsBase16 ? Suffixes.drop_back() : Suffixes);
+ Pos != StringRef::npos) {
+ Text = Text.substr(0, Pos);
+ Length = Pos;
+ }
+ }
+ if ((IsBase10 && Text.find_last_of(".eEfFdDmM") != StringRef::npos) ||
+ (IsBase16 && Text.find_last_of(".pP") != StringRef::npos)) {
+ continue;
+ }
+ const auto Start = Text[0] == '0' ? 2 : 0;
+ auto End = Text.find_first_of("uUlLzZn", Start);
+ if (End == StringRef::npos)
+ End = Length;
+ if (Start > 0 || End < Length) {
+ Length = End - Start;
+ Text = Text.substr(Start, Length);
+ }
+ auto DigitsPerGroup = Decimal;
+ auto MinDigits = DecimalMinDigits;
+ if (IsBase2) {
+ DigitsPerGroup = Binary;
+ MinDigits = BinaryMinDigits;
+ } else if (IsBase16) {
+ DigitsPerGroup = Hex;
+ MinDigits = HexMinDigits;
+ }
+ const auto SeparatorCount = Text.count(Separator);
+ const int DigitCount = Length - SeparatorCount;
+ const bool RemoveSeparator = DigitsPerGroup < 0 || DigitCount < MinDigits;
+ if (RemoveSeparator && SeparatorCount == 0)
+ continue;
+ if (!RemoveSeparator && SeparatorCount > 0 &&
+ checkSeparator(Text, DigitsPerGroup)) {
+ continue;
+ }
+ const auto &Formatted =
+ format(Text, DigitsPerGroup, DigitCount, RemoveSeparator);
+ assert(Formatted != Text);
+ if (Start > 0)
+ Location = Location.getLocWithOffset(Start);
+ cantFail(Result.add(
+ tooling::Replacement(SourceMgr, Location, Length, Formatted)));
+ }
+
+ return {Result, 0};
+}
+
+bool IntegerLiteralSeparatorFixer::checkSeparator(
+ const StringRef IntegerLiteral, int DigitsPerGroup) const {
+ assert(DigitsPerGroup > 0);
+
+ int I = 0;
+ for (auto C : llvm::reverse(IntegerLiteral)) {
+ if (C == Separator) {
+ if (I < DigitsPerGroup)
+ return false;
+ I = 0;
+ } else {
+ if (I == DigitsPerGroup)
+ return false;
+ ++I;
+ }
+ }
+
+ return true;
+}
+
+std::string IntegerLiteralSeparatorFixer::format(const StringRef IntegerLiteral,
+ int DigitsPerGroup,
+ int DigitCount,
+ bool RemoveSeparator) const {
+ assert(DigitsPerGroup != 0);
+
+ std::string Formatted;
+
+ if (RemoveSeparator) {
+ for (auto C : IntegerLiteral)
+ if (C != Separator)
+ Formatted.push_back(C);
+ return Formatted;
+ }
+
+ int Remainder = DigitCount % DigitsPerGroup;
+
+ int I = 0;
+ for (auto C : IntegerLiteral) {
+ if (C == Separator)
+ continue;
+ if (I == (Remainder > 0 ? Remainder : DigitsPerGroup)) {
+ Formatted.push_back(Separator);
+ I = 0;
+ Remainder = 0;
+ }
+ Formatted.push_back(C);
+ ++I;
+ }
+
+ return Formatted;
+}
+
+} // namespace format
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.h b/contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.h
new file mode 100644
index 000000000000..2c158e4473bf
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Format/IntegerLiteralSeparatorFixer.h
@@ -0,0 +1,39 @@
+//===--- IntegerLiteralSeparatorFixer.h -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file declares IntegerLiteralSeparatorFixer that fixes C++ integer
+/// literal separators.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_FORMAT_INTEGERLITERALSEPARATORFIXER_H
+#define LLVM_CLANG_LIB_FORMAT_INTEGERLITERALSEPARATORFIXER_H
+
+#include "TokenAnalyzer.h"
+
+namespace clang {
+namespace format {
+
+class IntegerLiteralSeparatorFixer {
+public:
+ std::pair<tooling::Replacements, unsigned> process(const Environment &Env,
+ const FormatStyle &Style);
+
+private:
+ bool checkSeparator(const StringRef IntegerLiteral, int DigitsPerGroup) const;
+ std::string format(const StringRef IntegerLiteral, int DigitsPerGroup,
+ int DigitCount, bool RemoveSeparator) const;
+
+ char Separator;
+};
+
+} // end namespace format
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Format/MacroCallReconstructor.cpp b/contrib/llvm-project/clang/lib/Format/MacroCallReconstructor.cpp
new file mode 100644
index 000000000000..cbdd1683c54d
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Format/MacroCallReconstructor.cpp
@@ -0,0 +1,569 @@
+//===--- MacroCallReconstructor.cpp - Format C++ code -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file contains the implementation of MacroCallReconstructor, which fits
+/// an reconstructed macro call to a parsed set of UnwrappedLines.
+///
+//===----------------------------------------------------------------------===//
+
+#include "Macros.h"
+
+#include "UnwrappedLineParser.h"
+#include "clang/Basic/TokenKinds.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/Support/Debug.h"
+#include <cassert>
+
+#define DEBUG_TYPE "format-reconstruct"
+
+namespace clang {
+namespace format {
+
+// Call \p Call for each token in the unwrapped line given, passing
+// the token, its parent and whether it is the first token in the line.
+template <typename T>
+void forEachToken(const UnwrappedLine &Line, const T &Call,
+ FormatToken *Parent = nullptr) {
+ bool First = true;
+ for (const auto &N : Line.Tokens) {
+ Call(N.Tok, Parent, First);
+ First = false;
+ for (const auto &Child : N.Children)
+ forEachToken(Child, Call, N.Tok);
+ }
+}
+
+MacroCallReconstructor::MacroCallReconstructor(
+ unsigned Level,
+ const llvm::DenseMap<FormatToken *, std::unique_ptr<UnwrappedLine>>
+ &ActiveExpansions)
+ : Level(Level), IdToReconstructed(ActiveExpansions) {
+ Result.Tokens.push_back(std::make_unique<LineNode>());
+ ActiveReconstructedLines.push_back(&Result);
+}
+
+void MacroCallReconstructor::addLine(const UnwrappedLine &Line) {
+ assert(State != Finalized);
+ LLVM_DEBUG(llvm::dbgs() << "MCR: new line...\n");
+ forEachToken(Line, [&](FormatToken *Token, FormatToken *Parent, bool First) {
+ add(Token, Parent, First);
+ });
+ assert(InProgress || finished());
+}
+
+UnwrappedLine MacroCallReconstructor::takeResult() && {
+ finalize();
+ assert(Result.Tokens.size() == 1 &&
+ Result.Tokens.front()->Children.size() == 1);
+ UnwrappedLine Final =
+ createUnwrappedLine(*Result.Tokens.front()->Children.front(), Level);
+ assert(!Final.Tokens.empty());
+ return Final;
+}
+
+// Reconstruct the position of the next \p Token, given its parent \p
+// ExpandedParent in the incoming unwrapped line. \p First specifies whether it
+// is the first token in a given unwrapped line.
+void MacroCallReconstructor::add(FormatToken *Token,
+ FormatToken *ExpandedParent, bool First) {
+ LLVM_DEBUG(
+ llvm::dbgs() << "MCR: Token: " << Token->TokenText << ", Parent: "
+ << (ExpandedParent ? ExpandedParent->TokenText : "<null>")
+ << ", First: " << First << "\n");
+ // In order to be able to find the correct parent in the reconstructed token
+ // stream, we need to continue the last open reconstruction until we find the
+ // given token if it is part of the reconstructed token stream.
+ //
+ // Note that hidden tokens can be part of the reconstructed stream in nested
+ // macro calls.
+ // For example, given
+ // #define C(x, y) x y
+ // #define B(x) {x}
+ // And the call:
+ // C(a, B(b))
+ // The outer macro call will be C(a, {b}), and the hidden token '}' can be
+ // found in the reconstructed token stream of that expansion level.
+ // In the expanded token stream
+ // a {b}
+ // 'b' is a child of '{'. We need to continue the open expansion of the ','
+ // in the call of 'C' in order to correctly set the ',' as the parent of '{',
+ // so we later set the spelled token 'b' as a child of the ','.
+ if (!ActiveExpansions.empty() && Token->MacroCtx &&
+ (Token->MacroCtx->Role != MR_Hidden ||
+ ActiveExpansions.size() != Token->MacroCtx->ExpandedFrom.size())) {
+ if (/*PassedMacroComma = */ reconstructActiveCallUntil(Token))
+ First = true;
+ }
+
+ prepareParent(ExpandedParent, First);
+
+ if (Token->MacroCtx) {
+ // If this token was generated by a macro call, add the reconstructed
+ // equivalent of the token.
+ reconstruct(Token);
+ } else {
+ // Otherwise, we add it to the current line.
+ appendToken(Token);
+ }
+}
+
+// Adjusts the stack of active reconstructed lines so we're ready to push
+// tokens. The tokens to be pushed are children of ExpandedParent in the
+// expanded code.
+//
+// This may entail:
+// - creating a new line, if the parent is on the active line
+// - popping active lines, if the parent is further up the stack
+//
+// Postcondition:
+// ActiveReconstructedLines.back() is the line that has \p ExpandedParent or its
+// reconstructed replacement token as a parent (when possible) - that is, the
+// last token in \c ActiveReconstructedLines[ActiveReconstructedLines.size()-2]
+// is the parent of ActiveReconstructedLines.back() in the reconstructed
+// unwrapped line.
+void MacroCallReconstructor::prepareParent(FormatToken *ExpandedParent,
+ bool NewLine) {
+ LLVM_DEBUG({
+ llvm::dbgs() << "ParentMap:\n";
+ debugParentMap();
+ });
+ // We want to find the parent in the new unwrapped line, where the expanded
+ // parent might have been replaced during reconstruction.
+ FormatToken *Parent = getParentInResult(ExpandedParent);
+ LLVM_DEBUG(llvm::dbgs() << "MCR: New parent: "
+ << (Parent ? Parent->TokenText : "<null>") << "\n");
+
+ FormatToken *OpenMacroParent = nullptr;
+ if (!MacroCallStructure.empty()) {
+ // Inside a macro expansion, it is possible to lose track of the correct
+ // parent - either because it is already popped, for example because it was
+ // in a different macro argument (e.g. M({, })), or when we work on invalid
+ // code.
+ // Thus, we use the innermost macro call's parent as the parent at which
+ // we stop; this allows us to stay within the macro expansion and keeps
+ // any problems confined to the extent of the macro call.
+ OpenMacroParent =
+ getParentInResult(MacroCallStructure.back().MacroCallLParen);
+ LLVM_DEBUG(llvm::dbgs()
+ << "MacroCallLParen: "
+ << MacroCallStructure.back().MacroCallLParen->TokenText
+ << ", OpenMacroParent: "
+ << (OpenMacroParent ? OpenMacroParent->TokenText : "<null>")
+ << "\n");
+ }
+ if (NewLine ||
+ (!ActiveReconstructedLines.back()->Tokens.empty() &&
+ Parent == ActiveReconstructedLines.back()->Tokens.back()->Tok)) {
+ // If we are at the first token in a new line, we want to also
+ // create a new line in the resulting reconstructed unwrapped line.
+ while (ActiveReconstructedLines.back()->Tokens.empty() ||
+ (Parent != ActiveReconstructedLines.back()->Tokens.back()->Tok &&
+ ActiveReconstructedLines.back()->Tokens.back()->Tok !=
+ OpenMacroParent)) {
+ ActiveReconstructedLines.pop_back();
+ assert(!ActiveReconstructedLines.empty());
+ }
+ assert(!ActiveReconstructedLines.empty());
+ ActiveReconstructedLines.back()->Tokens.back()->Children.push_back(
+ std::make_unique<ReconstructedLine>());
+ ActiveReconstructedLines.push_back(
+ &*ActiveReconstructedLines.back()->Tokens.back()->Children.back());
+ } else if (parentLine().Tokens.back()->Tok != Parent) {
+ // If we're not the first token in a new line, pop lines until we find
+ // the child of \c Parent in the stack.
+ while (Parent != parentLine().Tokens.back()->Tok &&
+ parentLine().Tokens.back()->Tok &&
+ parentLine().Tokens.back()->Tok != OpenMacroParent) {
+ ActiveReconstructedLines.pop_back();
+ assert(!ActiveReconstructedLines.empty());
+ }
+ }
+ assert(!ActiveReconstructedLines.empty());
+}
+
+// For a given \p Parent in the incoming expanded token stream, find the
+// corresponding parent in the output.
+FormatToken *MacroCallReconstructor::getParentInResult(FormatToken *Parent) {
+ FormatToken *Mapped = SpelledParentToReconstructedParent.lookup(Parent);
+ if (!Mapped)
+ return Parent;
+ for (; Mapped; Mapped = SpelledParentToReconstructedParent.lookup(Parent))
+ Parent = Mapped;
+ // If we use a different token than the parent in the expanded token stream
+ // as parent, mark it as a special parent, so the formatting code knows it
+ // needs to have its children formatted.
+ Parent->MacroParent = true;
+ return Parent;
+}
+
+// Reconstruct a \p Token that was expanded from a macro call.
+void MacroCallReconstructor::reconstruct(FormatToken *Token) {
+ assert(Token->MacroCtx);
+ // A single token can be the only result of a macro call:
+ // Given: #define ID(x, y) ;
+ // And the call: ID(<some>, <tokens>)
+ // ';' in the expanded stream will reconstruct all of ID(<some>, <tokens>).
+ if (Token->MacroCtx->StartOfExpansion) {
+ startReconstruction(Token);
+ // If the order of tokens in the expanded token stream is not the
+ // same as the order of tokens in the reconstructed stream, we need
+ // to reconstruct tokens that arrive later in the stream.
+ if (Token->MacroCtx->Role != MR_Hidden)
+ reconstructActiveCallUntil(Token);
+ }
+ assert(!ActiveExpansions.empty());
+ if (ActiveExpansions.back().SpelledI != ActiveExpansions.back().SpelledE) {
+ assert(ActiveExpansions.size() == Token->MacroCtx->ExpandedFrom.size());
+ if (Token->MacroCtx->Role != MR_Hidden) {
+ // The current token in the reconstructed token stream must be the token
+ // we're looking for - we either arrive here after startReconstruction,
+ // which initiates the stream to the first token, or after
+ // continueReconstructionUntil skipped until the expected token in the
+ // reconstructed stream at the start of add(...).
+ assert(ActiveExpansions.back().SpelledI->Tok == Token);
+ processNextReconstructed();
+ } else if (!currentLine()->Tokens.empty()) {
+ // Map all hidden tokens to the last visible token in the output.
+ // If the hidden token is a parent, we'll use the last visible
+ // token as the parent of the hidden token's children.
+ SpelledParentToReconstructedParent[Token] =
+ currentLine()->Tokens.back()->Tok;
+ } else {
+ for (auto I = ActiveReconstructedLines.rbegin(),
+ E = ActiveReconstructedLines.rend();
+ I != E; ++I) {
+ if (!(*I)->Tokens.empty()) {
+ SpelledParentToReconstructedParent[Token] = (*I)->Tokens.back()->Tok;
+ break;
+ }
+ }
+ }
+ }
+ if (Token->MacroCtx->EndOfExpansion)
+ endReconstruction(Token);
+}
+
+// Given a \p Token that starts an expansion, reconstruct the beginning of the
+// macro call.
+// For example, given: #define ID(x) x
+// And the call: ID(int a)
+// Reconstructs: ID(
+void MacroCallReconstructor::startReconstruction(FormatToken *Token) {
+ assert(Token->MacroCtx);
+ assert(!Token->MacroCtx->ExpandedFrom.empty());
+ assert(ActiveExpansions.size() <= Token->MacroCtx->ExpandedFrom.size());
+#ifndef NDEBUG
+ // Check that the token's reconstruction stack matches our current
+ // reconstruction stack.
+ for (size_t I = 0; I < ActiveExpansions.size(); ++I) {
+ assert(ActiveExpansions[I].ID ==
+ Token->MacroCtx
+ ->ExpandedFrom[Token->MacroCtx->ExpandedFrom.size() - 1 - I]);
+ }
+#endif
+ // Start reconstruction for all calls for which this token is the first token
+ // generated by the call.
+ // Note that the token's expanded from stack is inside-to-outside, and the
+ // expansions for which this token is not the first are the outermost ones.
+ ArrayRef<FormatToken *> StartedMacros =
+ ArrayRef(Token->MacroCtx->ExpandedFrom)
+ .drop_back(ActiveExpansions.size());
+ assert(StartedMacros.size() == Token->MacroCtx->StartOfExpansion);
+ // We reconstruct macro calls outside-to-inside.
+ for (FormatToken *ID : llvm::reverse(StartedMacros)) {
+ // We found a macro call to be reconstructed; the next time our
+ // reconstruction stack is empty we know we finished an reconstruction.
+#ifndef NDEBUG
+ State = InProgress;
+#endif
+ // Put the reconstructed macro call's token into our reconstruction stack.
+ auto IU = IdToReconstructed.find(ID);
+ assert(IU != IdToReconstructed.end());
+ ActiveExpansions.push_back(
+ {ID, IU->second->Tokens.begin(), IU->second->Tokens.end()});
+ // Process the macro call's identifier.
+ processNextReconstructed();
+ if (ActiveExpansions.back().SpelledI == ActiveExpansions.back().SpelledE)
+ continue;
+ if (ActiveExpansions.back().SpelledI->Tok->is(tok::l_paren)) {
+ // Process the optional opening parenthesis.
+ processNextReconstructed();
+ }
+ }
+}
+
+// Add all tokens in the reconstruction stream to the output until we find the
+// given \p Token.
+bool MacroCallReconstructor::reconstructActiveCallUntil(FormatToken *Token) {
+ assert(!ActiveExpansions.empty());
+ bool PassedMacroComma = false;
+ // FIXME: If Token was already expanded earlier, due to
+ // a change in order, we will not find it, but need to
+ // skip it.
+ while (ActiveExpansions.back().SpelledI != ActiveExpansions.back().SpelledE &&
+ ActiveExpansions.back().SpelledI->Tok != Token) {
+ PassedMacroComma = processNextReconstructed() || PassedMacroComma;
+ }
+ return PassedMacroComma;
+}
+
+// End all reconstructions for which \p Token is the final token.
+void MacroCallReconstructor::endReconstruction(FormatToken *Token) {
+ assert(Token->MacroCtx &&
+ (ActiveExpansions.size() >= Token->MacroCtx->EndOfExpansion));
+ for (size_t I = 0; I < Token->MacroCtx->EndOfExpansion; ++I) {
+ LLVM_DEBUG([&] {
+ // Check all remaining tokens but the final closing parenthesis and
+ // optional trailing comment were already reconstructed at an inner
+ // expansion level.
+ for (auto T = ActiveExpansions.back().SpelledI;
+ T != ActiveExpansions.back().SpelledE; ++T) {
+ FormatToken *Token = T->Tok;
+ bool ClosingParen = (std::next(T) == ActiveExpansions.back().SpelledE ||
+ std::next(T)->Tok->isTrailingComment()) &&
+ !Token->MacroCtx && Token->is(tok::r_paren);
+ bool TrailingComment = Token->isTrailingComment();
+ bool PreviousLevel =
+ Token->MacroCtx &&
+ (ActiveExpansions.size() < Token->MacroCtx->ExpandedFrom.size());
+ if (!ClosingParen && !TrailingComment && !PreviousLevel)
+ llvm::dbgs() << "At token: " << Token->TokenText << "\n";
+ // In addition to the following cases, we can also run into this
+ // when a macro call had more arguments than expected; in that case,
+ // the comma and the remaining tokens in the macro call will
+ // potentially end up in the line when we finish the expansion.
+ // FIXME: Add the information which arguments are unused, and assert
+ // one of the cases below plus reconstructed macro argument tokens.
+ // assert(ClosingParen || TrailingComment || PreviousLevel);
+ }
+ }());
+ // Handle the remaining open tokens:
+ // - expand the closing parenthesis, if it exists, including an optional
+ // trailing comment
+ // - handle tokens that were already reconstructed at an inner expansion
+ // level
+ // - handle tokens when a macro call had more than the expected number of
+ // arguments, i.e. when #define M(x) is called as M(a, b, c) we'll end
+ // up with the sequence ", b, c)" being open at the end of the
+ // reconstruction; we want to gracefully handle that case
+ //
+ // FIXME: See the above debug-check for what we will need to do to be
+ // able to assert this.
+ for (auto T = ActiveExpansions.back().SpelledI;
+ T != ActiveExpansions.back().SpelledE; ++T) {
+ processNextReconstructed();
+ }
+ ActiveExpansions.pop_back();
+ }
+}
+
+void MacroCallReconstructor::debugParentMap() const {
+ llvm::DenseSet<FormatToken *> Values;
+ for (const auto &P : SpelledParentToReconstructedParent)
+ Values.insert(P.second);
+
+ for (const auto &P : SpelledParentToReconstructedParent) {
+ if (Values.contains(P.first))
+ continue;
+ llvm::dbgs() << (P.first ? P.first->TokenText : "<null>");
+ for (auto I = SpelledParentToReconstructedParent.find(P.first),
+ E = SpelledParentToReconstructedParent.end();
+ I != E; I = SpelledParentToReconstructedParent.find(I->second)) {
+ llvm::dbgs() << " -> " << (I->second ? I->second->TokenText : "<null>");
+ }
+ llvm::dbgs() << "\n";
+ }
+}
+
+// If visible, add the next token of the reconstructed token sequence to the
+// output. Returns whether reconstruction passed a comma that is part of a
+// macro call.
+bool MacroCallReconstructor::processNextReconstructed() {
+ FormatToken *Token = ActiveExpansions.back().SpelledI->Tok;
+ ++ActiveExpansions.back().SpelledI;
+ if (Token->MacroCtx) {
+ // Skip tokens that are not part of the macro call.
+ if (Token->MacroCtx->Role == MR_Hidden)
+ return false;
+ // Skip tokens we already expanded during an inner reconstruction.
+ // For example, given: #define ID(x) {x}
+ // And the call: ID(ID(f))
+ // We get two reconstructions:
+ // ID(f) -> {f}
+ // ID({f}) -> {{f}}
+ // We reconstruct f during the first reconstruction, and skip it during the
+ // second reconstruction.
+ if (ActiveExpansions.size() < Token->MacroCtx->ExpandedFrom.size())
+ return false;
+ }
+ // Tokens that do not have a macro context are tokens in that are part of the
+ // macro call that have not taken part in expansion.
+ if (!Token->MacroCtx) {
+ // Put the parentheses and commas of a macro call into the same line;
+ // if the arguments produce new unwrapped lines, they will become children
+ // of the corresponding opening parenthesis or comma tokens in the
+ // reconstructed call.
+ if (Token->is(tok::l_paren)) {
+ MacroCallStructure.push_back(MacroCallState(
+ currentLine(), parentLine().Tokens.back()->Tok, Token));
+ // All tokens that are children of the previous line's last token in the
+ // reconstructed token stream will now be children of the l_paren token.
+ // For example, for the line containing the macro calls:
+ // auto x = ID({ID(2)});
+ // We will build up a map <null> -> ( -> ( with the first and second
+ // l_paren of the macro call respectively. New lines that come in with a
+ // <null> parent will then become children of the l_paren token of the
+ // currently innermost macro call.
+ SpelledParentToReconstructedParent[MacroCallStructure.back()
+ .ParentLastToken] = Token;
+ appendToken(Token);
+ prepareParent(Token, /*NewLine=*/true);
+ Token->MacroParent = true;
+ return false;
+ }
+ if (!MacroCallStructure.empty()) {
+ if (Token->is(tok::comma)) {
+ // Make new lines inside the next argument children of the comma token.
+ SpelledParentToReconstructedParent
+ [MacroCallStructure.back().Line->Tokens.back()->Tok] = Token;
+ Token->MacroParent = true;
+ appendToken(Token, MacroCallStructure.back().Line);
+ prepareParent(Token, /*NewLine=*/true);
+ return true;
+ }
+ if (Token->is(tok::r_paren)) {
+ appendToken(Token, MacroCallStructure.back().Line);
+ SpelledParentToReconstructedParent.erase(
+ MacroCallStructure.back().ParentLastToken);
+ MacroCallStructure.pop_back();
+ return false;
+ }
+ }
+ }
+ // Note that any tokens that are tagged with MR_None have been passed as
+ // arguments to the macro that have not been expanded, for example:
+ // Given: #define ID(X) x
+ // When calling: ID(a, b)
+ // 'b' will be part of the reconstructed token stream, but tagged MR_None.
+ // Given that erroring out in this case would be disruptive, we continue
+ // pushing the (unformatted) token.
+ // FIXME: This can lead to unfortunate formatting decisions - give the user
+ // a hint that their macro definition is broken.
+ appendToken(Token);
+ return false;
+}
+
+void MacroCallReconstructor::finalize() {
+#ifndef NDEBUG
+ assert(State != Finalized && finished());
+ State = Finalized;
+#endif
+
+ // We created corresponding unwrapped lines for each incoming line as children
+ // the the toplevel null token.
+ assert(Result.Tokens.size() == 1 && !Result.Tokens.front()->Children.empty());
+ LLVM_DEBUG({
+ llvm::dbgs() << "Finalizing reconstructed lines:\n";
+ debug(Result, 0);
+ });
+
+ // The first line becomes the top level line in the resulting unwrapped line.
+ LineNode &Top = *Result.Tokens.front();
+ auto *I = Top.Children.begin();
+ // Every subsequent line will become a child of the last token in the previous
+ // line, which is the token prior to the first token in the line.
+ LineNode *Last = (*I)->Tokens.back().get();
+ ++I;
+ for (auto *E = Top.Children.end(); I != E; ++I) {
+ assert(Last->Children.empty());
+ Last->Children.push_back(std::move(*I));
+
+ // Mark the previous line's last token as generated by a macro expansion
+ // so the formatting algorithm can take that into account.
+ Last->Tok->MacroParent = true;
+
+ Last = Last->Children.back()->Tokens.back().get();
+ }
+ Top.Children.resize(1);
+}
+
+void MacroCallReconstructor::appendToken(FormatToken *Token,
+ ReconstructedLine *L) {
+ L = L ? L : currentLine();
+ LLVM_DEBUG(llvm::dbgs() << "-> " << Token->TokenText << "\n");
+ L->Tokens.push_back(std::make_unique<LineNode>(Token));
+}
+
+UnwrappedLine
+MacroCallReconstructor::createUnwrappedLine(const ReconstructedLine &Line,
+ int Level) {
+ UnwrappedLine Result;
+ Result.Level = Level;
+ for (const auto &N : Line.Tokens) {
+ Result.Tokens.push_back(N->Tok);
+ UnwrappedLineNode &Current = Result.Tokens.back();
+ for (const auto &Child : N->Children) {
+ if (Child->Tokens.empty())
+ continue;
+ Current.Children.push_back(createUnwrappedLine(*Child, Level + 1));
+ }
+ if (Current.Children.size() == 1 &&
+ Current.Tok->isOneOf(tok::l_paren, tok::comma)) {
+ Result.Tokens.splice(Result.Tokens.end(),
+ Current.Children.front().Tokens);
+ Current.Children.clear();
+ }
+ }
+ return Result;
+}
+
+void MacroCallReconstructor::debug(const ReconstructedLine &Line, int Level) {
+ for (int i = 0; i < Level; ++i)
+ llvm::dbgs() << " ";
+ for (const auto &N : Line.Tokens) {
+ if (!N)
+ continue;
+ if (N->Tok)
+ llvm::dbgs() << N->Tok->TokenText << " ";
+ for (const auto &Child : N->Children) {
+ llvm::dbgs() << "\n";
+ debug(*Child, Level + 1);
+ for (int i = 0; i < Level; ++i)
+ llvm::dbgs() << " ";
+ }
+ }
+ llvm::dbgs() << "\n";
+}
+
+MacroCallReconstructor::ReconstructedLine &
+MacroCallReconstructor::parentLine() {
+ return **std::prev(std::prev(ActiveReconstructedLines.end()));
+}
+
+MacroCallReconstructor::ReconstructedLine *
+MacroCallReconstructor::currentLine() {
+ return ActiveReconstructedLines.back();
+}
+
+MacroCallReconstructor::MacroCallState::MacroCallState(
+ MacroCallReconstructor::ReconstructedLine *Line,
+ FormatToken *ParentLastToken, FormatToken *MacroCallLParen)
+ : Line(Line), ParentLastToken(ParentLastToken),
+ MacroCallLParen(MacroCallLParen) {
+ LLVM_DEBUG(
+ llvm::dbgs() << "ParentLastToken: "
+ << (ParentLastToken ? ParentLastToken->TokenText : "<null>")
+ << "\n");
+
+ assert(MacroCallLParen->is(tok::l_paren));
+}
+
+} // namespace format
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Format/MacroExpander.cpp b/contrib/llvm-project/clang/lib/Format/MacroExpander.cpp
index e50c80446963..5a1cdd884c5e 100644
--- a/contrib/llvm-project/clang/lib/Format/MacroExpander.cpp
+++ b/contrib/llvm-project/clang/lib/Format/MacroExpander.cpp
@@ -1,9 +1,8 @@
//===--- MacroExpander.cpp - Format C++ code --------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -53,10 +52,10 @@ public:
Current = Tokens[0];
}
- // Parse the token stream and return the corresonding Definition object.
+ // Parse the token stream and return the corresponding Definition object.
// Returns an empty definition object with a null-Name on error.
MacroExpander::Definition parse() {
- if (!Current->is(tok::identifier))
+ if (Current->isNot(tok::identifier))
return {};
Def.Name = Current->TokenText;
nextToken();
@@ -126,9 +125,8 @@ MacroExpander::MacroExpander(
IdentifierTable &IdentTable)
: SourceMgr(SourceMgr), Style(Style), Allocator(Allocator),
IdentTable(IdentTable) {
- for (const std::string &Macro : Macros) {
+ for (const std::string &Macro : Macros)
parseDefinition(Macro);
- }
}
MacroExpander::~MacroExpander() = default;
@@ -143,24 +141,42 @@ void MacroExpander::parseDefinition(const std::string &Macro) {
if (!Tokens.empty()) {
DefinitionParser Parser(Tokens);
auto Definition = Parser.parse();
- Definitions[Definition.Name] = std::move(Definition);
+ if (Definition.ObjectLike) {
+ ObjectLike[Definition.Name] = std::move(Definition);
+ } else {
+ FunctionLike[Definition.Name][Definition.Params.size()] =
+ std::move(Definition);
+ }
}
}
bool MacroExpander::defined(llvm::StringRef Name) const {
- return Definitions.find(Name) != Definitions.end();
+ return FunctionLike.contains(Name) || ObjectLike.contains(Name);
}
bool MacroExpander::objectLike(llvm::StringRef Name) const {
- return Definitions.find(Name)->second.ObjectLike;
+ return ObjectLike.contains(Name);
}
-llvm::SmallVector<FormatToken *, 8> MacroExpander::expand(FormatToken *ID,
- ArgsList Args) const {
- assert(defined(ID->TokenText));
- SmallVector<FormatToken *, 8> Result;
- const Definition &Def = Definitions.find(ID->TokenText)->second;
+bool MacroExpander::hasArity(llvm::StringRef Name, unsigned Arity) const {
+ auto it = FunctionLike.find(Name);
+ return it != FunctionLike.end() && it->second.contains(Arity);
+}
+llvm::SmallVector<FormatToken *, 8>
+MacroExpander::expand(FormatToken *ID,
+ std::optional<ArgsList> OptionalArgs) const {
+ if (OptionalArgs)
+ assert(hasArity(ID->TokenText, OptionalArgs->size()));
+ else
+ assert(objectLike(ID->TokenText));
+ const Definition &Def = OptionalArgs
+ ? FunctionLike.find(ID->TokenText)
+ ->second.find(OptionalArgs.value().size())
+ ->second
+ : ObjectLike.find(ID->TokenText)->second;
+ ArgsList Args = OptionalArgs ? OptionalArgs.value() : ArgsList();
+ SmallVector<FormatToken *, 8> Result;
// Expand each argument at most once.
llvm::StringSet<> ExpandedArgs;
@@ -175,7 +191,7 @@ llvm::SmallVector<FormatToken *, 8> MacroExpander::expand(FormatToken *ID,
auto expandArgument = [&](FormatToken *Tok) -> bool {
// If the current token references a parameter, expand the corresponding
// argument.
- if (!Tok->is(tok::identifier) || ExpandedArgs.contains(Tok->TokenText))
+ if (Tok->isNot(tok::identifier) || ExpandedArgs.contains(Tok->TokenText))
return false;
ExpandedArgs.insert(Tok->TokenText);
auto I = Def.ArgMap.find(Tok->TokenText);
diff --git a/contrib/llvm-project/clang/lib/Format/Macros.h b/contrib/llvm-project/clang/lib/Format/Macros.h
index 591ef8b5be3c..1964624e828c 100644
--- a/contrib/llvm-project/clang/lib/Format/Macros.h
+++ b/contrib/llvm-project/clang/lib/Format/Macros.h
@@ -1,9 +1,8 @@
-//===--- MacroExpander.h - Format C++ code ----------------------*- C++ -*-===//
+//===--- Macros.h - Format C++ code -----------------------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
@@ -23,40 +22,38 @@
/// spelled token streams into expanded token streams when it encounters a
/// macro call. The UnwrappedLineParser continues to parse UnwrappedLines
/// from the expanded token stream.
-/// After the expanded unwrapped lines are parsed, the MacroUnexpander matches
-/// the spelled token stream into unwrapped lines that best resemble the
-/// structure of the expanded unwrapped lines.
+/// After the expanded unwrapped lines are parsed, the MacroCallReconstructor
+/// matches the spelled token stream into unwrapped lines that best resemble the
+/// structure of the expanded unwrapped lines. These reconstructed unwrapped
+/// lines are aliasing the tokens in the expanded token stream, so that token
+/// annotations will be reused when formatting the spelled macro calls.
///
-/// When formatting, clang-format formats the expanded unwrapped lines first,
-/// determining the token types. Next, it formats the spelled unwrapped lines,
-/// keeping the token types fixed, while allowing other formatting decisions
-/// to change.
+/// When formatting, clang-format annotates and formats the expanded unwrapped
+/// lines first, determining the token types. Next, it formats the spelled
+/// unwrapped lines, keeping the token types fixed, while allowing other
+/// formatting decisions to change.
///
//===----------------------------------------------------------------------===//
#ifndef CLANG_LIB_FORMAT_MACROS_H
#define CLANG_LIB_FORMAT_MACROS_H
+#include <list>
+#include <map>
#include <string>
-#include <unordered_map>
#include <vector>
-#include "Encoding.h"
#include "FormatToken.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
-namespace llvm {
-class MemoryBuffer;
-} // namespace llvm
-
namespace clang {
-class IdentifierTable;
-class SourceManager;
-
namespace format {
-struct FormatStyle;
+
+struct UnwrappedLine;
+struct UnwrappedLineNode;
/// Takes a set of macro definitions as strings and allows expanding calls to
/// those macros.
@@ -109,17 +106,23 @@ public:
IdentifierTable &IdentTable);
~MacroExpander();
- /// Returns whether a macro \p Name is defined.
+ /// Returns whether any macro \p Name is defined, regardless of overloads.
bool defined(llvm::StringRef Name) const;
- /// Returns whether the macro has no arguments and should not consume
- /// subsequent parentheses.
+ /// Returns whetherh there is an object-like overload, i.e. where the macro
+ /// has no arguments and should not consume subsequent parentheses.
bool objectLike(llvm::StringRef Name) const;
+ /// Returns whether macro \p Name provides an overload with the given arity.
+ bool hasArity(llvm::StringRef Name, unsigned Arity) const;
+
/// Returns the expanded stream of format tokens for \p ID, where
/// each element in \p Args is a positional argument to the macro call.
- llvm::SmallVector<FormatToken *, 8> expand(FormatToken *ID,
- ArgsList Args) const;
+ /// If \p Args is not set, the object-like overload is used.
+ /// If \p Args is set, the overload with the arity equal to \c Args.size() is
+ /// used.
+ llvm::SmallVector<FormatToken *, 8>
+ expand(FormatToken *ID, std::optional<ArgsList> OptionalArgs) const;
private:
struct Definition;
@@ -131,8 +134,252 @@ private:
const FormatStyle &Style;
llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator;
IdentifierTable &IdentTable;
- std::vector<std::unique_ptr<llvm::MemoryBuffer>> Buffers;
- llvm::StringMap<Definition> Definitions;
+ SmallVector<std::unique_ptr<llvm::MemoryBuffer>> Buffers;
+ llvm::StringMap<llvm::DenseMap<int, Definition>> FunctionLike;
+ llvm::StringMap<Definition> ObjectLike;
+};
+
+/// Converts a sequence of UnwrappedLines containing expanded macros into a
+/// single UnwrappedLine containing the macro calls. This UnwrappedLine may be
+/// broken into child lines, in a way that best conveys the structure of the
+/// expanded code.
+///
+/// In the simplest case, a spelled UnwrappedLine contains one macro, and after
+/// expanding it we have one expanded UnwrappedLine. In general, macro
+/// expansions can span UnwrappedLines, and multiple macros can contribute
+/// tokens to the same line. We keep consuming expanded lines until:
+/// * all expansions that started have finished (we're not chopping any macros
+/// in half)
+/// * *and* we've reached the end of a *spelled* unwrapped line.
+///
+/// A single UnwrappedLine represents this chunk of code.
+///
+/// After this point, the state of the spelled/expanded stream is "in sync"
+/// (both at the start of an UnwrappedLine, with no macros open), so the
+/// Reconstructor can be thrown away and parsing can continue.
+///
+/// Given a mapping from the macro name identifier token in the macro call
+/// to the tokens of the macro call, for example:
+/// CLASSA -> CLASSA({public: void x();})
+///
+/// When getting the formatted lines of the expansion via the \c addLine method
+/// (each '->' specifies a call to \c addLine ):
+/// -> class A {
+/// -> public:
+/// -> void x();
+/// -> };
+///
+/// Creates the tree of unwrapped lines containing the macro call tokens so that
+/// the macro call tokens fit the semantic structure of the expanded formatted
+/// lines:
+/// -> CLASSA({
+/// -> public:
+/// -> void x();
+/// -> })
+class MacroCallReconstructor {
+public:
+ /// Create an Reconstructor whose resulting \p UnwrappedLine will start at
+ /// \p Level, using the map from name identifier token to the corresponding
+ /// tokens of the spelled macro call.
+ MacroCallReconstructor(
+ unsigned Level,
+ const llvm::DenseMap<FormatToken *, std::unique_ptr<UnwrappedLine>>
+ &ActiveExpansions);
+
+ /// For the given \p Line, match all occurences of tokens expanded from a
+ /// macro to unwrapped lines in the spelled macro call so that the resulting
+ /// tree of unwrapped lines best resembles the structure of unwrapped lines
+ /// passed in via \c addLine.
+ void addLine(const UnwrappedLine &Line);
+
+ /// Check whether at the current state there is no open macro expansion
+ /// that needs to be processed to finish an macro call.
+ /// Only when \c finished() is true, \c takeResult() can be called to retrieve
+ /// the resulting \c UnwrappedLine.
+ /// If there are multiple subsequent macro calls within an unwrapped line in
+ /// the spelled token stream, the calling code may also continue to call
+ /// \c addLine() when \c finished() is true.
+ bool finished() const { return ActiveExpansions.empty(); }
+
+ /// Retrieve the formatted \c UnwrappedLine containing the orginal
+ /// macro calls, formatted according to the expanded token stream received
+ /// via \c addLine().
+ /// Generally, this line tries to have the same structure as the expanded,
+ /// formatted unwrapped lines handed in via \c addLine(), with the exception
+ /// that for multiple top-level lines, each subsequent line will be the
+ /// child of the last token in its predecessor. This representation is chosen
+ /// because it is a precondition to the formatter that we get what looks like
+ /// a single statement in a single \c UnwrappedLine (i.e. matching parens).
+ ///
+ /// If a token in a macro argument is a child of a token in the expansion,
+ /// the parent will be the corresponding token in the macro call.
+ /// For example:
+ /// #define C(a, b) class C { a b
+ /// C(int x;, int y;)
+ /// would expand to
+ /// class C { int x; int y;
+ /// where in a formatted line "int x;" and "int y;" would both be new separate
+ /// lines.
+ ///
+ /// In the result, "int x;" will be a child of the opening parenthesis in "C("
+ /// and "int y;" will be a child of the "," token:
+ /// C (
+ /// \- int x;
+ /// ,
+ /// \- int y;
+ /// )
+ UnwrappedLine takeResult() &&;
+
+private:
+ void add(FormatToken *Token, FormatToken *ExpandedParent, bool First);
+ void prepareParent(FormatToken *ExpandedParent, bool First);
+ FormatToken *getParentInResult(FormatToken *Parent);
+ void reconstruct(FormatToken *Token);
+ void startReconstruction(FormatToken *Token);
+ bool reconstructActiveCallUntil(FormatToken *Token);
+ void endReconstruction(FormatToken *Token);
+ bool processNextReconstructed();
+ void finalize();
+
+ struct ReconstructedLine;
+
+ void appendToken(FormatToken *Token, ReconstructedLine *L = nullptr);
+ UnwrappedLine createUnwrappedLine(const ReconstructedLine &Line, int Level);
+ void debug(const ReconstructedLine &Line, int Level);
+ ReconstructedLine &parentLine();
+ ReconstructedLine *currentLine();
+ void debugParentMap() const;
+
+#ifndef NDEBUG
+ enum ReconstructorState {
+ Start, // No macro expansion was found in the input yet.
+ InProgress, // During a macro reconstruction.
+ Finalized, // Past macro reconstruction, the result is finalized.
+ };
+ ReconstructorState State = Start;
+#endif
+
+ // Node in which we build up the resulting unwrapped line; this type is
+ // analogous to UnwrappedLineNode.
+ struct LineNode {
+ LineNode() = default;
+ LineNode(FormatToken *Tok) : Tok(Tok) {}
+ FormatToken *Tok = nullptr;
+ llvm::SmallVector<std::unique_ptr<ReconstructedLine>> Children;
+ };
+
+ // Line in which we build up the resulting unwrapped line.
+ // FIXME: Investigate changing UnwrappedLine to a pointer type and using it
+ // instead of rolling our own type.
+ struct ReconstructedLine {
+ llvm::SmallVector<std::unique_ptr<LineNode>> Tokens;
+ };
+
+ // The line in which we collect the resulting reconstructed output.
+ // To reduce special cases in the algorithm, the first level of the line
+ // contains a single null token that has the reconstructed incoming
+ // lines as children.
+ // In the end, we stich the lines together so that each subsequent line
+ // is a child of the last token of the previous line. This is necessary
+ // in order to format the overall expression as a single logical line -
+ // if we created separate lines, we'd format them with their own top-level
+ // indent depending on the semantic structure, which is not desired.
+ ReconstructedLine Result;
+
+ // Stack of currently "open" lines, where each line's predecessor's last
+ // token is the parent token for that line.
+ llvm::SmallVector<ReconstructedLine *> ActiveReconstructedLines;
+
+ // Maps from the expanded token to the token that takes its place in the
+ // reconstructed token stream in terms of parent-child relationships.
+ // Note that it might take multiple steps to arrive at the correct
+ // parent in the output.
+ // Given: #define C(a, b) []() { a; b; }
+ // And a call: C(f(), g())
+ // The structure in the incoming formatted unwrapped line will be:
+ // []() {
+ // |- f();
+ // \- g();
+ // }
+ // with f and g being children of the opening brace.
+ // In the reconstructed call:
+ // C(f(), g())
+ // \- f()
+ // \- g()
+ // We want f to be a child of the opening parenthesis and g to be a child
+ // of the comma token in the macro call.
+ // Thus, we map
+ // { -> (
+ // and add
+ // ( -> ,
+ // once we're past the comma in the reconstruction.
+ llvm::DenseMap<FormatToken *, FormatToken *>
+ SpelledParentToReconstructedParent;
+
+ // Keeps track of a single expansion while we're reconstructing tokens it
+ // generated.
+ struct Expansion {
+ // The identifier token of the macro call.
+ FormatToken *ID;
+ // Our current position in the reconstruction.
+ std::list<UnwrappedLineNode>::iterator SpelledI;
+ // The end of the reconstructed token sequence.
+ std::list<UnwrappedLineNode>::iterator SpelledE;
+ };
+
+ // Stack of macro calls for which we're in the middle of an expansion.
+ llvm::SmallVector<Expansion> ActiveExpansions;
+
+ struct MacroCallState {
+ MacroCallState(ReconstructedLine *Line, FormatToken *ParentLastToken,
+ FormatToken *MacroCallLParen);
+
+ ReconstructedLine *Line;
+
+ // The last token in the parent line or expansion, or nullptr if the macro
+ // expansion is on a top-level line.
+ //
+ // For example, in the macro call:
+ // auto f = []() { ID(1); };
+ // The MacroCallState for ID will have '{' as ParentLastToken.
+ //
+ // In the macro call:
+ // ID(ID(void f()));
+ // The MacroCallState of the outer ID will have nullptr as ParentLastToken,
+ // while the MacroCallState for the inner ID will have the '(' of the outer
+ // ID as ParentLastToken.
+ //
+ // In the macro call:
+ // ID2(a, ID(b));
+ // The MacroCallState of ID will have ',' as ParentLastToken.
+ FormatToken *ParentLastToken;
+
+ // The l_paren of this MacroCallState's macro call.
+ FormatToken *MacroCallLParen;
+ };
+
+ // Keeps track of the lines into which the opening brace/parenthesis &
+ // argument separating commas for each level in the macro call go in order to
+ // put the corresponding closing brace/parenthesis into the same line in the
+ // output and keep track of which parents in the expanded token stream map to
+ // which tokens in the reconstructed stream.
+ // When an opening brace/parenthesis has children, we want the structure of
+ // the output line to be:
+ // |- MACRO
+ // |- (
+ // | \- <argument>
+ // |- ,
+ // | \- <argument>
+ // \- )
+ llvm::SmallVector<MacroCallState> MacroCallStructure;
+
+ // Level the generated UnwrappedLine will be at.
+ const unsigned Level;
+
+ // Maps from identifier of the macro call to an unwrapped line containing
+ // all tokens of the macro call.
+ const llvm::DenseMap<FormatToken *, std::unique_ptr<UnwrappedLine>>
+ &IdToReconstructed;
};
} // namespace format
diff --git a/contrib/llvm-project/clang/lib/Format/MatchFilePath.cpp b/contrib/llvm-project/clang/lib/Format/MatchFilePath.cpp
new file mode 100644
index 000000000000..062b334dcdd8
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Format/MatchFilePath.cpp
@@ -0,0 +1,122 @@
+//===--- MatchFilePath.cpp - Match file path with pattern -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the functionality of matching a file path name to
+/// a pattern, similar to the POSIX fnmatch() function.
+///
+//===----------------------------------------------------------------------===//
+
+#include "MatchFilePath.h"
+
+using namespace llvm;
+
+namespace clang {
+namespace format {
+
+// Check whether `FilePath` matches `Pattern` based on POSIX 2.13.1, 2.13.2, and
+// Rule 1 of 2.13.3.
+bool matchFilePath(StringRef Pattern, StringRef FilePath) {
+ assert(!Pattern.empty());
+ assert(!FilePath.empty());
+
+ // No match if `Pattern` ends with a non-meta character not equal to the last
+ // character of `FilePath`.
+ if (const auto C = Pattern.back(); !strchr("?*]", C) && C != FilePath.back())
+ return false;
+
+ constexpr auto Separator = '/';
+ const auto EOP = Pattern.size(); // End of `Pattern`.
+ const auto End = FilePath.size(); // End of `FilePath`.
+ unsigned I = 0; // Index to `Pattern`.
+
+ for (unsigned J = 0; J < End; ++J) {
+ if (I == EOP)
+ return false;
+
+ switch (const auto F = FilePath[J]; Pattern[I]) {
+ case '\\':
+ if (++I == EOP || F != Pattern[I])
+ return false;
+ break;
+ case '?':
+ if (F == Separator)
+ return false;
+ break;
+ case '*': {
+ while (++I < EOP && Pattern[I] == '*') { // Skip consecutive stars.
+ }
+ const auto K = FilePath.find(Separator, J); // Index of next `Separator`.
+ const bool NoMoreSeparatorsInFilePath = K == StringRef::npos;
+ if (I == EOP) // `Pattern` ends with a star.
+ return NoMoreSeparatorsInFilePath;
+ // `Pattern` ends with a lone backslash.
+ if (Pattern[I] == '\\' && ++I == EOP)
+ return false;
+ // The star is followed by a (possibly escaped) `Separator`.
+ if (Pattern[I] == Separator) {
+ if (NoMoreSeparatorsInFilePath)
+ return false;
+ J = K; // Skip to next `Separator` in `FilePath`.
+ break;
+ }
+ // Recurse.
+ for (auto Pat = Pattern.substr(I); J < End && FilePath[J] != Separator;
+ ++J) {
+ if (matchFilePath(Pat, FilePath.substr(J)))
+ return true;
+ }
+ return false;
+ }
+ case '[':
+ // Skip e.g. `[!]`.
+ if (I + 3 < EOP || (I + 3 == EOP && Pattern[I + 1] != '!')) {
+ // Skip unpaired `[`, brackets containing slashes, and `[]`.
+ if (const auto K = Pattern.find_first_of("]/", I + 1);
+ K != StringRef::npos && Pattern[K] == ']' && K > I + 1) {
+ if (F == Separator)
+ return false;
+ ++I; // After the `[`.
+ bool Negated = false;
+ if (Pattern[I] == '!') {
+ Negated = true;
+ ++I; // After the `!`.
+ }
+ bool Match = false;
+ do {
+ if (I + 2 < K && Pattern[I + 1] == '-') {
+ Match = Pattern[I] <= F && F <= Pattern[I + 2];
+ I += 3; // After the range, e.g. `A-Z`.
+ } else {
+ Match = F == Pattern[I++];
+ }
+ } while (!Match && I < K);
+ if (Negated ? Match : !Match)
+ return false;
+ I = K + 1; // After the `]`.
+ continue;
+ }
+ }
+ [[fallthrough]]; // Match `[` literally.
+ default:
+ if (F != Pattern[I])
+ return false;
+ }
+
+ ++I;
+ }
+
+ // Match trailing stars with null strings.
+ while (I < EOP && Pattern[I] == '*')
+ ++I;
+
+ return I == EOP;
+}
+
+} // namespace format
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Format/MatchFilePath.h b/contrib/llvm-project/clang/lib/Format/MatchFilePath.h
new file mode 100644
index 000000000000..482dab7c748e
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Format/MatchFilePath.h
@@ -0,0 +1,22 @@
+//===--- MatchFilePath.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_FORMAT_MATCHFILEPATH_H
+#define LLVM_CLANG_LIB_FORMAT_MATCHFILEPATH_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace clang {
+namespace format {
+
+bool matchFilePath(llvm::StringRef Pattern, llvm::StringRef FilePath);
+
+} // end namespace format
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp b/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp
index def551f863cd..08f8d6840fe0 100644
--- a/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "NamespaceEndCommentsFixer.h"
+#include "clang/Basic/TokenKinds.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Regex.h"
@@ -22,13 +23,47 @@ namespace clang {
namespace format {
namespace {
+// Iterates all tokens starting from StartTok to EndTok and apply Fn to all
+// tokens between them including StartTok and EndTok. Returns the token after
+// EndTok.
+const FormatToken *
+processTokens(const FormatToken *Tok, tok::TokenKind StartTok,
+ tok::TokenKind EndTok,
+ llvm::function_ref<void(const FormatToken *)> Fn) {
+ if (!Tok || Tok->isNot(StartTok))
+ return Tok;
+ int NestLevel = 0;
+ do {
+ if (Tok->is(StartTok))
+ ++NestLevel;
+ else if (Tok->is(EndTok))
+ --NestLevel;
+ if (Fn)
+ Fn(Tok);
+ Tok = Tok->getNextNonComment();
+ } while (Tok && NestLevel > 0);
+ return Tok;
+}
+
+const FormatToken *skipAttribute(const FormatToken *Tok) {
+ if (!Tok)
+ return nullptr;
+ if (Tok->isAttribute()) {
+ Tok = Tok->getNextNonComment();
+ Tok = processTokens(Tok, tok::l_paren, tok::r_paren, nullptr);
+ } else if (Tok->is(tok::l_square)) {
+ Tok = processTokens(Tok, tok::l_square, tok::r_square, nullptr);
+ }
+ return Tok;
+}
+
// Computes the name of a namespace given the namespace token.
// Returns "" for anonymous namespace.
std::string computeName(const FormatToken *NamespaceTok) {
assert(NamespaceTok &&
NamespaceTok->isOneOf(tok::kw_namespace, TT_NamespaceMacro) &&
"expecting a namespace token");
- std::string name = "";
+ std::string name;
const FormatToken *Tok = NamespaceTok->getNextNonComment();
if (NamespaceTok->is(TT_NamespaceMacro)) {
// Collects all the non-comment tokens between opening parenthesis
@@ -39,26 +74,69 @@ std::string computeName(const FormatToken *NamespaceTok) {
name += Tok->TokenText;
Tok = Tok->getNextNonComment();
}
- } else {
- // For `namespace [[foo]] A::B::inline C {` or
- // `namespace MACRO1 MACRO2 A::B::inline C {`, returns "A::B::inline C".
- // Peek for the first '::' (or '{') and then return all tokens from one
- // token before that up until the '{'.
- const FormatToken *FirstNSTok = Tok;
- while (Tok && !Tok->is(tok::l_brace) && !Tok->is(tok::coloncolon)) {
- FirstNSTok = Tok;
- Tok = Tok->getNextNonComment();
- }
+ return name;
+ }
+ Tok = skipAttribute(Tok);
+
+ std::string FirstNSName;
+ // For `namespace [[foo]] A::B::inline C {` or
+ // `namespace MACRO1 MACRO2 A::B::inline C {`, returns "A::B::inline C".
+ // Peek for the first '::' (or '{' or '(')) and then return all tokens from
+ // one token before that up until the '{'. A '(' might be a macro with
+ // arguments.
+ const FormatToken *FirstNSTok = nullptr;
+ while (Tok && !Tok->isOneOf(tok::l_brace, tok::coloncolon, tok::l_paren)) {
+ if (FirstNSTok)
+ FirstNSName += FirstNSTok->TokenText;
+ FirstNSTok = Tok;
+ Tok = Tok->getNextNonComment();
+ }
+ if (FirstNSTok)
Tok = FirstNSTok;
- while (Tok && !Tok->is(tok::l_brace)) {
- name += Tok->TokenText;
- if (Tok->is(tok::kw_inline))
+ Tok = skipAttribute(Tok);
+
+ FirstNSTok = nullptr;
+ // Add everything from '(' to ')'.
+ auto AddToken = [&name](const FormatToken *Tok) { name += Tok->TokenText; };
+ bool IsPrevColoncolon = false;
+ bool HasColoncolon = false;
+ bool IsPrevInline = false;
+ bool NameFinished = false;
+ // If we found '::' in name, then it's the name. Otherwise, we can't tell
+ // which one is name. For example, `namespace A B {`.
+ while (Tok && Tok->isNot(tok::l_brace)) {
+ if (FirstNSTok) {
+ if (!IsPrevInline && HasColoncolon && !IsPrevColoncolon) {
+ if (FirstNSTok->is(tok::l_paren)) {
+ FirstNSTok = Tok =
+ processTokens(FirstNSTok, tok::l_paren, tok::r_paren, AddToken);
+ continue;
+ }
+ if (FirstNSTok->isNot(tok::coloncolon)) {
+ NameFinished = true;
+ break;
+ }
+ }
+ name += FirstNSTok->TokenText;
+ IsPrevColoncolon = FirstNSTok->is(tok::coloncolon);
+ HasColoncolon = HasColoncolon || IsPrevColoncolon;
+ if (FirstNSTok->is(tok::kw_inline)) {
name += " ";
- Tok = Tok->getNextNonComment();
+ IsPrevInline = true;
+ }
}
+ FirstNSTok = Tok;
+ Tok = Tok->getNextNonComment();
+ const FormatToken *TokAfterAttr = skipAttribute(Tok);
+ if (TokAfterAttr != Tok)
+ FirstNSTok = Tok = TokAfterAttr;
}
- return name;
+ if (!NameFinished && FirstNSTok && FirstNSTok->isNot(tok::l_brace))
+ name += FirstNSTok->TokenText;
+ if (FirstNSName.empty() || HasColoncolon)
+ return name;
+ return name.empty() ? FirstNSName : FirstNSName + " " + name;
}
std::string computeEndCommentText(StringRef NamespaceName, bool AddNewline,
@@ -92,11 +170,11 @@ bool validEndComment(const FormatToken *RBraceTok, StringRef NamespaceName,
// Valid namespace end comments don't need to be edited.
static const llvm::Regex NamespaceCommentPattern =
llvm::Regex("^/[/*] *(end (of )?)? *(anonymous|unnamed)? *"
- "namespace( +([a-zA-Z0-9:_]+))?\\.? *(\\*/)?$",
+ "namespace( +([a-zA-Z0-9:_ ]+))?\\.? *(\\*/)?$",
llvm::Regex::IgnoreCase);
static const llvm::Regex NamespaceMacroCommentPattern =
llvm::Regex("^/[/*] *(end (of )?)? *(anonymous|unnamed)? *"
- "([a-zA-Z0-9_]+)\\(([a-zA-Z0-9:_]*)\\)\\.? *(\\*/)?$",
+ "([a-zA-Z0-9_]+)\\(([a-zA-Z0-9:_]*|\".+\")\\)\\.? *(\\*/)?$",
llvm::Regex::IgnoreCase);
SmallVector<StringRef, 8> Groups;
@@ -111,7 +189,7 @@ bool validEndComment(const FormatToken *RBraceTok, StringRef NamespaceName,
// Comment does not match regex.
return false;
}
- StringRef NamespaceNameInComment = Groups.size() > 5 ? Groups[5] : "";
+ StringRef NamespaceNameInComment = Groups.size() > 5 ? Groups[5].rtrim() : "";
// Anonymous namespace comments must not mention a namespace name.
if (NamespaceName.empty() && !NamespaceNameInComment.empty())
return false;
@@ -132,12 +210,11 @@ bool validEndComment(const FormatToken *RBraceTok, StringRef NamespaceName,
"^/[/*] *( +([a-zA-Z0-9:_]+))?\\.? *(\\*/)?$", llvm::Regex::IgnoreCase);
// Pull out just the comment text.
- if (!CommentPattern.match(Comment->Next->TokenText, &Groups)) {
+ if (!CommentPattern.match(Comment->Next->TokenText, &Groups))
return false;
- }
NamespaceNameInComment = Groups.size() > 2 ? Groups[2] : "";
- return (NamespaceNameInComment == NamespaceName);
+ return NamespaceNameInComment == NamespaceName;
}
void addEndComment(const FormatToken *RBraceTok, StringRef EndCommentText,
@@ -180,9 +257,13 @@ getNamespaceToken(const AnnotatedLine *Line,
if (NamespaceTok->is(tok::l_brace)) {
// "namespace" keyword can be on the line preceding '{', e.g. in styles
// where BraceWrapping.AfterNamespace is true.
- if (StartLineIndex > 0)
+ if (StartLineIndex > 0) {
NamespaceTok = AnnotatedLines[StartLineIndex - 1]->First;
+ if (AnnotatedLines[StartLineIndex - 1]->endsWith(tok::semi))
+ return nullptr;
+ }
}
+
return NamespaceTok->getNamespaceToken();
}
@@ -206,8 +287,8 @@ std::pair<tooling::Replacements, unsigned> NamespaceEndCommentsFixer::analyze(
// Spin through the lines and ensure we have balanced braces.
int Braces = 0;
- for (size_t I = 0, E = AnnotatedLines.size(); I != E; ++I) {
- FormatToken *Tok = AnnotatedLines[I]->First;
+ for (AnnotatedLine *Line : AnnotatedLines) {
+ FormatToken *Tok = Line->First;
while (Tok) {
Braces += Tok->is(tok::l_brace) ? 1 : Tok->is(tok::r_brace) ? -1 : 0;
Tok = Tok->Next;
@@ -216,11 +297,10 @@ std::pair<tooling::Replacements, unsigned> NamespaceEndCommentsFixer::analyze(
// Don't attempt to comment unbalanced braces or this can
// lead to comments being placed on the closing brace which isn't
// the matching brace of the namespace. (occurs during incomplete editing).
- if (Braces != 0) {
+ if (Braces != 0)
return {Fixes, 0};
- }
- std::string AllNamespaceNames = "";
+ std::string AllNamespaceNames;
size_t StartLineIndex = SIZE_MAX;
StringRef NamespaceTokenText;
unsigned int CompactedNamespacesCount = 0;
@@ -237,9 +317,8 @@ std::pair<tooling::Replacements, unsigned> NamespaceEndCommentsFixer::analyze(
const FormatToken *EndCommentPrevTok = RBraceTok;
// Namespaces often end with '};'. In that case, attach namespace end
// comments to the semicolon tokens.
- if (RBraceTok->Next && RBraceTok->Next->is(tok::semi)) {
+ if (RBraceTok->Next && RBraceTok->Next->is(tok::semi))
EndCommentPrevTok = RBraceTok->Next;
- }
if (StartLineIndex == SIZE_MAX)
StartLineIndex = EndLine->MatchingOpeningBlockLineIndex;
std::string NamespaceName = computeName(NamespaceTok);
@@ -256,8 +335,9 @@ std::pair<tooling::Replacements, unsigned> NamespaceEndCommentsFixer::analyze(
// remove end comment, it will be merged in next one
updateEndComment(EndCommentPrevTok, std::string(), SourceMgr, &Fixes);
}
- CompactedNamespacesCount++;
- AllNamespaceNames = "::" + NamespaceName + AllNamespaceNames;
+ ++CompactedNamespacesCount;
+ if (!NamespaceName.empty())
+ AllNamespaceNames = "::" + NamespaceName + AllNamespaceNames;
continue;
}
NamespaceName += AllNamespaceNames;
@@ -279,9 +359,15 @@ std::pair<tooling::Replacements, unsigned> NamespaceEndCommentsFixer::analyze(
computeEndCommentText(NamespaceName, AddNewline, NamespaceTok,
Style.SpacesInLineCommentPrefix.Minimum);
if (!hasEndComment(EndCommentPrevTok)) {
- bool isShort = I - StartLineIndex <= Style.ShortNamespaceLines + 1;
- if (!isShort)
- addEndComment(EndCommentPrevTok, EndCommentText, SourceMgr, &Fixes);
+ unsigned LineCount = 0;
+ for (auto J = StartLineIndex + 1; J < I; ++J)
+ LineCount += AnnotatedLines[J]->size();
+ if (LineCount > Style.ShortNamespaceLines) {
+ addEndComment(EndCommentPrevTok,
+ std::string(Style.SpacesBeforeTrailingComments, ' ') +
+ EndCommentText,
+ SourceMgr, &Fixes);
+ }
} else if (!validEndComment(EndCommentPrevTok, NamespaceName,
NamespaceTok)) {
updateEndComment(EndCommentPrevTok, EndCommentText, SourceMgr, &Fixes);
diff --git a/contrib/llvm-project/clang/lib/Format/ObjCPropertyAttributeOrderFixer.cpp b/contrib/llvm-project/clang/lib/Format/ObjCPropertyAttributeOrderFixer.cpp
new file mode 100644
index 000000000000..c91d6251425e
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Format/ObjCPropertyAttributeOrderFixer.cpp
@@ -0,0 +1,220 @@
+//===--- ObjCPropertyAttributeOrderFixer.cpp -------------------*- C++--*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements ObjCPropertyAttributeOrderFixer, a TokenAnalyzer that
+/// adjusts the order of attributes in an ObjC `@property(...)` declaration,
+/// depending on the style.
+///
+//===----------------------------------------------------------------------===//
+
+#include "ObjCPropertyAttributeOrderFixer.h"
+
+#include <algorithm>
+
+namespace clang {
+namespace format {
+
+ObjCPropertyAttributeOrderFixer::ObjCPropertyAttributeOrderFixer(
+ const Environment &Env, const FormatStyle &Style)
+ : TokenAnalyzer(Env, Style) {
+ // Create an "order priority" map to use to sort properties.
+ unsigned Index = 0;
+ for (const auto &Property : Style.ObjCPropertyAttributeOrder)
+ SortOrderMap[Property] = Index++;
+}
+
+struct ObjCPropertyEntry {
+ StringRef Attribute; // eg, `readwrite`
+ StringRef Value; // eg, the `foo` of the attribute `getter=foo`
+};
+
+void ObjCPropertyAttributeOrderFixer::sortPropertyAttributes(
+ const SourceManager &SourceMgr, tooling::Replacements &Fixes,
+ const FormatToken *BeginTok, const FormatToken *EndTok) {
+ assert(BeginTok);
+ assert(EndTok);
+ assert(EndTok->Previous);
+
+ // If there are zero or one tokens, nothing to do.
+ if (BeginTok == EndTok || BeginTok->Next == EndTok)
+ return;
+
+ // Use a set to sort attributes and remove duplicates.
+ std::set<unsigned> Ordinals;
+
+ // Create a "remapping index" on how to reorder the attributes.
+ SmallVector<int> Indices;
+
+ // Collect the attributes.
+ SmallVector<ObjCPropertyEntry> PropertyAttributes;
+ bool HasDuplicates = false;
+ int Index = 0;
+ for (auto Tok = BeginTok; Tok != EndTok; Tok = Tok->Next) {
+ assert(Tok);
+ if (Tok->is(tok::comma)) {
+ // Ignore the comma separators.
+ continue;
+ }
+
+ // Most attributes look like identifiers, but `class` is a keyword.
+ if (!Tok->isOneOf(tok::identifier, tok::kw_class)) {
+ // If we hit any other kind of token, just bail.
+ return;
+ }
+
+ const StringRef Attribute{Tok->TokenText};
+ StringRef Value;
+
+ // Also handle `getter=getFoo` attributes.
+ // (Note: no check needed against `EndTok`, since its type is not
+ // BinaryOperator or Identifier)
+ assert(Tok->Next);
+ if (Tok->Next->is(tok::equal)) {
+ Tok = Tok->Next;
+ assert(Tok->Next);
+ if (Tok->Next->isNot(tok::identifier)) {
+ // If we hit any other kind of token, just bail. It's unusual/illegal.
+ return;
+ }
+ Tok = Tok->Next;
+ Value = Tok->TokenText;
+ }
+
+ auto It = SortOrderMap.find(Attribute);
+ if (It == SortOrderMap.end())
+ It = SortOrderMap.insert({Attribute, SortOrderMap.size()}).first;
+
+ // Sort the indices based on the priority stored in `SortOrderMap`.
+ const auto Ordinal = It->second;
+ if (!Ordinals.insert(Ordinal).second) {
+ HasDuplicates = true;
+ continue;
+ }
+
+ if (Ordinal >= Indices.size())
+ Indices.resize(Ordinal + 1);
+ Indices[Ordinal] = Index++;
+
+ // Memoize the attribute.
+ PropertyAttributes.push_back({Attribute, Value});
+ }
+
+ if (!HasDuplicates) {
+ // There's nothing to do unless there's more than one attribute.
+ if (PropertyAttributes.size() < 2)
+ return;
+
+ int PrevIndex = -1;
+ bool IsSorted = true;
+ for (const auto Ordinal : Ordinals) {
+ const auto Index = Indices[Ordinal];
+ if (Index < PrevIndex) {
+ IsSorted = false;
+ break;
+ }
+ assert(Index > PrevIndex);
+ PrevIndex = Index;
+ }
+
+ // If the property order is already correct, then no fix-up is needed.
+ if (IsSorted)
+ return;
+ }
+
+ // Generate the replacement text.
+ std::string NewText;
+ bool IsFirst = true;
+ for (const auto Ordinal : Ordinals) {
+ if (IsFirst)
+ IsFirst = false;
+ else
+ NewText += ", ";
+
+ const auto &PropertyEntry = PropertyAttributes[Indices[Ordinal]];
+ NewText += PropertyEntry.Attribute;
+
+ if (const auto Value = PropertyEntry.Value; !Value.empty()) {
+ NewText += '=';
+ NewText += Value;
+ }
+ }
+
+ auto Range = CharSourceRange::getCharRange(
+ BeginTok->getStartOfNonWhitespace(), EndTok->Previous->Tok.getEndLoc());
+ auto Replacement = tooling::Replacement(SourceMgr, Range, NewText);
+ auto Err = Fixes.add(Replacement);
+ if (Err) {
+ llvm::errs() << "Error while reodering ObjC property attributes : "
+ << llvm::toString(std::move(Err)) << "\n";
+ }
+}
+
+void ObjCPropertyAttributeOrderFixer::analyzeObjCPropertyDecl(
+ const SourceManager &SourceMgr, const AdditionalKeywords &Keywords,
+ tooling::Replacements &Fixes, const FormatToken *Tok) {
+ assert(Tok);
+
+ // Expect `property` to be the very next token or else just bail early.
+ const FormatToken *const PropertyTok = Tok->Next;
+ if (!PropertyTok || PropertyTok->isNot(Keywords.kw_property))
+ return;
+
+ // Expect the opening paren to be the next token or else just bail early.
+ const FormatToken *const LParenTok = PropertyTok->getNextNonComment();
+ if (!LParenTok || LParenTok->isNot(tok::l_paren))
+ return;
+
+ // Get the matching right-paren, the bounds for property attributes.
+ const FormatToken *const RParenTok = LParenTok->MatchingParen;
+ if (!RParenTok)
+ return;
+
+ sortPropertyAttributes(SourceMgr, Fixes, LParenTok->Next, RParenTok);
+}
+
+std::pair<tooling::Replacements, unsigned>
+ObjCPropertyAttributeOrderFixer::analyze(
+ TokenAnnotator & /*Annotator*/,
+ SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ FormatTokenLexer &Tokens) {
+ tooling::Replacements Fixes;
+ const AdditionalKeywords &Keywords = Tokens.getKeywords();
+ const SourceManager &SourceMgr = Env.getSourceManager();
+ AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
+
+ for (AnnotatedLine *Line : AnnotatedLines) {
+ assert(Line);
+ if (!Line->Affected || Line->Type != LT_ObjCProperty)
+ continue;
+ FormatToken *First = Line->First;
+ assert(First);
+ if (First->Finalized)
+ continue;
+
+ const auto *Last = Line->Last;
+
+ for (const auto *Tok = First; Tok != Last; Tok = Tok->Next) {
+ assert(Tok);
+
+ // Skip until the `@` of a `@property` declaration.
+ if (Tok->isNot(TT_ObjCProperty))
+ continue;
+
+ analyzeObjCPropertyDecl(SourceMgr, Keywords, Fixes, Tok);
+
+ // There are never two `@property` in a line (they are split
+ // by other passes), so this pass can break after just one.
+ break;
+ }
+ }
+ return {Fixes, 0};
+}
+
+} // namespace format
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Format/ObjCPropertyAttributeOrderFixer.h b/contrib/llvm-project/clang/lib/Format/ObjCPropertyAttributeOrderFixer.h
new file mode 100644
index 000000000000..d9ce85d144af
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Format/ObjCPropertyAttributeOrderFixer.h
@@ -0,0 +1,51 @@
+//===--- ObjCPropertyAttributeOrderFixer.h ------------------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file declares ObjCPropertyAttributeOrderFixer, a TokenAnalyzer that
+/// adjusts the order of attributes in an ObjC `@property(...)` declaration,
+/// depending on the style.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_FORMAT_OBJCPROPERTYATTRIBUTEORDERFIXER_H
+#define LLVM_CLANG_LIB_FORMAT_OBJCPROPERTYATTRIBUTEORDERFIXER_H
+
+#include "TokenAnalyzer.h"
+
+namespace clang {
+namespace format {
+
+class ObjCPropertyAttributeOrderFixer : public TokenAnalyzer {
+ llvm::StringMap<unsigned> SortOrderMap;
+
+ void analyzeObjCPropertyDecl(const SourceManager &SourceMgr,
+ const AdditionalKeywords &Keywords,
+ tooling::Replacements &Fixes,
+ const FormatToken *Tok);
+
+ void sortPropertyAttributes(const SourceManager &SourceMgr,
+ tooling::Replacements &Fixes,
+ const FormatToken *BeginTok,
+ const FormatToken *EndTok);
+
+ std::pair<tooling::Replacements, unsigned>
+ analyze(TokenAnnotator &Annotator,
+ SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ FormatTokenLexer &Tokens) override;
+
+public:
+ ObjCPropertyAttributeOrderFixer(const Environment &Env,
+ const FormatStyle &Style);
+};
+
+} // end namespace format
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp b/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp
new file mode 100644
index 000000000000..84941746f0df
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp
@@ -0,0 +1,639 @@
+//===--- QualifierAlignmentFixer.cpp ----------------------------*- C++--*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements QualifierAlignmentFixer, a TokenAnalyzer that
+/// enforces either left or right const depending on the style.
+///
+//===----------------------------------------------------------------------===//
+
+#include "QualifierAlignmentFixer.h"
+#include "FormatToken.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Regex.h"
+
+#include <algorithm>
+#include <optional>
+
+#define DEBUG_TYPE "format-qualifier-alignment-fixer"
+
+namespace clang {
+namespace format {
+
+void addQualifierAlignmentFixerPasses(const FormatStyle &Style,
+ SmallVectorImpl<AnalyzerPass> &Passes) {
+ std::vector<std::string> LeftOrder;
+ std::vector<std::string> RightOrder;
+ std::vector<tok::TokenKind> ConfiguredQualifierTokens;
+ prepareLeftRightOrderingForQualifierAlignmentFixer(
+ Style.QualifierOrder, LeftOrder, RightOrder, ConfiguredQualifierTokens);
+
+ // Handle the left and right alignment separately.
+ for (const auto &Qualifier : LeftOrder) {
+ Passes.emplace_back(
+ [&, Qualifier, ConfiguredQualifierTokens](const Environment &Env) {
+ return LeftRightQualifierAlignmentFixer(Env, Style, Qualifier,
+ ConfiguredQualifierTokens,
+ /*RightAlign=*/false)
+ .process();
+ });
+ }
+ for (const auto &Qualifier : RightOrder) {
+ Passes.emplace_back(
+ [&, Qualifier, ConfiguredQualifierTokens](const Environment &Env) {
+ return LeftRightQualifierAlignmentFixer(Env, Style, Qualifier,
+ ConfiguredQualifierTokens,
+ /*RightAlign=*/true)
+ .process();
+ });
+ }
+}
+
+static void replaceToken(const SourceManager &SourceMgr,
+ tooling::Replacements &Fixes,
+ const CharSourceRange &Range, std::string NewText) {
+ auto Replacement = tooling::Replacement(SourceMgr, Range, NewText);
+ auto Err = Fixes.add(Replacement);
+
+ if (Err) {
+ llvm::errs() << "Error while rearranging Qualifier : "
+ << llvm::toString(std::move(Err)) << "\n";
+ }
+}
+
+static void removeToken(const SourceManager &SourceMgr,
+ tooling::Replacements &Fixes,
+ const FormatToken *First) {
+ auto Range = CharSourceRange::getCharRange(First->getStartOfNonWhitespace(),
+ First->Tok.getEndLoc());
+ replaceToken(SourceMgr, Fixes, Range, "");
+}
+
+static void insertQualifierAfter(const SourceManager &SourceMgr,
+ tooling::Replacements &Fixes,
+ const FormatToken *First,
+ const std::string &Qualifier) {
+ auto Range = CharSourceRange::getCharRange(First->Tok.getLocation(),
+ First->Tok.getEndLoc());
+
+ std::string NewText{};
+ NewText += First->TokenText;
+ NewText += " " + Qualifier;
+ replaceToken(SourceMgr, Fixes, Range, NewText);
+}
+
+static void insertQualifierBefore(const SourceManager &SourceMgr,
+ tooling::Replacements &Fixes,
+ const FormatToken *First,
+ const std::string &Qualifier) {
+ auto Range = CharSourceRange::getCharRange(First->getStartOfNonWhitespace(),
+ First->Tok.getEndLoc());
+
+ std::string NewText = " " + Qualifier + " ";
+ NewText += First->TokenText;
+
+ replaceToken(SourceMgr, Fixes, Range, NewText);
+}
+
+static bool endsWithSpace(const std::string &s) {
+ if (s.empty())
+ return false;
+ return isspace(s.back());
+}
+
+static bool startsWithSpace(const std::string &s) {
+ if (s.empty())
+ return false;
+ return isspace(s.front());
+}
+
+static void rotateTokens(const SourceManager &SourceMgr,
+ tooling::Replacements &Fixes, const FormatToken *First,
+ const FormatToken *Last, bool Left) {
+ auto *End = Last;
+ auto *Begin = First;
+ if (!Left) {
+ End = Last->Next;
+ Begin = First->Next;
+ }
+
+ std::string NewText;
+ // If we are rotating to the left we move the Last token to the front.
+ if (Left) {
+ NewText += Last->TokenText;
+ NewText += " ";
+ }
+
+ // Then move through the other tokens.
+ auto *Tok = Begin;
+ while (Tok != End) {
+ if (!NewText.empty() && !endsWithSpace(NewText))
+ NewText += " ";
+
+ NewText += Tok->TokenText;
+ Tok = Tok->Next;
+ }
+
+ // If we are rotating to the right we move the first token to the back.
+ if (!Left) {
+ if (!NewText.empty() && !startsWithSpace(NewText))
+ NewText += " ";
+ NewText += First->TokenText;
+ }
+
+ auto Range = CharSourceRange::getCharRange(First->getStartOfNonWhitespace(),
+ Last->Tok.getEndLoc());
+
+ replaceToken(SourceMgr, Fixes, Range, NewText);
+}
+
+static bool
+isConfiguredQualifier(const FormatToken *const Tok,
+ const std::vector<tok::TokenKind> &Qualifiers) {
+ return Tok && llvm::is_contained(Qualifiers, Tok->Tok.getKind());
+}
+
+static bool isQualifier(const FormatToken *const Tok) {
+ if (!Tok)
+ return false;
+
+ switch (Tok->Tok.getKind()) {
+ case tok::kw_const:
+ case tok::kw_volatile:
+ case tok::kw_static:
+ case tok::kw_inline:
+ case tok::kw_constexpr:
+ case tok::kw_restrict:
+ case tok::kw_friend:
+ return true;
+ default:
+ return false;
+ }
+}
+
+const FormatToken *LeftRightQualifierAlignmentFixer::analyzeRight(
+ const SourceManager &SourceMgr, const AdditionalKeywords &Keywords,
+ tooling::Replacements &Fixes, const FormatToken *const Tok,
+ const std::string &Qualifier, tok::TokenKind QualifierType) {
+ // We only need to think about streams that begin with a qualifier.
+ if (Tok->isNot(QualifierType))
+ return Tok;
+ // Don't concern yourself if nothing follows the qualifier.
+ if (!Tok->Next)
+ return Tok;
+
+ // Skip qualifiers to the left to find what preceeds the qualifiers.
+ // Use isQualifier rather than isConfiguredQualifier to cover all qualifiers.
+ const FormatToken *PreviousCheck = Tok->getPreviousNonComment();
+ while (isQualifier(PreviousCheck))
+ PreviousCheck = PreviousCheck->getPreviousNonComment();
+
+ // Examples given in order of ['type', 'const', 'volatile']
+ const bool IsRightQualifier = PreviousCheck && [PreviousCheck]() {
+ // The cases:
+ // `Foo() const` -> `Foo() const`
+ // `Foo() const final` -> `Foo() const final`
+ // `Foo() const override` -> `Foo() const final`
+ // `Foo() const volatile override` -> `Foo() const volatile override`
+ // `Foo() volatile const final` -> `Foo() const volatile final`
+ if (PreviousCheck->is(tok::r_paren))
+ return true;
+
+ // The cases:
+ // `struct {} volatile const a;` -> `struct {} const volatile a;`
+ // `class {} volatile const a;` -> `class {} const volatile a;`
+ if (PreviousCheck->is(tok::r_brace))
+ return true;
+
+ // The case:
+ // `template <class T> const Bar Foo()` ->
+ // `template <class T> Bar const Foo()`
+ // The cases:
+ // `Foo<int> const foo` -> `Foo<int> const foo`
+ // `Foo<int> volatile const` -> `Foo<int> const volatile`
+ // The case:
+ // ```
+ // template <class T>
+ // requires Concept1<T> && requires Concept2<T>
+ // const Foo f();
+ // ```
+ // ->
+ // ```
+ // template <class T>
+ // requires Concept1<T> && requires Concept2<T>
+ // Foo const f();
+ // ```
+ if (PreviousCheck->is(TT_TemplateCloser)) {
+ // If the token closes a template<> or requires clause, then it is a left
+ // qualifier and should be moved to the right.
+ return !(PreviousCheck->ClosesTemplateDeclaration ||
+ PreviousCheck->ClosesRequiresClause);
+ }
+
+ // The case `Foo* const` -> `Foo* const`
+ // The case `Foo* volatile const` -> `Foo* const volatile`
+ // The case `int32_t const` -> `int32_t const`
+ // The case `auto volatile const` -> `auto const volatile`
+ if (PreviousCheck->isOneOf(TT_PointerOrReference, tok::identifier,
+ tok::kw_auto)) {
+ return true;
+ }
+
+ return false;
+ }();
+
+ // Find the last qualifier to the right.
+ const FormatToken *LastQual = Tok;
+ while (isQualifier(LastQual->getNextNonComment()))
+ LastQual = LastQual->getNextNonComment();
+
+ // If this qualifier is to the right of a type or pointer do a partial sort
+ // and return.
+ if (IsRightQualifier) {
+ if (LastQual != Tok)
+ rotateTokens(SourceMgr, Fixes, Tok, LastQual, /*Left=*/false);
+ return Tok;
+ }
+
+ const FormatToken *TypeToken = LastQual->getNextNonComment();
+ if (!TypeToken)
+ return Tok;
+
+ // Stay safe and don't move past macros, also don't bother with sorting.
+ if (isPossibleMacro(TypeToken))
+ return Tok;
+
+ // The case `const long long int volatile` -> `long long int const volatile`
+ // The case `long const long int volatile` -> `long long int const volatile`
+ // The case `long long volatile int const` -> `long long int const volatile`
+ // The case `const long long volatile int` -> `long long int const volatile`
+ if (TypeToken->isSimpleTypeSpecifier()) {
+ // The case `const decltype(foo)` -> `const decltype(foo)`
+ // The case `const typeof(foo)` -> `const typeof(foo)`
+ // The case `const _Atomic(foo)` -> `const _Atomic(foo)`
+ if (TypeToken->isOneOf(tok::kw_decltype, tok::kw_typeof, tok::kw__Atomic))
+ return Tok;
+
+ const FormatToken *LastSimpleTypeSpecifier = TypeToken;
+ while (isQualifierOrType(LastSimpleTypeSpecifier->getNextNonComment()))
+ LastSimpleTypeSpecifier = LastSimpleTypeSpecifier->getNextNonComment();
+
+ rotateTokens(SourceMgr, Fixes, Tok, LastSimpleTypeSpecifier,
+ /*Left=*/false);
+ return LastSimpleTypeSpecifier;
+ }
+
+ // The case `unsigned short const` -> `unsigned short const`
+ // The case:
+ // `unsigned short volatile const` -> `unsigned short const volatile`
+ if (PreviousCheck && PreviousCheck->isSimpleTypeSpecifier()) {
+ if (LastQual != Tok)
+ rotateTokens(SourceMgr, Fixes, Tok, LastQual, /*Left=*/false);
+ return Tok;
+ }
+
+ // Skip the typename keyword.
+ // The case `const typename C::type` -> `typename C::type const`
+ if (TypeToken->is(tok::kw_typename))
+ TypeToken = TypeToken->getNextNonComment();
+
+ // Skip the initial :: of a global-namespace type.
+ // The case `const ::...` -> `::... const`
+ if (TypeToken->is(tok::coloncolon)) {
+ // The case `const ::template Foo...` -> `::template Foo... const`
+ TypeToken = TypeToken->getNextNonComment();
+ if (TypeToken && TypeToken->is(tok::kw_template))
+ TypeToken = TypeToken->getNextNonComment();
+ }
+
+ // Don't change declarations such as
+ // `foo(const struct Foo a);` -> `foo(const struct Foo a);`
+ // as they would currently change code such as
+ // `const struct my_struct_t {} my_struct;` -> `struct my_struct_t const {}
+ // my_struct;`
+ if (TypeToken->isOneOf(tok::kw_struct, tok::kw_class))
+ return Tok;
+
+ if (TypeToken->isOneOf(tok::kw_auto, tok::identifier)) {
+ // The case `const auto` -> `auto const`
+ // The case `const Foo` -> `Foo const`
+ // The case `const ::Foo` -> `::Foo const`
+ // The case `const Foo *` -> `Foo const *`
+ // The case `const Foo &` -> `Foo const &`
+ // The case `const Foo &&` -> `Foo const &&`
+ // The case `const std::Foo &&` -> `std::Foo const &&`
+ // The case `const std::Foo<T> &&` -> `std::Foo<T> const &&`
+ // The case `const ::template Foo` -> `::template Foo const`
+ // The case `const T::template Foo` -> `T::template Foo const`
+ const FormatToken *Next = nullptr;
+ while ((Next = TypeToken->getNextNonComment()) &&
+ (Next->is(TT_TemplateOpener) ||
+ Next->startsSequence(tok::coloncolon, tok::identifier) ||
+ Next->startsSequence(tok::coloncolon, tok::kw_template,
+ tok::identifier))) {
+ if (Next->is(TT_TemplateOpener)) {
+ assert(Next->MatchingParen && "Missing template closer");
+ TypeToken = Next->MatchingParen;
+ } else if (Next->startsSequence(tok::coloncolon, tok::identifier)) {
+ TypeToken = Next->getNextNonComment();
+ } else {
+ TypeToken = Next->getNextNonComment()->getNextNonComment();
+ }
+ }
+
+ if (Next->is(tok::kw_auto))
+ TypeToken = Next;
+
+ // Place the Qualifier at the end of the list of qualifiers.
+ while (isQualifier(TypeToken->getNextNonComment())) {
+ // The case `volatile Foo::iter const` -> `Foo::iter const volatile`
+ TypeToken = TypeToken->getNextNonComment();
+ }
+
+ insertQualifierAfter(SourceMgr, Fixes, TypeToken, Qualifier);
+ // Remove token and following whitespace.
+ auto Range = CharSourceRange::getCharRange(
+ Tok->getStartOfNonWhitespace(), Tok->Next->getStartOfNonWhitespace());
+ replaceToken(SourceMgr, Fixes, Range, "");
+ }
+
+ return Tok;
+}
+
+const FormatToken *LeftRightQualifierAlignmentFixer::analyzeLeft(
+ const SourceManager &SourceMgr, const AdditionalKeywords &Keywords,
+ tooling::Replacements &Fixes, const FormatToken *const Tok,
+ const std::string &Qualifier, tok::TokenKind QualifierType) {
+ // We only need to think about streams that begin with a qualifier.
+ if (Tok->isNot(QualifierType))
+ return Tok;
+ // Don't concern yourself if nothing preceeds the qualifier.
+ if (!Tok->getPreviousNonComment())
+ return Tok;
+
+ // Skip qualifiers to the left to find what preceeds the qualifiers.
+ const FormatToken *TypeToken = Tok->getPreviousNonComment();
+ while (isQualifier(TypeToken))
+ TypeToken = TypeToken->getPreviousNonComment();
+
+ // For left qualifiers preceeded by nothing, a template declaration, or *,&,&&
+ // we only perform sorting.
+ if (!TypeToken || TypeToken->isPointerOrReference() ||
+ TypeToken->ClosesRequiresClause || TypeToken->ClosesTemplateDeclaration) {
+
+ // Don't sort past a non-configured qualifier token.
+ const FormatToken *FirstQual = Tok;
+ while (isConfiguredQualifier(FirstQual->getPreviousNonComment(),
+ ConfiguredQualifierTokens)) {
+ FirstQual = FirstQual->getPreviousNonComment();
+ }
+
+ if (FirstQual != Tok)
+ rotateTokens(SourceMgr, Fixes, FirstQual, Tok, /*Left=*/true);
+ return Tok;
+ }
+
+ // Stay safe and don't move past macros, also don't bother with sorting.
+ if (isPossibleMacro(TypeToken))
+ return Tok;
+
+ // Examples given in order of ['const', 'volatile', 'type']
+
+ // The case `volatile long long int const` -> `const volatile long long int`
+ // The case `volatile long long const int` -> `const volatile long long int`
+ // The case `const long long volatile int` -> `const volatile long long int`
+ // The case `long volatile long int const` -> `const volatile long long int`
+ if (TypeToken->isSimpleTypeSpecifier()) {
+ const FormatToken *LastSimpleTypeSpecifier = TypeToken;
+ while (isConfiguredQualifierOrType(
+ LastSimpleTypeSpecifier->getPreviousNonComment(),
+ ConfiguredQualifierTokens)) {
+ LastSimpleTypeSpecifier =
+ LastSimpleTypeSpecifier->getPreviousNonComment();
+ }
+
+ rotateTokens(SourceMgr, Fixes, LastSimpleTypeSpecifier, Tok,
+ /*Left=*/true);
+ return Tok;
+ }
+
+ if (TypeToken->isOneOf(tok::kw_auto, tok::identifier, TT_TemplateCloser)) {
+ const auto IsStartOfType = [](const FormatToken *const Tok) -> bool {
+ if (!Tok)
+ return true;
+
+ // A template closer is not the start of a type.
+ // The case `?<> const` -> `const ?<>`
+ if (Tok->is(TT_TemplateCloser))
+ return false;
+
+ const FormatToken *const Previous = Tok->getPreviousNonComment();
+ if (!Previous)
+ return true;
+
+ // An identifier preceeded by :: is not the start of a type.
+ // The case `?::Foo const` -> `const ?::Foo`
+ if (Tok->is(tok::identifier) && Previous->is(tok::coloncolon))
+ return false;
+
+ const FormatToken *const PrePrevious = Previous->getPreviousNonComment();
+ // An identifier preceeded by ::template is not the start of a type.
+ // The case `?::template Foo const` -> `const ?::template Foo`
+ if (Tok->is(tok::identifier) && Previous->is(tok::kw_template) &&
+ PrePrevious && PrePrevious->is(tok::coloncolon)) {
+ return false;
+ }
+
+ if (Tok->endsSequence(tok::kw_auto, tok::identifier))
+ return false;
+
+ return true;
+ };
+
+ while (!IsStartOfType(TypeToken)) {
+ // The case `?<>`
+ if (TypeToken->is(TT_TemplateCloser)) {
+ assert(TypeToken->MatchingParen && "Missing template opener");
+ TypeToken = TypeToken->MatchingParen->getPreviousNonComment();
+ } else {
+ // The cases
+ // `::Foo`
+ // `?>::Foo`
+ // `?Bar::Foo`
+ // `::template Foo`
+ // `?>::template Foo`
+ // `?Bar::template Foo`
+ if (TypeToken->getPreviousNonComment()->is(tok::kw_template))
+ TypeToken = TypeToken->getPreviousNonComment();
+
+ const FormatToken *const ColonColon =
+ TypeToken->getPreviousNonComment();
+ const FormatToken *const PreColonColon =
+ ColonColon->getPreviousNonComment();
+ if (PreColonColon &&
+ PreColonColon->isOneOf(TT_TemplateCloser, tok::identifier)) {
+ TypeToken = PreColonColon;
+ } else {
+ TypeToken = ColonColon;
+ }
+ }
+ }
+
+ assert(TypeToken && "Should be auto or identifier");
+
+ // Place the Qualifier at the start of the list of qualifiers.
+ const FormatToken *Previous = nullptr;
+ while ((Previous = TypeToken->getPreviousNonComment()) &&
+ (isConfiguredQualifier(Previous, ConfiguredQualifierTokens) ||
+ Previous->is(tok::kw_typename))) {
+ // The case `volatile Foo::iter const` -> `const volatile Foo::iter`
+ // The case `typename C::type const` -> `const typename C::type`
+ TypeToken = Previous;
+ }
+
+ // Don't change declarations such as
+ // `foo(struct Foo const a);` -> `foo(struct Foo const a);`
+ if (!Previous || !Previous->isOneOf(tok::kw_struct, tok::kw_class)) {
+ insertQualifierBefore(SourceMgr, Fixes, TypeToken, Qualifier);
+ removeToken(SourceMgr, Fixes, Tok);
+ }
+ }
+
+ return Tok;
+}
+
+tok::TokenKind LeftRightQualifierAlignmentFixer::getTokenFromQualifier(
+ const std::string &Qualifier) {
+ // Don't let 'type' be an identifier, but steal typeof token.
+ return llvm::StringSwitch<tok::TokenKind>(Qualifier)
+ .Case("type", tok::kw_typeof)
+ .Case("const", tok::kw_const)
+ .Case("volatile", tok::kw_volatile)
+ .Case("static", tok::kw_static)
+ .Case("inline", tok::kw_inline)
+ .Case("constexpr", tok::kw_constexpr)
+ .Case("restrict", tok::kw_restrict)
+ .Case("friend", tok::kw_friend)
+ .Default(tok::identifier);
+}
+
+LeftRightQualifierAlignmentFixer::LeftRightQualifierAlignmentFixer(
+ const Environment &Env, const FormatStyle &Style,
+ const std::string &Qualifier,
+ const std::vector<tok::TokenKind> &QualifierTokens, bool RightAlign)
+ : TokenAnalyzer(Env, Style), Qualifier(Qualifier), RightAlign(RightAlign),
+ ConfiguredQualifierTokens(QualifierTokens) {}
+
+std::pair<tooling::Replacements, unsigned>
+LeftRightQualifierAlignmentFixer::analyze(
+ TokenAnnotator & /*Annotator*/,
+ SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ FormatTokenLexer &Tokens) {
+ tooling::Replacements Fixes;
+ AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
+ fixQualifierAlignment(AnnotatedLines, Tokens, Fixes);
+ return {Fixes, 0};
+}
+
+void LeftRightQualifierAlignmentFixer::fixQualifierAlignment(
+ SmallVectorImpl<AnnotatedLine *> &AnnotatedLines, FormatTokenLexer &Tokens,
+ tooling::Replacements &Fixes) {
+ const AdditionalKeywords &Keywords = Tokens.getKeywords();
+ const SourceManager &SourceMgr = Env.getSourceManager();
+ tok::TokenKind QualifierToken = getTokenFromQualifier(Qualifier);
+ assert(QualifierToken != tok::identifier && "Unrecognised Qualifier");
+
+ for (AnnotatedLine *Line : AnnotatedLines) {
+ fixQualifierAlignment(Line->Children, Tokens, Fixes);
+ if (!Line->Affected || Line->InPPDirective)
+ continue;
+ FormatToken *First = Line->First;
+ assert(First);
+ if (First->Finalized)
+ continue;
+
+ const auto *Last = Line->Last;
+
+ for (const auto *Tok = First; Tok && Tok != Last && Tok->Next;
+ Tok = Tok->Next) {
+ if (Tok->is(tok::comment))
+ continue;
+ if (RightAlign) {
+ Tok = analyzeRight(SourceMgr, Keywords, Fixes, Tok, Qualifier,
+ QualifierToken);
+ } else {
+ Tok = analyzeLeft(SourceMgr, Keywords, Fixes, Tok, Qualifier,
+ QualifierToken);
+ }
+ }
+ }
+}
+
+void prepareLeftRightOrderingForQualifierAlignmentFixer(
+ const std::vector<std::string> &Order, std::vector<std::string> &LeftOrder,
+ std::vector<std::string> &RightOrder,
+ std::vector<tok::TokenKind> &Qualifiers) {
+
+ // Depending on the position of type in the order you need
+ // To iterate forward or backward through the order list as qualifier
+ // can push through each other.
+ // The Order list must define the position of "type" to signify
+ assert(llvm::is_contained(Order, "type") &&
+ "QualifierOrder must contain type");
+ // Split the Order list by type and reverse the left side.
+
+ bool left = true;
+ for (const auto &s : Order) {
+ if (s == "type") {
+ left = false;
+ continue;
+ }
+
+ tok::TokenKind QualifierToken =
+ LeftRightQualifierAlignmentFixer::getTokenFromQualifier(s);
+ if (QualifierToken != tok::kw_typeof && QualifierToken != tok::identifier)
+ Qualifiers.push_back(QualifierToken);
+
+ if (left) {
+ // Reverse the order for left aligned items.
+ LeftOrder.insert(LeftOrder.begin(), s);
+ } else {
+ RightOrder.push_back(s);
+ }
+ }
+}
+
+bool LeftRightQualifierAlignmentFixer::isQualifierOrType(
+ const FormatToken *const Tok) {
+ return Tok && (Tok->isSimpleTypeSpecifier() || Tok->is(tok::kw_auto) ||
+ isQualifier(Tok));
+}
+
+bool LeftRightQualifierAlignmentFixer::isConfiguredQualifierOrType(
+ const FormatToken *const Tok,
+ const std::vector<tok::TokenKind> &Qualifiers) {
+ return Tok && (Tok->isSimpleTypeSpecifier() || Tok->is(tok::kw_auto) ||
+ isConfiguredQualifier(Tok, Qualifiers));
+}
+
+// If a token is an identifier and it's upper case, it could
+// be a macro and hence we need to be able to ignore it.
+bool LeftRightQualifierAlignmentFixer::isPossibleMacro(const FormatToken *Tok) {
+ if (!Tok)
+ return false;
+ if (Tok->isNot(tok::identifier))
+ return false;
+ if (Tok->TokenText.upper() == Tok->TokenText.str()) {
+ // T,K,U,V likely could be template arguments
+ return Tok->TokenText.size() != 1;
+ }
+ return false;
+}
+
+} // namespace format
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.h b/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.h
new file mode 100644
index 000000000000..e922d8005595
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.h
@@ -0,0 +1,86 @@
+//===--- QualifierAlignmentFixer.h -------------------------------*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file declares QualifierAlignmentFixer, a TokenAnalyzer that
+/// enforces either east or west const depending on the style.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_FORMAT_QUALIFIERALIGNMENTFIXER_H
+#define LLVM_CLANG_LIB_FORMAT_QUALIFIERALIGNMENTFIXER_H
+
+#include "TokenAnalyzer.h"
+
+namespace clang {
+namespace format {
+
+typedef std::function<std::pair<tooling::Replacements, unsigned>(
+ const Environment &)>
+ AnalyzerPass;
+
+void addQualifierAlignmentFixerPasses(const FormatStyle &Style,
+ SmallVectorImpl<AnalyzerPass> &Passes);
+
+void prepareLeftRightOrderingForQualifierAlignmentFixer(
+ const std::vector<std::string> &Order, std::vector<std::string> &LeftOrder,
+ std::vector<std::string> &RightOrder,
+ std::vector<tok::TokenKind> &Qualifiers);
+
+class LeftRightQualifierAlignmentFixer : public TokenAnalyzer {
+ std::string Qualifier;
+ bool RightAlign;
+ SmallVector<tok::TokenKind, 8> QualifierTokens;
+ std::vector<tok::TokenKind> ConfiguredQualifierTokens;
+
+public:
+ LeftRightQualifierAlignmentFixer(
+ const Environment &Env, const FormatStyle &Style,
+ const std::string &Qualifier,
+ const std::vector<tok::TokenKind> &ConfiguredQualifierTokens,
+ bool RightAlign);
+
+ std::pair<tooling::Replacements, unsigned>
+ analyze(TokenAnnotator &Annotator,
+ SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ FormatTokenLexer &Tokens) override;
+
+ static tok::TokenKind getTokenFromQualifier(const std::string &Qualifier);
+
+ void fixQualifierAlignment(SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ FormatTokenLexer &Tokens,
+ tooling::Replacements &Fixes);
+
+ const FormatToken *analyzeRight(const SourceManager &SourceMgr,
+ const AdditionalKeywords &Keywords,
+ tooling::Replacements &Fixes,
+ const FormatToken *Tok,
+ const std::string &Qualifier,
+ tok::TokenKind QualifierType);
+
+ const FormatToken *analyzeLeft(const SourceManager &SourceMgr,
+ const AdditionalKeywords &Keywords,
+ tooling::Replacements &Fixes,
+ const FormatToken *Tok,
+ const std::string &Qualifier,
+ tok::TokenKind QualifierType);
+
+ // Is the Token a simple or qualifier type
+ static bool isQualifierOrType(const FormatToken *Tok);
+ static bool
+ isConfiguredQualifierOrType(const FormatToken *Tok,
+ const std::vector<tok::TokenKind> &Qualifiers);
+
+ // Is the Token likely a Macro
+ static bool isPossibleMacro(const FormatToken *Tok);
+};
+
+} // end namespace format
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp b/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp
index a5e3ce69207b..1a6a1b19e702 100644
--- a/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp
+++ b/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp
@@ -72,12 +72,14 @@ struct JsImportedSymbol {
struct JsModuleReference {
bool FormattingOff = false;
bool IsExport = false;
+ bool IsTypeOnly = false;
// Module references are sorted into these categories, in order.
enum ReferenceCategory {
SIDE_EFFECT, // "import 'something';"
ABSOLUTE, // from 'something'
RELATIVE_PARENT, // from '../*'
RELATIVE, // from './*'
+ ALIAS, // import X = A.B;
};
ReferenceCategory Category = ReferenceCategory::SIDE_EFFECT;
// The URL imported, e.g. `import .. from 'url';`. Empty for `export {a, b};`.
@@ -105,11 +107,14 @@ bool operator<(const JsModuleReference &LHS, const JsModuleReference &RHS) {
return LHS.IsExport < RHS.IsExport;
if (LHS.Category != RHS.Category)
return LHS.Category < RHS.Category;
- if (LHS.Category == JsModuleReference::ReferenceCategory::SIDE_EFFECT)
- // Side effect imports might be ordering sensitive. Consider them equal so
- // that they maintain their relative order in the stable sort below.
- // This retains transitivity because LHS.Category == RHS.Category here.
+ if (LHS.Category == JsModuleReference::ReferenceCategory::SIDE_EFFECT ||
+ LHS.Category == JsModuleReference::ReferenceCategory::ALIAS) {
+ // Side effect imports and aliases might be ordering sensitive. Consider
+ // them equal so that they maintain their relative order in the stable sort
+ // below. This retains transitivity because LHS.Category == RHS.Category
+ // here.
return false;
+ }
// Empty URLs sort *last* (for export {...};).
if (LHS.URL.empty() != RHS.URL.empty())
return LHS.URL.empty() < RHS.URL.empty();
@@ -130,7 +135,10 @@ class JavaScriptImportSorter : public TokenAnalyzer {
public:
JavaScriptImportSorter(const Environment &Env, const FormatStyle &Style)
: TokenAnalyzer(Env, Style),
- FileContents(Env.getSourceManager().getBufferData(Env.getFileID())) {}
+ FileContents(Env.getSourceManager().getBufferData(Env.getFileID())) {
+ // FormatToken.Tok starts out in an uninitialized state.
+ invalidToken.Tok.startToken();
+ }
std::pair<tooling::Replacements, unsigned>
analyze(TokenAnnotator &Annotator,
@@ -165,8 +173,9 @@ public:
// in a single group.
if (!Reference.IsExport &&
(Reference.IsExport != References[I + 1].IsExport ||
- Reference.Category != References[I + 1].Category))
+ Reference.Category != References[I + 1].Category)) {
ReferencesText += "\n";
+ }
}
}
llvm::StringRef PreviousText = getSourceText(InsertionPoint);
@@ -181,15 +190,15 @@ public:
// harmless and will be stripped by the subsequent formatting pass.
// FIXME: A better long term fix is to re-calculate Ranges after sorting.
unsigned PreviousSize = PreviousText.size();
- while (ReferencesText.size() < PreviousSize) {
+ while (ReferencesText.size() < PreviousSize)
ReferencesText += " ";
- }
// Separate references from the main code body of the file.
if (FirstNonImportLine && FirstNonImportLine->First->NewlinesBefore < 2 &&
!(FirstNonImportLine->First->is(tok::comment) &&
- FirstNonImportLine->First->TokenText.trim() == "// clang-format on"))
+ isClangFormatOn(FirstNonImportLine->First->TokenText.trim()))) {
ReferencesText += "\n";
+ }
LLVM_DEBUG(llvm::dbgs() << "Replacing imports:\n"
<< PreviousText << "\nwith:\n"
@@ -208,8 +217,8 @@ public:
}
private:
- FormatToken *Current;
- FormatToken *LineEnd;
+ FormatToken *Current = nullptr;
+ FormatToken *LineEnd = nullptr;
FormatToken invalidToken;
@@ -229,7 +238,6 @@ private:
if (!Current || Current == LineEnd->Next) {
// Set the current token to an invalid token, so that further parsing on
// this line fails.
- invalidToken.Tok.setKind(tok::unknown);
Current = &invalidToken;
}
}
@@ -260,13 +268,13 @@ private:
while (Start != References.end() && Start->FormattingOff) {
// Skip over all imports w/ disabled formatting.
ReferencesSorted.push_back(*Start);
- Start++;
+ ++Start;
}
SmallVector<JsModuleReference, 16> SortChunk;
while (Start != References.end() && !Start->FormattingOff) {
// Skip over all imports w/ disabled formatting.
SortChunk.push_back(*Start);
- Start++;
+ ++Start;
}
llvm::stable_sort(SortChunk);
mergeModuleReferences(SortChunk);
@@ -299,6 +307,7 @@ private:
if (Reference->Category == JsModuleReference::SIDE_EFFECT ||
PreviousReference->Category == JsModuleReference::SIDE_EFFECT ||
Reference->IsExport != PreviousReference->IsExport ||
+ Reference->IsTypeOnly != PreviousReference->IsTypeOnly ||
!PreviousReference->Prefix.empty() || !Reference->Prefix.empty() ||
!PreviousReference->DefaultImport.empty() ||
!Reference->DefaultImport.empty() || Reference->Symbols.empty() ||
@@ -338,10 +347,12 @@ private:
// Stitch together the module reference start...
Buffer += getSourceText(Reference.Range.getBegin(), Reference.SymbolsStart);
// ... then the references in order ...
- for (auto I = Symbols.begin(), E = Symbols.end(); I != E; ++I) {
- if (I != Symbols.begin())
+ if (!Symbols.empty()) {
+ Buffer += getSourceText(Symbols.front().Range);
+ for (const JsImportedSymbol &Symbol : llvm::drop_begin(Symbols)) {
Buffer += ",";
- Buffer += getSourceText(I->Range);
+ Buffer += getSourceText(Symbol.Range);
+ }
}
// ... followed by the module reference end.
Buffer += getSourceText(Reference.SymbolsEnd, Reference.Range.getEnd());
@@ -359,15 +370,16 @@ private:
bool AnyImportAffected = false;
bool FormattingOff = false;
for (auto *Line : AnnotatedLines) {
+ assert(Line->First);
Current = Line->First;
LineEnd = Line->Last;
// clang-format comments toggle formatting on/off.
// This is tracked in FormattingOff here and on JsModuleReference.
while (Current && Current->is(tok::comment)) {
StringRef CommentText = Current->TokenText.trim();
- if (CommentText == "// clang-format off") {
+ if (isClangFormatOff(CommentText)) {
FormattingOff = true;
- } else if (CommentText == "// clang-format on") {
+ } else if (isClangFormatOn(CommentText)) {
FormattingOff = false;
// Special case: consider a trailing "clang-format on" line to be part
// of the module reference, so that it gets moved around together with
@@ -382,11 +394,12 @@ private:
Current = Current->Next;
}
skipComments();
- if (Start.isInvalid() || References.empty())
+ if (Start.isInvalid() || References.empty()) {
// After the first file level comment, consider line comments to be part
// of the import that immediately follows them by using the previously
// set Start.
Start = Line->First->Tok.getLocation();
+ }
if (!Current) {
// Only comments on this line. Could be the first non-import line.
FirstNonImportLine = Line;
@@ -395,6 +408,8 @@ private:
JsModuleReference Reference;
Reference.FormattingOff = FormattingOff;
Reference.Range.setBegin(Start);
+ // References w/o a URL, e.g. export {A}, groups with RELATIVE.
+ Reference.Category = JsModuleReference::ReferenceCategory::RELATIVE;
if (!parseModuleReference(Keywords, Reference)) {
if (!FirstNonImportLine)
FirstNonImportLine = Line; // if no comment before.
@@ -410,9 +425,8 @@ private:
<< ", cat: " << Reference.Category
<< ", url: " << Reference.URL
<< ", prefix: " << Reference.Prefix;
- for (size_t I = 0; I < Reference.Symbols.size(); ++I)
- llvm::dbgs() << ", " << Reference.Symbols[I].Symbol << " as "
- << Reference.Symbols[I].Alias;
+ for (const JsImportedSymbol &Symbol : Reference.Symbols)
+ llvm::dbgs() << ", " << Symbol.Symbol << " as " << Symbol.Alias;
llvm::dbgs() << ", text: " << getSourceText(Reference.Range);
llvm::dbgs() << "}\n";
});
@@ -454,16 +468,14 @@ private:
// URL = TokenText without the quotes.
Reference.URL =
Current->TokenText.substr(1, Current->TokenText.size() - 2);
- if (Reference.URL.startswith(".."))
+ if (Reference.URL.starts_with("..")) {
Reference.Category =
JsModuleReference::ReferenceCategory::RELATIVE_PARENT;
- else if (Reference.URL.startswith("."))
+ } else if (Reference.URL.starts_with(".")) {
Reference.Category = JsModuleReference::ReferenceCategory::RELATIVE;
- else
+ } else {
Reference.Category = JsModuleReference::ReferenceCategory::ABSOLUTE;
- } else {
- // w/o URL groups with "empty".
- Reference.Category = JsModuleReference::ReferenceCategory::RELATIVE;
+ }
}
return true;
}
@@ -478,6 +490,11 @@ private:
bool parseStarBinding(const AdditionalKeywords &Keywords,
JsModuleReference &Reference) {
// * as prefix from '...';
+ if (Current->is(Keywords.kw_type) && Current->Next &&
+ Current->Next->is(tok::star)) {
+ Reference.IsTypeOnly = true;
+ nextToken();
+ }
if (Current->isNot(tok::star))
return false;
nextToken();
@@ -493,12 +510,31 @@ private:
bool parseNamedBindings(const AdditionalKeywords &Keywords,
JsModuleReference &Reference) {
+ if (Current->is(Keywords.kw_type) && Current->Next &&
+ Current->Next->isOneOf(tok::identifier, tok::l_brace)) {
+ Reference.IsTypeOnly = true;
+ nextToken();
+ }
+
// eat a potential "import X, " prefix.
- if (Current->is(tok::identifier)) {
+ if (!Reference.IsExport && Current->is(tok::identifier)) {
Reference.DefaultImport = Current->TokenText;
nextToken();
if (Current->is(Keywords.kw_from))
return true;
+ // import X = A.B.C;
+ if (Current->is(tok::equal)) {
+ Reference.Category = JsModuleReference::ReferenceCategory::ALIAS;
+ nextToken();
+ while (Current->is(tok::identifier)) {
+ nextToken();
+ if (Current->is(tok::semi))
+ return true;
+ if (Current->isNot(tok::period))
+ return false;
+ nextToken();
+ }
+ }
if (Current->isNot(tok::comma))
return false;
nextToken(); // eat comma.
@@ -512,19 +548,26 @@ private:
nextToken();
if (Current->is(tok::r_brace))
break;
- if (!Current->isOneOf(tok::identifier, tok::kw_default))
+ auto IsIdentifier = [](const auto *Tok) {
+ return Tok->isOneOf(tok::identifier, tok::kw_default, tok::kw_template);
+ };
+ bool isTypeOnly = Current->is(Keywords.kw_type) && Current->Next &&
+ IsIdentifier(Current->Next);
+ if (!isTypeOnly && !IsIdentifier(Current))
return false;
JsImportedSymbol Symbol;
- Symbol.Symbol = Current->TokenText;
// Make sure to include any preceding comments.
Symbol.Range.setBegin(
Current->getPreviousNonComment()->Next->WhitespaceRange.getBegin());
+ if (isTypeOnly)
+ nextToken();
+ Symbol.Symbol = Current->TokenText;
nextToken();
if (Current->is(Keywords.kw_as)) {
nextToken();
- if (!Current->isOneOf(tok::identifier, tok::kw_default))
+ if (!IsIdentifier(Current))
return false;
Symbol.Alias = Current->TokenText;
nextToken();
@@ -550,9 +593,10 @@ tooling::Replacements sortJavaScriptImports(const FormatStyle &Style,
ArrayRef<tooling::Range> Ranges,
StringRef FileName) {
// FIXME: Cursor support.
- return JavaScriptImportSorter(Environment(Code, FileName, Ranges), Style)
- .process()
- .first;
+ auto Env = Environment::make(Code, FileName, Ranges);
+ if (!Env)
+ return {};
+ return JavaScriptImportSorter(*Env, Style).process().first;
}
} // end namespace format
diff --git a/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp b/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp
index f1459a808ff8..bd648c430f9b 100644
--- a/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp
@@ -26,28 +26,63 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Debug.h"
+#include <type_traits>
#define DEBUG_TYPE "format-formatter"
namespace clang {
namespace format {
-Environment::Environment(StringRef Code, StringRef FileName,
- ArrayRef<tooling::Range> Ranges,
- unsigned FirstStartColumn, unsigned NextStartColumn,
- unsigned LastStartColumn)
- : VirtualSM(new SourceManagerForFile(FileName, Code)), SM(VirtualSM->get()),
- ID(VirtualSM->get().getMainFileID()), FirstStartColumn(FirstStartColumn),
- NextStartColumn(NextStartColumn), LastStartColumn(LastStartColumn) {
- SourceLocation StartOfFile = SM.getLocForStartOfFile(ID);
+// FIXME: Instead of printing the diagnostic we should store it and have a
+// better way to return errors through the format APIs.
+class FatalDiagnosticConsumer : public DiagnosticConsumer {
+public:
+ void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info) override {
+ if (DiagLevel == DiagnosticsEngine::Fatal) {
+ Fatal = true;
+ llvm::SmallVector<char, 128> Message;
+ Info.FormatDiagnostic(Message);
+ llvm::errs() << Message << "\n";
+ }
+ }
+
+ bool fatalError() const { return Fatal; }
+
+private:
+ bool Fatal = false;
+};
+
+std::unique_ptr<Environment>
+Environment::make(StringRef Code, StringRef FileName,
+ ArrayRef<tooling::Range> Ranges, unsigned FirstStartColumn,
+ unsigned NextStartColumn, unsigned LastStartColumn) {
+ auto Env = std::make_unique<Environment>(Code, FileName, FirstStartColumn,
+ NextStartColumn, LastStartColumn);
+ FatalDiagnosticConsumer Diags;
+ Env->SM.getDiagnostics().setClient(&Diags, /*ShouldOwnClient=*/false);
+ SourceLocation StartOfFile = Env->SM.getLocForStartOfFile(Env->ID);
for (const tooling::Range &Range : Ranges) {
SourceLocation Start = StartOfFile.getLocWithOffset(Range.getOffset());
SourceLocation End = Start.getLocWithOffset(Range.getLength());
- CharRanges.push_back(CharSourceRange::getCharRange(Start, End));
+ Env->CharRanges.push_back(CharSourceRange::getCharRange(Start, End));
}
+ // Validate that we can get the buffer data without a fatal error.
+ Env->SM.getBufferData(Env->ID);
+ if (Diags.fatalError())
+ return nullptr;
+ return Env;
}
+Environment::Environment(StringRef Code, StringRef FileName,
+ unsigned FirstStartColumn, unsigned NextStartColumn,
+ unsigned LastStartColumn)
+ : VirtualSM(new SourceManagerForFile(FileName, Code)), SM(VirtualSM->get()),
+ ID(VirtualSM->get().getMainFileID()), FirstStartColumn(FirstStartColumn),
+ NextStartColumn(NextStartColumn), LastStartColumn(LastStartColumn) {}
+
TokenAnalyzer::TokenAnalyzer(const Environment &Env, const FormatStyle &Style)
: Style(Style), Env(Env),
AffectedRangeMgr(Env.getSourceManager(), Env.getCharRanges()),
@@ -62,29 +97,33 @@ TokenAnalyzer::TokenAnalyzer(const Environment &Env, const FormatStyle &Style)
<< "\n");
}
-std::pair<tooling::Replacements, unsigned> TokenAnalyzer::process() {
+std::pair<tooling::Replacements, unsigned>
+TokenAnalyzer::process(bool SkipAnnotation) {
tooling::Replacements Result;
llvm::SpecificBumpPtrAllocator<FormatToken> Allocator;
IdentifierTable IdentTable(getFormattingLangOpts(Style));
FormatTokenLexer Lex(Env.getSourceManager(), Env.getFileID(),
Env.getFirstStartColumn(), Style, Encoding, Allocator,
-
IdentTable);
ArrayRef<FormatToken *> Toks(Lex.lex());
SmallVector<FormatToken *, 10> Tokens(Toks.begin(), Toks.end());
- UnwrappedLineParser Parser(Style, Lex.getKeywords(),
- Env.getFirstStartColumn(), Tokens, *this);
+ UnwrappedLineParser Parser(Env.getSourceManager(), Style, Lex.getKeywords(),
+ Env.getFirstStartColumn(), Tokens, *this,
+ Allocator, IdentTable);
Parser.parse();
- assert(UnwrappedLines.rbegin()->empty());
+ assert(UnwrappedLines.back().empty());
unsigned Penalty = 0;
for (unsigned Run = 0, RunE = UnwrappedLines.size(); Run + 1 != RunE; ++Run) {
+ const auto &Lines = UnwrappedLines[Run];
LLVM_DEBUG(llvm::dbgs() << "Run " << Run << "...\n");
SmallVector<AnnotatedLine *, 16> AnnotatedLines;
+ AnnotatedLines.reserve(Lines.size());
TokenAnnotator Annotator(Style, Lex.getKeywords());
- for (unsigned i = 0, e = UnwrappedLines[Run].size(); i != e; ++i) {
- AnnotatedLines.push_back(new AnnotatedLine(UnwrappedLines[Run][i]));
- Annotator.annotate(*AnnotatedLines.back());
+ for (const UnwrappedLine &Line : Lines) {
+ AnnotatedLines.push_back(new AnnotatedLine(Line));
+ if (!SkipAnnotation)
+ Annotator.annotate(*AnnotatedLines.back());
}
std::pair<tooling::Replacements, unsigned> RunResult =
@@ -92,15 +131,11 @@ std::pair<tooling::Replacements, unsigned> TokenAnalyzer::process() {
LLVM_DEBUG({
llvm::dbgs() << "Replacements for run " << Run << ":\n";
- for (tooling::Replacements::const_iterator I = RunResult.first.begin(),
- E = RunResult.first.end();
- I != E; ++I) {
- llvm::dbgs() << I->toString() << "\n";
- }
+ for (const tooling::Replacement &Fix : RunResult.first)
+ llvm::dbgs() << Fix.toString() << "\n";
});
- for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
- delete AnnotatedLines[i];
- }
+ for (AnnotatedLine *Line : AnnotatedLines)
+ delete Line;
Penalty += RunResult.second;
for (const auto &R : RunResult.first) {
diff --git a/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.h b/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.h
index 5ce44a0f3ea7..4086dab1c94c 100644
--- a/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.h
+++ b/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.h
@@ -29,6 +29,7 @@
#include "clang/Format/Format.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
+#include <memory>
namespace clang {
namespace format {
@@ -40,13 +41,12 @@ public:
// that the next lines of \p Code should start at \p NextStartColumn, and
// that \p Code should end at \p LastStartColumn if it ends in newline.
// See also the documentation of clang::format::internal::reformat.
- Environment(StringRef Code, StringRef FileName,
- ArrayRef<tooling::Range> Ranges, unsigned FirstStartColumn = 0,
+ Environment(StringRef Code, StringRef FileName, unsigned FirstStartColumn = 0,
unsigned NextStartColumn = 0, unsigned LastStartColumn = 0);
FileID getFileID() const { return ID; }
- const SourceManager &getSourceManager() const { return SM; }
+ SourceManager &getSourceManager() const { return SM; }
ArrayRef<CharSourceRange> getCharRanges() const { return CharRanges; }
@@ -62,6 +62,14 @@ public:
// environment should end if it ends in a newline.
unsigned getLastStartColumn() const { return LastStartColumn; }
+ // Returns nullptr and prints a diagnostic to stderr if the environment
+ // can't be created.
+ static std::unique_ptr<Environment> make(StringRef Code, StringRef FileName,
+ ArrayRef<tooling::Range> Ranges,
+ unsigned FirstStartColumn = 0,
+ unsigned NextStartColumn = 0,
+ unsigned LastStartColumn = 0);
+
private:
// This is only set if constructed from string.
std::unique_ptr<SourceManagerForFile> VirtualSM;
@@ -81,7 +89,8 @@ class TokenAnalyzer : public UnwrappedLineConsumer {
public:
TokenAnalyzer(const Environment &Env, const FormatStyle &Style);
- std::pair<tooling::Replacements, unsigned> process();
+ std::pair<tooling::Replacements, unsigned>
+ process(bool SkipAnnotation = false);
protected:
virtual std::pair<tooling::Replacements, unsigned>
diff --git a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
index 11dc661abc24..4d482e6543d6 100644
--- a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
+++ b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
@@ -24,8 +24,27 @@
namespace clang {
namespace format {
+static bool mustBreakAfterAttributes(const FormatToken &Tok,
+ const FormatStyle &Style) {
+ switch (Style.BreakAfterAttributes) {
+ case FormatStyle::ABS_Always:
+ return true;
+ case FormatStyle::ABS_Leave:
+ return Tok.NewlinesBefore > 0;
+ default:
+ return false;
+ }
+}
+
namespace {
+/// Returns \c true if the line starts with a token that can start a statement
+/// with an initializer.
+static bool startsWithInitStatement(const AnnotatedLine &Line) {
+ return Line.startsWith(tok::kw_for) || Line.startsWith(tok::kw_if) ||
+ Line.startsWith(tok::kw_switch);
+}
+
/// Returns \c true if the token can be used as an identifier in
/// an Objective-C \c \@selector, \c false otherwise.
///
@@ -38,7 +57,7 @@ namespace {
/// invokes @selector(...)). So, we allow treat any identifier or
/// keyword as a potential Objective-C selector component.
static bool canBeObjCSelectorComponent(const FormatToken &Tok) {
- return Tok.Tok.getIdentifierInfo() != nullptr;
+ return Tok.Tok.getIdentifierInfo();
}
/// With `Left` being '(', check if we're at either `[...](` or
@@ -47,8 +66,9 @@ static bool isLambdaParameterList(const FormatToken *Left) {
// Skip <...> if present.
if (Left->Previous && Left->Previous->is(tok::greater) &&
Left->Previous->MatchingParen &&
- Left->Previous->MatchingParen->is(TT_TemplateOpener))
+ Left->Previous->MatchingParen->is(TT_TemplateOpener)) {
Left = Left->Previous->MatchingParen;
+ }
// Check for `[...]`.
return Left->Previous && Left->Previous->is(tok::r_square) &&
@@ -63,6 +83,38 @@ static bool isKeywordWithCondition(const FormatToken &Tok) {
tok::kw_constexpr, tok::kw_catch);
}
+/// Returns \c true if the token starts a C++ attribute, \c false otherwise.
+static bool isCppAttribute(bool IsCpp, const FormatToken &Tok) {
+ if (!IsCpp || !Tok.startsSequence(tok::l_square, tok::l_square))
+ return false;
+ // The first square bracket is part of an ObjC array literal
+ if (Tok.Previous && Tok.Previous->is(tok::at))
+ return false;
+ const FormatToken *AttrTok = Tok.Next->Next;
+ if (!AttrTok)
+ return false;
+ // C++17 '[[using ns: foo, bar(baz, blech)]]'
+ // We assume nobody will name an ObjC variable 'using'.
+ if (AttrTok->startsSequence(tok::kw_using, tok::identifier, tok::colon))
+ return true;
+ if (AttrTok->isNot(tok::identifier))
+ return false;
+ while (AttrTok && !AttrTok->startsSequence(tok::r_square, tok::r_square)) {
+ // ObjC message send. We assume nobody will use : in a C++11 attribute
+ // specifier parameter, although this is technically valid:
+ // [[foo(:)]].
+ if (AttrTok->is(tok::colon) ||
+ AttrTok->startsSequence(tok::identifier, tok::identifier) ||
+ AttrTok->startsSequence(tok::r_paren, tok::identifier)) {
+ return false;
+ }
+ if (AttrTok->is(tok::ellipsis))
+ return true;
+ AttrTok = AttrTok->Next;
+ }
+ return AttrTok && AttrTok->startsSequence(tok::r_square, tok::r_square);
+}
+
/// A parser that gathers additional information about tokens.
///
/// The \c TokenAnnotator tries to match parenthesis and square brakets and
@@ -71,28 +123,51 @@ static bool isKeywordWithCondition(const FormatToken &Tok) {
class AnnotatingParser {
public:
AnnotatingParser(const FormatStyle &Style, AnnotatedLine &Line,
- const AdditionalKeywords &Keywords)
+ const AdditionalKeywords &Keywords,
+ SmallVector<ScopeType> &Scopes)
: Style(Style), Line(Line), CurrentToken(Line.First), AutoFound(false),
- Keywords(Keywords) {
+ Keywords(Keywords), Scopes(Scopes) {
Contexts.push_back(Context(tok::unknown, 1, /*IsExpression=*/false));
- resetTokenMetadata(CurrentToken);
+ resetTokenMetadata();
}
private:
+ ScopeType getScopeType(const FormatToken &Token) const {
+ switch (Token.getType()) {
+ case TT_FunctionLBrace:
+ case TT_LambdaLBrace:
+ return ST_Function;
+ case TT_ClassLBrace:
+ case TT_StructLBrace:
+ case TT_UnionLBrace:
+ return ST_Class;
+ default:
+ return ST_Other;
+ }
+ }
+
bool parseAngle() {
if (!CurrentToken || !CurrentToken->Previous)
return false;
- if (NonTemplateLess.count(CurrentToken->Previous))
+ if (NonTemplateLess.count(CurrentToken->Previous) > 0)
return false;
const FormatToken &Previous = *CurrentToken->Previous; // The '<'.
if (Previous.Previous) {
if (Previous.Previous->Tok.isLiteral())
return false;
+ if (Previous.Previous->is(tok::r_brace))
+ return false;
if (Previous.Previous->is(tok::r_paren) && Contexts.size() > 1 &&
(!Previous.Previous->MatchingParen ||
- !Previous.Previous->MatchingParen->is(TT_OverloadedOperatorLParen)))
+ Previous.Previous->MatchingParen->isNot(
+ TT_OverloadedOperatorLParen))) {
+ return false;
+ }
+ if (Previous.Previous->is(tok::kw_operator) &&
+ CurrentToken->is(tok::l_paren)) {
return false;
+ }
}
FormatToken *Left = CurrentToken->Previous;
@@ -106,12 +181,13 @@ private:
Contexts.back().IsExpression = false;
// If there's a template keyword before the opening angle bracket, this is a
// template parameter, not an argument.
- Contexts.back().InTemplateArgument =
- Left->Previous && Left->Previous->Tok.isNot(tok::kw_template);
+ if (Left->Previous && Left->Previous->isNot(tok::kw_template))
+ Contexts.back().ContextType = Context::TemplateArgument;
if (Style.Language == FormatStyle::LK_Java &&
- CurrentToken->is(tok::question))
+ CurrentToken->is(tok::question)) {
next();
+ }
while (CurrentToken) {
if (CurrentToken->is(tok::greater)) {
@@ -123,11 +199,11 @@ private:
// parameter cases, but should not alter program semantics.
if (CurrentToken->Next && CurrentToken->Next->is(tok::greater) &&
Left->ParentBracket != tok::less &&
- (isKeywordWithCondition(*Line.First) ||
- CurrentToken->getStartOfNonWhitespace() ==
- CurrentToken->Next->getStartOfNonWhitespace().getLocWithOffset(
- -1)))
+ CurrentToken->getStartOfNonWhitespace() ==
+ CurrentToken->Next->getStartOfNonWhitespace().getLocWithOffset(
+ -1)) {
return false;
+ }
Left->MatchingParen = CurrentToken;
CurrentToken->MatchingParen = Left;
// In TT_Proto, we must distignuish between:
@@ -137,10 +213,14 @@ private:
// In TT_TextProto, map<key, value> does not occur.
if (Style.Language == FormatStyle::LK_TextProto ||
(Style.Language == FormatStyle::LK_Proto && Left->Previous &&
- Left->Previous->isOneOf(TT_SelectorName, TT_DictLiteral)))
+ Left->Previous->isOneOf(TT_SelectorName, TT_DictLiteral))) {
CurrentToken->setType(TT_DictLiteral);
- else
+ } else {
CurrentToken->setType(TT_TemplateCloser);
+ CurrentToken->Tok.setLength(1);
+ }
+ if (CurrentToken->Next && CurrentToken->Next->Tok.isLiteral())
+ return false;
next();
return true;
}
@@ -151,9 +231,9 @@ private:
}
if (CurrentToken->isOneOf(tok::r_paren, tok::r_square, tok::r_brace) ||
(CurrentToken->isOneOf(tok::colon, tok::question) && InExprContext &&
- !Style.isCSharp() && Style.Language != FormatStyle::LK_Proto &&
- Style.Language != FormatStyle::LK_TextProto))
+ !Style.isCSharp() && !Style.isProto())) {
return false;
+ }
// If a && or || is found and interpreted as a binary operator, this set
// of angles is likely part of something like "a < b && c > d". If the
// angles are inside an expression, the ||/&& might also be a binary
@@ -163,15 +243,17 @@ private:
if (CurrentToken->Previous->isOneOf(tok::pipepipe, tok::ampamp) &&
CurrentToken->Previous->is(TT_BinaryOperator) &&
Contexts[Contexts.size() - 2].IsExpression &&
- !Line.startsWith(tok::kw_template))
+ !Line.startsWith(tok::kw_template)) {
return false;
+ }
updateParameterCount(Left, CurrentToken);
if (Style.Language == FormatStyle::LK_Proto) {
if (FormatToken *Previous = CurrentToken->getPreviousNonComment()) {
if (CurrentToken->is(tok::colon) ||
(CurrentToken->isOneOf(tok::l_brace, tok::less) &&
- Previous->isNot(tok::colon)))
+ Previous->isNot(tok::colon))) {
Previous->setType(TT_SelectorName);
+ }
}
}
if (!consumeToken())
@@ -204,34 +286,38 @@ private:
bool parseParens(bool LookForDecls = false) {
if (!CurrentToken)
return false;
- FormatToken *Left = CurrentToken->Previous;
- assert(Left && "Unknown previous token");
- FormatToken *PrevNonComment = Left->getPreviousNonComment();
- Left->ParentBracket = Contexts.back().ContextKind;
+ assert(CurrentToken->Previous && "Unknown previous token");
+ FormatToken &OpeningParen = *CurrentToken->Previous;
+ assert(OpeningParen.is(tok::l_paren));
+ FormatToken *PrevNonComment = OpeningParen.getPreviousNonComment();
+ OpeningParen.ParentBracket = Contexts.back().ContextKind;
ScopedContextCreator ContextCreator(*this, tok::l_paren, 1);
// FIXME: This is a bit of a hack. Do better.
Contexts.back().ColonIsForRangeExpr =
Contexts.size() == 2 && Contexts[0].ColonIsForRangeExpr;
- if (Left->Previous && Left->Previous->is(TT_UntouchableMacroFunc)) {
- Left->Finalized = true;
+ if (OpeningParen.Previous &&
+ OpeningParen.Previous->is(TT_UntouchableMacroFunc)) {
+ OpeningParen.Finalized = true;
return parseUntouchableParens();
}
bool StartsObjCMethodExpr = false;
- if (FormatToken *MaybeSel = Left->Previous) {
- // @selector( starts a selector.
- if (MaybeSel->isObjCAtKeyword(tok::objc_selector) && MaybeSel->Previous &&
- MaybeSel->Previous->is(tok::at)) {
- StartsObjCMethodExpr = true;
+ if (!Style.isVerilog()) {
+ if (FormatToken *MaybeSel = OpeningParen.Previous) {
+ // @selector( starts a selector.
+ if (MaybeSel->isObjCAtKeyword(tok::objc_selector) &&
+ MaybeSel->Previous && MaybeSel->Previous->is(tok::at)) {
+ StartsObjCMethodExpr = true;
+ }
}
}
- if (Left->is(TT_OverloadedOperatorLParen)) {
+ if (OpeningParen.is(TT_OverloadedOperatorLParen)) {
// Find the previous kw_operator token.
- FormatToken *Prev = Left;
- while (!Prev->is(tok::kw_operator)) {
+ FormatToken *Prev = &OpeningParen;
+ while (Prev->isNot(tok::kw_operator)) {
Prev = Prev->Previous;
assert(Prev && "Expect a kw_operator prior to the OperatorLParen!");
}
@@ -242,61 +328,77 @@ private:
bool OperatorCalledAsMemberFunction =
Prev->Previous && Prev->Previous->isOneOf(tok::period, tok::arrow);
Contexts.back().IsExpression = OperatorCalledAsMemberFunction;
- } else if (Style.Language == FormatStyle::LK_JavaScript &&
+ } else if (OpeningParen.is(TT_VerilogInstancePortLParen)) {
+ Contexts.back().IsExpression = true;
+ Contexts.back().ContextType = Context::VerilogInstancePortList;
+ } else if (Style.isJavaScript() &&
(Line.startsWith(Keywords.kw_type, tok::identifier) ||
Line.startsWith(tok::kw_export, Keywords.kw_type,
tok::identifier))) {
// type X = (...);
// export type X = (...);
Contexts.back().IsExpression = false;
- } else if (Left->Previous &&
- (Left->Previous->isOneOf(tok::kw_static_assert, tok::kw_while,
- tok::l_paren, tok::comma) ||
- Left->Previous->isIf() ||
- Left->Previous->is(TT_BinaryOperator))) {
+ } else if (OpeningParen.Previous &&
+ (OpeningParen.Previous->isOneOf(
+ tok::kw_static_assert, tok::kw_noexcept, tok::kw_explicit,
+ tok::kw_while, tok::l_paren, tok::comma,
+ TT_BinaryOperator) ||
+ OpeningParen.Previous->isIf())) {
// static_assert, if and while usually contain expressions.
Contexts.back().IsExpression = true;
- } else if (Style.Language == FormatStyle::LK_JavaScript && Left->Previous &&
- (Left->Previous->is(Keywords.kw_function) ||
- (Left->Previous->endsSequence(tok::identifier,
- Keywords.kw_function)))) {
+ } else if (Style.isJavaScript() && OpeningParen.Previous &&
+ (OpeningParen.Previous->is(Keywords.kw_function) ||
+ (OpeningParen.Previous->endsSequence(tok::identifier,
+ Keywords.kw_function)))) {
// function(...) or function f(...)
Contexts.back().IsExpression = false;
- } else if (Style.Language == FormatStyle::LK_JavaScript && Left->Previous &&
- Left->Previous->is(TT_JsTypeColon)) {
+ } else if (Style.isJavaScript() && OpeningParen.Previous &&
+ OpeningParen.Previous->is(TT_JsTypeColon)) {
// let x: (SomeType);
Contexts.back().IsExpression = false;
- } else if (isLambdaParameterList(Left)) {
+ } else if (isLambdaParameterList(&OpeningParen)) {
// This is a parameter list of a lambda expression.
Contexts.back().IsExpression = false;
+ } else if (OpeningParen.is(TT_RequiresExpressionLParen)) {
+ Contexts.back().IsExpression = false;
+ } else if (OpeningParen.Previous &&
+ OpeningParen.Previous->is(tok::kw__Generic)) {
+ Contexts.back().ContextType = Context::C11GenericSelection;
+ Contexts.back().IsExpression = true;
} else if (Line.InPPDirective &&
- (!Left->Previous || !Left->Previous->is(tok::identifier))) {
+ (!OpeningParen.Previous ||
+ OpeningParen.Previous->isNot(tok::identifier))) {
Contexts.back().IsExpression = true;
} else if (Contexts[Contexts.size() - 2].CaretFound) {
// This is the parameter list of an ObjC block.
Contexts.back().IsExpression = false;
- } else if (Left->Previous && Left->Previous->is(TT_ForEachMacro)) {
+ } else if (OpeningParen.Previous &&
+ OpeningParen.Previous->is(TT_ForEachMacro)) {
// The first argument to a foreach macro is a declaration.
- Contexts.back().IsForEachMacro = true;
+ Contexts.back().ContextType = Context::ForEachMacro;
Contexts.back().IsExpression = false;
- } else if (Left->Previous && Left->Previous->MatchingParen &&
- Left->Previous->MatchingParen->is(TT_ObjCBlockLParen)) {
+ } else if (OpeningParen.Previous && OpeningParen.Previous->MatchingParen &&
+ OpeningParen.Previous->MatchingParen->isOneOf(
+ TT_ObjCBlockLParen, TT_FunctionTypeLParen)) {
Contexts.back().IsExpression = false;
} else if (!Line.MustBeDeclaration && !Line.InPPDirective) {
bool IsForOrCatch =
- Left->Previous && Left->Previous->isOneOf(tok::kw_for, tok::kw_catch);
+ OpeningParen.Previous &&
+ OpeningParen.Previous->isOneOf(tok::kw_for, tok::kw_catch);
Contexts.back().IsExpression = !IsForOrCatch;
}
// Infer the role of the l_paren based on the previous token if we haven't
- // detected one one yet.
- if (PrevNonComment && Left->is(TT_Unknown)) {
- if (PrevNonComment->is(tok::kw___attribute)) {
- Left->setType(TT_AttributeParen);
+ // detected one yet.
+ if (PrevNonComment && OpeningParen.is(TT_Unknown)) {
+ if (PrevNonComment->isAttribute()) {
+ OpeningParen.setType(TT_AttributeLParen);
} else if (PrevNonComment->isOneOf(TT_TypenameMacro, tok::kw_decltype,
- tok::kw_typeof, tok::kw__Atomic,
- tok::kw___underlying_type)) {
- Left->setType(TT_TypeDeclarationParen);
+ tok::kw_typeof,
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) tok::kw___##Trait,
+#include "clang/Basic/TransformTypeTraits.def"
+ tok::kw__Atomic)) {
+ OpeningParen.setType(TT_TypeDeclarationParen);
// decltype() and typeof() usually contain expressions.
if (PrevNonComment->isOneOf(tok::kw_decltype, tok::kw_typeof))
Contexts.back().IsExpression = true;
@@ -305,7 +407,7 @@ private:
if (StartsObjCMethodExpr) {
Contexts.back().ColonIsObjCMethodExpr = true;
- Left->setType(TT_ObjCMethodExpr);
+ OpeningParen.setType(TT_ObjCMethodExpr);
}
// MightBeFunctionType and ProbablyFunctionType are used for
@@ -314,14 +416,15 @@ private:
//
// void (*FunctionPointer)(void);
// void (&FunctionReference)(void);
+ // void (&&FunctionReference)(void);
// void (^ObjCBlock)(void);
bool MightBeFunctionType = !Contexts[Contexts.size() - 2].IsExpression;
bool ProbablyFunctionType =
- CurrentToken->isOneOf(tok::star, tok::amp, tok::caret);
+ CurrentToken->isPointerOrReference() || CurrentToken->is(tok::caret);
bool HasMultipleLines = false;
bool HasMultipleParametersOnALine = false;
bool MightBeObjCForRangeLoop =
- Left->Previous && Left->Previous->is(tok::kw_for);
+ OpeningParen.Previous && OpeningParen.Previous->is(tok::kw_for);
FormatToken *PossibleObjCForInToken = nullptr;
while (CurrentToken) {
// LookForDecls is set when "if (" has been seen. Check for
@@ -334,7 +437,7 @@ private:
FormatToken *PrevPrev = Prev->getPreviousNonComment();
FormatToken *Next = CurrentToken->Next;
if (PrevPrev && PrevPrev->is(tok::identifier) &&
- Prev->isOneOf(tok::star, tok::amp, tok::ampamp) &&
+ PrevPrev->isNot(TT_TypeName) && Prev->isPointerOrReference() &&
CurrentToken->is(tok::identifier) && Next->isNot(tok::equal)) {
Prev->setType(TT_BinaryOperator);
LookForDecls = false;
@@ -344,29 +447,34 @@ private:
if (CurrentToken->Previous->is(TT_PointerOrReference) &&
CurrentToken->Previous->Previous->isOneOf(tok::l_paren,
- tok::coloncolon))
+ tok::coloncolon)) {
ProbablyFunctionType = true;
+ }
if (CurrentToken->is(tok::comma))
MightBeFunctionType = false;
if (CurrentToken->Previous->is(TT_BinaryOperator))
Contexts.back().IsExpression = true;
if (CurrentToken->is(tok::r_paren)) {
- if (MightBeFunctionType && ProbablyFunctionType && CurrentToken->Next &&
+ if (OpeningParen.isNot(TT_CppCastLParen) && MightBeFunctionType &&
+ ProbablyFunctionType && CurrentToken->Next &&
(CurrentToken->Next->is(tok::l_paren) ||
- (CurrentToken->Next->is(tok::l_square) && Line.MustBeDeclaration)))
- Left->setType(Left->Next->is(tok::caret) ? TT_ObjCBlockLParen
- : TT_FunctionTypeLParen);
- Left->MatchingParen = CurrentToken;
- CurrentToken->MatchingParen = Left;
+ (CurrentToken->Next->is(tok::l_square) &&
+ Line.MustBeDeclaration))) {
+ OpeningParen.setType(OpeningParen.Next->is(tok::caret)
+ ? TT_ObjCBlockLParen
+ : TT_FunctionTypeLParen);
+ }
+ OpeningParen.MatchingParen = CurrentToken;
+ CurrentToken->MatchingParen = &OpeningParen;
if (CurrentToken->Next && CurrentToken->Next->is(tok::l_brace) &&
- Left->Previous && Left->Previous->is(tok::l_paren)) {
+ OpeningParen.Previous && OpeningParen.Previous->is(tok::l_paren)) {
// Detect the case where macros are used to generate lambdas or
// function bodies, e.g.:
// auto my_lambda = MACRO((Type *type, int i) { .. body .. });
- for (FormatToken *Tok = Left; Tok != CurrentToken; Tok = Tok->Next) {
- if (Tok->is(TT_BinaryOperator) &&
- Tok->isOneOf(tok::star, tok::amp, tok::ampamp))
+ for (FormatToken *Tok = &OpeningParen; Tok != CurrentToken;
+ Tok = Tok->Next) {
+ if (Tok->is(TT_BinaryOperator) && Tok->isPointerOrReference())
Tok->setType(TT_PointerOrReference);
}
}
@@ -379,23 +487,29 @@ private:
}
}
- if (Left->is(TT_AttributeParen))
- CurrentToken->setType(TT_AttributeParen);
- if (Left->is(TT_TypeDeclarationParen))
+ if (OpeningParen.is(TT_AttributeLParen))
+ CurrentToken->setType(TT_AttributeRParen);
+ if (OpeningParen.is(TT_TypeDeclarationParen))
CurrentToken->setType(TT_TypeDeclarationParen);
- if (Left->Previous && Left->Previous->is(TT_JavaAnnotation))
+ if (OpeningParen.Previous &&
+ OpeningParen.Previous->is(TT_JavaAnnotation)) {
CurrentToken->setType(TT_JavaAnnotation);
- if (Left->Previous && Left->Previous->is(TT_LeadingJavaAnnotation))
+ }
+ if (OpeningParen.Previous &&
+ OpeningParen.Previous->is(TT_LeadingJavaAnnotation)) {
CurrentToken->setType(TT_LeadingJavaAnnotation);
- if (Left->Previous && Left->Previous->is(TT_AttributeSquare))
+ }
+ if (OpeningParen.Previous &&
+ OpeningParen.Previous->is(TT_AttributeSquare)) {
CurrentToken->setType(TT_AttributeSquare);
+ }
if (!HasMultipleLines)
- Left->setPackingKind(PPK_Inconclusive);
+ OpeningParen.setPackingKind(PPK_Inconclusive);
else if (HasMultipleParametersOnALine)
- Left->setPackingKind(PPK_BinPacked);
+ OpeningParen.setPackingKind(PPK_BinPacked);
else
- Left->setPackingKind(PPK_OnePerLine);
+ OpeningParen.setPackingKind(PPK_OnePerLine);
next();
return true;
@@ -403,20 +517,22 @@ private:
if (CurrentToken->isOneOf(tok::r_square, tok::r_brace))
return false;
- if (CurrentToken->is(tok::l_brace))
- Left->setType(TT_Unknown); // Not TT_ObjCBlockLParen
+ if (CurrentToken->is(tok::l_brace) && OpeningParen.is(TT_ObjCBlockLParen))
+ OpeningParen.setType(TT_Unknown);
if (CurrentToken->is(tok::comma) && CurrentToken->Next &&
!CurrentToken->Next->HasUnescapedNewline &&
- !CurrentToken->Next->isTrailingComment())
+ !CurrentToken->Next->isTrailingComment()) {
HasMultipleParametersOnALine = true;
+ }
bool ProbablyFunctionTypeLParen =
(CurrentToken->is(tok::l_paren) && CurrentToken->Next &&
CurrentToken->Next->isOneOf(tok::star, tok::amp, tok::caret));
if ((CurrentToken->Previous->isOneOf(tok::kw_const, tok::kw_auto) ||
CurrentToken->Previous->isSimpleTypeSpecifier()) &&
!(CurrentToken->is(tok::l_brace) ||
- (CurrentToken->is(tok::l_paren) && !ProbablyFunctionTypeLParen)))
+ (CurrentToken->is(tok::l_paren) && !ProbablyFunctionTypeLParen))) {
Contexts.back().IsExpression = false;
+ }
if (CurrentToken->isOneOf(tok::semi, tok::colon)) {
MightBeObjCForRangeLoop = false;
if (PossibleObjCForInToken) {
@@ -436,7 +552,7 @@ private:
FormatToken *Tok = CurrentToken;
if (!consumeToken())
return false;
- updateParameterCount(Left, Tok);
+ updateParameterCount(&OpeningParen, Tok);
if (CurrentToken && CurrentToken->HasUnescapedNewline)
HasMultipleLines = true;
}
@@ -467,9 +583,8 @@ private:
return false;
// Move along the tokens inbetween the '[' and ']' e.g. [STAThread].
- while (AttrTok && AttrTok->isNot(tok::r_square)) {
+ while (AttrTok && AttrTok->isNot(tok::r_square))
AttrTok = AttrTok->Next;
- }
if (!AttrTok)
return false;
@@ -488,43 +603,13 @@ private:
// incase its a [XXX] retval func(....
if (AttrTok->Next &&
- AttrTok->Next->startsSequence(tok::identifier, tok::l_paren))
+ AttrTok->Next->startsSequence(tok::identifier, tok::l_paren)) {
return true;
+ }
return false;
}
- bool isCpp11AttributeSpecifier(const FormatToken &Tok) {
- if (!Style.isCpp() || !Tok.startsSequence(tok::l_square, tok::l_square))
- return false;
- // The first square bracket is part of an ObjC array literal
- if (Tok.Previous && Tok.Previous->is(tok::at)) {
- return false;
- }
- const FormatToken *AttrTok = Tok.Next->Next;
- if (!AttrTok)
- return false;
- // C++17 '[[using ns: foo, bar(baz, blech)]]'
- // We assume nobody will name an ObjC variable 'using'.
- if (AttrTok->startsSequence(tok::kw_using, tok::identifier, tok::colon))
- return true;
- if (AttrTok->isNot(tok::identifier))
- return false;
- while (AttrTok && !AttrTok->startsSequence(tok::r_square, tok::r_square)) {
- // ObjC message send. We assume nobody will use : in a C++11 attribute
- // specifier parameter, although this is technically valid:
- // [[foo(:)]].
- if (AttrTok->is(tok::colon) ||
- AttrTok->startsSequence(tok::identifier, tok::identifier) ||
- AttrTok->startsSequence(tok::r_paren, tok::identifier))
- return false;
- if (AttrTok->is(tok::ellipsis))
- return true;
- AttrTok = AttrTok->Next;
- }
- return AttrTok && AttrTok->startsSequence(tok::r_square, tok::r_square);
- }
-
bool parseSquare() {
if (!CurrentToken)
return false;
@@ -543,10 +628,11 @@ private:
bool CppArrayTemplates =
Style.isCpp() && Parent && Parent->is(TT_TemplateCloser) &&
(Contexts.back().CanBeExpression || Contexts.back().IsExpression ||
- Contexts.back().InTemplateArgument);
+ Contexts.back().ContextType == Context::TemplateArgument);
- bool IsCpp11AttributeSpecifier = isCpp11AttributeSpecifier(*Left) ||
- Contexts.back().InCpp11AttributeSpecifier;
+ const bool IsInnerSquare = Contexts.back().InCpp11AttributeSpecifier;
+ const bool IsCpp11AttributeSpecifier =
+ isCppAttribute(Style.isCpp(), *Left) || IsInnerSquare;
// Treat C# Attributes [STAThread] much like C++ attributes [[...]].
bool IsCSharpAttributeSpecifier =
@@ -581,7 +667,9 @@ private:
Left->setType(TT_InlineASMSymbolicNameLSquare);
} else if (IsCpp11AttributeSpecifier) {
Left->setType(TT_AttributeSquare);
- } else if (Style.Language == FormatStyle::LK_JavaScript && Parent &&
+ if (!IsInnerSquare && Left->Previous)
+ Left->Previous->EndsCppAttributeGroup = false;
+ } else if (Style.isJavaScript() && Parent &&
Contexts.back().ContextKind == tok::l_brace &&
Parent->isOneOf(tok::l_brace, tok::comma)) {
Left->setType(TT_JsComputedPropertyName);
@@ -593,8 +681,7 @@ private:
} else if (CurrentToken->is(tok::r_square) && Parent &&
Parent->is(TT_TemplateCloser)) {
Left->setType(TT_ArraySubscriptLSquare);
- } else if (Style.Language == FormatStyle::LK_Proto ||
- Style.Language == FormatStyle::LK_TextProto) {
+ } else if (Style.isProto()) {
// Square braces in LK_Proto can either be message field attributes:
//
// optional Aaa aaa = 1 [
@@ -645,8 +732,7 @@ private:
ScopedContextCreator ContextCreator(*this, tok::l_square, BindingIncrease);
Contexts.back().IsExpression = true;
- if (Style.Language == FormatStyle::LK_JavaScript && Parent &&
- Parent->is(TT_JsTypeColon))
+ if (Style.isJavaScript() && Parent && Parent->is(TT_JsTypeColon))
Contexts.back().IsExpression = false;
Contexts.back().ColonIsObjCMethodExpr = StartsObjCMethodExpr;
@@ -655,15 +741,18 @@ private:
while (CurrentToken) {
if (CurrentToken->is(tok::r_square)) {
- if (IsCpp11AttributeSpecifier)
+ if (IsCpp11AttributeSpecifier) {
CurrentToken->setType(TT_AttributeSquare);
- if (IsCSharpAttributeSpecifier)
+ if (!IsInnerSquare)
+ CurrentToken->EndsCppAttributeGroup = true;
+ }
+ if (IsCSharpAttributeSpecifier) {
CurrentToken->setType(TT_AttributeSquare);
- else if (((CurrentToken->Next &&
- CurrentToken->Next->is(tok::l_paren)) ||
- (CurrentToken->Previous &&
- CurrentToken->Previous->Previous == Left)) &&
- Left->is(TT_ObjCMethodExpr)) {
+ } else if (((CurrentToken->Next &&
+ CurrentToken->Next->is(tok::l_paren)) ||
+ (CurrentToken->Previous &&
+ CurrentToken->Previous->Previous == Left)) &&
+ Left->is(TT_ObjCMethodExpr)) {
// An ObjC method call is rarely followed by an open parenthesis. It
// also can't be composed of just one token, unless it's a macro that
// will be expanded to more tokens.
@@ -677,18 +766,21 @@ private:
// before the r_square is tagged as a selector name component.
if (!ColonFound && CurrentToken->Previous &&
CurrentToken->Previous->is(TT_Unknown) &&
- canBeObjCSelectorComponent(*CurrentToken->Previous))
+ canBeObjCSelectorComponent(*CurrentToken->Previous)) {
CurrentToken->Previous->setType(TT_SelectorName);
+ }
// determineStarAmpUsage() thinks that '*' '[' is allocating an
// array of pointers, but if '[' starts a selector then '*' is a
// binary operator.
if (Parent && Parent->is(TT_PointerOrReference))
- Parent->setType(TT_BinaryOperator);
+ Parent->overwriteFixedType(TT_BinaryOperator);
}
// An arrow after an ObjC method expression is not a lambda arrow.
if (CurrentToken->getType() == TT_ObjCMethodExpr &&
- CurrentToken->Next && CurrentToken->Next->is(TT_LambdaArrow))
- CurrentToken->Next->setType(TT_Unknown);
+ CurrentToken->Next &&
+ CurrentToken->Next->is(TT_TrailingReturnArrow)) {
+ CurrentToken->Next->overwriteFixedType(TT_Unknown);
+ }
Left->MatchingParen = CurrentToken;
CurrentToken->MatchingParen = Left;
// FirstObjCSelectorName is set when a colon is found. This does
@@ -723,20 +815,23 @@ private:
// Remember that this is a [[using ns: foo]] C++ attribute, so we
// don't add a space before the colon (unlike other colons).
CurrentToken->setType(TT_AttributeColon);
- } else if (Left->isOneOf(TT_ArraySubscriptLSquare,
+ } else if (!Style.isVerilog() && !Line.InPragmaDirective &&
+ Left->isOneOf(TT_ArraySubscriptLSquare,
TT_DesignatedInitializerLSquare)) {
Left->setType(TT_ObjCMethodExpr);
StartsObjCMethodExpr = true;
Contexts.back().ColonIsObjCMethodExpr = true;
- if (Parent && Parent->is(tok::r_paren))
+ if (Parent && Parent->is(tok::r_paren)) {
// FIXME(bug 36976): ObjC return types shouldn't use TT_CastRParen.
Parent->setType(TT_CastRParen);
+ }
}
ColonFound = true;
}
if (CurrentToken->is(tok::comma) && Left->is(TT_ObjCMethodExpr) &&
- !ColonFound)
+ !ColonFound) {
Left->setType(TT_ArrayInitializerLSquare);
+ }
FormatToken *Tok = CurrentToken;
if (!consumeToken())
return false;
@@ -753,72 +848,85 @@ private:
const auto End = std::next(Contexts.rbegin(), 2);
auto Last = Contexts.rbegin();
unsigned Depth = 0;
- for (; Last != End; ++Last) {
+ for (; Last != End; ++Last)
if (Last->ContextKind == tok::l_brace)
++Depth;
- }
return Depth == 2 && Last->ContextKind != tok::l_brace;
}
bool parseBrace() {
- if (CurrentToken) {
- FormatToken *Left = CurrentToken->Previous;
- Left->ParentBracket = Contexts.back().ContextKind;
+ if (!CurrentToken)
+ return true;
- if (Contexts.back().CaretFound)
- Left->setType(TT_ObjCBlockLBrace);
- Contexts.back().CaretFound = false;
+ assert(CurrentToken->Previous);
+ FormatToken &OpeningBrace = *CurrentToken->Previous;
+ assert(OpeningBrace.is(tok::l_brace));
+ OpeningBrace.ParentBracket = Contexts.back().ContextKind;
- ScopedContextCreator ContextCreator(*this, tok::l_brace, 1);
- Contexts.back().ColonIsDictLiteral = true;
- if (Left->is(BK_BracedInit))
- Contexts.back().IsExpression = true;
- if (Style.Language == FormatStyle::LK_JavaScript && Left->Previous &&
- Left->Previous->is(TT_JsTypeColon))
- Contexts.back().IsExpression = false;
+ if (Contexts.back().CaretFound)
+ OpeningBrace.overwriteFixedType(TT_ObjCBlockLBrace);
+ Contexts.back().CaretFound = false;
- unsigned CommaCount = 0;
- while (CurrentToken) {
- if (CurrentToken->is(tok::r_brace)) {
- Left->MatchingParen = CurrentToken;
- CurrentToken->MatchingParen = Left;
- if (Style.AlignArrayOfStructures != FormatStyle::AIAS_None) {
- if (Left->ParentBracket == tok::l_brace &&
- couldBeInStructArrayInitializer() && CommaCount > 0) {
- Contexts.back().InStructArrayInitializer = true;
- }
+ ScopedContextCreator ContextCreator(*this, tok::l_brace, 1);
+ Contexts.back().ColonIsDictLiteral = true;
+ if (OpeningBrace.is(BK_BracedInit))
+ Contexts.back().IsExpression = true;
+ if (Style.isJavaScript() && OpeningBrace.Previous &&
+ OpeningBrace.Previous->is(TT_JsTypeColon)) {
+ Contexts.back().IsExpression = false;
+ }
+ if (Style.isVerilog() &&
+ (!OpeningBrace.getPreviousNonComment() ||
+ OpeningBrace.getPreviousNonComment()->isNot(Keywords.kw_apostrophe))) {
+ Contexts.back().VerilogMayBeConcatenation = true;
+ }
+
+ unsigned CommaCount = 0;
+ while (CurrentToken) {
+ if (CurrentToken->is(tok::r_brace)) {
+ assert(!Scopes.empty());
+ assert(Scopes.back() == getScopeType(OpeningBrace));
+ Scopes.pop_back();
+ assert(OpeningBrace.Optional == CurrentToken->Optional);
+ OpeningBrace.MatchingParen = CurrentToken;
+ CurrentToken->MatchingParen = &OpeningBrace;
+ if (Style.AlignArrayOfStructures != FormatStyle::AIAS_None) {
+ if (OpeningBrace.ParentBracket == tok::l_brace &&
+ couldBeInStructArrayInitializer() && CommaCount > 0) {
+ Contexts.back().ContextType = Context::StructArrayInitializer;
}
- next();
- return true;
}
- if (CurrentToken->isOneOf(tok::r_paren, tok::r_square))
- return false;
- updateParameterCount(Left, CurrentToken);
- if (CurrentToken->isOneOf(tok::colon, tok::l_brace, tok::less)) {
- FormatToken *Previous = CurrentToken->getPreviousNonComment();
- if (Previous->is(TT_JsTypeOptionalQuestion))
- Previous = Previous->getPreviousNonComment();
- if ((CurrentToken->is(tok::colon) &&
- (!Contexts.back().ColonIsDictLiteral || !Style.isCpp())) ||
- Style.Language == FormatStyle::LK_Proto ||
- Style.Language == FormatStyle::LK_TextProto) {
- Left->setType(TT_DictLiteral);
- if (Previous->Tok.getIdentifierInfo() ||
- Previous->is(tok::string_literal))
- Previous->setType(TT_SelectorName);
+ next();
+ return true;
+ }
+ if (CurrentToken->isOneOf(tok::r_paren, tok::r_square))
+ return false;
+ updateParameterCount(&OpeningBrace, CurrentToken);
+ if (CurrentToken->isOneOf(tok::colon, tok::l_brace, tok::less)) {
+ FormatToken *Previous = CurrentToken->getPreviousNonComment();
+ if (Previous->is(TT_JsTypeOptionalQuestion))
+ Previous = Previous->getPreviousNonComment();
+ if ((CurrentToken->is(tok::colon) &&
+ (!Contexts.back().ColonIsDictLiteral || !Style.isCpp())) ||
+ Style.isProto()) {
+ OpeningBrace.setType(TT_DictLiteral);
+ if (Previous->Tok.getIdentifierInfo() ||
+ Previous->is(tok::string_literal)) {
+ Previous->setType(TT_SelectorName);
}
- if (CurrentToken->is(tok::colon) ||
- Style.Language == FormatStyle::LK_JavaScript)
- Left->setType(TT_DictLiteral);
- }
- if (CurrentToken->is(tok::comma)) {
- if (Style.Language == FormatStyle::LK_JavaScript)
- Left->setType(TT_DictLiteral);
- ++CommaCount;
}
- if (!consumeToken())
- return false;
+ if (CurrentToken->is(tok::colon) && OpeningBrace.is(TT_Unknown))
+ OpeningBrace.setType(TT_DictLiteral);
+ else if (Style.isJavaScript())
+ OpeningBrace.overwriteFixedType(TT_DictLiteral);
}
+ if (CurrentToken->is(tok::comma)) {
+ if (Style.isJavaScript())
+ OpeningBrace.overwriteFixedType(TT_DictLiteral);
+ ++CommaCount;
+ }
+ if (!consumeToken())
+ return false;
}
return true;
}
@@ -866,8 +974,21 @@ private:
}
bool consumeToken() {
+ if (Style.isCpp()) {
+ const auto *Prev = CurrentToken->getPreviousNonComment();
+ if (Prev && Prev->is(tok::r_square) && Prev->is(TT_AttributeSquare) &&
+ CurrentToken->isOneOf(tok::kw_if, tok::kw_switch, tok::kw_case,
+ tok::kw_default, tok::kw_for, tok::kw_while) &&
+ mustBreakAfterAttributes(*CurrentToken, Style)) {
+ CurrentToken->MustBreakBefore = true;
+ }
+ }
FormatToken *Tok = CurrentToken;
next();
+ // In Verilog primitives' state tables, `:`, `?`, and `-` aren't normal
+ // operators.
+ if (Tok->is(TT_VerilogTableItem))
+ return true;
switch (Tok->Tok.getKind()) {
case tok::plus:
case tok::minus:
@@ -877,8 +998,12 @@ private:
case tok::colon:
if (!Tok->Previous)
return false;
+ // Goto labels and case labels are already identified in
+ // UnwrappedLineParser.
+ if (Tok->isTypeFinalized())
+ break;
// Colons from ?: are handled in parseConditional().
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isJavaScript()) {
if (Contexts.back().ColonIsForRangeExpr || // colon in for loop
(Contexts.size() == 1 && // switch/case labels
!Line.First->isOneOf(tok::kw_enum, tok::kw_case)) ||
@@ -901,10 +1026,31 @@ private:
Tok->setType(TT_CSharpNamedArgumentColon);
break;
}
+ } else if (Style.isVerilog() && Tok->isNot(TT_BinaryOperator)) {
+ // The distribution weight operators are labeled
+ // TT_BinaryOperator by the lexer.
+ if (Keywords.isVerilogEnd(*Tok->Previous) ||
+ Keywords.isVerilogBegin(*Tok->Previous)) {
+ Tok->setType(TT_VerilogBlockLabelColon);
+ } else if (Contexts.back().ContextKind == tok::l_square) {
+ Tok->setType(TT_BitFieldColon);
+ } else if (Contexts.back().ColonIsDictLiteral) {
+ Tok->setType(TT_DictLiteral);
+ } else if (Contexts.size() == 1) {
+ // In Verilog a case label doesn't have the case keyword. We
+ // assume a colon following an expression is a case label.
+ // Colons from ?: are annotated in parseConditional().
+ Tok->setType(TT_CaseLabelColon);
+ if (Line.Level > 1 || (!Line.InPPDirective && Line.Level > 0))
+ --Line.Level;
+ }
+ break;
}
- if (Contexts.back().ColonIsDictLiteral ||
- Style.Language == FormatStyle::LK_Proto ||
- Style.Language == FormatStyle::LK_TextProto) {
+ if (Line.First->isOneOf(Keywords.kw_module, Keywords.kw_import) ||
+ Line.First->startsSequence(tok::kw_export, Keywords.kw_module) ||
+ Line.First->startsSequence(tok::kw_export, Keywords.kw_import)) {
+ Tok->setType(TT_ModulePartitionColon);
+ } else if (Contexts.back().ColonIsDictLiteral || Style.isProto()) {
Tok->setType(TT_DictLiteral);
if (Style.Language == FormatStyle::LK_TextProto) {
if (FormatToken *Previous = Tok->getPreviousNonComment())
@@ -928,33 +1074,42 @@ private:
Contexts.back().LongestObjCSelectorName == 0 ||
UnknownIdentifierInMethodDeclaration) {
Tok->Previous->setType(TT_SelectorName);
- if (!Contexts.back().FirstObjCSelectorName)
+ if (!Contexts.back().FirstObjCSelectorName) {
Contexts.back().FirstObjCSelectorName = Tok->Previous;
- else if (Tok->Previous->ColumnWidth >
- Contexts.back().LongestObjCSelectorName)
+ } else if (Tok->Previous->ColumnWidth >
+ Contexts.back().LongestObjCSelectorName) {
Contexts.back().LongestObjCSelectorName =
Tok->Previous->ColumnWidth;
+ }
Tok->Previous->ParameterIndex =
Contexts.back().FirstObjCSelectorName->ObjCSelectorNameParts;
++Contexts.back().FirstObjCSelectorName->ObjCSelectorNameParts;
}
} else if (Contexts.back().ColonIsForRangeExpr) {
Tok->setType(TT_RangeBasedForLoopColon);
+ } else if (Contexts.back().ContextType == Context::C11GenericSelection) {
+ Tok->setType(TT_GenericSelectionColon);
} else if (CurrentToken && CurrentToken->is(tok::numeric_constant)) {
Tok->setType(TT_BitFieldColon);
} else if (Contexts.size() == 1 &&
!Line.First->isOneOf(tok::kw_enum, tok::kw_case,
tok::kw_default)) {
FormatToken *Prev = Tok->getPreviousNonComment();
- if (Prev->isOneOf(tok::r_paren, tok::kw_noexcept))
+ if (!Prev)
+ break;
+ if (Prev->isOneOf(tok::r_paren, tok::kw_noexcept) ||
+ Prev->ClosesRequiresClause) {
Tok->setType(TT_CtorInitializerColon);
- else if (Prev->is(tok::kw_try)) {
+ } else if (Prev->is(tok::kw_try)) {
// Member initializer list within function try block.
FormatToken *PrevPrev = Prev->getPreviousNonComment();
+ if (!PrevPrev)
+ break;
if (PrevPrev && PrevPrev->isOneOf(tok::r_paren, tok::kw_noexcept))
Tok->setType(TT_CtorInitializerColon);
- } else
+ } else {
Tok->setType(TT_InheritanceColon);
+ }
} else if (canBeObjCSelectorComponent(*Tok->Previous) && Tok->Next &&
(Tok->Next->isOneOf(tok::r_paren, tok::comma) ||
(canBeObjCSelectorComponent(*Tok->Next) && Tok->Next->Next &&
@@ -962,7 +1117,8 @@ private:
// This handles a special macro in ObjC code where selectors including
// the colon are passed as macro arguments.
Tok->setType(TT_ObjCMethodExpr);
- } else if (Contexts.back().ContextKind == tok::l_paren) {
+ } else if (Contexts.back().ContextKind == tok::l_paren &&
+ !Line.InPragmaDirective) {
Tok->setType(TT_InlineASMColon);
}
break;
@@ -970,15 +1126,16 @@ private:
case tok::amp:
// | and & in declarations/type expressions represent union and
// intersection types, respectively.
- if (Style.Language == FormatStyle::LK_JavaScript &&
- !Contexts.back().IsExpression)
+ if (Style.isJavaScript() && !Contexts.back().IsExpression)
Tok->setType(TT_JsTypeOperator);
break;
case tok::kw_if:
- case tok::kw_while:
- if (Tok->is(tok::kw_if) && CurrentToken &&
- CurrentToken->isOneOf(tok::kw_constexpr, tok::identifier))
+ if (CurrentToken &&
+ CurrentToken->isOneOf(tok::kw_constexpr, tok::identifier)) {
next();
+ }
+ [[fallthrough]];
+ case tok::kw_while:
if (CurrentToken && CurrentToken->is(tok::l_paren)) {
next();
if (!parseParens(/*LookForDecls=*/true))
@@ -986,16 +1143,21 @@ private:
}
break;
case tok::kw_for:
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isJavaScript()) {
// x.for and {for: ...}
if ((Tok->Previous && Tok->Previous->is(tok::period)) ||
- (Tok->Next && Tok->Next->is(tok::colon)))
+ (Tok->Next && Tok->Next->is(tok::colon))) {
break;
+ }
// JS' for await ( ...
if (CurrentToken && CurrentToken->is(Keywords.kw_await))
next();
}
+ if (Style.isCpp() && CurrentToken && CurrentToken->is(tok::kw_co_await))
+ next();
Contexts.back().ColonIsForRangeExpr = true;
+ if (!CurrentToken || CurrentToken->isNot(tok::l_paren))
+ return false;
next();
if (!parseParens())
return false;
@@ -1013,14 +1175,64 @@ private:
Tok->setType(TT_OverloadedOperatorLParen);
}
+ if (Style.isVerilog()) {
+ // Identify the parameter list and port list in a module instantiation.
+ // This is still needed when we already have
+ // UnwrappedLineParser::parseVerilogHierarchyHeader because that
+ // function is only responsible for the definition, not the
+ // instantiation.
+ auto IsInstancePort = [&]() {
+ const FormatToken *Prev = Tok->getPreviousNonComment();
+ const FormatToken *PrevPrev;
+ // In the following example all 4 left parentheses will be treated as
+ // 'TT_VerilogInstancePortLParen'.
+ //
+ // module_x instance_1(port_1); // Case A.
+ // module_x #(parameter_1) // Case B.
+ // instance_2(port_1), // Case C.
+ // instance_3(port_1); // Case D.
+ if (!Prev || !(PrevPrev = Prev->getPreviousNonComment()))
+ return false;
+ // Case A.
+ if (Keywords.isVerilogIdentifier(*Prev) &&
+ Keywords.isVerilogIdentifier(*PrevPrev)) {
+ return true;
+ }
+ // Case B.
+ if (Prev->is(Keywords.kw_verilogHash) &&
+ Keywords.isVerilogIdentifier(*PrevPrev)) {
+ return true;
+ }
+ // Case C.
+ if (Keywords.isVerilogIdentifier(*Prev) && PrevPrev->is(tok::r_paren))
+ return true;
+ // Case D.
+ if (Keywords.isVerilogIdentifier(*Prev) && PrevPrev->is(tok::comma)) {
+ const FormatToken *PrevParen = PrevPrev->getPreviousNonComment();
+ if (PrevParen->is(tok::r_paren) && PrevParen->MatchingParen &&
+ PrevParen->MatchingParen->is(TT_VerilogInstancePortLParen)) {
+ return true;
+ }
+ }
+ return false;
+ };
+
+ if (IsInstancePort())
+ Tok->setFinalizedType(TT_VerilogInstancePortLParen);
+ }
+
if (!parseParens())
return false;
if (Line.MustBeDeclaration && Contexts.size() == 1 &&
!Contexts.back().IsExpression && !Line.startsWith(TT_ObjCProperty) &&
- !Tok->is(TT_TypeDeclarationParen) &&
- (!Tok->Previous || !Tok->Previous->isOneOf(tok::kw___attribute,
- TT_LeadingJavaAnnotation)))
- Line.MightBeFunctionDecl = true;
+ !Tok->isOneOf(TT_TypeDeclarationParen, TT_RequiresExpressionLParen)) {
+ if (const auto *Previous = Tok->Previous;
+ !Previous ||
+ (!Previous->isAttribute() &&
+ !Previous->isOneOf(TT_RequiresClause, TT_LeadingJavaAnnotation))) {
+ Line.MightBeFunctionDecl = true;
+ }
+ }
break;
case tok::l_square:
if (!parseSquare())
@@ -1032,6 +1244,7 @@ private:
if (Previous && Previous->getType() != TT_DictLiteral)
Previous->setType(TT_SelectorName);
}
+ Scopes.push_back(getScopeType(*Tok));
if (!parseBrace())
return false;
break;
@@ -1062,6 +1275,9 @@ private:
case tok::r_square:
return false;
case tok::r_brace:
+ // Don't pop scope when encountering unbalanced r_brace.
+ if (!Scopes.empty())
+ Scopes.pop_back();
// Lines can start with '}'.
if (Tok->Previous)
return false;
@@ -1073,21 +1289,33 @@ private:
Tok->SpacesRequiredBefore = 1;
break;
case tok::kw_operator:
- if (Style.Language == FormatStyle::LK_TextProto ||
- Style.Language == FormatStyle::LK_Proto)
+ if (Style.isProto())
break;
while (CurrentToken &&
!CurrentToken->isOneOf(tok::l_paren, tok::semi, tok::r_paren)) {
if (CurrentToken->isOneOf(tok::star, tok::amp))
CurrentToken->setType(TT_PointerOrReference);
- consumeToken();
- if (CurrentToken && CurrentToken->is(tok::comma) &&
- CurrentToken->Previous->isNot(tok::kw_operator))
+ auto Next = CurrentToken->getNextNonComment();
+ if (!Next)
break;
- if (CurrentToken && CurrentToken->Previous->isOneOf(
- TT_BinaryOperator, TT_UnaryOperator, tok::comma,
- tok::star, tok::arrow, tok::amp, tok::ampamp))
- CurrentToken->Previous->setType(TT_OverloadedOperator);
+ if (Next->is(tok::less))
+ next();
+ else
+ consumeToken();
+ if (!CurrentToken)
+ break;
+ auto Previous = CurrentToken->getPreviousNonComment();
+ assert(Previous);
+ if (CurrentToken->is(tok::comma) && Previous->isNot(tok::kw_operator))
+ break;
+ if (Previous->isOneOf(TT_BinaryOperator, TT_UnaryOperator, tok::comma,
+ tok::star, tok::arrow, tok::amp, tok::ampamp) ||
+ // User defined literal.
+ Previous->TokenText.starts_with("\"\"")) {
+ Previous->setType(TT_OverloadedOperator);
+ if (CurrentToken->isOneOf(tok::less, tok::greater))
+ break;
+ }
}
if (CurrentToken && CurrentToken->is(tok::l_paren))
CurrentToken->setType(TT_OverloadedOperatorLParen);
@@ -1095,9 +1323,9 @@ private:
CurrentToken->Previous->setType(TT_OverloadedOperator);
break;
case tok::question:
- if (Style.Language == FormatStyle::LK_JavaScript && Tok->Next &&
+ if (Style.isJavaScript() && Tok->Next &&
Tok->Next->isOneOf(tok::semi, tok::comma, tok::colon, tok::r_paren,
- tok::r_brace)) {
+ tok::r_brace, tok::r_square)) {
// Question marks before semicolons, colons, etc. indicate optional
// types (fields, parameters), e.g.
// function(x?: string, y?) {...}
@@ -1108,16 +1336,40 @@ private:
// Declarations cannot be conditional expressions, this can only be part
// of a type declaration.
if (Line.MustBeDeclaration && !Contexts.back().IsExpression &&
- Style.Language == FormatStyle::LK_JavaScript)
+ Style.isJavaScript()) {
break;
+ }
if (Style.isCSharp()) {
// `Type?)`, `Type?>`, `Type? name;` and `Type? name =` can only be
// nullable types.
+
+ // `Type?)`, `Type?>`, `Type? name;`
+ if (Tok->Next &&
+ (Tok->Next->startsSequence(tok::question, tok::r_paren) ||
+ Tok->Next->startsSequence(tok::question, tok::greater) ||
+ Tok->Next->startsSequence(tok::question, tok::identifier,
+ tok::semi))) {
+ Tok->setType(TT_CSharpNullable);
+ break;
+ }
+
+ // `Type? name =`
+ if (Tok->Next && Tok->Next->is(tok::identifier) && Tok->Next->Next &&
+ Tok->Next->Next->is(tok::equal)) {
+ Tok->setType(TT_CSharpNullable);
+ break;
+ }
+
// Line.MustBeDeclaration will be true for `Type? name;`.
- if ((!Contexts.back().IsExpression && Line.MustBeDeclaration) ||
- (Tok->Next && Tok->Next->isOneOf(tok::r_paren, tok::greater)) ||
- (Tok->Next && Tok->Next->is(tok::identifier) && Tok->Next->Next &&
- Tok->Next->Next->is(tok::equal))) {
+ // But not
+ // cond ? "A" : "B";
+ // cond ? id : "B";
+ // cond ? cond2 ? "A" : "B" : "C";
+ if (!Contexts.back().IsExpression && Line.MustBeDeclaration &&
+ (!Tok->Next ||
+ !Tok->Next->isOneOf(tok::identifier, tok::string_literal) ||
+ !Tok->Next->Next ||
+ !Tok->Next->Next->isOneOf(tok::colon, tok::question))) {
Tok->setType(TT_CSharpNullable);
break;
}
@@ -1128,18 +1380,37 @@ private:
parseTemplateDeclaration();
break;
case tok::comma:
- if (Contexts.back().InCtorInitializer)
+ switch (Contexts.back().ContextType) {
+ case Context::CtorInitializer:
Tok->setType(TT_CtorInitializerComma);
- else if (Contexts.back().InInheritanceList)
+ break;
+ case Context::InheritanceList:
Tok->setType(TT_InheritanceComma);
- else if (Contexts.back().FirstStartOfName &&
- (Contexts.size() == 1 || Line.startsWith(tok::kw_for))) {
- Contexts.back().FirstStartOfName->PartOfMultiVariableDeclStmt = true;
- Line.IsMultiVariableDeclStmt = true;
+ break;
+ case Context::VerilogInstancePortList:
+ Tok->setFinalizedType(TT_VerilogInstancePortComma);
+ break;
+ default:
+ if (Style.isVerilog() && Contexts.size() == 1 &&
+ Line.startsWith(Keywords.kw_assign)) {
+ Tok->setFinalizedType(TT_VerilogAssignComma);
+ } else if (Contexts.back().FirstStartOfName &&
+ (Contexts.size() == 1 || startsWithInitStatement(Line))) {
+ Contexts.back().FirstStartOfName->PartOfMultiVariableDeclStmt = true;
+ Line.IsMultiVariableDeclStmt = true;
+ }
+ break;
}
- if (Contexts.back().IsForEachMacro)
+ if (Contexts.back().ContextType == Context::ForEachMacro)
Contexts.back().IsExpression = true;
break;
+ case tok::kw_default:
+ // Unindent case labels.
+ if (Style.isVerilog() && Keywords.isVerilogEndOfLabel(*Tok) &&
+ (Line.Level > 1 || (!Line.InPPDirective && Line.Level > 0))) {
+ --Line.Level;
+ }
+ break;
case tok::identifier:
if (Tok->isOneOf(Keywords.kw___has_include,
Keywords.kw___has_include_next)) {
@@ -1149,8 +1420,14 @@ private:
Tok->Next->isNot(tok::l_paren)) {
Tok->setType(TT_CSharpGenericTypeConstraint);
parseCSharpGenericTypeConstraint();
+ if (!Tok->getPreviousNonComment())
+ Line.IsContinuation = true;
}
break;
+ case tok::arrow:
+ if (Tok->Previous && Tok->Previous->is(tok::kw_noexcept))
+ Tok->setType(TT_TrailingReturnArrow);
+ break;
default:
break;
}
@@ -1193,8 +1470,9 @@ private:
// Mark tokens up to the trailing line comments as implicit string
// literals.
if (CurrentToken->isNot(tok::comment) &&
- !CurrentToken->TokenText.startswith("//"))
+ !CurrentToken->TokenText.starts_with("//")) {
CurrentToken->setType(TT_ImplicitStringLiteral);
+ }
next();
}
}
@@ -1214,12 +1492,14 @@ private:
void parsePragma() {
next(); // Consume "pragma".
if (CurrentToken &&
- CurrentToken->isOneOf(Keywords.kw_mark, Keywords.kw_option)) {
- bool IsMark = CurrentToken->is(Keywords.kw_mark);
- next(); // Consume "mark".
+ CurrentToken->isOneOf(Keywords.kw_mark, Keywords.kw_option,
+ Keywords.kw_region)) {
+ bool IsMarkOrRegion =
+ CurrentToken->isOneOf(Keywords.kw_mark, Keywords.kw_region);
+ next();
next(); // Consume first token (so we fix leading whitespace).
while (CurrentToken) {
- if (IsMark || CurrentToken->Previous->is(TT_BinaryOperator))
+ if (IsMarkOrRegion || CurrentToken->Previous->is(TT_BinaryOperator))
CurrentToken->setType(TT_ImplicitStringLiteral);
next();
}
@@ -1227,7 +1507,7 @@ private:
}
void parseHasInclude() {
- if (!CurrentToken || !CurrentToken->is(tok::l_paren))
+ if (!CurrentToken || CurrentToken->isNot(tok::l_paren))
return;
next(); // '('
parseIncludeDirective();
@@ -1241,7 +1521,7 @@ private:
if (!CurrentToken)
return Type;
- if (Style.Language == FormatStyle::LK_JavaScript && IsFirstToken) {
+ if (Style.isJavaScript() && IsFirstToken) {
// JavaScript files can contain shebang lines of the form:
// #!/usr/bin/env node
// Treat these like C++ #include directives.
@@ -1253,7 +1533,7 @@ private:
return LT_ImportStatement;
}
- if (CurrentToken->Tok.is(tok::numeric_constant)) {
+ if (CurrentToken->is(tok::numeric_constant)) {
CurrentToken->SpacesRequiredBefore = 1;
return Type;
}
@@ -1261,6 +1541,10 @@ private:
// sequence.
if (!CurrentToken->Tok.getIdentifierInfo())
return Type;
+ // In Verilog macro expansions start with a backtick just like preprocessor
+ // directives. Thus we stop if the word is not a preprocessor directive.
+ if (Style.isVerilog() && !Keywords.isVerilogPPDirective(*CurrentToken))
+ return LT_Invalid;
switch (CurrentToken->Tok.getIdentifierInfo()->getPPKeywordID()) {
case tok::pp_include:
case tok::pp_include_next:
@@ -1288,11 +1572,12 @@ private:
while (CurrentToken) {
FormatToken *Tok = CurrentToken;
next();
- if (Tok->is(tok::l_paren))
+ if (Tok->is(tok::l_paren)) {
parseParens();
- else if (Tok->isOneOf(Keywords.kw___has_include,
- Keywords.kw___has_include_next))
+ } else if (Tok->isOneOf(Keywords.kw___has_include,
+ Keywords.kw___has_include_next)) {
parseHasInclude();
+ }
}
return Type;
}
@@ -1302,8 +1587,14 @@ public:
if (!CurrentToken)
return LT_Invalid;
NonTemplateLess.clear();
- if (CurrentToken->is(tok::hash))
- return parsePreprocessorDirective();
+ if (!Line.InMacroBody && CurrentToken->is(tok::hash)) {
+ // We were not yet allowed to use C++17 optional when this was being
+ // written. So we used LT_Invalid to mark that the line is not a
+ // preprocessor directive.
+ auto Type = parsePreprocessorDirective();
+ if (Type != LT_Invalid)
+ return Type;
+ }
// Directly allow to 'import <string-literal>' to support protocol buffer
// definitions (github.com/google/protobuf) or missing "#" (either way we
@@ -1311,8 +1602,8 @@ public:
IdentifierInfo *Info = CurrentToken->Tok.getIdentifierInfo();
if ((Style.Language == FormatStyle::LK_Java &&
CurrentToken->is(Keywords.kw_package)) ||
- (Info && Info->getPPKeywordID() == tok::pp_import &&
- CurrentToken->Next &&
+ (!Style.isVerilog() && Info &&
+ Info->getPPKeywordID() == tok::pp_import && CurrentToken->Next &&
CurrentToken->Next->isOneOf(tok::string_literal, tok::identifier,
tok::kw_static))) {
next();
@@ -1343,14 +1634,13 @@ public:
bool ImportStatement = false;
// import {...} from '...';
- if (Style.Language == FormatStyle::LK_JavaScript &&
- CurrentToken->is(Keywords.kw_import))
+ if (Style.isJavaScript() && CurrentToken->is(Keywords.kw_import))
ImportStatement = true;
while (CurrentToken) {
if (CurrentToken->is(tok::kw_virtual))
KeywordVirtualFound = true;
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isJavaScript()) {
// export {...} from '...';
// An export followed by "from 'some string';" is a re-export from
// another module identified by a URI and is treated as a
@@ -1359,8 +1649,9 @@ public:
// an import in this sense.
if (Line.First->is(tok::kw_export) &&
CurrentToken->is(Keywords.kw_from) && CurrentToken->Next &&
- CurrentToken->Next->isStringLiteral())
+ CurrentToken->Next->isStringLiteral()) {
ImportStatement = true;
+ }
if (isClosureImportStatement(*CurrentToken))
ImportStatement = true;
}
@@ -1373,17 +1664,16 @@ public:
return LT_ImportStatement;
if (Line.startsWith(TT_ObjCMethodSpecifier)) {
- if (Contexts.back().FirstObjCSelectorName)
+ if (Contexts.back().FirstObjCSelectorName) {
Contexts.back().FirstObjCSelectorName->LongestObjCSelectorName =
Contexts.back().LongestObjCSelectorName;
+ }
return LT_ObjCMethodDecl;
}
- for (const auto &ctx : Contexts) {
- if (ctx.InStructArrayInitializer) {
+ for (const auto &ctx : Contexts)
+ if (ctx.ContextType == Context::StructArrayInitializer)
return LT_ArrayOfStructInitializer;
- }
- }
return LT_Other;
}
@@ -1402,21 +1692,27 @@ private:
Tok.Next->Next->Next && Tok.Next->Next->Next->is(tok::l_paren);
}
- void resetTokenMetadata(FormatToken *Token) {
- if (!Token)
+ void resetTokenMetadata() {
+ if (!CurrentToken)
return;
// Reset token type in case we have already looked at it and then
// recovered from an error (e.g. failure to find the matching >).
- if (!CurrentToken->isOneOf(
+ if (!CurrentToken->isTypeFinalized() &&
+ !CurrentToken->isOneOf(
TT_LambdaLSquare, TT_LambdaLBrace, TT_AttributeMacro, TT_IfMacro,
TT_ForEachMacro, TT_TypenameMacro, TT_FunctionLBrace,
TT_ImplicitStringLiteral, TT_InlineASMBrace, TT_FatArrow,
- TT_LambdaArrow, TT_NamespaceMacro, TT_OverloadedOperator,
- TT_RegexLiteral, TT_TemplateString, TT_ObjCStringLiteral,
- TT_UntouchableMacroFunc, TT_ConstraintJunctions,
- TT_StatementAttributeLikeMacro))
+ TT_NamespaceMacro, TT_OverloadedOperator, TT_RegexLiteral,
+ TT_TemplateString, TT_ObjCStringLiteral, TT_UntouchableMacroFunc,
+ TT_StatementAttributeLikeMacro, TT_FunctionLikeOrFreestandingMacro,
+ TT_ClassLBrace, TT_EnumLBrace, TT_RecordLBrace, TT_StructLBrace,
+ TT_UnionLBrace, TT_RequiresClause,
+ TT_RequiresClauseInARequiresExpression, TT_RequiresExpression,
+ TT_RequiresExpressionLParen, TT_RequiresExpressionLBrace,
+ TT_BracedListLBrace)) {
CurrentToken->setType(TT_Unknown);
+ }
CurrentToken->Role.reset();
CurrentToken->MatchingParen = nullptr;
CurrentToken->FakeLParens.clear();
@@ -1424,15 +1720,16 @@ private:
}
void next() {
- if (CurrentToken) {
- CurrentToken->NestingLevel = Contexts.size() - 1;
- CurrentToken->BindingStrength = Contexts.back().BindingStrength;
- modifyContext(*CurrentToken);
- determineTokenType(*CurrentToken);
- CurrentToken = CurrentToken->Next;
- }
+ if (!CurrentToken)
+ return;
+
+ CurrentToken->NestingLevel = Contexts.size() - 1;
+ CurrentToken->BindingStrength = Contexts.back().BindingStrength;
+ modifyContext(*CurrentToken);
+ determineTokenType(*CurrentToken);
+ CurrentToken = CurrentToken->Next;
- resetTokenMetadata(CurrentToken);
+ resetTokenMetadata();
}
/// A struct to hold information valid in a specific context, e.g.
@@ -1453,14 +1750,33 @@ private:
FormatToken *FirstObjCSelectorName = nullptr;
FormatToken *FirstStartOfName = nullptr;
bool CanBeExpression = true;
- bool InTemplateArgument = false;
- bool InCtorInitializer = false;
- bool InInheritanceList = false;
bool CaretFound = false;
- bool IsForEachMacro = false;
bool InCpp11AttributeSpecifier = false;
bool InCSharpAttributeSpecifier = false;
- bool InStructArrayInitializer = false;
+ bool VerilogAssignmentFound = false;
+ // Whether the braces may mean concatenation instead of structure or array
+ // literal.
+ bool VerilogMayBeConcatenation = false;
+ enum {
+ Unknown,
+ // Like the part after `:` in a constructor.
+ // Context(...) : IsExpression(IsExpression)
+ CtorInitializer,
+ // Like in the parentheses in a foreach.
+ ForEachMacro,
+ // Like the inheritance list in a class declaration.
+ // class Input : public IO
+ InheritanceList,
+ // Like in the braced list.
+ // int x[] = {};
+ StructArrayInitializer,
+ // Like in `static_cast<int>`.
+ TemplateArgument,
+ // C11 _Generic selection.
+ C11GenericSelection,
+ // Like in the outer parentheses in `ffnand ff1(.q());`.
+ VerilogInstancePortList,
+ } ContextType = Unknown;
};
/// Puts a new \c Context onto the stack \c Contexts for the lifetime
@@ -1478,9 +1794,9 @@ private:
~ScopedContextCreator() {
if (P.Style.AlignArrayOfStructures != FormatStyle::AIAS_None) {
- if (P.Contexts.back().InStructArrayInitializer) {
+ if (P.Contexts.back().ContextType == Context::StructArrayInitializer) {
P.Contexts.pop_back();
- P.Contexts.back().InStructArrayInitializer = true;
+ P.Contexts.back().ContextType = Context::StructArrayInitializer;
return;
}
}
@@ -1489,22 +1805,70 @@ private:
};
void modifyContext(const FormatToken &Current) {
- if (Current.getPrecedence() == prec::Assignment &&
- !Line.First->isOneOf(tok::kw_template, tok::kw_using, tok::kw_return) &&
- // Type aliases use `type X = ...;` in TypeScript and can be exported
- // using `export type ...`.
- !(Style.Language == FormatStyle::LK_JavaScript &&
+ auto AssignmentStartsExpression = [&]() {
+ if (Current.getPrecedence() != prec::Assignment)
+ return false;
+
+ if (Line.First->isOneOf(tok::kw_using, tok::kw_return))
+ return false;
+ if (Line.First->is(tok::kw_template)) {
+ assert(Current.Previous);
+ if (Current.Previous->is(tok::kw_operator)) {
+ // `template ... operator=` cannot be an expression.
+ return false;
+ }
+
+ // `template` keyword can start a variable template.
+ const FormatToken *Tok = Line.First->getNextNonComment();
+ assert(Tok); // Current token is on the same line.
+ if (Tok->isNot(TT_TemplateOpener)) {
+ // Explicit template instantiations do not have `<>`.
+ return false;
+ }
+
+ // This is the default value of a template parameter, determine if it's
+ // type or non-type.
+ if (Contexts.back().ContextKind == tok::less) {
+ assert(Current.Previous->Previous);
+ return !Current.Previous->Previous->isOneOf(tok::kw_typename,
+ tok::kw_class);
+ }
+
+ Tok = Tok->MatchingParen;
+ if (!Tok)
+ return false;
+ Tok = Tok->getNextNonComment();
+ if (!Tok)
+ return false;
+
+ if (Tok->isOneOf(tok::kw_class, tok::kw_enum, tok::kw_struct,
+ tok::kw_using)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ // Type aliases use `type X = ...;` in TypeScript and can be exported
+ // using `export type ...`.
+ if (Style.isJavaScript() &&
(Line.startsWith(Keywords.kw_type, tok::identifier) ||
Line.startsWith(tok::kw_export, Keywords.kw_type,
- tok::identifier))) &&
- (!Current.Previous || Current.Previous->isNot(tok::kw_operator))) {
+ tok::identifier))) {
+ return false;
+ }
+
+ return !Current.Previous || Current.Previous->isNot(tok::kw_operator);
+ };
+
+ if (AssignmentStartsExpression()) {
Contexts.back().IsExpression = true;
if (!Line.startsWith(TT_UnaryOperator)) {
for (FormatToken *Previous = Current.Previous;
Previous && Previous->Previous &&
!Previous->Previous->isOneOf(tok::comma, tok::semi);
Previous = Previous->Previous) {
- if (Previous->isOneOf(tok::r_square, tok::r_paren)) {
+ if (Previous->isOneOf(tok::r_square, tok::r_paren, tok::greater)) {
Previous = Previous->MatchingParen;
if (!Previous)
break;
@@ -1512,38 +1876,43 @@ private:
if (Previous->opensScope())
break;
if (Previous->isOneOf(TT_BinaryOperator, TT_UnaryOperator) &&
- Previous->isOneOf(tok::star, tok::amp, tok::ampamp) &&
- Previous->Previous && Previous->Previous->isNot(tok::equal))
+ Previous->isPointerOrReference() && Previous->Previous &&
+ Previous->Previous->isNot(tok::equal)) {
Previous->setType(TT_PointerOrReference);
+ }
}
}
} else if (Current.is(tok::lessless) &&
- (!Current.Previous || !Current.Previous->is(tok::kw_operator))) {
+ (!Current.Previous ||
+ Current.Previous->isNot(tok::kw_operator))) {
Contexts.back().IsExpression = true;
} else if (Current.isOneOf(tok::kw_return, tok::kw_throw)) {
Contexts.back().IsExpression = true;
} else if (Current.is(TT_TrailingReturnArrow)) {
Contexts.back().IsExpression = false;
- } else if (Current.is(TT_LambdaArrow) || Current.is(Keywords.kw_assert)) {
+ } else if (Current.is(Keywords.kw_assert)) {
Contexts.back().IsExpression = Style.Language == FormatStyle::LK_Java;
} else if (Current.Previous &&
Current.Previous->is(TT_CtorInitializerColon)) {
Contexts.back().IsExpression = true;
- Contexts.back().InCtorInitializer = true;
+ Contexts.back().ContextType = Context::CtorInitializer;
} else if (Current.Previous && Current.Previous->is(TT_InheritanceColon)) {
- Contexts.back().InInheritanceList = true;
+ Contexts.back().ContextType = Context::InheritanceList;
} else if (Current.isOneOf(tok::r_paren, tok::greater, tok::comma)) {
for (FormatToken *Previous = Current.Previous;
Previous && Previous->isOneOf(tok::star, tok::amp);
- Previous = Previous->Previous)
+ Previous = Previous->Previous) {
Previous->setType(TT_PointerOrReference);
- if (Line.MustBeDeclaration && !Contexts.front().InCtorInitializer)
+ }
+ if (Line.MustBeDeclaration &&
+ Contexts.front().ContextType != Context::CtorInitializer) {
Contexts.back().IsExpression = false;
+ }
} else if (Current.is(tok::kw_new)) {
Contexts.back().CanBeExpression = false;
} else if (Current.is(tok::semi) ||
(Current.is(tok::exclaim) && Current.Previous &&
- !Current.Previous->is(tok::kw_operator))) {
+ Current.Previous->isNot(tok::kw_operator))) {
// This should be the condition or increment in a for-loop.
// But not operator !() (can't use TT_OverloadedOperator here as its not
// been annotated yet).
@@ -1556,9 +1925,9 @@ private:
int ParenLevel = 0;
while (Current) {
if (Current->is(tok::l_paren))
- ParenLevel++;
+ ++ParenLevel;
if (Current->is(tok::r_paren))
- ParenLevel--;
+ --ParenLevel;
if (ParenLevel < 1)
break;
Current = Current->Next;
@@ -1578,11 +1947,13 @@ private:
if (TemplateCloser->is(tok::l_paren)) {
// No Matching Paren yet so skip to matching paren
TemplateCloser = untilMatchingParen(TemplateCloser);
+ if (!TemplateCloser)
+ break;
}
if (TemplateCloser->is(tok::less))
- NestingLevel++;
+ ++NestingLevel;
if (TemplateCloser->is(tok::greater))
- NestingLevel--;
+ --NestingLevel;
if (NestingLevel < 1)
break;
TemplateCloser = TemplateCloser->Next;
@@ -1597,42 +1968,32 @@ private:
FormatToken *LeadingIdentifier =
Current.Previous->MatchingParen->Previous;
- // Differentiate a deduction guide by seeing the
- // > of the template prior to the leading identifier.
- if (LeadingIdentifier) {
- FormatToken *PriorLeadingIdentifier = LeadingIdentifier->Previous;
- // Skip back past explicit decoration
- if (PriorLeadingIdentifier &&
- PriorLeadingIdentifier->is(tok::kw_explicit))
- PriorLeadingIdentifier = PriorLeadingIdentifier->Previous;
-
- return (PriorLeadingIdentifier &&
- PriorLeadingIdentifier->is(TT_TemplateCloser) &&
- LeadingIdentifier->TokenText == Current.Next->TokenText);
- }
+ return LeadingIdentifier &&
+ LeadingIdentifier->TokenText == Current.Next->TokenText;
}
}
return false;
}
void determineTokenType(FormatToken &Current) {
- if (!Current.is(TT_Unknown))
+ if (Current.isNot(TT_Unknown)) {
// The token type is already known.
return;
+ }
- if ((Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp()) &&
+ if ((Style.isJavaScript() || Style.isCSharp()) &&
Current.is(tok::exclaim)) {
if (Current.Previous) {
bool IsIdentifier =
- Style.Language == FormatStyle::LK_JavaScript
+ Style.isJavaScript()
? Keywords.IsJavaScriptIdentifier(
*Current.Previous, /* AcceptIdentifierName= */ true)
: Current.Previous->is(tok::identifier);
if (IsIdentifier ||
Current.Previous->isOneOf(
- tok::kw_namespace, tok::r_paren, tok::r_square, tok::r_brace,
- tok::kw_false, tok::kw_true, Keywords.kw_type, Keywords.kw_get,
- Keywords.kw_set) ||
+ tok::kw_default, tok::kw_namespace, tok::r_paren, tok::r_square,
+ tok::r_brace, tok::kw_false, tok::kw_true, Keywords.kw_type,
+ Keywords.kw_get, Keywords.kw_init, Keywords.kw_set) ||
Current.Previous->Tok.isLiteral()) {
Current.setType(TT_NonNullAssertion);
return;
@@ -1663,26 +2024,30 @@ private:
AutoFound = true;
} else if (Current.is(tok::arrow) &&
Style.Language == FormatStyle::LK_Java) {
- Current.setType(TT_LambdaArrow);
- } else if (Current.is(tok::arrow) && AutoFound && Line.MustBeDeclaration &&
- Current.NestingLevel == 0 &&
- !Current.Previous->is(tok::kw_operator)) {
+ Current.setType(TT_TrailingReturnArrow);
+ } else if (Current.is(tok::arrow) && Style.isVerilog()) {
+ // The implication operator.
+ Current.setType(TT_BinaryOperator);
+ } else if (Current.is(tok::arrow) && AutoFound &&
+ Line.MightBeFunctionDecl && Current.NestingLevel == 0 &&
+ !Current.Previous->isOneOf(tok::kw_operator, tok::identifier)) {
// not auto operator->() -> xxx;
Current.setType(TT_TrailingReturnArrow);
} else if (Current.is(tok::arrow) && Current.Previous &&
Current.Previous->is(tok::r_brace)) {
- // Concept implicit conversion contraint needs to be treated like
+ // Concept implicit conversion constraint needs to be treated like
// a trailing return type ... } -> <type>.
Current.setType(TT_TrailingReturnArrow);
} else if (isDeductionGuide(Current)) {
// Deduction guides trailing arrow " A(...) -> A<T>;".
Current.setType(TT_TrailingReturnArrow);
- } else if (Current.isOneOf(tok::star, tok::amp, tok::ampamp)) {
+ } else if (Current.isPointerOrReference()) {
Current.setType(determineStarAmpUsage(
Current,
Contexts.back().CanBeExpression && Contexts.back().IsExpression,
- Contexts.back().InTemplateArgument));
- } else if (Current.isOneOf(tok::minus, tok::plus, tok::caret)) {
+ Contexts.back().ContextType == Context::TemplateArgument));
+ } else if (Current.isOneOf(tok::minus, tok::plus, tok::caret) ||
+ (Style.isVerilog() && Current.is(tok::pipe))) {
Current.setType(determinePlusMinusCaretUsage(Current));
if (Current.is(TT_UnaryOperator) && Current.is(tok::caret))
Contexts.back().CaretFound = true;
@@ -1691,8 +2056,8 @@ private:
} else if (Current.isOneOf(tok::exclaim, tok::tilde)) {
Current.setType(TT_UnaryOperator);
} else if (Current.is(tok::question)) {
- if (Style.Language == FormatStyle::LK_JavaScript &&
- Line.MustBeDeclaration && !Contexts.back().IsExpression) {
+ if (Style.isJavaScript() && Line.MustBeDeclaration &&
+ !Contexts.back().IsExpression) {
// In JavaScript, `interface X { foo?(): bar; }` is an optional method
// on the interface, not a ternary expression.
Current.setType(TT_JsTypeOptionalQuestion);
@@ -1701,20 +2066,43 @@ private:
}
} else if (Current.isBinaryOperator() &&
(!Current.Previous || Current.Previous->isNot(tok::l_square)) &&
- (!Current.is(tok::greater) &&
+ (Current.isNot(tok::greater) &&
Style.Language != FormatStyle::LK_TextProto)) {
+ if (Style.isVerilog()) {
+ if (Current.is(tok::lessequal) && Contexts.size() == 1 &&
+ !Contexts.back().VerilogAssignmentFound) {
+ // In Verilog `<=` is assignment if in its own statement. It is a
+ // statement instead of an expression, that is it can not be chained.
+ Current.ForcedPrecedence = prec::Assignment;
+ Current.setFinalizedType(TT_BinaryOperator);
+ }
+ if (Current.getPrecedence() == prec::Assignment)
+ Contexts.back().VerilogAssignmentFound = true;
+ }
Current.setType(TT_BinaryOperator);
} else if (Current.is(tok::comment)) {
- if (Current.TokenText.startswith("/*")) {
- if (Current.TokenText.endswith("*/"))
+ if (Current.TokenText.starts_with("/*")) {
+ if (Current.TokenText.ends_with("*/")) {
Current.setType(TT_BlockComment);
- else
+ } else {
// The lexer has for some reason determined a comment here. But we
// cannot really handle it, if it isn't properly terminated.
Current.Tok.setKind(tok::unknown);
+ }
} else {
Current.setType(TT_LineComment);
}
+ } else if (Current.is(tok::string_literal)) {
+ if (Style.isVerilog() && Contexts.back().VerilogMayBeConcatenation &&
+ Current.getPreviousNonComment() &&
+ Current.getPreviousNonComment()->isOneOf(tok::comma, tok::l_brace) &&
+ Current.getNextNonComment() &&
+ Current.getNextNonComment()->isOneOf(tok::comma, tok::r_brace)) {
+ Current.setType(TT_StringInConcatenation);
+ }
+ } else if (Current.is(tok::l_paren)) {
+ if (lParenStartsCppCast(Current))
+ Current.setType(TT_CppCastLParen);
} else if (Current.is(tok::r_paren)) {
if (rParenEndsCast(Current))
Current.setType(TT_CastRParen);
@@ -1722,21 +2110,22 @@ private:
!Current.Next->isBinaryOperator() &&
!Current.Next->isOneOf(tok::semi, tok::colon, tok::l_brace,
tok::comma, tok::period, tok::arrow,
- tok::coloncolon))
- if (FormatToken *AfterParen = Current.MatchingParen->Next) {
- // Make sure this isn't the return type of an Obj-C block declaration
- if (AfterParen->Tok.isNot(tok::caret)) {
- if (FormatToken *BeforeParen = Current.MatchingParen->Previous)
- if (BeforeParen->is(tok::identifier) &&
- !BeforeParen->is(TT_TypenameMacro) &&
- BeforeParen->TokenText == BeforeParen->TokenText.upper() &&
- (!BeforeParen->Previous ||
- BeforeParen->Previous->ClosesTemplateDeclaration))
- Current.setType(TT_FunctionAnnotationRParen);
+ tok::coloncolon, tok::kw_noexcept)) {
+ if (FormatToken *AfterParen = Current.MatchingParen->Next;
+ AfterParen && AfterParen->isNot(tok::caret)) {
+ // Make sure this isn't the return type of an Obj-C block declaration.
+ if (FormatToken *BeforeParen = Current.MatchingParen->Previous;
+ BeforeParen && BeforeParen->is(tok::identifier) &&
+ BeforeParen->isNot(TT_TypenameMacro) &&
+ BeforeParen->TokenText == BeforeParen->TokenText.upper() &&
+ (!BeforeParen->Previous ||
+ BeforeParen->Previous->ClosesTemplateDeclaration ||
+ BeforeParen->Previous->ClosesRequiresClause)) {
+ Current.setType(TT_FunctionAnnotationRParen);
}
}
- } else if (Current.is(tok::at) && Current.Next &&
- Style.Language != FormatStyle::LK_JavaScript &&
+ }
+ } else if (Current.is(tok::at) && Current.Next && !Style.isJavaScript() &&
Style.Language != FormatStyle::LK_Java) {
// In Java & JavaScript, "@..." is a decorator or annotation. In ObjC, it
// marks declarations and properties that need special formatting.
@@ -1755,11 +2144,11 @@ private:
} else if (Current.is(tok::period)) {
FormatToken *PreviousNoComment = Current.getPreviousNonComment();
if (PreviousNoComment &&
- PreviousNoComment->isOneOf(tok::comma, tok::l_brace))
+ PreviousNoComment->isOneOf(tok::comma, tok::l_brace)) {
Current.setType(TT_DesignatedInitializerPeriod);
- else if (Style.Language == FormatStyle::LK_Java && Current.Previous &&
- Current.Previous->isOneOf(TT_JavaAnnotation,
- TT_LeadingJavaAnnotation)) {
+ } else if (Style.Language == FormatStyle::LK_Java && Current.Previous &&
+ Current.Previous->isOneOf(TT_JavaAnnotation,
+ TT_LeadingJavaAnnotation)) {
Current.setType(Current.Previous->getType());
}
} else if (canBeObjCSelectorComponent(Current) &&
@@ -1777,13 +2166,15 @@ private:
} else if (Current.isOneOf(tok::identifier, tok::kw_const, tok::kw_noexcept,
tok::kw_requires) &&
Current.Previous &&
- !Current.Previous->isOneOf(tok::equal, tok::at) &&
+ !Current.Previous->isOneOf(tok::equal, tok::at,
+ TT_CtorInitializerComma,
+ TT_CtorInitializerColon) &&
Line.MightBeFunctionDecl && Contexts.size() == 1) {
// Line.MightBeFunctionDecl can only be true after the parentheses of a
// function declaration have been found.
Current.setType(TT_TrailingAnnotation);
} else if ((Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript) &&
+ Style.isJavaScript()) &&
Current.Previous) {
if (Current.Previous->is(tok::at) &&
Current.isNot(Keywords.kw_interface)) {
@@ -1807,81 +2198,185 @@ private:
/// This is a heuristic based on whether \p Tok is an identifier following
/// something that is likely a type.
bool isStartOfName(const FormatToken &Tok) {
+ // Handled in ExpressionParser for Verilog.
+ if (Style.isVerilog())
+ return false;
+
if (Tok.isNot(tok::identifier) || !Tok.Previous)
return false;
+ if (const auto *NextNonComment = Tok.getNextNonComment();
+ (!NextNonComment && !Line.InMacroBody) ||
+ (NextNonComment &&
+ (NextNonComment->isPointerOrReference() ||
+ NextNonComment->is(tok::string_literal) ||
+ (Line.InPragmaDirective && NextNonComment->is(tok::identifier))))) {
+ return false;
+ }
+
if (Tok.Previous->isOneOf(TT_LeadingJavaAnnotation, Keywords.kw_instanceof,
- Keywords.kw_as))
+ Keywords.kw_as)) {
return false;
- if (Style.Language == FormatStyle::LK_JavaScript &&
- Tok.Previous->is(Keywords.kw_in))
+ }
+ if (Style.isJavaScript() && Tok.Previous->is(Keywords.kw_in))
return false;
// Skip "const" as it does not have an influence on whether this is a name.
FormatToken *PreviousNotConst = Tok.getPreviousNonComment();
- while (PreviousNotConst && PreviousNotConst->is(tok::kw_const))
- PreviousNotConst = PreviousNotConst->getPreviousNonComment();
+
+ // For javascript const can be like "let" or "var"
+ if (!Style.isJavaScript())
+ while (PreviousNotConst && PreviousNotConst->is(tok::kw_const))
+ PreviousNotConst = PreviousNotConst->getPreviousNonComment();
if (!PreviousNotConst)
return false;
+ if (PreviousNotConst->ClosesRequiresClause)
+ return false;
+
+ if (Style.isTableGen()) {
+ // keywords such as let and def* defines names.
+ if (Keywords.isTableGenDefinition(*PreviousNotConst))
+ return true;
+ }
+
bool IsPPKeyword = PreviousNotConst->is(tok::identifier) &&
PreviousNotConst->Previous &&
PreviousNotConst->Previous->is(tok::hash);
- if (PreviousNotConst->is(TT_TemplateCloser))
+ if (PreviousNotConst->is(TT_TemplateCloser)) {
return PreviousNotConst && PreviousNotConst->MatchingParen &&
PreviousNotConst->MatchingParen->Previous &&
PreviousNotConst->MatchingParen->Previous->isNot(tok::period) &&
PreviousNotConst->MatchingParen->Previous->isNot(tok::kw_template);
+ }
- if (PreviousNotConst->is(tok::r_paren) &&
- PreviousNotConst->is(TT_TypeDeclarationParen))
+ if ((PreviousNotConst->is(tok::r_paren) &&
+ PreviousNotConst->is(TT_TypeDeclarationParen)) ||
+ PreviousNotConst->is(TT_AttributeRParen)) {
return true;
+ }
- return (!IsPPKeyword &&
- PreviousNotConst->isOneOf(tok::identifier, tok::kw_auto)) ||
- PreviousNotConst->is(TT_PointerOrReference) ||
- PreviousNotConst->isSimpleTypeSpecifier();
+ // If is a preprocess keyword like #define.
+ if (IsPPKeyword)
+ return false;
+
+ // int a or auto a.
+ if (PreviousNotConst->isOneOf(tok::identifier, tok::kw_auto))
+ return true;
+
+ // *a or &a or &&a.
+ if (PreviousNotConst->is(TT_PointerOrReference))
+ return true;
+
+ // MyClass a;
+ if (PreviousNotConst->isSimpleTypeSpecifier())
+ return true;
+
+ // type[] a in Java
+ if (Style.Language == FormatStyle::LK_Java &&
+ PreviousNotConst->is(tok::r_square)) {
+ return true;
+ }
+
+ // const a = in JavaScript.
+ return Style.isJavaScript() && PreviousNotConst->is(tok::kw_const);
+ }
+
+ /// Determine whether '(' is starting a C++ cast.
+ bool lParenStartsCppCast(const FormatToken &Tok) {
+ // C-style casts are only used in C++.
+ if (!Style.isCpp())
+ return false;
+
+ FormatToken *LeftOfParens = Tok.getPreviousNonComment();
+ if (LeftOfParens && LeftOfParens->is(TT_TemplateCloser) &&
+ LeftOfParens->MatchingParen) {
+ auto *Prev = LeftOfParens->MatchingParen->getPreviousNonComment();
+ if (Prev &&
+ Prev->isOneOf(tok::kw_const_cast, tok::kw_dynamic_cast,
+ tok::kw_reinterpret_cast, tok::kw_static_cast)) {
+ // FIXME: Maybe we should handle identifiers ending with "_cast",
+ // e.g. any_cast?
+ return true;
+ }
+ }
+ return false;
}
/// Determine whether ')' is ending a cast.
bool rParenEndsCast(const FormatToken &Tok) {
// C-style casts are only used in C++, C# and Java.
if (!Style.isCSharp() && !Style.isCpp() &&
- Style.Language != FormatStyle::LK_Java)
+ Style.Language != FormatStyle::LK_Java) {
return false;
+ }
// Empty parens aren't casts and there are no casts at the end of the line.
if (Tok.Previous == Tok.MatchingParen || !Tok.Next || !Tok.MatchingParen)
return false;
+ if (Tok.MatchingParen->is(TT_OverloadedOperatorLParen))
+ return false;
+
FormatToken *LeftOfParens = Tok.MatchingParen->getPreviousNonComment();
if (LeftOfParens) {
- // If there is a closing parenthesis left of the current parentheses,
- // look past it as these might be chained casts.
- if (LeftOfParens->is(tok::r_paren)) {
+ // If there is a closing parenthesis left of the current
+ // parentheses, look past it as these might be chained casts.
+ if (LeftOfParens->is(tok::r_paren) &&
+ LeftOfParens->isNot(TT_CastRParen)) {
if (!LeftOfParens->MatchingParen ||
- !LeftOfParens->MatchingParen->Previous)
+ !LeftOfParens->MatchingParen->Previous) {
return false;
+ }
LeftOfParens = LeftOfParens->MatchingParen->Previous;
}
+ if (LeftOfParens->is(tok::r_square)) {
+ // delete[] (void *)ptr;
+ auto MayBeArrayDelete = [](FormatToken *Tok) -> FormatToken * {
+ if (Tok->isNot(tok::r_square))
+ return nullptr;
+
+ Tok = Tok->getPreviousNonComment();
+ if (!Tok || Tok->isNot(tok::l_square))
+ return nullptr;
+
+ Tok = Tok->getPreviousNonComment();
+ if (!Tok || Tok->isNot(tok::kw_delete))
+ return nullptr;
+ return Tok;
+ };
+ if (FormatToken *MaybeDelete = MayBeArrayDelete(LeftOfParens))
+ LeftOfParens = MaybeDelete;
+ }
+
+ // The Condition directly below this one will see the operator arguments
+ // as a (void *foo) cast.
+ // void operator delete(void *foo) ATTRIB;
+ if (LeftOfParens->Tok.getIdentifierInfo() && LeftOfParens->Previous &&
+ LeftOfParens->Previous->is(tok::kw_operator)) {
+ return false;
+ }
+
// If there is an identifier (or with a few exceptions a keyword) right
// before the parentheses, this is unlikely to be a cast.
if (LeftOfParens->Tok.getIdentifierInfo() &&
!LeftOfParens->isOneOf(Keywords.kw_in, tok::kw_return, tok::kw_case,
- tok::kw_delete))
+ tok::kw_delete, tok::kw_throw)) {
return false;
+ }
// Certain other tokens right before the parentheses are also signals that
// this cannot be a cast.
if (LeftOfParens->isOneOf(tok::at, tok::r_square, TT_OverloadedOperator,
- TT_TemplateCloser, tok::ellipsis))
+ TT_TemplateCloser, tok::ellipsis)) {
return false;
+ }
}
- if (Tok.Next->is(tok::question))
+ if (Tok.Next->isOneOf(tok::question, tok::ampamp))
return false;
// `foreach((A a, B b) in someList)` should not be seen as a cast.
@@ -1893,8 +2388,9 @@ private:
if (Tok.Next->isOneOf(tok::kw_noexcept, tok::kw_volatile, tok::kw_const,
tok::kw_requires, tok::kw_throw, tok::arrow,
Keywords.kw_override, Keywords.kw_final) ||
- isCpp11AttributeSpecifier(*Tok.Next))
+ isCppAttribute(Style.isCpp(), *Tok.Next)) {
return false;
+ }
// As Java has no function types, a "(" after the ")" likely means that this
// is a cast.
@@ -1902,10 +2398,10 @@ private:
return true;
// If a (non-string) literal follows, this is likely a cast.
- if (Tok.Next->isNot(tok::string_literal) &&
- (Tok.Next->Tok.isLiteral() ||
- Tok.Next->isOneOf(tok::kw_sizeof, tok::kw_alignof)))
+ if (Tok.Next->isOneOf(tok::kw_sizeof, tok::kw_alignof) ||
+ (Tok.Next->Tok.isLiteral() && Tok.Next->isNot(tok::string_literal))) {
return true;
+ }
// Heuristically try to determine whether the parentheses contain a type.
auto IsQualifiedPointerOrReference = [](FormatToken *T) {
@@ -1914,11 +2410,15 @@ private:
// Strip trailing qualifiers such as const or volatile when checking
// whether the parens could be a cast to a pointer/reference type.
while (T) {
- if (T->is(TT_AttributeParen)) {
+ if (T->is(TT_AttributeRParen)) {
// Handle `x = (foo *__attribute__((foo)))&v;`:
- if (T->MatchingParen && T->MatchingParen->Previous &&
- T->MatchingParen->Previous->is(tok::kw___attribute)) {
- T = T->MatchingParen->Previous->Previous;
+ assert(T->is(tok::r_paren));
+ assert(T->MatchingParen);
+ assert(T->MatchingParen->is(tok::l_paren));
+ assert(T->MatchingParen->is(TT_AttributeLParen));
+ if (const auto *Tok = T->MatchingParen->Previous;
+ Tok && Tok->isAttribute()) {
+ T = Tok->Previous;
continue;
}
} else if (T->is(TT_AttributeSquare)) {
@@ -1954,9 +2454,10 @@ private:
// Certain token types inside the parentheses mean that this can't be a
// cast.
for (const FormatToken *Token = Tok.MatchingParen->Next; Token != &Tok;
- Token = Token->Next)
+ Token = Token->Next) {
if (Token->is(TT_BinaryOperator))
return false;
+ }
// If the following token is an identifier or 'this', this is a cast. All
// cases where this can be something else are handled above.
@@ -1966,8 +2467,9 @@ private:
// Look for a cast `( x ) (`.
if (Tok.Next->is(tok::l_paren) && Tok.Previous && Tok.Previous->Previous) {
if (Tok.Previous->is(tok::identifier) &&
- Tok.Previous->Previous->is(tok::l_paren))
+ Tok.Previous->Previous->is(tok::l_paren)) {
return true;
+ }
}
if (!Tok.Next->Next)
@@ -1976,10 +2478,17 @@ private:
// If the next token after the parenthesis is a unary operator, assume
// that this is cast, unless there are unexpected tokens inside the
// parenthesis.
- bool NextIsUnary =
- Tok.Next->isUnaryOperator() || Tok.Next->isOneOf(tok::amp, tok::star);
- if (!NextIsUnary || Tok.Next->is(tok::plus) ||
- !Tok.Next->Next->isOneOf(tok::identifier, tok::numeric_constant))
+ const bool NextIsAmpOrStar = Tok.Next->isOneOf(tok::amp, tok::star);
+ if (!(Tok.Next->isUnaryOperator() || NextIsAmpOrStar) ||
+ Tok.Next->is(tok::plus) ||
+ !Tok.Next->Next->isOneOf(tok::identifier, tok::numeric_constant)) {
+ return false;
+ }
+ if (NextIsAmpOrStar &&
+ (Tok.Next->Next->is(tok::numeric_constant) || Line.InPPDirective)) {
+ return false;
+ }
+ if (Line.InPPDirective && Tok.Next->is(tok::minus))
return false;
// Search for unexpected tokens.
for (FormatToken *Prev = Tok.Previous; Prev != Tok.MatchingParen;
@@ -1990,26 +2499,83 @@ private:
return true;
}
+ /// Returns true if the token is used as a unary operator.
+ bool determineUnaryOperatorByUsage(const FormatToken &Tok) {
+ const FormatToken *PrevToken = Tok.getPreviousNonComment();
+ if (!PrevToken)
+ return true;
+
+ // These keywords are deliberately not included here because they may
+ // precede only one of unary star/amp and plus/minus but not both. They are
+ // either included in determineStarAmpUsage or determinePlusMinusCaretUsage.
+ //
+ // @ - It may be followed by a unary `-` in Objective-C literals. We don't
+ // know how they can be followed by a star or amp.
+ if (PrevToken->isOneOf(
+ TT_ConditionalExpr, tok::l_paren, tok::comma, tok::colon, tok::semi,
+ tok::equal, tok::question, tok::l_square, tok::l_brace,
+ tok::kw_case, tok::kw_co_await, tok::kw_co_return, tok::kw_co_yield,
+ tok::kw_delete, tok::kw_return, tok::kw_throw)) {
+ return true;
+ }
+
+ // We put sizeof here instead of only in determineStarAmpUsage. In the cases
+ // where the unary `+` operator is overloaded, it is reasonable to write
+ // things like `sizeof +x`. Like commit 446d6ec996c6c3.
+ if (PrevToken->is(tok::kw_sizeof))
+ return true;
+
+ // A sequence of leading unary operators.
+ if (PrevToken->isOneOf(TT_CastRParen, TT_UnaryOperator))
+ return true;
+
+ // There can't be two consecutive binary operators.
+ if (PrevToken->is(TT_BinaryOperator))
+ return true;
+
+ return false;
+ }
+
/// Return the type of the given token assuming it is * or &.
TokenType determineStarAmpUsage(const FormatToken &Tok, bool IsExpression,
bool InTemplateArgument) {
- if (Style.Language == FormatStyle::LK_JavaScript)
+ if (Style.isJavaScript())
return TT_BinaryOperator;
// && in C# must be a binary operator.
if (Style.isCSharp() && Tok.is(tok::ampamp))
return TT_BinaryOperator;
+ if (Style.isVerilog()) {
+ // In Verilog, `*` can only be a binary operator. `&` can be either unary
+ // or binary. `*` also includes `*>` in module path declarations in
+ // specify blocks because merged tokens take the type of the first one by
+ // default.
+ if (Tok.is(tok::star))
+ return TT_BinaryOperator;
+ return determineUnaryOperatorByUsage(Tok) ? TT_UnaryOperator
+ : TT_BinaryOperator;
+ }
+
const FormatToken *PrevToken = Tok.getPreviousNonComment();
if (!PrevToken)
return TT_UnaryOperator;
+ if (PrevToken->is(TT_TypeName))
+ return TT_PointerOrReference;
const FormatToken *NextToken = Tok.getNextNonComment();
+
+ if (InTemplateArgument && NextToken && NextToken->is(tok::kw_noexcept))
+ return TT_BinaryOperator;
+
if (!NextToken ||
- NextToken->isOneOf(tok::arrow, tok::equal, tok::kw_noexcept) ||
+ NextToken->isOneOf(tok::arrow, tok::equal, tok::comma, tok::r_paren,
+ TT_RequiresClause) ||
+ (NextToken->is(tok::kw_noexcept) && !IsExpression) ||
NextToken->canBePointerOrReferenceQualifier() ||
- (NextToken->is(tok::l_brace) && !NextToken->getNextNonComment()))
+ (NextToken->is(tok::l_brace) && !NextToken->getNextNonComment())) {
return TT_PointerOrReference;
+ }
if (PrevToken->is(tok::coloncolon))
return TT_PointerOrReference;
@@ -2017,13 +2583,7 @@ private:
if (PrevToken->is(tok::r_paren) && PrevToken->is(TT_TypeDeclarationParen))
return TT_PointerOrReference;
- if (PrevToken->isOneOf(tok::l_paren, tok::l_square, tok::l_brace,
- tok::comma, tok::semi, tok::kw_return, tok::colon,
- tok::kw_co_return, tok::kw_co_await,
- tok::kw_co_yield, tok::equal, tok::kw_delete,
- tok::kw_sizeof, tok::kw_throw) ||
- PrevToken->isOneOf(TT_BinaryOperator, TT_ConditionalExpr,
- TT_UnaryOperator, TT_CastRParen))
+ if (determineUnaryOperatorByUsage(Tok))
return TT_UnaryOperator;
if (NextToken->is(tok::l_square) && NextToken->isNot(TT_LambdaLSquare))
@@ -2033,22 +2593,52 @@ private:
if (NextToken->isOneOf(tok::comma, tok::semi))
return TT_PointerOrReference;
+ // After right braces, star tokens are likely to be pointers to struct,
+ // union, or class.
+ // struct {} *ptr;
+ // This by itself is not sufficient to distinguish from multiplication
+ // following a brace-initialized expression, as in:
+ // int i = int{42} * 2;
+ // In the struct case, the part of the struct declaration until the `{` and
+ // the `}` are put on separate unwrapped lines; in the brace-initialized
+ // case, the matching `{` is on the same unwrapped line, so check for the
+ // presence of the matching brace to distinguish between those.
+ if (PrevToken->is(tok::r_brace) && Tok.is(tok::star) &&
+ !PrevToken->MatchingParen) {
+ return TT_PointerOrReference;
+ }
+
+ if (PrevToken->endsSequence(tok::r_square, tok::l_square, tok::kw_delete))
+ return TT_UnaryOperator;
+
if (PrevToken->Tok.isLiteral() ||
PrevToken->isOneOf(tok::r_paren, tok::r_square, tok::kw_true,
- tok::kw_false, tok::r_brace) ||
- NextToken->Tok.isLiteral() ||
- NextToken->isOneOf(tok::kw_true, tok::kw_false) ||
- NextToken->isUnaryOperator() ||
- // If we know we're in a template argument, there are no named
- // declarations. Thus, having an identifier on the right-hand side
- // indicates a binary operator.
- (InTemplateArgument && NextToken->Tok.isAnyIdentifier()))
+ tok::kw_false, tok::r_brace)) {
return TT_BinaryOperator;
+ }
+
+ const FormatToken *NextNonParen = NextToken;
+ while (NextNonParen && NextNonParen->is(tok::l_paren))
+ NextNonParen = NextNonParen->getNextNonComment();
+ if (NextNonParen && (NextNonParen->Tok.isLiteral() ||
+ NextNonParen->isOneOf(tok::kw_true, tok::kw_false) ||
+ NextNonParen->isUnaryOperator())) {
+ return TT_BinaryOperator;
+ }
- // "&&(" is quite unlikely to be two successive unary "&".
- if (Tok.is(tok::ampamp) && NextToken->is(tok::l_paren))
+ // If we know we're in a template argument, there are no named declarations.
+ // Thus, having an identifier on the right-hand side indicates a binary
+ // operator.
+ if (InTemplateArgument && NextToken->Tok.isAnyIdentifier())
return TT_BinaryOperator;
+ // "&&" followed by "(", "*", or "&" is quite unlikely to be two successive
+ // unary "&".
+ if (Tok.is(tok::ampamp) &&
+ NextToken->isOneOf(tok::l_paren, tok::star, tok::amp)) {
+ return TT_BinaryOperator;
+ }
+
// This catches some cases where evaluation order is used as control flow:
// aaa && aaa->f();
if (NextToken->Tok.isAnyIdentifier()) {
@@ -2062,27 +2652,40 @@ private:
if (IsExpression && !Contexts.back().CaretFound)
return TT_BinaryOperator;
+ // Opeartors at class scope are likely pointer or reference members.
+ if (!Scopes.empty() && Scopes.back() == ST_Class)
+ return TT_PointerOrReference;
+
+ // Tokens that indicate member access or chained operator& use.
+ auto IsChainedOperatorAmpOrMember = [](const FormatToken *token) {
+ return !token || token->isOneOf(tok::amp, tok::period, tok::arrow,
+ tok::arrowstar, tok::periodstar);
+ };
+
+ // It's more likely that & represents operator& than an uninitialized
+ // reference.
+ if (Tok.is(tok::amp) && PrevToken && PrevToken->Tok.isAnyIdentifier() &&
+ IsChainedOperatorAmpOrMember(PrevToken->getPreviousNonComment()) &&
+ NextToken && NextToken->Tok.isAnyIdentifier()) {
+ if (auto NextNext = NextToken->getNextNonComment();
+ NextNext &&
+ (IsChainedOperatorAmpOrMember(NextNext) || NextNext->is(tok::semi))) {
+ return TT_BinaryOperator;
+ }
+ }
+
return TT_PointerOrReference;
}
TokenType determinePlusMinusCaretUsage(const FormatToken &Tok) {
- const FormatToken *PrevToken = Tok.getPreviousNonComment();
- if (!PrevToken)
- return TT_UnaryOperator;
-
- if (PrevToken->isOneOf(TT_CastRParen, TT_UnaryOperator))
- // This must be a sequence of leading unary operators.
+ if (determineUnaryOperatorByUsage(Tok))
return TT_UnaryOperator;
- // Use heuristics to recognize unary operators.
- if (PrevToken->isOneOf(tok::equal, tok::l_paren, tok::comma, tok::l_square,
- tok::question, tok::colon, tok::kw_return,
- tok::kw_case, tok::at, tok::l_brace, tok::kw_throw,
- tok::kw_co_return, tok::kw_co_yield))
+ const FormatToken *PrevToken = Tok.getPreviousNonComment();
+ if (!PrevToken)
return TT_UnaryOperator;
- // There can't be two consecutive binary operators.
- if (PrevToken->is(TT_BinaryOperator))
+ if (PrevToken->is(tok::at))
return TT_UnaryOperator;
// Fall back to marking the token as binary operator.
@@ -2108,6 +2711,8 @@ private:
bool AutoFound;
const AdditionalKeywords &Keywords;
+ SmallVector<ScopeType> &Scopes;
+
// Set of "<" tokens that do not open a template parameter list. If parseAngle
// determines that a specific token can't be a template opener, it will make
// same decision irrespective of the decisions for tokens leading up to it.
@@ -2124,7 +2729,7 @@ class ExpressionParser {
public:
ExpressionParser(const FormatStyle &Style, const AdditionalKeywords &Keywords,
AnnotatedLine &Line)
- : Style(Style), Keywords(Keywords), Current(Line.First) {}
+ : Style(Style), Keywords(Keywords), Line(Line), Current(Line.First) {}
/// Parse expressions with the given operator precedence.
void parse(int Precedence = 0) {
@@ -2132,8 +2737,9 @@ public:
// expression.
while (Current && (Current->is(tok::kw_return) ||
(Current->is(tok::colon) &&
- Current->isOneOf(TT_ObjCMethodExpr, TT_DictLiteral))))
+ Current->isOneOf(TT_ObjCMethodExpr, TT_DictLiteral)))) {
next();
+ }
if (!Current || Precedence > PrecedenceArrowAndPeriod)
return;
@@ -2154,21 +2760,47 @@ public:
FormatToken *Start = Current;
FormatToken *LatestOperator = nullptr;
unsigned OperatorIndex = 0;
+ // The first name of the current type in a port list.
+ FormatToken *VerilogFirstOfType = nullptr;
while (Current) {
+ // In Verilog ports in a module header that don't have a type take the
+ // type of the previous one. For example,
+ // module a(output b,
+ // c,
+ // output d);
+ // In this case there need to be fake parentheses around b and c.
+ if (Style.isVerilog() && Precedence == prec::Comma) {
+ VerilogFirstOfType =
+ verilogGroupDecl(VerilogFirstOfType, LatestOperator);
+ }
+
// Consume operators with higher precedence.
parse(Precedence + 1);
int CurrentPrecedence = getCurrentPrecedence();
- if (Current && Current->is(TT_SelectorName) &&
- Precedence == CurrentPrecedence) {
+ if (Precedence == CurrentPrecedence && Current &&
+ Current->is(TT_SelectorName)) {
if (LatestOperator)
addFakeParenthesis(Start, prec::Level(Precedence));
Start = Current;
}
- // At the end of the line or when an operator with higher precedence is
+ if ((Style.isCSharp() || Style.isJavaScript() ||
+ Style.Language == FormatStyle::LK_Java) &&
+ Precedence == prec::Additive && Current) {
+ // A string can be broken without parentheses around it when it is
+ // already in a sequence of strings joined by `+` signs.
+ FormatToken *Prev = Current->getPreviousNonComment();
+ if (Prev && Prev->is(tok::string_literal) &&
+ (Prev == Start || Prev->endsSequence(tok::string_literal, tok::plus,
+ TT_StringInConcatenation))) {
+ Prev->setType(TT_StringInConcatenation);
+ }
+ }
+
+ // At the end of the line or when an operator with lower precedence is
// found, insert fake parenthesis and return.
if (!Current ||
(Current->closesScope() &&
@@ -2180,7 +2812,11 @@ public:
}
// Consume scopes: (), [], <> and {}
- if (Current->opensScope()) {
+ // In addition to that we handle require clauses as scope, so that the
+ // constraints in that are correctly indented.
+ if (Current->opensScope() ||
+ Current->isOneOf(TT_RequiresClause,
+ TT_RequiresClauseInARequiresExpression)) {
// In fragment of a JavaScript template string can look like '}..${' and
// thus close a scope and open a new one at the same time.
while (Current && (!Current->closesScope() || Current->opensScope())) {
@@ -2201,13 +2837,31 @@ public:
}
}
+ // Group variables of the same type.
+ if (Style.isVerilog() && Precedence == prec::Comma && VerilogFirstOfType)
+ addFakeParenthesis(VerilogFirstOfType, prec::Comma);
+
if (LatestOperator && (Current || Precedence > 0)) {
- // LatestOperator->LastOperator = true;
+ // The requires clauses do not neccessarily end in a semicolon or a brace,
+ // but just go over to struct/class or a function declaration, we need to
+ // intervene so that the fake right paren is inserted correctly.
+ auto End =
+ (Start->Previous &&
+ Start->Previous->isOneOf(TT_RequiresClause,
+ TT_RequiresClauseInARequiresExpression))
+ ? [this]() {
+ auto Ret = Current ? Current : Line.Last;
+ while (!Ret->ClosesRequiresClause && Ret->Previous)
+ Ret = Ret->Previous;
+ return Ret;
+ }()
+ : nullptr;
+
if (Precedence == PrecedenceArrowAndPeriod) {
// Call expressions don't have a binary operator precedence.
- addFakeParenthesis(Start, prec::Unknown);
+ addFakeParenthesis(Start, prec::Unknown, End);
} else {
- addFakeParenthesis(Start, prec::Level(Precedence));
+ addFakeParenthesis(Start, prec::Level(Precedence), End);
}
}
}
@@ -2222,53 +2876,67 @@ private:
return prec::Conditional;
if (NextNonComment && Current->is(TT_SelectorName) &&
(NextNonComment->isOneOf(TT_DictLiteral, TT_JsTypeColon) ||
- ((Style.Language == FormatStyle::LK_Proto ||
- Style.Language == FormatStyle::LK_TextProto) &&
- NextNonComment->is(tok::less))))
+ (Style.isProto() && NextNonComment->is(tok::less)))) {
return prec::Assignment;
+ }
if (Current->is(TT_JsComputedPropertyName))
return prec::Assignment;
- if (Current->is(TT_LambdaArrow))
+ if (Current->is(TT_TrailingReturnArrow))
return prec::Comma;
if (Current->is(TT_FatArrow))
return prec::Assignment;
if (Current->isOneOf(tok::semi, TT_InlineASMColon, TT_SelectorName) ||
(Current->is(tok::comment) && NextNonComment &&
- NextNonComment->is(TT_SelectorName)))
+ NextNonComment->is(TT_SelectorName))) {
return 0;
+ }
if (Current->is(TT_RangeBasedForLoopColon))
return prec::Comma;
- if ((Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript) &&
- Current->is(Keywords.kw_instanceof))
+ if ((Style.Language == FormatStyle::LK_Java || Style.isJavaScript()) &&
+ Current->is(Keywords.kw_instanceof)) {
return prec::Relational;
- if (Style.Language == FormatStyle::LK_JavaScript &&
- Current->isOneOf(Keywords.kw_in, Keywords.kw_as))
+ }
+ if (Style.isJavaScript() &&
+ Current->isOneOf(Keywords.kw_in, Keywords.kw_as)) {
return prec::Relational;
+ }
if (Current->is(TT_BinaryOperator) || Current->is(tok::comma))
return Current->getPrecedence();
- if (Current->isOneOf(tok::period, tok::arrow))
+ if (Current->isOneOf(tok::period, tok::arrow) &&
+ Current->isNot(TT_TrailingReturnArrow)) {
return PrecedenceArrowAndPeriod;
- if ((Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript) &&
+ }
+ if ((Style.Language == FormatStyle::LK_Java || Style.isJavaScript()) &&
Current->isOneOf(Keywords.kw_extends, Keywords.kw_implements,
- Keywords.kw_throws))
+ Keywords.kw_throws)) {
+ return 0;
+ }
+ // In Verilog case labels are not on separate lines straight out of
+ // UnwrappedLineParser. The colon is not part of an expression.
+ if (Style.isVerilog() && Current->is(tok::colon))
return 0;
}
return -1;
}
- void addFakeParenthesis(FormatToken *Start, prec::Level Precedence) {
+ void addFakeParenthesis(FormatToken *Start, prec::Level Precedence,
+ FormatToken *End = nullptr) {
+ // Do not assign fake parenthesis to tokens that are part of an
+ // unexpanded macro call. The line within the macro call contains
+ // the parenthesis and commas, and we will not find operators within
+ // that structure.
+ if (Start->MacroParent)
+ return;
+
Start->FakeLParens.push_back(Precedence);
if (Precedence > prec::Unknown)
Start->StartsBinaryExpression = true;
- if (Current) {
- FormatToken *Previous = Current->Previous;
- while (Previous->is(tok::comment) && Previous->Previous)
- Previous = Previous->Previous;
- ++Previous->FakeRParens;
+ if (!End && Current)
+ End = Current->getPreviousNonComment();
+ if (End) {
+ ++End->FakeRParens;
if (Precedence > prec::Unknown)
- Previous->EndsBinaryExpression = true;
+ End->EndsBinaryExpression = true;
}
}
@@ -2281,18 +2949,18 @@ private:
next();
}
parse(PrecedenceArrowAndPeriod);
- for (FormatToken *Token : llvm::reverse(Tokens))
+ for (FormatToken *Token : llvm::reverse(Tokens)) {
// The actual precedence doesn't matter.
addFakeParenthesis(Token, prec::Unknown);
+ }
}
void parseConditionalExpr() {
- while (Current && Current->isTrailingComment()) {
+ while (Current && Current->isTrailingComment())
next();
- }
FormatToken *Start = Current;
parse(prec::LogicalOr);
- if (!Current || !Current->is(tok::question))
+ if (!Current || Current->isNot(tok::question))
return;
next();
parse(prec::Assignment);
@@ -2308,70 +2976,273 @@ private:
Current = Current->Next;
while (Current &&
(Current->NewlinesBefore == 0 || SkipPastLeadingComments) &&
- Current->isTrailingComment())
+ Current->isTrailingComment()) {
Current = Current->Next;
+ }
+ }
+
+ // Add fake parenthesis around declarations of the same type for example in a
+ // module prototype. Return the first port / variable of the current type.
+ FormatToken *verilogGroupDecl(FormatToken *FirstOfType,
+ FormatToken *PreviousComma) {
+ if (!Current)
+ return nullptr;
+
+ FormatToken *Start = Current;
+
+ // Skip attributes.
+ while (Start->startsSequence(tok::l_paren, tok::star)) {
+ if (!(Start = Start->MatchingParen) ||
+ !(Start = Start->getNextNonComment())) {
+ return nullptr;
+ }
+ }
+
+ FormatToken *Tok = Start;
+
+ if (Tok->is(Keywords.kw_assign))
+ Tok = Tok->getNextNonComment();
+
+ // Skip any type qualifiers to find the first identifier. It may be either a
+ // new type name or a variable name. There can be several type qualifiers
+ // preceding a variable name, and we can not tell them apart by looking at
+ // the word alone since a macro can be defined as either a type qualifier or
+ // a variable name. Thus we use the last word before the dimensions instead
+ // of the first word as the candidate for the variable or type name.
+ FormatToken *First = nullptr;
+ while (Tok) {
+ FormatToken *Next = Tok->getNextNonComment();
+
+ if (Tok->is(tok::hash)) {
+ // Start of a macro expansion.
+ First = Tok;
+ Tok = Next;
+ if (Tok)
+ Tok = Tok->getNextNonComment();
+ } else if (Tok->is(tok::hashhash)) {
+ // Concatenation. Skip.
+ Tok = Next;
+ if (Tok)
+ Tok = Tok->getNextNonComment();
+ } else if (Keywords.isVerilogQualifier(*Tok) ||
+ Keywords.isVerilogIdentifier(*Tok)) {
+ First = Tok;
+ Tok = Next;
+ // The name may have dots like `interface_foo.modport_foo`.
+ while (Tok && Tok->isOneOf(tok::period, tok::coloncolon) &&
+ (Tok = Tok->getNextNonComment())) {
+ if (Keywords.isVerilogIdentifier(*Tok))
+ Tok = Tok->getNextNonComment();
+ }
+ } else if (!Next) {
+ Tok = nullptr;
+ } else if (Tok->is(tok::l_paren)) {
+ // Make sure the parenthesized list is a drive strength. Otherwise the
+ // statement may be a module instantiation in which case we have already
+ // found the instance name.
+ if (Next->isOneOf(
+ Keywords.kw_highz0, Keywords.kw_highz1, Keywords.kw_large,
+ Keywords.kw_medium, Keywords.kw_pull0, Keywords.kw_pull1,
+ Keywords.kw_small, Keywords.kw_strong0, Keywords.kw_strong1,
+ Keywords.kw_supply0, Keywords.kw_supply1, Keywords.kw_weak0,
+ Keywords.kw_weak1)) {
+ Tok->setType(TT_VerilogStrength);
+ Tok = Tok->MatchingParen;
+ if (Tok) {
+ Tok->setType(TT_VerilogStrength);
+ Tok = Tok->getNextNonComment();
+ }
+ } else {
+ break;
+ }
+ } else if (Tok->is(tok::hash)) {
+ if (Next->is(tok::l_paren))
+ Next = Next->MatchingParen;
+ if (Next)
+ Tok = Next->getNextNonComment();
+ } else {
+ break;
+ }
+ }
+
+ // Find the second identifier. If it exists it will be the name.
+ FormatToken *Second = nullptr;
+ // Dimensions.
+ while (Tok && Tok->is(tok::l_square) && (Tok = Tok->MatchingParen))
+ Tok = Tok->getNextNonComment();
+ if (Tok && (Tok->is(tok::hash) || Keywords.isVerilogIdentifier(*Tok)))
+ Second = Tok;
+
+ // If the second identifier doesn't exist and there are qualifiers, the type
+ // is implied.
+ FormatToken *TypedName = nullptr;
+ if (Second) {
+ TypedName = Second;
+ if (First && First->is(TT_Unknown))
+ First->setType(TT_VerilogDimensionedTypeName);
+ } else if (First != Start) {
+ // If 'First' is null, then this isn't a declaration, 'TypedName' gets set
+ // to null as intended.
+ TypedName = First;
+ }
+
+ if (TypedName) {
+ // This is a declaration with a new type.
+ if (TypedName->is(TT_Unknown))
+ TypedName->setType(TT_StartOfName);
+ // Group variables of the previous type.
+ if (FirstOfType && PreviousComma) {
+ PreviousComma->setType(TT_VerilogTypeComma);
+ addFakeParenthesis(FirstOfType, prec::Comma, PreviousComma->Previous);
+ }
+
+ FirstOfType = TypedName;
+
+ // Don't let higher precedence handle the qualifiers. For example if we
+ // have:
+ // parameter x = 0
+ // We skip `parameter` here. This way the fake parentheses for the
+ // assignment will be around `x = 0`.
+ while (Current && Current != FirstOfType) {
+ if (Current->opensScope()) {
+ next();
+ parse();
+ }
+ next();
+ }
+ }
+
+ return FirstOfType;
}
const FormatStyle &Style;
const AdditionalKeywords &Keywords;
+ const AnnotatedLine &Line;
FormatToken *Current;
};
} // end anonymous namespace
void TokenAnnotator::setCommentLineLevels(
- SmallVectorImpl<AnnotatedLine *> &Lines) {
+ SmallVectorImpl<AnnotatedLine *> &Lines) const {
const AnnotatedLine *NextNonCommentLine = nullptr;
- for (SmallVectorImpl<AnnotatedLine *>::reverse_iterator I = Lines.rbegin(),
- E = Lines.rend();
- I != E; ++I) {
- bool CommentLine = true;
- for (const FormatToken *Tok = (*I)->First; Tok; Tok = Tok->Next) {
- if (!Tok->is(tok::comment)) {
- CommentLine = false;
- break;
- }
- }
+ for (AnnotatedLine *Line : llvm::reverse(Lines)) {
+ assert(Line->First);
// If the comment is currently aligned with the line immediately following
// it, that's probably intentional and we should keep it.
- if (NextNonCommentLine && CommentLine &&
- NextNonCommentLine->First->NewlinesBefore <= 1 &&
+ if (NextNonCommentLine && NextNonCommentLine->First->NewlinesBefore < 2 &&
+ Line->isComment() && !isClangFormatOff(Line->First->TokenText) &&
NextNonCommentLine->First->OriginalColumn ==
- (*I)->First->OriginalColumn) {
+ Line->First->OriginalColumn) {
+ const bool PPDirectiveOrImportStmt =
+ NextNonCommentLine->Type == LT_PreprocessorDirective ||
+ NextNonCommentLine->Type == LT_ImportStatement;
+ if (PPDirectiveOrImportStmt)
+ Line->Type = LT_CommentAbovePPDirective;
// Align comments for preprocessor lines with the # in column 0 if
// preprocessor lines are not indented. Otherwise, align with the next
// line.
- (*I)->Level =
- (Style.IndentPPDirectives != FormatStyle::PPDIS_BeforeHash &&
- (NextNonCommentLine->Type == LT_PreprocessorDirective ||
- NextNonCommentLine->Type == LT_ImportStatement))
- ? 0
- : NextNonCommentLine->Level;
+ Line->Level = Style.IndentPPDirectives != FormatStyle::PPDIS_BeforeHash &&
+ PPDirectiveOrImportStmt
+ ? 0
+ : NextNonCommentLine->Level;
} else {
- NextNonCommentLine = (*I)->First->isNot(tok::r_brace) ? (*I) : nullptr;
+ NextNonCommentLine = Line->First->isNot(tok::r_brace) ? Line : nullptr;
}
- setCommentLineLevels((*I)->Children);
+ setCommentLineLevels(Line->Children);
}
}
static unsigned maxNestingDepth(const AnnotatedLine &Line) {
unsigned Result = 0;
- for (const auto *Tok = Line.First; Tok != nullptr; Tok = Tok->Next)
+ for (const auto *Tok = Line.First; Tok; Tok = Tok->Next)
Result = std::max(Result, Tok->NestingLevel);
return Result;
}
-void TokenAnnotator::annotate(AnnotatedLine &Line) {
- for (SmallVectorImpl<AnnotatedLine *>::iterator I = Line.Children.begin(),
- E = Line.Children.end();
- I != E; ++I) {
- annotate(**I);
+// Returns the name of a function with no return type, e.g. a constructor or
+// destructor.
+static FormatToken *getFunctionName(const AnnotatedLine &Line) {
+ for (FormatToken *Tok = Line.getFirstNonComment(), *Name = nullptr; Tok;
+ Tok = Tok->getNextNonComment()) {
+ // Skip C++11 attributes both before and after the function name.
+ if (Tok->is(tok::l_square) && Tok->is(TT_AttributeSquare)) {
+ Tok = Tok->MatchingParen;
+ if (!Tok)
+ break;
+ continue;
+ }
+
+ // Make sure the name is followed by a pair of parentheses.
+ if (Name) {
+ return Tok->is(tok::l_paren) && Tok->isNot(TT_FunctionTypeLParen) &&
+ Tok->MatchingParen
+ ? Name
+ : nullptr;
+ }
+
+ // Skip keywords that may precede the constructor/destructor name.
+ if (Tok->isOneOf(tok::kw_friend, tok::kw_inline, tok::kw_virtual,
+ tok::kw_constexpr, tok::kw_consteval, tok::kw_explicit)) {
+ continue;
+ }
+
+ // A qualified name may start from the global namespace.
+ if (Tok->is(tok::coloncolon)) {
+ Tok = Tok->Next;
+ if (!Tok)
+ break;
+ }
+
+ // Skip to the unqualified part of the name.
+ while (Tok->startsSequence(tok::identifier, tok::coloncolon)) {
+ assert(Tok->Next);
+ Tok = Tok->Next->Next;
+ if (!Tok)
+ return nullptr;
+ }
+
+ // Skip the `~` if a destructor name.
+ if (Tok->is(tok::tilde)) {
+ Tok = Tok->Next;
+ if (!Tok)
+ break;
+ }
+
+ // Make sure the name is not already annotated, e.g. as NamespaceMacro.
+ if (Tok->isNot(tok::identifier) || Tok->isNot(TT_Unknown))
+ break;
+
+ Name = Tok;
}
- AnnotatingParser Parser(Style, Line, Keywords);
+
+ return nullptr;
+}
+
+// Checks if Tok is a constructor/destructor name qualified by its class name.
+static bool isCtorOrDtorName(const FormatToken *Tok) {
+ assert(Tok && Tok->is(tok::identifier));
+ const auto *Prev = Tok->Previous;
+
+ if (Prev && Prev->is(tok::tilde))
+ Prev = Prev->Previous;
+
+ if (!Prev || !Prev->endsSequence(tok::coloncolon, tok::identifier))
+ return false;
+
+ assert(Prev->Previous);
+ return Prev->Previous->TokenText == Tok->TokenText;
+}
+
+void TokenAnnotator::annotate(AnnotatedLine &Line) {
+ AnnotatingParser Parser(Style, Line, Keywords, Scopes);
Line.Type = Parser.parseLine();
+ for (auto &Child : Line.Children)
+ annotate(*Child);
+
// With very deep nesting, ExpressionParser uses lots of stack and the
// formatting algorithm is very slow. We're not going to do a good job here
// anyway - it's probably generated code being formatted by mistake.
@@ -2385,6 +3256,14 @@ void TokenAnnotator::annotate(AnnotatedLine &Line) {
ExpressionParser ExprParser(Style, Keywords, Line);
ExprParser.parse();
+ if (Style.isCpp()) {
+ auto *Tok = getFunctionName(Line);
+ if (Tok && ((!Scopes.empty() && Scopes.back() == ST_Class) ||
+ Line.endsWith(TT_FunctionLBrace) || isCtorOrDtorName(Tok))) {
+ Tok->setFinalizedType(TT_CtorDtorDeclName);
+ }
+ }
+
if (Line.startsWith(TT_ObjCMethodSpecifier))
Line.Type = LT_ObjCMethodDecl;
else if (Line.startsWith(TT_ObjCDecl))
@@ -2392,14 +3271,29 @@ void TokenAnnotator::annotate(AnnotatedLine &Line) {
else if (Line.startsWith(TT_ObjCProperty))
Line.Type = LT_ObjCProperty;
- Line.First->SpacesRequiredBefore = 1;
- Line.First->CanBreakBefore = Line.First->MustBreakBefore;
+ auto *First = Line.First;
+ First->SpacesRequiredBefore = 1;
+ First->CanBreakBefore = First->MustBreakBefore;
+
+ if (First->is(tok::eof) && First->NewlinesBefore == 0 &&
+ Style.InsertNewlineAtEOF) {
+ First->NewlinesBefore = 1;
+ }
}
// This function heuristically determines whether 'Current' starts the name of a
// function declaration.
static bool isFunctionDeclarationName(bool IsCpp, const FormatToken &Current,
- const AnnotatedLine &Line) {
+ const AnnotatedLine &Line,
+ FormatToken *&ClosingParen) {
+ assert(Current.Previous);
+
+ if (Current.is(TT_FunctionDeclarationName))
+ return true;
+
+ if (!Current.Tok.getIdentifierInfo())
+ return false;
+
auto skipOperatorName = [](const FormatToken *Next) -> const FormatToken * {
for (; Next; Next = Next->Next) {
if (Next->is(TT_OverloadedOperatorLParen))
@@ -2409,8 +3303,9 @@ static bool isFunctionDeclarationName(bool IsCpp, const FormatToken &Current,
if (Next->isOneOf(tok::kw_new, tok::kw_delete)) {
// For 'new[]' and 'delete[]'.
if (Next->Next &&
- Next->Next->startsSequence(tok::l_square, tok::r_square))
+ Next->Next->startsSequence(tok::l_square, tok::r_square)) {
Next = Next->Next->Next;
+ }
continue;
}
if (Next->startsSequence(tok::l_square, tok::r_square)) {
@@ -2419,7 +3314,7 @@ static bool isFunctionDeclarationName(bool IsCpp, const FormatToken &Current,
continue;
}
if ((Next->isSimpleTypeSpecifier() || Next->is(tok::identifier)) &&
- Next->Next && Next->Next->isOneOf(tok::star, tok::amp, tok::ampamp)) {
+ Next->Next && Next->Next->isPointerOrReference()) {
// For operator void*(), operator char*(), operator Foo*().
Next = Next->Next;
continue;
@@ -2437,14 +3332,25 @@ static bool isFunctionDeclarationName(bool IsCpp, const FormatToken &Current,
// Find parentheses of parameter list.
const FormatToken *Next = Current.Next;
if (Current.is(tok::kw_operator)) {
- if (Current.Previous && Current.Previous->is(tok::coloncolon))
+ const auto *Previous = Current.Previous;
+ if (Previous->Tok.getIdentifierInfo() &&
+ !Previous->isOneOf(tok::kw_return, tok::kw_co_return)) {
+ return true;
+ }
+ if (Previous->is(tok::r_paren) && Previous->is(TT_TypeDeclarationParen)) {
+ assert(Previous->MatchingParen);
+ assert(Previous->MatchingParen->is(tok::l_paren));
+ assert(Previous->MatchingParen->is(TT_TypeDeclarationParen));
+ return true;
+ }
+ if (!Previous->isPointerOrReference() && Previous->isNot(TT_TemplateCloser))
return false;
Next = skipOperatorName(Next);
} else {
- if (!Current.is(TT_StartOfName) || Current.NestingLevel != 0)
+ if (Current.isNot(TT_StartOfName) || Current.NestingLevel != 0)
return false;
for (; Next; Next = Next->Next) {
- if (Next->is(TT_TemplateOpener)) {
+ if (Next->is(TT_TemplateOpener) && Next->MatchingParen) {
Next = Next->MatchingParen;
} else if (Next->is(tok::coloncolon)) {
Next = Next->Next;
@@ -2454,7 +3360,11 @@ static bool isFunctionDeclarationName(bool IsCpp, const FormatToken &Current,
Next = skipOperatorName(Next->Next);
break;
}
- if (!Next->is(tok::identifier))
+ if (Next->isNot(tok::identifier))
+ return false;
+ } else if (isCppAttribute(IsCpp, *Next)) {
+ Next = Next->MatchingParen;
+ if (!Next)
return false;
} else if (Next->is(tok::l_paren)) {
break;
@@ -2465,16 +3375,17 @@ static bool isFunctionDeclarationName(bool IsCpp, const FormatToken &Current,
}
// Check whether parameter list can belong to a function declaration.
- if (!Next || !Next->is(tok::l_paren) || !Next->MatchingParen)
+ if (!Next || Next->isNot(tok::l_paren) || !Next->MatchingParen)
return false;
- // If the lines ends with "{", this is likely an function definition.
+ ClosingParen = Next->MatchingParen;
+ assert(ClosingParen->is(tok::r_paren));
+ // If the lines ends with "{", this is likely a function definition.
if (Line.Last->is(tok::l_brace))
return true;
- if (Next->Next == Next->MatchingParen)
+ if (Next->Next == ClosingParen)
return true; // Empty parentheses.
// If there is an &/&& after the r_paren, this is likely a function.
- if (Next->MatchingParen->Next &&
- Next->MatchingParen->Next->is(TT_PointerOrReference))
+ if (ClosingParen->Next && ClosingParen->Next->is(TT_PointerOrReference))
return true;
// Check for K&R C function definitions (and C++ function definitions with
@@ -2488,10 +3399,11 @@ static bool isFunctionDeclarationName(bool IsCpp, const FormatToken &Current,
// return !b;
// }
if (IsCpp && Next->Next && Next->Next->is(tok::identifier) &&
- !Line.endsWith(tok::semi))
+ !Line.endsWith(tok::semi)) {
return true;
+ }
- for (const FormatToken *Tok = Next->Next; Tok && Tok != Next->MatchingParen;
+ for (const FormatToken *Tok = Next->Next; Tok && Tok != ClosingParen;
Tok = Tok->Next) {
if (Tok->is(TT_TypeDeclarationParen))
return true;
@@ -2500,10 +3412,11 @@ static bool isFunctionDeclarationName(bool IsCpp, const FormatToken &Current,
continue;
}
if (Tok->is(tok::kw_const) || Tok->isSimpleTypeSpecifier() ||
- Tok->isOneOf(TT_PointerOrReference, TT_StartOfName, tok::ellipsis))
+ Tok->isOneOf(TT_PointerOrReference, TT_StartOfName, tok::ellipsis,
+ TT_TypeName)) {
return true;
- if (Tok->isOneOf(tok::l_brace, tok::string_literal, TT_ObjCMethodExpr) ||
- Tok->Tok.isLiteral())
+ }
+ if (Tok->isOneOf(tok::l_brace, TT_ObjCMethodExpr) || Tok->Tok.isLiteral())
return false;
}
return false;
@@ -2515,8 +3428,9 @@ bool TokenAnnotator::mustBreakForReturnType(const AnnotatedLine &Line) const {
if ((Style.AlwaysBreakAfterReturnType == FormatStyle::RTBS_TopLevel ||
Style.AlwaysBreakAfterReturnType ==
FormatStyle::RTBS_TopLevelDefinitions) &&
- Line.Level > 0)
+ Line.Level > 0) {
return false;
+ }
switch (Style.AlwaysBreakAfterReturnType) {
case FormatStyle::RTBS_None:
@@ -2532,17 +3446,15 @@ bool TokenAnnotator::mustBreakForReturnType(const AnnotatedLine &Line) const {
return false;
}
-void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
- for (SmallVectorImpl<AnnotatedLine *>::iterator I = Line.Children.begin(),
- E = Line.Children.end();
- I != E; ++I) {
- calculateFormattingInformation(**I);
- }
+void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) const {
+ for (AnnotatedLine *ChildLine : Line.Children)
+ calculateFormattingInformation(*ChildLine);
- Line.First->TotalLength =
- Line.First->IsMultiline ? Style.ColumnLimit
- : Line.FirstStartColumn + Line.First->ColumnWidth;
- FormatToken *Current = Line.First->Next;
+ auto *First = Line.First;
+ First->TotalLength = First->IsMultiline
+ ? Style.ColumnLimit
+ : Line.FirstStartColumn + First->ColumnWidth;
+ FormatToken *Current = First->Next;
bool InFunctionDecl = Line.MightBeFunctionDecl;
bool AlignArrayOfStructures =
(Style.AlignArrayOfStructures != FormatStyle::AIAS_None &&
@@ -2550,16 +3462,105 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
if (AlignArrayOfStructures)
calculateArrayInitializerColumnList(Line);
+ const bool IsCpp = Style.isCpp();
+ bool SeenName = false;
+ bool LineIsFunctionDeclaration = false;
+ FormatToken *ClosingParen = nullptr;
+ FormatToken *AfterLastAttribute = nullptr;
+
+ for (auto *Tok = Current; Tok; Tok = Tok->Next) {
+ if (Tok->is(TT_StartOfName))
+ SeenName = true;
+ if (Tok->Previous->EndsCppAttributeGroup)
+ AfterLastAttribute = Tok;
+ if (const bool IsCtorOrDtor = Tok->is(TT_CtorDtorDeclName);
+ IsCtorOrDtor ||
+ isFunctionDeclarationName(Style.isCpp(), *Tok, Line, ClosingParen)) {
+ if (!IsCtorOrDtor)
+ Tok->setFinalizedType(TT_FunctionDeclarationName);
+ LineIsFunctionDeclaration = true;
+ SeenName = true;
+ break;
+ }
+ }
+
+ if (IsCpp && (LineIsFunctionDeclaration || First->is(TT_CtorDtorDeclName)) &&
+ Line.endsWith(tok::semi, tok::r_brace)) {
+ auto *Tok = Line.Last->Previous;
+ while (Tok->isNot(tok::r_brace))
+ Tok = Tok->Previous;
+ if (auto *LBrace = Tok->MatchingParen; LBrace) {
+ assert(LBrace->is(tok::l_brace));
+ Tok->setBlockKind(BK_Block);
+ LBrace->setBlockKind(BK_Block);
+ LBrace->setFinalizedType(TT_FunctionLBrace);
+ }
+ }
+
+ if (IsCpp && SeenName && AfterLastAttribute &&
+ mustBreakAfterAttributes(*AfterLastAttribute, Style)) {
+ AfterLastAttribute->MustBreakBefore = true;
+ if (LineIsFunctionDeclaration)
+ Line.ReturnTypeWrapped = true;
+ }
+
+ if (IsCpp) {
+ if (!LineIsFunctionDeclaration) {
+ // Annotate */&/&& in `operator` function calls as binary operators.
+ for (const auto *Tok = First; Tok; Tok = Tok->Next) {
+ if (Tok->isNot(tok::kw_operator))
+ continue;
+ do {
+ Tok = Tok->Next;
+ } while (Tok && Tok->isNot(TT_OverloadedOperatorLParen));
+ if (!Tok)
+ break;
+ const auto *LeftParen = Tok;
+ for (Tok = Tok->Next; Tok && Tok != LeftParen->MatchingParen;
+ Tok = Tok->Next) {
+ if (Tok->isNot(tok::identifier))
+ continue;
+ auto *Next = Tok->Next;
+ const bool NextIsBinaryOperator =
+ Next && Next->isPointerOrReference() && Next->Next &&
+ Next->Next->is(tok::identifier);
+ if (!NextIsBinaryOperator)
+ continue;
+ Next->setType(TT_BinaryOperator);
+ Tok = Next;
+ }
+ }
+ } else if (ClosingParen) {
+ for (auto *Tok = ClosingParen->Next; Tok; Tok = Tok->Next) {
+ if (Tok->is(tok::arrow)) {
+ Tok->setType(TT_TrailingReturnArrow);
+ break;
+ }
+ if (Tok->isNot(TT_TrailingAnnotation))
+ continue;
+ const auto *Next = Tok->Next;
+ if (!Next || Next->isNot(tok::l_paren))
+ continue;
+ Tok = Next->MatchingParen;
+ if (!Tok)
+ break;
+ }
+ }
+ }
+
while (Current) {
- if (isFunctionDeclarationName(Style.isCpp(), *Current, Line))
- Current->setType(TT_FunctionDeclarationName);
+ const FormatToken *Prev = Current->Previous;
if (Current->is(TT_LineComment)) {
- if (Current->Previous->is(BK_BracedInit) &&
- Current->Previous->opensScope())
+ if (Prev->is(BK_BracedInit) && Prev->opensScope()) {
Current->SpacesRequiredBefore =
- (Style.Cpp11BracedListStyle && !Style.SpacesInParentheses) ? 0 : 1;
- else
+ (Style.Cpp11BracedListStyle && !Style.SpacesInParensOptions.Other)
+ ? 0
+ : 1;
+ } else if (Prev->is(TT_VerilogMultiLineListLParen)) {
+ Current->SpacesRequiredBefore = 0;
+ } else {
Current->SpacesRequiredBefore = Style.SpacesBeforeTrailingComments;
+ }
// If we find a trailing comment, iterate backwards to determine whether
// it seems to relate to a specific parameter. If so, break before that
@@ -2574,42 +3575,48 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
if (Parameter->isOneOf(tok::comment, tok::r_brace))
break;
if (Parameter->Previous && Parameter->Previous->is(tok::comma)) {
- if (!Parameter->Previous->is(TT_CtorInitializerComma) &&
- Parameter->HasUnescapedNewline)
+ if (Parameter->Previous->isNot(TT_CtorInitializerComma) &&
+ Parameter->HasUnescapedNewline) {
Parameter->MustBreakBefore = true;
+ }
break;
}
}
}
- } else if (Current->SpacesRequiredBefore == 0 &&
+ } else if (!Current->Finalized && Current->SpacesRequiredBefore == 0 &&
spaceRequiredBefore(Line, *Current)) {
Current->SpacesRequiredBefore = 1;
}
- Current->MustBreakBefore =
- Current->MustBreakBefore || mustBreakBefore(Line, *Current);
-
- if (!Current->MustBreakBefore && InFunctionDecl &&
- Current->is(TT_FunctionDeclarationName))
- Current->MustBreakBefore = mustBreakForReturnType(Line);
+ const auto &Children = Prev->Children;
+ if (!Children.empty() && Children.back()->Last->is(TT_LineComment)) {
+ Current->MustBreakBefore = true;
+ } else {
+ Current->MustBreakBefore =
+ Current->MustBreakBefore || mustBreakBefore(Line, *Current);
+ if (!Current->MustBreakBefore && InFunctionDecl &&
+ Current->is(TT_FunctionDeclarationName)) {
+ Current->MustBreakBefore = mustBreakForReturnType(Line);
+ }
+ }
Current->CanBreakBefore =
Current->MustBreakBefore || canBreakBefore(Line, *Current);
unsigned ChildSize = 0;
- if (Current->Previous->Children.size() == 1) {
- FormatToken &LastOfChild = *Current->Previous->Children[0]->Last;
+ if (Prev->Children.size() == 1) {
+ FormatToken &LastOfChild = *Prev->Children[0]->Last;
ChildSize = LastOfChild.isTrailingComment() ? Style.ColumnLimit
: LastOfChild.TotalLength + 1;
}
- const FormatToken *Prev = Current->Previous;
if (Current->MustBreakBefore || Prev->Children.size() > 1 ||
(Prev->Children.size() == 1 &&
Prev->Children[0]->First->MustBreakBefore) ||
- Current->IsMultiline)
+ Current->IsMultiline) {
Current->TotalLength = Prev->TotalLength + Style.ColumnLimit;
- else
+ } else {
Current->TotalLength = Prev->TotalLength + Current->ColumnWidth +
ChildSize + Current->SpacesRequiredBefore;
+ }
if (Current->is(TT_CtorInitializerColon))
InFunctionDecl = false;
@@ -2637,12 +3644,12 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
calculateUnbreakableTailLengths(Line);
unsigned IndentLevel = Line.Level;
- for (Current = Line.First; Current != nullptr; Current = Current->Next) {
+ for (Current = First; Current; Current = Current->Next) {
if (Current->Role)
Current->Role->precomputeFormattingInfos(Current);
if (Current->MatchingParen &&
- Current->MatchingParen->opensBlockOrBlockTypeList(Style)) {
- assert(IndentLevel > 0);
+ Current->MatchingParen->opensBlockOrBlockTypeList(Style) &&
+ IndentLevel > 0) {
--IndentLevel;
}
Current->IndentLevel = IndentLevel;
@@ -2653,7 +3660,8 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
LLVM_DEBUG({ printDebugInfo(Line); });
}
-void TokenAnnotator::calculateUnbreakableTailLengths(AnnotatedLine &Line) {
+void TokenAnnotator::calculateUnbreakableTailLengths(
+ AnnotatedLine &Line) const {
unsigned UnbreakableTailLength = 0;
FormatToken *Current = Line.Last;
while (Current) {
@@ -2669,17 +3677,17 @@ void TokenAnnotator::calculateUnbreakableTailLengths(AnnotatedLine &Line) {
}
}
-void TokenAnnotator::calculateArrayInitializerColumnList(AnnotatedLine &Line) {
- if (Line.First == Line.Last) {
+void TokenAnnotator::calculateArrayInitializerColumnList(
+ AnnotatedLine &Line) const {
+ if (Line.First == Line.Last)
return;
- }
auto *CurrentToken = Line.First;
CurrentToken->ArrayInitializerLineStart = true;
unsigned Depth = 0;
- while (CurrentToken != nullptr && CurrentToken != Line.Last) {
+ while (CurrentToken && CurrentToken != Line.Last) {
if (CurrentToken->is(tok::l_brace)) {
CurrentToken->IsArrayInitializer = true;
- if (CurrentToken->Next != nullptr)
+ if (CurrentToken->Next)
CurrentToken->Next->MustBreakBefore = true;
CurrentToken =
calculateInitializerColumnList(Line, CurrentToken->Next, Depth + 1);
@@ -2690,15 +3698,15 @@ void TokenAnnotator::calculateArrayInitializerColumnList(AnnotatedLine &Line) {
}
FormatToken *TokenAnnotator::calculateInitializerColumnList(
- AnnotatedLine &Line, FormatToken *CurrentToken, unsigned Depth) {
- while (CurrentToken != nullptr && CurrentToken != Line.Last) {
+ AnnotatedLine &Line, FormatToken *CurrentToken, unsigned Depth) const {
+ while (CurrentToken && CurrentToken != Line.Last) {
if (CurrentToken->is(tok::l_brace))
++Depth;
else if (CurrentToken->is(tok::r_brace))
--Depth;
if (Depth == 2 && CurrentToken->isOneOf(tok::l_brace, tok::comma)) {
CurrentToken = CurrentToken->Next;
- if (CurrentToken == nullptr)
+ if (!CurrentToken)
break;
CurrentToken->StartsColumn = true;
CurrentToken = CurrentToken->Previous;
@@ -2710,13 +3718,14 @@ FormatToken *TokenAnnotator::calculateInitializerColumnList(
unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
const FormatToken &Tok,
- bool InFunctionDecl) {
+ bool InFunctionDecl) const {
const FormatToken &Left = *Tok.Previous;
const FormatToken &Right = Tok;
if (Left.is(tok::semi))
return 0;
+ // Language specific handling.
if (Style.Language == FormatStyle::LK_Java) {
if (Right.isOneOf(Keywords.kw_extends, Keywords.kw_throws))
return 1;
@@ -2724,24 +3733,28 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
return 2;
if (Left.is(tok::comma) && Left.NestingLevel == 0)
return 3;
- } else if (Style.Language == FormatStyle::LK_JavaScript) {
+ } else if (Style.isJavaScript()) {
if (Right.is(Keywords.kw_function) && Left.isNot(tok::comma))
return 100;
if (Left.is(TT_JsTypeColon))
return 35;
- if ((Left.is(TT_TemplateString) && Left.TokenText.endswith("${")) ||
- (Right.is(TT_TemplateString) && Right.TokenText.startswith("}")))
+ if ((Left.is(TT_TemplateString) && Left.TokenText.ends_with("${")) ||
+ (Right.is(TT_TemplateString) && Right.TokenText.starts_with("}"))) {
return 100;
+ }
// Prefer breaking call chains (".foo") over empty "{}", "[]" or "()".
if (Left.opensScope() && Right.closesScope())
return 200;
+ } else if (Style.Language == FormatStyle::LK_Proto) {
+ if (Right.is(tok::l_square))
+ return 1;
+ if (Right.is(tok::period))
+ return 500;
}
if (Right.is(tok::identifier) && Right.Next && Right.Next->is(TT_DictLiteral))
return 1;
if (Right.is(tok::l_square)) {
- if (Style.Language == FormatStyle::LK_Proto)
- return 1;
if (Left.is(tok::r_square))
return 200;
// Slightly prefer formatting local lambda definitions like functions.
@@ -2749,13 +3762,13 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
return 35;
if (!Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare,
TT_ArrayInitializerLSquare,
- TT_DesignatedInitializerLSquare, TT_AttributeSquare))
+ TT_DesignatedInitializerLSquare, TT_AttributeSquare)) {
return 500;
+ }
}
- if (Left.is(tok::coloncolon) ||
- (Right.is(tok::period) && Style.Language == FormatStyle::LK_Proto))
- return 500;
+ if (Left.is(tok::coloncolon))
+ return Style.PenaltyBreakScopeResolution;
if (Right.isOneOf(TT_StartOfName, TT_FunctionDeclarationName) ||
Right.is(tok::kw_operator)) {
if (Line.startsWith(tok::kw_for) && Right.PartOfMultiVariableDeclStmt)
@@ -2768,20 +3781,21 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
}
if (Right.is(TT_PointerOrReference))
return 190;
- if (Right.is(TT_LambdaArrow))
+ if (Right.is(TT_TrailingReturnArrow))
return 110;
if (Left.is(tok::equal) && Right.is(tok::l_brace))
return 160;
if (Left.is(TT_CastRParen))
return 100;
- if (Left.isOneOf(tok::kw_class, tok::kw_struct))
+ if (Left.isOneOf(tok::kw_class, tok::kw_struct, tok::kw_union))
return 5000;
if (Left.is(tok::comment))
return 1000;
if (Left.isOneOf(TT_RangeBasedForLoopColon, TT_InheritanceColon,
- TT_CtorInitializerColon))
+ TT_CtorInitializerColon)) {
return 2;
+ }
if (Right.isMemberAccess()) {
// Breaking before the "./->" of a chained call/member access is reasonably
@@ -2838,15 +3852,21 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
// open paren (we'll prefer breaking after the protocol list's opening
// angle bracket, if present).
if (Line.Type == LT_ObjCDecl && Left.is(tok::l_paren) && Left.Previous &&
- Left.Previous->isOneOf(tok::identifier, tok::greater))
+ Left.Previous->isOneOf(tok::identifier, tok::greater)) {
return 500;
+ }
+ if (Left.is(tok::l_paren) && Style.PenaltyBreakOpenParenthesis != 0)
+ return Style.PenaltyBreakOpenParenthesis;
if (Left.is(tok::l_paren) && InFunctionDecl &&
- Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign)
+ Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign) {
return 100;
+ }
if (Left.is(tok::l_paren) && Left.Previous &&
- (Left.Previous->is(tok::kw_for) || Left.Previous->isIf()))
+ (Left.Previous->isOneOf(tok::kw_for, tok::kw__Generic) ||
+ Left.Previous->isIf())) {
return 1000;
+ }
if (Left.is(tok::equal) && InFunctionDecl)
return 110;
if (Right.is(tok::r_brace))
@@ -2858,8 +3878,9 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
// here unless the style does not want us to place all arguments on the
// next line.
if (Style.AlignAfterOpenBracket == FormatStyle::BAS_DontAlign &&
- (Left.ParameterCount <= 1 || Style.AllowAllArgumentsOnNextLine))
+ (Left.ParameterCount <= 1 || Style.AllowAllArgumentsOnNextLine)) {
return 0;
+ }
if (Left.is(tok::l_brace) && !Style.Cpp11BracedListStyle)
return 19;
return Left.ParameterCount > 1 ? Style.PenaltyBreakBeforeFirstCallParameter
@@ -2872,25 +3893,31 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
return 60;
if (Left.isOneOf(tok::plus, tok::comma) && Left.Previous &&
Left.Previous->isLabelString() &&
- (Left.NextOperator || Left.OperatorIndex != 0))
+ (Left.NextOperator || Left.OperatorIndex != 0)) {
return 50;
+ }
if (Right.is(tok::plus) && Left.isLabelString() &&
- (Right.NextOperator || Right.OperatorIndex != 0))
+ (Right.NextOperator || Right.OperatorIndex != 0)) {
return 25;
+ }
if (Left.is(tok::comma))
return 1;
if (Right.is(tok::lessless) && Left.isLabelString() &&
- (Right.NextOperator || Right.OperatorIndex != 1))
+ (Right.NextOperator || Right.OperatorIndex != 1)) {
return 25;
+ }
if (Right.is(tok::lessless)) {
// Breaking at a << is really cheap.
- if (!Left.is(tok::r_paren) || Right.OperatorIndex > 0)
+ if (Left.isNot(tok::r_paren) || Right.OperatorIndex > 0) {
// Slightly prefer to break before the first one in log-like statements.
return 2;
+ }
return 1;
}
if (Left.ClosesTemplateDeclaration)
return Style.PenaltyBreakTemplateDeclaration;
+ if (Left.ClosesRequiresClause)
+ return 0;
if (Left.is(TT_ConditionalExpr))
return prec::Conditional;
prec::Level Level = Left.getPrecedence();
@@ -2905,53 +3932,87 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
}
bool TokenAnnotator::spaceRequiredBeforeParens(const FormatToken &Right) const {
- return Style.SpaceBeforeParens == FormatStyle::SBPO_Always ||
- (Style.SpaceBeforeParens == FormatStyle::SBPO_NonEmptyParentheses &&
- Right.ParameterCount > 0);
+ if (Style.SpaceBeforeParens == FormatStyle::SBPO_Always)
+ return true;
+ if (Right.is(TT_OverloadedOperatorLParen) &&
+ Style.SpaceBeforeParensOptions.AfterOverloadedOperator) {
+ return true;
+ }
+ if (Style.SpaceBeforeParensOptions.BeforeNonEmptyParentheses &&
+ Right.ParameterCount > 0) {
+ return true;
+ }
+ return false;
}
bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
const FormatToken &Left,
- const FormatToken &Right) {
- if (Left.is(tok::kw_return) && Right.isNot(tok::semi))
+ const FormatToken &Right) const {
+ if (Left.is(tok::kw_return) &&
+ !Right.isOneOf(tok::semi, tok::r_paren, tok::hashhash)) {
return true;
- if (Style.isJson() && Left.is(tok::string_literal) && Right.is(tok::colon))
- return false;
+ }
+ if (Left.is(tok::kw_throw) && Right.is(tok::l_paren) && Right.MatchingParen &&
+ Right.MatchingParen->is(TT_CastRParen)) {
+ return true;
+ }
if (Left.is(Keywords.kw_assert) && Style.Language == FormatStyle::LK_Java)
return true;
if (Style.ObjCSpaceAfterProperty && Line.Type == LT_ObjCProperty &&
- Left.Tok.getObjCKeywordID() == tok::objc_property)
+ Left.Tok.getObjCKeywordID() == tok::objc_property) {
return true;
+ }
if (Right.is(tok::hashhash))
return Left.is(tok::hash);
if (Left.isOneOf(tok::hashhash, tok::hash))
return Right.is(tok::hash);
if ((Left.is(tok::l_paren) && Right.is(tok::r_paren)) ||
(Left.is(tok::l_brace) && Left.isNot(BK_Block) &&
- Right.is(tok::r_brace) && Right.isNot(BK_Block)))
- return Style.SpaceInEmptyParentheses;
- if (Style.SpacesInConditionalStatement) {
- if (Left.is(tok::l_paren) && Left.Previous &&
- isKeywordWithCondition(*Left.Previous))
- return true;
- if (Right.is(tok::r_paren) && Right.MatchingParen &&
- Right.MatchingParen->Previous &&
- isKeywordWithCondition(*Right.MatchingParen->Previous))
- return true;
+ Right.is(tok::r_brace) && Right.isNot(BK_Block))) {
+ return Style.SpacesInParensOptions.InEmptyParentheses;
}
+ if (Style.SpacesInParensOptions.InConditionalStatements) {
+ const FormatToken *LeftParen = nullptr;
+ if (Left.is(tok::l_paren))
+ LeftParen = &Left;
+ else if (Right.is(tok::r_paren) && Right.MatchingParen)
+ LeftParen = Right.MatchingParen;
+ if (LeftParen) {
+ if (LeftParen->is(TT_ConditionLParen))
+ return true;
+ if (LeftParen->Previous && isKeywordWithCondition(*LeftParen->Previous))
+ return true;
+ }
+ }
+
+ // trailing return type 'auto': []() -> auto {}, auto foo() -> auto {}
+ if (Left.is(tok::kw_auto) && Right.isOneOf(TT_LambdaLBrace, TT_FunctionLBrace,
+ // function return type 'auto'
+ TT_FunctionTypeLParen)) {
+ return true;
+ }
+
+ // auto{x} auto(x)
+ if (Left.is(tok::kw_auto) && Right.isOneOf(tok::l_paren, tok::l_brace))
+ return false;
- // requires ( or requires(
- if (Right.is(tok::l_paren) && Left.is(tok::kw_requires))
- return spaceRequiredBeforeParens(Right);
- // requires clause Concept1<T> && Concept2<T>
- if (Left.is(TT_ConstraintJunctions) && Right.is(tok::identifier))
+ // operator co_await(x)
+ if (Right.is(tok::l_paren) && Left.is(tok::kw_co_await) && Left.Previous &&
+ Left.Previous->is(tok::kw_operator)) {
+ return false;
+ }
+ // co_await (x), co_yield (x), co_return (x)
+ if (Left.isOneOf(tok::kw_co_await, tok::kw_co_yield, tok::kw_co_return) &&
+ !Right.isOneOf(tok::semi, tok::r_paren)) {
return true;
+ }
- if (Left.is(tok::l_paren) || Right.is(tok::r_paren))
+ if (Left.is(tok::l_paren) || Right.is(tok::r_paren)) {
return (Right.is(TT_CastRParen) ||
(Left.MatchingParen && Left.MatchingParen->is(TT_CastRParen)))
- ? Style.SpacesInCStyleCastParentheses
- : Style.SpacesInParentheses;
+ ? Style.SpacesInParensOptions.InCStyleCasts
+ : Style.SpacesInParensOptions.Other;
+ }
if (Right.isOneOf(tok::semi, tok::comma))
return false;
if (Right.is(tok::less) && Line.Type == LT_ObjCDecl) {
@@ -2967,10 +4028,11 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Left.is(tok::at) &&
Right.isOneOf(tok::identifier, tok::string_literal, tok::char_constant,
tok::numeric_constant, tok::l_paren, tok::l_brace,
- tok::kw_true, tok::kw_false))
+ tok::kw_true, tok::kw_false)) {
return false;
+ }
if (Left.is(tok::colon))
- return !Left.is(TT_ObjCMethodExpr);
+ return Left.isNot(TT_ObjCMethodExpr);
if (Left.is(tok::coloncolon))
return false;
if (Left.is(tok::less) || Right.isOneOf(tok::greater, tok::less)) {
@@ -2982,11 +4044,14 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return false;
return !Style.Cpp11BracedListStyle;
}
- return false;
+ // Don't attempt to format operator<(), as it is handled later.
+ if (Right.isNot(TT_OverloadedOperatorLParen))
+ return false;
}
- if (Right.is(tok::ellipsis))
+ if (Right.is(tok::ellipsis)) {
return Left.Tok.isLiteral() || (Left.is(tok::identifier) && Left.Previous &&
Left.Previous->is(tok::kw_case));
+ }
if (Left.is(tok::l_square) && Right.is(tok::amp))
return Style.SpacesInSquareBrackets;
if (Right.is(TT_PointerOrReference)) {
@@ -2995,61 +4060,103 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return true;
FormatToken *TokenBeforeMatchingParen =
Left.MatchingParen->getPreviousNonComment();
- if (!TokenBeforeMatchingParen || !Left.is(TT_TypeDeclarationParen))
+ if (!TokenBeforeMatchingParen || Left.isNot(TT_TypeDeclarationParen))
return true;
}
- // Add a space if the previous token is a pointer qualifer or the closing
+ // Add a space if the previous token is a pointer qualifier or the closing
// parenthesis of __attribute__(()) expression and the style requires spaces
// after pointer qualifiers.
if ((Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_After ||
Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_Both) &&
- (Left.is(TT_AttributeParen) || Left.canBePointerOrReferenceQualifier()))
+ (Left.is(TT_AttributeRParen) ||
+ Left.canBePointerOrReferenceQualifier())) {
+ return true;
+ }
+ if (Left.Tok.isLiteral())
return true;
- return (
- Left.Tok.isLiteral() ||
- (!Left.isOneOf(TT_PointerOrReference, tok::l_paren) &&
- (getTokenPointerOrReferenceAlignment(Right) != FormatStyle::PAS_Left ||
- (Line.IsMultiVariableDeclStmt &&
- (Left.NestingLevel == 0 ||
- (Left.NestingLevel == 1 && Line.First->is(tok::kw_for)))))));
+ // for (auto a = 0, b = 0; const auto & c : {1, 2, 3})
+ if (Left.isTypeOrIdentifier() && Right.Next && Right.Next->Next &&
+ Right.Next->Next->is(TT_RangeBasedForLoopColon)) {
+ return getTokenPointerOrReferenceAlignment(Right) !=
+ FormatStyle::PAS_Left;
+ }
+ return !Left.isOneOf(TT_PointerOrReference, tok::l_paren) &&
+ (getTokenPointerOrReferenceAlignment(Right) !=
+ FormatStyle::PAS_Left ||
+ (Line.IsMultiVariableDeclStmt &&
+ (Left.NestingLevel == 0 ||
+ (Left.NestingLevel == 1 && startsWithInitStatement(Line)))));
}
if (Right.is(TT_FunctionTypeLParen) && Left.isNot(tok::l_paren) &&
- (!Left.is(TT_PointerOrReference) ||
+ (Left.isNot(TT_PointerOrReference) ||
(getTokenPointerOrReferenceAlignment(Left) != FormatStyle::PAS_Right &&
- !Line.IsMultiVariableDeclStmt)))
+ !Line.IsMultiVariableDeclStmt))) {
return true;
+ }
if (Left.is(TT_PointerOrReference)) {
- // Add a space if the next token is a pointer qualifer and the style
+ // Add a space if the next token is a pointer qualifier and the style
// requires spaces before pointer qualifiers.
if ((Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_Before ||
Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_Both) &&
- Right.canBePointerOrReferenceQualifier())
+ Right.canBePointerOrReferenceQualifier()) {
+ return true;
+ }
+ // & 1
+ if (Right.Tok.isLiteral())
+ return true;
+ // & /* comment
+ if (Right.is(TT_BlockComment))
return true;
- return Right.Tok.isLiteral() || Right.is(TT_BlockComment) ||
- (Right.isOneOf(Keywords.kw_override, Keywords.kw_final) &&
- !Right.is(TT_StartOfName)) ||
- (Right.is(tok::l_brace) && Right.is(BK_Block)) ||
- (!Right.isOneOf(TT_PointerOrReference, TT_ArraySubscriptLSquare,
- tok::l_paren) &&
- (getTokenPointerOrReferenceAlignment(Left) !=
- FormatStyle::PAS_Right &&
- !Line.IsMultiVariableDeclStmt) &&
- Left.Previous &&
- !Left.Previous->isOneOf(tok::l_paren, tok::coloncolon,
- tok::l_square));
- }
- // Ensure right pointer alignement with ellipsis e.g. int *...P
+ // foo() -> const Bar * override/final
+ // S::foo() & noexcept/requires
+ if (Right.isOneOf(Keywords.kw_override, Keywords.kw_final, tok::kw_noexcept,
+ TT_RequiresClause) &&
+ Right.isNot(TT_StartOfName)) {
+ return true;
+ }
+ // & {
+ if (Right.is(tok::l_brace) && Right.is(BK_Block))
+ return true;
+ // for (auto a = 0, b = 0; const auto& c : {1, 2, 3})
+ if (Left.Previous && Left.Previous->isTypeOrIdentifier() && Right.Next &&
+ Right.Next->is(TT_RangeBasedForLoopColon)) {
+ return getTokenPointerOrReferenceAlignment(Left) !=
+ FormatStyle::PAS_Right;
+ }
+ if (Right.isOneOf(TT_PointerOrReference, TT_ArraySubscriptLSquare,
+ tok::l_paren)) {
+ return false;
+ }
+ if (getTokenPointerOrReferenceAlignment(Left) == FormatStyle::PAS_Right)
+ return false;
+ // FIXME: Setting IsMultiVariableDeclStmt for the whole line is error-prone,
+ // because it does not take into account nested scopes like lambdas.
+ // In multi-variable declaration statements, attach */& to the variable
+ // independently of the style. However, avoid doing it if we are in a nested
+ // scope, e.g. lambda. We still need to special-case statements with
+ // initializers.
+ if (Line.IsMultiVariableDeclStmt &&
+ (Left.NestingLevel == Line.First->NestingLevel ||
+ ((Left.NestingLevel == Line.First->NestingLevel + 1) &&
+ startsWithInitStatement(Line)))) {
+ return false;
+ }
+ return Left.Previous && !Left.Previous->isOneOf(
+ tok::l_paren, tok::coloncolon, tok::l_square);
+ }
+ // Ensure right pointer alignment with ellipsis e.g. int *...P
if (Left.is(tok::ellipsis) && Left.Previous &&
- Left.Previous->isOneOf(tok::star, tok::amp, tok::ampamp))
+ Left.Previous->isPointerOrReference()) {
return Style.PointerAlignment != FormatStyle::PAS_Right;
+ }
if (Right.is(tok::star) && Left.is(tok::l_paren))
return false;
- if (Left.is(tok::star) && Right.isOneOf(tok::star, tok::amp, tok::ampamp))
+ if (Left.is(tok::star) && Right.isPointerOrReference())
return false;
- if (Right.isOneOf(tok::star, tok::amp, tok::ampamp)) {
+ if (Right.isPointerOrReference()) {
const FormatToken *Previous = &Left;
- while (Previous && !Previous->is(tok::kw_operator)) {
+ while (Previous && Previous->isNot(tok::kw_operator)) {
if (Previous->is(tok::identifier) || Previous->isSimpleTypeSpecifier()) {
Previous = Previous->getPreviousNonComment();
continue;
@@ -3078,30 +4185,32 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
// dependent on PointerAlignment style.
if (Previous) {
if (Previous->endsSequence(tok::kw_operator))
- return (Style.PointerAlignment != FormatStyle::PAS_Left);
- if (Previous->is(tok::kw_const) || Previous->is(tok::kw_volatile))
+ return Style.PointerAlignment != FormatStyle::PAS_Left;
+ if (Previous->is(tok::kw_const) || Previous->is(tok::kw_volatile)) {
return (Style.PointerAlignment != FormatStyle::PAS_Left) ||
(Style.SpaceAroundPointerQualifiers ==
FormatStyle::SAPQ_After) ||
(Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_Both);
+ }
}
}
+ if (Style.isCSharp() && Left.is(Keywords.kw_is) && Right.is(tok::l_square))
+ return true;
const auto SpaceRequiredForArrayInitializerLSquare =
[](const FormatToken &LSquareTok, const FormatStyle &Style) {
return Style.SpacesInContainerLiterals ||
- ((Style.Language == FormatStyle::LK_Proto ||
- Style.Language == FormatStyle::LK_TextProto) &&
- !Style.Cpp11BracedListStyle &&
+ (Style.isProto() && !Style.Cpp11BracedListStyle &&
LSquareTok.endsSequence(tok::l_square, tok::colon,
TT_SelectorName));
};
- if (Left.is(tok::l_square))
+ if (Left.is(tok::l_square)) {
return (Left.is(TT_ArrayInitializerLSquare) && Right.isNot(tok::r_square) &&
SpaceRequiredForArrayInitializerLSquare(Left, Style)) ||
(Left.isOneOf(TT_ArraySubscriptLSquare, TT_StructuredBindingLSquare,
TT_LambdaLSquare) &&
Style.SpacesInSquareBrackets && Right.isNot(tok::r_square));
- if (Right.is(tok::r_square))
+ }
+ if (Right.is(tok::r_square)) {
return Right.MatchingParen &&
((Right.MatchingParen->is(TT_ArrayInitializerLSquare) &&
SpaceRequiredForArrayInitializerLSquare(*Right.MatchingParen,
@@ -3109,125 +4218,256 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
(Style.SpacesInSquareBrackets &&
Right.MatchingParen->isOneOf(TT_ArraySubscriptLSquare,
TT_StructuredBindingLSquare,
- TT_LambdaLSquare)) ||
- Right.MatchingParen->is(TT_AttributeParen));
+ TT_LambdaLSquare)));
+ }
if (Right.is(tok::l_square) &&
!Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare,
TT_DesignatedInitializerLSquare,
TT_StructuredBindingLSquare, TT_AttributeSquare) &&
!Left.isOneOf(tok::numeric_constant, TT_DictLiteral) &&
- !(!Left.is(tok::r_square) && Style.SpaceBeforeSquareBrackets &&
- Right.is(TT_ArraySubscriptLSquare)))
+ !(Left.isNot(tok::r_square) && Style.SpaceBeforeSquareBrackets &&
+ Right.is(TT_ArraySubscriptLSquare))) {
return false;
+ }
if (Left.is(tok::l_brace) && Right.is(tok::r_brace))
return !Left.Children.empty(); // No spaces in "{}".
if ((Left.is(tok::l_brace) && Left.isNot(BK_Block)) ||
(Right.is(tok::r_brace) && Right.MatchingParen &&
- Right.MatchingParen->isNot(BK_Block)))
- return Style.Cpp11BracedListStyle ? Style.SpacesInParentheses : true;
- if (Left.is(TT_BlockComment))
+ Right.MatchingParen->isNot(BK_Block))) {
+ return !Style.Cpp11BracedListStyle || Style.SpacesInParensOptions.Other;
+ }
+ if (Left.is(TT_BlockComment)) {
// No whitespace in x(/*foo=*/1), except for JavaScript.
- return Style.Language == FormatStyle::LK_JavaScript ||
- !Left.TokenText.endswith("=*/");
+ return Style.isJavaScript() || !Left.TokenText.ends_with("=*/");
+ }
// Space between template and attribute.
// e.g. template <typename T> [[nodiscard]] ...
if (Left.is(TT_TemplateCloser) && Right.is(TT_AttributeSquare))
return true;
+ // Space before parentheses common for all languages
if (Right.is(tok::l_paren)) {
- if ((Left.is(tok::r_paren) && Left.is(TT_AttributeParen)) ||
- (Left.is(tok::r_square) && Left.is(TT_AttributeSquare)))
+ if (Left.is(TT_TemplateCloser) && Right.isNot(TT_FunctionTypeLParen))
+ return spaceRequiredBeforeParens(Right);
+ if (Left.isOneOf(TT_RequiresClause,
+ TT_RequiresClauseInARequiresExpression)) {
+ return Style.SpaceBeforeParensOptions.AfterRequiresInClause ||
+ spaceRequiredBeforeParens(Right);
+ }
+ if (Left.is(TT_RequiresExpression)) {
+ return Style.SpaceBeforeParensOptions.AfterRequiresInExpression ||
+ spaceRequiredBeforeParens(Right);
+ }
+ if (Left.is(TT_AttributeRParen) ||
+ (Left.is(tok::r_square) && Left.is(TT_AttributeSquare))) {
return true;
- if (Style.SpaceBeforeParens ==
- FormatStyle::SBPO_ControlStatementsExceptControlMacros &&
- Left.is(TT_ForEachMacro))
- return false;
- if (Style.SpaceBeforeParens ==
- FormatStyle::SBPO_ControlStatementsExceptControlMacros &&
- Left.is(TT_IfMacro))
- return false;
- return Line.Type == LT_ObjCDecl || Left.is(tok::semi) ||
- (Style.SpaceBeforeParens != FormatStyle::SBPO_Never &&
- (Left.isOneOf(tok::pp_elif, tok::kw_for, tok::kw_while,
- tok::kw_switch, tok::kw_case, TT_ForEachMacro,
- TT_ObjCForIn) ||
- Left.isIf(Line.Type != LT_PreprocessorDirective) ||
- (Left.isOneOf(tok::kw_try, Keywords.kw___except, tok::kw_catch,
- tok::kw_new, tok::kw_delete) &&
- (!Left.Previous || Left.Previous->isNot(tok::period))))) ||
- (spaceRequiredBeforeParens(Right) &&
- (Left.is(tok::identifier) || Left.isFunctionLikeKeyword() ||
- Left.is(tok::r_paren) || Left.isSimpleTypeSpecifier() ||
- (Left.is(tok::r_square) && Left.MatchingParen &&
- Left.MatchingParen->is(TT_LambdaLSquare))) &&
- Line.Type != LT_PreprocessorDirective);
+ }
+ if (Left.is(TT_ForEachMacro)) {
+ return Style.SpaceBeforeParensOptions.AfterForeachMacros ||
+ spaceRequiredBeforeParens(Right);
+ }
+ if (Left.is(TT_IfMacro)) {
+ return Style.SpaceBeforeParensOptions.AfterIfMacros ||
+ spaceRequiredBeforeParens(Right);
+ }
+ if (Style.SpaceBeforeParens == FormatStyle::SBPO_Custom &&
+ Left.isOneOf(tok::kw_new, tok::kw_delete) &&
+ Right.isNot(TT_OverloadedOperatorLParen) &&
+ !(Line.MightBeFunctionDecl && Left.is(TT_FunctionDeclarationName))) {
+ return Style.SpaceBeforeParensOptions.AfterPlacementOperator;
+ }
+ if (Line.Type == LT_ObjCDecl)
+ return true;
+ if (Left.is(tok::semi))
+ return true;
+ if (Left.isOneOf(tok::pp_elif, tok::kw_for, tok::kw_while, tok::kw_switch,
+ tok::kw_case, TT_ForEachMacro, TT_ObjCForIn) ||
+ Left.isIf(Line.Type != LT_PreprocessorDirective) ||
+ Right.is(TT_ConditionLParen)) {
+ return Style.SpaceBeforeParensOptions.AfterControlStatements ||
+ spaceRequiredBeforeParens(Right);
+ }
+
+ // TODO add Operator overloading specific Options to
+ // SpaceBeforeParensOptions
+ if (Right.is(TT_OverloadedOperatorLParen))
+ return spaceRequiredBeforeParens(Right);
+ // Function declaration or definition
+ if (Line.MightBeFunctionDecl && (Left.is(TT_FunctionDeclarationName))) {
+ if (Line.mightBeFunctionDefinition()) {
+ return Style.SpaceBeforeParensOptions.AfterFunctionDefinitionName ||
+ spaceRequiredBeforeParens(Right);
+ } else {
+ return Style.SpaceBeforeParensOptions.AfterFunctionDeclarationName ||
+ spaceRequiredBeforeParens(Right);
+ }
+ }
+ // Lambda
+ if (Line.Type != LT_PreprocessorDirective && Left.is(tok::r_square) &&
+ Left.MatchingParen && Left.MatchingParen->is(TT_LambdaLSquare)) {
+ return Style.SpaceBeforeParensOptions.AfterFunctionDefinitionName ||
+ spaceRequiredBeforeParens(Right);
+ }
+ if (!Left.Previous || Left.Previous->isNot(tok::period)) {
+ if (Left.isOneOf(tok::kw_try, Keywords.kw___except, tok::kw_catch)) {
+ return Style.SpaceBeforeParensOptions.AfterControlStatements ||
+ spaceRequiredBeforeParens(Right);
+ }
+ if (Left.isOneOf(tok::kw_new, tok::kw_delete)) {
+ return ((!Line.MightBeFunctionDecl || !Left.Previous) &&
+ Style.SpaceBeforeParens != FormatStyle::SBPO_Never) ||
+ spaceRequiredBeforeParens(Right);
+ }
+
+ if (Left.is(tok::r_square) && Left.MatchingParen &&
+ Left.MatchingParen->Previous &&
+ Left.MatchingParen->Previous->is(tok::kw_delete)) {
+ return (Style.SpaceBeforeParens != FormatStyle::SBPO_Never) ||
+ spaceRequiredBeforeParens(Right);
+ }
+ }
+ // Handle builtins like identifiers.
+ if (Line.Type != LT_PreprocessorDirective &&
+ (Left.Tok.getIdentifierInfo() || Left.is(tok::r_paren))) {
+ return spaceRequiredBeforeParens(Right);
+ }
+ return false;
}
if (Left.is(tok::at) && Right.Tok.getObjCKeywordID() != tok::objc_not_keyword)
return false;
- if (Right.is(TT_UnaryOperator))
+ if (Right.is(TT_UnaryOperator)) {
return !Left.isOneOf(tok::l_paren, tok::l_square, tok::at) &&
(Left.isNot(tok::colon) || Left.isNot(TT_ObjCMethodExpr));
- if ((Left.isOneOf(tok::identifier, tok::greater, tok::r_square,
+ }
+ // No space between the variable name and the initializer list.
+ // A a1{1};
+ // Verilog doesn't have such syntax, but it has word operators that are C++
+ // identifiers like `a inside {b, c}`. So the rule is not applicable.
+ if (!Style.isVerilog() &&
+ (Left.isOneOf(tok::identifier, tok::greater, tok::r_square,
tok::r_paren) ||
Left.isSimpleTypeSpecifier()) &&
Right.is(tok::l_brace) && Right.getNextNonComment() &&
- Right.isNot(BK_Block))
+ Right.isNot(BK_Block)) {
return false;
+ }
if (Left.is(tok::period) || Right.is(tok::period))
return false;
- if (Right.is(tok::hash) && Left.is(tok::identifier) && Left.TokenText == "L")
+ // u#str, U#str, L#str, u8#str
+ // uR#str, UR#str, LR#str, u8R#str
+ if (Right.is(tok::hash) && Left.is(tok::identifier) &&
+ (Left.TokenText == "L" || Left.TokenText == "u" ||
+ Left.TokenText == "U" || Left.TokenText == "u8" ||
+ Left.TokenText == "LR" || Left.TokenText == "uR" ||
+ Left.TokenText == "UR" || Left.TokenText == "u8R")) {
return false;
+ }
if (Left.is(TT_TemplateCloser) && Left.MatchingParen &&
Left.MatchingParen->Previous &&
(Left.MatchingParen->Previous->is(tok::period) ||
- Left.MatchingParen->Previous->is(tok::coloncolon)))
+ Left.MatchingParen->Previous->is(tok::coloncolon))) {
// Java call to generic function with explicit type:
// A.<B<C<...>>>DoSomething();
// A::<B<C<...>>>DoSomething(); // With a Java 8 method reference.
return false;
+ }
if (Left.is(TT_TemplateCloser) && Right.is(tok::l_square))
return false;
- if (Left.is(tok::l_brace) && Left.endsSequence(TT_DictLiteral, tok::at))
+ if (Left.is(tok::l_brace) && Left.endsSequence(TT_DictLiteral, tok::at)) {
// Objective-C dictionary literal -> no space after opening brace.
return false;
+ }
if (Right.is(tok::r_brace) && Right.MatchingParen &&
- Right.MatchingParen->endsSequence(TT_DictLiteral, tok::at))
+ Right.MatchingParen->endsSequence(TT_DictLiteral, tok::at)) {
// Objective-C dictionary literal -> no space before closing brace.
return false;
+ }
if (Right.getType() == TT_TrailingAnnotation &&
Right.isOneOf(tok::amp, tok::ampamp) &&
Left.isOneOf(tok::kw_const, tok::kw_volatile) &&
- (!Right.Next || Right.Next->is(tok::semi)))
+ (!Right.Next || Right.Next->is(tok::semi))) {
// Match const and volatile ref-qualifiers without any additional
// qualifiers such as
// void Fn() const &;
return getTokenReferenceAlignment(Right) != FormatStyle::PAS_Left;
+ }
+
return true;
}
bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
- const FormatToken &Right) {
+ const FormatToken &Right) const {
const FormatToken &Left = *Right.Previous;
- auto HasExistingWhitespace = [&Right]() {
- return Right.WhitespaceRange.getBegin() != Right.WhitespaceRange.getEnd();
- };
- if (Right.Tok.getIdentifierInfo() && Left.Tok.getIdentifierInfo())
- return true; // Never ever merge two identifiers.
+
+ // If the token is finalized don't touch it (as it could be in a
+ // clang-format-off section).
+ if (Left.Finalized)
+ return Right.hasWhitespaceBefore();
+
+ // Never ever merge two words.
+ if (Keywords.isWordLike(Right) && Keywords.isWordLike(Left))
+ return true;
+
+ // Leave a space between * and /* to avoid C4138 `comment end` found outside
+ // of comment.
+ if (Left.is(tok::star) && Right.is(tok::comment))
+ return true;
+
if (Style.isCpp()) {
+ if (Left.is(TT_OverloadedOperator) &&
+ Right.isOneOf(TT_TemplateOpener, TT_TemplateCloser)) {
+ return true;
+ }
+ // Space between UDL and dot: auto b = 4s .count();
+ if (Right.is(tok::period) && Left.is(tok::numeric_constant))
+ return true;
+ // Space between import <iostream>.
+ // or import .....;
+ if (Left.is(Keywords.kw_import) && Right.isOneOf(tok::less, tok::ellipsis))
+ return true;
+ // Space between `module :` and `import :`.
+ if (Left.isOneOf(Keywords.kw_module, Keywords.kw_import) &&
+ Right.is(TT_ModulePartitionColon)) {
+ return true;
+ }
+ // No space between import foo:bar but keep a space between import :bar;
+ if (Left.is(tok::identifier) && Right.is(TT_ModulePartitionColon))
+ return false;
+ // No space between :bar;
+ if (Left.is(TT_ModulePartitionColon) &&
+ Right.isOneOf(tok::identifier, tok::kw_private)) {
+ return false;
+ }
+ if (Left.is(tok::ellipsis) && Right.is(tok::identifier) &&
+ Line.First->is(Keywords.kw_import)) {
+ return false;
+ }
+ // Space in __attribute__((attr)) ::type.
+ if (Left.isOneOf(TT_AttributeRParen, TT_AttributeMacro) &&
+ Right.is(tok::coloncolon)) {
+ return true;
+ }
+
if (Left.is(tok::kw_operator))
return Right.is(tok::coloncolon);
if (Right.is(tok::l_brace) && Right.is(BK_BracedInit) &&
- !Left.opensScope() && Style.SpaceBeforeCpp11BracedList)
+ !Left.opensScope() && Style.SpaceBeforeCpp11BracedList) {
+ return true;
+ }
+ if (Left.is(tok::less) && Left.is(TT_OverloadedOperator) &&
+ Right.is(TT_TemplateOpener)) {
return true;
- } else if (Style.Language == FormatStyle::LK_Proto ||
- Style.Language == FormatStyle::LK_TextProto) {
+ }
+ } else if (Style.isProto()) {
if (Right.is(tok::period) &&
Left.isOneOf(Keywords.kw_optional, Keywords.kw_required,
- Keywords.kw_repeated, Keywords.kw_extend))
+ Keywords.kw_repeated, Keywords.kw_extend)) {
return true;
+ }
if (Right.is(tok::l_paren) &&
- Left.isOneOf(Keywords.kw_returns, Keywords.kw_option))
+ Left.isOneOf(Keywords.kw_returns, Keywords.kw_option)) {
return true;
+ }
if (Right.isOneOf(tok::l_brace, tok::less) && Left.is(TT_SelectorName))
return true;
// Slashes occur in text protocol extension syntax: [type/type] { ... }.
@@ -3235,18 +4475,19 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return false;
if (Left.MatchingParen &&
Left.MatchingParen->is(TT_ProtoExtensionLSquare) &&
- Right.isOneOf(tok::l_brace, tok::less))
+ Right.isOneOf(tok::l_brace, tok::less)) {
return !Style.Cpp11BracedListStyle;
+ }
// A percent is probably part of a formatting specification, such as %lld.
if (Left.is(tok::percent))
return false;
// Preserve the existence of a space before a percent for cases like 0x%04x
// and "%d %d"
if (Left.is(tok::numeric_constant) && Right.is(tok::percent))
- return HasExistingWhitespace();
+ return Right.hasWhitespaceBefore();
} else if (Style.isJson()) {
- if (Right.is(tok::colon))
- return false;
+ if (Right.is(tok::colon) && Left.is(tok::string_literal))
+ return Style.SpaceBeforeJsonColon;
} else if (Style.isCSharp()) {
// Require spaces around '{' and before '}' unless they appear in
// interpolated strings. Interpolated strings are merged into a single token
@@ -3304,11 +4545,13 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return true;
// space between keywords and paren e.g. "using ("
- if (Right.is(tok::l_paren))
+ if (Right.is(tok::l_paren)) {
if (Left.isOneOf(tok::kw_using, Keywords.kw_async, Keywords.kw_when,
- Keywords.kw_lock))
- return Style.SpaceBeforeParens == FormatStyle::SBPO_ControlStatements ||
+ Keywords.kw_lock)) {
+ return Style.SpaceBeforeParensOptions.AfterControlStatements ||
spaceRequiredBeforeParens(Right);
+ }
+ }
// space between method modifier and opening parenthesis of a tuple return
// type
@@ -3317,15 +4560,17 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
Keywords.kw_internal, Keywords.kw_abstract,
Keywords.kw_sealed, Keywords.kw_override,
Keywords.kw_async, Keywords.kw_unsafe) &&
- Right.is(tok::l_paren))
+ Right.is(tok::l_paren)) {
return true;
- } else if (Style.Language == FormatStyle::LK_JavaScript) {
+ }
+ } else if (Style.isJavaScript()) {
if (Left.is(TT_FatArrow))
return true;
// for await ( ...
if (Right.is(tok::l_paren) && Left.is(Keywords.kw_await) && Left.Previous &&
- Left.Previous->is(tok::kw_for))
+ Left.Previous->is(tok::kw_for)) {
return true;
+ }
if (Left.is(Keywords.kw_async) && Right.is(tok::l_paren) &&
Right.MatchingParen) {
const FormatToken *Next = Right.MatchingParen->getNextNonComment();
@@ -3334,22 +4579,26 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Next && Next->is(TT_FatArrow))
return true;
}
- if ((Left.is(TT_TemplateString) && Left.TokenText.endswith("${")) ||
- (Right.is(TT_TemplateString) && Right.TokenText.startswith("}")))
+ if ((Left.is(TT_TemplateString) && Left.TokenText.ends_with("${")) ||
+ (Right.is(TT_TemplateString) && Right.TokenText.starts_with("}"))) {
return false;
+ }
// In tagged template literals ("html`bar baz`"), there is no space between
// the tag identifier and the template string.
if (Keywords.IsJavaScriptIdentifier(Left,
/* AcceptIdentifierName= */ false) &&
- Right.is(TT_TemplateString))
+ Right.is(TT_TemplateString)) {
return false;
+ }
if (Right.is(tok::star) &&
- Left.isOneOf(Keywords.kw_function, Keywords.kw_yield))
+ Left.isOneOf(Keywords.kw_function, Keywords.kw_yield)) {
return false;
+ }
if (Right.isOneOf(tok::l_brace, tok::l_square) &&
Left.isOneOf(Keywords.kw_function, Keywords.kw_yield,
- Keywords.kw_extends, Keywords.kw_implements))
+ Keywords.kw_extends, Keywords.kw_implements)) {
return true;
+ }
if (Right.is(tok::l_paren)) {
// JS methods can use some keywords as names (e.g. `delete()`).
if (Line.MustBeDeclaration && Left.Tok.getIdentifierInfo())
@@ -3357,36 +4606,41 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
// Valid JS method names can include keywords, e.g. `foo.delete()` or
// `bar.instanceof()`. Recognize call positions by preceding period.
if (Left.Previous && Left.Previous->is(tok::period) &&
- Left.Tok.getIdentifierInfo())
+ Left.Tok.getIdentifierInfo()) {
return false;
+ }
// Additional unary JavaScript operators that need a space after.
if (Left.isOneOf(tok::kw_throw, Keywords.kw_await, Keywords.kw_typeof,
- tok::kw_void))
+ tok::kw_void)) {
return true;
+ }
}
// `foo as const;` casts into a const type.
- if (Left.endsSequence(tok::kw_const, Keywords.kw_as)) {
+ if (Left.endsSequence(tok::kw_const, Keywords.kw_as))
return false;
- }
if ((Left.isOneOf(Keywords.kw_let, Keywords.kw_var, Keywords.kw_in,
tok::kw_const) ||
// "of" is only a keyword if it appears after another identifier
// (e.g. as "const x of y" in a for loop), or after a destructuring
// operation (const [x, y] of z, const {a, b} of c).
(Left.is(Keywords.kw_of) && Left.Previous &&
- (Left.Previous->Tok.is(tok::identifier) ||
+ (Left.Previous->is(tok::identifier) ||
Left.Previous->isOneOf(tok::r_square, tok::r_brace)))) &&
- (!Left.Previous || !Left.Previous->is(tok::period)))
+ (!Left.Previous || Left.Previous->isNot(tok::period))) {
return true;
+ }
if (Left.isOneOf(tok::kw_for, Keywords.kw_as) && Left.Previous &&
- Left.Previous->is(tok::period) && Right.is(tok::l_paren))
+ Left.Previous->is(tok::period) && Right.is(tok::l_paren)) {
return false;
+ }
if (Left.is(Keywords.kw_as) &&
- Right.isOneOf(tok::l_square, tok::l_brace, tok::l_paren))
+ Right.isOneOf(tok::l_square, tok::l_brace, tok::l_paren)) {
return true;
+ }
if (Left.is(tok::kw_default) && Left.Previous &&
- Left.Previous->is(tok::kw_export))
+ Left.Previous->is(tok::kw_export)) {
return true;
+ }
if (Left.is(Keywords.kw_is) && Right.is(tok::l_brace))
return true;
if (Right.isOneOf(TT_JsTypeColon, TT_JsTypeOptionalQuestion))
@@ -3394,56 +4648,154 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Left.is(TT_JsTypeOperator) || Right.is(TT_JsTypeOperator))
return false;
if ((Left.is(tok::l_brace) || Right.is(tok::r_brace)) &&
- Line.First->isOneOf(Keywords.kw_import, tok::kw_export))
+ Line.First->isOneOf(Keywords.kw_import, tok::kw_export)) {
return false;
+ }
if (Left.is(tok::ellipsis))
return false;
if (Left.is(TT_TemplateCloser) &&
!Right.isOneOf(tok::equal, tok::l_brace, tok::comma, tok::l_square,
- Keywords.kw_implements, Keywords.kw_extends))
+ Keywords.kw_implements, Keywords.kw_extends)) {
// Type assertions ('<type>expr') are not followed by whitespace. Other
// locations that should have whitespace following are identified by the
// above set of follower tokens.
return false;
+ }
if (Right.is(TT_NonNullAssertion))
return false;
if (Left.is(TT_NonNullAssertion) &&
- Right.isOneOf(Keywords.kw_as, Keywords.kw_in))
+ Right.isOneOf(Keywords.kw_as, Keywords.kw_in)) {
return true; // "x! as string", "x! in y"
+ }
} else if (Style.Language == FormatStyle::LK_Java) {
if (Left.is(tok::r_square) && Right.is(tok::l_brace))
return true;
- if (Left.is(Keywords.kw_synchronized) && Right.is(tok::l_paren))
- return Style.SpaceBeforeParens != FormatStyle::SBPO_Never;
+ // spaces inside square brackets.
+ if (Left.is(tok::l_square) || Right.is(tok::r_square))
+ return Style.SpacesInSquareBrackets;
+
+ if (Left.is(Keywords.kw_synchronized) && Right.is(tok::l_paren)) {
+ return Style.SpaceBeforeParensOptions.AfterControlStatements ||
+ spaceRequiredBeforeParens(Right);
+ }
if ((Left.isOneOf(tok::kw_static, tok::kw_public, tok::kw_private,
tok::kw_protected) ||
Left.isOneOf(Keywords.kw_final, Keywords.kw_abstract,
Keywords.kw_native)) &&
- Right.is(TT_TemplateOpener))
+ Right.is(TT_TemplateOpener)) {
+ return true;
+ }
+ } else if (Style.isVerilog()) {
+ // An escaped identifier ends with whitespace.
+ if (Style.isVerilog() && Left.is(tok::identifier) &&
+ Left.TokenText[0] == '\\') {
+ return true;
+ }
+ // Add space between things in a primitive's state table unless in a
+ // transition like `(0?)`.
+ if ((Left.is(TT_VerilogTableItem) &&
+ !Right.isOneOf(tok::r_paren, tok::semi)) ||
+ (Right.is(TT_VerilogTableItem) && Left.isNot(tok::l_paren))) {
+ const FormatToken *Next = Right.getNextNonComment();
+ return !(Next && Next->is(tok::r_paren));
+ }
+ // Don't add space within a delay like `#0`.
+ if (Left.isNot(TT_BinaryOperator) &&
+ Left.isOneOf(Keywords.kw_verilogHash, Keywords.kw_verilogHashHash)) {
+ return false;
+ }
+ // Add space after a delay.
+ if (Right.isNot(tok::semi) &&
+ (Left.endsSequence(tok::numeric_constant, Keywords.kw_verilogHash) ||
+ Left.endsSequence(tok::numeric_constant,
+ Keywords.kw_verilogHashHash) ||
+ (Left.is(tok::r_paren) && Left.MatchingParen &&
+ Left.MatchingParen->endsSequence(tok::l_paren, tok::at)))) {
return true;
+ }
+ // Don't add embedded spaces in a number literal like `16'h1?ax` or an array
+ // literal like `'{}`.
+ if (Left.is(Keywords.kw_apostrophe) ||
+ (Left.is(TT_VerilogNumberBase) && Right.is(tok::numeric_constant))) {
+ return false;
+ }
+ // Add spaces around the implication operator `->`.
+ if (Left.is(tok::arrow) || Right.is(tok::arrow))
+ return true;
+ // Don't add spaces between two at signs. Like in a coverage event.
+ // Don't add spaces between at and a sensitivity list like
+ // `@(posedge clk)`.
+ if (Left.is(tok::at) && Right.isOneOf(tok::l_paren, tok::star, tok::at))
+ return false;
+ // Add space between the type name and dimension like `logic [1:0]`.
+ if (Right.is(tok::l_square) &&
+ Left.isOneOf(TT_VerilogDimensionedTypeName, Keywords.kw_function)) {
+ return true;
+ }
+ // In a tagged union expression, there should be a space after the tag.
+ if (Right.isOneOf(tok::period, Keywords.kw_apostrophe) &&
+ Keywords.isVerilogIdentifier(Left) && Left.getPreviousNonComment() &&
+ Left.getPreviousNonComment()->is(Keywords.kw_tagged)) {
+ return true;
+ }
+ // Don't add spaces between a casting type and the quote or repetition count
+ // and the brace. The case of tagged union expressions is handled by the
+ // previous rule.
+ if ((Right.is(Keywords.kw_apostrophe) ||
+ (Right.is(BK_BracedInit) && Right.is(tok::l_brace))) &&
+ !(Left.isOneOf(Keywords.kw_assign, Keywords.kw_unique) ||
+ Keywords.isVerilogWordOperator(Left)) &&
+ (Left.isOneOf(tok::r_square, tok::r_paren, tok::r_brace,
+ tok::numeric_constant) ||
+ Keywords.isWordLike(Left))) {
+ return false;
+ }
+ // Don't add spaces in imports like `import foo::*;`.
+ if ((Right.is(tok::star) && Left.is(tok::coloncolon)) ||
+ (Left.is(tok::star) && Right.is(tok::semi))) {
+ return false;
+ }
+ // Add space in attribute like `(* ASYNC_REG = "TRUE" *)`.
+ if (Left.endsSequence(tok::star, tok::l_paren) && Right.is(tok::identifier))
+ return true;
+ // Add space before drive strength like in `wire (strong1, pull0)`.
+ if (Right.is(tok::l_paren) && Right.is(TT_VerilogStrength))
+ return true;
+ // Don't add space in a streaming concatenation like `{>>{j}}`.
+ if ((Left.is(tok::l_brace) &&
+ Right.isOneOf(tok::lessless, tok::greatergreater)) ||
+ (Left.endsSequence(tok::lessless, tok::l_brace) ||
+ Left.endsSequence(tok::greatergreater, tok::l_brace))) {
+ return false;
+ }
}
if (Left.is(TT_ImplicitStringLiteral))
- return HasExistingWhitespace();
+ return Right.hasWhitespaceBefore();
if (Line.Type == LT_ObjCMethodDecl) {
if (Left.is(TT_ObjCMethodSpecifier))
return true;
- if (Left.is(tok::r_paren) && canBeObjCSelectorComponent(Right))
+ if (Left.is(tok::r_paren) && Left.isNot(TT_AttributeRParen) &&
+ canBeObjCSelectorComponent(Right)) {
// Don't space between ')' and <id> or ')' and 'new'. 'new' is not a
// keyword in Objective-C, and '+ (instancetype)new;' is a standard class
// method declaration.
return false;
+ }
}
if (Line.Type == LT_ObjCProperty &&
- (Right.is(tok::equal) || Left.is(tok::equal)))
+ (Right.is(tok::equal) || Left.is(tok::equal))) {
return false;
+ }
- if (Right.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow) ||
- Left.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow))
+ if (Right.is(TT_TrailingReturnArrow) || Left.is(TT_TrailingReturnArrow))
return true;
- if (Right.is(TT_OverloadedOperatorLParen))
- return spaceRequiredBeforeParens(Right);
- if (Left.is(tok::comma))
+
+ if (Left.is(tok::comma) && Right.isNot(TT_OverloadedOperatorLParen) &&
+ // In an unexpanded macro call we only find the parentheses and commas
+ // in a line; the commas and closing parenthesis do not require a space.
+ (Left.Children.empty() || !Left.MacroParent)) {
return true;
+ }
if (Right.is(tok::comma))
return false;
if (Right.is(TT_ObjCBlockLParen))
@@ -3453,15 +4805,20 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Right.is(TT_InheritanceColon) && !Style.SpaceBeforeInheritanceColon)
return false;
if (Right.is(TT_RangeBasedForLoopColon) &&
- !Style.SpaceBeforeRangeBasedForLoopColon)
+ !Style.SpaceBeforeRangeBasedForLoopColon) {
return false;
- if (Left.is(TT_BitFieldColon))
+ }
+ if (Left.is(TT_BitFieldColon)) {
return Style.BitFieldColonSpacing == FormatStyle::BFCS_Both ||
Style.BitFieldColonSpacing == FormatStyle::BFCS_After;
+ }
if (Right.is(tok::colon)) {
- if (Line.First->isOneOf(tok::kw_default, tok::kw_case))
+ if (Right.is(TT_CaseLabelColon))
return Style.SpaceBeforeCaseColon;
- if (!Right.getNextNonComment() || Right.getNextNonComment()->is(tok::semi))
+ if (Right.is(TT_GotoLabelColon))
+ return false;
+ // `private:` and `public:`.
+ if (!Right.getNextNonComment())
return false;
if (Right.is(TT_ObjCMethodExpr))
return false;
@@ -3475,19 +4832,23 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return false;
if (Right.is(TT_CSharpNamedArgumentColon))
return false;
- if (Right.is(TT_BitFieldColon))
+ if (Right.is(TT_GenericSelectionColon))
+ return false;
+ if (Right.is(TT_BitFieldColon)) {
return Style.BitFieldColonSpacing == FormatStyle::BFCS_Both ||
Style.BitFieldColonSpacing == FormatStyle::BFCS_Before;
+ }
return true;
}
// Do not merge "- -" into "--".
if ((Left.isOneOf(tok::minus, tok::minusminus) &&
Right.isOneOf(tok::minus, tok::minusminus)) ||
(Left.isOneOf(tok::plus, tok::plusplus) &&
- Right.isOneOf(tok::plus, tok::plusplus)))
+ Right.isOneOf(tok::plus, tok::plusplus))) {
return true;
+ }
if (Left.is(TT_UnaryOperator)) {
- if (!Right.is(tok::l_paren)) {
+ if (Right.isNot(tok::l_paren)) {
// The alternative operators for ~ and ! are "compl" and "not".
// If they are used instead, we do not want to combine them with
// the token to the right, unless that is a left paren.
@@ -3506,43 +4867,49 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
// If the next token is a binary operator or a selector name, we have
// incorrectly classified the parenthesis as a cast. FIXME: Detect correctly.
- if (Left.is(TT_CastRParen))
+ if (Left.is(TT_CastRParen)) {
return Style.SpaceAfterCStyleCast ||
Right.isOneOf(TT_BinaryOperator, TT_SelectorName);
+ }
- auto ShouldAddSpacesInAngles = [this, &HasExistingWhitespace]() {
+ auto ShouldAddSpacesInAngles = [this, &Right]() {
if (this->Style.SpacesInAngles == FormatStyle::SIAS_Always)
return true;
if (this->Style.SpacesInAngles == FormatStyle::SIAS_Leave)
- return HasExistingWhitespace();
+ return Right.hasWhitespaceBefore();
return false;
};
if (Left.is(tok::greater) && Right.is(tok::greater)) {
if (Style.Language == FormatStyle::LK_TextProto ||
- (Style.Language == FormatStyle::LK_Proto && Left.is(TT_DictLiteral)))
+ (Style.Language == FormatStyle::LK_Proto && Left.is(TT_DictLiteral))) {
return !Style.Cpp11BracedListStyle;
+ }
return Right.is(TT_TemplateCloser) && Left.is(TT_TemplateCloser) &&
((Style.Standard < FormatStyle::LS_Cpp11) ||
ShouldAddSpacesInAngles());
}
if (Right.isOneOf(tok::arrow, tok::arrowstar, tok::periodstar) ||
Left.isOneOf(tok::arrow, tok::period, tok::arrowstar, tok::periodstar) ||
- (Right.is(tok::period) && Right.isNot(TT_DesignatedInitializerPeriod)))
+ (Right.is(tok::period) && Right.isNot(TT_DesignatedInitializerPeriod))) {
return false;
+ }
if (!Style.SpaceBeforeAssignmentOperators && Left.isNot(TT_TemplateCloser) &&
- Right.getPrecedence() == prec::Assignment)
+ Right.getPrecedence() == prec::Assignment) {
return false;
+ }
if (Style.Language == FormatStyle::LK_Java && Right.is(tok::coloncolon) &&
- (Left.is(tok::identifier) || Left.is(tok::kw_this)))
+ (Left.is(tok::identifier) || Left.is(tok::kw_this))) {
return false;
- if (Right.is(tok::coloncolon) && Left.is(tok::identifier))
+ }
+ if (Right.is(tok::coloncolon) && Left.is(tok::identifier)) {
// Generally don't remove existing spaces between an identifier and "::".
// The identifier might actually be a macro name such as ALWAYS_INLINE. If
// this turns out to be too lenient, add analysis of the identifier itself.
- return HasExistingWhitespace();
+ return Right.hasWhitespaceBefore();
+ }
if (Right.is(tok::coloncolon) &&
- !Left.isOneOf(tok::l_brace, tok::comment, tok::l_paren))
+ !Left.isOneOf(tok::l_brace, tok::comment, tok::l_paren)) {
// Put a space between < and :: in vector< ::std::string >
return (Left.is(TT_TemplateOpener) &&
((Style.Standard < FormatStyle::LS_Cpp11) ||
@@ -3550,30 +4917,34 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
!(Left.isOneOf(tok::l_paren, tok::r_paren, tok::l_square,
tok::kw___super, TT_TemplateOpener,
TT_TemplateCloser)) ||
- (Left.is(tok::l_paren) && Style.SpacesInParentheses);
+ (Left.is(tok::l_paren) && Style.SpacesInParensOptions.Other);
+ }
if ((Left.is(TT_TemplateOpener)) != (Right.is(TT_TemplateCloser)))
return ShouldAddSpacesInAngles();
// Space before TT_StructuredBindingLSquare.
- if (Right.is(TT_StructuredBindingLSquare))
+ if (Right.is(TT_StructuredBindingLSquare)) {
return !Left.isOneOf(tok::amp, tok::ampamp) ||
getTokenReferenceAlignment(Left) != FormatStyle::PAS_Right;
+ }
// Space before & or && following a TT_StructuredBindingLSquare.
if (Right.Next && Right.Next->is(TT_StructuredBindingLSquare) &&
- Right.isOneOf(tok::amp, tok::ampamp))
+ Right.isOneOf(tok::amp, tok::ampamp)) {
return getTokenReferenceAlignment(Right) != FormatStyle::PAS_Left;
- if ((Right.is(TT_BinaryOperator) && !Left.is(tok::l_paren)) ||
+ }
+ if ((Right.is(TT_BinaryOperator) && Left.isNot(tok::l_paren)) ||
(Left.isOneOf(TT_BinaryOperator, TT_ConditionalExpr) &&
- !Right.is(tok::r_paren)))
+ Right.isNot(tok::r_paren))) {
return true;
- if (Left.is(TT_TemplateCloser) && Right.is(tok::l_paren) &&
- Right.isNot(TT_FunctionTypeLParen))
- return spaceRequiredBeforeParens(Right);
+ }
if (Right.is(TT_TemplateOpener) && Left.is(tok::r_paren) &&
- Left.MatchingParen && Left.MatchingParen->is(TT_OverloadedOperatorLParen))
+ Left.MatchingParen &&
+ Left.MatchingParen->is(TT_OverloadedOperatorLParen)) {
return false;
+ }
if (Right.is(tok::less) && Left.isNot(tok::l_paren) &&
- Line.startsWith(tok::hash))
+ Line.Type == LT_ImportStatement) {
return true;
+ }
if (Right.is(TT_TrailingUnaryOperator))
return false;
if (Left.is(TT_RegexLiteral))
@@ -3587,7 +4958,7 @@ static bool isAllmanBrace(const FormatToken &Tok) {
!Tok.isOneOf(TT_ObjCBlockLBrace, TT_LambdaLBrace, TT_DictLiteral);
}
-// Returns 'true' if 'Tok' is an function argument.
+// Returns 'true' if 'Tok' is a function argument.
static bool IsFunctionArgument(const FormatToken &Tok) {
return Tok.MatchingParen && Tok.MatchingParen->Next &&
Tok.MatchingParen->Next->isOneOf(tok::comma, tok::r_paren);
@@ -3600,56 +4971,71 @@ isItAnEmptyLambdaAllowed(const FormatToken &Tok,
}
static bool isAllmanLambdaBrace(const FormatToken &Tok) {
- return (Tok.is(tok::l_brace) && Tok.is(BK_Block) &&
- !Tok.isOneOf(TT_ObjCBlockLBrace, TT_DictLiteral));
+ return Tok.is(tok::l_brace) && Tok.is(BK_Block) &&
+ !Tok.isOneOf(TT_ObjCBlockLBrace, TT_DictLiteral);
}
bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
- const FormatToken &Right) {
+ const FormatToken &Right) const {
const FormatToken &Left = *Right.Previous;
if (Right.NewlinesBefore > 1 && Style.MaxEmptyLinesToKeep > 0)
return true;
if (Style.isCSharp()) {
+ if (Left.is(TT_FatArrow) && Right.is(tok::l_brace) &&
+ Style.BraceWrapping.AfterFunction) {
+ return true;
+ }
if (Right.is(TT_CSharpNamedArgumentColon) ||
- Left.is(TT_CSharpNamedArgumentColon))
+ Left.is(TT_CSharpNamedArgumentColon)) {
return false;
+ }
if (Right.is(TT_CSharpGenericTypeConstraint))
return true;
+ if (Right.Next && Right.Next->is(TT_FatArrow) &&
+ (Right.is(tok::numeric_constant) ||
+ (Right.is(tok::identifier) && Right.TokenText == "_"))) {
+ return true;
+ }
// Break after C# [...] and before public/protected/private/internal.
if (Left.is(TT_AttributeSquare) && Left.is(tok::r_square) &&
(Right.isAccessSpecifier(/*ColonRequired=*/false) ||
- Right.is(Keywords.kw_internal)))
+ Right.is(Keywords.kw_internal))) {
return true;
+ }
// Break between ] and [ but only when there are really 2 attributes.
if (Left.is(TT_AttributeSquare) && Right.is(TT_AttributeSquare) &&
- Left.is(tok::r_square) && Right.is(tok::l_square))
+ Left.is(tok::r_square) && Right.is(tok::l_square)) {
return true;
+ }
- } else if (Style.Language == FormatStyle::LK_JavaScript) {
+ } else if (Style.isJavaScript()) {
// FIXME: This might apply to other languages and token kinds.
if (Right.is(tok::string_literal) && Left.is(tok::plus) && Left.Previous &&
- Left.Previous->is(tok::string_literal))
+ Left.Previous->is(tok::string_literal)) {
return true;
+ }
if (Left.is(TT_DictLiteral) && Left.is(tok::l_brace) && Line.Level == 0 &&
Left.Previous && Left.Previous->is(tok::equal) &&
Line.First->isOneOf(tok::identifier, Keywords.kw_import, tok::kw_export,
tok::kw_const) &&
// kw_var/kw_let are pseudo-tokens that are tok::identifier, so match
// above.
- !Line.First->isOneOf(Keywords.kw_var, Keywords.kw_let))
+ !Line.First->isOneOf(Keywords.kw_var, Keywords.kw_let)) {
// Object literals on the top level of a file are treated as "enum-style".
// Each key/value pair is put on a separate line, instead of bin-packing.
return true;
+ }
if (Left.is(tok::l_brace) && Line.Level == 0 &&
(Line.startsWith(tok::kw_enum) ||
Line.startsWith(tok::kw_const, tok::kw_enum) ||
Line.startsWith(tok::kw_export, tok::kw_enum) ||
- Line.startsWith(tok::kw_export, tok::kw_const, tok::kw_enum)))
+ Line.startsWith(tok::kw_export, tok::kw_const, tok::kw_enum))) {
// JavaScript top-level enum key/value pairs are put on separate lines
// instead of bin-packing.
return true;
+ }
if (Right.is(tok::r_brace) && Left.is(tok::l_brace) && Left.Previous &&
Left.Previous->is(TT_FatArrow)) {
// JS arrow function (=> {...}).
@@ -3670,22 +5056,42 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
}
if (Right.is(tok::r_brace) && Left.is(tok::l_brace) &&
- !Left.Children.empty())
+ !Left.Children.empty()) {
// Support AllowShortFunctionsOnASingleLine for JavaScript.
return Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_None ||
Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_Empty ||
(Left.NestingLevel == 0 && Line.Level == 0 &&
Style.AllowShortFunctionsOnASingleLine &
FormatStyle::SFS_InlineOnly);
+ }
} else if (Style.Language == FormatStyle::LK_Java) {
if (Right.is(tok::plus) && Left.is(tok::string_literal) && Right.Next &&
- Right.Next->is(tok::string_literal))
+ Right.Next->is(tok::string_literal)) {
+ return true;
+ }
+ } else if (Style.isVerilog()) {
+ // Break between assignments.
+ if (Left.is(TT_VerilogAssignComma))
return true;
- } else if (Style.Language == FormatStyle::LK_Cpp ||
- Style.Language == FormatStyle::LK_ObjC ||
- Style.Language == FormatStyle::LK_Proto ||
- Style.Language == FormatStyle::LK_TableGen ||
- Style.Language == FormatStyle::LK_TextProto) {
+ // Break between ports of different types.
+ if (Left.is(TT_VerilogTypeComma))
+ return true;
+ // Break between ports in a module instantiation and after the parameter
+ // list.
+ if (Style.VerilogBreakBetweenInstancePorts &&
+ (Left.is(TT_VerilogInstancePortComma) ||
+ (Left.is(tok::r_paren) && Keywords.isVerilogIdentifier(Right) &&
+ Left.MatchingParen &&
+ Left.MatchingParen->is(TT_VerilogInstancePortLParen)))) {
+ return true;
+ }
+ // Break after labels. In Verilog labels don't have the 'case' keyword, so
+ // it is hard to identify them in UnwrappedLineParser.
+ if (!Keywords.isVerilogBegin(Right) && Keywords.isVerilogEndOfLabel(Left))
+ return true;
+ } else if (Style.BreakAdjacentStringLiterals &&
+ (Style.isCpp() || Style.isProto() ||
+ Style.Language == FormatStyle::LK_TableGen)) {
if (Left.isStringLiteral() && Right.isStringLiteral())
return true;
}
@@ -3697,17 +5103,27 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
// }
if (Left.is(TT_DictLiteral) && Left.is(tok::l_brace))
return true;
- // Always break after a JSON array opener.
- // [
- // ]
- if (Left.is(TT_ArrayInitializerLSquare) && Left.is(tok::l_square) &&
- !Right.is(tok::r_square))
- return true;
- // Always break afer successive entries.
- // 1,
- // 2
- if (Left.is(tok::comma))
- return true;
+ // Always break after a JSON array opener based on BreakArrays.
+ if ((Left.is(TT_ArrayInitializerLSquare) && Left.is(tok::l_square) &&
+ Right.isNot(tok::r_square)) ||
+ Left.is(tok::comma)) {
+ if (Right.is(tok::l_brace))
+ return true;
+ // scan to the right if an we see an object or an array inside
+ // then break.
+ for (const auto *Tok = &Right; Tok; Tok = Tok->Next) {
+ if (Tok->isOneOf(tok::l_brace, tok::l_square))
+ return true;
+ if (Tok->isOneOf(tok::r_brace, tok::r_square))
+ break;
+ }
+ return Style.BreakArrays;
+ }
+ }
+
+ if (Line.startsWith(tok::kw_asm) && Right.is(TT_InlineASMColon) &&
+ Style.BreakBeforeInlineASMColon == FormatStyle::BBIAS_Always) {
+ return true;
}
// If the last token before a '}', ']', or ')' is a comma or a trailing
@@ -3717,66 +5133,112 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
if (Style.JavaScriptWrapImports || Line.Type != LT_ImportStatement) {
const FormatToken *BeforeClosingBrace = nullptr;
if ((Left.isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) ||
- (Style.Language == FormatStyle::LK_JavaScript &&
- Left.is(tok::l_paren))) &&
- Left.isNot(BK_Block) && Left.MatchingParen)
+ (Style.isJavaScript() && Left.is(tok::l_paren))) &&
+ Left.isNot(BK_Block) && Left.MatchingParen) {
BeforeClosingBrace = Left.MatchingParen->Previous;
- else if (Right.MatchingParen &&
- (Right.MatchingParen->isOneOf(tok::l_brace,
- TT_ArrayInitializerLSquare) ||
- (Style.Language == FormatStyle::LK_JavaScript &&
- Right.MatchingParen->is(tok::l_paren))))
+ } else if (Right.MatchingParen &&
+ (Right.MatchingParen->isOneOf(tok::l_brace,
+ TT_ArrayInitializerLSquare) ||
+ (Style.isJavaScript() &&
+ Right.MatchingParen->is(tok::l_paren)))) {
BeforeClosingBrace = &Left;
+ }
if (BeforeClosingBrace && (BeforeClosingBrace->is(tok::comma) ||
- BeforeClosingBrace->isTrailingComment()))
+ BeforeClosingBrace->isTrailingComment())) {
return true;
+ }
}
- if (Right.is(tok::comment))
+ if (Right.is(tok::comment)) {
return Left.isNot(BK_BracedInit) && Left.isNot(TT_CtorInitializerColon) &&
(Right.NewlinesBefore > 0 && Right.HasUnescapedNewline);
+ }
if (Left.isTrailingComment())
return true;
- if (Right.Previous->IsUnterminatedLiteral)
+ if (Left.IsUnterminatedLiteral)
return true;
+ // FIXME: Breaking after newlines seems useful in general. Turn this into an
+ // option and recognize more cases like endl etc, and break independent of
+ // what comes after operator lessless.
if (Right.is(tok::lessless) && Right.Next &&
- Right.Previous->is(tok::string_literal) &&
- Right.Next->is(tok::string_literal))
+ Right.Next->is(tok::string_literal) && Left.is(tok::string_literal) &&
+ Left.TokenText.ends_with("\\n\"")) {
return true;
+ }
+ if (Right.is(TT_RequiresClause)) {
+ switch (Style.RequiresClausePosition) {
+ case FormatStyle::RCPS_OwnLine:
+ case FormatStyle::RCPS_WithFollowing:
+ return true;
+ default:
+ break;
+ }
+ }
// Can break after template<> declaration
- if (Right.Previous->ClosesTemplateDeclaration &&
- Right.Previous->MatchingParen &&
- Right.Previous->MatchingParen->NestingLevel == 0) {
+ if (Left.ClosesTemplateDeclaration && Left.MatchingParen &&
+ Left.MatchingParen->NestingLevel == 0) {
// Put concepts on the next line e.g.
// template<typename T>
// concept ...
if (Right.is(tok::kw_concept))
- return Style.BreakBeforeConceptDeclarations;
- return (Style.AlwaysBreakTemplateDeclarations == FormatStyle::BTDS_Yes);
+ return Style.BreakBeforeConceptDeclarations == FormatStyle::BBCDS_Always;
+ return Style.AlwaysBreakTemplateDeclarations == FormatStyle::BTDS_Yes;
}
- if (Right.is(TT_CtorInitializerComma) &&
- Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma &&
- !Style.ConstructorInitializerAllOnOneLineOrOnePerLine)
- return true;
- if (Right.is(TT_CtorInitializerColon) &&
+ if (Left.ClosesRequiresClause && Right.isNot(tok::semi)) {
+ switch (Style.RequiresClausePosition) {
+ case FormatStyle::RCPS_OwnLine:
+ case FormatStyle::RCPS_WithPreceding:
+ return true;
+ default:
+ break;
+ }
+ }
+ if (Style.PackConstructorInitializers == FormatStyle::PCIS_Never) {
+ if (Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeColon &&
+ (Left.is(TT_CtorInitializerComma) ||
+ Right.is(TT_CtorInitializerColon))) {
+ return true;
+ }
+
+ if (Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon &&
+ Left.isOneOf(TT_CtorInitializerColon, TT_CtorInitializerComma)) {
+ return true;
+ }
+ }
+ if (Style.PackConstructorInitializers < FormatStyle::PCIS_CurrentLine &&
Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma &&
- !Style.ConstructorInitializerAllOnOneLineOrOnePerLine)
+ Right.isOneOf(TT_CtorInitializerComma, TT_CtorInitializerColon)) {
return true;
+ }
+ if (Style.PackConstructorInitializers == FormatStyle::PCIS_NextLineOnly) {
+ if ((Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeColon ||
+ Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma) &&
+ Right.is(TT_CtorInitializerColon)) {
+ return true;
+ }
+
+ if (Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon &&
+ Left.is(TT_CtorInitializerColon)) {
+ return true;
+ }
+ }
// Break only if we have multiple inheritance.
if (Style.BreakInheritanceList == FormatStyle::BILS_BeforeComma &&
- Right.is(TT_InheritanceComma))
+ Right.is(TT_InheritanceComma)) {
return true;
+ }
if (Style.BreakInheritanceList == FormatStyle::BILS_AfterComma &&
- Left.is(TT_InheritanceComma))
+ Left.is(TT_InheritanceComma)) {
return true;
- if (Right.is(tok::string_literal) && Right.TokenText.startswith("R\""))
+ }
+ if (Right.is(tok::string_literal) && Right.TokenText.starts_with("R\"")) {
// Multiline raw string literals are special wrt. line breaks. The author
// has made a deliberate choice and might have aligned the contents of the
// string literal accordingly. Thus, we try keep existing line breaks.
return Right.IsMultiline && Right.NewlinesBefore > 0;
- if ((Right.Previous->is(tok::l_brace) ||
- (Right.Previous->is(tok::less) && Right.Previous->Previous &&
- Right.Previous->Previous->is(tok::equal))) &&
+ }
+ if ((Left.is(tok::l_brace) || (Left.is(tok::less) && Left.Previous &&
+ Left.Previous->is(tok::equal))) &&
Right.NestingLevel == 1 && Style.Language == FormatStyle::LK_Proto) {
// Don't put enums or option definitions onto single lines in protocol
// buffers.
@@ -3785,40 +5247,79 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
if (Right.is(TT_InlineASMBrace))
return Right.HasUnescapedNewline;
- if (isAllmanBrace(Left) || isAllmanBrace(Right))
- return (Line.startsWith(tok::kw_enum) && Style.BraceWrapping.AfterEnum) ||
- (Line.startsWith(tok::kw_typedef, tok::kw_enum) &&
- Style.BraceWrapping.AfterEnum) ||
- (Line.startsWith(tok::kw_class) && Style.BraceWrapping.AfterClass) ||
- (Line.startsWith(tok::kw_struct) && Style.BraceWrapping.AfterStruct);
+ if (isAllmanBrace(Left) || isAllmanBrace(Right)) {
+ auto *FirstNonComment = Line.getFirstNonComment();
+ bool AccessSpecifier =
+ FirstNonComment &&
+ FirstNonComment->isOneOf(Keywords.kw_internal, tok::kw_public,
+ tok::kw_private, tok::kw_protected);
+
+ if (Style.BraceWrapping.AfterEnum) {
+ if (Line.startsWith(tok::kw_enum) ||
+ Line.startsWith(tok::kw_typedef, tok::kw_enum)) {
+ return true;
+ }
+ // Ensure BraceWrapping for `public enum A {`.
+ if (AccessSpecifier && FirstNonComment->Next &&
+ FirstNonComment->Next->is(tok::kw_enum)) {
+ return true;
+ }
+ }
+
+ // Ensure BraceWrapping for `public interface A {`.
+ if (Style.BraceWrapping.AfterClass &&
+ ((AccessSpecifier && FirstNonComment->Next &&
+ FirstNonComment->Next->is(Keywords.kw_interface)) ||
+ Line.startsWith(Keywords.kw_interface))) {
+ return true;
+ }
+
+ // Don't attempt to interpret struct return types as structs.
+ if (Right.isNot(TT_FunctionLBrace)) {
+ return (Line.startsWith(tok::kw_class) &&
+ Style.BraceWrapping.AfterClass) ||
+ (Line.startsWith(tok::kw_struct) &&
+ Style.BraceWrapping.AfterStruct);
+ }
+ }
+
if (Left.is(TT_ObjCBlockLBrace) &&
- Style.AllowShortBlocksOnASingleLine == FormatStyle::SBS_Never)
+ Style.AllowShortBlocksOnASingleLine == FormatStyle::SBS_Never) {
+ return true;
+ }
+
+ // Ensure wrapping after __attribute__((XX)) and @interface etc.
+ if (Left.isOneOf(TT_AttributeRParen, TT_AttributeMacro) &&
+ Right.is(TT_ObjCDecl)) {
return true;
+ }
if (Left.is(TT_LambdaLBrace)) {
if (IsFunctionArgument(Left) &&
- Style.AllowShortLambdasOnASingleLine == FormatStyle::SLS_Inline)
+ Style.AllowShortLambdasOnASingleLine == FormatStyle::SLS_Inline) {
return false;
+ }
if (Style.AllowShortLambdasOnASingleLine == FormatStyle::SLS_None ||
Style.AllowShortLambdasOnASingleLine == FormatStyle::SLS_Inline ||
(!Left.Children.empty() &&
- Style.AllowShortLambdasOnASingleLine == FormatStyle::SLS_Empty))
+ Style.AllowShortLambdasOnASingleLine == FormatStyle::SLS_Empty)) {
return true;
+ }
}
if (Style.BraceWrapping.BeforeLambdaBody && Right.is(TT_LambdaLBrace) &&
- Left.isOneOf(tok::star, tok::amp, tok::ampamp, TT_TemplateCloser)) {
+ (Left.isPointerOrReference() || Left.is(TT_TemplateCloser))) {
return true;
}
// Put multiple Java annotation on a new line.
- if ((Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript) &&
+ if ((Style.Language == FormatStyle::LK_Java || Style.isJavaScript()) &&
Left.is(TT_LeadingJavaAnnotation) &&
Right.isNot(TT_LeadingJavaAnnotation) && Right.isNot(tok::l_paren) &&
- (Line.Last->is(tok::l_brace) || Style.BreakAfterJavaFieldAnnotations))
+ (Line.Last->is(tok::l_brace) || Style.BreakAfterJavaFieldAnnotations)) {
return true;
+ }
if (Right.is(TT_ProtoExtensionLSquare))
return true;
@@ -3850,12 +5351,11 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
// together.
//
// We ensure elsewhere that extensions are always on their own line.
- if ((Style.Language == FormatStyle::LK_Proto ||
- Style.Language == FormatStyle::LK_TextProto) &&
- Right.is(TT_SelectorName) && !Right.is(tok::r_square) && Right.Next) {
+ if (Style.isProto() && Right.is(TT_SelectorName) &&
+ Right.isNot(tok::r_square) && Right.Next) {
// Keep `@submessage` together in:
// @submessage { key: value }
- if (Right.Previous && Right.Previous->is(tok::at))
+ if (Left.is(tok::at))
return false;
// Look for the scope opener after selector in cases like:
// selector { ...
@@ -3908,73 +5408,55 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
return true;
}
- // Deal with lambda arguments in C++ - we want consistent line breaks whether
- // they happen to be at arg0, arg1 or argN. The selection is a bit nuanced
- // as aggressive line breaks are placed when the lambda is not the last arg.
- if ((Style.Language == FormatStyle::LK_Cpp ||
- Style.Language == FormatStyle::LK_ObjC) &&
- Left.is(tok::l_paren) && Left.BlockParameterCount > 0 &&
- !Right.isOneOf(tok::l_paren, TT_LambdaLSquare)) {
- // Multiple lambdas in the same function call force line breaks.
- if (Left.BlockParameterCount > 1)
- return true;
-
- // A lambda followed by another arg forces a line break.
- if (!Left.Role)
- return false;
- auto Comma = Left.Role->lastComma();
- if (!Comma)
- return false;
- auto Next = Comma->getNextNonComment();
- if (!Next)
- return false;
- if (!Next->isOneOf(TT_LambdaLSquare, tok::l_brace, tok::caret))
- return true;
- }
-
return false;
}
bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
- const FormatToken &Right) {
+ const FormatToken &Right) const {
const FormatToken &Left = *Right.Previous;
// Language-specific stuff.
if (Style.isCSharp()) {
if (Left.isOneOf(TT_CSharpNamedArgumentColon, TT_AttributeColon) ||
- Right.isOneOf(TT_CSharpNamedArgumentColon, TT_AttributeColon))
+ Right.isOneOf(TT_CSharpNamedArgumentColon, TT_AttributeColon)) {
return false;
+ }
// Only break after commas for generic type constraints.
if (Line.First->is(TT_CSharpGenericTypeConstraint))
return Left.is(TT_CSharpGenericTypeConstraintComma);
// Keep nullable operators attached to their identifiers.
- if (Right.is(TT_CSharpNullable)) {
+ if (Right.is(TT_CSharpNullable))
return false;
- }
} else if (Style.Language == FormatStyle::LK_Java) {
if (Left.isOneOf(Keywords.kw_throws, Keywords.kw_extends,
- Keywords.kw_implements))
+ Keywords.kw_implements)) {
return false;
+ }
if (Right.isOneOf(Keywords.kw_throws, Keywords.kw_extends,
- Keywords.kw_implements))
+ Keywords.kw_implements)) {
return true;
- } else if (Style.Language == FormatStyle::LK_JavaScript) {
+ }
+ } else if (Style.isJavaScript()) {
const FormatToken *NonComment = Right.getPreviousNonComment();
if (NonComment &&
NonComment->isOneOf(
tok::kw_return, Keywords.kw_yield, tok::kw_continue, tok::kw_break,
tok::kw_throw, Keywords.kw_interface, Keywords.kw_type,
tok::kw_static, tok::kw_public, tok::kw_private, tok::kw_protected,
- Keywords.kw_readonly, Keywords.kw_abstract, Keywords.kw_get,
- Keywords.kw_set, Keywords.kw_async, Keywords.kw_await))
+ Keywords.kw_readonly, Keywords.kw_override, Keywords.kw_abstract,
+ Keywords.kw_get, Keywords.kw_set, Keywords.kw_async,
+ Keywords.kw_await)) {
return false; // Otherwise automatic semicolon insertion would trigger.
+ }
if (Right.NestingLevel == 0 &&
(Left.Tok.getIdentifierInfo() ||
Left.isOneOf(tok::r_square, tok::r_paren)) &&
- Right.isOneOf(tok::l_square, tok::l_paren))
+ Right.isOneOf(tok::l_square, tok::l_paren)) {
return false; // Otherwise automatic semicolon insertion would trigger.
+ }
if (NonComment && NonComment->is(tok::identifier) &&
- NonComment->TokenText == "asserts")
+ NonComment->TokenText == "asserts") {
return false;
+ }
if (Left.is(TT_FatArrow) && Right.is(tok::l_brace))
return false;
if (Left.is(TT_JsTypeColon))
@@ -3994,7 +5476,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
// is: 'name',
// ...
// });
- if (!Next || !Next->is(tok::colon))
+ if (!Next || Next->isNot(tok::colon))
return false;
}
if (Left.is(Keywords.kw_in))
@@ -4017,20 +5499,21 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
Right.isOneOf(Keywords.kw_module, tok::kw_namespace,
Keywords.kw_function, tok::kw_class, tok::kw_enum,
Keywords.kw_interface, Keywords.kw_type, Keywords.kw_var,
- Keywords.kw_let, tok::kw_const))
+ Keywords.kw_let, tok::kw_const)) {
// See grammar for 'declare' statements at:
- // https://github.com/Microsoft/TypeScript/blob/master/doc/spec.md#A.10
+ // https://github.com/Microsoft/TypeScript/blob/main/doc/spec-ARCHIVED.md#A.10
return false;
+ }
if (Left.isOneOf(Keywords.kw_module, tok::kw_namespace) &&
- Right.isOneOf(tok::identifier, tok::string_literal))
+ Right.isOneOf(tok::identifier, tok::string_literal)) {
return false; // must not break in "module foo { ...}"
+ }
if (Right.is(TT_TemplateString) && Right.closesScope())
return false;
// Don't split tagged template literal so there is a break between the tag
// identifier and template string.
- if (Left.is(tok::identifier) && Right.is(TT_TemplateString)) {
+ if (Left.is(tok::identifier) && Right.is(TT_TemplateString))
return false;
- }
if (Left.is(TT_TemplateString) && Left.opensScope())
return true;
}
@@ -4040,26 +5523,29 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
if (Left.Tok.getObjCKeywordID() == tok::objc_interface)
return false;
if (Left.isOneOf(TT_JavaAnnotation, TT_LeadingJavaAnnotation))
- return !Right.is(tok::l_paren);
- if (Right.is(TT_PointerOrReference))
+ return Right.isNot(tok::l_paren);
+ if (Right.is(TT_PointerOrReference)) {
return Line.IsMultiVariableDeclStmt ||
(getTokenPointerOrReferenceAlignment(Right) ==
FormatStyle::PAS_Right &&
(!Right.Next || Right.Next->isNot(TT_FunctionDeclarationName)));
+ }
if (Right.isOneOf(TT_StartOfName, TT_FunctionDeclarationName) ||
- Right.is(tok::kw_operator))
+ Right.is(tok::kw_operator)) {
return true;
+ }
if (Left.is(TT_PointerOrReference))
return false;
- if (Right.isTrailingComment())
+ if (Right.isTrailingComment()) {
// We rely on MustBreakBefore being set correctly here as we should not
// change the "binding" behavior of a comment.
// The first comment in a braced lists is always interpreted as belonging to
// the first list element. Otherwise, it should be placed outside of the
// list.
return Left.is(BK_BracedInit) ||
- (Left.is(TT_CtorInitializerColon) &&
+ (Left.is(TT_CtorInitializerColon) && Right.NewlinesBefore > 0 &&
Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon);
+ }
if (Left.is(tok::question) && Right.is(tok::colon))
return false;
if (Right.is(TT_ConditionalExpr) || Right.is(tok::question))
@@ -4070,16 +5556,17 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return Style.BreakInheritanceList == FormatStyle::BILS_AfterColon;
if (Right.is(TT_InheritanceColon))
return Style.BreakInheritanceList != FormatStyle::BILS_AfterColon;
- if (Right.is(TT_ObjCMethodExpr) && !Right.is(tok::r_square) &&
- Left.isNot(TT_SelectorName))
+ if (Right.is(TT_ObjCMethodExpr) && Right.isNot(tok::r_square) &&
+ Left.isNot(TT_SelectorName)) {
return true;
+ }
if (Right.is(tok::colon) &&
- !Right.isOneOf(TT_CtorInitializerColon, TT_InlineASMColon))
+ !Right.isOneOf(TT_CtorInitializerColon, TT_InlineASMColon)) {
return false;
+ }
if (Left.is(tok::colon) && Left.isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)) {
- if (Style.Language == FormatStyle::LK_Proto ||
- Style.Language == FormatStyle::LK_TextProto) {
+ if (Style.isProto()) {
if (!Style.AlwaysBreakBeforeMultilineStrings && Right.isStringLiteral())
return false;
// Prevent cases like:
@@ -4096,7 +5583,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
//
// instead, even if it is longer by one line.
//
- // Note that this allows allows the "{" to go over the column limit
+ // Note that this allows the "{" to go over the column limit
// when the column limit is just between ":" and "{", but that does
// not happen too often and alternative formattings in this case are
// not much better.
@@ -4108,68 +5595,114 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
// repeated: [ ... ]
if (((Right.is(tok::l_brace) || Right.is(tok::less)) &&
Right.is(TT_DictLiteral)) ||
- Right.is(TT_ArrayInitializerLSquare))
+ Right.is(TT_ArrayInitializerLSquare)) {
return false;
+ }
}
return true;
}
if (Right.is(tok::r_square) && Right.MatchingParen &&
- Right.MatchingParen->is(TT_ProtoExtensionLSquare))
+ Right.MatchingParen->is(TT_ProtoExtensionLSquare)) {
return false;
+ }
if (Right.is(TT_SelectorName) || (Right.is(tok::identifier) && Right.Next &&
- Right.Next->is(TT_ObjCMethodExpr)))
+ Right.Next->is(TT_ObjCMethodExpr))) {
return Left.isNot(tok::period); // FIXME: Properly parse ObjC calls.
+ }
if (Left.is(tok::r_paren) && Line.Type == LT_ObjCProperty)
return true;
+ if (Right.is(tok::kw_concept))
+ return Style.BreakBeforeConceptDeclarations != FormatStyle::BBCDS_Never;
+ if (Right.is(TT_RequiresClause))
+ return true;
if (Left.ClosesTemplateDeclaration || Left.is(TT_FunctionAnnotationRParen))
return true;
+ if (Left.ClosesRequiresClause)
+ return true;
if (Right.isOneOf(TT_RangeBasedForLoopColon, TT_OverloadedOperatorLParen,
- TT_OverloadedOperator))
+ TT_OverloadedOperator)) {
return false;
+ }
if (Left.is(TT_RangeBasedForLoopColon))
return true;
if (Right.is(TT_RangeBasedForLoopColon))
return false;
if (Left.is(TT_TemplateCloser) && Right.is(TT_TemplateOpener))
return true;
+ if ((Left.is(tok::greater) && Right.is(tok::greater)) ||
+ (Left.is(tok::less) && Right.is(tok::less))) {
+ return false;
+ }
+ if (Right.is(TT_BinaryOperator) &&
+ Style.BreakBeforeBinaryOperators != FormatStyle::BOS_None &&
+ (Style.BreakBeforeBinaryOperators == FormatStyle::BOS_All ||
+ Right.getPrecedence() != prec::Assignment)) {
+ return true;
+ }
if (Left.isOneOf(TT_TemplateCloser, TT_UnaryOperator) ||
- Left.is(tok::kw_operator))
+ Left.is(tok::kw_operator)) {
return false;
+ }
if (Left.is(tok::equal) && !Right.isOneOf(tok::kw_default, tok::kw_delete) &&
- Line.Type == LT_VirtualFunctionDecl && Left.NestingLevel == 0)
+ Line.Type == LT_VirtualFunctionDecl && Left.NestingLevel == 0) {
return false;
+ }
if (Left.is(tok::equal) && Right.is(tok::l_brace) &&
- !Style.Cpp11BracedListStyle)
+ !Style.Cpp11BracedListStyle) {
return false;
- if (Left.is(tok::l_paren) &&
- Left.isOneOf(TT_AttributeParen, TT_TypeDeclarationParen))
+ }
+ if (Left.is(TT_AttributeLParen) ||
+ (Left.is(tok::l_paren) && Left.is(TT_TypeDeclarationParen))) {
return false;
+ }
if (Left.is(tok::l_paren) && Left.Previous &&
- (Left.Previous->isOneOf(TT_BinaryOperator, TT_CastRParen)))
+ (Left.Previous->isOneOf(TT_BinaryOperator, TT_CastRParen))) {
return false;
+ }
if (Right.is(TT_ImplicitStringLiteral))
return false;
- if (Right.is(tok::r_paren) || Right.is(TT_TemplateCloser))
+ if (Right.is(TT_TemplateCloser))
return false;
if (Right.is(tok::r_square) && Right.MatchingParen &&
- Right.MatchingParen->is(TT_LambdaLSquare))
+ Right.MatchingParen->is(TT_LambdaLSquare)) {
return false;
+ }
// We only break before r_brace if there was a corresponding break before
// the l_brace, which is tracked by BreakBeforeClosingBrace.
- if (Right.is(tok::r_brace))
- return Right.MatchingParen && Right.MatchingParen->is(BK_Block);
+ if (Right.is(tok::r_brace)) {
+ return Right.MatchingParen && (Right.MatchingParen->is(BK_Block) ||
+ (Right.isBlockIndentedInitRBrace(Style)));
+ }
+
+ // We only break before r_paren if we're in a block indented context.
+ if (Right.is(tok::r_paren)) {
+ if (Style.AlignAfterOpenBracket != FormatStyle::BAS_BlockIndent ||
+ !Right.MatchingParen) {
+ return false;
+ }
+ auto Next = Right.Next;
+ if (Next && Next->is(tok::r_paren))
+ Next = Next->Next;
+ if (Next && Next->is(tok::l_paren))
+ return false;
+ const FormatToken *Previous = Right.MatchingParen->Previous;
+ return !(Previous && (Previous->is(tok::kw_for) || Previous->isIf()));
+ }
// Allow breaking after a trailing annotation, e.g. after a method
// declaration.
- if (Left.is(TT_TrailingAnnotation))
+ if (Left.is(TT_TrailingAnnotation)) {
return !Right.isOneOf(tok::l_brace, tok::semi, tok::equal, tok::l_paren,
tok::less, tok::coloncolon);
+ }
- if (Right.is(tok::kw___attribute) ||
- (Right.is(tok::l_square) && Right.is(TT_AttributeSquare)))
- return !Left.is(TT_AttributeSquare);
+ if (Right.isAttribute())
+ return true;
+
+ if (Right.is(tok::l_square) && Right.is(TT_AttributeSquare))
+ return Left.isNot(TT_AttributeSquare);
if (Left.is(tok::identifier) && Right.is(tok::string_literal))
return true;
@@ -4177,30 +5710,28 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
if (Right.is(tok::identifier) && Right.Next && Right.Next->is(TT_DictLiteral))
return true;
- if (Left.is(TT_CtorInitializerColon))
- return Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon;
+ if (Left.is(TT_CtorInitializerColon)) {
+ return Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon &&
+ (!Right.isTrailingComment() || Right.NewlinesBefore > 0);
+ }
if (Right.is(TT_CtorInitializerColon))
return Style.BreakConstructorInitializers != FormatStyle::BCIS_AfterColon;
if (Left.is(TT_CtorInitializerComma) &&
- Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma)
+ Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma) {
return false;
+ }
if (Right.is(TT_CtorInitializerComma) &&
- Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma)
+ Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma) {
return true;
+ }
if (Left.is(TT_InheritanceComma) &&
- Style.BreakInheritanceList == FormatStyle::BILS_BeforeComma)
+ Style.BreakInheritanceList == FormatStyle::BILS_BeforeComma) {
return false;
+ }
if (Right.is(TT_InheritanceComma) &&
- Style.BreakInheritanceList == FormatStyle::BILS_BeforeComma)
- return true;
- if ((Left.is(tok::greater) && Right.is(tok::greater)) ||
- (Left.is(tok::less) && Right.is(tok::less)))
- return false;
- if (Right.is(TT_BinaryOperator) &&
- Style.BreakBeforeBinaryOperators != FormatStyle::BOS_None &&
- (Style.BreakBeforeBinaryOperators == FormatStyle::BOS_All ||
- Right.getPrecedence() != prec::Assignment))
+ Style.BreakInheritanceList == FormatStyle::BILS_BeforeComma) {
return true;
+ }
if (Left.is(TT_ArrayInitializerLSquare))
return true;
if (Right.is(tok::kw_typename) && Left.isNot(tok::kw_const))
@@ -4209,11 +5740,13 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
!Left.isOneOf(tok::arrowstar, tok::lessless) &&
Style.BreakBeforeBinaryOperators != FormatStyle::BOS_All &&
(Style.BreakBeforeBinaryOperators == FormatStyle::BOS_None ||
- Left.getPrecedence() == prec::Assignment))
+ Left.getPrecedence() == prec::Assignment)) {
return true;
+ }
if ((Left.is(TT_AttributeSquare) && Right.is(tok::l_square)) ||
- (Left.is(tok::r_square) && Right.is(TT_AttributeSquare)))
+ (Left.is(tok::r_square) && Right.is(TT_AttributeSquare))) {
return false;
+ }
auto ShortLambdaOption = Style.AllowShortLambdasOnASingleLine;
if (Style.BraceWrapping.BeforeLambdaBody && Right.is(TT_LambdaLBrace)) {
@@ -4223,19 +5756,32 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return !isItAnEmptyLambdaAllowed(Right, ShortLambdaOption);
}
+ if (Right.is(tok::kw_noexcept) && Right.is(TT_TrailingAnnotation)) {
+ switch (Style.AllowBreakBeforeNoexceptSpecifier) {
+ case FormatStyle::BBNSS_Never:
+ return false;
+ case FormatStyle::BBNSS_Always:
+ return true;
+ case FormatStyle::BBNSS_OnlyWithParen:
+ return Right.Next && Right.Next->is(tok::l_paren);
+ }
+ }
+
return Left.isOneOf(tok::comma, tok::coloncolon, tok::semi, tok::l_brace,
tok::kw_class, tok::kw_struct, tok::comment) ||
Right.isMemberAccess() ||
- Right.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow, tok::lessless,
- tok::colon, tok::l_square, tok::at) ||
+ Right.isOneOf(TT_TrailingReturnArrow, tok::lessless, tok::colon,
+ tok::l_square, tok::at) ||
(Left.is(tok::r_paren) &&
Right.isOneOf(tok::identifier, tok::kw_const)) ||
- (Left.is(tok::l_paren) && !Right.is(tok::r_paren)) ||
- (Left.is(TT_TemplateOpener) && !Right.is(TT_TemplateCloser));
+ (Left.is(tok::l_paren) && Right.isNot(tok::r_paren)) ||
+ (Left.is(TT_TemplateOpener) && Right.isNot(TT_TemplateCloser));
}
-void TokenAnnotator::printDebugInfo(const AnnotatedLine &Line) {
- llvm::errs() << "AnnotatedTokens(L=" << Line.Level << "):\n";
+void TokenAnnotator::printDebugInfo(const AnnotatedLine &Line) const {
+ llvm::errs() << "AnnotatedTokens(L=" << Line.Level << ", P=" << Line.PPLevel
+ << ", T=" << Line.Type << ", C=" << Line.IsContinuation
+ << "):\n";
const FormatToken *Tok = Line.First;
while (Tok) {
llvm::errs() << " M=" << Tok->MustBreakBefore
@@ -4246,8 +5792,8 @@ void TokenAnnotator::printDebugInfo(const AnnotatedLine &Line) {
<< " BK=" << Tok->getBlockKind() << " P=" << Tok->SplitPenalty
<< " Name=" << Tok->Tok.getName() << " L=" << Tok->TotalLength
<< " PPK=" << Tok->getPackingKind() << " FakeLParens=";
- for (unsigned i = 0, e = Tok->FakeLParens.size(); i != e; ++i)
- llvm::errs() << Tok->FakeLParens[i] << "/";
+ for (prec::Level LParen : Tok->FakeLParens)
+ llvm::errs() << LParen << "/";
llvm::errs() << " FakeRParens=" << Tok->FakeRParens;
llvm::errs() << " II=" << Tok->Tok.getIdentifierInfo();
llvm::errs() << " Text='" << Tok->TokenText << "'\n";
@@ -4259,7 +5805,7 @@ void TokenAnnotator::printDebugInfo(const AnnotatedLine &Line) {
}
FormatStyle::PointerAlignmentStyle
-TokenAnnotator::getTokenReferenceAlignment(const FormatToken &Reference) {
+TokenAnnotator::getTokenReferenceAlignment(const FormatToken &Reference) const {
assert(Reference.isOneOf(tok::amp, tok::ampamp));
switch (Style.ReferenceAlignment) {
case FormatStyle::RAS_Pointer:
@@ -4277,7 +5823,7 @@ TokenAnnotator::getTokenReferenceAlignment(const FormatToken &Reference) {
FormatStyle::PointerAlignmentStyle
TokenAnnotator::getTokenPointerOrReferenceAlignment(
- const FormatToken &PointerOrReference) {
+ const FormatToken &PointerOrReference) const {
if (PointerOrReference.isOneOf(tok::amp, tok::ampamp)) {
switch (Style.ReferenceAlignment) {
case FormatStyle::RAS_Pointer:
diff --git a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.h b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.h
index 0f9c02dbeb34..05a6daa87d80 100644
--- a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.h
+++ b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.h
@@ -19,8 +19,6 @@
#include "clang/Format/Format.h"
namespace clang {
-class SourceManager;
-
namespace format {
enum LineType {
@@ -33,18 +31,32 @@ enum LineType {
LT_PreprocessorDirective,
LT_VirtualFunctionDecl,
LT_ArrayOfStructInitializer,
+ LT_CommentAbovePPDirective,
+};
+
+enum ScopeType {
+ // Contained in class declaration/definition.
+ ST_Class,
+ // Contained within function definition.
+ ST_Function,
+ // Contained within other scope block (loop, if/else, etc).
+ ST_Other,
};
class AnnotatedLine {
public:
AnnotatedLine(const UnwrappedLine &Line)
: First(Line.Tokens.front().Tok), Level(Line.Level),
+ PPLevel(Line.PPLevel),
MatchingOpeningBlockLineIndex(Line.MatchingOpeningBlockLineIndex),
MatchingClosingBlockLineIndex(Line.MatchingClosingBlockLineIndex),
InPPDirective(Line.InPPDirective),
+ InPragmaDirective(Line.InPragmaDirective),
+ InMacroBody(Line.InMacroBody),
MustBeDeclaration(Line.MustBeDeclaration), MightBeFunctionDecl(false),
IsMultiVariableDeclStmt(false), Affected(false),
LeadingEmptyLinesAffected(false), ChildrenAffected(false),
+ ReturnTypeWrapped(false), IsContinuation(Line.IsContinuation),
FirstStartColumn(Line.FirstStartColumn) {
assert(!Line.Tokens.empty());
@@ -53,27 +65,42 @@ public:
// left them in a different state.
First->Previous = nullptr;
FormatToken *Current = First;
- for (std::list<UnwrappedLineNode>::const_iterator I = ++Line.Tokens.begin(),
- E = Line.Tokens.end();
- I != E; ++I) {
- const UnwrappedLineNode &Node = *I;
- Current->Next = I->Tok;
- I->Tok->Previous = Current;
+ addChildren(Line.Tokens.front(), Current);
+ for (const UnwrappedLineNode &Node : llvm::drop_begin(Line.Tokens)) {
+ if (Node.Tok->MacroParent)
+ ContainsMacroCall = true;
+ Current->Next = Node.Tok;
+ Node.Tok->Previous = Current;
Current = Current->Next;
- Current->Children.clear();
- for (const auto &Child : Node.Children) {
- Children.push_back(new AnnotatedLine(Child));
- Current->Children.push_back(Children.back());
- }
+ addChildren(Node, Current);
+ // FIXME: if we add children, previous will point to the token before
+ // the children; changing this requires significant changes across
+ // clang-format.
}
Last = Current;
Last->Next = nullptr;
}
- ~AnnotatedLine() {
- for (unsigned i = 0, e = Children.size(); i != e; ++i) {
- delete Children[i];
+ void addChildren(const UnwrappedLineNode &Node, FormatToken *Current) {
+ Current->Children.clear();
+ for (const auto &Child : Node.Children) {
+ Children.push_back(new AnnotatedLine(Child));
+ if (Children.back()->ContainsMacroCall)
+ ContainsMacroCall = true;
+ Current->Children.push_back(Children.back());
}
+ }
+
+ size_t size() const {
+ size_t Size = 1;
+ for (const auto *Child : Children)
+ Size += Child->size();
+ return Size;
+ }
+
+ ~AnnotatedLine() {
+ for (AnnotatedLine *Child : Children)
+ delete Child;
FormatToken *Current = First;
while (Current) {
Current->Children.clear();
@@ -82,6 +109,10 @@ public:
}
}
+ bool isComment() const {
+ return First && First->is(tok::comment) && !First->getNextNonComment();
+ }
+
/// \c true if this line starts with the given tokens in order, ignoring
/// comments.
template <typename... Ts> bool startsWith(Ts... Tokens) const {
@@ -120,6 +151,16 @@ public:
startsWith(tok::kw_export, tok::kw_namespace);
}
+ FormatToken *getFirstNonComment() const {
+ assert(First);
+ return First->is(tok::comment) ? First->getNextNonComment() : First;
+ }
+
+ FormatToken *getLastNonComment() const {
+ assert(Last);
+ return Last->is(tok::comment) ? Last->getPreviousNonComment() : Last;
+ }
+
FormatToken *First;
FormatToken *Last;
@@ -127,13 +168,19 @@ public:
LineType Type;
unsigned Level;
+ unsigned PPLevel;
size_t MatchingOpeningBlockLineIndex;
size_t MatchingClosingBlockLineIndex;
bool InPPDirective;
+ bool InPragmaDirective;
+ bool InMacroBody;
bool MustBeDeclaration;
bool MightBeFunctionDecl;
bool IsMultiVariableDeclStmt;
+ /// \c True if this line contains a macro call for which an expansion exists.
+ bool ContainsMacroCall = false;
+
/// \c True if this line should be formatted, i.e. intersects directly or
/// indirectly with one of the input ranges.
bool Affected;
@@ -145,6 +192,13 @@ public:
/// \c True if one of this line's children intersects with an input range.
bool ChildrenAffected;
+ /// \c True if breaking after last attribute group in function return type.
+ bool ReturnTypeWrapped;
+
+ /// \c True if this line should be indented by ContinuationIndent in addition
+ /// to the normal indention level.
+ bool IsContinuation;
+
unsigned FirstStartColumn;
private:
@@ -163,47 +217,52 @@ public:
/// Adapts the indent levels of comment lines to the indent of the
/// subsequent line.
// FIXME: Can/should this be done in the UnwrappedLineParser?
- void setCommentLineLevels(SmallVectorImpl<AnnotatedLine *> &Lines);
+ void setCommentLineLevels(SmallVectorImpl<AnnotatedLine *> &Lines) const;
void annotate(AnnotatedLine &Line);
- void calculateFormattingInformation(AnnotatedLine &Line);
+ void calculateFormattingInformation(AnnotatedLine &Line) const;
private:
/// Calculate the penalty for splitting before \c Tok.
unsigned splitPenalty(const AnnotatedLine &Line, const FormatToken &Tok,
- bool InFunctionDecl);
+ bool InFunctionDecl) const;
bool spaceRequiredBeforeParens(const FormatToken &Right) const;
bool spaceRequiredBetween(const AnnotatedLine &Line, const FormatToken &Left,
- const FormatToken &Right);
+ const FormatToken &Right) const;
- bool spaceRequiredBefore(const AnnotatedLine &Line, const FormatToken &Right);
+ bool spaceRequiredBefore(const AnnotatedLine &Line,
+ const FormatToken &Right) const;
- bool mustBreakBefore(const AnnotatedLine &Line, const FormatToken &Right);
+ bool mustBreakBefore(const AnnotatedLine &Line,
+ const FormatToken &Right) const;
- bool canBreakBefore(const AnnotatedLine &Line, const FormatToken &Right);
+ bool canBreakBefore(const AnnotatedLine &Line,
+ const FormatToken &Right) const;
bool mustBreakForReturnType(const AnnotatedLine &Line) const;
- void printDebugInfo(const AnnotatedLine &Line);
+ void printDebugInfo(const AnnotatedLine &Line) const;
- void calculateUnbreakableTailLengths(AnnotatedLine &Line);
+ void calculateUnbreakableTailLengths(AnnotatedLine &Line) const;
- void calculateArrayInitializerColumnList(AnnotatedLine &Line);
+ void calculateArrayInitializerColumnList(AnnotatedLine &Line) const;
FormatToken *calculateInitializerColumnList(AnnotatedLine &Line,
FormatToken *CurrentToken,
- unsigned Depth);
+ unsigned Depth) const;
FormatStyle::PointerAlignmentStyle
- getTokenReferenceAlignment(const FormatToken &PointerOrReference);
+ getTokenReferenceAlignment(const FormatToken &PointerOrReference) const;
- FormatStyle::PointerAlignmentStyle
- getTokenPointerOrReferenceAlignment(const FormatToken &PointerOrReference);
+ FormatStyle::PointerAlignmentStyle getTokenPointerOrReferenceAlignment(
+ const FormatToken &PointerOrReference) const;
const FormatStyle &Style;
const AdditionalKeywords &Keywords;
+
+ SmallVector<ScopeType> Scopes;
};
} // end namespace format
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp b/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp
index cca85c1074de..adeb07243487 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "UnwrappedLineFormatter.h"
+#include "FormatToken.h"
#include "NamespaceEndCommentsFixer.h"
#include "WhitespaceManager.h"
#include "llvm/Support/Debug.h"
@@ -26,6 +27,11 @@ bool startsExternCBlock(const AnnotatedLine &Line) {
NextNext && NextNext->is(tok::l_brace);
}
+bool isRecordLBrace(const FormatToken &Tok) {
+ return Tok.isOneOf(TT_ClassLBrace, TT_EnumLBrace, TT_RecordLBrace,
+ TT_StructLBrace, TT_UnionLBrace);
+}
+
/// Tracks the indent level of \c AnnotatedLines across levels.
///
/// \c nextLine must be called for each \c AnnotatedLine, after which \c
@@ -54,41 +60,50 @@ public:
Offset = getIndentOffset(*Line.First);
// Update the indent level cache size so that we can rely on it
// having the right size in adjustToUnmodifiedline.
- while (IndentForLevel.size() <= Line.Level)
- IndentForLevel.push_back(-1);
- if (Line.InPPDirective) {
- unsigned IndentWidth =
+ if (Line.Level >= IndentForLevel.size())
+ IndentForLevel.resize(Line.Level + 1, -1);
+ if (Style.IndentPPDirectives != FormatStyle::PPDIS_None &&
+ (Line.InPPDirective ||
+ (Style.IndentPPDirectives == FormatStyle::PPDIS_BeforeHash &&
+ Line.Type == LT_CommentAbovePPDirective))) {
+ unsigned PPIndentWidth =
(Style.PPIndentWidth >= 0) ? Style.PPIndentWidth : Style.IndentWidth;
- Indent = Line.Level * IndentWidth + AdditionalIndent;
+ Indent = Line.InMacroBody
+ ? Line.PPLevel * PPIndentWidth +
+ (Line.Level - Line.PPLevel) * Style.IndentWidth
+ : Line.Level * PPIndentWidth;
+ Indent += AdditionalIndent;
} else {
- IndentForLevel.resize(Line.Level + 1);
- Indent = getIndent(IndentForLevel, Line.Level);
+ // When going to lower levels, forget previous higher levels so that we
+ // recompute future higher levels. But don't forget them if we enter a PP
+ // directive, since these do not terminate a C++ code block.
+ if (!Line.InPPDirective) {
+ assert(Line.Level <= IndentForLevel.size());
+ IndentForLevel.resize(Line.Level + 1);
+ }
+ Indent = getIndent(Line.Level);
}
if (static_cast<int>(Indent) + Offset >= 0)
Indent += Offset;
- if (Line.First->is(TT_CSharpGenericTypeConstraint))
+ if (Line.IsContinuation)
Indent = Line.Level * Style.IndentWidth + Style.ContinuationIndentWidth;
}
- /// Update the indent state given that \p Line indent should be
- /// skipped.
- void skipLine(const AnnotatedLine &Line) {
- while (IndentForLevel.size() <= Line.Level)
- IndentForLevel.push_back(Indent);
- }
-
/// Update the level indent to adapt to the given \p Line.
///
/// When a line is not formatted, we move the subsequent lines on the same
/// level to the same indent.
/// Note that \c nextLine must have been called before this method.
void adjustToUnmodifiedLine(const AnnotatedLine &Line) {
+ if (Line.InPPDirective || Line.IsContinuation)
+ return;
+ assert(Line.Level < IndentForLevel.size());
+ if (Line.First->is(tok::comment) && IndentForLevel[Line.Level] != -1)
+ return;
unsigned LevelIndent = Line.First->OriginalColumn;
if (static_cast<int>(LevelIndent) - Offset >= 0)
LevelIndent -= Offset;
- if ((!Line.First->is(tok::comment) || IndentForLevel[Line.Level] == -1) &&
- !Line.InPPDirective)
- IndentForLevel[Line.Level] = LevelIndent;
+ IndentForLevel[Line.Level] = LevelIndent;
}
private:
@@ -97,14 +112,36 @@ private:
/// For example, 'public:' labels in classes are offset by 1 or 2
/// characters to the left from their level.
int getIndentOffset(const FormatToken &RootToken) {
- if (Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp())
+ if (Style.Language == FormatStyle::LK_Java || Style.isJavaScript() ||
+ Style.isCSharp()) {
return 0;
- if (RootToken.isAccessSpecifier(false) ||
- RootToken.isObjCAccessSpecifier() ||
- (RootToken.isOneOf(Keywords.kw_signals, Keywords.kw_qsignals) &&
- RootToken.Next && RootToken.Next->is(tok::colon))) {
- // The AccessModifierOffset may be overriden by IndentAccessModifiers,
+ }
+
+ auto IsAccessModifier = [this, &RootToken]() {
+ if (RootToken.isAccessSpecifier(Style.isCpp())) {
+ return true;
+ } else if (RootToken.isObjCAccessSpecifier()) {
+ return true;
+ }
+ // Handle Qt signals.
+ else if (RootToken.isOneOf(Keywords.kw_signals, Keywords.kw_qsignals) &&
+ RootToken.Next && RootToken.Next->is(tok::colon)) {
+ return true;
+ } else if (RootToken.Next &&
+ RootToken.Next->isOneOf(Keywords.kw_slots,
+ Keywords.kw_qslots) &&
+ RootToken.Next->Next && RootToken.Next->Next->is(tok::colon)) {
+ return true;
+ }
+ // Handle malformed access specifier e.g. 'private' without trailing ':'.
+ else if (!RootToken.Next && RootToken.isAccessSpecifier(false)) {
+ return true;
+ }
+ return false;
+ };
+
+ if (IsAccessModifier()) {
+ // The AccessModifierOffset may be overridden by IndentAccessModifiers,
// in which case we take a negative value of the IndentWidth to simulate
// the upper indent level.
return Style.IndentAccessModifiers ? -Style.IndentWidth
@@ -118,20 +155,24 @@ private:
/// \p IndentForLevel must contain the indent for the level \c l
/// at \p IndentForLevel[l], or a value < 0 if the indent for
/// that level is unknown.
- unsigned getIndent(ArrayRef<int> IndentForLevel, unsigned Level) {
+ unsigned getIndent(unsigned Level) const {
+ assert(Level < IndentForLevel.size());
if (IndentForLevel[Level] != -1)
return IndentForLevel[Level];
if (Level == 0)
return 0;
- return getIndent(IndentForLevel, Level - 1) + Style.IndentWidth;
+ return getIndent(Level - 1) + Style.IndentWidth;
}
const FormatStyle &Style;
const AdditionalKeywords &Keywords;
const unsigned AdditionalIndent;
- /// The indent in characters for each level.
- std::vector<int> IndentForLevel;
+ /// The indent in characters for each level. It remembers the indent of
+ /// previous lines (that are not PP directives) of equal or lower levels. This
+ /// is used to align formatted lines to the indent of previous non-formatted
+ /// lines. Think about the --lines parameter of clang-format.
+ SmallVector<int> IndentForLevel;
/// Offset of the current line relative to the indent level.
///
@@ -183,12 +224,13 @@ public:
const AnnotatedLine *Current = *Next;
IndentTracker.nextLine(*Current);
unsigned MergedLines = tryFitMultipleLinesInOne(IndentTracker, Next, End);
- if (MergedLines > 0 && Style.ColumnLimit == 0)
+ if (MergedLines > 0 && Style.ColumnLimit == 0) {
// Disallow line merging if there is a break at the start of one of the
// input lines.
for (unsigned i = 0; i < MergedLines; ++i)
if (Next[i + 1]->First->NewlinesBefore > 0)
MergedLines = 0;
+ }
if (!DryRun)
for (unsigned i = 0; i < MergedLines; ++i)
join(*Next[0], *Next[i + 1]);
@@ -211,11 +253,13 @@ private:
const AnnotatedLine *TheLine = *I;
if (TheLine->Last->is(TT_LineComment))
return 0;
- if (I[1]->Type == LT_Invalid || I[1]->First->MustBreakBefore)
+ const auto &NextLine = *I[1];
+ if (NextLine.Type == LT_Invalid || NextLine.First->MustBreakBefore)
return 0;
if (TheLine->InPPDirective &&
- (!I[1]->InPPDirective || I[1]->First->HasUnescapedNewline))
+ (!NextLine.InPPDirective || NextLine.First->HasUnescapedNewline)) {
return 0;
+ }
if (Style.ColumnLimit > 0 && Indent > Style.ColumnLimit)
return 0;
@@ -231,30 +275,34 @@ private:
if (TheLine->Last->is(TT_FunctionLBrace) &&
TheLine->First == TheLine->Last &&
!Style.BraceWrapping.SplitEmptyFunction &&
- I[1]->First->is(tok::r_brace))
+ NextLine.First->is(tok::r_brace)) {
return tryMergeSimpleBlock(I, E, Limit);
+ }
- // Handle empty record blocks where the brace has already been wrapped
- if (TheLine->Last->is(tok::l_brace) && TheLine->First == TheLine->Last &&
- I != AnnotatedLines.begin()) {
- bool EmptyBlock = I[1]->First->is(tok::r_brace);
+ const auto *PreviousLine = I != AnnotatedLines.begin() ? I[-1] : nullptr;
+ // Handle empty record blocks where the brace has already been wrapped.
+ if (PreviousLine && TheLine->Last->is(tok::l_brace) &&
+ TheLine->First == TheLine->Last) {
+ bool EmptyBlock = NextLine.First->is(tok::r_brace);
- const FormatToken *Tok = I[-1]->First;
+ const FormatToken *Tok = PreviousLine->First;
if (Tok && Tok->is(tok::comment))
Tok = Tok->getNextNonComment();
- if (Tok && Tok->getNamespaceToken())
+ if (Tok && Tok->getNamespaceToken()) {
return !Style.BraceWrapping.SplitEmptyNamespace && EmptyBlock
? tryMergeSimpleBlock(I, E, Limit)
: 0;
+ }
if (Tok && Tok->is(tok::kw_typedef))
Tok = Tok->getNextNonComment();
if (Tok && Tok->isOneOf(tok::kw_class, tok::kw_struct, tok::kw_union,
- tok::kw_extern, Keywords.kw_interface))
+ tok::kw_extern, Keywords.kw_interface)) {
return !Style.BraceWrapping.SplitEmptyRecord && EmptyBlock
? tryMergeSimpleBlock(I, E, Limit)
: 0;
+ }
if (Tok && Tok->is(tok::kw_template) &&
Style.BraceWrapping.SplitEmptyRecord && EmptyBlock) {
@@ -262,30 +310,86 @@ private:
}
}
- // FIXME: TheLine->Level != 0 might or might not be the right check to do.
- // If necessary, change to something smarter.
- bool MergeShortFunctions =
- Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_All ||
- (Style.AllowShortFunctionsOnASingleLine >= FormatStyle::SFS_Empty &&
- I[1]->First->is(tok::r_brace)) ||
- (Style.AllowShortFunctionsOnASingleLine & FormatStyle::SFS_InlineOnly &&
- TheLine->Level != 0);
+ auto ShouldMergeShortFunctions = [this, &I, &NextLine, PreviousLine,
+ TheLine]() {
+ if (Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_All)
+ return true;
+ if (Style.AllowShortFunctionsOnASingleLine >= FormatStyle::SFS_Empty &&
+ NextLine.First->is(tok::r_brace)) {
+ return true;
+ }
+
+ if (Style.AllowShortFunctionsOnASingleLine &
+ FormatStyle::SFS_InlineOnly) {
+ // Just checking TheLine->Level != 0 is not enough, because it
+ // provokes treating functions inside indented namespaces as short.
+ if (Style.isJavaScript() && TheLine->Last->is(TT_FunctionLBrace))
+ return true;
+
+ if (TheLine->Level != 0) {
+ if (!PreviousLine)
+ return false;
+
+ // TODO: Use IndentTracker to avoid loop?
+ // Find the last line with lower level.
+ const AnnotatedLine *Line = nullptr;
+ for (auto J = I - 1; J >= AnnotatedLines.begin(); --J) {
+ assert(*J);
+ if (!(*J)->InPPDirective && !(*J)->isComment() &&
+ (*J)->Level < TheLine->Level) {
+ Line = *J;
+ break;
+ }
+ }
+
+ if (!Line)
+ return false;
+
+ // Check if the found line starts a record.
+ const auto *LastNonComment = Line->getLastNonComment();
+ // There must be another token (usually `{`), because we chose a
+ // non-PPDirective and non-comment line that has a smaller level.
+ assert(LastNonComment);
+ return isRecordLBrace(*LastNonComment);
+ }
+ }
+
+ return false;
+ };
+
+ bool MergeShortFunctions = ShouldMergeShortFunctions();
+
+ const auto *FirstNonComment = TheLine->getFirstNonComment();
+ if (!FirstNonComment)
+ return 0;
+ // FIXME: There are probably cases where we should use FirstNonComment
+ // instead of TheLine->First.
if (Style.CompactNamespaces) {
- if (auto nsToken = TheLine->First->getNamespaceToken()) {
- int i = 0;
- unsigned closingLine = TheLine->MatchingClosingBlockLineIndex - 1;
- for (; I + 1 + i != E &&
- nsToken->TokenText == getNamespaceTokenText(I[i + 1]) &&
- closingLine == I[i + 1]->MatchingClosingBlockLineIndex &&
- I[i + 1]->Last->TotalLength < Limit;
- i++, closingLine--) {
- // No extra indent for compacted namespaces
- IndentTracker.skipLine(*I[i + 1]);
-
- Limit -= I[i + 1]->Last->TotalLength;
+ if (const auto *NSToken = TheLine->First->getNamespaceToken()) {
+ int J = 1;
+ assert(TheLine->MatchingClosingBlockLineIndex > 0);
+ for (auto ClosingLineIndex = TheLine->MatchingClosingBlockLineIndex - 1;
+ I + J != E && NSToken->TokenText == getNamespaceTokenText(I[J]) &&
+ ClosingLineIndex == I[J]->MatchingClosingBlockLineIndex &&
+ I[J]->Last->TotalLength < Limit;
+ ++J, --ClosingLineIndex) {
+ Limit -= I[J]->Last->TotalLength;
+
+ // Reduce indent level for bodies of namespaces which were compacted,
+ // but only if their content was indented in the first place.
+ auto *ClosingLine = AnnotatedLines.begin() + ClosingLineIndex + 1;
+ const int OutdentBy = I[J]->Level - TheLine->Level;
+ assert(OutdentBy >= 0);
+ for (auto *CompactedLine = I + J; CompactedLine <= ClosingLine;
+ ++CompactedLine) {
+ if (!(*CompactedLine)->InPPDirective) {
+ const int Level = (*CompactedLine)->Level;
+ (*CompactedLine)->Level = std::max(Level - OutdentBy, 0);
+ }
+ }
}
- return i;
+ return J - 1;
}
if (auto nsToken = getMatchingNamespaceToken(TheLine, AnnotatedLines)) {
@@ -295,114 +399,152 @@ private:
nsToken->TokenText ==
getMatchingNamespaceTokenText(I[i + 1], AnnotatedLines) &&
openingLine == I[i + 1]->MatchingOpeningBlockLineIndex;
- i++, openingLine--) {
- // No space between consecutive braces
- I[i + 1]->First->SpacesRequiredBefore = !I[i]->Last->is(tok::r_brace);
+ i++, --openingLine) {
+ // No space between consecutive braces.
+ I[i + 1]->First->SpacesRequiredBefore =
+ I[i]->Last->isNot(tok::r_brace);
- // Indent like the outer-most namespace
+ // Indent like the outer-most namespace.
IndentTracker.nextLine(*I[i + 1]);
}
return i;
}
}
- // Try to merge a function block with left brace unwrapped
- if (TheLine->Last->is(TT_FunctionLBrace) &&
- TheLine->First != TheLine->Last) {
+ const auto *LastNonComment = TheLine->getLastNonComment();
+ assert(LastNonComment);
+ // FIXME: There are probably cases where we should use LastNonComment
+ // instead of TheLine->Last.
+
+ // Try to merge a function block with left brace unwrapped.
+ if (LastNonComment->is(TT_FunctionLBrace) &&
+ TheLine->First != LastNonComment) {
return MergeShortFunctions ? tryMergeSimpleBlock(I, E, Limit) : 0;
}
- // Try to merge a control statement block with left brace unwrapped
- if (TheLine->Last->is(tok::l_brace) && TheLine->First != TheLine->Last &&
- TheLine->First->isOneOf(tok::kw_if, tok::kw_while, tok::kw_for)) {
+ // Try to merge a control statement block with left brace unwrapped.
+ if (TheLine->Last->is(tok::l_brace) && FirstNonComment != TheLine->Last &&
+ FirstNonComment->isOneOf(tok::kw_if, tok::kw_while, tok::kw_for,
+ TT_ForEachMacro)) {
return Style.AllowShortBlocksOnASingleLine != FormatStyle::SBS_Never
? tryMergeSimpleBlock(I, E, Limit)
: 0;
}
- // Try to merge a control statement block with left brace wrapped
- if (I[1]->First->is(tok::l_brace) &&
- (TheLine->First->isOneOf(tok::kw_if, tok::kw_while, tok::kw_for,
- tok::kw_switch, tok::kw_try, tok::kw_do,
- TT_ForEachMacro) ||
- (TheLine->First->is(tok::r_brace) && TheLine->First->Next &&
- TheLine->First->Next->isOneOf(tok::kw_else, tok::kw_catch))) &&
- Style.BraceWrapping.AfterControlStatement ==
- FormatStyle::BWACS_MultiLine) {
- // If possible, merge the next line's wrapped left brace with the current
- // line. Otherwise, leave it on the next line, as this is a multi-line
- // control statement.
- return (Style.ColumnLimit == 0 ||
- TheLine->Last->TotalLength <= Style.ColumnLimit)
- ? 1
- : 0;
- } else if (I[1]->First->is(tok::l_brace) &&
- TheLine->First->isOneOf(tok::kw_if, tok::kw_while,
- tok::kw_for)) {
- return (Style.BraceWrapping.AfterControlStatement ==
- FormatStyle::BWACS_Always)
- ? tryMergeSimpleBlock(I, E, Limit)
- : 0;
- } else if (I[1]->First->is(tok::l_brace) &&
- TheLine->First->isOneOf(tok::kw_else, tok::kw_catch) &&
- Style.BraceWrapping.AfterControlStatement ==
- FormatStyle::BWACS_MultiLine) {
- // This case if different from the upper BWACS_MultiLine processing
- // in that a preceding r_brace is not on the same line as else/catch
- // most likely because of BeforeElse/BeforeCatch set to true.
- // If the line length doesn't fit ColumnLimit, leave l_brace on the
- // next line to respect the BWACS_MultiLine.
- return (Style.ColumnLimit == 0 ||
- TheLine->Last->TotalLength <= Style.ColumnLimit)
- ? 1
- : 0;
+ // Try to merge a control statement block with left brace wrapped.
+ if (NextLine.First->is(tok::l_brace)) {
+ if ((TheLine->First->isOneOf(tok::kw_if, tok::kw_else, tok::kw_while,
+ tok::kw_for, tok::kw_switch, tok::kw_try,
+ tok::kw_do, TT_ForEachMacro) ||
+ (TheLine->First->is(tok::r_brace) && TheLine->First->Next &&
+ TheLine->First->Next->isOneOf(tok::kw_else, tok::kw_catch))) &&
+ Style.BraceWrapping.AfterControlStatement ==
+ FormatStyle::BWACS_MultiLine) {
+ // If possible, merge the next line's wrapped left brace with the
+ // current line. Otherwise, leave it on the next line, as this is a
+ // multi-line control statement.
+ return (Style.ColumnLimit == 0 || TheLine->Level * Style.IndentWidth +
+ TheLine->Last->TotalLength <=
+ Style.ColumnLimit)
+ ? 1
+ : 0;
+ }
+ if (TheLine->First->isOneOf(tok::kw_if, tok::kw_else, tok::kw_while,
+ tok::kw_for, TT_ForEachMacro)) {
+ return (Style.BraceWrapping.AfterControlStatement ==
+ FormatStyle::BWACS_Always)
+ ? tryMergeSimpleBlock(I, E, Limit)
+ : 0;
+ }
+ if (TheLine->First->isOneOf(tok::kw_else, tok::kw_catch) &&
+ Style.BraceWrapping.AfterControlStatement ==
+ FormatStyle::BWACS_MultiLine) {
+ // This case if different from the upper BWACS_MultiLine processing
+ // in that a preceding r_brace is not on the same line as else/catch
+ // most likely because of BeforeElse/BeforeCatch set to true.
+ // If the line length doesn't fit ColumnLimit, leave l_brace on the
+ // next line to respect the BWACS_MultiLine.
+ return (Style.ColumnLimit == 0 ||
+ TheLine->Last->TotalLength <= Style.ColumnLimit)
+ ? 1
+ : 0;
+ }
}
- // Don't merge block with left brace wrapped after ObjC special blocks
- if (TheLine->First->is(tok::l_brace) && I != AnnotatedLines.begin() &&
- I[-1]->First->is(tok::at) && I[-1]->First->Next) {
- tok::ObjCKeywordKind kwId = I[-1]->First->Next->Tok.getObjCKeywordID();
- if (kwId == clang::tok::objc_autoreleasepool ||
- kwId == clang::tok::objc_synchronized)
+ if (PreviousLine && TheLine->First->is(tok::l_brace)) {
+ switch (PreviousLine->First->Tok.getKind()) {
+ case tok::at:
+ // Don't merge block with left brace wrapped after ObjC special blocks.
+ if (PreviousLine->First->Next) {
+ tok::ObjCKeywordKind kwId =
+ PreviousLine->First->Next->Tok.getObjCKeywordID();
+ if (kwId == tok::objc_autoreleasepool ||
+ kwId == tok::objc_synchronized) {
+ return 0;
+ }
+ }
+ break;
+
+ case tok::kw_case:
+ case tok::kw_default:
+ // Don't merge block with left brace wrapped after case labels.
return 0;
+
+ default:
+ break;
+ }
}
- // Don't merge block with left brace wrapped after case labels
- if (TheLine->First->is(tok::l_brace) && I != AnnotatedLines.begin() &&
- I[-1]->First->isOneOf(tok::kw_case, tok::kw_default))
- return 0;
// Don't merge an empty template class or struct if SplitEmptyRecords
// is defined.
- if (Style.BraceWrapping.SplitEmptyRecord &&
- TheLine->Last->is(tok::l_brace) && I != AnnotatedLines.begin() &&
- I[-1]->Last) {
- const FormatToken *Previous = I[-1]->Last;
+ if (PreviousLine && Style.BraceWrapping.SplitEmptyRecord &&
+ TheLine->Last->is(tok::l_brace) && PreviousLine->Last) {
+ const FormatToken *Previous = PreviousLine->Last;
if (Previous) {
if (Previous->is(tok::comment))
Previous = Previous->getPreviousNonComment();
if (Previous) {
- if (Previous->is(tok::greater) && !I[-1]->InPPDirective)
+ if (Previous->is(tok::greater) && !PreviousLine->InPPDirective)
return 0;
if (Previous->is(tok::identifier)) {
const FormatToken *PreviousPrevious =
Previous->getPreviousNonComment();
if (PreviousPrevious &&
- PreviousPrevious->isOneOf(tok::kw_class, tok::kw_struct))
+ PreviousPrevious->isOneOf(tok::kw_class, tok::kw_struct)) {
return 0;
+ }
}
}
}
}
- // Try to merge a block with left brace wrapped that wasn't yet covered
if (TheLine->Last->is(tok::l_brace)) {
- return !Style.BraceWrapping.AfterFunction ||
- (I[1]->First->is(tok::r_brace) &&
- !Style.BraceWrapping.SplitEmptyRecord)
- ? tryMergeSimpleBlock(I, E, Limit)
- : 0;
+ bool ShouldMerge = false;
+ // Try to merge records.
+ if (TheLine->Last->is(TT_EnumLBrace)) {
+ ShouldMerge = Style.AllowShortEnumsOnASingleLine;
+ } else if (TheLine->Last->is(TT_RequiresExpressionLBrace)) {
+ ShouldMerge = Style.AllowShortCompoundRequirementOnASingleLine;
+ } else if (TheLine->Last->isOneOf(TT_ClassLBrace, TT_StructLBrace)) {
+ // NOTE: We use AfterClass (whereas AfterStruct exists) for both classes
+ // and structs, but it seems that wrapping is still handled correctly
+ // elsewhere.
+ ShouldMerge = !Style.BraceWrapping.AfterClass ||
+ (NextLine.First->is(tok::r_brace) &&
+ !Style.BraceWrapping.SplitEmptyRecord);
+ } else if (TheLine->InPPDirective ||
+ !TheLine->First->isOneOf(tok::kw_class, tok::kw_enum,
+ tok::kw_struct)) {
+ // Try to merge a block with left brace unwrapped that wasn't yet
+ // covered.
+ ShouldMerge = !Style.BraceWrapping.AfterFunction ||
+ (NextLine.First->is(tok::r_brace) &&
+ !Style.BraceWrapping.SplitEmptyFunction);
+ }
+ return ShouldMerge ? tryMergeSimpleBlock(I, E, Limit) : 0;
}
- // Try to merge a function block with left brace wrapped
- if (I[1]->First->is(TT_FunctionLBrace) &&
+
+ // Try to merge a function block with left brace wrapped.
+ if (NextLine.First->is(TT_FunctionLBrace) &&
Style.BraceWrapping.AfterFunction) {
- if (I[1]->Last->is(TT_LineComment))
+ if (NextLine.Last->is(TT_LineComment))
return 0;
// Check for Limit <= 2 to account for the " {".
@@ -413,7 +555,7 @@ private:
unsigned MergedLines = 0;
if (MergeShortFunctions ||
(Style.AllowShortFunctionsOnASingleLine >= FormatStyle::SFS_Empty &&
- I[1]->First == I[1]->Last && I + 2 != E &&
+ NextLine.First == NextLine.Last && I + 2 != E &&
I[2]->First->is(tok::r_brace))) {
MergedLines = tryMergeSimpleBlock(I + 1, E, Limit);
// If we managed to merge the block, count the function header, which is
@@ -438,7 +580,8 @@ private:
? tryMergeSimpleControlStatement(I, E, Limit)
: 0;
}
- if (TheLine->First->isOneOf(tok::kw_for, tok::kw_while, tok::kw_do)) {
+ if (TheLine->First->isOneOf(tok::kw_for, tok::kw_while, tok::kw_do,
+ TT_ForEachMacro)) {
return Style.AllowShortLoopsOnASingleLine
? tryMergeSimpleControlStatement(I, E, Limit)
: 0;
@@ -476,30 +619,36 @@ private:
if (Style.BraceWrapping.AfterControlStatement ==
FormatStyle::BWACS_Always &&
I[1]->First->is(tok::l_brace) &&
- Style.AllowShortBlocksOnASingleLine == FormatStyle::SBS_Never)
+ Style.AllowShortBlocksOnASingleLine == FormatStyle::SBS_Never) {
return 0;
+ }
if (I[1]->InPPDirective != (*I)->InPPDirective ||
- (I[1]->InPPDirective && I[1]->First->HasUnescapedNewline))
+ (I[1]->InPPDirective && I[1]->First->HasUnescapedNewline)) {
return 0;
+ }
Limit = limitConsideringMacros(I + 1, E, Limit);
AnnotatedLine &Line = **I;
- if (!Line.First->is(tok::kw_do) && !Line.First->is(tok::kw_else) &&
- !Line.Last->is(tok::kw_else) && Line.Last->isNot(tok::r_paren))
+ if (Line.First->isNot(tok::kw_do) && Line.First->isNot(tok::kw_else) &&
+ Line.Last->isNot(tok::kw_else) && Line.Last->isNot(tok::r_paren)) {
return 0;
- // Only merge do while if do is the only statement on the line.
- if (Line.First->is(tok::kw_do) && !Line.Last->is(tok::kw_do))
+ }
+ // Only merge `do while` if `do` is the only statement on the line.
+ if (Line.First->is(tok::kw_do) && Line.Last->isNot(tok::kw_do))
return 0;
if (1 + I[1]->Last->TotalLength > Limit)
return 0;
+ // Don't merge with loops, ifs, a single semicolon or a line comment.
if (I[1]->First->isOneOf(tok::semi, tok::kw_if, tok::kw_for, tok::kw_while,
- TT_LineComment))
+ TT_ForEachMacro, TT_LineComment)) {
return 0;
+ }
// Only inline simple if's (no nested if or else), unless specified
if (Style.AllowShortIfStatementsOnASingleLine ==
FormatStyle::SIS_WithoutElse) {
if (I + 2 != E && Line.startsWith(tok::kw_if) &&
- I[2]->First->is(tok::kw_else))
+ I[2]->First->is(tok::kw_else)) {
return 0;
+ }
}
return 1;
}
@@ -509,14 +658,16 @@ private:
SmallVectorImpl<AnnotatedLine *>::const_iterator E,
unsigned Limit) {
if (Limit == 0 || I + 1 == E ||
- I[1]->First->isOneOf(tok::kw_case, tok::kw_default))
+ I[1]->First->isOneOf(tok::kw_case, tok::kw_default)) {
return 0;
+ }
if (I[0]->Last->is(tok::l_brace) || I[1]->First->is(tok::l_brace))
return 0;
unsigned NumStmts = 0;
unsigned Length = 0;
bool EndsWithComment = false;
bool InPPDirective = I[0]->InPPDirective;
+ bool InMacroBody = I[0]->InMacroBody;
const unsigned Level = I[0]->Level;
for (; NumStmts < 3; ++NumStmts) {
if (I + 1 + NumStmts == E)
@@ -524,12 +675,15 @@ private:
const AnnotatedLine *Line = I[1 + NumStmts];
if (Line->InPPDirective != InPPDirective)
break;
+ if (Line->InMacroBody != InMacroBody)
+ break;
if (Line->First->isOneOf(tok::kw_case, tok::kw_default, tok::r_brace))
break;
if (Line->First->isOneOf(tok::kw_if, tok::kw_for, tok::kw_switch,
tok::kw_while) ||
- EndsWithComment)
+ EndsWithComment) {
return 0;
+ }
if (Line->First->is(tok::comment)) {
if (Level != Line->Level)
return 0;
@@ -558,88 +712,115 @@ private:
tryMergeSimpleBlock(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
SmallVectorImpl<AnnotatedLine *>::const_iterator E,
unsigned Limit) {
+ // Don't merge with a preprocessor directive.
+ if (I[1]->Type == LT_PreprocessorDirective)
+ return 0;
+
AnnotatedLine &Line = **I;
// Don't merge ObjC @ keywords and methods.
// FIXME: If an option to allow short exception handling clauses on a single
// line is added, change this to not return for @try and friends.
if (Style.Language != FormatStyle::LK_Java &&
- Line.First->isOneOf(tok::at, tok::minus, tok::plus))
+ Line.First->isOneOf(tok::at, tok::minus, tok::plus)) {
return 0;
+ }
// Check that the current line allows merging. This depends on whether we
// are in a control flow statements as well as several style flags.
- if (Line.First->isOneOf(tok::kw_else, tok::kw_case) ||
- (Line.First->Next && Line.First->Next->is(tok::kw_else)))
+ if (Line.First->is(tok::kw_case) ||
+ (Line.First->Next && Line.First->Next->is(tok::kw_else))) {
return 0;
+ }
// default: in switch statement
if (Line.First->is(tok::kw_default)) {
const FormatToken *Tok = Line.First->getNextNonComment();
if (Tok && Tok->is(tok::colon))
return 0;
}
- if (Line.First->isOneOf(tok::kw_if, tok::kw_while, tok::kw_do, tok::kw_try,
- tok::kw___try, tok::kw_catch, tok::kw___finally,
- tok::kw_for, tok::r_brace, Keywords.kw___except)) {
- if (Style.AllowShortBlocksOnASingleLine == FormatStyle::SBS_Never)
+
+ auto IsCtrlStmt = [](const auto &Line) {
+ return Line.First->isOneOf(tok::kw_if, tok::kw_else, tok::kw_while,
+ tok::kw_do, tok::kw_for, TT_ForEachMacro);
+ };
+
+ const bool IsSplitBlock =
+ Style.AllowShortBlocksOnASingleLine == FormatStyle::SBS_Never ||
+ (Style.AllowShortBlocksOnASingleLine == FormatStyle::SBS_Empty &&
+ I[1]->First->isNot(tok::r_brace));
+
+ if (IsCtrlStmt(Line) ||
+ Line.First->isOneOf(tok::kw_try, tok::kw___try, tok::kw_catch,
+ tok::kw___finally, tok::r_brace,
+ Keywords.kw___except)) {
+ if (IsSplitBlock)
return 0;
// Don't merge when we can't except the case when
// the control statement block is empty
if (!Style.AllowShortIfStatementsOnASingleLine &&
- Line.startsWith(tok::kw_if) &&
+ Line.First->isOneOf(tok::kw_if, tok::kw_else) &&
!Style.BraceWrapping.AfterControlStatement &&
- !I[1]->First->is(tok::r_brace))
+ I[1]->First->isNot(tok::r_brace)) {
return 0;
+ }
if (!Style.AllowShortIfStatementsOnASingleLine &&
- Line.startsWith(tok::kw_if) &&
+ Line.First->isOneOf(tok::kw_if, tok::kw_else) &&
Style.BraceWrapping.AfterControlStatement ==
FormatStyle::BWACS_Always &&
- I + 2 != E && !I[2]->First->is(tok::r_brace))
+ I + 2 != E && I[2]->First->isNot(tok::r_brace)) {
return 0;
+ }
if (!Style.AllowShortLoopsOnASingleLine &&
- Line.First->isOneOf(tok::kw_while, tok::kw_do, tok::kw_for) &&
+ Line.First->isOneOf(tok::kw_while, tok::kw_do, tok::kw_for,
+ TT_ForEachMacro) &&
!Style.BraceWrapping.AfterControlStatement &&
- !I[1]->First->is(tok::r_brace))
+ I[1]->First->isNot(tok::r_brace)) {
return 0;
+ }
if (!Style.AllowShortLoopsOnASingleLine &&
- Line.First->isOneOf(tok::kw_while, tok::kw_do, tok::kw_for) &&
+ Line.First->isOneOf(tok::kw_while, tok::kw_do, tok::kw_for,
+ TT_ForEachMacro) &&
Style.BraceWrapping.AfterControlStatement ==
FormatStyle::BWACS_Always &&
- I + 2 != E && !I[2]->First->is(tok::r_brace))
+ I + 2 != E && I[2]->First->isNot(tok::r_brace)) {
return 0;
+ }
// FIXME: Consider an option to allow short exception handling clauses on
// a single line.
// FIXME: This isn't covered by tests.
// FIXME: For catch, __except, __finally the first token on the line
// is '}', so this isn't correct here.
if (Line.First->isOneOf(tok::kw_try, tok::kw___try, tok::kw_catch,
- Keywords.kw___except, tok::kw___finally))
+ Keywords.kw___except, tok::kw___finally)) {
return 0;
+ }
}
- if (Line.Last->is(tok::l_brace)) {
+ if (const auto *LastNonComment = Line.getLastNonComment();
+ LastNonComment && LastNonComment->is(tok::l_brace)) {
+ if (IsSplitBlock && Line.First == Line.Last &&
+ I > AnnotatedLines.begin() &&
+ (I[-1]->endsWith(tok::kw_else) || IsCtrlStmt(*I[-1]))) {
+ return 0;
+ }
FormatToken *Tok = I[1]->First;
- if (Tok->is(tok::r_brace) && !Tok->MustBreakBefore &&
- (Tok->getNextNonComment() == nullptr ||
- Tok->getNextNonComment()->is(tok::semi))) {
+ auto ShouldMerge = [Tok]() {
+ if (Tok->isNot(tok::r_brace) || Tok->MustBreakBefore)
+ return false;
+ const FormatToken *Next = Tok->getNextNonComment();
+ return !Next || Next->is(tok::semi);
+ };
+
+ if (ShouldMerge()) {
// We merge empty blocks even if the line exceeds the column limit.
- Tok->SpacesRequiredBefore = Style.SpaceInEmptyBlock ? 1 : 0;
+ Tok->SpacesRequiredBefore =
+ (Style.SpaceInEmptyBlock || Line.Last->is(tok::comment)) ? 1 : 0;
Tok->CanBreakBefore = true;
return 1;
} else if (Limit != 0 && !Line.startsWithNamespace() &&
!startsExternCBlock(Line)) {
// We don't merge short records.
- FormatToken *RecordTok = Line.First;
- // Skip record modifiers.
- while (RecordTok->Next &&
- RecordTok->isOneOf(
- tok::kw_typedef, tok::kw_export, Keywords.kw_declare,
- Keywords.kw_abstract, tok::kw_default, tok::kw_public,
- tok::kw_private, tok::kw_protected, Keywords.kw_internal))
- RecordTok = RecordTok->Next;
- if (RecordTok &&
- RecordTok->isOneOf(tok::kw_class, tok::kw_union, tok::kw_struct,
- Keywords.kw_interface))
+ if (isRecordLBrace(*Line.Last))
return 0;
// Check that we still have three lines and they fit into the limit.
@@ -675,10 +856,11 @@ private:
// { <-- current Line
// baz();
// }
- if (Line.First == Line.Last &&
+ if (Line.First == Line.Last && Line.First->isNot(TT_FunctionLBrace) &&
Style.BraceWrapping.AfterControlStatement ==
- FormatStyle::BWACS_MultiLine)
+ FormatStyle::BWACS_MultiLine) {
return 0;
+ }
return 2;
}
@@ -712,7 +894,7 @@ private:
SmallVectorImpl<AnnotatedLine *>::const_iterator E,
unsigned Limit) {
if (I[0]->InPPDirective && I + 1 != E &&
- !I[1]->First->HasUnescapedNewline && !I[1]->First->is(tok::eof)) {
+ !I[1]->First->HasUnescapedNewline && I[1]->First->isNot(tok::eof)) {
return Limit < 2 ? 0 : Limit - 2;
}
return Limit;
@@ -726,10 +908,12 @@ private:
}
bool containsMustBreak(const AnnotatedLine *Line) {
- for (const FormatToken *Tok = Line->First; Tok; Tok = Tok->Next) {
+ assert(Line->First);
+ // Ignore the first token, because in this situation, it applies more to the
+ // last token of the previous line.
+ for (const FormatToken *Tok = Line->First->Next; Tok; Tok = Tok->Next)
if (Tok->MustBreakBefore)
return true;
- }
return false;
}
@@ -757,10 +941,31 @@ private:
};
static void markFinalized(FormatToken *Tok) {
+ if (Tok->is(tok::hash) && !Tok->Previous && Tok->Next &&
+ Tok->Next->isOneOf(tok::pp_if, tok::pp_ifdef, tok::pp_ifndef,
+ tok::pp_elif, tok::pp_elifdef, tok::pp_elifndef,
+ tok::pp_else, tok::pp_endif)) {
+ Tok = Tok->Next;
+ }
for (; Tok; Tok = Tok->Next) {
- Tok->Finalized = true;
- for (AnnotatedLine *Child : Tok->Children)
- markFinalized(Child->First);
+ if (Tok->MacroCtx && Tok->MacroCtx->Role == MR_ExpandedArg) {
+ // In the first pass we format all macro arguments in the expanded token
+ // stream. Instead of finalizing the macro arguments, we mark that they
+ // will be modified as unexpanded arguments (as part of the macro call
+ // formatting) in the next pass.
+ Tok->MacroCtx->Role = MR_UnexpandedArg;
+ // Reset whether spaces or a line break are required before this token, as
+ // that is context dependent, and that context may change when formatting
+ // the macro call. For example, given M(x) -> 2 * x, and the macro call
+ // M(var), the token 'var' will have SpacesRequiredBefore = 1 after being
+ // formatted as part of the expanded macro, but SpacesRequiredBefore = 0
+ // for its position within the macro call.
+ Tok->SpacesRequiredBefore = 0;
+ if (!Tok->MustBreakBeforeFinalized)
+ Tok->MustBreakBefore = 0;
+ } else {
+ Tok->Finalized = true;
+ }
}
}
@@ -815,29 +1020,19 @@ protected:
bool formatChildren(LineState &State, bool NewLine, bool DryRun,
unsigned &Penalty) {
const FormatToken *LBrace = State.NextToken->getPreviousNonComment();
+ bool HasLBrace = LBrace && LBrace->is(tok::l_brace) && LBrace->is(BK_Block);
FormatToken &Previous = *State.NextToken->Previous;
- if (!LBrace || LBrace->isNot(tok::l_brace) || LBrace->isNot(BK_Block) ||
- Previous.Children.size() == 0)
+ if (Previous.Children.size() == 0 || (!HasLBrace && !LBrace->MacroParent)) {
// The previous token does not open a block. Nothing to do. We don't
// assert so that we can simply call this function for all tokens.
return true;
+ }
- if (NewLine) {
+ if (NewLine || Previous.MacroParent) {
const ParenState &P = State.Stack.back();
int AdditionalIndent =
P.Indent - Previous.Children[0]->Level * Style.IndentWidth;
-
- if (Style.LambdaBodyIndentation == FormatStyle::LBI_OuterScope &&
- P.NestedBlockIndent == P.LastSpace) {
- if (State.NextToken->MatchingParen &&
- State.NextToken->MatchingParen->is(TT_LambdaLBrace)) {
- State.Stack.pop_back();
- }
- if (LBrace->is(TT_LambdaLBrace))
- AdditionalIndent = 0;
- }
-
Penalty +=
BlockFormatter->format(Previous.Children, DryRun, AdditionalIndent,
/*FixBadIndentation=*/true);
@@ -863,8 +1058,9 @@ protected:
// If the child line exceeds the column limit, we wouldn't want to merge it.
// We add +2 for the trailing " }".
if (Style.ColumnLimit > 0 &&
- Child->Last->TotalLength + State.Column + 2 > Style.ColumnLimit)
+ Child->Last->TotalLength + State.Column + 2 > Style.ColumnLimit) {
return false;
+ }
if (!DryRun) {
Whitespaces->replaceWhitespace(
@@ -874,6 +1070,8 @@ protected:
}
Penalty +=
formatLine(*Child, State.Column + 1, /*FirstStartColumn=*/0, DryRun);
+ if (!DryRun)
+ markFinalized(Child->First);
State.Column += 1 + Child->Last->TotalLength;
return true;
@@ -992,7 +1190,7 @@ private:
typedef std::pair<OrderedPenalty, StateNode *> QueueItem;
/// The BFS queue type.
- typedef std::priority_queue<QueueItem, std::vector<QueueItem>,
+ typedef std::priority_queue<QueueItem, SmallVector<QueueItem>,
std::greater<QueueItem>>
QueueType;
@@ -1013,15 +1211,19 @@ private:
QueueType Queue;
// Insert start element into queue.
- StateNode *Node =
+ StateNode *RootNode =
new (Allocator.Allocate()) StateNode(InitialState, false, nullptr);
- Queue.push(QueueItem(OrderedPenalty(0, Count), Node));
+ Queue.push(QueueItem(OrderedPenalty(0, Count), RootNode));
++Count;
unsigned Penalty = 0;
// While not empty, take first element and follow edges.
while (!Queue.empty()) {
+ // Quit if we still haven't found a solution by now.
+ if (Count > 25000000)
+ return 0;
+
Penalty = Queue.top().first.first;
StateNode *Node = Queue.top().second;
if (!Node->State.NextToken) {
@@ -1036,9 +1238,10 @@ private:
if (Count > 50000)
Node->State.IgnoreStackForComparison = true;
- if (!Seen.insert(&Node->State).second)
+ if (!Seen.insert(&Node->State).second) {
// State already examined with lower penalty.
continue;
+ }
FormatDecision LastFormat = Node->State.NextToken->getDecision();
if (LastFormat == FD_Unformatted || LastFormat == FD_Continue)
@@ -1090,22 +1293,22 @@ private:
/// Applies the best formatting by reconstructing the path in the
/// solution space that leads to \c Best.
void reconstructPath(LineState &State, StateNode *Best) {
- std::deque<StateNode *> Path;
+ llvm::SmallVector<StateNode *> Path;
// We do not need a break before the initial token.
while (Best->Previous) {
- Path.push_front(Best);
+ Path.push_back(Best);
Best = Best->Previous;
}
- for (auto I = Path.begin(), E = Path.end(); I != E; ++I) {
+ for (const auto &Node : llvm::reverse(Path)) {
unsigned Penalty = 0;
- formatChildren(State, (*I)->NewLine, /*DryRun=*/false, Penalty);
- Penalty += Indenter->addTokenToState(State, (*I)->NewLine, false);
+ formatChildren(State, Node->NewLine, /*DryRun=*/false, Penalty);
+ Penalty += Indenter->addTokenToState(State, Node->NewLine, false);
LLVM_DEBUG({
- printLineState((*I)->Previous->State);
- if ((*I)->NewLine) {
+ printLineState(Node->Previous->State);
+ if (Node->NewLine) {
llvm::dbgs() << "Penalty for placing "
- << (*I)->Previous->State.NextToken->Tok.getName()
+ << Node->Previous->State.NextToken->Tok.getName()
<< " on a new line: " << Penalty << "\n";
}
});
@@ -1144,13 +1347,15 @@ unsigned UnwrappedLineFormatter::format(
bool FirstLine = true;
for (const AnnotatedLine *Line =
Joiner.getNextMergedLine(DryRun, IndentTracker);
- Line; Line = NextLine, FirstLine = false) {
+ Line; PrevPrevLine = PreviousLine, PreviousLine = Line, Line = NextLine,
+ FirstLine = false) {
+ assert(Line->First);
const AnnotatedLine &TheLine = *Line;
unsigned Indent = IndentTracker.getIndent();
// We continue formatting unchanged lines to adjust their indent, e.g. if a
// scope was added. However, we need to carefully stop doing this when we
- // exit the scope of affected lines to prevent indenting a the entire
+ // exit the scope of affected lines to prevent indenting the entire
// remaining file if it currently missing a closing brace.
bool PreviousRBrace =
PreviousLine && PreviousLine->startsWith(tok::r_brace);
@@ -1172,7 +1377,7 @@ unsigned UnwrappedLineFormatter::format(
if (ShouldFormat && TheLine.Type != LT_Invalid) {
if (!DryRun) {
- bool LastLine = Line->First->is(tok::eof);
+ bool LastLine = TheLine.First->is(tok::eof);
formatFirstToken(TheLine, PreviousLine, PrevPrevLine, Lines, Indent,
LastLine ? LastStartColumn : NextStartColumn + Indent);
}
@@ -1180,32 +1385,34 @@ unsigned UnwrappedLineFormatter::format(
NextLine = Joiner.getNextMergedLine(DryRun, IndentTracker);
unsigned ColumnLimit = getColumnLimit(TheLine.InPPDirective, NextLine);
bool FitsIntoOneLine =
- TheLine.Last->TotalLength + Indent <= ColumnLimit ||
- (TheLine.Type == LT_ImportStatement &&
- (Style.Language != FormatStyle::LK_JavaScript ||
- !Style.JavaScriptWrapImports)) ||
- (Style.isCSharp() &&
- TheLine.InPPDirective); // don't split #regions in C#
- if (Style.ColumnLimit == 0)
+ !TheLine.ContainsMacroCall &&
+ (TheLine.Last->TotalLength + Indent <= ColumnLimit ||
+ (TheLine.Type == LT_ImportStatement &&
+ (!Style.isJavaScript() || !Style.JavaScriptWrapImports)) ||
+ (Style.isCSharp() &&
+ TheLine.InPPDirective)); // don't split #regions in C#
+ if (Style.ColumnLimit == 0) {
NoColumnLimitLineFormatter(Indenter, Whitespaces, Style, this)
.formatLine(TheLine, NextStartColumn + Indent,
FirstLine ? FirstStartColumn : 0, DryRun);
- else if (FitsIntoOneLine)
+ } else if (FitsIntoOneLine) {
Penalty += NoLineBreakFormatter(Indenter, Whitespaces, Style, this)
.formatLine(TheLine, NextStartColumn + Indent,
FirstLine ? FirstStartColumn : 0, DryRun);
- else
+ } else {
Penalty += OptimizingLineFormatter(Indenter, Whitespaces, Style, this)
.formatLine(TheLine, NextStartColumn + Indent,
FirstLine ? FirstStartColumn : 0, DryRun);
+ }
RangeMinLevel = std::min(RangeMinLevel, TheLine.Level);
} else {
// If no token in the current line is affected, we still need to format
// affected children.
- if (TheLine.ChildrenAffected)
+ if (TheLine.ChildrenAffected) {
for (const FormatToken *Tok = TheLine.First; Tok; Tok = Tok->Next)
if (!Tok->Children.empty())
format(Tok->Children, DryRun);
+ }
// Adapt following lines on the current indent level to the same level
// unless the current \c AnnotatedLine is not at the beginning of a line.
@@ -1218,13 +1425,14 @@ unsigned UnwrappedLineFormatter::format(
StartsNewLine && ((PreviousLine && PreviousLine->Affected) ||
TheLine.LeadingEmptyLinesAffected);
// Format the first token.
- if (ReformatLeadingWhitespace)
+ if (ReformatLeadingWhitespace) {
formatFirstToken(TheLine, PreviousLine, PrevPrevLine, Lines,
TheLine.First->OriginalColumn,
TheLine.First->OriginalColumn);
- else
+ } else {
Whitespaces->addUntouchableToken(*TheLine.First,
TheLine.InPPDirective);
+ }
// Notify the WhitespaceManager about the unchanged whitespace.
for (FormatToken *Tok = TheLine.First->Next; Tok; Tok = Tok->Next)
@@ -1235,37 +1443,29 @@ unsigned UnwrappedLineFormatter::format(
}
if (!DryRun)
markFinalized(TheLine.First);
- PrevPrevLine = PreviousLine;
- PreviousLine = &TheLine;
}
PenaltyCache[CacheKey] = Penalty;
return Penalty;
}
-void UnwrappedLineFormatter::formatFirstToken(
- const AnnotatedLine &Line, const AnnotatedLine *PreviousLine,
- const AnnotatedLine *PrevPrevLine,
- const SmallVectorImpl<AnnotatedLine *> &Lines, unsigned Indent,
- unsigned NewlineIndent) {
- FormatToken &RootToken = *Line.First;
- if (RootToken.is(tok::eof)) {
- unsigned Newlines = std::min(RootToken.NewlinesBefore, 1u);
- unsigned TokenIndent = Newlines ? NewlineIndent : 0;
- Whitespaces->replaceWhitespace(RootToken, Newlines, TokenIndent,
- TokenIndent);
- return;
- }
- unsigned Newlines =
+static auto computeNewlines(const AnnotatedLine &Line,
+ const AnnotatedLine *PreviousLine,
+ const AnnotatedLine *PrevPrevLine,
+ const SmallVectorImpl<AnnotatedLine *> &Lines,
+ const FormatStyle &Style) {
+ const auto &RootToken = *Line.First;
+ auto Newlines =
std::min(RootToken.NewlinesBefore, Style.MaxEmptyLinesToKeep + 1);
// Remove empty lines before "}" where applicable.
if (RootToken.is(tok::r_brace) &&
(!RootToken.Next ||
(RootToken.Next->is(tok::semi) && !RootToken.Next->Next)) &&
// Do not remove empty lines before namespace closing "}".
- !getNamespaceToken(&Line, Lines))
+ !getNamespaceToken(&Line, Lines)) {
Newlines = std::min(Newlines, 1u);
+ }
// Remove empty lines at the start of nested blocks (lambdas/arrow functions)
- if (PreviousLine == nullptr && Line.Level > 0)
+ if (!PreviousLine && Line.Level > 0)
Newlines = std::min(Newlines, 1u);
if (Newlines == 0 && !RootToken.IsFirst)
Newlines = 1;
@@ -1278,8 +1478,9 @@ void UnwrappedLineFormatter::formatFirstToken(
!PreviousLine->startsWithNamespace() &&
!(PrevPrevLine && PrevPrevLine->startsWithNamespace() &&
PreviousLine->startsWith(tok::l_brace)) &&
- !startsExternCBlock(*PreviousLine))
+ !startsExternCBlock(*PreviousLine)) {
Newlines = 1;
+ }
// Insert or remove empty line before access specifiers.
if (PreviousLine && RootToken.isAccessSpecifier()) {
@@ -1303,8 +1504,10 @@ void UnwrappedLineFormatter::formatFirstToken(
previousToken = PreviousLine->Last->getPreviousNonComment();
else
previousToken = PreviousLine->Last;
- if ((!previousToken || !previousToken->is(tok::l_brace)) && Newlines <= 1)
+ if ((!previousToken || previousToken->isNot(tok::l_brace)) &&
+ Newlines <= 1) {
Newlines = 2;
+ }
} break;
}
}
@@ -1332,16 +1535,44 @@ void UnwrappedLineFormatter::formatFirstToken(
}
}
- if (Newlines)
+ return Newlines;
+}
+
+void UnwrappedLineFormatter::formatFirstToken(
+ const AnnotatedLine &Line, const AnnotatedLine *PreviousLine,
+ const AnnotatedLine *PrevPrevLine,
+ const SmallVectorImpl<AnnotatedLine *> &Lines, unsigned Indent,
+ unsigned NewlineIndent) {
+ FormatToken &RootToken = *Line.First;
+ if (RootToken.is(tok::eof)) {
+ unsigned Newlines =
+ std::min(RootToken.NewlinesBefore,
+ Style.KeepEmptyLinesAtEOF ? Style.MaxEmptyLinesToKeep + 1 : 1);
+ unsigned TokenIndent = Newlines ? NewlineIndent : 0;
+ Whitespaces->replaceWhitespace(RootToken, Newlines, TokenIndent,
+ TokenIndent);
+ return;
+ }
+
+ if (RootToken.Newlines < 0) {
+ RootToken.Newlines =
+ computeNewlines(Line, PreviousLine, PrevPrevLine, Lines, Style);
+ assert(RootToken.Newlines >= 0);
+ }
+
+ if (RootToken.Newlines > 0)
Indent = NewlineIndent;
- // Preprocessor directives get indented before the hash only if specified
- if (Style.IndentPPDirectives != FormatStyle::PPDIS_BeforeHash &&
+ // Preprocessor directives get indented before the hash only if specified. In
+ // Javascript import statements are indented like normal statements.
+ if (!Style.isJavaScript() &&
+ Style.IndentPPDirectives != FormatStyle::PPDIS_BeforeHash &&
(Line.Type == LT_PreprocessorDirective ||
- Line.Type == LT_ImportStatement))
+ Line.Type == LT_ImportStatement)) {
Indent = 0;
+ }
- Whitespaces->replaceWhitespace(RootToken, Newlines, Indent, Indent,
+ Whitespaces->replaceWhitespace(RootToken, RootToken.Newlines, Indent, Indent,
/*IsAligned=*/false,
Line.InPPDirective &&
!RootToken.HasUnescapedNewline);
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.h b/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.h
index 3e33de07fa12..ee6d31de8c42 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.h
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.h
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
///
/// \file
-/// Implements a combinartorial exploration of all the different
+/// Implements a combinatorial exploration of all the different
/// linebreaks unwrapped lines can be formatted in.
///
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
index 673986d16af2..573919798870 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
@@ -14,31 +14,62 @@
#include "UnwrappedLineParser.h"
#include "FormatToken.h"
+#include "FormatTokenLexer.h"
+#include "FormatTokenSource.h"
+#include "Macros.h"
+#include "TokenAnnotator.h"
+#include "clang/Basic/TokenKinds.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_os_ostream.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
+#include <utility>
#define DEBUG_TYPE "format-parser"
namespace clang {
namespace format {
-class FormatTokenSource {
-public:
- virtual ~FormatTokenSource() {}
- virtual FormatToken *getNextToken() = 0;
+namespace {
- virtual unsigned getPosition() = 0;
- virtual FormatToken *setPosition(unsigned Position) = 0;
-};
+void printLine(llvm::raw_ostream &OS, const UnwrappedLine &Line,
+ StringRef Prefix = "", bool PrintText = false) {
+ OS << Prefix << "Line(" << Line.Level << ", FSC=" << Line.FirstStartColumn
+ << ")" << (Line.InPPDirective ? " MACRO" : "") << ": ";
+ bool NewLine = false;
+ for (std::list<UnwrappedLineNode>::const_iterator I = Line.Tokens.begin(),
+ E = Line.Tokens.end();
+ I != E; ++I) {
+ if (NewLine) {
+ OS << Prefix;
+ NewLine = false;
+ }
+ OS << I->Tok->Tok.getName() << "[" << "T=" << (unsigned)I->Tok->getType()
+ << ", OC=" << I->Tok->OriginalColumn << ", \"" << I->Tok->TokenText
+ << "\"] ";
+ for (SmallVectorImpl<UnwrappedLine>::const_iterator
+ CI = I->Children.begin(),
+ CE = I->Children.end();
+ CI != CE; ++CI) {
+ OS << "\n";
+ printLine(OS, *CI, (Prefix + " ").str());
+ NewLine = true;
+ }
+ }
+ if (!NewLine)
+ OS << "\n";
+}
-namespace {
+LLVM_ATTRIBUTE_UNUSED static void printDebugInfo(const UnwrappedLine &Line) {
+ printLine(llvm::dbgs(), Line);
+}
class ScopedDeclarationState {
public:
- ScopedDeclarationState(UnwrappedLine &Line, std::vector<bool> &Stack,
+ ScopedDeclarationState(UnwrappedLine &Line, llvm::BitVector &Stack,
bool MustBeDeclaration)
: Line(Line), Stack(Stack) {
Line.MustBeDeclaration = MustBeDeclaration;
@@ -54,84 +85,7 @@ public:
private:
UnwrappedLine &Line;
- std::vector<bool> &Stack;
-};
-
-static bool isLineComment(const FormatToken &FormatTok) {
- return FormatTok.is(tok::comment) && !FormatTok.TokenText.startswith("/*");
-}
-
-// Checks if \p FormatTok is a line comment that continues the line comment
-// \p Previous. The original column of \p MinColumnToken is used to determine
-// whether \p FormatTok is indented enough to the right to continue \p Previous.
-static bool continuesLineComment(const FormatToken &FormatTok,
- const FormatToken *Previous,
- const FormatToken *MinColumnToken) {
- if (!Previous || !MinColumnToken)
- return false;
- unsigned MinContinueColumn =
- MinColumnToken->OriginalColumn + (isLineComment(*MinColumnToken) ? 0 : 1);
- return isLineComment(FormatTok) && FormatTok.NewlinesBefore == 1 &&
- isLineComment(*Previous) &&
- FormatTok.OriginalColumn >= MinContinueColumn;
-}
-
-class ScopedMacroState : public FormatTokenSource {
-public:
- ScopedMacroState(UnwrappedLine &Line, FormatTokenSource *&TokenSource,
- FormatToken *&ResetToken)
- : Line(Line), TokenSource(TokenSource), ResetToken(ResetToken),
- PreviousLineLevel(Line.Level), PreviousTokenSource(TokenSource),
- Token(nullptr), PreviousToken(nullptr) {
- FakeEOF.Tok.startToken();
- FakeEOF.Tok.setKind(tok::eof);
- TokenSource = this;
- Line.Level = 0;
- Line.InPPDirective = true;
- }
-
- ~ScopedMacroState() override {
- TokenSource = PreviousTokenSource;
- ResetToken = Token;
- Line.InPPDirective = false;
- Line.Level = PreviousLineLevel;
- }
-
- FormatToken *getNextToken() override {
- // The \c UnwrappedLineParser guards against this by never calling
- // \c getNextToken() after it has encountered the first eof token.
- assert(!eof());
- PreviousToken = Token;
- Token = PreviousTokenSource->getNextToken();
- if (eof())
- return &FakeEOF;
- return Token;
- }
-
- unsigned getPosition() override { return PreviousTokenSource->getPosition(); }
-
- FormatToken *setPosition(unsigned Position) override {
- PreviousToken = nullptr;
- Token = PreviousTokenSource->setPosition(Position);
- return Token;
- }
-
-private:
- bool eof() {
- return Token && Token->HasUnescapedNewline &&
- !continuesLineComment(*Token, PreviousToken,
- /*MinColumnToken=*/PreviousToken);
- }
-
- FormatToken FakeEOF;
- UnwrappedLine &Line;
- FormatTokenSource *&TokenSource;
- FormatToken *&ResetToken;
- unsigned PreviousLineLevel;
- FormatTokenSource *PreviousTokenSource;
-
- FormatToken *Token;
- FormatToken *PreviousToken;
+ llvm::BitVector &Stack;
};
} // end anonymous namespace
@@ -148,13 +102,14 @@ public:
PreBlockLine = std::move(Parser.Line);
Parser.Line = std::make_unique<UnwrappedLine>();
Parser.Line->Level = PreBlockLine->Level;
+ Parser.Line->PPLevel = PreBlockLine->PPLevel;
Parser.Line->InPPDirective = PreBlockLine->InPPDirective;
+ Parser.Line->InMacroBody = PreBlockLine->InMacroBody;
}
~ScopedLineState() {
- if (!Parser.Line->Tokens.empty()) {
+ if (!Parser.Line->Tokens.empty())
Parser.addUnwrappedLine();
- }
assert(Parser.Line->Tokens.empty());
Parser.Line = std::move(PreBlockLine);
if (Parser.CurrentLines == &Parser.PreprocessorDirectives)
@@ -191,42 +146,12 @@ private:
unsigned OldLineLevel;
};
-namespace {
-
-class IndexedTokenSource : public FormatTokenSource {
-public:
- IndexedTokenSource(ArrayRef<FormatToken *> Tokens)
- : Tokens(Tokens), Position(-1) {}
-
- FormatToken *getNextToken() override {
- ++Position;
- return Tokens[Position];
- }
-
- unsigned getPosition() override {
- assert(Position >= 0);
- return Position;
- }
-
- FormatToken *setPosition(unsigned P) override {
- Position = P;
- return Tokens[Position];
- }
-
- void reset() { Position = -1; }
-
-private:
- ArrayRef<FormatToken *> Tokens;
- int Position;
-};
-
-} // end anonymous namespace
-
-UnwrappedLineParser::UnwrappedLineParser(const FormatStyle &Style,
- const AdditionalKeywords &Keywords,
- unsigned FirstStartColumn,
- ArrayRef<FormatToken *> Tokens,
- UnwrappedLineConsumer &Callback)
+UnwrappedLineParser::UnwrappedLineParser(
+ SourceManager &SourceMgr, const FormatStyle &Style,
+ const AdditionalKeywords &Keywords, unsigned FirstStartColumn,
+ ArrayRef<FormatToken *> Tokens, UnwrappedLineConsumer &Callback,
+ llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
+ IdentifierTable &IdentTable)
: Line(new UnwrappedLine), MustBreakBeforeNextToken(false),
CurrentLines(&Lines), Style(Style), Keywords(Keywords),
CommentPragmasRegex(Style.CommentPragmas), Tokens(nullptr),
@@ -234,7 +159,8 @@ UnwrappedLineParser::UnwrappedLineParser(const FormatStyle &Style,
IncludeGuard(Style.IndentPPDirectives == FormatStyle::PPDIS_None
? IG_Rejected
: IG_Inited),
- IncludeGuardToken(nullptr), FirstStartColumn(FirstStartColumn) {}
+ IncludeGuardToken(nullptr), FirstStartColumn(FirstStartColumn),
+ Macros(Style.Macros, SourceMgr, Style, Allocator, IdentTable) {}
void UnwrappedLineParser::reset() {
PPBranchLevel = -1;
@@ -246,11 +172,23 @@ void UnwrappedLineParser::reset() {
CommentsBeforeNextToken.clear();
FormatTok = nullptr;
MustBreakBeforeNextToken = false;
+ IsDecltypeAutoFunction = false;
PreprocessorDirectives.clear();
CurrentLines = &Lines;
DeclarationScopeStack.clear();
+ NestedTooDeep.clear();
+ NestedLambdas.clear();
PPStack.clear();
Line->FirstStartColumn = FirstStartColumn;
+
+ if (!Unexpanded.empty())
+ for (FormatToken *Token : AllTokens)
+ Token->MacroCtx.reset();
+ CurrentExpandedLines.clear();
+ ExpandedLines.clear();
+ Unexpanded.clear();
+ InExpansion = false;
+ Reconstruct.reset();
}
void UnwrappedLineParser::parse() {
@@ -267,19 +205,42 @@ void UnwrappedLineParser::parse() {
// If we found an include guard then all preprocessor directives (other than
// the guard) are over-indented by one.
- if (IncludeGuard == IG_Found)
+ if (IncludeGuard == IG_Found) {
for (auto &Line : Lines)
if (Line.InPPDirective && Line.Level > 0)
--Line.Level;
+ }
// Create line with eof token.
+ assert(eof());
pushToken(FormatTok);
addUnwrappedLine();
- for (SmallVectorImpl<UnwrappedLine>::iterator I = Lines.begin(),
- E = Lines.end();
- I != E; ++I) {
- Callback.consumeUnwrappedLine(*I);
+ // In a first run, format everything with the lines containing macro calls
+ // replaced by the expansion.
+ if (!ExpandedLines.empty()) {
+ LLVM_DEBUG(llvm::dbgs() << "Expanded lines:\n");
+ for (const auto &Line : Lines) {
+ if (!Line.Tokens.empty()) {
+ auto it = ExpandedLines.find(Line.Tokens.begin()->Tok);
+ if (it != ExpandedLines.end()) {
+ for (const auto &Expanded : it->second) {
+ LLVM_DEBUG(printDebugInfo(Expanded));
+ Callback.consumeUnwrappedLine(Expanded);
+ }
+ continue;
+ }
+ }
+ LLVM_DEBUG(printDebugInfo(Line));
+ Callback.consumeUnwrappedLine(Line);
+ }
+ Callback.finishRun();
+ }
+
+ LLVM_DEBUG(llvm::dbgs() << "Unwrapped lines:\n");
+ for (const UnwrappedLine &Line : Lines) {
+ LLVM_DEBUG(printDebugInfo(Line));
+ Callback.consumeUnwrappedLine(Line);
}
Callback.finishRun();
Lines.clear();
@@ -299,14 +260,13 @@ void UnwrappedLineParser::parse() {
void UnwrappedLineParser::parseFile() {
// The top-level context in a file always has declarations, except for pre-
// processor directives and JavaScript files.
- bool MustBeDeclaration =
- !Line->InPPDirective && Style.Language != FormatStyle::LK_JavaScript;
+ bool MustBeDeclaration = !Line->InPPDirective && !Style.isJavaScript();
ScopedDeclarationState DeclarationState(*Line, DeclarationScopeStack,
MustBeDeclaration);
if (Style.Language == FormatStyle::LK_TextProto)
parseBracedList();
else
- parseLevel(/*HasOpeningBrace=*/false);
+ parseLevel();
// Make sure to format the remaining tokens.
//
// LK_TextProto is special since its top-level is parsed as the body of a
@@ -318,8 +278,9 @@ void UnwrappedLineParser::parseFile() {
// do not have a chance to be put on a line of their own until this point.
// Here we add this newline before end-of-file comments.
if (Style.Language == FormatStyle::LK_TextProto &&
- !CommentsBeforeNextToken.empty())
+ !CommentsBeforeNextToken.empty()) {
addUnwrappedLine();
+ }
flushComments(true);
addUnwrappedLine();
}
@@ -365,15 +326,53 @@ void UnwrappedLineParser::parseCSharpAttribute() {
} while (!eof());
}
-void UnwrappedLineParser::parseLevel(bool HasOpeningBrace) {
+bool UnwrappedLineParser::precededByCommentOrPPDirective() const {
+ if (!Lines.empty() && Lines.back().InPPDirective)
+ return true;
+
+ const FormatToken *Previous = Tokens->getPreviousToken();
+ return Previous && Previous->is(tok::comment) &&
+ (Previous->IsMultiline || Previous->NewlinesBefore > 0);
+}
+
+/// \brief Parses a level, that is ???.
+/// \param OpeningBrace Opening brace (\p nullptr if absent) of that level.
+/// \param IfKind The \p if statement kind in the level.
+/// \param IfLeftBrace The left brace of the \p if block in the level.
+/// \returns true if a simple block of if/else/for/while, or false otherwise.
+/// (A simple block has a single statement.)
+bool UnwrappedLineParser::parseLevel(const FormatToken *OpeningBrace,
+ IfStmtKind *IfKind,
+ FormatToken **IfLeftBrace) {
+ const bool InRequiresExpression =
+ OpeningBrace && OpeningBrace->is(TT_RequiresExpressionLBrace);
+ const bool IsPrecededByCommentOrPPDirective =
+ !Style.RemoveBracesLLVM || precededByCommentOrPPDirective();
+ FormatToken *IfLBrace = nullptr;
+ bool HasDoWhile = false;
+ bool HasLabel = false;
+ unsigned StatementCount = 0;
bool SwitchLabelEncountered = false;
+
do {
+ if (FormatTok->isAttribute()) {
+ nextToken();
+ continue;
+ }
tok::TokenKind kind = FormatTok->Tok.getKind();
- if (FormatTok->getType() == TT_MacroBlockBegin) {
+ if (FormatTok->getType() == TT_MacroBlockBegin)
kind = tok::l_brace;
- } else if (FormatTok->getType() == TT_MacroBlockEnd) {
+ else if (FormatTok->getType() == TT_MacroBlockEnd)
kind = tok::r_brace;
- }
+
+ auto ParseDefault = [this, OpeningBrace, IfKind, &IfLBrace, &HasDoWhile,
+ &HasLabel, &StatementCount] {
+ parseStructuralElement(OpeningBrace, IfKind, &IfLBrace,
+ HasDoWhile ? nullptr : &HasDoWhile,
+ HasLabel ? nullptr : &HasLabel);
+ ++StatementCount;
+ assert(StatementCount > 0 && "StatementCount overflow!");
+ };
switch (kind) {
case tok::comment:
@@ -381,16 +380,42 @@ void UnwrappedLineParser::parseLevel(bool HasOpeningBrace) {
addUnwrappedLine();
break;
case tok::l_brace:
- // FIXME: Add parameter whether this can happen - if this happens, we must
- // be in a non-declaration context.
- if (!FormatTok->is(TT_MacroBlockBegin) && tryToParseBracedList())
+ if (InRequiresExpression) {
+ FormatTok->setFinalizedType(TT_RequiresExpressionLBrace);
+ } else if (FormatTok->Previous &&
+ FormatTok->Previous->ClosesRequiresClause) {
+ // We need the 'default' case here to correctly parse a function
+ // l_brace.
+ ParseDefault();
+ continue;
+ }
+ if (!InRequiresExpression && FormatTok->isNot(TT_MacroBlockBegin) &&
+ tryToParseBracedList()) {
continue;
- parseBlock(/*MustBeDeclaration=*/false);
+ }
+ parseBlock();
+ ++StatementCount;
+ assert(StatementCount > 0 && "StatementCount overflow!");
addUnwrappedLine();
break;
case tok::r_brace:
- if (HasOpeningBrace)
- return;
+ if (OpeningBrace) {
+ if (!Style.RemoveBracesLLVM || Line->InPPDirective ||
+ !OpeningBrace->isOneOf(TT_ControlStatementLBrace, TT_ElseLBrace)) {
+ return false;
+ }
+ if (FormatTok->isNot(tok::r_brace) || StatementCount != 1 || HasLabel ||
+ HasDoWhile || IsPrecededByCommentOrPPDirective ||
+ precededByCommentOrPPDirective()) {
+ return false;
+ }
+ const FormatToken *Next = Tokens->peekNextToken();
+ if (Next->is(tok::comment) && Next->NewlinesBefore == 0)
+ return false;
+ if (IfLeftBrace)
+ *IfLeftBrace = IfLBrace;
+ return true;
+ }
nextToken();
addUnwrappedLine();
break;
@@ -399,27 +424,33 @@ void UnwrappedLineParser::parseLevel(bool HasOpeningBrace) {
FormatToken *Next;
do {
Next = Tokens->getNextToken();
- } while (Next && Next->is(tok::comment));
+ assert(Next);
+ } while (Next->is(tok::comment));
FormatTok = Tokens->setPosition(StoredPosition);
- if (Next && Next->isNot(tok::colon)) {
+ if (Next->isNot(tok::colon)) {
// default not followed by ':' is not a case label; treat it like
// an identifier.
parseStructuralElement();
break;
}
// Else, if it is 'default:', fall through to the case handling.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case tok::kw_case:
- if (Style.Language == FormatStyle::LK_JavaScript &&
- Line->MustBeDeclaration) {
- // A 'case: string' style field declaration.
- parseStructuralElement();
+ if (Style.Language == FormatStyle::LK_Proto || Style.isVerilog() ||
+ (Style.isJavaScript() && Line->MustBeDeclaration)) {
+ // Proto: there are no switch/case statements
+ // Verilog: Case labels don't have this word. We handle case
+ // labels including default in TokenAnnotator.
+ // JavaScript: A 'case: string' style field declaration.
+ ParseDefault();
break;
}
if (!SwitchLabelEncountered &&
- (Style.IndentCaseLabels || (Line->InPPDirective && Line->Level == 1)))
+ (Style.IndentCaseLabels ||
+ (Line->InPPDirective && Line->Level == 1))) {
++Line->Level;
+ }
SwitchLabelEncountered = true;
parseStructuralElement();
break;
@@ -429,12 +460,16 @@ void UnwrappedLineParser::parseLevel(bool HasOpeningBrace) {
parseCSharpAttribute();
break;
}
- LLVM_FALLTHROUGH;
+ if (handleCppAttributes())
+ break;
+ [[fallthrough]];
default:
- parseStructuralElement(/*IsTopLevel=*/true);
+ ParseDefault();
break;
}
} while (!eof());
+
+ return false;
}
void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
@@ -448,21 +483,30 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
// Keep a stack of positions of lbrace tokens. We will
// update information about whether an lbrace starts a
// braced init list or a different block during the loop.
- SmallVector<FormatToken *, 8> LBraceStack;
- assert(Tok->Tok.is(tok::l_brace));
+ struct StackEntry {
+ FormatToken *Tok;
+ const FormatToken *PrevTok;
+ };
+ SmallVector<StackEntry, 8> LBraceStack;
+ assert(Tok->is(tok::l_brace));
do {
- // Get next non-comment token.
+ // Get next non-comment, non-preprocessor token.
FormatToken *NextTok;
- unsigned ReadTokens = 0;
do {
NextTok = Tokens->getNextToken();
- ++ReadTokens;
} while (NextTok->is(tok::comment));
+ while (NextTok->is(tok::hash) && !Line->InMacroBody) {
+ NextTok = Tokens->getNextToken();
+ do {
+ NextTok = Tokens->getNextToken();
+ } while (NextTok->is(tok::comment) ||
+ (NextTok->NewlinesBefore == 0 && NextTok->isNot(tok::eof)));
+ }
switch (Tok->Tok.getKind()) {
case tok::l_brace:
- if (Style.Language == FormatStyle::LK_JavaScript && PrevTok) {
- if (PrevTok->isOneOf(tok::colon, tok::less))
+ if (Style.isJavaScript() && PrevTok) {
+ if (PrevTok->isOneOf(tok::colon, tok::less)) {
// A ':' indicates this code is in a type, or a braced list
// following a label in an object literal ({a: {b: 1}}).
// A '<' could be an object used in a comparison, but that is nonsense
@@ -473,70 +517,106 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
// trigger BK_Block. In both cases, this must be parsed as an inline
// braced init.
Tok->setBlockKind(BK_BracedInit);
- else if (PrevTok->is(tok::r_paren))
+ } else if (PrevTok->is(tok::r_paren)) {
// `) { }` can only occur in function or method declarations in JS.
Tok->setBlockKind(BK_Block);
+ }
} else {
Tok->setBlockKind(BK_Unknown);
}
- LBraceStack.push_back(Tok);
+ LBraceStack.push_back({Tok, PrevTok});
break;
case tok::r_brace:
if (LBraceStack.empty())
break;
- if (LBraceStack.back()->is(BK_Unknown)) {
+ if (LBraceStack.back().Tok->is(BK_Unknown)) {
bool ProbablyBracedList = false;
if (Style.Language == FormatStyle::LK_Proto) {
ProbablyBracedList = NextTok->isOneOf(tok::comma, tok::r_square);
} else {
+ // Skip NextTok over preprocessor lines, otherwise we may not
+ // properly diagnose the block as a braced intializer
+ // if the comma separator appears after the pp directive.
+ while (NextTok->is(tok::hash)) {
+ ScopedMacroState MacroState(*Line, Tokens, NextTok);
+ do {
+ NextTok = Tokens->getNextToken();
+ } while (NextTok->isNot(tok::eof));
+ }
+
// Using OriginalColumn to distinguish between ObjC methods and
// binary operators is a bit hacky.
bool NextIsObjCMethod = NextTok->isOneOf(tok::plus, tok::minus) &&
NextTok->OriginalColumn == 0;
+ // Try to detect a braced list. Note that regardless how we mark inner
+ // braces here, we will overwrite the BlockKind later if we parse a
+ // braced list (where all blocks inside are by default braced lists),
+ // or when we explicitly detect blocks (for example while parsing
+ // lambdas).
+
+ // If we already marked the opening brace as braced list, the closing
+ // must also be part of it.
+ ProbablyBracedList = LBraceStack.back().Tok->is(TT_BracedListLBrace);
+
+ ProbablyBracedList = ProbablyBracedList ||
+ (Style.isJavaScript() &&
+ NextTok->isOneOf(Keywords.kw_of, Keywords.kw_in,
+ Keywords.kw_as));
+ ProbablyBracedList = ProbablyBracedList ||
+ (Style.isCpp() && NextTok->is(tok::l_paren));
+
// If there is a comma, semicolon or right paren after the closing
- // brace, we assume this is a braced initializer list. Note that
- // regardless how we mark inner braces here, we will overwrite the
- // BlockKind later if we parse a braced list (where all blocks
- // inside are by default braced lists), or when we explicitly detect
- // blocks (for example while parsing lambdas).
+ // brace, we assume this is a braced initializer list.
// FIXME: Some of these do not apply to JS, e.g. "} {" can never be a
// braced list in JS.
ProbablyBracedList =
- (Style.Language == FormatStyle::LK_JavaScript &&
- NextTok->isOneOf(Keywords.kw_of, Keywords.kw_in,
- Keywords.kw_as)) ||
- (Style.isCpp() && NextTok->is(tok::l_paren)) ||
+ ProbablyBracedList ||
NextTok->isOneOf(tok::comma, tok::period, tok::colon,
- tok::r_paren, tok::r_square, tok::l_brace,
- tok::ellipsis) ||
+ tok::r_paren, tok::r_square, tok::ellipsis);
+
+ // Distinguish between braced list in a constructor initializer list
+ // followed by constructor body, or just adjacent blocks.
+ ProbablyBracedList =
+ ProbablyBracedList ||
+ (NextTok->is(tok::l_brace) && LBraceStack.back().PrevTok &&
+ LBraceStack.back().PrevTok->isOneOf(tok::identifier,
+ tok::greater));
+
+ ProbablyBracedList =
+ ProbablyBracedList ||
(NextTok->is(tok::identifier) &&
- !PrevTok->isOneOf(tok::semi, tok::r_brace, tok::l_brace)) ||
- (NextTok->is(tok::semi) &&
- (!ExpectClassBody || LBraceStack.size() != 1)) ||
+ !PrevTok->isOneOf(tok::semi, tok::r_brace, tok::l_brace));
+
+ ProbablyBracedList = ProbablyBracedList ||
+ (NextTok->is(tok::semi) &&
+ (!ExpectClassBody || LBraceStack.size() != 1));
+
+ ProbablyBracedList =
+ ProbablyBracedList ||
(NextTok->isBinaryOperator() && !NextIsObjCMethod);
+
if (!Style.isCSharp() && NextTok->is(tok::l_square)) {
// We can have an array subscript after a braced init
// list, but C++11 attributes are expected after blocks.
NextTok = Tokens->getNextToken();
- ++ReadTokens;
ProbablyBracedList = NextTok->isNot(tok::l_square);
}
}
if (ProbablyBracedList) {
Tok->setBlockKind(BK_BracedInit);
- LBraceStack.back()->setBlockKind(BK_BracedInit);
+ LBraceStack.back().Tok->setBlockKind(BK_BracedInit);
} else {
Tok->setBlockKind(BK_Block);
- LBraceStack.back()->setBlockKind(BK_Block);
+ LBraceStack.back().Tok->setBlockKind(BK_Block);
}
}
LBraceStack.pop_back();
break;
case tok::identifier:
- if (!Tok->is(TT_StatementMacro))
+ if (Tok->isNot(TT_StatementMacro))
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case tok::at:
case tok::semi:
case tok::kw_if:
@@ -545,25 +625,32 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
case tok::kw_switch:
case tok::kw_try:
case tok::kw___try:
- if (!LBraceStack.empty() && LBraceStack.back()->is(BK_Unknown))
- LBraceStack.back()->setBlockKind(BK_Block);
+ if (!LBraceStack.empty() && LBraceStack.back().Tok->is(BK_Unknown))
+ LBraceStack.back().Tok->setBlockKind(BK_Block);
break;
default:
break;
}
PrevTok = Tok;
Tok = NextTok;
- } while (Tok->Tok.isNot(tok::eof) && !LBraceStack.empty());
+ } while (Tok->isNot(tok::eof) && !LBraceStack.empty());
// Assume other blocks for all unclosed opening braces.
- for (unsigned i = 0, e = LBraceStack.size(); i != e; ++i) {
- if (LBraceStack[i]->is(BK_Unknown))
- LBraceStack[i]->setBlockKind(BK_Block);
- }
+ for (const auto &Entry : LBraceStack)
+ if (Entry.Tok->is(BK_Unknown))
+ Entry.Tok->setBlockKind(BK_Block);
FormatTok = Tokens->setPosition(StoredPosition);
}
+// Sets the token type of the directly previous right brace.
+void UnwrappedLineParser::setPreviousRBraceType(TokenType Type) {
+ if (auto Prev = FormatTok->getPreviousNonComment();
+ Prev && Prev->is(tok::r_brace)) {
+ Prev->setFinalizedType(Type);
+ }
+}
+
template <class T>
static inline void hash_combine(std::size_t &seed, const T &v) {
std::hash<T> hasher;
@@ -579,29 +666,123 @@ size_t UnwrappedLineParser::computePPHash() const {
return h;
}
-void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, unsigned AddLevels,
- bool MunchSemi,
- bool UnindentWhitesmithsBraces) {
- assert(FormatTok->isOneOf(tok::l_brace, TT_MacroBlockBegin) &&
+// Checks whether \p ParsedLine might fit on a single line. If \p OpeningBrace
+// is not null, subtracts its length (plus the preceding space) when computing
+// the length of \p ParsedLine. We must clone the tokens of \p ParsedLine before
+// running the token annotator on it so that we can restore them afterward.
+bool UnwrappedLineParser::mightFitOnOneLine(
+ UnwrappedLine &ParsedLine, const FormatToken *OpeningBrace) const {
+ const auto ColumnLimit = Style.ColumnLimit;
+ if (ColumnLimit == 0)
+ return true;
+
+ auto &Tokens = ParsedLine.Tokens;
+ assert(!Tokens.empty());
+
+ const auto *LastToken = Tokens.back().Tok;
+ assert(LastToken);
+
+ SmallVector<UnwrappedLineNode> SavedTokens(Tokens.size());
+
+ int Index = 0;
+ for (const auto &Token : Tokens) {
+ assert(Token.Tok);
+ auto &SavedToken = SavedTokens[Index++];
+ SavedToken.Tok = new FormatToken;
+ SavedToken.Tok->copyFrom(*Token.Tok);
+ SavedToken.Children = std::move(Token.Children);
+ }
+
+ AnnotatedLine Line(ParsedLine);
+ assert(Line.Last == LastToken);
+
+ TokenAnnotator Annotator(Style, Keywords);
+ Annotator.annotate(Line);
+ Annotator.calculateFormattingInformation(Line);
+
+ auto Length = LastToken->TotalLength;
+ if (OpeningBrace) {
+ assert(OpeningBrace != Tokens.front().Tok);
+ if (auto Prev = OpeningBrace->Previous;
+ Prev && Prev->TotalLength + ColumnLimit == OpeningBrace->TotalLength) {
+ Length -= ColumnLimit;
+ }
+ Length -= OpeningBrace->TokenText.size() + 1;
+ }
+
+ if (const auto *FirstToken = Line.First; FirstToken->is(tok::r_brace)) {
+ assert(!OpeningBrace || OpeningBrace->is(TT_ControlStatementLBrace));
+ Length -= FirstToken->TokenText.size() + 1;
+ }
+
+ Index = 0;
+ for (auto &Token : Tokens) {
+ const auto &SavedToken = SavedTokens[Index++];
+ Token.Tok->copyFrom(*SavedToken.Tok);
+ Token.Children = std::move(SavedToken.Children);
+ delete SavedToken.Tok;
+ }
+
+ // If these change PPLevel needs to be used for get correct indentation.
+ assert(!Line.InMacroBody);
+ assert(!Line.InPPDirective);
+ return Line.Level * Style.IndentWidth + Length <= ColumnLimit;
+}
+
+FormatToken *UnwrappedLineParser::parseBlock(bool MustBeDeclaration,
+ unsigned AddLevels, bool MunchSemi,
+ bool KeepBraces,
+ IfStmtKind *IfKind,
+ bool UnindentWhitesmithsBraces) {
+ auto HandleVerilogBlockLabel = [this]() {
+ // ":" name
+ if (Style.isVerilog() && FormatTok->is(tok::colon)) {
+ nextToken();
+ if (Keywords.isVerilogIdentifier(*FormatTok))
+ nextToken();
+ }
+ };
+
+ // Whether this is a Verilog-specific block that has a special header like a
+ // module.
+ const bool VerilogHierarchy =
+ Style.isVerilog() && Keywords.isVerilogHierarchy(*FormatTok);
+ assert((FormatTok->isOneOf(tok::l_brace, TT_MacroBlockBegin) ||
+ (Style.isVerilog() &&
+ (Keywords.isVerilogBegin(*FormatTok) || VerilogHierarchy))) &&
"'{' or macro block token expected");
+ FormatToken *Tok = FormatTok;
+ const bool FollowedByComment = Tokens->peekNextToken()->is(tok::comment);
+ auto Index = CurrentLines->size();
const bool MacroBlock = FormatTok->is(TT_MacroBlockBegin);
FormatTok->setBlockKind(BK_Block);
// For Whitesmiths mode, jump to the next level prior to skipping over the
// braces.
- if (AddLevels > 0 && Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths)
+ if (!VerilogHierarchy && AddLevels > 0 &&
+ Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths) {
++Line->Level;
+ }
size_t PPStartHash = computePPHash();
- unsigned InitialLevel = Line->Level;
- nextToken(/*LevelDifference=*/AddLevels);
+ const unsigned InitialLevel = Line->Level;
+ if (VerilogHierarchy) {
+ AddLevels += parseVerilogHierarchyHeader();
+ } else {
+ nextToken(/*LevelDifference=*/AddLevels);
+ HandleVerilogBlockLabel();
+ }
+
+ // Bail out if there are too many levels. Otherwise, the stack might overflow.
+ if (Line->Level > 300)
+ return nullptr;
if (MacroBlock && FormatTok->is(tok::l_paren))
parseParens();
size_t NbPreprocessorDirectives =
- CurrentLines == &Lines ? PreprocessorDirectives.size() : 0;
+ !parsingPPDirective() ? PreprocessorDirectives.size() : 0;
addUnwrappedLine();
size_t OpeningLineIndex =
CurrentLines->empty()
@@ -618,16 +799,59 @@ void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, unsigned AddLevels,
MustBeDeclaration);
if (AddLevels > 0u && Style.BreakBeforeBraces != FormatStyle::BS_Whitesmiths)
Line->Level += AddLevels;
- parseLevel(/*HasOpeningBrace=*/true);
+
+ FormatToken *IfLBrace = nullptr;
+ const bool SimpleBlock = parseLevel(Tok, IfKind, &IfLBrace);
if (eof())
- return;
+ return IfLBrace;
- if (MacroBlock ? !FormatTok->is(TT_MacroBlockEnd)
- : !FormatTok->is(tok::r_brace)) {
+ if (MacroBlock ? FormatTok->isNot(TT_MacroBlockEnd)
+ : FormatTok->isNot(tok::r_brace)) {
Line->Level = InitialLevel;
FormatTok->setBlockKind(BK_Block);
- return;
+ return IfLBrace;
+ }
+
+ if (FormatTok->is(tok::r_brace) && Tok->is(TT_NamespaceLBrace))
+ FormatTok->setFinalizedType(TT_NamespaceRBrace);
+
+ const bool IsFunctionRBrace =
+ FormatTok->is(tok::r_brace) && Tok->is(TT_FunctionLBrace);
+
+ auto RemoveBraces = [=]() mutable {
+ if (!SimpleBlock)
+ return false;
+ assert(Tok->isOneOf(TT_ControlStatementLBrace, TT_ElseLBrace));
+ assert(FormatTok->is(tok::r_brace));
+ const bool WrappedOpeningBrace = !Tok->Previous;
+ if (WrappedOpeningBrace && FollowedByComment)
+ return false;
+ const bool HasRequiredIfBraces = IfLBrace && !IfLBrace->Optional;
+ if (KeepBraces && !HasRequiredIfBraces)
+ return false;
+ if (Tok->isNot(TT_ElseLBrace) || !HasRequiredIfBraces) {
+ const FormatToken *Previous = Tokens->getPreviousToken();
+ assert(Previous);
+ if (Previous->is(tok::r_brace) && !Previous->Optional)
+ return false;
+ }
+ assert(!CurrentLines->empty());
+ auto &LastLine = CurrentLines->back();
+ if (LastLine.Level == InitialLevel + 1 && !mightFitOnOneLine(LastLine))
+ return false;
+ if (Tok->is(TT_ElseLBrace))
+ return true;
+ if (WrappedOpeningBrace) {
+ assert(Index > 0);
+ --Index; // The line above the wrapped l_brace.
+ Tok = nullptr;
+ }
+ return mightFitOnOneLine((*CurrentLines)[Index], Tok);
+ };
+ if (RemoveBraces()) {
+ Tok->MatchingParen = FormatTok;
+ FormatTok->MatchingParen = Tok;
}
size_t PPEndHash = computePPHash();
@@ -635,21 +859,38 @@ void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, unsigned AddLevels,
// Munch the closing brace.
nextToken(/*LevelDifference=*/-AddLevels);
+ // When this is a function block and there is an unnecessary semicolon
+ // afterwards then mark it as optional (so the RemoveSemi pass can get rid of
+ // it later).
+ if (Style.RemoveSemicolon && IsFunctionRBrace) {
+ while (FormatTok->is(tok::semi)) {
+ FormatTok->Optional = true;
+ nextToken();
+ }
+ }
+
+ HandleVerilogBlockLabel();
+
if (MacroBlock && FormatTok->is(tok::l_paren))
parseParens();
+ Line->Level = InitialLevel;
+
+ if (FormatTok->is(tok::kw_noexcept)) {
+ // A noexcept in a requires expression.
+ nextToken();
+ }
+
if (FormatTok->is(tok::arrow)) {
- // Following the } we can find a trailing return type arrow
+ // Following the } or noexcept we can find a trailing return type arrow
// as part of an implicit conversion constraint.
nextToken();
parseStructuralElement();
}
- if (MunchSemi && FormatTok->Tok.is(tok::semi))
+ if (MunchSemi && FormatTok->is(tok::semi))
nextToken();
- Line->Level = InitialLevel;
-
if (PPStartHash == PPEndHash) {
Line->MatchingOpeningBlockLineIndex = OpeningLineIndex;
if (OpeningLineIndex != UnwrappedLine::kInvalidIndex) {
@@ -658,6 +899,8 @@ void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, unsigned AddLevels,
CurrentLines->size() - 1;
}
}
+
+ return IfLBrace;
}
static bool isGoogScope(const UnwrappedLine &Line) {
@@ -698,28 +941,39 @@ static bool isIIFE(const UnwrappedLine &Line,
static bool ShouldBreakBeforeBrace(const FormatStyle &Style,
const FormatToken &InitialToken) {
- if (InitialToken.isOneOf(tok::kw_namespace, TT_NamespaceMacro))
+ tok::TokenKind Kind = InitialToken.Tok.getKind();
+ if (InitialToken.is(TT_NamespaceMacro))
+ Kind = tok::kw_namespace;
+
+ switch (Kind) {
+ case tok::kw_namespace:
return Style.BraceWrapping.AfterNamespace;
- if (InitialToken.is(tok::kw_class))
+ case tok::kw_class:
return Style.BraceWrapping.AfterClass;
- if (InitialToken.is(tok::kw_union))
+ case tok::kw_union:
return Style.BraceWrapping.AfterUnion;
- if (InitialToken.is(tok::kw_struct))
+ case tok::kw_struct:
return Style.BraceWrapping.AfterStruct;
- return false;
+ case tok::kw_enum:
+ return Style.BraceWrapping.AfterEnum;
+ default:
+ return false;
+ }
}
void UnwrappedLineParser::parseChildBlock() {
+ assert(FormatTok->is(tok::l_brace));
FormatTok->setBlockKind(BK_Block);
+ const FormatToken *OpeningBrace = FormatTok;
nextToken();
{
- bool SkipIndent = (Style.Language == FormatStyle::LK_JavaScript &&
+ bool SkipIndent = (Style.isJavaScript() &&
(isGoogScope(*Line) || isIIFE(*Line, Keywords)));
ScopedLineState LineState(*this);
ScopedDeclarationState DeclarationState(*Line, DeclarationScopeStack,
/*MustBeDeclaration=*/false);
Line->Level += SkipIndent ? 0 : 1;
- parseLevel(/*HasOpeningBrace=*/true);
+ parseLevel(OpeningBrace);
flushComments(isOnNewLine(*FormatTok));
Line->Level -= SkipIndent ? 0 : 1;
}
@@ -727,7 +981,7 @@ void UnwrappedLineParser::parseChildBlock() {
}
void UnwrappedLineParser::parsePPDirective() {
- assert(FormatTok->Tok.is(tok::hash) && "'#' expected");
+ assert(FormatTok->is(tok::hash) && "'#' expected");
ScopedMacroState MacroState(*Line, Tokens, FormatTok);
nextToken();
@@ -749,16 +1003,17 @@ void UnwrappedLineParser::parsePPDirective() {
parsePPIf(/*IfDef=*/true);
break;
case tok::pp_else:
- parsePPElse();
- break;
case tok::pp_elifdef:
case tok::pp_elifndef:
case tok::pp_elif:
- parsePPElIf();
+ parsePPElse();
break;
case tok::pp_endif:
parsePPEndIf();
break;
+ case tok::pp_pragma:
+ parsePPPragma();
+ break;
default:
parsePPUnknown();
break;
@@ -771,10 +1026,11 @@ void UnwrappedLineParser::conditionalCompilationCondition(bool Unreachable) {
Line += Lines.size();
if (Unreachable ||
- (!PPStack.empty() && PPStack.back().Kind == PP_Unreachable))
+ (!PPStack.empty() && PPStack.back().Kind == PP_Unreachable)) {
PPStack.push_back({PP_Unreachable, Line});
- else
+ } else {
PPStack.push_back({PP_Conditional, Line});
+ }
}
void UnwrappedLineParser::conditionalCompilationStart(bool Unreachable) {
@@ -784,7 +1040,7 @@ void UnwrappedLineParser::conditionalCompilationStart(bool Unreachable) {
PPLevelBranchIndex.push_back(0);
PPLevelBranchCount.push_back(0);
}
- PPChainBranchIndex.push(0);
+ PPChainBranchIndex.push(Unreachable ? -1 : 0);
bool Skip = PPLevelBranchIndex[PPBranchLevel] > 0;
conditionalCompilationCondition(Unreachable || Skip);
}
@@ -803,9 +1059,8 @@ void UnwrappedLineParser::conditionalCompilationAlternative() {
void UnwrappedLineParser::conditionalCompilationEnd() {
assert(PPBranchLevel < (int)PPLevelBranchIndex.size());
if (PPBranchLevel >= 0 && !PPChainBranchIndex.empty()) {
- if (PPChainBranchIndex.top() + 1 > PPLevelBranchCount[PPBranchLevel]) {
+ if (PPChainBranchIndex.top() + 1 > PPLevelBranchCount[PPBranchLevel])
PPLevelBranchCount[PPBranchLevel] = PPChainBranchIndex.top() + 1;
- }
}
// Guard against #endif's without #if.
if (PPBranchLevel > -1)
@@ -829,14 +1084,15 @@ void UnwrappedLineParser::parsePPIf(bool IfDef) {
// If there's a #ifndef on the first line, and the only lines before it are
// comments, it could be an include guard.
bool MaybeIncludeGuard = IfNDef;
- if (IncludeGuard == IG_Inited && MaybeIncludeGuard)
+ if (IncludeGuard == IG_Inited && MaybeIncludeGuard) {
for (auto &Line : Lines) {
- if (!Line.Tokens.front().Tok->is(tok::comment)) {
+ if (Line.Tokens.front().Tok->isNot(tok::comment)) {
MaybeIncludeGuard = false;
IncludeGuard = IG_Rejected;
break;
}
}
+ }
--PPBranchLevel;
parsePPUnknown();
++PPBranchLevel;
@@ -850,26 +1106,25 @@ void UnwrappedLineParser::parsePPElse() {
// If a potential include guard has an #else, it's not an include guard.
if (IncludeGuard == IG_Defined && PPBranchLevel == 0)
IncludeGuard = IG_Rejected;
+ // Don't crash when there is an #else without an #if.
+ assert(PPBranchLevel >= -1);
+ if (PPBranchLevel == -1)
+ conditionalCompilationStart(/*Unreachable=*/true);
conditionalCompilationAlternative();
- if (PPBranchLevel > -1)
- --PPBranchLevel;
+ --PPBranchLevel;
parsePPUnknown();
++PPBranchLevel;
}
-void UnwrappedLineParser::parsePPElIf() { parsePPElse(); }
-
void UnwrappedLineParser::parsePPEndIf() {
conditionalCompilationEnd();
parsePPUnknown();
// If the #endif of a potential include guard is the last thing in the file,
// then we found an include guard.
- unsigned TokenPosition = Tokens->getPosition();
- FormatToken *PeekNext = AllTokens[TokenPosition];
- if (IncludeGuard == IG_Defined && PPBranchLevel == -1 &&
- PeekNext->is(tok::eof) &&
- Style.IndentPPDirectives != FormatStyle::PPDIS_None)
+ if (IncludeGuard == IG_Defined && PPBranchLevel == -1 && Tokens->isEOF() &&
+ Style.IndentPPDirectives != FormatStyle::PPDIS_None) {
IncludeGuard = IG_Found;
+ }
}
void UnwrappedLineParser::parsePPDefine() {
@@ -894,10 +1149,16 @@ void UnwrappedLineParser::parsePPDefine() {
}
}
+ // In the context of a define, even keywords should be treated as normal
+ // identifiers. Setting the kind to identifier is not enough, because we need
+ // to treat additional keywords like __except as well, which are already
+ // identifiers. Setting the identifier info to null interferes with include
+ // guard processing above, and changes preprocessing nesting.
+ FormatTok->Tok.setKind(tok::identifier);
+ FormatTok->Tok.setIdentifierInfo(Keywords.kw_internal_ident_after_define);
nextToken();
if (FormatTok->Tok.getKind() == tok::l_paren &&
- FormatTok->WhitespaceRange.getBegin() ==
- FormatTok->WhitespaceRange.getEnd()) {
+ !FormatTok->hasWhitespaceBefore()) {
parseParens();
}
if (Style.IndentPPDirectives != FormatStyle::PPDIS_None)
@@ -905,6 +1166,25 @@ void UnwrappedLineParser::parsePPDefine() {
addUnwrappedLine();
++Line->Level;
+ Line->PPLevel = PPBranchLevel + (IncludeGuard == IG_Defined ? 0 : 1);
+ assert((int)Line->PPLevel >= 0);
+ Line->InMacroBody = true;
+
+ if (Style.SkipMacroDefinitionBody) {
+ do {
+ FormatTok->Finalized = true;
+ nextToken();
+ } while (!eof());
+ addUnwrappedLine();
+ return;
+ }
+
+ if (FormatTok->is(tok::identifier) &&
+ Tokens->peekNextToken()->is(tok::colon)) {
+ nextToken();
+ nextToken();
+ }
+
// Errors during a preprocessor directive can only affect the layout of the
// preprocessor directive, and thus we ignore them. An alternative approach
// would be to use the same approach we use on the file level (no
@@ -913,6 +1193,11 @@ void UnwrappedLineParser::parsePPDefine() {
parseFile();
}
+void UnwrappedLineParser::parsePPPragma() {
+ Line->InPragmaDirective = true;
+ parsePPUnknown();
+}
+
void UnwrappedLineParser::parsePPUnknown() {
do {
nextToken();
@@ -928,42 +1213,38 @@ void UnwrappedLineParser::parsePPUnknown() {
static bool tokenCanStartNewLine(const FormatToken &Tok) {
// Semicolon can be a null-statement, l_square can be a start of a macro or
// a C++11 attribute, but this doesn't seem to be common.
- return Tok.isNot(tok::semi) && Tok.isNot(tok::l_brace) &&
- Tok.isNot(TT_AttributeSquare) &&
- // Tokens that can only be used as binary operators and a part of
- // overloaded operator names.
- Tok.isNot(tok::period) && Tok.isNot(tok::periodstar) &&
- Tok.isNot(tok::arrow) && Tok.isNot(tok::arrowstar) &&
- Tok.isNot(tok::less) && Tok.isNot(tok::greater) &&
- Tok.isNot(tok::slash) && Tok.isNot(tok::percent) &&
- Tok.isNot(tok::lessless) && Tok.isNot(tok::greatergreater) &&
- Tok.isNot(tok::equal) && Tok.isNot(tok::plusequal) &&
- Tok.isNot(tok::minusequal) && Tok.isNot(tok::starequal) &&
- Tok.isNot(tok::slashequal) && Tok.isNot(tok::percentequal) &&
- Tok.isNot(tok::ampequal) && Tok.isNot(tok::pipeequal) &&
- Tok.isNot(tok::caretequal) && Tok.isNot(tok::greatergreaterequal) &&
- Tok.isNot(tok::lesslessequal) &&
- // Colon is used in labels, base class lists, initializer lists,
- // range-based for loops, ternary operator, but should never be the
- // first token in an unwrapped line.
- Tok.isNot(tok::colon) &&
- // 'noexcept' is a trailing annotation.
- Tok.isNot(tok::kw_noexcept);
+ assert(Tok.isNot(TT_AttributeSquare));
+ return !Tok.isOneOf(tok::semi, tok::l_brace,
+ // Tokens that can only be used as binary operators and a
+ // part of overloaded operator names.
+ tok::period, tok::periodstar, tok::arrow, tok::arrowstar,
+ tok::less, tok::greater, tok::slash, tok::percent,
+ tok::lessless, tok::greatergreater, tok::equal,
+ tok::plusequal, tok::minusequal, tok::starequal,
+ tok::slashequal, tok::percentequal, tok::ampequal,
+ tok::pipeequal, tok::caretequal, tok::greatergreaterequal,
+ tok::lesslessequal,
+ // Colon is used in labels, base class lists, initializer
+ // lists, range-based for loops, ternary operator, but
+ // should never be the first token in an unwrapped line.
+ tok::colon,
+ // 'noexcept' is a trailing annotation.
+ tok::kw_noexcept);
}
static bool mustBeJSIdent(const AdditionalKeywords &Keywords,
const FormatToken *FormatTok) {
// FIXME: This returns true for C/C++ keywords like 'struct'.
return FormatTok->is(tok::identifier) &&
- (FormatTok->Tok.getIdentifierInfo() == nullptr ||
+ (!FormatTok->Tok.getIdentifierInfo() ||
!FormatTok->isOneOf(
Keywords.kw_in, Keywords.kw_of, Keywords.kw_as, Keywords.kw_async,
Keywords.kw_await, Keywords.kw_yield, Keywords.kw_finally,
Keywords.kw_function, Keywords.kw_import, Keywords.kw_is,
Keywords.kw_let, Keywords.kw_var, tok::kw_const,
Keywords.kw_abstract, Keywords.kw_extends, Keywords.kw_implements,
- Keywords.kw_instanceof, Keywords.kw_interface, Keywords.kw_throws,
- Keywords.kw_from));
+ Keywords.kw_instanceof, Keywords.kw_interface,
+ Keywords.kw_override, Keywords.kw_throws, Keywords.kw_from));
}
static bool mustBeJSIdentOrValue(const AdditionalKeywords &Keywords,
@@ -1022,8 +1303,9 @@ static bool isC78ParameterDecl(const FormatToken *Tok, const FormatToken *Next,
return false;
if (!isC78Type(*Tok) &&
- !Tok->isOneOf(tok::kw_register, tok::kw_struct, tok::kw_union))
+ !Tok->isOneOf(tok::kw_register, tok::kw_struct, tok::kw_union)) {
return false;
+ }
if (Next->isNot(tok::star) && !Next->Tok.getIdentifierInfo())
return false;
@@ -1039,6 +1321,44 @@ static bool isC78ParameterDecl(const FormatToken *Tok, const FormatToken *Next,
return Tok->Previous && Tok->Previous->isOneOf(tok::l_paren, tok::comma);
}
+bool UnwrappedLineParser::parseModuleImport() {
+ assert(FormatTok->is(Keywords.kw_import) && "'import' expected");
+
+ if (auto Token = Tokens->peekNextToken(/*SkipComment=*/true);
+ !Token->Tok.getIdentifierInfo() &&
+ !Token->isOneOf(tok::colon, tok::less, tok::string_literal)) {
+ return false;
+ }
+
+ nextToken();
+ while (!eof()) {
+ if (FormatTok->is(tok::colon)) {
+ FormatTok->setFinalizedType(TT_ModulePartitionColon);
+ }
+ // Handle import <foo/bar.h> as we would an include statement.
+ else if (FormatTok->is(tok::less)) {
+ nextToken();
+ while (!FormatTok->isOneOf(tok::semi, tok::greater, tok::eof)) {
+ // Mark tokens up to the trailing line comments as implicit string
+ // literals.
+ if (FormatTok->isNot(tok::comment) &&
+ !FormatTok->TokenText.starts_with("//")) {
+ FormatTok->setFinalizedType(TT_ImplicitStringLiteral);
+ }
+ nextToken();
+ }
+ }
+ if (FormatTok->is(tok::semi)) {
+ nextToken();
+ break;
+ }
+ nextToken();
+ }
+
+ addUnwrappedLine();
+ return true;
+}
+
// readTokenWithJavaScriptASI reads the next token and terminates the current
// line if JavaScript Automatic Semicolon Insertion must
// happen between the current token and the next token.
@@ -1060,14 +1380,13 @@ void UnwrappedLineParser::readTokenWithJavaScriptASI() {
bool PreviousMustBeValue = mustBeJSIdentOrValue(Keywords, Previous);
bool PreviousStartsTemplateExpr =
- Previous->is(TT_TemplateString) && Previous->TokenText.endswith("${");
+ Previous->is(TT_TemplateString) && Previous->TokenText.ends_with("${");
if (PreviousMustBeValue || Previous->is(tok::r_paren)) {
// If the line contains an '@' sign, the previous token might be an
// annotation, which can precede another identifier/value.
- bool HasAt = std::find_if(Line->Tokens.begin(), Line->Tokens.end(),
- [](UnwrappedLineNode &LineNode) {
- return LineNode.Tok->is(tok::at);
- }) != Line->Tokens.end();
+ bool HasAt = llvm::any_of(Line->Tokens, [](UnwrappedLineNode &LineNode) {
+ return LineNode.Tok->is(tok::at);
+ });
if (HasAt)
return;
}
@@ -1075,19 +1394,22 @@ void UnwrappedLineParser::readTokenWithJavaScriptASI() {
return addUnwrappedLine();
bool NextMustBeValue = mustBeJSIdentOrValue(Keywords, Next);
bool NextEndsTemplateExpr =
- Next->is(TT_TemplateString) && Next->TokenText.startswith("}");
+ Next->is(TT_TemplateString) && Next->TokenText.starts_with("}");
if (NextMustBeValue && !NextEndsTemplateExpr && !PreviousStartsTemplateExpr &&
(PreviousMustBeValue ||
Previous->isOneOf(tok::r_square, tok::r_paren, tok::plusplus,
- tok::minusminus)))
+ tok::minusminus))) {
return addUnwrappedLine();
+ }
if ((PreviousMustBeValue || Previous->is(tok::r_paren)) &&
- isJSDeclOrStmt(Keywords, Next))
+ isJSDeclOrStmt(Keywords, Next)) {
return addUnwrappedLine();
+ }
}
-void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
- assert(!FormatTok->is(tok::l_brace));
+void UnwrappedLineParser::parseStructuralElement(
+ const FormatToken *OpeningBrace, IfStmtKind *IfKind,
+ FormatToken **IfLeftBrace, bool *HasDoWhile, bool *HasLabel) {
if (Style.Language == FormatStyle::LK_TableGen &&
FormatTok->is(tok::pp_include)) {
nextToken();
@@ -1096,15 +1418,49 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
addUnwrappedLine();
return;
}
+
+ if (Style.isCpp()) {
+ while (FormatTok->is(tok::l_square) && handleCppAttributes()) {
+ }
+ } else if (Style.isVerilog()) {
+ if (Keywords.isVerilogStructuredProcedure(*FormatTok)) {
+ parseForOrWhileLoop(/*HasParens=*/false);
+ return;
+ }
+ if (FormatTok->isOneOf(Keywords.kw_foreach, Keywords.kw_repeat)) {
+ parseForOrWhileLoop();
+ return;
+ }
+ if (FormatTok->isOneOf(tok::kw_restrict, Keywords.kw_assert,
+ Keywords.kw_assume, Keywords.kw_cover)) {
+ parseIfThenElse(IfKind, /*KeepBraces=*/false, /*IsVerilogAssert=*/true);
+ return;
+ }
+
+ // Skip things that can exist before keywords like 'if' and 'case'.
+ while (true) {
+ if (FormatTok->isOneOf(Keywords.kw_priority, Keywords.kw_unique,
+ Keywords.kw_unique0)) {
+ nextToken();
+ } else if (FormatTok->is(tok::l_paren) &&
+ Tokens->peekNextToken()->is(tok::star)) {
+ parseParens();
+ } else {
+ break;
+ }
+ }
+ }
+
+ // Tokens that only make sense at the beginning of a line.
switch (FormatTok->Tok.getKind()) {
case tok::kw_asm:
nextToken();
if (FormatTok->is(tok::l_brace)) {
- FormatTok->setType(TT_InlineASMBrace);
+ FormatTok->setFinalizedType(TT_InlineASMBrace);
nextToken();
- while (FormatTok && FormatTok->isNot(tok::eof)) {
+ while (FormatTok && !eof()) {
if (FormatTok->is(tok::r_brace)) {
- FormatTok->setType(TT_InlineASMBrace);
+ FormatTok->setFinalizedType(TT_InlineASMBrace);
nextToken();
addUnwrappedLine();
break;
@@ -1120,94 +1476,136 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
case tok::kw_public:
case tok::kw_protected:
case tok::kw_private:
- if (Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp())
+ if (Style.Language == FormatStyle::LK_Java || Style.isJavaScript() ||
+ Style.isCSharp()) {
nextToken();
- else
+ } else {
parseAccessSpecifier();
+ }
return;
- case tok::kw_if:
- if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
+ case tok::kw_if: {
+ if (Style.isJavaScript() && Line->MustBeDeclaration) {
// field/method declaration.
break;
- parseIfThenElse();
+ }
+ FormatToken *Tok = parseIfThenElse(IfKind);
+ if (IfLeftBrace)
+ *IfLeftBrace = Tok;
return;
+ }
case tok::kw_for:
case tok::kw_while:
- if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
+ if (Style.isJavaScript() && Line->MustBeDeclaration) {
// field/method declaration.
break;
+ }
parseForOrWhileLoop();
return;
case tok::kw_do:
- if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
+ if (Style.isJavaScript() && Line->MustBeDeclaration) {
// field/method declaration.
break;
+ }
parseDoWhile();
+ if (HasDoWhile)
+ *HasDoWhile = true;
return;
case tok::kw_switch:
- if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
+ if (Style.isJavaScript() && Line->MustBeDeclaration) {
// 'switch: string' field declaration.
break;
+ }
parseSwitch();
return;
case tok::kw_default:
- if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
+ // In Verilog default along with other labels are handled in the next loop.
+ if (Style.isVerilog())
+ break;
+ if (Style.isJavaScript() && Line->MustBeDeclaration) {
// 'default: string' field declaration.
break;
+ }
nextToken();
if (FormatTok->is(tok::colon)) {
+ FormatTok->setFinalizedType(TT_CaseLabelColon);
parseLabel();
return;
}
// e.g. "default void f() {}" in a Java interface.
break;
case tok::kw_case:
- if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
+ // Proto: there are no switch/case statements.
+ if (Style.Language == FormatStyle::LK_Proto) {
+ nextToken();
+ return;
+ }
+ if (Style.isVerilog()) {
+ parseBlock();
+ addUnwrappedLine();
+ return;
+ }
+ if (Style.isJavaScript() && Line->MustBeDeclaration) {
// 'case: string' field declaration.
+ nextToken();
break;
+ }
parseCaseLabel();
return;
case tok::kw_try:
case tok::kw___try:
- if (Style.Language == FormatStyle::LK_JavaScript && Line->MustBeDeclaration)
+ if (Style.isJavaScript() && Line->MustBeDeclaration) {
// field/method declaration.
break;
+ }
parseTryCatch();
return;
case tok::kw_extern:
nextToken();
- if (FormatTok->Tok.is(tok::string_literal)) {
+ if (Style.isVerilog()) {
+ // In Verilog and extern module declaration looks like a start of module.
+ // But there is no body and endmodule. So we handle it separately.
+ if (Keywords.isVerilogHierarchy(*FormatTok)) {
+ parseVerilogHierarchyHeader();
+ return;
+ }
+ } else if (FormatTok->is(tok::string_literal)) {
nextToken();
- if (FormatTok->Tok.is(tok::l_brace)) {
- if (!Style.IndentExternBlock) {
- if (Style.BraceWrapping.AfterExternBlock) {
- addUnwrappedLine();
- }
- unsigned AddLevels = Style.BraceWrapping.AfterExternBlock ? 1u : 0u;
- parseBlock(/*MustBeDeclaration=*/true, AddLevels);
- } else {
- unsigned AddLevels =
- Style.IndentExternBlock == FormatStyle::IEBS_Indent ? 1u : 0u;
- parseBlock(/*MustBeDeclaration=*/true, AddLevels);
- }
+ if (FormatTok->is(tok::l_brace)) {
+ if (Style.BraceWrapping.AfterExternBlock)
+ addUnwrappedLine();
+ // Either we indent or for backwards compatibility we follow the
+ // AfterExternBlock style.
+ unsigned AddLevels =
+ (Style.IndentExternBlock == FormatStyle::IEBS_Indent) ||
+ (Style.BraceWrapping.AfterExternBlock &&
+ Style.IndentExternBlock ==
+ FormatStyle::IEBS_AfterExternBlock)
+ ? 1u
+ : 0u;
+ parseBlock(/*MustBeDeclaration=*/true, AddLevels);
addUnwrappedLine();
return;
}
}
break;
case tok::kw_export:
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isJavaScript()) {
parseJavaScriptEs6ImportExport();
return;
}
- if (!Style.isCpp())
- break;
- // Handle C++ "(inline|export) namespace".
- LLVM_FALLTHROUGH;
+ if (Style.isCpp()) {
+ nextToken();
+ if (FormatTok->is(tok::kw_namespace)) {
+ parseNamespace();
+ return;
+ }
+ if (FormatTok->is(Keywords.kw_import) && parseModuleImport())
+ return;
+ }
+ break;
case tok::kw_inline:
nextToken();
- if (FormatTok->Tok.is(tok::kw_namespace)) {
+ if (FormatTok->is(tok::kw_namespace)) {
parseNamespace();
return;
}
@@ -1223,7 +1621,7 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
return;
}
if (FormatTok->is(Keywords.kw_import)) {
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isJavaScript()) {
parseJavaScriptEs6ImportExport();
return;
}
@@ -1231,7 +1629,7 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
nextToken();
if (FormatTok->is(tok::kw_public))
nextToken();
- if (!FormatTok->is(tok::string_literal))
+ if (FormatTok->isNot(tok::string_literal))
return;
nextToken();
if (FormatTok->is(tok::semi))
@@ -1239,6 +1637,8 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
addUnwrappedLine();
return;
}
+ if (Style.isCpp() && parseModuleImport())
+ return;
}
if (Style.isCpp() &&
FormatTok->isOneOf(Keywords.kw_signals, Keywords.kw_qsignals,
@@ -1258,17 +1658,34 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
parseNamespace();
return;
}
+ // In Verilog labels can be any expression, so we don't do them here.
+ // JS doesn't have macros, and within classes colons indicate fields, not
+ // labels.
+ // TableGen doesn't have labels.
+ if (!Style.isJavaScript() && !Style.isVerilog() && !Style.isTableGen() &&
+ Tokens->peekNextToken()->is(tok::colon) && !Line->MustBeDeclaration) {
+ nextToken();
+ Line->Tokens.begin()->Tok->MustBreakBefore = true;
+ FormatTok->setFinalizedType(TT_GotoLabelColon);
+ parseLabel(!Style.IndentGotoLabels);
+ if (HasLabel)
+ *HasLabel = true;
+ return;
+ }
// In all other cases, parse the declaration.
break;
default:
break;
}
+
+ const bool InRequiresExpression =
+ OpeningBrace && OpeningBrace->is(TT_RequiresExpressionLBrace);
do {
const FormatToken *Previous = FormatTok->Previous;
switch (FormatTok->Tok.getKind()) {
case tok::at:
nextToken();
- if (FormatTok->Tok.is(tok::l_brace)) {
+ if (FormatTok->is(tok::l_brace)) {
nextToken();
parseBracedList();
break;
@@ -1299,24 +1716,27 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
return;
case tok::objc_autoreleasepool:
nextToken();
- if (FormatTok->Tok.is(tok::l_brace)) {
+ if (FormatTok->is(tok::l_brace)) {
if (Style.BraceWrapping.AfterControlStatement ==
- FormatStyle::BWACS_Always)
+ FormatStyle::BWACS_Always) {
addUnwrappedLine();
- parseBlock(/*MustBeDeclaration=*/false);
+ }
+ parseBlock();
}
addUnwrappedLine();
return;
case tok::objc_synchronized:
nextToken();
- if (FormatTok->Tok.is(tok::l_paren))
+ if (FormatTok->is(tok::l_paren)) {
// Skip synchronization object
parseParens();
- if (FormatTok->Tok.is(tok::l_brace)) {
+ }
+ if (FormatTok->is(tok::l_brace)) {
if (Style.BraceWrapping.AfterControlStatement ==
- FormatStyle::BWACS_Always)
+ FormatStyle::BWACS_Always) {
addUnwrappedLine();
- parseBlock(/*MustBeDeclaration=*/false);
+ }
+ parseBlock();
}
addUnwrappedLine();
return;
@@ -1329,12 +1749,16 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
break;
}
break;
- case tok::kw_concept:
- parseConcept();
- break;
- case tok::kw_requires:
- parseRequires();
+ case tok::kw_requires: {
+ if (Style.isCpp()) {
+ bool ParsedClause = parseRequires();
+ if (ParsedClause)
+ return;
+ } else {
+ nextToken();
+ }
break;
+ }
case tok::kw_enum:
// Ignore if this is part of "template <enum ...".
if (Previous && Previous->is(tok::less)) {
@@ -1346,8 +1770,8 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
// enum definition can start a structural element.
if (!parseEnum())
break;
- // This only applies for C++.
- if (!Style.isCpp()) {
+ // This only applies to C++ and Verilog.
+ if (!Style.isCpp() && !Style.isVerilog()) {
addUnwrappedLine();
return;
}
@@ -1357,27 +1781,52 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
if (FormatTok->isOneOf(Keywords.kw_NS_ENUM, Keywords.kw_NS_OPTIONS,
Keywords.kw_CF_ENUM, Keywords.kw_CF_OPTIONS,
Keywords.kw_CF_CLOSED_ENUM,
- Keywords.kw_NS_CLOSED_ENUM))
+ Keywords.kw_NS_CLOSED_ENUM)) {
parseEnum();
+ }
break;
+ case tok::kw_class:
+ if (Style.isVerilog()) {
+ parseBlock();
+ addUnwrappedLine();
+ return;
+ }
+ if (Style.isTableGen()) {
+ // Do nothing special. In this case the l_brace becomes FunctionLBrace.
+ // This is same as def and so on.
+ nextToken();
+ break;
+ }
+ [[fallthrough]];
case tok::kw_struct:
case tok::kw_union:
- case tok::kw_class:
- if (parseStructLike()) {
+ if (parseStructLike())
return;
+ break;
+ case tok::kw_decltype:
+ nextToken();
+ if (FormatTok->is(tok::l_paren)) {
+ parseParens();
+ assert(FormatTok->Previous);
+ if (FormatTok->Previous->endsSequence(tok::r_paren, tok::kw_auto,
+ tok::l_paren)) {
+ Line->SeenDecltypeAuto = true;
+ }
}
break;
case tok::period:
nextToken();
// In Java, classes have an implicit static member "class".
if (Style.Language == FormatStyle::LK_Java && FormatTok &&
- FormatTok->is(tok::kw_class))
+ FormatTok->is(tok::kw_class)) {
nextToken();
- if (Style.Language == FormatStyle::LK_JavaScript && FormatTok &&
- FormatTok->Tok.getIdentifierInfo())
+ }
+ if (Style.isJavaScript() && FormatTok &&
+ FormatTok->Tok.getIdentifierInfo()) {
// JavaScript only has pseudo keywords, all keywords are allowed to
// appear in "IdentifierName" positions. See http://es5.github.io/#x7.6
nextToken();
+ }
break;
case tok::semi:
nextToken();
@@ -1390,11 +1839,11 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
parseParens();
// Break the unwrapped line if a K&R C function definition has a parameter
// declaration.
- if (!IsTopLevel || !Style.isCpp() || !Previous || FormatTok->is(tok::eof))
+ if (OpeningBrace || !Style.isCpp() || !Previous || eof())
break;
- const unsigned Position = Tokens->getPosition() + 1;
- assert(Position < AllTokens.size());
- if (isC78ParameterDecl(FormatTok, AllTokens[Position], Previous)) {
+ if (isC78ParameterDecl(FormatTok,
+ Tokens->peekNextToken(/*SkipComment=*/true),
+ Previous)) {
addUnwrappedLine();
return;
}
@@ -1407,24 +1856,44 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
break;
case tok::caret:
nextToken();
+ // Block return type.
if (FormatTok->Tok.isAnyIdentifier() ||
- FormatTok->isSimpleTypeSpecifier())
+ FormatTok->isSimpleTypeSpecifier()) {
nextToken();
+ // Return types: pointers are ok too.
+ while (FormatTok->is(tok::star))
+ nextToken();
+ }
+ // Block argument list.
if (FormatTok->is(tok::l_paren))
parseParens();
+ // Block body.
if (FormatTok->is(tok::l_brace))
parseChildBlock();
break;
case tok::l_brace:
+ if (InRequiresExpression)
+ FormatTok->setFinalizedType(TT_BracedListLBrace);
if (!tryToParsePropertyAccessor() && !tryToParseBracedList()) {
+ IsDecltypeAutoFunction = Line->SeenDecltypeAuto;
// A block outside of parentheses must be the last part of a
// structural element.
// FIXME: Figure out cases where this is not true, and add projections
// for them (the one we know is missing are lambdas).
- if (Style.BraceWrapping.AfterFunction)
+ if (Style.Language == FormatStyle::LK_Java &&
+ Line->Tokens.front().Tok->is(Keywords.kw_synchronized)) {
+ // If necessary, we could set the type to something different than
+ // TT_FunctionLBrace.
+ if (Style.BraceWrapping.AfterControlStatement ==
+ FormatStyle::BWACS_Always) {
+ addUnwrappedLine();
+ }
+ } else if (Style.BraceWrapping.AfterFunction) {
addUnwrappedLine();
- FormatTok->setType(TT_FunctionLBrace);
- parseBlock(/*MustBeDeclaration=*/false);
+ }
+ FormatTok->setFinalizedType(TT_FunctionLBrace);
+ parseBlock();
+ IsDecltypeAutoFunction = false;
addUnwrappedLine();
return;
}
@@ -1432,8 +1901,7 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
// element continues.
break;
case tok::kw_try:
- if (Style.Language == FormatStyle::LK_JavaScript &&
- Line->MustBeDeclaration) {
+ if (Style.isJavaScript() && Line->MustBeDeclaration) {
// field/method declaration.
nextToken();
break;
@@ -1460,17 +1928,16 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
// expressions (functions that are not on their own line) must not create
// a new unwrapped line, so they are special cased below.
size_t TokenCount = Line->Tokens.size();
- if (Style.Language == FormatStyle::LK_JavaScript &&
- FormatTok->is(Keywords.kw_function) &&
- (TokenCount > 1 || (TokenCount == 1 && !Line->Tokens.front().Tok->is(
- Keywords.kw_async)))) {
+ if (Style.isJavaScript() && FormatTok->is(Keywords.kw_function) &&
+ (TokenCount > 1 ||
+ (TokenCount == 1 &&
+ Line->Tokens.front().Tok->isNot(Keywords.kw_async)))) {
tryToParseJSFunction();
break;
}
- if ((Style.Language == FormatStyle::LK_JavaScript ||
- Style.Language == FormatStyle::LK_Java) &&
+ if ((Style.isJavaScript() || Style.Language == FormatStyle::LK_Java) &&
FormatTok->is(Keywords.kw_interface)) {
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isJavaScript()) {
// In JavaScript/TypeScript, "interface" can be used as a standalone
// identifier, e.g. in `var interface = 1;`. If "interface" is
// followed by another identifier, it is very like to be an actual
@@ -1478,7 +1945,7 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
unsigned StoredPosition = Tokens->getPosition();
FormatToken *Next = Tokens->getNextToken();
FormatTok = Tokens->setPosition(StoredPosition);
- if (Next && !mustBeJSIdent(Keywords, Next)) {
+ if (!mustBeJSIdent(Keywords, Next)) {
nextToken();
break;
}
@@ -1488,10 +1955,22 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
return;
}
- if (FormatTok->is(Keywords.kw_interface)) {
- if (parseStructLike()) {
+ if (Style.isVerilog()) {
+ if (FormatTok->is(Keywords.kw_table)) {
+ parseVerilogTable();
return;
}
+ if (Keywords.isVerilogBegin(*FormatTok) ||
+ Keywords.isVerilogHierarchy(*FormatTok)) {
+ parseBlock();
+ addUnwrappedLine();
+ return;
+ }
+ }
+
+ if (!Style.isCpp() && FormatTok->is(Keywords.kw_interface)) {
+ if (parseStructLike())
+ return;
break;
}
@@ -1502,21 +1981,25 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
// See if the following token should start a new unwrapped line.
StringRef Text = FormatTok->TokenText;
+
+ FormatToken *PreviousToken = FormatTok;
nextToken();
// JS doesn't have macros, and within classes colons indicate fields, not
// labels.
- if (Style.Language == FormatStyle::LK_JavaScript)
+ if (Style.isJavaScript())
break;
- TokenCount = Line->Tokens.size();
- if (TokenCount == 1 ||
- (TokenCount == 2 && Line->Tokens.front().Tok->is(tok::comment))) {
- if (FormatTok->Tok.is(tok::colon) && !Line->MustBeDeclaration) {
- Line->Tokens.begin()->Tok->MustBreakBefore = true;
- parseLabel(!Style.IndentGotoLabels);
- return;
- }
+ auto OneTokenSoFar = [&]() {
+ auto I = Line->Tokens.begin(), E = Line->Tokens.end();
+ while (I != E && I->Tok->is(tok::comment))
+ ++I;
+ if (Style.isVerilog())
+ while (I != E && I->Tok->is(tok::hash))
+ ++I;
+ return I != E && (++I == E);
+ };
+ if (OneTokenSoFar()) {
// Recognize function-like macro usages without trailing semicolon as
// well as free-standing macros like Q_OBJECT.
bool FunctionLike = FormatTok->is(tok::l_paren);
@@ -1530,6 +2013,8 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
if (FollowedByNewline && (Text.size() >= 5 || FunctionLike) &&
tokenCanStartNewLine(*FormatTok) && Text == Text.upper()) {
+ if (PreviousToken->isNot(TT_UntouchableMacroFunc))
+ PreviousToken->setFinalizedType(TT_FunctionLikeOrFreestandingMacro);
addUnwrappedLine();
return;
}
@@ -1537,36 +2022,35 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
break;
}
case tok::equal:
- // Fat arrows (=>) have tok::TokenKind tok::equal but TokenType
- // TT_FatArrow. They always start an expression or a child block if
- // followed by a curly brace.
- if (FormatTok->is(TT_FatArrow)) {
- nextToken();
- if (FormatTok->is(tok::l_brace)) {
- // C# may break after => if the next character is a newline.
- if (Style.isCSharp() && Style.BraceWrapping.AfterFunction == true) {
- // calling `addUnwrappedLine()` here causes odd parsing errors.
- FormatTok->MustBreakBefore = true;
- }
- parseChildBlock();
- }
+ if ((Style.isJavaScript() || Style.isCSharp()) &&
+ FormatTok->is(TT_FatArrow)) {
+ tryToParseChildBlock();
break;
}
nextToken();
- if (FormatTok->Tok.is(tok::l_brace)) {
+ if (FormatTok->is(tok::l_brace)) {
// Block kind should probably be set to BK_BracedInit for any language.
// C# needs this change to ensure that array initialisers and object
// initialisers are indented the same way.
if (Style.isCSharp())
FormatTok->setBlockKind(BK_BracedInit);
+ // TableGen's defset statement has syntax of the form,
+ // `defset <type> <name> = { <statement>... }`
+ if (Style.isTableGen() &&
+ Line->Tokens.begin()->Tok->is(Keywords.kw_defset)) {
+ FormatTok->setFinalizedType(TT_FunctionLBrace);
+ parseBlock(/*MustBeDeclaration=*/false, /*AddLevels=*/1u,
+ /*MunchSemi=*/false);
+ addUnwrappedLine();
+ break;
+ }
nextToken();
parseBracedList();
} else if (Style.Language == FormatStyle::LK_Proto &&
- FormatTok->Tok.is(tok::less)) {
+ FormatTok->is(tok::less)) {
nextToken();
- parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
- /*ClosingBraceKind=*/tok::greater);
+ parseBracedList(/*IsAngleBracket=*/true);
}
break;
case tok::l_square:
@@ -1575,6 +2059,49 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
case tok::kw_new:
parseNew();
break;
+ case tok::kw_case:
+ // Proto: there are no switch/case statements.
+ if (Style.Language == FormatStyle::LK_Proto) {
+ nextToken();
+ return;
+ }
+ // In Verilog switch is called case.
+ if (Style.isVerilog()) {
+ parseBlock();
+ addUnwrappedLine();
+ return;
+ }
+ if (Style.isJavaScript() && Line->MustBeDeclaration) {
+ // 'case: string' field declaration.
+ nextToken();
+ break;
+ }
+ parseCaseLabel();
+ break;
+ case tok::kw_default:
+ nextToken();
+ if (Style.isVerilog()) {
+ if (FormatTok->is(tok::colon)) {
+ // The label will be handled in the next iteration.
+ break;
+ }
+ if (FormatTok->is(Keywords.kw_clocking)) {
+ // A default clocking block.
+ parseBlock();
+ addUnwrappedLine();
+ return;
+ }
+ parseVerilogCaseLabel();
+ return;
+ }
+ break;
+ case tok::colon:
+ nextToken();
+ if (Style.isVerilog()) {
+ parseVerilogCaseLabel();
+ return;
+ }
+ break;
default:
nextToken();
break;
@@ -1599,16 +2126,16 @@ bool UnwrappedLineParser::tryToParsePropertyAccessor() {
FormatToken *Tok = Tokens->getNextToken();
// A trivial property accessor is of the form:
- // { [ACCESS_SPECIFIER] [get]; [ACCESS_SPECIFIER] [set] }
+ // { [ACCESS_SPECIFIER] [get]; [ACCESS_SPECIFIER] [set|init] }
// Track these as they do not require line breaks to be introduced.
- bool HasGetOrSet = false;
+ bool HasSpecialAccessor = false;
bool IsTrivialPropertyAccessor = true;
while (!eof()) {
if (Tok->isOneOf(tok::semi, tok::kw_public, tok::kw_private,
tok::kw_protected, Keywords.kw_internal, Keywords.kw_get,
- Keywords.kw_set)) {
- if (Tok->isOneOf(Keywords.kw_get, Keywords.kw_set))
- HasGetOrSet = true;
+ Keywords.kw_init, Keywords.kw_set)) {
+ if (Tok->isOneOf(Keywords.kw_get, Keywords.kw_init, Keywords.kw_set))
+ HasSpecialAccessor = true;
Tok = Tokens->getNextToken();
continue;
}
@@ -1617,7 +2144,7 @@ bool UnwrappedLineParser::tryToParsePropertyAccessor() {
break;
}
- if (!HasGetOrSet) {
+ if (!HasSpecialAccessor) {
Tokens->setPosition(StoredPosition);
return false;
}
@@ -1625,7 +2152,7 @@ bool UnwrappedLineParser::tryToParsePropertyAccessor() {
// Try to parse the property accessor:
// https://docs.microsoft.com/en-us/dotnet/csharp/programming-guide/classes-and-structs/properties
Tokens->setPosition(StoredPosition);
- if (!IsTrivialPropertyAccessor && Style.BraceWrapping.AfterFunction == true)
+ if (!IsTrivialPropertyAccessor && Style.BraceWrapping.AfterFunction)
addUnwrappedLine();
nextToken();
do {
@@ -1659,7 +2186,8 @@ bool UnwrappedLineParser::tryToParsePropertyAccessor() {
nextToken();
break;
default:
- if (FormatTok->isOneOf(Keywords.kw_get, Keywords.kw_set) &&
+ if (FormatTok->isOneOf(Keywords.kw_get, Keywords.kw_init,
+ Keywords.kw_set) &&
!IsTrivialPropertyAccessor) {
// Non-trivial get/set needs to be on its own line.
addUnwrappedLine();
@@ -1673,16 +2201,17 @@ bool UnwrappedLineParser::tryToParsePropertyAccessor() {
}
bool UnwrappedLineParser::tryToParseLambda() {
+ assert(FormatTok->is(tok::l_square));
if (!Style.isCpp()) {
nextToken();
return false;
}
- assert(FormatTok->is(tok::l_square));
FormatToken &LSquare = *FormatTok;
if (!tryToParseLambdaIntroducer())
return false;
bool SeenArrow = false;
+ bool InTemplateParameterList = false;
while (FormatTok->isNot(tok::l_brace)) {
if (FormatTok->isSimpleTypeSpecifier()) {
@@ -1693,22 +2222,34 @@ bool UnwrappedLineParser::tryToParseLambda() {
case tok::l_brace:
break;
case tok::l_paren:
- parseParens();
+ parseParens(/*AmpAmpTokenType=*/TT_PointerOrReference);
+ break;
+ case tok::l_square:
+ parseSquare();
+ break;
+ case tok::less:
+ assert(FormatTok->Previous);
+ if (FormatTok->Previous->is(tok::r_square))
+ InTemplateParameterList = true;
+ nextToken();
break;
+ case tok::kw_auto:
+ case tok::kw_class:
+ case tok::kw_template:
+ case tok::kw_typename:
case tok::amp:
case tok::star:
case tok::kw_const:
+ case tok::kw_constexpr:
+ case tok::kw_consteval:
case tok::comma:
- case tok::less:
case tok::greater:
case tok::identifier:
case tok::numeric_constant:
case tok::coloncolon:
- case tok::kw_class:
case tok::kw_mutable:
case tok::kw_noexcept:
- case tok::kw_template:
- case tok::kw_typename:
+ case tok::kw_static:
nextToken();
break;
// Specialization of a template with an integer parameter can contain
@@ -1722,9 +2263,6 @@ bool UnwrappedLineParser::tryToParseLambda() {
// followed by an `a->b` expression, such as:
// ([obj func:arg] + a->b)
// Otherwise the code below would parse as a lambda.
- //
- // FIXME: This heuristic is incorrect for C++20 generic lambdas with
- // explicit template lists: []<bool b = true && false>(U &&u){}
case tok::plus:
case tok::minus:
case tok::exclaim:
@@ -1745,7 +2283,7 @@ bool UnwrappedLineParser::tryToParseLambda() {
case tok::ellipsis:
case tok::kw_true:
case tok::kw_false:
- if (SeenArrow) {
+ if (SeenArrow || InTemplateParameterList) {
nextToken();
break;
}
@@ -1754,41 +2292,61 @@ bool UnwrappedLineParser::tryToParseLambda() {
// This might or might not actually be a lambda arrow (this could be an
// ObjC method invocation followed by a dereferencing arrow). We might
// reset this back to TT_Unknown in TokenAnnotator.
- FormatTok->setType(TT_LambdaArrow);
+ FormatTok->setFinalizedType(TT_TrailingReturnArrow);
SeenArrow = true;
nextToken();
break;
+ case tok::kw_requires: {
+ auto *RequiresToken = FormatTok;
+ nextToken();
+ parseRequiresClause(RequiresToken);
+ break;
+ }
+ case tok::equal:
+ if (!InTemplateParameterList)
+ return true;
+ nextToken();
+ break;
default:
return true;
}
}
- FormatTok->setType(TT_LambdaLBrace);
- LSquare.setType(TT_LambdaLSquare);
+
+ FormatTok->setFinalizedType(TT_LambdaLBrace);
+ LSquare.setFinalizedType(TT_LambdaLSquare);
+
+ NestedLambdas.push_back(Line->SeenDecltypeAuto);
parseChildBlock();
+ assert(!NestedLambdas.empty());
+ NestedLambdas.pop_back();
+
return true;
}
bool UnwrappedLineParser::tryToParseLambdaIntroducer() {
const FormatToken *Previous = FormatTok->Previous;
- if (Previous &&
- (Previous->isOneOf(tok::identifier, tok::kw_operator, tok::kw_new,
- tok::kw_delete, tok::l_square) ||
- FormatTok->isCppStructuredBinding(Style) || Previous->closesScope() ||
- Previous->isSimpleTypeSpecifier())) {
- nextToken();
+ const FormatToken *LeftSquare = FormatTok;
+ nextToken();
+ if ((Previous && ((Previous->Tok.getIdentifierInfo() &&
+ !Previous->isOneOf(tok::kw_return, tok::kw_co_await,
+ tok::kw_co_yield, tok::kw_co_return)) ||
+ Previous->closesScope())) ||
+ LeftSquare->isCppStructuredBinding(Style)) {
return false;
}
- nextToken();
- if (FormatTok->is(tok::l_square)) {
+ if (FormatTok->is(tok::l_square) || tok::isLiteral(FormatTok->Tok.getKind()))
return false;
+ if (FormatTok->is(tok::r_square)) {
+ const FormatToken *Next = Tokens->peekNextToken(/*SkipComment=*/true);
+ if (Next->is(tok::greater))
+ return false;
}
parseSquare(/*LambdaIntroducer=*/true);
return true;
}
void UnwrappedLineParser::tryToParseJSFunction() {
- assert(FormatTok->is(Keywords.kw_function) ||
- FormatTok->startsSequence(Keywords.kw_async, Keywords.kw_function));
+ assert(FormatTok->is(Keywords.kw_function));
if (FormatTok->is(Keywords.kw_async))
nextToken();
// Consume "function".
@@ -1796,7 +2354,7 @@ void UnwrappedLineParser::tryToParseJSFunction() {
// Consume * (generator function). Treat it like C++'s overloaded operators.
if (FormatTok->is(tok::star)) {
- FormatTok->setType(TT_OverloadedOperator);
+ FormatTok->setFinalizedType(TT_OverloadedOperator);
nextToken();
}
@@ -1840,46 +2398,34 @@ bool UnwrappedLineParser::tryToParseBracedList() {
return true;
}
-bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
- bool IsEnum,
- tok::TokenKind ClosingBraceKind) {
+bool UnwrappedLineParser::tryToParseChildBlock() {
+ assert(Style.isJavaScript() || Style.isCSharp());
+ assert(FormatTok->is(TT_FatArrow));
+ // Fat arrows (=>) have tok::TokenKind tok::equal but TokenType TT_FatArrow.
+ // They always start an expression or a child block if followed by a curly
+ // brace.
+ nextToken();
+ if (FormatTok->isNot(tok::l_brace))
+ return false;
+ parseChildBlock();
+ return true;
+}
+
+bool UnwrappedLineParser::parseBracedList(bool IsAngleBracket, bool IsEnum) {
bool HasError = false;
// FIXME: Once we have an expression parser in the UnwrappedLineParser,
// replace this by using parseAssignmentExpression() inside.
do {
- if (Style.isCSharp()) {
- // Fat arrows (=>) have tok::TokenKind tok::equal but TokenType
- // TT_FatArrow. They always start an expression or a child block if
- // followed by a curly brace.
- if (FormatTok->is(TT_FatArrow)) {
- nextToken();
- if (FormatTok->is(tok::l_brace)) {
- // C# may break after => if the next character is a newline.
- if (Style.isCSharp() && Style.BraceWrapping.AfterFunction == true) {
- // calling `addUnwrappedLine()` here causes odd parsing errors.
- FormatTok->MustBreakBefore = true;
- }
- parseChildBlock();
- continue;
- }
- }
+ if (Style.isCSharp() && FormatTok->is(TT_FatArrow) &&
+ tryToParseChildBlock()) {
+ continue;
}
- if (Style.Language == FormatStyle::LK_JavaScript) {
- if (FormatTok->is(Keywords.kw_function) ||
- FormatTok->startsSequence(Keywords.kw_async, Keywords.kw_function)) {
+ if (Style.isJavaScript()) {
+ if (FormatTok->is(Keywords.kw_function)) {
tryToParseJSFunction();
continue;
}
- if (FormatTok->is(TT_FatArrow)) {
- nextToken();
- // Fat arrows can be followed by simple expressions or by child blocks
- // in curly braces.
- if (FormatTok->is(tok::l_brace)) {
- parseChildBlock();
- continue;
- }
- }
if (FormatTok->is(tok::l_brace)) {
// Could be a method inside of a braced list `{a() { return 1; }}`.
if (tryToParseBracedList())
@@ -1887,19 +2433,13 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
parseChildBlock();
}
}
- if (FormatTok->Tok.getKind() == ClosingBraceKind) {
+ if (FormatTok->is(IsAngleBracket ? tok::greater : tok::r_brace)) {
if (IsEnum && !Style.AllowShortEnumsOnASingleLine)
addUnwrappedLine();
nextToken();
return !HasError;
}
switch (FormatTok->Tok.getKind()) {
- case tok::caret:
- nextToken();
- if (FormatTok->is(tok::l_brace)) {
- parseChildBlock();
- }
- break;
case tok::l_square:
if (Style.isCSharp())
parseSquare();
@@ -1910,7 +2450,7 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
parseParens();
// JavaScript can just have free standing methods and getters/setters in
// object literals. Detect them by a "{" following ")".
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isJavaScript()) {
if (FormatTok->is(tok::l_brace))
parseChildBlock();
break;
@@ -1924,26 +2464,22 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
parseBracedList();
break;
case tok::less:
- if (Style.Language == FormatStyle::LK_Proto) {
- nextToken();
- parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
- /*ClosingBraceKind=*/tok::greater);
- } else {
- nextToken();
- }
+ nextToken();
+ if (IsAngleBracket)
+ parseBracedList(/*IsAngleBracket=*/true);
break;
case tok::semi:
// JavaScript (or more precisely TypeScript) can have semicolons in braced
// lists (in so-called TypeMemberLists). Thus, the semicolon cannot be
// used for error recovery if we have otherwise determined that this is
// a braced list.
- if (Style.Language == FormatStyle::LK_JavaScript) {
+ if (Style.isJavaScript()) {
nextToken();
break;
}
HasError = true;
- if (!ContinueOnSemicolons)
- return !HasError;
+ if (!IsEnum)
+ return false;
nextToken();
break;
case tok::comma:
@@ -1959,22 +2495,55 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
return false;
}
-void UnwrappedLineParser::parseParens() {
- assert(FormatTok->Tok.is(tok::l_paren) && "'(' expected.");
+/// \brief Parses a pair of parentheses (and everything between them).
+/// \param AmpAmpTokenType If different than TT_Unknown sets this type for all
+/// double ampersands. This applies for all nested scopes as well.
+///
+/// Returns whether there is a `=` token between the parentheses.
+bool UnwrappedLineParser::parseParens(TokenType AmpAmpTokenType) {
+ assert(FormatTok->is(tok::l_paren) && "'(' expected.");
+ auto *LeftParen = FormatTok;
+ bool SeenEqual = false;
+ const bool MightBeStmtExpr = Tokens->peekNextToken()->is(tok::l_brace);
nextToken();
do {
switch (FormatTok->Tok.getKind()) {
case tok::l_paren:
- parseParens();
+ if (parseParens(AmpAmpTokenType))
+ SeenEqual = true;
if (Style.Language == FormatStyle::LK_Java && FormatTok->is(tok::l_brace))
parseChildBlock();
break;
case tok::r_paren:
+ if (!MightBeStmtExpr && !Line->InMacroBody &&
+ Style.RemoveParentheses > FormatStyle::RPS_Leave) {
+ const auto *Prev = LeftParen->Previous;
+ const auto *Next = Tokens->peekNextToken();
+ const bool DoubleParens =
+ Prev && Prev->is(tok::l_paren) && Next && Next->is(tok::r_paren);
+ const auto *PrevPrev = Prev ? Prev->getPreviousNonComment() : nullptr;
+ const bool Blacklisted =
+ PrevPrev &&
+ (PrevPrev->isOneOf(tok::kw___attribute, tok::kw_decltype) ||
+ (SeenEqual &&
+ (PrevPrev->isOneOf(tok::kw_if, tok::kw_while) ||
+ PrevPrev->endsSequence(tok::kw_constexpr, tok::kw_if))));
+ const bool ReturnParens =
+ Style.RemoveParentheses == FormatStyle::RPS_ReturnStatement &&
+ ((NestedLambdas.empty() && !IsDecltypeAutoFunction) ||
+ (!NestedLambdas.empty() && !NestedLambdas.back())) &&
+ Prev && Prev->isOneOf(tok::kw_return, tok::kw_co_return) && Next &&
+ Next->is(tok::semi);
+ if ((DoubleParens && !Blacklisted) || ReturnParens) {
+ LeftParen->Optional = true;
+ FormatTok->Optional = true;
+ }
+ }
nextToken();
- return;
+ return SeenEqual;
case tok::r_brace:
// A "}" inside parenthesis is an error if there wasn't a matching "{".
- return;
+ return SeenEqual;
case tok::l_square:
tryToParseLambda();
break;
@@ -1984,41 +2553,51 @@ void UnwrappedLineParser::parseParens() {
break;
case tok::at:
nextToken();
- if (FormatTok->Tok.is(tok::l_brace)) {
+ if (FormatTok->is(tok::l_brace)) {
nextToken();
parseBracedList();
}
break;
case tok::equal:
+ SeenEqual = true;
if (Style.isCSharp() && FormatTok->is(TT_FatArrow))
- parseStructuralElement();
+ tryToParseChildBlock();
else
nextToken();
break;
case tok::kw_class:
- if (Style.Language == FormatStyle::LK_JavaScript)
+ if (Style.isJavaScript())
parseRecord(/*ParseAsExpr=*/true);
else
nextToken();
break;
case tok::identifier:
- if (Style.Language == FormatStyle::LK_JavaScript &&
- (FormatTok->is(Keywords.kw_function) ||
- FormatTok->startsSequence(Keywords.kw_async, Keywords.kw_function)))
+ if (Style.isJavaScript() && (FormatTok->is(Keywords.kw_function)))
tryToParseJSFunction();
else
nextToken();
break;
+ case tok::kw_requires: {
+ auto RequiresToken = FormatTok;
+ nextToken();
+ parseRequiresExpression(RequiresToken);
+ break;
+ }
+ case tok::ampamp:
+ if (AmpAmpTokenType != TT_Unknown)
+ FormatTok->setFinalizedType(AmpAmpTokenType);
+ [[fallthrough]];
default:
nextToken();
break;
}
} while (!eof());
+ return SeenEqual;
}
void UnwrappedLineParser::parseSquare(bool LambdaIntroducer) {
if (!LambdaIntroducer) {
- assert(FormatTok->Tok.is(tok::l_square) && "'[' expected.");
+ assert(FormatTok->is(tok::l_square) && "'[' expected.");
if (tryToParseLambda())
return;
}
@@ -2043,7 +2622,7 @@ void UnwrappedLineParser::parseSquare(bool LambdaIntroducer) {
}
case tok::at:
nextToken();
- if (FormatTok->Tok.is(tok::l_brace)) {
+ if (FormatTok->is(tok::l_brace)) {
nextToken();
parseBracedList();
}
@@ -2055,60 +2634,281 @@ void UnwrappedLineParser::parseSquare(bool LambdaIntroducer) {
} while (!eof());
}
-void UnwrappedLineParser::parseIfThenElse() {
- assert(FormatTok->Tok.is(tok::kw_if) && "'if' expected");
+void UnwrappedLineParser::keepAncestorBraces() {
+ if (!Style.RemoveBracesLLVM)
+ return;
+
+ const int MaxNestingLevels = 2;
+ const int Size = NestedTooDeep.size();
+ if (Size >= MaxNestingLevels)
+ NestedTooDeep[Size - MaxNestingLevels] = true;
+ NestedTooDeep.push_back(false);
+}
+
+static FormatToken *getLastNonComment(const UnwrappedLine &Line) {
+ for (const auto &Token : llvm::reverse(Line.Tokens))
+ if (Token.Tok->isNot(tok::comment))
+ return Token.Tok;
+
+ return nullptr;
+}
+
+void UnwrappedLineParser::parseUnbracedBody(bool CheckEOF) {
+ FormatToken *Tok = nullptr;
+
+ if (Style.InsertBraces && !Line->InPPDirective && !Line->Tokens.empty() &&
+ PreprocessorDirectives.empty() && FormatTok->isNot(tok::semi)) {
+ Tok = Style.BraceWrapping.AfterControlStatement == FormatStyle::BWACS_Never
+ ? getLastNonComment(*Line)
+ : Line->Tokens.back().Tok;
+ assert(Tok);
+ if (Tok->BraceCount < 0) {
+ assert(Tok->BraceCount == -1);
+ Tok = nullptr;
+ } else {
+ Tok->BraceCount = -1;
+ }
+ }
+
+ addUnwrappedLine();
+ ++Line->Level;
+ parseStructuralElement();
+
+ if (Tok) {
+ assert(!Line->InPPDirective);
+ Tok = nullptr;
+ for (const auto &L : llvm::reverse(*CurrentLines)) {
+ if (!L.InPPDirective && getLastNonComment(L)) {
+ Tok = L.Tokens.back().Tok;
+ break;
+ }
+ }
+ assert(Tok);
+ ++Tok->BraceCount;
+ }
+
+ if (CheckEOF && eof())
+ addUnwrappedLine();
+
+ --Line->Level;
+}
+
+static void markOptionalBraces(FormatToken *LeftBrace) {
+ if (!LeftBrace)
+ return;
+
+ assert(LeftBrace->is(tok::l_brace));
+
+ FormatToken *RightBrace = LeftBrace->MatchingParen;
+ if (!RightBrace) {
+ assert(!LeftBrace->Optional);
+ return;
+ }
+
+ assert(RightBrace->is(tok::r_brace));
+ assert(RightBrace->MatchingParen == LeftBrace);
+ assert(LeftBrace->Optional == RightBrace->Optional);
+
+ LeftBrace->Optional = true;
+ RightBrace->Optional = true;
+}
+
+void UnwrappedLineParser::handleAttributes() {
+ // Handle AttributeMacro, e.g. `if (x) UNLIKELY`.
+ if (FormatTok->isAttribute())
+ nextToken();
+ else if (FormatTok->is(tok::l_square))
+ handleCppAttributes();
+}
+
+bool UnwrappedLineParser::handleCppAttributes() {
+ // Handle [[likely]] / [[unlikely]] attributes.
+ assert(FormatTok->is(tok::l_square));
+ if (!tryToParseSimpleAttribute())
+ return false;
+ parseSquare();
+ return true;
+}
+
+/// Returns whether \c Tok begins a block.
+bool UnwrappedLineParser::isBlockBegin(const FormatToken &Tok) const {
+ // FIXME: rename the function or make
+ // Tok.isOneOf(tok::l_brace, TT_MacroBlockBegin) work.
+ return Style.isVerilog() ? Keywords.isVerilogBegin(Tok)
+ : Tok.is(tok::l_brace);
+}
+
+FormatToken *UnwrappedLineParser::parseIfThenElse(IfStmtKind *IfKind,
+ bool KeepBraces,
+ bool IsVerilogAssert) {
+ assert((FormatTok->is(tok::kw_if) ||
+ (Style.isVerilog() &&
+ FormatTok->isOneOf(tok::kw_restrict, Keywords.kw_assert,
+ Keywords.kw_assume, Keywords.kw_cover))) &&
+ "'if' expected");
nextToken();
- if (FormatTok->Tok.isOneOf(tok::kw_constexpr, tok::identifier))
+
+ if (IsVerilogAssert) {
+ // Handle `assert #0` and `assert final`.
+ if (FormatTok->is(Keywords.kw_verilogHash)) {
+ nextToken();
+ if (FormatTok->is(tok::numeric_constant))
+ nextToken();
+ } else if (FormatTok->isOneOf(Keywords.kw_final, Keywords.kw_property,
+ Keywords.kw_sequence)) {
+ nextToken();
+ }
+ }
+
+ // TableGen's if statement has the form of `if <cond> then { ... }`.
+ if (Style.isTableGen()) {
+ while (!eof() && FormatTok->isNot(Keywords.kw_then)) {
+ // Simply skip until then. This range only contains a value.
+ nextToken();
+ }
+ }
+
+ // Handle `if !consteval`.
+ if (FormatTok->is(tok::exclaim))
nextToken();
- if (FormatTok->Tok.is(tok::l_paren))
- parseParens();
- // handle [[likely]] / [[unlikely]]
- if (FormatTok->is(tok::l_square) && tryToParseSimpleAttribute())
- parseSquare();
+
+ bool KeepIfBraces = true;
+ if (FormatTok->is(tok::kw_consteval)) {
+ nextToken();
+ } else {
+ KeepIfBraces = !Style.RemoveBracesLLVM || KeepBraces;
+ if (FormatTok->isOneOf(tok::kw_constexpr, tok::identifier))
+ nextToken();
+ if (FormatTok->is(tok::l_paren)) {
+ FormatTok->setFinalizedType(TT_ConditionLParen);
+ parseParens();
+ }
+ }
+ handleAttributes();
+ // The then action is optional in Verilog assert statements.
+ if (IsVerilogAssert && FormatTok->is(tok::semi)) {
+ nextToken();
+ addUnwrappedLine();
+ return nullptr;
+ }
+
bool NeedsUnwrappedLine = false;
- if (FormatTok->Tok.is(tok::l_brace)) {
+ keepAncestorBraces();
+
+ FormatToken *IfLeftBrace = nullptr;
+ IfStmtKind IfBlockKind = IfStmtKind::NotIf;
+
+ if (isBlockBegin(*FormatTok)) {
+ FormatTok->setFinalizedType(TT_ControlStatementLBrace);
+ IfLeftBrace = FormatTok;
CompoundStatementIndenter Indenter(this, Style, Line->Level);
- parseBlock(/*MustBeDeclaration=*/false);
+ parseBlock(/*MustBeDeclaration=*/false, /*AddLevels=*/1u,
+ /*MunchSemi=*/true, KeepIfBraces, &IfBlockKind);
+ setPreviousRBraceType(TT_ControlStatementRBrace);
if (Style.BraceWrapping.BeforeElse)
addUnwrappedLine();
else
NeedsUnwrappedLine = true;
- } else {
+ } else if (IsVerilogAssert && FormatTok->is(tok::kw_else)) {
addUnwrappedLine();
- ++Line->Level;
- parseStructuralElement();
- --Line->Level;
+ } else {
+ parseUnbracedBody();
}
- if (FormatTok->Tok.is(tok::kw_else)) {
+
+ if (Style.RemoveBracesLLVM) {
+ assert(!NestedTooDeep.empty());
+ KeepIfBraces = KeepIfBraces ||
+ (IfLeftBrace && !IfLeftBrace->MatchingParen) ||
+ NestedTooDeep.back() || IfBlockKind == IfStmtKind::IfOnly ||
+ IfBlockKind == IfStmtKind::IfElseIf;
+ }
+
+ bool KeepElseBraces = KeepIfBraces;
+ FormatToken *ElseLeftBrace = nullptr;
+ IfStmtKind Kind = IfStmtKind::IfOnly;
+
+ if (FormatTok->is(tok::kw_else)) {
+ if (Style.RemoveBracesLLVM) {
+ NestedTooDeep.back() = false;
+ Kind = IfStmtKind::IfElse;
+ }
nextToken();
- // handle [[likely]] / [[unlikely]]
- if (FormatTok->Tok.is(tok::l_square) && tryToParseSimpleAttribute())
- parseSquare();
- if (FormatTok->Tok.is(tok::l_brace)) {
+ handleAttributes();
+ if (isBlockBegin(*FormatTok)) {
+ const bool FollowedByIf = Tokens->peekNextToken()->is(tok::kw_if);
+ FormatTok->setFinalizedType(TT_ElseLBrace);
+ ElseLeftBrace = FormatTok;
CompoundStatementIndenter Indenter(this, Style, Line->Level);
- parseBlock(/*MustBeDeclaration=*/false);
+ IfStmtKind ElseBlockKind = IfStmtKind::NotIf;
+ FormatToken *IfLBrace =
+ parseBlock(/*MustBeDeclaration=*/false, /*AddLevels=*/1u,
+ /*MunchSemi=*/true, KeepElseBraces, &ElseBlockKind);
+ setPreviousRBraceType(TT_ElseRBrace);
+ if (FormatTok->is(tok::kw_else)) {
+ KeepElseBraces = KeepElseBraces ||
+ ElseBlockKind == IfStmtKind::IfOnly ||
+ ElseBlockKind == IfStmtKind::IfElseIf;
+ } else if (FollowedByIf && IfLBrace && !IfLBrace->Optional) {
+ KeepElseBraces = true;
+ assert(ElseLeftBrace->MatchingParen);
+ markOptionalBraces(ElseLeftBrace);
+ }
addUnwrappedLine();
- } else if (FormatTok->Tok.is(tok::kw_if)) {
- FormatToken *Previous = AllTokens[Tokens->getPosition() - 1];
- bool PrecededByComment = Previous->is(tok::comment);
- if (PrecededByComment) {
+ } else if (!IsVerilogAssert && FormatTok->is(tok::kw_if)) {
+ const FormatToken *Previous = Tokens->getPreviousToken();
+ assert(Previous);
+ const bool IsPrecededByComment = Previous->is(tok::comment);
+ if (IsPrecededByComment) {
addUnwrappedLine();
++Line->Level;
}
- parseIfThenElse();
- if (PrecededByComment)
+ bool TooDeep = true;
+ if (Style.RemoveBracesLLVM) {
+ Kind = IfStmtKind::IfElseIf;
+ TooDeep = NestedTooDeep.pop_back_val();
+ }
+ ElseLeftBrace = parseIfThenElse(/*IfKind=*/nullptr, KeepIfBraces);
+ if (Style.RemoveBracesLLVM)
+ NestedTooDeep.push_back(TooDeep);
+ if (IsPrecededByComment)
--Line->Level;
} else {
+ parseUnbracedBody(/*CheckEOF=*/true);
+ }
+ } else {
+ KeepIfBraces = KeepIfBraces || IfBlockKind == IfStmtKind::IfElse;
+ if (NeedsUnwrappedLine)
addUnwrappedLine();
- ++Line->Level;
- parseStructuralElement();
- if (FormatTok->is(tok::eof))
- addUnwrappedLine();
- --Line->Level;
+ }
+
+ if (!Style.RemoveBracesLLVM)
+ return nullptr;
+
+ assert(!NestedTooDeep.empty());
+ KeepElseBraces = KeepElseBraces ||
+ (ElseLeftBrace && !ElseLeftBrace->MatchingParen) ||
+ NestedTooDeep.back();
+
+ NestedTooDeep.pop_back();
+
+ if (!KeepIfBraces && !KeepElseBraces) {
+ markOptionalBraces(IfLeftBrace);
+ markOptionalBraces(ElseLeftBrace);
+ } else if (IfLeftBrace) {
+ FormatToken *IfRightBrace = IfLeftBrace->MatchingParen;
+ if (IfRightBrace) {
+ assert(IfRightBrace->MatchingParen == IfLeftBrace);
+ assert(!IfLeftBrace->Optional);
+ assert(!IfRightBrace->Optional);
+ IfLeftBrace->MatchingParen = nullptr;
+ IfRightBrace->MatchingParen = nullptr;
}
- } else if (NeedsUnwrappedLine) {
- addUnwrappedLine();
}
+
+ if (IfKind)
+ *IfKind = Kind;
+
+ return IfLeftBrace;
}
void UnwrappedLineParser::parseTryCatch() {
@@ -2132,7 +2932,7 @@ void UnwrappedLineParser::parseTryCatch() {
FormatTok->is(tok::l_brace)) {
do {
nextToken();
- } while (!FormatTok->is(tok::r_brace));
+ } while (FormatTok->isNot(tok::r_brace));
nextToken();
}
@@ -2143,18 +2943,19 @@ void UnwrappedLineParser::parseTryCatch() {
}
}
// Parse try with resource.
- if (Style.Language == FormatStyle::LK_Java && FormatTok->is(tok::l_paren)) {
+ if (Style.Language == FormatStyle::LK_Java && FormatTok->is(tok::l_paren))
parseParens();
- }
+
+ keepAncestorBraces();
+
if (FormatTok->is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
- parseBlock(/*MustBeDeclaration=*/false);
- if (Style.BraceWrapping.BeforeCatch) {
+ parseBlock();
+ if (Style.BraceWrapping.BeforeCatch)
addUnwrappedLine();
- } else {
+ else
NeedsUnwrappedLine = true;
- }
- } else if (!FormatTok->is(tok::kw_catch)) {
+ } else if (FormatTok->isNot(tok::kw_catch)) {
// The C++ standard requires a compound-statement after a try.
// If there's none, we try to assume there's a structuralElement
// and try to continue.
@@ -2163,35 +2964,43 @@ void UnwrappedLineParser::parseTryCatch() {
parseStructuralElement();
--Line->Level;
}
- while (1) {
+ while (true) {
if (FormatTok->is(tok::at))
nextToken();
if (!(FormatTok->isOneOf(tok::kw_catch, Keywords.kw___except,
tok::kw___finally) ||
- ((Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript) &&
+ ((Style.Language == FormatStyle::LK_Java || Style.isJavaScript()) &&
FormatTok->is(Keywords.kw_finally)) ||
- (FormatTok->Tok.isObjCAtKeyword(tok::objc_catch) ||
- FormatTok->Tok.isObjCAtKeyword(tok::objc_finally))))
+ (FormatTok->isObjCAtKeyword(tok::objc_catch) ||
+ FormatTok->isObjCAtKeyword(tok::objc_finally)))) {
break;
+ }
nextToken();
while (FormatTok->isNot(tok::l_brace)) {
if (FormatTok->is(tok::l_paren)) {
parseParens();
continue;
}
- if (FormatTok->isOneOf(tok::semi, tok::r_brace, tok::eof))
+ if (FormatTok->isOneOf(tok::semi, tok::r_brace, tok::eof)) {
+ if (Style.RemoveBracesLLVM)
+ NestedTooDeep.pop_back();
return;
+ }
nextToken();
}
NeedsUnwrappedLine = false;
+ Line->MustBeDeclaration = false;
CompoundStatementIndenter Indenter(this, Style, Line->Level);
- parseBlock(/*MustBeDeclaration=*/false);
+ parseBlock();
if (Style.BraceWrapping.BeforeCatch)
addUnwrappedLine();
else
NeedsUnwrappedLine = true;
}
+
+ if (Style.RemoveBracesLLVM)
+ NestedTooDeep.pop_back();
+
if (NeedsUnwrappedLine)
addUnwrappedLine();
}
@@ -2206,14 +3015,19 @@ void UnwrappedLineParser::parseNamespace() {
parseParens();
} else {
while (FormatTok->isOneOf(tok::identifier, tok::coloncolon, tok::kw_inline,
- tok::l_square)) {
+ tok::l_square, tok::period, tok::l_paren) ||
+ (Style.isCSharp() && FormatTok->is(tok::kw_union))) {
if (FormatTok->is(tok::l_square))
parseSquare();
+ else if (FormatTok->is(tok::l_paren))
+ parseParens();
else
nextToken();
}
}
- if (FormatTok->Tok.is(tok::l_brace)) {
+ if (FormatTok->is(tok::l_brace)) {
+ FormatTok->setFinalizedType(TT_NamespaceLBrace);
+
if (ShouldBreakBeforeBrace(Style, InitialToken))
addUnwrappedLine();
@@ -2232,14 +3046,11 @@ void UnwrappedLineParser::parseNamespace() {
if (ManageWhitesmithsBraces)
++Line->Level;
- parseBlock(/*MustBeDeclaration=*/true, AddLevels,
- /*MunchSemi=*/true,
- /*UnindentWhitesmithsBraces=*/ManageWhitesmithsBraces);
-
// Munch the semicolon after a namespace. This is more common than one would
// think. Putting the semicolon into its own line is very ugly.
- if (FormatTok->Tok.is(tok::semi))
- nextToken();
+ parseBlock(/*MustBeDeclaration=*/true, AddLevels, /*MunchSemi=*/true,
+ /*KeepBraces=*/true, /*IfKind=*/nullptr,
+ ManageWhitesmithsBraces);
addUnwrappedLine(AddLevels > 0 ? LineLevel::Remove : LineLevel::Keep);
@@ -2255,6 +3066,11 @@ void UnwrappedLineParser::parseNew() {
if (Style.isCSharp()) {
do {
+ // Handle constructor invocation, e.g. `new(field: value)`.
+ if (FormatTok->is(tok::l_paren))
+ parseParens();
+
+ // Handle array initialization syntax, e.g. `new[] {10, 20, 30}`.
if (FormatTok->is(tok::l_brace))
parseBracedList();
@@ -2287,49 +3103,86 @@ void UnwrappedLineParser::parseNew() {
} while (!eof());
}
-void UnwrappedLineParser::parseForOrWhileLoop() {
- assert(FormatTok->isOneOf(tok::kw_for, tok::kw_while, TT_ForEachMacro) &&
+void UnwrappedLineParser::parseLoopBody(bool KeepBraces, bool WrapRightBrace) {
+ keepAncestorBraces();
+
+ if (isBlockBegin(*FormatTok)) {
+ FormatTok->setFinalizedType(TT_ControlStatementLBrace);
+ FormatToken *LeftBrace = FormatTok;
+ CompoundStatementIndenter Indenter(this, Style, Line->Level);
+ parseBlock(/*MustBeDeclaration=*/false, /*AddLevels=*/1u,
+ /*MunchSemi=*/true, KeepBraces);
+ setPreviousRBraceType(TT_ControlStatementRBrace);
+ if (!KeepBraces) {
+ assert(!NestedTooDeep.empty());
+ if (!NestedTooDeep.back())
+ markOptionalBraces(LeftBrace);
+ }
+ if (WrapRightBrace)
+ addUnwrappedLine();
+ } else {
+ parseUnbracedBody();
+ }
+
+ if (!KeepBraces)
+ NestedTooDeep.pop_back();
+}
+
+void UnwrappedLineParser::parseForOrWhileLoop(bool HasParens) {
+ assert((FormatTok->isOneOf(tok::kw_for, tok::kw_while, TT_ForEachMacro) ||
+ (Style.isVerilog() &&
+ FormatTok->isOneOf(Keywords.kw_always, Keywords.kw_always_comb,
+ Keywords.kw_always_ff, Keywords.kw_always_latch,
+ Keywords.kw_final, Keywords.kw_initial,
+ Keywords.kw_foreach, Keywords.kw_forever,
+ Keywords.kw_repeat))) &&
"'for', 'while' or foreach macro expected");
+ const bool KeepBraces = !Style.RemoveBracesLLVM ||
+ !FormatTok->isOneOf(tok::kw_for, tok::kw_while);
+
nextToken();
// JS' for await ( ...
- if (Style.Language == FormatStyle::LK_JavaScript &&
- FormatTok->is(Keywords.kw_await))
+ if (Style.isJavaScript() && FormatTok->is(Keywords.kw_await))
+ nextToken();
+ if (Style.isCpp() && FormatTok->is(tok::kw_co_await))
nextToken();
- if (FormatTok->Tok.is(tok::l_paren))
+ if (HasParens && FormatTok->is(tok::l_paren)) {
+ // The type is only set for Verilog basically because we were afraid to
+ // change the existing behavior for loops. See the discussion on D121756 for
+ // details.
+ if (Style.isVerilog())
+ FormatTok->setFinalizedType(TT_ConditionLParen);
parseParens();
- if (FormatTok->Tok.is(tok::l_brace)) {
- CompoundStatementIndenter Indenter(this, Style, Line->Level);
- parseBlock(/*MustBeDeclaration=*/false);
- addUnwrappedLine();
- } else {
+ }
+
+ if (Style.isVerilog()) {
+ // Event control.
+ parseVerilogSensitivityList();
+ } else if (Style.AllowShortLoopsOnASingleLine && FormatTok->is(tok::semi) &&
+ Tokens->getPreviousToken()->is(tok::r_paren)) {
+ nextToken();
addUnwrappedLine();
- ++Line->Level;
- parseStructuralElement();
- --Line->Level;
+ return;
}
+
+ handleAttributes();
+ parseLoopBody(KeepBraces, /*WrapRightBrace=*/true);
}
void UnwrappedLineParser::parseDoWhile() {
- assert(FormatTok->Tok.is(tok::kw_do) && "'do' expected");
+ assert(FormatTok->is(tok::kw_do) && "'do' expected");
nextToken();
- if (FormatTok->Tok.is(tok::l_brace)) {
- CompoundStatementIndenter Indenter(this, Style, Line->Level);
- parseBlock(/*MustBeDeclaration=*/false);
- if (Style.BraceWrapping.BeforeWhile)
- addUnwrappedLine();
- } else {
- addUnwrappedLine();
- ++Line->Level;
- parseStructuralElement();
- --Line->Level;
- }
+
+ parseLoopBody(/*KeepBraces=*/true, Style.BraceWrapping.BeforeWhile);
// FIXME: Add error handling.
- if (!FormatTok->Tok.is(tok::kw_while)) {
+ if (FormatTok->isNot(tok::kw_while)) {
addUnwrappedLine();
return;
}
+ FormatTok->setFinalizedType(TT_DoWhile);
+
// If in Whitesmiths mode, the line with the while() needs to be indented
// to the same level as the block.
if (Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths)
@@ -2348,19 +3201,19 @@ void UnwrappedLineParser::parseLabel(bool LeftAlignLabel) {
Line->Level = 0;
if (!Style.IndentCaseBlocks && CommentsBeforeNextToken.empty() &&
- FormatTok->Tok.is(tok::l_brace)) {
+ FormatTok->is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Line->Level,
Style.BraceWrapping.AfterCaseLabel,
Style.BraceWrapping.IndentBraces);
- parseBlock(/*MustBeDeclaration=*/false);
- if (FormatTok->Tok.is(tok::kw_break)) {
+ parseBlock();
+ if (FormatTok->is(tok::kw_break)) {
if (Style.BraceWrapping.AfterControlStatement ==
FormatStyle::BWACS_Always) {
addUnwrappedLine();
if (!Style.IndentCaseBlocks &&
Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths) {
- Line->Level++;
+ ++Line->Level;
}
}
parseStructuralElement();
@@ -2379,23 +3232,32 @@ void UnwrappedLineParser::parseLabel(bool LeftAlignLabel) {
}
void UnwrappedLineParser::parseCaseLabel() {
- assert(FormatTok->Tok.is(tok::kw_case) && "'case' expected");
+ assert(FormatTok->is(tok::kw_case) && "'case' expected");
// FIXME: fix handling of complex expressions here.
do {
nextToken();
- } while (!eof() && !FormatTok->Tok.is(tok::colon));
+ if (FormatTok->is(tok::colon)) {
+ FormatTok->setFinalizedType(TT_CaseLabelColon);
+ break;
+ }
+ } while (!eof());
parseLabel();
}
void UnwrappedLineParser::parseSwitch() {
- assert(FormatTok->Tok.is(tok::kw_switch) && "'switch' expected");
+ assert(FormatTok->is(tok::kw_switch) && "'switch' expected");
nextToken();
- if (FormatTok->Tok.is(tok::l_paren))
+ if (FormatTok->is(tok::l_paren))
parseParens();
- if (FormatTok->Tok.is(tok::l_brace)) {
+
+ keepAncestorBraces();
+
+ if (FormatTok->is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
- parseBlock(/*MustBeDeclaration=*/false);
+ FormatTok->setFinalizedType(TT_ControlStatementLBrace);
+ parseBlock();
+ setPreviousRBraceType(TT_ControlStatementRBrace);
addUnwrappedLine();
} else {
addUnwrappedLine();
@@ -2403,140 +3265,442 @@ void UnwrappedLineParser::parseSwitch() {
parseStructuralElement();
--Line->Level;
}
+
+ if (Style.RemoveBracesLLVM)
+ NestedTooDeep.pop_back();
+}
+
+// Operators that can follow a C variable.
+static bool isCOperatorFollowingVar(tok::TokenKind kind) {
+ switch (kind) {
+ case tok::ampamp:
+ case tok::ampequal:
+ case tok::arrow:
+ case tok::caret:
+ case tok::caretequal:
+ case tok::comma:
+ case tok::ellipsis:
+ case tok::equal:
+ case tok::equalequal:
+ case tok::exclaim:
+ case tok::exclaimequal:
+ case tok::greater:
+ case tok::greaterequal:
+ case tok::greatergreater:
+ case tok::greatergreaterequal:
+ case tok::l_paren:
+ case tok::l_square:
+ case tok::less:
+ case tok::lessequal:
+ case tok::lessless:
+ case tok::lesslessequal:
+ case tok::minus:
+ case tok::minusequal:
+ case tok::minusminus:
+ case tok::percent:
+ case tok::percentequal:
+ case tok::period:
+ case tok::pipe:
+ case tok::pipeequal:
+ case tok::pipepipe:
+ case tok::plus:
+ case tok::plusequal:
+ case tok::plusplus:
+ case tok::question:
+ case tok::r_brace:
+ case tok::r_paren:
+ case tok::r_square:
+ case tok::semi:
+ case tok::slash:
+ case tok::slashequal:
+ case tok::star:
+ case tok::starequal:
+ return true;
+ default:
+ return false;
+ }
}
void UnwrappedLineParser::parseAccessSpecifier() {
+ FormatToken *AccessSpecifierCandidate = FormatTok;
nextToken();
// Understand Qt's slots.
if (FormatTok->isOneOf(Keywords.kw_slots, Keywords.kw_qslots))
nextToken();
// Otherwise, we don't know what it is, and we'd better keep the next token.
- if (FormatTok->Tok.is(tok::colon))
+ if (FormatTok->is(tok::colon)) {
nextToken();
- addUnwrappedLine();
+ addUnwrappedLine();
+ } else if (FormatTok->isNot(tok::coloncolon) &&
+ !isCOperatorFollowingVar(FormatTok->Tok.getKind())) {
+ // Not a variable name nor namespace name.
+ addUnwrappedLine();
+ } else if (AccessSpecifierCandidate) {
+ // Consider the access specifier to be a C identifier.
+ AccessSpecifierCandidate->Tok.setKind(tok::identifier);
+ }
}
-void UnwrappedLineParser::parseConcept() {
- assert(FormatTok->Tok.is(tok::kw_concept) && "'concept' expected");
- nextToken();
- if (!FormatTok->Tok.is(tok::identifier))
- return;
- nextToken();
- if (!FormatTok->Tok.is(tok::equal))
- return;
+/// \brief Parses a requires, decides if it is a clause or an expression.
+/// \pre The current token has to be the requires keyword.
+/// \returns true if it parsed a clause.
+bool clang::format::UnwrappedLineParser::parseRequires() {
+ assert(FormatTok->is(tok::kw_requires) && "'requires' expected");
+ auto RequiresToken = FormatTok;
+
+ // We try to guess if it is a requires clause, or a requires expression. For
+ // that we first consume the keyword and check the next token.
nextToken();
- if (FormatTok->Tok.is(tok::kw_requires)) {
- nextToken();
- parseRequiresExpression(Line->Level);
- } else {
- parseConstraintExpression(Line->Level);
- }
-}
-void UnwrappedLineParser::parseRequiresExpression(unsigned int OriginalLevel) {
- // requires (R range)
- if (FormatTok->Tok.is(tok::l_paren)) {
- parseParens();
- if (Style.IndentRequires && OriginalLevel != Line->Level) {
- addUnwrappedLine();
- --Line->Level;
- }
+ switch (FormatTok->Tok.getKind()) {
+ case tok::l_brace:
+ // This can only be an expression, never a clause.
+ parseRequiresExpression(RequiresToken);
+ return false;
+ case tok::l_paren:
+ // Clauses and expression can start with a paren, it's unclear what we have.
+ break;
+ default:
+ // All other tokens can only be a clause.
+ parseRequiresClause(RequiresToken);
+ return true;
}
- if (FormatTok->Tok.is(tok::l_brace)) {
- if (Style.BraceWrapping.AfterFunction)
- addUnwrappedLine();
- FormatTok->setType(TT_FunctionLBrace);
- parseBlock(/*MustBeDeclaration=*/false);
- addUnwrappedLine();
- } else {
- parseConstraintExpression(OriginalLevel);
+ // Looking forward we would have to decide if there are function declaration
+ // like arguments to the requires expression:
+ // requires (T t) {
+ // Or there is a constraint expression for the requires clause:
+ // requires (C<T> && ...
+
+ // But first let's look behind.
+ auto *PreviousNonComment = RequiresToken->getPreviousNonComment();
+
+ if (!PreviousNonComment ||
+ PreviousNonComment->is(TT_RequiresExpressionLBrace)) {
+ // If there is no token, or an expression left brace, we are a requires
+ // clause within a requires expression.
+ parseRequiresClause(RequiresToken);
+ return true;
}
-}
-void UnwrappedLineParser::parseConstraintExpression(
- unsigned int OriginalLevel) {
- // requires Id<T> && Id<T> || Id<T>
- while (
- FormatTok->isOneOf(tok::identifier, tok::kw_requires, tok::coloncolon)) {
- nextToken();
- while (FormatTok->isOneOf(tok::identifier, tok::coloncolon, tok::less,
- tok::greater, tok::comma, tok::ellipsis)) {
- if (FormatTok->Tok.is(tok::less)) {
- parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
- /*ClosingBraceKind=*/tok::greater);
- continue;
- }
- nextToken();
- }
- if (FormatTok->Tok.is(tok::kw_requires)) {
- parseRequiresExpression(OriginalLevel);
+ switch (PreviousNonComment->Tok.getKind()) {
+ case tok::greater:
+ case tok::r_paren:
+ case tok::kw_noexcept:
+ case tok::kw_const:
+ // This is a requires clause.
+ parseRequiresClause(RequiresToken);
+ return true;
+ case tok::amp:
+ case tok::ampamp: {
+ // This can be either:
+ // if (... && requires (T t) ...)
+ // Or
+ // void member(...) && requires (C<T> ...
+ // We check the one token before that for a const:
+ // void member(...) const && requires (C<T> ...
+ auto PrevPrev = PreviousNonComment->getPreviousNonComment();
+ if (PrevPrev && PrevPrev->is(tok::kw_const)) {
+ parseRequiresClause(RequiresToken);
+ return true;
}
- if (FormatTok->Tok.is(tok::less)) {
- parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
- /*ClosingBraceKind=*/tok::greater);
+ break;
+ }
+ default:
+ if (PreviousNonComment->isTypeOrIdentifier()) {
+ // This is a requires clause.
+ parseRequiresClause(RequiresToken);
+ return true;
}
+ // It's an expression.
+ parseRequiresExpression(RequiresToken);
+ return false;
+ }
- if (FormatTok->Tok.is(tok::l_paren)) {
- parseParens();
- }
- if (FormatTok->Tok.is(tok::l_brace)) {
- if (Style.BraceWrapping.AfterFunction)
- addUnwrappedLine();
- FormatTok->setType(TT_FunctionLBrace);
- parseBlock(/*MustBeDeclaration=*/false);
- }
- if (FormatTok->Tok.is(tok::semi)) {
- // Eat any trailing semi.
- nextToken();
- addUnwrappedLine();
- }
- if (FormatTok->Tok.is(tok::colon)) {
- return;
- }
- if (!FormatTok->Tok.isOneOf(tok::ampamp, tok::pipepipe)) {
- if (FormatTok->Previous &&
- !FormatTok->Previous->isOneOf(tok::identifier, tok::kw_requires,
- tok::coloncolon)) {
- addUnwrappedLine();
+ // Now we look forward and try to check if the paren content is a parameter
+ // list. The parameters can be cv-qualified and contain references or
+ // pointers.
+ // So we want basically to check for TYPE NAME, but TYPE can contain all kinds
+ // of stuff: typename, const, *, &, &&, ::, identifiers.
+
+ unsigned StoredPosition = Tokens->getPosition();
+ FormatToken *NextToken = Tokens->getNextToken();
+ int Lookahead = 0;
+ auto PeekNext = [&Lookahead, &NextToken, this] {
+ ++Lookahead;
+ NextToken = Tokens->getNextToken();
+ };
+
+ bool FoundType = false;
+ bool LastWasColonColon = false;
+ int OpenAngles = 0;
+
+ for (; Lookahead < 50; PeekNext()) {
+ switch (NextToken->Tok.getKind()) {
+ case tok::kw_volatile:
+ case tok::kw_const:
+ case tok::comma:
+ if (OpenAngles == 0) {
+ FormatTok = Tokens->setPosition(StoredPosition);
+ parseRequiresExpression(RequiresToken);
+ return false;
}
- if (Style.IndentRequires && OriginalLevel != Line->Level) {
- --Line->Level;
+ break;
+ case tok::r_paren:
+ case tok::pipepipe:
+ FormatTok = Tokens->setPosition(StoredPosition);
+ parseRequiresClause(RequiresToken);
+ return true;
+ case tok::eof:
+ // Break out of the loop.
+ Lookahead = 50;
+ break;
+ case tok::coloncolon:
+ LastWasColonColon = true;
+ break;
+ case tok::identifier:
+ if (FoundType && !LastWasColonColon && OpenAngles == 0) {
+ FormatTok = Tokens->setPosition(StoredPosition);
+ parseRequiresExpression(RequiresToken);
+ return false;
+ }
+ FoundType = true;
+ LastWasColonColon = false;
+ break;
+ case tok::less:
+ ++OpenAngles;
+ break;
+ case tok::greater:
+ --OpenAngles;
+ break;
+ default:
+ if (NextToken->isSimpleTypeSpecifier()) {
+ FormatTok = Tokens->setPosition(StoredPosition);
+ parseRequiresExpression(RequiresToken);
+ return false;
}
break;
- } else {
- FormatTok->setType(TT_ConstraintJunctions);
}
+ }
+ // This seems to be a complicated expression, just assume it's a clause.
+ FormatTok = Tokens->setPosition(StoredPosition);
+ parseRequiresClause(RequiresToken);
+ return true;
+}
- nextToken();
+/// \brief Parses a requires clause.
+/// \param RequiresToken The requires keyword token, which starts this clause.
+/// \pre We need to be on the next token after the requires keyword.
+/// \sa parseRequiresExpression
+///
+/// Returns if it either has finished parsing the clause, or it detects, that
+/// the clause is incorrect.
+void UnwrappedLineParser::parseRequiresClause(FormatToken *RequiresToken) {
+ assert(FormatTok->getPreviousNonComment() == RequiresToken);
+ assert(RequiresToken->is(tok::kw_requires) && "'requires' expected");
+
+ // If there is no previous token, we are within a requires expression,
+ // otherwise we will always have the template or function declaration in front
+ // of it.
+ bool InRequiresExpression =
+ !RequiresToken->Previous ||
+ RequiresToken->Previous->is(TT_RequiresExpressionLBrace);
+
+ RequiresToken->setFinalizedType(InRequiresExpression
+ ? TT_RequiresClauseInARequiresExpression
+ : TT_RequiresClause);
+
+ // NOTE: parseConstraintExpression is only ever called from this function.
+ // It could be inlined into here.
+ parseConstraintExpression();
+
+ if (!InRequiresExpression)
+ FormatTok->Previous->ClosesRequiresClause = true;
+}
+
+/// \brief Parses a requires expression.
+/// \param RequiresToken The requires keyword token, which starts this clause.
+/// \pre We need to be on the next token after the requires keyword.
+/// \sa parseRequiresClause
+///
+/// Returns if it either has finished parsing the expression, or it detects,
+/// that the expression is incorrect.
+void UnwrappedLineParser::parseRequiresExpression(FormatToken *RequiresToken) {
+ assert(FormatTok->getPreviousNonComment() == RequiresToken);
+ assert(RequiresToken->is(tok::kw_requires) && "'requires' expected");
+
+ RequiresToken->setFinalizedType(TT_RequiresExpression);
+
+ if (FormatTok->is(tok::l_paren)) {
+ FormatTok->setFinalizedType(TT_RequiresExpressionLParen);
+ parseParens();
+ }
+
+ if (FormatTok->is(tok::l_brace)) {
+ FormatTok->setFinalizedType(TT_RequiresExpressionLBrace);
+ parseChildBlock();
}
}
-void UnwrappedLineParser::parseRequires() {
- assert(FormatTok->Tok.is(tok::kw_requires) && "'requires' expected");
+/// \brief Parses a constraint expression.
+///
+/// This is the body of a requires clause. It returns, when the parsing is
+/// complete, or the expression is incorrect.
+void UnwrappedLineParser::parseConstraintExpression() {
+ // The special handling for lambdas is needed since tryToParseLambda() eats a
+ // token and if a requires expression is the last part of a requires clause
+ // and followed by an attribute like [[nodiscard]] the ClosesRequiresClause is
+ // not set on the correct token. Thus we need to be aware if we even expect a
+ // lambda to be possible.
+ // template <typename T> requires requires { ... } [[nodiscard]] ...;
+ bool LambdaNextTimeAllowed = true;
+
+ // Within lambda declarations, it is permitted to put a requires clause after
+ // its template parameter list, which would place the requires clause right
+ // before the parentheses of the parameters of the lambda declaration. Thus,
+ // we track if we expect to see grouping parentheses at all.
+ // Without this check, `requires foo<T> (T t)` in the below example would be
+ // seen as the whole requires clause, accidentally eating the parameters of
+ // the lambda.
+ // [&]<typename T> requires foo<T> (T t) { ... };
+ bool TopLevelParensAllowed = true;
- unsigned OriginalLevel = Line->Level;
- if (FormatTok->Previous && FormatTok->Previous->is(tok::greater)) {
- addUnwrappedLine();
- if (Style.IndentRequires) {
- Line->Level++;
+ do {
+ bool LambdaThisTimeAllowed = std::exchange(LambdaNextTimeAllowed, false);
+
+ switch (FormatTok->Tok.getKind()) {
+ case tok::kw_requires: {
+ auto RequiresToken = FormatTok;
+ nextToken();
+ parseRequiresExpression(RequiresToken);
+ break;
}
- }
- nextToken();
- parseRequiresExpression(OriginalLevel);
+ case tok::l_paren:
+ if (!TopLevelParensAllowed)
+ return;
+ parseParens(/*AmpAmpTokenType=*/TT_BinaryOperator);
+ TopLevelParensAllowed = false;
+ break;
+
+ case tok::l_square:
+ if (!LambdaThisTimeAllowed || !tryToParseLambda())
+ return;
+ break;
+
+ case tok::kw_const:
+ case tok::semi:
+ case tok::kw_class:
+ case tok::kw_struct:
+ case tok::kw_union:
+ return;
+
+ case tok::l_brace:
+ // Potential function body.
+ return;
+
+ case tok::ampamp:
+ case tok::pipepipe:
+ FormatTok->setFinalizedType(TT_BinaryOperator);
+ nextToken();
+ LambdaNextTimeAllowed = true;
+ TopLevelParensAllowed = true;
+ break;
+
+ case tok::comma:
+ case tok::comment:
+ LambdaNextTimeAllowed = LambdaThisTimeAllowed;
+ nextToken();
+ break;
+
+ case tok::kw_sizeof:
+ case tok::greater:
+ case tok::greaterequal:
+ case tok::greatergreater:
+ case tok::less:
+ case tok::lessequal:
+ case tok::lessless:
+ case tok::equalequal:
+ case tok::exclaim:
+ case tok::exclaimequal:
+ case tok::plus:
+ case tok::minus:
+ case tok::star:
+ case tok::slash:
+ LambdaNextTimeAllowed = true;
+ TopLevelParensAllowed = true;
+ // Just eat them.
+ nextToken();
+ break;
+
+ case tok::numeric_constant:
+ case tok::coloncolon:
+ case tok::kw_true:
+ case tok::kw_false:
+ TopLevelParensAllowed = false;
+ // Just eat them.
+ nextToken();
+ break;
+
+ case tok::kw_static_cast:
+ case tok::kw_const_cast:
+ case tok::kw_reinterpret_cast:
+ case tok::kw_dynamic_cast:
+ nextToken();
+ if (FormatTok->isNot(tok::less))
+ return;
+
+ nextToken();
+ parseBracedList(/*IsAngleBracket=*/true);
+ break;
+
+ default:
+ if (!FormatTok->Tok.getIdentifierInfo()) {
+ // Identifiers are part of the default case, we check for more then
+ // tok::identifier to handle builtin type traits.
+ return;
+ }
+
+ // We need to differentiate identifiers for a template deduction guide,
+ // variables, or function return types (the constraint expression has
+ // ended before that), and basically all other cases. But it's easier to
+ // check the other way around.
+ assert(FormatTok->Previous);
+ switch (FormatTok->Previous->Tok.getKind()) {
+ case tok::coloncolon: // Nested identifier.
+ case tok::ampamp: // Start of a function or variable for the
+ case tok::pipepipe: // constraint expression. (binary)
+ case tok::exclaim: // The same as above, but unary.
+ case tok::kw_requires: // Initial identifier of a requires clause.
+ case tok::equal: // Initial identifier of a concept declaration.
+ break;
+ default:
+ return;
+ }
+
+ // Read identifier with optional template declaration.
+ nextToken();
+ if (FormatTok->is(tok::less)) {
+ nextToken();
+ parseBracedList(/*IsAngleBracket=*/true);
+ }
+ TopLevelParensAllowed = false;
+ break;
+ }
+ } while (!eof());
}
bool UnwrappedLineParser::parseEnum() {
+ const FormatToken &InitialToken = *FormatTok;
+
// Won't be 'enum' for NS_ENUMs.
- if (FormatTok->Tok.is(tok::kw_enum))
+ if (FormatTok->is(tok::kw_enum))
nextToken();
// In TypeScript, "enum" can also be used as property name, e.g. in interface
// declarations. An "enum" keyword followed by a colon would be a syntax
// error and thus assume it is just an identifier.
- if (Style.Language == FormatStyle::LK_JavaScript &&
- FormatTok->isOneOf(tok::colon, tok::question))
+ if (Style.isJavaScript() && FormatTok->isOneOf(tok::colon, tok::question))
return false;
// In protobuf, "enum" can be used as a field name.
@@ -2544,16 +3708,26 @@ bool UnwrappedLineParser::parseEnum() {
return false;
// Eat up enum class ...
- if (FormatTok->Tok.is(tok::kw_class) || FormatTok->Tok.is(tok::kw_struct))
+ if (FormatTok->isOneOf(tok::kw_class, tok::kw_struct))
nextToken();
while (FormatTok->Tok.getIdentifierInfo() ||
FormatTok->isOneOf(tok::colon, tok::coloncolon, tok::less,
- tok::greater, tok::comma, tok::question)) {
- nextToken();
+ tok::greater, tok::comma, tok::question,
+ tok::l_square, tok::r_square)) {
+ if (Style.isVerilog()) {
+ FormatTok->setFinalizedType(TT_VerilogDimensionedTypeName);
+ nextToken();
+ // In Verilog the base type can have dimensions.
+ while (FormatTok->is(tok::l_square))
+ parseSquare();
+ } else {
+ nextToken();
+ }
// We can have macros or attributes in between 'enum' and the enum name.
if (FormatTok->is(tok::l_paren))
parseParens();
+ assert(FormatTok->isNot(TT_AttributeSquare));
if (FormatTok->is(tok::identifier)) {
nextToken();
// If there are two identifiers in a row, this is likely an elaborate
@@ -2566,6 +3740,7 @@ bool UnwrappedLineParser::parseEnum() {
// Just a declaration or something is wrong.
if (FormatTok->isNot(tok::l_brace))
return true;
+ FormatTok->setFinalizedType(TT_EnumLBrace);
FormatTok->setBlockKind(BK_Block);
if (Style.Language == FormatStyle::LK_Java) {
@@ -2578,16 +3753,17 @@ bool UnwrappedLineParser::parseEnum() {
return true;
}
- if (!Style.AllowShortEnumsOnASingleLine)
+ if (!Style.AllowShortEnumsOnASingleLine &&
+ ShouldBreakBeforeBrace(Style, InitialToken)) {
addUnwrappedLine();
+ }
// Parse enum body.
nextToken();
if (!Style.AllowShortEnumsOnASingleLine) {
addUnwrappedLine();
Line->Level += 1;
}
- bool HasError = !parseBracedList(/*ContinueOnSemicolons=*/true,
- /*IsEnum=*/true);
+ bool HasError = !parseBracedList(/*IsAngleBracket=*/false, /*IsEnum=*/true);
if (!Style.AllowShortEnumsOnASingleLine)
Line->Level -= 1;
if (HasError) {
@@ -2595,6 +3771,7 @@ bool UnwrappedLineParser::parseEnum() {
nextToken();
addUnwrappedLine();
}
+ setPreviousRBraceType(TT_EnumRBrace);
return true;
// There is no addUnwrappedLine() here so that we fall through to parsing a
@@ -2607,8 +3784,8 @@ bool UnwrappedLineParser::parseStructLike() {
// record declaration or definition can start a structural element.
parseRecord();
// This does not apply to Java, JavaScript and C#.
- if (Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp()) {
+ if (Style.Language == FormatStyle::LK_Java || Style.isJavaScript() ||
+ Style.isCSharp()) {
if (FormatTok->is(tok::semi))
nextToken();
addUnwrappedLine();
@@ -2640,36 +3817,37 @@ bool UnwrappedLineParser::tryToParseSimpleAttribute() {
ScopedTokenPosition AutoPosition(Tokens);
FormatToken *Tok = Tokens->getNextToken();
// We already read the first [ check for the second.
- if (Tok && !Tok->is(tok::l_square)) {
+ if (Tok->isNot(tok::l_square))
return false;
- }
// Double check that the attribute is just something
// fairly simple.
- while (Tok) {
- if (Tok->is(tok::r_square)) {
+ while (Tok->isNot(tok::eof)) {
+ if (Tok->is(tok::r_square))
break;
- }
Tok = Tokens->getNextToken();
}
+ if (Tok->is(tok::eof))
+ return false;
Tok = Tokens->getNextToken();
- if (Tok && !Tok->is(tok::r_square)) {
+ if (Tok->isNot(tok::r_square))
return false;
- }
Tok = Tokens->getNextToken();
- if (Tok && Tok->is(tok::semi)) {
+ if (Tok->is(tok::semi))
return false;
- }
return true;
}
void UnwrappedLineParser::parseJavaEnumBody() {
+ assert(FormatTok->is(tok::l_brace));
+ const FormatToken *OpeningBrace = FormatTok;
+
// Determine whether the enum is simple, i.e. does not have a semicolon or
// constants with class bodies. Simple enums can be formatted like braced
// lists, contracted to a single line, etc.
unsigned StoredPosition = Tokens->getPosition();
bool IsSimple = true;
FormatToken *Tok = Tokens->getNextToken();
- while (Tok) {
+ while (Tok->isNot(tok::eof)) {
if (Tok->is(tok::r_brace))
break;
if (Tok->isOneOf(tok::l_brace, tok::semi)) {
@@ -2696,7 +3874,7 @@ void UnwrappedLineParser::parseJavaEnumBody() {
++Line->Level;
// Parse the enum constants.
- while (FormatTok) {
+ while (!eof()) {
if (FormatTok->is(tok::l_brace)) {
// Parse the constant's class body.
parseBlock(/*MustBeDeclaration=*/true, /*AddLevels=*/1u,
@@ -2719,7 +3897,7 @@ void UnwrappedLineParser::parseJavaEnumBody() {
}
// Parse the class body after the enum's ";" if any.
- parseLevel(/*HasOpeningBrace=*/true);
+ parseLevel(OpeningBrace);
nextToken();
--Line->Level;
addUnwrappedLine();
@@ -2729,16 +3907,18 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
const FormatToken &InitialToken = *FormatTok;
nextToken();
+ auto IsNonMacroIdentifier = [](const FormatToken *Tok) {
+ return Tok->is(tok::identifier) && Tok->TokenText != Tok->TokenText.upper();
+ };
// The actual identifier can be a nested name specifier, and in macros
// it is often token-pasted.
// An [[attribute]] can be before the identifier.
while (FormatTok->isOneOf(tok::identifier, tok::coloncolon, tok::hashhash,
- tok::kw___attribute, tok::kw___declspec,
- tok::kw_alignas, tok::l_square, tok::r_square) ||
- ((Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript) &&
+ tok::kw_alignas, tok::l_square) ||
+ FormatTok->isAttribute() ||
+ ((Style.Language == FormatStyle::LK_Java || Style.isJavaScript()) &&
FormatTok->isOneOf(tok::period, tok::comma))) {
- if (Style.Language == FormatStyle::LK_JavaScript &&
+ if (Style.isJavaScript() &&
FormatTok->isOneOf(Keywords.kw_extends, Keywords.kw_implements)) {
// JavaScript/TypeScript supports inline object types in
// extends/implements positions:
@@ -2749,41 +3929,47 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
continue;
}
}
- bool IsNonMacroIdentifier =
- FormatTok->is(tok::identifier) &&
- FormatTok->TokenText != FormatTok->TokenText.upper();
+ if (FormatTok->is(tok::l_square) && handleCppAttributes())
+ continue;
nextToken();
- // We can have macros or attributes in between 'class' and the class name.
- if (!IsNonMacroIdentifier) {
- if (FormatTok->Tok.is(tok::l_paren)) {
- parseParens();
- } else if (FormatTok->is(TT_AttributeSquare)) {
- parseSquare();
- // Consume the closing TT_AttributeSquare.
- if (FormatTok->Next && FormatTok->is(TT_AttributeSquare))
- nextToken();
- }
+ // We can have macros in between 'class' and the class name.
+ if (!IsNonMacroIdentifier(FormatTok->Previous) &&
+ FormatTok->is(tok::l_paren)) {
+ parseParens();
}
}
- // Note that parsing away template declarations here leads to incorrectly
- // accepting function declarations as record declarations.
- // In general, we cannot solve this problem. Consider:
- // class A<int> B() {}
- // which can be a function definition or a class definition when B() is a
- // macro. If we find enough real-world cases where this is a problem, we
- // can parse for the 'template' keyword in the beginning of the statement,
- // and thus rule out the record production in case there is no template
- // (this would still leave us with an ambiguity between template function
- // and class declarations).
if (FormatTok->isOneOf(tok::colon, tok::less)) {
- while (!eof()) {
+ int AngleNestingLevel = 0;
+ do {
+ if (FormatTok->is(tok::less))
+ ++AngleNestingLevel;
+ else if (FormatTok->is(tok::greater))
+ --AngleNestingLevel;
+
+ if (AngleNestingLevel == 0 && FormatTok->is(tok::l_paren) &&
+ IsNonMacroIdentifier(FormatTok->Previous)) {
+ break;
+ }
if (FormatTok->is(tok::l_brace)) {
calculateBraceTypes(/*ExpectClassBody=*/true);
if (!tryToParseBracedList())
break;
}
- if (FormatTok->Tok.is(tok::semi))
+ if (FormatTok->is(tok::l_square)) {
+ FormatToken *Previous = FormatTok->Previous;
+ if (!Previous ||
+ !(Previous->is(tok::r_paren) || Previous->isTypeOrIdentifier())) {
+ // Don't try parsing a lambda if we had a closing parenthesis before,
+ // it was probably a pointer to an array: int (*)[].
+ if (!tryToParseLambda())
+ continue;
+ } else {
+ parseSquare();
+ continue;
+ }
+ }
+ if (FormatTok->is(tok::semi))
return;
if (Style.isCSharp() && FormatTok->is(Keywords.kw_where)) {
addUnwrappedLine();
@@ -2792,9 +3978,26 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
break;
}
nextToken();
- }
+ } while (!eof());
}
- if (FormatTok->Tok.is(tok::l_brace)) {
+
+ auto GetBraceTypes =
+ [](const FormatToken &RecordTok) -> std::pair<TokenType, TokenType> {
+ switch (RecordTok.Tok.getKind()) {
+ case tok::kw_class:
+ return {TT_ClassLBrace, TT_ClassRBrace};
+ case tok::kw_struct:
+ return {TT_StructLBrace, TT_StructRBrace};
+ case tok::kw_union:
+ return {TT_UnionLBrace, TT_UnionRBrace};
+ default:
+ // Useful for e.g. interface.
+ return {TT_RecordLBrace, TT_RecordRBrace};
+ }
+ };
+ if (FormatTok->is(tok::l_brace)) {
+ auto [OpenBraceType, ClosingBraceType] = GetBraceTypes(InitialToken);
+ FormatTok->setFinalizedType(OpenBraceType);
if (ParseAsExpr) {
parseChildBlock();
} else {
@@ -2804,6 +4007,7 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
unsigned AddLevels = Style.IndentAccessModifiers ? 2u : 1u;
parseBlock(/*MustBeDeclaration=*/true, AddLevels, /*MunchSemi=*/false);
}
+ setPreviousRBraceType(ClosingBraceType);
}
// There is no addUnwrappedLine() here so that we fall through to parsing a
// structural element afterwards. Thus, in "class A {} n, m;",
@@ -2811,17 +4015,17 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
}
void UnwrappedLineParser::parseObjCMethod() {
- assert(FormatTok->Tok.isOneOf(tok::l_paren, tok::identifier) &&
+ assert(FormatTok->isOneOf(tok::l_paren, tok::identifier) &&
"'(' or identifier expected.");
do {
- if (FormatTok->Tok.is(tok::semi)) {
+ if (FormatTok->is(tok::semi)) {
nextToken();
addUnwrappedLine();
return;
- } else if (FormatTok->Tok.is(tok::l_brace)) {
+ } else if (FormatTok->is(tok::l_brace)) {
if (Style.BraceWrapping.AfterFunction)
addUnwrappedLine();
- parseBlock(/*MustBeDeclaration=*/false);
+ parseBlock();
addUnwrappedLine();
return;
} else {
@@ -2831,26 +4035,27 @@ void UnwrappedLineParser::parseObjCMethod() {
}
void UnwrappedLineParser::parseObjCProtocolList() {
- assert(FormatTok->Tok.is(tok::less) && "'<' expected.");
+ assert(FormatTok->is(tok::less) && "'<' expected.");
do {
nextToken();
// Early exit in case someone forgot a close angle.
if (FormatTok->isOneOf(tok::semi, tok::l_brace) ||
- FormatTok->Tok.isObjCAtKeyword(tok::objc_end))
+ FormatTok->isObjCAtKeyword(tok::objc_end)) {
return;
- } while (!eof() && FormatTok->Tok.isNot(tok::greater));
+ }
+ } while (!eof() && FormatTok->isNot(tok::greater));
nextToken(); // Skip '>'.
}
void UnwrappedLineParser::parseObjCUntilAtEnd() {
do {
- if (FormatTok->Tok.isObjCAtKeyword(tok::objc_end)) {
+ if (FormatTok->isObjCAtKeyword(tok::objc_end)) {
nextToken();
addUnwrappedLine();
break;
}
if (FormatTok->is(tok::l_brace)) {
- parseBlock(/*MustBeDeclaration=*/false);
+ parseBlock();
// In ObjC interfaces, nothing should be following the "}".
addUnwrappedLine();
} else if (FormatTok->is(tok::r_brace)) {
@@ -2874,24 +4079,23 @@ void UnwrappedLineParser::parseObjCInterfaceOrImplementation() {
// @interface can be followed by a lightweight generic
// specialization list, then either a base class or a category.
- if (FormatTok->Tok.is(tok::less)) {
+ if (FormatTok->is(tok::less))
parseObjCLightweightGenerics();
- }
- if (FormatTok->Tok.is(tok::colon)) {
+ if (FormatTok->is(tok::colon)) {
nextToken();
nextToken(); // base class name
// The base class can also have lightweight generics applied to it.
- if (FormatTok->Tok.is(tok::less)) {
+ if (FormatTok->is(tok::less))
parseObjCLightweightGenerics();
- }
- } else if (FormatTok->Tok.is(tok::l_paren))
+ } else if (FormatTok->is(tok::l_paren)) {
// Skip category, if present.
parseParens();
+ }
- if (FormatTok->Tok.is(tok::less))
+ if (FormatTok->is(tok::less))
parseObjCProtocolList();
- if (FormatTok->Tok.is(tok::l_brace)) {
+ if (FormatTok->is(tok::l_brace)) {
if (Style.BraceWrapping.AfterObjCDeclaration)
addUnwrappedLine();
parseBlock(/*MustBeDeclaration=*/true);
@@ -2905,7 +4109,7 @@ void UnwrappedLineParser::parseObjCInterfaceOrImplementation() {
}
void UnwrappedLineParser::parseObjCLightweightGenerics() {
- assert(FormatTok->Tok.is(tok::less));
+ assert(FormatTok->is(tok::less));
// Unlike protocol lists, generic parameterizations support
// nested angles:
//
@@ -2918,11 +4122,12 @@ void UnwrappedLineParser::parseObjCLightweightGenerics() {
nextToken();
// Early exit in case someone forgot a close angle.
if (FormatTok->isOneOf(tok::semi, tok::l_brace) ||
- FormatTok->Tok.isObjCAtKeyword(tok::objc_end))
+ FormatTok->isObjCAtKeyword(tok::objc_end)) {
break;
- if (FormatTok->Tok.is(tok::less))
+ }
+ if (FormatTok->is(tok::less)) {
++NumOpenAngles;
- else if (FormatTok->Tok.is(tok::greater)) {
+ } else if (FormatTok->is(tok::greater)) {
assert(NumOpenAngles > 0 && "'>' makes NumOpenAngles negative");
--NumOpenAngles;
}
@@ -2936,9 +4141,10 @@ bool UnwrappedLineParser::parseObjCProtocol() {
assert(FormatTok->Tok.getObjCKeywordID() == tok::objc_protocol);
nextToken();
- if (FormatTok->is(tok::l_paren))
+ if (FormatTok->is(tok::l_paren)) {
// The expression form of @protocol, e.g. "Protocol* p = @protocol(foo);".
return false;
+ }
// The definition/declaration form,
// @protocol Foo
@@ -2947,11 +4153,11 @@ bool UnwrappedLineParser::parseObjCProtocol() {
nextToken(); // protocol name
- if (FormatTok->Tok.is(tok::less))
+ if (FormatTok->is(tok::less))
parseObjCProtocolList();
// Check for protocol declaration.
- if (FormatTok->Tok.is(tok::semi)) {
+ if (FormatTok->is(tok::semi)) {
nextToken();
addUnwrappedLine();
return true;
@@ -2986,8 +4192,11 @@ void UnwrappedLineParser::parseJavaScriptEs6ImportExport() {
// parsing the structural element, i.e. the declaration or expression for
// `export default`.
if (!IsImport && !FormatTok->isOneOf(tok::l_brace, tok::star) &&
- !FormatTok->isStringLiteral())
+ !FormatTok->isStringLiteral() &&
+ !(FormatTok->is(Keywords.kw_type) &&
+ Tokens->peekNextToken()->isOneOf(tok::l_brace, tok::star))) {
return;
+ }
while (!eof()) {
if (FormatTok->is(tok::semi))
@@ -3016,38 +4225,200 @@ void UnwrappedLineParser::parseStatementMacro() {
addUnwrappedLine();
}
-LLVM_ATTRIBUTE_UNUSED static void printDebugInfo(const UnwrappedLine &Line,
- StringRef Prefix = "") {
- llvm::dbgs() << Prefix << "Line(" << Line.Level
- << ", FSC=" << Line.FirstStartColumn << ")"
- << (Line.InPPDirective ? " MACRO" : "") << ": ";
- for (std::list<UnwrappedLineNode>::const_iterator I = Line.Tokens.begin(),
- E = Line.Tokens.end();
- I != E; ++I) {
- llvm::dbgs() << I->Tok->Tok.getName() << "["
- << "T=" << (unsigned)I->Tok->getType()
- << ", OC=" << I->Tok->OriginalColumn << "] ";
+void UnwrappedLineParser::parseVerilogHierarchyIdentifier() {
+ // consume things like a::`b.c[d:e] or a::*
+ while (true) {
+ if (FormatTok->isOneOf(tok::star, tok::period, tok::periodstar,
+ tok::coloncolon, tok::hash) ||
+ Keywords.isVerilogIdentifier(*FormatTok)) {
+ nextToken();
+ } else if (FormatTok->is(tok::l_square)) {
+ parseSquare();
+ } else {
+ break;
+ }
}
- for (std::list<UnwrappedLineNode>::const_iterator I = Line.Tokens.begin(),
- E = Line.Tokens.end();
- I != E; ++I) {
- const UnwrappedLineNode &Node = *I;
- for (SmallVectorImpl<UnwrappedLine>::const_iterator
- I = Node.Children.begin(),
- E = Node.Children.end();
- I != E; ++I) {
- printDebugInfo(*I, "\nChild: ");
+}
+
+void UnwrappedLineParser::parseVerilogSensitivityList() {
+ if (FormatTok->isNot(tok::at))
+ return;
+ nextToken();
+ // A block event expression has 2 at signs.
+ if (FormatTok->is(tok::at))
+ nextToken();
+ switch (FormatTok->Tok.getKind()) {
+ case tok::star:
+ nextToken();
+ break;
+ case tok::l_paren:
+ parseParens();
+ break;
+ default:
+ parseVerilogHierarchyIdentifier();
+ break;
+ }
+}
+
+unsigned UnwrappedLineParser::parseVerilogHierarchyHeader() {
+ unsigned AddLevels = 0;
+
+ if (FormatTok->is(Keywords.kw_clocking)) {
+ nextToken();
+ if (Keywords.isVerilogIdentifier(*FormatTok))
+ nextToken();
+ parseVerilogSensitivityList();
+ if (FormatTok->is(tok::semi))
+ nextToken();
+ } else if (FormatTok->isOneOf(tok::kw_case, Keywords.kw_casex,
+ Keywords.kw_casez, Keywords.kw_randcase,
+ Keywords.kw_randsequence)) {
+ if (Style.IndentCaseLabels)
+ AddLevels++;
+ nextToken();
+ if (FormatTok->is(tok::l_paren)) {
+ FormatTok->setFinalizedType(TT_ConditionLParen);
+ parseParens();
+ }
+ if (FormatTok->isOneOf(Keywords.kw_inside, Keywords.kw_matches))
+ nextToken();
+ // The case header has no semicolon.
+ } else {
+ // "module" etc.
+ nextToken();
+ // all the words like the name of the module and specifiers like
+ // "automatic" and the width of function return type
+ while (true) {
+ if (FormatTok->is(tok::l_square)) {
+ auto Prev = FormatTok->getPreviousNonComment();
+ if (Prev && Keywords.isVerilogIdentifier(*Prev))
+ Prev->setFinalizedType(TT_VerilogDimensionedTypeName);
+ parseSquare();
+ } else if (Keywords.isVerilogIdentifier(*FormatTok) ||
+ FormatTok->isOneOf(Keywords.kw_automatic, tok::kw_static)) {
+ nextToken();
+ } else {
+ break;
+ }
}
+
+ auto NewLine = [this]() {
+ addUnwrappedLine();
+ Line->IsContinuation = true;
+ };
+
+ // package imports
+ while (FormatTok->is(Keywords.kw_import)) {
+ NewLine();
+ nextToken();
+ parseVerilogHierarchyIdentifier();
+ if (FormatTok->is(tok::semi))
+ nextToken();
+ }
+
+ // parameters and ports
+ if (FormatTok->is(Keywords.kw_verilogHash)) {
+ NewLine();
+ nextToken();
+ if (FormatTok->is(tok::l_paren)) {
+ FormatTok->setFinalizedType(TT_VerilogMultiLineListLParen);
+ parseParens();
+ }
+ }
+ if (FormatTok->is(tok::l_paren)) {
+ NewLine();
+ FormatTok->setFinalizedType(TT_VerilogMultiLineListLParen);
+ parseParens();
+ }
+
+ // extends and implements
+ if (FormatTok->is(Keywords.kw_extends)) {
+ NewLine();
+ nextToken();
+ parseVerilogHierarchyIdentifier();
+ if (FormatTok->is(tok::l_paren))
+ parseParens();
+ }
+ if (FormatTok->is(Keywords.kw_implements)) {
+ NewLine();
+ do {
+ nextToken();
+ parseVerilogHierarchyIdentifier();
+ } while (FormatTok->is(tok::comma));
+ }
+
+ // Coverage event for cover groups.
+ if (FormatTok->is(tok::at)) {
+ NewLine();
+ parseVerilogSensitivityList();
+ }
+
+ if (FormatTok->is(tok::semi))
+ nextToken(/*LevelDifference=*/1);
+ addUnwrappedLine();
}
- llvm::dbgs() << "\n";
+
+ return AddLevels;
+}
+
+void UnwrappedLineParser::parseVerilogTable() {
+ assert(FormatTok->is(Keywords.kw_table));
+ nextToken(/*LevelDifference=*/1);
+ addUnwrappedLine();
+
+ auto InitialLevel = Line->Level++;
+ while (!eof() && !Keywords.isVerilogEnd(*FormatTok)) {
+ FormatToken *Tok = FormatTok;
+ nextToken();
+ if (Tok->is(tok::semi))
+ addUnwrappedLine();
+ else if (Tok->isOneOf(tok::star, tok::colon, tok::question, tok::minus))
+ Tok->setFinalizedType(TT_VerilogTableItem);
+ }
+ Line->Level = InitialLevel;
+ nextToken(/*LevelDifference=*/-1);
+ addUnwrappedLine();
+}
+
+void UnwrappedLineParser::parseVerilogCaseLabel() {
+ // The label will get unindented in AnnotatingParser. If there are no leading
+ // spaces, indent the rest here so that things inside the block will be
+ // indented relative to things outside. We don't use parseLabel because we
+ // don't know whether this colon is a label or a ternary expression at this
+ // point.
+ auto OrigLevel = Line->Level;
+ auto FirstLine = CurrentLines->size();
+ if (Line->Level == 0 || (Line->InPPDirective && Line->Level <= 1))
+ ++Line->Level;
+ else if (!Style.IndentCaseBlocks && Keywords.isVerilogBegin(*FormatTok))
+ --Line->Level;
+ parseStructuralElement();
+ // Restore the indentation in both the new line and the line that has the
+ // label.
+ if (CurrentLines->size() > FirstLine)
+ (*CurrentLines)[FirstLine].Level = OrigLevel;
+ Line->Level = OrigLevel;
+}
+
+bool UnwrappedLineParser::containsExpansion(const UnwrappedLine &Line) const {
+ for (const auto &N : Line.Tokens) {
+ if (N.Tok->MacroCtx)
+ return true;
+ for (const UnwrappedLine &Child : N.Children)
+ if (containsExpansion(Child))
+ return true;
+ }
+ return false;
}
void UnwrappedLineParser::addUnwrappedLine(LineLevel AdjustLevel) {
if (Line->Tokens.empty())
return;
LLVM_DEBUG({
- if (CurrentLines == &Lines)
+ if (!parsingPPDirective()) {
+ llvm::dbgs() << "Adding unwrapped line:\n";
printDebugInfo(*Line);
+ }
});
// If this line closes a block when in Whitesmiths mode, remember that
@@ -3058,14 +4429,48 @@ void UnwrappedLineParser::addUnwrappedLine(LineLevel AdjustLevel) {
Line->MatchingOpeningBlockLineIndex != UnwrappedLine::kInvalidIndex &&
Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths;
- CurrentLines->push_back(std::move(*Line));
+ // If the current line was expanded from a macro call, we use it to
+ // reconstruct an unwrapped line from the structure of the expanded unwrapped
+ // line and the unexpanded token stream.
+ if (!parsingPPDirective() && !InExpansion && containsExpansion(*Line)) {
+ if (!Reconstruct)
+ Reconstruct.emplace(Line->Level, Unexpanded);
+ Reconstruct->addLine(*Line);
+
+ // While the reconstructed unexpanded lines are stored in the normal
+ // flow of lines, the expanded lines are stored on the side to be analyzed
+ // in an extra step.
+ CurrentExpandedLines.push_back(std::move(*Line));
+
+ if (Reconstruct->finished()) {
+ UnwrappedLine Reconstructed = std::move(*Reconstruct).takeResult();
+ assert(!Reconstructed.Tokens.empty() &&
+ "Reconstructed must at least contain the macro identifier.");
+ assert(!parsingPPDirective());
+ LLVM_DEBUG({
+ llvm::dbgs() << "Adding unexpanded line:\n";
+ printDebugInfo(Reconstructed);
+ });
+ ExpandedLines[Reconstructed.Tokens.begin()->Tok] = CurrentExpandedLines;
+ Lines.push_back(std::move(Reconstructed));
+ CurrentExpandedLines.clear();
+ Reconstruct.reset();
+ }
+ } else {
+ // At the top level we only get here when no unexpansion is going on, or
+ // when conditional formatting led to unfinished macro reconstructions.
+ assert(!Reconstruct || (CurrentLines != &Lines) || PPStack.size() > 0);
+ CurrentLines->push_back(std::move(*Line));
+ }
Line->Tokens.clear();
Line->MatchingOpeningBlockLineIndex = UnwrappedLine::kInvalidIndex;
Line->FirstStartColumn = 0;
+ Line->IsContinuation = false;
+ Line->SeenDecltypeAuto = false;
if (ClosesWhitesmithsBlock && AdjustLevel == LineLevel::Remove)
--Line->Level;
- if (CurrentLines == &Lines && !PreprocessorDirectives.empty()) {
+ if (!parsingPPDirective() && !PreprocessorDirectives.empty()) {
CurrentLines->append(
std::make_move_iterator(PreprocessorDirectives.begin()),
std::make_move_iterator(PreprocessorDirectives.end()));
@@ -3075,7 +4480,7 @@ void UnwrappedLineParser::addUnwrappedLine(LineLevel AdjustLevel) {
FormatTok->Previous = nullptr;
}
-bool UnwrappedLineParser::eof() const { return FormatTok->Tok.is(tok::eof); }
+bool UnwrappedLineParser::eof() const { return FormatTok->is(tok::eof); }
bool UnwrappedLineParser::isOnNewLine(const FormatToken &FormatTok) {
return (Line->InPPDirective || FormatTok.HasUnescapedNewline) &&
@@ -3092,9 +4497,10 @@ continuesLineCommentSection(const FormatToken &FormatTok,
return false;
StringRef IndentContent = FormatTok.TokenText;
- if (FormatTok.TokenText.startswith("//") ||
- FormatTok.TokenText.startswith("/*"))
+ if (FormatTok.TokenText.starts_with("//") ||
+ FormatTok.TokenText.starts_with("/*")) {
IndentContent = FormatTok.TokenText.substr(2);
+ }
if (CommentPragmasRegex.match(IndentContent))
return false;
@@ -3178,13 +4584,11 @@ continuesLineCommentSection(const FormatToken &FormatTok,
PreviousToken = Node.Tok;
// Grab the last newline preceding a token in this unwrapped line.
- if (Node.Tok->NewlinesBefore > 0) {
+ if (Node.Tok->NewlinesBefore > 0)
MinColumnToken = Node.Tok;
- }
}
- if (PreviousToken && PreviousToken->is(tok::l_brace)) {
+ if (PreviousToken && PreviousToken->is(tok::l_brace))
MinColumnToken = PreviousToken;
- }
return continuesLineComment(FormatTok, /*Previous=*/Line.Tokens.back().Tok,
MinColumnToken);
@@ -3192,10 +4596,7 @@ continuesLineCommentSection(const FormatToken &FormatTok,
void UnwrappedLineParser::flushComments(bool NewlineBeforeNext) {
bool JustComments = Line->Tokens.empty();
- for (SmallVectorImpl<FormatToken *>::const_iterator
- I = CommentsBeforeNextToken.begin(),
- E = CommentsBeforeNextToken.end();
- I != E; ++I) {
+ for (FormatToken *Tok : CommentsBeforeNextToken) {
// Line comments that belong to the same line comment section are put on the
// same line since later we might want to reflow content between them.
// Additional fine-grained breaking of line comment sections is controlled
@@ -3204,11 +4605,11 @@ void UnwrappedLineParser::flushComments(bool NewlineBeforeNext) {
//
// FIXME: Consider putting separate line comment sections as children to the
// unwrapped line instead.
- (*I)->ContinuesLineCommentSection =
- continuesLineCommentSection(**I, *Line, CommentPragmasRegex);
- if (isOnNewLine(**I) && JustComments && !(*I)->ContinuesLineCommentSection)
+ Tok->ContinuesLineCommentSection =
+ continuesLineCommentSection(*Tok, *Line, CommentPragmasRegex);
+ if (isOnNewLine(*Tok) && JustComments && !Tok->ContinuesLineCommentSection)
addUnwrappedLine();
- pushToken(*I);
+ pushToken(Tok);
}
if (NewlineBeforeNext && JustComments)
addUnwrappedLine();
@@ -3221,11 +4622,21 @@ void UnwrappedLineParser::nextToken(int LevelDifference) {
flushComments(isOnNewLine(*FormatTok));
pushToken(FormatTok);
FormatToken *Previous = FormatTok;
- if (Style.Language != FormatStyle::LK_JavaScript)
+ if (!Style.isJavaScript())
readToken(LevelDifference);
else
readTokenWithJavaScriptASI();
FormatTok->Previous = Previous;
+ if (Style.isVerilog()) {
+ // Blocks in Verilog can have `begin` and `end` instead of braces. For
+ // keywords like `begin`, we can't treat them the same as left braces
+ // because some contexts require one of them. For example structs use
+ // braces and if blocks use keywords, and a left brace can occur in an if
+ // statement, but it is not a block. For keywords like `end`, we simply
+ // treat them the same as right braces.
+ if (Keywords.isVerilogEnd(*FormatTok))
+ FormatTok->Tok.setKind(tok::r_brace);
+ }
}
void UnwrappedLineParser::distributeComments(
@@ -3275,21 +4686,56 @@ void UnwrappedLineParser::distributeComments(
(isOnNewLine(*FormatTok) || FormatTok->IsFirst)) {
ShouldPushCommentsInCurrentLine = false;
}
- if (ShouldPushCommentsInCurrentLine) {
+ if (ShouldPushCommentsInCurrentLine)
pushToken(FormatTok);
- } else {
+ else
CommentsBeforeNextToken.push_back(FormatTok);
- }
}
}
void UnwrappedLineParser::readToken(int LevelDifference) {
SmallVector<FormatToken *, 1> Comments;
+ bool PreviousWasComment = false;
+ bool FirstNonCommentOnLine = false;
do {
FormatTok = Tokens->getNextToken();
assert(FormatTok);
- while (!Line->InPPDirective && FormatTok->Tok.is(tok::hash) &&
- (FormatTok->HasUnescapedNewline || FormatTok->IsFirst)) {
+ while (FormatTok->getType() == TT_ConflictStart ||
+ FormatTok->getType() == TT_ConflictEnd ||
+ FormatTok->getType() == TT_ConflictAlternative) {
+ if (FormatTok->getType() == TT_ConflictStart)
+ conditionalCompilationStart(/*Unreachable=*/false);
+ else if (FormatTok->getType() == TT_ConflictAlternative)
+ conditionalCompilationAlternative();
+ else if (FormatTok->getType() == TT_ConflictEnd)
+ conditionalCompilationEnd();
+ FormatTok = Tokens->getNextToken();
+ FormatTok->MustBreakBefore = true;
+ FormatTok->MustBreakBeforeFinalized = true;
+ }
+
+ auto IsFirstNonCommentOnLine = [](bool FirstNonCommentOnLine,
+ const FormatToken &Tok,
+ bool PreviousWasComment) {
+ auto IsFirstOnLine = [](const FormatToken &Tok) {
+ return Tok.HasUnescapedNewline || Tok.IsFirst;
+ };
+
+ // Consider preprocessor directives preceded by block comments as first
+ // on line.
+ if (PreviousWasComment)
+ return FirstNonCommentOnLine || IsFirstOnLine(Tok);
+ return IsFirstOnLine(Tok);
+ };
+
+ FirstNonCommentOnLine = IsFirstNonCommentOnLine(
+ FirstNonCommentOnLine, *FormatTok, PreviousWasComment);
+ PreviousWasComment = FormatTok->is(tok::comment);
+
+ while (!Line->InPPDirective && FormatTok->is(tok::hash) &&
+ (!Style.isVerilog() ||
+ Keywords.isVerilogPPDirective(*Tokens->peekNextToken())) &&
+ FirstNonCommentOnLine) {
distributeComments(Comments, FormatTok);
Comments.clear();
// If there is an unfinished unwrapped line, we flush the preprocessor
@@ -3304,23 +4750,14 @@ void UnwrappedLineParser::readToken(int LevelDifference) {
// before the preprocessor directive, at the same level as the
// preprocessor directive, as we consider them to apply to the directive.
if (Style.IndentPPDirectives == FormatStyle::PPDIS_BeforeHash &&
- PPBranchLevel > 0)
+ PPBranchLevel > 0) {
Line->Level += PPBranchLevel;
+ }
flushComments(isOnNewLine(*FormatTok));
parsePPDirective();
- }
- while (FormatTok->getType() == TT_ConflictStart ||
- FormatTok->getType() == TT_ConflictEnd ||
- FormatTok->getType() == TT_ConflictAlternative) {
- if (FormatTok->getType() == TT_ConflictStart) {
- conditionalCompilationStart(/*Unreachable=*/false);
- } else if (FormatTok->getType() == TT_ConflictAlternative) {
- conditionalCompilationAlternative();
- } else if (FormatTok->getType() == TT_ConflictEnd) {
- conditionalCompilationEnd();
- }
- FormatTok = Tokens->getNextToken();
- FormatTok->MustBreakBefore = true;
+ PreviousWasComment = FormatTok->is(tok::comment);
+ FirstNonCommentOnLine = IsFirstNonCommentOnLine(
+ FirstNonCommentOnLine, *FormatTok, PreviousWasComment);
}
if (!PPStack.empty() && (PPStack.back().Kind == PP_Unreachable) &&
@@ -3328,7 +4765,88 @@ void UnwrappedLineParser::readToken(int LevelDifference) {
continue;
}
- if (!FormatTok->Tok.is(tok::comment)) {
+ if (FormatTok->is(tok::identifier) &&
+ Macros.defined(FormatTok->TokenText) &&
+ // FIXME: Allow expanding macros in preprocessor directives.
+ !Line->InPPDirective) {
+ FormatToken *ID = FormatTok;
+ unsigned Position = Tokens->getPosition();
+
+ // To correctly parse the code, we need to replace the tokens of the macro
+ // call with its expansion.
+ auto PreCall = std::move(Line);
+ Line.reset(new UnwrappedLine);
+ bool OldInExpansion = InExpansion;
+ InExpansion = true;
+ // We parse the macro call into a new line.
+ auto Args = parseMacroCall();
+ InExpansion = OldInExpansion;
+ assert(Line->Tokens.front().Tok == ID);
+ // And remember the unexpanded macro call tokens.
+ auto UnexpandedLine = std::move(Line);
+ // Reset to the old line.
+ Line = std::move(PreCall);
+
+ LLVM_DEBUG({
+ llvm::dbgs() << "Macro call: " << ID->TokenText << "(";
+ if (Args) {
+ llvm::dbgs() << "(";
+ for (const auto &Arg : Args.value())
+ for (const auto &T : Arg)
+ llvm::dbgs() << T->TokenText << " ";
+ llvm::dbgs() << ")";
+ }
+ llvm::dbgs() << "\n";
+ });
+ if (Macros.objectLike(ID->TokenText) && Args &&
+ !Macros.hasArity(ID->TokenText, Args->size())) {
+ // The macro is either
+ // - object-like, but we got argumnets, or
+ // - overloaded to be both object-like and function-like, but none of
+ // the function-like arities match the number of arguments.
+ // Thus, expand as object-like macro.
+ LLVM_DEBUG(llvm::dbgs()
+ << "Macro \"" << ID->TokenText
+ << "\" not overloaded for arity " << Args->size()
+ << "or not function-like, using object-like overload.");
+ Args.reset();
+ UnexpandedLine->Tokens.resize(1);
+ Tokens->setPosition(Position);
+ nextToken();
+ assert(!Args && Macros.objectLike(ID->TokenText));
+ }
+ if ((!Args && Macros.objectLike(ID->TokenText)) ||
+ (Args && Macros.hasArity(ID->TokenText, Args->size()))) {
+ // Next, we insert the expanded tokens in the token stream at the
+ // current position, and continue parsing.
+ Unexpanded[ID] = std::move(UnexpandedLine);
+ SmallVector<FormatToken *, 8> Expansion =
+ Macros.expand(ID, std::move(Args));
+ if (!Expansion.empty())
+ FormatTok = Tokens->insertTokens(Expansion);
+
+ LLVM_DEBUG({
+ llvm::dbgs() << "Expanded: ";
+ for (const auto &T : Expansion)
+ llvm::dbgs() << T->TokenText << " ";
+ llvm::dbgs() << "\n";
+ });
+ } else {
+ LLVM_DEBUG({
+ llvm::dbgs() << "Did not expand macro \"" << ID->TokenText
+ << "\", because it was used ";
+ if (Args)
+ llvm::dbgs() << "with " << Args->size();
+ else
+ llvm::dbgs() << "without";
+ llvm::dbgs() << " arguments, which doesn't match any definition.\n";
+ });
+ Tokens->setPosition(Position);
+ FormatTok = ID;
+ }
+ }
+
+ if (FormatTok->isNot(tok::comment)) {
distributeComments(Comments, FormatTok);
Comments.clear();
return;
@@ -3341,10 +4859,76 @@ void UnwrappedLineParser::readToken(int LevelDifference) {
Comments.clear();
}
+namespace {
+template <typename Iterator>
+void pushTokens(Iterator Begin, Iterator End,
+ llvm::SmallVectorImpl<FormatToken *> &Into) {
+ for (auto I = Begin; I != End; ++I) {
+ Into.push_back(I->Tok);
+ for (const auto &Child : I->Children)
+ pushTokens(Child.Tokens.begin(), Child.Tokens.end(), Into);
+ }
+}
+} // namespace
+
+std::optional<llvm::SmallVector<llvm::SmallVector<FormatToken *, 8>, 1>>
+UnwrappedLineParser::parseMacroCall() {
+ std::optional<llvm::SmallVector<llvm::SmallVector<FormatToken *, 8>, 1>> Args;
+ assert(Line->Tokens.empty());
+ nextToken();
+ if (FormatTok->isNot(tok::l_paren))
+ return Args;
+ unsigned Position = Tokens->getPosition();
+ FormatToken *Tok = FormatTok;
+ nextToken();
+ Args.emplace();
+ auto ArgStart = std::prev(Line->Tokens.end());
+
+ int Parens = 0;
+ do {
+ switch (FormatTok->Tok.getKind()) {
+ case tok::l_paren:
+ ++Parens;
+ nextToken();
+ break;
+ case tok::r_paren: {
+ if (Parens > 0) {
+ --Parens;
+ nextToken();
+ break;
+ }
+ Args->push_back({});
+ pushTokens(std::next(ArgStart), Line->Tokens.end(), Args->back());
+ nextToken();
+ return Args;
+ }
+ case tok::comma: {
+ if (Parens > 0) {
+ nextToken();
+ break;
+ }
+ Args->push_back({});
+ pushTokens(std::next(ArgStart), Line->Tokens.end(), Args->back());
+ nextToken();
+ ArgStart = std::prev(Line->Tokens.end());
+ break;
+ }
+ default:
+ nextToken();
+ break;
+ }
+ } while (!eof());
+ Line->Tokens.resize(1);
+ Tokens->setPosition(Position);
+ FormatTok = Tok;
+ return {};
+}
+
void UnwrappedLineParser::pushToken(FormatToken *Tok) {
Line->Tokens.push_back(UnwrappedLineNode(Tok));
if (MustBreakBeforeNextToken) {
Line->Tokens.back().Tok->MustBreakBefore = true;
+ Line->Tokens.back().Tok->MustBreakBeforeFinalized = true;
MustBreakBeforeNextToken = false;
}
}
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h
index f22bb6323e3d..739298690bbd 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h
@@ -15,12 +15,17 @@
#ifndef LLVM_CLANG_LIB_FORMAT_UNWRAPPEDLINEPARSER_H
#define LLVM_CLANG_LIB_FORMAT_UNWRAPPEDLINEPARSER_H
+#include "Encoding.h"
#include "FormatToken.h"
+#include "Macros.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Format/Format.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
#include "llvm/Support/Regex.h"
#include <list>
#include <stack>
+#include <vector>
namespace clang {
namespace format {
@@ -34,19 +39,33 @@ struct UnwrappedLineNode;
/// \c UnwrappedLineFormatter. The key property is that changing the formatting
/// within an unwrapped line does not affect any other unwrapped lines.
struct UnwrappedLine {
- UnwrappedLine();
+ UnwrappedLine() = default;
- // FIXME: Don't use std::list here.
/// The \c Tokens comprising this \c UnwrappedLine.
std::list<UnwrappedLineNode> Tokens;
/// The indent level of the \c UnwrappedLine.
- unsigned Level;
+ unsigned Level = 0;
+
+ /// The \c PPBranchLevel (adjusted for header guards) if this line is a
+ /// \c InMacroBody line, and 0 otherwise.
+ unsigned PPLevel = 0;
/// Whether this \c UnwrappedLine is part of a preprocessor directive.
- bool InPPDirective;
+ bool InPPDirective = false;
+ /// Whether this \c UnwrappedLine is part of a pramga directive.
+ bool InPragmaDirective = false;
+ /// Whether it is part of a macro body.
+ bool InMacroBody = false;
+
+ bool MustBeDeclaration = false;
- bool MustBeDeclaration;
+ /// Whether the parser has seen \c decltype(auto) in this line.
+ bool SeenDecltypeAuto = false;
+
+ /// \c True if this line should be indented by ContinuationIndent in
+ /// addition to the normal indention level.
+ bool IsContinuation = false;
/// If this \c UnwrappedLine closes a block in a sequence of lines,
/// \c MatchingOpeningBlockLineIndex stores the index of the corresponding
@@ -63,6 +82,19 @@ struct UnwrappedLine {
unsigned FirstStartColumn = 0;
};
+/// Interface for users of the UnwrappedLineParser to receive the parsed lines.
+/// Parsing a single snippet of code can lead to multiple runs, where each
+/// run is a coherent view of the file.
+///
+/// For example, different runs are generated:
+/// - for different combinations of #if blocks
+/// - when macros are involved, for the expanded code and the as-written code
+///
+/// Some tokens will only be visible in a subset of the runs.
+/// For each run, \c UnwrappedLineParser will call \c consumeUnwrappedLine
+/// for each parsed unwrapped line, and then \c finishRun to indicate
+/// that the set of unwrapped lines before is one coherent view of the
+/// code snippet to be formatted.
class UnwrappedLineConsumer {
public:
virtual ~UnwrappedLineConsumer() {}
@@ -74,51 +106,77 @@ class FormatTokenSource;
class UnwrappedLineParser {
public:
- UnwrappedLineParser(const FormatStyle &Style,
+ UnwrappedLineParser(SourceManager &SourceMgr, const FormatStyle &Style,
const AdditionalKeywords &Keywords,
unsigned FirstStartColumn, ArrayRef<FormatToken *> Tokens,
- UnwrappedLineConsumer &Callback);
+ UnwrappedLineConsumer &Callback,
+ llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
+ IdentifierTable &IdentTable);
void parse();
private:
+ enum class IfStmtKind {
+ NotIf, // Not an if statement.
+ IfOnly, // An if statement without the else clause.
+ IfElse, // An if statement followed by else but not else if.
+ IfElseIf // An if statement followed by else if.
+ };
+
void reset();
void parseFile();
- void parseLevel(bool HasOpeningBrace);
- void parseBlock(bool MustBeDeclaration, unsigned AddLevels = 1u,
- bool MunchSemi = true,
- bool UnindentWhitesmithsBraces = false);
+ bool precededByCommentOrPPDirective() const;
+ bool parseLevel(const FormatToken *OpeningBrace = nullptr,
+ IfStmtKind *IfKind = nullptr,
+ FormatToken **IfLeftBrace = nullptr);
+ bool mightFitOnOneLine(UnwrappedLine &Line,
+ const FormatToken *OpeningBrace = nullptr) const;
+ FormatToken *parseBlock(bool MustBeDeclaration = false,
+ unsigned AddLevels = 1u, bool MunchSemi = true,
+ bool KeepBraces = true, IfStmtKind *IfKind = nullptr,
+ bool UnindentWhitesmithsBraces = false);
void parseChildBlock();
void parsePPDirective();
void parsePPDefine();
void parsePPIf(bool IfDef);
- void parsePPElIf();
void parsePPElse();
void parsePPEndIf();
+ void parsePPPragma();
void parsePPUnknown();
void readTokenWithJavaScriptASI();
- void parseStructuralElement(bool IsTopLevel = false);
+ void parseStructuralElement(const FormatToken *OpeningBrace = nullptr,
+ IfStmtKind *IfKind = nullptr,
+ FormatToken **IfLeftBrace = nullptr,
+ bool *HasDoWhile = nullptr,
+ bool *HasLabel = nullptr);
bool tryToParseBracedList();
- bool parseBracedList(bool ContinueOnSemicolons = false, bool IsEnum = false,
- tok::TokenKind ClosingBraceKind = tok::r_brace);
- void parseParens();
+ bool parseBracedList(bool IsAngleBracket = false, bool IsEnum = false);
+ bool parseParens(TokenType AmpAmpTokenType = TT_Unknown);
void parseSquare(bool LambdaIntroducer = false);
- void parseIfThenElse();
+ void keepAncestorBraces();
+ void parseUnbracedBody(bool CheckEOF = false);
+ void handleAttributes();
+ bool handleCppAttributes();
+ bool isBlockBegin(const FormatToken &Tok) const;
+ FormatToken *parseIfThenElse(IfStmtKind *IfKind, bool KeepBraces = false,
+ bool IsVerilogAssert = false);
void parseTryCatch();
- void parseForOrWhileLoop();
+ void parseLoopBody(bool KeepBraces, bool WrapRightBrace);
+ void parseForOrWhileLoop(bool HasParens = true);
void parseDoWhile();
void parseLabel(bool LeftAlignLabel = false);
void parseCaseLabel();
void parseSwitch();
void parseNamespace();
+ bool parseModuleImport();
void parseNew();
void parseAccessSpecifier();
bool parseEnum();
bool parseStructLike();
- void parseConcept();
- void parseRequires();
- void parseRequiresExpression(unsigned int OriginalLevel);
- void parseConstraintExpression(unsigned int OriginalLevel);
+ bool parseRequires();
+ void parseRequiresClause(FormatToken *RequiresToken);
+ void parseRequiresExpression(FormatToken *RequiresToken);
+ void parseConstraintExpression();
void parseJavaEnumBody();
// Parses a record (aka class) as a top level element. If ParseAsExpr is true,
// parses the record as a child block, i.e. if the class declaration is an
@@ -138,10 +196,20 @@ private:
// https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/keywords/where-generic-type-constraint
void parseCSharpGenericTypeConstraint();
bool tryToParseLambda();
+ bool tryToParseChildBlock();
bool tryToParseLambdaIntroducer();
bool tryToParsePropertyAccessor();
void tryToParseJSFunction();
bool tryToParseSimpleAttribute();
+ void parseVerilogHierarchyIdentifier();
+ void parseVerilogSensitivityList();
+ // Returns the number of levels of indentation in addition to the normal 1
+ // level for a block, used for indenting case labels.
+ unsigned parseVerilogHierarchyHeader();
+ void parseVerilogTable();
+ void parseVerilogCaseLabel();
+ std::optional<llvm::SmallVector<llvm::SmallVector<FormatToken *, 8>, 1>>
+ parseMacroCall();
// Used by addUnwrappedLine to denote whether to keep or remove a level
// when resetting the line state.
@@ -165,7 +233,7 @@ private:
//
// NextTok specifies the next token. A null pointer NextTok is supported, and
// signifies either the absence of a next token, or that the next token
- // shouldn't be taken into accunt for the analysis.
+ // shouldn't be taken into account for the analysis.
void distributeComments(const SmallVectorImpl<FormatToken *> &Comments,
const FormatToken *NextTok);
@@ -173,6 +241,7 @@ private:
void flushComments(bool NewlineBeforeNext);
void pushToken(FormatToken *Tok);
void calculateBraceTypes(bool ExpectClassBody = false);
+ void setPreviousRBraceType(TokenType Type);
// Marks a conditional compilation edge (for example, an '#if', '#ifdef',
// '#else' or merge conflict marker). If 'Unreachable' is true, assumes
@@ -185,22 +254,55 @@ private:
bool isOnNewLine(const FormatToken &FormatTok);
+ // Returns whether there is a macro expansion in the line, i.e. a token that
+ // was expanded from a macro call.
+ bool containsExpansion(const UnwrappedLine &Line) const;
+
// Compute hash of the current preprocessor branch.
// This is used to identify the different branches, and thus track if block
// open and close in the same branch.
size_t computePPHash() const;
+ bool parsingPPDirective() const { return CurrentLines != &Lines; }
+
// FIXME: We are constantly running into bugs where Line.Level is incorrectly
// subtracted from beyond 0. Introduce a method to subtract from Line.Level
// and use that everywhere in the Parser.
std::unique_ptr<UnwrappedLine> Line;
+ // Lines that are created by macro expansion.
+ // When formatting code containing macro calls, we first format the expanded
+ // lines to set the token types correctly. Afterwards, we format the
+ // reconstructed macro calls, re-using the token types determined in the first
+ // step.
+ // ExpandedLines will be reset every time we create a new LineAndExpansion
+ // instance once a line containing macro calls has been parsed.
+ SmallVector<UnwrappedLine, 8> CurrentExpandedLines;
+
+ // Maps from the first token of a top-level UnwrappedLine that contains
+ // a macro call to the replacement UnwrappedLines expanded from the macro
+ // call.
+ llvm::DenseMap<FormatToken *, SmallVector<UnwrappedLine, 8>> ExpandedLines;
+
+ // Map from the macro identifier to a line containing the full unexpanded
+ // macro call.
+ llvm::DenseMap<FormatToken *, std::unique_ptr<UnwrappedLine>> Unexpanded;
+
+ // For recursive macro expansions, trigger reconstruction only on the
+ // outermost expansion.
+ bool InExpansion = false;
+
+ // Set while we reconstruct a macro call.
+ // For reconstruction, we feed the expanded lines into the reconstructor
+ // until it is finished.
+ std::optional<MacroCallReconstructor> Reconstruct;
+
// Comments are sorted into unwrapped lines by whether they are in the same
// line as the previous token, or not. If not, they belong to the next token.
// Since the next token might already be in a new unwrapped line, we need to
// store the comments belonging to that token.
SmallVector<FormatToken *, 1> CommentsBeforeNextToken;
- FormatToken *FormatTok;
+ FormatToken *FormatTok = nullptr;
bool MustBreakBeforeNextToken;
// The parsed lines. Only added to through \c CurrentLines.
@@ -219,7 +321,7 @@ private:
// We store for each line whether it must be a declaration depending on
// whether we are in a compound statement or not.
- std::vector<bool> DeclarationScopeStack;
+ llvm::BitVector DeclarationScopeStack;
const FormatStyle &Style;
const AdditionalKeywords &Keywords;
@@ -229,11 +331,20 @@ private:
FormatTokenSource *Tokens;
UnwrappedLineConsumer &Callback;
- // FIXME: This is a temporary measure until we have reworked the ownership
- // of the format tokens. The goal is to have the actual tokens created and
- // owned outside of and handed into the UnwrappedLineParser.
ArrayRef<FormatToken *> AllTokens;
+ // Keeps a stack of the states of nested control statements (true if the
+ // statement contains more than some predefined number of nested statements).
+ SmallVector<bool, 8> NestedTooDeep;
+
+ // Keeps a stack of the states of nested lambdas (true if the return type of
+ // the lambda is `decltype(auto)`).
+ SmallVector<bool, 4> NestedLambdas;
+
+ // Whether the parser is parsing the body of a function whose return type is
+ // `decltype(auto)`.
+ bool IsDecltypeAutoFunction = false;
+
// Represents preprocessor branch type, so we can find matching
// #if/#else/#endif directives.
enum PPBranchKind {
@@ -293,22 +404,22 @@ private:
// does not start at the beginning of the file.
unsigned FirstStartColumn;
+ MacroExpander Macros;
+
friend class ScopedLineState;
friend class CompoundStatementIndenter;
};
struct UnwrappedLineNode {
UnwrappedLineNode() : Tok(nullptr) {}
- UnwrappedLineNode(FormatToken *Tok) : Tok(Tok) {}
+ UnwrappedLineNode(FormatToken *Tok,
+ llvm::ArrayRef<UnwrappedLine> Children = {})
+ : Tok(Tok), Children(Children.begin(), Children.end()) {}
FormatToken *Tok;
SmallVector<UnwrappedLine, 0> Children;
};
-inline UnwrappedLine::UnwrappedLine()
- : Level(0), InPPDirective(false), MustBeDeclaration(false),
- MatchingOpeningBlockLineIndex(kInvalidIndex) {}
-
} // end namespace format
} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/Format/UsingDeclarationsSorter.cpp b/contrib/llvm-project/clang/lib/Format/UsingDeclarationsSorter.cpp
index 5608a5a75953..2f4b1e0e4627 100644
--- a/contrib/llvm-project/clang/lib/Format/UsingDeclarationsSorter.cpp
+++ b/contrib/llvm-project/clang/lib/Format/UsingDeclarationsSorter.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "UsingDeclarationsSorter.h"
+#include "clang/Format/Format.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Regex.h"
@@ -32,7 +33,7 @@ namespace {
// individual names is that all non-namespace names come before all namespace
// names, and within those groups, names are in case-insensitive lexicographic
// order.
-int compareLabels(StringRef A, StringRef B) {
+int compareLabelsLexicographicNumeric(StringRef A, StringRef B) {
SmallVector<StringRef, 2> NamesA;
A.split(NamesA, "::", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
SmallVector<StringRef, 2> NamesB;
@@ -64,16 +65,38 @@ int compareLabels(StringRef A, StringRef B) {
return 0;
}
+int compareLabelsLexicographic(StringRef A, StringRef B) {
+ SmallVector<StringRef, 2> NamesA;
+ A.split(NamesA, "::", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
+ SmallVector<StringRef, 2> NamesB;
+ B.split(NamesB, "::", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
+ size_t SizeA = NamesA.size();
+ size_t SizeB = NamesB.size();
+ for (size_t I = 0, E = std::min(SizeA, SizeB); I < E; ++I) {
+ // Two namespaces names within a group compare case-insensitively.
+ int C = NamesA[I].compare_insensitive(NamesB[I]);
+ if (C != 0)
+ return C;
+ }
+ if (SizeA < SizeB)
+ return -1;
+ return SizeA == SizeB ? 0 : 1;
+}
+
+int compareLabels(
+ StringRef A, StringRef B,
+ FormatStyle::SortUsingDeclarationsOptions SortUsingDeclarations) {
+ if (SortUsingDeclarations == FormatStyle::SUD_LexicographicNumeric)
+ return compareLabelsLexicographicNumeric(A, B);
+ return compareLabelsLexicographic(A, B);
+}
+
struct UsingDeclaration {
const AnnotatedLine *Line;
std::string Label;
UsingDeclaration(const AnnotatedLine *Line, const std::string &Label)
: Line(Line), Label(Label) {}
-
- bool operator<(const UsingDeclaration &Other) const {
- return compareLabels(Label, Other.Label) < 0;
- }
};
/// Computes the label of a using declaration starting at tthe using token
@@ -113,7 +136,8 @@ std::string computeUsingDeclarationLabel(const FormatToken *UsingTok) {
void endUsingDeclarationBlock(
SmallVectorImpl<UsingDeclaration> *UsingDeclarations,
- const SourceManager &SourceMgr, tooling::Replacements *Fixes) {
+ const SourceManager &SourceMgr, tooling::Replacements *Fixes,
+ FormatStyle::SortUsingDeclarationsOptions SortUsingDeclarations) {
bool BlockAffected = false;
for (const UsingDeclaration &Declaration : *UsingDeclarations) {
if (Declaration.Line->Affected) {
@@ -127,7 +151,11 @@ void endUsingDeclarationBlock(
}
SmallVector<UsingDeclaration, 4> SortedUsingDeclarations(
UsingDeclarations->begin(), UsingDeclarations->end());
- llvm::stable_sort(SortedUsingDeclarations);
+ auto Comp = [SortUsingDeclarations](const UsingDeclaration &Lhs,
+ const UsingDeclaration &Rhs) -> bool {
+ return compareLabels(Lhs.Label, Rhs.Label, SortUsingDeclarations) < 0;
+ };
+ llvm::stable_sort(SortedUsingDeclarations, Comp);
SortedUsingDeclarations.erase(
std::unique(SortedUsingDeclarations.begin(),
SortedUsingDeclarations.end(),
@@ -188,25 +216,30 @@ std::pair<tooling::Replacements, unsigned> UsingDeclarationsSorter::analyze(
AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
tooling::Replacements Fixes;
SmallVector<UsingDeclaration, 4> UsingDeclarations;
- for (size_t I = 0, E = AnnotatedLines.size(); I != E; ++I) {
- const auto *FirstTok = AnnotatedLines[I]->First;
- if (AnnotatedLines[I]->InPPDirective ||
- !AnnotatedLines[I]->startsWith(tok::kw_using) || FirstTok->Finalized) {
- endUsingDeclarationBlock(&UsingDeclarations, SourceMgr, &Fixes);
+ for (const AnnotatedLine *Line : AnnotatedLines) {
+ const auto *FirstTok = Line->First;
+ if (Line->InPPDirective || !Line->startsWith(tok::kw_using) ||
+ FirstTok->Finalized) {
+ endUsingDeclarationBlock(&UsingDeclarations, SourceMgr, &Fixes,
+ Style.SortUsingDeclarations);
continue;
}
- if (FirstTok->NewlinesBefore > 1)
- endUsingDeclarationBlock(&UsingDeclarations, SourceMgr, &Fixes);
+ if (FirstTok->NewlinesBefore > 1) {
+ endUsingDeclarationBlock(&UsingDeclarations, SourceMgr, &Fixes,
+ Style.SortUsingDeclarations);
+ }
const auto *UsingTok =
FirstTok->is(tok::comment) ? FirstTok->getNextNonComment() : FirstTok;
std::string Label = computeUsingDeclarationLabel(UsingTok);
if (Label.empty()) {
- endUsingDeclarationBlock(&UsingDeclarations, SourceMgr, &Fixes);
+ endUsingDeclarationBlock(&UsingDeclarations, SourceMgr, &Fixes,
+ Style.SortUsingDeclarations);
continue;
}
- UsingDeclarations.push_back(UsingDeclaration(AnnotatedLines[I], Label));
+ UsingDeclarations.push_back(UsingDeclaration(Line, Label));
}
- endUsingDeclarationBlock(&UsingDeclarations, SourceMgr, &Fixes);
+ endUsingDeclarationBlock(&UsingDeclarations, SourceMgr, &Fixes,
+ Style.SortUsingDeclarations);
return {Fixes, 0};
}
diff --git a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
index a822e0aaf1f9..df84f97a8e8a 100644
--- a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
+++ b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
@@ -22,8 +22,13 @@ namespace format {
bool WhitespaceManager::Change::IsBeforeInFile::operator()(
const Change &C1, const Change &C2) const {
return SourceMgr.isBeforeInTranslationUnit(
- C1.OriginalWhitespaceRange.getBegin(),
- C2.OriginalWhitespaceRange.getBegin());
+ C1.OriginalWhitespaceRange.getBegin(),
+ C2.OriginalWhitespaceRange.getBegin()) ||
+ (C1.OriginalWhitespaceRange.getBegin() ==
+ C2.OriginalWhitespaceRange.getBegin() &&
+ SourceMgr.isBeforeInTranslationUnit(
+ C1.OriginalWhitespaceRange.getEnd(),
+ C2.OriginalWhitespaceRange.getEnd()));
}
WhitespaceManager::Change::Change(const FormatToken &Tok,
@@ -49,7 +54,7 @@ void WhitespaceManager::replaceWhitespace(FormatToken &Tok, unsigned Newlines,
unsigned Spaces,
unsigned StartOfTokenColumn,
bool IsAligned, bool InPPDirective) {
- if (Tok.Finalized)
+ if (Tok.Finalized || (Tok.MacroCtx && Tok.MacroCtx->Role == MR_ExpandedArg))
return;
Tok.setDecision((Newlines > 0) ? FD_Break : FD_Continue);
Changes.push_back(Change(Tok, /*CreateReplacement=*/true, Tok.WhitespaceRange,
@@ -60,7 +65,7 @@ void WhitespaceManager::replaceWhitespace(FormatToken &Tok, unsigned Newlines,
void WhitespaceManager::addUntouchableToken(const FormatToken &Tok,
bool InPPDirective) {
- if (Tok.Finalized)
+ if (Tok.Finalized || (Tok.MacroCtx && Tok.MacroCtx->Role == MR_ExpandedArg))
return;
Changes.push_back(Change(Tok, /*CreateReplacement=*/false,
Tok.WhitespaceRange, /*Spaces=*/0,
@@ -74,11 +79,17 @@ WhitespaceManager::addReplacement(const tooling::Replacement &Replacement) {
return Replaces.add(Replacement);
}
+bool WhitespaceManager::inputUsesCRLF(StringRef Text, bool DefaultToCRLF) {
+ size_t LF = Text.count('\n');
+ size_t CR = Text.count('\r') * 2;
+ return LF == CR ? DefaultToCRLF : CR > LF;
+}
+
void WhitespaceManager::replaceWhitespaceInToken(
const FormatToken &Tok, unsigned Offset, unsigned ReplaceChars,
StringRef PreviousPostfix, StringRef CurrentPrefix, bool InPPDirective,
unsigned Newlines, int Spaces) {
- if (Tok.Finalized)
+ if (Tok.Finalized || (Tok.MacroCtx && Tok.MacroCtx->Role == MR_ExpandedArg))
return;
SourceLocation Start = Tok.getStartOfNonWhitespace().getLocWithOffset(Offset);
Changes.push_back(
@@ -96,6 +107,7 @@ const tooling::Replacements &WhitespaceManager::generateReplacements() {
llvm::sort(Changes, Change::IsBeforeInFile(SourceMgr));
calculateLineBreakInformation();
alignConsecutiveMacros();
+ alignConsecutiveShortCaseStatements();
alignConsecutiveDeclarations();
alignConsecutiveBitFields();
alignConsecutiveAssignments();
@@ -161,11 +173,12 @@ void WhitespaceManager::calculateLineBreakInformation() {
// If there are multiple changes in this token, sum up all the changes until
// the end of the line.
- if (Changes[i - 1].IsInsideToken && Changes[i - 1].NewlinesBefore == 0)
+ if (Changes[i - 1].IsInsideToken && Changes[i - 1].NewlinesBefore == 0) {
LastOutsideTokenChange->TokenLength +=
Changes[i - 1].TokenLength + Changes[i - 1].Spaces;
- else
+ } else {
LastOutsideTokenChange = &Changes[i - 1];
+ }
Changes[i].PreviousEndOfTokenColumn =
Changes[i - 1].StartOfTokenColumn + Changes[i - 1].TokenLength;
@@ -221,13 +234,12 @@ void WhitespaceManager::calculateLineBreakInformation() {
Change.StartOfBlockComment = nullptr;
Change.IndentationOffset = 0;
if (Change.Tok->is(tok::comment)) {
- if (Change.Tok->is(TT_LineComment) || !Change.IsInsideToken)
+ if (Change.Tok->is(TT_LineComment) || !Change.IsInsideToken) {
LastBlockComment = &Change;
- else {
- if ((Change.StartOfBlockComment = LastBlockComment))
- Change.IndentationOffset =
- Change.StartOfTokenColumn -
- Change.StartOfBlockComment->StartOfTokenColumn;
+ } else if ((Change.StartOfBlockComment = LastBlockComment)) {
+ Change.IndentationOffset =
+ Change.StartOfTokenColumn -
+ Change.StartOfBlockComment->StartOfTokenColumn;
}
} else {
LastBlockComment = nullptr;
@@ -254,19 +266,20 @@ void WhitespaceManager::calculateLineBreakInformation() {
Change.ConditionalsLevel = ConditionalsLevel;
- for (unsigned i = Change.Tok->FakeRParens; i > 0 && ScopeStack.size();
- --i) {
+ for (unsigned i = Change.Tok->FakeRParens; i > 0 && ScopeStack.size(); --i)
if (ScopeStack.pop_back_val())
--ConditionalsLevel;
- }
}
}
// Align a single sequence of tokens, see AlignTokens below.
+// Column - The token for which Matches returns true is moved to this column.
+// RightJustify - Whether it is the token's right end or left end that gets
+// moved to that column.
template <typename F>
static void
AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
- unsigned Column, F &&Matches,
+ unsigned Column, bool RightJustify, F &&Matches,
SmallVector<WhitespaceManager::Change, 16> &Changes) {
bool FoundMatchOnLine = false;
int Shift = 0;
@@ -294,28 +307,32 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
SmallVector<unsigned, 16> ScopeStack;
for (unsigned i = Start; i != End; ++i) {
+ auto &CurrentChange = Changes[i];
if (ScopeStack.size() != 0 &&
- Changes[i].indentAndNestingLevel() <
- Changes[ScopeStack.back()].indentAndNestingLevel())
+ CurrentChange.indentAndNestingLevel() <
+ Changes[ScopeStack.back()].indentAndNestingLevel()) {
ScopeStack.pop_back();
+ }
// Compare current token to previous non-comment token to ensure whether
// it is in a deeper scope or not.
unsigned PreviousNonComment = i - 1;
while (PreviousNonComment > Start &&
- Changes[PreviousNonComment].Tok->is(tok::comment))
- PreviousNonComment--;
- if (i != Start && Changes[i].indentAndNestingLevel() >
- Changes[PreviousNonComment].indentAndNestingLevel())
+ Changes[PreviousNonComment].Tok->is(tok::comment)) {
+ --PreviousNonComment;
+ }
+ if (i != Start && CurrentChange.indentAndNestingLevel() >
+ Changes[PreviousNonComment].indentAndNestingLevel()) {
ScopeStack.push_back(i);
+ }
bool InsideNestedScope = ScopeStack.size() != 0;
bool ContinuedStringLiteral = i > Start &&
- Changes[i].Tok->is(tok::string_literal) &&
+ CurrentChange.Tok->is(tok::string_literal) &&
Changes[i - 1].Tok->is(tok::string_literal);
bool SkipMatchCheck = InsideNestedScope || ContinuedStringLiteral;
- if (Changes[i].NewlinesBefore > 0 && !SkipMatchCheck) {
+ if (CurrentChange.NewlinesBefore > 0 && !SkipMatchCheck) {
Shift = 0;
FoundMatchOnLine = false;
}
@@ -323,70 +340,146 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
// If this is the first matching token to be aligned, remember by how many
// spaces it has to be shifted, so the rest of the changes on the line are
// shifted by the same amount
- if (!FoundMatchOnLine && !SkipMatchCheck && Matches(Changes[i])) {
+ if (!FoundMatchOnLine && !SkipMatchCheck && Matches(CurrentChange)) {
FoundMatchOnLine = true;
- Shift = Column - Changes[i].StartOfTokenColumn;
- Changes[i].Spaces += Shift;
+ Shift = Column - (RightJustify ? CurrentChange.TokenLength : 0) -
+ CurrentChange.StartOfTokenColumn;
+ CurrentChange.Spaces += Shift;
+ // FIXME: This is a workaround that should be removed when we fix
+ // http://llvm.org/PR53699. An assertion later below verifies this.
+ if (CurrentChange.NewlinesBefore == 0) {
+ CurrentChange.Spaces =
+ std::max(CurrentChange.Spaces,
+ static_cast<int>(CurrentChange.Tok->SpacesRequiredBefore));
+ }
}
+ if (Shift == 0)
+ continue;
+
// This is for function parameters that are split across multiple lines,
// as mentioned in the ScopeStack comment.
- if (InsideNestedScope && Changes[i].NewlinesBefore > 0) {
+ if (InsideNestedScope && CurrentChange.NewlinesBefore > 0) {
unsigned ScopeStart = ScopeStack.back();
auto ShouldShiftBeAdded = [&] {
// Function declaration
if (Changes[ScopeStart - 1].Tok->is(TT_FunctionDeclarationName))
return true;
+ // Lambda.
+ if (Changes[ScopeStart - 1].Tok->is(TT_LambdaLBrace))
+ return false;
+
// Continued function declaration
if (ScopeStart > Start + 1 &&
- Changes[ScopeStart - 2].Tok->is(TT_FunctionDeclarationName))
+ Changes[ScopeStart - 2].Tok->is(TT_FunctionDeclarationName)) {
return true;
+ }
- // Continued function call
+ // Continued (template) function call.
if (ScopeStart > Start + 1 &&
- Changes[ScopeStart - 2].Tok->is(tok::identifier) &&
- Changes[ScopeStart - 1].Tok->is(tok::l_paren))
+ Changes[ScopeStart - 2].Tok->isOneOf(tok::identifier,
+ TT_TemplateCloser) &&
+ Changes[ScopeStart - 1].Tok->is(tok::l_paren) &&
+ Changes[ScopeStart].Tok->isNot(TT_LambdaLSquare)) {
+ if (CurrentChange.Tok->MatchingParen &&
+ CurrentChange.Tok->MatchingParen->is(TT_LambdaLBrace)) {
+ return false;
+ }
+ if (Changes[ScopeStart].NewlinesBefore > 0)
+ return false;
+ if (CurrentChange.Tok->is(tok::l_brace) &&
+ CurrentChange.Tok->is(BK_BracedInit)) {
+ return true;
+ }
return Style.BinPackArguments;
+ }
// Ternary operator
- if (Changes[i].Tok->is(TT_ConditionalExpr))
+ if (CurrentChange.Tok->is(TT_ConditionalExpr))
return true;
// Period Initializer .XXX = 1.
- if (Changes[i].Tok->is(TT_DesignatedInitializerPeriod))
+ if (CurrentChange.Tok->is(TT_DesignatedInitializerPeriod))
return true;
// Continued ternary operator
- if (Changes[i].Tok->Previous &&
- Changes[i].Tok->Previous->is(TT_ConditionalExpr))
+ if (CurrentChange.Tok->Previous &&
+ CurrentChange.Tok->Previous->is(TT_ConditionalExpr)) {
+ return true;
+ }
+
+ // Continued direct-list-initialization using braced list.
+ if (ScopeStart > Start + 1 &&
+ Changes[ScopeStart - 2].Tok->is(tok::identifier) &&
+ Changes[ScopeStart - 1].Tok->is(tok::l_brace) &&
+ CurrentChange.Tok->is(tok::l_brace) &&
+ CurrentChange.Tok->is(BK_BracedInit)) {
+ return true;
+ }
+
+ // Continued braced list.
+ if (ScopeStart > Start + 1 &&
+ Changes[ScopeStart - 2].Tok->isNot(tok::identifier) &&
+ Changes[ScopeStart - 1].Tok->is(tok::l_brace) &&
+ CurrentChange.Tok->isNot(tok::r_brace)) {
+ for (unsigned OuterScopeStart : llvm::reverse(ScopeStack)) {
+ // Lambda.
+ if (OuterScopeStart > Start &&
+ Changes[OuterScopeStart - 1].Tok->is(TT_LambdaLBrace)) {
+ return false;
+ }
+ }
+ if (Changes[ScopeStart].NewlinesBefore > 0)
+ return false;
+ return true;
+ }
+
+ // Continued template parameter.
+ if (Changes[ScopeStart - 1].Tok->is(TT_TemplateOpener))
return true;
return false;
};
if (ShouldShiftBeAdded())
- Changes[i].Spaces += Shift;
+ CurrentChange.Spaces += Shift;
}
if (ContinuedStringLiteral)
- Changes[i].Spaces += Shift;
+ CurrentChange.Spaces += Shift;
- assert(Shift >= 0);
+ // We should not remove required spaces unless we break the line before.
+ assert(Shift > 0 || Changes[i].NewlinesBefore > 0 ||
+ CurrentChange.Spaces >=
+ static_cast<int>(Changes[i].Tok->SpacesRequiredBefore) ||
+ CurrentChange.Tok->is(tok::eof));
- Changes[i].StartOfTokenColumn += Shift;
+ CurrentChange.StartOfTokenColumn += Shift;
if (i + 1 != Changes.size())
Changes[i + 1].PreviousEndOfTokenColumn += Shift;
// If PointerAlignment is PAS_Right, keep *s or &s next to the token
- if (Style.PointerAlignment == FormatStyle::PAS_Right &&
- Changes[i].Spaces != 0) {
+ if ((Style.PointerAlignment == FormatStyle::PAS_Right ||
+ Style.ReferenceAlignment == FormatStyle::RAS_Right) &&
+ CurrentChange.Spaces != 0) {
+ const bool ReferenceNotRightAligned =
+ Style.ReferenceAlignment != FormatStyle::RAS_Right &&
+ Style.ReferenceAlignment != FormatStyle::RAS_Pointer;
for (int Previous = i - 1;
Previous >= 0 &&
Changes[Previous].Tok->getType() == TT_PointerOrReference;
--Previous) {
+ assert(Changes[Previous].Tok->isPointerOrReference());
+ if (Changes[Previous].Tok->isNot(tok::star)) {
+ if (ReferenceNotRightAligned)
+ continue;
+ } else if (Style.PointerAlignment != FormatStyle::PAS_Right) {
+ continue;
+ }
Changes[Previous + 1].Spaces -= Shift;
Changes[Previous].Spaces += Shift;
+ Changes[Previous].StartOfTokenColumn += Shift;
}
}
}
@@ -419,13 +512,31 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
// However, the special exception is that we do NOT skip function parameters
// that are split across multiple lines. See the test case in FormatTest.cpp
// that mentions "split function parameter alignment" for an example of this.
+// When the parameter RightJustify is true, the operator will be
+// right-justified. It is used to align compound assignments like `+=` and `=`.
+// When RightJustify and ACS.PadOperators are true, operators in each block to
+// be aligned will be padded on the left to the same length before aligning.
template <typename F>
-static unsigned AlignTokens(
- const FormatStyle &Style, F &&Matches,
- SmallVector<WhitespaceManager::Change, 16> &Changes, unsigned StartAt,
- const FormatStyle::AlignConsecutiveStyle &ACS = FormatStyle::ACS_None) {
- unsigned MinColumn = 0;
- unsigned MaxColumn = UINT_MAX;
+static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
+ SmallVector<WhitespaceManager::Change, 16> &Changes,
+ unsigned StartAt,
+ const FormatStyle::AlignConsecutiveStyle &ACS = {},
+ bool RightJustify = false) {
+ // We arrange each line in 3 parts. The operator to be aligned (the anchor),
+ // and text to its left and right. In the aligned text the width of each part
+ // will be the maximum of that over the block that has been aligned. Maximum
+ // widths of each part so far. When RightJustify is true and ACS.PadOperators
+ // is false, the part from start of line to the right end of the anchor.
+ // Otherwise, only the part to the left of the anchor. Including the space
+ // that exists on its left from the start. Not including the padding added on
+ // the left to right-justify the anchor.
+ unsigned WidthLeft = 0;
+ // The operator to be aligned when RightJustify is true and ACS.PadOperators
+ // is false. 0 otherwise.
+ unsigned WidthAnchor = 0;
+ // Width to the right of the anchor. Plus width of the anchor when
+ // RightJustify is false.
+ unsigned WidthRight = 0;
// Line number of the start and the end of the current token sequence.
unsigned StartOfSequence = 0;
@@ -457,63 +568,63 @@ static unsigned AlignTokens(
// We need to adjust the StartOfTokenColumn of each Change that is on a line
// containing any matching token to be aligned and located after such token.
auto AlignCurrentSequence = [&] {
- if (StartOfSequence > 0 && StartOfSequence < EndOfSequence)
- AlignTokenSequence(Style, StartOfSequence, EndOfSequence, MinColumn,
- Matches, Changes);
- MinColumn = 0;
- MaxColumn = UINT_MAX;
+ if (StartOfSequence > 0 && StartOfSequence < EndOfSequence) {
+ AlignTokenSequence(Style, StartOfSequence, EndOfSequence,
+ WidthLeft + WidthAnchor, RightJustify, Matches,
+ Changes);
+ }
+ WidthLeft = 0;
+ WidthAnchor = 0;
+ WidthRight = 0;
StartOfSequence = 0;
EndOfSequence = 0;
};
unsigned i = StartAt;
for (unsigned e = Changes.size(); i != e; ++i) {
- if (Changes[i].indentAndNestingLevel() < IndentAndNestingLevel)
+ auto &CurrentChange = Changes[i];
+ if (CurrentChange.indentAndNestingLevel() < IndentAndNestingLevel)
break;
- if (Changes[i].NewlinesBefore != 0) {
+ if (CurrentChange.NewlinesBefore != 0) {
CommasBeforeMatch = 0;
EndOfSequence = i;
// Whether to break the alignment sequence because of an empty line.
bool EmptyLineBreak =
- (Changes[i].NewlinesBefore > 1) &&
- (ACS != FormatStyle::ACS_AcrossEmptyLines) &&
- (ACS != FormatStyle::ACS_AcrossEmptyLinesAndComments);
+ (CurrentChange.NewlinesBefore > 1) && !ACS.AcrossEmptyLines;
// Whether to break the alignment sequence because of a line without a
// match.
bool NoMatchBreak =
- !FoundMatchOnLine &&
- !(LineIsComment &&
- ((ACS == FormatStyle::ACS_AcrossComments) ||
- (ACS == FormatStyle::ACS_AcrossEmptyLinesAndComments)));
+ !FoundMatchOnLine && !(LineIsComment && ACS.AcrossComments);
if (EmptyLineBreak || NoMatchBreak)
AlignCurrentSequence();
// A new line starts, re-initialize line status tracking bools.
// Keep the match state if a string literal is continued on this line.
- if (i == 0 || !Changes[i].Tok->is(tok::string_literal) ||
- !Changes[i - 1].Tok->is(tok::string_literal))
+ if (i == 0 || CurrentChange.Tok->isNot(tok::string_literal) ||
+ Changes[i - 1].Tok->isNot(tok::string_literal)) {
FoundMatchOnLine = false;
+ }
LineIsComment = true;
}
- if (!Changes[i].Tok->is(tok::comment)) {
+ if (CurrentChange.Tok->isNot(tok::comment))
LineIsComment = false;
- }
- if (Changes[i].Tok->is(tok::comma)) {
+ if (CurrentChange.Tok->is(tok::comma)) {
++CommasBeforeMatch;
- } else if (Changes[i].indentAndNestingLevel() > IndentAndNestingLevel) {
+ } else if (CurrentChange.indentAndNestingLevel() > IndentAndNestingLevel) {
// Call AlignTokens recursively, skipping over this scope block.
- unsigned StoppedAt = AlignTokens(Style, Matches, Changes, i, ACS);
+ unsigned StoppedAt =
+ AlignTokens(Style, Matches, Changes, i, ACS, RightJustify);
i = StoppedAt - 1;
continue;
}
- if (!Matches(Changes[i]))
+ if (!Matches(CurrentChange))
continue;
// If there is more than one matching token per line, or if the number of
@@ -527,29 +638,44 @@ static unsigned AlignTokens(
if (StartOfSequence == 0)
StartOfSequence = i;
- unsigned ChangeMinColumn = Changes[i].StartOfTokenColumn;
- int LineLengthAfter = Changes[i].TokenLength;
+ unsigned ChangeWidthLeft = CurrentChange.StartOfTokenColumn;
+ unsigned ChangeWidthAnchor = 0;
+ unsigned ChangeWidthRight = 0;
+ if (RightJustify)
+ if (ACS.PadOperators)
+ ChangeWidthAnchor = CurrentChange.TokenLength;
+ else
+ ChangeWidthLeft += CurrentChange.TokenLength;
+ else
+ ChangeWidthRight = CurrentChange.TokenLength;
for (unsigned j = i + 1; j != e && Changes[j].NewlinesBefore == 0; ++j) {
- LineLengthAfter += Changes[j].Spaces;
+ ChangeWidthRight += Changes[j].Spaces;
// Changes are generally 1:1 with the tokens, but a change could also be
// inside of a token, in which case it's counted more than once: once for
// the whitespace surrounding the token (!IsInsideToken) and once for
// each whitespace change within it (IsInsideToken).
// Therefore, changes inside of a token should only count the space.
if (!Changes[j].IsInsideToken)
- LineLengthAfter += Changes[j].TokenLength;
+ ChangeWidthRight += Changes[j].TokenLength;
}
- unsigned ChangeMaxColumn = Style.ColumnLimit - LineLengthAfter;
// If we are restricted by the maximum column width, end the sequence.
- if (ChangeMinColumn > MaxColumn || ChangeMaxColumn < MinColumn ||
- CommasBeforeLastMatch != CommasBeforeMatch) {
+ unsigned NewLeft = std::max(ChangeWidthLeft, WidthLeft);
+ unsigned NewAnchor = std::max(ChangeWidthAnchor, WidthAnchor);
+ unsigned NewRight = std::max(ChangeWidthRight, WidthRight);
+ // `ColumnLimit == 0` means there is no column limit.
+ if (Style.ColumnLimit != 0 &&
+ Style.ColumnLimit < NewLeft + NewAnchor + NewRight) {
AlignCurrentSequence();
StartOfSequence = i;
+ WidthLeft = ChangeWidthLeft;
+ WidthAnchor = ChangeWidthAnchor;
+ WidthRight = ChangeWidthRight;
+ } else {
+ WidthLeft = NewLeft;
+ WidthAnchor = NewAnchor;
+ WidthRight = NewRight;
}
-
- MinColumn = std::max(MinColumn, ChangeMinColumn);
- MaxColumn = std::min(MaxColumn, ChangeMaxColumn);
}
EndOfSequence = i;
@@ -564,14 +690,12 @@ static unsigned AlignTokens(
//
// We need to adjust the StartOfTokenColumn of each Change that is on a line
// containing any matching token to be aligned and located after such token.
-static void AlignMacroSequence(
+static void AlignMatchingTokenSequence(
unsigned &StartOfSequence, unsigned &EndOfSequence, unsigned &MinColumn,
- unsigned &MaxColumn, bool &FoundMatchOnLine,
- std::function<bool(const WhitespaceManager::Change &C)> AlignMacrosMatches,
+ std::function<bool(const WhitespaceManager::Change &C)> Matches,
SmallVector<WhitespaceManager::Change, 16> &Changes) {
if (StartOfSequence > 0 && StartOfSequence < EndOfSequence) {
-
- FoundMatchOnLine = false;
+ bool FoundMatchOnLine = false;
int Shift = 0;
for (unsigned I = StartOfSequence; I != EndOfSequence; ++I) {
@@ -582,8 +706,8 @@ static void AlignMacroSequence(
// If this is the first matching token to be aligned, remember by how many
// spaces it has to be shifted, so the rest of the changes on the line are
- // shifted by the same amount
- if (!FoundMatchOnLine && AlignMacrosMatches(Changes[I])) {
+ // shifted by the same amount.
+ if (!FoundMatchOnLine && Matches(Changes[I])) {
FoundMatchOnLine = true;
Shift = MinColumn - Changes[I].StartOfTokenColumn;
Changes[I].Spaces += Shift;
@@ -597,13 +721,12 @@ static void AlignMacroSequence(
}
MinColumn = 0;
- MaxColumn = UINT_MAX;
StartOfSequence = 0;
EndOfSequence = 0;
}
void WhitespaceManager::alignConsecutiveMacros() {
- if (Style.AlignConsecutiveMacros == FormatStyle::ACS_None)
+ if (!Style.AlignConsecutiveMacros.Enabled)
return;
auto AlignMacrosMatches = [](const Change &C) {
@@ -622,10 +745,10 @@ void WhitespaceManager::alignConsecutiveMacros() {
SpacesRequiredBefore = 0;
}
- if (!Current || !Current->is(tok::identifier))
+ if (!Current || Current->isNot(tok::identifier))
return false;
- if (!Current->Previous || !Current->Previous->is(tok::pp_define))
+ if (!Current->Previous || Current->Previous->isNot(tok::pp_define))
return false;
// For a macro function, 0 spaces are required between the
@@ -636,7 +759,6 @@ void WhitespaceManager::alignConsecutiveMacros() {
};
unsigned MinColumn = 0;
- unsigned MaxColumn = UINT_MAX;
// Start and end of the token sequence we're processing.
unsigned StartOfSequence = 0;
@@ -654,33 +776,27 @@ void WhitespaceManager::alignConsecutiveMacros() {
EndOfSequence = I;
// Whether to break the alignment sequence because of an empty line.
- bool EmptyLineBreak =
- (Changes[I].NewlinesBefore > 1) &&
- (Style.AlignConsecutiveMacros != FormatStyle::ACS_AcrossEmptyLines) &&
- (Style.AlignConsecutiveMacros !=
- FormatStyle::ACS_AcrossEmptyLinesAndComments);
+ bool EmptyLineBreak = (Changes[I].NewlinesBefore > 1) &&
+ !Style.AlignConsecutiveMacros.AcrossEmptyLines;
// Whether to break the alignment sequence because of a line without a
// match.
bool NoMatchBreak =
!FoundMatchOnLine &&
- !(LineIsComment && ((Style.AlignConsecutiveMacros ==
- FormatStyle::ACS_AcrossComments) ||
- (Style.AlignConsecutiveMacros ==
- FormatStyle::ACS_AcrossEmptyLinesAndComments)));
+ !(LineIsComment && Style.AlignConsecutiveMacros.AcrossComments);
- if (EmptyLineBreak || NoMatchBreak)
- AlignMacroSequence(StartOfSequence, EndOfSequence, MinColumn, MaxColumn,
- FoundMatchOnLine, AlignMacrosMatches, Changes);
+ if (EmptyLineBreak || NoMatchBreak) {
+ AlignMatchingTokenSequence(StartOfSequence, EndOfSequence, MinColumn,
+ AlignMacrosMatches, Changes);
+ }
// A new line starts, re-initialize line status tracking bools.
FoundMatchOnLine = false;
LineIsComment = true;
}
- if (!Changes[I].Tok->is(tok::comment)) {
+ if (Changes[I].Tok->isNot(tok::comment))
LineIsComment = false;
- }
if (!AlignMacrosMatches(Changes[I]))
continue;
@@ -691,22 +807,16 @@ void WhitespaceManager::alignConsecutiveMacros() {
StartOfSequence = I;
unsigned ChangeMinColumn = Changes[I].StartOfTokenColumn;
- int LineLengthAfter = -Changes[I].Spaces;
- for (unsigned j = I; j != E && Changes[j].NewlinesBefore == 0; ++j)
- LineLengthAfter += Changes[j].Spaces + Changes[j].TokenLength;
- unsigned ChangeMaxColumn = Style.ColumnLimit - LineLengthAfter;
-
MinColumn = std::max(MinColumn, ChangeMinColumn);
- MaxColumn = std::min(MaxColumn, ChangeMaxColumn);
}
EndOfSequence = I;
- AlignMacroSequence(StartOfSequence, EndOfSequence, MinColumn, MaxColumn,
- FoundMatchOnLine, AlignMacrosMatches, Changes);
+ AlignMatchingTokenSequence(StartOfSequence, EndOfSequence, MinColumn,
+ AlignMacrosMatches, Changes);
}
void WhitespaceManager::alignConsecutiveAssignments() {
- if (Style.AlignConsecutiveAssignments == FormatStyle::ACS_None)
+ if (!Style.AlignConsecutiveAssignments.Enabled)
return;
AlignTokens(
@@ -720,13 +830,26 @@ void WhitespaceManager::alignConsecutiveAssignments() {
if (&C != &Changes.back() && (&C + 1)->NewlinesBefore > 0)
return false;
- return C.Tok->is(tok::equal);
+ // Do not align operator= overloads.
+ FormatToken *Previous = C.Tok->getPreviousNonComment();
+ if (Previous && Previous->is(tok::kw_operator))
+ return false;
+
+ return Style.AlignConsecutiveAssignments.AlignCompound
+ ? C.Tok->getPrecedence() == prec::Assignment
+ : (C.Tok->is(tok::equal) ||
+ // In Verilog the '<=' is not a compound assignment, thus
+ // it is aligned even when the AlignCompound option is not
+ // set.
+ (Style.isVerilog() && C.Tok->is(tok::lessequal) &&
+ C.Tok->getPrecedence() == prec::Assignment));
},
- Changes, /*StartAt=*/0, Style.AlignConsecutiveAssignments);
+ Changes, /*StartAt=*/0, Style.AlignConsecutiveAssignments,
+ /*RightJustify=*/true);
}
void WhitespaceManager::alignConsecutiveBitFields() {
- if (Style.AlignConsecutiveBitFields == FormatStyle::ACS_None)
+ if (!Style.AlignConsecutiveBitFields.Enabled)
return;
AlignTokens(
@@ -745,16 +868,125 @@ void WhitespaceManager::alignConsecutiveBitFields() {
Changes, /*StartAt=*/0, Style.AlignConsecutiveBitFields);
}
+void WhitespaceManager::alignConsecutiveShortCaseStatements() {
+ if (!Style.AlignConsecutiveShortCaseStatements.Enabled ||
+ !Style.AllowShortCaseLabelsOnASingleLine) {
+ return;
+ }
+
+ auto Matches = [&](const Change &C) {
+ if (Style.AlignConsecutiveShortCaseStatements.AlignCaseColons)
+ return C.Tok->is(TT_CaseLabelColon);
+
+ // Ignore 'IsInsideToken' to allow matching trailing comments which
+ // need to be reflowed as that causes the token to appear in two
+ // different changes, which will cause incorrect alignment as we'll
+ // reflow early due to detecting multiple aligning tokens per line.
+ return !C.IsInsideToken && C.Tok->Previous &&
+ C.Tok->Previous->is(TT_CaseLabelColon);
+ };
+
+ unsigned MinColumn = 0;
+
+ // Empty case statements don't break the alignment, but don't necessarily
+ // match our predicate, so we need to track their column so they can push out
+ // our alignment.
+ unsigned MinEmptyCaseColumn = 0;
+
+ // Start and end of the token sequence we're processing.
+ unsigned StartOfSequence = 0;
+ unsigned EndOfSequence = 0;
+
+ // Whether a matching token has been found on the current line.
+ bool FoundMatchOnLine = false;
+
+ bool LineIsComment = true;
+ bool LineIsEmptyCase = false;
+
+ unsigned I = 0;
+ for (unsigned E = Changes.size(); I != E; ++I) {
+ if (Changes[I].NewlinesBefore != 0) {
+ // Whether to break the alignment sequence because of an empty line.
+ bool EmptyLineBreak =
+ (Changes[I].NewlinesBefore > 1) &&
+ !Style.AlignConsecutiveShortCaseStatements.AcrossEmptyLines;
+
+ // Whether to break the alignment sequence because of a line without a
+ // match.
+ bool NoMatchBreak =
+ !FoundMatchOnLine &&
+ !(LineIsComment &&
+ Style.AlignConsecutiveShortCaseStatements.AcrossComments) &&
+ !LineIsEmptyCase;
+
+ if (EmptyLineBreak || NoMatchBreak) {
+ AlignMatchingTokenSequence(StartOfSequence, EndOfSequence, MinColumn,
+ Matches, Changes);
+ MinEmptyCaseColumn = 0;
+ }
+
+ // A new line starts, re-initialize line status tracking bools.
+ FoundMatchOnLine = false;
+ LineIsComment = true;
+ LineIsEmptyCase = false;
+ }
+
+ if (Changes[I].Tok->isNot(tok::comment))
+ LineIsComment = false;
+
+ if (Changes[I].Tok->is(TT_CaseLabelColon)) {
+ LineIsEmptyCase =
+ !Changes[I].Tok->Next || Changes[I].Tok->Next->isTrailingComment();
+
+ if (LineIsEmptyCase) {
+ if (Style.AlignConsecutiveShortCaseStatements.AlignCaseColons) {
+ MinEmptyCaseColumn =
+ std::max(MinEmptyCaseColumn, Changes[I].StartOfTokenColumn);
+ } else {
+ MinEmptyCaseColumn =
+ std::max(MinEmptyCaseColumn, Changes[I].StartOfTokenColumn + 2);
+ }
+ }
+ }
+
+ if (!Matches(Changes[I]))
+ continue;
+
+ if (LineIsEmptyCase)
+ continue;
+
+ FoundMatchOnLine = true;
+
+ if (StartOfSequence == 0)
+ StartOfSequence = I;
+
+ EndOfSequence = I + 1;
+
+ MinColumn = std::max(MinColumn, Changes[I].StartOfTokenColumn);
+
+ // Allow empty case statements to push out our alignment.
+ MinColumn = std::max(MinColumn, MinEmptyCaseColumn);
+ }
+
+ AlignMatchingTokenSequence(StartOfSequence, EndOfSequence, MinColumn, Matches,
+ Changes);
+}
+
void WhitespaceManager::alignConsecutiveDeclarations() {
- if (Style.AlignConsecutiveDeclarations == FormatStyle::ACS_None)
+ if (!Style.AlignConsecutiveDeclarations.Enabled)
return;
AlignTokens(
Style,
- [](Change const &C) {
- // tok::kw_operator is necessary for aligning operator overload
- // definitions.
- if (C.Tok->isOneOf(TT_FunctionDeclarationName, tok::kw_operator))
+ [&](Change const &C) {
+ if (Style.AlignConsecutiveDeclarations.AlignFunctionPointers) {
+ for (const auto *Prev = C.Tok->Previous; Prev; Prev = Prev->Previous)
+ if (Prev->is(tok::equal))
+ return false;
+ if (C.Tok->is(TT_FunctionTypeLParen))
+ return true;
+ }
+ if (C.Tok->is(TT_FunctionDeclarationName))
return true;
if (C.Tok->isNot(TT_StartOfName))
return false;
@@ -770,8 +1002,9 @@ void WhitespaceManager::alignConsecutiveDeclarations() {
if (!Next->Tok.getIdentifierInfo())
break;
if (Next->isOneOf(TT_StartOfName, TT_FunctionDeclarationName,
- tok::kw_operator))
+ tok::kw_operator)) {
return false;
+ }
}
return true;
},
@@ -802,10 +1035,9 @@ void WhitespaceManager::alignChainedConditionals() {
// Ensure we keep alignment of wrapped operands with non-wrapped operands
// Since we actually align the operators, the wrapped operands need the
// extra offset to be properly aligned.
- for (Change &C : Changes) {
+ for (Change &C : Changes)
if (AlignWrappedOperand(C))
C.StartOfTokenColumn -= 2;
- }
AlignTokens(
Style,
[this](Change const &C) {
@@ -822,50 +1054,70 @@ void WhitespaceManager::alignChainedConditionals() {
}
void WhitespaceManager::alignTrailingComments() {
- unsigned MinColumn = 0;
- unsigned MaxColumn = UINT_MAX;
- unsigned StartOfSequence = 0;
+ if (Style.AlignTrailingComments.Kind == FormatStyle::TCAS_Never)
+ return;
+
+ const int Size = Changes.size();
+ int MinColumn = 0;
+ int StartOfSequence = 0;
bool BreakBeforeNext = false;
- unsigned Newlines = 0;
- for (unsigned i = 0, e = Changes.size(); i != e; ++i) {
- if (Changes[i].StartOfBlockComment)
+ int NewLineThreshold = 1;
+ if (Style.AlignTrailingComments.Kind == FormatStyle::TCAS_Always)
+ NewLineThreshold = Style.AlignTrailingComments.OverEmptyLines + 1;
+
+ for (int I = 0, MaxColumn = INT_MAX, Newlines = 0; I < Size; ++I) {
+ auto &C = Changes[I];
+ if (C.StartOfBlockComment)
continue;
- Newlines += Changes[i].NewlinesBefore;
- if (!Changes[i].IsTrailingComment)
+ Newlines += C.NewlinesBefore;
+ if (!C.IsTrailingComment)
continue;
- unsigned ChangeMinColumn = Changes[i].StartOfTokenColumn;
- unsigned ChangeMaxColumn;
+ if (Style.AlignTrailingComments.Kind == FormatStyle::TCAS_Leave) {
+ const int OriginalSpaces =
+ C.OriginalWhitespaceRange.getEnd().getRawEncoding() -
+ C.OriginalWhitespaceRange.getBegin().getRawEncoding() -
+ C.Tok->LastNewlineOffset;
+ assert(OriginalSpaces >= 0);
+ const auto RestoredLineLength =
+ C.StartOfTokenColumn + C.TokenLength + OriginalSpaces;
+ // If leaving comments makes the line exceed the column limit, give up to
+ // leave the comments.
+ if (RestoredLineLength >= Style.ColumnLimit && Style.ColumnLimit > 0)
+ break;
+ C.Spaces = OriginalSpaces;
+ continue;
+ }
- if (Style.ColumnLimit == 0)
- ChangeMaxColumn = UINT_MAX;
- else if (Style.ColumnLimit >= Changes[i].TokenLength)
- ChangeMaxColumn = Style.ColumnLimit - Changes[i].TokenLength;
- else
- ChangeMaxColumn = ChangeMinColumn;
+ const int ChangeMinColumn = C.StartOfTokenColumn;
+ int ChangeMaxColumn;
// If we don't create a replacement for this change, we have to consider
// it to be immovable.
- if (!Changes[i].CreateReplacement)
+ if (!C.CreateReplacement)
+ ChangeMaxColumn = ChangeMinColumn;
+ else if (Style.ColumnLimit == 0)
+ ChangeMaxColumn = INT_MAX;
+ else if (Style.ColumnLimit >= C.TokenLength)
+ ChangeMaxColumn = Style.ColumnLimit - C.TokenLength;
+ else
ChangeMaxColumn = ChangeMinColumn;
- if (i + 1 != e && Changes[i + 1].ContinuesPPDirective)
+ if (I + 1 < Size && Changes[I + 1].ContinuesPPDirective &&
+ ChangeMaxColumn >= 2) {
ChangeMaxColumn -= 2;
- // If this comment follows an } in column 0, it probably documents the
- // closing of a namespace and we don't want to align it.
- bool FollowsRBraceInColumn0 = i > 0 && Changes[i].NewlinesBefore == 0 &&
- Changes[i - 1].Tok->is(tok::r_brace) &&
- Changes[i - 1].StartOfTokenColumn == 0;
+ }
+
bool WasAlignedWithStartOfNextLine = false;
- if (Changes[i].NewlinesBefore == 1) { // A comment on its own line.
- unsigned CommentColumn = SourceMgr.getSpellingColumnNumber(
- Changes[i].OriginalWhitespaceRange.getEnd());
- for (unsigned j = i + 1; j != e; ++j) {
- if (Changes[j].Tok->is(tok::comment))
+ if (C.NewlinesBefore >= 1) { // A comment on its own line.
+ const auto CommentColumn =
+ SourceMgr.getSpellingColumnNumber(C.OriginalWhitespaceRange.getEnd());
+ for (int J = I + 1; J < Size; ++J) {
+ if (Changes[J].Tok->is(tok::comment))
continue;
- unsigned NextColumn = SourceMgr.getSpellingColumnNumber(
- Changes[j].OriginalWhitespaceRange.getEnd());
+ const auto NextColumn = SourceMgr.getSpellingColumnNumber(
+ Changes[J].OriginalWhitespaceRange.getEnd());
// The start of the next token was previously aligned with the
// start of this comment.
WasAlignedWithStartOfNextLine =
@@ -874,48 +1126,86 @@ void WhitespaceManager::alignTrailingComments() {
break;
}
}
- if (!Style.AlignTrailingComments || FollowsRBraceInColumn0) {
- alignTrailingComments(StartOfSequence, i, MinColumn);
- MinColumn = ChangeMinColumn;
- MaxColumn = ChangeMinColumn;
- StartOfSequence = i;
- } else if (BreakBeforeNext || Newlines > 1 ||
+
+ // We don't want to align comments which end a scope, which are here
+ // identified by most closing braces.
+ auto DontAlignThisComment = [](const auto *Tok) {
+ if (Tok->is(tok::semi)) {
+ Tok = Tok->getPreviousNonComment();
+ if (!Tok)
+ return false;
+ }
+ if (Tok->is(tok::r_paren)) {
+ // Back up past the parentheses and a `TT_DoWhile` that may precede.
+ Tok = Tok->MatchingParen;
+ if (!Tok)
+ return false;
+ Tok = Tok->getPreviousNonComment();
+ if (!Tok)
+ return false;
+ if (Tok->is(TT_DoWhile)) {
+ const auto *Prev = Tok->getPreviousNonComment();
+ if (!Prev) {
+ // A do-while-loop without braces.
+ return true;
+ }
+ Tok = Prev;
+ }
+ }
+
+ if (Tok->isNot(tok::r_brace))
+ return false;
+
+ while (Tok->Previous && Tok->Previous->is(tok::r_brace))
+ Tok = Tok->Previous;
+ return Tok->NewlinesBefore > 0;
+ };
+
+ if (I > 0 && C.NewlinesBefore == 0 &&
+ DontAlignThisComment(Changes[I - 1].Tok)) {
+ alignTrailingComments(StartOfSequence, I, MinColumn);
+ // Reset to initial values, but skip this change for the next alignment
+ // pass.
+ MinColumn = 0;
+ MaxColumn = INT_MAX;
+ StartOfSequence = I + 1;
+ } else if (BreakBeforeNext || Newlines > NewLineThreshold ||
(ChangeMinColumn > MaxColumn || ChangeMaxColumn < MinColumn) ||
// Break the comment sequence if the previous line did not end
// in a trailing comment.
- (Changes[i].NewlinesBefore == 1 && i > 0 &&
- !Changes[i - 1].IsTrailingComment) ||
+ (C.NewlinesBefore == 1 && I > 0 &&
+ !Changes[I - 1].IsTrailingComment) ||
WasAlignedWithStartOfNextLine) {
- alignTrailingComments(StartOfSequence, i, MinColumn);
+ alignTrailingComments(StartOfSequence, I, MinColumn);
MinColumn = ChangeMinColumn;
MaxColumn = ChangeMaxColumn;
- StartOfSequence = i;
+ StartOfSequence = I;
} else {
MinColumn = std::max(MinColumn, ChangeMinColumn);
MaxColumn = std::min(MaxColumn, ChangeMaxColumn);
}
- BreakBeforeNext = (i == 0) || (Changes[i].NewlinesBefore > 1) ||
+ BreakBeforeNext = (I == 0) || (C.NewlinesBefore > 1) ||
// Never start a sequence with a comment at the beginning
// of the line.
- (Changes[i].NewlinesBefore == 1 && StartOfSequence == i);
+ (C.NewlinesBefore == 1 && StartOfSequence == I);
Newlines = 0;
}
- alignTrailingComments(StartOfSequence, Changes.size(), MinColumn);
+ alignTrailingComments(StartOfSequence, Size, MinColumn);
}
void WhitespaceManager::alignTrailingComments(unsigned Start, unsigned End,
unsigned Column) {
for (unsigned i = Start; i != End; ++i) {
int Shift = 0;
- if (Changes[i].IsTrailingComment) {
+ if (Changes[i].IsTrailingComment)
Shift = Column - Changes[i].StartOfTokenColumn;
- }
if (Changes[i].StartOfBlockComment) {
Shift = Changes[i].IndentationOffset +
Changes[i].StartOfBlockComment->StartOfTokenColumn -
Changes[i].StartOfTokenColumn;
}
- assert(Shift >= 0);
+ if (Shift <= 0)
+ continue;
Changes[i].Spaces += Shift;
if (i + 1 != Changes.size())
Changes[i + 1].PreviousEndOfTokenColumn += Shift;
@@ -993,11 +1283,14 @@ void WhitespaceManager::alignArrayInitializers(unsigned Start, unsigned End) {
void WhitespaceManager::alignArrayInitializersRightJustified(
CellDescriptions &&CellDescs) {
- auto &Cells = CellDescs.Cells;
+ if (!CellDescs.isRectangular())
+ return;
+ const int BracePadding = Style.Cpp11BracedListStyle ? 0 : 1;
+ auto &Cells = CellDescs.Cells;
// Now go through and fixup the spaces.
auto *CellIter = Cells.begin();
- for (auto i = 0U; i < CellDescs.CellCount; i++, ++CellIter) {
+ for (auto i = 0U; i < CellDescs.CellCounts[0]; ++i, ++CellIter) {
unsigned NetWidth = 0U;
if (isSplitCell(*CellIter))
NetWidth = getNetWidth(Cells.begin(), CellIter, CellDescs.InitialSpaces);
@@ -1007,28 +1300,32 @@ void WhitespaceManager::alignArrayInitializersRightJustified(
// So in here we want to see if there is a brace that falls
// on a line that was split. If so on that line we make sure that
// the spaces in front of the brace are enough.
- Changes[CellIter->Index].NewlinesBefore = 0;
- Changes[CellIter->Index].Spaces = 0;
- for (const auto *Next = CellIter->NextColumnElement; Next != nullptr;
- Next = Next->NextColumnElement) {
- Changes[Next->Index].Spaces = 0;
- Changes[Next->Index].NewlinesBefore = 0;
- }
+ const auto *Next = CellIter;
+ do {
+ const FormatToken *Previous = Changes[Next->Index].Tok->Previous;
+ if (Previous && Previous->isNot(TT_LineComment)) {
+ Changes[Next->Index].Spaces = BracePadding;
+ Changes[Next->Index].NewlinesBefore = 0;
+ }
+ Next = Next->NextColumnElement;
+ } while (Next);
// Unless the array is empty, we need the position of all the
// immediately adjacent cells
if (CellIter != Cells.begin()) {
auto ThisNetWidth =
getNetWidth(Cells.begin(), CellIter, CellDescs.InitialSpaces);
- auto MaxNetWidth =
- getMaximumNetWidth(Cells.begin(), CellIter, CellDescs.InitialSpaces,
- CellDescs.CellCount);
+ auto MaxNetWidth = getMaximumNetWidth(
+ Cells.begin(), CellIter, CellDescs.InitialSpaces,
+ CellDescs.CellCounts[0], CellDescs.CellCounts.size());
if (ThisNetWidth < MaxNetWidth)
Changes[CellIter->Index].Spaces = (MaxNetWidth - ThisNetWidth);
auto RowCount = 1U;
auto Offset = std::distance(Cells.begin(), CellIter);
- for (const auto *Next = CellIter->NextColumnElement; Next != nullptr;
+ for (const auto *Next = CellIter->NextColumnElement; Next;
Next = Next->NextColumnElement) {
- auto *Start = (Cells.begin() + RowCount * CellDescs.CellCount);
+ if (RowCount >= CellDescs.CellCounts.size())
+ break;
+ auto *Start = (Cells.begin() + RowCount * CellDescs.CellCounts[0]);
auto *End = Start + Offset;
ThisNetWidth = getNetWidth(Start, End, CellDescs.InitialSpaces);
if (ThisNetWidth < MaxNetWidth)
@@ -1042,16 +1339,16 @@ void WhitespaceManager::alignArrayInitializersRightJustified(
NetWidth;
if (Changes[CellIter->Index].NewlinesBefore == 0) {
Changes[CellIter->Index].Spaces = (CellWidth - (ThisWidth + NetWidth));
- Changes[CellIter->Index].Spaces += (i > 0) ? 1 : 0;
+ Changes[CellIter->Index].Spaces += (i > 0) ? 1 : BracePadding;
}
alignToStartOfCell(CellIter->Index, CellIter->EndIndex);
- for (const auto *Next = CellIter->NextColumnElement; Next != nullptr;
+ for (const auto *Next = CellIter->NextColumnElement; Next;
Next = Next->NextColumnElement) {
ThisWidth =
calculateCellWidth(Next->Index, Next->EndIndex, true) + NetWidth;
if (Changes[Next->Index].NewlinesBefore == 0) {
Changes[Next->Index].Spaces = (CellWidth - ThisWidth);
- Changes[Next->Index].Spaces += (i > 0) ? 1 : 0;
+ Changes[Next->Index].Spaces += (i > 0) ? 1 : BracePadding;
}
alignToStartOfCell(Next->Index, Next->EndIndex);
}
@@ -1061,37 +1358,46 @@ void WhitespaceManager::alignArrayInitializersRightJustified(
void WhitespaceManager::alignArrayInitializersLeftJustified(
CellDescriptions &&CellDescs) {
- auto &Cells = CellDescs.Cells;
+ if (!CellDescs.isRectangular())
+ return;
+
+ const int BracePadding = Style.Cpp11BracedListStyle ? 0 : 1;
+ auto &Cells = CellDescs.Cells;
// Now go through and fixup the spaces.
auto *CellIter = Cells.begin();
- // The first cell needs to be against the left brace.
- if (Changes[CellIter->Index].NewlinesBefore == 0)
- Changes[CellIter->Index].Spaces = 0;
- else
- Changes[CellIter->Index].Spaces = CellDescs.InitialSpaces;
+ // The first cell of every row needs to be against the left brace.
+ for (const auto *Next = CellIter; Next; Next = Next->NextColumnElement) {
+ auto &Change = Changes[Next->Index];
+ Change.Spaces =
+ Change.NewlinesBefore == 0 ? BracePadding : CellDescs.InitialSpaces;
+ }
++CellIter;
- for (auto i = 1U; i < CellDescs.CellCount; i++, ++CellIter) {
+ for (auto i = 1U; i < CellDescs.CellCounts[0]; i++, ++CellIter) {
auto MaxNetWidth = getMaximumNetWidth(
- Cells.begin(), CellIter, CellDescs.InitialSpaces, CellDescs.CellCount);
+ Cells.begin(), CellIter, CellDescs.InitialSpaces,
+ CellDescs.CellCounts[0], CellDescs.CellCounts.size());
auto ThisNetWidth =
getNetWidth(Cells.begin(), CellIter, CellDescs.InitialSpaces);
if (Changes[CellIter->Index].NewlinesBefore == 0) {
Changes[CellIter->Index].Spaces =
MaxNetWidth - ThisNetWidth +
- (Changes[CellIter->Index].Tok->isNot(tok::r_brace) ? 1 : 0);
+ (Changes[CellIter->Index].Tok->isNot(tok::r_brace) ? 1
+ : BracePadding);
}
auto RowCount = 1U;
auto Offset = std::distance(Cells.begin(), CellIter);
- for (const auto *Next = CellIter->NextColumnElement; Next != nullptr;
+ for (const auto *Next = CellIter->NextColumnElement; Next;
Next = Next->NextColumnElement) {
- auto *Start = (Cells.begin() + RowCount * CellDescs.CellCount);
+ if (RowCount >= CellDescs.CellCounts.size())
+ break;
+ auto *Start = (Cells.begin() + RowCount * CellDescs.CellCounts[0]);
auto *End = Start + Offset;
auto ThisNetWidth = getNetWidth(Start, End, CellDescs.InitialSpaces);
if (Changes[Next->Index].NewlinesBefore == 0) {
Changes[Next->Index].Spaces =
MaxNetWidth - ThisNetWidth +
- (Changes[Next->Index].Tok->isNot(tok::r_brace) ? 1 : 0);
+ (Changes[Next->Index].Tok->isNot(tok::r_brace) ? 1 : BracePadding);
}
++RowCount;
}
@@ -1101,7 +1407,7 @@ void WhitespaceManager::alignArrayInitializersLeftJustified(
bool WhitespaceManager::isSplitCell(const CellDescription &Cell) {
if (Cell.HasSplit)
return true;
- for (const auto *Next = Cell.NextColumnElement; Next != nullptr;
+ for (const auto *Next = Cell.NextColumnElement; Next;
Next = Next->NextColumnElement) {
if (Next->HasSplit)
return true;
@@ -1114,7 +1420,7 @@ WhitespaceManager::CellDescriptions WhitespaceManager::getCells(unsigned Start,
unsigned Depth = 0;
unsigned Cell = 0;
- unsigned CellCount = 0;
+ SmallVector<unsigned> CellCounts;
unsigned InitialSpaces = 0;
unsigned InitialTokenLength = 0;
unsigned EndSpaces = 0;
@@ -1146,28 +1452,32 @@ WhitespaceManager::CellDescriptions WhitespaceManager::getCells(unsigned Start,
} else if (C.Tok->is(tok::comma)) {
if (!Cells.empty())
Cells.back().EndIndex = i;
- Cell++;
+ if (const auto *Next = C.Tok->getNextNonComment();
+ Next && Next->isNot(tok::r_brace)) { // dangling comma
+ ++Cell;
+ }
}
} else if (Depth == 1) {
if (C.Tok == MatchingParen) {
if (!Cells.empty())
Cells.back().EndIndex = i;
Cells.push_back(CellDescription{i, ++Cell, i + 1, false, nullptr});
- CellCount = Cell + 1;
+ CellCounts.push_back(C.Tok->Previous->isNot(tok::comma) ? Cell + 1
+ : Cell);
// Go to the next non-comment and ensure there is a break in front
const auto *NextNonComment = C.Tok->getNextNonComment();
while (NextNonComment->is(tok::comma))
NextNonComment = NextNonComment->getNextNonComment();
auto j = i;
while (Changes[j].Tok != NextNonComment && j < End)
- j++;
+ ++j;
if (j < End && Changes[j].NewlinesBefore == 0 &&
Changes[j].Tok->isNot(tok::r_brace)) {
Changes[j].NewlinesBefore = 1;
// Account for the added token lengths
Changes[j].Spaces = InitialSpaces - InitialTokenLength;
}
- } else if (C.Tok->is(tok::comment)) {
+ } else if (C.Tok->is(tok::comment) && C.Tok->NewlinesBefore == 0) {
// Trailing comments stay at a space past the last token
C.Spaces = Changes[i - 1].Tok->is(tok::comma) ? 1 : 2;
} else if (C.Tok->is(tok::l_brace)) {
@@ -1190,6 +1500,17 @@ WhitespaceManager::CellDescriptions WhitespaceManager::getCells(unsigned Start,
// So if we split a line previously and the tail line + this token is
// less then the column limit we remove the split here and just put
// the column start at a space past the comma
+ //
+ // FIXME This if branch covers the cases where the column is not
+ // the first column. This leads to weird pathologies like the formatting
+ // auto foo = Items{
+ // Section{
+ // 0, bar(),
+ // }
+ // };
+ // Well if it doesn't lead to that it's indicative that the line
+ // breaking should be revisited. Unfortunately alot of other options
+ // interact with this
auto j = i - 1;
if ((j - 1) > Start && Changes[j].Tok->is(tok::comma) &&
Changes[j - 1].NewlinesBefore > 0) {
@@ -1212,7 +1533,7 @@ WhitespaceManager::CellDescriptions WhitespaceManager::getCells(unsigned Start,
}
}
- return linkCells({Cells, CellCount, InitialSpaces});
+ return linkCells({Cells, CellCounts, InitialSpaces});
}
unsigned WhitespaceManager::calculateCellWidth(unsigned Start, unsigned End,
@@ -1232,18 +1553,16 @@ void WhitespaceManager::alignToStartOfCell(unsigned Start, unsigned End) {
return;
// If the line is broken anywhere in there make sure everything
// is aligned to the parent
- for (auto i = Start + 1; i < End; i++) {
+ for (auto i = Start + 1; i < End; i++)
if (Changes[i].NewlinesBefore > 0)
Changes[i].Spaces = Changes[Start].Spaces;
- }
}
WhitespaceManager::CellDescriptions
WhitespaceManager::linkCells(CellDescriptions &&CellDesc) {
auto &Cells = CellDesc.Cells;
for (auto *CellIter = Cells.begin(); CellIter != Cells.end(); ++CellIter) {
- if (CellIter->NextColumnElement == nullptr &&
- ((CellIter + 1) != Cells.end())) {
+ if (!CellIter->NextColumnElement && (CellIter + 1) != Cells.end()) {
for (auto *NextIter = CellIter + 1; NextIter != Cells.end(); ++NextIter) {
if (NextIter->Cell == CellIter->Cell) {
CellIter->NextColumnElement = &(*NextIter);
@@ -1259,21 +1578,70 @@ void WhitespaceManager::generateChanges() {
for (unsigned i = 0, e = Changes.size(); i != e; ++i) {
const Change &C = Changes[i];
if (i > 0) {
- assert(Changes[i - 1].OriginalWhitespaceRange.getBegin() !=
- C.OriginalWhitespaceRange.getBegin() &&
- "Generating two replacements for the same location");
+ auto Last = Changes[i - 1].OriginalWhitespaceRange;
+ auto New = Changes[i].OriginalWhitespaceRange;
+ // Do not generate two replacements for the same location. As a special
+ // case, it is allowed if there is a replacement for the empty range
+ // between 2 tokens and another non-empty range at the start of the second
+ // token. We didn't implement logic to combine replacements for 2
+ // consecutive source ranges into a single replacement, because the
+ // program works fine without it.
+ //
+ // We can't eliminate empty original whitespace ranges. They appear when
+ // 2 tokens have no whitespace in between in the input. It does not
+ // matter whether whitespace is to be added. If no whitespace is to be
+ // added, the replacement will be empty, and it gets eliminated after this
+ // step in storeReplacement. For example, if the input is `foo();`,
+ // there will be a replacement for the range between every consecutive
+ // pair of tokens.
+ //
+ // A replacement at the start of a token can be added by
+ // BreakableStringLiteralUsingOperators::insertBreak when it adds braces
+ // around the string literal. Say Verilog code is being formatted and the
+ // first line is to become the next 2 lines.
+ // x("long string");
+ // x({"long ",
+ // "string"});
+ // There will be a replacement for the empty range between the parenthesis
+ // and the string and another replacement for the quote character. The
+ // replacement for the empty range between the parenthesis and the quote
+ // comes from ContinuationIndenter::addTokenOnCurrentLine when it changes
+ // the original empty range between the parenthesis and the string to
+ // another empty one. The replacement for the quote character comes from
+ // BreakableStringLiteralUsingOperators::insertBreak when it adds the
+ // brace. In the example, the replacement for the empty range is the same
+ // as the original text. However, eliminating replacements that are same
+ // as the original does not help in general. For example, a newline can
+ // be inserted, causing the first line to become the next 3 lines.
+ // xxxxxxxxxxx("long string");
+ // xxxxxxxxxxx(
+ // {"long ",
+ // "string"});
+ // In that case, the empty range between the parenthesis and the string
+ // will be replaced by a newline and 4 spaces. So we will still have to
+ // deal with a replacement for an empty source range followed by a
+ // replacement for a non-empty source range.
+ if (Last.getBegin() == New.getBegin() &&
+ (Last.getEnd() != Last.getBegin() ||
+ New.getEnd() == New.getBegin())) {
+ continue;
+ }
}
if (C.CreateReplacement) {
std::string ReplacementText = C.PreviousLinePostfix;
- if (C.ContinuesPPDirective)
+ if (C.ContinuesPPDirective) {
appendEscapedNewlineText(ReplacementText, C.NewlinesBefore,
C.PreviousEndOfTokenColumn,
C.EscapedNewlineColumn);
- else
+ } else {
appendNewlineText(ReplacementText, C.NewlinesBefore);
+ }
+ // FIXME: This assert should hold if we computed the column correctly.
+ // assert((int)C.StartOfTokenColumn >= C.Spaces);
appendIndentText(
ReplacementText, C.Tok->IndentLevel, std::max(0, C.Spaces),
- C.StartOfTokenColumn - std::max(0, C.Spaces), C.IsAligned);
+ std::max((int)C.StartOfTokenColumn, C.Spaces) - std::max(0, C.Spaces),
+ C.IsAligned);
ReplacementText.append(C.CurrentLinePrefix);
storeReplacement(C.OriginalWhitespaceRange, ReplacementText);
}
@@ -1285,8 +1653,9 @@ void WhitespaceManager::storeReplacement(SourceRange Range, StringRef Text) {
SourceMgr.getFileOffset(Range.getBegin());
// Don't create a replacement, if it does not change anything.
if (StringRef(SourceMgr.getCharacterData(Range.getBegin()),
- WhitespaceLength) == Text)
+ WhitespaceLength) == Text) {
return;
+ }
auto Err = Replaces.add(tooling::Replacement(
SourceMgr, CharSourceRange::getCharRange(Range), Text));
// FIXME: better error handling. For now, just print an error message in the
@@ -1299,8 +1668,13 @@ void WhitespaceManager::storeReplacement(SourceRange Range, StringRef Text) {
void WhitespaceManager::appendNewlineText(std::string &Text,
unsigned Newlines) {
- for (unsigned i = 0; i < Newlines; ++i)
- Text.append(UseCRLF ? "\r\n" : "\n");
+ if (UseCRLF) {
+ Text.reserve(Text.size() + 2 * Newlines);
+ for (unsigned i = 0; i < Newlines; ++i)
+ Text.append("\r\n");
+ } else {
+ Text.append(Newlines, '\n');
+ }
}
void WhitespaceManager::appendEscapedNewlineText(
diff --git a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h
index 4f8f95040af6..8ac73305871a 100644
--- a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h
+++ b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h
@@ -45,6 +45,9 @@ public:
bool useCRLF() const { return UseCRLF; }
+ /// Infers whether the input is using CRLF.
+ static bool inputUsesCRLF(StringRef Text, bool DefaultToCRLF);
+
/// Replaces the whitespace in front of \p Tok. Only call once for
/// each \c AnnotatedToken.
///
@@ -52,7 +55,7 @@ public:
/// this replacement. It is needed for determining how \p Spaces is turned
/// into tabs and spaces for some format styles.
void replaceWhitespace(FormatToken &Tok, unsigned Newlines, unsigned Spaces,
- unsigned StartOfTokenColumn, bool isAligned = false,
+ unsigned StartOfTokenColumn, bool IsAligned = false,
bool InPPDirective = false);
/// Adds information about an unchangeable token's whitespace.
@@ -193,8 +196,20 @@ private:
struct CellDescriptions {
SmallVector<CellDescription> Cells;
- unsigned CellCount = 0;
+ SmallVector<unsigned> CellCounts;
unsigned InitialSpaces = 0;
+
+ // Determine if every row in the array
+ // has the same number of columns.
+ bool isRectangular() const {
+ if (CellCounts.size() < 2)
+ return false;
+
+ for (auto NumberOfColumns : CellCounts)
+ if (NumberOfColumns != CellCounts[0])
+ return false;
+ return true;
+ }
};
/// Calculate \c IsTrailingComment, \c TokenLength for the last tokens
@@ -217,6 +232,9 @@ private:
/// Align consecutive declarations over all \c Changes.
void alignChainedConditionals();
+ /// Align consecutive short case statements over all \c Changes.
+ void alignConsecutiveShortCaseStatements();
+
/// Align trailing comments over all \c Changes.
void alignTrailingComments();
@@ -242,7 +260,7 @@ private:
/// as described by \p CellDescs.
void alignArrayInitializersRightJustified(CellDescriptions &&CellDescs);
- /// Align Array Initializers being careful to leftt justify the columns
+ /// Align Array Initializers being careful to left justify the columns
/// as described by \p CellDescs.
void alignArrayInitializersLeftJustified(CellDescriptions &&CellDescs);
@@ -257,13 +275,14 @@ private:
/// Does this \p Cell contain a split element?
static bool isSplitCell(const CellDescription &Cell);
- /// Get the width of the preceeding cells from \p Start to \p End.
+ /// Get the width of the preceding cells from \p Start to \p End.
template <typename I>
auto getNetWidth(const I &Start, const I &End, unsigned InitialSpaces) const {
auto NetWidth = InitialSpaces;
for (auto PrevIter = Start; PrevIter != End; ++PrevIter) {
// If we broke the line the initial spaces are already
// accounted for.
+ assert(PrevIter->Index < Changes.size());
if (Changes[PrevIter->Index].NewlinesBefore > 0)
NetWidth = 0;
NetWidth +=
@@ -279,7 +298,7 @@ private:
calculateCellWidth(CellIter->Index, CellIter->EndIndex, true);
if (Changes[CellIter->Index].NewlinesBefore == 0)
CellWidth += NetWidth;
- for (const auto *Next = CellIter->NextColumnElement; Next != nullptr;
+ for (const auto *Next = CellIter->NextColumnElement; Next;
Next = Next->NextColumnElement) {
auto ThisWidth = calculateCellWidth(Next->Index, Next->EndIndex, true);
if (Changes[Next->Index].NewlinesBefore == 0)
@@ -292,13 +311,15 @@ private:
/// Get The maximum width of all columns to a given cell.
template <typename I>
unsigned getMaximumNetWidth(const I &CellStart, const I &CellStop,
- unsigned InitialSpaces,
- unsigned CellCount) const {
+ unsigned InitialSpaces, unsigned CellCount,
+ unsigned MaxRowCount) const {
auto MaxNetWidth = getNetWidth(CellStart, CellStop, InitialSpaces);
auto RowCount = 1U;
auto Offset = std::distance(CellStart, CellStop);
- for (const auto *Next = CellStop->NextColumnElement; Next != nullptr;
+ for (const auto *Next = CellStop->NextColumnElement; Next;
Next = Next->NextColumnElement) {
+ if (RowCount >= MaxRowCount)
+ break;
auto Start = (CellStart + RowCount * CellCount);
auto End = Start + Offset;
MaxNetWidth =
diff --git a/contrib/llvm-project/clang/lib/Frontend/ASTConsumers.cpp b/contrib/llvm-project/clang/lib/Frontend/ASTConsumers.cpp
index a73cc8876d5d..7b58eaa04df9 100644
--- a/contrib/llvm-project/clang/lib/Frontend/ASTConsumers.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/ASTConsumers.cpp
@@ -57,8 +57,11 @@ namespace {
bool ShowColors = Out.has_colors();
if (ShowColors)
Out.changeColor(raw_ostream::BLUE);
- Out << (OutputKind != Print ? "Dumping " : "Printing ") << getName(D)
- << ":\n";
+
+ if (OutputFormat == ADOF_Default)
+ Out << (OutputKind != Print ? "Dumping " : "Printing ") << getName(D)
+ << ":\n";
+
if (ShowColors)
Out.resetColor();
print(D);
@@ -180,21 +183,20 @@ std::unique_ptr<ASTConsumer> clang::CreateASTDeclNodeLister() {
/// ASTViewer - AST Visualization
namespace {
- class ASTViewer : public ASTConsumer {
- ASTContext *Context;
- public:
- void Initialize(ASTContext &Context) override {
- this->Context = &Context;
- }
+class ASTViewer : public ASTConsumer {
+ ASTContext *Context = nullptr;
- bool HandleTopLevelDecl(DeclGroupRef D) override {
- for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I)
- HandleTopLevelSingleDecl(*I);
- return true;
- }
+public:
+ void Initialize(ASTContext &Context) override { this->Context = &Context; }
- void HandleTopLevelSingleDecl(Decl *D);
- };
+ bool HandleTopLevelDecl(DeclGroupRef D) override {
+ for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I)
+ HandleTopLevelSingleDecl(*I);
+ return true;
+ }
+
+ void HandleTopLevelSingleDecl(Decl *D);
+};
}
void ASTViewer::HandleTopLevelSingleDecl(Decl *D) {
diff --git a/contrib/llvm-project/clang/lib/Frontend/ASTMerge.cpp b/contrib/llvm-project/clang/lib/Frontend/ASTMerge.cpp
index 14d781ccdf93..1e3a5c04c4e9 100644
--- a/contrib/llvm-project/clang/lib/Frontend/ASTMerge.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/ASTMerge.cpp
@@ -48,7 +48,7 @@ void ASTMergeAction::ExecuteAction() {
/*ShouldOwnClient=*/true));
std::unique_ptr<ASTUnit> Unit = ASTUnit::LoadFromASTFile(
ASTFiles[I], CI.getPCHContainerReader(), ASTUnit::LoadEverything, Diags,
- CI.getFileSystemOpts(), false);
+ CI.getFileSystemOpts(), CI.getHeaderSearchOptsPtr());
if (!Unit)
continue;
diff --git a/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp b/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp
index 996783aa9cf4..f09a01b5dd4a 100644
--- a/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp
@@ -66,11 +66,8 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
@@ -85,8 +82,8 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/FileUtilities.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
@@ -98,6 +95,7 @@
#include <cstdlib>
#include <memory>
#include <mutex>
+#include <optional>
#include <string>
#include <tuple>
#include <utility>
@@ -322,6 +320,7 @@ static uint64_t getDeclShowContexts(const NamedDecl *ND,
if (ID->getDefinition())
Contexts |= (1LL << CodeCompletionContext::CCC_Expression);
Contexts |= (1LL << CodeCompletionContext::CCC_ObjCInterfaceName);
+ Contexts |= (1LL << CodeCompletionContext::CCC_ObjCClassForwardDecl);
}
// Deal with tag names.
@@ -524,6 +523,7 @@ class ASTInfoCollector : public ASTReaderListener {
IntrusiveRefCntPtr<TargetInfo> &Target;
unsigned &Counter;
bool InitializedLanguage = false;
+ bool InitializedHeaderSearchPaths = false;
public:
ASTInfoCollector(Preprocessor &PP, ASTContext *Context,
@@ -550,11 +550,43 @@ public:
bool ReadHeaderSearchOptions(const HeaderSearchOptions &HSOpts,
StringRef SpecificModuleCachePath,
bool Complain) override {
+ // llvm::SaveAndRestore doesn't support bit field.
+ auto ForceCheckCXX20ModulesInputFiles =
+ this->HSOpts.ForceCheckCXX20ModulesInputFiles;
+ llvm::SaveAndRestore X(this->HSOpts.UserEntries);
+ llvm::SaveAndRestore Y(this->HSOpts.SystemHeaderPrefixes);
+ llvm::SaveAndRestore Z(this->HSOpts.VFSOverlayFiles);
+
this->HSOpts = HSOpts;
+ this->HSOpts.ForceCheckCXX20ModulesInputFiles =
+ ForceCheckCXX20ModulesInputFiles;
+
return false;
}
- bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts, bool Complain,
+ bool ReadHeaderSearchPaths(const HeaderSearchOptions &HSOpts,
+ bool Complain) override {
+ if (InitializedHeaderSearchPaths)
+ return false;
+
+ this->HSOpts.UserEntries = HSOpts.UserEntries;
+ this->HSOpts.SystemHeaderPrefixes = HSOpts.SystemHeaderPrefixes;
+ this->HSOpts.VFSOverlayFiles = HSOpts.VFSOverlayFiles;
+
+ // Initialize the FileManager. We can't do this in update(), since that
+ // performs the initialization too late (once both target and language
+ // options are read).
+ PP.getFileManager().setVirtualFileSystem(createVFSFromOverlayFiles(
+ HSOpts.VFSOverlayFiles, PP.getDiagnostics(),
+ PP.getFileManager().getVirtualFileSystemPtr()));
+
+ InitializedHeaderSearchPaths = true;
+
+ return false;
+ }
+
+ bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
+ bool ReadMacros, bool Complain,
std::string &SuggestedPredefines) override {
this->PPOpts = PPOpts;
return false;
@@ -705,10 +737,10 @@ void FilterAndStoreDiagnosticConsumer::HandleDiagnostic(
}
if (StandaloneDiags) {
- llvm::Optional<StoredDiagnostic> StoredDiag = None;
+ std::optional<StoredDiagnostic> StoredDiag;
if (!ResultDiag) {
StoredDiag.emplace(Level, Info);
- ResultDiag = StoredDiag.getPointer();
+ ResultDiag = &*StoredDiag;
}
StandaloneDiags->push_back(
makeStandaloneDiagnostic(*LangOpts, *ResultDiag));
@@ -757,9 +789,10 @@ void ASTUnit::ConfigureDiags(IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
const std::string &Filename, const PCHContainerReader &PCHContainerRdr,
WhatToLoad ToLoad, IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
- const FileSystemOptions &FileSystemOpts, bool UseDebugInfo,
- bool OnlyLocalDecls, CaptureDiagsKind CaptureDiagnostics,
- bool AllowASTWithCompilerErrors, bool UserFilesAreVolatile) {
+ const FileSystemOptions &FileSystemOpts,
+ std::shared_ptr<HeaderSearchOptions> HSOpts, bool OnlyLocalDecls,
+ CaptureDiagsKind CaptureDiagnostics, bool AllowASTWithCompilerErrors,
+ bool UserFilesAreVolatile, IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
std::unique_ptr<ASTUnit> AST(new ASTUnit(true));
// Recover resources if we crash before exiting this method.
@@ -775,16 +808,14 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
AST->OnlyLocalDecls = OnlyLocalDecls;
AST->CaptureDiagnostics = CaptureDiagnostics;
AST->Diagnostics = Diags;
- IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS =
- llvm::vfs::getRealFileSystem();
AST->FileMgr = new FileManager(FileSystemOpts, VFS);
AST->UserFilesAreVolatile = UserFilesAreVolatile;
AST->SourceMgr = new SourceManager(AST->getDiagnostics(),
AST->getFileManager(),
UserFilesAreVolatile);
AST->ModuleCache = new InMemoryModuleCache;
- AST->HSOpts = std::make_shared<HeaderSearchOptions>();
- AST->HSOpts->ModuleFormat = std::string(PCHContainerRdr.getFormat());
+ AST->HSOpts = HSOpts ? HSOpts : std::make_shared<HeaderSearchOptions>();
+ AST->HSOpts->ModuleFormat = std::string(PCHContainerRdr.getFormats().front());
AST->HeaderInfo.reset(new HeaderSearch(AST->HSOpts,
AST->getSourceManager(),
AST->getDiagnostics(),
@@ -795,7 +826,6 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
// Gather Info for preprocessor construction later on.
HeaderSearch &HeaderInfo = *AST->HeaderInfo;
- unsigned Counter;
AST->PP = std::make_shared<Preprocessor>(
AST->PPOpts, AST->getDiagnostics(), *AST->LangOpts,
@@ -817,8 +847,9 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
AST->Reader = new ASTReader(
PP, *AST->ModuleCache, AST->Ctx.get(), PCHContainerRdr, {},
/*isysroot=*/"",
- /*DisableValidation=*/disableValid, AllowASTWithCompilerErrors);
+ /*DisableValidationKind=*/disableValid, AllowASTWithCompilerErrors);
+ unsigned Counter = 0;
AST->Reader->setListener(std::make_unique<ASTInfoCollector>(
*AST->PP, AST->Ctx.get(), *AST->HSOpts, *AST->PPOpts, *AST->LangOpts,
AST->TargetOpts, AST->Target, Counter));
@@ -832,7 +863,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
AST->Ctx->setExternalSource(AST->Reader);
switch (AST->Reader->ReadAST(Filename, serialization::MK_MainFile,
- SourceLocation(), ASTReader::ARR_None)) {
+ SourceLocation(), ASTReader::ARR_None)) {
case ASTReader::Success:
break;
@@ -850,6 +881,10 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
PP.setCounterValue(Counter);
+ Module *M = HeaderInfo.lookupModule(AST->getLangOpts().CurrentModule);
+ if (M && AST->getLangOpts().isCompilingModule() && M->isNamedModule())
+ AST->Ctx->setCurrentNamedModule(M);
+
// Create an AST consumer, even though it isn't used.
if (ToLoad >= LoadASTOnly)
AST->Consumer.reset(new ASTConsumer);
@@ -1069,9 +1104,7 @@ static void
checkAndRemoveNonDriverDiags(SmallVectorImpl<StoredDiagnostic> &StoredDiags) {
// Get rid of stored diagnostics except the ones from the driver which do not
// have a source location.
- StoredDiags.erase(
- std::remove_if(StoredDiags.begin(), StoredDiags.end(), isNonDriverDiag),
- StoredDiags.end());
+ llvm::erase_if(StoredDiags, isNonDriverDiag);
}
static void checkAndSanitizeDiags(SmallVectorImpl<StoredDiagnostic> &
@@ -1116,6 +1149,7 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
// Create the compiler instance to use for building the AST.
std::unique_ptr<CompilerInstance> Clang(
new CompilerInstance(std::move(PCHContainerOps)));
+ Clang->setInvocation(CCInvocation);
// Clean up on error, disengage it if the function returns successfully.
auto CleanOnError = llvm::make_scope_exit([&]() {
@@ -1142,7 +1176,6 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
CICleanup(Clang.get());
- Clang->setInvocation(CCInvocation);
OriginalSourceFile =
std::string(Clang->getFrontendOpts().Inputs[0].getFile());
@@ -1305,7 +1338,7 @@ ASTUnit::getMainBufferWithPrecompiledPreamble(
return nullptr;
PreambleBounds Bounds = ComputePreambleBounds(
- *PreambleInvocationIn.getLangOpts(), *MainFileBuffer, MaxLines);
+ PreambleInvocationIn.getLangOpts(), *MainFileBuffer, MaxLines);
if (!Bounds.Size)
return nullptr;
@@ -1352,7 +1385,7 @@ ASTUnit::getMainBufferWithPrecompiledPreamble(
SmallVector<StoredDiagnostic, 4> NewPreambleDiags;
ASTUnitPreambleCallbacks Callbacks;
{
- llvm::Optional<CaptureDroppedDiagnostics> Capture;
+ std::optional<CaptureDroppedDiagnostics> Capture;
if (CaptureDiagnostics != CaptureDiagsKind::None)
Capture.emplace(CaptureDiagnostics, *Diagnostics, &NewPreambleDiags,
&NewPreambleDiagsStandalone);
@@ -1368,7 +1401,8 @@ ASTUnit::getMainBufferWithPrecompiledPreamble(
llvm::ErrorOr<PrecompiledPreamble> NewPreamble = PrecompiledPreamble::Build(
PreambleInvocationIn, MainFileBuffer.get(), Bounds, *Diagnostics, VFS,
- PCHContainerOps, /*StoreInMemory=*/false, Callbacks);
+ PCHContainerOps, StorePreamblesInMemory, PreambleStoragePath,
+ Callbacks);
PreambleInvocationIn.getFrontendOpts().SkipFunctionBodies =
PreviousSkipFunctionBodies;
@@ -1462,8 +1496,8 @@ StringRef ASTUnit::getMainFileName() const {
}
if (SourceMgr) {
- if (const FileEntry *
- FE = SourceMgr->getFileEntryForID(SourceMgr->getMainFileID()))
+ if (OptionalFileEntryRef FE =
+ SourceMgr->getFileEntryRefForID(SourceMgr->getMainFileID()))
return FE->getName();
}
@@ -1708,21 +1742,28 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromCompilerInvocation(
return AST;
}
-ASTUnit *ASTUnit::LoadFromCommandLine(
+std::unique_ptr<ASTUnit> ASTUnit::LoadFromCommandLine(
const char **ArgBegin, const char **ArgEnd,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
IntrusiveRefCntPtr<DiagnosticsEngine> Diags, StringRef ResourceFilesPath,
+ bool StorePreamblesInMemory, StringRef PreambleStoragePath,
bool OnlyLocalDecls, CaptureDiagsKind CaptureDiagnostics,
ArrayRef<RemappedFile> RemappedFiles, bool RemappedFilesKeepOriginalName,
unsigned PrecompilePreambleAfterNParses, TranslationUnitKind TUKind,
bool CacheCodeCompletionResults, bool IncludeBriefCommentsInCodeCompletion,
bool AllowPCHWithCompilerErrors, SkipFunctionBodiesScope SkipFunctionBodies,
bool SingleFileParse, bool UserFilesAreVolatile, bool ForSerialization,
- bool RetainExcludedConditionalBlocks,
- llvm::Optional<StringRef> ModuleFormat, std::unique_ptr<ASTUnit> *ErrAST,
+ bool RetainExcludedConditionalBlocks, std::optional<StringRef> ModuleFormat,
+ std::unique_ptr<ASTUnit> *ErrAST,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
assert(Diags.get() && "no DiagnosticsEngine was provided");
+ // If no VFS was provided, create one that tracks the physical file system.
+ // If '-working-directory' was passed as an argument, 'createInvocation' will
+ // set this as the current working directory of the VFS.
+ if (!VFS)
+ VFS = llvm::vfs::createPhysicalFileSystem();
+
SmallVector<StoredDiagnostic, 4> StoredDiagnostics;
std::shared_ptr<CompilerInvocation> CI;
@@ -1731,8 +1772,11 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
CaptureDroppedDiagnostics Capture(CaptureDiagnostics, *Diags,
&StoredDiagnostics, nullptr);
- CI = createInvocationFromCommandLine(
- llvm::makeArrayRef(ArgBegin, ArgEnd), Diags, VFS);
+ CreateInvocationOptions CIOpts;
+ CIOpts.VFS = VFS;
+ CIOpts.Diags = Diags;
+ CIOpts.ProbePrecompiled = true; // FIXME: historical default. Needed?
+ CI = createInvocation(llvm::ArrayRef(ArgBegin, ArgEnd), std::move(CIOpts));
if (!CI)
return nullptr;
}
@@ -1755,8 +1799,7 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
SkipFunctionBodies == SkipFunctionBodiesScope::PreambleAndMainFile;
if (ModuleFormat)
- CI->getHeaderSearchOpts().ModuleFormat =
- std::string(ModuleFormat.getValue());
+ CI->getHeaderSearchOpts().ModuleFormat = std::string(*ModuleFormat);
// Create the AST unit.
std::unique_ptr<ASTUnit> AST;
@@ -1766,10 +1809,10 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
ConfigureDiags(Diags, *AST, CaptureDiagnostics);
AST->Diagnostics = Diags;
AST->FileSystemOpts = CI->getFileSystemOpts();
- if (!VFS)
- VFS = llvm::vfs::getRealFileSystem();
VFS = createVFSFromCompilerInvocation(*CI, *Diags, VFS);
AST->FileMgr = new FileManager(AST->FileSystemOpts, VFS);
+ AST->StorePreamblesInMemory = StorePreamblesInMemory;
+ AST->PreambleStoragePath = PreambleStoragePath;
AST->ModuleCache = new InMemoryModuleCache;
AST->OnlyLocalDecls = OnlyLocalDecls;
AST->CaptureDiagnostics = CaptureDiagnostics;
@@ -1802,7 +1845,7 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
return nullptr;
}
- return AST.release();
+ return AST;
}
bool ASTUnit::Reparse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
@@ -1924,9 +1967,10 @@ namespace {
void ProcessOverloadCandidates(Sema &S, unsigned CurrentArg,
OverloadCandidate *Candidates,
unsigned NumCandidates,
- SourceLocation OpenParLoc) override {
+ SourceLocation OpenParLoc,
+ bool Braced) override {
Next.ProcessOverloadCandidates(S, CurrentArg, Candidates, NumCandidates,
- OpenParLoc);
+ OpenParLoc, Braced);
}
CodeCompletionAllocator &getAllocator() override {
@@ -1967,7 +2011,8 @@ static void CalculateHiddenNames(const CodeCompletionContext &Context,
case CodeCompletionContext::CCC_SymbolOrNewName:
case CodeCompletionContext::CCC_ParenthesizedExpression:
case CodeCompletionContext::CCC_ObjCInterfaceName:
- break;
+ case CodeCompletionContext::CCC_TopLevelOrExpression:
+ break;
case CodeCompletionContext::CCC_EnumTag:
case CodeCompletionContext::CCC_UnionTag:
@@ -1989,7 +2034,9 @@ static void CalculateHiddenNames(const CodeCompletionContext &Context,
case CodeCompletionContext::CCC_ObjCClassMessage:
case CodeCompletionContext::CCC_ObjCCategoryName:
case CodeCompletionContext::CCC_IncludedFile:
+ case CodeCompletionContext::CCC_Attribute:
case CodeCompletionContext::CCC_NewName:
+ case CodeCompletionContext::CCC_ObjCClassForwardDecl:
// We're looking for nothing, or we're looking for names that cannot
// be hidden.
return;
@@ -2124,7 +2171,8 @@ void ASTUnit::CodeComplete(
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
DiagnosticsEngine &Diag, LangOptions &LangOpts, SourceManager &SourceMgr,
FileManager &FileMgr, SmallVectorImpl<StoredDiagnostic> &StoredDiagnostics,
- SmallVectorImpl<const llvm::MemoryBuffer *> &OwnedBuffers) {
+ SmallVectorImpl<const llvm::MemoryBuffer *> &OwnedBuffers,
+ std::unique_ptr<SyntaxOnlyAction> Act) {
if (!Invocation)
return;
@@ -2153,7 +2201,7 @@ void ASTUnit::CodeComplete(
FrontendOpts.CodeCompletionAt.Column = Column;
// Set the language options appropriately.
- LangOpts = *CCInvocation->getLangOpts();
+ LangOpts = CCInvocation->getLangOpts();
// Spell-checking and warnings are wasteful during code-completion.
LangOpts.SpellChecking = false;
@@ -2212,10 +2260,10 @@ void ASTUnit::CodeComplete(
Clang->setCodeCompletionConsumer(AugmentedConsumer);
auto getUniqueID =
- [&FileMgr](StringRef Filename) -> Optional<llvm::sys::fs::UniqueID> {
+ [&FileMgr](StringRef Filename) -> std::optional<llvm::sys::fs::UniqueID> {
if (auto Status = FileMgr.getVirtualFileSystem().status(Filename))
return Status->getUniqueID();
- return None;
+ return std::nullopt;
};
auto hasSameUniqueID = [getUniqueID](StringRef LHS, StringRef RHS) {
@@ -2261,8 +2309,9 @@ void ASTUnit::CodeComplete(
if (!Clang->getLangOpts().Modules)
PreprocessorOpts.DetailedRecord = false;
- std::unique_ptr<SyntaxOnlyAction> Act;
- Act.reset(new SyntaxOnlyAction);
+ if (!Act)
+ Act.reset(new SyntaxOnlyAction);
+
if (Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0])) {
if (llvm::Error Err = Act->Execute()) {
consumeError(std::move(Err)); // FIXME this drops errors on the floor.
@@ -2275,16 +2324,11 @@ bool ASTUnit::Save(StringRef File) {
if (HadModuleLoaderFatalFailure)
return true;
- // Write to a temporary file and later rename it to the actual file, to avoid
- // possible race conditions.
- SmallString<128> TempPath;
- TempPath = File;
- TempPath += "-%%%%%%%%";
// FIXME: Can we somehow regenerate the stat cache here, or do we need to
// unconditionally create a stat cache when we parse the file?
- if (llvm::Error Err = llvm::writeFileAtomically(
- TempPath, File, [this](llvm::raw_ostream &Out) {
+ if (llvm::Error Err = llvm::writeToOutput(
+ File, [this](llvm::raw_ostream &Out) {
return serialize(Out) ? llvm::make_error<llvm::StringError>(
"ASTUnit serialization failed",
llvm::inconvertibleErrorCode())
@@ -2296,12 +2340,9 @@ bool ASTUnit::Save(StringRef File) {
return false;
}
-static bool serializeUnit(ASTWriter &Writer,
- SmallVectorImpl<char> &Buffer,
- Sema &S,
- bool hasErrors,
- raw_ostream &OS) {
- Writer.WriteAST(S, std::string(), nullptr, "", hasErrors);
+static bool serializeUnit(ASTWriter &Writer, SmallVectorImpl<char> &Buffer,
+ Sema &S, raw_ostream &OS) {
+ Writer.WriteAST(S, std::string(), nullptr, "");
// Write the generated bitstream to "Out".
if (!Buffer.empty())
@@ -2311,18 +2352,14 @@ static bool serializeUnit(ASTWriter &Writer,
}
bool ASTUnit::serialize(raw_ostream &OS) {
- // For serialization we are lenient if the errors were only warn-as-error kind.
- bool hasErrors = getDiagnostics().hasUncompilableErrorOccurred();
-
if (WriterData)
- return serializeUnit(WriterData->Writer, WriterData->Buffer,
- getSema(), hasErrors, OS);
+ return serializeUnit(WriterData->Writer, WriterData->Buffer, getSema(), OS);
SmallString<128> Buffer;
llvm::BitstreamWriter Stream(Buffer);
InMemoryModuleCache ModuleCache;
ASTWriter Writer(Stream, Buffer, ModuleCache, {});
- return serializeUnit(Writer, Buffer, getSema(), hasErrors, OS);
+ return serializeUnit(Writer, Buffer, getSema(), OS);
}
using SLocRemap = ContinuousRangeMap<unsigned, int, 2>;
@@ -2607,9 +2644,9 @@ bool ASTUnit::visitLocalTopLevelDecls(void *context, DeclVisitorFn Fn) {
return true;
}
-const FileEntry *ASTUnit::getPCHFile() {
+OptionalFileEntryRef ASTUnit::getPCHFile() {
if (!Reader)
- return nullptr;
+ return std::nullopt;
serialization::ModuleFile *Mod = nullptr;
Reader->getModuleManager().visit([&Mod](serialization::ModuleFile &M) {
@@ -2632,7 +2669,7 @@ const FileEntry *ASTUnit::getPCHFile() {
if (Mod)
return Mod->File;
- return nullptr;
+ return std::nullopt;
}
bool ASTUnit::isModuleFile() const {
diff --git a/contrib/llvm-project/clang/lib/Frontend/ChainedIncludesSource.cpp b/contrib/llvm-project/clang/lib/Frontend/ChainedIncludesSource.cpp
index 380eba4562b4..c1a9f25a8798 100644
--- a/contrib/llvm-project/clang/lib/Frontend/ChainedIncludesSource.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/ChainedIncludesSource.cpp
@@ -27,15 +27,15 @@
using namespace clang;
namespace {
-class ChainedIncludesSourceImpl : public ExternalSemaSource {
+class ChainedIncludesSource : public ExternalSemaSource {
public:
- ChainedIncludesSourceImpl(std::vector<std::unique_ptr<CompilerInstance>> CIs)
+ ChainedIncludesSource(std::vector<std::unique_ptr<CompilerInstance>> CIs)
: CIs(std::move(CIs)) {}
protected:
- //===----------------------------------------------------------------------===//
+ //===--------------------------------------------------------------------===//
// ExternalASTSource interface.
- //===----------------------------------------------------------------------===//
+ //===--------------------------------------------------------------------===//
/// Return the amount of memory used by memory buffers, breaking down
/// by heap-backed versus mmap'ed memory.
@@ -51,30 +51,7 @@ protected:
private:
std::vector<std::unique_ptr<CompilerInstance>> CIs;
};
-
-/// Members of ChainedIncludesSource, factored out so we can initialize
-/// them before we initialize the ExternalSemaSource base class.
-struct ChainedIncludesSourceMembers {
- ChainedIncludesSourceMembers(
- std::vector<std::unique_ptr<CompilerInstance>> CIs,
- IntrusiveRefCntPtr<ExternalSemaSource> FinalReader)
- : Impl(std::move(CIs)), FinalReader(std::move(FinalReader)) {}
- ChainedIncludesSourceImpl Impl;
- IntrusiveRefCntPtr<ExternalSemaSource> FinalReader;
-};
-
-/// Use MultiplexExternalSemaSource to dispatch all ExternalSemaSource
-/// calls to the final reader.
-class ChainedIncludesSource
- : private ChainedIncludesSourceMembers,
- public MultiplexExternalSemaSource {
-public:
- ChainedIncludesSource(std::vector<std::unique_ptr<CompilerInstance>> CIs,
- IntrusiveRefCntPtr<ExternalSemaSource> FinalReader)
- : ChainedIncludesSourceMembers(std::move(CIs), std::move(FinalReader)),
- MultiplexExternalSemaSource(Impl, *this->FinalReader) {}
-};
-}
+} // end anonymous namespace
static ASTReader *
createASTReader(CompilerInstance &CI, StringRef pchFile,
@@ -214,6 +191,8 @@ IntrusiveRefCntPtr<ExternalSemaSource> clang::createChainedIncludesSource(
if (!Reader)
return nullptr;
- return IntrusiveRefCntPtr<ChainedIncludesSource>(
- new ChainedIncludesSource(std::move(CIs), Reader));
+ auto ChainedSrc =
+ llvm::makeIntrusiveRefCnt<ChainedIncludesSource>(std::move(CIs));
+ return llvm::makeIntrusiveRefCnt<MultiplexExternalSemaSource>(
+ ChainedSrc.get(), Reader.get());
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp b/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
index c642af1849bc..a25aa88bd85e 100644
--- a/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
@@ -12,6 +12,7 @@
#include "clang/AST/Decl.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LangStandard.h"
#include "clang/Basic/SourceManager.h"
@@ -23,7 +24,9 @@
#include "clang/Frontend/FrontendAction.h"
#include "clang/Frontend/FrontendActions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/FrontendPluginRegistry.h"
#include "clang/Frontend/LogDiagnosticPrinter.h"
+#include "clang/Frontend/SARIFDiagnosticPrinter.h"
#include "clang/Frontend/SerializedDiagnosticPrinter.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Frontend/Utils.h"
@@ -36,12 +39,14 @@
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/GlobalModuleIndex.h"
#include "clang/Serialization/InMemoryModuleCache.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/Config/llvm-config.h"
#include "llvm/Support/BuryPointer.h"
#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/LockFileManager.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
@@ -50,6 +55,8 @@
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Host.h"
+#include <optional>
#include <time.h>
#include <utility>
@@ -107,26 +114,26 @@ bool CompilerInstance::createTarget() {
// Check whether AuxTarget exists, if not, then create TargetInfo for the
// other side of CUDA/OpenMP/SYCL compilation.
if (!getAuxTarget() &&
- (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice ||
+ (getLangOpts().CUDA || getLangOpts().OpenMPIsTargetDevice ||
getLangOpts().SYCLIsDevice) &&
!getFrontendOpts().AuxTriple.empty()) {
auto TO = std::make_shared<TargetOptions>();
TO->Triple = llvm::Triple::normalize(getFrontendOpts().AuxTriple);
if (getFrontendOpts().AuxTargetCPU)
- TO->CPU = getFrontendOpts().AuxTargetCPU.getValue();
+ TO->CPU = *getFrontendOpts().AuxTargetCPU;
if (getFrontendOpts().AuxTargetFeatures)
- TO->FeaturesAsWritten = getFrontendOpts().AuxTargetFeatures.getValue();
+ TO->FeaturesAsWritten = *getFrontendOpts().AuxTargetFeatures;
TO->HostTriple = getTarget().getTriple().str();
setAuxTarget(TargetInfo::CreateTargetInfo(getDiagnostics(), TO));
}
if (!getTarget().hasStrictFP() && !getLangOpts().ExpStrictFP) {
- if (getLangOpts().getFPRoundingMode() !=
- llvm::RoundingMode::NearestTiesToEven) {
+ if (getLangOpts().RoundingMath) {
getDiagnostics().Report(diag::warn_fe_backend_unsupported_fp_rounding);
- getLangOpts().setFPRoundingMode(llvm::RoundingMode::NearestTiesToEven);
+ getLangOpts().RoundingMath = false;
}
- if (getLangOpts().getFPExceptionMode() != LangOptions::FPE_Ignore) {
+ auto FPExc = getLangOpts().getFPExceptionMode();
+ if (FPExc != LangOptions::FPE_Default && FPExc != LangOptions::FPE_Ignore) {
getDiagnostics().Report(diag::warn_fe_backend_unsupported_fp_exceptions);
getLangOpts().setFPExceptionMode(LangOptions::FPE_Ignore);
}
@@ -144,9 +151,6 @@ bool CompilerInstance::createTarget() {
// created. This complexity should be lifted elsewhere.
getTarget().adjust(getDiagnostics(), getLangOpts());
- // Adjust target options based on codegen options.
- getTarget().adjustTargetOptions(getCodeGenOpts(), getTargetOpts());
-
if (auto *Aux = getAuxTarget())
getTarget().setAuxTarget(Aux);
@@ -230,7 +234,7 @@ static void collectIncludePCH(CompilerInstance &CI,
StringRef PCHInclude = PPOpts.ImplicitPCHInclude;
FileManager &FileMgr = CI.getFileManager();
- auto PCHDir = FileMgr.getDirectory(PCHInclude);
+ auto PCHDir = FileMgr.getOptionalDirectoryRef(PCHInclude);
if (!PCHDir) {
MDC->addFile(PCHInclude);
return;
@@ -238,7 +242,7 @@ static void collectIncludePCH(CompilerInstance &CI,
std::error_code EC;
SmallString<128> DirNative;
- llvm::sys::path::native((*PCHDir)->getName(), DirNative);
+ llvm::sys::path::native(PCHDir->getName(), DirNative);
llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
SimpleASTReaderListener Validator(CI.getPreprocessor());
for (llvm::vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
@@ -247,7 +251,8 @@ static void collectIncludePCH(CompilerInstance &CI,
// used here since we're not interested in validating the PCH at this time,
// but only to check whether this is a file containing an AST.
if (!ASTReader::readASTFileControlBlock(
- Dir->path(), FileMgr, CI.getPCHContainerReader(),
+ Dir->path(), FileMgr, CI.getModuleCache(),
+ CI.getPCHContainerReader(),
/*FindModuleFileExtensions=*/false, Validator,
/*ValidateDiagnosticOptions=*/false))
MDC->addFile(Dir->path());
@@ -344,6 +349,8 @@ CompilerInstance::createDiagnostics(DiagnosticOptions *Opts,
// implementing -verify.
if (Client) {
Diags->setClient(Client, ShouldOwnClient);
+ } else if (Opts->getFormat() == DiagnosticOptions::SARIF) {
+ Diags->setClient(new SARIFDiagnosticPrinter(llvm::errs(), Opts));
} else
Diags->setClient(new TextDiagnosticPrinter(llvm::errs(), Opts));
@@ -393,14 +400,8 @@ static void InitializeFileRemapping(DiagnosticsEngine &Diags,
// Remap files in the source manager (with buffers).
for (const auto &RB : InitOpts.RemappedFileBuffers) {
// Create the file entry for the file that we're mapping from.
- const FileEntry *FromFile =
- FileMgr.getVirtualFile(RB.first, RB.second->getBufferSize(), 0);
- if (!FromFile) {
- Diags.Report(diag::err_fe_remap_missing_from_file) << RB.first;
- if (!InitOpts.RetainRemappedFileBuffers)
- delete RB.second;
- continue;
- }
+ FileEntryRef FromFile =
+ FileMgr.getVirtualFileRef(RB.first, RB.second->getBufferSize(), 0);
// Override the contents of the "from" file with the contents of the
// "to" file. If the caller owns the buffers, then pass a MemoryBufferRef;
@@ -417,7 +418,7 @@ static void InitializeFileRemapping(DiagnosticsEngine &Diags,
// Remap files in the source manager (with other files).
for (const auto &RF : InitOpts.RemappedFiles) {
// Find the file that we're mapping to.
- auto ToFile = FileMgr.getFile(RF.second);
+ OptionalFileEntryRef ToFile = FileMgr.getOptionalFileRef(RF.second);
if (!ToFile) {
Diags.Report(diag::err_fe_remap_missing_to_file) << RF.first << RF.second;
continue;
@@ -425,7 +426,7 @@ static void InitializeFileRemapping(DiagnosticsEngine &Diags,
// Create the file entry for the file that we're mapping from.
const FileEntry *FromFile =
- FileMgr.getVirtualFile(RF.first, (*ToFile)->getSize(), 0);
+ FileMgr.getVirtualFile(RF.first, ToFile->getSize(), 0);
if (!FromFile) {
Diags.Report(diag::err_fe_remap_missing_from_file) << RF.first;
continue;
@@ -469,7 +470,7 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
// Predefine macros and configure the preprocessor.
InitializePreprocessor(*PP, PPOpts, getPCHContainerReader(),
- getFrontendOpts());
+ getFrontendOpts(), getCodeGenOpts());
// Initialize the header search object. In CUDA compilations, we use the aux
// triple (the host triple) to initialize our header search, since we need to
@@ -542,7 +543,7 @@ std::string CompilerInstance::getSpecificModuleCachePath(StringRef ModuleHash) {
SmallString<256> SpecificModuleCache(getHeaderSearchOpts().ModuleCachePath);
if (!SpecificModuleCache.empty() && !getHeaderSearchOpts().DisableModuleHash)
llvm::sys::path::append(SpecificModuleCache, ModuleHash);
- return std::string(SpecificModuleCache.str());
+ return std::string(SpecificModuleCache);
}
// ASTContext
@@ -558,6 +559,55 @@ void CompilerInstance::createASTContext() {
// ExternalASTSource
+namespace {
+// Helper to recursively read the module names for all modules we're adding.
+// We mark these as known and redirect any attempt to load that module to
+// the files we were handed.
+struct ReadModuleNames : ASTReaderListener {
+ Preprocessor &PP;
+ llvm::SmallVector<std::string, 8> LoadedModules;
+
+ ReadModuleNames(Preprocessor &PP) : PP(PP) {}
+
+ void ReadModuleName(StringRef ModuleName) override {
+ // Keep the module name as a string for now. It's not safe to create a new
+ // IdentifierInfo from an ASTReader callback.
+ LoadedModules.push_back(ModuleName.str());
+ }
+
+ void registerAll() {
+ ModuleMap &MM = PP.getHeaderSearchInfo().getModuleMap();
+ for (const std::string &LoadedModule : LoadedModules)
+ MM.cacheModuleLoad(*PP.getIdentifierInfo(LoadedModule),
+ MM.findModule(LoadedModule));
+ LoadedModules.clear();
+ }
+
+ void markAllUnavailable() {
+ for (const std::string &LoadedModule : LoadedModules) {
+ if (Module *M = PP.getHeaderSearchInfo().getModuleMap().findModule(
+ LoadedModule)) {
+ M->HasIncompatibleModuleFile = true;
+
+ // Mark module as available if the only reason it was unavailable
+ // was missing headers.
+ SmallVector<Module *, 2> Stack;
+ Stack.push_back(M);
+ while (!Stack.empty()) {
+ Module *Current = Stack.pop_back_val();
+ if (Current->IsUnimportable) continue;
+ Current->IsAvailable = true;
+ auto SubmodulesRange = Current->submodules();
+ Stack.insert(Stack.end(), SubmodulesRange.begin(),
+ SubmodulesRange.end());
+ }
+ }
+ }
+ LoadedModules.clear();
+ }
+};
+} // namespace
+
void CompilerInstance::createPCHExternalASTSource(
StringRef Path, DisableValidationForModuleKind DisableValidation,
bool AllowPCHWithCompilerErrors, void *DeserializationListener,
@@ -602,6 +652,11 @@ IntrusiveRefCntPtr<ASTReader> CompilerInstance::createPCHExternalASTSource(
for (auto &Listener : DependencyCollectors)
Listener->attachToASTReader(*Reader);
+ auto Listener = std::make_unique<ReadModuleNames>(PP);
+ auto &ListenerRef = *Listener;
+ ASTReader::ListenerScope ReadModuleNamesListener(*Reader,
+ std::move(Listener));
+
switch (Reader->ReadAST(Path,
Preamble ? serialization::MK_Preamble
: serialization::MK_PCH,
@@ -611,6 +666,7 @@ IntrusiveRefCntPtr<ASTReader> CompilerInstance::createPCHExternalASTSource(
// Set the predefines buffer as suggested by the PCH reader. Typically, the
// predefines buffer will be empty.
PP.setPredefines(Reader->getSuggestedPredefines());
+ ListenerRef.registerAll();
return Reader;
case ASTReader::Failure:
@@ -626,6 +682,7 @@ IntrusiveRefCntPtr<ASTReader> CompilerInstance::createPCHExternalASTSource(
break;
}
+ ListenerRef.markAllUnavailable();
Context.setExternalSource(nullptr);
return nullptr;
}
@@ -638,7 +695,7 @@ static bool EnableCodeCompletion(Preprocessor &PP,
unsigned Column) {
// Tell the source manager to chop off the given file at a specific
// line and column.
- auto Entry = PP.getFileManager().getFile(Filename);
+ auto Entry = PP.getFileManager().getOptionalFileRef(Filename);
if (!Entry) {
PP.getDiagnostics().Report(diag::err_fe_invalid_code_complete_file)
<< Filename;
@@ -653,13 +710,10 @@ static bool EnableCodeCompletion(Preprocessor &PP,
void CompilerInstance::createCodeCompletionConsumer() {
const ParsedSourceLocation &Loc = getFrontendOpts().CodeCompletionAt;
if (!CompletionConsumer) {
- setCodeCompletionConsumer(
- createCodeCompletionConsumer(getPreprocessor(),
- Loc.FileName, Loc.Line, Loc.Column,
- getFrontendOpts().CodeCompleteOpts,
- llvm::outs()));
- if (!CompletionConsumer)
- return;
+ setCodeCompletionConsumer(createCodeCompletionConsumer(
+ getPreprocessor(), Loc.FileName, Loc.Line, Loc.Column,
+ getFrontendOpts().CodeCompleteOpts, llvm::outs()));
+ return;
} else if (EnableCodeCompletion(getPreprocessor(), Loc.FileName,
Loc.Line, Loc.Column)) {
setCodeCompletionConsumer(nullptr);
@@ -693,16 +747,30 @@ void CompilerInstance::createSema(TranslationUnitKind TUKind,
CodeCompleteConsumer *CompletionConsumer) {
TheSema.reset(new Sema(getPreprocessor(), getASTContext(), getASTConsumer(),
TUKind, CompletionConsumer));
+
+ // Set up API notes.
+ TheSema->APINotes.setSwiftVersion(getAPINotesOpts().SwiftVersion);
+
// Attach the external sema source if there is any.
if (ExternalSemaSrc) {
TheSema->addExternalSource(ExternalSemaSrc.get());
ExternalSemaSrc->InitializeSema(*TheSema);
}
+
+ // If we're building a module and are supposed to load API notes,
+ // notify the API notes manager.
+ if (auto *currentModule = getPreprocessor().getCurrentModule()) {
+ (void)TheSema->APINotes.loadCurrentModuleAPINotes(
+ currentModule, getLangOpts().APINotesModules,
+ getAPINotesOpts().ModuleSearchPaths);
+ }
}
// Output Files
void CompilerInstance::clearOutputFiles(bool EraseFiles) {
+ // The ASTConsumer can own streams that write to the output files.
+ assert(!hasASTConsumer() && "ASTConsumer should be reset");
// Ignore errors that occur when trying to discard the temp file.
for (OutputFile &OF : OutputFiles) {
if (EraseFiles) {
@@ -721,12 +789,7 @@ void CompilerInstance::clearOutputFiles(bool EraseFiles) {
continue;
}
- // If '-working-directory' was passed, the output filename should be
- // relative to that.
- SmallString<128> NewOutFile(OF.Filename);
- FileMgr->FixupRelativePath(NewOutFile);
-
- llvm::Error E = OF.File->keep(NewOutFile);
+ llvm::Error E = OF.File->keep(OF.Filename);
if (!E)
continue;
@@ -747,7 +810,7 @@ std::unique_ptr<raw_pwrite_stream> CompilerInstance::createDefaultOutputFile(
bool Binary, StringRef InFile, StringRef Extension, bool RemoveFileOnSignal,
bool CreateMissingDirectories, bool ForceUseTemporary) {
StringRef OutputPath = getFrontendOpts().OutputFile;
- Optional<SmallString<128>> PathStorage;
+ std::optional<SmallString<128>> PathStorage;
if (OutputPath.empty()) {
if (InFile == "-" || Extension.empty()) {
OutputPath = "-";
@@ -789,8 +852,20 @@ CompilerInstance::createOutputFileImpl(StringRef OutputPath, bool Binary,
assert((!CreateMissingDirectories || UseTemporary) &&
"CreateMissingDirectories is only allowed when using temporary files");
+ // If '-working-directory' was passed, the output filename should be
+ // relative to that.
+ std::optional<SmallString<128>> AbsPath;
+ if (OutputPath != "-" && !llvm::sys::path::is_absolute(OutputPath)) {
+ assert(hasFileManager() &&
+ "File Manager is required to fix up relative path.\n");
+
+ AbsPath.emplace(OutputPath);
+ FileMgr->FixupRelativePath(*AbsPath);
+ OutputPath = *AbsPath;
+ }
+
std::unique_ptr<llvm::raw_fd_ostream> OS;
- Optional<StringRef> OSFile;
+ std::optional<StringRef> OSFile;
if (UseTemporary) {
if (OutputPath == "-")
@@ -812,7 +887,7 @@ CompilerInstance::createOutputFileImpl(StringRef OutputPath, bool Binary,
}
}
- Optional<llvm::sys::fs::TempFile> Temp;
+ std::optional<llvm::sys::fs::TempFile> Temp;
if (UseTemporary) {
// Create a temporary file.
// Insert -%%%%%%%% before the extension (if any), and because some tools
@@ -824,10 +899,12 @@ CompilerInstance::createOutputFileImpl(StringRef OutputPath, bool Binary,
TempPath += "-%%%%%%%%";
TempPath += OutputExtension;
TempPath += ".tmp";
+ llvm::sys::fs::OpenFlags BinaryFlags =
+ Binary ? llvm::sys::fs::OF_None : llvm::sys::fs::OF_Text;
Expected<llvm::sys::fs::TempFile> ExpectedFile =
llvm::sys::fs::TempFile::create(
TempPath, llvm::sys::fs::all_read | llvm::sys::fs::all_write,
- Binary ? llvm::sys::fs::OF_None : llvm::sys::fs::OF_Text);
+ BinaryFlags);
llvm::Error E = handleErrors(
ExpectedFile.takeError(), [&](const llvm::ECError &E) -> llvm::Error {
@@ -837,7 +914,9 @@ CompilerInstance::createOutputFileImpl(StringRef OutputPath, bool Binary,
StringRef Parent = llvm::sys::path::parent_path(OutputPath);
EC = llvm::sys::fs::create_directories(Parent);
if (!EC) {
- ExpectedFile = llvm::sys::fs::TempFile::create(TempPath);
+ ExpectedFile = llvm::sys::fs::TempFile::create(
+ TempPath, llvm::sys::fs::all_read | llvm::sys::fs::all_write,
+ BinaryFlags);
if (!ExpectedFile)
return llvm::errorCodeToError(
llvm::errc::no_such_file_or_directory);
@@ -911,10 +990,9 @@ bool CompilerInstance::InitializeSourceManager(const FrontendInputFile &Input,
? FileMgr.getSTDIN()
: FileMgr.getFileRef(InputFile, /*OpenFile=*/true);
if (!FileOrErr) {
- // FIXME: include the error in the diagnostic even when it's not stdin.
auto EC = llvm::errorToErrorCode(FileOrErr.takeError());
if (InputFile != "-")
- Diags.Report(diag::err_fe_error_reading) << InputFile;
+ Diags.Report(diag::err_fe_error_reading) << InputFile << EC.message();
else
Diags.Report(diag::err_fe_error_reading_stdin) << EC.message();
return false;
@@ -940,6 +1018,11 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
// DesiredStackSpace available.
noteBottomOfStack();
+ auto FinishDiagnosticClient = llvm::make_scope_exit([&]() {
+ // Notify the diagnostic client that all files were processed.
+ getDiagnosticClient().finish();
+ });
+
raw_ostream &OS = getVerboseOutputStream();
if (!Act.PrepareToExecute(*this))
@@ -954,9 +1037,9 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
// Validate/process some options.
if (getHeaderSearchOpts().Verbose)
- OS << "clang -cc1 version " CLANG_VERSION_STRING
- << " based upon " << BACKEND_PACKAGE_STRING
- << " default target " << llvm::sys::getDefaultTargetTriple() << "\n";
+ OS << "clang -cc1 version " CLANG_VERSION_STRING << " based upon LLVM "
+ << LLVM_VERSION_STRING << " default target "
+ << llvm::sys::getDefaultTargetTriple() << "\n";
if (getCodeGenOpts().TimePasses)
createFrontendTimer();
@@ -978,9 +1061,6 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
}
}
- // Notify the diagnostic client that all files were processed.
- getDiagnostics().getClient()->finish();
-
if (getDiagnosticOpts().ShowCarets) {
// We can have multiple diagnostics sharing one diagnostic client.
// Get the total number of warnings/errors from the client.
@@ -1015,9 +1095,12 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
}
StringRef StatsFile = getFrontendOpts().StatsFile;
if (!StatsFile.empty()) {
+ llvm::sys::fs::OpenFlags FileFlags = llvm::sys::fs::OF_TextWithCRLF;
+ if (getFrontendOpts().AppendStats)
+ FileFlags |= llvm::sys::fs::OF_Append;
std::error_code EC;
- auto StatS = std::make_unique<llvm::raw_fd_ostream>(
- StatsFile, EC, llvm::sys::fs::OF_TextWithCRLF);
+ auto StatS =
+ std::make_unique<llvm::raw_fd_ostream>(StatsFile, EC, FileFlags);
if (EC) {
getDiagnostics().Report(diag::warn_fe_unable_to_open_stats_file)
<< StatsFile << EC.message();
@@ -1029,6 +1112,27 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
return !getDiagnostics().getClient()->getNumErrors();
}
+void CompilerInstance::LoadRequestedPlugins() {
+ // Load any requested plugins.
+ for (const std::string &Path : getFrontendOpts().Plugins) {
+ std::string Error;
+ if (llvm::sys::DynamicLibrary::LoadLibraryPermanently(Path.c_str(), &Error))
+ getDiagnostics().Report(diag::err_fe_unable_to_load_plugin)
+ << Path << Error;
+ }
+
+ // Check if any of the loaded plugins replaces the main AST action
+ for (const FrontendPluginRegistry::entry &Plugin :
+ FrontendPluginRegistry::entries()) {
+ std::unique_ptr<PluginASTAction> P(Plugin.instantiate());
+ if (P->getActionType() == PluginASTAction::ReplaceAction) {
+ getFrontendOpts().ProgramAction = clang::frontend::PluginAction;
+ getFrontendOpts().ActionName = Plugin.getName().str();
+ break;
+ }
+ }
+}
+
/// Determine the appropriate source input kind based on language
/// options.
static Language getLanguageFromOptions(const LangOptions &LangOpts) {
@@ -1071,27 +1175,24 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
// For any options that aren't intended to affect how a module is built,
// reset them to their default values.
- Invocation->getLangOpts()->resetNonModularOptions();
- PPOpts.resetNonModularOptions();
+ Invocation->resetNonModularOptions();
// Remove any macro definitions that are explicitly ignored by the module.
// They aren't supposed to affect how the module is built anyway.
HeaderSearchOptions &HSOpts = Invocation->getHeaderSearchOpts();
- PPOpts.Macros.erase(
- std::remove_if(PPOpts.Macros.begin(), PPOpts.Macros.end(),
- [&HSOpts](const std::pair<std::string, bool> &def) {
- StringRef MacroDef = def.first;
- return HSOpts.ModulesIgnoreMacros.count(
- llvm::CachedHashString(MacroDef.split('=').first)) > 0;
- }),
- PPOpts.Macros.end());
+ llvm::erase_if(PPOpts.Macros,
+ [&HSOpts](const std::pair<std::string, bool> &def) {
+ StringRef MacroDef = def.first;
+ return HSOpts.ModulesIgnoreMacros.contains(
+ llvm::CachedHashString(MacroDef.split('=').first));
+ });
// If the original compiler invocation had -fmodule-name, pass it through.
- Invocation->getLangOpts()->ModuleName =
- ImportingInstance.getInvocation().getLangOpts()->ModuleName;
+ Invocation->getLangOpts().ModuleName =
+ ImportingInstance.getInvocation().getLangOpts().ModuleName;
// Note the name of the module we're building.
- Invocation->getLangOpts()->CurrentModule = std::string(ModuleName);
+ Invocation->getLangOpts().CurrentModule = std::string(ModuleName);
// Make sure that the failed-module structure has been allocated in
// the importing instance, and propagate the pointer to the newly-created
@@ -1119,7 +1220,9 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
// Don't free the remapped file buffers; they are owned by our caller.
PPOpts.RetainRemappedFileBuffers = true;
- Invocation->getDiagnosticOpts().VerifyDiagnostics = 0;
+ DiagnosticOptions &DiagOpts = Invocation->getDiagnosticOpts();
+
+ DiagOpts.VerifyDiagnostics = 0;
assert(ImportingInstance.getInvocation().getModuleHash() ==
Invocation->getModuleHash() && "Module hash mismatch!");
@@ -1136,11 +1239,19 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
ImportingInstance.getDiagnosticClient()),
/*ShouldOwnClient=*/true);
- // Note that this module is part of the module build stack, so that we
- // can detect cycles in the module graph.
- Instance.setFileManager(&ImportingInstance.getFileManager());
+ if (llvm::is_contained(DiagOpts.SystemHeaderWarningsModules, ModuleName))
+ Instance.getDiagnostics().setSuppressSystemWarnings(false);
+
+ if (FrontendOpts.ModulesShareFileManager) {
+ Instance.setFileManager(&ImportingInstance.getFileManager());
+ } else {
+ Instance.createFileManager(&ImportingInstance.getVirtualFileSystem());
+ }
Instance.createSourceManager(Instance.getFileManager());
SourceManager &SourceMgr = Instance.getSourceManager();
+
+ // Note that this module is part of the module build stack, so that we
+ // can detect cycles in the module graph.
SourceMgr.setModuleBuildStack(
ImportingInstance.getSourceManager().getModuleBuildStack());
SourceMgr.pushModuleBuildStack(ModuleName,
@@ -1160,8 +1271,7 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
// Execute the action to actually build the module in-place. Use a separate
// thread so that we get a stack large enough.
- llvm::CrashRecoveryContext CRC;
- CRC.RunSafelyOnThread(
+ bool Crashed = !llvm::CrashRecoveryContext().RunSafelyOnThread(
[&]() {
GenerateModuleFromModuleMapAction Action;
Instance.ExecuteAction(Action);
@@ -1174,9 +1284,15 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
diag::remark_module_build_done)
<< ModuleName;
- // Delete any remaining temporary files related to Instance, in case the
- // module generation thread crashed.
- Instance.clearOutputFiles(/*EraseFiles=*/true);
+ if (Crashed) {
+ // Clear the ASTConsumer if it hasn't been already, in case it owns streams
+ // that must be closed before clearing output files.
+ Instance.setSema(nullptr);
+ Instance.setASTConsumer(nullptr);
+
+ // Delete any remaining temporary files related to Instance.
+ Instance.clearOutputFiles(/*EraseFiles=*/true);
+ }
// If \p AllowPCMWithCompilerErrors is set return 'success' even if errors
// occurred.
@@ -1184,19 +1300,17 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
Instance.getFrontendOpts().AllowPCMWithCompilerErrors;
}
-static const FileEntry *getPublicModuleMap(const FileEntry *File,
- FileManager &FileMgr) {
- StringRef Filename = llvm::sys::path::filename(File->getName());
- SmallString<128> PublicFilename(File->getDir()->getName());
+static OptionalFileEntryRef getPublicModuleMap(FileEntryRef File,
+ FileManager &FileMgr) {
+ StringRef Filename = llvm::sys::path::filename(File.getName());
+ SmallString<128> PublicFilename(File.getDir().getName());
if (Filename == "module_private.map")
llvm::sys::path::append(PublicFilename, "module.map");
else if (Filename == "module.private.modulemap")
llvm::sys::path::append(PublicFilename, "module.modulemap");
else
- return nullptr;
- if (auto FE = FileMgr.getFile(PublicFilename))
- return *FE;
- return nullptr;
+ return std::nullopt;
+ return FileMgr.getOptionalFileRef(PublicFilename);
}
/// Compile a module file for the given module in a separate compiler instance,
@@ -1212,21 +1326,22 @@ static bool compileModule(CompilerInstance &ImportingInstance,
ModuleMap &ModMap
= ImportingInstance.getPreprocessor().getHeaderSearchInfo().getModuleMap();
bool Result;
- if (const FileEntry *ModuleMapFile =
+ if (OptionalFileEntryRef ModuleMapFile =
ModMap.getContainingModuleMapFile(Module)) {
// Canonicalize compilation to start with the public module map. This is
// vital for submodules declarations in the private module maps to be
// correctly parsed when depending on a top level module in the public one.
- if (const FileEntry *PublicMMFile = getPublicModuleMap(
- ModuleMapFile, ImportingInstance.getFileManager()))
+ if (OptionalFileEntryRef PublicMMFile = getPublicModuleMap(
+ *ModuleMapFile, ImportingInstance.getFileManager()))
ModuleMapFile = PublicMMFile;
+ StringRef ModuleMapFilePath = ModuleMapFile->getNameAsRequested();
+
// Use the module map where this module resides.
Result = compileModuleImpl(
ImportingInstance, ImportLoc, Module->getTopLevelModuleName(),
- FrontendInputFile(ModuleMapFile->getName(), IK, +Module->IsSystem),
- ModMap.getModuleMapFileForUniquing(Module)->getName(),
- ModuleFileName);
+ FrontendInputFile(ModuleMapFilePath, IK, +Module->IsSystem),
+ ModMap.getModuleMapFileForUniquing(Module)->getName(), ModuleFileName);
} else {
// FIXME: We only need to fake up an input file here as a way of
// transporting the module's directory to the module map parser. We should
@@ -1248,7 +1363,7 @@ static bool compileModule(CompilerInstance &ImportingInstance,
[&](CompilerInstance &Instance) {
std::unique_ptr<llvm::MemoryBuffer> ModuleMapBuffer =
llvm::MemoryBuffer::getMemBuffer(InferredModuleMapContent);
- ModuleMapFile = Instance.getFileManager().getVirtualFile(
+ FileEntryRef ModuleMapFile = Instance.getFileManager().getVirtualFileRef(
FakeModuleMapFile, InferredModuleMapContent.size(), 0);
Instance.getSourceManager().overrideFileContents(
ModuleMapFile, std::move(ModuleMapBuffer));
@@ -1264,31 +1379,82 @@ static bool compileModule(CompilerInstance &ImportingInstance,
return Result;
}
+/// Read the AST right after compiling the module.
+static bool readASTAfterCompileModule(CompilerInstance &ImportingInstance,
+ SourceLocation ImportLoc,
+ SourceLocation ModuleNameLoc,
+ Module *Module, StringRef ModuleFileName,
+ bool *OutOfDate) {
+ DiagnosticsEngine &Diags = ImportingInstance.getDiagnostics();
+
+ unsigned ModuleLoadCapabilities = ASTReader::ARR_Missing;
+ if (OutOfDate)
+ ModuleLoadCapabilities |= ASTReader::ARR_OutOfDate;
+
+ // Try to read the module file, now that we've compiled it.
+ ASTReader::ASTReadResult ReadResult =
+ ImportingInstance.getASTReader()->ReadAST(
+ ModuleFileName, serialization::MK_ImplicitModule, ImportLoc,
+ ModuleLoadCapabilities);
+ if (ReadResult == ASTReader::Success)
+ return true;
+
+ // The caller wants to handle out-of-date failures.
+ if (OutOfDate && ReadResult == ASTReader::OutOfDate) {
+ *OutOfDate = true;
+ return false;
+ }
+
+ // The ASTReader didn't diagnose the error, so conservatively report it.
+ if (ReadResult == ASTReader::Missing || !Diags.hasErrorOccurred())
+ Diags.Report(ModuleNameLoc, diag::err_module_not_built)
+ << Module->Name << SourceRange(ImportLoc, ModuleNameLoc);
+
+ return false;
+}
+
/// Compile a module in a separate compiler instance and read the AST,
/// returning true if the module compiles without errors.
+static bool compileModuleAndReadASTImpl(CompilerInstance &ImportingInstance,
+ SourceLocation ImportLoc,
+ SourceLocation ModuleNameLoc,
+ Module *Module,
+ StringRef ModuleFileName) {
+ if (!compileModule(ImportingInstance, ModuleNameLoc, Module,
+ ModuleFileName)) {
+ ImportingInstance.getDiagnostics().Report(ModuleNameLoc,
+ diag::err_module_not_built)
+ << Module->Name << SourceRange(ImportLoc, ModuleNameLoc);
+ return false;
+ }
+
+ return readASTAfterCompileModule(ImportingInstance, ImportLoc, ModuleNameLoc,
+ Module, ModuleFileName,
+ /*OutOfDate=*/nullptr);
+}
+
+/// Compile a module in a separate compiler instance and read the AST,
+/// returning true if the module compiles without errors, using a lock manager
+/// to avoid building the same module in multiple compiler instances.
///
/// Uses a lock file manager and exponential backoff to reduce the chances that
/// multiple instances will compete to create the same module. On timeout,
/// deletes the lock file in order to avoid deadlock from crashing processes or
/// bugs in the lock file manager.
-static bool compileModuleAndReadAST(CompilerInstance &ImportingInstance,
- SourceLocation ImportLoc,
- SourceLocation ModuleNameLoc,
- Module *Module, StringRef ModuleFileName) {
+static bool compileModuleAndReadASTBehindLock(
+ CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
+ SourceLocation ModuleNameLoc, Module *Module, StringRef ModuleFileName) {
DiagnosticsEngine &Diags = ImportingInstance.getDiagnostics();
- auto diagnoseBuildFailure = [&] {
- Diags.Report(ModuleNameLoc, diag::err_module_not_built)
- << Module->Name << SourceRange(ImportLoc, ModuleNameLoc);
- };
+ Diags.Report(ModuleNameLoc, diag::remark_module_lock)
+ << ModuleFileName << Module->Name;
// FIXME: have LockFileManager return an error_code so that we can
// avoid the mkdir when the directory already exists.
StringRef Dir = llvm::sys::path::parent_path(ModuleFileName);
llvm::sys::fs::create_directories(Dir);
- while (1) {
- unsigned ModuleLoadCapabilities = ASTReader::ARR_Missing;
+ while (true) {
llvm::LockFileManager Locked(ModuleFileName);
switch (Locked) {
case llvm::LockFileManager::LFS_Error:
@@ -1299,61 +1465,67 @@ static bool compileModuleAndReadAST(CompilerInstance &ImportingInstance,
<< Module->Name << Locked.getErrorMessage();
// Clear out any potential leftover.
Locked.unsafeRemoveLockFile();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case llvm::LockFileManager::LFS_Owned:
// We're responsible for building the module ourselves.
- if (!compileModule(ImportingInstance, ModuleNameLoc, Module,
- ModuleFileName)) {
- diagnoseBuildFailure();
- return false;
- }
- break;
+ return compileModuleAndReadASTImpl(ImportingInstance, ImportLoc,
+ ModuleNameLoc, Module, ModuleFileName);
case llvm::LockFileManager::LFS_Shared:
- // Someone else is responsible for building the module. Wait for them to
- // finish.
- switch (Locked.waitForUnlock()) {
- case llvm::LockFileManager::Res_Success:
- ModuleLoadCapabilities |= ASTReader::ARR_OutOfDate;
- break;
- case llvm::LockFileManager::Res_OwnerDied:
- continue; // try again to get the lock.
- case llvm::LockFileManager::Res_Timeout:
- // Since ModuleCache takes care of correctness, we try waiting for
- // another process to complete the build so clang does not do it done
- // twice. If case of timeout, build it ourselves.
- Diags.Report(ModuleNameLoc, diag::remark_module_lock_timeout)
- << Module->Name;
- // Clear the lock file so that future invocations can make progress.
- Locked.unsafeRemoveLockFile();
- continue;
- }
- break;
+ break; // The interesting case.
}
- // Try to read the module file, now that we've compiled it.
- ASTReader::ASTReadResult ReadResult =
- ImportingInstance.getASTReader()->ReadAST(
- ModuleFileName, serialization::MK_ImplicitModule, ImportLoc,
- ModuleLoadCapabilities);
-
- if (ReadResult == ASTReader::OutOfDate &&
- Locked == llvm::LockFileManager::LFS_Shared) {
- // The module may be out of date in the presence of file system races,
- // or if one of its imports depends on header search paths that are not
- // consistent with this ImportingInstance. Try again...
+ // Someone else is responsible for building the module. Wait for them to
+ // finish.
+ switch (Locked.waitForUnlock()) {
+ case llvm::LockFileManager::Res_Success:
+ break; // The interesting case.
+ case llvm::LockFileManager::Res_OwnerDied:
+ continue; // try again to get the lock.
+ case llvm::LockFileManager::Res_Timeout:
+ // Since ModuleCache takes care of correctness, we try waiting for
+ // another process to complete the build so clang does not do it done
+ // twice. If case of timeout, build it ourselves.
+ Diags.Report(ModuleNameLoc, diag::remark_module_lock_timeout)
+ << Module->Name;
+ // Clear the lock file so that future invocations can make progress.
+ Locked.unsafeRemoveLockFile();
continue;
- } else if (ReadResult == ASTReader::Missing) {
- diagnoseBuildFailure();
- } else if (ReadResult != ASTReader::Success &&
- !Diags.hasErrorOccurred()) {
- // The ASTReader didn't diagnose the error, so conservatively report it.
- diagnoseBuildFailure();
}
- return ReadResult == ASTReader::Success;
+
+ // Read the module that was just written by someone else.
+ bool OutOfDate = false;
+ if (readASTAfterCompileModule(ImportingInstance, ImportLoc, ModuleNameLoc,
+ Module, ModuleFileName, &OutOfDate))
+ return true;
+ if (!OutOfDate)
+ return false;
+
+ // The module may be out of date in the presence of file system races,
+ // or if one of its imports depends on header search paths that are not
+ // consistent with this ImportingInstance. Try again...
}
}
+/// Compile a module in a separate compiler instance and read the AST,
+/// returning true if the module compiles without errors, potentially using a
+/// lock manager to avoid building the same module in multiple compiler
+/// instances.
+static bool compileModuleAndReadAST(CompilerInstance &ImportingInstance,
+ SourceLocation ImportLoc,
+ SourceLocation ModuleNameLoc,
+ Module *Module, StringRef ModuleFileName) {
+ return ImportingInstance.getInvocation()
+ .getFrontendOpts()
+ .BuildingImplicitModuleUsesLock
+ ? compileModuleAndReadASTBehindLock(ImportingInstance, ImportLoc,
+ ModuleNameLoc, Module,
+ ModuleFileName)
+ : compileModuleAndReadASTImpl(ImportingInstance, ImportLoc,
+ ModuleNameLoc, Module,
+ ModuleFileName);
+}
+
/// Diagnose differences between the current definition of the given
/// configuration macro and the definition provided on the command line.
static void checkConfigMacro(Preprocessor &PP, StringRef ConfigMacro,
@@ -1548,59 +1720,14 @@ void CompilerInstance::createASTReader() {
Listener->attachToASTReader(*TheASTReader);
}
-bool CompilerInstance::loadModuleFile(StringRef FileName) {
+bool CompilerInstance::loadModuleFile(
+ StringRef FileName, serialization::ModuleFile *&LoadedModuleFile) {
llvm::Timer Timer;
if (FrontendTimerGroup)
Timer.init("preloading." + FileName.str(), "Preloading " + FileName.str(),
*FrontendTimerGroup);
llvm::TimeRegion TimeLoading(FrontendTimerGroup ? &Timer : nullptr);
- // Helper to recursively read the module names for all modules we're adding.
- // We mark these as known and redirect any attempt to load that module to
- // the files we were handed.
- struct ReadModuleNames : ASTReaderListener {
- CompilerInstance &CI;
- llvm::SmallVector<IdentifierInfo*, 8> LoadedModules;
-
- ReadModuleNames(CompilerInstance &CI) : CI(CI) {}
-
- void ReadModuleName(StringRef ModuleName) override {
- LoadedModules.push_back(
- CI.getPreprocessor().getIdentifierInfo(ModuleName));
- }
-
- void registerAll() {
- ModuleMap &MM = CI.getPreprocessor().getHeaderSearchInfo().getModuleMap();
- for (auto *II : LoadedModules)
- MM.cacheModuleLoad(*II, MM.findModule(II->getName()));
- LoadedModules.clear();
- }
-
- void markAllUnavailable() {
- for (auto *II : LoadedModules) {
- if (Module *M = CI.getPreprocessor()
- .getHeaderSearchInfo()
- .getModuleMap()
- .findModule(II->getName())) {
- M->HasIncompatibleModuleFile = true;
-
- // Mark module as available if the only reason it was unavailable
- // was missing headers.
- SmallVector<Module *, 2> Stack;
- Stack.push_back(M);
- while (!Stack.empty()) {
- Module *Current = Stack.pop_back_val();
- if (Current->IsUnimportable) continue;
- Current->IsAvailable = true;
- Stack.insert(Stack.end(),
- Current->submodule_begin(), Current->submodule_end());
- }
- }
- }
- LoadedModules.clear();
- }
- };
-
// If we don't already have an ASTReader, create one now.
if (!TheASTReader)
createASTReader();
@@ -1612,7 +1739,7 @@ bool CompilerInstance::loadModuleFile(StringRef FileName) {
SourceLocation())
<= DiagnosticsEngine::Warning;
- auto Listener = std::make_unique<ReadModuleNames>(*this);
+ auto Listener = std::make_unique<ReadModuleNames>(*PP);
auto &ListenerRef = *Listener;
ASTReader::ListenerScope ReadModuleNamesListener(*TheASTReader,
std::move(Listener));
@@ -1620,7 +1747,8 @@ bool CompilerInstance::loadModuleFile(StringRef FileName) {
// Try to load the module file.
switch (TheASTReader->ReadAST(
FileName, serialization::MK_ExplicitModule, SourceLocation(),
- ConfigMismatchIsRecoverable ? ASTReader::ARR_ConfigurationMismatch : 0)) {
+ ConfigMismatchIsRecoverable ? ASTReader::ARR_ConfigurationMismatch : 0,
+ &LoadedModuleFile)) {
case ASTReader::Success:
// We successfully loaded the module file; remember the set of provided
// modules so that we don't try to load implicit modules for them.
@@ -1691,7 +1819,8 @@ ModuleLoadResult CompilerInstance::findOrCompileModuleAndReadAST(
SourceLocation ModuleNameLoc, bool IsInclusionDirective) {
// Search for a module with the given name.
HeaderSearch &HS = PP->getHeaderSearchInfo();
- Module *M = HS.lookupModule(ModuleName, true, !IsInclusionDirective);
+ Module *M =
+ HS.lookupModule(ModuleName, ImportLoc, true, !IsInclusionDirective);
// Select the source and filename for loading the named module.
std::string ModuleFilename;
@@ -1750,7 +1879,7 @@ ModuleLoadResult CompilerInstance::findOrCompileModuleAndReadAST(
// A prebuilt module is indexed as a ModuleFile; the Module does not exist
// until the first call to ReadAST. Look it up now.
- M = HS.lookupModule(ModuleName, true, !IsInclusionDirective);
+ M = HS.lookupModule(ModuleName, ImportLoc, true, !IsInclusionDirective);
// Check whether M refers to the file in the prebuilt module path.
if (M && M->getASTFile())
@@ -1776,7 +1905,7 @@ ModuleLoadResult CompilerInstance::findOrCompileModuleAndReadAST(
diag::warn_module_config_mismatch)
<< ModuleFilename;
// Fall through to error out.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ASTReader::VersionMismatch:
case ASTReader::HadErrors:
ModuleLoader::HadFatalFailure = true;
@@ -1873,16 +2002,9 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
} else if (ModuleName == getLangOpts().CurrentModule) {
// This is the module we're building.
Module = PP->getHeaderSearchInfo().lookupModule(
- ModuleName, /*AllowSearch*/ true,
+ ModuleName, ImportLoc, /*AllowSearch*/ true,
/*AllowExtraModuleMapSearch*/ !IsInclusionDirective);
- /// FIXME: perhaps we should (a) look for a module using the module name
- // to file map (PrebuiltModuleFiles) and (b) diagnose if still not found?
- //if (Module == nullptr) {
- // getDiagnostics().Report(ModuleNameLoc, diag::err_module_not_found)
- // << ModuleName;
- // DisableGeneratingGlobalModuleIndex = true;
- // return ModuleLoadResult();
- //}
+
MM.cacheModuleLoad(*Path[0].first, Module);
} else {
ModuleLoadResult Result = findOrCompileModuleAndReadAST(
@@ -1903,90 +2025,88 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
// Verify that the rest of the module path actually corresponds to
// a submodule.
bool MapPrivateSubModToTopLevel = false;
- if (Path.size() > 1) {
- for (unsigned I = 1, N = Path.size(); I != N; ++I) {
- StringRef Name = Path[I].first->getName();
- clang::Module *Sub = Module->findSubmodule(Name);
-
- // If the user is requesting Foo.Private and it doesn't exist, try to
- // match Foo_Private and emit a warning asking for the user to write
- // @import Foo_Private instead. FIXME: remove this when existing clients
- // migrate off of Foo.Private syntax.
- if (!Sub && PP->getLangOpts().ImplicitModules && Name == "Private" &&
- Module == Module->getTopLevelModule()) {
- SmallString<128> PrivateModule(Module->Name);
- PrivateModule.append("_Private");
-
- SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> PrivPath;
- auto &II = PP->getIdentifierTable().get(
- PrivateModule, PP->getIdentifierInfo(Module->Name)->getTokenID());
- PrivPath.push_back(std::make_pair(&II, Path[0].second));
-
- if (PP->getHeaderSearchInfo().lookupModule(PrivateModule, true,
- !IsInclusionDirective))
- Sub =
- loadModule(ImportLoc, PrivPath, Visibility, IsInclusionDirective);
- if (Sub) {
- MapPrivateSubModToTopLevel = true;
- if (!getDiagnostics().isIgnored(
- diag::warn_no_priv_submodule_use_toplevel, ImportLoc)) {
- getDiagnostics().Report(Path[I].second,
- diag::warn_no_priv_submodule_use_toplevel)
- << Path[I].first << Module->getFullModuleName() << PrivateModule
- << SourceRange(Path[0].second, Path[I].second)
- << FixItHint::CreateReplacement(SourceRange(Path[0].second),
- PrivateModule);
- getDiagnostics().Report(Sub->DefinitionLoc,
- diag::note_private_top_level_defined);
- }
+ for (unsigned I = 1, N = Path.size(); I != N; ++I) {
+ StringRef Name = Path[I].first->getName();
+ clang::Module *Sub = Module->findSubmodule(Name);
+
+ // If the user is requesting Foo.Private and it doesn't exist, try to
+ // match Foo_Private and emit a warning asking for the user to write
+ // @import Foo_Private instead. FIXME: remove this when existing clients
+ // migrate off of Foo.Private syntax.
+ if (!Sub && Name == "Private" && Module == Module->getTopLevelModule()) {
+ SmallString<128> PrivateModule(Module->Name);
+ PrivateModule.append("_Private");
+
+ SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> PrivPath;
+ auto &II = PP->getIdentifierTable().get(
+ PrivateModule, PP->getIdentifierInfo(Module->Name)->getTokenID());
+ PrivPath.push_back(std::make_pair(&II, Path[0].second));
+
+ std::string FileName;
+ // If there is a modulemap module or prebuilt module, load it.
+ if (PP->getHeaderSearchInfo().lookupModule(PrivateModule, ImportLoc, true,
+ !IsInclusionDirective) ||
+ selectModuleSource(nullptr, PrivateModule, FileName, BuiltModules,
+ PP->getHeaderSearchInfo()) != MS_ModuleNotFound)
+ Sub = loadModule(ImportLoc, PrivPath, Visibility, IsInclusionDirective);
+ if (Sub) {
+ MapPrivateSubModToTopLevel = true;
+ PP->markClangModuleAsAffecting(Module);
+ if (!getDiagnostics().isIgnored(
+ diag::warn_no_priv_submodule_use_toplevel, ImportLoc)) {
+ getDiagnostics().Report(Path[I].second,
+ diag::warn_no_priv_submodule_use_toplevel)
+ << Path[I].first << Module->getFullModuleName() << PrivateModule
+ << SourceRange(Path[0].second, Path[I].second)
+ << FixItHint::CreateReplacement(SourceRange(Path[0].second),
+ PrivateModule);
+ getDiagnostics().Report(Sub->DefinitionLoc,
+ diag::note_private_top_level_defined);
}
}
+ }
- if (!Sub) {
- // Attempt to perform typo correction to find a module name that works.
- SmallVector<StringRef, 2> Best;
- unsigned BestEditDistance = (std::numeric_limits<unsigned>::max)();
-
- for (clang::Module::submodule_iterator J = Module->submodule_begin(),
- JEnd = Module->submodule_end();
- J != JEnd; ++J) {
- unsigned ED = Name.edit_distance((*J)->Name,
- /*AllowReplacements=*/true,
- BestEditDistance);
- if (ED <= BestEditDistance) {
- if (ED < BestEditDistance) {
- Best.clear();
- BestEditDistance = ED;
- }
-
- Best.push_back((*J)->Name);
+ if (!Sub) {
+ // Attempt to perform typo correction to find a module name that works.
+ SmallVector<StringRef, 2> Best;
+ unsigned BestEditDistance = (std::numeric_limits<unsigned>::max)();
+
+ for (class Module *SubModule : Module->submodules()) {
+ unsigned ED =
+ Name.edit_distance(SubModule->Name,
+ /*AllowReplacements=*/true, BestEditDistance);
+ if (ED <= BestEditDistance) {
+ if (ED < BestEditDistance) {
+ Best.clear();
+ BestEditDistance = ED;
}
+
+ Best.push_back(SubModule->Name);
}
+ }
- // If there was a clear winner, user it.
- if (Best.size() == 1) {
- getDiagnostics().Report(Path[I].second,
- diag::err_no_submodule_suggest)
+ // If there was a clear winner, user it.
+ if (Best.size() == 1) {
+ getDiagnostics().Report(Path[I].second, diag::err_no_submodule_suggest)
<< Path[I].first << Module->getFullModuleName() << Best[0]
- << SourceRange(Path[0].second, Path[I-1].second)
+ << SourceRange(Path[0].second, Path[I - 1].second)
<< FixItHint::CreateReplacement(SourceRange(Path[I].second),
Best[0]);
- Sub = Module->findSubmodule(Best[0]);
- }
+ Sub = Module->findSubmodule(Best[0]);
}
+ }
- if (!Sub) {
- // No submodule by this name. Complain, and don't look for further
- // submodules.
- getDiagnostics().Report(Path[I].second, diag::err_no_submodule)
+ if (!Sub) {
+ // No submodule by this name. Complain, and don't look for further
+ // submodules.
+ getDiagnostics().Report(Path[I].second, diag::err_no_submodule)
<< Path[I].first << Module->getFullModuleName()
- << SourceRange(Path[0].second, Path[I-1].second);
- break;
- }
-
- Module = Sub;
+ << SourceRange(Path[0].second, Path[I - 1].second);
+ break;
}
+
+ Module = Sub;
}
// Make the named module visible, if it's not already part of the module
@@ -2003,12 +2123,12 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
<< Module->getFullModuleName()
<< SourceRange(Path.front().second, Path.back().second);
- return ModuleLoadResult::MissingExpected;
+ return ModuleLoadResult(Module, ModuleLoadResult::MissingExpected);
}
// Check whether this module is available.
if (Preprocessor::checkModuleIsAvailable(getLangOpts(), getTarget(),
- getDiagnostics(), Module)) {
+ *Module, getDiagnostics())) {
getDiagnostics().Report(ImportLoc, diag::note_module_import_here)
<< SourceRange(Path.front().second, Path.back().second);
LastModuleImportLoc = ImportLoc;
@@ -2060,7 +2180,7 @@ void CompilerInstance::createModuleFromSource(SourceLocation ImportLoc,
FrontendInputFile Input(
ModuleMapFileName,
- InputKind(getLanguageFromOptions(*Invocation->getLangOpts()),
+ InputKind(getLanguageFromOptions(Invocation->getLangOpts()),
InputKind::ModuleMap, /*Preprocessed*/true));
std::string NullTerminatedSource(Source.str());
@@ -2068,11 +2188,10 @@ void CompilerInstance::createModuleFromSource(SourceLocation ImportLoc,
auto PreBuildStep = [&](CompilerInstance &Other) {
// Create a virtual file containing our desired source.
// FIXME: We shouldn't need to do this.
- const FileEntry *ModuleMapFile = Other.getFileManager().getVirtualFile(
+ FileEntryRef ModuleMapFile = Other.getFileManager().getVirtualFileRef(
ModuleMapFileName, NullTerminatedSource.size(), 0);
Other.getSourceManager().overrideFileContents(
- ModuleMapFile,
- llvm::MemoryBuffer::getMemBuffer(NullTerminatedSource.c_str()));
+ ModuleMapFile, llvm::MemoryBuffer::getMemBuffer(NullTerminatedSource));
Other.BuiltModules = std::move(BuiltModules);
Other.DeleteBuiltModules = false;
@@ -2085,7 +2204,7 @@ void CompilerInstance::createModuleFromSource(SourceLocation ImportLoc,
// Build the module, inheriting any modules that we've built locally.
if (compileModuleImpl(*this, ImportLoc, ModuleName, Input, StringRef(),
ModuleFileName, PreBuildStep, PostBuildStep)) {
- BuiltModules[std::string(ModuleName)] = std::string(ModuleFileName.str());
+ BuiltModules[std::string(ModuleName)] = std::string(ModuleFileName);
llvm::sys::RemoveFileOnSignal(ModuleFileName);
}
}
@@ -2141,7 +2260,7 @@ GlobalModuleIndex *CompilerInstance::loadGlobalModuleIndex(
for (ModuleMap::module_iterator I = MMap.module_begin(),
E = MMap.module_end(); I != E; ++I) {
Module *TheModule = I->second;
- const FileEntry *Entry = TheModule->getASTFile();
+ OptionalFileEntryRef Entry = TheModule->getASTFile();
if (!Entry) {
SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Path;
Path.push_back(std::make_pair(
diff --git a/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp b/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
index 7025028bc94a..feb4de2084b8 100644
--- a/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
@@ -12,7 +12,6 @@
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/CommentOptions.h"
-#include "clang/Basic/DebugInfoOptions.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticDriver.h"
#include "clang/Basic/DiagnosticOptions.h"
@@ -49,19 +48,16 @@
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/CachedHashString.h"
-#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/Hashing.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Config/llvm-config.h"
+#include "llvm/Frontend/Debug/Options.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/Linker/Linker.h"
#include "llvm/MC/MCTargetOptions.h"
@@ -78,7 +74,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/Host.h"
+#include "llvm/Support/HashBuilder.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
@@ -88,12 +84,18 @@
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstring>
+#include <ctime>
+#include <fstream>
+#include <limits>
#include <memory>
+#include <optional>
#include <string>
#include <tuple>
#include <type_traits>
@@ -106,93 +108,208 @@ using namespace options;
using namespace llvm::opt;
//===----------------------------------------------------------------------===//
+// Helpers.
+//===----------------------------------------------------------------------===//
+
+// Parse misexpect tolerance argument value.
+// Valid option values are integers in the range [0, 100)
+static Expected<std::optional<uint32_t>> parseToleranceOption(StringRef Arg) {
+ uint32_t Val;
+ if (Arg.getAsInteger(10, Val))
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "Not an integer: %s", Arg.data());
+ return Val;
+}
+
+//===----------------------------------------------------------------------===//
// Initialization.
//===----------------------------------------------------------------------===//
-CompilerInvocationRefBase::CompilerInvocationRefBase()
- : LangOpts(new LangOptions()), TargetOpts(new TargetOptions()),
- DiagnosticOpts(new DiagnosticOptions()),
- HeaderSearchOpts(new HeaderSearchOptions()),
- PreprocessorOpts(new PreprocessorOptions()),
- AnalyzerOpts(new AnalyzerOptions()) {}
-
-CompilerInvocationRefBase::CompilerInvocationRefBase(
- const CompilerInvocationRefBase &X)
- : LangOpts(new LangOptions(*X.getLangOpts())),
- TargetOpts(new TargetOptions(X.getTargetOpts())),
- DiagnosticOpts(new DiagnosticOptions(X.getDiagnosticOpts())),
- HeaderSearchOpts(new HeaderSearchOptions(X.getHeaderSearchOpts())),
- PreprocessorOpts(new PreprocessorOptions(X.getPreprocessorOpts())),
- AnalyzerOpts(new AnalyzerOptions(*X.getAnalyzerOpts())) {}
-
-CompilerInvocationRefBase::CompilerInvocationRefBase(
- CompilerInvocationRefBase &&X) = default;
-
-CompilerInvocationRefBase &
-CompilerInvocationRefBase::operator=(CompilerInvocationRefBase X) {
- LangOpts.swap(X.LangOpts);
- TargetOpts.swap(X.TargetOpts);
- DiagnosticOpts.swap(X.DiagnosticOpts);
- HeaderSearchOpts.swap(X.HeaderSearchOpts);
- PreprocessorOpts.swap(X.PreprocessorOpts);
- AnalyzerOpts.swap(X.AnalyzerOpts);
+namespace {
+template <class T> std::shared_ptr<T> make_shared_copy(const T &X) {
+ return std::make_shared<T>(X);
+}
+
+template <class T>
+llvm::IntrusiveRefCntPtr<T> makeIntrusiveRefCntCopy(const T &X) {
+ return llvm::makeIntrusiveRefCnt<T>(X);
+}
+} // namespace
+
+CompilerInvocationBase::CompilerInvocationBase()
+ : LangOpts(std::make_shared<LangOptions>()),
+ TargetOpts(std::make_shared<TargetOptions>()),
+ DiagnosticOpts(llvm::makeIntrusiveRefCnt<DiagnosticOptions>()),
+ HSOpts(std::make_shared<HeaderSearchOptions>()),
+ PPOpts(std::make_shared<PreprocessorOptions>()),
+ AnalyzerOpts(llvm::makeIntrusiveRefCnt<AnalyzerOptions>()),
+ MigratorOpts(std::make_shared<MigratorOptions>()),
+ APINotesOpts(std::make_shared<APINotesOptions>()),
+ CodeGenOpts(std::make_shared<CodeGenOptions>()),
+ FSOpts(std::make_shared<FileSystemOptions>()),
+ FrontendOpts(std::make_shared<FrontendOptions>()),
+ DependencyOutputOpts(std::make_shared<DependencyOutputOptions>()),
+ PreprocessorOutputOpts(std::make_shared<PreprocessorOutputOptions>()) {}
+
+CompilerInvocationBase &
+CompilerInvocationBase::deep_copy_assign(const CompilerInvocationBase &X) {
+ if (this != &X) {
+ LangOpts = make_shared_copy(X.getLangOpts());
+ TargetOpts = make_shared_copy(X.getTargetOpts());
+ DiagnosticOpts = makeIntrusiveRefCntCopy(X.getDiagnosticOpts());
+ HSOpts = make_shared_copy(X.getHeaderSearchOpts());
+ PPOpts = make_shared_copy(X.getPreprocessorOpts());
+ AnalyzerOpts = makeIntrusiveRefCntCopy(X.getAnalyzerOpts());
+ MigratorOpts = make_shared_copy(X.getMigratorOpts());
+ APINotesOpts = make_shared_copy(X.getAPINotesOpts());
+ CodeGenOpts = make_shared_copy(X.getCodeGenOpts());
+ FSOpts = make_shared_copy(X.getFileSystemOpts());
+ FrontendOpts = make_shared_copy(X.getFrontendOpts());
+ DependencyOutputOpts = make_shared_copy(X.getDependencyOutputOpts());
+ PreprocessorOutputOpts = make_shared_copy(X.getPreprocessorOutputOpts());
+ }
+ return *this;
+}
+
+CompilerInvocationBase &
+CompilerInvocationBase::shallow_copy_assign(const CompilerInvocationBase &X) {
+ if (this != &X) {
+ LangOpts = X.LangOpts;
+ TargetOpts = X.TargetOpts;
+ DiagnosticOpts = X.DiagnosticOpts;
+ HSOpts = X.HSOpts;
+ PPOpts = X.PPOpts;
+ AnalyzerOpts = X.AnalyzerOpts;
+ MigratorOpts = X.MigratorOpts;
+ APINotesOpts = X.APINotesOpts;
+ CodeGenOpts = X.CodeGenOpts;
+ FSOpts = X.FSOpts;
+ FrontendOpts = X.FrontendOpts;
+ DependencyOutputOpts = X.DependencyOutputOpts;
+ PreprocessorOutputOpts = X.PreprocessorOutputOpts;
+ }
return *this;
}
-CompilerInvocationRefBase &
-CompilerInvocationRefBase::operator=(CompilerInvocationRefBase &&X) = default;
+namespace {
+template <typename T>
+T &ensureOwned(std::shared_ptr<T> &Storage) {
+ if (Storage.use_count() > 1)
+ Storage = std::make_shared<T>(*Storage);
+ return *Storage;
+}
+
+template <typename T>
+T &ensureOwned(llvm::IntrusiveRefCntPtr<T> &Storage) {
+ if (Storage.useCount() > 1)
+ Storage = llvm::makeIntrusiveRefCnt<T>(*Storage);
+ return *Storage;
+}
+} // namespace
+
+LangOptions &CowCompilerInvocation::getMutLangOpts() {
+ return ensureOwned(LangOpts);
+}
-CompilerInvocationRefBase::~CompilerInvocationRefBase() = default;
+TargetOptions &CowCompilerInvocation::getMutTargetOpts() {
+ return ensureOwned(TargetOpts);
+}
+
+DiagnosticOptions &CowCompilerInvocation::getMutDiagnosticOpts() {
+ return ensureOwned(DiagnosticOpts);
+}
+
+HeaderSearchOptions &CowCompilerInvocation::getMutHeaderSearchOpts() {
+ return ensureOwned(HSOpts);
+}
+
+PreprocessorOptions &CowCompilerInvocation::getMutPreprocessorOpts() {
+ return ensureOwned(PPOpts);
+}
+
+AnalyzerOptions &CowCompilerInvocation::getMutAnalyzerOpts() {
+ return ensureOwned(AnalyzerOpts);
+}
+
+MigratorOptions &CowCompilerInvocation::getMutMigratorOpts() {
+ return ensureOwned(MigratorOpts);
+}
+
+APINotesOptions &CowCompilerInvocation::getMutAPINotesOpts() {
+ return ensureOwned(APINotesOpts);
+}
+
+CodeGenOptions &CowCompilerInvocation::getMutCodeGenOpts() {
+ return ensureOwned(CodeGenOpts);
+}
+
+FileSystemOptions &CowCompilerInvocation::getMutFileSystemOpts() {
+ return ensureOwned(FSOpts);
+}
+
+FrontendOptions &CowCompilerInvocation::getMutFrontendOpts() {
+ return ensureOwned(FrontendOpts);
+}
+
+DependencyOutputOptions &CowCompilerInvocation::getMutDependencyOutputOpts() {
+ return ensureOwned(DependencyOutputOpts);
+}
+
+PreprocessorOutputOptions &
+CowCompilerInvocation::getMutPreprocessorOutputOpts() {
+ return ensureOwned(PreprocessorOutputOpts);
+}
//===----------------------------------------------------------------------===//
// Normalizers
//===----------------------------------------------------------------------===//
+using ArgumentConsumer = CompilerInvocation::ArgumentConsumer;
+
#define SIMPLE_ENUM_VALUE_TABLE
#include "clang/Driver/Options.inc"
#undef SIMPLE_ENUM_VALUE_TABLE
-static llvm::Optional<bool> normalizeSimpleFlag(OptSpecifier Opt,
- unsigned TableIndex,
- const ArgList &Args,
- DiagnosticsEngine &Diags) {
+static std::optional<bool> normalizeSimpleFlag(OptSpecifier Opt,
+ unsigned TableIndex,
+ const ArgList &Args,
+ DiagnosticsEngine &Diags) {
if (Args.hasArg(Opt))
return true;
- return None;
+ return std::nullopt;
}
-static Optional<bool> normalizeSimpleNegativeFlag(OptSpecifier Opt, unsigned,
- const ArgList &Args,
- DiagnosticsEngine &) {
+static std::optional<bool> normalizeSimpleNegativeFlag(OptSpecifier Opt,
+ unsigned,
+ const ArgList &Args,
+ DiagnosticsEngine &) {
if (Args.hasArg(Opt))
return false;
- return None;
+ return std::nullopt;
}
/// The tblgen-erated code passes in a fifth parameter of an arbitrary type, but
/// denormalizeSimpleFlags never looks at it. Avoid bloating compile-time with
/// unnecessary template instantiations and just ignore it with a variadic
/// argument.
-static void denormalizeSimpleFlag(SmallVectorImpl<const char *> &Args,
- const char *Spelling,
- CompilerInvocation::StringAllocator,
- Option::OptionClass, unsigned, /*T*/...) {
- Args.push_back(Spelling);
+static void denormalizeSimpleFlag(ArgumentConsumer Consumer,
+ const Twine &Spelling, Option::OptionClass,
+ unsigned, /*T*/...) {
+ Consumer(Spelling);
}
template <typename T> static constexpr bool is_uint64_t_convertible() {
- return !std::is_same<T, uint64_t>::value &&
- llvm::is_integral_or_enum<T>::value;
+ return !std::is_same_v<T, uint64_t> && llvm::is_integral_or_enum<T>::value;
}
template <typename T,
std::enable_if_t<!is_uint64_t_convertible<T>(), bool> = false>
static auto makeFlagToValueNormalizer(T Value) {
return [Value](OptSpecifier Opt, unsigned, const ArgList &Args,
- DiagnosticsEngine &) -> Optional<T> {
+ DiagnosticsEngine &) -> std::optional<T> {
if (Args.hasArg(Opt))
return Value;
- return None;
+ return std::nullopt;
};
}
@@ -204,40 +321,38 @@ static auto makeFlagToValueNormalizer(T Value) {
static auto makeBooleanOptionNormalizer(bool Value, bool OtherValue,
OptSpecifier OtherOpt) {
- return [Value, OtherValue, OtherOpt](OptSpecifier Opt, unsigned,
- const ArgList &Args,
- DiagnosticsEngine &) -> Optional<bool> {
+ return [Value, OtherValue,
+ OtherOpt](OptSpecifier Opt, unsigned, const ArgList &Args,
+ DiagnosticsEngine &) -> std::optional<bool> {
if (const Arg *A = Args.getLastArg(Opt, OtherOpt)) {
return A->getOption().matches(Opt) ? Value : OtherValue;
}
- return None;
+ return std::nullopt;
};
}
static auto makeBooleanOptionDenormalizer(bool Value) {
- return [Value](SmallVectorImpl<const char *> &Args, const char *Spelling,
- CompilerInvocation::StringAllocator, Option::OptionClass,
- unsigned, bool KeyPath) {
+ return [Value](ArgumentConsumer Consumer, const Twine &Spelling,
+ Option::OptionClass, unsigned, bool KeyPath) {
if (KeyPath == Value)
- Args.push_back(Spelling);
+ Consumer(Spelling);
};
}
-static void denormalizeStringImpl(SmallVectorImpl<const char *> &Args,
- const char *Spelling,
- CompilerInvocation::StringAllocator SA,
+static void denormalizeStringImpl(ArgumentConsumer Consumer,
+ const Twine &Spelling,
Option::OptionClass OptClass, unsigned,
const Twine &Value) {
switch (OptClass) {
case Option::SeparateClass:
case Option::JoinedOrSeparateClass:
case Option::JoinedAndSeparateClass:
- Args.push_back(Spelling);
- Args.push_back(SA(Value));
+ Consumer(Spelling);
+ Consumer(Value);
break;
case Option::JoinedClass:
case Option::CommaJoinedClass:
- Args.push_back(SA(Twine(Spelling) + Value));
+ Consumer(Spelling + Value);
break;
default:
llvm_unreachable("Cannot denormalize an option with option class "
@@ -246,41 +361,40 @@ static void denormalizeStringImpl(SmallVectorImpl<const char *> &Args,
}
template <typename T>
-static void
-denormalizeString(SmallVectorImpl<const char *> &Args, const char *Spelling,
- CompilerInvocation::StringAllocator SA,
- Option::OptionClass OptClass, unsigned TableIndex, T Value) {
- denormalizeStringImpl(Args, Spelling, SA, OptClass, TableIndex, Twine(Value));
+static void denormalizeString(ArgumentConsumer Consumer, const Twine &Spelling,
+ Option::OptionClass OptClass, unsigned TableIndex,
+ T Value) {
+ denormalizeStringImpl(Consumer, Spelling, OptClass, TableIndex, Twine(Value));
}
-static Optional<SimpleEnumValue>
+static std::optional<SimpleEnumValue>
findValueTableByName(const SimpleEnumValueTable &Table, StringRef Name) {
for (int I = 0, E = Table.Size; I != E; ++I)
if (Name == Table.Table[I].Name)
return Table.Table[I];
- return None;
+ return std::nullopt;
}
-static Optional<SimpleEnumValue>
+static std::optional<SimpleEnumValue>
findValueTableByValue(const SimpleEnumValueTable &Table, unsigned Value) {
for (int I = 0, E = Table.Size; I != E; ++I)
if (Value == Table.Table[I].Value)
return Table.Table[I];
- return None;
+ return std::nullopt;
}
-static llvm::Optional<unsigned> normalizeSimpleEnum(OptSpecifier Opt,
- unsigned TableIndex,
- const ArgList &Args,
- DiagnosticsEngine &Diags) {
+static std::optional<unsigned> normalizeSimpleEnum(OptSpecifier Opt,
+ unsigned TableIndex,
+ const ArgList &Args,
+ DiagnosticsEngine &Diags) {
assert(TableIndex < SimpleEnumValueTablesSize);
const SimpleEnumValueTable &Table = SimpleEnumValueTables[TableIndex];
auto *Arg = Args.getLastArg(Opt);
if (!Arg)
- return None;
+ return std::nullopt;
StringRef ArgValue = Arg->getValue();
if (auto MaybeEnumVal = findValueTableByName(Table, ArgValue))
@@ -288,18 +402,17 @@ static llvm::Optional<unsigned> normalizeSimpleEnum(OptSpecifier Opt,
Diags.Report(diag::err_drv_invalid_value)
<< Arg->getAsString(Args) << ArgValue;
- return None;
+ return std::nullopt;
}
-static void denormalizeSimpleEnumImpl(SmallVectorImpl<const char *> &Args,
- const char *Spelling,
- CompilerInvocation::StringAllocator SA,
+static void denormalizeSimpleEnumImpl(ArgumentConsumer Consumer,
+ const Twine &Spelling,
Option::OptionClass OptClass,
unsigned TableIndex, unsigned Value) {
assert(TableIndex < SimpleEnumValueTablesSize);
const SimpleEnumValueTable &Table = SimpleEnumValueTables[TableIndex];
if (auto MaybeEnumVal = findValueTableByValue(Table, Value)) {
- denormalizeString(Args, Spelling, SA, OptClass, TableIndex,
+ denormalizeString(Consumer, Spelling, OptClass, TableIndex,
MaybeEnumVal->Name);
} else {
llvm_unreachable("The simple enum value was not correctly defined in "
@@ -308,49 +421,48 @@ static void denormalizeSimpleEnumImpl(SmallVectorImpl<const char *> &Args,
}
template <typename T>
-static void denormalizeSimpleEnum(SmallVectorImpl<const char *> &Args,
- const char *Spelling,
- CompilerInvocation::StringAllocator SA,
+static void denormalizeSimpleEnum(ArgumentConsumer Consumer,
+ const Twine &Spelling,
Option::OptionClass OptClass,
unsigned TableIndex, T Value) {
- return denormalizeSimpleEnumImpl(Args, Spelling, SA, OptClass, TableIndex,
+ return denormalizeSimpleEnumImpl(Consumer, Spelling, OptClass, TableIndex,
static_cast<unsigned>(Value));
}
-static Optional<std::string> normalizeString(OptSpecifier Opt, int TableIndex,
- const ArgList &Args,
- DiagnosticsEngine &Diags) {
+static std::optional<std::string> normalizeString(OptSpecifier Opt,
+ int TableIndex,
+ const ArgList &Args,
+ DiagnosticsEngine &Diags) {
auto *Arg = Args.getLastArg(Opt);
if (!Arg)
- return None;
+ return std::nullopt;
return std::string(Arg->getValue());
}
template <typename IntTy>
-static Optional<IntTy> normalizeStringIntegral(OptSpecifier Opt, int,
- const ArgList &Args,
- DiagnosticsEngine &Diags) {
+static std::optional<IntTy> normalizeStringIntegral(OptSpecifier Opt, int,
+ const ArgList &Args,
+ DiagnosticsEngine &Diags) {
auto *Arg = Args.getLastArg(Opt);
if (!Arg)
- return None;
+ return std::nullopt;
IntTy Res;
if (StringRef(Arg->getValue()).getAsInteger(0, Res)) {
Diags.Report(diag::err_drv_invalid_int_value)
<< Arg->getAsString(Args) << Arg->getValue();
- return None;
+ return std::nullopt;
}
return Res;
}
-static Optional<std::vector<std::string>>
+static std::optional<std::vector<std::string>>
normalizeStringVector(OptSpecifier Opt, int, const ArgList &Args,
DiagnosticsEngine &) {
return Args.getAllArgValues(Opt);
}
-static void denormalizeStringVector(SmallVectorImpl<const char *> &Args,
- const char *Spelling,
- CompilerInvocation::StringAllocator SA,
+static void denormalizeStringVector(ArgumentConsumer Consumer,
+ const Twine &Spelling,
Option::OptionClass OptClass,
unsigned TableIndex,
const std::vector<std::string> &Values) {
@@ -364,7 +476,7 @@ static void denormalizeStringVector(SmallVectorImpl<const char *> &Args,
CommaJoinedValue.append(Value);
}
}
- denormalizeString(Args, Spelling, SA, Option::OptionClass::JoinedClass,
+ denormalizeString(Consumer, Spelling, Option::OptionClass::JoinedClass,
TableIndex, CommaJoinedValue);
break;
}
@@ -372,7 +484,7 @@ static void denormalizeStringVector(SmallVectorImpl<const char *> &Args,
case Option::SeparateClass:
case Option::JoinedOrSeparateClass:
for (const std::string &Value : Values)
- denormalizeString(Args, Spelling, SA, OptClass, TableIndex, Value);
+ denormalizeString(Consumer, Spelling, OptClass, TableIndex, Value);
break;
default:
llvm_unreachable("Cannot denormalize an option with option class "
@@ -380,12 +492,13 @@ static void denormalizeStringVector(SmallVectorImpl<const char *> &Args,
}
}
-static Optional<std::string> normalizeTriple(OptSpecifier Opt, int TableIndex,
- const ArgList &Args,
- DiagnosticsEngine &Diags) {
+static std::optional<std::string> normalizeTriple(OptSpecifier Opt,
+ int TableIndex,
+ const ArgList &Args,
+ DiagnosticsEngine &Diags) {
auto *Arg = Args.getLastArg(Opt);
if (!Arg)
- return None;
+ return std::nullopt;
return llvm::Triple::normalize(Arg->getValue());
}
@@ -408,9 +521,11 @@ static T extractMaskValue(T KeyPath) {
}
#define PARSE_OPTION_WITH_MARSHALLING( \
- ARGS, DIAGS, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX) \
- if ((FLAGS)&options::CC1Option) { \
+ ARGS, DIAGS, PREFIX_TYPE, SPELLING, ID, KIND, GROUP, ALIAS, ALIASARGS, \
+ FLAGS, VISIBILITY, PARAM, HELPTEXT, METAVAR, VALUES, SHOULD_PARSE, \
+ ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, \
+ NORMALIZER, DENORMALIZER, MERGER, EXTRACTOR, TABLE_INDEX) \
+ if ((VISIBILITY)&options::CC1Option) { \
KEYPATH = MERGER(KEYPATH, DEFAULT_VALUE); \
if (IMPLIED_CHECK) \
KEYPATH = MERGER(KEYPATH, IMPLIED_VALUE); \
@@ -423,28 +538,29 @@ static T extractMaskValue(T KeyPath) {
// Capture the extracted value as a lambda argument to avoid potential issues
// with lifetime extension of the reference.
#define GENERATE_OPTION_WITH_MARSHALLING( \
- ARGS, STRING_ALLOCATOR, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, \
- TABLE_INDEX) \
- if ((FLAGS)&options::CC1Option) { \
+ CONSUMER, PREFIX_TYPE, SPELLING, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, \
+ VISIBILITY, PARAM, HELPTEXT, METAVAR, VALUES, SHOULD_PARSE, ALWAYS_EMIT, \
+ KEYPATH, DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, \
+ DENORMALIZER, MERGER, EXTRACTOR, TABLE_INDEX) \
+ if ((VISIBILITY)&options::CC1Option) { \
[&](const auto &Extracted) { \
if (ALWAYS_EMIT || \
(Extracted != \
static_cast<decltype(KEYPATH)>((IMPLIED_CHECK) ? (IMPLIED_VALUE) \
: (DEFAULT_VALUE)))) \
- DENORMALIZER(ARGS, SPELLING, STRING_ALLOCATOR, Option::KIND##Class, \
- TABLE_INDEX, Extracted); \
+ DENORMALIZER(CONSUMER, SPELLING, Option::KIND##Class, TABLE_INDEX, \
+ Extracted); \
}(EXTRACTOR(KEYPATH)); \
}
-static const StringRef GetInputKindName(InputKind IK);
+static StringRef GetInputKindName(InputKind IK);
static bool FixupInvocation(CompilerInvocation &Invocation,
DiagnosticsEngine &Diags, const ArgList &Args,
InputKind IK) {
unsigned NumErrorsBefore = Diags.getNumErrors();
- LangOptions &LangOpts = *Invocation.getLangOpts();
+ LangOptions &LangOpts = Invocation.getLangOpts();
CodeGenOptions &CodeGenOpts = Invocation.getCodeGenOpts();
TargetOptions &TargetOpts = Invocation.getTargetOpts();
FrontendOptions &FrontendOpts = Invocation.getFrontendOpts();
@@ -453,6 +569,8 @@ static bool FixupInvocation(CompilerInvocation &Invocation,
CodeGenOpts.XRayAlwaysEmitTypedEvents = LangOpts.XRayAlwaysEmitTypedEvents;
CodeGenOpts.DisableFree = FrontendOpts.DisableFree;
FrontendOpts.GenerateGlobalModuleIndex = FrontendOpts.UseGlobalModuleIndex;
+ if (FrontendOpts.ShowStats)
+ CodeGenOpts.ClearASTBeforeBackend = false;
LangOpts.SanitizeCoverage = CodeGenOpts.hasSanitizeCoverage();
LangOpts.ForceEmitVTables = CodeGenOpts.ForceEmitVTables;
LangOpts.SpeculativeLoadHardening = CodeGenOpts.SpeculativeLoadHardening;
@@ -462,6 +580,7 @@ static bool FixupInvocation(CompilerInvocation &Invocation,
llvm::Triple::ArchType Arch = T.getArch();
CodeGenOpts.CodeModel = TargetOpts.CodeModel;
+ CodeGenOpts.LargeDataThreshold = TargetOpts.LargeDataThreshold;
if (LangOpts.getExceptionHandling() !=
LangOptions::ExceptionHandlingKind::None &&
@@ -472,9 +591,6 @@ static bool FixupInvocation(CompilerInvocation &Invocation,
if (LangOpts.AppleKext && !LangOpts.CPlusPlus)
Diags.Report(diag::warn_c_kext);
- if (Args.hasArg(OPT_fconcepts_ts))
- Diags.Report(diag::warn_fe_concepts_ts_flag);
-
if (LangOpts.NewAlignOverride &&
!llvm::isPowerOf2_32(LangOpts.NewAlignOverride)) {
Arg *A = Args.getLastArg(OPT_fnew_alignment_EQ);
@@ -492,6 +608,10 @@ static bool FixupInvocation(CompilerInvocation &Invocation,
Diags.Report(diag::err_drv_argument_not_allowed_with)
<< "-fgnu89-inline" << GetInputKindName(IK);
+ if (Args.hasArg(OPT_hlsl_entrypoint) && !LangOpts.HLSL)
+ Diags.Report(diag::err_drv_argument_not_allowed_with)
+ << "-hlsl-entry" << GetInputKindName(IK);
+
if (Args.hasArg(OPT_fgpu_allow_device_init) && !LangOpts.HIP)
Diags.Report(diag::warn_ignored_hip_only_option)
<< Args.getLastArg(OPT_fgpu_allow_device_init)->getAsString(Args);
@@ -500,12 +620,29 @@ static bool FixupInvocation(CompilerInvocation &Invocation,
Diags.Report(diag::warn_ignored_hip_only_option)
<< Args.getLastArg(OPT_gpu_max_threads_per_block_EQ)->getAsString(Args);
+ // When these options are used, the compiler is allowed to apply
+ // optimizations that may affect the final result. For example
+ // (x+y)+z is transformed to x+(y+z) but may not give the same
+ // final result; it's not value safe.
+ // Another example can be to simplify x/x to 1.0 but x could be 0.0, INF
+ // or NaN. Final result may then differ. An error is issued when the eval
+ // method is set with one of these options.
+ if (Args.hasArg(OPT_ffp_eval_method_EQ)) {
+ if (LangOpts.ApproxFunc)
+ Diags.Report(diag::err_incompatible_fp_eval_method_options) << 0;
+ if (LangOpts.AllowFPReassoc)
+ Diags.Report(diag::err_incompatible_fp_eval_method_options) << 1;
+ if (LangOpts.AllowRecip)
+ Diags.Report(diag::err_incompatible_fp_eval_method_options) << 2;
+ }
+
// -cl-strict-aliasing needs to emit diagnostic in the case where CL > 1.0.
// This option should be deprecated for CL > 1.0 because
// this option was added for compatibility with OpenCL 1.0.
- if (Args.getLastArg(OPT_cl_strict_aliasing) && LangOpts.OpenCLVersion > 100)
+ if (Args.getLastArg(OPT_cl_strict_aliasing) &&
+ (LangOpts.getOpenCLCompatibleVersion() > 100))
Diags.Report(diag::warn_option_invalid_ocl_version)
- << LangOpts.getOpenCLVersionTuple().getAsString()
+ << LangOpts.getOpenCLVersionString()
<< Args.getLastArg(OPT_cl_strict_aliasing)->getAsString(Args);
if (Arg *A = Args.getLastArg(OPT_fdefault_calling_conv_EQ)) {
@@ -517,16 +654,12 @@ static bool FixupInvocation(CompilerInvocation &Invocation,
emitError |= (DefaultCC == LangOptions::DCC_VectorCall ||
DefaultCC == LangOptions::DCC_RegCall) &&
!T.isX86();
+ emitError |= DefaultCC == LangOptions::DCC_RtdCall && Arch != llvm::Triple::m68k;
if (emitError)
Diags.Report(diag::err_drv_argument_not_allowed_with)
<< A->getSpelling() << T.getTriple();
}
- if (!CodeGenOpts.ProfileRemappingFile.empty() && CodeGenOpts.LegacyPassManager)
- Diags.Report(diag::err_drv_argument_only_allowed_with)
- << Args.getLastArg(OPT_fprofile_remapping_file_EQ)->getAsString(Args)
- << "-fno-legacy-pass-manager";
-
return Diags.getNumErrors() == NumErrorsBefore;
}
@@ -536,27 +669,27 @@ static bool FixupInvocation(CompilerInvocation &Invocation,
static unsigned getOptimizationLevel(ArgList &Args, InputKind IK,
DiagnosticsEngine &Diags) {
- unsigned DefaultOpt = llvm::CodeGenOpt::None;
+ unsigned DefaultOpt = 0;
if ((IK.getLanguage() == Language::OpenCL ||
IK.getLanguage() == Language::OpenCLCXX) &&
!Args.hasArg(OPT_cl_opt_disable))
- DefaultOpt = llvm::CodeGenOpt::Default;
+ DefaultOpt = 2;
if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
if (A->getOption().matches(options::OPT_O0))
- return llvm::CodeGenOpt::None;
+ return 0;
if (A->getOption().matches(options::OPT_Ofast))
- return llvm::CodeGenOpt::Aggressive;
+ return 3;
assert(A->getOption().matches(options::OPT_O));
StringRef S(A->getValue());
if (S == "s" || S == "z")
- return llvm::CodeGenOpt::Default;
+ return 2;
if (S == "g")
- return llvm::CodeGenOpt::Less;
+ return 1;
return getLastArgIntValue(Args, OPT_O, DefaultOpt, Diags);
}
@@ -580,21 +713,18 @@ static unsigned getOptimizationLevelSize(ArgList &Args) {
return 0;
}
-static void GenerateArg(SmallVectorImpl<const char *> &Args,
- llvm::opt::OptSpecifier OptSpecifier,
- CompilerInvocation::StringAllocator SA) {
+static void GenerateArg(ArgumentConsumer Consumer,
+ llvm::opt::OptSpecifier OptSpecifier) {
Option Opt = getDriverOptTable().getOption(OptSpecifier);
- denormalizeSimpleFlag(Args, SA(Opt.getPrefix() + Opt.getName()), SA,
+ denormalizeSimpleFlag(Consumer, Opt.getPrefixedName(),
Option::OptionClass::FlagClass, 0);
}
-static void GenerateArg(SmallVectorImpl<const char *> &Args,
+static void GenerateArg(ArgumentConsumer Consumer,
llvm::opt::OptSpecifier OptSpecifier,
- const Twine &Value,
- CompilerInvocation::StringAllocator SA) {
+ const Twine &Value) {
Option Opt = getDriverOptTable().getOption(OptSpecifier);
- denormalizeString(Args, SA(Opt.getPrefix() + Opt.getName()), SA,
- Opt.getKind(), 0, Value);
+ denormalizeString(Consumer, Opt.getPrefixedName(), Opt.getKind(), 0, Value);
}
// Parse command line arguments into CompilerInvocation.
@@ -607,32 +737,47 @@ using GenerateFn = llvm::function_ref<void(
CompilerInvocation &, SmallVectorImpl<const char *> &,
CompilerInvocation::StringAllocator)>;
-// May perform round-trip of command line arguments. By default, the round-trip
-// is enabled if CLANG_ROUND_TRIP_CC1_ARGS was defined during build. This can be
-// overwritten at run-time via the "-round-trip-args" and "-no-round-trip-args"
-// command line flags.
-// During round-trip, the command line arguments are parsed into a dummy
-// instance of CompilerInvocation which is used to generate the command line
-// arguments again. The real CompilerInvocation instance is then created by
-// parsing the generated arguments, not the original ones.
+/// May perform round-trip of command line arguments. By default, the round-trip
+/// is enabled in assert builds. This can be overwritten at run-time via the
+/// "-round-trip-args" and "-no-round-trip-args" command line flags, or via the
+/// ForceRoundTrip parameter.
+///
+/// During round-trip, the command line arguments are parsed into a dummy
+/// CompilerInvocation, which is used to generate the command line arguments
+/// again. The real CompilerInvocation is then created by parsing the generated
+/// arguments, not the original ones. This (in combination with tests covering
+/// argument behavior) ensures the generated command line is complete (doesn't
+/// drop/mangle any arguments).
+///
+/// Finally, we check the command line that was used to create the real
+/// CompilerInvocation instance. By default, we compare it to the command line
+/// the real CompilerInvocation generates. This checks whether the generator is
+/// deterministic. If \p CheckAgainstOriginalInvocation is enabled, we instead
+/// compare it to the original command line to verify the original command-line
+/// was canonical and can round-trip exactly.
static bool RoundTrip(ParseFn Parse, GenerateFn Generate,
CompilerInvocation &RealInvocation,
CompilerInvocation &DummyInvocation,
ArrayRef<const char *> CommandLineArgs,
- DiagnosticsEngine &Diags, const char *Argv0) {
- // FIXME: Switch to '#ifndef NDEBUG' when possible.
-#ifdef CLANG_ROUND_TRIP_CC1_ARGS
+ DiagnosticsEngine &Diags, const char *Argv0,
+ bool CheckAgainstOriginalInvocation = false,
+ bool ForceRoundTrip = false) {
+#ifndef NDEBUG
bool DoRoundTripDefault = true;
#else
bool DoRoundTripDefault = false;
#endif
bool DoRoundTrip = DoRoundTripDefault;
- for (const auto *Arg : CommandLineArgs) {
- if (Arg == StringRef("-round-trip-args"))
- DoRoundTrip = true;
- if (Arg == StringRef("-no-round-trip-args"))
- DoRoundTrip = false;
+ if (ForceRoundTrip) {
+ DoRoundTrip = true;
+ } else {
+ for (const auto *Arg : CommandLineArgs) {
+ if (Arg == StringRef("-round-trip-args"))
+ DoRoundTrip = true;
+ if (Arg == StringRef("-no-round-trip-args"))
+ DoRoundTrip = false;
+ }
}
// If round-trip was not requested, simply run the parser with the real
@@ -687,30 +832,34 @@ static bool RoundTrip(ParseFn Parse, GenerateFn Generate,
// Generate arguments from the dummy invocation. If Generate is the
// inverse of Parse, the newly generated arguments must have the same
// semantics as the original.
- SmallVector<const char *> GeneratedArgs1;
- Generate(DummyInvocation, GeneratedArgs1, SA);
+ SmallVector<const char *> GeneratedArgs;
+ Generate(DummyInvocation, GeneratedArgs, SA);
// Run the second parse, now on the generated arguments, and with the real
// invocation and diagnostics. The result is what we will end up using for the
// rest of compilation, so if Generate is not inverse of Parse, something down
// the line will break.
- bool Success2 = Parse(RealInvocation, GeneratedArgs1, Diags, Argv0);
+ bool Success2 = Parse(RealInvocation, GeneratedArgs, Diags, Argv0);
// The first parse on original arguments succeeded, but second parse of
// generated arguments failed. Something must be wrong with the generator.
if (!Success2) {
Diags.Report(diag::err_cc1_round_trip_ok_then_fail);
Diags.Report(diag::note_cc1_round_trip_generated)
- << 1 << SerializeArgs(GeneratedArgs1);
+ << 1 << SerializeArgs(GeneratedArgs);
return false;
}
- // Generate arguments again, this time from the options we will end up using
- // for the rest of the compilation.
- SmallVector<const char *> GeneratedArgs2;
- Generate(RealInvocation, GeneratedArgs2, SA);
+ SmallVector<const char *> ComparisonArgs;
+ if (CheckAgainstOriginalInvocation)
+ // Compare against original arguments.
+ ComparisonArgs.assign(CommandLineArgs.begin(), CommandLineArgs.end());
+ else
+ // Generate arguments again, this time from the options we will end up using
+ // for the rest of the compilation.
+ Generate(RealInvocation, ComparisonArgs, SA);
- // Compares two lists of generated arguments.
+ // Compares two lists of arguments.
auto Equal = [](const ArrayRef<const char *> A,
const ArrayRef<const char *> B) {
return std::equal(A.begin(), A.end(), B.begin(), B.end(),
@@ -722,23 +871,41 @@ static bool RoundTrip(ParseFn Parse, GenerateFn Generate,
// If we generated different arguments from what we assume are two
// semantically equivalent CompilerInvocations, the Generate function may
// be non-deterministic.
- if (!Equal(GeneratedArgs1, GeneratedArgs2)) {
+ if (!Equal(GeneratedArgs, ComparisonArgs)) {
Diags.Report(diag::err_cc1_round_trip_mismatch);
Diags.Report(diag::note_cc1_round_trip_generated)
- << 1 << SerializeArgs(GeneratedArgs1);
+ << 1 << SerializeArgs(GeneratedArgs);
Diags.Report(diag::note_cc1_round_trip_generated)
- << 2 << SerializeArgs(GeneratedArgs2);
+ << 2 << SerializeArgs(ComparisonArgs);
return false;
}
Diags.Report(diag::remark_cc1_round_trip_generated)
- << 1 << SerializeArgs(GeneratedArgs1);
+ << 1 << SerializeArgs(GeneratedArgs);
Diags.Report(diag::remark_cc1_round_trip_generated)
- << 2 << SerializeArgs(GeneratedArgs2);
+ << 2 << SerializeArgs(ComparisonArgs);
return Success2;
}
+bool CompilerInvocation::checkCC1RoundTrip(ArrayRef<const char *> Args,
+ DiagnosticsEngine &Diags,
+ const char *Argv0) {
+ CompilerInvocation DummyInvocation1, DummyInvocation2;
+ return RoundTrip(
+ [](CompilerInvocation &Invocation, ArrayRef<const char *> CommandLineArgs,
+ DiagnosticsEngine &Diags, const char *Argv0) {
+ return CreateFromArgsImpl(Invocation, CommandLineArgs, Diags, Argv0);
+ },
+ [](CompilerInvocation &Invocation, SmallVectorImpl<const char *> &Args,
+ StringAllocator SA) {
+ Args.push_back("-cc1");
+ Invocation.generateCC1CommandLine(Args, SA);
+ },
+ DummyInvocation1, DummyInvocation2, Args, Diags, Argv0,
+ /*CheckAgainstOriginalInvocation=*/true, /*ForceRoundTrip=*/true);
+}
+
static void addDiagnosticArgs(ArgList &Args, OptSpecifier Group,
OptSpecifier GroupWithValue,
std::vector<std::string> &Diagnostics) {
@@ -768,45 +935,24 @@ static void parseAnalyzerConfigs(AnalyzerOptions &AnOpts,
static void getAllNoBuiltinFuncValues(ArgList &Args,
std::vector<std::string> &Funcs) {
std::vector<std::string> Values = Args.getAllArgValues(OPT_fno_builtin_);
- auto BuiltinEnd = llvm::partition(Values, [](const std::string FuncName) {
- return Builtin::Context::isBuiltinFunc(FuncName);
- });
+ auto BuiltinEnd = llvm::partition(Values, Builtin::Context::isBuiltinFunc);
Funcs.insert(Funcs.end(), Values.begin(), BuiltinEnd);
}
-static void GenerateAnalyzerArgs(AnalyzerOptions &Opts,
- SmallVectorImpl<const char *> &Args,
- CompilerInvocation::StringAllocator SA) {
+static void GenerateAnalyzerArgs(const AnalyzerOptions &Opts,
+ ArgumentConsumer Consumer) {
const AnalyzerOptions *AnalyzerOpts = &Opts;
-#define ANALYZER_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define ANALYZER_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Consumer, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef ANALYZER_OPTION_WITH_MARSHALLING
- if (Opts.AnalysisStoreOpt != RegionStoreModel) {
- switch (Opts.AnalysisStoreOpt) {
-#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATFN) \
- case NAME##Model: \
- GenerateArg(Args, OPT_analyzer_store, CMDFLAG, SA); \
- break;
-#include "clang/StaticAnalyzer/Core/Analyses.def"
- default:
- llvm_unreachable("Tried to generate unknown analysis store.");
- }
- }
-
if (Opts.AnalysisConstraintsOpt != RangeConstraintsModel) {
switch (Opts.AnalysisConstraintsOpt) {
#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATFN) \
case NAME##Model: \
- GenerateArg(Args, OPT_analyzer_constraints, CMDFLAG, SA); \
+ GenerateArg(Consumer, OPT_analyzer_constraints, CMDFLAG); \
break;
#include "clang/StaticAnalyzer/Core/Analyses.def"
default:
@@ -818,7 +964,7 @@ static void GenerateAnalyzerArgs(AnalyzerOptions &Opts,
switch (Opts.AnalysisDiagOpt) {
#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATFN) \
case PD_##NAME: \
- GenerateArg(Args, OPT_analyzer_output, CMDFLAG, SA); \
+ GenerateArg(Consumer, OPT_analyzer_output, CMDFLAG); \
break;
#include "clang/StaticAnalyzer/Core/Analyses.def"
default:
@@ -830,7 +976,7 @@ static void GenerateAnalyzerArgs(AnalyzerOptions &Opts,
switch (Opts.AnalysisPurgeOpt) {
#define ANALYSIS_PURGE(NAME, CMDFLAG, DESC) \
case NAME: \
- GenerateArg(Args, OPT_analyzer_purge, CMDFLAG, SA); \
+ GenerateArg(Consumer, OPT_analyzer_purge, CMDFLAG); \
break;
#include "clang/StaticAnalyzer/Core/Analyses.def"
default:
@@ -842,7 +988,7 @@ static void GenerateAnalyzerArgs(AnalyzerOptions &Opts,
switch (Opts.InliningMode) {
#define ANALYSIS_INLINING_MODE(NAME, CMDFLAG, DESC) \
case NAME: \
- GenerateArg(Args, OPT_analyzer_inlining_mode, CMDFLAG, SA); \
+ GenerateArg(Consumer, OPT_analyzer_inlining_mode, CMDFLAG); \
break;
#include "clang/StaticAnalyzer/Core/Analyses.def"
default:
@@ -853,20 +999,26 @@ static void GenerateAnalyzerArgs(AnalyzerOptions &Opts,
for (const auto &CP : Opts.CheckersAndPackages) {
OptSpecifier Opt =
CP.second ? OPT_analyzer_checker : OPT_analyzer_disable_checker;
- GenerateArg(Args, Opt, CP.first, SA);
+ GenerateArg(Consumer, Opt, CP.first);
}
AnalyzerOptions ConfigOpts;
parseAnalyzerConfigs(ConfigOpts, nullptr);
- for (const auto &C : Opts.Config) {
+ // Sort options by key to avoid relying on StringMap iteration order.
+ SmallVector<std::pair<StringRef, StringRef>, 4> SortedConfigOpts;
+ for (const auto &C : Opts.Config)
+ SortedConfigOpts.emplace_back(C.getKey(), C.getValue());
+ llvm::sort(SortedConfigOpts, llvm::less_first());
+
+ for (const auto &[Key, Value] : SortedConfigOpts) {
// Don't generate anything that came from parseAnalyzerConfigs. It would be
// redundant and may not be valid on the command line.
- auto Entry = ConfigOpts.Config.find(C.getKey());
- if (Entry != ConfigOpts.Config.end() && Entry->getValue() == C.getValue())
+ auto Entry = ConfigOpts.Config.find(Key);
+ if (Entry != ConfigOpts.Config.end() && Entry->getValue() == Value)
continue;
- GenerateArg(Args, OPT_analyzer_config, C.getKey() + "=" + C.getValue(), SA);
+ GenerateArg(Consumer, OPT_analyzer_config, Key + "=" + Value);
}
// Nothing to generate for FullCompilerInvocation.
@@ -878,32 +1030,11 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
AnalyzerOptions *AnalyzerOpts = &Opts;
-#define ANALYZER_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define ANALYZER_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef ANALYZER_OPTION_WITH_MARSHALLING
- if (Arg *A = Args.getLastArg(OPT_analyzer_store)) {
- StringRef Name = A->getValue();
- AnalysisStores Value = llvm::StringSwitch<AnalysisStores>(Name)
-#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATFN) \
- .Case(CMDFLAG, NAME##Model)
-#include "clang/StaticAnalyzer/Core/Analyses.def"
- .Default(NumStores);
- if (Value == NumStores) {
- Diags.Report(diag::err_drv_invalid_value)
- << A->getAsString(Args) << Name;
- } else {
- Opts.AnalysisStoreOpt = Value;
- }
- }
-
if (Arg *A = Args.getLastArg(OPT_analyzer_constraints)) {
StringRef Name = A->getValue();
AnalysisConstraints Value = llvm::StringSwitch<AnalysisConstraints>(Name)
@@ -915,6 +1046,11 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << Name;
} else {
+#ifndef LLVM_WITH_Z3
+ if (Value == AnalysisConstraints::Z3ConstraintsModel) {
+ Diags.Report(diag::err_analyzer_not_built_with_z3);
+ }
+#endif // LLVM_WITH_Z3
Opts.AnalysisConstraintsOpt = Value;
}
}
@@ -995,7 +1131,7 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
diag::err_analyzer_config_no_value) << configVal;
break;
}
- if (val.find('=') != StringRef::npos) {
+ if (val.contains('=')) {
Diags.Report(SourceLocation(),
diag::err_analyzer_config_multiple_values)
<< configVal;
@@ -1049,11 +1185,12 @@ static void initOption(AnalyzerOptions::ConfigTable &Config,
static void initOption(AnalyzerOptions::ConfigTable &Config,
DiagnosticsEngine *Diags,
bool &OptionField, StringRef Name, bool DefaultVal) {
- auto PossiblyInvalidVal = llvm::StringSwitch<Optional<bool>>(
- getStringOption(Config, Name, (DefaultVal ? "true" : "false")))
- .Case("true", true)
- .Case("false", false)
- .Default(None);
+ auto PossiblyInvalidVal =
+ llvm::StringSwitch<std::optional<bool>>(
+ getStringOption(Config, Name, (DefaultVal ? "true" : "false")))
+ .Case("true", true)
+ .Case("false", false)
+ .Default(std::nullopt);
if (!PossiblyInvalidVal) {
if (Diags)
@@ -1062,7 +1199,7 @@ static void initOption(AnalyzerOptions::ConfigTable &Config,
else
OptionField = DefaultVal;
} else
- OptionField = PossiblyInvalidVal.getValue();
+ OptionField = *PossiblyInvalidVal;
}
static void initOption(AnalyzerOptions::ConfigTable &Config,
@@ -1081,25 +1218,22 @@ static void initOption(AnalyzerOptions::ConfigTable &Config,
static void parseAnalyzerConfigs(AnalyzerOptions &AnOpts,
DiagnosticsEngine *Diags) {
// TODO: There's no need to store the entire configtable, it'd be plenty
- // enough tostore checker options.
+ // enough to store checker options.
#define ANALYZER_OPTION(TYPE, NAME, CMDFLAG, DESC, DEFAULT_VAL) \
initOption(AnOpts.Config, Diags, AnOpts.NAME, CMDFLAG, DEFAULT_VAL);
+#define ANALYZER_OPTION_DEPENDS_ON_USER_MODE(...)
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.def"
-#define ANALYZER_OPTION_DEPENDS_ON_USER_MODE(TYPE, NAME, CMDFLAG, DESC, \
- SHALLOW_VAL, DEEP_VAL) \
- switch (AnOpts.getUserMode()) { \
- case UMK_Shallow: \
- initOption(AnOpts.Config, Diags, AnOpts.NAME, CMDFLAG, SHALLOW_VAL); \
- break; \
- case UMK_Deep: \
- initOption(AnOpts.Config, Diags, AnOpts.NAME, CMDFLAG, DEEP_VAL); \
- break; \
- } \
+ assert(AnOpts.UserMode == "shallow" || AnOpts.UserMode == "deep");
+ const bool InShallowMode = AnOpts.UserMode == "shallow";
+#define ANALYZER_OPTION(...)
+#define ANALYZER_OPTION_DEPENDS_ON_USER_MODE(TYPE, NAME, CMDFLAG, DESC, \
+ SHALLOW_VAL, DEEP_VAL) \
+ initOption(AnOpts.Config, Diags, AnOpts.NAME, CMDFLAG, \
+ InShallowMode ? SHALLOW_VAL : DEEP_VAL);
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.def"
-#undef ANALYZER_OPTION
-#undef ANALYZER_OPTION_DEPENDS_ON_USER_MODE
// At this point, AnalyzerOptions is configured. Let's validate some options.
@@ -1118,10 +1252,9 @@ static void parseAnalyzerConfigs(AnalyzerOptions &AnOpts,
for (const StringRef &CheckerOrPackage : CheckersAndPackages) {
if (Diags) {
bool IsChecker = CheckerOrPackage.contains('.');
- bool IsValidName =
- IsChecker
- ? llvm::find(Checkers, CheckerOrPackage) != Checkers.end()
- : llvm::find(Packages, CheckerOrPackage) != Packages.end();
+ bool IsValidName = IsChecker
+ ? llvm::is_contained(Checkers, CheckerOrPackage)
+ : llvm::is_contained(Packages, CheckerOrPackage);
if (!IsValidName)
Diags->Report(diag::err_unknown_analyzer_checker_or_package)
@@ -1151,16 +1284,15 @@ static void parseAnalyzerConfigs(AnalyzerOptions &AnOpts,
/// Generate a remark argument. This is an inverse of `ParseOptimizationRemark`.
static void
-GenerateOptimizationRemark(SmallVectorImpl<const char *> &Args,
- CompilerInvocation::StringAllocator SA,
- OptSpecifier OptEQ, StringRef Name,
+GenerateOptimizationRemark(ArgumentConsumer Consumer, OptSpecifier OptEQ,
+ StringRef Name,
const CodeGenOptions::OptRemark &Remark) {
if (Remark.hasValidPattern()) {
- GenerateArg(Args, OptEQ, Remark.Pattern, SA);
+ GenerateArg(Consumer, OptEQ, Remark.Pattern);
} else if (Remark.Kind == CodeGenOptions::RK_Enabled) {
- GenerateArg(Args, OPT_R_Joined, Name, SA);
+ GenerateArg(Consumer, OPT_R_Joined, Name);
} else if (Remark.Kind == CodeGenOptions::RK_Disabled) {
- GenerateArg(Args, OPT_R_Joined, StringRef("no-") + Name, SA);
+ GenerateArg(Consumer, OPT_R_Joined, StringRef("no-") + Name);
}
}
@@ -1172,8 +1304,9 @@ ParseOptimizationRemark(DiagnosticsEngine &Diags, ArgList &Args,
OptSpecifier OptEQ, StringRef Name) {
CodeGenOptions::OptRemark Result;
- auto InitializeResultPattern = [&Diags, &Args, &Result](const Arg *A) {
- Result.Pattern = A->getValue();
+ auto InitializeResultPattern = [&Diags, &Args, &Result](const Arg *A,
+ StringRef Pattern) {
+ Result.Pattern = Pattern.str();
std::string RegexError;
Result.Regex = std::make_shared<llvm::Regex>(Result.Pattern);
@@ -1198,19 +1331,23 @@ ParseOptimizationRemark(DiagnosticsEngine &Diags, ArgList &Args,
Result.Kind = CodeGenOptions::RK_Disabled;
else if (Value == "no-everything")
Result.Kind = CodeGenOptions::RK_DisabledEverything;
+ else
+ continue;
+
+ if (Result.Kind == CodeGenOptions::RK_Disabled ||
+ Result.Kind == CodeGenOptions::RK_DisabledEverything) {
+ Result.Pattern = "";
+ Result.Regex = nullptr;
+ } else {
+ InitializeResultPattern(A, ".*");
+ }
} else if (A->getOption().matches(OptEQ)) {
Result.Kind = CodeGenOptions::RK_WithPattern;
- if (!InitializeResultPattern(A))
+ if (!InitializeResultPattern(A, A->getValue()))
return CodeGenOptions::OptRemark();
}
}
- if (Result.Kind == CodeGenOptions::RK_Disabled ||
- Result.Kind == CodeGenOptions::RK_DisabledEverything) {
- Result.Pattern = "";
- Result.Regex = nullptr;
- }
-
return Result;
}
@@ -1279,22 +1416,29 @@ static std::string serializeXRayInstrumentationBundle(const XRayInstrSet &S) {
std::string Buffer;
llvm::raw_string_ostream OS(Buffer);
llvm::interleave(BundleParts, OS, [&OS](StringRef Part) { OS << Part; }, ",");
- return OS.str();
+ return Buffer;
}
// Set the profile kind using fprofile-instrument-use-path.
static void setPGOUseInstrumentor(CodeGenOptions &Opts,
- const Twine &ProfileName) {
- auto ReaderOrErr = llvm::IndexedInstrProfReader::create(ProfileName);
- // In error, return silently and let Clang PGOUse report the error message.
+ const Twine &ProfileName,
+ llvm::vfs::FileSystem &FS,
+ DiagnosticsEngine &Diags) {
+ auto ReaderOrErr = llvm::IndexedInstrProfReader::create(ProfileName, FS);
if (auto E = ReaderOrErr.takeError()) {
- llvm::consumeError(std::move(E));
- Opts.setProfileUse(CodeGenOptions::ProfileClangInstr);
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "Error in reading profile %0: %1");
+ llvm::handleAllErrors(std::move(E), [&](const llvm::ErrorInfoBase &EI) {
+ Diags.Report(DiagID) << ProfileName.str() << EI.message();
+ });
return;
}
std::unique_ptr<llvm::IndexedInstrProfReader> PGOReader =
std::move(ReaderOrErr.get());
- if (PGOReader->isIRLevelProfile()) {
+ // Currently memprof profiles are only added at the IR level. Mark the profile
+ // type as IR in that case as well and the subsequent matching needs to detect
+ // which is available (might be one or both).
+ if (PGOReader->isIRLevelProfile() || PGOReader->hasMemoryProfile()) {
if (PGOReader->hasCSIRLevelProfile())
Opts.setProfileUse(CodeGenOptions::ProfileCSIRInstr);
else
@@ -1303,87 +1447,82 @@ static void setPGOUseInstrumentor(CodeGenOptions &Opts,
Opts.setProfileUse(CodeGenOptions::ProfileClangInstr);
}
-void CompilerInvocation::GenerateCodeGenArgs(
- const CodeGenOptions &Opts, SmallVectorImpl<const char *> &Args,
- StringAllocator SA, const llvm::Triple &T, const std::string &OutputFile,
- const LangOptions *LangOpts) {
+void CompilerInvocationBase::GenerateCodeGenArgs(const CodeGenOptions &Opts,
+ ArgumentConsumer Consumer,
+ const llvm::Triple &T,
+ const std::string &OutputFile,
+ const LangOptions *LangOpts) {
const CodeGenOptions &CodeGenOpts = Opts;
if (Opts.OptimizationLevel == 0)
- GenerateArg(Args, OPT_O0, SA);
+ GenerateArg(Consumer, OPT_O0);
else
- GenerateArg(Args, OPT_O, Twine(Opts.OptimizationLevel), SA);
-
-#define CODEGEN_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+ GenerateArg(Consumer, OPT_O, Twine(Opts.OptimizationLevel));
+
+#define CODEGEN_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Consumer, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef CODEGEN_OPTION_WITH_MARSHALLING
if (Opts.OptimizationLevel > 0) {
if (Opts.Inlining == CodeGenOptions::NormalInlining)
- GenerateArg(Args, OPT_finline_functions, SA);
+ GenerateArg(Consumer, OPT_finline_functions);
else if (Opts.Inlining == CodeGenOptions::OnlyHintInlining)
- GenerateArg(Args, OPT_finline_hint_functions, SA);
+ GenerateArg(Consumer, OPT_finline_hint_functions);
else if (Opts.Inlining == CodeGenOptions::OnlyAlwaysInlining)
- GenerateArg(Args, OPT_fno_inline, SA);
+ GenerateArg(Consumer, OPT_fno_inline);
}
if (Opts.DirectAccessExternalData && LangOpts->PICLevel != 0)
- GenerateArg(Args, OPT_fdirect_access_external_data, SA);
+ GenerateArg(Consumer, OPT_fdirect_access_external_data);
else if (!Opts.DirectAccessExternalData && LangOpts->PICLevel == 0)
- GenerateArg(Args, OPT_fno_direct_access_external_data, SA);
+ GenerateArg(Consumer, OPT_fno_direct_access_external_data);
- Optional<StringRef> DebugInfoVal;
+ std::optional<StringRef> DebugInfoVal;
switch (Opts.DebugInfo) {
- case codegenoptions::DebugLineTablesOnly:
+ case llvm::codegenoptions::DebugLineTablesOnly:
DebugInfoVal = "line-tables-only";
break;
- case codegenoptions::DebugDirectivesOnly:
+ case llvm::codegenoptions::DebugDirectivesOnly:
DebugInfoVal = "line-directives-only";
break;
- case codegenoptions::DebugInfoConstructor:
+ case llvm::codegenoptions::DebugInfoConstructor:
DebugInfoVal = "constructor";
break;
- case codegenoptions::LimitedDebugInfo:
+ case llvm::codegenoptions::LimitedDebugInfo:
DebugInfoVal = "limited";
break;
- case codegenoptions::FullDebugInfo:
+ case llvm::codegenoptions::FullDebugInfo:
DebugInfoVal = "standalone";
break;
- case codegenoptions::UnusedTypeInfo:
+ case llvm::codegenoptions::UnusedTypeInfo:
DebugInfoVal = "unused-types";
break;
- case codegenoptions::NoDebugInfo: // default value
- DebugInfoVal = None;
+ case llvm::codegenoptions::NoDebugInfo: // default value
+ DebugInfoVal = std::nullopt;
break;
- case codegenoptions::LocTrackingOnly: // implied value
- DebugInfoVal = None;
+ case llvm::codegenoptions::LocTrackingOnly: // implied value
+ DebugInfoVal = std::nullopt;
break;
}
if (DebugInfoVal)
- GenerateArg(Args, OPT_debug_info_kind_EQ, *DebugInfoVal, SA);
+ GenerateArg(Consumer, OPT_debug_info_kind_EQ, *DebugInfoVal);
for (const auto &Prefix : Opts.DebugPrefixMap)
- GenerateArg(Args, OPT_fdebug_prefix_map_EQ,
- Prefix.first + "=" + Prefix.second, SA);
+ GenerateArg(Consumer, OPT_fdebug_prefix_map_EQ,
+ Prefix.first + "=" + Prefix.second);
for (const auto &Prefix : Opts.CoveragePrefixMap)
- GenerateArg(Args, OPT_fcoverage_prefix_map_EQ,
- Prefix.first + "=" + Prefix.second, SA);
+ GenerateArg(Consumer, OPT_fcoverage_prefix_map_EQ,
+ Prefix.first + "=" + Prefix.second);
if (Opts.NewStructPathTBAA)
- GenerateArg(Args, OPT_new_struct_path_tbaa, SA);
+ GenerateArg(Consumer, OPT_new_struct_path_tbaa);
if (Opts.OptimizeSize == 1)
- GenerateArg(Args, OPT_O, "s", SA);
+ GenerateArg(Consumer, OPT_O, "s");
else if (Opts.OptimizeSize == 2)
- GenerateArg(Args, OPT_O, "z", SA);
+ GenerateArg(Consumer, OPT_O, "z");
// SimplifyLibCalls is set only in the absence of -fno-builtin and
// -ffreestanding. We'll consider that when generating them.
@@ -1391,58 +1530,65 @@ void CompilerInvocation::GenerateCodeGenArgs(
// NoBuiltinFuncs are generated by LangOptions.
if (Opts.UnrollLoops && Opts.OptimizationLevel <= 1)
- GenerateArg(Args, OPT_funroll_loops, SA);
+ GenerateArg(Consumer, OPT_funroll_loops);
else if (!Opts.UnrollLoops && Opts.OptimizationLevel > 1)
- GenerateArg(Args, OPT_fno_unroll_loops, SA);
+ GenerateArg(Consumer, OPT_fno_unroll_loops);
if (!Opts.BinutilsVersion.empty())
- GenerateArg(Args, OPT_fbinutils_version_EQ, Opts.BinutilsVersion, SA);
+ GenerateArg(Consumer, OPT_fbinutils_version_EQ, Opts.BinutilsVersion);
if (Opts.DebugNameTable ==
static_cast<unsigned>(llvm::DICompileUnit::DebugNameTableKind::GNU))
- GenerateArg(Args, OPT_ggnu_pubnames, SA);
+ GenerateArg(Consumer, OPT_ggnu_pubnames);
else if (Opts.DebugNameTable ==
static_cast<unsigned>(
llvm::DICompileUnit::DebugNameTableKind::Default))
- GenerateArg(Args, OPT_gpubnames, SA);
+ GenerateArg(Consumer, OPT_gpubnames);
+ auto TNK = Opts.getDebugSimpleTemplateNames();
+ if (TNK != llvm::codegenoptions::DebugTemplateNamesKind::Full) {
+ if (TNK == llvm::codegenoptions::DebugTemplateNamesKind::Simple)
+ GenerateArg(Consumer, OPT_gsimple_template_names_EQ, "simple");
+ else if (TNK == llvm::codegenoptions::DebugTemplateNamesKind::Mangled)
+ GenerateArg(Consumer, OPT_gsimple_template_names_EQ, "mangled");
+ }
// ProfileInstrumentUsePath is marshalled automatically, no need to generate
// it or PGOUseInstrumentor.
if (Opts.TimePasses) {
if (Opts.TimePassesPerRun)
- GenerateArg(Args, OPT_ftime_report_EQ, "per-pass-run", SA);
+ GenerateArg(Consumer, OPT_ftime_report_EQ, "per-pass-run");
else
- GenerateArg(Args, OPT_ftime_report, SA);
+ GenerateArg(Consumer, OPT_ftime_report);
}
if (Opts.PrepareForLTO && !Opts.PrepareForThinLTO)
- GenerateArg(Args, OPT_flto, SA);
+ GenerateArg(Consumer, OPT_flto_EQ, "full");
if (Opts.PrepareForThinLTO)
- GenerateArg(Args, OPT_flto_EQ, "thin", SA);
+ GenerateArg(Consumer, OPT_flto_EQ, "thin");
if (!Opts.ThinLTOIndexFile.empty())
- GenerateArg(Args, OPT_fthinlto_index_EQ, Opts.ThinLTOIndexFile, SA);
+ GenerateArg(Consumer, OPT_fthinlto_index_EQ, Opts.ThinLTOIndexFile);
if (Opts.SaveTempsFilePrefix == OutputFile)
- GenerateArg(Args, OPT_save_temps_EQ, "obj", SA);
+ GenerateArg(Consumer, OPT_save_temps_EQ, "obj");
StringRef MemProfileBasename("memprof.profraw");
if (!Opts.MemoryProfileOutput.empty()) {
if (Opts.MemoryProfileOutput == MemProfileBasename) {
- GenerateArg(Args, OPT_fmemory_profile, SA);
+ GenerateArg(Consumer, OPT_fmemory_profile);
} else {
size_t ArgLength =
Opts.MemoryProfileOutput.size() - MemProfileBasename.size();
- GenerateArg(Args, OPT_fmemory_profile_EQ,
- Opts.MemoryProfileOutput.substr(0, ArgLength), SA);
+ GenerateArg(Consumer, OPT_fmemory_profile_EQ,
+ Opts.MemoryProfileOutput.substr(0, ArgLength));
}
}
if (memcmp(Opts.CoverageVersion, "408*", 4) != 0)
- GenerateArg(Args, OPT_coverage_version_EQ,
- StringRef(Opts.CoverageVersion, 4), SA);
+ GenerateArg(Consumer, OPT_coverage_version_EQ,
+ StringRef(Opts.CoverageVersion, 4));
// TODO: Check if we need to generate arguments stored in CmdArgs. (Namely
// '-fembed_bitcode', which does not map to any CompilerInvocation field and
@@ -1452,90 +1598,94 @@ void CompilerInvocation::GenerateCodeGenArgs(
std::string InstrBundle =
serializeXRayInstrumentationBundle(Opts.XRayInstrumentationBundle);
if (!InstrBundle.empty())
- GenerateArg(Args, OPT_fxray_instrumentation_bundle, InstrBundle, SA);
+ GenerateArg(Consumer, OPT_fxray_instrumentation_bundle, InstrBundle);
}
if (Opts.CFProtectionReturn && Opts.CFProtectionBranch)
- GenerateArg(Args, OPT_fcf_protection_EQ, "full", SA);
+ GenerateArg(Consumer, OPT_fcf_protection_EQ, "full");
else if (Opts.CFProtectionReturn)
- GenerateArg(Args, OPT_fcf_protection_EQ, "return", SA);
+ GenerateArg(Consumer, OPT_fcf_protection_EQ, "return");
else if (Opts.CFProtectionBranch)
- GenerateArg(Args, OPT_fcf_protection_EQ, "branch", SA);
+ GenerateArg(Consumer, OPT_fcf_protection_EQ, "branch");
+
+ if (Opts.FunctionReturnThunks)
+ GenerateArg(Consumer, OPT_mfunction_return_EQ, "thunk-extern");
for (const auto &F : Opts.LinkBitcodeFiles) {
bool Builtint = F.LinkFlags == llvm::Linker::Flags::LinkOnlyNeeded &&
F.PropagateAttrs && F.Internalize;
- GenerateArg(Args,
+ GenerateArg(Consumer,
Builtint ? OPT_mlink_builtin_bitcode : OPT_mlink_bitcode_file,
- F.Filename, SA);
+ F.Filename);
}
- // TODO: Consider removing marshalling annotations from f[no_]emulated_tls.
- // That would make it easy to generate the option only **once** if it was
- // explicitly set to non-default value.
- if (Opts.ExplicitEmulatedTLS) {
- GenerateArg(
- Args, Opts.EmulatedTLS ? OPT_femulated_tls : OPT_fno_emulated_tls, SA);
- }
+ if (Opts.EmulatedTLS)
+ GenerateArg(Consumer, OPT_femulated_tls);
if (Opts.FPDenormalMode != llvm::DenormalMode::getIEEE())
- GenerateArg(Args, OPT_fdenormal_fp_math_EQ, Opts.FPDenormalMode.str(), SA);
+ GenerateArg(Consumer, OPT_fdenormal_fp_math_EQ, Opts.FPDenormalMode.str());
- if (Opts.FP32DenormalMode != llvm::DenormalMode::getIEEE())
- GenerateArg(Args, OPT_fdenormal_fp_math_f32_EQ, Opts.FP32DenormalMode.str(),
- SA);
+ if ((Opts.FPDenormalMode != Opts.FP32DenormalMode) ||
+ (Opts.FP32DenormalMode != llvm::DenormalMode::getIEEE()))
+ GenerateArg(Consumer, OPT_fdenormal_fp_math_f32_EQ,
+ Opts.FP32DenormalMode.str());
if (Opts.StructReturnConvention == CodeGenOptions::SRCK_OnStack) {
OptSpecifier Opt =
T.isPPC32() ? OPT_maix_struct_return : OPT_fpcc_struct_return;
- GenerateArg(Args, Opt, SA);
+ GenerateArg(Consumer, Opt);
} else if (Opts.StructReturnConvention == CodeGenOptions::SRCK_InRegs) {
OptSpecifier Opt =
T.isPPC32() ? OPT_msvr4_struct_return : OPT_freg_struct_return;
- GenerateArg(Args, Opt, SA);
+ GenerateArg(Consumer, Opt);
}
if (Opts.EnableAIXExtendedAltivecABI)
- GenerateArg(Args, OPT_mabi_EQ_vec_extabi, SA);
+ GenerateArg(Consumer, OPT_mabi_EQ_vec_extabi);
+
+ if (Opts.XCOFFReadOnlyPointers)
+ GenerateArg(Consumer, OPT_mxcoff_roptr);
if (!Opts.OptRecordPasses.empty())
- GenerateArg(Args, OPT_opt_record_passes, Opts.OptRecordPasses, SA);
+ GenerateArg(Consumer, OPT_opt_record_passes, Opts.OptRecordPasses);
if (!Opts.OptRecordFormat.empty())
- GenerateArg(Args, OPT_opt_record_format, Opts.OptRecordFormat, SA);
+ GenerateArg(Consumer, OPT_opt_record_format, Opts.OptRecordFormat);
- GenerateOptimizationRemark(Args, SA, OPT_Rpass_EQ, "pass",
+ GenerateOptimizationRemark(Consumer, OPT_Rpass_EQ, "pass",
Opts.OptimizationRemark);
- GenerateOptimizationRemark(Args, SA, OPT_Rpass_missed_EQ, "pass-missed",
+ GenerateOptimizationRemark(Consumer, OPT_Rpass_missed_EQ, "pass-missed",
Opts.OptimizationRemarkMissed);
- GenerateOptimizationRemark(Args, SA, OPT_Rpass_analysis_EQ, "pass-analysis",
+ GenerateOptimizationRemark(Consumer, OPT_Rpass_analysis_EQ, "pass-analysis",
Opts.OptimizationRemarkAnalysis);
- GenerateArg(Args, OPT_fdiagnostics_hotness_threshold_EQ,
+ GenerateArg(Consumer, OPT_fdiagnostics_hotness_threshold_EQ,
Opts.DiagnosticsHotnessThreshold
? Twine(*Opts.DiagnosticsHotnessThreshold)
- : "auto",
- SA);
+ : "auto");
+
+ GenerateArg(Consumer, OPT_fdiagnostics_misexpect_tolerance_EQ,
+ Twine(*Opts.DiagnosticsMisExpectTolerance));
for (StringRef Sanitizer : serializeSanitizerKinds(Opts.SanitizeRecover))
- GenerateArg(Args, OPT_fsanitize_recover_EQ, Sanitizer, SA);
+ GenerateArg(Consumer, OPT_fsanitize_recover_EQ, Sanitizer);
for (StringRef Sanitizer : serializeSanitizerKinds(Opts.SanitizeTrap))
- GenerateArg(Args, OPT_fsanitize_trap_EQ, Sanitizer, SA);
+ GenerateArg(Consumer, OPT_fsanitize_trap_EQ, Sanitizer);
if (!Opts.EmitVersionIdentMetadata)
- GenerateArg(Args, OPT_Qn, SA);
+ GenerateArg(Consumer, OPT_Qn);
switch (Opts.FiniteLoops) {
case CodeGenOptions::FiniteLoopsKind::Language:
break;
case CodeGenOptions::FiniteLoopsKind::Always:
- GenerateArg(Args, OPT_ffinite_loops, SA);
+ GenerateArg(Consumer, OPT_ffinite_loops);
break;
case CodeGenOptions::FiniteLoopsKind::Never:
- GenerateArg(Args, OPT_fno_finite_loops, SA);
+ GenerateArg(Consumer, OPT_fno_finite_loops);
break;
}
}
@@ -1567,36 +1717,29 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
// variable name and type.
const LangOptions *LangOpts = &LangOptsRef;
-#define CODEGEN_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define CODEGEN_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef CODEGEN_OPTION_WITH_MARSHALLING
// At O0 we want to fully disable inlining outside of cases marked with
// 'alwaysinline' that are required for correctness.
- Opts.setInlining((Opts.OptimizationLevel == 0)
- ? CodeGenOptions::OnlyAlwaysInlining
- : CodeGenOptions::NormalInlining);
- // Explicit inlining flags can disable some or all inlining even at
- // optimization levels above zero.
- if (Arg *InlineArg = Args.getLastArg(
- options::OPT_finline_functions, options::OPT_finline_hint_functions,
- options::OPT_fno_inline_functions, options::OPT_fno_inline)) {
- if (Opts.OptimizationLevel > 0) {
- const Option &InlineOpt = InlineArg->getOption();
- if (InlineOpt.matches(options::OPT_finline_functions))
- Opts.setInlining(CodeGenOptions::NormalInlining);
- else if (InlineOpt.matches(options::OPT_finline_hint_functions))
- Opts.setInlining(CodeGenOptions::OnlyHintInlining);
- else
- Opts.setInlining(CodeGenOptions::OnlyAlwaysInlining);
- }
+ if (Opts.OptimizationLevel == 0) {
+ Opts.setInlining(CodeGenOptions::OnlyAlwaysInlining);
+ } else if (const Arg *A = Args.getLastArg(options::OPT_finline_functions,
+ options::OPT_finline_hint_functions,
+ options::OPT_fno_inline_functions,
+ options::OPT_fno_inline)) {
+ // Explicit inlining flags can disable some or all inlining even at
+ // optimization levels above zero.
+ if (A->getOption().matches(options::OPT_finline_functions))
+ Opts.setInlining(CodeGenOptions::NormalInlining);
+ else if (A->getOption().matches(options::OPT_finline_hint_functions))
+ Opts.setInlining(CodeGenOptions::OnlyHintInlining);
+ else
+ Opts.setInlining(CodeGenOptions::OnlyAlwaysInlining);
+ } else {
+ Opts.setInlining(CodeGenOptions::NormalInlining);
}
// PIC defaults to -fno-direct-access-external-data while non-PIC defaults to
@@ -1609,18 +1752,19 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
if (Arg *A = Args.getLastArg(OPT_debug_info_kind_EQ)) {
unsigned Val =
llvm::StringSwitch<unsigned>(A->getValue())
- .Case("line-tables-only", codegenoptions::DebugLineTablesOnly)
- .Case("line-directives-only", codegenoptions::DebugDirectivesOnly)
- .Case("constructor", codegenoptions::DebugInfoConstructor)
- .Case("limited", codegenoptions::LimitedDebugInfo)
- .Case("standalone", codegenoptions::FullDebugInfo)
- .Case("unused-types", codegenoptions::UnusedTypeInfo)
+ .Case("line-tables-only", llvm::codegenoptions::DebugLineTablesOnly)
+ .Case("line-directives-only",
+ llvm::codegenoptions::DebugDirectivesOnly)
+ .Case("constructor", llvm::codegenoptions::DebugInfoConstructor)
+ .Case("limited", llvm::codegenoptions::LimitedDebugInfo)
+ .Case("standalone", llvm::codegenoptions::FullDebugInfo)
+ .Case("unused-types", llvm::codegenoptions::UnusedTypeInfo)
.Default(~0U);
if (Val == ~0U)
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args)
<< A->getValue();
else
- Opts.setDebugInfo(static_cast<codegenoptions::DebugInfoKind>(Val));
+ Opts.setDebugInfo(static_cast<llvm::codegenoptions::DebugInfoKind>(Val));
}
// If -fuse-ctor-homing is set and limited debug info is already on, then use
@@ -1628,23 +1772,21 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
if (const Arg *A =
Args.getLastArg(OPT_fuse_ctor_homing, OPT_fno_use_ctor_homing)) {
if (A->getOption().matches(OPT_fuse_ctor_homing) &&
- Opts.getDebugInfo() == codegenoptions::LimitedDebugInfo)
- Opts.setDebugInfo(codegenoptions::DebugInfoConstructor);
+ Opts.getDebugInfo() == llvm::codegenoptions::LimitedDebugInfo)
+ Opts.setDebugInfo(llvm::codegenoptions::DebugInfoConstructor);
if (A->getOption().matches(OPT_fno_use_ctor_homing) &&
- Opts.getDebugInfo() == codegenoptions::DebugInfoConstructor)
- Opts.setDebugInfo(codegenoptions::LimitedDebugInfo);
+ Opts.getDebugInfo() == llvm::codegenoptions::DebugInfoConstructor)
+ Opts.setDebugInfo(llvm::codegenoptions::LimitedDebugInfo);
}
for (const auto &Arg : Args.getAllArgValues(OPT_fdebug_prefix_map_EQ)) {
auto Split = StringRef(Arg).split('=');
- Opts.DebugPrefixMap.insert(
- {std::string(Split.first), std::string(Split.second)});
+ Opts.DebugPrefixMap.emplace_back(Split.first, Split.second);
}
for (const auto &Arg : Args.getAllArgValues(OPT_fcoverage_prefix_map_EQ)) {
auto Split = StringRef(Arg).split('=');
- Opts.CoveragePrefixMap.insert(
- {std::string(Split.first), std::string(Split.second)});
+ Opts.CoveragePrefixMap.emplace_back(Split.first, Split.second);
}
const llvm::Triple::ArchType DebugEntryValueArchs[] = {
@@ -1680,19 +1822,22 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
: Args.hasArg(OPT_gpubnames)
? llvm::DICompileUnit::DebugNameTableKind::Default
: llvm::DICompileUnit::DebugNameTableKind::None);
-
- if (!Opts.ProfileInstrumentUsePath.empty())
- setPGOUseInstrumentor(Opts, Opts.ProfileInstrumentUsePath);
+ if (const Arg *A = Args.getLastArg(OPT_gsimple_template_names_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value != "simple" && Value != "mangled")
+ Diags.Report(diag::err_drv_unsupported_option_argument)
+ << A->getSpelling() << A->getValue();
+ Opts.setDebugSimpleTemplateNames(
+ StringRef(A->getValue()) == "simple"
+ ? llvm::codegenoptions::DebugTemplateNamesKind::Simple
+ : llvm::codegenoptions::DebugTemplateNamesKind::Mangled);
+ }
if (const Arg *A = Args.getLastArg(OPT_ftime_report, OPT_ftime_report_EQ)) {
Opts.TimePasses = true;
// -ftime-report= is only for new pass manager.
if (A->getOption().getID() == OPT_ftime_report_EQ) {
- if (Opts.LegacyPassManager)
- Diags.Report(diag::err_drv_argument_only_allowed_with)
- << A->getAsString(Args) << "-fno-legacy-pass-manager";
-
StringRef Val = A->getValue();
if (Val == "per-pass")
Opts.TimePassesPerRun = false;
@@ -1704,14 +1849,17 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
}
}
- Opts.PrepareForLTO = Args.hasArg(OPT_flto, OPT_flto_EQ);
+ Opts.PrepareForLTO = false;
Opts.PrepareForThinLTO = false;
if (Arg *A = Args.getLastArg(OPT_flto_EQ)) {
+ Opts.PrepareForLTO = true;
StringRef S = A->getValue();
if (S == "thin")
Opts.PrepareForThinLTO = true;
else if (S != "full")
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+ if (Args.hasArg(OPT_funified_lto))
+ Opts.PrepareForThinLTO = true;
}
if (Arg *A = Args.getLastArg(OPT_fthinlto_index_EQ)) {
if (IK.getLanguage() != Language::LLVM_IR)
@@ -1737,7 +1885,7 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.MemoryProfileOutput = MemProfileBasename;
memcpy(Opts.CoverageVersion, "408*", 4);
- if (Opts.EmitGcovArcs || Opts.EmitGcovNotes) {
+ if (Opts.CoverageNotesFile.size() || Opts.CoverageDataFile.size()) {
if (Args.hasArg(OPT_coverage_version_EQ)) {
StringRef CoverageVersion = Args.getLastArgValue(OPT_coverage_version_EQ);
if (CoverageVersion.size() != 4) {
@@ -1792,6 +1940,27 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name;
}
+ if (const Arg *A = Args.getLastArg(OPT_mfunction_return_EQ)) {
+ auto Val = llvm::StringSwitch<llvm::FunctionReturnThunksKind>(A->getValue())
+ .Case("keep", llvm::FunctionReturnThunksKind::Keep)
+ .Case("thunk-extern", llvm::FunctionReturnThunksKind::Extern)
+ .Default(llvm::FunctionReturnThunksKind::Invalid);
+ // SystemZ might want to add support for "expolines."
+ if (!T.isX86())
+ Diags.Report(diag::err_drv_argument_not_allowed_with)
+ << A->getSpelling() << T.getTriple();
+ else if (Val == llvm::FunctionReturnThunksKind::Invalid)
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ else if (Val == llvm::FunctionReturnThunksKind::Extern &&
+ Args.getLastArgValue(OPT_mcmodel_EQ).equals("large"))
+ Diags.Report(diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args)
+ << Args.getLastArg(OPT_mcmodel_EQ)->getAsString(Args);
+ else
+ Opts.FunctionReturnThunks = static_cast<unsigned>(Val);
+ }
+
for (auto *A :
Args.filtered(OPT_mlink_bitcode_file, OPT_mlink_builtin_bitcode)) {
CodeGenOptions::BitcodeFileToLink F;
@@ -1806,15 +1975,10 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.LinkBitcodeFiles.push_back(F);
}
- if (Args.getLastArg(OPT_femulated_tls) ||
- Args.getLastArg(OPT_fno_emulated_tls)) {
- Opts.ExplicitEmulatedTLS = true;
- }
-
if (Arg *A = Args.getLastArg(OPT_ftlsmodel_EQ)) {
if (T.isOSAIX()) {
StringRef Name = A->getValue();
- if (Name != "global-dynamic")
+ if (Name == "local-dynamic")
Diags.Report(diag::err_aix_unsupported_tls_model) << Name;
}
}
@@ -1822,6 +1986,7 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
if (Arg *A = Args.getLastArg(OPT_fdenormal_fp_math_EQ)) {
StringRef Val = A->getValue();
Opts.FPDenormalMode = llvm::parseDenormalFPAttribute(Val);
+ Opts.FP32DenormalMode = Opts.FPDenormalMode;
if (!Opts.FPDenormalMode.isValid())
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Val;
}
@@ -1855,14 +2020,29 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
}
}
- if (Arg *A =
- Args.getLastArg(OPT_mabi_EQ_vec_default, OPT_mabi_EQ_vec_extabi)) {
+ if (Arg *A = Args.getLastArg(OPT_mxcoff_roptr)) {
if (!T.isOSAIX())
Diags.Report(diag::err_drv_unsupported_opt_for_target)
<< A->getSpelling() << T.str();
- const Option &O = A->getOption();
- Opts.EnableAIXExtendedAltivecABI = O.matches(OPT_mabi_EQ_vec_extabi);
+ // Since the storage mapping class is specified per csect,
+ // without using data sections, it is less effective to use read-only
+ // pointers. Using read-only pointers may cause other RO variables in the
+ // same csect to become RW when the linker acts upon `-bforceimprw`;
+ // therefore, we require that separate data sections
+ // are used when `-mxcoff-roptr` is in effect. We respect the setting of
+ // data-sections since we have not found reasons to do otherwise that
+ // overcome the user surprise of not respecting the setting.
+ if (!Args.hasFlag(OPT_fdata_sections, OPT_fno_data_sections, false))
+ Diags.Report(diag::err_roptr_requires_data_sections);
+
+ Opts.XCOFFReadOnlyPointers = true;
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_mabi_EQ_quadword_atomics)) {
+ if (!T.isOSAIX() || T.isPPC32())
+ Diags.Report(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << T.str();
}
bool NeedLocTracking = false;
@@ -1894,8 +2074,8 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.OptimizationRemarkAnalysis.hasValidPattern();
bool UsingSampleProfile = !Opts.SampleProfileFile.empty();
- bool UsingProfile = UsingSampleProfile ||
- (Opts.getProfileUse() != CodeGenOptions::ProfileNone);
+ bool UsingProfile =
+ UsingSampleProfile || !Opts.ProfileInstrumentUsePath.empty();
if (Opts.DiagnosticsWithHotness && !UsingProfile &&
// An IR file will contain PGO as metadata
@@ -1914,14 +2094,31 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
<< "-fdiagnostics-hotness-threshold=";
} else {
Opts.DiagnosticsHotnessThreshold = *ResultOrErr;
- if ((!Opts.DiagnosticsHotnessThreshold.hasValue() ||
- Opts.DiagnosticsHotnessThreshold.getValue() > 0) &&
+ if ((!Opts.DiagnosticsHotnessThreshold ||
+ *Opts.DiagnosticsHotnessThreshold > 0) &&
!UsingProfile)
Diags.Report(diag::warn_drv_diagnostics_hotness_requires_pgo)
<< "-fdiagnostics-hotness-threshold=";
}
}
+ if (auto *arg =
+ Args.getLastArg(options::OPT_fdiagnostics_misexpect_tolerance_EQ)) {
+ auto ResultOrErr = parseToleranceOption(arg->getValue());
+
+ if (!ResultOrErr) {
+ Diags.Report(diag::err_drv_invalid_diagnotics_misexpect_tolerance)
+ << "-fdiagnostics-misexpect-tolerance=";
+ } else {
+ Opts.DiagnosticsMisExpectTolerance = *ResultOrErr;
+ if ((!Opts.DiagnosticsMisExpectTolerance ||
+ *Opts.DiagnosticsMisExpectTolerance > 0) &&
+ !UsingProfile)
+ Diags.Report(diag::warn_drv_diagnostics_misexpect_requires_pgo)
+ << "-fdiagnostics-misexpect-tolerance=";
+ }
+ }
+
// If the user requested to use a sample profile for PGO, then the
// backend will need to track source location information so the profile
// can be incorporated into the IR.
@@ -1933,8 +2130,9 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
// If the user requested a flag that requires source locations available in
// the backend, make sure that the backend tracks source location information.
- if (NeedLocTracking && Opts.getDebugInfo() == codegenoptions::NoDebugInfo)
- Opts.setDebugInfo(codegenoptions::LocTrackingOnly);
+ if (NeedLocTracking &&
+ Opts.getDebugInfo() == llvm::codegenoptions::NoDebugInfo)
+ Opts.setDebugInfo(llvm::codegenoptions::LocTrackingOnly);
// Parse -fsanitize-recover= arguments.
// FIXME: Report unrecoverable sanitizers incorrectly specified here.
@@ -1952,32 +2150,24 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
else if (Args.hasArg(options::OPT_fno_finite_loops))
Opts.FiniteLoops = CodeGenOptions::FiniteLoopsKind::Never;
- Opts.EmitIEEENaNCompliantInsts =
- Args.hasFlag(options::OPT_mamdgpu_ieee, options::OPT_mno_amdgpu_ieee);
+ Opts.EmitIEEENaNCompliantInsts = Args.hasFlag(
+ options::OPT_mamdgpu_ieee, options::OPT_mno_amdgpu_ieee, true);
if (!Opts.EmitIEEENaNCompliantInsts && !LangOptsRef.NoHonorNaNs)
Diags.Report(diag::err_drv_amdgpu_ieee_without_no_honor_nans);
return Diags.getNumErrors() == NumErrorsBefore;
}
-static void
-GenerateDependencyOutputArgs(const DependencyOutputOptions &Opts,
- SmallVectorImpl<const char *> &Args,
- CompilerInvocation::StringAllocator SA) {
+static void GenerateDependencyOutputArgs(const DependencyOutputOptions &Opts,
+ ArgumentConsumer Consumer) {
const DependencyOutputOptions &DependencyOutputOpts = Opts;
-#define DEPENDENCY_OUTPUT_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define DEPENDENCY_OUTPUT_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Consumer, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef DEPENDENCY_OUTPUT_OPTION_WITH_MARSHALLING
if (Opts.ShowIncludesDest != ShowIncludesDestination::None)
- GenerateArg(Args, OPT_show_includes, SA);
+ GenerateArg(Consumer, OPT_show_includes);
for (const auto &Dep : Opts.ExtraDeps) {
switch (Dep.second) {
@@ -1993,7 +2183,7 @@ GenerateDependencyOutputArgs(const DependencyOutputOptions &Opts,
// marshalling infrastructure.
continue;
case EDK_DepFileEntry:
- GenerateArg(Args, OPT_fdepfile_entry, Dep.first, SA);
+ GenerateArg(Consumer, OPT_fdepfile_entry, Dep.first);
break;
}
}
@@ -2006,14 +2196,8 @@ static bool ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
unsigned NumErrorsBefore = Diags.getNumErrors();
DependencyOutputOptions &DependencyOutputOpts = Opts;
-#define DEPENDENCY_OUTPUT_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define DEPENDENCY_OUTPUT_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef DEPENDENCY_OUTPUT_OPTION_WITH_MARSHALLING
@@ -2035,13 +2219,13 @@ static bool ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
if (!Args.hasArg(OPT_fno_sanitize_ignorelist)) {
for (const auto *A : Args.filtered(OPT_fsanitize_ignorelist_EQ)) {
StringRef Val = A->getValue();
- if (Val.find('=') == StringRef::npos)
+ if (!Val.contains('='))
Opts.ExtraDeps.emplace_back(std::string(Val), EDK_SanitizeIgnorelist);
}
if (Opts.IncludeSystemHeaders) {
for (const auto *A : Args.filtered(OPT_fsanitize_system_ignorelist_EQ)) {
StringRef Val = A->getValue();
- if (Val.find('=') == StringRef::npos)
+ if (!Val.contains('='))
Opts.ExtraDeps.emplace_back(std::string(Val), EDK_SanitizeIgnorelist);
}
}
@@ -2058,10 +2242,20 @@ static bool ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
// Only the -fmodule-file=<file> form.
for (const auto *A : Args.filtered(OPT_fmodule_file)) {
StringRef Val = A->getValue();
- if (Val.find('=') == StringRef::npos)
+ if (!Val.contains('='))
Opts.ExtraDeps.emplace_back(std::string(Val), EDK_ModuleFile);
}
+ // Check for invalid combinations of header-include-format
+ // and header-include-filtering.
+ if ((Opts.HeaderIncludeFormat == HIFMT_Textual &&
+ Opts.HeaderIncludeFiltering != HIFIL_None) ||
+ (Opts.HeaderIncludeFormat == HIFMT_JSON &&
+ Opts.HeaderIncludeFiltering != HIFIL_Only_Direct_System))
+ Diags.Report(diag::err_drv_print_header_env_var_combination_cc1)
+ << Args.getLastArg(OPT_header_include_format_EQ)->getValue()
+ << Args.getLastArg(OPT_header_include_filtering_EQ)->getValue();
+
return Diags.getNumErrors() == NumErrorsBefore;
}
@@ -2077,11 +2271,9 @@ static bool parseShowColorsArgs(const ArgList &Args, bool DefaultColor) {
} ShowColors = DefaultColor ? Colors_Auto : Colors_Off;
for (auto *A : Args) {
const Option &O = A->getOption();
- if (O.matches(options::OPT_fcolor_diagnostics) ||
- O.matches(options::OPT_fdiagnostics_color)) {
+ if (O.matches(options::OPT_fcolor_diagnostics)) {
ShowColors = Colors_On;
- } else if (O.matches(options::OPT_fno_color_diagnostics) ||
- O.matches(options::OPT_fno_diagnostics_color)) {
+ } else if (O.matches(options::OPT_fno_color_diagnostics)) {
ShowColors = Colors_Off;
} else if (O.matches(options::OPT_fdiagnostics_color_EQ)) {
StringRef Value(A->getValue());
@@ -2117,18 +2309,11 @@ static bool checkVerifyPrefixes(const std::vector<std::string> &VerifyPrefixes,
}
static void GenerateFileSystemArgs(const FileSystemOptions &Opts,
- SmallVectorImpl<const char *> &Args,
- CompilerInvocation::StringAllocator SA) {
+ ArgumentConsumer Consumer) {
const FileSystemOptions &FileSystemOpts = Opts;
-#define FILE_SYSTEM_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define FILE_SYSTEM_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Consumer, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef FILE_SYSTEM_OPTION_WITH_MARSHALLING
}
@@ -2139,14 +2324,8 @@ static bool ParseFileSystemArgs(FileSystemOptions &Opts, const ArgList &Args,
FileSystemOptions &FileSystemOpts = Opts;
-#define FILE_SYSTEM_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define FILE_SYSTEM_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef FILE_SYSTEM_OPTION_WITH_MARSHALLING
@@ -2154,17 +2333,10 @@ static bool ParseFileSystemArgs(FileSystemOptions &Opts, const ArgList &Args,
}
static void GenerateMigratorArgs(const MigratorOptions &Opts,
- SmallVectorImpl<const char *> &Args,
- CompilerInvocation::StringAllocator SA) {
+ ArgumentConsumer Consumer) {
const MigratorOptions &MigratorOpts = Opts;
-#define MIGRATOR_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define MIGRATOR_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Consumer, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef MIGRATOR_OPTION_WITH_MARSHALLING
}
@@ -2175,71 +2347,59 @@ static bool ParseMigratorArgs(MigratorOptions &Opts, const ArgList &Args,
MigratorOptions &MigratorOpts = Opts;
-#define MIGRATOR_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define MIGRATOR_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef MIGRATOR_OPTION_WITH_MARSHALLING
return Diags.getNumErrors() == NumErrorsBefore;
}
-void CompilerInvocation::GenerateDiagnosticArgs(
- const DiagnosticOptions &Opts, SmallVectorImpl<const char *> &Args,
- StringAllocator SA, bool DefaultDiagColor) {
+void CompilerInvocationBase::GenerateDiagnosticArgs(
+ const DiagnosticOptions &Opts, ArgumentConsumer Consumer,
+ bool DefaultDiagColor) {
const DiagnosticOptions *DiagnosticOpts = &Opts;
-#define DIAG_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define DIAG_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Consumer, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef DIAG_OPTION_WITH_MARSHALLING
if (!Opts.DiagnosticSerializationFile.empty())
- GenerateArg(Args, OPT_diagnostic_serialized_file,
- Opts.DiagnosticSerializationFile, SA);
+ GenerateArg(Consumer, OPT_diagnostic_serialized_file,
+ Opts.DiagnosticSerializationFile);
if (Opts.ShowColors)
- GenerateArg(Args, OPT_fcolor_diagnostics, SA);
+ GenerateArg(Consumer, OPT_fcolor_diagnostics);
if (Opts.VerifyDiagnostics &&
llvm::is_contained(Opts.VerifyPrefixes, "expected"))
- GenerateArg(Args, OPT_verify, SA);
+ GenerateArg(Consumer, OPT_verify);
for (const auto &Prefix : Opts.VerifyPrefixes)
if (Prefix != "expected")
- GenerateArg(Args, OPT_verify_EQ, Prefix, SA);
+ GenerateArg(Consumer, OPT_verify_EQ, Prefix);
DiagnosticLevelMask VIU = Opts.getVerifyIgnoreUnexpected();
if (VIU == DiagnosticLevelMask::None) {
// This is the default, don't generate anything.
} else if (VIU == DiagnosticLevelMask::All) {
- GenerateArg(Args, OPT_verify_ignore_unexpected, SA);
+ GenerateArg(Consumer, OPT_verify_ignore_unexpected);
} else {
if (static_cast<unsigned>(VIU & DiagnosticLevelMask::Note) != 0)
- GenerateArg(Args, OPT_verify_ignore_unexpected_EQ, "note", SA);
+ GenerateArg(Consumer, OPT_verify_ignore_unexpected_EQ, "note");
if (static_cast<unsigned>(VIU & DiagnosticLevelMask::Remark) != 0)
- GenerateArg(Args, OPT_verify_ignore_unexpected_EQ, "remark", SA);
+ GenerateArg(Consumer, OPT_verify_ignore_unexpected_EQ, "remark");
if (static_cast<unsigned>(VIU & DiagnosticLevelMask::Warning) != 0)
- GenerateArg(Args, OPT_verify_ignore_unexpected_EQ, "warning", SA);
+ GenerateArg(Consumer, OPT_verify_ignore_unexpected_EQ, "warning");
if (static_cast<unsigned>(VIU & DiagnosticLevelMask::Error) != 0)
- GenerateArg(Args, OPT_verify_ignore_unexpected_EQ, "error", SA);
+ GenerateArg(Consumer, OPT_verify_ignore_unexpected_EQ, "error");
}
for (const auto &Warning : Opts.Warnings) {
// This option is automatically generated from UndefPrefixes.
if (Warning == "undef-prefix")
continue;
- Args.push_back(SA(StringRef("-W") + Warning));
+ Consumer(StringRef("-W") + Warning);
}
for (const auto &Remark : Opts.Remarks) {
@@ -2251,14 +2411,37 @@ void CompilerInvocation::GenerateDiagnosticArgs(
if (llvm::is_contained(IgnoredRemarks, Remark))
continue;
- Args.push_back(SA(StringRef("-R") + Remark));
+ Consumer(StringRef("-R") + Remark);
}
}
+std::unique_ptr<DiagnosticOptions>
+clang::CreateAndPopulateDiagOpts(ArrayRef<const char *> Argv) {
+ auto DiagOpts = std::make_unique<DiagnosticOptions>();
+ unsigned MissingArgIndex, MissingArgCount;
+ InputArgList Args = getDriverOptTable().ParseArgs(
+ Argv.slice(1), MissingArgIndex, MissingArgCount);
+
+ bool ShowColors = true;
+ if (std::optional<std::string> NoColor =
+ llvm::sys::Process::GetEnv("NO_COLOR");
+ NoColor && !NoColor->empty()) {
+ // If the user set the NO_COLOR environment variable, we'll honor that
+ // unless the command line overrides it.
+ ShowColors = false;
+ }
+
+ // We ignore MissingArgCount and the return value of ParseDiagnosticArgs.
+ // Any errors that would be diagnosed here will also be diagnosed later,
+ // when the DiagnosticsEngine actually exists.
+ (void)ParseDiagnosticArgs(*DiagOpts, Args, /*Diags=*/nullptr, ShowColors);
+ return DiagOpts;
+}
+
bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
DiagnosticsEngine *Diags,
bool DefaultDiagColor) {
- Optional<DiagnosticsEngine> IgnoringDiags;
+ std::optional<DiagnosticsEngine> IgnoringDiags;
if (!Diags) {
IgnoringDiags.emplace(new DiagnosticIDs(), new DiagnosticOptions(),
new IgnoringDiagConsumer());
@@ -2271,14 +2454,8 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
// "DiagnosticOpts->". Let's provide the expected variable name and type.
DiagnosticOptions *DiagnosticOpts = &Opts;
-#define DIAG_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, *Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define DIAG_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, *Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef DIAG_OPTION_WITH_MARSHALLING
@@ -2307,9 +2484,9 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
DiagMask = DiagnosticLevelMask::All;
Opts.setVerifyIgnoreUnexpected(DiagMask);
if (Opts.TabStop == 0 || Opts.TabStop > DiagnosticOptions::MaxTabStop) {
- Opts.TabStop = DiagnosticOptions::DefaultTabStop;
Diags->Report(diag::warn_ignoring_ftabstop_value)
<< Opts.TabStop << DiagnosticOptions::DefaultTabStop;
+ Opts.TabStop = DiagnosticOptions::DefaultTabStop;
}
addDiagnosticArgs(Args, OPT_W_Group, OPT_W_value_Group, Opts.Warnings);
@@ -2368,15 +2545,15 @@ static const auto &getFrontendActionTable() {
{frontend::EmitLLVM, OPT_emit_llvm},
{frontend::EmitLLVMOnly, OPT_emit_llvm_only},
{frontend::EmitCodeGenOnly, OPT_emit_codegen_only},
- {frontend::EmitCodeGenOnly, OPT_emit_codegen_only},
{frontend::EmitObj, OPT_emit_obj},
+ {frontend::ExtractAPI, OPT_extract_api},
{frontend::FixIt, OPT_fixit_EQ},
{frontend::FixIt, OPT_fixit},
{frontend::GenerateModule, OPT_emit_module},
{frontend::GenerateModuleInterface, OPT_emit_module_interface},
- {frontend::GenerateHeaderModule, OPT_emit_header_module},
+ {frontend::GenerateHeaderUnit, OPT_emit_header_unit},
{frontend::GeneratePCH, OPT_emit_pch},
{frontend::GenerateInterfaceStubs, OPT_emit_interface_stubs},
{frontend::InitOnly, OPT_init_only},
@@ -2393,53 +2570,46 @@ static const auto &getFrontendActionTable() {
{frontend::MigrateSource, OPT_migrate},
{frontend::RunPreprocessorOnly, OPT_Eonly},
{frontend::PrintDependencyDirectivesSourceMinimizerOutput,
- OPT_print_dependency_directives_minimized_source},
+ OPT_print_dependency_directives_minimized_source},
};
return Table;
}
/// Maps command line option to frontend action.
-static Optional<frontend::ActionKind> getFrontendAction(OptSpecifier &Opt) {
+static std::optional<frontend::ActionKind>
+getFrontendAction(OptSpecifier &Opt) {
for (const auto &ActionOpt : getFrontendActionTable())
if (ActionOpt.second == Opt.getID())
return ActionOpt.first;
- return None;
+ return std::nullopt;
}
/// Maps frontend action to command line option.
-static Optional<OptSpecifier>
+static std::optional<OptSpecifier>
getProgramActionOpt(frontend::ActionKind ProgramAction) {
for (const auto &ActionOpt : getFrontendActionTable())
if (ActionOpt.first == ProgramAction)
return OptSpecifier(ActionOpt.second);
- return None;
+ return std::nullopt;
}
static void GenerateFrontendArgs(const FrontendOptions &Opts,
- SmallVectorImpl<const char *> &Args,
- CompilerInvocation::StringAllocator SA,
- bool IsHeader) {
+ ArgumentConsumer Consumer, bool IsHeader) {
const FrontendOptions &FrontendOpts = Opts;
-#define FRONTEND_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define FRONTEND_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Consumer, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef FRONTEND_OPTION_WITH_MARSHALLING
- Optional<OptSpecifier> ProgramActionOpt =
+ std::optional<OptSpecifier> ProgramActionOpt =
getProgramActionOpt(Opts.ProgramAction);
// Generating a simple flag covers most frontend actions.
std::function<void()> GenerateProgramAction = [&]() {
- GenerateArg(Args, *ProgramActionOpt, SA);
+ GenerateArg(Consumer, *ProgramActionOpt);
};
if (!ProgramActionOpt) {
@@ -2447,7 +2617,7 @@ static void GenerateFrontendArgs(const FrontendOptions &Opts,
assert(Opts.ProgramAction == frontend::PluginAction &&
"Frontend action without option.");
GenerateProgramAction = [&]() {
- GenerateArg(Args, OPT_plugin, Opts.ActionName, SA);
+ GenerateArg(Consumer, OPT_plugin, Opts.ActionName);
};
}
@@ -2468,21 +2638,21 @@ static void GenerateFrontendArgs(const FrontendOptions &Opts,
}
if (Opts.ASTDumpAll)
- GenerateArg(Args, OPT_ast_dump_all_EQ, Format, SA);
+ GenerateArg(Consumer, OPT_ast_dump_all_EQ, Format);
if (Opts.ASTDumpDecls)
- GenerateArg(Args, OPT_ast_dump_EQ, Format, SA);
+ GenerateArg(Consumer, OPT_ast_dump_EQ, Format);
} else {
if (Opts.ASTDumpAll)
- GenerateArg(Args, OPT_ast_dump_all, SA);
+ GenerateArg(Consumer, OPT_ast_dump_all);
if (Opts.ASTDumpDecls)
- GenerateArg(Args, OPT_ast_dump, SA);
+ GenerateArg(Consumer, OPT_ast_dump);
}
};
}
if (Opts.ProgramAction == frontend::FixIt && !Opts.FixItSuffix.empty()) {
GenerateProgramAction = [&]() {
- GenerateArg(Args, OPT_fixit_EQ, Opts.FixItSuffix, SA);
+ GenerateArg(Consumer, OPT_fixit_EQ, Opts.FixItSuffix);
};
}
@@ -2490,39 +2660,53 @@ static void GenerateFrontendArgs(const FrontendOptions &Opts,
for (const auto &PluginArgs : Opts.PluginArgs) {
Option Opt = getDriverOptTable().getOption(OPT_plugin_arg);
- const char *Spelling =
- SA(Opt.getPrefix() + Opt.getName() + PluginArgs.first);
for (const auto &PluginArg : PluginArgs.second)
- denormalizeString(Args, Spelling, SA, Opt.getKind(), 0, PluginArg);
+ denormalizeString(Consumer,
+ Opt.getPrefix() + Opt.getName() + PluginArgs.first,
+ Opt.getKind(), 0, PluginArg);
}
for (const auto &Ext : Opts.ModuleFileExtensions)
if (auto *TestExt = dyn_cast_or_null<TestModuleFileExtension>(Ext.get()))
- GenerateArg(Args, OPT_ftest_module_file_extension_EQ, TestExt->str(), SA);
+ GenerateArg(Consumer, OPT_ftest_module_file_extension_EQ, TestExt->str());
if (!Opts.CodeCompletionAt.FileName.empty())
- GenerateArg(Args, OPT_code_completion_at, Opts.CodeCompletionAt.ToString(),
- SA);
+ GenerateArg(Consumer, OPT_code_completion_at,
+ Opts.CodeCompletionAt.ToString());
for (const auto &Plugin : Opts.Plugins)
- GenerateArg(Args, OPT_load, Plugin, SA);
+ GenerateArg(Consumer, OPT_load, Plugin);
// ASTDumpDecls and ASTDumpAll already handled with ProgramAction.
for (const auto &ModuleFile : Opts.ModuleFiles)
- GenerateArg(Args, OPT_fmodule_file, ModuleFile, SA);
+ GenerateArg(Consumer, OPT_fmodule_file, ModuleFile);
- if (Opts.AuxTargetCPU.hasValue())
- GenerateArg(Args, OPT_aux_target_cpu, *Opts.AuxTargetCPU, SA);
+ if (Opts.AuxTargetCPU)
+ GenerateArg(Consumer, OPT_aux_target_cpu, *Opts.AuxTargetCPU);
- if (Opts.AuxTargetFeatures.hasValue())
+ if (Opts.AuxTargetFeatures)
for (const auto &Feature : *Opts.AuxTargetFeatures)
- GenerateArg(Args, OPT_aux_target_feature, Feature, SA);
+ GenerateArg(Consumer, OPT_aux_target_feature, Feature);
{
StringRef Preprocessed = Opts.DashX.isPreprocessed() ? "-cpp-output" : "";
StringRef ModuleMap =
Opts.DashX.getFormat() == InputKind::ModuleMap ? "-module-map" : "";
+ StringRef HeaderUnit = "";
+ switch (Opts.DashX.getHeaderUnitKind()) {
+ case InputKind::HeaderUnit_None:
+ break;
+ case InputKind::HeaderUnit_User:
+ HeaderUnit = "-user";
+ break;
+ case InputKind::HeaderUnit_System:
+ HeaderUnit = "-system";
+ break;
+ case InputKind::HeaderUnit_Abs:
+ HeaderUnit = "-header-unit";
+ break;
+ }
StringRef Header = IsHeader ? "-header" : "";
StringRef Lang;
@@ -2565,14 +2749,18 @@ static void GenerateFrontendArgs(const FrontendOptions &Opts,
case Language::LLVM_IR:
Lang = "ir";
break;
+ case Language::HLSL:
+ Lang = "hlsl";
+ break;
}
- GenerateArg(Args, OPT_x, Lang + Header + ModuleMap + Preprocessed, SA);
+ GenerateArg(Consumer, OPT_x,
+ Lang + HeaderUnit + Header + ModuleMap + Preprocessed);
}
// OPT_INPUT has a unique class, generate it directly.
for (const auto &Input : Opts.Inputs)
- Args.push_back(SA(Input.getFile()));
+ Consumer(Input.getFile());
}
static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
@@ -2581,21 +2769,15 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
FrontendOptions &FrontendOpts = Opts;
-#define FRONTEND_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define FRONTEND_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef FRONTEND_OPTION_WITH_MARSHALLING
Opts.ProgramAction = frontend::ParseSyntaxOnly;
if (const Arg *A = Args.getLastArg(OPT_Action_Group)) {
OptSpecifier Opt = OptSpecifier(A->getOption().getID());
- Optional<frontend::ActionKind> ProgramAction = getFrontendAction(Opt);
+ std::optional<frontend::ActionKind> ProgramAction = getFrontendAction(Opt);
assert(ProgramAction && "Option specifier not in Action_Group.");
if (ProgramAction == frontend::ASTDump &&
@@ -2633,7 +2815,7 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
"-interface-stub-version=ifs-v1"
<< ErrorMessage;
ProgramAction = frontend::ParseSyntaxOnly;
- } else if (!ArgStr.startswith("ifs-")) {
+ } else if (!ArgStr.starts_with("ifs-")) {
std::string ErrorMessage =
"Invalid interface stub format: " + ArgStr.str() + ".";
Diags.Report(diag::err_drv_invalid_value)
@@ -2689,7 +2871,7 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
// Only the -fmodule-file=<file> form.
for (const auto *A : Args.filtered(OPT_fmodule_file)) {
StringRef Val = A->getValue();
- if (Val.find('=') == StringRef::npos)
+ if (!Val.contains('='))
Opts.ModuleFiles.push_back(std::string(Val));
}
@@ -2712,13 +2894,32 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
if (const Arg *A = Args.getLastArg(OPT_x)) {
StringRef XValue = A->getValue();
- // Parse suffixes: '<lang>(-header|[-module-map][-cpp-output])'.
+ // Parse suffixes:
+ // '<lang>(-[{header-unit,user,system}-]header|[-module-map][-cpp-output])'.
// FIXME: Supporting '<lang>-header-cpp-output' would be useful.
bool Preprocessed = XValue.consume_back("-cpp-output");
bool ModuleMap = XValue.consume_back("-module-map");
- IsHeaderFile = !Preprocessed && !ModuleMap &&
- XValue != "precompiled-header" &&
- XValue.consume_back("-header");
+ // Detect and consume the header indicator.
+ bool IsHeader =
+ XValue != "precompiled-header" && XValue.consume_back("-header");
+
+ // If we have c++-{user,system}-header, that indicates a header unit input
+ // likewise, if the user put -fmodule-header together with a header with an
+ // absolute path (header-unit-header).
+ InputKind::HeaderUnitKind HUK = InputKind::HeaderUnit_None;
+ if (IsHeader || Preprocessed) {
+ if (XValue.consume_back("-header-unit"))
+ HUK = InputKind::HeaderUnit_Abs;
+ else if (XValue.consume_back("-system"))
+ HUK = InputKind::HeaderUnit_System;
+ else if (XValue.consume_back("-user"))
+ HUK = InputKind::HeaderUnit_User;
+ }
+
+ // The value set by this processing is an un-preprocessed source which is
+ // not intended to be a module map or header unit.
+ IsHeaderFile = IsHeader && !Preprocessed && !ModuleMap &&
+ HUK == InputKind::HeaderUnit_None;
// Principal languages.
DashX = llvm::StringSwitch<InputKind>(XValue)
@@ -2731,18 +2932,21 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
.Case("objective-c", Language::ObjC)
.Case("objective-c++", Language::ObjCXX)
.Case("renderscript", Language::RenderScript)
+ .Case("hlsl", Language::HLSL)
.Default(Language::Unknown);
// "objc[++]-cpp-output" is an acceptable synonym for
// "objective-c[++]-cpp-output".
- if (DashX.isUnknown() && Preprocessed && !IsHeaderFile && !ModuleMap)
+ if (DashX.isUnknown() && Preprocessed && !IsHeaderFile && !ModuleMap &&
+ HUK == InputKind::HeaderUnit_None)
DashX = llvm::StringSwitch<InputKind>(XValue)
.Case("objc", Language::ObjC)
.Case("objc++", Language::ObjCXX)
.Default(Language::Unknown);
// Some special cases cannot be combined with suffixes.
- if (DashX.isUnknown() && !Preprocessed && !ModuleMap && !IsHeaderFile)
+ if (DashX.isUnknown() && !Preprocessed && !IsHeaderFile && !ModuleMap &&
+ HUK == InputKind::HeaderUnit_None)
DashX = llvm::StringSwitch<InputKind>(XValue)
.Case("cpp-output", InputKind(Language::C).getPreprocessed())
.Case("assembler-with-cpp", Language::Asm)
@@ -2757,6 +2961,12 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
if (Preprocessed)
DashX = DashX.getPreprocessed();
+ // A regular header is considered mutually exclusive with a header unit.
+ if (HUK != InputKind::HeaderUnit_None) {
+ DashX = DashX.withHeaderUnit(HUK);
+ IsHeaderFile = true;
+ } else if (IsHeaderFile)
+ DashX = DashX.getHeader();
if (ModuleMap)
DashX = DashX.withFormat(InputKind::ModuleMap);
}
@@ -2766,6 +2976,11 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.Inputs.clear();
if (Inputs.empty())
Inputs.push_back("-");
+
+ if (DashX.getHeaderUnitKind() != InputKind::HeaderUnit_None &&
+ Inputs.size() > 1)
+ Diags.Report(diag::err_drv_header_unit_extra_inputs) << Inputs[1];
+
for (unsigned i = 0, e = Inputs.size(); i != e; ++i) {
InputKind IK = DashX;
if (IK.isUnknown()) {
@@ -2803,41 +3018,34 @@ std::string CompilerInvocation::GetResourcesPath(const char *Argv0,
return Driver::GetResourcesPath(ClangExecutable, CLANG_RESOURCE_DIR);
}
-static void GenerateHeaderSearchArgs(HeaderSearchOptions &Opts,
- SmallVectorImpl<const char *> &Args,
- CompilerInvocation::StringAllocator SA) {
+static void GenerateHeaderSearchArgs(const HeaderSearchOptions &Opts,
+ ArgumentConsumer Consumer) {
const HeaderSearchOptions *HeaderSearchOpts = &Opts;
-#define HEADER_SEARCH_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define HEADER_SEARCH_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Consumer, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef HEADER_SEARCH_OPTION_WITH_MARSHALLING
if (Opts.UseLibcxx)
- GenerateArg(Args, OPT_stdlib_EQ, "libc++", SA);
+ GenerateArg(Consumer, OPT_stdlib_EQ, "libc++");
if (!Opts.ModuleCachePath.empty())
- GenerateArg(Args, OPT_fmodules_cache_path, Opts.ModuleCachePath, SA);
+ GenerateArg(Consumer, OPT_fmodules_cache_path, Opts.ModuleCachePath);
for (const auto &File : Opts.PrebuiltModuleFiles)
- GenerateArg(Args, OPT_fmodule_file, File.first + "=" + File.second, SA);
+ GenerateArg(Consumer, OPT_fmodule_file, File.first + "=" + File.second);
for (const auto &Path : Opts.PrebuiltModulePaths)
- GenerateArg(Args, OPT_fprebuilt_module_path, Path, SA);
+ GenerateArg(Consumer, OPT_fprebuilt_module_path, Path);
for (const auto &Macro : Opts.ModulesIgnoreMacros)
- GenerateArg(Args, OPT_fmodules_ignore_macro, Macro.val(), SA);
+ GenerateArg(Consumer, OPT_fmodules_ignore_macro, Macro.val());
auto Matches = [](const HeaderSearchOptions::Entry &Entry,
llvm::ArrayRef<frontend::IncludeDirGroup> Groups,
- llvm::Optional<bool> IsFramework,
- llvm::Optional<bool> IgnoreSysRoot) {
- return llvm::find(Groups, Entry.Group) != Groups.end() &&
+ std::optional<bool> IsFramework,
+ std::optional<bool> IgnoreSysRoot) {
+ return llvm::is_contained(Groups, Entry.Group) &&
(!IsFramework || (Entry.IsFramework == *IsFramework)) &&
(!IgnoreSysRoot || (Entry.IgnoreSysRoot == *IgnoreSysRoot));
};
@@ -2846,8 +3054,8 @@ static void GenerateHeaderSearchArgs(HeaderSearchOptions &Opts,
auto End = Opts.UserEntries.end();
// Add -I..., -F..., and -index-header-map options in order.
- for (; It < End &&
- Matches(*It, {frontend::IndexHeaderMap, frontend::Angled}, None, true);
+ for (; It < End && Matches(*It, {frontend::IndexHeaderMap, frontend::Angled},
+ std::nullopt, true);
++It) {
OptSpecifier Opt = [It, Matches]() {
if (Matches(*It, frontend::IndexHeaderMap, true, true))
@@ -2862,8 +3070,8 @@ static void GenerateHeaderSearchArgs(HeaderSearchOptions &Opts,
}();
if (It->Group == frontend::IndexHeaderMap)
- GenerateArg(Args, OPT_index_header_map, SA);
- GenerateArg(Args, Opt, It->Path, SA);
+ GenerateArg(Consumer, OPT_index_header_map);
+ GenerateArg(Consumer, Opt, It->Path);
};
// Note: some paths that came from "[-iprefix=xx] -iwithprefixbefore=yy" may
@@ -2875,33 +3083,34 @@ static void GenerateHeaderSearchArgs(HeaderSearchOptions &Opts,
++It) {
OptSpecifier Opt =
It->Group == frontend::After ? OPT_iwithprefix : OPT_iwithprefixbefore;
- GenerateArg(Args, Opt, It->Path, SA);
+ GenerateArg(Consumer, Opt, It->Path);
}
// Note: Some paths that came from "-idirafter=xxyy" may have already been
// generated as "-iwithprefix=xxyy". If that's the case, their position on
// command line was such that this has no semantic impact on include paths.
for (; It < End && Matches(*It, {frontend::After}, false, true); ++It)
- GenerateArg(Args, OPT_idirafter, It->Path, SA);
+ GenerateArg(Consumer, OPT_idirafter, It->Path);
for (; It < End && Matches(*It, {frontend::Quoted}, false, true); ++It)
- GenerateArg(Args, OPT_iquote, It->Path, SA);
- for (; It < End && Matches(*It, {frontend::System}, false, None); ++It)
- GenerateArg(Args, It->IgnoreSysRoot ? OPT_isystem : OPT_iwithsysroot,
- It->Path, SA);
+ GenerateArg(Consumer, OPT_iquote, It->Path);
+ for (; It < End && Matches(*It, {frontend::System}, false, std::nullopt);
+ ++It)
+ GenerateArg(Consumer, It->IgnoreSysRoot ? OPT_isystem : OPT_iwithsysroot,
+ It->Path);
for (; It < End && Matches(*It, {frontend::System}, true, true); ++It)
- GenerateArg(Args, OPT_iframework, It->Path, SA);
+ GenerateArg(Consumer, OPT_iframework, It->Path);
for (; It < End && Matches(*It, {frontend::System}, true, false); ++It)
- GenerateArg(Args, OPT_iframeworkwithsysroot, It->Path, SA);
+ GenerateArg(Consumer, OPT_iframeworkwithsysroot, It->Path);
// Add the paths for the various language specific isystem flags.
for (; It < End && Matches(*It, {frontend::CSystem}, false, true); ++It)
- GenerateArg(Args, OPT_c_isystem, It->Path, SA);
+ GenerateArg(Consumer, OPT_c_isystem, It->Path);
for (; It < End && Matches(*It, {frontend::CXXSystem}, false, true); ++It)
- GenerateArg(Args, OPT_cxx_isystem, It->Path, SA);
+ GenerateArg(Consumer, OPT_cxx_isystem, It->Path);
for (; It < End && Matches(*It, {frontend::ObjCSystem}, false, true); ++It)
- GenerateArg(Args, OPT_objc_isystem, It->Path, SA);
+ GenerateArg(Consumer, OPT_objc_isystem, It->Path);
for (; It < End && Matches(*It, {frontend::ObjCXXSystem}, false, true); ++It)
- GenerateArg(Args, OPT_objcxx_isystem, It->Path, SA);
+ GenerateArg(Consumer, OPT_objcxx_isystem, It->Path);
// Add the internal paths from a driver that detects standard include paths.
// Note: Some paths that came from "-internal-isystem" arguments may have
@@ -2913,7 +3122,7 @@ static void GenerateHeaderSearchArgs(HeaderSearchOptions &Opts,
OptSpecifier Opt = It->Group == frontend::System
? OPT_internal_isystem
: OPT_internal_externc_isystem;
- GenerateArg(Args, Opt, It->Path, SA);
+ GenerateArg(Consumer, Opt, It->Path);
}
assert(It == End && "Unhandled HeaderSearchOption::Entry.");
@@ -2922,11 +3131,11 @@ static void GenerateHeaderSearchArgs(HeaderSearchOptions &Opts,
for (const auto &P : Opts.SystemHeaderPrefixes) {
OptSpecifier Opt = P.IsSystemHeader ? OPT_system_header_prefix
: OPT_no_system_header_prefix;
- GenerateArg(Args, Opt, P.Prefix, SA);
+ GenerateArg(Consumer, Opt, P.Prefix);
}
for (const std::string &F : Opts.VFSOverlayFiles)
- GenerateArg(Args, OPT_ivfsoverlay, F, SA);
+ GenerateArg(Consumer, OPT_ivfsoverlay, F);
}
static bool ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
@@ -2936,14 +3145,8 @@ static bool ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
HeaderSearchOptions *HeaderSearchOpts = &Opts;
-#define HEADER_SEARCH_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define HEADER_SEARCH_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef HEADER_SEARCH_OPTION_WITH_MARSHALLING
@@ -2959,15 +3162,15 @@ static bool ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
llvm::sys::fs::make_absolute(WorkingDir, P);
}
llvm::sys::path::remove_dots(P);
- Opts.ModuleCachePath = std::string(P.str());
+ Opts.ModuleCachePath = std::string(P);
// Only the -fmodule-file=<name>=<file> form.
for (const auto *A : Args.filtered(OPT_fmodule_file)) {
StringRef Val = A->getValue();
- if (Val.find('=') != StringRef::npos){
+ if (Val.contains('=')) {
auto Split = Val.split('=');
- Opts.PrebuiltModuleFiles.insert(
- {std::string(Split.first), std::string(Split.second)});
+ Opts.PrebuiltModuleFiles.insert_or_assign(
+ std::string(Split.first), std::string(Split.second));
}
}
for (const auto *A : Args.filtered(OPT_fprebuilt_module_path))
@@ -3000,7 +3203,7 @@ static bool ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
SmallString<32> Buffer;
llvm::sys::path::append(Buffer, Opts.Sysroot,
llvm::StringRef(A->getValue()).substr(1));
- Path = std::string(Buffer.str());
+ Path = std::string(Buffer);
}
Opts.AddPath(Path, Group, IsFramework,
@@ -3058,160 +3261,31 @@ static bool ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
Opts.AddSystemHeaderPrefix(
A->getValue(), A->getOption().matches(OPT_system_header_prefix));
- for (const auto *A : Args.filtered(OPT_ivfsoverlay))
+ for (const auto *A : Args.filtered(OPT_ivfsoverlay, OPT_vfsoverlay))
Opts.AddVFSOverlayFile(A->getValue());
return Diags.getNumErrors() == NumErrorsBefore;
}
-void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
- const llvm::Triple &T,
- std::vector<std::string> &Includes,
- LangStandard::Kind LangStd) {
- // Set some properties which depend solely on the input kind; it would be nice
- // to move these to the language standard, and have the driver resolve the
- // input kind + language standard.
- //
- // FIXME: Perhaps a better model would be for a single source file to have
- // multiple language standards (C / C++ std, ObjC std, OpenCL std, OpenMP std)
- // simultaneously active?
- if (IK.getLanguage() == Language::Asm) {
- Opts.AsmPreprocessor = 1;
- } else if (IK.isObjectiveC()) {
- Opts.ObjC = 1;
- }
-
- if (LangStd == LangStandard::lang_unspecified) {
- // Based on the base language, pick one.
- switch (IK.getLanguage()) {
- case Language::Unknown:
- case Language::LLVM_IR:
- llvm_unreachable("Invalid input kind!");
- case Language::OpenCL:
- LangStd = LangStandard::lang_opencl12;
- break;
- case Language::OpenCLCXX:
- LangStd = LangStandard::lang_openclcpp;
- break;
- case Language::CUDA:
- LangStd = LangStandard::lang_cuda;
- break;
- case Language::Asm:
- case Language::C:
-#if defined(CLANG_DEFAULT_STD_C)
- LangStd = CLANG_DEFAULT_STD_C;
-#else
- // The PS4 uses C99 as the default C standard.
- if (T.isPS4())
- LangStd = LangStandard::lang_gnu99;
- else
- LangStd = LangStandard::lang_gnu17;
-#endif
- break;
- case Language::ObjC:
-#if defined(CLANG_DEFAULT_STD_C)
- LangStd = CLANG_DEFAULT_STD_C;
-#else
- LangStd = LangStandard::lang_gnu11;
-#endif
- break;
- case Language::CXX:
- case Language::ObjCXX:
-#if defined(CLANG_DEFAULT_STD_CXX)
- LangStd = CLANG_DEFAULT_STD_CXX;
-#else
- LangStd = LangStandard::lang_gnucxx14;
-#endif
- break;
- case Language::RenderScript:
- LangStd = LangStandard::lang_c99;
- break;
- case Language::HIP:
- LangStd = LangStandard::lang_hip;
- break;
- }
- }
+static void GenerateAPINotesArgs(const APINotesOptions &Opts,
+ ArgumentConsumer Consumer) {
+ if (!Opts.SwiftVersion.empty())
+ GenerateArg(Consumer, OPT_fapinotes_swift_version,
+ Opts.SwiftVersion.getAsString());
- const LangStandard &Std = LangStandard::getLangStandardForKind(LangStd);
- Opts.LangStd = LangStd;
- Opts.LineComment = Std.hasLineComments();
- Opts.C99 = Std.isC99();
- Opts.C11 = Std.isC11();
- Opts.C17 = Std.isC17();
- Opts.C2x = Std.isC2x();
- Opts.CPlusPlus = Std.isCPlusPlus();
- Opts.CPlusPlus11 = Std.isCPlusPlus11();
- Opts.CPlusPlus14 = Std.isCPlusPlus14();
- Opts.CPlusPlus17 = Std.isCPlusPlus17();
- Opts.CPlusPlus20 = Std.isCPlusPlus20();
- Opts.CPlusPlus2b = Std.isCPlusPlus2b();
- Opts.GNUMode = Std.isGNUMode();
- Opts.GNUCVersion = 0;
- Opts.HexFloats = Std.hasHexFloats();
- Opts.ImplicitInt = Std.hasImplicitInt();
-
- Opts.CPlusPlusModules = Opts.CPlusPlus20;
-
- // Set OpenCL Version.
- Opts.OpenCL = Std.isOpenCL();
- if (LangStd == LangStandard::lang_opencl10)
- Opts.OpenCLVersion = 100;
- else if (LangStd == LangStandard::lang_opencl11)
- Opts.OpenCLVersion = 110;
- else if (LangStd == LangStandard::lang_opencl12)
- Opts.OpenCLVersion = 120;
- else if (LangStd == LangStandard::lang_opencl20)
- Opts.OpenCLVersion = 200;
- else if (LangStd == LangStandard::lang_opencl30)
- Opts.OpenCLVersion = 300;
- else if (LangStd == LangStandard::lang_openclcpp)
- Opts.OpenCLCPlusPlusVersion = 100;
-
- // OpenCL has some additional defaults.
- if (Opts.OpenCL) {
- Opts.AltiVec = 0;
- Opts.ZVector = 0;
- Opts.setDefaultFPContractMode(LangOptions::FPM_On);
- Opts.OpenCLCPlusPlus = Opts.CPlusPlus;
- Opts.OpenCLPipes = Opts.OpenCLCPlusPlus || Opts.OpenCLVersion == 200;
- Opts.OpenCLGenericAddressSpace =
- Opts.OpenCLCPlusPlus || Opts.OpenCLVersion == 200;
-
- // Include default header file for OpenCL.
- if (Opts.IncludeDefaultHeader) {
- if (Opts.DeclareOpenCLBuiltins) {
- // Only include base header file for builtin types and constants.
- Includes.push_back("opencl-c-base.h");
- } else {
- Includes.push_back("opencl-c.h");
- }
- }
- }
+ for (const auto &Path : Opts.ModuleSearchPaths)
+ GenerateArg(Consumer, OPT_iapinotes_modules, Path);
+}
- Opts.HIP = IK.getLanguage() == Language::HIP;
- Opts.CUDA = IK.getLanguage() == Language::CUDA || Opts.HIP;
- if (Opts.HIP) {
- // HIP toolchain does not support 'Fast' FPOpFusion in backends since it
- // fuses multiplication/addition instructions without contract flag from
- // device library functions in LLVM bitcode, which causes accuracy loss in
- // certain math functions, e.g. tan(-1e20) becomes -0.933 instead of 0.8446.
- // For device library functions in bitcode to work, 'Strict' or 'Standard'
- // FPOpFusion options in backends is needed. Therefore 'fast-honor-pragmas'
- // FP contract option is used to allow fuse across statements in frontend
- // whereas respecting contract flag in backend.
- Opts.setDefaultFPContractMode(LangOptions::FPM_FastHonorPragmas);
- } else if (Opts.CUDA) {
- // Allow fuse across statements disregarding pragmas.
- Opts.setDefaultFPContractMode(LangOptions::FPM_Fast);
+static void ParseAPINotesArgs(APINotesOptions &Opts, ArgList &Args,
+ DiagnosticsEngine &diags) {
+ if (const Arg *A = Args.getLastArg(OPT_fapinotes_swift_version)) {
+ if (Opts.SwiftVersion.tryParse(A->getValue()))
+ diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
}
-
- Opts.RenderScript = IK.getLanguage() == Language::RenderScript;
-
- // OpenCL and C++ both have bool, true, false keywords.
- Opts.Bool = Opts.OpenCL || Opts.CPlusPlus;
-
- // OpenCL has half keyword
- Opts.Half = Opts.OpenCL;
+ for (const Arg *A : Args.filtered(OPT_iapinotes_modules))
+ Opts.ModuleSearchPaths.push_back(A->getValue());
}
/// Check if input file kind and language standard are compatible.
@@ -3251,13 +3325,16 @@ static bool IsInputCompatibleWithStandard(InputKind IK,
// FIXME: The -std= value is not ignored; it affects the tokenization
// and preprocessing rules if we're preprocessing this asm input.
return true;
+
+ case Language::HLSL:
+ return S.getLanguage() == Language::HLSL;
}
llvm_unreachable("unexpected input language");
}
/// Get language name for given input kind.
-static const StringRef GetInputKindName(InputKind IK) {
+static StringRef GetInputKindName(InputKind IK) {
switch (IK.getLanguage()) {
case Language::C:
return "C";
@@ -3283,26 +3360,29 @@ static const StringRef GetInputKindName(InputKind IK) {
case Language::LLVM_IR:
return "LLVM IR";
+ case Language::HLSL:
+ return "HLSL";
+
case Language::Unknown:
break;
}
llvm_unreachable("unknown input language");
}
-void CompilerInvocation::GenerateLangArgs(const LangOptions &Opts,
- SmallVectorImpl<const char *> &Args,
- StringAllocator SA,
- const llvm::Triple &T, InputKind IK) {
+void CompilerInvocationBase::GenerateLangArgs(const LangOptions &Opts,
+ ArgumentConsumer Consumer,
+ const llvm::Triple &T,
+ InputKind IK) {
if (IK.getFormat() == InputKind::Precompiled ||
IK.getLanguage() == Language::LLVM_IR) {
if (Opts.ObjCAutoRefCount)
- GenerateArg(Args, OPT_fobjc_arc, SA);
+ GenerateArg(Consumer, OPT_fobjc_arc);
if (Opts.PICLevel != 0)
- GenerateArg(Args, OPT_pic_level, Twine(Opts.PICLevel), SA);
+ GenerateArg(Consumer, OPT_pic_level, Twine(Opts.PICLevel));
if (Opts.PIE)
- GenerateArg(Args, OPT_pic_is_pie, SA);
+ GenerateArg(Consumer, OPT_pic_is_pie);
for (StringRef Sanitizer : serializeSanitizerKinds(Opts.Sanitize))
- GenerateArg(Args, OPT_fsanitize_EQ, Sanitizer, SA);
+ GenerateArg(Consumer, OPT_fsanitize_EQ, Sanitizer);
return;
}
@@ -3314,7 +3394,8 @@ void CompilerInvocation::GenerateLangArgs(const LangOptions &Opts,
case LangStandard::lang_opencl12:
case LangStandard::lang_opencl20:
case LangStandard::lang_opencl30:
- case LangStandard::lang_openclcpp:
+ case LangStandard::lang_openclcpp10:
+ case LangStandard::lang_openclcpp2021:
StdOpt = OPT_cl_std_EQ;
break;
default:
@@ -3323,139 +3404,146 @@ void CompilerInvocation::GenerateLangArgs(const LangOptions &Opts,
}
auto LangStandard = LangStandard::getLangStandardForKind(Opts.LangStd);
- GenerateArg(Args, StdOpt, LangStandard.getName(), SA);
+ GenerateArg(Consumer, StdOpt, LangStandard.getName());
if (Opts.IncludeDefaultHeader)
- GenerateArg(Args, OPT_finclude_default_header, SA);
+ GenerateArg(Consumer, OPT_finclude_default_header);
if (Opts.DeclareOpenCLBuiltins)
- GenerateArg(Args, OPT_fdeclare_opencl_builtins, SA);
+ GenerateArg(Consumer, OPT_fdeclare_opencl_builtins);
const LangOptions *LangOpts = &Opts;
-#define LANG_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define LANG_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Consumer, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef LANG_OPTION_WITH_MARSHALLING
// The '-fcf-protection=' option is generated by CodeGenOpts generator.
if (Opts.ObjC) {
- GenerateArg(Args, OPT_fobjc_runtime_EQ, Opts.ObjCRuntime.getAsString(), SA);
+ GenerateArg(Consumer, OPT_fobjc_runtime_EQ, Opts.ObjCRuntime.getAsString());
if (Opts.GC == LangOptions::GCOnly)
- GenerateArg(Args, OPT_fobjc_gc_only, SA);
+ GenerateArg(Consumer, OPT_fobjc_gc_only);
else if (Opts.GC == LangOptions::HybridGC)
- GenerateArg(Args, OPT_fobjc_gc, SA);
+ GenerateArg(Consumer, OPT_fobjc_gc);
else if (Opts.ObjCAutoRefCount == 1)
- GenerateArg(Args, OPT_fobjc_arc, SA);
+ GenerateArg(Consumer, OPT_fobjc_arc);
if (Opts.ObjCWeakRuntime)
- GenerateArg(Args, OPT_fobjc_runtime_has_weak, SA);
+ GenerateArg(Consumer, OPT_fobjc_runtime_has_weak);
if (Opts.ObjCWeak)
- GenerateArg(Args, OPT_fobjc_weak, SA);
+ GenerateArg(Consumer, OPT_fobjc_weak);
if (Opts.ObjCSubscriptingLegacyRuntime)
- GenerateArg(Args, OPT_fobjc_subscripting_legacy_runtime, SA);
+ GenerateArg(Consumer, OPT_fobjc_subscripting_legacy_runtime);
}
if (Opts.GNUCVersion != 0) {
unsigned Major = Opts.GNUCVersion / 100 / 100;
unsigned Minor = (Opts.GNUCVersion / 100) % 100;
unsigned Patch = Opts.GNUCVersion % 100;
- GenerateArg(Args, OPT_fgnuc_version_EQ,
- Twine(Major) + "." + Twine(Minor) + "." + Twine(Patch), SA);
+ GenerateArg(Consumer, OPT_fgnuc_version_EQ,
+ Twine(Major) + "." + Twine(Minor) + "." + Twine(Patch));
}
if (Opts.IgnoreXCOFFVisibility)
- GenerateArg(Args, OPT_mignore_xcoff_visibility, SA);
+ GenerateArg(Consumer, OPT_mignore_xcoff_visibility);
if (Opts.SignedOverflowBehavior == LangOptions::SOB_Trapping) {
- GenerateArg(Args, OPT_ftrapv, SA);
- GenerateArg(Args, OPT_ftrapv_handler, Opts.OverflowHandler, SA);
+ GenerateArg(Consumer, OPT_ftrapv);
+ GenerateArg(Consumer, OPT_ftrapv_handler, Opts.OverflowHandler);
} else if (Opts.SignedOverflowBehavior == LangOptions::SOB_Defined) {
- GenerateArg(Args, OPT_fwrapv, SA);
+ GenerateArg(Consumer, OPT_fwrapv);
}
if (Opts.MSCompatibilityVersion != 0) {
unsigned Major = Opts.MSCompatibilityVersion / 10000000;
unsigned Minor = (Opts.MSCompatibilityVersion / 100000) % 100;
unsigned Subminor = Opts.MSCompatibilityVersion % 100000;
- GenerateArg(Args, OPT_fms_compatibility_version,
- Twine(Major) + "." + Twine(Minor) + "." + Twine(Subminor), SA);
+ GenerateArg(Consumer, OPT_fms_compatibility_version,
+ Twine(Major) + "." + Twine(Minor) + "." + Twine(Subminor));
}
- if ((!Opts.GNUMode && !Opts.MSVCCompat && !Opts.CPlusPlus17) || T.isOSzOS()) {
+ if ((!Opts.GNUMode && !Opts.MSVCCompat && !Opts.CPlusPlus17 && !Opts.C23) ||
+ T.isOSzOS()) {
if (!Opts.Trigraphs)
- GenerateArg(Args, OPT_fno_trigraphs, SA);
+ GenerateArg(Consumer, OPT_fno_trigraphs);
} else {
if (Opts.Trigraphs)
- GenerateArg(Args, OPT_ftrigraphs, SA);
+ GenerateArg(Consumer, OPT_ftrigraphs);
}
if (Opts.Blocks && !(Opts.OpenCL && Opts.OpenCLVersion == 200))
- GenerateArg(Args, OPT_fblocks, SA);
+ GenerateArg(Consumer, OPT_fblocks);
if (Opts.ConvergentFunctions &&
!(Opts.OpenCL || (Opts.CUDA && Opts.CUDAIsDevice) || Opts.SYCLIsDevice))
- GenerateArg(Args, OPT_fconvergent_functions, SA);
+ GenerateArg(Consumer, OPT_fconvergent_functions);
if (Opts.NoBuiltin && !Opts.Freestanding)
- GenerateArg(Args, OPT_fno_builtin, SA);
+ GenerateArg(Consumer, OPT_fno_builtin);
if (!Opts.NoBuiltin)
for (const auto &Func : Opts.NoBuiltinFuncs)
- GenerateArg(Args, OPT_fno_builtin_, Func, SA);
+ GenerateArg(Consumer, OPT_fno_builtin_, Func);
if (Opts.LongDoubleSize == 128)
- GenerateArg(Args, OPT_mlong_double_128, SA);
+ GenerateArg(Consumer, OPT_mlong_double_128);
else if (Opts.LongDoubleSize == 64)
- GenerateArg(Args, OPT_mlong_double_64, SA);
+ GenerateArg(Consumer, OPT_mlong_double_64);
+ else if (Opts.LongDoubleSize == 80)
+ GenerateArg(Consumer, OPT_mlong_double_80);
// Not generating '-mrtd', it's just an alias for '-fdefault-calling-conv='.
// OpenMP was requested via '-fopenmp', not implied by '-fopenmp-simd' or
// '-fopenmp-targets='.
if (Opts.OpenMP && !Opts.OpenMPSimd) {
- GenerateArg(Args, OPT_fopenmp, SA);
+ GenerateArg(Consumer, OPT_fopenmp);
- if (Opts.OpenMP != 50)
- GenerateArg(Args, OPT_fopenmp_version_EQ, Twine(Opts.OpenMP), SA);
+ if (Opts.OpenMP != 51)
+ GenerateArg(Consumer, OPT_fopenmp_version_EQ, Twine(Opts.OpenMP));
if (!Opts.OpenMPUseTLS)
- GenerateArg(Args, OPT_fnoopenmp_use_tls, SA);
+ GenerateArg(Consumer, OPT_fnoopenmp_use_tls);
- if (Opts.OpenMPIsDevice)
- GenerateArg(Args, OPT_fopenmp_is_device, SA);
+ if (Opts.OpenMPIsTargetDevice)
+ GenerateArg(Consumer, OPT_fopenmp_is_target_device);
if (Opts.OpenMPIRBuilder)
- GenerateArg(Args, OPT_fopenmp_enable_irbuilder, SA);
+ GenerateArg(Consumer, OPT_fopenmp_enable_irbuilder);
}
if (Opts.OpenMPSimd) {
- GenerateArg(Args, OPT_fopenmp_simd, SA);
+ GenerateArg(Consumer, OPT_fopenmp_simd);
- if (Opts.OpenMP != 50)
- GenerateArg(Args, OPT_fopenmp_version_EQ, Twine(Opts.OpenMP), SA);
+ if (Opts.OpenMP != 51)
+ GenerateArg(Consumer, OPT_fopenmp_version_EQ, Twine(Opts.OpenMP));
}
+ if (Opts.OpenMPThreadSubscription)
+ GenerateArg(Consumer, OPT_fopenmp_assume_threads_oversubscription);
+
+ if (Opts.OpenMPTeamSubscription)
+ GenerateArg(Consumer, OPT_fopenmp_assume_teams_oversubscription);
+
+ if (Opts.OpenMPTargetDebug != 0)
+ GenerateArg(Consumer, OPT_fopenmp_target_debug_EQ,
+ Twine(Opts.OpenMPTargetDebug));
+
if (Opts.OpenMPCUDANumSMs != 0)
- GenerateArg(Args, OPT_fopenmp_cuda_number_of_sm_EQ,
- Twine(Opts.OpenMPCUDANumSMs), SA);
+ GenerateArg(Consumer, OPT_fopenmp_cuda_number_of_sm_EQ,
+ Twine(Opts.OpenMPCUDANumSMs));
if (Opts.OpenMPCUDABlocksPerSM != 0)
- GenerateArg(Args, OPT_fopenmp_cuda_blocks_per_sm_EQ,
- Twine(Opts.OpenMPCUDABlocksPerSM), SA);
+ GenerateArg(Consumer, OPT_fopenmp_cuda_blocks_per_sm_EQ,
+ Twine(Opts.OpenMPCUDABlocksPerSM));
if (Opts.OpenMPCUDAReductionBufNum != 1024)
- GenerateArg(Args, OPT_fopenmp_cuda_teams_reduction_recs_num_EQ,
- Twine(Opts.OpenMPCUDAReductionBufNum), SA);
+ GenerateArg(Consumer, OPT_fopenmp_cuda_teams_reduction_recs_num_EQ,
+ Twine(Opts.OpenMPCUDAReductionBufNum));
if (!Opts.OMPTargetTriples.empty()) {
std::string Targets;
@@ -3463,74 +3551,106 @@ void CompilerInvocation::GenerateLangArgs(const LangOptions &Opts,
llvm::interleave(
Opts.OMPTargetTriples, OS,
[&OS](const llvm::Triple &T) { OS << T.str(); }, ",");
- GenerateArg(Args, OPT_fopenmp_targets_EQ, OS.str(), SA);
+ GenerateArg(Consumer, OPT_fopenmp_targets_EQ, OS.str());
}
if (!Opts.OMPHostIRFile.empty())
- GenerateArg(Args, OPT_fopenmp_host_ir_file_path, Opts.OMPHostIRFile, SA);
+ GenerateArg(Consumer, OPT_fopenmp_host_ir_file_path, Opts.OMPHostIRFile);
if (Opts.OpenMPCUDAMode)
- GenerateArg(Args, OPT_fopenmp_cuda_mode, SA);
+ GenerateArg(Consumer, OPT_fopenmp_cuda_mode);
- if (Opts.OpenMPCUDAForceFullRuntime)
- GenerateArg(Args, OPT_fopenmp_cuda_force_full_runtime, SA);
+ if (Opts.OpenACC) {
+ GenerateArg(Consumer, OPT_fopenacc);
+ if (!Opts.OpenACCMacroOverride.empty())
+ GenerateArg(Consumer, OPT_openacc_macro_override,
+ Opts.OpenACCMacroOverride);
+ }
// The arguments used to set Optimize, OptimizeSize and NoInlineDefine are
// generated from CodeGenOptions.
if (Opts.DefaultFPContractMode == LangOptions::FPM_Fast)
- GenerateArg(Args, OPT_ffp_contract, "fast", SA);
+ GenerateArg(Consumer, OPT_ffp_contract, "fast");
else if (Opts.DefaultFPContractMode == LangOptions::FPM_On)
- GenerateArg(Args, OPT_ffp_contract, "on", SA);
+ GenerateArg(Consumer, OPT_ffp_contract, "on");
else if (Opts.DefaultFPContractMode == LangOptions::FPM_Off)
- GenerateArg(Args, OPT_ffp_contract, "off", SA);
+ GenerateArg(Consumer, OPT_ffp_contract, "off");
else if (Opts.DefaultFPContractMode == LangOptions::FPM_FastHonorPragmas)
- GenerateArg(Args, OPT_ffp_contract, "fast-honor-pragmas", SA);
+ GenerateArg(Consumer, OPT_ffp_contract, "fast-honor-pragmas");
for (StringRef Sanitizer : serializeSanitizerKinds(Opts.Sanitize))
- GenerateArg(Args, OPT_fsanitize_EQ, Sanitizer, SA);
+ GenerateArg(Consumer, OPT_fsanitize_EQ, Sanitizer);
// Conflating '-fsanitize-system-ignorelist' and '-fsanitize-ignorelist'.
for (const std::string &F : Opts.NoSanitizeFiles)
- GenerateArg(Args, OPT_fsanitize_ignorelist_EQ, F, SA);
-
- if (Opts.getClangABICompat() == LangOptions::ClangABI::Ver3_8)
- GenerateArg(Args, OPT_fclang_abi_compat_EQ, "3.8", SA);
- else if (Opts.getClangABICompat() == LangOptions::ClangABI::Ver4)
- GenerateArg(Args, OPT_fclang_abi_compat_EQ, "4.0", SA);
- else if (Opts.getClangABICompat() == LangOptions::ClangABI::Ver6)
- GenerateArg(Args, OPT_fclang_abi_compat_EQ, "6.0", SA);
- else if (Opts.getClangABICompat() == LangOptions::ClangABI::Ver7)
- GenerateArg(Args, OPT_fclang_abi_compat_EQ, "7.0", SA);
- else if (Opts.getClangABICompat() == LangOptions::ClangABI::Ver9)
- GenerateArg(Args, OPT_fclang_abi_compat_EQ, "9.0", SA);
- else if (Opts.getClangABICompat() == LangOptions::ClangABI::Ver11)
- GenerateArg(Args, OPT_fclang_abi_compat_EQ, "11.0", SA);
- else if (Opts.getClangABICompat() == LangOptions::ClangABI::Ver12)
- GenerateArg(Args, OPT_fclang_abi_compat_EQ, "12.0", SA);
+ GenerateArg(Consumer, OPT_fsanitize_ignorelist_EQ, F);
+
+ switch (Opts.getClangABICompat()) {
+ case LangOptions::ClangABI::Ver3_8:
+ GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "3.8");
+ break;
+ case LangOptions::ClangABI::Ver4:
+ GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "4.0");
+ break;
+ case LangOptions::ClangABI::Ver6:
+ GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "6.0");
+ break;
+ case LangOptions::ClangABI::Ver7:
+ GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "7.0");
+ break;
+ case LangOptions::ClangABI::Ver9:
+ GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "9.0");
+ break;
+ case LangOptions::ClangABI::Ver11:
+ GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "11.0");
+ break;
+ case LangOptions::ClangABI::Ver12:
+ GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "12.0");
+ break;
+ case LangOptions::ClangABI::Ver14:
+ GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "14.0");
+ break;
+ case LangOptions::ClangABI::Ver15:
+ GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "15.0");
+ break;
+ case LangOptions::ClangABI::Ver17:
+ GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "17.0");
+ break;
+ case LangOptions::ClangABI::Latest:
+ break;
+ }
if (Opts.getSignReturnAddressScope() ==
LangOptions::SignReturnAddressScopeKind::All)
- GenerateArg(Args, OPT_msign_return_address_EQ, "all", SA);
+ GenerateArg(Consumer, OPT_msign_return_address_EQ, "all");
else if (Opts.getSignReturnAddressScope() ==
LangOptions::SignReturnAddressScopeKind::NonLeaf)
- GenerateArg(Args, OPT_msign_return_address_EQ, "non-leaf", SA);
+ GenerateArg(Consumer, OPT_msign_return_address_EQ, "non-leaf");
if (Opts.getSignReturnAddressKey() ==
LangOptions::SignReturnAddressKeyKind::BKey)
- GenerateArg(Args, OPT_msign_return_address_key_EQ, "b_key", SA);
+ GenerateArg(Consumer, OPT_msign_return_address_key_EQ, "b_key");
if (Opts.CXXABI)
- GenerateArg(Args, OPT_fcxx_abi_EQ, TargetCXXABI::getSpelling(*Opts.CXXABI),
- SA);
+ GenerateArg(Consumer, OPT_fcxx_abi_EQ,
+ TargetCXXABI::getSpelling(*Opts.CXXABI));
if (Opts.RelativeCXXABIVTables)
- GenerateArg(Args, OPT_fexperimental_relative_cxx_abi_vtables, SA);
+ GenerateArg(Consumer, OPT_fexperimental_relative_cxx_abi_vtables);
else
- GenerateArg(Args, OPT_fno_experimental_relative_cxx_abi_vtables, SA);
+ GenerateArg(Consumer, OPT_fno_experimental_relative_cxx_abi_vtables);
+
+ if (Opts.UseTargetPathSeparator)
+ GenerateArg(Consumer, OPT_ffile_reproducible);
+ else
+ GenerateArg(Consumer, OPT_fno_file_reproducible);
for (const auto &MP : Opts.MacroPrefixMap)
- GenerateArg(Args, OPT_fmacro_prefix_map_EQ, MP.first + "=" + MP.second, SA);
+ GenerateArg(Consumer, OPT_fmacro_prefix_map_EQ, MP.first + "=" + MP.second);
+
+ if (!Opts.RandstructSeed.empty())
+ GenerateArg(Consumer, OPT_frandomize_layout_seed_EQ, Opts.RandstructSeed);
}
bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
@@ -3611,7 +3731,9 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
.Cases("cl1.2", "CL1.2", LangStandard::lang_opencl12)
.Cases("cl2.0", "CL2.0", LangStandard::lang_opencl20)
.Cases("cl3.0", "CL3.0", LangStandard::lang_opencl30)
- .Cases("clc++", "CLC++", LangStandard::lang_openclcpp)
+ .Cases("clc++", "CLC++", LangStandard::lang_openclcpp10)
+ .Cases("clc++1.0", "CLC++1.0", LangStandard::lang_openclcpp10)
+ .Cases("clc++2021", "CLC++2021", LangStandard::lang_openclcpp2021)
.Default(LangStandard::lang_unspecified);
if (OpenCLLangStd == LangStandard::lang_unspecified) {
@@ -3626,20 +3748,14 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Opts.IncludeDefaultHeader = Args.hasArg(OPT_finclude_default_header);
Opts.DeclareOpenCLBuiltins = Args.hasArg(OPT_fdeclare_opencl_builtins);
- CompilerInvocation::setLangDefaults(Opts, IK, T, Includes, LangStd);
+ LangOptions::setLangDefaults(Opts, IK.getLanguage(), T, Includes, LangStd);
// The key paths of codegen options defined in Options.td start with
// "LangOpts->". Let's provide the expected variable name and type.
LangOptions *LangOpts = &Opts;
-#define LANG_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define LANG_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef LANG_OPTION_WITH_MARSHALLING
@@ -3713,8 +3829,8 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
VersionTuple GNUCVer;
bool Invalid = GNUCVer.tryParse(A->getValue());
unsigned Major = GNUCVer.getMajor();
- unsigned Minor = GNUCVer.getMinor().getValueOr(0);
- unsigned Patch = GNUCVer.getSubminor().getValueOr(0);
+ unsigned Minor = GNUCVer.getMinor().value_or(0);
+ unsigned Patch = GNUCVer.getSubminor().value_or(0);
if (Invalid || GNUCVer.getBuild() || Minor >= 100 || Patch >= 100) {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue();
@@ -3722,28 +3838,7 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Opts.GNUCVersion = Major * 100 * 100 + Minor * 100 + Patch;
}
- // In AIX OS, the -mignore-xcoff-visibility is enable by default if there is
- // no -fvisibility=* option.
- // This is the reason why '-fvisibility' needs to be always generated:
- // its absence implies '-mignore-xcoff-visibility'.
- //
- // Suppose the original cc1 command line does contain '-fvisibility default':
- // '-mignore-xcoff-visibility' should not be implied.
- // * If '-fvisibility' is not generated (as most options with default values
- // don't), its absence would imply '-mignore-xcoff-visibility'. This changes
- // the command line semantics.
- // * If '-fvisibility' is generated regardless of its presence and value,
- // '-mignore-xcoff-visibility' won't be implied and the command line
- // semantics are kept intact.
- //
- // When the original cc1 command line does **not** contain '-fvisibility',
- // '-mignore-xcoff-visibility' is implied. The generated command line will
- // contain both '-fvisibility default' and '-mignore-xcoff-visibility' and
- // subsequent calls to `CreateFromArgs`/`generateCC1CommandLine` will always
- // produce the same arguments.
-
- if (T.isOSAIX() && (Args.hasArg(OPT_mignore_xcoff_visibility) ||
- !Args.hasArg(OPT_fvisibility)))
+ if (T.isOSAIX() && (Args.hasArg(OPT_mignore_xcoff_visibility)))
Opts.IgnoreXCOFFVisibility = 1;
if (Args.hasArg(OPT_ftrapv)) {
@@ -3762,34 +3857,43 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args)
<< A->getValue();
Opts.MSCompatibilityVersion = VT.getMajor() * 10000000 +
- VT.getMinor().getValueOr(0) * 100000 +
- VT.getSubminor().getValueOr(0);
+ VT.getMinor().value_or(0) * 100000 +
+ VT.getSubminor().value_or(0);
}
// Mimicking gcc's behavior, trigraphs are only enabled if -trigraphs
// is specified, or -std is set to a conforming mode.
- // Trigraphs are disabled by default in c++1z onwards.
+ // Trigraphs are disabled by default in C++17 and C23 onwards.
// For z/OS, trigraphs are enabled by default (without regard to the above).
Opts.Trigraphs =
- (!Opts.GNUMode && !Opts.MSVCCompat && !Opts.CPlusPlus17) || T.isOSzOS();
+ (!Opts.GNUMode && !Opts.MSVCCompat && !Opts.CPlusPlus17 && !Opts.C23) ||
+ T.isOSzOS();
Opts.Trigraphs =
Args.hasFlag(OPT_ftrigraphs, OPT_fno_trigraphs, Opts.Trigraphs);
Opts.Blocks = Args.hasArg(OPT_fblocks) || (Opts.OpenCL
&& Opts.OpenCLVersion == 200);
- Opts.ConvergentFunctions = Opts.OpenCL || (Opts.CUDA && Opts.CUDAIsDevice) ||
- Opts.SYCLIsDevice ||
- Args.hasArg(OPT_fconvergent_functions);
+ Opts.ConvergentFunctions = Args.hasArg(OPT_fconvergent_functions) ||
+ Opts.OpenCL || (Opts.CUDA && Opts.CUDAIsDevice) ||
+ Opts.SYCLIsDevice;
Opts.NoBuiltin = Args.hasArg(OPT_fno_builtin) || Opts.Freestanding;
if (!Opts.NoBuiltin)
getAllNoBuiltinFuncValues(Args, Opts.NoBuiltinFuncs);
- Opts.LongDoubleSize = Args.hasArg(OPT_mlong_double_128)
- ? 128
- : Args.hasArg(OPT_mlong_double_64) ? 64 : 0;
- if (Opts.FastRelaxedMath)
+ if (Arg *A = Args.getLastArg(options::OPT_LongDouble_Group)) {
+ if (A->getOption().matches(options::OPT_mlong_double_64))
+ Opts.LongDoubleSize = 64;
+ else if (A->getOption().matches(options::OPT_mlong_double_80))
+ Opts.LongDoubleSize = 80;
+ else if (A->getOption().matches(options::OPT_mlong_double_128))
+ Opts.LongDoubleSize = 128;
+ else
+ Opts.LongDoubleSize = 0;
+ }
+ if (Opts.FastRelaxedMath || Opts.CLUnsafeMath)
Opts.setDefaultFPContractMode(LangOptions::FPM_Fast);
+
llvm::sort(Opts.ModuleFeatures);
// -mrtd option
@@ -3798,16 +3902,22 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Diags.Report(diag::err_drv_argument_not_allowed_with)
<< A->getSpelling() << "-fdefault-calling-conv";
else {
- if (T.getArch() != llvm::Triple::x86)
+ switch (T.getArch()) {
+ case llvm::Triple::x86:
+ Opts.setDefaultCallingConv(LangOptions::DCC_StdCall);
+ break;
+ case llvm::Triple::m68k:
+ Opts.setDefaultCallingConv(LangOptions::DCC_RtdCall);
+ break;
+ default:
Diags.Report(diag::err_drv_argument_not_allowed_with)
<< A->getSpelling() << T.getTriple();
- else
- Opts.setDefaultCallingConv(LangOptions::DCC_StdCall);
+ }
}
}
// Check if -fopenmp is specified and set default version to 5.0.
- Opts.OpenMP = Args.hasArg(OPT_fopenmp) ? 50 : 0;
+ Opts.OpenMP = Args.hasArg(OPT_fopenmp) ? 51 : 0;
// Check if -fopenmp-simd is specified.
bool IsSimdSpecified =
Args.hasFlag(options::OPT_fopenmp_simd, options::OPT_fno_openmp_simd,
@@ -3815,23 +3925,24 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Opts.OpenMPSimd = !Opts.OpenMP && IsSimdSpecified;
Opts.OpenMPUseTLS =
Opts.OpenMP && !Args.hasArg(options::OPT_fnoopenmp_use_tls);
- Opts.OpenMPIsDevice =
- Opts.OpenMP && Args.hasArg(options::OPT_fopenmp_is_device);
+ Opts.OpenMPIsTargetDevice =
+ Opts.OpenMP && Args.hasArg(options::OPT_fopenmp_is_target_device);
Opts.OpenMPIRBuilder =
Opts.OpenMP && Args.hasArg(options::OPT_fopenmp_enable_irbuilder);
bool IsTargetSpecified =
- Opts.OpenMPIsDevice || Args.hasArg(options::OPT_fopenmp_targets_EQ);
+ Opts.OpenMPIsTargetDevice || Args.hasArg(options::OPT_fopenmp_targets_EQ);
- Opts.ConvergentFunctions = Opts.ConvergentFunctions || Opts.OpenMPIsDevice;
+ Opts.ConvergentFunctions =
+ Opts.ConvergentFunctions || Opts.OpenMPIsTargetDevice;
if (Opts.OpenMP || Opts.OpenMPSimd) {
if (int Version = getLastArgIntValue(
Args, OPT_fopenmp_version_EQ,
- (IsSimdSpecified || IsTargetSpecified) ? 50 : Opts.OpenMP, Diags))
+ (IsSimdSpecified || IsTargetSpecified) ? 51 : Opts.OpenMP, Diags))
Opts.OpenMP = Version;
// Provide diagnostic when a given target is not expected to be an OpenMP
// device or host.
- if (!Opts.OpenMPIsDevice) {
+ if (!Opts.OpenMPIsTargetDevice) {
switch (T.getArch()) {
default:
break;
@@ -3846,12 +3957,13 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
// Set the flag to prevent the implementation from emitting device exception
// handling code for those requiring so.
- if ((Opts.OpenMPIsDevice && (T.isNVPTX() || T.isAMDGCN())) ||
+ if ((Opts.OpenMPIsTargetDevice && (T.isNVPTX() || T.isAMDGCN())) ||
Opts.OpenCLCPlusPlus) {
+
Opts.Exceptions = 0;
Opts.CXXExceptions = 0;
}
- if (Opts.OpenMPIsDevice && T.isNVPTX()) {
+ if (Opts.OpenMPIsTargetDevice && T.isNVPTX()) {
Opts.OpenMPCUDANumSMs =
getLastArgIntValue(Args, options::OPT_fopenmp_cuda_number_of_sm_EQ,
Opts.OpenMPCUDANumSMs, Diags);
@@ -3863,6 +3975,23 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Opts.OpenMPCUDAReductionBufNum, Diags);
}
+ // Set the value of the debugging flag used in the new offloading device RTL.
+ // Set either by a specific value or to a default if not specified.
+ if (Opts.OpenMPIsTargetDevice && (Args.hasArg(OPT_fopenmp_target_debug) ||
+ Args.hasArg(OPT_fopenmp_target_debug_EQ))) {
+ Opts.OpenMPTargetDebug = getLastArgIntValue(
+ Args, OPT_fopenmp_target_debug_EQ, Opts.OpenMPTargetDebug, Diags);
+ if (!Opts.OpenMPTargetDebug && Args.hasArg(OPT_fopenmp_target_debug))
+ Opts.OpenMPTargetDebug = 1;
+ }
+
+ if (Opts.OpenMPIsTargetDevice) {
+ if (Args.hasArg(OPT_fopenmp_assume_teams_oversubscription))
+ Opts.OpenMPTeamSubscription = true;
+ if (Args.hasArg(OPT_fopenmp_assume_threads_oversubscription))
+ Opts.OpenMPThreadSubscription = true;
+ }
+
// Get the OpenMP target triples if any.
if (Arg *A = Args.getLastArg(options::OPT_fopenmp_targets_EQ)) {
enum ArchPtrSize { Arch16Bit, Arch32Bit, Arch64Bit };
@@ -3904,13 +4033,17 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
}
// Set CUDA mode for OpenMP target NVPTX/AMDGCN if specified in options
- Opts.OpenMPCUDAMode = Opts.OpenMPIsDevice && (T.isNVPTX() || T.isAMDGCN()) &&
+ Opts.OpenMPCUDAMode = Opts.OpenMPIsTargetDevice &&
+ (T.isNVPTX() || T.isAMDGCN()) &&
Args.hasArg(options::OPT_fopenmp_cuda_mode);
- // Set CUDA mode for OpenMP target NVPTX/AMDGCN if specified in options
- Opts.OpenMPCUDAForceFullRuntime =
- Opts.OpenMPIsDevice && (T.isNVPTX() || T.isAMDGCN()) &&
- Args.hasArg(options::OPT_fopenmp_cuda_force_full_runtime);
+ // OpenACC Configuration.
+ if (Args.hasArg(options::OPT_fopenacc)) {
+ Opts.OpenACC = true;
+
+ if (Arg *A = Args.getLastArg(options::OPT_openacc_macro_override))
+ Opts.OpenACCMacroOverride = A->getValue();
+ }
// FIXME: Eliminate this dependency.
unsigned Opt = getOptimizationLevel(Args, IK, Diags),
@@ -3961,13 +4094,13 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
// Check the version number is valid: either 3.x (0 <= x <= 9) or
// y or y.0 (4 <= y <= current version).
- if (!VerParts.first.startswith("0") &&
- !VerParts.first.getAsInteger(10, Major) &&
- 3 <= Major && Major <= CLANG_VERSION_MAJOR &&
- (Major == 3 ? VerParts.second.size() == 1 &&
- !VerParts.second.getAsInteger(10, Minor)
- : VerParts.first.size() == Ver.size() ||
- VerParts.second == "0")) {
+ if (!VerParts.first.starts_with("0") &&
+ !VerParts.first.getAsInteger(10, Major) && 3 <= Major &&
+ Major <= CLANG_VERSION_MAJOR &&
+ (Major == 3
+ ? VerParts.second.size() == 1 &&
+ !VerParts.second.getAsInteger(10, Minor)
+ : VerParts.first.size() == Ver.size() || VerParts.second == "0")) {
// Got a valid version number.
if (Major == 3 && Minor <= 8)
Opts.setClangABICompat(LangOptions::ClangABI::Ver3_8);
@@ -3983,6 +4116,12 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Opts.setClangABICompat(LangOptions::ClangABI::Ver11);
else if (Major <= 12)
Opts.setClangABICompat(LangOptions::ClangABI::Ver12);
+ else if (Major <= 14)
+ Opts.setClangABICompat(LangOptions::ClangABI::Ver14);
+ else if (Major <= 15)
+ Opts.setClangABICompat(LangOptions::ClangABI::Ver15);
+ else if (Major <= 17)
+ Opts.setClangABICompat(LangOptions::ClangABI::Ver17);
} else if (Ver != "latest") {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue();
@@ -4008,10 +4147,10 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
if (Arg *A = Args.getLastArg(OPT_msign_return_address_key_EQ)) {
StringRef SignKey = A->getValue();
if (!SignScope.empty() && !SignKey.empty()) {
- if (SignKey.equals_insensitive("a_key"))
+ if (SignKey == "a_key")
Opts.setSignReturnAddressKey(
LangOptions::SignReturnAddressKeyKind::AKey);
- else if (SignKey.equals_insensitive("b_key"))
+ else if (SignKey == "b_key")
Opts.setSignReturnAddressKey(
LangOptions::SignReturnAddressKeyKind::BKey);
else
@@ -4040,12 +4179,84 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
options::OPT_fno_experimental_relative_cxx_abi_vtables,
TargetCXXABI::usesRelativeVTables(T));
+ // RTTI is on by default.
+ bool HasRTTI = !Args.hasArg(options::OPT_fno_rtti);
+ Opts.OmitVTableRTTI =
+ Args.hasFlag(options::OPT_fexperimental_omit_vtable_rtti,
+ options::OPT_fno_experimental_omit_vtable_rtti, false);
+ if (Opts.OmitVTableRTTI && HasRTTI)
+ Diags.Report(diag::err_drv_using_omit_rtti_component_without_no_rtti);
+
for (const auto &A : Args.getAllArgValues(OPT_fmacro_prefix_map_EQ)) {
auto Split = StringRef(A).split('=');
Opts.MacroPrefixMap.insert(
{std::string(Split.first), std::string(Split.second)});
}
+ Opts.UseTargetPathSeparator =
+ !Args.getLastArg(OPT_fno_file_reproducible) &&
+ (Args.getLastArg(OPT_ffile_compilation_dir_EQ) ||
+ Args.getLastArg(OPT_fmacro_prefix_map_EQ) ||
+ Args.getLastArg(OPT_ffile_reproducible));
+
+ // Error if -mvscale-min is unbounded.
+ if (Arg *A = Args.getLastArg(options::OPT_mvscale_min_EQ)) {
+ unsigned VScaleMin;
+ if (StringRef(A->getValue()).getAsInteger(10, VScaleMin) || VScaleMin == 0)
+ Diags.Report(diag::err_cc1_unbounded_vscale_min);
+ }
+
+ if (const Arg *A = Args.getLastArg(OPT_frandomize_layout_seed_file_EQ)) {
+ std::ifstream SeedFile(A->getValue(0));
+
+ if (!SeedFile.is_open())
+ Diags.Report(diag::err_drv_cannot_open_randomize_layout_seed_file)
+ << A->getValue(0);
+
+ std::getline(SeedFile, Opts.RandstructSeed);
+ }
+
+ if (const Arg *A = Args.getLastArg(OPT_frandomize_layout_seed_EQ))
+ Opts.RandstructSeed = A->getValue(0);
+
+ // Validate options for HLSL
+ if (Opts.HLSL) {
+ // TODO: Revisit restricting SPIR-V to logical once we've figured out how to
+ // handle PhysicalStorageBuffer64 memory model
+ if (T.isDXIL() || T.isSPIRVLogical()) {
+ enum { ShaderModel, VulkanEnv, ShaderStage };
+ enum { OS, Environment };
+
+ int ExpectedOS = T.isSPIRVLogical() ? VulkanEnv : ShaderModel;
+
+ if (T.getOSName().empty()) {
+ Diags.Report(diag::err_drv_hlsl_bad_shader_required_in_target)
+ << ExpectedOS << OS << T.str();
+ } else if (T.getEnvironmentName().empty()) {
+ Diags.Report(diag::err_drv_hlsl_bad_shader_required_in_target)
+ << ShaderStage << Environment << T.str();
+ } else if (!T.isShaderStageEnvironment()) {
+ Diags.Report(diag::err_drv_hlsl_bad_shader_unsupported)
+ << ShaderStage << T.getEnvironmentName() << T.str();
+ }
+
+ if (T.isDXIL()) {
+ if (!T.isShaderModelOS() || T.getOSVersion() == VersionTuple(0)) {
+ Diags.Report(diag::err_drv_hlsl_bad_shader_unsupported)
+ << ShaderModel << T.getOSName() << T.str();
+ }
+ } else if (T.isSPIRVLogical()) {
+ if (!T.isVulkanOS() || T.getVulkanVersion() == VersionTuple(0)) {
+ Diags.Report(diag::err_drv_hlsl_bad_shader_unsupported)
+ << VulkanEnv << T.getOSName() << T.str();
+ }
+ } else {
+ llvm_unreachable("expected DXIL or SPIR-V target");
+ }
+ } else
+ Diags.Report(diag::err_drv_hlsl_unsupported_target) << T.str();
+ }
+
return Diags.getNumErrors() == NumErrorsBefore;
}
@@ -4062,10 +4273,11 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
case frontend::EmitLLVMOnly:
case frontend::EmitCodeGenOnly:
case frontend::EmitObj:
+ case frontend::ExtractAPI:
case frontend::FixIt:
case frontend::GenerateModule:
case frontend::GenerateModuleInterface:
- case frontend::GenerateHeaderModule:
+ case frontend::GenerateHeaderUnit:
case frontend::GeneratePCH:
case frontend::GenerateInterfaceStubs:
case frontend::ParseSyntaxOnly:
@@ -4093,36 +4305,28 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
llvm_unreachable("invalid frontend action");
}
-static void GeneratePreprocessorArgs(PreprocessorOptions &Opts,
- SmallVectorImpl<const char *> &Args,
- CompilerInvocation::StringAllocator SA,
+static void GeneratePreprocessorArgs(const PreprocessorOptions &Opts,
+ ArgumentConsumer Consumer,
const LangOptions &LangOpts,
const FrontendOptions &FrontendOpts,
const CodeGenOptions &CodeGenOpts) {
- PreprocessorOptions *PreprocessorOpts = &Opts;
+ const PreprocessorOptions *PreprocessorOpts = &Opts;
-#define PREPROCESSOR_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define PREPROCESSOR_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Consumer, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef PREPROCESSOR_OPTION_WITH_MARSHALLING
if (Opts.PCHWithHdrStop && !Opts.PCHWithHdrStopCreate)
- GenerateArg(Args, OPT_pch_through_hdrstop_use, SA);
+ GenerateArg(Consumer, OPT_pch_through_hdrstop_use);
for (const auto &D : Opts.DeserializedPCHDeclsToErrorOn)
- GenerateArg(Args, OPT_error_on_deserialized_pch_decl, D, SA);
+ GenerateArg(Consumer, OPT_error_on_deserialized_pch_decl, D);
if (Opts.PrecompiledPreambleBytes != std::make_pair(0u, false))
- GenerateArg(Args, OPT_preamble_bytes_EQ,
+ GenerateArg(Consumer, OPT_preamble_bytes_EQ,
Twine(Opts.PrecompiledPreambleBytes.first) + "," +
- (Opts.PrecompiledPreambleBytes.second ? "1" : "0"),
- SA);
+ (Opts.PrecompiledPreambleBytes.second ? "1" : "0"));
for (const auto &M : Opts.Macros) {
// Don't generate __CET__ macro definitions. They are implied by the
@@ -4137,7 +4341,7 @@ static void GeneratePreprocessorArgs(PreprocessorOptions &Opts,
CodeGenOpts.CFProtectionBranch)
continue;
- GenerateArg(Args, M.second ? OPT_U : OPT_D, M.first, SA);
+ GenerateArg(Consumer, M.second ? OPT_U : OPT_D, M.first);
}
for (const auto &I : Opts.Includes) {
@@ -4147,15 +4351,25 @@ static void GeneratePreprocessorArgs(PreprocessorOptions &Opts,
((LangOpts.DeclareOpenCLBuiltins && I == "opencl-c-base.h") ||
I == "opencl-c.h"))
continue;
+ // Don't generate HLSL includes. They are implied by other flags that are
+ // generated elsewhere.
+ if (LangOpts.HLSL && I == "hlsl.h")
+ continue;
- GenerateArg(Args, OPT_include, I, SA);
+ GenerateArg(Consumer, OPT_include, I);
}
for (const auto &CI : Opts.ChainedIncludes)
- GenerateArg(Args, OPT_chain_include, CI, SA);
+ GenerateArg(Consumer, OPT_chain_include, CI);
for (const auto &RF : Opts.RemappedFiles)
- GenerateArg(Args, OPT_remap_file, RF.first + ";" + RF.second, SA);
+ GenerateArg(Consumer, OPT_remap_file, RF.first + ";" + RF.second);
+
+ if (Opts.SourceDateEpoch)
+ GenerateArg(Consumer, OPT_source_date_epoch, Twine(*Opts.SourceDateEpoch));
+
+ if (Opts.DefineTargetOSMacros)
+ GenerateArg(Consumer, OPT_fdefine_target_os_macros);
// Don't handle LexEditorPlaceholders. It is implied by the action that is
// generated elsewhere.
@@ -4169,14 +4383,8 @@ static bool ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
PreprocessorOptions *PreprocessorOpts = &Opts;
-#define PREPROCESSOR_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define PREPROCESSOR_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef PREPROCESSOR_OPTION_WITH_MARSHALLING
@@ -4239,36 +4447,53 @@ static bool ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
Opts.addRemappedFile(Split.first, Split.second);
}
+ if (const Arg *A = Args.getLastArg(OPT_source_date_epoch)) {
+ StringRef Epoch = A->getValue();
+ // SOURCE_DATE_EPOCH, if specified, must be a non-negative decimal integer.
+ // On time64 systems, pick 253402300799 (the UNIX timestamp of
+ // 9999-12-31T23:59:59Z) as the upper bound.
+ const uint64_t MaxTimestamp =
+ std::min<uint64_t>(std::numeric_limits<time_t>::max(), 253402300799);
+ uint64_t V;
+ if (Epoch.getAsInteger(10, V) || V > MaxTimestamp) {
+ Diags.Report(diag::err_fe_invalid_source_date_epoch)
+ << Epoch << MaxTimestamp;
+ } else {
+ Opts.SourceDateEpoch = V;
+ }
+ }
+
// Always avoid lexing editor placeholders when we're just running the
// preprocessor as we never want to emit the
// "editor placeholder in source file" error in PP only mode.
if (isStrictlyPreprocessorAction(Action))
Opts.LexEditorPlaceholders = false;
+ Opts.DefineTargetOSMacros =
+ Args.hasFlag(OPT_fdefine_target_os_macros,
+ OPT_fno_define_target_os_macros, Opts.DefineTargetOSMacros);
+
return Diags.getNumErrors() == NumErrorsBefore;
}
-static void GeneratePreprocessorOutputArgs(
- const PreprocessorOutputOptions &Opts, SmallVectorImpl<const char *> &Args,
- CompilerInvocation::StringAllocator SA, frontend::ActionKind Action) {
+static void
+GeneratePreprocessorOutputArgs(const PreprocessorOutputOptions &Opts,
+ ArgumentConsumer Consumer,
+ frontend::ActionKind Action) {
const PreprocessorOutputOptions &PreprocessorOutputOpts = Opts;
-#define PREPROCESSOR_OUTPUT_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define PREPROCESSOR_OUTPUT_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Consumer, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef PREPROCESSOR_OUTPUT_OPTION_WITH_MARSHALLING
bool Generate_dM = isStrictlyPreprocessorAction(Action) && !Opts.ShowCPP;
if (Generate_dM)
- GenerateArg(Args, OPT_dM, SA);
+ GenerateArg(Consumer, OPT_dM);
if (!Generate_dM && Opts.ShowMacros)
- GenerateArg(Args, OPT_dD, SA);
+ GenerateArg(Consumer, OPT_dD);
+ if (Opts.DirectivesOnly)
+ GenerateArg(Consumer, OPT_fdirectives_only);
}
static bool ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
@@ -4278,41 +4503,32 @@ static bool ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
PreprocessorOutputOptions &PreprocessorOutputOpts = Opts;
-#define PREPROCESSOR_OUTPUT_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define PREPROCESSOR_OUTPUT_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef PREPROCESSOR_OUTPUT_OPTION_WITH_MARSHALLING
Opts.ShowCPP = isStrictlyPreprocessorAction(Action) && !Args.hasArg(OPT_dM);
Opts.ShowMacros = Args.hasArg(OPT_dM) || Args.hasArg(OPT_dD);
+ Opts.DirectivesOnly = Args.hasArg(OPT_fdirectives_only);
return Diags.getNumErrors() == NumErrorsBefore;
}
static void GenerateTargetArgs(const TargetOptions &Opts,
- SmallVectorImpl<const char *> &Args,
- CompilerInvocation::StringAllocator SA) {
+ ArgumentConsumer Consumer) {
const TargetOptions *TargetOpts = &Opts;
-#define TARGET_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING( \
- Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#define TARGET_OPTION_WITH_MARSHALLING(...) \
+ GENERATE_OPTION_WITH_MARSHALLING(Consumer, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef TARGET_OPTION_WITH_MARSHALLING
if (!Opts.SDKVersion.empty())
- GenerateArg(Args, OPT_target_sdk_version_EQ, Opts.SDKVersion.getAsString(),
- SA);
+ GenerateArg(Consumer, OPT_target_sdk_version_EQ,
+ Opts.SDKVersion.getAsString());
+ if (!Opts.DarwinTargetVariantSDKVersion.empty())
+ GenerateArg(Consumer, OPT_darwin_target_variant_sdk_version_EQ,
+ Opts.DarwinTargetVariantSDKVersion.getAsString());
}
static bool ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
@@ -4321,14 +4537,8 @@ static bool ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
TargetOptions *TargetOpts = &Opts;
-#define TARGET_OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING( \
- Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#define TARGET_OPTION_WITH_MARSHALLING(...) \
+ PARSE_OPTION_WITH_MARSHALLING(Args, Diags, __VA_ARGS__)
#include "clang/Driver/Options.inc"
#undef TARGET_OPTION_WITH_MARSHALLING
@@ -4340,6 +4550,15 @@ static bool ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
else
Opts.SDKVersion = Version;
}
+ if (Arg *A =
+ Args.getLastArg(options::OPT_darwin_target_variant_sdk_version_EQ)) {
+ llvm::VersionTuple Version;
+ if (Version.tryParse(A->getValue()))
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ else
+ Opts.DarwinTargetVariantSDKVersion = Version;
+ }
return Diags.getNumErrors() == NumErrorsBefore;
}
@@ -4351,11 +4570,11 @@ bool CompilerInvocation::CreateFromArgsImpl(
// Parse the arguments.
const OptTable &Opts = getDriverOptTable();
- const unsigned IncludedFlagsBitmask = options::CC1Option;
+ llvm::opt::Visibility VisibilityMask(options::CC1Option);
unsigned MissingArgIndex, MissingArgCount;
InputArgList Args = Opts.ParseArgs(CommandLineArgs, MissingArgIndex,
- MissingArgCount, IncludedFlagsBitmask);
- LangOptions &LangOpts = *Res.getLangOpts();
+ MissingArgCount, VisibilityMask);
+ LangOptions &LangOpts = Res.getLangOpts();
// Check for missing argument error.
if (MissingArgCount)
@@ -4366,7 +4585,7 @@ bool CompilerInvocation::CreateFromArgsImpl(
for (const auto *A : Args.filtered(OPT_UNKNOWN)) {
auto ArgString = A->getAsString(Args);
std::string Nearest;
- if (Opts.findNearest(ArgString, Nearest, IncludedFlagsBitmask) > 1)
+ if (Opts.findNearest(ArgString, Nearest, VisibilityMask) > 1)
Diags.Report(diag::err_drv_unknown_argument) << ArgString;
else
Diags.Report(diag::err_drv_unknown_argument_with_suggestion)
@@ -4375,7 +4594,7 @@ bool CompilerInvocation::CreateFromArgsImpl(
ParseFileSystemArgs(Res.getFileSystemOpts(), Args, Diags);
ParseMigratorArgs(Res.getMigratorOpts(), Args, Diags);
- ParseAnalyzerArgs(*Res.getAnalyzerOpts(), Args, Diags);
+ ParseAnalyzerArgs(Res.getAnalyzerOpts(), Args, Diags);
ParseDiagnosticArgs(Res.getDiagnosticOpts(), Args, &Diags,
/*DefaultDiagColor=*/false);
ParseFrontendArgs(Res.getFrontendOpts(), Args, Diags, LangOpts.IsHeaderFile);
@@ -4385,12 +4604,20 @@ bool CompilerInvocation::CreateFromArgsImpl(
llvm::Triple T(Res.getTargetOpts().Triple);
ParseHeaderSearchArgs(Res.getHeaderSearchOpts(), Args, Diags,
Res.getFileSystemOpts().WorkingDir);
+ ParseAPINotesArgs(Res.getAPINotesOpts(), Args, Diags);
ParseLangArgs(LangOpts, Args, DashX, T, Res.getPreprocessorOpts().Includes,
Diags);
if (Res.getFrontendOpts().ProgramAction == frontend::RewriteObjC)
LangOpts.ObjCExceptions = 1;
+ for (auto Warning : Res.getDiagnosticOpts().Warnings) {
+ if (Warning == "misexpect" &&
+ !Diags.isIgnored(diag::warn_profile_data_misexpect, SourceLocation())) {
+ Res.getCodeGenOpts().MisExpect = true;
+ }
+ }
+
if (LangOpts.CUDA) {
// During CUDA device-side compilation, the aux triple is the
// triple used for host compilation.
@@ -4399,7 +4626,7 @@ bool CompilerInvocation::CreateFromArgsImpl(
}
// Set the triple of the host for OpenMP device compile.
- if (LangOpts.OpenMPIsDevice)
+ if (LangOpts.OpenMPIsTargetDevice)
Res.getTargetOpts().HostTriple = Res.getFrontendOpts().AuxTriple;
ParseCodeGenArgs(Res.getCodeGenOpts(), Args, DashX, Diags, T,
@@ -4429,14 +4656,27 @@ bool CompilerInvocation::CreateFromArgsImpl(
// If sanitizer is enabled, disable OPT_ffine_grained_bitfield_accesses.
if (Res.getCodeGenOpts().FineGrainedBitfieldAccesses &&
- !Res.getLangOpts()->Sanitize.empty()) {
+ !Res.getLangOpts().Sanitize.empty()) {
Res.getCodeGenOpts().FineGrainedBitfieldAccesses = false;
Diags.Report(diag::warn_drv_fine_grained_bitfield_accesses_ignored);
}
// Store the command-line for using in the CodeView backend.
- Res.getCodeGenOpts().Argv0 = Argv0;
- Res.getCodeGenOpts().CommandLineArgs = CommandLineArgs;
+ if (Res.getCodeGenOpts().CodeViewCommandLine) {
+ Res.getCodeGenOpts().Argv0 = Argv0;
+ append_range(Res.getCodeGenOpts().CommandLineArgs, CommandLineArgs);
+ }
+
+ // Set PGOOptions. Need to create a temporary VFS to read the profile
+ // to determine the PGO type.
+ if (!Res.getCodeGenOpts().ProfileInstrumentUsePath.empty()) {
+ auto FS =
+ createVFSFromOverlayFiles(Res.getHeaderSearchOpts().VFSOverlayFiles,
+ Diags, llvm::vfs::getRealFileSystem());
+ setPGOUseInstrumentor(Res.getCodeGenOpts(),
+ Res.getCodeGenOpts().ProfileInstrumentUsePath, *FS,
+ Diags);
+ }
FixupInvocation(Res, Diags, Args, DashX);
@@ -4455,142 +4695,179 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Invocation,
return CreateFromArgsImpl(Invocation, CommandLineArgs, Diags, Argv0);
},
[](CompilerInvocation &Invocation, SmallVectorImpl<const char *> &Args,
- StringAllocator SA) { Invocation.generateCC1CommandLine(Args, SA); },
+ StringAllocator SA) {
+ Args.push_back("-cc1");
+ Invocation.generateCC1CommandLine(Args, SA);
+ },
Invocation, DummyInvocation, CommandLineArgs, Diags, Argv0);
}
std::string CompilerInvocation::getModuleHash() const {
+ // FIXME: Consider using SHA1 instead of MD5.
+ llvm::HashBuilder<llvm::MD5, llvm::endianness::native> HBuilder;
+
// Note: For QoI reasons, the things we use as a hash here should all be
// dumped via the -module-info flag.
- using llvm::hash_code;
- using llvm::hash_value;
- using llvm::hash_combine;
- using llvm::hash_combine_range;
// Start the signature with the compiler version.
- // FIXME: We'd rather use something more cryptographically sound than
- // CityHash, but this will do for now.
- hash_code code = hash_value(getClangFullRepositoryVersion());
+ HBuilder.add(getClangFullRepositoryVersion());
// Also include the serialization version, in case LLVM_APPEND_VC_REV is off
// and getClangFullRepositoryVersion() doesn't include git revision.
- code = hash_combine(code, serialization::VERSION_MAJOR,
- serialization::VERSION_MINOR);
+ HBuilder.add(serialization::VERSION_MAJOR, serialization::VERSION_MINOR);
// Extend the signature with the language options
-#define LANGOPT(Name, Bits, Default, Description) \
- code = hash_combine(code, LangOpts->Name);
-#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
- code = hash_combine(code, static_cast<unsigned>(LangOpts->get##Name()));
+#define LANGOPT(Name, Bits, Default, Description) HBuilder.add(LangOpts->Name);
+#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
+ HBuilder.add(static_cast<unsigned>(LangOpts->get##Name()));
#define BENIGN_LANGOPT(Name, Bits, Default, Description)
#define BENIGN_ENUM_LANGOPT(Name, Type, Bits, Default, Description)
#include "clang/Basic/LangOptions.def"
- for (StringRef Feature : LangOpts->ModuleFeatures)
- code = hash_combine(code, Feature);
+ HBuilder.addRange(getLangOpts().ModuleFeatures);
- code = hash_combine(code, LangOpts->ObjCRuntime);
- const auto &BCN = LangOpts->CommentOpts.BlockCommandNames;
- code = hash_combine(code, hash_combine_range(BCN.begin(), BCN.end()));
+ HBuilder.add(getLangOpts().ObjCRuntime);
+ HBuilder.addRange(getLangOpts().CommentOpts.BlockCommandNames);
// Extend the signature with the target options.
- code = hash_combine(code, TargetOpts->Triple, TargetOpts->CPU,
- TargetOpts->TuneCPU, TargetOpts->ABI);
- for (const auto &FeatureAsWritten : TargetOpts->FeaturesAsWritten)
- code = hash_combine(code, FeatureAsWritten);
+ HBuilder.add(getTargetOpts().Triple, getTargetOpts().CPU,
+ getTargetOpts().TuneCPU, getTargetOpts().ABI);
+ HBuilder.addRange(getTargetOpts().FeaturesAsWritten);
// Extend the signature with preprocessor options.
const PreprocessorOptions &ppOpts = getPreprocessorOpts();
- const HeaderSearchOptions &hsOpts = getHeaderSearchOpts();
- code = hash_combine(code, ppOpts.UsePredefines, ppOpts.DetailedRecord);
+ HBuilder.add(ppOpts.UsePredefines, ppOpts.DetailedRecord);
- for (const auto &I : getPreprocessorOpts().Macros) {
+ const HeaderSearchOptions &hsOpts = getHeaderSearchOpts();
+ for (const auto &Macro : getPreprocessorOpts().Macros) {
// If we're supposed to ignore this macro for the purposes of modules,
// don't put it into the hash.
if (!hsOpts.ModulesIgnoreMacros.empty()) {
// Check whether we're ignoring this macro.
- StringRef MacroDef = I.first;
+ StringRef MacroDef = Macro.first;
if (hsOpts.ModulesIgnoreMacros.count(
llvm::CachedHashString(MacroDef.split('=').first)))
continue;
}
- code = hash_combine(code, I.first, I.second);
+ HBuilder.add(Macro);
}
// Extend the signature with the sysroot and other header search options.
- code = hash_combine(code, hsOpts.Sysroot,
- hsOpts.ModuleFormat,
- hsOpts.UseDebugInfo,
- hsOpts.UseBuiltinIncludes,
- hsOpts.UseStandardSystemIncludes,
- hsOpts.UseStandardCXXIncludes,
- hsOpts.UseLibcxx,
- hsOpts.ModulesValidateDiagnosticOptions);
- code = hash_combine(code, hsOpts.ResourceDir);
+ HBuilder.add(hsOpts.Sysroot, hsOpts.ModuleFormat, hsOpts.UseDebugInfo,
+ hsOpts.UseBuiltinIncludes, hsOpts.UseStandardSystemIncludes,
+ hsOpts.UseStandardCXXIncludes, hsOpts.UseLibcxx,
+ hsOpts.ModulesValidateDiagnosticOptions);
+ HBuilder.add(hsOpts.ResourceDir);
if (hsOpts.ModulesStrictContextHash) {
- hash_code SHPC = hash_combine_range(hsOpts.SystemHeaderPrefixes.begin(),
- hsOpts.SystemHeaderPrefixes.end());
- hash_code UEC = hash_combine_range(hsOpts.UserEntries.begin(),
- hsOpts.UserEntries.end());
- code = hash_combine(code, hsOpts.SystemHeaderPrefixes.size(), SHPC,
- hsOpts.UserEntries.size(), UEC);
+ HBuilder.addRange(hsOpts.SystemHeaderPrefixes);
+ HBuilder.addRange(hsOpts.UserEntries);
const DiagnosticOptions &diagOpts = getDiagnosticOpts();
- #define DIAGOPT(Name, Bits, Default) \
- code = hash_combine(code, diagOpts.Name);
- #define ENUM_DIAGOPT(Name, Type, Bits, Default) \
- code = hash_combine(code, diagOpts.get##Name());
- #include "clang/Basic/DiagnosticOptions.def"
- #undef DIAGOPT
- #undef ENUM_DIAGOPT
+#define DIAGOPT(Name, Bits, Default) HBuilder.add(diagOpts.Name);
+#define ENUM_DIAGOPT(Name, Type, Bits, Default) \
+ HBuilder.add(diagOpts.get##Name());
+#include "clang/Basic/DiagnosticOptions.def"
+#undef DIAGOPT
+#undef ENUM_DIAGOPT
}
// Extend the signature with the user build path.
- code = hash_combine(code, hsOpts.ModuleUserBuildPath);
+ HBuilder.add(hsOpts.ModuleUserBuildPath);
// Extend the signature with the module file extensions.
- const FrontendOptions &frontendOpts = getFrontendOpts();
- for (const auto &ext : frontendOpts.ModuleFileExtensions) {
- code = ext->hashExtension(code);
+ for (const auto &ext : getFrontendOpts().ModuleFileExtensions)
+ ext->hashExtension(HBuilder);
+
+ // Extend the signature with the Swift version for API notes.
+ const APINotesOptions &APINotesOpts = getAPINotesOpts();
+ if (!APINotesOpts.SwiftVersion.empty()) {
+ HBuilder.add(APINotesOpts.SwiftVersion.getMajor());
+ if (auto Minor = APINotesOpts.SwiftVersion.getMinor())
+ HBuilder.add(*Minor);
+ if (auto Subminor = APINotesOpts.SwiftVersion.getSubminor())
+ HBuilder.add(*Subminor);
+ if (auto Build = APINotesOpts.SwiftVersion.getBuild())
+ HBuilder.add(*Build);
}
// When compiling with -gmodules, also hash -fdebug-prefix-map as it
// affects the debug info in the PCM.
if (getCodeGenOpts().DebugTypeExtRefs)
- for (const auto &KeyValue : getCodeGenOpts().DebugPrefixMap)
- code = hash_combine(code, KeyValue.first, KeyValue.second);
+ HBuilder.addRange(getCodeGenOpts().DebugPrefixMap);
+
+ // Extend the signature with the affecting debug options.
+ if (getHeaderSearchOpts().ModuleFormat == "obj") {
+#define DEBUGOPT(Name, Bits, Default) HBuilder.add(CodeGenOpts->Name);
+#define VALUE_DEBUGOPT(Name, Bits, Default) HBuilder.add(CodeGenOpts->Name);
+#define ENUM_DEBUGOPT(Name, Type, Bits, Default) \
+ HBuilder.add(static_cast<unsigned>(CodeGenOpts->get##Name()));
+#define BENIGN_DEBUGOPT(Name, Bits, Default)
+#define BENIGN_VALUE_DEBUGOPT(Name, Bits, Default)
+#define BENIGN_ENUM_DEBUGOPT(Name, Type, Bits, Default)
+#include "clang/Basic/DebugOptions.def"
+ }
// Extend the signature with the enabled sanitizers, if at least one is
// enabled. Sanitizers which cannot affect AST generation aren't hashed.
- SanitizerSet SanHash = LangOpts->Sanitize;
+ SanitizerSet SanHash = getLangOpts().Sanitize;
SanHash.clear(getPPTransparentSanitizers());
if (!SanHash.empty())
- code = hash_combine(code, SanHash.Mask);
+ HBuilder.add(SanHash.Mask);
- return toString(llvm::APInt(64, code), 36, /*Signed=*/false);
+ llvm::MD5::MD5Result Result;
+ HBuilder.getHasher().final(Result);
+ uint64_t Hash = Result.high() ^ Result.low();
+ return toString(llvm::APInt(64, Hash), 36, /*Signed=*/false);
}
-void CompilerInvocation::generateCC1CommandLine(
- SmallVectorImpl<const char *> &Args, StringAllocator SA) const {
- llvm::Triple T(TargetOpts->Triple);
-
- GenerateFileSystemArgs(FileSystemOpts, Args, SA);
- GenerateMigratorArgs(MigratorOpts, Args, SA);
- GenerateAnalyzerArgs(*AnalyzerOpts, Args, SA);
- GenerateDiagnosticArgs(*DiagnosticOpts, Args, SA, false);
- GenerateFrontendArgs(FrontendOpts, Args, SA, LangOpts->IsHeaderFile);
- GenerateTargetArgs(*TargetOpts, Args, SA);
- GenerateHeaderSearchArgs(*HeaderSearchOpts, Args, SA);
- GenerateLangArgs(*LangOpts, Args, SA, T, FrontendOpts.DashX);
- GenerateCodeGenArgs(CodeGenOpts, Args, SA, T, FrontendOpts.OutputFile,
- &*LangOpts);
- GeneratePreprocessorArgs(*PreprocessorOpts, Args, SA, *LangOpts, FrontendOpts,
- CodeGenOpts);
- GeneratePreprocessorOutputArgs(PreprocessorOutputOpts, Args, SA,
- FrontendOpts.ProgramAction);
- GenerateDependencyOutputArgs(DependencyOutputOpts, Args, SA);
+void CompilerInvocationBase::generateCC1CommandLine(
+ ArgumentConsumer Consumer) const {
+ llvm::Triple T(getTargetOpts().Triple);
+
+ GenerateFileSystemArgs(getFileSystemOpts(), Consumer);
+ GenerateMigratorArgs(getMigratorOpts(), Consumer);
+ GenerateAnalyzerArgs(getAnalyzerOpts(), Consumer);
+ GenerateDiagnosticArgs(getDiagnosticOpts(), Consumer,
+ /*DefaultDiagColor=*/false);
+ GenerateFrontendArgs(getFrontendOpts(), Consumer, getLangOpts().IsHeaderFile);
+ GenerateTargetArgs(getTargetOpts(), Consumer);
+ GenerateHeaderSearchArgs(getHeaderSearchOpts(), Consumer);
+ GenerateAPINotesArgs(getAPINotesOpts(), Consumer);
+ GenerateLangArgs(getLangOpts(), Consumer, T, getFrontendOpts().DashX);
+ GenerateCodeGenArgs(getCodeGenOpts(), Consumer, T,
+ getFrontendOpts().OutputFile, &getLangOpts());
+ GeneratePreprocessorArgs(getPreprocessorOpts(), Consumer, getLangOpts(),
+ getFrontendOpts(), getCodeGenOpts());
+ GeneratePreprocessorOutputArgs(getPreprocessorOutputOpts(), Consumer,
+ getFrontendOpts().ProgramAction);
+ GenerateDependencyOutputArgs(getDependencyOutputOpts(), Consumer);
+}
+
+std::vector<std::string> CompilerInvocationBase::getCC1CommandLine() const {
+ std::vector<std::string> Args{"-cc1"};
+ generateCC1CommandLine(
+ [&Args](const Twine &Arg) { Args.push_back(Arg.str()); });
+ return Args;
+}
+
+void CompilerInvocation::resetNonModularOptions() {
+ getLangOpts().resetNonModularOptions();
+ getPreprocessorOpts().resetNonModularOptions();
+ getCodeGenOpts().resetNonModularOptions(getHeaderSearchOpts().ModuleFormat);
+}
+
+void CompilerInvocation::clearImplicitModuleBuildOptions() {
+ getLangOpts().ImplicitModules = false;
+ getHeaderSearchOpts().ImplicitModuleMaps = false;
+ getHeaderSearchOpts().ModuleCachePath.clear();
+ getHeaderSearchOpts().ModulesValidateOncePerBuildSession = false;
+ getHeaderSearchOpts().BuildSessionTimestamp = 0;
+ // The specific values we canonicalize to for pruning don't affect behaviour,
+ /// so use the default values so they may be dropped from the command-line.
+ getHeaderSearchOpts().ModuleCachePruneInterval = 7 * 24 * 60 * 60;
+ getHeaderSearchOpts().ModuleCachePruneAfter = 31 * 24 * 60 * 60;
}
IntrusiveRefCntPtr<llvm::vfs::FileSystem>
@@ -4604,12 +4881,19 @@ IntrusiveRefCntPtr<llvm::vfs::FileSystem>
clang::createVFSFromCompilerInvocation(
const CompilerInvocation &CI, DiagnosticsEngine &Diags,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> BaseFS) {
- if (CI.getHeaderSearchOpts().VFSOverlayFiles.empty())
+ return createVFSFromOverlayFiles(CI.getHeaderSearchOpts().VFSOverlayFiles,
+ Diags, std::move(BaseFS));
+}
+
+IntrusiveRefCntPtr<llvm::vfs::FileSystem> clang::createVFSFromOverlayFiles(
+ ArrayRef<std::string> VFSOverlayFiles, DiagnosticsEngine &Diags,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> BaseFS) {
+ if (VFSOverlayFiles.empty())
return BaseFS;
IntrusiveRefCntPtr<llvm::vfs::FileSystem> Result = BaseFS;
// earlier vfs files are on the bottom
- for (const auto &File : CI.getHeaderSearchOpts().VFSOverlayFiles) {
+ for (const auto &File : VFSOverlayFiles) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Buffer =
Result->getBufferForFile(File);
if (!Buffer) {
diff --git a/contrib/llvm-project/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp b/contrib/llvm-project/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
index 2e23ebfdf160..1df3a12fce14 100644
--- a/contrib/llvm-project/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
@@ -22,19 +22,17 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Option/ArgList.h"
-#include "llvm/Support/Host.h"
+#include "llvm/TargetParser/Host.h"
using namespace clang;
using namespace llvm::opt;
-std::unique_ptr<CompilerInvocation> clang::createInvocationFromCommandLine(
- ArrayRef<const char *> ArgList, IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
- IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS, bool ShouldRecoverOnErorrs,
- std::vector<std::string> *CC1Args) {
- if (!Diags.get()) {
- // No diagnostics engine was provided, so create our own diagnostics object
- // with the default options.
- Diags = CompilerInstance::createDiagnostics(new DiagnosticOptions);
- }
+std::unique_ptr<CompilerInvocation>
+clang::createInvocation(ArrayRef<const char *> ArgList,
+ CreateInvocationOptions Opts) {
+ assert(!ArgList.empty());
+ auto Diags = Opts.Diags
+ ? std::move(Opts.Diags)
+ : CompilerInstance::createDiagnostics(new DiagnosticOptions);
SmallVector<const char *, 16> Args(ArgList.begin(), ArgList.end());
@@ -46,15 +44,19 @@ std::unique_ptr<CompilerInvocation> clang::createInvocationFromCommandLine(
// FIXME: We shouldn't have to pass in the path info.
driver::Driver TheDriver(Args[0], llvm::sys::getDefaultTargetTriple(), *Diags,
- "clang LLVM compiler", VFS);
+ "clang LLVM compiler", Opts.VFS);
// Don't check that inputs exist, they may have been remapped.
TheDriver.setCheckInputsExist(false);
+ TheDriver.setProbePrecompiled(Opts.ProbePrecompiled);
std::unique_ptr<driver::Compilation> C(TheDriver.BuildCompilation(Args));
if (!C)
return nullptr;
+ if (C->getArgs().hasArg(driver::options::OPT_fdriver_only))
+ return nullptr;
+
// Just print the cc1 options if -### was present.
if (C->getArgs().hasArg(driver::options::OPT__HASH_HASH_HASH)) {
C->getJobs().Print(llvm::errs(), "\n", true);
@@ -79,27 +81,29 @@ std::unique_ptr<CompilerInvocation> clang::createInvocationFromCommandLine(
}
}
}
- if (Jobs.size() == 0 || !isa<driver::Command>(*Jobs.begin()) ||
- (Jobs.size() > 1 && !OffloadCompilation)) {
+
+ bool PickFirstOfMany = OffloadCompilation || Opts.RecoverOnError;
+ if (Jobs.size() == 0 || (Jobs.size() > 1 && !PickFirstOfMany)) {
SmallString<256> Msg;
llvm::raw_svector_ostream OS(Msg);
Jobs.Print(OS, "; ", true);
Diags->Report(diag::err_fe_expected_compiler_job) << OS.str();
return nullptr;
}
-
- const driver::Command &Cmd = cast<driver::Command>(*Jobs.begin());
- if (StringRef(Cmd.getCreator().getName()) != "clang") {
+ auto Cmd = llvm::find_if(Jobs, [](const driver::Command &Cmd) {
+ return StringRef(Cmd.getCreator().getName()) == "clang";
+ });
+ if (Cmd == Jobs.end()) {
Diags->Report(diag::err_fe_expected_clang_command);
return nullptr;
}
- const ArgStringList &CCArgs = Cmd.getArguments();
- if (CC1Args)
- *CC1Args = {CCArgs.begin(), CCArgs.end()};
+ const ArgStringList &CCArgs = Cmd->getArguments();
+ if (Opts.CC1Args)
+ *Opts.CC1Args = {CCArgs.begin(), CCArgs.end()};
auto CI = std::make_unique<CompilerInvocation>();
if (!CompilerInvocation::CreateFromArgs(*CI, CCArgs, *Diags, Args[0]) &&
- !ShouldRecoverOnErorrs)
+ !Opts.RecoverOnError)
return nullptr;
return CI;
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp b/contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp
index 288827374106..19abcac2befb 100644
--- a/contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp
@@ -21,33 +21,31 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Serialization/ASTReader.h"
#include "llvm/ADT/StringSet.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
+#include <optional>
using namespace clang;
namespace {
struct DepCollectorPPCallbacks : public PPCallbacks {
DependencyCollector &DepCollector;
- SourceManager &SM;
- DiagnosticsEngine &Diags;
- DepCollectorPPCallbacks(DependencyCollector &L, SourceManager &SM,
- DiagnosticsEngine &Diags)
- : DepCollector(L), SM(SM), Diags(Diags) {}
-
- void FileChanged(SourceLocation Loc, FileChangeReason Reason,
- SrcMgr::CharacteristicKind FileType,
- FileID PrevFID) override {
- if (Reason != PPCallbacks::EnterFile)
+ Preprocessor &PP;
+ DepCollectorPPCallbacks(DependencyCollector &L, Preprocessor &PP)
+ : DepCollector(L), PP(PP) {}
+
+ void LexedFileChanged(FileID FID, LexedFileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType, FileID PrevFID,
+ SourceLocation Loc) override {
+ if (Reason != PPCallbacks::LexedFileChangeReason::EnterFile)
return;
// Dependency generation really does want to go all the way to the
// file entry for a source location to find out what is depended on.
// We do not want #line markers to affect dependency generation!
- if (Optional<StringRef> Filename = SM.getNonBuiltinFilenameForID(
- SM.getFileID(SM.getExpansionLoc(Loc))))
+ if (std::optional<StringRef> Filename =
+ PP.getSourceManager().getNonBuiltinFilenameForID(FID))
DepCollector.maybeAddDependency(
llvm::sys::path::remove_leading_dotslash(*Filename),
/*FromModule*/ false, isSystem(FileType), /*IsModuleFile*/ false,
@@ -66,19 +64,20 @@ struct DepCollectorPPCallbacks : public PPCallbacks {
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
- CharSourceRange FilenameRange, const FileEntry *File,
- StringRef SearchPath, StringRef RelativePath,
- const Module *Imported,
+ CharSourceRange FilenameRange,
+ OptionalFileEntryRef File, StringRef SearchPath,
+ StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) override {
if (!File)
- DepCollector.maybeAddDependency(FileName, /*FromModule*/false,
- /*IsSystem*/false, /*IsModuleFile*/false,
- /*IsMissing*/true);
+ DepCollector.maybeAddDependency(FileName, /*FromModule*/ false,
+ /*IsSystem*/ false,
+ /*IsModuleFile*/ false,
+ /*IsMissing*/ true);
// Files that actually exist are handled by FileChanged.
}
void HasInclude(SourceLocation Loc, StringRef SpelledFilename, bool IsAngled,
- Optional<FileEntryRef> File,
+ OptionalFileEntryRef File,
SrcMgr::CharacteristicKind FileType) override {
if (!File)
return;
@@ -90,43 +89,53 @@ struct DepCollectorPPCallbacks : public PPCallbacks {
/*IsMissing=*/false);
}
- void EndOfMainFile() override { DepCollector.finishedMainFile(Diags); }
+ void EndOfMainFile() override {
+ DepCollector.finishedMainFile(PP.getDiagnostics());
+ }
};
struct DepCollectorMMCallbacks : public ModuleMapCallbacks {
DependencyCollector &DepCollector;
DepCollectorMMCallbacks(DependencyCollector &DC) : DepCollector(DC) {}
- void moduleMapFileRead(SourceLocation Loc, const FileEntry &Entry,
+ void moduleMapFileRead(SourceLocation Loc, FileEntryRef Entry,
bool IsSystem) override {
StringRef Filename = Entry.getName();
- DepCollector.maybeAddDependency(Filename, /*FromModule*/false,
- /*IsSystem*/IsSystem,
- /*IsModuleFile*/false,
- /*IsMissing*/false);
+ DepCollector.maybeAddDependency(Filename, /*FromModule*/ false,
+ /*IsSystem*/ IsSystem,
+ /*IsModuleFile*/ false,
+ /*IsMissing*/ false);
}
};
struct DepCollectorASTListener : public ASTReaderListener {
DependencyCollector &DepCollector;
- DepCollectorASTListener(DependencyCollector &L) : DepCollector(L) { }
+ FileManager &FileMgr;
+ DepCollectorASTListener(DependencyCollector &L, FileManager &FileMgr)
+ : DepCollector(L), FileMgr(FileMgr) {}
bool needsInputFileVisitation() override { return true; }
bool needsSystemInputFileVisitation() override {
return DepCollector.needSystemDependencies();
}
void visitModuleFile(StringRef Filename,
serialization::ModuleKind Kind) override {
- DepCollector.maybeAddDependency(Filename, /*FromModule*/true,
- /*IsSystem*/false, /*IsModuleFile*/true,
- /*IsMissing*/false);
+ DepCollector.maybeAddDependency(Filename, /*FromModule*/ true,
+ /*IsSystem*/ false, /*IsModuleFile*/ true,
+ /*IsMissing*/ false);
}
bool visitInputFile(StringRef Filename, bool IsSystem,
bool IsOverridden, bool IsExplicitModule) override {
if (IsOverridden || IsExplicitModule)
return true;
- DepCollector.maybeAddDependency(Filename, /*FromModule*/true, IsSystem,
- /*IsModuleFile*/false, /*IsMissing*/false);
+ // Run this through the FileManager in order to respect 'use-external-name'
+ // in case we have a VFS overlay.
+ if (auto FE = FileMgr.getOptionalFileRef(Filename))
+ Filename = FE->getName();
+
+ DepCollector.maybeAddDependency(Filename, /*FromModule*/ true, IsSystem,
+ /*IsModuleFile*/ false,
+ /*IsMissing*/ false);
return true;
}
};
@@ -160,10 +169,7 @@ bool DependencyCollector::addDependency(StringRef Filename) {
}
static bool isSpecialFilename(StringRef Filename) {
- return llvm::StringSwitch<bool>(Filename)
- .Case("<built-in>", true)
- .Case("<stdin>", true)
- .Default(false);
+ return Filename == "<built-in>";
}
bool DependencyCollector::sawDependency(StringRef Filename, bool FromModule,
@@ -175,13 +181,13 @@ bool DependencyCollector::sawDependency(StringRef Filename, bool FromModule,
DependencyCollector::~DependencyCollector() { }
void DependencyCollector::attachToPreprocessor(Preprocessor &PP) {
- PP.addPPCallbacks(std::make_unique<DepCollectorPPCallbacks>(
- *this, PP.getSourceManager(), PP.getDiagnostics()));
+ PP.addPPCallbacks(std::make_unique<DepCollectorPPCallbacks>(*this, PP));
PP.getHeaderSearchInfo().getModuleMap().addModuleMapCallbacks(
std::make_unique<DepCollectorMMCallbacks>(*this));
}
void DependencyCollector::attachToASTReader(ASTReader &R) {
- R.addListener(std::make_unique<DepCollectorASTListener>(*this));
+ R.addListener(
+ std::make_unique<DepCollectorASTListener>(*this, R.getFileManager()));
}
DependencyFileGenerator::DependencyFileGenerator(
@@ -330,7 +336,7 @@ void DependencyFileGenerator::outputDependencyFile(DiagnosticsEngine &Diags) {
void DependencyFileGenerator::outputDependencyFile(llvm::raw_ostream &OS) {
// Write out the dependency targets, trying to avoid overly long
// lines when possible. We try our best to emit exactly the same
- // dependency file as GCC (4.2), assuming the included files are the
+ // dependency file as GCC>=10, assuming the included files are the
// same.
const unsigned MaxColumns = 75;
unsigned Columns = 0;
@@ -357,6 +363,8 @@ void DependencyFileGenerator::outputDependencyFile(llvm::raw_ostream &OS) {
// duplicates.
ArrayRef<std::string> Files = getDependencies();
for (StringRef File : Files) {
+ if (File == "<stdin>")
+ continue;
// Start a new line if this would exceed the column limit. Make
// sure to leave space for a trailing " \" in case we need to
// break the line on the next iteration.
@@ -377,7 +385,6 @@ void DependencyFileGenerator::outputDependencyFile(llvm::raw_ostream &OS) {
for (auto I = Files.begin(), E = Files.end(); I != E; ++I) {
if (Index++ == InputFileIndex)
continue;
- OS << '\n';
PrintFilename(OS, *I, OutputFormat);
OS << ":\n";
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/DependencyGraph.cpp b/contrib/llvm-project/clang/lib/Frontend/DependencyGraph.cpp
index 4bed4e2d4403..b471471f3528 100644
--- a/contrib/llvm-project/clang/lib/Frontend/DependencyGraph.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/DependencyGraph.cpp
@@ -29,9 +29,9 @@ class DependencyGraphCallback : public PPCallbacks {
const Preprocessor *PP;
std::string OutputFile;
std::string SysRoot;
- llvm::SetVector<const FileEntry *> AllFiles;
- typedef llvm::DenseMap<const FileEntry *,
- SmallVector<const FileEntry *, 2> > DependencyMap;
+ llvm::SetVector<FileEntryRef> AllFiles;
+ using DependencyMap =
+ llvm::DenseMap<FileEntryRef, SmallVector<FileEntryRef, 2>>;
DependencyMap Dependencies;
@@ -47,9 +47,9 @@ public:
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
- CharSourceRange FilenameRange, const FileEntry *File,
- StringRef SearchPath, StringRef RelativePath,
- const Module *Imported,
+ CharSourceRange FilenameRange,
+ OptionalFileEntryRef File, StringRef SearchPath,
+ StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) override;
void EndOfMainFile() override {
@@ -66,29 +66,23 @@ void clang::AttachDependencyGraphGen(Preprocessor &PP, StringRef OutputFile,
}
void DependencyGraphCallback::InclusionDirective(
- SourceLocation HashLoc,
- const Token &IncludeTok,
- StringRef FileName,
- bool IsAngled,
- CharSourceRange FilenameRange,
- const FileEntry *File,
- StringRef SearchPath,
- StringRef RelativePath,
- const Module *Imported,
+ SourceLocation HashLoc, const Token &IncludeTok, StringRef FileName,
+ bool IsAngled, CharSourceRange FilenameRange, OptionalFileEntryRef File,
+ StringRef SearchPath, StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) {
if (!File)
return;
SourceManager &SM = PP->getSourceManager();
- const FileEntry *FromFile
- = SM.getFileEntryForID(SM.getFileID(SM.getExpansionLoc(HashLoc)));
+ OptionalFileEntryRef FromFile =
+ SM.getFileEntryRefForID(SM.getFileID(SM.getExpansionLoc(HashLoc)));
if (!FromFile)
return;
- Dependencies[FromFile].push_back(File);
+ Dependencies[*FromFile].push_back(*File);
- AllFiles.insert(File);
- AllFiles.insert(FromFile);
+ AllFiles.insert(*File);
+ AllFiles.insert(*FromFile);
}
raw_ostream &
@@ -115,9 +109,8 @@ void DependencyGraphCallback::OutputGraphFile() {
OS.indent(2);
writeNodeReference(OS, AllFiles[I]);
OS << " [ shape=\"box\", label=\"";
- StringRef FileName = AllFiles[I]->getName();
- if (FileName.startswith(SysRoot))
- FileName = FileName.substr(SysRoot.size());
+ StringRef FileName = AllFiles[I].getName();
+ FileName.consume_front(SysRoot);
OS << DOT::EscapeString(std::string(FileName)) << "\"];\n";
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/DiagnosticRenderer.cpp b/contrib/llvm-project/clang/lib/Frontend/DiagnosticRenderer.cpp
index 0afc8f3b1dab..18c8be7a7293 100644
--- a/contrib/llvm-project/clang/lib/Frontend/DiagnosticRenderer.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/DiagnosticRenderer.cpp
@@ -18,7 +18,6 @@
#include "clang/Lex/Lexer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
@@ -148,7 +147,7 @@ void DiagnosticRenderer::emitStoredDiagnostic(StoredDiagnostic &Diag) {
void DiagnosticRenderer::emitBasicNote(StringRef Message) {
emitDiagnosticMessage(FullSourceLoc(), PresumedLoc(), DiagnosticsEngine::Note,
- Message, None, DiagOrStoredDiag());
+ Message, std::nullopt, DiagOrStoredDiag());
}
/// Prints an include stack when appropriate for a particular
@@ -453,7 +452,7 @@ void DiagnosticRenderer::emitSingleMacroExpansion(
Message << "expanded from macro '" << MacroName << "'";
emitDiagnostic(SpellingLoc, DiagnosticsEngine::Note, Message.str(),
- SpellingRanges, None);
+ SpellingRanges, std::nullopt);
}
/// Check that the macro argument location of Loc starts with ArgumentLoc.
@@ -494,20 +493,18 @@ static bool checkRangesForMacroArgExpansion(FullSourceLoc Loc,
SmallVector<CharSourceRange, 4> SpellingRanges;
mapDiagnosticRanges(Loc, Ranges, SpellingRanges);
- /// Count all valid ranges.
- unsigned ValidCount = 0;
- for (const auto &Range : Ranges)
- if (Range.isValid())
- ValidCount++;
+ // Count all valid ranges.
+ unsigned ValidCount =
+ llvm::count_if(Ranges, [](const auto &R) { return R.isValid(); });
if (ValidCount > SpellingRanges.size())
return false;
- /// To store the source location of the argument location.
+ // To store the source location of the argument location.
FullSourceLoc ArgumentLoc;
- /// Set the ArgumentLoc to the beginning location of the expansion of Loc
- /// so to check if the ranges expands to the same beginning location.
+ // Set the ArgumentLoc to the beginning location of the expansion of Loc
+ // so to check if the ranges expands to the same beginning location.
if (!Loc.isMacroArgExpansion(&ArgumentLoc))
return false;
diff --git a/contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp b/contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp
index c996c9c486bc..eff785b99a09 100644
--- a/contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp
@@ -11,28 +11,37 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclGroup.h"
#include "clang/Basic/Builtins.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/FileEntry.h"
#include "clang/Basic/LangStandard.h"
+#include "clang/Basic/Sarif.h"
+#include "clang/Basic/Stack.h"
#include "clang/Frontend/ASTUnit.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/FrontendPluginRegistry.h"
#include "clang/Frontend/LayoutOverrideSource.h"
#include "clang/Frontend/MultiplexConsumer.h"
+#include "clang/Frontend/SARIFDiagnosticPrinter.h"
#include "clang/Frontend/Utils.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/LiteralSupport.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Parse/ParseAST.h"
+#include "clang/Sema/HLSLExternalSemaSource.h"
+#include "clang/Sema/MultiplexExternalSemaSource.h"
#include "clang/Serialization/ASTDeserializationListener.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/GlobalModuleIndex.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/Support/BuryPointer.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
+#include <memory>
#include <system_error>
using namespace clang;
@@ -53,6 +62,11 @@ public:
delete Previous;
}
+ DelegatingDeserializationListener(const DelegatingDeserializationListener &) =
+ delete;
+ DelegatingDeserializationListener &
+ operator=(const DelegatingDeserializationListener &) = delete;
+
void ReaderInitialized(ASTReader *Reader) override {
if (Previous)
Previous->ReaderInitialized(Reader);
@@ -143,7 +157,7 @@ void FrontendAction::setCurrentInput(const FrontendInputFile &CurrentInput,
Module *FrontendAction::getCurrentModule() const {
CompilerInstance &CI = getCompilerInstance();
return CI.getPreprocessor().getHeaderSearchInfo().lookupModule(
- CI.getLangOpts().CurrentModule, /*AllowSearch*/false);
+ CI.getLangOpts().CurrentModule, SourceLocation(), /*AllowSearch*/false);
}
std::unique_ptr<ASTConsumer>
@@ -186,14 +200,17 @@ FrontendAction::CreateWrappedASTConsumer(CompilerInstance &CI,
FrontendPluginRegistry::entries()) {
std::unique_ptr<PluginASTAction> P = Plugin.instantiate();
PluginASTAction::ActionType ActionType = P->getActionType();
- if (ActionType == PluginASTAction::Cmdline) {
+ if (ActionType == PluginASTAction::CmdlineAfterMainAction ||
+ ActionType == PluginASTAction::CmdlineBeforeMainAction) {
// This is O(|plugins| * |add_plugins|), but since both numbers are
// way below 50 in practice, that's ok.
- if (llvm::any_of(CI.getFrontendOpts().AddPluginActions,
- [&](const std::string &PluginAction) {
- return PluginAction == Plugin.getName();
- }))
- ActionType = PluginASTAction::AddAfterMainAction;
+ if (llvm::is_contained(CI.getFrontendOpts().AddPluginActions,
+ Plugin.getName())) {
+ if (ActionType == PluginASTAction::CmdlineBeforeMainAction)
+ ActionType = PluginASTAction::AddBeforeMainAction;
+ else
+ ActionType = PluginASTAction::AddAfterMainAction;
+ }
}
if ((ActionType == PluginASTAction::AddBeforeMainAction ||
ActionType == PluginASTAction::AddAfterMainAction) &&
@@ -211,8 +228,13 @@ FrontendAction::CreateWrappedASTConsumer(CompilerInstance &CI,
// Add to Consumers the main consumer, then all the plugins that go after it
Consumers.push_back(std::move(Consumer));
- for (auto &C : AfterConsumers) {
- Consumers.push_back(std::move(C));
+ if (!AfterConsumers.empty()) {
+ // If we have plugins after the main consumer, which may be the codegen
+ // action, they likely will need the ASTContext, so don't clear it in the
+ // codegen action.
+ CI.getCodeGenOpts().ClearASTBeforeBackend = false;
+ for (auto &C : AfterConsumers)
+ Consumers.push_back(std::move(C));
}
return std::make_unique<MultiplexConsumer>(std::move(Consumers));
@@ -320,7 +342,7 @@ static std::error_code collectModuleHeaderIncludes(
return std::error_code();
// Resolve all lazy header directives to header files.
- ModMap.resolveHeaderDirectives(Module);
+ ModMap.resolveHeaderDirectives(Module, /*File=*/std::nullopt);
// If any headers are missing, we can't build this module. In most cases,
// diagnostics for this should have already been produced; we only get here
@@ -348,20 +370,22 @@ static std::error_code collectModuleHeaderIncludes(
}
// Note that Module->PrivateHeaders will not be a TopHeader.
- if (Module::Header UmbrellaHeader = Module->getUmbrellaHeader()) {
- Module->addTopHeader(UmbrellaHeader.Entry);
+ if (std::optional<Module::Header> UmbrellaHeader =
+ Module->getUmbrellaHeaderAsWritten()) {
+ Module->addTopHeader(UmbrellaHeader->Entry);
if (Module->Parent)
// Include the umbrella header for submodules.
- addHeaderInclude(UmbrellaHeader.PathRelativeToRootModuleDirectory,
+ addHeaderInclude(UmbrellaHeader->PathRelativeToRootModuleDirectory,
Includes, LangOpts, Module->IsExternC);
- } else if (Module::DirectoryName UmbrellaDir = Module->getUmbrellaDir()) {
+ } else if (std::optional<Module::DirectoryName> UmbrellaDir =
+ Module->getUmbrellaDirAsWritten()) {
// Add all of the headers we find in this subdirectory.
std::error_code EC;
SmallString<128> DirNative;
- llvm::sys::path::native(UmbrellaDir.Entry->getName(), DirNative);
+ llvm::sys::path::native(UmbrellaDir->Entry.getName(), DirNative);
llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
- SmallVector<std::pair<std::string, const FileEntry *>, 8> Headers;
+ SmallVector<std::pair<std::string, FileEntryRef>, 8> Headers;
for (llvm::vfs::recursive_directory_iterator Dir(FS, DirNative, EC), End;
Dir != End && !EC; Dir.increment(EC)) {
// Check whether this entry has an extension typically associated with
@@ -371,7 +395,7 @@ static std::error_code collectModuleHeaderIncludes(
.Default(false))
continue;
- auto Header = FileMgr.getFile(Dir->path());
+ auto Header = FileMgr.getOptionalFileRef(Dir->path());
// FIXME: This shouldn't happen unless there is a file system race. Is
// that worth diagnosing?
if (!Header)
@@ -388,7 +412,7 @@ static std::error_code collectModuleHeaderIncludes(
for (int I = 0; I != Dir.level() + 1; ++I, ++PathIt)
Components.push_back(*PathIt);
SmallString<128> RelativeHeader(
- UmbrellaDir.PathRelativeToRootModuleDirectory);
+ UmbrellaDir->PathRelativeToRootModuleDirectory);
for (auto It = Components.rbegin(), End = Components.rend(); It != End;
++It)
llvm::sys::path::append(RelativeHeader, *It);
@@ -402,11 +426,7 @@ static std::error_code collectModuleHeaderIncludes(
// Sort header paths and make the header inclusion order deterministic
// across different OSs and filesystems.
- llvm::sort(Headers.begin(), Headers.end(), [](
- const std::pair<std::string, const FileEntry *> &LHS,
- const std::pair<std::string, const FileEntry *> &RHS) {
- return LHS.first < RHS.first;
- });
+ llvm::sort(Headers, llvm::less_first());
for (auto &H : Headers) {
// Include this header as part of the umbrella directory.
Module->addTopHeader(H.second);
@@ -415,11 +435,9 @@ static std::error_code collectModuleHeaderIncludes(
}
// Recurse into submodules.
- for (clang::Module::submodule_iterator Sub = Module->submodule_begin(),
- SubEnd = Module->submodule_end();
- Sub != SubEnd; ++Sub)
+ for (auto *Submodule : Module->submodules())
if (std::error_code Err = collectModuleHeaderIncludes(
- LangOpts, FileMgr, Diag, ModMap, *Sub, Includes))
+ LangOpts, FileMgr, Diag, ModMap, Submodule, Includes))
return Err;
return std::error_code();
@@ -434,7 +452,8 @@ static bool loadModuleMapForModuleBuild(CompilerInstance &CI, bool IsSystem,
// Map the current input to a file.
FileID ModuleMapID = SrcMgr.getMainFileID();
- const FileEntry *ModuleMap = SrcMgr.getFileEntryForID(ModuleMapID);
+ OptionalFileEntryRef ModuleMap = SrcMgr.getFileEntryRefForID(ModuleMapID);
+ assert(ModuleMap && "MainFileID without FileEntry");
// If the module map is preprocessed, handle the initial line marker;
// line directives are not part of the module map syntax in general.
@@ -447,13 +466,23 @@ static bool loadModuleMapForModuleBuild(CompilerInstance &CI, bool IsSystem,
}
// Load the module map file.
- if (HS.loadModuleMapFile(ModuleMap, IsSystem, ModuleMapID, &Offset,
+ if (HS.loadModuleMapFile(*ModuleMap, IsSystem, ModuleMapID, &Offset,
PresumedModuleMapFile))
return true;
if (SrcMgr.getBufferOrFake(ModuleMapID).getBufferSize() == Offset)
Offset = 0;
+ // Infer framework module if possible.
+ if (HS.getModuleMap().canInferFrameworkModule(ModuleMap->getDir())) {
+ SmallString<128> InferredFrameworkPath = ModuleMap->getDir().getName();
+ llvm::sys::path::append(InferredFrameworkPath,
+ CI.getLangOpts().ModuleName + ".framework");
+ if (auto Dir =
+ CI.getFileManager().getOptionalDirectoryRef(InferredFrameworkPath))
+ (void)HS.getModuleMap().inferFrameworkModule(*Dir, IsSystem, nullptr);
+ }
+
return false;
}
@@ -471,7 +500,7 @@ static Module *prepareToBuildModule(CompilerInstance &CI,
// Dig out the module definition.
HeaderSearch &HS = CI.getPreprocessor().getHeaderSearchInfo();
- Module *M = HS.lookupModule(CI.getLangOpts().CurrentModule,
+ Module *M = HS.lookupModule(CI.getLangOpts().CurrentModule, SourceLocation(),
/*AllowSearch=*/true);
if (!M) {
CI.getDiagnostics().Report(diag::err_missing_module)
@@ -481,13 +510,13 @@ static Module *prepareToBuildModule(CompilerInstance &CI,
}
// Check whether we can build this module at all.
- if (Preprocessor::checkModuleIsAvailable(CI.getLangOpts(), CI.getTarget(),
- CI.getDiagnostics(), M))
+ if (Preprocessor::checkModuleIsAvailable(CI.getLangOpts(), CI.getTarget(), *M,
+ CI.getDiagnostics()))
return nullptr;
// Inform the preprocessor that includes from within the input buffer should
// be resolved relative to the build directory of the module map file.
- CI.getPreprocessor().setMainFileDir(M->Directory);
+ CI.getPreprocessor().setMainFileDir(*M->Directory);
// If the module was inferred from a different module map (via an expanded
// umbrella module definition), track that fact.
@@ -496,15 +525,15 @@ static Module *prepareToBuildModule(CompilerInstance &CI,
StringRef OriginalModuleMapName = CI.getFrontendOpts().OriginalModuleMap;
if (!OriginalModuleMapName.empty()) {
auto OriginalModuleMap =
- CI.getFileManager().getFile(OriginalModuleMapName,
- /*openFile*/ true);
+ CI.getFileManager().getOptionalFileRef(OriginalModuleMapName,
+ /*openFile*/ true);
if (!OriginalModuleMap) {
CI.getDiagnostics().Report(diag::err_module_map_not_found)
<< OriginalModuleMapName;
return nullptr;
}
- if (*OriginalModuleMap != CI.getSourceManager().getFileEntryForID(
- CI.getSourceManager().getMainFileID())) {
+ if (*OriginalModuleMap != CI.getSourceManager().getFileEntryRefForID(
+ CI.getSourceManager().getMainFileID())) {
M->IsInferred = true;
CI.getPreprocessor().getHeaderSearchInfo().getModuleMap()
.setInferredModuleAllowedBy(M, *OriginalModuleMap);
@@ -529,8 +558,9 @@ getInputBufferForModule(CompilerInstance &CI, Module *M) {
// Collect the set of #includes we need to build the module.
SmallString<256> HeaderContents;
std::error_code Err = std::error_code();
- if (Module::Header UmbrellaHeader = M->getUmbrellaHeader())
- addHeaderInclude(UmbrellaHeader.PathRelativeToRootModuleDirectory,
+ if (std::optional<Module::Header> UmbrellaHeader =
+ M->getUmbrellaHeaderAsWritten())
+ addHeaderInclude(UmbrellaHeader->PathRelativeToRootModuleDirectory,
HeaderContents, CI.getLangOpts(), M->IsExternC);
Err = collectModuleHeaderIncludes(
CI.getLangOpts(), FileMgr, CI.getDiagnostics(),
@@ -558,8 +588,21 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
bool HasBegunSourceFile = false;
bool ReplayASTFile = Input.getKind().getFormat() == InputKind::Precompiled &&
usesPreprocessorOnly();
+
+ // If we fail, reset state since the client will not end up calling the
+ // matching EndSourceFile(). All paths that return true should release this.
+ auto FailureCleanup = llvm::make_scope_exit([&]() {
+ if (HasBegunSourceFile)
+ CI.getDiagnosticClient().EndSourceFile();
+ CI.setASTConsumer(nullptr);
+ CI.clearOutputFiles(/*EraseFiles=*/true);
+ CI.getLangOpts().setCompilingModule(LangOptions::CMK_None);
+ setCurrentInput(FrontendInputFile());
+ setCompilerInstance(nullptr);
+ });
+
if (!BeginInvocation(CI))
- goto failure;
+ return false;
// If we're replaying the build of an AST file, import it and set up
// the initial state from its build.
@@ -578,9 +621,9 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
std::unique_ptr<ASTUnit> AST = ASTUnit::LoadFromASTFile(
std::string(InputFile), CI.getPCHContainerReader(),
ASTUnit::LoadPreprocessorOnly, ASTDiags, CI.getFileSystemOpts(),
- CI.getCodeGenOpts().DebugTypeExtRefs);
+ /*HeaderSearchOptions=*/nullptr);
if (!AST)
- goto failure;
+ return false;
// Options relating to how we treat the input (but not what we do with it)
// are inherited from the AST unit.
@@ -605,11 +648,10 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (&MF != &PrimaryModule)
CI.getFrontendOpts().ModuleFiles.push_back(MF.FileName);
- ASTReader->visitTopLevelModuleMaps(
- PrimaryModule, [&](const FileEntry *FE) {
- CI.getFrontendOpts().ModuleMapFiles.push_back(
- std::string(FE->getName()));
- });
+ ASTReader->visitTopLevelModuleMaps(PrimaryModule, [&](FileEntryRef FE) {
+ CI.getFrontendOpts().ModuleMapFiles.push_back(
+ std::string(FE.getName()));
+ });
}
// Set up the input file for replay purposes.
@@ -617,13 +659,14 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (Kind.getFormat() == InputKind::ModuleMap) {
Module *ASTModule =
AST->getPreprocessor().getHeaderSearchInfo().lookupModule(
- AST->getLangOpts().CurrentModule, /*AllowSearch*/ false);
+ AST->getLangOpts().CurrentModule, SourceLocation(),
+ /*AllowSearch*/ false);
assert(ASTModule && "module file does not define its own module");
Input = FrontendInputFile(ASTModule->PresumedModuleMapFile, Kind);
} else {
auto &OldSM = AST->getSourceManager();
FileID ID = OldSM.getMainFileID();
- if (auto *File = OldSM.getFileEntryForID(ID))
+ if (auto File = OldSM.getFileEntryRefForID(ID))
Input = FrontendInputFile(File->getName(), Kind);
else
Input = FrontendInputFile(OldSM.getBufferOrFake(ID), Kind);
@@ -646,10 +689,10 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
std::unique_ptr<ASTUnit> AST = ASTUnit::LoadFromASTFile(
std::string(InputFile), CI.getPCHContainerReader(),
ASTUnit::LoadEverything, Diags, CI.getFileSystemOpts(),
- CI.getCodeGenOpts().DebugTypeExtRefs);
+ CI.getHeaderSearchOptsPtr());
if (!AST)
- goto failure;
+ return false;
// Inform the diagnostic client we are processing a source file.
CI.getDiagnosticClient().BeginSourceFile(CI.getLangOpts(), nullptr);
@@ -669,29 +712,36 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
// Initialize the action.
if (!BeginSourceFileAction(CI))
- goto failure;
+ return false;
// Create the AST consumer.
CI.setASTConsumer(CreateWrappedASTConsumer(CI, InputFile));
if (!CI.hasASTConsumer())
- goto failure;
+ return false;
+ FailureCleanup.release();
return true;
}
// Set up the file and source managers, if needed.
if (!CI.hasFileManager()) {
if (!CI.createFileManager()) {
- goto failure;
+ return false;
}
}
- if (!CI.hasSourceManager())
+ if (!CI.hasSourceManager()) {
CI.createSourceManager(CI.getFileManager());
+ if (CI.getDiagnosticOpts().getFormat() == DiagnosticOptions::SARIF) {
+ static_cast<SARIFDiagnosticPrinter *>(&CI.getDiagnosticClient())
+ ->setSarifWriter(
+ std::make_unique<SarifDocumentWriter>(CI.getSourceManager()));
+ }
+ }
// Set up embedding for any specified files. Do this before we load any
// source files, including the primary module map for the compilation.
for (const auto &F : CI.getFrontendOpts().ModulesEmbedFiles) {
- if (auto FE = CI.getFileManager().getFile(F, /*openFile*/true))
+ if (auto FE = CI.getFileManager().getOptionalFileRef(F, /*openFile*/true))
CI.getSourceManager().setFileIsTransient(*FE);
else
CI.getDiagnostics().Report(diag::err_modules_embed_file_not_found) << F;
@@ -710,12 +760,13 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
// Initialize the action.
if (!BeginSourceFileAction(CI))
- goto failure;
+ return false;
// Initialize the main file entry.
if (!CI.InitializeSourceManager(CurrentInput))
- goto failure;
+ return false;
+ FailureCleanup.release();
return true;
}
@@ -726,10 +777,10 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
PreprocessorOptions &PPOpts = CI.getPreprocessorOpts();
StringRef PCHInclude = PPOpts.ImplicitPCHInclude;
std::string SpecificModuleCachePath = CI.getSpecificModuleCachePath();
- if (auto PCHDir = FileMgr.getDirectory(PCHInclude)) {
+ if (auto PCHDir = FileMgr.getOptionalDirectoryRef(PCHInclude)) {
std::error_code EC;
SmallString<128> DirNative;
- llvm::sys::path::native((*PCHDir)->getName(), DirNative);
+ llvm::sys::path::native(PCHDir->getName(), DirNative);
bool Found = false;
llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
for (llvm::vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC),
@@ -737,9 +788,10 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
Dir != DirEnd && !EC; Dir.increment(EC)) {
// Check whether this is an acceptable AST file.
if (ASTReader::isAcceptableASTFile(
- Dir->path(), FileMgr, CI.getPCHContainerReader(),
- CI.getLangOpts(), CI.getTargetOpts(), CI.getPreprocessorOpts(),
- SpecificModuleCachePath)) {
+ Dir->path(), FileMgr, CI.getModuleCache(),
+ CI.getPCHContainerReader(), CI.getLangOpts(),
+ CI.getTargetOpts(), CI.getPreprocessorOpts(),
+ SpecificModuleCachePath, /*RequireStrictOptionMatches=*/true)) {
PPOpts.ImplicitPCHInclude = std::string(Dir->path());
Found = true;
break;
@@ -748,7 +800,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (!Found) {
CI.getDiagnostics().Report(diag::err_fe_no_pch_in_dir) << PCHInclude;
- goto failure;
+ return false;
}
}
}
@@ -763,9 +815,63 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
&CI.getPreprocessor());
HasBegunSourceFile = true;
- // Initialize the main file entry.
+ // Handle C++20 header units.
+ // Here, the user has the option to specify that the header name should be
+ // looked up in the pre-processor search paths (and the main filename as
+ // passed by the driver might therefore be incomplete until that look-up).
+ if (CI.getLangOpts().CPlusPlusModules && Input.getKind().isHeaderUnit() &&
+ !Input.getKind().isPreprocessed()) {
+ StringRef FileName = Input.getFile();
+ InputKind Kind = Input.getKind();
+ if (Kind.getHeaderUnitKind() != InputKind::HeaderUnit_Abs) {
+ assert(CI.hasPreprocessor() &&
+ "trying to build a header unit without a Pre-processor?");
+ HeaderSearch &HS = CI.getPreprocessor().getHeaderSearchInfo();
+ // Relative searches begin from CWD.
+ auto Dir = CI.getFileManager().getOptionalDirectoryRef(".");
+ SmallVector<std::pair<OptionalFileEntryRef, DirectoryEntryRef>, 1> CWD;
+ CWD.push_back({std::nullopt, *Dir});
+ OptionalFileEntryRef FE =
+ HS.LookupFile(FileName, SourceLocation(),
+ /*Angled*/ Input.getKind().getHeaderUnitKind() ==
+ InputKind::HeaderUnit_System,
+ nullptr, nullptr, CWD, nullptr, nullptr, nullptr,
+ nullptr, nullptr, nullptr);
+ if (!FE) {
+ CI.getDiagnostics().Report(diag::err_module_header_file_not_found)
+ << FileName;
+ return false;
+ }
+ // We now have the filename...
+ FileName = FE->getName();
+ // ... still a header unit, but now use the path as written.
+ Kind = Input.getKind().withHeaderUnit(InputKind::HeaderUnit_Abs);
+ Input = FrontendInputFile(FileName, Kind, Input.isSystem());
+ }
+ // Unless the user has overridden the name, the header unit module name is
+ // the pathname for the file.
+ if (CI.getLangOpts().ModuleName.empty())
+ CI.getLangOpts().ModuleName = std::string(FileName);
+ CI.getLangOpts().CurrentModule = CI.getLangOpts().ModuleName;
+ }
+
if (!CI.InitializeSourceManager(Input))
- goto failure;
+ return false;
+
+ if (CI.getLangOpts().CPlusPlusModules && Input.getKind().isHeaderUnit() &&
+ Input.getKind().isPreprocessed() && !usesPreprocessorOnly()) {
+ // We have an input filename like foo.iih, but we want to find the right
+ // module name (and original file, to build the map entry).
+ // Check if the first line specifies the original source file name with a
+ // linemarker.
+ std::string PresumedInputFile = std::string(getCurrentFileOrBufferName());
+ ReadOriginalFileName(CI, PresumedInputFile);
+ // Unless the user overrides this, the module name is the name by which the
+ // original file was known.
+ if (CI.getLangOpts().ModuleName.empty())
+ CI.getLangOpts().ModuleName = std::string(PresumedInputFile);
+ CI.getLangOpts().CurrentModule = CI.getLangOpts().ModuleName;
+ }
// For module map files, we first parse the module map and synthesize a
// "<module-includes>" buffer before more conventional processing.
@@ -777,11 +883,11 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
if (loadModuleMapForModuleBuild(CI, Input.isSystem(),
Input.isPreprocessed(),
PresumedModuleMapFile, OffsetToContents))
- goto failure;
+ return false;
auto *CurrentModule = prepareToBuildModule(CI, Input.getFile());
if (!CurrentModule)
- goto failure;
+ return false;
CurrentModule->PresumedModuleMapFile = PresumedModuleMapFile;
@@ -792,7 +898,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
// Otherwise, convert the module description to a suitable input buffer.
auto Buffer = getInputBufferForModule(CI, CurrentModule);
if (!Buffer)
- goto failure;
+ return false;
// Reinitialize the main file entry to refer to the new input.
auto Kind = CurrentModule->IsSystem ? SrcMgr::C_System : SrcMgr::C_User;
@@ -805,17 +911,20 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
// Initialize the action.
if (!BeginSourceFileAction(CI))
- goto failure;
+ return false;
// If we were asked to load any module map files, do so now.
for (const auto &Filename : CI.getFrontendOpts().ModuleMapFiles) {
- if (auto File = CI.getFileManager().getFile(Filename))
+ if (auto File = CI.getFileManager().getOptionalFileRef(Filename))
CI.getPreprocessor().getHeaderSearchInfo().loadModuleMapFile(
*File, /*IsSystem*/false);
else
CI.getDiagnostics().Report(diag::err_module_map_not_found) << Filename;
}
+ // If compiling implementation of a module, load its module map file now.
+ (void)CI.getPreprocessor().getCurrentModuleImplementation();
+
// Add a module declaration scope so that modules from -fmodule-map-file
// arguments may shadow modules found implicitly in search paths.
CI.getPreprocessor()
@@ -839,7 +948,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
std::unique_ptr<ASTConsumer> Consumer =
CreateWrappedASTConsumer(CI, PresumedInputFile);
if (!Consumer)
- goto failure;
+ return false;
// FIXME: should not overwrite ASTMutationListener when parsing model files?
if (!isModelParsingAction())
@@ -850,7 +959,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
IntrusiveRefCntPtr<ExternalSemaSource> source, FinalReader;
source = createChainedIncludesSource(CI, FinalReader);
if (!source)
- goto failure;
+ return false;
CI.setASTReader(static_cast<ASTReader *>(FinalReader.get()));
CI.getASTContext().setExternalSource(source);
} else if (CI.getLangOpts().Modules ||
@@ -879,7 +988,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
CI.getPreprocessorOpts().AllowPCHWithCompilerErrors,
DeserialListener, DeleteDeserialListener);
if (!CI.getASTContext().getExternalSource())
- goto failure;
+ return false;
}
// If modules are enabled, create the AST reader before creating
// any builtins, so that all declarations know that they might be
@@ -894,7 +1003,7 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
CI.setASTConsumer(std::move(Consumer));
if (!CI.hasASTConsumer())
- goto failure;
+ return false;
}
// Initialize built-in info as long as we aren't using an external AST
@@ -913,9 +1022,15 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
}
// If we were asked to load any module files, do so now.
- for (const auto &ModuleFile : CI.getFrontendOpts().ModuleFiles)
- if (!CI.loadModuleFile(ModuleFile))
- goto failure;
+ for (const auto &ModuleFile : CI.getFrontendOpts().ModuleFiles) {
+ serialization::ModuleFile *Loaded = nullptr;
+ if (!CI.loadModuleFile(ModuleFile, Loaded))
+ return false;
+
+ if (Loaded && Loaded->StandardCXXModule)
+ CI.getDiagnostics().Report(
+ diag::warn_eagerly_load_for_standard_cplusplus_modules);
+ }
// If there is a layout overrides file, attach an external AST source that
// provides the layouts from that file.
@@ -927,18 +1042,21 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
CI.getASTContext().setExternalSource(Override);
}
- return true;
+ // Setup HLSL External Sema Source
+ if (CI.getLangOpts().HLSL && CI.hasASTContext()) {
+ IntrusiveRefCntPtr<ExternalSemaSource> HLSLSema(
+ new HLSLExternalSemaSource());
+ if (auto *SemaSource = dyn_cast_if_present<ExternalSemaSource>(
+ CI.getASTContext().getExternalSource())) {
+ IntrusiveRefCntPtr<ExternalSemaSource> MultiSema(
+ new MultiplexExternalSemaSource(SemaSource, HLSLSema.get()));
+ CI.getASTContext().setExternalSource(MultiSema);
+ } else
+ CI.getASTContext().setExternalSource(HLSLSema);
+ }
- // If we failed, reset state since the client will not end up calling the
- // matching EndSourceFile().
-failure:
- if (HasBegunSourceFile)
- CI.getDiagnosticClient().EndSourceFile();
- CI.clearOutputFiles(/*EraseFiles=*/true);
- CI.getLangOpts().setCompilingModule(LangOptions::CMK_None);
- setCurrentInput(FrontendInputFile());
- setCompilerInstance(nullptr);
- return false;
+ FailureCleanup.release();
+ return true;
}
llvm::Error FrontendAction::Execute() {
@@ -998,7 +1116,7 @@ void FrontendAction::EndSourceFile() {
}
if (CI.getFrontendOpts().ShowStats) {
- llvm::errs() << "\nSTATISTICS FOR '" << getCurrentFile() << "':\n";
+ llvm::errs() << "\nSTATISTICS FOR '" << getCurrentFileOrBufferName() << "':\n";
CI.getPreprocessor().PrintStats();
CI.getPreprocessor().getIdentifierTable().PrintStats();
CI.getPreprocessor().getHeaderSearchInfo().PrintStats();
@@ -1010,6 +1128,9 @@ void FrontendAction::EndSourceFile() {
// FrontendAction.
CI.clearOutputFiles(/*EraseFiles=*/shouldEraseOutputFiles());
+ // The resources are owned by AST when the current file is AST.
+ // So we reset the resources here to avoid users accessing it
+ // accidently.
if (isCurrentFileAST()) {
if (DisableFree) {
CI.resetAndLeakPreprocessor();
@@ -1040,6 +1161,10 @@ void ASTFrontendAction::ExecuteAction() {
CompilerInstance &CI = getCompilerInstance();
if (!CI.hasPreprocessor())
return;
+ // This is a fallback: If the client forgets to invoke this, we mark the
+ // current stack as the bottom. Though not optimal, this could help prevent
+ // stack overflow during deep recursion.
+ clang::noteBottomOfStack();
// FIXME: Move the truncation aspect of this into Sema, we delayed this till
// here so the source manager would be initialized.
@@ -1119,4 +1244,3 @@ bool WrapperFrontendAction::hasCodeCompletionSupport() const {
WrapperFrontendAction::WrapperFrontendAction(
std::unique_ptr<FrontendAction> WrappedAction)
: WrappedAction(std::move(WrappedAction)) {}
-
diff --git a/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp b/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp
index c6ebbdc8c04e..c1d6e7145536 100644
--- a/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp
@@ -8,27 +8,32 @@
#include "clang/Frontend/FrontendActions.h"
#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/Decl.h"
#include "clang/Basic/FileManager.h"
-#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/LangStandard.h"
+#include "clang/Basic/Module.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/ASTConsumers.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/MultiplexConsumer.h"
#include "clang/Frontend/Utils.h"
-#include "clang/Lex/DependencyDirectivesSourceMinimizer.h"
+#include "clang/Lex/DependencyDirectivesScanner.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Sema/TemplateInstCallback.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/ASTWriter.h"
+#include "clang/Serialization/ModuleFile.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/YAMLTraits.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
+#include <optional>
#include <system_error>
using namespace clang;
@@ -136,7 +141,8 @@ GeneratePCHAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
CI.getPreprocessor(), CI.getModuleCache(), OutputFile, Sysroot, Buffer,
FrontendOpts.ModuleFileExtensions,
CI.getPreprocessorOpts().AllowPCHWithCompilerErrors,
- FrontendOpts.IncludeTimestamps, +CI.getLangOpts().CacheGeneratedPCH));
+ FrontendOpts.IncludeTimestamps, FrontendOpts.BuildingImplicitModule,
+ +CI.getLangOpts().CacheGeneratedPCH));
Consumers.push_back(CI.getPCHContainerWriter().CreatePCHContainerGenerator(
CI, std::string(InFile), OutputFile, std::move(OS), Buffer));
@@ -197,7 +203,9 @@ GenerateModuleAction::CreateASTConsumer(CompilerInstance &CI,
/*AllowASTWithErrors=*/
+CI.getFrontendOpts().AllowPCMWithCompilerErrors,
/*IncludeTimestamps=*/
- +CI.getFrontendOpts().BuildingImplicitModule,
+ +CI.getFrontendOpts().BuildingImplicitModule &&
+ +CI.getFrontendOpts().IncludeTimestamps,
+ /*BuildingImplicitModule=*/+CI.getFrontendOpts().BuildingImplicitModule,
/*ShouldCacheASTInMemory=*/
+CI.getFrontendOpts().BuildingImplicitModule));
Consumers.push_back(CI.getPCHContainerWriter().CreatePCHContainerGenerator(
@@ -245,91 +253,39 @@ GenerateModuleFromModuleMapAction::CreateOutputFile(CompilerInstance &CI,
bool GenerateModuleInterfaceAction::BeginSourceFileAction(
CompilerInstance &CI) {
- if (!CI.getLangOpts().ModulesTS && !CI.getLangOpts().CPlusPlusModules) {
- CI.getDiagnostics().Report(diag::err_module_interface_requires_cpp_modules);
- return false;
- }
-
CI.getLangOpts().setCompilingModule(LangOptions::CMK_ModuleInterface);
return GenerateModuleAction::BeginSourceFileAction(CI);
}
+std::unique_ptr<ASTConsumer>
+GenerateModuleInterfaceAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ CI.getHeaderSearchOpts().ModulesSkipDiagnosticOptions = true;
+ CI.getHeaderSearchOpts().ModulesSkipHeaderSearchPaths = true;
+ CI.getHeaderSearchOpts().ModulesSkipPragmaDiagnosticMappings = true;
+
+ return GenerateModuleAction::CreateASTConsumer(CI, InFile);
+}
+
std::unique_ptr<raw_pwrite_stream>
GenerateModuleInterfaceAction::CreateOutputFile(CompilerInstance &CI,
StringRef InFile) {
return CI.createDefaultOutputFile(/*Binary=*/true, InFile, "pcm");
}
-bool GenerateHeaderModuleAction::PrepareToExecuteAction(
- CompilerInstance &CI) {
- if (!CI.getLangOpts().Modules) {
- CI.getDiagnostics().Report(diag::err_header_module_requires_modules);
+bool GenerateHeaderUnitAction::BeginSourceFileAction(CompilerInstance &CI) {
+ if (!CI.getLangOpts().CPlusPlusModules) {
+ CI.getDiagnostics().Report(diag::err_module_interface_requires_cpp_modules);
return false;
}
-
- auto &Inputs = CI.getFrontendOpts().Inputs;
- if (Inputs.empty())
- return GenerateModuleAction::BeginInvocation(CI);
-
- auto Kind = Inputs[0].getKind();
-
- // Convert the header file inputs into a single module input buffer.
- SmallString<256> HeaderContents;
- ModuleHeaders.reserve(Inputs.size());
- for (const FrontendInputFile &FIF : Inputs) {
- // FIXME: We should support re-compiling from an AST file.
- if (FIF.getKind().getFormat() != InputKind::Source || !FIF.isFile()) {
- CI.getDiagnostics().Report(diag::err_module_header_file_not_found)
- << (FIF.isFile() ? FIF.getFile()
- : FIF.getBuffer().getBufferIdentifier());
- return true;
- }
-
- HeaderContents += "#include \"";
- HeaderContents += FIF.getFile();
- HeaderContents += "\"\n";
- ModuleHeaders.push_back(std::string(FIF.getFile()));
- }
- Buffer = llvm::MemoryBuffer::getMemBufferCopy(
- HeaderContents, Module::getModuleInputBufferName());
-
- // Set that buffer up as our "real" input.
- Inputs.clear();
- Inputs.push_back(
- FrontendInputFile(Buffer->getMemBufferRef(), Kind, /*IsSystem*/ false));
-
- return GenerateModuleAction::PrepareToExecuteAction(CI);
-}
-
-bool GenerateHeaderModuleAction::BeginSourceFileAction(
- CompilerInstance &CI) {
- CI.getLangOpts().setCompilingModule(LangOptions::CMK_HeaderModule);
-
- // Synthesize a Module object for the given headers.
- auto &HS = CI.getPreprocessor().getHeaderSearchInfo();
- SmallVector<Module::Header, 16> Headers;
- for (StringRef Name : ModuleHeaders) {
- const DirectoryLookup *CurDir = nullptr;
- Optional<FileEntryRef> FE = HS.LookupFile(
- Name, SourceLocation(), /*Angled*/ false, nullptr, CurDir, None,
- nullptr, nullptr, nullptr, nullptr, nullptr, nullptr);
- if (!FE) {
- CI.getDiagnostics().Report(diag::err_module_header_file_not_found)
- << Name;
- continue;
- }
- Headers.push_back(
- {std::string(Name), std::string(Name), &FE->getFileEntry()});
- }
- HS.getModuleMap().createHeaderModule(CI.getLangOpts().CurrentModule, Headers);
-
+ CI.getLangOpts().setCompilingModule(LangOptions::CMK_HeaderUnit);
return GenerateModuleAction::BeginSourceFileAction(CI);
}
std::unique_ptr<raw_pwrite_stream>
-GenerateHeaderModuleAction::CreateOutputFile(CompilerInstance &CI,
- StringRef InFile) {
+GenerateHeaderUnitAction::CreateOutputFile(CompilerInstance &CI,
+ StringRef InFile) {
return CI.createDefaultOutputFile(/*Binary=*/true, InFile, "pcm");
}
@@ -428,6 +384,8 @@ private:
return "ExplicitTemplateArgumentSubstitution";
case CodeSynthesisContext::DeducedTemplateArgumentSubstitution:
return "DeducedTemplateArgumentSubstitution";
+ case CodeSynthesisContext::LambdaExpressionSubstitution:
+ return "LambdaExpressionSubstitution";
case CodeSynthesisContext::PriorTemplateArgumentSubstitution:
return "PriorTemplateArgumentSubstitution";
case CodeSynthesisContext::DefaultTemplateArgumentChecking:
@@ -452,6 +410,8 @@ private:
return "ConstraintSubstitution";
case CodeSynthesisContext::ConstraintNormalization:
return "ConstraintNormalization";
+ case CodeSynthesisContext::RequirementParameterInstantiation:
+ return "RequirementParameterInstantiation";
case CodeSynthesisContext::ParameterMappingSubstitution:
return "ParameterMappingSubstitution";
case CodeSynthesisContext::RequirementInstantiation:
@@ -462,6 +422,10 @@ private:
return "InitializingStructuredBinding";
case CodeSynthesisContext::MarkingClassDllexported:
return "MarkingClassDllexported";
+ case CodeSynthesisContext::BuildingBuiltinDumpStructCall:
+ return "BuildingBuiltinDumpStructCall";
+ case CodeSynthesisContext::BuildingDeductionGuides:
+ return "BuildingDeductionGuides";
}
return "";
}
@@ -481,25 +445,94 @@ private:
Out << "---" << YAML << "\n";
}
+ static void printEntryName(const Sema &TheSema, const Decl *Entity,
+ llvm::raw_string_ostream &OS) {
+ auto *NamedTemplate = cast<NamedDecl>(Entity);
+
+ PrintingPolicy Policy = TheSema.Context.getPrintingPolicy();
+ // FIXME: Also ask for FullyQualifiedNames?
+ Policy.SuppressDefaultTemplateArgs = false;
+ NamedTemplate->getNameForDiagnostic(OS, Policy, true);
+
+ if (!OS.str().empty())
+ return;
+
+ Decl *Ctx = Decl::castFromDeclContext(NamedTemplate->getDeclContext());
+ NamedDecl *NamedCtx = dyn_cast_or_null<NamedDecl>(Ctx);
+
+ if (const auto *Decl = dyn_cast<TagDecl>(NamedTemplate)) {
+ if (const auto *R = dyn_cast<RecordDecl>(Decl)) {
+ if (R->isLambda()) {
+ OS << "lambda at ";
+ Decl->getLocation().print(OS, TheSema.getSourceManager());
+ return;
+ }
+ }
+ OS << "unnamed " << Decl->getKindName();
+ return;
+ }
+
+ assert(NamedCtx && "NamedCtx cannot be null");
+
+ if (const auto *Decl = dyn_cast<ParmVarDecl>(NamedTemplate)) {
+ OS << "unnamed function parameter " << Decl->getFunctionScopeIndex()
+ << " ";
+ if (Decl->getFunctionScopeDepth() > 0)
+ OS << "(at depth " << Decl->getFunctionScopeDepth() << ") ";
+ OS << "of ";
+ NamedCtx->getNameForDiagnostic(OS, TheSema.getLangOpts(), true);
+ return;
+ }
+
+ if (const auto *Decl = dyn_cast<TemplateTypeParmDecl>(NamedTemplate)) {
+ if (const Type *Ty = Decl->getTypeForDecl()) {
+ if (const auto *TTPT = dyn_cast_or_null<TemplateTypeParmType>(Ty)) {
+ OS << "unnamed template type parameter " << TTPT->getIndex() << " ";
+ if (TTPT->getDepth() > 0)
+ OS << "(at depth " << TTPT->getDepth() << ") ";
+ OS << "of ";
+ NamedCtx->getNameForDiagnostic(OS, TheSema.getLangOpts(), true);
+ return;
+ }
+ }
+ }
+
+ if (const auto *Decl = dyn_cast<NonTypeTemplateParmDecl>(NamedTemplate)) {
+ OS << "unnamed template non-type parameter " << Decl->getIndex() << " ";
+ if (Decl->getDepth() > 0)
+ OS << "(at depth " << Decl->getDepth() << ") ";
+ OS << "of ";
+ NamedCtx->getNameForDiagnostic(OS, TheSema.getLangOpts(), true);
+ return;
+ }
+
+ if (const auto *Decl = dyn_cast<TemplateTemplateParmDecl>(NamedTemplate)) {
+ OS << "unnamed template template parameter " << Decl->getIndex() << " ";
+ if (Decl->getDepth() > 0)
+ OS << "(at depth " << Decl->getDepth() << ") ";
+ OS << "of ";
+ NamedCtx->getNameForDiagnostic(OS, TheSema.getLangOpts(), true);
+ return;
+ }
+
+ llvm_unreachable("Failed to retrieve a name for this entry!");
+ OS << "unnamed identifier";
+ }
+
template <bool BeginInstantiation>
static TemplightEntry getTemplightEntry(const Sema &TheSema,
const CodeSynthesisContext &Inst) {
TemplightEntry Entry;
Entry.Kind = toString(Inst.Kind);
Entry.Event = BeginInstantiation ? "Begin" : "End";
- if (auto *NamedTemplate = dyn_cast_or_null<NamedDecl>(Inst.Entity)) {
- llvm::raw_string_ostream OS(Entry.Name);
- PrintingPolicy Policy = TheSema.Context.getPrintingPolicy();
- // FIXME: Also ask for FullyQualifiedNames?
- Policy.SuppressDefaultTemplateArgs = false;
- NamedTemplate->getNameForDiagnostic(OS, Policy, true);
- const PresumedLoc DefLoc =
+ llvm::raw_string_ostream OS(Entry.Name);
+ printEntryName(TheSema, Inst.Entity, OS);
+ const PresumedLoc DefLoc =
TheSema.getSourceManager().getPresumedLoc(Inst.Entity->getLocation());
- if(!DefLoc.isInvalid())
- Entry.DefinitionLocation = std::string(DefLoc.getFilename()) + ":" +
- std::to_string(DefLoc.getLine()) + ":" +
- std::to_string(DefLoc.getColumn());
- }
+ if (!DefLoc.isInvalid())
+ Entry.DefinitionLocation = std::string(DefLoc.getFilename()) + ":" +
+ std::to_string(DefLoc.getLine()) + ":" +
+ std::to_string(DefLoc.getColumn());
const PresumedLoc PoiLoc =
TheSema.getSourceManager().getPresumedLoc(Inst.PointOfInstantiation);
if (!PoiLoc.isInvalid()) {
@@ -638,8 +671,23 @@ namespace {
return false;
}
+ bool ReadHeaderSearchPaths(const HeaderSearchOptions &HSOpts,
+ bool Complain) override {
+ Out.indent(2) << "Header search paths:\n";
+ Out.indent(4) << "User entries:\n";
+ for (const auto &Entry : HSOpts.UserEntries)
+ Out.indent(6) << Entry.Path << "\n";
+ Out.indent(4) << "System header prefixes:\n";
+ for (const auto &Prefix : HSOpts.SystemHeaderPrefixes)
+ Out.indent(6) << Prefix.Prefix << "\n";
+ Out.indent(4) << "VFS overlay files:\n";
+ for (const auto &Overlay : HSOpts.VFSOverlayFiles)
+ Out.indent(6) << Overlay << "\n";
+ return false;
+ }
+
bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
- bool Complain,
+ bool ReadMacros, bool Complain,
std::string &SuggestedPredefines) override {
Out.indent(2) << "Preprocessor options:\n";
DUMP_BOOLEAN(PPOpts.UsePredefines,
@@ -647,7 +695,7 @@ namespace {
DUMP_BOOLEAN(PPOpts.DetailedRecord,
"Uses detailed preprocessing record (for indexing)");
- if (!PPOpts.Macros.empty()) {
+ if (ReadMacros) {
Out.indent(4) << "Predefined macros:\n";
}
@@ -738,31 +786,150 @@ bool DumpModuleInfoAction::BeginInvocation(CompilerInstance &CI) {
return true;
}
+static StringRef ModuleKindName(Module::ModuleKind MK) {
+ switch (MK) {
+ case Module::ModuleMapModule:
+ return "Module Map Module";
+ case Module::ModuleInterfaceUnit:
+ return "Interface Unit";
+ case Module::ModuleImplementationUnit:
+ return "Implementation Unit";
+ case Module::ModulePartitionInterface:
+ return "Partition Interface";
+ case Module::ModulePartitionImplementation:
+ return "Partition Implementation";
+ case Module::ModuleHeaderUnit:
+ return "Header Unit";
+ case Module::ExplicitGlobalModuleFragment:
+ return "Global Module Fragment";
+ case Module::ImplicitGlobalModuleFragment:
+ return "Implicit Module Fragment";
+ case Module::PrivateModuleFragment:
+ return "Private Module Fragment";
+ }
+ llvm_unreachable("unknown module kind!");
+}
+
void DumpModuleInfoAction::ExecuteAction() {
+ assert(isCurrentFileAST() && "dumping non-AST?");
// Set up the output file.
- std::unique_ptr<llvm::raw_fd_ostream> OutFile;
- StringRef OutputFileName = getCompilerInstance().getFrontendOpts().OutputFile;
+ CompilerInstance &CI = getCompilerInstance();
+ StringRef OutputFileName = CI.getFrontendOpts().OutputFile;
if (!OutputFileName.empty() && OutputFileName != "-") {
std::error_code EC;
- OutFile.reset(new llvm::raw_fd_ostream(OutputFileName.str(), EC,
- llvm::sys::fs::OF_TextWithCRLF));
+ OutputStream.reset(new llvm::raw_fd_ostream(
+ OutputFileName.str(), EC, llvm::sys::fs::OF_TextWithCRLF));
}
- llvm::raw_ostream &Out = OutFile.get()? *OutFile.get() : llvm::outs();
+ llvm::raw_ostream &Out = OutputStream ? *OutputStream : llvm::outs();
Out << "Information for module file '" << getCurrentFile() << "':\n";
- auto &FileMgr = getCompilerInstance().getFileManager();
+ auto &FileMgr = CI.getFileManager();
auto Buffer = FileMgr.getBufferForFile(getCurrentFile());
StringRef Magic = (*Buffer)->getMemBufferRef().getBuffer();
bool IsRaw = (Magic.size() >= 4 && Magic[0] == 'C' && Magic[1] == 'P' &&
Magic[2] == 'C' && Magic[3] == 'H');
Out << " Module format: " << (IsRaw ? "raw" : "obj") << "\n";
- Preprocessor &PP = getCompilerInstance().getPreprocessor();
+ Preprocessor &PP = CI.getPreprocessor();
DumpModuleInfoListener Listener(Out);
- HeaderSearchOptions &HSOpts =
- PP.getHeaderSearchInfo().getHeaderSearchOpts();
+ HeaderSearchOptions &HSOpts = PP.getHeaderSearchInfo().getHeaderSearchOpts();
+
+ // The FrontendAction::BeginSourceFile () method loads the AST so that much
+ // of the information is already available and modules should have been
+ // loaded.
+
+ const LangOptions &LO = getCurrentASTUnit().getLangOpts();
+ if (LO.CPlusPlusModules && !LO.CurrentModule.empty()) {
+
+ ASTReader *R = getCurrentASTUnit().getASTReader().get();
+ unsigned SubModuleCount = R->getTotalNumSubmodules();
+ serialization::ModuleFile &MF = R->getModuleManager().getPrimaryModule();
+ Out << " ====== C++20 Module structure ======\n";
+
+ if (MF.ModuleName != LO.CurrentModule)
+ Out << " Mismatched module names : " << MF.ModuleName << " and "
+ << LO.CurrentModule << "\n";
+
+ struct SubModInfo {
+ unsigned Idx;
+ Module *Mod;
+ Module::ModuleKind Kind;
+ std::string &Name;
+ bool Seen;
+ };
+ std::map<std::string, SubModInfo> SubModMap;
+ auto PrintSubMapEntry = [&](std::string Name, Module::ModuleKind Kind) {
+ Out << " " << ModuleKindName(Kind) << " '" << Name << "'";
+ auto I = SubModMap.find(Name);
+ if (I == SubModMap.end())
+ Out << " was not found in the sub modules!\n";
+ else {
+ I->second.Seen = true;
+ Out << " is at index #" << I->second.Idx << "\n";
+ }
+ };
+ Module *Primary = nullptr;
+ for (unsigned Idx = 0; Idx <= SubModuleCount; ++Idx) {
+ Module *M = R->getModule(Idx);
+ if (!M)
+ continue;
+ if (M->Name == LO.CurrentModule) {
+ Primary = M;
+ Out << " " << ModuleKindName(M->Kind) << " '" << LO.CurrentModule
+ << "' is the Primary Module at index #" << Idx << "\n";
+ SubModMap.insert({M->Name, {Idx, M, M->Kind, M->Name, true}});
+ } else
+ SubModMap.insert({M->Name, {Idx, M, M->Kind, M->Name, false}});
+ }
+ if (Primary) {
+ if (!Primary->submodules().empty())
+ Out << " Sub Modules:\n";
+ for (auto *MI : Primary->submodules()) {
+ PrintSubMapEntry(MI->Name, MI->Kind);
+ }
+ if (!Primary->Imports.empty())
+ Out << " Imports:\n";
+ for (auto *IMP : Primary->Imports) {
+ PrintSubMapEntry(IMP->Name, IMP->Kind);
+ }
+ if (!Primary->Exports.empty())
+ Out << " Exports:\n";
+ for (unsigned MN = 0, N = Primary->Exports.size(); MN != N; ++MN) {
+ if (Module *M = Primary->Exports[MN].getPointer()) {
+ PrintSubMapEntry(M->Name, M->Kind);
+ }
+ }
+ }
+
+ // Emit the macro definitions in the module file so that we can know how
+ // much definitions in the module file quickly.
+ // TODO: Emit the macro definition bodies completely.
+ if (auto FilteredMacros = llvm::make_filter_range(
+ R->getPreprocessor().macros(),
+ [](const auto &Macro) { return Macro.first->isFromAST(); });
+ !FilteredMacros.empty()) {
+ Out << " Macro Definitions:\n";
+ for (/*<IdentifierInfo *, MacroState> pair*/ const auto &Macro :
+ FilteredMacros)
+ Out << " " << Macro.first->getName() << "\n";
+ }
+
+ // Now let's print out any modules we did not see as part of the Primary.
+ for (const auto &SM : SubModMap) {
+ if (!SM.second.Seen && SM.second.Mod) {
+ Out << " " << ModuleKindName(SM.second.Kind) << " '" << SM.first
+ << "' at index #" << SM.second.Idx
+ << " has no direct reference in the Primary\n";
+ }
+ }
+ Out << " ====== ======\n";
+ }
+
+ // The reminder of the output is produced from the listener as the AST
+ // FileCcontrolBlock is (re-)parsed.
ASTReader::readASTFileControlBlock(
- getCurrentFile(), FileMgr, getCompilerInstance().getPCHContainerReader(),
+ getCurrentFile(), FileMgr, CI.getModuleCache(),
+ CI.getPCHContainerReader(),
/*FindModuleFileExtensions=*/true, Listener,
HSOpts.ModulesValidateDiagnosticOptions);
}
@@ -835,14 +1002,14 @@ void PrintPreprocessedAction::ExecuteAction() {
if (llvm::Triple(LLVM_HOST_TRIPLE).isOSWindows()) {
BinaryMode = true;
const SourceManager &SM = CI.getSourceManager();
- if (llvm::Optional<llvm::MemoryBufferRef> Buffer =
+ if (std::optional<llvm::MemoryBufferRef> Buffer =
SM.getBufferOrNone(SM.getMainFileID())) {
const char *cur = Buffer->getBufferStart();
const char *end = Buffer->getBufferEnd();
const char *next = (cur != end) ? cur + 1 : end;
// Limit ourselves to only scanning 256 characters into the source
- // file. This is mostly a sanity check in case the file has no
+ // file. This is mostly a check in case the file has no
// newlines whatsoever.
if (end - cur > 256)
end = cur + 256;
@@ -893,6 +1060,7 @@ void PrintPreambleAction::ExecuteAction() {
case Language::OpenCLCXX:
case Language::CUDA:
case Language::HIP:
+ case Language::HLSL:
break;
case Language::Unknown:
@@ -969,10 +1137,10 @@ void PrintDependencyDirectivesSourceMinimizerAction::ExecuteAction() {
SourceManager &SM = CI.getPreprocessor().getSourceManager();
llvm::MemoryBufferRef FromFile = SM.getBufferOrFake(SM.getMainFileID());
- llvm::SmallString<1024> Output;
- llvm::SmallVector<minimize_source_to_dependency_directives::Token, 32> Toks;
- if (minimizeSourceToDependencyDirectives(
- FromFile.getBuffer(), Output, Toks, &CI.getDiagnostics(),
+ llvm::SmallVector<dependency_directives_scan::Token, 16> Tokens;
+ llvm::SmallVector<dependency_directives_scan::Directive, 32> Directives;
+ if (scanSourceForDependencyDirectives(
+ FromFile.getBuffer(), Tokens, Directives, &CI.getDiagnostics(),
SM.getLocForStartOfFile(SM.getMainFileID()))) {
assert(CI.getDiagnostics().hasErrorOccurred() &&
"no errors reported for failure");
@@ -991,5 +1159,20 @@ void PrintDependencyDirectivesSourceMinimizerAction::ExecuteAction() {
}
return;
}
- llvm::outs() << Output;
+ printDependencyDirectivesAsSource(FromFile.getBuffer(), Directives,
+ llvm::outs());
+}
+
+void GetDependenciesByModuleNameAction::ExecuteAction() {
+ CompilerInstance &CI = getCompilerInstance();
+ Preprocessor &PP = CI.getPreprocessor();
+ SourceManager &SM = PP.getSourceManager();
+ FileID MainFileID = SM.getMainFileID();
+ SourceLocation FileStart = SM.getLocForStartOfFile(MainFileID);
+ SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Path;
+ IdentifierInfo *ModuleID = PP.getIdentifierInfo(ModuleName);
+ Path.push_back(std::make_pair(ModuleID, FileStart));
+ auto ModResult = CI.loadModule(FileStart, Path, Module::Hidden, false);
+ PPCallbacks *CB = PP.getPPCallbacks();
+ CB->moduleImport(SourceLocation(), Path, ModResult);
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/FrontendOptions.cpp b/contrib/llvm-project/clang/lib/Frontend/FrontendOptions.cpp
index 37ac428a8003..bf83b27c1367 100644
--- a/contrib/llvm-project/clang/lib/Frontend/FrontendOptions.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/FrontendOptions.cpp
@@ -27,11 +27,12 @@ InputKind FrontendOptions::getInputKindForExtension(StringRef Extension) {
.Cases("C", "cc", "cp", Language::CXX)
.Cases("cpp", "CPP", "c++", "cxx", "hpp", "hxx", Language::CXX)
.Case("cppm", Language::CXX)
- .Case("iim", InputKind(Language::CXX).getPreprocessed())
+ .Cases("iim", "iih", InputKind(Language::CXX).getPreprocessed())
.Case("cl", Language::OpenCL)
.Case("clcpp", Language::OpenCLCXX)
.Cases("cu", "cuh", Language::CUDA)
.Case("hip", Language::HIP)
.Cases("ll", "bc", Language::LLVM_IR)
+ .Case("hlsl", Language::HLSL)
.Default(Language::Unknown);
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/HeaderIncludeGen.cpp b/contrib/llvm-project/clang/lib/Frontend/HeaderIncludeGen.cpp
index 1ee47d8d2480..992c2670260e 100644
--- a/contrib/llvm-project/clang/lib/Frontend/HeaderIncludeGen.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/HeaderIncludeGen.cpp
@@ -12,6 +12,7 @@
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/JSON.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -42,6 +43,62 @@ public:
delete OutputFile;
}
+ HeaderIncludesCallback(const HeaderIncludesCallback &) = delete;
+ HeaderIncludesCallback &operator=(const HeaderIncludesCallback &) = delete;
+
+ void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType,
+ FileID PrevFID) override;
+
+ void FileSkipped(const FileEntryRef &SkippedFile, const Token &FilenameTok,
+ SrcMgr::CharacteristicKind FileType) override;
+
+private:
+ bool ShouldShowHeader(SrcMgr::CharacteristicKind HeaderType) {
+ if (!DepOpts.IncludeSystemHeaders && isSystem(HeaderType))
+ return false;
+
+ // Show the current header if we are (a) past the predefines, or (b) showing
+ // all headers and in the predefines at a depth past the initial file and
+ // command line buffers.
+ return (HasProcessedPredefines ||
+ (ShowAllHeaders && CurrentIncludeDepth > 2));
+ }
+};
+
+/// A callback for emitting header usage information to a file in JSON. Each
+/// line in the file is a JSON object that includes the source file name and
+/// the list of headers directly or indirectly included from it. For example:
+///
+/// {"source":"/tmp/foo.c",
+/// "includes":["/usr/include/stdio.h", "/usr/include/stdlib.h"]}
+///
+/// To reduce the amount of data written to the file, we only record system
+/// headers that are directly included from a file that isn't in the system
+/// directory.
+class HeaderIncludesJSONCallback : public PPCallbacks {
+ SourceManager &SM;
+ raw_ostream *OutputFile;
+ bool OwnsOutputFile;
+ SmallVector<std::string, 16> IncludedHeaders;
+
+public:
+ HeaderIncludesJSONCallback(const Preprocessor *PP, raw_ostream *OutputFile_,
+ bool OwnsOutputFile_)
+ : SM(PP->getSourceManager()), OutputFile(OutputFile_),
+ OwnsOutputFile(OwnsOutputFile_) {}
+
+ ~HeaderIncludesJSONCallback() override {
+ if (OwnsOutputFile)
+ delete OutputFile;
+ }
+
+ HeaderIncludesJSONCallback(const HeaderIncludesJSONCallback &) = delete;
+ HeaderIncludesJSONCallback &
+ operator=(const HeaderIncludesJSONCallback &) = delete;
+
+ void EndOfMainFile() override;
+
void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
FileID PrevFID) override;
@@ -116,16 +173,33 @@ void clang::AttachHeaderIncludeGen(Preprocessor &PP,
}
}
- // Print header info for extra headers, pretending they were discovered by
- // the regular preprocessor. The primary use case is to support proper
- // generation of Make / Ninja file dependencies for implicit includes, such
- // as sanitizer blacklists. It's only important for cl.exe compatibility,
- // the GNU way to generate rules is -M / -MM / -MD / -MMD.
- for (const auto &Header : DepOpts.ExtraDeps)
- PrintHeaderInfo(OutputFile, Header.first, ShowDepth, 2, MSStyle);
- PP.addPPCallbacks(std::make_unique<HeaderIncludesCallback>(
- &PP, ShowAllHeaders, OutputFile, DepOpts, OwnsOutputFile, ShowDepth,
- MSStyle));
+ switch (DepOpts.HeaderIncludeFormat) {
+ case HIFMT_None:
+ llvm_unreachable("unexpected header format kind");
+ case HIFMT_Textual: {
+ assert(DepOpts.HeaderIncludeFiltering == HIFIL_None &&
+ "header filtering is currently always disabled when output format is"
+ "textual");
+ // Print header info for extra headers, pretending they were discovered by
+ // the regular preprocessor. The primary use case is to support proper
+ // generation of Make / Ninja file dependencies for implicit includes, such
+ // as sanitizer ignorelists. It's only important for cl.exe compatibility,
+ // the GNU way to generate rules is -M / -MM / -MD / -MMD.
+ for (const auto &Header : DepOpts.ExtraDeps)
+ PrintHeaderInfo(OutputFile, Header.first, ShowDepth, 2, MSStyle);
+ PP.addPPCallbacks(std::make_unique<HeaderIncludesCallback>(
+ &PP, ShowAllHeaders, OutputFile, DepOpts, OwnsOutputFile, ShowDepth,
+ MSStyle));
+ break;
+ }
+ case HIFMT_JSON: {
+ assert(DepOpts.HeaderIncludeFiltering == HIFIL_Only_Direct_System &&
+ "only-direct-system is the only option for filtering");
+ PP.addPPCallbacks(std::make_unique<HeaderIncludesJSONCallback>(
+ &PP, OutputFile, OwnsOutputFile));
+ break;
+ }
+ }
}
void HeaderIncludesCallback::FileChanged(SourceLocation Loc,
@@ -147,38 +221,24 @@ void HeaderIncludesCallback::FileChanged(SourceLocation Loc,
// We track when we are done with the predefines by watching for the first
// place where we drop back to a nesting depth of 1.
- if (CurrentIncludeDepth == 1 && !HasProcessedPredefines) {
- if (!DepOpts.ShowIncludesPretendHeader.empty()) {
- PrintHeaderInfo(OutputFile, DepOpts.ShowIncludesPretendHeader,
- ShowDepth, 2, MSStyle);
- }
+ if (CurrentIncludeDepth == 1 && !HasProcessedPredefines)
HasProcessedPredefines = true;
- }
return;
- } else
+ } else {
+ return;
+ }
+
+ if (!ShouldShowHeader(NewFileType))
return;
- // Show the header if we are (a) past the predefines, or (b) showing all
- // headers and in the predefines at a depth past the initial file and command
- // line buffers.
- bool ShowHeader = (HasProcessedPredefines ||
- (ShowAllHeaders && CurrentIncludeDepth > 2));
unsigned IncludeDepth = CurrentIncludeDepth;
if (!HasProcessedPredefines)
--IncludeDepth; // Ignore indent from <built-in>.
- else if (!DepOpts.ShowIncludesPretendHeader.empty())
- ++IncludeDepth; // Pretend inclusion by ShowIncludesPretendHeader.
-
- if (!DepOpts.IncludeSystemHeaders && isSystem(NewFileType))
- ShowHeader = false;
- // Dump the header include information we are past the predefines buffer or
- // are showing all headers and this isn't the magic implicit <command line>
- // header.
// FIXME: Identify headers in a more robust way than comparing their name to
// "<command line>" and "<built-in>" in a bunch of places.
- if (ShowHeader && Reason == PPCallbacks::EnterFile &&
+ if (Reason == PPCallbacks::EnterFile &&
UserLoc.getFilename() != StringRef("<command line>")) {
PrintHeaderInfo(OutputFile, UserLoc.getFilename(), ShowDepth, IncludeDepth,
MSStyle);
@@ -191,9 +251,71 @@ void HeaderIncludesCallback::FileSkipped(const FileEntryRef &SkippedFile, const
if (!DepOpts.ShowSkippedHeaderIncludes)
return;
- if (!DepOpts.IncludeSystemHeaders && isSystem(FileType))
+ if (!ShouldShowHeader(FileType))
return;
PrintHeaderInfo(OutputFile, SkippedFile.getName(), ShowDepth,
CurrentIncludeDepth + 1, MSStyle);
}
+
+void HeaderIncludesJSONCallback::EndOfMainFile() {
+ OptionalFileEntryRef FE = SM.getFileEntryRefForID(SM.getMainFileID());
+ SmallString<256> MainFile(FE->getName());
+ SM.getFileManager().makeAbsolutePath(MainFile);
+
+ std::string Str;
+ llvm::raw_string_ostream OS(Str);
+ llvm::json::OStream JOS(OS);
+ JOS.object([&] {
+ JOS.attribute("source", MainFile.c_str());
+ JOS.attributeArray("includes", [&] {
+ llvm::StringSet<> SeenHeaders;
+ for (const std::string &H : IncludedHeaders)
+ if (SeenHeaders.insert(H).second)
+ JOS.value(H);
+ });
+ });
+ OS << "\n";
+
+ if (OutputFile->get_kind() == raw_ostream::OStreamKind::OK_FDStream) {
+ llvm::raw_fd_ostream *FDS = static_cast<llvm::raw_fd_ostream *>(OutputFile);
+ if (auto L = FDS->lock())
+ *OutputFile << Str;
+ } else
+ *OutputFile << Str;
+}
+
+/// Determine whether the header file should be recorded. The header file should
+/// be recorded only if the header file is a system header and the current file
+/// isn't a system header.
+static bool shouldRecordNewFile(SrcMgr::CharacteristicKind NewFileType,
+ SourceLocation PrevLoc, SourceManager &SM) {
+ return SrcMgr::isSystem(NewFileType) && !SM.isInSystemHeader(PrevLoc);
+}
+
+void HeaderIncludesJSONCallback::FileChanged(
+ SourceLocation Loc, FileChangeReason Reason,
+ SrcMgr::CharacteristicKind NewFileType, FileID PrevFID) {
+ if (PrevFID.isInvalid() ||
+ !shouldRecordNewFile(NewFileType, SM.getLocForStartOfFile(PrevFID), SM))
+ return;
+
+ // Unless we are exiting a #include, make sure to skip ahead to the line the
+ // #include directive was at.
+ PresumedLoc UserLoc = SM.getPresumedLoc(Loc);
+ if (UserLoc.isInvalid())
+ return;
+
+ if (Reason == PPCallbacks::EnterFile &&
+ UserLoc.getFilename() != StringRef("<command line>"))
+ IncludedHeaders.push_back(UserLoc.getFilename());
+}
+
+void HeaderIncludesJSONCallback::FileSkipped(
+ const FileEntryRef &SkippedFile, const Token &FilenameTok,
+ SrcMgr::CharacteristicKind FileType) {
+ if (!shouldRecordNewFile(FileType, FilenameTok.getLocation(), SM))
+ return;
+
+ IncludedHeaders.push_back(SkippedFile.getName().str());
+}
diff --git a/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp b/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp
index bca0bb4ada67..1b91c86f9139 100644
--- a/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/HLSLRuntime.h"
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/SyncScope.h"
@@ -25,6 +26,7 @@
#include "clang/Serialization/ASTReader.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
using namespace clang;
static bool MacroBodyEndsInBackslash(StringRef MacroBody) {
@@ -194,7 +196,7 @@ static void DefineType(const Twine &MacroName, TargetInfo::IntType Ty,
Builder.defineMacro(MacroName, TargetInfo::getTypeName(Ty));
}
-static void DefineTypeWidth(StringRef MacroName, TargetInfo::IntType Ty,
+static void DefineTypeWidth(const Twine &MacroName, TargetInfo::IntType Ty,
const TargetInfo &TI, MacroBuilder &Builder) {
Builder.defineMacro(MacroName, Twine(TI.getTypeWidth(Ty)));
}
@@ -205,6 +207,16 @@ static void DefineTypeSizeof(StringRef MacroName, unsigned BitWidth,
Twine(BitWidth / TI.getCharWidth()));
}
+// This will generate a macro based on the prefix with `_MAX__` as the suffix
+// for the max value representable for the type, and a macro with a `_WIDTH__`
+// suffix for the width of the type.
+static void DefineTypeSizeAndWidth(const Twine &Prefix, TargetInfo::IntType Ty,
+ const TargetInfo &TI,
+ MacroBuilder &Builder) {
+ DefineTypeSize(Prefix + "_MAX__", Ty, TI, Builder);
+ DefineTypeWidth(Prefix + "_WIDTH__", Ty, TI, Builder);
+}
+
static void DefineExactWidthIntType(TargetInfo::IntType Ty,
const TargetInfo &TI,
MacroBuilder &Builder) {
@@ -241,6 +253,8 @@ static void DefineExactWidthIntTypeSize(TargetInfo::IntType Ty,
if (TypeWidth == 64)
Ty = IsSigned ? TI.getInt64Type() : TI.getUInt64Type();
+ // We don't need to define a _WIDTH macro for the exact-width types because
+ // we already know the width.
const char *Prefix = IsSigned ? "__INT" : "__UINT";
DefineTypeSize(Prefix + Twine(TypeWidth) + "_MAX__", Ty, TI, Builder);
}
@@ -254,7 +268,12 @@ static void DefineLeastWidthIntType(unsigned TypeWidth, bool IsSigned,
const char *Prefix = IsSigned ? "__INT_LEAST" : "__UINT_LEAST";
DefineType(Prefix + Twine(TypeWidth) + "_TYPE__", Ty, Builder);
- DefineTypeSize(Prefix + Twine(TypeWidth) + "_MAX__", Ty, TI, Builder);
+ // We only want the *_WIDTH macro for the signed types to avoid too many
+ // predefined macros (the unsigned width and the signed width are identical.)
+ if (IsSigned)
+ DefineTypeSizeAndWidth(Prefix + Twine(TypeWidth), Ty, TI, Builder);
+ else
+ DefineTypeSize(Prefix + Twine(TypeWidth) + "_MAX__", Ty, TI, Builder);
DefineFmt(Prefix + Twine(TypeWidth), Ty, TI, Builder);
}
@@ -268,20 +287,24 @@ static void DefineFastIntType(unsigned TypeWidth, bool IsSigned,
const char *Prefix = IsSigned ? "__INT_FAST" : "__UINT_FAST";
DefineType(Prefix + Twine(TypeWidth) + "_TYPE__", Ty, Builder);
- DefineTypeSize(Prefix + Twine(TypeWidth) + "_MAX__", Ty, TI, Builder);
-
+ // We only want the *_WIDTH macro for the signed types to avoid too many
+ // predefined macros (the unsigned width and the signed width are identical.)
+ if (IsSigned)
+ DefineTypeSizeAndWidth(Prefix + Twine(TypeWidth), Ty, TI, Builder);
+ else
+ DefineTypeSize(Prefix + Twine(TypeWidth) + "_MAX__", Ty, TI, Builder);
DefineFmt(Prefix + Twine(TypeWidth), Ty, TI, Builder);
}
/// Get the value the ATOMIC_*_LOCK_FREE macro should have for a type with
/// the specified properties.
-static const char *getLockFreeValue(unsigned TypeWidth, unsigned TypeAlign,
- unsigned InlineWidth) {
+static const char *getLockFreeValue(unsigned TypeWidth, const TargetInfo &TI) {
// Fully-aligned, power-of-2 sizes no larger than the inline
// width will be inlined as lock-free operations.
- if (TypeWidth == TypeAlign && (TypeWidth & (TypeWidth - 1)) == 0 &&
- TypeWidth <= InlineWidth)
+ // Note: we do not need to check alignment since _Atomic(T) is always
+ // appropriately-aligned in clang.
+ if (TI.hasBuiltinAtomic(TypeWidth, TypeWidth))
return "2"; // "always lock free"
// We cannot be certain what operations the lib calls might be
// able to implement as lock-free on future processors.
@@ -349,6 +372,50 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
const LangOptions &LangOpts,
const FrontendOptions &FEOpts,
MacroBuilder &Builder) {
+ if (LangOpts.HLSL) {
+ Builder.defineMacro("__hlsl_clang");
+ // HLSL Version
+ Builder.defineMacro("__HLSL_VERSION",
+ Twine((unsigned)LangOpts.getHLSLVersion()));
+
+ if (LangOpts.NativeHalfType)
+ Builder.defineMacro("__HLSL_ENABLE_16_BIT",
+ Twine((unsigned)LangOpts.getHLSLVersion()));
+
+ // Shader target information
+ // "enums" for shader stages
+ Builder.defineMacro("__SHADER_STAGE_VERTEX",
+ Twine((uint32_t)ShaderStage::Vertex));
+ Builder.defineMacro("__SHADER_STAGE_PIXEL",
+ Twine((uint32_t)ShaderStage::Pixel));
+ Builder.defineMacro("__SHADER_STAGE_GEOMETRY",
+ Twine((uint32_t)ShaderStage::Geometry));
+ Builder.defineMacro("__SHADER_STAGE_HULL",
+ Twine((uint32_t)ShaderStage::Hull));
+ Builder.defineMacro("__SHADER_STAGE_DOMAIN",
+ Twine((uint32_t)ShaderStage::Domain));
+ Builder.defineMacro("__SHADER_STAGE_COMPUTE",
+ Twine((uint32_t)ShaderStage::Compute));
+ Builder.defineMacro("__SHADER_STAGE_AMPLIFICATION",
+ Twine((uint32_t)ShaderStage::Amplification));
+ Builder.defineMacro("__SHADER_STAGE_MESH",
+ Twine((uint32_t)ShaderStage::Mesh));
+ Builder.defineMacro("__SHADER_STAGE_LIBRARY",
+ Twine((uint32_t)ShaderStage::Library));
+ // The current shader stage itself
+ uint32_t StageInteger = static_cast<uint32_t>(
+ hlsl::getStageFromEnvironment(TI.getTriple().getEnvironment()));
+
+ Builder.defineMacro("__SHADER_TARGET_STAGE", Twine(StageInteger));
+ // Add target versions
+ if (TI.getTriple().getOS() == llvm::Triple::ShaderModel) {
+ VersionTuple Version = TI.getTriple().getOSVersion();
+ Builder.defineMacro("__SHADER_TARGET_MAJOR", Twine(Version.getMajor()));
+ unsigned Minor = Version.getMinor().value_or(0);
+ Builder.defineMacro("__SHADER_TARGET_MINOR", Twine(Minor));
+ }
+ return;
+ }
// C++ [cpp.predefined]p1:
// The following macro names shall be defined by the implementation:
@@ -371,7 +438,9 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
// value is, are implementation-defined.
// (Removed in C++20.)
if (!LangOpts.CPlusPlus) {
- if (LangOpts.C17)
+ if (LangOpts.C23)
+ Builder.defineMacro("__STDC_VERSION__", "202311L");
+ else if (LangOpts.C17)
Builder.defineMacro("__STDC_VERSION__", "201710L");
else if (LangOpts.C11)
Builder.defineMacro("__STDC_VERSION__", "201112L");
@@ -381,9 +450,11 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__STDC_VERSION__", "199409L");
} else {
// -- __cplusplus
- // FIXME: Use correct value for C++23.
- if (LangOpts.CPlusPlus2b)
- Builder.defineMacro("__cplusplus", "202101L");
+ if (LangOpts.CPlusPlus26)
+ // FIXME: Use correct value for C++26.
+ Builder.defineMacro("__cplusplus", "202400L");
+ else if (LangOpts.CPlusPlus23)
+ Builder.defineMacro("__cplusplus", "202302L");
// [C++20] The integer literal 202002L.
else if (LangOpts.CPlusPlus20)
Builder.defineMacro("__cplusplus", "202002L");
@@ -433,11 +504,18 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
// OpenCL v1.0/1.1 s6.9, v1.2/2.0 s6.10: Preprocessor Directives and Macros.
if (LangOpts.OpenCL) {
if (LangOpts.CPlusPlus) {
- if (LangOpts.OpenCLCPlusPlusVersion == 100)
+ switch (LangOpts.OpenCLCPlusPlusVersion) {
+ case 100:
Builder.defineMacro("__OPENCL_CPP_VERSION__", "100");
- else
+ break;
+ case 202100:
+ Builder.defineMacro("__OPENCL_CPP_VERSION__", "202100");
+ break;
+ default:
llvm_unreachable("Unsupported C++ version for OpenCL");
+ }
Builder.defineMacro("__CL_CPP_VERSION_1_0__", "100");
+ Builder.defineMacro("__CL_CPP_VERSION_2021__", "202100");
} else {
// OpenCL v1.0 and v1.1 do not have a predefined macro to indicate the
// language standard with which the program is compiled. __OPENCL_VERSION__
@@ -490,13 +568,53 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
// Not "standard" per se, but available even with the -undef flag.
if (LangOpts.AsmPreprocessor)
Builder.defineMacro("__ASSEMBLER__");
- if (LangOpts.CUDA && !LangOpts.HIP)
- Builder.defineMacro("__CUDA__");
+ if (LangOpts.CUDA) {
+ if (LangOpts.GPURelocatableDeviceCode)
+ Builder.defineMacro("__CLANG_RDC__");
+ if (!LangOpts.HIP)
+ Builder.defineMacro("__CUDA__");
+ if (LangOpts.GPUDefaultStream ==
+ LangOptions::GPUDefaultStreamKind::PerThread)
+ Builder.defineMacro("CUDA_API_PER_THREAD_DEFAULT_STREAM");
+ }
if (LangOpts.HIP) {
Builder.defineMacro("__HIP__");
Builder.defineMacro("__HIPCC__");
- if (LangOpts.CUDAIsDevice)
+ Builder.defineMacro("__HIP_MEMORY_SCOPE_SINGLETHREAD", "1");
+ Builder.defineMacro("__HIP_MEMORY_SCOPE_WAVEFRONT", "2");
+ Builder.defineMacro("__HIP_MEMORY_SCOPE_WORKGROUP", "3");
+ Builder.defineMacro("__HIP_MEMORY_SCOPE_AGENT", "4");
+ Builder.defineMacro("__HIP_MEMORY_SCOPE_SYSTEM", "5");
+ if (LangOpts.HIPStdPar) {
+ Builder.defineMacro("__HIPSTDPAR__");
+ if (LangOpts.HIPStdParInterposeAlloc)
+ Builder.defineMacro("__HIPSTDPAR_INTERPOSE_ALLOC__");
+ }
+ if (LangOpts.CUDAIsDevice) {
Builder.defineMacro("__HIP_DEVICE_COMPILE__");
+ if (!TI.hasHIPImageSupport()) {
+ Builder.defineMacro("__HIP_NO_IMAGE_SUPPORT__", "1");
+ // Deprecated.
+ Builder.defineMacro("__HIP_NO_IMAGE_SUPPORT", "1");
+ }
+ }
+ if (LangOpts.GPUDefaultStream ==
+ LangOptions::GPUDefaultStreamKind::PerThread) {
+ Builder.defineMacro("__HIP_API_PER_THREAD_DEFAULT_STREAM__");
+ // Deprecated.
+ Builder.defineMacro("HIP_API_PER_THREAD_DEFAULT_STREAM");
+ }
+ }
+
+ if (LangOpts.OpenACC) {
+ // FIXME: When we have full support for OpenACC, we should set this to the
+ // version we support. Until then, set as '1' by default, but provide a
+ // temporary mechanism for users to override this so real-world examples can
+ // be tested against.
+ if (!LangOpts.OpenACCMacroOverride.empty())
+ Builder.defineMacro("_OPENACC", LangOpts.OpenACCMacroOverride);
+ else
+ Builder.defineMacro("_OPENACC", "1");
}
}
@@ -517,15 +635,19 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_unicode_literals", "200710L");
Builder.defineMacro("__cpp_user_defined_literals", "200809L");
Builder.defineMacro("__cpp_lambdas", "200907L");
- Builder.defineMacro("__cpp_constexpr",
- LangOpts.CPlusPlus20 ? "201907L" :
- LangOpts.CPlusPlus17 ? "201603L" :
- LangOpts.CPlusPlus14 ? "201304L" : "200704");
+ Builder.defineMacro("__cpp_constexpr", LangOpts.CPlusPlus26 ? "202306L"
+ : LangOpts.CPlusPlus23 ? "202211L"
+ : LangOpts.CPlusPlus20 ? "201907L"
+ : LangOpts.CPlusPlus17 ? "201603L"
+ : LangOpts.CPlusPlus14 ? "201304L"
+ : "200704");
Builder.defineMacro("__cpp_constexpr_in_decltype", "201711L");
Builder.defineMacro("__cpp_range_based_for",
LangOpts.CPlusPlus17 ? "201603L" : "200907");
- Builder.defineMacro("__cpp_static_assert",
- LangOpts.CPlusPlus17 ? "201411L" : "200410");
+ Builder.defineMacro("__cpp_static_assert", LangOpts.CPlusPlus26 ? "202306L"
+ : LangOpts.CPlusPlus17
+ ? "201411L"
+ : "200410");
Builder.defineMacro("__cpp_decltype", "200707L");
Builder.defineMacro("__cpp_attributes", "200809L");
Builder.defineMacro("__cpp_rvalue_references", "200610L");
@@ -584,30 +706,41 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
// C++20 features.
if (LangOpts.CPlusPlus20) {
- //Builder.defineMacro("__cpp_aggregate_paren_init", "201902L");
+ Builder.defineMacro("__cpp_aggregate_paren_init", "201902L");
+
+ // P0848 is implemented, but we're still waiting for other concepts
+ // issues to be addressed before bumping __cpp_concepts up to 202002L.
+ // Refer to the discussion of this at https://reviews.llvm.org/D128619.
Builder.defineMacro("__cpp_concepts", "201907L");
Builder.defineMacro("__cpp_conditional_explicit", "201806L");
- //Builder.defineMacro("__cpp_consteval", "201811L");
+ Builder.defineMacro("__cpp_consteval", "202211L");
Builder.defineMacro("__cpp_constexpr_dynamic_alloc", "201907L");
Builder.defineMacro("__cpp_constinit", "201907L");
- //Builder.defineMacro("__cpp_coroutines", "201902L");
+ Builder.defineMacro("__cpp_impl_coroutine", "201902L");
Builder.defineMacro("__cpp_designated_initializers", "201707L");
Builder.defineMacro("__cpp_impl_three_way_comparison", "201907L");
//Builder.defineMacro("__cpp_modules", "201907L");
Builder.defineMacro("__cpp_using_enum", "201907L");
}
- // C++2b features.
- if (LangOpts.CPlusPlus2b) {
+ // C++23 features.
+ if (LangOpts.CPlusPlus23) {
Builder.defineMacro("__cpp_implicit_move", "202011L");
Builder.defineMacro("__cpp_size_t_suffix", "202011L");
+ Builder.defineMacro("__cpp_if_consteval", "202106L");
+ Builder.defineMacro("__cpp_multidimensional_subscript", "202211L");
+ Builder.defineMacro("__cpp_auto_cast", "202110L");
}
+
+ // We provide those C++23 features as extensions in earlier language modes, so
+ // we also define their feature test macros.
+ if (LangOpts.CPlusPlus11)
+ Builder.defineMacro("__cpp_static_call_operator", "202207L");
+ Builder.defineMacro("__cpp_named_character_escapes", "202207L");
+ Builder.defineMacro("__cpp_placeholder_variables", "202306L");
+
if (LangOpts.Char8)
- Builder.defineMacro("__cpp_char8_t", "201811L");
+ Builder.defineMacro("__cpp_char8_t", "202207L");
Builder.defineMacro("__cpp_impl_destroying_delete", "201806L");
-
- // TS features.
- if (LangOpts.Coroutines)
- Builder.defineMacro("__cpp_coroutines", "201703L");
}
/// InitializeOpenCLFeatureTestMacros - Define OpenCL macros based on target
@@ -677,6 +810,13 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__ATOMIC_ACQ_REL", "4");
Builder.defineMacro("__ATOMIC_SEQ_CST", "5");
+ // Define macros for the clang atomic scopes.
+ Builder.defineMacro("__MEMORY_SCOPE_SYSTEM", "0");
+ Builder.defineMacro("__MEMORY_SCOPE_DEVICE", "1");
+ Builder.defineMacro("__MEMORY_SCOPE_WRKGRP", "2");
+ Builder.defineMacro("__MEMORY_SCOPE_WVFRNT", "3");
+ Builder.defineMacro("__MEMORY_SCOPE_SINGLE", "4");
+
// Define macros for the OpenCL memory scope.
// The values should match AtomicScopeOpenCLModel::ID enum.
static_assert(
@@ -691,6 +831,18 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__OPENCL_MEMORY_SCOPE_ALL_SVM_DEVICES", "3");
Builder.defineMacro("__OPENCL_MEMORY_SCOPE_SUB_GROUP", "4");
+ // Define macros for floating-point data classes, used in __builtin_isfpclass.
+ Builder.defineMacro("__FPCLASS_SNAN", "0x0001");
+ Builder.defineMacro("__FPCLASS_QNAN", "0x0002");
+ Builder.defineMacro("__FPCLASS_NEGINF", "0x0004");
+ Builder.defineMacro("__FPCLASS_NEGNORMAL", "0x0008");
+ Builder.defineMacro("__FPCLASS_NEGSUBNORMAL", "0x0010");
+ Builder.defineMacro("__FPCLASS_NEGZERO", "0x0020");
+ Builder.defineMacro("__FPCLASS_POSZERO", "0x0040");
+ Builder.defineMacro("__FPCLASS_POSSUBNORMAL", "0x0080");
+ Builder.defineMacro("__FPCLASS_POSNORMAL", "0x0100");
+ Builder.defineMacro("__FPCLASS_POSINF", "0x0200");
+
// Support for #pragma redefine_extname (Sun compatibility)
Builder.defineMacro("__PRAGMA_REDEFINE_EXTNAME", "1");
@@ -731,21 +883,15 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
if (version >= VersionTuple(2, 0))
Builder.defineMacro("__OBJC_GNUSTEP_RUNTIME_ABI__", "20");
else
- Builder.defineMacro("__OBJC_GNUSTEP_RUNTIME_ABI__",
- "1" + Twine(std::min(8U, version.getMinor().getValueOr(0))));
+ Builder.defineMacro(
+ "__OBJC_GNUSTEP_RUNTIME_ABI__",
+ "1" + Twine(std::min(8U, version.getMinor().value_or(0))));
}
if (LangOpts.ObjCRuntime.getKind() == ObjCRuntime::ObjFW) {
VersionTuple tuple = LangOpts.ObjCRuntime.getVersion();
-
- unsigned minor = 0;
- if (tuple.getMinor().hasValue())
- minor = tuple.getMinor().getValue();
-
- unsigned subminor = 0;
- if (tuple.getSubminor().hasValue())
- subminor = tuple.getSubminor().getValue();
-
+ unsigned minor = tuple.getMinor().value_or(0);
+ unsigned subminor = tuple.getSubminor().value_or(0);
Builder.defineMacro("__OBJFW_RUNTIME_ABI__",
Twine(tuple.getMajor() * 10000 + minor * 100 +
subminor));
@@ -851,14 +997,14 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__LITTLE_ENDIAN__");
}
- if (TI.getPointerWidth(0) == 64 && TI.getLongWidth() == 64
- && TI.getIntWidth() == 32) {
+ if (TI.getPointerWidth(LangAS::Default) == 64 && TI.getLongWidth() == 64 &&
+ TI.getIntWidth() == 32) {
Builder.defineMacro("_LP64");
Builder.defineMacro("__LP64__");
}
- if (TI.getPointerWidth(0) == 32 && TI.getLongWidth() == 32
- && TI.getIntWidth() == 32) {
+ if (TI.getPointerWidth(LangAS::Default) == 32 && TI.getLongWidth() == 32 &&
+ TI.getIntWidth() == 32) {
Builder.defineMacro("_ILP32");
Builder.defineMacro("__ILP32__");
}
@@ -867,20 +1013,34 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
assert(TI.getCharWidth() == 8 && "Only support 8-bit char so far");
Builder.defineMacro("__CHAR_BIT__", Twine(TI.getCharWidth()));
+ Builder.defineMacro("__BOOL_WIDTH__", Twine(TI.getBoolWidth()));
+ Builder.defineMacro("__SHRT_WIDTH__", Twine(TI.getShortWidth()));
+ Builder.defineMacro("__INT_WIDTH__", Twine(TI.getIntWidth()));
+ Builder.defineMacro("__LONG_WIDTH__", Twine(TI.getLongWidth()));
+ Builder.defineMacro("__LLONG_WIDTH__", Twine(TI.getLongLongWidth()));
+
+ size_t BitIntMaxWidth = TI.getMaxBitIntWidth();
+ assert(BitIntMaxWidth <= llvm::IntegerType::MAX_INT_BITS &&
+ "Target defined a max bit width larger than LLVM can support!");
+ assert(BitIntMaxWidth >= TI.getLongLongWidth() &&
+ "Target defined a max bit width smaller than the C standard allows!");
+ Builder.defineMacro("__BITINT_MAXWIDTH__", Twine(BitIntMaxWidth));
+
DefineTypeSize("__SCHAR_MAX__", TargetInfo::SignedChar, TI, Builder);
DefineTypeSize("__SHRT_MAX__", TargetInfo::SignedShort, TI, Builder);
DefineTypeSize("__INT_MAX__", TargetInfo::SignedInt, TI, Builder);
DefineTypeSize("__LONG_MAX__", TargetInfo::SignedLong, TI, Builder);
DefineTypeSize("__LONG_LONG_MAX__", TargetInfo::SignedLongLong, TI, Builder);
- DefineTypeSize("__WCHAR_MAX__", TI.getWCharType(), TI, Builder);
- DefineTypeSize("__WINT_MAX__", TI.getWIntType(), TI, Builder);
- DefineTypeSize("__INTMAX_MAX__", TI.getIntMaxType(), TI, Builder);
- DefineTypeSize("__SIZE_MAX__", TI.getSizeType(), TI, Builder);
+ DefineTypeSizeAndWidth("__WCHAR", TI.getWCharType(), TI, Builder);
+ DefineTypeSizeAndWidth("__WINT", TI.getWIntType(), TI, Builder);
+ DefineTypeSizeAndWidth("__INTMAX", TI.getIntMaxType(), TI, Builder);
+ DefineTypeSizeAndWidth("__SIZE", TI.getSizeType(), TI, Builder);
- DefineTypeSize("__UINTMAX_MAX__", TI.getUIntMaxType(), TI, Builder);
- DefineTypeSize("__PTRDIFF_MAX__", TI.getPtrDiffType(0), TI, Builder);
- DefineTypeSize("__INTPTR_MAX__", TI.getIntPtrType(), TI, Builder);
- DefineTypeSize("__UINTPTR_MAX__", TI.getUIntPtrType(), TI, Builder);
+ DefineTypeSizeAndWidth("__UINTMAX", TI.getUIntMaxType(), TI, Builder);
+ DefineTypeSizeAndWidth("__PTRDIFF", TI.getPtrDiffType(LangAS::Default), TI,
+ Builder);
+ DefineTypeSizeAndWidth("__INTPTR", TI.getIntPtrType(), TI, Builder);
+ DefineTypeSizeAndWidth("__UINTPTR", TI.getUIntPtrType(), TI, Builder);
DefineTypeSizeof("__SIZEOF_DOUBLE__", TI.getDoubleWidth(), TI, Builder);
DefineTypeSizeof("__SIZEOF_FLOAT__", TI.getFloatWidth(), TI, Builder);
@@ -888,10 +1048,12 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
DefineTypeSizeof("__SIZEOF_LONG__", TI.getLongWidth(), TI, Builder);
DefineTypeSizeof("__SIZEOF_LONG_DOUBLE__",TI.getLongDoubleWidth(),TI,Builder);
DefineTypeSizeof("__SIZEOF_LONG_LONG__", TI.getLongLongWidth(), TI, Builder);
- DefineTypeSizeof("__SIZEOF_POINTER__", TI.getPointerWidth(0), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_POINTER__", TI.getPointerWidth(LangAS::Default),
+ TI, Builder);
DefineTypeSizeof("__SIZEOF_SHORT__", TI.getShortWidth(), TI, Builder);
DefineTypeSizeof("__SIZEOF_PTRDIFF_T__",
- TI.getTypeWidth(TI.getPtrDiffType(0)), TI, Builder);
+ TI.getTypeWidth(TI.getPtrDiffType(LangAS::Default)), TI,
+ Builder);
DefineTypeSizeof("__SIZEOF_SIZE_T__",
TI.getTypeWidth(TI.getSizeType()), TI, Builder);
DefineTypeSizeof("__SIZEOF_WCHAR_T__",
@@ -909,29 +1071,29 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
DefineFmt("__UINTMAX", TI.getUIntMaxType(), TI, Builder);
Builder.defineMacro("__UINTMAX_C_SUFFIX__",
TI.getTypeConstantSuffix(TI.getUIntMaxType()));
- DefineTypeWidth("__INTMAX_WIDTH__", TI.getIntMaxType(), TI, Builder);
- DefineType("__PTRDIFF_TYPE__", TI.getPtrDiffType(0), Builder);
- DefineFmt("__PTRDIFF", TI.getPtrDiffType(0), TI, Builder);
- DefineTypeWidth("__PTRDIFF_WIDTH__", TI.getPtrDiffType(0), TI, Builder);
+ DefineType("__PTRDIFF_TYPE__", TI.getPtrDiffType(LangAS::Default), Builder);
+ DefineFmt("__PTRDIFF", TI.getPtrDiffType(LangAS::Default), TI, Builder);
DefineType("__INTPTR_TYPE__", TI.getIntPtrType(), Builder);
DefineFmt("__INTPTR", TI.getIntPtrType(), TI, Builder);
- DefineTypeWidth("__INTPTR_WIDTH__", TI.getIntPtrType(), TI, Builder);
DefineType("__SIZE_TYPE__", TI.getSizeType(), Builder);
DefineFmt("__SIZE", TI.getSizeType(), TI, Builder);
- DefineTypeWidth("__SIZE_WIDTH__", TI.getSizeType(), TI, Builder);
DefineType("__WCHAR_TYPE__", TI.getWCharType(), Builder);
- DefineTypeWidth("__WCHAR_WIDTH__", TI.getWCharType(), TI, Builder);
DefineType("__WINT_TYPE__", TI.getWIntType(), Builder);
- DefineTypeWidth("__WINT_WIDTH__", TI.getWIntType(), TI, Builder);
- DefineTypeWidth("__SIG_ATOMIC_WIDTH__", TI.getSigAtomicType(), TI, Builder);
- DefineTypeSize("__SIG_ATOMIC_MAX__", TI.getSigAtomicType(), TI, Builder);
+ DefineTypeSizeAndWidth("__SIG_ATOMIC", TI.getSigAtomicType(), TI, Builder);
DefineType("__CHAR16_TYPE__", TI.getChar16Type(), Builder);
DefineType("__CHAR32_TYPE__", TI.getChar32Type(), Builder);
- DefineTypeWidth("__UINTMAX_WIDTH__", TI.getUIntMaxType(), TI, Builder);
DefineType("__UINTPTR_TYPE__", TI.getUIntPtrType(), Builder);
DefineFmt("__UINTPTR", TI.getUIntPtrType(), TI, Builder);
- DefineTypeWidth("__UINTPTR_WIDTH__", TI.getUIntPtrType(), TI, Builder);
+
+ // The C standard requires the width of uintptr_t and intptr_t to be the same,
+ // per 7.20.2.4p1. Same for intmax_t and uintmax_t, per 7.20.2.5p1.
+ assert(TI.getTypeWidth(TI.getUIntPtrType()) ==
+ TI.getTypeWidth(TI.getIntPtrType()) &&
+ "uintptr_t and intptr_t have different widths?");
+ assert(TI.getTypeWidth(TI.getUIntMaxType()) ==
+ TI.getTypeWidth(TI.getIntMaxType()) &&
+ "uintmax_t and intmax_t have different widths?");
if (TI.hasFloat16Type())
DefineFloatMacros(Builder, "FLT16", &TI.getHalfFormat(), "F16");
@@ -941,7 +1103,7 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
// Define a __POINTER_WIDTH__ macro for stdint.h.
Builder.defineMacro("__POINTER_WIDTH__",
- Twine((int)TI.getPointerWidth(0)));
+ Twine((int)TI.getPointerWidth(LangAS::Default)));
// Define __BIGGEST_ALIGNMENT__ to be compatible with gcc.
Builder.defineMacro("__BIGGEST_ALIGNMENT__",
@@ -1019,6 +1181,9 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__USER_LABEL_PREFIX__", TI.getUserLabelPrefix());
+ if (!LangOpts.MathErrno)
+ Builder.defineMacro("__NO_MATH_ERRNO__");
+
if (LangOpts.FastMath || LangOpts.FiniteMathOnly)
Builder.defineMacro("__FINITE_MATH_ONLY__", "1");
else
@@ -1037,12 +1202,9 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
auto addLockFreeMacros = [&](const llvm::Twine &Prefix) {
// Used by libc++ and libstdc++ to implement ATOMIC_<foo>_LOCK_FREE.
- unsigned InlineWidthBits = TI.getMaxAtomicInlineWidth();
#define DEFINE_LOCK_FREE_MACRO(TYPE, Type) \
Builder.defineMacro(Prefix + #TYPE "_LOCK_FREE", \
- getLockFreeValue(TI.get##Type##Width(), \
- TI.get##Type##Align(), \
- InlineWidthBits));
+ getLockFreeValue(TI.get##Type##Width(), TI));
DEFINE_LOCK_FREE_MACRO(BOOL, Bool);
DEFINE_LOCK_FREE_MACRO(CHAR, Char);
if (LangOpts.Char8)
@@ -1054,10 +1216,9 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
DEFINE_LOCK_FREE_MACRO(INT, Int);
DEFINE_LOCK_FREE_MACRO(LONG, Long);
DEFINE_LOCK_FREE_MACRO(LLONG, LongLong);
- Builder.defineMacro(Prefix + "POINTER_LOCK_FREE",
- getLockFreeValue(TI.getPointerWidth(0),
- TI.getPointerAlign(0),
- InlineWidthBits));
+ Builder.defineMacro(
+ Prefix + "POINTER_LOCK_FREE",
+ getLockFreeValue(TI.getPointerWidth(LangAS::Default), TI));
#undef DEFINE_LOCK_FREE_MACRO
};
addLockFreeMacros("__CLANG_ATOMIC_");
@@ -1077,7 +1238,6 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
}
// Macros to control C99 numerics and <float.h>
- Builder.defineMacro("__FLT_EVAL_METHOD__", Twine(TI.getFloatEvalMethod()));
Builder.defineMacro("__FLT_RADIX__", "2");
Builder.defineMacro("__DECIMAL_DIG__", "__LDBL_DECIMAL_DIG__");
@@ -1141,10 +1301,16 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
case 45:
Builder.defineMacro("_OPENMP", "201511");
break;
- default:
- // Default version is OpenMP 5.0
+ case 50:
Builder.defineMacro("_OPENMP", "201811");
break;
+ case 52:
+ Builder.defineMacro("_OPENMP", "202111");
+ break;
+ default: // case 51:
+ // Default version is OpenMP 5.1
+ Builder.defineMacro("_OPENMP", "202011");
+ break;
}
}
@@ -1155,11 +1321,10 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__CUDA_ARCH__");
}
- // We need to communicate this to our CUDA header wrapper, which in turn
- // informs the proper CUDA headers of this choice.
- if (LangOpts.CUDADeviceApproxTranscendentals || LangOpts.FastMath) {
- Builder.defineMacro("__CLANG_CUDA_APPROX_TRANSCENDENTALS__");
- }
+ // We need to communicate this to our CUDA/HIP header wrapper, which in turn
+ // informs the proper CUDA/HIP headers of this choice.
+ if (LangOpts.GPUDeviceApproxTranscendentals)
+ Builder.defineMacro("__CLANG_GPU_APPROX_TRANSCENDENTALS__");
// Define a macro indicating that the source file is being compiled with a
// SYCL device compiler which doesn't produce host binary.
@@ -1171,7 +1336,7 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
if (LangOpts.OpenCL) {
InitializeOpenCLFeatureTestMacros(TI, LangOpts, Builder);
- if (TI.getTriple().isSPIR())
+ if (TI.getTriple().isSPIR() || TI.getTriple().isSPIRV())
Builder.defineMacro("__IMAGE_SUPPORT__");
}
@@ -1183,34 +1348,56 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__GLIBCXX_BITSIZE_INT_N_0", "128");
}
+ // ELF targets define __ELF__
+ if (TI.getTriple().isOSBinFormatELF())
+ Builder.defineMacro("__ELF__");
+
+ // Target OS macro definitions.
+ if (PPOpts.DefineTargetOSMacros) {
+ const llvm::Triple &Triple = TI.getTriple();
+#define TARGET_OS(Name, Predicate) \
+ Builder.defineMacro(#Name, (Predicate) ? "1" : "0");
+#include "clang/Basic/TargetOSMacros.def"
+#undef TARGET_OS
+ }
+
// Get other target #defines.
TI.getTargetDefines(LangOpts, Builder);
}
+static void InitializePGOProfileMacros(const CodeGenOptions &CodeGenOpts,
+ MacroBuilder &Builder) {
+ if (CodeGenOpts.hasProfileInstr())
+ Builder.defineMacro("__LLVM_INSTR_PROFILE_GENERATE");
+
+ if (CodeGenOpts.hasProfileIRUse() || CodeGenOpts.hasProfileClangUse())
+ Builder.defineMacro("__LLVM_INSTR_PROFILE_USE");
+}
+
/// InitializePreprocessor - Initialize the preprocessor getting it and the
-/// environment ready to process a single file. This returns true on error.
-///
-void clang::InitializePreprocessor(
- Preprocessor &PP, const PreprocessorOptions &InitOpts,
- const PCHContainerReader &PCHContainerRdr,
- const FrontendOptions &FEOpts) {
+/// environment ready to process a single file.
+void clang::InitializePreprocessor(Preprocessor &PP,
+ const PreprocessorOptions &InitOpts,
+ const PCHContainerReader &PCHContainerRdr,
+ const FrontendOptions &FEOpts,
+ const CodeGenOptions &CodeGenOpts) {
const LangOptions &LangOpts = PP.getLangOpts();
std::string PredefineBuffer;
PredefineBuffer.reserve(4080);
llvm::raw_string_ostream Predefines(PredefineBuffer);
MacroBuilder Builder(Predefines);
- // Emit line markers for various builtin sections of the file. We don't do
- // this in asm preprocessor mode, because "# 4" is not a line marker directive
- // in this mode.
- if (!PP.getLangOpts().AsmPreprocessor)
- Builder.append("# 1 \"<built-in>\" 3");
+ // Emit line markers for various builtin sections of the file. The 3 here
+ // marks <built-in> as being a system header, which suppresses warnings when
+ // the same macro is defined multiple times.
+ Builder.append("# 1 \"<built-in>\" 3");
// Install things like __POWERPC__, __GNUC__, etc into the macro table.
if (InitOpts.UsePredefines) {
// FIXME: This will create multiple definitions for most of the predefined
// macros. This is not the right way to handle this.
- if ((LangOpts.CUDA || LangOpts.OpenMPIsDevice || LangOpts.SYCLIsDevice) &&
+ if ((LangOpts.CUDA || LangOpts.OpenMPIsTargetDevice ||
+ LangOpts.SYCLIsDevice) &&
PP.getAuxTargetInfo())
InitializePredefinedMacros(*PP.getAuxTargetInfo(), LangOpts, FEOpts,
PP.getPreprocessorOpts(), Builder);
@@ -1240,10 +1427,14 @@ void clang::InitializePreprocessor(
InitializeStandardPredefinedMacros(PP.getTargetInfo(), PP.getLangOpts(),
FEOpts, Builder);
+ // The PGO instrumentation profile macros are driven by options
+ // -fprofile[-instr]-generate/-fcs-profile-generate/-fprofile[-instr]-use,
+ // hence they are not guarded by InitOpts.UsePredefines.
+ InitializePGOProfileMacros(CodeGenOpts, Builder);
+
// Add on the predefines from the driver. Wrap in a #line directive to report
// that they come from the command line.
- if (!PP.getLangOpts().AsmPreprocessor)
- Builder.append("# 1 \"<command line>\" 1");
+ Builder.append("# 1 \"<command line>\" 1");
// Process #define's and #undef's in the order they are given.
for (unsigned i = 0, e = InitOpts.Macros.size(); i != e; ++i) {
@@ -1255,8 +1446,7 @@ void clang::InitializePreprocessor(
}
// Exit the command line and go back to <built-in> (2 is LC_LEAVE).
- if (!PP.getLangOpts().AsmPreprocessor)
- Builder.append("# 1 \"<built-in>\" 2");
+ Builder.append("# 1 \"<built-in>\" 2");
// If -imacros are specified, include them now. These are processed before
// any -include directives.
@@ -1279,5 +1469,5 @@ void clang::InitializePreprocessor(
InitOpts.PrecompiledPreambleBytes.second);
// Copy PredefinedBuffer into the Preprocessor.
- PP.setPredefines(Predefines.str());
+ PP.setPredefines(std::move(PredefineBuffer));
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/LayoutOverrideSource.cpp b/contrib/llvm-project/clang/lib/Frontend/LayoutOverrideSource.cpp
index 76762d58fe25..a1866ec09c9d 100644
--- a/contrib/llvm-project/clang/lib/Frontend/LayoutOverrideSource.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/LayoutOverrideSource.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Frontend/LayoutOverrideSource.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/Basic/CharInfo.h"
#include "llvm/Support/raw_ostream.h"
#include <fstream>
@@ -16,16 +17,28 @@ using namespace clang;
/// Parse a simple identifier.
static std::string parseName(StringRef S) {
- if (S.empty() || !isIdentifierHead(S[0]))
+ if (S.empty() || !isAsciiIdentifierStart(S[0]))
return "";
unsigned Offset = 1;
- while (Offset < S.size() && isIdentifierBody(S[Offset]))
+ while (Offset < S.size() && isAsciiIdentifierContinue(S[Offset]))
++Offset;
return S.substr(0, Offset).str();
}
+/// Parse an unsigned integer and move S to the next non-digit character.
+static bool parseUnsigned(StringRef &S, unsigned long long &ULL) {
+ if (S.empty() || !isDigit(S[0]))
+ return false;
+ unsigned Idx = 1;
+ while (Idx < S.size() && isDigit(S[Idx]))
+ ++Idx;
+ (void)S.substr(0, Idx).getAsInteger(10, ULL);
+ S = S.substr(Idx);
+ return true;
+}
+
LayoutOverrideSource::LayoutOverrideSource(StringRef Filename) {
std::ifstream Input(Filename.str().c_str());
if (!Input.is_open())
@@ -43,7 +56,7 @@ LayoutOverrideSource::LayoutOverrideSource(StringRef Filename) {
StringRef LineStr(Line);
// Determine whether the following line will start a
- if (LineStr.find("*** Dumping AST Record Layout") != StringRef::npos) {
+ if (LineStr.contains("*** Dumping AST Record Layout")) {
// Flush the last type/layout, if there is one.
if (!CurrentType.empty())
Layouts[CurrentType] = CurrentLayout;
@@ -80,8 +93,8 @@ LayoutOverrideSource::LayoutOverrideSource(StringRef Filename) {
LineStr = LineStr.substr(Pos + strlen(" Size:"));
unsigned long long Size = 0;
- (void)LineStr.getAsInteger(10, Size);
- CurrentLayout.Size = Size;
+ if (parseUnsigned(LineStr, Size))
+ CurrentLayout.Size = Size;
continue;
}
@@ -92,12 +105,13 @@ LayoutOverrideSource::LayoutOverrideSource(StringRef Filename) {
LineStr = LineStr.substr(Pos + strlen("Alignment:"));
unsigned long long Alignment = 0;
- (void)LineStr.getAsInteger(10, Alignment);
- CurrentLayout.Align = Alignment;
+ if (parseUnsigned(LineStr, Alignment))
+ CurrentLayout.Align = Alignment;
continue;
}
- // Check for the size/alignment of the type.
+ // Check for the size/alignment of the type. The number follows "size=" or
+ // "align=" indicates number of bytes.
Pos = LineStr.find("sizeof=");
if (Pos != StringRef::npos) {
/* Skip past the sizeof= prefix. */
@@ -105,8 +119,8 @@ LayoutOverrideSource::LayoutOverrideSource(StringRef Filename) {
// Parse size.
unsigned long long Size = 0;
- (void)LineStr.getAsInteger(10, Size);
- CurrentLayout.Size = Size;
+ if (parseUnsigned(LineStr, Size))
+ CurrentLayout.Size = Size * 8;
Pos = LineStr.find("align=");
if (Pos != StringRef::npos) {
@@ -115,8 +129,8 @@ LayoutOverrideSource::LayoutOverrideSource(StringRef Filename) {
// Parse alignment.
unsigned long long Alignment = 0;
- (void)LineStr.getAsInteger(10, Alignment);
- CurrentLayout.Align = Alignment;
+ if (parseUnsigned(LineStr, Alignment))
+ CurrentLayout.Align = Alignment * 8;
}
continue;
@@ -124,25 +138,48 @@ LayoutOverrideSource::LayoutOverrideSource(StringRef Filename) {
// Check for the field offsets of the type.
Pos = LineStr.find("FieldOffsets: [");
- if (Pos == StringRef::npos)
- continue;
+ if (Pos != StringRef::npos) {
+ LineStr = LineStr.substr(Pos + strlen("FieldOffsets: ["));
+ while (!LineStr.empty() && isDigit(LineStr[0])) {
+ unsigned long long Offset = 0;
+ if (parseUnsigned(LineStr, Offset))
+ CurrentLayout.FieldOffsets.push_back(Offset);
- LineStr = LineStr.substr(Pos + strlen("FieldOffsets: ["));
- while (!LineStr.empty() && isDigit(LineStr[0])) {
- // Parse this offset.
- unsigned Idx = 1;
- while (Idx < LineStr.size() && isDigit(LineStr[Idx]))
- ++Idx;
+ // Skip over this offset, the following comma, and any spaces.
+ LineStr = LineStr.substr(1);
+ LineStr = LineStr.drop_while(isWhitespace);
+ }
+ }
- unsigned long long Offset = 0;
- (void)LineStr.substr(0, Idx).getAsInteger(10, Offset);
+ // Check for the virtual base offsets.
+ Pos = LineStr.find("VBaseOffsets: [");
+ if (Pos != StringRef::npos) {
+ LineStr = LineStr.substr(Pos + strlen("VBaseOffsets: ["));
+ while (!LineStr.empty() && isDigit(LineStr[0])) {
+ unsigned long long Offset = 0;
+ if (parseUnsigned(LineStr, Offset))
+ CurrentLayout.VBaseOffsets.push_back(CharUnits::fromQuantity(Offset));
- CurrentLayout.FieldOffsets.push_back(Offset);
+ // Skip over this offset, the following comma, and any spaces.
+ LineStr = LineStr.substr(1);
+ LineStr = LineStr.drop_while(isWhitespace);
+ }
+ continue;
+ }
- // Skip over this offset, the following comma, and any spaces.
- LineStr = LineStr.substr(Idx + 1);
- while (!LineStr.empty() && isWhitespace(LineStr[0]))
+ // Check for the base offsets.
+ Pos = LineStr.find("BaseOffsets: [");
+ if (Pos != StringRef::npos) {
+ LineStr = LineStr.substr(Pos + strlen("BaseOffsets: ["));
+ while (!LineStr.empty() && isDigit(LineStr[0])) {
+ unsigned long long Offset = 0;
+ if (parseUnsigned(LineStr, Offset))
+ CurrentLayout.BaseOffsets.push_back(CharUnits::fromQuantity(Offset));
+
+ // Skip over this offset, the following comma, and any spaces.
LineStr = LineStr.substr(1);
+ LineStr = LineStr.drop_while(isWhitespace);
+ }
}
}
@@ -182,6 +219,24 @@ LayoutOverrideSource::layoutRecordType(const RecordDecl *Record,
if (NumFields != Known->second.FieldOffsets.size())
return false;
+ // Provide base offsets.
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(Record)) {
+ unsigned NumNB = 0;
+ unsigned NumVB = 0;
+ for (const auto &I : RD->vbases()) {
+ if (NumVB >= Known->second.VBaseOffsets.size())
+ continue;
+ const CXXRecordDecl *VBase = I.getType()->getAsCXXRecordDecl();
+ VirtualBaseOffsets[VBase] = Known->second.VBaseOffsets[NumVB++];
+ }
+ for (const auto &I : RD->bases()) {
+ if (I.isVirtual() || NumNB >= Known->second.BaseOffsets.size())
+ continue;
+ const CXXRecordDecl *Base = I.getType()->getAsCXXRecordDecl();
+ BaseOffsets[Base] = Known->second.BaseOffsets[NumNB++];
+ }
+ }
+
Size = Known->second.Size;
Alignment = Known->second.Align;
return true;
diff --git a/contrib/llvm-project/clang/lib/Frontend/LogDiagnosticPrinter.cpp b/contrib/llvm-project/clang/lib/Frontend/LogDiagnosticPrinter.cpp
index df8b23691a7d..469d1c22633a 100644
--- a/contrib/llvm-project/clang/lib/Frontend/LogDiagnosticPrinter.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/LogDiagnosticPrinter.cpp
@@ -118,8 +118,7 @@ void LogDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
const SourceManager &SM = Info.getSourceManager();
FileID FID = SM.getMainFileID();
if (FID.isValid()) {
- const FileEntry *FE = SM.getFileEntryForID(FID);
- if (FE && FE->isValid())
+ if (OptionalFileEntryRef FE = SM.getFileEntryRefForID(FID))
MainFilename = std::string(FE->getName());
}
}
@@ -135,7 +134,7 @@ void LogDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
// Format the message.
SmallString<100> MessageStr;
Info.FormatDiagnostic(MessageStr);
- DE.Message = std::string(MessageStr.str());
+ DE.Message = std::string(MessageStr);
// Set the location information.
DE.Filename = "";
@@ -148,8 +147,7 @@ void LogDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
// At least print the file name if available:
FileID FID = SM.getFileID(Info.getLocation());
if (FID.isValid()) {
- const FileEntry *FE = SM.getFileEntryForID(FID);
- if (FE && FE->isValid())
+ if (OptionalFileEntryRef FE = SM.getFileEntryRefForID(FID))
DE.Filename = std::string(FE->getName());
}
} else {
diff --git a/contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp b/contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp
index 4301e49f1d80..939e611e5489 100644
--- a/contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp
@@ -26,13 +26,19 @@ namespace {
/// Private implementations for ModuleDependencyCollector
class ModuleDependencyListener : public ASTReaderListener {
ModuleDependencyCollector &Collector;
+ FileManager &FileMgr;
public:
- ModuleDependencyListener(ModuleDependencyCollector &Collector)
- : Collector(Collector) {}
+ ModuleDependencyListener(ModuleDependencyCollector &Collector,
+ FileManager &FileMgr)
+ : Collector(Collector), FileMgr(FileMgr) {}
bool needsInputFileVisitation() override { return true; }
bool needsSystemInputFileVisitation() override { return true; }
bool visitInputFile(StringRef Filename, bool IsSystem, bool IsOverridden,
bool IsExplicitModule) override {
+ // Run this through the FileManager in order to respect 'use-external-name'
+ // in case we have a VFS overlay.
+ if (auto FE = FileMgr.getOptionalFileRef(Filename))
+ Filename = FE->getName();
Collector.addFile(Filename);
return true;
}
@@ -47,9 +53,9 @@ struct ModuleDependencyPPCallbacks : public PPCallbacks {
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
- CharSourceRange FilenameRange, const FileEntry *File,
- StringRef SearchPath, StringRef RelativePath,
- const Module *Imported,
+ CharSourceRange FilenameRange,
+ OptionalFileEntryRef File, StringRef SearchPath,
+ StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) override {
if (!File)
return;
@@ -66,40 +72,16 @@ struct ModuleDependencyMMCallbacks : public ModuleMapCallbacks {
if (llvm::sys::path::is_absolute(HeaderPath))
Collector.addFile(HeaderPath);
}
- void moduleMapAddUmbrellaHeader(FileManager *FileMgr,
- const FileEntry *Header) override {
- StringRef HeaderFilename = Header->getName();
- moduleMapAddHeader(HeaderFilename);
- // The FileManager can find and cache the symbolic link for a framework
- // header before its real path, this means a module can have some of its
- // headers to use other paths. Although this is usually not a problem, it's
- // inconsistent, and not collecting the original path header leads to
- // umbrella clashes while rebuilding modules in the crash reproducer. For
- // example:
- // ApplicationServices.framework/Frameworks/ImageIO.framework/ImageIO.h
- // instead of:
- // ImageIO.framework/ImageIO.h
- //
- // FIXME: this shouldn't be necessary once we have FileName instances
- // around instead of FileEntry ones. For now, make sure we collect all
- // that we need for the reproducer to work correctly.
- StringRef UmbreallDirFromHeader =
- llvm::sys::path::parent_path(HeaderFilename);
- StringRef UmbrellaDir = Header->getDir()->getName();
- if (!UmbrellaDir.equals(UmbreallDirFromHeader)) {
- SmallString<128> AltHeaderFilename;
- llvm::sys::path::append(AltHeaderFilename, UmbrellaDir,
- llvm::sys::path::filename(HeaderFilename));
- if (FileMgr->getFile(AltHeaderFilename))
- moduleMapAddHeader(AltHeaderFilename);
- }
+ void moduleMapAddUmbrellaHeader(FileEntryRef Header) override {
+ moduleMapAddHeader(Header.getNameAsRequested());
}
};
-}
+} // namespace
void ModuleDependencyCollector::attachToASTReader(ASTReader &R) {
- R.addListener(std::make_unique<ModuleDependencyListener>(*this));
+ R.addListener(
+ std::make_unique<ModuleDependencyListener>(*this, R.getFileManager()));
}
void ModuleDependencyCollector::attachToPreprocessor(Preprocessor &PP) {
diff --git a/contrib/llvm-project/clang/lib/Frontend/MultiplexConsumer.cpp b/contrib/llvm-project/clang/lib/Frontend/MultiplexConsumer.cpp
index 5abbb3a235b4..737877329c9c 100644
--- a/contrib/llvm-project/clang/lib/Frontend/MultiplexConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/MultiplexConsumer.cpp
@@ -73,6 +73,12 @@ void MultiplexASTDeserializationListener::ModuleRead(
Listener->ModuleRead(ID, Mod);
}
+void MultiplexASTDeserializationListener::ModuleImportRead(
+ serialization::SubmoduleID ID, SourceLocation ImportLoc) {
+ for (auto &Listener : Listeners)
+ Listener->ModuleImportRead(ID, ImportLoc);
+}
+
// This ASTMutationListener forwards its notifications to a set of
// child listeners.
class MultiplexASTMutationListener : public ASTMutationListener {
@@ -236,10 +242,10 @@ void MultiplexASTMutationListener::AddedAttributeToRecord(
MultiplexConsumer::MultiplexConsumer(
std::vector<std::unique_ptr<ASTConsumer>> C)
- : Consumers(std::move(C)), MutationListener(), DeserializationListener() {
+ : Consumers(std::move(C)) {
// Collect the mutation listeners and deserialization listeners of all
// children, and create a multiplex listener each if so.
- std::vector<ASTMutationListener*> mutationListeners;
+ std::vector<ASTMutationListener *> mutationListeners;
std::vector<ASTDeserializationListener*> serializationListeners;
for (auto &Consumer : Consumers) {
if (auto *mutationListener = Consumer->GetASTMutationListener())
diff --git a/contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp b/contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp
index af82ab3f5558..62373b23b82e 100644
--- a/contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp
@@ -11,10 +11,8 @@
//===----------------------------------------------------------------------===//
#include "clang/Frontend/PrecompiledPreamble.h"
-#include "clang/AST/DeclObjC.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LangStandard.h"
-#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/FrontendActions.h"
@@ -25,7 +23,6 @@
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Serialization/ASTWriter.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Config/llvm-config.h"
@@ -99,13 +96,13 @@ public:
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
- CharSourceRange FilenameRange, const FileEntry *File,
- StringRef SearchPath, StringRef RelativePath,
- const Module *Imported,
+ CharSourceRange FilenameRange,
+ OptionalFileEntryRef File, StringRef SearchPath,
+ StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) override {
- // File is null if it wasn't found.
+ // File is std::nullopt if it wasn't found.
// (We have some false negatives if PP recovered e.g. <foo> -> "foo")
- if (File != nullptr)
+ if (File)
return;
// If it's a rare absolute include, we know the full path already.
@@ -116,16 +113,16 @@ public:
// Reconstruct the filenames that would satisfy this directive...
llvm::SmallString<256> Buf;
- auto NotFoundRelativeTo = [&](const DirectoryEntry *DE) {
- Buf = DE->getName();
+ auto NotFoundRelativeTo = [&](DirectoryEntryRef DE) {
+ Buf = DE.getName();
llvm::sys::path::append(Buf, FileName);
llvm::sys::path::remove_dots(Buf, /*remove_dot_dot=*/true);
Out.insert(Buf);
};
// ...relative to the including file.
if (!IsAngled) {
- if (const FileEntry *IncludingFile =
- SM.getFileEntryForID(SM.getFileID(IncludeTok.getLocation())))
+ if (OptionalFileEntryRef IncludingFile =
+ SM.getFileEntryRefForID(SM.getFileID(IncludeTok.getLocation())))
if (IncludingFile->getDir())
NotFoundRelativeTo(IncludingFile->getDir());
}
@@ -135,7 +132,7 @@ public:
Search.search_dir_end())) {
// No support for frameworks or header maps yet.
if (Dir.isNormalDir())
- NotFoundRelativeTo(Dir.getDir());
+ NotFoundRelativeTo(*Dir.getDirRef());
}
}
};
@@ -192,11 +189,67 @@ void TemporaryFiles::removeFile(StringRef File) {
llvm::sys::fs::remove(File);
}
+// A temp file that would be deleted on destructor call. If destructor is not
+// called for any reason, the file will be deleted at static objects'
+// destruction.
+// An assertion will fire if two TempPCHFiles are created with the same name,
+// so it's not intended to be used outside preamble-handling.
+class TempPCHFile {
+public:
+ // A main method used to construct TempPCHFile.
+ static std::unique_ptr<TempPCHFile> create(StringRef StoragePath) {
+ // FIXME: This is a hack so that we can override the preamble file during
+ // crash-recovery testing, which is the only case where the preamble files
+ // are not necessarily cleaned up.
+ if (const char *TmpFile = ::getenv("CINDEXTEST_PREAMBLE_FILE"))
+ return std::unique_ptr<TempPCHFile>(new TempPCHFile(TmpFile));
+
+ llvm::SmallString<128> File;
+ // Using the versions of createTemporaryFile() and
+ // createUniqueFile() with a file descriptor guarantees
+ // that we would never get a race condition in a multi-threaded setting
+ // (i.e., multiple threads getting the same temporary path).
+ int FD;
+ std::error_code EC;
+ if (StoragePath.empty())
+ EC = llvm::sys::fs::createTemporaryFile("preamble", "pch", FD, File);
+ else {
+ llvm::SmallString<128> TempPath = StoragePath;
+ // Use the same filename model as fs::createTemporaryFile().
+ llvm::sys::path::append(TempPath, "preamble-%%%%%%.pch");
+ namespace fs = llvm::sys::fs;
+ // Use the same owner-only file permissions as fs::createTemporaryFile().
+ EC = fs::createUniqueFile(TempPath, FD, File, fs::OF_None,
+ fs::owner_read | fs::owner_write);
+ }
+ if (EC)
+ return nullptr;
+ // We only needed to make sure the file exists, close the file right away.
+ llvm::sys::Process::SafelyCloseFileDescriptor(FD);
+ return std::unique_ptr<TempPCHFile>(new TempPCHFile(File.str().str()));
+ }
+
+ TempPCHFile &operator=(const TempPCHFile &) = delete;
+ TempPCHFile(const TempPCHFile &) = delete;
+ ~TempPCHFile() { TemporaryFiles::getInstance().removeFile(FilePath); };
+
+ /// A path where temporary file is stored.
+ llvm::StringRef getFilePath() const { return FilePath; };
+
+private:
+ TempPCHFile(std::string FilePath) : FilePath(std::move(FilePath)) {
+ TemporaryFiles::getInstance().addFile(this->FilePath);
+ }
+
+ std::string FilePath;
+};
+
class PrecompilePreambleAction : public ASTFrontendAction {
public:
- PrecompilePreambleAction(std::string *InMemStorage,
+ PrecompilePreambleAction(std::shared_ptr<PCHBuffer> Buffer, bool WritePCHFile,
PreambleCallbacks &Callbacks)
- : InMemStorage(InMemStorage), Callbacks(Callbacks) {}
+ : Buffer(std::move(Buffer)), WritePCHFile(WritePCHFile),
+ Callbacks(Callbacks) {}
std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) override;
@@ -204,6 +257,12 @@ public:
bool hasEmittedPreamblePCH() const { return HasEmittedPreamblePCH; }
void setEmittedPreamblePCH(ASTWriter &Writer) {
+ if (FileOS) {
+ *FileOS << Buffer->Data;
+ // Make sure it hits disk now.
+ FileOS.reset();
+ }
+
this->HasEmittedPreamblePCH = true;
Callbacks.AfterPCHEmitted(Writer);
}
@@ -222,7 +281,9 @@ private:
friend class PrecompilePreambleConsumer;
bool HasEmittedPreamblePCH = false;
- std::string *InMemStorage;
+ std::shared_ptr<PCHBuffer> Buffer;
+ bool WritePCHFile; // otherwise the PCH is written into the PCHBuffer only.
+ std::unique_ptr<llvm::raw_pwrite_stream> FileOS; // null if in-memory
PreambleCallbacks &Callbacks;
};
@@ -232,12 +293,11 @@ public:
const Preprocessor &PP,
InMemoryModuleCache &ModuleCache,
StringRef isysroot,
- std::unique_ptr<raw_ostream> Out)
- : PCHGenerator(PP, ModuleCache, "", isysroot,
- std::make_shared<PCHBuffer>(),
+ std::shared_ptr<PCHBuffer> Buffer)
+ : PCHGenerator(PP, ModuleCache, "", isysroot, std::move(Buffer),
ArrayRef<std::shared_ptr<ModuleFileExtension>>(),
/*AllowASTWithErrors=*/true),
- Action(Action), Out(std::move(Out)) {}
+ Action(Action) {}
bool HandleTopLevelDecl(DeclGroupRef DG) override {
Action.Callbacks.HandleTopLevelDecl(DG);
@@ -248,15 +308,6 @@ public:
PCHGenerator::HandleTranslationUnit(Ctx);
if (!hasEmittedPCH())
return;
-
- // Write the generated bitstream to "Out".
- *Out << getPCH();
- // Make sure it hits disk now.
- Out->flush();
- // Free the buffer.
- llvm::SmallVector<char, 0> Empty;
- getPCH() = std::move(Empty);
-
Action.setEmittedPreamblePCH(getWriter());
}
@@ -266,7 +317,6 @@ public:
private:
PrecompilePreambleAction &Action;
- std::unique_ptr<raw_ostream> Out;
};
std::unique_ptr<ASTConsumer>
@@ -276,21 +326,18 @@ PrecompilePreambleAction::CreateASTConsumer(CompilerInstance &CI,
if (!GeneratePCHAction::ComputeASTConsumerArguments(CI, Sysroot))
return nullptr;
- std::unique_ptr<llvm::raw_ostream> OS;
- if (InMemStorage) {
- OS = std::make_unique<llvm::raw_string_ostream>(*InMemStorage);
- } else {
- std::string OutputFile;
- OS = GeneratePCHAction::CreateOutputFile(CI, InFile, OutputFile);
+ if (WritePCHFile) {
+ std::string OutputFile; // unused
+ FileOS = GeneratePCHAction::CreateOutputFile(CI, InFile, OutputFile);
+ if (!FileOS)
+ return nullptr;
}
- if (!OS)
- return nullptr;
if (!CI.getFrontendOpts().RelocatablePCH)
Sysroot.clear();
return std::make_unique<PrecompilePreambleConsumer>(
- *this, CI.getPreprocessor(), CI.getModuleCache(), Sysroot, std::move(OS));
+ *this, CI.getPreprocessor(), CI.getModuleCache(), Sysroot, Buffer);
}
template <class T> bool moveOnNoError(llvm::ErrorOr<T> Val, T &Output) {
@@ -308,13 +355,67 @@ PreambleBounds clang::ComputePreambleBounds(const LangOptions &LangOpts,
return Lexer::ComputePreamble(Buffer.getBuffer(), LangOpts, MaxLines);
}
+class PrecompiledPreamble::PCHStorage {
+public:
+ static std::unique_ptr<PCHStorage> file(std::unique_ptr<TempPCHFile> File) {
+ assert(File);
+ std::unique_ptr<PCHStorage> S(new PCHStorage());
+ S->File = std::move(File);
+ return S;
+ }
+ static std::unique_ptr<PCHStorage> inMemory(std::shared_ptr<PCHBuffer> Buf) {
+ std::unique_ptr<PCHStorage> S(new PCHStorage());
+ S->Memory = std::move(Buf);
+ return S;
+ }
+
+ enum class Kind { InMemory, TempFile };
+ Kind getKind() const {
+ if (Memory)
+ return Kind::InMemory;
+ if (File)
+ return Kind::TempFile;
+ llvm_unreachable("Neither Memory nor File?");
+ }
+ llvm::StringRef filePath() const {
+ assert(getKind() == Kind::TempFile);
+ return File->getFilePath();
+ }
+ llvm::StringRef memoryContents() const {
+ assert(getKind() == Kind::InMemory);
+ return StringRef(Memory->Data.data(), Memory->Data.size());
+ }
+
+ // Shrink in-memory buffers to fit.
+ // This incurs a copy, but preambles tend to be long-lived.
+ // Only safe to call once nothing can alias the buffer.
+ void shrink() {
+ if (!Memory)
+ return;
+ Memory->Data = decltype(Memory->Data)(Memory->Data);
+ }
+
+private:
+ PCHStorage() = default;
+ PCHStorage(const PCHStorage &) = delete;
+ PCHStorage &operator=(const PCHStorage &) = delete;
+
+ std::shared_ptr<PCHBuffer> Memory;
+ std::unique_ptr<TempPCHFile> File;
+};
+
+PrecompiledPreamble::~PrecompiledPreamble() = default;
+PrecompiledPreamble::PrecompiledPreamble(PrecompiledPreamble &&) = default;
+PrecompiledPreamble &
+PrecompiledPreamble::operator=(PrecompiledPreamble &&) = default;
+
llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
const CompilerInvocation &Invocation,
const llvm::MemoryBuffer *MainFileBuffer, PreambleBounds Bounds,
DiagnosticsEngine &Diagnostics,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS,
std::shared_ptr<PCHContainerOperations> PCHContainerOps, bool StoreInMemory,
- PreambleCallbacks &Callbacks) {
+ StringRef StoragePath, PreambleCallbacks &Callbacks) {
assert(VFS && "VFS is null");
auto PreambleInvocation = std::make_shared<CompilerInvocation>(Invocation);
@@ -322,20 +423,20 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
PreprocessorOptions &PreprocessorOpts =
PreambleInvocation->getPreprocessorOpts();
- llvm::Optional<TempPCHFile> TempFile;
- if (!StoreInMemory) {
+ std::shared_ptr<PCHBuffer> Buffer = std::make_shared<PCHBuffer>();
+ std::unique_ptr<PCHStorage> Storage;
+ if (StoreInMemory) {
+ Storage = PCHStorage::inMemory(Buffer);
+ } else {
// Create a temporary file for the precompiled preamble. In rare
// circumstances, this can fail.
- llvm::ErrorOr<PrecompiledPreamble::TempPCHFile> PreamblePCHFile =
- PrecompiledPreamble::TempPCHFile::CreateNewPreamblePCHFile();
+ std::unique_ptr<TempPCHFile> PreamblePCHFile =
+ TempPCHFile::create(StoragePath);
if (!PreamblePCHFile)
return BuildPreambleError::CouldntCreateTempFile;
- TempFile = std::move(*PreamblePCHFile);
+ Storage = PCHStorage::file(std::move(PreamblePCHFile));
}
- PCHStorage Storage = StoreInMemory ? PCHStorage(InMemoryPreamble())
- : PCHStorage(std::move(*TempFile));
-
// Save the preamble text for later; we'll need to compare against it for
// subsequent reparses.
std::vector<char> PreambleBytes(MainFileBuffer->getBufferStart(),
@@ -345,9 +446,8 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
// Tell the compiler invocation to generate a temporary precompiled header.
FrontendOpts.ProgramAction = frontend::GeneratePCH;
- FrontendOpts.OutputFile =
- std::string(StoreInMemory ? getInMemoryPreamblePath()
- : Storage.asFile().getFilePath());
+ FrontendOpts.OutputFile = std::string(
+ StoreInMemory ? getInMemoryPreamblePath() : Storage->filePath());
PreprocessorOpts.PrecompiledPreambleBytes.first = 0;
PreprocessorOpts.PrecompiledPreambleBytes.second = false;
// Inform preprocessor to record conditional stack when building the preamble.
@@ -409,13 +509,17 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
PreambleInputBuffer.release());
}
- std::unique_ptr<PrecompilePreambleAction> Act;
- Act.reset(new PrecompilePreambleAction(
- StoreInMemory ? &Storage.asMemory().Data : nullptr, Callbacks));
- Callbacks.BeforeExecute(*Clang);
+ auto Act = std::make_unique<PrecompilePreambleAction>(
+ std::move(Buffer),
+ /*WritePCHFile=*/Storage->getKind() == PCHStorage::Kind::TempFile,
+ Callbacks);
if (!Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0]))
return BuildPreambleError::BeginSourceFileFailed;
+ // Performed after BeginSourceFile to ensure Clang->Preprocessor can be
+ // referenced in the callback.
+ Callbacks.BeforeExecute(*Clang);
+
std::unique_ptr<PPCallbacks> DelegatedPPCallbacks =
Callbacks.createPPCallbacks();
if (DelegatedPPCallbacks)
@@ -438,6 +542,7 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
if (!Act->hasEmittedPreamblePCH())
return BuildPreambleError::CouldntEmitPCH;
+ Act.reset(); // Frees the PCH buffer, unless Storage keeps it in memory.
// Keep track of all of the files that the source manager knows about,
// so we can verify whether they have changed or not.
@@ -445,23 +550,28 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
SourceManager &SourceMgr = Clang->getSourceManager();
for (auto &Filename : PreambleDepCollector->getDependencies()) {
- auto FileOrErr = Clang->getFileManager().getFile(Filename);
- if (!FileOrErr ||
- *FileOrErr == SourceMgr.getFileEntryForID(SourceMgr.getMainFileID()))
+ auto MaybeFile = Clang->getFileManager().getOptionalFileRef(Filename);
+ if (!MaybeFile ||
+ MaybeFile == SourceMgr.getFileEntryRefForID(SourceMgr.getMainFileID()))
continue;
- auto File = *FileOrErr;
- if (time_t ModTime = File->getModificationTime()) {
- FilesInPreamble[File->getName()] =
- PrecompiledPreamble::PreambleFileHash::createForFile(File->getSize(),
+ auto File = *MaybeFile;
+ if (time_t ModTime = File.getModificationTime()) {
+ FilesInPreamble[File.getName()] =
+ PrecompiledPreamble::PreambleFileHash::createForFile(File.getSize(),
ModTime);
} else {
llvm::MemoryBufferRef Buffer =
SourceMgr.getMemoryBufferForFileOrFake(File);
- FilesInPreamble[File->getName()] =
+ FilesInPreamble[File.getName()] =
PrecompiledPreamble::PreambleFileHash::createForMemoryBuffer(Buffer);
}
}
+ // Shrinking the storage requires extra temporary memory.
+ // Destroying clang first reduces peak memory usage.
+ CICleanup.unregister();
+ Clang.reset();
+ Storage->shrink();
return PrecompiledPreamble(
std::move(Storage), std::move(PreambleBytes), PreambleEndsAtStartOfLine,
std::move(FilesInPreamble), std::move(MissingFiles));
@@ -472,16 +582,12 @@ PreambleBounds PrecompiledPreamble::getBounds() const {
}
std::size_t PrecompiledPreamble::getSize() const {
- switch (Storage.getKind()) {
- case PCHStorage::Kind::Empty:
- assert(false && "Calling getSize() on invalid PrecompiledPreamble. "
- "Was it std::moved?");
- return 0;
+ switch (Storage->getKind()) {
case PCHStorage::Kind::InMemory:
- return Storage.asMemory().Data.size();
+ return Storage->memoryContents().size();
case PCHStorage::Kind::TempFile: {
uint64_t Result;
- if (llvm::sys::fs::file_size(Storage.asFile().getFilePath(), Result))
+ if (llvm::sys::fs::file_size(Storage->filePath(), Result))
return 0;
assert(Result <= std::numeric_limits<std::size_t>::max() &&
@@ -613,12 +719,12 @@ void PrecompiledPreamble::AddImplicitPreamble(
void PrecompiledPreamble::OverridePreamble(
CompilerInvocation &CI, IntrusiveRefCntPtr<llvm::vfs::FileSystem> &VFS,
llvm::MemoryBuffer *MainFileBuffer) const {
- auto Bounds = ComputePreambleBounds(*CI.getLangOpts(), *MainFileBuffer, 0);
+ auto Bounds = ComputePreambleBounds(CI.getLangOpts(), *MainFileBuffer, 0);
configurePreamble(Bounds, CI, VFS, MainFileBuffer);
}
PrecompiledPreamble::PrecompiledPreamble(
- PCHStorage Storage, std::vector<char> PreambleBytes,
+ std::unique_ptr<PCHStorage> Storage, std::vector<char> PreambleBytes,
bool PreambleEndsAtStartOfLine,
llvm::StringMap<PreambleFileHash> FilesInPreamble,
llvm::StringSet<> MissingFiles)
@@ -626,142 +732,7 @@ PrecompiledPreamble::PrecompiledPreamble(
MissingFiles(std::move(MissingFiles)),
PreambleBytes(std::move(PreambleBytes)),
PreambleEndsAtStartOfLine(PreambleEndsAtStartOfLine) {
- assert(this->Storage.getKind() != PCHStorage::Kind::Empty);
-}
-
-llvm::ErrorOr<PrecompiledPreamble::TempPCHFile>
-PrecompiledPreamble::TempPCHFile::CreateNewPreamblePCHFile() {
- // FIXME: This is a hack so that we can override the preamble file during
- // crash-recovery testing, which is the only case where the preamble files
- // are not necessarily cleaned up.
- if (const char *TmpFile = ::getenv("CINDEXTEST_PREAMBLE_FILE"))
- return TempPCHFile(TmpFile);
-
- llvm::SmallString<64> File;
- // Using a version of createTemporaryFile with a file descriptor guarantees
- // that we would never get a race condition in a multi-threaded setting
- // (i.e., multiple threads getting the same temporary path).
- int FD;
- auto EC = llvm::sys::fs::createTemporaryFile("preamble", "pch", FD, File);
- if (EC)
- return EC;
- // We only needed to make sure the file exists, close the file right away.
- llvm::sys::Process::SafelyCloseFileDescriptor(FD);
- return TempPCHFile(std::string(std::move(File).str()));
-}
-
-PrecompiledPreamble::TempPCHFile::TempPCHFile(std::string FilePath)
- : FilePath(std::move(FilePath)) {
- TemporaryFiles::getInstance().addFile(*this->FilePath);
-}
-
-PrecompiledPreamble::TempPCHFile::TempPCHFile(TempPCHFile &&Other) {
- FilePath = std::move(Other.FilePath);
- Other.FilePath = None;
-}
-
-PrecompiledPreamble::TempPCHFile &PrecompiledPreamble::TempPCHFile::
-operator=(TempPCHFile &&Other) {
- RemoveFileIfPresent();
-
- FilePath = std::move(Other.FilePath);
- Other.FilePath = None;
- return *this;
-}
-
-PrecompiledPreamble::TempPCHFile::~TempPCHFile() { RemoveFileIfPresent(); }
-
-void PrecompiledPreamble::TempPCHFile::RemoveFileIfPresent() {
- if (FilePath) {
- TemporaryFiles::getInstance().removeFile(*FilePath);
- FilePath = None;
- }
-}
-
-llvm::StringRef PrecompiledPreamble::TempPCHFile::getFilePath() const {
- assert(FilePath && "TempPCHFile doesn't have a FilePath. Had it been moved?");
- return *FilePath;
-}
-
-PrecompiledPreamble::PCHStorage::PCHStorage(TempPCHFile File)
- : StorageKind(Kind::TempFile) {
- new (&asFile()) TempPCHFile(std::move(File));
-}
-
-PrecompiledPreamble::PCHStorage::PCHStorage(InMemoryPreamble Memory)
- : StorageKind(Kind::InMemory) {
- new (&asMemory()) InMemoryPreamble(std::move(Memory));
-}
-
-PrecompiledPreamble::PCHStorage::PCHStorage(PCHStorage &&Other) : PCHStorage() {
- *this = std::move(Other);
-}
-
-PrecompiledPreamble::PCHStorage &PrecompiledPreamble::PCHStorage::
-operator=(PCHStorage &&Other) {
- destroy();
-
- StorageKind = Other.StorageKind;
- switch (StorageKind) {
- case Kind::Empty:
- // do nothing;
- break;
- case Kind::TempFile:
- new (&asFile()) TempPCHFile(std::move(Other.asFile()));
- break;
- case Kind::InMemory:
- new (&asMemory()) InMemoryPreamble(std::move(Other.asMemory()));
- break;
- }
-
- Other.setEmpty();
- return *this;
-}
-
-PrecompiledPreamble::PCHStorage::~PCHStorage() { destroy(); }
-
-PrecompiledPreamble::PCHStorage::Kind
-PrecompiledPreamble::PCHStorage::getKind() const {
- return StorageKind;
-}
-
-PrecompiledPreamble::TempPCHFile &PrecompiledPreamble::PCHStorage::asFile() {
- assert(getKind() == Kind::TempFile);
- return *reinterpret_cast<TempPCHFile *>(&Storage);
-}
-
-const PrecompiledPreamble::TempPCHFile &
-PrecompiledPreamble::PCHStorage::asFile() const {
- return const_cast<PCHStorage *>(this)->asFile();
-}
-
-PrecompiledPreamble::InMemoryPreamble &
-PrecompiledPreamble::PCHStorage::asMemory() {
- assert(getKind() == Kind::InMemory);
- return *reinterpret_cast<InMemoryPreamble *>(&Storage);
-}
-
-const PrecompiledPreamble::InMemoryPreamble &
-PrecompiledPreamble::PCHStorage::asMemory() const {
- return const_cast<PCHStorage *>(this)->asMemory();
-}
-
-void PrecompiledPreamble::PCHStorage::destroy() {
- switch (StorageKind) {
- case Kind::Empty:
- return;
- case Kind::TempFile:
- asFile().~TempPCHFile();
- return;
- case Kind::InMemory:
- asMemory().~InMemoryPreamble();
- return;
- }
-}
-
-void PrecompiledPreamble::PCHStorage::setEmpty() {
- destroy();
- StorageKind = Kind::Empty;
+ assert(this->Storage != nullptr);
}
PrecompiledPreamble::PreambleFileHash
@@ -807,20 +778,23 @@ void PrecompiledPreamble::configurePreamble(
PreprocessorOpts.DisablePCHOrModuleValidation =
DisableValidationForModuleKind::PCH;
- setupPreambleStorage(Storage, PreprocessorOpts, VFS);
+ // Don't bother generating the long version of the predefines buffer.
+ // The preamble is going to overwrite it anyway.
+ PreprocessorOpts.UsePredefines = false;
+
+ setupPreambleStorage(*Storage, PreprocessorOpts, VFS);
}
void PrecompiledPreamble::setupPreambleStorage(
const PCHStorage &Storage, PreprocessorOptions &PreprocessorOpts,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> &VFS) {
if (Storage.getKind() == PCHStorage::Kind::TempFile) {
- const TempPCHFile &PCHFile = Storage.asFile();
- PreprocessorOpts.ImplicitPCHInclude = std::string(PCHFile.getFilePath());
+ llvm::StringRef PCHPath = Storage.filePath();
+ PreprocessorOpts.ImplicitPCHInclude = PCHPath.str();
// Make sure we can access the PCH file even if we're using a VFS
IntrusiveRefCntPtr<llvm::vfs::FileSystem> RealFS =
llvm::vfs::getRealFileSystem();
- auto PCHPath = PCHFile.getFilePath();
if (VFS == RealFS || VFS->exists(PCHPath))
return;
auto Buf = RealFS->getBufferForFile(PCHPath);
@@ -841,7 +815,8 @@ void PrecompiledPreamble::setupPreambleStorage(
StringRef PCHPath = getInMemoryPreamblePath();
PreprocessorOpts.ImplicitPCHInclude = std::string(PCHPath);
- auto Buf = llvm::MemoryBuffer::getMemBuffer(Storage.asMemory().Data);
+ auto Buf = llvm::MemoryBuffer::getMemBuffer(
+ Storage.memoryContents(), PCHPath, /*RequiresNullTerminator=*/false);
VFS = createVFSOverlayForPreamblePCH(PCHPath, std::move(Buf), VFS);
}
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp b/contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp
index 24ea1ccba207..7f5f66906823 100644
--- a/contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -32,42 +32,42 @@ using namespace clang;
/// PrintMacroDefinition - Print a macro definition in a form that will be
/// properly accepted back as a definition.
static void PrintMacroDefinition(const IdentifierInfo &II, const MacroInfo &MI,
- Preprocessor &PP, raw_ostream &OS) {
- OS << "#define " << II.getName();
+ Preprocessor &PP, raw_ostream *OS) {
+ *OS << "#define " << II.getName();
if (MI.isFunctionLike()) {
- OS << '(';
+ *OS << '(';
if (!MI.param_empty()) {
MacroInfo::param_iterator AI = MI.param_begin(), E = MI.param_end();
for (; AI+1 != E; ++AI) {
- OS << (*AI)->getName();
- OS << ',';
+ *OS << (*AI)->getName();
+ *OS << ',';
}
// Last argument.
if ((*AI)->getName() == "__VA_ARGS__")
- OS << "...";
+ *OS << "...";
else
- OS << (*AI)->getName();
+ *OS << (*AI)->getName();
}
if (MI.isGNUVarargs())
- OS << "..."; // #define foo(x...)
+ *OS << "..."; // #define foo(x...)
- OS << ')';
+ *OS << ')';
}
// GCC always emits a space, even if the macro body is empty. However, do not
// want to emit two spaces if the first token has a leading space.
if (MI.tokens_empty() || !MI.tokens_begin()->hasLeadingSpace())
- OS << ' ';
+ *OS << ' ';
SmallString<128> SpellingBuffer;
for (const auto &T : MI.tokens()) {
if (T.hasLeadingSpace())
- OS << ' ';
+ *OS << ' ';
- OS << PP.getSpelling(T, SpellingBuffer);
+ *OS << PP.getSpelling(T, SpellingBuffer);
}
}
@@ -81,7 +81,7 @@ class PrintPPOutputPPCallbacks : public PPCallbacks {
SourceManager &SM;
TokenConcatenation ConcatInfo;
public:
- raw_ostream &OS;
+ raw_ostream *OS;
private:
unsigned CurLine;
@@ -95,14 +95,26 @@ private:
bool DumpIncludeDirectives;
bool UseLineDirectives;
bool IsFirstFileEntered;
+ bool MinimizeWhitespace;
+ bool DirectivesOnly;
+ bool KeepSystemIncludes;
+ raw_ostream *OrigOS;
+ std::unique_ptr<llvm::raw_null_ostream> NullOS;
+
+ Token PrevTok;
+ Token PrevPrevTok;
+
public:
- PrintPPOutputPPCallbacks(Preprocessor &pp, raw_ostream &os, bool lineMarkers,
+ PrintPPOutputPPCallbacks(Preprocessor &pp, raw_ostream *os, bool lineMarkers,
bool defines, bool DumpIncludeDirectives,
- bool UseLineDirectives)
+ bool UseLineDirectives, bool MinimizeWhitespace,
+ bool DirectivesOnly, bool KeepSystemIncludes)
: PP(pp), SM(PP.getSourceManager()), ConcatInfo(PP), OS(os),
DisableLineMarkers(lineMarkers), DumpDefines(defines),
DumpIncludeDirectives(DumpIncludeDirectives),
- UseLineDirectives(UseLineDirectives) {
+ UseLineDirectives(UseLineDirectives),
+ MinimizeWhitespace(MinimizeWhitespace), DirectivesOnly(DirectivesOnly),
+ KeepSystemIncludes(KeepSystemIncludes), OrigOS(os) {
CurLine = 0;
CurFilename += "<uninit>";
EmittedTokensOnThisLine = false;
@@ -110,8 +122,15 @@ public:
FileType = SrcMgr::C_User;
Initialized = false;
IsFirstFileEntered = false;
+ if (KeepSystemIncludes)
+ NullOS = std::make_unique<llvm::raw_null_ostream>();
+
+ PrevTok.startToken();
+ PrevPrevTok.startToken();
}
+ bool isMinimizeWhitespace() const { return MinimizeWhitespace; }
+
void setEmittedTokensOnThisLine() { EmittedTokensOnThisLine = true; }
bool hasEmittedTokensOnThisLine() const { return EmittedTokensOnThisLine; }
@@ -120,16 +139,21 @@ public:
return EmittedDirectiveOnThisLine;
}
- bool startNewLineIfNeeded(bool ShouldUpdateCurrentLine = true);
+ /// Ensure that the output stream position is at the beginning of a new line
+ /// and inserts one if it does not. It is intended to ensure that directives
+ /// inserted by the directives not from the input source (such as #line) are
+ /// in the first column. To insert newlines that represent the input, use
+ /// MoveToLine(/*...*/, /*RequireStartOfLine=*/true).
+ void startNewLineIfNeeded();
void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
FileID PrevFID) override;
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
- CharSourceRange FilenameRange, const FileEntry *File,
- StringRef SearchPath, StringRef RelativePath,
- const Module *Imported,
+ CharSourceRange FilenameRange,
+ OptionalFileEntryRef File, StringRef SearchPath,
+ StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) override;
void Ident(SourceLocation Loc, StringRef str) override;
void PragmaMessage(SourceLocation Loc, StringRef Namespace,
@@ -139,7 +163,7 @@ public:
void PragmaDiagnosticPop(SourceLocation Loc, StringRef Namespace) override;
void PragmaDiagnostic(SourceLocation Loc, StringRef Namespace,
diag::Severity Map, StringRef Str) override;
- void PragmaWarning(SourceLocation Loc, StringRef WarningSpec,
+ void PragmaWarning(SourceLocation Loc, PragmaWarningSpecifier WarningSpec,
ArrayRef<int> Ids) override;
void PragmaWarningPush(SourceLocation Loc, int Level) override;
void PragmaWarningPop(SourceLocation Loc) override;
@@ -148,18 +172,44 @@ public:
void PragmaAssumeNonNullBegin(SourceLocation Loc) override;
void PragmaAssumeNonNullEnd(SourceLocation Loc) override;
- bool HandleFirstTokOnLine(Token &Tok);
+ /// Insert whitespace before emitting the next token.
+ ///
+ /// @param Tok Next token to be emitted.
+ /// @param RequireSpace Ensure at least one whitespace is emitted. Useful
+ /// if non-tokens have been emitted to the stream.
+ /// @param RequireSameLine Never emit newlines. Useful when semantics depend
+ /// on being on the same line, such as directives.
+ void HandleWhitespaceBeforeTok(const Token &Tok, bool RequireSpace,
+ bool RequireSameLine);
/// Move to the line of the provided source location. This will
- /// return true if the output stream required adjustment or if
- /// the requested location is on the first line.
- bool MoveToLine(SourceLocation Loc) {
+ /// return true if a newline was inserted or if
+ /// the requested location is the first token on the first line.
+ /// In these cases the next output will be the first column on the line and
+ /// make it possible to insert indention. The newline was inserted
+ /// implicitly when at the beginning of the file.
+ ///
+ /// @param Tok Token where to move to.
+ /// @param RequireStartOfLine Whether the next line depends on being in the
+ /// first column, such as a directive.
+ ///
+ /// @return Whether column adjustments are necessary.
+ bool MoveToLine(const Token &Tok, bool RequireStartOfLine) {
+ PresumedLoc PLoc = SM.getPresumedLoc(Tok.getLocation());
+ unsigned TargetLine = PLoc.isValid() ? PLoc.getLine() : CurLine;
+ bool IsFirstInFile =
+ Tok.isAtStartOfLine() && PLoc.isValid() && PLoc.getLine() == 1;
+ return MoveToLine(TargetLine, RequireStartOfLine) || IsFirstInFile;
+ }
+
+ /// Move to the line of the provided source location. Returns true if a new
+ /// line was inserted.
+ bool MoveToLine(SourceLocation Loc, bool RequireStartOfLine) {
PresumedLoc PLoc = SM.getPresumedLoc(Loc);
- if (PLoc.isInvalid())
- return false;
- return MoveToLine(PLoc.getLine()) || (PLoc.getLine() == 1);
+ unsigned TargetLine = PLoc.isValid() ? PLoc.getLine() : CurLine;
+ return MoveToLine(TargetLine, RequireStartOfLine);
}
- bool MoveToLine(unsigned LineNo);
+ bool MoveToLine(unsigned LineNo, bool RequireStartOfLine);
bool AvoidConcat(const Token &PrevPrevTok, const Token &PrevTok,
const Token &Tok) {
@@ -187,70 +237,91 @@ public:
void PrintPPOutputPPCallbacks::WriteLineInfo(unsigned LineNo,
const char *Extra,
unsigned ExtraLen) {
- startNewLineIfNeeded(/*ShouldUpdateCurrentLine=*/false);
+ startNewLineIfNeeded();
// Emit #line directives or GNU line markers depending on what mode we're in.
if (UseLineDirectives) {
- OS << "#line" << ' ' << LineNo << ' ' << '"';
- OS.write_escaped(CurFilename);
- OS << '"';
+ *OS << "#line" << ' ' << LineNo << ' ' << '"';
+ OS->write_escaped(CurFilename);
+ *OS << '"';
} else {
- OS << '#' << ' ' << LineNo << ' ' << '"';
- OS.write_escaped(CurFilename);
- OS << '"';
+ *OS << '#' << ' ' << LineNo << ' ' << '"';
+ OS->write_escaped(CurFilename);
+ *OS << '"';
if (ExtraLen)
- OS.write(Extra, ExtraLen);
+ OS->write(Extra, ExtraLen);
if (FileType == SrcMgr::C_System)
- OS.write(" 3", 2);
+ OS->write(" 3", 2);
else if (FileType == SrcMgr::C_ExternCSystem)
- OS.write(" 3 4", 4);
+ OS->write(" 3 4", 4);
}
- OS << '\n';
+ *OS << '\n';
}
/// MoveToLine - Move the output to the source line specified by the location
/// object. We can do this by emitting some number of \n's, or be emitting a
/// #line directive. This returns false if already at the specified line, true
/// if some newlines were emitted.
-bool PrintPPOutputPPCallbacks::MoveToLine(unsigned LineNo) {
+bool PrintPPOutputPPCallbacks::MoveToLine(unsigned LineNo,
+ bool RequireStartOfLine) {
+ // If it is required to start a new line or finish the current, insert
+ // vertical whitespace now and take it into account when moving to the
+ // expected line.
+ bool StartedNewLine = false;
+ if ((RequireStartOfLine && EmittedTokensOnThisLine) ||
+ EmittedDirectiveOnThisLine) {
+ *OS << '\n';
+ StartedNewLine = true;
+ CurLine += 1;
+ EmittedTokensOnThisLine = false;
+ EmittedDirectiveOnThisLine = false;
+ }
+
// If this line is "close enough" to the original line, just print newlines,
// otherwise print a #line directive.
- if (LineNo-CurLine <= 8) {
- if (LineNo-CurLine == 1)
- OS << '\n';
- else if (LineNo == CurLine)
- return false; // Spelling line moved, but expansion line didn't.
- else {
+ if (CurLine == LineNo) {
+ // Nothing to do if we are already on the correct line.
+ } else if (MinimizeWhitespace && DisableLineMarkers) {
+ // With -E -P -fminimize-whitespace, don't emit anything if not necessary.
+ } else if (!StartedNewLine && LineNo - CurLine == 1) {
+ // Printing a single line has priority over printing a #line directive, even
+ // when minimizing whitespace which otherwise would print #line directives
+ // for every single line.
+ *OS << '\n';
+ StartedNewLine = true;
+ } else if (!DisableLineMarkers) {
+ if (LineNo - CurLine <= 8) {
const char *NewLines = "\n\n\n\n\n\n\n\n";
- OS.write(NewLines, LineNo-CurLine);
+ OS->write(NewLines, LineNo - CurLine);
+ } else {
+ // Emit a #line or line marker.
+ WriteLineInfo(LineNo, nullptr, 0);
}
- } else if (!DisableLineMarkers) {
- // Emit a #line or line marker.
- WriteLineInfo(LineNo, nullptr, 0);
- } else {
- // Okay, we're in -P mode, which turns off line markers. However, we still
- // need to emit a newline between tokens on different lines.
- startNewLineIfNeeded(/*ShouldUpdateCurrentLine=*/false);
+ StartedNewLine = true;
+ } else if (EmittedTokensOnThisLine) {
+ // If we are not on the correct line and don't need to be line-correct,
+ // at least ensure we start on a new line.
+ *OS << '\n';
+ StartedNewLine = true;
+ }
+
+ if (StartedNewLine) {
+ EmittedTokensOnThisLine = false;
+ EmittedDirectiveOnThisLine = false;
}
CurLine = LineNo;
- return true;
+ return StartedNewLine;
}
-bool
-PrintPPOutputPPCallbacks::startNewLineIfNeeded(bool ShouldUpdateCurrentLine) {
+void PrintPPOutputPPCallbacks::startNewLineIfNeeded() {
if (EmittedTokensOnThisLine || EmittedDirectiveOnThisLine) {
- OS << '\n';
+ *OS << '\n';
EmittedTokensOnThisLine = false;
EmittedDirectiveOnThisLine = false;
- if (ShouldUpdateCurrentLine)
- ++CurLine;
- return true;
}
-
- return false;
}
/// FileChanged - Whenever the preprocessor enters or exits a #include file
@@ -273,7 +344,7 @@ void PrintPPOutputPPCallbacks::FileChanged(SourceLocation Loc,
if (Reason == PPCallbacks::EnterFile) {
SourceLocation IncludeLoc = UserLoc.getIncludeLoc();
if (IncludeLoc.isValid())
- MoveToLine(IncludeLoc);
+ MoveToLine(IncludeLoc, /*RequireStartOfLine=*/false);
} else if (Reason == PPCallbacks::SystemHeaderPragma) {
// GCC emits the # directive for this directive on the line AFTER the
// directive and emits a bunch of spaces that aren't needed. This is because
@@ -285,12 +356,17 @@ void PrintPPOutputPPCallbacks::FileChanged(SourceLocation Loc,
CurLine = NewLine;
+ // In KeepSystemIncludes mode, redirect OS as needed.
+ if (KeepSystemIncludes && (isSystem(FileType) != isSystem(NewFileType)))
+ OS = isSystem(FileType) ? OrigOS : NullOS.get();
+
CurFilename.clear();
CurFilename += UserLoc.getFilename();
FileType = NewFileType;
if (DisableLineMarkers) {
- startNewLineIfNeeded(/*ShouldUpdateCurrentLine=*/false);
+ if (!MinimizeWhitespace)
+ startNewLineIfNeeded();
return;
}
@@ -323,28 +399,22 @@ void PrintPPOutputPPCallbacks::FileChanged(SourceLocation Loc,
}
void PrintPPOutputPPCallbacks::InclusionDirective(
- SourceLocation HashLoc,
- const Token &IncludeTok,
- StringRef FileName,
- bool IsAngled,
- CharSourceRange FilenameRange,
- const FileEntry *File,
- StringRef SearchPath,
- StringRef RelativePath,
- const Module *Imported,
+ SourceLocation HashLoc, const Token &IncludeTok, StringRef FileName,
+ bool IsAngled, CharSourceRange FilenameRange, OptionalFileEntryRef File,
+ StringRef SearchPath, StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) {
// In -dI mode, dump #include directives prior to dumping their content or
- // interpretation.
- if (DumpIncludeDirectives) {
- startNewLineIfNeeded();
- MoveToLine(HashLoc);
+ // interpretation. Similar for -fkeep-system-includes.
+ if (DumpIncludeDirectives || (KeepSystemIncludes && isSystem(FileType))) {
+ MoveToLine(HashLoc, /*RequireStartOfLine=*/true);
const std::string TokenText = PP.getSpelling(IncludeTok);
assert(!TokenText.empty());
- OS << "#" << TokenText << " "
- << (IsAngled ? '<' : '"') << FileName << (IsAngled ? '>' : '"')
- << " /* clang -E -dI */";
+ *OS << "#" << TokenText << " "
+ << (IsAngled ? '<' : '"') << FileName << (IsAngled ? '>' : '"')
+ << " /* clang -E "
+ << (DumpIncludeDirectives ? "-dI" : "-fkeep-system-includes")
+ << " */";
setEmittedDirectiveOnThisLine();
- startNewLineIfNeeded();
}
// When preprocessing, turn implicit imports into module import pragmas.
@@ -353,17 +423,14 @@ void PrintPPOutputPPCallbacks::InclusionDirective(
case tok::pp_include:
case tok::pp_import:
case tok::pp_include_next:
- startNewLineIfNeeded();
- MoveToLine(HashLoc);
- OS << "#pragma clang module import " << Imported->getFullModuleName(true)
- << " /* clang -E: implicit import for "
- << "#" << PP.getSpelling(IncludeTok) << " "
- << (IsAngled ? '<' : '"') << FileName << (IsAngled ? '>' : '"')
- << " */";
- // Since we want a newline after the pragma, but not a #<line>, start a
- // new line immediately.
- EmittedTokensOnThisLine = true;
- startNewLineIfNeeded();
+ MoveToLine(HashLoc, /*RequireStartOfLine=*/true);
+ *OS << "#pragma clang module import "
+ << Imported->getFullModuleName(true)
+ << " /* clang -E: implicit import for "
+ << "#" << PP.getSpelling(IncludeTok) << " "
+ << (IsAngled ? '<' : '"') << FileName << (IsAngled ? '>' : '"')
+ << " */";
+ setEmittedDirectiveOnThisLine();
break;
case tok::pp___include_macros:
@@ -384,37 +451,46 @@ void PrintPPOutputPPCallbacks::InclusionDirective(
/// Handle entering the scope of a module during a module compilation.
void PrintPPOutputPPCallbacks::BeginModule(const Module *M) {
startNewLineIfNeeded();
- OS << "#pragma clang module begin " << M->getFullModuleName(true);
+ *OS << "#pragma clang module begin " << M->getFullModuleName(true);
setEmittedDirectiveOnThisLine();
}
/// Handle leaving the scope of a module during a module compilation.
void PrintPPOutputPPCallbacks::EndModule(const Module *M) {
startNewLineIfNeeded();
- OS << "#pragma clang module end /*" << M->getFullModuleName(true) << "*/";
+ *OS << "#pragma clang module end /*" << M->getFullModuleName(true) << "*/";
setEmittedDirectiveOnThisLine();
}
/// Ident - Handle #ident directives when read by the preprocessor.
///
void PrintPPOutputPPCallbacks::Ident(SourceLocation Loc, StringRef S) {
- MoveToLine(Loc);
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
- OS.write("#ident ", strlen("#ident "));
- OS.write(S.begin(), S.size());
- EmittedTokensOnThisLine = true;
+ OS->write("#ident ", strlen("#ident "));
+ OS->write(S.begin(), S.size());
+ setEmittedTokensOnThisLine();
}
/// MacroDefined - This hook is called whenever a macro definition is seen.
void PrintPPOutputPPCallbacks::MacroDefined(const Token &MacroNameTok,
const MacroDirective *MD) {
const MacroInfo *MI = MD->getMacroInfo();
- // Only print out macro definitions in -dD mode.
- if (!DumpDefines ||
+ // Print out macro definitions in -dD mode and when we have -fdirectives-only
+ // for C++20 header units.
+ if ((!DumpDefines && !DirectivesOnly) ||
// Ignore __FILE__ etc.
- MI->isBuiltinMacro()) return;
+ MI->isBuiltinMacro())
+ return;
- MoveToLine(MI->getDefinitionLoc());
+ SourceLocation DefLoc = MI->getDefinitionLoc();
+ if (DirectivesOnly && !MI->isUsed()) {
+ SourceManager &SM = PP.getSourceManager();
+ if (SM.isWrittenInBuiltinFile(DefLoc) ||
+ SM.isWrittenInCommandLineFile(DefLoc))
+ return;
+ }
+ MoveToLine(DefLoc, /*RequireStartOfLine=*/true);
PrintMacroDefinition(*MacroNameTok.getIdentifierInfo(), *MI, PP, OS);
setEmittedDirectiveOnThisLine();
}
@@ -422,23 +498,25 @@ void PrintPPOutputPPCallbacks::MacroDefined(const Token &MacroNameTok,
void PrintPPOutputPPCallbacks::MacroUndefined(const Token &MacroNameTok,
const MacroDefinition &MD,
const MacroDirective *Undef) {
- // Only print out macro definitions in -dD mode.
- if (!DumpDefines) return;
+ // Print out macro definitions in -dD mode and when we have -fdirectives-only
+ // for C++20 header units.
+ if (!DumpDefines && !DirectivesOnly)
+ return;
- MoveToLine(MacroNameTok.getLocation());
- OS << "#undef " << MacroNameTok.getIdentifierInfo()->getName();
+ MoveToLine(MacroNameTok.getLocation(), /*RequireStartOfLine=*/true);
+ *OS << "#undef " << MacroNameTok.getIdentifierInfo()->getName();
setEmittedDirectiveOnThisLine();
}
-static void outputPrintable(raw_ostream &OS, StringRef Str) {
+static void outputPrintable(raw_ostream *OS, StringRef Str) {
for (unsigned char Char : Str) {
if (isPrintable(Char) && Char != '\\' && Char != '"')
- OS << (char)Char;
+ *OS << (char)Char;
else // Output anything hard as an octal escape.
- OS << '\\'
- << (char)('0' + ((Char >> 6) & 7))
- << (char)('0' + ((Char >> 3) & 7))
- << (char)('0' + ((Char >> 0) & 7));
+ *OS << '\\'
+ << (char)('0' + ((Char >> 6) & 7))
+ << (char)('0' + ((Char >> 3) & 7))
+ << (char)('0' + ((Char >> 0) & 7));
}
}
@@ -446,54 +524,50 @@ void PrintPPOutputPPCallbacks::PragmaMessage(SourceLocation Loc,
StringRef Namespace,
PragmaMessageKind Kind,
StringRef Str) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
- OS << "#pragma ";
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
+ *OS << "#pragma ";
if (!Namespace.empty())
- OS << Namespace << ' ';
+ *OS << Namespace << ' ';
switch (Kind) {
case PMK_Message:
- OS << "message(\"";
+ *OS << "message(\"";
break;
case PMK_Warning:
- OS << "warning \"";
+ *OS << "warning \"";
break;
case PMK_Error:
- OS << "error \"";
+ *OS << "error \"";
break;
}
outputPrintable(OS, Str);
- OS << '"';
+ *OS << '"';
if (Kind == PMK_Message)
- OS << ')';
+ *OS << ')';
setEmittedDirectiveOnThisLine();
}
void PrintPPOutputPPCallbacks::PragmaDebug(SourceLocation Loc,
StringRef DebugType) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
- OS << "#pragma clang __debug ";
- OS << DebugType;
+ *OS << "#pragma clang __debug ";
+ *OS << DebugType;
setEmittedDirectiveOnThisLine();
}
void PrintPPOutputPPCallbacks::
PragmaDiagnosticPush(SourceLocation Loc, StringRef Namespace) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
- OS << "#pragma " << Namespace << " diagnostic push";
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
+ *OS << "#pragma " << Namespace << " diagnostic push";
setEmittedDirectiveOnThisLine();
}
void PrintPPOutputPPCallbacks::
PragmaDiagnosticPop(SourceLocation Loc, StringRef Namespace) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
- OS << "#pragma " << Namespace << " diagnostic pop";
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
+ *OS << "#pragma " << Namespace << " diagnostic pop";
setEmittedDirectiveOnThisLine();
}
@@ -501,130 +575,158 @@ void PrintPPOutputPPCallbacks::PragmaDiagnostic(SourceLocation Loc,
StringRef Namespace,
diag::Severity Map,
StringRef Str) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
- OS << "#pragma " << Namespace << " diagnostic ";
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
+ *OS << "#pragma " << Namespace << " diagnostic ";
switch (Map) {
case diag::Severity::Remark:
- OS << "remark";
+ *OS << "remark";
break;
case diag::Severity::Warning:
- OS << "warning";
+ *OS << "warning";
break;
case diag::Severity::Error:
- OS << "error";
+ *OS << "error";
break;
case diag::Severity::Ignored:
- OS << "ignored";
+ *OS << "ignored";
break;
case diag::Severity::Fatal:
- OS << "fatal";
+ *OS << "fatal";
break;
}
- OS << " \"" << Str << '"';
+ *OS << " \"" << Str << '"';
setEmittedDirectiveOnThisLine();
}
void PrintPPOutputPPCallbacks::PragmaWarning(SourceLocation Loc,
- StringRef WarningSpec,
+ PragmaWarningSpecifier WarningSpec,
ArrayRef<int> Ids) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
- OS << "#pragma warning(" << WarningSpec << ':';
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
+
+ *OS << "#pragma warning(";
+ switch(WarningSpec) {
+ case PWS_Default: *OS << "default"; break;
+ case PWS_Disable: *OS << "disable"; break;
+ case PWS_Error: *OS << "error"; break;
+ case PWS_Once: *OS << "once"; break;
+ case PWS_Suppress: *OS << "suppress"; break;
+ case PWS_Level1: *OS << '1'; break;
+ case PWS_Level2: *OS << '2'; break;
+ case PWS_Level3: *OS << '3'; break;
+ case PWS_Level4: *OS << '4'; break;
+ }
+ *OS << ':';
+
for (ArrayRef<int>::iterator I = Ids.begin(), E = Ids.end(); I != E; ++I)
- OS << ' ' << *I;
- OS << ')';
+ *OS << ' ' << *I;
+ *OS << ')';
setEmittedDirectiveOnThisLine();
}
void PrintPPOutputPPCallbacks::PragmaWarningPush(SourceLocation Loc,
int Level) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
- OS << "#pragma warning(push";
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
+ *OS << "#pragma warning(push";
if (Level >= 0)
- OS << ", " << Level;
- OS << ')';
+ *OS << ", " << Level;
+ *OS << ')';
setEmittedDirectiveOnThisLine();
}
void PrintPPOutputPPCallbacks::PragmaWarningPop(SourceLocation Loc) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
- OS << "#pragma warning(pop)";
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
+ *OS << "#pragma warning(pop)";
setEmittedDirectiveOnThisLine();
}
void PrintPPOutputPPCallbacks::PragmaExecCharsetPush(SourceLocation Loc,
StringRef Str) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
- OS << "#pragma character_execution_set(push";
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
+ *OS << "#pragma character_execution_set(push";
if (!Str.empty())
- OS << ", " << Str;
- OS << ')';
+ *OS << ", " << Str;
+ *OS << ')';
setEmittedDirectiveOnThisLine();
}
void PrintPPOutputPPCallbacks::PragmaExecCharsetPop(SourceLocation Loc) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
- OS << "#pragma character_execution_set(pop)";
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
+ *OS << "#pragma character_execution_set(pop)";
setEmittedDirectiveOnThisLine();
}
void PrintPPOutputPPCallbacks::
PragmaAssumeNonNullBegin(SourceLocation Loc) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
- OS << "#pragma clang assume_nonnull begin";
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
+ *OS << "#pragma clang assume_nonnull begin";
setEmittedDirectiveOnThisLine();
}
void PrintPPOutputPPCallbacks::
PragmaAssumeNonNullEnd(SourceLocation Loc) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
- OS << "#pragma clang assume_nonnull end";
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
+ *OS << "#pragma clang assume_nonnull end";
setEmittedDirectiveOnThisLine();
}
-/// HandleFirstTokOnLine - When emitting a preprocessed file in -E mode, this
-/// is called for the first token on each new line. If this really is the start
-/// of a new logical line, handle it and return true, otherwise return false.
-/// This may not be the start of a logical line because the "start of line"
-/// marker is set for spelling lines, not expansion ones.
-bool PrintPPOutputPPCallbacks::HandleFirstTokOnLine(Token &Tok) {
- // Figure out what line we went to and insert the appropriate number of
- // newline characters.
- if (!MoveToLine(Tok.getLocation()))
- return false;
-
- // Print out space characters so that the first token on a line is
- // indented for easy reading.
- unsigned ColNo = SM.getExpansionColumnNumber(Tok.getLocation());
-
- // The first token on a line can have a column number of 1, yet still expect
- // leading white space, if a macro expansion in column 1 starts with an empty
- // macro argument, or an empty nested macro expansion. In this case, move the
- // token to column 2.
- if (ColNo == 1 && Tok.hasLeadingSpace())
- ColNo = 2;
-
- // This hack prevents stuff like:
- // #define HASH #
- // HASH define foo bar
- // From having the # character end up at column 1, which makes it so it
- // is not handled as a #define next time through the preprocessor if in
- // -fpreprocessed mode.
- if (ColNo <= 1 && Tok.is(tok::hash))
- OS << ' ';
-
- // Otherwise, indent the appropriate number of spaces.
- for (; ColNo > 1; --ColNo)
- OS << ' ';
-
- return true;
+void PrintPPOutputPPCallbacks::HandleWhitespaceBeforeTok(const Token &Tok,
+ bool RequireSpace,
+ bool RequireSameLine) {
+ // These tokens are not expanded to anything and don't need whitespace before
+ // them.
+ if (Tok.is(tok::eof) ||
+ (Tok.isAnnotation() && !Tok.is(tok::annot_header_unit) &&
+ !Tok.is(tok::annot_module_begin) && !Tok.is(tok::annot_module_end) &&
+ !Tok.is(tok::annot_repl_input_end)))
+ return;
+
+ // EmittedDirectiveOnThisLine takes priority over RequireSameLine.
+ if ((!RequireSameLine || EmittedDirectiveOnThisLine) &&
+ MoveToLine(Tok, /*RequireStartOfLine=*/EmittedDirectiveOnThisLine)) {
+ if (MinimizeWhitespace) {
+ // Avoid interpreting hash as a directive under -fpreprocessed.
+ if (Tok.is(tok::hash))
+ *OS << ' ';
+ } else {
+ // Print out space characters so that the first token on a line is
+ // indented for easy reading.
+ unsigned ColNo = SM.getExpansionColumnNumber(Tok.getLocation());
+
+ // The first token on a line can have a column number of 1, yet still
+ // expect leading white space, if a macro expansion in column 1 starts
+ // with an empty macro argument, or an empty nested macro expansion. In
+ // this case, move the token to column 2.
+ if (ColNo == 1 && Tok.hasLeadingSpace())
+ ColNo = 2;
+
+ // This hack prevents stuff like:
+ // #define HASH #
+ // HASH define foo bar
+ // From having the # character end up at column 1, which makes it so it
+ // is not handled as a #define next time through the preprocessor if in
+ // -fpreprocessed mode.
+ if (ColNo <= 1 && Tok.is(tok::hash))
+ *OS << ' ';
+
+ // Otherwise, indent the appropriate number of spaces.
+ for (; ColNo > 1; --ColNo)
+ *OS << ' ';
+ }
+ } else {
+ // Insert whitespace between the previous and next token if either
+ // - The caller requires it
+ // - The input had whitespace between them and we are not in
+ // whitespace-minimization mode
+ // - The whitespace is necessary to keep the tokens apart and there is not
+ // already a newline between them
+ if (RequireSpace || (!MinimizeWhitespace && Tok.hasLeadingSpace()) ||
+ ((EmittedTokensOnThisLine || EmittedDirectiveOnThisLine) &&
+ AvoidConcat(PrevPrevTok, PrevTok, Tok)))
+ *OS << ' ';
+ }
+
+ PrevPrevTok = PrevTok;
+ PrevTok = Tok;
}
void PrintPPOutputPPCallbacks::HandleNewlinesInToken(const char *TokStr,
@@ -668,9 +770,9 @@ struct UnknownPragmaHandler : public PragmaHandler {
Token &PragmaTok) override {
// Figure out what line we went to and insert the appropriate number of
// newline characters.
- Callbacks->startNewLineIfNeeded();
- Callbacks->MoveToLine(PragmaTok.getLocation());
- Callbacks->OS.write(Prefix, strlen(Prefix));
+ Callbacks->MoveToLine(PragmaTok.getLocation(), /*RequireStartOfLine=*/true);
+ Callbacks->OS->write(Prefix, strlen(Prefix));
+ Callbacks->setEmittedTokensOnThisLine();
if (ShouldExpandTokens) {
// The first token does not have expanded macros. Expand them, if
@@ -682,21 +784,16 @@ struct UnknownPragmaHandler : public PragmaHandler {
/*IsReinject=*/false);
PP.Lex(PragmaTok);
}
- Token PrevToken;
- Token PrevPrevToken;
- PrevToken.startToken();
- PrevPrevToken.startToken();
// Read and print all of the pragma tokens.
+ bool IsFirst = true;
while (PragmaTok.isNot(tok::eod)) {
- if (PragmaTok.hasLeadingSpace() ||
- Callbacks->AvoidConcat(PrevPrevToken, PrevToken, PragmaTok))
- Callbacks->OS << ' ';
+ Callbacks->HandleWhitespaceBeforeTok(PragmaTok, /*RequireSpace=*/IsFirst,
+ /*RequireSameLine=*/true);
+ IsFirst = false;
std::string TokSpell = PP.getSpelling(PragmaTok);
- Callbacks->OS.write(&TokSpell[0], TokSpell.size());
-
- PrevPrevToken = PrevToken;
- PrevToken = PragmaTok;
+ Callbacks->OS->write(&TokSpell[0], TokSpell.size());
+ Callbacks->setEmittedTokensOnThisLine();
if (ShouldExpandTokens)
PP.Lex(PragmaTok);
@@ -710,49 +807,48 @@ struct UnknownPragmaHandler : public PragmaHandler {
static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
- PrintPPOutputPPCallbacks *Callbacks,
- raw_ostream &OS) {
+ PrintPPOutputPPCallbacks *Callbacks) {
bool DropComments = PP.getLangOpts().TraditionalCPP &&
!PP.getCommentRetentionState();
+ bool IsStartOfLine = false;
char Buffer[256];
- Token PrevPrevTok, PrevTok;
- PrevPrevTok.startToken();
- PrevTok.startToken();
- while (1) {
- if (Callbacks->hasEmittedDirectiveOnThisLine()) {
- Callbacks->startNewLineIfNeeded();
- Callbacks->MoveToLine(Tok.getLocation());
- }
-
- // If this token is at the start of a line, emit newlines if needed.
- if (Tok.isAtStartOfLine() && Callbacks->HandleFirstTokOnLine(Tok)) {
- // done.
- } else if (Tok.hasLeadingSpace() ||
- // If we haven't emitted a token on this line yet, PrevTok isn't
- // useful to look at and no concatenation could happen anyway.
- (Callbacks->hasEmittedTokensOnThisLine() &&
- // Don't print "-" next to "-", it would form "--".
- Callbacks->AvoidConcat(PrevPrevTok, PrevTok, Tok))) {
- OS << ' ';
- }
+ while (true) {
+ // Two lines joined with line continuation ('\' as last character on the
+ // line) must be emitted as one line even though Tok.getLine() returns two
+ // different values. In this situation Tok.isAtStartOfLine() is false even
+ // though it may be the first token on the lexical line. When
+ // dropping/skipping a token that is at the start of a line, propagate the
+ // start-of-line-ness to the next token to not append it to the previous
+ // line.
+ IsStartOfLine = IsStartOfLine || Tok.isAtStartOfLine();
+
+ Callbacks->HandleWhitespaceBeforeTok(Tok, /*RequireSpace=*/false,
+ /*RequireSameLine=*/!IsStartOfLine);
if (DropComments && Tok.is(tok::comment)) {
// Skip comments. Normally the preprocessor does not generate
// tok::comment nodes at all when not keeping comments, but under
// -traditional-cpp the lexer keeps /all/ whitespace, including comments.
- SourceLocation StartLoc = Tok.getLocation();
- Callbacks->MoveToLine(StartLoc.getLocWithOffset(Tok.getLength()));
+ PP.Lex(Tok);
+ continue;
+ } else if (Tok.is(tok::annot_repl_input_end)) {
+ PP.Lex(Tok);
+ continue;
} else if (Tok.is(tok::eod)) {
// Don't print end of directive tokens, since they are typically newlines
// that mess up our line tracking. These come from unknown pre-processor
// directives or hash-prefixed comments in standalone assembly files.
PP.Lex(Tok);
+ // FIXME: The token on the next line after #include should have
+ // Tok.isAtStartOfLine() set.
+ IsStartOfLine = true;
continue;
} else if (Tok.is(tok::annot_module_include)) {
// PrintPPOutputPPCallbacks::InclusionDirective handles producing
// appropriate output here. Ignore this token entirely.
PP.Lex(Tok);
+ IsStartOfLine = true;
continue;
} else if (Tok.is(tok::annot_module_begin)) {
// FIXME: We retrieve this token after the FileChanged callback, and
@@ -764,11 +860,13 @@ static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
Callbacks->BeginModule(
reinterpret_cast<Module *>(Tok.getAnnotationValue()));
PP.Lex(Tok);
+ IsStartOfLine = true;
continue;
} else if (Tok.is(tok::annot_module_end)) {
Callbacks->EndModule(
reinterpret_cast<Module *>(Tok.getAnnotationValue()));
PP.Lex(Tok);
+ IsStartOfLine = true;
continue;
} else if (Tok.is(tok::annot_header_unit)) {
// This is a header-name that has been (effectively) converted into a
@@ -777,7 +875,7 @@ static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
// components. We don't have a good way to round-trip those.
Module *M = reinterpret_cast<Module *>(Tok.getAnnotationValue());
std::string Name = M->getFullModuleName();
- OS.write(Name.data(), Name.size());
+ Callbacks->OS->write(Name.data(), Name.size());
Callbacks->HandleNewlinesInToken(Name.data(), Name.size());
} else if (Tok.isAnnotation()) {
// Ignore annotation tokens created by pragmas - the pragmas themselves
@@ -785,34 +883,47 @@ static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
PP.Lex(Tok);
continue;
} else if (IdentifierInfo *II = Tok.getIdentifierInfo()) {
- OS << II->getName();
+ *Callbacks->OS << II->getName();
} else if (Tok.isLiteral() && !Tok.needsCleaning() &&
Tok.getLiteralData()) {
- OS.write(Tok.getLiteralData(), Tok.getLength());
- } else if (Tok.getLength() < llvm::array_lengthof(Buffer)) {
+ Callbacks->OS->write(Tok.getLiteralData(), Tok.getLength());
+ } else if (Tok.getLength() < std::size(Buffer)) {
const char *TokPtr = Buffer;
unsigned Len = PP.getSpelling(Tok, TokPtr);
- OS.write(TokPtr, Len);
+ Callbacks->OS->write(TokPtr, Len);
// Tokens that can contain embedded newlines need to adjust our current
// line number.
+ // FIXME: The token may end with a newline in which case
+ // setEmittedDirectiveOnThisLine/setEmittedTokensOnThisLine afterwards is
+ // wrong.
if (Tok.getKind() == tok::comment || Tok.getKind() == tok::unknown)
Callbacks->HandleNewlinesInToken(TokPtr, Len);
+ if (Tok.is(tok::comment) && Len >= 2 && TokPtr[0] == '/' &&
+ TokPtr[1] == '/') {
+ // It's a line comment;
+ // Ensure that we don't concatenate anything behind it.
+ Callbacks->setEmittedDirectiveOnThisLine();
+ }
} else {
std::string S = PP.getSpelling(Tok);
- OS.write(S.data(), S.size());
+ Callbacks->OS->write(S.data(), S.size());
// Tokens that can contain embedded newlines need to adjust our current
// line number.
if (Tok.getKind() == tok::comment || Tok.getKind() == tok::unknown)
Callbacks->HandleNewlinesInToken(S.data(), S.size());
+ if (Tok.is(tok::comment) && S.size() >= 2 && S[0] == '/' && S[1] == '/') {
+ // It's a line comment;
+ // Ensure that we don't concatenate anything behind it.
+ Callbacks->setEmittedDirectiveOnThisLine();
+ }
}
Callbacks->setEmittedTokensOnThisLine();
+ IsStartOfLine = false;
if (Tok.is(tok::eof)) break;
- PrevPrevTok = PrevTok;
- PrevTok = Tok;
PP.Lex(Tok);
}
}
@@ -848,7 +959,7 @@ static void DoPrintMacros(Preprocessor &PP, raw_ostream *OS) {
// Ignore computed macros like __LINE__ and friends.
if (MI.isBuiltinMacro()) continue;
- PrintMacroDefinition(*MacrosByID[i].first, MI, PP, *OS);
+ PrintMacroDefinition(*MacrosByID[i].first, MI, PP, OS);
*OS << '\n';
}
}
@@ -869,8 +980,9 @@ void clang::DoPrintPreprocessedInput(Preprocessor &PP, raw_ostream *OS,
PP.SetCommentRetentionState(Opts.ShowComments, Opts.ShowMacroComments);
PrintPPOutputPPCallbacks *Callbacks = new PrintPPOutputPPCallbacks(
- PP, *OS, !Opts.ShowLineMarkers, Opts.ShowMacros,
- Opts.ShowIncludeDirectives, Opts.UseLineDirectives);
+ PP, OS, !Opts.ShowLineMarkers, Opts.ShowMacros,
+ Opts.ShowIncludeDirectives, Opts.UseLineDirectives,
+ Opts.MinimizeWhitespace, Opts.DirectivesOnly, Opts.KeepSystemIncludes);
// Expand macros in pragmas with -fms-extensions. The assumption is that
// the majority of pragmas in such a file will be Microsoft pragmas.
@@ -906,6 +1018,8 @@ void clang::DoPrintPreprocessedInput(Preprocessor &PP, raw_ostream *OS,
// After we have configured the preprocessor, enter the main file.
PP.EnterMainSourceFile();
+ if (Opts.DirectivesOnly)
+ PP.SetMacroExpansionOnlyInDirectives();
// Consume all of the tokens that come from the predefines buffer. Those
// should not be emitted into the output and are guaranteed to be at the
@@ -926,7 +1040,7 @@ void clang::DoPrintPreprocessedInput(Preprocessor &PP, raw_ostream *OS,
} while (true);
// Read all the preprocessed tokens, printing them out to the stream.
- PrintPreprocessedTokens(PP, Tok, Callbacks, *OS);
+ PrintPreprocessedTokens(PP, Tok, Callbacks);
*OS << '\n';
// Remove the handlers we just added to leave the preprocessor in a sane state
diff --git a/contrib/llvm-project/clang/lib/Frontend/Rewrite/FixItRewriter.cpp b/contrib/llvm-project/clang/lib/Frontend/Rewrite/FixItRewriter.cpp
index 4fe64b96cb15..567bac576adb 100644
--- a/contrib/llvm-project/clang/lib/Frontend/Rewrite/FixItRewriter.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/Rewrite/FixItRewriter.cpp
@@ -93,7 +93,8 @@ bool FixItRewriter::WriteFixedFiles(
}
for (iterator I = buffer_begin(), E = buffer_end(); I != E; ++I) {
- const FileEntry *Entry = Rewrite.getSourceMgr().getFileEntryForID(I->first);
+ OptionalFileEntryRef Entry =
+ Rewrite.getSourceMgr().getFileEntryRefForID(I->first);
int fd;
std::string Filename =
FixItOpts->RewriteFilename(std::string(Entry->getName()), fd);
diff --git a/contrib/llvm-project/clang/lib/Frontend/Rewrite/FrontendActions.cpp b/contrib/llvm-project/clang/lib/Frontend/Rewrite/FrontendActions.cpp
index 09ed07be923e..cf5a9437e89e 100644
--- a/contrib/llvm-project/clang/lib/Frontend/Rewrite/FrontendActions.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/Rewrite/FrontendActions.cpp
@@ -77,7 +77,7 @@ public:
SmallString<128> Path(Filename);
llvm::sys::path::replace_extension(Path,
NewSuffix + llvm::sys::path::extension(Path));
- return std::string(Path.str());
+ return std::string(Path);
}
};
@@ -88,7 +88,7 @@ public:
llvm::sys::fs::createTemporaryFile(llvm::sys::path::filename(Filename),
llvm::sys::path::extension(Filename).drop_front(), fd,
Path);
- return std::string(Path.str());
+ return std::string(Path);
}
};
} // end anonymous namespace
@@ -165,10 +165,11 @@ RewriteObjCAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
if (std::unique_ptr<raw_ostream> OS =
CI.createDefaultOutputFile(false, InFile, "cpp")) {
if (CI.getLangOpts().ObjCRuntime.isNonFragile())
- return CreateModernObjCRewriter(
- std::string(InFile), std::move(OS), CI.getDiagnostics(),
- CI.getLangOpts(), CI.getDiagnosticOpts().NoRewriteMacros,
- (CI.getCodeGenOpts().getDebugInfo() != codegenoptions::NoDebugInfo));
+ return CreateModernObjCRewriter(std::string(InFile), std::move(OS),
+ CI.getDiagnostics(), CI.getLangOpts(),
+ CI.getDiagnosticOpts().NoRewriteMacros,
+ (CI.getCodeGenOpts().getDebugInfo() !=
+ llvm::codegenoptions::NoDebugInfo));
return CreateObjCRewriter(std::string(InFile), std::move(OS),
CI.getDiagnostics(), CI.getLangOpts(),
CI.getDiagnosticOpts().NoRewriteMacros);
@@ -231,7 +232,7 @@ public:
assert(OS && "loaded module file after finishing rewrite action?");
(*OS) << "#pragma clang module build ";
- if (isValidIdentifier(MF->ModuleName))
+ if (isValidAsciiIdentifier(MF->ModuleName))
(*OS) << MF->ModuleName;
else {
(*OS) << '"';
diff --git a/contrib/llvm-project/clang/lib/Frontend/Rewrite/HTMLPrint.cpp b/contrib/llvm-project/clang/lib/Frontend/Rewrite/HTMLPrint.cpp
index 1388c2e1faab..69baa8f59108 100644
--- a/contrib/llvm-project/clang/lib/Frontend/Rewrite/HTMLPrint.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/Rewrite/HTMLPrint.cpp
@@ -62,7 +62,7 @@ void HTMLPrinter::HandleTranslationUnit(ASTContext &Ctx) {
// Format the file.
FileID FID = R.getSourceMgr().getMainFileID();
- const FileEntry* Entry = R.getSourceMgr().getFileEntryForID(FID);
+ OptionalFileEntryRef Entry = R.getSourceMgr().getFileEntryRefForID(FID);
StringRef Name;
// In some cases, in particular the case where the input is from stdin,
// there is no entry. Fall back to the memory buffer for a name in those
diff --git a/contrib/llvm-project/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp b/contrib/llvm-project/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp
index 3f2a78127477..b6b37461089e 100644
--- a/contrib/llvm-project/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp
@@ -14,11 +14,11 @@
#include "clang/Rewrite/Frontend/Rewriters.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Frontend/PreprocessorOutputOptions.h"
-#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Pragma.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
+#include <optional>
using namespace clang;
using namespace llvm;
@@ -31,10 +31,8 @@ class InclusionRewriter : public PPCallbacks {
struct IncludedFile {
FileID Id;
SrcMgr::CharacteristicKind FileType;
- const DirectoryLookup *DirLookup;
- IncludedFile(FileID Id, SrcMgr::CharacteristicKind FileType,
- const DirectoryLookup *DirLookup)
- : Id(Id), FileType(FileType), DirLookup(DirLookup) {}
+ IncludedFile(FileID Id, SrcMgr::CharacteristicKind FileType)
+ : Id(Id), FileType(FileType) {}
};
Preprocessor &PP; ///< Used to find inclusion directives.
SourceManager &SM; ///< Used to read and manage source files.
@@ -57,8 +55,7 @@ class InclusionRewriter : public PPCallbacks {
public:
InclusionRewriter(Preprocessor &PP, raw_ostream &OS, bool ShowLineMarkers,
bool UseLineDirectives);
- void Process(FileID FileId, SrcMgr::CharacteristicKind FileType,
- const DirectoryLookup *DirLookup);
+ void Process(FileID FileId, SrcMgr::CharacteristicKind FileType);
void setPredefinesBuffer(const llvm::MemoryBufferRef &Buf) {
PredefinesBuffer = Buf;
}
@@ -76,9 +73,9 @@ private:
SrcMgr::CharacteristicKind FileType) override;
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
- CharSourceRange FilenameRange, const FileEntry *File,
- StringRef SearchPath, StringRef RelativePath,
- const Module *Imported,
+ CharSourceRange FilenameRange,
+ OptionalFileEntryRef File, StringRef SearchPath,
+ StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) override;
void If(SourceLocation Loc, SourceRange ConditionRange,
ConditionValueKind ConditionValue) override;
@@ -93,8 +90,10 @@ private:
bool EnsureNewline);
void CommentOutDirective(Lexer &DirectivesLex, const Token &StartToken,
const MemoryBufferRef &FromFile, StringRef EOL,
- unsigned &NextToWrite, int &Lines);
+ unsigned &NextToWrite, int &Lines,
+ const IncludedFile *Inc = nullptr);
const IncludedFile *FindIncludeAtLocation(SourceLocation Loc) const;
+ StringRef getIncludedFileName(const IncludedFile *Inc) const;
const Module *FindModuleAtLocation(SourceLocation Loc) const;
const Module *FindEnteredModule(SourceLocation Loc) const;
bool IsIfAtLocationTrue(SourceLocation Loc) const;
@@ -162,8 +161,7 @@ void InclusionRewriter::FileChanged(SourceLocation Loc,
return;
FileID Id = FullSourceLoc(Loc, SM).getFileID();
auto P = FileIncludes.insert(
- std::make_pair(LastInclusionLocation,
- IncludedFile(Id, NewFileType, PP.GetCurDirLookup())));
+ std::make_pair(LastInclusionLocation, IncludedFile(Id, NewFileType)));
(void)P;
assert(P.second && "Unexpected revisitation of the same include directive");
LastInclusionLocation = SourceLocation();
@@ -186,16 +184,12 @@ void InclusionRewriter::FileSkipped(const FileEntryRef & /*SkippedFile*/,
/// FileChanged() or FileSkipped() is called after this (or neither is
/// called if this #include results in an error or does not textually include
/// anything).
-void InclusionRewriter::InclusionDirective(SourceLocation HashLoc,
- const Token &/*IncludeTok*/,
- StringRef /*FileName*/,
- bool /*IsAngled*/,
- CharSourceRange /*FilenameRange*/,
- const FileEntry * /*File*/,
- StringRef /*SearchPath*/,
- StringRef /*RelativePath*/,
- const Module *Imported,
- SrcMgr::CharacteristicKind FileType){
+void InclusionRewriter::InclusionDirective(
+ SourceLocation HashLoc, const Token & /*IncludeTok*/,
+ StringRef /*FileName*/, bool /*IsAngled*/,
+ CharSourceRange /*FilenameRange*/, OptionalFileEntryRef /*File*/,
+ StringRef /*SearchPath*/, StringRef /*RelativePath*/,
+ const Module *Imported, SrcMgr::CharacteristicKind FileType) {
if (Imported) {
auto P = ModuleIncludes.insert(std::make_pair(HashLoc, Imported));
(void)P;
@@ -256,28 +250,13 @@ bool InclusionRewriter::IsIfAtLocationTrue(SourceLocation Loc) const {
return false;
}
-/// Detect the likely line ending style of \p FromFile by examining the first
-/// newline found within it.
-static StringRef DetectEOL(const MemoryBufferRef &FromFile) {
- // Detect what line endings the file uses, so that added content does not mix
- // the style. We need to check for "\r\n" first because "\n\r" will match
- // "\r\n\r\n".
- const char *Pos = strchr(FromFile.getBufferStart(), '\n');
- if (!Pos)
- return "\n";
- if (Pos - 1 >= FromFile.getBufferStart() && Pos[-1] == '\r')
- return "\r\n";
- if (Pos + 1 < FromFile.getBufferEnd() && Pos[1] == '\r')
- return "\n\r";
- return "\n";
-}
-
void InclusionRewriter::detectMainFileEOL() {
- Optional<MemoryBufferRef> FromFile = *SM.getBufferOrNone(SM.getMainFileID());
+ std::optional<MemoryBufferRef> FromFile =
+ *SM.getBufferOrNone(SM.getMainFileID());
assert(FromFile);
if (!FromFile)
return; // Should never happen, but whatever.
- MainEOL = DetectEOL(*FromFile);
+ MainEOL = FromFile->getBuffer().detectEOL();
}
/// Writes out bytes from \p FromFile, starting at \p NextToWrite and ending at
@@ -304,30 +283,47 @@ void InclusionRewriter::OutputContentUpTo(const MemoryBufferRef &FromFile,
StringRef TextToWrite(FromFile.getBufferStart() + WriteFrom,
WriteTo - WriteFrom);
+ // count lines manually, it's faster than getPresumedLoc()
+ Line += TextToWrite.count(LocalEOL);
if (MainEOL == LocalEOL) {
OS << TextToWrite;
- // count lines manually, it's faster than getPresumedLoc()
- Line += TextToWrite.count(LocalEOL);
- if (EnsureNewline && !TextToWrite.endswith(LocalEOL))
- OS << MainEOL;
} else {
// Output the file one line at a time, rewriting the line endings as we go.
StringRef Rest = TextToWrite;
while (!Rest.empty()) {
- StringRef LineText;
- std::tie(LineText, Rest) = Rest.split(LocalEOL);
+ // Identify and output the next line excluding an EOL sequence if present.
+ size_t Idx = Rest.find(LocalEOL);
+ StringRef LineText = Rest.substr(0, Idx);
OS << LineText;
- Line++;
- if (!Rest.empty())
+ if (Idx != StringRef::npos) {
+ // An EOL sequence was present, output the EOL sequence for the
+ // main source file and skip past the local EOL sequence.
OS << MainEOL;
+ Idx += LocalEOL.size();
+ }
+ // Strip the line just handled. If Idx is npos or matches the end of the
+ // text, Rest will be set to an empty string and the loop will terminate.
+ Rest = Rest.substr(Idx);
}
- if (TextToWrite.endswith(LocalEOL) || EnsureNewline)
- OS << MainEOL;
}
+ if (EnsureNewline && !TextToWrite.ends_with(LocalEOL))
+ OS << MainEOL;
+
WriteFrom = WriteTo;
}
+StringRef
+InclusionRewriter::getIncludedFileName(const IncludedFile *Inc) const {
+ if (Inc) {
+ auto B = SM.getBufferOrNone(Inc->Id);
+ assert(B && "Attempting to process invalid inclusion");
+ if (B)
+ return llvm::sys::path::filename(B->getBufferIdentifier());
+ }
+ return StringRef();
+}
+
/// Print characters from \p FromFile starting at \p NextToWrite up until the
/// inclusion directive at \p StartToken, then print out the inclusion
/// inclusion directive disabled by a #if directive, updating \p NextToWrite
@@ -337,7 +333,8 @@ void InclusionRewriter::CommentOutDirective(Lexer &DirectiveLex,
const Token &StartToken,
const MemoryBufferRef &FromFile,
StringRef LocalEOL,
- unsigned &NextToWrite, int &Line) {
+ unsigned &NextToWrite, int &Line,
+ const IncludedFile *Inc) {
OutputContentUpTo(FromFile, NextToWrite,
SM.getFileOffset(StartToken.getLocation()), LocalEOL, Line,
false);
@@ -349,12 +346,21 @@ void InclusionRewriter::CommentOutDirective(Lexer &DirectiveLex,
// OutputContentUpTo() would not output anything anyway.
return;
}
- OS << "#if 0 /* expanded by -frewrite-includes */" << MainEOL;
+ if (Inc) {
+ OS << "#if defined(__CLANG_REWRITTEN_INCLUDES) ";
+ if (isSystem(Inc->FileType))
+ OS << "|| defined(__CLANG_REWRITTEN_SYSTEM_INCLUDES) ";
+ OS << "/* " << getIncludedFileName(Inc);
+ } else {
+ OS << "#if 0 /*";
+ }
+ OS << " expanded by -frewrite-includes */" << MainEOL;
OutputContentUpTo(FromFile, NextToWrite,
SM.getFileOffset(DirectiveToken.getLocation()) +
DirectiveToken.getLength(),
LocalEOL, Line, true);
- OS << "#endif /* expanded by -frewrite-includes */" << MainEOL;
+ OS << (Inc ? "#else /* " : "#endif /*") << getIncludedFileName(Inc)
+ << " expanded by -frewrite-includes */" << MainEOL;
}
/// Find the next identifier in the pragma directive specified by \p RawToken.
@@ -371,8 +377,7 @@ StringRef InclusionRewriter::NextIdentifierName(Lexer &RawLex,
/// Use a raw lexer to analyze \p FileId, incrementally copying parts of it
/// and including content of included files recursively.
void InclusionRewriter::Process(FileID FileId,
- SrcMgr::CharacteristicKind FileType,
- const DirectoryLookup *DirLookup) {
+ SrcMgr::CharacteristicKind FileType) {
MemoryBufferRef FromFile;
{
auto B = SM.getBufferOrNone(FileId);
@@ -384,7 +389,7 @@ void InclusionRewriter::Process(FileID FileId,
Lexer RawLex(FileId, FromFile, PP.getSourceManager(), PP.getLangOpts());
RawLex.SetCommentRetentionState(false);
- StringRef LocalEOL = DetectEOL(FromFile);
+ StringRef LocalEOL = FromFile.getBuffer().detectEOL();
// Per the GNU docs: "1" indicates entering a new file.
if (FileId == SM.getMainFileID() || FileId == PP.getPredefinesFileID())
@@ -418,26 +423,32 @@ void InclusionRewriter::Process(FileID FileId,
case tok::pp_include:
case tok::pp_include_next:
case tok::pp_import: {
- CommentOutDirective(RawLex, HashToken, FromFile, LocalEOL, NextToWrite,
- Line);
+ SourceLocation Loc = HashToken.getLocation();
+ const IncludedFile *Inc = FindIncludeAtLocation(Loc);
+ CommentOutDirective(RawLex, HashToken, FromFile, LocalEOL,
+ NextToWrite, Line, Inc);
if (FileId != PP.getPredefinesFileID())
WriteLineInfo(FileName, Line - 1, FileType, "");
StringRef LineInfoExtra;
- SourceLocation Loc = HashToken.getLocation();
if (const Module *Mod = FindModuleAtLocation(Loc))
WriteImplicitModuleImport(Mod);
- else if (const IncludedFile *Inc = FindIncludeAtLocation(Loc)) {
+ else if (Inc) {
const Module *Mod = FindEnteredModule(Loc);
if (Mod)
OS << "#pragma clang module begin "
<< Mod->getFullModuleName(true) << "\n";
// Include and recursively process the file.
- Process(Inc->Id, Inc->FileType, Inc->DirLookup);
+ Process(Inc->Id, Inc->FileType);
if (Mod)
OS << "#pragma clang module end /*"
<< Mod->getFullModuleName(true) << "*/\n";
+ // There's no #include, therefore no #if, for -include files.
+ if (FromFile != PredefinesBuffer) {
+ OS << "#endif /* " << getIncludedFileName(Inc)
+ << " expanded by -frewrite-includes */" << LocalEOL;
+ }
// Add line marker to indicate we're returning from an included
// file.
@@ -559,7 +570,7 @@ void clang::RewriteIncludesInInput(Preprocessor &PP, raw_ostream *OS,
Rewrite->handleModuleBegin(Tok);
} while (Tok.isNot(tok::eof));
Rewrite->setPredefinesBuffer(SM.getBufferOrFake(PP.getPredefinesFileID()));
- Rewrite->Process(PP.getPredefinesFileID(), SrcMgr::C_User, nullptr);
- Rewrite->Process(SM.getMainFileID(), SrcMgr::C_User, nullptr);
+ Rewrite->Process(PP.getPredefinesFileID(), SrcMgr::C_User);
+ Rewrite->Process(SM.getMainFileID(), SrcMgr::C_User);
OS->flush();
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp b/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
index fd54bcbf7c35..1f40db785981 100644
--- a/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
@@ -24,6 +24,7 @@
#include "clang/Lex/Lexer.h"
#include "clang/Rewrite/Core/Rewriter.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -599,8 +600,8 @@ namespace {
StringLiteral *getStringLiteral(StringRef Str) {
QualType StrType = Context->getConstantArrayType(
Context->CharTy, llvm::APInt(32, Str.size() + 1), nullptr,
- ArrayType::Normal, 0);
- return StringLiteral::Create(*Context, Str, StringLiteral::Ascii,
+ ArraySizeModifier::Normal, 0);
+ return StringLiteral::Create(*Context, Str, StringLiteralKind::Ordinary,
/*Pascal=*/false, StrType, SourceLocation());
}
};
@@ -633,7 +634,7 @@ static bool IsHeaderFile(const std::string &Filename) {
return false;
}
- std::string Ext = std::string(Filename.begin()+DotPos+1, Filename.end());
+ std::string Ext = Filename.substr(DotPos + 1);
// C header: .h
// C++ header: .hh or .H;
return Ext == "h" || Ext == "hh" || Ext == "H";
@@ -852,7 +853,7 @@ RewriteModernObjC::getIvarAccessString(ObjCIvarDecl *D) {
if (D->isBitField())
IvarT = GetGroupRecordTypeForObjCIvarBitfield(D);
- if (!isa<TypedefType>(IvarT) && IvarT->isRecordType()) {
+ if (!IvarT->getAs<TypedefType>() && IvarT->isRecordType()) {
RecordDecl *RD = IvarT->castAs<RecordType>()->getDecl();
RD = RD->getDefinition();
if (RD && !RD->getDeclName().getAsIdentifierInfo()) {
@@ -863,9 +864,9 @@ RewriteModernObjC::getIvarAccessString(ObjCIvarDecl *D) {
CDecl = CatDecl->getClassInterface();
std::string RecName = std::string(CDecl->getName());
RecName += "_IMPL";
- RecordDecl *RD =
- RecordDecl::Create(*Context, TTK_Struct, TUDecl, SourceLocation(),
- SourceLocation(), &Context->Idents.get(RecName));
+ RecordDecl *RD = RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get(RecName));
QualType PtrStructIMPL = Context->getPointerType(Context->getTagDeclType(RD));
unsigned UnsignedIntSize =
static_cast<unsigned>(Context->getTypeSize(Context->UnsignedIntTy));
@@ -1957,15 +1958,15 @@ Stmt *RewriteModernObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
// @try -> try
ReplaceText(startLoc, 1, "");
- for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I) {
- ObjCAtCatchStmt *Catch = S->getCatchStmt(I);
+ for (ObjCAtCatchStmt *Catch : S->catch_stmts()) {
VarDecl *catchDecl = Catch->getCatchParamDecl();
startLoc = Catch->getBeginLoc();
bool AtRemoved = false;
if (catchDecl) {
QualType t = catchDecl->getType();
- if (const ObjCObjectPointerType *Ptr = t->getAs<ObjCObjectPointerType>()) {
+ if (const ObjCObjectPointerType *Ptr =
+ t->getAs<ObjCObjectPointerType>()) {
// Should be a pointer to a class.
ObjCInterfaceDecl *IDecl = Ptr->getObjectType()->getInterface();
if (IDecl) {
@@ -2977,9 +2978,9 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral
// };
QualType RewriteModernObjC::getSuperStructType() {
if (!SuperStructDecl) {
- SuperStructDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
- SourceLocation(), SourceLocation(),
- &Context->Idents.get("__rw_objc_super"));
+ SuperStructDecl = RecordDecl::Create(
+ *Context, TagTypeKind::Struct, TUDecl, SourceLocation(),
+ SourceLocation(), &Context->Idents.get("__rw_objc_super"));
QualType FieldTypes[2];
// struct objc_object *object;
@@ -3005,9 +3006,9 @@ QualType RewriteModernObjC::getSuperStructType() {
QualType RewriteModernObjC::getConstantStringStructType() {
if (!ConstantStringDecl) {
- ConstantStringDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
- SourceLocation(), SourceLocation(),
- &Context->Idents.get("__NSConstantStringImpl"));
+ ConstantStringDecl = RecordDecl::Create(
+ *Context, TagTypeKind::Struct, TUDecl, SourceLocation(),
+ SourceLocation(), &Context->Idents.get("__NSConstantStringImpl"));
QualType FieldTypes[4];
// struct objc_object *receiver;
@@ -3628,7 +3629,7 @@ bool RewriteModernObjC::IsTagDefinedInsideClass(ObjCContainerDecl *IDecl,
/// It handles elaborated types, as well as enum types in the process.
bool RewriteModernObjC::RewriteObjCFieldDeclType(QualType &Type,
std::string &Result) {
- if (isa<TypedefType>(Type)) {
+ if (Type->getAs<TypedefType>()) {
Result += "\t";
return false;
}
@@ -3723,7 +3724,7 @@ void RewriteModernObjC::RewriteObjCFieldDecl(FieldDecl *fieldDecl,
void RewriteModernObjC::RewriteLocallyDefinedNamedAggregates(FieldDecl *fieldDecl,
std::string &Result) {
QualType Type = fieldDecl->getType();
- if (isa<TypedefType>(Type))
+ if (Type->getAs<TypedefType>())
return;
if (Type->isArrayType())
Type = Context->getBaseElementType(Type);
@@ -3781,10 +3782,9 @@ QualType RewriteModernObjC::SynthesizeBitfieldGroupStructType(
SmallVectorImpl<ObjCIvarDecl *> &IVars) {
std::string StructTagName;
ObjCIvarBitfieldGroupType(IV, StructTagName);
- RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct,
- Context->getTranslationUnitDecl(),
- SourceLocation(), SourceLocation(),
- &Context->Idents.get(StructTagName));
+ RecordDecl *RD = RecordDecl::Create(
+ *Context, TagTypeKind::Struct, Context->getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(), &Context->Idents.get(StructTagName));
for (unsigned i=0, e = IVars.size(); i < e; i++) {
ObjCIvarDecl *Ivar = IVars[i];
RD->addDecl(FieldDecl::Create(*Context, RD, SourceLocation(), SourceLocation(),
@@ -4587,7 +4587,7 @@ Stmt *RewriteModernObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp
const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FT);
// FTP will be null for closures that don't take arguments.
- RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ RecordDecl *RD = RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("__block_impl"));
QualType PtrBlock = Context->getPointerType(Context->getTagDeclType(RD));
@@ -5346,9 +5346,9 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
RewriteByRefString(RecName, Name, ND, true);
IdentifierInfo *II = &Context->Idents.get(RecName.c_str()
+ sizeof("struct"));
- RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
- SourceLocation(), SourceLocation(),
- II);
+ RecordDecl *RD =
+ RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
+ SourceLocation(), SourceLocation(), II);
assert(RD && "SynthBlockInitExpr(): Can't find RecordDecl");
QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
@@ -5356,16 +5356,15 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
Exp = new (Context) DeclRefExpr(*Context, FD, false, FD->getType(),
VK_LValue, SourceLocation());
bool isNestedCapturedVar = false;
- if (block)
- for (const auto &CI : block->captures()) {
- const VarDecl *variable = CI.getVariable();
- if (variable == ND && CI.isNested()) {
- assert (CI.isByRef() &&
- "SynthBlockInitExpr - captured block variable is not byref");
- isNestedCapturedVar = true;
- break;
- }
+ for (const auto &CI : block->captures()) {
+ const VarDecl *variable = CI.getVariable();
+ if (variable == ND && CI.isNested()) {
+ assert(CI.isByRef() &&
+ "SynthBlockInitExpr - captured block variable is not byref");
+ isNestedCapturedVar = true;
+ break;
}
+ }
// captured nested byref variable has its address passed. Do not take
// its address again.
if (!isNestedCapturedVar)
@@ -6723,7 +6722,7 @@ static void Write_IvarOffsetVar(RewriteModernObjC &RewriteObj,
std::string &Result,
ArrayRef<ObjCIvarDecl *> Ivars,
ObjCInterfaceDecl *CDecl) {
- // FIXME. visibilty of offset symbols may have to be set; for Darwin
+ // FIXME. visibility of offset symbols may have to be set; for Darwin
// this is what happens:
/**
if (Ivar->getAccessControl() == ObjCIvarDecl::Private ||
@@ -6855,7 +6854,7 @@ void RewriteModernObjC::RewriteObjCProtocolMetaData(ObjCProtocolDecl *PDecl,
std::vector<ObjCMethodDecl *> InstanceMethods, ClassMethods;
std::vector<ObjCMethodDecl *> OptInstanceMethods, OptClassMethods;
for (auto *MD : PDecl->instance_methods()) {
- if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ if (MD->getImplementationControl() == ObjCImplementationControl::Optional) {
OptInstanceMethods.push_back(MD);
} else {
InstanceMethods.push_back(MD);
@@ -6863,7 +6862,7 @@ void RewriteModernObjC::RewriteObjCProtocolMetaData(ObjCProtocolDecl *PDecl,
}
for (auto *MD : PDecl->class_methods()) {
- if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ if (MD->getImplementationControl() == ObjCImplementationControl::Optional) {
OptClassMethods.push_back(MD);
} else {
ClassMethods.push_back(MD);
@@ -7496,7 +7495,7 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
if (D->isBitField())
IvarT = GetGroupRecordTypeForObjCIvarBitfield(D);
- if (!isa<TypedefType>(IvarT) && IvarT->isRecordType()) {
+ if (!IvarT->getAs<TypedefType>() && IvarT->isRecordType()) {
RecordDecl *RD = IvarT->castAs<RecordType>()->getDecl();
RD = RD->getDefinition();
if (RD && !RD->getDeclName().getAsIdentifierInfo()) {
@@ -7508,8 +7507,8 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
std::string RecName = std::string(CDecl->getName());
RecName += "_IMPL";
RecordDecl *RD = RecordDecl::Create(
- *Context, TTK_Struct, TUDecl, SourceLocation(), SourceLocation(),
- &Context->Idents.get(RecName));
+ *Context, TagTypeKind::Struct, TUDecl, SourceLocation(),
+ SourceLocation(), &Context->Idents.get(RecName));
QualType PtrStructIMPL = Context->getPointerType(Context->getTagDeclType(RD));
unsigned UnsignedIntSize =
static_cast<unsigned>(Context->getTypeSize(Context->UnsignedIntTy));
diff --git a/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteObjC.cpp b/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
index 0750d36b02ac..bf5176a2b6fb 100644
--- a/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
@@ -499,8 +499,8 @@ namespace {
StringLiteral *getStringLiteral(StringRef Str) {
QualType StrType = Context->getConstantArrayType(
Context->CharTy, llvm::APInt(32, Str.size() + 1), nullptr,
- ArrayType::Normal, 0);
- return StringLiteral::Create(*Context, Str, StringLiteral::Ascii,
+ ArraySizeModifier::Normal, 0);
+ return StringLiteral::Create(*Context, Str, StringLiteralKind::Ordinary,
/*Pascal=*/false, StrType, SourceLocation());
}
};
@@ -569,7 +569,7 @@ static bool IsHeaderFile(const std::string &Filename) {
return false;
}
- std::string Ext = std::string(Filename.begin()+DotPos+1, Filename.end());
+ std::string Ext = Filename.substr(DotPos + 1);
// C header: .h
// C++ header: .hh or .H;
return Ext == "h" || Ext == "hh" || Ext == "H";
@@ -2357,7 +2357,7 @@ void RewriteObjC::SynthMsgSendFunctionDecl() {
void RewriteObjC::SynthMsgSendSuperFunctionDecl() {
IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSendSuper");
SmallVector<QualType, 16> ArgTys;
- RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ RecordDecl *RD = RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("objc_super"));
QualType argT = Context->getPointerType(Context->getTagDeclType(RD));
@@ -2400,7 +2400,7 @@ void RewriteObjC::SynthMsgSendSuperStretFunctionDecl() {
IdentifierInfo *msgSendIdent =
&Context->Idents.get("objc_msgSendSuper_stret");
SmallVector<QualType, 16> ArgTys;
- RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ RecordDecl *RD = RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("objc_super"));
QualType argT = Context->getPointerType(Context->getTagDeclType(RD));
@@ -2531,7 +2531,7 @@ Stmt *RewriteObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
// struct objc_super { struct objc_object *receiver; struct objc_class *super; };
QualType RewriteObjC::getSuperStructType() {
if (!SuperStructDecl) {
- SuperStructDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SuperStructDecl = RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("objc_super"));
QualType FieldTypes[2];
@@ -2559,9 +2559,9 @@ QualType RewriteObjC::getSuperStructType() {
QualType RewriteObjC::getConstantStringStructType() {
if (!ConstantStringDecl) {
- ConstantStringDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
- SourceLocation(), SourceLocation(),
- &Context->Idents.get("__NSConstantStringImpl"));
+ ConstantStringDecl = RecordDecl::Create(
+ *Context, TagTypeKind::Struct, TUDecl, SourceLocation(),
+ SourceLocation(), &Context->Idents.get("__NSConstantStringImpl"));
QualType FieldTypes[4];
// struct objc_object *receiver;
@@ -3755,7 +3755,7 @@ Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FT);
// FTP will be null for closures that don't take arguments.
- RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ RecordDecl *RD = RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
SourceLocation(), SourceLocation(),
&Context->Idents.get("__block_impl"));
QualType PtrBlock = Context->getPointerType(Context->getTagDeclType(RD));
@@ -4483,9 +4483,9 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
RewriteByRefString(RecName, Name, ND, true);
IdentifierInfo *II = &Context->Idents.get(RecName.c_str()
+ sizeof("struct"));
- RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
- SourceLocation(), SourceLocation(),
- II);
+ RecordDecl *RD =
+ RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
+ SourceLocation(), SourceLocation(), II);
assert(RD && "SynthBlockInitExpr(): Can't find RecordDecl");
QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
@@ -5821,9 +5821,9 @@ Stmt *RewriteObjCFragileABI::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
std::string(clsDeclared->getIdentifier()->getName());
RecName += "_IMPL";
IdentifierInfo *II = &Context->Idents.get(RecName);
- RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
- SourceLocation(), SourceLocation(),
- II);
+ RecordDecl *RD =
+ RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
+ SourceLocation(), SourceLocation(), II);
assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl");
QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, castT,
@@ -5862,9 +5862,9 @@ Stmt *RewriteObjCFragileABI::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
std::string(clsDeclared->getIdentifier()->getName());
RecName += "_IMPL";
IdentifierInfo *II = &Context->Idents.get(RecName);
- RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
- SourceLocation(), SourceLocation(),
- II);
+ RecordDecl *RD =
+ RecordDecl::Create(*Context, TagTypeKind::Struct, TUDecl,
+ SourceLocation(), SourceLocation(), II);
assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl");
QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, castT,
diff --git a/contrib/llvm-project/clang/lib/Frontend/SARIFDiagnostic.cpp b/contrib/llvm-project/clang/lib/Frontend/SARIFDiagnostic.cpp
new file mode 100644
index 000000000000..4e36153ed539
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Frontend/SARIFDiagnostic.cpp
@@ -0,0 +1,224 @@
+//===--------- SARIFDiagnostic.cpp - SARIF Diagnostic Formatting ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/SARIFDiagnostic.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/Sarif.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/Locale.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <string>
+
+namespace clang {
+
+SARIFDiagnostic::SARIFDiagnostic(raw_ostream &OS, const LangOptions &LangOpts,
+ DiagnosticOptions *DiagOpts,
+ SarifDocumentWriter *Writer)
+ : DiagnosticRenderer(LangOpts, DiagOpts), Writer(Writer) {}
+
+// FIXME(llvm-project/issues/57323): Refactor Diagnostic classes.
+void SARIFDiagnostic::emitDiagnosticMessage(
+ FullSourceLoc Loc, PresumedLoc PLoc, DiagnosticsEngine::Level Level,
+ StringRef Message, ArrayRef<clang::CharSourceRange> Ranges,
+ DiagOrStoredDiag D) {
+
+ const auto *Diag = D.dyn_cast<const Diagnostic *>();
+
+ if (!Diag)
+ return;
+
+ SarifRule Rule = SarifRule::create().setRuleId(std::to_string(Diag->getID()));
+
+ Rule = addDiagnosticLevelToRule(Rule, Level);
+
+ unsigned RuleIdx = Writer->createRule(Rule);
+
+ SarifResult Result =
+ SarifResult::create(RuleIdx).setDiagnosticMessage(Message);
+
+ if (Loc.isValid())
+ Result = addLocationToResult(Result, Loc, PLoc, Ranges, *Diag);
+
+ Writer->appendResult(Result);
+}
+
+SarifResult SARIFDiagnostic::addLocationToResult(
+ SarifResult Result, FullSourceLoc Loc, PresumedLoc PLoc,
+ ArrayRef<CharSourceRange> Ranges, const Diagnostic &Diag) {
+ SmallVector<CharSourceRange> Locations = {};
+
+ if (PLoc.isInvalid()) {
+ // At least add the file name if available:
+ FileID FID = Loc.getFileID();
+ if (FID.isValid()) {
+ if (OptionalFileEntryRef FE = Loc.getFileEntryRef()) {
+ emitFilename(FE->getName(), Loc.getManager());
+ // FIXME(llvm-project/issues/57366): File-only locations
+ }
+ }
+ return Result;
+ }
+
+ FileID CaretFileID = Loc.getExpansionLoc().getFileID();
+
+ for (const CharSourceRange Range : Ranges) {
+ // Ignore invalid ranges.
+ if (Range.isInvalid())
+ continue;
+
+ auto &SM = Loc.getManager();
+ SourceLocation B = SM.getExpansionLoc(Range.getBegin());
+ CharSourceRange ERange = SM.getExpansionRange(Range.getEnd());
+ SourceLocation E = ERange.getEnd();
+ bool IsTokenRange = ERange.isTokenRange();
+
+ std::pair<FileID, unsigned> BInfo = SM.getDecomposedLoc(B);
+ std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(E);
+
+ // If the start or end of the range is in another file, just discard
+ // it.
+ if (BInfo.first != CaretFileID || EInfo.first != CaretFileID)
+ continue;
+
+ // Add in the length of the token, so that we cover multi-char
+ // tokens.
+ unsigned TokSize = 0;
+ if (IsTokenRange)
+ TokSize = Lexer::MeasureTokenLength(E, SM, LangOpts);
+
+ FullSourceLoc BF(B, SM), EF(E, SM);
+ SourceLocation BeginLoc = SM.translateLineCol(
+ BF.getFileID(), BF.getLineNumber(), BF.getColumnNumber());
+ SourceLocation EndLoc = SM.translateLineCol(
+ EF.getFileID(), EF.getLineNumber(), EF.getColumnNumber() + TokSize);
+
+ Locations.push_back(
+ CharSourceRange{SourceRange{BeginLoc, EndLoc}, /* ITR = */ false});
+ // FIXME: Additional ranges should use presumed location in both
+ // Text and SARIF diagnostics.
+ }
+
+ auto &SM = Loc.getManager();
+ auto FID = PLoc.getFileID();
+ // Visual Studio 2010 or earlier expects column number to be off by one.
+ unsigned int ColNo = (LangOpts.MSCompatibilityVersion &&
+ !LangOpts.isCompatibleWithMSVC(LangOptions::MSVC2012))
+ ? PLoc.getColumn() - 1
+ : PLoc.getColumn();
+ SourceLocation DiagLoc = SM.translateLineCol(FID, PLoc.getLine(), ColNo);
+
+ // FIXME(llvm-project/issues/57366): Properly process #line directives.
+ Locations.push_back(
+ CharSourceRange{SourceRange{DiagLoc, DiagLoc}, /* ITR = */ false});
+
+ return Result.setLocations(Locations);
+}
+
+SarifRule
+SARIFDiagnostic::addDiagnosticLevelToRule(SarifRule Rule,
+ DiagnosticsEngine::Level Level) {
+ auto Config = SarifReportingConfiguration::create();
+
+ switch (Level) {
+ case DiagnosticsEngine::Note:
+ Config = Config.setLevel(SarifResultLevel::Note);
+ break;
+ case DiagnosticsEngine::Remark:
+ Config = Config.setLevel(SarifResultLevel::None);
+ break;
+ case DiagnosticsEngine::Warning:
+ Config = Config.setLevel(SarifResultLevel::Warning);
+ break;
+ case DiagnosticsEngine::Error:
+ Config = Config.setLevel(SarifResultLevel::Error).setRank(50);
+ break;
+ case DiagnosticsEngine::Fatal:
+ Config = Config.setLevel(SarifResultLevel::Error).setRank(100);
+ break;
+ case DiagnosticsEngine::Ignored:
+ assert(false && "Invalid diagnostic type");
+ }
+
+ return Rule.setDefaultConfiguration(Config);
+}
+
+llvm::StringRef SARIFDiagnostic::emitFilename(StringRef Filename,
+ const SourceManager &SM) {
+ if (DiagOpts->AbsolutePath) {
+ auto File = SM.getFileManager().getOptionalFileRef(Filename);
+ if (File) {
+ // We want to print a simplified absolute path, i. e. without "dots".
+ //
+ // The hardest part here are the paths like "<part1>/<link>/../<part2>".
+ // On Unix-like systems, we cannot just collapse "<link>/..", because
+ // paths are resolved sequentially, and, thereby, the path
+ // "<part1>/<part2>" may point to a different location. That is why
+ // we use FileManager::getCanonicalName(), which expands all indirections
+ // with llvm::sys::fs::real_path() and caches the result.
+ //
+ // On the other hand, it would be better to preserve as much of the
+ // original path as possible, because that helps a user to recognize it.
+ // real_path() expands all links, which is sometimes too much. Luckily,
+ // on Windows we can just use llvm::sys::path::remove_dots(), because,
+ // on that system, both aforementioned paths point to the same place.
+#ifdef _WIN32
+ SmallString<256> TmpFilename = File->getName();
+ llvm::sys::fs::make_absolute(TmpFilename);
+ llvm::sys::path::native(TmpFilename);
+ llvm::sys::path::remove_dots(TmpFilename, /* remove_dot_dot */ true);
+ Filename = StringRef(TmpFilename.data(), TmpFilename.size());
+#else
+ Filename = SM.getFileManager().getCanonicalName(*File);
+#endif
+ }
+ }
+
+ return Filename;
+}
+
+/// Print out the file/line/column information and include trace.
+///
+/// This method handlen the emission of the diagnostic location information.
+/// This includes extracting as much location information as is present for
+/// the diagnostic and printing it, as well as any include stack or source
+/// ranges necessary.
+void SARIFDiagnostic::emitDiagnosticLoc(FullSourceLoc Loc, PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ ArrayRef<CharSourceRange> Ranges) {
+ assert(false && "Not implemented in SARIF mode");
+}
+
+void SARIFDiagnostic::emitIncludeLocation(FullSourceLoc Loc, PresumedLoc PLoc) {
+ assert(false && "Not implemented in SARIF mode");
+}
+
+void SARIFDiagnostic::emitImportLocation(FullSourceLoc Loc, PresumedLoc PLoc,
+ StringRef ModuleName) {
+ assert(false && "Not implemented in SARIF mode");
+}
+
+void SARIFDiagnostic::emitBuildingModuleLocation(FullSourceLoc Loc,
+ PresumedLoc PLoc,
+ StringRef ModuleName) {
+ assert(false && "Not implemented in SARIF mode");
+}
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Frontend/SARIFDiagnosticPrinter.cpp b/contrib/llvm-project/clang/lib/Frontend/SARIFDiagnosticPrinter.cpp
new file mode 100644
index 000000000000..73928d19a031
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Frontend/SARIFDiagnosticPrinter.cpp
@@ -0,0 +1,83 @@
+//===------- SARIFDiagnosticPrinter.cpp - Diagnostic Printer---------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This diagnostic client prints out their diagnostic messages in SARIF format.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/SARIFDiagnosticPrinter.h"
+#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/Sarif.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/DiagnosticRenderer.h"
+#include "clang/Frontend/SARIFDiagnostic.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/JSON.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+
+namespace clang {
+
+SARIFDiagnosticPrinter::SARIFDiagnosticPrinter(raw_ostream &OS,
+ DiagnosticOptions *Diags)
+ : OS(OS), DiagOpts(Diags) {}
+
+void SARIFDiagnosticPrinter::BeginSourceFile(const LangOptions &LO,
+ const Preprocessor *PP) {
+ // Build the SARIFDiagnostic utility.
+ assert(hasSarifWriter() && "Writer not set!");
+ assert(!SARIFDiag && "SARIFDiagnostic already set.");
+ SARIFDiag = std::make_unique<SARIFDiagnostic>(OS, LO, &*DiagOpts, &*Writer);
+ // Initialize the SARIF object.
+ Writer->createRun("clang", Prefix);
+}
+
+void SARIFDiagnosticPrinter::EndSourceFile() {
+ assert(SARIFDiag && "SARIFDiagnostic has not been set.");
+ Writer->endRun();
+ llvm::json::Value Value(Writer->createDocument());
+ OS << "\n" << Value << "\n\n";
+ OS.flush();
+ SARIFDiag.reset();
+}
+
+void SARIFDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
+ const Diagnostic &Info) {
+ assert(SARIFDiag && "SARIFDiagnostic has not been set.");
+ // Default implementation (Warnings/errors count). Keeps track of the
+ // number of errors.
+ DiagnosticConsumer::HandleDiagnostic(Level, Info);
+
+ // Render the diagnostic message into a temporary buffer eagerly. We'll use
+ // this later as we add the diagnostic to the SARIF object.
+ SmallString<100> OutStr;
+ Info.FormatDiagnostic(OutStr);
+
+ llvm::raw_svector_ostream DiagMessageStream(OutStr);
+
+ // Use a dedicated, simpler path for diagnostics without a valid location.
+ // This is important as if the location is missing, we may be emitting
+ // diagnostics in a context that lacks language options, a source manager, or
+ // other infrastructure necessary when emitting more rich diagnostics.
+ if (Info.getLocation().isInvalid()) {
+ // FIXME: Enable diagnostics without a source manager
+ return;
+ }
+
+ // Assert that the rest of our infrastructure is setup properly.
+ assert(DiagOpts && "Unexpected diagnostic without options set");
+ assert(Info.hasSourceManager() &&
+ "Unexpected diagnostic with no source manager");
+
+ SARIFDiag->emitDiagnostic(
+ FullSourceLoc(Info.getLocation(), Info.getSourceManager()), Level,
+ DiagMessageStream.str(), Info.getRanges(), Info.getFixItHints(), &Info);
+}
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp b/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
index 462aeda6e027..b76728acb907 100644
--- a/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
@@ -37,14 +37,12 @@ public:
AbbreviationMap() {}
void set(unsigned recordID, unsigned abbrevID) {
- assert(Abbrevs.find(recordID) == Abbrevs.end()
- && "Abbreviation already set.");
+ assert(!Abbrevs.contains(recordID) && "Abbreviation already set.");
Abbrevs[recordID] = abbrevID;
}
unsigned get(unsigned recordID) {
- assert(Abbrevs.find(recordID) != Abbrevs.end() &&
- "Abbreviation not set.");
+ assert(Abbrevs.contains(recordID) && "Abbreviation not set.");
return Abbrevs[recordID];
}
};
@@ -95,8 +93,7 @@ class SDiagsMerger : SerializedDiagnosticReader {
AbbrevLookup DiagFlagLookup;
public:
- SDiagsMerger(SDiagsWriter &Writer)
- : SerializedDiagnosticReader(), Writer(Writer) {}
+ SDiagsMerger(SDiagsWriter &Writer) : Writer(Writer) {}
std::error_code mergeRecordsFromFile(const char *File) {
return readDiagnostics(File);
diff --git a/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticReader.cpp b/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticReader.cpp
index eca6f5ee1803..5f5ed41b0462 100644
--- a/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticReader.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/SerializedDiagnosticReader.cpp
@@ -10,7 +10,6 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/FileSystemOptions.h"
#include "clang/Frontend/SerializedDiagnostics.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Bitstream/BitCodes.h"
@@ -20,6 +19,7 @@
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/ManagedStatic.h"
#include <cstdint>
+#include <optional>
#include <system_error>
using namespace clang;
@@ -35,7 +35,7 @@ std::error_code SerializedDiagnosticReader::readDiagnostics(StringRef File) {
return SDError::CouldNotLoad;
llvm::BitstreamCursor Stream(**Buffer);
- Optional<llvm::BitstreamBlockInfo> BlockInfo;
+ std::optional<llvm::BitstreamBlockInfo> BlockInfo;
if (Stream.AtEndOfStream())
return SDError::InvalidSignature;
@@ -73,7 +73,7 @@ std::error_code SerializedDiagnosticReader::readDiagnostics(StringRef File) {
switch (MaybeSubBlockID.get()) {
case llvm::bitc::BLOCKINFO_BLOCK_ID: {
- Expected<Optional<llvm::BitstreamBlockInfo>> MaybeBlockInfo =
+ Expected<std::optional<llvm::BitstreamBlockInfo>> MaybeBlockInfo =
Stream.ReadBlockInfoBlock();
if (!MaybeBlockInfo) {
// FIXME this drops the error on the floor.
@@ -184,7 +184,7 @@ SerializedDiagnosticReader::readMetaBlock(llvm::BitstreamCursor &Stream) {
consumeError(std::move(Err));
return SDError::MalformedMetadataBlock;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Cursor::BlockEnd:
if (!VersionChecked)
return SDError::MissingVersion;
diff --git a/contrib/llvm-project/clang/lib/Frontend/TestModuleFileExtension.cpp b/contrib/llvm-project/clang/lib/Frontend/TestModuleFileExtension.cpp
index 7d4026a7efc6..2d5145d0c54c 100644
--- a/contrib/llvm-project/clang/lib/Frontend/TestModuleFileExtension.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/TestModuleFileExtension.cpp
@@ -93,16 +93,14 @@ TestModuleFileExtension::getExtensionMetadata() const {
return { BlockName, MajorVersion, MinorVersion, UserInfo };
}
-llvm::hash_code TestModuleFileExtension::hashExtension(
- llvm::hash_code Code) const {
+void TestModuleFileExtension::hashExtension(
+ ExtensionHashBuilder &HBuilder) const {
if (Hashed) {
- Code = llvm::hash_combine(Code, BlockName);
- Code = llvm::hash_combine(Code, MajorVersion);
- Code = llvm::hash_combine(Code, MinorVersion);
- Code = llvm::hash_combine(Code, UserInfo);
+ HBuilder.add(BlockName);
+ HBuilder.add(MajorVersion);
+ HBuilder.add(MinorVersion);
+ HBuilder.add(UserInfo);
}
-
- return Code;
}
std::unique_ptr<ModuleFileExtensionWriter>
@@ -135,5 +133,5 @@ std::string TestModuleFileExtension::str() const {
llvm::raw_string_ostream OS(Buffer);
OS << BlockName << ":" << MajorVersion << ":" << MinorVersion << ":" << Hashed
<< ":" << UserInfo;
- return OS.str();
+ return Buffer;
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/TestModuleFileExtension.h b/contrib/llvm-project/clang/lib/Frontend/TestModuleFileExtension.h
index c8ca4cd4f210..e22c87ed2d1b 100644
--- a/contrib/llvm-project/clang/lib/Frontend/TestModuleFileExtension.h
+++ b/contrib/llvm-project/clang/lib/Frontend/TestModuleFileExtension.h
@@ -55,7 +55,7 @@ public:
ModuleFileExtensionMetadata getExtensionMetadata() const override;
- llvm::hash_code hashExtension(llvm::hash_code Code) const override;
+ void hashExtension(ExtensionHashBuilder &HBuilder) const override;
std::unique_ptr<ModuleFileExtensionWriter>
createExtensionWriter(ASTWriter &Writer) override;
diff --git a/contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp b/contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp
index 8df7496c6ddd..779dead5d058 100644
--- a/contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp
@@ -20,11 +20,11 @@
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
+#include <optional>
using namespace clang;
-static const enum raw_ostream::Colors noteColor =
- raw_ostream::BLACK;
+static const enum raw_ostream::Colors noteColor = raw_ostream::CYAN;
static const enum raw_ostream::Colors remarkColor =
raw_ostream::BLUE;
static const enum raw_ostream::Colors fixitColor =
@@ -44,7 +44,7 @@ static const enum raw_ostream::Colors savedColor =
/// Add highlights to differences in template strings.
static void applyTemplateHighlighting(raw_ostream &OS, StringRef Str,
bool &Normal, bool Bold) {
- while (1) {
+ while (true) {
size_t Pos = Str.find(ToggleHighlight);
OS << Str.slice(0, Pos);
if (Pos == StringRef::npos)
@@ -90,89 +90,108 @@ static int bytesSincePreviousTabOrLineBegin(StringRef SourceLine, size_t i) {
/// printableTextForNextCharacter.
///
/// \param SourceLine The line of source
-/// \param i Pointer to byte index,
+/// \param I Pointer to byte index,
/// \param TabStop used to expand tabs
/// \return pair(printable text, 'true' iff original text was printable)
///
static std::pair<SmallString<16>, bool>
-printableTextForNextCharacter(StringRef SourceLine, size_t *i,
+printableTextForNextCharacter(StringRef SourceLine, size_t *I,
unsigned TabStop) {
- assert(i && "i must not be null");
- assert(*i<SourceLine.size() && "must point to a valid index");
+ assert(I && "I must not be null");
+ assert(*I < SourceLine.size() && "must point to a valid index");
- if (SourceLine[*i]=='\t') {
+ if (SourceLine[*I] == '\t') {
assert(0 < TabStop && TabStop <= DiagnosticOptions::MaxTabStop &&
"Invalid -ftabstop value");
- unsigned col = bytesSincePreviousTabOrLineBegin(SourceLine, *i);
- unsigned NumSpaces = TabStop - col%TabStop;
+ unsigned Col = bytesSincePreviousTabOrLineBegin(SourceLine, *I);
+ unsigned NumSpaces = TabStop - (Col % TabStop);
assert(0 < NumSpaces && NumSpaces <= TabStop
&& "Invalid computation of space amt");
- ++(*i);
+ ++(*I);
- SmallString<16> expandedTab;
- expandedTab.assign(NumSpaces, ' ');
- return std::make_pair(expandedTab, true);
+ SmallString<16> ExpandedTab;
+ ExpandedTab.assign(NumSpaces, ' ');
+ return std::make_pair(ExpandedTab, true);
}
- unsigned char const *begin, *end;
- begin = reinterpret_cast<unsigned char const *>(&*(SourceLine.begin() + *i));
- end = begin + (SourceLine.size() - *i);
-
- if (llvm::isLegalUTF8Sequence(begin, end)) {
- llvm::UTF32 c;
- llvm::UTF32 *cptr = &c;
- unsigned char const *original_begin = begin;
- unsigned char const *cp_end =
- begin + llvm::getNumBytesForUTF8(SourceLine[*i]);
-
- llvm::ConversionResult res = llvm::ConvertUTF8toUTF32(
- &begin, cp_end, &cptr, cptr + 1, llvm::strictConversion);
- (void)res;
- assert(llvm::conversionOK == res);
- assert(0 < begin-original_begin
- && "we must be further along in the string now");
- *i += begin-original_begin;
-
- if (!llvm::sys::locale::isPrint(c)) {
- // If next character is valid UTF-8, but not printable
- SmallString<16> expandedCP("<U+>");
- while (c) {
- expandedCP.insert(expandedCP.begin()+3, llvm::hexdigit(c%16));
- c/=16;
- }
- while (expandedCP.size() < 8)
- expandedCP.insert(expandedCP.begin()+3, llvm::hexdigit(0));
- return std::make_pair(expandedCP, false);
- }
-
- // If next character is valid UTF-8, and printable
- return std::make_pair(SmallString<16>(original_begin, cp_end), true);
+ const unsigned char *Begin = SourceLine.bytes_begin() + *I;
+ // Fast path for the common ASCII case.
+ if (*Begin < 0x80 && llvm::sys::locale::isPrint(*Begin)) {
+ ++(*I);
+ return std::make_pair(SmallString<16>(Begin, Begin + 1), true);
+ }
+ unsigned CharSize = llvm::getNumBytesForUTF8(*Begin);
+ const unsigned char *End = Begin + CharSize;
+
+ // Convert it to UTF32 and check if it's printable.
+ if (End <= SourceLine.bytes_end() && llvm::isLegalUTF8Sequence(Begin, End)) {
+ llvm::UTF32 C;
+ llvm::UTF32 *CPtr = &C;
+
+ // Begin and end before conversion.
+ unsigned char const *OriginalBegin = Begin;
+ llvm::ConversionResult Res = llvm::ConvertUTF8toUTF32(
+ &Begin, End, &CPtr, CPtr + 1, llvm::strictConversion);
+ (void)Res;
+ assert(Res == llvm::conversionOK);
+ assert(OriginalBegin < Begin);
+ assert((Begin - OriginalBegin) == CharSize);
+
+ (*I) += (Begin - OriginalBegin);
+
+ // Valid, multi-byte, printable UTF8 character.
+ if (llvm::sys::locale::isPrint(C))
+ return std::make_pair(SmallString<16>(OriginalBegin, End), true);
+
+ // Valid but not printable.
+ SmallString<16> Str("<U+>");
+ while (C) {
+ Str.insert(Str.begin() + 3, llvm::hexdigit(C % 16));
+ C /= 16;
+ }
+ while (Str.size() < 8)
+ Str.insert(Str.begin() + 3, llvm::hexdigit(0));
+ return std::make_pair(Str, false);
}
- // If next byte is not valid UTF-8 (and therefore not printable)
- SmallString<16> expandedByte("<XX>");
- unsigned char byte = SourceLine[*i];
- expandedByte[1] = llvm::hexdigit(byte / 16);
- expandedByte[2] = llvm::hexdigit(byte % 16);
- ++(*i);
- return std::make_pair(expandedByte, false);
+ // Otherwise, not printable since it's not valid UTF8.
+ SmallString<16> ExpandedByte("<XX>");
+ unsigned char Byte = SourceLine[*I];
+ ExpandedByte[1] = llvm::hexdigit(Byte / 16);
+ ExpandedByte[2] = llvm::hexdigit(Byte % 16);
+ ++(*I);
+ return std::make_pair(ExpandedByte, false);
}
static void expandTabs(std::string &SourceLine, unsigned TabStop) {
- size_t i = SourceLine.size();
- while (i>0) {
- i--;
- if (SourceLine[i]!='\t')
+ size_t I = SourceLine.size();
+ while (I > 0) {
+ I--;
+ if (SourceLine[I] != '\t')
continue;
- size_t tmp_i = i;
- std::pair<SmallString<16>,bool> res
- = printableTextForNextCharacter(SourceLine, &tmp_i, TabStop);
- SourceLine.replace(i, 1, res.first.c_str());
+ size_t TmpI = I;
+ auto [Str, Printable] =
+ printableTextForNextCharacter(SourceLine, &TmpI, TabStop);
+ SourceLine.replace(I, 1, Str.c_str());
}
}
-/// This function takes a raw source line and produces a mapping from the bytes
+/// \p BytesOut:
+/// A mapping from columns to the byte of the source line that produced the
+/// character displaying at that column. This is the inverse of \p ColumnsOut.
+///
+/// The last element in the array is the number of bytes in the source string.
+///
+/// example: (given a tabstop of 8)
+///
+/// "a \t \u3042" -> {0,1,2,-1,-1,-1,-1,-1,3,4,-1,7}
+///
+/// (\\u3042 is represented in UTF-8 by three bytes and takes two columns to
+/// display)
+///
+/// \p ColumnsOut:
+/// A mapping from the bytes
/// of the printable representation of the line to the columns those printable
/// characters will appear at (numbering the first column as 0).
///
@@ -194,60 +213,34 @@ static void expandTabs(std::string &SourceLine, unsigned TabStop) {
///
/// (\\u3042 is represented in UTF-8 by three bytes and takes two columns to
/// display)
-static void byteToColumn(StringRef SourceLine, unsigned TabStop,
- SmallVectorImpl<int> &out) {
- out.clear();
+static void genColumnByteMapping(StringRef SourceLine, unsigned TabStop,
+ SmallVectorImpl<int> &BytesOut,
+ SmallVectorImpl<int> &ColumnsOut) {
+ assert(BytesOut.empty());
+ assert(ColumnsOut.empty());
if (SourceLine.empty()) {
- out.resize(1u,0);
+ BytesOut.resize(1u, 0);
+ ColumnsOut.resize(1u, 0);
return;
}
- out.resize(SourceLine.size()+1, -1);
-
- int columns = 0;
- size_t i = 0;
- while (i<SourceLine.size()) {
- out[i] = columns;
- std::pair<SmallString<16>,bool> res
- = printableTextForNextCharacter(SourceLine, &i, TabStop);
- columns += llvm::sys::locale::columnWidth(res.first);
- }
- out.back() = columns;
-}
-
-/// This function takes a raw source line and produces a mapping from columns
-/// to the byte of the source line that produced the character displaying at
-/// that column. This is the inverse of the mapping produced by byteToColumn()
-///
-/// The last element in the array is the number of bytes in the source string
-///
-/// example: (given a tabstop of 8)
-///
-/// "a \t \u3042" -> {0,1,2,-1,-1,-1,-1,-1,3,4,-1,7}
-///
-/// (\\u3042 is represented in UTF-8 by three bytes and takes two columns to
-/// display)
-static void columnToByte(StringRef SourceLine, unsigned TabStop,
- SmallVectorImpl<int> &out) {
- out.clear();
-
- if (SourceLine.empty()) {
- out.resize(1u, 0);
- return;
+ ColumnsOut.resize(SourceLine.size() + 1, -1);
+
+ int Columns = 0;
+ size_t I = 0;
+ while (I < SourceLine.size()) {
+ ColumnsOut[I] = Columns;
+ BytesOut.resize(Columns + 1, -1);
+ BytesOut.back() = I;
+ auto [Str, Printable] =
+ printableTextForNextCharacter(SourceLine, &I, TabStop);
+ Columns += llvm::sys::locale::columnWidth(Str);
}
- int columns = 0;
- size_t i = 0;
- while (i<SourceLine.size()) {
- out.resize(columns+1, -1);
- out.back() = i;
- std::pair<SmallString<16>,bool> res
- = printableTextForNextCharacter(SourceLine, &i, TabStop);
- columns += llvm::sys::locale::columnWidth(res.first);
- }
- out.resize(columns+1, -1);
- out.back() = i;
+ ColumnsOut.back() = Columns;
+ BytesOut.resize(Columns + 1, -1);
+ BytesOut.back() = I;
}
namespace {
@@ -255,8 +248,7 @@ struct SourceColumnMap {
SourceColumnMap(StringRef SourceLine, unsigned TabStop)
: m_SourceLine(SourceLine) {
- ::byteToColumn(SourceLine, TabStop, m_byteToColumn);
- ::columnToByte(SourceLine, TabStop, m_columnToByte);
+ genColumnByteMapping(SourceLine, TabStop, m_columnToByte, m_byteToColumn);
assert(m_byteToColumn.size()==SourceLine.size()+1);
assert(0 < m_byteToColumn.size() && 0 < m_columnToByte.size());
@@ -332,8 +324,7 @@ static void selectInterestingSourceRegion(std::string &SourceLine,
return;
// No special characters are allowed in CaretLine.
- assert(CaretLine.end() ==
- llvm::find_if(CaretLine, [](char c) { return c < ' ' || '~' < c; }));
+ assert(llvm::none_of(CaretLine, [](char c) { return c < ' ' || '~' < c; }));
// Find the slice that we need to display the full caret line
// correctly.
@@ -471,9 +462,7 @@ static void selectInterestingSourceRegion(std::string &SourceLine,
CaretEnd = map.byteToColumn(SourceEnd) + CaretColumnsOutsideSource;
// [CaretStart, CaretEnd) is the slice we want. Update the various
- // output lines to show only this slice, with two-space padding
- // before the lines so that it looks nicer.
-
+ // output lines to show only this slice.
assert(CaretStart!=(unsigned)-1 && CaretEnd!=(unsigned)-1 &&
SourceStart!=(unsigned)-1 && SourceEnd!=(unsigned)-1);
assert(SourceStart <= SourceEnd);
@@ -605,21 +594,13 @@ static unsigned findEndOfWord(unsigned Start, StringRef Str,
/// Str will be printed. This will be non-zero when part of the first
/// line has already been printed.
/// \param Bold if the current text should be bold
-/// \param Indentation the number of spaces to indent any lines beyond
-/// the first line.
/// \returns true if word-wrapping was required, or false if the
/// string fit on the first line.
-static bool printWordWrapped(raw_ostream &OS, StringRef Str,
- unsigned Columns,
- unsigned Column = 0,
- bool Bold = false,
- unsigned Indentation = WordWrapIndentation) {
+static bool printWordWrapped(raw_ostream &OS, StringRef Str, unsigned Columns,
+ unsigned Column, bool Bold) {
const unsigned Length = std::min(Str.find('\n'), Str.size());
bool TextNormal = true;
- // The string used to indent each line.
- SmallString<16> IndentStr;
- IndentStr.assign(Indentation, ' ');
bool Wrapped = false;
for (unsigned WordStart = 0, WordEnd; WordStart < Length;
WordStart = WordEnd) {
@@ -648,10 +629,10 @@ static bool printWordWrapped(raw_ostream &OS, StringRef Str,
// This word does not fit on the current line, so wrap to the next
// line.
OS << '\n';
- OS.write(&IndentStr[0], Indentation);
+ OS.indent(WordWrapIndentation);
applyTemplateHighlighting(OS, Str.substr(WordStart, WordLength),
TextNormal, Bold);
- Column = Indentation + WordLength;
+ Column = WordWrapIndentation + WordLength;
Wrapped = true;
}
@@ -754,7 +735,7 @@ void TextDiagnostic::emitFilename(StringRef Filename, const SourceManager &SM) {
SmallString<4096> TmpFilename;
#endif
if (DiagOpts->AbsolutePath) {
- auto File = SM.getFileManager().getFile(Filename);
+ auto File = SM.getFileManager().getOptionalFileRef(Filename);
if (File) {
// We want to print a simplified absolute path, i. e. without "dots".
//
@@ -771,7 +752,7 @@ void TextDiagnostic::emitFilename(StringRef Filename, const SourceManager &SM) {
// on Windows we can just use llvm::sys::path::remove_dots(), because,
// on that system, both aforementioned paths point to the same place.
#ifdef _WIN32
- TmpFilename = (*File)->getName();
+ TmpFilename = File->getName();
llvm::sys::fs::make_absolute(TmpFilename);
llvm::sys::path::native(TmpFilename);
llvm::sys::path::remove_dots(TmpFilename, /* remove_dot_dot */ true);
@@ -787,7 +768,7 @@ void TextDiagnostic::emitFilename(StringRef Filename, const SourceManager &SM) {
/// Print out the file/line/column information and include trace.
///
-/// This method handlen the emission of the diagnostic location information.
+/// This method handles the emission of the diagnostic location information.
/// This includes extracting as much location information as is present for
/// the diagnostic and printing it, as well as any include stack or source
/// ranges necessary.
@@ -796,10 +777,8 @@ void TextDiagnostic::emitDiagnosticLoc(FullSourceLoc Loc, PresumedLoc PLoc,
ArrayRef<CharSourceRange> Ranges) {
if (PLoc.isInvalid()) {
// At least print the file name if available:
- FileID FID = Loc.getFileID();
- if (FID.isValid()) {
- const FileEntry *FE = Loc.getFileEntry();
- if (FE && FE->isValid()) {
+ if (FileID FID = Loc.getFileID(); FID.isValid()) {
+ if (OptionalFileEntryRef FE = Loc.getFileEntryRef()) {
emitFilename(FE->getName(), Loc.getManager());
OS << ": ";
}
@@ -816,6 +795,7 @@ void TextDiagnostic::emitDiagnosticLoc(FullSourceLoc Loc, PresumedLoc PLoc,
emitFilename(PLoc.getFilename(), Loc.getManager());
switch (DiagOpts->getFormat()) {
+ case DiagnosticOptions::SARIF:
case DiagnosticOptions::Clang:
if (DiagOpts->ShowLine)
OS << ':' << LineNo;
@@ -838,6 +818,7 @@ void TextDiagnostic::emitDiagnosticLoc(FullSourceLoc Loc, PresumedLoc PLoc,
OS << ColNo;
}
switch (DiagOpts->getFormat()) {
+ case DiagnosticOptions::SARIF:
case DiagnosticOptions::Clang:
case DiagnosticOptions::Vi: OS << ':'; break;
case DiagnosticOptions::MSVC:
@@ -854,31 +835,26 @@ void TextDiagnostic::emitDiagnosticLoc(FullSourceLoc Loc, PresumedLoc PLoc,
if (DiagOpts->ShowSourceRanges && !Ranges.empty()) {
FileID CaretFileID = Loc.getExpansionLoc().getFileID();
bool PrintedRange = false;
+ const SourceManager &SM = Loc.getManager();
- for (ArrayRef<CharSourceRange>::const_iterator RI = Ranges.begin(),
- RE = Ranges.end();
- RI != RE; ++RI) {
+ for (const auto &R : Ranges) {
// Ignore invalid ranges.
- if (!RI->isValid()) continue;
+ if (!R.isValid())
+ continue;
- auto &SM = Loc.getManager();
- SourceLocation B = SM.getExpansionLoc(RI->getBegin());
- CharSourceRange ERange = SM.getExpansionRange(RI->getEnd());
+ SourceLocation B = SM.getExpansionLoc(R.getBegin());
+ CharSourceRange ERange = SM.getExpansionRange(R.getEnd());
SourceLocation E = ERange.getEnd();
- bool IsTokenRange = ERange.isTokenRange();
- std::pair<FileID, unsigned> BInfo = SM.getDecomposedLoc(B);
- std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(E);
-
- // If the start or end of the range is in another file, just discard
- // it.
- if (BInfo.first != CaretFileID || EInfo.first != CaretFileID)
+ // If the start or end of the range is in another file, just
+ // discard it.
+ if (SM.getFileID(B) != CaretFileID || SM.getFileID(E) != CaretFileID)
continue;
// Add in the length of the token, so that we cover multi-char
// tokens.
unsigned TokSize = 0;
- if (IsTokenRange)
+ if (ERange.isTokenRange())
TokSize = Lexer::MeasureTokenLength(E, SM, LangOpts);
FullSourceLoc BF(B, SM), EF(E, SM);
@@ -896,10 +872,11 @@ void TextDiagnostic::emitDiagnosticLoc(FullSourceLoc Loc, PresumedLoc PLoc,
}
void TextDiagnostic::emitIncludeLocation(FullSourceLoc Loc, PresumedLoc PLoc) {
- if (DiagOpts->ShowLocation && PLoc.isValid())
- OS << "In file included from " << PLoc.getFilename() << ':'
- << PLoc.getLine() << ":\n";
- else
+ if (DiagOpts->ShowLocation && PLoc.isValid()) {
+ OS << "In file included from ";
+ emitFilename(PLoc.getFilename(), Loc.getManager());
+ OS << ':' << PLoc.getLine() << ":\n";
+ } else
OS << "In included file:\n";
}
@@ -923,15 +900,16 @@ void TextDiagnostic::emitBuildingModuleLocation(FullSourceLoc Loc,
}
/// Find the suitable set of lines to show to include a set of ranges.
-static llvm::Optional<std::pair<unsigned, unsigned>>
+static std::optional<std::pair<unsigned, unsigned>>
findLinesForRange(const CharSourceRange &R, FileID FID,
const SourceManager &SM) {
- if (!R.isValid()) return None;
+ if (!R.isValid())
+ return std::nullopt;
SourceLocation Begin = R.getBegin();
SourceLocation End = R.getEnd();
if (SM.getFileID(Begin) != FID || SM.getFileID(End) != FID)
- return None;
+ return std::nullopt;
return std::make_pair(SM.getExpansionLineNumber(Begin),
SM.getExpansionLineNumber(End));
@@ -972,87 +950,43 @@ maybeAddRange(std::pair<unsigned, unsigned> A, std::pair<unsigned, unsigned> B,
return A;
}
-/// Highlight a SourceRange (with ~'s) for any characters on LineNo.
-static void highlightRange(const CharSourceRange &R,
- unsigned LineNo, FileID FID,
- const SourceColumnMap &map,
- std::string &CaretLine,
- const SourceManager &SM,
- const LangOptions &LangOpts) {
- if (!R.isValid()) return;
-
- SourceLocation Begin = R.getBegin();
- SourceLocation End = R.getEnd();
-
- unsigned StartLineNo = SM.getExpansionLineNumber(Begin);
- if (StartLineNo > LineNo || SM.getFileID(Begin) != FID)
- return; // No intersection.
-
- unsigned EndLineNo = SM.getExpansionLineNumber(End);
- if (EndLineNo < LineNo || SM.getFileID(End) != FID)
- return; // No intersection.
-
- // Compute the column number of the start.
- unsigned StartColNo = 0;
- if (StartLineNo == LineNo) {
- StartColNo = SM.getExpansionColumnNumber(Begin);
- if (StartColNo) --StartColNo; // Zero base the col #.
- }
-
- // Compute the column number of the end.
- unsigned EndColNo = map.getSourceLine().size();
- if (EndLineNo == LineNo) {
- EndColNo = SM.getExpansionColumnNumber(End);
- if (EndColNo) {
- --EndColNo; // Zero base the col #.
-
- // Add in the length of the token, so that we cover multi-char tokens if
- // this is a token range.
- if (R.isTokenRange())
- EndColNo += Lexer::MeasureTokenLength(End, SM, LangOpts);
- } else {
- EndColNo = CaretLine.size();
- }
- }
-
- assert(StartColNo <= EndColNo && "Invalid range!");
-
- // Check that a token range does not highlight only whitespace.
- if (R.isTokenRange()) {
- // Pick the first non-whitespace column.
- while (StartColNo < map.getSourceLine().size() &&
- (map.getSourceLine()[StartColNo] == ' ' ||
- map.getSourceLine()[StartColNo] == '\t'))
- StartColNo = map.startOfNextColumn(StartColNo);
-
- // Pick the last non-whitespace column.
- if (EndColNo > map.getSourceLine().size())
- EndColNo = map.getSourceLine().size();
- while (EndColNo &&
- (map.getSourceLine()[EndColNo-1] == ' ' ||
- map.getSourceLine()[EndColNo-1] == '\t'))
- EndColNo = map.startOfPreviousColumn(EndColNo);
-
- // If the start/end passed each other, then we are trying to highlight a
- // range that just exists in whitespace. That most likely means we have
- // a multi-line highlighting range that covers a blank line.
- if (StartColNo > EndColNo) {
- assert(StartLineNo != EndLineNo && "trying to highlight whitespace");
- StartColNo = EndColNo;
- }
- }
+struct LineRange {
+ unsigned LineNo;
+ unsigned StartCol;
+ unsigned EndCol;
+};
- assert(StartColNo <= map.getSourceLine().size() && "Invalid range!");
- assert(EndColNo <= map.getSourceLine().size() && "Invalid range!");
+/// Highlight \p R (with ~'s) on the current source line.
+static void highlightRange(const LineRange &R, const SourceColumnMap &Map,
+ std::string &CaretLine) {
+ // Pick the first non-whitespace column.
+ unsigned StartColNo = R.StartCol;
+ while (StartColNo < Map.getSourceLine().size() &&
+ (Map.getSourceLine()[StartColNo] == ' ' ||
+ Map.getSourceLine()[StartColNo] == '\t'))
+ StartColNo = Map.startOfNextColumn(StartColNo);
+
+ // Pick the last non-whitespace column.
+ unsigned EndColNo =
+ std::min(static_cast<size_t>(R.EndCol), Map.getSourceLine().size());
+ while (EndColNo && (Map.getSourceLine()[EndColNo - 1] == ' ' ||
+ Map.getSourceLine()[EndColNo - 1] == '\t'))
+ EndColNo = Map.startOfPreviousColumn(EndColNo);
+
+ // If the start/end passed each other, then we are trying to highlight a
+ // range that just exists in whitespace. That most likely means we have
+ // a multi-line highlighting range that covers a blank line.
+ if (StartColNo > EndColNo)
+ return;
// Fill the range with ~'s.
- StartColNo = map.byteToContainingColumn(StartColNo);
- EndColNo = map.byteToContainingColumn(EndColNo);
+ StartColNo = Map.byteToContainingColumn(StartColNo);
+ EndColNo = Map.byteToContainingColumn(EndColNo);
assert(StartColNo <= EndColNo && "Invalid range!");
if (CaretLine.size() < EndColNo)
- CaretLine.resize(EndColNo,' ');
- std::fill(CaretLine.begin()+StartColNo,CaretLine.begin()+EndColNo,'~');
+ CaretLine.resize(EndColNo, ' ');
+ std::fill(CaretLine.begin() + StartColNo, CaretLine.begin() + EndColNo, '~');
}
static std::string buildFixItInsertionLine(FileID FID,
@@ -1066,51 +1000,51 @@ static std::string buildFixItInsertionLine(FileID FID,
return FixItInsertionLine;
unsigned PrevHintEndCol = 0;
- for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
- I != E; ++I) {
- if (!I->CodeToInsert.empty()) {
- // We have an insertion hint. Determine whether the inserted
- // code contains no newlines and is on the same line as the caret.
- std::pair<FileID, unsigned> HintLocInfo
- = SM.getDecomposedExpansionLoc(I->RemoveRange.getBegin());
- if (FID == HintLocInfo.first &&
- LineNo == SM.getLineNumber(HintLocInfo.first, HintLocInfo.second) &&
- StringRef(I->CodeToInsert).find_first_of("\n\r") == StringRef::npos) {
- // Insert the new code into the line just below the code
- // that the user wrote.
- // Note: When modifying this function, be very careful about what is a
- // "column" (printed width, platform-dependent) and what is a
- // "byte offset" (SourceManager "column").
- unsigned HintByteOffset
- = SM.getColumnNumber(HintLocInfo.first, HintLocInfo.second) - 1;
-
- // The hint must start inside the source or right at the end
- assert(HintByteOffset < static_cast<unsigned>(map.bytes())+1);
- unsigned HintCol = map.byteToContainingColumn(HintByteOffset);
-
- // If we inserted a long previous hint, push this one forwards, and add
- // an extra space to show that this is not part of the previous
- // completion. This is sort of the best we can do when two hints appear
- // to overlap.
- //
- // Note that if this hint is located immediately after the previous
- // hint, no space will be added, since the location is more important.
- if (HintCol < PrevHintEndCol)
- HintCol = PrevHintEndCol + 1;
-
- // This should NOT use HintByteOffset, because the source might have
- // Unicode characters in earlier columns.
- unsigned NewFixItLineSize = FixItInsertionLine.size() +
- (HintCol - PrevHintEndCol) + I->CodeToInsert.size();
- if (NewFixItLineSize > FixItInsertionLine.size())
- FixItInsertionLine.resize(NewFixItLineSize, ' ');
-
- std::copy(I->CodeToInsert.begin(), I->CodeToInsert.end(),
- FixItInsertionLine.end() - I->CodeToInsert.size());
-
- PrevHintEndCol =
- HintCol + llvm::sys::locale::columnWidth(I->CodeToInsert);
- }
+ for (const auto &H : Hints) {
+ if (H.CodeToInsert.empty())
+ continue;
+
+ // We have an insertion hint. Determine whether the inserted
+ // code contains no newlines and is on the same line as the caret.
+ std::pair<FileID, unsigned> HintLocInfo =
+ SM.getDecomposedExpansionLoc(H.RemoveRange.getBegin());
+ if (FID == HintLocInfo.first &&
+ LineNo == SM.getLineNumber(HintLocInfo.first, HintLocInfo.second) &&
+ StringRef(H.CodeToInsert).find_first_of("\n\r") == StringRef::npos) {
+ // Insert the new code into the line just below the code
+ // that the user wrote.
+ // Note: When modifying this function, be very careful about what is a
+ // "column" (printed width, platform-dependent) and what is a
+ // "byte offset" (SourceManager "column").
+ unsigned HintByteOffset =
+ SM.getColumnNumber(HintLocInfo.first, HintLocInfo.second) - 1;
+
+ // The hint must start inside the source or right at the end
+ assert(HintByteOffset < static_cast<unsigned>(map.bytes()) + 1);
+ unsigned HintCol = map.byteToContainingColumn(HintByteOffset);
+
+ // If we inserted a long previous hint, push this one forwards, and add
+ // an extra space to show that this is not part of the previous
+ // completion. This is sort of the best we can do when two hints appear
+ // to overlap.
+ //
+ // Note that if this hint is located immediately after the previous
+ // hint, no space will be added, since the location is more important.
+ if (HintCol < PrevHintEndCol)
+ HintCol = PrevHintEndCol + 1;
+
+ // This should NOT use HintByteOffset, because the source might have
+ // Unicode characters in earlier columns.
+ unsigned NewFixItLineSize = FixItInsertionLine.size() +
+ (HintCol - PrevHintEndCol) +
+ H.CodeToInsert.size();
+ if (NewFixItLineSize > FixItInsertionLine.size())
+ FixItInsertionLine.resize(NewFixItLineSize, ' ');
+
+ std::copy(H.CodeToInsert.begin(), H.CodeToInsert.end(),
+ FixItInsertionLine.end() - H.CodeToInsert.size());
+
+ PrevHintEndCol = HintCol + llvm::sys::locale::columnWidth(H.CodeToInsert);
}
}
@@ -1119,6 +1053,65 @@ static std::string buildFixItInsertionLine(FileID FID,
return FixItInsertionLine;
}
+static unsigned getNumDisplayWidth(unsigned N) {
+ unsigned L = 1u, M = 10u;
+ while (M <= N && ++L != std::numeric_limits<unsigned>::digits10 + 1)
+ M *= 10u;
+
+ return L;
+}
+
+/// Filter out invalid ranges, ranges that don't fit into the window of
+/// source lines we will print, and ranges from other files.
+///
+/// For the remaining ranges, convert them to simple LineRange structs,
+/// which only cover one line at a time.
+static SmallVector<LineRange>
+prepareAndFilterRanges(const SmallVectorImpl<CharSourceRange> &Ranges,
+ const SourceManager &SM,
+ const std::pair<unsigned, unsigned> &Lines, FileID FID,
+ const LangOptions &LangOpts) {
+ SmallVector<LineRange> LineRanges;
+
+ for (const CharSourceRange &R : Ranges) {
+ if (R.isInvalid())
+ continue;
+ SourceLocation Begin = R.getBegin();
+ SourceLocation End = R.getEnd();
+
+ unsigned StartLineNo = SM.getExpansionLineNumber(Begin);
+ if (StartLineNo > Lines.second || SM.getFileID(Begin) != FID)
+ continue;
+
+ unsigned EndLineNo = SM.getExpansionLineNumber(End);
+ if (EndLineNo < Lines.first || SM.getFileID(End) != FID)
+ continue;
+
+ unsigned StartColumn = SM.getExpansionColumnNumber(Begin);
+ unsigned EndColumn = SM.getExpansionColumnNumber(End);
+ if (R.isTokenRange())
+ EndColumn += Lexer::MeasureTokenLength(End, SM, LangOpts);
+
+ // Only a single line.
+ if (StartLineNo == EndLineNo) {
+ LineRanges.push_back({StartLineNo, StartColumn - 1, EndColumn - 1});
+ continue;
+ }
+
+ // Start line.
+ LineRanges.push_back({StartLineNo, StartColumn - 1, ~0u});
+
+ // Middle lines.
+ for (unsigned S = StartLineNo + 1; S != EndLineNo; ++S)
+ LineRanges.push_back({S, 0, ~0u});
+
+ // End line.
+ LineRanges.push_back({EndLineNo, 0, EndColumn - 1});
+ }
+
+ return LineRanges;
+}
+
/// Emit a code snippet and caret line.
///
/// This routine emits a single line's code snippet and caret line..
@@ -1144,9 +1137,7 @@ void TextDiagnostic::emitSnippetAndCaret(
(LastLevel != DiagnosticsEngine::Note || Level == LastLevel))
return;
- // Decompose the location into a FID/Offset pair.
- std::pair<FileID, unsigned> LocInfo = Loc.getDecomposedLoc();
- FileID FID = LocInfo.first;
+ FileID FID = Loc.getFileID();
const SourceManager &SM = Loc.getManager();
// Get information about the buffer it points into.
@@ -1154,6 +1145,8 @@ void TextDiagnostic::emitSnippetAndCaret(
StringRef BufData = Loc.getBufferData(&Invalid);
if (Invalid)
return;
+ const char *BufStart = BufData.data();
+ const char *BufEnd = BufStart + BufData.size();
unsigned CaretLineNo = Loc.getLineNumber();
unsigned CaretColNo = Loc.getColumnNumber();
@@ -1166,16 +1159,33 @@ void TextDiagnostic::emitSnippetAndCaret(
// Find the set of lines to include.
const unsigned MaxLines = DiagOpts->SnippetLineLimit;
std::pair<unsigned, unsigned> Lines = {CaretLineNo, CaretLineNo};
- for (SmallVectorImpl<CharSourceRange>::iterator I = Ranges.begin(),
- E = Ranges.end();
- I != E; ++I)
- if (auto OptionalRange = findLinesForRange(*I, FID, SM))
+ unsigned DisplayLineNo = Loc.getPresumedLoc().getLine();
+ for (const auto &I : Ranges) {
+ if (auto OptionalRange = findLinesForRange(I, FID, SM))
Lines = maybeAddRange(Lines, *OptionalRange, MaxLines);
- for (unsigned LineNo = Lines.first; LineNo != Lines.second + 1; ++LineNo) {
- const char *BufStart = BufData.data();
- const char *BufEnd = BufStart + BufData.size();
+ DisplayLineNo =
+ std::min(DisplayLineNo, SM.getPresumedLineNumber(I.getBegin()));
+ }
+ // Our line numbers look like:
+ // " [number] | "
+ // Where [number] is MaxLineNoDisplayWidth columns
+ // and the full thing is therefore MaxLineNoDisplayWidth + 4 columns.
+ unsigned MaxLineNoDisplayWidth =
+ DiagOpts->ShowLineNumbers
+ ? std::max(4u, getNumDisplayWidth(DisplayLineNo + MaxLines))
+ : 0;
+ auto indentForLineNumbers = [&] {
+ if (MaxLineNoDisplayWidth > 0)
+ OS.indent(MaxLineNoDisplayWidth + 2) << "| ";
+ };
+
+ SmallVector<LineRange> LineRanges =
+ prepareAndFilterRanges(Ranges, SM, Lines, FID, LangOpts);
+
+ for (unsigned LineNo = Lines.first; LineNo != Lines.second + 1;
+ ++LineNo, ++DisplayLineNo) {
// Rewind from the current position to the start of the line.
const char *LineStart =
BufStart +
@@ -1193,34 +1203,28 @@ void TextDiagnostic::emitSnippetAndCaret(
if (size_t(LineEnd - LineStart) > MaxLineLengthToPrint)
return;
- // Trim trailing null-bytes.
- StringRef Line(LineStart, LineEnd - LineStart);
- while (!Line.empty() && Line.back() == '\0' &&
- (LineNo != CaretLineNo || Line.size() > CaretColNo))
- Line = Line.drop_back();
-
// Copy the line of code into an std::string for ease of manipulation.
- std::string SourceLine(Line.begin(), Line.end());
+ std::string SourceLine(LineStart, LineEnd);
+ // Remove trailing null bytes.
+ while (!SourceLine.empty() && SourceLine.back() == '\0' &&
+ (LineNo != CaretLineNo || SourceLine.size() > CaretColNo))
+ SourceLine.pop_back();
// Build the byte to column map.
const SourceColumnMap sourceColMap(SourceLine, DiagOpts->TabStop);
- // Create a line for the caret that is filled with spaces that is the same
- // number of columns as the line of source code.
- std::string CaretLine(sourceColMap.columns(), ' ');
-
+ std::string CaretLine;
// Highlight all of the characters covered by Ranges with ~ characters.
- for (SmallVectorImpl<CharSourceRange>::iterator I = Ranges.begin(),
- E = Ranges.end();
- I != E; ++I)
- highlightRange(*I, LineNo, FID, sourceColMap, CaretLine, SM, LangOpts);
+ for (const auto &LR : LineRanges) {
+ if (LR.LineNo == LineNo)
+ highlightRange(LR, sourceColMap, CaretLine);
+ }
// Next, insert the caret itself.
if (CaretLineNo == LineNo) {
- CaretColNo = sourceColMap.byteToContainingColumn(CaretColNo - 1);
- if (CaretLine.size() < CaretColNo + 1)
- CaretLine.resize(CaretColNo + 1, ' ');
- CaretLine[CaretColNo] = '^';
+ size_t Col = sourceColMap.byteToContainingColumn(CaretColNo - 1);
+ CaretLine.resize(std::max(Col + 1, CaretLine.size()), ' ');
+ CaretLine[Col] = '^';
}
std::string FixItInsertionLine = buildFixItInsertionLine(
@@ -1237,19 +1241,16 @@ void TextDiagnostic::emitSnippetAndCaret(
// to produce easily machine parsable output. Add a space before the
// source line and the caret to make it trivial to tell the main diagnostic
// line from what the user is intended to see.
- if (DiagOpts->ShowSourceRanges) {
+ if (DiagOpts->ShowSourceRanges && !SourceLine.empty()) {
SourceLine = ' ' + SourceLine;
CaretLine = ' ' + CaretLine;
}
- // Finally, remove any blank spaces from the end of CaretLine.
- while (!CaretLine.empty() && CaretLine[CaretLine.size() - 1] == ' ')
- CaretLine.erase(CaretLine.end() - 1);
-
// Emit what we have computed.
- emitSnippet(SourceLine);
+ emitSnippet(SourceLine, MaxLineNoDisplayWidth, DisplayLineNo);
if (!CaretLine.empty()) {
+ indentForLineNumbers();
if (DiagOpts->ShowColors)
OS.changeColor(caretColor, true);
OS << CaretLine << '\n';
@@ -1258,6 +1259,7 @@ void TextDiagnostic::emitSnippetAndCaret(
}
if (!FixItInsertionLine.empty()) {
+ indentForLineNumbers();
if (DiagOpts->ShowColors)
// Print fixit line in color
OS.changeColor(fixitColor, false);
@@ -1273,37 +1275,37 @@ void TextDiagnostic::emitSnippetAndCaret(
emitParseableFixits(Hints, SM);
}
-void TextDiagnostic::emitSnippet(StringRef line) {
- if (line.empty())
- return;
-
- size_t i = 0;
-
- std::string to_print;
- bool print_reversed = false;
-
- while (i<line.size()) {
- std::pair<SmallString<16>,bool> res
- = printableTextForNextCharacter(line, &i, DiagOpts->TabStop);
- bool was_printable = res.second;
+void TextDiagnostic::emitSnippet(StringRef SourceLine,
+ unsigned MaxLineNoDisplayWidth,
+ unsigned LineNo) {
+ // Emit line number.
+ if (MaxLineNoDisplayWidth > 0) {
+ unsigned LineNoDisplayWidth = getNumDisplayWidth(LineNo);
+ OS.indent(MaxLineNoDisplayWidth - LineNoDisplayWidth + 1)
+ << LineNo << " | ";
+ }
- if (DiagOpts->ShowColors && was_printable == print_reversed) {
- if (print_reversed)
- OS.reverseColor();
- OS << to_print;
- to_print.clear();
- if (DiagOpts->ShowColors)
- OS.resetColor();
+ // Print the source line one character at a time.
+ bool PrintReversed = false;
+ size_t I = 0;
+ while (I < SourceLine.size()) {
+ auto [Str, WasPrintable] =
+ printableTextForNextCharacter(SourceLine, &I, DiagOpts->TabStop);
+
+ // Toggle inverted colors on or off for this character.
+ if (DiagOpts->ShowColors) {
+ if (WasPrintable == PrintReversed) {
+ PrintReversed = !PrintReversed;
+ if (PrintReversed)
+ OS.reverseColor();
+ else
+ OS.resetColor();
+ }
}
-
- print_reversed = !was_printable;
- to_print += res.first.str();
+ OS << Str;
}
- if (print_reversed && DiagOpts->ShowColors)
- OS.reverseColor();
- OS << to_print;
- if (print_reversed && DiagOpts->ShowColors)
+ if (DiagOpts->ShowColors)
OS.resetColor();
OS << '\n';
@@ -1316,24 +1318,21 @@ void TextDiagnostic::emitParseableFixits(ArrayRef<FixItHint> Hints,
// We follow FixItRewriter's example in not (yet) handling
// fix-its in macros.
- for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
- I != E; ++I) {
- if (I->RemoveRange.isInvalid() ||
- I->RemoveRange.getBegin().isMacroID() ||
- I->RemoveRange.getEnd().isMacroID())
+ for (const auto &H : Hints) {
+ if (H.RemoveRange.isInvalid() || H.RemoveRange.getBegin().isMacroID() ||
+ H.RemoveRange.getEnd().isMacroID())
return;
}
- for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
- I != E; ++I) {
- SourceLocation BLoc = I->RemoveRange.getBegin();
- SourceLocation ELoc = I->RemoveRange.getEnd();
+ for (const auto &H : Hints) {
+ SourceLocation BLoc = H.RemoveRange.getBegin();
+ SourceLocation ELoc = H.RemoveRange.getEnd();
std::pair<FileID, unsigned> BInfo = SM.getDecomposedLoc(BLoc);
std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(ELoc);
// Adjust for token ranges.
- if (I->RemoveRange.isTokenRange())
+ if (H.RemoveRange.isTokenRange())
EInfo.second += Lexer::MeasureTokenLength(ELoc, SM, LangOpts);
// We specifically do not do word-wrapping or tab-expansion here,
@@ -1349,7 +1348,7 @@ void TextDiagnostic::emitParseableFixits(ArrayRef<FixItHint> Hints,
<< '-' << SM.getLineNumber(EInfo.first, EInfo.second)
<< ':' << SM.getColumnNumber(EInfo.first, EInfo.second)
<< "}:\"";
- OS.write_escaped(I->CodeToInsert);
+ OS.write_escaped(H.CodeToInsert);
OS << "\"\n";
}
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/TextDiagnosticBuffer.cpp b/contrib/llvm-project/clang/lib/Frontend/TextDiagnosticBuffer.cpp
index 90f273e65f88..681bc25f46b8 100644
--- a/contrib/llvm-project/clang/lib/Frontend/TextDiagnosticBuffer.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/TextDiagnosticBuffer.cpp
@@ -32,20 +32,20 @@ void TextDiagnosticBuffer::HandleDiagnostic(DiagnosticsEngine::Level Level,
"Diagnostic not handled during diagnostic buffering!");
case DiagnosticsEngine::Note:
All.emplace_back(Level, Notes.size());
- Notes.emplace_back(Info.getLocation(), std::string(Buf.str()));
+ Notes.emplace_back(Info.getLocation(), std::string(Buf));
break;
case DiagnosticsEngine::Warning:
All.emplace_back(Level, Warnings.size());
- Warnings.emplace_back(Info.getLocation(), std::string(Buf.str()));
+ Warnings.emplace_back(Info.getLocation(), std::string(Buf));
break;
case DiagnosticsEngine::Remark:
All.emplace_back(Level, Remarks.size());
- Remarks.emplace_back(Info.getLocation(), std::string(Buf.str()));
+ Remarks.emplace_back(Info.getLocation(), std::string(Buf));
break;
case DiagnosticsEngine::Error:
case DiagnosticsEngine::Fatal:
All.emplace_back(Level, Errors.size());
- Errors.emplace_back(Info.getLocation(), std::string(Buf.str()));
+ Errors.emplace_back(Info.getLocation(), std::string(Buf));
break;
}
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp b/contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
index 0503ae46a15f..f508408ba706 100644
--- a/contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
@@ -99,9 +99,7 @@ public:
return true;
}
- bool match(StringRef S) override {
- return S.find(Text) != StringRef::npos;
- }
+ bool match(StringRef S) override { return S.contains(Text); }
};
/// RegexDirective - Directive with regular-expression matching.
@@ -228,10 +226,10 @@ public:
P = C;
while (P < End) {
StringRef S(P, End - P);
- if (S.startswith(OpenBrace)) {
+ if (S.starts_with(OpenBrace)) {
++Depth;
P += OpenBrace.size();
- } else if (S.startswith(CloseBrace)) {
+ } else if (S.starts_with(CloseBrace)) {
--Depth;
if (Depth == 0) {
PEnd = P + CloseBrace.size();
@@ -447,7 +445,7 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
// others.
// Regex in initial directive token: -re
- if (DToken.endswith("-re")) {
+ if (DToken.ends_with("-re")) {
D.RegexKind = true;
KindStr = "regex";
DToken = DToken.substr(0, DToken.size()-3);
@@ -456,20 +454,19 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
// Type in initial directive token: -{error|warning|note|no-diagnostics}
bool NoDiag = false;
StringRef DType;
- if (DToken.endswith(DType="-error"))
+ if (DToken.ends_with(DType = "-error"))
D.DL = ED ? &ED->Errors : nullptr;
- else if (DToken.endswith(DType="-warning"))
+ else if (DToken.ends_with(DType = "-warning"))
D.DL = ED ? &ED->Warnings : nullptr;
- else if (DToken.endswith(DType="-remark"))
+ else if (DToken.ends_with(DType = "-remark"))
D.DL = ED ? &ED->Remarks : nullptr;
- else if (DToken.endswith(DType="-note"))
+ else if (DToken.ends_with(DType = "-note"))
D.DL = ED ? &ED->Notes : nullptr;
- else if (DToken.endswith(DType="-no-diagnostics")) {
+ else if (DToken.ends_with(DType = "-no-diagnostics")) {
NoDiag = true;
if (D.RegexKind)
continue;
- }
- else
+ } else
continue;
DToken = DToken.substr(0, DToken.size()-DType.size());
@@ -543,9 +540,8 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
ExpectedLoc = SourceLocation();
} else {
// Lookup file via Preprocessor, like a #include.
- const DirectoryLookup *CurDir;
- Optional<FileEntryRef> File =
- PP->LookupFile(Pos, Filename, false, nullptr, nullptr, CurDir,
+ OptionalFileEntryRef File =
+ PP->LookupFile(Pos, Filename, false, nullptr, nullptr, nullptr,
nullptr, nullptr, nullptr, nullptr, nullptr);
if (!File) {
Diags.Report(Pos.getLocWithOffset(PH.C - PH.Begin),
@@ -615,12 +611,19 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
diag::err_verify_missing_start) << KindStr;
continue;
}
+ llvm::SmallString<8> CloseBrace("}}");
+ const char *const DelimBegin = PH.C;
PH.Advance();
+ // Count the number of opening braces for `string` kinds
+ for (; !D.RegexKind && PH.Next("{"); PH.Advance())
+ CloseBrace += '}';
const char* const ContentBegin = PH.C; // mark content begin
- // Search for token: }}
- if (!PH.SearchClosingBrace("{{", "}}")) {
- Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
- diag::err_verify_missing_end) << KindStr;
+ // Search for closing brace
+ StringRef OpenBrace(DelimBegin, ContentBegin - DelimBegin);
+ if (!PH.SearchClosingBrace(OpenBrace, CloseBrace)) {
+ Diags.Report(Pos.getLocWithOffset(PH.C - PH.Begin),
+ diag::err_verify_missing_end)
+ << KindStr << CloseBrace;
continue;
}
const char* const ContentEnd = PH.P; // mark content end
@@ -740,12 +743,12 @@ void VerifyDiagnosticConsumer::HandleDiagnostic(
Loc = SrcManager->getExpansionLoc(Loc);
FileID FID = SrcManager->getFileID(Loc);
- const FileEntry *FE = SrcManager->getFileEntryForID(FID);
+ auto FE = SrcManager->getFileEntryRefForID(FID);
if (FE && CurrentPreprocessor && SrcManager->isLoadedFileID(FID)) {
// If the file is a modules header file it shall not be parsed
// for expected-* directives.
HeaderSearch &HS = CurrentPreprocessor->getHeaderSearchInfo();
- if (HS.findModuleForHeader(FE))
+ if (HS.findModuleForHeader(*FE))
PS = IsUnparsedNoDirectives;
}
@@ -871,16 +874,18 @@ static unsigned PrintUnexpected(DiagnosticsEngine &Diags, SourceManager *SourceM
OS << "\n (frontend)";
else {
OS << "\n ";
- if (const FileEntry *File = SourceMgr->getFileEntryForID(
- SourceMgr->getFileID(I->first)))
+ if (OptionalFileEntryRef File =
+ SourceMgr->getFileEntryRefForID(SourceMgr->getFileID(I->first)))
OS << " File " << File->getName();
OS << " Line " << SourceMgr->getPresumedLineNumber(I->first);
}
OS << ": " << I->second;
}
+ std::string Prefix = *Diags.getDiagnosticOptions().VerifyPrefixes.begin();
+ std::string KindStr = Prefix + "-" + Kind;
Diags.Report(diag::err_verify_inconsistent_diags).setForceEmit()
- << Kind << /*Unexpected=*/true << OS.str();
+ << KindStr << /*Unexpected=*/true << OS.str();
return std::distance(diag_begin, diag_end);
}
@@ -910,8 +915,10 @@ static unsigned PrintExpected(DiagnosticsEngine &Diags,
OS << ": " << D->Text;
}
+ std::string Prefix = *Diags.getDiagnosticOptions().VerifyPrefixes.begin();
+ std::string KindStr = Prefix + "-" + Kind;
Diags.Report(diag::err_verify_inconsistent_diags).setForceEmit()
- << Kind << /*Unexpected=*/false << OS.str();
+ << KindStr << /*Unexpected=*/false << OS.str();
return DL.size();
}
@@ -1029,12 +1036,12 @@ void VerifyDiagnosticConsumer::UpdateParsedFileStatus(SourceManager &SM,
if (FID.isInvalid())
return;
- const FileEntry *FE = SM.getFileEntryForID(FID);
+ OptionalFileEntryRef FE = SM.getFileEntryRefForID(FID);
if (PS == IsParsed) {
// Move the FileID from the unparsed set to the parsed set.
UnparsedFiles.erase(FID);
- ParsedFiles.insert(std::make_pair(FID, FE));
+ ParsedFiles.insert(std::make_pair(FID, FE ? &FE->getFileEntry() : nullptr));
} else if (!ParsedFiles.count(FID) && !UnparsedFiles.count(FID)) {
// Add the FileID to the unparsed set if we haven't seen it before.
@@ -1075,17 +1082,17 @@ void VerifyDiagnosticConsumer::CheckDiagnostics() {
// Iterate through list of unparsed files.
for (const auto &I : UnparsedFiles) {
const UnparsedFileStatus &Status = I.second;
- const FileEntry *FE = Status.getFile();
+ OptionalFileEntryRef FE = Status.getFile();
// Skip files that have been parsed via an alias.
- if (FE && ParsedFileCache.count(FE))
+ if (FE && ParsedFileCache.count(*FE))
continue;
// Report a fatal error if this file contained directives.
if (Status.foundDirectives()) {
- llvm::report_fatal_error(Twine("-verify directives found after rather"
- " than during normal parsing of ",
- StringRef(FE ? FE->getName() : "(unknown)")));
+ llvm::report_fatal_error("-verify directives found after rather"
+ " than during normal parsing of " +
+ (FE ? FE->getName() : "(unknown)"));
}
}
@@ -1144,8 +1151,7 @@ std::unique_ptr<Directive> Directive::create(bool RegexKind,
std::string RegexStr;
StringRef S = Text;
while (!S.empty()) {
- if (S.startswith("{{")) {
- S = S.drop_front(2);
+ if (S.consume_front("{{")) {
size_t RegexMatchLength = S.find("}}");
assert(RegexMatchLength != StringRef::npos);
// Append the regex, enclosed in parentheses.
diff --git a/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
index b95851e380d2..b280a1359d2f 100644
--- a/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
+++ b/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -15,6 +15,7 @@
#include "clang/CodeGen/CodeGenAction.h"
#include "clang/Config/config.h"
#include "clang/Driver/Options.h"
+#include "clang/ExtractAPI/FrontendActions.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/FrontendActions.h"
@@ -57,13 +58,15 @@ CreateFrontendBaseAction(CompilerInstance &CI) {
case EmitLLVMOnly: return std::make_unique<EmitLLVMOnlyAction>();
case EmitCodeGenOnly: return std::make_unique<EmitCodeGenOnlyAction>();
case EmitObj: return std::make_unique<EmitObjAction>();
+ case ExtractAPI:
+ return std::make_unique<ExtractAPIAction>();
case FixIt: return std::make_unique<FixItAction>();
case GenerateModule:
return std::make_unique<GenerateModuleFromModuleMapAction>();
case GenerateModuleInterface:
return std::make_unique<GenerateModuleInterfaceAction>();
- case GenerateHeaderModule:
- return std::make_unique<GenerateHeaderModuleAction>();
+ case GenerateHeaderUnit:
+ return std::make_unique<GenerateHeaderUnitAction>();
case GeneratePCH: return std::make_unique<GeneratePCHAction>();
case GenerateInterfaceStubs:
return std::make_unique<GenerateInterfaceStubsAction>();
@@ -79,7 +82,7 @@ CreateFrontendBaseAction(CompilerInstance &CI) {
if (Plugin.getName() == CI.getFrontendOpts().ActionName) {
std::unique_ptr<PluginASTAction> P(Plugin.instantiate());
if ((P->getActionType() != PluginASTAction::ReplaceAction &&
- P->getActionType() != PluginASTAction::Cmdline) ||
+ P->getActionType() != PluginASTAction::CmdlineAfterMainAction) ||
!P->ParseArgs(
CI,
CI.getFrontendOpts().PluginArgs[std::string(Plugin.getName())]))
@@ -175,6 +178,14 @@ CreateFrontendAction(CompilerInstance &CI) {
}
#endif
+ // Wrap the base FE action in an extract api action to generate
+ // symbol graph as a biproduct of comilation ( enabled with
+ // --emit-symbol-graph option )
+ if (!FEOpts.SymbolGraphOutputDir.empty()) {
+ CI.getCodeGenOpts().ClearASTBeforeBackend = false;
+ Act = std::make_unique<WrappingExtractAPIAction>(std::move(Act));
+ }
+
// If there are any AST files to merge, create a frontend action
// adaptor to perform the merge.
if (!FEOpts.ASTMergeFiles.empty())
@@ -190,8 +201,8 @@ bool ExecuteCompilerInvocation(CompilerInstance *Clang) {
driver::getDriverOptTable().printHelp(
llvm::outs(), "clang -cc1 [options] file...",
"LLVM 'Clang' Compiler: http://clang.llvm.org",
- /*Include=*/driver::options::CC1Option,
- /*Exclude=*/0, /*ShowAllAliases=*/false);
+ /*ShowHidden=*/false, /*ShowAllAliases=*/false,
+ llvm::opt::Visibility(driver::options::CC1Option));
return true;
}
@@ -203,24 +214,7 @@ bool ExecuteCompilerInvocation(CompilerInstance *Clang) {
return true;
}
- // Load any requested plugins.
- for (const std::string &Path : Clang->getFrontendOpts().Plugins) {
- std::string Error;
- if (llvm::sys::DynamicLibrary::LoadLibraryPermanently(Path.c_str(), &Error))
- Clang->getDiagnostics().Report(diag::err_fe_unable_to_load_plugin)
- << Path << Error;
- }
-
- // Check if any of the loaded plugins replaces the main AST action
- for (const FrontendPluginRegistry::entry &Plugin :
- FrontendPluginRegistry::entries()) {
- std::unique_ptr<PluginASTAction> P(Plugin.instantiate());
- if (P->getActionType() == PluginASTAction::ReplaceAction) {
- Clang->getFrontendOpts().ProgramAction = clang::frontend::PluginAction;
- Clang->getFrontendOpts().ActionName = Plugin.getName().str();
- break;
- }
- }
+ Clang->LoadRequestedPlugins();
// Honor -mllvm.
//
@@ -239,7 +233,7 @@ bool ExecuteCompilerInvocation(CompilerInstance *Clang) {
#if CLANG_ENABLE_STATIC_ANALYZER
// These should happen AFTER plugins have been loaded!
- AnalyzerOptions &AnOpts = *Clang->getAnalyzerOpts();
+ AnalyzerOptions &AnOpts = Clang->getAnalyzerOpts();
// Honor -analyzer-checker-help and -analyzer-checker-help-hidden.
if (AnOpts.ShowCheckerHelp || AnOpts.ShowCheckerHelpAlpha ||
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_complex_builtins.h b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_complex_builtins.h
index 2b701fef0ea2..7bc7bc2ce63e 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_complex_builtins.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_complex_builtins.h
@@ -16,7 +16,7 @@
// to work with CUDA and OpenMP target offloading [in C and C++ mode].)
#pragma push_macro("__DEVICE__")
-#ifdef __OPENMP_NVPTX__
+#if defined(__OPENMP_NVPTX__) || defined(__OPENMP_AMDGCN__)
#pragma omp declare target
#define __DEVICE__ __attribute__((noinline, nothrow, cold, weak))
#else
@@ -26,7 +26,7 @@
// To make the algorithms available for C and C++ in CUDA and OpenMP we select
// different but equivalent function versions. TODO: For OpenMP we currently
// select the native builtins as the overload support for templates is lacking.
-#if !defined(__OPENMP_NVPTX__)
+#if !defined(__OPENMP_NVPTX__) && !defined(__OPENMP_AMDGCN__)
#define _ISNANd std::isnan
#define _ISNANf std::isnan
#define _ISINFd std::isinf
@@ -276,7 +276,7 @@ __DEVICE__ float _Complex __divsc3(float __a, float __b, float __c, float __d) {
#undef _fmaxd
#undef _fmaxf
-#ifdef __OPENMP_NVPTX__
+#if defined(__OPENMP_NVPTX__) || defined(__OPENMP_AMDGCN__)
#pragma omp end declare target
#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_device_functions.h b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_device_functions.h
index cc4e1a4dd96a..f8a12cefdb81 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_device_functions.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_device_functions.h
@@ -502,8 +502,8 @@ __DEVICE__ unsigned int __pm0(void) { return __nvvm_read_ptx_sreg_pm0(); }
__DEVICE__ unsigned int __pm1(void) { return __nvvm_read_ptx_sreg_pm1(); }
__DEVICE__ unsigned int __pm2(void) { return __nvvm_read_ptx_sreg_pm2(); }
__DEVICE__ unsigned int __pm3(void) { return __nvvm_read_ptx_sreg_pm3(); }
-__DEVICE__ int __popc(int __a) { return __nv_popc(__a); }
-__DEVICE__ int __popcll(long long __a) { return __nv_popcll(__a); }
+__DEVICE__ int __popc(unsigned int __a) { return __nv_popc(__a); }
+__DEVICE__ int __popcll(unsigned long long __a) { return __nv_popcll(__a); }
__DEVICE__ float __powf(float __a, float __b) {
return __nv_fast_powf(__a, __b);
}
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_intrinsics.h b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_intrinsics.h
index c7bff6a9d8fe..3c3948863c1d 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_intrinsics.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_intrinsics.h
@@ -71,8 +71,8 @@
} \
inline __device__ unsigned long long __FnName( \
unsigned long long __val, __Type __offset, int __width = warpSize) { \
- return static_cast<unsigned long long>(::__FnName( \
- static_cast<unsigned long long>(__val), __offset, __width)); \
+ return static_cast<unsigned long long>( \
+ ::__FnName(static_cast<long long>(__val), __offset, __width)); \
} \
inline __device__ double __FnName(double __val, __Type __offset, \
int __width = warpSize) { \
@@ -139,8 +139,8 @@ __MAKE_SHUFFLES(__shfl_xor, __nvvm_shfl_bfly_i32, __nvvm_shfl_bfly_f32, 0x1f,
inline __device__ unsigned long long __FnName( \
unsigned int __mask, unsigned long long __val, __Type __offset, \
int __width = warpSize) { \
- return static_cast<unsigned long long>(::__FnName( \
- __mask, static_cast<unsigned long long>(__val), __offset, __width)); \
+ return static_cast<unsigned long long>( \
+ ::__FnName(__mask, static_cast<long long>(__val), __offset, __width)); \
} \
inline __device__ long __FnName(unsigned int __mask, long __val, \
__Type __offset, int __width = warpSize) { \
@@ -234,7 +234,7 @@ inline __device__ unsigned int __match32_any_sync(unsigned int mask,
return __nvvm_match_any_sync_i32(mask, value);
}
-inline __device__ unsigned long long
+inline __device__ unsigned int
__match64_any_sync(unsigned int mask, unsigned long long value) {
return __nvvm_match_any_sync_i64(mask, value);
}
@@ -244,7 +244,7 @@ __match32_all_sync(unsigned int mask, unsigned int value, int *pred) {
return __nvvm_match_all_sync_i32p(mask, value, pred);
}
-inline __device__ unsigned long long
+inline __device__ unsigned int
__match64_all_sync(unsigned int mask, unsigned long long value, int *pred) {
return __nvvm_match_all_sync_i64p(mask, value, pred);
}
@@ -483,4 +483,227 @@ inline __device__ unsigned __funnelshift_rc(unsigned low32, unsigned high32,
#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 320
+#if CUDA_VERSION >= 11000
+extern "C" {
+__device__ inline size_t __nv_cvta_generic_to_global_impl(const void *__ptr) {
+ return (size_t)(void __attribute__((address_space(1))) *)__ptr;
+}
+__device__ inline size_t __nv_cvta_generic_to_shared_impl(const void *__ptr) {
+ return (size_t)(void __attribute__((address_space(3))) *)__ptr;
+}
+__device__ inline size_t __nv_cvta_generic_to_constant_impl(const void *__ptr) {
+ return (size_t)(void __attribute__((address_space(4))) *)__ptr;
+}
+__device__ inline size_t __nv_cvta_generic_to_local_impl(const void *__ptr) {
+ return (size_t)(void __attribute__((address_space(5))) *)__ptr;
+}
+__device__ inline void *__nv_cvta_global_to_generic_impl(size_t __ptr) {
+ return (void *)(void __attribute__((address_space(1))) *)__ptr;
+}
+__device__ inline void *__nv_cvta_shared_to_generic_impl(size_t __ptr) {
+ return (void *)(void __attribute__((address_space(3))) *)__ptr;
+}
+__device__ inline void *__nv_cvta_constant_to_generic_impl(size_t __ptr) {
+ return (void *)(void __attribute__((address_space(4))) *)__ptr;
+}
+__device__ inline void *__nv_cvta_local_to_generic_impl(size_t __ptr) {
+ return (void *)(void __attribute__((address_space(5))) *)__ptr;
+}
+__device__ inline cuuint32_t __nvvm_get_smem_pointer(void *__ptr) {
+ return __nv_cvta_generic_to_shared_impl(__ptr);
+}
+} // extern "C"
+
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800
+__device__ inline unsigned __reduce_add_sync(unsigned __mask,
+ unsigned __value) {
+ return __nvvm_redux_sync_add(__mask, __value);
+}
+__device__ inline unsigned __reduce_min_sync(unsigned __mask,
+ unsigned __value) {
+ return __nvvm_redux_sync_umin(__mask, __value);
+}
+__device__ inline unsigned __reduce_max_sync(unsigned __mask,
+ unsigned __value) {
+ return __nvvm_redux_sync_umax(__mask, __value);
+}
+__device__ inline int __reduce_min_sync(unsigned __mask, int __value) {
+ return __nvvm_redux_sync_min(__mask, __value);
+}
+__device__ inline int __reduce_max_sync(unsigned __mask, int __value) {
+ return __nvvm_redux_sync_max(__mask, __value);
+}
+__device__ inline unsigned __reduce_or_sync(unsigned __mask, unsigned __value) {
+ return __nvvm_redux_sync_or(__mask, __value);
+}
+__device__ inline unsigned __reduce_and_sync(unsigned __mask,
+ unsigned __value) {
+ return __nvvm_redux_sync_and(__mask, __value);
+}
+__device__ inline unsigned __reduce_xor_sync(unsigned __mask,
+ unsigned __value) {
+ return __nvvm_redux_sync_xor(__mask, __value);
+}
+
+__device__ inline void __nv_memcpy_async_shared_global_4(void *__dst,
+ const void *__src,
+ unsigned __src_size) {
+ __nvvm_cp_async_ca_shared_global_4(
+ (void __attribute__((address_space(3))) *)__dst,
+ (const void __attribute__((address_space(1))) *)__src, __src_size);
+}
+__device__ inline void __nv_memcpy_async_shared_global_8(void *__dst,
+ const void *__src,
+ unsigned __src_size) {
+ __nvvm_cp_async_ca_shared_global_8(
+ (void __attribute__((address_space(3))) *)__dst,
+ (const void __attribute__((address_space(1))) *)__src, __src_size);
+}
+__device__ inline void __nv_memcpy_async_shared_global_16(void *__dst,
+ const void *__src,
+ unsigned __src_size) {
+ __nvvm_cp_async_ca_shared_global_16(
+ (void __attribute__((address_space(3))) *)__dst,
+ (const void __attribute__((address_space(1))) *)__src, __src_size);
+}
+
+__device__ inline void *
+__nv_associate_access_property(const void *__ptr, unsigned long long __prop) {
+ // TODO: it appears to provide compiler with some sort of a hint. We do not
+ // know what exactly it is supposed to do. However, CUDA headers suggest that
+ // just passing through __ptr should not affect correctness. They do so on
+ // pre-sm80 GPUs where this builtin is not available.
+ return (void*)__ptr;
+}
+#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800
+
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900
+__device__ inline unsigned __isCtaShared(const void *ptr) {
+ return __isShared(ptr);
+}
+
+__device__ inline unsigned __isClusterShared(const void *__ptr) {
+ return __nvvm_isspacep_shared_cluster(__ptr);
+}
+
+__device__ inline void *__cluster_map_shared_rank(const void *__ptr,
+ unsigned __rank) {
+ return __nvvm_mapa((void *)__ptr, __rank);
+}
+
+__device__ inline unsigned __cluster_query_shared_rank(const void *__ptr) {
+ return __nvvm_getctarank((void *)__ptr);
+}
+
+__device__ inline uint2
+__cluster_map_shared_multicast(const void *__ptr,
+ unsigned int __cluster_cta_mask) {
+ return make_uint2((unsigned)__cvta_generic_to_shared(__ptr),
+ __cluster_cta_mask);
+}
+
+__device__ inline unsigned __clusterDimIsSpecified() {
+ return __nvvm_is_explicit_cluster();
+}
+
+__device__ inline dim3 __clusterDim() {
+ return dim3(__nvvm_read_ptx_sreg_cluster_nctaid_x(),
+ __nvvm_read_ptx_sreg_cluster_nctaid_y(),
+ __nvvm_read_ptx_sreg_cluster_nctaid_z());
+}
+
+__device__ inline dim3 __clusterRelativeBlockIdx() {
+ return dim3(__nvvm_read_ptx_sreg_cluster_ctaid_x(),
+ __nvvm_read_ptx_sreg_cluster_ctaid_y(),
+ __nvvm_read_ptx_sreg_cluster_ctaid_z());
+}
+
+__device__ inline dim3 __clusterGridDimInClusters() {
+ return dim3(__nvvm_read_ptx_sreg_nclusterid_x(),
+ __nvvm_read_ptx_sreg_nclusterid_y(),
+ __nvvm_read_ptx_sreg_nclusterid_z());
+}
+
+__device__ inline dim3 __clusterIdx() {
+ return dim3(__nvvm_read_ptx_sreg_clusterid_x(),
+ __nvvm_read_ptx_sreg_clusterid_y(),
+ __nvvm_read_ptx_sreg_clusterid_z());
+}
+
+__device__ inline unsigned __clusterRelativeBlockRank() {
+ return __nvvm_read_ptx_sreg_cluster_ctarank();
+}
+
+__device__ inline unsigned __clusterSizeInBlocks() {
+ return __nvvm_read_ptx_sreg_cluster_nctarank();
+}
+
+__device__ inline void __cluster_barrier_arrive() {
+ __nvvm_barrier_cluster_arrive();
+}
+
+__device__ inline void __cluster_barrier_arrive_relaxed() {
+ __nvvm_barrier_cluster_arrive_relaxed();
+}
+
+__device__ inline void __cluster_barrier_wait() {
+ __nvvm_barrier_cluster_wait();
+}
+
+__device__ inline void __threadfence_cluster() { __nvvm_fence_sc_cluster(); }
+
+__device__ inline float2 atomicAdd(float2 *__ptr, float2 __val) {
+ float2 __ret;
+ __asm__("atom.add.v2.f32 {%0, %1}, [%2], {%3, %4};"
+ : "=f"(__ret.x), "=f"(__ret.y)
+ : "l"(__ptr), "f"(__val.x), "f"(__val.y));
+ return __ret;
+}
+
+__device__ inline float2 atomicAdd_block(float2 *__ptr, float2 __val) {
+ float2 __ret;
+ __asm__("atom.cta.add.v2.f32 {%0, %1}, [%2], {%3, %4};"
+ : "=f"(__ret.x), "=f"(__ret.y)
+ : "l"(__ptr), "f"(__val.x), "f"(__val.y));
+ return __ret;
+}
+
+__device__ inline float2 atomicAdd_system(float2 *__ptr, float2 __val) {
+ float2 __ret;
+ __asm__("atom.sys.add.v2.f32 {%0, %1}, [%2], {%3, %4};"
+ : "=f"(__ret.x), "=f"(__ret.y)
+ : "l"(__ptr), "f"(__val.x), "f"(__val.y));
+ return __ret;
+}
+
+__device__ inline float4 atomicAdd(float4 *__ptr, float4 __val) {
+ float4 __ret;
+ __asm__("atom.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
+ : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
+ : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w));
+ return __ret;
+}
+
+__device__ inline float4 atomicAdd_block(float4 *__ptr, float4 __val) {
+ float4 __ret;
+ __asm__(
+ "atom.cta.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
+ : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
+ : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w));
+ return __ret;
+}
+
+__device__ inline float4 atomicAdd_system(float4 *__ptr, float4 __val) {
+ float4 __ret;
+ __asm__(
+ "atom.sys.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
+ : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
+ : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w)
+ :);
+ return __ret;
+}
+
+#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900
+#endif // CUDA_VERSION >= 11000
+
#endif // defined(__CLANG_CUDA_INTRINSICS_H__)
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_libdevice_declares.h b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_libdevice_declares.h
index 6173b589e3ef..ded0382a7ddc 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_libdevice_declares.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_libdevice_declares.h
@@ -16,6 +16,7 @@ extern "C" {
#if defined(__OPENMP_NVPTX__)
#define __DEVICE__
+#pragma omp begin assumes ext_spmd_amenable no_openmp
#elif defined(__CUDA__)
#define __DEVICE__ __device__
#endif
@@ -284,8 +285,8 @@ __DEVICE__ double __nv_normcdfinv(double __a);
__DEVICE__ float __nv_normcdfinvf(float __a);
__DEVICE__ float __nv_normf(int __a, const float *__b);
__DEVICE__ double __nv_norm(int __a, const double *__b);
-__DEVICE__ int __nv_popc(int __a);
-__DEVICE__ int __nv_popcll(long long __a);
+__DEVICE__ int __nv_popc(unsigned int __a);
+__DEVICE__ int __nv_popcll(unsigned long long __a);
__DEVICE__ double __nv_pow(double __a, double __b);
__DEVICE__ float __nv_powf(float __a, float __b);
__DEVICE__ double __nv_powi(double __a, int __b);
@@ -456,6 +457,11 @@ __DEVICE__ double __nv_y1(double __a);
__DEVICE__ float __nv_y1f(float __a);
__DEVICE__ float __nv_ynf(int __a, float __b);
__DEVICE__ double __nv_yn(int __a, double __b);
+
+#if defined(__OPENMP_NVPTX__)
+#pragma omp end assumes ext_spmd_amenable no_openmp
+#endif
+
#if defined(__cplusplus)
} // extern "C"
#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_math.h b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_math.h
index 538556f394da..040191650686 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_math.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_math.h
@@ -36,7 +36,7 @@
// because the OpenMP overlay requires constexpr functions here but prior to
// c++14 void return functions could not be constexpr.
#pragma push_macro("__DEVICE_VOID__")
-#ifdef __OPENMP_NVPTX__ && defined(__cplusplus) && __cplusplus < 201402L
+#if defined(__OPENMP_NVPTX__) && defined(__cplusplus) && __cplusplus < 201402L
#define __DEVICE_VOID__ static __attribute__((always_inline, nothrow))
#else
#define __DEVICE_VOID__ __DEVICE__
@@ -45,9 +45,9 @@
// libdevice provides fast low precision and slow full-recision implementations
// for some functions. Which one gets selected depends on
// __CLANG_CUDA_APPROX_TRANSCENDENTALS__ which gets defined by clang if
-// -ffast-math or -fcuda-approx-transcendentals are in effect.
+// -ffast-math or -fgpu-approx-transcendentals are in effect.
#pragma push_macro("__FAST_OR_SLOW")
-#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
+#if defined(__CLANG_GPU_APPROX_TRANSCENDENTALS__)
#define __FAST_OR_SLOW(fast, slow) fast
#else
#define __FAST_OR_SLOW(fast, slow) slow
@@ -345,4 +345,4 @@ __DEVICE__ float ynf(int __a, float __b) { return __nv_ynf(__a, __b); }
#pragma pop_macro("__DEVICE_VOID__")
#pragma pop_macro("__FAST_OR_SLOW")
-#endif // __CLANG_CUDA_DEVICE_FUNCTIONS_H__
+#endif // __CLANG_CUDA_MATH_H__
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_runtime_wrapper.h b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_runtime_wrapper.h
index f401964bd529..d369c86fe106 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_runtime_wrapper.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_runtime_wrapper.h
@@ -41,6 +41,7 @@
#include <cmath>
#include <cstdlib>
#include <stdlib.h>
+#include <string.h>
#undef __CUDACC__
// Preserve common macros that will be changed below by us or by CUDA
@@ -64,9 +65,9 @@
#endif
// Make largest subset of device functions available during host
-// compilation -- SM_35 for the time being.
+// compilation.
#ifndef __CUDA_ARCH__
-#define __CUDA_ARCH__ 350
+#define __CUDA_ARCH__ 9999
#endif
#include "__clang_cuda_builtin_vars.h"
@@ -195,21 +196,16 @@ inline __host__ double __signbitd(double x) {
// math_function.hpp uses the __USE_FAST_MATH__ macro to determine whether we
// get the slow-but-accurate or fast-but-inaccurate versions of functions like
-// sin and exp. This is controlled in clang by -fcuda-approx-transcendentals.
+// sin and exp. This is controlled in clang by -fgpu-approx-transcendentals.
//
// device_functions.hpp uses __USE_FAST_MATH__ for a different purpose (fast vs.
// slow divides), so we need to scope our define carefully here.
#pragma push_macro("__USE_FAST_MATH__")
-#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
+#if defined(__CLANG_GPU_APPROX_TRANSCENDENTALS__)
#define __USE_FAST_MATH__ 1
#endif
#if CUDA_VERSION >= 9000
-// CUDA-9.2 needs host-side memcpy for some host functions in
-// device_functions.hpp
-#if CUDA_VERSION >= 9020
-#include <string.h>
-#endif
#include "crt/math_functions.hpp"
#else
#include "math_functions.hpp"
@@ -275,7 +271,38 @@ static inline __device__ void __brkpt(int __c) { __brkpt(); }
#undef __CUDABE__
#endif
#include "sm_20_atomic_functions.hpp"
+// Predicate functions used in `__builtin_assume` need to have no side effect.
+// However, sm_20_intrinsics.hpp doesn't define them with neither pure nor
+// const attribute. Rename definitions from sm_20_intrinsics.hpp and re-define
+// them as pure ones.
+#pragma push_macro("__isGlobal")
+#pragma push_macro("__isShared")
+#pragma push_macro("__isConstant")
+#pragma push_macro("__isLocal")
+#define __isGlobal __ignored_cuda___isGlobal
+#define __isShared __ignored_cuda___isShared
+#define __isConstant __ignored_cuda___isConstant
+#define __isLocal __ignored_cuda___isLocal
#include "sm_20_intrinsics.hpp"
+#pragma pop_macro("__isGlobal")
+#pragma pop_macro("__isShared")
+#pragma pop_macro("__isConstant")
+#pragma pop_macro("__isLocal")
+#pragma push_macro("__DEVICE__")
+#define __DEVICE__ static __device__ __forceinline__ __attribute__((const))
+__DEVICE__ unsigned int __isGlobal(const void *p) {
+ return __nvvm_isspacep_global(p);
+}
+__DEVICE__ unsigned int __isShared(const void *p) {
+ return __nvvm_isspacep_shared(p);
+}
+__DEVICE__ unsigned int __isConstant(const void *p) {
+ return __nvvm_isspacep_const(p);
+}
+__DEVICE__ unsigned int __isLocal(const void *p) {
+ return __nvvm_isspacep_local(p);
+}
+#pragma pop_macro("__DEVICE__")
#include "sm_32_atomic_functions.hpp"
// Don't include sm_30_intrinsics.h and sm_32_intrinsics.h. These define the
@@ -330,6 +357,34 @@ static inline __device__ void __brkpt(int __c) { __brkpt(); }
#pragma pop_macro("__host__")
+// __clang_cuda_texture_intrinsics.h must be included first in order to provide
+// implementation for __nv_tex_surf_handler that CUDA's headers depend on.
+// The implementation requires c++11 and only works with CUDA-9 or newer.
+#if __cplusplus >= 201103L && CUDA_VERSION >= 9000
+// clang-format off
+#include <__clang_cuda_texture_intrinsics.h>
+// clang-format on
+#else
+#if CUDA_VERSION >= 9000
+// Provide a hint that texture support needs C++11.
+template <typename T> struct __nv_tex_needs_cxx11 {
+ const static bool value = false;
+};
+template <class T>
+__host__ __device__ void __nv_tex_surf_handler(const char *name, T *ptr,
+ cudaTextureObject_t obj,
+ float x) {
+ _Static_assert(__nv_tex_needs_cxx11<T>::value,
+ "Texture support requires C++11");
+}
+#else
+// Textures in CUDA-8 and older are not supported by clang.There's no
+// convenient way to intercept texture use in these versions, so we can't
+// produce a meaningful error. The source code that attempts to use textures
+// will continue to fail as it does now.
+#endif // CUDA_VERSION
+#endif // __cplusplus >= 201103L && CUDA_VERSION >= 9000
+#include "texture_fetch_functions.h"
#include "texture_indirect_functions.h"
// Restore state of __CUDA_ARCH__ and __THROW we had on entry.
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_texture_intrinsics.h b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_texture_intrinsics.h
new file mode 100644
index 000000000000..a71952211237
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_texture_intrinsics.h
@@ -0,0 +1,742 @@
+/*===--- __clang_cuda_texture_intrinsics.h - Device-side texture support ---===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ *
+ * This header provides in-header implmentations for NVCC's built-in
+ * __nv_tex_surf_handler() which is used by CUDA's texture-related headers. The
+ * built-in is unusual as it's actually a set of function overloads that use the
+ * first string literal argument as one of the overload parameters.
+ */
+#ifndef __CLANG_CUDA_TEXTURE_INTRINSICS_H__
+#define __CLANG_CUDA_TEXTURE_INTRINSICS_H__
+#ifndef __CUDA__
+#error "This file is for CUDA compilation only."
+#endif
+
+// __nv_tex_surf_handler() provided by this header as a macro.
+#define __nv_tex_surf_handler(__op, __ptr, ...) \
+ ::__cuda_tex::__tex_fetch< \
+ ::__cuda_tex::__Tag<::__cuda_tex::__tex_op_hash(__op)>>(__ptr, \
+ __VA_ARGS__)
+
+#pragma push_macro("__ASM_OUT")
+#pragma push_macro("__ASM_OUTP")
+#pragma push_macro("__Args")
+#pragma push_macro("__ID")
+#pragma push_macro("__IDV")
+#pragma push_macro("__IMPL_2DGATHER")
+#pragma push_macro("__IMPL_ALIAS")
+#pragma push_macro("__IMPL_ALIASI")
+#pragma push_macro("__IMPL_F1")
+#pragma push_macro("__IMPL_F3")
+#pragma push_macro("__IMPL_F3N")
+#pragma push_macro("__IMPL_F3S")
+#pragma push_macro("__IMPL_S")
+#pragma push_macro("__IMPL_S3")
+#pragma push_macro("__IMPL_S3I")
+#pragma push_macro("__IMPL_S3N")
+#pragma push_macro("__IMPL_S3NI")
+#pragma push_macro("__IMPL_S3S")
+#pragma push_macro("__IMPL_S3SI")
+#pragma push_macro("__IMPL_SI")
+#pragma push_macro("__L")
+#pragma push_macro("__STRIP_PARENS")
+
+// Put all functions into anonymous namespace so they have internal linkage.
+// The device-only function here must be internal in order to avoid ODR
+// violations in case they are used from the files compiled with
+// -fgpu-rdc. E.g. a library and an app using it may be built with a different
+// version of this header file.
+namespace {
+
+// Put the implmentation into its own namespace so we don't pollute the TU.
+namespace __cuda_tex {
+
+// First, we need a perfect hash function and a few constexpr helper functions
+// for converting a string literal into a numeric value which can be used to
+// parametrize a template. We can not use string literals for that as that would
+// require C++20.
+//
+// The hash function was generated with 'gperf' and then manually converted into
+// its constexpr equivalent.
+//
+// NOTE: the perfect hashing scheme comes with inherent self-test. If the hash
+// function has a collision for any of the texture operations, the compilation
+// will fail due to an attempt to redefine a tag with the same value. If the
+// header compiles, then the hash function is good enough for the job.
+
+constexpr int __tex_len(const char *s) {
+ return (s[0] == 0) ? 0
+ : (s[1] == 0) ? 1
+ : (s[2] == 0) ? 2
+ : (s[3] == 0) ? 3
+ : (s[4] == 0) ? 4
+ : (s[5] == 0) ? 5
+ : (s[6] == 0) ? 6
+ : (s[7] == 0) ? 7
+ : (s[8] == 0) ? 8
+ : (s[9] == 0) ? 9
+ : (s[10] == 0) ? 10
+ : (s[11] == 0) ? 11
+ : (s[12] == 0) ? 12
+ : (s[13] == 0) ? 13
+ : (s[14] == 0) ? 14
+ : (s[15] == 0) ? 15
+ : (s[16] == 0) ? 16
+ : (s[17] == 0) ? 17
+ : (s[18] == 0) ? 18
+ : (s[19] == 0) ? 19
+ : (s[20] == 0) ? 20
+ : (s[21] == 0) ? 21
+ : (s[22] == 0) ? 22
+ : (s[23] == 0) ? 23
+ : (s[24] == 0) ? 24
+ : (s[25] == 0) ? 25
+ : (s[26] == 0) ? 26
+ : (s[27] == 0) ? 27
+ : (s[28] == 0) ? 28
+ : (s[29] == 0) ? 29
+ : (s[30] == 0) ? 30
+ : (s[31] == 0) ? 31
+ : 32;
+}
+
+constexpr int __tex_hash_map(int c) {
+ return (c == 49) ? 10
+ : (c == 50) ? 0
+ : (c == 51) ? 100
+ : (c == 52) ? 30
+ : (c == 67) ? 10
+ : (c == 68) ? 0
+ : (c == 69) ? 25
+ : (c == 72) ? 70
+ : (c == 77) ? 0
+ : (c == 96) ? 44
+ : (c == 99) ? 10
+ : (c == 100) ? 5
+ : (c == 101) ? 60
+ : (c == 102) ? 40
+ : (c == 103) ? 70
+ : (c == 104) ? 25
+ : (c == 112) ? 0
+ : (c == 114) ? 45
+ : (c == 117) ? 5
+ : (c == 118) ? 85
+ : (c == 120) ? 20
+ : 225;
+}
+
+constexpr int __tex_op_hash(const char *str) {
+ return __tex_len(str) + __tex_hash_map(str[7] + 1) + __tex_hash_map(str[6]) +
+ __tex_hash_map(str[5]) + __tex_hash_map(str[__tex_len(str) - 1]);
+}
+
+// Tag type to identify particular texture operation.
+template <int N> struct __Tag;
+#define __ID(__op) __Tag<__tex_op_hash(__op)>
+// Tags for variants of particular operation. E.g. tex2Dgather can translate
+// into 4 different instructions.
+#define __IDV(__op, __variant) \
+ __Tag<10000 + __tex_op_hash(__op) * 100 + __variant>
+
+// Helper classes for figuring out key data types for derived types.
+// E.g. char2 has __base_t = char, __fetch_t = char4
+template <class> struct __TypeInfoT;
+// Type info for the fundamental types.
+template <> struct __TypeInfoT<float> {
+ using __base_t = float;
+ using __fetch_t = float4;
+};
+template <> struct __TypeInfoT<char> {
+ using __base_t = char;
+ using __fetch_t = int4;
+};
+template <> struct __TypeInfoT<signed char> {
+ using __base_t = signed char;
+ using __fetch_t = int4;
+};
+template <> struct __TypeInfoT<unsigned char> {
+ using __base_t = unsigned char;
+ using __fetch_t = uint4;
+};
+template <> struct __TypeInfoT<short> {
+ using __base_t = short;
+ using __fetch_t = int4;
+};
+template <> struct __TypeInfoT<unsigned short> {
+ using __base_t = unsigned short;
+ using __fetch_t = uint4;
+};
+template <> struct __TypeInfoT<int> {
+ using __base_t = int;
+ using __fetch_t = int4;
+};
+template <> struct __TypeInfoT<unsigned int> {
+ using __base_t = unsigned int;
+ using __fetch_t = uint4;
+};
+
+// Derived base/fetch types for N-element vectors.
+template <class __T> struct __TypeInfoT {
+ using __base_t = decltype(__T::x);
+ using __fetch_t = typename __TypeInfoT<__base_t>::__fetch_t;
+};
+
+// Classes that implement specific texture ops.
+template <class __op> struct __tex_fetch_v4;
+
+// Helper macros to strip parens from a macro argument.
+#define __Args(...) __VA_ARGS__
+#define __STRIP_PARENS(__X) __X
+#define __L(__X) __STRIP_PARENS(__Args __X)
+
+// Construct inline assembly output args.
+// Results are stored in a temp var __r.
+// isResident bool is pointed to by __ir
+// Asm args for return values. It's a 4-element vector
+#define __ASM_OUT(__t) \
+ ("=" __t(__r.x), "=" __t(__r.y), "=" __t(__r.z), "=" __t(__r.w))
+// .. possibly combined with a predicate.
+#define __ASM_OUTP(__t) (__L(__ASM_OUT(__t)), "=h"(*__ir))
+
+// Implements a single variant of texture fetch instruction.
+#define __IMPL_F1(__rt, __dt, __args, __asm_op, __asm_outs, __asm_args) \
+ template <> \
+ __device__ __rt __run<__dt>(cudaTextureObject_t __obj, __L(__args)) { \
+ __rt __r; \
+ asm(__asm_op : __L(__asm_outs) : "l"(__obj), __L(__asm_args)); \
+ return __r; \
+ }
+
+// Implements texture fetch instructions for int4/uint4/float4 data types.
+#define __IMPL_F3(__args, __asm_op, __ctype, __asm_op_args, __asm_args) \
+ __IMPL_F1(int4, int4, __args, __asm_op ".s32." __ctype "\t" __asm_op_args, \
+ __ASM_OUT("r"), __asm_args) \
+ __IMPL_F1(uint4, uint4, __args, __asm_op ".u32." __ctype "\t" __asm_op_args, \
+ __ASM_OUT("r"), __asm_args) \
+ __IMPL_F1(float4, float4, __args, \
+ __asm_op ".f32." __ctype "\t" __asm_op_args, __ASM_OUT("f"), \
+ __asm_args)
+// Implements 'sparse' texture fetch instructions for int4/uint4/float4 data
+// types. Similar to above, but returns a boolean 'isPresent' value in addition
+// to texture data,
+#define __IMPL_F3S(__args, __asm_op, __ctype, __asm_op_args, __asm_args) \
+ __IMPL_F1(int4, int4, __args, __asm_op ".s32." __ctype "\t" __asm_op_args, \
+ __ASM_OUTP("r"), __asm_args) \
+ __IMPL_F1(uint4, uint4, __args, __asm_op ".u32." __ctype "\t" __asm_op_args, \
+ __ASM_OUTP("r"), __asm_args) \
+ __IMPL_F1(float4, float4, __args, \
+ __asm_op ".f32." __ctype "\t" __asm_op_args, __ASM_OUTP("f"), \
+ __asm_args)
+
+// Similar to F3, but for integer data which is returned as normalized floats.
+// Only instantiates fetch functions for int4/uint4.
+#define __IMPL_F3N(__args, __asm_op, __ctype, __asm_op_args, __asm_args) \
+ __IMPL_F1(float4, int4, __args, __asm_op ".s32." __ctype "\t" __asm_op_args, \
+ __ASM_OUT("r"), __asm_args) \
+ __IMPL_F1(float4, uint4, __args, \
+ __asm_op ".u32." __ctype "\t" __asm_op_args, __ASM_OUT("r"), \
+ __asm_args)
+
+// Instantiates __tex_fetch_v4 with regular fetch functions.
+#define __IMPL_S3I(__op, __args, __asm_op, __ctype, __asm_op_args, __asm_args) \
+ template <> struct __tex_fetch_v4<__op> { \
+ template <class T> \
+ __device__ static T __run(cudaTextureObject_t __obj, __L(__args)); \
+ __IMPL_F3(__args, __asm_op, __ctype, __asm_op_args, __asm_args) \
+ }
+
+// Same, but for sparse ops. Only available on sm_60+
+#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 600)
+#define __IMPL_S3SI(__op, __args, __asm_op, __ctype, __asm_op_args, \
+ __asm_args) \
+ template <> struct __tex_fetch_v4<__op> { \
+ template <class T> \
+ __device__ static T __run(cudaTextureObject_t __obj, __L(__args)); \
+ __IMPL_F3S(__args, __asm_op, __ctype, __asm_op_args, __asm_args) \
+ }
+#else
+#define __IMPL_S3SI(__op, __args, __asm_op, __ctype, __asm_op_args, __asm_args)
+#endif
+
+// Same, but for normalized float ops.
+#define __IMPL_S3NI(__op, __args, __asm_op, __ctype, __asm_op_args, \
+ __asm_args) \
+ template <> struct __tex_fetch_v4<__op> { \
+ template <class T> \
+ __device__ static float4 __run(cudaTextureObject_t __obj, __L(__args)); \
+ __IMPL_F3N(__args, __asm_op, __ctype, __asm_op_args, __asm_args) \
+ }
+
+// Regular and normalized float ops share a lot of similarities. This macro
+// instantiates both variants -- normal for __op and normalized for __opn.
+#define __IMPL_SI(__op, __opn, __args, __asm_op, __ctype, __asm_op_args, \
+ __asm_args) \
+ __IMPL_S3I(__op, __args, __asm_op, __ctype, __asm_op_args, __asm_args); \
+ __IMPL_S3NI(__opn, __args, __asm_op, __ctype, __asm_op_args, __asm_args)
+
+// Convenience macros which converts string literal __op into a __Tag,
+#define __IMPL_S3(__op, __args, __asm_op, __ctype, __asm_op_args, __asm_args) \
+ __IMPL_S3I(__ID(__op), __args, __asm_op, __ctype, __asm_op_args, __asm_args)
+#define __IMPL_S3S(__op, __args, __asm_op, __ctype, __asm_op_args, __asm_args) \
+ __IMPL_S3SI(__ID(__op), __args, __asm_op, __ctype, __asm_op_args, __asm_args)
+#define __IMPL_S3N(__op, __args, __asm_op, __ctype, __asm_op_args, __asm_args) \
+ __IMPL_S3NI(__ID(__op), __args, __asm_op, __ctype, __asm_op_args, __asm_args)
+#define __IMPL_S(__op, __opn, __args, __asm_op, __ctype, __asm_op_args, \
+ __asm_args) \
+ __IMPL_SI(__ID(__op), __ID(__opn), __args, __asm_op, __ctype, __asm_op_args, \
+ __asm_args)
+
+// CUDA headers have some 'legacy' texture oprerations that duplicate
+// functionality. So, we just inherit it, instead of refining a copy.
+#define __IMPL_ALIASI(__op, __opn) \
+ template <> struct __tex_fetch_v4<__op> : __tex_fetch_v4<__opn> {}
+#define __IMPL_ALIAS(__op, __opn) __IMPL_ALIASI(__ID(__op), __ID(__opn))
+
+// Now we can instantiate everything we need for each specific texture fetch
+// variant.
+__IMPL_S("__tex1D_v2", "__tex1D_rmnf_v2", (float __x), "tex.1d.v4", "f32",
+ "{%0, %1, %2, %3}, [%4, {%5}];", ("f"(__x)));
+__IMPL_S("__tex1Dfetch_v2", "__tex1Dfetch_rmnf_v2", (int __x), "tex.1d.v4",
+ "s32", "{%0, %1, %2, %3}, [%4, {%5}];", ("r"(__x)));
+__IMPL_ALIAS("__itex1D", "__tex1D_v2");
+__IMPL_ALIAS("__itex1Dfetch", "__tex1Dfetch_v2");
+
+__IMPL_S("__tex1DGrad_v2", "__tex1DGrad_rmnf_v2",
+ (float __x, float __dPdx, float __dPdy), "tex.grad.1d.v4", "f32",
+ "{%0, %1, %2, %3}, [%4, {%5}], {%6}, {%7};",
+ ("f"(__x), "f"(__dPdx), "f"(__dPdy)));
+__IMPL_ALIAS("__itex1DGrad", "__tex1DGrad_v2");
+
+__IMPL_S("__tex1DLayered_v2", "__tex1DLayered_rmnf_v2",
+ (float __x, int __layer), "tex.a1d.v4", "f32",
+ "{%0, %1, %2, %3}, [%4, {%5, %6}];", ("r"(__layer), "f"(__x)));
+__IMPL_ALIAS("__itex1DLayered", "__tex1DLayered_v2");
+
+__IMPL_S("__tex1DLayeredGrad_v2", "__tex1DLayeredGrad_rmnf_v2",
+ (float __x, int __layer, float __dPdx, float __dPdy),
+ "tex.grad.a1d.v4", "f32",
+ "{%0, %1, %2, %3}, [%4, {%5, %6}], {%7}, {%8};",
+ ("r"(__layer), "f"(__x), "f"(__dPdx), "f"(__dPdy)));
+__IMPL_ALIAS("__itex1DLayeredGrad", "__tex1DLayeredGrad_v2");
+
+__IMPL_S("__tex1DLayeredLod_v2", "__tex1DLayeredLod_rmnf_v2",
+ (float __x, int __layer, float __level), "tex.level.a1d.v4", "f32",
+ "{%0, %1, %2, %3}, [%4, {%5, %6}], %7;",
+ ("r"(__layer), "f"(__x), "f"(__level)));
+__IMPL_ALIAS("__itex1DLayeredLod", "__tex1DLayeredLod_v2");
+
+__IMPL_S("__tex1DLod_v2", "__tex1DLod_rmnf_v2", (float __x, float __level),
+ "tex.level.1d.v4", "f32", "{%0, %1, %2, %3}, [%4, {%5}], %6;",
+ ("f"(__x), "f"(__level)));
+__IMPL_ALIAS("__itex1DLod", "__tex1DLod_v2");
+
+// 2D
+__IMPL_S("__tex2D_v2", "__tex2D_rmnf_v2", (float __x, float __y), "tex.2d.v4",
+ "f32", "{%0, %1, %2, %3}, [%4, {%5, %6}];", ("f"(__x), "f"(__y)));
+__IMPL_ALIAS("__itex2D", "__tex2D_v2");
+
+__IMPL_S3S("__itex2D_sparse", (float __x, float __y, unsigned char *__ir),
+ "{.reg .pred %%p0;\n\t"
+ "tex.2d.v4",
+ "f32",
+ "{%0, %1, %2, %3}|%%p0, [%5, {%6, %7}];\n\t"
+ " selp.u16 %4, 1, 0, %%p0; }",
+ ("f"(__x), "f"(__y)));
+
+__IMPL_S("__tex2DGrad_v2", "__tex2DGrad_rmnf_v2",
+ (float __x, float __y, const float2 *__dPdx, const float2 *__dPdy),
+ "tex.grad.2d.v4", "f32",
+ "{%0, %1, %2, %3}, [%4, {%5, %6}], {%7, %8}, {%9, %10};",
+ ("f"(__x), "f"(__y), "f"(__dPdx->x), "f"(__dPdx->y), "f"(__dPdy->x),
+ "f"(__dPdy->y)));
+__IMPL_ALIAS("__itex2DGrad_v2", "__tex2DGrad_v2");
+
+__IMPL_S3S("__itex2DGrad_sparse",
+ (float __x, float __y, const float2 *__dPdx, const float2 *__dPdy,
+ unsigned char *__ir),
+ "{.reg .pred %%p0;\n\t"
+ "tex.grad.2d.v4",
+ "f32",
+ "{%0, %1, %2, %3}|%%p0, [%5, {%6, %7}], {%8, %9}, {%10, %11};\n\t"
+ "selp.u16 %4, 1, 0, %%p0; }",
+ ("f"(__x), "f"(__y), "f"(__dPdx->x), "f"(__dPdx->y), "f"(__dPdy->x),
+ "f"(__dPdy->y)));
+
+__IMPL_S("__tex2DLayered_v2", "__tex2DLayered_rmnf_v2",
+ (float __x, float __y, int __layer), "tex.a2d.v4", "f32",
+ "{%0, %1, %2, %3}, [%4, {%5, %6, %7, %7}];",
+ ("r"(__layer), "f"(__x), "f"(__y)));
+__IMPL_ALIAS("__itex2DLayered", "__tex2DLayered_v2");
+
+__IMPL_S3S("__itex2DLayered_sparse",
+ (float __x, float __y, int __layer, unsigned char *__ir),
+ "{.reg .pred %%p0;\n\t"
+ "tex.a2d.v4",
+ "f32",
+ "{%0, %1, %2, %3}|%%p0, [%5, {%6, %7, %8, %8}];\n\t"
+ "selp.u16 %4, 1, 0, %%p0; }",
+ ("r"(__layer), "f"(__x), "f"(__y)));
+
+__IMPL_S("__tex2DLayeredGrad_v2", "__tex2DLayeredGrad_rmnf_v2",
+ (float __x, float __y, int __layer, const float2 *__dPdx,
+ const float2 *__dPdy),
+ "tex.grad.a2d.v4", "f32",
+ "{%0, %1, %2, %3}, [%4, {%5, %6, %7, %7}], {%8, %9}, {%10, %11};",
+ ("r"(__layer), "f"(__x), "f"(__y), "f"(__dPdx->x), "f"(__dPdx->y),
+ "f"(__dPdy->x), "f"(__dPdy->y)));
+__IMPL_ALIAS("__itex2DLayeredGrad_v2", "__tex2DLayeredGrad_v2");
+
+__IMPL_S3S(
+ "__itex2DLayeredGrad_sparse",
+ (float __x, float __y, int __layer, const float2 *__dPdx,
+ const float2 *__dPdy, unsigned char *__ir),
+ "{.reg .pred %%p0;\n\t"
+ "tex.grad.a2d.v4",
+ "f32",
+ "{%0, %1, %2, %3}|%%p0, [%5, {%6, %7, %8, %8}], {%9, %10}, {%11, %12};\n\t"
+ "selp.u16 %4, 1, 0, %%p0; }",
+ ("r"(__layer), "f"(__x), "f"(__y), "f"(__dPdx->x), "f"(__dPdx->y),
+ "f"(__dPdy->x), "f"(__dPdy->y)));
+
+__IMPL_S("__tex2DLayeredLod_v2", "__tex2DLayeredLod_rmnf_v2",
+ (float __x, float __y, int __layer, float __level), "tex.level.a2d.v4",
+ "f32", "{%0, %1, %2, %3}, [%4, {%5, %6, %7, %7}], %8;",
+ ("r"(__layer), "f"(__x), "f"(__y), "f"(__level)));
+__IMPL_ALIAS("__itex2DLayeredLod", "__tex2DLayeredLod_v2");
+
+__IMPL_S3S("__itex2DLayeredLod_sparse",
+ (float __x, float __y, int __layer, float __level,
+ unsigned char *__ir),
+ "{.reg .pred %%p0;\n\t"
+ "tex.level.a2d.v4",
+ "f32",
+ "{%0, %1, %2, %3}|%%p0, [%5, {%6, %7, %8, %8}], %9;\n\t"
+ "selp.u16 %4, 1, 0, %%p0; }",
+ ("r"(__layer), "f"(__x), "f"(__y), "f"(__level)));
+
+__IMPL_S("__tex2DLod_v2", "__tex2DLod_rmnf_v2",
+ (float __x, float __y, float __level), "tex.level.2d.v4", "f32",
+ "{%0, %1, %2, %3}, [%4, {%5, %6}], %7;",
+ ("f"(__x), "f"(__y), "f"(__level)));
+__IMPL_ALIAS("__itex2DLod", "__tex2DLod_v2");
+
+__IMPL_S3S("__itex2DLod_sparse",
+ (float __x, float __y, float __level, unsigned char *__ir),
+ "{.reg .pred %%p0;\n\t"
+ "tex.level.2d.v4",
+ "f32",
+ "{%0, %1, %2, %3}|%%p0, [%5, {%6, %7}], %8;\n\t"
+ "selp.u16 %4, 1, 0, %%p0; }",
+ ("f"(__x), "f"(__y), "f"(__level)));
+
+// 2D gather is special. Unlike other variants that translate into exactly one
+// asm instruction, it uses one of the four different instructions selected by
+// __comp. We implement each instruction variant separately, and dispatch the
+// right one from the manually implemented 'umbrella' fetch.
+#define __IMPL_2DGATHER(variant, instr) \
+ __IMPL_SI(__IDV("__tex2Dgather_v2", variant), \
+ __IDV("__tex2Dgather_rmnf_v2", variant), \
+ (float __x, float __y, int __comp), instr, "f32", \
+ "{%0, %1, %2, %3}, [%4, {%5, %6}];", ("f"(__x), "f"(__y))); \
+ __IMPL_ALIASI(__IDV("__itex2Dgather", variant), \
+ __IDV("__tex2Dgather_v2", variant)); \
+ __IMPL_S3SI(__IDV("__itex2Dgather_sparse", variant), \
+ (float __x, float __y, unsigned char *__ir, int __comp), \
+ "{.reg .pred %%p0;\n\t" instr, "f32", \
+ "{%0, %1, %2, %3}|%%p0, [%5, {%6, %7}];\n\t" \
+ "selp.u16 %4, 1, 0, %%p0; }", \
+ ("f"(__x), "f"(__y)));
+__IMPL_2DGATHER(0, "tld4.r.2d.v4");
+__IMPL_2DGATHER(1, "tld4.g.2d.v4");
+__IMPL_2DGATHER(2, "tld4.b.2d.v4");
+__IMPL_2DGATHER(3, "tld4.a.2d.v4");
+
+// Umbrella dispatcher -- calls into specific 2Dgather variant.
+template <> struct __tex_fetch_v4<__ID("__tex2Dgather_v2")> {
+ template <class __T>
+ __device__ static __T __run(cudaTextureObject_t __obj, float __x, float __y,
+ int __comp) {
+ switch (__comp) {
+ case 0:
+ return __tex_fetch_v4<__IDV("__tex2Dgather_v2", 0)>::__run<__T>(
+ __obj, __x, __y, __comp);
+ case 1:
+ return __tex_fetch_v4<__IDV("__tex2Dgather_v2", 1)>::__run<__T>(
+ __obj, __x, __y, __comp);
+ case 2:
+ return __tex_fetch_v4<__IDV("__tex2Dgather_v2", 2)>::__run<__T>(
+ __obj, __x, __y, __comp);
+ case 3:
+ return __tex_fetch_v4<__IDV("__tex2Dgather_v2", 3)>::__run<__T>(
+ __obj, __x, __y, __comp);
+ }
+ }
+};
+__IMPL_ALIAS("__itex2Dgather", "__tex2Dgather_v2");
+
+template <> struct __tex_fetch_v4<__ID("__tex2Dgather_rmnf_v2")> {
+ template <class __T>
+ __device__ static float4 __run(cudaTextureObject_t __obj, float __x,
+ float __y, int __comp) {
+ switch (__comp) {
+ case 0:
+ return __tex_fetch_v4<__IDV("__tex2Dgather_rmnf_v2", 0)>::__run<__T>(
+ __obj, __x, __y, __comp);
+ case 1:
+ return __tex_fetch_v4<__IDV("__tex2Dgather_rmnf_v2", 1)>::__run<__T>(
+ __obj, __x, __y, __comp);
+ case 2:
+ return __tex_fetch_v4<__IDV("__tex2Dgather_rmnf_v2", 2)>::__run<__T>(
+ __obj, __x, __y, __comp);
+ case 3:
+ return __tex_fetch_v4<__IDV("__tex2Dgather_rmnf_v2", 3)>::__run<__T>(
+ __obj, __x, __y, __comp);
+ }
+ }
+};
+
+#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 600)
+template <> struct __tex_fetch_v4<__ID("__itex2Dgather_sparse")> {
+ template <class __T>
+ __device__ static __T __run(cudaTextureObject_t __obj, float __x, float __y,
+ unsigned char *__ir, int __comp) {
+ switch (__comp) {
+ case 0:
+ return __tex_fetch_v4<__IDV("__itex2Dgather_sparse", 0)>::__run<__T>(
+ __obj, __x, __y, __ir, __comp);
+ case 1:
+ return __tex_fetch_v4<__IDV("__itex2Dgather_sparse", 1)>::__run<__T>(
+ __obj, __x, __y, __ir, __comp);
+ case 2:
+ return __tex_fetch_v4<__IDV("__itex2Dgather_sparse", 2)>::__run<__T>(
+ __obj, __x, __y, __ir, __comp);
+ case 3:
+ return __tex_fetch_v4<__IDV("__itex2Dgather_sparse", 3)>::__run<__T>(
+ __obj, __x, __y, __ir, __comp);
+ }
+ }
+};
+#endif
+
+// 3D
+__IMPL_S("__tex3D_v2", "__tex3D_rmnf_v2", (float __x, float __y, float __z),
+ "tex.3d.v4", "f32", "{%0, %1, %2, %3}, [%4, {%5, %6, %7, %7}];",
+ ("f"(__x), "f"(__y), "f"(__z)));
+__IMPL_ALIAS("__itex3D", "__tex3D_v2");
+
+__IMPL_S3S("__itex3D_sparse",
+ (float __x, float __y, float __z, unsigned char *__ir),
+ "{.reg .pred %%p0;\n\t"
+ "tex.3d.v4",
+ "f32",
+ "{%0, %1, %2, %3}|%%p0, [%5, {%6, %7, %8, %8}];\n\t"
+ "selp.u16 %4, 1, 0, %%p0; }",
+ ("f"(__x), "f"(__y), "f"(__z)));
+
+__IMPL_S("__tex3DGrad_v2", "__tex3DGrad_rmnf_v2",
+ (float __x, float __y, float __z, const float4 *__dPdx,
+ const float4 *__dPdy),
+ "tex.grad.3d.v4", "f32",
+ "{%0, %1, %2, %3}, [%4, {%5, %6, %7, %7}], "
+ "{%8, %9, %10, %10}, {%11, %12, %13, %13};",
+ ("f"(__x), "f"(__y), "f"(__z), "f"(__dPdx->x), "f"(__dPdx->y),
+ "f"(__dPdx->z), "f"(__dPdy->x), "f"(__dPdy->y), "f"(__dPdy->z)));
+__IMPL_ALIAS("__itex3DGrad_v2", "__tex3DGrad_v2");
+
+__IMPL_S3S("__itex3DGrad_sparse",
+ (float __x, float __y, float __z, const float4 *__dPdx,
+ const float4 *__dPdy, unsigned char *__ir),
+ "{.reg .pred %%p0;\n\t"
+ "tex.grad.3d.v4",
+ "f32",
+ "{%0, %1, %2, %3}|%%p0, [%5, {%6, %7, %8, %8}], "
+ "{%9, %10, %11, %11}, {%12, %13, %14, %14};\n\t"
+ "selp.u16 %4, 1, 0, %%p0; }",
+ ("f"(__x), "f"(__y), "f"(__z), "f"(__dPdx->x), "f"(__dPdx->y),
+ "f"(__dPdx->z), "f"(__dPdy->x), "f"(__dPdy->y), "f"(__dPdy->z)));
+
+__IMPL_S("__tex3DLod_v2", "__tex3DLod_rmnf_v2",
+ (float __x, float __y, float __z, float __level), "tex.level.3d.v4",
+ "f32", "{%0, %1, %2, %3}, [%4, {%5, %6, %7, %7}], %8;",
+ ("f"(__x), "f"(__y), "f"(__z), "f"(__level)));
+__IMPL_ALIAS("__itex3DLod", "__tex3DLod_v2");
+
+__IMPL_S3S("__itex3DLod_sparse",
+ (float __x, float __y, float __z, float __level,
+ unsigned char *__ir),
+ "{.reg .pred %%p0;\n\t"
+ "tex.level.3d.v4",
+ "f32",
+ "{%0, %1, %2, %3}|%%p0, [%5, {%6, %7, %8, %8}], %9;\n\t"
+ "selp.u16 %4, 1, 0, %%p0; }",
+ ("f"(__x), "f"(__y), "f"(__z), "f"(__level)));
+
+// Cubemap
+__IMPL_S("__texCubemap_v2", "__texCubemap_rmnf_v2",
+ (float __x, float __y, float __z), "tex.cube.v4", "f32",
+ "{%0, %1, %2, %3}, [%4, {%5, %6, %7, %7}];",
+ ("f"(__x), "f"(__y), "f"(__z)));
+__IMPL_ALIAS("__itexCubemap", "__texCubemap_v2");
+
+__IMPL_S3S("__itexCubemap_sparse",
+ (float __x, float __y, float __z, unsigned char *__ir),
+ "{.reg .pred %%p0;\n\t"
+ "tex.cube.v4",
+ "f32",
+ "{%0, %1, %2, %3}|%%p0, [%5, {%6, %7, %8, %8}];\n\t"
+ "selp.u16 %4, 1, 0, %%p0; }",
+ ("f"(__x), "f"(__y), "f"(__z)));
+
+__IMPL_S("__texCubemapGrad_v2", "__texCubemapGrad_rmnf_v2",
+ (float __x, float __y, float __z, const float4 *__dPdx,
+ const float4 *__dPdy),
+ "tex.grad.cube.v4", "f32",
+ "{%0, %1, %2, %3}, [%4, {%5, %6, %7, %7}], "
+ "{%8, %9, %10, %10}, {%11, %12, %13, %13};",
+ ("f"(__x), "f"(__y), "f"(__z), "f"(__dPdx->x), "f"(__dPdx->y),
+ "f"(__dPdx->z), "f"(__dPdy->x), "f"(__dPdy->y), "f"(__dPdy->z)));
+__IMPL_ALIAS("__itexCubemapGrad_v2", "__texCubemapGrad_v2");
+
+__IMPL_S("__texCubemapLayered_v2", "__texCubemapLayered_rmnf_v2",
+ (float __x, float __y, float __z, int __layer), "tex.acube.v4", "f32",
+ "{%0, %1, %2, %3}, [%4, {%5, %6, %7, %8}];",
+ ("r"(__layer), "f"(__x), "f"(__y), "f"(__z)));
+__IMPL_ALIAS("__itexCubemapLayered", "__texCubemapLayered_v2");
+
+__IMPL_S("__texCubemapLayeredGrad_v2", "__texCubemapLayeredGrad_rmnf_v2",
+ (float __x, float __y, float __z, int __layer, const float4 *__dPdx,
+ const float4 *__dPdy),
+ "tex.grad.acube.v4", "f32",
+ "{%0, %1, %2, %3}, [%4, {%5, %6, %7, %8}], "
+ "{%9, %10, %11, %11}, {%12, %13, %14, %14};",
+ ("r"(__layer), "f"(__x), "f"(__y), "f"(__z), "f"(__dPdx->x),
+ "f"(__dPdx->y), "f"(__dPdx->z), "f"(__dPdy->x), "f"(__dPdy->y),
+ "f"(__dPdy->z)));
+__IMPL_ALIAS("__itexCubemapLayeredGrad_v2", "__texCubemapLayeredGrad_v2");
+
+__IMPL_S("__texCubemapLayeredLod_v2", "__texCubemapLayeredLod_rmnf_v2",
+ (float __x, float __y, float __z, int __layer, float __level),
+ "tex.level.acube.v4", "f32",
+ "{%0, %1, %2, %3}, [%4, {%5, %6, %7, %8}], %9;",
+ ("r"(__layer), "f"(__x), "f"(__y), "f"(__z), "f"(__level)));
+__IMPL_ALIAS("__itexCubemapLayeredLod", "__texCubemapLayeredLod_v2");
+
+__IMPL_S("__texCubemapLod_v2", "__texCubemapLod_rmnf_v2",
+ (float __x, float __y, float __z, float __level), "tex.level.cube.v4",
+ "f32", "{%0, %1, %2, %3}, [%4, {%5, %6, %7, %7}], %8;",
+ ("f"(__x), "f"(__y), "f"(__z), "f"(__level)));
+__IMPL_ALIAS("__itexCubemapLod", "__texCubemapLod_v2");
+
+// Helper class for extracting slice of data from V4 fetch results.
+template <class __DestT, class __SrcT> struct __convert {
+ template <int __NElements = sizeof(__DestT) /
+ sizeof(typename __TypeInfoT<__DestT>::__base_t)>
+ __device__ static __DestT __run(__SrcT __v);
+ template <> __device__ static __DestT __run<1>(__SrcT __v) { return {__v.x}; }
+ template <> __device__ static __DestT __run<2>(__SrcT __v) {
+ return {__v.x, __v.y};
+ }
+ template <> __device__ static __DestT __run<3>(__SrcT __v) {
+ return {__v.x, __v.y, __v.z};
+ }
+ template <> __device__ static __DestT __run<4>(__SrcT __v) {
+ return {__v.x, __v.y, __v.z, __v.w};
+ }
+};
+
+// These are the top-level function overloads the __nv_tex_surf_handler expands
+// to. Each overload deals with one of the several ways __nv_tex_surf_handler
+// is called by CUDA headers. In the end, each of the overloads does the same
+// job -- it figures out which `__tex_fetch_v4::run` variant should be used to
+// fetch texture data and which `__convert::run` is needed to convert it into
+// appropriate return type.
+
+// __nv_tex_surf_handler("__tex...", &ret, cudaTextureObject_t handle, args...);
+// Data type and return type are based on ret.
+template <class __op, class __T, class... __Args>
+__device__ static void __tex_fetch(__T *__ptr, cudaTextureObject_t __handle,
+ __Args... __args) {
+ using __FetchT = typename __TypeInfoT<__T>::__fetch_t;
+ *__ptr = __convert<__T, __FetchT>::__run(
+ __tex_fetch_v4<__op>::template __run<__FetchT>(__handle, __args...));
+}
+
+#if CUDA_VERSION < 12000
+// texture<> objects get magically converted into a texture reference. However,
+// there's no way to convert them to cudaTextureObject_t on C++ level. So, we
+// cheat a bit and use inline assembly to do it. It costs us an extra register
+// and a move, but that is easy for ptxas to optimize away.
+template <class __T>
+__device__ cudaTextureObject_t __tex_handle_to_obj(__T __handle) {
+ cudaTextureObject_t __obj;
+ asm("mov.b64 %0, %1; " : "=l"(__obj) : "l"(__handle));
+ return __obj;
+}
+
+// __nv_tex_surf_handler ("__tex...", &ret, textureReference, args...);
+// Data type and return type is based on ret.
+template <class __op, class __T, class __HandleT, class... __Args>
+__device__ static void __tex_fetch(__T *__ptr, __HandleT __handle,
+ __Args... __args) {
+ using __FetchT = typename __TypeInfoT<__T>::__fetch_t;
+ *__ptr = __convert<__T, __FetchT>::__run(
+ __tex_fetch_v4<__op>::template __run<__FetchT>(
+ __tex_handle_to_obj(__handle), __args...));
+}
+
+// __nv_tex_surf_handler ("__tex...", &type_dummy, &ret, texture<...>, args...);
+// cudaReadModeNormalizedFloat fetches always return float4.
+template <class __op, class __DataT, class __RetT, int __TexT, class... __Args>
+__device__ static void
+__tex_fetch(__DataT *, __RetT *__ptr,
+ texture<__DataT, __TexT, cudaReadModeNormalizedFloat> __handle,
+ __Args... __args) {
+ using __FetchT = typename __TypeInfoT<__DataT>::__fetch_t;
+ *__ptr = __convert<__RetT, float4>::__run(
+ __tex_fetch_v4<__op>::template __run<__FetchT>(
+ __tex_handle_to_obj(__handle), __args...));
+}
+
+// __nv_tex_surf_handler ("__tex...", &type_dummy, &ret, texture<...>, args...);
+// For cudaReadModeElementType fetch return type is based on type_dummy.
+template <class __op, class __DataT, class __RetT, int __TexT, class... __Args>
+__device__ static void
+__tex_fetch(__DataT *, __RetT *__ptr,
+ texture<__DataT, __TexT, cudaReadModeElementType> __handle,
+ __Args... __args) {
+ using __FetchT = typename __TypeInfoT<__DataT>::__fetch_t;
+ *__ptr = __convert<__RetT, __FetchT>::__run(
+ __tex_fetch_v4<__op>::template __run<__FetchT>(
+ __tex_handle_to_obj(__handle), __args...));
+}
+#endif // CUDA_VERSION
+} // namespace __cuda_tex
+} // namespace
+#pragma pop_macro("__ASM_OUT")
+#pragma pop_macro("__ASM_OUTP")
+#pragma pop_macro("__Args")
+#pragma pop_macro("__ID")
+#pragma pop_macro("__IDV")
+#pragma pop_macro("__IMPL_2DGATHER")
+#pragma pop_macro("__IMPL_ALIAS")
+#pragma pop_macro("__IMPL_ALIASI")
+#pragma pop_macro("__IMPL_F1")
+#pragma pop_macro("__IMPL_F3")
+#pragma pop_macro("__IMPL_F3N")
+#pragma pop_macro("__IMPL_F3S")
+#pragma pop_macro("__IMPL_S")
+#pragma pop_macro("__IMPL_S3")
+#pragma pop_macro("__IMPL_S3I")
+#pragma pop_macro("__IMPL_S3N")
+#pragma pop_macro("__IMPL_S3NI")
+#pragma pop_macro("__IMPL_S3S")
+#pragma pop_macro("__IMPL_S3SI")
+#pragma pop_macro("__IMPL_SI")
+#pragma pop_macro("__L")
+#pragma pop_macro("__STRIP_PARENS")
+#endif // __CLANG_CUDA_TEXTURE_INTRINSICS_H__
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_hip_cmath.h b/contrib/llvm-project/clang/lib/Headers/__clang_hip_cmath.h
index d488db0a94d9..b52d6b781661 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_hip_cmath.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_hip_cmath.h
@@ -171,7 +171,7 @@ __DEVICE__ __CONSTEXPR__ bool signbit(double __x) { return ::__signbit(__x); }
// Other functions.
__DEVICE__ __CONSTEXPR__ _Float16 fma(_Float16 __x, _Float16 __y,
_Float16 __z) {
- return __ocml_fma_f16(__x, __y, __z);
+ return __builtin_fmaf16(__x, __y, __z);
}
__DEVICE__ __CONSTEXPR__ _Float16 pow(_Float16 __base, int __iexp) {
return __ocml_pown_f16(__base, __iexp);
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_hip_libdevice_declares.h b/contrib/llvm-project/clang/lib/Headers/__clang_hip_libdevice_declares.h
index 8be848ba2aa3..f15198b3d9f9 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_hip_libdevice_declares.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_hip_libdevice_declares.h
@@ -10,6 +10,10 @@
#ifndef __CLANG_HIP_LIBDEVICE_DECLARES_H__
#define __CLANG_HIP_LIBDEVICE_DECLARES_H__
+#if !defined(__HIPCC_RTC__) && __has_include("hip/hip_version.h")
+#include "hip/hip_version.h"
+#endif // __has_include("hip/hip_version.h")
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -137,23 +141,6 @@ __device__ __attribute__((const)) float __ocml_fma_rte_f32(float, float, float);
__device__ __attribute__((const)) float __ocml_fma_rtn_f32(float, float, float);
__device__ __attribute__((const)) float __ocml_fma_rtp_f32(float, float, float);
__device__ __attribute__((const)) float __ocml_fma_rtz_f32(float, float, float);
-
-__device__ inline __attribute__((const)) float
-__llvm_amdgcn_cos_f32(float __x) {
- return __builtin_amdgcn_cosf(__x);
-}
-__device__ inline __attribute__((const)) float
-__llvm_amdgcn_rcp_f32(float __x) {
- return __builtin_amdgcn_rcpf(__x);
-}
-__device__ inline __attribute__((const)) float
-__llvm_amdgcn_rsq_f32(float __x) {
- return __builtin_amdgcn_rsqf(__x);
-}
-__device__ inline __attribute__((const)) float
-__llvm_amdgcn_sin_f32(float __x) {
- return __builtin_amdgcn_sinf(__x);
-}
// END INTRINSICS
// END FLOAT
@@ -277,30 +264,25 @@ __device__ __attribute__((const)) double __ocml_fma_rtp_f64(double, double,
__device__ __attribute__((const)) double __ocml_fma_rtz_f64(double, double,
double);
-__device__ inline __attribute__((const)) double
-__llvm_amdgcn_rcp_f64(double __x) {
- return __builtin_amdgcn_rcp(__x);
-}
-__device__ inline __attribute__((const)) double
-__llvm_amdgcn_rsq_f64(double __x) {
- return __builtin_amdgcn_rsq(__x);
-}
-
__device__ __attribute__((const)) _Float16 __ocml_ceil_f16(_Float16);
__device__ _Float16 __ocml_cos_f16(_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_cvtrtn_f16_f32(float);
+__device__ __attribute__((const)) _Float16 __ocml_cvtrtp_f16_f32(float);
+__device__ __attribute__((const)) _Float16 __ocml_cvtrtz_f16_f32(float);
__device__ __attribute__((pure)) _Float16 __ocml_exp_f16(_Float16);
__device__ __attribute__((pure)) _Float16 __ocml_exp10_f16(_Float16);
__device__ __attribute__((pure)) _Float16 __ocml_exp2_f16(_Float16);
__device__ __attribute__((const)) _Float16 __ocml_floor_f16(_Float16);
__device__ __attribute__((const)) _Float16 __ocml_fma_f16(_Float16, _Float16,
_Float16);
+__device__ __attribute__((const)) _Float16 __ocml_fmax_f16(_Float16, _Float16);
+__device__ __attribute__((const)) _Float16 __ocml_fmin_f16(_Float16, _Float16);
__device__ __attribute__((const)) _Float16 __ocml_fabs_f16(_Float16);
__device__ __attribute__((const)) int __ocml_isinf_f16(_Float16);
__device__ __attribute__((const)) int __ocml_isnan_f16(_Float16);
__device__ __attribute__((pure)) _Float16 __ocml_log_f16(_Float16);
__device__ __attribute__((pure)) _Float16 __ocml_log10_f16(_Float16);
__device__ __attribute__((pure)) _Float16 __ocml_log2_f16(_Float16);
-__device__ __attribute__((const)) _Float16 __llvm_amdgcn_rcp_f16(_Float16);
__device__ __attribute__((const)) _Float16 __ocml_rint_f16(_Float16);
__device__ __attribute__((const)) _Float16 __ocml_rsqrt_f16(_Float16);
__device__ _Float16 __ocml_sin_f16(_Float16);
@@ -311,8 +293,15 @@ __device__ __attribute__((pure)) _Float16 __ocml_pown_f16(_Float16, int);
typedef _Float16 __2f16 __attribute__((ext_vector_type(2)));
typedef short __2i16 __attribute__((ext_vector_type(2)));
+// We need to match C99's bool and get an i1 in the IR.
+#ifdef __cplusplus
+typedef bool __ockl_bool;
+#else
+typedef _Bool __ockl_bool;
+#endif
+
__device__ __attribute__((const)) float __ockl_fdot2(__2f16 a, __2f16 b,
- float c, bool s);
+ float c, __ockl_bool s);
__device__ __attribute__((const)) __2f16 __ocml_ceil_2f16(__2f16);
__device__ __attribute__((const)) __2f16 __ocml_fabs_2f16(__2f16);
__device__ __2f16 __ocml_cos_2f16(__2f16);
@@ -327,11 +316,29 @@ __device__ __attribute__((const)) __2i16 __ocml_isnan_2f16(__2f16);
__device__ __attribute__((pure)) __2f16 __ocml_log_2f16(__2f16);
__device__ __attribute__((pure)) __2f16 __ocml_log10_2f16(__2f16);
__device__ __attribute__((pure)) __2f16 __ocml_log2_2f16(__2f16);
+
+#if HIP_VERSION_MAJOR * 100 + HIP_VERSION_MINOR >= 560
+#define __DEPRECATED_SINCE_HIP_560(X) __attribute__((deprecated(X)))
+#else
+#define __DEPRECATED_SINCE_HIP_560(X)
+#endif
+
+// Deprecated, should be removed when rocm releases using it are no longer
+// relevant.
+__DEPRECATED_SINCE_HIP_560("use ((_Float16)1.0) / ")
+__device__ inline _Float16 __llvm_amdgcn_rcp_f16(_Float16 x) {
+ return ((_Float16)1.0f) / x;
+}
+
+__DEPRECATED_SINCE_HIP_560("use ((__2f16)1.0) / ")
__device__ inline __2f16
-__llvm_amdgcn_rcp_2f16(__2f16 __x) // Not currently exposed by ROCDL.
+__llvm_amdgcn_rcp_2f16(__2f16 __x)
{
- return (__2f16)(__llvm_amdgcn_rcp_f16(__x.x), __llvm_amdgcn_rcp_f16(__x.y));
+ return ((__2f16)1.0f) / __x;
}
+
+#undef __DEPRECATED_SINCE_HIP_560
+
__device__ __attribute__((const)) __2f16 __ocml_rint_2f16(__2f16);
__device__ __attribute__((const)) __2f16 __ocml_rsqrt_2f16(__2f16);
__device__ __2f16 __ocml_sin_2f16(__2f16);
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_hip_math.h b/contrib/llvm-project/clang/lib/Headers/__clang_hip_math.h
index ef7e087b832c..11e1e7d03258 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_hip_math.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_hip_math.h
@@ -14,9 +14,6 @@
#endif
#if !defined(__HIPCC_RTC__)
-#if defined(__cplusplus)
-#include <algorithm>
-#endif
#include <limits.h>
#include <stdint.h>
#ifdef __OPENMP_AMDGCN__
@@ -32,6 +29,17 @@
#define __DEVICE__ static __device__ inline __attribute__((always_inline))
#endif
+// Device library provides fast low precision and slow full-recision
+// implementations for some functions. Which one gets selected depends on
+// __CLANG_GPU_APPROX_TRANSCENDENTALS__ which gets defined by clang if
+// -ffast-math or -fgpu-approx-transcendentals are in effect.
+#pragma push_macro("__FAST_OR_SLOW")
+#if defined(__CLANG_GPU_APPROX_TRANSCENDENTALS__)
+#define __FAST_OR_SLOW(fast, slow) fast
+#else
+#define __FAST_OR_SLOW(fast, slow) slow
+#endif
+
// A few functions return bool type starting only in C++11.
#pragma push_macro("__RETURN_TYPE")
#ifdef __OPENMP_AMDGCN__
@@ -70,9 +78,9 @@ __DEVICE__ void __static_assert_equal_size() {
#endif
__DEVICE__
-uint64_t __make_mantissa_base8(const char *__tagp) {
+uint64_t __make_mantissa_base8(const char *__tagp __attribute__((nonnull))) {
uint64_t __r = 0;
- while (__tagp) {
+ while (*__tagp != '\0') {
char __tmp = *__tagp;
if (__tmp >= '0' && __tmp <= '7')
@@ -87,9 +95,9 @@ uint64_t __make_mantissa_base8(const char *__tagp) {
}
__DEVICE__
-uint64_t __make_mantissa_base10(const char *__tagp) {
+uint64_t __make_mantissa_base10(const char *__tagp __attribute__((nonnull))) {
uint64_t __r = 0;
- while (__tagp) {
+ while (*__tagp != '\0') {
char __tmp = *__tagp;
if (__tmp >= '0' && __tmp <= '9')
@@ -104,9 +112,9 @@ uint64_t __make_mantissa_base10(const char *__tagp) {
}
__DEVICE__
-uint64_t __make_mantissa_base16(const char *__tagp) {
+uint64_t __make_mantissa_base16(const char *__tagp __attribute__((nonnull))) {
uint64_t __r = 0;
- while (__tagp) {
+ while (*__tagp != '\0') {
char __tmp = *__tagp;
if (__tmp >= '0' && __tmp <= '9')
@@ -125,10 +133,7 @@ uint64_t __make_mantissa_base16(const char *__tagp) {
}
__DEVICE__
-uint64_t __make_mantissa(const char *__tagp) {
- if (!__tagp)
- return 0u;
-
+uint64_t __make_mantissa(const char *__tagp __attribute__((nonnull))) {
if (*__tagp == '0') {
++__tagp;
@@ -142,21 +147,180 @@ uint64_t __make_mantissa(const char *__tagp) {
}
// BEGIN FLOAT
+
+// BEGIN INTRINSICS
+
+__DEVICE__
+float __cosf(float __x) { return __ocml_native_cos_f32(__x); }
+
+__DEVICE__
+float __exp10f(float __x) {
+ const float __log2_10 = 0x1.a934f0p+1f;
+ return __builtin_amdgcn_exp2f(__log2_10 * __x);
+}
+
+__DEVICE__
+float __expf(float __x) {
+ const float __log2_e = 0x1.715476p+0;
+ return __builtin_amdgcn_exp2f(__log2_e * __x);
+}
+
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fadd_rd(float __x, float __y) { return __ocml_add_rtn_f32(__x, __y); }
+__DEVICE__
+float __fadd_rn(float __x, float __y) { return __ocml_add_rte_f32(__x, __y); }
+__DEVICE__
+float __fadd_ru(float __x, float __y) { return __ocml_add_rtp_f32(__x, __y); }
+__DEVICE__
+float __fadd_rz(float __x, float __y) { return __ocml_add_rtz_f32(__x, __y); }
+#else
+__DEVICE__
+float __fadd_rn(float __x, float __y) { return __x + __y; }
+#endif
+
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fdiv_rd(float __x, float __y) { return __ocml_div_rtn_f32(__x, __y); }
+__DEVICE__
+float __fdiv_rn(float __x, float __y) { return __ocml_div_rte_f32(__x, __y); }
+__DEVICE__
+float __fdiv_ru(float __x, float __y) { return __ocml_div_rtp_f32(__x, __y); }
+__DEVICE__
+float __fdiv_rz(float __x, float __y) { return __ocml_div_rtz_f32(__x, __y); }
+#else
+__DEVICE__
+float __fdiv_rn(float __x, float __y) { return __x / __y; }
+#endif
+
+__DEVICE__
+float __fdividef(float __x, float __y) { return __x / __y; }
+
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fmaf_rd(float __x, float __y, float __z) {
+ return __ocml_fma_rtn_f32(__x, __y, __z);
+}
+__DEVICE__
+float __fmaf_rn(float __x, float __y, float __z) {
+ return __ocml_fma_rte_f32(__x, __y, __z);
+}
+__DEVICE__
+float __fmaf_ru(float __x, float __y, float __z) {
+ return __ocml_fma_rtp_f32(__x, __y, __z);
+}
+__DEVICE__
+float __fmaf_rz(float __x, float __y, float __z) {
+ return __ocml_fma_rtz_f32(__x, __y, __z);
+}
+#else
+__DEVICE__
+float __fmaf_rn(float __x, float __y, float __z) {
+ return __builtin_fmaf(__x, __y, __z);
+}
+#endif
+
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fmul_rd(float __x, float __y) { return __ocml_mul_rtn_f32(__x, __y); }
+__DEVICE__
+float __fmul_rn(float __x, float __y) { return __ocml_mul_rte_f32(__x, __y); }
+__DEVICE__
+float __fmul_ru(float __x, float __y) { return __ocml_mul_rtp_f32(__x, __y); }
+__DEVICE__
+float __fmul_rz(float __x, float __y) { return __ocml_mul_rtz_f32(__x, __y); }
+#else
+__DEVICE__
+float __fmul_rn(float __x, float __y) { return __x * __y; }
+#endif
+
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __frcp_rd(float __x) { return __ocml_div_rtn_f32(1.0f, __x); }
+__DEVICE__
+float __frcp_rn(float __x) { return __ocml_div_rte_f32(1.0f, __x); }
+__DEVICE__
+float __frcp_ru(float __x) { return __ocml_div_rtp_f32(1.0f, __x); }
+__DEVICE__
+float __frcp_rz(float __x) { return __ocml_div_rtz_f32(1.0f, __x); }
+#else
+__DEVICE__
+float __frcp_rn(float __x) { return 1.0f / __x; }
+#endif
+
+__DEVICE__
+float __frsqrt_rn(float __x) { return __builtin_amdgcn_rsqf(__x); }
+
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fsqrt_rd(float __x) { return __ocml_sqrt_rtn_f32(__x); }
+__DEVICE__
+float __fsqrt_rn(float __x) { return __ocml_sqrt_rte_f32(__x); }
+__DEVICE__
+float __fsqrt_ru(float __x) { return __ocml_sqrt_rtp_f32(__x); }
+__DEVICE__
+float __fsqrt_rz(float __x) { return __ocml_sqrt_rtz_f32(__x); }
+#else
+__DEVICE__
+float __fsqrt_rn(float __x) { return __ocml_native_sqrt_f32(__x); }
+#endif
+
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fsub_rd(float __x, float __y) { return __ocml_sub_rtn_f32(__x, __y); }
+__DEVICE__
+float __fsub_rn(float __x, float __y) { return __ocml_sub_rte_f32(__x, __y); }
+__DEVICE__
+float __fsub_ru(float __x, float __y) { return __ocml_sub_rtp_f32(__x, __y); }
+__DEVICE__
+float __fsub_rz(float __x, float __y) { return __ocml_sub_rtz_f32(__x, __y); }
+#else
+__DEVICE__
+float __fsub_rn(float __x, float __y) { return __x - __y; }
+#endif
+
+__DEVICE__
+float __log10f(float __x) { return __builtin_log10f(__x); }
+
+__DEVICE__
+float __log2f(float __x) { return __builtin_amdgcn_logf(__x); }
+
+__DEVICE__
+float __logf(float __x) { return __builtin_logf(__x); }
+
+__DEVICE__
+float __powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
+
+__DEVICE__
+float __saturatef(float __x) { return (__x < 0) ? 0 : ((__x > 1) ? 1 : __x); }
+
+__DEVICE__
+void __sincosf(float __x, float *__sinptr, float *__cosptr) {
+ *__sinptr = __ocml_native_sin_f32(__x);
+ *__cosptr = __ocml_native_cos_f32(__x);
+}
+
+__DEVICE__
+float __sinf(float __x) { return __ocml_native_sin_f32(__x); }
+
+__DEVICE__
+float __tanf(float __x) {
+ return __sinf(__x) * __builtin_amdgcn_rcpf(__cosf(__x));
+}
+// END INTRINSICS
+
#if defined(__cplusplus)
__DEVICE__
int abs(int __x) {
- int __sgn = __x >> (sizeof(int) * CHAR_BIT - 1);
- return (__x ^ __sgn) - __sgn;
+ return __builtin_abs(__x);
}
__DEVICE__
long labs(long __x) {
- long __sgn = __x >> (sizeof(long) * CHAR_BIT - 1);
- return (__x ^ __sgn) - __sgn;
+ return __builtin_labs(__x);
}
__DEVICE__
long long llabs(long long __x) {
- long long __sgn = __x >> (sizeof(long long) * CHAR_BIT - 1);
- return (__x ^ __sgn) - __sgn;
+ return __builtin_llabs(__x);
}
#endif
@@ -185,13 +349,13 @@ __DEVICE__
float cbrtf(float __x) { return __ocml_cbrt_f32(__x); }
__DEVICE__
-float ceilf(float __x) { return __ocml_ceil_f32(__x); }
+float ceilf(float __x) { return __builtin_ceilf(__x); }
__DEVICE__
-float copysignf(float __x, float __y) { return __ocml_copysign_f32(__x, __y); }
+float copysignf(float __x, float __y) { return __builtin_copysignf(__x, __y); }
__DEVICE__
-float cosf(float __x) { return __ocml_cos_f32(__x); }
+float cosf(float __x) { return __FAST_OR_SLOW(__cosf, __ocml_cos_f32)(__x); }
__DEVICE__
float coshf(float __x) { return __ocml_cosh_f32(__x); }
@@ -224,16 +388,16 @@ __DEVICE__
float exp10f(float __x) { return __ocml_exp10_f32(__x); }
__DEVICE__
-float exp2f(float __x) { return __ocml_exp2_f32(__x); }
+float exp2f(float __x) { return __builtin_exp2f(__x); }
__DEVICE__
-float expf(float __x) { return __ocml_exp_f32(__x); }
+float expf(float __x) { return __builtin_expf(__x); }
__DEVICE__
float expm1f(float __x) { return __ocml_expm1_f32(__x); }
__DEVICE__
-float fabsf(float __x) { return __ocml_fabs_f32(__x); }
+float fabsf(float __x) { return __builtin_fabsf(__x); }
__DEVICE__
float fdimf(float __x, float __y) { return __ocml_fdim_f32(__x, __y); }
@@ -242,33 +406,25 @@ __DEVICE__
float fdividef(float __x, float __y) { return __x / __y; }
__DEVICE__
-float floorf(float __x) { return __ocml_floor_f32(__x); }
+float floorf(float __x) { return __builtin_floorf(__x); }
__DEVICE__
float fmaf(float __x, float __y, float __z) {
- return __ocml_fma_f32(__x, __y, __z);
+ return __builtin_fmaf(__x, __y, __z);
}
__DEVICE__
-float fmaxf(float __x, float __y) { return __ocml_fmax_f32(__x, __y); }
+float fmaxf(float __x, float __y) { return __builtin_fmaxf(__x, __y); }
__DEVICE__
-float fminf(float __x, float __y) { return __ocml_fmin_f32(__x, __y); }
+float fminf(float __x, float __y) { return __builtin_fminf(__x, __y); }
__DEVICE__
float fmodf(float __x, float __y) { return __ocml_fmod_f32(__x, __y); }
__DEVICE__
float frexpf(float __x, int *__nptr) {
- int __tmp;
-#ifdef __OPENMP_AMDGCN__
-#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
-#endif
- float __r =
- __ocml_frexp_f32(__x, (__attribute__((address_space(5))) int *)&__tmp);
- *__nptr = __tmp;
-
- return __r;
+ return __builtin_frexpf(__x, __nptr);
}
__DEVICE__
@@ -278,13 +434,13 @@ __DEVICE__
int ilogbf(float __x) { return __ocml_ilogb_f32(__x); }
__DEVICE__
-__RETURN_TYPE __finitef(float __x) { return __ocml_isfinite_f32(__x); }
+__RETURN_TYPE __finitef(float __x) { return __builtin_isfinite(__x); }
__DEVICE__
-__RETURN_TYPE __isinff(float __x) { return __ocml_isinf_f32(__x); }
+__RETURN_TYPE __isinff(float __x) { return __builtin_isinf(__x); }
__DEVICE__
-__RETURN_TYPE __isnanf(float __x) { return __ocml_isnan_f32(__x); }
+__RETURN_TYPE __isnanf(float __x) { return __builtin_isnan(__x); }
__DEVICE__
float j0f(float __x) { return __ocml_j0_f32(__x); }
@@ -314,37 +470,37 @@ float jnf(int __n, float __x) { // TODO: we could use Ahmes multiplication
}
__DEVICE__
-float ldexpf(float __x, int __e) { return __ocml_ldexp_f32(__x, __e); }
+float ldexpf(float __x, int __e) { return __builtin_amdgcn_ldexpf(__x, __e); }
__DEVICE__
float lgammaf(float __x) { return __ocml_lgamma_f32(__x); }
__DEVICE__
-long long int llrintf(float __x) { return __ocml_rint_f32(__x); }
+long long int llrintf(float __x) { return __builtin_rintf(__x); }
__DEVICE__
-long long int llroundf(float __x) { return __ocml_round_f32(__x); }
+long long int llroundf(float __x) { return __builtin_roundf(__x); }
__DEVICE__
-float log10f(float __x) { return __ocml_log10_f32(__x); }
+float log10f(float __x) { return __builtin_log10f(__x); }
__DEVICE__
float log1pf(float __x) { return __ocml_log1p_f32(__x); }
__DEVICE__
-float log2f(float __x) { return __ocml_log2_f32(__x); }
+float log2f(float __x) { return __FAST_OR_SLOW(__log2f, __ocml_log2_f32)(__x); }
__DEVICE__
float logbf(float __x) { return __ocml_logb_f32(__x); }
__DEVICE__
-float logf(float __x) { return __ocml_log_f32(__x); }
+float logf(float __x) { return __FAST_OR_SLOW(__logf, __ocml_log_f32)(__x); }
__DEVICE__
-long int lrintf(float __x) { return __ocml_rint_f32(__x); }
+long int lrintf(float __x) { return __builtin_rintf(__x); }
__DEVICE__
-long int lroundf(float __x) { return __ocml_round_f32(__x); }
+long int lroundf(float __x) { return __builtin_roundf(__x); }
__DEVICE__
float modff(float __x, float *__iptr) {
@@ -359,7 +515,7 @@ float modff(float __x, float *__iptr) {
}
__DEVICE__
-float nanf(const char *__tagp) {
+float nanf(const char *__tagp __attribute__((nonnull))) {
union {
float val;
struct ieee_float {
@@ -380,7 +536,7 @@ float nanf(const char *__tagp) {
}
__DEVICE__
-float nearbyintf(float __x) { return __ocml_nearbyint_f32(__x); }
+float nearbyintf(float __x) { return __builtin_nearbyintf(__x); }
__DEVICE__
float nextafterf(float __x, float __y) {
@@ -412,7 +568,7 @@ float normf(int __dim,
++__a;
}
- return __ocml_sqrt_f32(__r);
+ return __builtin_sqrtf(__r);
}
__DEVICE__
@@ -446,7 +602,7 @@ __DEVICE__
float rhypotf(float __x, float __y) { return __ocml_rhypot_f32(__x, __y); }
__DEVICE__
-float rintf(float __x) { return __ocml_rint_f32(__x); }
+float rintf(float __x) { return __builtin_rintf(__x); }
__DEVICE__
float rnorm3df(float __x, float __y, float __z) {
@@ -471,22 +627,22 @@ float rnormf(int __dim,
}
__DEVICE__
-float roundf(float __x) { return __ocml_round_f32(__x); }
+float roundf(float __x) { return __builtin_roundf(__x); }
__DEVICE__
float rsqrtf(float __x) { return __ocml_rsqrt_f32(__x); }
__DEVICE__
float scalblnf(float __x, long int __n) {
- return (__n < INT_MAX) ? __ocml_scalbn_f32(__x, __n)
+ return (__n < INT_MAX) ? __builtin_amdgcn_ldexpf(__x, __n)
: __ocml_scalb_f32(__x, __n);
}
__DEVICE__
-float scalbnf(float __x, int __n) { return __ocml_scalbn_f32(__x, __n); }
+float scalbnf(float __x, int __n) { return __builtin_amdgcn_ldexpf(__x, __n); }
__DEVICE__
-__RETURN_TYPE __signbitf(float __x) { return __ocml_signbit_f32(__x); }
+__RETURN_TYPE __signbitf(float __x) { return __builtin_signbitf(__x); }
__DEVICE__
void sincosf(float __x, float *__sinptr, float *__cosptr) {
@@ -494,9 +650,13 @@ void sincosf(float __x, float *__sinptr, float *__cosptr) {
#ifdef __OPENMP_AMDGCN__
#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
#endif
+#ifdef __CLANG_CUDA_APPROX_TRANSCENDENTALS__
+ __sincosf(__x, __sinptr, __cosptr);
+#else
*__sinptr =
__ocml_sincos_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
*__cosptr = __tmp;
+#endif
}
__DEVICE__
@@ -511,7 +671,7 @@ void sincospif(float __x, float *__sinptr, float *__cosptr) {
}
__DEVICE__
-float sinf(float __x) { return __ocml_sin_f32(__x); }
+float sinf(float __x) { return __FAST_OR_SLOW(__sinf, __ocml_sin_f32)(__x); }
__DEVICE__
float sinhf(float __x) { return __ocml_sinh_f32(__x); }
@@ -520,7 +680,7 @@ __DEVICE__
float sinpif(float __x) { return __ocml_sinpi_f32(__x); }
__DEVICE__
-float sqrtf(float __x) { return __ocml_sqrt_f32(__x); }
+float sqrtf(float __x) { return __builtin_sqrtf(__x); }
__DEVICE__
float tanf(float __x) { return __ocml_tan_f32(__x); }
@@ -532,7 +692,7 @@ __DEVICE__
float tgammaf(float __x) { return __ocml_tgamma_f32(__x); }
__DEVICE__
-float truncf(float __x) { return __ocml_trunc_f32(__x); }
+float truncf(float __x) { return __builtin_truncf(__x); }
__DEVICE__
float y0f(float __x) { return __ocml_y0_f32(__x); }
@@ -562,158 +722,7 @@ float ynf(int __n, float __x) { // TODO: we could use Ahmes multiplication
return __x1;
}
-// BEGIN INTRINSICS
-
-__DEVICE__
-float __cosf(float __x) { return __ocml_native_cos_f32(__x); }
-
-__DEVICE__
-float __exp10f(float __x) { return __ocml_native_exp10_f32(__x); }
-
-__DEVICE__
-float __expf(float __x) { return __ocml_native_exp_f32(__x); }
-
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-float __fadd_rd(float __x, float __y) { return __ocml_add_rtn_f32(__x, __y); }
-__DEVICE__
-float __fadd_rn(float __x, float __y) { return __ocml_add_rte_f32(__x, __y); }
-__DEVICE__
-float __fadd_ru(float __x, float __y) { return __ocml_add_rtp_f32(__x, __y); }
-__DEVICE__
-float __fadd_rz(float __x, float __y) { return __ocml_add_rtz_f32(__x, __y); }
-#else
-__DEVICE__
-float __fadd_rn(float __x, float __y) { return __x + __y; }
-#endif
-
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-float __fdiv_rd(float __x, float __y) { return __ocml_div_rtn_f32(__x, __y); }
-__DEVICE__
-float __fdiv_rn(float __x, float __y) { return __ocml_div_rte_f32(__x, __y); }
-__DEVICE__
-float __fdiv_ru(float __x, float __y) { return __ocml_div_rtp_f32(__x, __y); }
-__DEVICE__
-float __fdiv_rz(float __x, float __y) { return __ocml_div_rtz_f32(__x, __y); }
-#else
-__DEVICE__
-float __fdiv_rn(float __x, float __y) { return __x / __y; }
-#endif
-
-__DEVICE__
-float __fdividef(float __x, float __y) { return __x / __y; }
-
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-float __fmaf_rd(float __x, float __y, float __z) {
- return __ocml_fma_rtn_f32(__x, __y, __z);
-}
-__DEVICE__
-float __fmaf_rn(float __x, float __y, float __z) {
- return __ocml_fma_rte_f32(__x, __y, __z);
-}
-__DEVICE__
-float __fmaf_ru(float __x, float __y, float __z) {
- return __ocml_fma_rtp_f32(__x, __y, __z);
-}
-__DEVICE__
-float __fmaf_rz(float __x, float __y, float __z) {
- return __ocml_fma_rtz_f32(__x, __y, __z);
-}
-#else
-__DEVICE__
-float __fmaf_rn(float __x, float __y, float __z) {
- return __ocml_fma_f32(__x, __y, __z);
-}
-#endif
-
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-float __fmul_rd(float __x, float __y) { return __ocml_mul_rtn_f32(__x, __y); }
-__DEVICE__
-float __fmul_rn(float __x, float __y) { return __ocml_mul_rte_f32(__x, __y); }
-__DEVICE__
-float __fmul_ru(float __x, float __y) { return __ocml_mul_rtp_f32(__x, __y); }
-__DEVICE__
-float __fmul_rz(float __x, float __y) { return __ocml_mul_rtz_f32(__x, __y); }
-#else
-__DEVICE__
-float __fmul_rn(float __x, float __y) { return __x * __y; }
-#endif
-
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-float __frcp_rd(float __x) { return __ocml_div_rtn_f32(1.0f, __x); }
-__DEVICE__
-float __frcp_rn(float __x) { return __ocml_div_rte_f32(1.0f, __x); }
-__DEVICE__
-float __frcp_ru(float __x) { return __ocml_div_rtp_f32(1.0f, __x); }
-__DEVICE__
-float __frcp_rz(float __x) { return __ocml_div_rtz_f32(1.0f, __x); }
-#else
-__DEVICE__
-float __frcp_rn(float __x) { return 1.0f / __x; }
-#endif
-
-__DEVICE__
-float __frsqrt_rn(float __x) { return __llvm_amdgcn_rsq_f32(__x); }
-
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-float __fsqrt_rd(float __x) { return __ocml_sqrt_rtn_f32(__x); }
-__DEVICE__
-float __fsqrt_rn(float __x) { return __ocml_sqrt_rte_f32(__x); }
-__DEVICE__
-float __fsqrt_ru(float __x) { return __ocml_sqrt_rtp_f32(__x); }
-__DEVICE__
-float __fsqrt_rz(float __x) { return __ocml_sqrt_rtz_f32(__x); }
-#else
-__DEVICE__
-float __fsqrt_rn(float __x) { return __ocml_native_sqrt_f32(__x); }
-#endif
-
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-float __fsub_rd(float __x, float __y) { return __ocml_sub_rtn_f32(__x, __y); }
-__DEVICE__
-float __fsub_rn(float __x, float __y) { return __ocml_sub_rte_f32(__x, __y); }
-__DEVICE__
-float __fsub_ru(float __x, float __y) { return __ocml_sub_rtp_f32(__x, __y); }
-__DEVICE__
-float __fsub_rz(float __x, float __y) { return __ocml_sub_rtz_f32(__x, __y); }
-#else
-__DEVICE__
-float __fsub_rn(float __x, float __y) { return __x - __y; }
-#endif
-
-__DEVICE__
-float __log10f(float __x) { return __ocml_native_log10_f32(__x); }
-
-__DEVICE__
-float __log2f(float __x) { return __ocml_native_log2_f32(__x); }
-
-__DEVICE__
-float __logf(float __x) { return __ocml_native_log_f32(__x); }
-
-__DEVICE__
-float __powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
-
-__DEVICE__
-float __saturatef(float __x) { return (__x < 0) ? 0 : ((__x > 1) ? 1 : __x); }
-
-__DEVICE__
-void __sincosf(float __x, float *__sinptr, float *__cosptr) {
- *__sinptr = __ocml_native_sin_f32(__x);
- *__cosptr = __ocml_native_cos_f32(__x);
-}
-
-__DEVICE__
-float __sinf(float __x) { return __ocml_native_sin_f32(__x); }
-__DEVICE__
-float __tanf(float __x) { return __ocml_tan_f32(__x); }
-// END INTRINSICS
// END FLOAT
// BEGIN DOUBLE
@@ -742,11 +751,11 @@ __DEVICE__
double cbrt(double __x) { return __ocml_cbrt_f64(__x); }
__DEVICE__
-double ceil(double __x) { return __ocml_ceil_f64(__x); }
+double ceil(double __x) { return __builtin_ceil(__x); }
__DEVICE__
double copysign(double __x, double __y) {
- return __ocml_copysign_f64(__x, __y);
+ return __builtin_copysign(__x, __y);
}
__DEVICE__
@@ -792,38 +801,31 @@ __DEVICE__
double expm1(double __x) { return __ocml_expm1_f64(__x); }
__DEVICE__
-double fabs(double __x) { return __ocml_fabs_f64(__x); }
+double fabs(double __x) { return __builtin_fabs(__x); }
__DEVICE__
double fdim(double __x, double __y) { return __ocml_fdim_f64(__x, __y); }
__DEVICE__
-double floor(double __x) { return __ocml_floor_f64(__x); }
+double floor(double __x) { return __builtin_floor(__x); }
__DEVICE__
double fma(double __x, double __y, double __z) {
- return __ocml_fma_f64(__x, __y, __z);
+ return __builtin_fma(__x, __y, __z);
}
__DEVICE__
-double fmax(double __x, double __y) { return __ocml_fmax_f64(__x, __y); }
+double fmax(double __x, double __y) { return __builtin_fmax(__x, __y); }
__DEVICE__
-double fmin(double __x, double __y) { return __ocml_fmin_f64(__x, __y); }
+double fmin(double __x, double __y) { return __builtin_fmin(__x, __y); }
__DEVICE__
double fmod(double __x, double __y) { return __ocml_fmod_f64(__x, __y); }
__DEVICE__
double frexp(double __x, int *__nptr) {
- int __tmp;
-#ifdef __OPENMP_AMDGCN__
-#pragma omp allocate(__tmp) allocator(omp_thread_mem_alloc)
-#endif
- double __r =
- __ocml_frexp_f64(__x, (__attribute__((address_space(5))) int *)&__tmp);
- *__nptr = __tmp;
- return __r;
+ return __builtin_frexp(__x, __nptr);
}
__DEVICE__
@@ -833,13 +835,13 @@ __DEVICE__
int ilogb(double __x) { return __ocml_ilogb_f64(__x); }
__DEVICE__
-__RETURN_TYPE __finite(double __x) { return __ocml_isfinite_f64(__x); }
+__RETURN_TYPE __finite(double __x) { return __builtin_isfinite(__x); }
__DEVICE__
-__RETURN_TYPE __isinf(double __x) { return __ocml_isinf_f64(__x); }
+__RETURN_TYPE __isinf(double __x) { return __builtin_isinf(__x); }
__DEVICE__
-__RETURN_TYPE __isnan(double __x) { return __ocml_isnan_f64(__x); }
+__RETURN_TYPE __isnan(double __x) { return __builtin_isnan(__x); }
__DEVICE__
double j0(double __x) { return __ocml_j0_f64(__x); }
@@ -869,16 +871,16 @@ double jn(int __n, double __x) { // TODO: we could use Ahmes multiplication
}
__DEVICE__
-double ldexp(double __x, int __e) { return __ocml_ldexp_f64(__x, __e); }
+double ldexp(double __x, int __e) { return __builtin_amdgcn_ldexp(__x, __e); }
__DEVICE__
double lgamma(double __x) { return __ocml_lgamma_f64(__x); }
__DEVICE__
-long long int llrint(double __x) { return __ocml_rint_f64(__x); }
+long long int llrint(double __x) { return __builtin_rint(__x); }
__DEVICE__
-long long int llround(double __x) { return __ocml_round_f64(__x); }
+long long int llround(double __x) { return __builtin_round(__x); }
__DEVICE__
double log(double __x) { return __ocml_log_f64(__x); }
@@ -896,10 +898,10 @@ __DEVICE__
double logb(double __x) { return __ocml_logb_f64(__x); }
__DEVICE__
-long int lrint(double __x) { return __ocml_rint_f64(__x); }
+long int lrint(double __x) { return __builtin_rint(__x); }
__DEVICE__
-long int lround(double __x) { return __ocml_round_f64(__x); }
+long int lround(double __x) { return __builtin_round(__x); }
__DEVICE__
double modf(double __x, double *__iptr) {
@@ -943,7 +945,7 @@ double nan(const char *__tagp) {
}
__DEVICE__
-double nearbyint(double __x) { return __ocml_nearbyint_f64(__x); }
+double nearbyint(double __x) { return __builtin_nearbyint(__x); }
__DEVICE__
double nextafter(double __x, double __y) {
@@ -959,7 +961,7 @@ double norm(int __dim,
++__a;
}
- return __ocml_sqrt_f64(__r);
+ return __builtin_sqrt(__r);
}
__DEVICE__
@@ -1009,7 +1011,7 @@ __DEVICE__
double rhypot(double __x, double __y) { return __ocml_rhypot_f64(__x, __y); }
__DEVICE__
-double rint(double __x) { return __ocml_rint_f64(__x); }
+double rint(double __x) { return __builtin_rint(__x); }
__DEVICE__
double rnorm(int __dim,
@@ -1034,21 +1036,21 @@ double rnorm4d(double __x, double __y, double __z, double __w) {
}
__DEVICE__
-double round(double __x) { return __ocml_round_f64(__x); }
+double round(double __x) { return __builtin_round(__x); }
__DEVICE__
double rsqrt(double __x) { return __ocml_rsqrt_f64(__x); }
__DEVICE__
double scalbln(double __x, long int __n) {
- return (__n < INT_MAX) ? __ocml_scalbn_f64(__x, __n)
+ return (__n < INT_MAX) ? __builtin_amdgcn_ldexp(__x, __n)
: __ocml_scalb_f64(__x, __n);
}
__DEVICE__
-double scalbn(double __x, int __n) { return __ocml_scalbn_f64(__x, __n); }
+double scalbn(double __x, int __n) { return __builtin_amdgcn_ldexp(__x, __n); }
__DEVICE__
-__RETURN_TYPE __signbit(double __x) { return __ocml_signbit_f64(__x); }
+__RETURN_TYPE __signbit(double __x) { return __builtin_signbit(__x); }
__DEVICE__
double sin(double __x) { return __ocml_sin_f64(__x); }
@@ -1082,7 +1084,7 @@ __DEVICE__
double sinpi(double __x) { return __ocml_sinpi_f64(__x); }
__DEVICE__
-double sqrt(double __x) { return __ocml_sqrt_f64(__x); }
+double sqrt(double __x) { return __builtin_sqrt(__x); }
__DEVICE__
double tan(double __x) { return __ocml_tan_f64(__x); }
@@ -1094,7 +1096,7 @@ __DEVICE__
double tgamma(double __x) { return __ocml_tgamma_f64(__x); }
__DEVICE__
-double trunc(double __x) { return __ocml_trunc_f64(__x); }
+double trunc(double __x) { return __builtin_trunc(__x); }
__DEVICE__
double y0(double __x) { return __ocml_y0_f64(__x); }
@@ -1216,7 +1218,7 @@ __DEVICE__
double __dsqrt_rz(double __x) { return __ocml_sqrt_rtz_f64(__x); }
#else
__DEVICE__
-double __dsqrt_rn(double __x) { return __ocml_sqrt_f64(__x); }
+double __dsqrt_rn(double __x) { return __builtin_sqrt(__x); }
#endif
#if defined OCML_BASIC_ROUNDED_OPERATIONS
@@ -1261,7 +1263,7 @@ double __fma_rz(double __x, double __y, double __z) {
#else
__DEVICE__
double __fma_rn(double __x, double __y, double __z) {
- return __ocml_fma_f64(__x, __y, __z);
+ return __builtin_fma(__x, __y, __z);
}
#endif
// END INTRINSICS
@@ -1293,29 +1295,30 @@ __DEVICE__ int max(int __arg1, int __arg2) {
}
__DEVICE__
-float max(float __x, float __y) { return fmaxf(__x, __y); }
+float max(float __x, float __y) { return __builtin_fmaxf(__x, __y); }
__DEVICE__
-double max(double __x, double __y) { return fmax(__x, __y); }
+double max(double __x, double __y) { return __builtin_fmax(__x, __y); }
__DEVICE__
-float min(float __x, float __y) { return fminf(__x, __y); }
+float min(float __x, float __y) { return __builtin_fminf(__x, __y); }
__DEVICE__
-double min(double __x, double __y) { return fmin(__x, __y); }
+double min(double __x, double __y) { return __builtin_fmin(__x, __y); }
#if !defined(__HIPCC_RTC__) && !defined(__OPENMP_AMDGCN__)
__host__ inline static int min(int __arg1, int __arg2) {
- return std::min(__arg1, __arg2);
+ return __arg1 < __arg2 ? __arg1 : __arg2;
}
__host__ inline static int max(int __arg1, int __arg2) {
- return std::max(__arg1, __arg2);
+ return __arg1 > __arg2 ? __arg1 : __arg2;
}
#endif // !defined(__HIPCC_RTC__) && !defined(__OPENMP_AMDGCN__)
#endif
#pragma pop_macro("__DEVICE__")
#pragma pop_macro("__RETURN_TYPE")
+#pragma pop_macro("__FAST_OR_SLOW")
#endif // __CLANG_HIP_MATH_H__
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_hip_runtime_wrapper.h b/contrib/llvm-project/clang/lib/Headers/__clang_hip_runtime_wrapper.h
index 73021d256cba..ed1550038e63 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_hip_runtime_wrapper.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_hip_runtime_wrapper.h
@@ -47,25 +47,9 @@ extern "C" {
#endif //__cplusplus
#if !defined(__HIPCC_RTC__)
-#include <cmath>
-#include <cstdlib>
-#include <stdlib.h>
-#else
-typedef __SIZE_TYPE__ size_t;
-// Define macros which are needed to declare HIP device API's without standard
-// C/C++ headers. This is for readability so that these API's can be written
-// the same way as non-hipRTC use case. These macros need to be popped so that
-// they do not pollute users' name space.
-#pragma push_macro("NULL")
-#pragma push_macro("uint32_t")
-#pragma push_macro("uint64_t")
-#pragma push_macro("CHAR_BIT")
-#pragma push_macro("INT_MAX")
-#define NULL (void *)0
-#define uint32_t __UINT32_TYPE__
-#define uint64_t __UINT64_TYPE__
-#define CHAR_BIT __CHAR_BIT__
-#define INT_MAX __INTMAX_MAX__
+#if __has_include("hip/hip_version.h")
+#include "hip/hip_version.h"
+#endif // __has_include("hip/hip_version.h")
#endif // __HIPCC_RTC__
typedef __SIZE_TYPE__ __hip_size_t;
@@ -74,32 +58,83 @@ typedef __SIZE_TYPE__ __hip_size_t;
extern "C" {
#endif //__cplusplus
+#if HIP_VERSION_MAJOR * 100 + HIP_VERSION_MINOR >= 405
+__device__ unsigned long long __ockl_dm_alloc(unsigned long long __size);
+__device__ void __ockl_dm_dealloc(unsigned long long __addr);
+#if __has_feature(address_sanitizer)
+__device__ unsigned long long __asan_malloc_impl(unsigned long long __size,
+ unsigned long long __pc);
+__device__ void __asan_free_impl(unsigned long long __addr,
+ unsigned long long __pc);
+__attribute__((noinline, weak)) __device__ void *malloc(__hip_size_t __size) {
+ unsigned long long __pc = (unsigned long long)__builtin_return_address(0);
+ return (void *)__asan_malloc_impl(__size, __pc);
+}
+__attribute__((noinline, weak)) __device__ void free(void *__ptr) {
+ unsigned long long __pc = (unsigned long long)__builtin_return_address(0);
+ __asan_free_impl((unsigned long long)__ptr, __pc);
+}
+#else // __has_feature(address_sanitizer)
+__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
+ return (void *) __ockl_dm_alloc(__size);
+}
+__attribute__((weak)) inline __device__ void free(void *__ptr) {
+ __ockl_dm_dealloc((unsigned long long)__ptr);
+}
+#endif // __has_feature(address_sanitizer)
+#else // HIP version check
#if __HIP_ENABLE_DEVICE_MALLOC__
__device__ void *__hip_malloc(__hip_size_t __size);
__device__ void *__hip_free(void *__ptr);
__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
return __hip_malloc(__size);
}
-__attribute__((weak)) inline __device__ void *free(void *__ptr) {
- return __hip_free(__ptr);
+__attribute__((weak)) inline __device__ void free(void *__ptr) {
+ __hip_free(__ptr);
}
-#else
+#else // __HIP_ENABLE_DEVICE_MALLOC__
__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
__builtin_trap();
return (void *)0;
}
-__attribute__((weak)) inline __device__ void *free(void *__ptr) {
+__attribute__((weak)) inline __device__ void free(void *__ptr) {
__builtin_trap();
- return (void *)0;
}
-#endif
+#endif // __HIP_ENABLE_DEVICE_MALLOC__
+#endif // HIP version check
#ifdef __cplusplus
} // extern "C"
#endif //__cplusplus
+#if !defined(__HIPCC_RTC__)
+#include <cmath>
+#include <cstdlib>
+#include <stdlib.h>
+#if __has_include("hip/hip_version.h")
+#include "hip/hip_version.h"
+#endif // __has_include("hip/hip_version.h")
+#else
+typedef __SIZE_TYPE__ size_t;
+// Define macros which are needed to declare HIP device API's without standard
+// C/C++ headers. This is for readability so that these API's can be written
+// the same way as non-hipRTC use case. These macros need to be popped so that
+// they do not pollute users' name space.
+#pragma push_macro("NULL")
+#pragma push_macro("uint32_t")
+#pragma push_macro("uint64_t")
+#pragma push_macro("CHAR_BIT")
+#pragma push_macro("INT_MAX")
+#define NULL (void *)0
+#define uint32_t __UINT32_TYPE__
+#define uint64_t __UINT64_TYPE__
+#define CHAR_BIT __CHAR_BIT__
+#define INT_MAX __INTMAX_MAX__
+#endif // __HIPCC_RTC__
+
#include <__clang_hip_libdevice_declares.h>
#include <__clang_hip_math.h>
+#include <__clang_hip_stdlib.h>
#if defined(__HIPCC_RTC__)
#include <__clang_hip_cmath.h>
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_hip_stdlib.h b/contrib/llvm-project/clang/lib/Headers/__clang_hip_stdlib.h
new file mode 100644
index 000000000000..bd770e2415f9
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_hip_stdlib.h
@@ -0,0 +1,43 @@
+/*===---- __clang_hip_stdlib.h - Device-side HIP math support --------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __CLANG_HIP_STDLIB_H__
+
+#if !defined(__HIP__) && !defined(__OPENMP_AMDGCN__)
+#error "This file is for HIP and OpenMP AMDGCN device compilation only."
+#endif
+
+#if !defined(__cplusplus)
+
+#include <limits.h>
+
+#ifdef __OPENMP_AMDGCN__
+#define __DEVICE__ static inline __attribute__((always_inline, nothrow))
+#else
+#define __DEVICE__ static __device__ inline __attribute__((always_inline))
+#endif
+
+__DEVICE__
+int abs(int __x) {
+ int __sgn = __x >> (sizeof(int) * CHAR_BIT - 1);
+ return (__x ^ __sgn) - __sgn;
+}
+__DEVICE__
+long labs(long __x) {
+ long __sgn = __x >> (sizeof(long) * CHAR_BIT - 1);
+ return (__x ^ __sgn) - __sgn;
+}
+__DEVICE__
+long long llabs(long long __x) {
+ long long __sgn = __x >> (sizeof(long long) * CHAR_BIT - 1);
+ return (__x ^ __sgn) - __sgn;
+}
+
+#endif // !defined(__cplusplus)
+
+#endif // #define __CLANG_HIP_STDLIB_H__
diff --git a/contrib/llvm-project/clang/lib/Headers/__stdarg___gnuc_va_list.h b/contrib/llvm-project/clang/lib/Headers/__stdarg___gnuc_va_list.h
new file mode 100644
index 000000000000..2a0a7e8cc6c0
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/__stdarg___gnuc_va_list.h
@@ -0,0 +1,13 @@
+/*===---- __stdarg___gnuc_va_list.h - Definition of __gnuc_va_list ---------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __GNUC_VA_LIST
+#define __GNUC_VA_LIST
+typedef __builtin_va_list __gnuc_va_list;
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/__stdarg___va_copy.h b/contrib/llvm-project/clang/lib/Headers/__stdarg___va_copy.h
new file mode 100644
index 000000000000..e433e18a7082
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/__stdarg___va_copy.h
@@ -0,0 +1,12 @@
+/*===---- __stdarg___va_copy.h - Definition of __va_copy -------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __va_copy
+#define __va_copy(d, s) __builtin_va_copy(d, s)
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/__stdarg_va_arg.h b/contrib/llvm-project/clang/lib/Headers/__stdarg_va_arg.h
new file mode 100644
index 000000000000..89bd2f65d3be
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/__stdarg_va_arg.h
@@ -0,0 +1,22 @@
+/*===---- __stdarg_va_arg.h - Definitions of va_start, va_arg, va_end-------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef va_arg
+
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
+/* C23 does not require the second parameter for va_start. */
+#define va_start(ap, ...) __builtin_va_start(ap, 0)
+#else
+/* Versions before C23 do require the second parameter. */
+#define va_start(ap, param) __builtin_va_start(ap, param)
+#endif
+#define va_end(ap) __builtin_va_end(ap)
+#define va_arg(ap, type) __builtin_va_arg(ap, type)
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/__stdarg_va_copy.h b/contrib/llvm-project/clang/lib/Headers/__stdarg_va_copy.h
new file mode 100644
index 000000000000..8645328c2c68
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/__stdarg_va_copy.h
@@ -0,0 +1,12 @@
+/*===---- __stdarg_va_copy.h - Definition of va_copy------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef va_copy
+#define va_copy(dest, src) __builtin_va_copy(dest, src)
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/__stdarg_va_list.h b/contrib/llvm-project/clang/lib/Headers/__stdarg_va_list.h
new file mode 100644
index 000000000000..20c2e2cad940
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/__stdarg_va_list.h
@@ -0,0 +1,13 @@
+/*===---- __stdarg_va_list.h - Definition of va_list -----------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _VA_LIST
+#define _VA_LIST
+typedef __builtin_va_list va_list;
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/__stddef_max_align_t.h b/contrib/llvm-project/clang/lib/Headers/__stddef_max_align_t.h
index e3b439285d0f..512606a87728 100644
--- a/contrib/llvm-project/clang/lib/Headers/__stddef_max_align_t.h
+++ b/contrib/llvm-project/clang/lib/Headers/__stddef_max_align_t.h
@@ -1,4 +1,4 @@
-/*===---- __stddef_max_align_t.h - Definition of max_align_t for modules ---===
+/*===---- __stddef_max_align_t.h - Definition of max_align_t ---------------===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/clang/lib/Headers/__stddef_null.h b/contrib/llvm-project/clang/lib/Headers/__stddef_null.h
new file mode 100644
index 000000000000..c10bd2d7d988
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/__stddef_null.h
@@ -0,0 +1,29 @@
+/*===---- __stddef_null.h - Definition of NULL -----------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined(NULL) || !__building_module(_Builtin_stddef)
+
+/* linux/stddef.h will define NULL to 0. glibc (and other) headers then define
+ * __need_NULL and rely on stddef.h to redefine NULL to the correct value again.
+ * Modules don't support redefining macros like that, but support that pattern
+ * in the non-modules case.
+ */
+#undef NULL
+
+#ifdef __cplusplus
+#if !defined(__MINGW32__) && !defined(_MSC_VER)
+#define NULL __null
+#else
+#define NULL 0
+#endif
+#else
+#define NULL ((void*)0)
+#endif
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/__stddef_nullptr_t.h b/contrib/llvm-project/clang/lib/Headers/__stddef_nullptr_t.h
new file mode 100644
index 000000000000..7f3fbe6fe0d3
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/__stddef_nullptr_t.h
@@ -0,0 +1,29 @@
+/*===---- __stddef_nullptr_t.h - Definition of nullptr_t -------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/*
+ * When -fbuiltin-headers-in-system-modules is set this is a non-modular header
+ * and needs to behave as if it was textual.
+ */
+#if !defined(_NULLPTR_T) || \
+ (__has_feature(modules) && !__building_module(_Builtin_stddef))
+#define _NULLPTR_T
+
+#ifdef __cplusplus
+#if defined(_MSC_EXTENSIONS) && defined(_NATIVE_NULLPTR_SUPPORTED)
+namespace std {
+typedef decltype(nullptr) nullptr_t;
+}
+using ::std::nullptr_t;
+#endif
+#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
+typedef typeof(nullptr) nullptr_t;
+#endif
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/__stddef_offsetof.h b/contrib/llvm-project/clang/lib/Headers/__stddef_offsetof.h
new file mode 100644
index 000000000000..84172c6cd273
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/__stddef_offsetof.h
@@ -0,0 +1,17 @@
+/*===---- __stddef_offsetof.h - Definition of offsetof ---------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/*
+ * When -fbuiltin-headers-in-system-modules is set this is a non-modular header
+ * and needs to behave as if it was textual.
+ */
+#if !defined(offsetof) || \
+ (__has_feature(modules) && !__building_module(_Builtin_stddef))
+#define offsetof(t, d) __builtin_offsetof(t, d)
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/__stddef_ptrdiff_t.h b/contrib/llvm-project/clang/lib/Headers/__stddef_ptrdiff_t.h
new file mode 100644
index 000000000000..fd3c893c66c9
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/__stddef_ptrdiff_t.h
@@ -0,0 +1,20 @@
+/*===---- __stddef_ptrdiff_t.h - Definition of ptrdiff_t -------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/*
+ * When -fbuiltin-headers-in-system-modules is set this is a non-modular header
+ * and needs to behave as if it was textual.
+ */
+#if !defined(_PTRDIFF_T) || \
+ (__has_feature(modules) && !__building_module(_Builtin_stddef))
+#define _PTRDIFF_T
+
+typedef __PTRDIFF_TYPE__ ptrdiff_t;
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/__stddef_rsize_t.h b/contrib/llvm-project/clang/lib/Headers/__stddef_rsize_t.h
new file mode 100644
index 000000000000..dd433d40d973
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/__stddef_rsize_t.h
@@ -0,0 +1,20 @@
+/*===---- __stddef_rsize_t.h - Definition of rsize_t -----------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/*
+ * When -fbuiltin-headers-in-system-modules is set this is a non-modular header
+ * and needs to behave as if it was textual.
+ */
+#if !defined(_RSIZE_T) || \
+ (__has_feature(modules) && !__building_module(_Builtin_stddef))
+#define _RSIZE_T
+
+typedef __SIZE_TYPE__ rsize_t;
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/__stddef_size_t.h b/contrib/llvm-project/clang/lib/Headers/__stddef_size_t.h
new file mode 100644
index 000000000000..3dd7b1f37929
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/__stddef_size_t.h
@@ -0,0 +1,20 @@
+/*===---- __stddef_size_t.h - Definition of size_t -------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/*
+ * When -fbuiltin-headers-in-system-modules is set this is a non-modular header
+ * and needs to behave as if it was textual.
+ */
+#if !defined(_SIZE_T) || \
+ (__has_feature(modules) && !__building_module(_Builtin_stddef))
+#define _SIZE_T
+
+typedef __SIZE_TYPE__ size_t;
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/__stddef_unreachable.h b/contrib/llvm-project/clang/lib/Headers/__stddef_unreachable.h
new file mode 100644
index 000000000000..61df43e9732f
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/__stddef_unreachable.h
@@ -0,0 +1,21 @@
+/*===---- __stddef_unreachable.h - Definition of unreachable ---------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __cplusplus
+
+/*
+ * When -fbuiltin-headers-in-system-modules is set this is a non-modular header
+ * and needs to behave as if it was textual.
+ */
+#if !defined(unreachable) || \
+ (__has_feature(modules) && !__building_module(_Builtin_stddef))
+#define unreachable() __builtin_unreachable()
+#endif
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/__stddef_wchar_t.h b/contrib/llvm-project/clang/lib/Headers/__stddef_wchar_t.h
new file mode 100644
index 000000000000..bd69f6322541
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/__stddef_wchar_t.h
@@ -0,0 +1,28 @@
+/*===---- __stddef_wchar.h - Definition of wchar_t -------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined(__cplusplus) || (defined(_MSC_VER) && !_NATIVE_WCHAR_T_DEFINED)
+
+/*
+ * When -fbuiltin-headers-in-system-modules is set this is a non-modular header
+ * and needs to behave as if it was textual.
+ */
+#if !defined(_WCHAR_T) || \
+ (__has_feature(modules) && !__building_module(_Builtin_stddef))
+#define _WCHAR_T
+
+#ifdef _MSC_EXTENSIONS
+#define _WCHAR_T_DEFINED
+#endif
+
+typedef __WCHAR_TYPE__ wchar_t;
+
+#endif
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/__stddef_wint_t.h b/contrib/llvm-project/clang/lib/Headers/__stddef_wint_t.h
new file mode 100644
index 000000000000..0aa291507957
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/__stddef_wint_t.h
@@ -0,0 +1,15 @@
+/*===---- __stddef_wint.h - Definition of wint_t ---------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _WINT_T
+#define _WINT_T
+
+typedef __WINT_TYPE__ wint_t;
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/__wmmintrin_aes.h b/contrib/llvm-project/clang/lib/Headers/__wmmintrin_aes.h
index f540319c7fd2..3010b38711e6 100644
--- a/contrib/llvm-project/clang/lib/Headers/__wmmintrin_aes.h
+++ b/contrib/llvm-project/clang/lib/Headers/__wmmintrin_aes.h
@@ -133,7 +133,7 @@ _mm_aesimc_si128(__m128i __V)
/// An 8-bit round constant used to generate the AES encryption key.
/// \returns A 128-bit round key for AES encryption.
#define _mm_aeskeygenassist_si128(C, R) \
- (__m128i)__builtin_ia32_aeskeygenassist128((__v2di)(__m128i)(C), (int)(R))
+ ((__m128i)__builtin_ia32_aeskeygenassist128((__v2di)(__m128i)(C), (int)(R)))
#undef __DEFAULT_FN_ATTRS
diff --git a/contrib/llvm-project/clang/lib/Headers/__wmmintrin_pclmul.h b/contrib/llvm-project/clang/lib/Headers/__wmmintrin_pclmul.h
index fef4b93dbb43..c9a6d50bdc89 100644
--- a/contrib/llvm-project/clang/lib/Headers/__wmmintrin_pclmul.h
+++ b/contrib/llvm-project/clang/lib/Headers/__wmmintrin_pclmul.h
@@ -22,23 +22,23 @@
/// \headerfile <x86intrin.h>
///
/// \code
-/// __m128i _mm_clmulepi64_si128(__m128i __X, __m128i __Y, const int __I);
+/// __m128i _mm_clmulepi64_si128(__m128i X, __m128i Y, const int I);
/// \endcode
///
/// This intrinsic corresponds to the <c> VPCLMULQDQ </c> instruction.
///
-/// \param __X
+/// \param X
/// A 128-bit vector of [2 x i64] containing one of the source operands.
-/// \param __Y
+/// \param Y
/// A 128-bit vector of [2 x i64] containing one of the source operands.
-/// \param __I
+/// \param I
/// An immediate value specifying which 64-bit values to select from the
-/// operands. Bit 0 is used to select a value from operand \a __X, and bit
-/// 4 is used to select a value from operand \a __Y: \n
-/// Bit[0]=0 indicates that bits[63:0] of operand \a __X are used. \n
-/// Bit[0]=1 indicates that bits[127:64] of operand \a __X are used. \n
-/// Bit[4]=0 indicates that bits[63:0] of operand \a __Y are used. \n
-/// Bit[4]=1 indicates that bits[127:64] of operand \a __Y are used.
+/// operands. Bit 0 is used to select a value from operand \a X, and bit
+/// 4 is used to select a value from operand \a Y: \n
+/// Bit[0]=0 indicates that bits[63:0] of operand \a X are used. \n
+/// Bit[0]=1 indicates that bits[127:64] of operand \a X are used. \n
+/// Bit[4]=0 indicates that bits[63:0] of operand \a Y are used. \n
+/// Bit[4]=1 indicates that bits[127:64] of operand \a Y are used.
/// \returns The 128-bit integer vector containing the result of the carry-less
/// multiplication of the selected 64-bit values.
#define _mm_clmulepi64_si128(X, Y, I) \
diff --git a/contrib/llvm-project/clang/lib/Headers/adcintrin.h b/contrib/llvm-project/clang/lib/Headers/adcintrin.h
new file mode 100644
index 000000000000..0065a1b543f8
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/adcintrin.h
@@ -0,0 +1,160 @@
+/*===---- adcintrin.h - ADC intrinsics -------------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ADCINTRIN_H
+#define __ADCINTRIN_H
+
+#if !defined(__i386__) && !defined(__x86_64__)
+#error "This header is only meant to be used on x86 and x64 architecture"
+#endif
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+
+/* Use C++ inline semantics in C++, GNU inline for C mode. */
+#if defined(__cplusplus)
+#define __INLINE __inline
+#else
+#define __INLINE static __inline
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/// Adds unsigned 32-bit integers \a __x and \a __y, plus 0 or 1 as indicated
+/// by the carry flag \a __cf. Stores the unsigned 32-bit sum in the memory
+/// at \a __p, and returns the 8-bit carry-out (carry flag).
+///
+/// \code{.operation}
+/// temp := (__cf == 0) ? 0 : 1
+/// Store32(__p, __x + __y + temp)
+/// result := CF
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c ADC instruction.
+///
+/// \param __cf
+/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
+/// \param __x
+/// A 32-bit unsigned addend.
+/// \param __y
+/// A 32-bit unsigned addend.
+/// \param __p
+/// Pointer to memory for storing the sum.
+/// \returns The 8-bit unsigned carry-out value.
+__INLINE unsigned char __DEFAULT_FN_ATTRS _addcarry_u32(unsigned char __cf,
+ unsigned int __x,
+ unsigned int __y,
+ unsigned int *__p) {
+ return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p);
+}
+
+/// Adds unsigned 32-bit integer \a __y to 0 or 1 as indicated by the carry
+/// flag \a __cf, and subtracts the result from unsigned 32-bit integer
+/// \a __x. Stores the unsigned 32-bit difference in the memory at \a __p,
+/// and returns the 8-bit carry-out (carry or overflow flag).
+///
+/// \code{.operation}
+/// temp := (__cf == 0) ? 0 : 1
+/// Store32(__p, __x - (__y + temp))
+/// result := CF
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c SBB instruction.
+///
+/// \param __cf
+/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
+/// \param __x
+/// The 32-bit unsigned minuend.
+/// \param __y
+/// The 32-bit unsigned subtrahend.
+/// \param __p
+/// Pointer to memory for storing the difference.
+/// \returns The 8-bit unsigned carry-out value.
+__INLINE unsigned char __DEFAULT_FN_ATTRS _subborrow_u32(unsigned char __cf,
+ unsigned int __x,
+ unsigned int __y,
+ unsigned int *__p) {
+ return __builtin_ia32_subborrow_u32(__cf, __x, __y, __p);
+}
+
+#ifdef __x86_64__
+/// Adds unsigned 64-bit integers \a __x and \a __y, plus 0 or 1 as indicated
+/// by the carry flag \a __cf. Stores the unsigned 64-bit sum in the memory
+/// at \a __p, and returns the 8-bit carry-out (carry flag).
+///
+/// \code{.operation}
+/// temp := (__cf == 0) ? 0 : 1
+/// Store64(__p, __x + __y + temp)
+/// result := CF
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c ADC instruction.
+///
+/// \param __cf
+/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
+/// \param __x
+/// A 64-bit unsigned addend.
+/// \param __y
+/// A 64-bit unsigned addend.
+/// \param __p
+/// Pointer to memory for storing the sum.
+/// \returns The 8-bit unsigned carry-out value.
+__INLINE unsigned char __DEFAULT_FN_ATTRS
+_addcarry_u64(unsigned char __cf, unsigned long long __x,
+ unsigned long long __y, unsigned long long *__p) {
+ return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p);
+}
+
+/// Adds unsigned 64-bit integer \a __y to 0 or 1 as indicated by the carry
+/// flag \a __cf, and subtracts the result from unsigned 64-bit integer
+/// \a __x. Stores the unsigned 64-bit difference in the memory at \a __p,
+/// and returns the 8-bit carry-out (carry or overflow flag).
+///
+/// \code{.operation}
+/// temp := (__cf == 0) ? 0 : 1
+/// Store64(__p, __x - (__y + temp))
+/// result := CF
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c ADC instruction.
+///
+/// \param __cf
+/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
+/// \param __x
+/// The 64-bit unsigned minuend.
+/// \param __y
+/// The 64-bit unsigned subtrahend.
+/// \param __p
+/// Pointer to memory for storing the difference.
+/// \returns The 8-bit unsigned carry-out value.
+__INLINE unsigned char __DEFAULT_FN_ATTRS
+_subborrow_u64(unsigned char __cf, unsigned long long __x,
+ unsigned long long __y, unsigned long long *__p) {
+ return __builtin_ia32_subborrow_u64(__cf, __x, __y, __p);
+}
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#undef __INLINE
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __ADCINTRIN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/adxintrin.h b/contrib/llvm-project/clang/lib/Headers/adxintrin.h
index 72b9ed08f40c..bc6a4caf3533 100644
--- a/contrib/llvm-project/clang/lib/Headers/adxintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/adxintrin.h
@@ -15,58 +15,88 @@
#define __ADXINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("adx")))
-/* Intrinsics that are available only if __ADX__ defined */
-static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx")))
-_addcarryx_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
- unsigned int *__p)
-{
- return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p);
-}
+/* Use C++ inline semantics in C++, GNU inline for C mode. */
+#if defined(__cplusplus)
+#define __INLINE __inline
+#else
+#define __INLINE static __inline
+#endif
-#ifdef __x86_64__
-static __inline unsigned char __attribute__((__always_inline__, __nodebug__, __target__("adx")))
-_addcarryx_u64(unsigned char __cf, unsigned long long __x,
- unsigned long long __y, unsigned long long *__p)
-{
- return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p);
-}
+#if defined(__cplusplus)
+extern "C" {
#endif
-/* Intrinsics that are also available if __ADX__ undefined */
-static __inline unsigned char __DEFAULT_FN_ATTRS
-_addcarry_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
- unsigned int *__p)
-{
+/* Intrinsics that are available only if __ADX__ is defined. */
+
+/// Adds unsigned 32-bit integers \a __x and \a __y, plus 0 or 1 as indicated
+/// by the carry flag \a __cf. Stores the unsigned 32-bit sum in the memory
+/// at \a __p, and returns the 8-bit carry-out (carry flag).
+///
+/// \code{.operation}
+/// temp := (__cf == 0) ? 0 : 1
+/// Store32(__p, __x + __y + temp)
+/// result := CF
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c ADCX instruction.
+///
+/// \param __cf
+/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
+/// \param __x
+/// A 32-bit unsigned addend.
+/// \param __y
+/// A 32-bit unsigned addend.
+/// \param __p
+/// Pointer to memory for storing the sum.
+/// \returns The 8-bit unsigned carry-out value.
+__INLINE unsigned char __DEFAULT_FN_ATTRS _addcarryx_u32(unsigned char __cf,
+ unsigned int __x,
+ unsigned int __y,
+ unsigned int *__p) {
return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p);
}
#ifdef __x86_64__
-static __inline unsigned char __DEFAULT_FN_ATTRS
-_addcarry_u64(unsigned char __cf, unsigned long long __x,
- unsigned long long __y, unsigned long long *__p)
-{
+/// Adds unsigned 64-bit integers \a __x and \a __y, plus 0 or 1 as indicated
+/// by the carry flag \a __cf. Stores the unsigned 64-bit sum in the memory
+/// at \a __p, and returns the 8-bit carry-out (carry flag).
+///
+/// \code{.operation}
+/// temp := (__cf == 0) ? 0 : 1
+/// Store64(__p, __x + __y + temp)
+/// result := CF
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c ADCX instruction.
+///
+/// \param __cf
+/// The 8-bit unsigned carry flag; any non-zero value indicates carry.
+/// \param __x
+/// A 64-bit unsigned addend.
+/// \param __y
+/// A 64-bit unsigned addend.
+/// \param __p
+/// Pointer to memory for storing the sum.
+/// \returns The 8-bit unsigned carry-out value.
+__INLINE unsigned char __DEFAULT_FN_ATTRS
+_addcarryx_u64(unsigned char __cf, unsigned long long __x,
+ unsigned long long __y, unsigned long long *__p) {
return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p);
}
#endif
-static __inline unsigned char __DEFAULT_FN_ATTRS
-_subborrow_u32(unsigned char __cf, unsigned int __x, unsigned int __y,
- unsigned int *__p)
-{
- return __builtin_ia32_subborrow_u32(__cf, __x, __y, __p);
-}
-
-#ifdef __x86_64__
-static __inline unsigned char __DEFAULT_FN_ATTRS
-_subborrow_u64(unsigned char __cf, unsigned long long __x,
- unsigned long long __y, unsigned long long *__p)
-{
- return __builtin_ia32_subborrow_u64(__cf, __x, __y, __p);
+#if defined(__cplusplus)
}
#endif
+#undef __INLINE
#undef __DEFAULT_FN_ATTRS
#endif /* __ADXINTRIN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/altivec.h b/contrib/llvm-project/clang/lib/Headers/altivec.h
index 0dd8c859366b..4971631c50f4 100644
--- a/contrib/llvm-project/clang/lib/Headers/altivec.h
+++ b/contrib/llvm-project/clang/lib/Headers/altivec.h
@@ -19,6 +19,10 @@
#define __CR6_EQ_REV 1
#define __CR6_LT 2
#define __CR6_LT_REV 3
+#define __CR6_GT 4
+#define __CR6_GT_REV 5
+#define __CR6_SO 6
+#define __CR6_SO_REV 7
/* Constants for vec_test_data_class */
#define __VEC_CLASS_FP_SUBNORMAL_N (1 << 0)
@@ -307,7 +311,7 @@ vec_add(vector unsigned __int128 __a, vector unsigned __int128 __b) {
static __inline__ vector unsigned char __attribute__((__always_inline__))
vec_add_u128(vector unsigned char __a, vector unsigned char __b) {
- return __builtin_altivec_vadduqm(__a, __b);
+ return (vector unsigned char)__builtin_altivec_vadduqm(__a, __b);
}
#elif defined(__VSX__)
static __inline__ vector signed long long __ATTRS_o_ai
@@ -321,9 +325,9 @@ vec_add(vector signed long long __a, vector signed long long __b) {
(vector unsigned int)__a + (vector unsigned int)__b;
vector unsigned int __carry = __builtin_altivec_vaddcuw(
(vector unsigned int)__a, (vector unsigned int)__b);
- __carry = __builtin_shufflevector((vector unsigned char)__carry,
- (vector unsigned char)__carry, 0, 0, 0, 7,
- 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0);
+ __carry = (vector unsigned int)__builtin_shufflevector(
+ (vector unsigned char)__carry, (vector unsigned char)__carry, 0, 0, 0, 7,
+ 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0);
return (vector signed long long)(__res + __carry);
#endif
}
@@ -354,7 +358,9 @@ static __inline__ vector double __ATTRS_o_ai vec_add(vector double __a,
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_adde(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
- return __builtin_altivec_vaddeuqm(__a, __b, __c);
+ return (vector signed __int128)__builtin_altivec_vaddeuqm(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+ (vector unsigned __int128)__c);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -367,7 +373,9 @@ vec_adde(vector unsigned __int128 __a, vector unsigned __int128 __b,
static __inline__ vector unsigned char __attribute__((__always_inline__))
vec_adde_u128(vector unsigned char __a, vector unsigned char __b,
vector unsigned char __c) {
- return (vector unsigned char)__builtin_altivec_vaddeuqm(__a, __b, __c);
+ return (vector unsigned char)__builtin_altivec_vaddeuqm_c(
+ (vector unsigned char)__a, (vector unsigned char)__b,
+ (vector unsigned char)__c);
}
#endif
@@ -394,7 +402,9 @@ vec_adde(vector unsigned int __a, vector unsigned int __b,
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_addec(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
- return __builtin_altivec_vaddecuq(__a, __b, __c);
+ return (vector signed __int128)__builtin_altivec_vaddecuq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+ (vector unsigned __int128)__c);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -407,7 +417,9 @@ vec_addec(vector unsigned __int128 __a, vector unsigned __int128 __b,
static __inline__ vector unsigned char __attribute__((__always_inline__))
vec_addec_u128(vector unsigned char __a, vector unsigned char __b,
vector unsigned char __c) {
- return (vector unsigned char)__builtin_altivec_vaddecuq(__a, __b, __c);
+ return (vector unsigned char)__builtin_altivec_vaddecuq_c(
+ (vector unsigned char)__a, (vector unsigned char)__b,
+ (vector unsigned char)__c);
}
#ifdef __powerpc64__
@@ -596,7 +608,8 @@ vec_addc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
static __inline__ vector unsigned char __attribute__((__always_inline__))
vec_addc_u128(vector unsigned char __a, vector unsigned char __b) {
- return (vector unsigned char)__builtin_altivec_vaddcuq(__a, __b);
+ return (vector unsigned char)__builtin_altivec_vaddcuq_c(
+ (vector unsigned char)__a, (vector unsigned char)__b);
}
#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
@@ -820,7 +833,9 @@ vec_vadduqm(vector unsigned __int128 __a, vector unsigned __int128 __b) {
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_vaddeuqm(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
- return __builtin_altivec_vaddeuqm(__a, __b, __c);
+ return (vector signed __int128)__builtin_altivec_vaddeuqm(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+ (vector unsigned __int128)__c);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -833,7 +848,8 @@ vec_vaddeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b,
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_vaddcuq(vector signed __int128 __a, vector signed __int128 __b) {
- return __builtin_altivec_vaddcuq(__a, __b);
+ return (vector signed __int128)__builtin_altivec_vaddcuq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -846,7 +862,9 @@ vec_vaddcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_vaddecuq(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
- return __builtin_altivec_vaddecuq(__a, __b, __c);
+ return (vector signed __int128)__builtin_altivec_vaddecuq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+ (vector unsigned __int128)__c);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -1802,13 +1820,19 @@ vec_cmpeq(vector double __a, vector double __b) {
static __inline__ vector bool __int128 __ATTRS_o_ai
vec_cmpeq(vector signed __int128 __a, vector signed __int128 __b) {
return (vector bool __int128)__builtin_altivec_vcmpequq(
- (vector bool __int128)__a, (vector bool __int128)__b);
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b);
}
static __inline__ vector bool __int128 __ATTRS_o_ai
vec_cmpeq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
return (vector bool __int128)__builtin_altivec_vcmpequq(
- (vector bool __int128)__a, (vector bool __int128)__b);
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b);
+}
+
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpeq(vector bool __int128 __a, vector bool __int128 __b) {
+ return (vector bool __int128)__builtin_altivec_vcmpequq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b);
}
#endif
@@ -1878,14 +1902,20 @@ vec_cmpne(vector float __a, vector float __b) {
#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ vector bool __int128 __ATTRS_o_ai
vec_cmpne(vector unsigned __int128 __a, vector unsigned __int128 __b) {
- return (vector bool __int128) ~(__builtin_altivec_vcmpequq(
- (vector bool __int128)__a, (vector bool __int128)__b));
+ return (vector bool __int128)~(__builtin_altivec_vcmpequq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b));
}
static __inline__ vector bool __int128 __ATTRS_o_ai
vec_cmpne(vector signed __int128 __a, vector signed __int128 __b) {
- return (vector bool __int128) ~(__builtin_altivec_vcmpequq(
- (vector bool __int128)__a, (vector bool __int128)__b));
+ return (vector bool __int128)~(__builtin_altivec_vcmpequq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b));
+}
+
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpne(vector bool __int128 __a, vector bool __int128 __b) {
+ return (vector bool __int128)~(__builtin_altivec_vcmpequq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b));
}
#endif
@@ -1930,16 +1960,16 @@ vec_cmpnez(vector unsigned int __a, vector unsigned int __b) {
static __inline__ signed int __ATTRS_o_ai
vec_cntlz_lsbb(vector signed char __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vctzlsbb(__a);
+ return __builtin_altivec_vctzlsbb((vector unsigned char)__a);
#else
- return __builtin_altivec_vclzlsbb(__a);
+ return __builtin_altivec_vclzlsbb((vector unsigned char)__a);
#endif
}
static __inline__ signed int __ATTRS_o_ai
vec_cntlz_lsbb(vector unsigned char __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vctzlsbb(__a);
+ return __builtin_altivec_vctzlsbb((vector unsigned char)__a);
#else
return __builtin_altivec_vclzlsbb(__a);
#endif
@@ -1948,9 +1978,9 @@ vec_cntlz_lsbb(vector unsigned char __a) {
static __inline__ signed int __ATTRS_o_ai
vec_cnttz_lsbb(vector signed char __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vclzlsbb(__a);
+ return __builtin_altivec_vclzlsbb((vector unsigned char)__a);
#else
- return __builtin_altivec_vctzlsbb(__a);
+ return __builtin_altivec_vctzlsbb((vector unsigned char)__a);
#endif
}
@@ -1970,7 +2000,7 @@ vec_parity_lsbb(vector unsigned int __a) {
static __inline__ vector unsigned int __ATTRS_o_ai
vec_parity_lsbb(vector signed int __a) {
- return __builtin_altivec_vprtybw(__a);
+ return __builtin_altivec_vprtybw((vector unsigned int)__a);
}
#ifdef __SIZEOF_INT128__
@@ -1981,7 +2011,7 @@ vec_parity_lsbb(vector unsigned __int128 __a) {
static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_parity_lsbb(vector signed __int128 __a) {
- return __builtin_altivec_vprtybq(__a);
+ return __builtin_altivec_vprtybq((vector unsigned __int128)__a);
}
#endif
@@ -1992,7 +2022,7 @@ vec_parity_lsbb(vector unsigned long long __a) {
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_parity_lsbb(vector signed long long __a) {
- return __builtin_altivec_vprtybd(__a);
+ return __builtin_altivec_vprtybd((vector unsigned long long)__a);
}
#else
@@ -2198,14 +2228,12 @@ vec_cmpgt(vector double __a, vector double __b) {
#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ vector bool __int128 __ATTRS_o_ai
vec_cmpgt(vector signed __int128 __a, vector signed __int128 __b) {
- return (vector bool __int128)__builtin_altivec_vcmpgtsq(
- (vector bool __int128)__a, (vector bool __int128)__b);
+ return (vector bool __int128)__builtin_altivec_vcmpgtsq(__a, __b);
}
static __inline__ vector bool __int128 __ATTRS_o_ai
vec_cmpgt(vector unsigned __int128 __a, vector unsigned __int128 __b) {
- return (vector bool __int128)__builtin_altivec_vcmpgtuq(
- (vector bool __int128)__a, (vector bool __int128)__b);
+ return (vector bool __int128)__builtin_altivec_vcmpgtuq(__a, __b);
}
#endif
@@ -2472,33 +2500,35 @@ vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
#ifdef __POWER8_VECTOR__
/* vec_popcnt */
-static __inline__ vector signed char __ATTRS_o_ai
+static __inline__ vector unsigned char __ATTRS_o_ai
vec_popcnt(vector signed char __a) {
- return __builtin_altivec_vpopcntb(__a);
+ return (vector unsigned char)__builtin_altivec_vpopcntb(
+ (vector unsigned char)__a);
}
static __inline__ vector unsigned char __ATTRS_o_ai
vec_popcnt(vector unsigned char __a) {
return __builtin_altivec_vpopcntb(__a);
}
-static __inline__ vector signed short __ATTRS_o_ai
+static __inline__ vector unsigned short __ATTRS_o_ai
vec_popcnt(vector signed short __a) {
- return __builtin_altivec_vpopcnth(__a);
+ return (vector unsigned short)__builtin_altivec_vpopcnth(
+ (vector unsigned short)__a);
}
static __inline__ vector unsigned short __ATTRS_o_ai
vec_popcnt(vector unsigned short __a) {
return __builtin_altivec_vpopcnth(__a);
}
-static __inline__ vector signed int __ATTRS_o_ai
+static __inline__ vector unsigned int __ATTRS_o_ai
vec_popcnt(vector signed int __a) {
- return __builtin_altivec_vpopcntw(__a);
+ return __builtin_altivec_vpopcntw((vector unsigned int)__a);
}
static __inline__ vector unsigned int __ATTRS_o_ai
vec_popcnt(vector unsigned int __a) {
return __builtin_altivec_vpopcntw(__a);
}
-static __inline__ vector signed long long __ATTRS_o_ai
+static __inline__ vector unsigned long long __ATTRS_o_ai
vec_popcnt(vector signed long long __a) {
- return __builtin_altivec_vpopcntd(__a);
+ return __builtin_altivec_vpopcntd((vector unsigned long long)__a);
}
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_popcnt(vector unsigned long long __a) {
@@ -2510,7 +2540,7 @@ vec_popcnt(vector unsigned long long __a) {
static __inline__ vector signed char __ATTRS_o_ai
vec_cntlz(vector signed char __a) {
- return __builtin_altivec_vclzb(__a);
+ return (vector signed char)__builtin_altivec_vclzb((vector unsigned char)__a);
}
static __inline__ vector unsigned char __ATTRS_o_ai
vec_cntlz(vector unsigned char __a) {
@@ -2518,7 +2548,8 @@ vec_cntlz(vector unsigned char __a) {
}
static __inline__ vector signed short __ATTRS_o_ai
vec_cntlz(vector signed short __a) {
- return __builtin_altivec_vclzh(__a);
+ return (vector signed short)__builtin_altivec_vclzh(
+ (vector unsigned short)__a);
}
static __inline__ vector unsigned short __ATTRS_o_ai
vec_cntlz(vector unsigned short __a) {
@@ -2526,7 +2557,7 @@ vec_cntlz(vector unsigned short __a) {
}
static __inline__ vector signed int __ATTRS_o_ai
vec_cntlz(vector signed int __a) {
- return __builtin_altivec_vclzw(__a);
+ return (vector signed int)__builtin_altivec_vclzw((vector unsigned int)__a);
}
static __inline__ vector unsigned int __ATTRS_o_ai
vec_cntlz(vector unsigned int __a) {
@@ -2534,7 +2565,8 @@ vec_cntlz(vector unsigned int __a) {
}
static __inline__ vector signed long long __ATTRS_o_ai
vec_cntlz(vector signed long long __a) {
- return __builtin_altivec_vclzd(__a);
+ return (vector signed long long)__builtin_altivec_vclzd(
+ (vector unsigned long long)__a);
}
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_cntlz(vector unsigned long long __a) {
@@ -2548,7 +2580,7 @@ vec_cntlz(vector unsigned long long __a) {
static __inline__ vector signed char __ATTRS_o_ai
vec_cnttz(vector signed char __a) {
- return __builtin_altivec_vctzb(__a);
+ return (vector signed char)__builtin_altivec_vctzb((vector unsigned char)__a);
}
static __inline__ vector unsigned char __ATTRS_o_ai
vec_cnttz(vector unsigned char __a) {
@@ -2556,7 +2588,8 @@ vec_cnttz(vector unsigned char __a) {
}
static __inline__ vector signed short __ATTRS_o_ai
vec_cnttz(vector signed short __a) {
- return __builtin_altivec_vctzh(__a);
+ return (vector signed short)__builtin_altivec_vctzh(
+ (vector unsigned short)__a);
}
static __inline__ vector unsigned short __ATTRS_o_ai
vec_cnttz(vector unsigned short __a) {
@@ -2564,7 +2597,7 @@ vec_cnttz(vector unsigned short __a) {
}
static __inline__ vector signed int __ATTRS_o_ai
vec_cnttz(vector signed int __a) {
- return __builtin_altivec_vctzw(__a);
+ return (vector signed int)__builtin_altivec_vctzw((vector unsigned int)__a);
}
static __inline__ vector unsigned int __ATTRS_o_ai
vec_cnttz(vector unsigned int __a) {
@@ -2572,7 +2605,8 @@ vec_cnttz(vector unsigned int __a) {
}
static __inline__ vector signed long long __ATTRS_o_ai
vec_cnttz(vector signed long long __a) {
- return __builtin_altivec_vctzd(__a);
+ return (vector signed long long)__builtin_altivec_vctzd(
+ (vector unsigned long long)__a);
}
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_cnttz(vector unsigned long long __a) {
@@ -3049,13 +3083,10 @@ static __inline__ vector unsigned char __ATTRS_o_ai
vec_xl_len_r(const unsigned char *__a, size_t __b) {
vector unsigned char __res =
(vector unsigned char)__builtin_vsx_lxvll(__a, (__b << 56));
-#ifdef __LITTLE_ENDIAN__
vector unsigned char __mask =
(vector unsigned char)__builtin_altivec_lvsr(16 - __b, (int *)NULL);
- __res = (vector unsigned char)__builtin_altivec_vperm_4si(
+ return (vector unsigned char)__builtin_altivec_vperm_4si(
(vector int)__res, (vector int)__res, __mask);
-#endif
- return __res;
}
// vec_xst_len
@@ -3130,15 +3161,12 @@ static __inline__ void __ATTRS_o_ai vec_xst_len(vector double __a, double *__b,
static __inline__ void __ATTRS_o_ai vec_xst_len_r(vector unsigned char __a,
unsigned char *__b,
size_t __c) {
-#ifdef __LITTLE_ENDIAN__
vector unsigned char __mask =
(vector unsigned char)__builtin_altivec_lvsl(16 - __c, (int *)NULL);
vector unsigned char __res =
- __builtin_altivec_vperm_4si((vector int)__a, (vector int)__a, __mask);
+ (vector unsigned char)__builtin_altivec_vperm_4si(
+ (vector int)__a, (vector int)__a, __mask);
return __builtin_vsx_stxvll((vector int)__res, __b, (__c << 56));
-#else
- return __builtin_vsx_stxvll((vector int)__a, __b, (__c << 56));
-#endif
}
#endif
#endif
@@ -3174,65 +3202,79 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
// the XL-compatible signatures are used for those functions.
#ifdef __XL_COMPAT_ALTIVEC__
#define vec_ctf(__a, __b) \
- _Generic((__a), vector int \
- : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \
- vector unsigned int \
- : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
- (__b)), \
- vector unsigned long long \
- : (__builtin_vsx_xvcvuxdsp((vector unsigned long long)(__a)) * \
- (vector float)(vector unsigned)((0x7f - (__b)) << 23)), \
- vector signed long long \
- : (__builtin_vsx_xvcvsxdsp((vector signed long long)(__a)) * \
- (vector float)(vector unsigned)((0x7f - (__b)) << 23)))
+ _Generic((__a), \
+ vector int: (vector float)__builtin_altivec_vcfsx((vector int)(__a), \
+ ((__b)&0x1F)), \
+ vector unsigned int: (vector float)__builtin_altivec_vcfux( \
+ (vector unsigned int)(__a), ((__b)&0x1F)), \
+ vector unsigned long long: ( \
+ vector float)(__builtin_vsx_xvcvuxdsp( \
+ (vector unsigned long long)(__a)) * \
+ (vector float)(vector unsigned)((0x7f - \
+ ((__b)&0x1F)) \
+ << 23)), \
+ vector signed long long: ( \
+ vector float)(__builtin_vsx_xvcvsxdsp( \
+ (vector signed long long)(__a)) * \
+ (vector float)(vector unsigned)((0x7f - \
+ ((__b)&0x1F)) \
+ << 23)))
#else // __XL_COMPAT_ALTIVEC__
-#define vec_ctf(__a, __b) \
- _Generic((__a), vector int \
- : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \
- vector unsigned int \
- : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
- (__b)), \
- vector unsigned long long \
- : (__builtin_convertvector((vector unsigned long long)(__a), \
- vector double) * \
- (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
- << 52)), \
- vector signed long long \
- : (__builtin_convertvector((vector signed long long)(__a), \
- vector double) * \
- (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
- << 52)))
+#define vec_ctf(__a, __b) \
+ _Generic( \
+ (__a), \
+ vector int: (vector float)__builtin_altivec_vcfsx((vector int)(__a), \
+ ((__b)&0x1F)), \
+ vector unsigned int: (vector float)__builtin_altivec_vcfux( \
+ (vector unsigned int)(__a), ((__b)&0x1F)), \
+ vector unsigned long long: ( \
+ vector float)(__builtin_convertvector( \
+ (vector unsigned long long)(__a), vector double) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - \
+ ((__b)&0x1F)) \
+ << 52)), \
+ vector signed long long: ( \
+ vector float)(__builtin_convertvector( \
+ (vector signed long long)(__a), vector double) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - \
+ ((__b)&0x1F)) \
+ << 52)))
#endif // __XL_COMPAT_ALTIVEC__
#else
#define vec_ctf(__a, __b) \
- _Generic((__a), vector int \
- : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \
- vector unsigned int \
- : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
- (__b)))
+ _Generic((__a), \
+ vector int: (vector float)__builtin_altivec_vcfsx((vector int)(__a), \
+ ((__b)&0x1F)), \
+ vector unsigned int: (vector float)__builtin_altivec_vcfux( \
+ (vector unsigned int)(__a), ((__b)&0x1F)))
#endif
/* vec_ctd */
#ifdef __VSX__
#define vec_ctd(__a, __b) \
- _Generic((__a), vector signed int \
- : (vec_doublee((vector signed int)(__a)) * \
- (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
- << 52)), \
- vector unsigned int \
- : (vec_doublee((vector unsigned int)(__a)) * \
- (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
- << 52)), \
- vector unsigned long long \
- : (__builtin_convertvector((vector unsigned long long)(__a), \
- vector double) * \
- (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
- << 52)), \
- vector signed long long \
- : (__builtin_convertvector((vector signed long long)(__a), \
- vector double) * \
- (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
- << 52)))
+ _Generic((__a), \
+ vector signed int: ( \
+ vec_doublee((vector signed int)(__a)) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - \
+ ((__b)&0x1F)) \
+ << 52)), \
+ vector unsigned int: ( \
+ vec_doublee((vector unsigned int)(__a)) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - \
+ ((__b)&0x1F)) \
+ << 52)), \
+ vector unsigned long long: ( \
+ __builtin_convertvector((vector unsigned long long)(__a), \
+ vector double) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - \
+ ((__b)&0x1F)) \
+ << 52)), \
+ vector signed long long: ( \
+ __builtin_convertvector((vector signed long long)(__a), \
+ vector double) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - \
+ ((__b)&0x1F)) \
+ << 52)))
#endif // __VSX__
/* vec_vcfsx */
@@ -3247,27 +3289,30 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
#ifdef __VSX__
#ifdef __XL_COMPAT_ALTIVEC__
#define vec_cts(__a, __b) \
- _Generic((__a), vector float \
- : __builtin_altivec_vctsxs((vector float)(__a), (__b)), \
- vector double \
- : __extension__({ \
+ _Generic((__a), \
+ vector float: (vector signed int)__builtin_altivec_vctsxs( \
+ (vector float)(__a), ((__b)&0x1F)), \
+ vector double: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
<< 52); \
- __builtin_vsx_xvcvdpsxws(__ret); \
+ (vector signed long long)__builtin_vsx_xvcvdpsxws(__ret); \
}))
#else // __XL_COMPAT_ALTIVEC__
#define vec_cts(__a, __b) \
- _Generic((__a), vector float \
- : __builtin_altivec_vctsxs((vector float)(__a), (__b)), \
- vector double \
- : __extension__({ \
+ _Generic((__a), \
+ vector float: (vector signed int)__builtin_altivec_vctsxs( \
+ (vector float)(__a), ((__b)&0x1F)), \
+ vector double: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
<< 52); \
- __builtin_convertvector(__ret, vector signed long long); \
+ (vector signed long long)__builtin_convertvector( \
+ __ret, vector signed long long); \
}))
#endif // __XL_COMPAT_ALTIVEC__
#else
@@ -3283,27 +3328,30 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
#ifdef __VSX__
#ifdef __XL_COMPAT_ALTIVEC__
#define vec_ctu(__a, __b) \
- _Generic((__a), vector float \
- : __builtin_altivec_vctuxs((vector float)(__a), (__b)), \
- vector double \
- : __extension__({ \
+ _Generic((__a), \
+ vector float: (vector unsigned int)__builtin_altivec_vctuxs( \
+ (vector float)(__a), ((__b)&0x1F)), \
+ vector double: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + __b) \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
<< 52); \
- __builtin_vsx_xvcvdpuxws(__ret); \
+ (vector unsigned long long)__builtin_vsx_xvcvdpuxws(__ret); \
}))
#else // __XL_COMPAT_ALTIVEC__
#define vec_ctu(__a, __b) \
- _Generic((__a), vector float \
- : __builtin_altivec_vctuxs((vector float)(__a), (__b)), \
- vector double \
- : __extension__({ \
+ _Generic((__a), \
+ vector float: (vector unsigned int)__builtin_altivec_vctuxs( \
+ (vector float)(__a), ((__b)&0x1F)), \
+ vector double: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + __b) \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
<< 52); \
- __builtin_convertvector(__ret, vector unsigned long long); \
+ (vector unsigned long long)__builtin_convertvector( \
+ __ret, vector unsigned long long); \
}))
#endif // __XL_COMPAT_ALTIVEC__
#else
@@ -3315,60 +3363,62 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
#ifdef __VSX__
#define vec_ctsl(__a, __b) \
- _Generic((__a), vector float \
- : __extension__({ \
- vector float __ret = \
- (vector float)(__a) * \
- (vector float)(vector unsigned)((0x7f + (__b)) << 23); \
- __builtin_vsx_xvcvspsxds( \
- __builtin_vsx_xxsldwi(__ret, __ret, 1)); \
- }), \
- vector double \
- : __extension__({ \
- vector double __ret = \
- (vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + __b) \
- << 52); \
- __builtin_convertvector(__ret, vector signed long long); \
- }))
+ _Generic( \
+ (__a), vector float \
+ : __extension__({ \
+ vector float __ret = \
+ (vector float)(__a) * \
+ (vector float)(vector unsigned)((0x7f + ((__b)&0x1F)) << 23); \
+ __builtin_vsx_xvcvspsxds(__builtin_vsx_xxsldwi(__ret, __ret, 1)); \
+ }), \
+ vector double \
+ : __extension__({ \
+ vector double __ret = \
+ (vector double)(__a) * \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
+ << 52); \
+ __builtin_convertvector(__ret, vector signed long long); \
+ }))
/* vec_ctul */
#define vec_ctul(__a, __b) \
- _Generic((__a), vector float \
- : __extension__({ \
- vector float __ret = \
- (vector float)(__a) * \
- (vector float)(vector unsigned)((0x7f + (__b)) << 23); \
- __builtin_vsx_xvcvspuxds( \
- __builtin_vsx_xxsldwi(__ret, __ret, 1)); \
- }), \
- vector double \
- : __extension__({ \
- vector double __ret = \
- (vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + __b) \
- << 52); \
- __builtin_convertvector(__ret, vector unsigned long long); \
- }))
+ _Generic( \
+ (__a), vector float \
+ : __extension__({ \
+ vector float __ret = \
+ (vector float)(__a) * \
+ (vector float)(vector unsigned)((0x7f + ((__b)&0x1F)) << 23); \
+ __builtin_vsx_xvcvspuxds(__builtin_vsx_xxsldwi(__ret, __ret, 1)); \
+ }), \
+ vector double \
+ : __extension__({ \
+ vector double __ret = \
+ (vector double)(__a) * \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
+ << 52); \
+ __builtin_convertvector(__ret, vector unsigned long long); \
+ }))
#endif
#else // __LITTLE_ENDIAN__
/* vec_ctsl */
#ifdef __VSX__
#define vec_ctsl(__a, __b) \
- _Generic((__a), vector float \
- : __extension__({ \
- vector float __ret = \
- (vector float)(__a) * \
- (vector float)(vector unsigned)((0x7f + (__b)) << 23); \
- __builtin_vsx_xvcvspsxds(__ret); \
- }), \
- vector double \
- : __extension__({ \
+ _Generic((__a), \
+ vector float: __extension__({ \
+ vector float __ret = \
+ (vector float)(__a) * \
+ (vector float)(vector unsigned)((0x7f + ((__b)&0x1F)) << 23); \
+ __builtin_vsx_xvcvspsxds(__ret); \
+ }), \
+ vector double: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + __b) \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
<< 52); \
__builtin_convertvector(__ret, vector signed long long); \
}))
@@ -3380,14 +3430,16 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
: __extension__({ \
vector float __ret = \
(vector float)(__a) * \
- (vector float)(vector unsigned)((0x7f + (__b)) << 23); \
+ (vector float)(vector unsigned)((0x7f + ((__b)&0x1F)) \
+ << 23); \
__builtin_vsx_xvcvspuxds(__ret); \
}), \
vector double \
: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
- (vector double)(vector unsigned long long)((0x3ffULL + __b) \
+ (vector double)(vector unsigned long long)((0x3ffULL + \
+ ((__b)&0x1F)) \
<< 52); \
__builtin_convertvector(__ret, vector unsigned long long); \
}))
@@ -6484,12 +6536,12 @@ vec_nand(vector signed char __a, vector signed char __b) {
static __inline__ vector signed char __ATTRS_o_ai
vec_nand(vector signed char __a, vector bool char __b) {
- return ~(__a & __b);
+ return ~(__a & (vector signed char)__b);
}
static __inline__ vector signed char __ATTRS_o_ai
vec_nand(vector bool char __a, vector signed char __b) {
- return ~(__a & __b);
+ return (vector signed char)~(__a & (vector bool char)__b);
}
static __inline__ vector unsigned char __ATTRS_o_ai
@@ -6499,12 +6551,12 @@ vec_nand(vector unsigned char __a, vector unsigned char __b) {
static __inline__ vector unsigned char __ATTRS_o_ai
vec_nand(vector unsigned char __a, vector bool char __b) {
- return ~(__a & __b);
+ return ~(__a & (vector unsigned char)__b);
}
static __inline__ vector unsigned char __ATTRS_o_ai
vec_nand(vector bool char __a, vector unsigned char __b) {
- return ~(__a & __b);
+ return (vector unsigned char)~(__a & (vector bool char)__b);
}
static __inline__ vector bool char __ATTRS_o_ai vec_nand(vector bool char __a,
@@ -6519,12 +6571,12 @@ vec_nand(vector signed short __a, vector signed short __b) {
static __inline__ vector signed short __ATTRS_o_ai
vec_nand(vector signed short __a, vector bool short __b) {
- return ~(__a & __b);
+ return ~(__a & (vector signed short)__b);
}
static __inline__ vector signed short __ATTRS_o_ai
vec_nand(vector bool short __a, vector signed short __b) {
- return ~(__a & __b);
+ return (vector signed short)~(__a & (vector bool short)__b);
}
static __inline__ vector unsigned short __ATTRS_o_ai
@@ -6534,7 +6586,7 @@ vec_nand(vector unsigned short __a, vector unsigned short __b) {
static __inline__ vector unsigned short __ATTRS_o_ai
vec_nand(vector unsigned short __a, vector bool short __b) {
- return ~(__a & __b);
+ return ~(__a & (vector unsigned short)__b);
}
static __inline__ vector bool short __ATTRS_o_ai
@@ -6549,12 +6601,12 @@ vec_nand(vector signed int __a, vector signed int __b) {
static __inline__ vector signed int __ATTRS_o_ai vec_nand(vector signed int __a,
vector bool int __b) {
- return ~(__a & __b);
+ return ~(__a & (vector signed int)__b);
}
static __inline__ vector signed int __ATTRS_o_ai
vec_nand(vector bool int __a, vector signed int __b) {
- return ~(__a & __b);
+ return (vector signed int)~(__a & (vector bool int)__b);
}
static __inline__ vector unsigned int __ATTRS_o_ai
@@ -6564,12 +6616,12 @@ vec_nand(vector unsigned int __a, vector unsigned int __b) {
static __inline__ vector unsigned int __ATTRS_o_ai
vec_nand(vector unsigned int __a, vector bool int __b) {
- return ~(__a & __b);
+ return ~(__a & (vector unsigned int)__b);
}
static __inline__ vector unsigned int __ATTRS_o_ai
vec_nand(vector bool int __a, vector unsigned int __b) {
- return ~(__a & __b);
+ return (vector unsigned int)~(__a & (vector bool int)__b);
}
static __inline__ vector bool int __ATTRS_o_ai vec_nand(vector bool int __a,
@@ -6590,12 +6642,12 @@ vec_nand(vector signed long long __a, vector signed long long __b) {
static __inline__ vector signed long long __ATTRS_o_ai
vec_nand(vector signed long long __a, vector bool long long __b) {
- return ~(__a & __b);
+ return ~(__a & (vector signed long long)__b);
}
static __inline__ vector signed long long __ATTRS_o_ai
vec_nand(vector bool long long __a, vector signed long long __b) {
- return ~(__a & __b);
+ return (vector signed long long)~(__a & (vector bool long long)__b);
}
static __inline__ vector unsigned long long __ATTRS_o_ai
@@ -6605,12 +6657,12 @@ vec_nand(vector unsigned long long __a, vector unsigned long long __b) {
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_nand(vector unsigned long long __a, vector bool long long __b) {
- return ~(__a & __b);
+ return ~(__a & (vector unsigned long long)__b);
}
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_nand(vector bool long long __a, vector unsigned long long __b) {
- return ~(__a & __b);
+ return (vector unsigned long long)~(__a & (vector bool long long)__b);
}
static __inline__ vector bool long long __ATTRS_o_ai
@@ -6998,12 +7050,12 @@ vec_orc(vector signed char __a, vector signed char __b) {
static __inline__ vector signed char __ATTRS_o_ai
vec_orc(vector signed char __a, vector bool char __b) {
- return __a | ~__b;
+ return __a | (vector signed char)~__b;
}
static __inline__ vector signed char __ATTRS_o_ai
vec_orc(vector bool char __a, vector signed char __b) {
- return __a | ~__b;
+ return (vector signed char)(__a | (vector bool char)~__b);
}
static __inline__ vector unsigned char __ATTRS_o_ai
@@ -7013,12 +7065,12 @@ vec_orc(vector unsigned char __a, vector unsigned char __b) {
static __inline__ vector unsigned char __ATTRS_o_ai
vec_orc(vector unsigned char __a, vector bool char __b) {
- return __a | ~__b;
+ return __a | (vector unsigned char)~__b;
}
static __inline__ vector unsigned char __ATTRS_o_ai
vec_orc(vector bool char __a, vector unsigned char __b) {
- return __a | ~__b;
+ return (vector unsigned char)(__a | (vector bool char)~__b);
}
static __inline__ vector bool char __ATTRS_o_ai vec_orc(vector bool char __a,
@@ -7033,12 +7085,12 @@ vec_orc(vector signed short __a, vector signed short __b) {
static __inline__ vector signed short __ATTRS_o_ai
vec_orc(vector signed short __a, vector bool short __b) {
- return __a | ~__b;
+ return __a | (vector signed short)~__b;
}
static __inline__ vector signed short __ATTRS_o_ai
vec_orc(vector bool short __a, vector signed short __b) {
- return __a | ~__b;
+ return (vector signed short)(__a | (vector bool short)~__b);
}
static __inline__ vector unsigned short __ATTRS_o_ai
@@ -7048,12 +7100,12 @@ vec_orc(vector unsigned short __a, vector unsigned short __b) {
static __inline__ vector unsigned short __ATTRS_o_ai
vec_orc(vector unsigned short __a, vector bool short __b) {
- return __a | ~__b;
+ return __a | (vector unsigned short)~__b;
}
static __inline__ vector unsigned short __ATTRS_o_ai
vec_orc(vector bool short __a, vector unsigned short __b) {
- return __a | ~__b;
+ return (vector unsigned short)(__a | (vector bool short)~__b);
}
static __inline__ vector bool short __ATTRS_o_ai
@@ -7068,12 +7120,12 @@ vec_orc(vector signed int __a, vector signed int __b) {
static __inline__ vector signed int __ATTRS_o_ai vec_orc(vector signed int __a,
vector bool int __b) {
- return __a | ~__b;
+ return __a | (vector signed int)~__b;
}
static __inline__ vector signed int __ATTRS_o_ai
vec_orc(vector bool int __a, vector signed int __b) {
- return __a | ~__b;
+ return (vector signed int)(__a | (vector bool int)~__b);
}
static __inline__ vector unsigned int __ATTRS_o_ai
@@ -7083,12 +7135,12 @@ vec_orc(vector unsigned int __a, vector unsigned int __b) {
static __inline__ vector unsigned int __ATTRS_o_ai
vec_orc(vector unsigned int __a, vector bool int __b) {
- return __a | ~__b;
+ return __a | (vector unsigned int)~__b;
}
static __inline__ vector unsigned int __ATTRS_o_ai
vec_orc(vector bool int __a, vector unsigned int __b) {
- return __a | ~__b;
+ return (vector unsigned int)(__a | (vector bool int)~__b);
}
static __inline__ vector bool int __ATTRS_o_ai vec_orc(vector bool int __a,
@@ -7098,12 +7150,17 @@ static __inline__ vector bool int __ATTRS_o_ai vec_orc(vector bool int __a,
static __inline__ vector float __ATTRS_o_ai
vec_orc(vector bool int __a, vector float __b) {
- return (vector float)(__a | ~(vector unsigned int)__b);
+ return (vector float)(__a | ~(vector bool int)__b);
}
static __inline__ vector float __ATTRS_o_ai
vec_orc(vector float __a, vector bool int __b) {
- return (vector float)((vector unsigned int)__a | ~__b);
+ return (vector float)((vector bool int)__a | ~__b);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_orc(vector float __a,
+ vector float __b) {
+ return (vector float)((vector unsigned int)__a | ~(vector unsigned int)__b);
}
static __inline__ vector signed long long __ATTRS_o_ai
@@ -7113,12 +7170,12 @@ vec_orc(vector signed long long __a, vector signed long long __b) {
static __inline__ vector signed long long __ATTRS_o_ai
vec_orc(vector signed long long __a, vector bool long long __b) {
- return __a | ~__b;
+ return __a | (vector signed long long)~__b;
}
static __inline__ vector signed long long __ATTRS_o_ai
vec_orc(vector bool long long __a, vector signed long long __b) {
- return __a | ~__b;
+ return (vector signed long long)(__a | (vector bool long long)~__b);
}
static __inline__ vector unsigned long long __ATTRS_o_ai
@@ -7128,12 +7185,12 @@ vec_orc(vector unsigned long long __a, vector unsigned long long __b) {
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_orc(vector unsigned long long __a, vector bool long long __b) {
- return __a | ~__b;
+ return __a | (vector unsigned long long)~__b;
}
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_orc(vector bool long long __a, vector unsigned long long __b) {
- return __a | ~__b;
+ return (vector unsigned long long)(__a | (vector bool long long)~__b);
}
static __inline__ vector bool long long __ATTRS_o_ai
@@ -7143,12 +7200,18 @@ vec_orc(vector bool long long __a, vector bool long long __b) {
static __inline__ vector double __ATTRS_o_ai
vec_orc(vector double __a, vector bool long long __b) {
- return (vector double)((vector unsigned long long)__a | ~__b);
+ return (vector double)((vector bool long long)__a | ~__b);
}
static __inline__ vector double __ATTRS_o_ai
vec_orc(vector bool long long __a, vector double __b) {
- return (vector double)(__a | ~(vector unsigned long long)__b);
+ return (vector double)(__a | ~(vector bool long long)__b);
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_orc(vector double __a,
+ vector double __b) {
+ return (vector double)((vector unsigned long long)__a |
+ ~(vector unsigned long long)__b);
}
#endif
@@ -8258,14 +8321,20 @@ vec_rl(vector signed long long __a, vector unsigned long long __b) {
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
- return __builtin_altivec_vrld(__a, __b);
+ return (vector unsigned long long)__builtin_altivec_vrld(
+ (vector long long)__a, __b);
}
#endif
#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_rl(vector signed __int128 __a, vector unsigned __int128 __b) {
- return (__b << __a)|(__b >> ((__CHAR_BIT__ * sizeof(vector signed __int128)) - __a));
+ return (vector signed __int128)(((vector unsigned __int128)__b
+ << (vector unsigned __int128)__a) |
+ ((vector unsigned __int128)__b >>
+ ((__CHAR_BIT__ *
+ sizeof(vector unsigned __int128)) -
+ (vector unsigned __int128)__a)));
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -8299,7 +8368,9 @@ vec_rlmi(vector unsigned __int128 __a, vector unsigned __int128 __b,
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_rlmi(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
- return __builtin_altivec_vrlqmi(__a, __c, __b);
+ return (vector signed __int128)__builtin_altivec_vrlqmi(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__c,
+ (vector unsigned __int128)__b);
}
#endif
@@ -8352,7 +8423,8 @@ vec_rlnm(vector signed __int128 __a, vector signed __int128 __b,
__builtin_shufflevector(TmpB, TmpC, -1, -1, -1, -1, -1, 31, 30, 15, -1,
-1, -1, -1, -1, -1, -1, -1);
#endif
- return __builtin_altivec_vrlqnm(__a, (vector unsigned __int128) MaskAndShift);
+ return (vector signed __int128)__builtin_altivec_vrlqnm(
+ (vector unsigned __int128)__a, (vector unsigned __int128)MaskAndShift);
}
#endif
@@ -8399,9 +8471,20 @@ static __inline__ vector float __ATTRS_o_ai vec_round(vector float __a) {
}
#ifdef __VSX__
+#ifdef __XL_COMPAT_ALTIVEC__
+static __inline__ vector double __ATTRS_o_ai vec_rint(vector double __a);
+static __inline__ vector double __ATTRS_o_ai vec_round(vector double __a) {
+ double __fpscr = __builtin_readflm();
+ __builtin_setrnd(0);
+ vector double __rounded = vec_rint(__a);
+ __builtin_setflm(__fpscr);
+ return __rounded;
+}
+#else
static __inline__ vector double __ATTRS_o_ai vec_round(vector double __a) {
return __builtin_vsx_xvrdpi(__a);
}
+#endif
/* vec_rint */
@@ -8839,7 +8922,7 @@ static __inline__ vector long long __ATTRS_o_ai
vec_sl(vector long long __a, vector unsigned long long __b) {
return (vector long long)vec_sl((vector unsigned long long)__a, __b);
}
-#else
+#elif defined(__VSX__)
static __inline__ vector unsigned char __ATTRS_o_ai
vec_vspltb(vector unsigned char __a, unsigned char __b);
static __inline__ vector unsigned long long __ATTRS_o_ai
@@ -8885,7 +8968,7 @@ static __inline__ vector long long __ATTRS_o_ai
vec_sl(vector long long __a, vector unsigned long long __b) {
return (vector long long)vec_sl((vector unsigned long long)__a, __b);
}
-#endif
+#endif /* __VSX__ */
/* vec_vslb */
@@ -10350,7 +10433,7 @@ static __inline__ vector long long __ATTRS_o_ai
vec_sr(vector long long __a, vector unsigned long long __b) {
return (vector long long)vec_sr((vector unsigned long long)__a, __b);
}
-#else
+#elif defined(__VSX__)
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_sr(vector unsigned long long __a, vector unsigned long long __b) {
__b %= (vector unsigned long long)(sizeof(unsigned long long) * __CHAR_BIT__);
@@ -10394,7 +10477,7 @@ static __inline__ vector long long __ATTRS_o_ai
vec_sr(vector long long __a, vector unsigned long long __b) {
return (vector long long)vec_sr((vector unsigned long long)__a, __b);
}
-#endif
+#endif /* __VSX__ */
/* vec_vsrb */
@@ -10480,7 +10563,7 @@ static __inline__ vector unsigned long long __ATTRS_o_ai
vec_sra(vector unsigned long long __a, vector unsigned long long __b) {
return (vector unsigned long long)((vector signed long long)__a >> __b);
}
-#else
+#elif defined(__VSX__)
static __inline__ vector signed long long __ATTRS_o_ai
vec_sra(vector signed long long __a, vector unsigned long long __b) {
__b %= (vector unsigned long long)(sizeof(unsigned long long) * __CHAR_BIT__);
@@ -10492,7 +10575,7 @@ vec_sra(vector unsigned long long __a, vector unsigned long long __b) {
__b %= (vector unsigned long long)(sizeof(unsigned long long) * __CHAR_BIT__);
return (vector unsigned long long)((vector signed long long)__a >> __b);
}
-#endif
+#endif /* __VSX__ */
/* vec_vsrab */
@@ -12041,13 +12124,15 @@ vec_subc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_subc(vector signed __int128 __a, vector signed __int128 __b) {
- return __builtin_altivec_vsubcuq(__a, __b);
+ return (vector signed __int128)__builtin_altivec_vsubcuq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b);
}
#endif
static __inline__ vector unsigned char __attribute__((__always_inline__))
vec_subc_u128(vector unsigned char __a, vector unsigned char __b) {
- return (vector unsigned char)__builtin_altivec_vsubcuq(__a, __b);
+ return (vector unsigned char)__builtin_altivec_vsubcuq_c(
+ (vector unsigned char)__a, (vector unsigned char)__b);
}
#endif // __POWER8_VECTOR__
@@ -12269,7 +12354,7 @@ vec_vsubuqm(vector unsigned __int128 __a, vector unsigned __int128 __b) {
static __inline__ vector unsigned char __attribute__((__always_inline__))
vec_sub_u128(vector unsigned char __a, vector unsigned char __b) {
- return __builtin_altivec_vsubuqm(__a, __b);
+ return (vector unsigned char)__builtin_altivec_vsubuqm(__a, __b);
}
/* vec_vsubeuqm */
@@ -12278,7 +12363,9 @@ vec_sub_u128(vector unsigned char __a, vector unsigned char __b) {
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_vsubeuqm(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
- return __builtin_altivec_vsubeuqm(__a, __b, __c);
+ return (vector signed __int128)__builtin_altivec_vsubeuqm(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+ (vector unsigned __int128)__c);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -12290,7 +12377,9 @@ vec_vsubeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b,
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_sube(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
- return __builtin_altivec_vsubeuqm(__a, __b, __c);
+ return (vector signed __int128)__builtin_altivec_vsubeuqm(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+ (vector unsigned __int128)__c);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -12303,7 +12392,9 @@ vec_sube(vector unsigned __int128 __a, vector unsigned __int128 __b,
static __inline__ vector unsigned char __attribute__((__always_inline__))
vec_sube_u128(vector unsigned char __a, vector unsigned char __b,
vector unsigned char __c) {
- return (vector unsigned char)__builtin_altivec_vsubeuqm(__a, __b, __c);
+ return (vector unsigned char)__builtin_altivec_vsubeuqm_c(
+ (vector unsigned char)__a, (vector unsigned char)__b,
+ (vector unsigned char)__c);
}
/* vec_vsubcuq */
@@ -12311,7 +12402,8 @@ vec_sube_u128(vector unsigned char __a, vector unsigned char __b,
#ifdef __SIZEOF_INT128__
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_vsubcuq(vector signed __int128 __a, vector signed __int128 __b) {
- return __builtin_altivec_vsubcuq(__a, __b);
+ return (vector signed __int128)__builtin_altivec_vsubcuq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -12324,7 +12416,9 @@ vec_vsubcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_vsubecuq(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
- return __builtin_altivec_vsubecuq(__a, __b, __c);
+ return (vector signed __int128)__builtin_altivec_vsubecuq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+ (vector unsigned __int128)__c);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -12352,7 +12446,9 @@ vec_subec(vector unsigned int __a, vector unsigned int __b,
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_subec(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
- return __builtin_altivec_vsubecuq(__a, __b, __c);
+ return (vector signed __int128)__builtin_altivec_vsubecuq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+ (vector unsigned __int128)__c);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -12365,7 +12461,9 @@ vec_subec(vector unsigned __int128 __a, vector unsigned __int128 __b,
static __inline__ vector unsigned char __attribute__((__always_inline__))
vec_subec_u128(vector unsigned char __a, vector unsigned char __b,
vector unsigned char __c) {
- return (vector unsigned char)__builtin_altivec_vsubecuq(__a, __b, __c);
+ return (vector unsigned char)__builtin_altivec_vsubecuq_c(
+ (vector unsigned char)__a, (vector unsigned char)__b,
+ (vector unsigned char)__c);
}
#endif // __POWER8_VECTOR__
@@ -13441,74 +13539,74 @@ vec_vxor(vector bool long long __a, vector bool long long __b) {
/* vec_extract */
static __inline__ signed char __ATTRS_o_ai vec_extract(vector signed char __a,
- unsigned int __b) {
+ signed int __b) {
return __a[__b & 0xf];
}
static __inline__ unsigned char __ATTRS_o_ai
-vec_extract(vector unsigned char __a, unsigned int __b) {
+vec_extract(vector unsigned char __a, signed int __b) {
return __a[__b & 0xf];
}
static __inline__ unsigned char __ATTRS_o_ai vec_extract(vector bool char __a,
- unsigned int __b) {
+ signed int __b) {
return __a[__b & 0xf];
}
static __inline__ signed short __ATTRS_o_ai vec_extract(vector signed short __a,
- unsigned int __b) {
+ signed int __b) {
return __a[__b & 0x7];
}
static __inline__ unsigned short __ATTRS_o_ai
-vec_extract(vector unsigned short __a, unsigned int __b) {
+vec_extract(vector unsigned short __a, signed int __b) {
return __a[__b & 0x7];
}
static __inline__ unsigned short __ATTRS_o_ai vec_extract(vector bool short __a,
- unsigned int __b) {
+ signed int __b) {
return __a[__b & 0x7];
}
static __inline__ signed int __ATTRS_o_ai vec_extract(vector signed int __a,
- unsigned int __b) {
+ signed int __b) {
return __a[__b & 0x3];
}
static __inline__ unsigned int __ATTRS_o_ai vec_extract(vector unsigned int __a,
- unsigned int __b) {
+ signed int __b) {
return __a[__b & 0x3];
}
static __inline__ unsigned int __ATTRS_o_ai vec_extract(vector bool int __a,
- unsigned int __b) {
+ signed int __b) {
return __a[__b & 0x3];
}
#ifdef __VSX__
static __inline__ signed long long __ATTRS_o_ai
-vec_extract(vector signed long long __a, unsigned int __b) {
+vec_extract(vector signed long long __a, signed int __b) {
return __a[__b & 0x1];
}
static __inline__ unsigned long long __ATTRS_o_ai
-vec_extract(vector unsigned long long __a, unsigned int __b) {
+vec_extract(vector unsigned long long __a, signed int __b) {
return __a[__b & 0x1];
}
static __inline__ unsigned long long __ATTRS_o_ai
-vec_extract(vector bool long long __a, unsigned int __b) {
+vec_extract(vector bool long long __a, signed int __b) {
return __a[__b & 0x1];
}
static __inline__ double __ATTRS_o_ai vec_extract(vector double __a,
- unsigned int __b) {
+ signed int __b) {
return __a[__b & 0x1];
}
#endif
static __inline__ float __ATTRS_o_ai vec_extract(vector float __a,
- unsigned int __b) {
+ signed int __b) {
return __a[__b & 0x3];
}
@@ -13568,82 +13666,82 @@ vec_extract_fp32_from_shortl(vector unsigned short __a) {
static __inline__ vector signed char __ATTRS_o_ai
vec_insert(signed char __a, vector signed char __b, int __c) {
- __b[__c] = __a;
+ __b[__c & 0xF] = __a;
return __b;
}
static __inline__ vector unsigned char __ATTRS_o_ai
vec_insert(unsigned char __a, vector unsigned char __b, int __c) {
- __b[__c] = __a;
+ __b[__c & 0xF] = __a;
return __b;
}
static __inline__ vector bool char __ATTRS_o_ai vec_insert(unsigned char __a,
vector bool char __b,
int __c) {
- __b[__c] = __a;
+ __b[__c & 0xF] = __a;
return __b;
}
static __inline__ vector signed short __ATTRS_o_ai
vec_insert(signed short __a, vector signed short __b, int __c) {
- __b[__c] = __a;
+ __b[__c & 0x7] = __a;
return __b;
}
static __inline__ vector unsigned short __ATTRS_o_ai
vec_insert(unsigned short __a, vector unsigned short __b, int __c) {
- __b[__c] = __a;
+ __b[__c & 0x7] = __a;
return __b;
}
static __inline__ vector bool short __ATTRS_o_ai
vec_insert(unsigned short __a, vector bool short __b, int __c) {
- __b[__c] = __a;
+ __b[__c & 0x7] = __a;
return __b;
}
static __inline__ vector signed int __ATTRS_o_ai
vec_insert(signed int __a, vector signed int __b, int __c) {
- __b[__c] = __a;
+ __b[__c & 0x3] = __a;
return __b;
}
static __inline__ vector unsigned int __ATTRS_o_ai
vec_insert(unsigned int __a, vector unsigned int __b, int __c) {
- __b[__c] = __a;
+ __b[__c & 0x3] = __a;
return __b;
}
static __inline__ vector bool int __ATTRS_o_ai vec_insert(unsigned int __a,
vector bool int __b,
int __c) {
- __b[__c] = __a;
+ __b[__c & 0x3] = __a;
return __b;
}
#ifdef __VSX__
static __inline__ vector signed long long __ATTRS_o_ai
vec_insert(signed long long __a, vector signed long long __b, int __c) {
- __b[__c] = __a;
+ __b[__c & 0x1] = __a;
return __b;
}
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_insert(unsigned long long __a, vector unsigned long long __b, int __c) {
- __b[__c] = __a;
+ __b[__c & 0x1] = __a;
return __b;
}
static __inline__ vector bool long long __ATTRS_o_ai
vec_insert(unsigned long long __a, vector bool long long __b, int __c) {
- __b[__c] = __a;
+ __b[__c & 0x1] = __a;
return __b;
}
static __inline__ vector double __ATTRS_o_ai vec_insert(double __a,
vector double __b,
int __c) {
- __b[__c] = __a;
+ __b[__c & 0x1] = __a;
return __b;
}
#endif
@@ -13651,7 +13749,7 @@ static __inline__ vector double __ATTRS_o_ai vec_insert(double __a,
static __inline__ vector float __ATTRS_o_ai vec_insert(float __a,
vector float __b,
int __c) {
- __b[__c] = __a;
+ __b[__c & 0x3] = __a;
return __b;
}
@@ -14549,67 +14647,86 @@ static __inline__ void __ATTRS_o_ai vec_stvrxl(vector float __a, int __b,
static __inline__ vector signed char __ATTRS_o_ai vec_promote(signed char __a,
int __b) {
- vector signed char __res = (vector signed char)(0);
- __res[__b & 0x7] = __a;
+ const vector signed char __zero = (vector signed char)0;
+ vector signed char __res =
+ __builtin_shufflevector(__zero, __zero, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1);
+ __res[__b & 0xf] = __a;
return __res;
}
static __inline__ vector unsigned char __ATTRS_o_ai
vec_promote(unsigned char __a, int __b) {
- vector unsigned char __res = (vector unsigned char)(0);
- __res[__b & 0x7] = __a;
+ const vector unsigned char __zero = (vector unsigned char)(0);
+ vector unsigned char __res =
+ __builtin_shufflevector(__zero, __zero, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1);
+ __res[__b & 0xf] = __a;
return __res;
}
static __inline__ vector short __ATTRS_o_ai vec_promote(short __a, int __b) {
- vector short __res = (vector short)(0);
+ const vector short __zero = (vector short)(0);
+ vector short __res =
+ __builtin_shufflevector(__zero, __zero, -1, -1, -1, -1, -1, -1, -1, -1);
__res[__b & 0x7] = __a;
return __res;
}
static __inline__ vector unsigned short __ATTRS_o_ai
vec_promote(unsigned short __a, int __b) {
- vector unsigned short __res = (vector unsigned short)(0);
+ const vector unsigned short __zero = (vector unsigned short)(0);
+ vector unsigned short __res =
+ __builtin_shufflevector(__zero, __zero, -1, -1, -1, -1, -1, -1, -1, -1);
__res[__b & 0x7] = __a;
return __res;
}
static __inline__ vector int __ATTRS_o_ai vec_promote(int __a, int __b) {
- vector int __res = (vector int)(0);
+ const vector int __zero = (vector int)(0);
+ vector int __res = __builtin_shufflevector(__zero, __zero, -1, -1, -1, -1);
__res[__b & 0x3] = __a;
return __res;
}
static __inline__ vector unsigned int __ATTRS_o_ai vec_promote(unsigned int __a,
int __b) {
- vector unsigned int __res = (vector unsigned int)(0);
+ const vector unsigned int __zero = (vector unsigned int)(0);
+ vector unsigned int __res =
+ __builtin_shufflevector(__zero, __zero, -1, -1, -1, -1);
__res[__b & 0x3] = __a;
return __res;
}
static __inline__ vector float __ATTRS_o_ai vec_promote(float __a, int __b) {
- vector float __res = (vector float)(0);
+ const vector float __zero = (vector float)(0);
+ vector float __res = __builtin_shufflevector(__zero, __zero, -1, -1, -1, -1);
__res[__b & 0x3] = __a;
return __res;
}
#ifdef __VSX__
static __inline__ vector double __ATTRS_o_ai vec_promote(double __a, int __b) {
- vector double __res = (vector double)(0);
+ const vector double __zero = (vector double)(0);
+ vector double __res = __builtin_shufflevector(__zero, __zero, -1, -1);
__res[__b & 0x1] = __a;
return __res;
}
static __inline__ vector signed long long __ATTRS_o_ai
vec_promote(signed long long __a, int __b) {
- vector signed long long __res = (vector signed long long)(0);
+ const vector signed long long __zero = (vector signed long long)(0);
+ vector signed long long __res =
+ __builtin_shufflevector(__zero, __zero, -1, -1);
__res[__b & 0x1] = __a;
return __res;
}
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_promote(unsigned long long __a, int __b) {
- vector unsigned long long __res = (vector unsigned long long)(0);
+ const vector unsigned long long __zero = (vector unsigned long long)(0);
+ vector unsigned long long __res =
+ __builtin_shufflevector(__zero, __zero, -1, -1);
__res[__b & 0x1] = __a;
return __res;
}
@@ -14812,42 +14929,43 @@ static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a,
#ifdef __VSX__
static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed long long __a,
vector signed long long __b) {
+#ifdef __POWER8_VECTOR__
return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, __b);
+#else
+ // No vcmpequd on Power7 so we xor the two vectors and compare against zero as
+ // 32-bit elements.
+ return vec_all_eq((vector signed int)vec_xor(__a, __b), (vector signed int)0);
+#endif
}
static __inline__ int __ATTRS_o_ai vec_all_eq(vector long long __a,
vector bool long long __b) {
- return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, (vector long long)__b);
+ return vec_all_eq((vector signed long long)__a, (vector signed long long)__b);
}
static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a,
vector unsigned long long __b) {
- return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
- (vector long long)__b);
+ return vec_all_eq((vector signed long long)__a, (vector signed long long)__b);
}
static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a,
vector bool long long __b) {
- return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
- (vector long long)__b);
+ return vec_all_eq((vector signed long long)__a, (vector signed long long)__b);
}
static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
vector long long __b) {
- return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
- (vector long long)__b);
+ return vec_all_eq((vector signed long long)__a, (vector signed long long)__b);
}
static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
vector unsigned long long __b) {
- return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
- (vector long long)__b);
+ return vec_all_eq((vector signed long long)__a, (vector signed long long)__b);
}
static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
vector bool long long __b) {
- return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
- (vector long long)__b);
+ return vec_all_eq((vector signed long long)__a, (vector signed long long)__b);
}
#endif
@@ -14870,12 +14988,20 @@ static __inline__ int __ATTRS_o_ai vec_all_eq(vector double __a,
#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed __int128 __a,
vector signed __int128 __b) {
- return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b);
+ return __builtin_altivec_vcmpequq_p(__CR6_LT, (vector unsigned __int128)__a,
+ (vector signed __int128)__b);
}
static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned __int128 __a,
vector unsigned __int128 __b) {
- return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b);
+ return __builtin_altivec_vcmpequq_p(__CR6_LT, __a,
+ (vector signed __int128)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool __int128 __a,
+ vector bool __int128 __b) {
+ return __builtin_altivec_vcmpequq_p(__CR6_LT, (vector unsigned __int128)__a,
+ (vector signed __int128)__b);
}
#endif
@@ -15815,12 +15941,20 @@ static __inline__ int __ATTRS_o_ai vec_all_ne(vector double __a,
#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed __int128 __a,
vector signed __int128 __b) {
- return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b);
+ return __builtin_altivec_vcmpequq_p(__CR6_EQ, (vector unsigned __int128)__a,
+ __b);
}
static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned __int128 __a,
vector unsigned __int128 __b) {
- return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b);
+ return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a,
+ (vector signed __int128)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool __int128 __a,
+ vector bool __int128 __b) {
+ return __builtin_altivec_vcmpequq_p(__CR6_EQ, (vector unsigned __int128)__a,
+ (vector signed __int128)__b);
}
#endif
@@ -16104,12 +16238,20 @@ static __inline__ int __ATTRS_o_ai vec_any_eq(vector double __a,
#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed __int128 __a,
vector signed __int128 __b) {
- return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b);
+ return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV,
+ (vector unsigned __int128)__a, __b);
}
static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned __int128 __a,
vector unsigned __int128 __b) {
- return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b);
+ return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a,
+ (vector signed __int128)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool __int128 __a,
+ vector bool __int128 __b) {
+ return __builtin_altivec_vcmpequq_p(
+ __CR6_EQ_REV, (vector unsigned __int128)__a, (vector signed __int128)__b);
}
#endif
@@ -17020,43 +17162,43 @@ static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a,
#ifdef __VSX__
static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed long long __a,
vector signed long long __b) {
+#ifdef __POWER8_VECTOR__
return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a, __b);
+#else
+ // Take advantage of the optimized sequence for vec_all_eq when vcmpequd is
+ // not available.
+ return !vec_all_eq(__a, __b);
+#endif
}
static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a,
vector unsigned long long __b) {
- return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, (vector long long)__a,
- (vector long long)__b);
+ return vec_any_ne((vector signed long long)__a, (vector signed long long)__b);
}
static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed long long __a,
vector bool long long __b) {
- return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a,
- (vector signed long long)__b);
+ return vec_any_ne((vector signed long long)__a, (vector signed long long)__b);
}
static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a,
vector bool long long __b) {
- return __builtin_altivec_vcmpequd_p(
- __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
+ return vec_any_ne((vector signed long long)__a, (vector signed long long)__b);
}
static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
vector signed long long __b) {
- return __builtin_altivec_vcmpequd_p(
- __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
+ return vec_any_ne((vector signed long long)__a, (vector signed long long)__b);
}
static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
vector unsigned long long __b) {
- return __builtin_altivec_vcmpequd_p(
- __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
+ return vec_any_ne((vector signed long long)__a, (vector signed long long)__b);
}
static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
vector bool long long __b) {
- return __builtin_altivec_vcmpequd_p(
- __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
+ return vec_any_ne((vector signed long long)__a, (vector signed long long)__b);
}
#endif
@@ -17079,12 +17221,20 @@ static __inline__ int __ATTRS_o_ai vec_any_ne(vector double __a,
#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed __int128 __a,
vector signed __int128 __b) {
- return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
+ return __builtin_altivec_vcmpequq_p(__CR6_LT_REV,
+ (vector unsigned __int128)__a, __b);
}
static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned __int128 __a,
vector unsigned __int128 __b) {
- return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
+ return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a,
+ (vector signed __int128)__b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool __int128 __a,
+ vector bool __int128 __b) {
+ return __builtin_altivec_vcmpequq_p(
+ __CR6_LT_REV, (vector unsigned __int128)__a, (vector signed __int128)__b);
}
#endif
@@ -17203,34 +17353,36 @@ provided.
#define vec_ncipher_be __builtin_altivec_crypto_vncipher
#define vec_ncipherlast_be __builtin_altivec_crypto_vncipherlast
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vsbox(vector unsigned long long __a) {
+#ifdef __VSX__
+static __inline__ vector unsigned char __attribute__((__always_inline__))
+__builtin_crypto_vsbox(vector unsigned char __a) {
return __builtin_altivec_crypto_vsbox(__a);
}
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vcipher(vector unsigned long long __a,
- vector unsigned long long __b) {
+static __inline__ vector unsigned char __attribute__((__always_inline__))
+__builtin_crypto_vcipher(vector unsigned char __a,
+ vector unsigned char __b) {
return __builtin_altivec_crypto_vcipher(__a, __b);
}
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vcipherlast(vector unsigned long long __a,
- vector unsigned long long __b) {
+static __inline__ vector unsigned char __attribute__((__always_inline__))
+__builtin_crypto_vcipherlast(vector unsigned char __a,
+ vector unsigned char __b) {
return __builtin_altivec_crypto_vcipherlast(__a, __b);
}
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vncipher(vector unsigned long long __a,
- vector unsigned long long __b) {
+static __inline__ vector unsigned char __attribute__((__always_inline__))
+__builtin_crypto_vncipher(vector unsigned char __a,
+ vector unsigned char __b) {
return __builtin_altivec_crypto_vncipher(__a, __b);
}
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vncipherlast(vector unsigned long long __a,
- vector unsigned long long __b) {
+static __inline__ vector unsigned char __attribute__((__always_inline__))
+__builtin_crypto_vncipherlast(vector unsigned char __a,
+ vector unsigned char __b) {
return __builtin_altivec_crypto_vncipherlast(__a, __b);
}
+#endif /* __VSX__ */
#define __builtin_crypto_vshasigmad __builtin_altivec_crypto_vshasigmad
#define __builtin_crypto_vshasigmaw __builtin_altivec_crypto_vshasigmaw
@@ -17245,13 +17397,17 @@ __builtin_crypto_vncipherlast(vector unsigned long long __a,
static __inline__ vector bool char __ATTRS_o_ai
vec_permxor(vector bool char __a, vector bool char __b,
vector bool char __c) {
- return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
+ return (vector bool char)__builtin_altivec_crypto_vpermxor(
+ (vector unsigned char)__a, (vector unsigned char)__b,
+ (vector unsigned char)__c);
}
static __inline__ vector signed char __ATTRS_o_ai
vec_permxor(vector signed char __a, vector signed char __b,
vector signed char __c) {
- return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
+ return (vector signed char)__builtin_altivec_crypto_vpermxor(
+ (vector unsigned char)__a, (vector unsigned char)__b,
+ (vector unsigned char)__c);
}
static __inline__ vector unsigned char __ATTRS_o_ai
@@ -17313,7 +17469,7 @@ __builtin_crypto_vpmsumb(vector unsigned long long __a,
static __inline__ vector signed char __ATTRS_o_ai
vec_vgbbd(vector signed char __a) {
- return __builtin_altivec_vgbbd((vector unsigned char)__a);
+ return (vector signed char)__builtin_altivec_vgbbd((vector unsigned char)__a);
}
#define vec_pmsum_be __builtin_crypto_vpmsumb
@@ -17326,32 +17482,44 @@ vec_vgbbd(vector unsigned char __a) {
static __inline__ vector signed long long __ATTRS_o_ai
vec_gbb(vector signed long long __a) {
- return __builtin_altivec_vgbbd((vector unsigned char)__a);
+ return (vector signed long long)__builtin_altivec_vgbbd(
+ (vector unsigned char)__a);
}
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_gbb(vector unsigned long long __a) {
- return __builtin_altivec_vgbbd((vector unsigned char)__a);
+ return (vector unsigned long long)__builtin_altivec_vgbbd(
+ (vector unsigned char)__a);
}
static __inline__ vector long long __ATTRS_o_ai
vec_vbpermq(vector signed char __a, vector signed char __b) {
- return __builtin_altivec_vbpermq((vector unsigned char)__a,
- (vector unsigned char)__b);
+ return (vector long long)__builtin_altivec_vbpermq((vector unsigned char)__a,
+ (vector unsigned char)__b);
}
static __inline__ vector long long __ATTRS_o_ai
vec_vbpermq(vector unsigned char __a, vector unsigned char __b) {
- return __builtin_altivec_vbpermq(__a, __b);
+ return (vector long long)__builtin_altivec_vbpermq(__a, __b);
}
#if defined(__powerpc64__) && defined(__SIZEOF_INT128__)
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
+static __inline__ vector unsigned long long __ATTRS_o_ai
vec_bperm(vector unsigned __int128 __a, vector unsigned char __b) {
return __builtin_altivec_vbpermq((vector unsigned char)__a,
(vector unsigned char)__b);
}
#endif
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_bperm(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vbpermq(__a, __b);
+}
+#endif // __POWER8_VECTOR__
+#ifdef __POWER9_VECTOR__
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_bperm(vector unsigned long long __a, vector unsigned char __b) {
+ return __builtin_altivec_vbpermd(__a, __b);
+}
#endif
@@ -17715,26 +17883,26 @@ vec_xl_be(signed long long __offset, const unsigned __int128 *__ptr) {
#if defined(__POWER10_VECTOR__) && defined(__VSX__) && \
defined(__SIZEOF_INT128__)
-/* vect_xl_sext */
+/* vec_xl_sext */
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
vec_xl_sext(ptrdiff_t __offset, const signed char *__pointer) {
- return (vector unsigned __int128)*(__pointer + __offset);
+ return (vector signed __int128)*(__pointer + __offset);
}
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
vec_xl_sext(ptrdiff_t __offset, const signed short *__pointer) {
- return (vector unsigned __int128)*(__pointer + __offset);
+ return (vector signed __int128)*(__pointer + __offset);
}
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
vec_xl_sext(ptrdiff_t __offset, const signed int *__pointer) {
- return (vector unsigned __int128)*(__pointer + __offset);
+ return (vector signed __int128)*(__pointer + __offset);
}
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
vec_xl_sext(ptrdiff_t __offset, const signed long long *__pointer) {
- return (vector unsigned __int128)*(__pointer + __offset);
+ return (vector signed __int128)*(__pointer + __offset);
}
/* vec_xl_zext */
@@ -18198,13 +18366,17 @@ vec_expandm(vector unsigned __int128 __a) {
#define vec_cntm(__a, __mp) \
_Generic((__a), vector unsigned char \
- : __builtin_altivec_vcntmbb((__a), (unsigned int)(__mp)), \
+ : __builtin_altivec_vcntmbb((vector unsigned char)(__a), \
+ (unsigned char)(__mp)), \
vector unsigned short \
- : __builtin_altivec_vcntmbh((__a), (unsigned int)(__mp)), \
+ : __builtin_altivec_vcntmbh((vector unsigned short)(__a), \
+ (unsigned char)(__mp)), \
vector unsigned int \
- : __builtin_altivec_vcntmbw((__a), (unsigned int)(__mp)), \
+ : __builtin_altivec_vcntmbw((vector unsigned int)(__a), \
+ (unsigned char)(__mp)), \
vector unsigned long long \
- : __builtin_altivec_vcntmbd((__a), (unsigned int)(__mp)))
+ : __builtin_altivec_vcntmbd((vector unsigned long long)(__a), \
+ (unsigned char)(__mp)))
/* vec_gen[b|h|w|d|q]m */
@@ -18265,43 +18437,52 @@ vec_cfuge(vector unsigned long long __a, vector unsigned long long __b) {
#ifdef __SIZEOF_INT128__
#define vec_ternarylogic(__a, __b, __c, __imm) \
_Generic((__a), vector unsigned char \
- : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
- (vector unsigned long long)(__b), \
- (vector unsigned long long)(__c), (__imm)), \
+ : (vector unsigned char)__builtin_vsx_xxeval( \
+ (vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
vector unsigned short \
- : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
- (vector unsigned long long)(__b), \
- (vector unsigned long long)(__c), (__imm)), \
+ : (vector unsigned short)__builtin_vsx_xxeval( \
+ (vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
vector unsigned int \
- : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
- (vector unsigned long long)(__b), \
- (vector unsigned long long)(__c), (__imm)), \
+ : (vector unsigned int)__builtin_vsx_xxeval( \
+ (vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
vector unsigned long long \
- : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
- (vector unsigned long long)(__b), \
- (vector unsigned long long)(__c), (__imm)), \
+ : (vector unsigned long long)__builtin_vsx_xxeval( \
+ (vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
vector unsigned __int128 \
- : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
- (vector unsigned long long)(__b), \
- (vector unsigned long long)(__c), (__imm)))
+ : (vector unsigned __int128)__builtin_vsx_xxeval( \
+ (vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)))
#else
#define vec_ternarylogic(__a, __b, __c, __imm) \
_Generic((__a), vector unsigned char \
- : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
- (vector unsigned long long)(__b), \
- (vector unsigned long long)(__c), (__imm)), \
+ : (vector unsigned char)__builtin_vsx_xxeval( \
+ (vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
vector unsigned short \
- : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
- (vector unsigned long long)(__b), \
- (vector unsigned long long)(__c), (__imm)), \
+ : (vector unsigned short)__builtin_vsx_xxeval( \
+ (vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
vector unsigned int \
- : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
- (vector unsigned long long)(__b), \
- (vector unsigned long long)(__c), (__imm)), \
+ : (vector unsigned int)__builtin_vsx_xxeval( \
+ (vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
vector unsigned long long \
- : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
- (vector unsigned long long)(__b), \
- (vector unsigned long long)(__c), (__imm)))
+ : (vector unsigned long long)__builtin_vsx_xxeval( \
+ (vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)))
#endif /* __SIZEOF_INT128__ */
#endif /* __VSX__ */
@@ -18309,53 +18490,63 @@ vec_cfuge(vector unsigned long long __a, vector unsigned long long __b) {
#ifdef __VSX__
#define vec_genpcvm(__a, __imm) \
- _Generic((__a), vector unsigned char \
- : __builtin_vsx_xxgenpcvbm((__a), (int)(__imm)), \
- vector unsigned short \
- : __builtin_vsx_xxgenpcvhm((__a), (int)(__imm)), \
- vector unsigned int \
- : __builtin_vsx_xxgenpcvwm((__a), (int)(__imm)), \
- vector unsigned long long \
- : __builtin_vsx_xxgenpcvdm((__a), (int)(__imm)))
+ _Generic( \
+ (__a), vector unsigned char \
+ : __builtin_vsx_xxgenpcvbm((vector unsigned char)(__a), (int)(__imm)), \
+ vector unsigned short \
+ : __builtin_vsx_xxgenpcvhm((vector unsigned short)(__a), (int)(__imm)), \
+ vector unsigned int \
+ : __builtin_vsx_xxgenpcvwm((vector unsigned int)(__a), (int)(__imm)), \
+ vector unsigned long long \
+ : __builtin_vsx_xxgenpcvdm((vector unsigned long long)(__a), \
+ (int)(__imm)))
#endif /* __VSX__ */
-/* vec_clrl */
+/* vec_clr_first */
static __inline__ vector signed char __ATTRS_o_ai
-vec_clrl(vector signed char __a, unsigned int __n) {
+vec_clr_first(vector signed char __a, unsigned int __n) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vclrrb(__a, __n);
+ return (vector signed char)__builtin_altivec_vclrrb((vector unsigned char)__a,
+ __n);
#else
- return __builtin_altivec_vclrlb( __a, __n);
+ return (vector signed char)__builtin_altivec_vclrlb((vector unsigned char)__a,
+ __n);
#endif
}
static __inline__ vector unsigned char __ATTRS_o_ai
-vec_clrl(vector unsigned char __a, unsigned int __n) {
+vec_clr_first(vector unsigned char __a, unsigned int __n) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vclrrb((vector signed char)__a, __n);
+ return (vector unsigned char)__builtin_altivec_vclrrb(
+ (vector unsigned char)__a, __n);
#else
- return __builtin_altivec_vclrlb((vector signed char)__a, __n);
+ return (vector unsigned char)__builtin_altivec_vclrlb(
+ (vector unsigned char)__a, __n);
#endif
}
-/* vec_clrr */
+/* vec_clr_last */
static __inline__ vector signed char __ATTRS_o_ai
-vec_clrr(vector signed char __a, unsigned int __n) {
+vec_clr_last(vector signed char __a, unsigned int __n) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vclrlb(__a, __n);
+ return (vector signed char)__builtin_altivec_vclrlb((vector unsigned char)__a,
+ __n);
#else
- return __builtin_altivec_vclrrb( __a, __n);
+ return (vector signed char)__builtin_altivec_vclrrb((vector unsigned char)__a,
+ __n);
#endif
}
static __inline__ vector unsigned char __ATTRS_o_ai
-vec_clrr(vector unsigned char __a, unsigned int __n) {
+vec_clr_last(vector unsigned char __a, unsigned int __n) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vclrlb((vector signed char)__a, __n);
+ return (vector unsigned char)__builtin_altivec_vclrlb(
+ (vector unsigned char)__a, __n);
#else
- return __builtin_altivec_vclrrb((vector signed char)__a, __n);
+ return (vector unsigned char)__builtin_altivec_vclrrb(
+ (vector unsigned char)__a, __n);
#endif
}
@@ -18407,13 +18598,75 @@ vec_mod(vector unsigned __int128 __a, vector unsigned __int128 __b) {
}
#endif
-/* vec_sldbi */
-
-#define vec_sldb(__a, __b, __c) __builtin_altivec_vsldbi(__a, __b, (__c & 0x7))
-
-/* vec_srdbi */
-
-#define vec_srdb(__a, __b, __c) __builtin_altivec_vsrdbi(__a, __b, (__c & 0x7))
+/* vec_sldb */
+#define vec_sldb(__a, __b, __c) \
+ _Generic( \
+ (__a), vector unsigned char \
+ : (vector unsigned char)__builtin_altivec_vsldbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector signed char \
+ : (vector signed char)__builtin_altivec_vsldbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector unsigned short \
+ : (vector unsigned short)__builtin_altivec_vsldbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector signed short \
+ : (vector signed short)__builtin_altivec_vsldbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector unsigned int \
+ : (vector unsigned int)__builtin_altivec_vsldbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector signed int \
+ : (vector signed int)__builtin_altivec_vsldbi((vector unsigned char)__a, \
+ (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector unsigned long long \
+ : (vector unsigned long long)__builtin_altivec_vsldbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector signed long long \
+ : (vector signed long long)__builtin_altivec_vsldbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, (__c & 0x7)))
+
+/* vec_srdb */
+#define vec_srdb(__a, __b, __c) \
+ _Generic( \
+ (__a), vector unsigned char \
+ : (vector unsigned char)__builtin_altivec_vsrdbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector signed char \
+ : (vector signed char)__builtin_altivec_vsrdbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector unsigned short \
+ : (vector unsigned short)__builtin_altivec_vsrdbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector signed short \
+ : (vector signed short)__builtin_altivec_vsrdbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector unsigned int \
+ : (vector unsigned int)__builtin_altivec_vsrdbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector signed int \
+ : (vector signed int)__builtin_altivec_vsrdbi((vector unsigned char)__a, \
+ (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector unsigned long long \
+ : (vector unsigned long long)__builtin_altivec_vsrdbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector signed long long \
+ : (vector signed long long)__builtin_altivec_vsrdbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, (__c & 0x7)))
/* vec_insertl */
@@ -18642,16 +18895,46 @@ vec_extracth(vector unsigned long long __a, vector unsigned long long __b,
#ifdef __VSX__
/* vec_permx */
-
#define vec_permx(__a, __b, __c, __d) \
- __builtin_vsx_xxpermx((__a), (__b), (__c), (__d))
+ _Generic( \
+ (__a), vector unsigned char \
+ : (vector unsigned char)__builtin_vsx_xxpermx( \
+ (vector unsigned char)__a, (vector unsigned char)__b, __c, __d), \
+ vector signed char \
+ : (vector signed char)__builtin_vsx_xxpermx( \
+ (vector unsigned char)__a, (vector unsigned char)__b, __c, __d), \
+ vector unsigned short \
+ : (vector unsigned short)__builtin_vsx_xxpermx( \
+ (vector unsigned char)__a, (vector unsigned char)__b, __c, __d), \
+ vector signed short \
+ : (vector signed short)__builtin_vsx_xxpermx( \
+ (vector unsigned char)__a, (vector unsigned char)__b, __c, __d), \
+ vector unsigned int \
+ : (vector unsigned int)__builtin_vsx_xxpermx( \
+ (vector unsigned char)__a, (vector unsigned char)__b, __c, __d), \
+ vector signed int \
+ : (vector signed int)__builtin_vsx_xxpermx( \
+ (vector unsigned char)__a, (vector unsigned char)__b, __c, __d), \
+ vector unsigned long long \
+ : (vector unsigned long long)__builtin_vsx_xxpermx( \
+ (vector unsigned char)__a, (vector unsigned char)__b, __c, __d), \
+ vector signed long long \
+ : (vector signed long long)__builtin_vsx_xxpermx( \
+ (vector unsigned char)__a, (vector unsigned char)__b, __c, __d), \
+ vector float \
+ : (vector float)__builtin_vsx_xxpermx( \
+ (vector unsigned char)__a, (vector unsigned char)__b, __c, __d), \
+ vector double \
+ : (vector double)__builtin_vsx_xxpermx( \
+ (vector unsigned char)__a, (vector unsigned char)__b, __c, __d))
/* vec_blendv */
static __inline__ vector signed char __ATTRS_o_ai
vec_blendv(vector signed char __a, vector signed char __b,
vector unsigned char __c) {
- return __builtin_vsx_xxblendvb(__a, __b, __c);
+ return (vector signed char)__builtin_vsx_xxblendvb(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
}
static __inline__ vector unsigned char __ATTRS_o_ai
@@ -18663,7 +18946,8 @@ vec_blendv(vector unsigned char __a, vector unsigned char __b,
static __inline__ vector signed short __ATTRS_o_ai
vec_blendv(vector signed short __a, vector signed short __b,
vector unsigned short __c) {
- return __builtin_vsx_xxblendvh(__a, __b, __c);
+ return (vector signed short)__builtin_vsx_xxblendvh(
+ (vector unsigned short)__a, (vector unsigned short)__b, __c);
}
static __inline__ vector unsigned short __ATTRS_o_ai
@@ -18675,7 +18959,8 @@ vec_blendv(vector unsigned short __a, vector unsigned short __b,
static __inline__ vector signed int __ATTRS_o_ai
vec_blendv(vector signed int __a, vector signed int __b,
vector unsigned int __c) {
- return __builtin_vsx_xxblendvw(__a, __b, __c);
+ return (vector signed int)__builtin_vsx_xxblendvw(
+ (vector unsigned int)__a, (vector unsigned int)__b, __c);
}
static __inline__ vector unsigned int __ATTRS_o_ai
@@ -18687,33 +18972,68 @@ vec_blendv(vector unsigned int __a, vector unsigned int __b,
static __inline__ vector signed long long __ATTRS_o_ai
vec_blendv(vector signed long long __a, vector signed long long __b,
vector unsigned long long __c) {
- return __builtin_vsx_xxblendvd(__a, __b, __c);
+ return (vector signed long long)__builtin_vsx_xxblendvd(
+ (vector unsigned long long)__a, (vector unsigned long long)__b, __c);
}
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_blendv(vector unsigned long long __a, vector unsigned long long __b,
vector unsigned long long __c) {
- return __builtin_vsx_xxblendvd(__a, __b, __c);
+ return (vector unsigned long long)__builtin_vsx_xxblendvd(__a, __b, __c);
}
static __inline__ vector float __ATTRS_o_ai
vec_blendv(vector float __a, vector float __b, vector unsigned int __c) {
- return __builtin_vsx_xxblendvw(__a, __b, __c);
+ return (vector float)__builtin_vsx_xxblendvw((vector unsigned int)__a,
+ (vector unsigned int)__b, __c);
}
static __inline__ vector double __ATTRS_o_ai
vec_blendv(vector double __a, vector double __b,
vector unsigned long long __c) {
- return __builtin_vsx_xxblendvd(__a, __b, __c);
+ return (vector double)__builtin_vsx_xxblendvd(
+ (vector unsigned long long)__a, (vector unsigned long long)__b, __c);
}
-/* vec_replace_elt */
-
-#define vec_replace_elt __builtin_altivec_vec_replace_elt
-
-/* vec_replace_unaligned */
+#define vec_replace_unaligned(__a, __b, __c) \
+ _Generic((__a), vector signed int \
+ : __builtin_altivec_vinsw((vector unsigned char)__a, \
+ (unsigned int)__b, __c), \
+ vector unsigned int \
+ : __builtin_altivec_vinsw((vector unsigned char)__a, \
+ (unsigned int)__b, __c), \
+ vector unsigned long long \
+ : __builtin_altivec_vinsd((vector unsigned char)__a, \
+ (unsigned long long)__b, __c), \
+ vector signed long long \
+ : __builtin_altivec_vinsd((vector unsigned char)__a, \
+ (unsigned long long)__b, __c), \
+ vector float \
+ : __builtin_altivec_vinsw((vector unsigned char)__a, \
+ (unsigned int)__b, __c), \
+ vector double \
+ : __builtin_altivec_vinsd((vector unsigned char)__a, \
+ (unsigned long long)__b, __c))
-#define vec_replace_unaligned __builtin_altivec_vec_replace_unaligned
+#define vec_replace_elt(__a, __b, __c) \
+ _Generic((__a), vector signed int \
+ : (vector signed int)__builtin_altivec_vinsw_elt( \
+ (vector unsigned char)__a, (unsigned int)__b, __c), \
+ vector unsigned int \
+ : (vector unsigned int)__builtin_altivec_vinsw_elt( \
+ (vector unsigned char)__a, (unsigned int)__b, __c), \
+ vector unsigned long long \
+ : (vector unsigned long long)__builtin_altivec_vinsd_elt( \
+ (vector unsigned char)__a, (unsigned long long)__b, __c), \
+ vector signed long long \
+ : (vector signed long long)__builtin_altivec_vinsd_elt( \
+ (vector unsigned char)__a, (unsigned long long)__b, __c), \
+ vector float \
+ : (vector float)__builtin_altivec_vinsw_elt( \
+ (vector unsigned char)__a, (unsigned int)__b, __c), \
+ vector double \
+ : (vector double)__builtin_altivec_vinsd_elt( \
+ (vector unsigned char)__a, (unsigned long long)__b, __c))
/* vec_splati */
@@ -18733,36 +19053,39 @@ static __inline__ vector double __ATTRS_o_ai vec_splatid(const float __a) {
static __inline__ vector signed int __ATTRS_o_ai vec_splati_ins(
vector signed int __a, const unsigned int __b, const signed int __c) {
+ const unsigned int __d = __b & 0x01;
#ifdef __LITTLE_ENDIAN__
- __a[1 - __b] = __c;
- __a[3 - __b] = __c;
+ __a[1 - __d] = __c;
+ __a[3 - __d] = __c;
#else
- __a[__b] = __c;
- __a[2 + __b] = __c;
+ __a[__d] = __c;
+ __a[2 + __d] = __c;
#endif
return __a;
}
static __inline__ vector unsigned int __ATTRS_o_ai vec_splati_ins(
vector unsigned int __a, const unsigned int __b, const unsigned int __c) {
+ const unsigned int __d = __b & 0x01;
#ifdef __LITTLE_ENDIAN__
- __a[1 - __b] = __c;
- __a[3 - __b] = __c;
+ __a[1 - __d] = __c;
+ __a[3 - __d] = __c;
#else
- __a[__b] = __c;
- __a[2 + __b] = __c;
+ __a[__d] = __c;
+ __a[2 + __d] = __c;
#endif
return __a;
}
static __inline__ vector float __ATTRS_o_ai
vec_splati_ins(vector float __a, const unsigned int __b, const float __c) {
+ const unsigned int __d = __b & 0x01;
#ifdef __LITTLE_ENDIAN__
- __a[1 - __b] = __c;
- __a[3 - __b] = __c;
+ __a[1 - __d] = __c;
+ __a[3 - __d] = __c;
#else
- __a[__b] = __c;
- __a[2 + __b] = __c;
+ __a[__d] = __c;
+ __a[2 + __d] = __c;
#endif
return __a;
}
@@ -18787,27 +19110,33 @@ vec_test_lsbb_all_zeros(vector unsigned char __a) {
static __inline__ vector unsigned char __ATTRS_o_ai
vec_stril(vector unsigned char __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vstribr((vector signed char)__a);
+ return (vector unsigned char)__builtin_altivec_vstribr(
+ (vector unsigned char)__a);
#else
- return __builtin_altivec_vstribl((vector signed char)__a);
+ return (vector unsigned char)__builtin_altivec_vstribl(
+ (vector unsigned char)__a);
#endif
}
static __inline__ vector signed char __ATTRS_o_ai
vec_stril(vector signed char __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vstribr(__a);
+ return (vector signed char)__builtin_altivec_vstribr(
+ (vector unsigned char)__a);
#else
- return __builtin_altivec_vstribl(__a);
+ return (vector signed char)__builtin_altivec_vstribl(
+ (vector unsigned char)__a);
#endif
}
static __inline__ vector unsigned short __ATTRS_o_ai
vec_stril(vector unsigned short __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vstrihr((vector signed short)__a);
+ return (vector unsigned short)__builtin_altivec_vstrihr(
+ (vector signed short)__a);
#else
- return __builtin_altivec_vstrihl((vector signed short)__a);
+ return (vector unsigned short)__builtin_altivec_vstrihl(
+ (vector signed short)__a);
#endif
}
@@ -18824,17 +19153,17 @@ vec_stril(vector signed short __a) {
static __inline__ int __ATTRS_o_ai vec_stril_p(vector unsigned char __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vstribr_p(__CR6_EQ, (vector signed char)__a);
+ return __builtin_altivec_vstribr_p(__CR6_EQ, (vector unsigned char)__a);
#else
- return __builtin_altivec_vstribl_p(__CR6_EQ, (vector signed char)__a);
+ return __builtin_altivec_vstribl_p(__CR6_EQ, (vector unsigned char)__a);
#endif
}
static __inline__ int __ATTRS_o_ai vec_stril_p(vector signed char __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vstribr_p(__CR6_EQ, __a);
+ return __builtin_altivec_vstribr_p(__CR6_EQ, (vector unsigned char)__a);
#else
- return __builtin_altivec_vstribl_p(__CR6_EQ, __a);
+ return __builtin_altivec_vstribl_p(__CR6_EQ, (vector unsigned char)__a);
#endif
}
@@ -18859,27 +19188,33 @@ static __inline__ int __ATTRS_o_ai vec_stril_p(vector signed short __a) {
static __inline__ vector unsigned char __ATTRS_o_ai
vec_strir(vector unsigned char __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vstribl((vector signed char)__a);
+ return (vector unsigned char)__builtin_altivec_vstribl(
+ (vector unsigned char)__a);
#else
- return __builtin_altivec_vstribr((vector signed char)__a);
+ return (vector unsigned char)__builtin_altivec_vstribr(
+ (vector unsigned char)__a);
#endif
}
static __inline__ vector signed char __ATTRS_o_ai
vec_strir(vector signed char __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vstribl(__a);
+ return (vector signed char)__builtin_altivec_vstribl(
+ (vector unsigned char)__a);
#else
- return __builtin_altivec_vstribr(__a);
+ return (vector signed char)__builtin_altivec_vstribr(
+ (vector unsigned char)__a);
#endif
}
static __inline__ vector unsigned short __ATTRS_o_ai
vec_strir(vector unsigned short __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vstrihl((vector signed short)__a);
+ return (vector unsigned short)__builtin_altivec_vstrihl(
+ (vector signed short)__a);
#else
- return __builtin_altivec_vstrihr((vector signed short)__a);
+ return (vector unsigned short)__builtin_altivec_vstrihr(
+ (vector signed short)__a);
#endif
}
@@ -18896,17 +19231,17 @@ vec_strir(vector signed short __a) {
static __inline__ int __ATTRS_o_ai vec_strir_p(vector unsigned char __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vstribl_p(__CR6_EQ, (vector signed char)__a);
+ return __builtin_altivec_vstribl_p(__CR6_EQ, (vector unsigned char)__a);
#else
- return __builtin_altivec_vstribr_p(__CR6_EQ, (vector signed char)__a);
+ return __builtin_altivec_vstribr_p(__CR6_EQ, (vector unsigned char)__a);
#endif
}
static __inline__ int __ATTRS_o_ai vec_strir_p(vector signed char __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vstribl_p(__CR6_EQ, __a);
+ return __builtin_altivec_vstribl_p(__CR6_EQ, (vector unsigned char)__a);
#else
- return __builtin_altivec_vstribr_p(__CR6_EQ, __a);
+ return __builtin_altivec_vstribr_p(__CR6_EQ, (vector unsigned char)__a);
#endif
}
@@ -18976,6 +19311,51 @@ vec_sra(vector signed __int128 __a, vector unsigned __int128 __b) {
#endif /* __SIZEOF_INT128__ */
#endif /* __POWER10_VECTOR__ */
+#ifdef __POWER8_VECTOR__
+#define __bcdadd(__a, __b, __ps) __builtin_ppc_bcdadd((__a), (__b), (__ps))
+#define __bcdsub(__a, __b, __ps) __builtin_ppc_bcdsub((__a), (__b), (__ps))
+
+static __inline__ long __bcdadd_ofl(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_ppc_bcdadd_p(__CR6_SO, __a, __b);
+}
+
+static __inline__ long __bcdsub_ofl(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_ppc_bcdsub_p(__CR6_SO, __a, __b);
+}
+
+static __inline__ long __bcd_invalid(vector unsigned char __a) {
+ return __builtin_ppc_bcdsub_p(__CR6_SO, __a, __a);
+}
+
+static __inline__ long __bcdcmpeq(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_ppc_bcdsub_p(__CR6_EQ, __a, __b);
+}
+
+static __inline__ long __bcdcmplt(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_ppc_bcdsub_p(__CR6_LT, __a, __b);
+}
+
+static __inline__ long __bcdcmpgt(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_ppc_bcdsub_p(__CR6_GT, __a, __b);
+}
+
+static __inline__ long __bcdcmple(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_ppc_bcdsub_p(__CR6_GT_REV, __a, __b);
+}
+
+static __inline__ long __bcdcmpge(vector unsigned char __a,
+ vector unsigned char __b) {
+ return __builtin_ppc_bcdsub_p(__CR6_LT_REV, __a, __b);
+}
+
+#endif // __POWER8_VECTOR__
+
#undef __ATTRS_o_ai
#endif /* __ALTIVEC_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/ammintrin.h b/contrib/llvm-project/clang/lib/Headers/ammintrin.h
index 3806be6ebc43..f549ab80d946 100644
--- a/contrib/llvm-project/clang/lib/Headers/ammintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/ammintrin.h
@@ -10,6 +10,10 @@
#ifndef __AMMINTRIN_H
#define __AMMINTRIN_H
+#if !defined(__i386__) && !defined(__x86_64__)
+#error "This header is only meant to be used on x86 and x64 architecture"
+#endif
+
#include <pmmintrin.h>
/* Define the default attributes for the functions in this file. */
@@ -151,9 +155,9 @@ _mm_insert_si64(__m128i __x, __m128i __y)
/// \param __a
/// The 64-bit double-precision floating-point register value to be stored.
static __inline__ void __DEFAULT_FN_ATTRS
-_mm_stream_sd(double *__p, __m128d __a)
+_mm_stream_sd(void *__p, __m128d __a)
{
- __builtin_ia32_movntsd(__p, (__v2df)__a);
+ __builtin_ia32_movntsd((double *)__p, (__v2df)__a);
}
/// Stores a 32-bit single-precision floating-point value in a 32-bit
@@ -169,9 +173,9 @@ _mm_stream_sd(double *__p, __m128d __a)
/// \param __a
/// The 32-bit single-precision floating-point register value to be stored.
static __inline__ void __DEFAULT_FN_ATTRS
-_mm_stream_ss(float *__p, __m128 __a)
+_mm_stream_ss(void *__p, __m128 __a)
{
- __builtin_ia32_movntss(__p, (__v4sf)__a);
+ __builtin_ia32_movntss((float *)__p, (__v4sf)__a);
}
#undef __DEFAULT_FN_ATTRS
diff --git a/contrib/llvm-project/clang/lib/Headers/amxcomplexintrin.h b/contrib/llvm-project/clang/lib/Headers/amxcomplexintrin.h
new file mode 100644
index 000000000000..84ef972fcadf
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/amxcomplexintrin.h
@@ -0,0 +1,169 @@
+/*===--------- amxcomplexintrin.h - AMXCOMPLEX intrinsics -*- C++ -*---------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===------------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <amxcomplexintrin.h> directly; include <immintrin.h> instead."
+#endif // __IMMINTRIN_H
+
+#ifndef __AMX_COMPLEXINTRIN_H
+#define __AMX_COMPLEXINTRIN_H
+#ifdef __x86_64__
+
+#define __DEFAULT_FN_ATTRS_COMPLEX \
+ __attribute__((__always_inline__, __nodebug__, __target__("amx-complex")))
+
+/// Perform matrix multiplication of two tiles containing complex elements and
+/// accumulate the results into a packed single precision tile. Each dword
+/// element in input tiles \a a and \a b is interpreted as a complex number
+/// with FP16 real part and FP16 imaginary part.
+/// Calculates the imaginary part of the result. For each possible combination
+/// of (row of \a a, column of \a b), it performs a set of multiplication
+/// and accumulations on all corresponding complex numbers (one from \a a
+/// and one from \a b). The imaginary part of the \a a element is multiplied
+/// with the real part of the corresponding \a b element, and the real part
+/// of the \a a element is multiplied with the imaginary part of the
+/// corresponding \a b elements. The two accumulated results are added, and
+/// then accumulated into the corresponding row and column of \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// void _tile_cmmimfp16ps(__tile dst, __tile a, __tile b);
+/// \endcode
+///
+/// \code{.operation}
+/// FOR m := 0 TO dst.rows - 1
+/// tmp := dst.row[m]
+/// FOR k := 0 TO (a.colsb / 4) - 1
+/// FOR n := 0 TO (dst.colsb / 4) - 1
+/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) * FP32(b.row[k].fp16[2*n+1])
+/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+1]) * FP32(b.row[k].fp16[2*n+0])
+/// ENDFOR
+/// ENDFOR
+/// write_row_and_zero(dst, m, tmp, dst.colsb)
+/// ENDFOR
+/// zero_upper_rows(dst, dst.rows)
+/// zero_tileconfig_start()
+/// \endcode
+///
+/// This intrinsic corresponds to the \c TCMMIMFP16PS instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param a
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param b
+/// The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_cmmimfp16ps(dst, a, b) __builtin_ia32_tcmmimfp16ps(dst, a, b)
+
+/// Perform matrix multiplication of two tiles containing complex elements and
+/// accumulate the results into a packed single precision tile. Each dword
+/// element in input tiles \a a and \a b is interpreted as a complex number
+/// with FP16 real part and FP16 imaginary part.
+/// Calculates the real part of the result. For each possible combination
+/// of (row of \a a, column of \a b), it performs a set of multiplication
+/// and accumulations on all corresponding complex numbers (one from \a a
+/// and one from \a b). The real part of the \a a element is multiplied
+/// with the real part of the corresponding \a b element, and the negated
+/// imaginary part of the \a a element is multiplied with the imaginary
+/// part of the corresponding \a b elements. The two accumulated results
+/// are added, and then accumulated into the corresponding row and column
+/// of \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// void _tile_cmmrlfp16ps(__tile dst, __tile a, __tile b);
+/// \endcode
+///
+/// \code{.operation}
+/// FOR m := 0 TO dst.rows - 1
+/// tmp := dst.row[m]
+/// FOR k := 0 TO (a.colsb / 4) - 1
+/// FOR n := 0 TO (dst.colsb / 4) - 1
+/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) * FP32(b.row[k].fp16[2*n+0])
+/// tmp.fp32[n] += FP32(-a.row[m].fp16[2*k+1]) * FP32(b.row[k].fp16[2*n+1])
+/// ENDFOR
+/// ENDFOR
+/// write_row_and_zero(dst, m, tmp, dst.colsb)
+/// ENDFOR
+/// zero_upper_rows(dst, dst.rows)
+/// zero_tileconfig_start()
+/// \endcode
+///
+/// This intrinsic corresponds to the \c TCMMIMFP16PS instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param a
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param b
+/// The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_cmmrlfp16ps(dst, a, b) __builtin_ia32_tcmmrlfp16ps(dst, a, b)
+
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_COMPLEX
+_tile_cmmimfp16ps_internal(unsigned short m, unsigned short n, unsigned short k,
+ _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+ return __builtin_ia32_tcmmimfp16ps_internal(m, n, k, dst, src1, src2);
+}
+
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_COMPLEX
+_tile_cmmrlfp16ps_internal(unsigned short m, unsigned short n, unsigned short k,
+ _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+ return __builtin_ia32_tcmmrlfp16ps_internal(m, n, k, dst, src1, src2);
+}
+
+/// Perform matrix multiplication of two tiles containing complex elements and
+/// accumulate the results into a packed single precision tile. Each dword
+/// element in input tiles src0 and src1 is interpreted as a complex number with
+/// FP16 real part and FP16 imaginary part.
+/// This function calculates the imaginary part of the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> TCMMIMFP16PS </c> instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param src0
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+/// The 2nd source tile. Max size is 1024 Bytes.
+__DEFAULT_FN_ATTRS_COMPLEX
+static void __tile_cmmimfp16ps(__tile1024i *dst, __tile1024i src0,
+ __tile1024i src1) {
+ dst->tile = _tile_cmmimfp16ps_internal(src0.row, src1.col, src0.col,
+ dst->tile, src0.tile, src1.tile);
+}
+
+/// Perform matrix multiplication of two tiles containing complex elements and
+/// accumulate the results into a packed single precision tile. Each dword
+/// element in input tiles src0 and src1 is interpreted as a complex number with
+/// FP16 real part and FP16 imaginary part.
+/// This function calculates the real part of the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> TCMMRLFP16PS </c> instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param src0
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+/// The 2nd source tile. Max size is 1024 Bytes.
+__DEFAULT_FN_ATTRS_COMPLEX
+static void __tile_cmmrlfp16ps(__tile1024i *dst, __tile1024i src0,
+ __tile1024i src1) {
+ dst->tile = _tile_cmmrlfp16ps_internal(src0.row, src1.col, src0.col,
+ dst->tile, src0.tile, src1.tile);
+}
+
+#endif // __x86_64__
+#endif // __AMX_COMPLEXINTRIN_H
diff --git a/contrib/llvm-project/clang/lib/Headers/amxfp16intrin.h b/contrib/llvm-project/clang/lib/Headers/amxfp16intrin.h
new file mode 100644
index 000000000000..ed798245d41e
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/amxfp16intrin.h
@@ -0,0 +1,58 @@
+/*===------------- amxfp16intrin.h - AMX_FP16 intrinsics -*- C++ -*---------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===------------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <amxfp16intrin.h> directly; use <immintrin.h> instead."
+#endif /* __IMMINTRIN_H */
+
+#ifndef __AMX_FP16INTRIN_H
+#define __AMX_FP16INTRIN_H
+#ifdef __x86_64__
+
+/// Compute dot-product of FP16 (16-bit) floating-point pairs in tiles \a a
+/// and \a b, accumulating the intermediate single-precision (32-bit)
+/// floating-point elements with elements in \a dst, and store the 32-bit
+/// result back to tile \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// void _tile_dpfp16ps (__tile dst, __tile a, __tile b)
+/// \endcode
+///
+/// \code{.operation}
+/// FOR m := 0 TO dst.rows - 1
+/// tmp := dst.row[m]
+/// FOR k := 0 TO (a.colsb / 4) - 1
+/// FOR n := 0 TO (dst.colsb / 4) - 1
+/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) *
+/// FP32(b.row[k].fp16[2*n+0])
+/// tmp.fp32[n] += FP32(a.row[m].fp16[2*k+1]) *
+/// FP32(b.row[k].fp16[2*n+1])
+/// ENDFOR
+/// ENDFOR
+/// write_row_and_zero(dst, m, tmp, dst.colsb)
+/// ENDFOR
+/// zero_upper_rows(dst, dst.rows)
+/// zero_tileconfig_start()
+/// \endcode
+///
+/// This intrinsic corresponds to the \c TDPFP16PS instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param a
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param b
+/// The 2nd source tile. Max size is 1024 Bytes.
+#define _tile_dpfp16ps(dst, a, b) \
+ __builtin_ia32_tdpfp16ps(dst, a, b)
+
+#endif /* __x86_64__ */
+#endif /* __AMX_FP16INTRIN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/amxintrin.h b/contrib/llvm-project/clang/lib/Headers/amxintrin.h
index ec601a58e7c3..baa56f5b28e8 100644
--- a/contrib/llvm-project/clang/lib/Headers/amxintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/amxintrin.h
@@ -22,6 +22,8 @@
__attribute__((__always_inline__, __nodebug__, __target__("amx-int8")))
#define __DEFAULT_FN_ATTRS_BF16 \
__attribute__((__always_inline__, __nodebug__, __target__("amx-bf16")))
+#define __DEFAULT_FN_ATTRS_FP16 \
+ __attribute__((__always_inline__, __nodebug__, __target__("amx-fp16")))
/// Load tile configuration from a 64-byte memory location specified by
/// "mem_addr". The tile configuration includes the tile type palette, the
@@ -290,6 +292,13 @@ _tile_dpbf16ps_internal(unsigned short m, unsigned short n, unsigned short k,
return __builtin_ia32_tdpbf16ps_internal(m, n, k, dst, src1, src2);
}
+/// This is internal intrinsic. C/C++ user should avoid calling it directly.
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_FP16
+_tile_dpfp16ps_internal(unsigned short m, unsigned short n, unsigned short k,
+ _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+ return __builtin_ia32_tdpfp16ps_internal(m, n, k, dst, src1, src2);
+}
+
/// This struct pack the shape and tile data together for user. We suggest
/// initializing the struct as early as possible, because compiler depends
/// on the shape information to do configure. The constant value is preferred
@@ -314,8 +323,8 @@ typedef struct __tile1024i_str {
/// \param stride
/// The stride between the rows' data to be loaded in memory.
__DEFAULT_FN_ATTRS_TILE
-static void __tile_loadd(__tile1024i *dst, const void *base,
- __SIZE_TYPE__ stride) {
+static __inline__ void __tile_loadd(__tile1024i *dst, const void *base,
+ __SIZE_TYPE__ stride) {
dst->tile = _tile_loadd_internal(dst->row, dst->col, base, stride);
}
@@ -335,8 +344,8 @@ static void __tile_loadd(__tile1024i *dst, const void *base,
/// \param stride
/// The stride between the rows' data to be loaded in memory.
__DEFAULT_FN_ATTRS_TILE
-static void __tile_stream_loadd(__tile1024i *dst, const void *base,
- __SIZE_TYPE__ stride) {
+static __inline__ void __tile_stream_loadd(__tile1024i *dst, const void *base,
+ __SIZE_TYPE__ stride) {
dst->tile = _tile_loaddt1_internal(dst->row, dst->col, base, stride);
}
@@ -357,8 +366,8 @@ static void __tile_stream_loadd(__tile1024i *dst, const void *base,
/// \param src1
/// The 2nd source tile. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_INT8
-static void __tile_dpbssd(__tile1024i *dst, __tile1024i src0,
- __tile1024i src1) {
+static __inline__ void __tile_dpbssd(__tile1024i *dst, __tile1024i src0,
+ __tile1024i src1) {
dst->tile = _tile_dpbssd_internal(src0.row, src1.col, src0.col, dst->tile,
src0.tile, src1.tile);
}
@@ -380,8 +389,8 @@ static void __tile_dpbssd(__tile1024i *dst, __tile1024i src0,
/// \param src1
/// The 2nd source tile. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_INT8
-static void __tile_dpbsud(__tile1024i *dst, __tile1024i src0,
- __tile1024i src1) {
+static __inline__ void __tile_dpbsud(__tile1024i *dst, __tile1024i src0,
+ __tile1024i src1) {
dst->tile = _tile_dpbsud_internal(src0.row, src1.col, src0.col, dst->tile,
src0.tile, src1.tile);
}
@@ -403,8 +412,8 @@ static void __tile_dpbsud(__tile1024i *dst, __tile1024i src0,
/// \param src1
/// The 2nd source tile. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_INT8
-static void __tile_dpbusd(__tile1024i *dst, __tile1024i src0,
- __tile1024i src1) {
+static __inline__ void __tile_dpbusd(__tile1024i *dst, __tile1024i src0,
+ __tile1024i src1) {
dst->tile = _tile_dpbusd_internal(src0.row, src1.col, src0.col, dst->tile,
src0.tile, src1.tile);
}
@@ -426,8 +435,8 @@ static void __tile_dpbusd(__tile1024i *dst, __tile1024i src0,
/// \param src1
/// The 2nd source tile. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_INT8
-static void __tile_dpbuud(__tile1024i *dst, __tile1024i src0,
- __tile1024i src1) {
+static __inline__ void __tile_dpbuud(__tile1024i *dst, __tile1024i src0,
+ __tile1024i src1) {
dst->tile = _tile_dpbuud_internal(src0.row, src1.col, src0.col, dst->tile,
src0.tile, src1.tile);
}
@@ -439,14 +448,13 @@ static void __tile_dpbuud(__tile1024i *dst, __tile1024i src0,
///
/// This intrinsic corresponds to the <c> TILESTORED </c> instruction.
///
-/// \param dst
-/// A destination tile. Max size is 1024 Bytes.
/// \param base
/// A pointer to base address.
/// \param stride
/// The stride between the rows' data to be stored in memory.
__DEFAULT_FN_ATTRS_TILE
-static void __tile_stored(void *base, __SIZE_TYPE__ stride, __tile1024i src) {
+static __inline__ void __tile_stored(void *base, __SIZE_TYPE__ stride,
+ __tile1024i src) {
_tile_stored_internal(src.row, src.col, base, stride, src.tile);
}
@@ -459,7 +467,7 @@ static void __tile_stored(void *base, __SIZE_TYPE__ stride, __tile1024i src) {
/// \param dst
/// The destination tile to be zero. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_TILE
-static void __tile_zero(__tile1024i *dst) {
+static __inline__ void __tile_zero(__tile1024i *dst) {
dst->tile = __builtin_ia32_tilezero_internal(dst->row, dst->col);
}
@@ -479,15 +487,38 @@ static void __tile_zero(__tile1024i *dst) {
/// \param src1
/// The 2nd source tile. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_BF16
-static void __tile_dpbf16ps(__tile1024i *dst, __tile1024i src0,
- __tile1024i src1) {
+static __inline__ void __tile_dpbf16ps(__tile1024i *dst, __tile1024i src0,
+ __tile1024i src1) {
dst->tile = _tile_dpbf16ps_internal(src0.row, src1.col, src0.col, dst->tile,
src0.tile, src1.tile);
}
+/// Compute dot-product of FP16 (16-bit) floating-point pairs in tiles src0 and
+/// src1, accumulating the intermediate single-precision (32-bit) floating-point
+/// elements with elements in "dst", and store the 32-bit result back to tile
+/// "dst".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPFP16PS </c> instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param src0
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+/// The 2nd source tile. Max size is 1024 Bytes.
+__DEFAULT_FN_ATTRS_FP16
+static __inline__ void __tile_dpfp16ps(__tile1024i *dst, __tile1024i src0,
+ __tile1024i src1) {
+ dst->tile = _tile_dpfp16ps_internal(src0.row, src1.col, src0.col, dst->tile,
+ src0.tile, src1.tile);
+}
+
#undef __DEFAULT_FN_ATTRS_TILE
#undef __DEFAULT_FN_ATTRS_INT8
#undef __DEFAULT_FN_ATTRS_BF16
+#undef __DEFAULT_FN_ATTRS_FP16
#endif /* __x86_64__ */
#endif /* __AMXINTRIN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/arm_acle.h b/contrib/llvm-project/clang/lib/Headers/arm_acle.h
index 45fac248dadb..9cd34948e3c5 100644
--- a/contrib/llvm-project/clang/lib/Headers/arm_acle.h
+++ b/contrib/llvm-project/clang/lib/Headers/arm_acle.h
@@ -4,6 +4,13 @@
* See https://llvm.org/LICENSE.txt for license information.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
+ * The Arm C Language Extensions specifications can be found in the following
+ * link: https://github.com/ARM-software/acle/releases
+ *
+ * The ACLE section numbers are subject to change. When consulting the
+ * specifications, it is recommended to search using section titles if
+ * the section numbers look outdated.
+ *
*===-----------------------------------------------------------------------===
*/
@@ -20,8 +27,8 @@
extern "C" {
#endif
-/* 8 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
-/* 8.3 Memory barriers */
+/* 7 SYNCHRONIZATION, BARRIER AND HINT INTRINSICS */
+/* 7.3 Memory barriers */
#if !__has_builtin(__dmb)
#define __dmb(i) __builtin_arm_dmb(i)
#endif
@@ -32,7 +39,7 @@ extern "C" {
#define __isb(i) __builtin_arm_isb(i)
#endif
-/* 8.4 Hints */
+/* 7.4 Hints */
#if !__has_builtin(__wfi)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __wfi(void) {
@@ -64,11 +71,11 @@ static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(v
}
#endif
-#if __ARM_32BIT_STATE
+#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
#define __dbg(t) __builtin_arm_dbg(t)
#endif
-/* 8.5 Swap */
+/* 7.5 Swap */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__swp(uint32_t __x, volatile uint32_t *__p) {
uint32_t v;
@@ -78,11 +85,11 @@ __swp(uint32_t __x, volatile uint32_t *__p) {
return v;
}
-/* 8.6 Memory prefetch intrinsics */
-/* 8.6.1 Data prefetch */
+/* 7.6 Memory prefetch intrinsics */
+/* 7.6.1 Data prefetch */
#define __pld(addr) __pldx(0, 0, 0, addr)
-#if __ARM_32BIT_STATE
+#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
#define __pldx(access_kind, cache_level, retention_policy, addr) \
__builtin_arm_prefetch(addr, access_kind, 1)
#else
@@ -90,10 +97,10 @@ __swp(uint32_t __x, volatile uint32_t *__p) {
__builtin_arm_prefetch(addr, access_kind, cache_level, retention_policy, 1)
#endif
-/* 8.6.2 Instruction prefetch */
+/* 7.6.2 Instruction prefetch */
#define __pli(addr) __plix(0, 0, addr)
-#if __ARM_32BIT_STATE
+#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
#define __plix(cache_level, retention_policy, addr) \
__builtin_arm_prefetch(addr, 0, 0)
#else
@@ -101,15 +108,15 @@ __swp(uint32_t __x, volatile uint32_t *__p) {
__builtin_arm_prefetch(addr, 0, cache_level, retention_policy, 0)
#endif
-/* 8.7 NOP */
+/* 7.7 NOP */
#if !defined(_MSC_VER) || !defined(__aarch64__)
static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(void) {
__builtin_arm_nop();
}
#endif
-/* 9 DATA-PROCESSING INTRINSICS */
-/* 9.2 Miscellaneous data-processing intrinsics */
+/* 8 DATA-PROCESSING INTRINSICS */
+/* 8.2 Miscellaneous data-processing intrinsics */
/* ROR */
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__ror(uint32_t __x, uint32_t __y) {
@@ -138,28 +145,32 @@ __rorl(unsigned long __x, uint32_t __y) {
/* CLZ */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clz(uint32_t __t) {
- return __builtin_clz(__t);
+ return __builtin_arm_clz(__t);
}
-static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clzl(unsigned long __t) {
- return __builtin_clzl(__t);
+#if __SIZEOF_LONG__ == 4
+ return __builtin_arm_clz(__t);
+#else
+ return __builtin_arm_clz64(__t);
+#endif
}
-static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clzll(uint64_t __t) {
- return __builtin_clzll(__t);
+ return __builtin_arm_clz64(__t);
}
/* CLS */
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__cls(uint32_t __t) {
return __builtin_arm_cls(__t);
}
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clsl(unsigned long __t) {
#if __SIZEOF_LONG__ == 4
return __builtin_arm_cls(__t);
@@ -168,7 +179,7 @@ __clsl(unsigned long __t) {
#endif
}
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
__clsll(uint64_t __t) {
return __builtin_arm_cls64(__t);
}
@@ -201,7 +212,7 @@ __rev16(uint32_t __t) {
static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rev16ll(uint64_t __t) {
- return (((uint64_t)__rev16(__t >> 32)) << 32) | __rev16(__t);
+ return (((uint64_t)__rev16(__t >> 32)) << 32) | (uint64_t)__rev16((uint32_t)__t);
}
static __inline__ unsigned long __attribute__((__always_inline__, __nodebug__))
@@ -216,7 +227,7 @@ __rev16l(unsigned long __t) {
/* REVSH */
static __inline__ int16_t __attribute__((__always_inline__, __nodebug__))
__revsh(int16_t __t) {
- return __builtin_bswap16(__t);
+ return (int16_t)__builtin_bswap16((uint16_t)__t);
}
/* RBIT */
@@ -227,7 +238,7 @@ __rbit(uint32_t __t) {
static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
__rbitll(uint64_t __t) {
-#if __ARM_32BIT_STATE
+#if defined(__ARM_32BIT_STATE) && __ARM_32BIT_STATE
return (((uint64_t)__builtin_arm_rbit(__t)) << 32) |
__builtin_arm_rbit(__t >> 32);
#else
@@ -244,10 +255,8 @@ __rbitl(unsigned long __t) {
#endif
}
-/*
- * 9.3 16-bit multiplications
- */
-#if __ARM_FEATURE_DSP
+/* 8.3 16-bit multiplications */
+#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP
static __inline__ int32_t __attribute__((__always_inline__,__nodebug__))
__smulbb(int32_t __a, int32_t __b) {
return __builtin_arm_smulbb(__a, __b);
@@ -275,19 +284,19 @@ __smulwt(int32_t __a, int32_t __b) {
#endif
/*
- * 9.4 Saturating intrinsics
+ * 8.4 Saturating intrinsics
*
- * FIXME: Change guard to their corrosponding __ARM_FEATURE flag when Q flag
+ * FIXME: Change guard to their corresponding __ARM_FEATURE flag when Q flag
* intrinsics are implemented and the flag is enabled.
*/
-/* 9.4.1 Width-specified saturation intrinsics */
-#if __ARM_FEATURE_SAT
+/* 8.4.1 Width-specified saturation intrinsics */
+#if defined(__ARM_FEATURE_SAT) && __ARM_FEATURE_SAT
#define __ssat(x, y) __builtin_arm_ssat(x, y)
#define __usat(x, y) __builtin_arm_usat(x, y)
#endif
-/* 9.4.2 Saturating addition and subtraction intrinsics */
-#if __ARM_FEATURE_DSP
+/* 8.4.2 Saturating addition and subtraction intrinsics */
+#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__qadd(int32_t __t, int32_t __v) {
return __builtin_arm_qadd(__t, __v);
@@ -304,8 +313,8 @@ __qdbl(int32_t __t) {
}
#endif
-/* 9.4.3 Accumultating multiplications */
-#if __ARM_FEATURE_DSP
+/* 8.4.3 Accumultating multiplications */
+#if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlabb(int32_t __a, int32_t __b, int32_t __c) {
return __builtin_arm_smlabb(__a, __b, __c);
@@ -333,14 +342,14 @@ __smlawt(int32_t __a, int32_t __b, int32_t __c) {
#endif
-/* 9.5.4 Parallel 16-bit saturation */
-#if __ARM_FEATURE_SIMD32
+/* 8.5.4 Parallel 16-bit saturation */
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
#define __ssat16(x, y) __builtin_arm_ssat16(x, y)
#define __usat16(x, y) __builtin_arm_usat16(x, y)
#endif
-/* 9.5.5 Packing and unpacking */
-#if __ARM_FEATURE_SIMD32
+/* 8.5.5 Packing and unpacking */
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
typedef int32_t int8x4_t;
typedef int32_t int16x2_t;
typedef uint32_t uint8x4_t;
@@ -364,16 +373,16 @@ __uxtb16(int8x4_t __a) {
}
#endif
-/* 9.5.6 Parallel selection */
-#if __ARM_FEATURE_SIMD32
+/* 8.5.6 Parallel selection */
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ uint8x4_t __attribute__((__always_inline__, __nodebug__))
__sel(uint8x4_t __a, uint8x4_t __b) {
return __builtin_arm_sel(__a, __b);
}
#endif
-/* 9.5.7 Parallel 8-bit addition and subtraction */
-#if __ARM_FEATURE_SIMD32
+/* 8.5.7 Parallel 8-bit addition and subtraction */
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int8x4_t __attribute__((__always_inline__, __nodebug__))
__qadd8(int8x4_t __a, int8x4_t __b) {
return __builtin_arm_qadd8(__a, __b);
@@ -424,8 +433,8 @@ __usub8(uint8x4_t __a, uint8x4_t __b) {
}
#endif
-/* 9.5.8 Sum of 8-bit absolute differences */
-#if __ARM_FEATURE_SIMD32
+/* 8.5.8 Sum of 8-bit absolute differences */
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
__usad8(uint8x4_t __a, uint8x4_t __b) {
return __builtin_arm_usad8(__a, __b);
@@ -436,8 +445,8 @@ __usada8(uint8x4_t __a, uint8x4_t __b, uint32_t __c) {
}
#endif
-/* 9.5.9 Parallel 16-bit addition and subtraction */
-#if __ARM_FEATURE_SIMD32
+/* 8.5.9 Parallel 16-bit addition and subtraction */
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int16x2_t __attribute__((__always_inline__, __nodebug__))
__qadd16(int16x2_t __a, int16x2_t __b) {
return __builtin_arm_qadd16(__a, __b);
@@ -536,8 +545,8 @@ __usub16(uint16x2_t __a, uint16x2_t __b) {
}
#endif
-/* 9.5.10 Parallel 16-bit multiplications */
-#if __ARM_FEATURE_SIMD32
+/* 8.5.10 Parallel 16-bit multiplications */
+#if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32
static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
__smlad(int16x2_t __a, int16x2_t __b, int32_t __c) {
return __builtin_arm_smlad(__a, __b, __c);
@@ -588,150 +597,232 @@ __smusdx(int16x2_t __a, int16x2_t __b) {
}
#endif
-/* 9.7 CRC32 intrinsics */
-#if __ARM_FEATURE_CRC32
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+/* 8.6 Floating-point data-processing intrinsics */
+#if (defined(__ARM_FEATURE_DIRECTED_ROUNDING) && \
+ (__ARM_FEATURE_DIRECTED_ROUNDING)) && \
+ (defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE)
+static __inline__ double __attribute__((__always_inline__, __nodebug__))
+__rintn(double __a) {
+ return __builtin_roundeven(__a);
+}
+
+static __inline__ float __attribute__((__always_inline__, __nodebug__))
+__rintnf(float __a) {
+ return __builtin_roundevenf(__a);
+}
+#endif
+
+/* 8.8 CRC32 intrinsics */
+#if (defined(__ARM_FEATURE_CRC32) && __ARM_FEATURE_CRC32) || \
+ (defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE)
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32b(uint32_t __a, uint8_t __b) {
return __builtin_arm_crc32b(__a, __b);
}
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32h(uint32_t __a, uint16_t __b) {
return __builtin_arm_crc32h(__a, __b);
}
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32w(uint32_t __a, uint32_t __b) {
return __builtin_arm_crc32w(__a, __b);
}
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32d(uint32_t __a, uint64_t __b) {
return __builtin_arm_crc32d(__a, __b);
}
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32cb(uint32_t __a, uint8_t __b) {
return __builtin_arm_crc32cb(__a, __b);
}
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32ch(uint32_t __a, uint16_t __b) {
return __builtin_arm_crc32ch(__a, __b);
}
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32cw(uint32_t __a, uint32_t __b) {
return __builtin_arm_crc32cw(__a, __b);
}
-static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__, target("crc")))
__crc32cd(uint32_t __a, uint64_t __b) {
return __builtin_arm_crc32cd(__a, __b);
}
#endif
+/* 8.6 Floating-point data-processing intrinsics */
/* Armv8.3-A Javascript conversion intrinsic */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_JCVT)
-static __inline__ int32_t __attribute__((__always_inline__, __nodebug__))
+#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
+static __inline__ int32_t __attribute__((__always_inline__, __nodebug__, target("v8.3a")))
__jcvt(double __a) {
return __builtin_arm_jcvt(__a);
}
#endif
/* Armv8.5-A FP rounding intrinsics */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_FRINT)
-static __inline__ float __attribute__((__always_inline__, __nodebug__))
-__frint32zf(float __a) {
- return __builtin_arm_frint32zf(__a);
+#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
+static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
+__rint32zf(float __a) {
+ return __builtin_arm_rint32zf(__a);
}
-static __inline__ double __attribute__((__always_inline__, __nodebug__))
-__frint32z(double __a) {
- return __builtin_arm_frint32z(__a);
+static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
+__rint32z(double __a) {
+ return __builtin_arm_rint32z(__a);
}
-static __inline__ float __attribute__((__always_inline__, __nodebug__))
-__frint64zf(float __a) {
- return __builtin_arm_frint64zf(__a);
+static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
+__rint64zf(float __a) {
+ return __builtin_arm_rint64zf(__a);
}
-static __inline__ double __attribute__((__always_inline__, __nodebug__))
-__frint64z(double __a) {
- return __builtin_arm_frint64z(__a);
+static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
+__rint64z(double __a) {
+ return __builtin_arm_rint64z(__a);
}
-static __inline__ float __attribute__((__always_inline__, __nodebug__))
-__frint32xf(float __a) {
- return __builtin_arm_frint32xf(__a);
+static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
+__rint32xf(float __a) {
+ return __builtin_arm_rint32xf(__a);
}
-static __inline__ double __attribute__((__always_inline__, __nodebug__))
-__frint32x(double __a) {
- return __builtin_arm_frint32x(__a);
+static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
+__rint32x(double __a) {
+ return __builtin_arm_rint32x(__a);
}
-static __inline__ float __attribute__((__always_inline__, __nodebug__))
-__frint64xf(float __a) {
- return __builtin_arm_frint64xf(__a);
+static __inline__ float __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
+__rint64xf(float __a) {
+ return __builtin_arm_rint64xf(__a);
}
-static __inline__ double __attribute__((__always_inline__, __nodebug__))
-__frint64x(double __a) {
- return __builtin_arm_frint64x(__a);
+static __inline__ double __attribute__((__always_inline__, __nodebug__, target("v8.5a")))
+__rint64x(double __a) {
+ return __builtin_arm_rint64x(__a);
}
#endif
-/* Armv8.7-A load/store 64-byte intrinsics */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_LS64)
+/* 8.9 Armv8.7-A load/store 64-byte intrinsics */
+#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
typedef struct {
uint64_t val[8];
} data512_t;
-static __inline__ data512_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ data512_t __attribute__((__always_inline__, __nodebug__, target("ls64")))
__arm_ld64b(const void *__addr) {
- data512_t __value;
- __builtin_arm_ld64b(__addr, __value.val);
- return __value;
+ data512_t __value;
+ __builtin_arm_ld64b(__addr, __value.val);
+ return __value;
}
-static __inline__ void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__, target("ls64")))
__arm_st64b(void *__addr, data512_t __value) {
- __builtin_arm_st64b(__addr, __value.val);
+ __builtin_arm_st64b(__addr, __value.val);
}
-static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target("ls64")))
__arm_st64bv(void *__addr, data512_t __value) {
- return __builtin_arm_st64bv(__addr, __value.val);
+ return __builtin_arm_st64bv(__addr, __value.val);
}
-static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target("ls64")))
__arm_st64bv0(void *__addr, data512_t __value) {
- return __builtin_arm_st64bv0(__addr, __value.val);
+ return __builtin_arm_st64bv0(__addr, __value.val);
}
#endif
-/* 10.1 Special register intrinsics */
+/* 11.1 Special register intrinsics */
#define __arm_rsr(sysreg) __builtin_arm_rsr(sysreg)
#define __arm_rsr64(sysreg) __builtin_arm_rsr64(sysreg)
+#define __arm_rsr128(sysreg) __builtin_arm_rsr128(sysreg)
#define __arm_rsrp(sysreg) __builtin_arm_rsrp(sysreg)
#define __arm_rsrf(sysreg) __builtin_bit_cast(float, __arm_rsr(sysreg))
#define __arm_rsrf64(sysreg) __builtin_bit_cast(double, __arm_rsr64(sysreg))
#define __arm_wsr(sysreg, v) __builtin_arm_wsr(sysreg, v)
#define __arm_wsr64(sysreg, v) __builtin_arm_wsr64(sysreg, v)
+#define __arm_wsr128(sysreg, v) __builtin_arm_wsr128(sysreg, v)
#define __arm_wsrp(sysreg, v) __builtin_arm_wsrp(sysreg, v)
#define __arm_wsrf(sysreg, v) __arm_wsr(sysreg, __builtin_bit_cast(uint32_t, v))
#define __arm_wsrf64(sysreg, v) __arm_wsr64(sysreg, __builtin_bit_cast(uint64_t, v))
-/* Memory Tagging Extensions (MTE) Intrinsics */
-#if __ARM_FEATURE_MEMORY_TAGGING
+/* 10.3 Memory Tagging Extensions (MTE) Intrinsics */
+#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
#define __arm_mte_create_random_tag(__ptr, __mask) __builtin_arm_irg(__ptr, __mask)
#define __arm_mte_increment_tag(__ptr, __tag_offset) __builtin_arm_addg(__ptr, __tag_offset)
#define __arm_mte_exclude_tag(__ptr, __excluded) __builtin_arm_gmi(__ptr, __excluded)
#define __arm_mte_get_tag(__ptr) __builtin_arm_ldg(__ptr)
#define __arm_mte_set_tag(__ptr) __builtin_arm_stg(__ptr)
#define __arm_mte_ptrdiff(__ptra, __ptrb) __builtin_arm_subp(__ptra, __ptrb)
+
+/* 18 Memory Operations Intrinsics */
+#define __arm_mops_memset_tag(__tagged_address, __value, __size) \
+ __builtin_arm_mops_memset_tag(__tagged_address, __value, __size)
#endif
-/* Transactional Memory Extension (TME) Intrinsics */
-#if __ARM_FEATURE_TME
+/* 11.3 Coprocessor Intrinsics */
+#if defined(__ARM_FEATURE_COPROC)
+
+#if (__ARM_FEATURE_COPROC & 0x1)
+
+#if (__ARM_ARCH < 8)
+#define __arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2) \
+ __builtin_arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2)
+#endif /* __ARM_ARCH < 8 */
+
+#define __arm_ldc(coproc, CRd, p) __builtin_arm_ldc(coproc, CRd, p)
+#define __arm_stc(coproc, CRd, p) __builtin_arm_stc(coproc, CRd, p)
+
+#define __arm_mcr(coproc, opc1, value, CRn, CRm, opc2) \
+ __builtin_arm_mcr(coproc, opc1, value, CRn, CRm, opc2)
+#define __arm_mrc(coproc, opc1, CRn, CRm, opc2) \
+ __builtin_arm_mrc(coproc, opc1, CRn, CRm, opc2)
+
+#if (__ARM_ARCH != 4) && (__ARM_ARCH < 8)
+#define __arm_ldcl(coproc, CRd, p) __builtin_arm_ldcl(coproc, CRd, p)
+#define __arm_stcl(coproc, CRd, p) __builtin_arm_stcl(coproc, CRd, p)
+#endif /* (__ARM_ARCH != 4) && (__ARM_ARCH != 8) */
+
+#if (__ARM_ARCH_8M_MAIN__) || (__ARM_ARCH_8_1M_MAIN__)
+#define __arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2) \
+ __builtin_arm_cdp(coproc, opc1, CRd, CRn, CRm, opc2)
+#define __arm_ldcl(coproc, CRd, p) __builtin_arm_ldcl(coproc, CRd, p)
+#define __arm_stcl(coproc, CRd, p) __builtin_arm_stcl(coproc, CRd, p)
+#endif /* ___ARM_ARCH_8M_MAIN__ */
+
+#endif /* __ARM_FEATURE_COPROC & 0x1 */
+
+#if (__ARM_FEATURE_COPROC & 0x2)
+#define __arm_cdp2(coproc, opc1, CRd, CRn, CRm, opc2) \
+ __builtin_arm_cdp2(coproc, opc1, CRd, CRn, CRm, opc2)
+#define __arm_ldc2(coproc, CRd, p) __builtin_arm_ldc2(coproc, CRd, p)
+#define __arm_stc2(coproc, CRd, p) __builtin_arm_stc2(coproc, CRd, p)
+#define __arm_ldc2l(coproc, CRd, p) __builtin_arm_ldc2l(coproc, CRd, p)
+#define __arm_stc2l(coproc, CRd, p) __builtin_arm_stc2l(coproc, CRd, p)
+#define __arm_mcr2(coproc, opc1, value, CRn, CRm, opc2) \
+ __builtin_arm_mcr2(coproc, opc1, value, CRn, CRm, opc2)
+#define __arm_mrc2(coproc, opc1, CRn, CRm, opc2) \
+ __builtin_arm_mrc2(coproc, opc1, CRn, CRm, opc2)
+#endif
+
+#if (__ARM_FEATURE_COPROC & 0x4)
+#define __arm_mcrr(coproc, opc1, value, CRm) \
+ __builtin_arm_mcrr(coproc, opc1, value, CRm)
+#define __arm_mrrc(coproc, opc1, CRm) __builtin_arm_mrrc(coproc, opc1, CRm)
+#endif
+
+#if (__ARM_FEATURE_COPROC & 0x8)
+#define __arm_mcrr2(coproc, opc1, value, CRm) \
+ __builtin_arm_mcrr2(coproc, opc1, value, CRm)
+#define __arm_mrrc2(coproc, opc1, CRm) __builtin_arm_mrrc2(coproc, opc1, CRm)
+#endif
+
+#endif // __ARM_FEATURE_COPROC
+
+/* 17 Transactional Memory Extension (TME) Intrinsics */
+#if defined(__ARM_FEATURE_TME) && __ARM_FEATURE_TME
#define _TMFAILURE_REASON 0x00007fffu
#define _TMFAILURE_RTRY 0x00008000u
@@ -752,13 +843,13 @@ __arm_st64bv0(void *__addr, data512_t __value) {
#endif /* __ARM_FEATURE_TME */
-/* Armv8.5-A Random number generation intrinsics */
-#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_RNG)
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+/* 8.7 Armv8.5-A Random number generation intrinsics */
+#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE
+static __inline__ int __attribute__((__always_inline__, __nodebug__, target("rand")))
__rndr(uint64_t *__p) {
return __builtin_arm_rndr(__p);
}
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__, target("rand")))
__rndrrs(uint64_t *__p) {
return __builtin_arm_rndrrs(__p);
}
diff --git a/contrib/llvm-project/clang/lib/Headers/arm_neon_sve_bridge.h b/contrib/llvm-project/clang/lib/Headers/arm_neon_sve_bridge.h
new file mode 100644
index 000000000000..a9fbdbaf4bb9
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/arm_neon_sve_bridge.h
@@ -0,0 +1,182 @@
+/*===---- arm_neon_sve_bridge.h - ARM NEON SVE Bridge intrinsics -----------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ARM_NEON_SVE_BRIDGE_H
+#define __ARM_NEON_SVE_BRIDGE_H
+
+#include <arm_neon.h>
+#include <arm_sve.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Function attributes */
+#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))
+#define __aio \
+ static __inline__ \
+ __attribute__((__always_inline__, __nodebug__, __overloadable__))
+
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s8)))
+svint8_t svset_neonq(svint8_t, int8x16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s16)))
+svint16_t svset_neonq(svint16_t, int16x8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s32)))
+svint32_t svset_neonq(svint32_t, int32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s64)))
+svint64_t svset_neonq(svint64_t, int64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u8)))
+svuint8_t svset_neonq(svuint8_t, uint8x16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u16)))
+svuint16_t svset_neonq(svuint16_t, uint16x8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u32)))
+svuint32_t svset_neonq(svuint32_t, uint32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u64)))
+svuint64_t svset_neonq(svuint64_t, uint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f16)))
+svfloat16_t svset_neonq(svfloat16_t, float16x8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f32)))
+svfloat32_t svset_neonq(svfloat32_t, float32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f64)))
+svfloat64_t svset_neonq(svfloat64_t, float64x2_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s8)))
+svint8_t svset_neonq_s8(svint8_t, int8x16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s16)))
+svint16_t svset_neonq_s16(svint16_t, int16x8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s32)))
+svint32_t svset_neonq_s32(svint32_t, int32x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_s64)))
+svint64_t svset_neonq_s64(svint64_t, int64x2_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u8)))
+svuint8_t svset_neonq_u8(svuint8_t, uint8x16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u16)))
+svuint16_t svset_neonq_u16(svuint16_t, uint16x8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u32)))
+svuint32_t svset_neonq_u32(svuint32_t, uint32x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_u64)))
+svuint64_t svset_neonq_u64(svuint64_t, uint64x2_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f16)))
+svfloat16_t svset_neonq_f16(svfloat16_t, float16x8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f32)))
+svfloat32_t svset_neonq_f32(svfloat32_t, float32x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_f64)))
+svfloat64_t svset_neonq_f64(svfloat64_t, float64x2_t);
+
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s8)))
+int8x16_t svget_neonq(svint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s16)))
+int16x8_t svget_neonq(svint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s32)))
+int32x4_t svget_neonq(svint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s64)))
+int64x2_t svget_neonq(svint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u8)))
+uint8x16_t svget_neonq(svuint8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u16)))
+uint16x8_t svget_neonq(svuint16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u32)))
+uint32x4_t svget_neonq(svuint32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u64)))
+uint64x2_t svget_neonq(svuint64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f16)))
+float16x8_t svget_neonq(svfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f32)))
+float32x4_t svget_neonq(svfloat32_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f64)))
+float64x2_t svget_neonq(svfloat64_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s8)))
+int8x16_t svget_neonq_s8(svint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s16)))
+int16x8_t svget_neonq_s16(svint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s32)))
+int32x4_t svget_neonq_s32(svint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_s64)))
+int64x2_t svget_neonq_s64(svint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u8)))
+uint8x16_t svget_neonq_u8(svuint8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u16)))
+uint16x8_t svget_neonq_u16(svuint16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u32)))
+uint32x4_t svget_neonq_u32(svuint32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_u64)))
+uint64x2_t svget_neonq_u64(svuint64_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f16)))
+float16x8_t svget_neonq_f16(svfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f32)))
+float32x4_t svget_neonq_f32(svfloat32_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_f64)))
+float64x2_t svget_neonq_f64(svfloat64_t);
+
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s8)))
+svint8_t svdup_neonq(int8x16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s16)))
+svint16_t svdup_neonq(int16x8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s32)))
+svint32_t svdup_neonq(int32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s64)))
+svint64_t svdup_neonq(int64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u8)))
+svuint8_t svdup_neonq(uint8x16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u16)))
+svuint16_t svdup_neonq(uint16x8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u32)))
+svuint32_t svdup_neonq(uint32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u64)))
+svuint64_t svdup_neonq(uint64x2_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f16)))
+svfloat16_t svdup_neonq(float16x8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f32)))
+svfloat32_t svdup_neonq(float32x4_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f64)))
+svfloat64_t svdup_neonq(float64x2_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s8)))
+svint8_t svdup_neonq_s8(int8x16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s16)))
+svint16_t svdup_neonq_s16(int16x8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s32)))
+svint32_t svdup_neonq_s32(int32x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_s64)))
+svint64_t svdup_neonq_s64(int64x2_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u8)))
+svuint8_t svdup_neonq_u8(uint8x16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u16)))
+svuint16_t svdup_neonq_u16(uint16x8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u32)))
+svuint32_t svdup_neonq_u32(uint32x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_u64)))
+svuint64_t svdup_neonq_u64(uint64x2_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f16)))
+svfloat16_t svdup_neonq_f16(float16x8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f32)))
+svfloat32_t svdup_neonq_f32(float32x4_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_f64)))
+svfloat64_t svdup_neonq_f64(float64x2_t);
+
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_bf16)))
+svbfloat16_t svset_neonq(svbfloat16_t, bfloat16x8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset_neonq_bf16)))
+svbfloat16_t svset_neonq_bf16(svbfloat16_t, bfloat16x8_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_bf16)))
+bfloat16x8_t svget_neonq(svbfloat16_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget_neonq_bf16)))
+bfloat16x8_t svget_neonq_bf16(svbfloat16_t);
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_bf16)))
+svbfloat16_t svdup_neonq(bfloat16x8_t);
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_neonq_bf16)))
+svbfloat16_t svdup_neonq_bf16(bfloat16x8_t);
+
+#undef __ai
+#undef __aio
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif //__ARM_NEON_SVE_BRIDGE_H
diff --git a/contrib/llvm-project/clang/lib/Headers/avx2intrin.h b/contrib/llvm-project/clang/lib/Headers/avx2intrin.h
index cc16720949ea..096cae01b57d 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx2intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx2intrin.h
@@ -15,132 +15,547 @@
#define __AVX2INTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(256)))
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx2"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx2,no-evex512"), __min_vector_width__(256)))
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx2,no-evex512"), __min_vector_width__(128)))
/* SSE4 Multiple Packed Sums of Absolute Difference. */
+/// Computes sixteen sum of absolute difference (SAD) operations on sets of
+/// four unsigned 8-bit integers from the 256-bit integer vectors \a X and
+/// \a Y.
+///
+/// Eight SAD results are computed using the lower half of the input
+/// vectors, and another eight using the upper half. These 16-bit values
+/// are returned in the lower and upper halves of the 256-bit result,
+/// respectively.
+///
+/// A single SAD operation selects four bytes from \a X and four bytes from
+/// \a Y as input. It computes the differences between each \a X byte and
+/// the corresponding \a Y byte, takes the absolute value of each
+/// difference, and sums these four values to form one 16-bit result. The
+/// intrinsic computes 16 of these results with different sets of input
+/// bytes.
+///
+/// For each set of eight results, the SAD operations use the same four
+/// bytes from \a Y; the starting bit position for these four bytes is
+/// specified by \a M[1:0] times 32. The eight operations use successive
+/// sets of four bytes from \a X; the starting bit position for the first
+/// set of four bytes is specified by \a M[2] times 32. These bit positions
+/// are all relative to the 128-bit lane for each set of eight operations.
+///
+/// \code{.operation}
+/// r := 0
+/// FOR i := 0 TO 1
+/// j := i*3
+/// Ybase := M[j+1:j]*32 + i*128
+/// Xbase := M[j+2]*32 + i*128
+/// FOR k := 0 TO 3
+/// temp0 := ABS(X[Xbase+7:Xbase] - Y[Ybase+7:Ybase])
+/// temp1 := ABS(X[Xbase+15:Xbase+8] - Y[Ybase+15:Ybase+8])
+/// temp2 := ABS(X[Xbase+23:Xbase+16] - Y[Ybase+23:Ybase+16])
+/// temp3 := ABS(X[Xbase+31:Xbase+24] - Y[Ybase+31:Ybase+24])
+/// result[r+15:r] := temp0 + temp1 + temp2 + temp3
+/// Xbase := Xbase + 8
+/// r := r + 16
+/// ENDFOR
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_mpsadbw_epu8(__m256i X, __m256i Y, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VMPSADBW instruction.
+///
+/// \param X
+/// A 256-bit integer vector containing one of the inputs.
+/// \param Y
+/// A 256-bit integer vector containing one of the inputs.
+/// \param M
+/// An unsigned immediate value specifying the starting positions of the
+/// bytes to operate on.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
#define _mm256_mpsadbw_epu8(X, Y, M) \
- (__m256i)__builtin_ia32_mpsadbw256((__v32qi)(__m256i)(X), \
- (__v32qi)(__m256i)(Y), (int)(M))
-
+ ((__m256i)__builtin_ia32_mpsadbw256((__v32qi)(__m256i)(X), \
+ (__v32qi)(__m256i)(Y), (int)(M)))
+
+/// Computes the absolute value of each signed byte in the 256-bit integer
+/// vector \a __a and returns each value in the corresponding byte of
+/// the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPABSB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_abs_epi8(__m256i __a)
{
- return (__m256i)__builtin_ia32_pabsb256((__v32qi)__a);
+ return (__m256i)__builtin_elementwise_abs((__v32qs)__a);
}
+/// Computes the absolute value of each signed 16-bit element in the 256-bit
+/// vector of [16 x i16] in \a __a and returns each value in the
+/// corresponding element of the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPABSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_abs_epi16(__m256i __a)
{
- return (__m256i)__builtin_ia32_pabsw256((__v16hi)__a);
+ return (__m256i)__builtin_elementwise_abs((__v16hi)__a);
}
+/// Computes the absolute value of each signed 32-bit element in the 256-bit
+/// vector of [8 x i32] in \a __a and returns each value in the
+/// corresponding element of the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPABSD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32].
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_abs_epi32(__m256i __a)
{
- return (__m256i)__builtin_ia32_pabsd256((__v8si)__a);
-}
-
+ return (__m256i)__builtin_elementwise_abs((__v8si)__a);
+}
+
+/// Converts the elements of two 256-bit vectors of [16 x i16] to 8-bit
+/// integers using signed saturation, and returns the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*16
+/// k := i*8
+/// result[7+k:k] := SATURATE8(__a[15+j:j])
+/// result[71+k:64+k] := SATURATE8(__b[15+j:j])
+/// result[135+k:128+k] := SATURATE8(__a[143+j:128+j])
+/// result[199+k:192+k] := SATURATE8(__b[143+j:128+j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPACKSSWB instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] used to generate result[63:0] and
+/// result[191:128].
+/// \param __b
+/// A 256-bit vector of [16 x i16] used to generate result[127:64] and
+/// result[255:192].
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_packs_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_packsswb256((__v16hi)__a, (__v16hi)__b);
}
+/// Converts the elements of two 256-bit vectors of [8 x i32] to 16-bit
+/// integers using signed saturation, and returns the resulting 256-bit
+/// vector of [16 x i16].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*32
+/// k := i*16
+/// result[15+k:k] := SATURATE16(__a[31+j:j])
+/// result[79+k:64+k] := SATURATE16(__b[31+j:j])
+/// result[143+k:128+k] := SATURATE16(__a[159+j:128+j])
+/// result[207+k:192+k] := SATURATE16(__b[159+j:128+j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPACKSSDW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] used to generate result[63:0] and
+/// result[191:128].
+/// \param __b
+/// A 256-bit vector of [8 x i32] used to generate result[127:64] and
+/// result[255:192].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_packs_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_packssdw256((__v8si)__a, (__v8si)__b);
}
+/// Converts elements from two 256-bit vectors of [16 x i16] to 8-bit integers
+/// using unsigned saturation, and returns the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*16
+/// k := i*8
+/// result[7+k:k] := SATURATE8U(__a[15+j:j])
+/// result[71+k:64+k] := SATURATE8U(__b[15+j:j])
+/// result[135+k:128+k] := SATURATE8U(__a[143+j:128+j])
+/// result[199+k:192+k] := SATURATE8U(__b[143+j:128+j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPACKUSWB instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] used to generate result[63:0] and
+/// result[191:128].
+/// \param __b
+/// A 256-bit vector of [16 x i16] used to generate result[127:64] and
+/// result[255:192].
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_packus_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_packuswb256((__v16hi)__a, (__v16hi)__b);
}
+/// Converts elements from two 256-bit vectors of [8 x i32] to 16-bit integers
+/// using unsigned saturation, and returns the resulting 256-bit vector of
+/// [16 x i16].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*32
+/// k := i*16
+/// result[15+k:k] := SATURATE16U(__V1[31+j:j])
+/// result[79+k:64+k] := SATURATE16U(__V2[31+j:j])
+/// result[143+k:128+k] := SATURATE16U(__V1[159+j:128+j])
+/// result[207+k:192+k] := SATURATE16U(__V2[159+j:128+j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPACKUSDW instruction.
+///
+/// \param __V1
+/// A 256-bit vector of [8 x i32] used to generate result[63:0] and
+/// result[191:128].
+/// \param __V2
+/// A 256-bit vector of [8 x i32] used to generate result[127:64] and
+/// result[255:192].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_packus_epi32(__m256i __V1, __m256i __V2)
{
return (__m256i) __builtin_ia32_packusdw256((__v8si)__V1, (__v8si)__V2);
}
+/// Adds 8-bit integers from corresponding bytes of two 256-bit integer
+/// vectors and returns the lower 8 bits of each sum in the corresponding
+/// byte of the 256-bit integer vector result (overflow is ignored).
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 256-bit integer vector containing one of the source operands.
+/// \returns A 256-bit integer vector containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_add_epi8(__m256i __a, __m256i __b)
{
return (__m256i)((__v32qu)__a + (__v32qu)__b);
}
+/// Adds 16-bit integers from corresponding elements of two 256-bit vectors of
+/// [16 x i16] and returns the lower 16 bits of each sum in the
+/// corresponding element of the [16 x i16] result (overflow is ignored).
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_add_epi16(__m256i __a, __m256i __b)
{
return (__m256i)((__v16hu)__a + (__v16hu)__b);
}
+/// Adds 32-bit integers from corresponding elements of two 256-bit vectors of
+/// [8 x i32] and returns the lower 32 bits of each sum in the corresponding
+/// element of the [8 x i32] result (overflow is ignored).
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x i32] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_add_epi32(__m256i __a, __m256i __b)
{
return (__m256i)((__v8su)__a + (__v8su)__b);
}
+/// Adds 64-bit integers from corresponding elements of two 256-bit vectors of
+/// [4 x i64] and returns the lower 64 bits of each sum in the corresponding
+/// element of the [4 x i64] result (overflow is ignored).
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [4 x i64] containing one of the source operands.
+/// \returns A 256-bit vector of [4 x i64] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_add_epi64(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a + (__v4du)__b);
}
+/// Adds 8-bit integers from corresponding bytes of two 256-bit integer
+/// vectors using signed saturation, and returns each sum in the
+/// corresponding byte of the 256-bit integer vector result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDSB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 256-bit integer vector containing one of the source operands.
+/// \returns A 256-bit integer vector containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_adds_epi8(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_paddsb256((__v32qi)__a, (__v32qi)__b);
-}
-
+ return (__m256i)__builtin_elementwise_add_sat((__v32qs)__a, (__v32qs)__b);
+}
+
+/// Adds 16-bit integers from corresponding elements of two 256-bit vectors of
+/// [16 x i16] using signed saturation, and returns the [16 x i16] result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_adds_epi16(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_paddsw256((__v16hi)__a, (__v16hi)__b);
-}
-
+ return (__m256i)__builtin_elementwise_add_sat((__v16hi)__a, (__v16hi)__b);
+}
+
+/// Adds 8-bit integers from corresponding bytes of two 256-bit integer
+/// vectors using unsigned saturation, and returns each sum in the
+/// corresponding byte of the 256-bit integer vector result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDUSB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing one of the source operands.
+/// \param __b
+/// A 256-bit integer vector containing one of the source operands.
+/// \returns A 256-bit integer vector containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_adds_epu8(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_paddusb256((__v32qi)__a, (__v32qi)__b);
-}
-
+ return (__m256i)__builtin_elementwise_add_sat((__v32qu)__a, (__v32qu)__b);
+}
+
+/// Adds 16-bit integers from corresponding elements of two 256-bit vectors of
+/// [16 x i16] using unsigned saturation, and returns the [16 x i16] result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPADDUSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_adds_epu16(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_paddusw256((__v16hi)__a, (__v16hi)__b);
-}
-
+ return (__m256i)__builtin_elementwise_add_sat((__v16hu)__a, (__v16hu)__b);
+}
+
+/// Uses the lower half of the 256-bit vector \a a as the upper half of a
+/// temporary 256-bit value, and the lower half of the 256-bit vector \a b
+/// as the lower half of the temporary value. Right-shifts the temporary
+/// value by \a n bytes, and uses the lower 16 bytes of the shifted value
+/// as the lower 16 bytes of the result. Uses the upper halves of \a a and
+/// \a b to make another temporary value, right shifts by \a n, and uses
+/// the lower 16 bytes of the shifted value as the upper 16 bytes of the
+/// result.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_alignr_epi8(__m256i a, __m256i b, const int n);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPALIGNR instruction.
+///
+/// \param a
+/// A 256-bit integer vector containing source values.
+/// \param b
+/// A 256-bit integer vector containing source values.
+/// \param n
+/// An immediate value specifying the number of bytes to shift.
+/// \returns A 256-bit integer vector containing the result.
#define _mm256_alignr_epi8(a, b, n) \
- (__m256i)__builtin_ia32_palignr256((__v32qi)(__m256i)(a), \
- (__v32qi)(__m256i)(b), (n))
-
+ ((__m256i)__builtin_ia32_palignr256((__v32qi)(__m256i)(a), \
+ (__v32qi)(__m256i)(b), (n)))
+
+/// Computes the bitwise AND of the 256-bit integer vectors in \a __a and
+/// \a __b.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPAND instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_and_si256(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a & (__v4du)__b);
}
+/// Computes the bitwise AND of the 256-bit integer vector in \a __b with
+/// the bitwise NOT of the 256-bit integer vector in \a __a.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPANDN instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_andnot_si256(__m256i __a, __m256i __b)
{
return (__m256i)(~(__v4du)__a & (__v4du)__b);
}
+/// Computes the averages of the corresponding unsigned bytes in the two
+/// 256-bit integer vectors in \a __a and \a __b and returns each
+/// average in the corresponding byte of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// result[j+7:j] := (__a[j+7:j] + __b[j+7:j] + 1) >> 1
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPAVGB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_avg_epu8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pavgb256((__v32qi)__a, (__v32qi)__b);
}
+/// Computes the averages of the corresponding unsigned 16-bit integers in
+/// the two 256-bit vectors of [16 x i16] in \a __a and \a __b and returns
+/// each average in the corresponding element of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*16
+/// result[j+15:j] := (__a[j+15:j] + __b[j+15:j] + 1) >> 1
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPAVGW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16].
+/// \param __b
+/// A 256-bit vector of [16 x i16].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_avg_epu16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pavgw256((__v16hi)__a, (__v16hi)__b);
}
+/// Merges 8-bit integer values from either of the two 256-bit vectors
+/// \a __V1 or \a __V2, as specified by the 256-bit mask \a __M and returns
+/// the resulting 256-bit integer vector.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// IF __M[7+i] == 0
+/// result[7+j:j] := __V1[7+j:j]
+/// ELSE
+/// result[7+j:j] := __V2[7+j:j]
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBLENDVB instruction.
+///
+/// \param __V1
+/// A 256-bit integer vector containing source values.
+/// \param __V2
+/// A 256-bit integer vector containing source values.
+/// \param __M
+/// A 256-bit integer vector, with bit [7] of each byte specifying the
+/// source for each corresponding byte of the result. When the mask bit
+/// is 0, the byte is copied from \a __V1; otherwise, it is copied from
+/// \a __V2.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)
{
@@ -148,34 +563,171 @@ _mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)
(__v32qi)__M);
}
+/// Merges 16-bit integer values from either of the two 256-bit vectors
+/// \a V1 or \a V2, as specified by the immediate integer operand \a M,
+/// and returns the resulting 256-bit vector of [16 x i16].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*16
+/// IF M[i] == 0
+/// result[7+j:j] := V1[7+j:j]
+/// result[135+j:128+j] := V1[135+j:128+j]
+/// ELSE
+/// result[7+j:j] := V2[7+j:j]
+/// result[135+j:128+j] := V2[135+j:128+j]
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_blend_epi16(__m256i V1, __m256i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPBLENDW instruction.
+///
+/// \param V1
+/// A 256-bit vector of [16 x i16] containing source values.
+/// \param V2
+/// A 256-bit vector of [16 x i16] containing source values.
+/// \param M
+/// An immediate 8-bit integer operand, with bits [7:0] specifying the
+/// source for each element of the result. The position of the mask bit
+/// corresponds to the index of a copied value. When a mask bit is 0, the
+/// element is copied from \a V1; otherwise, it is copied from \a V2.
+/// \a M[0] determines the source for elements 0 and 8, \a M[1] for
+/// elements 1 and 9, and so forth.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
#define _mm256_blend_epi16(V1, V2, M) \
- (__m256i)__builtin_ia32_pblendw256((__v16hi)(__m256i)(V1), \
- (__v16hi)(__m256i)(V2), (int)(M))
-
+ ((__m256i)__builtin_ia32_pblendw256((__v16hi)(__m256i)(V1), \
+ (__v16hi)(__m256i)(V2), (int)(M)))
+
+/// Compares corresponding bytes in the 256-bit integer vectors in \a __a and
+/// \a __b for equality and returns the outcomes in the corresponding
+/// bytes of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// result[j+7:j] := (__a[j+7:j] == __b[j+7:j]) ? 0xFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPEQB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing one of the inputs.
+/// \param __b
+/// A 256-bit integer vector containing one of the inputs.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpeq_epi8(__m256i __a, __m256i __b)
{
return (__m256i)((__v32qi)__a == (__v32qi)__b);
}
+/// Compares corresponding elements in the 256-bit vectors of [16 x i16] in
+/// \a __a and \a __b for equality and returns the outcomes in the
+/// corresponding elements of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*16
+/// result[j+15:j] := (__a[j+15:j] == __b[j+15:j]) ? 0xFFFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPEQW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the inputs.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the inputs.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpeq_epi16(__m256i __a, __m256i __b)
{
return (__m256i)((__v16hi)__a == (__v16hi)__b);
}
+/// Compares corresponding elements in the 256-bit vectors of [8 x i32] in
+/// \a __a and \a __b for equality and returns the outcomes in the
+/// corresponding elements of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// result[j+31:j] := (__a[j+31:j] == __b[j+31:j]) ? 0xFFFFFFFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPEQD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the inputs.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the inputs.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpeq_epi32(__m256i __a, __m256i __b)
{
return (__m256i)((__v8si)__a == (__v8si)__b);
}
+/// Compares corresponding elements in the 256-bit vectors of [4 x i64] in
+/// \a __a and \a __b for equality and returns the outcomes in the
+/// corresponding elements of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// result[j+63:j] := (__a[j+63:j] == __b[j+63:j]) ? 0xFFFFFFFFFFFFFFFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPEQQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] containing one of the inputs.
+/// \param __b
+/// A 256-bit vector of [4 x i64] containing one of the inputs.
+/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpeq_epi64(__m256i __a, __m256i __b)
{
return (__m256i)((__v4di)__a == (__v4di)__b);
}
+/// Compares corresponding signed bytes in the 256-bit integer vectors in
+/// \a __a and \a __b for greater-than and returns the outcomes in the
+/// corresponding bytes of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// result[j+7:j] := (__a[j+7:j] > __b[j+7:j]) ? 0xFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing one of the inputs.
+/// \param __b
+/// A 256-bit integer vector containing one of the inputs.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpgt_epi8(__m256i __a, __m256i __b)
{
@@ -184,150 +736,624 @@ _mm256_cmpgt_epi8(__m256i __a, __m256i __b)
return (__m256i)((__v32qs)__a > (__v32qs)__b);
}
+/// Compares corresponding signed elements in the 256-bit vectors of
+/// [16 x i16] in \a __a and \a __b for greater-than and returns the
+/// outcomes in the corresponding elements of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*16
+/// result[j+15:j] := (__a[j+15:j] > __b[j+15:j]) ? 0xFFFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the inputs.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the inputs.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpgt_epi16(__m256i __a, __m256i __b)
{
return (__m256i)((__v16hi)__a > (__v16hi)__b);
}
+/// Compares corresponding signed elements in the 256-bit vectors of
+/// [8 x i32] in \a __a and \a __b for greater-than and returns the
+/// outcomes in the corresponding elements of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// result[j+31:j] := (__a[j+31:j] > __b[j+31:j]) ? 0xFFFFFFFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the inputs.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the inputs.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpgt_epi32(__m256i __a, __m256i __b)
{
return (__m256i)((__v8si)__a > (__v8si)__b);
}
+/// Compares corresponding signed elements in the 256-bit vectors of
+/// [4 x i64] in \a __a and \a __b for greater-than and returns the
+/// outcomes in the corresponding elements of the 256-bit result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// result[j+63:j] := (__a[j+63:j] > __b[j+63:j]) ? 0xFFFFFFFFFFFFFFFF : 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPCMPGTQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] containing one of the inputs.
+/// \param __b
+/// A 256-bit vector of [4 x i64] containing one of the inputs.
+/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cmpgt_epi64(__m256i __a, __m256i __b)
{
return (__m256i)((__v4di)__a > (__v4di)__b);
}
+/// Horizontally adds the adjacent pairs of 16-bit integers from two 256-bit
+/// vectors of [16 x i16] and returns the lower 16 bits of each sum in an
+/// element of the [16 x i16] result (overflow is ignored). Sums from
+/// \a __a are returned in the lower 64 bits of each 128-bit half of the
+/// result; sums from \a __b are returned in the upper 64 bits of each
+/// 128-bit half of the result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*128
+/// result[j+15:j] := __a[j+15:j] + __a[j+31:j+16]
+/// result[j+31:j+16] := __a[j+47:j+32] + __a[j+63:j+48]
+/// result[j+47:j+32] := __a[j+79:j+64] + __a[j+95:j+80]
+/// result[j+63:j+48] := __a[j+111:j+96] + __a[j+127:j+112]
+/// result[j+79:j+64] := __b[j+15:j] + __b[j+31:j+16]
+/// result[j+95:j+80] := __b[j+47:j+32] + __b[j+63:j+48]
+/// result[j+111:j+96] := __b[j+79:j+64] + __b[j+95:j+80]
+/// result[j+127:j+112] := __b[j+111:j+96] + __b[j+127:j+112]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPHADDW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hadd_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_phaddw256((__v16hi)__a, (__v16hi)__b);
}
+/// Horizontally adds the adjacent pairs of 32-bit integers from two 256-bit
+/// vectors of [8 x i32] and returns the lower 32 bits of each sum in an
+/// element of the [8 x i32] result (overflow is ignored). Sums from \a __a
+/// are returned in the lower 64 bits of each 128-bit half of the result;
+/// sums from \a __b are returned in the upper 64 bits of each 128-bit half
+/// of the result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*128
+/// result[j+31:j] := __a[j+31:j] + __a[j+63:j+32]
+/// result[j+63:j+32] := __a[j+95:j+64] + __a[j+127:j+96]
+/// result[j+95:j+64] := __b[j+31:j] + __b[j+63:j+32]
+/// result[j+127:j+96] := __b[j+95:j+64] + __b[j+127:j+96]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPHADDD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x i32] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hadd_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_phaddd256((__v8si)__a, (__v8si)__b);
}
+/// Horizontally adds the adjacent pairs of 16-bit integers from two 256-bit
+/// vectors of [16 x i16] using signed saturation and returns each sum in
+/// an element of the [16 x i16] result. Sums from \a __a are returned in
+/// the lower 64 bits of each 128-bit half of the result; sums from \a __b
+/// are returned in the upper 64 bits of each 128-bit half of the result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*128
+/// result[j+15:j] := SATURATE16(__a[j+15:j] + __a[j+31:j+16])
+/// result[j+31:j+16] := SATURATE16(__a[j+47:j+32] + __a[j+63:j+48])
+/// result[j+47:j+32] := SATURATE16(__a[j+79:j+64] + __a[j+95:j+80])
+/// result[j+63:j+48] := SATURATE16(__a[j+111:j+96] + __a[j+127:j+112])
+/// result[j+79:j+64] := SATURATE16(__b[j+15:j] + __b[j+31:j+16])
+/// result[j+95:j+80] := SATURATE16(__b[j+47:j+32] + __b[j+63:j+48])
+/// result[j+111:j+96] := SATURATE16(__b[j+79:j+64] + __b[j+95:j+80])
+/// result[j+127:j+112] := SATURATE16(__b[j+111:j+96] + __b[j+127:j+112])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPHADDSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the sums.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hadds_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_phaddsw256((__v16hi)__a, (__v16hi)__b);
}
+/// Horizontally subtracts adjacent pairs of 16-bit integers from two 256-bit
+/// vectors of [16 x i16] and returns the lower 16 bits of each difference
+/// in an element of the [16 x i16] result (overflow is ignored).
+/// Differences from \a __a are returned in the lower 64 bits of each
+/// 128-bit half of the result; differences from \a __b are returned in the
+/// upper 64 bits of each 128-bit half of the result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*128
+/// result[j+15:j] := __a[j+15:j] - __a[j+31:j+16]
+/// result[j+31:j+16] := __a[j+47:j+32] - __a[j+63:j+48]
+/// result[j+47:j+32] := __a[j+79:j+64] - __a[j+95:j+80]
+/// result[j+63:j+48] := __a[j+111:j+96] - __a[j+127:j+112]
+/// result[j+79:j+64] := __b[j+15:j] - __b[j+31:j+16]
+/// result[j+95:j+80] := __b[j+47:j+32] - __b[j+63:j+48]
+/// result[j+111:j+96] := __b[j+79:j+64] - __b[j+95:j+80]
+/// result[j+127:j+112] := __b[j+111:j+96] - __b[j+127:j+112]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPHSUBW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hsub_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_phsubw256((__v16hi)__a, (__v16hi)__b);
}
+/// Horizontally subtracts adjacent pairs of 32-bit integers from two 256-bit
+/// vectors of [8 x i32] and returns the lower 32 bits of each difference in
+/// an element of the [8 x i32] result (overflow is ignored). Differences
+/// from \a __a are returned in the lower 64 bits of each 128-bit half of
+/// the result; differences from \a __b are returned in the upper 64 bits
+/// of each 128-bit half of the result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*128
+/// result[j+31:j] := __a[j+31:j] - __a[j+63:j+32]
+/// result[j+63:j+32] := __a[j+95:j+64] - __a[j+127:j+96]
+/// result[j+95:j+64] := __b[j+31:j] - __b[j+63:j+32]
+/// result[j+127:j+96] := __b[j+95:j+64] - __b[j+127:j+96]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPHSUBD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x i32] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hsub_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_phsubd256((__v8si)__a, (__v8si)__b);
}
+/// Horizontally subtracts adjacent pairs of 16-bit integers from two 256-bit
+/// vectors of [16 x i16] using signed saturation and returns each sum in
+/// an element of the [16 x i16] result. Differences from \a __a are
+/// returned in the lower 64 bits of each 128-bit half of the result;
+/// differences from \a __b are returned in the upper 64 bits of each
+/// 128-bit half of the result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*128
+/// result[j+15:j] := SATURATE16(__a[j+15:j] - __a[j+31:j+16])
+/// result[j+31:j+16] := SATURATE16(__a[j+47:j+32] - __a[j+63:j+48])
+/// result[j+47:j+32] := SATURATE16(__a[j+79:j+64] - __a[j+95:j+80])
+/// result[j+63:j+48] := SATURATE16(__a[j+111:j+96] - __a[j+127:j+112])
+/// result[j+79:j+64] := SATURATE16(__b[j+15:j] - __b[j+31:j+16])
+/// result[j+95:j+80] := SATURATE16(__b[j+47:j+32] - __b[j+63:j+48])
+/// result[j+111:j+96] := SATURATE16(__b[j+79:j+64] - __b[j+95:j+80])
+/// result[j+127:j+112] := SATURATE16(__b[j+111:j+96] - __b[j+127:j+112])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPHSUBSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_hsubs_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_phsubsw256((__v16hi)__a, (__v16hi)__b);
}
+/// Multiplies each unsigned byte from the 256-bit integer vector in \a __a
+/// with the corresponding signed byte from the 256-bit integer vector in
+/// \a __b, forming signed 16-bit intermediate products. Adds adjacent
+/// pairs of those products using signed saturation to form 16-bit sums
+/// returned as elements of the [16 x i16] result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*16
+/// temp1 := __a[j+7:j] * __b[j+7:j]
+/// temp2 := __a[j+15:j+8] * __b[j+15:j+8]
+/// result[j+15:j] := SATURATE16(temp1 + temp2)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMADDUBSW instruction.
+///
+/// \param __a
+/// A 256-bit vector containing one of the source operands.
+/// \param __b
+/// A 256-bit vector containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maddubs_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmaddubsw256((__v32qi)__a, (__v32qi)__b);
}
+/// Multiplies corresponding 16-bit elements of two 256-bit vectors of
+/// [16 x i16], forming 32-bit intermediate products, and adds pairs of
+/// those products to form 32-bit sums returned as elements of the
+/// [8 x i32] result.
+///
+/// There is only one wraparound case: when all four of the 16-bit sources
+/// are \c 0x8000, the result will be \c 0x80000000.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// temp1 := __a[j+15:j] * __b[j+15:j]
+/// temp2 := __a[j+31:j+16] * __b[j+31:j+16]
+/// result[j+31:j] := temp1 + temp2
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMADDWD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_madd_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmaddwd256((__v16hi)__a, (__v16hi)__b);
}
+/// Compares the corresponding signed bytes in the two 256-bit integer vectors
+/// in \a __a and \a __b and returns the larger of each pair in the
+/// corresponding byte of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMAXSB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epi8(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pmaxsb256((__v32qi)__a, (__v32qi)__b);
-}
-
+ return (__m256i)__builtin_elementwise_max((__v32qs)__a, (__v32qs)__b);
+}
+
+/// Compares the corresponding signed 16-bit integers in the two 256-bit
+/// vectors of [16 x i16] in \a __a and \a __b and returns the larger of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMAXSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16].
+/// \param __b
+/// A 256-bit vector of [16 x i16].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epi16(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pmaxsw256((__v16hi)__a, (__v16hi)__b);
-}
-
+ return (__m256i)__builtin_elementwise_max((__v16hi)__a, (__v16hi)__b);
+}
+
+/// Compares the corresponding signed 32-bit integers in the two 256-bit
+/// vectors of [8 x i32] in \a __a and \a __b and returns the larger of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMAXSD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32].
+/// \param __b
+/// A 256-bit vector of [8 x i32].
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epi32(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pmaxsd256((__v8si)__a, (__v8si)__b);
-}
-
+ return (__m256i)__builtin_elementwise_max((__v8si)__a, (__v8si)__b);
+}
+
+/// Compares the corresponding unsigned bytes in the two 256-bit integer
+/// vectors in \a __a and \a __b and returns the larger of each pair in
+/// the corresponding byte of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMAXUB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epu8(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pmaxub256((__v32qi)__a, (__v32qi)__b);
-}
-
+ return (__m256i)__builtin_elementwise_max((__v32qu)__a, (__v32qu)__b);
+}
+
+/// Compares the corresponding unsigned 16-bit integers in the two 256-bit
+/// vectors of [16 x i16] in \a __a and \a __b and returns the larger of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMAXUW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16].
+/// \param __b
+/// A 256-bit vector of [16 x i16].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epu16(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pmaxuw256((__v16hi)__a, (__v16hi)__b);
-}
-
+ return (__m256i)__builtin_elementwise_max((__v16hu)__a, (__v16hu)__b);
+}
+
+/// Compares the corresponding unsigned 32-bit integers in the two 256-bit
+/// vectors of [8 x i32] in \a __a and \a __b and returns the larger of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMAXUD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32].
+/// \param __b
+/// A 256-bit vector of [8 x i32].
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epu32(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pmaxud256((__v8si)__a, (__v8si)__b);
-}
-
+ return (__m256i)__builtin_elementwise_max((__v8su)__a, (__v8su)__b);
+}
+
+/// Compares the corresponding signed bytes in the two 256-bit integer vectors
+/// in \a __a and \a __b and returns the smaller of each pair in the
+/// corresponding byte of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMINSB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epi8(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pminsb256((__v32qi)__a, (__v32qi)__b);
-}
-
+ return (__m256i)__builtin_elementwise_min((__v32qs)__a, (__v32qs)__b);
+}
+
+/// Compares the corresponding signed 16-bit integers in the two 256-bit
+/// vectors of [16 x i16] in \a __a and \a __b and returns the smaller of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMINSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16].
+/// \param __b
+/// A 256-bit vector of [16 x i16].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epi16(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pminsw256((__v16hi)__a, (__v16hi)__b);
-}
-
+ return (__m256i)__builtin_elementwise_min((__v16hi)__a, (__v16hi)__b);
+}
+
+/// Compares the corresponding signed 32-bit integers in the two 256-bit
+/// vectors of [8 x i32] in \a __a and \a __b and returns the smaller of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMINSD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32].
+/// \param __b
+/// A 256-bit vector of [8 x i32].
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epi32(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pminsd256((__v8si)__a, (__v8si)__b);
-}
-
+ return (__m256i)__builtin_elementwise_min((__v8si)__a, (__v8si)__b);
+}
+
+/// Compares the corresponding unsigned bytes in the two 256-bit integer
+/// vectors in \a __a and \a __b and returns the smaller of each pair in
+/// the corresponding byte of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMINUB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epu8(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pminub256((__v32qi)__a, (__v32qi)__b);
-}
-
+ return (__m256i)__builtin_elementwise_min((__v32qu)__a, (__v32qu)__b);
+}
+
+/// Compares the corresponding unsigned 16-bit integers in the two 256-bit
+/// vectors of [16 x i16] in \a __a and \a __b and returns the smaller of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMINUW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16].
+/// \param __b
+/// A 256-bit vector of [16 x i16].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epu16(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pminuw256 ((__v16hi)__a, (__v16hi)__b);
-}
-
+ return (__m256i)__builtin_elementwise_min((__v16hu)__a, (__v16hu)__b);
+}
+
+/// Compares the corresponding unsigned 32-bit integers in the two 256-bit
+/// vectors of [8 x i32] in \a __a and \a __b and returns the smaller of
+/// each pair in the corresponding element of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMINUD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32].
+/// \param __b
+/// A 256-bit vector of [8 x i32].
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epu32(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_pminud256((__v8si)__a, (__v8si)__b);
-}
-
+ return (__m256i)__builtin_elementwise_min((__v8su)__a, (__v8su)__b);
+}
+
+/// Creates a 32-bit integer mask from the most significant bit of each byte
+/// in the 256-bit integer vector in \a __a and returns the result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// result[i] := __a[j+7]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVMSKB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing the source bytes.
+/// \returns The 32-bit integer mask.
static __inline__ int __DEFAULT_FN_ATTRS256
_mm256_movemask_epi8(__m256i __a)
{
return __builtin_ia32_pmovmskb256((__v32qi)__a);
}
+/// Sign-extends bytes from the 128-bit integer vector in \a __V and returns
+/// the 16-bit values in the corresponding elements of a 256-bit vector
+/// of [16 x i16].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*8
+/// k := i*16
+/// result[k+15:k] := SignExtend(__V[j+7:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXBW instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the source bytes.
+/// \returns A 256-bit vector of [16 x i16] containing the sign-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi8_epi16(__m128i __V)
{
@@ -336,6 +1362,26 @@ _mm256_cvtepi8_epi16(__m128i __V)
return (__m256i)__builtin_convertvector((__v16qs)__V, __v16hi);
}
+/// Sign-extends bytes from the lower half of the 128-bit integer vector in
+/// \a __V and returns the 32-bit values in the corresponding elements of a
+/// 256-bit vector of [8 x i32].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*8
+/// k := i*32
+/// result[k+31:k] := SignExtend(__V[j+7:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXBD instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the source bytes.
+/// \returns A 256-bit vector of [8 x i32] containing the sign-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi8_epi32(__m128i __V)
{
@@ -344,6 +1390,25 @@ _mm256_cvtepi8_epi32(__m128i __V)
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
}
+/// Sign-extends the first four bytes from the 128-bit integer vector in
+/// \a __V and returns the 64-bit values in the corresponding elements of a
+/// 256-bit vector of [4 x i64].
+///
+/// \code{.operation}
+/// result[63:0] := SignExtend(__V[7:0])
+/// result[127:64] := SignExtend(__V[15:8])
+/// result[191:128] := SignExtend(__V[23:16])
+/// result[255:192] := SignExtend(__V[31:24])
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXBQ instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the source bytes.
+/// \returns A 256-bit vector of [4 x i64] containing the sign-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi8_epi64(__m128i __V)
{
@@ -352,388 +1417,1656 @@ _mm256_cvtepi8_epi64(__m128i __V)
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4di);
}
+/// Sign-extends 16-bit elements from the 128-bit vector of [8 x i16] in
+/// \a __V and returns the 32-bit values in the corresponding elements of a
+/// 256-bit vector of [8 x i32].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*16
+/// k := i*32
+/// result[k+31:k] := SignExtend(__V[j+15:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXWD instruction.
+///
+/// \param __V
+/// A 128-bit vector of [8 x i16] containing the source values.
+/// \returns A 256-bit vector of [8 x i32] containing the sign-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi16_epi32(__m128i __V)
{
return (__m256i)__builtin_convertvector((__v8hi)__V, __v8si);
}
+/// Sign-extends 16-bit elements from the lower half of the 128-bit vector of
+/// [8 x i16] in \a __V and returns the 64-bit values in the corresponding
+/// elements of a 256-bit vector of [4 x i64].
+///
+/// \code{.operation}
+/// result[63:0] := SignExtend(__V[15:0])
+/// result[127:64] := SignExtend(__V[31:16])
+/// result[191:128] := SignExtend(__V[47:32])
+/// result[255:192] := SignExtend(__V[64:48])
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXWQ instruction.
+///
+/// \param __V
+/// A 128-bit vector of [8 x i16] containing the source values.
+/// \returns A 256-bit vector of [4 x i64] containing the sign-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi16_epi64(__m128i __V)
{
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4di);
}
+/// Sign-extends 32-bit elements from the 128-bit vector of [4 x i32] in
+/// \a __V and returns the 64-bit values in the corresponding elements of a
+/// 256-bit vector of [4 x i64].
+///
+/// \code{.operation}
+/// result[63:0] := SignExtend(__V[31:0])
+/// result[127:64] := SignExtend(__V[63:32])
+/// result[191:128] := SignExtend(__V[95:64])
+/// result[255:192] := SignExtend(__V[127:96])
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXDQ instruction.
+///
+/// \param __V
+/// A 128-bit vector of [4 x i32] containing the source values.
+/// \returns A 256-bit vector of [4 x i64] containing the sign-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepi32_epi64(__m128i __V)
{
return (__m256i)__builtin_convertvector((__v4si)__V, __v4di);
}
+/// Zero-extends bytes from the 128-bit integer vector in \a __V and returns
+/// the 16-bit values in the corresponding elements of a 256-bit vector
+/// of [16 x i16].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*8
+/// k := i*16
+/// result[k+15:k] := ZeroExtend(__V[j+7:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVZXBW instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the source bytes.
+/// \returns A 256-bit vector of [16 x i16] containing the zero-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu8_epi16(__m128i __V)
{
return (__m256i)__builtin_convertvector((__v16qu)__V, __v16hi);
}
+/// Zero-extends bytes from the lower half of the 128-bit integer vector in
+/// \a __V and returns the 32-bit values in the corresponding elements of a
+/// 256-bit vector of [8 x i32].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*8
+/// k := i*32
+/// result[k+31:k] := ZeroExtend(__V[j+7:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVZXBD instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the source bytes.
+/// \returns A 256-bit vector of [8 x i32] containing the zero-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu8_epi32(__m128i __V)
{
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8si);
}
+/// Zero-extends the first four bytes from the 128-bit integer vector in
+/// \a __V and returns the 64-bit values in the corresponding elements of a
+/// 256-bit vector of [4 x i64].
+///
+/// \code{.operation}
+/// result[63:0] := ZeroExtend(__V[7:0])
+/// result[127:64] := ZeroExtend(__V[15:8])
+/// result[191:128] := ZeroExtend(__V[23:16])
+/// result[255:192] := ZeroExtend(__V[31:24])
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVZXBQ instruction.
+///
+/// \param __V
+/// A 128-bit integer vector containing the source bytes.
+/// \returns A 256-bit vector of [4 x i64] containing the zero-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu8_epi64(__m128i __V)
{
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4di);
}
+/// Zero-extends 16-bit elements from the 128-bit vector of [8 x i16] in
+/// \a __V and returns the 32-bit values in the corresponding elements of a
+/// 256-bit vector of [8 x i32].
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*16
+/// k := i*32
+/// result[k+31:k] := ZeroExtend(__V[j+15:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVZXWD instruction.
+///
+/// \param __V
+/// A 128-bit vector of [8 x i16] containing the source values.
+/// \returns A 256-bit vector of [8 x i32] containing the zero-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu16_epi32(__m128i __V)
{
return (__m256i)__builtin_convertvector((__v8hu)__V, __v8si);
}
+/// Zero-extends 16-bit elements from the lower half of the 128-bit vector of
+/// [8 x i16] in \a __V and returns the 64-bit values in the corresponding
+/// elements of a 256-bit vector of [4 x i64].
+///
+/// \code{.operation}
+/// result[63:0] := ZeroExtend(__V[15:0])
+/// result[127:64] := ZeroExtend(__V[31:16])
+/// result[191:128] := ZeroExtend(__V[47:32])
+/// result[255:192] := ZeroExtend(__V[64:48])
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXWQ instruction.
+///
+/// \param __V
+/// A 128-bit vector of [8 x i16] containing the source values.
+/// \returns A 256-bit vector of [4 x i64] containing the zero-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu16_epi64(__m128i __V)
{
return (__m256i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4di);
}
+/// Zero-extends 32-bit elements from the 128-bit vector of [4 x i32] in
+/// \a __V and returns the 64-bit values in the corresponding elements of a
+/// 256-bit vector of [4 x i64].
+///
+/// \code{.operation}
+/// result[63:0] := ZeroExtend(__V[31:0])
+/// result[127:64] := ZeroExtend(__V[63:32])
+/// result[191:128] := ZeroExtend(__V[95:64])
+/// result[255:192] := ZeroExtend(__V[127:96])
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVZXDQ instruction.
+///
+/// \param __V
+/// A 128-bit vector of [4 x i32] containing the source values.
+/// \returns A 256-bit vector of [4 x i64] containing the zero-extended
+/// values.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtepu32_epi64(__m128i __V)
{
return (__m256i)__builtin_convertvector((__v4su)__V, __v4di);
}
+/// Multiplies signed 32-bit integers from even-numbered elements of two
+/// 256-bit vectors of [8 x i32] and returns the 64-bit products in the
+/// [4 x i64] result.
+///
+/// \code{.operation}
+/// result[63:0] := __a[31:0] * __b[31:0]
+/// result[127:64] := __a[95:64] * __b[95:64]
+/// result[191:128] := __a[159:128] * __b[159:128]
+/// result[255:192] := __a[223:192] * __b[223:192]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULDQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \returns A 256-bit vector of [4 x i64] containing the products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mul_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmuldq256((__v8si)__a, (__v8si)__b);
}
+/// Multiplies signed 16-bit integer elements of two 256-bit vectors of
+/// [16 x i16], truncates the 32-bit results to the most significant 18
+/// bits, rounds by adding 1, and returns bits [16:1] of each rounded
+/// product in the [16 x i16] result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*16
+/// temp := ((__a[j+15:j] * __b[j+15:j]) >> 14) + 1
+/// result[j+15:j] := temp[16:1]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULHRSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the rounded products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mulhrs_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)__a, (__v16hi)__b);
}
+/// Multiplies unsigned 16-bit integer elements of two 256-bit vectors of
+/// [16 x i16], and returns the upper 16 bits of each 32-bit product in the
+/// [16 x i16] result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULHUW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mulhi_epu16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)__a, (__v16hi)__b);
}
+/// Multiplies signed 16-bit integer elements of two 256-bit vectors of
+/// [16 x i16], and returns the upper 16 bits of each 32-bit product in the
+/// [16 x i16] result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULHW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mulhi_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pmulhw256((__v16hi)__a, (__v16hi)__b);
}
+/// Multiplies signed 16-bit integer elements of two 256-bit vectors of
+/// [16 x i16], and returns the lower 16 bits of each 32-bit product in the
+/// [16 x i16] result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULLW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing one of the source operands.
+/// \returns A 256-bit vector of [16 x i16] containing the products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mullo_epi16(__m256i __a, __m256i __b)
{
return (__m256i)((__v16hu)__a * (__v16hu)__b);
}
+/// Multiplies signed 32-bit integer elements of two 256-bit vectors of
+/// [8 x i32], and returns the lower 32 bits of each 64-bit product in the
+/// [8 x i32] result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULLD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \returns A 256-bit vector of [8 x i32] containing the products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mullo_epi32 (__m256i __a, __m256i __b)
{
return (__m256i)((__v8su)__a * (__v8su)__b);
}
+/// Multiplies unsigned 32-bit integers from even-numered elements of two
+/// 256-bit vectors of [8 x i32] and returns the 64-bit products in the
+/// [4 x i64] result.
+///
+/// \code{.operation}
+/// result[63:0] := __a[31:0] * __b[31:0]
+/// result[127:64] := __a[95:64] * __b[95:64]
+/// result[191:128] := __a[159:128] * __b[159:128]
+/// result[255:192] := __a[223:192] * __b[223:192]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMULUDQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing one of the source operands.
+/// \returns A 256-bit vector of [4 x i64] containing the products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mul_epu32(__m256i __a, __m256i __b)
{
return __builtin_ia32_pmuludq256((__v8si)__a, (__v8si)__b);
}
+/// Computes the bitwise OR of the 256-bit integer vectors in \a __a and
+/// \a __b.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPOR instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_or_si256(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a | (__v4du)__b);
}
+/// Computes four sum of absolute difference (SAD) operations on sets of eight
+/// unsigned 8-bit integers from the 256-bit integer vectors \a __a and
+/// \a __b.
+///
+/// One SAD result is computed for each set of eight bytes from \a __a and
+/// eight bytes from \a __b. The zero-extended SAD value is returned in the
+/// corresponding 64-bit element of the result.
+///
+/// A single SAD operation takes the differences between the corresponding
+/// bytes of \a __a and \a __b, takes the absolute value of each difference,
+/// and sums these eight values to form one 16-bit result. This operation
+/// is repeated four times with successive sets of eight bytes.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// temp0 := ABS(__a[j+7:j] - __b[j+7:j])
+/// temp1 := ABS(__a[j+15:j+8] - __b[j+15:j+8])
+/// temp2 := ABS(__a[j+23:j+16] - __b[j+23:j+16])
+/// temp3 := ABS(__a[j+31:j+24] - __b[j+31:j+24])
+/// temp4 := ABS(__a[j+39:j+32] - __b[j+39:j+32])
+/// temp5 := ABS(__a[j+47:j+40] - __b[j+47:j+40])
+/// temp6 := ABS(__a[j+55:j+48] - __b[j+55:j+48])
+/// temp7 := ABS(__a[j+63:j+56] - __b[j+63:j+56])
+/// result[j+15:j] := temp0 + temp1 + temp2 + temp3 +
+/// temp4 + temp5 + temp6 + temp7
+/// result[j+63:j+16] := 0
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSADBW instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sad_epu8(__m256i __a, __m256i __b)
{
return __builtin_ia32_psadbw256((__v32qi)__a, (__v32qi)__b);
}
+/// Shuffles 8-bit integers in the 256-bit integer vector \a __a according
+/// to control information in the 256-bit integer vector \a __b, and
+/// returns the 256-bit result. In effect there are two separate 128-bit
+/// shuffles in the lower and upper halves.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// IF __b[j+7] == 1
+/// result[j+7:j] := 0
+/// ELSE
+/// k := __b[j+3:j] * 8
+/// IF i > 15
+/// k := k + 128
+/// FI
+/// result[j+7:j] := __a[k+7:k]
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSHUFB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing source values.
+/// \param __b
+/// A 256-bit integer vector containing control information to determine
+/// what goes into the corresponding byte of the result. If bit 7 of the
+/// control byte is 1, the result byte is 0; otherwise, bits 3:0 of the
+/// control byte specify the index (within the same 128-bit half) of \a __a
+/// to copy to the result byte.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shuffle_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_pshufb256((__v32qi)__a, (__v32qi)__b);
}
+/// Shuffles 32-bit integers from the 256-bit vector of [8 x i32] in \a a
+/// according to control information in the integer literal \a imm, and
+/// returns the 256-bit result. In effect there are two parallel 128-bit
+/// shuffles in the lower and upper halves.
+///
+/// \code{.operation}
+/// FOR i := 0 to 3
+/// j := i*32
+/// k := (imm >> i*2)[1:0] * 32
+/// result[j+31:j] := a[k+31:k]
+/// result[128+j+31:128+j] := a[128+k+31:128+k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_shuffle_epi32(__m256i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSHUFB instruction.
+///
+/// \param a
+/// A 256-bit vector of [8 x i32] containing source values.
+/// \param imm
+/// An immediate 8-bit value specifying which elements to copy from \a a.
+/// \a imm[1:0] specifies the index in \a a for elements 0 and 4 of the
+/// result, \a imm[3:2] specifies the index for elements 1 and 5, and so
+/// forth.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
#define _mm256_shuffle_epi32(a, imm) \
- (__m256i)__builtin_ia32_pshufd256((__v8si)(__m256i)(a), (int)(imm))
-
+ ((__m256i)__builtin_ia32_pshufd256((__v8si)(__m256i)(a), (int)(imm)))
+
+/// Shuffles 16-bit integers from the 256-bit vector of [16 x i16] in \a a
+/// according to control information in the integer literal \a imm, and
+/// returns the 256-bit result. The upper 64 bits of each 128-bit half
+/// are shuffled in parallel; the lower 64 bits of each 128-bit half are
+/// copied from \a a unchanged.
+///
+/// \code{.operation}
+/// result[63:0] := a[63:0]
+/// result[191:128] := a[191:128]
+/// FOR i := 0 TO 3
+/// j := i * 16 + 64
+/// k := (imm >> i*2)[1:0] * 16 + 64
+/// result[j+15:j] := a[k+15:k]
+/// result[128+j+15:128+j] := a[128+k+15:128+k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_shufflehi_epi16(__m256i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSHUFHW instruction.
+///
+/// \param a
+/// A 256-bit vector of [16 x i16] containing source values.
+/// \param imm
+/// An immediate 8-bit value specifying which elements to copy from \a a.
+/// \a imm[1:0] specifies the index in \a a for elements 4 and 8 of the
+/// result, \a imm[3:2] specifies the index for elements 5 and 9, and so
+/// forth. Indexes are offset by 4 (so 0 means index 4, and so forth).
+/// \returns A 256-bit vector of [16 x i16] containing the result.
#define _mm256_shufflehi_epi16(a, imm) \
- (__m256i)__builtin_ia32_pshufhw256((__v16hi)(__m256i)(a), (int)(imm))
-
+ ((__m256i)__builtin_ia32_pshufhw256((__v16hi)(__m256i)(a), (int)(imm)))
+
+/// Shuffles 16-bit integers from the 256-bit vector of [16 x i16] \a a
+/// according to control information in the integer literal \a imm, and
+/// returns the 256-bit [16 x i16] result. The lower 64 bits of each
+/// 128-bit half are shuffled; the upper 64 bits of each 128-bit half are
+/// copied from \a a unchanged.
+///
+/// \code{.operation}
+/// result[127:64] := a[127:64]
+/// result[255:192] := a[255:192]
+/// FOR i := 0 TO 3
+/// j := i * 16
+/// k := (imm >> i*2)[1:0] * 16
+/// result[j+15:j] := a[k+15:k]
+/// result[128+j+15:128+j] := a[128+k+15:128+k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_shufflelo_epi16(__m256i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSHUFLW instruction.
+///
+/// \param a
+/// A 256-bit vector of [16 x i16] to use as a source of data for the
+/// result.
+/// \param imm
+/// An immediate 8-bit value specifying which elements to copy from \a a.
+/// \a imm[1:0] specifies the index in \a a for elements 0 and 8 of the
+/// result, \a imm[3:2] specifies the index for elements 1 and 9, and so
+/// forth.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
#define _mm256_shufflelo_epi16(a, imm) \
- (__m256i)__builtin_ia32_pshuflw256((__v16hi)(__m256i)(a), (int)(imm))
-
+ ((__m256i)__builtin_ia32_pshuflw256((__v16hi)(__m256i)(a), (int)(imm)))
+
+/// Sets each byte of the result to the corresponding byte of the 256-bit
+/// integer vector in \a __a, the negative of that byte, or zero, depending
+/// on whether the corresponding byte of the 256-bit integer vector in
+/// \a __b is greater than zero, less than zero, or equal to zero,
+/// respectively.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSIGNB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector].
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sign_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_psignb256((__v32qi)__a, (__v32qi)__b);
}
+/// Sets each element of the result to the corresponding element of the
+/// 256-bit vector of [16 x i16] in \a __a, the negative of that element,
+/// or zero, depending on whether the corresponding element of the 256-bit
+/// vector of [16 x i16] in \a __b is greater than zero, less than zero, or
+/// equal to zero, respectively.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSIGNW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16].
+/// \param __b
+/// A 256-bit vector of [16 x i16].
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sign_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_psignw256((__v16hi)__a, (__v16hi)__b);
}
+/// Sets each element of the result to the corresponding element of the
+/// 256-bit vector of [8 x i32] in \a __a, the negative of that element, or
+/// zero, depending on whether the corresponding element of the 256-bit
+/// vector of [8 x i32] in \a __b is greater than zero, less than zero, or
+/// equal to zero, respectively.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSIGND instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32].
+/// \param __b
+/// A 256-bit vector of [8 x i32].
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sign_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_psignd256((__v8si)__a, (__v8si)__b);
}
+/// Shifts each 128-bit half of the 256-bit integer vector \a a left by
+/// \a imm bytes, shifting in zero bytes, and returns the result. If \a imm
+/// is greater than 15, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_slli_si256(__m256i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSLLDQ instruction.
+///
+/// \param a
+/// A 256-bit integer vector to be shifted.
+/// \param imm
+/// An unsigned immediate value specifying the shift count (in bytes).
+/// \returns A 256-bit integer vector containing the result.
#define _mm256_slli_si256(a, imm) \
- (__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm))
-
+ ((__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm)))
+
+/// Shifts each 128-bit half of the 256-bit integer vector \a a left by
+/// \a imm bytes, shifting in zero bytes, and returns the result. If \a imm
+/// is greater than 15, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_bslli_epi128(__m256i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSLLDQ instruction.
+///
+/// \param a
+/// A 256-bit integer vector to be shifted.
+/// \param imm
+/// An unsigned immediate value specifying the shift count (in bytes).
+/// \returns A 256-bit integer vector containing the result.
#define _mm256_bslli_epi128(a, imm) \
- (__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm))
-
+ ((__m256i)__builtin_ia32_pslldqi256_byteshift((__v4di)(__m256i)(a), (int)(imm)))
+
+/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
+/// left by \a __count bits, shifting in zero bits, and returns the result.
+/// If \a __count is greater than 15, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] to be shifted.
+/// \param __count
+/// An unsigned integer value specifying the shift count (in bits).
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_slli_epi16(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_psllwi256((__v16hi)__a, __count);
}
+/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
+/// left by the number of bits specified by the lower 64 bits of \a __count,
+/// shifting in zero bits, and returns the result. If \a __count is greater
+/// than 15, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] to be shifted.
+/// \param __count
+/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
+/// shift count (in bits). The upper element is ignored.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sll_epi16(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_psllw256((__v16hi)__a, (__v8hi)__count);
}
+/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
+/// left by \a __count bits, shifting in zero bits, and returns the result.
+/// If \a __count is greater than 31, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] to be shifted.
+/// \param __count
+/// An unsigned integer value specifying the shift count (in bits).
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_slli_epi32(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_pslldi256((__v8si)__a, __count);
}
+/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
+/// left by the number of bits given in the lower 64 bits of \a __count,
+/// shifting in zero bits, and returns the result. If \a __count is greater
+/// than 31, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] to be shifted.
+/// \param __count
+/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
+/// shift count (in bits). The upper element is ignored.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sll_epi32(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_pslld256((__v8si)__a, (__v4si)__count);
}
+/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a
+/// left by \a __count bits, shifting in zero bits, and returns the result.
+/// If \a __count is greater than 63, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] to be shifted.
+/// \param __count
+/// An unsigned integer value specifying the shift count (in bits).
+/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_slli_epi64(__m256i __a, int __count)
{
return __builtin_ia32_psllqi256((__v4di)__a, __count);
}
+/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a
+/// left by the number of bits given in the lower 64 bits of \a __count,
+/// shifting in zero bits, and returns the result. If \a __count is greater
+/// than 63, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] to be shifted.
+/// \param __count
+/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
+/// shift count (in bits). The upper element is ignored.
+/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sll_epi64(__m256i __a, __m128i __count)
{
return __builtin_ia32_psllq256((__v4di)__a, __count);
}
+/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
+/// right by \a __count bits, shifting in sign bits, and returns the result.
+/// If \a __count is greater than 15, each element of the result is either
+/// 0 or -1 according to the corresponding input sign bit.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRAW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] to be shifted.
+/// \param __count
+/// An unsigned integer value specifying the shift count (in bits).
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srai_epi16(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_psrawi256((__v16hi)__a, __count);
}
+/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
+/// right by the number of bits given in the lower 64 bits of \a __count,
+/// shifting in sign bits, and returns the result. If \a __count is greater
+/// than 15, each element of the result is either 0 or -1 according to the
+/// corresponding input sign bit.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRAW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] to be shifted.
+/// \param __count
+/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
+/// shift count (in bits). The upper element is ignored.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sra_epi16(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_psraw256((__v16hi)__a, (__v8hi)__count);
}
+/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
+/// right by \a __count bits, shifting in sign bits, and returns the result.
+/// If \a __count is greater than 31, each element of the result is either
+/// 0 or -1 according to the corresponding input sign bit.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRAD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] to be shifted.
+/// \param __count
+/// An unsigned integer value specifying the shift count (in bits).
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srai_epi32(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_psradi256((__v8si)__a, __count);
}
+/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
+/// right by the number of bits given in the lower 64 bits of \a __count,
+/// shifting in sign bits, and returns the result. If \a __count is greater
+/// than 31, each element of the result is either 0 or -1 according to the
+/// corresponding input sign bit.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRAD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] to be shifted.
+/// \param __count
+/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
+/// shift count (in bits). The upper element is ignored.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sra_epi32(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_psrad256((__v8si)__a, (__v4si)__count);
}
+/// Shifts each 128-bit half of the 256-bit integer vector in \a a right by
+/// \a imm bytes, shifting in zero bytes, and returns the result. If
+/// \a imm is greater than 15, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_srli_si256(__m256i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSRLDQ instruction.
+///
+/// \param a
+/// A 256-bit integer vector to be shifted.
+/// \param imm
+/// An unsigned immediate value specifying the shift count (in bytes).
+/// \returns A 256-bit integer vector containing the result.
#define _mm256_srli_si256(a, imm) \
- (__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm))
-
+ ((__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm)))
+
+/// Shifts each 128-bit half of the 256-bit integer vector in \a a right by
+/// \a imm bytes, shifting in zero bytes, and returns the result. If
+/// \a imm is greater than 15, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_bsrli_epi128(__m256i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPSRLDQ instruction.
+///
+/// \param a
+/// A 256-bit integer vector to be shifted.
+/// \param imm
+/// An unsigned immediate value specifying the shift count (in bytes).
+/// \returns A 256-bit integer vector containing the result.
#define _mm256_bsrli_epi128(a, imm) \
- (__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm))
-
+ ((__m256i)__builtin_ia32_psrldqi256_byteshift((__m256i)(a), (int)(imm)))
+
+/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
+/// right by \a __count bits, shifting in zero bits, and returns the result.
+/// If \a __count is greater than 15, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] to be shifted.
+/// \param __count
+/// An unsigned integer value specifying the shift count (in bits).
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srli_epi16(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_psrlwi256((__v16hi)__a, __count);
}
+/// Shifts each 16-bit element of the 256-bit vector of [16 x i16] in \a __a
+/// right by the number of bits given in the lower 64 bits of \a __count,
+/// shifting in zero bits, and returns the result. If \a __count is greater
+/// than 15, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] to be shifted.
+/// \param __count
+/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
+/// shift count (in bits). The upper element is ignored.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srl_epi16(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_psrlw256((__v16hi)__a, (__v8hi)__count);
}
+/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
+/// right by \a __count bits, shifting in zero bits, and returns the result.
+/// If \a __count is greater than 31, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] to be shifted.
+/// \param __count
+/// An unsigned integer value specifying the shift count (in bits).
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srli_epi32(__m256i __a, int __count)
{
return (__m256i)__builtin_ia32_psrldi256((__v8si)__a, __count);
}
+/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __a
+/// right by the number of bits given in the lower 64 bits of \a __count,
+/// shifting in zero bits, and returns the result. If \a __count is greater
+/// than 31, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] to be shifted.
+/// \param __count
+/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
+/// shift count (in bits). The upper element is ignored.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srl_epi32(__m256i __a, __m128i __count)
{
return (__m256i)__builtin_ia32_psrld256((__v8si)__a, (__v4si)__count);
}
+/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a
+/// right by \a __count bits, shifting in zero bits, and returns the result.
+/// If \a __count is greater than 63, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] to be shifted.
+/// \param __count
+/// An unsigned integer value specifying the shift count (in bits).
+/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srli_epi64(__m256i __a, int __count)
{
return __builtin_ia32_psrlqi256((__v4di)__a, __count);
}
+/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __a
+/// right by the number of bits given in the lower 64 bits of \a __count,
+/// shifting in zero bits, and returns the result. If \a __count is greater
+/// than 63, the returned result is all zeroes.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] to be shifted.
+/// \param __count
+/// A 128-bit vector of [2 x i64] whose lower element gives the unsigned
+/// shift count (in bits). The upper element is ignored.
+/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srl_epi64(__m256i __a, __m128i __count)
{
return __builtin_ia32_psrlq256((__v4di)__a, __count);
}
+/// Subtracts 8-bit integers from corresponding bytes of two 256-bit integer
+/// vectors. Returns the lower 8 bits of each difference in the
+/// corresponding byte of the 256-bit integer vector result (overflow is
+/// ignored).
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// result[j+7:j] := __a[j+7:j] - __b[j+7:j]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing the minuends.
+/// \param __b
+/// A 256-bit integer vector containing the subtrahends.
+/// \returns A 256-bit integer vector containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sub_epi8(__m256i __a, __m256i __b)
{
return (__m256i)((__v32qu)__a - (__v32qu)__b);
}
+/// Subtracts 16-bit integers from corresponding elements of two 256-bit
+/// vectors of [16 x i16]. Returns the lower 16 bits of each difference in
+/// the corresponding element of the [16 x i16] result (overflow is
+/// ignored).
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*16
+/// result[j+15:j] := __a[j+15:j] - __b[j+15:j]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing the minuends.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing the subtrahends.
+/// \returns A 256-bit vector of [16 x i16] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sub_epi16(__m256i __a, __m256i __b)
{
return (__m256i)((__v16hu)__a - (__v16hu)__b);
}
+/// Subtracts 32-bit integers from corresponding elements of two 256-bit
+/// vectors of [8 x i32]. Returns the lower 32 bits of each difference in
+/// the corresponding element of the [8 x i32] result (overflow is ignored).
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// result[j+31:j] := __a[j+31:j] - __b[j+31:j]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing the minuends.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing the subtrahends.
+/// \returns A 256-bit vector of [8 x i32] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sub_epi32(__m256i __a, __m256i __b)
{
return (__m256i)((__v8su)__a - (__v8su)__b);
}
+/// Subtracts 64-bit integers from corresponding elements of two 256-bit
+/// vectors of [4 x i64]. Returns the lower 64 bits of each difference in
+/// the corresponding element of the [4 x i64] result (overflow is ignored).
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// result[j+63:j] := __a[j+63:j] - __b[j+63:j]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] containing the minuends.
+/// \param __b
+/// A 256-bit vector of [4 x i64] containing the subtrahends.
+/// \returns A 256-bit vector of [4 x i64] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sub_epi64(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a - (__v4du)__b);
}
+/// Subtracts 8-bit integers from corresponding bytes of two 256-bit integer
+/// vectors using signed saturation, and returns each differences in the
+/// corresponding byte of the 256-bit integer vector result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// result[j+7:j] := SATURATE8(__a[j+7:j] - __b[j+7:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBSB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing the minuends.
+/// \param __b
+/// A 256-bit integer vector containing the subtrahends.
+/// \returns A 256-bit integer vector containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_subs_epi8(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_psubsb256((__v32qi)__a, (__v32qi)__b);
-}
-
+ return (__m256i)__builtin_elementwise_sub_sat((__v32qs)__a, (__v32qs)__b);
+}
+
+/// Subtracts 16-bit integers from corresponding elements of two 256-bit
+/// vectors of [16 x i16] using signed saturation, and returns each
+/// difference in the corresponding element of the [16 x i16] result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*16
+/// result[j+7:j] := SATURATE16(__a[j+7:j] - __b[j+7:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing the minuends.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing the subtrahends.
+/// \returns A 256-bit vector of [16 x i16] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_subs_epi16(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_psubsw256((__v16hi)__a, (__v16hi)__b);
-}
-
+ return (__m256i)__builtin_elementwise_sub_sat((__v16hi)__a, (__v16hi)__b);
+}
+
+/// Subtracts 8-bit integers from corresponding bytes of two 256-bit integer
+/// vectors using unsigned saturation, and returns each difference in the
+/// corresponding byte of the 256-bit integer vector result. For each byte,
+/// computes <c> result = __a - __b </c>.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 31
+/// j := i*8
+/// result[j+7:j] := SATURATE8U(__a[j+7:j] - __b[j+7:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBUSB instruction.
+///
+/// \param __a
+/// A 256-bit integer vector containing the minuends.
+/// \param __b
+/// A 256-bit integer vector containing the subtrahends.
+/// \returns A 256-bit integer vector containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_subs_epu8(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_psubusb256((__v32qi)__a, (__v32qi)__b);
-}
-
+ return (__m256i)__builtin_elementwise_sub_sat((__v32qu)__a, (__v32qu)__b);
+}
+
+/// Subtracts 16-bit integers from corresponding elements of two 256-bit
+/// vectors of [16 x i16] using unsigned saturation, and returns each
+/// difference in the corresponding element of the [16 x i16] result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 15
+/// j := i*16
+/// result[j+15:j] := SATURATE16U(__a[j+15:j] - __b[j+15:j])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSUBUSW instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] containing the minuends.
+/// \param __b
+/// A 256-bit vector of [16 x i16] containing the subtrahends.
+/// \returns A 256-bit vector of [16 x i16] containing the differences.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_subs_epu16(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_psubusw256((__v16hi)__a, (__v16hi)__b);
-}
-
+ return (__m256i)__builtin_elementwise_sub_sat((__v16hu)__a, (__v16hu)__b);
+}
+
+/// Unpacks and interleaves 8-bit integers from parts of the 256-bit integer
+/// vectors in \a __a and \a __b to form the 256-bit result. Specifically,
+/// uses the upper 64 bits of each 128-bit half of \a __a and \a __b as
+/// input; other bits in these parameters are ignored.
+///
+/// \code{.operation}
+/// result[7:0] := __a[71:64]
+/// result[15:8] := __b[71:64]
+/// result[23:16] := __a[79:72]
+/// result[31:24] := __b[79:72]
+/// . . .
+/// result[127:120] := __b[127:120]
+/// result[135:128] := __a[199:192]
+/// . . .
+/// result[255:248] := __b[255:248]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPUNPCKHBW instruction.
+///
+/// \param __a
+/// A 256-bit integer vector used as the source for the even-numbered bytes
+/// of the result.
+/// \param __b
+/// A 256-bit integer vector used as the source for the odd-numbered bytes
+/// of the result.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpackhi_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 8, 32+8, 9, 32+9, 10, 32+10, 11, 32+11, 12, 32+12, 13, 32+13, 14, 32+14, 15, 32+15, 24, 32+24, 25, 32+25, 26, 32+26, 27, 32+27, 28, 32+28, 29, 32+29, 30, 32+30, 31, 32+31);
}
+/// Unpacks and interleaves 16-bit integers from parts of the 256-bit vectors
+/// of [16 x i16] in \a __a and \a __b to return the resulting 256-bit
+/// vector of [16 x i16]. Specifically, uses the upper 64 bits of each
+/// 128-bit half of \a __a and \a __b as input; other bits in these
+/// parameters are ignored.
+///
+/// \code{.operation}
+/// result[15:0] := __a[79:64]
+/// result[31:16] := __b[79:64]
+/// result[47:32] := __a[95:80]
+/// result[63:48] := __b[95:80]
+/// . . .
+/// result[127:112] := __b[127:112]
+/// result[143:128] := __a[211:196]
+/// . . .
+/// result[255:240] := __b[255:240]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPUNPCKHWD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] used as the source for the even-numbered
+/// elements of the result.
+/// \param __b
+/// A 256-bit vector of [16 x i16] used as the source for the odd-numbered
+/// elements of the result.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpackhi_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
}
+/// Unpacks and interleaves 32-bit integers from parts of the 256-bit vectors
+/// of [8 x i32] in \a __a and \a __b to return the resulting 256-bit vector
+/// of [8 x i32]. Specifically, uses the upper 64 bits of each 128-bit half
+/// of \a __a and \a __b as input; other bits in these parameters are
+/// ignored.
+///
+/// \code{.operation}
+/// result[31:0] := __a[95:64]
+/// result[63:32] := __b[95:64]
+/// result[95:64] := __a[127:96]
+/// result[127:96] := __b[127:96]
+/// result[159:128] := __a[223:192]
+/// result[191:160] := __b[223:192]
+/// result[223:192] := __a[255:224]
+/// result[255:224] := __b[255:224]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPUNPCKHDQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] used as the source for the even-numbered
+/// elements of the result.
+/// \param __b
+/// A 256-bit vector of [8 x i32] used as the source for the odd-numbered
+/// elements of the result.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpackhi_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 2, 8+2, 3, 8+3, 6, 8+6, 7, 8+7);
}
+/// Unpacks and interleaves 64-bit integers from parts of the 256-bit vectors
+/// of [4 x i64] in \a __a and \a __b to return the resulting 256-bit vector
+/// of [4 x i64]. Specifically, uses the upper 64 bits of each 128-bit half
+/// of \a __a and \a __b as input; other bits in these parameters are
+/// ignored.
+///
+/// \code{.operation}
+/// result[63:0] := __a[127:64]
+/// result[127:64] := __b[127:64]
+/// result[191:128] := __a[255:192]
+/// result[255:192] := __b[255:192]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPUNPCKHQDQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] used as the source for the even-numbered
+/// elements of the result.
+/// \param __b
+/// A 256-bit vector of [4 x i64] used as the source for the odd-numbered
+/// elements of the result.
+/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpackhi_epi64(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 1, 4+1, 3, 4+3);
}
+/// Unpacks and interleaves 8-bit integers from parts of the 256-bit integer
+/// vectors in \a __a and \a __b to form the 256-bit result. Specifically,
+/// uses the lower 64 bits of each 128-bit half of \a __a and \a __b as
+/// input; other bits in these parameters are ignored.
+///
+/// \code{.operation}
+/// result[7:0] := __a[7:0]
+/// result[15:8] := __b[7:0]
+/// result[23:16] := __a[15:8]
+/// result[31:24] := __b[15:8]
+/// . . .
+/// result[127:120] := __b[63:56]
+/// result[135:128] := __a[135:128]
+/// . . .
+/// result[255:248] := __b[191:184]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPUNPCKLBW instruction.
+///
+/// \param __a
+/// A 256-bit integer vector used as the source for the even-numbered bytes
+/// of the result.
+/// \param __b
+/// A 256-bit integer vector used as the source for the odd-numbered bytes
+/// of the result.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpacklo_epi8(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v32qi)__a, (__v32qi)__b, 0, 32+0, 1, 32+1, 2, 32+2, 3, 32+3, 4, 32+4, 5, 32+5, 6, 32+6, 7, 32+7, 16, 32+16, 17, 32+17, 18, 32+18, 19, 32+19, 20, 32+20, 21, 32+21, 22, 32+22, 23, 32+23);
}
+/// Unpacks and interleaves 16-bit integers from parts of the 256-bit vectors
+/// of [16 x i16] in \a __a and \a __b to return the resulting 256-bit
+/// vector of [16 x i16]. Specifically, uses the lower 64 bits of each
+/// 128-bit half of \a __a and \a __b as input; other bits in these
+/// parameters are ignored.
+///
+/// \code{.operation}
+/// result[15:0] := __a[15:0]
+/// result[31:16] := __b[15:0]
+/// result[47:32] := __a[31:16]
+/// result[63:48] := __b[31:16]
+/// . . .
+/// result[127:112] := __b[63:48]
+/// result[143:128] := __a[143:128]
+/// . . .
+/// result[255:239] := __b[191:176]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPUNPCKLWD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x i16] used as the source for the even-numbered
+/// elements of the result.
+/// \param __b
+/// A 256-bit vector of [16 x i16] used as the source for the odd-numbered
+/// elements of the result.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpacklo_epi16(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11);
}
+/// Unpacks and interleaves 32-bit integers from parts of the 256-bit vectors
+/// of [8 x i32] in \a __a and \a __b to return the resulting 256-bit vector
+/// of [8 x i32]. Specifically, uses the lower 64 bits of each 128-bit half
+/// of \a __a and \a __b as input; other bits in these parameters are
+/// ignored.
+///
+/// \code{.operation}
+/// result[31:0] := __a[31:0]
+/// result[63:32] := __b[31:0]
+/// result[95:64] := __a[63:32]
+/// result[127:96] := __b[63:32]
+/// result[159:128] := __a[159:128]
+/// result[191:160] := __b[159:128]
+/// result[223:192] := __a[191:160]
+/// result[255:224] := __b[191:190]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPUNPCKLDQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] used as the source for the even-numbered
+/// elements of the result.
+/// \param __b
+/// A 256-bit vector of [8 x i32] used as the source for the odd-numbered
+/// elements of the result.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpacklo_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)__b, 0, 8+0, 1, 8+1, 4, 8+4, 5, 8+5);
}
+/// Unpacks and interleaves 64-bit integers from parts of the 256-bit vectors
+/// of [4 x i64] in \a __a and \a __b to return the resulting 256-bit vector
+/// of [4 x i64]. Specifically, uses the lower 64 bits of each 128-bit half
+/// of \a __a and \a __b as input; other bits in these parameters are
+/// ignored.
+///
+/// \code{.operation}
+/// result[63:0] := __a[63:0]
+/// result[127:64] := __b[63:0]
+/// result[191:128] := __a[191:128]
+/// result[255:192] := __b[191:128]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPUNPCKLQDQ instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x i64] used as the source for the even-numbered
+/// elements of the result.
+/// \param __b
+/// A 256-bit vector of [4 x i64] used as the source for the odd-numbered
+/// elements of the result.
+/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_unpacklo_epi64(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_shufflevector((__v4di)__a, (__v4di)__b, 0, 4+0, 2, 4+2);
}
+/// Computes the bitwise XOR of the 256-bit integer vectors in \a __a and
+/// \a __b.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPXOR instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_xor_si256(__m256i __a, __m256i __b)
{
return (__m256i)((__v4du)__a ^ (__v4du)__b);
}
+/// Loads the 256-bit integer vector from memory \a __V using a non-temporal
+/// memory hint and returns the vector. \a __V must be aligned on a 32-byte
+/// boundary.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVNTDQA instruction.
+///
+/// \param __V
+/// A pointer to the 32-byte aligned memory containing the vector to load.
+/// \returns A 256-bit integer vector loaded from memory.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_stream_load_si256(__m256i const *__V)
+_mm256_stream_load_si256(const void *__V)
{
typedef __v4di __v4di_aligned __attribute__((aligned(32)));
return (__m256i)__builtin_nontemporal_load((const __v4di_aligned *)__V);
}
+/// Broadcasts the 32-bit floating-point value from the low element of the
+/// 128-bit vector of [4 x float] in \a __X to all elements of the result's
+/// 128-bit vector of [4 x float].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VBROADCASTSS instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x float] whose low element will be broadcast.
+/// \returns A 128-bit vector of [4 x float] containing the result.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_broadcastss_ps(__m128 __X)
{
return (__m128)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0);
}
+/// Broadcasts the 64-bit floating-point value from the low element of the
+/// 128-bit vector of [2 x double] in \a __a to both elements of the
+/// result's 128-bit vector of [2 x double].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c MOVDDUP instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] whose low element will be broadcast.
+/// \returns A 128-bit vector of [2 x double] containing the result.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_broadcastsd_pd(__m128d __a)
{
return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
}
+/// Broadcasts the 32-bit floating-point value from the low element of the
+/// 128-bit vector of [4 x float] in \a __X to all elements of the
+/// result's 256-bit vector of [8 x float].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VBROADCASTSS instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x float] whose low element will be broadcast.
+/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_broadcastss_ps(__m128 __X)
{
return (__m256)__builtin_shufflevector((__v4sf)__X, (__v4sf)__X, 0, 0, 0, 0, 0, 0, 0, 0);
}
+/// Broadcasts the 64-bit floating-point value from the low element of the
+/// 128-bit vector of [2 x double] in \a __X to all elements of the
+/// result's 256-bit vector of [4 x double].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VBROADCASTSD instruction.
+///
+/// \param __X
+/// A 128-bit vector of [2 x double] whose low element will be broadcast.
+/// \returns A 256-bit vector of [4 x double] containing the result.
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_broadcastsd_pd(__m128d __X)
{
return (__m256d)__builtin_shufflevector((__v2df)__X, (__v2df)__X, 0, 0, 0, 0);
}
+/// Broadcasts the 128-bit integer data from \a __X to both the lower and
+/// upper halves of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VBROADCASTI128 instruction.
+///
+/// \param __X
+/// A 128-bit integer vector to be broadcast.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastsi128_si256(__m128i __X)
{
@@ -742,405 +3075,2208 @@ _mm256_broadcastsi128_si256(__m128i __X)
#define _mm_broadcastsi128_si256(X) _mm256_broadcastsi128_si256(X)
+/// Merges 32-bit integer elements from either of the two 128-bit vectors of
+/// [4 x i32] in \a V1 or \a V2 to the result's 128-bit vector of [4 x i32],
+/// as specified by the immediate integer operand \a M.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*32
+/// IF M[i] == 0
+/// result[31+j:j] := V1[31+j:j]
+/// ELSE
+/// result[31+j:j] := V2[32+j:j]
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_blend_epi32(__m128i V1, __m128i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPBLENDDD instruction.
+///
+/// \param V1
+/// A 128-bit vector of [4 x i32] containing source values.
+/// \param V2
+/// A 128-bit vector of [4 x i32] containing source values.
+/// \param M
+/// An immediate 8-bit integer operand, with bits [3:0] specifying the
+/// source for each element of the result. The position of the mask bit
+/// corresponds to the index of a copied value. When a mask bit is 0, the
+/// element is copied from \a V1; otherwise, it is copied from \a V2.
+/// \returns A 128-bit vector of [4 x i32] containing the result.
#define _mm_blend_epi32(V1, V2, M) \
- (__m128i)__builtin_ia32_pblendd128((__v4si)(__m128i)(V1), \
- (__v4si)(__m128i)(V2), (int)(M))
-
+ ((__m128i)__builtin_ia32_pblendd128((__v4si)(__m128i)(V1), \
+ (__v4si)(__m128i)(V2), (int)(M)))
+
+/// Merges 32-bit integer elements from either of the two 256-bit vectors of
+/// [8 x i32] in \a V1 or \a V2 to return a 256-bit vector of [8 x i32],
+/// as specified by the immediate integer operand \a M.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// IF M[i] == 0
+/// result[31+j:j] := V1[31+j:j]
+/// ELSE
+/// result[31+j:j] := V2[32+j:j]
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_blend_epi32(__m256i V1, __m256i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPBLENDDD instruction.
+///
+/// \param V1
+/// A 256-bit vector of [8 x i32] containing source values.
+/// \param V2
+/// A 256-bit vector of [8 x i32] containing source values.
+/// \param M
+/// An immediate 8-bit integer operand, with bits [7:0] specifying the
+/// source for each element of the result. The position of the mask bit
+/// corresponds to the index of a copied value. When a mask bit is 0, the
+/// element is copied from \a V1; otherwise, it is is copied from \a V2.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
#define _mm256_blend_epi32(V1, V2, M) \
- (__m256i)__builtin_ia32_pblendd256((__v8si)(__m256i)(V1), \
- (__v8si)(__m256i)(V2), (int)(M))
-
+ ((__m256i)__builtin_ia32_pblendd256((__v8si)(__m256i)(V1), \
+ (__v8si)(__m256i)(V2), (int)(M)))
+
+/// Broadcasts the low byte from the 128-bit integer vector in \a __X to all
+/// bytes of the 256-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTB instruction.
+///
+/// \param __X
+/// A 128-bit integer vector whose low byte will be broadcast.
+/// \returns A 256-bit integer vector containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastb_epi8(__m128i __X)
{
return (__m256i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
+/// Broadcasts the low element from the 128-bit vector of [8 x i16] in \a __X
+/// to all elements of the result's 256-bit vector of [16 x i16].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTW instruction.
+///
+/// \param __X
+/// A 128-bit vector of [8 x i16] whose low element will be broadcast.
+/// \returns A 256-bit vector of [16 x i16] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastw_epi16(__m128i __X)
{
return (__m256i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
+/// Broadcasts the low element from the 128-bit vector of [4 x i32] in \a __X
+/// to all elements of the result's 256-bit vector of [8 x i32].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTD instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] whose low element will be broadcast.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastd_epi32(__m128i __X)
{
return (__m256i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0, 0, 0, 0, 0);
}
+/// Broadcasts the low element from the 128-bit vector of [2 x i64] in \a __X
+/// to all elements of the result's 256-bit vector of [4 x i64].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTQ instruction.
+///
+/// \param __X
+/// A 128-bit vector of [2 x i64] whose low element will be broadcast.
+/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastq_epi64(__m128i __X)
{
return (__m256i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0, 0, 0);
}
+/// Broadcasts the low byte from the 128-bit integer vector in \a __X to all
+/// bytes of the 128-bit result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTB instruction.
+///
+/// \param __X
+/// A 128-bit integer vector whose low byte will be broadcast.
+/// \returns A 128-bit integer vector containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcastb_epi8(__m128i __X)
{
return (__m128i)__builtin_shufflevector((__v16qi)__X, (__v16qi)__X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
}
+/// Broadcasts the low element from the 128-bit vector of [8 x i16] in
+/// \a __X to all elements of the result's 128-bit vector of [8 x i16].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTW instruction.
+///
+/// \param __X
+/// A 128-bit vector of [8 x i16] whose low element will be broadcast.
+/// \returns A 128-bit vector of [8 x i16] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcastw_epi16(__m128i __X)
{
return (__m128i)__builtin_shufflevector((__v8hi)__X, (__v8hi)__X, 0, 0, 0, 0, 0, 0, 0, 0);
}
-
+/// Broadcasts the low element from the 128-bit vector of [4 x i32] in \a __X
+/// to all elements of the result's vector of [4 x i32].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTD instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] whose low element will be broadcast.
+/// \returns A 128-bit vector of [4 x i32] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcastd_epi32(__m128i __X)
{
return (__m128i)__builtin_shufflevector((__v4si)__X, (__v4si)__X, 0, 0, 0, 0);
}
+/// Broadcasts the low element from the 128-bit vector of [2 x i64] in \a __X
+/// to both elements of the result's 128-bit vector of [2 x i64].
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPBROADCASTQ instruction.
+///
+/// \param __X
+/// A 128-bit vector of [2 x i64] whose low element will be broadcast.
+/// \returns A 128-bit vector of [2 x i64] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcastq_epi64(__m128i __X)
{
return (__m128i)__builtin_shufflevector((__v2di)__X, (__v2di)__X, 0, 0);
}
+/// Sets the result's 256-bit vector of [8 x i32] to copies of elements of the
+/// 256-bit vector of [8 x i32] in \a __a as specified by indexes in the
+/// elements of the 256-bit vector of [8 x i32] in \a __b.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// k := __b[j+2:j] * 32
+/// result[j+31:j] := __a[k+31:k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPERMD instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x i32] containing the source values.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing indexes of values to use from
+/// \a __a.
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_permutevar8x32_epi32(__m256i __a, __m256i __b)
{
return (__m256i)__builtin_ia32_permvarsi256((__v8si)__a, (__v8si)__b);
}
+/// Sets the result's 256-bit vector of [4 x double] to copies of elements of
+/// the 256-bit vector of [4 x double] in \a V as specified by the
+/// immediate value \a M.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// k := (M >> i*2)[1:0] * 64
+/// result[j+63:j] := V[k+63:k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256d _mm256_permute4x64_pd(__m256d V, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERMPD instruction.
+///
+/// \param V
+/// A 256-bit vector of [4 x double] containing the source values.
+/// \param M
+/// An immediate 8-bit value specifying which elements to copy from \a V.
+/// \a M[1:0] specifies the index in \a a for element 0 of the result,
+/// \a M[3:2] specifies the index for element 1, and so forth.
+/// \returns A 256-bit vector of [4 x double] containing the result.
#define _mm256_permute4x64_pd(V, M) \
- (__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(V), (int)(M))
-
+ ((__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(V), (int)(M)))
+
+/// Sets the result's 256-bit vector of [8 x float] to copies of elements of
+/// the 256-bit vector of [8 x float] in \a __a as specified by indexes in
+/// the elements of the 256-bit vector of [8 x i32] in \a __b.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// k := __b[j+2:j] * 32
+/// result[j+31:j] := __a[k+31:k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPERMPS instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float] containing the source values.
+/// \param __b
+/// A 256-bit vector of [8 x i32] containing indexes of values to use from
+/// \a __a.
+/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_permutevar8x32_ps(__m256 __a, __m256i __b)
{
return (__m256)__builtin_ia32_permvarsf256((__v8sf)__a, (__v8si)__b);
}
+/// Sets the result's 256-bit vector of [4 x i64] result to copies of elements
+/// of the 256-bit vector of [4 x i64] in \a V as specified by the
+/// immediate value \a M.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// k := (M >> i*2)[1:0] * 64
+/// result[j+63:j] := V[k+63:k]
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_permute4x64_epi64(__m256i V, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERMQ instruction.
+///
+/// \param V
+/// A 256-bit vector of [4 x i64] containing the source values.
+/// \param M
+/// An immediate 8-bit value specifying which elements to copy from \a V.
+/// \a M[1:0] specifies the index in \a a for element 0 of the result,
+/// \a M[3:2] specifies the index for element 1, and so forth.
+/// \returns A 256-bit vector of [4 x i64] containing the result.
#define _mm256_permute4x64_epi64(V, M) \
- (__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(V), (int)(M))
-
+ ((__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(V), (int)(M)))
+
+/// Sets each half of the 256-bit result either to zero or to one of the
+/// four possible 128-bit halves of the 256-bit vectors \a V1 and \a V2,
+/// as specified by the immediate value \a M.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*128
+/// k := M >> (i*4)
+/// IF k[3] == 0
+/// CASE (k[1:0]) OF
+/// 0: result[127+j:j] := V1[127:0]
+/// 1: result[127+j:j] := V1[255:128]
+/// 2: result[127+j:j] := V2[127:0]
+/// 3: result[127+j:j] := V2[255:128]
+/// ESAC
+/// ELSE
+/// result[127+j:j] := 0
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_permute2x128_si256(__m256i V1, __m256i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPERM2I128 instruction.
+///
+/// \param V1
+/// A 256-bit integer vector containing source values.
+/// \param V2
+/// A 256-bit integer vector containing source values.
+/// \param M
+/// An immediate value specifying how to form the result. Bits [3:0]
+/// control the lower half of the result, bits [7:4] control the upper half.
+/// Within each 4-bit control value, if bit 3 is 1, the result is zero,
+/// otherwise bits [1:0] determine the source as follows. \n
+/// 0: the lower half of \a V1 \n
+/// 1: the upper half of \a V1 \n
+/// 2: the lower half of \a V2 \n
+/// 3: the upper half of \a V2
+/// \returns A 256-bit integer vector containing the result.
#define _mm256_permute2x128_si256(V1, V2, M) \
- (__m256i)__builtin_ia32_permti256((__m256i)(V1), (__m256i)(V2), (int)(M))
-
+ ((__m256i)__builtin_ia32_permti256((__m256i)(V1), (__m256i)(V2), (int)(M)))
+
+/// Extracts half of the 256-bit vector \a V to the 128-bit result. If bit 0
+/// of the immediate \a M is zero, extracts the lower half of the result;
+/// otherwise, extracts the upper half.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm256_extracti128_si256(__m256i V, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VEXTRACTI128 instruction.
+///
+/// \param V
+/// A 256-bit integer vector containing the source values.
+/// \param M
+/// An immediate value specifying which half of \a V to extract.
+/// \returns A 128-bit integer vector containing the result.
#define _mm256_extracti128_si256(V, M) \
- (__m128i)__builtin_ia32_extract128i256((__v4di)(__m256i)(V), (int)(M))
-
+ ((__m128i)__builtin_ia32_extract128i256((__v4di)(__m256i)(V), (int)(M)))
+
+/// Copies the 256-bit vector \a V1 to the result, then overwrites half of the
+/// result with the 128-bit vector \a V2. If bit 0 of the immediate \a M
+/// is zero, overwrites the lower half of the result; otherwise,
+/// overwrites the upper half.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_inserti128_si256(__m256i V1, __m128i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VINSERTI128 instruction.
+///
+/// \param V1
+/// A 256-bit integer vector containing a source value.
+/// \param V2
+/// A 128-bit integer vector containing a source value.
+/// \param M
+/// An immediate value specifying where to put \a V2 in the result.
+/// \returns A 256-bit integer vector containing the result.
#define _mm256_inserti128_si256(V1, V2, M) \
- (__m256i)__builtin_ia32_insert128i256((__v4di)(__m256i)(V1), \
- (__v2di)(__m128i)(V2), (int)(M))
-
+ ((__m256i)__builtin_ia32_insert128i256((__v4di)(__m256i)(V1), \
+ (__v2di)(__m128i)(V2), (int)(M)))
+
+/// Conditionally loads eight 32-bit integer elements from memory \a __X, if
+/// the most significant bit of the corresponding element in the mask
+/// \a __M is set; otherwise, sets that element of the result to zero.
+/// Returns the 256-bit [8 x i32] result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// IF __M[j+31] == 1
+/// result[j+31:j] := Load32(__X+(i*4))
+/// ELSE
+/// result[j+31:j] := 0
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVD instruction.
+///
+/// \param __X
+/// A pointer to the memory used for loading values.
+/// \param __M
+/// A 256-bit vector of [8 x i32] containing the mask bits.
+/// \returns A 256-bit vector of [8 x i32] containing the loaded or zeroed
+/// elements.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskload_epi32(int const *__X, __m256i __M)
{
return (__m256i)__builtin_ia32_maskloadd256((const __v8si *)__X, (__v8si)__M);
}
+/// Conditionally loads four 64-bit integer elements from memory \a __X, if
+/// the most significant bit of the corresponding element in the mask
+/// \a __M is set; otherwise, sets that element of the result to zero.
+/// Returns the 256-bit [4 x i64] result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// IF __M[j+63] == 1
+/// result[j+63:j] := Load64(__X+(i*8))
+/// ELSE
+/// result[j+63:j] := 0
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVQ instruction.
+///
+/// \param __X
+/// A pointer to the memory used for loading values.
+/// \param __M
+/// A 256-bit vector of [4 x i64] containing the mask bits.
+/// \returns A 256-bit vector of [4 x i64] containing the loaded or zeroed
+/// elements.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskload_epi64(long long const *__X, __m256i __M)
{
return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, (__v4di)__M);
}
+/// Conditionally loads four 32-bit integer elements from memory \a __X, if
+/// the most significant bit of the corresponding element in the mask
+/// \a __M is set; otherwise, sets that element of the result to zero.
+/// Returns the 128-bit [4 x i32] result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*32
+/// IF __M[j+31] == 1
+/// result[j+31:j] := Load32(__X+(i*4))
+/// ELSE
+/// result[j+31:j] := 0
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVD instruction.
+///
+/// \param __X
+/// A pointer to the memory used for loading values.
+/// \param __M
+/// A 128-bit vector of [4 x i32] containing the mask bits.
+/// \returns A 128-bit vector of [4 x i32] containing the loaded or zeroed
+/// elements.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskload_epi32(int const *__X, __m128i __M)
{
return (__m128i)__builtin_ia32_maskloadd((const __v4si *)__X, (__v4si)__M);
}
+/// Conditionally loads two 64-bit integer elements from memory \a __X, if
+/// the most significant bit of the corresponding element in the mask
+/// \a __M is set; otherwise, sets that element of the result to zero.
+/// Returns the 128-bit [2 x i64] result.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*64
+/// IF __M[j+63] == 1
+/// result[j+63:j] := Load64(__X+(i*8))
+/// ELSE
+/// result[j+63:j] := 0
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVQ instruction.
+///
+/// \param __X
+/// A pointer to the memory used for loading values.
+/// \param __M
+/// A 128-bit vector of [2 x i64] containing the mask bits.
+/// \returns A 128-bit vector of [2 x i64] containing the loaded or zeroed
+/// elements.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskload_epi64(long long const *__X, __m128i __M)
{
return (__m128i)__builtin_ia32_maskloadq((const __v2di *)__X, (__v2di)__M);
}
+/// Conditionally stores eight 32-bit integer elements from the 256-bit vector
+/// of [8 x i32] in \a __Y to memory \a __X, if the most significant bit of
+/// the corresponding element in the mask \a __M is set; otherwise, the
+/// memory element is unchanged.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 7
+/// j := i*32
+/// IF __M[j+31] == 1
+/// Store32(__X+(i*4), __Y[j+31:j])
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVD instruction.
+///
+/// \param __X
+/// A pointer to the memory used for storing values.
+/// \param __M
+/// A 256-bit vector of [8 x i32] containing the mask bits.
+/// \param __Y
+/// A 256-bit vector of [8 x i32] containing the values to store.
static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_maskstore_epi32(int *__X, __m256i __M, __m256i __Y)
{
__builtin_ia32_maskstored256((__v8si *)__X, (__v8si)__M, (__v8si)__Y);
}
+/// Conditionally stores four 64-bit integer elements from the 256-bit vector
+/// of [4 x i64] in \a __Y to memory \a __X, if the most significant bit of
+/// the corresponding element in the mask \a __M is set; otherwise, the
+/// memory element is unchanged.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*64
+/// IF __M[j+63] == 1
+/// Store64(__X+(i*8), __Y[j+63:j])
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVQ instruction.
+///
+/// \param __X
+/// A pointer to the memory used for storing values.
+/// \param __M
+/// A 256-bit vector of [4 x i64] containing the mask bits.
+/// \param __Y
+/// A 256-bit vector of [4 x i64] containing the values to store.
static __inline__ void __DEFAULT_FN_ATTRS256
_mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y)
{
__builtin_ia32_maskstoreq256((__v4di *)__X, (__v4di)__M, (__v4di)__Y);
}
+/// Conditionally stores four 32-bit integer elements from the 128-bit vector
+/// of [4 x i32] in \a __Y to memory \a __X, if the most significant bit of
+/// the corresponding element in the mask \a __M is set; otherwise, the
+/// memory element is unchanged.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 3
+/// j := i*32
+/// IF __M[j+31] == 1
+/// Store32(__X+(i*4), __Y[j+31:j])
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVD instruction.
+///
+/// \param __X
+/// A pointer to the memory used for storing values.
+/// \param __M
+/// A 128-bit vector of [4 x i32] containing the mask bits.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing the values to store.
static __inline__ void __DEFAULT_FN_ATTRS128
_mm_maskstore_epi32(int *__X, __m128i __M, __m128i __Y)
{
__builtin_ia32_maskstored((__v4si *)__X, (__v4si)__M, (__v4si)__Y);
}
+/// Conditionally stores two 64-bit integer elements from the 128-bit vector
+/// of [2 x i64] in \a __Y to memory \a __X, if the most significant bit of
+/// the corresponding element in the mask \a __M is set; otherwise, the
+/// memory element is unchanged.
+///
+/// \code{.operation}
+/// FOR i := 0 TO 1
+/// j := i*64
+/// IF __M[j+63] == 1
+/// Store64(__X+(i*8), __Y[j+63:j])
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMASKMOVQ instruction.
+///
+/// \param __X
+/// A pointer to the memory used for storing values.
+/// \param __M
+/// A 128-bit vector of [2 x i64] containing the mask bits.
+/// \param __Y
+/// A 128-bit vector of [2 x i64] containing the values to store.
static __inline__ void __DEFAULT_FN_ATTRS128
_mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y)
{
__builtin_ia32_maskstoreq(( __v2di *)__X, (__v2di)__M, (__v2di)__Y);
}
+/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __X
+/// left by the number of bits given in the corresponding element of the
+/// 256-bit vector of [8 x i32] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 31, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLVD instruction.
+///
+/// \param __X
+/// A 256-bit vector of [8 x i32] to be shifted.
+/// \param __Y
+/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sllv_epi32(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psllv8si((__v8si)__X, (__v8si)__Y);
}
+/// Shifts each 32-bit element of the 128-bit vector of [4 x i32] in \a __X
+/// left by the number of bits given in the corresponding element of the
+/// 128-bit vector of [4 x i32] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 31, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLVD instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] to be shifted.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 128-bit vector of [4 x i32] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_sllv_epi32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psllv4si((__v4si)__X, (__v4si)__Y);
}
+/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __X
+/// left by the number of bits given in the corresponding element of the
+/// 128-bit vector of [4 x i64] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 63, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLVQ instruction.
+///
+/// \param __X
+/// A 256-bit vector of [4 x i64] to be shifted.
+/// \param __Y
+/// A 256-bit vector of [4 x i64] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sllv_epi64(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psllv4di((__v4di)__X, (__v4di)__Y);
}
+/// Shifts each 64-bit element of the 128-bit vector of [2 x i64] in \a __X
+/// left by the number of bits given in the corresponding element of the
+/// 128-bit vector of [2 x i64] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 63, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSLLVQ instruction.
+///
+/// \param __X
+/// A 128-bit vector of [2 x i64] to be shifted.
+/// \param __Y
+/// A 128-bit vector of [2 x i64] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 128-bit vector of [2 x i64] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_sllv_epi64(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psllv2di((__v2di)__X, (__v2di)__Y);
}
+/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __X
+/// right by the number of bits given in the corresponding element of the
+/// 256-bit vector of [8 x i32] in \a __Y, shifting in sign bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 31, the result for that element is 0 or -1 according to the sign bit
+/// for that element.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRAVD instruction.
+///
+/// \param __X
+/// A 256-bit vector of [8 x i32] to be shifted.
+/// \param __Y
+/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srav_epi32(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psrav8si((__v8si)__X, (__v8si)__Y);
}
+/// Shifts each 32-bit element of the 128-bit vector of [4 x i32] in \a __X
+/// right by the number of bits given in the corresponding element of the
+/// 128-bit vector of [4 x i32] in \a __Y, shifting in sign bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 31, the result for that element is 0 or -1 according to the sign bit
+/// for that element.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRAVD instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] to be shifted.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 128-bit vector of [4 x i32] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_srav_epi32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psrav4si((__v4si)__X, (__v4si)__Y);
}
+/// Shifts each 32-bit element of the 256-bit vector of [8 x i32] in \a __X
+/// right by the number of bits given in the corresponding element of the
+/// 256-bit vector of [8 x i32] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 31, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLVD instruction.
+///
+/// \param __X
+/// A 256-bit vector of [8 x i32] to be shifted.
+/// \param __Y
+/// A 256-bit vector of [8 x i32] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 256-bit vector of [8 x i32] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srlv_epi32(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psrlv8si((__v8si)__X, (__v8si)__Y);
}
+/// Shifts each 32-bit element of the 128-bit vector of [4 x i32] in \a __X
+/// right by the number of bits given in the corresponding element of the
+/// 128-bit vector of [4 x i32] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 31, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLVD instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] to be shifted.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 128-bit vector of [4 x i32] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_srlv_epi32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psrlv4si((__v4si)__X, (__v4si)__Y);
}
+/// Shifts each 64-bit element of the 256-bit vector of [4 x i64] in \a __X
+/// right by the number of bits given in the corresponding element of the
+/// 128-bit vector of [4 x i64] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 63, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLVQ instruction.
+///
+/// \param __X
+/// A 256-bit vector of [4 x i64] to be shifted.
+/// \param __Y
+/// A 256-bit vector of [4 x i64] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 256-bit vector of [4 x i64] containing the result.
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srlv_epi64(__m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_psrlv4di((__v4di)__X, (__v4di)__Y);
}
+/// Shifts each 64-bit element of the 128-bit vector of [2 x i64] in \a __X
+/// right by the number of bits given in the corresponding element of the
+/// 128-bit vector of [2 x i64] in \a __Y, shifting in zero bits, and
+/// returns the result. If the shift count for any element is greater than
+/// 63, the result for that element is zero.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPSRLVQ instruction.
+///
+/// \param __X
+/// A 128-bit vector of [2 x i64] to be shifted.
+/// \param __Y
+/// A 128-bit vector of [2 x i64] containing the unsigned shift counts (in
+/// bits).
+/// \returns A 128-bit vector of [2 x i64] containing the result.
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_srlv_epi64(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_psrlv2di((__v2di)__X, (__v2di)__Y);
}
+/// Conditionally gathers two 64-bit floating-point values, either from the
+/// 128-bit vector of [2 x double] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector
+/// of [2 x double] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*32
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128d _mm_mask_i32gather_pd(__m128d a, const double *m, __m128i i,
+/// __m128d mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPD instruction.
+///
+/// \param a
+/// A 128-bit vector of [2 x double] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only
+/// the first two elements are used.
+/// \param mask
+/// A 128-bit vector of [2 x double] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x double] containing the gathered values.
#define _mm_mask_i32gather_pd(a, m, i, mask, s) \
- (__m128d)__builtin_ia32_gatherd_pd((__v2df)(__m128i)(a), \
- (double const *)(m), \
- (__v4si)(__m128i)(i), \
- (__v2df)(__m128d)(mask), (s))
-
+ ((__m128d)__builtin_ia32_gatherd_pd((__v2df)(__m128i)(a), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2df)(__m128d)(mask), (s)))
+
+/// Conditionally gathers four 64-bit floating-point values, either from the
+/// 256-bit vector of [4 x double] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i. The 256-bit vector
+/// of [4 x double] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*32
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256d _mm256_mask_i32gather_pd(__m256d a, const double *m, __m128i i,
+/// __m256d mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPD instruction.
+///
+/// \param a
+/// A 256-bit vector of [4 x double] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param mask
+/// A 256-bit vector of [4 x double] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x double] containing the gathered values.
#define _mm256_mask_i32gather_pd(a, m, i, mask, s) \
- (__m256d)__builtin_ia32_gatherd_pd256((__v4df)(__m256d)(a), \
- (double const *)(m), \
- (__v4si)(__m128i)(i), \
- (__v4df)(__m256d)(mask), (s))
-
+ ((__m256d)__builtin_ia32_gatherd_pd256((__v4df)(__m256d)(a), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4df)(__m256d)(mask), (s)))
+
+/// Conditionally gathers two 64-bit floating-point values, either from the
+/// 128-bit vector of [2 x double] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector
+/// of [2 x double] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*64
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128d _mm_mask_i64gather_pd(__m128d a, const double *m, __m128i i,
+/// __m128d mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPD instruction.
+///
+/// \param a
+/// A 128-bit vector of [2 x double] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [2 x double] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x double] containing the gathered values.
#define _mm_mask_i64gather_pd(a, m, i, mask, s) \
- (__m128d)__builtin_ia32_gatherq_pd((__v2df)(__m128d)(a), \
- (double const *)(m), \
- (__v2di)(__m128i)(i), \
- (__v2df)(__m128d)(mask), (s))
-
+ ((__m128d)__builtin_ia32_gatherq_pd((__v2df)(__m128d)(a), \
+ (double const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2df)(__m128d)(mask), (s)))
+
+/// Conditionally gathers four 64-bit floating-point values, either from the
+/// 256-bit vector of [4 x double] in \a a, or from memory \a m using scaled
+/// indexes from the 256-bit vector of [4 x i64] in \a i. The 256-bit vector
+/// of [4 x double] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*64
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256d _mm256_mask_i64gather_pd(__m256d a, const double *m, __m256i i,
+/// __m256d mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPD instruction.
+///
+/// \param a
+/// A 256-bit vector of [4 x double] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 256-bit vector of [4 x double] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x double] containing the gathered values.
#define _mm256_mask_i64gather_pd(a, m, i, mask, s) \
- (__m256d)__builtin_ia32_gatherq_pd256((__v4df)(__m256d)(a), \
- (double const *)(m), \
- (__v4di)(__m256i)(i), \
- (__v4df)(__m256d)(mask), (s))
-
+ ((__m256d)__builtin_ia32_gatherq_pd256((__v4df)(__m256d)(a), \
+ (double const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4df)(__m256d)(mask), (s)))
+
+/// Conditionally gathers four 32-bit floating-point values, either from the
+/// 128-bit vector of [4 x float] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector
+/// of [4 x float] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*32
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128 _mm_mask_i32gather_ps(__m128 a, const float *m, __m128i i,
+/// __m128 mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPS instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x float] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [4 x float] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x float] containing the gathered values.
#define _mm_mask_i32gather_ps(a, m, i, mask, s) \
- (__m128)__builtin_ia32_gatherd_ps((__v4sf)(__m128)(a), \
- (float const *)(m), \
- (__v4si)(__m128i)(i), \
- (__v4sf)(__m128)(mask), (s))
-
+ ((__m128)__builtin_ia32_gatherd_ps((__v4sf)(__m128)(a), \
+ (float const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4sf)(__m128)(mask), (s)))
+
+/// Conditionally gathers eight 32-bit floating-point values, either from the
+/// 256-bit vector of [8 x float] in \a a, or from memory \a m using scaled
+/// indexes from the 256-bit vector of [8 x i32] in \a i. The 256-bit vector
+/// of [8 x float] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 7
+/// j := element*32
+/// k := element*32
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256 _mm256_mask_i32gather_ps(__m256 a, const float *m, __m256i i,
+/// __m256 mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPS instruction.
+///
+/// \param a
+/// A 256-bit vector of [8 x float] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [8 x i32] containing signed indexes into \a m.
+/// \param mask
+/// A 256-bit vector of [8 x float] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [8 x float] containing the gathered values.
#define _mm256_mask_i32gather_ps(a, m, i, mask, s) \
- (__m256)__builtin_ia32_gatherd_ps256((__v8sf)(__m256)(a), \
- (float const *)(m), \
- (__v8si)(__m256i)(i), \
- (__v8sf)(__m256)(mask), (s))
-
+ ((__m256)__builtin_ia32_gatherd_ps256((__v8sf)(__m256)(a), \
+ (float const *)(m), \
+ (__v8si)(__m256i)(i), \
+ (__v8sf)(__m256)(mask), (s)))
+
+/// Conditionally gathers two 32-bit floating-point values, either from the
+/// 128-bit vector of [4 x float] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector
+/// of [4 x float] in \a mask determines the source for the lower two
+/// elements. The upper two elements of the result are zeroed.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*32
+/// k := element*64
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// result[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128 _mm_mask_i64gather_ps(__m128 a, const float *m, __m128i i,
+/// __m128 mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPS instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x float] used as the source when a mask bit is
+/// zero. Only the first two elements are used.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [4 x float] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory. Only the first
+/// two elements are used.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x float] containing the gathered values.
#define _mm_mask_i64gather_ps(a, m, i, mask, s) \
- (__m128)__builtin_ia32_gatherq_ps((__v4sf)(__m128)(a), \
- (float const *)(m), \
- (__v2di)(__m128i)(i), \
- (__v4sf)(__m128)(mask), (s))
-
+ ((__m128)__builtin_ia32_gatherq_ps((__v4sf)(__m128)(a), \
+ (float const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v4sf)(__m128)(mask), (s)))
+
+/// Conditionally gathers four 32-bit floating-point values, either from the
+/// 128-bit vector of [4 x float] in \a a, or from memory \a m using scaled
+/// indexes from the 256-bit vector of [4 x i64] in \a i. The 128-bit vector
+/// of [4 x float] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*64
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128 _mm256_mask_i64gather_ps(__m128 a, const float *m, __m256i i,
+/// __m128 mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPS instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x float] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [4 x float] containing the mask. The most
+/// significant bit of each element in the mask vector represents the mask
+/// bits. If a mask bit is zero, the corresponding value from vector \a a
+/// is gathered; otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x float] containing the gathered values.
#define _mm256_mask_i64gather_ps(a, m, i, mask, s) \
- (__m128)__builtin_ia32_gatherq_ps256((__v4sf)(__m128)(a), \
- (float const *)(m), \
- (__v4di)(__m256i)(i), \
- (__v4sf)(__m128)(mask), (s))
-
+ ((__m128)__builtin_ia32_gatherq_ps256((__v4sf)(__m128)(a), \
+ (float const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4sf)(__m128)(mask), (s)))
+
+/// Conditionally gathers four 32-bit integer values, either from the
+/// 128-bit vector of [4 x i32] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector
+/// of [4 x i32] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*32
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_mask_i32gather_epi32(__m128i a, const int *m, __m128i i,
+/// __m128i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDD instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x i32] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [4 x i32] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
#define _mm_mask_i32gather_epi32(a, m, i, mask, s) \
- (__m128i)__builtin_ia32_gatherd_d((__v4si)(__m128i)(a), \
- (int const *)(m), \
- (__v4si)(__m128i)(i), \
- (__v4si)(__m128i)(mask), (s))
-
+ ((__m128i)__builtin_ia32_gatherd_d((__v4si)(__m128i)(a), \
+ (int const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4si)(__m128i)(mask), (s)))
+
+/// Conditionally gathers eight 32-bit integer values, either from the
+/// 256-bit vector of [8 x i32] in \a a, or from memory \a m using scaled
+/// indexes from the 256-bit vector of [8 x i32] in \a i. The 256-bit vector
+/// of [8 x i32] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 7
+/// j := element*32
+/// k := element*32
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_mask_i32gather_epi32(__m256i a, const int *m, __m256i i,
+/// __m256i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDD instruction.
+///
+/// \param a
+/// A 256-bit vector of [8 x i32] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [8 x i32] containing signed indexes into \a m.
+/// \param mask
+/// A 256-bit vector of [8 x i32] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [8 x i32] containing the gathered values.
#define _mm256_mask_i32gather_epi32(a, m, i, mask, s) \
- (__m256i)__builtin_ia32_gatherd_d256((__v8si)(__m256i)(a), \
- (int const *)(m), \
- (__v8si)(__m256i)(i), \
- (__v8si)(__m256i)(mask), (s))
-
+ ((__m256i)__builtin_ia32_gatherd_d256((__v8si)(__m256i)(a), \
+ (int const *)(m), \
+ (__v8si)(__m256i)(i), \
+ (__v8si)(__m256i)(mask), (s)))
+
+/// Conditionally gathers two 32-bit integer values, either from the
+/// 128-bit vector of [4 x i32] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector
+/// of [4 x i32] in \a mask determines the source for the lower two
+/// elements. The upper two elements of the result are zeroed.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*32
+/// k := element*64
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// result[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_mask_i64gather_epi32(__m128i a, const int *m, __m128i i,
+/// __m128i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQD instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x i32] used as the source when a mask bit is
+/// zero. Only the first two elements are used.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [4 x i32] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory. Only the first two elements
+/// are used.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
#define _mm_mask_i64gather_epi32(a, m, i, mask, s) \
- (__m128i)__builtin_ia32_gatherq_d((__v4si)(__m128i)(a), \
- (int const *)(m), \
- (__v2di)(__m128i)(i), \
- (__v4si)(__m128i)(mask), (s))
-
+ ((__m128i)__builtin_ia32_gatherq_d((__v4si)(__m128i)(a), \
+ (int const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v4si)(__m128i)(mask), (s)))
+
+/// Conditionally gathers four 32-bit integer values, either from the
+/// 128-bit vector of [4 x i32] in \a a, or from memory \a m using scaled
+/// indexes from the 256-bit vector of [4 x i64] in \a i. The 128-bit vector
+/// of [4 x i32] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*64
+/// IF mask[j+31] == 0
+/// result[j+31:j] := a[j+31:j]
+/// ELSE
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm256_mask_i64gather_epi32(__m128i a, const int *m, __m256i i,
+/// __m128i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQD instruction.
+///
+/// \param a
+/// A 128-bit vector of [4 x i32] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [4 x i32] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
#define _mm256_mask_i64gather_epi32(a, m, i, mask, s) \
- (__m128i)__builtin_ia32_gatherq_d256((__v4si)(__m128i)(a), \
- (int const *)(m), \
- (__v4di)(__m256i)(i), \
- (__v4si)(__m128i)(mask), (s))
-
+ ((__m128i)__builtin_ia32_gatherq_d256((__v4si)(__m128i)(a), \
+ (int const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4si)(__m128i)(mask), (s)))
+
+/// Conditionally gathers two 64-bit integer values, either from the
+/// 128-bit vector of [2 x i64] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i. The 128-bit vector
+/// of [2 x i64] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*32
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_mask_i32gather_epi64(__m128i a, const long long *m, __m128i i,
+/// __m128i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDQ instruction.
+///
+/// \param a
+/// A 128-bit vector of [2 x i64] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only
+/// the first two elements are used.
+/// \param mask
+/// A 128-bit vector of [2 x i64] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x i64] containing the gathered values.
#define _mm_mask_i32gather_epi64(a, m, i, mask, s) \
- (__m128i)__builtin_ia32_gatherd_q((__v2di)(__m128i)(a), \
- (long long const *)(m), \
- (__v4si)(__m128i)(i), \
- (__v2di)(__m128i)(mask), (s))
-
+ ((__m128i)__builtin_ia32_gatherd_q((__v2di)(__m128i)(a), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2di)(__m128i)(mask), (s)))
+
+/// Conditionally gathers four 64-bit integer values, either from the
+/// 256-bit vector of [4 x i64] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i. The 256-bit vector
+/// of [4 x i64] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*32
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_mask_i32gather_epi64(__m256i a, const long long *m,
+/// __m128i i, __m256i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDQ instruction.
+///
+/// \param a
+/// A 256-bit vector of [4 x i64] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param mask
+/// A 256-bit vector of [4 x i64] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x i64] containing the gathered values.
#define _mm256_mask_i32gather_epi64(a, m, i, mask, s) \
- (__m256i)__builtin_ia32_gatherd_q256((__v4di)(__m256i)(a), \
- (long long const *)(m), \
- (__v4si)(__m128i)(i), \
- (__v4di)(__m256i)(mask), (s))
-
+ ((__m256i)__builtin_ia32_gatherd_q256((__v4di)(__m256i)(a), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4di)(__m256i)(mask), (s)))
+
+/// Conditionally gathers two 64-bit integer values, either from the
+/// 128-bit vector of [2 x i64] in \a a, or from memory \a m using scaled
+/// indexes from the 128-bit vector of [2 x i64] in \a i. The 128-bit vector
+/// of [2 x i64] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*64
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_mask_i64gather_epi64(__m128i a, const long long *m, __m128i i,
+/// __m128i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQQ instruction.
+///
+/// \param a
+/// A 128-bit vector of [2 x i64] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 128-bit vector of [2 x i64] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x i64] containing the gathered values.
#define _mm_mask_i64gather_epi64(a, m, i, mask, s) \
- (__m128i)__builtin_ia32_gatherq_q((__v2di)(__m128i)(a), \
- (long long const *)(m), \
- (__v2di)(__m128i)(i), \
- (__v2di)(__m128i)(mask), (s))
-
+ ((__m128i)__builtin_ia32_gatherq_q((__v2di)(__m128i)(a), \
+ (long long const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2di)(__m128i)(mask), (s)))
+
+/// Conditionally gathers four 64-bit integer values, either from the
+/// 256-bit vector of [4 x i64] in \a a, or from memory \a m using scaled
+/// indexes from the 256-bit vector of [4 x i64] in \a i. The 256-bit vector
+/// of [4 x i64] in \a mask determines the source for each element.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*64
+/// IF mask[j+63] == 0
+/// result[j+63:j] := a[j+63:j]
+/// ELSE
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_mask_i64gather_epi64(__m256i a, const long long *m,
+/// __m256i i, __m256i mask, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQQ instruction.
+///
+/// \param a
+/// A 256-bit vector of [4 x i64] used as the source when a mask bit is
+/// zero.
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param mask
+/// A 256-bit vector of [4 x i64] containing the mask. The most significant
+/// bit of each element in the mask vector represents the mask bits. If a
+/// mask bit is zero, the corresponding value from vector \a a is gathered;
+/// otherwise the value is loaded from memory.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x i64] containing the gathered values.
#define _mm256_mask_i64gather_epi64(a, m, i, mask, s) \
- (__m256i)__builtin_ia32_gatherq_q256((__v4di)(__m256i)(a), \
- (long long const *)(m), \
- (__v4di)(__m256i)(i), \
- (__v4di)(__m256i)(mask), (s))
-
+ ((__m256i)__builtin_ia32_gatherq_q256((__v4di)(__m256i)(a), \
+ (long long const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4di)(__m256i)(mask), (s)))
+
+/// Gathers two 64-bit floating-point values from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*32
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128d _mm_i32gather_pd(const double *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only
+/// the first two elements are used.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x double] containing the gathered values.
#define _mm_i32gather_pd(m, i, s) \
- (__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_undefined_pd(), \
- (double const *)(m), \
- (__v4si)(__m128i)(i), \
- (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
- _mm_setzero_pd()), \
- (s))
-
+ ((__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_undefined_pd(), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
+ _mm_setzero_pd()), \
+ (s)))
+
+/// Gathers four 64-bit floating-point values from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*32
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256d _mm256_i32gather_pd(const double *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x double] containing the gathered values.
#define _mm256_i32gather_pd(m, i, s) \
- (__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_undefined_pd(), \
- (double const *)(m), \
- (__v4si)(__m128i)(i), \
- (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \
- _mm256_setzero_pd(), \
- _CMP_EQ_OQ), \
- (s))
-
+ ((__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_undefined_pd(), \
+ (double const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \
+ _mm256_setzero_pd(), \
+ _CMP_EQ_OQ), \
+ (s)))
+
+/// Gathers two 64-bit floating-point values from memory \a m using scaled
+/// indexes from the 128-bit vector of [2 x i64] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*64
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128d _mm_i64gather_pd(const double *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x double] containing the gathered values.
#define _mm_i64gather_pd(m, i, s) \
- (__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_undefined_pd(), \
- (double const *)(m), \
- (__v2di)(__m128i)(i), \
- (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
- _mm_setzero_pd()), \
- (s))
-
+ ((__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_undefined_pd(), \
+ (double const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2df)_mm_cmpeq_pd(_mm_setzero_pd(), \
+ _mm_setzero_pd()), \
+ (s)))
+
+/// Gathers four 64-bit floating-point values from memory \a m using scaled
+/// indexes from the 256-bit vector of [4 x i64] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*64
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256d _mm256_i64gather_pd(const double *m, __m256i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x double] containing the gathered values.
#define _mm256_i64gather_pd(m, i, s) \
- (__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_undefined_pd(), \
- (double const *)(m), \
- (__v4di)(__m256i)(i), \
- (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \
- _mm256_setzero_pd(), \
- _CMP_EQ_OQ), \
- (s))
-
+ ((__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_undefined_pd(), \
+ (double const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4df)_mm256_cmp_pd(_mm256_setzero_pd(), \
+ _mm256_setzero_pd(), \
+ _CMP_EQ_OQ), \
+ (s)))
+
+/// Gathers four 32-bit floating-point values from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*32
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128 _mm_i32gather_ps(const float *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPS instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x float] containing the gathered values.
#define _mm_i32gather_ps(m, i, s) \
- (__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_undefined_ps(), \
- (float const *)(m), \
- (__v4si)(__m128i)(i), \
- (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
- _mm_setzero_ps()), \
- (s))
-
+ ((__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_undefined_ps(), \
+ (float const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
+ _mm_setzero_ps()), \
+ (s)))
+
+/// Gathers eight 32-bit floating-point values from memory \a m using scaled
+/// indexes from the 256-bit vector of [8 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 7
+/// j := element*32
+/// k := element*32
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256 _mm256_i32gather_ps(const float *m, __m256i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERDPS instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [8 x i32] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [8 x float] containing the gathered values.
#define _mm256_i32gather_ps(m, i, s) \
- (__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_undefined_ps(), \
- (float const *)(m), \
- (__v8si)(__m256i)(i), \
- (__v8sf)_mm256_cmp_ps(_mm256_setzero_ps(), \
- _mm256_setzero_ps(), \
- _CMP_EQ_OQ), \
- (s))
-
+ ((__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_undefined_ps(), \
+ (float const *)(m), \
+ (__v8si)(__m256i)(i), \
+ (__v8sf)_mm256_cmp_ps(_mm256_setzero_ps(), \
+ _mm256_setzero_ps(), \
+ _CMP_EQ_OQ), \
+ (s)))
+
+/// Gathers two 32-bit floating-point values from memory \a m using scaled
+/// indexes from the 128-bit vector of [2 x i64] in \a i. The upper two
+/// elements of the result are zeroed.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*32
+/// k := element*64
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// result[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128 _mm_i64gather_ps(const float *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPS instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x float] containing the gathered values.
#define _mm_i64gather_ps(m, i, s) \
- (__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_undefined_ps(), \
- (float const *)(m), \
- (__v2di)(__m128i)(i), \
- (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
- _mm_setzero_ps()), \
- (s))
-
+ ((__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_undefined_ps(), \
+ (float const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
+ _mm_setzero_ps()), \
+ (s)))
+
+/// Gathers four 32-bit floating-point values from memory \a m using scaled
+/// indexes from the 256-bit vector of [4 x i64] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*64
+/// result[j+31:j] := Load32(m + SignExtend(i[k+64:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128 _mm256_i64gather_ps(const float *m, __m256i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VGATHERQPS instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x float] containing the gathered values.
#define _mm256_i64gather_ps(m, i, s) \
- (__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_undefined_ps(), \
- (float const *)(m), \
- (__v4di)(__m256i)(i), \
- (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
- _mm_setzero_ps()), \
- (s))
-
+ ((__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_undefined_ps(), \
+ (float const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4sf)_mm_cmpeq_ps(_mm_setzero_ps(), \
+ _mm_setzero_ps()), \
+ (s)))
+
+/// Gathers four 32-bit floating-point values from memory \a m using scaled
+/// indexes from the 128-bit vector of [4 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*32
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_i32gather_epi32(const int *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
#define _mm_i32gather_epi32(m, i, s) \
- (__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_undefined_si128(), \
- (int const *)(m), (__v4si)(__m128i)(i), \
- (__v4si)_mm_set1_epi32(-1), (s))
-
+ ((__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_undefined_si128(), \
+ (int const *)(m), (__v4si)(__m128i)(i), \
+ (__v4si)_mm_set1_epi32(-1), (s)))
+
+/// Gathers eight 32-bit floating-point values from memory \a m using scaled
+/// indexes from the 256-bit vector of [8 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 7
+/// j := element*32
+/// k := element*32
+/// result[j+31:j] := Load32(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_i32gather_epi32(const int *m, __m256i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [8 x i32] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [8 x i32] containing the gathered values.
#define _mm256_i32gather_epi32(m, i, s) \
- (__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_undefined_si256(), \
- (int const *)(m), (__v8si)(__m256i)(i), \
- (__v8si)_mm256_set1_epi32(-1), (s))
-
+ ((__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_undefined_si256(), \
+ (int const *)(m), (__v8si)(__m256i)(i), \
+ (__v8si)_mm256_set1_epi32(-1), (s)))
+
+/// Gathers two 32-bit integer values from memory \a m using scaled indexes
+/// from the 128-bit vector of [2 x i64] in \a i. The upper two elements
+/// of the result are zeroed.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*32
+/// k := element*64
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// result[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_i64gather_epi32(const int *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
#define _mm_i64gather_epi32(m, i, s) \
- (__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_undefined_si128(), \
- (int const *)(m), (__v2di)(__m128i)(i), \
- (__v4si)_mm_set1_epi32(-1), (s))
-
+ ((__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_undefined_si128(), \
+ (int const *)(m), (__v2di)(__m128i)(i), \
+ (__v4si)_mm_set1_epi32(-1), (s)))
+
+/// Gathers four 32-bit integer values from memory \a m using scaled indexes
+/// from the 256-bit vector of [4 x i64] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*32
+/// k := element*64
+/// result[j+31:j] := Load32(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm256_i64gather_epi32(const int *m, __m256i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQD instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [4 x i32] containing the gathered values.
#define _mm256_i64gather_epi32(m, i, s) \
- (__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_undefined_si128(), \
- (int const *)(m), (__v4di)(__m256i)(i), \
- (__v4si)_mm_set1_epi32(-1), (s))
-
+ ((__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_undefined_si128(), \
+ (int const *)(m), (__v4di)(__m256i)(i), \
+ (__v4si)_mm_set1_epi32(-1), (s)))
+
+/// Gathers two 64-bit integer values from memory \a m using scaled indexes
+/// from the 128-bit vector of [4 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*32
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_i32gather_epi64(const long long *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDQ instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m. Only
+/// the first two elements are used.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x i64] containing the gathered values.
#define _mm_i32gather_epi64(m, i, s) \
- (__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_undefined_si128(), \
- (long long const *)(m), \
- (__v4si)(__m128i)(i), \
- (__v2di)_mm_set1_epi64x(-1), (s))
-
+ ((__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_undefined_si128(), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v2di)_mm_set1_epi64x(-1), (s)))
+
+/// Gathers four 64-bit integer values from memory \a m using scaled indexes
+/// from the 128-bit vector of [4 x i32] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*32
+/// result[j+63:j] := Load64(m + SignExtend(i[k+31:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_i32gather_epi64(const long long *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERDQ instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [4 x i32] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x i64] containing the gathered values.
#define _mm256_i32gather_epi64(m, i, s) \
- (__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_undefined_si256(), \
- (long long const *)(m), \
- (__v4si)(__m128i)(i), \
- (__v4di)_mm256_set1_epi64x(-1), (s))
-
+ ((__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_undefined_si256(), \
+ (long long const *)(m), \
+ (__v4si)(__m128i)(i), \
+ (__v4di)_mm256_set1_epi64x(-1), (s)))
+
+/// Gathers two 64-bit integer values from memory \a m using scaled indexes
+/// from the 128-bit vector of [2 x i64] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 1
+/// j := element*64
+/// k := element*64
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_i64gather_epi64(const long long *m, __m128i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQQ instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 128-bit vector of [2 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 128-bit vector of [2 x i64] containing the gathered values.
#define _mm_i64gather_epi64(m, i, s) \
- (__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_undefined_si128(), \
- (long long const *)(m), \
- (__v2di)(__m128i)(i), \
- (__v2di)_mm_set1_epi64x(-1), (s))
-
+ ((__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_undefined_si128(), \
+ (long long const *)(m), \
+ (__v2di)(__m128i)(i), \
+ (__v2di)_mm_set1_epi64x(-1), (s)))
+
+/// Gathers four 64-bit integer values from memory \a m using scaled indexes
+/// from the 256-bit vector of [4 x i64] in \a i.
+///
+/// \code{.operation}
+/// FOR element := 0 to 3
+/// j := element*64
+/// k := element*64
+/// result[j+63:j] := Load64(m + SignExtend(i[k+63:k])*s)
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_i64gather_epi64(const long long *m, __m256i i, const int s);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPGATHERQQ instruction.
+///
+/// \param m
+/// A pointer to the memory used for loading values.
+/// \param i
+/// A 256-bit vector of [4 x i64] containing signed indexes into \a m.
+/// \param s
+/// A literal constant scale factor for the indexes in \a i. Must be
+/// 1, 2, 4, or 8.
+/// \returns A 256-bit vector of [4 x i64] containing the gathered values.
#define _mm256_i64gather_epi64(m, i, s) \
- (__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_undefined_si256(), \
- (long long const *)(m), \
- (__v4di)(__m256i)(i), \
- (__v4di)_mm256_set1_epi64x(-1), (s))
+ ((__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_undefined_si256(), \
+ (long long const *)(m), \
+ (__v4di)(__m256i)(i), \
+ (__v4di)_mm256_set1_epi64x(-1), (s)))
#undef __DEFAULT_FN_ATTRS256
#undef __DEFAULT_FN_ATTRS128
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512bf16intrin.h b/contrib/llvm-project/clang/lib/Headers/avx512bf16intrin.h
index d1d87e72f147..b28d2e243f2c 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512bf16intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512bf16intrin.h
@@ -10,18 +10,21 @@
#error "Never use <avx512bf16intrin.h> directly; include <immintrin.h> instead."
#endif
+#ifdef __SSE2__
+
#ifndef __AVX512BF16INTRIN_H
#define __AVX512BF16INTRIN_H
-typedef short __m512bh __attribute__((__vector_size__(64), __aligned__(64)));
-typedef short __m256bh __attribute__((__vector_size__(32), __aligned__(32)));
-typedef unsigned short __bfloat16;
+typedef __bf16 __v32bf __attribute__((__vector_size__(64), __aligned__(64)));
+typedef __bf16 __m512bh __attribute__((__vector_size__(64), __aligned__(64)));
+typedef __bf16 __bfloat16 __attribute__((deprecated("use __bf16 instead")));
#define __DEFAULT_FN_ATTRS512 \
- __attribute__((__always_inline__, __nodebug__, __target__("avx512bf16"), \
+ __attribute__((__always_inline__, __nodebug__, __target__("avx512bf16,evex512"), \
__min_vector_width__(512)))
#define __DEFAULT_FN_ATTRS \
- __attribute__((__always_inline__, __nodebug__, __target__("avx512bf16")))
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512bf16,no-evex512")))
/// Convert One BF16 Data to One Single Float Data.
///
@@ -33,7 +36,7 @@ typedef unsigned short __bfloat16;
/// A bfloat data.
/// \returns A float data whose sign field and exponent field keep unchanged,
/// and fraction field is extended to 23 bits.
-static __inline__ float __DEFAULT_FN_ATTRS _mm_cvtsbh_ss(__bfloat16 __A) {
+static __inline__ float __DEFAULT_FN_ATTRS _mm_cvtsbh_ss(__bf16 __A) {
return __builtin_ia32_cvtsbf162ss_32(__A);
}
@@ -74,9 +77,9 @@ _mm512_cvtne2ps_pbh(__m512 __A, __m512 __B) {
/// conversion of __B, and higher 256 bits come from conversion of __A.
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
_mm512_mask_cvtne2ps_pbh(__m512bh __W, __mmask32 __U, __m512 __A, __m512 __B) {
- return (__m512bh)__builtin_ia32_selectw_512((__mmask32)__U,
- (__v32hi)_mm512_cvtne2ps_pbh(__A, __B),
- (__v32hi)__W);
+ return (__m512bh)__builtin_ia32_selectpbf_512((__mmask32)__U,
+ (__v32bf)_mm512_cvtne2ps_pbh(__A, __B),
+ (__v32bf)__W);
}
/// Convert Two Packed Single Data to One Packed BF16 Data.
@@ -96,9 +99,9 @@ _mm512_mask_cvtne2ps_pbh(__m512bh __W, __mmask32 __U, __m512 __A, __m512 __B) {
/// conversion of __B, and higher 256 bits come from conversion of __A.
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtne2ps_pbh(__mmask32 __U, __m512 __A, __m512 __B) {
- return (__m512bh)__builtin_ia32_selectw_512((__mmask32)__U,
- (__v32hi)_mm512_cvtne2ps_pbh(__A, __B),
- (__v32hi)_mm512_setzero_si512());
+ return (__m512bh)__builtin_ia32_selectpbf_512((__mmask32)__U,
+ (__v32bf)_mm512_cvtne2ps_pbh(__A, __B),
+ (__v32bf)_mm512_setzero_si512());
}
/// Convert Packed Single Data to Packed BF16 Data.
@@ -113,7 +116,7 @@ _mm512_maskz_cvtne2ps_pbh(__mmask32 __U, __m512 __A, __m512 __B) {
static __inline__ __m256bh __DEFAULT_FN_ATTRS512
_mm512_cvtneps_pbh(__m512 __A) {
return (__m256bh)__builtin_ia32_cvtneps2bf16_512_mask((__v16sf)__A,
- (__v16hi)_mm256_undefined_si256(),
+ (__v16bf)_mm256_undefined_si256(),
(__mmask16)-1);
}
@@ -134,7 +137,7 @@ _mm512_cvtneps_pbh(__m512 __A) {
static __inline__ __m256bh __DEFAULT_FN_ATTRS512
_mm512_mask_cvtneps_pbh(__m256bh __W, __mmask16 __U, __m512 __A) {
return (__m256bh)__builtin_ia32_cvtneps2bf16_512_mask((__v16sf)__A,
- (__v16hi)__W,
+ (__v16bf)__W,
(__mmask16)__U);
}
@@ -153,7 +156,7 @@ _mm512_mask_cvtneps_pbh(__m256bh __W, __mmask16 __U, __m512 __A) {
static __inline__ __m256bh __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtneps_pbh(__mmask16 __U, __m512 __A) {
return (__m256bh)__builtin_ia32_cvtneps2bf16_512_mask((__v16sf)__A,
- (__v16hi)_mm256_setzero_si256(),
+ (__v16bf)_mm256_setzero_si256(),
(__mmask16)__U);
}
@@ -174,8 +177,8 @@ _mm512_maskz_cvtneps_pbh(__mmask16 __U, __m512 __A) {
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_dpbf16_ps(__m512 __D, __m512bh __A, __m512bh __B) {
return (__m512)__builtin_ia32_dpbf16ps_512((__v16sf) __D,
- (__v16si) __A,
- (__v16si) __B);
+ (__v32bf) __A,
+ (__v32bf) __B);
}
/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
@@ -232,7 +235,7 @@ _mm512_maskz_dpbf16_ps(__mmask16 __U, __m512 __D, __m512bh __A, __m512bh __B) {
///
/// \param __A
/// A 256-bit vector of [16 x bfloat].
-/// \returns A 512-bit vector of [16 x float] come from convertion of __A
+/// \returns A 512-bit vector of [16 x float] come from conversion of __A
static __inline__ __m512 __DEFAULT_FN_ATTRS512 _mm512_cvtpbh_ps(__m256bh __A) {
return _mm512_castsi512_ps((__m512i)_mm512_slli_epi32(
(__m512i)_mm512_cvtepi16_epi32((__m256i)__A), 16));
@@ -247,7 +250,7 @@ static __inline__ __m512 __DEFAULT_FN_ATTRS512 _mm512_cvtpbh_ps(__m256bh __A) {
/// bit is not set.
/// \param __A
/// A 256-bit vector of [16 x bfloat].
-/// \returns A 512-bit vector of [16 x float] come from convertion of __A
+/// \returns A 512-bit vector of [16 x float] come from conversion of __A
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtpbh_ps(__mmask16 __U, __m256bh __A) {
return _mm512_castsi512_ps((__m512i)_mm512_slli_epi32(
@@ -265,7 +268,7 @@ _mm512_maskz_cvtpbh_ps(__mmask16 __U, __m256bh __A) {
/// A 16-bit mask.
/// \param __A
/// A 256-bit vector of [16 x bfloat].
-/// \returns A 512-bit vector of [16 x float] come from convertion of __A
+/// \returns A 512-bit vector of [16 x float] come from conversion of __A
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_cvtpbh_ps(__m512 __S, __mmask16 __U, __m256bh __A) {
return _mm512_castsi512_ps((__m512i)_mm512_mask_slli_epi32(
@@ -277,3 +280,4 @@ _mm512_mask_cvtpbh_ps(__m512 __S, __mmask16 __U, __m256bh __A) {
#undef __DEFAULT_FN_ATTRS512
#endif
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512bitalgintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512bitalgintrin.h
index d4411d156ba5..bad265ceb7db 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512bitalgintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512bitalgintrin.h
@@ -15,7 +15,10 @@
#define __AVX512BITALGINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512bitalg"), __min_vector_width__(512)))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512bitalg,evex512"), \
+ __min_vector_width__(512)))
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_popcnt_epi16(__m512i __A)
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512bwintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512bwintrin.h
index 4281a33d375c..c854720de6a6 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512bwintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512bwintrin.h
@@ -18,8 +18,12 @@ typedef unsigned int __mmask32;
typedef unsigned long long __mmask64;
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512bw"), __min_vector_width__(512)))
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512bw")))
+#define __DEFAULT_FN_ATTRS512 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512bw,evex512"), __min_vector_width__(512)))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512bw,no-evex512")))
static __inline __mmask32 __DEFAULT_FN_ATTRS
_knot_mask32(__mmask32 __M)
@@ -27,9 +31,7 @@ _knot_mask32(__mmask32 __M)
return __builtin_ia32_knotsi(__M);
}
-static __inline __mmask64 __DEFAULT_FN_ATTRS
-_knot_mask64(__mmask64 __M)
-{
+static __inline __mmask64 __DEFAULT_FN_ATTRS _knot_mask64(__mmask64 __M) {
return __builtin_ia32_knotdi(__M);
}
@@ -39,9 +41,8 @@ _kand_mask32(__mmask32 __A, __mmask32 __B)
return (__mmask32)__builtin_ia32_kandsi((__mmask32)__A, (__mmask32)__B);
}
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kand_mask64(__mmask64 __A, __mmask64 __B)
-{
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS _kand_mask64(__mmask64 __A,
+ __mmask64 __B) {
return (__mmask64)__builtin_ia32_kanddi((__mmask64)__A, (__mmask64)__B);
}
@@ -51,9 +52,8 @@ _kandn_mask32(__mmask32 __A, __mmask32 __B)
return (__mmask32)__builtin_ia32_kandnsi((__mmask32)__A, (__mmask32)__B);
}
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kandn_mask64(__mmask64 __A, __mmask64 __B)
-{
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS _kandn_mask64(__mmask64 __A,
+ __mmask64 __B) {
return (__mmask64)__builtin_ia32_kandndi((__mmask64)__A, (__mmask64)__B);
}
@@ -63,9 +63,8 @@ _kor_mask32(__mmask32 __A, __mmask32 __B)
return (__mmask32)__builtin_ia32_korsi((__mmask32)__A, (__mmask32)__B);
}
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kor_mask64(__mmask64 __A, __mmask64 __B)
-{
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS _kor_mask64(__mmask64 __A,
+ __mmask64 __B) {
return (__mmask64)__builtin_ia32_kordi((__mmask64)__A, (__mmask64)__B);
}
@@ -75,9 +74,8 @@ _kxnor_mask32(__mmask32 __A, __mmask32 __B)
return (__mmask32)__builtin_ia32_kxnorsi((__mmask32)__A, (__mmask32)__B);
}
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kxnor_mask64(__mmask64 __A, __mmask64 __B)
-{
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS _kxnor_mask64(__mmask64 __A,
+ __mmask64 __B) {
return (__mmask64)__builtin_ia32_kxnordi((__mmask64)__A, (__mmask64)__B);
}
@@ -87,9 +85,8 @@ _kxor_mask32(__mmask32 __A, __mmask32 __B)
return (__mmask32)__builtin_ia32_kxorsi((__mmask32)__A, (__mmask32)__B);
}
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kxor_mask64(__mmask64 __A, __mmask64 __B)
-{
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS _kxor_mask64(__mmask64 __A,
+ __mmask64 __B) {
return (__mmask64)__builtin_ia32_kxordi((__mmask64)__A, (__mmask64)__B);
}
@@ -112,14 +109,12 @@ _kortest_mask32_u8(__mmask32 __A, __mmask32 __B, unsigned char *__C) {
}
static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortestc_mask64_u8(__mmask64 __A, __mmask64 __B)
-{
+_kortestc_mask64_u8(__mmask64 __A, __mmask64 __B) {
return (unsigned char)__builtin_ia32_kortestcdi(__A, __B);
}
static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_kortestz_mask64_u8(__mmask64 __A, __mmask64 __B)
-{
+_kortestz_mask64_u8(__mmask64 __A, __mmask64 __B) {
return (unsigned char)__builtin_ia32_kortestzdi(__A, __B);
}
@@ -148,14 +143,12 @@ _ktest_mask32_u8(__mmask32 __A, __mmask32 __B, unsigned char *__C) {
}
static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_ktestc_mask64_u8(__mmask64 __A, __mmask64 __B)
-{
+_ktestc_mask64_u8(__mmask64 __A, __mmask64 __B) {
return (unsigned char)__builtin_ia32_ktestcdi(__A, __B);
}
static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_ktestz_mask64_u8(__mmask64 __A, __mmask64 __B)
-{
+_ktestz_mask64_u8(__mmask64 __A, __mmask64 __B) {
return (unsigned char)__builtin_ia32_ktestzdi(__A, __B);
}
@@ -171,23 +164,22 @@ _kadd_mask32(__mmask32 __A, __mmask32 __B)
return (__mmask32)__builtin_ia32_kaddsi((__mmask32)__A, (__mmask32)__B);
}
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_kadd_mask64(__mmask64 __A, __mmask64 __B)
-{
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS _kadd_mask64(__mmask64 __A,
+ __mmask64 __B) {
return (__mmask64)__builtin_ia32_kadddi((__mmask64)__A, (__mmask64)__B);
}
#define _kshiftli_mask32(A, I) \
- (__mmask32)__builtin_ia32_kshiftlisi((__mmask32)(A), (unsigned int)(I))
+ ((__mmask32)__builtin_ia32_kshiftlisi((__mmask32)(A), (unsigned int)(I)))
#define _kshiftri_mask32(A, I) \
- (__mmask32)__builtin_ia32_kshiftrisi((__mmask32)(A), (unsigned int)(I))
+ ((__mmask32)__builtin_ia32_kshiftrisi((__mmask32)(A), (unsigned int)(I)))
#define _kshiftli_mask64(A, I) \
- (__mmask64)__builtin_ia32_kshiftlidi((__mmask64)(A), (unsigned int)(I))
+ ((__mmask64)__builtin_ia32_kshiftlidi((__mmask64)(A), (unsigned int)(I)))
#define _kshiftri_mask64(A, I) \
- (__mmask64)__builtin_ia32_kshiftridi((__mmask64)(A), (unsigned int)(I))
+ ((__mmask64)__builtin_ia32_kshiftridi((__mmask64)(A), (unsigned int)(I)))
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_cvtmask32_u32(__mmask32 __A) {
@@ -214,8 +206,7 @@ _load_mask32(__mmask32 *__A) {
return (__mmask32)__builtin_ia32_kmovd(*(__mmask32 *)__A);
}
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_load_mask64(__mmask64 *__A) {
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS _load_mask64(__mmask64 *__A) {
return (__mmask64)__builtin_ia32_kmovq(*(__mmask64 *)__A);
}
@@ -224,52 +215,52 @@ _store_mask32(__mmask32 *__A, __mmask32 __B) {
*(__mmask32 *)__A = __builtin_ia32_kmovd((__mmask32)__B);
}
-static __inline__ void __DEFAULT_FN_ATTRS
-_store_mask64(__mmask64 *__A, __mmask64 __B) {
+static __inline__ void __DEFAULT_FN_ATTRS _store_mask64(__mmask64 *__A,
+ __mmask64 __B) {
*(__mmask64 *)__A = __builtin_ia32_kmovq((__mmask64)__B);
}
/* Integer compare */
#define _mm512_cmp_epi8_mask(a, b, p) \
- (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
- (__v64qi)(__m512i)(b), (int)(p), \
- (__mmask64)-1)
+ ((__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
+ (__v64qi)(__m512i)(b), (int)(p), \
+ (__mmask64)-1))
#define _mm512_mask_cmp_epi8_mask(m, a, b, p) \
- (__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
- (__v64qi)(__m512i)(b), (int)(p), \
- (__mmask64)(m))
+ ((__mmask64)__builtin_ia32_cmpb512_mask((__v64qi)(__m512i)(a), \
+ (__v64qi)(__m512i)(b), (int)(p), \
+ (__mmask64)(m)))
#define _mm512_cmp_epu8_mask(a, b, p) \
- (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
- (__v64qi)(__m512i)(b), (int)(p), \
- (__mmask64)-1)
+ ((__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
+ (__v64qi)(__m512i)(b), (int)(p), \
+ (__mmask64)-1))
#define _mm512_mask_cmp_epu8_mask(m, a, b, p) \
- (__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
- (__v64qi)(__m512i)(b), (int)(p), \
- (__mmask64)(m))
+ ((__mmask64)__builtin_ia32_ucmpb512_mask((__v64qi)(__m512i)(a), \
+ (__v64qi)(__m512i)(b), (int)(p), \
+ (__mmask64)(m)))
#define _mm512_cmp_epi16_mask(a, b, p) \
- (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
- (__v32hi)(__m512i)(b), (int)(p), \
- (__mmask32)-1)
+ ((__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
+ (__v32hi)(__m512i)(b), (int)(p), \
+ (__mmask32)-1))
#define _mm512_mask_cmp_epi16_mask(m, a, b, p) \
- (__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
- (__v32hi)(__m512i)(b), (int)(p), \
- (__mmask32)(m))
+ ((__mmask32)__builtin_ia32_cmpw512_mask((__v32hi)(__m512i)(a), \
+ (__v32hi)(__m512i)(b), (int)(p), \
+ (__mmask32)(m)))
#define _mm512_cmp_epu16_mask(a, b, p) \
- (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
- (__v32hi)(__m512i)(b), (int)(p), \
- (__mmask32)-1)
+ ((__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
+ (__v32hi)(__m512i)(b), (int)(p), \
+ (__mmask32)-1))
#define _mm512_mask_cmp_epu16_mask(m, a, b, p) \
- (__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
- (__v32hi)(__m512i)(b), (int)(p), \
- (__mmask32)(m))
+ ((__mmask32)__builtin_ia32_ucmpw512_mask((__v32hi)(__m512i)(a), \
+ (__v32hi)(__m512i)(b), (int)(p), \
+ (__mmask32)(m)))
#define _mm512_cmpeq_epi8_mask(A, B) \
_mm512_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ)
@@ -485,7 +476,7 @@ _mm512_mask_blend_epi16 (__mmask32 __U, __m512i __A, __m512i __W)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_abs_epi8 (__m512i __A)
{
- return (__m512i)__builtin_ia32_pabsb512((__v64qi)__A);
+ return (__m512i)__builtin_elementwise_abs((__v64qs)__A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -507,7 +498,7 @@ _mm512_maskz_abs_epi8 (__mmask64 __U, __m512i __A)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_abs_epi16 (__m512i __A)
{
- return (__m512i)__builtin_ia32_pabsw512((__v32hi)__A);
+ return (__m512i)__builtin_elementwise_abs((__v32hi)__A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -617,7 +608,7 @@ _mm512_maskz_packus_epi16(__mmask64 __M, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_adds_epi8 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_paddsb512((__v64qi)__A, (__v64qi)__B);
+ return (__m512i)__builtin_elementwise_add_sat((__v64qs)__A, (__v64qs)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -639,7 +630,7 @@ _mm512_maskz_adds_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_adds_epi16 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_paddsw512((__v32hi)__A, (__v32hi)__B);
+ return (__m512i)__builtin_elementwise_add_sat((__v32hi)__A, (__v32hi)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -661,7 +652,7 @@ _mm512_maskz_adds_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_adds_epu8 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_paddusb512((__v64qi) __A, (__v64qi) __B);
+ return (__m512i)__builtin_elementwise_add_sat((__v64qu) __A, (__v64qu) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -683,7 +674,7 @@ _mm512_maskz_adds_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_adds_epu16 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_paddusw512((__v32hi) __A, (__v32hi) __B);
+ return (__m512i)__builtin_elementwise_add_sat((__v32hu) __A, (__v32hu) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -751,7 +742,7 @@ _mm512_maskz_avg_epu16 (__mmask32 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epi8 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pmaxsb512((__v64qi) __A, (__v64qi) __B);
+ return (__m512i)__builtin_elementwise_max((__v64qs) __A, (__v64qs) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -773,7 +764,7 @@ _mm512_mask_max_epi8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epi16 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pmaxsw512((__v32hi) __A, (__v32hi) __B);
+ return (__m512i)__builtin_elementwise_max((__v32hi) __A, (__v32hi) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -796,7 +787,7 @@ _mm512_mask_max_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epu8 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pmaxub512((__v64qi)__A, (__v64qi)__B);
+ return (__m512i)__builtin_elementwise_max((__v64qu)__A, (__v64qu)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -818,7 +809,7 @@ _mm512_mask_max_epu8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epu16 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pmaxuw512((__v32hi)__A, (__v32hi)__B);
+ return (__m512i)__builtin_elementwise_max((__v32hu)__A, (__v32hu)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -840,7 +831,7 @@ _mm512_mask_max_epu16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epi8 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pminsb512((__v64qi) __A, (__v64qi) __B);
+ return (__m512i)__builtin_elementwise_min((__v64qs) __A, (__v64qs) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -862,7 +853,7 @@ _mm512_mask_min_epi8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epi16 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pminsw512((__v32hi) __A, (__v32hi) __B);
+ return (__m512i)__builtin_elementwise_min((__v32hi) __A, (__v32hi) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -884,7 +875,7 @@ _mm512_mask_min_epi16 (__m512i __W, __mmask32 __M, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epu8 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pminub512((__v64qi)__A, (__v64qi)__B);
+ return (__m512i)__builtin_elementwise_min((__v64qu)__A, (__v64qu)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -906,7 +897,7 @@ _mm512_mask_min_epu8 (__m512i __W, __mmask64 __M, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epu16 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pminuw512((__v32hi)__A, (__v32hi)__B);
+ return (__m512i)__builtin_elementwise_min((__v32hu)__A, (__v32hu)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -950,7 +941,7 @@ _mm512_maskz_shuffle_epi8(__mmask64 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_subs_epi8 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_psubsb512((__v64qi)__A, (__v64qi)__B);
+ return (__m512i)__builtin_elementwise_sub_sat((__v64qs)__A, (__v64qs)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -972,7 +963,7 @@ _mm512_maskz_subs_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_subs_epi16 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_psubsw512((__v32hi)__A, (__v32hi)__B);
+ return (__m512i)__builtin_elementwise_sub_sat((__v32hi)__A, (__v32hi)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -994,7 +985,7 @@ _mm512_maskz_subs_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_subs_epu8 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_psubusb512((__v64qi) __A, (__v64qi) __B);
+ return (__m512i)__builtin_elementwise_sub_sat((__v64qu) __A, (__v64qu) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1016,7 +1007,7 @@ _mm512_maskz_subs_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_subs_epu16 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_psubusw512((__v32hi) __A, (__v32hi) __B);
+ return (__m512i)__builtin_elementwise_sub_sat((__v32hu) __A, (__v32hu) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1428,36 +1419,36 @@ _mm512_maskz_cvtepu8_epi16(__mmask32 __U, __m256i __A)
#define _mm512_shufflehi_epi16(A, imm) \
- (__m512i)__builtin_ia32_pshufhw512((__v32hi)(__m512i)(A), (int)(imm))
+ ((__m512i)__builtin_ia32_pshufhw512((__v32hi)(__m512i)(A), (int)(imm)))
#define _mm512_mask_shufflehi_epi16(W, U, A, imm) \
- (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
- (__v32hi)_mm512_shufflehi_epi16((A), \
- (imm)), \
- (__v32hi)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_shufflehi_epi16((A), \
+ (imm)), \
+ (__v32hi)(__m512i)(W)))
#define _mm512_maskz_shufflehi_epi16(U, A, imm) \
- (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
- (__v32hi)_mm512_shufflehi_epi16((A), \
- (imm)), \
- (__v32hi)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_shufflehi_epi16((A), \
+ (imm)), \
+ (__v32hi)_mm512_setzero_si512()))
#define _mm512_shufflelo_epi16(A, imm) \
- (__m512i)__builtin_ia32_pshuflw512((__v32hi)(__m512i)(A), (int)(imm))
+ ((__m512i)__builtin_ia32_pshuflw512((__v32hi)(__m512i)(A), (int)(imm)))
#define _mm512_mask_shufflelo_epi16(W, U, A, imm) \
- (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
- (__v32hi)_mm512_shufflelo_epi16((A), \
- (imm)), \
- (__v32hi)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_shufflelo_epi16((A), \
+ (imm)), \
+ (__v32hi)(__m512i)(W)))
#define _mm512_maskz_shufflelo_epi16(U, A, imm) \
- (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
- (__v32hi)_mm512_shufflelo_epi16((A), \
- (imm)), \
- (__v32hi)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_shufflelo_epi16((A), \
+ (imm)), \
+ (__v32hi)_mm512_setzero_si512()))
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_sllv_epi16(__m512i __A, __m512i __B)
@@ -1506,7 +1497,7 @@ _mm512_maskz_sll_epi16(__mmask32 __U, __m512i __A, __m128i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_slli_epi16(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_psllwi512((__v32hi)__A, __B);
+ return (__m512i)__builtin_ia32_psllwi512((__v32hi)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1527,7 +1518,7 @@ _mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, unsigned int __B)
}
#define _mm512_bslli_epi128(a, imm) \
- (__m512i)__builtin_ia32_pslldqi512_byteshift((__v8di)(__m512i)(a), (int)(imm))
+ ((__m512i)__builtin_ia32_pslldqi512_byteshift((__v8di)(__m512i)(a), (int)(imm)))
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srlv_epi16(__m512i __A, __m512i __B)
@@ -1598,7 +1589,7 @@ _mm512_maskz_sra_epi16(__mmask32 __U, __m512i __A, __m128i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srai_epi16(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_psrawi512((__v32hi)__A, __B);
+ return (__m512i)__builtin_ia32_psrawi512((__v32hi)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1643,7 +1634,7 @@ _mm512_maskz_srl_epi16(__mmask32 __U, __m512i __A, __m128i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srli_epi16(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_psrlwi512((__v32hi)__A, __B);
+ return (__m512i)__builtin_ia32_psrlwi512((__v32hi)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1659,12 +1650,12 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_srli_epi16(__mmask32 __U, __m512i __A, int __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
- (__v32hi)_mm512_srli_epi16(__A, __B),
+ (__v32hi)_mm512_srli_epi16(__A, (unsigned int)__B),
(__v32hi)_mm512_setzero_si512());
}
#define _mm512_bsrli_epi128(a, imm) \
- (__m512i)__builtin_ia32_psrldqi512_byteshift((__v8di)(__m512i)(a), (int)(imm))
+ ((__m512i)__builtin_ia32_psrldqi512_byteshift((__v8di)(__m512i)(a), (int)(imm)))
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_mov_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
@@ -1714,9 +1705,8 @@ _mm512_maskz_set1_epi8 (__mmask64 __M, char __A)
(__v64qi) _mm512_setzero_si512());
}
-static __inline__ __mmask64 __DEFAULT_FN_ATTRS
-_mm512_kunpackd (__mmask64 __A, __mmask64 __B)
-{
+static __inline__ __mmask64 __DEFAULT_FN_ATTRS _mm512_kunpackd(__mmask64 __A,
+ __mmask64 __B) {
return (__mmask64) __builtin_ia32_kunpckdi ((__mmask64) __A,
(__mmask64) __B);
}
@@ -1984,32 +1974,32 @@ _mm512_mask_permutexvar_epi16 (__m512i __W, __mmask32 __M, __m512i __A,
}
#define _mm512_alignr_epi8(A, B, N) \
- (__m512i)__builtin_ia32_palignr512((__v64qi)(__m512i)(A), \
- (__v64qi)(__m512i)(B), (int)(N))
+ ((__m512i)__builtin_ia32_palignr512((__v64qi)(__m512i)(A), \
+ (__v64qi)(__m512i)(B), (int)(N)))
#define _mm512_mask_alignr_epi8(W, U, A, B, N) \
- (__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
- (__v64qi)_mm512_alignr_epi8((A), (B), (int)(N)), \
- (__v64qi)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
+ (__v64qi)_mm512_alignr_epi8((A), (B), (int)(N)), \
+ (__v64qi)(__m512i)(W)))
#define _mm512_maskz_alignr_epi8(U, A, B, N) \
- (__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
+ ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
(__v64qi)_mm512_alignr_epi8((A), (B), (int)(N)), \
- (__v64qi)(__m512i)_mm512_setzero_si512())
+ (__v64qi)(__m512i)_mm512_setzero_si512()))
#define _mm512_dbsad_epu8(A, B, imm) \
- (__m512i)__builtin_ia32_dbpsadbw512((__v64qi)(__m512i)(A), \
- (__v64qi)(__m512i)(B), (int)(imm))
+ ((__m512i)__builtin_ia32_dbpsadbw512((__v64qi)(__m512i)(A), \
+ (__v64qi)(__m512i)(B), (int)(imm)))
#define _mm512_mask_dbsad_epu8(W, U, A, B, imm) \
- (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
(__v32hi)_mm512_dbsad_epu8((A), (B), (imm)), \
- (__v32hi)(__m512i)(W))
+ (__v32hi)(__m512i)(W)))
#define _mm512_maskz_dbsad_epu8(U, A, B, imm) \
- (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
(__v32hi)_mm512_dbsad_epu8((A), (B), (imm)), \
- (__v32hi)_mm512_setzero_si512())
+ (__v32hi)_mm512_setzero_si512()))
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_sad_epu8 (__m512i __A, __m512i __B)
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512cdintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512cdintrin.h
index bfdba84aa28b..33b552f6fe6a 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512cdintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512cdintrin.h
@@ -15,7 +15,9 @@
#define __AVX512CDINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512cd"), __min_vector_width__(512)))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512cd,evex512"), __min_vector_width__(512)))
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_conflict_epi64 (__m512i __A)
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512dqintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512dqintrin.h
index 337256c50f50..88b48e3a3207 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512dqintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512dqintrin.h
@@ -15,8 +15,10 @@
#define __AVX512DQINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512dq"), __min_vector_width__(512)))
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512dq")))
+#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512dq,evex512"), __min_vector_width__(512)))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512dq,no-evex512")))
static __inline __mmask8 __DEFAULT_FN_ATTRS
_knot_mask8(__mmask8 __M)
@@ -121,10 +123,10 @@ _kadd_mask16(__mmask16 __A, __mmask16 __B)
}
#define _kshiftli_mask8(A, I) \
- (__mmask8)__builtin_ia32_kshiftliqi((__mmask8)(A), (unsigned int)(I))
+ ((__mmask8)__builtin_ia32_kshiftliqi((__mmask8)(A), (unsigned int)(I)))
#define _kshiftri_mask8(A, I) \
- (__mmask8)__builtin_ia32_kshiftriqi((__mmask8)(A), (unsigned int)(I))
+ ((__mmask8)__builtin_ia32_kshiftriqi((__mmask8)(A), (unsigned int)(I)))
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_cvtmask8_u32(__mmask8 __A) {
@@ -342,19 +344,19 @@ _mm512_maskz_cvtpd_epi64 (__mmask8 __U, __m512d __A) {
}
#define _mm512_cvt_roundpd_epi64(A, R) \
- (__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1, (int)(R))
+ ((__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvt_roundpd_epi64(W, U, A, R) \
- (__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
- (__v8di)(__m512i)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_cvt_roundpd_epi64(U, A, R) \
- (__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtpd_epu64 (__m512d __A) {
@@ -381,19 +383,19 @@ _mm512_maskz_cvtpd_epu64 (__mmask8 __U, __m512d __A) {
}
#define _mm512_cvt_roundpd_epu64(A, R) \
- (__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1, (int)(R))
+ ((__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvt_roundpd_epu64(W, U, A, R) \
- (__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
- (__v8di)(__m512i)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_cvt_roundpd_epu64(U, A, R) \
- (__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtps_epi64 (__m256 __A) {
@@ -420,19 +422,19 @@ _mm512_maskz_cvtps_epi64 (__mmask8 __U, __m256 __A) {
}
#define _mm512_cvt_roundps_epi64(A, R) \
- (__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1, (int)(R))
+ ((__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvt_roundps_epi64(W, U, A, R) \
- (__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
- (__v8di)(__m512i)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_cvt_roundps_epi64(U, A, R) \
- (__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtps_epu64 (__m256 __A) {
@@ -459,19 +461,19 @@ _mm512_maskz_cvtps_epu64 (__mmask8 __U, __m256 __A) {
}
#define _mm512_cvt_roundps_epu64(A, R) \
- (__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1, (int)(R))
+ ((__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvt_roundps_epu64(W, U, A, R) \
- (__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
- (__v8di)(__m512i)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_cvt_roundps_epu64(U, A, R) \
- (__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
@@ -494,19 +496,19 @@ _mm512_maskz_cvtepi64_pd (__mmask8 __U, __m512i __A) {
}
#define _mm512_cvt_roundepi64_pd(A, R) \
- (__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvt_roundepi64_pd(W, U, A, R) \
- (__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
- (__v8df)(__m512d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_cvt_roundepi64_pd(U, A, R) \
- (__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m256 __DEFAULT_FN_ATTRS512
_mm512_cvtepi64_ps (__m512i __A) {
@@ -533,19 +535,19 @@ _mm512_maskz_cvtepi64_ps (__mmask8 __U, __m512i __A) {
}
#define _mm512_cvt_roundepi64_ps(A, R) \
- (__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
- (__v8sf)_mm256_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvt_roundepi64_ps(W, U, A, R) \
- (__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
- (__v8sf)(__m256)(W), (__mmask8)(U), \
- (int)(R))
+ ((__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
+ (__v8sf)(__m256)(W), (__mmask8)(U), \
+ (int)(R)))
#define _mm512_maskz_cvt_roundepi64_ps(U, A, R) \
- (__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
- (__v8sf)_mm256_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -573,19 +575,19 @@ _mm512_maskz_cvttpd_epi64 (__mmask8 __U, __m512d __A) {
}
#define _mm512_cvtt_roundpd_epi64(A, R) \
- (__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1, (int)(R))
+ ((__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvtt_roundpd_epi64(W, U, A, R) \
- (__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
- (__v8di)(__m512i)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_cvtt_roundpd_epi64(U, A, R) \
- (__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvttpd_epu64 (__m512d __A) {
@@ -612,19 +614,19 @@ _mm512_maskz_cvttpd_epu64 (__mmask8 __U, __m512d __A) {
}
#define _mm512_cvtt_roundpd_epu64(A, R) \
- (__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1, (int)(R))
+ ((__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvtt_roundpd_epu64(W, U, A, R) \
- (__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
- (__v8di)(__m512i)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_cvtt_roundpd_epu64(U, A, R) \
- (__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvttps_epi64 (__m256 __A) {
@@ -651,19 +653,19 @@ _mm512_maskz_cvttps_epi64 (__mmask8 __U, __m256 __A) {
}
#define _mm512_cvtt_roundps_epi64(A, R) \
- (__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1, (int)(R))
+ ((__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvtt_roundps_epi64(W, U, A, R) \
- (__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
- (__v8di)(__m512i)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_cvtt_roundps_epi64(U, A, R) \
- (__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvttps_epu64 (__m256 __A) {
@@ -690,19 +692,19 @@ _mm512_maskz_cvttps_epu64 (__mmask8 __U, __m256 __A) {
}
#define _mm512_cvtt_roundps_epu64(A, R) \
- (__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1, (int)(R))
+ ((__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvtt_roundps_epu64(W, U, A, R) \
- (__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
- (__v8di)(__m512i)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)(__m512i)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_cvtt_roundps_epu64(U, A, R) \
- (__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
+ (__v8di)_mm512_setzero_si512(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_cvtepu64_pd (__m512i __A) {
@@ -724,20 +726,20 @@ _mm512_maskz_cvtepu64_pd (__mmask8 __U, __m512i __A) {
}
#define _mm512_cvt_roundepu64_pd(A, R) \
- (__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvt_roundepu64_pd(W, U, A, R) \
- (__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
- (__v8df)(__m512d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_cvt_roundepu64_pd(U, A, R) \
- (__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m256 __DEFAULT_FN_ATTRS512
@@ -765,290 +767,290 @@ _mm512_maskz_cvtepu64_ps (__mmask8 __U, __m512i __A) {
}
#define _mm512_cvt_roundepu64_ps(A, R) \
- (__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
- (__v8sf)_mm256_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvt_roundepu64_ps(W, U, A, R) \
- (__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
- (__v8sf)(__m256)(W), (__mmask8)(U), \
- (int)(R))
+ ((__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
+ (__v8sf)(__m256)(W), (__mmask8)(U), \
+ (int)(R)))
#define _mm512_maskz_cvt_roundepu64_ps(U, A, R) \
- (__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
- (__v8sf)_mm256_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_range_pd(A, B, C) \
- (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(C), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(C), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_range_pd(W, U, A, B, C) \
- (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(C), \
- (__v8df)(__m512d)(W), (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(C), \
+ (__v8df)(__m512d)(W), (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_range_pd(U, A, B, C) \
- (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(C), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(C), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_range_round_pd(A, B, C, R) \
- (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(C), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(C), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_range_round_pd(W, U, A, B, C, R) \
- (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(C), \
- (__v8df)(__m512d)(W), (__mmask8)(U), \
- (int)(R))
+ ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(C), \
+ (__v8df)(__m512d)(W), (__mmask8)(U), \
+ (int)(R)))
#define _mm512_maskz_range_round_pd(U, A, B, C, R) \
- (__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(C), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(C), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_range_ps(A, B, C) \
- (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(C), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(C), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_range_ps(W, U, A, B, C) \
- (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(C), \
- (__v16sf)(__m512)(W), (__mmask16)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(C), \
+ (__v16sf)(__m512)(W), (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_range_ps(U, A, B, C) \
- (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(C), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(C), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_range_round_ps(A, B, C, R) \
- (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(C), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(C), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_range_round_ps(W, U, A, B, C, R) \
- (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(C), \
- (__v16sf)(__m512)(W), (__mmask16)(U), \
- (int)(R))
+ ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(C), \
+ (__v16sf)(__m512)(W), (__mmask16)(U), \
+ (int)(R)))
#define _mm512_maskz_range_round_ps(U, A, B, C, R) \
- (__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(C), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(C), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)))
#define _mm_range_round_ss(A, B, C, R) \
- (__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8) -1, (int)(C),\
- (int)(R))
+ ((__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8) -1, (int)(C),\
+ (int)(R)))
#define _mm_range_ss(A ,B , C) _mm_range_round_ss(A, B, C ,_MM_FROUND_CUR_DIRECTION)
#define _mm_mask_range_round_ss(W, U, A, B, C, R) \
- (__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W),\
- (__mmask8)(U), (int)(C),\
- (int)(R))
+ ((__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W),\
+ (__mmask8)(U), (int)(C),\
+ (int)(R)))
#define _mm_mask_range_ss(W , U, A, B, C) _mm_mask_range_round_ss(W, U, A, B, C , _MM_FROUND_CUR_DIRECTION)
#define _mm_maskz_range_round_ss(U, A, B, C, R) \
- (__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(C),\
- (int)(R))
+ ((__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(C),\
+ (int)(R)))
#define _mm_maskz_range_ss(U, A ,B , C) _mm_maskz_range_round_ss(U, A, B, C ,_MM_FROUND_CUR_DIRECTION)
#define _mm_range_round_sd(A, B, C, R) \
- (__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8) -1, (int)(C),\
- (int)(R))
+ ((__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8) -1, (int)(C),\
+ (int)(R)))
#define _mm_range_sd(A ,B , C) _mm_range_round_sd(A, B, C ,_MM_FROUND_CUR_DIRECTION)
#define _mm_mask_range_round_sd(W, U, A, B, C, R) \
- (__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W),\
- (__mmask8)(U), (int)(C),\
- (int)(R))
+ ((__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W),\
+ (__mmask8)(U), (int)(C),\
+ (int)(R)))
#define _mm_mask_range_sd(W, U, A, B, C) _mm_mask_range_round_sd(W, U, A, B, C ,_MM_FROUND_CUR_DIRECTION)
#define _mm_maskz_range_round_sd(U, A, B, C, R) \
- (__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(C),\
- (int)(R))
+ ((__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(C),\
+ (int)(R)))
#define _mm_maskz_range_sd(U, A, B, C) _mm_maskz_range_round_sd(U, A, B, C ,_MM_FROUND_CUR_DIRECTION)
#define _mm512_reduce_pd(A, B) \
- (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_reduce_pd(W, U, A, B) \
- (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
- (__v8df)(__m512d)(W), \
- (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_reduce_pd(U, A, B) \
- (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_reduce_ps(A, B) \
- (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_reduce_ps(W, U, A, B) \
- (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
- (__v16sf)(__m512)(W), \
- (__mmask16)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_reduce_ps(U, A, B) \
- (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_reduce_round_pd(A, B, R) \
- (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_reduce_round_pd(W, U, A, B, R) \
- (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
- (__v8df)(__m512d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_reduce_round_pd(U, A, B, R) \
- (__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_reduce_round_ps(A, B, R) \
- (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_reduce_round_ps(W, U, A, B, R) \
- (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
- (__v16sf)(__m512)(W), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_reduce_round_ps(U, A, B, R) \
- (__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)))
#define _mm_reduce_ss(A, B, C) \
- (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), (__mmask8)-1, \
- (int)(C), _MM_FROUND_CUR_DIRECTION)
+ ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), (__mmask8)-1, \
+ (int)(C), _MM_FROUND_CUR_DIRECTION))
#define _mm_mask_reduce_ss(W, U, A, B, C) \
- (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(C), _MM_FROUND_CUR_DIRECTION)
+ ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(C), _MM_FROUND_CUR_DIRECTION))
#define _mm_maskz_reduce_ss(U, A, B, C) \
- (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(C), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(C), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_reduce_round_ss(A, B, C, R) \
- (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), (__mmask8)-1, \
- (int)(C), (int)(R))
+ ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), (__mmask8)-1, \
+ (int)(C), (int)(R)))
#define _mm_mask_reduce_round_ss(W, U, A, B, C, R) \
- (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(C), (int)(R))
+ ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(C), (int)(R)))
#define _mm_maskz_reduce_round_ss(U, A, B, C, R) \
- (__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(C), (int)(R))
+ ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(C), (int)(R)))
#define _mm_reduce_sd(A, B, C) \
- (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(C), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(C), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_mask_reduce_sd(W, U, A, B, C) \
- (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), (__mmask8)(U), \
- (int)(C), _MM_FROUND_CUR_DIRECTION)
+ ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), (__mmask8)(U), \
+ (int)(C), _MM_FROUND_CUR_DIRECTION))
#define _mm_maskz_reduce_sd(U, A, B, C) \
- (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(C), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(C), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_reduce_round_sd(A, B, C, R) \
- (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(C), (int)(R))
+ ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(C), (int)(R)))
#define _mm_mask_reduce_round_sd(W, U, A, B, C, R) \
- (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), (__mmask8)(U), \
- (int)(C), (int)(R))
+ ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), (__mmask8)(U), \
+ (int)(C), (int)(R)))
#define _mm_maskz_reduce_round_sd(U, A, B, C, R) \
- (__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(C), (int)(R))
+ ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(C), (int)(R)))
static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
_mm512_movepi32_mask (__m512i __A)
@@ -1218,158 +1220,158 @@ _mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A)
}
#define _mm512_extractf32x8_ps(A, imm) \
- (__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
- (__v8sf)_mm256_undefined_ps(), \
- (__mmask8)-1)
+ ((__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v8sf)_mm256_undefined_ps(), \
+ (__mmask8)-1))
#define _mm512_mask_extractf32x8_ps(W, U, A, imm) \
- (__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
- (__v8sf)(__m256)(W), \
- (__mmask8)(U))
+ ((__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v8sf)(__m256)(W), \
+ (__mmask8)(U)))
#define _mm512_maskz_extractf32x8_ps(U, A, imm) \
- (__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
- (__v8sf)_mm256_setzero_ps(), \
- (__mmask8)(U))
+ ((__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)(U)))
#define _mm512_extractf64x2_pd(A, imm) \
- (__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
- (int)(imm), \
- (__v2df)_mm_undefined_pd(), \
- (__mmask8)-1)
+ ((__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
+ (int)(imm), \
+ (__v2df)_mm_undefined_pd(), \
+ (__mmask8)-1))
#define _mm512_mask_extractf64x2_pd(W, U, A, imm) \
- (__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
- (int)(imm), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U))
+ ((__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
+ (int)(imm), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U)))
#define _mm512_maskz_extractf64x2_pd(U, A, imm) \
- (__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
- (int)(imm), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U))
+ ((__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
+ (int)(imm), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U)))
#define _mm512_extracti32x8_epi32(A, imm) \
- (__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
- (__v8si)_mm256_undefined_si256(), \
- (__mmask8)-1)
+ ((__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
+ (__v8si)_mm256_undefined_si256(), \
+ (__mmask8)-1))
#define _mm512_mask_extracti32x8_epi32(W, U, A, imm) \
- (__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
- (__v8si)(__m256i)(W), \
- (__mmask8)(U))
+ ((__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
+ (__v8si)(__m256i)(W), \
+ (__mmask8)(U)))
#define _mm512_maskz_extracti32x8_epi32(U, A, imm) \
- (__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)(U))
+ ((__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(U)))
#define _mm512_extracti64x2_epi64(A, imm) \
- (__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
+ ((__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
(int)(imm), \
(__v2di)_mm_undefined_si128(), \
- (__mmask8)-1)
+ (__mmask8)-1))
#define _mm512_mask_extracti64x2_epi64(W, U, A, imm) \
- (__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
- (int)(imm), \
- (__v2di)(__m128i)(W), \
- (__mmask8)(U))
+ ((__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
+ (int)(imm), \
+ (__v2di)(__m128i)(W), \
+ (__mmask8)(U)))
#define _mm512_maskz_extracti64x2_epi64(U, A, imm) \
- (__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
- (int)(imm), \
- (__v2di)_mm_setzero_si128(), \
- (__mmask8)(U))
+ ((__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
+ (int)(imm), \
+ (__v2di)_mm_setzero_si128(), \
+ (__mmask8)(U)))
#define _mm512_insertf32x8(A, B, imm) \
- (__m512)__builtin_ia32_insertf32x8((__v16sf)(__m512)(A), \
- (__v8sf)(__m256)(B), (int)(imm))
+ ((__m512)__builtin_ia32_insertf32x8((__v16sf)(__m512)(A), \
+ (__v8sf)(__m256)(B), (int)(imm)))
#define _mm512_mask_insertf32x8(W, U, A, B, imm) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_insertf32x8((A), (B), (imm)), \
- (__v16sf)(__m512)(W))
+ (__v16sf)(__m512)(W)))
#define _mm512_maskz_insertf32x8(U, A, B, imm) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_insertf32x8((A), (B), (imm)), \
- (__v16sf)_mm512_setzero_ps())
+ (__v16sf)_mm512_setzero_ps()))
#define _mm512_insertf64x2(A, B, imm) \
- (__m512d)__builtin_ia32_insertf64x2_512((__v8df)(__m512d)(A), \
- (__v2df)(__m128d)(B), (int)(imm))
+ ((__m512d)__builtin_ia32_insertf64x2_512((__v8df)(__m512d)(A), \
+ (__v2df)(__m128d)(B), (int)(imm)))
#define _mm512_mask_insertf64x2(W, U, A, B, imm) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_insertf64x2((A), (B), (imm)), \
- (__v8df)(__m512d)(W))
+ (__v8df)(__m512d)(W)))
#define _mm512_maskz_insertf64x2(U, A, B, imm) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_insertf64x2((A), (B), (imm)), \
- (__v8df)_mm512_setzero_pd())
+ (__v8df)_mm512_setzero_pd()))
#define _mm512_inserti32x8(A, B, imm) \
- (__m512i)__builtin_ia32_inserti32x8((__v16si)(__m512i)(A), \
- (__v8si)(__m256i)(B), (int)(imm))
+ ((__m512i)__builtin_ia32_inserti32x8((__v16si)(__m512i)(A), \
+ (__v8si)(__m256i)(B), (int)(imm)))
#define _mm512_mask_inserti32x8(W, U, A, B, imm) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
(__v16si)_mm512_inserti32x8((A), (B), (imm)), \
- (__v16si)(__m512i)(W))
+ (__v16si)(__m512i)(W)))
#define _mm512_maskz_inserti32x8(U, A, B, imm) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
(__v16si)_mm512_inserti32x8((A), (B), (imm)), \
- (__v16si)_mm512_setzero_si512())
+ (__v16si)_mm512_setzero_si512()))
#define _mm512_inserti64x2(A, B, imm) \
- (__m512i)__builtin_ia32_inserti64x2_512((__v8di)(__m512i)(A), \
- (__v2di)(__m128i)(B), (int)(imm))
+ ((__m512i)__builtin_ia32_inserti64x2_512((__v8di)(__m512i)(A), \
+ (__v2di)(__m128i)(B), (int)(imm)))
#define _mm512_mask_inserti64x2(W, U, A, B, imm) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
(__v8di)_mm512_inserti64x2((A), (B), (imm)), \
- (__v8di)(__m512i)(W))
+ (__v8di)(__m512i)(W)))
#define _mm512_maskz_inserti64x2(U, A, B, imm) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
(__v8di)_mm512_inserti64x2((A), (B), (imm)), \
- (__v8di)_mm512_setzero_si512())
+ (__v8di)_mm512_setzero_si512()))
#define _mm512_mask_fpclass_ps_mask(U, A, imm) \
- (__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \
- (int)(imm), (__mmask16)(U))
+ ((__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \
+ (int)(imm), (__mmask16)(U)))
#define _mm512_fpclass_ps_mask(A, imm) \
- (__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \
- (int)(imm), (__mmask16)-1)
+ ((__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \
+ (int)(imm), (__mmask16)-1))
#define _mm512_mask_fpclass_pd_mask(U, A, imm) \
- (__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(A), (int)(imm), \
- (__mmask8)(U))
+ ((__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(A), (int)(imm), \
+ (__mmask8)(U)))
#define _mm512_fpclass_pd_mask(A, imm) \
- (__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(A), (int)(imm), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(A), (int)(imm), \
+ (__mmask8)-1))
#define _mm_fpclass_sd_mask(A, imm) \
- (__mmask8)__builtin_ia32_fpclasssd_mask((__v2df)(__m128d)(A), (int)(imm), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_fpclasssd_mask((__v2df)(__m128d)(A), (int)(imm), \
+ (__mmask8)-1))
#define _mm_mask_fpclass_sd_mask(U, A, imm) \
- (__mmask8)__builtin_ia32_fpclasssd_mask((__v2df)(__m128d)(A), (int)(imm), \
- (__mmask8)(U))
+ ((__mmask8)__builtin_ia32_fpclasssd_mask((__v2df)(__m128d)(A), (int)(imm), \
+ (__mmask8)(U)))
#define _mm_fpclass_ss_mask(A, imm) \
- (__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \
+ (__mmask8)-1))
#define _mm_mask_fpclass_ss_mask(U, A, imm) \
- (__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \
- (__mmask8)(U))
+ ((__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \
+ (__mmask8)(U)))
#undef __DEFAULT_FN_ATTRS512
#undef __DEFAULT_FN_ATTRS
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512erintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512erintrin.h
index 857006169906..1c5a2d2d208f 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512erintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512erintrin.h
@@ -15,19 +15,19 @@
/* exp2a23 */
#define _mm512_exp2a23_round_pd(A, R) \
- (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_exp2a23_round_pd(S, M, A, R) \
- (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(S), (__mmask8)(M), \
- (int)(R))
+ ((__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(S), (__mmask8)(M), \
+ (int)(R)))
#define _mm512_maskz_exp2a23_round_pd(M, A, R) \
- (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(M), (int)(R))
+ ((__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(M), (int)(R)))
#define _mm512_exp2a23_pd(A) \
_mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION)
@@ -39,19 +39,19 @@
_mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
#define _mm512_exp2a23_round_ps(A, R) \
- (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_exp2a23_round_ps(S, M, A, R) \
- (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(S), (__mmask16)(M), \
- (int)(R))
+ ((__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(S), (__mmask16)(M), \
+ (int)(R)))
#define _mm512_maskz_exp2a23_round_ps(M, A, R) \
- (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(M), (int)(R))
+ ((__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(M), (int)(R)))
#define _mm512_exp2a23_ps(A) \
_mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION)
@@ -64,19 +64,19 @@
/* rsqrt28 */
#define _mm512_rsqrt28_round_pd(A, R) \
- (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_rsqrt28_round_pd(S, M, A, R) \
- (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(S), (__mmask8)(M), \
- (int)(R))
+ ((__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(S), (__mmask8)(M), \
+ (int)(R)))
#define _mm512_maskz_rsqrt28_round_pd(M, A, R) \
- (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(M), (int)(R))
+ ((__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(M), (int)(R)))
#define _mm512_rsqrt28_pd(A) \
_mm512_rsqrt28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
@@ -88,19 +88,19 @@
_mm512_maskz_rsqrt28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
#define _mm512_rsqrt28_round_ps(A, R) \
- (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_rsqrt28_round_ps(S, M, A, R) \
- (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(S), (__mmask16)(M), \
- (int)(R))
+ ((__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(S), (__mmask16)(M), \
+ (int)(R)))
#define _mm512_maskz_rsqrt28_round_ps(M, A, R) \
- (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(M), (int)(R))
+ ((__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(M), (int)(R)))
#define _mm512_rsqrt28_ps(A) \
_mm512_rsqrt28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
@@ -112,22 +112,22 @@
_mm512_maskz_rsqrt28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
#define _mm_rsqrt28_round_ss(A, B, R) \
- (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_rsqrt28_round_ss(S, M, A, B, R) \
- (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(S), \
- (__mmask8)(M), (int)(R))
+ ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(S), \
+ (__mmask8)(M), (int)(R)))
#define _mm_maskz_rsqrt28_round_ss(M, A, B, R) \
- (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(M), (int)(R))
+ ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(M), (int)(R)))
#define _mm_rsqrt28_ss(A, B) \
_mm_rsqrt28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
@@ -139,22 +139,22 @@
_mm_maskz_rsqrt28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
#define _mm_rsqrt28_round_sd(A, B, R) \
- (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_rsqrt28_round_sd(S, M, A, B, R) \
- (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(S), \
- (__mmask8)(M), (int)(R))
+ ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(S), \
+ (__mmask8)(M), (int)(R)))
#define _mm_maskz_rsqrt28_round_sd(M, A, B, R) \
- (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(M), (int)(R))
+ ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(M), (int)(R)))
#define _mm_rsqrt28_sd(A, B) \
_mm_rsqrt28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
@@ -167,19 +167,19 @@
/* rcp28 */
#define _mm512_rcp28_round_pd(A, R) \
- (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_rcp28_round_pd(S, M, A, R) \
- (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(S), (__mmask8)(M), \
- (int)(R))
+ ((__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(S), (__mmask8)(M), \
+ (int)(R)))
#define _mm512_maskz_rcp28_round_pd(M, A, R) \
- (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(M), (int)(R))
+ ((__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(M), (int)(R)))
#define _mm512_rcp28_pd(A) \
_mm512_rcp28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
@@ -191,19 +191,19 @@
_mm512_maskz_rcp28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
#define _mm512_rcp28_round_ps(A, R) \
- (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_rcp28_round_ps(S, M, A, R) \
- (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(S), (__mmask16)(M), \
- (int)(R))
+ ((__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(S), (__mmask16)(M), \
+ (int)(R)))
#define _mm512_maskz_rcp28_round_ps(M, A, R) \
- (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(M), (int)(R))
+ ((__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(M), (int)(R)))
#define _mm512_rcp28_ps(A) \
_mm512_rcp28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
@@ -215,22 +215,22 @@
_mm512_maskz_rcp28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
#define _mm_rcp28_round_ss(A, B, R) \
- (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_rcp28_round_ss(S, M, A, B, R) \
- (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(S), \
- (__mmask8)(M), (int)(R))
+ ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(S), \
+ (__mmask8)(M), (int)(R)))
#define _mm_maskz_rcp28_round_ss(M, A, B, R) \
- (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(M), (int)(R))
+ ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(M), (int)(R)))
#define _mm_rcp28_ss(A, B) \
_mm_rcp28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
@@ -242,22 +242,22 @@
_mm_maskz_rcp28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION)
#define _mm_rcp28_round_sd(A, B, R) \
- (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_rcp28_round_sd(S, M, A, B, R) \
- (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(S), \
- (__mmask8)(M), (int)(R))
+ ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(S), \
+ (__mmask8)(M), (int)(R)))
#define _mm_maskz_rcp28_round_sd(M, A, B, R) \
- (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(M), (int)(R))
+ ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(M), (int)(R)))
#define _mm_rcp28_sd(A, B) \
_mm_rcp28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h
index 010bcadab019..4f172c74b31c 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h
@@ -26,6 +26,10 @@ typedef unsigned short __v32hu __attribute__((__vector_size__(64)));
typedef unsigned long long __v8du __attribute__((__vector_size__(64)));
typedef unsigned int __v16su __attribute__((__vector_size__(64)));
+/* We need an explicitly signed variant for char. Note that this shouldn't
+ * appear in the interface though. */
+typedef signed char __v64qs __attribute__((__vector_size__(64)));
+
typedef float __m512 __attribute__((__vector_size__(64), __aligned__(64)));
typedef double __m512d __attribute__((__vector_size__(64), __aligned__(64)));
typedef long long __m512i __attribute__((__vector_size__(64), __aligned__(64)));
@@ -163,9 +167,13 @@ typedef enum
} _MM_MANTISSA_SIGN_ENUM;
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(512)))
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512f"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512f")))
+#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512f,evex512"), __min_vector_width__(512)))
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512f,no-evex512"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512f,no-evex512")))
/* Create vectors with repeated elements */
@@ -252,8 +260,8 @@ _mm512_maskz_broadcastq_epi64 (__mmask8 __M, __m128i __A)
static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_setzero_ps(void)
{
- return __extension__ (__m512){ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
- 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
+ return __extension__ (__m512){ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f };
}
#define _mm512_setzero _mm512_setzero_ps
@@ -393,14 +401,15 @@ _mm512_broadcastsd_pd(__m128d __A)
static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_castpd256_pd512(__m256d __a)
{
- return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, -1, -1, -1, -1);
+ return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), 0,
+ 1, 2, 3, 4, 5, 6, 7);
}
static __inline __m512 __DEFAULT_FN_ATTRS512
_mm512_castps256_ps512(__m256 __a)
{
- return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7,
- -1, -1, -1, -1, -1, -1, -1, -1);
+ return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), 0,
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
}
static __inline __m128d __DEFAULT_FN_ATTRS512
@@ -442,7 +451,10 @@ _mm512_castpd_si512 (__m512d __A)
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_castpd128_pd512 (__m128d __A)
{
- return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
+ __m256d __B = __builtin_nondeterministic_value(__B);
+ return __builtin_shufflevector(
+ __builtin_shufflevector(__A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3),
+ __B, 0, 1, 2, 3, 4, 5, 6, 7);
}
static __inline __m512d __DEFAULT_FN_ATTRS512
@@ -460,19 +472,25 @@ _mm512_castps_si512 (__m512 __A)
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_castps128_ps512 (__m128 __A)
{
- return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1);
+ __m256 __B = __builtin_nondeterministic_value(__B);
+ return __builtin_shufflevector(
+ __builtin_shufflevector(__A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3, 4, 5, 6, 7),
+ __B, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_castsi128_si512 (__m128i __A)
{
- return __builtin_shufflevector( __A, __A, 0, 1, -1, -1, -1, -1, -1, -1);
+ __m256i __B = __builtin_nondeterministic_value(__B);
+ return __builtin_shufflevector(
+ __builtin_shufflevector(__A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3),
+ __B, 0, 1, 2, 3, 4, 5, 6, 7);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_castsi256_si512 (__m256i __A)
{
- return __builtin_shufflevector( __A, __A, 0, 1, 2, 3, -1, -1, -1, -1);
+ return __builtin_shufflevector( __A, __builtin_nondeterministic_value(__A), 0, 1, 2, 3, 4, 5, 6, 7);
}
static __inline __m512 __DEFAULT_FN_ATTRS512
@@ -937,18 +955,18 @@ _mm512_maskz_sub_epi32(__mmask16 __U, __m512i __A, __m512i __B)
}
#define _mm512_max_round_pd(A, B, R) \
- (__m512d)__builtin_ia32_maxpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(R))
+ ((__m512d)__builtin_ia32_maxpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(R)))
#define _mm512_mask_max_round_pd(W, U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_max_round_pd((A), (B), (R)), \
- (__v8df)(W))
+ (__v8df)(W)))
#define _mm512_maskz_max_round_pd(U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_max_round_pd((A), (B), (R)), \
- (__v8df)_mm512_setzero_pd())
+ (__v8df)_mm512_setzero_pd()))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_max_pd(__m512d __A, __m512d __B)
@@ -974,18 +992,18 @@ _mm512_maskz_max_pd (__mmask8 __U, __m512d __A, __m512d __B)
}
#define _mm512_max_round_ps(A, B, R) \
- (__m512)__builtin_ia32_maxps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(R))
+ ((__m512)__builtin_ia32_maxps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(R)))
#define _mm512_mask_max_round_ps(W, U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_max_round_ps((A), (B), (R)), \
- (__v16sf)(W))
+ (__v16sf)(W)))
#define _mm512_maskz_max_round_ps(U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_max_round_ps((A), (B), (R)), \
- (__v16sf)_mm512_setzero_ps())
+ (__v16sf)_mm512_setzero_ps()))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_max_ps(__m512 __A, __m512 __B)
@@ -1029,22 +1047,22 @@ _mm_maskz_max_ss(__mmask8 __U,__m128 __A, __m128 __B) {
}
#define _mm_max_round_ss(A, B, R) \
- (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_max_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)))
#define _mm_maskz_max_round_ss(U, A, B, R) \
- (__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_maxss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_max_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
@@ -1065,28 +1083,28 @@ _mm_maskz_max_sd(__mmask8 __U,__m128d __A, __m128d __B) {
}
#define _mm_max_round_sd(A, B, R) \
- (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_max_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm_maskz_max_round_sd(U, A, B, R) \
- (__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_maxsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
static __inline __m512i
__DEFAULT_FN_ATTRS512
_mm512_max_epi32(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pmaxsd512((__v16si)__A, (__v16si)__B);
+ return (__m512i)__builtin_elementwise_max((__v16si)__A, (__v16si)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1108,7 +1126,7 @@ _mm512_maskz_max_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epu32(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pmaxud512((__v16si)__A, (__v16si)__B);
+ return (__m512i)__builtin_elementwise_max((__v16su)__A, (__v16su)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1130,7 +1148,7 @@ _mm512_maskz_max_epu32 (__mmask16 __M, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epi64(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pmaxsq512((__v8di)__A, (__v8di)__B);
+ return (__m512i)__builtin_elementwise_max((__v8di)__A, (__v8di)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1152,7 +1170,7 @@ _mm512_maskz_max_epi64 (__mmask8 __M, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_max_epu64(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pmaxuq512((__v8di)__A, (__v8di)__B);
+ return (__m512i)__builtin_elementwise_max((__v8du)__A, (__v8du)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1172,18 +1190,18 @@ _mm512_maskz_max_epu64 (__mmask8 __M, __m512i __A, __m512i __B)
}
#define _mm512_min_round_pd(A, B, R) \
- (__m512d)__builtin_ia32_minpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(R))
+ ((__m512d)__builtin_ia32_minpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(R)))
#define _mm512_mask_min_round_pd(W, U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_min_round_pd((A), (B), (R)), \
- (__v8df)(W))
+ (__v8df)(W)))
#define _mm512_maskz_min_round_pd(U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_min_round_pd((A), (B), (R)), \
- (__v8df)_mm512_setzero_pd())
+ (__v8df)_mm512_setzero_pd()))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_min_pd(__m512d __A, __m512d __B)
@@ -1209,18 +1227,18 @@ _mm512_maskz_min_pd (__mmask8 __U, __m512d __A, __m512d __B)
}
#define _mm512_min_round_ps(A, B, R) \
- (__m512)__builtin_ia32_minps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(R))
+ ((__m512)__builtin_ia32_minps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(R)))
#define _mm512_mask_min_round_ps(W, U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_min_round_ps((A), (B), (R)), \
- (__v16sf)(W))
+ (__v16sf)(W)))
#define _mm512_maskz_min_round_ps(U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_min_round_ps((A), (B), (R)), \
- (__v16sf)_mm512_setzero_ps())
+ (__v16sf)_mm512_setzero_ps()))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_min_ps(__m512 __A, __m512 __B)
@@ -1264,22 +1282,22 @@ _mm_maskz_min_ss(__mmask8 __U,__m128 __A, __m128 __B) {
}
#define _mm_min_round_ss(A, B, R) \
- (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_min_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)))
#define _mm_maskz_min_round_ss(U, A, B, R) \
- (__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_minss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_min_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
@@ -1300,28 +1318,28 @@ _mm_maskz_min_sd(__mmask8 __U,__m128d __A, __m128d __B) {
}
#define _mm_min_round_sd(A, B, R) \
- (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_min_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm_maskz_min_round_sd(U, A, B, R) \
- (__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_minsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
static __inline __m512i
__DEFAULT_FN_ATTRS512
_mm512_min_epi32(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pminsd512((__v16si)__A, (__v16si)__B);
+ return (__m512i)__builtin_elementwise_min((__v16si)__A, (__v16si)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1343,7 +1361,7 @@ _mm512_maskz_min_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epu32(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pminud512((__v16si)__A, (__v16si)__B);
+ return (__m512i)__builtin_elementwise_min((__v16su)__A, (__v16su)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1365,7 +1383,7 @@ _mm512_maskz_min_epu32 (__mmask16 __M, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epi64(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pminsq512((__v8di)__A, (__v8di)__B);
+ return (__m512i)__builtin_elementwise_min((__v8di)__A, (__v8di)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1387,7 +1405,7 @@ _mm512_maskz_min_epi64 (__mmask8 __M, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_min_epu64(__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_pminuq512((__v8di)__A, (__v8di)__B);
+ return (__m512i)__builtin_elementwise_min((__v8du)__A, (__v8du)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1485,17 +1503,17 @@ _mm512_mask_mullox_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
}
#define _mm512_sqrt_round_pd(A, R) \
- (__m512d)__builtin_ia32_sqrtpd512((__v8df)(__m512d)(A), (int)(R))
+ ((__m512d)__builtin_ia32_sqrtpd512((__v8df)(__m512d)(A), (int)(R)))
#define _mm512_mask_sqrt_round_pd(W, U, A, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_sqrt_round_pd((A), (R)), \
- (__v8df)(__m512d)(W))
+ (__v8df)(__m512d)(W)))
#define _mm512_maskz_sqrt_round_pd(U, A, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_sqrt_round_pd((A), (R)), \
- (__v8df)_mm512_setzero_pd())
+ (__v8df)_mm512_setzero_pd()))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_sqrt_pd(__m512d __A)
@@ -1521,17 +1539,17 @@ _mm512_maskz_sqrt_pd (__mmask8 __U, __m512d __A)
}
#define _mm512_sqrt_round_ps(A, R) \
- (__m512)__builtin_ia32_sqrtps512((__v16sf)(__m512)(A), (int)(R))
+ ((__m512)__builtin_ia32_sqrtps512((__v16sf)(__m512)(A), (int)(R)))
#define _mm512_mask_sqrt_round_ps(W, U, A, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_sqrt_round_ps((A), (R)), \
- (__v16sf)(__m512)(W))
+ (__v16sf)(__m512)(W)))
#define _mm512_maskz_sqrt_round_ps(U, A, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_sqrt_round_ps((A), (R)), \
- (__v16sf)_mm512_setzero_ps())
+ (__v16sf)_mm512_setzero_ps()))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_sqrt_ps(__m512 __A)
@@ -1776,7 +1794,7 @@ _mm512_floor_ps(__m512 __A)
{
return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
_MM_FROUND_FLOOR,
- (__v16sf) __A, -1,
+ (__v16sf) __A, (unsigned short)-1,
_MM_FROUND_CUR_DIRECTION);
}
@@ -1794,7 +1812,7 @@ _mm512_floor_pd(__m512d __A)
{
return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
_MM_FROUND_FLOOR,
- (__v8df) __A, -1,
+ (__v8df) __A, (unsigned char)-1,
_MM_FROUND_CUR_DIRECTION);
}
@@ -1821,7 +1839,7 @@ _mm512_ceil_ps(__m512 __A)
{
return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
_MM_FROUND_CEIL,
- (__v16sf) __A, -1,
+ (__v16sf) __A, (unsigned short)-1,
_MM_FROUND_CUR_DIRECTION);
}
@@ -1830,7 +1848,7 @@ _mm512_ceil_pd(__m512d __A)
{
return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
_MM_FROUND_CEIL,
- (__v8df) __A, -1,
+ (__v8df) __A, (unsigned char)-1,
_MM_FROUND_CUR_DIRECTION);
}
@@ -1846,7 +1864,7 @@ _mm512_mask_ceil_pd (__m512d __W, __mmask8 __U, __m512d __A)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_abs_epi64(__m512i __A)
{
- return (__m512i)__builtin_ia32_pabsq512((__v8di)__A);
+ return (__m512i)__builtin_elementwise_abs((__v8di)__A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1868,7 +1886,7 @@ _mm512_maskz_abs_epi64 (__mmask8 __U, __m512i __A)
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_abs_epi32(__m512i __A)
{
- return (__m512i)__builtin_ia32_pabsd512((__v16si) __A);
+ return (__m512i)__builtin_elementwise_abs((__v16si) __A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1900,22 +1918,22 @@ _mm_maskz_add_ss(__mmask8 __U,__m128 __A, __m128 __B) {
}
#define _mm_add_round_ss(A, B, R) \
- (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_add_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)))
#define _mm_maskz_add_round_ss(U, A, B, R) \
- (__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_addss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_add_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
@@ -1929,22 +1947,22 @@ _mm_maskz_add_sd(__mmask8 __U,__m128d __A, __m128d __B) {
return __builtin_ia32_selectsd_128(__U, __A, _mm_setzero_pd());
}
#define _mm_add_round_sd(A, B, R) \
- (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_add_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm_maskz_add_round_sd(U, A, B, R) \
- (__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_addsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_add_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
@@ -1975,32 +1993,32 @@ _mm512_maskz_add_ps(__mmask16 __U, __m512 __A, __m512 __B) {
}
#define _mm512_add_round_pd(A, B, R) \
- (__m512d)__builtin_ia32_addpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(R))
+ ((__m512d)__builtin_ia32_addpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(R)))
#define _mm512_mask_add_round_pd(W, U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_add_round_pd((A), (B), (R)), \
- (__v8df)(__m512d)(W))
+ (__v8df)(__m512d)(W)))
#define _mm512_maskz_add_round_pd(U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_add_round_pd((A), (B), (R)), \
- (__v8df)_mm512_setzero_pd())
+ (__v8df)_mm512_setzero_pd()))
#define _mm512_add_round_ps(A, B, R) \
- (__m512)__builtin_ia32_addps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(R))
+ ((__m512)__builtin_ia32_addps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(R)))
#define _mm512_mask_add_round_ps(W, U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_add_round_ps((A), (B), (R)), \
- (__v16sf)(__m512)(W))
+ (__v16sf)(__m512)(W)))
#define _mm512_maskz_add_round_ps(U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_add_round_ps((A), (B), (R)), \
- (__v16sf)_mm512_setzero_ps())
+ (__v16sf)_mm512_setzero_ps()))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_sub_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
@@ -2014,22 +2032,22 @@ _mm_maskz_sub_ss(__mmask8 __U,__m128 __A, __m128 __B) {
return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
}
#define _mm_sub_round_ss(A, B, R) \
- (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_sub_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)))
#define _mm_maskz_sub_round_ss(U, A, B, R) \
- (__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_subss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_sub_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
@@ -2044,22 +2062,22 @@ _mm_maskz_sub_sd(__mmask8 __U,__m128d __A, __m128d __B) {
}
#define _mm_sub_round_sd(A, B, R) \
- (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_sub_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm_maskz_sub_round_sd(U, A, B, R) \
- (__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_subsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_sub_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
@@ -2090,32 +2108,32 @@ _mm512_maskz_sub_ps(__mmask16 __U, __m512 __A, __m512 __B) {
}
#define _mm512_sub_round_pd(A, B, R) \
- (__m512d)__builtin_ia32_subpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(R))
+ ((__m512d)__builtin_ia32_subpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(R)))
#define _mm512_mask_sub_round_pd(W, U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_sub_round_pd((A), (B), (R)), \
- (__v8df)(__m512d)(W))
+ (__v8df)(__m512d)(W)))
#define _mm512_maskz_sub_round_pd(U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_sub_round_pd((A), (B), (R)), \
- (__v8df)_mm512_setzero_pd())
+ (__v8df)_mm512_setzero_pd()))
#define _mm512_sub_round_ps(A, B, R) \
- (__m512)__builtin_ia32_subps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(R))
+ ((__m512)__builtin_ia32_subps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(R)))
#define _mm512_mask_sub_round_ps(W, U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_sub_round_ps((A), (B), (R)), \
- (__v16sf)(__m512)(W))
+ (__v16sf)(__m512)(W)))
#define _mm512_maskz_sub_round_ps(U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_sub_round_ps((A), (B), (R)), \
- (__v16sf)_mm512_setzero_ps())
+ (__v16sf)_mm512_setzero_ps()))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_mul_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
@@ -2129,22 +2147,22 @@ _mm_maskz_mul_ss(__mmask8 __U,__m128 __A, __m128 __B) {
return __builtin_ia32_selectss_128(__U, __A, _mm_setzero_ps());
}
#define _mm_mul_round_ss(A, B, R) \
- (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_mul_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)))
#define _mm_maskz_mul_round_ss(U, A, B, R) \
- (__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_mulss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_mul_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
@@ -2159,22 +2177,22 @@ _mm_maskz_mul_sd(__mmask8 __U,__m128d __A, __m128d __B) {
}
#define _mm_mul_round_sd(A, B, R) \
- (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_mul_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm_maskz_mul_round_sd(U, A, B, R) \
- (__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_mulsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_mul_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
@@ -2205,32 +2223,32 @@ _mm512_maskz_mul_ps(__mmask16 __U, __m512 __A, __m512 __B) {
}
#define _mm512_mul_round_pd(A, B, R) \
- (__m512d)__builtin_ia32_mulpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(R))
+ ((__m512d)__builtin_ia32_mulpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(R)))
#define _mm512_mask_mul_round_pd(W, U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_mul_round_pd((A), (B), (R)), \
- (__v8df)(__m512d)(W))
+ (__v8df)(__m512d)(W)))
#define _mm512_maskz_mul_round_pd(U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_mul_round_pd((A), (B), (R)), \
- (__v8df)_mm512_setzero_pd())
+ (__v8df)_mm512_setzero_pd()))
#define _mm512_mul_round_ps(A, B, R) \
- (__m512)__builtin_ia32_mulps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(R))
+ ((__m512)__builtin_ia32_mulps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(R)))
#define _mm512_mask_mul_round_ps(W, U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_mul_round_ps((A), (B), (R)), \
- (__v16sf)(__m512)(W))
+ (__v16sf)(__m512)(W)))
#define _mm512_maskz_mul_round_ps(U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_mul_round_ps((A), (B), (R)), \
- (__v16sf)_mm512_setzero_ps())
+ (__v16sf)_mm512_setzero_ps()))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_div_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
@@ -2245,22 +2263,22 @@ _mm_maskz_div_ss(__mmask8 __U,__m128 __A, __m128 __B) {
}
#define _mm_div_round_ss(A, B, R) \
- (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_div_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)))
#define _mm_maskz_div_round_ss(U, A, B, R) \
- (__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_divss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_div_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
@@ -2275,22 +2293,22 @@ _mm_maskz_div_sd(__mmask8 __U,__m128d __A, __m128d __B) {
}
#define _mm_div_round_sd(A, B, R) \
- (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_div_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm_maskz_div_round_sd(U, A, B, R) \
- (__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_divsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
static __inline __m512d __DEFAULT_FN_ATTRS512
_mm512_div_pd(__m512d __a, __m512d __b)
@@ -2333,179 +2351,179 @@ _mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B) {
}
#define _mm512_div_round_pd(A, B, R) \
- (__m512d)__builtin_ia32_divpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(R))
+ ((__m512d)__builtin_ia32_divpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(R)))
#define _mm512_mask_div_round_pd(W, U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_div_round_pd((A), (B), (R)), \
- (__v8df)(__m512d)(W))
+ (__v8df)(__m512d)(W)))
#define _mm512_maskz_div_round_pd(U, A, B, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
(__v8df)_mm512_div_round_pd((A), (B), (R)), \
- (__v8df)_mm512_setzero_pd())
+ (__v8df)_mm512_setzero_pd()))
#define _mm512_div_round_ps(A, B, R) \
- (__m512)__builtin_ia32_divps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(R))
+ ((__m512)__builtin_ia32_divps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(R)))
#define _mm512_mask_div_round_ps(W, U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_div_round_ps((A), (B), (R)), \
- (__v16sf)(__m512)(W))
+ (__v16sf)(__m512)(W)))
#define _mm512_maskz_div_round_ps(U, A, B, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
(__v16sf)_mm512_div_round_ps((A), (B), (R)), \
- (__v16sf)_mm512_setzero_ps())
+ (__v16sf)_mm512_setzero_ps()))
#define _mm512_roundscale_ps(A, B) \
- (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(B), \
- (__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(B), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_roundscale_ps(A, B, C, imm) \
- (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
+ ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
(__v16sf)(__m512)(A), (__mmask16)(B), \
- _MM_FROUND_CUR_DIRECTION)
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_roundscale_ps(A, B, imm) \
- (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(A), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(A), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_roundscale_round_ps(A, B, C, imm, R) \
- (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
+ ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(C), (int)(imm), \
(__v16sf)(__m512)(A), (__mmask16)(B), \
- (int)(R))
+ (int)(R)))
#define _mm512_maskz_roundscale_round_ps(A, B, imm, R) \
- (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(A), (int)(R))
+ ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(B), (int)(imm), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(A), (int)(R)))
#define _mm512_roundscale_round_ps(A, imm, R) \
- (__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(imm), \
- (__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_roundscale_pd(A, B) \
- (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(B), \
- (__v8df)_mm512_undefined_pd(), \
- (__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(B), \
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_roundscale_pd(A, B, C, imm) \
- (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
+ ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
(__v8df)(__m512d)(A), (__mmask8)(B), \
- _MM_FROUND_CUR_DIRECTION)
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_roundscale_pd(A, B, imm) \
- (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(A), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(A), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_roundscale_round_pd(A, B, C, imm, R) \
- (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
+ ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(C), (int)(imm), \
(__v8df)(__m512d)(A), (__mmask8)(B), \
- (int)(R))
+ (int)(R)))
#define _mm512_maskz_roundscale_round_pd(A, B, imm, R) \
- (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(A), (int)(R))
+ ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(B), (int)(imm), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(A), (int)(R)))
#define _mm512_roundscale_round_pd(A, imm, R) \
- (__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(imm), \
- (__v8df)_mm512_undefined_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_rndscalepd_mask((__v8df)(__m512d)(A), (int)(imm), \
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_fmadd_round_pd(A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_fmadd_round_pd(A, U, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_mask3_fmadd_round_pd(A, B, C, U, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_mask3((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_mask3((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_fmadd_round_pd(U, A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_fmsub_round_pd(A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_fmsub_round_pd(A, U, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_fmsub_round_pd(U, A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_fnmadd_round_pd(A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask3_fnmadd_round_pd(A, B, C, U, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_mask3(-(__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_mask3(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_fnmadd_round_pd(U, A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_fnmsub_round_pd(A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_maskz_fnmsub_round_pd(U, A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
@@ -2629,87 +2647,87 @@ _mm512_maskz_fnmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
}
#define _mm512_fmadd_round_ps(A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_fmadd_round_ps(A, U, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_mask3_fmadd_round_ps(A, B, C, U, R) \
- (__m512)__builtin_ia32_vfmaddps512_mask3((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_mask3((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_fmadd_round_ps(U, A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_fmsub_round_ps(A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_fmsub_round_ps(A, U, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_fmsub_round_ps(U, A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_fnmadd_round_ps(A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
- -(__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ -(__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask3_fnmadd_round_ps(A, B, C, U, R) \
- (__m512)__builtin_ia32_vfmaddps512_mask3(-(__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_mask3(-(__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_fnmadd_round_ps(U, A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_fnmsub_round_ps(A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
- -(__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ -(__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_maskz_fnmsub_round_ps(U, A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
@@ -2833,52 +2851,52 @@ _mm512_maskz_fnmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
}
#define _mm512_fmaddsub_round_pd(A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_fmaddsub_round_pd(A, U, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_mask3_fmaddsub_round_pd(A, B, C, U, R) \
- (__m512d)__builtin_ia32_vfmaddsubpd512_mask3((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddsubpd512_mask3((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_fmaddsub_round_pd(U, A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_fmsubadd_round_pd(A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_fmsubadd_round_pd(A, U, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_fmsubadd_round_pd(U, A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
@@ -2952,52 +2970,52 @@ _mm512_maskz_fmsubadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
}
#define _mm512_fmaddsub_round_ps(A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_fmaddsub_round_ps(A, U, B, C, R) \
- (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_mask3_fmaddsub_round_ps(A, B, C, U, R) \
- (__m512)__builtin_ia32_vfmaddsubps512_mask3((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddsubps512_mask3((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_fmaddsub_round_ps(U, A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_fmsubadd_round_ps(A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_fmsubadd_round_ps(A, U, B, C, R) \
- (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_fmsubadd_round_ps(U, A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
@@ -3071,10 +3089,10 @@ _mm512_maskz_fmsubadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
}
#define _mm512_mask3_fmsub_round_pd(A, B, C, U, R) \
- (__m512d)__builtin_ia32_vfmsubpd512_mask3((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmsubpd512_mask3((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
@@ -3088,10 +3106,10 @@ _mm512_mask3_fmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
}
#define _mm512_mask3_fmsub_round_ps(A, B, C, U, R) \
- (__m512)__builtin_ia32_vfmsubps512_mask3((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmsubps512_mask3((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask3_fmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
@@ -3104,10 +3122,10 @@ _mm512_mask3_fmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
}
#define _mm512_mask3_fmsubadd_round_pd(A, B, C, U, R) \
- (__m512d)__builtin_ia32_vfmsubaddpd512_mask3((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmsubaddpd512_mask3((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
@@ -3121,10 +3139,10 @@ _mm512_mask3_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
}
#define _mm512_mask3_fmsubadd_round_ps(A, B, C, U, R) \
- (__m512)__builtin_ia32_vfmsubaddps512_mask3((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmsubaddps512_mask3((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
@@ -3138,10 +3156,10 @@ _mm512_mask3_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
}
#define _mm512_mask_fnmadd_round_pd(A, U, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
- -(__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ -(__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
@@ -3155,10 +3173,10 @@ _mm512_mask_fnmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
}
#define _mm512_mask_fnmadd_round_ps(A, U, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
- -(__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ -(__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
@@ -3172,17 +3190,17 @@ _mm512_mask_fnmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
}
#define _mm512_mask_fnmsub_round_pd(A, U, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
- -(__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ -(__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_mask3_fnmsub_round_pd(A, B, C, U, R) \
- (__m512d)__builtin_ia32_vfmsubpd512_mask3(-(__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_vfmsubpd512_mask3(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
@@ -3206,17 +3224,17 @@ _mm512_mask3_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
}
#define _mm512_mask_fnmsub_round_ps(A, U, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
- -(__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ -(__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_mask3_fnmsub_round_ps(A, B, C, U, R) \
- (__m512)__builtin_ia32_vfmsubps512_mask3(-(__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vfmsubps512_mask3(-(__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R)))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
@@ -3312,63 +3330,63 @@ _mm512_maskz_permutex2var_epi64(__mmask8 __U, __m512i __A, __m512i __I,
}
#define _mm512_alignr_epi64(A, B, I) \
- (__m512i)__builtin_ia32_alignq512((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), (int)(I))
+ ((__m512i)__builtin_ia32_alignq512((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), (int)(I)))
#define _mm512_mask_alignr_epi64(W, U, A, B, imm) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
- (__v8di)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
+ (__v8di)(__m512i)(W)))
#define _mm512_maskz_alignr_epi64(U, A, B, imm) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
- (__v8di)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
+ (__v8di)_mm512_setzero_si512()))
#define _mm512_alignr_epi32(A, B, I) \
- (__m512i)__builtin_ia32_alignd512((__v16si)(__m512i)(A), \
- (__v16si)(__m512i)(B), (int)(I))
+ ((__m512i)__builtin_ia32_alignd512((__v16si)(__m512i)(A), \
+ (__v16si)(__m512i)(B), (int)(I)))
#define _mm512_mask_alignr_epi32(W, U, A, B, imm) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
- (__v16si)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
+ (__v16si)(__m512i)(W)))
#define _mm512_maskz_alignr_epi32(U, A, B, imm) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
- (__v16si)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
+ (__v16si)_mm512_setzero_si512()))
/* Vector Extract */
#define _mm512_extractf64x4_pd(A, I) \
- (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
- (__v4df)_mm256_undefined_pd(), \
- (__mmask8)-1)
+ ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
+ (__v4df)_mm256_undefined_pd(), \
+ (__mmask8)-1))
#define _mm512_mask_extractf64x4_pd(W, U, A, imm) \
- (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
- (__v4df)(__m256d)(W), \
- (__mmask8)(U))
+ ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
+ (__v4df)(__m256d)(W), \
+ (__mmask8)(U)))
#define _mm512_maskz_extractf64x4_pd(U, A, imm) \
- (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
- (__v4df)_mm256_setzero_pd(), \
- (__mmask8)(U))
+ ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)(U)))
#define _mm512_extractf32x4_ps(A, I) \
- (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \
- (__v4sf)_mm_undefined_ps(), \
- (__mmask8)-1)
+ ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \
+ (__v4sf)_mm_undefined_ps(), \
+ (__mmask8)-1))
#define _mm512_mask_extractf32x4_ps(W, U, A, imm) \
- (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U))
+ ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U)))
#define _mm512_maskz_extractf32x4_ps(U, A, imm) \
- (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U))
+ ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U)))
/* Vector Blend */
@@ -3407,14 +3425,14 @@ _mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
/* Compare */
#define _mm512_cmp_round_ps_mask(A, B, P, R) \
- (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(P), \
- (__mmask16)-1, (int)(R))
+ ((__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(P), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_cmp_round_ps_mask(U, A, B, P, R) \
- (__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(P), \
- (__mmask16)(U), (int)(R))
+ ((__mmask16)__builtin_ia32_cmpps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(P), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_cmp_ps_mask(A, B, P) \
_mm512_cmp_round_ps_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
@@ -3462,14 +3480,14 @@ _mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
_mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_ORD_Q)
#define _mm512_cmp_round_pd_mask(A, B, P, R) \
- (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(P), \
- (__mmask8)-1, (int)(R))
+ ((__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(P), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cmp_round_pd_mask(U, A, B, P, R) \
- (__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(P), \
- (__mmask8)(U), (int)(R))
+ ((__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(P), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_cmp_pd_mask(A, B, P) \
_mm512_cmp_round_pd_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
@@ -3519,19 +3537,19 @@ _mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
/* Conversion */
#define _mm512_cvtt_roundps_epu32(A, R) \
- (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
- (__v16si)_mm512_undefined_epi32(), \
- (__mmask16)-1, (int)(R))
+ ((__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_undefined_epi32(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_cvtt_roundps_epu32(W, U, A, R) \
- (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
- (__v16si)(__m512i)(W), \
- (__mmask16)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)(__m512i)(W), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_cvtt_roundps_epu32(U, A, R) \
- (__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvttps2udq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)(U), (int)(R)))
static __inline __m512i __DEFAULT_FN_ATTRS512
@@ -3563,34 +3581,34 @@ _mm512_maskz_cvttps_epu32 (__mmask16 __U, __m512 __A)
}
#define _mm512_cvt_roundepi32_ps(A, R) \
- (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_cvt_roundepi32_ps(W, U, A, R) \
- (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
- (__v16sf)(__m512)(W), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_cvt_roundepi32_ps(U, A, R) \
- (__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_cvtdq2ps512_mask((__v16si)(__m512i)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_cvt_roundepu32_ps(A, R) \
- (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_cvt_roundepu32_ps(W, U, A, R) \
- (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
- (__v16sf)(__m512)(W), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_cvt_roundepu32_ps(U, A, R) \
- (__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_cvtudq2ps512_mask((__v16si)(__m512i)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_cvtepu32_ps (__m512i __A)
@@ -3705,19 +3723,19 @@ _mm512_mask_cvtepu32lo_pd(__m512d __W, __mmask8 __U,__m512i __A)
}
#define _mm512_cvt_roundpd_ps(A, R) \
- (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
- (__v8sf)_mm256_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvt_roundpd_ps(W, U, A, R) \
- (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
- (__v8sf)(__m256)(W), (__mmask8)(U), \
- (int)(R))
+ ((__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
+ (__v8sf)(__m256)(W), (__mmask8)(U), \
+ (int)(R)))
#define _mm512_maskz_cvt_roundpd_ps(U, A, R) \
- (__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
- (__v8sf)_mm256_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m256)__builtin_ia32_cvtpd2ps512_mask((__v8df)(__m512d)(A), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m256 __DEFAULT_FN_ATTRS512
_mm512_cvtpd_ps (__m512d __A)
@@ -3765,38 +3783,38 @@ _mm512_mask_cvtpd_pslo (__m512 __W, __mmask8 __U,__m512d __A)
}
#define _mm512_cvt_roundps_ph(A, I) \
- (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
- (__v16hi)_mm256_undefined_si256(), \
- (__mmask16)-1)
+ ((__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
+ (__v16hi)_mm256_undefined_si256(), \
+ (__mmask16)-1))
#define _mm512_mask_cvt_roundps_ph(U, W, A, I) \
- (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
- (__v16hi)(__m256i)(U), \
- (__mmask16)(W))
+ ((__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
+ (__v16hi)(__m256i)(U), \
+ (__mmask16)(W)))
#define _mm512_maskz_cvt_roundps_ph(W, A, I) \
- (__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
- (__v16hi)_mm256_setzero_si256(), \
- (__mmask16)(W))
+ ((__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
+ (__v16hi)_mm256_setzero_si256(), \
+ (__mmask16)(W)))
#define _mm512_cvtps_ph _mm512_cvt_roundps_ph
#define _mm512_mask_cvtps_ph _mm512_mask_cvt_roundps_ph
#define _mm512_maskz_cvtps_ph _mm512_maskz_cvt_roundps_ph
#define _mm512_cvt_roundph_ps(A, R) \
- (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
- (__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_cvt_roundph_ps(W, U, A, R) \
- (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
- (__v16sf)(__m512)(W), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_cvt_roundph_ps(U, A, R) \
- (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(__m256i)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)))
static __inline __m512 __DEFAULT_FN_ATTRS512
@@ -3828,19 +3846,19 @@ _mm512_maskz_cvtph_ps (__mmask16 __U, __m256i __A)
}
#define _mm512_cvtt_roundpd_epi32(A, R) \
- (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)-1, (int)(R))
+ ((__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvtt_roundpd_epi32(W, U, A, R) \
- (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
- (__v8si)(__m256i)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)(__m256i)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_cvtt_roundpd_epi32(U, A, R) \
- (__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)(U), (int)(R))
+ ((__m256i)__builtin_ia32_cvttpd2dq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(U), (int)(R)))
static __inline __m256i __DEFAULT_FN_ATTRS512
_mm512_cvttpd_epi32(__m512d __a)
@@ -3870,19 +3888,19 @@ _mm512_maskz_cvttpd_epi32 (__mmask8 __U, __m512d __A)
}
#define _mm512_cvtt_roundps_epi32(A, R) \
- (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)-1, (int)(R))
+ ((__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_cvtt_roundps_epi32(W, U, A, R) \
- (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
- (__v16si)(__m512i)(W), \
- (__mmask16)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)(__m512i)(W), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_cvtt_roundps_epi32(U, A, R) \
- (__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvttps2dq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)(U), (int)(R)))
static __inline __m512i __DEFAULT_FN_ATTRS512
_mm512_cvttps_epi32(__m512 __a)
@@ -3912,19 +3930,19 @@ _mm512_maskz_cvttps_epi32 (__mmask16 __U, __m512 __A)
}
#define _mm512_cvt_roundps_epi32(A, R) \
- (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)-1, (int)(R))
+ ((__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_cvt_roundps_epi32(W, U, A, R) \
- (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
- (__v16si)(__m512i)(W), \
- (__mmask16)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)(__m512i)(W), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_cvt_roundps_epi32(U, A, R) \
- (__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvtps2dq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)(U), (int)(R)))
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtps_epi32 (__m512 __A)
@@ -3955,19 +3973,19 @@ _mm512_maskz_cvtps_epi32 (__mmask16 __U, __m512 __A)
}
#define _mm512_cvt_roundpd_epi32(A, R) \
- (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)-1, (int)(R))
+ ((__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvt_roundpd_epi32(W, U, A, R) \
- (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
- (__v8si)(__m256i)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)(__m256i)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_cvt_roundpd_epi32(U, A, R) \
- (__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)(U), (int)(R))
+ ((__m256i)__builtin_ia32_cvtpd2dq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvtpd_epi32 (__m512d __A)
@@ -3999,19 +4017,19 @@ _mm512_maskz_cvtpd_epi32 (__mmask8 __U, __m512d __A)
}
#define _mm512_cvt_roundps_epu32(A, R) \
- (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)-1, (int)(R))
+ ((__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_cvt_roundps_epu32(W, U, A, R) \
- (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
- (__v16si)(__m512i)(W), \
- (__mmask16)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)(__m512i)(W), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_cvt_roundps_epu32(U, A, R) \
- (__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)(U), (int)(R))
+ ((__m512i)__builtin_ia32_cvtps2udq512_mask((__v16sf)(__m512)(A), \
+ (__v16si)_mm512_setzero_si512(), \
+ (__mmask16)(U), (int)(R)))
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtps_epu32 ( __m512 __A)
@@ -4043,19 +4061,19 @@ _mm512_maskz_cvtps_epu32 ( __mmask16 __U, __m512 __A)
}
#define _mm512_cvt_roundpd_epu32(A, R) \
- (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)-1, (int)(R))
+ ((__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvt_roundpd_epu32(W, U, A, R) \
- (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
- (__v8si)(__m256i)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)(__m256i)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_cvt_roundpd_epu32(U, A, R) \
- (__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)(U), (int)(R))
+ ((__m256i)__builtin_ia32_cvtpd2udq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvtpd_epu32 (__m512d __A)
@@ -4975,70 +4993,70 @@ _mm512_maskz_rorv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
#define _mm512_cmp_epi32_mask(a, b, p) \
- (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
- (__v16si)(__m512i)(b), (int)(p), \
- (__mmask16)-1)
+ ((__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (int)(p), \
+ (__mmask16)-1))
#define _mm512_cmp_epu32_mask(a, b, p) \
- (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
- (__v16si)(__m512i)(b), (int)(p), \
- (__mmask16)-1)
+ ((__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (int)(p), \
+ (__mmask16)-1))
#define _mm512_cmp_epi64_mask(a, b, p) \
- (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
- (__v8di)(__m512i)(b), (int)(p), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (int)(p), \
+ (__mmask8)-1))
#define _mm512_cmp_epu64_mask(a, b, p) \
- (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
- (__v8di)(__m512i)(b), (int)(p), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (int)(p), \
+ (__mmask8)-1))
#define _mm512_mask_cmp_epi32_mask(m, a, b, p) \
- (__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
- (__v16si)(__m512i)(b), (int)(p), \
- (__mmask16)(m))
+ ((__mmask16)__builtin_ia32_cmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (int)(p), \
+ (__mmask16)(m)))
#define _mm512_mask_cmp_epu32_mask(m, a, b, p) \
- (__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
- (__v16si)(__m512i)(b), (int)(p), \
- (__mmask16)(m))
+ ((__mmask16)__builtin_ia32_ucmpd512_mask((__v16si)(__m512i)(a), \
+ (__v16si)(__m512i)(b), (int)(p), \
+ (__mmask16)(m)))
#define _mm512_mask_cmp_epi64_mask(m, a, b, p) \
- (__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
- (__v8di)(__m512i)(b), (int)(p), \
- (__mmask8)(m))
+ ((__mmask8)__builtin_ia32_cmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (int)(p), \
+ (__mmask8)(m)))
#define _mm512_mask_cmp_epu64_mask(m, a, b, p) \
- (__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
- (__v8di)(__m512i)(b), (int)(p), \
- (__mmask8)(m))
+ ((__mmask8)__builtin_ia32_ucmpq512_mask((__v8di)(__m512i)(a), \
+ (__v8di)(__m512i)(b), (int)(p), \
+ (__mmask8)(m)))
#define _mm512_rol_epi32(a, b) \
- (__m512i)__builtin_ia32_prold512((__v16si)(__m512i)(a), (int)(b))
+ ((__m512i)__builtin_ia32_prold512((__v16si)(__m512i)(a), (int)(b)))
#define _mm512_mask_rol_epi32(W, U, a, b) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_rol_epi32((a), (b)), \
- (__v16si)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_rol_epi32((a), (b)), \
+ (__v16si)(__m512i)(W)))
#define _mm512_maskz_rol_epi32(U, a, b) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_rol_epi32((a), (b)), \
- (__v16si)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_rol_epi32((a), (b)), \
+ (__v16si)_mm512_setzero_si512()))
#define _mm512_rol_epi64(a, b) \
- (__m512i)__builtin_ia32_prolq512((__v8di)(__m512i)(a), (int)(b))
+ ((__m512i)__builtin_ia32_prolq512((__v8di)(__m512i)(a), (int)(b)))
#define _mm512_mask_rol_epi64(W, U, a, b) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_rol_epi64((a), (b)), \
- (__v8di)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_rol_epi64((a), (b)), \
+ (__v8di)(__m512i)(W)))
#define _mm512_maskz_rol_epi64(U, a, b) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_rol_epi64((a), (b)), \
- (__v8di)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_rol_epi64((a), (b)), \
+ (__v8di)_mm512_setzero_si512()))
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_rolv_epi32 (__m512i __A, __m512i __B)
@@ -5085,35 +5103,35 @@ _mm512_maskz_rolv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
}
#define _mm512_ror_epi32(A, B) \
- (__m512i)__builtin_ia32_prord512((__v16si)(__m512i)(A), (int)(B))
+ ((__m512i)__builtin_ia32_prord512((__v16si)(__m512i)(A), (int)(B)))
#define _mm512_mask_ror_epi32(W, U, A, B) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_ror_epi32((A), (B)), \
- (__v16si)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_ror_epi32((A), (B)), \
+ (__v16si)(__m512i)(W)))
#define _mm512_maskz_ror_epi32(U, A, B) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_ror_epi32((A), (B)), \
- (__v16si)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_ror_epi32((A), (B)), \
+ (__v16si)_mm512_setzero_si512()))
#define _mm512_ror_epi64(A, B) \
- (__m512i)__builtin_ia32_prorq512((__v8di)(__m512i)(A), (int)(B))
+ ((__m512i)__builtin_ia32_prorq512((__v8di)(__m512i)(A), (int)(B)))
#define _mm512_mask_ror_epi64(W, U, A, B) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_ror_epi64((A), (B)), \
- (__v8di)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_ror_epi64((A), (B)), \
+ (__v8di)(__m512i)(W)))
#define _mm512_maskz_ror_epi64(U, A, B) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_ror_epi64((A), (B)), \
- (__v8di)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_ror_epi64((A), (B)), \
+ (__v8di)_mm512_setzero_si512()))
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_slli_epi32(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, __B);
+ return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5135,7 +5153,7 @@ _mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) {
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_slli_epi64(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, __B);
+ return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5157,7 +5175,7 @@ _mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, unsigned int __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srli_epi32(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, __B);
+ return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5179,7 +5197,7 @@ _mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) {
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srli_epi64(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, __B);
+ return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5304,168 +5322,168 @@ _mm512_maskz_movedup_pd (__mmask8 __U, __m512d __A)
}
#define _mm512_fixupimm_round_pd(A, B, C, imm, R) \
- (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8di)(__m512i)(C), (int)(imm), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8di)(__m512i)(C), (int)(imm), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_fixupimm_round_pd(A, U, B, C, imm, R) \
- (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8di)(__m512i)(C), (int)(imm), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8di)(__m512i)(C), (int)(imm), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_fixupimm_pd(A, B, C, imm) \
- (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8di)(__m512i)(C), (int)(imm), \
- (__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8di)(__m512i)(C), (int)(imm), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_fixupimm_pd(A, U, B, C, imm) \
- (__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8di)(__m512i)(C), (int)(imm), \
- (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512d)__builtin_ia32_fixupimmpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8di)(__m512i)(C), (int)(imm), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_fixupimm_round_pd(U, A, B, C, imm, R) \
- (__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8di)(__m512i)(C), \
- (int)(imm), (__mmask8)(U), \
- (int)(R))
+ ((__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8di)(__m512i)(C), \
+ (int)(imm), (__mmask8)(U), \
+ (int)(R)))
#define _mm512_maskz_fixupimm_pd(U, A, B, C, imm) \
- (__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8di)(__m512i)(C), \
- (int)(imm), (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512d)__builtin_ia32_fixupimmpd512_maskz((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8di)(__m512i)(C), \
+ (int)(imm), (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_fixupimm_round_ps(A, B, C, imm, R) \
- (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16si)(__m512i)(C), (int)(imm), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16si)(__m512i)(C), (int)(imm), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_fixupimm_round_ps(A, U, B, C, imm, R) \
- (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16si)(__m512i)(C), (int)(imm), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16si)(__m512i)(C), (int)(imm), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_fixupimm_ps(A, B, C, imm) \
- (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16si)(__m512i)(C), (int)(imm), \
- (__mmask16)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16si)(__m512i)(C), (int)(imm), \
+ (__mmask16)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_fixupimm_ps(A, U, B, C, imm) \
- (__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16si)(__m512i)(C), (int)(imm), \
- (__mmask16)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512)__builtin_ia32_fixupimmps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16si)(__m512i)(C), (int)(imm), \
+ (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_fixupimm_round_ps(U, A, B, C, imm, R) \
- (__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16si)(__m512i)(C), \
- (int)(imm), (__mmask16)(U), \
- (int)(R))
+ ((__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16si)(__m512i)(C), \
+ (int)(imm), (__mmask16)(U), \
+ (int)(R)))
#define _mm512_maskz_fixupimm_ps(U, A, B, C, imm) \
- (__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16si)(__m512i)(C), \
- (int)(imm), (__mmask16)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512)__builtin_ia32_fixupimmps512_maskz((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16si)(__m512i)(C), \
+ (int)(imm), (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_fixupimm_round_sd(A, B, C, imm, R) \
- (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2di)(__m128i)(C), (int)(imm), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_fixupimm_round_sd(A, U, B, C, imm, R) \
- (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2di)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U), (int)(R)))
#define _mm_fixupimm_sd(A, B, C, imm) \
- (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_fixupimm_sd(A, U, B, C, imm) \
- (__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_fixupimm_round_sd(U, A, B, C, imm, R) \
- (__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
+ ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)(U), (int)(R))
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
-#define _mm_maskz_fixupimm_sd(U, A, B, C, imm) \
- (__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
+#define _mm_mask_fixupimm_sd(A, U, B, C, imm) \
+ ((__m128d)__builtin_ia32_fixupimmsd_mask((__v2df)(__m128d)(A), \
(__v2df)(__m128d)(B), \
(__v2di)(__m128i)(C), (int)(imm), \
(__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_maskz_fixupimm_round_sd(U, A, B, C, imm, R) \
+ ((__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2di)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_fixupimm_sd(U, A, B, C, imm) \
+ ((__m128d)__builtin_ia32_fixupimmsd_maskz((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2di)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_fixupimm_round_ss(A, B, C, imm, R) \
- (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4si)(__m128i)(C), (int)(imm), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_fixupimm_round_ss(A, U, B, C, imm, R) \
- (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4si)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U), (int)(R)))
#define _mm_fixupimm_ss(A, B, C, imm) \
- (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_mask_fixupimm_ss(A, U, B, C, imm) \
- (__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
-
-#define _mm_maskz_fixupimm_round_ss(U, A, B, C, imm, R) \
- (__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
+ ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)(U), (int)(R))
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
-#define _mm_maskz_fixupimm_ss(U, A, B, C, imm) \
- (__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
+#define _mm_mask_fixupimm_ss(A, U, B, C, imm) \
+ ((__m128)__builtin_ia32_fixupimmss_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4si)(__m128i)(C), (int)(imm), \
(__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_maskz_fixupimm_round_ss(U, A, B, C, imm, R) \
+ ((__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4si)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_fixupimm_ss(U, A, B, C, imm) \
+ ((__m128)__builtin_ia32_fixupimmss_maskz((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4si)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_getexp_round_sd(A, B, R) \
- (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
@@ -5486,10 +5504,10 @@ _mm_mask_getexp_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
}
#define _mm_mask_getexp_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_getexp_sd (__mmask8 __U, __m128d __A, __m128d __B)
@@ -5502,16 +5520,16 @@ _mm_maskz_getexp_sd (__mmask8 __U, __m128d __A, __m128d __B)
}
#define _mm_maskz_getexp_round_sd(U, A, B, R) \
- (__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_getexpsd128_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
#define _mm_getexp_round_ss(A, B, R) \
- (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_getexp_ss (__m128 __A, __m128 __B)
@@ -5531,10 +5549,10 @@ _mm_mask_getexp_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
}
#define _mm_mask_getexp_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B)
@@ -5547,100 +5565,100 @@ _mm_maskz_getexp_ss (__mmask8 __U, __m128 __A, __m128 __B)
}
#define _mm_maskz_getexp_round_ss(U, A, B, R) \
- (__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_getexpss128_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
#define _mm_getmant_round_sd(A, B, C, D, R) \
- (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (int)(((D)<<2) | (C)), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_getmant_sd(A, B, C, D) \
- (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (int)(((D)<<2) | (C)), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_mask_getmant_sd(W, U, A, B, C, D) \
- (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (int)(((D)<<2) | (C)), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_mask_getmant_round_sd(W, U, A, B, C, D, R) \
- (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (int)(((D)<<2) | (C)), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm_maskz_getmant_sd(U, A, B, C, D) \
- (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (int)(((D)<<2) | (C)), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_maskz_getmant_round_sd(U, A, B, C, D, R) \
- (__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (int)(((D)<<2) | (C)), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_getmantsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
#define _mm_getmant_round_ss(A, B, C, D, R) \
- (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (int)(((D)<<2) | (C)), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_getmant_ss(A, B, C, D) \
- (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (int)(((D)<<2) | (C)), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_mask_getmant_ss(W, U, A, B, C, D) \
- (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (int)(((D)<<2) | (C)), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_mask_getmant_round_ss(W, U, A, B, C, D, R) \
- (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (int)(((D)<<2) | (C)), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm_maskz_getmant_ss(U, A, B, C, D) \
- (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (int)(((D)<<2) | (C)), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_maskz_getmant_round_ss(U, A, B, C, D, R) \
- (__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (int)(((D)<<2) | (C)), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_getmantss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (int)(((D)<<2) | (C)), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __mmask16 __DEFAULT_FN_ATTRS
_mm512_kmov (__mmask16 __A)
@@ -5649,16 +5667,16 @@ _mm512_kmov (__mmask16 __A)
}
#define _mm_comi_round_sd(A, B, P, R) \
- (int)__builtin_ia32_vcomisd((__v2df)(__m128d)(A), (__v2df)(__m128d)(B), \
- (int)(P), (int)(R))
+ ((int)__builtin_ia32_vcomisd((__v2df)(__m128d)(A), (__v2df)(__m128d)(B), \
+ (int)(P), (int)(R)))
#define _mm_comi_round_ss(A, B, P, R) \
- (int)__builtin_ia32_vcomiss((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \
- (int)(P), (int)(R))
+ ((int)__builtin_ia32_vcomiss((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \
+ (int)(P), (int)(R)))
#ifdef __x86_64__
#define _mm_cvt_roundsd_si64(A, R) \
- (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R))
+ ((long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)))
#endif
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5925,55 +5943,58 @@ _mm512_maskz_srlv_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
(__v8di)_mm512_setzero_si512());
}
-#define _mm512_ternarylogic_epi32(A, B, C, imm) \
- (__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
- (__v16si)(__m512i)(B), \
- (__v16si)(__m512i)(C), (int)(imm), \
- (__mmask16)-1)
-
-#define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm) \
- (__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
- (__v16si)(__m512i)(B), \
- (__v16si)(__m512i)(C), (int)(imm), \
- (__mmask16)(U))
-
-#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm) \
- (__m512i)__builtin_ia32_pternlogd512_maskz((__v16si)(__m512i)(A), \
- (__v16si)(__m512i)(B), \
- (__v16si)(__m512i)(C), \
- (int)(imm), (__mmask16)(U))
-
-#define _mm512_ternarylogic_epi64(A, B, C, imm) \
- (__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), \
- (__v8di)(__m512i)(C), (int)(imm), \
- (__mmask8)-1)
-
-#define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm) \
- (__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), \
- (__v8di)(__m512i)(C), (int)(imm), \
- (__mmask8)(U))
-
-#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm) \
- (__m512i)__builtin_ia32_pternlogq512_maskz((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), \
- (__v8di)(__m512i)(C), (int)(imm), \
- (__mmask8)(U))
+/// \enum _MM_TERNLOG_ENUM
+/// A helper to represent the ternary logic operations among vector \a A,
+/// \a B and \a C. The representation is passed to \a imm.
+typedef enum {
+ _MM_TERNLOG_A = 0xF0,
+ _MM_TERNLOG_B = 0xCC,
+ _MM_TERNLOG_C = 0xAA
+} _MM_TERNLOG_ENUM;
+
+#define _mm512_ternarylogic_epi32(A, B, C, imm) \
+ ((__m512i)__builtin_ia32_pternlogd512_mask( \
+ (__v16si)(__m512i)(A), (__v16si)(__m512i)(B), (__v16si)(__m512i)(C), \
+ (unsigned char)(imm), (__mmask16)-1))
+
+#define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm) \
+ ((__m512i)__builtin_ia32_pternlogd512_mask( \
+ (__v16si)(__m512i)(A), (__v16si)(__m512i)(B), (__v16si)(__m512i)(C), \
+ (unsigned char)(imm), (__mmask16)(U)))
+
+#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm) \
+ ((__m512i)__builtin_ia32_pternlogd512_maskz( \
+ (__v16si)(__m512i)(A), (__v16si)(__m512i)(B), (__v16si)(__m512i)(C), \
+ (unsigned char)(imm), (__mmask16)(U)))
+
+#define _mm512_ternarylogic_epi64(A, B, C, imm) \
+ ((__m512i)__builtin_ia32_pternlogq512_mask( \
+ (__v8di)(__m512i)(A), (__v8di)(__m512i)(B), (__v8di)(__m512i)(C), \
+ (unsigned char)(imm), (__mmask8)-1))
+
+#define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm) \
+ ((__m512i)__builtin_ia32_pternlogq512_mask( \
+ (__v8di)(__m512i)(A), (__v8di)(__m512i)(B), (__v8di)(__m512i)(C), \
+ (unsigned char)(imm), (__mmask8)(U)))
+
+#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm) \
+ ((__m512i)__builtin_ia32_pternlogq512_maskz( \
+ (__v8di)(__m512i)(A), (__v8di)(__m512i)(B), (__v8di)(__m512i)(C), \
+ (unsigned char)(imm), (__mmask8)(U)))
#ifdef __x86_64__
#define _mm_cvt_roundsd_i64(A, R) \
- (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R))
+ ((long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)))
#endif
#define _mm_cvt_roundsd_si32(A, R) \
- (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R))
+ ((int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)))
#define _mm_cvt_roundsd_i32(A, R) \
- (int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R))
+ ((int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)))
#define _mm_cvt_roundsd_u32(A, R) \
- (unsigned int)__builtin_ia32_vcvtsd2usi32((__v2df)(__m128d)(A), (int)(R))
+ ((unsigned int)__builtin_ia32_vcvtsd2usi32((__v2df)(__m128d)(A), (int)(R)))
static __inline__ unsigned __DEFAULT_FN_ATTRS128
_mm_cvtsd_u32 (__m128d __A)
@@ -5984,8 +6005,8 @@ _mm_cvtsd_u32 (__m128d __A)
#ifdef __x86_64__
#define _mm_cvt_roundsd_u64(A, R) \
- (unsigned long long)__builtin_ia32_vcvtsd2usi64((__v2df)(__m128d)(A), \
- (int)(R))
+ ((unsigned long long)__builtin_ia32_vcvtsd2usi64((__v2df)(__m128d)(A), \
+ (int)(R)))
static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
_mm_cvtsd_u64 (__m128d __A)
@@ -5997,21 +6018,21 @@ _mm_cvtsd_u64 (__m128d __A)
#endif
#define _mm_cvt_roundss_si32(A, R) \
- (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R))
+ ((int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)))
#define _mm_cvt_roundss_i32(A, R) \
- (int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R))
+ ((int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)))
#ifdef __x86_64__
#define _mm_cvt_roundss_si64(A, R) \
- (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R))
+ ((long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)))
#define _mm_cvt_roundss_i64(A, R) \
- (long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R))
+ ((long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)))
#endif
#define _mm_cvt_roundss_u32(A, R) \
- (unsigned int)__builtin_ia32_vcvtss2usi32((__v4sf)(__m128)(A), (int)(R))
+ ((unsigned int)__builtin_ia32_vcvtss2usi32((__v4sf)(__m128)(A), (int)(R)))
static __inline__ unsigned __DEFAULT_FN_ATTRS128
_mm_cvtss_u32 (__m128 __A)
@@ -6022,8 +6043,8 @@ _mm_cvtss_u32 (__m128 __A)
#ifdef __x86_64__
#define _mm_cvt_roundss_u64(A, R) \
- (unsigned long long)__builtin_ia32_vcvtss2usi64((__v4sf)(__m128)(A), \
- (int)(R))
+ ((unsigned long long)__builtin_ia32_vcvtss2usi64((__v4sf)(__m128)(A), \
+ (int)(R)))
static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
_mm_cvtss_u64 (__m128 __A)
@@ -6035,10 +6056,10 @@ _mm_cvtss_u64 (__m128 __A)
#endif
#define _mm_cvtt_roundsd_i32(A, R) \
- (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R))
+ ((int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)))
#define _mm_cvtt_roundsd_si32(A, R) \
- (int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R))
+ ((int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)))
static __inline__ int __DEFAULT_FN_ATTRS128
_mm_cvttsd_i32 (__m128d __A)
@@ -6049,10 +6070,10 @@ _mm_cvttsd_i32 (__m128d __A)
#ifdef __x86_64__
#define _mm_cvtt_roundsd_si64(A, R) \
- (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R))
+ ((long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)))
#define _mm_cvtt_roundsd_i64(A, R) \
- (long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R))
+ ((long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)))
static __inline__ long long __DEFAULT_FN_ATTRS128
_mm_cvttsd_i64 (__m128d __A)
@@ -6063,7 +6084,7 @@ _mm_cvttsd_i64 (__m128d __A)
#endif
#define _mm_cvtt_roundsd_u32(A, R) \
- (unsigned int)__builtin_ia32_vcvttsd2usi32((__v2df)(__m128d)(A), (int)(R))
+ ((unsigned int)__builtin_ia32_vcvttsd2usi32((__v2df)(__m128d)(A), (int)(R)))
static __inline__ unsigned __DEFAULT_FN_ATTRS128
_mm_cvttsd_u32 (__m128d __A)
@@ -6074,8 +6095,8 @@ _mm_cvttsd_u32 (__m128d __A)
#ifdef __x86_64__
#define _mm_cvtt_roundsd_u64(A, R) \
- (unsigned long long)__builtin_ia32_vcvttsd2usi64((__v2df)(__m128d)(A), \
- (int)(R))
+ ((unsigned long long)__builtin_ia32_vcvttsd2usi64((__v2df)(__m128d)(A), \
+ (int)(R)))
static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
_mm_cvttsd_u64 (__m128d __A)
@@ -6087,10 +6108,10 @@ _mm_cvttsd_u64 (__m128d __A)
#endif
#define _mm_cvtt_roundss_i32(A, R) \
- (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R))
+ ((int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)))
#define _mm_cvtt_roundss_si32(A, R) \
- (int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R))
+ ((int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)))
static __inline__ int __DEFAULT_FN_ATTRS128
_mm_cvttss_i32 (__m128 __A)
@@ -6101,10 +6122,10 @@ _mm_cvttss_i32 (__m128 __A)
#ifdef __x86_64__
#define _mm_cvtt_roundss_i64(A, R) \
- (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R))
+ ((long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)))
#define _mm_cvtt_roundss_si64(A, R) \
- (long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R))
+ ((long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)))
static __inline__ long long __DEFAULT_FN_ATTRS128
_mm_cvttss_i64 (__m128 __A)
@@ -6115,7 +6136,7 @@ _mm_cvttss_i64 (__m128 __A)
#endif
#define _mm_cvtt_roundss_u32(A, R) \
- (unsigned int)__builtin_ia32_vcvttss2usi32((__v4sf)(__m128)(A), (int)(R))
+ ((unsigned int)__builtin_ia32_vcvttss2usi32((__v4sf)(__m128)(A), (int)(R)))
static __inline__ unsigned __DEFAULT_FN_ATTRS128
_mm_cvttss_u32 (__m128 __A)
@@ -6126,8 +6147,8 @@ _mm_cvttss_u32 (__m128 __A)
#ifdef __x86_64__
#define _mm_cvtt_roundss_u64(A, R) \
- (unsigned long long)__builtin_ia32_vcvttss2usi64((__v4sf)(__m128)(A), \
- (int)(R))
+ ((unsigned long long)__builtin_ia32_vcvttss2usi64((__v4sf)(__m128)(A), \
+ (int)(R)))
static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
_mm_cvttss_u64 (__m128 __A)
@@ -6139,30 +6160,30 @@ _mm_cvttss_u64 (__m128 __A)
#endif
#define _mm512_permute_pd(X, C) \
- (__m512d)__builtin_ia32_vpermilpd512((__v8df)(__m512d)(X), (int)(C))
+ ((__m512d)__builtin_ia32_vpermilpd512((__v8df)(__m512d)(X), (int)(C)))
#define _mm512_mask_permute_pd(W, U, X, C) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__v8df)_mm512_permute_pd((X), (C)), \
- (__v8df)(__m512d)(W))
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_permute_pd((X), (C)), \
+ (__v8df)(__m512d)(W)))
#define _mm512_maskz_permute_pd(U, X, C) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__v8df)_mm512_permute_pd((X), (C)), \
- (__v8df)_mm512_setzero_pd())
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_permute_pd((X), (C)), \
+ (__v8df)_mm512_setzero_pd()))
#define _mm512_permute_ps(X, C) \
- (__m512)__builtin_ia32_vpermilps512((__v16sf)(__m512)(X), (int)(C))
+ ((__m512)__builtin_ia32_vpermilps512((__v16sf)(__m512)(X), (int)(C)))
#define _mm512_mask_permute_ps(W, U, X, C) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- (__v16sf)_mm512_permute_ps((X), (C)), \
- (__v16sf)(__m512)(W))
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_permute_ps((X), (C)), \
+ (__v16sf)(__m512)(W)))
#define _mm512_maskz_permute_ps(U, X, C) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- (__v16sf)_mm512_permute_ps((X), (C)), \
- (__v16sf)_mm512_setzero_ps())
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_permute_ps((X), (C)), \
+ (__v16sf)_mm512_setzero_ps()))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_permutevar_pd(__m512d __A, __m512i __C)
@@ -6274,19 +6295,19 @@ _mm512_maskz_permutex2var_ps(__mmask16 __U, __m512 __A, __m512i __I, __m512 __B)
#define _mm512_cvtt_roundpd_epu32(A, R) \
- (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
- (__v8si)_mm256_undefined_si256(), \
- (__mmask8)-1, (int)(R))
+ ((__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_undefined_si256(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvtt_roundpd_epu32(W, U, A, R) \
- (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
- (__v8si)(__m256i)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)(__m256i)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_cvtt_roundpd_epu32(U, A, R) \
- (__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)(U), (int)(R))
+ ((__m256i)__builtin_ia32_cvttpd2udq512_mask((__v8df)(__m512d)(A), \
+ (__v8si)_mm256_setzero_si256(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m256i __DEFAULT_FN_ATTRS512
_mm512_cvttpd_epu32 (__m512d __A)
@@ -6318,106 +6339,106 @@ _mm512_maskz_cvttpd_epu32 (__mmask8 __U, __m512d __A)
}
#define _mm_roundscale_round_sd(A, B, imm, R) \
- (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(imm), \
- (int)(R))
+ ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(imm), \
+ (int)(R)))
#define _mm_roundscale_sd(A, B, imm) \
- (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(imm), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(imm), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_mask_roundscale_sd(W, U, A, B, imm) \
- (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(imm), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(imm), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_mask_roundscale_round_sd(W, U, A, B, I, R) \
- (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(I), \
- (int)(R))
+ ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(I), \
+ (int)(R)))
#define _mm_maskz_roundscale_sd(U, A, B, I) \
- (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(I), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(I), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_maskz_roundscale_round_sd(U, A, B, I, R) \
- (__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(I), \
- (int)(R))
+ ((__m128d)__builtin_ia32_rndscalesd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(I), \
+ (int)(R)))
#define _mm_roundscale_round_ss(A, B, imm, R) \
- (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(imm), \
- (int)(R))
+ ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(imm), \
+ (int)(R)))
#define _mm_roundscale_ss(A, B, imm) \
- (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(imm), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(imm), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_mask_roundscale_ss(W, U, A, B, I) \
- (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U), (int)(I), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U), (int)(I), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_mask_roundscale_round_ss(W, U, A, B, I, R) \
- (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U), (int)(I), \
- (int)(R))
+ ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U), (int)(I), \
+ (int)(R)))
#define _mm_maskz_roundscale_ss(U, A, B, I) \
- (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(I), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(I), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_maskz_roundscale_round_ss(U, A, B, I, R) \
- (__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(I), \
- (int)(R))
+ ((__m128)__builtin_ia32_rndscaless_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(I), \
+ (int)(R)))
#define _mm512_scalef_round_pd(A, B, R) \
- (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_undefined_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_scalef_round_pd(W, U, A, B, R) \
- (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_scalef_round_pd(U, A, B, R) \
- (__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_scalefpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_scalef_pd (__m512d __A, __m512d __B)
@@ -6452,22 +6473,22 @@ _mm512_maskz_scalef_pd (__mmask8 __U, __m512d __A, __m512d __B)
}
#define _mm512_scalef_round_ps(A, B, R) \
- (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_scalef_round_ps(W, U, A, B, R) \
- (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(W), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_scalef_round_ps(U, A, B, R) \
- (__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_scalefps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_scalef_ps (__m512 __A, __m512 __B)
@@ -6502,10 +6523,10 @@ _mm512_maskz_scalef_ps (__mmask16 __U, __m512 __A, __m512 __B)
}
#define _mm_scalef_round_sd(A, B, R) \
- (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_scalef_sd (__m128d __A, __m128d __B)
@@ -6527,10 +6548,10 @@ _mm_mask_scalef_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
}
#define _mm_mask_scalef_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_scalef_sd (__mmask8 __U, __m128d __A, __m128d __B)
@@ -6543,16 +6564,16 @@ _mm_maskz_scalef_sd (__mmask8 __U, __m128d __A, __m128d __B)
}
#define _mm_maskz_scalef_round_sd(U, A, B, R) \
- (__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_scalefsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
#define _mm_scalef_round_ss(A, B, R) \
- (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_scalef_ss (__m128 __A, __m128 __B)
@@ -6574,10 +6595,10 @@ _mm_mask_scalef_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
}
#define _mm_mask_scalef_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B)
@@ -6590,16 +6611,16 @@ _mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B)
}
#define _mm_maskz_scalef_round_ss(U, A, B, R) \
- (__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_scalefss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srai_epi32(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_psradi512((__v16si)__A, __B);
+ return (__m512i)__builtin_ia32_psradi512((__v16si)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -6622,7 +6643,7 @@ _mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A,
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srai_epi64(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, __B);
+ return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -6642,94 +6663,94 @@ _mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, unsigned int __B)
}
#define _mm512_shuffle_f32x4(A, B, imm) \
- (__m512)__builtin_ia32_shuf_f32x4((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(imm))
+ ((__m512)__builtin_ia32_shuf_f32x4((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(imm)))
#define _mm512_mask_shuffle_f32x4(W, U, A, B, imm) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
- (__v16sf)(__m512)(W))
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
+ (__v16sf)(__m512)(W)))
#define _mm512_maskz_shuffle_f32x4(U, A, B, imm) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
- (__v16sf)_mm512_setzero_ps())
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
+ (__v16sf)_mm512_setzero_ps()))
#define _mm512_shuffle_f64x2(A, B, imm) \
- (__m512d)__builtin_ia32_shuf_f64x2((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(imm))
+ ((__m512d)__builtin_ia32_shuf_f64x2((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(imm)))
#define _mm512_mask_shuffle_f64x2(W, U, A, B, imm) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
- (__v8df)(__m512d)(W))
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
+ (__v8df)(__m512d)(W)))
#define _mm512_maskz_shuffle_f64x2(U, A, B, imm) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
- (__v8df)_mm512_setzero_pd())
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
+ (__v8df)_mm512_setzero_pd()))
#define _mm512_shuffle_i32x4(A, B, imm) \
- (__m512i)__builtin_ia32_shuf_i32x4((__v16si)(__m512i)(A), \
- (__v16si)(__m512i)(B), (int)(imm))
+ ((__m512i)__builtin_ia32_shuf_i32x4((__v16si)(__m512i)(A), \
+ (__v16si)(__m512i)(B), (int)(imm)))
#define _mm512_mask_shuffle_i32x4(W, U, A, B, imm) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
- (__v16si)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
+ (__v16si)(__m512i)(W)))
#define _mm512_maskz_shuffle_i32x4(U, A, B, imm) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
- (__v16si)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
+ (__v16si)_mm512_setzero_si512()))
#define _mm512_shuffle_i64x2(A, B, imm) \
- (__m512i)__builtin_ia32_shuf_i64x2((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), (int)(imm))
+ ((__m512i)__builtin_ia32_shuf_i64x2((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), (int)(imm)))
#define _mm512_mask_shuffle_i64x2(W, U, A, B, imm) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
- (__v8di)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
+ (__v8di)(__m512i)(W)))
#define _mm512_maskz_shuffle_i64x2(U, A, B, imm) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
- (__v8di)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
+ (__v8di)_mm512_setzero_si512()))
#define _mm512_shuffle_pd(A, B, M) \
- (__m512d)__builtin_ia32_shufpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), (int)(M))
+ ((__m512d)__builtin_ia32_shufpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(M)))
#define _mm512_mask_shuffle_pd(W, U, A, B, M) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
- (__v8df)(__m512d)(W))
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
+ (__v8df)(__m512d)(W)))
#define _mm512_maskz_shuffle_pd(U, A, B, M) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
- (__v8df)_mm512_setzero_pd())
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
+ (__v8df)_mm512_setzero_pd()))
#define _mm512_shuffle_ps(A, B, M) \
- (__m512)__builtin_ia32_shufps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), (int)(M))
+ ((__m512)__builtin_ia32_shufps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(M)))
#define _mm512_mask_shuffle_ps(W, U, A, B, M) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
- (__v16sf)(__m512)(W))
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
+ (__v16sf)(__m512)(W)))
#define _mm512_maskz_shuffle_ps(U, A, B, M) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
- (__v16sf)_mm512_setzero_ps())
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_shuffle_ps((A), (B), (M)), \
+ (__v16sf)_mm512_setzero_ps()))
#define _mm_sqrt_round_sd(A, B, R) \
- (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1, (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_sqrt_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
@@ -6742,10 +6763,10 @@ _mm_mask_sqrt_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
}
#define _mm_mask_sqrt_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_sqrt_sd (__mmask8 __U, __m128d __A, __m128d __B)
@@ -6758,16 +6779,16 @@ _mm_maskz_sqrt_sd (__mmask8 __U, __m128d __A, __m128d __B)
}
#define _mm_maskz_sqrt_round_sd(U, A, B, R) \
- (__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_sqrtsd_round_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
#define _mm_sqrt_round_ss(A, B, R) \
- (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1, (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_sqrt_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
@@ -6780,10 +6801,10 @@ _mm_mask_sqrt_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
}
#define _mm_mask_sqrt_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_sqrt_ss (__mmask8 __U, __m128 __A, __m128 __B)
@@ -6796,10 +6817,10 @@ _mm_maskz_sqrt_ss (__mmask8 __U, __m128 __A, __m128 __B)
}
#define _mm_maskz_sqrt_round_ss(U, A, B, R) \
- (__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_sqrtss_round_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_broadcast_f32x4(__m128 __A)
@@ -7366,183 +7387,183 @@ _mm512_mask_cvtepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
}
#define _mm512_extracti32x4_epi32(A, imm) \
- (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
- (__v4si)_mm_undefined_si128(), \
- (__mmask8)-1)
+ ((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
+ (__v4si)_mm_undefined_si128(), \
+ (__mmask8)-1))
#define _mm512_mask_extracti32x4_epi32(W, U, A, imm) \
- (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
- (__v4si)(__m128i)(W), \
- (__mmask8)(U))
+ ((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
+ (__v4si)(__m128i)(W), \
+ (__mmask8)(U)))
#define _mm512_maskz_extracti32x4_epi32(U, A, imm) \
- (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
- (__v4si)_mm_setzero_si128(), \
- (__mmask8)(U))
+ ((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
+ (__v4si)_mm_setzero_si128(), \
+ (__mmask8)(U)))
#define _mm512_extracti64x4_epi64(A, imm) \
- (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
- (__v4di)_mm256_undefined_si256(), \
- (__mmask8)-1)
+ ((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
+ (__v4di)_mm256_undefined_si256(), \
+ (__mmask8)-1))
#define _mm512_mask_extracti64x4_epi64(W, U, A, imm) \
- (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
- (__v4di)(__m256i)(W), \
- (__mmask8)(U))
+ ((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
+ (__v4di)(__m256i)(W), \
+ (__mmask8)(U)))
#define _mm512_maskz_extracti64x4_epi64(U, A, imm) \
- (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
- (__v4di)_mm256_setzero_si256(), \
- (__mmask8)(U))
+ ((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
+ (__v4di)_mm256_setzero_si256(), \
+ (__mmask8)(U)))
#define _mm512_insertf64x4(A, B, imm) \
- (__m512d)__builtin_ia32_insertf64x4((__v8df)(__m512d)(A), \
- (__v4df)(__m256d)(B), (int)(imm))
+ ((__m512d)__builtin_ia32_insertf64x4((__v8df)(__m512d)(A), \
+ (__v4df)(__m256d)(B), (int)(imm)))
#define _mm512_mask_insertf64x4(W, U, A, B, imm) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__v8df)_mm512_insertf64x4((A), (B), (imm)), \
- (__v8df)(__m512d)(W))
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_insertf64x4((A), (B), (imm)), \
+ (__v8df)(__m512d)(W)))
#define _mm512_maskz_insertf64x4(U, A, B, imm) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__v8df)_mm512_insertf64x4((A), (B), (imm)), \
- (__v8df)_mm512_setzero_pd())
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_insertf64x4((A), (B), (imm)), \
+ (__v8df)_mm512_setzero_pd()))
#define _mm512_inserti64x4(A, B, imm) \
- (__m512i)__builtin_ia32_inserti64x4((__v8di)(__m512i)(A), \
- (__v4di)(__m256i)(B), (int)(imm))
+ ((__m512i)__builtin_ia32_inserti64x4((__v8di)(__m512i)(A), \
+ (__v4di)(__m256i)(B), (int)(imm)))
#define _mm512_mask_inserti64x4(W, U, A, B, imm) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_inserti64x4((A), (B), (imm)), \
- (__v8di)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_inserti64x4((A), (B), (imm)), \
+ (__v8di)(__m512i)(W)))
#define _mm512_maskz_inserti64x4(U, A, B, imm) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_inserti64x4((A), (B), (imm)), \
- (__v8di)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_inserti64x4((A), (B), (imm)), \
+ (__v8di)_mm512_setzero_si512()))
#define _mm512_insertf32x4(A, B, imm) \
- (__m512)__builtin_ia32_insertf32x4((__v16sf)(__m512)(A), \
- (__v4sf)(__m128)(B), (int)(imm))
+ ((__m512)__builtin_ia32_insertf32x4((__v16sf)(__m512)(A), \
+ (__v4sf)(__m128)(B), (int)(imm)))
#define _mm512_mask_insertf32x4(W, U, A, B, imm) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
- (__v16sf)(__m512)(W))
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
+ (__v16sf)(__m512)(W)))
#define _mm512_maskz_insertf32x4(U, A, B, imm) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
- (__v16sf)_mm512_setzero_ps())
+ ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
+ (__v16sf)_mm512_setzero_ps()))
#define _mm512_inserti32x4(A, B, imm) \
- (__m512i)__builtin_ia32_inserti32x4((__v16si)(__m512i)(A), \
- (__v4si)(__m128i)(B), (int)(imm))
+ ((__m512i)__builtin_ia32_inserti32x4((__v16si)(__m512i)(A), \
+ (__v4si)(__m128i)(B), (int)(imm)))
#define _mm512_mask_inserti32x4(W, U, A, B, imm) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_inserti32x4((A), (B), (imm)), \
- (__v16si)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_inserti32x4((A), (B), (imm)), \
+ (__v16si)(__m512i)(W)))
#define _mm512_maskz_inserti32x4(U, A, B, imm) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_inserti32x4((A), (B), (imm)), \
- (__v16si)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_inserti32x4((A), (B), (imm)), \
+ (__v16si)_mm512_setzero_si512()))
#define _mm512_getmant_round_pd(A, B, C, R) \
- (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
- (int)(((C)<<2) | (B)), \
- (__v8df)_mm512_undefined_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_getmant_round_pd(W, U, A, B, C, R) \
- (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
- (int)(((C)<<2) | (B)), \
- (__v8df)(__m512d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_getmant_round_pd(U, A, B, C, R) \
- (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
- (int)(((C)<<2) | (B)), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_getmant_pd(A, B, C) \
- (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
- (int)(((C)<<2) | (B)), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_getmant_pd(W, U, A, B, C) \
- (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
- (int)(((C)<<2) | (B)), \
- (__v8df)(__m512d)(W), \
- (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_getmant_pd(U, A, B, C) \
- (__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
- (int)(((C)<<2) | (B)), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_getmant_round_ps(A, B, C, R) \
- (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
- (int)(((C)<<2) | (B)), \
- (__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_getmant_round_ps(W, U, A, B, C, R) \
- (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
- (int)(((C)<<2) | (B)), \
- (__v16sf)(__m512)(W), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_getmant_round_ps(U, A, B, C, R) \
- (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
- (int)(((C)<<2) | (B)), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_getmant_ps(A, B, C) \
- (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
- (int)(((C)<<2)|(B)), \
- (__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+ (int)(((C)<<2)|(B)), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_mask_getmant_ps(W, U, A, B, C) \
- (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
- (int)(((C)<<2)|(B)), \
- (__v16sf)(__m512)(W), \
- (__mmask16)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+ (int)(((C)<<2)|(B)), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_maskz_getmant_ps(U, A, B, C) \
- (__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
- (int)(((C)<<2)|(B)), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__m512)__builtin_ia32_getmantps512_mask((__v16sf)(__m512)(A), \
+ (int)(((C)<<2)|(B)), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm512_getexp_round_pd(A, R) \
- (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_undefined_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_getexp_round_pd(W, U, A, R) \
- (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_getexp_round_pd(U, A, R) \
- (__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_getexppd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_getexp_pd (__m512d __A)
@@ -7572,19 +7593,19 @@ _mm512_maskz_getexp_pd (__mmask8 __U, __m512d __A)
}
#define _mm512_getexp_round_ps(A, R) \
- (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1, (int)(R))
+ ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)-1, (int)(R)))
#define _mm512_mask_getexp_round_ps(W, U, A, R) \
- (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(W), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(W), \
+ (__mmask16)(U), (int)(R)))
#define _mm512_maskz_getexp_round_ps(U, A, R) \
- (__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R))
+ ((__m512)__builtin_ia32_getexpps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_setzero_ps(), \
+ (__mmask16)(U), (int)(R)))
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_getexp_ps (__m512 __A)
@@ -7614,100 +7635,100 @@ _mm512_maskz_getexp_ps (__mmask16 __U, __m512 __A)
}
#define _mm512_i64gather_ps(index, addr, scale) \
- (__m256)__builtin_ia32_gatherdiv16sf((__v8sf)_mm256_undefined_ps(), \
- (void const *)(addr), \
- (__v8di)(__m512i)(index), (__mmask8)-1, \
- (int)(scale))
+ ((__m256)__builtin_ia32_gatherdiv16sf((__v8sf)_mm256_undefined_ps(), \
+ (void const *)(addr), \
+ (__v8di)(__m512i)(index), (__mmask8)-1, \
+ (int)(scale)))
#define _mm512_mask_i64gather_ps(v1_old, mask, index, addr, scale) \
- (__m256)__builtin_ia32_gatherdiv16sf((__v8sf)(__m256)(v1_old),\
- (void const *)(addr), \
- (__v8di)(__m512i)(index), \
- (__mmask8)(mask), (int)(scale))
-
-#define _mm512_i64gather_epi32(index, addr, scale) \
- (__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_si256(), \
+ ((__m256)__builtin_ia32_gatherdiv16sf((__v8sf)(__m256)(v1_old),\
(void const *)(addr), \
(__v8di)(__m512i)(index), \
- (__mmask8)-1, (int)(scale))
+ (__mmask8)(mask), (int)(scale)))
+
+#define _mm512_i64gather_epi32(index, addr, scale) \
+ ((__m256i)__builtin_ia32_gatherdiv16si((__v8si)_mm256_undefined_si256(), \
+ (void const *)(addr), \
+ (__v8di)(__m512i)(index), \
+ (__mmask8)-1, (int)(scale)))
#define _mm512_mask_i64gather_epi32(v1_old, mask, index, addr, scale) \
- (__m256i)__builtin_ia32_gatherdiv16si((__v8si)(__m256i)(v1_old), \
- (void const *)(addr), \
- (__v8di)(__m512i)(index), \
- (__mmask8)(mask), (int)(scale))
+ ((__m256i)__builtin_ia32_gatherdiv16si((__v8si)(__m256i)(v1_old), \
+ (void const *)(addr), \
+ (__v8di)(__m512i)(index), \
+ (__mmask8)(mask), (int)(scale)))
#define _mm512_i64gather_pd(index, addr, scale) \
- (__m512d)__builtin_ia32_gatherdiv8df((__v8df)_mm512_undefined_pd(), \
- (void const *)(addr), \
- (__v8di)(__m512i)(index), (__mmask8)-1, \
- (int)(scale))
+ ((__m512d)__builtin_ia32_gatherdiv8df((__v8df)_mm512_undefined_pd(), \
+ (void const *)(addr), \
+ (__v8di)(__m512i)(index), (__mmask8)-1, \
+ (int)(scale)))
#define _mm512_mask_i64gather_pd(v1_old, mask, index, addr, scale) \
- (__m512d)__builtin_ia32_gatherdiv8df((__v8df)(__m512d)(v1_old), \
- (void const *)(addr), \
- (__v8di)(__m512i)(index), \
- (__mmask8)(mask), (int)(scale))
+ ((__m512d)__builtin_ia32_gatherdiv8df((__v8df)(__m512d)(v1_old), \
+ (void const *)(addr), \
+ (__v8di)(__m512i)(index), \
+ (__mmask8)(mask), (int)(scale)))
#define _mm512_i64gather_epi64(index, addr, scale) \
- (__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_epi32(), \
- (void const *)(addr), \
- (__v8di)(__m512i)(index), (__mmask8)-1, \
- (int)(scale))
+ ((__m512i)__builtin_ia32_gatherdiv8di((__v8di)_mm512_undefined_epi32(), \
+ (void const *)(addr), \
+ (__v8di)(__m512i)(index), (__mmask8)-1, \
+ (int)(scale)))
#define _mm512_mask_i64gather_epi64(v1_old, mask, index, addr, scale) \
- (__m512i)__builtin_ia32_gatherdiv8di((__v8di)(__m512i)(v1_old), \
- (void const *)(addr), \
- (__v8di)(__m512i)(index), \
- (__mmask8)(mask), (int)(scale))
+ ((__m512i)__builtin_ia32_gatherdiv8di((__v8di)(__m512i)(v1_old), \
+ (void const *)(addr), \
+ (__v8di)(__m512i)(index), \
+ (__mmask8)(mask), (int)(scale)))
#define _mm512_i32gather_ps(index, addr, scale) \
- (__m512)__builtin_ia32_gathersiv16sf((__v16sf)_mm512_undefined_ps(), \
- (void const *)(addr), \
- (__v16si)(__m512)(index), \
- (__mmask16)-1, (int)(scale))
+ ((__m512)__builtin_ia32_gathersiv16sf((__v16sf)_mm512_undefined_ps(), \
+ (void const *)(addr), \
+ (__v16si)(__m512)(index), \
+ (__mmask16)-1, (int)(scale)))
#define _mm512_mask_i32gather_ps(v1_old, mask, index, addr, scale) \
- (__m512)__builtin_ia32_gathersiv16sf((__v16sf)(__m512)(v1_old), \
- (void const *)(addr), \
- (__v16si)(__m512)(index), \
- (__mmask16)(mask), (int)(scale))
+ ((__m512)__builtin_ia32_gathersiv16sf((__v16sf)(__m512)(v1_old), \
+ (void const *)(addr), \
+ (__v16si)(__m512)(index), \
+ (__mmask16)(mask), (int)(scale)))
#define _mm512_i32gather_epi32(index, addr, scale) \
- (__m512i)__builtin_ia32_gathersiv16si((__v16si)_mm512_undefined_epi32(), \
- (void const *)(addr), \
- (__v16si)(__m512i)(index), \
- (__mmask16)-1, (int)(scale))
+ ((__m512i)__builtin_ia32_gathersiv16si((__v16si)_mm512_undefined_epi32(), \
+ (void const *)(addr), \
+ (__v16si)(__m512i)(index), \
+ (__mmask16)-1, (int)(scale)))
#define _mm512_mask_i32gather_epi32(v1_old, mask, index, addr, scale) \
- (__m512i)__builtin_ia32_gathersiv16si((__v16si)(__m512i)(v1_old), \
- (void const *)(addr), \
- (__v16si)(__m512i)(index), \
- (__mmask16)(mask), (int)(scale))
+ ((__m512i)__builtin_ia32_gathersiv16si((__v16si)(__m512i)(v1_old), \
+ (void const *)(addr), \
+ (__v16si)(__m512i)(index), \
+ (__mmask16)(mask), (int)(scale)))
#define _mm512_i32gather_pd(index, addr, scale) \
- (__m512d)__builtin_ia32_gathersiv8df((__v8df)_mm512_undefined_pd(), \
- (void const *)(addr), \
- (__v8si)(__m256i)(index), (__mmask8)-1, \
- (int)(scale))
+ ((__m512d)__builtin_ia32_gathersiv8df((__v8df)_mm512_undefined_pd(), \
+ (void const *)(addr), \
+ (__v8si)(__m256i)(index), (__mmask8)-1, \
+ (int)(scale)))
#define _mm512_mask_i32gather_pd(v1_old, mask, index, addr, scale) \
- (__m512d)__builtin_ia32_gathersiv8df((__v8df)(__m512d)(v1_old), \
- (void const *)(addr), \
- (__v8si)(__m256i)(index), \
- (__mmask8)(mask), (int)(scale))
+ ((__m512d)__builtin_ia32_gathersiv8df((__v8df)(__m512d)(v1_old), \
+ (void const *)(addr), \
+ (__v8si)(__m256i)(index), \
+ (__mmask8)(mask), (int)(scale)))
#define _mm512_i32gather_epi64(index, addr, scale) \
- (__m512i)__builtin_ia32_gathersiv8di((__v8di)_mm512_undefined_epi32(), \
- (void const *)(addr), \
- (__v8si)(__m256i)(index), (__mmask8)-1, \
- (int)(scale))
+ ((__m512i)__builtin_ia32_gathersiv8di((__v8di)_mm512_undefined_epi32(), \
+ (void const *)(addr), \
+ (__v8si)(__m256i)(index), (__mmask8)-1, \
+ (int)(scale)))
#define _mm512_mask_i32gather_epi64(v1_old, mask, index, addr, scale) \
- (__m512i)__builtin_ia32_gathersiv8di((__v8di)(__m512i)(v1_old), \
- (void const *)(addr), \
- (__v8si)(__m256i)(index), \
- (__mmask8)(mask), (int)(scale))
+ ((__m512i)__builtin_ia32_gathersiv8di((__v8di)(__m512i)(v1_old), \
+ (void const *)(addr), \
+ (__v8si)(__m256i)(index), \
+ (__mmask8)(mask), (int)(scale)))
#define _mm512_i64scatter_ps(addr, index, v1, scale) \
__builtin_ia32_scatterdiv16sf((void *)(addr), (__mmask8)-1, \
@@ -7800,16 +7821,16 @@ _mm_mask_fmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
}
#define _mm_fmadd_round_ss(A, B, C, R) \
- (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(C), (__mmask8)-1, \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(C), (__mmask8)-1, \
+ (int)(R)))
#define _mm_mask_fmadd_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
- (__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
+ (__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
@@ -7822,10 +7843,10 @@ _mm_maskz_fmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
}
#define _mm_maskz_fmadd_round_ss(U, A, B, C, R) \
- (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(C), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(C), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
@@ -7838,10 +7859,10 @@ _mm_mask3_fmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
}
#define _mm_mask3_fmadd_round_ss(W, X, Y, U, R) \
- (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
- (__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
+ (__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
@@ -7854,16 +7875,16 @@ _mm_mask_fmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
}
#define _mm_fmsub_round_ss(A, B, C, R) \
- (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- -(__v4sf)(__m128)(C), (__mmask8)-1, \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ -(__v4sf)(__m128)(C), (__mmask8)-1, \
+ (int)(R)))
#define _mm_mask_fmsub_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
- (__v4sf)(__m128)(A), \
- -(__v4sf)(__m128)(B), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
+ (__v4sf)(__m128)(A), \
+ -(__v4sf)(__m128)(B), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
@@ -7876,10 +7897,10 @@ _mm_maskz_fmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
}
#define _mm_maskz_fmsub_round_ss(U, A, B, C, R) \
- (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- -(__v4sf)(__m128)(C), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ -(__v4sf)(__m128)(C), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
@@ -7892,10 +7913,10 @@ _mm_mask3_fmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
}
#define _mm_mask3_fmsub_round_ss(W, X, Y, U, R) \
- (__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
- (__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
+ (__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fnmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
@@ -7908,16 +7929,16 @@ _mm_mask_fnmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
}
#define _mm_fnmadd_round_ss(A, B, C, R) \
- (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
- -(__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(C), (__mmask8)-1, \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
+ -(__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(C), (__mmask8)-1, \
+ (int)(R)))
#define _mm_mask_fnmadd_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
- -(__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
+ -(__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fnmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
@@ -7930,10 +7951,10 @@ _mm_maskz_fnmadd_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
}
#define _mm_maskz_fnmadd_round_ss(U, A, B, C, R) \
- (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
- -(__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(C), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
+ -(__v4sf)(__m128)(B), \
+ (__v4sf)(__m128)(C), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fnmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
@@ -7946,10 +7967,10 @@ _mm_mask3_fnmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
}
#define _mm_mask3_fnmadd_round_ss(W, X, Y, U, R) \
- (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
- -(__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
+ -(__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_fnmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
@@ -7962,16 +7983,16 @@ _mm_mask_fnmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
}
#define _mm_fnmsub_round_ss(A, B, C, R) \
- (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
- -(__v4sf)(__m128)(B), \
- -(__v4sf)(__m128)(C), (__mmask8)-1, \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
+ -(__v4sf)(__m128)(B), \
+ -(__v4sf)(__m128)(C), (__mmask8)-1, \
+ (int)(R)))
#define _mm_mask_fnmsub_round_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
- -(__v4sf)(__m128)(A), \
- -(__v4sf)(__m128)(B), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
+ -(__v4sf)(__m128)(A), \
+ -(__v4sf)(__m128)(B), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_fnmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
@@ -7984,10 +8005,10 @@ _mm_maskz_fnmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
}
#define _mm_maskz_fnmsub_round_ss(U, A, B, C, R) \
- (__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
- -(__v4sf)(__m128)(B), \
- -(__v4sf)(__m128)(C), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmaddss3_maskz((__v4sf)(__m128)(A), \
+ -(__v4sf)(__m128)(B), \
+ -(__v4sf)(__m128)(C), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask3_fnmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
@@ -8000,10 +8021,10 @@ _mm_mask3_fnmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
}
#define _mm_mask3_fnmsub_round_ss(W, X, Y, U, R) \
- (__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
- -(__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (__mmask8)(U), \
- (int)(R))
+ ((__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
+ -(__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
@@ -8016,16 +8037,16 @@ _mm_mask_fmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
}
#define _mm_fmadd_round_sd(A, B, C, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(C), (__mmask8)-1, \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(C), (__mmask8)-1, \
+ (int)(R)))
#define _mm_mask_fmadd_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
- (__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), (__mmask8)(U), \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
+ (__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
@@ -8038,10 +8059,10 @@ _mm_maskz_fmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
}
#define _mm_maskz_fmadd_round_sd(U, A, B, C, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(C), (__mmask8)(U), \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(C), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
@@ -8054,10 +8075,10 @@ _mm_mask3_fmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
}
#define _mm_mask3_fmadd_round_sd(W, X, Y, U, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
- (__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (__mmask8)(U), \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
+ (__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
@@ -8070,16 +8091,16 @@ _mm_mask_fmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
}
#define _mm_fmsub_round_sd(A, B, C, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- -(__v2df)(__m128d)(C), (__mmask8)-1, \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ -(__v2df)(__m128d)(C), (__mmask8)-1, \
+ (int)(R)))
#define _mm_mask_fmsub_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
- (__v2df)(__m128d)(A), \
- -(__v2df)(__m128d)(B), (__mmask8)(U), \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
+ (__v2df)(__m128d)(A), \
+ -(__v2df)(__m128d)(B), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
@@ -8092,10 +8113,10 @@ _mm_maskz_fmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
}
#define _mm_maskz_fmsub_round_sd(U, A, B, C, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- -(__v2df)(__m128d)(C), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ -(__v2df)(__m128d)(C), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
@@ -8108,10 +8129,10 @@ _mm_mask3_fmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
}
#define _mm_mask3_fmsub_round_sd(W, X, Y, U, R) \
- (__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
- (__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
+ (__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fnmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
@@ -8124,16 +8145,16 @@ _mm_mask_fnmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
}
#define _mm_fnmadd_round_sd(A, B, C, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
- -(__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(C), (__mmask8)-1, \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
+ -(__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(C), (__mmask8)-1, \
+ (int)(R)))
#define _mm_mask_fnmadd_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
- -(__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), (__mmask8)(U), \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
+ -(__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fnmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
@@ -8146,10 +8167,10 @@ _mm_maskz_fnmadd_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
}
#define _mm_maskz_fnmadd_round_sd(U, A, B, C, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
- -(__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(C), (__mmask8)(U), \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
+ -(__v2df)(__m128d)(B), \
+ (__v2df)(__m128d)(C), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fnmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
@@ -8162,10 +8183,10 @@ _mm_mask3_fnmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
}
#define _mm_mask3_fnmadd_round_sd(W, X, Y, U, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
- -(__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (__mmask8)(U), \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
+ -(__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fnmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
@@ -8178,16 +8199,16 @@ _mm_mask_fnmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
}
#define _mm_fnmsub_round_sd(A, B, C, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
- -(__v2df)(__m128d)(B), \
- -(__v2df)(__m128d)(C), (__mmask8)-1, \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
+ -(__v2df)(__m128d)(B), \
+ -(__v2df)(__m128d)(C), (__mmask8)-1, \
+ (int)(R)))
#define _mm_mask_fnmsub_round_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
- -(__v2df)(__m128d)(A), \
- -(__v2df)(__m128d)(B), (__mmask8)(U), \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
+ -(__v2df)(__m128d)(A), \
+ -(__v2df)(__m128d)(B), (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_fnmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
@@ -8200,11 +8221,11 @@ _mm_maskz_fnmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
}
#define _mm_maskz_fnmsub_round_sd(U, A, B, C, R) \
- (__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
- -(__v2df)(__m128d)(B), \
- -(__v2df)(__m128d)(C), \
- (__mmask8)(U), \
- (int)(R))
+ ((__m128d)__builtin_ia32_vfmaddsd3_maskz((__v2df)(__m128d)(A), \
+ -(__v2df)(__m128d)(B), \
+ -(__v2df)(__m128d)(C), \
+ (__mmask8)(U), \
+ (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask3_fnmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
@@ -8217,36 +8238,36 @@ _mm_mask3_fnmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
}
#define _mm_mask3_fnmsub_round_sd(W, X, Y, U, R) \
- (__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
- -(__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
+ -(__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_permutex_pd(X, C) \
- (__m512d)__builtin_ia32_permdf512((__v8df)(__m512d)(X), (int)(C))
+ ((__m512d)__builtin_ia32_permdf512((__v8df)(__m512d)(X), (int)(C)))
#define _mm512_mask_permutex_pd(W, U, X, C) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__v8df)_mm512_permutex_pd((X), (C)), \
- (__v8df)(__m512d)(W))
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_permutex_pd((X), (C)), \
+ (__v8df)(__m512d)(W)))
#define _mm512_maskz_permutex_pd(U, X, C) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__v8df)_mm512_permutex_pd((X), (C)), \
- (__v8df)_mm512_setzero_pd())
+ ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_permutex_pd((X), (C)), \
+ (__v8df)_mm512_setzero_pd()))
#define _mm512_permutex_epi64(X, C) \
- (__m512i)__builtin_ia32_permdi512((__v8di)(__m512i)(X), (int)(C))
+ ((__m512i)__builtin_ia32_permdi512((__v8di)(__m512i)(X), (int)(C)))
#define _mm512_mask_permutex_epi64(W, U, X, C) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_permutex_epi64((X), (C)), \
- (__v8di)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_permutex_epi64((X), (C)), \
+ (__v8di)(__m512i)(W)))
#define _mm512_maskz_permutex_epi64(U, X, C) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_permutex_epi64((X), (C)), \
- (__v8di)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_permutex_epi64((X), (C)), \
+ (__v8di)_mm512_setzero_si512()))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_permutexvar_pd (__m512i __X, __m512d __Y)
@@ -8416,10 +8437,10 @@ _mm512_kxor (__mmask16 __A, __mmask16 __B)
#define _kxor_mask16 _mm512_kxor
#define _kshiftli_mask16(A, I) \
- (__mmask16)__builtin_ia32_kshiftlihi((__mmask16)(A), (unsigned int)(I))
+ ((__mmask16)__builtin_ia32_kshiftlihi((__mmask16)(A), (unsigned int)(I)))
#define _kshiftri_mask16(A, I) \
- (__mmask16)__builtin_ia32_kshiftrihi((__mmask16)(A), (unsigned int)(I))
+ ((__mmask16)__builtin_ia32_kshiftrihi((__mmask16)(A), (unsigned int)(I)))
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_cvtmask16_u32(__mmask16 __A) {
@@ -8538,48 +8559,48 @@ _mm512_maskz_compress_epi32 (__mmask16 __U, __m512i __A)
}
#define _mm_cmp_round_ss_mask(X, Y, P, R) \
- (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (int)(P), \
- (__mmask8)-1, (int)(R))
+ ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (int)(P), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_cmp_round_ss_mask(M, X, Y, P, R) \
- (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (int)(P), \
- (__mmask8)(M), (int)(R))
+ ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (int)(P), \
+ (__mmask8)(M), (int)(R)))
#define _mm_cmp_ss_mask(X, Y, P) \
- (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (int)(P), \
- (__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (int)(P), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_mask_cmp_ss_mask(M, X, Y, P) \
- (__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (int)(P), \
- (__mmask8)(M), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__mmask8)__builtin_ia32_cmpss_mask((__v4sf)(__m128)(X), \
+ (__v4sf)(__m128)(Y), (int)(P), \
+ (__mmask8)(M), \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_cmp_round_sd_mask(X, Y, P, R) \
- (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (int)(P), \
- (__mmask8)-1, (int)(R))
+ ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (int)(P), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_cmp_round_sd_mask(M, X, Y, P, R) \
- (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (int)(P), \
- (__mmask8)(M), (int)(R))
+ ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (int)(P), \
+ (__mmask8)(M), (int)(R)))
#define _mm_cmp_sd_mask(X, Y, P) \
- (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (int)(P), \
- (__mmask8)-1, \
- _MM_FROUND_CUR_DIRECTION)
+ ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (int)(P), \
+ (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
#define _mm_mask_cmp_sd_mask(M, X, Y, P) \
- (__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (int)(P), \
- (__mmask8)(M), \
- _MM_FROUND_CUR_DIRECTION)
+ ((__mmask8)__builtin_ia32_cmpsd_mask((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), (int)(P), \
+ (__mmask8)(M), \
+ _MM_FROUND_CUR_DIRECTION))
/* Bit Test */
@@ -8760,17 +8781,17 @@ _mm_maskz_load_sd (__mmask8 __U, const double* __A)
}
#define _mm512_shuffle_epi32(A, I) \
- (__m512i)__builtin_ia32_pshufd512((__v16si)(__m512i)(A), (int)(I))
+ ((__m512i)__builtin_ia32_pshufd512((__v16si)(__m512i)(A), (int)(I)))
#define _mm512_mask_shuffle_epi32(W, U, A, I) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_shuffle_epi32((A), (I)), \
- (__v16si)(__m512i)(W))
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_shuffle_epi32((A), (I)), \
+ (__v16si)(__m512i)(W)))
#define _mm512_maskz_shuffle_epi32(U, A, I) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_shuffle_epi32((A), (I)), \
- (__v16si)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_shuffle_epi32((A), (I)), \
+ (__v16si)_mm512_setzero_si512()))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_expand_pd (__m512d __W, __mmask8 __U, __m512d __A)
@@ -8901,19 +8922,19 @@ _mm512_maskz_expand_epi32 (__mmask16 __U, __m512i __A)
}
#define _mm512_cvt_roundps_pd(A, R) \
- (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
- (__v8df)_mm512_undefined_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
+ (__v8df)_mm512_undefined_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm512_mask_cvt_roundps_pd(W, U, A, R) \
- (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
- (__v8df)(__m512d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
+ (__v8df)(__m512d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm512_maskz_cvt_roundps_pd(U, A, R) \
- (__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m512d)__builtin_ia32_cvtps2pd512_mask((__v8sf)(__m256)(A), \
+ (__v8df)_mm512_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_cvtps_pd (__m256 __A)
@@ -9010,22 +9031,22 @@ _mm512_mask_compressstoreu_epi32 (void *__P, __mmask16 __U, __m512i __A)
}
#define _mm_cvt_roundsd_ss(A, B, R) \
- (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
- (__v2df)(__m128d)(B), \
- (__v4sf)_mm_undefined_ps(), \
- (__mmask8)-1, (int)(R))
+ ((__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v4sf)_mm_undefined_ps(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_cvt_roundsd_ss(W, U, A, B, R) \
- (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
- (__v2df)(__m128d)(B), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm_maskz_cvt_roundsd_ss(U, A, B, R) \
- (__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
- (__v2df)(__m128d)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U), (int)(R))
+ ((__m128)__builtin_ia32_cvtsd2ss_round_mask((__v4sf)(__m128)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_cvtsd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128d __B)
@@ -9058,47 +9079,47 @@ _mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B)
#ifdef __x86_64__
#define _mm_cvt_roundi64_sd(A, B, R) \
- (__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
- (int)(R))
+ ((__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
+ (int)(R)))
#define _mm_cvt_roundsi64_sd(A, B, R) \
- (__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
- (int)(R))
+ ((__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
+ (int)(R)))
#endif
#define _mm_cvt_roundsi32_ss(A, B, R) \
- (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R))
+ ((__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)))
#define _mm_cvt_roundi32_ss(A, B, R) \
- (__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R))
+ ((__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)))
#ifdef __x86_64__
#define _mm_cvt_roundsi64_ss(A, B, R) \
- (__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
- (int)(R))
+ ((__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
+ (int)(R)))
#define _mm_cvt_roundi64_ss(A, B, R) \
- (__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
- (int)(R))
+ ((__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
+ (int)(R)))
#endif
#define _mm_cvt_roundss_sd(A, B, R) \
- (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
- (__v4sf)(__m128)(B), \
- (__v2df)_mm_undefined_pd(), \
- (__mmask8)-1, (int)(R))
+ ((__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v2df)_mm_undefined_pd(), \
+ (__mmask8)-1, (int)(R)))
#define _mm_mask_cvt_roundss_sd(W, U, A, B, R) \
- (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
- (__v4sf)(__m128)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U), (int)(R)))
#define _mm_maskz_cvt_roundss_sd(U, A, B, R) \
- (__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
- (__v4sf)(__m128)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ ((__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_cvtss_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128 __B)
@@ -9127,8 +9148,8 @@ _mm_cvtu32_sd (__m128d __A, unsigned __B)
#ifdef __x86_64__
#define _mm_cvt_roundu64_sd(A, B, R) \
- (__m128d)__builtin_ia32_cvtusi2sd64((__v2df)(__m128d)(A), \
- (unsigned long long)(B), (int)(R))
+ ((__m128d)__builtin_ia32_cvtusi2sd64((__v2df)(__m128d)(A), \
+ (unsigned long long)(B), (int)(R)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_cvtu64_sd (__m128d __A, unsigned long long __B)
@@ -9139,8 +9160,8 @@ _mm_cvtu64_sd (__m128d __A, unsigned long long __B)
#endif
#define _mm_cvt_roundu32_ss(A, B, R) \
- (__m128)__builtin_ia32_cvtusi2ss32((__v4sf)(__m128)(A), (unsigned int)(B), \
- (int)(R))
+ ((__m128)__builtin_ia32_cvtusi2ss32((__v4sf)(__m128)(A), (unsigned int)(B), \
+ (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_cvtu32_ss (__m128 __A, unsigned __B)
@@ -9151,8 +9172,8 @@ _mm_cvtu32_ss (__m128 __A, unsigned __B)
#ifdef __x86_64__
#define _mm_cvt_roundu64_ss(A, B, R) \
- (__m128)__builtin_ia32_cvtusi2ss64((__v4sf)(__m128)(A), \
- (unsigned long long)(B), (int)(R))
+ ((__m128)__builtin_ia32_cvtusi2ss64((__v4sf)(__m128)(A), \
+ (unsigned long long)(B), (int)(R)))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_cvtu64_ss (__m128 __A, unsigned long long __B)
@@ -9312,43 +9333,43 @@ _mm512_mask_abs_pd(__m512d __W, __mmask8 __K, __m512d __A)
*/
static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_add_epi64(__m512i __W) {
- return __builtin_ia32_reduce_add_q512(__W);
+ return __builtin_reduce_add((__v8di)__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_epi64(__m512i __W) {
- return __builtin_ia32_reduce_mul_q512(__W);
+ return __builtin_reduce_mul((__v8di)__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_and_epi64(__m512i __W) {
- return __builtin_ia32_reduce_and_q512(__W);
+ return __builtin_reduce_and((__v8di)__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_or_epi64(__m512i __W) {
- return __builtin_ia32_reduce_or_q512(__W);
+ return __builtin_reduce_or((__v8di)__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_add_epi64(__mmask8 __M, __m512i __W) {
__W = _mm512_maskz_mov_epi64(__M, __W);
- return __builtin_ia32_reduce_add_q512(__W);
+ return __builtin_reduce_add((__v8di)__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_mul_epi64(__mmask8 __M, __m512i __W) {
__W = _mm512_mask_mov_epi64(_mm512_set1_epi64(1), __M, __W);
- return __builtin_ia32_reduce_mul_q512(__W);
+ return __builtin_reduce_mul((__v8di)__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_and_epi64(__mmask8 __M, __m512i __W) {
- __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __W);
- return __builtin_ia32_reduce_and_q512(__W);
+ __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(-1LL), __M, __W);
+ return __builtin_reduce_and((__v8di)__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_or_epi64(__mmask8 __M, __m512i __W) {
__W = _mm512_maskz_mov_epi64(__M, __W);
- return __builtin_ia32_reduce_or_q512(__W);
+ return __builtin_reduce_or((__v8di)__W);
}
// -0.0 is used to ignore the start value since it is the neutral value of
@@ -9376,46 +9397,46 @@ _mm512_mask_reduce_mul_pd(__mmask8 __M, __m512d __W) {
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_add_epi32(__m512i __W) {
- return __builtin_ia32_reduce_add_d512((__v16si)__W);
+ return __builtin_reduce_add((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_mul_epi32(__m512i __W) {
- return __builtin_ia32_reduce_mul_d512((__v16si)__W);
+ return __builtin_reduce_mul((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_and_epi32(__m512i __W) {
- return __builtin_ia32_reduce_and_d512((__v16si)__W);
+ return __builtin_reduce_and((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_or_epi32(__m512i __W) {
- return __builtin_ia32_reduce_or_d512((__v16si)__W);
+ return __builtin_reduce_or((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_add_epi32( __mmask16 __M, __m512i __W) {
__W = _mm512_maskz_mov_epi32(__M, __W);
- return __builtin_ia32_reduce_add_d512((__v16si)__W);
+ return __builtin_reduce_add((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_mul_epi32( __mmask16 __M, __m512i __W) {
__W = _mm512_mask_mov_epi32(_mm512_set1_epi32(1), __M, __W);
- return __builtin_ia32_reduce_mul_d512((__v16si)__W);
+ return __builtin_reduce_mul((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_and_epi32( __mmask16 __M, __m512i __W) {
- __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __W);
- return __builtin_ia32_reduce_and_d512((__v16si)__W);
+ __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(-1), __M, __W);
+ return __builtin_reduce_and((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_or_epi32(__mmask16 __M, __m512i __W) {
__W = _mm512_maskz_mov_epi32(__M, __W);
- return __builtin_ia32_reduce_or_d512((__v16si)__W);
+ return __builtin_reduce_or((__v16si)__W);
}
static __inline__ float __DEFAULT_FN_ATTRS512
@@ -9442,89 +9463,89 @@ _mm512_mask_reduce_mul_ps(__mmask16 __M, __m512 __W) {
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_reduce_max_epi64(__m512i __V) {
- return __builtin_ia32_reduce_smax_q512(__V);
+ return __builtin_reduce_max((__v8di)__V);
}
static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
_mm512_reduce_max_epu64(__m512i __V) {
- return __builtin_ia32_reduce_umax_q512(__V);
+ return __builtin_reduce_max((__v8du)__V);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_reduce_min_epi64(__m512i __V) {
- return __builtin_ia32_reduce_smin_q512(__V);
+ return __builtin_reduce_min((__v8di)__V);
}
static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
_mm512_reduce_min_epu64(__m512i __V) {
- return __builtin_ia32_reduce_umin_q512(__V);
+ return __builtin_reduce_min((__v8du)__V);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_max_epi64(__mmask8 __M, __m512i __V) {
__V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-__LONG_LONG_MAX__ - 1LL), __M, __V);
- return __builtin_ia32_reduce_smax_q512(__V);
+ return __builtin_reduce_max((__v8di)__V);
}
static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_max_epu64(__mmask8 __M, __m512i __V) {
__V = _mm512_maskz_mov_epi64(__M, __V);
- return __builtin_ia32_reduce_umax_q512(__V);
+ return __builtin_reduce_max((__v8du)__V);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_epi64(__mmask8 __M, __m512i __V) {
__V = _mm512_mask_mov_epi64(_mm512_set1_epi64(__LONG_LONG_MAX__), __M, __V);
- return __builtin_ia32_reduce_smin_q512(__V);
+ return __builtin_reduce_min((__v8di)__V);
}
static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __V) {
- __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __V);
- return __builtin_ia32_reduce_umin_q512(__V);
+ __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-1LL), __M, __V);
+ return __builtin_reduce_min((__v8du)__V);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_max_epi32(__m512i __V) {
- return __builtin_ia32_reduce_smax_d512((__v16si)__V);
+ return __builtin_reduce_max((__v16si)__V);
}
static __inline__ unsigned int __DEFAULT_FN_ATTRS512
_mm512_reduce_max_epu32(__m512i __V) {
- return __builtin_ia32_reduce_umax_d512((__v16si)__V);
+ return __builtin_reduce_max((__v16su)__V);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_min_epi32(__m512i __V) {
- return __builtin_ia32_reduce_smin_d512((__v16si)__V);
+ return __builtin_reduce_min((__v16si)__V);
}
static __inline__ unsigned int __DEFAULT_FN_ATTRS512
_mm512_reduce_min_epu32(__m512i __V) {
- return __builtin_ia32_reduce_umin_d512((__v16si)__V);
+ return __builtin_reduce_min((__v16su)__V);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_max_epi32(__mmask16 __M, __m512i __V) {
__V = _mm512_mask_mov_epi32(_mm512_set1_epi32(-__INT_MAX__ - 1), __M, __V);
- return __builtin_ia32_reduce_smax_d512((__v16si)__V);
+ return __builtin_reduce_max((__v16si)__V);
}
static __inline__ unsigned int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __V) {
__V = _mm512_maskz_mov_epi32(__M, __V);
- return __builtin_ia32_reduce_umax_d512((__v16si)__V);
+ return __builtin_reduce_max((__v16su)__V);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_epi32(__mmask16 __M, __m512i __V) {
__V = _mm512_mask_mov_epi32(_mm512_set1_epi32(__INT_MAX__), __M, __V);
- return __builtin_ia32_reduce_smin_d512((__v16si)__V);
+ return __builtin_reduce_min((__v16si)__V);
}
static __inline__ unsigned int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __V) {
- __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __V);
- return __builtin_ia32_reduce_umin_d512((__v16si)__V);
+ __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(-1), __M, __V);
+ return __builtin_reduce_min((__v16su)__V);
}
static __inline__ double __DEFAULT_FN_ATTRS512
@@ -9594,7 +9615,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VGATHERDPD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
@@ -9602,7 +9623,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
/// dst[i+63:i] := MEM[addr+63:addr]
/// ENDFOR
/// dst[MAX:512] := 0
-/// \endoperation
+/// \endcode
#define _mm512_i32logather_pd(vindex, base_addr, scale) \
_mm512_i32gather_pd(_mm512_castsi512_si256(vindex), (base_addr), (scale))
@@ -9614,7 +9635,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VGATHERDPD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
@@ -9626,7 +9647,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
/// FI
/// ENDFOR
/// dst[MAX:512] := 0
-/// \endoperation
+/// \endcode
#define _mm512_mask_i32logather_pd(src, mask, vindex, base_addr, scale) \
_mm512_mask_i32gather_pd((src), (mask), _mm512_castsi512_si256(vindex), \
(base_addr), (scale))
@@ -9637,7 +9658,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VPGATHERDQ </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
@@ -9645,7 +9666,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
/// dst[i+63:i] := MEM[addr+63:addr]
/// ENDFOR
/// dst[MAX:512] := 0
-/// \endoperation
+/// \endcode
#define _mm512_i32logather_epi64(vindex, base_addr, scale) \
_mm512_i32gather_epi64(_mm512_castsi512_si256(vindex), (base_addr), (scale))
@@ -9656,7 +9677,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VPGATHERDQ </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
@@ -9668,7 +9689,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
/// FI
/// ENDFOR
/// dst[MAX:512] := 0
-/// \endoperation
+/// \endcode
#define _mm512_mask_i32logather_epi64(src, mask, vindex, base_addr, scale) \
_mm512_mask_i32gather_epi64((src), (mask), _mm512_castsi512_si256(vindex), \
(base_addr), (scale))
@@ -9679,14 +9700,14 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VSCATTERDPD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
/// MEM[addr+63:addr] := v1[i+63:i]
/// ENDFOR
-/// \endoperation
+/// \endcode
#define _mm512_i32loscatter_pd(base_addr, vindex, v1, scale) \
_mm512_i32scatter_pd((base_addr), _mm512_castsi512_si256(vindex), (v1), (scale))
@@ -9698,7 +9719,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VSCATTERDPD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
@@ -9707,7 +9728,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
/// MEM[addr+63:addr] := a[i+63:i]
/// FI
/// ENDFOR
-/// \endoperation
+/// \endcode
#define _mm512_mask_i32loscatter_pd(base_addr, mask, vindex, v1, scale) \
_mm512_mask_i32scatter_pd((base_addr), (mask), \
_mm512_castsi512_si256(vindex), (v1), (scale))
@@ -9718,14 +9739,14 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VPSCATTERDQ </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
/// MEM[addr+63:addr] := a[i+63:i]
/// ENDFOR
-/// \endoperation
+/// \endcode
#define _mm512_i32loscatter_epi64(base_addr, vindex, v1, scale) \
_mm512_i32scatter_epi64((base_addr), \
_mm512_castsi512_si256(vindex), (v1), (scale))
@@ -9737,7 +9758,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VPSCATTERDQ </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
@@ -9746,7 +9767,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
/// MEM[addr+63:addr] := a[i+63:i]
/// FI
/// ENDFOR
-/// \endoperation
+/// \endcode
#define _mm512_mask_i32loscatter_epi64(base_addr, mask, vindex, v1, scale) \
_mm512_mask_i32scatter_epi64((base_addr), (mask), \
_mm512_castsi512_si256(vindex), (v1), (scale))
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512fp16intrin.h b/contrib/llvm-project/clang/lib/Headers/avx512fp16intrin.h
new file mode 100644
index 000000000000..4123f10c3951
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/avx512fp16intrin.h
@@ -0,0 +1,3352 @@
+/*===----------- avx512fp16intrin.h - AVX512-FP16 intrinsics ---------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <avx512fp16intrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifdef __SSE2__
+
+#ifndef __AVX512FP16INTRIN_H
+#define __AVX512FP16INTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+typedef _Float16 __v32hf __attribute__((__vector_size__(64), __aligned__(64)));
+typedef _Float16 __m512h __attribute__((__vector_size__(64), __aligned__(64)));
+typedef _Float16 __m512h_u __attribute__((__vector_size__(64), __aligned__(1)));
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS512 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512fp16,evex512"), __min_vector_width__(512)))
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512fp16,no-evex512"), \
+ __min_vector_width__(256)))
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512fp16,no-evex512"), \
+ __min_vector_width__(128)))
+
+static __inline__ _Float16 __DEFAULT_FN_ATTRS512 _mm512_cvtsh_h(__m512h __a) {
+ return __a[0];
+}
+
+static __inline __m128h __DEFAULT_FN_ATTRS128 _mm_setzero_ph(void) {
+ return (__m128h){0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
+}
+
+static __inline __m256h __DEFAULT_FN_ATTRS256 _mm256_setzero_ph(void) {
+ return (__m256h){0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_undefined_ph(void) {
+ return (__m256h)__builtin_ia32_undef256();
+}
+
+static __inline __m512h __DEFAULT_FN_ATTRS512 _mm512_setzero_ph(void) {
+ return (__m512h){0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_undefined_ph(void) {
+ return (__m128h)__builtin_ia32_undef128();
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_undefined_ph(void) {
+ return (__m512h)__builtin_ia32_undef512();
+}
+
+static __inline __m512h __DEFAULT_FN_ATTRS512 _mm512_set1_ph(_Float16 __h) {
+ return (__m512h)(__v32hf){__h, __h, __h, __h, __h, __h, __h, __h,
+ __h, __h, __h, __h, __h, __h, __h, __h,
+ __h, __h, __h, __h, __h, __h, __h, __h,
+ __h, __h, __h, __h, __h, __h, __h, __h};
+}
+
+static __inline __m512h __DEFAULT_FN_ATTRS512
+_mm512_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4,
+ _Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8,
+ _Float16 __h9, _Float16 __h10, _Float16 __h11, _Float16 __h12,
+ _Float16 __h13, _Float16 __h14, _Float16 __h15, _Float16 __h16,
+ _Float16 __h17, _Float16 __h18, _Float16 __h19, _Float16 __h20,
+ _Float16 __h21, _Float16 __h22, _Float16 __h23, _Float16 __h24,
+ _Float16 __h25, _Float16 __h26, _Float16 __h27, _Float16 __h28,
+ _Float16 __h29, _Float16 __h30, _Float16 __h31, _Float16 __h32) {
+ return (__m512h)(__v32hf){__h32, __h31, __h30, __h29, __h28, __h27, __h26,
+ __h25, __h24, __h23, __h22, __h21, __h20, __h19,
+ __h18, __h17, __h16, __h15, __h14, __h13, __h12,
+ __h11, __h10, __h9, __h8, __h7, __h6, __h5,
+ __h4, __h3, __h2, __h1};
+}
+
+#define _mm512_setr_ph(h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11, h12, h13, \
+ h14, h15, h16, h17, h18, h19, h20, h21, h22, h23, h24, \
+ h25, h26, h27, h28, h29, h30, h31, h32) \
+ _mm512_set_ph((h32), (h31), (h30), (h29), (h28), (h27), (h26), (h25), (h24), \
+ (h23), (h22), (h21), (h20), (h19), (h18), (h17), (h16), (h15), \
+ (h14), (h13), (h12), (h11), (h10), (h9), (h8), (h7), (h6), \
+ (h5), (h4), (h3), (h2), (h1))
+
+static __inline __m512h __DEFAULT_FN_ATTRS512
+_mm512_set1_pch(_Float16 _Complex h) {
+ return (__m512h)_mm512_set1_ps(__builtin_bit_cast(float, h));
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_castph_ps(__m128h __a) {
+ return (__m128)__a;
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_castph_ps(__m256h __a) {
+ return (__m256)__a;
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS512 _mm512_castph_ps(__m512h __a) {
+ return (__m512)__a;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_castph_pd(__m128h __a) {
+ return (__m128d)__a;
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_castph_pd(__m256h __a) {
+ return (__m256d)__a;
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS512 _mm512_castph_pd(__m512h __a) {
+ return (__m512d)__a;
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_castph_si128(__m128h __a) {
+ return (__m128i)__a;
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_castph_si256(__m256h __a) {
+ return (__m256i)__a;
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_castph_si512(__m512h __a) {
+ return (__m512i)__a;
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_castps_ph(__m128 __a) {
+ return (__m128h)__a;
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_castps_ph(__m256 __a) {
+ return (__m256h)__a;
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_castps_ph(__m512 __a) {
+ return (__m512h)__a;
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_castpd_ph(__m128d __a) {
+ return (__m128h)__a;
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_castpd_ph(__m256d __a) {
+ return (__m256h)__a;
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_castpd_ph(__m512d __a) {
+ return (__m512h)__a;
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_castsi128_ph(__m128i __a) {
+ return (__m128h)__a;
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_castsi256_ph(__m256i __a) {
+ return (__m256h)__a;
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_castsi512_ph(__m512i __a) {
+ return (__m512h)__a;
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS256
+_mm256_castph256_ph128(__m256h __a) {
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS512
+_mm512_castph512_ph128(__m512h __a) {
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS512
+_mm512_castph512_ph256(__m512h __a) {
+ return __builtin_shufflevector(__a, __a, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_castph128_ph256(__m128h __a) {
+ return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a),
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_castph128_ph512(__m128h __a) {
+ __m256h __b = __builtin_nondeterministic_value(__b);
+ return __builtin_shufflevector(
+ __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a),
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15),
+ __b, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_castph256_ph512(__m256h __a) {
+ return __builtin_shufflevector(__a, __builtin_nondeterministic_value(__a), 0,
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31);
+}
+
+/// Constructs a 256-bit floating-point vector of [16 x half] from a
+/// 128-bit floating-point vector of [8 x half]. The lower 128 bits
+/// contain the value of the source vector. The upper 384 bits are set
+/// to zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 128-bit vector of [8 x half].
+/// \returns A 512-bit floating-point vector of [16 x half]. The lower 128 bits
+/// contain the value of the parameter. The upper 384 bits are set to zero.
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_zextph128_ph256(__m128h __a) {
+ return __builtin_shufflevector(__a, (__v8hf)_mm_setzero_ph(), 0, 1, 2, 3, 4,
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+}
+
+/// Constructs a 512-bit floating-point vector of [32 x half] from a
+/// 128-bit floating-point vector of [8 x half]. The lower 128 bits
+/// contain the value of the source vector. The upper 384 bits are set
+/// to zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 128-bit vector of [8 x half].
+/// \returns A 512-bit floating-point vector of [32 x half]. The lower 128 bits
+/// contain the value of the parameter. The upper 384 bits are set to zero.
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_zextph128_ph512(__m128h __a) {
+ return __builtin_shufflevector(
+ __a, (__v8hf)_mm_setzero_ph(), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 8, 9, 10, 11, 12, 13, 14, 15, 8, 9, 10, 11, 12, 13, 14, 15);
+}
+
+/// Constructs a 512-bit floating-point vector of [32 x half] from a
+/// 256-bit floating-point vector of [16 x half]. The lower 256 bits
+/// contain the value of the source vector. The upper 256 bits are set
+/// to zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 256-bit vector of [16 x half].
+/// \returns A 512-bit floating-point vector of [32 x half]. The lower 256 bits
+/// contain the value of the parameter. The upper 256 bits are set to zero.
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_zextph256_ph512(__m256h __a) {
+ return __builtin_shufflevector(__a, (__v16hf)_mm256_setzero_ph(), 0, 1, 2, 3,
+ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+ 29, 30, 31);
+}
+
+#define _mm_comi_round_sh(A, B, P, R) \
+ __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, (int)(P), (int)(R))
+
+#define _mm_comi_sh(A, B, pred) \
+ _mm_comi_round_sh((A), (B), (pred), _MM_FROUND_CUR_DIRECTION)
+
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comieq_sh(__m128h A,
+ __m128h B) {
+ return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_EQ_OS,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comilt_sh(__m128h A,
+ __m128h B) {
+ return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_LT_OS,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comile_sh(__m128h A,
+ __m128h B) {
+ return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_LE_OS,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comigt_sh(__m128h A,
+ __m128h B) {
+ return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_GT_OS,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comige_sh(__m128h A,
+ __m128h B) {
+ return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_GE_OS,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comineq_sh(__m128h A,
+ __m128h B) {
+ return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_NEQ_US,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomieq_sh(__m128h A,
+ __m128h B) {
+ return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_EQ_OQ,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomilt_sh(__m128h A,
+ __m128h B) {
+ return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_LT_OQ,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomile_sh(__m128h A,
+ __m128h B) {
+ return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_LE_OQ,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomigt_sh(__m128h A,
+ __m128h B) {
+ return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_GT_OQ,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomige_sh(__m128h A,
+ __m128h B) {
+ return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_GE_OQ,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomineq_sh(__m128h A,
+ __m128h B) {
+ return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_NEQ_UQ,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_add_ph(__m512h __A,
+ __m512h __B) {
+ return (__m512h)((__v32hf)__A + (__v32hf)__B);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_add_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
+ return (__m512h)__builtin_ia32_selectph_512(
+ (__mmask32)__U, (__v32hf)_mm512_add_ph(__A, __B), (__v32hf)__W);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_add_ph(__mmask32 __U, __m512h __A, __m512h __B) {
+ return (__m512h)__builtin_ia32_selectph_512((__mmask32)__U,
+ (__v32hf)_mm512_add_ph(__A, __B),
+ (__v32hf)_mm512_setzero_ph());
+}
+
+#define _mm512_add_round_ph(A, B, R) \
+ ((__m512h)__builtin_ia32_addph512((__v32hf)(__m512h)(A), \
+ (__v32hf)(__m512h)(B), (int)(R)))
+
+#define _mm512_mask_add_round_ph(W, U, A, B, R) \
+ ((__m512h)__builtin_ia32_selectph_512( \
+ (__mmask32)(U), (__v32hf)_mm512_add_round_ph((A), (B), (R)), \
+ (__v32hf)(__m512h)(W)))
+
+#define _mm512_maskz_add_round_ph(U, A, B, R) \
+ ((__m512h)__builtin_ia32_selectph_512( \
+ (__mmask32)(U), (__v32hf)_mm512_add_round_ph((A), (B), (R)), \
+ (__v32hf)_mm512_setzero_ph()))
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_sub_ph(__m512h __A,
+ __m512h __B) {
+ return (__m512h)((__v32hf)__A - (__v32hf)__B);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_sub_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
+ return (__m512h)__builtin_ia32_selectph_512(
+ (__mmask32)__U, (__v32hf)_mm512_sub_ph(__A, __B), (__v32hf)__W);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_sub_ph(__mmask32 __U, __m512h __A, __m512h __B) {
+ return (__m512h)__builtin_ia32_selectph_512((__mmask32)__U,
+ (__v32hf)_mm512_sub_ph(__A, __B),
+ (__v32hf)_mm512_setzero_ph());
+}
+
+#define _mm512_sub_round_ph(A, B, R) \
+ ((__m512h)__builtin_ia32_subph512((__v32hf)(__m512h)(A), \
+ (__v32hf)(__m512h)(B), (int)(R)))
+
+#define _mm512_mask_sub_round_ph(W, U, A, B, R) \
+ ((__m512h)__builtin_ia32_selectph_512( \
+ (__mmask32)(U), (__v32hf)_mm512_sub_round_ph((A), (B), (R)), \
+ (__v32hf)(__m512h)(W)))
+
+#define _mm512_maskz_sub_round_ph(U, A, B, R) \
+ ((__m512h)__builtin_ia32_selectph_512( \
+ (__mmask32)(U), (__v32hf)_mm512_sub_round_ph((A), (B), (R)), \
+ (__v32hf)_mm512_setzero_ph()))
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_mul_ph(__m512h __A,
+ __m512h __B) {
+ return (__m512h)((__v32hf)__A * (__v32hf)__B);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_mul_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
+ return (__m512h)__builtin_ia32_selectph_512(
+ (__mmask32)__U, (__v32hf)_mm512_mul_ph(__A, __B), (__v32hf)__W);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_mul_ph(__mmask32 __U, __m512h __A, __m512h __B) {
+ return (__m512h)__builtin_ia32_selectph_512((__mmask32)__U,
+ (__v32hf)_mm512_mul_ph(__A, __B),
+ (__v32hf)_mm512_setzero_ph());
+}
+
+#define _mm512_mul_round_ph(A, B, R) \
+ ((__m512h)__builtin_ia32_mulph512((__v32hf)(__m512h)(A), \
+ (__v32hf)(__m512h)(B), (int)(R)))
+
+#define _mm512_mask_mul_round_ph(W, U, A, B, R) \
+ ((__m512h)__builtin_ia32_selectph_512( \
+ (__mmask32)(U), (__v32hf)_mm512_mul_round_ph((A), (B), (R)), \
+ (__v32hf)(__m512h)(W)))
+
+#define _mm512_maskz_mul_round_ph(U, A, B, R) \
+ ((__m512h)__builtin_ia32_selectph_512( \
+ (__mmask32)(U), (__v32hf)_mm512_mul_round_ph((A), (B), (R)), \
+ (__v32hf)_mm512_setzero_ph()))
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_div_ph(__m512h __A,
+ __m512h __B) {
+ return (__m512h)((__v32hf)__A / (__v32hf)__B);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_div_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
+ return (__m512h)__builtin_ia32_selectph_512(
+ (__mmask32)__U, (__v32hf)_mm512_div_ph(__A, __B), (__v32hf)__W);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_div_ph(__mmask32 __U, __m512h __A, __m512h __B) {
+ return (__m512h)__builtin_ia32_selectph_512((__mmask32)__U,
+ (__v32hf)_mm512_div_ph(__A, __B),
+ (__v32hf)_mm512_setzero_ph());
+}
+
+#define _mm512_div_round_ph(A, B, R) \
+ ((__m512h)__builtin_ia32_divph512((__v32hf)(__m512h)(A), \
+ (__v32hf)(__m512h)(B), (int)(R)))
+
+#define _mm512_mask_div_round_ph(W, U, A, B, R) \
+ ((__m512h)__builtin_ia32_selectph_512( \
+ (__mmask32)(U), (__v32hf)_mm512_div_round_ph((A), (B), (R)), \
+ (__v32hf)(__m512h)(W)))
+
+#define _mm512_maskz_div_round_ph(U, A, B, R) \
+ ((__m512h)__builtin_ia32_selectph_512( \
+ (__mmask32)(U), (__v32hf)_mm512_div_round_ph((A), (B), (R)), \
+ (__v32hf)_mm512_setzero_ph()))
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_min_ph(__m512h __A,
+ __m512h __B) {
+ return (__m512h)__builtin_ia32_minph512((__v32hf)__A, (__v32hf)__B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_min_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
+ return (__m512h)__builtin_ia32_selectph_512(
+ (__mmask32)__U, (__v32hf)_mm512_min_ph(__A, __B), (__v32hf)__W);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_min_ph(__mmask32 __U, __m512h __A, __m512h __B) {
+ return (__m512h)__builtin_ia32_selectph_512((__mmask32)__U,
+ (__v32hf)_mm512_min_ph(__A, __B),
+ (__v32hf)_mm512_setzero_ph());
+}
+
+#define _mm512_min_round_ph(A, B, R) \
+ ((__m512h)__builtin_ia32_minph512((__v32hf)(__m512h)(A), \
+ (__v32hf)(__m512h)(B), (int)(R)))
+
+#define _mm512_mask_min_round_ph(W, U, A, B, R) \
+ ((__m512h)__builtin_ia32_selectph_512( \
+ (__mmask32)(U), (__v32hf)_mm512_min_round_ph((A), (B), (R)), \
+ (__v32hf)(__m512h)(W)))
+
+#define _mm512_maskz_min_round_ph(U, A, B, R) \
+ ((__m512h)__builtin_ia32_selectph_512( \
+ (__mmask32)(U), (__v32hf)_mm512_min_round_ph((A), (B), (R)), \
+ (__v32hf)_mm512_setzero_ph()))
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_max_ph(__m512h __A,
+ __m512h __B) {
+ return (__m512h)__builtin_ia32_maxph512((__v32hf)__A, (__v32hf)__B,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_max_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
+ return (__m512h)__builtin_ia32_selectph_512(
+ (__mmask32)__U, (__v32hf)_mm512_max_ph(__A, __B), (__v32hf)__W);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_max_ph(__mmask32 __U, __m512h __A, __m512h __B) {
+ return (__m512h)__builtin_ia32_selectph_512((__mmask32)__U,
+ (__v32hf)_mm512_max_ph(__A, __B),
+ (__v32hf)_mm512_setzero_ph());
+}
+
+#define _mm512_max_round_ph(A, B, R) \
+ ((__m512h)__builtin_ia32_maxph512((__v32hf)(__m512h)(A), \
+ (__v32hf)(__m512h)(B), (int)(R)))
+
+#define _mm512_mask_max_round_ph(W, U, A, B, R) \
+ ((__m512h)__builtin_ia32_selectph_512( \
+ (__mmask32)(U), (__v32hf)_mm512_max_round_ph((A), (B), (R)), \
+ (__v32hf)(__m512h)(W)))
+
+#define _mm512_maskz_max_round_ph(U, A, B, R) \
+ ((__m512h)__builtin_ia32_selectph_512( \
+ (__mmask32)(U), (__v32hf)_mm512_max_round_ph((A), (B), (R)), \
+ (__v32hf)_mm512_setzero_ph()))
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_abs_ph(__m512h __A) {
+ return (__m512h)_mm512_and_epi32(_mm512_set1_epi32(0x7FFF7FFF), (__m512i)__A);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_conj_pch(__m512h __A) {
+ return (__m512h)_mm512_xor_ps((__m512)__A, _mm512_set1_ps(-0.0f));
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_conj_pch(__m512h __W, __mmask16 __U, __m512h __A) {
+ return (__m512h)__builtin_ia32_selectps_512(
+ (__mmask16)__U, (__v16sf)_mm512_conj_pch(__A), (__v16sf)__W);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_conj_pch(__mmask16 __U, __m512h __A) {
+ return (__m512h)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_conj_pch(__A),
+ (__v16sf)_mm512_setzero_ps());
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_add_sh(__m128h __A,
+ __m128h __B) {
+ __A[0] += __B[0];
+ return __A;
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_add_sh(__m128h __W,
+ __mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ __A = _mm_add_sh(__A, __B);
+ return __builtin_ia32_selectsh_128(__U, __A, __W);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_add_sh(__mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ __A = _mm_add_sh(__A, __B);
+ return __builtin_ia32_selectsh_128(__U, __A, _mm_setzero_ph());
+}
+
+#define _mm_add_round_sh(A, B, R) \
+ ((__m128h)__builtin_ia32_addsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)-1, (int)(R)))
+
+#define _mm_mask_add_round_sh(W, U, A, B, R) \
+ ((__m128h)__builtin_ia32_addsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_add_round_sh(U, A, B, R) \
+ ((__m128h)__builtin_ia32_addsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_sub_sh(__m128h __A,
+ __m128h __B) {
+ __A[0] -= __B[0];
+ return __A;
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_sub_sh(__m128h __W,
+ __mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ __A = _mm_sub_sh(__A, __B);
+ return __builtin_ia32_selectsh_128(__U, __A, __W);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_sub_sh(__mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ __A = _mm_sub_sh(__A, __B);
+ return __builtin_ia32_selectsh_128(__U, __A, _mm_setzero_ph());
+}
+
+#define _mm_sub_round_sh(A, B, R) \
+ ((__m128h)__builtin_ia32_subsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)-1, (int)(R)))
+
+#define _mm_mask_sub_round_sh(W, U, A, B, R) \
+ ((__m128h)__builtin_ia32_subsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_sub_round_sh(U, A, B, R) \
+ ((__m128h)__builtin_ia32_subsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mul_sh(__m128h __A,
+ __m128h __B) {
+ __A[0] *= __B[0];
+ return __A;
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_mul_sh(__m128h __W,
+ __mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ __A = _mm_mul_sh(__A, __B);
+ return __builtin_ia32_selectsh_128(__U, __A, __W);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_mul_sh(__mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ __A = _mm_mul_sh(__A, __B);
+ return __builtin_ia32_selectsh_128(__U, __A, _mm_setzero_ph());
+}
+
+#define _mm_mul_round_sh(A, B, R) \
+ ((__m128h)__builtin_ia32_mulsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)-1, (int)(R)))
+
+#define _mm_mask_mul_round_sh(W, U, A, B, R) \
+ ((__m128h)__builtin_ia32_mulsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_mul_round_sh(U, A, B, R) \
+ ((__m128h)__builtin_ia32_mulsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_div_sh(__m128h __A,
+ __m128h __B) {
+ __A[0] /= __B[0];
+ return __A;
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_div_sh(__m128h __W,
+ __mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ __A = _mm_div_sh(__A, __B);
+ return __builtin_ia32_selectsh_128(__U, __A, __W);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_div_sh(__mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ __A = _mm_div_sh(__A, __B);
+ return __builtin_ia32_selectsh_128(__U, __A, _mm_setzero_ph());
+}
+
+#define _mm_div_round_sh(A, B, R) \
+ ((__m128h)__builtin_ia32_divsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)-1, (int)(R)))
+
+#define _mm_mask_div_round_sh(W, U, A, B, R) \
+ ((__m128h)__builtin_ia32_divsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_div_round_sh(U, A, B, R) \
+ ((__m128h)__builtin_ia32_divsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_min_sh(__m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_minsh_round_mask(
+ (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_min_sh(__m128h __W,
+ __mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_minsh_round_mask((__v8hf)__A, (__v8hf)__B,
+ (__v8hf)__W, (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_min_sh(__mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_minsh_round_mask(
+ (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_min_round_sh(A, B, R) \
+ ((__m128h)__builtin_ia32_minsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)-1, (int)(R)))
+
+#define _mm_mask_min_round_sh(W, U, A, B, R) \
+ ((__m128h)__builtin_ia32_minsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_min_round_sh(U, A, B, R) \
+ ((__m128h)__builtin_ia32_minsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_max_sh(__m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_maxsh_round_mask(
+ (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_max_sh(__m128h __W,
+ __mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_maxsh_round_mask((__v8hf)__A, (__v8hf)__B,
+ (__v8hf)__W, (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_max_sh(__mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_maxsh_round_mask(
+ (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_max_round_sh(A, B, R) \
+ ((__m128h)__builtin_ia32_maxsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)-1, (int)(R)))
+
+#define _mm_mask_max_round_sh(W, U, A, B, R) \
+ ((__m128h)__builtin_ia32_maxsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_max_round_sh(U, A, B, R) \
+ ((__m128h)__builtin_ia32_maxsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm512_cmp_round_ph_mask(A, B, P, R) \
+ ((__mmask32)__builtin_ia32_cmpph512_mask((__v32hf)(__m512h)(A), \
+ (__v32hf)(__m512h)(B), (int)(P), \
+ (__mmask32)-1, (int)(R)))
+
+#define _mm512_mask_cmp_round_ph_mask(U, A, B, P, R) \
+ ((__mmask32)__builtin_ia32_cmpph512_mask((__v32hf)(__m512h)(A), \
+ (__v32hf)(__m512h)(B), (int)(P), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_cmp_ph_mask(A, B, P) \
+ _mm512_cmp_round_ph_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm512_mask_cmp_ph_mask(U, A, B, P) \
+ _mm512_mask_cmp_round_ph_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_cmp_round_sh_mask(X, Y, P, R) \
+ ((__mmask8)__builtin_ia32_cmpsh_mask((__v8hf)(__m128h)(X), \
+ (__v8hf)(__m128h)(Y), (int)(P), \
+ (__mmask8)-1, (int)(R)))
+
+#define _mm_mask_cmp_round_sh_mask(M, X, Y, P, R) \
+ ((__mmask8)__builtin_ia32_cmpsh_mask((__v8hf)(__m128h)(X), \
+ (__v8hf)(__m128h)(Y), (int)(P), \
+ (__mmask8)(M), (int)(R)))
+
+#define _mm_cmp_sh_mask(X, Y, P) \
+ ((__mmask8)__builtin_ia32_cmpsh_mask( \
+ (__v8hf)(__m128h)(X), (__v8hf)(__m128h)(Y), (int)(P), (__mmask8)-1, \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_mask_cmp_sh_mask(M, X, Y, P) \
+ ((__mmask8)__builtin_ia32_cmpsh_mask( \
+ (__v8hf)(__m128h)(X), (__v8hf)(__m128h)(Y), (int)(P), (__mmask8)(M), \
+ _MM_FROUND_CUR_DIRECTION))
+// loads with vmovsh:
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_load_sh(void const *__dp) {
+ struct __mm_load_sh_struct {
+ _Float16 __u;
+ } __attribute__((__packed__, __may_alias__));
+ _Float16 __u = ((const struct __mm_load_sh_struct *)__dp)->__u;
+ return (__m128h){__u, 0, 0, 0, 0, 0, 0, 0};
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_load_sh(__m128h __W, __mmask8 __U, const void *__A) {
+ __m128h src = (__v8hf)__builtin_shufflevector(
+ (__v8hf)__W, (__v8hf)_mm_setzero_ph(), 0, 8, 8, 8, 8, 8, 8, 8);
+
+ return (__m128h)__builtin_ia32_loadsh128_mask((const __v8hf *)__A, src, __U & 1);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_load_sh(__mmask8 __U, const void *__A) {
+ return (__m128h)__builtin_ia32_loadsh128_mask(
+ (const __v8hf *)__A, (__v8hf)_mm_setzero_ph(), __U & 1);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_load_ph(void const *__p) {
+ return *(const __m512h *)__p;
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_load_ph(void const *__p) {
+ return *(const __m256h *)__p;
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_load_ph(void const *__p) {
+ return *(const __m128h *)__p;
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_loadu_ph(void const *__p) {
+ struct __loadu_ph {
+ __m512h_u __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((const struct __loadu_ph *)__p)->__v;
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_loadu_ph(void const *__p) {
+ struct __loadu_ph {
+ __m256h_u __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((const struct __loadu_ph *)__p)->__v;
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_loadu_ph(void const *__p) {
+ struct __loadu_ph {
+ __m128h_u __v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((const struct __loadu_ph *)__p)->__v;
+}
+
+// stores with vmovsh:
+static __inline__ void __DEFAULT_FN_ATTRS128 _mm_store_sh(void *__dp,
+ __m128h __a) {
+ struct __mm_store_sh_struct {
+ _Float16 __u;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_store_sh_struct *)__dp)->__u = __a[0];
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_store_sh(void *__W,
+ __mmask8 __U,
+ __m128h __A) {
+ __builtin_ia32_storesh128_mask((__v8hf *)__W, __A, __U & 1);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS512 _mm512_store_ph(void *__P,
+ __m512h __A) {
+ *(__m512h *)__P = __A;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_store_ph(void *__P,
+ __m256h __A) {
+ *(__m256h *)__P = __A;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS128 _mm_store_ph(void *__P,
+ __m128h __A) {
+ *(__m128h *)__P = __A;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS512 _mm512_storeu_ph(void *__P,
+ __m512h __A) {
+ struct __storeu_ph {
+ __m512h_u __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_ph *)__P)->__v = __A;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS256 _mm256_storeu_ph(void *__P,
+ __m256h __A) {
+ struct __storeu_ph {
+ __m256h_u __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_ph *)__P)->__v = __A;
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS128 _mm_storeu_ph(void *__P,
+ __m128h __A) {
+ struct __storeu_ph {
+ __m128h_u __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_ph *)__P)->__v = __A;
+}
+
+// moves with vmovsh:
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_move_sh(__m128h __a,
+ __m128h __b) {
+ __a[0] = __b[0];
+ return __a;
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_move_sh(__m128h __W,
+ __mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return __builtin_ia32_selectsh_128(__U, _mm_move_sh(__A, __B), __W);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_move_sh(__mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return __builtin_ia32_selectsh_128(__U, _mm_move_sh(__A, __B),
+ _mm_setzero_ph());
+}
+
+// vmovw:
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtsi16_si128(short __a) {
+ return (__m128i)(__v8hi){__a, 0, 0, 0, 0, 0, 0, 0};
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128 _mm_cvtsi128_si16(__m128i __a) {
+ __v8hi __b = (__v8hi)__a;
+ return __b[0];
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_rcp_ph(__m512h __A) {
+ return (__m512h)__builtin_ia32_rcpph512_mask(
+ (__v32hf)__A, (__v32hf)_mm512_undefined_ph(), (__mmask32)-1);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_rcp_ph(__m512h __W, __mmask32 __U, __m512h __A) {
+ return (__m512h)__builtin_ia32_rcpph512_mask((__v32hf)__A, (__v32hf)__W,
+ (__mmask32)__U);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_rcp_ph(__mmask32 __U, __m512h __A) {
+ return (__m512h)__builtin_ia32_rcpph512_mask(
+ (__v32hf)__A, (__v32hf)_mm512_setzero_ph(), (__mmask32)__U);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_rsqrt_ph(__m512h __A) {
+ return (__m512h)__builtin_ia32_rsqrtph512_mask(
+ (__v32hf)__A, (__v32hf)_mm512_undefined_ph(), (__mmask32)-1);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_rsqrt_ph(__m512h __W, __mmask32 __U, __m512h __A) {
+ return (__m512h)__builtin_ia32_rsqrtph512_mask((__v32hf)__A, (__v32hf)__W,
+ (__mmask32)__U);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_rsqrt_ph(__mmask32 __U, __m512h __A) {
+ return (__m512h)__builtin_ia32_rsqrtph512_mask(
+ (__v32hf)__A, (__v32hf)_mm512_setzero_ph(), (__mmask32)__U);
+}
+
+#define _mm512_getmant_ph(A, B, C) \
+ ((__m512h)__builtin_ia32_getmantph512_mask( \
+ (__v32hf)(__m512h)(A), (int)(((C) << 2) | (B)), \
+ (__v32hf)_mm512_undefined_ph(), (__mmask32)-1, \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_mask_getmant_ph(W, U, A, B, C) \
+ ((__m512h)__builtin_ia32_getmantph512_mask( \
+ (__v32hf)(__m512h)(A), (int)(((C) << 2) | (B)), (__v32hf)(__m512h)(W), \
+ (__mmask32)(U), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_maskz_getmant_ph(U, A, B, C) \
+ ((__m512h)__builtin_ia32_getmantph512_mask( \
+ (__v32hf)(__m512h)(A), (int)(((C) << 2) | (B)), \
+ (__v32hf)_mm512_setzero_ph(), (__mmask32)(U), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_getmant_round_ph(A, B, C, R) \
+ ((__m512h)__builtin_ia32_getmantph512_mask( \
+ (__v32hf)(__m512h)(A), (int)(((C) << 2) | (B)), \
+ (__v32hf)_mm512_undefined_ph(), (__mmask32)-1, (int)(R)))
+
+#define _mm512_mask_getmant_round_ph(W, U, A, B, C, R) \
+ ((__m512h)__builtin_ia32_getmantph512_mask( \
+ (__v32hf)(__m512h)(A), (int)(((C) << 2) | (B)), (__v32hf)(__m512h)(W), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_maskz_getmant_round_ph(U, A, B, C, R) \
+ ((__m512h)__builtin_ia32_getmantph512_mask( \
+ (__v32hf)(__m512h)(A), (int)(((C) << 2) | (B)), \
+ (__v32hf)_mm512_setzero_ph(), (__mmask32)(U), (int)(R)))
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_getexp_ph(__m512h __A) {
+ return (__m512h)__builtin_ia32_getexpph512_mask(
+ (__v32hf)__A, (__v32hf)_mm512_undefined_ph(), (__mmask32)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_getexp_ph(__m512h __W, __mmask32 __U, __m512h __A) {
+ return (__m512h)__builtin_ia32_getexpph512_mask(
+ (__v32hf)__A, (__v32hf)__W, (__mmask32)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_getexp_ph(__mmask32 __U, __m512h __A) {
+ return (__m512h)__builtin_ia32_getexpph512_mask(
+ (__v32hf)__A, (__v32hf)_mm512_setzero_ph(), (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_getexp_round_ph(A, R) \
+ ((__m512h)__builtin_ia32_getexpph512_mask((__v32hf)(__m512h)(A), \
+ (__v32hf)_mm512_undefined_ph(), \
+ (__mmask32)-1, (int)(R)))
+
+#define _mm512_mask_getexp_round_ph(W, U, A, R) \
+ ((__m512h)__builtin_ia32_getexpph512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(W), (__mmask32)(U), (int)(R)))
+
+#define _mm512_maskz_getexp_round_ph(U, A, R) \
+ ((__m512h)__builtin_ia32_getexpph512_mask((__v32hf)(__m512h)(A), \
+ (__v32hf)_mm512_setzero_ph(), \
+ (__mmask32)(U), (int)(R)))
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_scalef_ph(__m512h __A,
+ __m512h __B) {
+ return (__m512h)__builtin_ia32_scalefph512_mask(
+ (__v32hf)__A, (__v32hf)__B, (__v32hf)_mm512_undefined_ph(), (__mmask32)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_scalef_ph(__m512h __W, __mmask32 __U, __m512h __A, __m512h __B) {
+ return (__m512h)__builtin_ia32_scalefph512_mask((__v32hf)__A, (__v32hf)__B,
+ (__v32hf)__W, (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_scalef_ph(__mmask32 __U, __m512h __A, __m512h __B) {
+ return (__m512h)__builtin_ia32_scalefph512_mask(
+ (__v32hf)__A, (__v32hf)__B, (__v32hf)_mm512_setzero_ph(), (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_scalef_round_ph(A, B, R) \
+ ((__m512h)__builtin_ia32_scalefph512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), \
+ (__v32hf)_mm512_undefined_ph(), (__mmask32)-1, (int)(R)))
+
+#define _mm512_mask_scalef_round_ph(W, U, A, B, R) \
+ ((__m512h)__builtin_ia32_scalefph512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(W), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_maskz_scalef_round_ph(U, A, B, R) \
+ ((__m512h)__builtin_ia32_scalefph512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), \
+ (__v32hf)_mm512_setzero_ph(), (__mmask32)(U), (int)(R)))
+
+#define _mm512_roundscale_ph(A, B) \
+ ((__m512h)__builtin_ia32_rndscaleph_mask( \
+ (__v32hf)(__m512h)(A), (int)(B), (__v32hf)(__m512h)(A), (__mmask32)-1, \
+ _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_mask_roundscale_ph(A, B, C, imm) \
+ ((__m512h)__builtin_ia32_rndscaleph_mask( \
+ (__v32hf)(__m512h)(C), (int)(imm), (__v32hf)(__m512h)(A), \
+ (__mmask32)(B), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_maskz_roundscale_ph(A, B, imm) \
+ ((__m512h)__builtin_ia32_rndscaleph_mask( \
+ (__v32hf)(__m512h)(B), (int)(imm), (__v32hf)_mm512_setzero_ph(), \
+ (__mmask32)(A), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_mask_roundscale_round_ph(A, B, C, imm, R) \
+ ((__m512h)__builtin_ia32_rndscaleph_mask((__v32hf)(__m512h)(C), (int)(imm), \
+ (__v32hf)(__m512h)(A), \
+ (__mmask32)(B), (int)(R)))
+
+#define _mm512_maskz_roundscale_round_ph(A, B, imm, R) \
+ ((__m512h)__builtin_ia32_rndscaleph_mask((__v32hf)(__m512h)(B), (int)(imm), \
+ (__v32hf)_mm512_setzero_ph(), \
+ (__mmask32)(A), (int)(R)))
+
+#define _mm512_roundscale_round_ph(A, imm, R) \
+ ((__m512h)__builtin_ia32_rndscaleph_mask((__v32hf)(__m512h)(A), (int)(imm), \
+ (__v32hf)_mm512_undefined_ph(), \
+ (__mmask32)-1, (int)(R)))
+
+#define _mm512_reduce_ph(A, imm) \
+ ((__m512h)__builtin_ia32_reduceph512_mask( \
+ (__v32hf)(__m512h)(A), (int)(imm), (__v32hf)_mm512_undefined_ph(), \
+ (__mmask32)-1, _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_mask_reduce_ph(W, U, A, imm) \
+ ((__m512h)__builtin_ia32_reduceph512_mask( \
+ (__v32hf)(__m512h)(A), (int)(imm), (__v32hf)(__m512h)(W), \
+ (__mmask32)(U), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_maskz_reduce_ph(U, A, imm) \
+ ((__m512h)__builtin_ia32_reduceph512_mask( \
+ (__v32hf)(__m512h)(A), (int)(imm), (__v32hf)_mm512_setzero_ph(), \
+ (__mmask32)(U), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm512_mask_reduce_round_ph(W, U, A, imm, R) \
+ ((__m512h)__builtin_ia32_reduceph512_mask((__v32hf)(__m512h)(A), (int)(imm), \
+ (__v32hf)(__m512h)(W), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_maskz_reduce_round_ph(U, A, imm, R) \
+ ((__m512h)__builtin_ia32_reduceph512_mask((__v32hf)(__m512h)(A), (int)(imm), \
+ (__v32hf)_mm512_setzero_ph(), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_reduce_round_ph(A, imm, R) \
+ ((__m512h)__builtin_ia32_reduceph512_mask((__v32hf)(__m512h)(A), (int)(imm), \
+ (__v32hf)_mm512_undefined_ph(), \
+ (__mmask32)-1, (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_rcp_sh(__m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_rcpsh_mask(
+ (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)-1);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_rcp_sh(__m128h __W,
+ __mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_rcpsh_mask((__v8hf)__A, (__v8hf)__B,
+ (__v8hf)__W, (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_rcp_sh(__mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_rcpsh_mask(
+ (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_rsqrt_sh(__m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_rsqrtsh_mask(
+ (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)-1);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_rsqrt_sh(__m128h __W,
+ __mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_rsqrtsh_mask((__v8hf)__A, (__v8hf)__B,
+ (__v8hf)__W, (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_rsqrt_sh(__mmask8 __U, __m128h __A, __m128h __B) {
+ return (__m128h)__builtin_ia32_rsqrtsh_mask(
+ (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
+}
+
+#define _mm_getmant_round_sh(A, B, C, D, R) \
+ ((__m128h)__builtin_ia32_getmantsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (int)(((D) << 2) | (C)), \
+ (__v8hf)_mm_setzero_ph(), (__mmask8)-1, (int)(R)))
+
+#define _mm_getmant_sh(A, B, C, D) \
+ ((__m128h)__builtin_ia32_getmantsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (int)(((D) << 2) | (C)), \
+ (__v8hf)_mm_setzero_ph(), (__mmask8)-1, _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_mask_getmant_sh(W, U, A, B, C, D) \
+ ((__m128h)__builtin_ia32_getmantsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (int)(((D) << 2) | (C)), \
+ (__v8hf)(__m128h)(W), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_mask_getmant_round_sh(W, U, A, B, C, D, R) \
+ ((__m128h)__builtin_ia32_getmantsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (int)(((D) << 2) | (C)), \
+ (__v8hf)(__m128h)(W), (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_getmant_sh(U, A, B, C, D) \
+ ((__m128h)__builtin_ia32_getmantsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (int)(((D) << 2) | (C)), \
+ (__v8hf)_mm_setzero_ph(), (__mmask8)(U), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_maskz_getmant_round_sh(U, A, B, C, D, R) \
+ ((__m128h)__builtin_ia32_getmantsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (int)(((D) << 2) | (C)), \
+ (__v8hf)_mm_setzero_ph(), (__mmask8)(U), (int)(R)))
+
+#define _mm_getexp_round_sh(A, B, R) \
+ ((__m128h)__builtin_ia32_getexpsh128_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)-1, (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_getexp_sh(__m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_getexpsh128_round_mask(
+ (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_getexp_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
+ return (__m128h)__builtin_ia32_getexpsh128_round_mask(
+ (__v8hf)__A, (__v8hf)__B, (__v8hf)__W, (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_getexp_round_sh(W, U, A, B, R) \
+ ((__m128h)__builtin_ia32_getexpsh128_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_getexp_sh(__mmask8 __U, __m128h __A, __m128h __B) {
+ return (__m128h)__builtin_ia32_getexpsh128_round_mask(
+ (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_getexp_round_sh(U, A, B, R) \
+ ((__m128h)__builtin_ia32_getexpsh128_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_scalef_round_sh(A, B, R) \
+ ((__m128h)__builtin_ia32_scalefsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)-1, (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_scalef_sh(__m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_scalefsh_round_mask(
+ (__v8hf)__A, (__v8hf)(__B), (__v8hf)_mm_setzero_ph(), (__mmask8)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_scalef_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
+ return (__m128h)__builtin_ia32_scalefsh_round_mask((__v8hf)__A, (__v8hf)__B,
+ (__v8hf)__W, (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask_scalef_round_sh(W, U, A, B, R) \
+ ((__m128h)__builtin_ia32_scalefsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_scalef_sh(__mmask8 __U, __m128h __A, __m128h __B) {
+ return (__m128h)__builtin_ia32_scalefsh_round_mask(
+ (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_scalef_round_sh(U, A, B, R) \
+ ((__m128h)__builtin_ia32_scalefsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_roundscale_round_sh(A, B, imm, R) \
+ ((__m128h)__builtin_ia32_rndscalesh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)-1, (int)(imm), (int)(R)))
+
+#define _mm_roundscale_sh(A, B, imm) \
+ ((__m128h)__builtin_ia32_rndscalesh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)-1, (int)(imm), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_mask_roundscale_sh(W, U, A, B, I) \
+ ((__m128h)__builtin_ia32_rndscalesh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \
+ (__mmask8)(U), (int)(I), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_mask_roundscale_round_sh(W, U, A, B, I, R) \
+ ((__m128h)__builtin_ia32_rndscalesh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \
+ (__mmask8)(U), (int)(I), (int)(R)))
+
+#define _mm_maskz_roundscale_sh(U, A, B, I) \
+ ((__m128h)__builtin_ia32_rndscalesh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)(U), (int)(I), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_maskz_roundscale_round_sh(U, A, B, I, R) \
+ ((__m128h)__builtin_ia32_rndscalesh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)(U), (int)(I), (int)(R)))
+
+#define _mm_reduce_sh(A, B, C) \
+ ((__m128h)__builtin_ia32_reducesh_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)-1, (int)(C), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_mask_reduce_sh(W, U, A, B, C) \
+ ((__m128h)__builtin_ia32_reducesh_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \
+ (__mmask8)(U), (int)(C), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_maskz_reduce_sh(U, A, B, C) \
+ ((__m128h)__builtin_ia32_reducesh_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)(U), (int)(C), _MM_FROUND_CUR_DIRECTION))
+
+#define _mm_reduce_round_sh(A, B, C, R) \
+ ((__m128h)__builtin_ia32_reducesh_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)-1, (int)(C), (int)(R)))
+
+#define _mm_mask_reduce_round_sh(W, U, A, B, C, R) \
+ ((__m128h)__builtin_ia32_reducesh_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \
+ (__mmask8)(U), (int)(C), (int)(R)))
+
+#define _mm_maskz_reduce_round_sh(U, A, B, C, R) \
+ ((__m128h)__builtin_ia32_reducesh_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)(U), (int)(C), (int)(R)))
+
+#define _mm512_sqrt_round_ph(A, R) \
+ ((__m512h)__builtin_ia32_sqrtph512((__v32hf)(__m512h)(A), (int)(R)))
+
+#define _mm512_mask_sqrt_round_ph(W, U, A, R) \
+ ((__m512h)__builtin_ia32_selectph_512( \
+ (__mmask32)(U), (__v32hf)_mm512_sqrt_round_ph((A), (R)), \
+ (__v32hf)(__m512h)(W)))
+
+#define _mm512_maskz_sqrt_round_ph(U, A, R) \
+ ((__m512h)__builtin_ia32_selectph_512( \
+ (__mmask32)(U), (__v32hf)_mm512_sqrt_round_ph((A), (R)), \
+ (__v32hf)_mm512_setzero_ph()))
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_sqrt_ph(__m512h __A) {
+ return (__m512h)__builtin_ia32_sqrtph512((__v32hf)__A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_sqrt_ph(__m512h __W, __mmask32 __U, __m512h __A) {
+ return (__m512h)__builtin_ia32_selectph_512(
+ (__mmask32)(__U),
+ (__v32hf)__builtin_ia32_sqrtph512((__A), (_MM_FROUND_CUR_DIRECTION)),
+ (__v32hf)(__m512h)(__W));
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_sqrt_ph(__mmask32 __U, __m512h __A) {
+ return (__m512h)__builtin_ia32_selectph_512(
+ (__mmask32)(__U),
+ (__v32hf)__builtin_ia32_sqrtph512((__A), (_MM_FROUND_CUR_DIRECTION)),
+ (__v32hf)_mm512_setzero_ph());
+}
+
+#define _mm_sqrt_round_sh(A, B, R) \
+ ((__m128h)__builtin_ia32_sqrtsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)-1, (int)(R)))
+
+#define _mm_mask_sqrt_round_sh(W, U, A, B, R) \
+ ((__m128h)__builtin_ia32_sqrtsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(W), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_sqrt_round_sh(U, A, B, R) \
+ ((__m128h)__builtin_ia32_sqrtsh_round_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_sqrt_sh(__m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_sqrtsh_round_mask(
+ (__v8hf)(__m128h)(__A), (__v8hf)(__m128h)(__B), (__v8hf)_mm_setzero_ph(),
+ (__mmask8)-1, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_sqrt_sh(__m128h __W,
+ __mmask32 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_sqrtsh_round_mask(
+ (__v8hf)(__m128h)(__A), (__v8hf)(__m128h)(__B), (__v8hf)(__m128h)(__W),
+ (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_sqrt_sh(__mmask32 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_sqrtsh_round_mask(
+ (__v8hf)(__m128h)(__A), (__v8hf)(__m128h)(__B), (__v8hf)_mm_setzero_ph(),
+ (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask_fpclass_ph_mask(U, A, imm) \
+ ((__mmask32)__builtin_ia32_fpclassph512_mask((__v32hf)(__m512h)(A), \
+ (int)(imm), (__mmask32)(U)))
+
+#define _mm512_fpclass_ph_mask(A, imm) \
+ ((__mmask32)__builtin_ia32_fpclassph512_mask((__v32hf)(__m512h)(A), \
+ (int)(imm), (__mmask32)-1))
+
+#define _mm_fpclass_sh_mask(A, imm) \
+ ((__mmask8)__builtin_ia32_fpclasssh_mask((__v8hf)(__m128h)(A), (int)(imm), \
+ (__mmask8)-1))
+
+#define _mm_mask_fpclass_sh_mask(U, A, imm) \
+ ((__mmask8)__builtin_ia32_fpclasssh_mask((__v8hf)(__m128h)(A), (int)(imm), \
+ (__mmask8)(U)))
+
+#define _mm512_cvt_roundpd_ph(A, R) \
+ ((__m128h)__builtin_ia32_vcvtpd2ph512_mask( \
+ (__v8df)(A), (__v8hf)_mm_undefined_ph(), (__mmask8)(-1), (int)(R)))
+
+#define _mm512_mask_cvt_roundpd_ph(W, U, A, R) \
+ ((__m128h)__builtin_ia32_vcvtpd2ph512_mask((__v8df)(A), (__v8hf)(W), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm512_maskz_cvt_roundpd_ph(U, A, R) \
+ ((__m128h)__builtin_ia32_vcvtpd2ph512_mask( \
+ (__v8df)(A), (__v8hf)_mm_setzero_ph(), (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS512 _mm512_cvtpd_ph(__m512d __A) {
+ return (__m128h)__builtin_ia32_vcvtpd2ph512_mask(
+ (__v8df)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtpd_ph(__m128h __W, __mmask8 __U, __m512d __A) {
+ return (__m128h)__builtin_ia32_vcvtpd2ph512_mask(
+ (__v8df)__A, (__v8hf)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtpd_ph(__mmask8 __U, __m512d __A) {
+ return (__m128h)__builtin_ia32_vcvtpd2ph512_mask(
+ (__v8df)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundph_pd(A, R) \
+ ((__m512d)__builtin_ia32_vcvtph2pd512_mask( \
+ (__v8hf)(A), (__v8df)_mm512_undefined_pd(), (__mmask8)(-1), (int)(R)))
+
+#define _mm512_mask_cvt_roundph_pd(W, U, A, R) \
+ ((__m512d)__builtin_ia32_vcvtph2pd512_mask((__v8hf)(A), (__v8df)(W), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm512_maskz_cvt_roundph_pd(U, A, R) \
+ ((__m512d)__builtin_ia32_vcvtph2pd512_mask( \
+ (__v8hf)(A), (__v8df)_mm512_setzero_pd(), (__mmask8)(U), (int)(R)))
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS512 _mm512_cvtph_pd(__m128h __A) {
+ return (__m512d)__builtin_ia32_vcvtph2pd512_mask(
+ (__v8hf)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtph_pd(__m512d __W, __mmask8 __U, __m128h __A) {
+ return (__m512d)__builtin_ia32_vcvtph2pd512_mask(
+ (__v8hf)__A, (__v8df)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtph_pd(__mmask8 __U, __m128h __A) {
+ return (__m512d)__builtin_ia32_vcvtph2pd512_mask(
+ (__v8hf)__A, (__v8df)_mm512_setzero_pd(), (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvt_roundsh_ss(A, B, R) \
+ ((__m128)__builtin_ia32_vcvtsh2ss_round_mask((__v4sf)(A), (__v8hf)(B), \
+ (__v4sf)_mm_undefined_ps(), \
+ (__mmask8)(-1), (int)(R)))
+
+#define _mm_mask_cvt_roundsh_ss(W, U, A, B, R) \
+ ((__m128)__builtin_ia32_vcvtsh2ss_round_mask( \
+ (__v4sf)(A), (__v8hf)(B), (__v4sf)(W), (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_cvt_roundsh_ss(U, A, B, R) \
+ ((__m128)__builtin_ia32_vcvtsh2ss_round_mask((__v4sf)(A), (__v8hf)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_cvtsh_ss(__m128 __A,
+ __m128h __B) {
+ return (__m128)__builtin_ia32_vcvtsh2ss_round_mask(
+ (__v4sf)__A, (__v8hf)__B, (__v4sf)_mm_undefined_ps(), (__mmask8)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_cvtsh_ss(__m128 __W,
+ __mmask8 __U,
+ __m128 __A,
+ __m128h __B) {
+ return (__m128)__builtin_ia32_vcvtsh2ss_round_mask((__v4sf)__A, (__v8hf)__B,
+ (__v4sf)__W, (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_maskz_cvtsh_ss(__mmask8 __U,
+ __m128 __A,
+ __m128h __B) {
+ return (__m128)__builtin_ia32_vcvtsh2ss_round_mask(
+ (__v4sf)__A, (__v8hf)__B, (__v4sf)_mm_setzero_ps(), (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvt_roundss_sh(A, B, R) \
+ ((__m128h)__builtin_ia32_vcvtss2sh_round_mask((__v8hf)(A), (__v4sf)(B), \
+ (__v8hf)_mm_undefined_ph(), \
+ (__mmask8)(-1), (int)(R)))
+
+#define _mm_mask_cvt_roundss_sh(W, U, A, B, R) \
+ ((__m128h)__builtin_ia32_vcvtss2sh_round_mask( \
+ (__v8hf)(A), (__v4sf)(B), (__v8hf)(W), (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_cvt_roundss_sh(U, A, B, R) \
+ ((__m128h)__builtin_ia32_vcvtss2sh_round_mask((__v8hf)(A), (__v4sf)(B), \
+ (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtss_sh(__m128h __A,
+ __m128 __B) {
+ return (__m128h)__builtin_ia32_vcvtss2sh_round_mask(
+ (__v8hf)__A, (__v4sf)__B, (__v8hf)_mm_undefined_ph(), (__mmask8)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_cvtss_sh(__m128h __W,
+ __mmask8 __U,
+ __m128h __A,
+ __m128 __B) {
+ return (__m128h)__builtin_ia32_vcvtss2sh_round_mask(
+ (__v8hf)__A, (__v4sf)__B, (__v8hf)__W, (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_cvtss_sh(__mmask8 __U,
+ __m128h __A,
+ __m128 __B) {
+ return (__m128h)__builtin_ia32_vcvtss2sh_round_mask(
+ (__v8hf)__A, (__v4sf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvt_roundsd_sh(A, B, R) \
+ ((__m128h)__builtin_ia32_vcvtsd2sh_round_mask((__v8hf)(A), (__v2df)(B), \
+ (__v8hf)_mm_undefined_ph(), \
+ (__mmask8)(-1), (int)(R)))
+
+#define _mm_mask_cvt_roundsd_sh(W, U, A, B, R) \
+ ((__m128h)__builtin_ia32_vcvtsd2sh_round_mask( \
+ (__v8hf)(A), (__v2df)(B), (__v8hf)(W), (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_cvt_roundsd_sh(U, A, B, R) \
+ ((__m128h)__builtin_ia32_vcvtsd2sh_round_mask((__v8hf)(A), (__v2df)(B), \
+ (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtsd_sh(__m128h __A,
+ __m128d __B) {
+ return (__m128h)__builtin_ia32_vcvtsd2sh_round_mask(
+ (__v8hf)__A, (__v2df)__B, (__v8hf)_mm_undefined_ph(), (__mmask8)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_cvtsd_sh(__m128h __W,
+ __mmask8 __U,
+ __m128h __A,
+ __m128d __B) {
+ return (__m128h)__builtin_ia32_vcvtsd2sh_round_mask(
+ (__v8hf)__A, (__v2df)__B, (__v8hf)__W, (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtsd_sh(__mmask8 __U, __m128h __A, __m128d __B) {
+ return (__m128h)__builtin_ia32_vcvtsd2sh_round_mask(
+ (__v8hf)__A, (__v2df)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvt_roundsh_sd(A, B, R) \
+ ((__m128d)__builtin_ia32_vcvtsh2sd_round_mask((__v2df)(A), (__v8hf)(B), \
+ (__v2df)_mm_undefined_pd(), \
+ (__mmask8)(-1), (int)(R)))
+
+#define _mm_mask_cvt_roundsh_sd(W, U, A, B, R) \
+ ((__m128d)__builtin_ia32_vcvtsh2sd_round_mask( \
+ (__v2df)(A), (__v8hf)(B), (__v2df)(W), (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_cvt_roundsh_sd(U, A, B, R) \
+ ((__m128d)__builtin_ia32_vcvtsh2sd_round_mask((__v2df)(A), (__v8hf)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_cvtsh_sd(__m128d __A,
+ __m128h __B) {
+ return (__m128d)__builtin_ia32_vcvtsh2sd_round_mask(
+ (__v2df)__A, (__v8hf)__B, (__v2df)_mm_undefined_pd(), (__mmask8)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_cvtsh_sd(__m128d __W,
+ __mmask8 __U,
+ __m128d __A,
+ __m128h __B) {
+ return (__m128d)__builtin_ia32_vcvtsh2sd_round_mask(
+ (__v2df)__A, (__v8hf)__B, (__v2df)__W, (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtsh_sd(__mmask8 __U, __m128d __A, __m128h __B) {
+ return (__m128d)__builtin_ia32_vcvtsh2sd_round_mask(
+ (__v2df)__A, (__v8hf)__B, (__v2df)_mm_setzero_pd(), (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundph_epi16(A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2w512_mask((__v32hf)(A), \
+ (__v32hi)_mm512_undefined_epi32(), \
+ (__mmask32)(-1), (int)(R)))
+
+#define _mm512_mask_cvt_roundph_epi16(W, U, A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2w512_mask((__v32hf)(A), (__v32hi)(W), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_maskz_cvt_roundph_epi16(U, A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2w512_mask((__v32hf)(A), \
+ (__v32hi)_mm512_setzero_epi32(), \
+ (__mmask32)(U), (int)(R)))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_cvtph_epi16(__m512h __A) {
+ return (__m512i)__builtin_ia32_vcvtph2w512_mask(
+ (__v32hf)__A, (__v32hi)_mm512_setzero_epi32(), (__mmask32)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtph_epi16(__m512i __W, __mmask32 __U, __m512h __A) {
+ return (__m512i)__builtin_ia32_vcvtph2w512_mask(
+ (__v32hf)__A, (__v32hi)__W, (__mmask32)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtph_epi16(__mmask32 __U, __m512h __A) {
+ return (__m512i)__builtin_ia32_vcvtph2w512_mask(
+ (__v32hf)__A, (__v32hi)_mm512_setzero_epi32(), (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundph_epi16(A, R) \
+ ((__m512i)__builtin_ia32_vcvttph2w512_mask( \
+ (__v32hf)(A), (__v32hi)_mm512_undefined_epi32(), (__mmask32)(-1), \
+ (int)(R)))
+
+#define _mm512_mask_cvtt_roundph_epi16(W, U, A, R) \
+ ((__m512i)__builtin_ia32_vcvttph2w512_mask((__v32hf)(A), (__v32hi)(W), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_maskz_cvtt_roundph_epi16(U, A, R) \
+ ((__m512i)__builtin_ia32_vcvttph2w512_mask((__v32hf)(A), \
+ (__v32hi)_mm512_setzero_epi32(), \
+ (__mmask32)(U), (int)(R)))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_cvttph_epi16(__m512h __A) {
+ return (__m512i)__builtin_ia32_vcvttph2w512_mask(
+ (__v32hf)__A, (__v32hi)_mm512_setzero_epi32(), (__mmask32)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_cvttph_epi16(__m512i __W, __mmask32 __U, __m512h __A) {
+ return (__m512i)__builtin_ia32_vcvttph2w512_mask(
+ (__v32hf)__A, (__v32hi)__W, (__mmask32)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvttph_epi16(__mmask32 __U, __m512h __A) {
+ return (__m512i)__builtin_ia32_vcvttph2w512_mask(
+ (__v32hf)__A, (__v32hi)_mm512_setzero_epi32(), (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepi16_ph(A, R) \
+ ((__m512h)__builtin_ia32_vcvtw2ph512_mask((__v32hi)(A), \
+ (__v32hf)_mm512_undefined_ph(), \
+ (__mmask32)(-1), (int)(R)))
+
+#define _mm512_mask_cvt_roundepi16_ph(W, U, A, R) \
+ ((__m512h)__builtin_ia32_vcvtw2ph512_mask((__v32hi)(A), (__v32hf)(W), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_maskz_cvt_roundepi16_ph(U, A, R) \
+ ((__m512h)__builtin_ia32_vcvtw2ph512_mask( \
+ (__v32hi)(A), (__v32hf)_mm512_setzero_ph(), (__mmask32)(U), (int)(R)))
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_cvtepi16_ph(__m512i __A) {
+ return (__m512h)__builtin_ia32_vcvtw2ph512_mask(
+ (__v32hi)__A, (__v32hf)_mm512_setzero_ph(), (__mmask32)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtepi16_ph(__m512h __W, __mmask32 __U, __m512i __A) {
+ return (__m512h)__builtin_ia32_vcvtw2ph512_mask(
+ (__v32hi)__A, (__v32hf)__W, (__mmask32)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtepi16_ph(__mmask32 __U, __m512i __A) {
+ return (__m512h)__builtin_ia32_vcvtw2ph512_mask(
+ (__v32hi)__A, (__v32hf)_mm512_setzero_ph(), (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundph_epu16(A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2uw512_mask( \
+ (__v32hf)(A), (__v32hu)_mm512_undefined_epi32(), (__mmask32)(-1), \
+ (int)(R)))
+
+#define _mm512_mask_cvt_roundph_epu16(W, U, A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2uw512_mask((__v32hf)(A), (__v32hu)(W), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_maskz_cvt_roundph_epu16(U, A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2uw512_mask((__v32hf)(A), \
+ (__v32hu)_mm512_setzero_epi32(), \
+ (__mmask32)(U), (int)(R)))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_cvtph_epu16(__m512h __A) {
+ return (__m512i)__builtin_ia32_vcvtph2uw512_mask(
+ (__v32hf)__A, (__v32hu)_mm512_setzero_epi32(), (__mmask32)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtph_epu16(__m512i __W, __mmask32 __U, __m512h __A) {
+ return (__m512i)__builtin_ia32_vcvtph2uw512_mask(
+ (__v32hf)__A, (__v32hu)__W, (__mmask32)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtph_epu16(__mmask32 __U, __m512h __A) {
+ return (__m512i)__builtin_ia32_vcvtph2uw512_mask(
+ (__v32hf)__A, (__v32hu)_mm512_setzero_epi32(), (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundph_epu16(A, R) \
+ ((__m512i)__builtin_ia32_vcvttph2uw512_mask( \
+ (__v32hf)(A), (__v32hu)_mm512_undefined_epi32(), (__mmask32)(-1), \
+ (int)(R)))
+
+#define _mm512_mask_cvtt_roundph_epu16(W, U, A, R) \
+ ((__m512i)__builtin_ia32_vcvttph2uw512_mask((__v32hf)(A), (__v32hu)(W), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_maskz_cvtt_roundph_epu16(U, A, R) \
+ ((__m512i)__builtin_ia32_vcvttph2uw512_mask((__v32hf)(A), \
+ (__v32hu)_mm512_setzero_epi32(), \
+ (__mmask32)(U), (int)(R)))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_cvttph_epu16(__m512h __A) {
+ return (__m512i)__builtin_ia32_vcvttph2uw512_mask(
+ (__v32hf)__A, (__v32hu)_mm512_setzero_epi32(), (__mmask32)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_cvttph_epu16(__m512i __W, __mmask32 __U, __m512h __A) {
+ return (__m512i)__builtin_ia32_vcvttph2uw512_mask(
+ (__v32hf)__A, (__v32hu)__W, (__mmask32)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvttph_epu16(__mmask32 __U, __m512h __A) {
+ return (__m512i)__builtin_ia32_vcvttph2uw512_mask(
+ (__v32hf)__A, (__v32hu)_mm512_setzero_epi32(), (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepu16_ph(A, R) \
+ ((__m512h)__builtin_ia32_vcvtuw2ph512_mask((__v32hu)(A), \
+ (__v32hf)_mm512_undefined_ph(), \
+ (__mmask32)(-1), (int)(R)))
+
+#define _mm512_mask_cvt_roundepu16_ph(W, U, A, R) \
+ ((__m512h)__builtin_ia32_vcvtuw2ph512_mask((__v32hu)(A), (__v32hf)(W), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_maskz_cvt_roundepu16_ph(U, A, R) \
+ ((__m512h)__builtin_ia32_vcvtuw2ph512_mask( \
+ (__v32hu)(A), (__v32hf)_mm512_setzero_ph(), (__mmask32)(U), (int)(R)))
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_cvtepu16_ph(__m512i __A) {
+ return (__m512h)__builtin_ia32_vcvtuw2ph512_mask(
+ (__v32hu)__A, (__v32hf)_mm512_setzero_ph(), (__mmask32)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtepu16_ph(__m512h __W, __mmask32 __U, __m512i __A) {
+ return (__m512h)__builtin_ia32_vcvtuw2ph512_mask(
+ (__v32hu)__A, (__v32hf)__W, (__mmask32)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtepu16_ph(__mmask32 __U, __m512i __A) {
+ return (__m512h)__builtin_ia32_vcvtuw2ph512_mask(
+ (__v32hu)__A, (__v32hf)_mm512_setzero_ph(), (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundph_epi32(A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2dq512_mask( \
+ (__v16hf)(A), (__v16si)_mm512_undefined_epi32(), (__mmask16)(-1), \
+ (int)(R)))
+
+#define _mm512_mask_cvt_roundph_epi32(W, U, A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2dq512_mask((__v16hf)(A), (__v16si)(W), \
+ (__mmask16)(U), (int)(R)))
+
+#define _mm512_maskz_cvt_roundph_epi32(U, A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2dq512_mask((__v16hf)(A), \
+ (__v16si)_mm512_setzero_epi32(), \
+ (__mmask16)(U), (int)(R)))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_cvtph_epi32(__m256h __A) {
+ return (__m512i)__builtin_ia32_vcvtph2dq512_mask(
+ (__v16hf)__A, (__v16si)_mm512_setzero_epi32(), (__mmask16)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtph_epi32(__m512i __W, __mmask16 __U, __m256h __A) {
+ return (__m512i)__builtin_ia32_vcvtph2dq512_mask(
+ (__v16hf)__A, (__v16si)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtph_epi32(__mmask16 __U, __m256h __A) {
+ return (__m512i)__builtin_ia32_vcvtph2dq512_mask(
+ (__v16hf)__A, (__v16si)_mm512_setzero_epi32(), (__mmask16)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundph_epu32(A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2udq512_mask( \
+ (__v16hf)(A), (__v16su)_mm512_undefined_epi32(), (__mmask16)(-1), \
+ (int)(R)))
+
+#define _mm512_mask_cvt_roundph_epu32(W, U, A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2udq512_mask((__v16hf)(A), (__v16su)(W), \
+ (__mmask16)(U), (int)(R)))
+
+#define _mm512_maskz_cvt_roundph_epu32(U, A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2udq512_mask((__v16hf)(A), \
+ (__v16su)_mm512_setzero_epi32(), \
+ (__mmask16)(U), (int)(R)))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_cvtph_epu32(__m256h __A) {
+ return (__m512i)__builtin_ia32_vcvtph2udq512_mask(
+ (__v16hf)__A, (__v16su)_mm512_setzero_epi32(), (__mmask16)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtph_epu32(__m512i __W, __mmask16 __U, __m256h __A) {
+ return (__m512i)__builtin_ia32_vcvtph2udq512_mask(
+ (__v16hf)__A, (__v16su)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtph_epu32(__mmask16 __U, __m256h __A) {
+ return (__m512i)__builtin_ia32_vcvtph2udq512_mask(
+ (__v16hf)__A, (__v16su)_mm512_setzero_epi32(), (__mmask16)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepi32_ph(A, R) \
+ ((__m256h)__builtin_ia32_vcvtdq2ph512_mask((__v16si)(A), \
+ (__v16hf)_mm256_undefined_ph(), \
+ (__mmask16)(-1), (int)(R)))
+
+#define _mm512_mask_cvt_roundepi32_ph(W, U, A, R) \
+ ((__m256h)__builtin_ia32_vcvtdq2ph512_mask((__v16si)(A), (__v16hf)(W), \
+ (__mmask16)(U), (int)(R)))
+
+#define _mm512_maskz_cvt_roundepi32_ph(U, A, R) \
+ ((__m256h)__builtin_ia32_vcvtdq2ph512_mask( \
+ (__v16si)(A), (__v16hf)_mm256_setzero_ph(), (__mmask16)(U), (int)(R)))
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS512
+_mm512_cvtepi32_ph(__m512i __A) {
+ return (__m256h)__builtin_ia32_vcvtdq2ph512_mask(
+ (__v16si)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtepi32_ph(__m256h __W, __mmask16 __U, __m512i __A) {
+ return (__m256h)__builtin_ia32_vcvtdq2ph512_mask(
+ (__v16si)__A, (__v16hf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtepi32_ph(__mmask16 __U, __m512i __A) {
+ return (__m256h)__builtin_ia32_vcvtdq2ph512_mask(
+ (__v16si)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepu32_ph(A, R) \
+ ((__m256h)__builtin_ia32_vcvtudq2ph512_mask((__v16su)(A), \
+ (__v16hf)_mm256_undefined_ph(), \
+ (__mmask16)(-1), (int)(R)))
+
+#define _mm512_mask_cvt_roundepu32_ph(W, U, A, R) \
+ ((__m256h)__builtin_ia32_vcvtudq2ph512_mask((__v16su)(A), (__v16hf)(W), \
+ (__mmask16)(U), (int)(R)))
+
+#define _mm512_maskz_cvt_roundepu32_ph(U, A, R) \
+ ((__m256h)__builtin_ia32_vcvtudq2ph512_mask( \
+ (__v16su)(A), (__v16hf)_mm256_setzero_ph(), (__mmask16)(U), (int)(R)))
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS512
+_mm512_cvtepu32_ph(__m512i __A) {
+ return (__m256h)__builtin_ia32_vcvtudq2ph512_mask(
+ (__v16su)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtepu32_ph(__m256h __W, __mmask16 __U, __m512i __A) {
+ return (__m256h)__builtin_ia32_vcvtudq2ph512_mask(
+ (__v16su)__A, (__v16hf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtepu32_ph(__mmask16 __U, __m512i __A) {
+ return (__m256h)__builtin_ia32_vcvtudq2ph512_mask(
+ (__v16su)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundph_epi32(A, R) \
+ ((__m512i)__builtin_ia32_vcvttph2dq512_mask( \
+ (__v16hf)(A), (__v16si)_mm512_undefined_epi32(), (__mmask16)(-1), \
+ (int)(R)))
+
+#define _mm512_mask_cvtt_roundph_epi32(W, U, A, R) \
+ ((__m512i)__builtin_ia32_vcvttph2dq512_mask((__v16hf)(A), (__v16si)(W), \
+ (__mmask16)(U), (int)(R)))
+
+#define _mm512_maskz_cvtt_roundph_epi32(U, A, R) \
+ ((__m512i)__builtin_ia32_vcvttph2dq512_mask((__v16hf)(A), \
+ (__v16si)_mm512_setzero_epi32(), \
+ (__mmask16)(U), (int)(R)))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_cvttph_epi32(__m256h __A) {
+ return (__m512i)__builtin_ia32_vcvttph2dq512_mask(
+ (__v16hf)__A, (__v16si)_mm512_setzero_epi32(), (__mmask16)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_cvttph_epi32(__m512i __W, __mmask16 __U, __m256h __A) {
+ return (__m512i)__builtin_ia32_vcvttph2dq512_mask(
+ (__v16hf)__A, (__v16si)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvttph_epi32(__mmask16 __U, __m256h __A) {
+ return (__m512i)__builtin_ia32_vcvttph2dq512_mask(
+ (__v16hf)__A, (__v16si)_mm512_setzero_epi32(), (__mmask16)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundph_epu32(A, R) \
+ ((__m512i)__builtin_ia32_vcvttph2udq512_mask( \
+ (__v16hf)(A), (__v16su)_mm512_undefined_epi32(), (__mmask16)(-1), \
+ (int)(R)))
+
+#define _mm512_mask_cvtt_roundph_epu32(W, U, A, R) \
+ ((__m512i)__builtin_ia32_vcvttph2udq512_mask((__v16hf)(A), (__v16su)(W), \
+ (__mmask16)(U), (int)(R)))
+
+#define _mm512_maskz_cvtt_roundph_epu32(U, A, R) \
+ ((__m512i)__builtin_ia32_vcvttph2udq512_mask( \
+ (__v16hf)(A), (__v16su)_mm512_setzero_epi32(), (__mmask16)(U), \
+ (int)(R)))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_cvttph_epu32(__m256h __A) {
+ return (__m512i)__builtin_ia32_vcvttph2udq512_mask(
+ (__v16hf)__A, (__v16su)_mm512_setzero_epi32(), (__mmask16)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_cvttph_epu32(__m512i __W, __mmask16 __U, __m256h __A) {
+ return (__m512i)__builtin_ia32_vcvttph2udq512_mask(
+ (__v16hf)__A, (__v16su)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvttph_epu32(__mmask16 __U, __m256h __A) {
+ return (__m512i)__builtin_ia32_vcvttph2udq512_mask(
+ (__v16hf)__A, (__v16su)_mm512_setzero_epi32(), (__mmask16)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepi64_ph(A, R) \
+ ((__m128h)__builtin_ia32_vcvtqq2ph512_mask( \
+ (__v8di)(A), (__v8hf)_mm_undefined_ph(), (__mmask8)(-1), (int)(R)))
+
+#define _mm512_mask_cvt_roundepi64_ph(W, U, A, R) \
+ ((__m128h)__builtin_ia32_vcvtqq2ph512_mask((__v8di)(A), (__v8hf)(W), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm512_maskz_cvt_roundepi64_ph(U, A, R) \
+ ((__m128h)__builtin_ia32_vcvtqq2ph512_mask( \
+ (__v8di)(A), (__v8hf)_mm_setzero_ph(), (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS512
+_mm512_cvtepi64_ph(__m512i __A) {
+ return (__m128h)__builtin_ia32_vcvtqq2ph512_mask(
+ (__v8di)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtepi64_ph(__m128h __W, __mmask8 __U, __m512i __A) {
+ return (__m128h)__builtin_ia32_vcvtqq2ph512_mask(
+ (__v8di)__A, (__v8hf)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtepi64_ph(__mmask8 __U, __m512i __A) {
+ return (__m128h)__builtin_ia32_vcvtqq2ph512_mask(
+ (__v8di)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundph_epi64(A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2qq512_mask((__v8hf)(A), \
+ (__v8di)_mm512_undefined_epi32(), \
+ (__mmask8)(-1), (int)(R)))
+
+#define _mm512_mask_cvt_roundph_epi64(W, U, A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2qq512_mask((__v8hf)(A), (__v8di)(W), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm512_maskz_cvt_roundph_epi64(U, A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2qq512_mask( \
+ (__v8hf)(A), (__v8di)_mm512_setzero_epi32(), (__mmask8)(U), (int)(R)))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_cvtph_epi64(__m128h __A) {
+ return (__m512i)__builtin_ia32_vcvtph2qq512_mask(
+ (__v8hf)__A, (__v8di)_mm512_setzero_epi32(), (__mmask8)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtph_epi64(__m512i __W, __mmask8 __U, __m128h __A) {
+ return (__m512i)__builtin_ia32_vcvtph2qq512_mask(
+ (__v8hf)__A, (__v8di)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtph_epi64(__mmask8 __U, __m128h __A) {
+ return (__m512i)__builtin_ia32_vcvtph2qq512_mask(
+ (__v8hf)__A, (__v8di)_mm512_setzero_epi32(), (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundepu64_ph(A, R) \
+ ((__m128h)__builtin_ia32_vcvtuqq2ph512_mask( \
+ (__v8du)(A), (__v8hf)_mm_undefined_ph(), (__mmask8)(-1), (int)(R)))
+
+#define _mm512_mask_cvt_roundepu64_ph(W, U, A, R) \
+ ((__m128h)__builtin_ia32_vcvtuqq2ph512_mask((__v8du)(A), (__v8hf)(W), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm512_maskz_cvt_roundepu64_ph(U, A, R) \
+ ((__m128h)__builtin_ia32_vcvtuqq2ph512_mask( \
+ (__v8du)(A), (__v8hf)_mm_setzero_ph(), (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS512
+_mm512_cvtepu64_ph(__m512i __A) {
+ return (__m128h)__builtin_ia32_vcvtuqq2ph512_mask(
+ (__v8du)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtepu64_ph(__m128h __W, __mmask8 __U, __m512i __A) {
+ return (__m128h)__builtin_ia32_vcvtuqq2ph512_mask(
+ (__v8du)__A, (__v8hf)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtepu64_ph(__mmask8 __U, __m512i __A) {
+ return (__m128h)__builtin_ia32_vcvtuqq2ph512_mask(
+ (__v8du)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvt_roundph_epu64(A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2uqq512_mask( \
+ (__v8hf)(A), (__v8du)_mm512_undefined_epi32(), (__mmask8)(-1), \
+ (int)(R)))
+
+#define _mm512_mask_cvt_roundph_epu64(W, U, A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2uqq512_mask((__v8hf)(A), (__v8du)(W), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm512_maskz_cvt_roundph_epu64(U, A, R) \
+ ((__m512i)__builtin_ia32_vcvtph2uqq512_mask( \
+ (__v8hf)(A), (__v8du)_mm512_setzero_epi32(), (__mmask8)(U), (int)(R)))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_cvtph_epu64(__m128h __A) {
+ return (__m512i)__builtin_ia32_vcvtph2uqq512_mask(
+ (__v8hf)__A, (__v8du)_mm512_setzero_epi32(), (__mmask8)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtph_epu64(__m512i __W, __mmask8 __U, __m128h __A) {
+ return (__m512i)__builtin_ia32_vcvtph2uqq512_mask(
+ (__v8hf)__A, (__v8du)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtph_epu64(__mmask8 __U, __m128h __A) {
+ return (__m512i)__builtin_ia32_vcvtph2uqq512_mask(
+ (__v8hf)__A, (__v8du)_mm512_setzero_epi32(), (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundph_epi64(A, R) \
+ ((__m512i)__builtin_ia32_vcvttph2qq512_mask( \
+ (__v8hf)(A), (__v8di)_mm512_undefined_epi32(), (__mmask8)(-1), \
+ (int)(R)))
+
+#define _mm512_mask_cvtt_roundph_epi64(W, U, A, R) \
+ ((__m512i)__builtin_ia32_vcvttph2qq512_mask((__v8hf)(A), (__v8di)(W), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm512_maskz_cvtt_roundph_epi64(U, A, R) \
+ ((__m512i)__builtin_ia32_vcvttph2qq512_mask( \
+ (__v8hf)(A), (__v8di)_mm512_setzero_epi32(), (__mmask8)(U), (int)(R)))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_cvttph_epi64(__m128h __A) {
+ return (__m512i)__builtin_ia32_vcvttph2qq512_mask(
+ (__v8hf)__A, (__v8di)_mm512_setzero_epi32(), (__mmask8)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_cvttph_epi64(__m512i __W, __mmask8 __U, __m128h __A) {
+ return (__m512i)__builtin_ia32_vcvttph2qq512_mask(
+ (__v8hf)__A, (__v8di)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvttph_epi64(__mmask8 __U, __m128h __A) {
+ return (__m512i)__builtin_ia32_vcvttph2qq512_mask(
+ (__v8hf)__A, (__v8di)_mm512_setzero_epi32(), (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtt_roundph_epu64(A, R) \
+ ((__m512i)__builtin_ia32_vcvttph2uqq512_mask( \
+ (__v8hf)(A), (__v8du)_mm512_undefined_epi32(), (__mmask8)(-1), \
+ (int)(R)))
+
+#define _mm512_mask_cvtt_roundph_epu64(W, U, A, R) \
+ ((__m512i)__builtin_ia32_vcvttph2uqq512_mask((__v8hf)(A), (__v8du)(W), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm512_maskz_cvtt_roundph_epu64(U, A, R) \
+ ((__m512i)__builtin_ia32_vcvttph2uqq512_mask( \
+ (__v8hf)(A), (__v8du)_mm512_setzero_epi32(), (__mmask8)(U), (int)(R)))
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_cvttph_epu64(__m128h __A) {
+ return (__m512i)__builtin_ia32_vcvttph2uqq512_mask(
+ (__v8hf)__A, (__v8du)_mm512_setzero_epi32(), (__mmask8)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_mask_cvttph_epu64(__m512i __W, __mmask8 __U, __m128h __A) {
+ return (__m512i)__builtin_ia32_vcvttph2uqq512_mask(
+ (__v8hf)__A, (__v8du)__W, (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvttph_epu64(__mmask8 __U, __m128h __A) {
+ return (__m512i)__builtin_ia32_vcvttph2uqq512_mask(
+ (__v8hf)__A, (__v8du)_mm512_setzero_epi32(), (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvt_roundsh_i32(A, R) \
+ ((int)__builtin_ia32_vcvtsh2si32((__v8hf)(A), (int)(R)))
+
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_cvtsh_i32(__m128h __A) {
+ return (int)__builtin_ia32_vcvtsh2si32((__v8hf)__A, _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvt_roundsh_u32(A, R) \
+ ((unsigned int)__builtin_ia32_vcvtsh2usi32((__v8hf)(A), (int)(R)))
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS128
+_mm_cvtsh_u32(__m128h __A) {
+ return (unsigned int)__builtin_ia32_vcvtsh2usi32((__v8hf)__A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __x86_64__
+#define _mm_cvt_roundsh_i64(A, R) \
+ ((long long)__builtin_ia32_vcvtsh2si64((__v8hf)(A), (int)(R)))
+
+static __inline__ long long __DEFAULT_FN_ATTRS128 _mm_cvtsh_i64(__m128h __A) {
+ return (long long)__builtin_ia32_vcvtsh2si64((__v8hf)__A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_cvt_roundsh_u64(A, R) \
+ ((unsigned long long)__builtin_ia32_vcvtsh2usi64((__v8hf)(A), (int)(R)))
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
+_mm_cvtsh_u64(__m128h __A) {
+ return (unsigned long long)__builtin_ia32_vcvtsh2usi64(
+ (__v8hf)__A, _MM_FROUND_CUR_DIRECTION);
+}
+#endif // __x86_64__
+
+#define _mm_cvt_roundu32_sh(A, B, R) \
+ ((__m128h)__builtin_ia32_vcvtusi2sh((__v8hf)(A), (unsigned int)(B), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_cvtu32_sh(__m128h __A, unsigned int __B) {
+ __A[0] = __B;
+ return __A;
+}
+
+#ifdef __x86_64__
+#define _mm_cvt_roundu64_sh(A, B, R) \
+ ((__m128h)__builtin_ia32_vcvtusi642sh((__v8hf)(A), (unsigned long long)(B), \
+ (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_cvtu64_sh(__m128h __A, unsigned long long __B) {
+ __A[0] = __B;
+ return __A;
+}
+#endif
+
+#define _mm_cvt_roundi32_sh(A, B, R) \
+ ((__m128h)__builtin_ia32_vcvtsi2sh((__v8hf)(A), (int)(B), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvti32_sh(__m128h __A,
+ int __B) {
+ __A[0] = __B;
+ return __A;
+}
+
+#ifdef __x86_64__
+#define _mm_cvt_roundi64_sh(A, B, R) \
+ ((__m128h)__builtin_ia32_vcvtsi642sh((__v8hf)(A), (long long)(B), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvti64_sh(__m128h __A,
+ long long __B) {
+ __A[0] = __B;
+ return __A;
+}
+#endif
+
+#define _mm_cvtt_roundsh_i32(A, R) \
+ ((int)__builtin_ia32_vcvttsh2si32((__v8hf)(A), (int)(R)))
+
+static __inline__ int __DEFAULT_FN_ATTRS128 _mm_cvttsh_i32(__m128h __A) {
+ return (int)__builtin_ia32_vcvttsh2si32((__v8hf)__A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __x86_64__
+#define _mm_cvtt_roundsh_i64(A, R) \
+ ((long long)__builtin_ia32_vcvttsh2si64((__v8hf)(A), (int)(R)))
+
+static __inline__ long long __DEFAULT_FN_ATTRS128 _mm_cvttsh_i64(__m128h __A) {
+ return (long long)__builtin_ia32_vcvttsh2si64((__v8hf)__A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+#endif
+
+#define _mm_cvtt_roundsh_u32(A, R) \
+ ((unsigned int)__builtin_ia32_vcvttsh2usi32((__v8hf)(A), (int)(R)))
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS128
+_mm_cvttsh_u32(__m128h __A) {
+ return (unsigned int)__builtin_ia32_vcvttsh2usi32((__v8hf)__A,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#ifdef __x86_64__
+#define _mm_cvtt_roundsh_u64(A, R) \
+ ((unsigned long long)__builtin_ia32_vcvttsh2usi64((__v8hf)(A), (int)(R)))
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS128
+_mm_cvttsh_u64(__m128h __A) {
+ return (unsigned long long)__builtin_ia32_vcvttsh2usi64(
+ (__v8hf)__A, _MM_FROUND_CUR_DIRECTION);
+}
+#endif
+
+#define _mm512_cvtx_roundph_ps(A, R) \
+ ((__m512)__builtin_ia32_vcvtph2psx512_mask((__v16hf)(A), \
+ (__v16sf)_mm512_undefined_ps(), \
+ (__mmask16)(-1), (int)(R)))
+
+#define _mm512_mask_cvtx_roundph_ps(W, U, A, R) \
+ ((__m512)__builtin_ia32_vcvtph2psx512_mask((__v16hf)(A), (__v16sf)(W), \
+ (__mmask16)(U), (int)(R)))
+
+#define _mm512_maskz_cvtx_roundph_ps(U, A, R) \
+ ((__m512)__builtin_ia32_vcvtph2psx512_mask( \
+ (__v16hf)(A), (__v16sf)_mm512_setzero_ps(), (__mmask16)(U), (int)(R)))
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS512 _mm512_cvtxph_ps(__m256h __A) {
+ return (__m512)__builtin_ia32_vcvtph2psx512_mask(
+ (__v16hf)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtxph_ps(__m512 __W, __mmask16 __U, __m256h __A) {
+ return (__m512)__builtin_ia32_vcvtph2psx512_mask(
+ (__v16hf)__A, (__v16sf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtxph_ps(__mmask16 __U, __m256h __A) {
+ return (__m512)__builtin_ia32_vcvtph2psx512_mask(
+ (__v16hf)__A, (__v16sf)_mm512_setzero_ps(), (__mmask16)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtx_roundps_ph(A, R) \
+ ((__m256h)__builtin_ia32_vcvtps2phx512_mask((__v16sf)(A), \
+ (__v16hf)_mm256_undefined_ph(), \
+ (__mmask16)(-1), (int)(R)))
+
+#define _mm512_mask_cvtx_roundps_ph(W, U, A, R) \
+ ((__m256h)__builtin_ia32_vcvtps2phx512_mask((__v16sf)(A), (__v16hf)(W), \
+ (__mmask16)(U), (int)(R)))
+
+#define _mm512_maskz_cvtx_roundps_ph(U, A, R) \
+ ((__m256h)__builtin_ia32_vcvtps2phx512_mask( \
+ (__v16sf)(A), (__v16hf)_mm256_setzero_ph(), (__mmask16)(U), (int)(R)))
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS512 _mm512_cvtxps_ph(__m512 __A) {
+ return (__m256h)__builtin_ia32_vcvtps2phx512_mask(
+ (__v16sf)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtxps_ph(__m256h __W, __mmask16 __U, __m512 __A) {
+ return (__m256h)__builtin_ia32_vcvtps2phx512_mask(
+ (__v16sf)__A, (__v16hf)__W, (__mmask16)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtxps_ph(__mmask16 __U, __m512 __A) {
+ return (__m256h)__builtin_ia32_vcvtps2phx512_mask(
+ (__v16sf)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_fmadd_round_ph(A, B, C, R) \
+ ((__m512h)__builtin_ia32_vfmaddph512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \
+ (__mmask32)-1, (int)(R)))
+
+#define _mm512_mask_fmadd_round_ph(A, U, B, C, R) \
+ ((__m512h)__builtin_ia32_vfmaddph512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_mask3_fmadd_round_ph(A, B, C, U, R) \
+ ((__m512h)__builtin_ia32_vfmaddph512_mask3( \
+ (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_maskz_fmadd_round_ph(U, A, B, C, R) \
+ ((__m512h)__builtin_ia32_vfmaddph512_maskz( \
+ (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_fmsub_round_ph(A, B, C, R) \
+ ((__m512h)__builtin_ia32_vfmaddph512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \
+ (__mmask32)-1, (int)(R)))
+
+#define _mm512_mask_fmsub_round_ph(A, U, B, C, R) \
+ ((__m512h)__builtin_ia32_vfmaddph512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_maskz_fmsub_round_ph(U, A, B, C, R) \
+ ((__m512h)__builtin_ia32_vfmaddph512_maskz( \
+ (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_fnmadd_round_ph(A, B, C, R) \
+ ((__m512h)__builtin_ia32_vfmaddph512_mask( \
+ (__v32hf)(__m512h)(A), -(__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \
+ (__mmask32)-1, (int)(R)))
+
+#define _mm512_mask3_fnmadd_round_ph(A, B, C, U, R) \
+ ((__m512h)__builtin_ia32_vfmaddph512_mask3( \
+ -(__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_maskz_fnmadd_round_ph(U, A, B, C, R) \
+ ((__m512h)__builtin_ia32_vfmaddph512_maskz( \
+ -(__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_fnmsub_round_ph(A, B, C, R) \
+ ((__m512h)__builtin_ia32_vfmaddph512_mask( \
+ (__v32hf)(__m512h)(A), -(__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \
+ (__mmask32)-1, (int)(R)))
+
+#define _mm512_maskz_fnmsub_round_ph(U, A, B, C, R) \
+ ((__m512h)__builtin_ia32_vfmaddph512_maskz( \
+ -(__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \
+ (__mmask32)(U), (int)(R)))
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fmadd_ph(__m512h __A,
+ __m512h __B,
+ __m512h __C) {
+ return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, (__v32hf)__B,
+ (__v32hf)__C, (__mmask32)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_fmadd_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
+ return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, (__v32hf)__B,
+ (__v32hf)__C, (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask3_fmadd_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
+ return (__m512h)__builtin_ia32_vfmaddph512_mask3((__v32hf)__A, (__v32hf)__B,
+ (__v32hf)__C, (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_fmadd_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
+ return (__m512h)__builtin_ia32_vfmaddph512_maskz((__v32hf)__A, (__v32hf)__B,
+ (__v32hf)__C, (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fmsub_ph(__m512h __A,
+ __m512h __B,
+ __m512h __C) {
+ return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, (__v32hf)__B,
+ -(__v32hf)__C, (__mmask32)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_fmsub_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
+ return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, (__v32hf)__B,
+ -(__v32hf)__C, (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_fmsub_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
+ return (__m512h)__builtin_ia32_vfmaddph512_maskz(
+ (__v32hf)__A, (__v32hf)__B, -(__v32hf)__C, (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fnmadd_ph(__m512h __A,
+ __m512h __B,
+ __m512h __C) {
+ return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, -(__v32hf)__B,
+ (__v32hf)__C, (__mmask32)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask3_fnmadd_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
+ return (__m512h)__builtin_ia32_vfmaddph512_mask3(-(__v32hf)__A, (__v32hf)__B,
+ (__v32hf)__C, (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_fnmadd_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
+ return (__m512h)__builtin_ia32_vfmaddph512_maskz(-(__v32hf)__A, (__v32hf)__B,
+ (__v32hf)__C, (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fnmsub_ph(__m512h __A,
+ __m512h __B,
+ __m512h __C) {
+ return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, -(__v32hf)__B,
+ -(__v32hf)__C, (__mmask32)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_fnmsub_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
+ return (__m512h)__builtin_ia32_vfmaddph512_maskz(
+ -(__v32hf)__A, (__v32hf)__B, -(__v32hf)__C, (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_fmaddsub_round_ph(A, B, C, R) \
+ ((__m512h)__builtin_ia32_vfmaddsubph512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \
+ (__mmask32)-1, (int)(R)))
+
+#define _mm512_mask_fmaddsub_round_ph(A, U, B, C, R) \
+ ((__m512h)__builtin_ia32_vfmaddsubph512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_mask3_fmaddsub_round_ph(A, B, C, U, R) \
+ ((__m512h)__builtin_ia32_vfmaddsubph512_mask3( \
+ (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_maskz_fmaddsub_round_ph(U, A, B, C, R) \
+ ((__m512h)__builtin_ia32_vfmaddsubph512_maskz( \
+ (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_fmsubadd_round_ph(A, B, C, R) \
+ ((__m512h)__builtin_ia32_vfmaddsubph512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \
+ (__mmask32)-1, (int)(R)))
+
+#define _mm512_mask_fmsubadd_round_ph(A, U, B, C, R) \
+ ((__m512h)__builtin_ia32_vfmaddsubph512_mask( \
+ (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_maskz_fmsubadd_round_ph(U, A, B, C, R) \
+ ((__m512h)__builtin_ia32_vfmaddsubph512_maskz( \
+ (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \
+ (__mmask32)(U), (int)(R)))
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_fmaddsub_ph(__m512h __A, __m512h __B, __m512h __C) {
+ return (__m512h)__builtin_ia32_vfmaddsubph512_mask(
+ (__v32hf)__A, (__v32hf)__B, (__v32hf)__C, (__mmask32)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_fmaddsub_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
+ return (__m512h)__builtin_ia32_vfmaddsubph512_mask(
+ (__v32hf)__A, (__v32hf)__B, (__v32hf)__C, (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask3_fmaddsub_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
+ return (__m512h)__builtin_ia32_vfmaddsubph512_mask3(
+ (__v32hf)__A, (__v32hf)__B, (__v32hf)__C, (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_fmaddsub_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
+ return (__m512h)__builtin_ia32_vfmaddsubph512_maskz(
+ (__v32hf)__A, (__v32hf)__B, (__v32hf)__C, (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_fmsubadd_ph(__m512h __A, __m512h __B, __m512h __C) {
+ return (__m512h)__builtin_ia32_vfmaddsubph512_mask(
+ (__v32hf)__A, (__v32hf)__B, -(__v32hf)__C, (__mmask32)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_fmsubadd_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
+ return (__m512h)__builtin_ia32_vfmaddsubph512_mask(
+ (__v32hf)__A, (__v32hf)__B, -(__v32hf)__C, (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_fmsubadd_ph(__mmask32 __U, __m512h __A, __m512h __B, __m512h __C) {
+ return (__m512h)__builtin_ia32_vfmaddsubph512_maskz(
+ (__v32hf)__A, (__v32hf)__B, -(__v32hf)__C, (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask3_fmsub_round_ph(A, B, C, U, R) \
+ ((__m512h)__builtin_ia32_vfmsubph512_mask3( \
+ (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \
+ (__mmask32)(U), (int)(R)))
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask3_fmsub_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
+ return (__m512h)__builtin_ia32_vfmsubph512_mask3((__v32hf)__A, (__v32hf)__B,
+ (__v32hf)__C, (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask3_fmsubadd_round_ph(A, B, C, U, R) \
+ ((__m512h)__builtin_ia32_vfmsubaddph512_mask3( \
+ (__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \
+ (__mmask32)(U), (int)(R)))
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask3_fmsubadd_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
+ return (__m512h)__builtin_ia32_vfmsubaddph512_mask3(
+ (__v32hf)__A, (__v32hf)__B, (__v32hf)__C, (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask_fnmadd_round_ph(A, U, B, C, R) \
+ ((__m512h)__builtin_ia32_vfmaddph512_mask( \
+ (__v32hf)(__m512h)(A), -(__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \
+ (__mmask32)(U), (int)(R)))
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_fnmadd_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
+ return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, -(__v32hf)__B,
+ (__v32hf)__C, (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_mask_fnmsub_round_ph(A, U, B, C, R) \
+ ((__m512h)__builtin_ia32_vfmaddph512_mask( \
+ (__v32hf)(__m512h)(A), -(__v32hf)(__m512h)(B), -(__v32hf)(__m512h)(C), \
+ (__mmask32)(U), (int)(R)))
+
+#define _mm512_mask3_fnmsub_round_ph(A, B, C, U, R) \
+ ((__m512h)__builtin_ia32_vfmsubph512_mask3( \
+ -(__v32hf)(__m512h)(A), (__v32hf)(__m512h)(B), (__v32hf)(__m512h)(C), \
+ (__mmask32)(U), (int)(R)))
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_fnmsub_ph(__m512h __A, __mmask32 __U, __m512h __B, __m512h __C) {
+ return (__m512h)__builtin_ia32_vfmaddph512_mask((__v32hf)__A, -(__v32hf)__B,
+ -(__v32hf)__C, (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask3_fnmsub_ph(__m512h __A, __m512h __B, __m512h __C, __mmask32 __U) {
+ return (__m512h)__builtin_ia32_vfmsubph512_mask3(-(__v32hf)__A, (__v32hf)__B,
+ (__v32hf)__C, (__mmask32)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmadd_sh(__m128h __W,
+ __m128h __A,
+ __m128h __B) {
+ return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, (__v8hf)__A, (__v8hf)__B,
+ (__mmask8)-1, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmadd_sh(__m128h __W,
+ __mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, (__v8hf)__A, (__v8hf)__B,
+ (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_fmadd_round_sh(A, B, C, R) \
+ ((__m128h)__builtin_ia32_vfmaddsh3_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(C), \
+ (__mmask8)-1, (int)(R)))
+
+#define _mm_mask_fmadd_round_sh(W, U, A, B, R) \
+ ((__m128h)__builtin_ia32_vfmaddsh3_mask( \
+ (__v8hf)(__m128h)(W), (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_fmadd_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
+ return __builtin_ia32_vfmaddsh3_maskz((__v8hf)__A, (__v8hf)__B, (__v8hf)__C,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fmadd_round_sh(U, A, B, C, R) \
+ ((__m128h)__builtin_ia32_vfmaddsh3_maskz( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), (__v8hf)(__m128h)(C), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask3_fmadd_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) {
+ return __builtin_ia32_vfmaddsh3_mask3((__v8hf)__W, (__v8hf)__X, (__v8hf)__Y,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fmadd_round_sh(W, X, Y, U, R) \
+ ((__m128h)__builtin_ia32_vfmaddsh3_mask3( \
+ (__v8hf)(__m128h)(W), (__v8hf)(__m128h)(X), (__v8hf)(__m128h)(Y), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmsub_sh(__m128h __W,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_vfmaddsh3_mask((__v8hf)__W, (__v8hf)__A,
+ -(__v8hf)__B, (__mmask8)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmsub_sh(__m128h __W,
+ __mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_vfmaddsh3_mask((__v8hf)__W, (__v8hf)__A,
+ -(__v8hf)__B, (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_fmsub_round_sh(A, B, C, R) \
+ ((__m128h)__builtin_ia32_vfmaddsh3_mask( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), -(__v8hf)(__m128h)(C), \
+ (__mmask8)-1, (int)(R)))
+
+#define _mm_mask_fmsub_round_sh(W, U, A, B, R) \
+ ((__m128h)__builtin_ia32_vfmaddsh3_mask( \
+ (__v8hf)(__m128h)(W), (__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_fmsub_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
+ return (__m128h)__builtin_ia32_vfmaddsh3_maskz((__v8hf)__A, (__v8hf)__B,
+ -(__v8hf)__C, (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fmsub_round_sh(U, A, B, C, R) \
+ ((__m128h)__builtin_ia32_vfmaddsh3_maskz( \
+ (__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), -(__v8hf)(__m128h)(C), \
+ (__mmask8)(U), (int)R))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask3_fmsub_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) {
+ return __builtin_ia32_vfmsubsh3_mask3((__v8hf)__W, (__v8hf)__X, (__v8hf)__Y,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fmsub_round_sh(W, X, Y, U, R) \
+ ((__m128h)__builtin_ia32_vfmsubsh3_mask3( \
+ (__v8hf)(__m128h)(W), (__v8hf)(__m128h)(X), (__v8hf)(__m128h)(Y), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmadd_sh(__m128h __W,
+ __m128h __A,
+ __m128h __B) {
+ return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, -(__v8hf)__A, (__v8hf)__B,
+ (__mmask8)-1, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_fnmadd_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
+ return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, -(__v8hf)__A, (__v8hf)__B,
+ (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_fnmadd_round_sh(A, B, C, R) \
+ ((__m128h)__builtin_ia32_vfmaddsh3_mask( \
+ (__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B), (__v8hf)(__m128h)(C), \
+ (__mmask8)-1, (int)(R)))
+
+#define _mm_mask_fnmadd_round_sh(W, U, A, B, R) \
+ ((__m128h)__builtin_ia32_vfmaddsh3_mask( \
+ (__v8hf)(__m128h)(W), -(__v8hf)(__m128h)(A), (__v8hf)(__m128h)(B), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_fnmadd_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
+ return __builtin_ia32_vfmaddsh3_maskz((__v8hf)__A, -(__v8hf)__B, (__v8hf)__C,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fnmadd_round_sh(U, A, B, C, R) \
+ ((__m128h)__builtin_ia32_vfmaddsh3_maskz( \
+ (__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B), (__v8hf)(__m128h)(C), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask3_fnmadd_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) {
+ return __builtin_ia32_vfmaddsh3_mask3((__v8hf)__W, -(__v8hf)__X, (__v8hf)__Y,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fnmadd_round_sh(W, X, Y, U, R) \
+ ((__m128h)__builtin_ia32_vfmaddsh3_mask3( \
+ (__v8hf)(__m128h)(W), -(__v8hf)(__m128h)(X), (__v8hf)(__m128h)(Y), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmsub_sh(__m128h __W,
+ __m128h __A,
+ __m128h __B) {
+ return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, -(__v8hf)__A, -(__v8hf)__B,
+ (__mmask8)-1, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_fnmsub_sh(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
+ return __builtin_ia32_vfmaddsh3_mask((__v8hf)__W, -(__v8hf)__A, -(__v8hf)__B,
+ (__mmask8)__U, _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_fnmsub_round_sh(A, B, C, R) \
+ ((__m128h)__builtin_ia32_vfmaddsh3_mask( \
+ (__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B), -(__v8hf)(__m128h)(C), \
+ (__mmask8)-1, (int)(R)))
+
+#define _mm_mask_fnmsub_round_sh(W, U, A, B, R) \
+ ((__m128h)__builtin_ia32_vfmaddsh3_mask( \
+ (__v8hf)(__m128h)(W), -(__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_fnmsub_sh(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
+ return __builtin_ia32_vfmaddsh3_maskz((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_maskz_fnmsub_round_sh(U, A, B, C, R) \
+ ((__m128h)__builtin_ia32_vfmaddsh3_maskz( \
+ (__v8hf)(__m128h)(A), -(__v8hf)(__m128h)(B), -(__v8hf)(__m128h)(C), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask3_fnmsub_sh(__m128h __W, __m128h __X, __m128h __Y, __mmask8 __U) {
+ return __builtin_ia32_vfmsubsh3_mask3((__v8hf)__W, -(__v8hf)__X, (__v8hf)__Y,
+ (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_mask3_fnmsub_round_sh(W, X, Y, U, R) \
+ ((__m128h)__builtin_ia32_vfmsubsh3_mask3( \
+ (__v8hf)(__m128h)(W), -(__v8hf)(__m128h)(X), (__v8hf)(__m128h)(Y), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fcmadd_sch(__m128h __A,
+ __m128h __B,
+ __m128h __C) {
+ return (__m128h)__builtin_ia32_vfcmaddcsh_mask((__v4sf)__A, (__v4sf)__B,
+ (__v4sf)__C, (__mmask8)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_fcmadd_sch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
+ return (__m128h)__builtin_ia32_vfcmaddcsh_round_mask(
+ (__v4sf)__A, (__v4sf)(__B), (__v4sf)(__C), __U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_fcmadd_sch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
+ return (__m128h)__builtin_ia32_vfcmaddcsh_maskz((__v4sf)__A, (__v4sf)__B,
+ (__v4sf)__C, (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask3_fcmadd_sch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
+ return (__m128h)__builtin_ia32_vfcmaddcsh_round_mask3(
+ (__v4sf)__A, (__v4sf)__B, (__v4sf)__C, __U, _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_fcmadd_round_sch(A, B, C, R) \
+ ((__m128h)__builtin_ia32_vfcmaddcsh_mask( \
+ (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C), \
+ (__mmask8)-1, (int)(R)))
+
+#define _mm_mask_fcmadd_round_sch(A, U, B, C, R) \
+ ((__m128h)__builtin_ia32_vfcmaddcsh_round_mask( \
+ (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_fcmadd_round_sch(U, A, B, C, R) \
+ ((__m128h)__builtin_ia32_vfcmaddcsh_maskz( \
+ (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_mask3_fcmadd_round_sch(A, B, C, U, R) \
+ ((__m128h)__builtin_ia32_vfcmaddcsh_round_mask3( \
+ (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmadd_sch(__m128h __A,
+ __m128h __B,
+ __m128h __C) {
+ return (__m128h)__builtin_ia32_vfmaddcsh_mask((__v4sf)__A, (__v4sf)__B,
+ (__v4sf)__C, (__mmask8)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_fmadd_sch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
+ return (__m128h)__builtin_ia32_vfmaddcsh_round_mask(
+ (__v4sf)__A, (__v4sf)(__B), (__v4sf)(__C), __U, _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_fmadd_sch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
+ return (__m128h)__builtin_ia32_vfmaddcsh_maskz((__v4sf)__A, (__v4sf)__B,
+ (__v4sf)__C, (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask3_fmadd_sch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
+ return (__m128h)__builtin_ia32_vfmaddcsh_round_mask3(
+ (__v4sf)__A, (__v4sf)__B, (__v4sf)__C, __U, _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_fmadd_round_sch(A, B, C, R) \
+ ((__m128h)__builtin_ia32_vfmaddcsh_mask( \
+ (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C), \
+ (__mmask8)-1, (int)(R)))
+
+#define _mm_mask_fmadd_round_sch(A, U, B, C, R) \
+ ((__m128h)__builtin_ia32_vfmaddcsh_round_mask( \
+ (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_fmadd_round_sch(U, A, B, C, R) \
+ ((__m128h)__builtin_ia32_vfmaddcsh_maskz( \
+ (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_mask3_fmadd_round_sch(A, B, C, U, R) \
+ ((__m128h)__builtin_ia32_vfmaddcsh_round_mask3( \
+ (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(C), \
+ (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fcmul_sch(__m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_vfcmulcsh_mask(
+ (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_undefined_ph(), (__mmask8)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_fcmul_sch(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
+ return (__m128h)__builtin_ia32_vfcmulcsh_mask((__v4sf)__A, (__v4sf)__B,
+ (__v4sf)__W, (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_fcmul_sch(__mmask8 __U, __m128h __A, __m128h __B) {
+ return (__m128h)__builtin_ia32_vfcmulcsh_mask(
+ (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ph(), (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_fcmul_round_sch(A, B, R) \
+ ((__m128h)__builtin_ia32_vfcmulcsh_mask( \
+ (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), \
+ (__v4sf)(__m128h)_mm_undefined_ph(), (__mmask8)-1, (int)(R)))
+
+#define _mm_mask_fcmul_round_sch(W, U, A, B, R) \
+ ((__m128h)__builtin_ia32_vfcmulcsh_mask( \
+ (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(W), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_fcmul_round_sch(U, A, B, R) \
+ ((__m128h)__builtin_ia32_vfcmulcsh_mask( \
+ (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), \
+ (__v4sf)(__m128h)_mm_setzero_ph(), (__mmask8)(U), (int)(R)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmul_sch(__m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_vfmulcsh_mask(
+ (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_undefined_ph(), (__mmask8)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmul_sch(__m128h __W,
+ __mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_vfmulcsh_mask((__v4sf)__A, (__v4sf)__B,
+ (__v4sf)__W, (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_fmul_sch(__mmask8 __U, __m128h __A, __m128h __B) {
+ return (__m128h)__builtin_ia32_vfmulcsh_mask(
+ (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ph(), (__mmask8)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm_fmul_round_sch(A, B, R) \
+ ((__m128h)__builtin_ia32_vfmulcsh_mask( \
+ (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), \
+ (__v4sf)(__m128h)_mm_undefined_ph(), (__mmask8)-1, (int)(R)))
+
+#define _mm_mask_fmul_round_sch(W, U, A, B, R) \
+ ((__m128h)__builtin_ia32_vfmulcsh_mask( \
+ (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), (__v4sf)(__m128h)(W), \
+ (__mmask8)(U), (int)(R)))
+
+#define _mm_maskz_fmul_round_sch(U, A, B, R) \
+ ((__m128h)__builtin_ia32_vfmulcsh_mask( \
+ (__v4sf)(__m128h)(A), (__v4sf)(__m128h)(B), \
+ (__v4sf)(__m128h)_mm_setzero_ph(), (__mmask8)(U), (int)(R)))
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fcmul_pch(__m512h __A,
+ __m512h __B) {
+ return (__m512h)__builtin_ia32_vfcmulcph512_mask(
+ (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_undefined_ph(), (__mmask16)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_fcmul_pch(__m512h __W, __mmask16 __U, __m512h __A, __m512h __B) {
+ return (__m512h)__builtin_ia32_vfcmulcph512_mask((__v16sf)__A, (__v16sf)__B,
+ (__v16sf)__W, (__mmask16)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_fcmul_pch(__mmask16 __U, __m512h __A, __m512h __B) {
+ return (__m512h)__builtin_ia32_vfcmulcph512_mask(
+ (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ph(), (__mmask16)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_fcmul_round_pch(A, B, R) \
+ ((__m512h)__builtin_ia32_vfcmulcph512_mask( \
+ (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), \
+ (__v16sf)(__m512h)_mm512_undefined_ph(), (__mmask16)-1, (int)(R)))
+
+#define _mm512_mask_fcmul_round_pch(W, U, A, B, R) \
+ ((__m512h)__builtin_ia32_vfcmulcph512_mask( \
+ (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(W), \
+ (__mmask16)(U), (int)(R)))
+
+#define _mm512_maskz_fcmul_round_pch(U, A, B, R) \
+ ((__m512h)__builtin_ia32_vfcmulcph512_mask( \
+ (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), \
+ (__v16sf)(__m512h)_mm512_setzero_ph(), (__mmask16)(U), (int)(R)))
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fmul_pch(__m512h __A,
+ __m512h __B) {
+ return (__m512h)__builtin_ia32_vfmulcph512_mask(
+ (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_undefined_ph(), (__mmask16)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_fmul_pch(__m512h __W, __mmask16 __U, __m512h __A, __m512h __B) {
+ return (__m512h)__builtin_ia32_vfmulcph512_mask((__v16sf)__A, (__v16sf)__B,
+ (__v16sf)__W, (__mmask16)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_fmul_pch(__mmask16 __U, __m512h __A, __m512h __B) {
+ return (__m512h)__builtin_ia32_vfmulcph512_mask(
+ (__v16sf)__A, (__v16sf)__B, (__v16sf)_mm512_setzero_ph(), (__mmask16)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_fmul_round_pch(A, B, R) \
+ ((__m512h)__builtin_ia32_vfmulcph512_mask( \
+ (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), \
+ (__v16sf)(__m512h)_mm512_undefined_ph(), (__mmask16)-1, (int)(R)))
+
+#define _mm512_mask_fmul_round_pch(W, U, A, B, R) \
+ ((__m512h)__builtin_ia32_vfmulcph512_mask( \
+ (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(W), \
+ (__mmask16)(U), (int)(R)))
+
+#define _mm512_maskz_fmul_round_pch(U, A, B, R) \
+ ((__m512h)__builtin_ia32_vfmulcph512_mask( \
+ (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), \
+ (__v16sf)(__m512h)_mm512_setzero_ph(), (__mmask16)(U), (int)(R)))
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fcmadd_pch(__m512h __A,
+ __m512h __B,
+ __m512h __C) {
+ return (__m512h)__builtin_ia32_vfcmaddcph512_mask3(
+ (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_fcmadd_pch(__m512h __A, __mmask16 __U, __m512h __B, __m512h __C) {
+ return (__m512h)__builtin_ia32_vfcmaddcph512_mask(
+ (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask3_fcmadd_pch(__m512h __A, __m512h __B, __m512h __C, __mmask16 __U) {
+ return (__m512h)__builtin_ia32_vfcmaddcph512_mask3(
+ (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_fcmadd_pch(__mmask16 __U, __m512h __A, __m512h __B, __m512h __C) {
+ return (__m512h)__builtin_ia32_vfcmaddcph512_maskz(
+ (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_fcmadd_round_pch(A, B, C, R) \
+ ((__m512h)__builtin_ia32_vfcmaddcph512_mask3( \
+ (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C), \
+ (__mmask16)-1, (int)(R)))
+
+#define _mm512_mask_fcmadd_round_pch(A, U, B, C, R) \
+ ((__m512h)__builtin_ia32_vfcmaddcph512_mask( \
+ (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C), \
+ (__mmask16)(U), (int)(R)))
+
+#define _mm512_mask3_fcmadd_round_pch(A, B, C, U, R) \
+ ((__m512h)__builtin_ia32_vfcmaddcph512_mask3( \
+ (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C), \
+ (__mmask16)(U), (int)(R)))
+
+#define _mm512_maskz_fcmadd_round_pch(U, A, B, C, R) \
+ ((__m512h)__builtin_ia32_vfcmaddcph512_maskz( \
+ (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C), \
+ (__mmask16)(U), (int)(R)))
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_fmadd_pch(__m512h __A,
+ __m512h __B,
+ __m512h __C) {
+ return (__m512h)__builtin_ia32_vfmaddcph512_mask3((__v16sf)__A, (__v16sf)__B,
+ (__v16sf)__C, (__mmask16)-1,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_fmadd_pch(__m512h __A, __mmask16 __U, __m512h __B, __m512h __C) {
+ return (__m512h)__builtin_ia32_vfmaddcph512_mask((__v16sf)__A, (__v16sf)__B,
+ (__v16sf)__C, (__mmask16)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask3_fmadd_pch(__m512h __A, __m512h __B, __m512h __C, __mmask16 __U) {
+ return (__m512h)__builtin_ia32_vfmaddcph512_mask3(
+ (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_maskz_fmadd_pch(__mmask16 __U, __m512h __A, __m512h __B, __m512h __C) {
+ return (__m512h)__builtin_ia32_vfmaddcph512_maskz(
+ (__v16sf)__A, (__v16sf)__B, (__v16sf)__C, (__mmask16)__U,
+ _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_fmadd_round_pch(A, B, C, R) \
+ ((__m512h)__builtin_ia32_vfmaddcph512_mask3( \
+ (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C), \
+ (__mmask16)-1, (int)(R)))
+
+#define _mm512_mask_fmadd_round_pch(A, U, B, C, R) \
+ ((__m512h)__builtin_ia32_vfmaddcph512_mask( \
+ (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C), \
+ (__mmask16)(U), (int)(R)))
+
+#define _mm512_mask3_fmadd_round_pch(A, B, C, U, R) \
+ ((__m512h)__builtin_ia32_vfmaddcph512_mask3( \
+ (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C), \
+ (__mmask16)(U), (int)(R)))
+
+#define _mm512_maskz_fmadd_round_pch(U, A, B, C, R) \
+ ((__m512h)__builtin_ia32_vfmaddcph512_maskz( \
+ (__v16sf)(__m512h)(A), (__v16sf)(__m512h)(B), (__v16sf)(__m512h)(C), \
+ (__mmask16)(U), (int)(R)))
+
+static __inline__ _Float16 __DEFAULT_FN_ATTRS512
+_mm512_reduce_add_ph(__m512h __W) {
+ return __builtin_ia32_reduce_fadd_ph512(-0.0f16, __W);
+}
+
+static __inline__ _Float16 __DEFAULT_FN_ATTRS512
+_mm512_reduce_mul_ph(__m512h __W) {
+ return __builtin_ia32_reduce_fmul_ph512(1.0f16, __W);
+}
+
+static __inline__ _Float16 __DEFAULT_FN_ATTRS512
+_mm512_reduce_max_ph(__m512h __V) {
+ return __builtin_ia32_reduce_fmax_ph512(__V);
+}
+
+static __inline__ _Float16 __DEFAULT_FN_ATTRS512
+_mm512_reduce_min_ph(__m512h __V) {
+ return __builtin_ia32_reduce_fmin_ph512(__V);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_blend_ph(__mmask32 __U, __m512h __A, __m512h __W) {
+ return (__m512h)__builtin_ia32_selectph_512((__mmask32)__U, (__v32hf)__W,
+ (__v32hf)__A);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_permutex2var_ph(__m512h __A, __m512i __I, __m512h __B) {
+ return (__m512h)__builtin_ia32_vpermi2varhi512((__v32hi)__A, (__v32hi)__I,
+ (__v32hi)__B);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_permutexvar_ph(__m512i __A, __m512h __B) {
+ return (__m512h)__builtin_ia32_permvarhi512((__v32hi)__B, (__v32hi)__A);
+}
+
+// intrinsics below are alias for f*mul_*ch
+#define _mm512_mul_pch(A, B) _mm512_fmul_pch(A, B)
+#define _mm512_mask_mul_pch(W, U, A, B) _mm512_mask_fmul_pch(W, U, A, B)
+#define _mm512_maskz_mul_pch(U, A, B) _mm512_maskz_fmul_pch(U, A, B)
+#define _mm512_mul_round_pch(A, B, R) _mm512_fmul_round_pch(A, B, R)
+#define _mm512_mask_mul_round_pch(W, U, A, B, R) \
+ _mm512_mask_fmul_round_pch(W, U, A, B, R)
+#define _mm512_maskz_mul_round_pch(U, A, B, R) \
+ _mm512_maskz_fmul_round_pch(U, A, B, R)
+
+#define _mm512_cmul_pch(A, B) _mm512_fcmul_pch(A, B)
+#define _mm512_mask_cmul_pch(W, U, A, B) _mm512_mask_fcmul_pch(W, U, A, B)
+#define _mm512_maskz_cmul_pch(U, A, B) _mm512_maskz_fcmul_pch(U, A, B)
+#define _mm512_cmul_round_pch(A, B, R) _mm512_fcmul_round_pch(A, B, R)
+#define _mm512_mask_cmul_round_pch(W, U, A, B, R) \
+ _mm512_mask_fcmul_round_pch(W, U, A, B, R)
+#define _mm512_maskz_cmul_round_pch(U, A, B, R) \
+ _mm512_maskz_fcmul_round_pch(U, A, B, R)
+
+#define _mm_mul_sch(A, B) _mm_fmul_sch(A, B)
+#define _mm_mask_mul_sch(W, U, A, B) _mm_mask_fmul_sch(W, U, A, B)
+#define _mm_maskz_mul_sch(U, A, B) _mm_maskz_fmul_sch(U, A, B)
+#define _mm_mul_round_sch(A, B, R) _mm_fmul_round_sch(A, B, R)
+#define _mm_mask_mul_round_sch(W, U, A, B, R) \
+ _mm_mask_fmul_round_sch(W, U, A, B, R)
+#define _mm_maskz_mul_round_sch(U, A, B, R) _mm_maskz_fmul_round_sch(U, A, B, R)
+
+#define _mm_cmul_sch(A, B) _mm_fcmul_sch(A, B)
+#define _mm_mask_cmul_sch(W, U, A, B) _mm_mask_fcmul_sch(W, U, A, B)
+#define _mm_maskz_cmul_sch(U, A, B) _mm_maskz_fcmul_sch(U, A, B)
+#define _mm_cmul_round_sch(A, B, R) _mm_fcmul_round_sch(A, B, R)
+#define _mm_mask_cmul_round_sch(W, U, A, B, R) \
+ _mm_mask_fcmul_round_sch(W, U, A, B, R)
+#define _mm_maskz_cmul_round_sch(U, A, B, R) \
+ _mm_maskz_fcmul_round_sch(U, A, B, R)
+
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
+#undef __DEFAULT_FN_ATTRS512
+
+#endif
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512ifmaintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512ifmaintrin.h
index 5f7da52f1f73..9468d17556e7 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512ifmaintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512ifmaintrin.h
@@ -15,7 +15,9 @@
#define __IFMAINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512ifma"), __min_vector_width__(512)))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512ifma,evex512"), __min_vector_width__(512)))
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_madd52hi_epu64 (__m512i __X, __m512i __Y, __m512i __Z)
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512ifmavlintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512ifmavlintrin.h
index 5889401d1055..8787cd471d42 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512ifmavlintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512ifmavlintrin.h
@@ -15,17 +15,30 @@
#define __IFMAVLINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512ifma,avx512vl"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512ifma,avx512vl"), __min_vector_width__(256)))
-
-
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_madd52hi_epu64 (__m128i __X, __m128i __Y, __m128i __Z)
-{
- return (__m128i)__builtin_ia32_vpmadd52huq128((__v2di) __X, (__v2di) __Y,
- (__v2di) __Z);
-}
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512ifma,avx512vl,no-evex512"), \
+ __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512ifma,avx512vl,no-evex512"), \
+ __min_vector_width__(256)))
+
+#define _mm_madd52hi_epu64(X, Y, Z) \
+ ((__m128i)__builtin_ia32_vpmadd52huq128((__v2di)(X), (__v2di)(Y), \
+ (__v2di)(Z)))
+
+#define _mm256_madd52hi_epu64(X, Y, Z) \
+ ((__m256i)__builtin_ia32_vpmadd52huq256((__v4di)(X), (__v4di)(Y), \
+ (__v4di)(Z)))
+
+#define _mm_madd52lo_epu64(X, Y, Z) \
+ ((__m128i)__builtin_ia32_vpmadd52luq128((__v2di)(X), (__v2di)(Y), \
+ (__v2di)(Z)))
+
+#define _mm256_madd52lo_epu64(X, Y, Z) \
+ ((__m256i)__builtin_ia32_vpmadd52luq256((__v4di)(X), (__v4di)(Y), \
+ (__v4di)(Z)))
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_madd52hi_epu64 (__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
@@ -44,13 +57,6 @@ _mm_maskz_madd52hi_epu64 (__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_madd52hi_epu64 (__m256i __X, __m256i __Y, __m256i __Z)
-{
- return (__m256i)__builtin_ia32_vpmadd52huq256((__v4di)__X, (__v4di)__Y,
- (__v4di)__Z);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_madd52hi_epu64 (__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectq_256(__M,
@@ -67,13 +73,6 @@ _mm256_maskz_madd52hi_epu64 (__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_madd52lo_epu64 (__m128i __X, __m128i __Y, __m128i __Z)
-{
- return (__m128i)__builtin_ia32_vpmadd52luq128((__v2di)__X, (__v2di)__Y,
- (__v2di)__Z);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_madd52lo_epu64 (__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_selectq_128(__M,
@@ -90,13 +89,6 @@ _mm_maskz_madd52lo_epu64 (__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z)
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_madd52lo_epu64 (__m256i __X, __m256i __Y, __m256i __Z)
-{
- return (__m256i)__builtin_ia32_vpmadd52luq256((__v4di)__X, (__v4di)__Y,
- (__v4di)__Z);
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_madd52lo_epu64 (__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
{
return (__m256i)__builtin_ia32_selectq_256(__M,
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512pfintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512pfintrin.h
index b8bcf49c6b24..f853be021a2d 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512pfintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512pfintrin.h
@@ -14,9 +14,6 @@
#ifndef __AVX512PFINTRIN_H
#define __AVX512PFINTRIN_H
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512pf")))
-
#define _mm512_mask_prefetch_i32gather_pd(index, mask, addr, scale, hint) \
__builtin_ia32_gatherpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \
(void const *)(addr), (int)(scale), \
@@ -92,6 +89,4 @@
__builtin_ia32_scatterpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \
(void *)(addr), (int)(scale), (int)(hint))
-#undef __DEFAULT_FN_ATTRS
-
#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vbmi2intrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vbmi2intrin.h
index a23144616ce3..11598c888787 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vbmi2intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vbmi2intrin.h
@@ -15,7 +15,7 @@
#define __AVX512VBMI2INTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi2"), __min_vector_width__(512)))
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi2,evex512"), __min_vector_width__(512)))
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -129,88 +129,88 @@ _mm512_maskz_expandloadu_epi8(__mmask64 __U, void const *__P)
}
#define _mm512_shldi_epi64(A, B, I) \
- (__m512i)__builtin_ia32_vpshldq512((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), (int)(I))
+ ((__m512i)__builtin_ia32_vpshldq512((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), (int)(I)))
#define _mm512_mask_shldi_epi64(S, U, A, B, I) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_shldi_epi64((A), (B), (I)), \
- (__v8di)(__m512i)(S))
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_shldi_epi64((A), (B), (I)), \
+ (__v8di)(__m512i)(S)))
#define _mm512_maskz_shldi_epi64(U, A, B, I) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_shldi_epi64((A), (B), (I)), \
- (__v8di)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_shldi_epi64((A), (B), (I)), \
+ (__v8di)_mm512_setzero_si512()))
#define _mm512_shldi_epi32(A, B, I) \
- (__m512i)__builtin_ia32_vpshldd512((__v16si)(__m512i)(A), \
- (__v16si)(__m512i)(B), (int)(I))
+ ((__m512i)__builtin_ia32_vpshldd512((__v16si)(__m512i)(A), \
+ (__v16si)(__m512i)(B), (int)(I)))
#define _mm512_mask_shldi_epi32(S, U, A, B, I) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_shldi_epi32((A), (B), (I)), \
- (__v16si)(__m512i)(S))
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_shldi_epi32((A), (B), (I)), \
+ (__v16si)(__m512i)(S)))
#define _mm512_maskz_shldi_epi32(U, A, B, I) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_shldi_epi32((A), (B), (I)), \
- (__v16si)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_shldi_epi32((A), (B), (I)), \
+ (__v16si)_mm512_setzero_si512()))
#define _mm512_shldi_epi16(A, B, I) \
- (__m512i)__builtin_ia32_vpshldw512((__v32hi)(__m512i)(A), \
- (__v32hi)(__m512i)(B), (int)(I))
+ ((__m512i)__builtin_ia32_vpshldw512((__v32hi)(__m512i)(A), \
+ (__v32hi)(__m512i)(B), (int)(I)))
#define _mm512_mask_shldi_epi16(S, U, A, B, I) \
- (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
- (__v32hi)_mm512_shldi_epi16((A), (B), (I)), \
- (__v32hi)(__m512i)(S))
+ ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_shldi_epi16((A), (B), (I)), \
+ (__v32hi)(__m512i)(S)))
#define _mm512_maskz_shldi_epi16(U, A, B, I) \
- (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
- (__v32hi)_mm512_shldi_epi16((A), (B), (I)), \
- (__v32hi)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_shldi_epi16((A), (B), (I)), \
+ (__v32hi)_mm512_setzero_si512()))
#define _mm512_shrdi_epi64(A, B, I) \
- (__m512i)__builtin_ia32_vpshrdq512((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), (int)(I))
+ ((__m512i)__builtin_ia32_vpshrdq512((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), (int)(I)))
#define _mm512_mask_shrdi_epi64(S, U, A, B, I) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_shrdi_epi64((A), (B), (I)), \
- (__v8di)(__m512i)(S))
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_shrdi_epi64((A), (B), (I)), \
+ (__v8di)(__m512i)(S)))
#define _mm512_maskz_shrdi_epi64(U, A, B, I) \
- (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
- (__v8di)_mm512_shrdi_epi64((A), (B), (I)), \
- (__v8di)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_shrdi_epi64((A), (B), (I)), \
+ (__v8di)_mm512_setzero_si512()))
#define _mm512_shrdi_epi32(A, B, I) \
- (__m512i)__builtin_ia32_vpshrdd512((__v16si)(__m512i)(A), \
- (__v16si)(__m512i)(B), (int)(I))
+ ((__m512i)__builtin_ia32_vpshrdd512((__v16si)(__m512i)(A), \
+ (__v16si)(__m512i)(B), (int)(I)))
#define _mm512_mask_shrdi_epi32(S, U, A, B, I) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_shrdi_epi32((A), (B), (I)), \
- (__v16si)(__m512i)(S))
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_shrdi_epi32((A), (B), (I)), \
+ (__v16si)(__m512i)(S)))
#define _mm512_maskz_shrdi_epi32(U, A, B, I) \
- (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
- (__v16si)_mm512_shrdi_epi32((A), (B), (I)), \
- (__v16si)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_shrdi_epi32((A), (B), (I)), \
+ (__v16si)_mm512_setzero_si512()))
#define _mm512_shrdi_epi16(A, B, I) \
- (__m512i)__builtin_ia32_vpshrdw512((__v32hi)(__m512i)(A), \
- (__v32hi)(__m512i)(B), (int)(I))
+ ((__m512i)__builtin_ia32_vpshrdw512((__v32hi)(__m512i)(A), \
+ (__v32hi)(__m512i)(B), (int)(I)))
#define _mm512_mask_shrdi_epi16(S, U, A, B, I) \
- (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
- (__v32hi)_mm512_shrdi_epi16((A), (B), (I)), \
- (__v32hi)(__m512i)(S))
+ ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_shrdi_epi16((A), (B), (I)), \
+ (__v32hi)(__m512i)(S)))
#define _mm512_maskz_shrdi_epi16(U, A, B, I) \
- (__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
- (__v32hi)_mm512_shrdi_epi16((A), (B), (I)), \
- (__v32hi)_mm512_setzero_si512())
+ ((__m512i)__builtin_ia32_selectw_512((__mmask32)(U), \
+ (__v32hi)_mm512_shrdi_epi16((A), (B), (I)), \
+ (__v32hi)_mm512_setzero_si512()))
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_shldv_epi64(__m512i __A, __m512i __B, __m512i __C)
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vbmiintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vbmiintrin.h
index c0e0f94d48d4..e47cd5caddaa 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vbmiintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vbmiintrin.h
@@ -15,8 +15,9 @@
#define __VBMIINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi"), __min_vector_width__(512)))
-
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vbmi,evex512"), __min_vector_width__(512)))
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_permutex2var_epi8(__m512i __A, __m512i __I, __m512i __B)
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vbmivlintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vbmivlintrin.h
index c5b96ae8ada7..848ca2d18c3c 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vbmivlintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vbmivlintrin.h
@@ -15,9 +15,14 @@
#define __VBMIVLINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi,avx512vl"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vbmi,avx512vl"), __min_vector_width__(256)))
-
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vbmi,avx512vl,no-evex512"), \
+ __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vbmi,avx512vl,no-evex512"), \
+ __min_vector_width__(256)))
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_permutex2var_epi8(__m128i __A, __m128i __I, __m128i __B)
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vlbf16intrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vlbf16intrin.h
index 1b1a744bcdbf..89c9f49c7aed 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vlbf16intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vlbf16intrin.h
@@ -10,17 +10,19 @@
#error "Never use <avx512vlbf16intrin.h> directly; include <immintrin.h> instead."
#endif
+#ifdef __SSE2__
+
#ifndef __AVX512VLBF16INTRIN_H
#define __AVX512VLBF16INTRIN_H
-typedef short __m128bh __attribute__((__vector_size__(16), __aligned__(16)));
-
-#define __DEFAULT_FN_ATTRS128 \
- __attribute__((__always_inline__, __nodebug__, \
- __target__("avx512vl, avx512bf16"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS256 \
- __attribute__((__always_inline__, __nodebug__, \
- __target__("avx512vl, avx512bf16"), __min_vector_width__(256)))
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vl,avx512bf16,no-evex512"), \
+ __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vl,avx512bf16,no-evex512"), \
+ __min_vector_width__(256)))
/// Convert Two Packed Single Data to One Packed BF16 Data.
///
@@ -59,9 +61,9 @@ _mm_cvtne2ps_pbh(__m128 __A, __m128 __B) {
/// conversion of __B, and higher 64 bits come from conversion of __A.
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
_mm_mask_cvtne2ps_pbh(__m128bh __W, __mmask8 __U, __m128 __A, __m128 __B) {
- return (__m128bh)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_cvtne2ps_pbh(__A, __B),
- (__v8hi)__W);
+ return (__m128bh)__builtin_ia32_selectpbf_128((__mmask8)__U,
+ (__v8bf)_mm_cvtne2ps_pbh(__A, __B),
+ (__v8bf)__W);
}
/// Convert Two Packed Single Data to One Packed BF16 Data.
@@ -81,9 +83,9 @@ _mm_mask_cvtne2ps_pbh(__m128bh __W, __mmask8 __U, __m128 __A, __m128 __B) {
/// conversion of __B, and higher 64 bits come from conversion of __A.
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
_mm_maskz_cvtne2ps_pbh(__mmask8 __U, __m128 __A, __m128 __B) {
- return (__m128bh)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_cvtne2ps_pbh(__A, __B),
- (__v8hi)_mm_setzero_si128());
+ return (__m128bh)__builtin_ia32_selectpbf_128((__mmask8)__U,
+ (__v8bf)_mm_cvtne2ps_pbh(__A, __B),
+ (__v8bf)_mm_setzero_si128());
}
/// Convert Two Packed Single Data to One Packed BF16 Data.
@@ -123,9 +125,9 @@ _mm256_cvtne2ps_pbh(__m256 __A, __m256 __B) {
/// conversion of __B, and higher 128 bits come from conversion of __A.
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
_mm256_mask_cvtne2ps_pbh(__m256bh __W, __mmask16 __U, __m256 __A, __m256 __B) {
- return (__m256bh)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_cvtne2ps_pbh(__A, __B),
- (__v16hi)__W);
+ return (__m256bh)__builtin_ia32_selectpbf_256((__mmask16)__U,
+ (__v16bf)_mm256_cvtne2ps_pbh(__A, __B),
+ (__v16bf)__W);
}
/// Convert Two Packed Single Data to One Packed BF16 Data.
@@ -145,9 +147,9 @@ _mm256_mask_cvtne2ps_pbh(__m256bh __W, __mmask16 __U, __m256 __A, __m256 __B) {
/// conversion of __B, and higher 128 bits come from conversion of __A.
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtne2ps_pbh(__mmask16 __U, __m256 __A, __m256 __B) {
- return (__m256bh)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_cvtne2ps_pbh(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+ return (__m256bh)__builtin_ia32_selectpbf_256((__mmask16)__U,
+ (__v16bf)_mm256_cvtne2ps_pbh(__A, __B),
+ (__v16bf)_mm256_setzero_si256());
}
/// Convert Packed Single Data to Packed BF16 Data.
@@ -160,12 +162,8 @@ _mm256_maskz_cvtne2ps_pbh(__mmask16 __U, __m256 __A, __m256 __B) {
/// A 128-bit vector of [4 x float].
/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from
/// conversion of __A, and higher 64 bits are 0.
-static __inline__ __m128bh __DEFAULT_FN_ATTRS128
-_mm_cvtneps_pbh(__m128 __A) {
- return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A,
- (__v8hi)_mm_undefined_si128(),
- (__mmask8)-1);
-}
+#define _mm_cvtneps_pbh(A) \
+ ((__m128bh)__builtin_ia32_vcvtneps2bf16128((__v4sf)(A)))
/// Convert Packed Single Data to Packed BF16 Data.
///
@@ -185,7 +183,7 @@ _mm_cvtneps_pbh(__m128 __A) {
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
_mm_mask_cvtneps_pbh(__m128bh __W, __mmask8 __U, __m128 __A) {
return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A,
- (__v8hi)__W,
+ (__v8bf)__W,
(__mmask8)__U);
}
@@ -205,7 +203,7 @@ _mm_mask_cvtneps_pbh(__m128bh __W, __mmask8 __U, __m128 __A) {
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
_mm_maskz_cvtneps_pbh(__mmask8 __U, __m128 __A) {
return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A,
- (__v8hi)_mm_setzero_si128(),
+ (__v8bf)_mm_setzero_si128(),
(__mmask8)__U);
}
@@ -218,12 +216,8 @@ _mm_maskz_cvtneps_pbh(__mmask8 __U, __m128 __A) {
/// \param __A
/// A 256-bit vector of [8 x float].
/// \returns A 128-bit vector of [8 x bfloat] comes from conversion of __A.
-static __inline__ __m128bh __DEFAULT_FN_ATTRS256
-_mm256_cvtneps_pbh(__m256 __A) {
- return (__m128bh)__builtin_ia32_cvtneps2bf16_256_mask((__v8sf)__A,
- (__v8hi)_mm_undefined_si128(),
- (__mmask8)-1);
-}
+#define _mm256_cvtneps_pbh(A) \
+ ((__m128bh)__builtin_ia32_vcvtneps2bf16256((__v8sf)(A)))
/// Convert Packed Single Data to Packed BF16 Data.
///
@@ -242,7 +236,7 @@ _mm256_cvtneps_pbh(__m256 __A) {
static __inline__ __m128bh __DEFAULT_FN_ATTRS256
_mm256_mask_cvtneps_pbh(__m128bh __W, __mmask8 __U, __m256 __A) {
return (__m128bh)__builtin_ia32_cvtneps2bf16_256_mask((__v8sf)__A,
- (__v8hi)__W,
+ (__v8bf)__W,
(__mmask8)__U);
}
@@ -261,7 +255,7 @@ _mm256_mask_cvtneps_pbh(__m128bh __W, __mmask8 __U, __m256 __A) {
static __inline__ __m128bh __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtneps_pbh(__mmask8 __U, __m256 __A) {
return (__m128bh)__builtin_ia32_cvtneps2bf16_256_mask((__v8sf)__A,
- (__v8hi)_mm_setzero_si128(),
+ (__v8bf)_mm_setzero_si128(),
(__mmask8)__U);
}
@@ -282,8 +276,8 @@ _mm256_maskz_cvtneps_pbh(__mmask8 __U, __m256 __A) {
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_dpbf16_ps(__m128 __D, __m128bh __A, __m128bh __B) {
return (__m128)__builtin_ia32_dpbf16ps_128((__v4sf)__D,
- (__v4si)__A,
- (__v4si)__B);
+ (__v8bf)__A,
+ (__v8bf)__B);
}
/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
@@ -351,8 +345,8 @@ _mm_maskz_dpbf16_ps(__mmask8 __U, __m128 __D, __m128bh __A, __m128bh __B) {
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_dpbf16_ps(__m256 __D, __m256bh __A, __m256bh __B) {
return (__m256)__builtin_ia32_dpbf16ps_256((__v8sf)__D,
- (__v8si)__A,
- (__v8si)__B);
+ (__v16bf)__A,
+ (__v16bf)__B);
}
/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
@@ -413,11 +407,23 @@ _mm256_maskz_dpbf16_ps(__mmask8 __U, __m256 __D, __m256bh __A, __m256bh __B) {
/// A float data.
/// \returns A bf16 data whose sign field and exponent field keep unchanged,
/// and fraction field is truncated to 7 bits.
-static __inline__ __bfloat16 __DEFAULT_FN_ATTRS128 _mm_cvtness_sbh(float __A) {
+static __inline__ __bf16 __DEFAULT_FN_ATTRS128 _mm_cvtness_sbh(float __A) {
__v4sf __V = {__A, 0, 0, 0};
- __v8hi __R = __builtin_ia32_cvtneps2bf16_128_mask(
- (__v4sf)__V, (__v8hi)_mm_undefined_si128(), (__mmask8)-1);
- return __R[0];
+ __v8bf __R = __builtin_ia32_cvtneps2bf16_128_mask(
+ (__v4sf)__V, (__v8bf)_mm_undefined_si128(), (__mmask8)-1);
+ return (__bf16)__R[0];
+}
+
+/// Convert Packed BF16 Data to Packed float Data.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \param __A
+/// A 128-bit vector of [4 x bfloat].
+/// \returns A 128-bit vector of [4 x float] come from conversion of __A
+static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_cvtpbh_ps(__m128bh __A) {
+ return _mm_castsi128_ps(
+ (__m128i)_mm_slli_epi32((__m128i)_mm_cvtepi16_epi32((__m128i)__A), 16));
}
/// Convert Packed BF16 Data to Packed float Data.
@@ -426,7 +432,7 @@ static __inline__ __bfloat16 __DEFAULT_FN_ATTRS128 _mm_cvtness_sbh(float __A) {
///
/// \param __A
/// A 128-bit vector of [8 x bfloat].
-/// \returns A 256-bit vector of [8 x float] come from convertion of __A
+/// \returns A 256-bit vector of [8 x float] come from conversion of __A
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_cvtpbh_ps(__m128bh __A) {
return _mm256_castsi256_ps((__m256i)_mm256_slli_epi32(
(__m256i)_mm256_cvtepi16_epi32((__m128i)__A), 16));
@@ -437,11 +443,27 @@ static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_cvtpbh_ps(__m128bh __A) {
/// \headerfile <x86intrin.h>
///
/// \param __U
+/// A 4-bit mask. Elements are zeroed out when the corresponding mask
+/// bit is not set.
+/// \param __A
+/// A 128-bit vector of [4 x bfloat].
+/// \returns A 128-bit vector of [4 x float] come from conversion of __A
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtpbh_ps(__mmask8 __U, __m128bh __A) {
+ return _mm_castsi128_ps((__m128i)_mm_slli_epi32(
+ (__m128i)_mm_maskz_cvtepi16_epi32((__mmask8)__U, (__m128i)__A), 16));
+}
+
+/// Convert Packed BF16 Data to Packed float Data using zeroing mask.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \param __U
/// A 8-bit mask. Elements are zeroed out when the corresponding mask
/// bit is not set.
/// \param __A
/// A 128-bit vector of [8 x bfloat].
-/// \returns A 256-bit vector of [8 x float] come from convertion of __A
+/// \returns A 256-bit vector of [8 x float] come from conversion of __A
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtpbh_ps(__mmask8 __U, __m128bh __A) {
return _mm256_castsi256_ps((__m256i)_mm256_slli_epi32(
@@ -453,6 +475,26 @@ _mm256_maskz_cvtpbh_ps(__mmask8 __U, __m128bh __A) {
/// \headerfile <x86intrin.h>
///
/// \param __S
+/// A 128-bit vector of [4 x float]. Elements are copied from __S when
+/// the corresponding mask bit is not set.
+/// \param __U
+/// A 4-bit mask. Elements are zeroed out when the corresponding mask
+/// bit is not set.
+/// \param __A
+/// A 128-bit vector of [4 x bfloat].
+/// \returns A 128-bit vector of [4 x float] come from conversion of __A
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_mask_cvtpbh_ps(__m128 __S, __mmask8 __U, __m128bh __A) {
+ return _mm_castsi128_ps((__m128i)_mm_mask_slli_epi32(
+ (__m128i)__S, (__mmask8)__U, (__m128i)_mm_cvtepi16_epi32((__m128i)__A),
+ 16));
+}
+
+/// Convert Packed BF16 Data to Packed float Data using merging mask.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \param __S
/// A 256-bit vector of [8 x float]. Elements are copied from __S when
/// the corresponding mask bit is not set.
/// \param __U
@@ -460,7 +502,7 @@ _mm256_maskz_cvtpbh_ps(__mmask8 __U, __m128bh __A) {
/// bit is not set.
/// \param __A
/// A 128-bit vector of [8 x bfloat].
-/// \returns A 256-bit vector of [8 x float] come from convertion of __A
+/// \returns A 256-bit vector of [8 x float] come from conversion of __A
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_cvtpbh_ps(__m256 __S, __mmask8 __U, __m128bh __A) {
return _mm256_castsi256_ps((__m256i)_mm256_mask_slli_epi32(
@@ -472,3 +514,4 @@ _mm256_mask_cvtpbh_ps(__m256 __S, __mmask8 __U, __m128bh __A) {
#undef __DEFAULT_FN_ATTRS256
#endif
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vlbitalgintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vlbitalgintrin.h
index 5154eae14cbb..377e3a5ea571 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vlbitalgintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vlbitalgintrin.h
@@ -15,8 +15,14 @@
#define __AVX512VLBITALGINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bitalg"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bitalg"), __min_vector_width__(256)))
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vl,avx512bitalg,no-evex512"), \
+ __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vl,avx512bitalg,no-evex512"), \
+ __min_vector_width__(256)))
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_popcnt_epi16(__m256i __A)
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vlbwintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vlbwintrin.h
index 6ed10ed9803b..9aedba066999 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vlbwintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vlbwintrin.h
@@ -15,90 +15,96 @@
#define __AVX512VLBWINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bw"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512bw"), __min_vector_width__(256)))
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vl,avx512bw,no-evex512"), \
+ __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vl,avx512bw,no-evex512"), \
+ __min_vector_width__(256)))
/* Integer compare */
#define _mm_cmp_epi8_mask(a, b, p) \
- (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
- (__v16qi)(__m128i)(b), (int)(p), \
- (__mmask16)-1)
+ ((__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
+ (__v16qi)(__m128i)(b), (int)(p), \
+ (__mmask16)-1))
#define _mm_mask_cmp_epi8_mask(m, a, b, p) \
- (__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
- (__v16qi)(__m128i)(b), (int)(p), \
- (__mmask16)(m))
+ ((__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
+ (__v16qi)(__m128i)(b), (int)(p), \
+ (__mmask16)(m)))
#define _mm_cmp_epu8_mask(a, b, p) \
- (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
- (__v16qi)(__m128i)(b), (int)(p), \
- (__mmask16)-1)
+ ((__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
+ (__v16qi)(__m128i)(b), (int)(p), \
+ (__mmask16)-1))
#define _mm_mask_cmp_epu8_mask(m, a, b, p) \
- (__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
- (__v16qi)(__m128i)(b), (int)(p), \
- (__mmask16)(m))
+ ((__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
+ (__v16qi)(__m128i)(b), (int)(p), \
+ (__mmask16)(m)))
#define _mm256_cmp_epi8_mask(a, b, p) \
- (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
- (__v32qi)(__m256i)(b), (int)(p), \
- (__mmask32)-1)
+ ((__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
+ (__v32qi)(__m256i)(b), (int)(p), \
+ (__mmask32)-1))
#define _mm256_mask_cmp_epi8_mask(m, a, b, p) \
- (__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
- (__v32qi)(__m256i)(b), (int)(p), \
- (__mmask32)(m))
+ ((__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
+ (__v32qi)(__m256i)(b), (int)(p), \
+ (__mmask32)(m)))
#define _mm256_cmp_epu8_mask(a, b, p) \
- (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
- (__v32qi)(__m256i)(b), (int)(p), \
- (__mmask32)-1)
+ ((__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
+ (__v32qi)(__m256i)(b), (int)(p), \
+ (__mmask32)-1))
#define _mm256_mask_cmp_epu8_mask(m, a, b, p) \
- (__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
- (__v32qi)(__m256i)(b), (int)(p), \
- (__mmask32)(m))
+ ((__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
+ (__v32qi)(__m256i)(b), (int)(p), \
+ (__mmask32)(m)))
#define _mm_cmp_epi16_mask(a, b, p) \
- (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
- (__v8hi)(__m128i)(b), (int)(p), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
+ (__v8hi)(__m128i)(b), (int)(p), \
+ (__mmask8)-1))
#define _mm_mask_cmp_epi16_mask(m, a, b, p) \
- (__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
- (__v8hi)(__m128i)(b), (int)(p), \
- (__mmask8)(m))
+ ((__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
+ (__v8hi)(__m128i)(b), (int)(p), \
+ (__mmask8)(m)))
#define _mm_cmp_epu16_mask(a, b, p) \
- (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
- (__v8hi)(__m128i)(b), (int)(p), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
+ (__v8hi)(__m128i)(b), (int)(p), \
+ (__mmask8)-1))
#define _mm_mask_cmp_epu16_mask(m, a, b, p) \
- (__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
- (__v8hi)(__m128i)(b), (int)(p), \
- (__mmask8)(m))
+ ((__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
+ (__v8hi)(__m128i)(b), (int)(p), \
+ (__mmask8)(m)))
#define _mm256_cmp_epi16_mask(a, b, p) \
- (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
- (__v16hi)(__m256i)(b), (int)(p), \
- (__mmask16)-1)
+ ((__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
+ (__v16hi)(__m256i)(b), (int)(p), \
+ (__mmask16)-1))
#define _mm256_mask_cmp_epi16_mask(m, a, b, p) \
- (__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
- (__v16hi)(__m256i)(b), (int)(p), \
- (__mmask16)(m))
+ ((__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
+ (__v16hi)(__m256i)(b), (int)(p), \
+ (__mmask16)(m)))
#define _mm256_cmp_epu16_mask(a, b, p) \
- (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
- (__v16hi)(__m256i)(b), (int)(p), \
- (__mmask16)-1)
+ ((__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
+ (__v16hi)(__m256i)(b), (int)(p), \
+ (__mmask16)-1))
#define _mm256_mask_cmp_epu16_mask(m, a, b, p) \
- (__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
- (__v16hi)(__m256i)(b), (int)(p), \
- (__mmask16)(m))
+ ((__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
+ (__v16hi)(__m256i)(b), (int)(p), \
+ (__mmask16)(m)))
#define _mm_cmpeq_epi8_mask(A, B) \
_mm_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ)
@@ -1821,46 +1827,46 @@ _mm256_maskz_cvtepu8_epi16 (__mmask16 __U, __m128i __A)
#define _mm_mask_shufflehi_epi16(W, U, A, imm) \
- (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
- (__v8hi)_mm_shufflehi_epi16((A), (imm)), \
- (__v8hi)(__m128i)(W))
+ ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ (__v8hi)_mm_shufflehi_epi16((A), (imm)), \
+ (__v8hi)(__m128i)(W)))
#define _mm_maskz_shufflehi_epi16(U, A, imm) \
- (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
- (__v8hi)_mm_shufflehi_epi16((A), (imm)), \
- (__v8hi)_mm_setzero_si128())
+ ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ (__v8hi)_mm_shufflehi_epi16((A), (imm)), \
+ (__v8hi)_mm_setzero_si128()))
#define _mm256_mask_shufflehi_epi16(W, U, A, imm) \
- (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
- (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
- (__v16hi)(__m256i)(W))
+ ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
+ (__v16hi)(__m256i)(W)))
#define _mm256_maskz_shufflehi_epi16(U, A, imm) \
- (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
- (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
- (__v16hi)_mm256_setzero_si256())
+ ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
+ (__v16hi)_mm256_setzero_si256()))
#define _mm_mask_shufflelo_epi16(W, U, A, imm) \
- (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
- (__v8hi)_mm_shufflelo_epi16((A), (imm)), \
- (__v8hi)(__m128i)(W))
+ ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ (__v8hi)_mm_shufflelo_epi16((A), (imm)), \
+ (__v8hi)(__m128i)(W)))
#define _mm_maskz_shufflelo_epi16(U, A, imm) \
- (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
- (__v8hi)_mm_shufflelo_epi16((A), (imm)), \
- (__v8hi)_mm_setzero_si128())
+ ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ (__v8hi)_mm_shufflelo_epi16((A), (imm)), \
+ (__v8hi)_mm_setzero_si128()))
#define _mm256_mask_shufflelo_epi16(W, U, A, imm) \
- (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
- (__v16hi)_mm256_shufflelo_epi16((A), \
- (imm)), \
- (__v16hi)(__m256i)(W))
+ ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_shufflelo_epi16((A), \
+ (imm)), \
+ (__v16hi)(__m256i)(W)))
#define _mm256_maskz_shufflelo_epi16(U, A, imm) \
- (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
- (__v16hi)_mm256_shufflelo_epi16((A), \
- (imm)), \
- (__v16hi)_mm256_setzero_si256())
+ ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_shufflelo_epi16((A), \
+ (imm)), \
+ (__v16hi)_mm256_setzero_si256()))
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_sllv_epi16(__m256i __A, __m256i __B)
@@ -1942,7 +1948,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_slli_epi16(__A, __B),
+ (__v8hi)_mm_slli_epi16(__A, (int)__B),
(__v8hi)__W);
}
@@ -1950,7 +1956,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_slli_epi16(__A, __B),
+ (__v8hi)_mm_slli_epi16(__A, (int)__B),
(__v8hi)_mm_setzero_si128());
}
@@ -1959,7 +1965,7 @@ _mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A,
unsigned int __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_slli_epi16(__A, __B),
+ (__v16hi)_mm256_slli_epi16(__A, (int)__B),
(__v16hi)__W);
}
@@ -1967,7 +1973,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_slli_epi16(__A, __B),
+ (__v16hi)_mm256_slli_epi16(__A, (int)__B),
(__v16hi)_mm256_setzero_si256());
}
@@ -2095,7 +2101,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_srai_epi16(__A, __B),
+ (__v8hi)_mm_srai_epi16(__A, (int)__B),
(__v8hi)__W);
}
@@ -2103,7 +2109,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_srai_epi16(__A, __B),
+ (__v8hi)_mm_srai_epi16(__A, (int)__B),
(__v8hi)_mm_setzero_si128());
}
@@ -2112,7 +2118,7 @@ _mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A,
unsigned int __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_srai_epi16(__A, __B),
+ (__v16hi)_mm256_srai_epi16(__A, (int)__B),
(__v16hi)__W);
}
@@ -2120,7 +2126,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_srai_epi16(__A, __B),
+ (__v16hi)_mm256_srai_epi16(__A, (int)__B),
(__v16hi)_mm256_setzero_si256());
}
@@ -2756,52 +2762,404 @@ _mm256_mask_permutexvar_epi16 (__m256i __W, __mmask16 __M, __m256i __A,
}
#define _mm_mask_alignr_epi8(W, U, A, B, N) \
- (__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
+ ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
(__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \
- (__v16qi)(__m128i)(W))
+ (__v16qi)(__m128i)(W)))
#define _mm_maskz_alignr_epi8(U, A, B, N) \
- (__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
+ ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
(__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \
- (__v16qi)_mm_setzero_si128())
+ (__v16qi)_mm_setzero_si128()))
#define _mm256_mask_alignr_epi8(W, U, A, B, N) \
- (__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
+ ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
(__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \
- (__v32qi)(__m256i)(W))
+ (__v32qi)(__m256i)(W)))
#define _mm256_maskz_alignr_epi8(U, A, B, N) \
- (__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
+ ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
(__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \
- (__v32qi)_mm256_setzero_si256())
+ (__v32qi)_mm256_setzero_si256()))
#define _mm_dbsad_epu8(A, B, imm) \
- (__m128i)__builtin_ia32_dbpsadbw128((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), (int)(imm))
+ ((__m128i)__builtin_ia32_dbpsadbw128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(imm)))
#define _mm_mask_dbsad_epu8(W, U, A, B, imm) \
- (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
(__v8hi)_mm_dbsad_epu8((A), (B), (imm)), \
- (__v8hi)(__m128i)(W))
+ (__v8hi)(__m128i)(W)))
#define _mm_maskz_dbsad_epu8(U, A, B, imm) \
- (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
(__v8hi)_mm_dbsad_epu8((A), (B), (imm)), \
- (__v8hi)_mm_setzero_si128())
+ (__v8hi)_mm_setzero_si128()))
#define _mm256_dbsad_epu8(A, B, imm) \
- (__m256i)__builtin_ia32_dbpsadbw256((__v32qi)(__m256i)(A), \
- (__v32qi)(__m256i)(B), (int)(imm))
+ ((__m256i)__builtin_ia32_dbpsadbw256((__v32qi)(__m256i)(A), \
+ (__v32qi)(__m256i)(B), (int)(imm)))
#define _mm256_mask_dbsad_epu8(W, U, A, B, imm) \
- (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_dbsad_epu8((A), (B), (imm)), \
- (__v16hi)(__m256i)(W))
+ (__v16hi)(__m256i)(W)))
#define _mm256_maskz_dbsad_epu8(U, A, B, imm) \
- (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
(__v16hi)_mm256_dbsad_epu8((A), (B), (imm)), \
- (__v16hi)_mm256_setzero_si256())
+ (__v16hi)_mm256_setzero_si256()))
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_reduce_add_epi16(__m128i __W) {
+ return __builtin_reduce_add((__v8hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_reduce_mul_epi16(__m128i __W) {
+ return __builtin_reduce_mul((__v8hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_reduce_and_epi16(__m128i __W) {
+ return __builtin_reduce_and((__v8hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_reduce_or_epi16(__m128i __W) {
+ return __builtin_reduce_or((__v8hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_add_epi16( __mmask8 __M, __m128i __W) {
+ __W = _mm_maskz_mov_epi16(__M, __W);
+ return __builtin_reduce_add((__v8hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_mul_epi16( __mmask8 __M, __m128i __W) {
+ __W = _mm_mask_mov_epi16(_mm_set1_epi16(1), __M, __W);
+ return __builtin_reduce_mul((__v8hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_and_epi16( __mmask8 __M, __m128i __W) {
+ __W = _mm_mask_mov_epi16(_mm_set1_epi16(-1), __M, __W);
+ return __builtin_reduce_and((__v8hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_or_epi16(__mmask8 __M, __m128i __W) {
+ __W = _mm_maskz_mov_epi16(__M, __W);
+ return __builtin_reduce_or((__v8hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_reduce_max_epi16(__m128i __V) {
+ return __builtin_reduce_max((__v8hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS128
+_mm_reduce_max_epu16(__m128i __V) {
+ return __builtin_reduce_max((__v8hu)__V);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_reduce_min_epi16(__m128i __V) {
+ return __builtin_reduce_min((__v8hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS128
+_mm_reduce_min_epu16(__m128i __V) {
+ return __builtin_reduce_min((__v8hu)__V);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_max_epi16(__mmask16 __M, __m128i __V) {
+ __V = _mm_mask_mov_epi16(_mm_set1_epi16(-32767-1), __M, __V);
+ return __builtin_reduce_max((__v8hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_max_epu16(__mmask16 __M, __m128i __V) {
+ __V = _mm_maskz_mov_epi16(__M, __V);
+ return __builtin_reduce_max((__v8hu)__V);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_min_epi16(__mmask16 __M, __m128i __V) {
+ __V = _mm_mask_mov_epi16(_mm_set1_epi16(32767), __M, __V);
+ return __builtin_reduce_min((__v8hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_min_epu16(__mmask16 __M, __m128i __V) {
+ __V = _mm_mask_mov_epi16(_mm_set1_epi16(-1), __M, __V);
+ return __builtin_reduce_min((__v8hu)__V);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_reduce_add_epi16(__m256i __W) {
+ return __builtin_reduce_add((__v16hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_reduce_mul_epi16(__m256i __W) {
+ return __builtin_reduce_mul((__v16hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_reduce_and_epi16(__m256i __W) {
+ return __builtin_reduce_and((__v16hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_reduce_or_epi16(__m256i __W) {
+ return __builtin_reduce_or((__v16hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_add_epi16( __mmask16 __M, __m256i __W) {
+ __W = _mm256_maskz_mov_epi16(__M, __W);
+ return __builtin_reduce_add((__v16hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_mul_epi16( __mmask16 __M, __m256i __W) {
+ __W = _mm256_mask_mov_epi16(_mm256_set1_epi16(1), __M, __W);
+ return __builtin_reduce_mul((__v16hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_and_epi16( __mmask16 __M, __m256i __W) {
+ __W = _mm256_mask_mov_epi16(_mm256_set1_epi16(-1), __M, __W);
+ return __builtin_reduce_and((__v16hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_or_epi16(__mmask16 __M, __m256i __W) {
+ __W = _mm256_maskz_mov_epi16(__M, __W);
+ return __builtin_reduce_or((__v16hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_reduce_max_epi16(__m256i __V) {
+ return __builtin_reduce_max((__v16hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS256
+_mm256_reduce_max_epu16(__m256i __V) {
+ return __builtin_reduce_max((__v16hu)__V);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_reduce_min_epi16(__m256i __V) {
+ return __builtin_reduce_min((__v16hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS256
+_mm256_reduce_min_epu16(__m256i __V) {
+ return __builtin_reduce_min((__v16hu)__V);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_max_epi16(__mmask16 __M, __m256i __V) {
+ __V = _mm256_mask_mov_epi16(_mm256_set1_epi16(-32767-1), __M, __V);
+ return __builtin_reduce_max((__v16hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_max_epu16(__mmask16 __M, __m256i __V) {
+ __V = _mm256_maskz_mov_epi16(__M, __V);
+ return __builtin_reduce_max((__v16hu)__V);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_min_epi16(__mmask16 __M, __m256i __V) {
+ __V = _mm256_mask_mov_epi16(_mm256_set1_epi16(32767), __M, __V);
+ return __builtin_reduce_min((__v16hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_min_epu16(__mmask16 __M, __m256i __V) {
+ __V = _mm256_mask_mov_epi16(_mm256_set1_epi16(-1), __M, __V);
+ return __builtin_reduce_min((__v16hu)__V);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_reduce_add_epi8(__m128i __W) {
+ return __builtin_reduce_add((__v16qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_reduce_mul_epi8(__m128i __W) {
+ return __builtin_reduce_mul((__v16qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_reduce_and_epi8(__m128i __W) {
+ return __builtin_reduce_and((__v16qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_reduce_or_epi8(__m128i __W) {
+ return __builtin_reduce_or((__v16qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_add_epi8(__mmask16 __M, __m128i __W) {
+ __W = _mm_maskz_mov_epi8(__M, __W);
+ return __builtin_reduce_add((__v16qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_mul_epi8(__mmask16 __M, __m128i __W) {
+ __W = _mm_mask_mov_epi8(_mm_set1_epi8(1), __M, __W);
+ return __builtin_reduce_mul((__v16qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_and_epi8(__mmask16 __M, __m128i __W) {
+ __W = _mm_mask_mov_epi8(_mm_set1_epi8(-1), __M, __W);
+ return __builtin_reduce_and((__v16qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_or_epi8(__mmask16 __M, __m128i __W) {
+ __W = _mm_maskz_mov_epi8(__M, __W);
+ return __builtin_reduce_or((__v16qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_reduce_max_epi8(__m128i __V) {
+ return __builtin_reduce_max((__v16qs)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS128
+_mm_reduce_max_epu8(__m128i __V) {
+ return __builtin_reduce_max((__v16qu)__V);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_reduce_min_epi8(__m128i __V) {
+ return __builtin_reduce_min((__v16qs)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS128
+_mm_reduce_min_epu8(__m128i __V) {
+ return __builtin_reduce_min((__v16qu)__V);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_max_epi8(__mmask16 __M, __m128i __V) {
+ __V = _mm_mask_mov_epi8(_mm_set1_epi8(-127-1), __M, __V);
+ return __builtin_reduce_max((__v16qs)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_max_epu8(__mmask16 __M, __m128i __V) {
+ __V = _mm_maskz_mov_epi8(__M, __V);
+ return __builtin_reduce_max((__v16qu)__V);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_min_epi8(__mmask16 __M, __m128i __V) {
+ __V = _mm_mask_mov_epi8(_mm_set1_epi8(127), __M, __V);
+ return __builtin_reduce_min((__v16qs)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_min_epu8(__mmask16 __M, __m128i __V) {
+ __V = _mm_mask_mov_epi8(_mm_set1_epi8(-1), __M, __V);
+ return __builtin_reduce_min((__v16qu)__V);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_reduce_add_epi8(__m256i __W) {
+ return __builtin_reduce_add((__v32qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_reduce_mul_epi8(__m256i __W) {
+ return __builtin_reduce_mul((__v32qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_reduce_and_epi8(__m256i __W) {
+ return __builtin_reduce_and((__v32qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_reduce_or_epi8(__m256i __W) {
+ return __builtin_reduce_or((__v32qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_add_epi8(__mmask32 __M, __m256i __W) {
+ __W = _mm256_maskz_mov_epi8(__M, __W);
+ return __builtin_reduce_add((__v32qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_mul_epi8(__mmask32 __M, __m256i __W) {
+ __W = _mm256_mask_mov_epi8(_mm256_set1_epi8(1), __M, __W);
+ return __builtin_reduce_mul((__v32qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_and_epi8(__mmask32 __M, __m256i __W) {
+ __W = _mm256_mask_mov_epi8(_mm256_set1_epi8(-1), __M, __W);
+ return __builtin_reduce_and((__v32qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_or_epi8(__mmask32 __M, __m256i __W) {
+ __W = _mm256_maskz_mov_epi8(__M, __W);
+ return __builtin_reduce_or((__v32qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_reduce_max_epi8(__m256i __V) {
+ return __builtin_reduce_max((__v32qs)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS256
+_mm256_reduce_max_epu8(__m256i __V) {
+ return __builtin_reduce_max((__v32qu)__V);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_reduce_min_epi8(__m256i __V) {
+ return __builtin_reduce_min((__v32qs)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS256
+_mm256_reduce_min_epu8(__m256i __V) {
+ return __builtin_reduce_min((__v32qu)__V);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_max_epi8(__mmask32 __M, __m256i __V) {
+ __V = _mm256_mask_mov_epi8(_mm256_set1_epi8(-127-1), __M, __V);
+ return __builtin_reduce_max((__v32qs)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_max_epu8(__mmask32 __M, __m256i __V) {
+ __V = _mm256_maskz_mov_epi8(__M, __V);
+ return __builtin_reduce_max((__v32qu)__V);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_min_epi8(__mmask32 __M, __m256i __V) {
+ __V = _mm256_mask_mov_epi8(_mm256_set1_epi8(127), __M, __V);
+ return __builtin_reduce_min((__v32qs)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_min_epu8(__mmask32 __M, __m256i __V) {
+ __V = _mm256_mask_mov_epi8(_mm256_set1_epi8(-1), __M, __V);
+ return __builtin_reduce_min((__v32qu)__V);
+}
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vlcdintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vlcdintrin.h
index cc8b72528d01..923e2c551a97 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vlcdintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vlcdintrin.h
@@ -14,9 +14,14 @@
#define __AVX512VLCDINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512cd"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512cd"), __min_vector_width__(256)))
-
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vl,avx512cd,no-evex512"), \
+ __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vl,avx512cd,no-evex512"), \
+ __min_vector_width__(256)))
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcastmb_epi64 (__mmask8 __A)
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vldqintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vldqintrin.h
index 95ba574ea821..272cdd89e2d2 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vldqintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vldqintrin.h
@@ -15,8 +15,14 @@
#define __AVX512VLDQINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512dq"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512dq"), __min_vector_width__(256)))
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vl,avx512dq,no-evex512"), \
+ __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vl,avx512dq,no-evex512"), \
+ __min_vector_width__(256)))
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mullo_epi64 (__m256i __A, __m256i __B) {
@@ -773,134 +779,134 @@ _mm256_maskz_cvtepu64_ps (__mmask8 __U, __m256i __A) {
}
#define _mm_range_pd(A, B, C) \
- (__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), (int)(C), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1)
+ ((__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (int)(C), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1))
#define _mm_mask_range_pd(W, U, A, B, C) \
- (__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), (int)(C), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U))
+ ((__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (int)(C), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U)))
#define _mm_maskz_range_pd(U, A, B, C) \
- (__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), (int)(C), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U))
+ ((__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (int)(C), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U)))
#define _mm256_range_pd(A, B, C) \
- (__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
- (__v4df)(__m256d)(B), (int)(C), \
- (__v4df)_mm256_setzero_pd(), \
- (__mmask8)-1)
+ ((__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
+ (__v4df)(__m256d)(B), (int)(C), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)-1))
#define _mm256_mask_range_pd(W, U, A, B, C) \
- (__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
- (__v4df)(__m256d)(B), (int)(C), \
- (__v4df)(__m256d)(W), \
- (__mmask8)(U))
+ ((__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
+ (__v4df)(__m256d)(B), (int)(C), \
+ (__v4df)(__m256d)(W), \
+ (__mmask8)(U)))
#define _mm256_maskz_range_pd(U, A, B, C) \
- (__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
- (__v4df)(__m256d)(B), (int)(C), \
- (__v4df)_mm256_setzero_pd(), \
- (__mmask8)(U))
+ ((__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
+ (__v4df)(__m256d)(B), (int)(C), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)(U)))
#define _mm_range_ps(A, B, C) \
- (__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), (int)(C), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1)
+ ((__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (int)(C), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1))
#define _mm_mask_range_ps(W, U, A, B, C) \
- (__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), (int)(C), \
- (__v4sf)(__m128)(W), (__mmask8)(U))
+ ((__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (int)(C), \
+ (__v4sf)(__m128)(W), (__mmask8)(U)))
#define _mm_maskz_range_ps(U, A, B, C) \
- (__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), (int)(C), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U))
+ ((__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (int)(C), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U)))
#define _mm256_range_ps(A, B, C) \
- (__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
- (__v8sf)(__m256)(B), (int)(C), \
- (__v8sf)_mm256_setzero_ps(), \
- (__mmask8)-1)
+ ((__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
+ (__v8sf)(__m256)(B), (int)(C), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)-1))
#define _mm256_mask_range_ps(W, U, A, B, C) \
- (__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
- (__v8sf)(__m256)(B), (int)(C), \
- (__v8sf)(__m256)(W), (__mmask8)(U))
+ ((__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
+ (__v8sf)(__m256)(B), (int)(C), \
+ (__v8sf)(__m256)(W), (__mmask8)(U)))
#define _mm256_maskz_range_ps(U, A, B, C) \
- (__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
- (__v8sf)(__m256)(B), (int)(C), \
- (__v8sf)_mm256_setzero_ps(), \
- (__mmask8)(U))
+ ((__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
+ (__v8sf)(__m256)(B), (int)(C), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)(U)))
#define _mm_reduce_pd(A, B) \
- (__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1)
+ ((__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1))
#define _mm_mask_reduce_pd(W, U, A, B) \
- (__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U))
+ ((__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U)))
#define _mm_maskz_reduce_pd(U, A, B) \
- (__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U))
+ ((__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U)))
#define _mm256_reduce_pd(A, B) \
- (__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
- (__v4df)_mm256_setzero_pd(), \
- (__mmask8)-1)
+ ((__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)-1))
#define _mm256_mask_reduce_pd(W, U, A, B) \
- (__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
- (__v4df)(__m256d)(W), \
- (__mmask8)(U))
+ ((__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
+ (__v4df)(__m256d)(W), \
+ (__mmask8)(U)))
#define _mm256_maskz_reduce_pd(U, A, B) \
- (__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
- (__v4df)_mm256_setzero_pd(), \
- (__mmask8)(U))
+ ((__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)(U)))
#define _mm_reduce_ps(A, B) \
- (__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1)
+ ((__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1))
#define _mm_mask_reduce_ps(W, U, A, B) \
- (__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U))
+ ((__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U)))
#define _mm_maskz_reduce_ps(U, A, B) \
- (__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U))
+ ((__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U)))
#define _mm256_reduce_ps(A, B) \
- (__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
- (__v8sf)_mm256_setzero_ps(), \
- (__mmask8)-1)
+ ((__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)-1))
#define _mm256_mask_reduce_ps(W, U, A, B) \
- (__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
- (__v8sf)(__m256)(W), \
- (__mmask8)(U))
+ ((__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
+ (__v8sf)(__m256)(W), \
+ (__mmask8)(U)))
#define _mm256_maskz_reduce_ps(U, A, B) \
- (__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
- (__v8sf)_mm256_setzero_ps(), \
- (__mmask8)(U))
+ ((__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)(U)))
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
_mm_movepi32_mask (__m128i __A)
@@ -1066,100 +1072,100 @@ _mm256_maskz_broadcast_i64x2 (__mmask8 __M, __m128i __A)
}
#define _mm256_extractf64x2_pd(A, imm) \
- (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
- (int)(imm), \
- (__v2df)_mm_undefined_pd(), \
- (__mmask8)-1)
+ ((__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
+ (int)(imm), \
+ (__v2df)_mm_undefined_pd(), \
+ (__mmask8)-1))
#define _mm256_mask_extractf64x2_pd(W, U, A, imm) \
- (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
- (int)(imm), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U))
+ ((__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
+ (int)(imm), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U)))
#define _mm256_maskz_extractf64x2_pd(U, A, imm) \
- (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
- (int)(imm), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U))
+ ((__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
+ (int)(imm), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U)))
#define _mm256_extracti64x2_epi64(A, imm) \
- (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
+ ((__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
(int)(imm), \
(__v2di)_mm_undefined_si128(), \
- (__mmask8)-1)
+ (__mmask8)-1))
#define _mm256_mask_extracti64x2_epi64(W, U, A, imm) \
- (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
- (int)(imm), \
- (__v2di)(__m128i)(W), \
- (__mmask8)(U))
+ ((__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
+ (int)(imm), \
+ (__v2di)(__m128i)(W), \
+ (__mmask8)(U)))
#define _mm256_maskz_extracti64x2_epi64(U, A, imm) \
- (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
- (int)(imm), \
- (__v2di)_mm_setzero_si128(), \
- (__mmask8)(U))
+ ((__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
+ (int)(imm), \
+ (__v2di)_mm_setzero_si128(), \
+ (__mmask8)(U)))
#define _mm256_insertf64x2(A, B, imm) \
- (__m256d)__builtin_ia32_insertf64x2_256((__v4df)(__m256d)(A), \
- (__v2df)(__m128d)(B), (int)(imm))
+ ((__m256d)__builtin_ia32_insertf64x2_256((__v4df)(__m256d)(A), \
+ (__v2df)(__m128d)(B), (int)(imm)))
#define _mm256_mask_insertf64x2(W, U, A, B, imm) \
- (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+ ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
(__v4df)_mm256_insertf64x2((A), (B), (imm)), \
- (__v4df)(__m256d)(W))
+ (__v4df)(__m256d)(W)))
#define _mm256_maskz_insertf64x2(U, A, B, imm) \
- (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+ ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
(__v4df)_mm256_insertf64x2((A), (B), (imm)), \
- (__v4df)_mm256_setzero_pd())
+ (__v4df)_mm256_setzero_pd()))
#define _mm256_inserti64x2(A, B, imm) \
- (__m256i)__builtin_ia32_inserti64x2_256((__v4di)(__m256i)(A), \
- (__v2di)(__m128i)(B), (int)(imm))
+ ((__m256i)__builtin_ia32_inserti64x2_256((__v4di)(__m256i)(A), \
+ (__v2di)(__m128i)(B), (int)(imm)))
#define _mm256_mask_inserti64x2(W, U, A, B, imm) \
- (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
- (__v4di)_mm256_inserti64x2((A), (B), (imm)), \
- (__v4di)(__m256i)(W))
+ ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ (__v4di)_mm256_inserti64x2((A), (B), (imm)), \
+ (__v4di)(__m256i)(W)))
#define _mm256_maskz_inserti64x2(U, A, B, imm) \
- (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
- (__v4di)_mm256_inserti64x2((A), (B), (imm)), \
- (__v4di)_mm256_setzero_si256())
+ ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ (__v4di)_mm256_inserti64x2((A), (B), (imm)), \
+ (__v4di)_mm256_setzero_si256()))
#define _mm_mask_fpclass_pd_mask(U, A, imm) \
- (__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \
- (__mmask8)(U))
+ ((__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \
+ (__mmask8)(U)))
#define _mm_fpclass_pd_mask(A, imm) \
- (__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \
+ (__mmask8)-1))
#define _mm256_mask_fpclass_pd_mask(U, A, imm) \
- (__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \
- (__mmask8)(U))
+ ((__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \
+ (__mmask8)(U)))
#define _mm256_fpclass_pd_mask(A, imm) \
- (__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \
+ (__mmask8)-1))
#define _mm_mask_fpclass_ps_mask(U, A, imm) \
- (__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \
- (__mmask8)(U))
+ ((__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \
+ (__mmask8)(U)))
#define _mm_fpclass_ps_mask(A, imm) \
- (__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \
+ (__mmask8)-1))
#define _mm256_mask_fpclass_ps_mask(U, A, imm) \
- (__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \
- (__mmask8)(U))
+ ((__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \
+ (__mmask8)(U)))
#define _mm256_fpclass_ps_mask(A, imm) \
- (__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \
+ (__mmask8)-1))
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vlfp16intrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vlfp16intrin.h
new file mode 100644
index 000000000000..a12acb7d9a24
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vlfp16intrin.h
@@ -0,0 +1,2071 @@
+/*===---------- avx512vlfp16intrin.h - AVX512-FP16 intrinsics --------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error \
+ "Never use <avx512vlfp16intrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifdef __SSE2__
+
+#ifndef __AVX512VLFP16INTRIN_H
+#define __AVX512VLFP16INTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512fp16,avx512vl,no-evex512"), \
+ __min_vector_width__(256)))
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512fp16,avx512vl,no-evex512"), \
+ __min_vector_width__(128)))
+
+static __inline__ _Float16 __DEFAULT_FN_ATTRS128 _mm_cvtsh_h(__m128h __a) {
+ return __a[0];
+}
+
+static __inline__ _Float16 __DEFAULT_FN_ATTRS256 _mm256_cvtsh_h(__m256h __a) {
+ return __a[0];
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_set_sh(_Float16 __h) {
+ return __extension__(__m128h){__h, 0, 0, 0, 0, 0, 0, 0};
+}
+
+static __inline __m128h __DEFAULT_FN_ATTRS128 _mm_set1_ph(_Float16 __h) {
+ return (__m128h)(__v8hf){__h, __h, __h, __h, __h, __h, __h, __h};
+}
+
+static __inline __m256h __DEFAULT_FN_ATTRS256 _mm256_set1_ph(_Float16 __h) {
+ return (__m256h)(__v16hf){__h, __h, __h, __h, __h, __h, __h, __h,
+ __h, __h, __h, __h, __h, __h, __h, __h};
+}
+
+static __inline __m128h __DEFAULT_FN_ATTRS128
+_mm_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4,
+ _Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8) {
+ return (__m128h)(__v8hf){__h8, __h7, __h6, __h5, __h4, __h3, __h2, __h1};
+}
+
+static __inline __m256h __DEFAULT_FN_ATTRS256
+_mm256_set1_pch(_Float16 _Complex h) {
+ return (__m256h)_mm256_set1_ps(__builtin_bit_cast(float, h));
+}
+
+static __inline __m128h __DEFAULT_FN_ATTRS128
+_mm_set1_pch(_Float16 _Complex h) {
+ return (__m128h)_mm_set1_ps(__builtin_bit_cast(float, h));
+}
+
+static __inline __m256h __DEFAULT_FN_ATTRS256
+_mm256_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4,
+ _Float16 __h5, _Float16 __h6, _Float16 __h7, _Float16 __h8,
+ _Float16 __h9, _Float16 __h10, _Float16 __h11, _Float16 __h12,
+ _Float16 __h13, _Float16 __h14, _Float16 __h15, _Float16 __h16) {
+ return (__m256h)(__v16hf){__h16, __h15, __h14, __h13, __h12, __h11,
+ __h10, __h9, __h8, __h7, __h6, __h5,
+ __h4, __h3, __h2, __h1};
+}
+
+#define _mm_setr_ph(h1, h2, h3, h4, h5, h6, h7, h8) \
+ _mm_set_ph((h8), (h7), (h6), (h5), (h4), (h3), (h2), (h1))
+
+#define _mm256_setr_ph(h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11, h12, h13, \
+ h14, h15, h16) \
+ _mm256_set_ph((h16), (h15), (h14), (h13), (h12), (h11), (h10), (h9), (h8), \
+ (h7), (h6), (h5), (h4), (h3), (h2), (h1))
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_add_ph(__m256h __A,
+ __m256h __B) {
+ return (__m256h)((__v16hf)__A + (__v16hf)__B);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_add_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ __U, (__v16hf)_mm256_add_ph(__A, __B), (__v16hf)__W);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_add_ph(__mmask16 __U, __m256h __A, __m256h __B) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ __U, (__v16hf)_mm256_add_ph(__A, __B), (__v16hf)_mm256_setzero_ph());
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_add_ph(__m128h __A,
+ __m128h __B) {
+ return (__m128h)((__v8hf)__A + (__v8hf)__B);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_add_ph(__m128h __W,
+ __mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_add_ph(__A, __B),
+ (__v8hf)__W);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_add_ph(__mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_add_ph(__A, __B),
+ (__v8hf)_mm_setzero_ph());
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_sub_ph(__m256h __A,
+ __m256h __B) {
+ return (__m256h)((__v16hf)__A - (__v16hf)__B);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_sub_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ __U, (__v16hf)_mm256_sub_ph(__A, __B), (__v16hf)__W);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_sub_ph(__mmask16 __U, __m256h __A, __m256h __B) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ __U, (__v16hf)_mm256_sub_ph(__A, __B), (__v16hf)_mm256_setzero_ph());
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_sub_ph(__m128h __A,
+ __m128h __B) {
+ return (__m128h)((__v8hf)__A - (__v8hf)__B);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_sub_ph(__m128h __W,
+ __mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_sub_ph(__A, __B),
+ (__v8hf)__W);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_sub_ph(__mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_sub_ph(__A, __B),
+ (__v8hf)_mm_setzero_ph());
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_mul_ph(__m256h __A,
+ __m256h __B) {
+ return (__m256h)((__v16hf)__A * (__v16hf)__B);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_mul_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ __U, (__v16hf)_mm256_mul_ph(__A, __B), (__v16hf)__W);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_mul_ph(__mmask16 __U, __m256h __A, __m256h __B) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ __U, (__v16hf)_mm256_mul_ph(__A, __B), (__v16hf)_mm256_setzero_ph());
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mul_ph(__m128h __A,
+ __m128h __B) {
+ return (__m128h)((__v8hf)__A * (__v8hf)__B);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_mul_ph(__m128h __W,
+ __mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_mul_ph(__A, __B),
+ (__v8hf)__W);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_mul_ph(__mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_mul_ph(__A, __B),
+ (__v8hf)_mm_setzero_ph());
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_div_ph(__m256h __A,
+ __m256h __B) {
+ return (__m256h)((__v16hf)__A / (__v16hf)__B);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_div_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ __U, (__v16hf)_mm256_div_ph(__A, __B), (__v16hf)__W);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_div_ph(__mmask16 __U, __m256h __A, __m256h __B) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ __U, (__v16hf)_mm256_div_ph(__A, __B), (__v16hf)_mm256_setzero_ph());
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_div_ph(__m128h __A,
+ __m128h __B) {
+ return (__m128h)((__v8hf)__A / (__v8hf)__B);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_div_ph(__m128h __W,
+ __mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_div_ph(__A, __B),
+ (__v8hf)__W);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_div_ph(__mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_selectph_128(__U, (__v8hf)_mm_div_ph(__A, __B),
+ (__v8hf)_mm_setzero_ph());
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_min_ph(__m256h __A,
+ __m256h __B) {
+ return (__m256h)__builtin_ia32_minph256((__v16hf)__A, (__v16hf)__B);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_min_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U,
+ (__v16hf)__builtin_ia32_minph256((__v16hf)__A, (__v16hf)__B),
+ (__v16hf)__W);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_min_ph(__mmask16 __U, __m256h __A, __m256h __B) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U,
+ (__v16hf)__builtin_ia32_minph256((__v16hf)__A, (__v16hf)__B),
+ (__v16hf)_mm256_setzero_ph());
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_min_ph(__m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_minph128((__v8hf)__A, (__v8hf)__B);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_min_ph(__m128h __W,
+ __mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U, (__v8hf)__builtin_ia32_minph128((__v8hf)__A, (__v8hf)__B),
+ (__v8hf)__W);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_min_ph(__mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U, (__v8hf)__builtin_ia32_minph128((__v8hf)__A, (__v8hf)__B),
+ (__v8hf)_mm_setzero_ph());
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_max_ph(__m256h __A,
+ __m256h __B) {
+ return (__m256h)__builtin_ia32_maxph256((__v16hf)__A, (__v16hf)__B);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_max_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U,
+ (__v16hf)__builtin_ia32_maxph256((__v16hf)__A, (__v16hf)__B),
+ (__v16hf)__W);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_max_ph(__mmask16 __U, __m256h __A, __m256h __B) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U,
+ (__v16hf)__builtin_ia32_maxph256((__v16hf)__A, (__v16hf)__B),
+ (__v16hf)_mm256_setzero_ph());
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_max_ph(__m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_maxph128((__v8hf)__A, (__v8hf)__B);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_max_ph(__m128h __W,
+ __mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U, (__v8hf)__builtin_ia32_maxph128((__v8hf)__A, (__v8hf)__B),
+ (__v8hf)__W);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_max_ph(__mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U, (__v8hf)__builtin_ia32_maxph128((__v8hf)__A, (__v8hf)__B),
+ (__v8hf)_mm_setzero_ph());
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_abs_ph(__m256h __A) {
+ return (__m256h)_mm256_and_epi32(_mm256_set1_epi32(0x7FFF7FFF), (__m256i)__A);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_abs_ph(__m128h __A) {
+ return (__m128h)_mm_and_epi32(_mm_set1_epi32(0x7FFF7FFF), (__m128i)__A);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_conj_pch(__m256h __A) {
+ return (__m256h)_mm256_xor_ps((__m256)__A, _mm256_set1_ps(-0.0f));
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_conj_pch(__m256h __W, __mmask8 __U, __m256h __A) {
+ return (__m256h)__builtin_ia32_selectps_256(
+ (__mmask8)__U, (__v8sf)_mm256_conj_pch(__A), (__v8sf)__W);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_conj_pch(__mmask8 __U, __m256h __A) {
+ return (__m256h)__builtin_ia32_selectps_256(
+ (__mmask8)__U, (__v8sf)_mm256_conj_pch(__A), (__v8sf)_mm256_setzero_ps());
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_conj_pch(__m128h __A) {
+ return (__m128h)_mm_xor_ps((__m128)__A, _mm_set1_ps(-0.0f));
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_conj_pch(__m128h __W,
+ __mmask8 __U,
+ __m128h __A) {
+ return (__m128h)__builtin_ia32_selectps_128(
+ (__mmask8)__U, (__v4sf)_mm_conj_pch(__A), (__v4sf)__W);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_conj_pch(__mmask8 __U, __m128h __A) {
+ return (__m128h)__builtin_ia32_selectps_128(
+ (__mmask8)__U, (__v4sf)_mm_conj_pch(__A), (__v4sf)_mm_setzero_ps());
+}
+
+#define _mm256_cmp_ph_mask(a, b, p) \
+ ((__mmask16)__builtin_ia32_cmpph256_mask( \
+ (__v16hf)(__m256h)(a), (__v16hf)(__m256h)(b), (int)(p), (__mmask16)-1))
+
+#define _mm256_mask_cmp_ph_mask(m, a, b, p) \
+ ((__mmask16)__builtin_ia32_cmpph256_mask( \
+ (__v16hf)(__m256h)(a), (__v16hf)(__m256h)(b), (int)(p), (__mmask16)(m)))
+
+#define _mm_cmp_ph_mask(a, b, p) \
+ ((__mmask8)__builtin_ia32_cmpph128_mask( \
+ (__v8hf)(__m128h)(a), (__v8hf)(__m128h)(b), (int)(p), (__mmask8)-1))
+
+#define _mm_mask_cmp_ph_mask(m, a, b, p) \
+ ((__mmask8)__builtin_ia32_cmpph128_mask( \
+ (__v8hf)(__m128h)(a), (__v8hf)(__m128h)(b), (int)(p), (__mmask8)(m)))
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_rcp_ph(__m256h __A) {
+ return (__m256h)__builtin_ia32_rcpph256_mask(
+ (__v16hf)__A, (__v16hf)_mm256_undefined_ph(), (__mmask16)-1);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_rcp_ph(__m256h __W, __mmask16 __U, __m256h __A) {
+ return (__m256h)__builtin_ia32_rcpph256_mask((__v16hf)__A, (__v16hf)__W,
+ (__mmask16)__U);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_rcp_ph(__mmask16 __U, __m256h __A) {
+ return (__m256h)__builtin_ia32_rcpph256_mask(
+ (__v16hf)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_rcp_ph(__m128h __A) {
+ return (__m128h)__builtin_ia32_rcpph128_mask(
+ (__v8hf)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_rcp_ph(__m128h __W,
+ __mmask8 __U,
+ __m128h __A) {
+ return (__m128h)__builtin_ia32_rcpph128_mask((__v8hf)__A, (__v8hf)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_rcp_ph(__mmask8 __U,
+ __m128h __A) {
+ return (__m128h)__builtin_ia32_rcpph128_mask(
+ (__v8hf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_rsqrt_ph(__m256h __A) {
+ return (__m256h)__builtin_ia32_rsqrtph256_mask(
+ (__v16hf)__A, (__v16hf)_mm256_undefined_ph(), (__mmask16)-1);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_rsqrt_ph(__m256h __W, __mmask16 __U, __m256h __A) {
+ return (__m256h)__builtin_ia32_rsqrtph256_mask((__v16hf)__A, (__v16hf)__W,
+ (__mmask16)__U);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_rsqrt_ph(__mmask16 __U, __m256h __A) {
+ return (__m256h)__builtin_ia32_rsqrtph256_mask(
+ (__v16hf)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_rsqrt_ph(__m128h __A) {
+ return (__m128h)__builtin_ia32_rsqrtph128_mask(
+ (__v8hf)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_rsqrt_ph(__m128h __W,
+ __mmask8 __U,
+ __m128h __A) {
+ return (__m128h)__builtin_ia32_rsqrtph128_mask((__v8hf)__A, (__v8hf)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_rsqrt_ph(__mmask8 __U, __m128h __A) {
+ return (__m128h)__builtin_ia32_rsqrtph128_mask(
+ (__v8hf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_getexp_ph(__m128h __A) {
+ return (__m128h)__builtin_ia32_getexpph128_mask(
+ (__v8hf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)-1);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_getexp_ph(__m128h __W, __mmask8 __U, __m128h __A) {
+ return (__m128h)__builtin_ia32_getexpph128_mask((__v8hf)__A, (__v8hf)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_getexp_ph(__mmask8 __U, __m128h __A) {
+ return (__m128h)__builtin_ia32_getexpph128_mask(
+ (__v8hf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_getexp_ph(__m256h __A) {
+ return (__m256h)__builtin_ia32_getexpph256_mask(
+ (__v16hf)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)-1);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_getexp_ph(__m256h __W, __mmask16 __U, __m256h __A) {
+ return (__m256h)__builtin_ia32_getexpph256_mask((__v16hf)__A, (__v16hf)__W,
+ (__mmask16)__U);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_getexp_ph(__mmask16 __U, __m256h __A) {
+ return (__m256h)__builtin_ia32_getexpph256_mask(
+ (__v16hf)__A, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U);
+}
+
+#define _mm_getmant_ph(A, B, C) \
+ ((__m128h)__builtin_ia32_getmantph128_mask( \
+ (__v8hf)(__m128h)(A), (int)(((C) << 2) | (B)), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)-1))
+
+#define _mm_mask_getmant_ph(W, U, A, B, C) \
+ ((__m128h)__builtin_ia32_getmantph128_mask( \
+ (__v8hf)(__m128h)(A), (int)(((C) << 2) | (B)), (__v8hf)(__m128h)(W), \
+ (__mmask8)(U)))
+
+#define _mm_maskz_getmant_ph(U, A, B, C) \
+ ((__m128h)__builtin_ia32_getmantph128_mask( \
+ (__v8hf)(__m128h)(A), (int)(((C) << 2) | (B)), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)(U)))
+
+#define _mm256_getmant_ph(A, B, C) \
+ ((__m256h)__builtin_ia32_getmantph256_mask( \
+ (__v16hf)(__m256h)(A), (int)(((C) << 2) | (B)), \
+ (__v16hf)_mm256_setzero_ph(), (__mmask16)-1))
+
+#define _mm256_mask_getmant_ph(W, U, A, B, C) \
+ ((__m256h)__builtin_ia32_getmantph256_mask( \
+ (__v16hf)(__m256h)(A), (int)(((C) << 2) | (B)), (__v16hf)(__m256h)(W), \
+ (__mmask16)(U)))
+
+#define _mm256_maskz_getmant_ph(U, A, B, C) \
+ ((__m256h)__builtin_ia32_getmantph256_mask( \
+ (__v16hf)(__m256h)(A), (int)(((C) << 2) | (B)), \
+ (__v16hf)_mm256_setzero_ph(), (__mmask16)(U)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_scalef_ph(__m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_scalefph128_mask(
+ (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)-1);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_scalef_ph(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
+ return (__m128h)__builtin_ia32_scalefph128_mask((__v8hf)__A, (__v8hf)__B,
+ (__v8hf)__W, (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_scalef_ph(__mmask8 __U, __m128h __A, __m128h __B) {
+ return (__m128h)__builtin_ia32_scalefph128_mask(
+ (__v8hf)__A, (__v8hf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_scalef_ph(__m256h __A,
+ __m256h __B) {
+ return (__m256h)__builtin_ia32_scalefph256_mask(
+ (__v16hf)__A, (__v16hf)__B, (__v16hf)_mm256_setzero_ph(), (__mmask16)-1);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_scalef_ph(__m256h __W, __mmask16 __U, __m256h __A, __m256h __B) {
+ return (__m256h)__builtin_ia32_scalefph256_mask((__v16hf)__A, (__v16hf)__B,
+ (__v16hf)__W, (__mmask16)__U);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_scalef_ph(__mmask16 __U, __m256h __A, __m256h __B) {
+ return (__m256h)__builtin_ia32_scalefph256_mask(
+ (__v16hf)__A, (__v16hf)__B, (__v16hf)_mm256_setzero_ph(), (__mmask16)__U);
+}
+
+#define _mm_roundscale_ph(A, imm) \
+ ((__m128h)__builtin_ia32_rndscaleph_128_mask( \
+ (__v8hf)(__m128h)(A), (int)(imm), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)-1))
+
+#define _mm_mask_roundscale_ph(W, U, A, imm) \
+ ((__m128h)__builtin_ia32_rndscaleph_128_mask( \
+ (__v8hf)(__m128h)(A), (int)(imm), (__v8hf)(__m128h)(W), (__mmask8)(U)))
+
+#define _mm_maskz_roundscale_ph(U, A, imm) \
+ ((__m128h)__builtin_ia32_rndscaleph_128_mask( \
+ (__v8hf)(__m128h)(A), (int)(imm), (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)(U)))
+
+#define _mm256_roundscale_ph(A, imm) \
+ ((__m256h)__builtin_ia32_rndscaleph_256_mask( \
+ (__v16hf)(__m256h)(A), (int)(imm), (__v16hf)_mm256_setzero_ph(), \
+ (__mmask16)-1))
+
+#define _mm256_mask_roundscale_ph(W, U, A, imm) \
+ ((__m256h)__builtin_ia32_rndscaleph_256_mask( \
+ (__v16hf)(__m256h)(A), (int)(imm), (__v16hf)(__m256h)(W), \
+ (__mmask16)(U)))
+
+#define _mm256_maskz_roundscale_ph(U, A, imm) \
+ ((__m256h)__builtin_ia32_rndscaleph_256_mask( \
+ (__v16hf)(__m256h)(A), (int)(imm), (__v16hf)_mm256_setzero_ph(), \
+ (__mmask16)(U)))
+
+#define _mm_reduce_ph(A, imm) \
+ ((__m128h)__builtin_ia32_reduceph128_mask((__v8hf)(__m128h)(A), (int)(imm), \
+ (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)-1))
+
+#define _mm_mask_reduce_ph(W, U, A, imm) \
+ ((__m128h)__builtin_ia32_reduceph128_mask( \
+ (__v8hf)(__m128h)(A), (int)(imm), (__v8hf)(__m128h)(W), (__mmask8)(U)))
+
+#define _mm_maskz_reduce_ph(U, A, imm) \
+ ((__m128h)__builtin_ia32_reduceph128_mask((__v8hf)(__m128h)(A), (int)(imm), \
+ (__v8hf)_mm_setzero_ph(), \
+ (__mmask8)(U)))
+
+#define _mm256_reduce_ph(A, imm) \
+ ((__m256h)__builtin_ia32_reduceph256_mask((__v16hf)(__m256h)(A), (int)(imm), \
+ (__v16hf)_mm256_setzero_ph(), \
+ (__mmask16)-1))
+
+#define _mm256_mask_reduce_ph(W, U, A, imm) \
+ ((__m256h)__builtin_ia32_reduceph256_mask((__v16hf)(__m256h)(A), (int)(imm), \
+ (__v16hf)(__m256h)(W), \
+ (__mmask16)(U)))
+
+#define _mm256_maskz_reduce_ph(U, A, imm) \
+ ((__m256h)__builtin_ia32_reduceph256_mask((__v16hf)(__m256h)(A), (int)(imm), \
+ (__v16hf)_mm256_setzero_ph(), \
+ (__mmask16)(U)))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_sqrt_ph(__m128h __a) {
+ return __builtin_ia32_sqrtph((__v8hf)__a);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_sqrt_ph(__m128h __W,
+ __mmask8 __U,
+ __m128h __A) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U, (__v8hf)_mm_sqrt_ph(__A), (__v8hf)__W);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_maskz_sqrt_ph(__mmask8 __U,
+ __m128h __A) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U, (__v8hf)_mm_sqrt_ph(__A), (__v8hf)_mm_setzero_ph());
+}
+
+static __inline __m256h __DEFAULT_FN_ATTRS256 _mm256_sqrt_ph(__m256h __a) {
+ return (__m256h)__builtin_ia32_sqrtph256((__v16hf)__a);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_sqrt_ph(__m256h __W, __mmask16 __U, __m256h __A) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U, (__v16hf)_mm256_sqrt_ph(__A), (__v16hf)__W);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_sqrt_ph(__mmask16 __U, __m256h __A) {
+ return (__m256h)__builtin_ia32_selectph_256((__mmask16)__U,
+ (__v16hf)_mm256_sqrt_ph(__A),
+ (__v16hf)_mm256_setzero_ph());
+}
+
+#define _mm_mask_fpclass_ph_mask(U, A, imm) \
+ ((__mmask8)__builtin_ia32_fpclassph128_mask((__v8hf)(__m128h)(A), \
+ (int)(imm), (__mmask8)(U)))
+
+#define _mm_fpclass_ph_mask(A, imm) \
+ ((__mmask8)__builtin_ia32_fpclassph128_mask((__v8hf)(__m128h)(A), \
+ (int)(imm), (__mmask8)-1))
+
+#define _mm256_mask_fpclass_ph_mask(U, A, imm) \
+ ((__mmask16)__builtin_ia32_fpclassph256_mask((__v16hf)(__m256h)(A), \
+ (int)(imm), (__mmask16)(U)))
+
+#define _mm256_fpclass_ph_mask(A, imm) \
+ ((__mmask16)__builtin_ia32_fpclassph256_mask((__v16hf)(__m256h)(A), \
+ (int)(imm), (__mmask16)-1))
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtpd_ph(__m128d __A) {
+ return (__m128h)__builtin_ia32_vcvtpd2ph128_mask(
+ (__v2df)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_cvtpd_ph(__m128h __W,
+ __mmask8 __U,
+ __m128d __A) {
+ return (__m128h)__builtin_ia32_vcvtpd2ph128_mask((__v2df)__A, (__v8hf)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtpd_ph(__mmask8 __U, __m128d __A) {
+ return (__m128h)__builtin_ia32_vcvtpd2ph128_mask(
+ (__v2df)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS256 _mm256_cvtpd_ph(__m256d __A) {
+ return (__m128h)__builtin_ia32_vcvtpd2ph256_mask(
+ (__v4df)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS256
+_mm256_mask_cvtpd_ph(__m128h __W, __mmask8 __U, __m256d __A) {
+ return (__m128h)__builtin_ia32_vcvtpd2ph256_mask((__v4df)__A, (__v8hf)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvtpd_ph(__mmask8 __U, __m256d __A) {
+ return (__m128h)__builtin_ia32_vcvtpd2ph256_mask(
+ (__v4df)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_cvtph_pd(__m128h __A) {
+ return (__m128d)__builtin_ia32_vcvtph2pd128_mask(
+ (__v8hf)__A, (__v2df)_mm_undefined_pd(), (__mmask8)-1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128 _mm_mask_cvtph_pd(__m128d __W,
+ __mmask8 __U,
+ __m128h __A) {
+ return (__m128d)__builtin_ia32_vcvtph2pd128_mask((__v8hf)__A, (__v2df)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtph_pd(__mmask8 __U, __m128h __A) {
+ return (__m128d)__builtin_ia32_vcvtph2pd128_mask(
+ (__v8hf)__A, (__v2df)_mm_setzero_pd(), (__mmask8)__U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256 _mm256_cvtph_pd(__m128h __A) {
+ return (__m256d)__builtin_ia32_vcvtph2pd256_mask(
+ (__v8hf)__A, (__v4df)_mm256_undefined_pd(), (__mmask8)-1);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_mask_cvtph_pd(__m256d __W, __mmask8 __U, __m128h __A) {
+ return (__m256d)__builtin_ia32_vcvtph2pd256_mask((__v8hf)__A, (__v4df)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m256d __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvtph_pd(__mmask8 __U, __m128h __A) {
+ return (__m256d)__builtin_ia32_vcvtph2pd256_mask(
+ (__v8hf)__A, (__v4df)_mm256_setzero_pd(), (__mmask8)__U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epi16(__m128h __A) {
+ return (__m128i)__builtin_ia32_vcvtph2w128_mask(
+ (__v8hf)__A, (__v8hi)_mm_undefined_si128(), (__mmask8)-1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_cvtph_epi16(__m128i __W, __mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvtph2w128_mask((__v8hf)__A, (__v8hi)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtph_epi16(__mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvtph2w128_mask(
+ (__v8hf)__A, (__v8hi)_mm_setzero_si128(), (__mmask8)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtph_epi16(__m256h __A) {
+ return (__m256i)__builtin_ia32_vcvtph2w256_mask(
+ (__v16hf)__A, (__v16hi)_mm256_undefined_si256(), (__mmask16)-1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_cvtph_epi16(__m256i __W, __mmask16 __U, __m256h __A) {
+ return (__m256i)__builtin_ia32_vcvtph2w256_mask((__v16hf)__A, (__v16hi)__W,
+ (__mmask16)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvtph_epi16(__mmask16 __U, __m256h __A) {
+ return (__m256i)__builtin_ia32_vcvtph2w256_mask(
+ (__v16hf)__A, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epi16(__m128h __A) {
+ return (__m128i)__builtin_ia32_vcvttph2w128_mask(
+ (__v8hf)__A, (__v8hi)_mm_undefined_si128(), (__mmask8)-1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_cvttph_epi16(__m128i __W, __mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvttph2w128_mask((__v8hf)__A, (__v8hi)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_cvttph_epi16(__mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvttph2w128_mask(
+ (__v8hf)__A, (__v8hi)_mm_setzero_si128(), (__mmask8)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvttph_epi16(__m256h __A) {
+ return (__m256i)__builtin_ia32_vcvttph2w256_mask(
+ (__v16hf)__A, (__v16hi)_mm256_undefined_si256(), (__mmask16)-1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_cvttph_epi16(__m256i __W, __mmask16 __U, __m256h __A) {
+ return (__m256i)__builtin_ia32_vcvttph2w256_mask((__v16hf)__A, (__v16hi)__W,
+ (__mmask16)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvttph_epi16(__mmask16 __U, __m256h __A) {
+ return (__m256i)__builtin_ia32_vcvttph2w256_mask(
+ (__v16hf)__A, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtepi16_ph(__m128i __A) {
+ return (__m128h) __builtin_convertvector((__v8hi)__A, __v8hf);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_cvtepi16_ph(__m128h __W, __mmask8 __U, __m128i __A) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U, (__v8hf)_mm_cvtepi16_ph(__A), (__v8hf)__W);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtepi16_ph(__mmask8 __U, __m128i __A) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U, (__v8hf)_mm_cvtepi16_ph(__A), (__v8hf)_mm_setzero_ph());
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_cvtepi16_ph(__m256i __A) {
+ return (__m256h) __builtin_convertvector((__v16hi)__A, __v16hf);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_cvtepi16_ph(__m256h __W, __mmask16 __U, __m256i __A) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U, (__v16hf)_mm256_cvtepi16_ph(__A), (__v16hf)__W);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvtepi16_ph(__mmask16 __U, __m256i __A) {
+ return (__m256h)__builtin_ia32_selectph_256((__mmask16)__U,
+ (__v16hf)_mm256_cvtepi16_ph(__A),
+ (__v16hf)_mm256_setzero_ph());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epu16(__m128h __A) {
+ return (__m128i)__builtin_ia32_vcvtph2uw128_mask(
+ (__v8hf)__A, (__v8hu)_mm_undefined_si128(), (__mmask8)-1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_cvtph_epu16(__m128i __W, __mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvtph2uw128_mask((__v8hf)__A, (__v8hu)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtph_epu16(__mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvtph2uw128_mask(
+ (__v8hf)__A, (__v8hu)_mm_setzero_si128(), (__mmask8)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtph_epu16(__m256h __A) {
+ return (__m256i)__builtin_ia32_vcvtph2uw256_mask(
+ (__v16hf)__A, (__v16hu)_mm256_undefined_si256(), (__mmask16)-1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_cvtph_epu16(__m256i __W, __mmask16 __U, __m256h __A) {
+ return (__m256i)__builtin_ia32_vcvtph2uw256_mask((__v16hf)__A, (__v16hu)__W,
+ (__mmask16)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvtph_epu16(__mmask16 __U, __m256h __A) {
+ return (__m256i)__builtin_ia32_vcvtph2uw256_mask(
+ (__v16hf)__A, (__v16hu)_mm256_setzero_si256(), (__mmask16)__U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epu16(__m128h __A) {
+ return (__m128i)__builtin_ia32_vcvttph2uw128_mask(
+ (__v8hf)__A, (__v8hu)_mm_undefined_si128(), (__mmask8)-1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_cvttph_epu16(__m128i __W, __mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvttph2uw128_mask((__v8hf)__A, (__v8hu)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_cvttph_epu16(__mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvttph2uw128_mask(
+ (__v8hf)__A, (__v8hu)_mm_setzero_si128(), (__mmask8)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvttph_epu16(__m256h __A) {
+ return (__m256i)__builtin_ia32_vcvttph2uw256_mask(
+ (__v16hf)__A, (__v16hu)_mm256_undefined_si256(), (__mmask16)-1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_cvttph_epu16(__m256i __W, __mmask16 __U, __m256h __A) {
+ return (__m256i)__builtin_ia32_vcvttph2uw256_mask((__v16hf)__A, (__v16hu)__W,
+ (__mmask16)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvttph_epu16(__mmask16 __U, __m256h __A) {
+ return (__m256i)__builtin_ia32_vcvttph2uw256_mask(
+ (__v16hf)__A, (__v16hu)_mm256_setzero_si256(), (__mmask16)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtepu16_ph(__m128i __A) {
+ return (__m128h) __builtin_convertvector((__v8hu)__A, __v8hf);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_cvtepu16_ph(__m128h __W, __mmask8 __U, __m128i __A) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U, (__v8hf)_mm_cvtepu16_ph(__A), (__v8hf)__W);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtepu16_ph(__mmask8 __U, __m128i __A) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U, (__v8hf)_mm_cvtepu16_ph(__A), (__v8hf)_mm_setzero_ph());
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_cvtepu16_ph(__m256i __A) {
+ return (__m256h) __builtin_convertvector((__v16hu)__A, __v16hf);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_cvtepu16_ph(__m256h __W, __mmask16 __U, __m256i __A) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U, (__v16hf)_mm256_cvtepu16_ph(__A), (__v16hf)__W);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvtepu16_ph(__mmask16 __U, __m256i __A) {
+ return (__m256h)__builtin_ia32_selectph_256((__mmask16)__U,
+ (__v16hf)_mm256_cvtepu16_ph(__A),
+ (__v16hf)_mm256_setzero_ph());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epi32(__m128h __A) {
+ return (__m128i)__builtin_ia32_vcvtph2dq128_mask(
+ (__v8hf)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_cvtph_epi32(__m128i __W, __mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvtph2dq128_mask((__v8hf)__A, (__v4si)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtph_epi32(__mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvtph2dq128_mask(
+ (__v8hf)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtph_epi32(__m128h __A) {
+ return (__m256i)__builtin_ia32_vcvtph2dq256_mask(
+ (__v8hf)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_cvtph_epi32(__m256i __W, __mmask8 __U, __m128h __A) {
+ return (__m256i)__builtin_ia32_vcvtph2dq256_mask((__v8hf)__A, (__v8si)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvtph_epi32(__mmask8 __U, __m128h __A) {
+ return (__m256i)__builtin_ia32_vcvtph2dq256_mask(
+ (__v8hf)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epu32(__m128h __A) {
+ return (__m128i)__builtin_ia32_vcvtph2udq128_mask(
+ (__v8hf)__A, (__v4su)_mm_undefined_si128(), (__mmask8)-1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_cvtph_epu32(__m128i __W, __mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvtph2udq128_mask((__v8hf)__A, (__v4su)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtph_epu32(__mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvtph2udq128_mask(
+ (__v8hf)__A, (__v4su)_mm_setzero_si128(), (__mmask8)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtph_epu32(__m128h __A) {
+ return (__m256i)__builtin_ia32_vcvtph2udq256_mask(
+ (__v8hf)__A, (__v8su)_mm256_undefined_si256(), (__mmask8)-1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_cvtph_epu32(__m256i __W, __mmask8 __U, __m128h __A) {
+ return (__m256i)__builtin_ia32_vcvtph2udq256_mask((__v8hf)__A, (__v8su)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvtph_epu32(__mmask8 __U, __m128h __A) {
+ return (__m256i)__builtin_ia32_vcvtph2udq256_mask(
+ (__v8hf)__A, (__v8su)_mm256_setzero_si256(), (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtepi32_ph(__m128i __A) {
+ return (__m128h)__builtin_ia32_vcvtdq2ph128_mask(
+ (__v4si)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_cvtepi32_ph(__m128h __W, __mmask8 __U, __m128i __A) {
+ return (__m128h)__builtin_ia32_vcvtdq2ph128_mask((__v4si)__A, (__v8hf)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtepi32_ph(__mmask8 __U, __m128i __A) {
+ return (__m128h)__builtin_ia32_vcvtdq2ph128_mask(
+ (__v4si)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS256
+_mm256_cvtepi32_ph(__m256i __A) {
+ return (__m128h) __builtin_convertvector((__v8si)__A, __v8hf);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS256
+_mm256_mask_cvtepi32_ph(__m128h __W, __mmask8 __U, __m256i __A) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U, (__v8hf)_mm256_cvtepi32_ph(__A), (__v8hf)__W);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvtepi32_ph(__mmask8 __U, __m256i __A) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U, (__v8hf)_mm256_cvtepi32_ph(__A), (__v8hf)_mm_setzero_ph());
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtepu32_ph(__m128i __A) {
+ return (__m128h)__builtin_ia32_vcvtudq2ph128_mask(
+ (__v4su)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_cvtepu32_ph(__m128h __W, __mmask8 __U, __m128i __A) {
+ return (__m128h)__builtin_ia32_vcvtudq2ph128_mask((__v4su)__A, (__v8hf)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtepu32_ph(__mmask8 __U, __m128i __A) {
+ return (__m128h)__builtin_ia32_vcvtudq2ph128_mask(
+ (__v4su)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS256
+_mm256_cvtepu32_ph(__m256i __A) {
+ return (__m128h) __builtin_convertvector((__v8su)__A, __v8hf);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS256
+_mm256_mask_cvtepu32_ph(__m128h __W, __mmask8 __U, __m256i __A) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U, (__v8hf)_mm256_cvtepu32_ph(__A), (__v8hf)__W);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvtepu32_ph(__mmask8 __U, __m256i __A) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U, (__v8hf)_mm256_cvtepu32_ph(__A), (__v8hf)_mm_setzero_ph());
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epi32(__m128h __A) {
+ return (__m128i)__builtin_ia32_vcvttph2dq128_mask(
+ (__v8hf)__A, (__v4si)_mm_undefined_si128(), (__mmask8)-1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_cvttph_epi32(__m128i __W, __mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvttph2dq128_mask((__v8hf)__A, (__v4si)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_cvttph_epi32(__mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvttph2dq128_mask(
+ (__v8hf)__A, (__v4si)_mm_setzero_si128(), (__mmask8)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvttph_epi32(__m128h __A) {
+ return (__m256i)__builtin_ia32_vcvttph2dq256_mask(
+ (__v8hf)__A, (__v8si)_mm256_undefined_si256(), (__mmask8)-1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_cvttph_epi32(__m256i __W, __mmask8 __U, __m128h __A) {
+ return (__m256i)__builtin_ia32_vcvttph2dq256_mask((__v8hf)__A, (__v8si)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvttph_epi32(__mmask8 __U, __m128h __A) {
+ return (__m256i)__builtin_ia32_vcvttph2dq256_mask(
+ (__v8hf)__A, (__v8si)_mm256_setzero_si256(), (__mmask8)__U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epu32(__m128h __A) {
+ return (__m128i)__builtin_ia32_vcvttph2udq128_mask(
+ (__v8hf)__A, (__v4su)_mm_undefined_si128(), (__mmask8)-1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_cvttph_epu32(__m128i __W, __mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvttph2udq128_mask((__v8hf)__A, (__v4su)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_cvttph_epu32(__mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvttph2udq128_mask(
+ (__v8hf)__A, (__v4su)_mm_setzero_si128(), (__mmask8)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvttph_epu32(__m128h __A) {
+ return (__m256i)__builtin_ia32_vcvttph2udq256_mask(
+ (__v8hf)__A, (__v8su)_mm256_undefined_si256(), (__mmask8)-1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_cvttph_epu32(__m256i __W, __mmask8 __U, __m128h __A) {
+ return (__m256i)__builtin_ia32_vcvttph2udq256_mask((__v8hf)__A, (__v8su)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvttph_epu32(__mmask8 __U, __m128h __A) {
+ return (__m256i)__builtin_ia32_vcvttph2udq256_mask(
+ (__v8hf)__A, (__v8su)_mm256_setzero_si256(), (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtepi64_ph(__m128i __A) {
+ return (__m128h)__builtin_ia32_vcvtqq2ph128_mask(
+ (__v2di)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_cvtepi64_ph(__m128h __W, __mmask8 __U, __m128i __A) {
+ return (__m128h)__builtin_ia32_vcvtqq2ph128_mask((__v2di)__A, (__v8hf)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtepi64_ph(__mmask8 __U, __m128i __A) {
+ return (__m128h)__builtin_ia32_vcvtqq2ph128_mask(
+ (__v2di)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS256
+_mm256_cvtepi64_ph(__m256i __A) {
+ return (__m128h)__builtin_ia32_vcvtqq2ph256_mask(
+ (__v4di)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS256
+_mm256_mask_cvtepi64_ph(__m128h __W, __mmask8 __U, __m256i __A) {
+ return (__m128h)__builtin_ia32_vcvtqq2ph256_mask((__v4di)__A, (__v8hf)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvtepi64_ph(__mmask8 __U, __m256i __A) {
+ return (__m128h)__builtin_ia32_vcvtqq2ph256_mask(
+ (__v4di)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epi64(__m128h __A) {
+ return (__m128i)__builtin_ia32_vcvtph2qq128_mask(
+ (__v8hf)__A, (__v2di)_mm_undefined_si128(), (__mmask8)-1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_cvtph_epi64(__m128i __W, __mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvtph2qq128_mask((__v8hf)__A, (__v2di)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtph_epi64(__mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvtph2qq128_mask(
+ (__v8hf)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtph_epi64(__m128h __A) {
+ return (__m256i)__builtin_ia32_vcvtph2qq256_mask(
+ (__v8hf)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_cvtph_epi64(__m256i __W, __mmask8 __U, __m128h __A) {
+ return (__m256i)__builtin_ia32_vcvtph2qq256_mask((__v8hf)__A, (__v4di)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvtph_epi64(__mmask8 __U, __m128h __A) {
+ return (__m256i)__builtin_ia32_vcvtph2qq256_mask(
+ (__v8hf)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtepu64_ph(__m128i __A) {
+ return (__m128h)__builtin_ia32_vcvtuqq2ph128_mask(
+ (__v2du)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_cvtepu64_ph(__m128h __W, __mmask8 __U, __m128i __A) {
+ return (__m128h)__builtin_ia32_vcvtuqq2ph128_mask((__v2du)__A, (__v8hf)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtepu64_ph(__mmask8 __U, __m128i __A) {
+ return (__m128h)__builtin_ia32_vcvtuqq2ph128_mask(
+ (__v2du)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS256
+_mm256_cvtepu64_ph(__m256i __A) {
+ return (__m128h)__builtin_ia32_vcvtuqq2ph256_mask(
+ (__v4du)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS256
+_mm256_mask_cvtepu64_ph(__m128h __W, __mmask8 __U, __m256i __A) {
+ return (__m128h)__builtin_ia32_vcvtuqq2ph256_mask((__v4du)__A, (__v8hf)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvtepu64_ph(__mmask8 __U, __m256i __A) {
+ return (__m128h)__builtin_ia32_vcvtuqq2ph256_mask(
+ (__v4du)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtph_epu64(__m128h __A) {
+ return (__m128i)__builtin_ia32_vcvtph2uqq128_mask(
+ (__v8hf)__A, (__v2du)_mm_undefined_si128(), (__mmask8)-1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_cvtph_epu64(__m128i __W, __mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvtph2uqq128_mask((__v8hf)__A, (__v2du)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtph_epu64(__mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvtph2uqq128_mask(
+ (__v8hf)__A, (__v2du)_mm_setzero_si128(), (__mmask8)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtph_epu64(__m128h __A) {
+ return (__m256i)__builtin_ia32_vcvtph2uqq256_mask(
+ (__v8hf)__A, (__v4du)_mm256_undefined_si256(), (__mmask8)-1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_cvtph_epu64(__m256i __W, __mmask8 __U, __m128h __A) {
+ return (__m256i)__builtin_ia32_vcvtph2uqq256_mask((__v8hf)__A, (__v4du)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvtph_epu64(__mmask8 __U, __m128h __A) {
+ return (__m256i)__builtin_ia32_vcvtph2uqq256_mask(
+ (__v8hf)__A, (__v4du)_mm256_setzero_si256(), (__mmask8)__U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epi64(__m128h __A) {
+ return (__m128i)__builtin_ia32_vcvttph2qq128_mask(
+ (__v8hf)__A, (__v2di)_mm_undefined_si128(), (__mmask8)-1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_cvttph_epi64(__m128i __W, __mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvttph2qq128_mask((__v8hf)__A, (__v2di)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_cvttph_epi64(__mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvttph2qq128_mask(
+ (__v8hf)__A, (__v2di)_mm_setzero_si128(), (__mmask8)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvttph_epi64(__m128h __A) {
+ return (__m256i)__builtin_ia32_vcvttph2qq256_mask(
+ (__v8hf)__A, (__v4di)_mm256_undefined_si256(), (__mmask8)-1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_cvttph_epi64(__m256i __W, __mmask8 __U, __m128h __A) {
+ return (__m256i)__builtin_ia32_vcvttph2qq256_mask((__v8hf)__A, (__v4di)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvttph_epi64(__mmask8 __U, __m128h __A) {
+ return (__m256i)__builtin_ia32_vcvttph2qq256_mask(
+ (__v8hf)__A, (__v4di)_mm256_setzero_si256(), (__mmask8)__U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvttph_epu64(__m128h __A) {
+ return (__m128i)__builtin_ia32_vcvttph2uqq128_mask(
+ (__v8hf)__A, (__v2du)_mm_undefined_si128(), (__mmask8)-1);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_cvttph_epu64(__m128i __W, __mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvttph2uqq128_mask((__v8hf)__A, (__v2du)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_cvttph_epu64(__mmask8 __U, __m128h __A) {
+ return (__m128i)__builtin_ia32_vcvttph2uqq128_mask(
+ (__v8hf)__A, (__v2du)_mm_setzero_si128(), (__mmask8)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvttph_epu64(__m128h __A) {
+ return (__m256i)__builtin_ia32_vcvttph2uqq256_mask(
+ (__v8hf)__A, (__v4du)_mm256_undefined_si256(), (__mmask8)-1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_mask_cvttph_epu64(__m256i __W, __mmask8 __U, __m128h __A) {
+ return (__m256i)__builtin_ia32_vcvttph2uqq256_mask((__v8hf)__A, (__v4du)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvttph_epu64(__mmask8 __U, __m128h __A) {
+ return (__m256i)__builtin_ia32_vcvttph2uqq256_mask(
+ (__v8hf)__A, (__v4du)_mm256_setzero_si256(), (__mmask8)__U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_cvtxph_ps(__m128h __A) {
+ return (__m128)__builtin_ia32_vcvtph2psx128_mask(
+ (__v8hf)__A, (__v4sf)_mm_undefined_ps(), (__mmask8)-1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_mask_cvtxph_ps(__m128 __W,
+ __mmask8 __U,
+ __m128h __A) {
+ return (__m128)__builtin_ia32_vcvtph2psx128_mask((__v8hf)__A, (__v4sf)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtxph_ps(__mmask8 __U, __m128h __A) {
+ return (__m128)__builtin_ia32_vcvtph2psx128_mask(
+ (__v8hf)__A, (__v4sf)_mm_setzero_ps(), (__mmask8)__U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_cvtxph_ps(__m128h __A) {
+ return (__m256)__builtin_ia32_vcvtph2psx256_mask(
+ (__v8hf)__A, (__v8sf)_mm256_undefined_ps(), (__mmask8)-1);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_mask_cvtxph_ps(__m256 __W, __mmask8 __U, __m128h __A) {
+ return (__m256)__builtin_ia32_vcvtph2psx256_mask((__v8hf)__A, (__v8sf)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvtxph_ps(__mmask8 __U, __m128h __A) {
+ return (__m256)__builtin_ia32_vcvtph2psx256_mask(
+ (__v8hf)__A, (__v8sf)_mm256_setzero_ps(), (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtxps_ph(__m128 __A) {
+ return (__m128h)__builtin_ia32_vcvtps2phx128_mask(
+ (__v4sf)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_cvtxps_ph(__m128h __W,
+ __mmask8 __U,
+ __m128 __A) {
+ return (__m128h)__builtin_ia32_vcvtps2phx128_mask((__v4sf)__A, (__v8hf)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtxps_ph(__mmask8 __U, __m128 __A) {
+ return (__m128h)__builtin_ia32_vcvtps2phx128_mask(
+ (__v4sf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS256 _mm256_cvtxps_ph(__m256 __A) {
+ return (__m128h)__builtin_ia32_vcvtps2phx256_mask(
+ (__v8sf)__A, (__v8hf)_mm_undefined_ph(), (__mmask8)-1);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS256
+_mm256_mask_cvtxps_ph(__m128h __W, __mmask8 __U, __m256 __A) {
+ return (__m128h)__builtin_ia32_vcvtps2phx256_mask((__v8sf)__A, (__v8hf)__W,
+ (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS256
+_mm256_maskz_cvtxps_ph(__mmask8 __U, __m256 __A) {
+ return (__m128h)__builtin_ia32_vcvtps2phx256_mask(
+ (__v8sf)__A, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmadd_ph(__m128h __A,
+ __m128h __B,
+ __m128h __C) {
+ return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B,
+ (__v8hf)__C);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmadd_ph(__m128h __A,
+ __mmask8 __U,
+ __m128h __B,
+ __m128h __C) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U,
+ __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+ (__v8hf)__A);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask3_fmadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U,
+ __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+ (__v8hf)__C);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_fmadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U,
+ __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+ (__v8hf)_mm_setzero_ph());
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmsub_ph(__m128h __A,
+ __m128h __B,
+ __m128h __C) {
+ return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B,
+ -(__v8hf)__C);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmsub_ph(__m128h __A,
+ __mmask8 __U,
+ __m128h __B,
+ __m128h __C) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U, _mm_fmsub_ph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+ (__v8hf)__A);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_fmsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U, _mm_fmsub_ph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+ (__v8hf)_mm_setzero_ph());
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask3_fnmadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U,
+ __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+ (__v8hf)__C);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_fnmadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U,
+ __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+ (__v8hf)_mm_setzero_ph());
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_fnmsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U,
+ __builtin_ia32_vfmaddph(-(__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
+ (__v8hf)_mm_setzero_ph());
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmadd_ph(__m256h __A,
+ __m256h __B,
+ __m256h __C) {
+ return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B,
+ (__v16hf)__C);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_fmadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U,
+ __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+ (__v16hf)__A);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask3_fmadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U,
+ __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+ (__v16hf)__C);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_fmadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U,
+ __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+ (__v16hf)_mm256_setzero_ph());
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmsub_ph(__m256h __A,
+ __m256h __B,
+ __m256h __C) {
+ return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B,
+ -(__v16hf)__C);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_fmsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U,
+ __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+ (__v16hf)__A);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_fmsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U,
+ __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+ (__v16hf)_mm256_setzero_ph());
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask3_fnmadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U,
+ __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+ (__v16hf)__C);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_fnmadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U,
+ __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+ (__v16hf)_mm256_setzero_ph());
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_fnmsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U,
+ __builtin_ia32_vfmaddph256(-(__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+ (__v16hf)_mm256_setzero_ph());
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmaddsub_ph(__m128h __A,
+ __m128h __B,
+ __m128h __C) {
+ return (__m128h)__builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B,
+ (__v8hf)__C);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_fmaddsub_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U,
+ __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+ (__v8hf)__A);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask3_fmaddsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U,
+ __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+ (__v8hf)__C);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_fmaddsub_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U,
+ __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, (__v8hf)__C),
+ (__v8hf)_mm_setzero_ph());
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmsubadd_ph(__m128h __A,
+ __m128h __B,
+ __m128h __C) {
+ return (__m128h)__builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B,
+ -(__v8hf)__C);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_fmsubadd_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U,
+ __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
+ (__v8hf)__A);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_fmsubadd_ph(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U,
+ __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
+ (__v8hf)_mm_setzero_ph());
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_fmaddsub_ph(__m256h __A, __m256h __B, __m256h __C) {
+ return (__m256h)__builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B,
+ (__v16hf)__C);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_fmaddsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U,
+ __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+ (__v16hf)__A);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask3_fmaddsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U,
+ __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+ (__v16hf)__C);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_fmaddsub_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U,
+ __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, (__v16hf)__C),
+ (__v16hf)_mm256_setzero_ph());
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_fmsubadd_ph(__m256h __A, __m256h __B, __m256h __C) {
+ return (__m256h)__builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B,
+ -(__v16hf)__C);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_fmsubadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U,
+ __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+ (__v16hf)__A);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_fmsubadd_ph(__mmask16 __U, __m256h __A, __m256h __B, __m256h __C) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U,
+ __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+ (__v16hf)_mm256_setzero_ph());
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask3_fmsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U,
+ __builtin_ia32_vfmaddph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
+ (__v8hf)__C);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask3_fmsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U,
+ __builtin_ia32_vfmaddph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+ (__v16hf)__C);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask3_fmsubadd_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U,
+ __builtin_ia32_vfmaddsubph((__v8hf)__A, (__v8hf)__B, -(__v8hf)__C),
+ (__v8hf)__C);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask3_fmsubadd_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U,
+ __builtin_ia32_vfmaddsubph256((__v16hf)__A, (__v16hf)__B, -(__v16hf)__C),
+ (__v16hf)__C);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmadd_ph(__m128h __A,
+ __m128h __B,
+ __m128h __C) {
+ return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B,
+ (__v8hf)__C);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_fnmadd_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U,
+ __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, (__v8hf)__C),
+ (__v8hf)__A);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fnmadd_ph(__m256h __A,
+ __m256h __B,
+ __m256h __C) {
+ return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B,
+ (__v16hf)__C);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_fnmadd_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U,
+ __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, (__v16hf)__C),
+ (__v16hf)__A);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fnmsub_ph(__m128h __A,
+ __m128h __B,
+ __m128h __C) {
+ return (__m128h)__builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B,
+ -(__v8hf)__C);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_fnmsub_ph(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U,
+ __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C),
+ (__v8hf)__A);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask3_fnmsub_ph(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
+ return (__m128h)__builtin_ia32_selectph_128(
+ (__mmask8)__U,
+ __builtin_ia32_vfmaddph((__v8hf)__A, -(__v8hf)__B, -(__v8hf)__C),
+ (__v8hf)__C);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fnmsub_ph(__m256h __A,
+ __m256h __B,
+ __m256h __C) {
+ return (__m256h)__builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B,
+ -(__v16hf)__C);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_fnmsub_ph(__m256h __A, __mmask16 __U, __m256h __B, __m256h __C) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U,
+ __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C),
+ (__v16hf)__A);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask3_fnmsub_ph(__m256h __A, __m256h __B, __m256h __C, __mmask16 __U) {
+ return (__m256h)__builtin_ia32_selectph_256(
+ (__mmask16)__U,
+ __builtin_ia32_vfmaddph256((__v16hf)__A, -(__v16hf)__B, -(__v16hf)__C),
+ (__v16hf)__C);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fcmul_pch(__m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_vfcmulcph128_mask(
+ (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_undefined_ph(), (__mmask8)-1);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_fcmul_pch(__m128h __W, __mmask8 __U, __m128h __A, __m128h __B) {
+ return (__m128h)__builtin_ia32_vfcmulcph128_mask((__v4sf)__A, (__v4sf)__B,
+ (__v4sf)__W, (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_fcmul_pch(__mmask8 __U, __m128h __A, __m128h __B) {
+ return (__m128h)__builtin_ia32_vfcmulcph128_mask(
+ (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ph(), (__mmask8)__U);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS128 _mm256_fcmul_pch(__m256h __A,
+ __m256h __B) {
+ return (__m256h)__builtin_ia32_vfcmulcph256_mask(
+ (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_undefined_ph(), (__mmask8)-1);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_fcmul_pch(__m256h __W, __mmask8 __U, __m256h __A, __m256h __B) {
+ return (__m256h)__builtin_ia32_vfcmulcph256_mask((__v8sf)__A, (__v8sf)__B,
+ (__v8sf)__W, (__mmask8)__U);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_fcmul_pch(__mmask8 __U, __m256h __A, __m256h __B) {
+ return (__m256h)__builtin_ia32_vfcmulcph256_mask(
+ (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_setzero_ph(), (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fcmadd_pch(__m128h __A,
+ __m128h __B,
+ __m128h __C) {
+ return (__m128h)__builtin_ia32_vfcmaddcph128_mask((__v4sf)__A, (__v4sf)__B,
+ (__v4sf)__C, (__mmask8)-1);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_fcmadd_pch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
+ return (__m128h)__builtin_ia32_selectps_128(
+ __U,
+ __builtin_ia32_vfcmaddcph128_mask((__v4sf)__A, (__v4sf)(__m128h)__B,
+ (__v4sf)__C, (__mmask8)__U),
+ (__v4sf)__A);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask3_fcmadd_pch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
+ return (__m128h)__builtin_ia32_vfcmaddcph128_mask((__v4sf)__A, (__v4sf)__B,
+ (__v4sf)__C, (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_fcmadd_pch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
+ return (__m128h)__builtin_ia32_vfcmaddcph128_maskz(
+ (__v4sf)__A, (__v4sf)__B, (__v4sf)__C, (__mmask8)__U);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fcmadd_pch(__m256h __A,
+ __m256h __B,
+ __m256h __C) {
+ return (__m256h)__builtin_ia32_vfcmaddcph256_mask((__v8sf)__A, (__v8sf)__B,
+ (__v8sf)__C, (__mmask8)-1);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_fcmadd_pch(__m256h __A, __mmask8 __U, __m256h __B, __m256h __C) {
+ return (__m256h)__builtin_ia32_selectps_256(
+ __U,
+ __builtin_ia32_vfcmaddcph256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__C,
+ (__mmask8)__U),
+ (__v8sf)__A);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask3_fcmadd_pch(__m256h __A, __m256h __B, __m256h __C, __mmask8 __U) {
+ return (__m256h)__builtin_ia32_vfcmaddcph256_mask((__v8sf)__A, (__v8sf)__B,
+ (__v8sf)__C, (__mmask8)__U);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_fcmadd_pch(__mmask8 __U, __m256h __A, __m256h __B, __m256h __C) {
+ return (__m256h)__builtin_ia32_vfcmaddcph256_maskz(
+ (__v8sf)__A, (__v8sf)__B, (__v8sf)__C, (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmul_pch(__m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_vfmulcph128_mask(
+ (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_undefined_ph(), (__mmask8)-1);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_fmul_pch(__m128h __W,
+ __mmask8 __U,
+ __m128h __A,
+ __m128h __B) {
+ return (__m128h)__builtin_ia32_vfmulcph128_mask((__v4sf)__A, (__v4sf)__B,
+ (__v4sf)__W, (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_fmul_pch(__mmask8 __U, __m128h __A, __m128h __B) {
+ return (__m128h)__builtin_ia32_vfmulcph128_mask(
+ (__v4sf)__A, (__v4sf)__B, (__v4sf)_mm_setzero_ph(), (__mmask8)__U);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmul_pch(__m256h __A,
+ __m256h __B) {
+ return (__m256h)__builtin_ia32_vfmulcph256_mask(
+ (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_undefined_ph(), (__mmask8)-1);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_fmul_pch(__m256h __W, __mmask8 __U, __m256h __A, __m256h __B) {
+ return (__m256h)__builtin_ia32_vfmulcph256_mask((__v8sf)__A, (__v8sf)__B,
+ (__v8sf)__W, (__mmask8)__U);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_fmul_pch(__mmask8 __U, __m256h __A, __m256h __B) {
+ return (__m256h)__builtin_ia32_vfmulcph256_mask(
+ (__v8sf)__A, (__v8sf)__B, (__v8sf)_mm256_setzero_ph(), (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_fmadd_pch(__m128h __A,
+ __m128h __B,
+ __m128h __C) {
+ return (__m128h)__builtin_ia32_vfmaddcph128_mask((__v4sf)__A, (__v4sf)__B,
+ (__v4sf)__C, (__mmask8)-1);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask_fmadd_pch(__m128h __A, __mmask8 __U, __m128h __B, __m128h __C) {
+ return (__m128h)__builtin_ia32_selectps_128(
+ __U,
+ __builtin_ia32_vfmaddcph128_mask((__v4sf)__A, (__v4sf)__B, (__v4sf)__C,
+ (__mmask8)__U),
+ (__v4sf)__A);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_mask3_fmadd_pch(__m128h __A, __m128h __B, __m128h __C, __mmask8 __U) {
+ return (__m128h)__builtin_ia32_vfmaddcph128_mask((__v4sf)__A, (__v4sf)__B,
+ (__v4sf)__C, (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_maskz_fmadd_pch(__mmask8 __U, __m128h __A, __m128h __B, __m128h __C) {
+ return (__m128h)__builtin_ia32_vfmaddcph128_maskz((__v4sf)__A, (__v4sf)__B,
+ (__v4sf)__C, (__mmask8)__U);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_fmadd_pch(__m256h __A,
+ __m256h __B,
+ __m256h __C) {
+ return (__m256h)__builtin_ia32_vfmaddcph256_mask((__v8sf)__A, (__v8sf)__B,
+ (__v8sf)__C, (__mmask8)-1);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_fmadd_pch(__m256h __A, __mmask8 __U, __m256h __B, __m256h __C) {
+ return (__m256h)__builtin_ia32_selectps_256(
+ __U,
+ __builtin_ia32_vfmaddcph256_mask((__v8sf)__A, (__v8sf)__B, (__v8sf)__C,
+ (__mmask8)__U),
+ (__v8sf)__A);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask3_fmadd_pch(__m256h __A, __m256h __B, __m256h __C, __mmask8 __U) {
+ return (__m256h)__builtin_ia32_vfmaddcph256_mask((__v8sf)__A, (__v8sf)__B,
+ (__v8sf)__C, (__mmask8)__U);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_maskz_fmadd_pch(__mmask8 __U, __m256h __A, __m256h __B, __m256h __C) {
+ return (__m256h)__builtin_ia32_vfmaddcph256_maskz((__v8sf)__A, (__v8sf)__B,
+ (__v8sf)__C, (__mmask8)__U);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_mask_blend_ph(__mmask8 __U,
+ __m128h __A,
+ __m128h __W) {
+ return (__m128h)__builtin_ia32_selectph_128((__mmask8)__U, (__v8hf)__W,
+ (__v8hf)__A);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_mask_blend_ph(__mmask16 __U, __m256h __A, __m256h __W) {
+ return (__m256h)__builtin_ia32_selectph_256((__mmask16)__U, (__v16hf)__W,
+ (__v16hf)__A);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_permutex2var_ph(__m128h __A, __m128i __I, __m128h __B) {
+ return (__m128h)__builtin_ia32_vpermi2varhi128((__v8hi)__A, (__v8hi)__I,
+ (__v8hi)__B);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_permutex2var_ph(__m256h __A, __m256i __I, __m256h __B) {
+ return (__m256h)__builtin_ia32_vpermi2varhi256((__v16hi)__A, (__v16hi)__I,
+ (__v16hi)__B);
+}
+
+static __inline__ __m128h __DEFAULT_FN_ATTRS128
+_mm_permutexvar_ph(__m128i __A, __m128h __B) {
+ return (__m128h)__builtin_ia32_permvarhi128((__v8hi)__B, (__v8hi)__A);
+}
+
+static __inline__ __m256h __DEFAULT_FN_ATTRS256
+_mm256_permutexvar_ph(__m256i __A, __m256h __B) {
+ return (__m256h)__builtin_ia32_permvarhi256((__v16hi)__B, (__v16hi)__A);
+}
+
+static __inline__ _Float16 __DEFAULT_FN_ATTRS256
+_mm256_reduce_add_ph(__m256h __W) {
+ return __builtin_ia32_reduce_fadd_ph256(-0.0f16, __W);
+}
+
+static __inline__ _Float16 __DEFAULT_FN_ATTRS256
+_mm256_reduce_mul_ph(__m256h __W) {
+ return __builtin_ia32_reduce_fmul_ph256(1.0f16, __W);
+}
+
+static __inline__ _Float16 __DEFAULT_FN_ATTRS256
+_mm256_reduce_max_ph(__m256h __V) {
+ return __builtin_ia32_reduce_fmax_ph256(__V);
+}
+
+static __inline__ _Float16 __DEFAULT_FN_ATTRS256
+_mm256_reduce_min_ph(__m256h __V) {
+ return __builtin_ia32_reduce_fmin_ph256(__V);
+}
+
+static __inline__ _Float16 __DEFAULT_FN_ATTRS128
+_mm_reduce_add_ph(__m128h __W) {
+ return __builtin_ia32_reduce_fadd_ph128(-0.0f16, __W);
+}
+
+static __inline__ _Float16 __DEFAULT_FN_ATTRS128
+_mm_reduce_mul_ph(__m128h __W) {
+ return __builtin_ia32_reduce_fmul_ph128(1.0f16, __W);
+}
+
+static __inline__ _Float16 __DEFAULT_FN_ATTRS128
+_mm_reduce_max_ph(__m128h __V) {
+ return __builtin_ia32_reduce_fmax_ph128(__V);
+}
+
+static __inline__ _Float16 __DEFAULT_FN_ATTRS128
+_mm_reduce_min_ph(__m128h __V) {
+ return __builtin_ia32_reduce_fmin_ph128(__V);
+}
+
+// intrinsics below are alias for f*mul_*ch
+#define _mm_mul_pch(A, B) _mm_fmul_pch(A, B)
+#define _mm_mask_mul_pch(W, U, A, B) _mm_mask_fmul_pch(W, U, A, B)
+#define _mm_maskz_mul_pch(U, A, B) _mm_maskz_fmul_pch(U, A, B)
+#define _mm256_mul_pch(A, B) _mm256_fmul_pch(A, B)
+#define _mm256_mask_mul_pch(W, U, A, B) _mm256_mask_fmul_pch(W, U, A, B)
+#define _mm256_maskz_mul_pch(U, A, B) _mm256_maskz_fmul_pch(U, A, B)
+
+#define _mm_cmul_pch(A, B) _mm_fcmul_pch(A, B)
+#define _mm_mask_cmul_pch(W, U, A, B) _mm_mask_fcmul_pch(W, U, A, B)
+#define _mm_maskz_cmul_pch(U, A, B) _mm_maskz_fcmul_pch(U, A, B)
+#define _mm256_cmul_pch(A, B) _mm256_fcmul_pch(A, B)
+#define _mm256_mask_cmul_pch(W, U, A, B) _mm256_mask_fcmul_pch(W, U, A, B)
+#define _mm256_maskz_cmul_pch(U, A, B) _mm256_maskz_fcmul_pch(U, A, B)
+
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
+
+#endif
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vlintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vlintrin.h
index 968c10efeac0..2a5f7b43f63f 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vlintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vlintrin.h
@@ -14,8 +14,14 @@
#ifndef __AVX512VLINTRIN_H
#define __AVX512VLINTRIN_H
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl"), __min_vector_width__(256)))
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vl,no-evex512"), \
+ __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vl,no-evex512"), \
+ __min_vector_width__(256)))
typedef short __v2hi __attribute__((__vector_size__(4)));
typedef char __v4qi __attribute__((__vector_size__(4)));
@@ -771,124 +777,124 @@ _mm_maskz_xor_epi64(__mmask8 __U, __m128i __A, __m128i __B)
}
#define _mm_cmp_epi32_mask(a, b, p) \
- (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
- (__v4si)(__m128i)(b), (int)(p), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
+ (__v4si)(__m128i)(b), (int)(p), \
+ (__mmask8)-1))
#define _mm_mask_cmp_epi32_mask(m, a, b, p) \
- (__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
- (__v4si)(__m128i)(b), (int)(p), \
- (__mmask8)(m))
+ ((__mmask8)__builtin_ia32_cmpd128_mask((__v4si)(__m128i)(a), \
+ (__v4si)(__m128i)(b), (int)(p), \
+ (__mmask8)(m)))
#define _mm_cmp_epu32_mask(a, b, p) \
- (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
- (__v4si)(__m128i)(b), (int)(p), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
+ (__v4si)(__m128i)(b), (int)(p), \
+ (__mmask8)-1))
#define _mm_mask_cmp_epu32_mask(m, a, b, p) \
- (__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
- (__v4si)(__m128i)(b), (int)(p), \
- (__mmask8)(m))
+ ((__mmask8)__builtin_ia32_ucmpd128_mask((__v4si)(__m128i)(a), \
+ (__v4si)(__m128i)(b), (int)(p), \
+ (__mmask8)(m)))
#define _mm256_cmp_epi32_mask(a, b, p) \
- (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
- (__v8si)(__m256i)(b), (int)(p), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
+ (__v8si)(__m256i)(b), (int)(p), \
+ (__mmask8)-1))
#define _mm256_mask_cmp_epi32_mask(m, a, b, p) \
- (__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
- (__v8si)(__m256i)(b), (int)(p), \
- (__mmask8)(m))
+ ((__mmask8)__builtin_ia32_cmpd256_mask((__v8si)(__m256i)(a), \
+ (__v8si)(__m256i)(b), (int)(p), \
+ (__mmask8)(m)))
#define _mm256_cmp_epu32_mask(a, b, p) \
- (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
- (__v8si)(__m256i)(b), (int)(p), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
+ (__v8si)(__m256i)(b), (int)(p), \
+ (__mmask8)-1))
#define _mm256_mask_cmp_epu32_mask(m, a, b, p) \
- (__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
- (__v8si)(__m256i)(b), (int)(p), \
- (__mmask8)(m))
+ ((__mmask8)__builtin_ia32_ucmpd256_mask((__v8si)(__m256i)(a), \
+ (__v8si)(__m256i)(b), (int)(p), \
+ (__mmask8)(m)))
#define _mm_cmp_epi64_mask(a, b, p) \
- (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
- (__v2di)(__m128i)(b), (int)(p), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
+ (__v2di)(__m128i)(b), (int)(p), \
+ (__mmask8)-1))
#define _mm_mask_cmp_epi64_mask(m, a, b, p) \
- (__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
- (__v2di)(__m128i)(b), (int)(p), \
- (__mmask8)(m))
+ ((__mmask8)__builtin_ia32_cmpq128_mask((__v2di)(__m128i)(a), \
+ (__v2di)(__m128i)(b), (int)(p), \
+ (__mmask8)(m)))
#define _mm_cmp_epu64_mask(a, b, p) \
- (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
- (__v2di)(__m128i)(b), (int)(p), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
+ (__v2di)(__m128i)(b), (int)(p), \
+ (__mmask8)-1))
#define _mm_mask_cmp_epu64_mask(m, a, b, p) \
- (__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
- (__v2di)(__m128i)(b), (int)(p), \
- (__mmask8)(m))
+ ((__mmask8)__builtin_ia32_ucmpq128_mask((__v2di)(__m128i)(a), \
+ (__v2di)(__m128i)(b), (int)(p), \
+ (__mmask8)(m)))
#define _mm256_cmp_epi64_mask(a, b, p) \
- (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
- (__v4di)(__m256i)(b), (int)(p), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
+ (__v4di)(__m256i)(b), (int)(p), \
+ (__mmask8)-1))
#define _mm256_mask_cmp_epi64_mask(m, a, b, p) \
- (__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
- (__v4di)(__m256i)(b), (int)(p), \
- (__mmask8)(m))
+ ((__mmask8)__builtin_ia32_cmpq256_mask((__v4di)(__m256i)(a), \
+ (__v4di)(__m256i)(b), (int)(p), \
+ (__mmask8)(m)))
#define _mm256_cmp_epu64_mask(a, b, p) \
- (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
- (__v4di)(__m256i)(b), (int)(p), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
+ (__v4di)(__m256i)(b), (int)(p), \
+ (__mmask8)-1))
#define _mm256_mask_cmp_epu64_mask(m, a, b, p) \
- (__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
- (__v4di)(__m256i)(b), (int)(p), \
- (__mmask8)(m))
+ ((__mmask8)__builtin_ia32_ucmpq256_mask((__v4di)(__m256i)(a), \
+ (__v4di)(__m256i)(b), (int)(p), \
+ (__mmask8)(m)))
#define _mm256_cmp_ps_mask(a, b, p) \
- (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
- (__v8sf)(__m256)(b), (int)(p), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
+ (__v8sf)(__m256)(b), (int)(p), \
+ (__mmask8)-1))
#define _mm256_mask_cmp_ps_mask(m, a, b, p) \
- (__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
- (__v8sf)(__m256)(b), (int)(p), \
- (__mmask8)(m))
+ ((__mmask8)__builtin_ia32_cmpps256_mask((__v8sf)(__m256)(a), \
+ (__v8sf)(__m256)(b), (int)(p), \
+ (__mmask8)(m)))
#define _mm256_cmp_pd_mask(a, b, p) \
- (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
- (__v4df)(__m256d)(b), (int)(p), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
+ (__v4df)(__m256d)(b), (int)(p), \
+ (__mmask8)-1))
#define _mm256_mask_cmp_pd_mask(m, a, b, p) \
- (__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
- (__v4df)(__m256d)(b), (int)(p), \
- (__mmask8)(m))
+ ((__mmask8)__builtin_ia32_cmppd256_mask((__v4df)(__m256d)(a), \
+ (__v4df)(__m256d)(b), (int)(p), \
+ (__mmask8)(m)))
#define _mm_cmp_ps_mask(a, b, p) \
- (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
- (__v4sf)(__m128)(b), (int)(p), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
+ (__v4sf)(__m128)(b), (int)(p), \
+ (__mmask8)-1))
#define _mm_mask_cmp_ps_mask(m, a, b, p) \
- (__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
- (__v4sf)(__m128)(b), (int)(p), \
- (__mmask8)(m))
+ ((__mmask8)__builtin_ia32_cmpps128_mask((__v4sf)(__m128)(a), \
+ (__v4sf)(__m128)(b), (int)(p), \
+ (__mmask8)(m)))
#define _mm_cmp_pd_mask(a, b, p) \
- (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
- (__v2df)(__m128d)(b), (int)(p), \
- (__mmask8)-1)
+ ((__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
+ (__v2df)(__m128d)(b), (int)(p), \
+ (__mmask8)-1))
#define _mm_mask_cmp_pd_mask(m, a, b, p) \
- (__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
- (__v2df)(__m128d)(b), (int)(p), \
- (__mmask8)(m))
+ ((__mmask8)__builtin_ia32_cmppd128_mask((__v2df)(__m128d)(a), \
+ (__v2df)(__m128d)(b), (int)(p), \
+ (__mmask8)(m)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_fmadd_pd(__m128d __A, __mmask8 __U, __m128d __B, __m128d __C)
@@ -2988,7 +2994,7 @@ _mm256_maskz_abs_epi32(__mmask8 __U, __m256i __A) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_abs_epi64 (__m128i __A) {
- return (__m128i)__builtin_ia32_pabsq128((__v2di)__A);
+ return (__m128i)__builtin_elementwise_abs((__v2di)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3007,7 +3013,7 @@ _mm_maskz_abs_epi64 (__mmask8 __U, __m128i __A) {
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_abs_epi64 (__m256i __A) {
- return (__m256i)__builtin_ia32_pabsq256 ((__v4di)__A);
+ return (__m256i)__builtin_elementwise_abs((__v4di)__A);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3054,7 +3060,7 @@ _mm256_mask_max_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_max_epi64 (__m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_pmaxsq128((__v2di)__A, (__v2di)__B);
+ return (__m128i)__builtin_elementwise_max((__v2di)__A, (__v2di)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3073,7 +3079,7 @@ _mm_mask_max_epi64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epi64 (__m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_pmaxsq256((__v4di)__A, (__v4di)__B);
+ return (__m256i)__builtin_elementwise_max((__v4di)__A, (__v4di)__B);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3120,7 +3126,7 @@ _mm256_mask_max_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_max_epu64 (__m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_pmaxuq128((__v2di)__A, (__v2di)__B);
+ return (__m128i)__builtin_elementwise_max((__v2du)__A, (__v2du)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3139,7 +3145,7 @@ _mm_mask_max_epu64 (__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_max_epu64 (__m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_pmaxuq256((__v4di)__A, (__v4di)__B);
+ return (__m256i)__builtin_elementwise_max((__v4du)__A, (__v4du)__B);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3186,7 +3192,7 @@ _mm256_mask_min_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_min_epi64 (__m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_pminsq128((__v2di)__A, (__v2di)__B);
+ return (__m128i)__builtin_elementwise_min((__v2di)__A, (__v2di)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3205,7 +3211,7 @@ _mm_maskz_min_epi64 (__mmask8 __M, __m128i __A, __m128i __B) {
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epi64 (__m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_pminsq256((__v4di)__A, (__v4di)__B);
+ return (__m256i)__builtin_elementwise_min((__v4di)__A, (__v4di)__B);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3252,7 +3258,7 @@ _mm256_mask_min_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_min_epu64 (__m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_pminuq128((__v2di)__A, (__v2di)__B);
+ return (__m128i)__builtin_elementwise_min((__v2du)__A, (__v2du)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3271,7 +3277,7 @@ _mm_maskz_min_epu64 (__mmask8 __M, __m128i __A, __m128i __B) {
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_min_epu64 (__m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_pminuq256((__v4di)__A, (__v4di)__B);
+ return (__m256i)__builtin_elementwise_min((__v4du)__A, (__v4du)__B);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -3289,78 +3295,78 @@ _mm256_maskz_min_epu64 (__mmask8 __M, __m256i __A, __m256i __B) {
}
#define _mm_roundscale_pd(A, imm) \
- (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
- (int)(imm), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1)
+ ((__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
+ (int)(imm), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1))
#define _mm_mask_roundscale_pd(W, U, A, imm) \
- (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
- (int)(imm), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U))
+ ((__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
+ (int)(imm), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U)))
#define _mm_maskz_roundscale_pd(U, A, imm) \
- (__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
- (int)(imm), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U))
+ ((__m128d)__builtin_ia32_rndscalepd_128_mask((__v2df)(__m128d)(A), \
+ (int)(imm), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U)))
#define _mm256_roundscale_pd(A, imm) \
- (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
- (int)(imm), \
- (__v4df)_mm256_setzero_pd(), \
- (__mmask8)-1)
+ ((__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
+ (int)(imm), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)-1))
#define _mm256_mask_roundscale_pd(W, U, A, imm) \
- (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
- (int)(imm), \
- (__v4df)(__m256d)(W), \
- (__mmask8)(U))
+ ((__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
+ (int)(imm), \
+ (__v4df)(__m256d)(W), \
+ (__mmask8)(U)))
#define _mm256_maskz_roundscale_pd(U, A, imm) \
- (__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
- (int)(imm), \
- (__v4df)_mm256_setzero_pd(), \
- (__mmask8)(U))
+ ((__m256d)__builtin_ia32_rndscalepd_256_mask((__v4df)(__m256d)(A), \
+ (int)(imm), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)(U)))
#define _mm_roundscale_ps(A, imm) \
- (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1)
+ ((__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1))
#define _mm_mask_roundscale_ps(W, U, A, imm) \
- (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U))
+ ((__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U)))
#define _mm_maskz_roundscale_ps(U, A, imm) \
- (__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U))
+ ((__m128)__builtin_ia32_rndscaleps_128_mask((__v4sf)(__m128)(A), (int)(imm), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U)))
#define _mm256_roundscale_ps(A, imm) \
- (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
- (__v8sf)_mm256_setzero_ps(), \
- (__mmask8)-1)
+ ((__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)-1))
#define _mm256_mask_roundscale_ps(W, U, A, imm) \
- (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
- (__v8sf)(__m256)(W), \
- (__mmask8)(U))
+ ((__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
+ (__v8sf)(__m256)(W), \
+ (__mmask8)(U)))
#define _mm256_maskz_roundscale_ps(U, A, imm) \
- (__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
- (__v8sf)_mm256_setzero_ps(), \
- (__mmask8)(U))
+ ((__m256)__builtin_ia32_rndscaleps_256_mask((__v8sf)(__m256)(A), (int)(imm), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)(U)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_scalef_pd (__m128d __A, __m128d __B) {
@@ -4298,56 +4304,56 @@ _mm256_maskz_scalef_ps (__mmask8 __U, __m256 __A, __m256 __B) {
#define _mm_rol_epi32(a, b) \
- (__m128i)__builtin_ia32_prold128((__v4si)(__m128i)(a), (int)(b))
+ ((__m128i)__builtin_ia32_prold128((__v4si)(__m128i)(a), (int)(b)))
#define _mm_mask_rol_epi32(w, u, a, b) \
- (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
- (__v4si)_mm_rol_epi32((a), (b)), \
- (__v4si)(__m128i)(w))
+ ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
+ (__v4si)_mm_rol_epi32((a), (b)), \
+ (__v4si)(__m128i)(w)))
#define _mm_maskz_rol_epi32(u, a, b) \
- (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
- (__v4si)_mm_rol_epi32((a), (b)), \
- (__v4si)_mm_setzero_si128())
+ ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
+ (__v4si)_mm_rol_epi32((a), (b)), \
+ (__v4si)_mm_setzero_si128()))
#define _mm256_rol_epi32(a, b) \
- (__m256i)__builtin_ia32_prold256((__v8si)(__m256i)(a), (int)(b))
+ ((__m256i)__builtin_ia32_prold256((__v8si)(__m256i)(a), (int)(b)))
#define _mm256_mask_rol_epi32(w, u, a, b) \
- (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
- (__v8si)_mm256_rol_epi32((a), (b)), \
- (__v8si)(__m256i)(w))
+ ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
+ (__v8si)_mm256_rol_epi32((a), (b)), \
+ (__v8si)(__m256i)(w)))
#define _mm256_maskz_rol_epi32(u, a, b) \
- (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
- (__v8si)_mm256_rol_epi32((a), (b)), \
- (__v8si)_mm256_setzero_si256())
+ ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
+ (__v8si)_mm256_rol_epi32((a), (b)), \
+ (__v8si)_mm256_setzero_si256()))
#define _mm_rol_epi64(a, b) \
- (__m128i)__builtin_ia32_prolq128((__v2di)(__m128i)(a), (int)(b))
+ ((__m128i)__builtin_ia32_prolq128((__v2di)(__m128i)(a), (int)(b)))
#define _mm_mask_rol_epi64(w, u, a, b) \
- (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
- (__v2di)_mm_rol_epi64((a), (b)), \
- (__v2di)(__m128i)(w))
+ ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
+ (__v2di)_mm_rol_epi64((a), (b)), \
+ (__v2di)(__m128i)(w)))
#define _mm_maskz_rol_epi64(u, a, b) \
- (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
- (__v2di)_mm_rol_epi64((a), (b)), \
- (__v2di)_mm_setzero_si128())
+ ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
+ (__v2di)_mm_rol_epi64((a), (b)), \
+ (__v2di)_mm_setzero_si128()))
#define _mm256_rol_epi64(a, b) \
- (__m256i)__builtin_ia32_prolq256((__v4di)(__m256i)(a), (int)(b))
+ ((__m256i)__builtin_ia32_prolq256((__v4di)(__m256i)(a), (int)(b)))
#define _mm256_mask_rol_epi64(w, u, a, b) \
- (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
- (__v4di)_mm256_rol_epi64((a), (b)), \
- (__v4di)(__m256i)(w))
+ ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
+ (__v4di)_mm256_rol_epi64((a), (b)), \
+ (__v4di)(__m256i)(w)))
#define _mm256_maskz_rol_epi64(u, a, b) \
- (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
- (__v4di)_mm256_rol_epi64((a), (b)), \
- (__v4di)_mm256_setzero_si256())
+ ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
+ (__v4di)_mm256_rol_epi64((a), (b)), \
+ (__v4di)_mm256_setzero_si256()))
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_rolv_epi32 (__m128i __A, __m128i __B)
@@ -4438,56 +4444,56 @@ _mm256_maskz_rolv_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
}
#define _mm_ror_epi32(a, b) \
- (__m128i)__builtin_ia32_prord128((__v4si)(__m128i)(a), (int)(b))
+ ((__m128i)__builtin_ia32_prord128((__v4si)(__m128i)(a), (int)(b)))
#define _mm_mask_ror_epi32(w, u, a, b) \
- (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
- (__v4si)_mm_ror_epi32((a), (b)), \
- (__v4si)(__m128i)(w))
+ ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
+ (__v4si)_mm_ror_epi32((a), (b)), \
+ (__v4si)(__m128i)(w)))
#define _mm_maskz_ror_epi32(u, a, b) \
- (__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
- (__v4si)_mm_ror_epi32((a), (b)), \
- (__v4si)_mm_setzero_si128())
+ ((__m128i)__builtin_ia32_selectd_128((__mmask8)(u), \
+ (__v4si)_mm_ror_epi32((a), (b)), \
+ (__v4si)_mm_setzero_si128()))
#define _mm256_ror_epi32(a, b) \
- (__m256i)__builtin_ia32_prord256((__v8si)(__m256i)(a), (int)(b))
+ ((__m256i)__builtin_ia32_prord256((__v8si)(__m256i)(a), (int)(b)))
#define _mm256_mask_ror_epi32(w, u, a, b) \
- (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
- (__v8si)_mm256_ror_epi32((a), (b)), \
- (__v8si)(__m256i)(w))
+ ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
+ (__v8si)_mm256_ror_epi32((a), (b)), \
+ (__v8si)(__m256i)(w)))
#define _mm256_maskz_ror_epi32(u, a, b) \
- (__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
- (__v8si)_mm256_ror_epi32((a), (b)), \
- (__v8si)_mm256_setzero_si256())
+ ((__m256i)__builtin_ia32_selectd_256((__mmask8)(u), \
+ (__v8si)_mm256_ror_epi32((a), (b)), \
+ (__v8si)_mm256_setzero_si256()))
#define _mm_ror_epi64(a, b) \
- (__m128i)__builtin_ia32_prorq128((__v2di)(__m128i)(a), (int)(b))
+ ((__m128i)__builtin_ia32_prorq128((__v2di)(__m128i)(a), (int)(b)))
#define _mm_mask_ror_epi64(w, u, a, b) \
- (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
- (__v2di)_mm_ror_epi64((a), (b)), \
- (__v2di)(__m128i)(w))
+ ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
+ (__v2di)_mm_ror_epi64((a), (b)), \
+ (__v2di)(__m128i)(w)))
#define _mm_maskz_ror_epi64(u, a, b) \
- (__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
- (__v2di)_mm_ror_epi64((a), (b)), \
- (__v2di)_mm_setzero_si128())
+ ((__m128i)__builtin_ia32_selectq_128((__mmask8)(u), \
+ (__v2di)_mm_ror_epi64((a), (b)), \
+ (__v2di)_mm_setzero_si128()))
#define _mm256_ror_epi64(a, b) \
- (__m256i)__builtin_ia32_prorq256((__v4di)(__m256i)(a), (int)(b))
+ ((__m256i)__builtin_ia32_prorq256((__v4di)(__m256i)(a), (int)(b)))
#define _mm256_mask_ror_epi64(w, u, a, b) \
- (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
- (__v4di)_mm256_ror_epi64((a), (b)), \
- (__v4di)(__m256i)(w))
+ ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
+ (__v4di)_mm256_ror_epi64((a), (b)), \
+ (__v4di)(__m256i)(w)))
#define _mm256_maskz_ror_epi64(u, a, b) \
- (__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
- (__v4di)_mm256_ror_epi64((a), (b)), \
- (__v4di)_mm256_setzero_si256())
+ ((__m256i)__builtin_ia32_selectq_256((__mmask8)(u), \
+ (__v4di)_mm256_ror_epi64((a), (b)), \
+ (__v4di)_mm256_setzero_si256()))
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_sll_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
@@ -4525,7 +4531,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_slli_epi32(__A, __B),
+ (__v4si)_mm_slli_epi32(__A, (int)__B),
(__v4si)__W);
}
@@ -4533,7 +4539,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_slli_epi32(__A, __B),
+ (__v4si)_mm_slli_epi32(__A, (int)__B),
(__v4si)_mm_setzero_si128());
}
@@ -4541,7 +4547,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_slli_epi32(__A, __B),
+ (__v8si)_mm256_slli_epi32(__A, (int)__B),
(__v8si)__W);
}
@@ -4549,7 +4555,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_slli_epi32(__A, __B),
+ (__v8si)_mm256_slli_epi32(__A, (int)__B),
(__v8si)_mm256_setzero_si256());
}
@@ -4589,7 +4595,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_slli_epi64(__A, __B),
+ (__v2di)_mm_slli_epi64(__A, (int)__B),
(__v2di)__W);
}
@@ -4597,7 +4603,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_slli_epi64(__A, __B),
+ (__v2di)_mm_slli_epi64(__A, (int)__B),
(__v2di)_mm_setzero_si128());
}
@@ -4605,7 +4611,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_slli_epi64(__A, __B),
+ (__v4di)_mm256_slli_epi64(__A, (int)__B),
(__v4di)__W);
}
@@ -4613,7 +4619,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_slli_epi64(__A, __B),
+ (__v4di)_mm256_slli_epi64(__A, (int)__B),
(__v4di)_mm256_setzero_si256());
}
@@ -4869,7 +4875,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_srli_epi32(__A, __B),
+ (__v4si)_mm_srli_epi32(__A, (int)__B),
(__v4si)__W);
}
@@ -4877,7 +4883,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_srli_epi32(__A, __B),
+ (__v4si)_mm_srli_epi32(__A, (int)__B),
(__v4si)_mm_setzero_si128());
}
@@ -4885,7 +4891,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_srli_epi32(__A, __B),
+ (__v8si)_mm256_srli_epi32(__A, (int)__B),
(__v8si)__W);
}
@@ -4893,7 +4899,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_srli_epi32(__A, __B),
+ (__v8si)_mm256_srli_epi32(__A, (int)__B),
(__v8si)_mm256_setzero_si256());
}
@@ -4933,7 +4939,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_srli_epi64(__A, __B),
+ (__v2di)_mm_srli_epi64(__A, (int)__B),
(__v2di)__W);
}
@@ -4941,7 +4947,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_srli_epi64(__A, __B),
+ (__v2di)_mm_srli_epi64(__A, (int)__B),
(__v2di)_mm_setzero_si128());
}
@@ -4949,7 +4955,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_srli_epi64(__A, __B),
+ (__v4di)_mm256_srli_epi64(__A, (int)__B),
(__v4di)__W);
}
@@ -4957,7 +4963,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_srli_epi64(__A, __B),
+ (__v4di)_mm256_srli_epi64(__A, (int)__B),
(__v4di)_mm256_setzero_si256());
}
@@ -5356,76 +5362,76 @@ _mm256_maskz_set1_epi64 (__mmask8 __M, long long __A)
}
#define _mm_fixupimm_pd(A, B, C, imm) \
- (__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)-1)
+ ((__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2di)(__m128i)(C), (int)(imm), \
+ (__mmask8)-1))
#define _mm_mask_fixupimm_pd(A, U, B, C, imm) \
- (__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)(U))
+ ((__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2di)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U)))
#define _mm_maskz_fixupimm_pd(U, A, B, C, imm) \
- (__m128d)__builtin_ia32_fixupimmpd128_maskz((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2di)(__m128i)(C), \
- (int)(imm), (__mmask8)(U))
+ ((__m128d)__builtin_ia32_fixupimmpd128_maskz((__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), \
+ (__v2di)(__m128i)(C), \
+ (int)(imm), (__mmask8)(U)))
#define _mm256_fixupimm_pd(A, B, C, imm) \
- (__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
- (__v4df)(__m256d)(B), \
- (__v4di)(__m256i)(C), (int)(imm), \
- (__mmask8)-1)
+ ((__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
+ (__v4df)(__m256d)(B), \
+ (__v4di)(__m256i)(C), (int)(imm), \
+ (__mmask8)-1))
#define _mm256_mask_fixupimm_pd(A, U, B, C, imm) \
- (__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
- (__v4df)(__m256d)(B), \
- (__v4di)(__m256i)(C), (int)(imm), \
- (__mmask8)(U))
+ ((__m256d)__builtin_ia32_fixupimmpd256_mask((__v4df)(__m256d)(A), \
+ (__v4df)(__m256d)(B), \
+ (__v4di)(__m256i)(C), (int)(imm), \
+ (__mmask8)(U)))
#define _mm256_maskz_fixupimm_pd(U, A, B, C, imm) \
- (__m256d)__builtin_ia32_fixupimmpd256_maskz((__v4df)(__m256d)(A), \
- (__v4df)(__m256d)(B), \
- (__v4di)(__m256i)(C), \
- (int)(imm), (__mmask8)(U))
+ ((__m256d)__builtin_ia32_fixupimmpd256_maskz((__v4df)(__m256d)(A), \
+ (__v4df)(__m256d)(B), \
+ (__v4di)(__m256i)(C), \
+ (int)(imm), (__mmask8)(U)))
#define _mm_fixupimm_ps(A, B, C, imm) \
- (__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)-1)
+ ((__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4si)(__m128i)(C), (int)(imm), \
+ (__mmask8)-1))
#define _mm_mask_fixupimm_ps(A, U, B, C, imm) \
- (__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)(U))
-
-#define _mm_maskz_fixupimm_ps(U, A, B, C, imm) \
- (__m128)__builtin_ia32_fixupimmps128_maskz((__v4sf)(__m128)(A), \
+ ((__m128)__builtin_ia32_fixupimmps128_mask((__v4sf)(__m128)(A), \
(__v4sf)(__m128)(B), \
(__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)(U))
+ (__mmask8)(U)))
+
+#define _mm_maskz_fixupimm_ps(U, A, B, C, imm) \
+ ((__m128)__builtin_ia32_fixupimmps128_maskz((__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), \
+ (__v4si)(__m128i)(C), (int)(imm), \
+ (__mmask8)(U)))
#define _mm256_fixupimm_ps(A, B, C, imm) \
- (__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
- (__v8sf)(__m256)(B), \
- (__v8si)(__m256i)(C), (int)(imm), \
- (__mmask8)-1)
+ ((__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
+ (__v8sf)(__m256)(B), \
+ (__v8si)(__m256i)(C), (int)(imm), \
+ (__mmask8)-1))
#define _mm256_mask_fixupimm_ps(A, U, B, C, imm) \
- (__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
- (__v8sf)(__m256)(B), \
- (__v8si)(__m256i)(C), (int)(imm), \
- (__mmask8)(U))
-
-#define _mm256_maskz_fixupimm_ps(U, A, B, C, imm) \
- (__m256)__builtin_ia32_fixupimmps256_maskz((__v8sf)(__m256)(A), \
+ ((__m256)__builtin_ia32_fixupimmps256_mask((__v8sf)(__m256)(A), \
(__v8sf)(__m256)(B), \
(__v8si)(__m256i)(C), (int)(imm), \
- (__mmask8)(U))
+ (__mmask8)(U)))
+
+#define _mm256_maskz_fixupimm_ps(U, A, B, C, imm) \
+ ((__m256)__builtin_ia32_fixupimmps256_maskz((__v8sf)(__m256)(A), \
+ (__v8sf)(__m256)(B), \
+ (__v8si)(__m256i)(C), (int)(imm), \
+ (__mmask8)(U)))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_load_pd (__m128d __W, __mmask8 __U, void const *__P)
@@ -6033,44 +6039,44 @@ _mm256_maskz_rcp14_ps (__mmask8 __U, __m256 __A)
}
#define _mm_mask_permute_pd(W, U, X, C) \
- (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
- (__v2df)_mm_permute_pd((X), (C)), \
- (__v2df)(__m128d)(W))
+ ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+ (__v2df)_mm_permute_pd((X), (C)), \
+ (__v2df)(__m128d)(W)))
#define _mm_maskz_permute_pd(U, X, C) \
- (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
- (__v2df)_mm_permute_pd((X), (C)), \
- (__v2df)_mm_setzero_pd())
+ ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+ (__v2df)_mm_permute_pd((X), (C)), \
+ (__v2df)_mm_setzero_pd()))
#define _mm256_mask_permute_pd(W, U, X, C) \
- (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
- (__v4df)_mm256_permute_pd((X), (C)), \
- (__v4df)(__m256d)(W))
+ ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+ (__v4df)_mm256_permute_pd((X), (C)), \
+ (__v4df)(__m256d)(W)))
#define _mm256_maskz_permute_pd(U, X, C) \
- (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
- (__v4df)_mm256_permute_pd((X), (C)), \
- (__v4df)_mm256_setzero_pd())
+ ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+ (__v4df)_mm256_permute_pd((X), (C)), \
+ (__v4df)_mm256_setzero_pd()))
#define _mm_mask_permute_ps(W, U, X, C) \
- (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
- (__v4sf)_mm_permute_ps((X), (C)), \
- (__v4sf)(__m128)(W))
+ ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+ (__v4sf)_mm_permute_ps((X), (C)), \
+ (__v4sf)(__m128)(W)))
#define _mm_maskz_permute_ps(U, X, C) \
- (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
- (__v4sf)_mm_permute_ps((X), (C)), \
- (__v4sf)_mm_setzero_ps())
+ ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+ (__v4sf)_mm_permute_ps((X), (C)), \
+ (__v4sf)_mm_setzero_ps()))
#define _mm256_mask_permute_ps(W, U, X, C) \
- (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
- (__v8sf)_mm256_permute_ps((X), (C)), \
- (__v8sf)(__m256)(W))
+ ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+ (__v8sf)_mm256_permute_ps((X), (C)), \
+ (__v8sf)(__m256)(W)))
#define _mm256_maskz_permute_ps(U, X, C) \
- (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
- (__v8sf)_mm256_permute_ps((X), (C)), \
- (__v8sf)_mm256_setzero_ps())
+ ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+ (__v8sf)_mm256_permute_ps((X), (C)), \
+ (__v8sf)_mm256_setzero_ps()))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_permutevar_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128i __C)
@@ -6408,7 +6414,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_srai_epi32(__A, __B),
+ (__v4si)_mm_srai_epi32(__A, (int)__B),
(__v4si)__W);
}
@@ -6416,7 +6422,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_srai_epi32(__A, __B),
+ (__v4si)_mm_srai_epi32(__A, (int)__B),
(__v4si)_mm_setzero_si128());
}
@@ -6424,7 +6430,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_srai_epi32(__A, __B),
+ (__v8si)_mm256_srai_epi32(__A, (int)__B),
(__v8si)__W);
}
@@ -6432,7 +6438,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_srai_epi32(__A, __B),
+ (__v8si)_mm256_srai_epi32(__A, (int)__B),
(__v8si)_mm256_setzero_si256());
}
@@ -6483,7 +6489,7 @@ _mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_srai_epi64(__m128i __A, unsigned int __imm)
{
- return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, __imm);
+ return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, (int)__imm);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -6505,7 +6511,7 @@ _mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, unsigned int __imm)
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srai_epi64(__m256i __A, unsigned int __imm)
{
- return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, __imm);
+ return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, (int)__imm);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -6525,176 +6531,162 @@ _mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, unsigned int __imm)
(__v4di)_mm256_setzero_si256());
}
-#define _mm_ternarylogic_epi32(A, B, C, imm) \
- (__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
- (__v4si)(__m128i)(B), \
- (__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)-1)
-
-#define _mm_mask_ternarylogic_epi32(A, U, B, C, imm) \
- (__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
- (__v4si)(__m128i)(B), \
- (__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)(U))
-
-#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm) \
- (__m128i)__builtin_ia32_pternlogd128_maskz((__v4si)(__m128i)(A), \
- (__v4si)(__m128i)(B), \
- (__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)(U))
-
-#define _mm256_ternarylogic_epi32(A, B, C, imm) \
- (__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
- (__v8si)(__m256i)(B), \
- (__v8si)(__m256i)(C), (int)(imm), \
- (__mmask8)-1)
-
-#define _mm256_mask_ternarylogic_epi32(A, U, B, C, imm) \
- (__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
- (__v8si)(__m256i)(B), \
- (__v8si)(__m256i)(C), (int)(imm), \
- (__mmask8)(U))
-
-#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm) \
- (__m256i)__builtin_ia32_pternlogd256_maskz((__v8si)(__m256i)(A), \
- (__v8si)(__m256i)(B), \
- (__v8si)(__m256i)(C), (int)(imm), \
- (__mmask8)(U))
-
-#define _mm_ternarylogic_epi64(A, B, C, imm) \
- (__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
- (__v2di)(__m128i)(B), \
- (__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)-1)
-
-#define _mm_mask_ternarylogic_epi64(A, U, B, C, imm) \
- (__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
- (__v2di)(__m128i)(B), \
- (__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)(U))
-
-#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm) \
- (__m128i)__builtin_ia32_pternlogq128_maskz((__v2di)(__m128i)(A), \
- (__v2di)(__m128i)(B), \
- (__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)(U))
-
-#define _mm256_ternarylogic_epi64(A, B, C, imm) \
- (__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
- (__v4di)(__m256i)(B), \
- (__v4di)(__m256i)(C), (int)(imm), \
- (__mmask8)-1)
-
-#define _mm256_mask_ternarylogic_epi64(A, U, B, C, imm) \
- (__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
- (__v4di)(__m256i)(B), \
- (__v4di)(__m256i)(C), (int)(imm), \
- (__mmask8)(U))
-
-#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm) \
- (__m256i)__builtin_ia32_pternlogq256_maskz((__v4di)(__m256i)(A), \
- (__v4di)(__m256i)(B), \
- (__v4di)(__m256i)(C), (int)(imm), \
- (__mmask8)(U))
-
-
+#define _mm_ternarylogic_epi32(A, B, C, imm) \
+ ((__m128i)__builtin_ia32_pternlogd128_mask( \
+ (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), \
+ (unsigned char)(imm), (__mmask8)-1))
+
+#define _mm_mask_ternarylogic_epi32(A, U, B, C, imm) \
+ ((__m128i)__builtin_ia32_pternlogd128_mask( \
+ (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), \
+ (unsigned char)(imm), (__mmask8)(U)))
+
+#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm) \
+ ((__m128i)__builtin_ia32_pternlogd128_maskz( \
+ (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), \
+ (unsigned char)(imm), (__mmask8)(U)))
+
+#define _mm256_ternarylogic_epi32(A, B, C, imm) \
+ ((__m256i)__builtin_ia32_pternlogd256_mask( \
+ (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), \
+ (unsigned char)(imm), (__mmask8)-1))
+
+#define _mm256_mask_ternarylogic_epi32(A, U, B, C, imm) \
+ ((__m256i)__builtin_ia32_pternlogd256_mask( \
+ (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), \
+ (unsigned char)(imm), (__mmask8)(U)))
+
+#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm) \
+ ((__m256i)__builtin_ia32_pternlogd256_maskz( \
+ (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), \
+ (unsigned char)(imm), (__mmask8)(U)))
+
+#define _mm_ternarylogic_epi64(A, B, C, imm) \
+ ((__m128i)__builtin_ia32_pternlogq128_mask( \
+ (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), \
+ (unsigned char)(imm), (__mmask8)-1))
+
+#define _mm_mask_ternarylogic_epi64(A, U, B, C, imm) \
+ ((__m128i)__builtin_ia32_pternlogq128_mask( \
+ (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), \
+ (unsigned char)(imm), (__mmask8)(U)))
+
+#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm) \
+ ((__m128i)__builtin_ia32_pternlogq128_maskz( \
+ (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), \
+ (unsigned char)(imm), (__mmask8)(U)))
+
+#define _mm256_ternarylogic_epi64(A, B, C, imm) \
+ ((__m256i)__builtin_ia32_pternlogq256_mask( \
+ (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), \
+ (unsigned char)(imm), (__mmask8)-1))
+
+#define _mm256_mask_ternarylogic_epi64(A, U, B, C, imm) \
+ ((__m256i)__builtin_ia32_pternlogq256_mask( \
+ (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), \
+ (unsigned char)(imm), (__mmask8)(U)))
+
+#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm) \
+ ((__m256i)__builtin_ia32_pternlogq256_maskz( \
+ (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), \
+ (unsigned char)(imm), (__mmask8)(U)))
#define _mm256_shuffle_f32x4(A, B, imm) \
- (__m256)__builtin_ia32_shuf_f32x4_256((__v8sf)(__m256)(A), \
- (__v8sf)(__m256)(B), (int)(imm))
+ ((__m256)__builtin_ia32_shuf_f32x4_256((__v8sf)(__m256)(A), \
+ (__v8sf)(__m256)(B), (int)(imm)))
#define _mm256_mask_shuffle_f32x4(W, U, A, B, imm) \
- (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
- (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
- (__v8sf)(__m256)(W))
+ ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+ (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
+ (__v8sf)(__m256)(W)))
#define _mm256_maskz_shuffle_f32x4(U, A, B, imm) \
- (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
- (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
- (__v8sf)_mm256_setzero_ps())
+ ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+ (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
+ (__v8sf)_mm256_setzero_ps()))
#define _mm256_shuffle_f64x2(A, B, imm) \
- (__m256d)__builtin_ia32_shuf_f64x2_256((__v4df)(__m256d)(A), \
- (__v4df)(__m256d)(B), (int)(imm))
+ ((__m256d)__builtin_ia32_shuf_f64x2_256((__v4df)(__m256d)(A), \
+ (__v4df)(__m256d)(B), (int)(imm)))
#define _mm256_mask_shuffle_f64x2(W, U, A, B, imm) \
- (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
- (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
- (__v4df)(__m256d)(W))
+ ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+ (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
+ (__v4df)(__m256d)(W)))
#define _mm256_maskz_shuffle_f64x2(U, A, B, imm) \
- (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
- (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
- (__v4df)_mm256_setzero_pd())
+ ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+ (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
+ (__v4df)_mm256_setzero_pd()))
#define _mm256_shuffle_i32x4(A, B, imm) \
- (__m256i)__builtin_ia32_shuf_i32x4_256((__v8si)(__m256i)(A), \
- (__v8si)(__m256i)(B), (int)(imm))
+ ((__m256i)__builtin_ia32_shuf_i32x4_256((__v8si)(__m256i)(A), \
+ (__v8si)(__m256i)(B), (int)(imm)))
#define _mm256_mask_shuffle_i32x4(W, U, A, B, imm) \
- (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
- (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
- (__v8si)(__m256i)(W))
+ ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
+ (__v8si)(__m256i)(W)))
#define _mm256_maskz_shuffle_i32x4(U, A, B, imm) \
- (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
- (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
- (__v8si)_mm256_setzero_si256())
+ ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
+ (__v8si)_mm256_setzero_si256()))
#define _mm256_shuffle_i64x2(A, B, imm) \
- (__m256i)__builtin_ia32_shuf_i64x2_256((__v4di)(__m256i)(A), \
- (__v4di)(__m256i)(B), (int)(imm))
+ ((__m256i)__builtin_ia32_shuf_i64x2_256((__v4di)(__m256i)(A), \
+ (__v4di)(__m256i)(B), (int)(imm)))
#define _mm256_mask_shuffle_i64x2(W, U, A, B, imm) \
- (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
- (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
- (__v4di)(__m256i)(W))
+ ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
+ (__v4di)(__m256i)(W)))
#define _mm256_maskz_shuffle_i64x2(U, A, B, imm) \
- (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
- (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
- (__v4di)_mm256_setzero_si256())
+ ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
+ (__v4di)_mm256_setzero_si256()))
#define _mm_mask_shuffle_pd(W, U, A, B, M) \
- (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
- (__v2df)_mm_shuffle_pd((A), (B), (M)), \
- (__v2df)(__m128d)(W))
+ ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+ (__v2df)_mm_shuffle_pd((A), (B), (M)), \
+ (__v2df)(__m128d)(W)))
#define _mm_maskz_shuffle_pd(U, A, B, M) \
- (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
- (__v2df)_mm_shuffle_pd((A), (B), (M)), \
- (__v2df)_mm_setzero_pd())
+ ((__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+ (__v2df)_mm_shuffle_pd((A), (B), (M)), \
+ (__v2df)_mm_setzero_pd()))
#define _mm256_mask_shuffle_pd(W, U, A, B, M) \
- (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
- (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
- (__v4df)(__m256d)(W))
+ ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+ (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
+ (__v4df)(__m256d)(W)))
#define _mm256_maskz_shuffle_pd(U, A, B, M) \
- (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
- (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
- (__v4df)_mm256_setzero_pd())
+ ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+ (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
+ (__v4df)_mm256_setzero_pd()))
#define _mm_mask_shuffle_ps(W, U, A, B, M) \
- (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
- (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
- (__v4sf)(__m128)(W))
+ ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+ (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
+ (__v4sf)(__m128)(W)))
#define _mm_maskz_shuffle_ps(U, A, B, M) \
- (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
- (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
- (__v4sf)_mm_setzero_ps())
+ ((__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+ (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
+ (__v4sf)_mm_setzero_ps()))
#define _mm256_mask_shuffle_ps(W, U, A, B, M) \
- (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
- (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
- (__v8sf)(__m256)(W))
+ ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+ (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
+ (__v8sf)(__m256)(W)))
#define _mm256_maskz_shuffle_ps(U, A, B, M) \
- (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
- (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
- (__v8sf)_mm256_setzero_ps())
+ ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+ (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
+ (__v8sf)_mm256_setzero_ps()))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_rsqrt14_pd (__m128d __A)
@@ -7834,262 +7826,262 @@ _mm256_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
}
#define _mm256_extractf32x4_ps(A, imm) \
- (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
- (int)(imm), \
- (__v4sf)_mm_undefined_ps(), \
- (__mmask8)-1)
+ ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
+ (int)(imm), \
+ (__v4sf)_mm_undefined_ps(), \
+ (__mmask8)-1))
#define _mm256_mask_extractf32x4_ps(W, U, A, imm) \
- (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
- (int)(imm), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U))
+ ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
+ (int)(imm), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U)))
#define _mm256_maskz_extractf32x4_ps(U, A, imm) \
- (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
- (int)(imm), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U))
+ ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
+ (int)(imm), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U)))
#define _mm256_extracti32x4_epi32(A, imm) \
- (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
- (int)(imm), \
- (__v4si)_mm_undefined_si128(), \
- (__mmask8)-1)
+ ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
+ (int)(imm), \
+ (__v4si)_mm_undefined_si128(), \
+ (__mmask8)-1))
#define _mm256_mask_extracti32x4_epi32(W, U, A, imm) \
- (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
- (int)(imm), \
- (__v4si)(__m128i)(W), \
- (__mmask8)(U))
+ ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
+ (int)(imm), \
+ (__v4si)(__m128i)(W), \
+ (__mmask8)(U)))
#define _mm256_maskz_extracti32x4_epi32(U, A, imm) \
- (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
- (int)(imm), \
- (__v4si)_mm_setzero_si128(), \
- (__mmask8)(U))
+ ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
+ (int)(imm), \
+ (__v4si)_mm_setzero_si128(), \
+ (__mmask8)(U)))
#define _mm256_insertf32x4(A, B, imm) \
- (__m256)__builtin_ia32_insertf32x4_256((__v8sf)(__m256)(A), \
- (__v4sf)(__m128)(B), (int)(imm))
+ ((__m256)__builtin_ia32_insertf32x4_256((__v8sf)(__m256)(A), \
+ (__v4sf)(__m128)(B), (int)(imm)))
#define _mm256_mask_insertf32x4(W, U, A, B, imm) \
- (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+ ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
(__v8sf)_mm256_insertf32x4((A), (B), (imm)), \
- (__v8sf)(__m256)(W))
+ (__v8sf)(__m256)(W)))
#define _mm256_maskz_insertf32x4(U, A, B, imm) \
- (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+ ((__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
(__v8sf)_mm256_insertf32x4((A), (B), (imm)), \
- (__v8sf)_mm256_setzero_ps())
+ (__v8sf)_mm256_setzero_ps()))
#define _mm256_inserti32x4(A, B, imm) \
- (__m256i)__builtin_ia32_inserti32x4_256((__v8si)(__m256i)(A), \
- (__v4si)(__m128i)(B), (int)(imm))
+ ((__m256i)__builtin_ia32_inserti32x4_256((__v8si)(__m256i)(A), \
+ (__v4si)(__m128i)(B), (int)(imm)))
#define _mm256_mask_inserti32x4(W, U, A, B, imm) \
- (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
(__v8si)_mm256_inserti32x4((A), (B), (imm)), \
- (__v8si)(__m256i)(W))
+ (__v8si)(__m256i)(W)))
#define _mm256_maskz_inserti32x4(U, A, B, imm) \
- (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
(__v8si)_mm256_inserti32x4((A), (B), (imm)), \
- (__v8si)_mm256_setzero_si256())
+ (__v8si)_mm256_setzero_si256()))
#define _mm_getmant_pd(A, B, C) \
- (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
- (int)(((C)<<2) | (B)), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1)
+ ((__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)-1))
#define _mm_mask_getmant_pd(W, U, A, B, C) \
- (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
- (int)(((C)<<2) | (B)), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U))
+ ((__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v2df)(__m128d)(W), \
+ (__mmask8)(U)))
#define _mm_maskz_getmant_pd(U, A, B, C) \
- (__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
- (int)(((C)<<2) | (B)), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U))
+ ((__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v2df)_mm_setzero_pd(), \
+ (__mmask8)(U)))
#define _mm256_getmant_pd(A, B, C) \
- (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
- (int)(((C)<<2) | (B)), \
- (__v4df)_mm256_setzero_pd(), \
- (__mmask8)-1)
+ ((__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)-1))
#define _mm256_mask_getmant_pd(W, U, A, B, C) \
- (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
- (int)(((C)<<2) | (B)), \
- (__v4df)(__m256d)(W), \
- (__mmask8)(U))
+ ((__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v4df)(__m256d)(W), \
+ (__mmask8)(U)))
#define _mm256_maskz_getmant_pd(U, A, B, C) \
- (__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
- (int)(((C)<<2) | (B)), \
- (__v4df)_mm256_setzero_pd(), \
- (__mmask8)(U))
+ ((__m256d)__builtin_ia32_getmantpd256_mask((__v4df)(__m256d)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8)(U)))
#define _mm_getmant_ps(A, B, C) \
- (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
- (int)(((C)<<2) | (B)), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1)
+ ((__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)-1))
#define _mm_mask_getmant_ps(W, U, A, B, C) \
- (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
- (int)(((C)<<2) | (B)), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U))
+ ((__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v4sf)(__m128)(W), \
+ (__mmask8)(U)))
#define _mm_maskz_getmant_ps(U, A, B, C) \
- (__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
- (int)(((C)<<2) | (B)), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U))
+ ((__m128)__builtin_ia32_getmantps128_mask((__v4sf)(__m128)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8)(U)))
#define _mm256_getmant_ps(A, B, C) \
- (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
- (int)(((C)<<2) | (B)), \
- (__v8sf)_mm256_setzero_ps(), \
- (__mmask8)-1)
+ ((__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)-1))
#define _mm256_mask_getmant_ps(W, U, A, B, C) \
- (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
- (int)(((C)<<2) | (B)), \
- (__v8sf)(__m256)(W), \
- (__mmask8)(U))
+ ((__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8sf)(__m256)(W), \
+ (__mmask8)(U)))
#define _mm256_maskz_getmant_ps(U, A, B, C) \
- (__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
- (int)(((C)<<2) | (B)), \
- (__v8sf)_mm256_setzero_ps(), \
- (__mmask8)(U))
+ ((__m256)__builtin_ia32_getmantps256_mask((__v8sf)(__m256)(A), \
+ (int)(((C)<<2) | (B)), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8)(U)))
#define _mm_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \
- (__m128d)__builtin_ia32_gather3div2df((__v2df)(__m128d)(v1_old), \
- (void const *)(addr), \
- (__v2di)(__m128i)(index), \
- (__mmask8)(mask), (int)(scale))
+ ((__m128d)__builtin_ia32_gather3div2df((__v2df)(__m128d)(v1_old), \
+ (void const *)(addr), \
+ (__v2di)(__m128i)(index), \
+ (__mmask8)(mask), (int)(scale)))
#define _mm_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \
- (__m128i)__builtin_ia32_gather3div2di((__v2di)(__m128i)(v1_old), \
- (void const *)(addr), \
- (__v2di)(__m128i)(index), \
- (__mmask8)(mask), (int)(scale))
+ ((__m128i)__builtin_ia32_gather3div2di((__v2di)(__m128i)(v1_old), \
+ (void const *)(addr), \
+ (__v2di)(__m128i)(index), \
+ (__mmask8)(mask), (int)(scale)))
#define _mm256_mmask_i64gather_pd(v1_old, mask, index, addr, scale) \
- (__m256d)__builtin_ia32_gather3div4df((__v4df)(__m256d)(v1_old), \
- (void const *)(addr), \
- (__v4di)(__m256i)(index), \
- (__mmask8)(mask), (int)(scale))
+ ((__m256d)__builtin_ia32_gather3div4df((__v4df)(__m256d)(v1_old), \
+ (void const *)(addr), \
+ (__v4di)(__m256i)(index), \
+ (__mmask8)(mask), (int)(scale)))
#define _mm256_mmask_i64gather_epi64(v1_old, mask, index, addr, scale) \
- (__m256i)__builtin_ia32_gather3div4di((__v4di)(__m256i)(v1_old), \
- (void const *)(addr), \
- (__v4di)(__m256i)(index), \
- (__mmask8)(mask), (int)(scale))
+ ((__m256i)__builtin_ia32_gather3div4di((__v4di)(__m256i)(v1_old), \
+ (void const *)(addr), \
+ (__v4di)(__m256i)(index), \
+ (__mmask8)(mask), (int)(scale)))
#define _mm_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \
- (__m128)__builtin_ia32_gather3div4sf((__v4sf)(__m128)(v1_old), \
- (void const *)(addr), \
- (__v2di)(__m128i)(index), \
- (__mmask8)(mask), (int)(scale))
-
-#define _mm_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
- (__m128i)__builtin_ia32_gather3div4si((__v4si)(__m128i)(v1_old), \
+ ((__m128)__builtin_ia32_gather3div4sf((__v4sf)(__m128)(v1_old), \
(void const *)(addr), \
(__v2di)(__m128i)(index), \
- (__mmask8)(mask), (int)(scale))
+ (__mmask8)(mask), (int)(scale)))
-#define _mm256_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \
- (__m128)__builtin_ia32_gather3div8sf((__v4sf)(__m128)(v1_old), \
- (void const *)(addr), \
- (__v4di)(__m256i)(index), \
- (__mmask8)(mask), (int)(scale))
+#define _mm_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
+ ((__m128i)__builtin_ia32_gather3div4si((__v4si)(__m128i)(v1_old), \
+ (void const *)(addr), \
+ (__v2di)(__m128i)(index), \
+ (__mmask8)(mask), (int)(scale)))
-#define _mm256_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
- (__m128i)__builtin_ia32_gather3div8si((__v4si)(__m128i)(v1_old), \
+#define _mm256_mmask_i64gather_ps(v1_old, mask, index, addr, scale) \
+ ((__m128)__builtin_ia32_gather3div8sf((__v4sf)(__m128)(v1_old), \
(void const *)(addr), \
(__v4di)(__m256i)(index), \
- (__mmask8)(mask), (int)(scale))
+ (__mmask8)(mask), (int)(scale)))
+
+#define _mm256_mmask_i64gather_epi32(v1_old, mask, index, addr, scale) \
+ ((__m128i)__builtin_ia32_gather3div8si((__v4si)(__m128i)(v1_old), \
+ (void const *)(addr), \
+ (__v4di)(__m256i)(index), \
+ (__mmask8)(mask), (int)(scale)))
#define _mm_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \
- (__m128d)__builtin_ia32_gather3siv2df((__v2df)(__m128d)(v1_old), \
- (void const *)(addr), \
- (__v4si)(__m128i)(index), \
- (__mmask8)(mask), (int)(scale))
+ ((__m128d)__builtin_ia32_gather3siv2df((__v2df)(__m128d)(v1_old), \
+ (void const *)(addr), \
+ (__v4si)(__m128i)(index), \
+ (__mmask8)(mask), (int)(scale)))
#define _mm_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \
- (__m128i)__builtin_ia32_gather3siv2di((__v2di)(__m128i)(v1_old), \
- (void const *)(addr), \
- (__v4si)(__m128i)(index), \
- (__mmask8)(mask), (int)(scale))
+ ((__m128i)__builtin_ia32_gather3siv2di((__v2di)(__m128i)(v1_old), \
+ (void const *)(addr), \
+ (__v4si)(__m128i)(index), \
+ (__mmask8)(mask), (int)(scale)))
#define _mm256_mmask_i32gather_pd(v1_old, mask, index, addr, scale) \
- (__m256d)__builtin_ia32_gather3siv4df((__v4df)(__m256d)(v1_old), \
- (void const *)(addr), \
- (__v4si)(__m128i)(index), \
- (__mmask8)(mask), (int)(scale))
+ ((__m256d)__builtin_ia32_gather3siv4df((__v4df)(__m256d)(v1_old), \
+ (void const *)(addr), \
+ (__v4si)(__m128i)(index), \
+ (__mmask8)(mask), (int)(scale)))
#define _mm256_mmask_i32gather_epi64(v1_old, mask, index, addr, scale) \
- (__m256i)__builtin_ia32_gather3siv4di((__v4di)(__m256i)(v1_old), \
- (void const *)(addr), \
- (__v4si)(__m128i)(index), \
- (__mmask8)(mask), (int)(scale))
+ ((__m256i)__builtin_ia32_gather3siv4di((__v4di)(__m256i)(v1_old), \
+ (void const *)(addr), \
+ (__v4si)(__m128i)(index), \
+ (__mmask8)(mask), (int)(scale)))
#define _mm_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \
- (__m128)__builtin_ia32_gather3siv4sf((__v4sf)(__m128)(v1_old), \
- (void const *)(addr), \
- (__v4si)(__m128i)(index), \
- (__mmask8)(mask), (int)(scale))
-
-#define _mm_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
- (__m128i)__builtin_ia32_gather3siv4si((__v4si)(__m128i)(v1_old), \
+ ((__m128)__builtin_ia32_gather3siv4sf((__v4sf)(__m128)(v1_old), \
(void const *)(addr), \
(__v4si)(__m128i)(index), \
- (__mmask8)(mask), (int)(scale))
+ (__mmask8)(mask), (int)(scale)))
-#define _mm256_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \
- (__m256)__builtin_ia32_gather3siv8sf((__v8sf)(__m256)(v1_old), \
- (void const *)(addr), \
- (__v8si)(__m256i)(index), \
- (__mmask8)(mask), (int)(scale))
+#define _mm_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
+ ((__m128i)__builtin_ia32_gather3siv4si((__v4si)(__m128i)(v1_old), \
+ (void const *)(addr), \
+ (__v4si)(__m128i)(index), \
+ (__mmask8)(mask), (int)(scale)))
-#define _mm256_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
- (__m256i)__builtin_ia32_gather3siv8si((__v8si)(__m256i)(v1_old), \
+#define _mm256_mmask_i32gather_ps(v1_old, mask, index, addr, scale) \
+ ((__m256)__builtin_ia32_gather3siv8sf((__v8sf)(__m256)(v1_old), \
(void const *)(addr), \
(__v8si)(__m256i)(index), \
- (__mmask8)(mask), (int)(scale))
+ (__mmask8)(mask), (int)(scale)))
+
+#define _mm256_mmask_i32gather_epi32(v1_old, mask, index, addr, scale) \
+ ((__m256i)__builtin_ia32_gather3siv8si((__v8si)(__m256i)(v1_old), \
+ (void const *)(addr), \
+ (__v8si)(__m256i)(index), \
+ (__mmask8)(mask), (int)(scale)))
#define _mm256_permutex_pd(X, C) \
- (__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(X), (int)(C))
+ ((__m256d)__builtin_ia32_permdf256((__v4df)(__m256d)(X), (int)(C)))
#define _mm256_mask_permutex_pd(W, U, X, C) \
- (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+ ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
(__v4df)_mm256_permutex_pd((X), (C)), \
- (__v4df)(__m256d)(W))
+ (__v4df)(__m256d)(W)))
#define _mm256_maskz_permutex_pd(U, X, C) \
- (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
- (__v4df)_mm256_permutex_pd((X), (C)), \
- (__v4df)_mm256_setzero_pd())
+ ((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+ (__v4df)_mm256_permutex_pd((X), (C)), \
+ (__v4df)_mm256_setzero_pd()))
#define _mm256_permutex_epi64(X, C) \
- (__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(X), (int)(C))
+ ((__m256i)__builtin_ia32_permdi256((__v4di)(__m256i)(X), (int)(C)))
#define _mm256_mask_permutex_epi64(W, U, X, C) \
- (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
(__v4di)_mm256_permutex_epi64((X), (C)), \
- (__v4di)(__m256i)(W))
+ (__v4di)(__m256i)(W)))
#define _mm256_maskz_permutex_epi64(U, X, C) \
- (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
(__v4di)_mm256_permutex_epi64((X), (C)), \
- (__v4di)_mm256_setzero_si256())
+ (__v4di)_mm256_setzero_si256()))
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_permutexvar_pd (__m256i __X, __m256d __Y)
@@ -8175,60 +8167,60 @@ _mm256_maskz_permutexvar_epi32(__mmask8 __M, __m256i __X, __m256i __Y)
}
#define _mm_alignr_epi32(A, B, imm) \
- (__m128i)__builtin_ia32_alignd128((__v4si)(__m128i)(A), \
- (__v4si)(__m128i)(B), (int)(imm))
+ ((__m128i)__builtin_ia32_alignd128((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), (int)(imm)))
#define _mm_mask_alignr_epi32(W, U, A, B, imm) \
- (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+ ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
(__v4si)_mm_alignr_epi32((A), (B), (imm)), \
- (__v4si)(__m128i)(W))
+ (__v4si)(__m128i)(W)))
#define _mm_maskz_alignr_epi32(U, A, B, imm) \
- (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+ ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
(__v4si)_mm_alignr_epi32((A), (B), (imm)), \
- (__v4si)_mm_setzero_si128())
+ (__v4si)_mm_setzero_si128()))
#define _mm256_alignr_epi32(A, B, imm) \
- (__m256i)__builtin_ia32_alignd256((__v8si)(__m256i)(A), \
- (__v8si)(__m256i)(B), (int)(imm))
+ ((__m256i)__builtin_ia32_alignd256((__v8si)(__m256i)(A), \
+ (__v8si)(__m256i)(B), (int)(imm)))
#define _mm256_mask_alignr_epi32(W, U, A, B, imm) \
- (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
(__v8si)_mm256_alignr_epi32((A), (B), (imm)), \
- (__v8si)(__m256i)(W))
+ (__v8si)(__m256i)(W)))
#define _mm256_maskz_alignr_epi32(U, A, B, imm) \
- (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
(__v8si)_mm256_alignr_epi32((A), (B), (imm)), \
- (__v8si)_mm256_setzero_si256())
+ (__v8si)_mm256_setzero_si256()))
#define _mm_alignr_epi64(A, B, imm) \
- (__m128i)__builtin_ia32_alignq128((__v2di)(__m128i)(A), \
- (__v2di)(__m128i)(B), (int)(imm))
+ ((__m128i)__builtin_ia32_alignq128((__v2di)(__m128i)(A), \
+ (__v2di)(__m128i)(B), (int)(imm)))
#define _mm_mask_alignr_epi64(W, U, A, B, imm) \
- (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
+ ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
(__v2di)_mm_alignr_epi64((A), (B), (imm)), \
- (__v2di)(__m128i)(W))
+ (__v2di)(__m128i)(W)))
#define _mm_maskz_alignr_epi64(U, A, B, imm) \
- (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
+ ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
(__v2di)_mm_alignr_epi64((A), (B), (imm)), \
- (__v2di)_mm_setzero_si128())
+ (__v2di)_mm_setzero_si128()))
#define _mm256_alignr_epi64(A, B, imm) \
- (__m256i)__builtin_ia32_alignq256((__v4di)(__m256i)(A), \
- (__v4di)(__m256i)(B), (int)(imm))
+ ((__m256i)__builtin_ia32_alignq256((__v4di)(__m256i)(A), \
+ (__v4di)(__m256i)(B), (int)(imm)))
#define _mm256_mask_alignr_epi64(W, U, A, B, imm) \
- (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
(__v4di)_mm256_alignr_epi64((A), (B), (imm)), \
- (__v4di)(__m256i)(W))
+ (__v4di)(__m256i)(W)))
#define _mm256_maskz_alignr_epi64(U, A, B, imm) \
- (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
(__v4di)_mm256_alignr_epi64((A), (B), (imm)), \
- (__v4di)_mm256_setzero_si256())
+ (__v4di)_mm256_setzero_si256()))
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_movehdup_ps (__m128 __W, __mmask8 __U, __m128 __A)
@@ -8295,24 +8287,24 @@ _mm256_maskz_moveldup_ps (__mmask8 __U, __m256 __A)
}
#define _mm256_mask_shuffle_epi32(W, U, A, I) \
- (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
- (__v8si)_mm256_shuffle_epi32((A), (I)), \
- (__v8si)(__m256i)(W))
+ ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ (__v8si)_mm256_shuffle_epi32((A), (I)), \
+ (__v8si)(__m256i)(W)))
#define _mm256_maskz_shuffle_epi32(U, A, I) \
- (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
- (__v8si)_mm256_shuffle_epi32((A), (I)), \
- (__v8si)_mm256_setzero_si256())
+ ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ (__v8si)_mm256_shuffle_epi32((A), (I)), \
+ (__v8si)_mm256_setzero_si256()))
#define _mm_mask_shuffle_epi32(W, U, A, I) \
- (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
- (__v4si)_mm_shuffle_epi32((A), (I)), \
- (__v4si)(__m128i)(W))
+ ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+ (__v4si)_mm_shuffle_epi32((A), (I)), \
+ (__v4si)(__m128i)(W)))
#define _mm_maskz_shuffle_epi32(U, A, I) \
- (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
- (__v4si)_mm_shuffle_epi32((A), (I)), \
- (__v4si)_mm_setzero_si128())
+ ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+ (__v4si)_mm_shuffle_epi32((A), (I)), \
+ (__v4si)_mm_setzero_si128()))
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_mov_pd (__m128d __W, __mmask8 __U, __m128d __A)
@@ -8413,27 +8405,27 @@ _mm256_maskz_cvtph_ps (__mmask8 __U, __m128i __A)
}
#define _mm_mask_cvt_roundps_ph(W, U, A, I) \
- (__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
- (__v8hi)(__m128i)(W), \
- (__mmask8)(U))
+ ((__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
+ (__v8hi)(__m128i)(W), \
+ (__mmask8)(U)))
#define _mm_maskz_cvt_roundps_ph(U, A, I) \
- (__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
- (__v8hi)_mm_setzero_si128(), \
- (__mmask8)(U))
+ ((__m128i)__builtin_ia32_vcvtps2ph_mask((__v4sf)(__m128)(A), (int)(I), \
+ (__v8hi)_mm_setzero_si128(), \
+ (__mmask8)(U)))
#define _mm_mask_cvtps_ph _mm_mask_cvt_roundps_ph
#define _mm_maskz_cvtps_ph _mm_maskz_cvt_roundps_ph
#define _mm256_mask_cvt_roundps_ph(W, U, A, I) \
- (__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
- (__v8hi)(__m128i)(W), \
- (__mmask8)(U))
+ ((__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
+ (__v8hi)(__m128i)(W), \
+ (__mmask8)(U)))
#define _mm256_maskz_cvt_roundps_ph(U, A, I) \
- (__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
- (__v8hi)_mm_setzero_si128(), \
- (__mmask8)(U))
+ ((__m128i)__builtin_ia32_vcvtps2ph256_mask((__v8sf)(__m256)(A), (int)(I), \
+ (__v8hi)_mm_setzero_si128(), \
+ (__mmask8)(U)))
#define _mm256_mask_cvtps_ph _mm256_mask_cvt_roundps_ph
#define _mm256_maskz_cvtps_ph _mm256_maskz_cvt_roundps_ph
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vlvbmi2intrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vlvbmi2intrin.h
index a40f926de75a..77af2d5cbd2a 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vlvbmi2intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vlvbmi2intrin.h
@@ -15,8 +15,14 @@
#define __AVX512VLVBMI2INTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vbmi2"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vbmi2"), __min_vector_width__(256)))
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vl,avx512vbmi2,no-evex512"), \
+ __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vl,avx512vbmi2,no-evex512"), \
+ __min_vector_width__(256)))
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_compress_epi16(__m128i __S, __mmask8 __U, __m128i __D)
@@ -239,172 +245,172 @@ _mm256_maskz_expandloadu_epi8(__mmask32 __U, void const *__P)
}
#define _mm256_shldi_epi64(A, B, I) \
- (__m256i)__builtin_ia32_vpshldq256((__v4di)(__m256i)(A), \
- (__v4di)(__m256i)(B), (int)(I))
+ ((__m256i)__builtin_ia32_vpshldq256((__v4di)(__m256i)(A), \
+ (__v4di)(__m256i)(B), (int)(I)))
#define _mm256_mask_shldi_epi64(S, U, A, B, I) \
- (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
- (__v4di)_mm256_shldi_epi64((A), (B), (I)), \
- (__v4di)(__m256i)(S))
+ ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ (__v4di)_mm256_shldi_epi64((A), (B), (I)), \
+ (__v4di)(__m256i)(S)))
#define _mm256_maskz_shldi_epi64(U, A, B, I) \
- (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
- (__v4di)_mm256_shldi_epi64((A), (B), (I)), \
- (__v4di)_mm256_setzero_si256())
+ ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ (__v4di)_mm256_shldi_epi64((A), (B), (I)), \
+ (__v4di)_mm256_setzero_si256()))
#define _mm_shldi_epi64(A, B, I) \
- (__m128i)__builtin_ia32_vpshldq128((__v2di)(__m128i)(A), \
- (__v2di)(__m128i)(B), (int)(I))
+ ((__m128i)__builtin_ia32_vpshldq128((__v2di)(__m128i)(A), \
+ (__v2di)(__m128i)(B), (int)(I)))
#define _mm_mask_shldi_epi64(S, U, A, B, I) \
- (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
- (__v2di)_mm_shldi_epi64((A), (B), (I)), \
- (__v2di)(__m128i)(S))
+ ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
+ (__v2di)_mm_shldi_epi64((A), (B), (I)), \
+ (__v2di)(__m128i)(S)))
#define _mm_maskz_shldi_epi64(U, A, B, I) \
- (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
- (__v2di)_mm_shldi_epi64((A), (B), (I)), \
- (__v2di)_mm_setzero_si128())
+ ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
+ (__v2di)_mm_shldi_epi64((A), (B), (I)), \
+ (__v2di)_mm_setzero_si128()))
#define _mm256_shldi_epi32(A, B, I) \
- (__m256i)__builtin_ia32_vpshldd256((__v8si)(__m256i)(A), \
- (__v8si)(__m256i)(B), (int)(I))
+ ((__m256i)__builtin_ia32_vpshldd256((__v8si)(__m256i)(A), \
+ (__v8si)(__m256i)(B), (int)(I)))
#define _mm256_mask_shldi_epi32(S, U, A, B, I) \
- (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
- (__v8si)_mm256_shldi_epi32((A), (B), (I)), \
- (__v8si)(__m256i)(S))
+ ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ (__v8si)_mm256_shldi_epi32((A), (B), (I)), \
+ (__v8si)(__m256i)(S)))
#define _mm256_maskz_shldi_epi32(U, A, B, I) \
- (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
- (__v8si)_mm256_shldi_epi32((A), (B), (I)), \
- (__v8si)_mm256_setzero_si256())
+ ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ (__v8si)_mm256_shldi_epi32((A), (B), (I)), \
+ (__v8si)_mm256_setzero_si256()))
#define _mm_shldi_epi32(A, B, I) \
- (__m128i)__builtin_ia32_vpshldd128((__v4si)(__m128i)(A), \
- (__v4si)(__m128i)(B), (int)(I))
+ ((__m128i)__builtin_ia32_vpshldd128((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), (int)(I)))
#define _mm_mask_shldi_epi32(S, U, A, B, I) \
- (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
- (__v4si)_mm_shldi_epi32((A), (B), (I)), \
- (__v4si)(__m128i)(S))
+ ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+ (__v4si)_mm_shldi_epi32((A), (B), (I)), \
+ (__v4si)(__m128i)(S)))
#define _mm_maskz_shldi_epi32(U, A, B, I) \
- (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
- (__v4si)_mm_shldi_epi32((A), (B), (I)), \
- (__v4si)_mm_setzero_si128())
+ ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+ (__v4si)_mm_shldi_epi32((A), (B), (I)), \
+ (__v4si)_mm_setzero_si128()))
#define _mm256_shldi_epi16(A, B, I) \
- (__m256i)__builtin_ia32_vpshldw256((__v16hi)(__m256i)(A), \
- (__v16hi)(__m256i)(B), (int)(I))
+ ((__m256i)__builtin_ia32_vpshldw256((__v16hi)(__m256i)(A), \
+ (__v16hi)(__m256i)(B), (int)(I)))
#define _mm256_mask_shldi_epi16(S, U, A, B, I) \
- (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
- (__v16hi)_mm256_shldi_epi16((A), (B), (I)), \
- (__v16hi)(__m256i)(S))
+ ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_shldi_epi16((A), (B), (I)), \
+ (__v16hi)(__m256i)(S)))
#define _mm256_maskz_shldi_epi16(U, A, B, I) \
- (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
- (__v16hi)_mm256_shldi_epi16((A), (B), (I)), \
- (__v16hi)_mm256_setzero_si256())
+ ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_shldi_epi16((A), (B), (I)), \
+ (__v16hi)_mm256_setzero_si256()))
#define _mm_shldi_epi16(A, B, I) \
- (__m128i)__builtin_ia32_vpshldw128((__v8hi)(__m128i)(A), \
- (__v8hi)(__m128i)(B), (int)(I))
+ ((__m128i)__builtin_ia32_vpshldw128((__v8hi)(__m128i)(A), \
+ (__v8hi)(__m128i)(B), (int)(I)))
#define _mm_mask_shldi_epi16(S, U, A, B, I) \
- (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
- (__v8hi)_mm_shldi_epi16((A), (B), (I)), \
- (__v8hi)(__m128i)(S))
+ ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ (__v8hi)_mm_shldi_epi16((A), (B), (I)), \
+ (__v8hi)(__m128i)(S)))
#define _mm_maskz_shldi_epi16(U, A, B, I) \
- (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
- (__v8hi)_mm_shldi_epi16((A), (B), (I)), \
- (__v8hi)_mm_setzero_si128())
+ ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ (__v8hi)_mm_shldi_epi16((A), (B), (I)), \
+ (__v8hi)_mm_setzero_si128()))
#define _mm256_shrdi_epi64(A, B, I) \
- (__m256i)__builtin_ia32_vpshrdq256((__v4di)(__m256i)(A), \
- (__v4di)(__m256i)(B), (int)(I))
+ ((__m256i)__builtin_ia32_vpshrdq256((__v4di)(__m256i)(A), \
+ (__v4di)(__m256i)(B), (int)(I)))
#define _mm256_mask_shrdi_epi64(S, U, A, B, I) \
- (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
- (__v4di)_mm256_shrdi_epi64((A), (B), (I)), \
- (__v4di)(__m256i)(S))
+ ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ (__v4di)_mm256_shrdi_epi64((A), (B), (I)), \
+ (__v4di)(__m256i)(S)))
#define _mm256_maskz_shrdi_epi64(U, A, B, I) \
- (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
- (__v4di)_mm256_shrdi_epi64((A), (B), (I)), \
- (__v4di)_mm256_setzero_si256())
+ ((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ (__v4di)_mm256_shrdi_epi64((A), (B), (I)), \
+ (__v4di)_mm256_setzero_si256()))
#define _mm_shrdi_epi64(A, B, I) \
- (__m128i)__builtin_ia32_vpshrdq128((__v2di)(__m128i)(A), \
- (__v2di)(__m128i)(B), (int)(I))
+ ((__m128i)__builtin_ia32_vpshrdq128((__v2di)(__m128i)(A), \
+ (__v2di)(__m128i)(B), (int)(I)))
#define _mm_mask_shrdi_epi64(S, U, A, B, I) \
- (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
- (__v2di)_mm_shrdi_epi64((A), (B), (I)), \
- (__v2di)(__m128i)(S))
+ ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
+ (__v2di)_mm_shrdi_epi64((A), (B), (I)), \
+ (__v2di)(__m128i)(S)))
#define _mm_maskz_shrdi_epi64(U, A, B, I) \
- (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
- (__v2di)_mm_shrdi_epi64((A), (B), (I)), \
- (__v2di)_mm_setzero_si128())
+ ((__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
+ (__v2di)_mm_shrdi_epi64((A), (B), (I)), \
+ (__v2di)_mm_setzero_si128()))
#define _mm256_shrdi_epi32(A, B, I) \
- (__m256i)__builtin_ia32_vpshrdd256((__v8si)(__m256i)(A), \
- (__v8si)(__m256i)(B), (int)(I))
+ ((__m256i)__builtin_ia32_vpshrdd256((__v8si)(__m256i)(A), \
+ (__v8si)(__m256i)(B), (int)(I)))
#define _mm256_mask_shrdi_epi32(S, U, A, B, I) \
- (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
- (__v8si)_mm256_shrdi_epi32((A), (B), (I)), \
- (__v8si)(__m256i)(S))
+ ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ (__v8si)_mm256_shrdi_epi32((A), (B), (I)), \
+ (__v8si)(__m256i)(S)))
#define _mm256_maskz_shrdi_epi32(U, A, B, I) \
- (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
- (__v8si)_mm256_shrdi_epi32((A), (B), (I)), \
- (__v8si)_mm256_setzero_si256())
+ ((__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ (__v8si)_mm256_shrdi_epi32((A), (B), (I)), \
+ (__v8si)_mm256_setzero_si256()))
#define _mm_shrdi_epi32(A, B, I) \
- (__m128i)__builtin_ia32_vpshrdd128((__v4si)(__m128i)(A), \
- (__v4si)(__m128i)(B), (int)(I))
+ ((__m128i)__builtin_ia32_vpshrdd128((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), (int)(I)))
#define _mm_mask_shrdi_epi32(S, U, A, B, I) \
- (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
- (__v4si)_mm_shrdi_epi32((A), (B), (I)), \
- (__v4si)(__m128i)(S))
+ ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+ (__v4si)_mm_shrdi_epi32((A), (B), (I)), \
+ (__v4si)(__m128i)(S)))
#define _mm_maskz_shrdi_epi32(U, A, B, I) \
- (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
- (__v4si)_mm_shrdi_epi32((A), (B), (I)), \
- (__v4si)_mm_setzero_si128())
+ ((__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+ (__v4si)_mm_shrdi_epi32((A), (B), (I)), \
+ (__v4si)_mm_setzero_si128()))
#define _mm256_shrdi_epi16(A, B, I) \
- (__m256i)__builtin_ia32_vpshrdw256((__v16hi)(__m256i)(A), \
- (__v16hi)(__m256i)(B), (int)(I))
+ ((__m256i)__builtin_ia32_vpshrdw256((__v16hi)(__m256i)(A), \
+ (__v16hi)(__m256i)(B), (int)(I)))
#define _mm256_mask_shrdi_epi16(S, U, A, B, I) \
- (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
- (__v16hi)_mm256_shrdi_epi16((A), (B), (I)), \
- (__v16hi)(__m256i)(S))
+ ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_shrdi_epi16((A), (B), (I)), \
+ (__v16hi)(__m256i)(S)))
#define _mm256_maskz_shrdi_epi16(U, A, B, I) \
- (__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
- (__v16hi)_mm256_shrdi_epi16((A), (B), (I)), \
- (__v16hi)_mm256_setzero_si256())
+ ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
+ (__v16hi)_mm256_shrdi_epi16((A), (B), (I)), \
+ (__v16hi)_mm256_setzero_si256()))
#define _mm_shrdi_epi16(A, B, I) \
- (__m128i)__builtin_ia32_vpshrdw128((__v8hi)(__m128i)(A), \
- (__v8hi)(__m128i)(B), (int)(I))
+ ((__m128i)__builtin_ia32_vpshrdw128((__v8hi)(__m128i)(A), \
+ (__v8hi)(__m128i)(B), (int)(I)))
#define _mm_mask_shrdi_epi16(S, U, A, B, I) \
- (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
- (__v8hi)_mm_shrdi_epi16((A), (B), (I)), \
- (__v8hi)(__m128i)(S))
+ ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ (__v8hi)_mm_shrdi_epi16((A), (B), (I)), \
+ (__v8hi)(__m128i)(S)))
#define _mm_maskz_shrdi_epi16(U, A, B, I) \
- (__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
- (__v8hi)_mm_shrdi_epi16((A), (B), (I)), \
- (__v8hi)_mm_setzero_si128())
+ ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
+ (__v8hi)_mm_shrdi_epi16((A), (B), (I)), \
+ (__v8hi)_mm_setzero_si128()))
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_shldv_epi64(__m256i __A, __m256i __B, __m256i __C)
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vlvnniintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vlvnniintrin.h
index 71ac1b4370d4..d1e5cd9d6983 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vlvnniintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vlvnniintrin.h
@@ -15,8 +15,14 @@
#define __AVX512VLVNNIINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vnni"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vnni"), __min_vector_width__(256)))
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vl,avx512vnni,no-evex512"), \
+ __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vl,avx512vnni,no-evex512"), \
+ __min_vector_width__(256)))
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with
/// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed
@@ -25,7 +31,7 @@
///
/// This intrinsic corresponds to the <c> VPDPBUSD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
/// tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
@@ -34,9 +40,9 @@
/// DST.dword[j] := S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
/// ENDFOR
/// DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
#define _mm256_dpbusd_epi32(S, A, B) \
- (__m256i)__builtin_ia32_vpdpbusd256((__v8si)(S), (__v8si)(A), (__v8si)(B))
+ ((__m256i)__builtin_ia32_vpdpbusd256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with
/// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed
@@ -45,7 +51,7 @@
///
/// This intrinsic corresponds to the <c> VPDPBUSDS </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
/// tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
@@ -54,9 +60,9 @@
/// DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
/// ENDFOR
/// DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
#define _mm256_dpbusds_epi32(S, A, B) \
- (__m256i)__builtin_ia32_vpdpbusds256((__v8si)(S), (__v8si)(A), (__v8si)(B))
+ ((__m256i)__builtin_ia32_vpdpbusds256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with
/// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit
@@ -65,16 +71,16 @@
///
/// This intrinsic corresponds to the <c> VPDPWSSD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
/// tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
/// DST.dword[j] := S.dword[j] + tmp1 + tmp2
/// ENDFOR
/// DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
#define _mm256_dpwssd_epi32(S, A, B) \
- (__m256i)__builtin_ia32_vpdpwssd256((__v8si)(S), (__v8si)(A), (__v8si)(B))
+ ((__m256i)__builtin_ia32_vpdpwssd256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with
/// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit
@@ -83,16 +89,16 @@
///
/// This intrinsic corresponds to the <c> VPDPWSSDS </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
/// tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
/// DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2)
/// ENDFOR
/// DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
#define _mm256_dpwssds_epi32(S, A, B) \
- (__m256i)__builtin_ia32_vpdpwssds256((__v8si)(S), (__v8si)(A), (__v8si)(B))
+ ((__m256i)__builtin_ia32_vpdpwssds256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with
/// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed
@@ -101,7 +107,7 @@
///
/// This intrinsic corresponds to the <c> VPDPBUSD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 3
/// tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
/// tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
@@ -110,9 +116,9 @@
/// DST.dword[j] := S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
/// ENDFOR
/// DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
#define _mm_dpbusd_epi32(S, A, B) \
- (__m128i)__builtin_ia32_vpdpbusd128((__v4si)(S), (__v4si)(A), (__v4si)(B))
+ ((__m128i)__builtin_ia32_vpdpbusd128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a A with
/// corresponding signed 8-bit integers in \a B, producing 4 intermediate signed
@@ -121,7 +127,7 @@
///
/// This intrinsic corresponds to the <c> VPDPBUSDS </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 3
/// tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
/// tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
@@ -130,9 +136,9 @@
/// DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
/// ENDFOR
/// DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
#define _mm_dpbusds_epi32(S, A, B) \
- (__m128i)__builtin_ia32_vpdpbusds128((__v4si)(S), (__v4si)(A), (__v4si)(B))
+ ((__m128i)__builtin_ia32_vpdpbusds128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with
/// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit
@@ -141,16 +147,16 @@
///
/// This intrinsic corresponds to the <c> VPDPWSSD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 3
/// tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
/// tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
/// DST.dword[j] := S.dword[j] + tmp1 + tmp2
/// ENDFOR
/// DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
#define _mm_dpwssd_epi32(S, A, B) \
- (__m128i)__builtin_ia32_vpdpwssd128((__v4si)(S), (__v4si)(A), (__v4si)(B))
+ ((__m128i)__builtin_ia32_vpdpwssd128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a A with
/// corresponding 16-bit integers in \a B, producing 2 intermediate signed 32-bit
@@ -159,16 +165,16 @@
///
/// This intrinsic corresponds to the <c> VPDPWSSDS </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 3
/// tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
/// tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
/// DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2)
/// ENDFOR
/// DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
#define _mm_dpwssds_epi32(S, A, B) \
- (__m128i)__builtin_ia32_vpdpwssds128((__v4si)(S), (__v4si)(A), (__v4si)(B))
+ ((__m128i)__builtin_ia32_vpdpwssds128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_dpbusd_epi32(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vlvp2intersectintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vlvp2intersectintrin.h
index 3e0815e5d46f..63a31241a5ed 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vlvp2intersectintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vlvp2intersectintrin.h
@@ -28,12 +28,14 @@
#ifndef _AVX512VLVP2INTERSECT_H
#define _AVX512VLVP2INTERSECT_H
-#define __DEFAULT_FN_ATTRS128 \
- __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vp2intersect"), \
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vl,avx512vp2intersect,no-evex512"), \
__min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS256 \
- __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vp2intersect"), \
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vl,avx512vp2intersect,no-evex512"), \
__min_vector_width__(256)))
/// Store, in an even/odd pair of mask registers, the indicators of the
/// locations of value matches between dwords in operands __a and __b.
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vnniintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vnniintrin.h
index 9935a119aaca..0fb381a12f2f 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vnniintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vnniintrin.h
@@ -15,8 +15,9 @@
#define __AVX512VNNIINTRIN_H
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vnni"), __min_vector_width__(512)))
-
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vnni,evex512"), __min_vector_width__(512)))
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_dpbusd_epi32(__m512i __S, __m512i __A, __m512i __B)
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vp2intersectintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vp2intersectintrin.h
index 5d3cb48cfd20..16552cae3b4f 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vp2intersectintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vp2intersectintrin.h
@@ -28,8 +28,9 @@
#ifndef _AVX512VP2INTERSECT_H
#define _AVX512VP2INTERSECT_H
-#define __DEFAULT_FN_ATTRS \
- __attribute__((__always_inline__, __nodebug__, __target__("avx512vp2intersect"), \
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vp2intersect,evex512"), \
__min_vector_width__(512)))
/// Store, in an even/odd pair of mask registers, the indicators of the
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vpopcntdqintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vpopcntdqintrin.h
index bb435e623330..e73e7e4f7131 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vpopcntdqintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vpopcntdqintrin.h
@@ -17,7 +17,9 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS \
- __attribute__((__always_inline__, __nodebug__, __target__("avx512vpopcntdq"), __min_vector_width__(512)))
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vpopcntdq,evex512"), \
+ __min_vector_width__(512)))
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_popcnt_epi64(__m512i __A) {
return (__m512i)__builtin_ia32_vpopcntq_512((__v8di)__A);
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vpopcntdqvlintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vpopcntdqvlintrin.h
index a3cb9b6bccb3..b2df2e84d3ed 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vpopcntdqvlintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vpopcntdqvlintrin.h
@@ -17,9 +17,13 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS128 \
- __attribute__((__always_inline__, __nodebug__, __target__("avx512vpopcntdq,avx512vl"), __min_vector_width__(128)))
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vpopcntdq,avx512vl,no-evex512"), \
+ __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 \
- __attribute__((__always_inline__, __nodebug__, __target__("avx512vpopcntdq,avx512vl"), __min_vector_width__(256)))
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512vpopcntdq,avx512vl,no-evex512"), \
+ __min_vector_width__(256)))
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_popcnt_epi64(__m128i __A) {
diff --git a/contrib/llvm-project/clang/lib/Headers/avxifmaintrin.h b/contrib/llvm-project/clang/lib/Headers/avxifmaintrin.h
new file mode 100644
index 000000000000..5c782d2a5b86
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/avxifmaintrin.h
@@ -0,0 +1,177 @@
+/*===----------------- avxifmaintrin.h - IFMA intrinsics -------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avxifmaintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVXIFMAINTRIN_H
+#define __AVXIFMAINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, __target__("avxifma"), \
+ __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, __target__("avxifma"), \
+ __min_vector_width__(256)))
+
+// must vex-encoding
+
+/// Multiply packed unsigned 52-bit integers in each 64-bit element of \a __Y
+/// and \a __Z to form a 104-bit intermediate result. Add the high 52-bit
+/// unsigned integer from the intermediate result with the corresponding
+/// unsigned 64-bit integer in \a __X, and store the results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i
+/// _mm_madd52hi_avx_epu64 (__m128i __X, __m128i __Y, __m128i __Z)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPMADD52HUQ instruction.
+///
+/// \return
+/// return __m128i dst.
+/// \param __X
+/// A 128-bit vector of [2 x i64]
+/// \param __Y
+/// A 128-bit vector of [2 x i64]
+/// \param __Z
+/// A 128-bit vector of [2 x i64]
+///
+/// \code{.operation}
+/// FOR j := 0 to 1
+/// i := j*64
+/// tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i])
+/// dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[103:52])
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_madd52hi_avx_epu64(__m128i __X, __m128i __Y, __m128i __Z) {
+ return (__m128i)__builtin_ia32_vpmadd52huq128((__v2di)__X, (__v2di)__Y,
+ (__v2di)__Z);
+}
+
+/// Multiply packed unsigned 52-bit integers in each 64-bit element of \a __Y
+/// and \a __Z to form a 104-bit intermediate result. Add the high 52-bit
+/// unsigned integer from the intermediate result with the corresponding
+/// unsigned 64-bit integer in \a __X, and store the results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i
+/// _mm256_madd52hi_avx_epu64 (__m256i __X, __m256i __Y, __m256i __Z)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPMADD52HUQ instruction.
+///
+/// \return
+/// return __m256i dst.
+/// \param __X
+/// A 256-bit vector of [4 x i64]
+/// \param __Y
+/// A 256-bit vector of [4 x i64]
+/// \param __Z
+/// A 256-bit vector of [4 x i64]
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// i := j*64
+/// tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i])
+/// dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[103:52])
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_madd52hi_avx_epu64(__m256i __X, __m256i __Y, __m256i __Z) {
+ return (__m256i)__builtin_ia32_vpmadd52huq256((__v4di)__X, (__v4di)__Y,
+ (__v4di)__Z);
+}
+
+/// Multiply packed unsigned 52-bit integers in each 64-bit element of \a __Y
+/// and \a __Z to form a 104-bit intermediate result. Add the low 52-bit
+/// unsigned integer from the intermediate result with the corresponding
+/// unsigned 64-bit integer in \a __X, and store the results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i
+/// _mm_madd52lo_avx_epu64 (__m128i __X, __m128i __Y, __m128i __Z)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPMADD52LUQ instruction.
+///
+/// \return
+/// return __m128i dst.
+/// \param __X
+/// A 128-bit vector of [2 x i64]
+/// \param __Y
+/// A 128-bit vector of [2 x i64]
+/// \param __Z
+/// A 128-bit vector of [2 x i64]
+///
+/// \code{.operation}
+/// FOR j := 0 to 1
+/// i := j*64
+/// tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i])
+/// dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[51:0])
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_madd52lo_avx_epu64(__m128i __X, __m128i __Y, __m128i __Z) {
+ return (__m128i)__builtin_ia32_vpmadd52luq128((__v2di)__X, (__v2di)__Y,
+ (__v2di)__Z);
+}
+
+/// Multiply packed unsigned 52-bit integers in each 64-bit element of \a __Y
+/// and \a __Z to form a 104-bit intermediate result. Add the low 52-bit
+/// unsigned integer from the intermediate result with the corresponding
+/// unsigned 64-bit integer in \a __X, and store the results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i
+/// _mm256_madd52lo_avx_epu64 (__m256i __X, __m256i __Y, __m256i __Z)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPMADD52LUQ instruction.
+///
+/// \return
+/// return __m256i dst.
+/// \param __X
+/// A 256-bit vector of [4 x i64]
+/// \param __Y
+/// A 256-bit vector of [4 x i64]
+/// \param __Z
+/// A 256-bit vector of [4 x i64]
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// i := j*64
+/// tmp[127:0] := ZeroExtend64(__Y[i+51:i]) * ZeroExtend64(__Z[i+51:i])
+/// dst[i+63:i] := __X[i+63:i] + ZeroExtend64(tmp[51:0])
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_madd52lo_avx_epu64(__m256i __X, __m256i __Y, __m256i __Z) {
+ return (__m256i)__builtin_ia32_vpmadd52luq256((__v4di)__X, (__v4di)__Y,
+ (__v4di)__Z);
+}
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
+
+#endif // __AVXIFMAINTRIN_H
diff --git a/contrib/llvm-project/clang/lib/Headers/avxintrin.h b/contrib/llvm-project/clang/lib/Headers/avxintrin.h
index 382b6215751e..f116d8bc3a94 100644
--- a/contrib/llvm-project/clang/lib/Headers/avxintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avxintrin.h
@@ -39,9 +39,23 @@ typedef float __m256_u __attribute__ ((__vector_size__ (32), __aligned__(1)));
typedef double __m256d_u __attribute__((__vector_size__(32), __aligned__(1)));
typedef long long __m256i_u __attribute__((__vector_size__(32), __aligned__(1)));
+#ifdef __SSE2__
+/* Both _Float16 and __bf16 require SSE2 being enabled. */
+typedef _Float16 __v16hf __attribute__((__vector_size__(32), __aligned__(32)));
+typedef _Float16 __m256h __attribute__((__vector_size__(32), __aligned__(32)));
+typedef _Float16 __m256h_u __attribute__((__vector_size__(32), __aligned__(1)));
+
+typedef __bf16 __v16bf __attribute__((__vector_size__(32), __aligned__(32)));
+typedef __bf16 __m256bh __attribute__((__vector_size__(32), __aligned__(32)));
+#endif
+
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx"), __min_vector_width__(256)))
-#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("avx"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("avx,no-evex512"), \
+ __min_vector_width__(256)))
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, __target__("avx,no-evex512"), \
+ __min_vector_width__(128)))
/* Arithmetic */
/// Adds two 256-bit vectors of [4 x double].
@@ -400,7 +414,7 @@ _mm256_rcp_ps(__m256 __a)
/// 11: Truncated.
/// \returns A 256-bit vector of [4 x double] containing the rounded values.
#define _mm256_round_pd(V, M) \
- (__m256d)__builtin_ia32_roundpd256((__v4df)(__m256d)(V), (M))
+ ((__m256d)__builtin_ia32_roundpd256((__v4df)(__m256d)(V), (M)))
/// Rounds the values stored in a 256-bit vector of [8 x float] as
/// specified by the byte operand. The source values are rounded to integer
@@ -432,7 +446,7 @@ _mm256_rcp_ps(__m256 __a)
/// 11: Truncated.
/// \returns A 256-bit vector of [8 x float] containing the rounded values.
#define _mm256_round_ps(V, M) \
- (__m256)__builtin_ia32_roundps256((__v8sf)(__m256)(V), (M))
+ ((__m256)__builtin_ia32_roundps256((__v8sf)(__m256)(V), (M)))
/// Rounds up the values stored in a 256-bit vector of [4 x double]. The
/// source values are rounded up to integer values and returned as 64-bit
@@ -989,7 +1003,7 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// returned vector.
/// \returns A 128-bit vector of [2 x double] containing the copied values.
#define _mm_permute_pd(A, C) \
- (__m128d)__builtin_ia32_vpermilpd((__v2df)(__m128d)(A), (int)(C))
+ ((__m128d)__builtin_ia32_vpermilpd((__v2df)(__m128d)(A), (int)(C)))
/// Copies the values in a 256-bit vector of [4 x double] as specified by
/// the immediate integer operand.
@@ -1029,7 +1043,7 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// returned vector.
/// \returns A 256-bit vector of [4 x double] containing the copied values.
#define _mm256_permute_pd(A, C) \
- (__m256d)__builtin_ia32_vpermilpd256((__v4df)(__m256d)(A), (int)(C))
+ ((__m256d)__builtin_ia32_vpermilpd256((__v4df)(__m256d)(A), (int)(C)))
/// Copies the values in a 128-bit vector of [4 x float] as specified by
/// the immediate integer operand.
@@ -1085,7 +1099,7 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// returned vector.
/// \returns A 128-bit vector of [4 x float] containing the copied values.
#define _mm_permute_ps(A, C) \
- (__m128)__builtin_ia32_vpermilps((__v4sf)(__m128)(A), (int)(C))
+ ((__m128)__builtin_ia32_vpermilps((__v4sf)(__m128)(A), (int)(C)))
/// Copies the values in a 256-bit vector of [8 x float] as specified by
/// the immediate integer operand.
@@ -1177,7 +1191,7 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// returned vector.
/// \returns A 256-bit vector of [8 x float] containing the copied values.
#define _mm256_permute_ps(A, C) \
- (__m256)__builtin_ia32_vpermilps256((__v8sf)(__m256)(A), (int)(C))
+ ((__m256)__builtin_ia32_vpermilps256((__v8sf)(__m256)(A), (int)(C)))
/// Permutes 128-bit data values stored in two 256-bit vectors of
/// [4 x double], as specified by the immediate integer operand.
@@ -1217,8 +1231,8 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// destination.
/// \returns A 256-bit vector of [4 x double] containing the copied values.
#define _mm256_permute2f128_pd(V1, V2, M) \
- (__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)(__m256d)(V1), \
- (__v4df)(__m256d)(V2), (int)(M))
+ ((__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)(__m256d)(V1), \
+ (__v4df)(__m256d)(V2), (int)(M)))
/// Permutes 128-bit data values stored in two 256-bit vectors of
/// [8 x float], as specified by the immediate integer operand.
@@ -1258,8 +1272,8 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// destination.
/// \returns A 256-bit vector of [8 x float] containing the copied values.
#define _mm256_permute2f128_ps(V1, V2, M) \
- (__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)(__m256)(V1), \
- (__v8sf)(__m256)(V2), (int)(M))
+ ((__m256)__builtin_ia32_vperm2f128_ps256((__v8sf)(__m256)(V1), \
+ (__v8sf)(__m256)(V2), (int)(M)))
/// Permutes 128-bit data values stored in two 256-bit integer vectors,
/// as specified by the immediate integer operand.
@@ -1298,8 +1312,8 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// destination.
/// \returns A 256-bit integer vector containing the copied values.
#define _mm256_permute2f128_si256(V1, V2, M) \
- (__m256i)__builtin_ia32_vperm2f128_si256((__v8si)(__m256i)(V1), \
- (__v8si)(__m256i)(V2), (int)(M))
+ ((__m256i)__builtin_ia32_vperm2f128_si256((__v8si)(__m256i)(V1), \
+ (__v8si)(__m256i)(V2), (int)(M)))
/* Vector Blend */
/// Merges 64-bit double-precision data values stored in either of the
@@ -1327,8 +1341,8 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// operand \a V2 is copied to the same position in the destination.
/// \returns A 256-bit vector of [4 x double] containing the copied values.
#define _mm256_blend_pd(V1, V2, M) \
- (__m256d)__builtin_ia32_blendpd256((__v4df)(__m256d)(V1), \
- (__v4df)(__m256d)(V2), (int)(M))
+ ((__m256d)__builtin_ia32_blendpd256((__v4df)(__m256d)(V1), \
+ (__v4df)(__m256d)(V2), (int)(M)))
/// Merges 32-bit single-precision data values stored in either of the
/// two 256-bit vectors of [8 x float], as specified by the immediate
@@ -1355,8 +1369,8 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// operand \a V2 is copied to the same position in the destination.
/// \returns A 256-bit vector of [8 x float] containing the copied values.
#define _mm256_blend_ps(V1, V2, M) \
- (__m256)__builtin_ia32_blendps256((__v8sf)(__m256)(V1), \
- (__v8sf)(__m256)(V2), (int)(M))
+ ((__m256)__builtin_ia32_blendps256((__v8sf)(__m256)(V1), \
+ (__v8sf)(__m256)(V2), (int)(M)))
/// Merges 64-bit double-precision data values stored in either of the
/// two 256-bit vectors of [4 x double], as specified by the 256-bit vector
@@ -1453,8 +1467,8 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// two parallel dot product computations.
/// \returns A 256-bit vector of [8 x float] containing the two dot products.
#define _mm256_dp_ps(V1, V2, M) \
- (__m256)__builtin_ia32_dpps256((__v8sf)(__m256)(V1), \
- (__v8sf)(__m256)(V2), (M))
+ ((__m256)__builtin_ia32_dpps256((__v8sf)(__m256)(V1), \
+ (__v8sf)(__m256)(V2), (M)))
/* Vector shuffle */
/// Selects 8 float values from the 256-bit operands of [8 x float], as
@@ -1504,11 +1518,14 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// 00: Bits [31:0] and [159:128] are copied from the selected operand. \n
/// 01: Bits [63:32] and [191:160] are copied from the selected operand. \n
/// 10: Bits [95:64] and [223:192] are copied from the selected operand. \n
-/// 11: Bits [127:96] and [255:224] are copied from the selected operand.
+/// 11: Bits [127:96] and [255:224] are copied from the selected operand. \n
+/// Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+/// <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+/// <c>[b6, b4, b2, b0]</c>.
/// \returns A 256-bit vector of [8 x float] containing the shuffled values.
#define _mm256_shuffle_ps(a, b, mask) \
- (__m256)__builtin_ia32_shufps256((__v8sf)(__m256)(a), \
- (__v8sf)(__m256)(b), (int)(mask))
+ ((__m256)__builtin_ia32_shufps256((__v8sf)(__m256)(a), \
+ (__v8sf)(__m256)(b), (int)(mask)))
/// Selects four double-precision values from the 256-bit operands of
/// [4 x double], as specified by the immediate value operand.
@@ -1553,8 +1570,8 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// destination.
/// \returns A 256-bit vector of [4 x double] containing the shuffled values.
#define _mm256_shuffle_pd(a, b, mask) \
- (__m256d)__builtin_ia32_shufpd256((__v4df)(__m256d)(a), \
- (__v4df)(__m256d)(b), (int)(mask))
+ ((__m256d)__builtin_ia32_shufpd256((__v4df)(__m256d)(a), \
+ (__v4df)(__m256d)(b), (int)(mask)))
/* Compare */
#define _CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */
@@ -1647,8 +1664,8 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// 0x1F: True (unordered, signaling)
/// \returns A 128-bit vector of [2 x double] containing the comparison results.
#define _mm_cmp_pd(a, b, c) \
- (__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), \
- (__v2df)(__m128d)(b), (c))
+ ((__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), \
+ (__v2df)(__m128d)(b), (c)))
/// Compares each of the corresponding values of two 128-bit vectors of
/// [4 x float], using the operation specified by the immediate integer
@@ -1707,8 +1724,8 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// 0x1F: True (unordered, signaling)
/// \returns A 128-bit vector of [4 x float] containing the comparison results.
#define _mm_cmp_ps(a, b, c) \
- (__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), \
- (__v4sf)(__m128)(b), (c))
+ ((__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), \
+ (__v4sf)(__m128)(b), (c)))
/// Compares each of the corresponding double-precision values of two
/// 256-bit vectors of [4 x double], using the operation specified by the
@@ -1767,8 +1784,8 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// 0x1F: True (unordered, signaling)
/// \returns A 256-bit vector of [4 x double] containing the comparison results.
#define _mm256_cmp_pd(a, b, c) \
- (__m256d)__builtin_ia32_cmppd256((__v4df)(__m256d)(a), \
- (__v4df)(__m256d)(b), (c))
+ ((__m256d)__builtin_ia32_cmppd256((__v4df)(__m256d)(a), \
+ (__v4df)(__m256d)(b), (c)))
/// Compares each of the corresponding values of two 256-bit vectors of
/// [8 x float], using the operation specified by the immediate integer
@@ -1827,8 +1844,8 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// 0x1F: True (unordered, signaling)
/// \returns A 256-bit vector of [8 x float] containing the comparison results.
#define _mm256_cmp_ps(a, b, c) \
- (__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \
- (__v8sf)(__m256)(b), (c))
+ ((__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \
+ (__v8sf)(__m256)(b), (c)))
/// Compares each of the corresponding scalar double-precision values of
/// two 128-bit vectors of [2 x double], using the operation specified by the
@@ -1886,8 +1903,8 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// 0x1F: True (unordered, signaling)
/// \returns A 128-bit vector of [2 x double] containing the comparison results.
#define _mm_cmp_sd(a, b, c) \
- (__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), \
- (__v2df)(__m128d)(b), (c))
+ ((__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), \
+ (__v2df)(__m128d)(b), (c)))
/// Compares each of the corresponding scalar values of two 128-bit
/// vectors of [4 x float], using the operation specified by the immediate
@@ -1945,64 +1962,76 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// 0x1F: True (unordered, signaling)
/// \returns A 128-bit vector of [4 x float] containing the comparison results.
#define _mm_cmp_ss(a, b, c) \
- (__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), \
- (__v4sf)(__m128)(b), (c))
+ ((__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), \
+ (__v4sf)(__m128)(b), (c)))
/// Takes a [8 x i32] vector and returns the vector element value
/// indexed by the immediate constant operand.
///
/// \headerfile <x86intrin.h>
///
+/// \code
+/// int _mm256_extract_epi32(__m256i X, const int N);
+/// \endcode
+///
/// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
/// instruction.
///
-/// \param __a
+/// \param X
/// A 256-bit vector of [8 x i32].
-/// \param __imm
+/// \param N
/// An immediate integer operand with bits [2:0] determining which vector
/// element is extracted and returned.
/// \returns A 32-bit integer containing the extracted 32 bits of extended
/// packed data.
#define _mm256_extract_epi32(X, N) \
- (int)__builtin_ia32_vec_ext_v8si((__v8si)(__m256i)(X), (int)(N))
+ ((int)__builtin_ia32_vec_ext_v8si((__v8si)(__m256i)(X), (int)(N)))
/// Takes a [16 x i16] vector and returns the vector element value
/// indexed by the immediate constant operand.
///
/// \headerfile <x86intrin.h>
///
+/// \code
+/// int _mm256_extract_epi16(__m256i X, const int N);
+/// \endcode
+///
/// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
/// instruction.
///
-/// \param __a
+/// \param X
/// A 256-bit integer vector of [16 x i16].
-/// \param __imm
+/// \param N
/// An immediate integer operand with bits [3:0] determining which vector
/// element is extracted and returned.
/// \returns A 32-bit integer containing the extracted 16 bits of zero extended
/// packed data.
#define _mm256_extract_epi16(X, N) \
- (int)(unsigned short)__builtin_ia32_vec_ext_v16hi((__v16hi)(__m256i)(X), \
- (int)(N))
+ ((int)(unsigned short)__builtin_ia32_vec_ext_v16hi((__v16hi)(__m256i)(X), \
+ (int)(N)))
/// Takes a [32 x i8] vector and returns the vector element value
/// indexed by the immediate constant operand.
///
/// \headerfile <x86intrin.h>
///
+/// \code
+/// int _mm256_extract_epi8(__m256i X, const int N);
+/// \endcode
+///
/// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
/// instruction.
///
-/// \param __a
+/// \param X
/// A 256-bit integer vector of [32 x i8].
-/// \param __imm
+/// \param N
/// An immediate integer operand with bits [4:0] determining which vector
/// element is extracted and returned.
/// \returns A 32-bit integer containing the extracted 8 bits of zero extended
/// packed data.
#define _mm256_extract_epi8(X, N) \
- (int)(unsigned char)__builtin_ia32_vec_ext_v32qi((__v32qi)(__m256i)(X), \
- (int)(N))
+ ((int)(unsigned char)__builtin_ia32_vec_ext_v32qi((__v32qi)(__m256i)(X), \
+ (int)(N)))
#ifdef __x86_64__
/// Takes a [4 x i64] vector and returns the vector element value
@@ -2010,18 +2039,22 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
///
/// \headerfile <x86intrin.h>
///
+/// \code
+/// long long _mm256_extract_epi64(__m256i X, const int N);
+/// \endcode
+///
/// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
/// instruction.
///
-/// \param __a
+/// \param X
/// A 256-bit integer vector of [4 x i64].
-/// \param __imm
+/// \param N
/// An immediate integer operand with bits [1:0] determining which vector
/// element is extracted and returned.
/// \returns A 64-bit integer containing the extracted 64 bits of extended
/// packed data.
#define _mm256_extract_epi64(X, N) \
- (long long)__builtin_ia32_vec_ext_v4di((__v4di)(__m256i)(X), (int)(N))
+ ((long long)__builtin_ia32_vec_ext_v4di((__v4di)(__m256i)(X), (int)(N)))
#endif
/// Takes a [8 x i32] vector and replaces the vector element value
@@ -2030,21 +2063,25 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
///
/// \headerfile <x86intrin.h>
///
+/// \code
+/// __m256i _mm256_insert_epi32(__m256i X, int I, const int N);
+/// \endcode
+///
/// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
/// instruction.
///
-/// \param __a
+/// \param X
/// A vector of [8 x i32] to be used by the insert operation.
-/// \param __b
+/// \param I
/// An integer value. The replacement value for the insert operation.
-/// \param __imm
+/// \param N
/// An immediate integer specifying the index of the vector element to be
/// replaced.
-/// \returns A copy of vector \a __a, after replacing its element indexed by
-/// \a __imm with \a __b.
+/// \returns A copy of vector \a X, after replacing its element indexed by
+/// \a N with \a I.
#define _mm256_insert_epi32(X, I, N) \
- (__m256i)__builtin_ia32_vec_set_v8si((__v8si)(__m256i)(X), \
- (int)(I), (int)(N))
+ ((__m256i)__builtin_ia32_vec_set_v8si((__v8si)(__m256i)(X), \
+ (int)(I), (int)(N)))
/// Takes a [16 x i16] vector and replaces the vector element value
@@ -2053,21 +2090,25 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
///
/// \headerfile <x86intrin.h>
///
+/// \code
+/// __m256i _mm256_insert_epi16(__m256i X, int I, const int N);
+/// \endcode
+///
/// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
/// instruction.
///
-/// \param __a
+/// \param X
/// A vector of [16 x i16] to be used by the insert operation.
-/// \param __b
+/// \param I
/// An i16 integer value. The replacement value for the insert operation.
-/// \param __imm
+/// \param N
/// An immediate integer specifying the index of the vector element to be
/// replaced.
-/// \returns A copy of vector \a __a, after replacing its element indexed by
-/// \a __imm with \a __b.
+/// \returns A copy of vector \a X, after replacing its element indexed by
+/// \a N with \a I.
#define _mm256_insert_epi16(X, I, N) \
- (__m256i)__builtin_ia32_vec_set_v16hi((__v16hi)(__m256i)(X), \
- (int)(I), (int)(N))
+ ((__m256i)__builtin_ia32_vec_set_v16hi((__v16hi)(__m256i)(X), \
+ (int)(I), (int)(N)))
/// Takes a [32 x i8] vector and replaces the vector element value
/// indexed by the immediate constant operand with a new value. Returns the
@@ -2075,21 +2116,25 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
///
/// \headerfile <x86intrin.h>
///
+/// \code
+/// __m256i _mm256_insert_epi8(__m256i X, int I, const int N);
+/// \endcode
+///
/// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
/// instruction.
///
-/// \param __a
+/// \param X
/// A vector of [32 x i8] to be used by the insert operation.
-/// \param __b
+/// \param I
/// An i8 integer value. The replacement value for the insert operation.
-/// \param __imm
+/// \param N
/// An immediate integer specifying the index of the vector element to be
/// replaced.
-/// \returns A copy of vector \a __a, after replacing its element indexed by
-/// \a __imm with \a __b.
+/// \returns A copy of vector \a X, after replacing its element indexed by
+/// \a N with \a I.
#define _mm256_insert_epi8(X, I, N) \
- (__m256i)__builtin_ia32_vec_set_v32qi((__v32qi)(__m256i)(X), \
- (int)(I), (int)(N))
+ ((__m256i)__builtin_ia32_vec_set_v32qi((__v32qi)(__m256i)(X), \
+ (int)(I), (int)(N)))
#ifdef __x86_64__
/// Takes a [4 x i64] vector and replaces the vector element value
@@ -2098,21 +2143,25 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
///
/// \headerfile <x86intrin.h>
///
+/// \code
+/// __m256i _mm256_insert_epi64(__m256i X, int I, const int N);
+/// \endcode
+///
/// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
/// instruction.
///
-/// \param __a
+/// \param X
/// A vector of [4 x i64] to be used by the insert operation.
-/// \param __b
+/// \param I
/// A 64-bit integer value. The replacement value for the insert operation.
-/// \param __imm
+/// \param N
/// An immediate integer specifying the index of the vector element to be
/// replaced.
-/// \returns A copy of vector \a __a, after replacing its element indexed by
-/// \a __imm with \a __b.
+/// \returns A copy of vector \a X, after replacing its element indexed by
+/// \a N with \a I.
#define _mm256_insert_epi64(X, I, N) \
- (__m256i)__builtin_ia32_vec_set_v4di((__v4di)(__m256i)(X), \
- (long long)(I), (int)(N))
+ ((__m256i)__builtin_ia32_vec_set_v4di((__v4di)(__m256i)(X), \
+ (long long)(I), (int)(N)))
#endif
/* Conversion */
@@ -2972,8 +3021,11 @@ _mm256_zeroupper(void)
static __inline __m128 __DEFAULT_FN_ATTRS128
_mm_broadcast_ss(float const *__a)
{
- float __f = *__a;
- return __extension__ (__m128)(__v4sf){ __f, __f, __f, __f };
+ struct __mm_broadcast_ss_struct {
+ float __f;
+ } __attribute__((__packed__, __may_alias__));
+ float __f = ((const struct __mm_broadcast_ss_struct*)__a)->__f;
+ return __extension__ (__m128){ __f, __f, __f, __f };
}
/// Loads a scalar double-precision floating point value from the
@@ -2991,7 +3043,10 @@ _mm_broadcast_ss(float const *__a)
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_broadcast_sd(double const *__a)
{
- double __d = *__a;
+ struct __mm256_broadcast_sd_struct {
+ double __d;
+ } __attribute__((__packed__, __may_alias__));
+ double __d = ((const struct __mm256_broadcast_sd_struct*)__a)->__d;
return __extension__ (__m256d)(__v4df){ __d, __d, __d, __d };
}
@@ -3010,7 +3065,10 @@ _mm256_broadcast_sd(double const *__a)
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_broadcast_ss(float const *__a)
{
- float __f = *__a;
+ struct __mm256_broadcast_ss_struct {
+ float __f;
+ } __attribute__((__packed__, __may_alias__));
+ float __f = ((const struct __mm256_broadcast_ss_struct*)__a)->__f;
return __extension__ (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f };
}
@@ -3177,7 +3235,7 @@ _mm256_loadu_si256(__m256i_u const *__p)
/// A pointer to a 256-bit integer vector containing integer values.
/// \returns A 256-bit integer vector containing the moved values.
static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_lddqu_si256(__m256i const *__p)
+_mm256_lddqu_si256(__m256i_u const *__p)
{
return (__m256i)__builtin_ia32_lddqu256((char const *)__p);
}
@@ -3509,7 +3567,7 @@ _mm_maskstore_ps(float *__p, __m128i __m, __m128 __a)
/// \param __b
/// A 256-bit integer vector containing the values to be moved.
static __inline void __DEFAULT_FN_ATTRS
-_mm256_stream_si256(__m256i *__a, __m256i __b)
+_mm256_stream_si256(void *__a, __m256i __b)
{
typedef __v4di __v4di_aligned __attribute__((aligned(32)));
__builtin_nontemporal_store((__v4di_aligned)__b, (__v4di_aligned*)__a);
@@ -3529,7 +3587,7 @@ _mm256_stream_si256(__m256i *__a, __m256i __b)
/// \param __b
/// A 256-bit vector of [4 x double] containing the values to be moved.
static __inline void __DEFAULT_FN_ATTRS
-_mm256_stream_pd(double *__a, __m256d __b)
+_mm256_stream_pd(void *__a, __m256d __b)
{
typedef __v4df __v4df_aligned __attribute__((aligned(32)));
__builtin_nontemporal_store((__v4df_aligned)__b, (__v4df_aligned*)__a);
@@ -3550,7 +3608,7 @@ _mm256_stream_pd(double *__a, __m256d __b)
/// \param __a
/// A 256-bit vector of [8 x float] containing the values to be moved.
static __inline void __DEFAULT_FN_ATTRS
-_mm256_stream_ps(float *__p, __m256 __a)
+_mm256_stream_ps(void *__p, __m256 __a)
{
typedef __v8sf __v8sf_aligned __attribute__((aligned(32)));
__builtin_nontemporal_store((__v8sf_aligned)__a, (__v8sf_aligned*)__p);
@@ -4253,7 +4311,7 @@ _mm256_set1_epi64x(long long __q)
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_setzero_pd(void)
{
- return __extension__ (__m256d){ 0, 0, 0, 0 };
+ return __extension__ (__m256d){ 0.0, 0.0, 0.0, 0.0 };
}
/// Constructs a 256-bit floating-point vector of [8 x float] with all
@@ -4267,7 +4325,7 @@ _mm256_setzero_pd(void)
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_setzero_ps(void)
{
- return __extension__ (__m256){ 0, 0, 0, 0, 0, 0, 0, 0 };
+ return __extension__ (__m256){ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f };
}
/// Constructs a 256-bit integer vector initialized to zero.
@@ -4454,7 +4512,8 @@ _mm256_castsi256_si128(__m256i __a)
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_castpd128_pd256(__m128d __a)
{
- return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 1, -1, -1);
+ return __builtin_shufflevector(
+ (__v2df)__a, (__v2df)__builtin_nondeterministic_value(__a), 0, 1, 2, 3);
}
/// Constructs a 256-bit floating-point vector of [8 x float] from a
@@ -4475,7 +4534,9 @@ _mm256_castpd128_pd256(__m128d __a)
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_castps128_ps256(__m128 __a)
{
- return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1, 2, 3, -1, -1, -1, -1);
+ return __builtin_shufflevector((__v4sf)__a,
+ (__v4sf)__builtin_nondeterministic_value(__a),
+ 0, 1, 2, 3, 4, 5, 6, 7);
}
/// Constructs a 256-bit integer vector from a 128-bit integer vector.
@@ -4494,7 +4555,8 @@ _mm256_castps128_ps256(__m128 __a)
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_castsi128_si256(__m128i __a)
{
- return __builtin_shufflevector((__v2di)__a, (__v2di)__a, 0, 1, -1, -1);
+ return __builtin_shufflevector(
+ (__v2di)__a, (__v2di)__builtin_nondeterministic_value(__a), 0, 1, 2, 3);
}
/// Constructs a 256-bit floating-point vector of [4 x double] from a
@@ -4592,8 +4654,8 @@ _mm256_zextsi128_si256(__m128i __a)
/// result.
/// \returns A 256-bit vector of [8 x float] containing the interleaved values.
#define _mm256_insertf128_ps(V1, V2, M) \
- (__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)(__m256)(V1), \
- (__v4sf)(__m128)(V2), (int)(M))
+ ((__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)(__m256)(V1), \
+ (__v4sf)(__m128)(V2), (int)(M)))
/// Constructs a new 256-bit vector of [4 x double] by first duplicating
/// a 256-bit vector of [4 x double] given in the first parameter, and then
@@ -4630,8 +4692,8 @@ _mm256_zextsi128_si256(__m128i __a)
/// result.
/// \returns A 256-bit vector of [4 x double] containing the interleaved values.
#define _mm256_insertf128_pd(V1, V2, M) \
- (__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)(__m256d)(V1), \
- (__v2df)(__m128d)(V2), (int)(M))
+ ((__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)(__m256d)(V1), \
+ (__v2df)(__m128d)(V2), (int)(M)))
/// Constructs a new 256-bit integer vector by first duplicating a
/// 256-bit integer vector given in the first parameter, and then replacing
@@ -4668,8 +4730,8 @@ _mm256_zextsi128_si256(__m128i __a)
/// result.
/// \returns A 256-bit integer vector containing the interleaved values.
#define _mm256_insertf128_si256(V1, V2, M) \
- (__m256i)__builtin_ia32_vinsertf128_si256((__v8si)(__m256i)(V1), \
- (__v4si)(__m128i)(V2), (int)(M))
+ ((__m256i)__builtin_ia32_vinsertf128_si256((__v8si)(__m256i)(V1), \
+ (__v4si)(__m128i)(V2), (int)(M)))
/*
Vector extract.
@@ -4698,7 +4760,7 @@ _mm256_zextsi128_si256(__m128i __a)
/// If bit [0] of \a M is 1, bits [255:128] of \a V are copied to the result.
/// \returns A 128-bit vector of [4 x float] containing the extracted bits.
#define _mm256_extractf128_ps(V, M) \
- (__m128)__builtin_ia32_vextractf128_ps256((__v8sf)(__m256)(V), (int)(M))
+ ((__m128)__builtin_ia32_vextractf128_ps256((__v8sf)(__m256)(V), (int)(M)))
/// Extracts either the upper or the lower 128 bits from a 256-bit vector
/// of [4 x double], as determined by the immediate integer parameter, and
@@ -4722,7 +4784,7 @@ _mm256_zextsi128_si256(__m128i __a)
/// If bit [0] of \a M is 1, bits [255:128] of \a V are copied to the result.
/// \returns A 128-bit vector of [2 x double] containing the extracted bits.
#define _mm256_extractf128_pd(V, M) \
- (__m128d)__builtin_ia32_vextractf128_pd256((__v4df)(__m256d)(V), (int)(M))
+ ((__m128d)__builtin_ia32_vextractf128_pd256((__v4df)(__m256d)(V), (int)(M)))
/// Extracts either the upper or the lower 128 bits from a 256-bit
/// integer vector, as determined by the immediate integer parameter, and
@@ -4746,7 +4808,136 @@ _mm256_zextsi128_si256(__m128i __a)
/// If bit [0] of \a M is 1, bits [255:128] of \a V are copied to the result.
/// \returns A 128-bit integer vector containing the extracted bits.
#define _mm256_extractf128_si256(V, M) \
- (__m128i)__builtin_ia32_vextractf128_si256((__v8si)(__m256i)(V), (int)(M))
+ ((__m128i)__builtin_ia32_vextractf128_si256((__v8si)(__m256i)(V), (int)(M)))
+
+/// Constructs a 256-bit floating-point vector of [8 x float] by
+/// concatenating two 128-bit floating-point vectors of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
+///
+/// \param __hi
+/// A 128-bit floating-point vector of [4 x float] to be copied to the upper
+/// 128 bits of the result.
+/// \param __lo
+/// A 128-bit floating-point vector of [4 x float] to be copied to the lower
+/// 128 bits of the result.
+/// \returns A 256-bit floating-point vector of [8 x float] containing the
+/// concatenated result.
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_set_m128 (__m128 __hi, __m128 __lo)
+{
+ return (__m256) __builtin_shufflevector((__v4sf)__lo, (__v4sf)__hi, 0, 1, 2, 3, 4, 5, 6, 7);
+}
+
+/// Constructs a 256-bit floating-point vector of [4 x double] by
+/// concatenating two 128-bit floating-point vectors of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
+///
+/// \param __hi
+/// A 128-bit floating-point vector of [2 x double] to be copied to the upper
+/// 128 bits of the result.
+/// \param __lo
+/// A 128-bit floating-point vector of [2 x double] to be copied to the lower
+/// 128 bits of the result.
+/// \returns A 256-bit floating-point vector of [4 x double] containing the
+/// concatenated result.
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_set_m128d (__m128d __hi, __m128d __lo)
+{
+ return (__m256d) __builtin_shufflevector((__v2df)__lo, (__v2df)__hi, 0, 1, 2, 3);
+}
+
+/// Constructs a 256-bit integer vector by concatenating two 128-bit
+/// integer vectors.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
+///
+/// \param __hi
+/// A 128-bit integer vector to be copied to the upper 128 bits of the
+/// result.
+/// \param __lo
+/// A 128-bit integer vector to be copied to the lower 128 bits of the
+/// result.
+/// \returns A 256-bit integer vector containing the concatenated result.
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_set_m128i (__m128i __hi, __m128i __lo)
+{
+ return (__m256i) __builtin_shufflevector((__v2di)__lo, (__v2di)__hi, 0, 1, 2, 3);
+}
+
+/// Constructs a 256-bit floating-point vector of [8 x float] by
+/// concatenating two 128-bit floating-point vectors of [4 x float]. This is
+/// similar to _mm256_set_m128, but the order of the input parameters is
+/// swapped.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
+///
+/// \param __lo
+/// A 128-bit floating-point vector of [4 x float] to be copied to the lower
+/// 128 bits of the result.
+/// \param __hi
+/// A 128-bit floating-point vector of [4 x float] to be copied to the upper
+/// 128 bits of the result.
+/// \returns A 256-bit floating-point vector of [8 x float] containing the
+/// concatenated result.
+static __inline __m256 __DEFAULT_FN_ATTRS
+_mm256_setr_m128 (__m128 __lo, __m128 __hi)
+{
+ return _mm256_set_m128(__hi, __lo);
+}
+
+/// Constructs a 256-bit floating-point vector of [4 x double] by
+/// concatenating two 128-bit floating-point vectors of [2 x double]. This is
+/// similar to _mm256_set_m128d, but the order of the input parameters is
+/// swapped.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
+///
+/// \param __lo
+/// A 128-bit floating-point vector of [2 x double] to be copied to the lower
+/// 128 bits of the result.
+/// \param __hi
+/// A 128-bit floating-point vector of [2 x double] to be copied to the upper
+/// 128 bits of the result.
+/// \returns A 256-bit floating-point vector of [4 x double] containing the
+/// concatenated result.
+static __inline __m256d __DEFAULT_FN_ATTRS
+_mm256_setr_m128d (__m128d __lo, __m128d __hi)
+{
+ return (__m256d)_mm256_set_m128d(__hi, __lo);
+}
+
+/// Constructs a 256-bit integer vector by concatenating two 128-bit
+/// integer vectors. This is similar to _mm256_set_m128i, but the order of
+/// the input parameters is swapped.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
+///
+/// \param __lo
+/// A 128-bit integer vector to be copied to the lower 128 bits of the
+/// result.
+/// \param __hi
+/// A 128-bit integer vector to be copied to the upper 128 bits of the
+/// result.
+/// \returns A 256-bit integer vector containing the concatenated result.
+static __inline __m256i __DEFAULT_FN_ATTRS
+_mm256_setr_m128i (__m128i __lo, __m128i __hi)
+{
+ return (__m256i)_mm256_set_m128i(__hi, __lo);
+}
/* SIMD load ops (unaligned) */
/// Loads two 128-bit floating-point vectors of [4 x float] from
@@ -4773,8 +4964,7 @@ _mm256_zextsi128_si256(__m128i __a)
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
{
- __m256 __v256 = _mm256_castps128_ps256(_mm_loadu_ps(__addr_lo));
- return _mm256_insertf128_ps(__v256, _mm_loadu_ps(__addr_hi), 1);
+ return _mm256_set_m128(_mm_loadu_ps(__addr_hi), _mm_loadu_ps(__addr_lo));
}
/// Loads two 128-bit floating-point vectors of [2 x double] from
@@ -4801,8 +4991,7 @@ _mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)
{
- __m256d __v256 = _mm256_castpd128_pd256(_mm_loadu_pd(__addr_lo));
- return _mm256_insertf128_pd(__v256, _mm_loadu_pd(__addr_hi), 1);
+ return _mm256_set_m128d(_mm_loadu_pd(__addr_hi), _mm_loadu_pd(__addr_lo));
}
/// Loads two 128-bit integer vectors from unaligned memory locations and
@@ -4826,8 +5015,7 @@ _mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_loadu2_m128i(__m128i_u const *__addr_hi, __m128i_u const *__addr_lo)
{
- __m256i __v256 = _mm256_castsi128_si256(_mm_loadu_si128(__addr_lo));
- return _mm256_insertf128_si256(__v256, _mm_loadu_si128(__addr_hi), 1);
+ return _mm256_set_m128i(_mm_loadu_si128(__addr_hi), _mm_loadu_si128(__addr_lo));
}
/* SIMD store ops (unaligned) */
@@ -4918,135 +5106,6 @@ _mm256_storeu2_m128i(__m128i_u *__addr_hi, __m128i_u *__addr_lo, __m256i __a)
_mm_storeu_si128(__addr_hi, __v128);
}
-/// Constructs a 256-bit floating-point vector of [8 x float] by
-/// concatenating two 128-bit floating-point vectors of [4 x float].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
-///
-/// \param __hi
-/// A 128-bit floating-point vector of [4 x float] to be copied to the upper
-/// 128 bits of the result.
-/// \param __lo
-/// A 128-bit floating-point vector of [4 x float] to be copied to the lower
-/// 128 bits of the result.
-/// \returns A 256-bit floating-point vector of [8 x float] containing the
-/// concatenated result.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_set_m128 (__m128 __hi, __m128 __lo)
-{
- return (__m256) __builtin_shufflevector((__v4sf)__lo, (__v4sf)__hi, 0, 1, 2, 3, 4, 5, 6, 7);
-}
-
-/// Constructs a 256-bit floating-point vector of [4 x double] by
-/// concatenating two 128-bit floating-point vectors of [2 x double].
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
-///
-/// \param __hi
-/// A 128-bit floating-point vector of [2 x double] to be copied to the upper
-/// 128 bits of the result.
-/// \param __lo
-/// A 128-bit floating-point vector of [2 x double] to be copied to the lower
-/// 128 bits of the result.
-/// \returns A 256-bit floating-point vector of [4 x double] containing the
-/// concatenated result.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_set_m128d (__m128d __hi, __m128d __lo)
-{
- return (__m256d) __builtin_shufflevector((__v2df)__lo, (__v2df)__hi, 0, 1, 2, 3);
-}
-
-/// Constructs a 256-bit integer vector by concatenating two 128-bit
-/// integer vectors.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
-///
-/// \param __hi
-/// A 128-bit integer vector to be copied to the upper 128 bits of the
-/// result.
-/// \param __lo
-/// A 128-bit integer vector to be copied to the lower 128 bits of the
-/// result.
-/// \returns A 256-bit integer vector containing the concatenated result.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_set_m128i (__m128i __hi, __m128i __lo)
-{
- return (__m256i) __builtin_shufflevector((__v2di)__lo, (__v2di)__hi, 0, 1, 2, 3);
-}
-
-/// Constructs a 256-bit floating-point vector of [8 x float] by
-/// concatenating two 128-bit floating-point vectors of [4 x float]. This is
-/// similar to _mm256_set_m128, but the order of the input parameters is
-/// swapped.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
-///
-/// \param __lo
-/// A 128-bit floating-point vector of [4 x float] to be copied to the lower
-/// 128 bits of the result.
-/// \param __hi
-/// A 128-bit floating-point vector of [4 x float] to be copied to the upper
-/// 128 bits of the result.
-/// \returns A 256-bit floating-point vector of [8 x float] containing the
-/// concatenated result.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_setr_m128 (__m128 __lo, __m128 __hi)
-{
- return _mm256_set_m128(__hi, __lo);
-}
-
-/// Constructs a 256-bit floating-point vector of [4 x double] by
-/// concatenating two 128-bit floating-point vectors of [2 x double]. This is
-/// similar to _mm256_set_m128d, but the order of the input parameters is
-/// swapped.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
-///
-/// \param __lo
-/// A 128-bit floating-point vector of [2 x double] to be copied to the lower
-/// 128 bits of the result.
-/// \param __hi
-/// A 128-bit floating-point vector of [2 x double] to be copied to the upper
-/// 128 bits of the result.
-/// \returns A 256-bit floating-point vector of [4 x double] containing the
-/// concatenated result.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_setr_m128d (__m128d __lo, __m128d __hi)
-{
- return (__m256d)_mm256_set_m128d(__hi, __lo);
-}
-
-/// Constructs a 256-bit integer vector by concatenating two 128-bit
-/// integer vectors. This is similar to _mm256_set_m128i, but the order of
-/// the input parameters is swapped.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
-///
-/// \param __lo
-/// A 128-bit integer vector to be copied to the lower 128 bits of the
-/// result.
-/// \param __hi
-/// A 128-bit integer vector to be copied to the upper 128 bits of the
-/// result.
-/// \returns A 256-bit integer vector containing the concatenated result.
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_setr_m128i (__m128i __lo, __m128i __hi)
-{
- return (__m256i)_mm256_set_m128i(__hi, __lo);
-}
-
#undef __DEFAULT_FN_ATTRS
#undef __DEFAULT_FN_ATTRS128
diff --git a/contrib/llvm-project/clang/lib/Headers/avxneconvertintrin.h b/contrib/llvm-project/clang/lib/Headers/avxneconvertintrin.h
new file mode 100644
index 000000000000..1bef1c893787
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/avxneconvertintrin.h
@@ -0,0 +1,484 @@
+/*===-------------- avxneconvertintrin.h - AVXNECONVERT --------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error \
+ "Never use <avxneconvertintrin.h> directly; include <immintrin.h> instead."
+#endif // __IMMINTRIN_H
+
+#ifdef __SSE2__
+
+#ifndef __AVXNECONVERTINTRIN_H
+#define __AVXNECONVERTINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, __target__("avxneconvert"), \
+ __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, __target__("avxneconvert"), \
+ __min_vector_width__(256)))
+
+/// Convert scalar BF16 (16-bit) floating-point element
+/// stored at memory locations starting at location \a __A to a
+/// single-precision (32-bit) floating-point, broadcast it to packed
+/// single-precision (32-bit) floating-point elements, and store the results in
+/// \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm_bcstnebf16_ps(const void *__A);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VBCSTNEBF162PS instruction.
+///
+/// \param __A
+/// A pointer to a 16-bit memory location. The address of the memory
+/// location does not have to be aligned.
+/// \returns
+/// A 128-bit vector of [4 x float].
+///
+/// \code{.operation}
+/// b := Convert_BF16_To_FP32(MEM[__A+15:__A])
+/// FOR j := 0 to 3
+/// m := j*32
+/// dst[m+31:m] := b
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_bcstnebf16_ps(const void *__A) {
+ return (__m128)__builtin_ia32_vbcstnebf162ps128((const __bf16 *)__A);
+}
+
+/// Convert scalar BF16 (16-bit) floating-point element
+/// stored at memory locations starting at location \a __A to a
+/// single-precision (32-bit) floating-point, broadcast it to packed
+/// single-precision (32-bit) floating-point elements, and store the results in
+/// \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm256_bcstnebf16_ps(const void *__A);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VBCSTNEBF162PS instruction.
+///
+/// \param __A
+/// A pointer to a 16-bit memory location. The address of the memory
+/// location does not have to be aligned.
+/// \returns
+/// A 256-bit vector of [8 x float].
+///
+/// \code{.operation}
+/// b := Convert_BF16_To_FP32(MEM[__A+15:__A])
+/// FOR j := 0 to 7
+/// m := j*32
+/// dst[m+31:m] := b
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_bcstnebf16_ps(const void *__A) {
+ return (__m256)__builtin_ia32_vbcstnebf162ps256((const __bf16 *)__A);
+}
+
+/// Convert scalar half-precision (16-bit) floating-point element
+/// stored at memory locations starting at location \a __A to a
+/// single-precision (32-bit) floating-point, broadcast it to packed
+/// single-precision (32-bit) floating-point elements, and store the results in
+/// \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm_bcstnesh_ps(const void *__A);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VBCSTNESH2PS instruction.
+///
+/// \param __A
+/// A pointer to a 16-bit memory location. The address of the memory
+/// location does not have to be aligned.
+/// \returns
+/// A 128-bit vector of [4 x float].
+///
+/// \code{.operation}
+/// b := Convert_FP16_To_FP32(MEM[__A+15:__A])
+/// FOR j := 0 to 3
+/// m := j*32
+/// dst[m+31:m] := b
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_bcstnesh_ps(const void *__A) {
+ return (__m128)__builtin_ia32_vbcstnesh2ps128((const _Float16 *)__A);
+}
+
+/// Convert scalar half-precision (16-bit) floating-point element
+/// stored at memory locations starting at location \a __A to a
+/// single-precision (32-bit) floating-point, broadcast it to packed
+/// single-precision (32-bit) floating-point elements, and store the results in
+/// \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm256_bcstnesh_ps(const void *__A);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VBCSTNESH2PS instruction.
+///
+/// \param __A
+/// A pointer to a 16-bit memory location. The address of the memory
+/// location does not have to be aligned.
+/// \returns
+/// A 256-bit vector of [8 x float].
+///
+/// \code{.operation}
+/// b := Convert_FP16_To_FP32(MEM[__A+15:__A])
+/// FOR j := 0 to 7
+/// m := j*32
+/// dst[m+31:m] := b
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_bcstnesh_ps(const void *__A) {
+ return (__m256)__builtin_ia32_vbcstnesh2ps256((const _Float16 *)__A);
+}
+
+/// Convert packed BF16 (16-bit) floating-point even-indexed elements
+/// stored at memory locations starting at location \a __A to packed
+/// single-precision (32-bit) floating-point elements, and store the results in
+/// \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm_cvtneebf16_ps(const __m128bh *__A);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCVTNEEBF162PS instruction.
+///
+/// \param __A
+/// A pointer to a 128-bit memory location containing 8 consecutive
+/// BF16 (16-bit) floating-point values.
+/// \returns
+/// A 128-bit vector of [4 x float].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// k := j*2
+/// i := k*16
+/// m := j*32
+/// dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+i+15:__A+i])
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_cvtneebf16_ps(const __m128bh *__A) {
+ return (__m128)__builtin_ia32_vcvtneebf162ps128((const __v8bf *)__A);
+}
+
+/// Convert packed BF16 (16-bit) floating-point even-indexed elements
+/// stored at memory locations starting at location \a __A to packed
+/// single-precision (32-bit) floating-point elements, and store the results in
+/// \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm256_cvtneebf16_ps(const __m256bh *__A);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCVTNEEBF162PS instruction.
+///
+/// \param __A
+/// A pointer to a 256-bit memory location containing 16 consecutive
+/// BF16 (16-bit) floating-point values.
+/// \returns
+/// A 256-bit vector of [8 x float].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// k := j*2
+/// i := k*16
+/// m := j*32
+/// dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+i+15:__A+i])
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_cvtneebf16_ps(const __m256bh *__A) {
+ return (__m256)__builtin_ia32_vcvtneebf162ps256((const __v16bf *)__A);
+}
+
+/// Convert packed half-precision (16-bit) floating-point even-indexed elements
+/// stored at memory locations starting at location \a __A to packed
+/// single-precision (32-bit) floating-point elements, and store the results in
+/// \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm_cvtneeph_ps(const __m128h *__A);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCVTNEEPH2PS instruction.
+///
+/// \param __A
+/// A pointer to a 128-bit memory location containing 8 consecutive
+/// half-precision (16-bit) floating-point values.
+/// \returns
+/// A 128-bit vector of [4 x float].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// k := j*2
+/// i := k*16
+/// m := j*32
+/// dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+i+15:__A+i])
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_cvtneeph_ps(const __m128h *__A) {
+ return (__m128)__builtin_ia32_vcvtneeph2ps128((const __v8hf *)__A);
+}
+
+/// Convert packed half-precision (16-bit) floating-point even-indexed elements
+/// stored at memory locations starting at location \a __A to packed
+/// single-precision (32-bit) floating-point elements, and store the results in
+/// \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm256_cvtneeph_ps(const __m256h *__A);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCVTNEEPH2PS instruction.
+///
+/// \param __A
+/// A pointer to a 256-bit memory location containing 16 consecutive
+/// half-precision (16-bit) floating-point values.
+/// \returns
+/// A 256-bit vector of [8 x float].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// k := j*2
+/// i := k*16
+/// m := j*32
+/// dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+i+15:__A+i])
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_cvtneeph_ps(const __m256h *__A) {
+ return (__m256)__builtin_ia32_vcvtneeph2ps256((const __v16hf *)__A);
+}
+
+/// Convert packed BF16 (16-bit) floating-point odd-indexed elements
+/// stored at memory locations starting at location \a __A to packed
+/// single-precision (32-bit) floating-point elements, and store the results in
+/// \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm_cvtneobf16_ps(const __m128bh *__A);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCVTNEOBF162PS instruction.
+///
+/// \param __A
+/// A pointer to a 128-bit memory location containing 8 consecutive
+/// BF16 (16-bit) floating-point values.
+/// \returns
+/// A 128-bit vector of [4 x float].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// k := j*2+1
+/// i := k*16
+/// m := j*32
+/// dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+i+15:__A+i])
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_cvtneobf16_ps(const __m128bh *__A) {
+ return (__m128)__builtin_ia32_vcvtneobf162ps128((const __v8bf *)__A);
+}
+
+/// Convert packed BF16 (16-bit) floating-point odd-indexed elements
+/// stored at memory locations starting at location \a __A to packed
+/// single-precision (32-bit) floating-point elements, and store the results in
+/// \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm256_cvtneobf16_ps(const __m256bh *__A);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCVTNEOBF162PS instruction.
+///
+/// \param __A
+/// A pointer to a 256-bit memory location containing 16 consecutive
+/// BF16 (16-bit) floating-point values.
+/// \returns
+/// A 256-bit vector of [8 x float].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// k := j*2+1
+/// i := k*16
+/// m := j*32
+/// dst[m+31:m] := Convert_BF16_To_FP32(MEM[__A+i+15:__A+i])
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_cvtneobf16_ps(const __m256bh *__A) {
+ return (__m256)__builtin_ia32_vcvtneobf162ps256((const __v16bf *)__A);
+}
+
+/// Convert packed half-precision (16-bit) floating-point odd-indexed elements
+/// stored at memory locations starting at location \a __A to packed
+/// single-precision (32-bit) floating-point elements, and store the results in
+/// \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm_cvtneoph_ps(const __m128h *__A);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCVTNEOPH2PS instruction.
+///
+/// \param __A
+/// A pointer to a 128-bit memory location containing 8 consecutive
+/// half-precision (16-bit) floating-point values.
+/// \returns
+/// A 128-bit vector of [4 x float].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// k := j*2+1
+/// i := k*16
+/// m := j*32
+/// dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+i+15:__A+i])
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128 __DEFAULT_FN_ATTRS128
+_mm_cvtneoph_ps(const __m128h *__A) {
+ return (__m128)__builtin_ia32_vcvtneoph2ps128((const __v8hf *)__A);
+}
+
+/// Convert packed half-precision (16-bit) floating-point odd-indexed elements
+/// stored at memory locations starting at location \a __A to packed
+/// single-precision (32-bit) floating-point elements, and store the results in
+/// \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm256_cvtneoph_ps(const __m256h *__A);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCVTNEOPH2PS instruction.
+///
+/// \param __A
+/// A pointer to a 256-bit memory location containing 16 consecutive
+/// half-precision (16-bit) floating-point values.
+/// \returns
+/// A 256-bit vector of [8 x float].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// k := j*2+1
+/// i := k*16
+/// m := j*32
+/// dst[m+31:m] := Convert_FP16_To_FP32(MEM[__A+i+15:__A+i])
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256 __DEFAULT_FN_ATTRS256
+_mm256_cvtneoph_ps(const __m256h *__A) {
+ return (__m256)__builtin_ia32_vcvtneoph2ps256((const __v16hf *)__A);
+}
+
+/// Convert packed single-precision (32-bit) floating-point elements in \a __A
+/// to packed BF16 (16-bit) floating-point elements, and store the results in \a
+/// dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm_cvtneps_avx_pbh(__m128 __A);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCVTNEPS2BF16 instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float].
+/// \returns
+/// A 128-bit vector of [8 x bfloat].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// dst.word[j] := Convert_FP32_To_BF16(__A.fp32[j])
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128bh __DEFAULT_FN_ATTRS128
+_mm_cvtneps_avx_pbh(__m128 __A) {
+ return (__m128bh)__builtin_ia32_vcvtneps2bf16128((__v4sf)__A);
+}
+
+/// Convert packed single-precision (32-bit) floating-point elements in \a __A
+/// to packed BF16 (16-bit) floating-point elements, and store the results in \a
+/// dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm256_cvtneps_avx_pbh(__m256 __A);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VCVTNEPS2BF16 instruction.
+///
+/// \param __A
+/// A 256-bit vector of [8 x float].
+/// \returns
+/// A 128-bit vector of [8 x bfloat].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// dst.word[j] := Convert_FP32_To_BF16(a.fp32[j])
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128bh __DEFAULT_FN_ATTRS256
+_mm256_cvtneps_avx_pbh(__m256 __A) {
+ return (__m128bh)__builtin_ia32_vcvtneps2bf16256((__v8sf)__A);
+}
+
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
+
+#endif // __AVXNECONVERTINTRIN_H
+#endif // __SSE2__
diff --git a/contrib/llvm-project/clang/lib/Headers/avxvnniint16intrin.h b/contrib/llvm-project/clang/lib/Headers/avxvnniint16intrin.h
new file mode 100644
index 000000000000..e4d342a8b45b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/avxvnniint16intrin.h
@@ -0,0 +1,473 @@
+/*===----------- avxvnniint16intrin.h - AVXVNNIINT16 intrinsics-------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error \
+ "Never use <avxvnniint16intrin.h> directly; include <immintrin.h> instead."
+#endif // __IMMINTRIN_H
+
+#ifndef __AVXVNNIINT16INTRIN_H
+#define __AVXVNNIINT16INTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, __target__("avxvnniint16"), \
+ __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, __target__("avxvnniint16"), \
+ __min_vector_width__(256)))
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// signed 16-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_dpwsud_epi32(__m128i __W, __m128i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWSUD instruction.
+///
+/// \param __W
+/// A 128-bit vector of [4 x int].
+/// \param __A
+/// A 128-bit vector of [8 x short].
+/// \param __B
+/// A 128-bit vector of [8 x unsigned short].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwsud_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpwsud128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// signed 16-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_dpwsud_epi32(__m256i __W, __m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWSUD instruction.
+///
+/// \param __W
+/// A 256-bit vector of [8 x int].
+/// \param __A
+/// A 256-bit vector of [16 x short].
+/// \param __B
+/// A 256-bit vector of [16 x unsigned short].
+/// \returns
+/// A 256-bit vector of [8 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwsud_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpwsud256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// signed 16-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W with signed saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_dpwsuds_epi32(__m128i __W, __m128i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWSUDS instruction.
+///
+/// \param __W
+/// A 128-bit vector of [4 x int].
+/// \param __A
+/// A 128-bit vector of [8 x short].
+/// \param __B
+/// A 128-bit vector of [8 x unsigned short].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwsuds_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpwsuds128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of signed 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// signed 16-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W with signed saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_dpwsuds_epi32(__m256i __W, __m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWSUDS instruction.
+///
+/// \param __W
+/// A 256-bit vector of [8 x int].
+/// \param __A
+/// A 256-bit vector of [16 x short].
+/// \param __B
+/// A 256-bit vector of [16 x unsigned short].
+/// \returns
+/// A 256-bit vector of [8 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.dword := SignExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwsuds_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpwsuds256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate
+/// signed 16-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_dpbusd_epi32(__m128i __W, __m128i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWUSD instruction.
+///
+/// \param __W
+/// A 128-bit vector of [4 x int].
+/// \param __A
+/// A 128-bit vector of [8 x unsigned short].
+/// \param __B
+/// A 128-bit vector of [8 x short].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwusd_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpwusd128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate
+/// signed 16-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_dpwusd_epi32(__m256i __W, __m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWUSD instruction.
+///
+/// \param __W
+/// A 256-bit vector of [8 x int].
+/// \param __A
+/// A 256-bit vector of [16 x unsigned short].
+/// \param __B
+/// A 256-bit vector of [16 x short].
+/// \returns
+/// A 256-bit vector of [8 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwusd_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpwusd256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate
+/// signed 16-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W with signed saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_dpwusds_epi32(__m128i __W, __m128i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWSUDS instruction.
+///
+/// \param __W
+/// A 128-bit vector of [4 x int].
+/// \param __A
+/// A 128-bit vector of [8 x unsigned short].
+/// \param __B
+/// A 128-bit vector of [8 x short].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
+/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwusds_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpwusds128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding signed 16-bit integers in \a __B, producing 2 intermediate
+/// signed 16-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W with signed saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_dpwsuds_epi32(__m256i __W, __m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWSUDS instruction.
+///
+/// \param __W
+/// A 256-bit vector of [8 x int].
+/// \param __A
+/// A 256-bit vector of [16 x unsigned short].
+/// \param __B
+/// A 256-bit vector of [16 x short].
+/// \returns
+/// A 256-bit vector of [8 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
+/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwusds_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpwusds256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// signed 16-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_dpwuud_epi32(__m128i __W, __m128i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWUUD instruction.
+///
+/// \param __W
+/// A 128-bit vector of [4 x unsigned int].
+/// \param __A
+/// A 128-bit vector of [8 x unsigned short].
+/// \param __B
+/// A 128-bit vector of [8 x unsigned short].
+/// \returns
+/// A 128-bit vector of [4 x unsigned int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwuud_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpwuud128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// signed 16-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_dpwuud_epi32(__m256i __W, __m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWUUD instruction.
+///
+/// \param __W
+/// A 256-bit vector of [8 x unsigned int].
+/// \param __A
+/// A 256-bit vector of [16 x unsigned short].
+/// \param __B
+/// A 256-bit vector of [16 x unsigned short].
+/// \returns
+/// A 256-bit vector of [8 x unsigned int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwuud_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpwuud256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// signed 16-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W with signed saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_dpwsuds_epi32(__m128i __W, __m128i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWSUDS instruction.
+///
+/// \param __W
+/// A 128-bit vector of [4 x unsigned int].
+/// \param __A
+/// A 128-bit vector of [8 x unsigned short].
+/// \param __B
+/// A 128-bit vector of [8 x unsigned short].
+/// \returns
+/// A 128-bit vector of [4 x unsigned int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpwuuds_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpwuuds128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 2 adjacent pairs of unsigned 16-bit integers in \a __A with
+/// corresponding unsigned 16-bit integers in \a __B, producing 2 intermediate
+/// signed 16-bit results. Sum these 2 results with the corresponding
+/// 32-bit integer in \a __W with signed saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_dpwuuds_epi32(__m256i __W, __m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPWSUDS instruction.
+///
+/// \param __W
+/// A 256-bit vector of [8 x unsigned int].
+/// \param __A
+/// A 256-bit vector of [16 x unsigned short].
+/// \param __B
+/// A 256-bit vector of [16 x unsigned short].
+/// \returns
+/// A 256-bit vector of [8 x unsigned int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.dword := ZeroExtend32(__A.word[2*j]) * ZeroExtend32(__B.word[2*j])
+/// tmp2.dword := ZeroExtend32(__A.word[2*j+1]) * ZeroExtend32(__B.word[2*j+1])
+/// dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2)
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpwuuds_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpwuuds256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
+
+#endif // __AVXVNNIINT16INTRIN_H
diff --git a/contrib/llvm-project/clang/lib/Headers/avxvnniint8intrin.h b/contrib/llvm-project/clang/lib/Headers/avxvnniint8intrin.h
new file mode 100644
index 000000000000..b0b6cb853f71
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/avxvnniint8intrin.h
@@ -0,0 +1,471 @@
+/*===-------- avxvnniint8intrin.h - AVXVNNIINT8 intrinsics -----------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error \
+ "Never use <avxvnniint8intrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __AVXVNNIINT8INTRIN_H
+#define __AVXVNNIINT8INTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, __target__("avxvnniint8"), \
+ __min_vector_width__(256)))
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, __target__("avxvnniint8"), \
+ __min_vector_width__(128)))
+
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with
+/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate
+/// signed 16-bit results. Sum these 4 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm_dpbssd_epi32(__m128i __W, __m128i __A, __m128i __B);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPBSSD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [16 x char].
+/// \param __B
+/// A 128-bit vector of [16 x char].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j])
+/// tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1])
+/// tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2])
+/// tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3])
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbssd_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpbssd128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with
+/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate
+/// signed 16-bit results. Sum these 4 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm256_dpbssd_epi32(__m256i __W, __m256i __A, __m256i __B);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPBSSD instruction.
+///
+/// \param __A
+/// A 256-bit vector of [32 x char].
+/// \param __B
+/// A 256-bit vector of [32 x char].
+/// \returns
+/// A 256-bit vector of [8 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j])
+/// tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1])
+/// tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2])
+/// tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3])
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpbssd_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpbssd256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with
+/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate
+/// signed 16-bit results. Sum these 4 results with the corresponding
+/// 32-bit integer in \a __W with signed saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm_dpbssds_epi32( __m128i __W, __m128i __A, __m128i __B);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPBSSD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [16 x char].
+/// \param __B
+/// A 128-bit vector of [16 x char].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j])
+/// tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1])
+/// tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2])
+/// tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3])
+/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbssds_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpbssds128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with
+/// corresponding signed 8-bit integers in \a __B, producing 4 intermediate
+/// signed 16-bit results. Sum these 4 results with the corresponding
+/// 32-bit integer in \a __W with signed saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm256_dpbssds_epi32(__m256i __W, __m256i __A, __m256i __B);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPBSSD instruction.
+///
+/// \param __A
+/// A 256-bit vector of [32 x char].
+/// \param __B
+/// A 256-bit vector of [32 x char].
+/// \returns
+/// A 256-bit vector of [8 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.word := SignExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j])
+/// tmp2.word := SignExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1])
+/// tmp3.word := SignExtend16(__A.byte[4*j+2]) * SignExtend16(__B.byte[4*j+2])
+/// tmp4.word := SignExtend16(__A.byte[4*j+3]) * SignExtend16(__B.byte[4*j+3])
+/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpbssds_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpbssds256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with
+/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate
+/// signed 16-bit results. Sum these 4 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm_dpbsud_epi32(__m128i __W, __m128i __A, __m128i __B);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPBSSD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [16 x char].
+/// \param __B
+/// A 128-bit vector of [16 x unsigned char].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]))
+/// tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]))
+/// tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]))
+/// tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]))
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbsud_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpbsud128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with
+/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate
+/// signed 16-bit results. Sum these 4 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm256_dpbsud_epi32(__m256i __W, __m256i __A, __m256i __B);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPBSSD instruction.
+///
+/// \param __A
+/// A 256-bit vector of [32 x char].
+/// \param __B
+/// A 256-bit vector of [32 x unsigned char].
+/// \returns
+/// A 256-bit vector of [8 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]))
+/// tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]))
+/// tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]))
+/// tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]))
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpbsud_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpbsud256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with
+/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate
+/// signed 16-bit results. Sum these 4 results with the corresponding
+/// 32-bit integer in \a __W with signed saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm_dpbsuds_epi32( __m128i __W, __m128i __A, __m128i __B);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPBSSD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [16 x char].
+/// \param __B
+/// A 128-bit vector of [16 x unsigned char].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]))
+/// tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]))
+/// tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]))
+/// tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]))
+/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbsuds_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpbsuds128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with
+/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate
+/// signed 16-bit results. Sum these 4 results with the corresponding
+/// 32-bit integer in \a __W with signed saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm256_dpbsuds_epi32(__m256i __W, __m256i __A, __m256i __B);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPBSSD instruction.
+///
+/// \param __A
+/// A 256-bit vector of [32 x char].
+/// \param __B
+/// A 256-bit vector of [32 x unsigned char].
+/// \returns
+/// A 256-bit vector of [8 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.word := Signed(SignExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j]))
+/// tmp2.word := Signed(SignExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1]))
+/// tmp3.word := Signed(SignExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2]))
+/// tmp4.word := Signed(SignExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3]))
+/// dst.dword[j] := SIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpbsuds_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpbsuds256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with
+/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate
+/// signed 16-bit results. Sum these 4 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm_dpbuud_epi32(__m128i __W, __m128i __A, __m128i __B);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPBSSD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [16 x unsigned char].
+/// \param __B
+/// A 128-bit vector of [16 x unsigned char].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])
+/// tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])
+/// tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])
+/// tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbuud_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpbuud128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with
+/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate
+/// signed 16-bit results. Sum these 4 results with the corresponding
+/// 32-bit integer in \a __W, and store the packed 32-bit results in \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm256_dpbuud_epi32(__m256i __W, __m256i __A, __m256i __B);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPBSSD instruction.
+///
+/// \param __A
+/// A 256-bit vector of [32 x unsigned char].
+/// \param __B
+/// A 256-bit vector of [32 x unsigned char].
+/// \returns
+/// A 256-bit vector of [8 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])
+/// tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])
+/// tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])
+/// tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])
+/// dst.dword[j] := __W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpbuud_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpbuud256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in \a __A with
+/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate
+/// signed 16-bit results. Sum these 4 results with the corresponding
+/// 32-bit integer in \a __W with signed saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm_dpbuuds_epi32( __m128i __W, __m128i __A, __m128i __B);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPBUUDS instruction.
+///
+/// \param __A
+/// A 128-bit vector of [16 x unsigned char].
+/// \param __B
+/// A 128-bit vector of [16 x unsigned char].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 3
+/// tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])
+/// tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])
+/// tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])
+/// tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])
+/// dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
+/// ENDFOR
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_dpbuuds_epi32(__m128i __W,
+ __m128i __A,
+ __m128i __B) {
+ return (__m128i)__builtin_ia32_vpdpbuuds128((__v4si)__W, (__v4si)__A,
+ (__v4si)__B);
+}
+
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in \a __A with
+/// corresponding unsigned 8-bit integers in \a __B, producing 4 intermediate
+/// signed 16-bit results. Sum these 4 results with the corresponding
+/// 32-bit integer in \a __W with signed saturation, and store the packed
+/// 32-bit results in \a dst.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// _mm256_dpbuuds_epi32(__m256i __W, __m256i __A, __m256i __B);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VPDPBUUDS instruction.
+///
+/// \param __A
+/// A 256-bit vector of [32 x unsigned char].
+/// \param __B
+/// A 256-bit vector of [32 x unsigned char].
+/// \returns
+/// A 256-bit vector of [8 x int].
+///
+/// \code{.operation}
+/// FOR j := 0 to 7
+/// tmp1.word := ZeroExtend16(__A.byte[4*j]) * ZeroExtend16(__B.byte[4*j])
+/// tmp2.word := ZeroExtend16(__A.byte[4*j+1]) * ZeroExtend16(__B.byte[4*j+1])
+/// tmp3.word := ZeroExtend16(__A.byte[4*j+2]) * ZeroExtend16(__B.byte[4*j+2])
+/// tmp4.word := ZeroExtend16(__A.byte[4*j+3]) * ZeroExtend16(__B.byte[4*j+3])
+/// dst.dword[j] := UNSIGNED_DWORD_SATURATE(__W.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
+/// ENDFOR
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_dpbuuds_epi32(__m256i __W, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vpdpbuuds256((__v8si)__W, (__v8si)__A,
+ (__v8si)__B);
+}
+#undef __DEFAULT_FN_ATTRS128
+#undef __DEFAULT_FN_ATTRS256
+
+#endif // __AVXVNNIINT8INTRIN_H
diff --git a/contrib/llvm-project/clang/lib/Headers/avxvnniintrin.h b/contrib/llvm-project/clang/lib/Headers/avxvnniintrin.h
index ad45cb7962e5..b7de562b57c0 100644
--- a/contrib/llvm-project/clang/lib/Headers/avxvnniintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avxvnniintrin.h
@@ -50,7 +50,7 @@
///
/// This intrinsic corresponds to the <c> VPDPBUSD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))
/// tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))
@@ -59,7 +59,7 @@
/// DST.dword[j] := __S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
/// ENDFOR
/// DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_dpbusd_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
{
@@ -73,7 +73,7 @@ _mm256_dpbusd_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
///
/// This intrinsic corresponds to the <c> VPDPBUSDS </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))
/// tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))
@@ -82,7 +82,7 @@ _mm256_dpbusd_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
/// DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
/// ENDFOR
/// DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_dpbusds_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
{
@@ -96,14 +96,14 @@ _mm256_dpbusds_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
///
/// This intrinsic corresponds to the <c> VPDPWSSD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
/// DST.dword[j] := __S.dword[j] + tmp1 + tmp2
/// ENDFOR
/// DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_dpwssd_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
{
@@ -117,14 +117,14 @@ _mm256_dpwssd_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
///
/// This intrinsic corresponds to the <c> VPDPWSSDS </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
/// DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2)
/// ENDFOR
/// DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_dpwssds_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
{
@@ -138,7 +138,7 @@ _mm256_dpwssds_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
///
/// This intrinsic corresponds to the <c> VPDPBUSD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 3
/// tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))
/// tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))
@@ -147,7 +147,7 @@ _mm256_dpwssds_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
/// DST.dword[j] := __S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
/// ENDFOR
/// DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_dpbusd_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
{
@@ -161,7 +161,7 @@ _mm_dpbusd_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
///
/// This intrinsic corresponds to the <c> VPDPBUSDS </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 3
/// tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))
/// tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))
@@ -170,7 +170,7 @@ _mm_dpbusd_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
/// DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
/// ENDFOR
/// DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_dpbusds_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
{
@@ -184,14 +184,14 @@ _mm_dpbusds_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
///
/// This intrinsic corresponds to the <c> VPDPWSSD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 3
/// tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
/// DST.dword[j] := __S.dword[j] + tmp1 + tmp2
/// ENDFOR
/// DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_dpwssd_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
{
@@ -205,14 +205,14 @@ _mm_dpwssd_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
///
/// This intrinsic corresponds to the <c> VPDPWSSDS </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 3
/// tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
/// DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2)
/// ENDFOR
/// DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_dpwssds_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
{
diff --git a/contrib/llvm-project/clang/lib/Headers/bmi2intrin.h b/contrib/llvm-project/clang/lib/Headers/bmi2intrin.h
index 0b56aed5f4cb..f0a3343bef91 100644
--- a/contrib/llvm-project/clang/lib/Headers/bmi2intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/bmi2intrin.h
@@ -7,8 +7,8 @@
*===-----------------------------------------------------------------------===
*/
-#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
-#error "Never use <bmi2intrin.h> directly; include <x86intrin.h> instead."
+#ifndef __IMMINTRIN_H
+#error "Never use <bmi2intrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef __BMI2INTRIN_H
@@ -17,44 +17,228 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi2")))
+/// Copies the unsigned 32-bit integer \a __X and zeroes the upper bits
+/// starting at bit number \a __Y.
+///
+/// \code{.operation}
+/// i := __Y[7:0]
+/// result := __X
+/// IF i < 32
+/// result[31:i] := 0
+/// FI
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c BZHI instruction.
+///
+/// \param __X
+/// The 32-bit source value to copy.
+/// \param __Y
+/// The lower 8 bits specify the bit number of the lowest bit to zero.
+/// \returns The partially zeroed 32-bit value.
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_bzhi_u32(unsigned int __X, unsigned int __Y)
{
return __builtin_ia32_bzhi_si(__X, __Y);
}
+/// Deposit (scatter) low-order bits from the unsigned 32-bit integer \a __X
+/// into the 32-bit result, according to the mask in the unsigned 32-bit
+/// integer \a __Y. All other bits of the result are zero.
+///
+/// \code{.operation}
+/// i := 0
+/// result := 0
+/// FOR m := 0 TO 31
+/// IF __Y[m] == 1
+/// result[m] := __X[i]
+/// i := i + 1
+/// ENDIF
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c PDEP instruction.
+///
+/// \param __X
+/// The 32-bit source value to copy.
+/// \param __Y
+/// The 32-bit mask specifying where to deposit source bits.
+/// \returns The 32-bit result.
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_pdep_u32(unsigned int __X, unsigned int __Y)
{
return __builtin_ia32_pdep_si(__X, __Y);
}
+/// Extract (gather) bits from the unsigned 32-bit integer \a __X into the
+/// low-order bits of the 32-bit result, according to the mask in the
+/// unsigned 32-bit integer \a __Y. All other bits of the result are zero.
+///
+/// \code{.operation}
+/// i := 0
+/// result := 0
+/// FOR m := 0 TO 31
+/// IF __Y[m] == 1
+/// result[i] := __X[m]
+/// i := i + 1
+/// ENDIF
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c PEXT instruction.
+///
+/// \param __X
+/// The 32-bit source value to copy.
+/// \param __Y
+/// The 32-bit mask specifying which source bits to extract.
+/// \returns The 32-bit result.
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_pext_u32(unsigned int __X, unsigned int __Y)
{
return __builtin_ia32_pext_si(__X, __Y);
}
+/// Multiplies the unsigned 32-bit integers \a __X and \a __Y to form a
+/// 64-bit product. Stores the upper 32 bits of the product in the
+/// memory at \a __P and returns the lower 32 bits.
+///
+/// \code{.operation}
+/// Store32(__P, (__X * __Y)[63:32])
+/// result := (__X * __Y)[31:0]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c MULX instruction.
+///
+/// \param __X
+/// An unsigned 32-bit multiplicand.
+/// \param __Y
+/// An unsigned 32-bit multiplicand.
+/// \param __P
+/// A pointer to memory for storing the upper half of the product.
+/// \returns The lower half of the product.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mulx_u32(unsigned int __X, unsigned int __Y, unsigned int *__P)
+{
+ unsigned long long __res = (unsigned long long) __X * __Y;
+ *__P = (unsigned int)(__res >> 32);
+ return (unsigned int)__res;
+}
+
#ifdef __x86_64__
+/// Copies the unsigned 64-bit integer \a __X and zeroes the upper bits
+/// starting at bit number \a __Y.
+///
+/// \code{.operation}
+/// i := __Y[7:0]
+/// result := __X
+/// IF i < 64
+/// result[63:i] := 0
+/// FI
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c BZHI instruction.
+///
+/// \param __X
+/// The 64-bit source value to copy.
+/// \param __Y
+/// The lower 8 bits specify the bit number of the lowest bit to zero.
+/// \returns The partially zeroed 64-bit value.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
_bzhi_u64(unsigned long long __X, unsigned long long __Y)
{
return __builtin_ia32_bzhi_di(__X, __Y);
}
+/// Deposit (scatter) low-order bits from the unsigned 64-bit integer \a __X
+/// into the 64-bit result, according to the mask in the unsigned 64-bit
+/// integer \a __Y. All other bits of the result are zero.
+///
+/// \code{.operation}
+/// i := 0
+/// result := 0
+/// FOR m := 0 TO 63
+/// IF __Y[m] == 1
+/// result[m] := __X[i]
+/// i := i + 1
+/// ENDIF
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c PDEP instruction.
+///
+/// \param __X
+/// The 64-bit source value to copy.
+/// \param __Y
+/// The 64-bit mask specifying where to deposit source bits.
+/// \returns The 64-bit result.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
_pdep_u64(unsigned long long __X, unsigned long long __Y)
{
return __builtin_ia32_pdep_di(__X, __Y);
}
+/// Extract (gather) bits from the unsigned 64-bit integer \a __X into the
+/// low-order bits of the 64-bit result, according to the mask in the
+/// unsigned 64-bit integer \a __Y. All other bits of the result are zero.
+///
+/// \code{.operation}
+/// i := 0
+/// result := 0
+/// FOR m := 0 TO 63
+/// IF __Y[m] == 1
+/// result[i] := __X[m]
+/// i := i + 1
+/// ENDIF
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c PEXT instruction.
+///
+/// \param __X
+/// The 64-bit source value to copy.
+/// \param __Y
+/// The 64-bit mask specifying which source bits to extract.
+/// \returns The 64-bit result.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
_pext_u64(unsigned long long __X, unsigned long long __Y)
{
return __builtin_ia32_pext_di(__X, __Y);
}
+/// Multiplies the unsigned 64-bit integers \a __X and \a __Y to form a
+/// 128-bit product. Stores the upper 64 bits of the product to the
+/// memory addressed by \a __P and returns the lower 64 bits.
+///
+/// \code{.operation}
+/// Store64(__P, (__X * __Y)[127:64])
+/// result := (__X * __Y)[63:0]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c MULX instruction.
+///
+/// \param __X
+/// An unsigned 64-bit multiplicand.
+/// \param __Y
+/// An unsigned 64-bit multiplicand.
+/// \param __P
+/// A pointer to memory for storing the upper half of the product.
+/// \returns The lower half of the product.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
_mulx_u64 (unsigned long long __X, unsigned long long __Y,
unsigned long long *__P)
@@ -64,17 +248,7 @@ _mulx_u64 (unsigned long long __X, unsigned long long __Y,
return (unsigned long long) __res;
}
-#else /* !__x86_64__ */
-
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_mulx_u32 (unsigned int __X, unsigned int __Y, unsigned int *__P)
-{
- unsigned long long __res = (unsigned long long) __X * __Y;
- *__P = (unsigned int) (__res >> 32);
- return (unsigned int) __res;
-}
-
-#endif /* !__x86_64__ */
+#endif /* __x86_64__ */
#undef __DEFAULT_FN_ATTRS
diff --git a/contrib/llvm-project/clang/lib/Headers/bmiintrin.h b/contrib/llvm-project/clang/lib/Headers/bmiintrin.h
index f583c215f919..d8e57c0cb494 100644
--- a/contrib/llvm-project/clang/lib/Headers/bmiintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/bmiintrin.h
@@ -19,18 +19,17 @@
to use it as a potentially faster version of BSF. */
#define __RELAXED_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
-#define _tzcnt_u16(a) (__tzcnt_u16((a)))
-
/// Counts the number of trailing zero bits in the operand.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
+/// This intrinsic corresponds to the \c TZCNT instruction.
///
/// \param __X
/// An unsigned 16-bit integer whose trailing zeros are to be counted.
/// \returns An unsigned 16-bit integer containing the number of trailing zero
/// bits in the operand.
+/// \see _tzcnt_u16
static __inline__ unsigned short __RELAXED_FN_ATTRS
__tzcnt_u16(unsigned short __X)
{
@@ -41,12 +40,30 @@ __tzcnt_u16(unsigned short __X)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
+/// \code
+/// unsigned short _tzcnt_u16(unsigned short __X);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c TZCNT instruction.
+///
+/// \param __X
+/// An unsigned 16-bit integer whose trailing zeros are to be counted.
+/// \returns An unsigned 16-bit integer containing the number of trailing zero
+/// bits in the operand.
+/// \see __tzcnt_u16
+#define _tzcnt_u16 __tzcnt_u16
+
+/// Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c TZCNT instruction.
///
/// \param __X
/// An unsigned 32-bit integer whose trailing zeros are to be counted.
/// \returns An unsigned 32-bit integer containing the number of trailing zero
/// bits in the operand.
+/// \see { _mm_tzcnt_32 _tzcnt_u32 }
static __inline__ unsigned int __RELAXED_FN_ATTRS
__tzcnt_u32(unsigned int __X)
{
@@ -57,19 +74,35 @@ __tzcnt_u32(unsigned int __X)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
+/// This intrinsic corresponds to the \c TZCNT instruction.
///
/// \param __X
/// An unsigned 32-bit integer whose trailing zeros are to be counted.
-/// \returns An 32-bit integer containing the number of trailing zero bits in
+/// \returns A 32-bit integer containing the number of trailing zero bits in
/// the operand.
+/// \see { __tzcnt_u32 _tzcnt_u32 }
static __inline__ int __RELAXED_FN_ATTRS
_mm_tzcnt_32(unsigned int __X)
{
- return __builtin_ia32_tzcnt_u32(__X);
+ return (int)__builtin_ia32_tzcnt_u32(__X);
}
-#define _tzcnt_u32(a) (__tzcnt_u32((a)))
+/// Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned int _tzcnt_u32(unsigned int __X);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c TZCNT instruction.
+///
+/// \param __X
+/// An unsigned 32-bit integer whose trailing zeros are to be counted.
+/// \returns An unsigned 32-bit integer containing the number of trailing zero
+/// bits in the operand.
+/// \see { _mm_tzcnt_32 __tzcnt_u32 }
+#define _tzcnt_u32 __tzcnt_u32
#ifdef __x86_64__
@@ -77,12 +110,13 @@ _mm_tzcnt_32(unsigned int __X)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
+/// This intrinsic corresponds to the \c TZCNT instruction.
///
/// \param __X
/// An unsigned 64-bit integer whose trailing zeros are to be counted.
/// \returns An unsigned 64-bit integer containing the number of trailing zero
/// bits in the operand.
+/// \see { _mm_tzcnt_64 _tzcnt_u64 }
static __inline__ unsigned long long __RELAXED_FN_ATTRS
__tzcnt_u64(unsigned long long __X)
{
@@ -93,19 +127,35 @@ __tzcnt_u64(unsigned long long __X)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
+/// This intrinsic corresponds to the \c TZCNT instruction.
///
/// \param __X
/// An unsigned 64-bit integer whose trailing zeros are to be counted.
/// \returns An 64-bit integer containing the number of trailing zero bits in
/// the operand.
+/// \see { __tzcnt_u64 _tzcnt_u64 }
static __inline__ long long __RELAXED_FN_ATTRS
_mm_tzcnt_64(unsigned long long __X)
{
- return __builtin_ia32_tzcnt_u64(__X);
+ return (long long)__builtin_ia32_tzcnt_u64(__X);
}
-#define _tzcnt_u64(a) (__tzcnt_u64((a)))
+/// Counts the number of trailing zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned long long _tzcnt_u64(unsigned long long __X);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c TZCNT instruction.
+///
+/// \param __X
+/// An unsigned 64-bit integer whose trailing zeros are to be counted.
+/// \returns An unsigned 64-bit integer containing the number of trailing zero
+/// bits in the operand.
+/// \see { _mm_tzcnt_64 __tzcnt_u64
+#define _tzcnt_u64 __tzcnt_u64
#endif /* __x86_64__ */
@@ -117,21 +167,12 @@ _mm_tzcnt_64(unsigned long long __X)
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi")))
-#define _andn_u32(a, b) (__andn_u32((a), (b)))
-
-/* _bextr_u32 != __bextr_u32 */
-#define _blsi_u32(a) (__blsi_u32((a)))
-
-#define _blsmsk_u32(a) (__blsmsk_u32((a)))
-
-#define _blsr_u32(a) (__blsr_u32((a)))
-
/// Performs a bitwise AND of the second operand with the one's
/// complement of the first operand.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> ANDN </c> instruction.
+/// This intrinsic corresponds to the \c ANDN instruction.
///
/// \param __X
/// An unsigned integer containing one of the operands.
@@ -139,19 +180,40 @@ _mm_tzcnt_64(unsigned long long __X)
/// An unsigned integer containing one of the operands.
/// \returns An unsigned integer containing the bitwise AND of the second
/// operand with the one's complement of the first operand.
+/// \see _andn_u32
static __inline__ unsigned int __DEFAULT_FN_ATTRS
__andn_u32(unsigned int __X, unsigned int __Y)
{
return ~__X & __Y;
}
+/// Performs a bitwise AND of the second operand with the one's
+/// complement of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned int _andn_u32(unsigned int __X, unsigned int __Y);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c ANDN instruction.
+///
+/// \param __X
+/// An unsigned integer containing one of the operands.
+/// \param __Y
+/// An unsigned integer containing one of the operands.
+/// \returns An unsigned integer containing the bitwise AND of the second
+/// operand with the one's complement of the first operand.
+/// \see __andn_u32
+#define _andn_u32 __andn_u32
+
/* AMD-specified, double-leading-underscore version of BEXTR */
/// Extracts the specified bits from the first operand and returns them
/// in the least significant bits of the result.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
+/// This intrinsic corresponds to the \c BEXTR instruction.
///
/// \param __X
/// An unsigned integer whose bits are to be extracted.
@@ -174,7 +236,7 @@ __bextr_u32(unsigned int __X, unsigned int __Y)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
+/// This intrinsic corresponds to the \c BEXTR instruction.
///
/// \param __X
/// An unsigned integer whose bits are to be extracted.
@@ -199,7 +261,7 @@ _bextr_u32(unsigned int __X, unsigned int __Y, unsigned int __Z)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
+/// This intrinsic corresponds to the \c BEXTR instruction.
///
/// \param __X
/// An unsigned integer whose bits are to be extracted.
@@ -220,69 +282,117 @@ _bextr2_u32(unsigned int __X, unsigned int __Y) {
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> BLSI </c> instruction.
+/// This intrinsic corresponds to the \c BLSI instruction.
///
/// \param __X
/// An unsigned integer whose bits are to be cleared.
/// \returns An unsigned integer containing the result of clearing the bits from
/// the source operand.
+/// \see _blsi_u32
static __inline__ unsigned int __DEFAULT_FN_ATTRS
__blsi_u32(unsigned int __X)
{
return __X & -__X;
}
+/// Clears all bits in the source except for the least significant bit
+/// containing a value of 1 and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned int _blsi_u32(unsigned int __X);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c BLSI instruction.
+///
+/// \param __X
+/// An unsigned integer whose bits are to be cleared.
+/// \returns An unsigned integer containing the result of clearing the bits from
+/// the source operand.
+/// \see __blsi_u32
+#define _blsi_u32 __blsi_u32
+
/// Creates a mask whose bits are set to 1, using bit 0 up to and
/// including the least significant bit that is set to 1 in the source
/// operand and returns the result.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> BLSMSK </c> instruction.
+/// This intrinsic corresponds to the \c BLSMSK instruction.
///
/// \param __X
/// An unsigned integer used to create the mask.
/// \returns An unsigned integer containing the newly created mask.
+/// \see _blsmsk_u32
static __inline__ unsigned int __DEFAULT_FN_ATTRS
__blsmsk_u32(unsigned int __X)
{
return __X ^ (__X - 1);
}
+/// Creates a mask whose bits are set to 1, using bit 0 up to and
+/// including the least significant bit that is set to 1 in the source
+/// operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned int _blsmsk_u32(unsigned int __X);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c BLSMSK instruction.
+///
+/// \param __X
+/// An unsigned integer used to create the mask.
+/// \returns An unsigned integer containing the newly created mask.
+/// \see __blsmsk_u32
+#define _blsmsk_u32 __blsmsk_u32
+
/// Clears the least significant bit that is set to 1 in the source
/// operand and returns the result.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> BLSR </c> instruction.
+/// This intrinsic corresponds to the \c BLSR instruction.
///
/// \param __X
/// An unsigned integer containing the operand to be cleared.
/// \returns An unsigned integer containing the result of clearing the source
/// operand.
+/// \see _blsr_u32
static __inline__ unsigned int __DEFAULT_FN_ATTRS
__blsr_u32(unsigned int __X)
{
return __X & (__X - 1);
}
-#ifdef __x86_64__
-
-#define _andn_u64(a, b) (__andn_u64((a), (b)))
-
-/* _bextr_u64 != __bextr_u64 */
-#define _blsi_u64(a) (__blsi_u64((a)))
-
-#define _blsmsk_u64(a) (__blsmsk_u64((a)))
+/// Clears the least significant bit that is set to 1 in the source
+/// operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned int _bls4_u32(unsigned int __X);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c BLSR instruction.
+///
+/// \param __X
+/// An unsigned integer containing the operand to be cleared.
+/// \returns An unsigned integer containing the result of clearing the source
+/// operand.
+/// \see __blsr_u32
+#define _blsr_u32 __blsr_u32
-#define _blsr_u64(a) (__blsr_u64((a)))
+#ifdef __x86_64__
/// Performs a bitwise AND of the second operand with the one's
/// complement of the first operand.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> ANDN </c> instruction.
+/// This intrinsic corresponds to the \c ANDN instruction.
///
/// \param __X
/// An unsigned 64-bit integer containing one of the operands.
@@ -290,19 +400,41 @@ __blsr_u32(unsigned int __X)
/// An unsigned 64-bit integer containing one of the operands.
/// \returns An unsigned 64-bit integer containing the bitwise AND of the second
/// operand with the one's complement of the first operand.
+/// \see _andn_u64
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
__andn_u64 (unsigned long long __X, unsigned long long __Y)
{
return ~__X & __Y;
}
+/// Performs a bitwise AND of the second operand with the one's
+/// complement of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned long long _andn_u64(unsigned long long __X,
+/// unsigned long long __Y);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c ANDN instruction.
+///
+/// \param __X
+/// An unsigned 64-bit integer containing one of the operands.
+/// \param __Y
+/// An unsigned 64-bit integer containing one of the operands.
+/// \returns An unsigned 64-bit integer containing the bitwise AND of the second
+/// operand with the one's complement of the first operand.
+/// \see __andn_u64
+#define _andn_u64 __andn_u64
+
/* AMD-specified, double-leading-underscore version of BEXTR */
/// Extracts the specified bits from the first operand and returns them
/// in the least significant bits of the result.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
+/// This intrinsic corresponds to the \c BEXTR instruction.
///
/// \param __X
/// An unsigned 64-bit integer whose bits are to be extracted.
@@ -325,7 +457,7 @@ __bextr_u64(unsigned long long __X, unsigned long long __Y)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
+/// This intrinsic corresponds to the \c BEXTR instruction.
///
/// \param __X
/// An unsigned 64-bit integer whose bits are to be extracted.
@@ -350,7 +482,7 @@ _bextr_u64(unsigned long long __X, unsigned int __Y, unsigned int __Z)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
+/// This intrinsic corresponds to the \c BEXTR instruction.
///
/// \param __X
/// An unsigned 64-bit integer whose bits are to be extracted.
@@ -371,52 +503,109 @@ _bextr2_u64(unsigned long long __X, unsigned long long __Y) {
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> BLSI </c> instruction.
+/// This intrinsic corresponds to the \c BLSI instruction.
///
/// \param __X
/// An unsigned 64-bit integer whose bits are to be cleared.
/// \returns An unsigned 64-bit integer containing the result of clearing the
/// bits from the source operand.
+/// \see _blsi_u64
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
__blsi_u64(unsigned long long __X)
{
return __X & -__X;
}
+/// Clears all bits in the source except for the least significant bit
+/// containing a value of 1 and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned long long _blsi_u64(unsigned long long __X);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c BLSI instruction.
+///
+/// \param __X
+/// An unsigned 64-bit integer whose bits are to be cleared.
+/// \returns An unsigned 64-bit integer containing the result of clearing the
+/// bits from the source operand.
+/// \see __blsi_u64
+#define _blsi_u64 __blsi_u64
+
/// Creates a mask whose bits are set to 1, using bit 0 up to and
/// including the least significant bit that is set to 1 in the source
/// operand and returns the result.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> BLSMSK </c> instruction.
+/// This intrinsic corresponds to the \c BLSMSK instruction.
///
/// \param __X
/// An unsigned 64-bit integer used to create the mask.
/// \returns An unsigned 64-bit integer containing the newly created mask.
+/// \see _blsmsk_u64
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
__blsmsk_u64(unsigned long long __X)
{
return __X ^ (__X - 1);
}
+/// Creates a mask whose bits are set to 1, using bit 0 up to and
+/// including the least significant bit that is set to 1 in the source
+/// operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned long long _blsmsk_u64(unsigned long long __X);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c BLSMSK instruction.
+///
+/// \param __X
+/// An unsigned 64-bit integer used to create the mask.
+/// \returns An unsigned 64-bit integer containing the newly created mask.
+/// \see __blsmsk_u64
+#define _blsmsk_u64 __blsmsk_u64
+
/// Clears the least significant bit that is set to 1 in the source
/// operand and returns the result.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> BLSR </c> instruction.
+/// This intrinsic corresponds to the \c BLSR instruction.
///
/// \param __X
/// An unsigned 64-bit integer containing the operand to be cleared.
/// \returns An unsigned 64-bit integer containing the result of clearing the
/// source operand.
+/// \see _blsr_u64
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
__blsr_u64(unsigned long long __X)
{
return __X & (__X - 1);
}
+/// Clears the least significant bit that is set to 1 in the source
+/// operand and returns the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned long long _blsr_u64(unsigned long long __X);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c BLSR instruction.
+///
+/// \param __X
+/// An unsigned 64-bit integer containing the operand to be cleared.
+/// \returns An unsigned 64-bit integer containing the result of clearing the
+/// source operand.
+/// \see __blsr_u64
+#define _blsr_u64 __blsr_u64
+
#endif /* __x86_64__ */
#undef __DEFAULT_FN_ATTRS
diff --git a/contrib/llvm-project/clang/lib/Headers/cetintrin.h b/contrib/llvm-project/clang/lib/Headers/cetintrin.h
index 4290e9d7355b..a68df5b1d2e7 100644
--- a/contrib/llvm-project/clang/lib/Headers/cetintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/cetintrin.h
@@ -19,7 +19,7 @@
__attribute__((__always_inline__, __nodebug__, __target__("shstk")))
static __inline__ void __DEFAULT_FN_ATTRS _incsspd(int __a) {
- __builtin_ia32_incsspd(__a);
+ __builtin_ia32_incsspd((unsigned int)__a);
}
#ifdef __x86_64__
@@ -34,7 +34,7 @@ static __inline__ void __DEFAULT_FN_ATTRS _inc_ssp(unsigned int __a) {
}
#else /* __x86_64__ */
static __inline__ void __DEFAULT_FN_ATTRS _inc_ssp(unsigned int __a) {
- __builtin_ia32_incsspd((int)__a);
+ __builtin_ia32_incsspd(__a);
}
#endif /* __x86_64__ */
@@ -42,10 +42,26 @@ static __inline__ unsigned int __DEFAULT_FN_ATTRS _rdsspd(unsigned int __a) {
return __builtin_ia32_rdsspd(__a);
}
+static __inline__ unsigned int __DEFAULT_FN_ATTRS _rdsspd_i32(void) {
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wuninitialized"
+ unsigned int t;
+ return __builtin_ia32_rdsspd(t);
+#pragma clang diagnostic pop
+}
+
#ifdef __x86_64__
static __inline__ unsigned long long __DEFAULT_FN_ATTRS _rdsspq(unsigned long long __a) {
return __builtin_ia32_rdsspq(__a);
}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS _rdsspq_i64(void) {
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wuninitialized"
+ unsigned long long t;
+ return __builtin_ia32_rdsspq(t);
+#pragma clang diagnostic pop
+}
#endif /* __x86_64__ */
#ifdef __x86_64__
@@ -58,7 +74,7 @@ static __inline__ unsigned int __DEFAULT_FN_ATTRS _get_ssp(void) {
}
#endif /* __x86_64__ */
-static __inline__ void __DEFAULT_FN_ATTRS _saveprevssp() {
+static __inline__ void __DEFAULT_FN_ATTRS _saveprevssp(void) {
__builtin_ia32_saveprevssp();
}
@@ -86,7 +102,7 @@ static __inline__ void __DEFAULT_FN_ATTRS _wrussq(unsigned long long __a, void *
}
#endif /* __x86_64__ */
-static __inline__ void __DEFAULT_FN_ATTRS _setssbsy() {
+static __inline__ void __DEFAULT_FN_ATTRS _setssbsy(void) {
__builtin_ia32_setssbsy();
}
diff --git a/contrib/llvm-project/clang/lib/Headers/clflushoptintrin.h b/contrib/llvm-project/clang/lib/Headers/clflushoptintrin.h
index 060eb36f30f9..ae0a0244c497 100644
--- a/contrib/llvm-project/clang/lib/Headers/clflushoptintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/clflushoptintrin.h
@@ -17,6 +17,15 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("clflushopt")))
+/// Invalidates all levels of the cache hierarchy and flushes modified data to
+/// memory for the cache line specified by the address \a __m.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c CLFLUSHOPT instruction.
+///
+/// \param __m
+/// An address within the cache line to flush and invalidate.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_clflushopt(void const * __m) {
__builtin_ia32_clflushopt(__m);
diff --git a/contrib/llvm-project/clang/lib/Headers/clzerointrin.h b/contrib/llvm-project/clang/lib/Headers/clzerointrin.h
index a180984a3f28..acccfe94ff31 100644
--- a/contrib/llvm-project/clang/lib/Headers/clzerointrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/clzerointrin.h
@@ -6,7 +6,7 @@
*
*===-----------------------------------------------------------------------===
*/
-#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#ifndef __X86INTRIN_H
#error "Never use <clzerointrin.h> directly; include <x86intrin.h> instead."
#endif
@@ -17,14 +17,16 @@
#define __DEFAULT_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, __target__("clzero")))
-/// Loads the cache line address and zero's out the cacheline
+/// Zeroes out the cache line for the address \a __line. This uses a
+/// non-temporal store. Calling \c _mm_sfence() afterward might be needed
+/// to enforce ordering.
///
-/// \headerfile <clzerointrin.h>
+/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> CLZERO </c> instruction.
+/// This intrinsic corresponds to the \c CLZERO instruction.
///
/// \param __line
-/// A pointer to a cacheline which needs to be zeroed out.
+/// An address within the cache line to zero out.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_clzero (void * __line)
{
diff --git a/contrib/llvm-project/clang/lib/Headers/cmpccxaddintrin.h b/contrib/llvm-project/clang/lib/Headers/cmpccxaddintrin.h
new file mode 100644
index 000000000000..6957498996c8
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/cmpccxaddintrin.h
@@ -0,0 +1,70 @@
+/*===--------------- cmpccxaddintrin.h - CMPCCXADD intrinsics--------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __X86GPRINTRIN_H
+#error \
+ "Never use <cmpccxaddintrin.h> directly; include <x86gprintrin.h> instead."
+#endif // __X86GPRINTRIN_H
+
+#ifndef __CMPCCXADDINTRIN_H
+#define __CMPCCXADDINTRIN_H
+#ifdef __x86_64__
+
+typedef enum {
+ _CMPCCX_O, /* Overflow. */
+ _CMPCCX_NO, /* No overflow. */
+ _CMPCCX_B, /* Below. */
+ _CMPCCX_NB, /* Not below. */
+ _CMPCCX_Z, /* Zero. */
+ _CMPCCX_NZ, /* Not zero. */
+ _CMPCCX_BE, /* Below or equal. */
+ _CMPCCX_NBE, /* Neither below nor equal. */
+ _CMPCCX_S, /* Sign. */
+ _CMPCCX_NS, /* No sign. */
+ _CMPCCX_P, /* Parity. */
+ _CMPCCX_NP, /* No parity. */
+ _CMPCCX_L, /* Less. */
+ _CMPCCX_NL, /* Not less. */
+ _CMPCCX_LE, /* Less or equal. */
+ _CMPCCX_NLE, /* Neither less nor equal. */
+} _CMPCCX_ENUM;
+
+/// Compares the value from the memory __A with the value of __B. If the
+/// specified condition __D is met, then add the third operand __C to the
+/// __A and write it into __A, else the value of __A is unchanged. The return
+/// value is the original value of __A.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c CMPCCXADD instructions.
+///
+/// \param __A
+/// __A pointer specifying the memory address.
+///
+/// \param __B
+/// A integer operand.
+///
+/// \param __C
+/// A integer operand.
+///
+/// \param __D
+/// The specified condition.
+///
+/// \returns a integer which is the original value of first operand.
+
+#define _cmpccxadd_epi32(__A, __B, __C, __D) \
+ ((int)(__builtin_ia32_cmpccxadd32((void *)(__A), (int)(__B), (int)(__C), \
+ (int)(__D))))
+
+#define _cmpccxadd_epi64(__A, __B, __C, __D) \
+ ((long long)(__builtin_ia32_cmpccxadd64((void *)(__A), (long long)(__B), \
+ (long long)(__C), (int)(__D))))
+
+#endif // __x86_64__
+#endif // __CMPCCXADDINTRIN_H
diff --git a/contrib/llvm-project/clang/lib/Headers/cpuid.h b/contrib/llvm-project/clang/lib/Headers/cpuid.h
index 34f0e76807c5..1ad6853a97c9 100644
--- a/contrib/llvm-project/clang/lib/Headers/cpuid.h
+++ b/contrib/llvm-project/clang/lib/Headers/cpuid.h
@@ -195,13 +195,23 @@
#define bit_PCONFIG 0x00040000
#define bit_IBT 0x00100000
#define bit_AMXBF16 0x00400000
+#define bit_AVX512FP16 0x00800000
#define bit_AMXTILE 0x01000000
#define bit_AMXINT8 0x02000000
/* Features in %eax for leaf 7 sub-leaf 1 */
-#define bit_AVXVNNI 0x00000008
+#define bit_RAOINT 0x00000008
+#define bit_AVXVNNI 0x00000010
#define bit_AVX512BF16 0x00000020
+#define bit_CMPCCXADD 0x00000080
+#define bit_AMXFP16 0x00200000
#define bit_HRESET 0x00400000
+#define bit_AVXIFMA 0x00800000
+
+/* Features in %edx for leaf 7 sub-leaf 1 */
+#define bit_AVXVNNIINT8 0x00000010
+#define bit_AVXNECONVERT 0x00000020
+#define bit_PREFETCHI 0x00004000
/* Features in %eax for leaf 13 sub-leaf 1 */
#define bit_XSAVEOPT 0x00000001
@@ -231,6 +241,7 @@
/* Features in %ebx for leaf 0x80000008 */
#define bit_CLZERO 0x00000001
+#define bit_RDPRU 0x00000010
#define bit_WBNOINVD 0x00000200
@@ -259,7 +270,8 @@
: "0"(__leaf), "2"(__count))
#endif
-static __inline int __get_cpuid_max (unsigned int __leaf, unsigned int *__sig)
+static __inline unsigned int __get_cpuid_max (unsigned int __leaf,
+ unsigned int *__sig)
{
unsigned int __eax, __ebx, __ecx, __edx;
#if __i386__
diff --git a/contrib/llvm-project/clang/lib/Headers/crc32intrin.h b/contrib/llvm-project/clang/lib/Headers/crc32intrin.h
new file mode 100644
index 000000000000..a0bd99d1b572
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/crc32intrin.h
@@ -0,0 +1,100 @@
+/*===---- crc32intrin.h - SSE4.2 Accumulate CRC32 intrinsics ---------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CRC32INTRIN_H
+#define __CRC32INTRIN_H
+
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("crc32")))
+
+/// Adds the unsigned integer operand to the CRC-32C checksum of the
+/// unsigned char operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> CRC32B </c> instruction.
+///
+/// \param __C
+/// An unsigned integer operand to add to the CRC-32C checksum of operand
+/// \a __D.
+/// \param __D
+/// An unsigned 8-bit integer operand used to compute the CRC-32C checksum.
+/// \returns The result of adding operand \a __C to the CRC-32C checksum of
+/// operand \a __D.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_crc32_u8(unsigned int __C, unsigned char __D)
+{
+ return __builtin_ia32_crc32qi(__C, __D);
+}
+
+/// Adds the unsigned integer operand to the CRC-32C checksum of the
+/// unsigned short operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> CRC32W </c> instruction.
+///
+/// \param __C
+/// An unsigned integer operand to add to the CRC-32C checksum of operand
+/// \a __D.
+/// \param __D
+/// An unsigned 16-bit integer operand used to compute the CRC-32C checksum.
+/// \returns The result of adding operand \a __C to the CRC-32C checksum of
+/// operand \a __D.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_crc32_u16(unsigned int __C, unsigned short __D)
+{
+ return __builtin_ia32_crc32hi(__C, __D);
+}
+
+/// Adds the first unsigned integer operand to the CRC-32C checksum of
+/// the second unsigned integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> CRC32L </c> instruction.
+///
+/// \param __C
+/// An unsigned integer operand to add to the CRC-32C checksum of operand
+/// \a __D.
+/// \param __D
+/// An unsigned 32-bit integer operand used to compute the CRC-32C checksum.
+/// \returns The result of adding operand \a __C to the CRC-32C checksum of
+/// operand \a __D.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_crc32_u32(unsigned int __C, unsigned int __D)
+{
+ return __builtin_ia32_crc32si(__C, __D);
+}
+
+#ifdef __x86_64__
+/// Adds the unsigned integer operand to the CRC-32C checksum of the
+/// unsigned 64-bit integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> CRC32Q </c> instruction.
+///
+/// \param __C
+/// An unsigned integer operand to add to the CRC-32C checksum of operand
+/// \a __D.
+/// \param __D
+/// An unsigned 64-bit integer operand used to compute the CRC-32C checksum.
+/// \returns The result of adding operand \a __C to the CRC-32C checksum of
+/// operand \a __D.
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_mm_crc32_u64(unsigned long long __C, unsigned long long __D)
+{
+ return __builtin_ia32_crc32di(__C, __D);
+}
+#endif /* __x86_64__ */
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __CRC32INTRIN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/bits/basic_string.h b/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/bits/basic_string.h
new file mode 100644
index 000000000000..64f50d9f6a72
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/bits/basic_string.h
@@ -0,0 +1,9 @@
+// CUDA headers define __noinline__ which interferes with libstdc++'s use of
+// `__attribute((__noinline__))`. In order to avoid compilation error,
+// temporarily unset __noinline__ when we include affected libstdc++ header.
+
+#pragma push_macro("__noinline__")
+#undef __noinline__
+#include_next "bits/basic_string.h"
+
+#pragma pop_macro("__noinline__")
diff --git a/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/bits/basic_string.tcc b/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/bits/basic_string.tcc
new file mode 100644
index 000000000000..90c7fe34d932
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/bits/basic_string.tcc
@@ -0,0 +1,9 @@
+// CUDA headers define __noinline__ which interferes with libstdc++'s use of
+// `__attribute((__noinline__))`. In order to avoid compilation error,
+// temporarily unset __noinline__ when we include affected libstdc++ header.
+
+#pragma push_macro("__noinline__")
+#undef __noinline__
+#include_next "bits/basic_string.tcc"
+
+#pragma pop_macro("__noinline__")
diff --git a/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/bits/shared_ptr_base.h b/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/bits/shared_ptr_base.h
new file mode 100644
index 000000000000..10028dd7bd9a
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/bits/shared_ptr_base.h
@@ -0,0 +1,9 @@
+// CUDA headers define __noinline__ which interferes with libstdc++'s use of
+// `__attribute((__noinline__))`. In order to avoid compilation error,
+// temporarily unset __noinline__ when we include affected libstdc++ header.
+
+#pragma push_macro("__noinline__")
+#undef __noinline__
+#include_next "bits/shared_ptr_base.h"
+
+#pragma pop_macro("__noinline__")
diff --git a/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/cmath b/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/cmath
new file mode 100644
index 000000000000..45f89beec9b4
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/cuda_wrappers/cmath
@@ -0,0 +1,90 @@
+/*===---- cmath - CUDA wrapper for <cmath> ---------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_CUDA_WRAPPERS_CMATH
+#define __CLANG_CUDA_WRAPPERS_CMATH
+
+#include_next <cmath>
+
+#if defined(_LIBCPP_STD_VER)
+
+// libc++ will need long double variants of these functions, but CUDA does not
+// provide them. We'll provide their declarations, which should allow the
+// headers to parse, but would not allow accidental use of them on a GPU.
+
+__attribute__((device)) long double logb(long double);
+__attribute__((device)) long double scalbn(long double, int);
+
+namespace std {
+
+// For __constexpr_fmin/fmax we only need device-side overloads before c++14
+// where they are not constexpr.
+#if _LIBCPP_STD_VER < 14
+
+__attribute__((device))
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 float __constexpr_fmax(float __x, float __y) _NOEXCEPT {
+ return __builtin_fmaxf(__x, __y);
+}
+
+__attribute__((device))
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 double __constexpr_fmax(double __x, double __y) _NOEXCEPT {
+ return __builtin_fmax(__x, __y);
+}
+
+__attribute__((device))
+inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 long double
+__constexpr_fmax(long double __x, long double __y) _NOEXCEPT {
+ return __builtin_fmaxl(__x, __y);
+}
+
+template <class _Tp, class _Up, __enable_if_t<is_arithmetic<_Tp>::value && is_arithmetic<_Up>::value, int> = 0>
+__attribute__((device))
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename __promote<_Tp, _Up>::type
+__constexpr_fmax(_Tp __x, _Up __y) _NOEXCEPT {
+ using __result_type = typename __promote<_Tp, _Up>::type;
+ return std::__constexpr_fmax(static_cast<__result_type>(__x), static_cast<__result_type>(__y));
+}
+#endif // _LIBCPP_STD_VER < 14
+
+// For logb/scalbn templates we must always provide device overloads because
+// libc++ implementation uses __builtin_XXX which gets translated into a libcall
+// which we can't handle on GPU. We need to forward those to CUDA-provided
+// implementations.
+
+template <class _Tp>
+__attribute__((device))
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp __constexpr_logb(_Tp __x) {
+ return ::logb(__x);
+}
+
+template <class _Tp>
+__attribute__((device))
+_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _Tp __constexpr_scalbn(_Tp __x, int __exp) {
+ return ::scalbn(__x, __exp);
+}
+
+} // namespace std//
+
+#endif // _LIBCPP_STD_VER
+
+#endif // include guard
diff --git a/contrib/llvm-project/clang/lib/Headers/emmintrin.h b/contrib/llvm-project/clang/lib/Headers/emmintrin.h
index bb759721faeb..96e3ebdecbdf 100644
--- a/contrib/llvm-project/clang/lib/Headers/emmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/emmintrin.h
@@ -10,22 +10,27 @@
#ifndef __EMMINTRIN_H
#define __EMMINTRIN_H
+#if !defined(__i386__) && !defined(__x86_64__)
+#error "This header is only meant to be used on x86 and x64 architecture"
+#endif
+
#include <xmmintrin.h>
typedef double __m128d __attribute__((__vector_size__(16), __aligned__(16)));
typedef long long __m128i __attribute__((__vector_size__(16), __aligned__(16)));
typedef double __m128d_u __attribute__((__vector_size__(16), __aligned__(1)));
-typedef long long __m128i_u __attribute__((__vector_size__(16), __aligned__(1)));
+typedef long long __m128i_u
+ __attribute__((__vector_size__(16), __aligned__(1)));
/* Type defines. */
-typedef double __v2df __attribute__ ((__vector_size__ (16)));
-typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+typedef double __v2df __attribute__((__vector_size__(16)));
+typedef long long __v2di __attribute__((__vector_size__(16)));
typedef short __v8hi __attribute__((__vector_size__(16)));
typedef char __v16qi __attribute__((__vector_size__(16)));
/* Unsigned types */
-typedef unsigned long long __v2du __attribute__ ((__vector_size__ (16)));
+typedef unsigned long long __v2du __attribute__((__vector_size__(16)));
typedef unsigned short __v8hu __attribute__((__vector_size__(16)));
typedef unsigned char __v16qu __attribute__((__vector_size__(16)));
@@ -33,9 +38,23 @@ typedef unsigned char __v16qu __attribute__((__vector_size__(16)));
* appear in the interface though. */
typedef signed char __v16qs __attribute__((__vector_size__(16)));
+#ifdef __SSE2__
+/* Both _Float16 and __bf16 require SSE2 being enabled. */
+typedef _Float16 __v8hf __attribute__((__vector_size__(16), __aligned__(16)));
+typedef _Float16 __m128h __attribute__((__vector_size__(16), __aligned__(16)));
+typedef _Float16 __m128h_u __attribute__((__vector_size__(16), __aligned__(1)));
+
+typedef __bf16 __v8bf __attribute__((__vector_size__(16), __aligned__(16)));
+typedef __bf16 __m128bh __attribute__((__vector_size__(16), __aligned__(16)));
+#endif
+
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse2"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS_MMX __attribute__((__always_inline__, __nodebug__, __target__("mmx,sse2"), __min_vector_width__(64)))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("sse2,no-evex512"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS_MMX \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("mmx,sse2,no-evex512"), __min_vector_width__(64)))
/// Adds lower double-precision values in both operands and returns the
/// sum in the lower 64 bits of the result. The upper 64 bits of the result
@@ -52,9 +71,8 @@ typedef signed char __v16qs __attribute__((__vector_size__(16)));
/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
/// sum of the lower 64 bits of both operands. The upper 64 bits are copied
/// from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_add_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_add_sd(__m128d __a,
+ __m128d __b) {
__a[0] += __b[0];
return __a;
}
@@ -71,9 +89,8 @@ _mm_add_sd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double] containing one of the source operands.
/// \returns A 128-bit vector of [2 x double] containing the sums of both
/// operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_add_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_add_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)((__v2df)__a + (__v2df)__b);
}
@@ -94,9 +111,8 @@ _mm_add_pd(__m128d __a, __m128d __b)
/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
/// difference of the lower 64 bits of both operands. The upper 64 bits are
/// copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sub_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sub_sd(__m128d __a,
+ __m128d __b) {
__a[0] -= __b[0];
return __a;
}
@@ -113,9 +129,8 @@ _mm_sub_sd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double] containing the subtrahend.
/// \returns A 128-bit vector of [2 x double] containing the differences between
/// both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sub_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sub_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)((__v2df)__a - (__v2df)__b);
}
@@ -135,9 +150,8 @@ _mm_sub_pd(__m128d __a, __m128d __b)
/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
/// product of the lower 64 bits of both operands. The upper 64 bits are
/// copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mul_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mul_sd(__m128d __a,
+ __m128d __b) {
__a[0] *= __b[0];
return __a;
}
@@ -154,9 +168,8 @@ _mm_mul_sd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double] containing one of the operands.
/// \returns A 128-bit vector of [2 x double] containing the products of both
/// operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mul_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mul_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)((__v2df)__a * (__v2df)__b);
}
@@ -177,9 +190,8 @@ _mm_mul_pd(__m128d __a, __m128d __b)
/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
/// quotient of the lower 64 bits of both operands. The upper 64 bits are
/// copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_div_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_div_sd(__m128d __a,
+ __m128d __b) {
__a[0] /= __b[0];
return __a;
}
@@ -197,9 +209,8 @@ _mm_div_sd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double] containing the divisor.
/// \returns A 128-bit vector of [2 x double] containing the quotients of both
/// operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_div_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_div_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)((__v2df)__a / (__v2df)__b);
}
@@ -222,11 +233,10 @@ _mm_div_pd(__m128d __a, __m128d __b)
/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
/// square root of the lower 64 bits of operand \a __b, and whose upper 64
/// bits are copied from the upper 64 bits of operand \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sqrt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_sd(__m128d __a,
+ __m128d __b) {
__m128d __c = __builtin_ia32_sqrtsd((__v2df)__b);
- return __extension__ (__m128d) { __c[0], __a[1] };
+ return __extension__(__m128d){__c[0], __a[1]};
}
/// Calculates the square root of the each of two values stored in a
@@ -240,9 +250,7 @@ _mm_sqrt_sd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector of [2 x double] containing the square roots of the
/// values in the operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sqrt_pd(__m128d __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_pd(__m128d __a) {
return __builtin_ia32_sqrtpd((__v2df)__a);
}
@@ -264,9 +272,8 @@ _mm_sqrt_pd(__m128d __a)
/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
/// minimum value between both operands. The upper 64 bits are copied from
/// the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_min_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_minsd((__v2df)__a, (__v2df)__b);
}
@@ -284,9 +291,8 @@ _mm_min_sd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double] containing one of the operands.
/// \returns A 128-bit vector of [2 x double] containing the minimum values
/// between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_min_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_pd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_minpd((__v2df)__a, (__v2df)__b);
}
@@ -308,9 +314,8 @@ _mm_min_pd(__m128d __a, __m128d __b)
/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
/// maximum value between both operands. The upper 64 bits are copied from
/// the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_max_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_maxsd((__v2df)__a, (__v2df)__b);
}
@@ -328,9 +333,8 @@ _mm_max_sd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double] containing one of the operands.
/// \returns A 128-bit vector of [2 x double] containing the maximum values
/// between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_max_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_pd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_maxpd((__v2df)__a, (__v2df)__b);
}
@@ -346,9 +350,8 @@ _mm_max_pd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double] containing one of the source operands.
/// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the
/// values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_and_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_and_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)((__v2du)__a & (__v2du)__b);
}
@@ -367,9 +370,8 @@ _mm_and_pd(__m128d __a, __m128d __b)
/// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the
/// values in the second operand and the one's complement of the first
/// operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_andnot_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_andnot_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)(~(__v2du)__a & (__v2du)__b);
}
@@ -385,9 +387,8 @@ _mm_andnot_pd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double] containing one of the source operands.
/// \returns A 128-bit vector of [2 x double] containing the bitwise OR of the
/// values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_or_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_or_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)((__v2du)__a | (__v2du)__b);
}
@@ -403,9 +404,8 @@ _mm_or_pd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double] containing one of the source operands.
/// \returns A 128-bit vector of [2 x double] containing the bitwise XOR of the
/// values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_xor_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_xor_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)((__v2du)__a ^ (__v2du)__b);
}
@@ -422,9 +422,8 @@ _mm_xor_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpeq_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpeqpd((__v2df)__a, (__v2df)__b);
}
@@ -442,9 +441,8 @@ _mm_cmpeq_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmplt_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpltpd((__v2df)__a, (__v2df)__b);
}
@@ -463,9 +461,8 @@ _mm_cmplt_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmple_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmplepd((__v2df)__a, (__v2df)__b);
}
@@ -484,9 +481,8 @@ _mm_cmple_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpgt_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpltpd((__v2df)__b, (__v2df)__a);
}
@@ -505,9 +501,8 @@ _mm_cmpgt_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpge_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmplepd((__v2df)__b, (__v2df)__a);
}
@@ -528,9 +523,8 @@ _mm_cmpge_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpord_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpordpd((__v2df)__a, (__v2df)__b);
}
@@ -552,9 +546,8 @@ _mm_cmpord_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpunord_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpunordpd((__v2df)__a, (__v2df)__b);
}
@@ -573,9 +566,8 @@ _mm_cmpunord_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpneq_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpneqpd((__v2df)__a, (__v2df)__b);
}
@@ -594,9 +586,8 @@ _mm_cmpneq_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnlt_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__a, (__v2df)__b);
}
@@ -615,9 +606,8 @@ _mm_cmpnlt_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnle_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__a, (__v2df)__b);
}
@@ -636,9 +626,8 @@ _mm_cmpnle_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpngt_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__b, (__v2df)__a);
}
@@ -657,9 +646,8 @@ _mm_cmpngt_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnge_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__b, (__v2df)__a);
}
@@ -680,9 +668,8 @@ _mm_cmpnge_pd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contains the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpeq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_sd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpeqsd((__v2df)__a, (__v2df)__b);
}
@@ -705,9 +692,8 @@ _mm_cmpeq_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contains the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmplt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_sd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpltsd((__v2df)__a, (__v2df)__b);
}
@@ -730,9 +716,8 @@ _mm_cmplt_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contains the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmple_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_sd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmplesd((__v2df)__a, (__v2df)__b);
}
@@ -755,11 +740,10 @@ _mm_cmple_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contains the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpgt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_sd(__m128d __a,
+ __m128d __b) {
__m128d __c = __builtin_ia32_cmpltsd((__v2df)__b, (__v2df)__a);
- return __extension__ (__m128d) { __c[0], __a[1] };
+ return __extension__(__m128d){__c[0], __a[1]};
}
/// Compares the lower double-precision floating-point values in each of
@@ -781,11 +765,10 @@ _mm_cmpgt_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contains the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpge_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_sd(__m128d __a,
+ __m128d __b) {
__m128d __c = __builtin_ia32_cmplesd((__v2df)__b, (__v2df)__a);
- return __extension__ (__m128d) { __c[0], __a[1] };
+ return __extension__(__m128d){__c[0], __a[1]};
}
/// Compares the lower double-precision floating-point values in each of
@@ -809,9 +792,8 @@ _mm_cmpge_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contains the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpord_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_sd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpordsd((__v2df)__a, (__v2df)__b);
}
@@ -837,9 +819,8 @@ _mm_cmpord_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contains the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpunord_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_sd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpunordsd((__v2df)__a, (__v2df)__b);
}
@@ -862,9 +843,8 @@ _mm_cmpunord_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contains the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpneq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_sd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpneqsd((__v2df)__a, (__v2df)__b);
}
@@ -887,9 +867,8 @@ _mm_cmpneq_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contains the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnlt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_sd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpnltsd((__v2df)__a, (__v2df)__b);
}
@@ -912,9 +891,8 @@ _mm_cmpnlt_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contains the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnle_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_sd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpnlesd((__v2df)__a, (__v2df)__b);
}
@@ -937,11 +915,10 @@ _mm_cmpnle_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contains the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpngt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_sd(__m128d __a,
+ __m128d __b) {
__m128d __c = __builtin_ia32_cmpnltsd((__v2df)__b, (__v2df)__a);
- return __extension__ (__m128d) { __c[0], __a[1] };
+ return __extension__(__m128d){__c[0], __a[1]};
}
/// Compares the lower double-precision floating-point values in each of
@@ -963,11 +940,10 @@ _mm_cmpngt_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contains the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnge_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_sd(__m128d __a,
+ __m128d __b) {
__m128d __c = __builtin_ia32_cmpnlesd((__v2df)__b, (__v2df)__a);
- return __extension__ (__m128d) { __c[0], __a[1] };
+ return __extension__(__m128d){__c[0], __a[1]};
}
/// Compares the lower double-precision floating-point values in each of
@@ -988,9 +964,8 @@ _mm_cmpnge_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison results. If either of the two
/// lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comieq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_comisdeq((__v2df)__a, (__v2df)__b);
}
@@ -1014,9 +989,8 @@ _mm_comieq_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison results. If either of the two
/// lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comilt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_comisdlt((__v2df)__a, (__v2df)__b);
}
@@ -1040,9 +1014,8 @@ _mm_comilt_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison results. If either of the two
/// lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comile_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_comisdle((__v2df)__a, (__v2df)__b);
}
@@ -1066,9 +1039,8 @@ _mm_comile_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison results. If either of the two
/// lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comigt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_comisdgt((__v2df)__a, (__v2df)__b);
}
@@ -1092,9 +1064,8 @@ _mm_comigt_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison results. If either of the two
/// lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comige_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_comisdge((__v2df)__a, (__v2df)__b);
}
@@ -1118,9 +1089,8 @@ _mm_comige_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison results. If either of the two
/// lower double-precision values is NaN, 1 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comineq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comineq_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_comisdneq((__v2df)__a, (__v2df)__b);
}
@@ -1142,9 +1112,8 @@ _mm_comineq_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison results. If either of the two
/// lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomieq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_ucomisdeq((__v2df)__a, (__v2df)__b);
}
@@ -1168,9 +1137,8 @@ _mm_ucomieq_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison results. If either of the two
/// lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomilt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_ucomisdlt((__v2df)__a, (__v2df)__b);
}
@@ -1194,9 +1162,8 @@ _mm_ucomilt_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison results. If either of the two
/// lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomile_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_ucomisdle((__v2df)__a, (__v2df)__b);
}
@@ -1220,9 +1187,8 @@ _mm_ucomile_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison results. If either of the two
/// lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomigt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_ucomisdgt((__v2df)__a, (__v2df)__b);
}
@@ -1246,9 +1212,8 @@ _mm_ucomigt_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison results. If either of the two
/// lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomige_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_ucomisdge((__v2df)__a, (__v2df)__b);
}
@@ -1272,9 +1237,8 @@ _mm_ucomige_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison result. If either of the two
/// lower double-precision values is NaN, 1 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomineq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomineq_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_ucomisdneq((__v2df)__a, (__v2df)__b);
}
@@ -1291,9 +1255,7 @@ _mm_ucomineq_sd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
/// converted values. The upper 64 bits are set to zero.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtpd_ps(__m128d __a)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtpd_ps(__m128d __a) {
return __builtin_ia32_cvtpd2ps((__v2df)__a);
}
@@ -1311,9 +1273,7 @@ _mm_cvtpd_ps(__m128d __a)
/// floating-point elements are converted to double-precision values. The
/// upper two elements are unused.
/// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtps_pd(__m128 __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtps_pd(__m128 __a) {
return (__m128d) __builtin_convertvector(
__builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1), __v2df);
}
@@ -1334,9 +1294,7 @@ _mm_cvtps_pd(__m128 __a)
///
/// The upper two elements are unused.
/// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtepi32_pd(__m128i __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtepi32_pd(__m128i __a) {
return (__m128d) __builtin_convertvector(
__builtin_shufflevector((__v4si)__a, (__v4si)__a, 0, 1), __v2df);
}
@@ -1354,9 +1312,7 @@ _mm_cvtepi32_pd(__m128i __a)
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the
/// converted values. The upper 64 bits are set to zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtpd_epi32(__m128d __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtpd_epi32(__m128d __a) {
return __builtin_ia32_cvtpd2dq((__v2df)__a);
}
@@ -1371,9 +1327,7 @@ _mm_cvtpd_epi32(__m128d __a)
/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the
/// conversion.
/// \returns A 32-bit signed integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvtsd_si32(__m128d __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsd_si32(__m128d __a) {
return __builtin_ia32_cvtsd2si((__v2df)__a);
}
@@ -1396,9 +1350,8 @@ _mm_cvtsd_si32(__m128d __a)
/// \returns A 128-bit vector of [4 x float]. The lower 32 bits contain the
/// converted value from the second parameter. The upper 96 bits are copied
/// from the upper 96 bits of the first parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtsd_ss(__m128 __a, __m128d __b)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtsd_ss(__m128 __a,
+ __m128d __b) {
return (__m128)__builtin_ia32_cvtsd2ss((__v4sf)__a, (__v2df)__b);
}
@@ -1419,9 +1372,8 @@ _mm_cvtsd_ss(__m128 __a, __m128d __b)
/// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the
/// converted value from the second parameter. The upper 64 bits are copied
/// from the upper 64 bits of the first parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtsi32_sd(__m128d __a, int __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtsi32_sd(__m128d __a,
+ int __b) {
__a[0] = __b;
return __a;
}
@@ -1445,9 +1397,8 @@ _mm_cvtsi32_sd(__m128d __a, int __b)
/// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the
/// converted value from the second parameter. The upper 64 bits are copied
/// from the upper 64 bits of the first parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtss_sd(__m128d __a, __m128 __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtss_sd(__m128d __a,
+ __m128 __b) {
__a[0] = __b[0];
return __a;
}
@@ -1469,9 +1420,7 @@ _mm_cvtss_sd(__m128d __a, __m128 __b)
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the
/// converted values. The upper 64 bits are set to zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvttpd_epi32(__m128d __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttpd_epi32(__m128d __a) {
return (__m128i)__builtin_ia32_cvttpd2dq((__v2df)__a);
}
@@ -1487,9 +1436,7 @@ _mm_cvttpd_epi32(__m128d __a)
/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the
/// conversion.
/// \returns A 32-bit signed integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvttsd_si32(__m128d __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_cvttsd_si32(__m128d __a) {
return __builtin_ia32_cvttsd2si((__v2df)__a);
}
@@ -1504,9 +1451,7 @@ _mm_cvttsd_si32(__m128d __a)
/// \param __a
/// A 128-bit vector of [2 x double].
/// \returns A 64-bit vector of [2 x i32] containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpd_pi32(__m128d __a)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_cvtpd_pi32(__m128d __a) {
return (__m64)__builtin_ia32_cvtpd2pi((__v2df)__a);
}
@@ -1524,9 +1469,7 @@ _mm_cvtpd_pi32(__m128d __a)
/// \param __a
/// A 128-bit vector of [2 x double].
/// \returns A 64-bit vector of [2 x i32] containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvttpd_pi32(__m128d __a)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_cvttpd_pi32(__m128d __a) {
return (__m64)__builtin_ia32_cvttpd2pi((__v2df)__a);
}
@@ -1541,9 +1484,7 @@ _mm_cvttpd_pi32(__m128d __a)
/// \param __a
/// A 64-bit vector of [2 x i32].
/// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpi32_pd(__m64 __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS_MMX _mm_cvtpi32_pd(__m64 __a) {
return __builtin_ia32_cvtpi2pd((__v2si)__a);
}
@@ -1558,9 +1499,7 @@ _mm_cvtpi32_pd(__m64 __a)
/// A 128-bit vector of [2 x double]. The lower 64 bits are returned.
/// \returns A double-precision floating-point value copied from the lower 64
/// bits of \a __a.
-static __inline__ double __DEFAULT_FN_ATTRS
-_mm_cvtsd_f64(__m128d __a)
-{
+static __inline__ double __DEFAULT_FN_ATTRS _mm_cvtsd_f64(__m128d __a) {
return __a[0];
}
@@ -1575,10 +1514,8 @@ _mm_cvtsd_f64(__m128d __a)
/// A pointer to a 128-bit memory location. The address of the memory
/// location has to be 16-byte aligned.
/// \returns A 128-bit vector of [2 x double] containing the loaded values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load_pd(double const *__dp)
-{
- return *(const __m128d*)__dp;
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load_pd(double const *__dp) {
+ return *(const __m128d *)__dp;
}
/// Loads a double-precision floating-point value from a specified memory
@@ -1593,17 +1530,15 @@ _mm_load_pd(double const *__dp)
/// A pointer to a memory location containing a double-precision value.
/// \returns A 128-bit vector of [2 x double] containing the loaded and
/// duplicated values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load1_pd(double const *__dp)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load1_pd(double const *__dp) {
struct __mm_load1_pd_struct {
double __u;
} __attribute__((__packed__, __may_alias__));
- double __u = ((const struct __mm_load1_pd_struct*)__dp)->__u;
- return __extension__ (__m128d){ __u, __u };
+ double __u = ((const struct __mm_load1_pd_struct *)__dp)->__u;
+ return __extension__(__m128d){__u, __u};
}
-#define _mm_load_pd1(dp) _mm_load1_pd(dp)
+#define _mm_load_pd1(dp) _mm_load1_pd(dp)
/// Loads two double-precision values, in reverse order, from an aligned
/// memory location into a 128-bit vector of [2 x double].
@@ -1619,10 +1554,8 @@ _mm_load1_pd(double const *__dp)
/// loaded in reverse order.
/// \returns A 128-bit vector of [2 x double] containing the reversed loaded
/// values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadr_pd(double const *__dp)
-{
- __m128d __u = *(const __m128d*)__dp;
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadr_pd(double const *__dp) {
+ __m128d __u = *(const __m128d *)__dp;
return __builtin_shufflevector((__v2df)__u, (__v2df)__u, 1, 0);
}
@@ -1637,13 +1570,11 @@ _mm_loadr_pd(double const *__dp)
/// A pointer to a 128-bit memory location. The address of the memory
/// location does not have to be aligned.
/// \returns A 128-bit vector of [2 x double] containing the loaded values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadu_pd(double const *__dp)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadu_pd(double const *__dp) {
struct __loadu_pd {
__m128d_u __v;
} __attribute__((__packed__, __may_alias__));
- return ((const struct __loadu_pd*)__dp)->__v;
+ return ((const struct __loadu_pd *)__dp)->__v;
}
/// Loads a 64-bit integer value to the low element of a 128-bit integer
@@ -1657,14 +1588,12 @@ _mm_loadu_pd(double const *__dp)
/// A pointer to a 64-bit memory location. The address of the memory
/// location does not have to be aligned.
/// \returns A 128-bit vector of [2 x i64] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si64(void const *__a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si64(void const *__a) {
struct __loadu_si64 {
long long __v;
} __attribute__((__packed__, __may_alias__));
- long long __u = ((const struct __loadu_si64*)__a)->__v;
- return __extension__ (__m128i)(__v2di){__u, 0LL};
+ long long __u = ((const struct __loadu_si64 *)__a)->__v;
+ return __extension__(__m128i)(__v2di){__u, 0LL};
}
/// Loads a 32-bit integer value to the low element of a 128-bit integer
@@ -1678,14 +1607,12 @@ _mm_loadu_si64(void const *__a)
/// A pointer to a 32-bit memory location. The address of the memory
/// location does not have to be aligned.
/// \returns A 128-bit vector of [4 x i32] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si32(void const *__a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si32(void const *__a) {
struct __loadu_si32 {
int __v;
} __attribute__((__packed__, __may_alias__));
- int __u = ((const struct __loadu_si32*)__a)->__v;
- return __extension__ (__m128i)(__v4si){__u, 0, 0, 0};
+ int __u = ((const struct __loadu_si32 *)__a)->__v;
+ return __extension__(__m128i)(__v4si){__u, 0, 0, 0};
}
/// Loads a 16-bit integer value to the low element of a 128-bit integer
@@ -1699,14 +1626,12 @@ _mm_loadu_si32(void const *__a)
/// A pointer to a 16-bit memory location. The address of the memory
/// location does not have to be aligned.
/// \returns A 128-bit vector of [8 x i16] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si16(void const *__a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si16(void const *__a) {
struct __loadu_si16 {
short __v;
} __attribute__((__packed__, __may_alias__));
- short __u = ((const struct __loadu_si16*)__a)->__v;
- return __extension__ (__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0};
+ short __u = ((const struct __loadu_si16 *)__a)->__v;
+ return __extension__(__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0};
}
/// Loads a 64-bit double-precision value to the low element of a
@@ -1720,14 +1645,12 @@ _mm_loadu_si16(void const *__a)
/// A pointer to a memory location containing a double-precision value.
/// The address of the memory location does not have to be aligned.
/// \returns A 128-bit vector of [2 x double] containing the loaded value.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load_sd(double const *__dp)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load_sd(double const *__dp) {
struct __mm_load_sd_struct {
double __u;
} __attribute__((__packed__, __may_alias__));
- double __u = ((const struct __mm_load_sd_struct*)__dp)->__u;
- return __extension__ (__m128d){ __u, 0 };
+ double __u = ((const struct __mm_load_sd_struct *)__dp)->__u;
+ return __extension__(__m128d){__u, 0};
}
/// Loads a double-precision value into the high-order bits of a 128-bit
@@ -1747,14 +1670,13 @@ _mm_load_sd(double const *__dp)
/// [127:64] of the result. The address of the memory location does not have
/// to be aligned.
/// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadh_pd(__m128d __a, double const *__dp)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadh_pd(__m128d __a,
+ double const *__dp) {
struct __mm_loadh_pd_struct {
double __u;
} __attribute__((__packed__, __may_alias__));
- double __u = ((const struct __mm_loadh_pd_struct*)__dp)->__u;
- return __extension__ (__m128d){ __a[0], __u };
+ double __u = ((const struct __mm_loadh_pd_struct *)__dp)->__u;
+ return __extension__(__m128d){__a[0], __u};
}
/// Loads a double-precision value into the low-order bits of a 128-bit
@@ -1774,14 +1696,13 @@ _mm_loadh_pd(__m128d __a, double const *__dp)
/// [63:0] of the result. The address of the memory location does not have to
/// be aligned.
/// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadl_pd(__m128d __a, double const *__dp)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadl_pd(__m128d __a,
+ double const *__dp) {
struct __mm_loadl_pd_struct {
double __u;
} __attribute__((__packed__, __may_alias__));
- double __u = ((const struct __mm_loadl_pd_struct*)__dp)->__u;
- return __extension__ (__m128d){ __u, __a[1] };
+ double __u = ((const struct __mm_loadl_pd_struct *)__dp)->__u;
+ return __extension__(__m128d){__u, __a[1]};
}
/// Constructs a 128-bit floating-point vector of [2 x double] with
@@ -1795,9 +1716,7 @@ _mm_loadl_pd(__m128d __a, double const *__dp)
///
/// \returns A 128-bit floating-point vector of [2 x double] with unspecified
/// content.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_undefined_pd(void)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_undefined_pd(void) {
return (__m128d)__builtin_ia32_undef128();
}
@@ -1815,10 +1734,8 @@ _mm_undefined_pd(void)
/// \returns An initialized 128-bit floating-point vector of [2 x double]. The
/// lower 64 bits contain the value of the parameter. The upper 64 bits are
/// set to zero.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_sd(double __w)
-{
- return __extension__ (__m128d){ __w, 0 };
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_sd(double __w) {
+ return __extension__(__m128d){__w, 0};
}
/// Constructs a 128-bit floating-point vector of [2 x double], with each
@@ -1833,10 +1750,8 @@ _mm_set_sd(double __w)
/// A double-precision floating-point value used to initialize each vector
/// element of the result.
/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set1_pd(double __w)
-{
- return __extension__ (__m128d){ __w, __w };
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set1_pd(double __w) {
+ return __extension__(__m128d){__w, __w};
}
/// Constructs a 128-bit floating-point vector of [2 x double], with each
@@ -1851,9 +1766,7 @@ _mm_set1_pd(double __w)
/// A double-precision floating-point value used to initialize each vector
/// element of the result.
/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_pd1(double __w)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_pd1(double __w) {
return _mm_set1_pd(__w);
}
@@ -1871,10 +1784,9 @@ _mm_set_pd1(double __w)
/// A double-precision floating-point value used to initialize the lower 64
/// bits of the result.
/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_pd(double __w, double __x)
-{
- return __extension__ (__m128d){ __x, __w };
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_pd(double __w,
+ double __x) {
+ return __extension__(__m128d){__x, __w};
}
/// Constructs a 128-bit floating-point vector of [2 x double],
@@ -1892,10 +1804,9 @@ _mm_set_pd(double __w, double __x)
/// A double-precision floating-point value used to initialize the upper 64
/// bits of the result.
/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_setr_pd(double __w, double __x)
-{
- return __extension__ (__m128d){ __w, __x };
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setr_pd(double __w,
+ double __x) {
+ return __extension__(__m128d){__w, __x};
}
/// Constructs a 128-bit floating-point vector of [2 x double]
@@ -1907,10 +1818,8 @@ _mm_setr_pd(double __w, double __x)
///
/// \returns An initialized 128-bit floating-point vector of [2 x double] with
/// all elements set to zero.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_setzero_pd(void)
-{
- return __extension__ (__m128d){ 0, 0 };
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setzero_pd(void) {
+ return __extension__(__m128d){0.0, 0.0};
}
/// Constructs a 128-bit floating-point vector of [2 x double]. The lower
@@ -1928,9 +1837,8 @@ _mm_setzero_pd(void)
/// A 128-bit vector of [2 x double]. The lower 64 bits are written to the
/// lower 64 bits of the result.
/// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_move_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_move_sd(__m128d __a,
+ __m128d __b) {
__a[0] = __b[0];
return __a;
}
@@ -1946,13 +1854,12 @@ _mm_move_sd(__m128d __a, __m128d __b)
/// A pointer to a 64-bit memory location.
/// \param __a
/// A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_sd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_store_sd(double *__dp,
+ __m128d __a) {
struct __mm_store_sd_struct {
double __u;
} __attribute__((__packed__, __may_alias__));
- ((struct __mm_store_sd_struct*)__dp)->__u = __a[0];
+ ((struct __mm_store_sd_struct *)__dp)->__u = __a[0];
}
/// Moves packed double-precision values from a 128-bit vector of
@@ -1968,10 +1875,9 @@ _mm_store_sd(double *__dp, __m128d __a)
/// \param __a
/// A packed 128-bit vector of [2 x double] containing the values to be
/// moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_pd(double *__dp, __m128d __a)
-{
- *(__m128d*)__dp = __a;
+static __inline__ void __DEFAULT_FN_ATTRS _mm_store_pd(double *__dp,
+ __m128d __a) {
+ *(__m128d *)__dp = __a;
}
/// Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to
@@ -1988,9 +1894,8 @@ _mm_store_pd(double *__dp, __m128d __a)
/// \param __a
/// A 128-bit vector of [2 x double] whose lower 64 bits are copied to each
/// of the values in \a __dp.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store1_pd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_store1_pd(double *__dp,
+ __m128d __a) {
__a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
_mm_store_pd(__dp, __a);
}
@@ -2009,9 +1914,8 @@ _mm_store1_pd(double *__dp, __m128d __a)
/// \param __a
/// A 128-bit vector of [2 x double] whose lower 64 bits are copied to each
/// of the values in \a __dp.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_pd1(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_store_pd1(double *__dp,
+ __m128d __a) {
_mm_store1_pd(__dp, __a);
}
@@ -2027,13 +1931,12 @@ _mm_store_pd1(double *__dp, __m128d __a)
/// location does not have to be aligned.
/// \param __a
/// A 128-bit vector of [2 x double] containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_pd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_pd(double *__dp,
+ __m128d __a) {
struct __storeu_pd {
__m128d_u __v;
} __attribute__((__packed__, __may_alias__));
- ((struct __storeu_pd*)__dp)->__v = __a;
+ ((struct __storeu_pd *)__dp)->__v = __a;
}
/// Stores two double-precision values, in reverse order, from a 128-bit
@@ -2050,9 +1953,8 @@ _mm_storeu_pd(double *__dp, __m128d __a)
/// \param __a
/// A 128-bit vector of [2 x double] containing the values to be reversed and
/// stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storer_pd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storer_pd(double *__dp,
+ __m128d __a) {
__a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 1, 0);
*(__m128d *)__dp = __a;
}
@@ -2068,13 +1970,12 @@ _mm_storer_pd(double *__dp, __m128d __a)
/// A pointer to a 64-bit memory location.
/// \param __a
/// A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeh_pd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeh_pd(double *__dp,
+ __m128d __a) {
struct __mm_storeh_pd_struct {
double __u;
} __attribute__((__packed__, __may_alias__));
- ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[1];
+ ((struct __mm_storeh_pd_struct *)__dp)->__u = __a[1];
}
/// Stores the lower 64 bits of a 128-bit vector of [2 x double] to a
@@ -2088,13 +1989,12 @@ _mm_storeh_pd(double *__dp, __m128d __a)
/// A pointer to a 64-bit memory location.
/// \param __a
/// A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storel_pd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_pd(double *__dp,
+ __m128d __a) {
struct __mm_storeh_pd_struct {
double __u;
} __attribute__((__packed__, __may_alias__));
- ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[0];
+ ((struct __mm_storeh_pd_struct *)__dp)->__u = __a[0];
}
/// Adds the corresponding elements of two 128-bit vectors of [16 x i8],
@@ -2113,9 +2013,8 @@ _mm_storel_pd(double *__dp, __m128d __a)
/// A 128-bit vector of [16 x i8].
/// \returns A 128-bit vector of [16 x i8] containing the sums of both
/// parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi8(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v16qu)__a + (__v16qu)__b);
}
@@ -2135,9 +2034,8 @@ _mm_add_epi8(__m128i __a, __m128i __b)
/// A 128-bit vector of [8 x i16].
/// \returns A 128-bit vector of [8 x i16] containing the sums of both
/// parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi16(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v8hu)__a + (__v8hu)__b);
}
@@ -2157,9 +2055,8 @@ _mm_add_epi16(__m128i __a, __m128i __b)
/// A 128-bit vector of [4 x i32].
/// \returns A 128-bit vector of [4 x i32] containing the sums of both
/// parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi32(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v4su)__a + (__v4su)__b);
}
@@ -2175,9 +2072,8 @@ _mm_add_epi32(__m128i __a, __m128i __b)
/// \param __b
/// A 64-bit integer.
/// \returns A 64-bit integer containing the sum of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_add_si64(__m64 __a, __m64 __b)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_add_si64(__m64 __a,
+ __m64 __b) {
return (__m64)__builtin_ia32_paddq((__v1di)__a, (__v1di)__b);
}
@@ -2197,9 +2093,8 @@ _mm_add_si64(__m64 __a, __m64 __b)
/// A 128-bit vector of [2 x i64].
/// \returns A 128-bit vector of [2 x i64] containing the sums of both
/// parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi64(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi64(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v2du)__a + (__v2du)__b);
}
@@ -2218,10 +2113,9 @@ _mm_add_epi64(__m128i __a, __m128i __b)
/// A 128-bit signed [16 x i8] vector.
/// \returns A 128-bit signed [16 x i8] vector containing the saturated sums of
/// both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epi8(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_paddsb128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi8(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_elementwise_add_sat((__v16qs)__a, (__v16qs)__b);
}
/// Adds, with saturation, the corresponding elements of two 128-bit
@@ -2240,10 +2134,9 @@ _mm_adds_epi8(__m128i __a, __m128i __b)
/// A 128-bit signed [8 x i16] vector.
/// \returns A 128-bit signed [8 x i16] vector containing the saturated sums of
/// both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epi16(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_paddsw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi16(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_elementwise_add_sat((__v8hi)__a, (__v8hi)__b);
}
/// Adds, with saturation, the corresponding elements of two 128-bit
@@ -2261,10 +2154,9 @@ _mm_adds_epi16(__m128i __a, __m128i __b)
/// A 128-bit unsigned [16 x i8] vector.
/// \returns A 128-bit unsigned [16 x i8] vector containing the saturated sums
/// of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epu8(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_paddusb128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu8(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_elementwise_add_sat((__v16qu)__a, (__v16qu)__b);
}
/// Adds, with saturation, the corresponding elements of two 128-bit
@@ -2282,10 +2174,9 @@ _mm_adds_epu8(__m128i __a, __m128i __b)
/// A 128-bit unsigned [8 x i16] vector.
/// \returns A 128-bit unsigned [8 x i16] vector containing the saturated sums
/// of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epu16(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_paddusw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu16(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_elementwise_add_sat((__v8hu)__a, (__v8hu)__b);
}
/// Computes the rounded averages of corresponding elements of two
@@ -2302,9 +2193,8 @@ _mm_adds_epu16(__m128i __a, __m128i __b)
/// A 128-bit unsigned [16 x i8] vector.
/// \returns A 128-bit unsigned [16 x i8] vector containing the rounded
/// averages of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_avg_epu8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_avg_epu8(__m128i __a,
+ __m128i __b) {
return (__m128i)__builtin_ia32_pavgb128((__v16qi)__a, (__v16qi)__b);
}
@@ -2322,9 +2212,8 @@ _mm_avg_epu8(__m128i __a, __m128i __b)
/// A 128-bit unsigned [8 x i16] vector.
/// \returns A 128-bit unsigned [8 x i16] vector containing the rounded
/// averages of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_avg_epu16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_avg_epu16(__m128i __a,
+ __m128i __b) {
return (__m128i)__builtin_ia32_pavgw128((__v8hi)__a, (__v8hi)__b);
}
@@ -2348,9 +2237,8 @@ _mm_avg_epu16(__m128i __a, __m128i __b)
/// A 128-bit signed [8 x i16] vector.
/// \returns A 128-bit signed [4 x i32] vector containing the sums of products
/// of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_madd_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_madd_epi16(__m128i __a,
+ __m128i __b) {
return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)__a, (__v8hi)__b);
}
@@ -2368,10 +2256,9 @@ _mm_madd_epi16(__m128i __a, __m128i __b)
/// A 128-bit signed [8 x i16] vector.
/// \returns A 128-bit signed [8 x i16] vector containing the greater value of
/// each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epi16(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi16(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_elementwise_max((__v8hi)__a, (__v8hi)__b);
}
/// Compares corresponding elements of two 128-bit unsigned [16 x i8]
@@ -2388,10 +2275,9 @@ _mm_max_epi16(__m128i __a, __m128i __b)
/// A 128-bit unsigned [16 x i8] vector.
/// \returns A 128-bit unsigned [16 x i8] vector containing the greater value of
/// each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epu8(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_pmaxub128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu8(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_elementwise_max((__v16qu)__a, (__v16qu)__b);
}
/// Compares corresponding elements of two 128-bit signed [8 x i16]
@@ -2408,10 +2294,9 @@ _mm_max_epu8(__m128i __a, __m128i __b)
/// A 128-bit signed [8 x i16] vector.
/// \returns A 128-bit signed [8 x i16] vector containing the smaller value of
/// each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epi16(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_pminsw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi16(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_elementwise_min((__v8hi)__a, (__v8hi)__b);
}
/// Compares corresponding elements of two 128-bit unsigned [16 x i8]
@@ -2428,10 +2313,9 @@ _mm_min_epi16(__m128i __a, __m128i __b)
/// A 128-bit unsigned [16 x i8] vector.
/// \returns A 128-bit unsigned [16 x i8] vector containing the smaller value of
/// each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epu8(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_pminub128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu8(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_elementwise_min((__v16qu)__a, (__v16qu)__b);
}
/// Multiplies the corresponding elements of two signed [8 x i16]
@@ -2448,9 +2332,8 @@ _mm_min_epu8(__m128i __a, __m128i __b)
/// A 128-bit signed [8 x i16] vector.
/// \returns A 128-bit signed [8 x i16] vector containing the upper 16 bits of
/// each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mulhi_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epi16(__m128i __a,
+ __m128i __b) {
return (__m128i)__builtin_ia32_pmulhw128((__v8hi)__a, (__v8hi)__b);
}
@@ -2468,9 +2351,8 @@ _mm_mulhi_epi16(__m128i __a, __m128i __b)
/// A 128-bit unsigned [8 x i16] vector.
/// \returns A 128-bit unsigned [8 x i16] vector containing the upper 16 bits
/// of each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mulhi_epu16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epu16(__m128i __a,
+ __m128i __b) {
return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b);
}
@@ -2488,9 +2370,8 @@ _mm_mulhi_epu16(__m128i __a, __m128i __b)
/// A 128-bit signed [8 x i16] vector.
/// \returns A 128-bit signed [8 x i16] vector containing the lower 16 bits of
/// each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mullo_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi16(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v8hu)__a * (__v8hu)__b);
}
@@ -2507,9 +2388,8 @@ _mm_mullo_epi16(__m128i __a, __m128i __b)
/// \param __b
/// A 64-bit integer containing one of the source operands.
/// \returns A 64-bit integer vector containing the product of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_mul_su32(__m64 __a, __m64 __b)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_mul_su32(__m64 __a,
+ __m64 __b) {
return __builtin_ia32_pmuludq((__v2si)__a, (__v2si)__b);
}
@@ -2526,9 +2406,8 @@ _mm_mul_su32(__m64 __a, __m64 __b)
/// \param __b
/// A [2 x i64] vector containing one of the source operands.
/// \returns A [2 x i64] vector containing the product of both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mul_epu32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epu32(__m128i __a,
+ __m128i __b) {
return __builtin_ia32_pmuludq128((__v4si)__a, (__v4si)__b);
}
@@ -2548,9 +2427,8 @@ _mm_mul_epu32(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing one of the source operands.
/// \returns A [2 x i64] vector containing the sums of the sets of absolute
/// differences between both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sad_epu8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sad_epu8(__m128i __a,
+ __m128i __b) {
return __builtin_ia32_psadbw128((__v16qi)__a, (__v16qi)__b);
}
@@ -2566,9 +2444,8 @@ _mm_sad_epu8(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing the subtrahends.
/// \returns A 128-bit integer vector containing the differences of the values
/// in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi8(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v16qu)__a - (__v16qu)__b);
}
@@ -2584,9 +2461,8 @@ _mm_sub_epi8(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing the subtrahends.
/// \returns A 128-bit integer vector containing the differences of the values
/// in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi16(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v8hu)__a - (__v8hu)__b);
}
@@ -2602,9 +2478,8 @@ _mm_sub_epi16(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing the subtrahends.
/// \returns A 128-bit integer vector containing the differences of the values
/// in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi32(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v4su)__a - (__v4su)__b);
}
@@ -2621,9 +2496,8 @@ _mm_sub_epi32(__m128i __a, __m128i __b)
/// A 64-bit integer vector containing the subtrahend.
/// \returns A 64-bit integer vector containing the difference of the values in
/// the operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_sub_si64(__m64 __a, __m64 __b)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_sub_si64(__m64 __a,
+ __m64 __b) {
return (__m64)__builtin_ia32_psubq((__v1di)__a, (__v1di)__b);
}
@@ -2639,9 +2513,8 @@ _mm_sub_si64(__m64 __a, __m64 __b)
/// A 128-bit integer vector containing the subtrahends.
/// \returns A 128-bit integer vector containing the differences of the values
/// in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi64(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi64(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v2du)__a - (__v2du)__b);
}
@@ -2660,10 +2533,9 @@ _mm_sub_epi64(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing the subtrahends.
/// \returns A 128-bit integer vector containing the differences of the values
/// in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epi8(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_psubsb128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi8(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_elementwise_sub_sat((__v16qs)__a, (__v16qs)__b);
}
/// Subtracts corresponding 16-bit signed integer values in the input and
@@ -2681,10 +2553,9 @@ _mm_subs_epi8(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing the subtrahends.
/// \returns A 128-bit integer vector containing the differences of the values
/// in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epi16(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_psubsw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi16(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_elementwise_sub_sat((__v8hi)__a, (__v8hi)__b);
}
/// Subtracts corresponding 8-bit unsigned integer values in the input
@@ -2701,10 +2572,9 @@ _mm_subs_epi16(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing the subtrahends.
/// \returns A 128-bit integer vector containing the unsigned integer
/// differences of the values in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epu8(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_psubusb128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu8(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_elementwise_sub_sat((__v16qu)__a, (__v16qu)__b);
}
/// Subtracts corresponding 16-bit unsigned integer values in the input
@@ -2721,10 +2591,9 @@ _mm_subs_epu8(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing the subtrahends.
/// \returns A 128-bit integer vector containing the unsigned integer
/// differences of the values in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epu16(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_psubusw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu16(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_elementwise_sub_sat((__v8hu)__a, (__v8hu)__b);
}
/// Performs a bitwise AND of two 128-bit integer vectors.
@@ -2739,9 +2608,8 @@ _mm_subs_epu16(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing one of the source operands.
/// \returns A 128-bit integer vector containing the bitwise AND of the values
/// in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_and_si128(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_and_si128(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v2du)__a & (__v2du)__b);
}
@@ -2759,9 +2627,8 @@ _mm_and_si128(__m128i __a, __m128i __b)
/// A 128-bit vector containing the right source operand.
/// \returns A 128-bit integer vector containing the bitwise AND of the one's
/// complement of the first operand and the values in the second operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_andnot_si128(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_andnot_si128(__m128i __a,
+ __m128i __b) {
return (__m128i)(~(__v2du)__a & (__v2du)__b);
}
/// Performs a bitwise OR of two 128-bit integer vectors.
@@ -2776,9 +2643,8 @@ _mm_andnot_si128(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing one of the source operands.
/// \returns A 128-bit integer vector containing the bitwise OR of the values
/// in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_or_si128(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_or_si128(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v2du)__a | (__v2du)__b);
}
@@ -2794,9 +2660,8 @@ _mm_or_si128(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing one of the source operands.
/// \returns A 128-bit integer vector containing the bitwise exclusive OR of the
/// values in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_xor_si128(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_xor_si128(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v2du)__a ^ (__v2du)__b);
}
@@ -2817,11 +2682,13 @@ _mm_xor_si128(__m128i __a, __m128i __b)
/// An immediate value specifying the number of bytes to left-shift operand
/// \a a.
/// \returns A 128-bit integer vector containing the left-shifted value.
-#define _mm_slli_si128(a, imm) \
- (__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm))
+#define _mm_slli_si128(a, imm) \
+ ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), \
+ (int)(imm)))
-#define _mm_bslli_si128(a, imm) \
- (__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm))
+#define _mm_bslli_si128(a, imm) \
+ ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), \
+ (int)(imm)))
/// Left-shifts each 16-bit value in the 128-bit integer vector operand
/// by the specified number of bits. Low-order bits are cleared.
@@ -2836,9 +2703,8 @@ _mm_xor_si128(__m128i __a, __m128i __b)
/// An integer value specifying the number of bits to left-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi16(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi16(__m128i __a,
+ int __count) {
return (__m128i)__builtin_ia32_psllwi128((__v8hi)__a, __count);
}
@@ -2855,9 +2721,8 @@ _mm_slli_epi16(__m128i __a, int __count)
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
/// to left-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi16(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi16(__m128i __a,
+ __m128i __count) {
return (__m128i)__builtin_ia32_psllw128((__v8hi)__a, (__v8hi)__count);
}
@@ -2874,9 +2739,8 @@ _mm_sll_epi16(__m128i __a, __m128i __count)
/// An integer value specifying the number of bits to left-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi32(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi32(__m128i __a,
+ int __count) {
return (__m128i)__builtin_ia32_pslldi128((__v4si)__a, __count);
}
@@ -2893,9 +2757,8 @@ _mm_slli_epi32(__m128i __a, int __count)
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
/// to left-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi32(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi32(__m128i __a,
+ __m128i __count) {
return (__m128i)__builtin_ia32_pslld128((__v4si)__a, (__v4si)__count);
}
@@ -2912,9 +2775,8 @@ _mm_sll_epi32(__m128i __a, __m128i __count)
/// An integer value specifying the number of bits to left-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi64(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi64(__m128i __a,
+ int __count) {
return __builtin_ia32_psllqi128((__v2di)__a, __count);
}
@@ -2931,9 +2793,8 @@ _mm_slli_epi64(__m128i __a, int __count)
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
/// to left-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi64(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi64(__m128i __a,
+ __m128i __count) {
return __builtin_ia32_psllq128((__v2di)__a, (__v2di)__count);
}
@@ -2951,9 +2812,8 @@ _mm_sll_epi64(__m128i __a, __m128i __count)
/// An integer value specifying the number of bits to right-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srai_epi16(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi16(__m128i __a,
+ int __count) {
return (__m128i)__builtin_ia32_psrawi128((__v8hi)__a, __count);
}
@@ -2971,9 +2831,8 @@ _mm_srai_epi16(__m128i __a, int __count)
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
/// to right-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sra_epi16(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi16(__m128i __a,
+ __m128i __count) {
return (__m128i)__builtin_ia32_psraw128((__v8hi)__a, (__v8hi)__count);
}
@@ -2991,9 +2850,8 @@ _mm_sra_epi16(__m128i __a, __m128i __count)
/// An integer value specifying the number of bits to right-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srai_epi32(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi32(__m128i __a,
+ int __count) {
return (__m128i)__builtin_ia32_psradi128((__v4si)__a, __count);
}
@@ -3011,9 +2869,8 @@ _mm_srai_epi32(__m128i __a, int __count)
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
/// to right-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sra_epi32(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi32(__m128i __a,
+ __m128i __count) {
return (__m128i)__builtin_ia32_psrad128((__v4si)__a, (__v4si)__count);
}
@@ -3034,11 +2891,13 @@ _mm_sra_epi32(__m128i __a, __m128i __count)
/// An immediate value specifying the number of bytes to right-shift operand
/// \a a.
/// \returns A 128-bit integer vector containing the right-shifted value.
-#define _mm_srli_si128(a, imm) \
- (__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm))
+#define _mm_srli_si128(a, imm) \
+ ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), \
+ (int)(imm)))
-#define _mm_bsrli_si128(a, imm) \
- (__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm))
+#define _mm_bsrli_si128(a, imm) \
+ ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), \
+ (int)(imm)))
/// Right-shifts each of 16-bit values in the 128-bit integer vector
/// operand by the specified number of bits. High-order bits are cleared.
@@ -3053,9 +2912,8 @@ _mm_sra_epi32(__m128i __a, __m128i __count)
/// An integer value specifying the number of bits to right-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi16(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi16(__m128i __a,
+ int __count) {
return (__m128i)__builtin_ia32_psrlwi128((__v8hi)__a, __count);
}
@@ -3072,9 +2930,8 @@ _mm_srli_epi16(__m128i __a, int __count)
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
/// to right-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi16(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi16(__m128i __a,
+ __m128i __count) {
return (__m128i)__builtin_ia32_psrlw128((__v8hi)__a, (__v8hi)__count);
}
@@ -3091,9 +2948,8 @@ _mm_srl_epi16(__m128i __a, __m128i __count)
/// An integer value specifying the number of bits to right-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi32(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi32(__m128i __a,
+ int __count) {
return (__m128i)__builtin_ia32_psrldi128((__v4si)__a, __count);
}
@@ -3110,9 +2966,8 @@ _mm_srli_epi32(__m128i __a, int __count)
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
/// to right-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi32(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi32(__m128i __a,
+ __m128i __count) {
return (__m128i)__builtin_ia32_psrld128((__v4si)__a, (__v4si)__count);
}
@@ -3129,9 +2984,8 @@ _mm_srl_epi32(__m128i __a, __m128i __count)
/// An integer value specifying the number of bits to right-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi64(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi64(__m128i __a,
+ int __count) {
return __builtin_ia32_psrlqi128((__v2di)__a, __count);
}
@@ -3148,9 +3002,8 @@ _mm_srli_epi64(__m128i __a, int __count)
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
/// to right-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi64(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi64(__m128i __a,
+ __m128i __count) {
return __builtin_ia32_psrlq128((__v2di)__a, (__v2di)__count);
}
@@ -3167,9 +3020,8 @@ _mm_srl_epi64(__m128i __a, __m128i __count)
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi8(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v16qi)__a == (__v16qi)__b);
}
@@ -3186,9 +3038,8 @@ _mm_cmpeq_epi8(__m128i __a, __m128i __b)
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi16(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v8hi)__a == (__v8hi)__b);
}
@@ -3205,9 +3056,8 @@ _mm_cmpeq_epi16(__m128i __a, __m128i __b)
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi32(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v4si)__a == (__v4si)__b);
}
@@ -3225,9 +3075,8 @@ _mm_cmpeq_epi32(__m128i __a, __m128i __b)
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi8(__m128i __a,
+ __m128i __b) {
/* This function always performs a signed comparison, but __v16qi is a char
which may be signed or unsigned, so use __v16qs. */
return (__m128i)((__v16qs)__a > (__v16qs)__b);
@@ -3248,9 +3097,8 @@ _mm_cmpgt_epi8(__m128i __a, __m128i __b)
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi16(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v8hi)__a > (__v8hi)__b);
}
@@ -3269,9 +3117,8 @@ _mm_cmpgt_epi16(__m128i __a, __m128i __b)
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi32(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v4si)__a > (__v4si)__b);
}
@@ -3290,9 +3137,8 @@ _mm_cmpgt_epi32(__m128i __a, __m128i __b)
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi8(__m128i __a,
+ __m128i __b) {
return _mm_cmpgt_epi8(__b, __a);
}
@@ -3311,9 +3157,8 @@ _mm_cmplt_epi8(__m128i __a, __m128i __b)
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi16(__m128i __a,
+ __m128i __b) {
return _mm_cmpgt_epi16(__b, __a);
}
@@ -3332,9 +3177,8 @@ _mm_cmplt_epi16(__m128i __a, __m128i __b)
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi32(__m128i __a,
+ __m128i __b) {
return _mm_cmpgt_epi32(__b, __a);
}
@@ -3356,9 +3200,8 @@ _mm_cmplt_epi32(__m128i __a, __m128i __b)
/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
/// converted value of the second operand. The upper 64 bits are copied from
/// the upper 64 bits of the first operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtsi64_sd(__m128d __a, long long __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtsi64_sd(__m128d __a,
+ long long __b) {
__a[0] = __b;
return __a;
}
@@ -3374,9 +3217,7 @@ _mm_cvtsi64_sd(__m128d __a, long long __b)
/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the
/// conversion.
/// \returns A 64-bit signed integer containing the converted value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvtsd_si64(__m128d __a)
-{
+static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtsd_si64(__m128d __a) {
return __builtin_ia32_cvtsd2si64((__v2df)__a);
}
@@ -3392,9 +3233,7 @@ _mm_cvtsd_si64(__m128d __a)
/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the
/// conversion.
/// \returns A 64-bit signed integer containing the converted value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvttsd_si64(__m128d __a)
-{
+static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvttsd_si64(__m128d __a) {
return __builtin_ia32_cvttsd2si64((__v2df)__a);
}
#endif
@@ -3408,10 +3247,8 @@ _mm_cvttsd_si64(__m128d __a)
/// \param __a
/// A 128-bit integer vector.
/// \returns A 128-bit vector of [4 x float] containing the converted values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtepi32_ps(__m128i __a)
-{
- return (__m128)__builtin_convertvector((__v4si)__a, __v4sf);
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtepi32_ps(__m128i __a) {
+ return (__m128) __builtin_convertvector((__v4si)__a, __v4sf);
}
/// Converts a vector of [4 x float] into a vector of [4 x i32].
@@ -3424,9 +3261,7 @@ _mm_cvtepi32_ps(__m128i __a)
/// A 128-bit vector of [4 x float].
/// \returns A 128-bit integer vector of [4 x i32] containing the converted
/// values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtps_epi32(__m128 __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtps_epi32(__m128 __a) {
return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)__a);
}
@@ -3441,9 +3276,7 @@ _mm_cvtps_epi32(__m128 __a)
/// \param __a
/// A 128-bit vector of [4 x float].
/// \returns A 128-bit vector of [4 x i32] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvttps_epi32(__m128 __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttps_epi32(__m128 __a) {
return (__m128i)__builtin_ia32_cvttps2dq((__v4sf)__a);
}
@@ -3457,29 +3290,24 @@ _mm_cvttps_epi32(__m128 __a)
/// \param __a
/// A 32-bit signed integer operand.
/// \returns A 128-bit vector of [4 x i32].
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtsi32_si128(int __a)
-{
- return __extension__ (__m128i)(__v4si){ __a, 0, 0, 0 };
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi32_si128(int __a) {
+ return __extension__(__m128i)(__v4si){__a, 0, 0, 0};
}
-#ifdef __x86_64__
/// Returns a vector of [2 x i64] where the lower element is the input
/// operand and the upper element is zero.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
+/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction
+/// in 64-bit mode.
///
/// \param __a
/// A 64-bit signed integer operand containing the value to be converted.
/// \returns A 128-bit vector of [2 x i64] containing the converted value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtsi64_si128(long long __a)
-{
- return __extension__ (__m128i)(__v2di){ __a, 0 };
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi64_si128(long long __a) {
+ return __extension__(__m128i)(__v2di){__a, 0};
}
-#endif
/// Moves the least significant 32 bits of a vector of [4 x i32] to a
/// 32-bit signed integer value.
@@ -3492,14 +3320,11 @@ _mm_cvtsi64_si128(long long __a)
/// A vector of [4 x i32]. The least significant 32 bits are moved to the
/// destination.
/// \returns A 32-bit signed integer containing the moved value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvtsi128_si32(__m128i __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsi128_si32(__m128i __a) {
__v4si __b = (__v4si)__a;
return __b[0];
}
-#ifdef __x86_64__
/// Moves the least significant 64 bits of a vector of [2 x i64] to a
/// 64-bit signed integer value.
///
@@ -3511,12 +3336,9 @@ _mm_cvtsi128_si32(__m128i __a)
/// A vector of [2 x i64]. The least significant 64 bits are moved to the
/// destination.
/// \returns A 64-bit signed integer containing the moved value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvtsi128_si64(__m128i __a)
-{
+static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtsi128_si64(__m128i __a) {
return __a[0];
}
-#endif
/// Moves packed integer values from an aligned 128-bit memory location
/// to elements in a 128-bit integer vector.
@@ -3529,8 +3351,7 @@ _mm_cvtsi128_si64(__m128i __a)
/// An aligned pointer to a memory location containing integer values.
/// \returns A 128-bit integer vector containing the moved values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_load_si128(__m128i const *__p)
-{
+_mm_load_si128(__m128i const *__p) {
return *__p;
}
@@ -3545,12 +3366,11 @@ _mm_load_si128(__m128i const *__p)
/// A pointer to a memory location containing integer values.
/// \returns A 128-bit integer vector containing the moved values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si128(__m128i_u const *__p)
-{
+_mm_loadu_si128(__m128i_u const *__p) {
struct __loadu_si128 {
__m128i_u __v;
} __attribute__((__packed__, __may_alias__));
- return ((const struct __loadu_si128*)__p)->__v;
+ return ((const struct __loadu_si128 *)__p)->__v;
}
/// Returns a vector of [2 x i64] where the lower element is taken from
@@ -3566,12 +3386,12 @@ _mm_loadu_si128(__m128i_u const *__p)
/// \returns A 128-bit vector of [2 x i64]. The lower order bits contain the
/// moved value. The higher order bits are cleared.
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadl_epi64(__m128i_u const *__p)
-{
+_mm_loadl_epi64(__m128i_u const *__p) {
struct __mm_loadl_epi64_struct {
long long __u;
} __attribute__((__packed__, __may_alias__));
- return __extension__ (__m128i) { ((const struct __mm_loadl_epi64_struct*)__p)->__u, 0};
+ return __extension__(__m128i){
+ ((const struct __mm_loadl_epi64_struct *)__p)->__u, 0};
}
/// Generates a 128-bit vector of [4 x i32] with unspecified content.
@@ -3583,9 +3403,7 @@ _mm_loadl_epi64(__m128i_u const *__p)
/// This intrinsic has no corresponding instruction.
///
/// \returns A 128-bit vector of [4 x i32] with unspecified content.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_undefined_si128(void)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_undefined_si128(void) {
return (__m128i)__builtin_ia32_undef128();
}
@@ -3605,10 +3423,9 @@ _mm_undefined_si128(void)
/// destination vector of [2 x i64].
/// \returns An initialized 128-bit vector of [2 x i64] containing the values
/// provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi64x(long long __q1, long long __q0)
-{
- return __extension__ (__m128i)(__v2di){ __q0, __q1 };
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi64x(long long __q1,
+ long long __q0) {
+ return __extension__(__m128i)(__v2di){__q0, __q1};
}
/// Initializes both 64-bit values in a 128-bit vector of [2 x i64] with
@@ -3627,9 +3444,8 @@ _mm_set_epi64x(long long __q1, long long __q0)
/// destination vector of [2 x i64].
/// \returns An initialized 128-bit vector of [2 x i64] containing the values
/// provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi64(__m64 __q1, __m64 __q0)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi64(__m64 __q1,
+ __m64 __q0) {
return _mm_set_epi64x((long long)__q1, (long long)__q0);
}
@@ -3655,10 +3471,9 @@ _mm_set_epi64(__m64 __q1, __m64 __q0)
/// vector.
/// \returns An initialized 128-bit vector of [4 x i32] containing the values
/// provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi32(int __i3, int __i2, int __i1, int __i0)
-{
- return __extension__ (__m128i)(__v4si){ __i0, __i1, __i2, __i3};
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi32(int __i3, int __i2,
+ int __i1, int __i0) {
+ return __extension__(__m128i)(__v4si){__i0, __i1, __i2, __i3};
}
/// Initializes the 16-bit values in a 128-bit vector of [8 x i16] with
@@ -3696,9 +3511,10 @@ _mm_set_epi32(int __i3, int __i2, int __i1, int __i0)
/// \returns An initialized 128-bit vector of [8 x i16] containing the values
/// provided in the operands.
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short __w2, short __w1, short __w0)
-{
- return __extension__ (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 };
+_mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3,
+ short __w2, short __w1, short __w0) {
+ return __extension__(__m128i)(__v8hi){__w0, __w1, __w2, __w3,
+ __w4, __w5, __w6, __w7};
}
/// Initializes the 8-bit values in a 128-bit vector of [16 x i8] with
@@ -3744,9 +3560,12 @@ _mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short
/// \returns An initialized 128-bit vector of [16 x i8] containing the values
/// provided in the operands.
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b9, char __b8, char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0)
-{
- return __extension__ (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 };
+_mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11,
+ char __b10, char __b9, char __b8, char __b7, char __b6, char __b5,
+ char __b4, char __b3, char __b2, char __b1, char __b0) {
+ return __extension__(__m128i)(__v16qi){
+ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7,
+ __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15};
}
/// Initializes both values in a 128-bit integer vector with the
@@ -3762,9 +3581,7 @@ _mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __
/// vector.
/// \returns An initialized 128-bit integer vector of [2 x i64] with both
/// elements containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi64x(long long __q)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi64x(long long __q) {
return _mm_set_epi64x(__q, __q);
}
@@ -3781,9 +3598,7 @@ _mm_set1_epi64x(long long __q)
/// vector.
/// \returns An initialized 128-bit vector of [2 x i64] with all elements
/// containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi64(__m64 __q)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi64(__m64 __q) {
return _mm_set_epi64(__q, __q);
}
@@ -3800,9 +3615,7 @@ _mm_set1_epi64(__m64 __q)
/// vector.
/// \returns An initialized 128-bit vector of [4 x i32] with all elements
/// containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi32(int __i)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi32(int __i) {
return _mm_set_epi32(__i, __i, __i, __i);
}
@@ -3819,9 +3632,7 @@ _mm_set1_epi32(int __i)
/// vector.
/// \returns An initialized 128-bit vector of [8 x i16] with all elements
/// containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi16(short __w)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi16(short __w) {
return _mm_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w);
}
@@ -3838,10 +3649,9 @@ _mm_set1_epi16(short __w)
/// vector.
/// \returns An initialized 128-bit vector of [16 x i8] with all elements
/// containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi8(char __b)
-{
- return _mm_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi8(char __b) {
+ return _mm_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
+ __b, __b, __b, __b, __b);
}
/// Constructs a 128-bit integer vector, initialized in reverse order
@@ -3858,9 +3668,8 @@ _mm_set1_epi8(char __b)
/// A 64-bit integral value used to initialize the upper 64 bits of the
/// result.
/// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi64(__m64 __q0, __m64 __q1)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setr_epi64(__m64 __q0,
+ __m64 __q1) {
return _mm_set_epi64(__q1, __q0);
}
@@ -3881,9 +3690,9 @@ _mm_setr_epi64(__m64 __q0, __m64 __q1)
/// \param __i3
/// A 32-bit integral value used to initialize bits [127:96] of the result.
/// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi32(int __i0, int __i1, int __i2, int __i3)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setr_epi32(int __i0, int __i1,
+ int __i2,
+ int __i3) {
return _mm_set_epi32(__i3, __i2, __i1, __i0);
}
@@ -3913,8 +3722,8 @@ _mm_setr_epi32(int __i0, int __i1, int __i2, int __i3)
/// A 16-bit integral value used to initialize bits [127:112] of the result.
/// \returns An initialized 128-bit integer vector.
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short __w5, short __w6, short __w7)
-{
+_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4,
+ short __w5, short __w6, short __w7) {
return _mm_set_epi16(__w7, __w6, __w5, __w4, __w3, __w2, __w1, __w0);
}
@@ -3960,9 +3769,11 @@ _mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short
/// An 8-bit integral value used to initialize bits [127:120] of the result.
/// \returns An initialized 128-bit integer vector.
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7, char __b8, char __b9, char __b10, char __b11, char __b12, char __b13, char __b14, char __b15)
-{
- return _mm_set_epi8(__b15, __b14, __b13, __b12, __b11, __b10, __b9, __b8, __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
+_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5,
+ char __b6, char __b7, char __b8, char __b9, char __b10,
+ char __b11, char __b12, char __b13, char __b14, char __b15) {
+ return _mm_set_epi8(__b15, __b14, __b13, __b12, __b11, __b10, __b9, __b8,
+ __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
}
/// Creates a 128-bit integer vector initialized to zero.
@@ -3973,10 +3784,8 @@ _mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5,
///
/// \returns An initialized 128-bit integer vector with all elements set to
/// zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setzero_si128(void)
-{
- return __extension__ (__m128i)(__v2di){ 0LL, 0LL };
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setzero_si128(void) {
+ return __extension__(__m128i)(__v2di){0LL, 0LL};
}
/// Stores a 128-bit integer vector to a memory location aligned on a
@@ -3991,9 +3800,8 @@ _mm_setzero_si128(void)
/// values.
/// \param __b
/// A 128-bit integer vector containing the values to be moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_si128(__m128i *__p, __m128i __b)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_store_si128(__m128i *__p,
+ __m128i __b) {
*__p = __b;
}
@@ -4007,13 +3815,12 @@ _mm_store_si128(__m128i *__p, __m128i __b)
/// A pointer to a memory location that will receive the integer values.
/// \param __b
/// A 128-bit integer vector containing the values to be moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si128(__m128i_u *__p, __m128i __b)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si128(__m128i_u *__p,
+ __m128i __b) {
struct __storeu_si128 {
__m128i_u __v;
} __attribute__((__packed__, __may_alias__));
- ((struct __storeu_si128*)__p)->__v = __b;
+ ((struct __storeu_si128 *)__p)->__v = __b;
}
/// Stores a 64-bit integer value from the low element of a 128-bit integer
@@ -4028,13 +3835,12 @@ _mm_storeu_si128(__m128i_u *__p, __m128i __b)
/// location does not have to be aligned.
/// \param __b
/// A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si64(void *__p, __m128i __b)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si64(void *__p,
+ __m128i __b) {
struct __storeu_si64 {
long long __v;
} __attribute__((__packed__, __may_alias__));
- ((struct __storeu_si64*)__p)->__v = ((__v2di)__b)[0];
+ ((struct __storeu_si64 *)__p)->__v = ((__v2di)__b)[0];
}
/// Stores a 32-bit integer value from the low element of a 128-bit integer
@@ -4049,13 +3855,12 @@ _mm_storeu_si64(void *__p, __m128i __b)
/// location does not have to be aligned.
/// \param __b
/// A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si32(void *__p, __m128i __b)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si32(void *__p,
+ __m128i __b) {
struct __storeu_si32 {
int __v;
} __attribute__((__packed__, __may_alias__));
- ((struct __storeu_si32*)__p)->__v = ((__v4si)__b)[0];
+ ((struct __storeu_si32 *)__p)->__v = ((__v4si)__b)[0];
}
/// Stores a 16-bit integer value from the low element of a 128-bit integer
@@ -4070,13 +3875,12 @@ _mm_storeu_si32(void *__p, __m128i __b)
/// location does not have to be aligned.
/// \param __b
/// A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si16(void *__p, __m128i __b)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si16(void *__p,
+ __m128i __b) {
struct __storeu_si16 {
short __v;
} __attribute__((__packed__, __may_alias__));
- ((struct __storeu_si16*)__p)->__v = ((__v8hi)__b)[0];
+ ((struct __storeu_si16 *)__p)->__v = ((__v8hi)__b)[0];
}
/// Moves bytes selected by the mask from the first operand to the
@@ -4100,9 +3904,9 @@ _mm_storeu_si16(void *__p, __m128i __b)
/// \param __p
/// A pointer to an unaligned 128-bit memory location where the specified
/// values are moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_maskmoveu_si128(__m128i __d,
+ __m128i __n,
+ char *__p) {
__builtin_ia32_maskmovdqu((__v16qi)__d, (__v16qi)__n, __p);
}
@@ -4119,13 +3923,12 @@ _mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p)
/// \param __a
/// A 128-bit integer vector of [2 x i64]. The lower 64 bits contain the
/// value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storel_epi64(__m128i_u *__p, __m128i __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_epi64(__m128i_u *__p,
+ __m128i __a) {
struct __mm_storel_epi64_struct {
long long __u;
} __attribute__((__packed__, __may_alias__));
- ((struct __mm_storel_epi64_struct*)__p)->__u = __a[0];
+ ((struct __mm_storel_epi64_struct *)__p)->__u = __a[0];
}
/// Stores a 128-bit floating point vector of [2 x double] to a 128-bit
@@ -4142,10 +3945,9 @@ _mm_storel_epi64(__m128i_u *__p, __m128i __a)
/// A pointer to the 128-bit aligned memory location used to store the value.
/// \param __a
/// A vector of [2 x double] containing the 64-bit values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_stream_pd(double *__p, __m128d __a)
-{
- __builtin_nontemporal_store((__v2df)__a, (__v2df*)__p);
+static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_pd(void *__p,
+ __m128d __a) {
+ __builtin_nontemporal_store((__v2df)__a, (__v2df *)__p);
}
/// Stores a 128-bit integer vector to a 128-bit aligned memory location.
@@ -4161,10 +3963,9 @@ _mm_stream_pd(double *__p, __m128d __a)
/// A pointer to the 128-bit aligned memory location used to store the value.
/// \param __a
/// A 128-bit integer vector containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_stream_si128(__m128i *__p, __m128i __a)
-{
- __builtin_nontemporal_store((__v2di)__a, (__v2di*)__p);
+static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_si128(void *__p,
+ __m128i __a) {
+ __builtin_nontemporal_store((__v2di)__a, (__v2di *)__p);
}
/// Stores a 32-bit integer value in the specified memory location.
@@ -4180,10 +3981,10 @@ _mm_stream_si128(__m128i *__p, __m128i __a)
/// A pointer to the 32-bit memory location used to store the value.
/// \param __a
/// A 32-bit integer containing the value to be stored.
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
-_mm_stream_si32(int *__p, int __a)
-{
- __builtin_ia32_movnti(__p, __a);
+static __inline__ void
+ __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
+ _mm_stream_si32(void *__p, int __a) {
+ __builtin_ia32_movnti((int *)__p, __a);
}
#ifdef __x86_64__
@@ -4200,10 +4001,10 @@ _mm_stream_si32(int *__p, int __a)
/// A pointer to the 64-bit memory location used to store the value.
/// \param __a
/// A 64-bit integer containing the value to be stored.
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
-_mm_stream_si64(long long *__p, long long __a)
-{
- __builtin_ia32_movnti64(__p, __a);
+static __inline__ void
+ __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
+ _mm_stream_si64(void *__p, long long __a) {
+ __builtin_ia32_movnti64((long long *)__p, __a);
}
#endif
@@ -4221,7 +4022,7 @@ extern "C" {
/// \param __p
/// A pointer to the memory location used to identify the cache line to be
/// flushed.
-void _mm_clflush(void const * __p);
+void _mm_clflush(void const *__p);
/// Forces strong memory ordering (serialization) between load
/// instructions preceding this instruction and load instructions following
@@ -4271,9 +4072,8 @@ void _mm_mfence(void);
/// than 0x80 are saturated to 0x80. The converted [8 x i8] values are
/// written to the higher 64 bits of the result.
/// \returns A 128-bit vector of [16 x i8] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packs_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi16(__m128i __a,
+ __m128i __b) {
return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b);
}
@@ -4299,9 +4099,8 @@ _mm_packs_epi16(__m128i __a, __m128i __b)
/// less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values
/// are written to the higher 64 bits of the result.
/// \returns A 128-bit vector of [8 x i16] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packs_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi32(__m128i __a,
+ __m128i __b) {
return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b);
}
@@ -4327,9 +4126,8 @@ _mm_packs_epi32(__m128i __a, __m128i __b)
/// than 0x00 are saturated to 0x00. The converted [8 x i8] values are
/// written to the higher 64 bits of the result.
/// \returns A 128-bit vector of [16 x i8] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packus_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi16(__m128i __a,
+ __m128i __b) {
return (__m128i)__builtin_ia32_packuswb128((__v8hi)__a, (__v8hi)__b);
}
@@ -4338,26 +4136,30 @@ _mm_packus_epi16(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
+/// \code
+/// __m128i _mm_extract_epi16(__m128i a, const int imm);
+/// \endcode
+///
/// This intrinsic corresponds to the <c> VPEXTRW / PEXTRW </c> instruction.
///
-/// \param __a
+/// \param a
/// A 128-bit integer vector.
-/// \param __imm
-/// An immediate value. Bits [2:0] selects values from \a __a to be assigned
+/// \param imm
+/// An immediate value. Bits [2:0] selects values from \a a to be assigned
/// to bits[15:0] of the result. \n
-/// 000: assign values from bits [15:0] of \a __a. \n
-/// 001: assign values from bits [31:16] of \a __a. \n
-/// 010: assign values from bits [47:32] of \a __a. \n
-/// 011: assign values from bits [63:48] of \a __a. \n
-/// 100: assign values from bits [79:64] of \a __a. \n
-/// 101: assign values from bits [95:80] of \a __a. \n
-/// 110: assign values from bits [111:96] of \a __a. \n
-/// 111: assign values from bits [127:112] of \a __a.
+/// 000: assign values from bits [15:0] of \a a. \n
+/// 001: assign values from bits [31:16] of \a a. \n
+/// 010: assign values from bits [47:32] of \a a. \n
+/// 011: assign values from bits [63:48] of \a a. \n
+/// 100: assign values from bits [79:64] of \a a. \n
+/// 101: assign values from bits [95:80] of \a a. \n
+/// 110: assign values from bits [111:96] of \a a. \n
+/// 111: assign values from bits [127:112] of \a a.
/// \returns An integer, whose lower 16 bits are selected from the 128-bit
/// integer vector parameter and the remaining bits are assigned zeros.
-#define _mm_extract_epi16(a, imm) \
- (int)(unsigned short)__builtin_ia32_vec_ext_v8hi((__v8hi)(__m128i)(a), \
- (int)(imm))
+#define _mm_extract_epi16(a, imm) \
+ ((int)(unsigned short)__builtin_ia32_vec_ext_v8hi((__v8hi)(__m128i)(a), \
+ (int)(imm)))
/// Constructs a 128-bit integer vector by first making a copy of the
/// 128-bit integer vector parameter, and then inserting the lower 16 bits
@@ -4366,22 +4168,26 @@ _mm_packus_epi16(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
+/// \code
+/// __m128i _mm_insert_epi16(__m128i a, int b, const int imm);
+/// \endcode
+///
/// This intrinsic corresponds to the <c> VPINSRW / PINSRW </c> instruction.
///
-/// \param __a
+/// \param a
/// A 128-bit integer vector of [8 x i16]. This vector is copied to the
/// result and then one of the eight elements in the result is replaced by
-/// the lower 16 bits of \a __b.
-/// \param __b
+/// the lower 16 bits of \a b.
+/// \param b
/// An integer. The lower 16 bits of this parameter are written to the
-/// result beginning at an offset specified by \a __imm.
-/// \param __imm
+/// result beginning at an offset specified by \a imm.
+/// \param imm
/// An immediate value specifying the bit offset in the result at which the
-/// lower 16 bits of \a __b are written.
+/// lower 16 bits of \a b are written.
/// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi16(a, b, imm) \
- (__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)(__m128i)(a), (int)(b), \
- (int)(imm))
+#define _mm_insert_epi16(a, b, imm) \
+ ((__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)(__m128i)(a), (int)(b), \
+ (int)(imm)))
/// Copies the values of the most significant bits from each 8-bit
/// element in a 128-bit integer vector of [16 x i8] to create a 16-bit mask
@@ -4395,9 +4201,7 @@ _mm_packus_epi16(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing the values with bits to be extracted.
/// \returns The most significant bits from each 8-bit element in \a __a,
/// written to bits [15:0]. The other bits are assigned zeros.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_movemask_epi8(__m128i __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_epi8(__m128i __a) {
return __builtin_ia32_pmovmskb128((__v16qi)__a);
}
@@ -4427,10 +4231,13 @@ _mm_movemask_epi8(__m128i __a)
/// 00: assign values from bits [31:0] of \a a. \n
/// 01: assign values from bits [63:32] of \a a. \n
/// 10: assign values from bits [95:64] of \a a. \n
-/// 11: assign values from bits [127:96] of \a a.
+/// 11: assign values from bits [127:96] of \a a. \n
+/// Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+/// <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+/// <c>[b6, b4, b2, b0]</c>.
/// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shuffle_epi32(a, imm) \
- (__m128i)__builtin_ia32_pshufd((__v4si)(__m128i)(a), (int)(imm))
+#define _mm_shuffle_epi32(a, imm) \
+ ((__m128i)__builtin_ia32_pshufd((__v4si)(__m128i)(a), (int)(imm)))
/// Constructs a 128-bit integer vector by shuffling four lower 16-bit
/// elements of a 128-bit integer vector of [8 x i16], using the immediate
@@ -4458,9 +4265,12 @@ _mm_movemask_epi8(__m128i __a)
/// 01: assign values from bits [31:16] of \a a. \n
/// 10: assign values from bits [47:32] of \a a. \n
/// 11: assign values from bits [63:48] of \a a. \n
+/// Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+/// <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+/// <c>[b6, b4, b2, b0]</c>.
/// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shufflelo_epi16(a, imm) \
- (__m128i)__builtin_ia32_pshuflw((__v8hi)(__m128i)(a), (int)(imm))
+#define _mm_shufflelo_epi16(a, imm) \
+ ((__m128i)__builtin_ia32_pshuflw((__v8hi)(__m128i)(a), (int)(imm)))
/// Constructs a 128-bit integer vector by shuffling four upper 16-bit
/// elements of a 128-bit integer vector of [8 x i16], using the immediate
@@ -4488,9 +4298,12 @@ _mm_movemask_epi8(__m128i __a)
/// 01: assign values from bits [95:80] of \a a. \n
/// 10: assign values from bits [111:96] of \a a. \n
/// 11: assign values from bits [127:112] of \a a. \n
+/// Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+/// <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+/// <c>[b6, b4, b2, b0]</c>.
/// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shufflehi_epi16(a, imm) \
- (__m128i)__builtin_ia32_pshufhw((__v8hi)(__m128i)(a), (int)(imm))
+#define _mm_shufflehi_epi16(a, imm) \
+ ((__m128i)__builtin_ia32_pshufhw((__v8hi)(__m128i)(a), (int)(imm)))
/// Unpacks the high-order (index 8-15) values from two 128-bit vectors
/// of [16 x i8] and interleaves them into a 128-bit vector of [16 x i8].
@@ -4521,10 +4334,11 @@ _mm_movemask_epi8(__m128i __a)
/// Bits [119:112] are written to bits [111:104] of the result. \n
/// Bits [127:120] are written to bits [127:120] of the result.
/// \returns A 128-bit vector of [16 x i8] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi8(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi8(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_shufflevector(
+ (__v16qi)__a, (__v16qi)__b, 8, 16 + 8, 9, 16 + 9, 10, 16 + 10, 11,
+ 16 + 11, 12, 16 + 12, 13, 16 + 13, 14, 16 + 14, 15, 16 + 15);
}
/// Unpacks the high-order (index 4-7) values from two 128-bit vectors of
@@ -4548,10 +4362,10 @@ _mm_unpackhi_epi8(__m128i __a, __m128i __b)
/// Bits [111:96] are written to bits [95:80] of the result. \n
/// Bits [127:112] are written to bits [127:112] of the result.
/// \returns A 128-bit vector of [8 x i16] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi16(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi16(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8 + 4, 5,
+ 8 + 5, 6, 8 + 6, 7, 8 + 7);
}
/// Unpacks the high-order (index 2,3) values from two 128-bit vectors of
@@ -4571,10 +4385,10 @@ _mm_unpackhi_epi16(__m128i __a, __m128i __b)
/// Bits [95:64] are written to bits [64:32] of the destination. \n
/// Bits [127:96] are written to bits [127:96] of the destination.
/// \returns A 128-bit vector of [4 x i32] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi32(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4+2, 3, 4+3);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi32(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4 + 2, 3,
+ 4 + 3);
}
/// Unpacks the high-order 64-bit elements from two 128-bit vectors of
@@ -4592,10 +4406,9 @@ _mm_unpackhi_epi32(__m128i __a, __m128i __b)
/// A 128-bit vector of [2 x i64]. \n
/// Bits [127:64] are written to bits [127:64] of the destination.
/// \returns A 128-bit vector of [2 x i64] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi64(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2+1);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi64(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2 + 1);
}
/// Unpacks the low-order (index 0-7) values from two 128-bit vectors of
@@ -4627,10 +4440,11 @@ _mm_unpackhi_epi64(__m128i __a, __m128i __b)
/// Bits [55:48] are written to bits [111:104] of the result. \n
/// Bits [63:56] are written to bits [127:120] of the result.
/// \returns A 128-bit vector of [16 x i8] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi8(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi8(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_shufflevector(
+ (__v16qi)__a, (__v16qi)__b, 0, 16 + 0, 1, 16 + 1, 2, 16 + 2, 3, 16 + 3, 4,
+ 16 + 4, 5, 16 + 5, 6, 16 + 6, 7, 16 + 7);
}
/// Unpacks the low-order (index 0-3) values from each of the two 128-bit
@@ -4655,10 +4469,10 @@ _mm_unpacklo_epi8(__m128i __a, __m128i __b)
/// Bits [47:32] are written to bits [95:80] of the result. \n
/// Bits [63:48] are written to bits [127:112] of the result.
/// \returns A 128-bit vector of [8 x i16] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi16(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi16(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8 + 0, 1,
+ 8 + 1, 2, 8 + 2, 3, 8 + 3);
}
/// Unpacks the low-order (index 0,1) values from two 128-bit vectors of
@@ -4678,10 +4492,10 @@ _mm_unpacklo_epi16(__m128i __a, __m128i __b)
/// Bits [31:0] are written to bits [64:32] of the destination. \n
/// Bits [63:32] are written to bits [127:96] of the destination.
/// \returns A 128-bit vector of [4 x i32] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi32(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4+0, 1, 4+1);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi32(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4 + 0, 1,
+ 4 + 1);
}
/// Unpacks the low-order 64-bit elements from two 128-bit vectors of
@@ -4699,10 +4513,9 @@ _mm_unpacklo_epi32(__m128i __a, __m128i __b)
/// A 128-bit vector of [2 x i64]. \n
/// Bits [63:0] are written to bits [127:64] of the destination. \n
/// \returns A 128-bit vector of [2 x i64] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi64(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2+0);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi64(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2 + 0);
}
/// Returns the lower 64 bits of a 128-bit integer vector as a 64-bit
@@ -4716,9 +4529,7 @@ _mm_unpacklo_epi64(__m128i __a, __m128i __b)
/// A 128-bit integer vector operand. The lower 64 bits are moved to the
/// destination.
/// \returns A 64-bit integer containing the lower 64 bits of the parameter.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_movepi64_pi64(__m128i __a)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_movepi64_pi64(__m128i __a) {
return (__m64)__a[0];
}
@@ -4733,10 +4544,8 @@ _mm_movepi64_pi64(__m128i __a)
/// A 64-bit value.
/// \returns A 128-bit integer vector. The lower 64 bits contain the value from
/// the operand. The upper 64 bits are assigned zeros.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_movpi64_epi64(__m64 __a)
-{
- return __extension__ (__m128i)(__v2di){ (long long)__a, 0 };
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_movpi64_epi64(__m64 __a) {
+ return __extension__(__m128i)(__v2di){(long long)__a, 0};
}
/// Moves the lower 64 bits of a 128-bit integer vector to a 128-bit
@@ -4751,9 +4560,7 @@ _mm_movpi64_epi64(__m64 __a)
/// destination.
/// \returns A 128-bit integer vector. The lower 64 bits contain the value from
/// the operand. The upper 64 bits are assigned zeros.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_move_epi64(__m128i __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_move_epi64(__m128i __a) {
return __builtin_shufflevector((__v2di)__a, _mm_setzero_si128(), 0, 2);
}
@@ -4772,10 +4579,9 @@ _mm_move_epi64(__m128i __a)
/// A 128-bit vector of [2 x double]. \n
/// Bits [127:64] are written to bits [127:64] of the destination.
/// \returns A 128-bit vector of [2 x double] containing the interleaved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_unpackhi_pd(__m128d __a, __m128d __b)
-{
- return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 1, 2+1);
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_unpackhi_pd(__m128d __a,
+ __m128d __b) {
+ return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 1, 2 + 1);
}
/// Unpacks the low-order 64-bit elements from two 128-bit vectors
@@ -4793,10 +4599,9 @@ _mm_unpackhi_pd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double]. \n
/// Bits [63:0] are written to bits [127:64] of the destination.
/// \returns A 128-bit vector of [2 x double] containing the interleaved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_unpacklo_pd(__m128d __a, __m128d __b)
-{
- return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 0, 2+0);
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_unpacklo_pd(__m128d __a,
+ __m128d __b) {
+ return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 0, 2 + 0);
}
/// Extracts the sign bits of the double-precision values in the 128-bit
@@ -4812,13 +4617,10 @@ _mm_unpacklo_pd(__m128d __a, __m128d __b)
/// be extracted.
/// \returns The sign bits from each of the double-precision elements in \a __a,
/// written to bits [1:0]. The remaining bits are assigned values of zero.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_movemask_pd(__m128d __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_pd(__m128d __a) {
return __builtin_ia32_movmskpd((__v2df)__a);
}
-
/// Constructs a 128-bit floating-point vector of [2 x double] from two
/// 128-bit vector parameters of [2 x double], using the immediate-value
/// parameter as a specifier.
@@ -4842,10 +4644,13 @@ _mm_movemask_pd(__m128d __a)
/// Bit[0] = 1: upper element of \a a copied to lower element of result. \n
/// Bit[1] = 0: lower element of \a b copied to upper element of result. \n
/// Bit[1] = 1: upper element of \a b copied to upper element of result. \n
+/// Note: To generate a mask, you can use the \c _MM_SHUFFLE2 macro.
+/// <c>_MM_SHUFFLE2(b1, b0)</c> can create a 2-bit mask of the form
+/// <c>[b1, b0]</c>.
/// \returns A 128-bit vector of [2 x double] containing the shuffled values.
-#define _mm_shuffle_pd(a, b, i) \
- (__m128d)__builtin_ia32_shufpd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
- (int)(i))
+#define _mm_shuffle_pd(a, b, i) \
+ ((__m128d)__builtin_ia32_shufpd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
+ (int)(i)))
/// Casts a 128-bit floating-point vector of [2 x double] into a 128-bit
/// floating-point vector of [4 x float].
@@ -4858,9 +4663,7 @@ _mm_movemask_pd(__m128d __a)
/// A 128-bit floating-point vector of [2 x double].
/// \returns A 128-bit floating-point vector of [4 x float] containing the same
/// bitwise pattern as the parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_castpd_ps(__m128d __a)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_castpd_ps(__m128d __a) {
return (__m128)__a;
}
@@ -4875,9 +4678,7 @@ _mm_castpd_ps(__m128d __a)
/// A 128-bit floating-point vector of [2 x double].
/// \returns A 128-bit integer vector containing the same bitwise pattern as the
/// parameter.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_castpd_si128(__m128d __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_castpd_si128(__m128d __a) {
return (__m128i)__a;
}
@@ -4892,9 +4693,7 @@ _mm_castpd_si128(__m128d __a)
/// A 128-bit floating-point vector of [4 x float].
/// \returns A 128-bit floating-point vector of [2 x double] containing the same
/// bitwise pattern as the parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_castps_pd(__m128 __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castps_pd(__m128 __a) {
return (__m128d)__a;
}
@@ -4909,9 +4708,7 @@ _mm_castps_pd(__m128 __a)
/// A 128-bit floating-point vector of [4 x float].
/// \returns A 128-bit integer vector containing the same bitwise pattern as the
/// parameter.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_castps_si128(__m128 __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_castps_si128(__m128 __a) {
return (__m128i)__a;
}
@@ -4926,9 +4723,7 @@ _mm_castps_si128(__m128 __a)
/// A 128-bit integer vector.
/// \returns A 128-bit floating-point vector of [4 x float] containing the same
/// bitwise pattern as the parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_castsi128_ps(__m128i __a)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_castsi128_ps(__m128i __a) {
return (__m128)__a;
}
@@ -4943,9 +4738,7 @@ _mm_castsi128_ps(__m128i __a)
/// A 128-bit integer vector.
/// \returns A 128-bit floating-point vector of [2 x double] containing the same
/// bitwise pattern as the parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_castsi128_pd(__m128i __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castsi128_pd(__m128i __a) {
return (__m128d)__a;
}
@@ -4970,12 +4763,13 @@ void _mm_pause(void);
#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
-#define _MM_DENORMALS_ZERO_ON (0x0040U)
-#define _MM_DENORMALS_ZERO_OFF (0x0000U)
+#define _MM_DENORMALS_ZERO_ON (0x0040U)
+#define _MM_DENORMALS_ZERO_OFF (0x0000U)
#define _MM_DENORMALS_ZERO_MASK (0x0040U)
#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
-#define _MM_SET_DENORMALS_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
+#define _MM_SET_DENORMALS_ZERO_MODE(x) \
+ (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
#endif /* __EMMINTRIN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/f16cintrin.h b/contrib/llvm-project/clang/lib/Headers/f16cintrin.h
index 109b604adae3..94a662c1d93a 100644
--- a/contrib/llvm-project/clang/lib/Headers/f16cintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/f16cintrin.h
@@ -65,9 +65,9 @@ _cvtsh_ss(unsigned short __a)
/// 011: Truncate \n
/// 1XX: Use MXCSR.RC for rounding
/// \returns The converted 16-bit half-precision float value.
-#define _cvtss_sh(a, imm) \
+#define _cvtss_sh(a, imm) __extension__ ({ \
(unsigned short)(((__v8hi)__builtin_ia32_vcvtps2ph((__v4sf){a, 0, 0, 0}, \
- (imm)))[0])
+ (imm)))[0]); })
/// Converts a 128-bit vector containing 32-bit float values into a
/// 128-bit vector containing 16-bit half-precision float values.
@@ -93,7 +93,7 @@ _cvtsh_ss(unsigned short __a)
/// values. The lower 64 bits are used to store the converted 16-bit
/// half-precision floating-point values.
#define _mm_cvtps_ph(a, imm) \
- (__m128i)__builtin_ia32_vcvtps2ph((__v4sf)(__m128)(a), (imm))
+ ((__m128i)__builtin_ia32_vcvtps2ph((__v4sf)(__m128)(a), (imm)))
/// Converts a 128-bit vector containing 16-bit half-precision float
/// values into a 128-bit vector containing 32-bit float values.
@@ -136,7 +136,7 @@ _mm_cvtph_ps(__m128i __a)
/// \returns A 128-bit vector containing the converted 16-bit half-precision
/// float values.
#define _mm256_cvtps_ph(a, imm) \
- (__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)(__m256)(a), (imm))
+ ((__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)(__m256)(a), (imm)))
/// Converts a 128-bit vector containing 16-bit half-precision float
/// values into a 256-bit vector of [8 x float].
diff --git a/contrib/llvm-project/clang/lib/Headers/float.h b/contrib/llvm-project/clang/lib/Headers/float.h
index ed610b24aa10..0e73bca0a2d6 100644
--- a/contrib/llvm-project/clang/lib/Headers/float.h
+++ b/contrib/llvm-project/clang/lib/Headers/float.h
@@ -14,10 +14,11 @@
* additional definitions provided for Windows.
* For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
*
- * Also fall back on Darwin to allow additional definitions and
+ * Also fall back on Darwin and AIX to allow additional definitions and
* implementation-defined values.
*/
-#if (defined(__APPLE__) || (defined(__MINGW32__) || defined(_MSC_VER))) && \
+#if (defined(__APPLE__) || defined(__MINGW32__) || defined(_MSC_VER) || \
+ defined(_AIX)) && \
__STDC_HOSTED__ && __has_include_next(<float.h>)
/* Prior to Apple's 10.7 SDK, float.h SDK header used to apply an extra level
@@ -37,7 +38,10 @@
# undef FLT_MANT_DIG
# undef DBL_MANT_DIG
# undef LDBL_MANT_DIG
-# if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) || __cplusplus >= 201103L
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \
+ !defined(__STRICT_ANSI__) || \
+ (defined(__cplusplus) && __cplusplus >= 201103L) || \
+ (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
# undef DECIMAL_DIG
# endif
# undef FLT_DIG
@@ -64,7 +68,10 @@
# undef FLT_MIN
# undef DBL_MIN
# undef LDBL_MIN
-# if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) || __cplusplus >= 201703L
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \
+ !defined(__STRICT_ANSI__) || \
+ (defined(__cplusplus) && __cplusplus >= 201703L) || \
+ (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
# undef FLT_TRUE_MIN
# undef DBL_TRUE_MIN
# undef LDBL_TRUE_MIN
@@ -79,7 +86,10 @@
/* Characteristics of floating point types, C99 5.2.4.2.2 */
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \
+ (defined(__cplusplus) && __cplusplus >= 201103L)
#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__
+#endif
#define FLT_ROUNDS (__builtin_flt_rounds())
#define FLT_RADIX __FLT_RADIX__
@@ -87,7 +97,10 @@
#define DBL_MANT_DIG __DBL_MANT_DIG__
#define LDBL_MANT_DIG __LDBL_MANT_DIG__
-#if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) || __cplusplus >= 201103L
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \
+ !defined(__STRICT_ANSI__) || \
+ (defined(__cplusplus) && __cplusplus >= 201103L) || \
+ (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
# define DECIMAL_DIG __DECIMAL_DIG__
#endif
@@ -123,7 +136,10 @@
#define DBL_MIN __DBL_MIN__
#define LDBL_MIN __LDBL_MIN__
-#if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) || __cplusplus >= 201703L
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \
+ !defined(__STRICT_ANSI__) || \
+ (defined(__cplusplus) && __cplusplus >= 201703L) || \
+ (__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
# define FLT_TRUE_MIN __FLT_DENORM_MIN__
# define DBL_TRUE_MIN __DBL_DENORM_MIN__
# define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
diff --git a/contrib/llvm-project/clang/lib/Headers/fmaintrin.h b/contrib/llvm-project/clang/lib/Headers/fmaintrin.h
index d889b7c5e270..ea832fac4f99 100644
--- a/contrib/llvm-project/clang/lib/Headers/fmaintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/fmaintrin.h
@@ -18,192 +18,756 @@
#define __DEFAULT_FN_ATTRS128 __attribute__((__always_inline__, __nodebug__, __target__("fma"), __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 __attribute__((__always_inline__, __nodebug__, __target__("fma"), __min_vector_width__(256)))
+/// Computes a multiply-add of 128-bit vectors of [4 x float].
+/// For each element, computes <c> (__A * __B) + __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMADD213PS instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [4 x float] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [4 x float] containing the addend.
+/// \returns A 128-bit vector of [4 x float] containing the result.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}
+/// Computes a multiply-add of 128-bit vectors of [2 x double].
+/// For each element, computes <c> (__A * __B) + __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMADD213PD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [2 x double] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [2 x double] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [2 x double] containing the addend.
+/// \returns A 128-bit [2 x double] vector containing the result.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
}
+/// Computes a scalar multiply-add of the single-precision values in the
+/// low 32 bits of 128-bit vectors of [4 x float].
+/// \code
+/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0]
+/// result[127:32] = __A[127:32]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMADD213SS instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float] containing the multiplicand in the low
+/// 32 bits.
+/// \param __B
+/// A 128-bit vector of [4 x float] containing the multiplier in the low
+/// 32 bits.
+/// \param __C
+/// A 128-bit vector of [4 x float] containing the addend in the low
+/// 32 bits.
+/// \returns A 128-bit vector of [4 x float] containing the result in the low
+/// 32 bits and a copy of \a __A[127:32] in the upper 96 bits.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}
+/// Computes a scalar multiply-add of the double-precision values in the
+/// low 64 bits of 128-bit vectors of [2 x double].
+/// \code
+/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0]
+/// result[127:64] = __A[127:64]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMADD213SD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [2 x double] containing the multiplicand in the low
+/// 64 bits.
+/// \param __B
+/// A 128-bit vector of [2 x double] containing the multiplier in the low
+/// 64 bits.
+/// \param __C
+/// A 128-bit vector of [2 x double] containing the addend in the low
+/// 64 bits.
+/// \returns A 128-bit vector of [2 x double] containing the result in the low
+/// 64 bits and a copy of \a __A[127:64] in the upper 64 bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, (__v2df)__C);
}
+/// Computes a multiply-subtract of 128-bit vectors of [4 x float].
+/// For each element, computes <c> (__A * __B) - __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMSUB213PS instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [4 x float] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [4 x float] containing the subtrahend.
+/// \returns A 128-bit vector of [4 x float] containing the result.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
}
+/// Computes a multiply-subtract of 128-bit vectors of [2 x double].
+/// For each element, computes <c> (__A * __B) - __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMSUB213PD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [2 x double] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [2 x double] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [2 x double] containing the addend.
+/// \returns A 128-bit vector of [2 x double] containing the result.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
+/// Computes a scalar multiply-subtract of the single-precision values in
+/// the low 32 bits of 128-bit vectors of [4 x float].
+/// \code
+/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0]
+/// result[127:32] = __A[127:32]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMSUB213SS instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float] containing the multiplicand in the low
+/// 32 bits.
+/// \param __B
+/// A 128-bit vector of [4 x float] containing the multiplier in the low
+/// 32 bits.
+/// \param __C
+/// A 128-bit vector of [4 x float] containing the subtrahend in the low
+/// 32 bits.
+/// \returns A 128-bit vector of [4 x float] containing the result in the low
+/// 32 bits, and a copy of \a __A[127:32] in the upper 96 bits.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
}
+/// Computes a scalar multiply-subtract of the double-precision values in
+/// the low 64 bits of 128-bit vectors of [2 x double].
+/// \code
+/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0]
+/// result[127:64] = __A[127:64]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMSUB213SD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [2 x double] containing the multiplicand in the low
+/// 64 bits.
+/// \param __B
+/// A 128-bit vector of [2 x double] containing the multiplier in the low
+/// 64 bits.
+/// \param __C
+/// A 128-bit vector of [2 x double] containing the subtrahend in the low
+/// 64 bits.
+/// \returns A 128-bit vector of [2 x double] containing the result in the low
+/// 64 bits, and a copy of \a __A[127:64] in the upper 64 bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
+/// Computes a negated multiply-add of 128-bit vectors of [4 x float].
+/// For each element, computes <c> -(__A * __B) + __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMADD213DPS instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [4 x float] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [4 x float] containing the addend.
+/// \returns A 128-bit [4 x float] vector containing the result.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}
+/// Computes a negated multiply-add of 128-bit vectors of [2 x double].
+/// For each element, computes <c> -(__A * __B) + __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMADD213PD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [2 x double] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [2 x double] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [2 x double] containing the addend.
+/// \returns A 128-bit vector of [2 x double] containing the result.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, (__v2df)__C);
}
+/// Computes a scalar negated multiply-add of the single-precision values in
+/// the low 32 bits of 128-bit vectors of [4 x float].
+/// \code
+/// result[31:0] = -(__A[31:0] * __B[31:0]) + __C[31:0]
+/// result[127:32] = __A[127:32]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMADD213SS instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float] containing the multiplicand in the low
+/// 32 bits.
+/// \param __B
+/// A 128-bit vector of [4 x float] containing the multiplier in the low
+/// 32 bits.
+/// \param __C
+/// A 128-bit vector of [4 x float] containing the addend in the low
+/// 32 bits.
+/// \returns A 128-bit vector of [4 x float] containing the result in the low
+/// 32 bits, and a copy of \a __A[127:32] in the upper 96 bits.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, -(__v4sf)__B, (__v4sf)__C);
}
+/// Computes a scalar negated multiply-add of the double-precision values
+/// in the low 64 bits of 128-bit vectors of [2 x double].
+/// \code
+/// result[63:0] = -(__A[63:0] * __B[63:0]) + __C[63:0]
+/// result[127:64] = __A[127:64]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMADD213SD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [2 x double] containing the multiplicand in the low
+/// 64 bits.
+/// \param __B
+/// A 128-bit vector of [2 x double] containing the multiplier in the low
+/// 64 bits.
+/// \param __C
+/// A 128-bit vector of [2 x double] containing the addend in the low
+/// 64 bits.
+/// \returns A 128-bit vector of [2 x double] containing the result in the low
+/// 64 bits, and a copy of \a __A[127:64] in the upper 64 bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, (__v2df)__C);
}
+/// Computes a negated multiply-subtract of 128-bit vectors of [4 x float].
+/// For each element, computes <c> -(__A * __B) - __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMSUB213PS instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [4 x float] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [4 x float] containing the subtrahend.
+/// \returns A 128-bit vector of [4 x float] containing the result.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddps(-(__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
}
+/// Computes a negated multiply-subtract of 128-bit vectors of [2 x double].
+/// For each element, computes <c> -(__A * __B) - __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMSUB213PD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [2 x double] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [2 x double] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [2 x double] containing the subtrahend.
+/// \returns A 128-bit vector of [2 x double] containing the result.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddpd(-(__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
+/// Computes a scalar negated multiply-subtract of the single-precision
+/// values in the low 32 bits of 128-bit vectors of [4 x float].
+/// \code
+/// result[31:0] = -(__A[31:0] * __B[31:0]) - __C[31:0]
+/// result[127:32] = __A[127:32]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMSUB213SS instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float] containing the multiplicand in the low
+/// 32 bits.
+/// \param __B
+/// A 128-bit vector of [4 x float] containing the multiplier in the low
+/// 32 bits.
+/// \param __C
+/// A 128-bit vector of [4 x float] containing the subtrahend in the low
+/// 32 bits.
+/// \returns A 128-bit vector of [4 x float] containing the result in the low
+/// 32 bits, and a copy of \a __A[127:32] in the upper 96 bits.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddss3((__v4sf)__A, -(__v4sf)__B, -(__v4sf)__C);
}
+/// Computes a scalar negated multiply-subtract of the double-precision
+/// values in the low 64 bits of 128-bit vectors of [2 x double].
+/// \code
+/// result[63:0] = -(__A[63:0] * __B[63:0]) - __C[63:0]
+/// result[127:64] = __A[127:64]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMSUB213SD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [2 x double] containing the multiplicand in the low
+/// 64 bits.
+/// \param __B
+/// A 128-bit vector of [2 x double] containing the multiplier in the low
+/// 64 bits.
+/// \param __C
+/// A 128-bit vector of [2 x double] containing the subtrahend in the low
+/// 64 bits.
+/// \returns A 128-bit vector of [2 x double] containing the result in the low
+/// 64 bits, and a copy of \a __A[127:64] in the upper 64 bits.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsd3((__v2df)__A, -(__v2df)__B, -(__v2df)__C);
}
+/// Computes a multiply with alternating add/subtract of 128-bit vectors of
+/// [4 x float].
+/// \code
+/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0]
+/// result[63:32] = (__A[63:32] * __B[63:32]) + __C[63:32]
+/// result[95:64] = (__A[95:64] * __B[95:64]) - __C[95:64]
+/// result[127:96] = (__A[127:96] * __B[127:96]) + __C[127:96]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMADDSUB213PS instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [4 x float] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [4 x float] containing the addend/subtrahend.
+/// \returns A 128-bit vector of [4 x float] containing the result.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, (__v4sf)__C);
}
+/// Computes a multiply with alternating add/subtract of 128-bit vectors of
+/// [2 x double].
+/// \code
+/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0]
+/// result[127:64] = (__A[127:64] * __B[127:64]) + __C[127:64]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMADDSUB213PD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [2 x double] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [2 x double] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [2 x double] containing the addend/subtrahend.
+/// \returns A 128-bit vector of [2 x double] containing the result.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, (__v2df)__C);
}
+/// Computes a multiply with alternating add/subtract of 128-bit vectors of
+/// [4 x float].
+/// \code
+/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0]
+/// result[63:32] = (__A[63:32] * __B[63:32]) - __C[63:32]
+/// result[95:64] = (__A[95:64] * __B[95:64]) + __C[95:64]
+/// result[127:96 = (__A[127:96] * __B[127:96]) - __C[127:96]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMSUBADD213PS instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x float] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [4 x float] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [4 x float] containing the addend/subtrahend.
+/// \returns A 128-bit vector of [4 x float] containing the result.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C)
{
return (__m128)__builtin_ia32_vfmaddsubps((__v4sf)__A, (__v4sf)__B, -(__v4sf)__C);
}
+/// Computes a multiply with alternating add/subtract of 128-bit vectors of
+/// [2 x double].
+/// \code
+/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0]
+/// result[127:64] = (__A[127:64] * __B[127:64]) - __C[127:64]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMADDSUB213PD instruction.
+///
+/// \param __A
+/// A 128-bit vector of [2 x double] containing the multiplicand.
+/// \param __B
+/// A 128-bit vector of [2 x double] containing the multiplier.
+/// \param __C
+/// A 128-bit vector of [2 x double] containing the addend/subtrahend.
+/// \returns A 128-bit vector of [2 x double] containing the result.
static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C)
{
return (__m128d)__builtin_ia32_vfmaddsubpd((__v2df)__A, (__v2df)__B, -(__v2df)__C);
}
+/// Computes a multiply-add of 256-bit vectors of [8 x float].
+/// For each element, computes <c> (__A * __B) + __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMADD213PS instruction.
+///
+/// \param __A
+/// A 256-bit vector of [8 x float] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [8 x float] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [8 x float] containing the addend.
+/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}
+/// Computes a multiply-add of 256-bit vectors of [4 x double].
+/// For each element, computes <c> (__A * __B) + __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMADD213PD instruction.
+///
+/// \param __A
+/// A 256-bit vector of [4 x double] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [4 x double] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [4 x double] containing the addend.
+/// \returns A 256-bit vector of [4 x double] containing the result.
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
}
+/// Computes a multiply-subtract of 256-bit vectors of [8 x float].
+/// For each element, computes <c> (__A * __B) - __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMSUB213PS instruction.
+///
+/// \param __A
+/// A 256-bit vector of [8 x float] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [8 x float] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [8 x float] containing the subtrahend.
+/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
}
+/// Computes a multiply-subtract of 256-bit vectors of [4 x double].
+/// For each element, computes <c> (__A * __B) - __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMSUB213PD instruction.
+///
+/// \param __A
+/// A 256-bit vector of [4 x double] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [4 x double] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [4 x double] containing the subtrahend.
+/// \returns A 256-bit vector of [4 x double] containing the result.
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddpd256((__v4df)__A, (__v4df)__B, -(__v4df)__C);
}
+/// Computes a negated multiply-add of 256-bit vectors of [8 x float].
+/// For each element, computes <c> -(__A * __B) + __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMADD213PS instruction.
+///
+/// \param __A
+/// A 256-bit vector of [8 x float] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [8 x float] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [8 x float] containing the addend.
+/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}
+/// Computes a negated multiply-add of 256-bit vectors of [4 x double].
+/// For each element, computes <c> -(__A * __B) + __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMADD213PD instruction.
+///
+/// \param __A
+/// A 256-bit vector of [4 x double] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [4 x double] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [4 x double] containing the addend.
+/// \returns A 256-bit vector of [4 x double] containing the result.
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, (__v4df)__C);
}
+/// Computes a negated multiply-subtract of 256-bit vectors of [8 x float].
+/// For each element, computes <c> -(__A * __B) - __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMSUB213PS instruction.
+///
+/// \param __A
+/// A 256-bit vector of [8 x float] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [8 x float] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [8 x float] containing the subtrahend.
+/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddps256(-(__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
}
+/// Computes a negated multiply-subtract of 256-bit vectors of [4 x double].
+/// For each element, computes <c> -(__A * __B) - __C </c>.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFNMSUB213PD instruction.
+///
+/// \param __A
+/// A 256-bit vector of [4 x double] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [4 x double] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [4 x double] containing the subtrahend.
+/// \returns A 256-bit vector of [4 x double] containing the result.
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddpd256(-(__v4df)__A, (__v4df)__B, -(__v4df)__C);
}
+/// Computes a multiply with alternating add/subtract of 256-bit vectors of
+/// [8 x float].
+/// \code
+/// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0]
+/// result[63:32] = (__A[63:32] * __B[63:32]) + __C[63:32]
+/// result[95:64] = (__A[95:64] * __B[95:64]) - __C[95:64]
+/// result[127:96] = (__A[127:96] * __B[127:96]) + __C[127:96]
+/// result[159:128] = (__A[159:128] * __B[159:128]) - __C[159:128]
+/// result[191:160] = (__A[191:160] * __B[191:160]) + __C[191:160]
+/// result[223:192] = (__A[223:192] * __B[223:192]) - __C[223:192]
+/// result[255:224] = (__A[255:224] * __B[255:224]) + __C[255:224]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMADDSUB213PS instruction.
+///
+/// \param __A
+/// A 256-bit vector of [8 x float] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [8 x float] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [8 x float] containing the addend/subtrahend.
+/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, (__v8sf)__C);
}
+/// Computes a multiply with alternating add/subtract of 256-bit vectors of
+/// [4 x double].
+/// \code
+/// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0]
+/// result[127:64] = (__A[127:64] * __B[127:64]) + __C[127:64]
+/// result[191:128] = (__A[191:128] * __B[191:128]) - __C[191:128]
+/// result[255:192] = (__A[255:192] * __B[255:192]) + __C[255:192]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMADDSUB213PD instruction.
+///
+/// \param __A
+/// A 256-bit vector of [4 x double] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [4 x double] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [4 x double] containing the addend/subtrahend.
+/// \returns A 256-bit vector of [4 x double] containing the result.
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
return (__m256d)__builtin_ia32_vfmaddsubpd256((__v4df)__A, (__v4df)__B, (__v4df)__C);
}
+/// Computes a vector multiply with alternating add/subtract of 256-bit
+/// vectors of [8 x float].
+/// \code
+/// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0]
+/// result[63:32] = (__A[63:32] * __B[63:32]) - __C[63:32]
+/// result[95:64] = (__A[95:64] * __B[95:64]) + __C[95:64]
+/// result[127:96] = (__A[127:96] * __B[127:96]) - __C[127:96]
+/// result[159:128] = (__A[159:128] * __B[159:128]) + __C[159:128]
+/// result[191:160] = (__A[191:160] * __B[191:160]) - __C[191:160]
+/// result[223:192] = (__A[223:192] * __B[223:192]) + __C[223:192]
+/// result[255:224] = (__A[255:224] * __B[255:224]) - __C[255:224]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMSUBADD213PS instruction.
+///
+/// \param __A
+/// A 256-bit vector of [8 x float] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [8 x float] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [8 x float] containing the addend/subtrahend.
+/// \returns A 256-bit vector of [8 x float] containing the result.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C)
{
return (__m256)__builtin_ia32_vfmaddsubps256((__v8sf)__A, (__v8sf)__B, -(__v8sf)__C);
}
+/// Computes a vector multiply with alternating add/subtract of 256-bit
+/// vectors of [4 x double].
+/// \code
+/// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0]
+/// result[127:64] = (__A[127:64] * __B[127:64]) - __C[127:64]
+/// result[191:128] = (__A[191:128] * __B[191:128]) + __C[191:128]
+/// result[255:192] = (__A[255:192] * __B[255:192]) - __C[255:192]
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VFMSUBADD213PD instruction.
+///
+/// \param __A
+/// A 256-bit vector of [4 x double] containing the multiplicand.
+/// \param __B
+/// A 256-bit vector of [4 x double] containing the multiplier.
+/// \param __C
+/// A 256-bit vector of [4 x double] containing the addend/subtrahend.
+/// \returns A 256-bit vector of [4 x double] containing the result.
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C)
{
diff --git a/contrib/llvm-project/clang/lib/Headers/gfniintrin.h b/contrib/llvm-project/clang/lib/Headers/gfniintrin.h
index 11a321b7c919..73b04a824aba 100644
--- a/contrib/llvm-project/clang/lib/Headers/gfniintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/gfniintrin.h
@@ -15,27 +15,46 @@
#define __GFNIINTRIN_H
/* Default attributes for simple form (no masking). */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("gfni"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("gfni,no-evex512"), __min_vector_width__(128)))
/* Default attributes for YMM unmasked form. */
-#define __DEFAULT_FN_ATTRS_Y __attribute__((__always_inline__, __nodebug__, __target__("avx,gfni"), __min_vector_width__(256)))
-
-/* Default attributes for ZMM forms. */
-#define __DEFAULT_FN_ATTRS_Z __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,gfni"), __min_vector_width__(512)))
-
-/* Default attributes for VLX forms. */
-#define __DEFAULT_FN_ATTRS_VL128 __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,avx512vl,gfni"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS_VL256 __attribute__((__always_inline__, __nodebug__, __target__("avx512bw,avx512vl,gfni"), __min_vector_width__(256)))
+#define __DEFAULT_FN_ATTRS_Y \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx,gfni,no-evex512"), \
+ __min_vector_width__(256)))
+
+/* Default attributes for ZMM unmasked forms. */
+#define __DEFAULT_FN_ATTRS_Z \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512f,evex512,gfni"), \
+ __min_vector_width__(512)))
+/* Default attributes for ZMM masked forms. */
+#define __DEFAULT_FN_ATTRS_Z_MASK \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512bw,evex512,gfni"), \
+ __min_vector_width__(512)))
+
+/* Default attributes for VLX masked forms. */
+#define __DEFAULT_FN_ATTRS_VL128 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512bw,avx512vl,gfni,no-evex512"), \
+ __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS_VL256 \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512bw,avx512vl,gfni,no-evex512"), \
+ __min_vector_width__(256)))
#define _mm_gf2p8affineinv_epi64_epi8(A, B, I) \
- (__m128i)__builtin_ia32_vgf2p8affineinvqb_v16qi((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), \
- (char)(I))
+ ((__m128i)__builtin_ia32_vgf2p8affineinvqb_v16qi((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), \
+ (char)(I)))
#define _mm_gf2p8affine_epi64_epi8(A, B, I) \
- (__m128i)__builtin_ia32_vgf2p8affineqb_v16qi((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), \
- (char)(I))
+ ((__m128i)__builtin_ia32_vgf2p8affineqb_v16qi((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), \
+ (char)(I)))
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_gf2p8mul_epi8(__m128i __A, __m128i __B)
@@ -46,14 +65,14 @@ _mm_gf2p8mul_epi8(__m128i __A, __m128i __B)
#ifdef __AVXINTRIN_H
#define _mm256_gf2p8affineinv_epi64_epi8(A, B, I) \
- (__m256i)__builtin_ia32_vgf2p8affineinvqb_v32qi((__v32qi)(__m256i)(A), \
- (__v32qi)(__m256i)(B), \
- (char)(I))
+ ((__m256i)__builtin_ia32_vgf2p8affineinvqb_v32qi((__v32qi)(__m256i)(A), \
+ (__v32qi)(__m256i)(B), \
+ (char)(I)))
#define _mm256_gf2p8affine_epi64_epi8(A, B, I) \
- (__m256i)__builtin_ia32_vgf2p8affineqb_v32qi((__v32qi)(__m256i)(A), \
- (__v32qi)(__m256i)(B), \
- (char)(I))
+ ((__m256i)__builtin_ia32_vgf2p8affineqb_v32qi((__v32qi)(__m256i)(A), \
+ (__v32qi)(__m256i)(B), \
+ (char)(I)))
static __inline__ __m256i __DEFAULT_FN_ATTRS_Y
_mm256_gf2p8mul_epi8(__m256i __A, __m256i __B)
@@ -65,32 +84,32 @@ _mm256_gf2p8mul_epi8(__m256i __A, __m256i __B)
#ifdef __AVX512BWINTRIN_H
#define _mm512_gf2p8affineinv_epi64_epi8(A, B, I) \
- (__m512i)__builtin_ia32_vgf2p8affineinvqb_v64qi((__v64qi)(__m512i)(A), \
- (__v64qi)(__m512i)(B), \
- (char)(I))
+ ((__m512i)__builtin_ia32_vgf2p8affineinvqb_v64qi((__v64qi)(__m512i)(A), \
+ (__v64qi)(__m512i)(B), \
+ (char)(I)))
#define _mm512_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
- (__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
- (__v64qi)_mm512_gf2p8affineinv_epi64_epi8(A, B, I), \
- (__v64qi)(__m512i)(S))
+ ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
+ (__v64qi)_mm512_gf2p8affineinv_epi64_epi8(A, B, I), \
+ (__v64qi)(__m512i)(S)))
#define _mm512_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
- (__m512i)_mm512_mask_gf2p8affineinv_epi64_epi8((__m512i)_mm512_setzero_si512(), \
- U, A, B, I)
+ _mm512_mask_gf2p8affineinv_epi64_epi8((__m512i)_mm512_setzero_si512(), \
+ U, A, B, I)
#define _mm512_gf2p8affine_epi64_epi8(A, B, I) \
- (__m512i)__builtin_ia32_vgf2p8affineqb_v64qi((__v64qi)(__m512i)(A), \
- (__v64qi)(__m512i)(B), \
- (char)(I))
+ ((__m512i)__builtin_ia32_vgf2p8affineqb_v64qi((__v64qi)(__m512i)(A), \
+ (__v64qi)(__m512i)(B), \
+ (char)(I)))
#define _mm512_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
- (__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
- (__v64qi)_mm512_gf2p8affine_epi64_epi8(A, B, I), \
- (__v64qi)(__m512i)(S))
+ ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
+ (__v64qi)_mm512_gf2p8affine_epi64_epi8((A), (B), (I)), \
+ (__v64qi)(__m512i)(S)))
#define _mm512_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
- (__m512i)_mm512_mask_gf2p8affine_epi64_epi8((__m512i)_mm512_setzero_si512(), \
- U, A, B, I)
+ _mm512_mask_gf2p8affine_epi64_epi8((__m512i)_mm512_setzero_si512(), \
+ U, A, B, I)
static __inline__ __m512i __DEFAULT_FN_ATTRS_Z
_mm512_gf2p8mul_epi8(__m512i __A, __m512i __B)
@@ -99,7 +118,7 @@ _mm512_gf2p8mul_epi8(__m512i __A, __m512i __B)
(__v64qi) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS_Z
+static __inline__ __m512i __DEFAULT_FN_ATTRS_Z_MASK
_mm512_mask_gf2p8mul_epi8(__m512i __S, __mmask64 __U, __m512i __A, __m512i __B)
{
return (__m512i) __builtin_ia32_selectb_512(__U,
@@ -107,7 +126,7 @@ _mm512_mask_gf2p8mul_epi8(__m512i __S, __mmask64 __U, __m512i __A, __m512i __B)
(__v64qi) __S);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS_Z
+static __inline__ __m512i __DEFAULT_FN_ATTRS_Z_MASK
_mm512_maskz_gf2p8mul_epi8(__mmask64 __U, __m512i __A, __m512i __B)
{
return _mm512_mask_gf2p8mul_epi8((__m512i)_mm512_setzero_si512(),
@@ -117,40 +136,39 @@ _mm512_maskz_gf2p8mul_epi8(__mmask64 __U, __m512i __A, __m512i __B)
#ifdef __AVX512VLBWINTRIN_H
#define _mm_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
- (__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
- (__v16qi)_mm_gf2p8affineinv_epi64_epi8(A, B, I), \
- (__v16qi)(__m128i)(S))
+ ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
+ (__v16qi)_mm_gf2p8affineinv_epi64_epi8(A, B, I), \
+ (__v16qi)(__m128i)(S)))
#define _mm_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
- (__m128i)_mm_mask_gf2p8affineinv_epi64_epi8((__m128i)_mm_setzero_si128(), \
- U, A, B, I)
+ _mm_mask_gf2p8affineinv_epi64_epi8((__m128i)_mm_setzero_si128(), \
+ U, A, B, I)
#define _mm256_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
- (__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
- (__v32qi)_mm256_gf2p8affineinv_epi64_epi8(A, B, I), \
- (__v32qi)(__m256i)(S))
+ ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
+ (__v32qi)_mm256_gf2p8affineinv_epi64_epi8(A, B, I), \
+ (__v32qi)(__m256i)(S)))
#define _mm256_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
- (__m256i)_mm256_mask_gf2p8affineinv_epi64_epi8((__m256i)_mm256_setzero_si256(), \
- U, A, B, I)
+ _mm256_mask_gf2p8affineinv_epi64_epi8((__m256i)_mm256_setzero_si256(), \
+ U, A, B, I)
#define _mm_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
- (__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
- (__v16qi)_mm_gf2p8affine_epi64_epi8(A, B, I), \
- (__v16qi)(__m128i)(S))
+ ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
+ (__v16qi)_mm_gf2p8affine_epi64_epi8(A, B, I), \
+ (__v16qi)(__m128i)(S)))
#define _mm_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
- (__m128i)_mm_mask_gf2p8affine_epi64_epi8((__m128i)_mm_setzero_si128(), \
- U, A, B, I)
+ _mm_mask_gf2p8affine_epi64_epi8((__m128i)_mm_setzero_si128(), U, A, B, I)
#define _mm256_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
- (__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
- (__v32qi)_mm256_gf2p8affine_epi64_epi8(A, B, I), \
- (__v32qi)(__m256i)(S))
+ ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
+ (__v32qi)_mm256_gf2p8affine_epi64_epi8(A, B, I), \
+ (__v32qi)(__m256i)(S)))
#define _mm256_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
- (__m256i)_mm256_mask_gf2p8affine_epi64_epi8((__m256i)_mm256_setzero_si256(), \
- U, A, B, I)
+ _mm256_mask_gf2p8affine_epi64_epi8((__m256i)_mm256_setzero_si256(), \
+ U, A, B, I)
static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128
_mm_mask_gf2p8mul_epi8(__m128i __S, __mmask16 __U, __m128i __A, __m128i __B)
diff --git a/contrib/llvm-project/clang/lib/Headers/hexagon_protos.h b/contrib/llvm-project/clang/lib/Headers/hexagon_protos.h
index cdffd93bb859..2642f3c8428d 100644
--- a/contrib/llvm-project/clang/lib/Headers/hexagon_protos.h
+++ b/contrib/llvm-project/clang/lib/Headers/hexagon_protos.h
@@ -8003,17 +8003,6 @@
#define Q6_P_vtrunohb_PP __builtin_HEXAGON_S6_vtrunohb_ppp
#endif /* __HEXAGON_ARCH___ >= 62 */
-#if __HEXAGON_ARCH__ >= 62
-/* ==========================================================================
- Assembly Syntax: Vd32=vmem(Rt32):nt
- C Intrinsic Prototype: HVX_Vector Q6_V_vmem_R_nt(Word32 Rt)
- Instruction Type: MAPPING
- Execution Slots: SLOT0123
- ========================================================================== */
-
-#define Q6_V_vmem_R_nt __builtin_HEXAGON_V6_ldntnt0
-#endif /* __HEXAGON_ARCH___ >= 62 */
-
#if __HEXAGON_ARCH__ >= 65
/* ==========================================================================
Assembly Syntax: Pd4=!any8(vcmpb.eq(Rss32,Rtt32))
diff --git a/contrib/llvm-project/clang/lib/Headers/hexagon_types.h b/contrib/llvm-project/clang/lib/Headers/hexagon_types.h
index 6958809418d8..029727cc4817 100644
--- a/contrib/llvm-project/clang/lib/Headers/hexagon_types.h
+++ b/contrib/llvm-project/clang/lib/Headers/hexagon_types.h
@@ -1177,37 +1177,6 @@ private:
#endif /* __cplusplus */
-// V65 Silver types
-#if __Q6S_ARCH__ >= 65
- // Silver vector types are 128 bytes, and pairs are 256. The vector predicate
- // types are 16 bytes and 32 bytes for pairs.
- typedef long HEXAGON_VecPred128 __attribute__((__vector_size__(16)))
- __attribute__((aligned(128)));
-
- typedef long HEXAGON_VecPred256 __attribute__((__vector_size__(32)))
- __attribute__((aligned(128)));
-
- typedef long HEXAGON_Vect1024 __attribute__((__vector_size__(128)))
- __attribute__((aligned(128)));
-
- typedef long HEXAGON_Vect2048 __attribute__((__vector_size__(256)))
- __attribute__((aligned(256)));
-
- typedef long HEXAGON_UVect1024 __attribute__((__vector_size__(128)))
- __attribute__((aligned(4)));
-
- typedef long HEXAGON_UVect2048 __attribute__((__vector_size__(256)))
- __attribute__((aligned(4)));
-
- #define Q6S_VectorPredPair HEXAGON_VecPred256
- #define Q6S_VectorPred HEXAGON_VecPred128
- #define Q6S_Vector HEXAGON_Vect1024
- #define Q6S_VectorPair HEXAGON_Vect2048
- #define Q6S_UVector HEXAGON_UVect1024
- #define Q6S_UVectorPair HEXAGON_UVect2048
-
-#else /* __Q6S_ARCH__ >= 65 */
-
// V65 Vector types
#if __HVX_ARCH__ >= 65
#if defined __HVX__ && (__HVX_LENGTH__ == 128)
@@ -1256,7 +1225,6 @@ private:
#endif /* defined __HVX__ && (__HVX_LENGTH__ == 64) */
#endif /* defined __HVX__ && (__HVX_LENGTH__ == 128) */
#endif /* __HVX_ARCH__ >= 65 */
-#endif /* __Q6S_ARCH__ >= 65 */
/* Predicates */
diff --git a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticAnalysisKinds.td b/contrib/llvm-project/clang/lib/Headers/hlsl.h
index 20efd96b85fd..a9dce4503ddd 100644
--- a/contrib/llvm-project/clang/include/clang/Basic/DiagnosticAnalysisKinds.td
+++ b/contrib/llvm-project/clang/lib/Headers/hlsl.h
@@ -1,4 +1,4 @@
-//==--- DiagnosticAnalysisKinds.td - libanalysis diagnostics --------------===//
+//===----- hlsl.h - HLSL definitions --------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,6 +6,10 @@
//
//===----------------------------------------------------------------------===//
-let Component = "Analysis" in {
+#ifndef _HLSL_H_
+#define _HLSL_H_
-}
+#include "hlsl/hlsl_basic_types.h"
+#include "hlsl/hlsl_intrinsics.h"
+
+#endif //_HLSL_H_
diff --git a/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_basic_types.h b/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_basic_types.h
new file mode 100644
index 000000000000..9ea605cfa840
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_basic_types.h
@@ -0,0 +1,67 @@
+//===----- hlsl_basic_types.h - HLSL definitions for basic types ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _HLSL_HLSL_BASIC_TYPES_H_
+#define _HLSL_HLSL_BASIC_TYPES_H_
+
+namespace hlsl {
+// built-in scalar data types:
+
+#ifdef __HLSL_ENABLE_16_BIT
+// 16-bit integer.
+typedef unsigned short uint16_t;
+typedef short int16_t;
+#endif
+
+// unsigned 32-bit integer.
+typedef unsigned int uint;
+
+// 64-bit integer.
+typedef unsigned long uint64_t;
+typedef long int64_t;
+
+// built-in vector data types:
+
+#ifdef __HLSL_ENABLE_16_BIT
+typedef vector<int16_t, 2> int16_t2;
+typedef vector<int16_t, 3> int16_t3;
+typedef vector<int16_t, 4> int16_t4;
+typedef vector<uint16_t, 2> uint16_t2;
+typedef vector<uint16_t, 3> uint16_t3;
+typedef vector<uint16_t, 4> uint16_t4;
+#endif
+
+typedef vector<int, 2> int2;
+typedef vector<int, 3> int3;
+typedef vector<int, 4> int4;
+typedef vector<uint, 2> uint2;
+typedef vector<uint, 3> uint3;
+typedef vector<uint, 4> uint4;
+typedef vector<int64_t, 2> int64_t2;
+typedef vector<int64_t, 3> int64_t3;
+typedef vector<int64_t, 4> int64_t4;
+typedef vector<uint64_t, 2> uint64_t2;
+typedef vector<uint64_t, 3> uint64_t3;
+typedef vector<uint64_t, 4> uint64_t4;
+
+#ifdef __HLSL_ENABLE_16_BIT
+typedef vector<half, 2> half2;
+typedef vector<half, 3> half3;
+typedef vector<half, 4> half4;
+#endif
+
+typedef vector<float, 2> float2;
+typedef vector<float, 3> float3;
+typedef vector<float, 4> float4;
+typedef vector<double, 2> double2;
+typedef vector<double, 3> double3;
+typedef vector<double, 4> double4;
+
+} // namespace hlsl
+
+#endif //_HLSL_HLSL_BASIC_TYPES_H_
diff --git a/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_intrinsics.h b/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_intrinsics.h
new file mode 100644
index 000000000000..da153d8f8e03
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_intrinsics.h
@@ -0,0 +1,624 @@
+//===----- hlsl_intrinsics.h - HLSL definitions for intrinsics ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _HLSL_HLSL_INTRINSICS_H_
+#define _HLSL_HLSL_INTRINSICS_H_
+
+namespace hlsl {
+
+// Note: Functions in this file are sorted alphabetically, then grouped by base
+// element type, and the element types are sorted by size, then singed integer,
+// unsigned integer and floating point. Keeping this ordering consistent will
+// help keep this file manageable as it grows.
+
+#define _HLSL_BUILTIN_ALIAS(builtin) \
+ __attribute__((clang_builtin_alias(builtin)))
+#define _HLSL_AVAILABILITY(environment, version) \
+ __attribute__((availability(environment, introduced = version)))
+
+//===----------------------------------------------------------------------===//
+// abs builtins
+//===----------------------------------------------------------------------===//
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+int16_t abs(int16_t);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+int16_t2 abs(int16_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+int16_t3 abs(int16_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+int16_t4 abs(int16_t4);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+
+half abs(half);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+half2 abs(half2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+half3 abs(half3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+half4 abs(half4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+int abs(int);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+int2 abs(int2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+int3 abs(int3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+int4 abs(int4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+float abs(float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+float2 abs(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+float3 abs(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+float4 abs(float4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+int64_t abs(int64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+int64_t2 abs(int64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+int64_t3 abs(int64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+int64_t4 abs(int64_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+double abs(double);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+double2 abs(double2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+double3 abs(double3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_abs)
+double4 abs(double4);
+
+//===----------------------------------------------------------------------===//
+// ceil builtins
+//===----------------------------------------------------------------------===//
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
+half ceil(half);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
+half2 ceil(half2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
+half3 ceil(half3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
+half4 ceil(half4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
+float ceil(float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
+float2 ceil(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
+float3 ceil(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
+float4 ceil(float4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
+double ceil(double);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
+double2 ceil(double2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
+double3 ceil(double3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_ceil)
+double4 ceil(double4);
+
+//===----------------------------------------------------------------------===//
+// cos builtins
+//===----------------------------------------------------------------------===//
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
+half cos(half);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
+half2 cos(half2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
+half3 cos(half3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
+half4 cos(half4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
+float cos(float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
+float2 cos(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
+float3 cos(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
+float4 cos(float4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
+double cos(double);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
+double2 cos(double2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
+double3 cos(double3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_cos)
+double4 cos(double4);
+
+//===----------------------------------------------------------------------===//
+// floor builtins
+//===----------------------------------------------------------------------===//
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
+half floor(half);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
+half2 floor(half2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
+half3 floor(half3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
+half4 floor(half4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
+float floor(float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
+float2 floor(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
+float3 floor(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
+float4 floor(float4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
+double floor(double);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
+double2 floor(double2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
+double3 floor(double3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_floor)
+double4 floor(double4);
+
+//===----------------------------------------------------------------------===//
+// log builtins
+//===----------------------------------------------------------------------===//
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
+half log(half);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
+half2 log(half2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
+half3 log(half3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
+half4 log(half4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
+float log(float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
+float2 log(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
+float3 log(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
+float4 log(float4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
+double log(double);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
+double2 log(double2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
+double3 log(double3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log)
+double4 log(double4);
+
+//===----------------------------------------------------------------------===//
+// log10 builtins
+//===----------------------------------------------------------------------===//
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
+half log10(half);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
+half2 log10(half2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
+half3 log10(half3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
+half4 log10(half4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
+float log10(float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
+float2 log10(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
+float3 log10(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
+float4 log10(float4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
+double log10(double);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
+double2 log10(double2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
+double3 log10(double3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log10)
+double4 log10(double4);
+
+//===----------------------------------------------------------------------===//
+// log2 builtins
+//===----------------------------------------------------------------------===//
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
+half log2(half);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
+half2 log2(half2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
+half3 log2(half3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
+half4 log2(half4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
+float log2(float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
+float2 log2(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
+float3 log2(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
+float4 log2(float4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
+double log2(double);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
+double2 log2(double2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
+double3 log2(double3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_log2)
+double4 log2(double4);
+
+//===----------------------------------------------------------------------===//
+// max builtins
+//===----------------------------------------------------------------------===//
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+half max(half, half);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+half2 max(half2, half2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+half3 max(half3, half3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+half4 max(half4, half4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+int16_t max(int16_t, int16_t);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+int16_t2 max(int16_t2, int16_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+int16_t3 max(int16_t3, int16_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+int16_t4 max(int16_t4, int16_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+uint16_t max(uint16_t, uint16_t);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+uint16_t2 max(uint16_t2, uint16_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+uint16_t3 max(uint16_t3, uint16_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+uint16_t4 max(uint16_t4, uint16_t4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+int max(int, int);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+int2 max(int2, int2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+int3 max(int3, int3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+int4 max(int4, int4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+uint max(uint, uint);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+uint2 max(uint2, uint2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+uint3 max(uint3, uint3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+uint4 max(uint4, uint4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+int64_t max(int64_t, int64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+int64_t2 max(int64_t2, int64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+int64_t3 max(int64_t3, int64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+int64_t4 max(int64_t4, int64_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+uint64_t max(uint64_t, uint64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+uint64_t2 max(uint64_t2, uint64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+uint64_t3 max(uint64_t3, uint64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+uint64_t4 max(uint64_t4, uint64_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+float max(float, float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+float2 max(float2, float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+float3 max(float3, float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+float4 max(float4, float4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+double max(double, double);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+double2 max(double2, double2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+double3 max(double3, double3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_max)
+double4 max(double4, double4);
+
+//===----------------------------------------------------------------------===//
+// min builtins
+//===----------------------------------------------------------------------===//
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+half min(half, half);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+half2 min(half2, half2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+half3 min(half3, half3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+half4 min(half4, half4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+int16_t min(int16_t, int16_t);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+int16_t2 min(int16_t2, int16_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+int16_t3 min(int16_t3, int16_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+int16_t4 min(int16_t4, int16_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+uint16_t min(uint16_t, uint16_t);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+uint16_t2 min(uint16_t2, uint16_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+uint16_t3 min(uint16_t3, uint16_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+uint16_t4 min(uint16_t4, uint16_t4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+int min(int, int);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+int2 min(int2, int2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+int3 min(int3, int3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+int4 min(int4, int4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+uint min(uint, uint);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+uint2 min(uint2, uint2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+uint3 min(uint3, uint3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+uint4 min(uint4, uint4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+float min(float, float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+float2 min(float2, float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+float3 min(float3, float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+float4 min(float4, float4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+int64_t min(int64_t, int64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+int64_t2 min(int64_t2, int64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+int64_t3 min(int64_t3, int64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+int64_t4 min(int64_t4, int64_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+uint64_t min(uint64_t, uint64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+uint64_t2 min(uint64_t2, uint64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+uint64_t3 min(uint64_t3, uint64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+uint64_t4 min(uint64_t4, uint64_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+double min(double, double);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+double2 min(double2, double2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+double3 min(double3, double3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_min)
+double4 min(double4, double4);
+
+//===----------------------------------------------------------------------===//
+// pow builtins
+//===----------------------------------------------------------------------===//
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
+half pow(half, half);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
+half2 pow(half2, half2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
+half3 pow(half3, half3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
+half4 pow(half4, half4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
+float pow(float, float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
+float2 pow(float2, float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
+float3 pow(float3, float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
+float4 pow(float4, float4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
+double pow(double, double);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
+double2 pow(double2, double2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
+double3 pow(double3, double3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_pow)
+double4 pow(double4, double4);
+
+//===----------------------------------------------------------------------===//
+// reversebits builtins
+//===----------------------------------------------------------------------===//
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+int16_t reversebits(int16_t);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+int16_t2 reversebits(int16_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+int16_t3 reversebits(int16_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+int16_t4 reversebits(int16_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+uint16_t reversebits(uint16_t);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+uint16_t2 reversebits(uint16_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+uint16_t3 reversebits(uint16_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+uint16_t4 reversebits(uint16_t4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+int reversebits(int);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+int2 reversebits(int2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+int3 reversebits(int3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+int4 reversebits(int4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+uint reversebits(uint);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+uint2 reversebits(uint2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+uint3 reversebits(uint3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+uint4 reversebits(uint4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+int64_t reversebits(int64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+int64_t2 reversebits(int64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+int64_t3 reversebits(int64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+int64_t4 reversebits(int64_t4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+uint64_t reversebits(uint64_t);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+uint64_t2 reversebits(uint64_t2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+uint64_t3 reversebits(uint64_t3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_bitreverse)
+uint64_t4 reversebits(uint64_t4);
+
+//===----------------------------------------------------------------------===//
+// sin builtins
+//===----------------------------------------------------------------------===//
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
+half sin(half);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
+half2 sin(half2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
+half3 sin(half3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
+half4 sin(half4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
+float sin(float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
+float2 sin(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
+float3 sin(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
+float4 sin(float4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
+double sin(double);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
+double2 sin(double2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
+double3 sin(double3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_sin)
+double4 sin(double4);
+
+//===----------------------------------------------------------------------===//
+// sqrt builtins
+//===----------------------------------------------------------------------===//
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_BUILTIN_ALIAS(__builtin_sqrtf16)
+half sqrt(half In);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_sqrtf)
+float sqrt(float In);
+
+_HLSL_BUILTIN_ALIAS(__builtin_sqrt)
+double sqrt(double In);
+
+//===----------------------------------------------------------------------===//
+// trunc builtins
+//===----------------------------------------------------------------------===//
+#ifdef __HLSL_ENABLE_16_BIT
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
+half trunc(half);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
+half2 trunc(half2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
+half3 trunc(half3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
+half4 trunc(half4);
+#endif
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
+float trunc(float);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
+float2 trunc(float2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
+float3 trunc(float3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
+float4 trunc(float4);
+
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
+double trunc(double);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
+double2 trunc(double2);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
+double3 trunc(double3);
+_HLSL_BUILTIN_ALIAS(__builtin_elementwise_trunc)
+double4 trunc(double4);
+
+//===----------------------------------------------------------------------===//
+// Wave* builtins
+//===----------------------------------------------------------------------===//
+_HLSL_AVAILABILITY(shadermodel, 6.0)
+_HLSL_BUILTIN_ALIAS(__builtin_hlsl_wave_active_count_bits)
+uint WaveActiveCountBits(bool bBit);
+
+} // namespace hlsl
+#endif //_HLSL_HLSL_INTRINSICS_H_
diff --git a/contrib/llvm-project/clang/lib/Headers/hresetintrin.h b/contrib/llvm-project/clang/lib/Headers/hresetintrin.h
index 13e31a2e03ad..646f6c130961 100644
--- a/contrib/llvm-project/clang/lib/Headers/hresetintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/hresetintrin.h
@@ -25,7 +25,7 @@
///
/// This intrinsic corresponds to the <c> HRESET </c> instruction.
///
-/// \operation
+/// \code{.operation}
/// IF __eax == 0
/// // nop
/// ELSE
@@ -35,7 +35,7 @@
/// FI
/// ENDFOR
/// FI
-/// \endoperation
+/// \endcode
static __inline void __DEFAULT_FN_ATTRS
_hreset(int __eax)
{
diff --git a/contrib/llvm-project/clang/lib/Headers/hvx_hexagon_protos.h b/contrib/llvm-project/clang/lib/Headers/hvx_hexagon_protos.h
index 41ce7a6b93e9..7e3679a38b2c 100644
--- a/contrib/llvm-project/clang/lib/Headers/hvx_hexagon_protos.h
+++ b/contrib/llvm-project/clang/lib/Headers/hvx_hexagon_protos.h
@@ -9,7 +9,6 @@
//===----------------------------------------------------------------------===//
-
#ifndef _HVX_HEXAGON_PROTOS_H_
#define _HVX_HEXAGON_PROTOS_H_ 1
@@ -28,7 +27,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_R_vextract_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_extractw)
+#define Q6_R_vextract_VR(Vu,Rs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_extractw)(Vu,Rs)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -39,7 +38,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_hi_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_hi)
+#define Q6_V_hi_W(Vss) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_hi)(Vss)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -50,7 +49,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_lo_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lo)
+#define Q6_V_lo_W(Vss) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lo)(Vss)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -61,7 +60,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_V_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatw)
+#define Q6_V_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatw)(Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -72,7 +71,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_and_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and)
+#define Q6_Q_and_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -83,7 +82,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_and_QQn __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and_n)
+#define Q6_Q_and_QQn(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and_n)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -94,7 +93,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_not_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_not)
+#define Q6_Q_not_Q(Qs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_not)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1))),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -105,7 +104,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_or_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or)
+#define Q6_Q_or_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -116,7 +115,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_or_QQn __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or_n)
+#define Q6_Q_or_QQn(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or_n)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -127,7 +126,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vsetq_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2)
+#define Q6_Q_vsetq_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2)(Rt)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -138,7 +137,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_xor_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_xor)
+#define Q6_Q_xor_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -149,7 +148,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vmem_QnRIV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nqpred_ai)
+#define Q6_vmem_QnRIV(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nqpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -160,7 +159,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vmem_QnRIV_nt __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai)
+#define Q6_vmem_QnRIV_nt(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -171,7 +170,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vmem_QRIV_nt __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai)
+#define Q6_vmem_QRIV_nt(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -182,7 +181,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vmem_QRIV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_qpred_ai)
+#define Q6_vmem_QRIV(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_qpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -193,7 +192,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vuh_vabsdiff_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffh)
+#define Q6_Vuh_vabsdiff_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -204,7 +203,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vub_vabsdiff_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffub)
+#define Q6_Vub_vabsdiff_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffub)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -215,7 +214,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vuh_vabsdiff_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffuh)
+#define Q6_Vuh_vabsdiff_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffuh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -226,7 +225,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vuw_vabsdiff_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffw)
+#define Q6_Vuw_vabsdiff_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -237,7 +236,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vabs_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh)
+#define Q6_Vh_vabs_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -248,7 +247,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vabs_Vh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh_sat)
+#define Q6_Vh_vabs_Vh_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh_sat)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -259,7 +258,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vabs_Vw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw)
+#define Q6_Vw_vabs_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -270,7 +269,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vabs_Vw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw_sat)
+#define Q6_Vw_vabs_Vw_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw_sat)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -281,7 +280,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vadd_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb)
+#define Q6_Vb_vadd_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -292,7 +291,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wb_vadd_WbWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb_dv)
+#define Q6_Wb_vadd_WbWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -303,7 +302,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_condacc_QnVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbnq)
+#define Q6_Vb_condacc_QnVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -314,7 +313,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_condacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbq)
+#define Q6_Vb_condacc_QVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -325,7 +324,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vadd_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh)
+#define Q6_Vh_vadd_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -336,7 +335,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vadd_WhWh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh_dv)
+#define Q6_Wh_vadd_WhWh(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -347,7 +346,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_condacc_QnVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhnq)
+#define Q6_Vh_condacc_QnVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -358,7 +357,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_condacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhq)
+#define Q6_Vh_condacc_QVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -369,7 +368,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vadd_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat)
+#define Q6_Vh_vadd_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -380,7 +379,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vadd_WhWh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat_dv)
+#define Q6_Wh_vadd_WhWh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -391,7 +390,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vadd_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw)
+#define Q6_Ww_vadd_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -402,7 +401,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vadd_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh)
+#define Q6_Wh_vadd_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -413,7 +412,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vadd_VubVub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat)
+#define Q6_Vub_vadd_VubVub_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -424,7 +423,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wub_vadd_WubWub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat_dv)
+#define Q6_Wub_vadd_WubWub_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -435,7 +434,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vadd_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat)
+#define Q6_Vuh_vadd_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -446,7 +445,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wuh_vadd_WuhWuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat_dv)
+#define Q6_Wuh_vadd_WuhWuh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -457,7 +456,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vadd_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw)
+#define Q6_Ww_vadd_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -468,7 +467,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vadd_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw)
+#define Q6_Vw_vadd_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -479,7 +478,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Ww_vadd_WwWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw_dv)
+#define Q6_Ww_vadd_WwWw(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -490,7 +489,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_condacc_QnVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwnq)
+#define Q6_Vw_condacc_QnVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -501,7 +500,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_condacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwq)
+#define Q6_Vw_condacc_QVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -512,7 +511,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vadd_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat)
+#define Q6_Vw_vadd_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -523,7 +522,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Ww_vadd_WwWw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat_dv)
+#define Q6_Ww_vadd_WwWw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -534,7 +533,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_valign_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignb)
+#define Q6_V_valign_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignb)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -545,7 +544,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_valign_VVI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignbi)
+#define Q6_V_valign_VVI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignbi)(Vu,Vv,Iu3)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -556,7 +555,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vand_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vand)
+#define Q6_V_vand_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vand)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -567,7 +566,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_V_vand_QR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)
+#define Q6_V_vand_QR(Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -578,7 +577,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_V_vandor_VQR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt_acc)
+#define Q6_V_vandor_VQR(Vx,Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt_acc)(Vx,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -589,7 +588,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Q_vand_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)
+#define Q6_Q_vand_VR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)(Vu,Rt)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -600,7 +599,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Q_vandor_QVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt_acc)
+#define Q6_Q_vandor_QVR(Qx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt_acc)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Rt)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -611,7 +610,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vasl_VhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh)
+#define Q6_Vh_vasl_VhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -622,7 +621,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vasl_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslhv)
+#define Q6_Vh_vasl_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslhv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -633,7 +632,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vasl_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw)
+#define Q6_Vw_vasl_VwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -644,7 +643,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vaslacc_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw_acc)
+#define Q6_Vw_vaslacc_VwVwR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -655,7 +654,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vasl_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslwv)
+#define Q6_Vw_vasl_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslwv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -666,7 +665,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vasr_VhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh)
+#define Q6_Vh_vasr_VhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -677,7 +676,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vasr_VhVhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbrndsat)
+#define Q6_Vb_vasr_VhVhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbrndsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -688,7 +687,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vasr_VhVhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubrndsat)
+#define Q6_Vub_vasr_VhVhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubrndsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -699,7 +698,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vasr_VhVhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubsat)
+#define Q6_Vub_vasr_VhVhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -710,7 +709,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vasr_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhv)
+#define Q6_Vh_vasr_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -721,7 +720,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vasr_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw)
+#define Q6_Vw_vasr_VwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -732,7 +731,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vasracc_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw_acc)
+#define Q6_Vw_vasracc_VwVwR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -743,7 +742,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vasr_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwh)
+#define Q6_Vh_vasr_VwVwR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwh)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -754,7 +753,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vasr_VwVwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhrndsat)
+#define Q6_Vh_vasr_VwVwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhrndsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -765,7 +764,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vasr_VwVwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhsat)
+#define Q6_Vh_vasr_VwVwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -776,7 +775,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vasr_VwVwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhsat)
+#define Q6_Vuh_vasr_VwVwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -787,7 +786,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vasr_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwv)
+#define Q6_Vw_vasr_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -798,7 +797,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_equals_V __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign)
+#define Q6_V_equals_V(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -809,7 +808,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_W_equals_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassignp)
+#define Q6_W_equals_W(Vuu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassignp)(Vuu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -820,7 +819,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vavg_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgh)
+#define Q6_Vh_vavg_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -831,7 +830,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vavg_VhVh_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavghrnd)
+#define Q6_Vh_vavg_VhVh_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavghrnd)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -842,7 +841,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vavg_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgub)
+#define Q6_Vub_vavg_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgub)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -853,7 +852,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vavg_VubVub_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgubrnd)
+#define Q6_Vub_vavg_VubVub_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgubrnd)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -864,7 +863,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vavg_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguh)
+#define Q6_Vuh_vavg_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -875,7 +874,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vavg_VuhVuh_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguhrnd)
+#define Q6_Vuh_vavg_VuhVuh_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguhrnd)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -886,7 +885,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vavg_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgw)
+#define Q6_Vw_vavg_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -897,7 +896,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vavg_VwVw_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgwrnd)
+#define Q6_Vw_vavg_VwVw_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgwrnd)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -908,7 +907,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vcl0_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0h)
+#define Q6_Vuh_vcl0_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0h)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -919,7 +918,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuw_vcl0_Vuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0w)
+#define Q6_Vuw_vcl0_Vuw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0w)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -930,7 +929,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_W_vcombine_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcombine)
+#define Q6_W_vcombine_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcombine)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -941,7 +940,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vzero __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vd0)
+#define Q6_V_vzero() __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vd0)()
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -952,7 +951,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vdeal_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb)
+#define Q6_Vb_vdeal_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -963,7 +962,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vdeale_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb4w)
+#define Q6_Vb_vdeale_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb4w)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -974,7 +973,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vdeal_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealh)
+#define Q6_Vh_vdeal_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealh)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -985,7 +984,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_W_vdeal_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealvdd)
+#define Q6_W_vdeal_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealvdd)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -996,7 +995,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vdelta_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdelta)
+#define Q6_V_vdelta_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdelta)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1007,7 +1006,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vh_vdmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus)
+#define Q6_Vh_vdmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1018,7 +1017,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vh_vdmpyacc_VhVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_acc)
+#define Q6_Vh_vdmpyacc_VhVubRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1029,7 +1028,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vdmpy_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv)
+#define Q6_Wh_vdmpy_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1040,7 +1039,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vdmpyacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv_acc)
+#define Q6_Wh_vdmpyacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv_acc)(Vxx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1051,7 +1050,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpy_VhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb)
+#define Q6_Vw_vdmpy_VhRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1062,7 +1061,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpyacc_VwVhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_acc)
+#define Q6_Vw_vdmpyacc_VwVhRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1073,7 +1072,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vdmpy_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv)
+#define Q6_Ww_vdmpy_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1084,7 +1083,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vdmpyacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv_acc)
+#define Q6_Ww_vdmpyacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv_acc)(Vxx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1095,7 +1094,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpy_WhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat)
+#define Q6_Vw_vdmpy_WhRh_sat(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1106,29 +1105,29 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpyacc_VwWhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat_acc)
+#define Q6_Vw_vdmpyacc_VwWhRh_sat(Vx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat_acc)(Vx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
/* ==========================================================================
Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Rt32.h):sat
C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRh_sat(HVX_Vector Vu, Word32 Rt)
- Instruction Type: CVI_VX_DV
+ Instruction Type: CVI_VX
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpy_VhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat)
+#define Q6_Vw_vdmpy_VhRh_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
/* ==========================================================================
Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Rt32.h):sat
C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
- Instruction Type: CVI_VX_DV
+ Instruction Type: CVI_VX
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpyacc_VwVhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat_acc)
+#define Q6_Vw_vdmpyacc_VwVhRh_sat(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1139,7 +1138,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpy_WhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat)
+#define Q6_Vw_vdmpy_WhRuh_sat(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1150,40 +1149,40 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpyacc_VwWhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat_acc)
+#define Q6_Vw_vdmpyacc_VwWhRuh_sat(Vx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat_acc)(Vx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
/* ==========================================================================
Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Rt32.uh):sat
C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRuh_sat(HVX_Vector Vu, Word32 Rt)
- Instruction Type: CVI_VX_DV
+ Instruction Type: CVI_VX
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpy_VhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat)
+#define Q6_Vw_vdmpy_VhRuh_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
/* ==========================================================================
Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Rt32.uh):sat
C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
- Instruction Type: CVI_VX_DV
+ Instruction Type: CVI_VX
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpyacc_VwVhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat_acc)
+#define Q6_Vw_vdmpyacc_VwVhRuh_sat(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
/* ==========================================================================
Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Vv32.h):sat
C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
- Instruction Type: CVI_VX_DV
+ Instruction Type: CVI_VX
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpy_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat)
+#define Q6_Vw_vdmpy_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1194,7 +1193,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vdmpyacc_VwVhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat_acc)
+#define Q6_Vw_vdmpyacc_VwVhVh_sat(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat_acc)(Vx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1205,7 +1204,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuw_vdsad_WuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh)
+#define Q6_Wuw_vdsad_WuhRuh(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1216,7 +1215,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuw_vdsadacc_WuwWuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh_acc)
+#define Q6_Wuw_vdsadacc_WuwWuhRuh(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh_acc)(Vxx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1227,7 +1226,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eq_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb)
+#define Q6_Q_vcmp_eq_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb)(Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1238,7 +1237,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eqand_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_and)
+#define Q6_Q_vcmp_eqand_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1249,7 +1248,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eqor_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_or)
+#define Q6_Q_vcmp_eqor_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1260,7 +1259,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eqxacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_xor)
+#define Q6_Q_vcmp_eqxacc_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1271,7 +1270,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eq_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh)
+#define Q6_Q_vcmp_eq_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh)(Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1282,7 +1281,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eqand_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_and)
+#define Q6_Q_vcmp_eqand_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1293,7 +1292,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eqor_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_or)
+#define Q6_Q_vcmp_eqor_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1304,7 +1303,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eqxacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_xor)
+#define Q6_Q_vcmp_eqxacc_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1315,7 +1314,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eq_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw)
+#define Q6_Q_vcmp_eq_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw)(Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1326,7 +1325,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eqand_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_and)
+#define Q6_Q_vcmp_eqand_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1337,7 +1336,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eqor_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_or)
+#define Q6_Q_vcmp_eqor_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1348,7 +1347,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_eqxacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_xor)
+#define Q6_Q_vcmp_eqxacc_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1359,7 +1358,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gt_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb)
+#define Q6_Q_vcmp_gt_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb)(Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1370,7 +1369,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtand_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_and)
+#define Q6_Q_vcmp_gtand_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1381,7 +1380,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtor_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_or)
+#define Q6_Q_vcmp_gtor_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1392,7 +1391,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtxacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_xor)
+#define Q6_Q_vcmp_gtxacc_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1403,7 +1402,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gt_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth)
+#define Q6_Q_vcmp_gt_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth)(Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1414,7 +1413,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtand_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_and)
+#define Q6_Q_vcmp_gtand_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1425,7 +1424,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtor_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_or)
+#define Q6_Q_vcmp_gtor_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1436,7 +1435,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtxacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_xor)
+#define Q6_Q_vcmp_gtxacc_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1447,7 +1446,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gt_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub)
+#define Q6_Q_vcmp_gt_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub)(Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1458,7 +1457,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtand_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_and)
+#define Q6_Q_vcmp_gtand_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1469,7 +1468,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtor_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_or)
+#define Q6_Q_vcmp_gtor_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1480,7 +1479,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtxacc_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_xor)
+#define Q6_Q_vcmp_gtxacc_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1491,7 +1490,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gt_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh)
+#define Q6_Q_vcmp_gt_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh)(Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1502,7 +1501,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtand_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_and)
+#define Q6_Q_vcmp_gtand_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1513,7 +1512,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtor_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_or)
+#define Q6_Q_vcmp_gtor_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1524,7 +1523,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtxacc_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_xor)
+#define Q6_Q_vcmp_gtxacc_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1535,7 +1534,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gt_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw)
+#define Q6_Q_vcmp_gt_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw)(Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1546,7 +1545,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtand_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_and)
+#define Q6_Q_vcmp_gtand_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1557,7 +1556,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtor_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_or)
+#define Q6_Q_vcmp_gtor_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1568,7 +1567,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtxacc_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_xor)
+#define Q6_Q_vcmp_gtxacc_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1579,7 +1578,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gt_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw)
+#define Q6_Q_vcmp_gt_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw)(Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1590,7 +1589,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtand_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_and)
+#define Q6_Q_vcmp_gtand_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1601,7 +1600,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtor_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_or)
+#define Q6_Q_vcmp_gtor_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1612,7 +1611,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vcmp_gtxacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_xor)
+#define Q6_Q_vcmp_gtxacc_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1623,7 +1622,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vinsert_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vinsertwr)
+#define Q6_Vw_vinsert_VwR(Vx,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vinsertwr)(Vx,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1634,7 +1633,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vlalign_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignb)
+#define Q6_V_vlalign_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignb)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1645,7 +1644,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vlalign_VVI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignbi)
+#define Q6_V_vlalign_VVI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignbi)(Vu,Vv,Iu3)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1656,7 +1655,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vlsr_VuhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrh)
+#define Q6_Vuh_vlsr_VuhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrh)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1667,7 +1666,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vlsr_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrhv)
+#define Q6_Vh_vlsr_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrhv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1678,7 +1677,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuw_vlsr_VuwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrw)
+#define Q6_Vuw_vlsr_VuwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrw)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1689,7 +1688,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vlsr_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrwv)
+#define Q6_Vw_vlsr_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrwv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1700,7 +1699,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vlut32_VbVbR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb)
+#define Q6_Vb_vlut32_VbVbR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1711,7 +1710,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vlut32or_VbVbVbR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracc)
+#define Q6_Vb_vlut32or_VbVbVbR(Vx,Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracc)(Vx,Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1722,7 +1721,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vlut16_VbVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh)
+#define Q6_Wh_vlut16_VbVhR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1733,7 +1732,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vlut16or_WhVbVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracc)
+#define Q6_Wh_vlut16or_WhVbVhR(Vxx,Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracc)(Vxx,Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1744,7 +1743,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vmax_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxh)
+#define Q6_Vh_vmax_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1755,7 +1754,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vmax_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxub)
+#define Q6_Vub_vmax_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxub)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1766,7 +1765,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vmax_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxuh)
+#define Q6_Vuh_vmax_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxuh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1777,7 +1776,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vmax_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxw)
+#define Q6_Vw_vmax_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1788,7 +1787,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vmin_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminh)
+#define Q6_Vh_vmin_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1799,7 +1798,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vmin_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminub)
+#define Q6_Vub_vmin_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminub)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1810,7 +1809,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vmin_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminuh)
+#define Q6_Vuh_vmin_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminuh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1821,7 +1820,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vmin_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminw)
+#define Q6_Vw_vmin_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1832,7 +1831,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpa_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus)
+#define Q6_Wh_vmpa_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1843,7 +1842,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpaacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus_acc)
+#define Q6_Wh_vmpaacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus_acc)(Vxx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1854,7 +1853,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpa_WubWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabusv)
+#define Q6_Wh_vmpa_WubWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabusv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1865,7 +1864,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpa_WubWub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuuv)
+#define Q6_Wh_vmpa_WubWub(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuuv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1876,7 +1875,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vmpa_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb)
+#define Q6_Ww_vmpa_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1887,7 +1886,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vmpaacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb_acc)
+#define Q6_Ww_vmpaacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb_acc)(Vxx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1898,7 +1897,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus)
+#define Q6_Wh_vmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1909,7 +1908,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpyacc_WhVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus_acc)
+#define Q6_Wh_vmpyacc_WhVubRb(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus_acc)(Vxx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1920,7 +1919,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpy_VubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv)
+#define Q6_Wh_vmpy_VubVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1931,7 +1930,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpyacc_WhVubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv_acc)
+#define Q6_Wh_vmpyacc_WhVubVb(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv_acc)(Vxx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1942,7 +1941,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpy_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv)
+#define Q6_Wh_vmpy_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1953,7 +1952,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpyacc_WhVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv_acc)
+#define Q6_Wh_vmpyacc_WhVbVb(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv_acc)(Vxx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1964,7 +1963,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpye_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh)
+#define Q6_Vw_vmpye_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1975,7 +1974,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vmpy_VhRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh)
+#define Q6_Ww_vmpy_VhRh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -1986,29 +1985,29 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vmpyacc_WwVhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsat_acc)
+#define Q6_Ww_vmpyacc_WwVhRh_sat(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsat_acc)(Vxx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
/* ==========================================================================
Assembly Syntax: Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:rnd:sat
C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_rnd_sat(HVX_Vector Vu, Word32 Rt)
- Instruction Type: CVI_VX_DV
+ Instruction Type: CVI_VX
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vh_vmpy_VhRh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsrs)
+#define Q6_Vh_vmpy_VhRh_s1_rnd_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsrs)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
/* ==========================================================================
Assembly Syntax: Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:sat
C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_sat(HVX_Vector Vu, Word32 Rt)
- Instruction Type: CVI_VX_DV
+ Instruction Type: CVI_VX
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vh_vmpy_VhRh_s1_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhss)
+#define Q6_Vh_vmpy_VhRh_s1_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhss)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2019,7 +2018,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vmpy_VhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus)
+#define Q6_Ww_vmpy_VhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2030,7 +2029,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vmpyacc_WwVhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus_acc)
+#define Q6_Ww_vmpyacc_WwVhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus_acc)(Vxx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2041,7 +2040,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vmpy_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv)
+#define Q6_Ww_vmpy_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2052,18 +2051,18 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vmpyacc_WwVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv_acc)
+#define Q6_Ww_vmpyacc_WwVhVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv_acc)(Vxx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
/* ==========================================================================
Assembly Syntax: Vd32.h=vmpy(Vu32.h,Vv32.h):<<1:rnd:sat
C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhVh_s1_rnd_sat(HVX_Vector Vu, HVX_Vector Vv)
- Instruction Type: CVI_VX_DV
+ Instruction Type: CVI_VX
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vh_vmpy_VhVh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhvsrs)
+#define Q6_Vh_vmpy_VhVh_s1_rnd_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhvsrs)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2074,7 +2073,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyieo_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyieoh)
+#define Q6_Vw_vmpyieo_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyieoh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2085,7 +2084,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyieacc_VwVwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewh_acc)
+#define Q6_Vw_vmpyieacc_VwVwVh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewh_acc)(Vx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2096,7 +2095,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyie_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh)
+#define Q6_Vw_vmpyie_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2107,7 +2106,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyieacc_VwVwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh_acc)
+#define Q6_Vw_vmpyieacc_VwVwVuh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh_acc)(Vx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2118,7 +2117,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vh_vmpyi_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih)
+#define Q6_Vh_vmpyi_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2129,7 +2128,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vh_vmpyiacc_VhVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih_acc)
+#define Q6_Vh_vmpyiacc_VhVhVh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih_acc)(Vx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2140,7 +2139,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vh_vmpyi_VhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb)
+#define Q6_Vh_vmpyi_VhRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2151,7 +2150,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vh_vmpyiacc_VhVhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb_acc)
+#define Q6_Vh_vmpyiacc_VhVhRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2162,7 +2161,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyio_VwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiowh)
+#define Q6_Vw_vmpyio_VwVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiowh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2173,7 +2172,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyi_VwRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb)
+#define Q6_Vw_vmpyi_VwRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2184,7 +2183,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyiacc_VwVwRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb_acc)
+#define Q6_Vw_vmpyiacc_VwVwRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2195,7 +2194,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyi_VwRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh)
+#define Q6_Vw_vmpyi_VwRh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2206,7 +2205,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyiacc_VwVwRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh_acc)
+#define Q6_Vw_vmpyiacc_VwVwRh(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2217,7 +2216,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyo_VwVh_s1_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh)
+#define Q6_Vw_vmpyo_VwVh_s1_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2228,7 +2227,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyo_VwVh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd)
+#define Q6_Vw_vmpyo_VwVh_s1_rnd_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2239,7 +2238,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc)
+#define Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc)(Vx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2250,7 +2249,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_sacc)
+#define Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_sacc)(Vx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2261,7 +2260,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuh_vmpy_VubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub)
+#define Q6_Wuh_vmpy_VubRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2272,7 +2271,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuh_vmpyacc_WuhVubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub_acc)
+#define Q6_Wuh_vmpyacc_WuhVubRub(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub_acc)(Vxx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2283,7 +2282,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuh_vmpy_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv)
+#define Q6_Wuh_vmpy_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2294,7 +2293,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuh_vmpyacc_WuhVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv_acc)
+#define Q6_Wuh_vmpyacc_WuhVubVub(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv_acc)(Vxx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2305,7 +2304,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuw_vmpy_VuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh)
+#define Q6_Wuw_vmpy_VuhRuh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2316,7 +2315,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuw_vmpyacc_WuwVuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh_acc)
+#define Q6_Wuw_vmpyacc_WuwVuhRuh(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh_acc)(Vxx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2327,7 +2326,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuw_vmpy_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv)
+#define Q6_Wuw_vmpy_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2338,7 +2337,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuw_vmpyacc_WuwVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv_acc)
+#define Q6_Wuw_vmpyacc_WuwVuhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv_acc)(Vxx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2349,7 +2348,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vmux_QVV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmux)
+#define Q6_V_vmux_QVV(Qt,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmux)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1),Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2360,7 +2359,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vnavg_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgh)
+#define Q6_Vh_vnavg_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2371,7 +2370,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vnavg_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgub)
+#define Q6_Vb_vnavg_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgub)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2382,7 +2381,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vnavg_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgw)
+#define Q6_Vw_vnavg_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2393,7 +2392,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vnormamt_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamth)
+#define Q6_Vh_vnormamt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamth)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2404,7 +2403,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vnormamt_Vw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamtw)
+#define Q6_Vw_vnormamt_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamtw)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2415,7 +2414,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vnot_V __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnot)
+#define Q6_V_vnot_V(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnot)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2426,7 +2425,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vor_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vor)
+#define Q6_V_vor_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vor)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2437,7 +2436,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vpacke_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeb)
+#define Q6_Vb_vpacke_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeb)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2448,7 +2447,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vpacke_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeh)
+#define Q6_Vh_vpacke_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2459,7 +2458,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vpack_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhb_sat)
+#define Q6_Vb_vpack_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhb_sat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2470,7 +2469,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vpack_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhub_sat)
+#define Q6_Vub_vpack_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhub_sat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2481,7 +2480,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vpacko_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackob)
+#define Q6_Vb_vpacko_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackob)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2492,7 +2491,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vpacko_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackoh)
+#define Q6_Vh_vpacko_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackoh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2503,7 +2502,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vpack_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwh_sat)
+#define Q6_Vh_vpack_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwh_sat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2514,7 +2513,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vpack_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwuh_sat)
+#define Q6_Vuh_vpack_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwuh_sat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2525,7 +2524,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vpopcount_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpopcounth)
+#define Q6_Vh_vpopcount_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpopcounth)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2536,7 +2535,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vrdelta_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrdelta)
+#define Q6_V_vrdelta_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrdelta)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2547,7 +2546,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vrmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus)
+#define Q6_Vw_vrmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2558,7 +2557,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vrmpyacc_VwVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus_acc)
+#define Q6_Vw_vrmpyacc_VwVubRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2569,7 +2568,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vrmpy_WubRbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi)
+#define Q6_Ww_vrmpy_WubRbI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi)(Vuu,Rt,Iu1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2580,7 +2579,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vrmpyacc_WwWubRbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi_acc)
+#define Q6_Ww_vrmpyacc_WwWubRbI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi_acc)(Vxx,Vuu,Rt,Iu1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2591,18 +2590,18 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vrmpy_VubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv)
+#define Q6_Vw_vrmpy_VubVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
/* ==========================================================================
Assembly Syntax: Vx32.w+=vrmpy(Vu32.ub,Vv32.b)
C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVubVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
- Instruction Type: CVI_VX_DV
+ Instruction Type: CVI_VX
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vrmpyacc_VwVubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv_acc)
+#define Q6_Vw_vrmpyacc_VwVubVb(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv_acc)(Vx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2613,18 +2612,18 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vrmpy_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv)
+#define Q6_Vw_vrmpy_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
/* ==========================================================================
Assembly Syntax: Vx32.w+=vrmpy(Vu32.b,Vv32.b)
C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVbVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
- Instruction Type: CVI_VX_DV
+ Instruction Type: CVI_VX
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vrmpyacc_VwVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv_acc)
+#define Q6_Vw_vrmpyacc_VwVbVb(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv_acc)(Vx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2635,7 +2634,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vuw_vrmpy_VubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub)
+#define Q6_Vuw_vrmpy_VubRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2646,7 +2645,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vuw_vrmpyacc_VuwVubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub_acc)
+#define Q6_Vuw_vrmpyacc_VuwVubRub(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2657,7 +2656,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuw_vrmpy_WubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi)
+#define Q6_Wuw_vrmpy_WubRubI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi)(Vuu,Rt,Iu1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2668,7 +2667,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuw_vrmpyacc_WuwWubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi_acc)
+#define Q6_Wuw_vrmpyacc_WuwWubRubI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi_acc)(Vxx,Vuu,Rt,Iu1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2679,18 +2678,18 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vuw_vrmpy_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv)
+#define Q6_Vuw_vrmpy_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
/* ==========================================================================
Assembly Syntax: Vx32.uw+=vrmpy(Vu32.ub,Vv32.ub)
C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpyacc_VuwVubVub(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
- Instruction Type: CVI_VX_DV
+ Instruction Type: CVI_VX
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vuw_vrmpyacc_VuwVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv_acc)
+#define Q6_Vuw_vrmpyacc_VuwVubVub(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv_acc)(Vx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2701,7 +2700,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vror_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vror)
+#define Q6_V_vror_VR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vror)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2712,7 +2711,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vround_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhb)
+#define Q6_Vb_vround_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhb)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2723,7 +2722,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vround_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhub)
+#define Q6_Vub_vround_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhub)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2734,7 +2733,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vround_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwh)
+#define Q6_Vh_vround_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2745,7 +2744,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vround_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwuh)
+#define Q6_Vuh_vround_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwuh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2756,7 +2755,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuw_vrsad_WubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi)
+#define Q6_Wuw_vrsad_WubRubI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi)(Vuu,Rt,Iu1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2767,7 +2766,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wuw_vrsadacc_WuwWubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi_acc)
+#define Q6_Wuw_vrsadacc_WuwWubRubI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi_acc)(Vxx,Vuu,Rt,Iu1)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2778,7 +2777,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vsat_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsathub)
+#define Q6_Vub_vsat_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsathub)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2789,7 +2788,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vsat_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatwh)
+#define Q6_Vh_vsat_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatwh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2800,7 +2799,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vsxt_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsb)
+#define Q6_Wh_vsxt_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsb)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2811,7 +2810,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Ww_vsxt_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsh)
+#define Q6_Ww_vsxt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsh)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2822,7 +2821,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vshuffe_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufeh)
+#define Q6_Vh_vshuffe_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufeh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2833,7 +2832,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vshuff_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffb)
+#define Q6_Vb_vshuff_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffb)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2844,7 +2843,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vshuffe_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffeb)
+#define Q6_Vb_vshuffe_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffeb)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2855,7 +2854,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vshuff_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffh)
+#define Q6_Vh_vshuff_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffh)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2866,7 +2865,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vshuffo_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffob)
+#define Q6_Vb_vshuffo_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffob)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2877,7 +2876,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_W_vshuff_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffvdd)
+#define Q6_W_vshuff_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffvdd)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2888,7 +2887,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wb_vshuffoe_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeb)
+#define Q6_Wb_vshuffoe_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeb)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2899,7 +2898,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vshuffoe_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeh)
+#define Q6_Wh_vshuffoe_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2910,7 +2909,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vshuffo_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoh)
+#define Q6_Vh_vshuffo_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2921,7 +2920,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vsub_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb)
+#define Q6_Vb_vsub_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2932,7 +2931,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wb_vsub_WbWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb_dv)
+#define Q6_Wb_vsub_WbWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2943,7 +2942,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_condnac_QnVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbnq)
+#define Q6_Vb_condnac_QnVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2954,7 +2953,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_condnac_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbq)
+#define Q6_Vb_condnac_QVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2965,7 +2964,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vsub_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh)
+#define Q6_Vh_vsub_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2976,7 +2975,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vsub_WhWh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh_dv)
+#define Q6_Wh_vsub_WhWh(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2987,7 +2986,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_condnac_QnVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhnq)
+#define Q6_Vh_condnac_QnVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -2998,7 +2997,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_condnac_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhq)
+#define Q6_Vh_condnac_QVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3009,7 +3008,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vsub_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat)
+#define Q6_Vh_vsub_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3020,7 +3019,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vsub_WhWh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat_dv)
+#define Q6_Wh_vsub_WhWh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3031,7 +3030,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vsub_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhw)
+#define Q6_Ww_vsub_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3042,7 +3041,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vsub_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububh)
+#define Q6_Wh_vsub_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3053,7 +3052,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vsub_VubVub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat)
+#define Q6_Vub_vsub_VubVub_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3064,7 +3063,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wub_vsub_WubWub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat_dv)
+#define Q6_Wub_vsub_WubWub_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3075,7 +3074,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vsub_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat)
+#define Q6_Vuh_vsub_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3086,7 +3085,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wuh_vsub_WuhWuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat_dv)
+#define Q6_Wuh_vsub_WuhWuh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3097,7 +3096,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vsub_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhw)
+#define Q6_Ww_vsub_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3108,7 +3107,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vsub_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw)
+#define Q6_Vw_vsub_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3119,7 +3118,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Ww_vsub_WwWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw_dv)
+#define Q6_Ww_vsub_WwWw(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3130,7 +3129,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_condnac_QnVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwnq)
+#define Q6_Vw_condnac_QnVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3141,7 +3140,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_condnac_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwq)
+#define Q6_Vw_condnac_QVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3152,7 +3151,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vsub_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat)
+#define Q6_Vw_vsub_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3163,7 +3162,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Ww_vsub_WwWw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat_dv)
+#define Q6_Ww_vsub_WwWw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3174,7 +3173,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_W_vswap_QVV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vswap)
+#define Q6_W_vswap_QVV(Qt,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vswap)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1),Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3185,7 +3184,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vtmpy_WbRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb)
+#define Q6_Wh_vtmpy_WbRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3196,7 +3195,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vtmpyacc_WhWbRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb_acc)
+#define Q6_Wh_vtmpyacc_WhWbRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb_acc)(Vxx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3207,7 +3206,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vtmpy_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus)
+#define Q6_Wh_vtmpy_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3218,7 +3217,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vtmpyacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus_acc)
+#define Q6_Wh_vtmpyacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus_acc)(Vxx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3229,7 +3228,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vtmpy_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb)
+#define Q6_Ww_vtmpy_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3240,7 +3239,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vtmpyacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb_acc)
+#define Q6_Ww_vtmpyacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb_acc)(Vxx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3251,7 +3250,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vunpack_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackb)
+#define Q6_Wh_vunpack_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackb)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3262,7 +3261,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Ww_vunpack_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackh)
+#define Q6_Ww_vunpack_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackh)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3273,7 +3272,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vunpackoor_WhVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackob)
+#define Q6_Wh_vunpackoor_WhVb(Vxx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackob)(Vxx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3284,7 +3283,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Ww_vunpackoor_WwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackoh)
+#define Q6_Ww_vunpackoor_WwVh(Vxx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackoh)(Vxx,Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3295,7 +3294,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wuh_vunpack_Vub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackub)
+#define Q6_Wuh_vunpack_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackub)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3306,7 +3305,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wuw_vunpack_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackuh)
+#define Q6_Wuw_vunpack_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackuh)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3317,7 +3316,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vxor_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vxor)
+#define Q6_V_vxor_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vxor)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3328,7 +3327,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wuh_vzxt_Vub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzb)
+#define Q6_Wuh_vzxt_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzb)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 60
@@ -3339,7 +3338,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wuw_vzxt_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzh)
+#define Q6_Wuw_vzxt_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzh)(Vu)
#endif /* __HEXAGON_ARCH___ >= 60 */
#if __HVX_ARCH__ >= 62
@@ -3350,7 +3349,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vb_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatb)
+#define Q6_Vb_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatb)(Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3361,7 +3360,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vh_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplath)
+#define Q6_Vh_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplath)(Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3372,7 +3371,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Q_vsetq2_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2v2)
+#define Q6_Q_vsetq2_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2v2)(Rt)),-1)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3383,7 +3382,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Qb_vshuffe_QhQh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqh)
+#define Q6_Qb_vshuffe_QhQh(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqh)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3394,7 +3393,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Qh_vshuffe_QwQw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqw)
+#define Q6_Qh_vshuffe_QwQw(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqw)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3405,7 +3404,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vadd_VbVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat)
+#define Q6_Vb_vadd_VbVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3416,7 +3415,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wb_vadd_WbWb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat_dv)
+#define Q6_Wb_vadd_WbWb_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3427,7 +3426,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vadd_VwVwQ_carry __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarry)
+#define Q6_Vw_vadd_VwVwQ_carry(Vu,Vv,Qx) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarry)(Vu,Vv,Qx)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3438,7 +3437,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vadd_vclb_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbh)
+#define Q6_Vh_vadd_vclb_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3449,7 +3448,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vadd_vclb_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbw)
+#define Q6_Vw_vadd_vclb_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3460,7 +3459,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vaddacc_WwVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw_acc)
+#define Q6_Ww_vaddacc_WwVhVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw_acc)(Vxx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3471,7 +3470,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vaddacc_WhVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh_acc)
+#define Q6_Wh_vaddacc_WhVubVub(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh_acc)(Vxx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3482,7 +3481,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vadd_VubVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddububb_sat)
+#define Q6_Vub_vadd_VubVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddububb_sat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3493,7 +3492,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vaddacc_WwVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw_acc)
+#define Q6_Ww_vaddacc_WwVuhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw_acc)(Vxx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3504,7 +3503,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuw_vadd_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat)
+#define Q6_Vuw_vadd_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3515,7 +3514,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wuw_vadd_WuwWuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat_dv)
+#define Q6_Wuw_vadd_WuwWuw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3526,7 +3525,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_V_vand_QnR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt)
+#define Q6_V_vand_QnR(Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3537,7 +3536,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_V_vandor_VQnR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt_acc)
+#define Q6_V_vandor_VQnR(Vx,Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt_acc)(Vx,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3548,7 +3547,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vand_QnV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvnqv)
+#define Q6_V_vand_QnV(Qv,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvnqv)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vu)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3559,7 +3558,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_V_vand_QV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvqv)
+#define Q6_V_vand_QV(Qv,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvqv)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vu)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3570,7 +3569,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vasr_VhVhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbsat)
+#define Q6_Vb_vasr_VhVhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3581,7 +3580,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vasr_VuwVuwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhrndsat)
+#define Q6_Vuh_vasr_VuwVuwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhrndsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3592,7 +3591,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vasr_VwVwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhrndsat)
+#define Q6_Vuh_vasr_VwVwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhrndsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3603,7 +3602,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vlsr_VubR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrb)
+#define Q6_Vub_vlsr_VubR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrb)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3614,7 +3613,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vlut32_VbVbR_nomatch __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_nm)
+#define Q6_Vb_vlut32_VbVbR_nomatch(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_nm)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3625,7 +3624,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vlut32or_VbVbVbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracci)
+#define Q6_Vb_vlut32or_VbVbVbI(Vx,Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracci)(Vx,Vu,Vv,Iu3)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3636,7 +3635,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vlut32_VbVbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvbi)
+#define Q6_Vb_vlut32_VbVbI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvbi)(Vu,Vv,Iu3)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3647,7 +3646,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vlut16_VbVhR_nomatch __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_nm)
+#define Q6_Wh_vlut16_VbVhR_nomatch(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_nm)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3658,7 +3657,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vlut16or_WhVbVhI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracci)
+#define Q6_Wh_vlut16or_WhVbVhI(Vxx,Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracci)(Vxx,Vu,Vv,Iu3)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3669,7 +3668,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wh_vlut16_VbVhI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwhi)
+#define Q6_Wh_vlut16_VbVhI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwhi)(Vu,Vv,Iu3)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3680,7 +3679,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vmax_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxb)
+#define Q6_Vb_vmax_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxb)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3691,7 +3690,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vmin_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminb)
+#define Q6_Vb_vmin_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminb)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3702,7 +3701,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vmpa_WuhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb)
+#define Q6_Ww_vmpa_WuhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3713,7 +3712,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vmpaacc_WwWuhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb_acc)
+#define Q6_Ww_vmpaacc_WwWuhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb_acc)(Vxx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3724,7 +3723,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_W_vmpye_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh_64)
+#define Q6_W_vmpye_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh_64)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3735,7 +3734,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyi_VwRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub)
+#define Q6_Vw_vmpyi_VwRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3746,7 +3745,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vw_vmpyiacc_VwVwRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub_acc)
+#define Q6_Vw_vmpyiacc_VwVwRub(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3757,7 +3756,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_W_vmpyoacc_WVwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_64_acc)
+#define Q6_W_vmpyoacc_WVwVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_64_acc)(Vxx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3768,7 +3767,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vround_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduhub)
+#define Q6_Vub_vround_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduhub)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3779,7 +3778,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vround_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduwuh)
+#define Q6_Vuh_vround_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduwuh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3790,7 +3789,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vsat_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatuwuh)
+#define Q6_Vuh_vsat_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatuwuh)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3801,7 +3800,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vsub_VbVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat)
+#define Q6_Vb_vsub_VbVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3812,7 +3811,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wb_vsub_WbWb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat_dv)
+#define Q6_Wb_vsub_WbWb_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3823,7 +3822,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vsub_VwVwQ_carry __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubcarry)
+#define Q6_Vw_vsub_VwVwQ_carry(Vu,Vv,Qx) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubcarry)(Vu,Vv,Qx)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3834,7 +3833,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vsub_VubVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubububb_sat)
+#define Q6_Vub_vsub_VubVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubububb_sat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3845,7 +3844,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuw_vsub_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat)
+#define Q6_Vuw_vsub_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 62
@@ -3856,7 +3855,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Wuw_vsub_WuwWuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat_dv)
+#define Q6_Wuw_vsub_WuwWuw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat_dv)(Vuu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 62 */
#if __HVX_ARCH__ >= 65
@@ -3867,7 +3866,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vabs_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb)
+#define Q6_Vb_vabs_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb)(Vu)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3878,7 +3877,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vabs_Vb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb_sat)
+#define Q6_Vb_vabs_Vb_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb_sat)(Vu)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3889,7 +3888,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vaslacc_VhVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh_acc)
+#define Q6_Vh_vaslacc_VhVhR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3900,7 +3899,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_vasracc_VhVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh_acc)
+#define Q6_Vh_vasracc_VhVhR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3911,7 +3910,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vasr_VuhVuhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubrndsat)
+#define Q6_Vub_vasr_VuhVuhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubrndsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3922,7 +3921,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vub_vasr_VuhVuhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubsat)
+#define Q6_Vub_vasr_VuhVuhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3933,7 +3932,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuh_vasr_VuwVuwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhsat)
+#define Q6_Vuh_vasr_VuwVuwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhsat)(Vu,Vv,Rt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3944,7 +3943,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vavg_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgb)
+#define Q6_Vb_vavg_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgb)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3955,7 +3954,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vavg_VbVb_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgbrnd)
+#define Q6_Vb_vavg_VbVb_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgbrnd)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3966,7 +3965,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuw_vavg_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguw)
+#define Q6_Vuw_vavg_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3977,7 +3976,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuw_vavg_VuwVuw_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguwrnd)
+#define Q6_Vuw_vavg_VuwVuw_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguwrnd)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3988,7 +3987,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_W_vzero __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdd0)
+#define Q6_W_vzero() __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdd0)()
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -3999,7 +3998,7 @@
Execution Slots: SLOT01
========================================================================== */
-#define Q6_vgather_ARMVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermh)
+#define Q6_vgather_ARMVh(Rs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermh)(Rs,Rt,Mu,Vv)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4010,7 +4009,7 @@
Execution Slots: SLOT01
========================================================================== */
-#define Q6_vgather_AQRMVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhq)
+#define Q6_vgather_AQRMVh(Rs,Qs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4021,7 +4020,7 @@
Execution Slots: SLOT01
========================================================================== */
-#define Q6_vgather_ARMWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhw)
+#define Q6_vgather_ARMWw(Rs,Rt,Mu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhw)(Rs,Rt,Mu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4032,7 +4031,7 @@
Execution Slots: SLOT01
========================================================================== */
-#define Q6_vgather_AQRMWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhwq)
+#define Q6_vgather_AQRMWw(Rs,Qs,Rt,Mu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhwq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vvv)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4043,7 +4042,7 @@
Execution Slots: SLOT01
========================================================================== */
-#define Q6_vgather_ARMVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermw)
+#define Q6_vgather_ARMVw(Rs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermw)(Rs,Rt,Mu,Vv)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4054,7 +4053,7 @@
Execution Slots: SLOT01
========================================================================== */
-#define Q6_vgather_AQRMVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermwq)
+#define Q6_vgather_AQRMVw(Rs,Qs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermwq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4065,7 +4064,7 @@
Execution Slots: SLOT2
========================================================================== */
-#define Q6_Vh_vlut4_VuhPh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlut4)
+#define Q6_Vh_vlut4_VuhPh(Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlut4)(Vu,Rtt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4076,7 +4075,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpa_WubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu)
+#define Q6_Wh_vmpa_WubRub(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu)(Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4087,7 +4086,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Wh_vmpaacc_WhWubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu_acc)
+#define Q6_Wh_vmpaacc_WhWubRub(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu_acc)(Vxx,Vuu,Rt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4098,7 +4097,7 @@
Execution Slots: SLOT2
========================================================================== */
-#define Q6_Vh_vmpa_VhVhVhPh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahhsat)
+#define Q6_Vh_vmpa_VhVhVhPh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahhsat)(Vx,Vu,Rtt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4109,7 +4108,7 @@
Execution Slots: SLOT2
========================================================================== */
-#define Q6_Vh_vmpa_VhVhVuhPuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhuhsat)
+#define Q6_Vh_vmpa_VhVhVuhPuh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhuhsat)(Vx,Vu,Rtt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4120,7 +4119,7 @@
Execution Slots: SLOT2
========================================================================== */
-#define Q6_Vh_vmps_VhVhVuhPuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpsuhuhsat)
+#define Q6_Vh_vmps_VhVhVuhPuh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpsuhuhsat)(Vx,Vu,Rtt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4131,7 +4130,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_vmpyacc_WwVhRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh_acc)
+#define Q6_Ww_vmpyacc_WwVhRh(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh_acc)(Vxx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4142,7 +4141,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vuw_vmpye_VuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe)
+#define Q6_Vuw_vmpye_VuhRuh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe)(Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4153,7 +4152,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Vuw_vmpyeacc_VuwVuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe_acc)
+#define Q6_Vuw_vmpyeacc_VuwVuhRuh(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe_acc)(Vx,Vu,Rt)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4164,7 +4163,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_vnavg_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgb)
+#define Q6_Vb_vnavg_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgb)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4175,7 +4174,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vb_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqb)
+#define Q6_Vb_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqb)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1))
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4186,7 +4185,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vh_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqh)
+#define Q6_Vh_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqh)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1))
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4197,7 +4196,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqw)
+#define Q6_Vw_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqw)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1))
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4208,7 +4207,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vscatter_RMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh)
+#define Q6_vscatter_RMVhV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh)(Rt,Mu,Vv,Vw)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4219,7 +4218,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vscatteracc_RMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh_add)
+#define Q6_vscatteracc_RMVhV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh_add)(Rt,Mu,Vv,Vw)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4230,7 +4229,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vscatter_QRMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhq)
+#define Q6_vscatter_QRMVhV(Qs,Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv,Vw)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4241,7 +4240,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vscatter_RMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw)
+#define Q6_vscatter_RMWwV(Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw)(Rt,Mu,Vvv,Vw)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4252,7 +4251,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vscatteracc_RMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw_add)
+#define Q6_vscatteracc_RMWwV(Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw_add)(Rt,Mu,Vvv,Vw)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4263,7 +4262,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vscatter_QRMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhwq)
+#define Q6_vscatter_QRMWwV(Qs,Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vvv,Vw)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4274,7 +4273,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vscatter_RMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw)
+#define Q6_vscatter_RMVwV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw)(Rt,Mu,Vv,Vw)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4285,7 +4284,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vscatteracc_RMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw_add)
+#define Q6_vscatteracc_RMVwV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw_add)(Rt,Mu,Vv,Vw)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 65
@@ -4296,7 +4295,7 @@
Execution Slots: SLOT0
========================================================================== */
-#define Q6_vscatter_QRMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermwq)
+#define Q6_vscatter_QRMVwV(Qs,Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv,Vw)
#endif /* __HEXAGON_ARCH___ >= 65 */
#if __HVX_ARCH__ >= 66
@@ -4307,7 +4306,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vadd_VwVwQ_carry_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarrysat)
+#define Q6_Vw_vadd_VwVwQ_carry_sat(Vu,Vv,Qs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarrysat)(Vu,Vv,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1))
#endif /* __HEXAGON_ARCH___ >= 66 */
#if __HVX_ARCH__ >= 66
@@ -4318,7 +4317,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Ww_vasrinto_WwVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasr_into)
+#define Q6_Ww_vasrinto_WwVwVw(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasr_into)(Vxx,Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 66 */
#if __HVX_ARCH__ >= 66
@@ -4329,7 +4328,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vuw_vrotr_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrotr)
+#define Q6_Vuw_vrotr_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrotr)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 66 */
#if __HVX_ARCH__ >= 66
@@ -4340,7 +4339,7 @@
Execution Slots: SLOT0123
========================================================================== */
-#define Q6_Vw_vsatdw_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatdw)
+#define Q6_Vw_vsatdw_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatdw)(Vu,Vv)
#endif /* __HEXAGON_ARCH___ >= 66 */
#if __HVX_ARCH__ >= 68
@@ -4351,7 +4350,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_v6mpy_WubWbI_h __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10)
+#define Q6_Ww_v6mpy_WubWbI_h(Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10)(Vuu,Vvv,Iu2)
#endif /* __HEXAGON_ARCH___ >= 68 */
#if __HVX_ARCH__ >= 68
@@ -4362,7 +4361,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_v6mpyacc_WwWubWbI_h __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10_vxx)
+#define Q6_Ww_v6mpyacc_WwWubWbI_h(Vxx,Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10_vxx)(Vxx,Vuu,Vvv,Iu2)
#endif /* __HEXAGON_ARCH___ >= 68 */
#if __HVX_ARCH__ >= 68
@@ -4373,7 +4372,7 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_v6mpy_WubWbI_v __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10)
+#define Q6_Ww_v6mpy_WubWbI_v(Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10)(Vuu,Vvv,Iu2)
#endif /* __HEXAGON_ARCH___ >= 68 */
#if __HVX_ARCH__ >= 68
@@ -4384,9 +4383,801 @@
Execution Slots: SLOT23
========================================================================== */
-#define Q6_Ww_v6mpyacc_WwWubWbI_v __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10_vxx)
+#define Q6_Ww_v6mpyacc_WwWubWbI_v(Vxx,Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10_vxx)(Vxx,Vuu,Vvv,Iu2)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vabs(Vu32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vabs_Vhf(HVX_Vector Vu)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vhf_vabs_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.sf=vabs(Vu32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_vabs_Vsf(HVX_Vector Vu)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vsf_vabs_Vsf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_sf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf16=vadd(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf16_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vadd(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vhf_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_hf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf16=vadd(Vu32.qf16,Vv32.qf16)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf16_vadd_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf16)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf16=vadd(Vu32.qf16,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf16_vadd_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf16_mix)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf32=vadd(Vu32.qf32,Vv32.qf32)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf32_vadd_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf32)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf32=vadd(Vu32.qf32,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_Vqf32Vsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf32_vadd_Vqf32Vsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf32_mix)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf32=vadd(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf32_vadd_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vdd32.sf=vadd(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wsf_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.sf=vadd(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_vadd_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vsf_vadd_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vfmv(Vu32.w)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vfmv_Vw(HVX_Vector Vu)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vfmv_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign_fp)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=Vu32.qf16
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_equals_Vqf16(HVX_Vector Vu)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vhf_equals_Vqf16(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_hf_qf16)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=Vuu32.qf32
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_equals_Wqf32(HVX_VectorPair Vuu)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vhf_equals_Wqf32(Vuu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_hf_qf32)(Vuu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.sf=Vu32.qf32
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_equals_Vqf32(HVX_Vector Vu)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vsf_equals_Vqf32(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_sf_qf32)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.b=vcvt(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vb_vcvt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vb_vcvt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_b_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vcvt(Vu32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vcvt_Vhf(HVX_Vector Vu)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vh_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_h_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vdd32.hf=vcvt(Vu32.b)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt_Vb(HVX_Vector Vu)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Whf_vcvt_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_b)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vcvt(Vu32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_Vh(HVX_Vector Vu)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vhf_vcvt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_h)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vcvt(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vhf_vcvt_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vdd32.hf=vcvt(Vu32.ub)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt_Vub(HVX_Vector Vu)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Whf_vcvt_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_ub)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vcvt(Vu32.uh)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_Vuh(HVX_Vector Vu)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vhf_vcvt_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_uh)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vdd32.sf=vcvt(Vu32.hf)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vcvt_Vhf(HVX_Vector Vu)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wsf_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_sf_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.ub=vcvt(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vub_vcvt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vub_vcvt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_ub_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.uh=vcvt(Vu32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vuh_vcvt_Vhf(HVX_Vector Vu)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vuh_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_uh_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.sf=vdmpy(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_vdmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vsf_vdmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpy_sf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vx32.sf+=vdmpy(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_vdmpyacc_VsfVhfVhf(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vsf_vdmpyacc_VsfVhfVhf(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpy_sf_hf_acc)(Vx,Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vfmax(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfmax_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vhf_vfmax_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmax_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.sf=vfmax(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfmax_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vsf_vfmax_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmax_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vfmin(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfmin_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vhf_vfmin_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmin_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.sf=vfmin(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfmin_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vsf_vfmin_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmin_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vfneg(Vu32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfneg_Vhf(HVX_Vector Vu)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vhf_vfneg_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfneg_hf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.sf=vfneg(Vu32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfneg_Vsf(HVX_Vector Vu)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vsf_vfneg_Vsf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfneg_sf)(Vu)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Qd4=vcmp.gt(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf)(Vu,Vv)),-1)
#endif /* __HEXAGON_ARCH___ >= 68 */
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Qx4&=vcmp.gt(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtand_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Qx4|=vcmp.gt(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtor_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Qx4^=vcmp.gt(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtxacc_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Qd4=vcmp.gt(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gt_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf)(Vu,Vv)),-1)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Qx4&=vcmp.gt(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtand_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Qx4|=vcmp.gt(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtor_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Qx4^=vcmp.gt(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtxacc_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vmax(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmax_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vhf_vmax_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmax_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.sf=vmax(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmax_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vsf_vmax_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmax_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vmin(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmin_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vhf_vmin_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmin_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.sf=vmin(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmin_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vsf_vmin_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmin_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vmpy(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vhf_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_hf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vx32.hf+=vmpy(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmpyacc_VhfVhfVhf(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vhf_vmpyacc_VhfVhfVhf(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_hf_hf_acc)(Vx,Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf16=vmpy(Vu32.qf16,Vv32.qf16)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vqf16_vmpy_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf16=vmpy(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vqf16_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf16=vmpy(Vu32.qf16,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vqf16_vmpy_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16_mix_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf32=vmpy(Vu32.qf32,Vv32.qf32)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vmpy_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vqf32_vmpy_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vdd32.qf32=vmpy(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wqf32_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vdd32.qf32=vmpy(Vu32.qf16,Vv32.hf)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wqf32_vmpy_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_mix_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vdd32.qf32=vmpy(Vu32.qf16,Vv32.qf16)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wqf32_vmpy_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_qf16)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf32=vmpy(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vmpy_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vqf32_vmpy_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vdd32.sf=vmpy(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wsf_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vxx32.sf+=vmpy(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vmpyacc_WsfVhfVhf(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wsf_vmpyacc_WsfVhfVhf(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_hf_acc)(Vxx,Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.sf=vmpy(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmpy_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vsf_vmpy_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf16=vsub(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf16_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.hf=vsub(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vhf_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vhf_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_hf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf16=vsub(Vu32.qf16,Vv32.qf16)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf16_vsub_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf16)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf16=vsub(Vu32.qf16,Vv32.hf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf16_vsub_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf16_mix)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf32=vsub(Vu32.qf32,Vv32.qf32)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf32_vsub_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf32)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf32=vsub(Vu32.qf32,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_Vqf32Vsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf32_vsub_Vqf32Vsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf32_mix)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.qf32=vsub(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vqf32_vsub_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vdd32.sf=vsub(Vu32.hf,Vv32.hf)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wsf_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf_hf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vd32.sf=vsub(Vu32.sf,Vv32.sf)
+ C Intrinsic Prototype: HVX_Vector Q6_Vsf_vsub_VsfVsf(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vsf_vsub_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf_sf)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 68 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+ Assembly Syntax: Vd32.ub=vasr(Vuu32.uh,Vv32.ub):rnd:sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_WuhVub_rnd_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vub_vasr_WuhVub_rnd_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvuhubrndsat)(Vuu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+ Assembly Syntax: Vd32.ub=vasr(Vuu32.uh,Vv32.ub):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_WuhVub_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vub_vasr_WuhVub_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvuhubsat)(Vuu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+ Assembly Syntax: Vd32.uh=vasr(Vuu32.w,Vv32.uh):rnd:sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_WwVuh_rnd_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vuh_vasr_WwVuh_rnd_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvwuhrndsat)(Vuu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+ Assembly Syntax: Vd32.uh=vasr(Vuu32.w,Vv32.uh):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_WwVuh_sat(HVX_VectorPair Vuu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vuh_vasr_WwVuh_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvwuhsat)(Vuu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 69 */
+
+#if __HVX_ARCH__ >= 69
+/* ==========================================================================
+ Assembly Syntax: Vd32.uh=vmpy(Vu32.uh,Vv32.uh):>>16
+ C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmpy_VuhVuh_rs16(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vuh_vmpy_VuhVuh_rs16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhvs)(Vu,Vv)
+#endif /* __HEXAGON_ARCH___ >= 69 */
+
#endif /* __HVX__ */
#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/ia32intrin.h b/contrib/llvm-project/clang/lib/Headers/ia32intrin.h
index 00138effd505..1b979770e196 100644
--- a/contrib/llvm-project/clang/lib/Headers/ia32intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/ia32intrin.h
@@ -16,7 +16,7 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
-#define __DEFAULT_FN_ATTRS_SSE42 __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+#define __DEFAULT_FN_ATTRS_CRC32 __attribute__((__always_inline__, __nodebug__, __target__("crc32")))
#if defined(__cplusplus) && (__cplusplus >= 201103L)
#define __DEFAULT_FN_ATTRS_CAST __attribute__((__always_inline__)) constexpr
@@ -26,167 +26,271 @@
#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
#endif
-/** Find the first set bit starting from the lsb. Result is undefined if
- * input is 0.
- *
- * \headerfile <x86intrin.h>
- *
- * This intrinsic corresponds to the <c> BSF </c> instruction or the
- * <c> TZCNT </c> instruction.
- *
- * \param __A
- * A 32-bit integer operand.
- * \returns A 32-bit integer containing the bit number.
- */
+/// Find the first set bit starting from the lsb. Result is undefined if
+/// input is 0.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BSF instruction or the
+/// \c TZCNT instruction.
+///
+/// \param __A
+/// A 32-bit integer operand.
+/// \returns A 32-bit integer containing the bit number.
+/// \see _bit_scan_forward
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__bsfd(int __A) {
- return __builtin_ctz(__A);
-}
-
-/** Find the first set bit starting from the msb. Result is undefined if
- * input is 0.
- *
- * \headerfile <x86intrin.h>
- *
- * This intrinsic corresponds to the <c> BSR </c> instruction or the
- * <c> LZCNT </c> instruction and an <c> XOR </c>.
- *
- * \param __A
- * A 32-bit integer operand.
- * \returns A 32-bit integer containing the bit number.
- */
+ return __builtin_ctz((unsigned int)__A);
+}
+
+/// Find the first set bit starting from the msb. Result is undefined if
+/// input is 0.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BSR instruction or the
+/// \c LZCNT instruction and an \c XOR.
+///
+/// \param __A
+/// A 32-bit integer operand.
+/// \returns A 32-bit integer containing the bit number.
+/// \see _bit_scan_reverse
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__bsrd(int __A) {
- return 31 - __builtin_clz(__A);
-}
-
-/** Swaps the bytes in the input. Converting little endian to big endian or
- * vice versa.
- *
- * \headerfile <x86intrin.h>
- *
- * This intrinsic corresponds to the <c> BSWAP </c> instruction.
- *
- * \param __A
- * A 32-bit integer operand.
- * \returns A 32-bit integer containing the swapped bytes.
- */
+ return 31 - __builtin_clz((unsigned int)__A);
+}
+
+/// Swaps the bytes in the input, converting little endian to big endian or
+/// vice versa.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BSWAP instruction.
+///
+/// \param __A
+/// A 32-bit integer operand.
+/// \returns A 32-bit integer containing the swapped bytes.
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__bswapd(int __A) {
- return __builtin_bswap32(__A);
-}
-
+ return (int)__builtin_bswap32((unsigned int)__A);
+}
+
+/// Swaps the bytes in the input, converting little endian to big endian or
+/// vice versa.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BSWAP instruction.
+///
+/// \param __A
+/// A 32-bit integer operand.
+/// \returns A 32-bit integer containing the swapped bytes.
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
_bswap(int __A) {
- return __builtin_bswap32(__A);
-}
-
+ return (int)__builtin_bswap32((unsigned int)__A);
+}
+
+/// Find the first set bit starting from the lsb. Result is undefined if
+/// input is 0.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// int _bit_scan_forward(int A);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c BSF instruction or the
+/// \c TZCNT instruction.
+///
+/// \param A
+/// A 32-bit integer operand.
+/// \returns A 32-bit integer containing the bit number.
+/// \see __bsfd
#define _bit_scan_forward(A) __bsfd((A))
+
+/// Find the first set bit starting from the msb. Result is undefined if
+/// input is 0.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// int _bit_scan_reverse(int A);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c BSR instruction or the
+/// \c LZCNT instruction and an \c XOR.
+///
+/// \param A
+/// A 32-bit integer operand.
+/// \returns A 32-bit integer containing the bit number.
+/// \see __bsrd
#define _bit_scan_reverse(A) __bsrd((A))
#ifdef __x86_64__
-/** Find the first set bit starting from the lsb. Result is undefined if
- * input is 0.
- *
- * \headerfile <x86intrin.h>
- *
- * This intrinsic corresponds to the <c> BSF </c> instruction or the
- * <c> TZCNT </c> instruction.
- *
- * \param __A
- * A 64-bit integer operand.
- * \returns A 32-bit integer containing the bit number.
- */
+/// Find the first set bit starting from the lsb. Result is undefined if
+/// input is 0.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BSF instruction or the
+/// \c TZCNT instruction.
+///
+/// \param __A
+/// A 64-bit integer operand.
+/// \returns A 32-bit integer containing the bit number.
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__bsfq(long long __A) {
- return __builtin_ctzll(__A);
-}
-
-/** Find the first set bit starting from the msb. Result is undefined if
- * input is 0.
- *
- * \headerfile <x86intrin.h>
- *
- * This intrinsic corresponds to the <c> BSR </c> instruction or the
- * <c> LZCNT </c> instruction and an <c> XOR </c>.
- *
- * \param __A
- * A 64-bit integer operand.
- * \returns A 32-bit integer containing the bit number.
- */
+ return (long long)__builtin_ctzll((unsigned long long)__A);
+}
+
+/// Find the first set bit starting from the msb. Result is undefined if
+/// input is 0.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BSR instruction or the
+/// \c LZCNT instruction and an \c XOR.
+///
+/// \param __A
+/// A 64-bit integer operand.
+/// \returns A 32-bit integer containing the bit number.
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__bsrq(long long __A) {
- return 63 - __builtin_clzll(__A);
-}
-
-/** Swaps the bytes in the input. Converting little endian to big endian or
- * vice versa.
- *
- * \headerfile <x86intrin.h>
- *
- * This intrinsic corresponds to the <c> BSWAP </c> instruction.
- *
- * \param __A
- * A 64-bit integer operand.
- * \returns A 64-bit integer containing the swapped bytes.
- */
+ return 63 - __builtin_clzll((unsigned long long)__A);
+}
+
+/// Swaps the bytes in the input. Converting little endian to big endian or
+/// vice versa.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c BSWAP instruction.
+///
+/// \param __A
+/// A 64-bit integer operand.
+/// \returns A 64-bit integer containing the swapped bytes.
+/// \see _bswap64
static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR
__bswapq(long long __A) {
- return __builtin_bswap64(__A);
-}
-
+ return (long long)__builtin_bswap64((unsigned long long)__A);
+}
+
+/// Swaps the bytes in the input. Converting little endian to big endian or
+/// vice versa.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// long long _bswap64(long long A);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c BSWAP instruction.
+///
+/// \param A
+/// A 64-bit integer operand.
+/// \returns A 64-bit integer containing the swapped bytes.
+/// \see __bswapq
#define _bswap64(A) __bswapq((A))
-#endif
+#endif /* __x86_64__ */
-/** Counts the number of bits in the source operand having a value of 1.
- *
- * \headerfile <x86intrin.h>
- *
- * This intrinsic corresponds to the <c> POPCNT </c> instruction or a
- * a sequence of arithmetic and logic ops to calculate it.
- *
- * \param __A
- * An unsigned 32-bit integer operand.
- * \returns A 32-bit integer containing the number of bits with value 1 in the
- * source operand.
- */
+/// Counts the number of bits in the source operand having a value of 1.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c POPCNT instruction or a
+/// a sequence of arithmetic and logic ops to calculate it.
+///
+/// \param __A
+/// An unsigned 32-bit integer operand.
+/// \returns A 32-bit integer containing the number of bits with value 1 in the
+/// source operand.
+/// \see _popcnt32
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__popcntd(unsigned int __A)
{
return __builtin_popcount(__A);
}
+/// Counts the number of bits in the source operand having a value of 1.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// int _popcnt32(int A);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c POPCNT instruction or a
+/// a sequence of arithmetic and logic ops to calculate it.
+///
+/// \param A
+/// An unsigned 32-bit integer operand.
+/// \returns A 32-bit integer containing the number of bits with value 1 in the
+/// source operand.
+/// \see __popcntd
#define _popcnt32(A) __popcntd((A))
#ifdef __x86_64__
-/** Counts the number of bits in the source operand having a value of 1.
- *
- * \headerfile <x86intrin.h>
- *
- * This intrinsic corresponds to the <c> POPCNT </c> instruction or a
- * a sequence of arithmetic and logic ops to calculate it.
- *
- * \param __A
- * An unsigned 64-bit integer operand.
- * \returns A 64-bit integer containing the number of bits with value 1 in the
- * source operand.
- */
+/// Counts the number of bits in the source operand having a value of 1.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c POPCNT instruction or a
+/// a sequence of arithmetic and logic ops to calculate it.
+///
+/// \param __A
+/// An unsigned 64-bit integer operand.
+/// \returns A 64-bit integer containing the number of bits with value 1 in the
+/// source operand.
+/// \see _popcnt64
static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR
__popcntq(unsigned long long __A)
{
return __builtin_popcountll(__A);
}
+/// Counts the number of bits in the source operand having a value of 1.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// long long _popcnt64(unsigned long long A);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c POPCNT instruction or a
+/// a sequence of arithmetic and logic ops to calculate it.
+///
+/// \param A
+/// An unsigned 64-bit integer operand.
+/// \returns A 64-bit integer containing the number of bits with value 1 in the
+/// source operand.
+/// \see __popcntq
#define _popcnt64(A) __popcntq((A))
#endif /* __x86_64__ */
#ifdef __x86_64__
+/// Returns the program status and control \c RFLAGS register with the \c VM
+/// and \c RF flags cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PUSHFQ + \c POP instruction sequence.
+///
+/// \returns The 64-bit value of the RFLAGS register.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
__readeflags(void)
{
return __builtin_ia32_readeflags_u64();
}
+/// Writes the specified value to the program status and control \c RFLAGS
+/// register. Reserved bits are not affected.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PUSH + \c POPFQ instruction sequence.
+///
+/// \param __f
+/// The 64-bit value to write to \c RFLAGS.
static __inline__ void __DEFAULT_FN_ATTRS
__writeeflags(unsigned long long __f)
{
@@ -194,12 +298,29 @@ __writeeflags(unsigned long long __f)
}
#else /* !__x86_64__ */
+/// Returns the program status and control \c EFLAGS register with the \c VM
+/// and \c RF flags cleared.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PUSHFD + \c POP instruction sequence.
+///
+/// \returns The 32-bit value of the EFLAGS register.
static __inline__ unsigned int __DEFAULT_FN_ATTRS
__readeflags(void)
{
return __builtin_ia32_readeflags_u32();
}
+/// Writes the specified value to the program status and control \c EFLAGS
+/// register. Reserved bits are not affected.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PUSH + \c POPFD instruction sequence.
+///
+/// \param __f
+/// The 32-bit value to write to \c EFLAGS.
static __inline__ void __DEFAULT_FN_ATTRS
__writeeflags(unsigned int __f)
{
@@ -207,165 +328,209 @@ __writeeflags(unsigned int __f)
}
#endif /* !__x86_64__ */
-/** Cast a 32-bit float value to a 32-bit unsigned integer value
- *
- * \headerfile <x86intrin.h>
- * This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction in x86_64,
- * and corresponds to the <c> VMOVL / MOVL </c> instruction in ia32.
- *
- * \param __A
- * A 32-bit float value.
- * \returns a 32-bit unsigned integer containing the converted value.
- */
+/// Cast a 32-bit float value to a 32-bit unsigned integer value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVD / \c MOVD instruction in x86_64,
+/// and corresponds to the \c VMOVL / \c MOVL instruction in ia32.
+///
+/// \param __A
+/// A 32-bit float value.
+/// \returns a 32-bit unsigned integer containing the converted value.
static __inline__ unsigned int __DEFAULT_FN_ATTRS_CAST
_castf32_u32(float __A) {
return __builtin_bit_cast(unsigned int, __A);
}
-/** Cast a 64-bit float value to a 64-bit unsigned integer value
- *
- * \headerfile <x86intrin.h>
- * This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction in x86_64,
- * and corresponds to the <c> VMOVL / MOVL </c> instruction in ia32.
- *
- * \param __A
- * A 64-bit float value.
- * \returns a 64-bit unsigned integer containing the converted value.
- */
+/// Cast a 64-bit float value to a 64-bit unsigned integer value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVQ / \c MOVQ instruction in x86_64,
+/// and corresponds to the \c VMOVL / \c MOVL instruction in ia32.
+///
+/// \param __A
+/// A 64-bit float value.
+/// \returns a 64-bit unsigned integer containing the converted value.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CAST
_castf64_u64(double __A) {
return __builtin_bit_cast(unsigned long long, __A);
}
-/** Cast a 32-bit unsigned integer value to a 32-bit float value
- *
- * \headerfile <x86intrin.h>
- * This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction in x86_64,
- * and corresponds to the <c> FLDS </c> instruction in ia32.
- *
- * \param __A
- * A 32-bit unsigned integer value.
- * \returns a 32-bit float value containing the converted value.
- */
+/// Cast a 32-bit unsigned integer value to a 32-bit float value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVQ / \c MOVQ instruction in x86_64,
+/// and corresponds to the \c FLDS instruction in ia32.
+///
+/// \param __A
+/// A 32-bit unsigned integer value.
+/// \returns a 32-bit float value containing the converted value.
static __inline__ float __DEFAULT_FN_ATTRS_CAST
_castu32_f32(unsigned int __A) {
return __builtin_bit_cast(float, __A);
}
-/** Cast a 64-bit unsigned integer value to a 64-bit float value
- *
- * \headerfile <x86intrin.h>
- * This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction in x86_64,
- * and corresponds to the <c> FLDL </c> instruction in ia32.
- *
- * \param __A
- * A 64-bit unsigned integer value.
- * \returns a 64-bit float value containing the converted value.
- */
+/// Cast a 64-bit unsigned integer value to a 64-bit float value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c VMOVQ / \c MOVQ instruction in x86_64,
+/// and corresponds to the \c FLDL instruction in ia32.
+///
+/// \param __A
+/// A 64-bit unsigned integer value.
+/// \returns a 64-bit float value containing the converted value.
static __inline__ double __DEFAULT_FN_ATTRS_CAST
_castu64_f64(unsigned long long __A) {
return __builtin_bit_cast(double, __A);
}
-/** Adds the unsigned integer operand to the CRC-32C checksum of the
- * unsigned char operand.
- *
- * \headerfile <x86intrin.h>
- *
- * This intrinsic corresponds to the <c> CRC32B </c> instruction.
- *
- * \param __C
- * An unsigned integer operand to add to the CRC-32C checksum of operand
- * \a __D.
- * \param __D
- * An unsigned 8-bit integer operand used to compute the CRC-32C checksum.
- * \returns The result of adding operand \a __C to the CRC-32C checksum of
- * operand \a __D.
- */
-static __inline__ unsigned int __DEFAULT_FN_ATTRS_SSE42
+/// Adds the unsigned integer operand to the CRC-32C checksum of the
+/// unsigned char operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CRC32B instruction.
+///
+/// \param __C
+/// An unsigned integer operand to add to the CRC-32C checksum of operand
+/// \a __D.
+/// \param __D
+/// An unsigned 8-bit integer operand used to compute the CRC-32C checksum.
+/// \returns The result of adding operand \a __C to the CRC-32C checksum of
+/// operand \a __D.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_CRC32
__crc32b(unsigned int __C, unsigned char __D)
{
return __builtin_ia32_crc32qi(__C, __D);
}
-/** Adds the unsigned integer operand to the CRC-32C checksum of the
- * unsigned short operand.
- *
- * \headerfile <x86intrin.h>
- *
- * This intrinsic corresponds to the <c> CRC32W </c> instruction.
- *
- * \param __C
- * An unsigned integer operand to add to the CRC-32C checksum of operand
- * \a __D.
- * \param __D
- * An unsigned 16-bit integer operand used to compute the CRC-32C checksum.
- * \returns The result of adding operand \a __C to the CRC-32C checksum of
- * operand \a __D.
- */
-static __inline__ unsigned int __DEFAULT_FN_ATTRS_SSE42
+/// Adds the unsigned integer operand to the CRC-32C checksum of the
+/// unsigned short operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CRC32W instruction.
+///
+/// \param __C
+/// An unsigned integer operand to add to the CRC-32C checksum of operand
+/// \a __D.
+/// \param __D
+/// An unsigned 16-bit integer operand used to compute the CRC-32C checksum.
+/// \returns The result of adding operand \a __C to the CRC-32C checksum of
+/// operand \a __D.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_CRC32
__crc32w(unsigned int __C, unsigned short __D)
{
return __builtin_ia32_crc32hi(__C, __D);
}
-/** Adds the unsigned integer operand to the CRC-32C checksum of the
- * second unsigned integer operand.
- *
- * \headerfile <x86intrin.h>
- *
- * This intrinsic corresponds to the <c> CRC32D </c> instruction.
- *
- * \param __C
- * An unsigned integer operand to add to the CRC-32C checksum of operand
- * \a __D.
- * \param __D
- * An unsigned 32-bit integer operand used to compute the CRC-32C checksum.
- * \returns The result of adding operand \a __C to the CRC-32C checksum of
- * operand \a __D.
- */
-static __inline__ unsigned int __DEFAULT_FN_ATTRS_SSE42
+/// Adds the unsigned integer operand to the CRC-32C checksum of the
+/// second unsigned integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CRC32D instruction.
+///
+/// \param __C
+/// An unsigned integer operand to add to the CRC-32C checksum of operand
+/// \a __D.
+/// \param __D
+/// An unsigned 32-bit integer operand used to compute the CRC-32C checksum.
+/// \returns The result of adding operand \a __C to the CRC-32C checksum of
+/// operand \a __D.
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_CRC32
__crc32d(unsigned int __C, unsigned int __D)
{
return __builtin_ia32_crc32si(__C, __D);
}
#ifdef __x86_64__
-/** Adds the unsigned integer operand to the CRC-32C checksum of the
- * unsigned 64-bit integer operand.
- *
- * \headerfile <x86intrin.h>
- *
- * This intrinsic corresponds to the <c> CRC32Q </c> instruction.
- *
- * \param __C
- * An unsigned integer operand to add to the CRC-32C checksum of operand
- * \a __D.
- * \param __D
- * An unsigned 64-bit integer operand used to compute the CRC-32C checksum.
- * \returns The result of adding operand \a __C to the CRC-32C checksum of
- * operand \a __D.
- */
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS_SSE42
+/// Adds the unsigned integer operand to the CRC-32C checksum of the
+/// unsigned 64-bit integer operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c CRC32Q instruction.
+///
+/// \param __C
+/// An unsigned integer operand to add to the CRC-32C checksum of operand
+/// \a __D.
+/// \param __D
+/// An unsigned 64-bit integer operand used to compute the CRC-32C checksum.
+/// \returns The result of adding operand \a __C to the CRC-32C checksum of
+/// operand \a __D.
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CRC32
__crc32q(unsigned long long __C, unsigned long long __D)
{
return __builtin_ia32_crc32di(__C, __D);
}
#endif /* __x86_64__ */
+/// Reads the specified performance monitoring counter. Refer to your
+/// processor's documentation to determine which performance counters are
+/// supported.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c RDPMC instruction.
+///
+/// \param __A
+/// The performance counter to read.
+/// \returns The 64-bit value read from the performance counter.
+/// \see _rdpmc
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
__rdpmc(int __A) {
return __builtin_ia32_rdpmc(__A);
}
-/* __rdtscp */
+/// Reads the processor's time stamp counter and the \c IA32_TSC_AUX MSR
+/// \c (0xc0000103).
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c RDTSCP instruction.
+///
+/// \param __A
+/// Address of where to store the 32-bit \c IA32_TSC_AUX value.
+/// \returns The 64-bit value of the time stamp counter.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
__rdtscp(unsigned int *__A) {
return __builtin_ia32_rdtscp(__A);
}
+/// Reads the processor's time stamp counter.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned long long _rdtsc();
+/// \endcode
+///
+/// This intrinsic corresponds to the \c RDTSC instruction.
+///
+/// \returns The 64-bit value of the time stamp counter.
#define _rdtsc() __rdtsc()
+/// Reads the specified performance monitoring counter. Refer to your
+/// processor's documentation to determine which performance counters are
+/// supported.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned long long _rdpmc(int A);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c RDPMC instruction.
+///
+/// \param A
+/// The performance counter to read.
+/// \returns The 64-bit value read from the performance counter.
+/// \see __rdpmc
#define _rdpmc(A) __rdpmc(A)
static __inline__ void __DEFAULT_FN_ATTRS
@@ -373,45 +538,153 @@ _wbinvd(void) {
__builtin_ia32_wbinvd();
}
+/// Rotates an 8-bit value to the left by the specified number of bits.
+/// This operation is undefined if the number of bits exceeds the size of
+/// the value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c ROL instruction.
+///
+/// \param __X
+/// The unsigned 8-bit value to be rotated.
+/// \param __C
+/// The number of bits to rotate the value.
+/// \returns The rotated value.
static __inline__ unsigned char __DEFAULT_FN_ATTRS_CONSTEXPR
__rolb(unsigned char __X, int __C) {
return __builtin_rotateleft8(__X, __C);
}
+/// Rotates an 8-bit value to the right by the specified number of bits.
+/// This operation is undefined if the number of bits exceeds the size of
+/// the value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c ROR instruction.
+///
+/// \param __X
+/// The unsigned 8-bit value to be rotated.
+/// \param __C
+/// The number of bits to rotate the value.
+/// \returns The rotated value.
static __inline__ unsigned char __DEFAULT_FN_ATTRS_CONSTEXPR
__rorb(unsigned char __X, int __C) {
return __builtin_rotateright8(__X, __C);
}
+/// Rotates a 16-bit value to the left by the specified number of bits.
+/// This operation is undefined if the number of bits exceeds the size of
+/// the value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c ROL instruction.
+///
+/// \param __X
+/// The unsigned 16-bit value to be rotated.
+/// \param __C
+/// The number of bits to rotate the value.
+/// \returns The rotated value.
+/// \see _rotwl
static __inline__ unsigned short __DEFAULT_FN_ATTRS_CONSTEXPR
__rolw(unsigned short __X, int __C) {
return __builtin_rotateleft16(__X, __C);
}
+/// Rotates a 16-bit value to the right by the specified number of bits.
+/// This operation is undefined if the number of bits exceeds the size of
+/// the value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c ROR instruction.
+///
+/// \param __X
+/// The unsigned 16-bit value to be rotated.
+/// \param __C
+/// The number of bits to rotate the value.
+/// \returns The rotated value.
+/// \see _rotwr
static __inline__ unsigned short __DEFAULT_FN_ATTRS_CONSTEXPR
__rorw(unsigned short __X, int __C) {
return __builtin_rotateright16(__X, __C);
}
+/// Rotates a 32-bit value to the left by the specified number of bits.
+/// This operation is undefined if the number of bits exceeds the size of
+/// the value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c ROL instruction.
+///
+/// \param __X
+/// The unsigned 32-bit value to be rotated.
+/// \param __C
+/// The number of bits to rotate the value.
+/// \returns The rotated value.
+/// \see _rotl
static __inline__ unsigned int __DEFAULT_FN_ATTRS_CONSTEXPR
__rold(unsigned int __X, int __C) {
- return __builtin_rotateleft32(__X, __C);
-}
-
+ return __builtin_rotateleft32(__X, (unsigned int)__C);
+}
+
+/// Rotates a 32-bit value to the right by the specified number of bits.
+/// This operation is undefined if the number of bits exceeds the size of
+/// the value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c ROR instruction.
+///
+/// \param __X
+/// The unsigned 32-bit value to be rotated.
+/// \param __C
+/// The number of bits to rotate the value.
+/// \returns The rotated value.
+/// \see _rotr
static __inline__ unsigned int __DEFAULT_FN_ATTRS_CONSTEXPR
__rord(unsigned int __X, int __C) {
- return __builtin_rotateright32(__X, __C);
+ return __builtin_rotateright32(__X, (unsigned int)__C);
}
#ifdef __x86_64__
+/// Rotates a 64-bit value to the left by the specified number of bits.
+/// This operation is undefined if the number of bits exceeds the size of
+/// the value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c ROL instruction.
+///
+/// \param __X
+/// The unsigned 64-bit value to be rotated.
+/// \param __C
+/// The number of bits to rotate the value.
+/// \returns The rotated value.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CONSTEXPR
__rolq(unsigned long long __X, int __C) {
- return __builtin_rotateleft64(__X, __C);
-}
-
+ return __builtin_rotateleft64(__X, (unsigned long long)__C);
+}
+
+/// Rotates a 64-bit value to the right by the specified number of bits.
+/// This operation is undefined if the number of bits exceeds the size of
+/// the value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c ROR instruction.
+///
+/// \param __X
+/// The unsigned 64-bit value to be rotated.
+/// \param __C
+/// The number of bits to rotate the value.
+/// \returns The rotated value.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CONSTEXPR
__rorq(unsigned long long __X, int __C) {
- return __builtin_rotateright64(__X, __C);
+ return __builtin_rotateright64(__X, (unsigned long long)__C);
}
#endif /* __x86_64__ */
@@ -419,23 +692,172 @@ __rorq(unsigned long long __X, int __C) {
/* These are already provided as builtins for MSVC. */
/* Select the correct function based on the size of long. */
#ifdef __LP64__
+/// Rotates a 64-bit value to the left by the specified number of bits.
+/// This operation is undefined if the number of bits exceeds the size of
+/// the value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned long long _lrotl(unsigned long long a, int b);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c ROL instruction.
+///
+/// \param a
+/// The unsigned 64-bit value to be rotated.
+/// \param b
+/// The number of bits to rotate the value.
+/// \returns The rotated value.
+/// \see __rolq
#define _lrotl(a,b) __rolq((a), (b))
+
+/// Rotates a 64-bit value to the right by the specified number of bits.
+/// This operation is undefined if the number of bits exceeds the size of
+/// the value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned long long _lrotr(unsigned long long a, int b);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c ROR instruction.
+///
+/// \param a
+/// The unsigned 64-bit value to be rotated.
+/// \param b
+/// The number of bits to rotate the value.
+/// \returns The rotated value.
+/// \see __rorq
#define _lrotr(a,b) __rorq((a), (b))
-#else
+#else // __LP64__
+/// Rotates a 32-bit value to the left by the specified number of bits.
+/// This operation is undefined if the number of bits exceeds the size of
+/// the value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned int _lrotl(unsigned int a, int b);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c ROL instruction.
+///
+/// \param a
+/// The unsigned 32-bit value to be rotated.
+/// \param b
+/// The number of bits to rotate the value.
+/// \returns The rotated value.
+/// \see __rold
#define _lrotl(a,b) __rold((a), (b))
+
+/// Rotates a 32-bit value to the right by the specified number of bits.
+/// This operation is undefined if the number of bits exceeds the size of
+/// the value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned int _lrotr(unsigned int a, int b);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c ROR instruction.
+///
+/// \param a
+/// The unsigned 32-bit value to be rotated.
+/// \param b
+/// The number of bits to rotate the value.
+/// \returns The rotated value.
+/// \see __rord
#define _lrotr(a,b) __rord((a), (b))
-#endif
+#endif // __LP64__
+
+/// Rotates a 32-bit value to the left by the specified number of bits.
+/// This operation is undefined if the number of bits exceeds the size of
+/// the value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned int _rotl(unsigned int a, int b);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c ROL instruction.
+///
+/// \param a
+/// The unsigned 32-bit value to be rotated.
+/// \param b
+/// The number of bits to rotate the value.
+/// \returns The rotated value.
+/// \see __rold
#define _rotl(a,b) __rold((a), (b))
+
+/// Rotates a 32-bit value to the right by the specified number of bits.
+/// This operation is undefined if the number of bits exceeds the size of
+/// the value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned int _rotr(unsigned int a, int b);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c ROR instruction.
+///
+/// \param a
+/// The unsigned 32-bit value to be rotated.
+/// \param b
+/// The number of bits to rotate the value.
+/// \returns The rotated value.
+/// \see __rord
#define _rotr(a,b) __rord((a), (b))
#endif // _MSC_VER
/* These are not builtins so need to be provided in all modes. */
+/// Rotates a 16-bit value to the left by the specified number of bits.
+/// This operation is undefined if the number of bits exceeds the size of
+/// the value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned short _rotwl(unsigned short a, int b);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c ROL instruction.
+///
+/// \param a
+/// The unsigned 16-bit value to be rotated.
+/// \param b
+/// The number of bits to rotate the value.
+/// \returns The rotated value.
+/// \see __rolw
#define _rotwl(a,b) __rolw((a), (b))
+
+/// Rotates a 16-bit value to the right by the specified number of bits.
+/// This operation is undefined if the number of bits exceeds the size of
+/// the value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// unsigned short _rotwr(unsigned short a, int b);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c ROR instruction.
+///
+/// \param a
+/// The unsigned 16-bit value to be rotated.
+/// \param b
+/// The number of bits to rotate the value.
+/// \returns The rotated value.
+/// \see __rorw
#define _rotwr(a,b) __rorw((a), (b))
#undef __DEFAULT_FN_ATTRS
#undef __DEFAULT_FN_ATTRS_CAST
-#undef __DEFAULT_FN_ATTRS_SSE42
+#undef __DEFAULT_FN_ATTRS_CRC32
#undef __DEFAULT_FN_ATTRS_CONSTEXPR
#endif /* __IA32INTRIN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/immintrin.h b/contrib/llvm-project/clang/lib/Headers/immintrin.h
index 56d3dadf6a33..27800f7a8202 100644
--- a/contrib/llvm-project/clang/lib/Headers/immintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/immintrin.h
@@ -10,6 +10,10 @@
#ifndef __IMMINTRIN_H
#define __IMMINTRIN_H
+#if !defined(__i386__) && !defined(__x86_64__)
+#error "This header is only meant to be used on x86 and x64 architecture"
+#endif
+
#include <x86gprintrin.h>
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
@@ -186,6 +190,11 @@
#endif
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVXIFMA__)
+#include <avxifmaintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__AVX512VBMI__)
#include <avx512vbmiintrin.h>
#endif
@@ -211,6 +220,16 @@
#endif
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVX512FP16__)
+#include <avx512fp16intrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ (defined(__AVX512VL__) && defined(__AVX512FP16__))
+#include <avx512vlfp16intrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__AVX512BF16__)
#include <avx512bf16intrin.h>
#endif
@@ -241,12 +260,44 @@
#endif
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVXVNNIINT8__)
+#include <avxvnniint8intrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVXNECONVERT__)
+#include <avxneconvertintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SHA512__)
+#include <sha512intrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SM3__)
+#include <sm3intrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__SM4__)
+#include <sm4intrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AVXVNNIINT16__)
+#include <avxvnniint16intrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__RDPID__)
-/// Returns the value of the IA32_TSC_AUX MSR (0xc0000103).
+/// Reads the value of the IA32_TSC_AUX MSR (0xc0000103).
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> RDPID </c> instruction.
+///
+/// \returns The 32-bit contents of the MSR.
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("rdpid")))
_rdpid_u32(void) {
return __builtin_ia32_rdpid();
@@ -255,72 +306,172 @@ _rdpid_u32(void) {
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__RDRND__)
+/// Returns a 16-bit hardware-generated random value.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> RDRAND </c> instruction.
+///
+/// \param __p
+/// A pointer to a 16-bit memory location to place the random value.
+/// \returns 1 if the value was successfully generated, 0 otherwise.
static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
_rdrand16_step(unsigned short *__p)
{
- return __builtin_ia32_rdrand16_step(__p);
+ return (int)__builtin_ia32_rdrand16_step(__p);
}
+/// Returns a 32-bit hardware-generated random value.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> RDRAND </c> instruction.
+///
+/// \param __p
+/// A pointer to a 32-bit memory location to place the random value.
+/// \returns 1 if the value was successfully generated, 0 otherwise.
static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
_rdrand32_step(unsigned int *__p)
{
- return __builtin_ia32_rdrand32_step(__p);
+ return (int)__builtin_ia32_rdrand32_step(__p);
}
-#ifdef __x86_64__
+/// Returns a 64-bit hardware-generated random value.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> RDRAND </c> instruction.
+///
+/// \param __p
+/// A pointer to a 64-bit memory location to place the random value.
+/// \returns 1 if the value was successfully generated, 0 otherwise.
static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
_rdrand64_step(unsigned long long *__p)
{
- return __builtin_ia32_rdrand64_step(__p);
-}
+#ifdef __x86_64__
+ return (int)__builtin_ia32_rdrand64_step(__p);
+#else
+ // We need to emulate the functionality of 64-bit rdrand with 2 32-bit
+ // rdrand instructions.
+ unsigned int __lo, __hi;
+ unsigned int __res_lo = __builtin_ia32_rdrand32_step(&__lo);
+ unsigned int __res_hi = __builtin_ia32_rdrand32_step(&__hi);
+ if (__res_lo && __res_hi) {
+ *__p = ((unsigned long long)__hi << 32) | (unsigned long long)__lo;
+ return 1;
+ } else {
+ *__p = 0;
+ return 0;
+ }
#endif
+}
#endif /* __RDRND__ */
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__FSGSBASE__)
#ifdef __x86_64__
+/// Reads the FS base register.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> RDFSBASE </c> instruction.
+///
+/// \returns The lower 32 bits of the FS base register.
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_readfsbase_u32(void)
{
return __builtin_ia32_rdfsbase32();
}
+/// Reads the FS base register.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> RDFSBASE </c> instruction.
+///
+/// \returns The contents of the FS base register.
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_readfsbase_u64(void)
{
return __builtin_ia32_rdfsbase64();
}
+/// Reads the GS base register.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> RDGSBASE </c> instruction.
+///
+/// \returns The lower 32 bits of the GS base register.
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_readgsbase_u32(void)
{
return __builtin_ia32_rdgsbase32();
}
+/// Reads the GS base register.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> RDGSBASE </c> instruction.
+///
+/// \returns The contents of the GS base register.
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_readgsbase_u64(void)
{
return __builtin_ia32_rdgsbase64();
}
+/// Modifies the FS base register.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> WRFSBASE </c> instruction.
+///
+/// \param __V
+/// Value to use for the lower 32 bits of the FS base register.
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_writefsbase_u32(unsigned int __V)
{
__builtin_ia32_wrfsbase32(__V);
}
+/// Modifies the FS base register.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> WRFSBASE </c> instruction.
+///
+/// \param __V
+/// Value to use for the FS base register.
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_writefsbase_u64(unsigned long long __V)
{
__builtin_ia32_wrfsbase64(__V);
}
+/// Modifies the GS base register.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> WRGSBASE </c> instruction.
+///
+/// \param __V
+/// Value to use for the lower 32 bits of the GS base register.
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_writegsbase_u32(unsigned int __V)
{
__builtin_ia32_wrgsbase32(__V);
}
+/// Modifies the GS base register.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> WRFSBASE </c> instruction.
+///
+/// \param __V
+/// Value to use for GS base register.
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
_writegsbase_u64(unsigned long long __V)
{
@@ -339,53 +490,110 @@ _writegsbase_u64(unsigned long long __V)
* field inside of it.
*/
+/// Load a 16-bit value from memory and swap its bytes.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the MOVBE instruction.
+///
+/// \param __P
+/// A pointer to the 16-bit value to load.
+/// \returns The byte-swapped value.
static __inline__ short __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
_loadbe_i16(void const * __P) {
struct __loadu_i16 {
- short __v;
+ unsigned short __v;
} __attribute__((__packed__, __may_alias__));
- return __builtin_bswap16(((const struct __loadu_i16*)__P)->__v);
+ return (short)__builtin_bswap16(((const struct __loadu_i16*)__P)->__v);
}
+/// Swap the bytes of a 16-bit value and store it to memory.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the MOVBE instruction.
+///
+/// \param __P
+/// A pointer to the memory for storing the swapped value.
+/// \param __D
+/// The 16-bit value to be byte-swapped.
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
_storebe_i16(void * __P, short __D) {
struct __storeu_i16 {
- short __v;
+ unsigned short __v;
} __attribute__((__packed__, __may_alias__));
- ((struct __storeu_i16*)__P)->__v = __builtin_bswap16(__D);
+ ((struct __storeu_i16*)__P)->__v = __builtin_bswap16((unsigned short)__D);
}
+/// Load a 32-bit value from memory and swap its bytes.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the MOVBE instruction.
+///
+/// \param __P
+/// A pointer to the 32-bit value to load.
+/// \returns The byte-swapped value.
static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
_loadbe_i32(void const * __P) {
struct __loadu_i32 {
- int __v;
+ unsigned int __v;
} __attribute__((__packed__, __may_alias__));
- return __builtin_bswap32(((const struct __loadu_i32*)__P)->__v);
+ return (int)__builtin_bswap32(((const struct __loadu_i32*)__P)->__v);
}
+/// Swap the bytes of a 32-bit value and store it to memory.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the MOVBE instruction.
+///
+/// \param __P
+/// A pointer to the memory for storing the swapped value.
+/// \param __D
+/// The 32-bit value to be byte-swapped.
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
_storebe_i32(void * __P, int __D) {
struct __storeu_i32 {
- int __v;
+ unsigned int __v;
} __attribute__((__packed__, __may_alias__));
- ((struct __storeu_i32*)__P)->__v = __builtin_bswap32(__D);
+ ((struct __storeu_i32*)__P)->__v = __builtin_bswap32((unsigned int)__D);
}
#ifdef __x86_64__
+/// Load a 64-bit value from memory and swap its bytes.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the MOVBE instruction.
+///
+/// \param __P
+/// A pointer to the 64-bit value to load.
+/// \returns The byte-swapped value.
static __inline__ long long __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
_loadbe_i64(void const * __P) {
struct __loadu_i64 {
- long long __v;
+ unsigned long long __v;
} __attribute__((__packed__, __may_alias__));
- return __builtin_bswap64(((const struct __loadu_i64*)__P)->__v);
+ return (long long)__builtin_bswap64(((const struct __loadu_i64*)__P)->__v);
}
+/// Swap the bytes of a 64-bit value and store it to memory.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the MOVBE instruction.
+///
+/// \param __P
+/// A pointer to the memory for storing the swapped value.
+/// \param __D
+/// The 64-bit value to be byte-swapped.
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
_storebe_i64(void * __P, long long __D) {
struct __storeu_i64 {
- long long __v;
+ unsigned long long __v;
} __attribute__((__packed__, __may_alias__));
- ((struct __storeu_i64*)__P)->__v = __builtin_bswap64(__D);
+ ((struct __storeu_i64*)__P)->__v = __builtin_bswap64((unsigned long long)__D);
}
#endif
#endif /* __MOVBE */
@@ -429,9 +637,13 @@ _storebe_i64(void * __P, long long __D) {
#include <cetintrin.h>
#endif
-/* Some intrinsics inside adxintrin.h are available only on processors with ADX,
- * whereas others are also available at all times. */
+/* Intrinsics inside adcintrin.h are available at all times. */
+#include <adcintrin.h>
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__ADX__)
#include <adxintrin.h>
+#endif
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__RDSEED__)
@@ -477,6 +689,10 @@ _storebe_i64(void * __P, long long __D) {
defined(__INVPCID__)
#include <invpcidintrin.h>
#endif
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AMX_FP16__)
+#include <amxfp16intrin.h>
+#endif
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__KL__) || defined(__WIDEKL__)
@@ -484,11 +700,16 @@ _storebe_i64(void * __P, long long __D) {
#endif
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__AMXTILE__) || defined(__AMXINT8__) || defined(__AMXBF16__)
+ defined(__AMX_TILE__) || defined(__AMX_INT8__) || defined(__AMX_BF16__)
#include <amxintrin.h>
#endif
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__AMX_COMPLEX__)
+#include <amxcomplexintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__AVX512VP2INTERSECT__)
#include <avx512vp2intersectintrin.h>
#endif
@@ -525,13 +746,13 @@ extern "C" {
#if defined(__i386__) || defined(__x86_64__)
static __inline__ long __DEFAULT_FN_ATTRS
_InterlockedExchange_HLEAcquire(long volatile *_Target, long _Value) {
- __asm__ __volatile__(".byte 0xf2 ; lock ; xchg %0, %1"
+ __asm__ __volatile__(".byte 0xf2 ; lock ; xchg {%0, %1|%1, %0}"
: "+r" (_Value), "+m" (*_Target) :: "memory");
return _Value;
}
static __inline__ long __DEFAULT_FN_ATTRS
_InterlockedExchange_HLERelease(long volatile *_Target, long _Value) {
- __asm__ __volatile__(".byte 0xf3 ; lock ; xchg %0, %1"
+ __asm__ __volatile__(".byte 0xf3 ; lock ; xchg {%0, %1|%1, %0}"
: "+r" (_Value), "+m" (*_Target) :: "memory");
return _Value;
}
@@ -539,13 +760,13 @@ _InterlockedExchange_HLERelease(long volatile *_Target, long _Value) {
#if defined(__x86_64__)
static __inline__ __int64 __DEFAULT_FN_ATTRS
_InterlockedExchange64_HLEAcquire(__int64 volatile *_Target, __int64 _Value) {
- __asm__ __volatile__(".byte 0xf2 ; lock ; xchg %0, %1"
+ __asm__ __volatile__(".byte 0xf2 ; lock ; xchg {%0, %1|%1, %0}"
: "+r" (_Value), "+m" (*_Target) :: "memory");
return _Value;
}
static __inline__ __int64 __DEFAULT_FN_ATTRS
_InterlockedExchange64_HLERelease(__int64 volatile *_Target, __int64 _Value) {
- __asm__ __volatile__(".byte 0xf3 ; lock ; xchg %0, %1"
+ __asm__ __volatile__(".byte 0xf3 ; lock ; xchg {%0, %1|%1, %0}"
: "+r" (_Value), "+m" (*_Target) :: "memory");
return _Value;
}
@@ -557,7 +778,7 @@ _InterlockedExchange64_HLERelease(__int64 volatile *_Target, __int64 _Value) {
static __inline__ long __DEFAULT_FN_ATTRS
_InterlockedCompareExchange_HLEAcquire(long volatile *_Destination,
long _Exchange, long _Comparand) {
- __asm__ __volatile__(".byte 0xf2 ; lock ; cmpxchg %2, %1"
+ __asm__ __volatile__(".byte 0xf2 ; lock ; cmpxchg {%2, %1|%1, %2}"
: "+a" (_Comparand), "+m" (*_Destination)
: "r" (_Exchange) : "memory");
return _Comparand;
@@ -565,7 +786,7 @@ _InterlockedCompareExchange_HLEAcquire(long volatile *_Destination,
static __inline__ long __DEFAULT_FN_ATTRS
_InterlockedCompareExchange_HLERelease(long volatile *_Destination,
long _Exchange, long _Comparand) {
- __asm__ __volatile__(".byte 0xf3 ; lock ; cmpxchg %2, %1"
+ __asm__ __volatile__(".byte 0xf3 ; lock ; cmpxchg {%2, %1|%1, %2}"
: "+a" (_Comparand), "+m" (*_Destination)
: "r" (_Exchange) : "memory");
return _Comparand;
@@ -575,7 +796,7 @@ _InterlockedCompareExchange_HLERelease(long volatile *_Destination,
static __inline__ __int64 __DEFAULT_FN_ATTRS
_InterlockedCompareExchange64_HLEAcquire(__int64 volatile *_Destination,
__int64 _Exchange, __int64 _Comparand) {
- __asm__ __volatile__(".byte 0xf2 ; lock ; cmpxchg %2, %1"
+ __asm__ __volatile__(".byte 0xf2 ; lock ; cmpxchg {%2, %1|%1, %2}"
: "+a" (_Comparand), "+m" (*_Destination)
: "r" (_Exchange) : "memory");
return _Comparand;
@@ -583,7 +804,7 @@ _InterlockedCompareExchange64_HLEAcquire(__int64 volatile *_Destination,
static __inline__ __int64 __DEFAULT_FN_ATTRS
_InterlockedCompareExchange64_HLERelease(__int64 volatile *_Destination,
__int64 _Exchange, __int64 _Comparand) {
- __asm__ __volatile__(".byte 0xf3 ; lock ; cmpxchg %2, %1"
+ __asm__ __volatile__(".byte 0xf3 ; lock ; cmpxchg {%2, %1|%1, %2}"
: "+a" (_Comparand), "+m" (*_Destination)
: "r" (_Exchange) : "memory");
return _Comparand;
diff --git a/contrib/llvm-project/clang/lib/Headers/intrin.h b/contrib/llvm-project/clang/lib/Headers/intrin.h
index 34ec79d6acbc..9ebaea9fee94 100644
--- a/contrib/llvm-project/clang/lib/Headers/intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/intrin.h
@@ -97,8 +97,9 @@ unsigned long __readcr8(void);
unsigned int __readdr(unsigned int);
#ifdef __i386__
unsigned char __readfsbyte(unsigned long);
-unsigned __int64 __readfsqword(unsigned long);
unsigned short __readfsword(unsigned long);
+unsigned long __readfsdword(unsigned long);
+unsigned __int64 __readfsqword(unsigned long);
#endif
unsigned __int64 __readmsr(unsigned long);
unsigned __int64 __readpmc(unsigned long);
@@ -149,10 +150,8 @@ long _InterlockedExchangeAdd_HLEAcquire(long volatile *, long);
long _InterlockedExchangeAdd_HLERelease(long volatile *, long);
__int64 _InterlockedExchangeAdd64_HLEAcquire(__int64 volatile *, __int64);
__int64 _InterlockedExchangeAdd64_HLERelease(__int64 volatile *, __int64);
-void __attribute__((__deprecated__(
- "use other intrinsics or C++11 atomics instead"))) _ReadBarrier(void);
-void __attribute__((__deprecated__(
- "use other intrinsics or C++11 atomics instead"))) _ReadWriteBarrier(void);
+void _ReadBarrier(void);
+void _ReadWriteBarrier(void);
unsigned int _rorx_u32(unsigned int, const unsigned int);
int _sarx_i32(int, unsigned int);
#if __STDC_HOSTED__
@@ -163,8 +162,7 @@ unsigned int _shrx_u32(unsigned int, unsigned int);
void _Store_HLERelease(long volatile *, long);
void _Store64_HLERelease(__int64 volatile *, __int64);
void _StorePointer_HLERelease(void *volatile *, void *);
-void __attribute__((__deprecated__(
- "use other intrinsics or C++11 atomics instead"))) _WriteBarrier(void);
+void _WriteBarrier(void);
unsigned __int32 xbegin(void);
void _xend(void);
@@ -457,7 +455,9 @@ static __inline__ void __DEFAULT_FN_ATTRS __movsb(unsigned char *__dst,
:
: "memory");
#else
- __asm__ __volatile__("xchg %%esi, %1\nrep movsb\nxchg %%esi, %1"
+ __asm__ __volatile__("xchg {%%esi, %1|%1, esi}\n"
+ "rep movsb\n"
+ "xchg {%%esi, %1|%1, esi}"
: "+D"(__dst), "+r"(__src), "+c"(__n)
:
: "memory");
@@ -467,12 +467,14 @@ static __inline__ void __DEFAULT_FN_ATTRS __movsd(unsigned long *__dst,
unsigned long const *__src,
size_t __n) {
#if defined(__x86_64__)
- __asm__ __volatile__("rep movsl"
+ __asm__ __volatile__("rep movs{l|d}"
: "+D"(__dst), "+S"(__src), "+c"(__n)
:
: "memory");
#else
- __asm__ __volatile__("xchg %%esi, %1\nrep movsl\nxchg %%esi, %1"
+ __asm__ __volatile__("xchg {%%esi, %1|%1, esi}\n"
+ "rep movs{l|d}\n"
+ "xchg {%%esi, %1|%1, esi}"
: "+D"(__dst), "+r"(__src), "+c"(__n)
:
: "memory");
@@ -487,7 +489,9 @@ static __inline__ void __DEFAULT_FN_ATTRS __movsw(unsigned short *__dst,
:
: "memory");
#else
- __asm__ __volatile__("xchg %%esi, %1\nrep movsw\nxchg %%esi, %1"
+ __asm__ __volatile__("xchg {%%esi, %1|%1, esi}\n"
+ "rep movsw\n"
+ "xchg {%%esi, %1|%1, esi}"
: "+D"(__dst), "+r"(__src), "+c"(__n)
:
: "memory");
@@ -496,7 +500,7 @@ static __inline__ void __DEFAULT_FN_ATTRS __movsw(unsigned short *__dst,
static __inline__ void __DEFAULT_FN_ATTRS __stosd(unsigned long *__dst,
unsigned long __x,
size_t __n) {
- __asm__ __volatile__("rep stosl"
+ __asm__ __volatile__("rep stos{l|d}"
: "+D"(__dst), "+c"(__n)
: "a"(__x)
: "memory");
@@ -530,27 +534,6 @@ static __inline__ void __DEFAULT_FN_ATTRS __stosq(unsigned __int64 *__dst,
|* Misc
\*----------------------------------------------------------------------------*/
#if defined(__i386__) || defined(__x86_64__)
-#if defined(__i386__)
-#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx) \
- __asm("cpuid" \
- : "=a"(__eax), "=b"(__ebx), "=c"(__ecx), "=d"(__edx) \
- : "0"(__leaf), "2"(__count))
-#else
-/* x86-64 uses %rbx as the base register, so preserve it. */
-#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx) \
- __asm("xchgq %%rbx,%q1\n" \
- "cpuid\n" \
- "xchgq %%rbx,%q1" \
- : "=a"(__eax), "=r"(__ebx), "=c"(__ecx), "=d"(__edx) \
- : "0"(__leaf), "2"(__count))
-#endif
-static __inline__ void __DEFAULT_FN_ATTRS __cpuid(int __info[4], int __level) {
- __cpuid_count(__level, 0, __info[0], __info[1], __info[2], __info[3]);
-}
-static __inline__ void __DEFAULT_FN_ATTRS __cpuidex(int __info[4], int __level,
- int __ecx) {
- __cpuid_count(__level, __ecx, __info[0], __info[1], __info[2], __info[3]);
-}
static __inline__ void __DEFAULT_FN_ATTRS __halt(void) {
__asm__ volatile("hlt");
}
@@ -577,6 +560,34 @@ unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64 val);
__int64 __mulh(__int64 __a, __int64 __b);
unsigned __int64 __umulh(unsigned __int64 __a, unsigned __int64 __b);
+
+void __break(int);
+
+void __writex18byte(unsigned long offset, unsigned char data);
+void __writex18word(unsigned long offset, unsigned short data);
+void __writex18dword(unsigned long offset, unsigned long data);
+void __writex18qword(unsigned long offset, unsigned __int64 data);
+
+unsigned char __readx18byte(unsigned long offset);
+unsigned short __readx18word(unsigned long offset);
+unsigned long __readx18dword(unsigned long offset);
+unsigned __int64 __readx18qword(unsigned long offset);
+
+double _CopyDoubleFromInt64(__int64);
+float _CopyFloatFromInt32(__int32);
+__int32 _CopyInt32FromFloat(float);
+__int64 _CopyInt64FromDouble(double);
+
+unsigned int _CountLeadingOnes(unsigned long);
+unsigned int _CountLeadingOnes64(unsigned __int64);
+unsigned int _CountLeadingSigns(long);
+unsigned int _CountLeadingSigns64(__int64);
+unsigned int _CountLeadingZeros(unsigned long);
+unsigned int _CountLeadingZeros64(unsigned _int64);
+unsigned int _CountOneBits(unsigned long);
+unsigned int _CountOneBits64(unsigned __int64);
+
+void __cdecl __prefetch(void *);
#endif
/*----------------------------------------------------------------------------*\
@@ -600,13 +611,17 @@ __readmsr(unsigned long __register) {
static __inline__ unsigned __LPTRINT_TYPE__ __DEFAULT_FN_ATTRS __readcr3(void) {
unsigned __LPTRINT_TYPE__ __cr3_val;
- __asm__ __volatile__ ("mov %%cr3, %0" : "=r"(__cr3_val) : : "memory");
+ __asm__ __volatile__(
+ "mov {%%cr3, %0|%0, cr3}"
+ : "=r"(__cr3_val)
+ :
+ : "memory");
return __cr3_val;
}
static __inline__ void __DEFAULT_FN_ATTRS
__writecr3(unsigned __INTPTR_TYPE__ __cr3_val) {
- __asm__ ("mov %0, %%cr3" : : "r"(__cr3_val) : "memory");
+ __asm__ ("mov {%0, %%cr3|cr3, %0}" : : "r"(__cr3_val) : "memory");
}
#ifdef __cplusplus
diff --git a/contrib/llvm-project/clang/lib/Headers/keylockerintrin.h b/contrib/llvm-project/clang/lib/Headers/keylockerintrin.h
index 68b0a5689618..1994ac42070a 100644
--- a/contrib/llvm-project/clang/lib/Headers/keylockerintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/keylockerintrin.h
@@ -46,7 +46,7 @@
///
/// This intrinsic corresponds to the <c> LOADIWKEY </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// IF CPL > 0 // LOADKWKEY only allowed at ring 0 (supervisor mode)
/// GP (0)
/// FI
@@ -91,7 +91,7 @@
/// AF := 0
/// PF := 0
/// CF := 0
-/// \endoperation
+/// \endcode
static __inline__ void __DEFAULT_FN_ATTRS
_mm_loadiwkey (unsigned int __ctl, __m128i __intkey,
__m128i __enkey_lo, __m128i __enkey_hi) {
@@ -99,14 +99,14 @@ _mm_loadiwkey (unsigned int __ctl, __m128i __intkey,
}
/// Wrap a 128-bit AES key from __key into a key handle and output in
-/// ((__m128i*)__h) to ((__m128i*)__h) + 5 and a 32-bit value as return.
+/// ((__m128i*)__h) to ((__m128i*)__h) + 2 and a 32-bit value as return.
/// The explicit source operand __htype specifies handle restrictions.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> ENCODEKEY128 </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// InputKey[127:0] := __key[127:0]
/// KeyMetadata[2:0] := __htype[2:0]
/// KeyMetadata[23:3] := 0 // Reserved for future usage
@@ -120,23 +120,20 @@ _mm_loadiwkey (unsigned int __ctl, __m128i __intkey,
/// MEM[__h+127:__h] := Handle[127:0] // AAD
/// MEM[__h+255:__h+128] := Handle[255:128] // Integrity Tag
/// MEM[__h+383:__h+256] := Handle[383:256] // CipherText
-/// MEM[__h+511:__h+384] := 0 // Reserved for future usage
-/// MEM[__h+639:__h+512] := 0 // Reserved for future usage
-/// MEM[__h+767:__h+640] := 0 // Reserved for future usage
/// OF := 0
/// SF := 0
/// ZF := 0
/// AF := 0
/// PF := 0
/// CF := 0
-/// \endoperation
+/// \endcode
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_mm_encodekey128_u32(unsigned int __htype, __m128i __key, void *__h) {
return __builtin_ia32_encodekey128_u32(__htype, (__v2di)__key, __h);
}
/// Wrap a 256-bit AES key from __key_hi:__key_lo into a key handle, then
-/// output handle in ((__m128i*)__h) to ((__m128i*)__h) + 6 and
+/// output handle in ((__m128i*)__h) to ((__m128i*)__h) + 3 and
/// a 32-bit value as return.
/// The explicit source operand __htype specifies handle restrictions.
///
@@ -144,7 +141,7 @@ _mm_encodekey128_u32(unsigned int __htype, __m128i __key, void *__h) {
///
/// This intrinsic corresponds to the <c> ENCODEKEY256 </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// InputKey[127:0] := __key_lo[127:0]
/// InputKey[255:128] := __key_hi[255:128]
/// KeyMetadata[2:0] := __htype[2:0]
@@ -160,16 +157,13 @@ _mm_encodekey128_u32(unsigned int __htype, __m128i __key, void *__h) {
/// MEM[__h+255:__h+128] := Handle[255:128] // Tag
/// MEM[__h+383:__h+256] := Handle[383:256] // CipherText[127:0]
/// MEM[__h+511:__h+384] := Handle[511:384] // CipherText[255:128]
-/// MEM[__h+639:__h+512] := 0 // Reserved for future usage
-/// MEM[__h+767:__h+640] := 0 // Reserved for future usage
-/// MEM[__h+895:__h+768] := 0 Integrity// Reserved for future usage
/// OF := 0
/// SF := 0
/// ZF := 0
/// AF := 0
/// PF := 0
/// CF := 0
-/// \endoperation
+/// \endcode
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_mm_encodekey256_u32(unsigned int __htype, __m128i __key_lo, __m128i __key_hi,
void *__h) {
@@ -185,7 +179,7 @@ _mm_encodekey256_u32(unsigned int __htype, __m128i __key_lo, __m128i __key_hi,
///
/// This intrinsic corresponds to the <c> AESENC128KL </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic.
/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
/// (Handle[127:0] AND (CPL > 0)) ||
@@ -208,7 +202,7 @@ _mm_encodekey256_u32(unsigned int __htype, __m128i __key_lo, __m128i __key_hi,
/// AF := 0
/// PF := 0
/// CF := 0
-/// \endoperation
+/// \endcode
static __inline__ unsigned char __DEFAULT_FN_ATTRS
_mm_aesenc128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
return __builtin_ia32_aesenc128kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
@@ -222,7 +216,7 @@ _mm_aesenc128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
///
/// This intrinsic corresponds to the <c> AESENC256KL </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// Handle[511:0] := MEM[__h+511:__h] // Load is not guaranteed to be atomic.
/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
/// (Handle[127:0] AND (CPL > 0)) ||
@@ -247,7 +241,7 @@ _mm_aesenc128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
/// AF := 0
/// PF := 0
/// CF := 0
-/// \endoperation
+/// \endcode
static __inline__ unsigned char __DEFAULT_FN_ATTRS
_mm_aesenc256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
return __builtin_ia32_aesenc256kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
@@ -261,7 +255,7 @@ _mm_aesenc256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
///
/// This intrinsic corresponds to the <c> AESDEC128KL </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic.
/// IllegalHandle := (HandleReservedBitSet (Handle[383:0]) ||
/// (Handle[127:0] AND (CPL > 0)) ||
@@ -286,7 +280,7 @@ _mm_aesenc256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
/// AF := 0
/// PF := 0
/// CF := 0
-/// \endoperation
+/// \endcode
static __inline__ unsigned char __DEFAULT_FN_ATTRS
_mm_aesdec128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
return __builtin_ia32_aesdec128kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
@@ -300,7 +294,7 @@ _mm_aesdec128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
///
/// This intrinsic corresponds to the <c> AESDEC256KL </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// Handle[511:0] := MEM[__h+511:__h]
/// IllegalHandle := (HandleReservedBitSet (Handle[511:0]) ||
/// (Handle[127:0] AND (CPL > 0)) ||
@@ -325,7 +319,7 @@ _mm_aesdec128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
/// AF := 0
/// PF := 0
/// CF := 0
-/// \endoperation
+/// \endcode
static __inline__ unsigned char __DEFAULT_FN_ATTRS
_mm_aesdec256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
return __builtin_ia32_aesdec256kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
@@ -352,7 +346,7 @@ _mm_aesdec256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
///
/// This intrinsic corresponds to the <c> AESENCWIDE128KL </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// Handle := MEM[__h+383:__h]
/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
/// (Handle[127:0] AND (CPL > 0)) ||
@@ -383,7 +377,7 @@ _mm_aesdec256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
/// AF := 0
/// PF := 0
/// CF := 0
-/// \endoperation
+/// \endcode
static __inline__ unsigned char __DEFAULT_FN_ATTRS
_mm_aesencwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
return __builtin_ia32_aesencwide128kl_u8((__v2di *)__odata,
@@ -398,7 +392,7 @@ _mm_aesencwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void*
///
/// This intrinsic corresponds to the <c> AESENCWIDE256KL </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// Handle[511:0] := MEM[__h+511:__h]
/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
/// (Handle[127:0] AND (CPL > 0)) ||
@@ -429,7 +423,7 @@ _mm_aesencwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void*
/// AF := 0
/// PF := 0
/// CF := 0
-/// \endoperation
+/// \endcode
static __inline__ unsigned char __DEFAULT_FN_ATTRS
_mm_aesencwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
return __builtin_ia32_aesencwide256kl_u8((__v2di *)__odata,
@@ -444,7 +438,7 @@ _mm_aesencwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void*
///
/// This intrinsic corresponds to the <c> AESDECWIDE128KL </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// Handle[383:0] := MEM[__h+383:__h]
/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
/// (Handle[127:0] AND (CPL > 0)) ||
@@ -475,7 +469,7 @@ _mm_aesencwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void*
/// AF := 0
/// PF := 0
/// CF := 0
-/// \endoperation
+/// \endcode
static __inline__ unsigned char __DEFAULT_FN_ATTRS
_mm_aesdecwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
return __builtin_ia32_aesdecwide128kl_u8((__v2di *)__odata,
@@ -490,7 +484,7 @@ _mm_aesdecwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void*
///
/// This intrinsic corresponds to the <c> AESDECWIDE256KL </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// Handle[511:0] := MEM[__h+511:__h]
/// IllegalHandle = ( HandleReservedBitSet (Handle[511:0]) ||
/// (Handle[127:0] AND (CPL > 0)) ||
@@ -521,7 +515,7 @@ _mm_aesdecwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void*
/// AF := 0
/// PF := 0
/// CF := 0
-/// \endoperation
+/// \endcode
static __inline__ unsigned char __DEFAULT_FN_ATTRS
_mm_aesdecwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
return __builtin_ia32_aesdecwide256kl_u8((__v2di *)__odata,
diff --git a/contrib/llvm-project/clang/lib/Headers/larchintrin.h b/contrib/llvm-project/clang/lib/Headers/larchintrin.h
new file mode 100644
index 000000000000..f4218295919a
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/larchintrin.h
@@ -0,0 +1,246 @@
+/*===------------ larchintrin.h - LoongArch intrinsics ---------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _LOONGARCH_BASE_INTRIN_H
+#define _LOONGARCH_BASE_INTRIN_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct rdtime {
+ unsigned int value;
+ unsigned int timeid;
+} __rdtime_t;
+
+#if __loongarch_grlen == 64
+typedef struct drdtime {
+ unsigned long dvalue;
+ unsigned long dtimeid;
+} __drdtime_t;
+
+extern __inline __drdtime_t
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __rdtime_d(void) {
+ __drdtime_t __drdtime;
+ __asm__ volatile(
+ "rdtime.d %[val], %[tid]\n\t"
+ : [val] "=&r"(__drdtime.dvalue), [tid] "=&r"(__drdtime.dtimeid));
+ return __drdtime;
+}
+#endif
+
+extern __inline __rdtime_t
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __rdtimeh_w(void) {
+ __rdtime_t __rdtime;
+ __asm__ volatile("rdtimeh.w %[val], %[tid]\n\t"
+ : [val] "=&r"(__rdtime.value), [tid] "=&r"(__rdtime.timeid));
+ return __rdtime;
+}
+
+extern __inline __rdtime_t
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __rdtimel_w(void) {
+ __rdtime_t __rdtime;
+ __asm__ volatile("rdtimel.w %[val], %[tid]\n\t"
+ : [val] "=&r"(__rdtime.value), [tid] "=&r"(__rdtime.timeid));
+ return __rdtime;
+}
+
+#if __loongarch_grlen == 64
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __crc_w_b_w(char _1, int _2) {
+ return (int)__builtin_loongarch_crc_w_b_w((char)_1, (int)_2);
+}
+
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __crc_w_h_w(short _1, int _2) {
+ return (int)__builtin_loongarch_crc_w_h_w((short)_1, (int)_2);
+}
+
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __crc_w_w_w(int _1, int _2) {
+ return (int)__builtin_loongarch_crc_w_w_w((int)_1, (int)_2);
+}
+
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __crc_w_d_w(long int _1, int _2) {
+ return (int)__builtin_loongarch_crc_w_d_w((long int)_1, (int)_2);
+}
+
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __crcc_w_b_w(char _1, int _2) {
+ return (int)__builtin_loongarch_crcc_w_b_w((char)_1, (int)_2);
+}
+
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __crcc_w_h_w(short _1, int _2) {
+ return (int)__builtin_loongarch_crcc_w_h_w((short)_1, (int)_2);
+}
+
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __crcc_w_w_w(int _1, int _2) {
+ return (int)__builtin_loongarch_crcc_w_w_w((int)_1, (int)_2);
+}
+
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __crcc_w_d_w(long int _1, int _2) {
+ return (int)__builtin_loongarch_crcc_w_d_w((long int)_1, (int)_2);
+}
+#endif
+
+#define __break(/*ui15*/ _1) __builtin_loongarch_break((_1))
+
+#if __loongarch_grlen == 32
+#define __cacop_w(/*uimm5*/ _1, /*unsigned int*/ _2, /*simm12*/ _3) \
+ ((void)__builtin_loongarch_cacop_w((_1), (unsigned int)(_2), (_3)))
+#endif
+
+#if __loongarch_grlen == 64
+#define __cacop_d(/*uimm5*/ _1, /*unsigned long int*/ _2, /*simm12*/ _3) \
+ ((void)__builtin_loongarch_cacop_d((_1), (unsigned long int)(_2), (_3)))
+#endif
+
+#define __dbar(/*ui15*/ _1) __builtin_loongarch_dbar((_1))
+
+#define __ibar(/*ui15*/ _1) __builtin_loongarch_ibar((_1))
+
+#define __movfcsr2gr(/*ui5*/ _1) __builtin_loongarch_movfcsr2gr((_1));
+
+#define __movgr2fcsr(/*ui5*/ _1, _2) \
+ __builtin_loongarch_movgr2fcsr((_1), (unsigned int)_2);
+
+#define __syscall(/*ui15*/ _1) __builtin_loongarch_syscall((_1))
+
+#define __csrrd_w(/*ui14*/ _1) ((unsigned int)__builtin_loongarch_csrrd_w((_1)))
+
+#define __csrwr_w(/*unsigned int*/ _1, /*ui14*/ _2) \
+ ((unsigned int)__builtin_loongarch_csrwr_w((unsigned int)(_1), (_2)))
+
+#define __csrxchg_w(/*unsigned int*/ _1, /*unsigned int*/ _2, /*ui14*/ _3) \
+ ((unsigned int)__builtin_loongarch_csrxchg_w((unsigned int)(_1), \
+ (unsigned int)(_2), (_3)))
+
+#if __loongarch_grlen == 64
+#define __csrrd_d(/*ui14*/ _1) \
+ ((unsigned long int)__builtin_loongarch_csrrd_d((_1)))
+
+#define __csrwr_d(/*unsigned long int*/ _1, /*ui14*/ _2) \
+ ((unsigned long int)__builtin_loongarch_csrwr_d((unsigned long int)(_1), \
+ (_2)))
+
+#define __csrxchg_d(/*unsigned long int*/ _1, /*unsigned long int*/ _2, \
+ /*ui14*/ _3) \
+ ((unsigned long int)__builtin_loongarch_csrxchg_d( \
+ (unsigned long int)(_1), (unsigned long int)(_2), (_3)))
+#endif
+
+extern __inline unsigned char
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrrd_b(unsigned int _1) {
+ return (unsigned char)__builtin_loongarch_iocsrrd_b((unsigned int)_1);
+}
+
+extern __inline unsigned short
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrrd_h(unsigned int _1) {
+ return (unsigned short)__builtin_loongarch_iocsrrd_h((unsigned int)_1);
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrrd_w(unsigned int _1) {
+ return (unsigned int)__builtin_loongarch_iocsrrd_w((unsigned int)_1);
+}
+
+#if __loongarch_grlen == 64
+extern __inline unsigned long int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrrd_d(unsigned int _1) {
+ return (unsigned long int)__builtin_loongarch_iocsrrd_d((unsigned int)_1);
+}
+#endif
+
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrwr_b(unsigned char _1, unsigned int _2) {
+ __builtin_loongarch_iocsrwr_b((unsigned char)_1, (unsigned int)_2);
+}
+
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrwr_h(unsigned short _1, unsigned int _2) {
+ __builtin_loongarch_iocsrwr_h((unsigned short)_1, (unsigned int)_2);
+}
+
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrwr_w(unsigned int _1, unsigned int _2) {
+ __builtin_loongarch_iocsrwr_w((unsigned int)_1, (unsigned int)_2);
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __cpucfg(unsigned int _1) {
+ return (unsigned int)__builtin_loongarch_cpucfg((unsigned int)_1);
+}
+
+#if __loongarch_grlen == 64
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrwr_d(unsigned long int _1, unsigned int _2) {
+ __builtin_loongarch_iocsrwr_d((unsigned long int)_1, (unsigned int)_2);
+}
+
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __asrtgt_d(long int _1, long int _2) {
+ __builtin_loongarch_asrtgt_d((long int)_1, (long int)_2);
+}
+
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __asrtle_d(long int _1, long int _2) {
+ __builtin_loongarch_asrtle_d((long int)_1, (long int)_2);
+}
+#endif
+
+#if __loongarch_grlen == 64
+#define __lddir_d(/*long int*/ _1, /*ui5*/ _2) \
+ ((long int)__builtin_loongarch_lddir_d((long int)(_1), (_2)))
+
+#define __ldpte_d(/*long int*/ _1, /*ui5*/ _2) \
+ ((void)__builtin_loongarch_ldpte_d((long int)(_1), (_2)))
+#endif
+
+#define __frecipe_s(/*float*/ _1) \
+ (float)__builtin_loongarch_frecipe_s((float)_1)
+
+#define __frecipe_d(/*double*/ _1) \
+ (double)__builtin_loongarch_frecipe_d((double)_1)
+
+#define __frsqrte_s(/*float*/ _1) \
+ (float)__builtin_loongarch_frsqrte_s((float)_1)
+
+#define __frsqrte_d(/*double*/ _1) \
+ (double)__builtin_loongarch_frsqrte_d((double)_1)
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _LOONGARCH_BASE_INTRIN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/lasxintrin.h b/contrib/llvm-project/clang/lib/Headers/lasxintrin.h
new file mode 100644
index 000000000000..dafc2a2f3e6a
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/lasxintrin.h
@@ -0,0 +1,3884 @@
+/*===------------ lasxintrin.h - LoongArch LASX intrinsics -----------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _LOONGSON_ASXINTRIN_H
+#define _LOONGSON_ASXINTRIN_H 1
+
+#if defined(__loongarch_asx)
+
+typedef signed char v32i8 __attribute__((vector_size(32), aligned(32)));
+typedef signed char v32i8_b __attribute__((vector_size(32), aligned(1)));
+typedef unsigned char v32u8 __attribute__((vector_size(32), aligned(32)));
+typedef unsigned char v32u8_b __attribute__((vector_size(32), aligned(1)));
+typedef short v16i16 __attribute__((vector_size(32), aligned(32)));
+typedef short v16i16_h __attribute__((vector_size(32), aligned(2)));
+typedef unsigned short v16u16 __attribute__((vector_size(32), aligned(32)));
+typedef unsigned short v16u16_h __attribute__((vector_size(32), aligned(2)));
+typedef int v8i32 __attribute__((vector_size(32), aligned(32)));
+typedef int v8i32_w __attribute__((vector_size(32), aligned(4)));
+typedef unsigned int v8u32 __attribute__((vector_size(32), aligned(32)));
+typedef unsigned int v8u32_w __attribute__((vector_size(32), aligned(4)));
+typedef long long v4i64 __attribute__((vector_size(32), aligned(32)));
+typedef long long v4i64_d __attribute__((vector_size(32), aligned(8)));
+typedef unsigned long long v4u64 __attribute__((vector_size(32), aligned(32)));
+typedef unsigned long long v4u64_d __attribute__((vector_size(32), aligned(8)));
+typedef float v8f32 __attribute__((vector_size(32), aligned(32)));
+typedef float v8f32_w __attribute__((vector_size(32), aligned(4)));
+typedef double v4f64 __attribute__((vector_size(32), aligned(32)));
+typedef double v4f64_d __attribute__((vector_size(32), aligned(8)));
+
+typedef double v4f64 __attribute__((vector_size(32), aligned(32)));
+typedef double v4f64_d __attribute__((vector_size(32), aligned(8)));
+
+typedef float __m256 __attribute__((__vector_size__(32), __may_alias__));
+typedef long long __m256i __attribute__((__vector_size__(32), __may_alias__));
+typedef double __m256d __attribute__((__vector_size__(32), __may_alias__));
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsll_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsll_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsll_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsll_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsll_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsll_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsll_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsll_d((v4i64)_1, (v4i64)_2);
+}
+
+#define __lasx_xvslli_b(/*__m256i*/ _1, /*ui3*/ _2) \
+ ((__m256i)__builtin_lasx_xvslli_b((v32i8)(_1), (_2)))
+
+#define __lasx_xvslli_h(/*__m256i*/ _1, /*ui4*/ _2) \
+ ((__m256i)__builtin_lasx_xvslli_h((v16i16)(_1), (_2)))
+
+#define __lasx_xvslli_w(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvslli_w((v8i32)(_1), (_2)))
+
+#define __lasx_xvslli_d(/*__m256i*/ _1, /*ui6*/ _2) \
+ ((__m256i)__builtin_lasx_xvslli_d((v4i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsra_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsra_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsra_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsra_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsra_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsra_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsra_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsra_d((v4i64)_1, (v4i64)_2);
+}
+
+#define __lasx_xvsrai_b(/*__m256i*/ _1, /*ui3*/ _2) \
+ ((__m256i)__builtin_lasx_xvsrai_b((v32i8)(_1), (_2)))
+
+#define __lasx_xvsrai_h(/*__m256i*/ _1, /*ui4*/ _2) \
+ ((__m256i)__builtin_lasx_xvsrai_h((v16i16)(_1), (_2)))
+
+#define __lasx_xvsrai_w(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvsrai_w((v8i32)(_1), (_2)))
+
+#define __lasx_xvsrai_d(/*__m256i*/ _1, /*ui6*/ _2) \
+ ((__m256i)__builtin_lasx_xvsrai_d((v4i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsrar_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsrar_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsrar_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsrar_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsrar_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsrar_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsrar_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsrar_d((v4i64)_1, (v4i64)_2);
+}
+
+#define __lasx_xvsrari_b(/*__m256i*/ _1, /*ui3*/ _2) \
+ ((__m256i)__builtin_lasx_xvsrari_b((v32i8)(_1), (_2)))
+
+#define __lasx_xvsrari_h(/*__m256i*/ _1, /*ui4*/ _2) \
+ ((__m256i)__builtin_lasx_xvsrari_h((v16i16)(_1), (_2)))
+
+#define __lasx_xvsrari_w(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvsrari_w((v8i32)(_1), (_2)))
+
+#define __lasx_xvsrari_d(/*__m256i*/ _1, /*ui6*/ _2) \
+ ((__m256i)__builtin_lasx_xvsrari_d((v4i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsrl_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsrl_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsrl_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsrl_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsrl_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsrl_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsrl_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsrl_d((v4i64)_1, (v4i64)_2);
+}
+
+#define __lasx_xvsrli_b(/*__m256i*/ _1, /*ui3*/ _2) \
+ ((__m256i)__builtin_lasx_xvsrli_b((v32i8)(_1), (_2)))
+
+#define __lasx_xvsrli_h(/*__m256i*/ _1, /*ui4*/ _2) \
+ ((__m256i)__builtin_lasx_xvsrli_h((v16i16)(_1), (_2)))
+
+#define __lasx_xvsrli_w(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvsrli_w((v8i32)(_1), (_2)))
+
+#define __lasx_xvsrli_d(/*__m256i*/ _1, /*ui6*/ _2) \
+ ((__m256i)__builtin_lasx_xvsrli_d((v4i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsrlr_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsrlr_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsrlr_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsrlr_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsrlr_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsrlr_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsrlr_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsrlr_d((v4i64)_1, (v4i64)_2);
+}
+
+#define __lasx_xvsrlri_b(/*__m256i*/ _1, /*ui3*/ _2) \
+ ((__m256i)__builtin_lasx_xvsrlri_b((v32i8)(_1), (_2)))
+
+#define __lasx_xvsrlri_h(/*__m256i*/ _1, /*ui4*/ _2) \
+ ((__m256i)__builtin_lasx_xvsrlri_h((v16i16)(_1), (_2)))
+
+#define __lasx_xvsrlri_w(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvsrlri_w((v8i32)(_1), (_2)))
+
+#define __lasx_xvsrlri_d(/*__m256i*/ _1, /*ui6*/ _2) \
+ ((__m256i)__builtin_lasx_xvsrlri_d((v4i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvbitclr_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvbitclr_b((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvbitclr_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvbitclr_h((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvbitclr_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvbitclr_w((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvbitclr_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvbitclr_d((v4u64)_1, (v4u64)_2);
+}
+
+#define __lasx_xvbitclri_b(/*__m256i*/ _1, /*ui3*/ _2) \
+ ((__m256i)__builtin_lasx_xvbitclri_b((v32u8)(_1), (_2)))
+
+#define __lasx_xvbitclri_h(/*__m256i*/ _1, /*ui4*/ _2) \
+ ((__m256i)__builtin_lasx_xvbitclri_h((v16u16)(_1), (_2)))
+
+#define __lasx_xvbitclri_w(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvbitclri_w((v8u32)(_1), (_2)))
+
+#define __lasx_xvbitclri_d(/*__m256i*/ _1, /*ui6*/ _2) \
+ ((__m256i)__builtin_lasx_xvbitclri_d((v4u64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvbitset_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvbitset_b((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvbitset_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvbitset_h((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvbitset_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvbitset_w((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvbitset_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvbitset_d((v4u64)_1, (v4u64)_2);
+}
+
+#define __lasx_xvbitseti_b(/*__m256i*/ _1, /*ui3*/ _2) \
+ ((__m256i)__builtin_lasx_xvbitseti_b((v32u8)(_1), (_2)))
+
+#define __lasx_xvbitseti_h(/*__m256i*/ _1, /*ui4*/ _2) \
+ ((__m256i)__builtin_lasx_xvbitseti_h((v16u16)(_1), (_2)))
+
+#define __lasx_xvbitseti_w(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvbitseti_w((v8u32)(_1), (_2)))
+
+#define __lasx_xvbitseti_d(/*__m256i*/ _1, /*ui6*/ _2) \
+ ((__m256i)__builtin_lasx_xvbitseti_d((v4u64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvbitrev_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvbitrev_b((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvbitrev_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvbitrev_h((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvbitrev_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvbitrev_w((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvbitrev_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvbitrev_d((v4u64)_1, (v4u64)_2);
+}
+
+#define __lasx_xvbitrevi_b(/*__m256i*/ _1, /*ui3*/ _2) \
+ ((__m256i)__builtin_lasx_xvbitrevi_b((v32u8)(_1), (_2)))
+
+#define __lasx_xvbitrevi_h(/*__m256i*/ _1, /*ui4*/ _2) \
+ ((__m256i)__builtin_lasx_xvbitrevi_h((v16u16)(_1), (_2)))
+
+#define __lasx_xvbitrevi_w(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvbitrevi_w((v8u32)(_1), (_2)))
+
+#define __lasx_xvbitrevi_d(/*__m256i*/ _1, /*ui6*/ _2) \
+ ((__m256i)__builtin_lasx_xvbitrevi_d((v4u64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvadd_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvadd_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvadd_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvadd_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvadd_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvadd_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvadd_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvadd_d((v4i64)_1, (v4i64)_2);
+}
+
+#define __lasx_xvaddi_bu(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvaddi_bu((v32i8)(_1), (_2)))
+
+#define __lasx_xvaddi_hu(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvaddi_hu((v16i16)(_1), (_2)))
+
+#define __lasx_xvaddi_wu(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvaddi_wu((v8i32)(_1), (_2)))
+
+#define __lasx_xvaddi_du(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvaddi_du((v4i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsub_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsub_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsub_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsub_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsub_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsub_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsub_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsub_d((v4i64)_1, (v4i64)_2);
+}
+
+#define __lasx_xvsubi_bu(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvsubi_bu((v32i8)(_1), (_2)))
+
+#define __lasx_xvsubi_hu(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvsubi_hu((v16i16)(_1), (_2)))
+
+#define __lasx_xvsubi_wu(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvsubi_wu((v8i32)(_1), (_2)))
+
+#define __lasx_xvsubi_du(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvsubi_du((v4i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmax_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmax_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmax_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmax_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmax_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmax_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmax_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmax_d((v4i64)_1, (v4i64)_2);
+}
+
+#define __lasx_xvmaxi_b(/*__m256i*/ _1, /*si5*/ _2) \
+ ((__m256i)__builtin_lasx_xvmaxi_b((v32i8)(_1), (_2)))
+
+#define __lasx_xvmaxi_h(/*__m256i*/ _1, /*si5*/ _2) \
+ ((__m256i)__builtin_lasx_xvmaxi_h((v16i16)(_1), (_2)))
+
+#define __lasx_xvmaxi_w(/*__m256i*/ _1, /*si5*/ _2) \
+ ((__m256i)__builtin_lasx_xvmaxi_w((v8i32)(_1), (_2)))
+
+#define __lasx_xvmaxi_d(/*__m256i*/ _1, /*si5*/ _2) \
+ ((__m256i)__builtin_lasx_xvmaxi_d((v4i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmax_bu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmax_bu((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmax_hu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmax_hu((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmax_wu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmax_wu((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmax_du(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmax_du((v4u64)_1, (v4u64)_2);
+}
+
+#define __lasx_xvmaxi_bu(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvmaxi_bu((v32u8)(_1), (_2)))
+
+#define __lasx_xvmaxi_hu(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvmaxi_hu((v16u16)(_1), (_2)))
+
+#define __lasx_xvmaxi_wu(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvmaxi_wu((v8u32)(_1), (_2)))
+
+#define __lasx_xvmaxi_du(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvmaxi_du((v4u64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmin_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmin_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmin_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmin_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmin_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmin_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmin_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmin_d((v4i64)_1, (v4i64)_2);
+}
+
+#define __lasx_xvmini_b(/*__m256i*/ _1, /*si5*/ _2) \
+ ((__m256i)__builtin_lasx_xvmini_b((v32i8)(_1), (_2)))
+
+#define __lasx_xvmini_h(/*__m256i*/ _1, /*si5*/ _2) \
+ ((__m256i)__builtin_lasx_xvmini_h((v16i16)(_1), (_2)))
+
+#define __lasx_xvmini_w(/*__m256i*/ _1, /*si5*/ _2) \
+ ((__m256i)__builtin_lasx_xvmini_w((v8i32)(_1), (_2)))
+
+#define __lasx_xvmini_d(/*__m256i*/ _1, /*si5*/ _2) \
+ ((__m256i)__builtin_lasx_xvmini_d((v4i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmin_bu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmin_bu((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmin_hu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmin_hu((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmin_wu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmin_wu((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmin_du(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmin_du((v4u64)_1, (v4u64)_2);
+}
+
+#define __lasx_xvmini_bu(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvmini_bu((v32u8)(_1), (_2)))
+
+#define __lasx_xvmini_hu(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvmini_hu((v16u16)(_1), (_2)))
+
+#define __lasx_xvmini_wu(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvmini_wu((v8u32)(_1), (_2)))
+
+#define __lasx_xvmini_du(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvmini_du((v4u64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvseq_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvseq_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvseq_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvseq_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvseq_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvseq_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvseq_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvseq_d((v4i64)_1, (v4i64)_2);
+}
+
+#define __lasx_xvseqi_b(/*__m256i*/ _1, /*si5*/ _2) \
+ ((__m256i)__builtin_lasx_xvseqi_b((v32i8)(_1), (_2)))
+
+#define __lasx_xvseqi_h(/*__m256i*/ _1, /*si5*/ _2) \
+ ((__m256i)__builtin_lasx_xvseqi_h((v16i16)(_1), (_2)))
+
+#define __lasx_xvseqi_w(/*__m256i*/ _1, /*si5*/ _2) \
+ ((__m256i)__builtin_lasx_xvseqi_w((v8i32)(_1), (_2)))
+
+#define __lasx_xvseqi_d(/*__m256i*/ _1, /*si5*/ _2) \
+ ((__m256i)__builtin_lasx_xvseqi_d((v4i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvslt_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvslt_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvslt_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvslt_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvslt_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvslt_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvslt_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvslt_d((v4i64)_1, (v4i64)_2);
+}
+
+#define __lasx_xvslti_b(/*__m256i*/ _1, /*si5*/ _2) \
+ ((__m256i)__builtin_lasx_xvslti_b((v32i8)(_1), (_2)))
+
+#define __lasx_xvslti_h(/*__m256i*/ _1, /*si5*/ _2) \
+ ((__m256i)__builtin_lasx_xvslti_h((v16i16)(_1), (_2)))
+
+#define __lasx_xvslti_w(/*__m256i*/ _1, /*si5*/ _2) \
+ ((__m256i)__builtin_lasx_xvslti_w((v8i32)(_1), (_2)))
+
+#define __lasx_xvslti_d(/*__m256i*/ _1, /*si5*/ _2) \
+ ((__m256i)__builtin_lasx_xvslti_d((v4i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvslt_bu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvslt_bu((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvslt_hu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvslt_hu((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvslt_wu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvslt_wu((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvslt_du(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvslt_du((v4u64)_1, (v4u64)_2);
+}
+
+#define __lasx_xvslti_bu(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvslti_bu((v32u8)(_1), (_2)))
+
+#define __lasx_xvslti_hu(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvslti_hu((v16u16)(_1), (_2)))
+
+#define __lasx_xvslti_wu(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvslti_wu((v8u32)(_1), (_2)))
+
+#define __lasx_xvslti_du(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvslti_du((v4u64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsle_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsle_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsle_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsle_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsle_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsle_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsle_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsle_d((v4i64)_1, (v4i64)_2);
+}
+
+#define __lasx_xvslei_b(/*__m256i*/ _1, /*si5*/ _2) \
+ ((__m256i)__builtin_lasx_xvslei_b((v32i8)(_1), (_2)))
+
+#define __lasx_xvslei_h(/*__m256i*/ _1, /*si5*/ _2) \
+ ((__m256i)__builtin_lasx_xvslei_h((v16i16)(_1), (_2)))
+
+#define __lasx_xvslei_w(/*__m256i*/ _1, /*si5*/ _2) \
+ ((__m256i)__builtin_lasx_xvslei_w((v8i32)(_1), (_2)))
+
+#define __lasx_xvslei_d(/*__m256i*/ _1, /*si5*/ _2) \
+ ((__m256i)__builtin_lasx_xvslei_d((v4i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsle_bu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsle_bu((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsle_hu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsle_hu((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsle_wu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsle_wu((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsle_du(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsle_du((v4u64)_1, (v4u64)_2);
+}
+
+#define __lasx_xvslei_bu(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvslei_bu((v32u8)(_1), (_2)))
+
+#define __lasx_xvslei_hu(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvslei_hu((v16u16)(_1), (_2)))
+
+#define __lasx_xvslei_wu(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvslei_wu((v8u32)(_1), (_2)))
+
+#define __lasx_xvslei_du(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvslei_du((v4u64)(_1), (_2)))
+
+#define __lasx_xvsat_b(/*__m256i*/ _1, /*ui3*/ _2) \
+ ((__m256i)__builtin_lasx_xvsat_b((v32i8)(_1), (_2)))
+
+#define __lasx_xvsat_h(/*__m256i*/ _1, /*ui4*/ _2) \
+ ((__m256i)__builtin_lasx_xvsat_h((v16i16)(_1), (_2)))
+
+#define __lasx_xvsat_w(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvsat_w((v8i32)(_1), (_2)))
+
+#define __lasx_xvsat_d(/*__m256i*/ _1, /*ui6*/ _2) \
+ ((__m256i)__builtin_lasx_xvsat_d((v4i64)(_1), (_2)))
+
+#define __lasx_xvsat_bu(/*__m256i*/ _1, /*ui3*/ _2) \
+ ((__m256i)__builtin_lasx_xvsat_bu((v32u8)(_1), (_2)))
+
+#define __lasx_xvsat_hu(/*__m256i*/ _1, /*ui4*/ _2) \
+ ((__m256i)__builtin_lasx_xvsat_hu((v16u16)(_1), (_2)))
+
+#define __lasx_xvsat_wu(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvsat_wu((v8u32)(_1), (_2)))
+
+#define __lasx_xvsat_du(/*__m256i*/ _1, /*ui6*/ _2) \
+ ((__m256i)__builtin_lasx_xvsat_du((v4u64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvadda_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvadda_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvadda_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvadda_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvadda_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvadda_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvadda_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvadda_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsadd_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsadd_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsadd_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsadd_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsadd_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsadd_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsadd_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsadd_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsadd_bu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsadd_bu((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsadd_hu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsadd_hu((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsadd_wu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsadd_wu((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsadd_du(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsadd_du((v4u64)_1, (v4u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvavg_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvavg_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvavg_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvavg_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvavg_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvavg_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvavg_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvavg_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvavg_bu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvavg_bu((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvavg_hu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvavg_hu((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvavg_wu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvavg_wu((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvavg_du(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvavg_du((v4u64)_1, (v4u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvavgr_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvavgr_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvavgr_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvavgr_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvavgr_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvavgr_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvavgr_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvavgr_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvavgr_bu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvavgr_bu((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvavgr_hu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvavgr_hu((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvavgr_wu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvavgr_wu((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvavgr_du(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvavgr_du((v4u64)_1, (v4u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssub_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssub_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssub_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssub_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssub_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssub_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssub_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssub_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssub_bu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssub_bu((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssub_hu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssub_hu((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssub_wu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssub_wu((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssub_du(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssub_du((v4u64)_1, (v4u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvabsd_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvabsd_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvabsd_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvabsd_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvabsd_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvabsd_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvabsd_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvabsd_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvabsd_bu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvabsd_bu((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvabsd_hu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvabsd_hu((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvabsd_wu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvabsd_wu((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvabsd_du(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvabsd_du((v4u64)_1, (v4u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmul_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmul_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmul_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmul_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmul_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmul_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmul_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmul_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmadd_b(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmadd_b((v32i8)_1, (v32i8)_2, (v32i8)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmadd_h(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmadd_h((v16i16)_1, (v16i16)_2, (v16i16)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmadd_w(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmadd_w((v8i32)_1, (v8i32)_2, (v8i32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmadd_d(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmadd_d((v4i64)_1, (v4i64)_2, (v4i64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmsub_b(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmsub_b((v32i8)_1, (v32i8)_2, (v32i8)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmsub_h(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmsub_h((v16i16)_1, (v16i16)_2, (v16i16)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmsub_w(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmsub_w((v8i32)_1, (v8i32)_2, (v8i32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmsub_d(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmsub_d((v4i64)_1, (v4i64)_2, (v4i64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvdiv_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvdiv_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvdiv_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvdiv_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvdiv_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvdiv_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvdiv_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvdiv_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvdiv_bu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvdiv_bu((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvdiv_hu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvdiv_hu((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvdiv_wu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvdiv_wu((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvdiv_du(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvdiv_du((v4u64)_1, (v4u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvhaddw_h_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvhaddw_h_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvhaddw_w_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvhaddw_w_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvhaddw_d_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvhaddw_d_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvhaddw_hu_bu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvhaddw_hu_bu((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvhaddw_wu_hu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvhaddw_wu_hu((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvhaddw_du_wu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvhaddw_du_wu((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvhsubw_h_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvhsubw_h_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvhsubw_w_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvhsubw_w_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvhsubw_d_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvhsubw_d_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvhsubw_hu_bu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvhsubw_hu_bu((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvhsubw_wu_hu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvhsubw_wu_hu((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvhsubw_du_wu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvhsubw_du_wu((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmod_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmod_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmod_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmod_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmod_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmod_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmod_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmod_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmod_bu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmod_bu((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmod_hu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmod_hu((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmod_wu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmod_wu((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmod_du(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmod_du((v4u64)_1, (v4u64)_2);
+}
+
+#define __lasx_xvrepl128vei_b(/*__m256i*/ _1, /*ui4*/ _2) \
+ ((__m256i)__builtin_lasx_xvrepl128vei_b((v32i8)(_1), (_2)))
+
+#define __lasx_xvrepl128vei_h(/*__m256i*/ _1, /*ui3*/ _2) \
+ ((__m256i)__builtin_lasx_xvrepl128vei_h((v16i16)(_1), (_2)))
+
+#define __lasx_xvrepl128vei_w(/*__m256i*/ _1, /*ui2*/ _2) \
+ ((__m256i)__builtin_lasx_xvrepl128vei_w((v8i32)(_1), (_2)))
+
+#define __lasx_xvrepl128vei_d(/*__m256i*/ _1, /*ui1*/ _2) \
+ ((__m256i)__builtin_lasx_xvrepl128vei_d((v4i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvpickev_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvpickev_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvpickev_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvpickev_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvpickev_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvpickev_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvpickev_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvpickev_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvpickod_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvpickod_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvpickod_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvpickod_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvpickod_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvpickod_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvpickod_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvpickod_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvilvh_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvilvh_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvilvh_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvilvh_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvilvh_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvilvh_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvilvh_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvilvh_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvilvl_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvilvl_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvilvl_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvilvl_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvilvl_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvilvl_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvilvl_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvilvl_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvpackev_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvpackev_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvpackev_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvpackev_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvpackev_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvpackev_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvpackev_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvpackev_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvpackod_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvpackod_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvpackod_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvpackod_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvpackod_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvpackod_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvpackod_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvpackod_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvshuf_b(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvshuf_b((v32i8)_1, (v32i8)_2, (v32i8)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvshuf_h(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvshuf_h((v16i16)_1, (v16i16)_2, (v16i16)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvshuf_w(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvshuf_w((v8i32)_1, (v8i32)_2, (v8i32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvshuf_d(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvshuf_d((v4i64)_1, (v4i64)_2, (v4i64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvand_v(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvand_v((v32u8)_1, (v32u8)_2);
+}
+
+#define __lasx_xvandi_b(/*__m256i*/ _1, /*ui8*/ _2) \
+ ((__m256i)__builtin_lasx_xvandi_b((v32u8)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvor_v(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvor_v((v32u8)_1, (v32u8)_2);
+}
+
+#define __lasx_xvori_b(/*__m256i*/ _1, /*ui8*/ _2) \
+ ((__m256i)__builtin_lasx_xvori_b((v32u8)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvnor_v(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvnor_v((v32u8)_1, (v32u8)_2);
+}
+
+#define __lasx_xvnori_b(/*__m256i*/ _1, /*ui8*/ _2) \
+ ((__m256i)__builtin_lasx_xvnori_b((v32u8)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvxor_v(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvxor_v((v32u8)_1, (v32u8)_2);
+}
+
+#define __lasx_xvxori_b(/*__m256i*/ _1, /*ui8*/ _2) \
+ ((__m256i)__builtin_lasx_xvxori_b((v32u8)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvbitsel_v(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvbitsel_v((v32u8)_1, (v32u8)_2, (v32u8)_3);
+}
+
+#define __lasx_xvbitseli_b(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \
+ ((__m256i)__builtin_lasx_xvbitseli_b((v32u8)(_1), (v32u8)(_2), (_3)))
+
+#define __lasx_xvshuf4i_b(/*__m256i*/ _1, /*ui8*/ _2) \
+ ((__m256i)__builtin_lasx_xvshuf4i_b((v32i8)(_1), (_2)))
+
+#define __lasx_xvshuf4i_h(/*__m256i*/ _1, /*ui8*/ _2) \
+ ((__m256i)__builtin_lasx_xvshuf4i_h((v16i16)(_1), (_2)))
+
+#define __lasx_xvshuf4i_w(/*__m256i*/ _1, /*ui8*/ _2) \
+ ((__m256i)__builtin_lasx_xvshuf4i_w((v8i32)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvreplgr2vr_b(int _1) {
+ return (__m256i)__builtin_lasx_xvreplgr2vr_b((int)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvreplgr2vr_h(int _1) {
+ return (__m256i)__builtin_lasx_xvreplgr2vr_h((int)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvreplgr2vr_w(int _1) {
+ return (__m256i)__builtin_lasx_xvreplgr2vr_w((int)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvreplgr2vr_d(long int _1) {
+ return (__m256i)__builtin_lasx_xvreplgr2vr_d((long int)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvpcnt_b(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvpcnt_b((v32i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvpcnt_h(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvpcnt_h((v16i16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvpcnt_w(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvpcnt_w((v8i32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvpcnt_d(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvpcnt_d((v4i64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvclo_b(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvclo_b((v32i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvclo_h(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvclo_h((v16i16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvclo_w(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvclo_w((v8i32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvclo_d(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvclo_d((v4i64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvclz_b(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvclz_b((v32i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvclz_h(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvclz_h((v16i16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvclz_w(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvclz_w((v8i32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvclz_d(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvclz_d((v4i64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfadd_s(__m256 _1, __m256 _2) {
+ return (__m256)__builtin_lasx_xvfadd_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfadd_d(__m256d _1, __m256d _2) {
+ return (__m256d)__builtin_lasx_xvfadd_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfsub_s(__m256 _1, __m256 _2) {
+ return (__m256)__builtin_lasx_xvfsub_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfsub_d(__m256d _1, __m256d _2) {
+ return (__m256d)__builtin_lasx_xvfsub_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfmul_s(__m256 _1, __m256 _2) {
+ return (__m256)__builtin_lasx_xvfmul_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfmul_d(__m256d _1, __m256d _2) {
+ return (__m256d)__builtin_lasx_xvfmul_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfdiv_s(__m256 _1, __m256 _2) {
+ return (__m256)__builtin_lasx_xvfdiv_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfdiv_d(__m256d _1, __m256d _2) {
+ return (__m256d)__builtin_lasx_xvfdiv_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcvt_h_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcvt_h_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfcvt_s_d(__m256d _1, __m256d _2) {
+ return (__m256)__builtin_lasx_xvfcvt_s_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfmin_s(__m256 _1, __m256 _2) {
+ return (__m256)__builtin_lasx_xvfmin_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfmin_d(__m256d _1, __m256d _2) {
+ return (__m256d)__builtin_lasx_xvfmin_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfmina_s(__m256 _1, __m256 _2) {
+ return (__m256)__builtin_lasx_xvfmina_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfmina_d(__m256d _1, __m256d _2) {
+ return (__m256d)__builtin_lasx_xvfmina_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfmax_s(__m256 _1, __m256 _2) {
+ return (__m256)__builtin_lasx_xvfmax_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfmax_d(__m256d _1, __m256d _2) {
+ return (__m256d)__builtin_lasx_xvfmax_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfmaxa_s(__m256 _1, __m256 _2) {
+ return (__m256)__builtin_lasx_xvfmaxa_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfmaxa_d(__m256d _1, __m256d _2) {
+ return (__m256d)__builtin_lasx_xvfmaxa_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfclass_s(__m256 _1) {
+ return (__m256i)__builtin_lasx_xvfclass_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfclass_d(__m256d _1) {
+ return (__m256i)__builtin_lasx_xvfclass_d((v4f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfsqrt_s(__m256 _1) {
+ return (__m256)__builtin_lasx_xvfsqrt_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfsqrt_d(__m256d _1) {
+ return (__m256d)__builtin_lasx_xvfsqrt_d((v4f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfrecip_s(__m256 _1) {
+ return (__m256)__builtin_lasx_xvfrecip_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfrecip_d(__m256d _1) {
+ return (__m256d)__builtin_lasx_xvfrecip_d((v4f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfrecipe_s(__m256 _1) {
+ return (__m256)__builtin_lasx_xvfrecipe_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfrecipe_d(__m256d _1) {
+ return (__m256d)__builtin_lasx_xvfrecipe_d((v4f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfrint_s(__m256 _1) {
+ return (__m256)__builtin_lasx_xvfrint_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfrint_d(__m256d _1) {
+ return (__m256d)__builtin_lasx_xvfrint_d((v4f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfrsqrt_s(__m256 _1) {
+ return (__m256)__builtin_lasx_xvfrsqrt_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfrsqrt_d(__m256d _1) {
+ return (__m256d)__builtin_lasx_xvfrsqrt_d((v4f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfrsqrte_s(__m256 _1) {
+ return (__m256)__builtin_lasx_xvfrsqrte_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfrsqrte_d(__m256d _1) {
+ return (__m256d)__builtin_lasx_xvfrsqrte_d((v4f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvflogb_s(__m256 _1) {
+ return (__m256)__builtin_lasx_xvflogb_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvflogb_d(__m256d _1) {
+ return (__m256d)__builtin_lasx_xvflogb_d((v4f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfcvth_s_h(__m256i _1) {
+ return (__m256)__builtin_lasx_xvfcvth_s_h((v16i16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfcvth_d_s(__m256 _1) {
+ return (__m256d)__builtin_lasx_xvfcvth_d_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfcvtl_s_h(__m256i _1) {
+ return (__m256)__builtin_lasx_xvfcvtl_s_h((v16i16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfcvtl_d_s(__m256 _1) {
+ return (__m256d)__builtin_lasx_xvfcvtl_d_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftint_w_s(__m256 _1) {
+ return (__m256i)__builtin_lasx_xvftint_w_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftint_l_d(__m256d _1) {
+ return (__m256i)__builtin_lasx_xvftint_l_d((v4f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftint_wu_s(__m256 _1) {
+ return (__m256i)__builtin_lasx_xvftint_wu_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftint_lu_d(__m256d _1) {
+ return (__m256i)__builtin_lasx_xvftint_lu_d((v4f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintrz_w_s(__m256 _1) {
+ return (__m256i)__builtin_lasx_xvftintrz_w_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintrz_l_d(__m256d _1) {
+ return (__m256i)__builtin_lasx_xvftintrz_l_d((v4f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintrz_wu_s(__m256 _1) {
+ return (__m256i)__builtin_lasx_xvftintrz_wu_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintrz_lu_d(__m256d _1) {
+ return (__m256i)__builtin_lasx_xvftintrz_lu_d((v4f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvffint_s_w(__m256i _1) {
+ return (__m256)__builtin_lasx_xvffint_s_w((v8i32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvffint_d_l(__m256i _1) {
+ return (__m256d)__builtin_lasx_xvffint_d_l((v4i64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvffint_s_wu(__m256i _1) {
+ return (__m256)__builtin_lasx_xvffint_s_wu((v8u32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvffint_d_lu(__m256i _1) {
+ return (__m256d)__builtin_lasx_xvffint_d_lu((v4u64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvreplve_b(__m256i _1, int _2) {
+ return (__m256i)__builtin_lasx_xvreplve_b((v32i8)_1, (int)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvreplve_h(__m256i _1, int _2) {
+ return (__m256i)__builtin_lasx_xvreplve_h((v16i16)_1, (int)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvreplve_w(__m256i _1, int _2) {
+ return (__m256i)__builtin_lasx_xvreplve_w((v8i32)_1, (int)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvreplve_d(__m256i _1, int _2) {
+ return (__m256i)__builtin_lasx_xvreplve_d((v4i64)_1, (int)_2);
+}
+
+#define __lasx_xvpermi_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \
+ ((__m256i)__builtin_lasx_xvpermi_w((v8i32)(_1), (v8i32)(_2), (_3)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvandn_v(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvandn_v((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvneg_b(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvneg_b((v32i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvneg_h(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvneg_h((v16i16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvneg_w(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvneg_w((v8i32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvneg_d(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvneg_d((v4i64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmuh_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmuh_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmuh_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmuh_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmuh_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmuh_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmuh_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmuh_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmuh_bu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmuh_bu((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmuh_hu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmuh_hu((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmuh_wu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmuh_wu((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmuh_du(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmuh_du((v4u64)_1, (v4u64)_2);
+}
+
+#define __lasx_xvsllwil_h_b(/*__m256i*/ _1, /*ui3*/ _2) \
+ ((__m256i)__builtin_lasx_xvsllwil_h_b((v32i8)(_1), (_2)))
+
+#define __lasx_xvsllwil_w_h(/*__m256i*/ _1, /*ui4*/ _2) \
+ ((__m256i)__builtin_lasx_xvsllwil_w_h((v16i16)(_1), (_2)))
+
+#define __lasx_xvsllwil_d_w(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvsllwil_d_w((v8i32)(_1), (_2)))
+
+#define __lasx_xvsllwil_hu_bu(/*__m256i*/ _1, /*ui3*/ _2) \
+ ((__m256i)__builtin_lasx_xvsllwil_hu_bu((v32u8)(_1), (_2)))
+
+#define __lasx_xvsllwil_wu_hu(/*__m256i*/ _1, /*ui4*/ _2) \
+ ((__m256i)__builtin_lasx_xvsllwil_wu_hu((v16u16)(_1), (_2)))
+
+#define __lasx_xvsllwil_du_wu(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvsllwil_du_wu((v8u32)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsran_b_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsran_b_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsran_h_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsran_h_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsran_w_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsran_w_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssran_b_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssran_b_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssran_h_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssran_h_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssran_w_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssran_w_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssran_bu_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssran_bu_h((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssran_hu_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssran_hu_w((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssran_wu_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssran_wu_d((v4u64)_1, (v4u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsrarn_b_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsrarn_b_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsrarn_h_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsrarn_h_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsrarn_w_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsrarn_w_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssrarn_b_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssrarn_b_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssrarn_h_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssrarn_h_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssrarn_w_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssrarn_w_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssrarn_bu_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssrarn_bu_h((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssrarn_hu_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssrarn_hu_w((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssrarn_wu_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssrarn_wu_d((v4u64)_1, (v4u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsrln_b_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsrln_b_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsrln_h_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsrln_h_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsrln_w_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsrln_w_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssrln_bu_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssrln_bu_h((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssrln_hu_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssrln_hu_w((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssrln_wu_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssrln_wu_d((v4u64)_1, (v4u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsrlrn_b_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsrlrn_b_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsrlrn_h_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsrlrn_h_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsrlrn_w_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsrlrn_w_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssrlrn_bu_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssrlrn_bu_h((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssrlrn_hu_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssrlrn_hu_w((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssrlrn_wu_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssrlrn_wu_d((v4u64)_1, (v4u64)_2);
+}
+
+#define __lasx_xvfrstpi_b(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
+ ((__m256i)__builtin_lasx_xvfrstpi_b((v32i8)(_1), (v32i8)(_2), (_3)))
+
+#define __lasx_xvfrstpi_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
+ ((__m256i)__builtin_lasx_xvfrstpi_h((v16i16)(_1), (v16i16)(_2), (_3)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfrstp_b(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvfrstp_b((v32i8)_1, (v32i8)_2, (v32i8)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfrstp_h(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvfrstp_h((v16i16)_1, (v16i16)_2, (v16i16)_3);
+}
+
+#define __lasx_xvshuf4i_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \
+ ((__m256i)__builtin_lasx_xvshuf4i_d((v4i64)(_1), (v4i64)(_2), (_3)))
+
+#define __lasx_xvbsrl_v(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvbsrl_v((v32i8)(_1), (_2)))
+
+#define __lasx_xvbsll_v(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvbsll_v((v32i8)(_1), (_2)))
+
+#define __lasx_xvextrins_b(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \
+ ((__m256i)__builtin_lasx_xvextrins_b((v32i8)(_1), (v32i8)(_2), (_3)))
+
+#define __lasx_xvextrins_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \
+ ((__m256i)__builtin_lasx_xvextrins_h((v16i16)(_1), (v16i16)(_2), (_3)))
+
+#define __lasx_xvextrins_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \
+ ((__m256i)__builtin_lasx_xvextrins_w((v8i32)(_1), (v8i32)(_2), (_3)))
+
+#define __lasx_xvextrins_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \
+ ((__m256i)__builtin_lasx_xvextrins_d((v4i64)(_1), (v4i64)(_2), (_3)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmskltz_b(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvmskltz_b((v32i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmskltz_h(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvmskltz_h((v16i16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmskltz_w(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvmskltz_w((v8i32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmskltz_d(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvmskltz_d((v4i64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsigncov_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsigncov_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsigncov_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsigncov_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsigncov_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsigncov_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsigncov_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsigncov_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfmadd_s(__m256 _1, __m256 _2, __m256 _3) {
+ return (__m256)__builtin_lasx_xvfmadd_s((v8f32)_1, (v8f32)_2, (v8f32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfmadd_d(__m256d _1, __m256d _2, __m256d _3) {
+ return (__m256d)__builtin_lasx_xvfmadd_d((v4f64)_1, (v4f64)_2, (v4f64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfmsub_s(__m256 _1, __m256 _2, __m256 _3) {
+ return (__m256)__builtin_lasx_xvfmsub_s((v8f32)_1, (v8f32)_2, (v8f32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfmsub_d(__m256d _1, __m256d _2, __m256d _3) {
+ return (__m256d)__builtin_lasx_xvfmsub_d((v4f64)_1, (v4f64)_2, (v4f64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfnmadd_s(__m256 _1, __m256 _2, __m256 _3) {
+ return (__m256)__builtin_lasx_xvfnmadd_s((v8f32)_1, (v8f32)_2, (v8f32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfnmadd_d(__m256d _1, __m256d _2, __m256d _3) {
+ return (__m256d)__builtin_lasx_xvfnmadd_d((v4f64)_1, (v4f64)_2, (v4f64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfnmsub_s(__m256 _1, __m256 _2, __m256 _3) {
+ return (__m256)__builtin_lasx_xvfnmsub_s((v8f32)_1, (v8f32)_2, (v8f32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfnmsub_d(__m256d _1, __m256d _2, __m256d _3) {
+ return (__m256d)__builtin_lasx_xvfnmsub_d((v4f64)_1, (v4f64)_2, (v4f64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintrne_w_s(__m256 _1) {
+ return (__m256i)__builtin_lasx_xvftintrne_w_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintrne_l_d(__m256d _1) {
+ return (__m256i)__builtin_lasx_xvftintrne_l_d((v4f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintrp_w_s(__m256 _1) {
+ return (__m256i)__builtin_lasx_xvftintrp_w_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintrp_l_d(__m256d _1) {
+ return (__m256i)__builtin_lasx_xvftintrp_l_d((v4f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintrm_w_s(__m256 _1) {
+ return (__m256i)__builtin_lasx_xvftintrm_w_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintrm_l_d(__m256d _1) {
+ return (__m256i)__builtin_lasx_xvftintrm_l_d((v4f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftint_w_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvftint_w_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvffint_s_l(__m256i _1, __m256i _2) {
+ return (__m256)__builtin_lasx_xvffint_s_l((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintrz_w_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvftintrz_w_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintrp_w_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvftintrp_w_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintrm_w_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvftintrm_w_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintrne_w_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvftintrne_w_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftinth_l_s(__m256 _1) {
+ return (__m256i)__builtin_lasx_xvftinth_l_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintl_l_s(__m256 _1) {
+ return (__m256i)__builtin_lasx_xvftintl_l_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvffinth_d_w(__m256i _1) {
+ return (__m256d)__builtin_lasx_xvffinth_d_w((v8i32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvffintl_d_w(__m256i _1) {
+ return (__m256d)__builtin_lasx_xvffintl_d_w((v8i32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintrzh_l_s(__m256 _1) {
+ return (__m256i)__builtin_lasx_xvftintrzh_l_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintrzl_l_s(__m256 _1) {
+ return (__m256i)__builtin_lasx_xvftintrzl_l_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintrph_l_s(__m256 _1) {
+ return (__m256i)__builtin_lasx_xvftintrph_l_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintrpl_l_s(__m256 _1) {
+ return (__m256i)__builtin_lasx_xvftintrpl_l_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintrmh_l_s(__m256 _1) {
+ return (__m256i)__builtin_lasx_xvftintrmh_l_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintrml_l_s(__m256 _1) {
+ return (__m256i)__builtin_lasx_xvftintrml_l_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintrneh_l_s(__m256 _1) {
+ return (__m256i)__builtin_lasx_xvftintrneh_l_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvftintrnel_l_s(__m256 _1) {
+ return (__m256i)__builtin_lasx_xvftintrnel_l_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfrintrne_s(__m256 _1) {
+ return (__m256)__builtin_lasx_xvfrintrne_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfrintrne_d(__m256d _1) {
+ return (__m256d)__builtin_lasx_xvfrintrne_d((v4f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfrintrz_s(__m256 _1) {
+ return (__m256)__builtin_lasx_xvfrintrz_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfrintrz_d(__m256d _1) {
+ return (__m256d)__builtin_lasx_xvfrintrz_d((v4f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfrintrp_s(__m256 _1) {
+ return (__m256)__builtin_lasx_xvfrintrp_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfrintrp_d(__m256d _1) {
+ return (__m256d)__builtin_lasx_xvfrintrp_d((v4f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256
+ __lasx_xvfrintrm_s(__m256 _1) {
+ return (__m256)__builtin_lasx_xvfrintrm_s((v8f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256d
+ __lasx_xvfrintrm_d(__m256d _1) {
+ return (__m256d)__builtin_lasx_xvfrintrm_d((v4f64)_1);
+}
+
+#define __lasx_xvld(/*void **/ _1, /*si12*/ _2) \
+ ((__m256i)__builtin_lasx_xvld((void const *)(_1), (_2)))
+
+#define __lasx_xvst(/*__m256i*/ _1, /*void **/ _2, /*si12*/ _3) \
+ ((void)__builtin_lasx_xvst((v32i8)(_1), (void *)(_2), (_3)))
+
+#define __lasx_xvstelm_b(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, \
+ /*idx*/ _4) \
+ ((void)__builtin_lasx_xvstelm_b((v32i8)(_1), (void *)(_2), (_3), (_4)))
+
+#define __lasx_xvstelm_h(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, \
+ /*idx*/ _4) \
+ ((void)__builtin_lasx_xvstelm_h((v16i16)(_1), (void *)(_2), (_3), (_4)))
+
+#define __lasx_xvstelm_w(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, \
+ /*idx*/ _4) \
+ ((void)__builtin_lasx_xvstelm_w((v8i32)(_1), (void *)(_2), (_3), (_4)))
+
+#define __lasx_xvstelm_d(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, \
+ /*idx*/ _4) \
+ ((void)__builtin_lasx_xvstelm_d((v4i64)(_1), (void *)(_2), (_3), (_4)))
+
+#define __lasx_xvinsve0_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui3*/ _3) \
+ ((__m256i)__builtin_lasx_xvinsve0_w((v8i32)(_1), (v8i32)(_2), (_3)))
+
+#define __lasx_xvinsve0_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui2*/ _3) \
+ ((__m256i)__builtin_lasx_xvinsve0_d((v4i64)(_1), (v4i64)(_2), (_3)))
+
+#define __lasx_xvpickve_w(/*__m256i*/ _1, /*ui3*/ _2) \
+ ((__m256i)__builtin_lasx_xvpickve_w((v8i32)(_1), (_2)))
+
+#define __lasx_xvpickve_d(/*__m256i*/ _1, /*ui2*/ _2) \
+ ((__m256i)__builtin_lasx_xvpickve_d((v4i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssrlrn_b_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssrlrn_b_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssrlrn_h_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssrlrn_h_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssrlrn_w_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssrlrn_w_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssrln_b_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssrln_b_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssrln_h_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssrln_h_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvssrln_w_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvssrln_w_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvorn_v(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvorn_v((v32i8)_1, (v32i8)_2);
+}
+
+#define __lasx_xvldi(/*i13*/ _1) ((__m256i)__builtin_lasx_xvldi((_1)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvldx(void const *_1, long int _2) {
+ return (__m256i)__builtin_lasx_xvldx((void const *)_1, (long int)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) void
+ __lasx_xvstx(__m256i _1, void *_2, long int _3) {
+ return (void)__builtin_lasx_xvstx((v32i8)_1, (void *)_2, (long int)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvextl_qu_du(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvextl_qu_du((v4u64)_1);
+}
+
+#define __lasx_xvinsgr2vr_w(/*__m256i*/ _1, /*int*/ _2, /*ui3*/ _3) \
+ ((__m256i)__builtin_lasx_xvinsgr2vr_w((v8i32)(_1), (int)(_2), (_3)))
+
+#define __lasx_xvinsgr2vr_d(/*__m256i*/ _1, /*long int*/ _2, /*ui2*/ _3) \
+ ((__m256i)__builtin_lasx_xvinsgr2vr_d((v4i64)(_1), (long int)(_2), (_3)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvreplve0_b(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvreplve0_b((v32i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvreplve0_h(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvreplve0_h((v16i16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvreplve0_w(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvreplve0_w((v8i32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvreplve0_d(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvreplve0_d((v4i64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvreplve0_q(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvreplve0_q((v32i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_vext2xv_h_b(__m256i _1) {
+ return (__m256i)__builtin_lasx_vext2xv_h_b((v32i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_vext2xv_w_h(__m256i _1) {
+ return (__m256i)__builtin_lasx_vext2xv_w_h((v16i16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_vext2xv_d_w(__m256i _1) {
+ return (__m256i)__builtin_lasx_vext2xv_d_w((v8i32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_vext2xv_w_b(__m256i _1) {
+ return (__m256i)__builtin_lasx_vext2xv_w_b((v32i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_vext2xv_d_h(__m256i _1) {
+ return (__m256i)__builtin_lasx_vext2xv_d_h((v16i16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_vext2xv_d_b(__m256i _1) {
+ return (__m256i)__builtin_lasx_vext2xv_d_b((v32i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_vext2xv_hu_bu(__m256i _1) {
+ return (__m256i)__builtin_lasx_vext2xv_hu_bu((v32i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_vext2xv_wu_hu(__m256i _1) {
+ return (__m256i)__builtin_lasx_vext2xv_wu_hu((v16i16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_vext2xv_du_wu(__m256i _1) {
+ return (__m256i)__builtin_lasx_vext2xv_du_wu((v8i32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_vext2xv_wu_bu(__m256i _1) {
+ return (__m256i)__builtin_lasx_vext2xv_wu_bu((v32i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_vext2xv_du_hu(__m256i _1) {
+ return (__m256i)__builtin_lasx_vext2xv_du_hu((v16i16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_vext2xv_du_bu(__m256i _1) {
+ return (__m256i)__builtin_lasx_vext2xv_du_bu((v32i8)_1);
+}
+
+#define __lasx_xvpermi_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \
+ ((__m256i)__builtin_lasx_xvpermi_q((v32i8)(_1), (v32i8)(_2), (_3)))
+
+#define __lasx_xvpermi_d(/*__m256i*/ _1, /*ui8*/ _2) \
+ ((__m256i)__builtin_lasx_xvpermi_d((v4i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvperm_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvperm_w((v8i32)_1, (v8i32)_2);
+}
+
+#define __lasx_xvldrepl_b(/*void **/ _1, /*si12*/ _2) \
+ ((__m256i)__builtin_lasx_xvldrepl_b((void const *)(_1), (_2)))
+
+#define __lasx_xvldrepl_h(/*void **/ _1, /*si11*/ _2) \
+ ((__m256i)__builtin_lasx_xvldrepl_h((void const *)(_1), (_2)))
+
+#define __lasx_xvldrepl_w(/*void **/ _1, /*si10*/ _2) \
+ ((__m256i)__builtin_lasx_xvldrepl_w((void const *)(_1), (_2)))
+
+#define __lasx_xvldrepl_d(/*void **/ _1, /*si9*/ _2) \
+ ((__m256i)__builtin_lasx_xvldrepl_d((void const *)(_1), (_2)))
+
+#define __lasx_xvpickve2gr_w(/*__m256i*/ _1, /*ui3*/ _2) \
+ ((int)__builtin_lasx_xvpickve2gr_w((v8i32)(_1), (_2)))
+
+#define __lasx_xvpickve2gr_wu(/*__m256i*/ _1, /*ui3*/ _2) \
+ ((unsigned int)__builtin_lasx_xvpickve2gr_wu((v8i32)(_1), (_2)))
+
+#define __lasx_xvpickve2gr_d(/*__m256i*/ _1, /*ui2*/ _2) \
+ ((long int)__builtin_lasx_xvpickve2gr_d((v4i64)(_1), (_2)))
+
+#define __lasx_xvpickve2gr_du(/*__m256i*/ _1, /*ui2*/ _2) \
+ ((unsigned long int)__builtin_lasx_xvpickve2gr_du((v4i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwev_q_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwev_q_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwev_d_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwev_d_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwev_w_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwev_w_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwev_h_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwev_h_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwev_q_du(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwev_q_du((v4u64)_1, (v4u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwev_d_wu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwev_d_wu((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwev_w_hu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwev_w_hu((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwev_h_bu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwev_h_bu((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsubwev_q_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsubwev_q_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsubwev_d_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsubwev_d_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsubwev_w_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsubwev_w_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsubwev_h_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsubwev_h_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsubwev_q_du(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsubwev_q_du((v4u64)_1, (v4u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsubwev_d_wu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsubwev_d_wu((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsubwev_w_hu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsubwev_w_hu((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsubwev_h_bu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsubwev_h_bu((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwev_q_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwev_q_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwev_d_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwev_d_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwev_w_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwev_w_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwev_h_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwev_h_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwev_q_du(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwev_q_du((v4u64)_1, (v4u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwev_d_wu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwev_d_wu((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwev_w_hu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwev_w_hu((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwev_h_bu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwev_h_bu((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwod_q_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwod_q_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwod_d_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwod_d_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwod_w_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwod_w_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwod_h_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwod_h_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwod_q_du(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwod_q_du((v4u64)_1, (v4u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwod_d_wu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwod_d_wu((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwod_w_hu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwod_w_hu((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwod_h_bu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwod_h_bu((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsubwod_q_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsubwod_q_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsubwod_d_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsubwod_d_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsubwod_w_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsubwod_w_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsubwod_h_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsubwod_h_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsubwod_q_du(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsubwod_q_du((v4u64)_1, (v4u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsubwod_d_wu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsubwod_d_wu((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsubwod_w_hu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsubwod_w_hu((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsubwod_h_bu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsubwod_h_bu((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwod_q_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwod_q_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwod_d_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwod_d_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwod_w_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwod_w_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwod_h_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwod_h_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwod_q_du(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwod_q_du((v4u64)_1, (v4u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwod_d_wu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwod_d_wu((v8u32)_1, (v8u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwod_w_hu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwod_w_hu((v16u16)_1, (v16u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwod_h_bu(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwod_h_bu((v32u8)_1, (v32u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwev_d_wu_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwev_d_wu_w((v8u32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwev_w_hu_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwev_w_hu_h((v16u16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwev_h_bu_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwev_h_bu_b((v32u8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwev_d_wu_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwev_d_wu_w((v8u32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwev_w_hu_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwev_w_hu_h((v16u16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwev_h_bu_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwev_h_bu_b((v32u8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwod_d_wu_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwod_d_wu_w((v8u32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwod_w_hu_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwod_w_hu_h((v16u16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwod_h_bu_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwod_h_bu_b((v32u8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwod_d_wu_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwod_d_wu_w((v8u32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwod_w_hu_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwod_w_hu_h((v16u16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwod_h_bu_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwod_h_bu_b((v32u8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvhaddw_q_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvhaddw_q_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvhaddw_qu_du(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvhaddw_qu_du((v4u64)_1, (v4u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvhsubw_q_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvhsubw_q_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvhsubw_qu_du(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvhsubw_qu_du((v4u64)_1, (v4u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwev_q_d(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwev_q_d((v4i64)_1, (v4i64)_2, (v4i64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwev_d_w(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwev_d_w((v4i64)_1, (v8i32)_2, (v8i32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwev_w_h(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwev_w_h((v8i32)_1, (v16i16)_2,
+ (v16i16)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwev_h_b(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwev_h_b((v16i16)_1, (v32i8)_2,
+ (v32i8)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwev_q_du(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwev_q_du((v4u64)_1, (v4u64)_2,
+ (v4u64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwev_d_wu(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwev_d_wu((v4u64)_1, (v8u32)_2,
+ (v8u32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwev_w_hu(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwev_w_hu((v8u32)_1, (v16u16)_2,
+ (v16u16)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwev_h_bu(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwev_h_bu((v16u16)_1, (v32u8)_2,
+ (v32u8)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwod_q_d(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwod_q_d((v4i64)_1, (v4i64)_2, (v4i64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwod_d_w(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwod_d_w((v4i64)_1, (v8i32)_2, (v8i32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwod_w_h(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwod_w_h((v8i32)_1, (v16i16)_2,
+ (v16i16)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwod_h_b(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwod_h_b((v16i16)_1, (v32i8)_2,
+ (v32i8)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwod_q_du(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwod_q_du((v4u64)_1, (v4u64)_2,
+ (v4u64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwod_d_wu(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwod_d_wu((v4u64)_1, (v8u32)_2,
+ (v8u32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwod_w_hu(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwod_w_hu((v8u32)_1, (v16u16)_2,
+ (v16u16)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwod_h_bu(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwod_h_bu((v16u16)_1, (v32u8)_2,
+ (v32u8)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwev_q_du_d(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwev_q_du_d((v4i64)_1, (v4u64)_2,
+ (v4i64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwev_d_wu_w(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwev_d_wu_w((v4i64)_1, (v8u32)_2,
+ (v8i32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwev_w_hu_h(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwev_w_hu_h((v8i32)_1, (v16u16)_2,
+ (v16i16)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwev_h_bu_b(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwev_h_bu_b((v16i16)_1, (v32u8)_2,
+ (v32i8)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwod_q_du_d(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwod_q_du_d((v4i64)_1, (v4u64)_2,
+ (v4i64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwod_d_wu_w(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwod_d_wu_w((v4i64)_1, (v8u32)_2,
+ (v8i32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwod_w_hu_h(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwod_w_hu_h((v8i32)_1, (v16u16)_2,
+ (v16i16)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmaddwod_h_bu_b(__m256i _1, __m256i _2, __m256i _3) {
+ return (__m256i)__builtin_lasx_xvmaddwod_h_bu_b((v16i16)_1, (v32u8)_2,
+ (v32i8)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvrotr_b(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvrotr_b((v32i8)_1, (v32i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvrotr_h(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvrotr_h((v16i16)_1, (v16i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvrotr_w(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvrotr_w((v8i32)_1, (v8i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvrotr_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvrotr_d((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvadd_q(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvadd_q((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvsub_q(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvsub_q((v4i64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwev_q_du_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwev_q_du_d((v4u64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvaddwod_q_du_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvaddwod_q_du_d((v4u64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwev_q_du_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwev_q_du_d((v4u64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmulwod_q_du_d(__m256i _1, __m256i _2) {
+ return (__m256i)__builtin_lasx_xvmulwod_q_du_d((v4u64)_1, (v4i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmskgez_b(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvmskgez_b((v32i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvmsknz_b(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvmsknz_b((v32i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvexth_h_b(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvexth_h_b((v32i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvexth_w_h(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvexth_w_h((v16i16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvexth_d_w(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvexth_d_w((v8i32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvexth_q_d(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvexth_q_d((v4i64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvexth_hu_bu(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvexth_hu_bu((v32u8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvexth_wu_hu(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvexth_wu_hu((v16u16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvexth_du_wu(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvexth_du_wu((v8u32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvexth_qu_du(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvexth_qu_du((v4u64)_1);
+}
+
+#define __lasx_xvrotri_b(/*__m256i*/ _1, /*ui3*/ _2) \
+ ((__m256i)__builtin_lasx_xvrotri_b((v32i8)(_1), (_2)))
+
+#define __lasx_xvrotri_h(/*__m256i*/ _1, /*ui4*/ _2) \
+ ((__m256i)__builtin_lasx_xvrotri_h((v16i16)(_1), (_2)))
+
+#define __lasx_xvrotri_w(/*__m256i*/ _1, /*ui5*/ _2) \
+ ((__m256i)__builtin_lasx_xvrotri_w((v8i32)(_1), (_2)))
+
+#define __lasx_xvrotri_d(/*__m256i*/ _1, /*ui6*/ _2) \
+ ((__m256i)__builtin_lasx_xvrotri_d((v4i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvextl_q_d(__m256i _1) {
+ return (__m256i)__builtin_lasx_xvextl_q_d((v4i64)_1);
+}
+
+#define __lasx_xvsrlni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
+ ((__m256i)__builtin_lasx_xvsrlni_b_h((v32i8)(_1), (v32i8)(_2), (_3)))
+
+#define __lasx_xvsrlni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
+ ((__m256i)__builtin_lasx_xvsrlni_h_w((v16i16)(_1), (v16i16)(_2), (_3)))
+
+#define __lasx_xvsrlni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
+ ((__m256i)__builtin_lasx_xvsrlni_w_d((v8i32)(_1), (v8i32)(_2), (_3)))
+
+#define __lasx_xvsrlni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
+ ((__m256i)__builtin_lasx_xvsrlni_d_q((v4i64)(_1), (v4i64)(_2), (_3)))
+
+#define __lasx_xvsrlrni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
+ ((__m256i)__builtin_lasx_xvsrlrni_b_h((v32i8)(_1), (v32i8)(_2), (_3)))
+
+#define __lasx_xvsrlrni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
+ ((__m256i)__builtin_lasx_xvsrlrni_h_w((v16i16)(_1), (v16i16)(_2), (_3)))
+
+#define __lasx_xvsrlrni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
+ ((__m256i)__builtin_lasx_xvsrlrni_w_d((v8i32)(_1), (v8i32)(_2), (_3)))
+
+#define __lasx_xvsrlrni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
+ ((__m256i)__builtin_lasx_xvsrlrni_d_q((v4i64)(_1), (v4i64)(_2), (_3)))
+
+#define __lasx_xvssrlni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrlni_b_h((v32i8)(_1), (v32i8)(_2), (_3)))
+
+#define __lasx_xvssrlni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrlni_h_w((v16i16)(_1), (v16i16)(_2), (_3)))
+
+#define __lasx_xvssrlni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrlni_w_d((v8i32)(_1), (v8i32)(_2), (_3)))
+
+#define __lasx_xvssrlni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrlni_d_q((v4i64)(_1), (v4i64)(_2), (_3)))
+
+#define __lasx_xvssrlni_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrlni_bu_h((v32u8)(_1), (v32i8)(_2), (_3)))
+
+#define __lasx_xvssrlni_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrlni_hu_w((v16u16)(_1), (v16i16)(_2), (_3)))
+
+#define __lasx_xvssrlni_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrlni_wu_d((v8u32)(_1), (v8i32)(_2), (_3)))
+
+#define __lasx_xvssrlni_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrlni_du_q((v4u64)(_1), (v4i64)(_2), (_3)))
+
+#define __lasx_xvssrlrni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrlrni_b_h((v32i8)(_1), (v32i8)(_2), (_3)))
+
+#define __lasx_xvssrlrni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrlrni_h_w((v16i16)(_1), (v16i16)(_2), (_3)))
+
+#define __lasx_xvssrlrni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrlrni_w_d((v8i32)(_1), (v8i32)(_2), (_3)))
+
+#define __lasx_xvssrlrni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrlrni_d_q((v4i64)(_1), (v4i64)(_2), (_3)))
+
+#define __lasx_xvssrlrni_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrlrni_bu_h((v32u8)(_1), (v32i8)(_2), (_3)))
+
+#define __lasx_xvssrlrni_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrlrni_hu_w((v16u16)(_1), (v16i16)(_2), (_3)))
+
+#define __lasx_xvssrlrni_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrlrni_wu_d((v8u32)(_1), (v8i32)(_2), (_3)))
+
+#define __lasx_xvssrlrni_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrlrni_du_q((v4u64)(_1), (v4i64)(_2), (_3)))
+
+#define __lasx_xvsrani_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
+ ((__m256i)__builtin_lasx_xvsrani_b_h((v32i8)(_1), (v32i8)(_2), (_3)))
+
+#define __lasx_xvsrani_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
+ ((__m256i)__builtin_lasx_xvsrani_h_w((v16i16)(_1), (v16i16)(_2), (_3)))
+
+#define __lasx_xvsrani_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
+ ((__m256i)__builtin_lasx_xvsrani_w_d((v8i32)(_1), (v8i32)(_2), (_3)))
+
+#define __lasx_xvsrani_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
+ ((__m256i)__builtin_lasx_xvsrani_d_q((v4i64)(_1), (v4i64)(_2), (_3)))
+
+#define __lasx_xvsrarni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
+ ((__m256i)__builtin_lasx_xvsrarni_b_h((v32i8)(_1), (v32i8)(_2), (_3)))
+
+#define __lasx_xvsrarni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
+ ((__m256i)__builtin_lasx_xvsrarni_h_w((v16i16)(_1), (v16i16)(_2), (_3)))
+
+#define __lasx_xvsrarni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
+ ((__m256i)__builtin_lasx_xvsrarni_w_d((v8i32)(_1), (v8i32)(_2), (_3)))
+
+#define __lasx_xvsrarni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
+ ((__m256i)__builtin_lasx_xvsrarni_d_q((v4i64)(_1), (v4i64)(_2), (_3)))
+
+#define __lasx_xvssrani_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrani_b_h((v32i8)(_1), (v32i8)(_2), (_3)))
+
+#define __lasx_xvssrani_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrani_h_w((v16i16)(_1), (v16i16)(_2), (_3)))
+
+#define __lasx_xvssrani_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrani_w_d((v8i32)(_1), (v8i32)(_2), (_3)))
+
+#define __lasx_xvssrani_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrani_d_q((v4i64)(_1), (v4i64)(_2), (_3)))
+
+#define __lasx_xvssrani_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrani_bu_h((v32u8)(_1), (v32i8)(_2), (_3)))
+
+#define __lasx_xvssrani_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrani_hu_w((v16u16)(_1), (v16i16)(_2), (_3)))
+
+#define __lasx_xvssrani_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrani_wu_d((v8u32)(_1), (v8i32)(_2), (_3)))
+
+#define __lasx_xvssrani_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrani_du_q((v4u64)(_1), (v4i64)(_2), (_3)))
+
+#define __lasx_xvssrarni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrarni_b_h((v32i8)(_1), (v32i8)(_2), (_3)))
+
+#define __lasx_xvssrarni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrarni_h_w((v16i16)(_1), (v16i16)(_2), (_3)))
+
+#define __lasx_xvssrarni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrarni_w_d((v8i32)(_1), (v8i32)(_2), (_3)))
+
+#define __lasx_xvssrarni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrarni_d_q((v4i64)(_1), (v4i64)(_2), (_3)))
+
+#define __lasx_xvssrarni_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrarni_bu_h((v32u8)(_1), (v32i8)(_2), (_3)))
+
+#define __lasx_xvssrarni_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrarni_hu_w((v16u16)(_1), (v16i16)(_2), (_3)))
+
+#define __lasx_xvssrarni_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrarni_wu_d((v8u32)(_1), (v8i32)(_2), (_3)))
+
+#define __lasx_xvssrarni_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
+ ((__m256i)__builtin_lasx_xvssrarni_du_q((v4u64)(_1), (v4i64)(_2), (_3)))
+
+#define __lasx_xbnz_b(/*__m256i*/ _1) ((int)__builtin_lasx_xbnz_b((v32u8)(_1)))
+
+#define __lasx_xbnz_d(/*__m256i*/ _1) ((int)__builtin_lasx_xbnz_d((v4u64)(_1)))
+
+#define __lasx_xbnz_h(/*__m256i*/ _1) ((int)__builtin_lasx_xbnz_h((v16u16)(_1)))
+
+#define __lasx_xbnz_v(/*__m256i*/ _1) ((int)__builtin_lasx_xbnz_v((v32u8)(_1)))
+
+#define __lasx_xbnz_w(/*__m256i*/ _1) ((int)__builtin_lasx_xbnz_w((v8u32)(_1)))
+
+#define __lasx_xbz_b(/*__m256i*/ _1) ((int)__builtin_lasx_xbz_b((v32u8)(_1)))
+
+#define __lasx_xbz_d(/*__m256i*/ _1) ((int)__builtin_lasx_xbz_d((v4u64)(_1)))
+
+#define __lasx_xbz_h(/*__m256i*/ _1) ((int)__builtin_lasx_xbz_h((v16u16)(_1)))
+
+#define __lasx_xbz_v(/*__m256i*/ _1) ((int)__builtin_lasx_xbz_v((v32u8)(_1)))
+
+#define __lasx_xbz_w(/*__m256i*/ _1) ((int)__builtin_lasx_xbz_w((v8u32)(_1)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_caf_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_caf_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_caf_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_caf_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_ceq_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_ceq_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_ceq_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_ceq_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_cle_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_cle_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_cle_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_cle_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_clt_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_clt_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_clt_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_clt_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_cne_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_cne_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_cne_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_cne_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_cor_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_cor_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_cor_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_cor_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_cueq_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_cueq_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_cueq_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_cueq_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_cule_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_cule_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_cule_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_cule_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_cult_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_cult_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_cult_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_cult_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_cun_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_cun_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_cune_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_cune_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_cune_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_cune_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_cun_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_cun_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_saf_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_saf_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_saf_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_saf_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_seq_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_seq_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_seq_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_seq_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_sle_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_sle_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_sle_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_sle_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_slt_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_slt_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_slt_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_slt_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_sne_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_sne_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_sne_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_sne_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_sor_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_sor_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_sor_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_sor_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_sueq_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_sueq_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_sueq_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_sueq_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_sule_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_sule_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_sule_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_sule_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_sult_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_sult_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_sult_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_sult_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_sun_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_sun_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_sune_d(__m256d _1, __m256d _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_sune_d((v4f64)_1, (v4f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_sune_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_sune_s((v8f32)_1, (v8f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m256i
+ __lasx_xvfcmp_sun_s(__m256 _1, __m256 _2) {
+ return (__m256i)__builtin_lasx_xvfcmp_sun_s((v8f32)_1, (v8f32)_2);
+}
+
+#define __lasx_xvpickve_d_f(/*__m256d*/ _1, /*ui2*/ _2) \
+ ((__m256d)__builtin_lasx_xvpickve_d_f((v4f64)(_1), (_2)))
+
+#define __lasx_xvpickve_w_f(/*__m256*/ _1, /*ui3*/ _2) \
+ ((__m256)__builtin_lasx_xvpickve_w_f((v8f32)(_1), (_2)))
+
+#define __lasx_xvrepli_b(/*si10*/ _1) ((__m256i)__builtin_lasx_xvrepli_b((_1)))
+
+#define __lasx_xvrepli_d(/*si10*/ _1) ((__m256i)__builtin_lasx_xvrepli_d((_1)))
+
+#define __lasx_xvrepli_h(/*si10*/ _1) ((__m256i)__builtin_lasx_xvrepli_h((_1)))
+
+#define __lasx_xvrepli_w(/*si10*/ _1) ((__m256i)__builtin_lasx_xvrepli_w((_1)))
+
+#endif /* defined(__loongarch_asx). */
+#endif /* _LOONGSON_ASXINTRIN_H. */
diff --git a/contrib/llvm-project/clang/lib/Headers/limits.h b/contrib/llvm-project/clang/lib/Headers/limits.h
index c653580bac4e..15e6bbe0abcf 100644
--- a/contrib/llvm-project/clang/lib/Headers/limits.h
+++ b/contrib/llvm-project/clang/lib/Headers/limits.h
@@ -52,7 +52,11 @@
#define LONG_MIN (-__LONG_MAX__ -1L)
#define UCHAR_MAX (__SCHAR_MAX__*2 +1)
-#define USHRT_MAX (__SHRT_MAX__ *2 +1)
+#if __SHRT_WIDTH__ < __INT_WIDTH__
+#define USHRT_MAX (__SHRT_MAX__ * 2 + 1)
+#else
+#define USHRT_MAX (__SHRT_MAX__ * 2U + 1U)
+#endif
#define UINT_MAX (__INT_MAX__ *2U +1U)
#define ULONG_MAX (__LONG_MAX__ *2UL+1UL)
@@ -62,6 +66,24 @@
#define CHAR_BIT __CHAR_BIT__
+/* C23 5.2.4.2.1 */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
+#define BOOL_WIDTH __BOOL_WIDTH__
+#define CHAR_WIDTH CHAR_BIT
+#define SCHAR_WIDTH CHAR_BIT
+#define UCHAR_WIDTH CHAR_BIT
+#define USHRT_WIDTH __SHRT_WIDTH__
+#define SHRT_WIDTH __SHRT_WIDTH__
+#define UINT_WIDTH __INT_WIDTH__
+#define INT_WIDTH __INT_WIDTH__
+#define ULONG_WIDTH __LONG_WIDTH__
+#define LONG_WIDTH __LONG_WIDTH__
+#define ULLONG_WIDTH __LLONG_WIDTH__
+#define LLONG_WIDTH __LLONG_WIDTH__
+
+#define BITINT_MAXWIDTH __BITINT_MAXWIDTH__
+#endif
+
#ifdef __CHAR_UNSIGNED__ /* -funsigned-char */
#define CHAR_MIN 0
#define CHAR_MAX UCHAR_MAX
@@ -73,7 +95,8 @@
/* C99 5.2.4.2.1: Added long long.
C++11 18.3.3.2: same contents as the Standard C Library header <limits.h>.
*/
-#if __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \
+ (defined(__cplusplus) && __cplusplus >= 201103L)
#undef LLONG_MIN
#undef LLONG_MAX
diff --git a/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/assert.h b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/assert.h
new file mode 100644
index 000000000000..de650ca8442a
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/assert.h
@@ -0,0 +1,34 @@
+//===-- Wrapper for C standard assert.h declarations on the GPU ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLANG_LLVM_LIBC_WRAPPERS_ASSERT_H__
+#define __CLANG_LLVM_LIBC_WRAPPERS_ASSERT_H__
+
+#if !defined(_OPENMP) && !defined(__HIP__) && !defined(__CUDA__)
+#error "This file is for GPU offloading compilation only"
+#endif
+
+#include_next <assert.h>
+
+#if __has_include(<llvm-libc-decls/assert.h>)
+
+#if defined(__HIP__) || defined(__CUDA__)
+#define __LIBC_ATTRS __attribute__((device))
+#endif
+
+#pragma omp begin declare target
+
+#include <llvm-libc-decls/assert.h>
+
+#pragma omp end declare target
+
+#undef __LIBC_ATTRS
+
+#endif
+
+#endif // __CLANG_LLVM_LIBC_WRAPPERS_ASSERT_H__
diff --git a/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/ctype.h b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/ctype.h
new file mode 100644
index 000000000000..49c2af93471b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/ctype.h
@@ -0,0 +1,102 @@
+//===-- Wrapper for C standard ctype.h declarations on the GPU ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLANG_LLVM_LIBC_WRAPPERS_CTYPE_H__
+#define __CLANG_LLVM_LIBC_WRAPPERS_CTYPE_H__
+
+#if !defined(_OPENMP) && !defined(__HIP__) && !defined(__CUDA__)
+#error "This file is for GPU offloading compilation only"
+#endif
+
+// The GNU headers like to define 'toupper' and 'tolower' redundantly. This is
+// necessary to prevent it from doing that and remapping our implementation.
+#if (defined(__NVPTX__) || defined(__AMDGPU__)) && defined(__GLIBC__)
+#pragma push_macro("__USE_EXTERN_INLINES")
+#undef __USE_EXTERN_INLINES
+#endif
+
+#include_next <ctype.h>
+
+#if (defined(__NVPTX__) || defined(__AMDGPU__)) && defined(__GLIBC__)
+#pragma pop_macro("__USE_EXTERN_INLINES")
+#endif
+
+#if __has_include(<llvm-libc-decls/ctype.h>)
+
+#if defined(__HIP__) || defined(__CUDA__)
+#define __LIBC_ATTRS __attribute__((device))
+#endif
+
+// The GNU headers like to provide these as macros, we need to undefine them so
+// they do not conflict with the following definitions for the GPU.
+
+#pragma push_macro("isalnum")
+#pragma push_macro("isalpha")
+#pragma push_macro("isascii")
+#pragma push_macro("isblank")
+#pragma push_macro("iscntrl")
+#pragma push_macro("isdigit")
+#pragma push_macro("isgraph")
+#pragma push_macro("islower")
+#pragma push_macro("isprint")
+#pragma push_macro("ispunct")
+#pragma push_macro("isspace")
+#pragma push_macro("isupper")
+#pragma push_macro("isxdigit")
+#pragma push_macro("toascii")
+#pragma push_macro("tolower")
+#pragma push_macro("toupper")
+
+#undef isalnum
+#undef isalpha
+#undef isascii
+#undef iscntrl
+#undef isdigit
+#undef islower
+#undef isgraph
+#undef isprint
+#undef ispunct
+#undef isspace
+#undef isupper
+#undef isblank
+#undef isxdigit
+#undef toascii
+#undef tolower
+#undef toupper
+
+#pragma omp begin declare target
+
+#include <llvm-libc-decls/ctype.h>
+
+#pragma omp end declare target
+
+// Restore the original macros when compiling on the host.
+#if !defined(__NVPTX__) && !defined(__AMDGPU__)
+#pragma pop_macro("isalnum")
+#pragma pop_macro("isalpha")
+#pragma pop_macro("isascii")
+#pragma pop_macro("isblank")
+#pragma pop_macro("iscntrl")
+#pragma pop_macro("isdigit")
+#pragma pop_macro("isgraph")
+#pragma pop_macro("islower")
+#pragma pop_macro("isprint")
+#pragma pop_macro("ispunct")
+#pragma pop_macro("isspace")
+#pragma pop_macro("isupper")
+#pragma pop_macro("isxdigit")
+#pragma pop_macro("toascii")
+#pragma pop_macro("tolower")
+#pragma pop_macro("toupper")
+#endif
+
+#undef __LIBC_ATTRS
+
+#endif
+
+#endif // __CLANG_LLVM_LIBC_WRAPPERS_CTYPE_H__
diff --git a/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/inttypes.h b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/inttypes.h
new file mode 100644
index 000000000000..415f1e4b7bca
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/inttypes.h
@@ -0,0 +1,34 @@
+//===-- Wrapper for C standard inttypes.h declarations on the GPU ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLANG_LLVM_LIBC_WRAPPERS_INTTYPES_H__
+#define __CLANG_LLVM_LIBC_WRAPPERS_INTTYPES_H__
+
+#if !defined(_OPENMP) && !defined(__HIP__) && !defined(__CUDA__)
+#error "This file is for GPU offloading compilation only"
+#endif
+
+#include_next <inttypes.h>
+
+#if __has_include(<llvm-libc-decls/inttypes.h>)
+
+#if defined(__HIP__) || defined(__CUDA__)
+#define __LIBC_ATTRS __attribute__((device))
+#endif
+
+#pragma omp begin declare target
+
+#include <llvm-libc-decls/inttypes.h>
+
+#pragma omp end declare target
+
+#undef __LIBC_ATTRS
+
+#endif
+
+#endif // __CLANG_LLVM_LIBC_WRAPPERS_INTTYPES_H__
diff --git a/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/llvm-libc-decls/README.txt b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/llvm-libc-decls/README.txt
new file mode 100644
index 000000000000..e012cd9e2931
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/llvm-libc-decls/README.txt
@@ -0,0 +1,6 @@
+LLVM libc declarations
+======================
+
+This directory will be filled by the `libc` project with declarations that are
+availible on the device. Each declaration will use the `__LIBC_ATTRS` attribute
+to control emission on the device side.
diff --git a/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/stdio.h b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/stdio.h
new file mode 100644
index 000000000000..950f91b3763e
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/stdio.h
@@ -0,0 +1,80 @@
+//===-- Wrapper for C standard stdio.h declarations on the GPU ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#if !defined(_OPENMP) && !defined(__HIP__) && !defined(__CUDA__)
+#error "This file is for GPU offloading compilation only"
+#endif
+
+#include_next <stdio.h>
+
+// In some old versions of glibc, other standard headers sometimes define
+// special macros (e.g., __need_FILE) before including stdio.h to cause stdio.h
+// to produce special definitions. Future includes of stdio.h when those
+// special macros are undefined are expected to produce the normal definitions
+// from stdio.h.
+//
+// We do not apply our include guard (__CLANG_LLVM_LIBC_WRAPPERS_STDIO_H__)
+// unconditionally to the above include_next. Otherwise, after an occurrence of
+// the first glibc stdio.h use case described above, the include_next would be
+// skipped for remaining includes of stdio.h, leaving required symbols
+// undefined.
+//
+// We make the following assumptions to handle all use cases:
+//
+// 1. If the above include_next produces special glibc definitions, then (a) it
+// does not produce the normal definitions that we must intercept below, (b)
+// the current file was included from a glibc header that already defined
+// __GLIBC__ (usually by including glibc's <features.h>), and (c) the above
+// include_next does not define _STDIO_H. In that case, we skip the rest of
+// the current file and don't guard against future includes.
+// 2. If the above include_next produces the normal stdio.h definitions, then
+// either (a) __GLIBC__ is not defined because C headers are from some other
+// libc implementation or (b) the above include_next defines _STDIO_H to
+// prevent the above include_next from having any effect in the future.
+#if !defined(__GLIBC__) || defined(_STDIO_H)
+
+#ifndef __CLANG_LLVM_LIBC_WRAPPERS_STDIO_H__
+#define __CLANG_LLVM_LIBC_WRAPPERS_STDIO_H__
+
+#if __has_include(<llvm-libc-decls/stdio.h>)
+
+#if defined(__HIP__) || defined(__CUDA__)
+#define __LIBC_ATTRS __attribute__((device))
+#endif
+
+// Some headers provide these as macros. Temporarily undefine them so they do
+// not conflict with any definitions for the GPU.
+
+#pragma push_macro("stdout")
+#pragma push_macro("stdin")
+#pragma push_macro("stderr")
+
+#undef stdout
+#undef stderr
+#undef stdin
+
+#pragma omp begin declare target
+
+#include <llvm-libc-decls/stdio.h>
+
+#pragma omp end declare target
+
+#undef __LIBC_ATTRS
+
+// Restore the original macros when compiling on the host.
+#if !defined(__NVPTX__) && !defined(__AMDGPU__)
+#pragma pop_macro("stdout")
+#pragma pop_macro("stderr")
+#pragma pop_macro("stdin")
+#endif
+
+#endif
+
+#endif // __CLANG_LLVM_LIBC_WRAPPERS_STDIO_H__
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/stdlib.h b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/stdlib.h
new file mode 100644
index 000000000000..7fce5a1a31d5
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/stdlib.h
@@ -0,0 +1,45 @@
+//===-- Wrapper for C standard stdlib.h declarations on the GPU -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLANG_LLVM_LIBC_WRAPPERS_STDLIB_H__
+#define __CLANG_LLVM_LIBC_WRAPPERS_STDLIB_H__
+
+#if !defined(_OPENMP) && !defined(__HIP__) && !defined(__CUDA__)
+#error "This file is for GPU offloading compilation only"
+#endif
+
+#include_next <stdlib.h>
+
+#if __has_include(<llvm-libc-decls/stdlib.h>)
+
+#if defined(__HIP__) || defined(__CUDA__)
+#define __LIBC_ATTRS __attribute__((device))
+#endif
+
+#pragma omp begin declare target
+
+// The LLVM C library uses these named types so we forward declare them.
+typedef void (*__atexithandler_t)(void);
+typedef int (*__bsearchcompare_t)(const void *, const void *);
+typedef int (*__qsortcompare_t)(const void *, const void *);
+typedef int (*__qsortrcompare_t)(const void *, const void *, void *);
+
+// Enforce ABI compatibility with the structs used by the LLVM C library.
+_Static_assert(__builtin_offsetof(div_t, quot) == 0, "ABI mismatch!");
+_Static_assert(__builtin_offsetof(ldiv_t, quot) == 0, "ABI mismatch!");
+_Static_assert(__builtin_offsetof(lldiv_t, quot) == 0, "ABI mismatch!");
+
+#include <llvm-libc-decls/stdlib.h>
+
+#pragma omp end declare target
+
+#undef __LIBC_ATTRS
+
+#endif
+
+#endif // __CLANG_LLVM_LIBC_WRAPPERS_STDLIB_H__
diff --git a/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/string.h b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/string.h
new file mode 100644
index 000000000000..0ea49cb13760
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/string.h
@@ -0,0 +1,96 @@
+//===-- Wrapper for C standard string.h declarations on the GPU -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLANG_LLVM_LIBC_WRAPPERS_STRING_H__
+#define __CLANG_LLVM_LIBC_WRAPPERS_STRING_H__
+
+#if !defined(_OPENMP) && !defined(__HIP__) && !defined(__CUDA__)
+#error "This file is for GPU offloading compilation only"
+#endif
+
+#include_next <string.h>
+
+#if __has_include(<llvm-libc-decls/string.h>)
+
+#if defined(__HIP__) || defined(__CUDA__)
+#define __LIBC_ATTRS __attribute__((device))
+#endif
+
+#pragma omp begin declare target
+
+// The GNU headers provide C++ standard compliant headers when in C++ mode and
+// the LLVM libc does not. We need to manually provide the definitions using the
+// same prototypes.
+#if defined(__cplusplus) && defined(__GLIBC__) && \
+ defined(__CORRECT_ISO_CPP_STRING_H_PROTO)
+
+#ifndef __LIBC_ATTRS
+#define __LIBC_ATTRS
+#endif
+
+extern "C" {
+void *memccpy(void *__restrict, const void *__restrict, int,
+ size_t) __LIBC_ATTRS;
+int memcmp(const void *, const void *, size_t) __LIBC_ATTRS;
+void *memcpy(void *__restrict, const void *__restrict, size_t) __LIBC_ATTRS;
+void *memmem(const void *, size_t, const void *, size_t) __LIBC_ATTRS;
+void *memmove(void *, const void *, size_t) __LIBC_ATTRS;
+void *mempcpy(void *__restrict, const void *__restrict, size_t) __LIBC_ATTRS;
+void *memset(void *, int, size_t) __LIBC_ATTRS;
+char *stpcpy(char *__restrict, const char *__restrict) __LIBC_ATTRS;
+char *stpncpy(char *__restrict, const char *__restrict, size_t) __LIBC_ATTRS;
+char *strcat(char *__restrict, const char *__restrict) __LIBC_ATTRS;
+int strcmp(const char *, const char *) __LIBC_ATTRS;
+int strcoll(const char *, const char *) __LIBC_ATTRS;
+char *strcpy(char *__restrict, const char *__restrict) __LIBC_ATTRS;
+size_t strcspn(const char *, const char *) __LIBC_ATTRS;
+char *strdup(const char *) __LIBC_ATTRS;
+size_t strlen(const char *) __LIBC_ATTRS;
+char *strncat(char *__restrict, const char *__restrict, size_t) __LIBC_ATTRS;
+int strncmp(const char *, const char *, size_t) __LIBC_ATTRS;
+char *strncpy(char *__restrict, const char *__restrict, size_t) __LIBC_ATTRS;
+char *strndup(const char *, size_t) __LIBC_ATTRS;
+size_t strnlen(const char *, size_t) __LIBC_ATTRS;
+size_t strspn(const char *, const char *) __LIBC_ATTRS;
+char *strtok(char *__restrict, const char *__restrict) __LIBC_ATTRS;
+char *strtok_r(char *__restrict, const char *__restrict,
+ char **__restrict) __LIBC_ATTRS;
+size_t strxfrm(char *__restrict, const char *__restrict, size_t) __LIBC_ATTRS;
+}
+
+extern "C++" {
+char *strstr(char *, const char *) noexcept __LIBC_ATTRS;
+const char *strstr(const char *, const char *) noexcept __LIBC_ATTRS;
+char *strpbrk(char *, const char *) noexcept __LIBC_ATTRS;
+const char *strpbrk(const char *, const char *) noexcept __LIBC_ATTRS;
+char *strrchr(char *, int) noexcept __LIBC_ATTRS;
+const char *strrchr(const char *, int) noexcept __LIBC_ATTRS;
+char *strchr(char *, int) noexcept __LIBC_ATTRS;
+const char *strchr(const char *, int) noexcept __LIBC_ATTRS;
+char *strchrnul(char *, int) noexcept __LIBC_ATTRS;
+const char *strchrnul(const char *, int) noexcept __LIBC_ATTRS;
+char *strcasestr(char *, const char *) noexcept __LIBC_ATTRS;
+const char *strcasestr(const char *, const char *) noexcept __LIBC_ATTRS;
+void *memrchr(void *__s, int __c, size_t __n) noexcept __LIBC_ATTRS;
+const void *memrchr(const void *__s, int __c, size_t __n) noexcept __LIBC_ATTRS;
+void *memchr(void *__s, int __c, size_t __n) noexcept __LIBC_ATTRS;
+const void *memchr(const void *__s, int __c, size_t __n) noexcept __LIBC_ATTRS;
+}
+
+#else
+#include <llvm-libc-decls/string.h>
+
+#endif
+
+#pragma omp end declare target
+
+#undef __LIBC_ATTRS
+
+#endif
+
+#endif // __CLANG_LLVM_LIBC_WRAPPERS_STRING_H__
diff --git a/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/time.h b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/time.h
new file mode 100644
index 000000000000..9d1340c4eb74
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/llvm_libc_wrappers/time.h
@@ -0,0 +1,34 @@
+//===-- Wrapper for C standard time.h declarations on the GPU -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __CLANG_LLVM_LIBC_WRAPPERS_TIME_H__
+#define __CLANG_LLVM_LIBC_WRAPPERS_TIME_H__
+
+#if !defined(_OPENMP) && !defined(__HIP__) && !defined(__CUDA__)
+#error "This file is for GPU offloading compilation only"
+#endif
+
+#include_next <time.h>
+
+#if __has_include(<llvm-libc-decls/time.h>)
+
+#if defined(__HIP__) || defined(__CUDA__)
+#define __LIBC_ATTRS __attribute__((device))
+#endif
+
+#pragma omp begin declare target
+
+_Static_assert(sizeof(clock_t) == sizeof(long), "ABI mismatch!");
+
+#include <llvm-libc-decls/time.h>
+
+#pragma omp end declare target
+
+#endif
+
+#endif // __CLANG_LLVM_LIBC_WRAPPERS_TIME_H__
diff --git a/contrib/llvm-project/clang/lib/Headers/lsxintrin.h b/contrib/llvm-project/clang/lib/Headers/lsxintrin.h
new file mode 100644
index 000000000000..f347955ce6fb
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/lsxintrin.h
@@ -0,0 +1,3750 @@
+/*===------------- lsxintrin.h - LoongArch LSX intrinsics ------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _LOONGSON_SXINTRIN_H
+#define _LOONGSON_SXINTRIN_H 1
+
+#if defined(__loongarch_sx)
+typedef signed char v16i8 __attribute__((vector_size(16), aligned(16)));
+typedef signed char v16i8_b __attribute__((vector_size(16), aligned(1)));
+typedef unsigned char v16u8 __attribute__((vector_size(16), aligned(16)));
+typedef unsigned char v16u8_b __attribute__((vector_size(16), aligned(1)));
+typedef short v8i16 __attribute__((vector_size(16), aligned(16)));
+typedef short v8i16_h __attribute__((vector_size(16), aligned(2)));
+typedef unsigned short v8u16 __attribute__((vector_size(16), aligned(16)));
+typedef unsigned short v8u16_h __attribute__((vector_size(16), aligned(2)));
+typedef int v4i32 __attribute__((vector_size(16), aligned(16)));
+typedef int v4i32_w __attribute__((vector_size(16), aligned(4)));
+typedef unsigned int v4u32 __attribute__((vector_size(16), aligned(16)));
+typedef unsigned int v4u32_w __attribute__((vector_size(16), aligned(4)));
+typedef long long v2i64 __attribute__((vector_size(16), aligned(16)));
+typedef long long v2i64_d __attribute__((vector_size(16), aligned(8)));
+typedef unsigned long long v2u64 __attribute__((vector_size(16), aligned(16)));
+typedef unsigned long long v2u64_d __attribute__((vector_size(16), aligned(8)));
+typedef float v4f32 __attribute__((vector_size(16), aligned(16)));
+typedef float v4f32_w __attribute__((vector_size(16), aligned(4)));
+typedef double v2f64 __attribute__((vector_size(16), aligned(16)));
+typedef double v2f64_d __attribute__((vector_size(16), aligned(8)));
+
+typedef long long __m128i __attribute__((__vector_size__(16), __may_alias__));
+typedef float __m128 __attribute__((__vector_size__(16), __may_alias__));
+typedef double __m128d __attribute__((__vector_size__(16), __may_alias__));
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsll_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsll_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsll_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsll_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsll_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsll_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsll_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsll_d((v2i64)_1, (v2i64)_2);
+}
+
+#define __lsx_vslli_b(/*__m128i*/ _1, /*ui3*/ _2) \
+ ((__m128i)__builtin_lsx_vslli_b((v16i8)(_1), (_2)))
+
+#define __lsx_vslli_h(/*__m128i*/ _1, /*ui4*/ _2) \
+ ((__m128i)__builtin_lsx_vslli_h((v8i16)(_1), (_2)))
+
+#define __lsx_vslli_w(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vslli_w((v4i32)(_1), (_2)))
+
+#define __lsx_vslli_d(/*__m128i*/ _1, /*ui6*/ _2) \
+ ((__m128i)__builtin_lsx_vslli_d((v2i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsra_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsra_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsra_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsra_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsra_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsra_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsra_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsra_d((v2i64)_1, (v2i64)_2);
+}
+
+#define __lsx_vsrai_b(/*__m128i*/ _1, /*ui3*/ _2) \
+ ((__m128i)__builtin_lsx_vsrai_b((v16i8)(_1), (_2)))
+
+#define __lsx_vsrai_h(/*__m128i*/ _1, /*ui4*/ _2) \
+ ((__m128i)__builtin_lsx_vsrai_h((v8i16)(_1), (_2)))
+
+#define __lsx_vsrai_w(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vsrai_w((v4i32)(_1), (_2)))
+
+#define __lsx_vsrai_d(/*__m128i*/ _1, /*ui6*/ _2) \
+ ((__m128i)__builtin_lsx_vsrai_d((v2i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsrar_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsrar_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsrar_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsrar_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsrar_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsrar_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsrar_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsrar_d((v2i64)_1, (v2i64)_2);
+}
+
+#define __lsx_vsrari_b(/*__m128i*/ _1, /*ui3*/ _2) \
+ ((__m128i)__builtin_lsx_vsrari_b((v16i8)(_1), (_2)))
+
+#define __lsx_vsrari_h(/*__m128i*/ _1, /*ui4*/ _2) \
+ ((__m128i)__builtin_lsx_vsrari_h((v8i16)(_1), (_2)))
+
+#define __lsx_vsrari_w(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vsrari_w((v4i32)(_1), (_2)))
+
+#define __lsx_vsrari_d(/*__m128i*/ _1, /*ui6*/ _2) \
+ ((__m128i)__builtin_lsx_vsrari_d((v2i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsrl_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsrl_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsrl_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsrl_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsrl_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsrl_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsrl_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsrl_d((v2i64)_1, (v2i64)_2);
+}
+
+#define __lsx_vsrli_b(/*__m128i*/ _1, /*ui3*/ _2) \
+ ((__m128i)__builtin_lsx_vsrli_b((v16i8)(_1), (_2)))
+
+#define __lsx_vsrli_h(/*__m128i*/ _1, /*ui4*/ _2) \
+ ((__m128i)__builtin_lsx_vsrli_h((v8i16)(_1), (_2)))
+
+#define __lsx_vsrli_w(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vsrli_w((v4i32)(_1), (_2)))
+
+#define __lsx_vsrli_d(/*__m128i*/ _1, /*ui6*/ _2) \
+ ((__m128i)__builtin_lsx_vsrli_d((v2i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsrlr_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsrlr_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsrlr_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsrlr_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsrlr_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsrlr_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsrlr_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsrlr_d((v2i64)_1, (v2i64)_2);
+}
+
+#define __lsx_vsrlri_b(/*__m128i*/ _1, /*ui3*/ _2) \
+ ((__m128i)__builtin_lsx_vsrlri_b((v16i8)(_1), (_2)))
+
+#define __lsx_vsrlri_h(/*__m128i*/ _1, /*ui4*/ _2) \
+ ((__m128i)__builtin_lsx_vsrlri_h((v8i16)(_1), (_2)))
+
+#define __lsx_vsrlri_w(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vsrlri_w((v4i32)(_1), (_2)))
+
+#define __lsx_vsrlri_d(/*__m128i*/ _1, /*ui6*/ _2) \
+ ((__m128i)__builtin_lsx_vsrlri_d((v2i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vbitclr_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vbitclr_b((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vbitclr_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vbitclr_h((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vbitclr_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vbitclr_w((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vbitclr_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vbitclr_d((v2u64)_1, (v2u64)_2);
+}
+
+#define __lsx_vbitclri_b(/*__m128i*/ _1, /*ui3*/ _2) \
+ ((__m128i)__builtin_lsx_vbitclri_b((v16u8)(_1), (_2)))
+
+#define __lsx_vbitclri_h(/*__m128i*/ _1, /*ui4*/ _2) \
+ ((__m128i)__builtin_lsx_vbitclri_h((v8u16)(_1), (_2)))
+
+#define __lsx_vbitclri_w(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vbitclri_w((v4u32)(_1), (_2)))
+
+#define __lsx_vbitclri_d(/*__m128i*/ _1, /*ui6*/ _2) \
+ ((__m128i)__builtin_lsx_vbitclri_d((v2u64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vbitset_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vbitset_b((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vbitset_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vbitset_h((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vbitset_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vbitset_w((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vbitset_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vbitset_d((v2u64)_1, (v2u64)_2);
+}
+
+#define __lsx_vbitseti_b(/*__m128i*/ _1, /*ui3*/ _2) \
+ ((__m128i)__builtin_lsx_vbitseti_b((v16u8)(_1), (_2)))
+
+#define __lsx_vbitseti_h(/*__m128i*/ _1, /*ui4*/ _2) \
+ ((__m128i)__builtin_lsx_vbitseti_h((v8u16)(_1), (_2)))
+
+#define __lsx_vbitseti_w(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vbitseti_w((v4u32)(_1), (_2)))
+
+#define __lsx_vbitseti_d(/*__m128i*/ _1, /*ui6*/ _2) \
+ ((__m128i)__builtin_lsx_vbitseti_d((v2u64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vbitrev_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vbitrev_b((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vbitrev_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vbitrev_h((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vbitrev_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vbitrev_w((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vbitrev_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vbitrev_d((v2u64)_1, (v2u64)_2);
+}
+
+#define __lsx_vbitrevi_b(/*__m128i*/ _1, /*ui3*/ _2) \
+ ((__m128i)__builtin_lsx_vbitrevi_b((v16u8)(_1), (_2)))
+
+#define __lsx_vbitrevi_h(/*__m128i*/ _1, /*ui4*/ _2) \
+ ((__m128i)__builtin_lsx_vbitrevi_h((v8u16)(_1), (_2)))
+
+#define __lsx_vbitrevi_w(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vbitrevi_w((v4u32)(_1), (_2)))
+
+#define __lsx_vbitrevi_d(/*__m128i*/ _1, /*ui6*/ _2) \
+ ((__m128i)__builtin_lsx_vbitrevi_d((v2u64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vadd_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vadd_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vadd_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vadd_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vadd_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vadd_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vadd_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vadd_d((v2i64)_1, (v2i64)_2);
+}
+
+#define __lsx_vaddi_bu(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vaddi_bu((v16i8)(_1), (_2)))
+
+#define __lsx_vaddi_hu(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vaddi_hu((v8i16)(_1), (_2)))
+
+#define __lsx_vaddi_wu(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vaddi_wu((v4i32)(_1), (_2)))
+
+#define __lsx_vaddi_du(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vaddi_du((v2i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsub_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsub_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsub_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsub_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsub_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsub_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsub_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsub_d((v2i64)_1, (v2i64)_2);
+}
+
+#define __lsx_vsubi_bu(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vsubi_bu((v16i8)(_1), (_2)))
+
+#define __lsx_vsubi_hu(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vsubi_hu((v8i16)(_1), (_2)))
+
+#define __lsx_vsubi_wu(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vsubi_wu((v4i32)(_1), (_2)))
+
+#define __lsx_vsubi_du(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vsubi_du((v2i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmax_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmax_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmax_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmax_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmax_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmax_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmax_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmax_d((v2i64)_1, (v2i64)_2);
+}
+
+#define __lsx_vmaxi_b(/*__m128i*/ _1, /*si5*/ _2) \
+ ((__m128i)__builtin_lsx_vmaxi_b((v16i8)(_1), (_2)))
+
+#define __lsx_vmaxi_h(/*__m128i*/ _1, /*si5*/ _2) \
+ ((__m128i)__builtin_lsx_vmaxi_h((v8i16)(_1), (_2)))
+
+#define __lsx_vmaxi_w(/*__m128i*/ _1, /*si5*/ _2) \
+ ((__m128i)__builtin_lsx_vmaxi_w((v4i32)(_1), (_2)))
+
+#define __lsx_vmaxi_d(/*__m128i*/ _1, /*si5*/ _2) \
+ ((__m128i)__builtin_lsx_vmaxi_d((v2i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmax_bu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmax_bu((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmax_hu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmax_hu((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmax_wu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmax_wu((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmax_du(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmax_du((v2u64)_1, (v2u64)_2);
+}
+
+#define __lsx_vmaxi_bu(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vmaxi_bu((v16u8)(_1), (_2)))
+
+#define __lsx_vmaxi_hu(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vmaxi_hu((v8u16)(_1), (_2)))
+
+#define __lsx_vmaxi_wu(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vmaxi_wu((v4u32)(_1), (_2)))
+
+#define __lsx_vmaxi_du(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vmaxi_du((v2u64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmin_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmin_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmin_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmin_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmin_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmin_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmin_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmin_d((v2i64)_1, (v2i64)_2);
+}
+
+#define __lsx_vmini_b(/*__m128i*/ _1, /*si5*/ _2) \
+ ((__m128i)__builtin_lsx_vmini_b((v16i8)(_1), (_2)))
+
+#define __lsx_vmini_h(/*__m128i*/ _1, /*si5*/ _2) \
+ ((__m128i)__builtin_lsx_vmini_h((v8i16)(_1), (_2)))
+
+#define __lsx_vmini_w(/*__m128i*/ _1, /*si5*/ _2) \
+ ((__m128i)__builtin_lsx_vmini_w((v4i32)(_1), (_2)))
+
+#define __lsx_vmini_d(/*__m128i*/ _1, /*si5*/ _2) \
+ ((__m128i)__builtin_lsx_vmini_d((v2i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmin_bu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmin_bu((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmin_hu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmin_hu((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmin_wu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmin_wu((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmin_du(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmin_du((v2u64)_1, (v2u64)_2);
+}
+
+#define __lsx_vmini_bu(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vmini_bu((v16u8)(_1), (_2)))
+
+#define __lsx_vmini_hu(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vmini_hu((v8u16)(_1), (_2)))
+
+#define __lsx_vmini_wu(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vmini_wu((v4u32)(_1), (_2)))
+
+#define __lsx_vmini_du(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vmini_du((v2u64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vseq_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vseq_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vseq_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vseq_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vseq_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vseq_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vseq_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vseq_d((v2i64)_1, (v2i64)_2);
+}
+
+#define __lsx_vseqi_b(/*__m128i*/ _1, /*si5*/ _2) \
+ ((__m128i)__builtin_lsx_vseqi_b((v16i8)(_1), (_2)))
+
+#define __lsx_vseqi_h(/*__m128i*/ _1, /*si5*/ _2) \
+ ((__m128i)__builtin_lsx_vseqi_h((v8i16)(_1), (_2)))
+
+#define __lsx_vseqi_w(/*__m128i*/ _1, /*si5*/ _2) \
+ ((__m128i)__builtin_lsx_vseqi_w((v4i32)(_1), (_2)))
+
+#define __lsx_vseqi_d(/*__m128i*/ _1, /*si5*/ _2) \
+ ((__m128i)__builtin_lsx_vseqi_d((v2i64)(_1), (_2)))
+
+#define __lsx_vslti_b(/*__m128i*/ _1, /*si5*/ _2) \
+ ((__m128i)__builtin_lsx_vslti_b((v16i8)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vslt_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vslt_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vslt_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vslt_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vslt_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vslt_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vslt_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vslt_d((v2i64)_1, (v2i64)_2);
+}
+
+#define __lsx_vslti_h(/*__m128i*/ _1, /*si5*/ _2) \
+ ((__m128i)__builtin_lsx_vslti_h((v8i16)(_1), (_2)))
+
+#define __lsx_vslti_w(/*__m128i*/ _1, /*si5*/ _2) \
+ ((__m128i)__builtin_lsx_vslti_w((v4i32)(_1), (_2)))
+
+#define __lsx_vslti_d(/*__m128i*/ _1, /*si5*/ _2) \
+ ((__m128i)__builtin_lsx_vslti_d((v2i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vslt_bu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vslt_bu((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vslt_hu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vslt_hu((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vslt_wu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vslt_wu((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vslt_du(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vslt_du((v2u64)_1, (v2u64)_2);
+}
+
+#define __lsx_vslti_bu(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vslti_bu((v16u8)(_1), (_2)))
+
+#define __lsx_vslti_hu(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vslti_hu((v8u16)(_1), (_2)))
+
+#define __lsx_vslti_wu(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vslti_wu((v4u32)(_1), (_2)))
+
+#define __lsx_vslti_du(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vslti_du((v2u64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsle_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsle_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsle_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsle_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsle_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsle_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsle_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsle_d((v2i64)_1, (v2i64)_2);
+}
+
+#define __lsx_vslei_b(/*__m128i*/ _1, /*si5*/ _2) \
+ ((__m128i)__builtin_lsx_vslei_b((v16i8)(_1), (_2)))
+
+#define __lsx_vslei_h(/*__m128i*/ _1, /*si5*/ _2) \
+ ((__m128i)__builtin_lsx_vslei_h((v8i16)(_1), (_2)))
+
+#define __lsx_vslei_w(/*__m128i*/ _1, /*si5*/ _2) \
+ ((__m128i)__builtin_lsx_vslei_w((v4i32)(_1), (_2)))
+
+#define __lsx_vslei_d(/*__m128i*/ _1, /*si5*/ _2) \
+ ((__m128i)__builtin_lsx_vslei_d((v2i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsle_bu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsle_bu((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsle_hu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsle_hu((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsle_wu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsle_wu((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsle_du(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsle_du((v2u64)_1, (v2u64)_2);
+}
+
+#define __lsx_vslei_bu(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vslei_bu((v16u8)(_1), (_2)))
+
+#define __lsx_vslei_hu(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vslei_hu((v8u16)(_1), (_2)))
+
+#define __lsx_vslei_wu(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vslei_wu((v4u32)(_1), (_2)))
+
+#define __lsx_vslei_du(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vslei_du((v2u64)(_1), (_2)))
+
+#define __lsx_vsat_b(/*__m128i*/ _1, /*ui3*/ _2) \
+ ((__m128i)__builtin_lsx_vsat_b((v16i8)(_1), (_2)))
+
+#define __lsx_vsat_h(/*__m128i*/ _1, /*ui4*/ _2) \
+ ((__m128i)__builtin_lsx_vsat_h((v8i16)(_1), (_2)))
+
+#define __lsx_vsat_w(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vsat_w((v4i32)(_1), (_2)))
+
+#define __lsx_vsat_d(/*__m128i*/ _1, /*ui6*/ _2) \
+ ((__m128i)__builtin_lsx_vsat_d((v2i64)(_1), (_2)))
+
+#define __lsx_vsat_bu(/*__m128i*/ _1, /*ui3*/ _2) \
+ ((__m128i)__builtin_lsx_vsat_bu((v16u8)(_1), (_2)))
+
+#define __lsx_vsat_hu(/*__m128i*/ _1, /*ui4*/ _2) \
+ ((__m128i)__builtin_lsx_vsat_hu((v8u16)(_1), (_2)))
+
+#define __lsx_vsat_wu(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vsat_wu((v4u32)(_1), (_2)))
+
+#define __lsx_vsat_du(/*__m128i*/ _1, /*ui6*/ _2) \
+ ((__m128i)__builtin_lsx_vsat_du((v2u64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vadda_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vadda_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vadda_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vadda_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vadda_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vadda_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vadda_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vadda_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsadd_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsadd_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsadd_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsadd_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsadd_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsadd_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsadd_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsadd_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsadd_bu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsadd_bu((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsadd_hu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsadd_hu((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsadd_wu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsadd_wu((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsadd_du(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsadd_du((v2u64)_1, (v2u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vavg_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vavg_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vavg_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vavg_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vavg_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vavg_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vavg_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vavg_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vavg_bu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vavg_bu((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vavg_hu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vavg_hu((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vavg_wu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vavg_wu((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vavg_du(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vavg_du((v2u64)_1, (v2u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vavgr_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vavgr_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vavgr_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vavgr_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vavgr_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vavgr_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vavgr_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vavgr_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vavgr_bu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vavgr_bu((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vavgr_hu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vavgr_hu((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vavgr_wu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vavgr_wu((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vavgr_du(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vavgr_du((v2u64)_1, (v2u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssub_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssub_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssub_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssub_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssub_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssub_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssub_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssub_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssub_bu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssub_bu((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssub_hu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssub_hu((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssub_wu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssub_wu((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssub_du(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssub_du((v2u64)_1, (v2u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vabsd_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vabsd_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vabsd_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vabsd_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vabsd_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vabsd_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vabsd_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vabsd_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vabsd_bu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vabsd_bu((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vabsd_hu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vabsd_hu((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vabsd_wu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vabsd_wu((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vabsd_du(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vabsd_du((v2u64)_1, (v2u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmul_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmul_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmul_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmul_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmul_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmul_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmul_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmul_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmadd_b(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmadd_b((v16i8)_1, (v16i8)_2, (v16i8)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmadd_h(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmadd_h((v8i16)_1, (v8i16)_2, (v8i16)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmadd_w(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmadd_w((v4i32)_1, (v4i32)_2, (v4i32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmadd_d(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmadd_d((v2i64)_1, (v2i64)_2, (v2i64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmsub_b(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmsub_b((v16i8)_1, (v16i8)_2, (v16i8)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmsub_h(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmsub_h((v8i16)_1, (v8i16)_2, (v8i16)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmsub_w(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmsub_w((v4i32)_1, (v4i32)_2, (v4i32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmsub_d(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmsub_d((v2i64)_1, (v2i64)_2, (v2i64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vdiv_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vdiv_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vdiv_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vdiv_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vdiv_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vdiv_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vdiv_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vdiv_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vdiv_bu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vdiv_bu((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vdiv_hu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vdiv_hu((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vdiv_wu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vdiv_wu((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vdiv_du(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vdiv_du((v2u64)_1, (v2u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vhaddw_h_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vhaddw_h_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vhaddw_w_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vhaddw_w_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vhaddw_d_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vhaddw_d_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vhaddw_hu_bu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vhaddw_hu_bu((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vhaddw_wu_hu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vhaddw_wu_hu((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vhaddw_du_wu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vhaddw_du_wu((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vhsubw_h_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vhsubw_h_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vhsubw_w_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vhsubw_w_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vhsubw_d_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vhsubw_d_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vhsubw_hu_bu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vhsubw_hu_bu((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vhsubw_wu_hu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vhsubw_wu_hu((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vhsubw_du_wu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vhsubw_du_wu((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmod_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmod_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmod_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmod_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmod_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmod_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmod_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmod_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmod_bu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmod_bu((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmod_hu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmod_hu((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmod_wu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmod_wu((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmod_du(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmod_du((v2u64)_1, (v2u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vreplve_b(__m128i _1, int _2) {
+ return (__m128i)__builtin_lsx_vreplve_b((v16i8)_1, (int)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vreplve_h(__m128i _1, int _2) {
+ return (__m128i)__builtin_lsx_vreplve_h((v8i16)_1, (int)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vreplve_w(__m128i _1, int _2) {
+ return (__m128i)__builtin_lsx_vreplve_w((v4i32)_1, (int)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vreplve_d(__m128i _1, int _2) {
+ return (__m128i)__builtin_lsx_vreplve_d((v2i64)_1, (int)_2);
+}
+
+#define __lsx_vreplvei_b(/*__m128i*/ _1, /*ui4*/ _2) \
+ ((__m128i)__builtin_lsx_vreplvei_b((v16i8)(_1), (_2)))
+
+#define __lsx_vreplvei_h(/*__m128i*/ _1, /*ui3*/ _2) \
+ ((__m128i)__builtin_lsx_vreplvei_h((v8i16)(_1), (_2)))
+
+#define __lsx_vreplvei_w(/*__m128i*/ _1, /*ui2*/ _2) \
+ ((__m128i)__builtin_lsx_vreplvei_w((v4i32)(_1), (_2)))
+
+#define __lsx_vreplvei_d(/*__m128i*/ _1, /*ui1*/ _2) \
+ ((__m128i)__builtin_lsx_vreplvei_d((v2i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vpickev_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vpickev_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vpickev_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vpickev_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vpickev_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vpickev_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vpickev_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vpickev_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vpickod_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vpickod_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vpickod_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vpickod_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vpickod_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vpickod_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vpickod_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vpickod_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vilvh_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vilvh_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vilvh_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vilvh_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vilvh_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vilvh_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vilvh_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vilvh_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vilvl_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vilvl_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vilvl_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vilvl_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vilvl_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vilvl_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vilvl_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vilvl_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vpackev_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vpackev_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vpackev_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vpackev_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vpackev_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vpackev_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vpackev_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vpackev_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vpackod_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vpackod_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vpackod_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vpackod_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vpackod_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vpackod_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vpackod_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vpackod_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vshuf_h(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vshuf_h((v8i16)_1, (v8i16)_2, (v8i16)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vshuf_w(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vshuf_w((v4i32)_1, (v4i32)_2, (v4i32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vshuf_d(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vshuf_d((v2i64)_1, (v2i64)_2, (v2i64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vand_v(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vand_v((v16u8)_1, (v16u8)_2);
+}
+
+#define __lsx_vandi_b(/*__m128i*/ _1, /*ui8*/ _2) \
+ ((__m128i)__builtin_lsx_vandi_b((v16u8)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vor_v(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vor_v((v16u8)_1, (v16u8)_2);
+}
+
+#define __lsx_vori_b(/*__m128i*/ _1, /*ui8*/ _2) \
+ ((__m128i)__builtin_lsx_vori_b((v16u8)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vnor_v(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vnor_v((v16u8)_1, (v16u8)_2);
+}
+
+#define __lsx_vnori_b(/*__m128i*/ _1, /*ui8*/ _2) \
+ ((__m128i)__builtin_lsx_vnori_b((v16u8)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vxor_v(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vxor_v((v16u8)_1, (v16u8)_2);
+}
+
+#define __lsx_vxori_b(/*__m128i*/ _1, /*ui8*/ _2) \
+ ((__m128i)__builtin_lsx_vxori_b((v16u8)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vbitsel_v(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vbitsel_v((v16u8)_1, (v16u8)_2, (v16u8)_3);
+}
+
+#define __lsx_vbitseli_b(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \
+ ((__m128i)__builtin_lsx_vbitseli_b((v16u8)(_1), (v16u8)(_2), (_3)))
+
+#define __lsx_vshuf4i_b(/*__m128i*/ _1, /*ui8*/ _2) \
+ ((__m128i)__builtin_lsx_vshuf4i_b((v16i8)(_1), (_2)))
+
+#define __lsx_vshuf4i_h(/*__m128i*/ _1, /*ui8*/ _2) \
+ ((__m128i)__builtin_lsx_vshuf4i_h((v8i16)(_1), (_2)))
+
+#define __lsx_vshuf4i_w(/*__m128i*/ _1, /*ui8*/ _2) \
+ ((__m128i)__builtin_lsx_vshuf4i_w((v4i32)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vreplgr2vr_b(int _1) {
+ return (__m128i)__builtin_lsx_vreplgr2vr_b((int)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vreplgr2vr_h(int _1) {
+ return (__m128i)__builtin_lsx_vreplgr2vr_h((int)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vreplgr2vr_w(int _1) {
+ return (__m128i)__builtin_lsx_vreplgr2vr_w((int)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vreplgr2vr_d(long int _1) {
+ return (__m128i)__builtin_lsx_vreplgr2vr_d((long int)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vpcnt_b(__m128i _1) {
+ return (__m128i)__builtin_lsx_vpcnt_b((v16i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vpcnt_h(__m128i _1) {
+ return (__m128i)__builtin_lsx_vpcnt_h((v8i16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vpcnt_w(__m128i _1) {
+ return (__m128i)__builtin_lsx_vpcnt_w((v4i32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vpcnt_d(__m128i _1) {
+ return (__m128i)__builtin_lsx_vpcnt_d((v2i64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vclo_b(__m128i _1) {
+ return (__m128i)__builtin_lsx_vclo_b((v16i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vclo_h(__m128i _1) {
+ return (__m128i)__builtin_lsx_vclo_h((v8i16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vclo_w(__m128i _1) {
+ return (__m128i)__builtin_lsx_vclo_w((v4i32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vclo_d(__m128i _1) {
+ return (__m128i)__builtin_lsx_vclo_d((v2i64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vclz_b(__m128i _1) {
+ return (__m128i)__builtin_lsx_vclz_b((v16i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vclz_h(__m128i _1) {
+ return (__m128i)__builtin_lsx_vclz_h((v8i16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vclz_w(__m128i _1) {
+ return (__m128i)__builtin_lsx_vclz_w((v4i32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vclz_d(__m128i _1) {
+ return (__m128i)__builtin_lsx_vclz_d((v2i64)_1);
+}
+
+#define __lsx_vpickve2gr_b(/*__m128i*/ _1, /*ui4*/ _2) \
+ ((int)__builtin_lsx_vpickve2gr_b((v16i8)(_1), (_2)))
+
+#define __lsx_vpickve2gr_h(/*__m128i*/ _1, /*ui3*/ _2) \
+ ((int)__builtin_lsx_vpickve2gr_h((v8i16)(_1), (_2)))
+
+#define __lsx_vpickve2gr_w(/*__m128i*/ _1, /*ui2*/ _2) \
+ ((int)__builtin_lsx_vpickve2gr_w((v4i32)(_1), (_2)))
+
+#define __lsx_vpickve2gr_d(/*__m128i*/ _1, /*ui1*/ _2) \
+ ((long int)__builtin_lsx_vpickve2gr_d((v2i64)(_1), (_2)))
+
+#define __lsx_vpickve2gr_bu(/*__m128i*/ _1, /*ui4*/ _2) \
+ ((unsigned int)__builtin_lsx_vpickve2gr_bu((v16i8)(_1), (_2)))
+
+#define __lsx_vpickve2gr_hu(/*__m128i*/ _1, /*ui3*/ _2) \
+ ((unsigned int)__builtin_lsx_vpickve2gr_hu((v8i16)(_1), (_2)))
+
+#define __lsx_vpickve2gr_wu(/*__m128i*/ _1, /*ui2*/ _2) \
+ ((unsigned int)__builtin_lsx_vpickve2gr_wu((v4i32)(_1), (_2)))
+
+#define __lsx_vpickve2gr_du(/*__m128i*/ _1, /*ui1*/ _2) \
+ ((unsigned long int)__builtin_lsx_vpickve2gr_du((v2i64)(_1), (_2)))
+
+#define __lsx_vinsgr2vr_b(/*__m128i*/ _1, /*int*/ _2, /*ui4*/ _3) \
+ ((__m128i)__builtin_lsx_vinsgr2vr_b((v16i8)(_1), (int)(_2), (_3)))
+
+#define __lsx_vinsgr2vr_h(/*__m128i*/ _1, /*int*/ _2, /*ui3*/ _3) \
+ ((__m128i)__builtin_lsx_vinsgr2vr_h((v8i16)(_1), (int)(_2), (_3)))
+
+#define __lsx_vinsgr2vr_w(/*__m128i*/ _1, /*int*/ _2, /*ui2*/ _3) \
+ ((__m128i)__builtin_lsx_vinsgr2vr_w((v4i32)(_1), (int)(_2), (_3)))
+
+#define __lsx_vinsgr2vr_d(/*__m128i*/ _1, /*long int*/ _2, /*ui1*/ _3) \
+ ((__m128i)__builtin_lsx_vinsgr2vr_d((v2i64)(_1), (long int)(_2), (_3)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfadd_s(__m128 _1, __m128 _2) {
+ return (__m128)__builtin_lsx_vfadd_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfadd_d(__m128d _1, __m128d _2) {
+ return (__m128d)__builtin_lsx_vfadd_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfsub_s(__m128 _1, __m128 _2) {
+ return (__m128)__builtin_lsx_vfsub_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfsub_d(__m128d _1, __m128d _2) {
+ return (__m128d)__builtin_lsx_vfsub_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfmul_s(__m128 _1, __m128 _2) {
+ return (__m128)__builtin_lsx_vfmul_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfmul_d(__m128d _1, __m128d _2) {
+ return (__m128d)__builtin_lsx_vfmul_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfdiv_s(__m128 _1, __m128 _2) {
+ return (__m128)__builtin_lsx_vfdiv_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfdiv_d(__m128d _1, __m128d _2) {
+ return (__m128d)__builtin_lsx_vfdiv_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcvt_h_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcvt_h_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfcvt_s_d(__m128d _1, __m128d _2) {
+ return (__m128)__builtin_lsx_vfcvt_s_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfmin_s(__m128 _1, __m128 _2) {
+ return (__m128)__builtin_lsx_vfmin_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfmin_d(__m128d _1, __m128d _2) {
+ return (__m128d)__builtin_lsx_vfmin_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfmina_s(__m128 _1, __m128 _2) {
+ return (__m128)__builtin_lsx_vfmina_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfmina_d(__m128d _1, __m128d _2) {
+ return (__m128d)__builtin_lsx_vfmina_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfmax_s(__m128 _1, __m128 _2) {
+ return (__m128)__builtin_lsx_vfmax_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfmax_d(__m128d _1, __m128d _2) {
+ return (__m128d)__builtin_lsx_vfmax_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfmaxa_s(__m128 _1, __m128 _2) {
+ return (__m128)__builtin_lsx_vfmaxa_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfmaxa_d(__m128d _1, __m128d _2) {
+ return (__m128d)__builtin_lsx_vfmaxa_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfclass_s(__m128 _1) {
+ return (__m128i)__builtin_lsx_vfclass_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfclass_d(__m128d _1) {
+ return (__m128i)__builtin_lsx_vfclass_d((v2f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfsqrt_s(__m128 _1) {
+ return (__m128)__builtin_lsx_vfsqrt_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfsqrt_d(__m128d _1) {
+ return (__m128d)__builtin_lsx_vfsqrt_d((v2f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfrecip_s(__m128 _1) {
+ return (__m128)__builtin_lsx_vfrecip_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfrecip_d(__m128d _1) {
+ return (__m128d)__builtin_lsx_vfrecip_d((v2f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfrecipe_s(__m128 _1) {
+ return (__m128)__builtin_lsx_vfrecipe_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfrecipe_d(__m128d _1) {
+ return (__m128d)__builtin_lsx_vfrecipe_d((v2f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfrint_s(__m128 _1) {
+ return (__m128)__builtin_lsx_vfrint_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfrint_d(__m128d _1) {
+ return (__m128d)__builtin_lsx_vfrint_d((v2f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfrsqrt_s(__m128 _1) {
+ return (__m128)__builtin_lsx_vfrsqrt_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfrsqrt_d(__m128d _1) {
+ return (__m128d)__builtin_lsx_vfrsqrt_d((v2f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfrsqrte_s(__m128 _1) {
+ return (__m128)__builtin_lsx_vfrsqrte_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfrsqrte_d(__m128d _1) {
+ return (__m128d)__builtin_lsx_vfrsqrte_d((v2f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vflogb_s(__m128 _1) {
+ return (__m128)__builtin_lsx_vflogb_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vflogb_d(__m128d _1) {
+ return (__m128d)__builtin_lsx_vflogb_d((v2f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfcvth_s_h(__m128i _1) {
+ return (__m128)__builtin_lsx_vfcvth_s_h((v8i16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfcvth_d_s(__m128 _1) {
+ return (__m128d)__builtin_lsx_vfcvth_d_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfcvtl_s_h(__m128i _1) {
+ return (__m128)__builtin_lsx_vfcvtl_s_h((v8i16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfcvtl_d_s(__m128 _1) {
+ return (__m128d)__builtin_lsx_vfcvtl_d_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftint_w_s(__m128 _1) {
+ return (__m128i)__builtin_lsx_vftint_w_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftint_l_d(__m128d _1) {
+ return (__m128i)__builtin_lsx_vftint_l_d((v2f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftint_wu_s(__m128 _1) {
+ return (__m128i)__builtin_lsx_vftint_wu_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftint_lu_d(__m128d _1) {
+ return (__m128i)__builtin_lsx_vftint_lu_d((v2f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintrz_w_s(__m128 _1) {
+ return (__m128i)__builtin_lsx_vftintrz_w_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintrz_l_d(__m128d _1) {
+ return (__m128i)__builtin_lsx_vftintrz_l_d((v2f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintrz_wu_s(__m128 _1) {
+ return (__m128i)__builtin_lsx_vftintrz_wu_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintrz_lu_d(__m128d _1) {
+ return (__m128i)__builtin_lsx_vftintrz_lu_d((v2f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vffint_s_w(__m128i _1) {
+ return (__m128)__builtin_lsx_vffint_s_w((v4i32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vffint_d_l(__m128i _1) {
+ return (__m128d)__builtin_lsx_vffint_d_l((v2i64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vffint_s_wu(__m128i _1) {
+ return (__m128)__builtin_lsx_vffint_s_wu((v4u32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vffint_d_lu(__m128i _1) {
+ return (__m128d)__builtin_lsx_vffint_d_lu((v2u64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vandn_v(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vandn_v((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vneg_b(__m128i _1) {
+ return (__m128i)__builtin_lsx_vneg_b((v16i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vneg_h(__m128i _1) {
+ return (__m128i)__builtin_lsx_vneg_h((v8i16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vneg_w(__m128i _1) {
+ return (__m128i)__builtin_lsx_vneg_w((v4i32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vneg_d(__m128i _1) {
+ return (__m128i)__builtin_lsx_vneg_d((v2i64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmuh_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmuh_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmuh_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmuh_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmuh_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmuh_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmuh_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmuh_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmuh_bu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmuh_bu((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmuh_hu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmuh_hu((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmuh_wu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmuh_wu((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmuh_du(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmuh_du((v2u64)_1, (v2u64)_2);
+}
+
+#define __lsx_vsllwil_h_b(/*__m128i*/ _1, /*ui3*/ _2) \
+ ((__m128i)__builtin_lsx_vsllwil_h_b((v16i8)(_1), (_2)))
+
+#define __lsx_vsllwil_w_h(/*__m128i*/ _1, /*ui4*/ _2) \
+ ((__m128i)__builtin_lsx_vsllwil_w_h((v8i16)(_1), (_2)))
+
+#define __lsx_vsllwil_d_w(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vsllwil_d_w((v4i32)(_1), (_2)))
+
+#define __lsx_vsllwil_hu_bu(/*__m128i*/ _1, /*ui3*/ _2) \
+ ((__m128i)__builtin_lsx_vsllwil_hu_bu((v16u8)(_1), (_2)))
+
+#define __lsx_vsllwil_wu_hu(/*__m128i*/ _1, /*ui4*/ _2) \
+ ((__m128i)__builtin_lsx_vsllwil_wu_hu((v8u16)(_1), (_2)))
+
+#define __lsx_vsllwil_du_wu(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vsllwil_du_wu((v4u32)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsran_b_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsran_b_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsran_h_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsran_h_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsran_w_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsran_w_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssran_b_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssran_b_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssran_h_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssran_h_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssran_w_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssran_w_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssran_bu_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssran_bu_h((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssran_hu_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssran_hu_w((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssran_wu_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssran_wu_d((v2u64)_1, (v2u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsrarn_b_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsrarn_b_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsrarn_h_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsrarn_h_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsrarn_w_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsrarn_w_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssrarn_b_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssrarn_b_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssrarn_h_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssrarn_h_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssrarn_w_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssrarn_w_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssrarn_bu_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssrarn_bu_h((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssrarn_hu_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssrarn_hu_w((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssrarn_wu_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssrarn_wu_d((v2u64)_1, (v2u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsrln_b_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsrln_b_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsrln_h_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsrln_h_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsrln_w_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsrln_w_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssrln_bu_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssrln_bu_h((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssrln_hu_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssrln_hu_w((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssrln_wu_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssrln_wu_d((v2u64)_1, (v2u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsrlrn_b_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsrlrn_b_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsrlrn_h_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsrlrn_h_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsrlrn_w_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsrlrn_w_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssrlrn_bu_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssrlrn_bu_h((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssrlrn_hu_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssrlrn_hu_w((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssrlrn_wu_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssrlrn_wu_d((v2u64)_1, (v2u64)_2);
+}
+
+#define __lsx_vfrstpi_b(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
+ ((__m128i)__builtin_lsx_vfrstpi_b((v16i8)(_1), (v16i8)(_2), (_3)))
+
+#define __lsx_vfrstpi_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
+ ((__m128i)__builtin_lsx_vfrstpi_h((v8i16)(_1), (v8i16)(_2), (_3)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfrstp_b(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vfrstp_b((v16i8)_1, (v16i8)_2, (v16i8)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfrstp_h(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vfrstp_h((v8i16)_1, (v8i16)_2, (v8i16)_3);
+}
+
+#define __lsx_vshuf4i_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \
+ ((__m128i)__builtin_lsx_vshuf4i_d((v2i64)(_1), (v2i64)(_2), (_3)))
+
+#define __lsx_vbsrl_v(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vbsrl_v((v16i8)(_1), (_2)))
+
+#define __lsx_vbsll_v(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vbsll_v((v16i8)(_1), (_2)))
+
+#define __lsx_vextrins_b(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \
+ ((__m128i)__builtin_lsx_vextrins_b((v16i8)(_1), (v16i8)(_2), (_3)))
+
+#define __lsx_vextrins_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \
+ ((__m128i)__builtin_lsx_vextrins_h((v8i16)(_1), (v8i16)(_2), (_3)))
+
+#define __lsx_vextrins_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \
+ ((__m128i)__builtin_lsx_vextrins_w((v4i32)(_1), (v4i32)(_2), (_3)))
+
+#define __lsx_vextrins_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \
+ ((__m128i)__builtin_lsx_vextrins_d((v2i64)(_1), (v2i64)(_2), (_3)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmskltz_b(__m128i _1) {
+ return (__m128i)__builtin_lsx_vmskltz_b((v16i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmskltz_h(__m128i _1) {
+ return (__m128i)__builtin_lsx_vmskltz_h((v8i16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmskltz_w(__m128i _1) {
+ return (__m128i)__builtin_lsx_vmskltz_w((v4i32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmskltz_d(__m128i _1) {
+ return (__m128i)__builtin_lsx_vmskltz_d((v2i64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsigncov_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsigncov_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsigncov_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsigncov_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsigncov_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsigncov_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsigncov_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsigncov_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfmadd_s(__m128 _1, __m128 _2, __m128 _3) {
+ return (__m128)__builtin_lsx_vfmadd_s((v4f32)_1, (v4f32)_2, (v4f32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfmadd_d(__m128d _1, __m128d _2, __m128d _3) {
+ return (__m128d)__builtin_lsx_vfmadd_d((v2f64)_1, (v2f64)_2, (v2f64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfmsub_s(__m128 _1, __m128 _2, __m128 _3) {
+ return (__m128)__builtin_lsx_vfmsub_s((v4f32)_1, (v4f32)_2, (v4f32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfmsub_d(__m128d _1, __m128d _2, __m128d _3) {
+ return (__m128d)__builtin_lsx_vfmsub_d((v2f64)_1, (v2f64)_2, (v2f64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfnmadd_s(__m128 _1, __m128 _2, __m128 _3) {
+ return (__m128)__builtin_lsx_vfnmadd_s((v4f32)_1, (v4f32)_2, (v4f32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfnmadd_d(__m128d _1, __m128d _2, __m128d _3) {
+ return (__m128d)__builtin_lsx_vfnmadd_d((v2f64)_1, (v2f64)_2, (v2f64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfnmsub_s(__m128 _1, __m128 _2, __m128 _3) {
+ return (__m128)__builtin_lsx_vfnmsub_s((v4f32)_1, (v4f32)_2, (v4f32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfnmsub_d(__m128d _1, __m128d _2, __m128d _3) {
+ return (__m128d)__builtin_lsx_vfnmsub_d((v2f64)_1, (v2f64)_2, (v2f64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintrne_w_s(__m128 _1) {
+ return (__m128i)__builtin_lsx_vftintrne_w_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintrne_l_d(__m128d _1) {
+ return (__m128i)__builtin_lsx_vftintrne_l_d((v2f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintrp_w_s(__m128 _1) {
+ return (__m128i)__builtin_lsx_vftintrp_w_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintrp_l_d(__m128d _1) {
+ return (__m128i)__builtin_lsx_vftintrp_l_d((v2f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintrm_w_s(__m128 _1) {
+ return (__m128i)__builtin_lsx_vftintrm_w_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintrm_l_d(__m128d _1) {
+ return (__m128i)__builtin_lsx_vftintrm_l_d((v2f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftint_w_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vftint_w_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vffint_s_l(__m128i _1, __m128i _2) {
+ return (__m128)__builtin_lsx_vffint_s_l((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintrz_w_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vftintrz_w_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintrp_w_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vftintrp_w_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintrm_w_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vftintrm_w_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintrne_w_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vftintrne_w_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintl_l_s(__m128 _1) {
+ return (__m128i)__builtin_lsx_vftintl_l_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftinth_l_s(__m128 _1) {
+ return (__m128i)__builtin_lsx_vftinth_l_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vffinth_d_w(__m128i _1) {
+ return (__m128d)__builtin_lsx_vffinth_d_w((v4i32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vffintl_d_w(__m128i _1) {
+ return (__m128d)__builtin_lsx_vffintl_d_w((v4i32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintrzl_l_s(__m128 _1) {
+ return (__m128i)__builtin_lsx_vftintrzl_l_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintrzh_l_s(__m128 _1) {
+ return (__m128i)__builtin_lsx_vftintrzh_l_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintrpl_l_s(__m128 _1) {
+ return (__m128i)__builtin_lsx_vftintrpl_l_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintrph_l_s(__m128 _1) {
+ return (__m128i)__builtin_lsx_vftintrph_l_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintrml_l_s(__m128 _1) {
+ return (__m128i)__builtin_lsx_vftintrml_l_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintrmh_l_s(__m128 _1) {
+ return (__m128i)__builtin_lsx_vftintrmh_l_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintrnel_l_s(__m128 _1) {
+ return (__m128i)__builtin_lsx_vftintrnel_l_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vftintrneh_l_s(__m128 _1) {
+ return (__m128i)__builtin_lsx_vftintrneh_l_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfrintrne_s(__m128 _1) {
+ return (__m128)__builtin_lsx_vfrintrne_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfrintrne_d(__m128d _1) {
+ return (__m128d)__builtin_lsx_vfrintrne_d((v2f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfrintrz_s(__m128 _1) {
+ return (__m128)__builtin_lsx_vfrintrz_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfrintrz_d(__m128d _1) {
+ return (__m128d)__builtin_lsx_vfrintrz_d((v2f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfrintrp_s(__m128 _1) {
+ return (__m128)__builtin_lsx_vfrintrp_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfrintrp_d(__m128d _1) {
+ return (__m128d)__builtin_lsx_vfrintrp_d((v2f64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128
+ __lsx_vfrintrm_s(__m128 _1) {
+ return (__m128)__builtin_lsx_vfrintrm_s((v4f32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128d
+ __lsx_vfrintrm_d(__m128d _1) {
+ return (__m128d)__builtin_lsx_vfrintrm_d((v2f64)_1);
+}
+
+#define __lsx_vstelm_b(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \
+ ((void)__builtin_lsx_vstelm_b((v16i8)(_1), (void *)(_2), (_3), (_4)))
+
+#define __lsx_vstelm_h(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \
+ ((void)__builtin_lsx_vstelm_h((v8i16)(_1), (void *)(_2), (_3), (_4)))
+
+#define __lsx_vstelm_w(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \
+ ((void)__builtin_lsx_vstelm_w((v4i32)(_1), (void *)(_2), (_3), (_4)))
+
+#define __lsx_vstelm_d(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \
+ ((void)__builtin_lsx_vstelm_d((v2i64)(_1), (void *)(_2), (_3), (_4)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwev_d_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwev_d_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwev_w_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwev_w_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwev_h_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwev_h_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwod_d_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwod_d_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwod_w_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwod_w_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwod_h_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwod_h_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwev_d_wu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwev_d_wu((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwev_w_hu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwev_w_hu((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwev_h_bu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwev_h_bu((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwod_d_wu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwod_d_wu((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwod_w_hu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwod_w_hu((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwod_h_bu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwod_h_bu((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwev_d_wu_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwev_d_wu_w((v4u32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwev_w_hu_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwev_w_hu_h((v8u16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwev_h_bu_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwev_h_bu_b((v16u8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwod_d_wu_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwod_d_wu_w((v4u32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwod_w_hu_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwod_w_hu_h((v8u16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwod_h_bu_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwod_h_bu_b((v16u8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsubwev_d_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsubwev_d_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsubwev_w_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsubwev_w_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsubwev_h_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsubwev_h_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsubwod_d_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsubwod_d_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsubwod_w_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsubwod_w_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsubwod_h_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsubwod_h_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsubwev_d_wu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsubwev_d_wu((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsubwev_w_hu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsubwev_w_hu((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsubwev_h_bu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsubwev_h_bu((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsubwod_d_wu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsubwod_d_wu((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsubwod_w_hu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsubwod_w_hu((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsubwod_h_bu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsubwod_h_bu((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwev_q_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwev_q_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwod_q_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwod_q_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwev_q_du(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwev_q_du((v2u64)_1, (v2u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwod_q_du(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwod_q_du((v2u64)_1, (v2u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsubwev_q_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsubwev_q_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsubwod_q_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsubwod_q_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsubwev_q_du(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsubwev_q_du((v2u64)_1, (v2u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsubwod_q_du(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsubwod_q_du((v2u64)_1, (v2u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwev_q_du_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwev_q_du_d((v2u64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vaddwod_q_du_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vaddwod_q_du_d((v2u64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwev_d_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwev_d_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwev_w_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwev_w_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwev_h_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwev_h_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwod_d_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwod_d_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwod_w_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwod_w_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwod_h_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwod_h_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwev_d_wu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwev_d_wu((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwev_w_hu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwev_w_hu((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwev_h_bu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwev_h_bu((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwod_d_wu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwod_d_wu((v4u32)_1, (v4u32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwod_w_hu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwod_w_hu((v8u16)_1, (v8u16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwod_h_bu(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwod_h_bu((v16u8)_1, (v16u8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwev_d_wu_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwev_d_wu_w((v4u32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwev_w_hu_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwev_w_hu_h((v8u16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwev_h_bu_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwev_h_bu_b((v16u8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwod_d_wu_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwod_d_wu_w((v4u32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwod_w_hu_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwod_w_hu_h((v8u16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwod_h_bu_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwod_h_bu_b((v16u8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwev_q_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwev_q_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwod_q_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwod_q_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwev_q_du(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwev_q_du((v2u64)_1, (v2u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwod_q_du(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwod_q_du((v2u64)_1, (v2u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwev_q_du_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwev_q_du_d((v2u64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmulwod_q_du_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vmulwod_q_du_d((v2u64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vhaddw_q_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vhaddw_q_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vhaddw_qu_du(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vhaddw_qu_du((v2u64)_1, (v2u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vhsubw_q_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vhsubw_q_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vhsubw_qu_du(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vhsubw_qu_du((v2u64)_1, (v2u64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwev_d_w(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwev_d_w((v2i64)_1, (v4i32)_2, (v4i32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwev_w_h(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwev_w_h((v4i32)_1, (v8i16)_2, (v8i16)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwev_h_b(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwev_h_b((v8i16)_1, (v16i8)_2, (v16i8)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwev_d_wu(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwev_d_wu((v2u64)_1, (v4u32)_2, (v4u32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwev_w_hu(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwev_w_hu((v4u32)_1, (v8u16)_2, (v8u16)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwev_h_bu(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwev_h_bu((v8u16)_1, (v16u8)_2, (v16u8)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwod_d_w(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwod_d_w((v2i64)_1, (v4i32)_2, (v4i32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwod_w_h(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwod_w_h((v4i32)_1, (v8i16)_2, (v8i16)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwod_h_b(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwod_h_b((v8i16)_1, (v16i8)_2, (v16i8)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwod_d_wu(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwod_d_wu((v2u64)_1, (v4u32)_2, (v4u32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwod_w_hu(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwod_w_hu((v4u32)_1, (v8u16)_2, (v8u16)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwod_h_bu(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwod_h_bu((v8u16)_1, (v16u8)_2, (v16u8)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwev_d_wu_w(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwev_d_wu_w((v2i64)_1, (v4u32)_2,
+ (v4i32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwev_w_hu_h(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwev_w_hu_h((v4i32)_1, (v8u16)_2,
+ (v8i16)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwev_h_bu_b(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwev_h_bu_b((v8i16)_1, (v16u8)_2,
+ (v16i8)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwod_d_wu_w(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwod_d_wu_w((v2i64)_1, (v4u32)_2,
+ (v4i32)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwod_w_hu_h(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwod_w_hu_h((v4i32)_1, (v8u16)_2,
+ (v8i16)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwod_h_bu_b(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwod_h_bu_b((v8i16)_1, (v16u8)_2,
+ (v16i8)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwev_q_d(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwev_q_d((v2i64)_1, (v2i64)_2, (v2i64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwod_q_d(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwod_q_d((v2i64)_1, (v2i64)_2, (v2i64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwev_q_du(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwev_q_du((v2u64)_1, (v2u64)_2, (v2u64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwod_q_du(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwod_q_du((v2u64)_1, (v2u64)_2, (v2u64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwev_q_du_d(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwev_q_du_d((v2i64)_1, (v2u64)_2,
+ (v2i64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmaddwod_q_du_d(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vmaddwod_q_du_d((v2i64)_1, (v2u64)_2,
+ (v2i64)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vrotr_b(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vrotr_b((v16i8)_1, (v16i8)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vrotr_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vrotr_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vrotr_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vrotr_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vrotr_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vrotr_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vadd_q(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vadd_q((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vsub_q(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vsub_q((v2i64)_1, (v2i64)_2);
+}
+
+#define __lsx_vldrepl_b(/*void **/ _1, /*si12*/ _2) \
+ ((__m128i)__builtin_lsx_vldrepl_b((void const *)(_1), (_2)))
+
+#define __lsx_vldrepl_h(/*void **/ _1, /*si11*/ _2) \
+ ((__m128i)__builtin_lsx_vldrepl_h((void const *)(_1), (_2)))
+
+#define __lsx_vldrepl_w(/*void **/ _1, /*si10*/ _2) \
+ ((__m128i)__builtin_lsx_vldrepl_w((void const *)(_1), (_2)))
+
+#define __lsx_vldrepl_d(/*void **/ _1, /*si9*/ _2) \
+ ((__m128i)__builtin_lsx_vldrepl_d((void const *)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmskgez_b(__m128i _1) {
+ return (__m128i)__builtin_lsx_vmskgez_b((v16i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vmsknz_b(__m128i _1) {
+ return (__m128i)__builtin_lsx_vmsknz_b((v16i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vexth_h_b(__m128i _1) {
+ return (__m128i)__builtin_lsx_vexth_h_b((v16i8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vexth_w_h(__m128i _1) {
+ return (__m128i)__builtin_lsx_vexth_w_h((v8i16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vexth_d_w(__m128i _1) {
+ return (__m128i)__builtin_lsx_vexth_d_w((v4i32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vexth_q_d(__m128i _1) {
+ return (__m128i)__builtin_lsx_vexth_q_d((v2i64)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vexth_hu_bu(__m128i _1) {
+ return (__m128i)__builtin_lsx_vexth_hu_bu((v16u8)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vexth_wu_hu(__m128i _1) {
+ return (__m128i)__builtin_lsx_vexth_wu_hu((v8u16)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vexth_du_wu(__m128i _1) {
+ return (__m128i)__builtin_lsx_vexth_du_wu((v4u32)_1);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vexth_qu_du(__m128i _1) {
+ return (__m128i)__builtin_lsx_vexth_qu_du((v2u64)_1);
+}
+
+#define __lsx_vrotri_b(/*__m128i*/ _1, /*ui3*/ _2) \
+ ((__m128i)__builtin_lsx_vrotri_b((v16i8)(_1), (_2)))
+
+#define __lsx_vrotri_h(/*__m128i*/ _1, /*ui4*/ _2) \
+ ((__m128i)__builtin_lsx_vrotri_h((v8i16)(_1), (_2)))
+
+#define __lsx_vrotri_w(/*__m128i*/ _1, /*ui5*/ _2) \
+ ((__m128i)__builtin_lsx_vrotri_w((v4i32)(_1), (_2)))
+
+#define __lsx_vrotri_d(/*__m128i*/ _1, /*ui6*/ _2) \
+ ((__m128i)__builtin_lsx_vrotri_d((v2i64)(_1), (_2)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vextl_q_d(__m128i _1) {
+ return (__m128i)__builtin_lsx_vextl_q_d((v2i64)_1);
+}
+
+#define __lsx_vsrlni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
+ ((__m128i)__builtin_lsx_vsrlni_b_h((v16i8)(_1), (v16i8)(_2), (_3)))
+
+#define __lsx_vsrlni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
+ ((__m128i)__builtin_lsx_vsrlni_h_w((v8i16)(_1), (v8i16)(_2), (_3)))
+
+#define __lsx_vsrlni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
+ ((__m128i)__builtin_lsx_vsrlni_w_d((v4i32)(_1), (v4i32)(_2), (_3)))
+
+#define __lsx_vsrlni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
+ ((__m128i)__builtin_lsx_vsrlni_d_q((v2i64)(_1), (v2i64)(_2), (_3)))
+
+#define __lsx_vsrlrni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
+ ((__m128i)__builtin_lsx_vsrlrni_b_h((v16i8)(_1), (v16i8)(_2), (_3)))
+
+#define __lsx_vsrlrni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
+ ((__m128i)__builtin_lsx_vsrlrni_h_w((v8i16)(_1), (v8i16)(_2), (_3)))
+
+#define __lsx_vsrlrni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
+ ((__m128i)__builtin_lsx_vsrlrni_w_d((v4i32)(_1), (v4i32)(_2), (_3)))
+
+#define __lsx_vsrlrni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
+ ((__m128i)__builtin_lsx_vsrlrni_d_q((v2i64)(_1), (v2i64)(_2), (_3)))
+
+#define __lsx_vssrlni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
+ ((__m128i)__builtin_lsx_vssrlni_b_h((v16i8)(_1), (v16i8)(_2), (_3)))
+
+#define __lsx_vssrlni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
+ ((__m128i)__builtin_lsx_vssrlni_h_w((v8i16)(_1), (v8i16)(_2), (_3)))
+
+#define __lsx_vssrlni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
+ ((__m128i)__builtin_lsx_vssrlni_w_d((v4i32)(_1), (v4i32)(_2), (_3)))
+
+#define __lsx_vssrlni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
+ ((__m128i)__builtin_lsx_vssrlni_d_q((v2i64)(_1), (v2i64)(_2), (_3)))
+
+#define __lsx_vssrlni_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
+ ((__m128i)__builtin_lsx_vssrlni_bu_h((v16u8)(_1), (v16i8)(_2), (_3)))
+
+#define __lsx_vssrlni_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
+ ((__m128i)__builtin_lsx_vssrlni_hu_w((v8u16)(_1), (v8i16)(_2), (_3)))
+
+#define __lsx_vssrlni_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
+ ((__m128i)__builtin_lsx_vssrlni_wu_d((v4u32)(_1), (v4i32)(_2), (_3)))
+
+#define __lsx_vssrlni_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
+ ((__m128i)__builtin_lsx_vssrlni_du_q((v2u64)(_1), (v2i64)(_2), (_3)))
+
+#define __lsx_vssrlrni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
+ ((__m128i)__builtin_lsx_vssrlrni_b_h((v16i8)(_1), (v16i8)(_2), (_3)))
+
+#define __lsx_vssrlrni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
+ ((__m128i)__builtin_lsx_vssrlrni_h_w((v8i16)(_1), (v8i16)(_2), (_3)))
+
+#define __lsx_vssrlrni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
+ ((__m128i)__builtin_lsx_vssrlrni_w_d((v4i32)(_1), (v4i32)(_2), (_3)))
+
+#define __lsx_vssrlrni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
+ ((__m128i)__builtin_lsx_vssrlrni_d_q((v2i64)(_1), (v2i64)(_2), (_3)))
+
+#define __lsx_vssrlrni_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
+ ((__m128i)__builtin_lsx_vssrlrni_bu_h((v16u8)(_1), (v16i8)(_2), (_3)))
+
+#define __lsx_vssrlrni_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
+ ((__m128i)__builtin_lsx_vssrlrni_hu_w((v8u16)(_1), (v8i16)(_2), (_3)))
+
+#define __lsx_vssrlrni_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
+ ((__m128i)__builtin_lsx_vssrlrni_wu_d((v4u32)(_1), (v4i32)(_2), (_3)))
+
+#define __lsx_vssrlrni_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
+ ((__m128i)__builtin_lsx_vssrlrni_du_q((v2u64)(_1), (v2i64)(_2), (_3)))
+
+#define __lsx_vsrani_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
+ ((__m128i)__builtin_lsx_vsrani_b_h((v16i8)(_1), (v16i8)(_2), (_3)))
+
+#define __lsx_vsrani_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
+ ((__m128i)__builtin_lsx_vsrani_h_w((v8i16)(_1), (v8i16)(_2), (_3)))
+
+#define __lsx_vsrani_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
+ ((__m128i)__builtin_lsx_vsrani_w_d((v4i32)(_1), (v4i32)(_2), (_3)))
+
+#define __lsx_vsrani_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
+ ((__m128i)__builtin_lsx_vsrani_d_q((v2i64)(_1), (v2i64)(_2), (_3)))
+
+#define __lsx_vsrarni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
+ ((__m128i)__builtin_lsx_vsrarni_b_h((v16i8)(_1), (v16i8)(_2), (_3)))
+
+#define __lsx_vsrarni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
+ ((__m128i)__builtin_lsx_vsrarni_h_w((v8i16)(_1), (v8i16)(_2), (_3)))
+
+#define __lsx_vsrarni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
+ ((__m128i)__builtin_lsx_vsrarni_w_d((v4i32)(_1), (v4i32)(_2), (_3)))
+
+#define __lsx_vsrarni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
+ ((__m128i)__builtin_lsx_vsrarni_d_q((v2i64)(_1), (v2i64)(_2), (_3)))
+
+#define __lsx_vssrani_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
+ ((__m128i)__builtin_lsx_vssrani_b_h((v16i8)(_1), (v16i8)(_2), (_3)))
+
+#define __lsx_vssrani_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
+ ((__m128i)__builtin_lsx_vssrani_h_w((v8i16)(_1), (v8i16)(_2), (_3)))
+
+#define __lsx_vssrani_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
+ ((__m128i)__builtin_lsx_vssrani_w_d((v4i32)(_1), (v4i32)(_2), (_3)))
+
+#define __lsx_vssrani_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
+ ((__m128i)__builtin_lsx_vssrani_d_q((v2i64)(_1), (v2i64)(_2), (_3)))
+
+#define __lsx_vssrani_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
+ ((__m128i)__builtin_lsx_vssrani_bu_h((v16u8)(_1), (v16i8)(_2), (_3)))
+
+#define __lsx_vssrani_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
+ ((__m128i)__builtin_lsx_vssrani_hu_w((v8u16)(_1), (v8i16)(_2), (_3)))
+
+#define __lsx_vssrani_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
+ ((__m128i)__builtin_lsx_vssrani_wu_d((v4u32)(_1), (v4i32)(_2), (_3)))
+
+#define __lsx_vssrani_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
+ ((__m128i)__builtin_lsx_vssrani_du_q((v2u64)(_1), (v2i64)(_2), (_3)))
+
+#define __lsx_vssrarni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
+ ((__m128i)__builtin_lsx_vssrarni_b_h((v16i8)(_1), (v16i8)(_2), (_3)))
+
+#define __lsx_vssrarni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
+ ((__m128i)__builtin_lsx_vssrarni_h_w((v8i16)(_1), (v8i16)(_2), (_3)))
+
+#define __lsx_vssrarni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
+ ((__m128i)__builtin_lsx_vssrarni_w_d((v4i32)(_1), (v4i32)(_2), (_3)))
+
+#define __lsx_vssrarni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
+ ((__m128i)__builtin_lsx_vssrarni_d_q((v2i64)(_1), (v2i64)(_2), (_3)))
+
+#define __lsx_vssrarni_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
+ ((__m128i)__builtin_lsx_vssrarni_bu_h((v16u8)(_1), (v16i8)(_2), (_3)))
+
+#define __lsx_vssrarni_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
+ ((__m128i)__builtin_lsx_vssrarni_hu_w((v8u16)(_1), (v8i16)(_2), (_3)))
+
+#define __lsx_vssrarni_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
+ ((__m128i)__builtin_lsx_vssrarni_wu_d((v4u32)(_1), (v4i32)(_2), (_3)))
+
+#define __lsx_vssrarni_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
+ ((__m128i)__builtin_lsx_vssrarni_du_q((v2u64)(_1), (v2i64)(_2), (_3)))
+
+#define __lsx_vpermi_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \
+ ((__m128i)__builtin_lsx_vpermi_w((v4i32)(_1), (v4i32)(_2), (_3)))
+
+#define __lsx_vld(/*void **/ _1, /*si12*/ _2) \
+ ((__m128i)__builtin_lsx_vld((void const *)(_1), (_2)))
+
+#define __lsx_vst(/*__m128i*/ _1, /*void **/ _2, /*si12*/ _3) \
+ ((void)__builtin_lsx_vst((v16i8)(_1), (void *)(_2), (_3)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssrlrn_b_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssrlrn_b_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssrlrn_h_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssrlrn_h_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssrlrn_w_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssrlrn_w_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssrln_b_h(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssrln_b_h((v8i16)_1, (v8i16)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssrln_h_w(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssrln_h_w((v4i32)_1, (v4i32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vssrln_w_d(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vssrln_w_d((v2i64)_1, (v2i64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vorn_v(__m128i _1, __m128i _2) {
+ return (__m128i)__builtin_lsx_vorn_v((v16i8)_1, (v16i8)_2);
+}
+
+#define __lsx_vldi(/*i13*/ _1) ((__m128i)__builtin_lsx_vldi((_1)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vshuf_b(__m128i _1, __m128i _2, __m128i _3) {
+ return (__m128i)__builtin_lsx_vshuf_b((v16i8)_1, (v16i8)_2, (v16i8)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vldx(void const *_1, long int _2) {
+ return (__m128i)__builtin_lsx_vldx((void const *)_1, (long int)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) void
+ __lsx_vstx(__m128i _1, void *_2, long int _3) {
+ return (void)__builtin_lsx_vstx((v16i8)_1, (void *)_2, (long int)_3);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vextl_qu_du(__m128i _1) {
+ return (__m128i)__builtin_lsx_vextl_qu_du((v2u64)_1);
+}
+
+#define __lsx_bnz_b(/*__m128i*/ _1) ((int)__builtin_lsx_bnz_b((v16u8)(_1)))
+
+#define __lsx_bnz_d(/*__m128i*/ _1) ((int)__builtin_lsx_bnz_d((v2u64)(_1)))
+
+#define __lsx_bnz_h(/*__m128i*/ _1) ((int)__builtin_lsx_bnz_h((v8u16)(_1)))
+
+#define __lsx_bnz_v(/*__m128i*/ _1) ((int)__builtin_lsx_bnz_v((v16u8)(_1)))
+
+#define __lsx_bnz_w(/*__m128i*/ _1) ((int)__builtin_lsx_bnz_w((v4u32)(_1)))
+
+#define __lsx_bz_b(/*__m128i*/ _1) ((int)__builtin_lsx_bz_b((v16u8)(_1)))
+
+#define __lsx_bz_d(/*__m128i*/ _1) ((int)__builtin_lsx_bz_d((v2u64)(_1)))
+
+#define __lsx_bz_h(/*__m128i*/ _1) ((int)__builtin_lsx_bz_h((v8u16)(_1)))
+
+#define __lsx_bz_v(/*__m128i*/ _1) ((int)__builtin_lsx_bz_v((v16u8)(_1)))
+
+#define __lsx_bz_w(/*__m128i*/ _1) ((int)__builtin_lsx_bz_w((v4u32)(_1)))
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_caf_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vfcmp_caf_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_caf_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcmp_caf_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_ceq_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vfcmp_ceq_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_ceq_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcmp_ceq_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_cle_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vfcmp_cle_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_cle_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcmp_cle_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_clt_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vfcmp_clt_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_clt_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcmp_clt_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_cne_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vfcmp_cne_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_cne_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcmp_cne_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_cor_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vfcmp_cor_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_cor_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcmp_cor_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_cueq_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vfcmp_cueq_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_cueq_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcmp_cueq_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_cule_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vfcmp_cule_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_cule_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcmp_cule_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_cult_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vfcmp_cult_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_cult_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcmp_cult_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_cun_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vfcmp_cun_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_cune_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vfcmp_cune_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_cune_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcmp_cune_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_cun_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcmp_cun_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_saf_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vfcmp_saf_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_saf_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcmp_saf_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_seq_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vfcmp_seq_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_seq_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcmp_seq_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_sle_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vfcmp_sle_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_sle_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcmp_sle_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_slt_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vfcmp_slt_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_slt_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcmp_slt_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_sne_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vfcmp_sne_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_sne_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcmp_sne_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_sor_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vfcmp_sor_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_sor_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcmp_sor_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_sueq_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vfcmp_sueq_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_sueq_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcmp_sueq_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_sule_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vfcmp_sule_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_sule_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcmp_sule_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_sult_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vfcmp_sult_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_sult_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcmp_sult_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_sun_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vfcmp_sun_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_sune_d(__m128d _1, __m128d _2) {
+ return (__m128i)__builtin_lsx_vfcmp_sune_d((v2f64)_1, (v2f64)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_sune_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcmp_sune_s((v4f32)_1, (v4f32)_2);
+}
+
+extern __inline
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__)) __m128i
+ __lsx_vfcmp_sun_s(__m128 _1, __m128 _2) {
+ return (__m128i)__builtin_lsx_vfcmp_sun_s((v4f32)_1, (v4f32)_2);
+}
+
+#define __lsx_vrepli_b(/*si10*/ _1) ((__m128i)__builtin_lsx_vrepli_b((_1)))
+
+#define __lsx_vrepli_d(/*si10*/ _1) ((__m128i)__builtin_lsx_vrepli_d((_1)))
+
+#define __lsx_vrepli_h(/*si10*/ _1) ((__m128i)__builtin_lsx_vrepli_h((_1)))
+
+#define __lsx_vrepli_w(/*si10*/ _1) ((__m128i)__builtin_lsx_vrepli_w((_1)))
+
+#endif /* defined(__loongarch_sx) */
+#endif /* _LOONGSON_SXINTRIN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/mm_malloc.h b/contrib/llvm-project/clang/lib/Headers/mm_malloc.h
index 933dbaacade5..d32fe5941627 100644
--- a/contrib/llvm-project/clang/lib/Headers/mm_malloc.h
+++ b/contrib/llvm-project/clang/lib/Headers/mm_malloc.h
@@ -28,9 +28,9 @@ extern "C" int posix_memalign(void **__memptr, size_t __alignment, size_t __size
#if !(defined(_WIN32) && defined(_mm_malloc))
static __inline__ void *__attribute__((__always_inline__, __nodebug__,
- __malloc__))
-_mm_malloc(size_t __size, size_t __align)
-{
+ __malloc__, __alloc_size__(1),
+ __alloc_align__(2)))
+_mm_malloc(size_t __size, size_t __align) {
if (__align == 1) {
return malloc(__size);
}
diff --git a/contrib/llvm-project/clang/lib/Headers/mmintrin.h b/contrib/llvm-project/clang/lib/Headers/mmintrin.h
index 79a8b55016b1..08849f01071a 100644
--- a/contrib/llvm-project/clang/lib/Headers/mmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/mmintrin.h
@@ -10,6 +10,10 @@
#ifndef __MMINTRIN_H
#define __MMINTRIN_H
+#if !defined(__i386__) && !defined(__x86_64__)
+#error "This header is only meant to be used on x86 and x64 architecture"
+#endif
+
typedef long long __m64 __attribute__((__vector_size__(8), __aligned__(8)));
typedef long long __v1di __attribute__((__vector_size__(8)));
@@ -18,7 +22,9 @@ typedef short __v4hi __attribute__((__vector_size__(8)));
typedef char __v8qi __attribute__((__vector_size__(8)));
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("mmx"), __min_vector_width__(64)))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("mmx,no-evex512"), \
+ __min_vector_width__(64)))
/// Clears the MMX state by setting the state of the x87 stack registers
/// to empty.
@@ -27,10 +33,10 @@ typedef char __v8qi __attribute__((__vector_size__(8)));
///
/// This intrinsic corresponds to the <c> EMMS </c> instruction.
///
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("mmx")))
-_mm_empty(void)
-{
- __builtin_ia32_emms();
+static __inline__ void __attribute__((__always_inline__, __nodebug__,
+ __target__("mmx,no-evex512")))
+_mm_empty(void) {
+ __builtin_ia32_emms();
}
/// Constructs a 64-bit integer vector, setting the lower 32 bits to the
diff --git a/contrib/llvm-project/clang/lib/Headers/module.modulemap b/contrib/llvm-project/clang/lib/Headers/module.modulemap
index 6894672ef052..56a13f69bc05 100644
--- a/contrib/llvm-project/clang/lib/Headers/module.modulemap
+++ b/contrib/llvm-project/clang/lib/Headers/module.modulemap
@@ -153,9 +153,162 @@ module _Builtin_intrinsics [system] [extern_c] {
}
}
-module _Builtin_stddef_max_align_t [system] [extern_c] {
- header "__stddef_max_align_t.h"
+// Start -fbuiltin-headers-in-system-modules affected modules
+
+// The following modules all ignore their headers when
+// -fbuiltin-headers-in-system-modules is passed, and many of
+// those headers join system modules when present.
+
+// e.g. if -fbuiltin-headers-in-system-modules is passed, then
+// float.h will not be in the _Builtin_float module (that module
+// will be empty). If there is a system module that declares
+// `header "float.h"`, then the builtin float.h will join
+// that module. The system float.h (if present) will be treated
+// as a textual header in the sytem module.
+module _Builtin_float [system] {
+ header "float.h"
+ export *
+}
+
+module _Builtin_inttypes [system] {
+ header "inttypes.h"
+ export *
+}
+
+module _Builtin_iso646 [system] {
+ header "iso646.h"
+ export *
+}
+
+module _Builtin_limits [system] {
+ header "limits.h"
+ export *
+}
+
+module _Builtin_stdalign [system] {
+ header "stdalign.h"
+ export *
+}
+
+module _Builtin_stdarg [system] {
+ textual header "stdarg.h"
+
+ explicit module __gnuc_va_list {
+ header "__stdarg___gnuc_va_list.h"
+ export *
+ }
+
+ explicit module __va_copy {
+ header "__stdarg___va_copy.h"
+ export *
+ }
+
+ explicit module va_arg {
+ header "__stdarg_va_arg.h"
+ export *
+ }
+
+ explicit module va_copy {
+ header "__stdarg_va_copy.h"
+ export *
+ }
+
+ explicit module va_list {
+ header "__stdarg_va_list.h"
+ export *
+ }
+}
+
+module _Builtin_stdatomic [system] {
+ header "stdatomic.h"
+ export *
+}
+
+module _Builtin_stdbool [system] {
+ header "stdbool.h"
+ export *
+}
+
+module _Builtin_stddef [system] {
+ textual header "stddef.h"
+
+ // __stddef_max_align_t.h is always in this module, even if
+ // -fbuiltin-headers-in-system-modules is passed.
+ explicit module max_align_t {
+ header "__stddef_max_align_t.h"
+ export *
+ }
+
+ explicit module null {
+ header "__stddef_null.h"
+ export *
+ }
+
+ explicit module nullptr_t {
+ header "__stddef_nullptr_t.h"
+ export *
+ }
+
+ explicit module offsetof {
+ header "__stddef_offsetof.h"
+ export *
+ }
+
+ explicit module ptrdiff_t {
+ header "__stddef_ptrdiff_t.h"
+ export *
+ }
+
+ explicit module rsize_t {
+ header "__stddef_rsize_t.h"
+ export *
+ }
+
+ explicit module size_t {
+ header "__stddef_size_t.h"
+ export *
+ }
+
+ explicit module unreachable {
+ header "__stddef_unreachable.h"
+ export *
+ }
+
+ explicit module wchar_t {
+ header "__stddef_wchar_t.h"
+ export *
+ }
+}
+
+// wint_t is provided by <wchar.h> and not <stddef.h>. It's here
+// for compatibility, but must be explicitly requested. Therefore
+// __stddef_wint_t.h is not part of _Builtin_stddef. It is always in
+// this module even if -fbuiltin-headers-in-system-modules is passed.
+module _Builtin_stddef_wint_t [system] {
+ header "__stddef_wint_t.h"
+ export *
+}
+
+module _Builtin_stdint [system] {
+ header "stdint.h"
+ export *
+}
+
+module _Builtin_stdnoreturn [system] {
+ header "stdnoreturn.h"
+ export *
+}
+
+module _Builtin_tgmath [system] {
+ header "tgmath.h"
+ export *
+}
+
+module _Builtin_unwind [system] {
+ header "unwind.h"
+ export *
}
+// End -fbuiltin-headers-in-system-modules affected modules
module opencl_c {
requires opencl
diff --git a/contrib/llvm-project/clang/lib/Headers/mwaitxintrin.h b/contrib/llvm-project/clang/lib/Headers/mwaitxintrin.h
index ed485380af79..65f427105b41 100644
--- a/contrib/llvm-project/clang/lib/Headers/mwaitxintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/mwaitxintrin.h
@@ -16,12 +16,41 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("mwaitx")))
+
+/// Establishes a linear address memory range to be monitored and puts
+/// the processor in the monitor event pending state. Data stored in the
+/// monitored address range causes the processor to exit the pending state.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c MONITORX instruction.
+///
+/// \param __p
+/// The memory range to be monitored. The size of the range is determined by
+/// CPUID function 0000_0005h.
+/// \param __extensions
+/// Optional extensions for the monitoring state.
+/// \param __hints
+/// Optional hints for the monitoring state.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_monitorx(void * __p, unsigned __extensions, unsigned __hints)
{
__builtin_ia32_monitorx(__p, __extensions, __hints);
}
+/// Used with the \c MONITORX instruction to wait while the processor is in
+/// the monitor event pending state. Data stored in the monitored address
+/// range, or an interrupt, causes the processor to exit the pending state.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c MWAITX instruction.
+///
+/// \param __extensions
+/// Optional extensions for the monitoring state, which can vary by
+/// processor.
+/// \param __hints
+/// Optional hints for the monitoring state, which can vary by processor.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_mwaitx(unsigned __extensions, unsigned __hints, unsigned __clock)
{
diff --git a/contrib/llvm-project/clang/lib/Headers/nmmintrin.h b/contrib/llvm-project/clang/lib/Headers/nmmintrin.h
index 672aea496681..59fc7ec99e61 100644
--- a/contrib/llvm-project/clang/lib/Headers/nmmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/nmmintrin.h
@@ -10,6 +10,10 @@
#ifndef __NMMINTRIN_H
#define __NMMINTRIN_H
+#if !defined(__i386__) && !defined(__x86_64__)
+#error "This header is only meant to be used on x86 and x64 architecture"
+#endif
+
/* To match expectations of gcc we put the sse4.2 definitions into smmintrin.h,
just include it now then. */
#include <smmintrin.h>
diff --git a/contrib/llvm-project/clang/lib/Headers/opencl-c-base.h b/contrib/llvm-project/clang/lib/Headers/opencl-c-base.h
index 3c5e2c973936..2494f6213fc5 100644
--- a/contrib/llvm-project/clang/lib/Headers/opencl-c-base.h
+++ b/contrib/llvm-project/clang/lib/Headers/opencl-c-base.h
@@ -12,8 +12,8 @@
// Define extension macros
#if (defined(__OPENCL_CPP_VERSION__) || __OPENCL_C_VERSION__ >= 200)
-// For SPIR all extensions are supported.
-#if defined(__SPIR__)
+// For SPIR and SPIR-V all extensions are supported.
+#if defined(__SPIR__) || defined(__SPIRV__)
#define cl_khr_subgroup_extended_types 1
#define cl_khr_subgroup_non_uniform_vote 1
#define cl_khr_subgroup_ballot 1
@@ -21,16 +21,37 @@
#define cl_khr_subgroup_shuffle 1
#define cl_khr_subgroup_shuffle_relative 1
#define cl_khr_subgroup_clustered_reduce 1
+#define cl_khr_subgroup_rotate 1
#define cl_khr_extended_bit_ops 1
#define cl_khr_integer_dot_product 1
#define __opencl_c_integer_dot_product_input_4x8bit 1
#define __opencl_c_integer_dot_product_input_4x8bit_packed 1
+#define cl_ext_float_atomics 1
+#ifdef cl_khr_fp16
+#define __opencl_c_ext_fp16_global_atomic_load_store 1
+#define __opencl_c_ext_fp16_local_atomic_load_store 1
+#define __opencl_c_ext_fp16_global_atomic_add 1
+#define __opencl_c_ext_fp16_local_atomic_add 1
+#define __opencl_c_ext_fp16_global_atomic_min_max 1
+#define __opencl_c_ext_fp16_local_atomic_min_max 1
+#endif
+#ifdef cl_khr_fp64
+#define __opencl_c_ext_fp64_global_atomic_add 1
+#define __opencl_c_ext_fp64_local_atomic_add 1
+#define __opencl_c_ext_fp64_global_atomic_min_max 1
+#define __opencl_c_ext_fp64_local_atomic_min_max 1
+#endif
+#define __opencl_c_ext_fp32_global_atomic_add 1
+#define __opencl_c_ext_fp32_local_atomic_add 1
+#define __opencl_c_ext_fp32_global_atomic_min_max 1
+#define __opencl_c_ext_fp32_local_atomic_min_max 1
+#define __opencl_c_ext_image_raw10_raw12 1
-#endif // defined(__SPIR__)
+#endif // defined(__SPIR__) || defined(__SPIRV__)
#endif // (defined(__OPENCL_CPP_VERSION__) || __OPENCL_C_VERSION__ >= 200)
// Define feature macros for OpenCL C 2.0
-#if (defined(__OPENCL_CPP_VERSION__) || __OPENCL_C_VERSION__ == 200)
+#if (__OPENCL_CPP_VERSION__ == 100 || __OPENCL_C_VERSION__ == 200)
#define __opencl_c_pipes 1
#define __opencl_c_generic_address_space 1
#define __opencl_c_work_group_collective_functions 1
@@ -45,12 +66,46 @@
#endif
// Define header-only feature macros for OpenCL C 3.0.
-#if (__OPENCL_C_VERSION__ == 300)
-// For the SPIR target all features are supported.
-#if defined(__SPIR__)
+#if (__OPENCL_CPP_VERSION__ == 202100 || __OPENCL_C_VERSION__ == 300)
+// For the SPIR and SPIR-V target all features are supported.
+#if defined(__SPIR__) || defined(__SPIRV__)
+#define __opencl_c_work_group_collective_functions 1
+#define __opencl_c_atomic_order_seq_cst 1
+#define __opencl_c_atomic_scope_device 1
#define __opencl_c_atomic_scope_all_devices 1
+#define __opencl_c_read_write_images 1
#endif // defined(__SPIR__)
-#endif // (__OPENCL_C_VERSION__ == 300)
+
+// Undefine any feature macros that have been explicitly disabled using
+// an __undef_<feature> macro.
+#ifdef __undef___opencl_c_work_group_collective_functions
+#undef __opencl_c_work_group_collective_functions
+#endif
+#ifdef __undef___opencl_c_atomic_order_seq_cst
+#undef __opencl_c_atomic_order_seq_cst
+#endif
+#ifdef __undef___opencl_c_atomic_scope_device
+#undef __opencl_c_atomic_scope_device
+#endif
+#ifdef __undef___opencl_c_atomic_scope_all_devices
+#undef __opencl_c_atomic_scope_all_devices
+#endif
+#ifdef __undef___opencl_c_read_write_images
+#undef __opencl_c_read_write_images
+#endif
+
+#endif // (__OPENCL_CPP_VERSION__ == 202100 || __OPENCL_C_VERSION__ == 300)
+
+#if !defined(__opencl_c_generic_address_space)
+// Internal feature macro to provide named (global, local, private) address
+// space overloads for builtin functions that take a pointer argument.
+#define __opencl_c_named_address_space_builtins 1
+#endif // !defined(__opencl_c_generic_address_space)
+
+#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups) || defined(__opencl_c_subgroups)
+// Internal feature macro to provide subgroup builtins.
+#define __opencl_subgroup_builtins 1
+#endif
// built-in scalar data types:
@@ -169,6 +224,9 @@ typedef double double8 __attribute__((ext_vector_type(8)));
typedef double double16 __attribute__((ext_vector_type(16)));
#endif
+// An internal alias for half, for use by OpenCLBuiltins.td.
+#define __half half
+
#if defined(__OPENCL_CPP_VERSION__)
#define NULL nullptr
#elif defined(__OPENCL_C_VERSION__)
@@ -329,11 +387,17 @@ typedef enum memory_scope {
memory_scope_device = __OPENCL_MEMORY_SCOPE_DEVICE,
#if defined(__opencl_c_atomic_scope_all_devices)
memory_scope_all_svm_devices = __OPENCL_MEMORY_SCOPE_ALL_SVM_DEVICES,
-#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
memory_scope_all_devices = memory_scope_all_svm_devices,
-#endif // __OPENCL_C_VERSION__ >= CL_VERSION_3_0
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif // defined(__opencl_c_atomic_scope_all_devices)
-#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups)
+/**
+ * Subgroups have different requirements on forward progress, so just test
+ * all the relevant macros.
+ * CL 3.0 sub-groups "they are not guaranteed to make independent forward progress"
+ * KHR subgroups "Subgroups within a workgroup are independent, make forward progress with respect to each other"
+ */
+#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups) || defined(__opencl_c_subgroups)
memory_scope_sub_group = __OPENCL_MEMORY_SCOPE_SUB_GROUP
#endif
} memory_scope;
@@ -411,6 +475,13 @@ typedef enum memory_order
#define CLK_HALF_FLOAT 0x10DD
#define CLK_FLOAT 0x10DE
#define CLK_UNORM_INT24 0x10DF
+#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0
+#define CLK_UNORM_INT_101010_2 0x10E0
+#endif // __OPENCL_C_VERSION__ >= CL_VERSION_3_0
+#ifdef __opencl_c_ext_image_raw10_raw12
+#define CLK_UNSIGNED_INT_RAW10_EXT 0x10E3
+#define CLK_UNSIGNED_INT_RAW12_EXT 0x10E4
+#endif // __opencl_c_ext_image_raw10_raw12
// Channel order, numbering must be aligned with cl_channel_order in cl.h
//
@@ -473,12 +544,14 @@ typedef int clk_profiling_info;
#define MAX_WORK_DIM 3
+#ifdef __opencl_c_device_enqueue
typedef struct {
unsigned int workDimension;
size_t globalWorkOffset[MAX_WORK_DIM];
size_t globalWorkSize[MAX_WORK_DIM];
size_t localWorkSize[MAX_WORK_DIM];
} ndrange_t;
+#endif // __opencl_c_device_enqueue
#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
@@ -572,6 +645,28 @@ typedef struct {
#define as_intptr_t(x) __builtin_astype((x), intptr_t)
#define as_uintptr_t(x) __builtin_astype((x), uintptr_t)
+// C++ for OpenCL - __remove_address_space
+#if defined(__OPENCL_CPP_VERSION__)
+template <typename _Tp> struct __remove_address_space { using type = _Tp; };
+#if defined(__opencl_c_generic_address_space)
+template <typename _Tp> struct __remove_address_space<__generic _Tp> {
+ using type = _Tp;
+};
+#endif
+template <typename _Tp> struct __remove_address_space<__global _Tp> {
+ using type = _Tp;
+};
+template <typename _Tp> struct __remove_address_space<__private _Tp> {
+ using type = _Tp;
+};
+template <typename _Tp> struct __remove_address_space<__local _Tp> {
+ using type = _Tp;
+};
+template <typename _Tp> struct __remove_address_space<__constant _Tp> {
+ using type = _Tp;
+};
+#endif
+
// OpenCL v1.1 s6.9, v1.2/2.0 s6.10 - Function qualifiers
#define __kernel_exec(X, typen) __kernel \
diff --git a/contrib/llvm-project/clang/lib/Headers/opencl-c.h b/contrib/llvm-project/clang/lib/Headers/opencl-c.h
index fc50dd718c4e..288bb18bc654 100644
--- a/contrib/llvm-project/clang/lib/Headers/opencl-c.h
+++ b/contrib/llvm-project/clang/lib/Headers/opencl-c.h
@@ -11,11 +11,11 @@
#include "opencl-c-base.h"
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_images)
#ifndef cl_khr_depth_images
#define cl_khr_depth_images
#endif //cl_khr_depth_images
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_images)
#if __OPENCL_C_VERSION__ < CL_VERSION_2_0
#ifdef cl_khr_3d_image_writes
@@ -23,11 +23,14 @@
#endif //cl_khr_3d_image_writes
#endif //__OPENCL_C_VERSION__ < CL_VERSION_2_0
-
-#if (defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)) && defined(__SPIR__)
+#if (defined(__OPENCL_CPP_VERSION__) || \
+ (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)) && \
+ (defined(__SPIR__) || defined(__SPIRV__))
#pragma OPENCL EXTENSION cl_intel_planar_yuv : begin
#pragma OPENCL EXTENSION cl_intel_planar_yuv : end
-#endif // (defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)) && defined(__SPIR__)
+#endif // (defined(__OPENCL_CPP_VERSION__) ||
+ // (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)) &&
+ // (defined(__SPIR__) || defined(__SPIRV__))
#define __ovld __attribute__((overloadable))
#define __conv __attribute__((convergent))
@@ -6359,7 +6362,7 @@ uint __ovld __cnfn get_work_dim(void);
* dimindx, get_global_size() returns 1.
* For clEnqueueTask, this always returns 1.
*/
-size_t __ovld __cnfn get_global_size(uint dimindx);
+size_t __ovld __cnfn get_global_size(uint);
/**
* Returns the unique global work-item ID value for
@@ -6370,7 +6373,7 @@ size_t __ovld __cnfn get_global_size(uint dimindx);
* other values of dimindx, get_global_id() returns 0.
* For clEnqueueTask, this returns 0.
*/
-size_t __ovld __cnfn get_global_id(uint dimindx);
+size_t __ovld __cnfn get_global_id(uint);
/**
* Returns the number of local work-items specified in
@@ -6384,7 +6387,7 @@ size_t __ovld __cnfn get_global_id(uint dimindx);
* get_local_size() returns 1.
* For clEnqueueTask, this always returns 1.
*/
-size_t __ovld __cnfn get_local_size(uint dimindx);
+size_t __ovld __cnfn get_local_size(uint);
/**
* Returns the unique local work-item ID i.e. a work-item
@@ -6394,7 +6397,7 @@ size_t __ovld __cnfn get_local_size(uint dimindx);
* get_local_id() returns 0.
* For clEnqueueTask, this returns 0.
*/
-size_t __ovld __cnfn get_local_id(uint dimindx);
+size_t __ovld __cnfn get_local_id(uint);
/**
* Returns the number of work-groups that will execute a
@@ -6403,7 +6406,7 @@ size_t __ovld __cnfn get_local_id(uint dimindx);
* For other values of dimindx, get_num_groups() returns 1.
* For clEnqueueTask, this always returns 1.
*/
-size_t __ovld __cnfn get_num_groups(uint dimindx);
+size_t __ovld __cnfn get_num_groups(uint);
/**
* get_group_id returns the work-group ID which is a
@@ -6412,7 +6415,7 @@ size_t __ovld __cnfn get_num_groups(uint dimindx);
* For other values, get_group_id() returns 0.
* For clEnqueueTask, this returns 0.
*/
-size_t __ovld __cnfn get_group_id(uint dimindx);
+size_t __ovld __cnfn get_group_id(uint);
/**
* get_global_offset returns the offset values specified in
@@ -6422,10 +6425,10 @@ size_t __ovld __cnfn get_group_id(uint dimindx);
* For other values, get_global_offset() returns 0.
* For clEnqueueTask, this returns 0.
*/
-size_t __ovld __cnfn get_global_offset(uint dimindx);
+size_t __ovld __cnfn get_global_offset(uint);
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-size_t __ovld get_enqueued_local_size(uint dimindx);
+size_t __ovld get_enqueued_local_size(uint);
size_t __ovld get_global_linear_id(void);
size_t __ovld get_local_linear_id(void);
#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
@@ -6487,27 +6490,27 @@ half16 __ovld __cnfn acosh(half16);
/**
* Compute acos (x) / PI.
*/
-float __ovld __cnfn acospi(float x);
-float2 __ovld __cnfn acospi(float2 x);
-float3 __ovld __cnfn acospi(float3 x);
-float4 __ovld __cnfn acospi(float4 x);
-float8 __ovld __cnfn acospi(float8 x);
-float16 __ovld __cnfn acospi(float16 x);
+float __ovld __cnfn acospi(float);
+float2 __ovld __cnfn acospi(float2);
+float3 __ovld __cnfn acospi(float3);
+float4 __ovld __cnfn acospi(float4);
+float8 __ovld __cnfn acospi(float8);
+float16 __ovld __cnfn acospi(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn acospi(double x);
-double2 __ovld __cnfn acospi(double2 x);
-double3 __ovld __cnfn acospi(double3 x);
-double4 __ovld __cnfn acospi(double4 x);
-double8 __ovld __cnfn acospi(double8 x);
-double16 __ovld __cnfn acospi(double16 x);
+double __ovld __cnfn acospi(double);
+double2 __ovld __cnfn acospi(double2);
+double3 __ovld __cnfn acospi(double3);
+double4 __ovld __cnfn acospi(double4);
+double8 __ovld __cnfn acospi(double8);
+double16 __ovld __cnfn acospi(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn acospi(half x);
-half2 __ovld __cnfn acospi(half2 x);
-half3 __ovld __cnfn acospi(half3 x);
-half4 __ovld __cnfn acospi(half4 x);
-half8 __ovld __cnfn acospi(half8 x);
-half16 __ovld __cnfn acospi(half16 x);
+half __ovld __cnfn acospi(half);
+half2 __ovld __cnfn acospi(half2);
+half3 __ovld __cnfn acospi(half3);
+half4 __ovld __cnfn acospi(half4);
+half8 __ovld __cnfn acospi(half8);
+half16 __ovld __cnfn acospi(half16);
#endif //cl_khr_fp16
/**
@@ -6565,79 +6568,79 @@ half16 __ovld __cnfn asinh(half16);
/**
* Compute asin (x) / PI.
*/
-float __ovld __cnfn asinpi(float x);
-float2 __ovld __cnfn asinpi(float2 x);
-float3 __ovld __cnfn asinpi(float3 x);
-float4 __ovld __cnfn asinpi(float4 x);
-float8 __ovld __cnfn asinpi(float8 x);
-float16 __ovld __cnfn asinpi(float16 x);
+float __ovld __cnfn asinpi(float);
+float2 __ovld __cnfn asinpi(float2);
+float3 __ovld __cnfn asinpi(float3);
+float4 __ovld __cnfn asinpi(float4);
+float8 __ovld __cnfn asinpi(float8);
+float16 __ovld __cnfn asinpi(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn asinpi(double x);
-double2 __ovld __cnfn asinpi(double2 x);
-double3 __ovld __cnfn asinpi(double3 x);
-double4 __ovld __cnfn asinpi(double4 x);
-double8 __ovld __cnfn asinpi(double8 x);
-double16 __ovld __cnfn asinpi(double16 x);
+double __ovld __cnfn asinpi(double);
+double2 __ovld __cnfn asinpi(double2);
+double3 __ovld __cnfn asinpi(double3);
+double4 __ovld __cnfn asinpi(double4);
+double8 __ovld __cnfn asinpi(double8);
+double16 __ovld __cnfn asinpi(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn asinpi(half x);
-half2 __ovld __cnfn asinpi(half2 x);
-half3 __ovld __cnfn asinpi(half3 x);
-half4 __ovld __cnfn asinpi(half4 x);
-half8 __ovld __cnfn asinpi(half8 x);
-half16 __ovld __cnfn asinpi(half16 x);
+half __ovld __cnfn asinpi(half);
+half2 __ovld __cnfn asinpi(half2);
+half3 __ovld __cnfn asinpi(half3);
+half4 __ovld __cnfn asinpi(half4);
+half8 __ovld __cnfn asinpi(half8);
+half16 __ovld __cnfn asinpi(half16);
#endif //cl_khr_fp16
/**
* Arc tangent function.
*/
-float __ovld __cnfn atan(float y_over_x);
-float2 __ovld __cnfn atan(float2 y_over_x);
-float3 __ovld __cnfn atan(float3 y_over_x);
-float4 __ovld __cnfn atan(float4 y_over_x);
-float8 __ovld __cnfn atan(float8 y_over_x);
-float16 __ovld __cnfn atan(float16 y_over_x);
+float __ovld __cnfn atan(float);
+float2 __ovld __cnfn atan(float2);
+float3 __ovld __cnfn atan(float3);
+float4 __ovld __cnfn atan(float4);
+float8 __ovld __cnfn atan(float8);
+float16 __ovld __cnfn atan(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn atan(double y_over_x);
-double2 __ovld __cnfn atan(double2 y_over_x);
-double3 __ovld __cnfn atan(double3 y_over_x);
-double4 __ovld __cnfn atan(double4 y_over_x);
-double8 __ovld __cnfn atan(double8 y_over_x);
-double16 __ovld __cnfn atan(double16 y_over_x);
+double __ovld __cnfn atan(double);
+double2 __ovld __cnfn atan(double2);
+double3 __ovld __cnfn atan(double3);
+double4 __ovld __cnfn atan(double4);
+double8 __ovld __cnfn atan(double8);
+double16 __ovld __cnfn atan(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn atan(half y_over_x);
-half2 __ovld __cnfn atan(half2 y_over_x);
-half3 __ovld __cnfn atan(half3 y_over_x);
-half4 __ovld __cnfn atan(half4 y_over_x);
-half8 __ovld __cnfn atan(half8 y_over_x);
-half16 __ovld __cnfn atan(half16 y_over_x);
+half __ovld __cnfn atan(half);
+half2 __ovld __cnfn atan(half2);
+half3 __ovld __cnfn atan(half3);
+half4 __ovld __cnfn atan(half4);
+half8 __ovld __cnfn atan(half8);
+half16 __ovld __cnfn atan(half16);
#endif //cl_khr_fp16
/**
* Arc tangent of y / x.
*/
-float __ovld __cnfn atan2(float y, float x);
-float2 __ovld __cnfn atan2(float2 y, float2 x);
-float3 __ovld __cnfn atan2(float3 y, float3 x);
-float4 __ovld __cnfn atan2(float4 y, float4 x);
-float8 __ovld __cnfn atan2(float8 y, float8 x);
-float16 __ovld __cnfn atan2(float16 y, float16 x);
+float __ovld __cnfn atan2(float, float);
+float2 __ovld __cnfn atan2(float2, float2);
+float3 __ovld __cnfn atan2(float3, float3);
+float4 __ovld __cnfn atan2(float4, float4);
+float8 __ovld __cnfn atan2(float8, float8);
+float16 __ovld __cnfn atan2(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn atan2(double y, double x);
-double2 __ovld __cnfn atan2(double2 y, double2 x);
-double3 __ovld __cnfn atan2(double3 y, double3 x);
-double4 __ovld __cnfn atan2(double4 y, double4 x);
-double8 __ovld __cnfn atan2(double8 y, double8 x);
-double16 __ovld __cnfn atan2(double16 y, double16 x);
+double __ovld __cnfn atan2(double, double);
+double2 __ovld __cnfn atan2(double2, double2);
+double3 __ovld __cnfn atan2(double3, double3);
+double4 __ovld __cnfn atan2(double4, double4);
+double8 __ovld __cnfn atan2(double8, double8);
+double16 __ovld __cnfn atan2(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn atan2(half y, half x);
-half2 __ovld __cnfn atan2(half2 y, half2 x);
-half3 __ovld __cnfn atan2(half3 y, half3 x);
-half4 __ovld __cnfn atan2(half4 y, half4 x);
-half8 __ovld __cnfn atan2(half8 y, half8 x);
-half16 __ovld __cnfn atan2(half16 y, half16 x);
+half __ovld __cnfn atan2(half, half);
+half2 __ovld __cnfn atan2(half2, half2);
+half3 __ovld __cnfn atan2(half3, half3);
+half4 __ovld __cnfn atan2(half4, half4);
+half8 __ovld __cnfn atan2(half8, half8);
+half16 __ovld __cnfn atan2(half16, half16);
#endif //cl_khr_fp16
/**
@@ -6669,53 +6672,53 @@ half16 __ovld __cnfn atanh(half16);
/**
* Compute atan (x) / PI.
*/
-float __ovld __cnfn atanpi(float x);
-float2 __ovld __cnfn atanpi(float2 x);
-float3 __ovld __cnfn atanpi(float3 x);
-float4 __ovld __cnfn atanpi(float4 x);
-float8 __ovld __cnfn atanpi(float8 x);
-float16 __ovld __cnfn atanpi(float16 x);
+float __ovld __cnfn atanpi(float);
+float2 __ovld __cnfn atanpi(float2);
+float3 __ovld __cnfn atanpi(float3);
+float4 __ovld __cnfn atanpi(float4);
+float8 __ovld __cnfn atanpi(float8);
+float16 __ovld __cnfn atanpi(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn atanpi(double x);
-double2 __ovld __cnfn atanpi(double2 x);
-double3 __ovld __cnfn atanpi(double3 x);
-double4 __ovld __cnfn atanpi(double4 x);
-double8 __ovld __cnfn atanpi(double8 x);
-double16 __ovld __cnfn atanpi(double16 x);
+double __ovld __cnfn atanpi(double);
+double2 __ovld __cnfn atanpi(double2);
+double3 __ovld __cnfn atanpi(double3);
+double4 __ovld __cnfn atanpi(double4);
+double8 __ovld __cnfn atanpi(double8);
+double16 __ovld __cnfn atanpi(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn atanpi(half x);
-half2 __ovld __cnfn atanpi(half2 x);
-half3 __ovld __cnfn atanpi(half3 x);
-half4 __ovld __cnfn atanpi(half4 x);
-half8 __ovld __cnfn atanpi(half8 x);
-half16 __ovld __cnfn atanpi(half16 x);
+half __ovld __cnfn atanpi(half);
+half2 __ovld __cnfn atanpi(half2);
+half3 __ovld __cnfn atanpi(half3);
+half4 __ovld __cnfn atanpi(half4);
+half8 __ovld __cnfn atanpi(half8);
+half16 __ovld __cnfn atanpi(half16);
#endif //cl_khr_fp16
/**
* Compute atan2 (y, x) / PI.
*/
-float __ovld __cnfn atan2pi(float y, float x);
-float2 __ovld __cnfn atan2pi(float2 y, float2 x);
-float3 __ovld __cnfn atan2pi(float3 y, float3 x);
-float4 __ovld __cnfn atan2pi(float4 y, float4 x);
-float8 __ovld __cnfn atan2pi(float8 y, float8 x);
-float16 __ovld __cnfn atan2pi(float16 y, float16 x);
+float __ovld __cnfn atan2pi(float, float);
+float2 __ovld __cnfn atan2pi(float2, float2);
+float3 __ovld __cnfn atan2pi(float3, float3);
+float4 __ovld __cnfn atan2pi(float4, float4);
+float8 __ovld __cnfn atan2pi(float8, float8);
+float16 __ovld __cnfn atan2pi(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn atan2pi(double y, double x);
-double2 __ovld __cnfn atan2pi(double2 y, double2 x);
-double3 __ovld __cnfn atan2pi(double3 y, double3 x);
-double4 __ovld __cnfn atan2pi(double4 y, double4 x);
-double8 __ovld __cnfn atan2pi(double8 y, double8 x);
-double16 __ovld __cnfn atan2pi(double16 y, double16 x);
+double __ovld __cnfn atan2pi(double, double);
+double2 __ovld __cnfn atan2pi(double2, double2);
+double3 __ovld __cnfn atan2pi(double3, double3);
+double4 __ovld __cnfn atan2pi(double4, double4);
+double8 __ovld __cnfn atan2pi(double8, double8);
+double16 __ovld __cnfn atan2pi(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn atan2pi(half y, half x);
-half2 __ovld __cnfn atan2pi(half2 y, half2 x);
-half3 __ovld __cnfn atan2pi(half3 y, half3 x);
-half4 __ovld __cnfn atan2pi(half4 y, half4 x);
-half8 __ovld __cnfn atan2pi(half8 y, half8 x);
-half16 __ovld __cnfn atan2pi(half16 y, half16 x);
+half __ovld __cnfn atan2pi(half, half);
+half2 __ovld __cnfn atan2pi(half2, half2);
+half3 __ovld __cnfn atan2pi(half3, half3);
+half4 __ovld __cnfn atan2pi(half4, half4);
+half8 __ovld __cnfn atan2pi(half8, half8);
+half16 __ovld __cnfn atan2pi(half16, half16);
#endif //cl_khr_fp16
/**
@@ -6774,27 +6777,27 @@ half16 __ovld __cnfn ceil(half16);
/**
* Returns x with its sign changed to match the sign of y.
*/
-float __ovld __cnfn copysign(float x, float y);
-float2 __ovld __cnfn copysign(float2 x, float2 y);
-float3 __ovld __cnfn copysign(float3 x, float3 y);
-float4 __ovld __cnfn copysign(float4 x, float4 y);
-float8 __ovld __cnfn copysign(float8 x, float8 y);
-float16 __ovld __cnfn copysign(float16 x, float16 y);
+float __ovld __cnfn copysign(float, float);
+float2 __ovld __cnfn copysign(float2, float2);
+float3 __ovld __cnfn copysign(float3, float3);
+float4 __ovld __cnfn copysign(float4, float4);
+float8 __ovld __cnfn copysign(float8, float8);
+float16 __ovld __cnfn copysign(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn copysign(double x, double y);
-double2 __ovld __cnfn copysign(double2 x, double2 y);
-double3 __ovld __cnfn copysign(double3 x, double3 y);
-double4 __ovld __cnfn copysign(double4 x, double4 y);
-double8 __ovld __cnfn copysign(double8 x, double8 y);
-double16 __ovld __cnfn copysign(double16 x, double16 y);
+double __ovld __cnfn copysign(double, double);
+double2 __ovld __cnfn copysign(double2, double2);
+double3 __ovld __cnfn copysign(double3, double3);
+double4 __ovld __cnfn copysign(double4, double4);
+double8 __ovld __cnfn copysign(double8, double8);
+double16 __ovld __cnfn copysign(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn copysign(half x, half y);
-half2 __ovld __cnfn copysign(half2 x, half2 y);
-half3 __ovld __cnfn copysign(half3 x, half3 y);
-half4 __ovld __cnfn copysign(half4 x, half4 y);
-half8 __ovld __cnfn copysign(half8 x, half8 y);
-half16 __ovld __cnfn copysign(half16 x, half16 y);
+half __ovld __cnfn copysign(half, half);
+half2 __ovld __cnfn copysign(half2, half2);
+half3 __ovld __cnfn copysign(half3, half3);
+half4 __ovld __cnfn copysign(half4, half4);
+half8 __ovld __cnfn copysign(half8, half8);
+half16 __ovld __cnfn copysign(half16, half16);
#endif //cl_khr_fp16
/**
@@ -6852,27 +6855,27 @@ half16 __ovld __cnfn cosh(half16);
/**
* Compute cos (PI * x).
*/
-float __ovld __cnfn cospi(float x);
-float2 __ovld __cnfn cospi(float2 x);
-float3 __ovld __cnfn cospi(float3 x);
-float4 __ovld __cnfn cospi(float4 x);
-float8 __ovld __cnfn cospi(float8 x);
-float16 __ovld __cnfn cospi(float16 x);
+float __ovld __cnfn cospi(float);
+float2 __ovld __cnfn cospi(float2);
+float3 __ovld __cnfn cospi(float3);
+float4 __ovld __cnfn cospi(float4);
+float8 __ovld __cnfn cospi(float8);
+float16 __ovld __cnfn cospi(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn cospi(double x);
-double2 __ovld __cnfn cospi(double2 x);
-double3 __ovld __cnfn cospi(double3 x);
-double4 __ovld __cnfn cospi(double4 x);
-double8 __ovld __cnfn cospi(double8 x);
-double16 __ovld __cnfn cospi(double16 x);
+double __ovld __cnfn cospi(double);
+double2 __ovld __cnfn cospi(double2);
+double3 __ovld __cnfn cospi(double3);
+double4 __ovld __cnfn cospi(double4);
+double8 __ovld __cnfn cospi(double8);
+double16 __ovld __cnfn cospi(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn cospi(half x);
-half2 __ovld __cnfn cospi(half2 x);
-half3 __ovld __cnfn cospi(half3 x);
-half4 __ovld __cnfn cospi(half4 x);
-half8 __ovld __cnfn cospi(half8 x);
-half16 __ovld __cnfn cospi(half16 x);
+half __ovld __cnfn cospi(half);
+half2 __ovld __cnfn cospi(half2);
+half3 __ovld __cnfn cospi(half3);
+half4 __ovld __cnfn cospi(half4);
+half8 __ovld __cnfn cospi(half8);
+half16 __ovld __cnfn cospi(half16);
#endif //cl_khr_fp16
/**
@@ -6931,27 +6934,27 @@ half16 __ovld __cnfn erf(half16);
/**
* Compute the base e exponential function of x.
*/
-float __ovld __cnfn exp(float x);
-float2 __ovld __cnfn exp(float2 x);
-float3 __ovld __cnfn exp(float3 x);
-float4 __ovld __cnfn exp(float4 x);
-float8 __ovld __cnfn exp(float8 x);
-float16 __ovld __cnfn exp(float16 x);
+float __ovld __cnfn exp(float);
+float2 __ovld __cnfn exp(float2);
+float3 __ovld __cnfn exp(float3);
+float4 __ovld __cnfn exp(float4);
+float8 __ovld __cnfn exp(float8);
+float16 __ovld __cnfn exp(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn exp(double x);
-double2 __ovld __cnfn exp(double2 x);
-double3 __ovld __cnfn exp(double3 x);
-double4 __ovld __cnfn exp(double4 x);
-double8 __ovld __cnfn exp(double8 x);
-double16 __ovld __cnfn exp(double16 x);
+double __ovld __cnfn exp(double);
+double2 __ovld __cnfn exp(double2);
+double3 __ovld __cnfn exp(double3);
+double4 __ovld __cnfn exp(double4);
+double8 __ovld __cnfn exp(double8);
+double16 __ovld __cnfn exp(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn exp(half x);
-half2 __ovld __cnfn exp(half2 x);
-half3 __ovld __cnfn exp(half3 x);
-half4 __ovld __cnfn exp(half4 x);
-half8 __ovld __cnfn exp(half8 x);
-half16 __ovld __cnfn exp(half16 x);
+half __ovld __cnfn exp(half);
+half2 __ovld __cnfn exp(half2);
+half3 __ovld __cnfn exp(half3);
+half4 __ovld __cnfn exp(half4);
+half8 __ovld __cnfn exp(half8);
+half16 __ovld __cnfn exp(half16);
#endif //cl_khr_fp16
/**
@@ -7009,27 +7012,27 @@ half16 __ovld __cnfn exp10(half16);
/**
* Compute e^x- 1.0.
*/
-float __ovld __cnfn expm1(float x);
-float2 __ovld __cnfn expm1(float2 x);
-float3 __ovld __cnfn expm1(float3 x);
-float4 __ovld __cnfn expm1(float4 x);
-float8 __ovld __cnfn expm1(float8 x);
-float16 __ovld __cnfn expm1(float16 x);
+float __ovld __cnfn expm1(float);
+float2 __ovld __cnfn expm1(float2);
+float3 __ovld __cnfn expm1(float3);
+float4 __ovld __cnfn expm1(float4);
+float8 __ovld __cnfn expm1(float8);
+float16 __ovld __cnfn expm1(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn expm1(double x);
-double2 __ovld __cnfn expm1(double2 x);
-double3 __ovld __cnfn expm1(double3 x);
-double4 __ovld __cnfn expm1(double4 x);
-double8 __ovld __cnfn expm1(double8 x);
-double16 __ovld __cnfn expm1(double16 x);
+double __ovld __cnfn expm1(double);
+double2 __ovld __cnfn expm1(double2);
+double3 __ovld __cnfn expm1(double3);
+double4 __ovld __cnfn expm1(double4);
+double8 __ovld __cnfn expm1(double8);
+double16 __ovld __cnfn expm1(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn expm1(half x);
-half2 __ovld __cnfn expm1(half2 x);
-half3 __ovld __cnfn expm1(half3 x);
-half4 __ovld __cnfn expm1(half4 x);
-half8 __ovld __cnfn expm1(half8 x);
-half16 __ovld __cnfn expm1(half16 x);
+half __ovld __cnfn expm1(half);
+half2 __ovld __cnfn expm1(half2);
+half3 __ovld __cnfn expm1(half3);
+half4 __ovld __cnfn expm1(half4);
+half8 __ovld __cnfn expm1(half8);
+half16 __ovld __cnfn expm1(half16);
#endif //cl_khr_fp16
/**
@@ -7061,27 +7064,27 @@ half16 __ovld __cnfn fabs(half16);
/**
* x - y if x > y, +0 if x is less than or equal to y.
*/
-float __ovld __cnfn fdim(float x, float y);
-float2 __ovld __cnfn fdim(float2 x, float2 y);
-float3 __ovld __cnfn fdim(float3 x, float3 y);
-float4 __ovld __cnfn fdim(float4 x, float4 y);
-float8 __ovld __cnfn fdim(float8 x, float8 y);
-float16 __ovld __cnfn fdim(float16 x, float16 y);
+float __ovld __cnfn fdim(float, float);
+float2 __ovld __cnfn fdim(float2, float2);
+float3 __ovld __cnfn fdim(float3, float3);
+float4 __ovld __cnfn fdim(float4, float4);
+float8 __ovld __cnfn fdim(float8, float8);
+float16 __ovld __cnfn fdim(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn fdim(double x, double y);
-double2 __ovld __cnfn fdim(double2 x, double2 y);
-double3 __ovld __cnfn fdim(double3 x, double3 y);
-double4 __ovld __cnfn fdim(double4 x, double4 y);
-double8 __ovld __cnfn fdim(double8 x, double8 y);
-double16 __ovld __cnfn fdim(double16 x, double16 y);
+double __ovld __cnfn fdim(double, double);
+double2 __ovld __cnfn fdim(double2, double2);
+double3 __ovld __cnfn fdim(double3, double3);
+double4 __ovld __cnfn fdim(double4, double4);
+double8 __ovld __cnfn fdim(double8, double8);
+double16 __ovld __cnfn fdim(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn fdim(half x, half y);
-half2 __ovld __cnfn fdim(half2 x, half2 y);
-half3 __ovld __cnfn fdim(half3 x, half3 y);
-half4 __ovld __cnfn fdim(half4 x, half4 y);
-half8 __ovld __cnfn fdim(half8 x, half8 y);
-half16 __ovld __cnfn fdim(half16 x, half16 y);
+half __ovld __cnfn fdim(half, half);
+half2 __ovld __cnfn fdim(half2, half2);
+half3 __ovld __cnfn fdim(half3, half3);
+half4 __ovld __cnfn fdim(half4, half4);
+half8 __ovld __cnfn fdim(half8, half8);
+half16 __ovld __cnfn fdim(half16, half16);
#endif //cl_khr_fp16
/**
@@ -7118,27 +7121,27 @@ half16 __ovld __cnfn floor(half16);
* intermediate products shall not occur. Edge case
* behavior is per the IEEE 754-2008 standard.
*/
-float __ovld __cnfn fma(float a, float b, float c);
-float2 __ovld __cnfn fma(float2 a, float2 b, float2 c);
-float3 __ovld __cnfn fma(float3 a, float3 b, float3 c);
-float4 __ovld __cnfn fma(float4 a, float4 b, float4 c);
-float8 __ovld __cnfn fma(float8 a, float8 b, float8 c);
-float16 __ovld __cnfn fma(float16 a, float16 b, float16 c);
+float __ovld __cnfn fma(float, float, float);
+float2 __ovld __cnfn fma(float2, float2, float2);
+float3 __ovld __cnfn fma(float3, float3, float3);
+float4 __ovld __cnfn fma(float4, float4, float4);
+float8 __ovld __cnfn fma(float8, float8, float8);
+float16 __ovld __cnfn fma(float16, float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn fma(double a, double b, double c);
-double2 __ovld __cnfn fma(double2 a, double2 b, double2 c);
-double3 __ovld __cnfn fma(double3 a, double3 b, double3 c);
-double4 __ovld __cnfn fma(double4 a, double4 b, double4 c);
-double8 __ovld __cnfn fma(double8 a, double8 b, double8 c);
-double16 __ovld __cnfn fma(double16 a, double16 b, double16 c);
+double __ovld __cnfn fma(double, double, double);
+double2 __ovld __cnfn fma(double2, double2, double2);
+double3 __ovld __cnfn fma(double3, double3, double3);
+double4 __ovld __cnfn fma(double4, double4, double4);
+double8 __ovld __cnfn fma(double8, double8, double8);
+double16 __ovld __cnfn fma(double16, double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn fma(half a, half b, half c);
-half2 __ovld __cnfn fma(half2 a, half2 b, half2 c);
-half3 __ovld __cnfn fma(half3 a, half3 b, half3 c);
-half4 __ovld __cnfn fma(half4 a, half4 b, half4 c);
-half8 __ovld __cnfn fma(half8 a, half8 b, half8 c);
-half16 __ovld __cnfn fma(half16 a, half16 b, half16 c);
+half __ovld __cnfn fma(half, half, half);
+half2 __ovld __cnfn fma(half2, half2, half2);
+half3 __ovld __cnfn fma(half3, half3, half3);
+half4 __ovld __cnfn fma(half4, half4, half4);
+half8 __ovld __cnfn fma(half8, half8, half8);
+half16 __ovld __cnfn fma(half16, half16, half16);
#endif //cl_khr_fp16
/**
@@ -7147,42 +7150,42 @@ half16 __ovld __cnfn fma(half16 a, half16 b, half16 c);
* argument. If both arguments are NaNs, fmax()
* returns a NaN.
*/
-float __ovld __cnfn fmax(float x, float y);
-float2 __ovld __cnfn fmax(float2 x, float2 y);
-float3 __ovld __cnfn fmax(float3 x, float3 y);
-float4 __ovld __cnfn fmax(float4 x, float4 y);
-float8 __ovld __cnfn fmax(float8 x, float8 y);
-float16 __ovld __cnfn fmax(float16 x, float16 y);
-float2 __ovld __cnfn fmax(float2 x, float y);
-float3 __ovld __cnfn fmax(float3 x, float y);
-float4 __ovld __cnfn fmax(float4 x, float y);
-float8 __ovld __cnfn fmax(float8 x, float y);
-float16 __ovld __cnfn fmax(float16 x, float y);
+float __ovld __cnfn fmax(float, float);
+float2 __ovld __cnfn fmax(float2, float2);
+float3 __ovld __cnfn fmax(float3, float3);
+float4 __ovld __cnfn fmax(float4, float4);
+float8 __ovld __cnfn fmax(float8, float8);
+float16 __ovld __cnfn fmax(float16, float16);
+float2 __ovld __cnfn fmax(float2, float);
+float3 __ovld __cnfn fmax(float3, float);
+float4 __ovld __cnfn fmax(float4, float);
+float8 __ovld __cnfn fmax(float8, float);
+float16 __ovld __cnfn fmax(float16, float);
#ifdef cl_khr_fp64
-double __ovld __cnfn fmax(double x, double y);
-double2 __ovld __cnfn fmax(double2 x, double2 y);
-double3 __ovld __cnfn fmax(double3 x, double3 y);
-double4 __ovld __cnfn fmax(double4 x, double4 y);
-double8 __ovld __cnfn fmax(double8 x, double8 y);
-double16 __ovld __cnfn fmax(double16 x, double16 y);
-double2 __ovld __cnfn fmax(double2 x, double y);
-double3 __ovld __cnfn fmax(double3 x, double y);
-double4 __ovld __cnfn fmax(double4 x, double y);
-double8 __ovld __cnfn fmax(double8 x, double y);
-double16 __ovld __cnfn fmax(double16 x, double y);
+double __ovld __cnfn fmax(double, double);
+double2 __ovld __cnfn fmax(double2, double2);
+double3 __ovld __cnfn fmax(double3, double3);
+double4 __ovld __cnfn fmax(double4, double4);
+double8 __ovld __cnfn fmax(double8, double8);
+double16 __ovld __cnfn fmax(double16, double16);
+double2 __ovld __cnfn fmax(double2, double);
+double3 __ovld __cnfn fmax(double3, double);
+double4 __ovld __cnfn fmax(double4, double);
+double8 __ovld __cnfn fmax(double8, double);
+double16 __ovld __cnfn fmax(double16, double);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn fmax(half x, half y);
-half2 __ovld __cnfn fmax(half2 x, half2 y);
-half3 __ovld __cnfn fmax(half3 x, half3 y);
-half4 __ovld __cnfn fmax(half4 x, half4 y);
-half8 __ovld __cnfn fmax(half8 x, half8 y);
-half16 __ovld __cnfn fmax(half16 x, half16 y);
-half2 __ovld __cnfn fmax(half2 x, half y);
-half3 __ovld __cnfn fmax(half3 x, half y);
-half4 __ovld __cnfn fmax(half4 x, half y);
-half8 __ovld __cnfn fmax(half8 x, half y);
-half16 __ovld __cnfn fmax(half16 x, half y);
+half __ovld __cnfn fmax(half, half);
+half2 __ovld __cnfn fmax(half2, half2);
+half3 __ovld __cnfn fmax(half3, half3);
+half4 __ovld __cnfn fmax(half4, half4);
+half8 __ovld __cnfn fmax(half8, half8);
+half16 __ovld __cnfn fmax(half16, half16);
+half2 __ovld __cnfn fmax(half2, half);
+half3 __ovld __cnfn fmax(half3, half);
+half4 __ovld __cnfn fmax(half4, half);
+half8 __ovld __cnfn fmax(half8, half);
+half16 __ovld __cnfn fmax(half16, half);
#endif //cl_khr_fp16
/**
@@ -7191,68 +7194,68 @@ half16 __ovld __cnfn fmax(half16 x, half y);
* argument. If both arguments are NaNs, fmin()
* returns a NaN.
*/
-float __ovld __cnfn fmin(float x, float y);
-float2 __ovld __cnfn fmin(float2 x, float2 y);
-float3 __ovld __cnfn fmin(float3 x, float3 y);
-float4 __ovld __cnfn fmin(float4 x, float4 y);
-float8 __ovld __cnfn fmin(float8 x, float8 y);
-float16 __ovld __cnfn fmin(float16 x, float16 y);
-float2 __ovld __cnfn fmin(float2 x, float y);
-float3 __ovld __cnfn fmin(float3 x, float y);
-float4 __ovld __cnfn fmin(float4 x, float y);
-float8 __ovld __cnfn fmin(float8 x, float y);
-float16 __ovld __cnfn fmin(float16 x, float y);
+float __ovld __cnfn fmin(float, float);
+float2 __ovld __cnfn fmin(float2, float2);
+float3 __ovld __cnfn fmin(float3, float3);
+float4 __ovld __cnfn fmin(float4, float4);
+float8 __ovld __cnfn fmin(float8, float8);
+float16 __ovld __cnfn fmin(float16, float16);
+float2 __ovld __cnfn fmin(float2, float);
+float3 __ovld __cnfn fmin(float3, float);
+float4 __ovld __cnfn fmin(float4, float);
+float8 __ovld __cnfn fmin(float8, float);
+float16 __ovld __cnfn fmin(float16, float);
#ifdef cl_khr_fp64
-double __ovld __cnfn fmin(double x, double y);
-double2 __ovld __cnfn fmin(double2 x, double2 y);
-double3 __ovld __cnfn fmin(double3 x, double3 y);
-double4 __ovld __cnfn fmin(double4 x, double4 y);
-double8 __ovld __cnfn fmin(double8 x, double8 y);
-double16 __ovld __cnfn fmin(double16 x, double16 y);
-double2 __ovld __cnfn fmin(double2 x, double y);
-double3 __ovld __cnfn fmin(double3 x, double y);
-double4 __ovld __cnfn fmin(double4 x, double y);
-double8 __ovld __cnfn fmin(double8 x, double y);
-double16 __ovld __cnfn fmin(double16 x, double y);
+double __ovld __cnfn fmin(double, double);
+double2 __ovld __cnfn fmin(double2, double2);
+double3 __ovld __cnfn fmin(double3, double3);
+double4 __ovld __cnfn fmin(double4, double4);
+double8 __ovld __cnfn fmin(double8, double8);
+double16 __ovld __cnfn fmin(double16, double16);
+double2 __ovld __cnfn fmin(double2, double);
+double3 __ovld __cnfn fmin(double3, double);
+double4 __ovld __cnfn fmin(double4, double);
+double8 __ovld __cnfn fmin(double8, double);
+double16 __ovld __cnfn fmin(double16, double);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn fmin(half x, half y);
-half2 __ovld __cnfn fmin(half2 x, half2 y);
-half3 __ovld __cnfn fmin(half3 x, half3 y);
-half4 __ovld __cnfn fmin(half4 x, half4 y);
-half8 __ovld __cnfn fmin(half8 x, half8 y);
-half16 __ovld __cnfn fmin(half16 x, half16 y);
-half2 __ovld __cnfn fmin(half2 x, half y);
-half3 __ovld __cnfn fmin(half3 x, half y);
-half4 __ovld __cnfn fmin(half4 x, half y);
-half8 __ovld __cnfn fmin(half8 x, half y);
-half16 __ovld __cnfn fmin(half16 x, half y);
+half __ovld __cnfn fmin(half, half);
+half2 __ovld __cnfn fmin(half2, half2);
+half3 __ovld __cnfn fmin(half3, half3);
+half4 __ovld __cnfn fmin(half4, half4);
+half8 __ovld __cnfn fmin(half8, half8);
+half16 __ovld __cnfn fmin(half16, half16);
+half2 __ovld __cnfn fmin(half2, half);
+half3 __ovld __cnfn fmin(half3, half);
+half4 __ovld __cnfn fmin(half4, half);
+half8 __ovld __cnfn fmin(half8, half);
+half16 __ovld __cnfn fmin(half16, half);
#endif //cl_khr_fp16
/**
* Modulus. Returns x - y * trunc (x/y).
*/
-float __ovld __cnfn fmod(float x, float y);
-float2 __ovld __cnfn fmod(float2 x, float2 y);
-float3 __ovld __cnfn fmod(float3 x, float3 y);
-float4 __ovld __cnfn fmod(float4 x, float4 y);
-float8 __ovld __cnfn fmod(float8 x, float8 y);
-float16 __ovld __cnfn fmod(float16 x, float16 y);
+float __ovld __cnfn fmod(float, float);
+float2 __ovld __cnfn fmod(float2, float2);
+float3 __ovld __cnfn fmod(float3, float3);
+float4 __ovld __cnfn fmod(float4, float4);
+float8 __ovld __cnfn fmod(float8, float8);
+float16 __ovld __cnfn fmod(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn fmod(double x, double y);
-double2 __ovld __cnfn fmod(double2 x, double2 y);
-double3 __ovld __cnfn fmod(double3 x, double3 y);
-double4 __ovld __cnfn fmod(double4 x, double4 y);
-double8 __ovld __cnfn fmod(double8 x, double8 y);
-double16 __ovld __cnfn fmod(double16 x, double16 y);
+double __ovld __cnfn fmod(double, double);
+double2 __ovld __cnfn fmod(double2, double2);
+double3 __ovld __cnfn fmod(double3, double3);
+double4 __ovld __cnfn fmod(double4, double4);
+double8 __ovld __cnfn fmod(double8, double8);
+double16 __ovld __cnfn fmod(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn fmod(half x, half y);
-half2 __ovld __cnfn fmod(half2 x, half2 y);
-half3 __ovld __cnfn fmod(half3 x, half3 y);
-half4 __ovld __cnfn fmod(half4 x, half4 y);
-half8 __ovld __cnfn fmod(half8 x, half8 y);
-half16 __ovld __cnfn fmod(half16 x, half16 y);
+half __ovld __cnfn fmod(half, half);
+half2 __ovld __cnfn fmod(half2, half2);
+half3 __ovld __cnfn fmod(half3, half3);
+half4 __ovld __cnfn fmod(half4, half4);
+half8 __ovld __cnfn fmod(half8, half8);
+half16 __ovld __cnfn fmod(half16, half16);
#endif //cl_khr_fp16
/**
@@ -7260,88 +7263,90 @@ half16 __ovld __cnfn fmod(half16 x, half16 y);
* floor(x) is returned in iptr.
*/
#if defined(__opencl_c_generic_address_space)
-float __ovld fract(float x, float *iptr);
-float2 __ovld fract(float2 x, float2 *iptr);
-float3 __ovld fract(float3 x, float3 *iptr);
-float4 __ovld fract(float4 x, float4 *iptr);
-float8 __ovld fract(float8 x, float8 *iptr);
-float16 __ovld fract(float16 x, float16 *iptr);
+float __ovld fract(float, float *);
+float2 __ovld fract(float2, float2 *);
+float3 __ovld fract(float3, float3 *);
+float4 __ovld fract(float4, float4 *);
+float8 __ovld fract(float8, float8 *);
+float16 __ovld fract(float16, float16 *);
#ifdef cl_khr_fp64
-double __ovld fract(double x, double *iptr);
-double2 __ovld fract(double2 x, double2 *iptr);
-double3 __ovld fract(double3 x, double3 *iptr);
-double4 __ovld fract(double4 x, double4 *iptr);
-double8 __ovld fract(double8 x, double8 *iptr);
-double16 __ovld fract(double16 x, double16 *iptr);
+double __ovld fract(double, double *);
+double2 __ovld fract(double2, double2 *);
+double3 __ovld fract(double3, double3 *);
+double4 __ovld fract(double4, double4 *);
+double8 __ovld fract(double8, double8 *);
+double16 __ovld fract(double16, double16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld fract(half x, half *iptr);
-half2 __ovld fract(half2 x, half2 *iptr);
-half3 __ovld fract(half3 x, half3 *iptr);
-half4 __ovld fract(half4 x, half4 *iptr);
-half8 __ovld fract(half8 x, half8 *iptr);
-half16 __ovld fract(half16 x, half16 *iptr);
+half __ovld fract(half, half *);
+half2 __ovld fract(half2, half2 *);
+half3 __ovld fract(half3, half3 *);
+half4 __ovld fract(half4, half4 *);
+half8 __ovld fract(half8, half8 *);
+half16 __ovld fract(half16, half16 *);
#endif //cl_khr_fp16
-#else
-float __ovld fract(float x, __global float *iptr);
-float2 __ovld fract(float2 x, __global float2 *iptr);
-float3 __ovld fract(float3 x, __global float3 *iptr);
-float4 __ovld fract(float4 x, __global float4 *iptr);
-float8 __ovld fract(float8 x, __global float8 *iptr);
-float16 __ovld fract(float16 x, __global float16 *iptr);
-float __ovld fract(float x, __local float *iptr);
-float2 __ovld fract(float2 x, __local float2 *iptr);
-float3 __ovld fract(float3 x, __local float3 *iptr);
-float4 __ovld fract(float4 x, __local float4 *iptr);
-float8 __ovld fract(float8 x, __local float8 *iptr);
-float16 __ovld fract(float16 x, __local float16 *iptr);
-float __ovld fract(float x, __private float *iptr);
-float2 __ovld fract(float2 x, __private float2 *iptr);
-float3 __ovld fract(float3 x, __private float3 *iptr);
-float4 __ovld fract(float4 x, __private float4 *iptr);
-float8 __ovld fract(float8 x, __private float8 *iptr);
-float16 __ovld fract(float16 x, __private float16 *iptr);
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld fract(float, __global float *);
+float2 __ovld fract(float2, __global float2 *);
+float3 __ovld fract(float3, __global float3 *);
+float4 __ovld fract(float4, __global float4 *);
+float8 __ovld fract(float8, __global float8 *);
+float16 __ovld fract(float16, __global float16 *);
+float __ovld fract(float, __local float *);
+float2 __ovld fract(float2, __local float2 *);
+float3 __ovld fract(float3, __local float3 *);
+float4 __ovld fract(float4, __local float4 *);
+float8 __ovld fract(float8, __local float8 *);
+float16 __ovld fract(float16, __local float16 *);
+float __ovld fract(float, __private float *);
+float2 __ovld fract(float2, __private float2 *);
+float3 __ovld fract(float3, __private float3 *);
+float4 __ovld fract(float4, __private float4 *);
+float8 __ovld fract(float8, __private float8 *);
+float16 __ovld fract(float16, __private float16 *);
#ifdef cl_khr_fp64
-double __ovld fract(double x, __global double *iptr);
-double2 __ovld fract(double2 x, __global double2 *iptr);
-double3 __ovld fract(double3 x, __global double3 *iptr);
-double4 __ovld fract(double4 x, __global double4 *iptr);
-double8 __ovld fract(double8 x, __global double8 *iptr);
-double16 __ovld fract(double16 x, __global double16 *iptr);
-double __ovld fract(double x, __local double *iptr);
-double2 __ovld fract(double2 x, __local double2 *iptr);
-double3 __ovld fract(double3 x, __local double3 *iptr);
-double4 __ovld fract(double4 x, __local double4 *iptr);
-double8 __ovld fract(double8 x, __local double8 *iptr);
-double16 __ovld fract(double16 x, __local double16 *iptr);
-double __ovld fract(double x, __private double *iptr);
-double2 __ovld fract(double2 x, __private double2 *iptr);
-double3 __ovld fract(double3 x, __private double3 *iptr);
-double4 __ovld fract(double4 x, __private double4 *iptr);
-double8 __ovld fract(double8 x, __private double8 *iptr);
-double16 __ovld fract(double16 x, __private double16 *iptr);
+double __ovld fract(double, __global double *);
+double2 __ovld fract(double2, __global double2 *);
+double3 __ovld fract(double3, __global double3 *);
+double4 __ovld fract(double4, __global double4 *);
+double8 __ovld fract(double8, __global double8 *);
+double16 __ovld fract(double16, __global double16 *);
+double __ovld fract(double, __local double *);
+double2 __ovld fract(double2, __local double2 *);
+double3 __ovld fract(double3, __local double3 *);
+double4 __ovld fract(double4, __local double4 *);
+double8 __ovld fract(double8, __local double8 *);
+double16 __ovld fract(double16, __local double16 *);
+double __ovld fract(double, __private double *);
+double2 __ovld fract(double2, __private double2 *);
+double3 __ovld fract(double3, __private double3 *);
+double4 __ovld fract(double4, __private double4 *);
+double8 __ovld fract(double8, __private double8 *);
+double16 __ovld fract(double16, __private double16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld fract(half x, __global half *iptr);
-half2 __ovld fract(half2 x, __global half2 *iptr);
-half3 __ovld fract(half3 x, __global half3 *iptr);
-half4 __ovld fract(half4 x, __global half4 *iptr);
-half8 __ovld fract(half8 x, __global half8 *iptr);
-half16 __ovld fract(half16 x, __global half16 *iptr);
-half __ovld fract(half x, __local half *iptr);
-half2 __ovld fract(half2 x, __local half2 *iptr);
-half3 __ovld fract(half3 x, __local half3 *iptr);
-half4 __ovld fract(half4 x, __local half4 *iptr);
-half8 __ovld fract(half8 x, __local half8 *iptr);
-half16 __ovld fract(half16 x, __local half16 *iptr);
-half __ovld fract(half x, __private half *iptr);
-half2 __ovld fract(half2 x, __private half2 *iptr);
-half3 __ovld fract(half3 x, __private half3 *iptr);
-half4 __ovld fract(half4 x, __private half4 *iptr);
-half8 __ovld fract(half8 x, __private half8 *iptr);
-half16 __ovld fract(half16 x, __private half16 *iptr);
+half __ovld fract(half, __global half *);
+half2 __ovld fract(half2, __global half2 *);
+half3 __ovld fract(half3, __global half3 *);
+half4 __ovld fract(half4, __global half4 *);
+half8 __ovld fract(half8, __global half8 *);
+half16 __ovld fract(half16, __global half16 *);
+half __ovld fract(half, __local half *);
+half2 __ovld fract(half2, __local half2 *);
+half3 __ovld fract(half3, __local half3 *);
+half4 __ovld fract(half4, __local half4 *);
+half8 __ovld fract(half8, __local half8 *);
+half16 __ovld fract(half16, __local half16 *);
+half __ovld fract(half, __private half *);
+half2 __ovld fract(half2, __private half2 *);
+half3 __ovld fract(half3, __private half3 *);
+half4 __ovld fract(half4, __private half4 *);
+half8 __ovld fract(half8, __private half8 *);
+half16 __ovld fract(half16, __private half16 *);
#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
/**
* Extract mantissa and exponent from x. For each
@@ -7350,181 +7355,183 @@ half16 __ovld fract(half16 x, __private half16 *iptr);
* component of x equals mantissa returned * 2^exp.
*/
#if defined(__opencl_c_generic_address_space)
-float __ovld frexp(float x, int *exp);
-float2 __ovld frexp(float2 x, int2 *exp);
-float3 __ovld frexp(float3 x, int3 *exp);
-float4 __ovld frexp(float4 x, int4 *exp);
-float8 __ovld frexp(float8 x, int8 *exp);
-float16 __ovld frexp(float16 x, int16 *exp);
+float __ovld frexp(float, int *);
+float2 __ovld frexp(float2, int2 *);
+float3 __ovld frexp(float3, int3 *);
+float4 __ovld frexp(float4, int4 *);
+float8 __ovld frexp(float8, int8 *);
+float16 __ovld frexp(float16, int16 *);
#ifdef cl_khr_fp64
-double __ovld frexp(double x, int *exp);
-double2 __ovld frexp(double2 x, int2 *exp);
-double3 __ovld frexp(double3 x, int3 *exp);
-double4 __ovld frexp(double4 x, int4 *exp);
-double8 __ovld frexp(double8 x, int8 *exp);
-double16 __ovld frexp(double16 x, int16 *exp);
+double __ovld frexp(double, int *);
+double2 __ovld frexp(double2, int2 *);
+double3 __ovld frexp(double3, int3 *);
+double4 __ovld frexp(double4, int4 *);
+double8 __ovld frexp(double8, int8 *);
+double16 __ovld frexp(double16, int16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld frexp(half x, int *exp);
-half2 __ovld frexp(half2 x, int2 *exp);
-half3 __ovld frexp(half3 x, int3 *exp);
-half4 __ovld frexp(half4 x, int4 *exp);
-half8 __ovld frexp(half8 x, int8 *exp);
-half16 __ovld frexp(half16 x, int16 *exp);
+half __ovld frexp(half, int *);
+half2 __ovld frexp(half2, int2 *);
+half3 __ovld frexp(half3, int3 *);
+half4 __ovld frexp(half4, int4 *);
+half8 __ovld frexp(half8, int8 *);
+half16 __ovld frexp(half16, int16 *);
#endif //cl_khr_fp16
-#else
-float __ovld frexp(float x, __global int *exp);
-float2 __ovld frexp(float2 x, __global int2 *exp);
-float3 __ovld frexp(float3 x, __global int3 *exp);
-float4 __ovld frexp(float4 x, __global int4 *exp);
-float8 __ovld frexp(float8 x, __global int8 *exp);
-float16 __ovld frexp(float16 x, __global int16 *exp);
-float __ovld frexp(float x, __local int *exp);
-float2 __ovld frexp(float2 x, __local int2 *exp);
-float3 __ovld frexp(float3 x, __local int3 *exp);
-float4 __ovld frexp(float4 x, __local int4 *exp);
-float8 __ovld frexp(float8 x, __local int8 *exp);
-float16 __ovld frexp(float16 x, __local int16 *exp);
-float __ovld frexp(float x, __private int *exp);
-float2 __ovld frexp(float2 x, __private int2 *exp);
-float3 __ovld frexp(float3 x, __private int3 *exp);
-float4 __ovld frexp(float4 x, __private int4 *exp);
-float8 __ovld frexp(float8 x, __private int8 *exp);
-float16 __ovld frexp(float16 x, __private int16 *exp);
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld frexp(float, __global int *);
+float2 __ovld frexp(float2, __global int2 *);
+float3 __ovld frexp(float3, __global int3 *);
+float4 __ovld frexp(float4, __global int4 *);
+float8 __ovld frexp(float8, __global int8 *);
+float16 __ovld frexp(float16, __global int16 *);
+float __ovld frexp(float, __local int *);
+float2 __ovld frexp(float2, __local int2 *);
+float3 __ovld frexp(float3, __local int3 *);
+float4 __ovld frexp(float4, __local int4 *);
+float8 __ovld frexp(float8, __local int8 *);
+float16 __ovld frexp(float16, __local int16 *);
+float __ovld frexp(float, __private int *);
+float2 __ovld frexp(float2, __private int2 *);
+float3 __ovld frexp(float3, __private int3 *);
+float4 __ovld frexp(float4, __private int4 *);
+float8 __ovld frexp(float8, __private int8 *);
+float16 __ovld frexp(float16, __private int16 *);
#ifdef cl_khr_fp64
-double __ovld frexp(double x, __global int *exp);
-double2 __ovld frexp(double2 x, __global int2 *exp);
-double3 __ovld frexp(double3 x, __global int3 *exp);
-double4 __ovld frexp(double4 x, __global int4 *exp);
-double8 __ovld frexp(double8 x, __global int8 *exp);
-double16 __ovld frexp(double16 x, __global int16 *exp);
-double __ovld frexp(double x, __local int *exp);
-double2 __ovld frexp(double2 x, __local int2 *exp);
-double3 __ovld frexp(double3 x, __local int3 *exp);
-double4 __ovld frexp(double4 x, __local int4 *exp);
-double8 __ovld frexp(double8 x, __local int8 *exp);
-double16 __ovld frexp(double16 x, __local int16 *exp);
-double __ovld frexp(double x, __private int *exp);
-double2 __ovld frexp(double2 x, __private int2 *exp);
-double3 __ovld frexp(double3 x, __private int3 *exp);
-double4 __ovld frexp(double4 x, __private int4 *exp);
-double8 __ovld frexp(double8 x, __private int8 *exp);
-double16 __ovld frexp(double16 x, __private int16 *exp);
+double __ovld frexp(double, __global int *);
+double2 __ovld frexp(double2, __global int2 *);
+double3 __ovld frexp(double3, __global int3 *);
+double4 __ovld frexp(double4, __global int4 *);
+double8 __ovld frexp(double8, __global int8 *);
+double16 __ovld frexp(double16, __global int16 *);
+double __ovld frexp(double, __local int *);
+double2 __ovld frexp(double2, __local int2 *);
+double3 __ovld frexp(double3, __local int3 *);
+double4 __ovld frexp(double4, __local int4 *);
+double8 __ovld frexp(double8, __local int8 *);
+double16 __ovld frexp(double16, __local int16 *);
+double __ovld frexp(double, __private int *);
+double2 __ovld frexp(double2, __private int2 *);
+double3 __ovld frexp(double3, __private int3 *);
+double4 __ovld frexp(double4, __private int4 *);
+double8 __ovld frexp(double8, __private int8 *);
+double16 __ovld frexp(double16, __private int16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld frexp(half x, __global int *exp);
-half2 __ovld frexp(half2 x, __global int2 *exp);
-half3 __ovld frexp(half3 x, __global int3 *exp);
-half4 __ovld frexp(half4 x, __global int4 *exp);
-half8 __ovld frexp(half8 x, __global int8 *exp);
-half16 __ovld frexp(half16 x, __global int16 *exp);
-half __ovld frexp(half x, __local int *exp);
-half2 __ovld frexp(half2 x, __local int2 *exp);
-half3 __ovld frexp(half3 x, __local int3 *exp);
-half4 __ovld frexp(half4 x, __local int4 *exp);
-half8 __ovld frexp(half8 x, __local int8 *exp);
-half16 __ovld frexp(half16 x, __local int16 *exp);
-half __ovld frexp(half x, __private int *exp);
-half2 __ovld frexp(half2 x, __private int2 *exp);
-half3 __ovld frexp(half3 x, __private int3 *exp);
-half4 __ovld frexp(half4 x, __private int4 *exp);
-half8 __ovld frexp(half8 x, __private int8 *exp);
-half16 __ovld frexp(half16 x, __private int16 *exp);
+half __ovld frexp(half, __global int *);
+half2 __ovld frexp(half2, __global int2 *);
+half3 __ovld frexp(half3, __global int3 *);
+half4 __ovld frexp(half4, __global int4 *);
+half8 __ovld frexp(half8, __global int8 *);
+half16 __ovld frexp(half16, __global int16 *);
+half __ovld frexp(half, __local int *);
+half2 __ovld frexp(half2, __local int2 *);
+half3 __ovld frexp(half3, __local int3 *);
+half4 __ovld frexp(half4, __local int4 *);
+half8 __ovld frexp(half8, __local int8 *);
+half16 __ovld frexp(half16, __local int16 *);
+half __ovld frexp(half, __private int *);
+half2 __ovld frexp(half2, __private int2 *);
+half3 __ovld frexp(half3, __private int3 *);
+half4 __ovld frexp(half4, __private int4 *);
+half8 __ovld frexp(half8, __private int8 *);
+half16 __ovld frexp(half16, __private int16 *);
#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
/**
* Compute the value of the square root of x^2 + y^2
* without undue overflow or underflow.
*/
-float __ovld __cnfn hypot(float x, float y);
-float2 __ovld __cnfn hypot(float2 x, float2 y);
-float3 __ovld __cnfn hypot(float3 x, float3 y);
-float4 __ovld __cnfn hypot(float4 x, float4 y);
-float8 __ovld __cnfn hypot(float8 x, float8 y);
-float16 __ovld __cnfn hypot(float16 x, float16 y);
+float __ovld __cnfn hypot(float, float);
+float2 __ovld __cnfn hypot(float2, float2);
+float3 __ovld __cnfn hypot(float3, float3);
+float4 __ovld __cnfn hypot(float4, float4);
+float8 __ovld __cnfn hypot(float8, float8);
+float16 __ovld __cnfn hypot(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn hypot(double x, double y);
-double2 __ovld __cnfn hypot(double2 x, double2 y);
-double3 __ovld __cnfn hypot(double3 x, double3 y);
-double4 __ovld __cnfn hypot(double4 x, double4 y);
-double8 __ovld __cnfn hypot(double8 x, double8 y);
-double16 __ovld __cnfn hypot(double16 x, double16 y);
+double __ovld __cnfn hypot(double, double);
+double2 __ovld __cnfn hypot(double2, double2);
+double3 __ovld __cnfn hypot(double3, double3);
+double4 __ovld __cnfn hypot(double4, double4);
+double8 __ovld __cnfn hypot(double8, double8);
+double16 __ovld __cnfn hypot(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn hypot(half x, half y);
-half2 __ovld __cnfn hypot(half2 x, half2 y);
-half3 __ovld __cnfn hypot(half3 x, half3 y);
-half4 __ovld __cnfn hypot(half4 x, half4 y);
-half8 __ovld __cnfn hypot(half8 x, half8 y);
-half16 __ovld __cnfn hypot(half16 x, half16 y);
+half __ovld __cnfn hypot(half, half);
+half2 __ovld __cnfn hypot(half2, half2);
+half3 __ovld __cnfn hypot(half3, half3);
+half4 __ovld __cnfn hypot(half4, half4);
+half8 __ovld __cnfn hypot(half8, half8);
+half16 __ovld __cnfn hypot(half16, half16);
#endif //cl_khr_fp16
/**
* Return the exponent as an integer value.
*/
-int __ovld __cnfn ilogb(float x);
-int2 __ovld __cnfn ilogb(float2 x);
-int3 __ovld __cnfn ilogb(float3 x);
-int4 __ovld __cnfn ilogb(float4 x);
-int8 __ovld __cnfn ilogb(float8 x);
-int16 __ovld __cnfn ilogb(float16 x);
+int __ovld __cnfn ilogb(float);
+int2 __ovld __cnfn ilogb(float2);
+int3 __ovld __cnfn ilogb(float3);
+int4 __ovld __cnfn ilogb(float4);
+int8 __ovld __cnfn ilogb(float8);
+int16 __ovld __cnfn ilogb(float16);
#ifdef cl_khr_fp64
-int __ovld __cnfn ilogb(double x);
-int2 __ovld __cnfn ilogb(double2 x);
-int3 __ovld __cnfn ilogb(double3 x);
-int4 __ovld __cnfn ilogb(double4 x);
-int8 __ovld __cnfn ilogb(double8 x);
-int16 __ovld __cnfn ilogb(double16 x);
+int __ovld __cnfn ilogb(double);
+int2 __ovld __cnfn ilogb(double2);
+int3 __ovld __cnfn ilogb(double3);
+int4 __ovld __cnfn ilogb(double4);
+int8 __ovld __cnfn ilogb(double8);
+int16 __ovld __cnfn ilogb(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-int __ovld __cnfn ilogb(half x);
-int2 __ovld __cnfn ilogb(half2 x);
-int3 __ovld __cnfn ilogb(half3 x);
-int4 __ovld __cnfn ilogb(half4 x);
-int8 __ovld __cnfn ilogb(half8 x);
-int16 __ovld __cnfn ilogb(half16 x);
+int __ovld __cnfn ilogb(half);
+int2 __ovld __cnfn ilogb(half2);
+int3 __ovld __cnfn ilogb(half3);
+int4 __ovld __cnfn ilogb(half4);
+int8 __ovld __cnfn ilogb(half8);
+int16 __ovld __cnfn ilogb(half16);
#endif //cl_khr_fp16
/**
* Multiply x by 2 to the power n.
*/
-float __ovld __cnfn ldexp(float x, int n);
-float2 __ovld __cnfn ldexp(float2 x, int2 n);
-float3 __ovld __cnfn ldexp(float3 x, int3 n);
-float4 __ovld __cnfn ldexp(float4 x, int4 n);
-float8 __ovld __cnfn ldexp(float8 x, int8 n);
-float16 __ovld __cnfn ldexp(float16 x, int16 n);
-float2 __ovld __cnfn ldexp(float2 x, int n);
-float3 __ovld __cnfn ldexp(float3 x, int n);
-float4 __ovld __cnfn ldexp(float4 x, int n);
-float8 __ovld __cnfn ldexp(float8 x, int n);
-float16 __ovld __cnfn ldexp(float16 x, int n);
+float __ovld __cnfn ldexp(float, int);
+float2 __ovld __cnfn ldexp(float2, int2);
+float3 __ovld __cnfn ldexp(float3, int3);
+float4 __ovld __cnfn ldexp(float4, int4);
+float8 __ovld __cnfn ldexp(float8, int8);
+float16 __ovld __cnfn ldexp(float16, int16);
+float2 __ovld __cnfn ldexp(float2, int);
+float3 __ovld __cnfn ldexp(float3, int);
+float4 __ovld __cnfn ldexp(float4, int);
+float8 __ovld __cnfn ldexp(float8, int);
+float16 __ovld __cnfn ldexp(float16, int);
#ifdef cl_khr_fp64
-double __ovld __cnfn ldexp(double x, int n);
-double2 __ovld __cnfn ldexp(double2 x, int2 n);
-double3 __ovld __cnfn ldexp(double3 x, int3 n);
-double4 __ovld __cnfn ldexp(double4 x, int4 n);
-double8 __ovld __cnfn ldexp(double8 x, int8 n);
-double16 __ovld __cnfn ldexp(double16 x, int16 n);
-double2 __ovld __cnfn ldexp(double2 x, int n);
-double3 __ovld __cnfn ldexp(double3 x, int n);
-double4 __ovld __cnfn ldexp(double4 x, int n);
-double8 __ovld __cnfn ldexp(double8 x, int n);
-double16 __ovld __cnfn ldexp(double16 x, int n);
+double __ovld __cnfn ldexp(double, int);
+double2 __ovld __cnfn ldexp(double2, int2);
+double3 __ovld __cnfn ldexp(double3, int3);
+double4 __ovld __cnfn ldexp(double4, int4);
+double8 __ovld __cnfn ldexp(double8, int8);
+double16 __ovld __cnfn ldexp(double16, int16);
+double2 __ovld __cnfn ldexp(double2, int);
+double3 __ovld __cnfn ldexp(double3, int);
+double4 __ovld __cnfn ldexp(double4, int);
+double8 __ovld __cnfn ldexp(double8, int);
+double16 __ovld __cnfn ldexp(double16, int);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn ldexp(half x, int n);
-half2 __ovld __cnfn ldexp(half2 x, int2 n);
-half3 __ovld __cnfn ldexp(half3 x, int3 n);
-half4 __ovld __cnfn ldexp(half4 x, int4 n);
-half8 __ovld __cnfn ldexp(half8 x, int8 n);
-half16 __ovld __cnfn ldexp(half16 x, int16 n);
-half2 __ovld __cnfn ldexp(half2 x, int n);
-half3 __ovld __cnfn ldexp(half3 x, int n);
-half4 __ovld __cnfn ldexp(half4 x, int n);
-half8 __ovld __cnfn ldexp(half8 x, int n);
-half16 __ovld __cnfn ldexp(half16 x, int n);
+half __ovld __cnfn ldexp(half, int);
+half2 __ovld __cnfn ldexp(half2, int2);
+half3 __ovld __cnfn ldexp(half3, int3);
+half4 __ovld __cnfn ldexp(half4, int4);
+half8 __ovld __cnfn ldexp(half8, int8);
+half16 __ovld __cnfn ldexp(half16, int16);
+half2 __ovld __cnfn ldexp(half2, int);
+half3 __ovld __cnfn ldexp(half3, int);
+half4 __ovld __cnfn ldexp(half4, int);
+half8 __ovld __cnfn ldexp(half8, int);
+half16 __ovld __cnfn ldexp(half16, int);
#endif //cl_khr_fp16
/**
@@ -7533,112 +7540,114 @@ half16 __ovld __cnfn ldexp(half16 x, int n);
* function. The sign of the gamma function is
* returned in the signp argument of lgamma_r.
*/
-float __ovld __cnfn lgamma(float x);
-float2 __ovld __cnfn lgamma(float2 x);
-float3 __ovld __cnfn lgamma(float3 x);
-float4 __ovld __cnfn lgamma(float4 x);
-float8 __ovld __cnfn lgamma(float8 x);
-float16 __ovld __cnfn lgamma(float16 x);
+float __ovld __cnfn lgamma(float);
+float2 __ovld __cnfn lgamma(float2);
+float3 __ovld __cnfn lgamma(float3);
+float4 __ovld __cnfn lgamma(float4);
+float8 __ovld __cnfn lgamma(float8);
+float16 __ovld __cnfn lgamma(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn lgamma(double x);
-double2 __ovld __cnfn lgamma(double2 x);
-double3 __ovld __cnfn lgamma(double3 x);
-double4 __ovld __cnfn lgamma(double4 x);
-double8 __ovld __cnfn lgamma(double8 x);
-double16 __ovld __cnfn lgamma(double16 x);
+double __ovld __cnfn lgamma(double);
+double2 __ovld __cnfn lgamma(double2);
+double3 __ovld __cnfn lgamma(double3);
+double4 __ovld __cnfn lgamma(double4);
+double8 __ovld __cnfn lgamma(double8);
+double16 __ovld __cnfn lgamma(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn lgamma(half x);
-half2 __ovld __cnfn lgamma(half2 x);
-half3 __ovld __cnfn lgamma(half3 x);
-half4 __ovld __cnfn lgamma(half4 x);
-half8 __ovld __cnfn lgamma(half8 x);
-half16 __ovld __cnfn lgamma(half16 x);
+half __ovld __cnfn lgamma(half);
+half2 __ovld __cnfn lgamma(half2);
+half3 __ovld __cnfn lgamma(half3);
+half4 __ovld __cnfn lgamma(half4);
+half8 __ovld __cnfn lgamma(half8);
+half16 __ovld __cnfn lgamma(half16);
#endif //cl_khr_fp16
#if defined(__opencl_c_generic_address_space)
-float __ovld lgamma_r(float x, int *signp);
-float2 __ovld lgamma_r(float2 x, int2 *signp);
-float3 __ovld lgamma_r(float3 x, int3 *signp);
-float4 __ovld lgamma_r(float4 x, int4 *signp);
-float8 __ovld lgamma_r(float8 x, int8 *signp);
-float16 __ovld lgamma_r(float16 x, int16 *signp);
+float __ovld lgamma_r(float, int *);
+float2 __ovld lgamma_r(float2, int2 *);
+float3 __ovld lgamma_r(float3, int3 *);
+float4 __ovld lgamma_r(float4, int4 *);
+float8 __ovld lgamma_r(float8, int8 *);
+float16 __ovld lgamma_r(float16, int16 *);
#ifdef cl_khr_fp64
-double __ovld lgamma_r(double x, int *signp);
-double2 __ovld lgamma_r(double2 x, int2 *signp);
-double3 __ovld lgamma_r(double3 x, int3 *signp);
-double4 __ovld lgamma_r(double4 x, int4 *signp);
-double8 __ovld lgamma_r(double8 x, int8 *signp);
-double16 __ovld lgamma_r(double16 x, int16 *signp);
+double __ovld lgamma_r(double, int *);
+double2 __ovld lgamma_r(double2, int2 *);
+double3 __ovld lgamma_r(double3, int3 *);
+double4 __ovld lgamma_r(double4, int4 *);
+double8 __ovld lgamma_r(double8, int8 *);
+double16 __ovld lgamma_r(double16, int16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld lgamma_r(half x, int *signp);
-half2 __ovld lgamma_r(half2 x, int2 *signp);
-half3 __ovld lgamma_r(half3 x, int3 *signp);
-half4 __ovld lgamma_r(half4 x, int4 *signp);
-half8 __ovld lgamma_r(half8 x, int8 *signp);
-half16 __ovld lgamma_r(half16 x, int16 *signp);
+half __ovld lgamma_r(half, int *);
+half2 __ovld lgamma_r(half2, int2 *);
+half3 __ovld lgamma_r(half3, int3 *);
+half4 __ovld lgamma_r(half4, int4 *);
+half8 __ovld lgamma_r(half8, int8 *);
+half16 __ovld lgamma_r(half16, int16 *);
#endif //cl_khr_fp16
-#else
-float __ovld lgamma_r(float x, __global int *signp);
-float2 __ovld lgamma_r(float2 x, __global int2 *signp);
-float3 __ovld lgamma_r(float3 x, __global int3 *signp);
-float4 __ovld lgamma_r(float4 x, __global int4 *signp);
-float8 __ovld lgamma_r(float8 x, __global int8 *signp);
-float16 __ovld lgamma_r(float16 x, __global int16 *signp);
-float __ovld lgamma_r(float x, __local int *signp);
-float2 __ovld lgamma_r(float2 x, __local int2 *signp);
-float3 __ovld lgamma_r(float3 x, __local int3 *signp);
-float4 __ovld lgamma_r(float4 x, __local int4 *signp);
-float8 __ovld lgamma_r(float8 x, __local int8 *signp);
-float16 __ovld lgamma_r(float16 x, __local int16 *signp);
-float __ovld lgamma_r(float x, __private int *signp);
-float2 __ovld lgamma_r(float2 x, __private int2 *signp);
-float3 __ovld lgamma_r(float3 x, __private int3 *signp);
-float4 __ovld lgamma_r(float4 x, __private int4 *signp);
-float8 __ovld lgamma_r(float8 x, __private int8 *signp);
-float16 __ovld lgamma_r(float16 x, __private int16 *signp);
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld lgamma_r(float, __global int *);
+float2 __ovld lgamma_r(float2, __global int2 *);
+float3 __ovld lgamma_r(float3, __global int3 *);
+float4 __ovld lgamma_r(float4, __global int4 *);
+float8 __ovld lgamma_r(float8, __global int8 *);
+float16 __ovld lgamma_r(float16, __global int16 *);
+float __ovld lgamma_r(float, __local int *);
+float2 __ovld lgamma_r(float2, __local int2 *);
+float3 __ovld lgamma_r(float3, __local int3 *);
+float4 __ovld lgamma_r(float4, __local int4 *);
+float8 __ovld lgamma_r(float8, __local int8 *);
+float16 __ovld lgamma_r(float16, __local int16 *);
+float __ovld lgamma_r(float, __private int *);
+float2 __ovld lgamma_r(float2, __private int2 *);
+float3 __ovld lgamma_r(float3, __private int3 *);
+float4 __ovld lgamma_r(float4, __private int4 *);
+float8 __ovld lgamma_r(float8, __private int8 *);
+float16 __ovld lgamma_r(float16, __private int16 *);
#ifdef cl_khr_fp64
-double __ovld lgamma_r(double x, __global int *signp);
-double2 __ovld lgamma_r(double2 x, __global int2 *signp);
-double3 __ovld lgamma_r(double3 x, __global int3 *signp);
-double4 __ovld lgamma_r(double4 x, __global int4 *signp);
-double8 __ovld lgamma_r(double8 x, __global int8 *signp);
-double16 __ovld lgamma_r(double16 x, __global int16 *signp);
-double __ovld lgamma_r(double x, __local int *signp);
-double2 __ovld lgamma_r(double2 x, __local int2 *signp);
-double3 __ovld lgamma_r(double3 x, __local int3 *signp);
-double4 __ovld lgamma_r(double4 x, __local int4 *signp);
-double8 __ovld lgamma_r(double8 x, __local int8 *signp);
-double16 __ovld lgamma_r(double16 x, __local int16 *signp);
-double __ovld lgamma_r(double x, __private int *signp);
-double2 __ovld lgamma_r(double2 x, __private int2 *signp);
-double3 __ovld lgamma_r(double3 x, __private int3 *signp);
-double4 __ovld lgamma_r(double4 x, __private int4 *signp);
-double8 __ovld lgamma_r(double8 x, __private int8 *signp);
-double16 __ovld lgamma_r(double16 x, __private int16 *signp);
+double __ovld lgamma_r(double, __global int *);
+double2 __ovld lgamma_r(double2, __global int2 *);
+double3 __ovld lgamma_r(double3, __global int3 *);
+double4 __ovld lgamma_r(double4, __global int4 *);
+double8 __ovld lgamma_r(double8, __global int8 *);
+double16 __ovld lgamma_r(double16, __global int16 *);
+double __ovld lgamma_r(double, __local int *);
+double2 __ovld lgamma_r(double2, __local int2 *);
+double3 __ovld lgamma_r(double3, __local int3 *);
+double4 __ovld lgamma_r(double4, __local int4 *);
+double8 __ovld lgamma_r(double8, __local int8 *);
+double16 __ovld lgamma_r(double16, __local int16 *);
+double __ovld lgamma_r(double, __private int *);
+double2 __ovld lgamma_r(double2, __private int2 *);
+double3 __ovld lgamma_r(double3, __private int3 *);
+double4 __ovld lgamma_r(double4, __private int4 *);
+double8 __ovld lgamma_r(double8, __private int8 *);
+double16 __ovld lgamma_r(double16, __private int16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld lgamma_r(half x, __global int *signp);
-half2 __ovld lgamma_r(half2 x, __global int2 *signp);
-half3 __ovld lgamma_r(half3 x, __global int3 *signp);
-half4 __ovld lgamma_r(half4 x, __global int4 *signp);
-half8 __ovld lgamma_r(half8 x, __global int8 *signp);
-half16 __ovld lgamma_r(half16 x, __global int16 *signp);
-half __ovld lgamma_r(half x, __local int *signp);
-half2 __ovld lgamma_r(half2 x, __local int2 *signp);
-half3 __ovld lgamma_r(half3 x, __local int3 *signp);
-half4 __ovld lgamma_r(half4 x, __local int4 *signp);
-half8 __ovld lgamma_r(half8 x, __local int8 *signp);
-half16 __ovld lgamma_r(half16 x, __local int16 *signp);
-half __ovld lgamma_r(half x, __private int *signp);
-half2 __ovld lgamma_r(half2 x, __private int2 *signp);
-half3 __ovld lgamma_r(half3 x, __private int3 *signp);
-half4 __ovld lgamma_r(half4 x, __private int4 *signp);
-half8 __ovld lgamma_r(half8 x, __private int8 *signp);
-half16 __ovld lgamma_r(half16 x, __private int16 *signp);
+half __ovld lgamma_r(half, __global int *);
+half2 __ovld lgamma_r(half2, __global int2 *);
+half3 __ovld lgamma_r(half3, __global int3 *);
+half4 __ovld lgamma_r(half4, __global int4 *);
+half8 __ovld lgamma_r(half8, __global int8 *);
+half16 __ovld lgamma_r(half16, __global int16 *);
+half __ovld lgamma_r(half, __local int *);
+half2 __ovld lgamma_r(half2, __local int2 *);
+half3 __ovld lgamma_r(half3, __local int3 *);
+half4 __ovld lgamma_r(half4, __local int4 *);
+half8 __ovld lgamma_r(half8, __local int8 *);
+half16 __ovld lgamma_r(half16, __local int16 *);
+half __ovld lgamma_r(half, __private int *);
+half2 __ovld lgamma_r(half2, __private int2 *);
+half3 __ovld lgamma_r(half3, __private int3 *);
+half4 __ovld lgamma_r(half4, __private int4 *);
+half8 __ovld lgamma_r(half8, __private int8 *);
+half16 __ovld lgamma_r(half16, __private int16 *);
#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
/**
* Compute natural logarithm.
@@ -7721,54 +7730,54 @@ half16 __ovld __cnfn log10(half16);
/**
* Compute a base e logarithm of (1.0 + x).
*/
-float __ovld __cnfn log1p(float x);
-float2 __ovld __cnfn log1p(float2 x);
-float3 __ovld __cnfn log1p(float3 x);
-float4 __ovld __cnfn log1p(float4 x);
-float8 __ovld __cnfn log1p(float8 x);
-float16 __ovld __cnfn log1p(float16 x);
+float __ovld __cnfn log1p(float);
+float2 __ovld __cnfn log1p(float2);
+float3 __ovld __cnfn log1p(float3);
+float4 __ovld __cnfn log1p(float4);
+float8 __ovld __cnfn log1p(float8);
+float16 __ovld __cnfn log1p(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn log1p(double x);
-double2 __ovld __cnfn log1p(double2 x);
-double3 __ovld __cnfn log1p(double3 x);
-double4 __ovld __cnfn log1p(double4 x);
-double8 __ovld __cnfn log1p(double8 x);
-double16 __ovld __cnfn log1p(double16 x);
+double __ovld __cnfn log1p(double);
+double2 __ovld __cnfn log1p(double2);
+double3 __ovld __cnfn log1p(double3);
+double4 __ovld __cnfn log1p(double4);
+double8 __ovld __cnfn log1p(double8);
+double16 __ovld __cnfn log1p(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn log1p(half x);
-half2 __ovld __cnfn log1p(half2 x);
-half3 __ovld __cnfn log1p(half3 x);
-half4 __ovld __cnfn log1p(half4 x);
-half8 __ovld __cnfn log1p(half8 x);
-half16 __ovld __cnfn log1p(half16 x);
+half __ovld __cnfn log1p(half);
+half2 __ovld __cnfn log1p(half2);
+half3 __ovld __cnfn log1p(half3);
+half4 __ovld __cnfn log1p(half4);
+half8 __ovld __cnfn log1p(half8);
+half16 __ovld __cnfn log1p(half16);
#endif //cl_khr_fp16
/**
* Compute the exponent of x, which is the integral
* part of logr | x |.
*/
-float __ovld __cnfn logb(float x);
-float2 __ovld __cnfn logb(float2 x);
-float3 __ovld __cnfn logb(float3 x);
-float4 __ovld __cnfn logb(float4 x);
-float8 __ovld __cnfn logb(float8 x);
-float16 __ovld __cnfn logb(float16 x);
+float __ovld __cnfn logb(float);
+float2 __ovld __cnfn logb(float2);
+float3 __ovld __cnfn logb(float3);
+float4 __ovld __cnfn logb(float4);
+float8 __ovld __cnfn logb(float8);
+float16 __ovld __cnfn logb(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn logb(double x);
-double2 __ovld __cnfn logb(double2 x);
-double3 __ovld __cnfn logb(double3 x);
-double4 __ovld __cnfn logb(double4 x);
-double8 __ovld __cnfn logb(double8 x);
-double16 __ovld __cnfn logb(double16 x);
+double __ovld __cnfn logb(double);
+double2 __ovld __cnfn logb(double2);
+double3 __ovld __cnfn logb(double3);
+double4 __ovld __cnfn logb(double4);
+double8 __ovld __cnfn logb(double8);
+double16 __ovld __cnfn logb(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn logb(half x);
-half2 __ovld __cnfn logb(half2 x);
-half3 __ovld __cnfn logb(half3 x);
-half4 __ovld __cnfn logb(half4 x);
-half8 __ovld __cnfn logb(half8 x);
-half16 __ovld __cnfn logb(half16 x);
+half __ovld __cnfn logb(half);
+half2 __ovld __cnfn logb(half2);
+half3 __ovld __cnfn logb(half3);
+half4 __ovld __cnfn logb(half4);
+half8 __ovld __cnfn logb(half8);
+half16 __ovld __cnfn logb(half16);
#endif //cl_khr_fp16
/**
@@ -7778,81 +7787,81 @@ half16 __ovld __cnfn logb(half16 x);
* defined. mad is intended to be used where speed is
* preferred over accuracy.
*/
-float __ovld __cnfn mad(float a, float b, float c);
-float2 __ovld __cnfn mad(float2 a, float2 b, float2 c);
-float3 __ovld __cnfn mad(float3 a, float3 b, float3 c);
-float4 __ovld __cnfn mad(float4 a, float4 b, float4 c);
-float8 __ovld __cnfn mad(float8 a, float8 b, float8 c);
-float16 __ovld __cnfn mad(float16 a, float16 b, float16 c);
+float __ovld __cnfn mad(float, float, float);
+float2 __ovld __cnfn mad(float2, float2, float2);
+float3 __ovld __cnfn mad(float3, float3, float3);
+float4 __ovld __cnfn mad(float4, float4, float4);
+float8 __ovld __cnfn mad(float8, float8, float8);
+float16 __ovld __cnfn mad(float16, float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn mad(double a, double b, double c);
-double2 __ovld __cnfn mad(double2 a, double2 b, double2 c);
-double3 __ovld __cnfn mad(double3 a, double3 b, double3 c);
-double4 __ovld __cnfn mad(double4 a, double4 b, double4 c);
-double8 __ovld __cnfn mad(double8 a, double8 b, double8 c);
-double16 __ovld __cnfn mad(double16 a, double16 b, double16 c);
+double __ovld __cnfn mad(double, double, double);
+double2 __ovld __cnfn mad(double2, double2, double2);
+double3 __ovld __cnfn mad(double3, double3, double3);
+double4 __ovld __cnfn mad(double4, double4, double4);
+double8 __ovld __cnfn mad(double8, double8, double8);
+double16 __ovld __cnfn mad(double16, double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn mad(half a, half b, half c);
-half2 __ovld __cnfn mad(half2 a, half2 b, half2 c);
-half3 __ovld __cnfn mad(half3 a, half3 b, half3 c);
-half4 __ovld __cnfn mad(half4 a, half4 b, half4 c);
-half8 __ovld __cnfn mad(half8 a, half8 b, half8 c);
-half16 __ovld __cnfn mad(half16 a, half16 b, half16 c);
+half __ovld __cnfn mad(half, half, half);
+half2 __ovld __cnfn mad(half2, half2, half2);
+half3 __ovld __cnfn mad(half3, half3, half3);
+half4 __ovld __cnfn mad(half4, half4, half4);
+half8 __ovld __cnfn mad(half8, half8, half8);
+half16 __ovld __cnfn mad(half16, half16, half16);
#endif //cl_khr_fp16
/**
* Returns x if | x | > | y |, y if | y | > | x |, otherwise
* fmax(x, y).
*/
-float __ovld __cnfn maxmag(float x, float y);
-float2 __ovld __cnfn maxmag(float2 x, float2 y);
-float3 __ovld __cnfn maxmag(float3 x, float3 y);
-float4 __ovld __cnfn maxmag(float4 x, float4 y);
-float8 __ovld __cnfn maxmag(float8 x, float8 y);
-float16 __ovld __cnfn maxmag(float16 x, float16 y);
+float __ovld __cnfn maxmag(float, float);
+float2 __ovld __cnfn maxmag(float2, float2);
+float3 __ovld __cnfn maxmag(float3, float3);
+float4 __ovld __cnfn maxmag(float4, float4);
+float8 __ovld __cnfn maxmag(float8, float8);
+float16 __ovld __cnfn maxmag(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn maxmag(double x, double y);
-double2 __ovld __cnfn maxmag(double2 x, double2 y);
-double3 __ovld __cnfn maxmag(double3 x, double3 y);
-double4 __ovld __cnfn maxmag(double4 x, double4 y);
-double8 __ovld __cnfn maxmag(double8 x, double8 y);
-double16 __ovld __cnfn maxmag(double16 x, double16 y);
+double __ovld __cnfn maxmag(double, double);
+double2 __ovld __cnfn maxmag(double2, double2);
+double3 __ovld __cnfn maxmag(double3, double3);
+double4 __ovld __cnfn maxmag(double4, double4);
+double8 __ovld __cnfn maxmag(double8, double8);
+double16 __ovld __cnfn maxmag(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn maxmag(half x, half y);
-half2 __ovld __cnfn maxmag(half2 x, half2 y);
-half3 __ovld __cnfn maxmag(half3 x, half3 y);
-half4 __ovld __cnfn maxmag(half4 x, half4 y);
-half8 __ovld __cnfn maxmag(half8 x, half8 y);
-half16 __ovld __cnfn maxmag(half16 x, half16 y);
+half __ovld __cnfn maxmag(half, half);
+half2 __ovld __cnfn maxmag(half2, half2);
+half3 __ovld __cnfn maxmag(half3, half3);
+half4 __ovld __cnfn maxmag(half4, half4);
+half8 __ovld __cnfn maxmag(half8, half8);
+half16 __ovld __cnfn maxmag(half16, half16);
#endif //cl_khr_fp16
/**
* Returns x if | x | < | y |, y if | y | < | x |, otherwise
* fmin(x, y).
*/
-float __ovld __cnfn minmag(float x, float y);
-float2 __ovld __cnfn minmag(float2 x, float2 y);
-float3 __ovld __cnfn minmag(float3 x, float3 y);
-float4 __ovld __cnfn minmag(float4 x, float4 y);
-float8 __ovld __cnfn minmag(float8 x, float8 y);
-float16 __ovld __cnfn minmag(float16 x, float16 y);
+float __ovld __cnfn minmag(float, float);
+float2 __ovld __cnfn minmag(float2, float2);
+float3 __ovld __cnfn minmag(float3, float3);
+float4 __ovld __cnfn minmag(float4, float4);
+float8 __ovld __cnfn minmag(float8, float8);
+float16 __ovld __cnfn minmag(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn minmag(double x, double y);
-double2 __ovld __cnfn minmag(double2 x, double2 y);
-double3 __ovld __cnfn minmag(double3 x, double3 y);
-double4 __ovld __cnfn minmag(double4 x, double4 y);
-double8 __ovld __cnfn minmag(double8 x, double8 y);
-double16 __ovld __cnfn minmag(double16 x, double16 y);
+double __ovld __cnfn minmag(double, double);
+double2 __ovld __cnfn minmag(double2, double2);
+double3 __ovld __cnfn minmag(double3, double3);
+double4 __ovld __cnfn minmag(double4, double4);
+double8 __ovld __cnfn minmag(double8, double8);
+double16 __ovld __cnfn minmag(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn minmag(half x, half y);
-half2 __ovld __cnfn minmag(half2 x, half2 y);
-half3 __ovld __cnfn minmag(half3 x, half3 y);
-half4 __ovld __cnfn minmag(half4 x, half4 y);
-half8 __ovld __cnfn minmag(half8 x, half8 y);
-half16 __ovld __cnfn minmag(half16 x, half16 y);
+half __ovld __cnfn minmag(half, half);
+half2 __ovld __cnfn minmag(half2, half2);
+half3 __ovld __cnfn minmag(half3, half3);
+half4 __ovld __cnfn minmag(half4, half4);
+half8 __ovld __cnfn minmag(half8, half8);
+half16 __ovld __cnfn minmag(half16, half16);
#endif //cl_khr_fp16
/**
@@ -7863,114 +7872,116 @@ half16 __ovld __cnfn minmag(half16 x, half16 y);
* pointed to by iptr.
*/
#if defined(__opencl_c_generic_address_space)
-float __ovld modf(float x, float *iptr);
-float2 __ovld modf(float2 x, float2 *iptr);
-float3 __ovld modf(float3 x, float3 *iptr);
-float4 __ovld modf(float4 x, float4 *iptr);
-float8 __ovld modf(float8 x, float8 *iptr);
-float16 __ovld modf(float16 x, float16 *iptr);
+float __ovld modf(float, float *);
+float2 __ovld modf(float2, float2 *);
+float3 __ovld modf(float3, float3 *);
+float4 __ovld modf(float4, float4 *);
+float8 __ovld modf(float8, float8 *);
+float16 __ovld modf(float16, float16 *);
#ifdef cl_khr_fp64
-double __ovld modf(double x, double *iptr);
-double2 __ovld modf(double2 x, double2 *iptr);
-double3 __ovld modf(double3 x, double3 *iptr);
-double4 __ovld modf(double4 x, double4 *iptr);
-double8 __ovld modf(double8 x, double8 *iptr);
-double16 __ovld modf(double16 x, double16 *iptr);
+double __ovld modf(double, double *);
+double2 __ovld modf(double2, double2 *);
+double3 __ovld modf(double3, double3 *);
+double4 __ovld modf(double4, double4 *);
+double8 __ovld modf(double8, double8 *);
+double16 __ovld modf(double16, double16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld modf(half x, half *iptr);
-half2 __ovld modf(half2 x, half2 *iptr);
-half3 __ovld modf(half3 x, half3 *iptr);
-half4 __ovld modf(half4 x, half4 *iptr);
-half8 __ovld modf(half8 x, half8 *iptr);
-half16 __ovld modf(half16 x, half16 *iptr);
+half __ovld modf(half, half *);
+half2 __ovld modf(half2, half2 *);
+half3 __ovld modf(half3, half3 *);
+half4 __ovld modf(half4, half4 *);
+half8 __ovld modf(half8, half8 *);
+half16 __ovld modf(half16, half16 *);
#endif //cl_khr_fp16
-#else
-float __ovld modf(float x, __global float *iptr);
-float2 __ovld modf(float2 x, __global float2 *iptr);
-float3 __ovld modf(float3 x, __global float3 *iptr);
-float4 __ovld modf(float4 x, __global float4 *iptr);
-float8 __ovld modf(float8 x, __global float8 *iptr);
-float16 __ovld modf(float16 x, __global float16 *iptr);
-float __ovld modf(float x, __local float *iptr);
-float2 __ovld modf(float2 x, __local float2 *iptr);
-float3 __ovld modf(float3 x, __local float3 *iptr);
-float4 __ovld modf(float4 x, __local float4 *iptr);
-float8 __ovld modf(float8 x, __local float8 *iptr);
-float16 __ovld modf(float16 x, __local float16 *iptr);
-float __ovld modf(float x, __private float *iptr);
-float2 __ovld modf(float2 x, __private float2 *iptr);
-float3 __ovld modf(float3 x, __private float3 *iptr);
-float4 __ovld modf(float4 x, __private float4 *iptr);
-float8 __ovld modf(float8 x, __private float8 *iptr);
-float16 __ovld modf(float16 x, __private float16 *iptr);
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld modf(float, __global float *);
+float2 __ovld modf(float2, __global float2 *);
+float3 __ovld modf(float3, __global float3 *);
+float4 __ovld modf(float4, __global float4 *);
+float8 __ovld modf(float8, __global float8 *);
+float16 __ovld modf(float16, __global float16 *);
+float __ovld modf(float, __local float *);
+float2 __ovld modf(float2, __local float2 *);
+float3 __ovld modf(float3, __local float3 *);
+float4 __ovld modf(float4, __local float4 *);
+float8 __ovld modf(float8, __local float8 *);
+float16 __ovld modf(float16, __local float16 *);
+float __ovld modf(float, __private float *);
+float2 __ovld modf(float2, __private float2 *);
+float3 __ovld modf(float3, __private float3 *);
+float4 __ovld modf(float4, __private float4 *);
+float8 __ovld modf(float8, __private float8 *);
+float16 __ovld modf(float16, __private float16 *);
#ifdef cl_khr_fp64
-double __ovld modf(double x, __global double *iptr);
-double2 __ovld modf(double2 x, __global double2 *iptr);
-double3 __ovld modf(double3 x, __global double3 *iptr);
-double4 __ovld modf(double4 x, __global double4 *iptr);
-double8 __ovld modf(double8 x, __global double8 *iptr);
-double16 __ovld modf(double16 x, __global double16 *iptr);
-double __ovld modf(double x, __local double *iptr);
-double2 __ovld modf(double2 x, __local double2 *iptr);
-double3 __ovld modf(double3 x, __local double3 *iptr);
-double4 __ovld modf(double4 x, __local double4 *iptr);
-double8 __ovld modf(double8 x, __local double8 *iptr);
-double16 __ovld modf(double16 x, __local double16 *iptr);
-double __ovld modf(double x, __private double *iptr);
-double2 __ovld modf(double2 x, __private double2 *iptr);
-double3 __ovld modf(double3 x, __private double3 *iptr);
-double4 __ovld modf(double4 x, __private double4 *iptr);
-double8 __ovld modf(double8 x, __private double8 *iptr);
-double16 __ovld modf(double16 x, __private double16 *iptr);
+double __ovld modf(double, __global double *);
+double2 __ovld modf(double2, __global double2 *);
+double3 __ovld modf(double3, __global double3 *);
+double4 __ovld modf(double4, __global double4 *);
+double8 __ovld modf(double8, __global double8 *);
+double16 __ovld modf(double16, __global double16 *);
+double __ovld modf(double, __local double *);
+double2 __ovld modf(double2, __local double2 *);
+double3 __ovld modf(double3, __local double3 *);
+double4 __ovld modf(double4, __local double4 *);
+double8 __ovld modf(double8, __local double8 *);
+double16 __ovld modf(double16, __local double16 *);
+double __ovld modf(double, __private double *);
+double2 __ovld modf(double2, __private double2 *);
+double3 __ovld modf(double3, __private double3 *);
+double4 __ovld modf(double4, __private double4 *);
+double8 __ovld modf(double8, __private double8 *);
+double16 __ovld modf(double16, __private double16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld modf(half x, __global half *iptr);
-half2 __ovld modf(half2 x, __global half2 *iptr);
-half3 __ovld modf(half3 x, __global half3 *iptr);
-half4 __ovld modf(half4 x, __global half4 *iptr);
-half8 __ovld modf(half8 x, __global half8 *iptr);
-half16 __ovld modf(half16 x, __global half16 *iptr);
-half __ovld modf(half x, __local half *iptr);
-half2 __ovld modf(half2 x, __local half2 *iptr);
-half3 __ovld modf(half3 x, __local half3 *iptr);
-half4 __ovld modf(half4 x, __local half4 *iptr);
-half8 __ovld modf(half8 x, __local half8 *iptr);
-half16 __ovld modf(half16 x, __local half16 *iptr);
-half __ovld modf(half x, __private half *iptr);
-half2 __ovld modf(half2 x, __private half2 *iptr);
-half3 __ovld modf(half3 x, __private half3 *iptr);
-half4 __ovld modf(half4 x, __private half4 *iptr);
-half8 __ovld modf(half8 x, __private half8 *iptr);
-half16 __ovld modf(half16 x, __private half16 *iptr);
+half __ovld modf(half, __global half *);
+half2 __ovld modf(half2, __global half2 *);
+half3 __ovld modf(half3, __global half3 *);
+half4 __ovld modf(half4, __global half4 *);
+half8 __ovld modf(half8, __global half8 *);
+half16 __ovld modf(half16, __global half16 *);
+half __ovld modf(half, __local half *);
+half2 __ovld modf(half2, __local half2 *);
+half3 __ovld modf(half3, __local half3 *);
+half4 __ovld modf(half4, __local half4 *);
+half8 __ovld modf(half8, __local half8 *);
+half16 __ovld modf(half16, __local half16 *);
+half __ovld modf(half, __private half *);
+half2 __ovld modf(half2, __private half2 *);
+half3 __ovld modf(half3, __private half3 *);
+half4 __ovld modf(half4, __private half4 *);
+half8 __ovld modf(half8, __private half8 *);
+half16 __ovld modf(half16, __private half16 *);
#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
/**
* Returns a quiet NaN. The nancode may be placed
* in the significand of the resulting NaN.
*/
-float __ovld __cnfn nan(uint nancode);
-float2 __ovld __cnfn nan(uint2 nancode);
-float3 __ovld __cnfn nan(uint3 nancode);
-float4 __ovld __cnfn nan(uint4 nancode);
-float8 __ovld __cnfn nan(uint8 nancode);
-float16 __ovld __cnfn nan(uint16 nancode);
+float __ovld __cnfn nan(uint);
+float2 __ovld __cnfn nan(uint2);
+float3 __ovld __cnfn nan(uint3);
+float4 __ovld __cnfn nan(uint4);
+float8 __ovld __cnfn nan(uint8);
+float16 __ovld __cnfn nan(uint16);
#ifdef cl_khr_fp64
-double __ovld __cnfn nan(ulong nancode);
-double2 __ovld __cnfn nan(ulong2 nancode);
-double3 __ovld __cnfn nan(ulong3 nancode);
-double4 __ovld __cnfn nan(ulong4 nancode);
-double8 __ovld __cnfn nan(ulong8 nancode);
-double16 __ovld __cnfn nan(ulong16 nancode);
+double __ovld __cnfn nan(ulong);
+double2 __ovld __cnfn nan(ulong2);
+double3 __ovld __cnfn nan(ulong3);
+double4 __ovld __cnfn nan(ulong4);
+double8 __ovld __cnfn nan(ulong8);
+double16 __ovld __cnfn nan(ulong16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn nan(ushort nancode);
-half2 __ovld __cnfn nan(ushort2 nancode);
-half3 __ovld __cnfn nan(ushort3 nancode);
-half4 __ovld __cnfn nan(ushort4 nancode);
-half8 __ovld __cnfn nan(ushort8 nancode);
-half16 __ovld __cnfn nan(ushort16 nancode);
+half __ovld __cnfn nan(ushort);
+half2 __ovld __cnfn nan(ushort2);
+half3 __ovld __cnfn nan(ushort3);
+half4 __ovld __cnfn nan(ushort4);
+half8 __ovld __cnfn nan(ushort8);
+half16 __ovld __cnfn nan(ushort16);
#endif //cl_khr_fp16
/**
@@ -7980,105 +7991,105 @@ half16 __ovld __cnfn nan(ushort16 nancode);
* largest representable floating-point number less
* than x.
*/
-float __ovld __cnfn nextafter(float x, float y);
-float2 __ovld __cnfn nextafter(float2 x, float2 y);
-float3 __ovld __cnfn nextafter(float3 x, float3 y);
-float4 __ovld __cnfn nextafter(float4 x, float4 y);
-float8 __ovld __cnfn nextafter(float8 x, float8 y);
-float16 __ovld __cnfn nextafter(float16 x, float16 y);
+float __ovld __cnfn nextafter(float, float);
+float2 __ovld __cnfn nextafter(float2, float2);
+float3 __ovld __cnfn nextafter(float3, float3);
+float4 __ovld __cnfn nextafter(float4, float4);
+float8 __ovld __cnfn nextafter(float8, float8);
+float16 __ovld __cnfn nextafter(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn nextafter(double x, double y);
-double2 __ovld __cnfn nextafter(double2 x, double2 y);
-double3 __ovld __cnfn nextafter(double3 x, double3 y);
-double4 __ovld __cnfn nextafter(double4 x, double4 y);
-double8 __ovld __cnfn nextafter(double8 x, double8 y);
-double16 __ovld __cnfn nextafter(double16 x, double16 y);
+double __ovld __cnfn nextafter(double, double);
+double2 __ovld __cnfn nextafter(double2, double2);
+double3 __ovld __cnfn nextafter(double3, double3);
+double4 __ovld __cnfn nextafter(double4, double4);
+double8 __ovld __cnfn nextafter(double8, double8);
+double16 __ovld __cnfn nextafter(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn nextafter(half x, half y);
-half2 __ovld __cnfn nextafter(half2 x, half2 y);
-half3 __ovld __cnfn nextafter(half3 x, half3 y);
-half4 __ovld __cnfn nextafter(half4 x, half4 y);
-half8 __ovld __cnfn nextafter(half8 x, half8 y);
-half16 __ovld __cnfn nextafter(half16 x, half16 y);
+half __ovld __cnfn nextafter(half, half);
+half2 __ovld __cnfn nextafter(half2, half2);
+half3 __ovld __cnfn nextafter(half3, half3);
+half4 __ovld __cnfn nextafter(half4, half4);
+half8 __ovld __cnfn nextafter(half8, half8);
+half16 __ovld __cnfn nextafter(half16, half16);
#endif //cl_khr_fp16
/**
* Compute x to the power y.
*/
-float __ovld __cnfn pow(float x, float y);
-float2 __ovld __cnfn pow(float2 x, float2 y);
-float3 __ovld __cnfn pow(float3 x, float3 y);
-float4 __ovld __cnfn pow(float4 x, float4 y);
-float8 __ovld __cnfn pow(float8 x, float8 y);
-float16 __ovld __cnfn pow(float16 x, float16 y);
+float __ovld __cnfn pow(float, float);
+float2 __ovld __cnfn pow(float2, float2);
+float3 __ovld __cnfn pow(float3, float3);
+float4 __ovld __cnfn pow(float4, float4);
+float8 __ovld __cnfn pow(float8, float8);
+float16 __ovld __cnfn pow(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn pow(double x, double y);
-double2 __ovld __cnfn pow(double2 x, double2 y);
-double3 __ovld __cnfn pow(double3 x, double3 y);
-double4 __ovld __cnfn pow(double4 x, double4 y);
-double8 __ovld __cnfn pow(double8 x, double8 y);
-double16 __ovld __cnfn pow(double16 x, double16 y);
+double __ovld __cnfn pow(double, double);
+double2 __ovld __cnfn pow(double2, double2);
+double3 __ovld __cnfn pow(double3, double3);
+double4 __ovld __cnfn pow(double4, double4);
+double8 __ovld __cnfn pow(double8, double8);
+double16 __ovld __cnfn pow(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn pow(half x, half y);
-half2 __ovld __cnfn pow(half2 x, half2 y);
-half3 __ovld __cnfn pow(half3 x, half3 y);
-half4 __ovld __cnfn pow(half4 x, half4 y);
-half8 __ovld __cnfn pow(half8 x, half8 y);
-half16 __ovld __cnfn pow(half16 x, half16 y);
+half __ovld __cnfn pow(half, half);
+half2 __ovld __cnfn pow(half2, half2);
+half3 __ovld __cnfn pow(half3, half3);
+half4 __ovld __cnfn pow(half4, half4);
+half8 __ovld __cnfn pow(half8, half8);
+half16 __ovld __cnfn pow(half16, half16);
#endif //cl_khr_fp16
/**
* Compute x to the power y, where y is an integer.
*/
-float __ovld __cnfn pown(float x, int y);
-float2 __ovld __cnfn pown(float2 x, int2 y);
-float3 __ovld __cnfn pown(float3 x, int3 y);
-float4 __ovld __cnfn pown(float4 x, int4 y);
-float8 __ovld __cnfn pown(float8 x, int8 y);
-float16 __ovld __cnfn pown(float16 x, int16 y);
+float __ovld __cnfn pown(float, int);
+float2 __ovld __cnfn pown(float2, int2);
+float3 __ovld __cnfn pown(float3, int3);
+float4 __ovld __cnfn pown(float4, int4);
+float8 __ovld __cnfn pown(float8, int8);
+float16 __ovld __cnfn pown(float16, int16);
#ifdef cl_khr_fp64
-double __ovld __cnfn pown(double x, int y);
-double2 __ovld __cnfn pown(double2 x, int2 y);
-double3 __ovld __cnfn pown(double3 x, int3 y);
-double4 __ovld __cnfn pown(double4 x, int4 y);
-double8 __ovld __cnfn pown(double8 x, int8 y);
-double16 __ovld __cnfn pown(double16 x, int16 y);
+double __ovld __cnfn pown(double, int);
+double2 __ovld __cnfn pown(double2, int2);
+double3 __ovld __cnfn pown(double3, int3);
+double4 __ovld __cnfn pown(double4, int4);
+double8 __ovld __cnfn pown(double8, int8);
+double16 __ovld __cnfn pown(double16, int16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn pown(half x, int y);
-half2 __ovld __cnfn pown(half2 x, int2 y);
-half3 __ovld __cnfn pown(half3 x, int3 y);
-half4 __ovld __cnfn pown(half4 x, int4 y);
-half8 __ovld __cnfn pown(half8 x, int8 y);
-half16 __ovld __cnfn pown(half16 x, int16 y);
+half __ovld __cnfn pown(half, int);
+half2 __ovld __cnfn pown(half2, int2);
+half3 __ovld __cnfn pown(half3, int3);
+half4 __ovld __cnfn pown(half4, int4);
+half8 __ovld __cnfn pown(half8, int8);
+half16 __ovld __cnfn pown(half16, int16);
#endif //cl_khr_fp16
/**
* Compute x to the power y, where x is >= 0.
*/
-float __ovld __cnfn powr(float x, float y);
-float2 __ovld __cnfn powr(float2 x, float2 y);
-float3 __ovld __cnfn powr(float3 x, float3 y);
-float4 __ovld __cnfn powr(float4 x, float4 y);
-float8 __ovld __cnfn powr(float8 x, float8 y);
-float16 __ovld __cnfn powr(float16 x, float16 y);
+float __ovld __cnfn powr(float, float);
+float2 __ovld __cnfn powr(float2, float2);
+float3 __ovld __cnfn powr(float3, float3);
+float4 __ovld __cnfn powr(float4, float4);
+float8 __ovld __cnfn powr(float8, float8);
+float16 __ovld __cnfn powr(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn powr(double x, double y);
-double2 __ovld __cnfn powr(double2 x, double2 y);
-double3 __ovld __cnfn powr(double3 x, double3 y);
-double4 __ovld __cnfn powr(double4 x, double4 y);
-double8 __ovld __cnfn powr(double8 x, double8 y);
-double16 __ovld __cnfn powr(double16 x, double16 y);
+double __ovld __cnfn powr(double, double);
+double2 __ovld __cnfn powr(double2, double2);
+double3 __ovld __cnfn powr(double3, double3);
+double4 __ovld __cnfn powr(double4, double4);
+double8 __ovld __cnfn powr(double8, double8);
+double16 __ovld __cnfn powr(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn powr(half x, half y);
-half2 __ovld __cnfn powr(half2 x, half2 y);
-half3 __ovld __cnfn powr(half3 x, half3 y);
-half4 __ovld __cnfn powr(half4 x, half4 y);
-half8 __ovld __cnfn powr(half8 x, half8 y);
-half16 __ovld __cnfn powr(half16 x, half16 y);
+half __ovld __cnfn powr(half, half);
+half2 __ovld __cnfn powr(half2, half2);
+half3 __ovld __cnfn powr(half3, half3);
+half4 __ovld __cnfn powr(half4, half4);
+half8 __ovld __cnfn powr(half8, half8);
+half16 __ovld __cnfn powr(half16, half16);
#endif //cl_khr_fp16
/**
@@ -8087,27 +8098,27 @@ half16 __ovld __cnfn powr(half16 x, half16 y);
* are two integers closest to x/y, n shall be the even
* one. If r is zero, it is given the same sign as x.
*/
-float __ovld __cnfn remainder(float x, float y);
-float2 __ovld __cnfn remainder(float2 x, float2 y);
-float3 __ovld __cnfn remainder(float3 x, float3 y);
-float4 __ovld __cnfn remainder(float4 x, float4 y);
-float8 __ovld __cnfn remainder(float8 x, float8 y);
-float16 __ovld __cnfn remainder(float16 x, float16 y);
+float __ovld __cnfn remainder(float, float);
+float2 __ovld __cnfn remainder(float2, float2);
+float3 __ovld __cnfn remainder(float3, float3);
+float4 __ovld __cnfn remainder(float4, float4);
+float8 __ovld __cnfn remainder(float8, float8);
+float16 __ovld __cnfn remainder(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn remainder(double x, double y);
-double2 __ovld __cnfn remainder(double2 x, double2 y);
-double3 __ovld __cnfn remainder(double3 x, double3 y);
-double4 __ovld __cnfn remainder(double4 x, double4 y);
-double8 __ovld __cnfn remainder(double8 x, double8 y);
-double16 __ovld __cnfn remainder(double16 x, double16 y);
+double __ovld __cnfn remainder(double, double);
+double2 __ovld __cnfn remainder(double2, double2);
+double3 __ovld __cnfn remainder(double3, double3);
+double4 __ovld __cnfn remainder(double4, double4);
+double8 __ovld __cnfn remainder(double8, double8);
+double16 __ovld __cnfn remainder(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn remainder(half x, half y);
-half2 __ovld __cnfn remainder(half2 x, half2 y);
-half3 __ovld __cnfn remainder(half3 x, half3 y);
-half4 __ovld __cnfn remainder(half4 x, half4 y);
-half8 __ovld __cnfn remainder(half8 x, half8 y);
-half16 __ovld __cnfn remainder(half16 x, half16 y);
+half __ovld __cnfn remainder(half, half);
+half2 __ovld __cnfn remainder(half2, half2);
+half3 __ovld __cnfn remainder(half3, half3);
+half4 __ovld __cnfn remainder(half4, half4);
+half8 __ovld __cnfn remainder(half8, half8);
+half16 __ovld __cnfn remainder(half16, half16);
#endif //cl_khr_fp16
/**
@@ -8123,89 +8134,90 @@ half16 __ovld __cnfn remainder(half16 x, half16 y);
* pointed to by quo.
*/
#if defined(__opencl_c_generic_address_space)
-float __ovld remquo(float x, float y, int *quo);
-float2 __ovld remquo(float2 x, float2 y, int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, int16 *quo);
+float __ovld remquo(float, float, int *);
+float2 __ovld remquo(float2, float2, int2 *);
+float3 __ovld remquo(float3, float3, int3 *);
+float4 __ovld remquo(float4, float4, int4 *);
+float8 __ovld remquo(float8, float8, int8 *);
+float16 __ovld remquo(float16, float16, int16 *);
#ifdef cl_khr_fp64
-double __ovld remquo(double x, double y, int *quo);
-double2 __ovld remquo(double2 x, double2 y, int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, int16 *quo);
+double __ovld remquo(double, double, int *);
+double2 __ovld remquo(double2, double2, int2 *);
+double3 __ovld remquo(double3, double3, int3 *);
+double4 __ovld remquo(double4, double4, int4 *);
+double8 __ovld remquo(double8, double8, int8 *);
+double16 __ovld remquo(double16, double16, int16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld remquo(half x, half y, int *quo);
-half2 __ovld remquo(half2 x, half2 y, int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, int16 *quo);
-
+half __ovld remquo(half, half, int *);
+half2 __ovld remquo(half2, half2, int2 *);
+half3 __ovld remquo(half3, half3, int3 *);
+half4 __ovld remquo(half4, half4, int4 *);
+half8 __ovld remquo(half8, half8, int8 *);
+half16 __ovld remquo(half16, half16, int16 *);
#endif //cl_khr_fp16
-#else
-float __ovld remquo(float x, float y, __global int *quo);
-float2 __ovld remquo(float2 x, float2 y, __global int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, __global int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, __global int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, __global int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, __global int16 *quo);
-float __ovld remquo(float x, float y, __local int *quo);
-float2 __ovld remquo(float2 x, float2 y, __local int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, __local int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, __local int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, __local int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, __local int16 *quo);
-float __ovld remquo(float x, float y, __private int *quo);
-float2 __ovld remquo(float2 x, float2 y, __private int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, __private int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, __private int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, __private int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, __private int16 *quo);
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld remquo(float, float, __global int *);
+float2 __ovld remquo(float2, float2, __global int2 *);
+float3 __ovld remquo(float3, float3, __global int3 *);
+float4 __ovld remquo(float4, float4, __global int4 *);
+float8 __ovld remquo(float8, float8, __global int8 *);
+float16 __ovld remquo(float16, float16, __global int16 *);
+float __ovld remquo(float, float, __local int *);
+float2 __ovld remquo(float2, float2, __local int2 *);
+float3 __ovld remquo(float3, float3, __local int3 *);
+float4 __ovld remquo(float4, float4, __local int4 *);
+float8 __ovld remquo(float8, float8, __local int8 *);
+float16 __ovld remquo(float16, float16, __local int16 *);
+float __ovld remquo(float, float, __private int *);
+float2 __ovld remquo(float2, float2, __private int2 *);
+float3 __ovld remquo(float3, float3, __private int3 *);
+float4 __ovld remquo(float4, float4, __private int4 *);
+float8 __ovld remquo(float8, float8, __private int8 *);
+float16 __ovld remquo(float16, float16, __private int16 *);
#ifdef cl_khr_fp64
-double __ovld remquo(double x, double y, __global int *quo);
-double2 __ovld remquo(double2 x, double2 y, __global int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, __global int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, __global int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, __global int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, __global int16 *quo);
-double __ovld remquo(double x, double y, __local int *quo);
-double2 __ovld remquo(double2 x, double2 y, __local int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, __local int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, __local int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, __local int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, __local int16 *quo);
-double __ovld remquo(double x, double y, __private int *quo);
-double2 __ovld remquo(double2 x, double2 y, __private int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, __private int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, __private int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, __private int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, __private int16 *quo);
+double __ovld remquo(double, double, __global int *);
+double2 __ovld remquo(double2, double2, __global int2 *);
+double3 __ovld remquo(double3, double3, __global int3 *);
+double4 __ovld remquo(double4, double4, __global int4 *);
+double8 __ovld remquo(double8, double8, __global int8 *);
+double16 __ovld remquo(double16, double16, __global int16 *);
+double __ovld remquo(double, double, __local int *);
+double2 __ovld remquo(double2, double2, __local int2 *);
+double3 __ovld remquo(double3, double3, __local int3 *);
+double4 __ovld remquo(double4, double4, __local int4 *);
+double8 __ovld remquo(double8, double8, __local int8 *);
+double16 __ovld remquo(double16, double16, __local int16 *);
+double __ovld remquo(double, double, __private int *);
+double2 __ovld remquo(double2, double2, __private int2 *);
+double3 __ovld remquo(double3, double3, __private int3 *);
+double4 __ovld remquo(double4, double4, __private int4 *);
+double8 __ovld remquo(double8, double8, __private int8 *);
+double16 __ovld remquo(double16, double16, __private int16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld remquo(half x, half y, __global int *quo);
-half2 __ovld remquo(half2 x, half2 y, __global int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, __global int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, __global int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, __global int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, __global int16 *quo);
-half __ovld remquo(half x, half y, __local int *quo);
-half2 __ovld remquo(half2 x, half2 y, __local int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, __local int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, __local int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, __local int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, __local int16 *quo);
-half __ovld remquo(half x, half y, __private int *quo);
-half2 __ovld remquo(half2 x, half2 y, __private int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, __private int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, __private int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, __private int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, __private int16 *quo);
+half __ovld remquo(half, half, __global int *);
+half2 __ovld remquo(half2, half2, __global int2 *);
+half3 __ovld remquo(half3, half3, __global int3 *);
+half4 __ovld remquo(half4, half4, __global int4 *);
+half8 __ovld remquo(half8, half8, __global int8 *);
+half16 __ovld remquo(half16, half16, __global int16 *);
+half __ovld remquo(half, half, __local int *);
+half2 __ovld remquo(half2, half2, __local int2 *);
+half3 __ovld remquo(half3, half3, __local int3 *);
+half4 __ovld remquo(half4, half4, __local int4 *);
+half8 __ovld remquo(half8, half8, __local int8 *);
+half16 __ovld remquo(half16, half16, __local int16 *);
+half __ovld remquo(half, half, __private int *);
+half2 __ovld remquo(half2, half2, __private int2 *);
+half3 __ovld remquo(half3, half3, __private int3 *);
+half4 __ovld remquo(half4, half4, __private int4 *);
+half8 __ovld remquo(half8, half8, __private int8 *);
+half16 __ovld remquo(half16, half16, __private int16 *);
#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
/**
* Round to integral value (using round to nearest
* even rounding mode) in floating-point format.
@@ -8238,27 +8250,27 @@ half16 __ovld __cnfn rint(half16);
/**
* Compute x to the power 1/y.
*/
-float __ovld __cnfn rootn(float x, int y);
-float2 __ovld __cnfn rootn(float2 x, int2 y);
-float3 __ovld __cnfn rootn(float3 x, int3 y);
-float4 __ovld __cnfn rootn(float4 x, int4 y);
-float8 __ovld __cnfn rootn(float8 x, int8 y);
-float16 __ovld __cnfn rootn(float16 x, int16 y);
+float __ovld __cnfn rootn(float, int);
+float2 __ovld __cnfn rootn(float2, int2);
+float3 __ovld __cnfn rootn(float3, int3);
+float4 __ovld __cnfn rootn(float4, int4);
+float8 __ovld __cnfn rootn(float8, int8);
+float16 __ovld __cnfn rootn(float16, int16);
#ifdef cl_khr_fp64
-double __ovld __cnfn rootn(double x, int y);
-double2 __ovld __cnfn rootn(double2 x, int2 y);
-double3 __ovld __cnfn rootn(double3 x, int3 y);
-double4 __ovld __cnfn rootn(double4 x, int4 y);
-double8 __ovld __cnfn rootn(double8 x, int8 y);
-double16 __ovld __cnfn rootn(double16 x, int16 y);
+double __ovld __cnfn rootn(double, int);
+double2 __ovld __cnfn rootn(double2, int2);
+double3 __ovld __cnfn rootn(double3, int3);
+double4 __ovld __cnfn rootn(double4, int4);
+double8 __ovld __cnfn rootn(double8, int8);
+double16 __ovld __cnfn rootn(double16, int16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn rootn(half x, int y);
-half2 __ovld __cnfn rootn(half2 x, int2 y);
-half3 __ovld __cnfn rootn(half3 x, int3 y);
-half4 __ovld __cnfn rootn(half4 x, int4 y);
-half8 __ovld __cnfn rootn(half8 x, int8 y);
-half16 __ovld __cnfn rootn(half16 x, int16 y);
+half __ovld __cnfn rootn(half, int);
+half2 __ovld __cnfn rootn(half2, int2);
+half3 __ovld __cnfn rootn(half3, int3);
+half4 __ovld __cnfn rootn(half4, int4);
+half8 __ovld __cnfn rootn(half8, int8);
+half16 __ovld __cnfn rootn(half16, int16);
#endif //cl_khr_fp16
/**
@@ -8266,27 +8278,27 @@ half16 __ovld __cnfn rootn(half16 x, int16 y);
* halfway cases away from zero, regardless of the
* current rounding direction.
*/
-float __ovld __cnfn round(float x);
-float2 __ovld __cnfn round(float2 x);
-float3 __ovld __cnfn round(float3 x);
-float4 __ovld __cnfn round(float4 x);
-float8 __ovld __cnfn round(float8 x);
-float16 __ovld __cnfn round(float16 x);
+float __ovld __cnfn round(float);
+float2 __ovld __cnfn round(float2);
+float3 __ovld __cnfn round(float3);
+float4 __ovld __cnfn round(float4);
+float8 __ovld __cnfn round(float8);
+float16 __ovld __cnfn round(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn round(double x);
-double2 __ovld __cnfn round(double2 x);
-double3 __ovld __cnfn round(double3 x);
-double4 __ovld __cnfn round(double4 x);
-double8 __ovld __cnfn round(double8 x);
-double16 __ovld __cnfn round(double16 x);
+double __ovld __cnfn round(double);
+double2 __ovld __cnfn round(double2);
+double3 __ovld __cnfn round(double3);
+double4 __ovld __cnfn round(double4);
+double8 __ovld __cnfn round(double8);
+double16 __ovld __cnfn round(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn round(half x);
-half2 __ovld __cnfn round(half2 x);
-half3 __ovld __cnfn round(half3 x);
-half4 __ovld __cnfn round(half4 x);
-half8 __ovld __cnfn round(half8 x);
-half16 __ovld __cnfn round(half16 x);
+half __ovld __cnfn round(half);
+half2 __ovld __cnfn round(half2);
+half3 __ovld __cnfn round(half3);
+half4 __ovld __cnfn round(half4);
+half8 __ovld __cnfn round(half8);
+half16 __ovld __cnfn round(half16);
#endif //cl_khr_fp16
/**
@@ -8347,88 +8359,90 @@ half16 __ovld __cnfn sin(half16);
* in cosval.
*/
#if defined(__opencl_c_generic_address_space)
-float __ovld sincos(float x, float *cosval);
-float2 __ovld sincos(float2 x, float2 *cosval);
-float3 __ovld sincos(float3 x, float3 *cosval);
-float4 __ovld sincos(float4 x, float4 *cosval);
-float8 __ovld sincos(float8 x, float8 *cosval);
-float16 __ovld sincos(float16 x, float16 *cosval);
+float __ovld sincos(float, float *);
+float2 __ovld sincos(float2, float2 *);
+float3 __ovld sincos(float3, float3 *);
+float4 __ovld sincos(float4, float4 *);
+float8 __ovld sincos(float8, float8 *);
+float16 __ovld sincos(float16, float16 *);
#ifdef cl_khr_fp64
-double __ovld sincos(double x, double *cosval);
-double2 __ovld sincos(double2 x, double2 *cosval);
-double3 __ovld sincos(double3 x, double3 *cosval);
-double4 __ovld sincos(double4 x, double4 *cosval);
-double8 __ovld sincos(double8 x, double8 *cosval);
-double16 __ovld sincos(double16 x, double16 *cosval);
+double __ovld sincos(double, double *);
+double2 __ovld sincos(double2, double2 *);
+double3 __ovld sincos(double3, double3 *);
+double4 __ovld sincos(double4, double4 *);
+double8 __ovld sincos(double8, double8 *);
+double16 __ovld sincos(double16, double16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld sincos(half x, half *cosval);
-half2 __ovld sincos(half2 x, half2 *cosval);
-half3 __ovld sincos(half3 x, half3 *cosval);
-half4 __ovld sincos(half4 x, half4 *cosval);
-half8 __ovld sincos(half8 x, half8 *cosval);
-half16 __ovld sincos(half16 x, half16 *cosval);
+half __ovld sincos(half, half *);
+half2 __ovld sincos(half2, half2 *);
+half3 __ovld sincos(half3, half3 *);
+half4 __ovld sincos(half4, half4 *);
+half8 __ovld sincos(half8, half8 *);
+half16 __ovld sincos(half16, half16 *);
#endif //cl_khr_fp16
-#else
-float __ovld sincos(float x, __global float *cosval);
-float2 __ovld sincos(float2 x, __global float2 *cosval);
-float3 __ovld sincos(float3 x, __global float3 *cosval);
-float4 __ovld sincos(float4 x, __global float4 *cosval);
-float8 __ovld sincos(float8 x, __global float8 *cosval);
-float16 __ovld sincos(float16 x, __global float16 *cosval);
-float __ovld sincos(float x, __local float *cosval);
-float2 __ovld sincos(float2 x, __local float2 *cosval);
-float3 __ovld sincos(float3 x, __local float3 *cosval);
-float4 __ovld sincos(float4 x, __local float4 *cosval);
-float8 __ovld sincos(float8 x, __local float8 *cosval);
-float16 __ovld sincos(float16 x, __local float16 *cosval);
-float __ovld sincos(float x, __private float *cosval);
-float2 __ovld sincos(float2 x, __private float2 *cosval);
-float3 __ovld sincos(float3 x, __private float3 *cosval);
-float4 __ovld sincos(float4 x, __private float4 *cosval);
-float8 __ovld sincos(float8 x, __private float8 *cosval);
-float16 __ovld sincos(float16 x, __private float16 *cosval);
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld sincos(float, __global float *);
+float2 __ovld sincos(float2, __global float2 *);
+float3 __ovld sincos(float3, __global float3 *);
+float4 __ovld sincos(float4, __global float4 *);
+float8 __ovld sincos(float8, __global float8 *);
+float16 __ovld sincos(float16, __global float16 *);
+float __ovld sincos(float, __local float *);
+float2 __ovld sincos(float2, __local float2 *);
+float3 __ovld sincos(float3, __local float3 *);
+float4 __ovld sincos(float4, __local float4 *);
+float8 __ovld sincos(float8, __local float8 *);
+float16 __ovld sincos(float16, __local float16 *);
+float __ovld sincos(float, __private float *);
+float2 __ovld sincos(float2, __private float2 *);
+float3 __ovld sincos(float3, __private float3 *);
+float4 __ovld sincos(float4, __private float4 *);
+float8 __ovld sincos(float8, __private float8 *);
+float16 __ovld sincos(float16, __private float16 *);
#ifdef cl_khr_fp64
-double __ovld sincos(double x, __global double *cosval);
-double2 __ovld sincos(double2 x, __global double2 *cosval);
-double3 __ovld sincos(double3 x, __global double3 *cosval);
-double4 __ovld sincos(double4 x, __global double4 *cosval);
-double8 __ovld sincos(double8 x, __global double8 *cosval);
-double16 __ovld sincos(double16 x, __global double16 *cosval);
-double __ovld sincos(double x, __local double *cosval);
-double2 __ovld sincos(double2 x, __local double2 *cosval);
-double3 __ovld sincos(double3 x, __local double3 *cosval);
-double4 __ovld sincos(double4 x, __local double4 *cosval);
-double8 __ovld sincos(double8 x, __local double8 *cosval);
-double16 __ovld sincos(double16 x, __local double16 *cosval);
-double __ovld sincos(double x, __private double *cosval);
-double2 __ovld sincos(double2 x, __private double2 *cosval);
-double3 __ovld sincos(double3 x, __private double3 *cosval);
-double4 __ovld sincos(double4 x, __private double4 *cosval);
-double8 __ovld sincos(double8 x, __private double8 *cosval);
-double16 __ovld sincos(double16 x, __private double16 *cosval);
+double __ovld sincos(double, __global double *);
+double2 __ovld sincos(double2, __global double2 *);
+double3 __ovld sincos(double3, __global double3 *);
+double4 __ovld sincos(double4, __global double4 *);
+double8 __ovld sincos(double8, __global double8 *);
+double16 __ovld sincos(double16, __global double16 *);
+double __ovld sincos(double, __local double *);
+double2 __ovld sincos(double2, __local double2 *);
+double3 __ovld sincos(double3, __local double3 *);
+double4 __ovld sincos(double4, __local double4 *);
+double8 __ovld sincos(double8, __local double8 *);
+double16 __ovld sincos(double16, __local double16 *);
+double __ovld sincos(double, __private double *);
+double2 __ovld sincos(double2, __private double2 *);
+double3 __ovld sincos(double3, __private double3 *);
+double4 __ovld sincos(double4, __private double4 *);
+double8 __ovld sincos(double8, __private double8 *);
+double16 __ovld sincos(double16, __private double16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld sincos(half x, __global half *cosval);
-half2 __ovld sincos(half2 x, __global half2 *cosval);
-half3 __ovld sincos(half3 x, __global half3 *cosval);
-half4 __ovld sincos(half4 x, __global half4 *cosval);
-half8 __ovld sincos(half8 x, __global half8 *cosval);
-half16 __ovld sincos(half16 x, __global half16 *cosval);
-half __ovld sincos(half x, __local half *cosval);
-half2 __ovld sincos(half2 x, __local half2 *cosval);
-half3 __ovld sincos(half3 x, __local half3 *cosval);
-half4 __ovld sincos(half4 x, __local half4 *cosval);
-half8 __ovld sincos(half8 x, __local half8 *cosval);
-half16 __ovld sincos(half16 x, __local half16 *cosval);
-half __ovld sincos(half x, __private half *cosval);
-half2 __ovld sincos(half2 x, __private half2 *cosval);
-half3 __ovld sincos(half3 x, __private half3 *cosval);
-half4 __ovld sincos(half4 x, __private half4 *cosval);
-half8 __ovld sincos(half8 x, __private half8 *cosval);
-half16 __ovld sincos(half16 x, __private half16 *cosval);
+half __ovld sincos(half, __global half *);
+half2 __ovld sincos(half2, __global half2 *);
+half3 __ovld sincos(half3, __global half3 *);
+half4 __ovld sincos(half4, __global half4 *);
+half8 __ovld sincos(half8, __global half8 *);
+half16 __ovld sincos(half16, __global half16 *);
+half __ovld sincos(half, __local half *);
+half2 __ovld sincos(half2, __local half2 *);
+half3 __ovld sincos(half3, __local half3 *);
+half4 __ovld sincos(half4, __local half4 *);
+half8 __ovld sincos(half8, __local half8 *);
+half16 __ovld sincos(half16, __local half16 *);
+half __ovld sincos(half, __private half *);
+half2 __ovld sincos(half2, __private half2 *);
+half3 __ovld sincos(half3, __private half3 *);
+half4 __ovld sincos(half4, __private half4 *);
+half8 __ovld sincos(half8, __private half8 *);
+half16 __ovld sincos(half16, __private half16 *);
#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
/**
* Compute hyperbolic sine.
@@ -8459,27 +8473,27 @@ half16 __ovld __cnfn sinh(half16);
/**
* Compute sin (PI * x).
*/
-float __ovld __cnfn sinpi(float x);
-float2 __ovld __cnfn sinpi(float2 x);
-float3 __ovld __cnfn sinpi(float3 x);
-float4 __ovld __cnfn sinpi(float4 x);
-float8 __ovld __cnfn sinpi(float8 x);
-float16 __ovld __cnfn sinpi(float16 x);
+float __ovld __cnfn sinpi(float);
+float2 __ovld __cnfn sinpi(float2);
+float3 __ovld __cnfn sinpi(float3);
+float4 __ovld __cnfn sinpi(float4);
+float8 __ovld __cnfn sinpi(float8);
+float16 __ovld __cnfn sinpi(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn sinpi(double x);
-double2 __ovld __cnfn sinpi(double2 x);
-double3 __ovld __cnfn sinpi(double3 x);
-double4 __ovld __cnfn sinpi(double4 x);
-double8 __ovld __cnfn sinpi(double8 x);
-double16 __ovld __cnfn sinpi(double16 x);
+double __ovld __cnfn sinpi(double);
+double2 __ovld __cnfn sinpi(double2);
+double3 __ovld __cnfn sinpi(double3);
+double4 __ovld __cnfn sinpi(double4);
+double8 __ovld __cnfn sinpi(double8);
+double16 __ovld __cnfn sinpi(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn sinpi(half x);
-half2 __ovld __cnfn sinpi(half2 x);
-half3 __ovld __cnfn sinpi(half3 x);
-half4 __ovld __cnfn sinpi(half4 x);
-half8 __ovld __cnfn sinpi(half8 x);
-half16 __ovld __cnfn sinpi(half16 x);
+half __ovld __cnfn sinpi(half);
+half2 __ovld __cnfn sinpi(half2);
+half3 __ovld __cnfn sinpi(half3);
+half4 __ovld __cnfn sinpi(half4);
+half8 __ovld __cnfn sinpi(half8);
+half16 __ovld __cnfn sinpi(half16);
#endif //cl_khr_fp16
/**
@@ -8563,27 +8577,27 @@ half16 __ovld __cnfn tanh(half16);
/**
* Compute tan (PI * x).
*/
-float __ovld __cnfn tanpi(float x);
-float2 __ovld __cnfn tanpi(float2 x);
-float3 __ovld __cnfn tanpi(float3 x);
-float4 __ovld __cnfn tanpi(float4 x);
-float8 __ovld __cnfn tanpi(float8 x);
-float16 __ovld __cnfn tanpi(float16 x);
+float __ovld __cnfn tanpi(float);
+float2 __ovld __cnfn tanpi(float2);
+float3 __ovld __cnfn tanpi(float3);
+float4 __ovld __cnfn tanpi(float4);
+float8 __ovld __cnfn tanpi(float8);
+float16 __ovld __cnfn tanpi(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn tanpi(double x);
-double2 __ovld __cnfn tanpi(double2 x);
-double3 __ovld __cnfn tanpi(double3 x);
-double4 __ovld __cnfn tanpi(double4 x);
-double8 __ovld __cnfn tanpi(double8 x);
-double16 __ovld __cnfn tanpi(double16 x);
+double __ovld __cnfn tanpi(double);
+double2 __ovld __cnfn tanpi(double2);
+double3 __ovld __cnfn tanpi(double3);
+double4 __ovld __cnfn tanpi(double4);
+double8 __ovld __cnfn tanpi(double8);
+double16 __ovld __cnfn tanpi(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn tanpi(half x);
-half2 __ovld __cnfn tanpi(half2 x);
-half3 __ovld __cnfn tanpi(half3 x);
-half4 __ovld __cnfn tanpi(half4 x);
-half8 __ovld __cnfn tanpi(half8 x);
-half16 __ovld __cnfn tanpi(half16 x);
+half __ovld __cnfn tanpi(half);
+half2 __ovld __cnfn tanpi(half2);
+half3 __ovld __cnfn tanpi(half3);
+half4 __ovld __cnfn tanpi(half4);
+half8 __ovld __cnfn tanpi(half8);
+half16 __ovld __cnfn tanpi(half16);
#endif //cl_khr_fp16
/**
@@ -8642,711 +8656,711 @@ half16 __ovld __cnfn trunc(half16);
/**
* Compute cosine. x must be in the range -2^16 ... +2^16.
*/
-float __ovld __cnfn half_cos(float x);
-float2 __ovld __cnfn half_cos(float2 x);
-float3 __ovld __cnfn half_cos(float3 x);
-float4 __ovld __cnfn half_cos(float4 x);
-float8 __ovld __cnfn half_cos(float8 x);
-float16 __ovld __cnfn half_cos(float16 x);
+float __ovld __cnfn half_cos(float);
+float2 __ovld __cnfn half_cos(float2);
+float3 __ovld __cnfn half_cos(float3);
+float4 __ovld __cnfn half_cos(float4);
+float8 __ovld __cnfn half_cos(float8);
+float16 __ovld __cnfn half_cos(float16);
/**
* Compute x / y.
*/
-float __ovld __cnfn half_divide(float x, float y);
-float2 __ovld __cnfn half_divide(float2 x, float2 y);
-float3 __ovld __cnfn half_divide(float3 x, float3 y);
-float4 __ovld __cnfn half_divide(float4 x, float4 y);
-float8 __ovld __cnfn half_divide(float8 x, float8 y);
-float16 __ovld __cnfn half_divide(float16 x, float16 y);
+float __ovld __cnfn half_divide(float, float);
+float2 __ovld __cnfn half_divide(float2, float2);
+float3 __ovld __cnfn half_divide(float3, float3);
+float4 __ovld __cnfn half_divide(float4, float4);
+float8 __ovld __cnfn half_divide(float8, float8);
+float16 __ovld __cnfn half_divide(float16, float16);
/**
* Compute the base- e exponential of x.
*/
-float __ovld __cnfn half_exp(float x);
-float2 __ovld __cnfn half_exp(float2 x);
-float3 __ovld __cnfn half_exp(float3 x);
-float4 __ovld __cnfn half_exp(float4 x);
-float8 __ovld __cnfn half_exp(float8 x);
-float16 __ovld __cnfn half_exp(float16 x);
+float __ovld __cnfn half_exp(float);
+float2 __ovld __cnfn half_exp(float2);
+float3 __ovld __cnfn half_exp(float3);
+float4 __ovld __cnfn half_exp(float4);
+float8 __ovld __cnfn half_exp(float8);
+float16 __ovld __cnfn half_exp(float16);
/**
* Compute the base- 2 exponential of x.
*/
-float __ovld __cnfn half_exp2(float x);
-float2 __ovld __cnfn half_exp2(float2 x);
-float3 __ovld __cnfn half_exp2(float3 x);
-float4 __ovld __cnfn half_exp2(float4 x);
-float8 __ovld __cnfn half_exp2(float8 x);
-float16 __ovld __cnfn half_exp2(float16 x);
+float __ovld __cnfn half_exp2(float);
+float2 __ovld __cnfn half_exp2(float2);
+float3 __ovld __cnfn half_exp2(float3);
+float4 __ovld __cnfn half_exp2(float4);
+float8 __ovld __cnfn half_exp2(float8);
+float16 __ovld __cnfn half_exp2(float16);
/**
* Compute the base- 10 exponential of x.
*/
-float __ovld __cnfn half_exp10(float x);
-float2 __ovld __cnfn half_exp10(float2 x);
-float3 __ovld __cnfn half_exp10(float3 x);
-float4 __ovld __cnfn half_exp10(float4 x);
-float8 __ovld __cnfn half_exp10(float8 x);
-float16 __ovld __cnfn half_exp10(float16 x);
+float __ovld __cnfn half_exp10(float);
+float2 __ovld __cnfn half_exp10(float2);
+float3 __ovld __cnfn half_exp10(float3);
+float4 __ovld __cnfn half_exp10(float4);
+float8 __ovld __cnfn half_exp10(float8);
+float16 __ovld __cnfn half_exp10(float16);
/**
* Compute natural logarithm.
*/
-float __ovld __cnfn half_log(float x);
-float2 __ovld __cnfn half_log(float2 x);
-float3 __ovld __cnfn half_log(float3 x);
-float4 __ovld __cnfn half_log(float4 x);
-float8 __ovld __cnfn half_log(float8 x);
-float16 __ovld __cnfn half_log(float16 x);
+float __ovld __cnfn half_log(float);
+float2 __ovld __cnfn half_log(float2);
+float3 __ovld __cnfn half_log(float3);
+float4 __ovld __cnfn half_log(float4);
+float8 __ovld __cnfn half_log(float8);
+float16 __ovld __cnfn half_log(float16);
/**
* Compute a base 2 logarithm.
*/
-float __ovld __cnfn half_log2(float x);
-float2 __ovld __cnfn half_log2(float2 x);
-float3 __ovld __cnfn half_log2(float3 x);
-float4 __ovld __cnfn half_log2(float4 x);
-float8 __ovld __cnfn half_log2(float8 x);
-float16 __ovld __cnfn half_log2(float16 x);
+float __ovld __cnfn half_log2(float);
+float2 __ovld __cnfn half_log2(float2);
+float3 __ovld __cnfn half_log2(float3);
+float4 __ovld __cnfn half_log2(float4);
+float8 __ovld __cnfn half_log2(float8);
+float16 __ovld __cnfn half_log2(float16);
/**
* Compute a base 10 logarithm.
*/
-float __ovld __cnfn half_log10(float x);
-float2 __ovld __cnfn half_log10(float2 x);
-float3 __ovld __cnfn half_log10(float3 x);
-float4 __ovld __cnfn half_log10(float4 x);
-float8 __ovld __cnfn half_log10(float8 x);
-float16 __ovld __cnfn half_log10(float16 x);
+float __ovld __cnfn half_log10(float);
+float2 __ovld __cnfn half_log10(float2);
+float3 __ovld __cnfn half_log10(float3);
+float4 __ovld __cnfn half_log10(float4);
+float8 __ovld __cnfn half_log10(float8);
+float16 __ovld __cnfn half_log10(float16);
/**
* Compute x to the power y, where x is >= 0.
*/
-float __ovld __cnfn half_powr(float x, float y);
-float2 __ovld __cnfn half_powr(float2 x, float2 y);
-float3 __ovld __cnfn half_powr(float3 x, float3 y);
-float4 __ovld __cnfn half_powr(float4 x, float4 y);
-float8 __ovld __cnfn half_powr(float8 x, float8 y);
-float16 __ovld __cnfn half_powr(float16 x, float16 y);
+float __ovld __cnfn half_powr(float, float);
+float2 __ovld __cnfn half_powr(float2, float2);
+float3 __ovld __cnfn half_powr(float3, float3);
+float4 __ovld __cnfn half_powr(float4, float4);
+float8 __ovld __cnfn half_powr(float8, float8);
+float16 __ovld __cnfn half_powr(float16, float16);
/**
* Compute reciprocal.
*/
-float __ovld __cnfn half_recip(float x);
-float2 __ovld __cnfn half_recip(float2 x);
-float3 __ovld __cnfn half_recip(float3 x);
-float4 __ovld __cnfn half_recip(float4 x);
-float8 __ovld __cnfn half_recip(float8 x);
-float16 __ovld __cnfn half_recip(float16 x);
+float __ovld __cnfn half_recip(float);
+float2 __ovld __cnfn half_recip(float2);
+float3 __ovld __cnfn half_recip(float3);
+float4 __ovld __cnfn half_recip(float4);
+float8 __ovld __cnfn half_recip(float8);
+float16 __ovld __cnfn half_recip(float16);
/**
* Compute inverse square root.
*/
-float __ovld __cnfn half_rsqrt(float x);
-float2 __ovld __cnfn half_rsqrt(float2 x);
-float3 __ovld __cnfn half_rsqrt(float3 x);
-float4 __ovld __cnfn half_rsqrt(float4 x);
-float8 __ovld __cnfn half_rsqrt(float8 x);
-float16 __ovld __cnfn half_rsqrt(float16 x);
+float __ovld __cnfn half_rsqrt(float);
+float2 __ovld __cnfn half_rsqrt(float2);
+float3 __ovld __cnfn half_rsqrt(float3);
+float4 __ovld __cnfn half_rsqrt(float4);
+float8 __ovld __cnfn half_rsqrt(float8);
+float16 __ovld __cnfn half_rsqrt(float16);
/**
* Compute sine. x must be in the range -2^16 ... +2^16.
*/
-float __ovld __cnfn half_sin(float x);
-float2 __ovld __cnfn half_sin(float2 x);
-float3 __ovld __cnfn half_sin(float3 x);
-float4 __ovld __cnfn half_sin(float4 x);
-float8 __ovld __cnfn half_sin(float8 x);
-float16 __ovld __cnfn half_sin(float16 x);
+float __ovld __cnfn half_sin(float);
+float2 __ovld __cnfn half_sin(float2);
+float3 __ovld __cnfn half_sin(float3);
+float4 __ovld __cnfn half_sin(float4);
+float8 __ovld __cnfn half_sin(float8);
+float16 __ovld __cnfn half_sin(float16);
/**
* Compute square root.
*/
-float __ovld __cnfn half_sqrt(float x);
-float2 __ovld __cnfn half_sqrt(float2 x);
-float3 __ovld __cnfn half_sqrt(float3 x);
-float4 __ovld __cnfn half_sqrt(float4 x);
-float8 __ovld __cnfn half_sqrt(float8 x);
-float16 __ovld __cnfn half_sqrt(float16 x);
+float __ovld __cnfn half_sqrt(float);
+float2 __ovld __cnfn half_sqrt(float2);
+float3 __ovld __cnfn half_sqrt(float3);
+float4 __ovld __cnfn half_sqrt(float4);
+float8 __ovld __cnfn half_sqrt(float8);
+float16 __ovld __cnfn half_sqrt(float16);
/**
* Compute tangent. x must be in the range -216 ... +216.
*/
-float __ovld __cnfn half_tan(float x);
-float2 __ovld __cnfn half_tan(float2 x);
-float3 __ovld __cnfn half_tan(float3 x);
-float4 __ovld __cnfn half_tan(float4 x);
-float8 __ovld __cnfn half_tan(float8 x);
-float16 __ovld __cnfn half_tan(float16 x);
+float __ovld __cnfn half_tan(float);
+float2 __ovld __cnfn half_tan(float2);
+float3 __ovld __cnfn half_tan(float3);
+float4 __ovld __cnfn half_tan(float4);
+float8 __ovld __cnfn half_tan(float8);
+float16 __ovld __cnfn half_tan(float16);
/**
* Compute cosine over an implementation-defined range.
* The maximum error is implementation-defined.
*/
-float __ovld __cnfn native_cos(float x);
-float2 __ovld __cnfn native_cos(float2 x);
-float3 __ovld __cnfn native_cos(float3 x);
-float4 __ovld __cnfn native_cos(float4 x);
-float8 __ovld __cnfn native_cos(float8 x);
-float16 __ovld __cnfn native_cos(float16 x);
+float __ovld __cnfn native_cos(float);
+float2 __ovld __cnfn native_cos(float2);
+float3 __ovld __cnfn native_cos(float3);
+float4 __ovld __cnfn native_cos(float4);
+float8 __ovld __cnfn native_cos(float8);
+float16 __ovld __cnfn native_cos(float16);
/**
* Compute x / y over an implementation-defined range.
* The maximum error is implementation-defined.
*/
-float __ovld __cnfn native_divide(float x, float y);
-float2 __ovld __cnfn native_divide(float2 x, float2 y);
-float3 __ovld __cnfn native_divide(float3 x, float3 y);
-float4 __ovld __cnfn native_divide(float4 x, float4 y);
-float8 __ovld __cnfn native_divide(float8 x, float8 y);
-float16 __ovld __cnfn native_divide(float16 x, float16 y);
+float __ovld __cnfn native_divide(float, float);
+float2 __ovld __cnfn native_divide(float2, float2);
+float3 __ovld __cnfn native_divide(float3, float3);
+float4 __ovld __cnfn native_divide(float4, float4);
+float8 __ovld __cnfn native_divide(float8, float8);
+float16 __ovld __cnfn native_divide(float16, float16);
/**
* Compute the base- e exponential of x over an
* implementation-defined range. The maximum error is
* implementation-defined.
*/
-float __ovld __cnfn native_exp(float x);
-float2 __ovld __cnfn native_exp(float2 x);
-float3 __ovld __cnfn native_exp(float3 x);
-float4 __ovld __cnfn native_exp(float4 x);
-float8 __ovld __cnfn native_exp(float8 x);
-float16 __ovld __cnfn native_exp(float16 x);
+float __ovld __cnfn native_exp(float);
+float2 __ovld __cnfn native_exp(float2);
+float3 __ovld __cnfn native_exp(float3);
+float4 __ovld __cnfn native_exp(float4);
+float8 __ovld __cnfn native_exp(float8);
+float16 __ovld __cnfn native_exp(float16);
/**
* Compute the base- 2 exponential of x over an
* implementation-defined range. The maximum error is
* implementation-defined.
*/
-float __ovld __cnfn native_exp2(float x);
-float2 __ovld __cnfn native_exp2(float2 x);
-float3 __ovld __cnfn native_exp2(float3 x);
-float4 __ovld __cnfn native_exp2(float4 x);
-float8 __ovld __cnfn native_exp2(float8 x);
-float16 __ovld __cnfn native_exp2(float16 x);
+float __ovld __cnfn native_exp2(float);
+float2 __ovld __cnfn native_exp2(float2);
+float3 __ovld __cnfn native_exp2(float3);
+float4 __ovld __cnfn native_exp2(float4);
+float8 __ovld __cnfn native_exp2(float8);
+float16 __ovld __cnfn native_exp2(float16);
/**
* Compute the base- 10 exponential of x over an
* implementation-defined range. The maximum error is
* implementation-defined.
*/
-float __ovld __cnfn native_exp10(float x);
-float2 __ovld __cnfn native_exp10(float2 x);
-float3 __ovld __cnfn native_exp10(float3 x);
-float4 __ovld __cnfn native_exp10(float4 x);
-float8 __ovld __cnfn native_exp10(float8 x);
-float16 __ovld __cnfn native_exp10(float16 x);
+float __ovld __cnfn native_exp10(float);
+float2 __ovld __cnfn native_exp10(float2);
+float3 __ovld __cnfn native_exp10(float3);
+float4 __ovld __cnfn native_exp10(float4);
+float8 __ovld __cnfn native_exp10(float8);
+float16 __ovld __cnfn native_exp10(float16);
/**
* Compute natural logarithm over an implementationdefined
* range. The maximum error is implementation
* defined.
*/
-float __ovld __cnfn native_log(float x);
-float2 __ovld __cnfn native_log(float2 x);
-float3 __ovld __cnfn native_log(float3 x);
-float4 __ovld __cnfn native_log(float4 x);
-float8 __ovld __cnfn native_log(float8 x);
-float16 __ovld __cnfn native_log(float16 x);
+float __ovld __cnfn native_log(float);
+float2 __ovld __cnfn native_log(float2);
+float3 __ovld __cnfn native_log(float3);
+float4 __ovld __cnfn native_log(float4);
+float8 __ovld __cnfn native_log(float8);
+float16 __ovld __cnfn native_log(float16);
/**
* Compute a base 2 logarithm over an implementationdefined
* range. The maximum error is implementationdefined.
*/
-float __ovld __cnfn native_log2(float x);
-float2 __ovld __cnfn native_log2(float2 x);
-float3 __ovld __cnfn native_log2(float3 x);
-float4 __ovld __cnfn native_log2(float4 x);
-float8 __ovld __cnfn native_log2(float8 x);
-float16 __ovld __cnfn native_log2(float16 x);
+float __ovld __cnfn native_log2(float);
+float2 __ovld __cnfn native_log2(float2);
+float3 __ovld __cnfn native_log2(float3);
+float4 __ovld __cnfn native_log2(float4);
+float8 __ovld __cnfn native_log2(float8);
+float16 __ovld __cnfn native_log2(float16);
/**
* Compute a base 10 logarithm over an implementationdefined
* range. The maximum error is implementationdefined.
*/
-float __ovld __cnfn native_log10(float x);
-float2 __ovld __cnfn native_log10(float2 x);
-float3 __ovld __cnfn native_log10(float3 x);
-float4 __ovld __cnfn native_log10(float4 x);
-float8 __ovld __cnfn native_log10(float8 x);
-float16 __ovld __cnfn native_log10(float16 x);
+float __ovld __cnfn native_log10(float);
+float2 __ovld __cnfn native_log10(float2);
+float3 __ovld __cnfn native_log10(float3);
+float4 __ovld __cnfn native_log10(float4);
+float8 __ovld __cnfn native_log10(float8);
+float16 __ovld __cnfn native_log10(float16);
/**
* Compute x to the power y, where x is >= 0. The range of
* x and y are implementation-defined. The maximum error
* is implementation-defined.
*/
-float __ovld __cnfn native_powr(float x, float y);
-float2 __ovld __cnfn native_powr(float2 x, float2 y);
-float3 __ovld __cnfn native_powr(float3 x, float3 y);
-float4 __ovld __cnfn native_powr(float4 x, float4 y);
-float8 __ovld __cnfn native_powr(float8 x, float8 y);
-float16 __ovld __cnfn native_powr(float16 x, float16 y);
+float __ovld __cnfn native_powr(float, float);
+float2 __ovld __cnfn native_powr(float2, float2);
+float3 __ovld __cnfn native_powr(float3, float3);
+float4 __ovld __cnfn native_powr(float4, float4);
+float8 __ovld __cnfn native_powr(float8, float8);
+float16 __ovld __cnfn native_powr(float16, float16);
/**
* Compute reciprocal over an implementation-defined
* range. The maximum error is implementation-defined.
*/
-float __ovld __cnfn native_recip(float x);
-float2 __ovld __cnfn native_recip(float2 x);
-float3 __ovld __cnfn native_recip(float3 x);
-float4 __ovld __cnfn native_recip(float4 x);
-float8 __ovld __cnfn native_recip(float8 x);
-float16 __ovld __cnfn native_recip(float16 x);
+float __ovld __cnfn native_recip(float);
+float2 __ovld __cnfn native_recip(float2);
+float3 __ovld __cnfn native_recip(float3);
+float4 __ovld __cnfn native_recip(float4);
+float8 __ovld __cnfn native_recip(float8);
+float16 __ovld __cnfn native_recip(float16);
/**
* Compute inverse square root over an implementationdefined
* range. The maximum error is implementationdefined.
*/
-float __ovld __cnfn native_rsqrt(float x);
-float2 __ovld __cnfn native_rsqrt(float2 x);
-float3 __ovld __cnfn native_rsqrt(float3 x);
-float4 __ovld __cnfn native_rsqrt(float4 x);
-float8 __ovld __cnfn native_rsqrt(float8 x);
-float16 __ovld __cnfn native_rsqrt(float16 x);
+float __ovld __cnfn native_rsqrt(float);
+float2 __ovld __cnfn native_rsqrt(float2);
+float3 __ovld __cnfn native_rsqrt(float3);
+float4 __ovld __cnfn native_rsqrt(float4);
+float8 __ovld __cnfn native_rsqrt(float8);
+float16 __ovld __cnfn native_rsqrt(float16);
/**
* Compute sine over an implementation-defined range.
* The maximum error is implementation-defined.
*/
-float __ovld __cnfn native_sin(float x);
-float2 __ovld __cnfn native_sin(float2 x);
-float3 __ovld __cnfn native_sin(float3 x);
-float4 __ovld __cnfn native_sin(float4 x);
-float8 __ovld __cnfn native_sin(float8 x);
-float16 __ovld __cnfn native_sin(float16 x);
+float __ovld __cnfn native_sin(float);
+float2 __ovld __cnfn native_sin(float2);
+float3 __ovld __cnfn native_sin(float3);
+float4 __ovld __cnfn native_sin(float4);
+float8 __ovld __cnfn native_sin(float8);
+float16 __ovld __cnfn native_sin(float16);
/**
* Compute square root over an implementation-defined
* range. The maximum error is implementation-defined.
*/
-float __ovld __cnfn native_sqrt(float x);
-float2 __ovld __cnfn native_sqrt(float2 x);
-float3 __ovld __cnfn native_sqrt(float3 x);
-float4 __ovld __cnfn native_sqrt(float4 x);
-float8 __ovld __cnfn native_sqrt(float8 x);
-float16 __ovld __cnfn native_sqrt(float16 x);
+float __ovld __cnfn native_sqrt(float);
+float2 __ovld __cnfn native_sqrt(float2);
+float3 __ovld __cnfn native_sqrt(float3);
+float4 __ovld __cnfn native_sqrt(float4);
+float8 __ovld __cnfn native_sqrt(float8);
+float16 __ovld __cnfn native_sqrt(float16);
/**
* Compute tangent over an implementation-defined range.
* The maximum error is implementation-defined.
*/
-float __ovld __cnfn native_tan(float x);
-float2 __ovld __cnfn native_tan(float2 x);
-float3 __ovld __cnfn native_tan(float3 x);
-float4 __ovld __cnfn native_tan(float4 x);
-float8 __ovld __cnfn native_tan(float8 x);
-float16 __ovld __cnfn native_tan(float16 x);
+float __ovld __cnfn native_tan(float);
+float2 __ovld __cnfn native_tan(float2);
+float3 __ovld __cnfn native_tan(float3);
+float4 __ovld __cnfn native_tan(float4);
+float8 __ovld __cnfn native_tan(float8);
+float16 __ovld __cnfn native_tan(float16);
// OpenCL v1.1 s6.11.3, v1.2 s6.12.3, v2.0 s6.13.3 - Integer Functions
/**
* Returns | x |.
*/
-uchar __ovld __cnfn abs(char x);
-uchar __ovld __cnfn abs(uchar x);
-uchar2 __ovld __cnfn abs(char2 x);
-uchar2 __ovld __cnfn abs(uchar2 x);
-uchar3 __ovld __cnfn abs(char3 x);
-uchar3 __ovld __cnfn abs(uchar3 x);
-uchar4 __ovld __cnfn abs(char4 x);
-uchar4 __ovld __cnfn abs(uchar4 x);
-uchar8 __ovld __cnfn abs(char8 x);
-uchar8 __ovld __cnfn abs(uchar8 x);
-uchar16 __ovld __cnfn abs(char16 x);
-uchar16 __ovld __cnfn abs(uchar16 x);
-ushort __ovld __cnfn abs(short x);
-ushort __ovld __cnfn abs(ushort x);
-ushort2 __ovld __cnfn abs(short2 x);
-ushort2 __ovld __cnfn abs(ushort2 x);
-ushort3 __ovld __cnfn abs(short3 x);
-ushort3 __ovld __cnfn abs(ushort3 x);
-ushort4 __ovld __cnfn abs(short4 x);
-ushort4 __ovld __cnfn abs(ushort4 x);
-ushort8 __ovld __cnfn abs(short8 x);
-ushort8 __ovld __cnfn abs(ushort8 x);
-ushort16 __ovld __cnfn abs(short16 x);
-ushort16 __ovld __cnfn abs(ushort16 x);
-uint __ovld __cnfn abs(int x);
-uint __ovld __cnfn abs(uint x);
-uint2 __ovld __cnfn abs(int2 x);
-uint2 __ovld __cnfn abs(uint2 x);
-uint3 __ovld __cnfn abs(int3 x);
-uint3 __ovld __cnfn abs(uint3 x);
-uint4 __ovld __cnfn abs(int4 x);
-uint4 __ovld __cnfn abs(uint4 x);
-uint8 __ovld __cnfn abs(int8 x);
-uint8 __ovld __cnfn abs(uint8 x);
-uint16 __ovld __cnfn abs(int16 x);
-uint16 __ovld __cnfn abs(uint16 x);
-ulong __ovld __cnfn abs(long x);
-ulong __ovld __cnfn abs(ulong x);
-ulong2 __ovld __cnfn abs(long2 x);
-ulong2 __ovld __cnfn abs(ulong2 x);
-ulong3 __ovld __cnfn abs(long3 x);
-ulong3 __ovld __cnfn abs(ulong3 x);
-ulong4 __ovld __cnfn abs(long4 x);
-ulong4 __ovld __cnfn abs(ulong4 x);
-ulong8 __ovld __cnfn abs(long8 x);
-ulong8 __ovld __cnfn abs(ulong8 x);
-ulong16 __ovld __cnfn abs(long16 x);
-ulong16 __ovld __cnfn abs(ulong16 x);
+uchar __ovld __cnfn abs(char);
+uchar __ovld __cnfn abs(uchar);
+uchar2 __ovld __cnfn abs(char2);
+uchar2 __ovld __cnfn abs(uchar2);
+uchar3 __ovld __cnfn abs(char3);
+uchar3 __ovld __cnfn abs(uchar3);
+uchar4 __ovld __cnfn abs(char4);
+uchar4 __ovld __cnfn abs(uchar4);
+uchar8 __ovld __cnfn abs(char8);
+uchar8 __ovld __cnfn abs(uchar8);
+uchar16 __ovld __cnfn abs(char16);
+uchar16 __ovld __cnfn abs(uchar16);
+ushort __ovld __cnfn abs(short);
+ushort __ovld __cnfn abs(ushort);
+ushort2 __ovld __cnfn abs(short2);
+ushort2 __ovld __cnfn abs(ushort2);
+ushort3 __ovld __cnfn abs(short3);
+ushort3 __ovld __cnfn abs(ushort3);
+ushort4 __ovld __cnfn abs(short4);
+ushort4 __ovld __cnfn abs(ushort4);
+ushort8 __ovld __cnfn abs(short8);
+ushort8 __ovld __cnfn abs(ushort8);
+ushort16 __ovld __cnfn abs(short16);
+ushort16 __ovld __cnfn abs(ushort16);
+uint __ovld __cnfn abs(int);
+uint __ovld __cnfn abs(uint);
+uint2 __ovld __cnfn abs(int2);
+uint2 __ovld __cnfn abs(uint2);
+uint3 __ovld __cnfn abs(int3);
+uint3 __ovld __cnfn abs(uint3);
+uint4 __ovld __cnfn abs(int4);
+uint4 __ovld __cnfn abs(uint4);
+uint8 __ovld __cnfn abs(int8);
+uint8 __ovld __cnfn abs(uint8);
+uint16 __ovld __cnfn abs(int16);
+uint16 __ovld __cnfn abs(uint16);
+ulong __ovld __cnfn abs(long);
+ulong __ovld __cnfn abs(ulong);
+ulong2 __ovld __cnfn abs(long2);
+ulong2 __ovld __cnfn abs(ulong2);
+ulong3 __ovld __cnfn abs(long3);
+ulong3 __ovld __cnfn abs(ulong3);
+ulong4 __ovld __cnfn abs(long4);
+ulong4 __ovld __cnfn abs(ulong4);
+ulong8 __ovld __cnfn abs(long8);
+ulong8 __ovld __cnfn abs(ulong8);
+ulong16 __ovld __cnfn abs(long16);
+ulong16 __ovld __cnfn abs(ulong16);
/**
* Returns | x - y | without modulo overflow.
*/
-uchar __ovld __cnfn abs_diff(char x, char y);
-uchar __ovld __cnfn abs_diff(uchar x, uchar y);
-uchar2 __ovld __cnfn abs_diff(char2 x, char2 y);
-uchar2 __ovld __cnfn abs_diff(uchar2 x, uchar2 y);
-uchar3 __ovld __cnfn abs_diff(char3 x, char3 y);
-uchar3 __ovld __cnfn abs_diff(uchar3 x, uchar3 y);
-uchar4 __ovld __cnfn abs_diff(char4 x, char4 y);
-uchar4 __ovld __cnfn abs_diff(uchar4 x, uchar4 y);
-uchar8 __ovld __cnfn abs_diff(char8 x, char8 y);
-uchar8 __ovld __cnfn abs_diff(uchar8 x, uchar8 y);
-uchar16 __ovld __cnfn abs_diff(char16 x, char16 y);
-uchar16 __ovld __cnfn abs_diff(uchar16 x, uchar16 y);
-ushort __ovld __cnfn abs_diff(short x, short y);
-ushort __ovld __cnfn abs_diff(ushort x, ushort y);
-ushort2 __ovld __cnfn abs_diff(short2 x, short2 y);
-ushort2 __ovld __cnfn abs_diff(ushort2 x, ushort2 y);
-ushort3 __ovld __cnfn abs_diff(short3 x, short3 y);
-ushort3 __ovld __cnfn abs_diff(ushort3 x, ushort3 y);
-ushort4 __ovld __cnfn abs_diff(short4 x, short4 y);
-ushort4 __ovld __cnfn abs_diff(ushort4 x, ushort4 y);
-ushort8 __ovld __cnfn abs_diff(short8 x, short8 y);
-ushort8 __ovld __cnfn abs_diff(ushort8 x, ushort8 y);
-ushort16 __ovld __cnfn abs_diff(short16 x, short16 y);
-ushort16 __ovld __cnfn abs_diff(ushort16 x, ushort16 y);
-uint __ovld __cnfn abs_diff(int x, int y);
-uint __ovld __cnfn abs_diff(uint x, uint y);
-uint2 __ovld __cnfn abs_diff(int2 x, int2 y);
-uint2 __ovld __cnfn abs_diff(uint2 x, uint2 y);
-uint3 __ovld __cnfn abs_diff(int3 x, int3 y);
-uint3 __ovld __cnfn abs_diff(uint3 x, uint3 y);
-uint4 __ovld __cnfn abs_diff(int4 x, int4 y);
-uint4 __ovld __cnfn abs_diff(uint4 x, uint4 y);
-uint8 __ovld __cnfn abs_diff(int8 x, int8 y);
-uint8 __ovld __cnfn abs_diff(uint8 x, uint8 y);
-uint16 __ovld __cnfn abs_diff(int16 x, int16 y);
-uint16 __ovld __cnfn abs_diff(uint16 x, uint16 y);
-ulong __ovld __cnfn abs_diff(long x, long y);
-ulong __ovld __cnfn abs_diff(ulong x, ulong y);
-ulong2 __ovld __cnfn abs_diff(long2 x, long2 y);
-ulong2 __ovld __cnfn abs_diff(ulong2 x, ulong2 y);
-ulong3 __ovld __cnfn abs_diff(long3 x, long3 y);
-ulong3 __ovld __cnfn abs_diff(ulong3 x, ulong3 y);
-ulong4 __ovld __cnfn abs_diff(long4 x, long4 y);
-ulong4 __ovld __cnfn abs_diff(ulong4 x, ulong4 y);
-ulong8 __ovld __cnfn abs_diff(long8 x, long8 y);
-ulong8 __ovld __cnfn abs_diff(ulong8 x, ulong8 y);
-ulong16 __ovld __cnfn abs_diff(long16 x, long16 y);
-ulong16 __ovld __cnfn abs_diff(ulong16 x, ulong16 y);
+uchar __ovld __cnfn abs_diff(char, char);
+uchar __ovld __cnfn abs_diff(uchar, uchar);
+uchar2 __ovld __cnfn abs_diff(char2, char2);
+uchar2 __ovld __cnfn abs_diff(uchar2, uchar2);
+uchar3 __ovld __cnfn abs_diff(char3, char3);
+uchar3 __ovld __cnfn abs_diff(uchar3, uchar3);
+uchar4 __ovld __cnfn abs_diff(char4, char4);
+uchar4 __ovld __cnfn abs_diff(uchar4, uchar4);
+uchar8 __ovld __cnfn abs_diff(char8, char8);
+uchar8 __ovld __cnfn abs_diff(uchar8, uchar8);
+uchar16 __ovld __cnfn abs_diff(char16, char16);
+uchar16 __ovld __cnfn abs_diff(uchar16, uchar16);
+ushort __ovld __cnfn abs_diff(short, short);
+ushort __ovld __cnfn abs_diff(ushort, ushort);
+ushort2 __ovld __cnfn abs_diff(short2, short2);
+ushort2 __ovld __cnfn abs_diff(ushort2, ushort2);
+ushort3 __ovld __cnfn abs_diff(short3, short3);
+ushort3 __ovld __cnfn abs_diff(ushort3, ushort3);
+ushort4 __ovld __cnfn abs_diff(short4, short4);
+ushort4 __ovld __cnfn abs_diff(ushort4, ushort4);
+ushort8 __ovld __cnfn abs_diff(short8, short8);
+ushort8 __ovld __cnfn abs_diff(ushort8, ushort8);
+ushort16 __ovld __cnfn abs_diff(short16, short16);
+ushort16 __ovld __cnfn abs_diff(ushort16, ushort16);
+uint __ovld __cnfn abs_diff(int, int);
+uint __ovld __cnfn abs_diff(uint, uint);
+uint2 __ovld __cnfn abs_diff(int2, int2);
+uint2 __ovld __cnfn abs_diff(uint2, uint2);
+uint3 __ovld __cnfn abs_diff(int3, int3);
+uint3 __ovld __cnfn abs_diff(uint3, uint3);
+uint4 __ovld __cnfn abs_diff(int4, int4);
+uint4 __ovld __cnfn abs_diff(uint4, uint4);
+uint8 __ovld __cnfn abs_diff(int8, int8);
+uint8 __ovld __cnfn abs_diff(uint8, uint8);
+uint16 __ovld __cnfn abs_diff(int16, int16);
+uint16 __ovld __cnfn abs_diff(uint16, uint16);
+ulong __ovld __cnfn abs_diff(long, long);
+ulong __ovld __cnfn abs_diff(ulong, ulong);
+ulong2 __ovld __cnfn abs_diff(long2, long2);
+ulong2 __ovld __cnfn abs_diff(ulong2, ulong2);
+ulong3 __ovld __cnfn abs_diff(long3, long3);
+ulong3 __ovld __cnfn abs_diff(ulong3, ulong3);
+ulong4 __ovld __cnfn abs_diff(long4, long4);
+ulong4 __ovld __cnfn abs_diff(ulong4, ulong4);
+ulong8 __ovld __cnfn abs_diff(long8, long8);
+ulong8 __ovld __cnfn abs_diff(ulong8, ulong8);
+ulong16 __ovld __cnfn abs_diff(long16, long16);
+ulong16 __ovld __cnfn abs_diff(ulong16, ulong16);
/**
* Returns x + y and saturates the result.
*/
-char __ovld __cnfn add_sat(char x, char y);
-uchar __ovld __cnfn add_sat(uchar x, uchar y);
-char2 __ovld __cnfn add_sat(char2 x, char2 y);
-uchar2 __ovld __cnfn add_sat(uchar2 x, uchar2 y);
-char3 __ovld __cnfn add_sat(char3 x, char3 y);
-uchar3 __ovld __cnfn add_sat(uchar3 x, uchar3 y);
-char4 __ovld __cnfn add_sat(char4 x, char4 y);
-uchar4 __ovld __cnfn add_sat(uchar4 x, uchar4 y);
-char8 __ovld __cnfn add_sat(char8 x, char8 y);
-uchar8 __ovld __cnfn add_sat(uchar8 x, uchar8 y);
-char16 __ovld __cnfn add_sat(char16 x, char16 y);
-uchar16 __ovld __cnfn add_sat(uchar16 x, uchar16 y);
-short __ovld __cnfn add_sat(short x, short y);
-ushort __ovld __cnfn add_sat(ushort x, ushort y);
-short2 __ovld __cnfn add_sat(short2 x, short2 y);
-ushort2 __ovld __cnfn add_sat(ushort2 x, ushort2 y);
-short3 __ovld __cnfn add_sat(short3 x, short3 y);
-ushort3 __ovld __cnfn add_sat(ushort3 x, ushort3 y);
-short4 __ovld __cnfn add_sat(short4 x, short4 y);
-ushort4 __ovld __cnfn add_sat(ushort4 x, ushort4 y);
-short8 __ovld __cnfn add_sat(short8 x, short8 y);
-ushort8 __ovld __cnfn add_sat(ushort8 x, ushort8 y);
-short16 __ovld __cnfn add_sat(short16 x, short16 y);
-ushort16 __ovld __cnfn add_sat(ushort16 x, ushort16 y);
-int __ovld __cnfn add_sat(int x, int y);
-uint __ovld __cnfn add_sat(uint x, uint y);
-int2 __ovld __cnfn add_sat(int2 x, int2 y);
-uint2 __ovld __cnfn add_sat(uint2 x, uint2 y);
-int3 __ovld __cnfn add_sat(int3 x, int3 y);
-uint3 __ovld __cnfn add_sat(uint3 x, uint3 y);
-int4 __ovld __cnfn add_sat(int4 x, int4 y);
-uint4 __ovld __cnfn add_sat(uint4 x, uint4 y);
-int8 __ovld __cnfn add_sat(int8 x, int8 y);
-uint8 __ovld __cnfn add_sat(uint8 x, uint8 y);
-int16 __ovld __cnfn add_sat(int16 x, int16 y);
-uint16 __ovld __cnfn add_sat(uint16 x, uint16 y);
-long __ovld __cnfn add_sat(long x, long y);
-ulong __ovld __cnfn add_sat(ulong x, ulong y);
-long2 __ovld __cnfn add_sat(long2 x, long2 y);
-ulong2 __ovld __cnfn add_sat(ulong2 x, ulong2 y);
-long3 __ovld __cnfn add_sat(long3 x, long3 y);
-ulong3 __ovld __cnfn add_sat(ulong3 x, ulong3 y);
-long4 __ovld __cnfn add_sat(long4 x, long4 y);
-ulong4 __ovld __cnfn add_sat(ulong4 x, ulong4 y);
-long8 __ovld __cnfn add_sat(long8 x, long8 y);
-ulong8 __ovld __cnfn add_sat(ulong8 x, ulong8 y);
-long16 __ovld __cnfn add_sat(long16 x, long16 y);
-ulong16 __ovld __cnfn add_sat(ulong16 x, ulong16 y);
+char __ovld __cnfn add_sat(char, char);
+uchar __ovld __cnfn add_sat(uchar, uchar);
+char2 __ovld __cnfn add_sat(char2, char2);
+uchar2 __ovld __cnfn add_sat(uchar2, uchar2);
+char3 __ovld __cnfn add_sat(char3, char3);
+uchar3 __ovld __cnfn add_sat(uchar3, uchar3);
+char4 __ovld __cnfn add_sat(char4, char4);
+uchar4 __ovld __cnfn add_sat(uchar4, uchar4);
+char8 __ovld __cnfn add_sat(char8, char8);
+uchar8 __ovld __cnfn add_sat(uchar8, uchar8);
+char16 __ovld __cnfn add_sat(char16, char16);
+uchar16 __ovld __cnfn add_sat(uchar16, uchar16);
+short __ovld __cnfn add_sat(short, short);
+ushort __ovld __cnfn add_sat(ushort, ushort);
+short2 __ovld __cnfn add_sat(short2, short2);
+ushort2 __ovld __cnfn add_sat(ushort2, ushort2);
+short3 __ovld __cnfn add_sat(short3, short3);
+ushort3 __ovld __cnfn add_sat(ushort3, ushort3);
+short4 __ovld __cnfn add_sat(short4, short4);
+ushort4 __ovld __cnfn add_sat(ushort4, ushort4);
+short8 __ovld __cnfn add_sat(short8, short8);
+ushort8 __ovld __cnfn add_sat(ushort8, ushort8);
+short16 __ovld __cnfn add_sat(short16, short16);
+ushort16 __ovld __cnfn add_sat(ushort16, ushort16);
+int __ovld __cnfn add_sat(int, int);
+uint __ovld __cnfn add_sat(uint, uint);
+int2 __ovld __cnfn add_sat(int2, int2);
+uint2 __ovld __cnfn add_sat(uint2, uint2);
+int3 __ovld __cnfn add_sat(int3, int3);
+uint3 __ovld __cnfn add_sat(uint3, uint3);
+int4 __ovld __cnfn add_sat(int4, int4);
+uint4 __ovld __cnfn add_sat(uint4, uint4);
+int8 __ovld __cnfn add_sat(int8, int8);
+uint8 __ovld __cnfn add_sat(uint8, uint8);
+int16 __ovld __cnfn add_sat(int16, int16);
+uint16 __ovld __cnfn add_sat(uint16, uint16);
+long __ovld __cnfn add_sat(long, long);
+ulong __ovld __cnfn add_sat(ulong, ulong);
+long2 __ovld __cnfn add_sat(long2, long2);
+ulong2 __ovld __cnfn add_sat(ulong2, ulong2);
+long3 __ovld __cnfn add_sat(long3, long3);
+ulong3 __ovld __cnfn add_sat(ulong3, ulong3);
+long4 __ovld __cnfn add_sat(long4, long4);
+ulong4 __ovld __cnfn add_sat(ulong4, ulong4);
+long8 __ovld __cnfn add_sat(long8, long8);
+ulong8 __ovld __cnfn add_sat(ulong8, ulong8);
+long16 __ovld __cnfn add_sat(long16, long16);
+ulong16 __ovld __cnfn add_sat(ulong16, ulong16);
/**
* Returns (x + y) >> 1. The intermediate sum does
* not modulo overflow.
*/
-char __ovld __cnfn hadd(char x, char y);
-uchar __ovld __cnfn hadd(uchar x, uchar y);
-char2 __ovld __cnfn hadd(char2 x, char2 y);
-uchar2 __ovld __cnfn hadd(uchar2 x, uchar2 y);
-char3 __ovld __cnfn hadd(char3 x, char3 y);
-uchar3 __ovld __cnfn hadd(uchar3 x, uchar3 y);
-char4 __ovld __cnfn hadd(char4 x, char4 y);
-uchar4 __ovld __cnfn hadd(uchar4 x, uchar4 y);
-char8 __ovld __cnfn hadd(char8 x, char8 y);
-uchar8 __ovld __cnfn hadd(uchar8 x, uchar8 y);
-char16 __ovld __cnfn hadd(char16 x, char16 y);
-uchar16 __ovld __cnfn hadd(uchar16 x, uchar16 y);
-short __ovld __cnfn hadd(short x, short y);
-ushort __ovld __cnfn hadd(ushort x, ushort y);
-short2 __ovld __cnfn hadd(short2 x, short2 y);
-ushort2 __ovld __cnfn hadd(ushort2 x, ushort2 y);
-short3 __ovld __cnfn hadd(short3 x, short3 y);
-ushort3 __ovld __cnfn hadd(ushort3 x, ushort3 y);
-short4 __ovld __cnfn hadd(short4 x, short4 y);
-ushort4 __ovld __cnfn hadd(ushort4 x, ushort4 y);
-short8 __ovld __cnfn hadd(short8 x, short8 y);
-ushort8 __ovld __cnfn hadd(ushort8 x, ushort8 y);
-short16 __ovld __cnfn hadd(short16 x, short16 y);
-ushort16 __ovld __cnfn hadd(ushort16 x, ushort16 y);
-int __ovld __cnfn hadd(int x, int y);
-uint __ovld __cnfn hadd(uint x, uint y);
-int2 __ovld __cnfn hadd(int2 x, int2 y);
-uint2 __ovld __cnfn hadd(uint2 x, uint2 y);
-int3 __ovld __cnfn hadd(int3 x, int3 y);
-uint3 __ovld __cnfn hadd(uint3 x, uint3 y);
-int4 __ovld __cnfn hadd(int4 x, int4 y);
-uint4 __ovld __cnfn hadd(uint4 x, uint4 y);
-int8 __ovld __cnfn hadd(int8 x, int8 y);
-uint8 __ovld __cnfn hadd(uint8 x, uint8 y);
-int16 __ovld __cnfn hadd(int16 x, int16 y);
-uint16 __ovld __cnfn hadd(uint16 x, uint16 y);
-long __ovld __cnfn hadd(long x, long y);
-ulong __ovld __cnfn hadd(ulong x, ulong y);
-long2 __ovld __cnfn hadd(long2 x, long2 y);
-ulong2 __ovld __cnfn hadd(ulong2 x, ulong2 y);
-long3 __ovld __cnfn hadd(long3 x, long3 y);
-ulong3 __ovld __cnfn hadd(ulong3 x, ulong3 y);
-long4 __ovld __cnfn hadd(long4 x, long4 y);
-ulong4 __ovld __cnfn hadd(ulong4 x, ulong4 y);
-long8 __ovld __cnfn hadd(long8 x, long8 y);
-ulong8 __ovld __cnfn hadd(ulong8 x, ulong8 y);
-long16 __ovld __cnfn hadd(long16 x, long16 y);
-ulong16 __ovld __cnfn hadd(ulong16 x, ulong16 y);
+char __ovld __cnfn hadd(char, char);
+uchar __ovld __cnfn hadd(uchar, uchar);
+char2 __ovld __cnfn hadd(char2, char2);
+uchar2 __ovld __cnfn hadd(uchar2, uchar2);
+char3 __ovld __cnfn hadd(char3, char3);
+uchar3 __ovld __cnfn hadd(uchar3, uchar3);
+char4 __ovld __cnfn hadd(char4, char4);
+uchar4 __ovld __cnfn hadd(uchar4, uchar4);
+char8 __ovld __cnfn hadd(char8, char8);
+uchar8 __ovld __cnfn hadd(uchar8, uchar8);
+char16 __ovld __cnfn hadd(char16, char16);
+uchar16 __ovld __cnfn hadd(uchar16, uchar16);
+short __ovld __cnfn hadd(short, short);
+ushort __ovld __cnfn hadd(ushort, ushort);
+short2 __ovld __cnfn hadd(short2, short2);
+ushort2 __ovld __cnfn hadd(ushort2, ushort2);
+short3 __ovld __cnfn hadd(short3, short3);
+ushort3 __ovld __cnfn hadd(ushort3, ushort3);
+short4 __ovld __cnfn hadd(short4, short4);
+ushort4 __ovld __cnfn hadd(ushort4, ushort4);
+short8 __ovld __cnfn hadd(short8, short8);
+ushort8 __ovld __cnfn hadd(ushort8, ushort8);
+short16 __ovld __cnfn hadd(short16, short16);
+ushort16 __ovld __cnfn hadd(ushort16, ushort16);
+int __ovld __cnfn hadd(int, int);
+uint __ovld __cnfn hadd(uint, uint);
+int2 __ovld __cnfn hadd(int2, int2);
+uint2 __ovld __cnfn hadd(uint2, uint2);
+int3 __ovld __cnfn hadd(int3, int3);
+uint3 __ovld __cnfn hadd(uint3, uint3);
+int4 __ovld __cnfn hadd(int4, int4);
+uint4 __ovld __cnfn hadd(uint4, uint4);
+int8 __ovld __cnfn hadd(int8, int8);
+uint8 __ovld __cnfn hadd(uint8, uint8);
+int16 __ovld __cnfn hadd(int16, int16);
+uint16 __ovld __cnfn hadd(uint16, uint16);
+long __ovld __cnfn hadd(long, long);
+ulong __ovld __cnfn hadd(ulong, ulong);
+long2 __ovld __cnfn hadd(long2, long2);
+ulong2 __ovld __cnfn hadd(ulong2, ulong2);
+long3 __ovld __cnfn hadd(long3, long3);
+ulong3 __ovld __cnfn hadd(ulong3, ulong3);
+long4 __ovld __cnfn hadd(long4, long4);
+ulong4 __ovld __cnfn hadd(ulong4, ulong4);
+long8 __ovld __cnfn hadd(long8, long8);
+ulong8 __ovld __cnfn hadd(ulong8, ulong8);
+long16 __ovld __cnfn hadd(long16, long16);
+ulong16 __ovld __cnfn hadd(ulong16, ulong16);
/**
* Returns (x + y + 1) >> 1. The intermediate sum
* does not modulo overflow.
*/
-char __ovld __cnfn rhadd(char x, char y);
-uchar __ovld __cnfn rhadd(uchar x, uchar y);
-char2 __ovld __cnfn rhadd(char2 x, char2 y);
-uchar2 __ovld __cnfn rhadd(uchar2 x, uchar2 y);
-char3 __ovld __cnfn rhadd(char3 x, char3 y);
-uchar3 __ovld __cnfn rhadd(uchar3 x, uchar3 y);
-char4 __ovld __cnfn rhadd(char4 x, char4 y);
-uchar4 __ovld __cnfn rhadd(uchar4 x, uchar4 y);
-char8 __ovld __cnfn rhadd(char8 x, char8 y);
-uchar8 __ovld __cnfn rhadd(uchar8 x, uchar8 y);
-char16 __ovld __cnfn rhadd(char16 x, char16 y);
-uchar16 __ovld __cnfn rhadd(uchar16 x, uchar16 y);
-short __ovld __cnfn rhadd(short x, short y);
-ushort __ovld __cnfn rhadd(ushort x, ushort y);
-short2 __ovld __cnfn rhadd(short2 x, short2 y);
-ushort2 __ovld __cnfn rhadd(ushort2 x, ushort2 y);
-short3 __ovld __cnfn rhadd(short3 x, short3 y);
-ushort3 __ovld __cnfn rhadd(ushort3 x, ushort3 y);
-short4 __ovld __cnfn rhadd(short4 x, short4 y);
-ushort4 __ovld __cnfn rhadd(ushort4 x, ushort4 y);
-short8 __ovld __cnfn rhadd(short8 x, short8 y);
-ushort8 __ovld __cnfn rhadd(ushort8 x, ushort8 y);
-short16 __ovld __cnfn rhadd(short16 x, short16 y);
-ushort16 __ovld __cnfn rhadd(ushort16 x, ushort16 y);
-int __ovld __cnfn rhadd(int x, int y);
-uint __ovld __cnfn rhadd(uint x, uint y);
-int2 __ovld __cnfn rhadd(int2 x, int2 y);
-uint2 __ovld __cnfn rhadd(uint2 x, uint2 y);
-int3 __ovld __cnfn rhadd(int3 x, int3 y);
-uint3 __ovld __cnfn rhadd(uint3 x, uint3 y);
-int4 __ovld __cnfn rhadd(int4 x, int4 y);
-uint4 __ovld __cnfn rhadd(uint4 x, uint4 y);
-int8 __ovld __cnfn rhadd(int8 x, int8 y);
-uint8 __ovld __cnfn rhadd(uint8 x, uint8 y);
-int16 __ovld __cnfn rhadd(int16 x, int16 y);
-uint16 __ovld __cnfn rhadd(uint16 x, uint16 y);
-long __ovld __cnfn rhadd(long x, long y);
-ulong __ovld __cnfn rhadd(ulong x, ulong y);
-long2 __ovld __cnfn rhadd(long2 x, long2 y);
-ulong2 __ovld __cnfn rhadd(ulong2 x, ulong2 y);
-long3 __ovld __cnfn rhadd(long3 x, long3 y);
-ulong3 __ovld __cnfn rhadd(ulong3 x, ulong3 y);
-long4 __ovld __cnfn rhadd(long4 x, long4 y);
-ulong4 __ovld __cnfn rhadd(ulong4 x, ulong4 y);
-long8 __ovld __cnfn rhadd(long8 x, long8 y);
-ulong8 __ovld __cnfn rhadd(ulong8 x, ulong8 y);
-long16 __ovld __cnfn rhadd(long16 x, long16 y);
-ulong16 __ovld __cnfn rhadd(ulong16 x, ulong16 y);
+char __ovld __cnfn rhadd(char, char);
+uchar __ovld __cnfn rhadd(uchar, uchar);
+char2 __ovld __cnfn rhadd(char2, char2);
+uchar2 __ovld __cnfn rhadd(uchar2, uchar2);
+char3 __ovld __cnfn rhadd(char3, char3);
+uchar3 __ovld __cnfn rhadd(uchar3, uchar3);
+char4 __ovld __cnfn rhadd(char4, char4);
+uchar4 __ovld __cnfn rhadd(uchar4, uchar4);
+char8 __ovld __cnfn rhadd(char8, char8);
+uchar8 __ovld __cnfn rhadd(uchar8, uchar8);
+char16 __ovld __cnfn rhadd(char16, char16);
+uchar16 __ovld __cnfn rhadd(uchar16, uchar16);
+short __ovld __cnfn rhadd(short, short);
+ushort __ovld __cnfn rhadd(ushort, ushort);
+short2 __ovld __cnfn rhadd(short2, short2);
+ushort2 __ovld __cnfn rhadd(ushort2, ushort2);
+short3 __ovld __cnfn rhadd(short3, short3);
+ushort3 __ovld __cnfn rhadd(ushort3, ushort3);
+short4 __ovld __cnfn rhadd(short4, short4);
+ushort4 __ovld __cnfn rhadd(ushort4, ushort4);
+short8 __ovld __cnfn rhadd(short8, short8);
+ushort8 __ovld __cnfn rhadd(ushort8, ushort8);
+short16 __ovld __cnfn rhadd(short16, short16);
+ushort16 __ovld __cnfn rhadd(ushort16, ushort16);
+int __ovld __cnfn rhadd(int, int);
+uint __ovld __cnfn rhadd(uint, uint);
+int2 __ovld __cnfn rhadd(int2, int2);
+uint2 __ovld __cnfn rhadd(uint2, uint2);
+int3 __ovld __cnfn rhadd(int3, int3);
+uint3 __ovld __cnfn rhadd(uint3, uint3);
+int4 __ovld __cnfn rhadd(int4, int4);
+uint4 __ovld __cnfn rhadd(uint4, uint4);
+int8 __ovld __cnfn rhadd(int8, int8);
+uint8 __ovld __cnfn rhadd(uint8, uint8);
+int16 __ovld __cnfn rhadd(int16, int16);
+uint16 __ovld __cnfn rhadd(uint16, uint16);
+long __ovld __cnfn rhadd(long, long);
+ulong __ovld __cnfn rhadd(ulong, ulong);
+long2 __ovld __cnfn rhadd(long2, long2);
+ulong2 __ovld __cnfn rhadd(ulong2, ulong2);
+long3 __ovld __cnfn rhadd(long3, long3);
+ulong3 __ovld __cnfn rhadd(ulong3, ulong3);
+long4 __ovld __cnfn rhadd(long4, long4);
+ulong4 __ovld __cnfn rhadd(ulong4, ulong4);
+long8 __ovld __cnfn rhadd(long8, long8);
+ulong8 __ovld __cnfn rhadd(ulong8, ulong8);
+long16 __ovld __cnfn rhadd(long16, long16);
+ulong16 __ovld __cnfn rhadd(ulong16, ulong16);
/**
* Returns min(max(x, minval), maxval).
* Results are undefined if minval > maxval.
*/
-char __ovld __cnfn clamp(char x, char minval, char maxval);
-uchar __ovld __cnfn clamp(uchar x, uchar minval, uchar maxval);
-char2 __ovld __cnfn clamp(char2 x, char2 minval, char2 maxval);
-uchar2 __ovld __cnfn clamp(uchar2 x, uchar2 minval, uchar2 maxval);
-char3 __ovld __cnfn clamp(char3 x, char3 minval, char3 maxval);
-uchar3 __ovld __cnfn clamp(uchar3 x, uchar3 minval, uchar3 maxval);
-char4 __ovld __cnfn clamp(char4 x, char4 minval, char4 maxval);
-uchar4 __ovld __cnfn clamp(uchar4 x, uchar4 minval, uchar4 maxval);
-char8 __ovld __cnfn clamp(char8 x, char8 minval, char8 maxval);
-uchar8 __ovld __cnfn clamp(uchar8 x, uchar8 minval, uchar8 maxval);
-char16 __ovld __cnfn clamp(char16 x, char16 minval, char16 maxval);
-uchar16 __ovld __cnfn clamp(uchar16 x, uchar16 minval, uchar16 maxval);
-short __ovld __cnfn clamp(short x, short minval, short maxval);
-ushort __ovld __cnfn clamp(ushort x, ushort minval, ushort maxval);
-short2 __ovld __cnfn clamp(short2 x, short2 minval, short2 maxval);
-ushort2 __ovld __cnfn clamp(ushort2 x, ushort2 minval, ushort2 maxval);
-short3 __ovld __cnfn clamp(short3 x, short3 minval, short3 maxval);
-ushort3 __ovld __cnfn clamp(ushort3 x, ushort3 minval, ushort3 maxval);
-short4 __ovld __cnfn clamp(short4 x, short4 minval, short4 maxval);
-ushort4 __ovld __cnfn clamp(ushort4 x, ushort4 minval, ushort4 maxval);
-short8 __ovld __cnfn clamp(short8 x, short8 minval, short8 maxval);
-ushort8 __ovld __cnfn clamp(ushort8 x, ushort8 minval, ushort8 maxval);
-short16 __ovld __cnfn clamp(short16 x, short16 minval, short16 maxval);
-ushort16 __ovld __cnfn clamp(ushort16 x, ushort16 minval, ushort16 maxval);
-int __ovld __cnfn clamp(int x, int minval, int maxval);
-uint __ovld __cnfn clamp(uint x, uint minval, uint maxval);
-int2 __ovld __cnfn clamp(int2 x, int2 minval, int2 maxval);
-uint2 __ovld __cnfn clamp(uint2 x, uint2 minval, uint2 maxval);
-int3 __ovld __cnfn clamp(int3 x, int3 minval, int3 maxval);
-uint3 __ovld __cnfn clamp(uint3 x, uint3 minval, uint3 maxval);
-int4 __ovld __cnfn clamp(int4 x, int4 minval, int4 maxval);
-uint4 __ovld __cnfn clamp(uint4 x, uint4 minval, uint4 maxval);
-int8 __ovld __cnfn clamp(int8 x, int8 minval, int8 maxval);
-uint8 __ovld __cnfn clamp(uint8 x, uint8 minval, uint8 maxval);
-int16 __ovld __cnfn clamp(int16 x, int16 minval, int16 maxval);
-uint16 __ovld __cnfn clamp(uint16 x, uint16 minval, uint16 maxval);
-long __ovld __cnfn clamp(long x, long minval, long maxval);
-ulong __ovld __cnfn clamp(ulong x, ulong minval, ulong maxval);
-long2 __ovld __cnfn clamp(long2 x, long2 minval, long2 maxval);
-ulong2 __ovld __cnfn clamp(ulong2 x, ulong2 minval, ulong2 maxval);
-long3 __ovld __cnfn clamp(long3 x, long3 minval, long3 maxval);
-ulong3 __ovld __cnfn clamp(ulong3 x, ulong3 minval, ulong3 maxval);
-long4 __ovld __cnfn clamp(long4 x, long4 minval, long4 maxval);
-ulong4 __ovld __cnfn clamp(ulong4 x, ulong4 minval, ulong4 maxval);
-long8 __ovld __cnfn clamp(long8 x, long8 minval, long8 maxval);
-ulong8 __ovld __cnfn clamp(ulong8 x, ulong8 minval, ulong8 maxval);
-long16 __ovld __cnfn clamp(long16 x, long16 minval, long16 maxval);
-ulong16 __ovld __cnfn clamp(ulong16 x, ulong16 minval, ulong16 maxval);
-char2 __ovld __cnfn clamp(char2 x, char minval, char maxval);
-uchar2 __ovld __cnfn clamp(uchar2 x, uchar minval, uchar maxval);
-char3 __ovld __cnfn clamp(char3 x, char minval, char maxval);
-uchar3 __ovld __cnfn clamp(uchar3 x, uchar minval, uchar maxval);
-char4 __ovld __cnfn clamp(char4 x, char minval, char maxval);
-uchar4 __ovld __cnfn clamp(uchar4 x, uchar minval, uchar maxval);
-char8 __ovld __cnfn clamp(char8 x, char minval, char maxval);
-uchar8 __ovld __cnfn clamp(uchar8 x, uchar minval, uchar maxval);
-char16 __ovld __cnfn clamp(char16 x, char minval, char maxval);
-uchar16 __ovld __cnfn clamp(uchar16 x, uchar minval, uchar maxval);
-short2 __ovld __cnfn clamp(short2 x, short minval, short maxval);
-ushort2 __ovld __cnfn clamp(ushort2 x, ushort minval, ushort maxval);
-short3 __ovld __cnfn clamp(short3 x, short minval, short maxval);
-ushort3 __ovld __cnfn clamp(ushort3 x, ushort minval, ushort maxval);
-short4 __ovld __cnfn clamp(short4 x, short minval, short maxval);
-ushort4 __ovld __cnfn clamp(ushort4 x, ushort minval, ushort maxval);
-short8 __ovld __cnfn clamp(short8 x, short minval, short maxval);
-ushort8 __ovld __cnfn clamp(ushort8 x, ushort minval, ushort maxval);
-short16 __ovld __cnfn clamp(short16 x, short minval, short maxval);
-ushort16 __ovld __cnfn clamp(ushort16 x, ushort minval, ushort maxval);
-int2 __ovld __cnfn clamp(int2 x, int minval, int maxval);
-uint2 __ovld __cnfn clamp(uint2 x, uint minval, uint maxval);
-int3 __ovld __cnfn clamp(int3 x, int minval, int maxval);
-uint3 __ovld __cnfn clamp(uint3 x, uint minval, uint maxval);
-int4 __ovld __cnfn clamp(int4 x, int minval, int maxval);
-uint4 __ovld __cnfn clamp(uint4 x, uint minval, uint maxval);
-int8 __ovld __cnfn clamp(int8 x, int minval, int maxval);
-uint8 __ovld __cnfn clamp(uint8 x, uint minval, uint maxval);
-int16 __ovld __cnfn clamp(int16 x, int minval, int maxval);
-uint16 __ovld __cnfn clamp(uint16 x, uint minval, uint maxval);
-long2 __ovld __cnfn clamp(long2 x, long minval, long maxval);
-ulong2 __ovld __cnfn clamp(ulong2 x, ulong minval, ulong maxval);
-long3 __ovld __cnfn clamp(long3 x, long minval, long maxval);
-ulong3 __ovld __cnfn clamp(ulong3 x, ulong minval, ulong maxval);
-long4 __ovld __cnfn clamp(long4 x, long minval, long maxval);
-ulong4 __ovld __cnfn clamp(ulong4 x, ulong minval, ulong maxval);
-long8 __ovld __cnfn clamp(long8 x, long minval, long maxval);
-ulong8 __ovld __cnfn clamp(ulong8 x, ulong minval, ulong maxval);
-long16 __ovld __cnfn clamp(long16 x, long minval, long maxval);
-ulong16 __ovld __cnfn clamp(ulong16 x, ulong minval, ulong maxval);
+char __ovld __cnfn clamp(char, char, char);
+uchar __ovld __cnfn clamp(uchar, uchar, uchar);
+char2 __ovld __cnfn clamp(char2, char2, char2);
+uchar2 __ovld __cnfn clamp(uchar2, uchar2, uchar2);
+char3 __ovld __cnfn clamp(char3, char3, char3);
+uchar3 __ovld __cnfn clamp(uchar3, uchar3, uchar3);
+char4 __ovld __cnfn clamp(char4, char4, char4);
+uchar4 __ovld __cnfn clamp(uchar4, uchar4, uchar4);
+char8 __ovld __cnfn clamp(char8, char8, char8);
+uchar8 __ovld __cnfn clamp(uchar8, uchar8, uchar8);
+char16 __ovld __cnfn clamp(char16, char16, char16);
+uchar16 __ovld __cnfn clamp(uchar16, uchar16, uchar16);
+short __ovld __cnfn clamp(short, short, short);
+ushort __ovld __cnfn clamp(ushort, ushort, ushort);
+short2 __ovld __cnfn clamp(short2, short2, short2);
+ushort2 __ovld __cnfn clamp(ushort2, ushort2, ushort2);
+short3 __ovld __cnfn clamp(short3, short3, short3);
+ushort3 __ovld __cnfn clamp(ushort3, ushort3, ushort3);
+short4 __ovld __cnfn clamp(short4, short4, short4);
+ushort4 __ovld __cnfn clamp(ushort4, ushort4, ushort4);
+short8 __ovld __cnfn clamp(short8, short8, short8);
+ushort8 __ovld __cnfn clamp(ushort8, ushort8, ushort8);
+short16 __ovld __cnfn clamp(short16, short16, short16);
+ushort16 __ovld __cnfn clamp(ushort16, ushort16, ushort16);
+int __ovld __cnfn clamp(int, int, int);
+uint __ovld __cnfn clamp(uint, uint, uint);
+int2 __ovld __cnfn clamp(int2, int2, int2);
+uint2 __ovld __cnfn clamp(uint2, uint2, uint2);
+int3 __ovld __cnfn clamp(int3, int3, int3);
+uint3 __ovld __cnfn clamp(uint3, uint3, uint3);
+int4 __ovld __cnfn clamp(int4, int4, int4);
+uint4 __ovld __cnfn clamp(uint4, uint4, uint4);
+int8 __ovld __cnfn clamp(int8, int8, int8);
+uint8 __ovld __cnfn clamp(uint8, uint8, uint8);
+int16 __ovld __cnfn clamp(int16, int16, int16);
+uint16 __ovld __cnfn clamp(uint16, uint16, uint16);
+long __ovld __cnfn clamp(long, long, long);
+ulong __ovld __cnfn clamp(ulong, ulong, ulong);
+long2 __ovld __cnfn clamp(long2, long2, long2);
+ulong2 __ovld __cnfn clamp(ulong2, ulong2, ulong2);
+long3 __ovld __cnfn clamp(long3, long3, long3);
+ulong3 __ovld __cnfn clamp(ulong3, ulong3, ulong3);
+long4 __ovld __cnfn clamp(long4, long4, long4);
+ulong4 __ovld __cnfn clamp(ulong4, ulong4, ulong4);
+long8 __ovld __cnfn clamp(long8, long8, long8);
+ulong8 __ovld __cnfn clamp(ulong8, ulong8, ulong8);
+long16 __ovld __cnfn clamp(long16, long16, long16);
+ulong16 __ovld __cnfn clamp(ulong16, ulong16, ulong16);
+char2 __ovld __cnfn clamp(char2, char, char);
+uchar2 __ovld __cnfn clamp(uchar2, uchar, uchar);
+char3 __ovld __cnfn clamp(char3, char, char);
+uchar3 __ovld __cnfn clamp(uchar3, uchar, uchar);
+char4 __ovld __cnfn clamp(char4, char, char);
+uchar4 __ovld __cnfn clamp(uchar4, uchar, uchar);
+char8 __ovld __cnfn clamp(char8, char, char);
+uchar8 __ovld __cnfn clamp(uchar8, uchar, uchar);
+char16 __ovld __cnfn clamp(char16, char, char);
+uchar16 __ovld __cnfn clamp(uchar16, uchar, uchar);
+short2 __ovld __cnfn clamp(short2, short, short);
+ushort2 __ovld __cnfn clamp(ushort2, ushort, ushort);
+short3 __ovld __cnfn clamp(short3, short, short);
+ushort3 __ovld __cnfn clamp(ushort3, ushort, ushort);
+short4 __ovld __cnfn clamp(short4, short, short);
+ushort4 __ovld __cnfn clamp(ushort4, ushort, ushort);
+short8 __ovld __cnfn clamp(short8, short, short);
+ushort8 __ovld __cnfn clamp(ushort8, ushort, ushort);
+short16 __ovld __cnfn clamp(short16, short, short);
+ushort16 __ovld __cnfn clamp(ushort16, ushort, ushort);
+int2 __ovld __cnfn clamp(int2, int, int);
+uint2 __ovld __cnfn clamp(uint2, uint, uint);
+int3 __ovld __cnfn clamp(int3, int, int);
+uint3 __ovld __cnfn clamp(uint3, uint, uint);
+int4 __ovld __cnfn clamp(int4, int, int);
+uint4 __ovld __cnfn clamp(uint4, uint, uint);
+int8 __ovld __cnfn clamp(int8, int, int);
+uint8 __ovld __cnfn clamp(uint8, uint, uint);
+int16 __ovld __cnfn clamp(int16, int, int);
+uint16 __ovld __cnfn clamp(uint16, uint, uint);
+long2 __ovld __cnfn clamp(long2, long, long);
+ulong2 __ovld __cnfn clamp(ulong2, ulong, ulong);
+long3 __ovld __cnfn clamp(long3, long, long);
+ulong3 __ovld __cnfn clamp(ulong3, ulong, ulong);
+long4 __ovld __cnfn clamp(long4, long, long);
+ulong4 __ovld __cnfn clamp(ulong4, ulong, ulong);
+long8 __ovld __cnfn clamp(long8, long, long);
+ulong8 __ovld __cnfn clamp(ulong8, ulong, ulong);
+long16 __ovld __cnfn clamp(long16, long, long);
+ulong16 __ovld __cnfn clamp(ulong16, ulong, ulong);
/**
* Returns the number of leading 0-bits in x, starting
* at the most significant bit position.
*/
-char __ovld __cnfn clz(char x);
-uchar __ovld __cnfn clz(uchar x);
-char2 __ovld __cnfn clz(char2 x);
-uchar2 __ovld __cnfn clz(uchar2 x);
-char3 __ovld __cnfn clz(char3 x);
-uchar3 __ovld __cnfn clz(uchar3 x);
-char4 __ovld __cnfn clz(char4 x);
-uchar4 __ovld __cnfn clz(uchar4 x);
-char8 __ovld __cnfn clz(char8 x);
-uchar8 __ovld __cnfn clz(uchar8 x);
-char16 __ovld __cnfn clz(char16 x);
-uchar16 __ovld __cnfn clz(uchar16 x);
-short __ovld __cnfn clz(short x);
-ushort __ovld __cnfn clz(ushort x);
-short2 __ovld __cnfn clz(short2 x);
-ushort2 __ovld __cnfn clz(ushort2 x);
-short3 __ovld __cnfn clz(short3 x);
-ushort3 __ovld __cnfn clz(ushort3 x);
-short4 __ovld __cnfn clz(short4 x);
-ushort4 __ovld __cnfn clz(ushort4 x);
-short8 __ovld __cnfn clz(short8 x);
-ushort8 __ovld __cnfn clz(ushort8 x);
-short16 __ovld __cnfn clz(short16 x);
-ushort16 __ovld __cnfn clz(ushort16 x);
-int __ovld __cnfn clz(int x);
-uint __ovld __cnfn clz(uint x);
-int2 __ovld __cnfn clz(int2 x);
-uint2 __ovld __cnfn clz(uint2 x);
-int3 __ovld __cnfn clz(int3 x);
-uint3 __ovld __cnfn clz(uint3 x);
-int4 __ovld __cnfn clz(int4 x);
-uint4 __ovld __cnfn clz(uint4 x);
-int8 __ovld __cnfn clz(int8 x);
-uint8 __ovld __cnfn clz(uint8 x);
-int16 __ovld __cnfn clz(int16 x);
-uint16 __ovld __cnfn clz(uint16 x);
-long __ovld __cnfn clz(long x);
-ulong __ovld __cnfn clz(ulong x);
-long2 __ovld __cnfn clz(long2 x);
-ulong2 __ovld __cnfn clz(ulong2 x);
-long3 __ovld __cnfn clz(long3 x);
-ulong3 __ovld __cnfn clz(ulong3 x);
-long4 __ovld __cnfn clz(long4 x);
-ulong4 __ovld __cnfn clz(ulong4 x);
-long8 __ovld __cnfn clz(long8 x);
-ulong8 __ovld __cnfn clz(ulong8 x);
-long16 __ovld __cnfn clz(long16 x);
-ulong16 __ovld __cnfn clz(ulong16 x);
+char __ovld __cnfn clz(char);
+uchar __ovld __cnfn clz(uchar);
+char2 __ovld __cnfn clz(char2);
+uchar2 __ovld __cnfn clz(uchar2);
+char3 __ovld __cnfn clz(char3);
+uchar3 __ovld __cnfn clz(uchar3);
+char4 __ovld __cnfn clz(char4);
+uchar4 __ovld __cnfn clz(uchar4);
+char8 __ovld __cnfn clz(char8);
+uchar8 __ovld __cnfn clz(uchar8);
+char16 __ovld __cnfn clz(char16);
+uchar16 __ovld __cnfn clz(uchar16);
+short __ovld __cnfn clz(short);
+ushort __ovld __cnfn clz(ushort);
+short2 __ovld __cnfn clz(short2);
+ushort2 __ovld __cnfn clz(ushort2);
+short3 __ovld __cnfn clz(short3);
+ushort3 __ovld __cnfn clz(ushort3);
+short4 __ovld __cnfn clz(short4);
+ushort4 __ovld __cnfn clz(ushort4);
+short8 __ovld __cnfn clz(short8);
+ushort8 __ovld __cnfn clz(ushort8);
+short16 __ovld __cnfn clz(short16);
+ushort16 __ovld __cnfn clz(ushort16);
+int __ovld __cnfn clz(int);
+uint __ovld __cnfn clz(uint);
+int2 __ovld __cnfn clz(int2);
+uint2 __ovld __cnfn clz(uint2);
+int3 __ovld __cnfn clz(int3);
+uint3 __ovld __cnfn clz(uint3);
+int4 __ovld __cnfn clz(int4);
+uint4 __ovld __cnfn clz(uint4);
+int8 __ovld __cnfn clz(int8);
+uint8 __ovld __cnfn clz(uint8);
+int16 __ovld __cnfn clz(int16);
+uint16 __ovld __cnfn clz(uint16);
+long __ovld __cnfn clz(long);
+ulong __ovld __cnfn clz(ulong);
+long2 __ovld __cnfn clz(long2);
+ulong2 __ovld __cnfn clz(ulong2);
+long3 __ovld __cnfn clz(long3);
+ulong3 __ovld __cnfn clz(ulong3);
+long4 __ovld __cnfn clz(long4);
+ulong4 __ovld __cnfn clz(ulong4);
+long8 __ovld __cnfn clz(long8);
+ulong8 __ovld __cnfn clz(ulong8);
+long16 __ovld __cnfn clz(long16);
+ulong16 __ovld __cnfn clz(ulong16);
/**
* Returns the count of trailing 0-bits in x. If x is 0,
@@ -9354,396 +9368,396 @@ ulong16 __ovld __cnfn clz(ulong16 x);
* component type of x, if x is a vector.
*/
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-char __ovld __cnfn ctz(char x);
-uchar __ovld __cnfn ctz(uchar x);
-char2 __ovld __cnfn ctz(char2 x);
-uchar2 __ovld __cnfn ctz(uchar2 x);
-char3 __ovld __cnfn ctz(char3 x);
-uchar3 __ovld __cnfn ctz(uchar3 x);
-char4 __ovld __cnfn ctz(char4 x);
-uchar4 __ovld __cnfn ctz(uchar4 x);
-char8 __ovld __cnfn ctz(char8 x);
-uchar8 __ovld __cnfn ctz(uchar8 x);
-char16 __ovld __cnfn ctz(char16 x);
-uchar16 __ovld __cnfn ctz(uchar16 x);
-short __ovld __cnfn ctz(short x);
-ushort __ovld __cnfn ctz(ushort x);
-short2 __ovld __cnfn ctz(short2 x);
-ushort2 __ovld __cnfn ctz(ushort2 x);
-short3 __ovld __cnfn ctz(short3 x);
-ushort3 __ovld __cnfn ctz(ushort3 x);
-short4 __ovld __cnfn ctz(short4 x);
-ushort4 __ovld __cnfn ctz(ushort4 x);
-short8 __ovld __cnfn ctz(short8 x);
-ushort8 __ovld __cnfn ctz(ushort8 x);
-short16 __ovld __cnfn ctz(short16 x);
-ushort16 __ovld __cnfn ctz(ushort16 x);
-int __ovld __cnfn ctz(int x);
-uint __ovld __cnfn ctz(uint x);
-int2 __ovld __cnfn ctz(int2 x);
-uint2 __ovld __cnfn ctz(uint2 x);
-int3 __ovld __cnfn ctz(int3 x);
-uint3 __ovld __cnfn ctz(uint3 x);
-int4 __ovld __cnfn ctz(int4 x);
-uint4 __ovld __cnfn ctz(uint4 x);
-int8 __ovld __cnfn ctz(int8 x);
-uint8 __ovld __cnfn ctz(uint8 x);
-int16 __ovld __cnfn ctz(int16 x);
-uint16 __ovld __cnfn ctz(uint16 x);
-long __ovld __cnfn ctz(long x);
-ulong __ovld __cnfn ctz(ulong x);
-long2 __ovld __cnfn ctz(long2 x);
-ulong2 __ovld __cnfn ctz(ulong2 x);
-long3 __ovld __cnfn ctz(long3 x);
-ulong3 __ovld __cnfn ctz(ulong3 x);
-long4 __ovld __cnfn ctz(long4 x);
-ulong4 __ovld __cnfn ctz(ulong4 x);
-long8 __ovld __cnfn ctz(long8 x);
-ulong8 __ovld __cnfn ctz(ulong8 x);
-long16 __ovld __cnfn ctz(long16 x);
-ulong16 __ovld __cnfn ctz(ulong16 x);
+char __ovld __cnfn ctz(char);
+uchar __ovld __cnfn ctz(uchar);
+char2 __ovld __cnfn ctz(char2);
+uchar2 __ovld __cnfn ctz(uchar2);
+char3 __ovld __cnfn ctz(char3);
+uchar3 __ovld __cnfn ctz(uchar3);
+char4 __ovld __cnfn ctz(char4);
+uchar4 __ovld __cnfn ctz(uchar4);
+char8 __ovld __cnfn ctz(char8);
+uchar8 __ovld __cnfn ctz(uchar8);
+char16 __ovld __cnfn ctz(char16);
+uchar16 __ovld __cnfn ctz(uchar16);
+short __ovld __cnfn ctz(short);
+ushort __ovld __cnfn ctz(ushort);
+short2 __ovld __cnfn ctz(short2);
+ushort2 __ovld __cnfn ctz(ushort2);
+short3 __ovld __cnfn ctz(short3);
+ushort3 __ovld __cnfn ctz(ushort3);
+short4 __ovld __cnfn ctz(short4);
+ushort4 __ovld __cnfn ctz(ushort4);
+short8 __ovld __cnfn ctz(short8);
+ushort8 __ovld __cnfn ctz(ushort8);
+short16 __ovld __cnfn ctz(short16);
+ushort16 __ovld __cnfn ctz(ushort16);
+int __ovld __cnfn ctz(int);
+uint __ovld __cnfn ctz(uint);
+int2 __ovld __cnfn ctz(int2);
+uint2 __ovld __cnfn ctz(uint2);
+int3 __ovld __cnfn ctz(int3);
+uint3 __ovld __cnfn ctz(uint3);
+int4 __ovld __cnfn ctz(int4);
+uint4 __ovld __cnfn ctz(uint4);
+int8 __ovld __cnfn ctz(int8);
+uint8 __ovld __cnfn ctz(uint8);
+int16 __ovld __cnfn ctz(int16);
+uint16 __ovld __cnfn ctz(uint16);
+long __ovld __cnfn ctz(long);
+ulong __ovld __cnfn ctz(ulong);
+long2 __ovld __cnfn ctz(long2);
+ulong2 __ovld __cnfn ctz(ulong2);
+long3 __ovld __cnfn ctz(long3);
+ulong3 __ovld __cnfn ctz(ulong3);
+long4 __ovld __cnfn ctz(long4);
+ulong4 __ovld __cnfn ctz(ulong4);
+long8 __ovld __cnfn ctz(long8);
+ulong8 __ovld __cnfn ctz(ulong8);
+long16 __ovld __cnfn ctz(long16);
+ulong16 __ovld __cnfn ctz(ulong16);
#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
* Returns mul_hi(a, b) + c.
*/
-char __ovld __cnfn mad_hi(char a, char b, char c);
-uchar __ovld __cnfn mad_hi(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn mad_hi(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn mad_hi(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn mad_hi(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn mad_hi(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn mad_hi(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn mad_hi(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn mad_hi(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn mad_hi(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn mad_hi(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn mad_hi(uchar16 a, uchar16 b, uchar16 c);
-short __ovld __cnfn mad_hi(short a, short b, short c);
-ushort __ovld __cnfn mad_hi(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn mad_hi(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn mad_hi(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn mad_hi(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn mad_hi(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn mad_hi(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn mad_hi(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn mad_hi(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn mad_hi(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn mad_hi(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn mad_hi(ushort16 a, ushort16 b, ushort16 c);
-int __ovld __cnfn mad_hi(int a, int b, int c);
-uint __ovld __cnfn mad_hi(uint a, uint b, uint c);
-int2 __ovld __cnfn mad_hi(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn mad_hi(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn mad_hi(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn mad_hi(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn mad_hi(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn mad_hi(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn mad_hi(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn mad_hi(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn mad_hi(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn mad_hi(uint16 a, uint16 b, uint16 c);
-long __ovld __cnfn mad_hi(long a, long b, long c);
-ulong __ovld __cnfn mad_hi(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn mad_hi(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn mad_hi(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn mad_hi(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn mad_hi(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn mad_hi(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn mad_hi(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn mad_hi(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn mad_hi(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn mad_hi(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn mad_hi(ulong16 a, ulong16 b, ulong16 c);
+char __ovld __cnfn mad_hi(char, char, char);
+uchar __ovld __cnfn mad_hi(uchar, uchar, uchar);
+char2 __ovld __cnfn mad_hi(char2, char2, char2);
+uchar2 __ovld __cnfn mad_hi(uchar2, uchar2, uchar2);
+char3 __ovld __cnfn mad_hi(char3, char3, char3);
+uchar3 __ovld __cnfn mad_hi(uchar3, uchar3, uchar3);
+char4 __ovld __cnfn mad_hi(char4, char4, char4);
+uchar4 __ovld __cnfn mad_hi(uchar4, uchar4, uchar4);
+char8 __ovld __cnfn mad_hi(char8, char8, char8);
+uchar8 __ovld __cnfn mad_hi(uchar8, uchar8, uchar8);
+char16 __ovld __cnfn mad_hi(char16, char16, char16);
+uchar16 __ovld __cnfn mad_hi(uchar16, uchar16, uchar16);
+short __ovld __cnfn mad_hi(short, short, short);
+ushort __ovld __cnfn mad_hi(ushort, ushort, ushort);
+short2 __ovld __cnfn mad_hi(short2, short2, short2);
+ushort2 __ovld __cnfn mad_hi(ushort2, ushort2, ushort2);
+short3 __ovld __cnfn mad_hi(short3, short3, short3);
+ushort3 __ovld __cnfn mad_hi(ushort3, ushort3, ushort3);
+short4 __ovld __cnfn mad_hi(short4, short4, short4);
+ushort4 __ovld __cnfn mad_hi(ushort4, ushort4, ushort4);
+short8 __ovld __cnfn mad_hi(short8, short8, short8);
+ushort8 __ovld __cnfn mad_hi(ushort8, ushort8, ushort8);
+short16 __ovld __cnfn mad_hi(short16, short16, short16);
+ushort16 __ovld __cnfn mad_hi(ushort16, ushort16, ushort16);
+int __ovld __cnfn mad_hi(int, int, int);
+uint __ovld __cnfn mad_hi(uint, uint, uint);
+int2 __ovld __cnfn mad_hi(int2, int2, int2);
+uint2 __ovld __cnfn mad_hi(uint2, uint2, uint2);
+int3 __ovld __cnfn mad_hi(int3, int3, int3);
+uint3 __ovld __cnfn mad_hi(uint3, uint3, uint3);
+int4 __ovld __cnfn mad_hi(int4, int4, int4);
+uint4 __ovld __cnfn mad_hi(uint4, uint4, uint4);
+int8 __ovld __cnfn mad_hi(int8, int8, int8);
+uint8 __ovld __cnfn mad_hi(uint8, uint8, uint8);
+int16 __ovld __cnfn mad_hi(int16, int16, int16);
+uint16 __ovld __cnfn mad_hi(uint16, uint16, uint16);
+long __ovld __cnfn mad_hi(long, long, long);
+ulong __ovld __cnfn mad_hi(ulong, ulong, ulong);
+long2 __ovld __cnfn mad_hi(long2, long2, long2);
+ulong2 __ovld __cnfn mad_hi(ulong2, ulong2, ulong2);
+long3 __ovld __cnfn mad_hi(long3, long3, long3);
+ulong3 __ovld __cnfn mad_hi(ulong3, ulong3, ulong3);
+long4 __ovld __cnfn mad_hi(long4, long4, long4);
+ulong4 __ovld __cnfn mad_hi(ulong4, ulong4, ulong4);
+long8 __ovld __cnfn mad_hi(long8, long8, long8);
+ulong8 __ovld __cnfn mad_hi(ulong8, ulong8, ulong8);
+long16 __ovld __cnfn mad_hi(long16, long16, long16);
+ulong16 __ovld __cnfn mad_hi(ulong16, ulong16, ulong16);
/**
* Returns a * b + c and saturates the result.
*/
-char __ovld __cnfn mad_sat(char a, char b, char c);
-uchar __ovld __cnfn mad_sat(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn mad_sat(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn mad_sat(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn mad_sat(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn mad_sat(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn mad_sat(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn mad_sat(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn mad_sat(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn mad_sat(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn mad_sat(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn mad_sat(uchar16 a, uchar16 b, uchar16 c);
-short __ovld __cnfn mad_sat(short a, short b, short c);
-ushort __ovld __cnfn mad_sat(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn mad_sat(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn mad_sat(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn mad_sat(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn mad_sat(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn mad_sat(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn mad_sat(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn mad_sat(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn mad_sat(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn mad_sat(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn mad_sat(ushort16 a, ushort16 b, ushort16 c);
-int __ovld __cnfn mad_sat(int a, int b, int c);
-uint __ovld __cnfn mad_sat(uint a, uint b, uint c);
-int2 __ovld __cnfn mad_sat(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn mad_sat(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn mad_sat(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn mad_sat(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn mad_sat(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn mad_sat(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn mad_sat(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn mad_sat(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn mad_sat(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn mad_sat(uint16 a, uint16 b, uint16 c);
-long __ovld __cnfn mad_sat(long a, long b, long c);
-ulong __ovld __cnfn mad_sat(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn mad_sat(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn mad_sat(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn mad_sat(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn mad_sat(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn mad_sat(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn mad_sat(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn mad_sat(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn mad_sat(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn mad_sat(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn mad_sat(ulong16 a, ulong16 b, ulong16 c);
+char __ovld __cnfn mad_sat(char, char, char);
+uchar __ovld __cnfn mad_sat(uchar, uchar, uchar);
+char2 __ovld __cnfn mad_sat(char2, char2, char2);
+uchar2 __ovld __cnfn mad_sat(uchar2, uchar2, uchar2);
+char3 __ovld __cnfn mad_sat(char3, char3, char3);
+uchar3 __ovld __cnfn mad_sat(uchar3, uchar3, uchar3);
+char4 __ovld __cnfn mad_sat(char4, char4, char4);
+uchar4 __ovld __cnfn mad_sat(uchar4, uchar4, uchar4);
+char8 __ovld __cnfn mad_sat(char8, char8, char8);
+uchar8 __ovld __cnfn mad_sat(uchar8, uchar8, uchar8);
+char16 __ovld __cnfn mad_sat(char16, char16, char16);
+uchar16 __ovld __cnfn mad_sat(uchar16, uchar16, uchar16);
+short __ovld __cnfn mad_sat(short, short, short);
+ushort __ovld __cnfn mad_sat(ushort, ushort, ushort);
+short2 __ovld __cnfn mad_sat(short2, short2, short2);
+ushort2 __ovld __cnfn mad_sat(ushort2, ushort2, ushort2);
+short3 __ovld __cnfn mad_sat(short3, short3, short3);
+ushort3 __ovld __cnfn mad_sat(ushort3, ushort3, ushort3);
+short4 __ovld __cnfn mad_sat(short4, short4, short4);
+ushort4 __ovld __cnfn mad_sat(ushort4, ushort4, ushort4);
+short8 __ovld __cnfn mad_sat(short8, short8, short8);
+ushort8 __ovld __cnfn mad_sat(ushort8, ushort8, ushort8);
+short16 __ovld __cnfn mad_sat(short16, short16, short16);
+ushort16 __ovld __cnfn mad_sat(ushort16, ushort16, ushort16);
+int __ovld __cnfn mad_sat(int, int, int);
+uint __ovld __cnfn mad_sat(uint, uint, uint);
+int2 __ovld __cnfn mad_sat(int2, int2, int2);
+uint2 __ovld __cnfn mad_sat(uint2, uint2, uint2);
+int3 __ovld __cnfn mad_sat(int3, int3, int3);
+uint3 __ovld __cnfn mad_sat(uint3, uint3, uint3);
+int4 __ovld __cnfn mad_sat(int4, int4, int4);
+uint4 __ovld __cnfn mad_sat(uint4, uint4, uint4);
+int8 __ovld __cnfn mad_sat(int8, int8, int8);
+uint8 __ovld __cnfn mad_sat(uint8, uint8, uint8);
+int16 __ovld __cnfn mad_sat(int16, int16, int16);
+uint16 __ovld __cnfn mad_sat(uint16, uint16, uint16);
+long __ovld __cnfn mad_sat(long, long, long);
+ulong __ovld __cnfn mad_sat(ulong, ulong, ulong);
+long2 __ovld __cnfn mad_sat(long2, long2, long2);
+ulong2 __ovld __cnfn mad_sat(ulong2, ulong2, ulong2);
+long3 __ovld __cnfn mad_sat(long3, long3, long3);
+ulong3 __ovld __cnfn mad_sat(ulong3, ulong3, ulong3);
+long4 __ovld __cnfn mad_sat(long4, long4, long4);
+ulong4 __ovld __cnfn mad_sat(ulong4, ulong4, ulong4);
+long8 __ovld __cnfn mad_sat(long8, long8, long8);
+ulong8 __ovld __cnfn mad_sat(ulong8, ulong8, ulong8);
+long16 __ovld __cnfn mad_sat(long16, long16, long16);
+ulong16 __ovld __cnfn mad_sat(ulong16, ulong16, ulong16);
/**
* Returns y if x < y, otherwise it returns x.
*/
-char __ovld __cnfn max(char x, char y);
-uchar __ovld __cnfn max(uchar x, uchar y);
-char2 __ovld __cnfn max(char2 x, char2 y);
-uchar2 __ovld __cnfn max(uchar2 x, uchar2 y);
-char3 __ovld __cnfn max(char3 x, char3 y);
-uchar3 __ovld __cnfn max(uchar3 x, uchar3 y);
-char4 __ovld __cnfn max(char4 x, char4 y);
-uchar4 __ovld __cnfn max(uchar4 x, uchar4 y);
-char8 __ovld __cnfn max(char8 x, char8 y);
-uchar8 __ovld __cnfn max(uchar8 x, uchar8 y);
-char16 __ovld __cnfn max(char16 x, char16 y);
-uchar16 __ovld __cnfn max(uchar16 x, uchar16 y);
-short __ovld __cnfn max(short x, short y);
-ushort __ovld __cnfn max(ushort x, ushort y);
-short2 __ovld __cnfn max(short2 x, short2 y);
-ushort2 __ovld __cnfn max(ushort2 x, ushort2 y);
-short3 __ovld __cnfn max(short3 x, short3 y);
-ushort3 __ovld __cnfn max(ushort3 x, ushort3 y);
-short4 __ovld __cnfn max(short4 x, short4 y);
-ushort4 __ovld __cnfn max(ushort4 x, ushort4 y);
-short8 __ovld __cnfn max(short8 x, short8 y);
-ushort8 __ovld __cnfn max(ushort8 x, ushort8 y);
-short16 __ovld __cnfn max(short16 x, short16 y);
-ushort16 __ovld __cnfn max(ushort16 x, ushort16 y);
-int __ovld __cnfn max(int x, int y);
-uint __ovld __cnfn max(uint x, uint y);
-int2 __ovld __cnfn max(int2 x, int2 y);
-uint2 __ovld __cnfn max(uint2 x, uint2 y);
-int3 __ovld __cnfn max(int3 x, int3 y);
-uint3 __ovld __cnfn max(uint3 x, uint3 y);
-int4 __ovld __cnfn max(int4 x, int4 y);
-uint4 __ovld __cnfn max(uint4 x, uint4 y);
-int8 __ovld __cnfn max(int8 x, int8 y);
-uint8 __ovld __cnfn max(uint8 x, uint8 y);
-int16 __ovld __cnfn max(int16 x, int16 y);
-uint16 __ovld __cnfn max(uint16 x, uint16 y);
-long __ovld __cnfn max(long x, long y);
-ulong __ovld __cnfn max(ulong x, ulong y);
-long2 __ovld __cnfn max(long2 x, long2 y);
-ulong2 __ovld __cnfn max(ulong2 x, ulong2 y);
-long3 __ovld __cnfn max(long3 x, long3 y);
-ulong3 __ovld __cnfn max(ulong3 x, ulong3 y);
-long4 __ovld __cnfn max(long4 x, long4 y);
-ulong4 __ovld __cnfn max(ulong4 x, ulong4 y);
-long8 __ovld __cnfn max(long8 x, long8 y);
-ulong8 __ovld __cnfn max(ulong8 x, ulong8 y);
-long16 __ovld __cnfn max(long16 x, long16 y);
-ulong16 __ovld __cnfn max(ulong16 x, ulong16 y);
-char2 __ovld __cnfn max(char2 x, char y);
-uchar2 __ovld __cnfn max(uchar2 x, uchar y);
-char3 __ovld __cnfn max(char3 x, char y);
-uchar3 __ovld __cnfn max(uchar3 x, uchar y);
-char4 __ovld __cnfn max(char4 x, char y);
-uchar4 __ovld __cnfn max(uchar4 x, uchar y);
-char8 __ovld __cnfn max(char8 x, char y);
-uchar8 __ovld __cnfn max(uchar8 x, uchar y);
-char16 __ovld __cnfn max(char16 x, char y);
-uchar16 __ovld __cnfn max(uchar16 x, uchar y);
-short2 __ovld __cnfn max(short2 x, short y);
-ushort2 __ovld __cnfn max(ushort2 x, ushort y);
-short3 __ovld __cnfn max(short3 x, short y);
-ushort3 __ovld __cnfn max(ushort3 x, ushort y);
-short4 __ovld __cnfn max(short4 x, short y);
-ushort4 __ovld __cnfn max(ushort4 x, ushort y);
-short8 __ovld __cnfn max(short8 x, short y);
-ushort8 __ovld __cnfn max(ushort8 x, ushort y);
-short16 __ovld __cnfn max(short16 x, short y);
-ushort16 __ovld __cnfn max(ushort16 x, ushort y);
-int2 __ovld __cnfn max(int2 x, int y);
-uint2 __ovld __cnfn max(uint2 x, uint y);
-int3 __ovld __cnfn max(int3 x, int y);
-uint3 __ovld __cnfn max(uint3 x, uint y);
-int4 __ovld __cnfn max(int4 x, int y);
-uint4 __ovld __cnfn max(uint4 x, uint y);
-int8 __ovld __cnfn max(int8 x, int y);
-uint8 __ovld __cnfn max(uint8 x, uint y);
-int16 __ovld __cnfn max(int16 x, int y);
-uint16 __ovld __cnfn max(uint16 x, uint y);
-long2 __ovld __cnfn max(long2 x, long y);
-ulong2 __ovld __cnfn max(ulong2 x, ulong y);
-long3 __ovld __cnfn max(long3 x, long y);
-ulong3 __ovld __cnfn max(ulong3 x, ulong y);
-long4 __ovld __cnfn max(long4 x, long y);
-ulong4 __ovld __cnfn max(ulong4 x, ulong y);
-long8 __ovld __cnfn max(long8 x, long y);
-ulong8 __ovld __cnfn max(ulong8 x, ulong y);
-long16 __ovld __cnfn max(long16 x, long y);
-ulong16 __ovld __cnfn max(ulong16 x, ulong y);
+char __ovld __cnfn max(char, char);
+uchar __ovld __cnfn max(uchar, uchar);
+char2 __ovld __cnfn max(char2, char2);
+uchar2 __ovld __cnfn max(uchar2, uchar2);
+char3 __ovld __cnfn max(char3, char3);
+uchar3 __ovld __cnfn max(uchar3, uchar3);
+char4 __ovld __cnfn max(char4, char4);
+uchar4 __ovld __cnfn max(uchar4, uchar4);
+char8 __ovld __cnfn max(char8, char8);
+uchar8 __ovld __cnfn max(uchar8, uchar8);
+char16 __ovld __cnfn max(char16, char16);
+uchar16 __ovld __cnfn max(uchar16, uchar16);
+short __ovld __cnfn max(short, short);
+ushort __ovld __cnfn max(ushort, ushort);
+short2 __ovld __cnfn max(short2, short2);
+ushort2 __ovld __cnfn max(ushort2, ushort2);
+short3 __ovld __cnfn max(short3, short3);
+ushort3 __ovld __cnfn max(ushort3, ushort3);
+short4 __ovld __cnfn max(short4, short4);
+ushort4 __ovld __cnfn max(ushort4, ushort4);
+short8 __ovld __cnfn max(short8, short8);
+ushort8 __ovld __cnfn max(ushort8, ushort8);
+short16 __ovld __cnfn max(short16, short16);
+ushort16 __ovld __cnfn max(ushort16, ushort16);
+int __ovld __cnfn max(int, int);
+uint __ovld __cnfn max(uint, uint);
+int2 __ovld __cnfn max(int2, int2);
+uint2 __ovld __cnfn max(uint2, uint2);
+int3 __ovld __cnfn max(int3, int3);
+uint3 __ovld __cnfn max(uint3, uint3);
+int4 __ovld __cnfn max(int4, int4);
+uint4 __ovld __cnfn max(uint4, uint4);
+int8 __ovld __cnfn max(int8, int8);
+uint8 __ovld __cnfn max(uint8, uint8);
+int16 __ovld __cnfn max(int16, int16);
+uint16 __ovld __cnfn max(uint16, uint16);
+long __ovld __cnfn max(long, long);
+ulong __ovld __cnfn max(ulong, ulong);
+long2 __ovld __cnfn max(long2, long2);
+ulong2 __ovld __cnfn max(ulong2, ulong2);
+long3 __ovld __cnfn max(long3, long3);
+ulong3 __ovld __cnfn max(ulong3, ulong3);
+long4 __ovld __cnfn max(long4, long4);
+ulong4 __ovld __cnfn max(ulong4, ulong4);
+long8 __ovld __cnfn max(long8, long8);
+ulong8 __ovld __cnfn max(ulong8, ulong8);
+long16 __ovld __cnfn max(long16, long16);
+ulong16 __ovld __cnfn max(ulong16, ulong16);
+char2 __ovld __cnfn max(char2, char);
+uchar2 __ovld __cnfn max(uchar2, uchar);
+char3 __ovld __cnfn max(char3, char);
+uchar3 __ovld __cnfn max(uchar3, uchar);
+char4 __ovld __cnfn max(char4, char);
+uchar4 __ovld __cnfn max(uchar4, uchar);
+char8 __ovld __cnfn max(char8, char);
+uchar8 __ovld __cnfn max(uchar8, uchar);
+char16 __ovld __cnfn max(char16, char);
+uchar16 __ovld __cnfn max(uchar16, uchar);
+short2 __ovld __cnfn max(short2, short);
+ushort2 __ovld __cnfn max(ushort2, ushort);
+short3 __ovld __cnfn max(short3, short);
+ushort3 __ovld __cnfn max(ushort3, ushort);
+short4 __ovld __cnfn max(short4, short);
+ushort4 __ovld __cnfn max(ushort4, ushort);
+short8 __ovld __cnfn max(short8, short);
+ushort8 __ovld __cnfn max(ushort8, ushort);
+short16 __ovld __cnfn max(short16, short);
+ushort16 __ovld __cnfn max(ushort16, ushort);
+int2 __ovld __cnfn max(int2, int);
+uint2 __ovld __cnfn max(uint2, uint);
+int3 __ovld __cnfn max(int3, int);
+uint3 __ovld __cnfn max(uint3, uint);
+int4 __ovld __cnfn max(int4, int);
+uint4 __ovld __cnfn max(uint4, uint);
+int8 __ovld __cnfn max(int8, int);
+uint8 __ovld __cnfn max(uint8, uint);
+int16 __ovld __cnfn max(int16, int);
+uint16 __ovld __cnfn max(uint16, uint);
+long2 __ovld __cnfn max(long2, long);
+ulong2 __ovld __cnfn max(ulong2, ulong);
+long3 __ovld __cnfn max(long3, long);
+ulong3 __ovld __cnfn max(ulong3, ulong);
+long4 __ovld __cnfn max(long4, long);
+ulong4 __ovld __cnfn max(ulong4, ulong);
+long8 __ovld __cnfn max(long8, long);
+ulong8 __ovld __cnfn max(ulong8, ulong);
+long16 __ovld __cnfn max(long16, long);
+ulong16 __ovld __cnfn max(ulong16, ulong);
/**
* Returns y if y < x, otherwise it returns x.
*/
-char __ovld __cnfn min(char x, char y);
-uchar __ovld __cnfn min(uchar x, uchar y);
-char2 __ovld __cnfn min(char2 x, char2 y);
-uchar2 __ovld __cnfn min(uchar2 x, uchar2 y);
-char3 __ovld __cnfn min(char3 x, char3 y);
-uchar3 __ovld __cnfn min(uchar3 x, uchar3 y);
-char4 __ovld __cnfn min(char4 x, char4 y);
-uchar4 __ovld __cnfn min(uchar4 x, uchar4 y);
-char8 __ovld __cnfn min(char8 x, char8 y);
-uchar8 __ovld __cnfn min(uchar8 x, uchar8 y);
-char16 __ovld __cnfn min(char16 x, char16 y);
-uchar16 __ovld __cnfn min(uchar16 x, uchar16 y);
-short __ovld __cnfn min(short x, short y);
-ushort __ovld __cnfn min(ushort x, ushort y);
-short2 __ovld __cnfn min(short2 x, short2 y);
-ushort2 __ovld __cnfn min(ushort2 x, ushort2 y);
-short3 __ovld __cnfn min(short3 x, short3 y);
-ushort3 __ovld __cnfn min(ushort3 x, ushort3 y);
-short4 __ovld __cnfn min(short4 x, short4 y);
-ushort4 __ovld __cnfn min(ushort4 x, ushort4 y);
-short8 __ovld __cnfn min(short8 x, short8 y);
-ushort8 __ovld __cnfn min(ushort8 x, ushort8 y);
-short16 __ovld __cnfn min(short16 x, short16 y);
-ushort16 __ovld __cnfn min(ushort16 x, ushort16 y);
-int __ovld __cnfn min(int x, int y);
-uint __ovld __cnfn min(uint x, uint y);
-int2 __ovld __cnfn min(int2 x, int2 y);
-uint2 __ovld __cnfn min(uint2 x, uint2 y);
-int3 __ovld __cnfn min(int3 x, int3 y);
-uint3 __ovld __cnfn min(uint3 x, uint3 y);
-int4 __ovld __cnfn min(int4 x, int4 y);
-uint4 __ovld __cnfn min(uint4 x, uint4 y);
-int8 __ovld __cnfn min(int8 x, int8 y);
-uint8 __ovld __cnfn min(uint8 x, uint8 y);
-int16 __ovld __cnfn min(int16 x, int16 y);
-uint16 __ovld __cnfn min(uint16 x, uint16 y);
-long __ovld __cnfn min(long x, long y);
-ulong __ovld __cnfn min(ulong x, ulong y);
-long2 __ovld __cnfn min(long2 x, long2 y);
-ulong2 __ovld __cnfn min(ulong2 x, ulong2 y);
-long3 __ovld __cnfn min(long3 x, long3 y);
-ulong3 __ovld __cnfn min(ulong3 x, ulong3 y);
-long4 __ovld __cnfn min(long4 x, long4 y);
-ulong4 __ovld __cnfn min(ulong4 x, ulong4 y);
-long8 __ovld __cnfn min(long8 x, long8 y);
-ulong8 __ovld __cnfn min(ulong8 x, ulong8 y);
-long16 __ovld __cnfn min(long16 x, long16 y);
-ulong16 __ovld __cnfn min(ulong16 x, ulong16 y);
-char2 __ovld __cnfn min(char2 x, char y);
-uchar2 __ovld __cnfn min(uchar2 x, uchar y);
-char3 __ovld __cnfn min(char3 x, char y);
-uchar3 __ovld __cnfn min(uchar3 x, uchar y);
-char4 __ovld __cnfn min(char4 x, char y);
-uchar4 __ovld __cnfn min(uchar4 x, uchar y);
-char8 __ovld __cnfn min(char8 x, char y);
-uchar8 __ovld __cnfn min(uchar8 x, uchar y);
-char16 __ovld __cnfn min(char16 x, char y);
-uchar16 __ovld __cnfn min(uchar16 x, uchar y);
-short2 __ovld __cnfn min(short2 x, short y);
-ushort2 __ovld __cnfn min(ushort2 x, ushort y);
-short3 __ovld __cnfn min(short3 x, short y);
-ushort3 __ovld __cnfn min(ushort3 x, ushort y);
-short4 __ovld __cnfn min(short4 x, short y);
-ushort4 __ovld __cnfn min(ushort4 x, ushort y);
-short8 __ovld __cnfn min(short8 x, short y);
-ushort8 __ovld __cnfn min(ushort8 x, ushort y);
-short16 __ovld __cnfn min(short16 x, short y);
-ushort16 __ovld __cnfn min(ushort16 x, ushort y);
-int2 __ovld __cnfn min(int2 x, int y);
-uint2 __ovld __cnfn min(uint2 x, uint y);
-int3 __ovld __cnfn min(int3 x, int y);
-uint3 __ovld __cnfn min(uint3 x, uint y);
-int4 __ovld __cnfn min(int4 x, int y);
-uint4 __ovld __cnfn min(uint4 x, uint y);
-int8 __ovld __cnfn min(int8 x, int y);
-uint8 __ovld __cnfn min(uint8 x, uint y);
-int16 __ovld __cnfn min(int16 x, int y);
-uint16 __ovld __cnfn min(uint16 x, uint y);
-long2 __ovld __cnfn min(long2 x, long y);
-ulong2 __ovld __cnfn min(ulong2 x, ulong y);
-long3 __ovld __cnfn min(long3 x, long y);
-ulong3 __ovld __cnfn min(ulong3 x, ulong y);
-long4 __ovld __cnfn min(long4 x, long y);
-ulong4 __ovld __cnfn min(ulong4 x, ulong y);
-long8 __ovld __cnfn min(long8 x, long y);
-ulong8 __ovld __cnfn min(ulong8 x, ulong y);
-long16 __ovld __cnfn min(long16 x, long y);
-ulong16 __ovld __cnfn min(ulong16 x, ulong y);
+char __ovld __cnfn min(char, char);
+uchar __ovld __cnfn min(uchar, uchar);
+char2 __ovld __cnfn min(char2, char2);
+uchar2 __ovld __cnfn min(uchar2, uchar2);
+char3 __ovld __cnfn min(char3, char3);
+uchar3 __ovld __cnfn min(uchar3, uchar3);
+char4 __ovld __cnfn min(char4, char4);
+uchar4 __ovld __cnfn min(uchar4, uchar4);
+char8 __ovld __cnfn min(char8, char8);
+uchar8 __ovld __cnfn min(uchar8, uchar8);
+char16 __ovld __cnfn min(char16, char16);
+uchar16 __ovld __cnfn min(uchar16, uchar16);
+short __ovld __cnfn min(short, short);
+ushort __ovld __cnfn min(ushort, ushort);
+short2 __ovld __cnfn min(short2, short2);
+ushort2 __ovld __cnfn min(ushort2, ushort2);
+short3 __ovld __cnfn min(short3, short3);
+ushort3 __ovld __cnfn min(ushort3, ushort3);
+short4 __ovld __cnfn min(short4, short4);
+ushort4 __ovld __cnfn min(ushort4, ushort4);
+short8 __ovld __cnfn min(short8, short8);
+ushort8 __ovld __cnfn min(ushort8, ushort8);
+short16 __ovld __cnfn min(short16, short16);
+ushort16 __ovld __cnfn min(ushort16, ushort16);
+int __ovld __cnfn min(int, int);
+uint __ovld __cnfn min(uint, uint);
+int2 __ovld __cnfn min(int2, int2);
+uint2 __ovld __cnfn min(uint2, uint2);
+int3 __ovld __cnfn min(int3, int3);
+uint3 __ovld __cnfn min(uint3, uint3);
+int4 __ovld __cnfn min(int4, int4);
+uint4 __ovld __cnfn min(uint4, uint4);
+int8 __ovld __cnfn min(int8, int8);
+uint8 __ovld __cnfn min(uint8, uint8);
+int16 __ovld __cnfn min(int16, int16);
+uint16 __ovld __cnfn min(uint16, uint16);
+long __ovld __cnfn min(long, long);
+ulong __ovld __cnfn min(ulong, ulong);
+long2 __ovld __cnfn min(long2, long2);
+ulong2 __ovld __cnfn min(ulong2, ulong2);
+long3 __ovld __cnfn min(long3, long3);
+ulong3 __ovld __cnfn min(ulong3, ulong3);
+long4 __ovld __cnfn min(long4, long4);
+ulong4 __ovld __cnfn min(ulong4, ulong4);
+long8 __ovld __cnfn min(long8, long8);
+ulong8 __ovld __cnfn min(ulong8, ulong8);
+long16 __ovld __cnfn min(long16, long16);
+ulong16 __ovld __cnfn min(ulong16, ulong16);
+char2 __ovld __cnfn min(char2, char);
+uchar2 __ovld __cnfn min(uchar2, uchar);
+char3 __ovld __cnfn min(char3, char);
+uchar3 __ovld __cnfn min(uchar3, uchar);
+char4 __ovld __cnfn min(char4, char);
+uchar4 __ovld __cnfn min(uchar4, uchar);
+char8 __ovld __cnfn min(char8, char);
+uchar8 __ovld __cnfn min(uchar8, uchar);
+char16 __ovld __cnfn min(char16, char);
+uchar16 __ovld __cnfn min(uchar16, uchar);
+short2 __ovld __cnfn min(short2, short);
+ushort2 __ovld __cnfn min(ushort2, ushort);
+short3 __ovld __cnfn min(short3, short);
+ushort3 __ovld __cnfn min(ushort3, ushort);
+short4 __ovld __cnfn min(short4, short);
+ushort4 __ovld __cnfn min(ushort4, ushort);
+short8 __ovld __cnfn min(short8, short);
+ushort8 __ovld __cnfn min(ushort8, ushort);
+short16 __ovld __cnfn min(short16, short);
+ushort16 __ovld __cnfn min(ushort16, ushort);
+int2 __ovld __cnfn min(int2, int);
+uint2 __ovld __cnfn min(uint2, uint);
+int3 __ovld __cnfn min(int3, int);
+uint3 __ovld __cnfn min(uint3, uint);
+int4 __ovld __cnfn min(int4, int);
+uint4 __ovld __cnfn min(uint4, uint);
+int8 __ovld __cnfn min(int8, int);
+uint8 __ovld __cnfn min(uint8, uint);
+int16 __ovld __cnfn min(int16, int);
+uint16 __ovld __cnfn min(uint16, uint);
+long2 __ovld __cnfn min(long2, long);
+ulong2 __ovld __cnfn min(ulong2, ulong);
+long3 __ovld __cnfn min(long3, long);
+ulong3 __ovld __cnfn min(ulong3, ulong);
+long4 __ovld __cnfn min(long4, long);
+ulong4 __ovld __cnfn min(ulong4, ulong);
+long8 __ovld __cnfn min(long8, long);
+ulong8 __ovld __cnfn min(ulong8, ulong);
+long16 __ovld __cnfn min(long16, long);
+ulong16 __ovld __cnfn min(ulong16, ulong);
/**
* Computes x * y and returns the high half of the
* product of x and y.
*/
-char __ovld __cnfn mul_hi(char x, char y);
-uchar __ovld __cnfn mul_hi(uchar x, uchar y);
-char2 __ovld __cnfn mul_hi(char2 x, char2 y);
-uchar2 __ovld __cnfn mul_hi(uchar2 x, uchar2 y);
-char3 __ovld __cnfn mul_hi(char3 x, char3 y);
-uchar3 __ovld __cnfn mul_hi(uchar3 x, uchar3 y);
-char4 __ovld __cnfn mul_hi(char4 x, char4 y);
-uchar4 __ovld __cnfn mul_hi(uchar4 x, uchar4 y);
-char8 __ovld __cnfn mul_hi(char8 x, char8 y);
-uchar8 __ovld __cnfn mul_hi(uchar8 x, uchar8 y);
-char16 __ovld __cnfn mul_hi(char16 x, char16 y);
-uchar16 __ovld __cnfn mul_hi(uchar16 x, uchar16 y);
-short __ovld __cnfn mul_hi(short x, short y);
-ushort __ovld __cnfn mul_hi(ushort x, ushort y);
-short2 __ovld __cnfn mul_hi(short2 x, short2 y);
-ushort2 __ovld __cnfn mul_hi(ushort2 x, ushort2 y);
-short3 __ovld __cnfn mul_hi(short3 x, short3 y);
-ushort3 __ovld __cnfn mul_hi(ushort3 x, ushort3 y);
-short4 __ovld __cnfn mul_hi(short4 x, short4 y);
-ushort4 __ovld __cnfn mul_hi(ushort4 x, ushort4 y);
-short8 __ovld __cnfn mul_hi(short8 x, short8 y);
-ushort8 __ovld __cnfn mul_hi(ushort8 x, ushort8 y);
-short16 __ovld __cnfn mul_hi(short16 x, short16 y);
-ushort16 __ovld __cnfn mul_hi(ushort16 x, ushort16 y);
-int __ovld __cnfn mul_hi(int x, int y);
-uint __ovld __cnfn mul_hi(uint x, uint y);
-int2 __ovld __cnfn mul_hi(int2 x, int2 y);
-uint2 __ovld __cnfn mul_hi(uint2 x, uint2 y);
-int3 __ovld __cnfn mul_hi(int3 x, int3 y);
-uint3 __ovld __cnfn mul_hi(uint3 x, uint3 y);
-int4 __ovld __cnfn mul_hi(int4 x, int4 y);
-uint4 __ovld __cnfn mul_hi(uint4 x, uint4 y);
-int8 __ovld __cnfn mul_hi(int8 x, int8 y);
-uint8 __ovld __cnfn mul_hi(uint8 x, uint8 y);
-int16 __ovld __cnfn mul_hi(int16 x, int16 y);
-uint16 __ovld __cnfn mul_hi(uint16 x, uint16 y);
-long __ovld __cnfn mul_hi(long x, long y);
-ulong __ovld __cnfn mul_hi(ulong x, ulong y);
-long2 __ovld __cnfn mul_hi(long2 x, long2 y);
-ulong2 __ovld __cnfn mul_hi(ulong2 x, ulong2 y);
-long3 __ovld __cnfn mul_hi(long3 x, long3 y);
-ulong3 __ovld __cnfn mul_hi(ulong3 x, ulong3 y);
-long4 __ovld __cnfn mul_hi(long4 x, long4 y);
-ulong4 __ovld __cnfn mul_hi(ulong4 x, ulong4 y);
-long8 __ovld __cnfn mul_hi(long8 x, long8 y);
-ulong8 __ovld __cnfn mul_hi(ulong8 x, ulong8 y);
-long16 __ovld __cnfn mul_hi(long16 x, long16 y);
-ulong16 __ovld __cnfn mul_hi(ulong16 x, ulong16 y);
+char __ovld __cnfn mul_hi(char, char);
+uchar __ovld __cnfn mul_hi(uchar, uchar);
+char2 __ovld __cnfn mul_hi(char2, char2);
+uchar2 __ovld __cnfn mul_hi(uchar2, uchar2);
+char3 __ovld __cnfn mul_hi(char3, char3);
+uchar3 __ovld __cnfn mul_hi(uchar3, uchar3);
+char4 __ovld __cnfn mul_hi(char4, char4);
+uchar4 __ovld __cnfn mul_hi(uchar4, uchar4);
+char8 __ovld __cnfn mul_hi(char8, char8);
+uchar8 __ovld __cnfn mul_hi(uchar8, uchar8);
+char16 __ovld __cnfn mul_hi(char16, char16);
+uchar16 __ovld __cnfn mul_hi(uchar16, uchar16);
+short __ovld __cnfn mul_hi(short, short);
+ushort __ovld __cnfn mul_hi(ushort, ushort);
+short2 __ovld __cnfn mul_hi(short2, short2);
+ushort2 __ovld __cnfn mul_hi(ushort2, ushort2);
+short3 __ovld __cnfn mul_hi(short3, short3);
+ushort3 __ovld __cnfn mul_hi(ushort3, ushort3);
+short4 __ovld __cnfn mul_hi(short4, short4);
+ushort4 __ovld __cnfn mul_hi(ushort4, ushort4);
+short8 __ovld __cnfn mul_hi(short8, short8);
+ushort8 __ovld __cnfn mul_hi(ushort8, ushort8);
+short16 __ovld __cnfn mul_hi(short16, short16);
+ushort16 __ovld __cnfn mul_hi(ushort16, ushort16);
+int __ovld __cnfn mul_hi(int, int);
+uint __ovld __cnfn mul_hi(uint, uint);
+int2 __ovld __cnfn mul_hi(int2, int2);
+uint2 __ovld __cnfn mul_hi(uint2, uint2);
+int3 __ovld __cnfn mul_hi(int3, int3);
+uint3 __ovld __cnfn mul_hi(uint3, uint3);
+int4 __ovld __cnfn mul_hi(int4, int4);
+uint4 __ovld __cnfn mul_hi(uint4, uint4);
+int8 __ovld __cnfn mul_hi(int8, int8);
+uint8 __ovld __cnfn mul_hi(uint8, uint8);
+int16 __ovld __cnfn mul_hi(int16, int16);
+uint16 __ovld __cnfn mul_hi(uint16, uint16);
+long __ovld __cnfn mul_hi(long, long);
+ulong __ovld __cnfn mul_hi(ulong, ulong);
+long2 __ovld __cnfn mul_hi(long2, long2);
+ulong2 __ovld __cnfn mul_hi(ulong2, ulong2);
+long3 __ovld __cnfn mul_hi(long3, long3);
+ulong3 __ovld __cnfn mul_hi(ulong3, ulong3);
+long4 __ovld __cnfn mul_hi(long4, long4);
+ulong4 __ovld __cnfn mul_hi(ulong4, ulong4);
+long8 __ovld __cnfn mul_hi(long8, long8);
+ulong8 __ovld __cnfn mul_hi(ulong8, ulong8);
+long16 __ovld __cnfn mul_hi(long16, long16);
+ulong16 __ovld __cnfn mul_hi(ulong16, ulong16);
/**
* For each element in v, the bits are shifted left by
@@ -9753,209 +9767,209 @@ ulong16 __ovld __cnfn mul_hi(ulong16 x, ulong16 y);
* side of the element are shifted back in from the
* right.
*/
-char __ovld __cnfn rotate(char v, char i);
-uchar __ovld __cnfn rotate(uchar v, uchar i);
-char2 __ovld __cnfn rotate(char2 v, char2 i);
-uchar2 __ovld __cnfn rotate(uchar2 v, uchar2 i);
-char3 __ovld __cnfn rotate(char3 v, char3 i);
-uchar3 __ovld __cnfn rotate(uchar3 v, uchar3 i);
-char4 __ovld __cnfn rotate(char4 v, char4 i);
-uchar4 __ovld __cnfn rotate(uchar4 v, uchar4 i);
-char8 __ovld __cnfn rotate(char8 v, char8 i);
-uchar8 __ovld __cnfn rotate(uchar8 v, uchar8 i);
-char16 __ovld __cnfn rotate(char16 v, char16 i);
-uchar16 __ovld __cnfn rotate(uchar16 v, uchar16 i);
-short __ovld __cnfn rotate(short v, short i);
-ushort __ovld __cnfn rotate(ushort v, ushort i);
-short2 __ovld __cnfn rotate(short2 v, short2 i);
-ushort2 __ovld __cnfn rotate(ushort2 v, ushort2 i);
-short3 __ovld __cnfn rotate(short3 v, short3 i);
-ushort3 __ovld __cnfn rotate(ushort3 v, ushort3 i);
-short4 __ovld __cnfn rotate(short4 v, short4 i);
-ushort4 __ovld __cnfn rotate(ushort4 v, ushort4 i);
-short8 __ovld __cnfn rotate(short8 v, short8 i);
-ushort8 __ovld __cnfn rotate(ushort8 v, ushort8 i);
-short16 __ovld __cnfn rotate(short16 v, short16 i);
-ushort16 __ovld __cnfn rotate(ushort16 v, ushort16 i);
-int __ovld __cnfn rotate(int v, int i);
-uint __ovld __cnfn rotate(uint v, uint i);
-int2 __ovld __cnfn rotate(int2 v, int2 i);
-uint2 __ovld __cnfn rotate(uint2 v, uint2 i);
-int3 __ovld __cnfn rotate(int3 v, int3 i);
-uint3 __ovld __cnfn rotate(uint3 v, uint3 i);
-int4 __ovld __cnfn rotate(int4 v, int4 i);
-uint4 __ovld __cnfn rotate(uint4 v, uint4 i);
-int8 __ovld __cnfn rotate(int8 v, int8 i);
-uint8 __ovld __cnfn rotate(uint8 v, uint8 i);
-int16 __ovld __cnfn rotate(int16 v, int16 i);
-uint16 __ovld __cnfn rotate(uint16 v, uint16 i);
-long __ovld __cnfn rotate(long v, long i);
-ulong __ovld __cnfn rotate(ulong v, ulong i);
-long2 __ovld __cnfn rotate(long2 v, long2 i);
-ulong2 __ovld __cnfn rotate(ulong2 v, ulong2 i);
-long3 __ovld __cnfn rotate(long3 v, long3 i);
-ulong3 __ovld __cnfn rotate(ulong3 v, ulong3 i);
-long4 __ovld __cnfn rotate(long4 v, long4 i);
-ulong4 __ovld __cnfn rotate(ulong4 v, ulong4 i);
-long8 __ovld __cnfn rotate(long8 v, long8 i);
-ulong8 __ovld __cnfn rotate(ulong8 v, ulong8 i);
-long16 __ovld __cnfn rotate(long16 v, long16 i);
-ulong16 __ovld __cnfn rotate(ulong16 v, ulong16 i);
+char __ovld __cnfn rotate(char, char);
+uchar __ovld __cnfn rotate(uchar, uchar);
+char2 __ovld __cnfn rotate(char2, char2);
+uchar2 __ovld __cnfn rotate(uchar2, uchar2);
+char3 __ovld __cnfn rotate(char3, char3);
+uchar3 __ovld __cnfn rotate(uchar3, uchar3);
+char4 __ovld __cnfn rotate(char4, char4);
+uchar4 __ovld __cnfn rotate(uchar4, uchar4);
+char8 __ovld __cnfn rotate(char8, char8);
+uchar8 __ovld __cnfn rotate(uchar8, uchar8);
+char16 __ovld __cnfn rotate(char16, char16);
+uchar16 __ovld __cnfn rotate(uchar16, uchar16);
+short __ovld __cnfn rotate(short, short);
+ushort __ovld __cnfn rotate(ushort, ushort);
+short2 __ovld __cnfn rotate(short2, short2);
+ushort2 __ovld __cnfn rotate(ushort2, ushort2);
+short3 __ovld __cnfn rotate(short3, short3);
+ushort3 __ovld __cnfn rotate(ushort3, ushort3);
+short4 __ovld __cnfn rotate(short4, short4);
+ushort4 __ovld __cnfn rotate(ushort4, ushort4);
+short8 __ovld __cnfn rotate(short8, short8);
+ushort8 __ovld __cnfn rotate(ushort8, ushort8);
+short16 __ovld __cnfn rotate(short16, short16);
+ushort16 __ovld __cnfn rotate(ushort16, ushort16);
+int __ovld __cnfn rotate(int, int);
+uint __ovld __cnfn rotate(uint, uint);
+int2 __ovld __cnfn rotate(int2, int2);
+uint2 __ovld __cnfn rotate(uint2, uint2);
+int3 __ovld __cnfn rotate(int3, int3);
+uint3 __ovld __cnfn rotate(uint3, uint3);
+int4 __ovld __cnfn rotate(int4, int4);
+uint4 __ovld __cnfn rotate(uint4, uint4);
+int8 __ovld __cnfn rotate(int8, int8);
+uint8 __ovld __cnfn rotate(uint8, uint8);
+int16 __ovld __cnfn rotate(int16, int16);
+uint16 __ovld __cnfn rotate(uint16, uint16);
+long __ovld __cnfn rotate(long, long);
+ulong __ovld __cnfn rotate(ulong, ulong);
+long2 __ovld __cnfn rotate(long2, long2);
+ulong2 __ovld __cnfn rotate(ulong2, ulong2);
+long3 __ovld __cnfn rotate(long3, long3);
+ulong3 __ovld __cnfn rotate(ulong3, ulong3);
+long4 __ovld __cnfn rotate(long4, long4);
+ulong4 __ovld __cnfn rotate(ulong4, ulong4);
+long8 __ovld __cnfn rotate(long8, long8);
+ulong8 __ovld __cnfn rotate(ulong8, ulong8);
+long16 __ovld __cnfn rotate(long16, long16);
+ulong16 __ovld __cnfn rotate(ulong16, ulong16);
/**
* Returns x - y and saturates the result.
*/
-char __ovld __cnfn sub_sat(char x, char y);
-uchar __ovld __cnfn sub_sat(uchar x, uchar y);
-char2 __ovld __cnfn sub_sat(char2 x, char2 y);
-uchar2 __ovld __cnfn sub_sat(uchar2 x, uchar2 y);
-char3 __ovld __cnfn sub_sat(char3 x, char3 y);
-uchar3 __ovld __cnfn sub_sat(uchar3 x, uchar3 y);
-char4 __ovld __cnfn sub_sat(char4 x, char4 y);
-uchar4 __ovld __cnfn sub_sat(uchar4 x, uchar4 y);
-char8 __ovld __cnfn sub_sat(char8 x, char8 y);
-uchar8 __ovld __cnfn sub_sat(uchar8 x, uchar8 y);
-char16 __ovld __cnfn sub_sat(char16 x, char16 y);
-uchar16 __ovld __cnfn sub_sat(uchar16 x, uchar16 y);
-short __ovld __cnfn sub_sat(short x, short y);
-ushort __ovld __cnfn sub_sat(ushort x, ushort y);
-short2 __ovld __cnfn sub_sat(short2 x, short2 y);
-ushort2 __ovld __cnfn sub_sat(ushort2 x, ushort2 y);
-short3 __ovld __cnfn sub_sat(short3 x, short3 y);
-ushort3 __ovld __cnfn sub_sat(ushort3 x, ushort3 y);
-short4 __ovld __cnfn sub_sat(short4 x, short4 y);
-ushort4 __ovld __cnfn sub_sat(ushort4 x, ushort4 y);
-short8 __ovld __cnfn sub_sat(short8 x, short8 y);
-ushort8 __ovld __cnfn sub_sat(ushort8 x, ushort8 y);
-short16 __ovld __cnfn sub_sat(short16 x, short16 y);
-ushort16 __ovld __cnfn sub_sat(ushort16 x, ushort16 y);
-int __ovld __cnfn sub_sat(int x, int y);
-uint __ovld __cnfn sub_sat(uint x, uint y);
-int2 __ovld __cnfn sub_sat(int2 x, int2 y);
-uint2 __ovld __cnfn sub_sat(uint2 x, uint2 y);
-int3 __ovld __cnfn sub_sat(int3 x, int3 y);
-uint3 __ovld __cnfn sub_sat(uint3 x, uint3 y);
-int4 __ovld __cnfn sub_sat(int4 x, int4 y);
-uint4 __ovld __cnfn sub_sat(uint4 x, uint4 y);
-int8 __ovld __cnfn sub_sat(int8 x, int8 y);
-uint8 __ovld __cnfn sub_sat(uint8 x, uint8 y);
-int16 __ovld __cnfn sub_sat(int16 x, int16 y);
-uint16 __ovld __cnfn sub_sat(uint16 x, uint16 y);
-long __ovld __cnfn sub_sat(long x, long y);
-ulong __ovld __cnfn sub_sat(ulong x, ulong y);
-long2 __ovld __cnfn sub_sat(long2 x, long2 y);
-ulong2 __ovld __cnfn sub_sat(ulong2 x, ulong2 y);
-long3 __ovld __cnfn sub_sat(long3 x, long3 y);
-ulong3 __ovld __cnfn sub_sat(ulong3 x, ulong3 y);
-long4 __ovld __cnfn sub_sat(long4 x, long4 y);
-ulong4 __ovld __cnfn sub_sat(ulong4 x, ulong4 y);
-long8 __ovld __cnfn sub_sat(long8 x, long8 y);
-ulong8 __ovld __cnfn sub_sat(ulong8 x, ulong8 y);
-long16 __ovld __cnfn sub_sat(long16 x, long16 y);
-ulong16 __ovld __cnfn sub_sat(ulong16 x, ulong16 y);
+char __ovld __cnfn sub_sat(char, char);
+uchar __ovld __cnfn sub_sat(uchar, uchar);
+char2 __ovld __cnfn sub_sat(char2, char2);
+uchar2 __ovld __cnfn sub_sat(uchar2, uchar2);
+char3 __ovld __cnfn sub_sat(char3, char3);
+uchar3 __ovld __cnfn sub_sat(uchar3, uchar3);
+char4 __ovld __cnfn sub_sat(char4, char4);
+uchar4 __ovld __cnfn sub_sat(uchar4, uchar4);
+char8 __ovld __cnfn sub_sat(char8, char8);
+uchar8 __ovld __cnfn sub_sat(uchar8, uchar8);
+char16 __ovld __cnfn sub_sat(char16, char16);
+uchar16 __ovld __cnfn sub_sat(uchar16, uchar16);
+short __ovld __cnfn sub_sat(short, short);
+ushort __ovld __cnfn sub_sat(ushort, ushort);
+short2 __ovld __cnfn sub_sat(short2, short2);
+ushort2 __ovld __cnfn sub_sat(ushort2, ushort2);
+short3 __ovld __cnfn sub_sat(short3, short3);
+ushort3 __ovld __cnfn sub_sat(ushort3, ushort3);
+short4 __ovld __cnfn sub_sat(short4, short4);
+ushort4 __ovld __cnfn sub_sat(ushort4, ushort4);
+short8 __ovld __cnfn sub_sat(short8, short8);
+ushort8 __ovld __cnfn sub_sat(ushort8, ushort8);
+short16 __ovld __cnfn sub_sat(short16, short16);
+ushort16 __ovld __cnfn sub_sat(ushort16, ushort16);
+int __ovld __cnfn sub_sat(int, int);
+uint __ovld __cnfn sub_sat(uint, uint);
+int2 __ovld __cnfn sub_sat(int2, int2);
+uint2 __ovld __cnfn sub_sat(uint2, uint2);
+int3 __ovld __cnfn sub_sat(int3, int3);
+uint3 __ovld __cnfn sub_sat(uint3, uint3);
+int4 __ovld __cnfn sub_sat(int4, int4);
+uint4 __ovld __cnfn sub_sat(uint4, uint4);
+int8 __ovld __cnfn sub_sat(int8, int8);
+uint8 __ovld __cnfn sub_sat(uint8, uint8);
+int16 __ovld __cnfn sub_sat(int16, int16);
+uint16 __ovld __cnfn sub_sat(uint16, uint16);
+long __ovld __cnfn sub_sat(long, long);
+ulong __ovld __cnfn sub_sat(ulong, ulong);
+long2 __ovld __cnfn sub_sat(long2, long2);
+ulong2 __ovld __cnfn sub_sat(ulong2, ulong2);
+long3 __ovld __cnfn sub_sat(long3, long3);
+ulong3 __ovld __cnfn sub_sat(ulong3, ulong3);
+long4 __ovld __cnfn sub_sat(long4, long4);
+ulong4 __ovld __cnfn sub_sat(ulong4, ulong4);
+long8 __ovld __cnfn sub_sat(long8, long8);
+ulong8 __ovld __cnfn sub_sat(ulong8, ulong8);
+long16 __ovld __cnfn sub_sat(long16, long16);
+ulong16 __ovld __cnfn sub_sat(ulong16, ulong16);
/**
* result[i] = ((short)hi[i] << 8) | lo[i]
* result[i] = ((ushort)hi[i] << 8) | lo[i]
*/
-short __ovld __cnfn upsample(char hi, uchar lo);
-ushort __ovld __cnfn upsample(uchar hi, uchar lo);
-short2 __ovld __cnfn upsample(char2 hi, uchar2 lo);
-short3 __ovld __cnfn upsample(char3 hi, uchar3 lo);
-short4 __ovld __cnfn upsample(char4 hi, uchar4 lo);
-short8 __ovld __cnfn upsample(char8 hi, uchar8 lo);
-short16 __ovld __cnfn upsample(char16 hi, uchar16 lo);
-ushort2 __ovld __cnfn upsample(uchar2 hi, uchar2 lo);
-ushort3 __ovld __cnfn upsample(uchar3 hi, uchar3 lo);
-ushort4 __ovld __cnfn upsample(uchar4 hi, uchar4 lo);
-ushort8 __ovld __cnfn upsample(uchar8 hi, uchar8 lo);
-ushort16 __ovld __cnfn upsample(uchar16 hi, uchar16 lo);
+short __ovld __cnfn upsample(char, uchar);
+ushort __ovld __cnfn upsample(uchar, uchar);
+short2 __ovld __cnfn upsample(char2, uchar2);
+short3 __ovld __cnfn upsample(char3, uchar3);
+short4 __ovld __cnfn upsample(char4, uchar4);
+short8 __ovld __cnfn upsample(char8, uchar8);
+short16 __ovld __cnfn upsample(char16, uchar16);
+ushort2 __ovld __cnfn upsample(uchar2, uchar2);
+ushort3 __ovld __cnfn upsample(uchar3, uchar3);
+ushort4 __ovld __cnfn upsample(uchar4, uchar4);
+ushort8 __ovld __cnfn upsample(uchar8, uchar8);
+ushort16 __ovld __cnfn upsample(uchar16, uchar16);
/**
* result[i] = ((int)hi[i] << 16) | lo[i]
* result[i] = ((uint)hi[i] << 16) | lo[i]
*/
-int __ovld __cnfn upsample(short hi, ushort lo);
-uint __ovld __cnfn upsample(ushort hi, ushort lo);
-int2 __ovld __cnfn upsample(short2 hi, ushort2 lo);
-int3 __ovld __cnfn upsample(short3 hi, ushort3 lo);
-int4 __ovld __cnfn upsample(short4 hi, ushort4 lo);
-int8 __ovld __cnfn upsample(short8 hi, ushort8 lo);
-int16 __ovld __cnfn upsample(short16 hi, ushort16 lo);
-uint2 __ovld __cnfn upsample(ushort2 hi, ushort2 lo);
-uint3 __ovld __cnfn upsample(ushort3 hi, ushort3 lo);
-uint4 __ovld __cnfn upsample(ushort4 hi, ushort4 lo);
-uint8 __ovld __cnfn upsample(ushort8 hi, ushort8 lo);
-uint16 __ovld __cnfn upsample(ushort16 hi, ushort16 lo);
+int __ovld __cnfn upsample(short, ushort);
+uint __ovld __cnfn upsample(ushort, ushort);
+int2 __ovld __cnfn upsample(short2, ushort2);
+int3 __ovld __cnfn upsample(short3, ushort3);
+int4 __ovld __cnfn upsample(short4, ushort4);
+int8 __ovld __cnfn upsample(short8, ushort8);
+int16 __ovld __cnfn upsample(short16, ushort16);
+uint2 __ovld __cnfn upsample(ushort2, ushort2);
+uint3 __ovld __cnfn upsample(ushort3, ushort3);
+uint4 __ovld __cnfn upsample(ushort4, ushort4);
+uint8 __ovld __cnfn upsample(ushort8, ushort8);
+uint16 __ovld __cnfn upsample(ushort16, ushort16);
/**
* result[i] = ((long)hi[i] << 32) | lo[i]
* result[i] = ((ulong)hi[i] << 32) | lo[i]
*/
-long __ovld __cnfn upsample(int hi, uint lo);
-ulong __ovld __cnfn upsample(uint hi, uint lo);
-long2 __ovld __cnfn upsample(int2 hi, uint2 lo);
-long3 __ovld __cnfn upsample(int3 hi, uint3 lo);
-long4 __ovld __cnfn upsample(int4 hi, uint4 lo);
-long8 __ovld __cnfn upsample(int8 hi, uint8 lo);
-long16 __ovld __cnfn upsample(int16 hi, uint16 lo);
-ulong2 __ovld __cnfn upsample(uint2 hi, uint2 lo);
-ulong3 __ovld __cnfn upsample(uint3 hi, uint3 lo);
-ulong4 __ovld __cnfn upsample(uint4 hi, uint4 lo);
-ulong8 __ovld __cnfn upsample(uint8 hi, uint8 lo);
-ulong16 __ovld __cnfn upsample(uint16 hi, uint16 lo);
+long __ovld __cnfn upsample(int, uint);
+ulong __ovld __cnfn upsample(uint, uint);
+long2 __ovld __cnfn upsample(int2, uint2);
+long3 __ovld __cnfn upsample(int3, uint3);
+long4 __ovld __cnfn upsample(int4, uint4);
+long8 __ovld __cnfn upsample(int8, uint8);
+long16 __ovld __cnfn upsample(int16, uint16);
+ulong2 __ovld __cnfn upsample(uint2, uint2);
+ulong3 __ovld __cnfn upsample(uint3, uint3);
+ulong4 __ovld __cnfn upsample(uint4, uint4);
+ulong8 __ovld __cnfn upsample(uint8, uint8);
+ulong16 __ovld __cnfn upsample(uint16, uint16);
/*
* popcount(x): returns the number of set bit in x
*/
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-char __ovld __cnfn popcount(char x);
-uchar __ovld __cnfn popcount(uchar x);
-char2 __ovld __cnfn popcount(char2 x);
-uchar2 __ovld __cnfn popcount(uchar2 x);
-char3 __ovld __cnfn popcount(char3 x);
-uchar3 __ovld __cnfn popcount(uchar3 x);
-char4 __ovld __cnfn popcount(char4 x);
-uchar4 __ovld __cnfn popcount(uchar4 x);
-char8 __ovld __cnfn popcount(char8 x);
-uchar8 __ovld __cnfn popcount(uchar8 x);
-char16 __ovld __cnfn popcount(char16 x);
-uchar16 __ovld __cnfn popcount(uchar16 x);
-short __ovld __cnfn popcount(short x);
-ushort __ovld __cnfn popcount(ushort x);
-short2 __ovld __cnfn popcount(short2 x);
-ushort2 __ovld __cnfn popcount(ushort2 x);
-short3 __ovld __cnfn popcount(short3 x);
-ushort3 __ovld __cnfn popcount(ushort3 x);
-short4 __ovld __cnfn popcount(short4 x);
-ushort4 __ovld __cnfn popcount(ushort4 x);
-short8 __ovld __cnfn popcount(short8 x);
-ushort8 __ovld __cnfn popcount(ushort8 x);
-short16 __ovld __cnfn popcount(short16 x);
-ushort16 __ovld __cnfn popcount(ushort16 x);
-int __ovld __cnfn popcount(int x);
-uint __ovld __cnfn popcount(uint x);
-int2 __ovld __cnfn popcount(int2 x);
-uint2 __ovld __cnfn popcount(uint2 x);
-int3 __ovld __cnfn popcount(int3 x);
-uint3 __ovld __cnfn popcount(uint3 x);
-int4 __ovld __cnfn popcount(int4 x);
-uint4 __ovld __cnfn popcount(uint4 x);
-int8 __ovld __cnfn popcount(int8 x);
-uint8 __ovld __cnfn popcount(uint8 x);
-int16 __ovld __cnfn popcount(int16 x);
-uint16 __ovld __cnfn popcount(uint16 x);
-long __ovld __cnfn popcount(long x);
-ulong __ovld __cnfn popcount(ulong x);
-long2 __ovld __cnfn popcount(long2 x);
-ulong2 __ovld __cnfn popcount(ulong2 x);
-long3 __ovld __cnfn popcount(long3 x);
-ulong3 __ovld __cnfn popcount(ulong3 x);
-long4 __ovld __cnfn popcount(long4 x);
-ulong4 __ovld __cnfn popcount(ulong4 x);
-long8 __ovld __cnfn popcount(long8 x);
-ulong8 __ovld __cnfn popcount(ulong8 x);
-long16 __ovld __cnfn popcount(long16 x);
-ulong16 __ovld __cnfn popcount(ulong16 x);
+char __ovld __cnfn popcount(char);
+uchar __ovld __cnfn popcount(uchar);
+char2 __ovld __cnfn popcount(char2);
+uchar2 __ovld __cnfn popcount(uchar2);
+char3 __ovld __cnfn popcount(char3);
+uchar3 __ovld __cnfn popcount(uchar3);
+char4 __ovld __cnfn popcount(char4);
+uchar4 __ovld __cnfn popcount(uchar4);
+char8 __ovld __cnfn popcount(char8);
+uchar8 __ovld __cnfn popcount(uchar8);
+char16 __ovld __cnfn popcount(char16);
+uchar16 __ovld __cnfn popcount(uchar16);
+short __ovld __cnfn popcount(short);
+ushort __ovld __cnfn popcount(ushort);
+short2 __ovld __cnfn popcount(short2);
+ushort2 __ovld __cnfn popcount(ushort2);
+short3 __ovld __cnfn popcount(short3);
+ushort3 __ovld __cnfn popcount(ushort3);
+short4 __ovld __cnfn popcount(short4);
+ushort4 __ovld __cnfn popcount(ushort4);
+short8 __ovld __cnfn popcount(short8);
+ushort8 __ovld __cnfn popcount(ushort8);
+short16 __ovld __cnfn popcount(short16);
+ushort16 __ovld __cnfn popcount(ushort16);
+int __ovld __cnfn popcount(int);
+uint __ovld __cnfn popcount(uint);
+int2 __ovld __cnfn popcount(int2);
+uint2 __ovld __cnfn popcount(uint2);
+int3 __ovld __cnfn popcount(int3);
+uint3 __ovld __cnfn popcount(uint3);
+int4 __ovld __cnfn popcount(int4);
+uint4 __ovld __cnfn popcount(uint4);
+int8 __ovld __cnfn popcount(int8);
+uint8 __ovld __cnfn popcount(uint8);
+int16 __ovld __cnfn popcount(int16);
+uint16 __ovld __cnfn popcount(uint16);
+long __ovld __cnfn popcount(long);
+ulong __ovld __cnfn popcount(ulong);
+long2 __ovld __cnfn popcount(long2);
+ulong2 __ovld __cnfn popcount(ulong2);
+long3 __ovld __cnfn popcount(long3);
+ulong3 __ovld __cnfn popcount(ulong3);
+long4 __ovld __cnfn popcount(long4);
+ulong4 __ovld __cnfn popcount(ulong4);
+long8 __ovld __cnfn popcount(long8);
+ulong8 __ovld __cnfn popcount(ulong8);
+long16 __ovld __cnfn popcount(long16);
+ulong16 __ovld __cnfn popcount(ulong16);
#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
/**
@@ -9964,18 +9978,18 @@ ulong16 __ovld __cnfn popcount(ulong16 x);
* Refer to definition of mul24 to see how the 24-bit
* integer multiplication is performed.
*/
-int __ovld __cnfn mad24(int x, int y, int z);
-uint __ovld __cnfn mad24(uint x, uint y, uint z);
-int2 __ovld __cnfn mad24(int2 x, int2 y, int2 z);
-uint2 __ovld __cnfn mad24(uint2 x, uint2 y, uint2 z);
-int3 __ovld __cnfn mad24(int3 x, int3 y, int3 z);
-uint3 __ovld __cnfn mad24(uint3 x, uint3 y, uint3 z);
-int4 __ovld __cnfn mad24(int4 x, int4 y, int4 z);
-uint4 __ovld __cnfn mad24(uint4 x, uint4 y, uint4 z);
-int8 __ovld __cnfn mad24(int8 x, int8 y, int8 z);
-uint8 __ovld __cnfn mad24(uint8 x, uint8 y, uint8 z);
-int16 __ovld __cnfn mad24(int16 x, int16 y, int16 z);
-uint16 __ovld __cnfn mad24(uint16 x, uint16 y, uint16 z);
+int __ovld __cnfn mad24(int, int, int);
+uint __ovld __cnfn mad24(uint, uint, uint);
+int2 __ovld __cnfn mad24(int2, int2, int2);
+uint2 __ovld __cnfn mad24(uint2, uint2, uint2);
+int3 __ovld __cnfn mad24(int3, int3, int3);
+uint3 __ovld __cnfn mad24(uint3, uint3, uint3);
+int4 __ovld __cnfn mad24(int4, int4, int4);
+uint4 __ovld __cnfn mad24(uint4, uint4, uint4);
+int8 __ovld __cnfn mad24(int8, int8, int8);
+uint8 __ovld __cnfn mad24(uint8, uint8, uint8);
+int16 __ovld __cnfn mad24(int16, int16, int16);
+uint16 __ovld __cnfn mad24(uint16, uint16, uint16);
/**
* Multiply two 24-bit integer values x and y. x and y
@@ -9987,18 +10001,18 @@ uint16 __ovld __cnfn mad24(uint16 x, uint16 y, uint16 z);
* x and y are not in this range, the multiplication
* result is implementation-defined.
*/
-int __ovld __cnfn mul24(int x, int y);
-uint __ovld __cnfn mul24(uint x, uint y);
-int2 __ovld __cnfn mul24(int2 x, int2 y);
-uint2 __ovld __cnfn mul24(uint2 x, uint2 y);
-int3 __ovld __cnfn mul24(int3 x, int3 y);
-uint3 __ovld __cnfn mul24(uint3 x, uint3 y);
-int4 __ovld __cnfn mul24(int4 x, int4 y);
-uint4 __ovld __cnfn mul24(uint4 x, uint4 y);
-int8 __ovld __cnfn mul24(int8 x, int8 y);
-uint8 __ovld __cnfn mul24(uint8 x, uint8 y);
-int16 __ovld __cnfn mul24(int16 x, int16 y);
-uint16 __ovld __cnfn mul24(uint16 x, uint16 y);
+int __ovld __cnfn mul24(int, int);
+uint __ovld __cnfn mul24(uint, uint);
+int2 __ovld __cnfn mul24(int2, int2);
+uint2 __ovld __cnfn mul24(uint2, uint2);
+int3 __ovld __cnfn mul24(int3, int3);
+uint3 __ovld __cnfn mul24(uint3, uint3);
+int4 __ovld __cnfn mul24(int4, int4);
+uint4 __ovld __cnfn mul24(uint4, uint4);
+int8 __ovld __cnfn mul24(int8, int8);
+uint8 __ovld __cnfn mul24(uint8, uint8);
+int16 __ovld __cnfn mul24(int16, int16);
+uint16 __ovld __cnfn mul24(uint16, uint16);
// OpenCL v1.1 s6.11.4, v1.2 s6.12.4, v2.0 s6.13.4 - Common Functions
@@ -10006,153 +10020,153 @@ uint16 __ovld __cnfn mul24(uint16 x, uint16 y);
* Returns fmin(fmax(x, minval), maxval).
* Results are undefined if minval > maxval.
*/
-float __ovld __cnfn clamp(float x, float minval, float maxval);
-float2 __ovld __cnfn clamp(float2 x, float2 minval, float2 maxval);
-float3 __ovld __cnfn clamp(float3 x, float3 minval, float3 maxval);
-float4 __ovld __cnfn clamp(float4 x, float4 minval, float4 maxval);
-float8 __ovld __cnfn clamp(float8 x, float8 minval, float8 maxval);
-float16 __ovld __cnfn clamp(float16 x, float16 minval, float16 maxval);
-float2 __ovld __cnfn clamp(float2 x, float minval, float maxval);
-float3 __ovld __cnfn clamp(float3 x, float minval, float maxval);
-float4 __ovld __cnfn clamp(float4 x, float minval, float maxval);
-float8 __ovld __cnfn clamp(float8 x, float minval, float maxval);
-float16 __ovld __cnfn clamp(float16 x, float minval, float maxval);
+float __ovld __cnfn clamp(float, float, float);
+float2 __ovld __cnfn clamp(float2, float2, float2);
+float3 __ovld __cnfn clamp(float3, float3, float3);
+float4 __ovld __cnfn clamp(float4, float4, float4);
+float8 __ovld __cnfn clamp(float8, float8, float8);
+float16 __ovld __cnfn clamp(float16, float16, float16);
+float2 __ovld __cnfn clamp(float2, float, float);
+float3 __ovld __cnfn clamp(float3, float, float);
+float4 __ovld __cnfn clamp(float4, float, float);
+float8 __ovld __cnfn clamp(float8, float, float);
+float16 __ovld __cnfn clamp(float16, float, float);
#ifdef cl_khr_fp64
-double __ovld __cnfn clamp(double x, double minval, double maxval);
-double2 __ovld __cnfn clamp(double2 x, double2 minval, double2 maxval);
-double3 __ovld __cnfn clamp(double3 x, double3 minval, double3 maxval);
-double4 __ovld __cnfn clamp(double4 x, double4 minval, double4 maxval);
-double8 __ovld __cnfn clamp(double8 x, double8 minval, double8 maxval);
-double16 __ovld __cnfn clamp(double16 x, double16 minval, double16 maxval);
-double2 __ovld __cnfn clamp(double2 x, double minval, double maxval);
-double3 __ovld __cnfn clamp(double3 x, double minval, double maxval);
-double4 __ovld __cnfn clamp(double4 x, double minval, double maxval);
-double8 __ovld __cnfn clamp(double8 x, double minval, double maxval);
-double16 __ovld __cnfn clamp(double16 x, double minval, double maxval);
+double __ovld __cnfn clamp(double, double, double);
+double2 __ovld __cnfn clamp(double2, double2, double2);
+double3 __ovld __cnfn clamp(double3, double3, double3);
+double4 __ovld __cnfn clamp(double4, double4, double4);
+double8 __ovld __cnfn clamp(double8, double8, double8);
+double16 __ovld __cnfn clamp(double16, double16, double16);
+double2 __ovld __cnfn clamp(double2, double, double);
+double3 __ovld __cnfn clamp(double3, double, double);
+double4 __ovld __cnfn clamp(double4, double, double);
+double8 __ovld __cnfn clamp(double8, double, double);
+double16 __ovld __cnfn clamp(double16, double, double);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn clamp(half x, half minval, half maxval);
-half2 __ovld __cnfn clamp(half2 x, half2 minval, half2 maxval);
-half3 __ovld __cnfn clamp(half3 x, half3 minval, half3 maxval);
-half4 __ovld __cnfn clamp(half4 x, half4 minval, half4 maxval);
-half8 __ovld __cnfn clamp(half8 x, half8 minval, half8 maxval);
-half16 __ovld __cnfn clamp(half16 x, half16 minval, half16 maxval);
-half2 __ovld __cnfn clamp(half2 x, half minval, half maxval);
-half3 __ovld __cnfn clamp(half3 x, half minval, half maxval);
-half4 __ovld __cnfn clamp(half4 x, half minval, half maxval);
-half8 __ovld __cnfn clamp(half8 x, half minval, half maxval);
-half16 __ovld __cnfn clamp(half16 x, half minval, half maxval);
+half __ovld __cnfn clamp(half, half, half);
+half2 __ovld __cnfn clamp(half2, half2, half2);
+half3 __ovld __cnfn clamp(half3, half3, half3);
+half4 __ovld __cnfn clamp(half4, half4, half4);
+half8 __ovld __cnfn clamp(half8, half8, half8);
+half16 __ovld __cnfn clamp(half16, half16, half16);
+half2 __ovld __cnfn clamp(half2, half, half);
+half3 __ovld __cnfn clamp(half3, half, half);
+half4 __ovld __cnfn clamp(half4, half, half);
+half8 __ovld __cnfn clamp(half8, half, half);
+half16 __ovld __cnfn clamp(half16, half, half);
#endif //cl_khr_fp16
/**
* Converts radians to degrees, i.e. (180 / PI) *
* radians.
*/
-float __ovld __cnfn degrees(float radians);
-float2 __ovld __cnfn degrees(float2 radians);
-float3 __ovld __cnfn degrees(float3 radians);
-float4 __ovld __cnfn degrees(float4 radians);
-float8 __ovld __cnfn degrees(float8 radians);
-float16 __ovld __cnfn degrees(float16 radians);
+float __ovld __cnfn degrees(float);
+float2 __ovld __cnfn degrees(float2);
+float3 __ovld __cnfn degrees(float3);
+float4 __ovld __cnfn degrees(float4);
+float8 __ovld __cnfn degrees(float8);
+float16 __ovld __cnfn degrees(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn degrees(double radians);
-double2 __ovld __cnfn degrees(double2 radians);
-double3 __ovld __cnfn degrees(double3 radians);
-double4 __ovld __cnfn degrees(double4 radians);
-double8 __ovld __cnfn degrees(double8 radians);
-double16 __ovld __cnfn degrees(double16 radians);
+double __ovld __cnfn degrees(double);
+double2 __ovld __cnfn degrees(double2);
+double3 __ovld __cnfn degrees(double3);
+double4 __ovld __cnfn degrees(double4);
+double8 __ovld __cnfn degrees(double8);
+double16 __ovld __cnfn degrees(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn degrees(half radians);
-half2 __ovld __cnfn degrees(half2 radians);
-half3 __ovld __cnfn degrees(half3 radians);
-half4 __ovld __cnfn degrees(half4 radians);
-half8 __ovld __cnfn degrees(half8 radians);
-half16 __ovld __cnfn degrees(half16 radians);
+half __ovld __cnfn degrees(half);
+half2 __ovld __cnfn degrees(half2);
+half3 __ovld __cnfn degrees(half3);
+half4 __ovld __cnfn degrees(half4);
+half8 __ovld __cnfn degrees(half8);
+half16 __ovld __cnfn degrees(half16);
#endif //cl_khr_fp16
/**
* Returns y if x < y, otherwise it returns x. If x and y
* are infinite or NaN, the return values are undefined.
*/
-float __ovld __cnfn max(float x, float y);
-float2 __ovld __cnfn max(float2 x, float2 y);
-float3 __ovld __cnfn max(float3 x, float3 y);
-float4 __ovld __cnfn max(float4 x, float4 y);
-float8 __ovld __cnfn max(float8 x, float8 y);
-float16 __ovld __cnfn max(float16 x, float16 y);
-float2 __ovld __cnfn max(float2 x, float y);
-float3 __ovld __cnfn max(float3 x, float y);
-float4 __ovld __cnfn max(float4 x, float y);
-float8 __ovld __cnfn max(float8 x, float y);
-float16 __ovld __cnfn max(float16 x, float y);
+float __ovld __cnfn max(float, float);
+float2 __ovld __cnfn max(float2, float2);
+float3 __ovld __cnfn max(float3, float3);
+float4 __ovld __cnfn max(float4, float4);
+float8 __ovld __cnfn max(float8, float8);
+float16 __ovld __cnfn max(float16, float16);
+float2 __ovld __cnfn max(float2, float);
+float3 __ovld __cnfn max(float3, float);
+float4 __ovld __cnfn max(float4, float);
+float8 __ovld __cnfn max(float8, float);
+float16 __ovld __cnfn max(float16, float);
#ifdef cl_khr_fp64
-double __ovld __cnfn max(double x, double y);
-double2 __ovld __cnfn max(double2 x, double2 y);
-double3 __ovld __cnfn max(double3 x, double3 y);
-double4 __ovld __cnfn max(double4 x, double4 y);
-double8 __ovld __cnfn max(double8 x, double8 y);
-double16 __ovld __cnfn max(double16 x, double16 y);
-double2 __ovld __cnfn max(double2 x, double y);
-double3 __ovld __cnfn max(double3 x, double y);
-double4 __ovld __cnfn max(double4 x, double y);
-double8 __ovld __cnfn max(double8 x, double y);
-double16 __ovld __cnfn max(double16 x, double y);
+double __ovld __cnfn max(double, double);
+double2 __ovld __cnfn max(double2, double2);
+double3 __ovld __cnfn max(double3, double3);
+double4 __ovld __cnfn max(double4, double4);
+double8 __ovld __cnfn max(double8, double8);
+double16 __ovld __cnfn max(double16, double16);
+double2 __ovld __cnfn max(double2, double);
+double3 __ovld __cnfn max(double3, double);
+double4 __ovld __cnfn max(double4, double);
+double8 __ovld __cnfn max(double8, double);
+double16 __ovld __cnfn max(double16, double);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn max(half x, half y);
-half2 __ovld __cnfn max(half2 x, half2 y);
-half3 __ovld __cnfn max(half3 x, half3 y);
-half4 __ovld __cnfn max(half4 x, half4 y);
-half8 __ovld __cnfn max(half8 x, half8 y);
-half16 __ovld __cnfn max(half16 x, half16 y);
-half2 __ovld __cnfn max(half2 x, half y);
-half3 __ovld __cnfn max(half3 x, half y);
-half4 __ovld __cnfn max(half4 x, half y);
-half8 __ovld __cnfn max(half8 x, half y);
-half16 __ovld __cnfn max(half16 x, half y);
+half __ovld __cnfn max(half, half);
+half2 __ovld __cnfn max(half2, half2);
+half3 __ovld __cnfn max(half3, half3);
+half4 __ovld __cnfn max(half4, half4);
+half8 __ovld __cnfn max(half8, half8);
+half16 __ovld __cnfn max(half16, half16);
+half2 __ovld __cnfn max(half2, half);
+half3 __ovld __cnfn max(half3, half);
+half4 __ovld __cnfn max(half4, half);
+half8 __ovld __cnfn max(half8, half);
+half16 __ovld __cnfn max(half16, half);
#endif //cl_khr_fp16
/**
* Returns y if y < x, otherwise it returns x. If x and y
* are infinite or NaN, the return values are undefined.
*/
-float __ovld __cnfn min(float x, float y);
-float2 __ovld __cnfn min(float2 x, float2 y);
-float3 __ovld __cnfn min(float3 x, float3 y);
-float4 __ovld __cnfn min(float4 x, float4 y);
-float8 __ovld __cnfn min(float8 x, float8 y);
-float16 __ovld __cnfn min(float16 x, float16 y);
-float2 __ovld __cnfn min(float2 x, float y);
-float3 __ovld __cnfn min(float3 x, float y);
-float4 __ovld __cnfn min(float4 x, float y);
-float8 __ovld __cnfn min(float8 x, float y);
-float16 __ovld __cnfn min(float16 x, float y);
+float __ovld __cnfn min(float, float);
+float2 __ovld __cnfn min(float2, float2);
+float3 __ovld __cnfn min(float3, float3);
+float4 __ovld __cnfn min(float4, float4);
+float8 __ovld __cnfn min(float8, float8);
+float16 __ovld __cnfn min(float16, float16);
+float2 __ovld __cnfn min(float2, float);
+float3 __ovld __cnfn min(float3, float);
+float4 __ovld __cnfn min(float4, float);
+float8 __ovld __cnfn min(float8, float);
+float16 __ovld __cnfn min(float16, float);
#ifdef cl_khr_fp64
-double __ovld __cnfn min(double x, double y);
-double2 __ovld __cnfn min(double2 x, double2 y);
-double3 __ovld __cnfn min(double3 x, double3 y);
-double4 __ovld __cnfn min(double4 x, double4 y);
-double8 __ovld __cnfn min(double8 x, double8 y);
-double16 __ovld __cnfn min(double16 x, double16 y);
-double2 __ovld __cnfn min(double2 x, double y);
-double3 __ovld __cnfn min(double3 x, double y);
-double4 __ovld __cnfn min(double4 x, double y);
-double8 __ovld __cnfn min(double8 x, double y);
-double16 __ovld __cnfn min(double16 x, double y);
+double __ovld __cnfn min(double, double);
+double2 __ovld __cnfn min(double2, double2);
+double3 __ovld __cnfn min(double3, double3);
+double4 __ovld __cnfn min(double4, double4);
+double8 __ovld __cnfn min(double8, double8);
+double16 __ovld __cnfn min(double16, double16);
+double2 __ovld __cnfn min(double2, double);
+double3 __ovld __cnfn min(double3, double);
+double4 __ovld __cnfn min(double4, double);
+double8 __ovld __cnfn min(double8, double);
+double16 __ovld __cnfn min(double16, double);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn min(half x, half y);
-half2 __ovld __cnfn min(half2 x, half2 y);
-half3 __ovld __cnfn min(half3 x, half3 y);
-half4 __ovld __cnfn min(half4 x, half4 y);
-half8 __ovld __cnfn min(half8 x, half8 y);
-half16 __ovld __cnfn min(half16 x, half16 y);
-half2 __ovld __cnfn min(half2 x, half y);
-half3 __ovld __cnfn min(half3 x, half y);
-half4 __ovld __cnfn min(half4 x, half y);
-half8 __ovld __cnfn min(half8 x, half y);
-half16 __ovld __cnfn min(half16 x, half y);
+half __ovld __cnfn min(half, half);
+half2 __ovld __cnfn min(half2, half2);
+half3 __ovld __cnfn min(half3, half3);
+half4 __ovld __cnfn min(half4, half4);
+half8 __ovld __cnfn min(half8, half8);
+half16 __ovld __cnfn min(half16, half16);
+half2 __ovld __cnfn min(half2, half);
+half3 __ovld __cnfn min(half3, half);
+half4 __ovld __cnfn min(half4, half);
+half8 __ovld __cnfn min(half8, half);
+half16 __ovld __cnfn min(half16, half);
#endif //cl_khr_fp16
/**
@@ -10162,110 +10176,110 @@ half16 __ovld __cnfn min(half16 x, half y);
* in the range 0.0 ... 1.0, the return values are
* undefined.
*/
-float __ovld __cnfn mix(float x, float y, float a);
-float2 __ovld __cnfn mix(float2 x, float2 y, float2 a);
-float3 __ovld __cnfn mix(float3 x, float3 y, float3 a);
-float4 __ovld __cnfn mix(float4 x, float4 y, float4 a);
-float8 __ovld __cnfn mix(float8 x, float8 y, float8 a);
-float16 __ovld __cnfn mix(float16 x, float16 y, float16 a);
-float2 __ovld __cnfn mix(float2 x, float2 y, float a);
-float3 __ovld __cnfn mix(float3 x, float3 y, float a);
-float4 __ovld __cnfn mix(float4 x, float4 y, float a);
-float8 __ovld __cnfn mix(float8 x, float8 y, float a);
-float16 __ovld __cnfn mix(float16 x, float16 y, float a);
+float __ovld __cnfn mix(float, float, float);
+float2 __ovld __cnfn mix(float2, float2, float2);
+float3 __ovld __cnfn mix(float3, float3, float3);
+float4 __ovld __cnfn mix(float4, float4, float4);
+float8 __ovld __cnfn mix(float8, float8, float8);
+float16 __ovld __cnfn mix(float16, float16, float16);
+float2 __ovld __cnfn mix(float2, float2, float);
+float3 __ovld __cnfn mix(float3, float3, float);
+float4 __ovld __cnfn mix(float4, float4, float);
+float8 __ovld __cnfn mix(float8, float8, float);
+float16 __ovld __cnfn mix(float16, float16, float);
#ifdef cl_khr_fp64
-double __ovld __cnfn mix(double x, double y, double a);
-double2 __ovld __cnfn mix(double2 x, double2 y, double2 a);
-double3 __ovld __cnfn mix(double3 x, double3 y, double3 a);
-double4 __ovld __cnfn mix(double4 x, double4 y, double4 a);
-double8 __ovld __cnfn mix(double8 x, double8 y, double8 a);
-double16 __ovld __cnfn mix(double16 x, double16 y, double16 a);
-double2 __ovld __cnfn mix(double2 x, double2 y, double a);
-double3 __ovld __cnfn mix(double3 x, double3 y, double a);
-double4 __ovld __cnfn mix(double4 x, double4 y, double a);
-double8 __ovld __cnfn mix(double8 x, double8 y, double a);
-double16 __ovld __cnfn mix(double16 x, double16 y, double a);
+double __ovld __cnfn mix(double, double, double);
+double2 __ovld __cnfn mix(double2, double2, double2);
+double3 __ovld __cnfn mix(double3, double3, double3);
+double4 __ovld __cnfn mix(double4, double4, double4);
+double8 __ovld __cnfn mix(double8, double8, double8);
+double16 __ovld __cnfn mix(double16, double16, double16);
+double2 __ovld __cnfn mix(double2, double2, double);
+double3 __ovld __cnfn mix(double3, double3, double);
+double4 __ovld __cnfn mix(double4, double4, double);
+double8 __ovld __cnfn mix(double8, double8, double);
+double16 __ovld __cnfn mix(double16, double16, double);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn mix(half x, half y, half a);
-half2 __ovld __cnfn mix(half2 x, half2 y, half2 a);
-half3 __ovld __cnfn mix(half3 x, half3 y, half3 a);
-half4 __ovld __cnfn mix(half4 x, half4 y, half4 a);
-half8 __ovld __cnfn mix(half8 x, half8 y, half8 a);
-half16 __ovld __cnfn mix(half16 x, half16 y, half16 a);
-half2 __ovld __cnfn mix(half2 x, half2 y, half a);
-half3 __ovld __cnfn mix(half3 x, half3 y, half a);
-half4 __ovld __cnfn mix(half4 x, half4 y, half a);
-half8 __ovld __cnfn mix(half8 x, half8 y, half a);
-half16 __ovld __cnfn mix(half16 x, half16 y, half a);
+half __ovld __cnfn mix(half, half, half);
+half2 __ovld __cnfn mix(half2, half2, half2);
+half3 __ovld __cnfn mix(half3, half3, half3);
+half4 __ovld __cnfn mix(half4, half4, half4);
+half8 __ovld __cnfn mix(half8, half8, half8);
+half16 __ovld __cnfn mix(half16, half16, half16);
+half2 __ovld __cnfn mix(half2, half2, half);
+half3 __ovld __cnfn mix(half3, half3, half);
+half4 __ovld __cnfn mix(half4, half4, half);
+half8 __ovld __cnfn mix(half8, half8, half);
+half16 __ovld __cnfn mix(half16, half16, half);
#endif //cl_khr_fp16
/**
* Converts degrees to radians, i.e. (PI / 180) *
* degrees.
*/
-float __ovld __cnfn radians(float degrees);
-float2 __ovld __cnfn radians(float2 degrees);
-float3 __ovld __cnfn radians(float3 degrees);
-float4 __ovld __cnfn radians(float4 degrees);
-float8 __ovld __cnfn radians(float8 degrees);
-float16 __ovld __cnfn radians(float16 degrees);
+float __ovld __cnfn radians(float);
+float2 __ovld __cnfn radians(float2);
+float3 __ovld __cnfn radians(float3);
+float4 __ovld __cnfn radians(float4);
+float8 __ovld __cnfn radians(float8);
+float16 __ovld __cnfn radians(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn radians(double degrees);
-double2 __ovld __cnfn radians(double2 degrees);
-double3 __ovld __cnfn radians(double3 degrees);
-double4 __ovld __cnfn radians(double4 degrees);
-double8 __ovld __cnfn radians(double8 degrees);
-double16 __ovld __cnfn radians(double16 degrees);
+double __ovld __cnfn radians(double);
+double2 __ovld __cnfn radians(double2);
+double3 __ovld __cnfn radians(double3);
+double4 __ovld __cnfn radians(double4);
+double8 __ovld __cnfn radians(double8);
+double16 __ovld __cnfn radians(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn radians(half degrees);
-half2 __ovld __cnfn radians(half2 degrees);
-half3 __ovld __cnfn radians(half3 degrees);
-half4 __ovld __cnfn radians(half4 degrees);
-half8 __ovld __cnfn radians(half8 degrees);
-half16 __ovld __cnfn radians(half16 degrees);
+half __ovld __cnfn radians(half);
+half2 __ovld __cnfn radians(half2);
+half3 __ovld __cnfn radians(half3);
+half4 __ovld __cnfn radians(half4);
+half8 __ovld __cnfn radians(half8);
+half16 __ovld __cnfn radians(half16);
#endif //cl_khr_fp16
/**
* Returns 0.0 if x < edge, otherwise it returns 1.0.
*/
-float __ovld __cnfn step(float edge, float x);
-float2 __ovld __cnfn step(float2 edge, float2 x);
-float3 __ovld __cnfn step(float3 edge, float3 x);
-float4 __ovld __cnfn step(float4 edge, float4 x);
-float8 __ovld __cnfn step(float8 edge, float8 x);
-float16 __ovld __cnfn step(float16 edge, float16 x);
-float2 __ovld __cnfn step(float edge, float2 x);
-float3 __ovld __cnfn step(float edge, float3 x);
-float4 __ovld __cnfn step(float edge, float4 x);
-float8 __ovld __cnfn step(float edge, float8 x);
-float16 __ovld __cnfn step(float edge, float16 x);
+float __ovld __cnfn step(float, float);
+float2 __ovld __cnfn step(float2, float2);
+float3 __ovld __cnfn step(float3, float3);
+float4 __ovld __cnfn step(float4, float4);
+float8 __ovld __cnfn step(float8, float8);
+float16 __ovld __cnfn step(float16, float16);
+float2 __ovld __cnfn step(float, float2);
+float3 __ovld __cnfn step(float, float3);
+float4 __ovld __cnfn step(float, float4);
+float8 __ovld __cnfn step(float, float8);
+float16 __ovld __cnfn step(float, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn step(double edge, double x);
-double2 __ovld __cnfn step(double2 edge, double2 x);
-double3 __ovld __cnfn step(double3 edge, double3 x);
-double4 __ovld __cnfn step(double4 edge, double4 x);
-double8 __ovld __cnfn step(double8 edge, double8 x);
-double16 __ovld __cnfn step(double16 edge, double16 x);
-double2 __ovld __cnfn step(double edge, double2 x);
-double3 __ovld __cnfn step(double edge, double3 x);
-double4 __ovld __cnfn step(double edge, double4 x);
-double8 __ovld __cnfn step(double edge, double8 x);
-double16 __ovld __cnfn step(double edge, double16 x);
+double __ovld __cnfn step(double, double);
+double2 __ovld __cnfn step(double2, double2);
+double3 __ovld __cnfn step(double3, double3);
+double4 __ovld __cnfn step(double4, double4);
+double8 __ovld __cnfn step(double8, double8);
+double16 __ovld __cnfn step(double16, double16);
+double2 __ovld __cnfn step(double, double2);
+double3 __ovld __cnfn step(double, double3);
+double4 __ovld __cnfn step(double, double4);
+double8 __ovld __cnfn step(double, double8);
+double16 __ovld __cnfn step(double, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn step(half edge, half x);
-half2 __ovld __cnfn step(half2 edge, half2 x);
-half3 __ovld __cnfn step(half3 edge, half3 x);
-half4 __ovld __cnfn step(half4 edge, half4 x);
-half8 __ovld __cnfn step(half8 edge, half8 x);
-half16 __ovld __cnfn step(half16 edge, half16 x);
-half2 __ovld __cnfn step(half edge, half2 x);
-half3 __ovld __cnfn step(half edge, half3 x);
-half4 __ovld __cnfn step(half edge, half4 x);
-half8 __ovld __cnfn step(half edge, half8 x);
-half16 __ovld __cnfn step(half edge, half16 x);
+half __ovld __cnfn step(half, half);
+half2 __ovld __cnfn step(half2, half2);
+half3 __ovld __cnfn step(half3, half3);
+half4 __ovld __cnfn step(half4, half4);
+half8 __ovld __cnfn step(half8, half8);
+half16 __ovld __cnfn step(half16, half16);
+half2 __ovld __cnfn step(half, half2);
+half3 __ovld __cnfn step(half, half3);
+half4 __ovld __cnfn step(half, half4);
+half8 __ovld __cnfn step(half, half8);
+half16 __ovld __cnfn step(half, half16);
#endif //cl_khr_fp16
/**
@@ -10281,69 +10295,69 @@ half16 __ovld __cnfn step(half edge, half16 x);
* Results are undefined if edge0 >= edge1 or if x,
* edge0 or edge1 is a NaN.
*/
-float __ovld __cnfn smoothstep(float edge0, float edge1, float x);
-float2 __ovld __cnfn smoothstep(float2 edge0, float2 edge1, float2 x);
-float3 __ovld __cnfn smoothstep(float3 edge0, float3 edge1, float3 x);
-float4 __ovld __cnfn smoothstep(float4 edge0, float4 edge1, float4 x);
-float8 __ovld __cnfn smoothstep(float8 edge0, float8 edge1, float8 x);
-float16 __ovld __cnfn smoothstep(float16 edge0, float16 edge1, float16 x);
-float2 __ovld __cnfn smoothstep(float edge0, float edge1, float2 x);
-float3 __ovld __cnfn smoothstep(float edge0, float edge1, float3 x);
-float4 __ovld __cnfn smoothstep(float edge0, float edge1, float4 x);
-float8 __ovld __cnfn smoothstep(float edge0, float edge1, float8 x);
-float16 __ovld __cnfn smoothstep(float edge0, float edge1, float16 x);
+float __ovld __cnfn smoothstep(float, float, float);
+float2 __ovld __cnfn smoothstep(float2, float2, float2);
+float3 __ovld __cnfn smoothstep(float3, float3, float3);
+float4 __ovld __cnfn smoothstep(float4, float4, float4);
+float8 __ovld __cnfn smoothstep(float8, float8, float8);
+float16 __ovld __cnfn smoothstep(float16, float16, float16);
+float2 __ovld __cnfn smoothstep(float, float, float2);
+float3 __ovld __cnfn smoothstep(float, float, float3);
+float4 __ovld __cnfn smoothstep(float, float, float4);
+float8 __ovld __cnfn smoothstep(float, float, float8);
+float16 __ovld __cnfn smoothstep(float, float, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn smoothstep(double edge0, double edge1, double x);
-double2 __ovld __cnfn smoothstep(double2 edge0, double2 edge1, double2 x);
-double3 __ovld __cnfn smoothstep(double3 edge0, double3 edge1, double3 x);
-double4 __ovld __cnfn smoothstep(double4 edge0, double4 edge1, double4 x);
-double8 __ovld __cnfn smoothstep(double8 edge0, double8 edge1, double8 x);
-double16 __ovld __cnfn smoothstep(double16 edge0, double16 edge1, double16 x);
-double2 __ovld __cnfn smoothstep(double edge0, double edge1, double2 x);
-double3 __ovld __cnfn smoothstep(double edge0, double edge1, double3 x);
-double4 __ovld __cnfn smoothstep(double edge0, double edge1, double4 x);
-double8 __ovld __cnfn smoothstep(double edge0, double edge1, double8 x);
-double16 __ovld __cnfn smoothstep(double edge0, double edge1, double16 x);
+double __ovld __cnfn smoothstep(double, double, double);
+double2 __ovld __cnfn smoothstep(double2, double2, double2);
+double3 __ovld __cnfn smoothstep(double3, double3, double3);
+double4 __ovld __cnfn smoothstep(double4, double4, double4);
+double8 __ovld __cnfn smoothstep(double8, double8, double8);
+double16 __ovld __cnfn smoothstep(double16, double16, double16);
+double2 __ovld __cnfn smoothstep(double, double, double2);
+double3 __ovld __cnfn smoothstep(double, double, double3);
+double4 __ovld __cnfn smoothstep(double, double, double4);
+double8 __ovld __cnfn smoothstep(double, double, double8);
+double16 __ovld __cnfn smoothstep(double, double, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn smoothstep(half edge0, half edge1, half x);
-half2 __ovld __cnfn smoothstep(half2 edge0, half2 edge1, half2 x);
-half3 __ovld __cnfn smoothstep(half3 edge0, half3 edge1, half3 x);
-half4 __ovld __cnfn smoothstep(half4 edge0, half4 edge1, half4 x);
-half8 __ovld __cnfn smoothstep(half8 edge0, half8 edge1, half8 x);
-half16 __ovld __cnfn smoothstep(half16 edge0, half16 edge1, half16 x);
-half2 __ovld __cnfn smoothstep(half edge0, half edge1, half2 x);
-half3 __ovld __cnfn smoothstep(half edge0, half edge1, half3 x);
-half4 __ovld __cnfn smoothstep(half edge0, half edge1, half4 x);
-half8 __ovld __cnfn smoothstep(half edge0, half edge1, half8 x);
-half16 __ovld __cnfn smoothstep(half edge0, half edge1, half16 x);
+half __ovld __cnfn smoothstep(half, half, half);
+half2 __ovld __cnfn smoothstep(half2, half2, half2);
+half3 __ovld __cnfn smoothstep(half3, half3, half3);
+half4 __ovld __cnfn smoothstep(half4, half4, half4);
+half8 __ovld __cnfn smoothstep(half8, half8, half8);
+half16 __ovld __cnfn smoothstep(half16, half16, half16);
+half2 __ovld __cnfn smoothstep(half, half, half2);
+half3 __ovld __cnfn smoothstep(half, half, half3);
+half4 __ovld __cnfn smoothstep(half, half, half4);
+half8 __ovld __cnfn smoothstep(half, half, half8);
+half16 __ovld __cnfn smoothstep(half, half, half16);
#endif //cl_khr_fp16
/**
* Returns 1.0 if x > 0, -0.0 if x = -0.0, +0.0 if x =
* +0.0, or -1.0 if x < 0. Returns 0.0 if x is a NaN.
*/
-float __ovld __cnfn sign(float x);
-float2 __ovld __cnfn sign(float2 x);
-float3 __ovld __cnfn sign(float3 x);
-float4 __ovld __cnfn sign(float4 x);
-float8 __ovld __cnfn sign(float8 x);
-float16 __ovld __cnfn sign(float16 x);
+float __ovld __cnfn sign(float);
+float2 __ovld __cnfn sign(float2);
+float3 __ovld __cnfn sign(float3);
+float4 __ovld __cnfn sign(float4);
+float8 __ovld __cnfn sign(float8);
+float16 __ovld __cnfn sign(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn sign(double x);
-double2 __ovld __cnfn sign(double2 x);
-double3 __ovld __cnfn sign(double3 x);
-double4 __ovld __cnfn sign(double4 x);
-double8 __ovld __cnfn sign(double8 x);
-double16 __ovld __cnfn sign(double16 x);
+double __ovld __cnfn sign(double);
+double2 __ovld __cnfn sign(double2);
+double3 __ovld __cnfn sign(double3);
+double4 __ovld __cnfn sign(double4);
+double8 __ovld __cnfn sign(double8);
+double16 __ovld __cnfn sign(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn sign(half x);
-half2 __ovld __cnfn sign(half2 x);
-half3 __ovld __cnfn sign(half3 x);
-half4 __ovld __cnfn sign(half4 x);
-half8 __ovld __cnfn sign(half8 x);
-half16 __ovld __cnfn sign(half16 x);
+half __ovld __cnfn sign(half);
+half2 __ovld __cnfn sign(half2);
+half3 __ovld __cnfn sign(half3);
+half4 __ovld __cnfn sign(half4);
+half8 __ovld __cnfn sign(half8);
+half16 __ovld __cnfn sign(half16);
#endif //cl_khr_fp16
// OpenCL v1.1 s6.11.5, v1.2 s6.12.5, v2.0 s6.13.5 - Geometric Functions
@@ -10352,128 +10366,116 @@ half16 __ovld __cnfn sign(half16 x);
* Returns the cross product of p0.xyz and p1.xyz. The
* w component of float4 result returned will be 0.0.
*/
-float4 __ovld __cnfn cross(float4 p0, float4 p1);
-float3 __ovld __cnfn cross(float3 p0, float3 p1);
+float4 __ovld __cnfn cross(float4, float4);
+float3 __ovld __cnfn cross(float3, float3);
#ifdef cl_khr_fp64
-double4 __ovld __cnfn cross(double4 p0, double4 p1);
-double3 __ovld __cnfn cross(double3 p0, double3 p1);
+double4 __ovld __cnfn cross(double4, double4);
+double3 __ovld __cnfn cross(double3, double3);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half4 __ovld __cnfn cross(half4 p0, half4 p1);
-half3 __ovld __cnfn cross(half3 p0, half3 p1);
+half4 __ovld __cnfn cross(half4, half4);
+half3 __ovld __cnfn cross(half3, half3);
#endif //cl_khr_fp16
/**
* Compute dot product.
*/
-float __ovld __cnfn dot(float p0, float p1);
-float __ovld __cnfn dot(float2 p0, float2 p1);
-float __ovld __cnfn dot(float3 p0, float3 p1);
-float __ovld __cnfn dot(float4 p0, float4 p1);
+float __ovld __cnfn dot(float, float);
+float __ovld __cnfn dot(float2, float2);
+float __ovld __cnfn dot(float3, float3);
+float __ovld __cnfn dot(float4, float4);
#ifdef cl_khr_fp64
-double __ovld __cnfn dot(double p0, double p1);
-double __ovld __cnfn dot(double2 p0, double2 p1);
-double __ovld __cnfn dot(double3 p0, double3 p1);
-double __ovld __cnfn dot(double4 p0, double4 p1);
+double __ovld __cnfn dot(double, double);
+double __ovld __cnfn dot(double2, double2);
+double __ovld __cnfn dot(double3, double3);
+double __ovld __cnfn dot(double4, double4);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn dot(half p0, half p1);
-half __ovld __cnfn dot(half2 p0, half2 p1);
-half __ovld __cnfn dot(half3 p0, half3 p1);
-half __ovld __cnfn dot(half4 p0, half4 p1);
+half __ovld __cnfn dot(half, half);
+half __ovld __cnfn dot(half2, half2);
+half __ovld __cnfn dot(half3, half3);
+half __ovld __cnfn dot(half4, half4);
#endif //cl_khr_fp16
/**
* Returns the distance between p0 and p1. This is
* calculated as length(p0 - p1).
*/
-float __ovld __cnfn distance(float p0, float p1);
-float __ovld __cnfn distance(float2 p0, float2 p1);
-float __ovld __cnfn distance(float3 p0, float3 p1);
-float __ovld __cnfn distance(float4 p0, float4 p1);
+float __ovld __cnfn distance(float, float);
+float __ovld __cnfn distance(float2, float2);
+float __ovld __cnfn distance(float3, float3);
+float __ovld __cnfn distance(float4, float4);
#ifdef cl_khr_fp64
-double __ovld __cnfn distance(double p0, double p1);
-double __ovld __cnfn distance(double2 p0, double2 p1);
-double __ovld __cnfn distance(double3 p0, double3 p1);
-double __ovld __cnfn distance(double4 p0, double4 p1);
+double __ovld __cnfn distance(double, double);
+double __ovld __cnfn distance(double2, double2);
+double __ovld __cnfn distance(double3, double3);
+double __ovld __cnfn distance(double4, double4);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn distance(half p0, half p1);
-half __ovld __cnfn distance(half2 p0, half2 p1);
-half __ovld __cnfn distance(half3 p0, half3 p1);
-half __ovld __cnfn distance(half4 p0, half4 p1);
+half __ovld __cnfn distance(half, half);
+half __ovld __cnfn distance(half2, half2);
+half __ovld __cnfn distance(half3, half3);
+half __ovld __cnfn distance(half4, half4);
#endif //cl_khr_fp16
/**
* Return the length of vector p, i.e.,
* sqrt(p.x2 + p.y 2 + ...)
*/
-float __ovld __cnfn length(float p);
-float __ovld __cnfn length(float2 p);
-float __ovld __cnfn length(float3 p);
-float __ovld __cnfn length(float4 p);
+float __ovld __cnfn length(float);
+float __ovld __cnfn length(float2);
+float __ovld __cnfn length(float3);
+float __ovld __cnfn length(float4);
#ifdef cl_khr_fp64
-double __ovld __cnfn length(double p);
-double __ovld __cnfn length(double2 p);
-double __ovld __cnfn length(double3 p);
-double __ovld __cnfn length(double4 p);
+double __ovld __cnfn length(double);
+double __ovld __cnfn length(double2);
+double __ovld __cnfn length(double3);
+double __ovld __cnfn length(double4);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn length(half p);
-half __ovld __cnfn length(half2 p);
-half __ovld __cnfn length(half3 p);
-half __ovld __cnfn length(half4 p);
+half __ovld __cnfn length(half);
+half __ovld __cnfn length(half2);
+half __ovld __cnfn length(half3);
+half __ovld __cnfn length(half4);
#endif //cl_khr_fp16
/**
* Returns a vector in the same direction as p but with a
* length of 1.
*/
-float __ovld __cnfn normalize(float p);
-float2 __ovld __cnfn normalize(float2 p);
-float3 __ovld __cnfn normalize(float3 p);
-float4 __ovld __cnfn normalize(float4 p);
+float __ovld __cnfn normalize(float);
+float2 __ovld __cnfn normalize(float2);
+float3 __ovld __cnfn normalize(float3);
+float4 __ovld __cnfn normalize(float4);
#ifdef cl_khr_fp64
-double __ovld __cnfn normalize(double p);
-double2 __ovld __cnfn normalize(double2 p);
-double3 __ovld __cnfn normalize(double3 p);
-double4 __ovld __cnfn normalize(double4 p);
+double __ovld __cnfn normalize(double);
+double2 __ovld __cnfn normalize(double2);
+double3 __ovld __cnfn normalize(double3);
+double4 __ovld __cnfn normalize(double4);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn normalize(half p);
-half2 __ovld __cnfn normalize(half2 p);
-half3 __ovld __cnfn normalize(half3 p);
-half4 __ovld __cnfn normalize(half4 p);
+half __ovld __cnfn normalize(half);
+half2 __ovld __cnfn normalize(half2);
+half3 __ovld __cnfn normalize(half3);
+half4 __ovld __cnfn normalize(half4);
#endif //cl_khr_fp16
/**
* Returns fast_length(p0 - p1).
*/
-float __ovld __cnfn fast_distance(float p0, float p1);
-float __ovld __cnfn fast_distance(float2 p0, float2 p1);
-float __ovld __cnfn fast_distance(float3 p0, float3 p1);
-float __ovld __cnfn fast_distance(float4 p0, float4 p1);
-#ifdef cl_khr_fp16
-half __ovld __cnfn fast_distance(half p0, half p1);
-half __ovld __cnfn fast_distance(half2 p0, half2 p1);
-half __ovld __cnfn fast_distance(half3 p0, half3 p1);
-half __ovld __cnfn fast_distance(half4 p0, half4 p1);
-#endif //cl_khr_fp16
+float __ovld __cnfn fast_distance(float, float);
+float __ovld __cnfn fast_distance(float2, float2);
+float __ovld __cnfn fast_distance(float3, float3);
+float __ovld __cnfn fast_distance(float4, float4);
/**
* Returns the length of vector p computed as:
* half_sqrt(p.x2 + p.y2 + ...)
*/
-float __ovld __cnfn fast_length(float p);
-float __ovld __cnfn fast_length(float2 p);
-float __ovld __cnfn fast_length(float3 p);
-float __ovld __cnfn fast_length(float4 p);
-#ifdef cl_khr_fp16
-half __ovld __cnfn fast_length(half p);
-half __ovld __cnfn fast_length(half2 p);
-half __ovld __cnfn fast_length(half3 p);
-half __ovld __cnfn fast_length(half4 p);
-#endif //cl_khr_fp16
+float __ovld __cnfn fast_length(float);
+float __ovld __cnfn fast_length(float2);
+float __ovld __cnfn fast_length(float3);
+float __ovld __cnfn fast_length(float4);
/**
* Returns a vector in the same direction as p but with a
@@ -10496,16 +10498,10 @@ half __ovld __cnfn fast_length(half4 p);
* less than sqrt(FLT_MIN) may be flushed to zero
* before proceeding with the calculation.
*/
-float __ovld __cnfn fast_normalize(float p);
-float2 __ovld __cnfn fast_normalize(float2 p);
-float3 __ovld __cnfn fast_normalize(float3 p);
-float4 __ovld __cnfn fast_normalize(float4 p);
-#ifdef cl_khr_fp16
-half __ovld __cnfn fast_normalize(half p);
-half2 __ovld __cnfn fast_normalize(half2 p);
-half3 __ovld __cnfn fast_normalize(half3 p);
-half4 __ovld __cnfn fast_normalize(half4 p);
-#endif //cl_khr_fp16
+float __ovld __cnfn fast_normalize(float);
+float2 __ovld __cnfn fast_normalize(float2);
+float3 __ovld __cnfn fast_normalize(float3);
+float4 __ovld __cnfn fast_normalize(float4);
// OpenCL v1.1 s6.11.6, v1.2 s6.12.6, v2.0 s6.13.6 - Relational Functions
@@ -10513,184 +10509,184 @@ half4 __ovld __cnfn fast_normalize(half4 p);
* intn isequal (floatn x, floatn y)
* Returns the component-wise compare of x == y.
*/
-int __ovld __cnfn isequal(float x, float y);
-int2 __ovld __cnfn isequal(float2 x, float2 y);
-int3 __ovld __cnfn isequal(float3 x, float3 y);
-int4 __ovld __cnfn isequal(float4 x, float4 y);
-int8 __ovld __cnfn isequal(float8 x, float8 y);
-int16 __ovld __cnfn isequal(float16 x, float16 y);
+int __ovld __cnfn isequal(float, float);
+int2 __ovld __cnfn isequal(float2, float2);
+int3 __ovld __cnfn isequal(float3, float3);
+int4 __ovld __cnfn isequal(float4, float4);
+int8 __ovld __cnfn isequal(float8, float8);
+int16 __ovld __cnfn isequal(float16, float16);
#ifdef cl_khr_fp64
-int __ovld __cnfn isequal(double x, double y);
-long2 __ovld __cnfn isequal(double2 x, double2 y);
-long3 __ovld __cnfn isequal(double3 x, double3 y);
-long4 __ovld __cnfn isequal(double4 x, double4 y);
-long8 __ovld __cnfn isequal(double8 x, double8 y);
-long16 __ovld __cnfn isequal(double16 x, double16 y);
+int __ovld __cnfn isequal(double, double);
+long2 __ovld __cnfn isequal(double2, double2);
+long3 __ovld __cnfn isequal(double3, double3);
+long4 __ovld __cnfn isequal(double4, double4);
+long8 __ovld __cnfn isequal(double8, double8);
+long16 __ovld __cnfn isequal(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-int __ovld __cnfn isequal(half x, half y);
-short2 __ovld __cnfn isequal(half2 x, half2 y);
-short3 __ovld __cnfn isequal(half3 x, half3 y);
-short4 __ovld __cnfn isequal(half4 x, half4 y);
-short8 __ovld __cnfn isequal(half8 x, half8 y);
-short16 __ovld __cnfn isequal(half16 x, half16 y);
+int __ovld __cnfn isequal(half, half);
+short2 __ovld __cnfn isequal(half2, half2);
+short3 __ovld __cnfn isequal(half3, half3);
+short4 __ovld __cnfn isequal(half4, half4);
+short8 __ovld __cnfn isequal(half8, half8);
+short16 __ovld __cnfn isequal(half16, half16);
#endif //cl_khr_fp16
/**
* Returns the component-wise compare of x != y.
*/
-int __ovld __cnfn isnotequal(float x, float y);
-int2 __ovld __cnfn isnotequal(float2 x, float2 y);
-int3 __ovld __cnfn isnotequal(float3 x, float3 y);
-int4 __ovld __cnfn isnotequal(float4 x, float4 y);
-int8 __ovld __cnfn isnotequal(float8 x, float8 y);
-int16 __ovld __cnfn isnotequal(float16 x, float16 y);
+int __ovld __cnfn isnotequal(float, float);
+int2 __ovld __cnfn isnotequal(float2, float2);
+int3 __ovld __cnfn isnotequal(float3, float3);
+int4 __ovld __cnfn isnotequal(float4, float4);
+int8 __ovld __cnfn isnotequal(float8, float8);
+int16 __ovld __cnfn isnotequal(float16, float16);
#ifdef cl_khr_fp64
-int __ovld __cnfn isnotequal(double x, double y);
-long2 __ovld __cnfn isnotequal(double2 x, double2 y);
-long3 __ovld __cnfn isnotequal(double3 x, double3 y);
-long4 __ovld __cnfn isnotequal(double4 x, double4 y);
-long8 __ovld __cnfn isnotequal(double8 x, double8 y);
-long16 __ovld __cnfn isnotequal(double16 x, double16 y);
+int __ovld __cnfn isnotequal(double, double);
+long2 __ovld __cnfn isnotequal(double2, double2);
+long3 __ovld __cnfn isnotequal(double3, double3);
+long4 __ovld __cnfn isnotequal(double4, double4);
+long8 __ovld __cnfn isnotequal(double8, double8);
+long16 __ovld __cnfn isnotequal(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-int __ovld __cnfn isnotequal(half x, half y);
-short2 __ovld __cnfn isnotequal(half2 x, half2 y);
-short3 __ovld __cnfn isnotequal(half3 x, half3 y);
-short4 __ovld __cnfn isnotequal(half4 x, half4 y);
-short8 __ovld __cnfn isnotequal(half8 x, half8 y);
-short16 __ovld __cnfn isnotequal(half16 x, half16 y);
+int __ovld __cnfn isnotequal(half, half);
+short2 __ovld __cnfn isnotequal(half2, half2);
+short3 __ovld __cnfn isnotequal(half3, half3);
+short4 __ovld __cnfn isnotequal(half4, half4);
+short8 __ovld __cnfn isnotequal(half8, half8);
+short16 __ovld __cnfn isnotequal(half16, half16);
#endif //cl_khr_fp16
/**
* Returns the component-wise compare of x > y.
*/
-int __ovld __cnfn isgreater(float x, float y);
-int2 __ovld __cnfn isgreater(float2 x, float2 y);
-int3 __ovld __cnfn isgreater(float3 x, float3 y);
-int4 __ovld __cnfn isgreater(float4 x, float4 y);
-int8 __ovld __cnfn isgreater(float8 x, float8 y);
-int16 __ovld __cnfn isgreater(float16 x, float16 y);
+int __ovld __cnfn isgreater(float, float);
+int2 __ovld __cnfn isgreater(float2, float2);
+int3 __ovld __cnfn isgreater(float3, float3);
+int4 __ovld __cnfn isgreater(float4, float4);
+int8 __ovld __cnfn isgreater(float8, float8);
+int16 __ovld __cnfn isgreater(float16, float16);
#ifdef cl_khr_fp64
-int __ovld __cnfn isgreater(double x, double y);
-long2 __ovld __cnfn isgreater(double2 x, double2 y);
-long3 __ovld __cnfn isgreater(double3 x, double3 y);
-long4 __ovld __cnfn isgreater(double4 x, double4 y);
-long8 __ovld __cnfn isgreater(double8 x, double8 y);
-long16 __ovld __cnfn isgreater(double16 x, double16 y);
+int __ovld __cnfn isgreater(double, double);
+long2 __ovld __cnfn isgreater(double2, double2);
+long3 __ovld __cnfn isgreater(double3, double3);
+long4 __ovld __cnfn isgreater(double4, double4);
+long8 __ovld __cnfn isgreater(double8, double8);
+long16 __ovld __cnfn isgreater(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-int __ovld __cnfn isgreater(half x, half y);
-short2 __ovld __cnfn isgreater(half2 x, half2 y);
-short3 __ovld __cnfn isgreater(half3 x, half3 y);
-short4 __ovld __cnfn isgreater(half4 x, half4 y);
-short8 __ovld __cnfn isgreater(half8 x, half8 y);
-short16 __ovld __cnfn isgreater(half16 x, half16 y);
+int __ovld __cnfn isgreater(half, half);
+short2 __ovld __cnfn isgreater(half2, half2);
+short3 __ovld __cnfn isgreater(half3, half3);
+short4 __ovld __cnfn isgreater(half4, half4);
+short8 __ovld __cnfn isgreater(half8, half8);
+short16 __ovld __cnfn isgreater(half16, half16);
#endif //cl_khr_fp16
/**
* Returns the component-wise compare of x >= y.
*/
-int __ovld __cnfn isgreaterequal(float x, float y);
-int2 __ovld __cnfn isgreaterequal(float2 x, float2 y);
-int3 __ovld __cnfn isgreaterequal(float3 x, float3 y);
-int4 __ovld __cnfn isgreaterequal(float4 x, float4 y);
-int8 __ovld __cnfn isgreaterequal(float8 x, float8 y);
-int16 __ovld __cnfn isgreaterequal(float16 x, float16 y);
+int __ovld __cnfn isgreaterequal(float, float);
+int2 __ovld __cnfn isgreaterequal(float2, float2);
+int3 __ovld __cnfn isgreaterequal(float3, float3);
+int4 __ovld __cnfn isgreaterequal(float4, float4);
+int8 __ovld __cnfn isgreaterequal(float8, float8);
+int16 __ovld __cnfn isgreaterequal(float16, float16);
#ifdef cl_khr_fp64
-int __ovld __cnfn isgreaterequal(double x, double y);
-long2 __ovld __cnfn isgreaterequal(double2 x, double2 y);
-long3 __ovld __cnfn isgreaterequal(double3 x, double3 y);
-long4 __ovld __cnfn isgreaterequal(double4 x, double4 y);
-long8 __ovld __cnfn isgreaterequal(double8 x, double8 y);
-long16 __ovld __cnfn isgreaterequal(double16 x, double16 y);
+int __ovld __cnfn isgreaterequal(double, double);
+long2 __ovld __cnfn isgreaterequal(double2, double2);
+long3 __ovld __cnfn isgreaterequal(double3, double3);
+long4 __ovld __cnfn isgreaterequal(double4, double4);
+long8 __ovld __cnfn isgreaterequal(double8, double8);
+long16 __ovld __cnfn isgreaterequal(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-int __ovld __cnfn isgreaterequal(half x, half y);
-short2 __ovld __cnfn isgreaterequal(half2 x, half2 y);
-short3 __ovld __cnfn isgreaterequal(half3 x, half3 y);
-short4 __ovld __cnfn isgreaterequal(half4 x, half4 y);
-short8 __ovld __cnfn isgreaterequal(half8 x, half8 y);
-short16 __ovld __cnfn isgreaterequal(half16 x, half16 y);
+int __ovld __cnfn isgreaterequal(half, half);
+short2 __ovld __cnfn isgreaterequal(half2, half2);
+short3 __ovld __cnfn isgreaterequal(half3, half3);
+short4 __ovld __cnfn isgreaterequal(half4, half4);
+short8 __ovld __cnfn isgreaterequal(half8, half8);
+short16 __ovld __cnfn isgreaterequal(half16, half16);
#endif //cl_khr_fp16
/**
* Returns the component-wise compare of x < y.
*/
-int __ovld __cnfn isless(float x, float y);
-int2 __ovld __cnfn isless(float2 x, float2 y);
-int3 __ovld __cnfn isless(float3 x, float3 y);
-int4 __ovld __cnfn isless(float4 x, float4 y);
-int8 __ovld __cnfn isless(float8 x, float8 y);
-int16 __ovld __cnfn isless(float16 x, float16 y);
+int __ovld __cnfn isless(float, float);
+int2 __ovld __cnfn isless(float2, float2);
+int3 __ovld __cnfn isless(float3, float3);
+int4 __ovld __cnfn isless(float4, float4);
+int8 __ovld __cnfn isless(float8, float8);
+int16 __ovld __cnfn isless(float16, float16);
#ifdef cl_khr_fp64
-int __ovld __cnfn isless(double x, double y);
-long2 __ovld __cnfn isless(double2 x, double2 y);
-long3 __ovld __cnfn isless(double3 x, double3 y);
-long4 __ovld __cnfn isless(double4 x, double4 y);
-long8 __ovld __cnfn isless(double8 x, double8 y);
-long16 __ovld __cnfn isless(double16 x, double16 y);
+int __ovld __cnfn isless(double, double);
+long2 __ovld __cnfn isless(double2, double2);
+long3 __ovld __cnfn isless(double3, double3);
+long4 __ovld __cnfn isless(double4, double4);
+long8 __ovld __cnfn isless(double8, double8);
+long16 __ovld __cnfn isless(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-int __ovld __cnfn isless(half x, half y);
-short2 __ovld __cnfn isless(half2 x, half2 y);
-short3 __ovld __cnfn isless(half3 x, half3 y);
-short4 __ovld __cnfn isless(half4 x, half4 y);
-short8 __ovld __cnfn isless(half8 x, half8 y);
-short16 __ovld __cnfn isless(half16 x, half16 y);
+int __ovld __cnfn isless(half, half);
+short2 __ovld __cnfn isless(half2, half2);
+short3 __ovld __cnfn isless(half3, half3);
+short4 __ovld __cnfn isless(half4, half4);
+short8 __ovld __cnfn isless(half8, half8);
+short16 __ovld __cnfn isless(half16, half16);
#endif //cl_khr_fp16
/**
* Returns the component-wise compare of x <= y.
*/
-int __ovld __cnfn islessequal(float x, float y);
-int2 __ovld __cnfn islessequal(float2 x, float2 y);
-int3 __ovld __cnfn islessequal(float3 x, float3 y);
-int4 __ovld __cnfn islessequal(float4 x, float4 y);
-int8 __ovld __cnfn islessequal(float8 x, float8 y);
-int16 __ovld __cnfn islessequal(float16 x, float16 y);
+int __ovld __cnfn islessequal(float, float);
+int2 __ovld __cnfn islessequal(float2, float2);
+int3 __ovld __cnfn islessequal(float3, float3);
+int4 __ovld __cnfn islessequal(float4, float4);
+int8 __ovld __cnfn islessequal(float8, float8);
+int16 __ovld __cnfn islessequal(float16, float16);
#ifdef cl_khr_fp64
-int __ovld __cnfn islessequal(double x, double y);
-long2 __ovld __cnfn islessequal(double2 x, double2 y);
-long3 __ovld __cnfn islessequal(double3 x, double3 y);
-long4 __ovld __cnfn islessequal(double4 x, double4 y);
-long8 __ovld __cnfn islessequal(double8 x, double8 y);
-long16 __ovld __cnfn islessequal(double16 x, double16 y);
+int __ovld __cnfn islessequal(double, double);
+long2 __ovld __cnfn islessequal(double2, double2);
+long3 __ovld __cnfn islessequal(double3, double3);
+long4 __ovld __cnfn islessequal(double4, double4);
+long8 __ovld __cnfn islessequal(double8, double8);
+long16 __ovld __cnfn islessequal(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-int __ovld __cnfn islessequal(half x, half y);
-short2 __ovld __cnfn islessequal(half2 x, half2 y);
-short3 __ovld __cnfn islessequal(half3 x, half3 y);
-short4 __ovld __cnfn islessequal(half4 x, half4 y);
-short8 __ovld __cnfn islessequal(half8 x, half8 y);
-short16 __ovld __cnfn islessequal(half16 x, half16 y);
+int __ovld __cnfn islessequal(half, half);
+short2 __ovld __cnfn islessequal(half2, half2);
+short3 __ovld __cnfn islessequal(half3, half3);
+short4 __ovld __cnfn islessequal(half4, half4);
+short8 __ovld __cnfn islessequal(half8, half8);
+short16 __ovld __cnfn islessequal(half16, half16);
#endif //cl_khr_fp16
/**
* Returns the component-wise compare of
* (x < y) || (x > y) .
*/
-int __ovld __cnfn islessgreater(float x, float y);
-int2 __ovld __cnfn islessgreater(float2 x, float2 y);
-int3 __ovld __cnfn islessgreater(float3 x, float3 y);
-int4 __ovld __cnfn islessgreater(float4 x, float4 y);
-int8 __ovld __cnfn islessgreater(float8 x, float8 y);
-int16 __ovld __cnfn islessgreater(float16 x, float16 y);
+int __ovld __cnfn islessgreater(float, float);
+int2 __ovld __cnfn islessgreater(float2, float2);
+int3 __ovld __cnfn islessgreater(float3, float3);
+int4 __ovld __cnfn islessgreater(float4, float4);
+int8 __ovld __cnfn islessgreater(float8, float8);
+int16 __ovld __cnfn islessgreater(float16, float16);
#ifdef cl_khr_fp64
-int __ovld __cnfn islessgreater(double x, double y);
-long2 __ovld __cnfn islessgreater(double2 x, double2 y);
-long3 __ovld __cnfn islessgreater(double3 x, double3 y);
-long4 __ovld __cnfn islessgreater(double4 x, double4 y);
-long8 __ovld __cnfn islessgreater(double8 x, double8 y);
-long16 __ovld __cnfn islessgreater(double16 x, double16 y);
+int __ovld __cnfn islessgreater(double, double);
+long2 __ovld __cnfn islessgreater(double2, double2);
+long3 __ovld __cnfn islessgreater(double3, double3);
+long4 __ovld __cnfn islessgreater(double4, double4);
+long8 __ovld __cnfn islessgreater(double8, double8);
+long16 __ovld __cnfn islessgreater(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-int __ovld __cnfn islessgreater(half x, half y);
-short2 __ovld __cnfn islessgreater(half2 x, half2 y);
-short3 __ovld __cnfn islessgreater(half3 x, half3 y);
-short4 __ovld __cnfn islessgreater(half4 x, half4 y);
-short8 __ovld __cnfn islessgreater(half8 x, half8 y);
-short16 __ovld __cnfn islessgreater(half16 x, half16 y);
+int __ovld __cnfn islessgreater(half, half);
+short2 __ovld __cnfn islessgreater(half2, half2);
+short3 __ovld __cnfn islessgreater(half3, half3);
+short4 __ovld __cnfn islessgreater(half4, half4);
+short8 __ovld __cnfn islessgreater(half8, half8);
+short16 __ovld __cnfn islessgreater(half16, half16);
#endif //cl_khr_fp16
/**
@@ -10802,27 +10798,27 @@ short16 __ovld __cnfn isnormal(half16);
* arguments x and y, and returns the result
* isequal(x, x) && isequal(y, y).
*/
-int __ovld __cnfn isordered(float x, float y);
-int2 __ovld __cnfn isordered(float2 x, float2 y);
-int3 __ovld __cnfn isordered(float3 x, float3 y);
-int4 __ovld __cnfn isordered(float4 x, float4 y);
-int8 __ovld __cnfn isordered(float8 x, float8 y);
-int16 __ovld __cnfn isordered(float16 x, float16 y);
+int __ovld __cnfn isordered(float, float);
+int2 __ovld __cnfn isordered(float2, float2);
+int3 __ovld __cnfn isordered(float3, float3);
+int4 __ovld __cnfn isordered(float4, float4);
+int8 __ovld __cnfn isordered(float8, float8);
+int16 __ovld __cnfn isordered(float16, float16);
#ifdef cl_khr_fp64
-int __ovld __cnfn isordered(double x, double y);
-long2 __ovld __cnfn isordered(double2 x, double2 y);
-long3 __ovld __cnfn isordered(double3 x, double3 y);
-long4 __ovld __cnfn isordered(double4 x, double4 y);
-long8 __ovld __cnfn isordered(double8 x, double8 y);
-long16 __ovld __cnfn isordered(double16 x, double16 y);
+int __ovld __cnfn isordered(double, double);
+long2 __ovld __cnfn isordered(double2, double2);
+long3 __ovld __cnfn isordered(double3, double3);
+long4 __ovld __cnfn isordered(double4, double4);
+long8 __ovld __cnfn isordered(double8, double8);
+long16 __ovld __cnfn isordered(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-int __ovld __cnfn isordered(half x, half y);
-short2 __ovld __cnfn isordered(half2 x, half2 y);
-short3 __ovld __cnfn isordered(half3 x, half3 y);
-short4 __ovld __cnfn isordered(half4 x, half4 y);
-short8 __ovld __cnfn isordered(half8 x, half8 y);
-short16 __ovld __cnfn isordered(half16 x, half16 y);
+int __ovld __cnfn isordered(half, half);
+short2 __ovld __cnfn isordered(half2, half2);
+short3 __ovld __cnfn isordered(half3, half3);
+short4 __ovld __cnfn isordered(half4, half4);
+short8 __ovld __cnfn isordered(half8, half8);
+short16 __ovld __cnfn isordered(half16, half16);
#endif //cl_khr_fp16
/**
@@ -10830,27 +10826,27 @@ short16 __ovld __cnfn isordered(half16 x, half16 y);
* takes arguments x and y, returning non-zero if x or y
* is NaN, and zero otherwise.
*/
-int __ovld __cnfn isunordered(float x, float y);
-int2 __ovld __cnfn isunordered(float2 x, float2 y);
-int3 __ovld __cnfn isunordered(float3 x, float3 y);
-int4 __ovld __cnfn isunordered(float4 x, float4 y);
-int8 __ovld __cnfn isunordered(float8 x, float8 y);
-int16 __ovld __cnfn isunordered(float16 x, float16 y);
+int __ovld __cnfn isunordered(float, float);
+int2 __ovld __cnfn isunordered(float2, float2);
+int3 __ovld __cnfn isunordered(float3, float3);
+int4 __ovld __cnfn isunordered(float4, float4);
+int8 __ovld __cnfn isunordered(float8, float8);
+int16 __ovld __cnfn isunordered(float16, float16);
#ifdef cl_khr_fp64
-int __ovld __cnfn isunordered(double x, double y);
-long2 __ovld __cnfn isunordered(double2 x, double2 y);
-long3 __ovld __cnfn isunordered(double3 x, double3 y);
-long4 __ovld __cnfn isunordered(double4 x, double4 y);
-long8 __ovld __cnfn isunordered(double8 x, double8 y);
-long16 __ovld __cnfn isunordered(double16 x, double16 y);
+int __ovld __cnfn isunordered(double, double);
+long2 __ovld __cnfn isunordered(double2, double2);
+long3 __ovld __cnfn isunordered(double3, double3);
+long4 __ovld __cnfn isunordered(double4, double4);
+long8 __ovld __cnfn isunordered(double8, double8);
+long16 __ovld __cnfn isunordered(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-int __ovld __cnfn isunordered(half x, half y);
-short2 __ovld __cnfn isunordered(half2 x, half2 y);
-short3 __ovld __cnfn isunordered(half3 x, half3 y);
-short4 __ovld __cnfn isunordered(half4 x, half4 y);
-short8 __ovld __cnfn isunordered(half8 x, half8 y);
-short16 __ovld __cnfn isunordered(half16 x, half16 y);
+int __ovld __cnfn isunordered(half, half);
+short2 __ovld __cnfn isunordered(half2, half2);
+short3 __ovld __cnfn isunordered(half3, half3);
+short4 __ovld __cnfn isunordered(half4, half4);
+short8 __ovld __cnfn isunordered(half8, half8);
+short16 __ovld __cnfn isunordered(half16, half16);
#endif //cl_khr_fp16
/**
@@ -10887,134 +10883,134 @@ short16 __ovld __cnfn signbit(half16);
* Returns 1 if the most significant bit in any component
* of x is set; otherwise returns 0.
*/
-int __ovld __cnfn any(char x);
-int __ovld __cnfn any(char2 x);
-int __ovld __cnfn any(char3 x);
-int __ovld __cnfn any(char4 x);
-int __ovld __cnfn any(char8 x);
-int __ovld __cnfn any(char16 x);
-int __ovld __cnfn any(short x);
-int __ovld __cnfn any(short2 x);
-int __ovld __cnfn any(short3 x);
-int __ovld __cnfn any(short4 x);
-int __ovld __cnfn any(short8 x);
-int __ovld __cnfn any(short16 x);
-int __ovld __cnfn any(int x);
-int __ovld __cnfn any(int2 x);
-int __ovld __cnfn any(int3 x);
-int __ovld __cnfn any(int4 x);
-int __ovld __cnfn any(int8 x);
-int __ovld __cnfn any(int16 x);
-int __ovld __cnfn any(long x);
-int __ovld __cnfn any(long2 x);
-int __ovld __cnfn any(long3 x);
-int __ovld __cnfn any(long4 x);
-int __ovld __cnfn any(long8 x);
-int __ovld __cnfn any(long16 x);
+int __ovld __cnfn any(char);
+int __ovld __cnfn any(char2);
+int __ovld __cnfn any(char3);
+int __ovld __cnfn any(char4);
+int __ovld __cnfn any(char8);
+int __ovld __cnfn any(char16);
+int __ovld __cnfn any(short);
+int __ovld __cnfn any(short2);
+int __ovld __cnfn any(short3);
+int __ovld __cnfn any(short4);
+int __ovld __cnfn any(short8);
+int __ovld __cnfn any(short16);
+int __ovld __cnfn any(int);
+int __ovld __cnfn any(int2);
+int __ovld __cnfn any(int3);
+int __ovld __cnfn any(int4);
+int __ovld __cnfn any(int8);
+int __ovld __cnfn any(int16);
+int __ovld __cnfn any(long);
+int __ovld __cnfn any(long2);
+int __ovld __cnfn any(long3);
+int __ovld __cnfn any(long4);
+int __ovld __cnfn any(long8);
+int __ovld __cnfn any(long16);
/**
* Returns 1 if the most significant bit in all components
* of x is set; otherwise returns 0.
*/
-int __ovld __cnfn all(char x);
-int __ovld __cnfn all(char2 x);
-int __ovld __cnfn all(char3 x);
-int __ovld __cnfn all(char4 x);
-int __ovld __cnfn all(char8 x);
-int __ovld __cnfn all(char16 x);
-int __ovld __cnfn all(short x);
-int __ovld __cnfn all(short2 x);
-int __ovld __cnfn all(short3 x);
-int __ovld __cnfn all(short4 x);
-int __ovld __cnfn all(short8 x);
-int __ovld __cnfn all(short16 x);
-int __ovld __cnfn all(int x);
-int __ovld __cnfn all(int2 x);
-int __ovld __cnfn all(int3 x);
-int __ovld __cnfn all(int4 x);
-int __ovld __cnfn all(int8 x);
-int __ovld __cnfn all(int16 x);
-int __ovld __cnfn all(long x);
-int __ovld __cnfn all(long2 x);
-int __ovld __cnfn all(long3 x);
-int __ovld __cnfn all(long4 x);
-int __ovld __cnfn all(long8 x);
-int __ovld __cnfn all(long16 x);
+int __ovld __cnfn all(char);
+int __ovld __cnfn all(char2);
+int __ovld __cnfn all(char3);
+int __ovld __cnfn all(char4);
+int __ovld __cnfn all(char8);
+int __ovld __cnfn all(char16);
+int __ovld __cnfn all(short);
+int __ovld __cnfn all(short2);
+int __ovld __cnfn all(short3);
+int __ovld __cnfn all(short4);
+int __ovld __cnfn all(short8);
+int __ovld __cnfn all(short16);
+int __ovld __cnfn all(int);
+int __ovld __cnfn all(int2);
+int __ovld __cnfn all(int3);
+int __ovld __cnfn all(int4);
+int __ovld __cnfn all(int8);
+int __ovld __cnfn all(int16);
+int __ovld __cnfn all(long);
+int __ovld __cnfn all(long2);
+int __ovld __cnfn all(long3);
+int __ovld __cnfn all(long4);
+int __ovld __cnfn all(long8);
+int __ovld __cnfn all(long16);
/**
* Each bit of the result is the corresponding bit of a if
* the corresponding bit of c is 0. Otherwise it is the
* corresponding bit of b.
*/
-char __ovld __cnfn bitselect(char a, char b, char c);
-uchar __ovld __cnfn bitselect(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn bitselect(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn bitselect(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn bitselect(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn bitselect(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn bitselect(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn bitselect(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn bitselect(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn bitselect(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn bitselect(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn bitselect(uchar16 a, uchar16 b, uchar16 c);
-short __ovld __cnfn bitselect(short a, short b, short c);
-ushort __ovld __cnfn bitselect(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn bitselect(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn bitselect(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn bitselect(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn bitselect(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn bitselect(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn bitselect(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn bitselect(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn bitselect(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn bitselect(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn bitselect(ushort16 a, ushort16 b, ushort16 c);
-int __ovld __cnfn bitselect(int a, int b, int c);
-uint __ovld __cnfn bitselect(uint a, uint b, uint c);
-int2 __ovld __cnfn bitselect(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn bitselect(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn bitselect(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn bitselect(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn bitselect(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn bitselect(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn bitselect(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn bitselect(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn bitselect(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn bitselect(uint16 a, uint16 b, uint16 c);
-long __ovld __cnfn bitselect(long a, long b, long c);
-ulong __ovld __cnfn bitselect(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn bitselect(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn bitselect(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn bitselect(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn bitselect(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn bitselect(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn bitselect(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn bitselect(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn bitselect(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn bitselect(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn bitselect(ulong16 a, ulong16 b, ulong16 c);
-float __ovld __cnfn bitselect(float a, float b, float c);
-float2 __ovld __cnfn bitselect(float2 a, float2 b, float2 c);
-float3 __ovld __cnfn bitselect(float3 a, float3 b, float3 c);
-float4 __ovld __cnfn bitselect(float4 a, float4 b, float4 c);
-float8 __ovld __cnfn bitselect(float8 a, float8 b, float8 c);
-float16 __ovld __cnfn bitselect(float16 a, float16 b, float16 c);
+char __ovld __cnfn bitselect(char, char, char);
+uchar __ovld __cnfn bitselect(uchar, uchar, uchar);
+char2 __ovld __cnfn bitselect(char2, char2, char2);
+uchar2 __ovld __cnfn bitselect(uchar2, uchar2, uchar2);
+char3 __ovld __cnfn bitselect(char3, char3, char3);
+uchar3 __ovld __cnfn bitselect(uchar3, uchar3, uchar3);
+char4 __ovld __cnfn bitselect(char4, char4, char4);
+uchar4 __ovld __cnfn bitselect(uchar4, uchar4, uchar4);
+char8 __ovld __cnfn bitselect(char8, char8, char8);
+uchar8 __ovld __cnfn bitselect(uchar8, uchar8, uchar8);
+char16 __ovld __cnfn bitselect(char16, char16, char16);
+uchar16 __ovld __cnfn bitselect(uchar16, uchar16, uchar16);
+short __ovld __cnfn bitselect(short, short, short);
+ushort __ovld __cnfn bitselect(ushort, ushort, ushort);
+short2 __ovld __cnfn bitselect(short2, short2, short2);
+ushort2 __ovld __cnfn bitselect(ushort2, ushort2, ushort2);
+short3 __ovld __cnfn bitselect(short3, short3, short3);
+ushort3 __ovld __cnfn bitselect(ushort3, ushort3, ushort3);
+short4 __ovld __cnfn bitselect(short4, short4, short4);
+ushort4 __ovld __cnfn bitselect(ushort4, ushort4, ushort4);
+short8 __ovld __cnfn bitselect(short8, short8, short8);
+ushort8 __ovld __cnfn bitselect(ushort8, ushort8, ushort8);
+short16 __ovld __cnfn bitselect(short16, short16, short16);
+ushort16 __ovld __cnfn bitselect(ushort16, ushort16, ushort16);
+int __ovld __cnfn bitselect(int, int, int);
+uint __ovld __cnfn bitselect(uint, uint, uint);
+int2 __ovld __cnfn bitselect(int2, int2, int2);
+uint2 __ovld __cnfn bitselect(uint2, uint2, uint2);
+int3 __ovld __cnfn bitselect(int3, int3, int3);
+uint3 __ovld __cnfn bitselect(uint3, uint3, uint3);
+int4 __ovld __cnfn bitselect(int4, int4, int4);
+uint4 __ovld __cnfn bitselect(uint4, uint4, uint4);
+int8 __ovld __cnfn bitselect(int8, int8, int8);
+uint8 __ovld __cnfn bitselect(uint8, uint8, uint8);
+int16 __ovld __cnfn bitselect(int16, int16, int16);
+uint16 __ovld __cnfn bitselect(uint16, uint16, uint16);
+long __ovld __cnfn bitselect(long, long, long);
+ulong __ovld __cnfn bitselect(ulong, ulong, ulong);
+long2 __ovld __cnfn bitselect(long2, long2, long2);
+ulong2 __ovld __cnfn bitselect(ulong2, ulong2, ulong2);
+long3 __ovld __cnfn bitselect(long3, long3, long3);
+ulong3 __ovld __cnfn bitselect(ulong3, ulong3, ulong3);
+long4 __ovld __cnfn bitselect(long4, long4, long4);
+ulong4 __ovld __cnfn bitselect(ulong4, ulong4, ulong4);
+long8 __ovld __cnfn bitselect(long8, long8, long8);
+ulong8 __ovld __cnfn bitselect(ulong8, ulong8, ulong8);
+long16 __ovld __cnfn bitselect(long16, long16, long16);
+ulong16 __ovld __cnfn bitselect(ulong16, ulong16, ulong16);
+float __ovld __cnfn bitselect(float, float, float);
+float2 __ovld __cnfn bitselect(float2, float2, float2);
+float3 __ovld __cnfn bitselect(float3, float3, float3);
+float4 __ovld __cnfn bitselect(float4, float4, float4);
+float8 __ovld __cnfn bitselect(float8, float8, float8);
+float16 __ovld __cnfn bitselect(float16, float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn bitselect(double a, double b, double c);
-double2 __ovld __cnfn bitselect(double2 a, double2 b, double2 c);
-double3 __ovld __cnfn bitselect(double3 a, double3 b, double3 c);
-double4 __ovld __cnfn bitselect(double4 a, double4 b, double4 c);
-double8 __ovld __cnfn bitselect(double8 a, double8 b, double8 c);
-double16 __ovld __cnfn bitselect(double16 a, double16 b, double16 c);
+double __ovld __cnfn bitselect(double, double, double);
+double2 __ovld __cnfn bitselect(double2, double2, double2);
+double3 __ovld __cnfn bitselect(double3, double3, double3);
+double4 __ovld __cnfn bitselect(double4, double4, double4);
+double8 __ovld __cnfn bitselect(double8, double8, double8);
+double16 __ovld __cnfn bitselect(double16, double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn bitselect(half a, half b, half c);
-half2 __ovld __cnfn bitselect(half2 a, half2 b, half2 c);
-half3 __ovld __cnfn bitselect(half3 a, half3 b, half3 c);
-half4 __ovld __cnfn bitselect(half4 a, half4 b, half4 c);
-half8 __ovld __cnfn bitselect(half8 a, half8 b, half8 c);
-half16 __ovld __cnfn bitselect(half16 a, half16 b, half16 c);
+half __ovld __cnfn bitselect(half, half, half);
+half2 __ovld __cnfn bitselect(half2, half2, half2);
+half3 __ovld __cnfn bitselect(half3, half3, half3);
+half4 __ovld __cnfn bitselect(half4, half4, half4);
+half8 __ovld __cnfn bitselect(half8, half8, half8);
+half16 __ovld __cnfn bitselect(half16, half16, half16);
#endif //cl_khr_fp16
/**
@@ -11024,149 +11020,149 @@ half16 __ovld __cnfn bitselect(half16 a, half16 b, half16 c);
* b and a must have the same type.
* c must have the same number of elements and bits as a.
*/
-char __ovld __cnfn select(char a, char b, char c);
-uchar __ovld __cnfn select(uchar a, uchar b, char c);
-char2 __ovld __cnfn select(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, char2 c);
-char3 __ovld __cnfn select(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, char3 c);
-char4 __ovld __cnfn select(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, char4 c);
-char8 __ovld __cnfn select(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, char8 c);
-char16 __ovld __cnfn select(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, char16 c);
-
-short __ovld __cnfn select(short a, short b, short c);
-ushort __ovld __cnfn select(ushort a, ushort b, short c);
-short2 __ovld __cnfn select(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, short2 c);
-short3 __ovld __cnfn select(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, short3 c);
-short4 __ovld __cnfn select(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, short4 c);
-short8 __ovld __cnfn select(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, short8 c);
-short16 __ovld __cnfn select(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, short16 c);
-
-int __ovld __cnfn select(int a, int b, int c);
-uint __ovld __cnfn select(uint a, uint b, int c);
-int2 __ovld __cnfn select(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn select(uint2 a, uint2 b, int2 c);
-int3 __ovld __cnfn select(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn select(uint3 a, uint3 b, int3 c);
-int4 __ovld __cnfn select(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn select(uint4 a, uint4 b, int4 c);
-int8 __ovld __cnfn select(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn select(uint8 a, uint8 b, int8 c);
-int16 __ovld __cnfn select(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn select(uint16 a, uint16 b, int16 c);
-float __ovld __cnfn select(float a, float b, int c);
-float2 __ovld __cnfn select(float2 a, float2 b, int2 c);
-float3 __ovld __cnfn select(float3 a, float3 b, int3 c);
-float4 __ovld __cnfn select(float4 a, float4 b, int4 c);
-float8 __ovld __cnfn select(float8 a, float8 b, int8 c);
-float16 __ovld __cnfn select(float16 a, float16 b, int16 c);
-
-long __ovld __cnfn select(long a, long b, long c);
-ulong __ovld __cnfn select(ulong a, ulong b, long c);
-long2 __ovld __cnfn select(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, long2 c);
-long3 __ovld __cnfn select(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, long3 c);
-long4 __ovld __cnfn select(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, long4 c);
-long8 __ovld __cnfn select(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, long8 c);
-long16 __ovld __cnfn select(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, long16 c);
-
-char __ovld __cnfn select(char a, char b, uchar c);
-uchar __ovld __cnfn select(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn select(char2 a, char2 b, uchar2 c);
-uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn select(char3 a, char3 b, uchar3 c);
-uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn select(char4 a, char4 b, uchar4 c);
-uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn select(char8 a, char8 b, uchar8 c);
-uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn select(char16 a, char16 b, uchar16 c);
-uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, uchar16 c);
-
-short __ovld __cnfn select(short a, short b, ushort c);
-ushort __ovld __cnfn select(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn select(short2 a, short2 b, ushort2 c);
-ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn select(short3 a, short3 b, ushort3 c);
-ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn select(short4 a, short4 b, ushort4 c);
-ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn select(short8 a, short8 b, ushort8 c);
-ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn select(short16 a, short16 b, ushort16 c);
-ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, ushort16 c);
-
-int __ovld __cnfn select(int a, int b, uint c);
-uint __ovld __cnfn select(uint a, uint b, uint c);
-int2 __ovld __cnfn select(int2 a, int2 b, uint2 c);
-uint2 __ovld __cnfn select(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn select(int3 a, int3 b, uint3 c);
-uint3 __ovld __cnfn select(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn select(int4 a, int4 b, uint4 c);
-uint4 __ovld __cnfn select(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn select(int8 a, int8 b, uint8 c);
-uint8 __ovld __cnfn select(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn select(int16 a, int16 b, uint16 c);
-uint16 __ovld __cnfn select(uint16 a, uint16 b, uint16 c);
-float __ovld __cnfn select(float a, float b, uint c);
-float2 __ovld __cnfn select(float2 a, float2 b, uint2 c);
-float3 __ovld __cnfn select(float3 a, float3 b, uint3 c);
-float4 __ovld __cnfn select(float4 a, float4 b, uint4 c);
-float8 __ovld __cnfn select(float8 a, float8 b, uint8 c);
-float16 __ovld __cnfn select(float16 a, float16 b, uint16 c);
-
-long __ovld __cnfn select(long a, long b, ulong c);
-ulong __ovld __cnfn select(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn select(long2 a, long2 b, ulong2 c);
-ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn select(long3 a, long3 b, ulong3 c);
-ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn select(long4 a, long4 b, ulong4 c);
-ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn select(long8 a, long8 b, ulong8 c);
-ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn select(long16 a, long16 b, ulong16 c);
-ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, ulong16 c);
+char __ovld __cnfn select(char, char, char);
+uchar __ovld __cnfn select(uchar, uchar, char);
+char2 __ovld __cnfn select(char2, char2, char2);
+uchar2 __ovld __cnfn select(uchar2, uchar2, char2);
+char3 __ovld __cnfn select(char3, char3, char3);
+uchar3 __ovld __cnfn select(uchar3, uchar3, char3);
+char4 __ovld __cnfn select(char4, char4, char4);
+uchar4 __ovld __cnfn select(uchar4, uchar4, char4);
+char8 __ovld __cnfn select(char8, char8, char8);
+uchar8 __ovld __cnfn select(uchar8, uchar8, char8);
+char16 __ovld __cnfn select(char16, char16, char16);
+uchar16 __ovld __cnfn select(uchar16, uchar16, char16);
+
+short __ovld __cnfn select(short, short, short);
+ushort __ovld __cnfn select(ushort, ushort, short);
+short2 __ovld __cnfn select(short2, short2, short2);
+ushort2 __ovld __cnfn select(ushort2, ushort2, short2);
+short3 __ovld __cnfn select(short3, short3, short3);
+ushort3 __ovld __cnfn select(ushort3, ushort3, short3);
+short4 __ovld __cnfn select(short4, short4, short4);
+ushort4 __ovld __cnfn select(ushort4, ushort4, short4);
+short8 __ovld __cnfn select(short8, short8, short8);
+ushort8 __ovld __cnfn select(ushort8, ushort8, short8);
+short16 __ovld __cnfn select(short16, short16, short16);
+ushort16 __ovld __cnfn select(ushort16, ushort16, short16);
+
+int __ovld __cnfn select(int, int, int);
+uint __ovld __cnfn select(uint, uint, int);
+int2 __ovld __cnfn select(int2, int2, int2);
+uint2 __ovld __cnfn select(uint2, uint2, int2);
+int3 __ovld __cnfn select(int3, int3, int3);
+uint3 __ovld __cnfn select(uint3, uint3, int3);
+int4 __ovld __cnfn select(int4, int4, int4);
+uint4 __ovld __cnfn select(uint4, uint4, int4);
+int8 __ovld __cnfn select(int8, int8, int8);
+uint8 __ovld __cnfn select(uint8, uint8, int8);
+int16 __ovld __cnfn select(int16, int16, int16);
+uint16 __ovld __cnfn select(uint16, uint16, int16);
+float __ovld __cnfn select(float, float, int);
+float2 __ovld __cnfn select(float2, float2, int2);
+float3 __ovld __cnfn select(float3, float3, int3);
+float4 __ovld __cnfn select(float4, float4, int4);
+float8 __ovld __cnfn select(float8, float8, int8);
+float16 __ovld __cnfn select(float16, float16, int16);
+
+long __ovld __cnfn select(long, long, long);
+ulong __ovld __cnfn select(ulong, ulong, long);
+long2 __ovld __cnfn select(long2, long2, long2);
+ulong2 __ovld __cnfn select(ulong2, ulong2, long2);
+long3 __ovld __cnfn select(long3, long3, long3);
+ulong3 __ovld __cnfn select(ulong3, ulong3, long3);
+long4 __ovld __cnfn select(long4, long4, long4);
+ulong4 __ovld __cnfn select(ulong4, ulong4, long4);
+long8 __ovld __cnfn select(long8, long8, long8);
+ulong8 __ovld __cnfn select(ulong8, ulong8, long8);
+long16 __ovld __cnfn select(long16, long16, long16);
+ulong16 __ovld __cnfn select(ulong16, ulong16, long16);
+
+char __ovld __cnfn select(char, char, uchar);
+uchar __ovld __cnfn select(uchar, uchar, uchar);
+char2 __ovld __cnfn select(char2, char2, uchar2);
+uchar2 __ovld __cnfn select(uchar2, uchar2, uchar2);
+char3 __ovld __cnfn select(char3, char3, uchar3);
+uchar3 __ovld __cnfn select(uchar3, uchar3, uchar3);
+char4 __ovld __cnfn select(char4, char4, uchar4);
+uchar4 __ovld __cnfn select(uchar4, uchar4, uchar4);
+char8 __ovld __cnfn select(char8, char8, uchar8);
+uchar8 __ovld __cnfn select(uchar8, uchar8, uchar8);
+char16 __ovld __cnfn select(char16, char16, uchar16);
+uchar16 __ovld __cnfn select(uchar16, uchar16, uchar16);
+
+short __ovld __cnfn select(short, short, ushort);
+ushort __ovld __cnfn select(ushort, ushort, ushort);
+short2 __ovld __cnfn select(short2, short2, ushort2);
+ushort2 __ovld __cnfn select(ushort2, ushort2, ushort2);
+short3 __ovld __cnfn select(short3, short3, ushort3);
+ushort3 __ovld __cnfn select(ushort3, ushort3, ushort3);
+short4 __ovld __cnfn select(short4, short4, ushort4);
+ushort4 __ovld __cnfn select(ushort4, ushort4, ushort4);
+short8 __ovld __cnfn select(short8, short8, ushort8);
+ushort8 __ovld __cnfn select(ushort8, ushort8, ushort8);
+short16 __ovld __cnfn select(short16, short16, ushort16);
+ushort16 __ovld __cnfn select(ushort16, ushort16, ushort16);
+
+int __ovld __cnfn select(int, int, uint);
+uint __ovld __cnfn select(uint, uint, uint);
+int2 __ovld __cnfn select(int2, int2, uint2);
+uint2 __ovld __cnfn select(uint2, uint2, uint2);
+int3 __ovld __cnfn select(int3, int3, uint3);
+uint3 __ovld __cnfn select(uint3, uint3, uint3);
+int4 __ovld __cnfn select(int4, int4, uint4);
+uint4 __ovld __cnfn select(uint4, uint4, uint4);
+int8 __ovld __cnfn select(int8, int8, uint8);
+uint8 __ovld __cnfn select(uint8, uint8, uint8);
+int16 __ovld __cnfn select(int16, int16, uint16);
+uint16 __ovld __cnfn select(uint16, uint16, uint16);
+float __ovld __cnfn select(float, float, uint);
+float2 __ovld __cnfn select(float2, float2, uint2);
+float3 __ovld __cnfn select(float3, float3, uint3);
+float4 __ovld __cnfn select(float4, float4, uint4);
+float8 __ovld __cnfn select(float8, float8, uint8);
+float16 __ovld __cnfn select(float16, float16, uint16);
+
+long __ovld __cnfn select(long, long, ulong);
+ulong __ovld __cnfn select(ulong, ulong, ulong);
+long2 __ovld __cnfn select(long2, long2, ulong2);
+ulong2 __ovld __cnfn select(ulong2, ulong2, ulong2);
+long3 __ovld __cnfn select(long3, long3, ulong3);
+ulong3 __ovld __cnfn select(ulong3, ulong3, ulong3);
+long4 __ovld __cnfn select(long4, long4, ulong4);
+ulong4 __ovld __cnfn select(ulong4, ulong4, ulong4);
+long8 __ovld __cnfn select(long8, long8, ulong8);
+ulong8 __ovld __cnfn select(ulong8, ulong8, ulong8);
+long16 __ovld __cnfn select(long16, long16, ulong16);
+ulong16 __ovld __cnfn select(ulong16, ulong16, ulong16);
#ifdef cl_khr_fp64
-double __ovld __cnfn select(double a, double b, long c);
-double2 __ovld __cnfn select(double2 a, double2 b, long2 c);
-double3 __ovld __cnfn select(double3 a, double3 b, long3 c);
-double4 __ovld __cnfn select(double4 a, double4 b, long4 c);
-double8 __ovld __cnfn select(double8 a, double8 b, long8 c);
-double16 __ovld __cnfn select(double16 a, double16 b, long16 c);
-double __ovld __cnfn select(double a, double b, ulong c);
-double2 __ovld __cnfn select(double2 a, double2 b, ulong2 c);
-double3 __ovld __cnfn select(double3 a, double3 b, ulong3 c);
-double4 __ovld __cnfn select(double4 a, double4 b, ulong4 c);
-double8 __ovld __cnfn select(double8 a, double8 b, ulong8 c);
-double16 __ovld __cnfn select(double16 a, double16 b, ulong16 c);
+double __ovld __cnfn select(double, double, long);
+double2 __ovld __cnfn select(double2, double2, long2);
+double3 __ovld __cnfn select(double3, double3, long3);
+double4 __ovld __cnfn select(double4, double4, long4);
+double8 __ovld __cnfn select(double8, double8, long8);
+double16 __ovld __cnfn select(double16, double16, long16);
+double __ovld __cnfn select(double, double, ulong);
+double2 __ovld __cnfn select(double2, double2, ulong2);
+double3 __ovld __cnfn select(double3, double3, ulong3);
+double4 __ovld __cnfn select(double4, double4, ulong4);
+double8 __ovld __cnfn select(double8, double8, ulong8);
+double16 __ovld __cnfn select(double16, double16, ulong16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn select(half a, half b, short c);
-half2 __ovld __cnfn select(half2 a, half2 b, short2 c);
-half3 __ovld __cnfn select(half3 a, half3 b, short3 c);
-half4 __ovld __cnfn select(half4 a, half4 b, short4 c);
-half8 __ovld __cnfn select(half8 a, half8 b, short8 c);
-half16 __ovld __cnfn select(half16 a, half16 b, short16 c);
-half __ovld __cnfn select(half a, half b, ushort c);
-half2 __ovld __cnfn select(half2 a, half2 b, ushort2 c);
-half3 __ovld __cnfn select(half3 a, half3 b, ushort3 c);
-half4 __ovld __cnfn select(half4 a, half4 b, ushort4 c);
-half8 __ovld __cnfn select(half8 a, half8 b, ushort8 c);
-half16 __ovld __cnfn select(half16 a, half16 b, ushort16 c);
+half __ovld __cnfn select(half, half, short);
+half2 __ovld __cnfn select(half2, half2, short2);
+half3 __ovld __cnfn select(half3, half3, short3);
+half4 __ovld __cnfn select(half4, half4, short4);
+half8 __ovld __cnfn select(half8, half8, short8);
+half16 __ovld __cnfn select(half16, half16, short16);
+half __ovld __cnfn select(half, half, ushort);
+half2 __ovld __cnfn select(half2, half2, ushort2);
+half3 __ovld __cnfn select(half3, half3, ushort3);
+half4 __ovld __cnfn select(half4, half4, ushort4);
+half8 __ovld __cnfn select(half8, half8, ushort8);
+half16 __ovld __cnfn select(half16, half16, ushort16);
#endif //cl_khr_fp16
// OpenCL v1.1 s6.11.7, v1.2 s6.12.7, v2.0 s6.13.7 - Vector Data Load and Store Functions
@@ -11187,543 +11183,538 @@ half16 __ovld __cnfn select(half16 a, half16 b, ushort16 c);
* 64-bit aligned if gentype is long, ulong, double.
*/
-char2 __ovld vload2(size_t offset, const __constant char *p);
-uchar2 __ovld vload2(size_t offset, const __constant uchar *p);
-short2 __ovld vload2(size_t offset, const __constant short *p);
-ushort2 __ovld vload2(size_t offset, const __constant ushort *p);
-int2 __ovld vload2(size_t offset, const __constant int *p);
-uint2 __ovld vload2(size_t offset, const __constant uint *p);
-long2 __ovld vload2(size_t offset, const __constant long *p);
-ulong2 __ovld vload2(size_t offset, const __constant ulong *p);
-float2 __ovld vload2(size_t offset, const __constant float *p);
-char3 __ovld vload3(size_t offset, const __constant char *p);
-uchar3 __ovld vload3(size_t offset, const __constant uchar *p);
-short3 __ovld vload3(size_t offset, const __constant short *p);
-ushort3 __ovld vload3(size_t offset, const __constant ushort *p);
-int3 __ovld vload3(size_t offset, const __constant int *p);
-uint3 __ovld vload3(size_t offset, const __constant uint *p);
-long3 __ovld vload3(size_t offset, const __constant long *p);
-ulong3 __ovld vload3(size_t offset, const __constant ulong *p);
-float3 __ovld vload3(size_t offset, const __constant float *p);
-char4 __ovld vload4(size_t offset, const __constant char *p);
-uchar4 __ovld vload4(size_t offset, const __constant uchar *p);
-short4 __ovld vload4(size_t offset, const __constant short *p);
-ushort4 __ovld vload4(size_t offset, const __constant ushort *p);
-int4 __ovld vload4(size_t offset, const __constant int *p);
-uint4 __ovld vload4(size_t offset, const __constant uint *p);
-long4 __ovld vload4(size_t offset, const __constant long *p);
-ulong4 __ovld vload4(size_t offset, const __constant ulong *p);
-float4 __ovld vload4(size_t offset, const __constant float *p);
-char8 __ovld vload8(size_t offset, const __constant char *p);
-uchar8 __ovld vload8(size_t offset, const __constant uchar *p);
-short8 __ovld vload8(size_t offset, const __constant short *p);
-ushort8 __ovld vload8(size_t offset, const __constant ushort *p);
-int8 __ovld vload8(size_t offset, const __constant int *p);
-uint8 __ovld vload8(size_t offset, const __constant uint *p);
-long8 __ovld vload8(size_t offset, const __constant long *p);
-ulong8 __ovld vload8(size_t offset, const __constant ulong *p);
-float8 __ovld vload8(size_t offset, const __constant float *p);
-char16 __ovld vload16(size_t offset, const __constant char *p);
-uchar16 __ovld vload16(size_t offset, const __constant uchar *p);
-short16 __ovld vload16(size_t offset, const __constant short *p);
-ushort16 __ovld vload16(size_t offset, const __constant ushort *p);
-int16 __ovld vload16(size_t offset, const __constant int *p);
-uint16 __ovld vload16(size_t offset, const __constant uint *p);
-long16 __ovld vload16(size_t offset, const __constant long *p);
-ulong16 __ovld vload16(size_t offset, const __constant ulong *p);
-float16 __ovld vload16(size_t offset, const __constant float *p);
+char2 __ovld __purefn vload2(size_t, const __constant char *);
+uchar2 __ovld __purefn vload2(size_t, const __constant uchar *);
+short2 __ovld __purefn vload2(size_t, const __constant short *);
+ushort2 __ovld __purefn vload2(size_t, const __constant ushort *);
+int2 __ovld __purefn vload2(size_t, const __constant int *);
+uint2 __ovld __purefn vload2(size_t, const __constant uint *);
+long2 __ovld __purefn vload2(size_t, const __constant long *);
+ulong2 __ovld __purefn vload2(size_t, const __constant ulong *);
+float2 __ovld __purefn vload2(size_t, const __constant float *);
+char3 __ovld __purefn vload3(size_t, const __constant char *);
+uchar3 __ovld __purefn vload3(size_t, const __constant uchar *);
+short3 __ovld __purefn vload3(size_t, const __constant short *);
+ushort3 __ovld __purefn vload3(size_t, const __constant ushort *);
+int3 __ovld __purefn vload3(size_t, const __constant int *);
+uint3 __ovld __purefn vload3(size_t, const __constant uint *);
+long3 __ovld __purefn vload3(size_t, const __constant long *);
+ulong3 __ovld __purefn vload3(size_t, const __constant ulong *);
+float3 __ovld __purefn vload3(size_t, const __constant float *);
+char4 __ovld __purefn vload4(size_t, const __constant char *);
+uchar4 __ovld __purefn vload4(size_t, const __constant uchar *);
+short4 __ovld __purefn vload4(size_t, const __constant short *);
+ushort4 __ovld __purefn vload4(size_t, const __constant ushort *);
+int4 __ovld __purefn vload4(size_t, const __constant int *);
+uint4 __ovld __purefn vload4(size_t, const __constant uint *);
+long4 __ovld __purefn vload4(size_t, const __constant long *);
+ulong4 __ovld __purefn vload4(size_t, const __constant ulong *);
+float4 __ovld __purefn vload4(size_t, const __constant float *);
+char8 __ovld __purefn vload8(size_t, const __constant char *);
+uchar8 __ovld __purefn vload8(size_t, const __constant uchar *);
+short8 __ovld __purefn vload8(size_t, const __constant short *);
+ushort8 __ovld __purefn vload8(size_t, const __constant ushort *);
+int8 __ovld __purefn vload8(size_t, const __constant int *);
+uint8 __ovld __purefn vload8(size_t, const __constant uint *);
+long8 __ovld __purefn vload8(size_t, const __constant long *);
+ulong8 __ovld __purefn vload8(size_t, const __constant ulong *);
+float8 __ovld __purefn vload8(size_t, const __constant float *);
+char16 __ovld __purefn vload16(size_t, const __constant char *);
+uchar16 __ovld __purefn vload16(size_t, const __constant uchar *);
+short16 __ovld __purefn vload16(size_t, const __constant short *);
+ushort16 __ovld __purefn vload16(size_t, const __constant ushort *);
+int16 __ovld __purefn vload16(size_t, const __constant int *);
+uint16 __ovld __purefn vload16(size_t, const __constant uint *);
+long16 __ovld __purefn vload16(size_t, const __constant long *);
+ulong16 __ovld __purefn vload16(size_t, const __constant ulong *);
+float16 __ovld __purefn vload16(size_t, const __constant float *);
#ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const __constant double *p);
-double3 __ovld vload3(size_t offset, const __constant double *p);
-double4 __ovld vload4(size_t offset, const __constant double *p);
-double8 __ovld vload8(size_t offset, const __constant double *p);
-double16 __ovld vload16(size_t offset, const __constant double *p);
+double2 __ovld __purefn vload2(size_t, const __constant double *);
+double3 __ovld __purefn vload3(size_t, const __constant double *);
+double4 __ovld __purefn vload4(size_t, const __constant double *);
+double8 __ovld __purefn vload8(size_t, const __constant double *);
+double16 __ovld __purefn vload16(size_t, const __constant double *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const __constant half *p);
-half2 __ovld vload2(size_t offset, const __constant half *p);
-half3 __ovld vload3(size_t offset, const __constant half *p);
-half4 __ovld vload4(size_t offset, const __constant half *p);
-half8 __ovld vload8(size_t offset, const __constant half *p);
-half16 __ovld vload16(size_t offset, const __constant half *p);
+half2 __ovld __purefn vload2(size_t, const __constant half *);
+half3 __ovld __purefn vload3(size_t, const __constant half *);
+half4 __ovld __purefn vload4(size_t, const __constant half *);
+half8 __ovld __purefn vload8(size_t, const __constant half *);
+half16 __ovld __purefn vload16(size_t, const __constant half *);
#endif //cl_khr_fp16
#if defined(__opencl_c_generic_address_space)
-char2 __ovld vload2(size_t offset, const char *p);
-uchar2 __ovld vload2(size_t offset, const uchar *p);
-short2 __ovld vload2(size_t offset, const short *p);
-ushort2 __ovld vload2(size_t offset, const ushort *p);
-int2 __ovld vload2(size_t offset, const int *p);
-uint2 __ovld vload2(size_t offset, const uint *p);
-long2 __ovld vload2(size_t offset, const long *p);
-ulong2 __ovld vload2(size_t offset, const ulong *p);
-float2 __ovld vload2(size_t offset, const float *p);
-char3 __ovld vload3(size_t offset, const char *p);
-uchar3 __ovld vload3(size_t offset, const uchar *p);
-short3 __ovld vload3(size_t offset, const short *p);
-ushort3 __ovld vload3(size_t offset, const ushort *p);
-int3 __ovld vload3(size_t offset, const int *p);
-uint3 __ovld vload3(size_t offset, const uint *p);
-long3 __ovld vload3(size_t offset, const long *p);
-ulong3 __ovld vload3(size_t offset, const ulong *p);
-float3 __ovld vload3(size_t offset, const float *p);
-char4 __ovld vload4(size_t offset, const char *p);
-uchar4 __ovld vload4(size_t offset, const uchar *p);
-short4 __ovld vload4(size_t offset, const short *p);
-ushort4 __ovld vload4(size_t offset, const ushort *p);
-int4 __ovld vload4(size_t offset, const int *p);
-uint4 __ovld vload4(size_t offset, const uint *p);
-long4 __ovld vload4(size_t offset, const long *p);
-ulong4 __ovld vload4(size_t offset, const ulong *p);
-float4 __ovld vload4(size_t offset, const float *p);
-char8 __ovld vload8(size_t offset, const char *p);
-uchar8 __ovld vload8(size_t offset, const uchar *p);
-short8 __ovld vload8(size_t offset, const short *p);
-ushort8 __ovld vload8(size_t offset, const ushort *p);
-int8 __ovld vload8(size_t offset, const int *p);
-uint8 __ovld vload8(size_t offset, const uint *p);
-long8 __ovld vload8(size_t offset, const long *p);
-ulong8 __ovld vload8(size_t offset, const ulong *p);
-float8 __ovld vload8(size_t offset, const float *p);
-char16 __ovld vload16(size_t offset, const char *p);
-uchar16 __ovld vload16(size_t offset, const uchar *p);
-short16 __ovld vload16(size_t offset, const short *p);
-ushort16 __ovld vload16(size_t offset, const ushort *p);
-int16 __ovld vload16(size_t offset, const int *p);
-uint16 __ovld vload16(size_t offset, const uint *p);
-long16 __ovld vload16(size_t offset, const long *p);
-ulong16 __ovld vload16(size_t offset, const ulong *p);
-float16 __ovld vload16(size_t offset, const float *p);
+char2 __ovld __purefn vload2(size_t, const char *);
+uchar2 __ovld __purefn vload2(size_t, const uchar *);
+short2 __ovld __purefn vload2(size_t, const short *);
+ushort2 __ovld __purefn vload2(size_t, const ushort *);
+int2 __ovld __purefn vload2(size_t, const int *);
+uint2 __ovld __purefn vload2(size_t, const uint *);
+long2 __ovld __purefn vload2(size_t, const long *);
+ulong2 __ovld __purefn vload2(size_t, const ulong *);
+float2 __ovld __purefn vload2(size_t, const float *);
+char3 __ovld __purefn vload3(size_t, const char *);
+uchar3 __ovld __purefn vload3(size_t, const uchar *);
+short3 __ovld __purefn vload3(size_t, const short *);
+ushort3 __ovld __purefn vload3(size_t, const ushort *);
+int3 __ovld __purefn vload3(size_t, const int *);
+uint3 __ovld __purefn vload3(size_t, const uint *);
+long3 __ovld __purefn vload3(size_t, const long *);
+ulong3 __ovld __purefn vload3(size_t, const ulong *);
+float3 __ovld __purefn vload3(size_t, const float *);
+char4 __ovld __purefn vload4(size_t, const char *);
+uchar4 __ovld __purefn vload4(size_t, const uchar *);
+short4 __ovld __purefn vload4(size_t, const short *);
+ushort4 __ovld __purefn vload4(size_t, const ushort *);
+int4 __ovld __purefn vload4(size_t, const int *);
+uint4 __ovld __purefn vload4(size_t, const uint *);
+long4 __ovld __purefn vload4(size_t, const long *);
+ulong4 __ovld __purefn vload4(size_t, const ulong *);
+float4 __ovld __purefn vload4(size_t, const float *);
+char8 __ovld __purefn vload8(size_t, const char *);
+uchar8 __ovld __purefn vload8(size_t, const uchar *);
+short8 __ovld __purefn vload8(size_t, const short *);
+ushort8 __ovld __purefn vload8(size_t, const ushort *);
+int8 __ovld __purefn vload8(size_t, const int *);
+uint8 __ovld __purefn vload8(size_t, const uint *);
+long8 __ovld __purefn vload8(size_t, const long *);
+ulong8 __ovld __purefn vload8(size_t, const ulong *);
+float8 __ovld __purefn vload8(size_t, const float *);
+char16 __ovld __purefn vload16(size_t, const char *);
+uchar16 __ovld __purefn vload16(size_t, const uchar *);
+short16 __ovld __purefn vload16(size_t, const short *);
+ushort16 __ovld __purefn vload16(size_t, const ushort *);
+int16 __ovld __purefn vload16(size_t, const int *);
+uint16 __ovld __purefn vload16(size_t, const uint *);
+long16 __ovld __purefn vload16(size_t, const long *);
+ulong16 __ovld __purefn vload16(size_t, const ulong *);
+float16 __ovld __purefn vload16(size_t, const float *);
#ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const double *p);
-double3 __ovld vload3(size_t offset, const double *p);
-double4 __ovld vload4(size_t offset, const double *p);
-double8 __ovld vload8(size_t offset, const double *p);
-double16 __ovld vload16(size_t offset, const double *p);
+double2 __ovld __purefn vload2(size_t, const double *);
+double3 __ovld __purefn vload3(size_t, const double *);
+double4 __ovld __purefn vload4(size_t, const double *);
+double8 __ovld __purefn vload8(size_t, const double *);
+double16 __ovld __purefn vload16(size_t, const double *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const half *p);
-half2 __ovld vload2(size_t offset, const half *p);
-half3 __ovld vload3(size_t offset, const half *p);
-half4 __ovld vload4(size_t offset, const half *p);
-half8 __ovld vload8(size_t offset, const half *p);
-half16 __ovld vload16(size_t offset, const half *p);
+half2 __ovld __purefn vload2(size_t, const half *);
+half3 __ovld __purefn vload3(size_t, const half *);
+half4 __ovld __purefn vload4(size_t, const half *);
+half8 __ovld __purefn vload8(size_t, const half *);
+half16 __ovld __purefn vload16(size_t, const half *);
#endif //cl_khr_fp16
-#else
-char2 __ovld vload2(size_t offset, const __global char *p);
-uchar2 __ovld vload2(size_t offset, const __global uchar *p);
-short2 __ovld vload2(size_t offset, const __global short *p);
-ushort2 __ovld vload2(size_t offset, const __global ushort *p);
-int2 __ovld vload2(size_t offset, const __global int *p);
-uint2 __ovld vload2(size_t offset, const __global uint *p);
-long2 __ovld vload2(size_t offset, const __global long *p);
-ulong2 __ovld vload2(size_t offset, const __global ulong *p);
-float2 __ovld vload2(size_t offset, const __global float *p);
-char3 __ovld vload3(size_t offset, const __global char *p);
-uchar3 __ovld vload3(size_t offset, const __global uchar *p);
-short3 __ovld vload3(size_t offset, const __global short *p);
-ushort3 __ovld vload3(size_t offset, const __global ushort *p);
-int3 __ovld vload3(size_t offset, const __global int *p);
-uint3 __ovld vload3(size_t offset, const __global uint *p);
-long3 __ovld vload3(size_t offset, const __global long *p);
-ulong3 __ovld vload3(size_t offset, const __global ulong *p);
-float3 __ovld vload3(size_t offset, const __global float *p);
-char4 __ovld vload4(size_t offset, const __global char *p);
-uchar4 __ovld vload4(size_t offset, const __global uchar *p);
-short4 __ovld vload4(size_t offset, const __global short *p);
-ushort4 __ovld vload4(size_t offset, const __global ushort *p);
-int4 __ovld vload4(size_t offset, const __global int *p);
-uint4 __ovld vload4(size_t offset, const __global uint *p);
-long4 __ovld vload4(size_t offset, const __global long *p);
-ulong4 __ovld vload4(size_t offset, const __global ulong *p);
-float4 __ovld vload4(size_t offset, const __global float *p);
-char8 __ovld vload8(size_t offset, const __global char *p);
-uchar8 __ovld vload8(size_t offset, const __global uchar *p);
-short8 __ovld vload8(size_t offset, const __global short *p);
-ushort8 __ovld vload8(size_t offset, const __global ushort *p);
-int8 __ovld vload8(size_t offset, const __global int *p);
-uint8 __ovld vload8(size_t offset, const __global uint *p);
-long8 __ovld vload8(size_t offset, const __global long *p);
-ulong8 __ovld vload8(size_t offset, const __global ulong *p);
-float8 __ovld vload8(size_t offset, const __global float *p);
-char16 __ovld vload16(size_t offset, const __global char *p);
-uchar16 __ovld vload16(size_t offset, const __global uchar *p);
-short16 __ovld vload16(size_t offset, const __global short *p);
-ushort16 __ovld vload16(size_t offset, const __global ushort *p);
-int16 __ovld vload16(size_t offset, const __global int *p);
-uint16 __ovld vload16(size_t offset, const __global uint *p);
-long16 __ovld vload16(size_t offset, const __global long *p);
-ulong16 __ovld vload16(size_t offset, const __global ulong *p);
-float16 __ovld vload16(size_t offset, const __global float *p);
-char2 __ovld vload2(size_t offset, const __local char *p);
-uchar2 __ovld vload2(size_t offset, const __local uchar *p);
-short2 __ovld vload2(size_t offset, const __local short *p);
-ushort2 __ovld vload2(size_t offset, const __local ushort *p);
-int2 __ovld vload2(size_t offset, const __local int *p);
-uint2 __ovld vload2(size_t offset, const __local uint *p);
-long2 __ovld vload2(size_t offset, const __local long *p);
-ulong2 __ovld vload2(size_t offset, const __local ulong *p);
-float2 __ovld vload2(size_t offset, const __local float *p);
-char3 __ovld vload3(size_t offset, const __local char *p);
-uchar3 __ovld vload3(size_t offset, const __local uchar *p);
-short3 __ovld vload3(size_t offset, const __local short *p);
-ushort3 __ovld vload3(size_t offset, const __local ushort *p);
-int3 __ovld vload3(size_t offset, const __local int *p);
-uint3 __ovld vload3(size_t offset, const __local uint *p);
-long3 __ovld vload3(size_t offset, const __local long *p);
-ulong3 __ovld vload3(size_t offset, const __local ulong *p);
-float3 __ovld vload3(size_t offset, const __local float *p);
-char4 __ovld vload4(size_t offset, const __local char *p);
-uchar4 __ovld vload4(size_t offset, const __local uchar *p);
-short4 __ovld vload4(size_t offset, const __local short *p);
-ushort4 __ovld vload4(size_t offset, const __local ushort *p);
-int4 __ovld vload4(size_t offset, const __local int *p);
-uint4 __ovld vload4(size_t offset, const __local uint *p);
-long4 __ovld vload4(size_t offset, const __local long *p);
-ulong4 __ovld vload4(size_t offset, const __local ulong *p);
-float4 __ovld vload4(size_t offset, const __local float *p);
-char8 __ovld vload8(size_t offset, const __local char *p);
-uchar8 __ovld vload8(size_t offset, const __local uchar *p);
-short8 __ovld vload8(size_t offset, const __local short *p);
-ushort8 __ovld vload8(size_t offset, const __local ushort *p);
-int8 __ovld vload8(size_t offset, const __local int *p);
-uint8 __ovld vload8(size_t offset, const __local uint *p);
-long8 __ovld vload8(size_t offset, const __local long *p);
-ulong8 __ovld vload8(size_t offset, const __local ulong *p);
-float8 __ovld vload8(size_t offset, const __local float *p);
-char16 __ovld vload16(size_t offset, const __local char *p);
-uchar16 __ovld vload16(size_t offset, const __local uchar *p);
-short16 __ovld vload16(size_t offset, const __local short *p);
-ushort16 __ovld vload16(size_t offset, const __local ushort *p);
-int16 __ovld vload16(size_t offset, const __local int *p);
-uint16 __ovld vload16(size_t offset, const __local uint *p);
-long16 __ovld vload16(size_t offset, const __local long *p);
-ulong16 __ovld vload16(size_t offset, const __local ulong *p);
-float16 __ovld vload16(size_t offset, const __local float *p);
-char2 __ovld vload2(size_t offset, const __private char *p);
-uchar2 __ovld vload2(size_t offset, const __private uchar *p);
-short2 __ovld vload2(size_t offset, const __private short *p);
-ushort2 __ovld vload2(size_t offset, const __private ushort *p);
-int2 __ovld vload2(size_t offset, const __private int *p);
-uint2 __ovld vload2(size_t offset, const __private uint *p);
-long2 __ovld vload2(size_t offset, const __private long *p);
-ulong2 __ovld vload2(size_t offset, const __private ulong *p);
-float2 __ovld vload2(size_t offset, const __private float *p);
-char3 __ovld vload3(size_t offset, const __private char *p);
-uchar3 __ovld vload3(size_t offset, const __private uchar *p);
-short3 __ovld vload3(size_t offset, const __private short *p);
-ushort3 __ovld vload3(size_t offset, const __private ushort *p);
-int3 __ovld vload3(size_t offset, const __private int *p);
-uint3 __ovld vload3(size_t offset, const __private uint *p);
-long3 __ovld vload3(size_t offset, const __private long *p);
-ulong3 __ovld vload3(size_t offset, const __private ulong *p);
-float3 __ovld vload3(size_t offset, const __private float *p);
-char4 __ovld vload4(size_t offset, const __private char *p);
-uchar4 __ovld vload4(size_t offset, const __private uchar *p);
-short4 __ovld vload4(size_t offset, const __private short *p);
-ushort4 __ovld vload4(size_t offset, const __private ushort *p);
-int4 __ovld vload4(size_t offset, const __private int *p);
-uint4 __ovld vload4(size_t offset, const __private uint *p);
-long4 __ovld vload4(size_t offset, const __private long *p);
-ulong4 __ovld vload4(size_t offset, const __private ulong *p);
-float4 __ovld vload4(size_t offset, const __private float *p);
-char8 __ovld vload8(size_t offset, const __private char *p);
-uchar8 __ovld vload8(size_t offset, const __private uchar *p);
-short8 __ovld vload8(size_t offset, const __private short *p);
-ushort8 __ovld vload8(size_t offset, const __private ushort *p);
-int8 __ovld vload8(size_t offset, const __private int *p);
-uint8 __ovld vload8(size_t offset, const __private uint *p);
-long8 __ovld vload8(size_t offset, const __private long *p);
-ulong8 __ovld vload8(size_t offset, const __private ulong *p);
-float8 __ovld vload8(size_t offset, const __private float *p);
-char16 __ovld vload16(size_t offset, const __private char *p);
-uchar16 __ovld vload16(size_t offset, const __private uchar *p);
-short16 __ovld vload16(size_t offset, const __private short *p);
-ushort16 __ovld vload16(size_t offset, const __private ushort *p);
-int16 __ovld vload16(size_t offset, const __private int *p);
-uint16 __ovld vload16(size_t offset, const __private uint *p);
-long16 __ovld vload16(size_t offset, const __private long *p);
-ulong16 __ovld vload16(size_t offset, const __private ulong *p);
-float16 __ovld vload16(size_t offset, const __private float *p);
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+char2 __ovld __purefn vload2(size_t, const __global char *);
+uchar2 __ovld __purefn vload2(size_t, const __global uchar *);
+short2 __ovld __purefn vload2(size_t, const __global short *);
+ushort2 __ovld __purefn vload2(size_t, const __global ushort *);
+int2 __ovld __purefn vload2(size_t, const __global int *);
+uint2 __ovld __purefn vload2(size_t, const __global uint *);
+long2 __ovld __purefn vload2(size_t, const __global long *);
+ulong2 __ovld __purefn vload2(size_t, const __global ulong *);
+float2 __ovld __purefn vload2(size_t, const __global float *);
+char3 __ovld __purefn vload3(size_t, const __global char *);
+uchar3 __ovld __purefn vload3(size_t, const __global uchar *);
+short3 __ovld __purefn vload3(size_t, const __global short *);
+ushort3 __ovld __purefn vload3(size_t, const __global ushort *);
+int3 __ovld __purefn vload3(size_t, const __global int *);
+uint3 __ovld __purefn vload3(size_t, const __global uint *);
+long3 __ovld __purefn vload3(size_t, const __global long *);
+ulong3 __ovld __purefn vload3(size_t, const __global ulong *);
+float3 __ovld __purefn vload3(size_t, const __global float *);
+char4 __ovld __purefn vload4(size_t, const __global char *);
+uchar4 __ovld __purefn vload4(size_t, const __global uchar *);
+short4 __ovld __purefn vload4(size_t, const __global short *);
+ushort4 __ovld __purefn vload4(size_t, const __global ushort *);
+int4 __ovld __purefn vload4(size_t, const __global int *);
+uint4 __ovld __purefn vload4(size_t, const __global uint *);
+long4 __ovld __purefn vload4(size_t, const __global long *);
+ulong4 __ovld __purefn vload4(size_t, const __global ulong *);
+float4 __ovld __purefn vload4(size_t, const __global float *);
+char8 __ovld __purefn vload8(size_t, const __global char *);
+uchar8 __ovld __purefn vload8(size_t, const __global uchar *);
+short8 __ovld __purefn vload8(size_t, const __global short *);
+ushort8 __ovld __purefn vload8(size_t, const __global ushort *);
+int8 __ovld __purefn vload8(size_t, const __global int *);
+uint8 __ovld __purefn vload8(size_t, const __global uint *);
+long8 __ovld __purefn vload8(size_t, const __global long *);
+ulong8 __ovld __purefn vload8(size_t, const __global ulong *);
+float8 __ovld __purefn vload8(size_t, const __global float *);
+char16 __ovld __purefn vload16(size_t, const __global char *);
+uchar16 __ovld __purefn vload16(size_t, const __global uchar *);
+short16 __ovld __purefn vload16(size_t, const __global short *);
+ushort16 __ovld __purefn vload16(size_t, const __global ushort *);
+int16 __ovld __purefn vload16(size_t, const __global int *);
+uint16 __ovld __purefn vload16(size_t, const __global uint *);
+long16 __ovld __purefn vload16(size_t, const __global long *);
+ulong16 __ovld __purefn vload16(size_t, const __global ulong *);
+float16 __ovld __purefn vload16(size_t, const __global float *);
+char2 __ovld __purefn vload2(size_t, const __local char *);
+uchar2 __ovld __purefn vload2(size_t, const __local uchar *);
+short2 __ovld __purefn vload2(size_t, const __local short *);
+ushort2 __ovld __purefn vload2(size_t, const __local ushort *);
+int2 __ovld __purefn vload2(size_t, const __local int *);
+uint2 __ovld __purefn vload2(size_t, const __local uint *);
+long2 __ovld __purefn vload2(size_t, const __local long *);
+ulong2 __ovld __purefn vload2(size_t, const __local ulong *);
+float2 __ovld __purefn vload2(size_t, const __local float *);
+char3 __ovld __purefn vload3(size_t, const __local char *);
+uchar3 __ovld __purefn vload3(size_t, const __local uchar *);
+short3 __ovld __purefn vload3(size_t, const __local short *);
+ushort3 __ovld __purefn vload3(size_t, const __local ushort *);
+int3 __ovld __purefn vload3(size_t, const __local int *);
+uint3 __ovld __purefn vload3(size_t, const __local uint *);
+long3 __ovld __purefn vload3(size_t, const __local long *);
+ulong3 __ovld __purefn vload3(size_t, const __local ulong *);
+float3 __ovld __purefn vload3(size_t, const __local float *);
+char4 __ovld __purefn vload4(size_t, const __local char *);
+uchar4 __ovld __purefn vload4(size_t, const __local uchar *);
+short4 __ovld __purefn vload4(size_t, const __local short *);
+ushort4 __ovld __purefn vload4(size_t, const __local ushort *);
+int4 __ovld __purefn vload4(size_t, const __local int *);
+uint4 __ovld __purefn vload4(size_t, const __local uint *);
+long4 __ovld __purefn vload4(size_t, const __local long *);
+ulong4 __ovld __purefn vload4(size_t, const __local ulong *);
+float4 __ovld __purefn vload4(size_t, const __local float *);
+char8 __ovld __purefn vload8(size_t, const __local char *);
+uchar8 __ovld __purefn vload8(size_t, const __local uchar *);
+short8 __ovld __purefn vload8(size_t, const __local short *);
+ushort8 __ovld __purefn vload8(size_t, const __local ushort *);
+int8 __ovld __purefn vload8(size_t, const __local int *);
+uint8 __ovld __purefn vload8(size_t, const __local uint *);
+long8 __ovld __purefn vload8(size_t, const __local long *);
+ulong8 __ovld __purefn vload8(size_t, const __local ulong *);
+float8 __ovld __purefn vload8(size_t, const __local float *);
+char16 __ovld __purefn vload16(size_t, const __local char *);
+uchar16 __ovld __purefn vload16(size_t, const __local uchar *);
+short16 __ovld __purefn vload16(size_t, const __local short *);
+ushort16 __ovld __purefn vload16(size_t, const __local ushort *);
+int16 __ovld __purefn vload16(size_t, const __local int *);
+uint16 __ovld __purefn vload16(size_t, const __local uint *);
+long16 __ovld __purefn vload16(size_t, const __local long *);
+ulong16 __ovld __purefn vload16(size_t, const __local ulong *);
+float16 __ovld __purefn vload16(size_t, const __local float *);
+char2 __ovld __purefn vload2(size_t, const __private char *);
+uchar2 __ovld __purefn vload2(size_t, const __private uchar *);
+short2 __ovld __purefn vload2(size_t, const __private short *);
+ushort2 __ovld __purefn vload2(size_t, const __private ushort *);
+int2 __ovld __purefn vload2(size_t, const __private int *);
+uint2 __ovld __purefn vload2(size_t, const __private uint *);
+long2 __ovld __purefn vload2(size_t, const __private long *);
+ulong2 __ovld __purefn vload2(size_t, const __private ulong *);
+float2 __ovld __purefn vload2(size_t, const __private float *);
+char3 __ovld __purefn vload3(size_t, const __private char *);
+uchar3 __ovld __purefn vload3(size_t, const __private uchar *);
+short3 __ovld __purefn vload3(size_t, const __private short *);
+ushort3 __ovld __purefn vload3(size_t, const __private ushort *);
+int3 __ovld __purefn vload3(size_t, const __private int *);
+uint3 __ovld __purefn vload3(size_t, const __private uint *);
+long3 __ovld __purefn vload3(size_t, const __private long *);
+ulong3 __ovld __purefn vload3(size_t, const __private ulong *);
+float3 __ovld __purefn vload3(size_t, const __private float *);
+char4 __ovld __purefn vload4(size_t, const __private char *);
+uchar4 __ovld __purefn vload4(size_t, const __private uchar *);
+short4 __ovld __purefn vload4(size_t, const __private short *);
+ushort4 __ovld __purefn vload4(size_t, const __private ushort *);
+int4 __ovld __purefn vload4(size_t, const __private int *);
+uint4 __ovld __purefn vload4(size_t, const __private uint *);
+long4 __ovld __purefn vload4(size_t, const __private long *);
+ulong4 __ovld __purefn vload4(size_t, const __private ulong *);
+float4 __ovld __purefn vload4(size_t, const __private float *);
+char8 __ovld __purefn vload8(size_t, const __private char *);
+uchar8 __ovld __purefn vload8(size_t, const __private uchar *);
+short8 __ovld __purefn vload8(size_t, const __private short *);
+ushort8 __ovld __purefn vload8(size_t, const __private ushort *);
+int8 __ovld __purefn vload8(size_t, const __private int *);
+uint8 __ovld __purefn vload8(size_t, const __private uint *);
+long8 __ovld __purefn vload8(size_t, const __private long *);
+ulong8 __ovld __purefn vload8(size_t, const __private ulong *);
+float8 __ovld __purefn vload8(size_t, const __private float *);
+char16 __ovld __purefn vload16(size_t, const __private char *);
+uchar16 __ovld __purefn vload16(size_t, const __private uchar *);
+short16 __ovld __purefn vload16(size_t, const __private short *);
+ushort16 __ovld __purefn vload16(size_t, const __private ushort *);
+int16 __ovld __purefn vload16(size_t, const __private int *);
+uint16 __ovld __purefn vload16(size_t, const __private uint *);
+long16 __ovld __purefn vload16(size_t, const __private long *);
+ulong16 __ovld __purefn vload16(size_t, const __private ulong *);
+float16 __ovld __purefn vload16(size_t, const __private float *);
#ifdef cl_khr_fp64
-double2 __ovld vload2(size_t offset, const __global double *p);
-double3 __ovld vload3(size_t offset, const __global double *p);
-double4 __ovld vload4(size_t offset, const __global double *p);
-double8 __ovld vload8(size_t offset, const __global double *p);
-double16 __ovld vload16(size_t offset, const __global double *p);
-double2 __ovld vload2(size_t offset, const __local double *p);
-double3 __ovld vload3(size_t offset, const __local double *p);
-double4 __ovld vload4(size_t offset, const __local double *p);
-double8 __ovld vload8(size_t offset, const __local double *p);
-double16 __ovld vload16(size_t offset, const __local double *p);
-double2 __ovld vload2(size_t offset, const __private double *p);
-double3 __ovld vload3(size_t offset, const __private double *p);
-double4 __ovld vload4(size_t offset, const __private double *p);
-double8 __ovld vload8(size_t offset, const __private double *p);
-double16 __ovld vload16(size_t offset, const __private double *p);
+double2 __ovld __purefn vload2(size_t, const __global double *);
+double3 __ovld __purefn vload3(size_t, const __global double *);
+double4 __ovld __purefn vload4(size_t, const __global double *);
+double8 __ovld __purefn vload8(size_t, const __global double *);
+double16 __ovld __purefn vload16(size_t, const __global double *);
+double2 __ovld __purefn vload2(size_t, const __local double *);
+double3 __ovld __purefn vload3(size_t, const __local double *);
+double4 __ovld __purefn vload4(size_t, const __local double *);
+double8 __ovld __purefn vload8(size_t, const __local double *);
+double16 __ovld __purefn vload16(size_t, const __local double *);
+double2 __ovld __purefn vload2(size_t, const __private double *);
+double3 __ovld __purefn vload3(size_t, const __private double *);
+double4 __ovld __purefn vload4(size_t, const __private double *);
+double8 __ovld __purefn vload8(size_t, const __private double *);
+double16 __ovld __purefn vload16(size_t, const __private double *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld vload(size_t offset, const __global half *p);
-half2 __ovld vload2(size_t offset, const __global half *p);
-half3 __ovld vload3(size_t offset, const __global half *p);
-half4 __ovld vload4(size_t offset, const __global half *p);
-half8 __ovld vload8(size_t offset, const __global half *p);
-half16 __ovld vload16(size_t offset, const __global half *p);
-half __ovld vload(size_t offset, const __local half *p);
-half2 __ovld vload2(size_t offset, const __local half *p);
-half3 __ovld vload3(size_t offset, const __local half *p);
-half4 __ovld vload4(size_t offset, const __local half *p);
-half8 __ovld vload8(size_t offset, const __local half *p);
-half16 __ovld vload16(size_t offset, const __local half *p);
-half __ovld vload(size_t offset, const __private half *p);
-half2 __ovld vload2(size_t offset, const __private half *p);
-half3 __ovld vload3(size_t offset, const __private half *p);
-half4 __ovld vload4(size_t offset, const __private half *p);
-half8 __ovld vload8(size_t offset, const __private half *p);
-half16 __ovld vload16(size_t offset, const __private half *p);
+half2 __ovld __purefn vload2(size_t, const __global half *);
+half3 __ovld __purefn vload3(size_t, const __global half *);
+half4 __ovld __purefn vload4(size_t, const __global half *);
+half8 __ovld __purefn vload8(size_t, const __global half *);
+half16 __ovld __purefn vload16(size_t, const __global half *);
+half2 __ovld __purefn vload2(size_t, const __local half *);
+half3 __ovld __purefn vload3(size_t, const __local half *);
+half4 __ovld __purefn vload4(size_t, const __local half *);
+half8 __ovld __purefn vload8(size_t, const __local half *);
+half16 __ovld __purefn vload16(size_t, const __local half *);
+half2 __ovld __purefn vload2(size_t, const __private half *);
+half3 __ovld __purefn vload3(size_t, const __private half *);
+half4 __ovld __purefn vload4(size_t, const __private half *);
+half8 __ovld __purefn vload8(size_t, const __private half *);
+half16 __ovld __purefn vload16(size_t, const __private half *);
#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
#if defined(__opencl_c_generic_address_space)
-void __ovld vstore2(char2 data, size_t offset, char *p);
-void __ovld vstore2(uchar2 data, size_t offset, uchar *p);
-void __ovld vstore2(short2 data, size_t offset, short *p);
-void __ovld vstore2(ushort2 data, size_t offset, ushort *p);
-void __ovld vstore2(int2 data, size_t offset, int *p);
-void __ovld vstore2(uint2 data, size_t offset, uint *p);
-void __ovld vstore2(long2 data, size_t offset, long *p);
-void __ovld vstore2(ulong2 data, size_t offset, ulong *p);
-void __ovld vstore2(float2 data, size_t offset, float *p);
-void __ovld vstore3(char3 data, size_t offset, char *p);
-void __ovld vstore3(uchar3 data, size_t offset, uchar *p);
-void __ovld vstore3(short3 data, size_t offset, short *p);
-void __ovld vstore3(ushort3 data, size_t offset, ushort *p);
-void __ovld vstore3(int3 data, size_t offset, int *p);
-void __ovld vstore3(uint3 data, size_t offset, uint *p);
-void __ovld vstore3(long3 data, size_t offset, long *p);
-void __ovld vstore3(ulong3 data, size_t offset, ulong *p);
-void __ovld vstore3(float3 data, size_t offset, float *p);
-void __ovld vstore4(char4 data, size_t offset, char *p);
-void __ovld vstore4(uchar4 data, size_t offset, uchar *p);
-void __ovld vstore4(short4 data, size_t offset, short *p);
-void __ovld vstore4(ushort4 data, size_t offset, ushort *p);
-void __ovld vstore4(int4 data, size_t offset, int *p);
-void __ovld vstore4(uint4 data, size_t offset, uint *p);
-void __ovld vstore4(long4 data, size_t offset, long *p);
-void __ovld vstore4(ulong4 data, size_t offset, ulong *p);
-void __ovld vstore4(float4 data, size_t offset, float *p);
-void __ovld vstore8(char8 data, size_t offset, char *p);
-void __ovld vstore8(uchar8 data, size_t offset, uchar *p);
-void __ovld vstore8(short8 data, size_t offset, short *p);
-void __ovld vstore8(ushort8 data, size_t offset, ushort *p);
-void __ovld vstore8(int8 data, size_t offset, int *p);
-void __ovld vstore8(uint8 data, size_t offset, uint *p);
-void __ovld vstore8(long8 data, size_t offset, long *p);
-void __ovld vstore8(ulong8 data, size_t offset, ulong *p);
-void __ovld vstore8(float8 data, size_t offset, float *p);
-void __ovld vstore16(char16 data, size_t offset, char *p);
-void __ovld vstore16(uchar16 data, size_t offset, uchar *p);
-void __ovld vstore16(short16 data, size_t offset, short *p);
-void __ovld vstore16(ushort16 data, size_t offset, ushort *p);
-void __ovld vstore16(int16 data, size_t offset, int *p);
-void __ovld vstore16(uint16 data, size_t offset, uint *p);
-void __ovld vstore16(long16 data, size_t offset, long *p);
-void __ovld vstore16(ulong16 data, size_t offset, ulong *p);
-void __ovld vstore16(float16 data, size_t offset, float *p);
+void __ovld vstore2(char2, size_t, char *);
+void __ovld vstore2(uchar2, size_t, uchar *);
+void __ovld vstore2(short2, size_t, short *);
+void __ovld vstore2(ushort2, size_t, ushort *);
+void __ovld vstore2(int2, size_t, int *);
+void __ovld vstore2(uint2, size_t, uint *);
+void __ovld vstore2(long2, size_t, long *);
+void __ovld vstore2(ulong2, size_t, ulong *);
+void __ovld vstore2(float2, size_t, float *);
+void __ovld vstore3(char3, size_t, char *);
+void __ovld vstore3(uchar3, size_t, uchar *);
+void __ovld vstore3(short3, size_t, short *);
+void __ovld vstore3(ushort3, size_t, ushort *);
+void __ovld vstore3(int3, size_t, int *);
+void __ovld vstore3(uint3, size_t, uint *);
+void __ovld vstore3(long3, size_t, long *);
+void __ovld vstore3(ulong3, size_t, ulong *);
+void __ovld vstore3(float3, size_t, float *);
+void __ovld vstore4(char4, size_t, char *);
+void __ovld vstore4(uchar4, size_t, uchar *);
+void __ovld vstore4(short4, size_t, short *);
+void __ovld vstore4(ushort4, size_t, ushort *);
+void __ovld vstore4(int4, size_t, int *);
+void __ovld vstore4(uint4, size_t, uint *);
+void __ovld vstore4(long4, size_t, long *);
+void __ovld vstore4(ulong4, size_t, ulong *);
+void __ovld vstore4(float4, size_t, float *);
+void __ovld vstore8(char8, size_t, char *);
+void __ovld vstore8(uchar8, size_t, uchar *);
+void __ovld vstore8(short8, size_t, short *);
+void __ovld vstore8(ushort8, size_t, ushort *);
+void __ovld vstore8(int8, size_t, int *);
+void __ovld vstore8(uint8, size_t, uint *);
+void __ovld vstore8(long8, size_t, long *);
+void __ovld vstore8(ulong8, size_t, ulong *);
+void __ovld vstore8(float8, size_t, float *);
+void __ovld vstore16(char16, size_t, char *);
+void __ovld vstore16(uchar16, size_t, uchar *);
+void __ovld vstore16(short16, size_t, short *);
+void __ovld vstore16(ushort16, size_t, ushort *);
+void __ovld vstore16(int16, size_t, int *);
+void __ovld vstore16(uint16, size_t, uint *);
+void __ovld vstore16(long16, size_t, long *);
+void __ovld vstore16(ulong16, size_t, ulong *);
+void __ovld vstore16(float16, size_t, float *);
#ifdef cl_khr_fp64
-void __ovld vstore2(double2 data, size_t offset, double *p);
-void __ovld vstore3(double3 data, size_t offset, double *p);
-void __ovld vstore4(double4 data, size_t offset, double *p);
-void __ovld vstore8(double8 data, size_t offset, double *p);
-void __ovld vstore16(double16 data, size_t offset, double *p);
+void __ovld vstore2(double2, size_t, double *);
+void __ovld vstore3(double3, size_t, double *);
+void __ovld vstore4(double4, size_t, double *);
+void __ovld vstore8(double8, size_t, double *);
+void __ovld vstore16(double16, size_t, double *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-void __ovld vstore(half data, size_t offset, half *p);
-void __ovld vstore2(half2 data, size_t offset, half *p);
-void __ovld vstore3(half3 data, size_t offset, half *p);
-void __ovld vstore4(half4 data, size_t offset, half *p);
-void __ovld vstore8(half8 data, size_t offset, half *p);
-void __ovld vstore16(half16 data, size_t offset, half *p);
+void __ovld vstore2(half2, size_t, half *);
+void __ovld vstore3(half3, size_t, half *);
+void __ovld vstore4(half4, size_t, half *);
+void __ovld vstore8(half8, size_t, half *);
+void __ovld vstore16(half16, size_t, half *);
#endif //cl_khr_fp16
-#else
-void __ovld vstore2(char2 data, size_t offset, __global char *p);
-void __ovld vstore2(uchar2 data, size_t offset, __global uchar *p);
-void __ovld vstore2(short2 data, size_t offset, __global short *p);
-void __ovld vstore2(ushort2 data, size_t offset, __global ushort *p);
-void __ovld vstore2(int2 data, size_t offset, __global int *p);
-void __ovld vstore2(uint2 data, size_t offset, __global uint *p);
-void __ovld vstore2(long2 data, size_t offset, __global long *p);
-void __ovld vstore2(ulong2 data, size_t offset, __global ulong *p);
-void __ovld vstore2(float2 data, size_t offset, __global float *p);
-void __ovld vstore3(char3 data, size_t offset, __global char *p);
-void __ovld vstore3(uchar3 data, size_t offset, __global uchar *p);
-void __ovld vstore3(short3 data, size_t offset, __global short *p);
-void __ovld vstore3(ushort3 data, size_t offset, __global ushort *p);
-void __ovld vstore3(int3 data, size_t offset, __global int *p);
-void __ovld vstore3(uint3 data, size_t offset, __global uint *p);
-void __ovld vstore3(long3 data, size_t offset, __global long *p);
-void __ovld vstore3(ulong3 data, size_t offset, __global ulong *p);
-void __ovld vstore3(float3 data, size_t offset, __global float *p);
-void __ovld vstore4(char4 data, size_t offset, __global char *p);
-void __ovld vstore4(uchar4 data, size_t offset, __global uchar *p);
-void __ovld vstore4(short4 data, size_t offset, __global short *p);
-void __ovld vstore4(ushort4 data, size_t offset, __global ushort *p);
-void __ovld vstore4(int4 data, size_t offset, __global int *p);
-void __ovld vstore4(uint4 data, size_t offset, __global uint *p);
-void __ovld vstore4(long4 data, size_t offset, __global long *p);
-void __ovld vstore4(ulong4 data, size_t offset, __global ulong *p);
-void __ovld vstore4(float4 data, size_t offset, __global float *p);
-void __ovld vstore8(char8 data, size_t offset, __global char *p);
-void __ovld vstore8(uchar8 data, size_t offset, __global uchar *p);
-void __ovld vstore8(short8 data, size_t offset, __global short *p);
-void __ovld vstore8(ushort8 data, size_t offset, __global ushort *p);
-void __ovld vstore8(int8 data, size_t offset, __global int *p);
-void __ovld vstore8(uint8 data, size_t offset, __global uint *p);
-void __ovld vstore8(long8 data, size_t offset, __global long *p);
-void __ovld vstore8(ulong8 data, size_t offset, __global ulong *p);
-void __ovld vstore8(float8 data, size_t offset, __global float *p);
-void __ovld vstore16(char16 data, size_t offset, __global char *p);
-void __ovld vstore16(uchar16 data, size_t offset, __global uchar *p);
-void __ovld vstore16(short16 data, size_t offset, __global short *p);
-void __ovld vstore16(ushort16 data, size_t offset, __global ushort *p);
-void __ovld vstore16(int16 data, size_t offset, __global int *p);
-void __ovld vstore16(uint16 data, size_t offset, __global uint *p);
-void __ovld vstore16(long16 data, size_t offset, __global long *p);
-void __ovld vstore16(ulong16 data, size_t offset, __global ulong *p);
-void __ovld vstore16(float16 data, size_t offset, __global float *p);
-void __ovld vstore2(char2 data, size_t offset, __local char *p);
-void __ovld vstore2(uchar2 data, size_t offset, __local uchar *p);
-void __ovld vstore2(short2 data, size_t offset, __local short *p);
-void __ovld vstore2(ushort2 data, size_t offset, __local ushort *p);
-void __ovld vstore2(int2 data, size_t offset, __local int *p);
-void __ovld vstore2(uint2 data, size_t offset, __local uint *p);
-void __ovld vstore2(long2 data, size_t offset, __local long *p);
-void __ovld vstore2(ulong2 data, size_t offset, __local ulong *p);
-void __ovld vstore2(float2 data, size_t offset, __local float *p);
-void __ovld vstore3(char3 data, size_t offset, __local char *p);
-void __ovld vstore3(uchar3 data, size_t offset, __local uchar *p);
-void __ovld vstore3(short3 data, size_t offset, __local short *p);
-void __ovld vstore3(ushort3 data, size_t offset, __local ushort *p);
-void __ovld vstore3(int3 data, size_t offset, __local int *p);
-void __ovld vstore3(uint3 data, size_t offset, __local uint *p);
-void __ovld vstore3(long3 data, size_t offset, __local long *p);
-void __ovld vstore3(ulong3 data, size_t offset, __local ulong *p);
-void __ovld vstore3(float3 data, size_t offset, __local float *p);
-void __ovld vstore4(char4 data, size_t offset, __local char *p);
-void __ovld vstore4(uchar4 data, size_t offset, __local uchar *p);
-void __ovld vstore4(short4 data, size_t offset, __local short *p);
-void __ovld vstore4(ushort4 data, size_t offset, __local ushort *p);
-void __ovld vstore4(int4 data, size_t offset, __local int *p);
-void __ovld vstore4(uint4 data, size_t offset, __local uint *p);
-void __ovld vstore4(long4 data, size_t offset, __local long *p);
-void __ovld vstore4(ulong4 data, size_t offset, __local ulong *p);
-void __ovld vstore4(float4 data, size_t offset, __local float *p);
-void __ovld vstore8(char8 data, size_t offset, __local char *p);
-void __ovld vstore8(uchar8 data, size_t offset, __local uchar *p);
-void __ovld vstore8(short8 data, size_t offset, __local short *p);
-void __ovld vstore8(ushort8 data, size_t offset, __local ushort *p);
-void __ovld vstore8(int8 data, size_t offset, __local int *p);
-void __ovld vstore8(uint8 data, size_t offset, __local uint *p);
-void __ovld vstore8(long8 data, size_t offset, __local long *p);
-void __ovld vstore8(ulong8 data, size_t offset, __local ulong *p);
-void __ovld vstore8(float8 data, size_t offset, __local float *p);
-void __ovld vstore16(char16 data, size_t offset, __local char *p);
-void __ovld vstore16(uchar16 data, size_t offset, __local uchar *p);
-void __ovld vstore16(short16 data, size_t offset, __local short *p);
-void __ovld vstore16(ushort16 data, size_t offset, __local ushort *p);
-void __ovld vstore16(int16 data, size_t offset, __local int *p);
-void __ovld vstore16(uint16 data, size_t offset, __local uint *p);
-void __ovld vstore16(long16 data, size_t offset, __local long *p);
-void __ovld vstore16(ulong16 data, size_t offset, __local ulong *p);
-void __ovld vstore16(float16 data, size_t offset, __local float *p);
-void __ovld vstore2(char2 data, size_t offset, __private char *p);
-void __ovld vstore2(uchar2 data, size_t offset, __private uchar *p);
-void __ovld vstore2(short2 data, size_t offset, __private short *p);
-void __ovld vstore2(ushort2 data, size_t offset, __private ushort *p);
-void __ovld vstore2(int2 data, size_t offset, __private int *p);
-void __ovld vstore2(uint2 data, size_t offset, __private uint *p);
-void __ovld vstore2(long2 data, size_t offset, __private long *p);
-void __ovld vstore2(ulong2 data, size_t offset, __private ulong *p);
-void __ovld vstore2(float2 data, size_t offset, __private float *p);
-void __ovld vstore3(char3 data, size_t offset, __private char *p);
-void __ovld vstore3(uchar3 data, size_t offset, __private uchar *p);
-void __ovld vstore3(short3 data, size_t offset, __private short *p);
-void __ovld vstore3(ushort3 data, size_t offset, __private ushort *p);
-void __ovld vstore3(int3 data, size_t offset, __private int *p);
-void __ovld vstore3(uint3 data, size_t offset, __private uint *p);
-void __ovld vstore3(long3 data, size_t offset, __private long *p);
-void __ovld vstore3(ulong3 data, size_t offset, __private ulong *p);
-void __ovld vstore3(float3 data, size_t offset, __private float *p);
-void __ovld vstore4(char4 data, size_t offset, __private char *p);
-void __ovld vstore4(uchar4 data, size_t offset, __private uchar *p);
-void __ovld vstore4(short4 data, size_t offset, __private short *p);
-void __ovld vstore4(ushort4 data, size_t offset, __private ushort *p);
-void __ovld vstore4(int4 data, size_t offset, __private int *p);
-void __ovld vstore4(uint4 data, size_t offset, __private uint *p);
-void __ovld vstore4(long4 data, size_t offset, __private long *p);
-void __ovld vstore4(ulong4 data, size_t offset, __private ulong *p);
-void __ovld vstore4(float4 data, size_t offset, __private float *p);
-void __ovld vstore8(char8 data, size_t offset, __private char *p);
-void __ovld vstore8(uchar8 data, size_t offset, __private uchar *p);
-void __ovld vstore8(short8 data, size_t offset, __private short *p);
-void __ovld vstore8(ushort8 data, size_t offset, __private ushort *p);
-void __ovld vstore8(int8 data, size_t offset, __private int *p);
-void __ovld vstore8(uint8 data, size_t offset, __private uint *p);
-void __ovld vstore8(long8 data, size_t offset, __private long *p);
-void __ovld vstore8(ulong8 data, size_t offset, __private ulong *p);
-void __ovld vstore8(float8 data, size_t offset, __private float *p);
-void __ovld vstore16(char16 data, size_t offset, __private char *p);
-void __ovld vstore16(uchar16 data, size_t offset, __private uchar *p);
-void __ovld vstore16(short16 data, size_t offset, __private short *p);
-void __ovld vstore16(ushort16 data, size_t offset, __private ushort *p);
-void __ovld vstore16(int16 data, size_t offset, __private int *p);
-void __ovld vstore16(uint16 data, size_t offset, __private uint *p);
-void __ovld vstore16(long16 data, size_t offset, __private long *p);
-void __ovld vstore16(ulong16 data, size_t offset, __private ulong *p);
-void __ovld vstore16(float16 data, size_t offset, __private float *p);
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+void __ovld vstore2(char2, size_t, __global char *);
+void __ovld vstore2(uchar2, size_t, __global uchar *);
+void __ovld vstore2(short2, size_t, __global short *);
+void __ovld vstore2(ushort2, size_t, __global ushort *);
+void __ovld vstore2(int2, size_t, __global int *);
+void __ovld vstore2(uint2, size_t, __global uint *);
+void __ovld vstore2(long2, size_t, __global long *);
+void __ovld vstore2(ulong2, size_t, __global ulong *);
+void __ovld vstore2(float2, size_t, __global float *);
+void __ovld vstore3(char3, size_t, __global char *);
+void __ovld vstore3(uchar3, size_t, __global uchar *);
+void __ovld vstore3(short3, size_t, __global short *);
+void __ovld vstore3(ushort3, size_t, __global ushort *);
+void __ovld vstore3(int3, size_t, __global int *);
+void __ovld vstore3(uint3, size_t, __global uint *);
+void __ovld vstore3(long3, size_t, __global long *);
+void __ovld vstore3(ulong3, size_t, __global ulong *);
+void __ovld vstore3(float3, size_t, __global float *);
+void __ovld vstore4(char4, size_t, __global char *);
+void __ovld vstore4(uchar4, size_t, __global uchar *);
+void __ovld vstore4(short4, size_t, __global short *);
+void __ovld vstore4(ushort4, size_t, __global ushort *);
+void __ovld vstore4(int4, size_t, __global int *);
+void __ovld vstore4(uint4, size_t, __global uint *);
+void __ovld vstore4(long4, size_t, __global long *);
+void __ovld vstore4(ulong4, size_t, __global ulong *);
+void __ovld vstore4(float4, size_t, __global float *);
+void __ovld vstore8(char8, size_t, __global char *);
+void __ovld vstore8(uchar8, size_t, __global uchar *);
+void __ovld vstore8(short8, size_t, __global short *);
+void __ovld vstore8(ushort8, size_t, __global ushort *);
+void __ovld vstore8(int8, size_t, __global int *);
+void __ovld vstore8(uint8, size_t, __global uint *);
+void __ovld vstore8(long8, size_t, __global long *);
+void __ovld vstore8(ulong8, size_t, __global ulong *);
+void __ovld vstore8(float8, size_t, __global float *);
+void __ovld vstore16(char16, size_t, __global char *);
+void __ovld vstore16(uchar16, size_t, __global uchar *);
+void __ovld vstore16(short16, size_t, __global short *);
+void __ovld vstore16(ushort16, size_t, __global ushort *);
+void __ovld vstore16(int16, size_t, __global int *);
+void __ovld vstore16(uint16, size_t, __global uint *);
+void __ovld vstore16(long16, size_t, __global long *);
+void __ovld vstore16(ulong16, size_t, __global ulong *);
+void __ovld vstore16(float16, size_t, __global float *);
+void __ovld vstore2(char2, size_t, __local char *);
+void __ovld vstore2(uchar2, size_t, __local uchar *);
+void __ovld vstore2(short2, size_t, __local short *);
+void __ovld vstore2(ushort2, size_t, __local ushort *);
+void __ovld vstore2(int2, size_t, __local int *);
+void __ovld vstore2(uint2, size_t, __local uint *);
+void __ovld vstore2(long2, size_t, __local long *);
+void __ovld vstore2(ulong2, size_t, __local ulong *);
+void __ovld vstore2(float2, size_t, __local float *);
+void __ovld vstore3(char3, size_t, __local char *);
+void __ovld vstore3(uchar3, size_t, __local uchar *);
+void __ovld vstore3(short3, size_t, __local short *);
+void __ovld vstore3(ushort3, size_t, __local ushort *);
+void __ovld vstore3(int3, size_t, __local int *);
+void __ovld vstore3(uint3, size_t, __local uint *);
+void __ovld vstore3(long3, size_t, __local long *);
+void __ovld vstore3(ulong3, size_t, __local ulong *);
+void __ovld vstore3(float3, size_t, __local float *);
+void __ovld vstore4(char4, size_t, __local char *);
+void __ovld vstore4(uchar4, size_t, __local uchar *);
+void __ovld vstore4(short4, size_t, __local short *);
+void __ovld vstore4(ushort4, size_t, __local ushort *);
+void __ovld vstore4(int4, size_t, __local int *);
+void __ovld vstore4(uint4, size_t, __local uint *);
+void __ovld vstore4(long4, size_t, __local long *);
+void __ovld vstore4(ulong4, size_t, __local ulong *);
+void __ovld vstore4(float4, size_t, __local float *);
+void __ovld vstore8(char8, size_t, __local char *);
+void __ovld vstore8(uchar8, size_t, __local uchar *);
+void __ovld vstore8(short8, size_t, __local short *);
+void __ovld vstore8(ushort8, size_t, __local ushort *);
+void __ovld vstore8(int8, size_t, __local int *);
+void __ovld vstore8(uint8, size_t, __local uint *);
+void __ovld vstore8(long8, size_t, __local long *);
+void __ovld vstore8(ulong8, size_t, __local ulong *);
+void __ovld vstore8(float8, size_t, __local float *);
+void __ovld vstore16(char16, size_t, __local char *);
+void __ovld vstore16(uchar16, size_t, __local uchar *);
+void __ovld vstore16(short16, size_t, __local short *);
+void __ovld vstore16(ushort16, size_t, __local ushort *);
+void __ovld vstore16(int16, size_t, __local int *);
+void __ovld vstore16(uint16, size_t, __local uint *);
+void __ovld vstore16(long16, size_t, __local long *);
+void __ovld vstore16(ulong16, size_t, __local ulong *);
+void __ovld vstore16(float16, size_t, __local float *);
+void __ovld vstore2(char2, size_t, __private char *);
+void __ovld vstore2(uchar2, size_t, __private uchar *);
+void __ovld vstore2(short2, size_t, __private short *);
+void __ovld vstore2(ushort2, size_t, __private ushort *);
+void __ovld vstore2(int2, size_t, __private int *);
+void __ovld vstore2(uint2, size_t, __private uint *);
+void __ovld vstore2(long2, size_t, __private long *);
+void __ovld vstore2(ulong2, size_t, __private ulong *);
+void __ovld vstore2(float2, size_t, __private float *);
+void __ovld vstore3(char3, size_t, __private char *);
+void __ovld vstore3(uchar3, size_t, __private uchar *);
+void __ovld vstore3(short3, size_t, __private short *);
+void __ovld vstore3(ushort3, size_t, __private ushort *);
+void __ovld vstore3(int3, size_t, __private int *);
+void __ovld vstore3(uint3, size_t, __private uint *);
+void __ovld vstore3(long3, size_t, __private long *);
+void __ovld vstore3(ulong3, size_t, __private ulong *);
+void __ovld vstore3(float3, size_t, __private float *);
+void __ovld vstore4(char4, size_t, __private char *);
+void __ovld vstore4(uchar4, size_t, __private uchar *);
+void __ovld vstore4(short4, size_t, __private short *);
+void __ovld vstore4(ushort4, size_t, __private ushort *);
+void __ovld vstore4(int4, size_t, __private int *);
+void __ovld vstore4(uint4, size_t, __private uint *);
+void __ovld vstore4(long4, size_t, __private long *);
+void __ovld vstore4(ulong4, size_t, __private ulong *);
+void __ovld vstore4(float4, size_t, __private float *);
+void __ovld vstore8(char8, size_t, __private char *);
+void __ovld vstore8(uchar8, size_t, __private uchar *);
+void __ovld vstore8(short8, size_t, __private short *);
+void __ovld vstore8(ushort8, size_t, __private ushort *);
+void __ovld vstore8(int8, size_t, __private int *);
+void __ovld vstore8(uint8, size_t, __private uint *);
+void __ovld vstore8(long8, size_t, __private long *);
+void __ovld vstore8(ulong8, size_t, __private ulong *);
+void __ovld vstore8(float8, size_t, __private float *);
+void __ovld vstore16(char16, size_t, __private char *);
+void __ovld vstore16(uchar16, size_t, __private uchar *);
+void __ovld vstore16(short16, size_t, __private short *);
+void __ovld vstore16(ushort16, size_t, __private ushort *);
+void __ovld vstore16(int16, size_t, __private int *);
+void __ovld vstore16(uint16, size_t, __private uint *);
+void __ovld vstore16(long16, size_t, __private long *);
+void __ovld vstore16(ulong16, size_t, __private ulong *);
+void __ovld vstore16(float16, size_t, __private float *);
#ifdef cl_khr_fp64
-void __ovld vstore2(double2 data, size_t offset, __global double *p);
-void __ovld vstore3(double3 data, size_t offset, __global double *p);
-void __ovld vstore4(double4 data, size_t offset, __global double *p);
-void __ovld vstore8(double8 data, size_t offset, __global double *p);
-void __ovld vstore16(double16 data, size_t offset, __global double *p);
-void __ovld vstore2(double2 data, size_t offset, __local double *p);
-void __ovld vstore3(double3 data, size_t offset, __local double *p);
-void __ovld vstore4(double4 data, size_t offset, __local double *p);
-void __ovld vstore8(double8 data, size_t offset, __local double *p);
-void __ovld vstore16(double16 data, size_t offset, __local double *p);
-void __ovld vstore2(double2 data, size_t offset, __private double *p);
-void __ovld vstore3(double3 data, size_t offset, __private double *p);
-void __ovld vstore4(double4 data, size_t offset, __private double *p);
-void __ovld vstore8(double8 data, size_t offset, __private double *p);
-void __ovld vstore16(double16 data, size_t offset, __private double *p);
+void __ovld vstore2(double2, size_t, __global double *);
+void __ovld vstore3(double3, size_t, __global double *);
+void __ovld vstore4(double4, size_t, __global double *);
+void __ovld vstore8(double8, size_t, __global double *);
+void __ovld vstore16(double16, size_t, __global double *);
+void __ovld vstore2(double2, size_t, __local double *);
+void __ovld vstore3(double3, size_t, __local double *);
+void __ovld vstore4(double4, size_t, __local double *);
+void __ovld vstore8(double8, size_t, __local double *);
+void __ovld vstore16(double16, size_t, __local double *);
+void __ovld vstore2(double2, size_t, __private double *);
+void __ovld vstore3(double3, size_t, __private double *);
+void __ovld vstore4(double4, size_t, __private double *);
+void __ovld vstore8(double8, size_t, __private double *);
+void __ovld vstore16(double16, size_t, __private double *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-void __ovld vstore(half data, size_t offset, __global half *p);
-void __ovld vstore2(half2 data, size_t offset, __global half *p);
-void __ovld vstore3(half3 data, size_t offset, __global half *p);
-void __ovld vstore4(half4 data, size_t offset, __global half *p);
-void __ovld vstore8(half8 data, size_t offset, __global half *p);
-void __ovld vstore16(half16 data, size_t offset, __global half *p);
-void __ovld vstore(half data, size_t offset, __local half *p);
-void __ovld vstore2(half2 data, size_t offset, __local half *p);
-void __ovld vstore3(half3 data, size_t offset, __local half *p);
-void __ovld vstore4(half4 data, size_t offset, __local half *p);
-void __ovld vstore8(half8 data, size_t offset, __local half *p);
-void __ovld vstore16(half16 data, size_t offset, __local half *p);
-void __ovld vstore(half data, size_t offset, __private half *p);
-void __ovld vstore2(half2 data, size_t offset, __private half *p);
-void __ovld vstore3(half3 data, size_t offset, __private half *p);
-void __ovld vstore4(half4 data, size_t offset, __private half *p);
-void __ovld vstore8(half8 data, size_t offset, __private half *p);
-void __ovld vstore16(half16 data, size_t offset, __private half *p);
+void __ovld vstore2(half2, size_t, __global half *);
+void __ovld vstore3(half3, size_t, __global half *);
+void __ovld vstore4(half4, size_t, __global half *);
+void __ovld vstore8(half8, size_t, __global half *);
+void __ovld vstore16(half16, size_t, __global half *);
+void __ovld vstore2(half2, size_t, __local half *);
+void __ovld vstore3(half3, size_t, __local half *);
+void __ovld vstore4(half4, size_t, __local half *);
+void __ovld vstore8(half8, size_t, __local half *);
+void __ovld vstore16(half16, size_t, __local half *);
+void __ovld vstore2(half2, size_t, __private half *);
+void __ovld vstore3(half3, size_t, __private half *);
+void __ovld vstore4(half4, size_t, __private half *);
+void __ovld vstore8(half8, size_t, __private half *);
+void __ovld vstore16(half16, size_t, __private half *);
#endif //cl_khr_fp16
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
/**
* Read sizeof (half) bytes of data from address
@@ -11733,15 +11724,17 @@ void __ovld vstore16(half16 data, size_t offset, __private half *p);
* The read address computed as (p + offset)
* must be 16-bit aligned.
*/
-float __ovld vload_half(size_t offset, const __constant half *p);
+float __ovld __purefn vload_half(size_t, const __constant half *);
#if defined(__opencl_c_generic_address_space)
-float __ovld vload_half(size_t offset, const half *p);
-#else
-float __ovld vload_half(size_t offset, const __global half *p);
-float __ovld vload_half(size_t offset, const __local half *p);
-float __ovld vload_half(size_t offset, const __private half *p);
+float __ovld __purefn vload_half(size_t, const half *);
#endif //defined(__opencl_c_generic_address_space)
+#if defined(__opencl_c_named_address_space_builtins)
+float __ovld __purefn vload_half(size_t, const __global half *);
+float __ovld __purefn vload_half(size_t, const __local half *);
+float __ovld __purefn vload_half(size_t, const __private half *);
+#endif //defined(__opencl_c_named_address_space_builtins)
+
/**
* Read sizeof (halfn) bytes of data from address
* (p + (offset * n)). The data read is interpreted
@@ -11750,35 +11743,37 @@ float __ovld vload_half(size_t offset, const __private half *p);
* value is returned. The read address computed
* as (p + (offset * n)) must be 16-bit aligned.
*/
-float2 __ovld vload_half2(size_t offset, const __constant half *p);
-float3 __ovld vload_half3(size_t offset, const __constant half *p);
-float4 __ovld vload_half4(size_t offset, const __constant half *p);
-float8 __ovld vload_half8(size_t offset, const __constant half *p);
-float16 __ovld vload_half16(size_t offset, const __constant half *p);
+float2 __ovld __purefn vload_half2(size_t, const __constant half *);
+float3 __ovld __purefn vload_half3(size_t, const __constant half *);
+float4 __ovld __purefn vload_half4(size_t, const __constant half *);
+float8 __ovld __purefn vload_half8(size_t, const __constant half *);
+float16 __ovld __purefn vload_half16(size_t, const __constant half *);
#if defined(__opencl_c_generic_address_space)
-float2 __ovld vload_half2(size_t offset, const half *p);
-float3 __ovld vload_half3(size_t offset, const half *p);
-float4 __ovld vload_half4(size_t offset, const half *p);
-float8 __ovld vload_half8(size_t offset, const half *p);
-float16 __ovld vload_half16(size_t offset, const half *p);
-#else
-float2 __ovld vload_half2(size_t offset, const __global half *p);
-float3 __ovld vload_half3(size_t offset, const __global half *p);
-float4 __ovld vload_half4(size_t offset, const __global half *p);
-float8 __ovld vload_half8(size_t offset, const __global half *p);
-float16 __ovld vload_half16(size_t offset, const __global half *p);
-float2 __ovld vload_half2(size_t offset, const __local half *p);
-float3 __ovld vload_half3(size_t offset, const __local half *p);
-float4 __ovld vload_half4(size_t offset, const __local half *p);
-float8 __ovld vload_half8(size_t offset, const __local half *p);
-float16 __ovld vload_half16(size_t offset, const __local half *p);
-float2 __ovld vload_half2(size_t offset, const __private half *p);
-float3 __ovld vload_half3(size_t offset, const __private half *p);
-float4 __ovld vload_half4(size_t offset, const __private half *p);
-float8 __ovld vload_half8(size_t offset, const __private half *p);
-float16 __ovld vload_half16(size_t offset, const __private half *p);
+float2 __ovld __purefn vload_half2(size_t, const half *);
+float3 __ovld __purefn vload_half3(size_t, const half *);
+float4 __ovld __purefn vload_half4(size_t, const half *);
+float8 __ovld __purefn vload_half8(size_t, const half *);
+float16 __ovld __purefn vload_half16(size_t, const half *);
#endif //defined(__opencl_c_generic_address_space)
+#if defined(__opencl_c_named_address_space_builtins)
+float2 __ovld __purefn vload_half2(size_t, const __global half *);
+float3 __ovld __purefn vload_half3(size_t, const __global half *);
+float4 __ovld __purefn vload_half4(size_t, const __global half *);
+float8 __ovld __purefn vload_half8(size_t, const __global half *);
+float16 __ovld __purefn vload_half16(size_t, const __global half *);
+float2 __ovld __purefn vload_half2(size_t, const __local half *);
+float3 __ovld __purefn vload_half3(size_t, const __local half *);
+float4 __ovld __purefn vload_half4(size_t, const __local half *);
+float8 __ovld __purefn vload_half8(size_t, const __local half *);
+float16 __ovld __purefn vload_half16(size_t, const __local half *);
+float2 __ovld __purefn vload_half2(size_t, const __private half *);
+float3 __ovld __purefn vload_half3(size_t, const __private half *);
+float4 __ovld __purefn vload_half4(size_t, const __private half *);
+float8 __ovld __purefn vload_half8(size_t, const __private half *);
+float16 __ovld __purefn vload_half16(size_t, const __private half *);
+#endif //defined(__opencl_c_named_address_space_builtins)
+
/**
* The float value given by data is first
* converted to a half value using the appropriate
@@ -11791,52 +11786,54 @@ float16 __ovld vload_half16(size_t offset, const __private half *p);
* nearest even.
*/
#if defined(__opencl_c_generic_address_space)
-void __ovld vstore_half(float data, size_t offset, half *p);
-void __ovld vstore_half_rte(float data, size_t offset, half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, half *p);
+void __ovld vstore_half(float, size_t, half *);
+void __ovld vstore_half_rte(float, size_t, half *);
+void __ovld vstore_half_rtz(float, size_t, half *);
+void __ovld vstore_half_rtp(float, size_t, half *);
+void __ovld vstore_half_rtn(float, size_t, half *);
#ifdef cl_khr_fp64
-void __ovld vstore_half(double data, size_t offset, half *p);
-void __ovld vstore_half_rte(double data, size_t offset, half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, half *p);
+void __ovld vstore_half(double, size_t, half *);
+void __ovld vstore_half_rte(double, size_t, half *);
+void __ovld vstore_half_rtz(double, size_t, half *);
+void __ovld vstore_half_rtp(double, size_t, half *);
+void __ovld vstore_half_rtn(double, size_t, half *);
#endif //cl_khr_fp64
-#else
-void __ovld vstore_half(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rte(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, __global half *p);
-void __ovld vstore_half(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rte(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, __local half *p);
-void __ovld vstore_half(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rte(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, __private half *p);
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+void __ovld vstore_half(float, size_t, __global half *);
+void __ovld vstore_half_rte(float, size_t, __global half *);
+void __ovld vstore_half_rtz(float, size_t, __global half *);
+void __ovld vstore_half_rtp(float, size_t, __global half *);
+void __ovld vstore_half_rtn(float, size_t, __global half *);
+void __ovld vstore_half(float, size_t, __local half *);
+void __ovld vstore_half_rte(float, size_t, __local half *);
+void __ovld vstore_half_rtz(float, size_t, __local half *);
+void __ovld vstore_half_rtp(float, size_t, __local half *);
+void __ovld vstore_half_rtn(float, size_t, __local half *);
+void __ovld vstore_half(float, size_t, __private half *);
+void __ovld vstore_half_rte(float, size_t, __private half *);
+void __ovld vstore_half_rtz(float, size_t, __private half *);
+void __ovld vstore_half_rtp(float, size_t, __private half *);
+void __ovld vstore_half_rtn(float, size_t, __private half *);
#ifdef cl_khr_fp64
-void __ovld vstore_half(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rte(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, __global half *p);
-void __ovld vstore_half(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rte(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, __local half *p);
-void __ovld vstore_half(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rte(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, __private half *p);
+void __ovld vstore_half(double, size_t, __global half *);
+void __ovld vstore_half_rte(double, size_t, __global half *);
+void __ovld vstore_half_rtz(double, size_t, __global half *);
+void __ovld vstore_half_rtp(double, size_t, __global half *);
+void __ovld vstore_half_rtn(double, size_t, __global half *);
+void __ovld vstore_half(double, size_t, __local half *);
+void __ovld vstore_half_rte(double, size_t, __local half *);
+void __ovld vstore_half_rtz(double, size_t, __local half *);
+void __ovld vstore_half_rtp(double, size_t, __local half *);
+void __ovld vstore_half_rtn(double, size_t, __local half *);
+void __ovld vstore_half(double, size_t, __private half *);
+void __ovld vstore_half_rte(double, size_t, __private half *);
+void __ovld vstore_half_rtz(double, size_t, __private half *);
+void __ovld vstore_half_rtp(double, size_t, __private half *);
+void __ovld vstore_half_rtn(double, size_t, __private half *);
#endif //cl_khr_fp64
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
/**
* The floatn value given by data is converted to
@@ -11850,212 +11847,214 @@ void __ovld vstore_half_rtn(double data, size_t offset, __private half *p);
* nearest even.
*/
#if defined(__opencl_c_generic_address_space)
-void __ovld vstore_half2(float2 data, size_t offset, half *p);
-void __ovld vstore_half3(float3 data, size_t offset, half *p);
-void __ovld vstore_half4(float4 data, size_t offset, half *p);
-void __ovld vstore_half8(float8 data, size_t offset, half *p);
-void __ovld vstore_half16(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, half *p);
+void __ovld vstore_half2(float2, size_t, half *);
+void __ovld vstore_half3(float3, size_t, half *);
+void __ovld vstore_half4(float4, size_t, half *);
+void __ovld vstore_half8(float8, size_t, half *);
+void __ovld vstore_half16(float16, size_t, half *);
+void __ovld vstore_half2_rte(float2, size_t, half *);
+void __ovld vstore_half3_rte(float3, size_t, half *);
+void __ovld vstore_half4_rte(float4, size_t, half *);
+void __ovld vstore_half8_rte(float8, size_t, half *);
+void __ovld vstore_half16_rte(float16, size_t, half *);
+void __ovld vstore_half2_rtz(float2, size_t, half *);
+void __ovld vstore_half3_rtz(float3, size_t, half *);
+void __ovld vstore_half4_rtz(float4, size_t, half *);
+void __ovld vstore_half8_rtz(float8, size_t, half *);
+void __ovld vstore_half16_rtz(float16, size_t, half *);
+void __ovld vstore_half2_rtp(float2, size_t, half *);
+void __ovld vstore_half3_rtp(float3, size_t, half *);
+void __ovld vstore_half4_rtp(float4, size_t, half *);
+void __ovld vstore_half8_rtp(float8, size_t, half *);
+void __ovld vstore_half16_rtp(float16, size_t, half *);
+void __ovld vstore_half2_rtn(float2, size_t, half *);
+void __ovld vstore_half3_rtn(float3, size_t, half *);
+void __ovld vstore_half4_rtn(float4, size_t, half *);
+void __ovld vstore_half8_rtn(float8, size_t, half *);
+void __ovld vstore_half16_rtn(float16, size_t, half *);
#ifdef cl_khr_fp64
-void __ovld vstore_half2(double2 data, size_t offset, half *p);
-void __ovld vstore_half3(double3 data, size_t offset, half *p);
-void __ovld vstore_half4(double4 data, size_t offset, half *p);
-void __ovld vstore_half8(double8 data, size_t offset, half *p);
-void __ovld vstore_half16(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, half *p);
+void __ovld vstore_half2(double2, size_t, half *);
+void __ovld vstore_half3(double3, size_t, half *);
+void __ovld vstore_half4(double4, size_t, half *);
+void __ovld vstore_half8(double8, size_t, half *);
+void __ovld vstore_half16(double16, size_t, half *);
+void __ovld vstore_half2_rte(double2, size_t, half *);
+void __ovld vstore_half3_rte(double3, size_t, half *);
+void __ovld vstore_half4_rte(double4, size_t, half *);
+void __ovld vstore_half8_rte(double8, size_t, half *);
+void __ovld vstore_half16_rte(double16, size_t, half *);
+void __ovld vstore_half2_rtz(double2, size_t, half *);
+void __ovld vstore_half3_rtz(double3, size_t, half *);
+void __ovld vstore_half4_rtz(double4, size_t, half *);
+void __ovld vstore_half8_rtz(double8, size_t, half *);
+void __ovld vstore_half16_rtz(double16, size_t, half *);
+void __ovld vstore_half2_rtp(double2, size_t, half *);
+void __ovld vstore_half3_rtp(double3, size_t, half *);
+void __ovld vstore_half4_rtp(double4, size_t, half *);
+void __ovld vstore_half8_rtp(double8, size_t, half *);
+void __ovld vstore_half16_rtp(double16, size_t, half *);
+void __ovld vstore_half2_rtn(double2, size_t, half *);
+void __ovld vstore_half3_rtn(double3, size_t, half *);
+void __ovld vstore_half4_rtn(double4, size_t, half *);
+void __ovld vstore_half8_rtn(double8, size_t, half *);
+void __ovld vstore_half16_rtn(double16, size_t, half *);
#endif //cl_khr_fp64
-#else
-void __ovld vstore_half2(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, __private half *p);
+#endif //defined(__opencl_c_generic_address_space)
+
+#if defined(__opencl_c_named_address_space_builtins)
+void __ovld vstore_half2(float2, size_t, __global half *);
+void __ovld vstore_half3(float3, size_t, __global half *);
+void __ovld vstore_half4(float4, size_t, __global half *);
+void __ovld vstore_half8(float8, size_t, __global half *);
+void __ovld vstore_half16(float16, size_t, __global half *);
+void __ovld vstore_half2_rte(float2, size_t, __global half *);
+void __ovld vstore_half3_rte(float3, size_t, __global half *);
+void __ovld vstore_half4_rte(float4, size_t, __global half *);
+void __ovld vstore_half8_rte(float8, size_t, __global half *);
+void __ovld vstore_half16_rte(float16, size_t, __global half *);
+void __ovld vstore_half2_rtz(float2, size_t, __global half *);
+void __ovld vstore_half3_rtz(float3, size_t, __global half *);
+void __ovld vstore_half4_rtz(float4, size_t, __global half *);
+void __ovld vstore_half8_rtz(float8, size_t, __global half *);
+void __ovld vstore_half16_rtz(float16, size_t, __global half *);
+void __ovld vstore_half2_rtp(float2, size_t, __global half *);
+void __ovld vstore_half3_rtp(float3, size_t, __global half *);
+void __ovld vstore_half4_rtp(float4, size_t, __global half *);
+void __ovld vstore_half8_rtp(float8, size_t, __global half *);
+void __ovld vstore_half16_rtp(float16, size_t, __global half *);
+void __ovld vstore_half2_rtn(float2, size_t, __global half *);
+void __ovld vstore_half3_rtn(float3, size_t, __global half *);
+void __ovld vstore_half4_rtn(float4, size_t, __global half *);
+void __ovld vstore_half8_rtn(float8, size_t, __global half *);
+void __ovld vstore_half16_rtn(float16, size_t, __global half *);
+void __ovld vstore_half2(float2, size_t, __local half *);
+void __ovld vstore_half3(float3, size_t, __local half *);
+void __ovld vstore_half4(float4, size_t, __local half *);
+void __ovld vstore_half8(float8, size_t, __local half *);
+void __ovld vstore_half16(float16, size_t, __local half *);
+void __ovld vstore_half2_rte(float2, size_t, __local half *);
+void __ovld vstore_half3_rte(float3, size_t, __local half *);
+void __ovld vstore_half4_rte(float4, size_t, __local half *);
+void __ovld vstore_half8_rte(float8, size_t, __local half *);
+void __ovld vstore_half16_rte(float16, size_t, __local half *);
+void __ovld vstore_half2_rtz(float2, size_t, __local half *);
+void __ovld vstore_half3_rtz(float3, size_t, __local half *);
+void __ovld vstore_half4_rtz(float4, size_t, __local half *);
+void __ovld vstore_half8_rtz(float8, size_t, __local half *);
+void __ovld vstore_half16_rtz(float16, size_t, __local half *);
+void __ovld vstore_half2_rtp(float2, size_t, __local half *);
+void __ovld vstore_half3_rtp(float3, size_t, __local half *);
+void __ovld vstore_half4_rtp(float4, size_t, __local half *);
+void __ovld vstore_half8_rtp(float8, size_t, __local half *);
+void __ovld vstore_half16_rtp(float16, size_t, __local half *);
+void __ovld vstore_half2_rtn(float2, size_t, __local half *);
+void __ovld vstore_half3_rtn(float3, size_t, __local half *);
+void __ovld vstore_half4_rtn(float4, size_t, __local half *);
+void __ovld vstore_half8_rtn(float8, size_t, __local half *);
+void __ovld vstore_half16_rtn(float16, size_t, __local half *);
+void __ovld vstore_half2(float2, size_t, __private half *);
+void __ovld vstore_half3(float3, size_t, __private half *);
+void __ovld vstore_half4(float4, size_t, __private half *);
+void __ovld vstore_half8(float8, size_t, __private half *);
+void __ovld vstore_half16(float16, size_t, __private half *);
+void __ovld vstore_half2_rte(float2, size_t, __private half *);
+void __ovld vstore_half3_rte(float3, size_t, __private half *);
+void __ovld vstore_half4_rte(float4, size_t, __private half *);
+void __ovld vstore_half8_rte(float8, size_t, __private half *);
+void __ovld vstore_half16_rte(float16, size_t, __private half *);
+void __ovld vstore_half2_rtz(float2, size_t, __private half *);
+void __ovld vstore_half3_rtz(float3, size_t, __private half *);
+void __ovld vstore_half4_rtz(float4, size_t, __private half *);
+void __ovld vstore_half8_rtz(float8, size_t, __private half *);
+void __ovld vstore_half16_rtz(float16, size_t, __private half *);
+void __ovld vstore_half2_rtp(float2, size_t, __private half *);
+void __ovld vstore_half3_rtp(float3, size_t, __private half *);
+void __ovld vstore_half4_rtp(float4, size_t, __private half *);
+void __ovld vstore_half8_rtp(float8, size_t, __private half *);
+void __ovld vstore_half16_rtp(float16, size_t, __private half *);
+void __ovld vstore_half2_rtn(float2, size_t, __private half *);
+void __ovld vstore_half3_rtn(float3, size_t, __private half *);
+void __ovld vstore_half4_rtn(float4, size_t, __private half *);
+void __ovld vstore_half8_rtn(float8, size_t, __private half *);
+void __ovld vstore_half16_rtn(float16, size_t, __private half *);
#ifdef cl_khr_fp64
-void __ovld vstore_half2(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, __private half *p);
+void __ovld vstore_half2(double2, size_t, __global half *);
+void __ovld vstore_half3(double3, size_t, __global half *);
+void __ovld vstore_half4(double4, size_t, __global half *);
+void __ovld vstore_half8(double8, size_t, __global half *);
+void __ovld vstore_half16(double16, size_t, __global half *);
+void __ovld vstore_half2_rte(double2, size_t, __global half *);
+void __ovld vstore_half3_rte(double3, size_t, __global half *);
+void __ovld vstore_half4_rte(double4, size_t, __global half *);
+void __ovld vstore_half8_rte(double8, size_t, __global half *);
+void __ovld vstore_half16_rte(double16, size_t, __global half *);
+void __ovld vstore_half2_rtz(double2, size_t, __global half *);
+void __ovld vstore_half3_rtz(double3, size_t, __global half *);
+void __ovld vstore_half4_rtz(double4, size_t, __global half *);
+void __ovld vstore_half8_rtz(double8, size_t, __global half *);
+void __ovld vstore_half16_rtz(double16, size_t, __global half *);
+void __ovld vstore_half2_rtp(double2, size_t, __global half *);
+void __ovld vstore_half3_rtp(double3, size_t, __global half *);
+void __ovld vstore_half4_rtp(double4, size_t, __global half *);
+void __ovld vstore_half8_rtp(double8, size_t, __global half *);
+void __ovld vstore_half16_rtp(double16, size_t, __global half *);
+void __ovld vstore_half2_rtn(double2, size_t, __global half *);
+void __ovld vstore_half3_rtn(double3, size_t, __global half *);
+void __ovld vstore_half4_rtn(double4, size_t, __global half *);
+void __ovld vstore_half8_rtn(double8, size_t, __global half *);
+void __ovld vstore_half16_rtn(double16, size_t, __global half *);
+void __ovld vstore_half2(double2, size_t, __local half *);
+void __ovld vstore_half3(double3, size_t, __local half *);
+void __ovld vstore_half4(double4, size_t, __local half *);
+void __ovld vstore_half8(double8, size_t, __local half *);
+void __ovld vstore_half16(double16, size_t, __local half *);
+void __ovld vstore_half2_rte(double2, size_t, __local half *);
+void __ovld vstore_half3_rte(double3, size_t, __local half *);
+void __ovld vstore_half4_rte(double4, size_t, __local half *);
+void __ovld vstore_half8_rte(double8, size_t, __local half *);
+void __ovld vstore_half16_rte(double16, size_t, __local half *);
+void __ovld vstore_half2_rtz(double2, size_t, __local half *);
+void __ovld vstore_half3_rtz(double3, size_t, __local half *);
+void __ovld vstore_half4_rtz(double4, size_t, __local half *);
+void __ovld vstore_half8_rtz(double8, size_t, __local half *);
+void __ovld vstore_half16_rtz(double16, size_t, __local half *);
+void __ovld vstore_half2_rtp(double2, size_t, __local half *);
+void __ovld vstore_half3_rtp(double3, size_t, __local half *);
+void __ovld vstore_half4_rtp(double4, size_t, __local half *);
+void __ovld vstore_half8_rtp(double8, size_t, __local half *);
+void __ovld vstore_half16_rtp(double16, size_t, __local half *);
+void __ovld vstore_half2_rtn(double2, size_t, __local half *);
+void __ovld vstore_half3_rtn(double3, size_t, __local half *);
+void __ovld vstore_half4_rtn(double4, size_t, __local half *);
+void __ovld vstore_half8_rtn(double8, size_t, __local half *);
+void __ovld vstore_half16_rtn(double16, size_t, __local half *);
+void __ovld vstore_half2(double2, size_t, __private half *);
+void __ovld vstore_half3(double3, size_t, __private half *);
+void __ovld vstore_half4(double4, size_t, __private half *);
+void __ovld vstore_half8(double8, size_t, __private half *);
+void __ovld vstore_half16(double16, size_t, __private half *);
+void __ovld vstore_half2_rte(double2, size_t, __private half *);
+void __ovld vstore_half3_rte(double3, size_t, __private half *);
+void __ovld vstore_half4_rte(double4, size_t, __private half *);
+void __ovld vstore_half8_rte(double8, size_t, __private half *);
+void __ovld vstore_half16_rte(double16, size_t, __private half *);
+void __ovld vstore_half2_rtz(double2, size_t, __private half *);
+void __ovld vstore_half3_rtz(double3, size_t, __private half *);
+void __ovld vstore_half4_rtz(double4, size_t, __private half *);
+void __ovld vstore_half8_rtz(double8, size_t, __private half *);
+void __ovld vstore_half16_rtz(double16, size_t, __private half *);
+void __ovld vstore_half2_rtp(double2, size_t, __private half *);
+void __ovld vstore_half3_rtp(double3, size_t, __private half *);
+void __ovld vstore_half4_rtp(double4, size_t, __private half *);
+void __ovld vstore_half8_rtp(double8, size_t, __private half *);
+void __ovld vstore_half16_rtp(double16, size_t, __private half *);
+void __ovld vstore_half2_rtn(double2, size_t, __private half *);
+void __ovld vstore_half3_rtn(double3, size_t, __private half *);
+void __ovld vstore_half4_rtn(double4, size_t, __private half *);
+void __ovld vstore_half8_rtn(double8, size_t, __private half *);
+void __ovld vstore_half16_rtn(double16, size_t, __private half *);
#endif //cl_khr_fp64
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
/**
* For n = 1, 2, 4, 8 and 16 read sizeof (halfn)
@@ -12070,40 +12069,37 @@ void __ovld vstore_half16_rtn(double16 data, size_t offset, __private half *p);
* The address computed as (p + (offset * 4))
* must be aligned to sizeof (half) * 4 bytes.
*/
-float __ovld vloada_half(size_t offset, const __constant half *p);
-float2 __ovld vloada_half2(size_t offset, const __constant half *p);
-float3 __ovld vloada_half3(size_t offset, const __constant half *p);
-float4 __ovld vloada_half4(size_t offset, const __constant half *p);
-float8 __ovld vloada_half8(size_t offset, const __constant half *p);
-float16 __ovld vloada_half16(size_t offset, const __constant half *p);
+float2 __ovld __purefn vloada_half2(size_t, const __constant half *);
+float3 __ovld __purefn vloada_half3(size_t, const __constant half *);
+float4 __ovld __purefn vloada_half4(size_t, const __constant half *);
+float8 __ovld __purefn vloada_half8(size_t, const __constant half *);
+float16 __ovld __purefn vloada_half16(size_t, const __constant half *);
#if defined(__opencl_c_generic_address_space)
-float __ovld vloada_half(size_t offset, const half *p);
-float2 __ovld vloada_half2(size_t offset, const half *p);
-float3 __ovld vloada_half3(size_t offset, const half *p);
-float4 __ovld vloada_half4(size_t offset, const half *p);
-float8 __ovld vloada_half8(size_t offset, const half *p);
-float16 __ovld vloada_half16(size_t offset, const half *p);
-#else
-float __ovld vloada_half(size_t offset, const __global half *p);
-float2 __ovld vloada_half2(size_t offset, const __global half *p);
-float3 __ovld vloada_half3(size_t offset, const __global half *p);
-float4 __ovld vloada_half4(size_t offset, const __global half *p);
-float8 __ovld vloada_half8(size_t offset, const __global half *p);
-float16 __ovld vloada_half16(size_t offset, const __global half *p);
-float __ovld vloada_half(size_t offset, const __local half *p);
-float2 __ovld vloada_half2(size_t offset, const __local half *p);
-float3 __ovld vloada_half3(size_t offset, const __local half *p);
-float4 __ovld vloada_half4(size_t offset, const __local half *p);
-float8 __ovld vloada_half8(size_t offset, const __local half *p);
-float16 __ovld vloada_half16(size_t offset, const __local half *p);
-float __ovld vloada_half(size_t offset, const __private half *p);
-float2 __ovld vloada_half2(size_t offset, const __private half *p);
-float3 __ovld vloada_half3(size_t offset, const __private half *p);
-float4 __ovld vloada_half4(size_t offset, const __private half *p);
-float8 __ovld vloada_half8(size_t offset, const __private half *p);
-float16 __ovld vloada_half16(size_t offset, const __private half *p);
+float2 __ovld __purefn vloada_half2(size_t, const half *);
+float3 __ovld __purefn vloada_half3(size_t, const half *);
+float4 __ovld __purefn vloada_half4(size_t, const half *);
+float8 __ovld __purefn vloada_half8(size_t, const half *);
+float16 __ovld __purefn vloada_half16(size_t, const half *);
#endif //defined(__opencl_c_generic_address_space)
+#if defined(__opencl_c_named_address_space_builtins)
+float2 __ovld __purefn vloada_half2(size_t, const __global half *);
+float3 __ovld __purefn vloada_half3(size_t, const __global half *);
+float4 __ovld __purefn vloada_half4(size_t, const __global half *);
+float8 __ovld __purefn vloada_half8(size_t, const __global half *);
+float16 __ovld __purefn vloada_half16(size_t, const __global half *);
+float2 __ovld __purefn vloada_half2(size_t, const __local half *);
+float3 __ovld __purefn vloada_half3(size_t, const __local half *);
+float4 __ovld __purefn vloada_half4(size_t, const __local half *);
+float8 __ovld __purefn vloada_half8(size_t, const __local half *);
+float16 __ovld __purefn vloada_half16(size_t, const __local half *);
+float2 __ovld __purefn vloada_half2(size_t, const __private half *);
+float3 __ovld __purefn vloada_half3(size_t, const __private half *);
+float4 __ovld __purefn vloada_half4(size_t, const __private half *);
+float8 __ovld __purefn vloada_half8(size_t, const __private half *);
+float16 __ovld __purefn vloada_half16(size_t, const __private half *);
+#endif //defined(__opencl_c_named_address_space_builtins)
+
/**
* The floatn value given by data is converted to
* a halfn value using the appropriate rounding
@@ -12121,291 +12117,252 @@ float16 __ovld vloada_half16(size_t offset, const __private half *p);
* round to nearest even.
*/
#if defined(__opencl_c_generic_address_space)
-void __ovld vstorea_half(float data, size_t offset, half *p);
-void __ovld vstorea_half2(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half_rte(float data, size_t offset, half *p);
-void __ovld vstorea_half2_rte(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half_rtz(float data, size_t offset, half *p);
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half_rtp(float data, size_t offset, half *p);
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half_rtn(float data, size_t offset, half *p);
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, half *p);
+void __ovld vstorea_half2(float2, size_t, half *);
+void __ovld vstorea_half3(float3, size_t, half *);
+void __ovld vstorea_half4(float4, size_t, half *);
+void __ovld vstorea_half8(float8, size_t, half *);
+void __ovld vstorea_half16(float16, size_t, half *);
+
+void __ovld vstorea_half2_rte(float2, size_t, half *);
+void __ovld vstorea_half3_rte(float3, size_t, half *);
+void __ovld vstorea_half4_rte(float4, size_t, half *);
+void __ovld vstorea_half8_rte(float8, size_t, half *);
+void __ovld vstorea_half16_rte(float16, size_t, half *);
+
+void __ovld vstorea_half2_rtz(float2, size_t, half *);
+void __ovld vstorea_half3_rtz(float3, size_t, half *);
+void __ovld vstorea_half4_rtz(float4, size_t, half *);
+void __ovld vstorea_half8_rtz(float8, size_t, half *);
+void __ovld vstorea_half16_rtz(float16, size_t, half *);
+
+void __ovld vstorea_half2_rtp(float2, size_t, half *);
+void __ovld vstorea_half3_rtp(float3, size_t, half *);
+void __ovld vstorea_half4_rtp(float4, size_t, half *);
+void __ovld vstorea_half8_rtp(float8, size_t, half *);
+void __ovld vstorea_half16_rtp(float16, size_t, half *);
+
+void __ovld vstorea_half2_rtn(float2, size_t, half *);
+void __ovld vstorea_half3_rtn(float3, size_t, half *);
+void __ovld vstorea_half4_rtn(float4, size_t, half *);
+void __ovld vstorea_half8_rtn(float8, size_t, half *);
+void __ovld vstorea_half16_rtn(float16, size_t, half *);
#ifdef cl_khr_fp64
-void __ovld vstorea_half(double data, size_t offset, half *p);
-void __ovld vstorea_half2(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half_rte(double data, size_t offset, half *p);
-void __ovld vstorea_half2_rte(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half_rtz(double data, size_t offset, half *p);
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half_rtp(double data, size_t offset, half *p);
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half_rtn(double data, size_t offset, half *p);
-void __ovld vstorea_half2_rtn(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtn(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtn(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtn(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtn(double16 data, size_t offset, half *p);
+void __ovld vstorea_half2(double2, size_t, half *);
+void __ovld vstorea_half3(double3, size_t, half *);
+void __ovld vstorea_half4(double4, size_t, half *);
+void __ovld vstorea_half8(double8, size_t, half *);
+void __ovld vstorea_half16(double16, size_t, half *);
+
+void __ovld vstorea_half2_rte(double2, size_t, half *);
+void __ovld vstorea_half3_rte(double3, size_t, half *);
+void __ovld vstorea_half4_rte(double4, size_t, half *);
+void __ovld vstorea_half8_rte(double8, size_t, half *);
+void __ovld vstorea_half16_rte(double16, size_t, half *);
+
+void __ovld vstorea_half2_rtz(double2, size_t, half *);
+void __ovld vstorea_half3_rtz(double3, size_t, half *);
+void __ovld vstorea_half4_rtz(double4, size_t, half *);
+void __ovld vstorea_half8_rtz(double8, size_t, half *);
+void __ovld vstorea_half16_rtz(double16, size_t, half *);
+
+void __ovld vstorea_half2_rtp(double2, size_t, half *);
+void __ovld vstorea_half3_rtp(double3, size_t, half *);
+void __ovld vstorea_half4_rtp(double4, size_t, half *);
+void __ovld vstorea_half8_rtp(double8, size_t, half *);
+void __ovld vstorea_half16_rtp(double16, size_t, half *);
+
+void __ovld vstorea_half2_rtn(double2, size_t, half *);
+void __ovld vstorea_half3_rtn(double3, size_t, half *);
+void __ovld vstorea_half4_rtn(double4, size_t, half *);
+void __ovld vstorea_half8_rtn(double8, size_t, half *);
+void __ovld vstorea_half16_rtn(double16, size_t, half *);
#endif //cl_khr_fp64
+#endif //defined(__opencl_c_generic_address_space)
-#else
-void __ovld vstorea_half(float data, size_t offset, __global half *p);
-void __ovld vstorea_half2(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half_rte(float data, size_t offset, __global half *p);
-void __ovld vstorea_half2_rte(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half_rtz(float data, size_t offset, __global half *p);
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half_rtp(float data, size_t offset, __global half *p);
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half_rtn(float data, size_t offset, __global half *p);
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half(float data, size_t offset, __local half *p);
-void __ovld vstorea_half2(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half_rte(float data, size_t offset, __local half *p);
-void __ovld vstorea_half2_rte(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half_rtz(float data, size_t offset, __local half *p);
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half_rtp(float data, size_t offset, __local half *p);
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half_rtn(float data, size_t offset, __local half *p);
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half(float data, size_t offset, __private half *p);
-void __ovld vstorea_half2(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half_rte(float data, size_t offset, __private half *p);
-void __ovld vstorea_half2_rte(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half_rtz(float data, size_t offset, __private half *p);
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half_rtp(float data, size_t offset, __private half *p);
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half_rtn(float data, size_t offset, __private half *p);
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, __private half *p);
+#if defined(__opencl_c_named_address_space_builtins)
+void __ovld vstorea_half2(float2, size_t, __global half *);
+void __ovld vstorea_half3(float3, size_t, __global half *);
+void __ovld vstorea_half4(float4, size_t, __global half *);
+void __ovld vstorea_half8(float8, size_t, __global half *);
+void __ovld vstorea_half16(float16, size_t, __global half *);
+
+void __ovld vstorea_half2_rte(float2, size_t, __global half *);
+void __ovld vstorea_half3_rte(float3, size_t, __global half *);
+void __ovld vstorea_half4_rte(float4, size_t, __global half *);
+void __ovld vstorea_half8_rte(float8, size_t, __global half *);
+void __ovld vstorea_half16_rte(float16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtz(float2, size_t, __global half *);
+void __ovld vstorea_half3_rtz(float3, size_t, __global half *);
+void __ovld vstorea_half4_rtz(float4, size_t, __global half *);
+void __ovld vstorea_half8_rtz(float8, size_t, __global half *);
+void __ovld vstorea_half16_rtz(float16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtp(float2, size_t, __global half *);
+void __ovld vstorea_half3_rtp(float3, size_t, __global half *);
+void __ovld vstorea_half4_rtp(float4, size_t, __global half *);
+void __ovld vstorea_half8_rtp(float8, size_t, __global half *);
+void __ovld vstorea_half16_rtp(float16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtn(float2, size_t, __global half *);
+void __ovld vstorea_half3_rtn(float3, size_t, __global half *);
+void __ovld vstorea_half4_rtn(float4, size_t, __global half *);
+void __ovld vstorea_half8_rtn(float8, size_t, __global half *);
+void __ovld vstorea_half16_rtn(float16, size_t, __global half *);
+
+void __ovld vstorea_half2(float2, size_t, __local half *);
+void __ovld vstorea_half3(float3, size_t, __local half *);
+void __ovld vstorea_half4(float4, size_t, __local half *);
+void __ovld vstorea_half8(float8, size_t, __local half *);
+void __ovld vstorea_half16(float16, size_t, __local half *);
+
+void __ovld vstorea_half2_rte(float2, size_t, __local half *);
+void __ovld vstorea_half3_rte(float3, size_t, __local half *);
+void __ovld vstorea_half4_rte(float4, size_t, __local half *);
+void __ovld vstorea_half8_rte(float8, size_t, __local half *);
+void __ovld vstorea_half16_rte(float16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtz(float2, size_t, __local half *);
+void __ovld vstorea_half3_rtz(float3, size_t, __local half *);
+void __ovld vstorea_half4_rtz(float4, size_t, __local half *);
+void __ovld vstorea_half8_rtz(float8, size_t, __local half *);
+void __ovld vstorea_half16_rtz(float16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtp(float2, size_t, __local half *);
+void __ovld vstorea_half3_rtp(float3, size_t, __local half *);
+void __ovld vstorea_half4_rtp(float4, size_t, __local half *);
+void __ovld vstorea_half8_rtp(float8, size_t, __local half *);
+void __ovld vstorea_half16_rtp(float16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtn(float2, size_t, __local half *);
+void __ovld vstorea_half3_rtn(float3, size_t, __local half *);
+void __ovld vstorea_half4_rtn(float4, size_t, __local half *);
+void __ovld vstorea_half8_rtn(float8, size_t, __local half *);
+void __ovld vstorea_half16_rtn(float16, size_t, __local half *);
+
+void __ovld vstorea_half2(float2, size_t, __private half *);
+void __ovld vstorea_half3(float3, size_t, __private half *);
+void __ovld vstorea_half4(float4, size_t, __private half *);
+void __ovld vstorea_half8(float8, size_t, __private half *);
+void __ovld vstorea_half16(float16, size_t, __private half *);
+
+void __ovld vstorea_half2_rte(float2, size_t, __private half *);
+void __ovld vstorea_half3_rte(float3, size_t, __private half *);
+void __ovld vstorea_half4_rte(float4, size_t, __private half *);
+void __ovld vstorea_half8_rte(float8, size_t, __private half *);
+void __ovld vstorea_half16_rte(float16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtz(float2, size_t, __private half *);
+void __ovld vstorea_half3_rtz(float3, size_t, __private half *);
+void __ovld vstorea_half4_rtz(float4, size_t, __private half *);
+void __ovld vstorea_half8_rtz(float8, size_t, __private half *);
+void __ovld vstorea_half16_rtz(float16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtp(float2, size_t, __private half *);
+void __ovld vstorea_half3_rtp(float3, size_t, __private half *);
+void __ovld vstorea_half4_rtp(float4, size_t, __private half *);
+void __ovld vstorea_half8_rtp(float8, size_t, __private half *);
+void __ovld vstorea_half16_rtp(float16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtn(float2, size_t, __private half *);
+void __ovld vstorea_half3_rtn(float3, size_t, __private half *);
+void __ovld vstorea_half4_rtn(float4, size_t, __private half *);
+void __ovld vstorea_half8_rtn(float8, size_t, __private half *);
+void __ovld vstorea_half16_rtn(float16, size_t, __private half *);
#ifdef cl_khr_fp64
-void __ovld vstorea_half(double data, size_t offset, __global half *p);
-void __ovld vstorea_half2(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half_rte(double data, size_t offset, __global half *p);
-void __ovld vstorea_half2_rte(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half_rtz(double data, size_t offset, __global half *p);
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half_rtp(double data, size_t offset, __global half *p);
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half_rtn(double data, size_t offset, __global half *p);
-void __ovld vstorea_half2_rtn(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtn(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtn(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtn(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtn(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half(double data, size_t offset, __local half *p);
-void __ovld vstorea_half2(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half_rte(double data, size_t offset, __local half *p);
-void __ovld vstorea_half2_rte(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half_rtz(double data, size_t offset, __local half *p);
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half_rtp(double data, size_t offset, __local half *p);
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half_rtn(double data, size_t offset, __local half *p);
-void __ovld vstorea_half2_rtn(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtn(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtn(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtn(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtn(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half(double data, size_t offset, __private half *p);
-void __ovld vstorea_half2(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half_rte(double data, size_t offset, __private half *p);
-void __ovld vstorea_half2_rte(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half_rtz(double data, size_t offset, __private half *p);
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half_rtp(double data, size_t offset, __private half *p);
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half_rtn(double data, size_t offset, __private half *p);
-void __ovld vstorea_half2_rtn(double2 data,size_t offset, __private half *p);
-void __ovld vstorea_half3_rtn(double3 data,size_t offset, __private half *p);
-void __ovld vstorea_half4_rtn(double4 data,size_t offset, __private half *p);
-void __ovld vstorea_half8_rtn(double8 data,size_t offset, __private half *p);
-void __ovld vstorea_half16_rtn(double16 data,size_t offset, __private half *p);
+void __ovld vstorea_half2(double2, size_t, __global half *);
+void __ovld vstorea_half3(double3, size_t, __global half *);
+void __ovld vstorea_half4(double4, size_t, __global half *);
+void __ovld vstorea_half8(double8, size_t, __global half *);
+void __ovld vstorea_half16(double16, size_t, __global half *);
+
+void __ovld vstorea_half2_rte(double2, size_t, __global half *);
+void __ovld vstorea_half3_rte(double3, size_t, __global half *);
+void __ovld vstorea_half4_rte(double4, size_t, __global half *);
+void __ovld vstorea_half8_rte(double8, size_t, __global half *);
+void __ovld vstorea_half16_rte(double16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtz(double2, size_t, __global half *);
+void __ovld vstorea_half3_rtz(double3, size_t, __global half *);
+void __ovld vstorea_half4_rtz(double4, size_t, __global half *);
+void __ovld vstorea_half8_rtz(double8, size_t, __global half *);
+void __ovld vstorea_half16_rtz(double16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtp(double2, size_t, __global half *);
+void __ovld vstorea_half3_rtp(double3, size_t, __global half *);
+void __ovld vstorea_half4_rtp(double4, size_t, __global half *);
+void __ovld vstorea_half8_rtp(double8, size_t, __global half *);
+void __ovld vstorea_half16_rtp(double16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtn(double2, size_t, __global half *);
+void __ovld vstorea_half3_rtn(double3, size_t, __global half *);
+void __ovld vstorea_half4_rtn(double4, size_t, __global half *);
+void __ovld vstorea_half8_rtn(double8, size_t, __global half *);
+void __ovld vstorea_half16_rtn(double16, size_t, __global half *);
+
+void __ovld vstorea_half2(double2, size_t, __local half *);
+void __ovld vstorea_half3(double3, size_t, __local half *);
+void __ovld vstorea_half4(double4, size_t, __local half *);
+void __ovld vstorea_half8(double8, size_t, __local half *);
+void __ovld vstorea_half16(double16, size_t, __local half *);
+
+void __ovld vstorea_half2_rte(double2, size_t, __local half *);
+void __ovld vstorea_half3_rte(double3, size_t, __local half *);
+void __ovld vstorea_half4_rte(double4, size_t, __local half *);
+void __ovld vstorea_half8_rte(double8, size_t, __local half *);
+void __ovld vstorea_half16_rte(double16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtz(double2, size_t, __local half *);
+void __ovld vstorea_half3_rtz(double3, size_t, __local half *);
+void __ovld vstorea_half4_rtz(double4, size_t, __local half *);
+void __ovld vstorea_half8_rtz(double8, size_t, __local half *);
+void __ovld vstorea_half16_rtz(double16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtp(double2, size_t, __local half *);
+void __ovld vstorea_half3_rtp(double3, size_t, __local half *);
+void __ovld vstorea_half4_rtp(double4, size_t, __local half *);
+void __ovld vstorea_half8_rtp(double8, size_t, __local half *);
+void __ovld vstorea_half16_rtp(double16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtn(double2, size_t, __local half *);
+void __ovld vstorea_half3_rtn(double3, size_t, __local half *);
+void __ovld vstorea_half4_rtn(double4, size_t, __local half *);
+void __ovld vstorea_half8_rtn(double8, size_t, __local half *);
+void __ovld vstorea_half16_rtn(double16, size_t, __local half *);
+
+void __ovld vstorea_half2(double2, size_t, __private half *);
+void __ovld vstorea_half3(double3, size_t, __private half *);
+void __ovld vstorea_half4(double4, size_t, __private half *);
+void __ovld vstorea_half8(double8, size_t, __private half *);
+void __ovld vstorea_half16(double16, size_t, __private half *);
+
+void __ovld vstorea_half2_rte(double2, size_t, __private half *);
+void __ovld vstorea_half3_rte(double3, size_t, __private half *);
+void __ovld vstorea_half4_rte(double4, size_t, __private half *);
+void __ovld vstorea_half8_rte(double8, size_t, __private half *);
+void __ovld vstorea_half16_rte(double16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtz(double2, size_t, __private half *);
+void __ovld vstorea_half3_rtz(double3, size_t, __private half *);
+void __ovld vstorea_half4_rtz(double4, size_t, __private half *);
+void __ovld vstorea_half8_rtz(double8, size_t, __private half *);
+void __ovld vstorea_half16_rtz(double16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtp(double2, size_t, __private half *);
+void __ovld vstorea_half3_rtp(double3, size_t, __private half *);
+void __ovld vstorea_half4_rtp(double4, size_t, __private half *);
+void __ovld vstorea_half8_rtp(double8, size_t, __private half *);
+void __ovld vstorea_half16_rtp(double16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtn(double2, size_t, __private half *);
+void __ovld vstorea_half3_rtn(double3, size_t, __private half *);
+void __ovld vstorea_half4_rtn(double4, size_t, __private half *);
+void __ovld vstorea_half8_rtn(double8, size_t, __private half *);
+void __ovld vstorea_half16_rtn(double16, size_t, __private half *);
#endif //cl_khr_fp64
-#endif //defined(__opencl_c_generic_address_space)
+#endif //defined(__opencl_c_named_address_space_builtins)
// OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions
@@ -12439,11 +12396,11 @@ void __ovld vstorea_half16_rtn(double16 data,size_t offset, __private half *p);
* image objects and then want to read the updated data.
*/
-void __ovld __conv barrier(cl_mem_fence_flags flags);
+void __ovld __conv barrier(cl_mem_fence_flags);
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void __ovld __conv work_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
-void __ovld __conv work_group_barrier(cl_mem_fence_flags flags);
+void __ovld __conv work_group_barrier(cl_mem_fence_flags, memory_scope);
+void __ovld __conv work_group_barrier(cl_mem_fence_flags);
#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// OpenCL v1.1 s6.11.9, v1.2 s6.12.9 - Explicit Memory Fence Functions
@@ -12461,7 +12418,7 @@ void __ovld __conv work_group_barrier(cl_mem_fence_flags flags);
* CLK_LOCAL_MEM_FENCE
* CLK_GLOBAL_MEM_FENCE.
*/
-void __ovld mem_fence(cl_mem_fence_flags flags);
+void __ovld mem_fence(cl_mem_fence_flags);
/**
* Read memory barrier that orders only
@@ -12473,7 +12430,7 @@ void __ovld mem_fence(cl_mem_fence_flags flags);
* CLK_LOCAL_MEM_FENCE
* CLK_GLOBAL_MEM_FENCE.
*/
-void __ovld read_mem_fence(cl_mem_fence_flags flags);
+void __ovld read_mem_fence(cl_mem_fence_flags);
/**
* Write memory barrier that orders only
@@ -12485,7 +12442,7 @@ void __ovld read_mem_fence(cl_mem_fence_flags flags);
* CLK_LOCAL_MEM_FENCE
* CLK_GLOBAL_MEM_FENCE.
*/
-void __ovld write_mem_fence(cl_mem_fence_flags flags);
+void __ovld write_mem_fence(cl_mem_fence_flags);
// OpenCL v2.0 s6.13.9 - Address Space Qualifier Functions
@@ -12531,141 +12488,141 @@ cl_mem_fence_flags __ovld get_fence(void *ptr);
* synchronization of source data such as using a
* barrier before performing the copy.
*/
-event_t __ovld async_work_group_copy(__local char *dst, const __global char *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar *dst, const __global uchar *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short *dst, const __global short *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort *dst, const __global ushort *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int *dst, const __global int *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint *dst, const __global uint *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long *dst, const __global long *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong *dst, const __global ulong *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float *dst, const __global float *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char2 *dst, const __global char2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar2 *dst, const __global uchar2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short2 *dst, const __global short2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort2 *dst, const __global ushort2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int2 *dst, const __global int2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint2 *dst, const __global uint2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long2 *dst, const __global long2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong2 *dst, const __global ulong2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float2 *dst, const __global float2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char3 *dst, const __global char3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar3 *dst, const __global uchar3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short3 *dst, const __global short3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort3 *dst, const __global ushort3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int3 *dst, const __global int3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint3 *dst, const __global uint3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long3 *dst, const __global long3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong3 *dst, const __global ulong3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float3 *dst, const __global float3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char4 *dst, const __global char4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar4 *dst, const __global uchar4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short4 *dst, const __global short4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort4 *dst, const __global ushort4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int4 *dst, const __global int4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint4 *dst, const __global uint4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long4 *dst, const __global long4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong4 *dst, const __global ulong4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float4 *dst, const __global float4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char8 *dst, const __global char8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar8 *dst, const __global uchar8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short8 *dst, const __global short8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort8 *dst, const __global ushort8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int8 *dst, const __global int8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint8 *dst, const __global uint8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long8 *dst, const __global long8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong8 *dst, const __global ulong8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float8 *dst, const __global float8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char16 *dst, const __global char16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar16 *dst, const __global uchar16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short16 *dst, const __global short16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort16 *dst, const __global ushort16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int16 *dst, const __global int16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint16 *dst, const __global uint16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long16 *dst, const __global long16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong16 *dst, const __global ulong16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float16 *dst, const __global float16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char *dst, const __local char *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar *dst, const __local uchar *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short *dst, const __local short *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort *dst, const __local ushort *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int *dst, const __local int *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint *dst, const __local uint *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long *dst, const __local long *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong *dst, const __local ulong *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float *dst, const __local float *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char2 *dst, const __local char2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar2 *dst, const __local uchar2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short2 *dst, const __local short2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort2 *dst, const __local ushort2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int2 *dst, const __local int2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint2 *dst, const __local uint2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long2 *dst, const __local long2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong2 *dst, const __local ulong2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float2 *dst, const __local float2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char3 *dst, const __local char3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar3 *dst, const __local uchar3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short3 *dst, const __local short3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort3 *dst, const __local ushort3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int3 *dst, const __local int3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint3 *dst, const __local uint3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long3 *dst, const __local long3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong3 *dst, const __local ulong3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float3 *dst, const __local float3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char4 *dst, const __local char4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar4 *dst, const __local uchar4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short4 *dst, const __local short4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort4 *dst, const __local ushort4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int4 *dst, const __local int4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint4 *dst, const __local uint4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long4 *dst, const __local long4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong4 *dst, const __local ulong4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float4 *dst, const __local float4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char8 *dst, const __local char8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar8 *dst, const __local uchar8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short8 *dst, const __local short8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort8 *dst, const __local ushort8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int8 *dst, const __local int8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint8 *dst, const __local uint8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long8 *dst, const __local long8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong8 *dst, const __local ulong8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float8 *dst, const __local float8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char16 *dst, const __local char16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar16 *dst, const __local uchar16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short16 *dst, const __local short16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort16 *dst, const __local ushort16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int16 *dst, const __local int16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint16 *dst, const __local uint16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long16 *dst, const __local long16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong16 *dst, const __local ulong16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float16 *dst, const __local float16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local char *, const __global char *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar *, const __global uchar *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short *, const __global short *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort *, const __global ushort *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int *, const __global int *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint *, const __global uint *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long *, const __global long *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong *, const __global ulong *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float *, const __global float *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local char2 *, const __global char2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar2 *, const __global uchar2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short2 *, const __global short2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort2 *, const __global ushort2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int2 *, const __global int2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint2 *, const __global uint2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long2 *, const __global long2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong2 *, const __global ulong2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float2 *, const __global float2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local char3 *, const __global char3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar3 *, const __global uchar3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short3 *, const __global short3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort3 *, const __global ushort3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int3 *, const __global int3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint3 *, const __global uint3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long3 *, const __global long3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong3 *, const __global ulong3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float3 *, const __global float3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local char4 *, const __global char4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar4 *, const __global uchar4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short4 *, const __global short4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort4 *, const __global ushort4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int4 *, const __global int4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint4 *, const __global uint4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long4 *, const __global long4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong4 *, const __global ulong4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float4 *, const __global float4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local char8 *, const __global char8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar8 *, const __global uchar8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short8 *, const __global short8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort8 *, const __global ushort8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int8 *, const __global int8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint8 *, const __global uint8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long8 *, const __global long8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong8 *, const __global ulong8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float8 *, const __global float8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local char16 *, const __global char16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar16 *, const __global uchar16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short16 *, const __global short16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort16 *, const __global ushort16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int16 *, const __global int16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint16 *, const __global uint16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long16 *, const __global long16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong16 *, const __global ulong16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float16 *, const __global float16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char *, const __local char *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar *, const __local uchar *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short *, const __local short *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort *, const __local ushort *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int *, const __local int *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint *, const __local uint *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long *, const __local long *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong *, const __local ulong *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float *, const __local float *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char2 *, const __local char2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar2 *, const __local uchar2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short2 *, const __local short2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort2 *, const __local ushort2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int2 *, const __local int2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint2 *, const __local uint2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long2 *, const __local long2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong2 *, const __local ulong2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float2 *, const __local float2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char3 *, const __local char3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar3 *, const __local uchar3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short3 *, const __local short3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort3 *, const __local ushort3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int3 *, const __local int3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint3 *, const __local uint3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long3 *, const __local long3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong3 *, const __local ulong3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float3 *, const __local float3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char4 *, const __local char4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar4 *, const __local uchar4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short4 *, const __local short4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort4 *, const __local ushort4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int4 *, const __local int4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint4 *, const __local uint4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long4 *, const __local long4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong4 *, const __local ulong4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float4 *, const __local float4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char8 *, const __local char8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar8 *, const __local uchar8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short8 *, const __local short8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort8 *, const __local ushort8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int8 *, const __local int8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint8 *, const __local uint8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long8 *, const __local long8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong8 *, const __local ulong8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float8 *, const __local float8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char16 *, const __local char16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar16 *, const __local uchar16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short16 *, const __local short16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort16 *, const __local ushort16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int16 *, const __local int16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint16 *, const __local uint16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long16 *, const __local long16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong16 *, const __local ulong16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float16 *, const __local float16 *, size_t, event_t);
#ifdef cl_khr_fp64
-event_t __ovld async_work_group_copy(__local double *dst, const __global double *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double2 *dst, const __global double2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double3 *dst, const __global double3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double4 *dst, const __global double4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double8 *dst, const __global double8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double16 *dst, const __global double16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double *dst, const __local double *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double2 *dst, const __local double2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double3 *dst, const __local double3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double4 *dst, const __local double4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double8 *dst, const __local double8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double16 *dst, const __local double16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local double *, const __global double *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local double2 *, const __global double2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local double3 *, const __global double3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local double4 *, const __global double4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local double8 *, const __global double8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local double16 *, const __global double16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double *, const __local double *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double2 *, const __local double2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double3 *, const __local double3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double4 *, const __local double4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double8 *, const __local double8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double16 *, const __local double16 *, size_t, event_t);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-event_t __ovld async_work_group_copy(__local half *dst, const __global half *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half2 *dst, const __global half2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half3 *dst, const __global half3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half4 *dst, const __global half4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half8 *dst, const __global half8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half16 *dst, const __global half16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half *dst, const __local half *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half2 *dst, const __local half2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half3 *dst, const __local half3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half4 *dst, const __local half4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half8 *dst, const __local half8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half16 *dst, const __local half16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local half *, const __global half *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local half2 *, const __global half2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local half3 *, const __global half3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local half4 *, const __global half4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local half8 *, const __global half8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local half16 *, const __global half16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half *, const __local half *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half2 *, const __local half2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half3 *, const __local half3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half4 *, const __local half4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half8 *, const __local half8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half16 *, const __local half16 *, size_t, event_t);
#endif //cl_khr_fp16
/**
@@ -12694,141 +12651,141 @@ event_t __ovld async_work_group_copy(__global half16 *dst, const __local half16
* synchronization of source data such as using a
* barrier before performing the copy.
*/
-event_t __ovld async_work_group_strided_copy(__local char *dst, const __global char *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar *dst, const __global uchar *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short *dst, const __global short *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort *dst, const __global ushort *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int *dst, const __global int *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint *dst, const __global uint *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long *dst, const __global long *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong *dst, const __global ulong *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float *dst, const __global float *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char2 *dst, const __global char2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar2 *dst, const __global uchar2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short2 *dst, const __global short2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort2 *dst, const __global ushort2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int2 *dst, const __global int2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint2 *dst, const __global uint2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long2 *dst, const __global long2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong2 *dst, const __global ulong2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float2 *dst, const __global float2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char3 *dst, const __global char3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar3 *dst, const __global uchar3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short3 *dst, const __global short3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort3 *dst, const __global ushort3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int3 *dst, const __global int3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint3 *dst, const __global uint3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long3 *dst, const __global long3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong3 *dst, const __global ulong3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float3 *dst, const __global float3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char4 *dst, const __global char4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar4 *dst, const __global uchar4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short4 *dst, const __global short4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort4 *dst, const __global ushort4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int4 *dst, const __global int4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint4 *dst, const __global uint4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long4 *dst, const __global long4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong4 *dst, const __global ulong4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float4 *dst, const __global float4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char8 *dst, const __global char8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar8 *dst, const __global uchar8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short8 *dst, const __global short8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort8 *dst, const __global ushort8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int8 *dst, const __global int8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint8 *dst, const __global uint8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long8 *dst, const __global long8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong8 *dst, const __global ulong8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float8 *dst, const __global float8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char16 *dst, const __global char16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar16 *dst, const __global uchar16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short16 *dst, const __global short16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort16 *dst, const __global ushort16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int16 *dst, const __global int16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint16 *dst, const __global uint16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long16 *dst, const __global long16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong16 *dst, const __global ulong16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float16 *dst, const __global float16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char *dst, const __local char *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar *dst, const __local uchar *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short *dst, const __local short *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort *dst, const __local ushort *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int *dst, const __local int *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint *dst, const __local uint *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long *dst, const __local long *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong *dst, const __local ulong *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float *dst, const __local float *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char2 *dst, const __local char2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar2 *dst, const __local uchar2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short2 *dst, const __local short2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort2 *dst, const __local ushort2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int2 *dst, const __local int2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint2 *dst, const __local uint2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long2 *dst, const __local long2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong2 *dst, const __local ulong2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float2 *dst, const __local float2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char3 *dst, const __local char3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar3 *dst, const __local uchar3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short3 *dst, const __local short3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort3 *dst, const __local ushort3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int3 *dst, const __local int3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint3 *dst, const __local uint3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long3 *dst, const __local long3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong3 *dst, const __local ulong3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float3 *dst, const __local float3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char4 *dst, const __local char4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar4 *dst, const __local uchar4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short4 *dst, const __local short4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort4 *dst, const __local ushort4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int4 *dst, const __local int4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint4 *dst, const __local uint4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long4 *dst, const __local long4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong4 *dst, const __local ulong4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float4 *dst, const __local float4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char8 *dst, const __local char8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar8 *dst, const __local uchar8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short8 *dst, const __local short8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort8 *dst, const __local ushort8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int8 *dst, const __local int8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint8 *dst, const __local uint8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long8 *dst, const __local long8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong8 *dst, const __local ulong8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float8 *dst, const __local float8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char16 *dst, const __local char16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar16 *dst, const __local uchar16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short16 *dst, const __local short16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort16 *dst, const __local ushort16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int16 *dst, const __local int16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint16 *dst, const __local uint16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long16 *dst, const __local long16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong16 *dst, const __local ulong16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float16 *dst, const __local float16 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local char *, const __global char *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar *, const __global uchar *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short *, const __global short *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort *, const __global ushort *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int *, const __global int *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint *, const __global uint *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long *, const __global long *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong *, const __global ulong *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float *, const __global float *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local char2 *, const __global char2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar2 *, const __global uchar2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short2 *, const __global short2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort2 *, const __global ushort2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int2 *, const __global int2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint2 *, const __global uint2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long2 *, const __global long2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong2 *, const __global ulong2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float2 *, const __global float2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local char3 *, const __global char3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar3 *, const __global uchar3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short3 *, const __global short3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort3 *, const __global ushort3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int3 *, const __global int3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint3 *, const __global uint3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long3 *, const __global long3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong3 *, const __global ulong3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float3 *, const __global float3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local char4 *, const __global char4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar4 *, const __global uchar4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short4 *, const __global short4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort4 *, const __global ushort4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int4 *, const __global int4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint4 *, const __global uint4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long4 *, const __global long4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong4 *, const __global ulong4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float4 *, const __global float4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local char8 *, const __global char8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar8 *, const __global uchar8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short8 *, const __global short8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort8 *, const __global ushort8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int8 *, const __global int8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint8 *, const __global uint8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long8 *, const __global long8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong8 *, const __global ulong8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float8 *, const __global float8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local char16 *, const __global char16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar16 *, const __global uchar16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short16 *, const __global short16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort16 *, const __global ushort16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int16 *, const __global int16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint16 *, const __global uint16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long16 *, const __global long16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong16 *, const __global ulong16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float16 *, const __global float16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char *, const __local char *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar *, const __local uchar *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short *, const __local short *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort *, const __local ushort *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int *, const __local int *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint *, const __local uint *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long *, const __local long *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong *, const __local ulong *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float *, const __local float *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char2 *, const __local char2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar2 *, const __local uchar2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short2 *, const __local short2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort2 *, const __local ushort2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int2 *, const __local int2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint2 *, const __local uint2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long2 *, const __local long2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong2 *, const __local ulong2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float2 *, const __local float2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char3 *, const __local char3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar3 *, const __local uchar3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short3 *, const __local short3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort3 *, const __local ushort3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int3 *, const __local int3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint3 *, const __local uint3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long3 *, const __local long3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong3 *, const __local ulong3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float3 *, const __local float3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char4 *, const __local char4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar4 *, const __local uchar4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short4 *, const __local short4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort4 *, const __local ushort4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int4 *, const __local int4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint4 *, const __local uint4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long4 *, const __local long4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong4 *, const __local ulong4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float4 *, const __local float4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char8 *, const __local char8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar8 *, const __local uchar8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short8 *, const __local short8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort8 *, const __local ushort8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int8 *, const __local int8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint8 *, const __local uint8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long8 *, const __local long8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong8 *, const __local ulong8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float8 *, const __local float8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char16 *, const __local char16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar16 *, const __local uchar16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short16 *, const __local short16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort16 *, const __local ushort16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int16 *, const __local int16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint16 *, const __local uint16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long16 *, const __local long16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong16 *, const __local ulong16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float16 *, const __local float16 *, size_t, size_t, event_t);
#ifdef cl_khr_fp64
-event_t __ovld async_work_group_strided_copy(__local double *dst, const __global double *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double2 *dst, const __global double2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double3 *dst, const __global double3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double4 *dst, const __global double4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double8 *dst, const __global double8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double16 *dst, const __global double16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double *dst, const __local double *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double2 *dst, const __local double2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double3 *dst, const __local double3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double4 *dst, const __local double4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double8 *dst, const __local double8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double16 *dst, const __local double16 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local double *, const __global double *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local double2 *, const __global double2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local double3 *, const __global double3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local double4 *, const __global double4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local double8 *, const __global double8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local double16 *, const __global double16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double *, const __local double *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double2 *, const __local double2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double3 *, const __local double3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double4 *, const __local double4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double8 *, const __local double8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double16 *, const __local double16 *, size_t, size_t, event_t);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-event_t __ovld async_work_group_strided_copy(__local half *dst, const __global half *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half2 *dst, const __global half2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half3 *dst, const __global half3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half4 *dst, const __global half4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half8 *dst, const __global half8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half16 *dst, const __global half16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half *dst, const __local half *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half2 *dst, const __local half2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half3 *dst, const __local half3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half4 *dst, const __local half4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half8 *dst, const __local half8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half16 *dst, const __local half16 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local half *, const __global half *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local half2 *, const __global half2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local half3 *, const __global half3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local half4 *, const __global half4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local half8 *, const __global half8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local half16 *, const __global half16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half *, const __local half *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half2 *, const __local half2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half3 *, const __local half3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half4 *, const __local half4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half8 *, const __local half8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half16 *, const __local half16 *, size_t, size_t, event_t);
#endif //cl_khr_fp16
/**
@@ -12842,7 +12799,7 @@ event_t __ovld async_work_group_strided_copy(__global half16 *dst, const __local
* the same num_events and event objects specified
* in event_list; otherwise the results are undefined.
*/
-void __ovld wait_group_events(int num_events, event_t *event_list);
+void __ovld wait_group_events(int, event_t *);
/**
* Prefetch num_elements * sizeof(gentype)
@@ -12851,75 +12808,75 @@ void __ovld wait_group_events(int num_events, event_t *event_list);
* and does not affect the functional
* behavior of the kernel.
*/
-void __ovld prefetch(const __global char *p, size_t num_elements);
-void __ovld prefetch(const __global uchar *p, size_t num_elements);
-void __ovld prefetch(const __global short *p, size_t num_elements);
-void __ovld prefetch(const __global ushort *p, size_t num_elements);
-void __ovld prefetch(const __global int *p, size_t num_elements);
-void __ovld prefetch(const __global uint *p, size_t num_elements);
-void __ovld prefetch(const __global long *p, size_t num_elements);
-void __ovld prefetch(const __global ulong *p, size_t num_elements);
-void __ovld prefetch(const __global float *p, size_t num_elements);
-void __ovld prefetch(const __global char2 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar2 *p, size_t num_elements);
-void __ovld prefetch(const __global short2 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort2 *p, size_t num_elements);
-void __ovld prefetch(const __global int2 *p, size_t num_elements);
-void __ovld prefetch(const __global uint2 *p, size_t num_elements);
-void __ovld prefetch(const __global long2 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong2 *p, size_t num_elements);
-void __ovld prefetch(const __global float2 *p, size_t num_elements);
-void __ovld prefetch(const __global char3 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar3 *p, size_t num_elements);
-void __ovld prefetch(const __global short3 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort3 *p, size_t num_elements);
-void __ovld prefetch(const __global int3 *p, size_t num_elements);
-void __ovld prefetch(const __global uint3 *p, size_t num_elements);
-void __ovld prefetch(const __global long3 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong3 *p, size_t num_elements);
-void __ovld prefetch(const __global float3 *p, size_t num_elements);
-void __ovld prefetch(const __global char4 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar4 *p, size_t num_elements);
-void __ovld prefetch(const __global short4 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort4 *p, size_t num_elements);
-void __ovld prefetch(const __global int4 *p, size_t num_elements);
-void __ovld prefetch(const __global uint4 *p, size_t num_elements);
-void __ovld prefetch(const __global long4 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong4 *p, size_t num_elements);
-void __ovld prefetch(const __global float4 *p, size_t num_elements);
-void __ovld prefetch(const __global char8 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar8 *p, size_t num_elements);
-void __ovld prefetch(const __global short8 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort8 *p, size_t num_elements);
-void __ovld prefetch(const __global int8 *p, size_t num_elements);
-void __ovld prefetch(const __global uint8 *p, size_t num_elements);
-void __ovld prefetch(const __global long8 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong8 *p, size_t num_elements);
-void __ovld prefetch(const __global float8 *p, size_t num_elements);
-void __ovld prefetch(const __global char16 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar16 *p, size_t num_elements);
-void __ovld prefetch(const __global short16 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort16 *p, size_t num_elements);
-void __ovld prefetch(const __global int16 *p, size_t num_elements);
-void __ovld prefetch(const __global uint16 *p, size_t num_elements);
-void __ovld prefetch(const __global long16 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong16 *p, size_t num_elements);
-void __ovld prefetch(const __global float16 *p, size_t num_elements);
+void __ovld prefetch(const __global char *, size_t);
+void __ovld prefetch(const __global uchar *, size_t);
+void __ovld prefetch(const __global short *, size_t);
+void __ovld prefetch(const __global ushort *, size_t);
+void __ovld prefetch(const __global int *, size_t);
+void __ovld prefetch(const __global uint *, size_t);
+void __ovld prefetch(const __global long *, size_t);
+void __ovld prefetch(const __global ulong *, size_t);
+void __ovld prefetch(const __global float *, size_t);
+void __ovld prefetch(const __global char2 *, size_t);
+void __ovld prefetch(const __global uchar2 *, size_t);
+void __ovld prefetch(const __global short2 *, size_t);
+void __ovld prefetch(const __global ushort2 *, size_t);
+void __ovld prefetch(const __global int2 *, size_t);
+void __ovld prefetch(const __global uint2 *, size_t);
+void __ovld prefetch(const __global long2 *, size_t);
+void __ovld prefetch(const __global ulong2 *, size_t);
+void __ovld prefetch(const __global float2 *, size_t);
+void __ovld prefetch(const __global char3 *, size_t);
+void __ovld prefetch(const __global uchar3 *, size_t);
+void __ovld prefetch(const __global short3 *, size_t);
+void __ovld prefetch(const __global ushort3 *, size_t);
+void __ovld prefetch(const __global int3 *, size_t);
+void __ovld prefetch(const __global uint3 *, size_t);
+void __ovld prefetch(const __global long3 *, size_t);
+void __ovld prefetch(const __global ulong3 *, size_t);
+void __ovld prefetch(const __global float3 *, size_t);
+void __ovld prefetch(const __global char4 *, size_t);
+void __ovld prefetch(const __global uchar4 *, size_t);
+void __ovld prefetch(const __global short4 *, size_t);
+void __ovld prefetch(const __global ushort4 *, size_t);
+void __ovld prefetch(const __global int4 *, size_t);
+void __ovld prefetch(const __global uint4 *, size_t);
+void __ovld prefetch(const __global long4 *, size_t);
+void __ovld prefetch(const __global ulong4 *, size_t);
+void __ovld prefetch(const __global float4 *, size_t);
+void __ovld prefetch(const __global char8 *, size_t);
+void __ovld prefetch(const __global uchar8 *, size_t);
+void __ovld prefetch(const __global short8 *, size_t);
+void __ovld prefetch(const __global ushort8 *, size_t);
+void __ovld prefetch(const __global int8 *, size_t);
+void __ovld prefetch(const __global uint8 *, size_t);
+void __ovld prefetch(const __global long8 *, size_t);
+void __ovld prefetch(const __global ulong8 *, size_t);
+void __ovld prefetch(const __global float8 *, size_t);
+void __ovld prefetch(const __global char16 *, size_t);
+void __ovld prefetch(const __global uchar16 *, size_t);
+void __ovld prefetch(const __global short16 *, size_t);
+void __ovld prefetch(const __global ushort16 *, size_t);
+void __ovld prefetch(const __global int16 *, size_t);
+void __ovld prefetch(const __global uint16 *, size_t);
+void __ovld prefetch(const __global long16 *, size_t);
+void __ovld prefetch(const __global ulong16 *, size_t);
+void __ovld prefetch(const __global float16 *, size_t);
#ifdef cl_khr_fp64
-void __ovld prefetch(const __global double *p, size_t num_elements);
-void __ovld prefetch(const __global double2 *p, size_t num_elements);
-void __ovld prefetch(const __global double3 *p, size_t num_elements);
-void __ovld prefetch(const __global double4 *p, size_t num_elements);
-void __ovld prefetch(const __global double8 *p, size_t num_elements);
-void __ovld prefetch(const __global double16 *p, size_t num_elements);
+void __ovld prefetch(const __global double *, size_t);
+void __ovld prefetch(const __global double2 *, size_t);
+void __ovld prefetch(const __global double3 *, size_t);
+void __ovld prefetch(const __global double4 *, size_t);
+void __ovld prefetch(const __global double8 *, size_t);
+void __ovld prefetch(const __global double16 *, size_t);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-void __ovld prefetch(const __global half *p, size_t num_elements);
-void __ovld prefetch(const __global half2 *p, size_t num_elements);
-void __ovld prefetch(const __global half3 *p, size_t num_elements);
-void __ovld prefetch(const __global half4 *p, size_t num_elements);
-void __ovld prefetch(const __global half8 *p, size_t num_elements);
-void __ovld prefetch(const __global half16 *p, size_t num_elements);
+void __ovld prefetch(const __global half *, size_t);
+void __ovld prefetch(const __global half2 *, size_t);
+void __ovld prefetch(const __global half3 *, size_t);
+void __ovld prefetch(const __global half4 *, size_t);
+void __ovld prefetch(const __global half8 *, size_t);
+void __ovld prefetch(const __global half16 *, size_t);
#endif // cl_khr_fp16
// OpenCL v1.1 s6.11.1, v1.2 s6.12.11 - Atomic Functions
@@ -12934,29 +12891,29 @@ void __ovld prefetch(const __global half16 *p, size_t num_elements);
* (old + val) and store result at location
* pointed by p. The function returns old.
*/
-int __ovld atomic_add(volatile __global int *p, int val);
-unsigned int __ovld atomic_add(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_add(volatile __local int *p, int val);
-unsigned int __ovld atomic_add(volatile __local unsigned int *p, unsigned int val);
+int __ovld atomic_add(volatile __global int *, int);
+uint __ovld atomic_add(volatile __global uint *, uint);
+int __ovld atomic_add(volatile __local int *, int);
+uint __ovld atomic_add(volatile __local uint *, uint);
#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_add(volatile int *p, int val);
-unsigned int __ovld atomic_add(volatile unsigned int *p, unsigned int val);
+int __ovld atomic_add(volatile int *, int);
+uint __ovld atomic_add(volatile uint *, uint);
#endif
#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_add(volatile __global int *p, int val);
-unsigned int __ovld atom_add(volatile __global unsigned int *p, unsigned int val);
+int __ovld atom_add(volatile __global int *, int);
+uint __ovld atom_add(volatile __global uint *, uint);
#endif
#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_add(volatile __local int *p, int val);
-unsigned int __ovld atom_add(volatile __local unsigned int *p, unsigned int val);
+int __ovld atom_add(volatile __local int *, int);
+uint __ovld atom_add(volatile __local uint *, uint);
#endif
#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_add(volatile __global long *p, long val);
-unsigned long __ovld atom_add(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_add(volatile __local long *p, long val);
-unsigned long __ovld atom_add(volatile __local unsigned long *p, unsigned long val);
+long __ovld atom_add(volatile __global long *, long);
+ulong __ovld atom_add(volatile __global ulong *, ulong);
+long __ovld atom_add(volatile __local long *, long);
+ulong __ovld atom_add(volatile __local ulong *, ulong);
#endif
/**
@@ -12964,29 +12921,29 @@ unsigned long __ovld atom_add(volatile __local unsigned long *p, unsigned long v
* Compute (old - val) and store result at location pointed by p. The function
* returns old.
*/
-int __ovld atomic_sub(volatile __global int *p, int val);
-unsigned int __ovld atomic_sub(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_sub(volatile __local int *p, int val);
-unsigned int __ovld atomic_sub(volatile __local unsigned int *p, unsigned int val);
+int __ovld atomic_sub(volatile __global int *, int);
+uint __ovld atomic_sub(volatile __global uint *, uint);
+int __ovld atomic_sub(volatile __local int *, int);
+uint __ovld atomic_sub(volatile __local uint *, uint);
#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_sub(volatile int *p, int val);
-unsigned int __ovld atomic_sub(volatile unsigned int *p, unsigned int val);
+int __ovld atomic_sub(volatile int *, int);
+uint __ovld atomic_sub(volatile uint *, uint);
#endif
#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_sub(volatile __global int *p, int val);
-unsigned int __ovld atom_sub(volatile __global unsigned int *p, unsigned int val);
+int __ovld atom_sub(volatile __global int *, int);
+uint __ovld atom_sub(volatile __global uint *, uint);
#endif
#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_sub(volatile __local int *p, int val);
-unsigned int __ovld atom_sub(volatile __local unsigned int *p, unsigned int val);
+int __ovld atom_sub(volatile __local int *, int);
+uint __ovld atom_sub(volatile __local uint *, uint);
#endif
#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_sub(volatile __global long *p, long val);
-unsigned long __ovld atom_sub(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_sub(volatile __local long *p, long val);
-unsigned long __ovld atom_sub(volatile __local unsigned long *p, unsigned long val);
+long __ovld atom_sub(volatile __global long *, long);
+ulong __ovld atom_sub(volatile __global ulong *, ulong);
+long __ovld atom_sub(volatile __local long *, long);
+ulong __ovld atom_sub(volatile __local ulong *, ulong);
#endif
/**
@@ -12994,32 +12951,32 @@ unsigned long __ovld atom_sub(volatile __local unsigned long *p, unsigned long v
* with new value given by val. Returns old
* value.
*/
-int __ovld atomic_xchg(volatile __global int *p, int val);
-unsigned int __ovld atomic_xchg(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_xchg(volatile __local int *p, int val);
-unsigned int __ovld atomic_xchg(volatile __local unsigned int *p, unsigned int val);
-float __ovld atomic_xchg(volatile __global float *p, float val);
-float __ovld atomic_xchg(volatile __local float *p, float val);
+int __ovld atomic_xchg(volatile __global int *, int);
+uint __ovld atomic_xchg(volatile __global uint *, uint);
+int __ovld atomic_xchg(volatile __local int *, int);
+uint __ovld atomic_xchg(volatile __local uint *, uint);
+float __ovld atomic_xchg(volatile __global float *, float);
+float __ovld atomic_xchg(volatile __local float *, float);
#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_xchg(volatile int *p, int val);
-unsigned int __ovld atomic_xchg(volatile unsigned int *p, unsigned int val);
-float __ovld atomic_xchg(volatile float *p, float val);
+int __ovld atomic_xchg(volatile int *, int);
+uint __ovld atomic_xchg(volatile uint *, uint);
+float __ovld atomic_xchg(volatile float *, float);
#endif
#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_xchg(volatile __global int *p, int val);
-unsigned int __ovld atom_xchg(volatile __global unsigned int *p, unsigned int val);
+int __ovld atom_xchg(volatile __global int *, int);
+uint __ovld atom_xchg(volatile __global uint *, uint);
#endif
#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_xchg(volatile __local int *p, int val);
-unsigned int __ovld atom_xchg(volatile __local unsigned int *p, unsigned int val);
+int __ovld atom_xchg(volatile __local int *, int);
+uint __ovld atom_xchg(volatile __local uint *, uint);
#endif
#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_xchg(volatile __global long *p, long val);
-long __ovld atom_xchg(volatile __local long *p, long val);
-unsigned long __ovld atom_xchg(volatile __global unsigned long *p, unsigned long val);
-unsigned long __ovld atom_xchg(volatile __local unsigned long *p, unsigned long val);
+long __ovld atom_xchg(volatile __global long *, long);
+long __ovld atom_xchg(volatile __local long *, long);
+ulong __ovld atom_xchg(volatile __global ulong *, ulong);
+ulong __ovld atom_xchg(volatile __local ulong *, ulong);
#endif
/**
@@ -13028,29 +12985,29 @@ unsigned long __ovld atom_xchg(volatile __local unsigned long *p, unsigned long
* (old + 1) and store result at location
* pointed by p. The function returns old.
*/
-int __ovld atomic_inc(volatile __global int *p);
-unsigned int __ovld atomic_inc(volatile __global unsigned int *p);
-int __ovld atomic_inc(volatile __local int *p);
-unsigned int __ovld atomic_inc(volatile __local unsigned int *p);
+int __ovld atomic_inc(volatile __global int *);
+uint __ovld atomic_inc(volatile __global uint *);
+int __ovld atomic_inc(volatile __local int *);
+uint __ovld atomic_inc(volatile __local uint *);
#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_inc(volatile int *p);
-unsigned int __ovld atomic_inc(volatile unsigned int *p);
+int __ovld atomic_inc(volatile int *);
+uint __ovld atomic_inc(volatile uint *);
#endif
#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_inc(volatile __global int *p);
-unsigned int __ovld atom_inc(volatile __global unsigned int *p);
+int __ovld atom_inc(volatile __global int *);
+uint __ovld atom_inc(volatile __global uint *);
#endif
#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_inc(volatile __local int *p);
-unsigned int __ovld atom_inc(volatile __local unsigned int *p);
+int __ovld atom_inc(volatile __local int *);
+uint __ovld atom_inc(volatile __local uint *);
#endif
#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_inc(volatile __global long *p);
-unsigned long __ovld atom_inc(volatile __global unsigned long *p);
-long __ovld atom_inc(volatile __local long *p);
-unsigned long __ovld atom_inc(volatile __local unsigned long *p);
+long __ovld atom_inc(volatile __global long *);
+ulong __ovld atom_inc(volatile __global ulong *);
+long __ovld atom_inc(volatile __local long *);
+ulong __ovld atom_inc(volatile __local ulong *);
#endif
/**
@@ -13059,29 +13016,29 @@ unsigned long __ovld atom_inc(volatile __local unsigned long *p);
* (old - 1) and store result at location
* pointed by p. The function returns old.
*/
-int __ovld atomic_dec(volatile __global int *p);
-unsigned int __ovld atomic_dec(volatile __global unsigned int *p);
-int __ovld atomic_dec(volatile __local int *p);
-unsigned int __ovld atomic_dec(volatile __local unsigned int *p);
+int __ovld atomic_dec(volatile __global int *);
+uint __ovld atomic_dec(volatile __global uint *);
+int __ovld atomic_dec(volatile __local int *);
+uint __ovld atomic_dec(volatile __local uint *);
#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_dec(volatile int *p);
-unsigned int __ovld atomic_dec(volatile unsigned int *p);
+int __ovld atomic_dec(volatile int *);
+uint __ovld atomic_dec(volatile uint *);
#endif
#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_dec(volatile __global int *p);
-unsigned int __ovld atom_dec(volatile __global unsigned int *p);
+int __ovld atom_dec(volatile __global int *);
+uint __ovld atom_dec(volatile __global uint *);
#endif
#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_dec(volatile __local int *p);
-unsigned int __ovld atom_dec(volatile __local unsigned int *p);
+int __ovld atom_dec(volatile __local int *);
+uint __ovld atom_dec(volatile __local uint *);
#endif
#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_dec(volatile __global long *p);
-unsigned long __ovld atom_dec(volatile __global unsigned long *p);
-long __ovld atom_dec(volatile __local long *p);
-unsigned long __ovld atom_dec(volatile __local unsigned long *p);
+long __ovld atom_dec(volatile __global long *);
+ulong __ovld atom_dec(volatile __global ulong *);
+long __ovld atom_dec(volatile __local long *);
+ulong __ovld atom_dec(volatile __local ulong *);
#endif
/**
@@ -13091,29 +13048,29 @@ unsigned long __ovld atom_dec(volatile __local unsigned long *p);
* location pointed by p. The function
* returns old.
*/
-int __ovld atomic_cmpxchg(volatile __global int *p, int cmp, int val);
-unsigned int __ovld atomic_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val);
-int __ovld atomic_cmpxchg(volatile __local int *p, int cmp, int val);
-unsigned int __ovld atomic_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val);
+int __ovld atomic_cmpxchg(volatile __global int *, int, int);
+uint __ovld atomic_cmpxchg(volatile __global uint *, uint, uint);
+int __ovld atomic_cmpxchg(volatile __local int *, int, int);
+uint __ovld atomic_cmpxchg(volatile __local uint *, uint, uint);
#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_cmpxchg(volatile int *p, int cmp, int val);
-unsigned int __ovld atomic_cmpxchg(volatile unsigned int *p, unsigned int cmp, unsigned int val);
+int __ovld atomic_cmpxchg(volatile int *, int, int);
+uint __ovld atomic_cmpxchg(volatile uint *, uint, uint);
#endif
#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_cmpxchg(volatile __global int *p, int cmp, int val);
-unsigned int __ovld atom_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val);
+int __ovld atom_cmpxchg(volatile __global int *, int, int);
+uint __ovld atom_cmpxchg(volatile __global uint *, uint, uint);
#endif
#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_cmpxchg(volatile __local int *p, int cmp, int val);
-unsigned int __ovld atom_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val);
+int __ovld atom_cmpxchg(volatile __local int *, int, int);
+uint __ovld atom_cmpxchg(volatile __local uint *, uint, uint);
#endif
#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_cmpxchg(volatile __global long *p, long cmp, long val);
-unsigned long __ovld atom_cmpxchg(volatile __global unsigned long *p, unsigned long cmp, unsigned long val);
-long __ovld atom_cmpxchg(volatile __local long *p, long cmp, long val);
-unsigned long __ovld atom_cmpxchg(volatile __local unsigned long *p, unsigned long cmp, unsigned long val);
+long __ovld atom_cmpxchg(volatile __global long *, long, long);
+ulong __ovld atom_cmpxchg(volatile __global ulong *, ulong, ulong);
+long __ovld atom_cmpxchg(volatile __local long *, long, long);
+ulong __ovld atom_cmpxchg(volatile __local ulong *, ulong, ulong);
#endif
/**
@@ -13123,29 +13080,29 @@ unsigned long __ovld atom_cmpxchg(volatile __local unsigned long *p, unsigned lo
* location pointed by p. The function
* returns old.
*/
-int __ovld atomic_min(volatile __global int *p, int val);
-unsigned int __ovld atomic_min(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_min(volatile __local int *p, int val);
-unsigned int __ovld atomic_min(volatile __local unsigned int *p, unsigned int val);
+int __ovld atomic_min(volatile __global int *, int);
+uint __ovld atomic_min(volatile __global uint *, uint);
+int __ovld atomic_min(volatile __local int *, int);
+uint __ovld atomic_min(volatile __local uint *, uint);
#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_min(volatile int *p, int val);
-unsigned int __ovld atomic_min(volatile unsigned int *p, unsigned int val);
+int __ovld atomic_min(volatile int *, int);
+uint __ovld atomic_min(volatile uint *, uint);
#endif
#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_min(volatile __global int *p, int val);
-unsigned int __ovld atom_min(volatile __global unsigned int *p, unsigned int val);
+int __ovld atom_min(volatile __global int *, int);
+uint __ovld atom_min(volatile __global uint *, uint);
#endif
#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_min(volatile __local int *p, int val);
-unsigned int __ovld atom_min(volatile __local unsigned int *p, unsigned int val);
+int __ovld atom_min(volatile __local int *, int);
+uint __ovld atom_min(volatile __local uint *, uint);
#endif
#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_min(volatile __global long *p, long val);
-unsigned long __ovld atom_min(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_min(volatile __local long *p, long val);
-unsigned long __ovld atom_min(volatile __local unsigned long *p, unsigned long val);
+long __ovld atom_min(volatile __global long *, long);
+ulong __ovld atom_min(volatile __global ulong *, ulong);
+long __ovld atom_min(volatile __local long *, long);
+ulong __ovld atom_min(volatile __local ulong *, ulong);
#endif
/**
@@ -13155,29 +13112,29 @@ unsigned long __ovld atom_min(volatile __local unsigned long *p, unsigned long v
* location pointed by p. The function
* returns old.
*/
-int __ovld atomic_max(volatile __global int *p, int val);
-unsigned int __ovld atomic_max(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_max(volatile __local int *p, int val);
-unsigned int __ovld atomic_max(volatile __local unsigned int *p, unsigned int val);
+int __ovld atomic_max(volatile __global int *, int);
+uint __ovld atomic_max(volatile __global uint *, uint);
+int __ovld atomic_max(volatile __local int *, int);
+uint __ovld atomic_max(volatile __local uint *, uint);
#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_max(volatile int *p, int val);
-unsigned int __ovld atomic_max(volatile unsigned int *p, unsigned int val);
+int __ovld atomic_max(volatile int *, int);
+uint __ovld atomic_max(volatile uint *, uint);
#endif
#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_max(volatile __global int *p, int val);
-unsigned int __ovld atom_max(volatile __global unsigned int *p, unsigned int val);
+int __ovld atom_max(volatile __global int *, int);
+uint __ovld atom_max(volatile __global uint *, uint);
#endif
#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_max(volatile __local int *p, int val);
-unsigned int __ovld atom_max(volatile __local unsigned int *p, unsigned int val);
+int __ovld atom_max(volatile __local int *, int);
+uint __ovld atom_max(volatile __local uint *, uint);
#endif
#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_max(volatile __global long *p, long val);
-unsigned long __ovld atom_max(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_max(volatile __local long *p, long val);
-unsigned long __ovld atom_max(volatile __local unsigned long *p, unsigned long val);
+long __ovld atom_max(volatile __global long *, long);
+ulong __ovld atom_max(volatile __global ulong *, ulong);
+long __ovld atom_max(volatile __local long *, long);
+ulong __ovld atom_max(volatile __local ulong *, ulong);
#endif
/**
@@ -13186,29 +13143,29 @@ unsigned long __ovld atom_max(volatile __local unsigned long *p, unsigned long v
* (old & val) and store result at location
* pointed by p. The function returns old.
*/
-int __ovld atomic_and(volatile __global int *p, int val);
-unsigned int __ovld atomic_and(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_and(volatile __local int *p, int val);
-unsigned int __ovld atomic_and(volatile __local unsigned int *p, unsigned int val);
+int __ovld atomic_and(volatile __global int *, int);
+uint __ovld atomic_and(volatile __global uint *, uint);
+int __ovld atomic_and(volatile __local int *, int);
+uint __ovld atomic_and(volatile __local uint *, uint);
#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_and(volatile int *p, int val);
-unsigned int __ovld atomic_and(volatile unsigned int *p, unsigned int val);
+int __ovld atomic_and(volatile int *, int);
+uint __ovld atomic_and(volatile uint *, uint);
#endif
#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_and(volatile __global int *p, int val);
-unsigned int __ovld atom_and(volatile __global unsigned int *p, unsigned int val);
+int __ovld atom_and(volatile __global int *, int);
+uint __ovld atom_and(volatile __global uint *, uint);
#endif
#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_and(volatile __local int *p, int val);
-unsigned int __ovld atom_and(volatile __local unsigned int *p, unsigned int val);
+int __ovld atom_and(volatile __local int *, int);
+uint __ovld atom_and(volatile __local uint *, uint);
#endif
#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_and(volatile __global long *p, long val);
-unsigned long __ovld atom_and(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_and(volatile __local long *p, long val);
-unsigned long __ovld atom_and(volatile __local unsigned long *p, unsigned long val);
+long __ovld atom_and(volatile __global long *, long);
+ulong __ovld atom_and(volatile __global ulong *, ulong);
+long __ovld atom_and(volatile __local long *, long);
+ulong __ovld atom_and(volatile __local ulong *, ulong);
#endif
/**
@@ -13217,29 +13174,29 @@ unsigned long __ovld atom_and(volatile __local unsigned long *p, unsigned long v
* (old | val) and store result at location
* pointed by p. The function returns old.
*/
-int __ovld atomic_or(volatile __global int *p, int val);
-unsigned int __ovld atomic_or(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_or(volatile __local int *p, int val);
-unsigned int __ovld atomic_or(volatile __local unsigned int *p, unsigned int val);
+int __ovld atomic_or(volatile __global int *, int);
+uint __ovld atomic_or(volatile __global uint *, uint);
+int __ovld atomic_or(volatile __local int *, int);
+uint __ovld atomic_or(volatile __local uint *, uint);
#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_or(volatile int *p, int val);
-unsigned int __ovld atomic_or(volatile unsigned int *p, unsigned int val);
+int __ovld atomic_or(volatile int *, int);
+uint __ovld atomic_or(volatile uint *, uint);
#endif
#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_or(volatile __global int *p, int val);
-unsigned int __ovld atom_or(volatile __global unsigned int *p, unsigned int val);
+int __ovld atom_or(volatile __global int *, int);
+uint __ovld atom_or(volatile __global uint *, uint);
#endif
#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_or(volatile __local int *p, int val);
-unsigned int __ovld atom_or(volatile __local unsigned int *p, unsigned int val);
+int __ovld atom_or(volatile __local int *, int);
+uint __ovld atom_or(volatile __local uint *, uint);
#endif
#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_or(volatile __global long *p, long val);
-unsigned long __ovld atom_or(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_or(volatile __local long *p, long val);
-unsigned long __ovld atom_or(volatile __local unsigned long *p, unsigned long val);
+long __ovld atom_or(volatile __global long *, long);
+ulong __ovld atom_or(volatile __global ulong *, ulong);
+long __ovld atom_or(volatile __local long *, long);
+ulong __ovld atom_or(volatile __local ulong *, ulong);
#endif
/**
@@ -13248,29 +13205,29 @@ unsigned long __ovld atom_or(volatile __local unsigned long *p, unsigned long va
* (old ^ val) and store result at location
* pointed by p. The function returns old.
*/
-int __ovld atomic_xor(volatile __global int *p, int val);
-unsigned int __ovld atomic_xor(volatile __global unsigned int *p, unsigned int val);
-int __ovld atomic_xor(volatile __local int *p, int val);
-unsigned int __ovld atomic_xor(volatile __local unsigned int *p, unsigned int val);
+int __ovld atomic_xor(volatile __global int *, int);
+uint __ovld atomic_xor(volatile __global uint *, uint);
+int __ovld atomic_xor(volatile __local int *, int);
+uint __ovld atomic_xor(volatile __local uint *, uint);
#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_xor(volatile int *p, int val);
-unsigned int __ovld atomic_xor(volatile unsigned int *p, unsigned int val);
+int __ovld atomic_xor(volatile int *, int);
+uint __ovld atomic_xor(volatile uint *, uint);
#endif
#if defined(cl_khr_global_int32_extended_atomics)
-int __ovld atom_xor(volatile __global int *p, int val);
-unsigned int __ovld atom_xor(volatile __global unsigned int *p, unsigned int val);
+int __ovld atom_xor(volatile __global int *, int);
+uint __ovld atom_xor(volatile __global uint *, uint);
#endif
#if defined(cl_khr_local_int32_extended_atomics)
-int __ovld atom_xor(volatile __local int *p, int val);
-unsigned int __ovld atom_xor(volatile __local unsigned int *p, unsigned int val);
+int __ovld atom_xor(volatile __local int *, int);
+uint __ovld atom_xor(volatile __local uint *, uint);
#endif
#if defined(cl_khr_int64_extended_atomics)
-long __ovld atom_xor(volatile __global long *p, long val);
-unsigned long __ovld atom_xor(volatile __global unsigned long *p, unsigned long val);
-long __ovld atom_xor(volatile __local long *p, long val);
-unsigned long __ovld atom_xor(volatile __local unsigned long *p, unsigned long val);
+long __ovld atom_xor(volatile __global long *, long);
+ulong __ovld atom_xor(volatile __global ulong *, ulong);
+long __ovld atom_xor(volatile __local long *, long);
+ulong __ovld atom_xor(volatile __local ulong *, ulong);
#endif
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
@@ -13289,339 +13246,1342 @@ unsigned long __ovld atom_xor(volatile __local unsigned long *p, unsigned long v
#endif
// atomic_init()
-void __ovld atomic_init(volatile atomic_int *object, int value);
-void __ovld atomic_init(volatile atomic_uint *object, uint value);
-void __ovld atomic_init(volatile atomic_float *object, float value);
+#if defined(__opencl_c_generic_address_space)
+void __ovld atomic_init(volatile atomic_int *, int);
+void __ovld atomic_init(volatile atomic_uint *, uint);
+void __ovld atomic_init(volatile atomic_float *, float);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-void __ovld atomic_init(volatile atomic_long *object, long value);
-void __ovld atomic_init(volatile atomic_ulong *object, ulong value);
+void __ovld atomic_init(volatile atomic_long *, long);
+void __ovld atomic_init(volatile atomic_ulong *, ulong);
#ifdef cl_khr_fp64
-void __ovld atomic_init(volatile atomic_double *object, double value);
+void __ovld atomic_init(volatile atomic_double *, double);
#endif //cl_khr_fp64
#endif
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+void __ovld atomic_init(volatile __global atomic_int *, int);
+void __ovld atomic_init(volatile __local atomic_int *, int);
+void __ovld atomic_init(volatile __global atomic_uint *, uint);
+void __ovld atomic_init(volatile __local atomic_uint *, uint);
+void __ovld atomic_init(volatile __global atomic_float *, float);
+void __ovld atomic_init(volatile __local atomic_float *, float);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+void __ovld atomic_init(volatile __global atomic_long *, long);
+void __ovld atomic_init(volatile __local atomic_long *, long);
+void __ovld atomic_init(volatile __global atomic_ulong *, ulong);
+void __ovld atomic_init(volatile __local atomic_ulong *, ulong);
+#ifdef cl_khr_fp64
+void __ovld atomic_init(volatile __global atomic_double *, double);
+void __ovld atomic_init(volatile __local atomic_double *, double);
+#endif //cl_khr_fp64
+#endif
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
// atomic_work_item_fence()
-void __ovld atomic_work_item_fence(cl_mem_fence_flags flags, memory_order order, memory_scope scope);
+void __ovld atomic_work_item_fence(cl_mem_fence_flags, memory_order, memory_scope);
// atomic_fetch()
// OpenCL v2.0 s6.13.11.7.5:
// add/sub: atomic type argument can be uintptr_t/intptr_t, value type argument can be ptrdiff_t.
#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-int __ovld atomic_fetch_add(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_add(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_sub(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_sub(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_or(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_or(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_xor(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_xor(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_and(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_and(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_min(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_min(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_max(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_max(volatile atomic_uint *object, uint operand);
-
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_fetch_add(volatile atomic_int *, int);
+uint __ovld atomic_fetch_add(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_sub(volatile atomic_int *, int);
+uint __ovld atomic_fetch_sub(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_or(volatile atomic_int *, int);
+uint __ovld atomic_fetch_or(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_xor(volatile atomic_int *, int);
+uint __ovld atomic_fetch_xor(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_and(volatile atomic_int *, int);
+uint __ovld atomic_fetch_and(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_min(volatile atomic_int *, int);
+uint __ovld atomic_fetch_min(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_max(volatile atomic_int *, int);
+uint __ovld atomic_fetch_max(volatile atomic_uint *, uint);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+long __ovld atomic_fetch_add(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_add(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_sub(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_sub(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_or(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_or(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_xor(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_xor(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_and(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_and(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_min(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_min(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_max(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_max(volatile atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_add(volatile atomic_uintptr_t *, ptrdiff_t);
+uintptr_t __ovld atomic_fetch_sub(volatile atomic_uintptr_t *, ptrdiff_t);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_fetch_add(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_add(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_add(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_add(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_sub(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_sub(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_sub(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_sub(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_or(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_or(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_or(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_or(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_xor(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_xor(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_xor(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_xor(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_and(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_and(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_and(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_and(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_min(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_min(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_min(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_min(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_max(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_max(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_max(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_max(volatile __local atomic_uint *, uint);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_add(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_sub(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_sub(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_or(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_or(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_xor(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_xor(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_and(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_and(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_min(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_max(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_add(volatile atomic_uintptr_t *object, ptrdiff_t operand);
-uintptr_t __ovld atomic_fetch_sub(volatile atomic_uintptr_t *object, ptrdiff_t operand);
+long __ovld atomic_fetch_add(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_add(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_add(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_add(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_add(volatile __global atomic_uintptr_t *, ptrdiff_t);
+uintptr_t __ovld atomic_fetch_add(volatile __local atomic_uintptr_t *, ptrdiff_t);
+long __ovld atomic_fetch_sub(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_sub(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_sub(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_sub(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_sub(volatile __global atomic_uintptr_t *, ptrdiff_t);
+uintptr_t __ovld atomic_fetch_sub(volatile __local atomic_uintptr_t *, ptrdiff_t);
+long __ovld atomic_fetch_or(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_or(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_or(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_or(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_or(volatile __global atomic_uintptr_t *, intptr_t);
+uintptr_t __ovld atomic_fetch_or(volatile __local atomic_uintptr_t *, intptr_t);
+intptr_t __ovld atomic_fetch_or(volatile __global atomic_intptr_t *, uintptr_t);
+intptr_t __ovld atomic_fetch_or(volatile __local atomic_intptr_t *, uintptr_t);
+long __ovld atomic_fetch_xor(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_xor(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_xor(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_xor(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_xor(volatile __global atomic_uintptr_t *, intptr_t);
+uintptr_t __ovld atomic_fetch_xor(volatile __local atomic_uintptr_t *, intptr_t);
+intptr_t __ovld atomic_fetch_xor(volatile __global atomic_intptr_t *, uintptr_t);
+intptr_t __ovld atomic_fetch_xor(volatile __local atomic_intptr_t *, uintptr_t);
+long __ovld atomic_fetch_and(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_and(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_and(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_and(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_and(volatile __global atomic_uintptr_t *, intptr_t);
+uintptr_t __ovld atomic_fetch_and(volatile __local atomic_uintptr_t *, intptr_t);
+intptr_t __ovld atomic_fetch_and(volatile __global atomic_intptr_t *, uintptr_t);
+intptr_t __ovld atomic_fetch_and(volatile __local atomic_intptr_t *, uintptr_t);
+long __ovld atomic_fetch_min(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_min(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_min(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_min(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_min(volatile __global atomic_uintptr_t *, intptr_t);
+uintptr_t __ovld atomic_fetch_min(volatile __local atomic_uintptr_t *, intptr_t);
+intptr_t __ovld atomic_fetch_min(volatile __global atomic_intptr_t *, uintptr_t);
+intptr_t __ovld atomic_fetch_min(volatile __local atomic_intptr_t *, uintptr_t);
+long __ovld atomic_fetch_max(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_max(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_max(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_max(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_max(volatile __global atomic_uintptr_t *, uintptr_t);
+uintptr_t __ovld atomic_fetch_max(volatile __local atomic_uintptr_t *, uintptr_t);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif
#if defined(__opencl_c_atomic_scope_device)
-int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order);
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_fetch_add_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_sub_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_or_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_xor_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_and_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_min_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_max_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *, uint, memory_order);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
+long __ovld atomic_fetch_add_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_sub_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_or_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_xor_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_and_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_min_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_max_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *, ptrdiff_t, memory_order);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *, ptrdiff_t, memory_order);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_fetch_add_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_add_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_add_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_add_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_sub_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_sub_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_sub_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_sub_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_or_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_or_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_or_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_or_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_xor_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_xor_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_xor_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_xor_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_and_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_and_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_and_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_and_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_min_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_min_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_min_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_min_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_max_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_max_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_max_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_max_explicit(volatile __local atomic_uint *, uint, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+long __ovld atomic_fetch_add_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_add_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_add_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_add_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *, ptrdiff_t, memory_order);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile __local atomic_uintptr_t *, ptrdiff_t, memory_order);
+long __ovld atomic_fetch_sub_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_sub_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_sub_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_sub_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile __global atomic_uintptr_t *, ptrdiff_t, memory_order);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *, ptrdiff_t, memory_order);
+long __ovld atomic_fetch_or_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_or_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_or_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_or_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order);
+uintptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order);
+intptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order);
+intptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order);
+long __ovld atomic_fetch_xor_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_xor_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_xor_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_xor_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order);
+uintptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order);
+intptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order);
+intptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order);
+long __ovld atomic_fetch_and_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_and_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_and_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_and_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order);
+uintptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order);
+intptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order);
+intptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order);
+long __ovld atomic_fetch_min_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_min_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_min_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_min_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order);
+uintptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order);
+intptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order);
+intptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order);
+long __ovld atomic_fetch_max_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_max_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_max_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_max_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_max_explicit(volatile __global atomic_uintptr_t *, uintptr_t, memory_order);
+uintptr_t __ovld atomic_fetch_max_explicit(volatile __local atomic_uintptr_t *, uintptr_t, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif
-int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_fetch_add_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_sub_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_or_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_xor_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_and_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_min_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_max_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+long __ovld atomic_fetch_add_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_sub_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_or_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_xor_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_and_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_min_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_max_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+#endif
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_fetch_add_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_add_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_add_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_add_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_sub_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_sub_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_sub_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_sub_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_or_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_or_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_or_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_or_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_xor_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_xor_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_xor_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_xor_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_and_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_and_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_and_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_and_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_min_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_min_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_min_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_min_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_max_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_max_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_max_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_max_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+long __ovld atomic_fetch_add_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_add_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile __local atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+ulong __ovld atomic_fetch_add_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_add_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_sub_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_sub_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_sub_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_sub_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile __global atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+long __ovld atomic_fetch_or_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_or_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_or_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_or_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+long __ovld atomic_fetch_xor_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_xor_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_xor_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_xor_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+long __ovld atomic_fetch_and_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_and_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_and_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_and_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+long __ovld atomic_fetch_min_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_min_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_min_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_min_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+long __ovld atomic_fetch_max_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_max_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_max_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_max_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_max_explicit(volatile __global atomic_uintptr_t *, uintptr_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_max_explicit(volatile __local atomic_uintptr_t *, uintptr_t, memory_order, memory_scope);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+
+// The functionality added by cl_ext_float_atomics extension
+#if defined(cl_ext_float_atomics)
+
+#if defined(__opencl_c_ext_fp16_global_atomic_load_store)
+void __ovld atomic_store(volatile __global atomic_half *, half);
+void __ovld atomic_store_explicit(volatile __global atomic_half *,
+ half, memory_order);
+void __ovld atomic_store_explicit(volatile __global atomic_half *,
+ half, memory_order, memory_scope);
+half __ovld atomic_load(volatile __global atomic_half *);
+half __ovld atomic_load_explicit(volatile __global atomic_half *,
+ memory_order);
+half __ovld atomic_load_explicit(volatile __global atomic_half *,
+ memory_order, memory_scope);
+half __ovld atomic_exchange(volatile __global atomic_half *, half);
+half __ovld atomic_exchange_explicit(volatile __global atomic_half *,
+ half, memory_order);
+half __ovld atomic_exchange_explicit(volatile __global atomic_half *,
+ half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_global_atomic_load_store)
+
+#if defined(__opencl_c_ext_fp16_local_atomic_load_store)
+void __ovld atomic_store(volatile __local atomic_half *, half);
+void __ovld atomic_store_explicit(volatile __local atomic_half *,
+ half, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_half *,
+ half, memory_order, memory_scope);
+half __ovld atomic_load(volatile __local atomic_half *);
+half __ovld atomic_load_explicit(volatile __local atomic_half *,
+ memory_order);
+half __ovld atomic_load_explicit(volatile __local atomic_half *,
+ memory_order, memory_scope);
+half __ovld atomic_exchange(volatile __local atomic_half *, half);
+half __ovld atomic_exchange_explicit(volatile __local atomic_half *,
+ half, memory_order);
+half __ovld atomic_exchange_explicit(volatile __local atomic_half *,
+ half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_local_atomic_load_store)
+
+#if defined(__opencl_c_ext_fp16_global_atomic_load_store) && \
+ defined(__opencl_c_ext_fp16_local_atomic_load_store)
+void __ovld atomic_store(volatile atomic_half *, half);
+void __ovld atomic_store_explicit(volatile atomic_half *, half,
+ memory_order);
+void __ovld atomic_store_explicit(volatile atomic_half *, half,
+ memory_order, memory_scope);
+half __ovld atomic_load(volatile atomic_half *);
+half __ovld atomic_load_explicit(volatile atomic_half *,
+ memory_order);
+half __ovld atomic_load_explicit(volatile atomic_half *,
+ memory_order, memory_scope);
+half __ovld atomic_exchange(volatile atomic_half *, half);
+half __ovld atomic_exchange_explicit(volatile atomic_half *, half,
+ memory_order);
+half __ovld atomic_exchange_explicit(volatile atomic_half *, half,
+ memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_global_atomic_load_store) &&
+ // defined(__opencl_c_ext_fp16_local_atomic_load_store)
+
+#if defined(__opencl_c_ext_fp16_global_atomic_min_max)
+half __ovld atomic_fetch_min(volatile __global atomic_half *, half);
+half __ovld atomic_fetch_max(volatile __global atomic_half *, half);
+half __ovld atomic_fetch_min_explicit(volatile __global atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_max_explicit(volatile __global atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_min_explicit(volatile __global atomic_half *,
+ half, memory_order, memory_scope);
+half __ovld atomic_fetch_max_explicit(volatile __global atomic_half *,
+ half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_global_atomic_min_max)
+
+#if defined(__opencl_c_ext_fp16_local_atomic_min_max)
+half __ovld atomic_fetch_min(volatile __local atomic_half *, half);
+half __ovld atomic_fetch_max(volatile __local atomic_half *, half);
+half __ovld atomic_fetch_min_explicit(volatile __local atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_max_explicit(volatile __local atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_min_explicit(volatile __local atomic_half *,
+ half, memory_order, memory_scope);
+half __ovld atomic_fetch_max_explicit(volatile __local atomic_half *,
+ half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_local_atomic_min_max)
+
+#if defined(__opencl_c_ext_fp16_global_atomic_min_max) && \
+ defined(__opencl_c_ext_fp16_local_atomic_min_max)
+half __ovld atomic_fetch_min(volatile atomic_half *, half);
+half __ovld atomic_fetch_max(volatile atomic_half *, half);
+half __ovld atomic_fetch_min_explicit(volatile atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_max_explicit(volatile atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_min_explicit(volatile atomic_half *,
+ half, memory_order, memory_scope);
+half __ovld atomic_fetch_max_explicit(volatile atomic_half *,
+ half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_global_atomic_min_max) && \
+ defined(__opencl_c_ext_fp16_local_atomic_min_max)
+
+#if defined(__opencl_c_ext_fp32_global_atomic_min_max)
+float __ovld atomic_fetch_min(volatile __global atomic_float *, float);
+float __ovld atomic_fetch_max(volatile __global atomic_float *, float);
+float __ovld atomic_fetch_min_explicit(volatile __global atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_max_explicit(volatile __global atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_min_explicit(volatile __global atomic_float *,
+ float, memory_order, memory_scope);
+float __ovld atomic_fetch_max_explicit(volatile __global atomic_float *,
+ float, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp32_global_atomic_min_max)
+
+#if defined(__opencl_c_ext_fp32_local_atomic_min_max)
+float __ovld atomic_fetch_min(volatile __local atomic_float *, float);
+float __ovld atomic_fetch_max(volatile __local atomic_float *, float);
+float __ovld atomic_fetch_min_explicit(volatile __local atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_max_explicit(volatile __local atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_min_explicit(volatile __local atomic_float *,
+ float, memory_order, memory_scope);
+float __ovld atomic_fetch_max_explicit(volatile __local atomic_float *,
+ float, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp32_local_atomic_min_max)
+
+#if defined(__opencl_c_ext_fp32_global_atomic_min_max) && \
+ defined(__opencl_c_ext_fp32_local_atomic_min_max)
+float __ovld atomic_fetch_min(volatile atomic_float *, float);
+float __ovld atomic_fetch_max(volatile atomic_float *, float);
+float __ovld atomic_fetch_min_explicit(volatile atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_max_explicit(volatile atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_min_explicit(volatile atomic_float *,
+ float, memory_order, memory_scope);
+float __ovld atomic_fetch_max_explicit(volatile atomic_float *,
+ float, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp32_global_atomic_min_max) && \
+ defined(__opencl_c_ext_fp32_local_atomic_min_max)
+
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-#endif
+#if defined(__opencl_c_ext_fp64_global_atomic_min_max)
+double __ovld atomic_fetch_min(volatile __global atomic_double *, double);
+double __ovld atomic_fetch_max(volatile __global atomic_double *, double);
+double __ovld atomic_fetch_min_explicit(volatile __global atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_max_explicit(volatile __global atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_min_explicit(volatile __global atomic_double *,
+ double, memory_order, memory_scope);
+double __ovld atomic_fetch_max_explicit(volatile __global atomic_double *,
+ double, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp64_global_atomic_min_max)
+
+#if defined(__opencl_c_ext_fp64_local_atomic_min_max)
+double __ovld atomic_fetch_min(volatile __local atomic_double *, double);
+double __ovld atomic_fetch_max(volatile __local atomic_double *, double);
+double __ovld atomic_fetch_min_explicit(volatile __local atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_max_explicit(volatile __local atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_min_explicit(volatile __local atomic_double *,
+ double, memory_order, memory_scope);
+double __ovld atomic_fetch_max_explicit(volatile __local atomic_double *,
+ double, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp64_local_atomic_min_max)
+
+#if defined(__opencl_c_ext_fp64_global_atomic_min_max) && \
+ defined(__opencl_c_ext_fp64_local_atomic_min_max)
+double __ovld atomic_fetch_min(volatile atomic_double *, double);
+double __ovld atomic_fetch_max(volatile atomic_double *, double);
+double __ovld atomic_fetch_min_explicit(volatile atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_max_explicit(volatile atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_min_explicit(volatile atomic_double *,
+ double, memory_order, memory_scope);
+double __ovld atomic_fetch_max_explicit(volatile atomic_double *,
+ double, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp64_global_atomic_min_max) && \
+ defined(__opencl_c_ext_fp64_local_atomic_min_max)
+#endif // defined(cl_khr_int64_base_atomics) && \
+ defined(cl_khr_int64_extended_atomics)
+
+#if defined(__opencl_c_ext_fp16_global_atomic_add)
+half __ovld atomic_fetch_add(volatile __global atomic_half *, half);
+half __ovld atomic_fetch_sub(volatile __global atomic_half *, half);
+half __ovld atomic_fetch_add_explicit(volatile __global atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_sub_explicit(volatile __global atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_add_explicit(volatile __global atomic_half *,
+ half, memory_order, memory_scope);
+half __ovld atomic_fetch_sub_explicit(volatile __global atomic_half *,
+ half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_global_atomic_add)
+
+#if defined(__opencl_c_ext_fp16_local_atomic_add)
+half __ovld atomic_fetch_add(volatile __local atomic_half *, half);
+half __ovld atomic_fetch_sub(volatile __local atomic_half *, half);
+half __ovld atomic_fetch_add_explicit(volatile __local atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_sub_explicit(volatile __local atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_add_explicit(volatile __local atomic_half *,
+ half, memory_order, memory_scope);
+half __ovld atomic_fetch_sub_explicit(volatile __local atomic_half *,
+ half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_local_atomic_add)
+
+#if defined(__opencl_c_ext_fp16_global_atomic_add) && \
+ defined(__opencl_c_ext_fp16_local_atomic_add)
+half __ovld atomic_fetch_add(volatile atomic_half *, half);
+half __ovld atomic_fetch_sub(volatile atomic_half *, half);
+half __ovld atomic_fetch_add_explicit(volatile atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_sub_explicit(volatile atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_add_explicit(volatile atomic_half *,
+ half, memory_order, memory_scope);
+half __ovld atomic_fetch_sub_explicit(volatile atomic_half *,
+ half, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp16_global_atomic_add) && \
+ defined(__opencl_c_ext_fp16_local_atomic_add)
+
+#if defined(__opencl_c_ext_fp32_global_atomic_add)
+float __ovld atomic_fetch_add(volatile __global atomic_float *, float);
+float __ovld atomic_fetch_sub(volatile __global atomic_float *, float);
+float __ovld atomic_fetch_add_explicit(volatile __global atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_sub_explicit(volatile __global atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_add_explicit(volatile __global atomic_float *,
+ float, memory_order, memory_scope);
+float __ovld atomic_fetch_sub_explicit(volatile __global atomic_float *,
+ float, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp32_global_atomic_add)
+
+#if defined(__opencl_c_ext_fp32_local_atomic_add)
+float __ovld atomic_fetch_add(volatile __local atomic_float *, float);
+float __ovld atomic_fetch_sub(volatile __local atomic_float *, float);
+float __ovld atomic_fetch_add_explicit(volatile __local atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_sub_explicit(volatile __local atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_add_explicit(volatile __local atomic_float *,
+ float, memory_order, memory_scope);
+float __ovld atomic_fetch_sub_explicit(volatile __local atomic_float *,
+ float, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp32_local_atomic_add)
+
+#if defined(__opencl_c_ext_fp32_global_atomic_add) && \
+ defined(__opencl_c_ext_fp32_local_atomic_add)
+float __ovld atomic_fetch_add(volatile atomic_float *, float);
+float __ovld atomic_fetch_sub(volatile atomic_float *, float);
+float __ovld atomic_fetch_add_explicit(volatile atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_sub_explicit(volatile atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_add_explicit(volatile atomic_float *,
+ float, memory_order, memory_scope);
+float __ovld atomic_fetch_sub_explicit(volatile atomic_float *,
+ float, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp32_global_atomic_add) && \
+ defined(__opencl_c_ext_fp32_local_atomic_add)
+
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#if defined(__opencl_c_ext_fp64_global_atomic_add)
+double __ovld atomic_fetch_add(volatile __global atomic_double *, double);
+double __ovld atomic_fetch_sub(volatile __global atomic_double *, double);
+double __ovld atomic_fetch_add_explicit(volatile __global atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_sub_explicit(volatile __global atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_add_explicit(volatile __global atomic_double *,
+ double, memory_order, memory_scope);
+double __ovld atomic_fetch_sub_explicit(volatile __global atomic_double *,
+ double, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp64_global_atomic_add)
+
+#if defined(__opencl_c_ext_fp64_local_atomic_add)
+double __ovld atomic_fetch_add(volatile __local atomic_double *, double);
+double __ovld atomic_fetch_sub(volatile __local atomic_double *, double);
+double __ovld atomic_fetch_add_explicit(volatile __local atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_sub_explicit(volatile __local atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_add_explicit(volatile __local atomic_double *,
+ double, memory_order, memory_scope);
+double __ovld atomic_fetch_sub_explicit(volatile __local atomic_double *,
+ double, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp64_local_atomic_add)
+
+#if defined(__opencl_c_ext_fp64_global_atomic_add) && \
+ defined(__opencl_c_ext_fp64_local_atomic_add)
+double __ovld atomic_fetch_add(volatile atomic_double *, double);
+double __ovld atomic_fetch_sub(volatile atomic_double *, double);
+double __ovld atomic_fetch_add_explicit(volatile atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_sub_explicit(volatile atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_add_explicit(volatile atomic_double *,
+ double, memory_order, memory_scope);
+double __ovld atomic_fetch_sub_explicit(volatile atomic_double *,
+ double, memory_order, memory_scope);
+#endif // defined(__opencl_c_ext_fp64_global_atomic_add) && \
+ defined(__opencl_c_ext_fp64_local_atomic_add)
+#endif // defined(cl_khr_int64_base_atomics) && \
+ defined(cl_khr_int64_extended_atomics)
+
+#endif // cl_ext_float_atomics
// atomic_store()
#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-void __ovld atomic_store(volatile atomic_int *object, int desired);
-void __ovld atomic_store(volatile atomic_uint *object, uint desired);
-void __ovld atomic_store(volatile atomic_float *object, float desired);
+#if defined(__opencl_c_generic_address_space)
+void __ovld atomic_store(volatile atomic_int *, int);
+void __ovld atomic_store(volatile atomic_uint *, uint);
+void __ovld atomic_store(volatile atomic_float *, float);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-void __ovld atomic_store(volatile atomic_double *object, double desired);
+void __ovld atomic_store(volatile atomic_double *, double);
#endif //cl_khr_fp64
-void __ovld atomic_store(volatile atomic_long *object, long desired);
-void __ovld atomic_store(volatile atomic_ulong *object, ulong desired);
+void __ovld atomic_store(volatile atomic_long *, long);
+void __ovld atomic_store(volatile atomic_ulong *, ulong);
#endif
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+void __ovld atomic_store(volatile __global atomic_int *, int);
+void __ovld atomic_store(volatile __local atomic_int *, int);
+void __ovld atomic_store(volatile __global atomic_uint *, uint);
+void __ovld atomic_store(volatile __local atomic_uint *, uint);
+void __ovld atomic_store(volatile __global atomic_float *, float);
+void __ovld atomic_store(volatile __local atomic_float *, float);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+void __ovld atomic_store(volatile __global atomic_double *, double);
+void __ovld atomic_store(volatile __local atomic_double *, double);
+#endif //cl_khr_fp64
+void __ovld atomic_store(volatile __global atomic_long *, long);
+void __ovld atomic_store(volatile __local atomic_long *, long);
+void __ovld atomic_store(volatile __global atomic_ulong *, ulong);
+void __ovld atomic_store(volatile __local atomic_ulong *, ulong);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif
#if defined(__opencl_c_atomic_scope_device)
-void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order);
+#if defined(__opencl_c_generic_address_space)
+void __ovld atomic_store_explicit(volatile atomic_int *, int, memory_order);
+void __ovld atomic_store_explicit(volatile atomic_uint *, uint, memory_order);
+void __ovld atomic_store_explicit(volatile atomic_float *, float, memory_order);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order);
+void __ovld atomic_store_explicit(volatile atomic_double *, double, memory_order);
#endif //cl_khr_fp64
-void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_ulong *object, ulong desired, memory_order order);
+void __ovld atomic_store_explicit(volatile atomic_long *, long, memory_order);
+void __ovld atomic_store_explicit(volatile atomic_ulong *, ulong, memory_order);
+#endif
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+void __ovld atomic_store_explicit(volatile __global atomic_int *, int, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_int *, int, memory_order);
+void __ovld atomic_store_explicit(volatile __global atomic_uint *, uint, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_uint *, uint, memory_order);
+void __ovld atomic_store_explicit(volatile __global atomic_float *, float, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_float *, float, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+void __ovld atomic_store_explicit(volatile __global atomic_double *, double, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_double *, double, memory_order);
#endif
+void __ovld atomic_store_explicit(volatile __global atomic_long *, long, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_long *, long, memory_order);
+void __ovld atomic_store_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif
-void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope);
+#if defined(__opencl_c_generic_address_space)
+void __ovld atomic_store_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile atomic_float *, float, memory_order, memory_scope);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope);
+void __ovld atomic_store_explicit(volatile atomic_double *, double, memory_order, memory_scope);
#endif //cl_khr_fp64
-void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
+void __ovld atomic_store_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
#endif
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+void __ovld atomic_store_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __global atomic_float *, float, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_float *, float, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+void __ovld atomic_store_explicit(volatile __global atomic_double *, double, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_double *, double, memory_order, memory_scope);
+#endif //cl_khr_fp64
+void __ovld atomic_store_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
// atomic_load()
#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-int __ovld atomic_load(volatile atomic_int *object);
-uint __ovld atomic_load(volatile atomic_uint *object);
-float __ovld atomic_load(volatile atomic_float *object);
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_load(volatile atomic_int *);
+uint __ovld atomic_load(volatile atomic_uint *);
+float __ovld atomic_load(volatile atomic_float *);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-double __ovld atomic_load(volatile atomic_double *object);
+double __ovld atomic_load(volatile atomic_double *);
#endif //cl_khr_fp64
-long __ovld atomic_load(volatile atomic_long *object);
-ulong __ovld atomic_load(volatile atomic_ulong *object);
+long __ovld atomic_load(volatile atomic_long *);
+ulong __ovld atomic_load(volatile atomic_ulong *);
#endif
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_load(volatile __global atomic_int *);
+int __ovld atomic_load(volatile __local atomic_int *);
+uint __ovld atomic_load(volatile __global atomic_uint *);
+uint __ovld atomic_load(volatile __local atomic_uint *);
+float __ovld atomic_load(volatile __global atomic_float *);
+float __ovld atomic_load(volatile __local atomic_float *);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_load(volatile __global atomic_double *);
+double __ovld atomic_load(volatile __local atomic_double *);
+#endif //cl_khr_fp64
+long __ovld atomic_load(volatile __global atomic_long *);
+long __ovld atomic_load(volatile __local atomic_long *);
+ulong __ovld atomic_load(volatile __global atomic_ulong *);
+ulong __ovld atomic_load(volatile __local atomic_ulong *);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif
#if defined(__opencl_c_atomic_scope_device)
-int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order);
-uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order);
-float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order);
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_load_explicit(volatile atomic_int *, memory_order);
+uint __ovld atomic_load_explicit(volatile atomic_uint *, memory_order);
+float __ovld atomic_load_explicit(volatile atomic_float *, memory_order);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order);
+double __ovld atomic_load_explicit(volatile atomic_double *, memory_order);
#endif //cl_khr_fp64
-long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order);
-ulong __ovld atomic_load_explicit(volatile atomic_ulong *object, memory_order order);
+long __ovld atomic_load_explicit(volatile atomic_long *, memory_order);
+ulong __ovld atomic_load_explicit(volatile atomic_ulong *, memory_order);
#endif
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_load_explicit(volatile __global atomic_int *, memory_order);
+int __ovld atomic_load_explicit(volatile __local atomic_int *, memory_order);
+uint __ovld atomic_load_explicit(volatile __global atomic_uint *, memory_order);
+uint __ovld atomic_load_explicit(volatile __local atomic_uint *, memory_order);
+float __ovld atomic_load_explicit(volatile __global atomic_float *, memory_order);
+float __ovld atomic_load_explicit(volatile __local atomic_float *, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_load_explicit(volatile __global atomic_double *, memory_order);
+double __ovld atomic_load_explicit(volatile __local atomic_double *, memory_order);
+#endif //cl_khr_fp64
+long __ovld atomic_load_explicit(volatile __global atomic_long *, memory_order);
+long __ovld atomic_load_explicit(volatile __local atomic_long *, memory_order);
+ulong __ovld atomic_load_explicit(volatile __global atomic_ulong *, memory_order);
+ulong __ovld atomic_load_explicit(volatile __local atomic_ulong *, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif
-int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order, memory_scope scope);
-uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order, memory_scope scope);
-float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order, memory_scope scope);
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_load_explicit(volatile atomic_int *, memory_order, memory_scope);
+uint __ovld atomic_load_explicit(volatile atomic_uint *, memory_order, memory_scope);
+float __ovld atomic_load_explicit(volatile atomic_float *, memory_order, memory_scope);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order, memory_scope scope);
+double __ovld atomic_load_explicit(volatile atomic_double *, memory_order, memory_scope);
#endif //cl_khr_fp64
-long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order, memory_scope scope);
-ulong __ovld atomic_load_explicit(volatile atomic_ulong *object, memory_order order, memory_scope scope);
+long __ovld atomic_load_explicit(volatile atomic_long *, memory_order, memory_scope);
+ulong __ovld atomic_load_explicit(volatile atomic_ulong *, memory_order, memory_scope);
#endif
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_load_explicit(volatile __global atomic_int *, memory_order, memory_scope);
+int __ovld atomic_load_explicit(volatile __local atomic_int *, memory_order, memory_scope);
+uint __ovld atomic_load_explicit(volatile __global atomic_uint *, memory_order, memory_scope);
+uint __ovld atomic_load_explicit(volatile __local atomic_uint *, memory_order, memory_scope);
+float __ovld atomic_load_explicit(volatile __global atomic_float *, memory_order, memory_scope);
+float __ovld atomic_load_explicit(volatile __local atomic_float *, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_load_explicit(volatile __global atomic_double *, memory_order, memory_scope);
+double __ovld atomic_load_explicit(volatile __local atomic_double *, memory_order, memory_scope);
+#endif
+long __ovld atomic_load_explicit(volatile __global atomic_long *, memory_order, memory_scope);
+long __ovld atomic_load_explicit(volatile __local atomic_long *, memory_order, memory_scope);
+ulong __ovld atomic_load_explicit(volatile __global atomic_ulong *, memory_order, memory_scope);
+ulong __ovld atomic_load_explicit(volatile __local atomic_ulong *, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
// atomic_exchange()
#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-int __ovld atomic_exchange(volatile atomic_int *object, int desired);
-uint __ovld atomic_exchange(volatile atomic_uint *object, uint desired);
-float __ovld atomic_exchange(volatile atomic_float *object, float desired);
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_exchange(volatile atomic_int *, int);
+uint __ovld atomic_exchange(volatile atomic_uint *, uint);
+float __ovld atomic_exchange(volatile atomic_float *, float);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-double __ovld atomic_exchange(volatile atomic_double *object, double desired);
+double __ovld atomic_exchange(volatile atomic_double *, double);
#endif //cl_khr_fp64
-long __ovld atomic_exchange(volatile atomic_long *object, long desired);
-ulong __ovld atomic_exchange(volatile atomic_ulong *object, ulong desired);
+long __ovld atomic_exchange(volatile atomic_long *, long);
+ulong __ovld atomic_exchange(volatile atomic_ulong *, ulong);
#endif
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_exchange(volatile __global atomic_int *, int);
+int __ovld atomic_exchange(volatile __local atomic_int *, int);
+uint __ovld atomic_exchange(volatile __global atomic_uint *, uint);
+uint __ovld atomic_exchange(volatile __local atomic_uint *, uint);
+float __ovld atomic_exchange(volatile __global atomic_float *, float);
+float __ovld atomic_exchange(volatile __local atomic_float *, float);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_exchange(volatile __global atomic_double *, double);
+double __ovld atomic_exchange(volatile __local atomic_double *, double);
+#endif //cl_khr_fp64
+long __ovld atomic_exchange(volatile __global atomic_long *, long);
+long __ovld atomic_exchange(volatile __local atomic_long *, long);
+ulong __ovld atomic_exchange(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_exchange(volatile __local atomic_ulong *, ulong);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif
#if defined(__opencl_c_atomic_scope_device)
-int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order);
-uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order);
-float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order);
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_exchange_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_exchange_explicit(volatile atomic_uint *, uint, memory_order);
+float __ovld atomic_exchange_explicit(volatile atomic_float *, float, memory_order);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order);
+double __ovld atomic_exchange_explicit(volatile atomic_double *, double, memory_order);
#endif //cl_khr_fp64
-long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order);
-ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *object, ulong desired, memory_order order);
+long __ovld atomic_exchange_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *, ulong, memory_order);
#endif
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_exchange_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_exchange_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_exchange_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_exchange_explicit(volatile __local atomic_uint *, uint, memory_order);
+float __ovld atomic_exchange_explicit(volatile __global atomic_float *, float, memory_order);
+float __ovld atomic_exchange_explicit(volatile __local atomic_float *, float, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_exchange_explicit(volatile __global atomic_double *, double, memory_order);
+double __ovld atomic_exchange_explicit(volatile __local atomic_double *, double, memory_order);
+#endif //cl_khr_fp64
+long __ovld atomic_exchange_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_exchange_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_exchange_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_exchange_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)wi
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif
-int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope);
-uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope);
+#if defined(__opencl_c_generic_address_space)
+int __ovld atomic_exchange_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_exchange_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+float __ovld atomic_exchange_explicit(volatile atomic_float *, float, memory_order, memory_scope);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope);
+double __ovld atomic_exchange_explicit(volatile atomic_double *, double, memory_order, memory_scope);
#endif //cl_khr_fp64
-long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope);
-ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
+long __ovld atomic_exchange_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
#endif
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+int __ovld atomic_exchange_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_exchange_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_exchange_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_exchange_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+float __ovld atomic_exchange_explicit(volatile __global atomic_float *, float, memory_order, memory_scope);
+float __ovld atomic_exchange_explicit(volatile __local atomic_float *, float, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_exchange_explicit(volatile __global atomic_double *, double, memory_order, memory_scope);
+double __ovld atomic_exchange_explicit(volatile __local atomic_double *, double, memory_order, memory_scope);
+#endif //cl_khr_fp64
+long __ovld atomic_exchange_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_exchange_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_exchange_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_exchange_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
// atomic_compare_exchange_strong() and atomic_compare_exchange_weak()
#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-bool __ovld atomic_compare_exchange_strong(volatile atomic_int *object, int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile atomic_uint *object, uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_int *object, int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_uint *object, uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile atomic_float *object, float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_float *object, float *expected, float desired);
-
+#if defined(__opencl_c_generic_address_space)
+bool __ovld atomic_compare_exchange_strong(volatile atomic_int *, int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_uint *, uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_int *, int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_uint *, uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_float *, float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_float *, float *, float);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong(volatile atomic_double *object, double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_double *object, double *expected, double desired);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_double *, double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_double *, double *, double);
#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong(volatile atomic_long *object, long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_long *object, long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile atomic_ulong *object, ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_ulong *object, ulong *expected, ulong desired);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_long *, long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_long *, long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_ulong *, ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_ulong *, ulong *, ulong);
#endif
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *, __global int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *, __local int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *, __private int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *, __global int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *, __local int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *, __private int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *, __global uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *, __local uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *, __private uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *, __global uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *, __local uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *, __private uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *, __global float *, float);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *, __local float *, float);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *, __private float *, float);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *, __global float *, float);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *, __local float *, float);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *, __private float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *, __global int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *, __local int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *, __private int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *, __global int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *, __local int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *, __private int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *, __global uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *, __local uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *, __private uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *, __global uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *, __local uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *, __private uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *, __global float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *, __local float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *, __private float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *, __global float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *, __local float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *, __private float *, float);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *, __global double *, double);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *, __local double *, double);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *, __private double *, double);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *, __global double *, double);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *, __local double *, double);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *, __private double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *, __global double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *, __local double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *, __private double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *, __global double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *, __local double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *, __private double *, double);
+#endif //cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *, __global long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *, __local long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *, __private long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *, __global long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *, __local long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *, __private long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *, __global ulong *, ulong);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *, __local ulong *, ulong);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *, __private ulong *, ulong);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *, __global ulong *, ulong);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *, __local ulong *, ulong);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *, __private ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *, __global long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *, __local long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *, __private long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *, __global long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *, __local long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *, __private long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *, __global ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *, __local ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *, __private ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *, __global ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *, __local ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *, __private ulong *, ulong);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected,
- int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *object, uint *expected,
- uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected,
- int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected,
- uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected,
- float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected,
- float desired, memory_order success, memory_order failure);
+#if defined(__opencl_c_atomic_scope_device)
+#if defined(__opencl_c_generic_address_space)
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *, int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *, uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *, int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *, uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *, float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *, float *, float, memory_order, memory_order);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected,
- double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected,
- double desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *, double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *, double *, double, memory_order, memory_order);
#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected,
- long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long *expected,
- long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected,
- ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *object, ulong *expected,
- ulong desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *, long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *, long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *, ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *, ulong *, ulong, memory_order, memory_order);
#endif
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __global int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __local int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __private int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __global int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __local int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __private int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __global uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __local uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __private uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __global uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __local uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __private uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __global float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __local float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __private float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __global float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __local float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __private float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __global int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __local int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __private int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __global int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __local int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __private int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __global uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __local uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __private uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __global uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __local uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __private uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __global float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __local float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __private float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __global float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __local float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __private float *, float, memory_order, memory_order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __global double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __local double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __private double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __global double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __local double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __private double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __global double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __local double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __private double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __global double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __local double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __private double *, double, memory_order, memory_order);
+#endif //cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __global long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __local long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __private long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __global long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __local long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __private long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __global ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __local ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __private ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __global ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __local ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __private ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __global long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __local long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __private long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __global long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __local long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __private long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __global ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __local ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __private ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __global ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __local ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __private ulong *, ulong, memory_order, memory_order);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_scope_device)
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected,
- int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *object, uint *expected,
- uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected,
- int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected,
- uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected,
- float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected,
- float desired, memory_order success, memory_order failure, memory_scope scope);
+#if defined(__opencl_c_generic_address_space)
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *, int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *, uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *, int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *, uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *, float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *, float *, float, memory_order, memory_order, memory_scope);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected,
- double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected,
- double desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *, double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *, double *, double, memory_order, memory_order, memory_scope);
#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected,
- long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long *expected,
- long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected,
- ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *object, ulong *expected,
- ulong desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *, long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *, long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *, ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *, ulong *, ulong, memory_order, memory_order, memory_scope);
#endif
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __global int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __local int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __private int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __global int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __local int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __private int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __global uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __local uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __private uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __global uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __local uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __private uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __global float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __local float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __private float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __global float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __local float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __private float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __global int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __local int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __private int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __global int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __local int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __private int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __global uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __local uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __private uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __global uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __local uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __private uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __global float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __local float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __private float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __global float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __local float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __private float *, float, memory_order, memory_order, memory_scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __global double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __local double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __private double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __global double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __local double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __private double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __global double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __local double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __private double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __global double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __local double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __private double *, double, memory_order, memory_order, memory_scope);
+#endif //cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __global long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __local long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __private long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __global long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __local long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __private long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __global ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __local ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __private ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __global ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __local ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __private ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __global long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __local long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __private long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __global long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __local long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __private long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __global ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __local ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __private ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __global ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __local ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __private ulong *, ulong, memory_order, memory_order, memory_scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
// atomic_flag_test_and_set() and atomic_flag_clear()
#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
-bool __ovld atomic_flag_test_and_set(volatile atomic_flag *object);
-void __ovld atomic_flag_clear(volatile atomic_flag *object);
+#if defined(__opencl_c_generic_address_space)
+bool __ovld atomic_flag_test_and_set(volatile atomic_flag *);
+void __ovld atomic_flag_clear(volatile atomic_flag *);
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+bool __ovld atomic_flag_test_and_set(volatile __global atomic_flag *);
+bool __ovld atomic_flag_test_and_set(volatile __local atomic_flag *);
+void __ovld atomic_flag_clear(volatile __global atomic_flag *);
+void __ovld atomic_flag_clear(volatile __local atomic_flag *);
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif
#if defined(__opencl_c_atomic_scope_device)
-bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order);
-void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order);
+#if defined(__opencl_c_generic_address_space)
+bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *, memory_order);
+void __ovld atomic_flag_clear_explicit(volatile atomic_flag *, memory_order);
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+bool __ovld atomic_flag_test_and_set_explicit(volatile __global atomic_flag *, memory_order);
+bool __ovld atomic_flag_test_and_set_explicit(volatile __local atomic_flag *, memory_order);
+void __ovld atomic_flag_clear_explicit(volatile __global atomic_flag *, memory_order);
+void __ovld atomic_flag_clear_explicit(volatile __local atomic_flag *, memory_order);
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif
-bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
-void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
+#if defined(__opencl_c_generic_address_space)
+bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *, memory_order, memory_scope);
+void __ovld atomic_flag_clear_explicit(volatile atomic_flag *, memory_order, memory_scope);
+#endif //defined(__opencl_c_generic_address_space)
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+bool __ovld atomic_flag_test_and_set_explicit(volatile __global atomic_flag *, memory_order, memory_scope);
+bool __ovld atomic_flag_test_and_set_explicit(volatile __local atomic_flag *, memory_order, memory_scope);
+void __ovld atomic_flag_clear_explicit(volatile __global atomic_flag *, memory_order, memory_scope);
+void __ovld atomic_flag_clear_explicit(volatile __local atomic_flag *, memory_order, memory_scope);
+#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// OpenCL v1.1 s6.11.12, v1.2 s6.12.12, v2.0 s6.13.12 - Miscellaneous Vector Functions
@@ -13668,452 +14628,452 @@ void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_orde
* short8 b;
* b = shuffle(a, mask); <- not valid
*/
-char2 __ovld __cnfn shuffle(char2 x, uchar2 mask);
-char2 __ovld __cnfn shuffle(char4 x, uchar2 mask);
-char2 __ovld __cnfn shuffle(char8 x, uchar2 mask);
-char2 __ovld __cnfn shuffle(char16 x, uchar2 mask);
-
-uchar2 __ovld __cnfn shuffle(uchar2 x, uchar2 mask);
-uchar2 __ovld __cnfn shuffle(uchar4 x, uchar2 mask);
-uchar2 __ovld __cnfn shuffle(uchar8 x, uchar2 mask);
-uchar2 __ovld __cnfn shuffle(uchar16 x, uchar2 mask);
-
-short2 __ovld __cnfn shuffle(short2 x, ushort2 mask);
-short2 __ovld __cnfn shuffle(short4 x, ushort2 mask);
-short2 __ovld __cnfn shuffle(short8 x, ushort2 mask);
-short2 __ovld __cnfn shuffle(short16 x, ushort2 mask);
-
-ushort2 __ovld __cnfn shuffle(ushort2 x, ushort2 mask);
-ushort2 __ovld __cnfn shuffle(ushort4 x, ushort2 mask);
-ushort2 __ovld __cnfn shuffle(ushort8 x, ushort2 mask);
-ushort2 __ovld __cnfn shuffle(ushort16 x, ushort2 mask);
-
-int2 __ovld __cnfn shuffle(int2 x, uint2 mask);
-int2 __ovld __cnfn shuffle(int4 x, uint2 mask);
-int2 __ovld __cnfn shuffle(int8 x, uint2 mask);
-int2 __ovld __cnfn shuffle(int16 x, uint2 mask);
-
-uint2 __ovld __cnfn shuffle(uint2 x, uint2 mask);
-uint2 __ovld __cnfn shuffle(uint4 x, uint2 mask);
-uint2 __ovld __cnfn shuffle(uint8 x, uint2 mask);
-uint2 __ovld __cnfn shuffle(uint16 x, uint2 mask);
-
-long2 __ovld __cnfn shuffle(long2 x, ulong2 mask);
-long2 __ovld __cnfn shuffle(long4 x, ulong2 mask);
-long2 __ovld __cnfn shuffle(long8 x, ulong2 mask);
-long2 __ovld __cnfn shuffle(long16 x, ulong2 mask);
-
-ulong2 __ovld __cnfn shuffle(ulong2 x, ulong2 mask);
-ulong2 __ovld __cnfn shuffle(ulong4 x, ulong2 mask);
-ulong2 __ovld __cnfn shuffle(ulong8 x, ulong2 mask);
-ulong2 __ovld __cnfn shuffle(ulong16 x, ulong2 mask);
-
-float2 __ovld __cnfn shuffle(float2 x, uint2 mask);
-float2 __ovld __cnfn shuffle(float4 x, uint2 mask);
-float2 __ovld __cnfn shuffle(float8 x, uint2 mask);
-float2 __ovld __cnfn shuffle(float16 x, uint2 mask);
-
-char4 __ovld __cnfn shuffle(char2 x, uchar4 mask);
-char4 __ovld __cnfn shuffle(char4 x, uchar4 mask);
-char4 __ovld __cnfn shuffle(char8 x, uchar4 mask);
-char4 __ovld __cnfn shuffle(char16 x, uchar4 mask);
-
-uchar4 __ovld __cnfn shuffle(uchar2 x, uchar4 mask);
-uchar4 __ovld __cnfn shuffle(uchar4 x, uchar4 mask);
-uchar4 __ovld __cnfn shuffle(uchar8 x, uchar4 mask);
-uchar4 __ovld __cnfn shuffle(uchar16 x, uchar4 mask);
-
-short4 __ovld __cnfn shuffle(short2 x, ushort4 mask);
-short4 __ovld __cnfn shuffle(short4 x, ushort4 mask);
-short4 __ovld __cnfn shuffle(short8 x, ushort4 mask);
-short4 __ovld __cnfn shuffle(short16 x, ushort4 mask);
-
-ushort4 __ovld __cnfn shuffle(ushort2 x, ushort4 mask);
-ushort4 __ovld __cnfn shuffle(ushort4 x, ushort4 mask);
-ushort4 __ovld __cnfn shuffle(ushort8 x, ushort4 mask);
-ushort4 __ovld __cnfn shuffle(ushort16 x, ushort4 mask);
-
-int4 __ovld __cnfn shuffle(int2 x, uint4 mask);
-int4 __ovld __cnfn shuffle(int4 x, uint4 mask);
-int4 __ovld __cnfn shuffle(int8 x, uint4 mask);
-int4 __ovld __cnfn shuffle(int16 x, uint4 mask);
-
-uint4 __ovld __cnfn shuffle(uint2 x, uint4 mask);
-uint4 __ovld __cnfn shuffle(uint4 x, uint4 mask);
-uint4 __ovld __cnfn shuffle(uint8 x, uint4 mask);
-uint4 __ovld __cnfn shuffle(uint16 x, uint4 mask);
-
-long4 __ovld __cnfn shuffle(long2 x, ulong4 mask);
-long4 __ovld __cnfn shuffle(long4 x, ulong4 mask);
-long4 __ovld __cnfn shuffle(long8 x, ulong4 mask);
-long4 __ovld __cnfn shuffle(long16 x, ulong4 mask);
-
-ulong4 __ovld __cnfn shuffle(ulong2 x, ulong4 mask);
-ulong4 __ovld __cnfn shuffle(ulong4 x, ulong4 mask);
-ulong4 __ovld __cnfn shuffle(ulong8 x, ulong4 mask);
-ulong4 __ovld __cnfn shuffle(ulong16 x, ulong4 mask);
-
-float4 __ovld __cnfn shuffle(float2 x, uint4 mask);
-float4 __ovld __cnfn shuffle(float4 x, uint4 mask);
-float4 __ovld __cnfn shuffle(float8 x, uint4 mask);
-float4 __ovld __cnfn shuffle(float16 x, uint4 mask);
-
-char8 __ovld __cnfn shuffle(char2 x, uchar8 mask);
-char8 __ovld __cnfn shuffle(char4 x, uchar8 mask);
-char8 __ovld __cnfn shuffle(char8 x, uchar8 mask);
-char8 __ovld __cnfn shuffle(char16 x, uchar8 mask);
-
-uchar8 __ovld __cnfn shuffle(uchar2 x, uchar8 mask);
-uchar8 __ovld __cnfn shuffle(uchar4 x, uchar8 mask);
-uchar8 __ovld __cnfn shuffle(uchar8 x, uchar8 mask);
-uchar8 __ovld __cnfn shuffle(uchar16 x, uchar8 mask);
-
-short8 __ovld __cnfn shuffle(short2 x, ushort8 mask);
-short8 __ovld __cnfn shuffle(short4 x, ushort8 mask);
-short8 __ovld __cnfn shuffle(short8 x, ushort8 mask);
-short8 __ovld __cnfn shuffle(short16 x, ushort8 mask);
-
-ushort8 __ovld __cnfn shuffle(ushort2 x, ushort8 mask);
-ushort8 __ovld __cnfn shuffle(ushort4 x, ushort8 mask);
-ushort8 __ovld __cnfn shuffle(ushort8 x, ushort8 mask);
-ushort8 __ovld __cnfn shuffle(ushort16 x, ushort8 mask);
-
-int8 __ovld __cnfn shuffle(int2 x, uint8 mask);
-int8 __ovld __cnfn shuffle(int4 x, uint8 mask);
-int8 __ovld __cnfn shuffle(int8 x, uint8 mask);
-int8 __ovld __cnfn shuffle(int16 x, uint8 mask);
-
-uint8 __ovld __cnfn shuffle(uint2 x, uint8 mask);
-uint8 __ovld __cnfn shuffle(uint4 x, uint8 mask);
-uint8 __ovld __cnfn shuffle(uint8 x, uint8 mask);
-uint8 __ovld __cnfn shuffle(uint16 x, uint8 mask);
-
-long8 __ovld __cnfn shuffle(long2 x, ulong8 mask);
-long8 __ovld __cnfn shuffle(long4 x, ulong8 mask);
-long8 __ovld __cnfn shuffle(long8 x, ulong8 mask);
-long8 __ovld __cnfn shuffle(long16 x, ulong8 mask);
-
-ulong8 __ovld __cnfn shuffle(ulong2 x, ulong8 mask);
-ulong8 __ovld __cnfn shuffle(ulong4 x, ulong8 mask);
-ulong8 __ovld __cnfn shuffle(ulong8 x, ulong8 mask);
-ulong8 __ovld __cnfn shuffle(ulong16 x, ulong8 mask);
-
-float8 __ovld __cnfn shuffle(float2 x, uint8 mask);
-float8 __ovld __cnfn shuffle(float4 x, uint8 mask);
-float8 __ovld __cnfn shuffle(float8 x, uint8 mask);
-float8 __ovld __cnfn shuffle(float16 x, uint8 mask);
-
-char16 __ovld __cnfn shuffle(char2 x, uchar16 mask);
-char16 __ovld __cnfn shuffle(char4 x, uchar16 mask);
-char16 __ovld __cnfn shuffle(char8 x, uchar16 mask);
-char16 __ovld __cnfn shuffle(char16 x, uchar16 mask);
-
-uchar16 __ovld __cnfn shuffle(uchar2 x, uchar16 mask);
-uchar16 __ovld __cnfn shuffle(uchar4 x, uchar16 mask);
-uchar16 __ovld __cnfn shuffle(uchar8 x, uchar16 mask);
-uchar16 __ovld __cnfn shuffle(uchar16 x, uchar16 mask);
-
-short16 __ovld __cnfn shuffle(short2 x, ushort16 mask);
-short16 __ovld __cnfn shuffle(short4 x, ushort16 mask);
-short16 __ovld __cnfn shuffle(short8 x, ushort16 mask);
-short16 __ovld __cnfn shuffle(short16 x, ushort16 mask);
-
-ushort16 __ovld __cnfn shuffle(ushort2 x, ushort16 mask);
-ushort16 __ovld __cnfn shuffle(ushort4 x, ushort16 mask);
-ushort16 __ovld __cnfn shuffle(ushort8 x, ushort16 mask);
-ushort16 __ovld __cnfn shuffle(ushort16 x, ushort16 mask);
-
-int16 __ovld __cnfn shuffle(int2 x, uint16 mask);
-int16 __ovld __cnfn shuffle(int4 x, uint16 mask);
-int16 __ovld __cnfn shuffle(int8 x, uint16 mask);
-int16 __ovld __cnfn shuffle(int16 x, uint16 mask);
-
-uint16 __ovld __cnfn shuffle(uint2 x, uint16 mask);
-uint16 __ovld __cnfn shuffle(uint4 x, uint16 mask);
-uint16 __ovld __cnfn shuffle(uint8 x, uint16 mask);
-uint16 __ovld __cnfn shuffle(uint16 x, uint16 mask);
-
-long16 __ovld __cnfn shuffle(long2 x, ulong16 mask);
-long16 __ovld __cnfn shuffle(long4 x, ulong16 mask);
-long16 __ovld __cnfn shuffle(long8 x, ulong16 mask);
-long16 __ovld __cnfn shuffle(long16 x, ulong16 mask);
-
-ulong16 __ovld __cnfn shuffle(ulong2 x, ulong16 mask);
-ulong16 __ovld __cnfn shuffle(ulong4 x, ulong16 mask);
-ulong16 __ovld __cnfn shuffle(ulong8 x, ulong16 mask);
-ulong16 __ovld __cnfn shuffle(ulong16 x, ulong16 mask);
-
-float16 __ovld __cnfn shuffle(float2 x, uint16 mask);
-float16 __ovld __cnfn shuffle(float4 x, uint16 mask);
-float16 __ovld __cnfn shuffle(float8 x, uint16 mask);
-float16 __ovld __cnfn shuffle(float16 x, uint16 mask);
+char2 __ovld __cnfn shuffle(char2, uchar2);
+char2 __ovld __cnfn shuffle(char4, uchar2);
+char2 __ovld __cnfn shuffle(char8, uchar2);
+char2 __ovld __cnfn shuffle(char16, uchar2);
+
+uchar2 __ovld __cnfn shuffle(uchar2, uchar2);
+uchar2 __ovld __cnfn shuffle(uchar4, uchar2);
+uchar2 __ovld __cnfn shuffle(uchar8, uchar2);
+uchar2 __ovld __cnfn shuffle(uchar16, uchar2);
+
+short2 __ovld __cnfn shuffle(short2, ushort2);
+short2 __ovld __cnfn shuffle(short4, ushort2);
+short2 __ovld __cnfn shuffle(short8, ushort2);
+short2 __ovld __cnfn shuffle(short16, ushort2);
+
+ushort2 __ovld __cnfn shuffle(ushort2, ushort2);
+ushort2 __ovld __cnfn shuffle(ushort4, ushort2);
+ushort2 __ovld __cnfn shuffle(ushort8, ushort2);
+ushort2 __ovld __cnfn shuffle(ushort16, ushort2);
+
+int2 __ovld __cnfn shuffle(int2, uint2);
+int2 __ovld __cnfn shuffle(int4, uint2);
+int2 __ovld __cnfn shuffle(int8, uint2);
+int2 __ovld __cnfn shuffle(int16, uint2);
+
+uint2 __ovld __cnfn shuffle(uint2, uint2);
+uint2 __ovld __cnfn shuffle(uint4, uint2);
+uint2 __ovld __cnfn shuffle(uint8, uint2);
+uint2 __ovld __cnfn shuffle(uint16, uint2);
+
+long2 __ovld __cnfn shuffle(long2, ulong2);
+long2 __ovld __cnfn shuffle(long4, ulong2);
+long2 __ovld __cnfn shuffle(long8, ulong2);
+long2 __ovld __cnfn shuffle(long16, ulong2);
+
+ulong2 __ovld __cnfn shuffle(ulong2, ulong2);
+ulong2 __ovld __cnfn shuffle(ulong4, ulong2);
+ulong2 __ovld __cnfn shuffle(ulong8, ulong2);
+ulong2 __ovld __cnfn shuffle(ulong16, ulong2);
+
+float2 __ovld __cnfn shuffle(float2, uint2);
+float2 __ovld __cnfn shuffle(float4, uint2);
+float2 __ovld __cnfn shuffle(float8, uint2);
+float2 __ovld __cnfn shuffle(float16, uint2);
+
+char4 __ovld __cnfn shuffle(char2, uchar4);
+char4 __ovld __cnfn shuffle(char4, uchar4);
+char4 __ovld __cnfn shuffle(char8, uchar4);
+char4 __ovld __cnfn shuffle(char16, uchar4);
+
+uchar4 __ovld __cnfn shuffle(uchar2, uchar4);
+uchar4 __ovld __cnfn shuffle(uchar4, uchar4);
+uchar4 __ovld __cnfn shuffle(uchar8, uchar4);
+uchar4 __ovld __cnfn shuffle(uchar16, uchar4);
+
+short4 __ovld __cnfn shuffle(short2, ushort4);
+short4 __ovld __cnfn shuffle(short4, ushort4);
+short4 __ovld __cnfn shuffle(short8, ushort4);
+short4 __ovld __cnfn shuffle(short16, ushort4);
+
+ushort4 __ovld __cnfn shuffle(ushort2, ushort4);
+ushort4 __ovld __cnfn shuffle(ushort4, ushort4);
+ushort4 __ovld __cnfn shuffle(ushort8, ushort4);
+ushort4 __ovld __cnfn shuffle(ushort16, ushort4);
+
+int4 __ovld __cnfn shuffle(int2, uint4);
+int4 __ovld __cnfn shuffle(int4, uint4);
+int4 __ovld __cnfn shuffle(int8, uint4);
+int4 __ovld __cnfn shuffle(int16, uint4);
+
+uint4 __ovld __cnfn shuffle(uint2, uint4);
+uint4 __ovld __cnfn shuffle(uint4, uint4);
+uint4 __ovld __cnfn shuffle(uint8, uint4);
+uint4 __ovld __cnfn shuffle(uint16, uint4);
+
+long4 __ovld __cnfn shuffle(long2, ulong4);
+long4 __ovld __cnfn shuffle(long4, ulong4);
+long4 __ovld __cnfn shuffle(long8, ulong4);
+long4 __ovld __cnfn shuffle(long16, ulong4);
+
+ulong4 __ovld __cnfn shuffle(ulong2, ulong4);
+ulong4 __ovld __cnfn shuffle(ulong4, ulong4);
+ulong4 __ovld __cnfn shuffle(ulong8, ulong4);
+ulong4 __ovld __cnfn shuffle(ulong16, ulong4);
+
+float4 __ovld __cnfn shuffle(float2, uint4);
+float4 __ovld __cnfn shuffle(float4, uint4);
+float4 __ovld __cnfn shuffle(float8, uint4);
+float4 __ovld __cnfn shuffle(float16, uint4);
+
+char8 __ovld __cnfn shuffle(char2, uchar8);
+char8 __ovld __cnfn shuffle(char4, uchar8);
+char8 __ovld __cnfn shuffle(char8, uchar8);
+char8 __ovld __cnfn shuffle(char16, uchar8);
+
+uchar8 __ovld __cnfn shuffle(uchar2, uchar8);
+uchar8 __ovld __cnfn shuffle(uchar4, uchar8);
+uchar8 __ovld __cnfn shuffle(uchar8, uchar8);
+uchar8 __ovld __cnfn shuffle(uchar16, uchar8);
+
+short8 __ovld __cnfn shuffle(short2, ushort8);
+short8 __ovld __cnfn shuffle(short4, ushort8);
+short8 __ovld __cnfn shuffle(short8, ushort8);
+short8 __ovld __cnfn shuffle(short16, ushort8);
+
+ushort8 __ovld __cnfn shuffle(ushort2, ushort8);
+ushort8 __ovld __cnfn shuffle(ushort4, ushort8);
+ushort8 __ovld __cnfn shuffle(ushort8, ushort8);
+ushort8 __ovld __cnfn shuffle(ushort16, ushort8);
+
+int8 __ovld __cnfn shuffle(int2, uint8);
+int8 __ovld __cnfn shuffle(int4, uint8);
+int8 __ovld __cnfn shuffle(int8, uint8);
+int8 __ovld __cnfn shuffle(int16, uint8);
+
+uint8 __ovld __cnfn shuffle(uint2, uint8);
+uint8 __ovld __cnfn shuffle(uint4, uint8);
+uint8 __ovld __cnfn shuffle(uint8, uint8);
+uint8 __ovld __cnfn shuffle(uint16, uint8);
+
+long8 __ovld __cnfn shuffle(long2, ulong8);
+long8 __ovld __cnfn shuffle(long4, ulong8);
+long8 __ovld __cnfn shuffle(long8, ulong8);
+long8 __ovld __cnfn shuffle(long16, ulong8);
+
+ulong8 __ovld __cnfn shuffle(ulong2, ulong8);
+ulong8 __ovld __cnfn shuffle(ulong4, ulong8);
+ulong8 __ovld __cnfn shuffle(ulong8, ulong8);
+ulong8 __ovld __cnfn shuffle(ulong16, ulong8);
+
+float8 __ovld __cnfn shuffle(float2, uint8);
+float8 __ovld __cnfn shuffle(float4, uint8);
+float8 __ovld __cnfn shuffle(float8, uint8);
+float8 __ovld __cnfn shuffle(float16, uint8);
+
+char16 __ovld __cnfn shuffle(char2, uchar16);
+char16 __ovld __cnfn shuffle(char4, uchar16);
+char16 __ovld __cnfn shuffle(char8, uchar16);
+char16 __ovld __cnfn shuffle(char16, uchar16);
+
+uchar16 __ovld __cnfn shuffle(uchar2, uchar16);
+uchar16 __ovld __cnfn shuffle(uchar4, uchar16);
+uchar16 __ovld __cnfn shuffle(uchar8, uchar16);
+uchar16 __ovld __cnfn shuffle(uchar16, uchar16);
+
+short16 __ovld __cnfn shuffle(short2, ushort16);
+short16 __ovld __cnfn shuffle(short4, ushort16);
+short16 __ovld __cnfn shuffle(short8, ushort16);
+short16 __ovld __cnfn shuffle(short16, ushort16);
+
+ushort16 __ovld __cnfn shuffle(ushort2, ushort16);
+ushort16 __ovld __cnfn shuffle(ushort4, ushort16);
+ushort16 __ovld __cnfn shuffle(ushort8, ushort16);
+ushort16 __ovld __cnfn shuffle(ushort16, ushort16);
+
+int16 __ovld __cnfn shuffle(int2, uint16);
+int16 __ovld __cnfn shuffle(int4, uint16);
+int16 __ovld __cnfn shuffle(int8, uint16);
+int16 __ovld __cnfn shuffle(int16, uint16);
+
+uint16 __ovld __cnfn shuffle(uint2, uint16);
+uint16 __ovld __cnfn shuffle(uint4, uint16);
+uint16 __ovld __cnfn shuffle(uint8, uint16);
+uint16 __ovld __cnfn shuffle(uint16, uint16);
+
+long16 __ovld __cnfn shuffle(long2, ulong16);
+long16 __ovld __cnfn shuffle(long4, ulong16);
+long16 __ovld __cnfn shuffle(long8, ulong16);
+long16 __ovld __cnfn shuffle(long16, ulong16);
+
+ulong16 __ovld __cnfn shuffle(ulong2, ulong16);
+ulong16 __ovld __cnfn shuffle(ulong4, ulong16);
+ulong16 __ovld __cnfn shuffle(ulong8, ulong16);
+ulong16 __ovld __cnfn shuffle(ulong16, ulong16);
+
+float16 __ovld __cnfn shuffle(float2, uint16);
+float16 __ovld __cnfn shuffle(float4, uint16);
+float16 __ovld __cnfn shuffle(float8, uint16);
+float16 __ovld __cnfn shuffle(float16, uint16);
#ifdef cl_khr_fp64
-double2 __ovld __cnfn shuffle(double2 x, ulong2 mask);
-double2 __ovld __cnfn shuffle(double4 x, ulong2 mask);
-double2 __ovld __cnfn shuffle(double8 x, ulong2 mask);
-double2 __ovld __cnfn shuffle(double16 x, ulong2 mask);
-
-double4 __ovld __cnfn shuffle(double2 x, ulong4 mask);
-double4 __ovld __cnfn shuffle(double4 x, ulong4 mask);
-double4 __ovld __cnfn shuffle(double8 x, ulong4 mask);
-double4 __ovld __cnfn shuffle(double16 x, ulong4 mask);
-
-double8 __ovld __cnfn shuffle(double2 x, ulong8 mask);
-double8 __ovld __cnfn shuffle(double4 x, ulong8 mask);
-double8 __ovld __cnfn shuffle(double8 x, ulong8 mask);
-double8 __ovld __cnfn shuffle(double16 x, ulong8 mask);
-
-double16 __ovld __cnfn shuffle(double2 x, ulong16 mask);
-double16 __ovld __cnfn shuffle(double4 x, ulong16 mask);
-double16 __ovld __cnfn shuffle(double8 x, ulong16 mask);
-double16 __ovld __cnfn shuffle(double16 x, ulong16 mask);
+double2 __ovld __cnfn shuffle(double2, ulong2);
+double2 __ovld __cnfn shuffle(double4, ulong2);
+double2 __ovld __cnfn shuffle(double8, ulong2);
+double2 __ovld __cnfn shuffle(double16, ulong2);
+
+double4 __ovld __cnfn shuffle(double2, ulong4);
+double4 __ovld __cnfn shuffle(double4, ulong4);
+double4 __ovld __cnfn shuffle(double8, ulong4);
+double4 __ovld __cnfn shuffle(double16, ulong4);
+
+double8 __ovld __cnfn shuffle(double2, ulong8);
+double8 __ovld __cnfn shuffle(double4, ulong8);
+double8 __ovld __cnfn shuffle(double8, ulong8);
+double8 __ovld __cnfn shuffle(double16, ulong8);
+
+double16 __ovld __cnfn shuffle(double2, ulong16);
+double16 __ovld __cnfn shuffle(double4, ulong16);
+double16 __ovld __cnfn shuffle(double8, ulong16);
+double16 __ovld __cnfn shuffle(double16, ulong16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half2 __ovld __cnfn shuffle(half2 x, ushort2 mask);
-half2 __ovld __cnfn shuffle(half4 x, ushort2 mask);
-half2 __ovld __cnfn shuffle(half8 x, ushort2 mask);
-half2 __ovld __cnfn shuffle(half16 x, ushort2 mask);
-
-half4 __ovld __cnfn shuffle(half2 x, ushort4 mask);
-half4 __ovld __cnfn shuffle(half4 x, ushort4 mask);
-half4 __ovld __cnfn shuffle(half8 x, ushort4 mask);
-half4 __ovld __cnfn shuffle(half16 x, ushort4 mask);
-
-half8 __ovld __cnfn shuffle(half2 x, ushort8 mask);
-half8 __ovld __cnfn shuffle(half4 x, ushort8 mask);
-half8 __ovld __cnfn shuffle(half8 x, ushort8 mask);
-half8 __ovld __cnfn shuffle(half16 x, ushort8 mask);
-
-half16 __ovld __cnfn shuffle(half2 x, ushort16 mask);
-half16 __ovld __cnfn shuffle(half4 x, ushort16 mask);
-half16 __ovld __cnfn shuffle(half8 x, ushort16 mask);
-half16 __ovld __cnfn shuffle(half16 x, ushort16 mask);
+half2 __ovld __cnfn shuffle(half2, ushort2);
+half2 __ovld __cnfn shuffle(half4, ushort2);
+half2 __ovld __cnfn shuffle(half8, ushort2);
+half2 __ovld __cnfn shuffle(half16, ushort2);
+
+half4 __ovld __cnfn shuffle(half2, ushort4);
+half4 __ovld __cnfn shuffle(half4, ushort4);
+half4 __ovld __cnfn shuffle(half8, ushort4);
+half4 __ovld __cnfn shuffle(half16, ushort4);
+
+half8 __ovld __cnfn shuffle(half2, ushort8);
+half8 __ovld __cnfn shuffle(half4, ushort8);
+half8 __ovld __cnfn shuffle(half8, ushort8);
+half8 __ovld __cnfn shuffle(half16, ushort8);
+
+half16 __ovld __cnfn shuffle(half2, ushort16);
+half16 __ovld __cnfn shuffle(half4, ushort16);
+half16 __ovld __cnfn shuffle(half8, ushort16);
+half16 __ovld __cnfn shuffle(half16, ushort16);
#endif //cl_khr_fp16
-char2 __ovld __cnfn shuffle2(char2 x, char2 y, uchar2 mask);
-char2 __ovld __cnfn shuffle2(char4 x, char4 y, uchar2 mask);
-char2 __ovld __cnfn shuffle2(char8 x, char8 y, uchar2 mask);
-char2 __ovld __cnfn shuffle2(char16 x, char16 y, uchar2 mask);
-
-uchar2 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar2 mask);
-uchar2 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar2 mask);
-uchar2 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar2 mask);
-uchar2 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar2 mask);
-
-short2 __ovld __cnfn shuffle2(short2 x, short2 y, ushort2 mask);
-short2 __ovld __cnfn shuffle2(short4 x, short4 y, ushort2 mask);
-short2 __ovld __cnfn shuffle2(short8 x, short8 y, ushort2 mask);
-short2 __ovld __cnfn shuffle2(short16 x, short16 y, ushort2 mask);
-
-ushort2 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort2 mask);
-ushort2 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort2 mask);
-ushort2 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort2 mask);
-ushort2 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort2 mask);
-
-int2 __ovld __cnfn shuffle2(int2 x, int2 y, uint2 mask);
-int2 __ovld __cnfn shuffle2(int4 x, int4 y, uint2 mask);
-int2 __ovld __cnfn shuffle2(int8 x, int8 y, uint2 mask);
-int2 __ovld __cnfn shuffle2(int16 x, int16 y, uint2 mask);
-
-uint2 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint2 mask);
-uint2 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint2 mask);
-uint2 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint2 mask);
-uint2 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint2 mask);
-
-long2 __ovld __cnfn shuffle2(long2 x, long2 y, ulong2 mask);
-long2 __ovld __cnfn shuffle2(long4 x, long4 y, ulong2 mask);
-long2 __ovld __cnfn shuffle2(long8 x, long8 y, ulong2 mask);
-long2 __ovld __cnfn shuffle2(long16 x, long16 y, ulong2 mask);
-
-ulong2 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong2 mask);
-ulong2 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong2 mask);
-ulong2 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong2 mask);
-ulong2 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong2 mask);
-
-float2 __ovld __cnfn shuffle2(float2 x, float2 y, uint2 mask);
-float2 __ovld __cnfn shuffle2(float4 x, float4 y, uint2 mask);
-float2 __ovld __cnfn shuffle2(float8 x, float8 y, uint2 mask);
-float2 __ovld __cnfn shuffle2(float16 x, float16 y, uint2 mask);
-
-char4 __ovld __cnfn shuffle2(char2 x, char2 y, uchar4 mask);
-char4 __ovld __cnfn shuffle2(char4 x, char4 y, uchar4 mask);
-char4 __ovld __cnfn shuffle2(char8 x, char8 y, uchar4 mask);
-char4 __ovld __cnfn shuffle2(char16 x, char16 y, uchar4 mask);
-
-uchar4 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar4 mask);
-uchar4 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar4 mask);
-uchar4 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar4 mask);
-uchar4 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar4 mask);
-
-short4 __ovld __cnfn shuffle2(short2 x, short2 y, ushort4 mask);
-short4 __ovld __cnfn shuffle2(short4 x, short4 y, ushort4 mask);
-short4 __ovld __cnfn shuffle2(short8 x, short8 y, ushort4 mask);
-short4 __ovld __cnfn shuffle2(short16 x, short16 y, ushort4 mask);
-
-ushort4 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort4 mask);
-ushort4 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort4 mask);
-ushort4 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort4 mask);
-ushort4 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort4 mask);
-
-int4 __ovld __cnfn shuffle2(int2 x, int2 y, uint4 mask);
-int4 __ovld __cnfn shuffle2(int4 x, int4 y, uint4 mask);
-int4 __ovld __cnfn shuffle2(int8 x, int8 y, uint4 mask);
-int4 __ovld __cnfn shuffle2(int16 x, int16 y, uint4 mask);
-
-uint4 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint4 mask);
-uint4 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint4 mask);
-uint4 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint4 mask);
-uint4 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint4 mask);
-
-long4 __ovld __cnfn shuffle2(long2 x, long2 y, ulong4 mask);
-long4 __ovld __cnfn shuffle2(long4 x, long4 y, ulong4 mask);
-long4 __ovld __cnfn shuffle2(long8 x, long8 y, ulong4 mask);
-long4 __ovld __cnfn shuffle2(long16 x, long16 y, ulong4 mask);
-
-ulong4 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong4 mask);
-ulong4 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong4 mask);
-ulong4 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong4 mask);
-ulong4 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong4 mask);
-
-float4 __ovld __cnfn shuffle2(float2 x, float2 y, uint4 mask);
-float4 __ovld __cnfn shuffle2(float4 x, float4 y, uint4 mask);
-float4 __ovld __cnfn shuffle2(float8 x, float8 y, uint4 mask);
-float4 __ovld __cnfn shuffle2(float16 x, float16 y, uint4 mask);
-
-char8 __ovld __cnfn shuffle2(char2 x, char2 y, uchar8 mask);
-char8 __ovld __cnfn shuffle2(char4 x, char4 y, uchar8 mask);
-char8 __ovld __cnfn shuffle2(char8 x, char8 y, uchar8 mask);
-char8 __ovld __cnfn shuffle2(char16 x, char16 y, uchar8 mask);
-
-uchar8 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar8 mask);
-uchar8 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar8 mask);
-uchar8 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar8 mask);
-uchar8 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar8 mask);
-
-short8 __ovld __cnfn shuffle2(short2 x, short2 y, ushort8 mask);
-short8 __ovld __cnfn shuffle2(short4 x, short4 y, ushort8 mask);
-short8 __ovld __cnfn shuffle2(short8 x, short8 y, ushort8 mask);
-short8 __ovld __cnfn shuffle2(short16 x, short16 y, ushort8 mask);
-
-ushort8 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort8 mask);
-ushort8 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort8 mask);
-ushort8 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort8 mask);
-ushort8 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort8 mask);
-
-int8 __ovld __cnfn shuffle2(int2 x, int2 y, uint8 mask);
-int8 __ovld __cnfn shuffle2(int4 x, int4 y, uint8 mask);
-int8 __ovld __cnfn shuffle2(int8 x, int8 y, uint8 mask);
-int8 __ovld __cnfn shuffle2(int16 x, int16 y, uint8 mask);
-
-uint8 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint8 mask);
-uint8 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint8 mask);
-uint8 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint8 mask);
-uint8 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint8 mask);
-
-long8 __ovld __cnfn shuffle2(long2 x, long2 y, ulong8 mask);
-long8 __ovld __cnfn shuffle2(long4 x, long4 y, ulong8 mask);
-long8 __ovld __cnfn shuffle2(long8 x, long8 y, ulong8 mask);
-long8 __ovld __cnfn shuffle2(long16 x, long16 y, ulong8 mask);
-
-ulong8 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong8 mask);
-ulong8 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong8 mask);
-ulong8 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong8 mask);
-ulong8 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong8 mask);
-
-float8 __ovld __cnfn shuffle2(float2 x, float2 y, uint8 mask);
-float8 __ovld __cnfn shuffle2(float4 x, float4 y, uint8 mask);
-float8 __ovld __cnfn shuffle2(float8 x, float8 y, uint8 mask);
-float8 __ovld __cnfn shuffle2(float16 x, float16 y, uint8 mask);
-
-char16 __ovld __cnfn shuffle2(char2 x, char2 y, uchar16 mask);
-char16 __ovld __cnfn shuffle2(char4 x, char4 y, uchar16 mask);
-char16 __ovld __cnfn shuffle2(char8 x, char8 y, uchar16 mask);
-char16 __ovld __cnfn shuffle2(char16 x, char16 y, uchar16 mask);
-
-uchar16 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar16 mask);
-uchar16 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar16 mask);
-uchar16 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar16 mask);
-uchar16 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar16 mask);
-
-short16 __ovld __cnfn shuffle2(short2 x, short2 y, ushort16 mask);
-short16 __ovld __cnfn shuffle2(short4 x, short4 y, ushort16 mask);
-short16 __ovld __cnfn shuffle2(short8 x, short8 y, ushort16 mask);
-short16 __ovld __cnfn shuffle2(short16 x, short16 y, ushort16 mask);
-
-ushort16 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort16 mask);
-ushort16 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort16 mask);
-ushort16 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort16 mask);
-ushort16 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort16 mask);
-
-int16 __ovld __cnfn shuffle2(int2 x, int2 y, uint16 mask);
-int16 __ovld __cnfn shuffle2(int4 x, int4 y, uint16 mask);
-int16 __ovld __cnfn shuffle2(int8 x, int8 y, uint16 mask);
-int16 __ovld __cnfn shuffle2(int16 x, int16 y, uint16 mask);
-
-uint16 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint16 mask);
-uint16 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint16 mask);
-uint16 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint16 mask);
-uint16 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint16 mask);
-
-long16 __ovld __cnfn shuffle2(long2 x, long2 y, ulong16 mask);
-long16 __ovld __cnfn shuffle2(long4 x, long4 y, ulong16 mask);
-long16 __ovld __cnfn shuffle2(long8 x, long8 y, ulong16 mask);
-long16 __ovld __cnfn shuffle2(long16 x, long16 y, ulong16 mask);
-
-ulong16 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong16 mask);
-ulong16 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong16 mask);
-ulong16 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong16 mask);
-ulong16 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong16 mask);
-
-float16 __ovld __cnfn shuffle2(float2 x, float2 y, uint16 mask);
-float16 __ovld __cnfn shuffle2(float4 x, float4 y, uint16 mask);
-float16 __ovld __cnfn shuffle2(float8 x, float8 y, uint16 mask);
-float16 __ovld __cnfn shuffle2(float16 x, float16 y, uint16 mask);
+char2 __ovld __cnfn shuffle2(char2, char2, uchar2);
+char2 __ovld __cnfn shuffle2(char4, char4, uchar2);
+char2 __ovld __cnfn shuffle2(char8, char8, uchar2);
+char2 __ovld __cnfn shuffle2(char16, char16, uchar2);
+
+uchar2 __ovld __cnfn shuffle2(uchar2, uchar2, uchar2);
+uchar2 __ovld __cnfn shuffle2(uchar4, uchar4, uchar2);
+uchar2 __ovld __cnfn shuffle2(uchar8, uchar8, uchar2);
+uchar2 __ovld __cnfn shuffle2(uchar16, uchar16, uchar2);
+
+short2 __ovld __cnfn shuffle2(short2, short2, ushort2);
+short2 __ovld __cnfn shuffle2(short4, short4, ushort2);
+short2 __ovld __cnfn shuffle2(short8, short8, ushort2);
+short2 __ovld __cnfn shuffle2(short16, short16, ushort2);
+
+ushort2 __ovld __cnfn shuffle2(ushort2, ushort2, ushort2);
+ushort2 __ovld __cnfn shuffle2(ushort4, ushort4, ushort2);
+ushort2 __ovld __cnfn shuffle2(ushort8, ushort8, ushort2);
+ushort2 __ovld __cnfn shuffle2(ushort16, ushort16, ushort2);
+
+int2 __ovld __cnfn shuffle2(int2, int2, uint2);
+int2 __ovld __cnfn shuffle2(int4, int4, uint2);
+int2 __ovld __cnfn shuffle2(int8, int8, uint2);
+int2 __ovld __cnfn shuffle2(int16, int16, uint2);
+
+uint2 __ovld __cnfn shuffle2(uint2, uint2, uint2);
+uint2 __ovld __cnfn shuffle2(uint4, uint4, uint2);
+uint2 __ovld __cnfn shuffle2(uint8, uint8, uint2);
+uint2 __ovld __cnfn shuffle2(uint16, uint16, uint2);
+
+long2 __ovld __cnfn shuffle2(long2, long2, ulong2);
+long2 __ovld __cnfn shuffle2(long4, long4, ulong2);
+long2 __ovld __cnfn shuffle2(long8, long8, ulong2);
+long2 __ovld __cnfn shuffle2(long16, long16, ulong2);
+
+ulong2 __ovld __cnfn shuffle2(ulong2, ulong2, ulong2);
+ulong2 __ovld __cnfn shuffle2(ulong4, ulong4, ulong2);
+ulong2 __ovld __cnfn shuffle2(ulong8, ulong8, ulong2);
+ulong2 __ovld __cnfn shuffle2(ulong16, ulong16, ulong2);
+
+float2 __ovld __cnfn shuffle2(float2, float2, uint2);
+float2 __ovld __cnfn shuffle2(float4, float4, uint2);
+float2 __ovld __cnfn shuffle2(float8, float8, uint2);
+float2 __ovld __cnfn shuffle2(float16, float16, uint2);
+
+char4 __ovld __cnfn shuffle2(char2, char2, uchar4);
+char4 __ovld __cnfn shuffle2(char4, char4, uchar4);
+char4 __ovld __cnfn shuffle2(char8, char8, uchar4);
+char4 __ovld __cnfn shuffle2(char16, char16, uchar4);
+
+uchar4 __ovld __cnfn shuffle2(uchar2, uchar2, uchar4);
+uchar4 __ovld __cnfn shuffle2(uchar4, uchar4, uchar4);
+uchar4 __ovld __cnfn shuffle2(uchar8, uchar8, uchar4);
+uchar4 __ovld __cnfn shuffle2(uchar16, uchar16, uchar4);
+
+short4 __ovld __cnfn shuffle2(short2, short2, ushort4);
+short4 __ovld __cnfn shuffle2(short4, short4, ushort4);
+short4 __ovld __cnfn shuffle2(short8, short8, ushort4);
+short4 __ovld __cnfn shuffle2(short16, short16, ushort4);
+
+ushort4 __ovld __cnfn shuffle2(ushort2, ushort2, ushort4);
+ushort4 __ovld __cnfn shuffle2(ushort4, ushort4, ushort4);
+ushort4 __ovld __cnfn shuffle2(ushort8, ushort8, ushort4);
+ushort4 __ovld __cnfn shuffle2(ushort16, ushort16, ushort4);
+
+int4 __ovld __cnfn shuffle2(int2, int2, uint4);
+int4 __ovld __cnfn shuffle2(int4, int4, uint4);
+int4 __ovld __cnfn shuffle2(int8, int8, uint4);
+int4 __ovld __cnfn shuffle2(int16, int16, uint4);
+
+uint4 __ovld __cnfn shuffle2(uint2, uint2, uint4);
+uint4 __ovld __cnfn shuffle2(uint4, uint4, uint4);
+uint4 __ovld __cnfn shuffle2(uint8, uint8, uint4);
+uint4 __ovld __cnfn shuffle2(uint16, uint16, uint4);
+
+long4 __ovld __cnfn shuffle2(long2, long2, ulong4);
+long4 __ovld __cnfn shuffle2(long4, long4, ulong4);
+long4 __ovld __cnfn shuffle2(long8, long8, ulong4);
+long4 __ovld __cnfn shuffle2(long16, long16, ulong4);
+
+ulong4 __ovld __cnfn shuffle2(ulong2, ulong2, ulong4);
+ulong4 __ovld __cnfn shuffle2(ulong4, ulong4, ulong4);
+ulong4 __ovld __cnfn shuffle2(ulong8, ulong8, ulong4);
+ulong4 __ovld __cnfn shuffle2(ulong16, ulong16, ulong4);
+
+float4 __ovld __cnfn shuffle2(float2, float2, uint4);
+float4 __ovld __cnfn shuffle2(float4, float4, uint4);
+float4 __ovld __cnfn shuffle2(float8, float8, uint4);
+float4 __ovld __cnfn shuffle2(float16, float16, uint4);
+
+char8 __ovld __cnfn shuffle2(char2, char2, uchar8);
+char8 __ovld __cnfn shuffle2(char4, char4, uchar8);
+char8 __ovld __cnfn shuffle2(char8, char8, uchar8);
+char8 __ovld __cnfn shuffle2(char16, char16, uchar8);
+
+uchar8 __ovld __cnfn shuffle2(uchar2, uchar2, uchar8);
+uchar8 __ovld __cnfn shuffle2(uchar4, uchar4, uchar8);
+uchar8 __ovld __cnfn shuffle2(uchar8, uchar8, uchar8);
+uchar8 __ovld __cnfn shuffle2(uchar16, uchar16, uchar8);
+
+short8 __ovld __cnfn shuffle2(short2, short2, ushort8);
+short8 __ovld __cnfn shuffle2(short4, short4, ushort8);
+short8 __ovld __cnfn shuffle2(short8, short8, ushort8);
+short8 __ovld __cnfn shuffle2(short16, short16, ushort8);
+
+ushort8 __ovld __cnfn shuffle2(ushort2, ushort2, ushort8);
+ushort8 __ovld __cnfn shuffle2(ushort4, ushort4, ushort8);
+ushort8 __ovld __cnfn shuffle2(ushort8, ushort8, ushort8);
+ushort8 __ovld __cnfn shuffle2(ushort16, ushort16, ushort8);
+
+int8 __ovld __cnfn shuffle2(int2, int2, uint8);
+int8 __ovld __cnfn shuffle2(int4, int4, uint8);
+int8 __ovld __cnfn shuffle2(int8, int8, uint8);
+int8 __ovld __cnfn shuffle2(int16, int16, uint8);
+
+uint8 __ovld __cnfn shuffle2(uint2, uint2, uint8);
+uint8 __ovld __cnfn shuffle2(uint4, uint4, uint8);
+uint8 __ovld __cnfn shuffle2(uint8, uint8, uint8);
+uint8 __ovld __cnfn shuffle2(uint16, uint16, uint8);
+
+long8 __ovld __cnfn shuffle2(long2, long2, ulong8);
+long8 __ovld __cnfn shuffle2(long4, long4, ulong8);
+long8 __ovld __cnfn shuffle2(long8, long8, ulong8);
+long8 __ovld __cnfn shuffle2(long16, long16, ulong8);
+
+ulong8 __ovld __cnfn shuffle2(ulong2, ulong2, ulong8);
+ulong8 __ovld __cnfn shuffle2(ulong4, ulong4, ulong8);
+ulong8 __ovld __cnfn shuffle2(ulong8, ulong8, ulong8);
+ulong8 __ovld __cnfn shuffle2(ulong16, ulong16, ulong8);
+
+float8 __ovld __cnfn shuffle2(float2, float2, uint8);
+float8 __ovld __cnfn shuffle2(float4, float4, uint8);
+float8 __ovld __cnfn shuffle2(float8, float8, uint8);
+float8 __ovld __cnfn shuffle2(float16, float16, uint8);
+
+char16 __ovld __cnfn shuffle2(char2, char2, uchar16);
+char16 __ovld __cnfn shuffle2(char4, char4, uchar16);
+char16 __ovld __cnfn shuffle2(char8, char8, uchar16);
+char16 __ovld __cnfn shuffle2(char16, char16, uchar16);
+
+uchar16 __ovld __cnfn shuffle2(uchar2, uchar2, uchar16);
+uchar16 __ovld __cnfn shuffle2(uchar4, uchar4, uchar16);
+uchar16 __ovld __cnfn shuffle2(uchar8, uchar8, uchar16);
+uchar16 __ovld __cnfn shuffle2(uchar16, uchar16, uchar16);
+
+short16 __ovld __cnfn shuffle2(short2, short2, ushort16);
+short16 __ovld __cnfn shuffle2(short4, short4, ushort16);
+short16 __ovld __cnfn shuffle2(short8, short8, ushort16);
+short16 __ovld __cnfn shuffle2(short16, short16, ushort16);
+
+ushort16 __ovld __cnfn shuffle2(ushort2, ushort2, ushort16);
+ushort16 __ovld __cnfn shuffle2(ushort4, ushort4, ushort16);
+ushort16 __ovld __cnfn shuffle2(ushort8, ushort8, ushort16);
+ushort16 __ovld __cnfn shuffle2(ushort16, ushort16, ushort16);
+
+int16 __ovld __cnfn shuffle2(int2, int2, uint16);
+int16 __ovld __cnfn shuffle2(int4, int4, uint16);
+int16 __ovld __cnfn shuffle2(int8, int8, uint16);
+int16 __ovld __cnfn shuffle2(int16, int16, uint16);
+
+uint16 __ovld __cnfn shuffle2(uint2, uint2, uint16);
+uint16 __ovld __cnfn shuffle2(uint4, uint4, uint16);
+uint16 __ovld __cnfn shuffle2(uint8, uint8, uint16);
+uint16 __ovld __cnfn shuffle2(uint16, uint16, uint16);
+
+long16 __ovld __cnfn shuffle2(long2, long2, ulong16);
+long16 __ovld __cnfn shuffle2(long4, long4, ulong16);
+long16 __ovld __cnfn shuffle2(long8, long8, ulong16);
+long16 __ovld __cnfn shuffle2(long16, long16, ulong16);
+
+ulong16 __ovld __cnfn shuffle2(ulong2, ulong2, ulong16);
+ulong16 __ovld __cnfn shuffle2(ulong4, ulong4, ulong16);
+ulong16 __ovld __cnfn shuffle2(ulong8, ulong8, ulong16);
+ulong16 __ovld __cnfn shuffle2(ulong16, ulong16, ulong16);
+
+float16 __ovld __cnfn shuffle2(float2, float2, uint16);
+float16 __ovld __cnfn shuffle2(float4, float4, uint16);
+float16 __ovld __cnfn shuffle2(float8, float8, uint16);
+float16 __ovld __cnfn shuffle2(float16, float16, uint16);
#ifdef cl_khr_fp64
-double2 __ovld __cnfn shuffle2(double2 x, double2 y, ulong2 mask);
-double2 __ovld __cnfn shuffle2(double4 x, double4 y, ulong2 mask);
-double2 __ovld __cnfn shuffle2(double8 x, double8 y, ulong2 mask);
-double2 __ovld __cnfn shuffle2(double16 x, double16 y, ulong2 mask);
-
-double4 __ovld __cnfn shuffle2(double2 x, double2 y, ulong4 mask);
-double4 __ovld __cnfn shuffle2(double4 x, double4 y, ulong4 mask);
-double4 __ovld __cnfn shuffle2(double8 x, double8 y, ulong4 mask);
-double4 __ovld __cnfn shuffle2(double16 x, double16 y, ulong4 mask);
-
-double8 __ovld __cnfn shuffle2(double2 x, double2 y, ulong8 mask);
-double8 __ovld __cnfn shuffle2(double4 x, double4 y, ulong8 mask);
-double8 __ovld __cnfn shuffle2(double8 x, double8 y, ulong8 mask);
-double8 __ovld __cnfn shuffle2(double16 x, double16 y, ulong8 mask);
-
-double16 __ovld __cnfn shuffle2(double2 x, double2 y, ulong16 mask);
-double16 __ovld __cnfn shuffle2(double4 x, double4 y, ulong16 mask);
-double16 __ovld __cnfn shuffle2(double8 x, double8 y, ulong16 mask);
-double16 __ovld __cnfn shuffle2(double16 x, double16 y, ulong16 mask);
+double2 __ovld __cnfn shuffle2(double2, double2, ulong2);
+double2 __ovld __cnfn shuffle2(double4, double4, ulong2);
+double2 __ovld __cnfn shuffle2(double8, double8, ulong2);
+double2 __ovld __cnfn shuffle2(double16, double16, ulong2);
+
+double4 __ovld __cnfn shuffle2(double2, double2, ulong4);
+double4 __ovld __cnfn shuffle2(double4, double4, ulong4);
+double4 __ovld __cnfn shuffle2(double8, double8, ulong4);
+double4 __ovld __cnfn shuffle2(double16, double16, ulong4);
+
+double8 __ovld __cnfn shuffle2(double2, double2, ulong8);
+double8 __ovld __cnfn shuffle2(double4, double4, ulong8);
+double8 __ovld __cnfn shuffle2(double8, double8, ulong8);
+double8 __ovld __cnfn shuffle2(double16, double16, ulong8);
+
+double16 __ovld __cnfn shuffle2(double2, double2, ulong16);
+double16 __ovld __cnfn shuffle2(double4, double4, ulong16);
+double16 __ovld __cnfn shuffle2(double8, double8, ulong16);
+double16 __ovld __cnfn shuffle2(double16, double16, ulong16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half2 __ovld __cnfn shuffle2(half2 x, half2 y, ushort2 mask);
-half2 __ovld __cnfn shuffle2(half4 x, half4 y, ushort2 mask);
-half2 __ovld __cnfn shuffle2(half8 x, half8 y, ushort2 mask);
-half2 __ovld __cnfn shuffle2(half16 x, half16 y, ushort2 mask);
-
-half4 __ovld __cnfn shuffle2(half2 x, half2 y, ushort4 mask);
-half4 __ovld __cnfn shuffle2(half4 x, half4 y, ushort4 mask);
-half4 __ovld __cnfn shuffle2(half8 x, half8 y, ushort4 mask);
-half4 __ovld __cnfn shuffle2(half16 x, half16 y, ushort4 mask);
-
-half8 __ovld __cnfn shuffle2(half2 x, half2 y, ushort8 mask);
-half8 __ovld __cnfn shuffle2(half4 x, half4 y, ushort8 mask);
-half8 __ovld __cnfn shuffle2(half8 x, half8 y, ushort8 mask);
-half8 __ovld __cnfn shuffle2(half16 x, half16 y, ushort8 mask);
-
-half16 __ovld __cnfn shuffle2(half2 x, half2 y, ushort16 mask);
-half16 __ovld __cnfn shuffle2(half4 x, half4 y, ushort16 mask);
-half16 __ovld __cnfn shuffle2(half8 x, half8 y, ushort16 mask);
-half16 __ovld __cnfn shuffle2(half16 x, half16 y, ushort16 mask);
+half2 __ovld __cnfn shuffle2(half2, half2, ushort2);
+half2 __ovld __cnfn shuffle2(half4, half4, ushort2);
+half2 __ovld __cnfn shuffle2(half8, half8, ushort2);
+half2 __ovld __cnfn shuffle2(half16, half16, ushort2);
+
+half4 __ovld __cnfn shuffle2(half2, half2, ushort4);
+half4 __ovld __cnfn shuffle2(half4, half4, ushort4);
+half4 __ovld __cnfn shuffle2(half8, half8, ushort4);
+half4 __ovld __cnfn shuffle2(half16, half16, ushort4);
+
+half8 __ovld __cnfn shuffle2(half2, half2, ushort8);
+half8 __ovld __cnfn shuffle2(half4, half4, ushort8);
+half8 __ovld __cnfn shuffle2(half8, half8, ushort8);
+half8 __ovld __cnfn shuffle2(half16, half16, ushort8);
+
+half16 __ovld __cnfn shuffle2(half2, half2, ushort16);
+half16 __ovld __cnfn shuffle2(half4, half4, ushort16);
+half16 __ovld __cnfn shuffle2(half8, half8, ushort16);
+half16 __ovld __cnfn shuffle2(half16, half16, ushort16);
#endif //cl_khr_fp16
// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions
@@ -14215,123 +15175,131 @@ half16 __ovld __cnfn shuffle2(half16 x, half16 y, ushort16 mask);
* in the description above are undefined.
*/
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, int2 coord);
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord);
+float4 __ovld __purefn read_imagef(read_only image2d_t, sampler_t, int2);
+float4 __ovld __purefn read_imagef(read_only image2d_t, sampler_t, float2);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord);
+int4 __ovld __purefn read_imagei(read_only image2d_t, sampler_t, int2);
+int4 __ovld __purefn read_imagei(read_only image2d_t, sampler_t, float2);
+uint4 __ovld __purefn read_imageui(read_only image2d_t, sampler_t, int2);
+uint4 __ovld __purefn read_imageui(read_only image2d_t, sampler_t, float2);
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, int4 coord);
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord);
+float4 __ovld __purefn read_imagef(read_only image3d_t, sampler_t, int4);
+float4 __ovld __purefn read_imagef(read_only image3d_t, sampler_t, float4);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord);
+int4 __ovld __purefn read_imagei(read_only image3d_t, sampler_t, int4);
+int4 __ovld __purefn read_imagei(read_only image3d_t, sampler_t, float4);
+uint4 __ovld __purefn read_imageui(read_only image3d_t, sampler_t, int4);
+uint4 __ovld __purefn read_imageui(read_only image3d_t, sampler_t, float4);
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
+float4 __ovld __purefn read_imagef(read_only image2d_array_t, sampler_t, int4);
+float4 __ovld __purefn read_imagef(read_only image2d_array_t, sampler_t, float4);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
+int4 __ovld __purefn read_imagei(read_only image2d_array_t, sampler_t, int4);
+int4 __ovld __purefn read_imagei(read_only image2d_array_t, sampler_t, float4);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_t, sampler_t, int4);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_t, sampler_t, float4);
#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, int coord);
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord);
+float4 __ovld __purefn read_imagef(read_only image1d_t, sampler_t, int);
+float4 __ovld __purefn read_imagef(read_only image1d_t, sampler_t, float);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, int coord);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, int coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord);
+int4 __ovld __purefn read_imagei(read_only image1d_t, sampler_t, int);
+int4 __ovld __purefn read_imagei(read_only image1d_t, sampler_t, float);
+uint4 __ovld __purefn read_imageui(read_only image1d_t, sampler_t, int);
+uint4 __ovld __purefn read_imageui(read_only image1d_t, sampler_t, float);
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
+float4 __ovld __purefn read_imagef(read_only image1d_array_t, sampler_t, int2);
+float4 __ovld __purefn read_imagef(read_only image1d_array_t, sampler_t, float2);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
+int4 __ovld __purefn read_imagei(read_only image1d_array_t, sampler_t, int2);
+int4 __ovld __purefn read_imagei(read_only image1d_array_t, sampler_t, float2);
+uint4 __ovld __purefn read_imageui(read_only image1d_array_t, sampler_t, int2);
+uint4 __ovld __purefn read_imageui(read_only image1d_array_t, sampler_t, float2);
#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
#ifdef cl_khr_depth_images
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord);
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, int2 coord);
+float __ovld __purefn read_imagef(read_only image2d_depth_t, sampler_t, float2);
+float __ovld __purefn read_imagef(read_only image2d_depth_t, sampler_t, int2);
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord);
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, int4 coord);
+float __ovld __purefn read_imagef(read_only image2d_array_depth_t, sampler_t, float4);
+float __ovld __purefn read_imagef(read_only image2d_array_depth_t, sampler_t, int4);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-float4 __purefn __ovld read_imagef(read_only image2d_msaa_t image, int2 coord, int sample);
-int4 __purefn __ovld read_imagei(read_only image2d_msaa_t image, int2 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_only image2d_msaa_t image, int2 coord, int sample);
+float4 __ovld __purefn read_imagef(read_only image2d_msaa_t, int2, int);
+int4 __ovld __purefn read_imagei(read_only image2d_msaa_t, int2, int);
+uint4 __ovld __purefn read_imageui(read_only image2d_msaa_t, int2, int);
-float __purefn __ovld read_imagef(read_only image2d_msaa_depth_t image, int2 coord, int sample);
+float __ovld __purefn read_imagef(read_only image2d_msaa_depth_t, int2, int);
-float4 __purefn __ovld read_imagef(read_only image2d_array_msaa_t image, int4 coord, int sample);
-int4 __purefn __ovld read_imagei(read_only image2d_array_msaa_t image, int4 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_msaa_t image, int4 coord, int sample);
+float4 __ovld __purefn read_imagef(read_only image2d_array_msaa_t, int4, int);
+int4 __ovld __purefn read_imagei(read_only image2d_array_msaa_t, int4, int);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_msaa_t, int4, int);
-float __purefn __ovld read_imagef(read_only image2d_array_msaa_depth_t image, int4 coord, int sample);
+float __ovld __purefn read_imagef(read_only image2d_array_msaa_depth_t, int4, int);
#endif //cl_khr_gl_msaa_sharing
// OpenCL Extension v2.0 s9.18 - Mipmaps
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
#ifdef cl_khr_mipmap_image
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float lod);
+float4 __ovld __purefn read_imagef(read_only image1d_t, sampler_t, float, float);
+int4 __ovld __purefn read_imagei(read_only image1d_t, sampler_t, float, float);
+uint4 __ovld __purefn read_imageui(read_only image1d_t, sampler_t, float, float);
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
+float4 __ovld __purefn read_imagef(read_only image1d_array_t, sampler_t, float2, float);
+int4 __ovld __purefn read_imagei(read_only image1d_array_t, sampler_t, float2, float);
+uint4 __ovld __purefn read_imageui(read_only image1d_array_t, sampler_t, float2, float);
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
+float4 __ovld __purefn read_imagef(read_only image2d_t, sampler_t, float2, float);
+int4 __ovld __purefn read_imagei(read_only image2d_t, sampler_t, float2, float);
+uint4 __ovld __purefn read_imageui(read_only image2d_t, sampler_t, float2, float);
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float lod);
+#ifdef cl_khr_depth_images
+float __ovld __purefn read_imagef(read_only image2d_depth_t, sampler_t, float2, float);
+#endif // cl_khr_depth_images
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
+float4 __ovld __purefn read_imagef(read_only image2d_array_t, sampler_t, float4, float);
+int4 __ovld __purefn read_imagei(read_only image2d_array_t, sampler_t, float4, float);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_t, sampler_t, float4, float);
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod);
+#ifdef cl_khr_depth_images
+float __ovld __purefn read_imagef(read_only image2d_array_depth_t, sampler_t, float4, float);
+#endif // cl_khr_depth_images
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
+float4 __ovld __purefn read_imagef(read_only image3d_t, sampler_t, float4, float);
+int4 __ovld __purefn read_imagei(read_only image3d_t, sampler_t, float4, float);
+uint4 __ovld __purefn read_imageui(read_only image3d_t, sampler_t, float4, float);
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
+float4 __ovld __purefn read_imagef(read_only image1d_t, sampler_t, float, float, float);
+int4 __ovld __purefn read_imagei(read_only image1d_t, sampler_t, float, float, float);
+uint4 __ovld __purefn read_imageui(read_only image1d_t, sampler_t, float, float, float);
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
+float4 __ovld __purefn read_imagef(read_only image1d_array_t, sampler_t, float2, float, float);
+int4 __ovld __purefn read_imagei(read_only image1d_array_t, sampler_t, float2, float, float);
+uint4 __ovld __purefn read_imageui(read_only image1d_array_t, sampler_t, float2, float, float);
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+float4 __ovld __purefn read_imagef(read_only image2d_t, sampler_t, float2, float2, float2);
+int4 __ovld __purefn read_imagei(read_only image2d_t, sampler_t, float2, float2, float2);
+uint4 __ovld __purefn read_imageui(read_only image2d_t, sampler_t, float2, float2, float2);
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+#ifdef cl_khr_depth_images
+float __ovld __purefn read_imagef(read_only image2d_depth_t, sampler_t, float2, float2, float2);
+#endif // cl_khr_depth_images
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+float4 __ovld __purefn read_imagef(read_only image2d_array_t, sampler_t, float4, float2, float2);
+int4 __ovld __purefn read_imagei(read_only image2d_array_t, sampler_t, float4, float2, float2);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_t, sampler_t, float4, float2, float2);
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+#ifdef cl_khr_depth_images
+float __ovld __purefn read_imagef(read_only image2d_array_depth_t, sampler_t, float4, float2, float2);
+#endif // cl_khr_depth_images
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
+float4 __ovld __purefn read_imagef(read_only image3d_t, sampler_t, float4, float4, float4);
+int4 __ovld __purefn read_imagei(read_only image3d_t, sampler_t, float4, float4, float4);
+uint4 __ovld __purefn read_imageui(read_only image3d_t, sampler_t, float4, float4, float4);
#endif //cl_khr_mipmap_image
#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
@@ -14342,169 +15310,175 @@ uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler,
* Sampler-less Image Access
*/
-float4 __purefn __ovld read_imagef(read_only image1d_t image, int coord);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, int coord);
+float4 __ovld __purefn read_imagef(read_only image1d_t, int);
+int4 __ovld __purefn read_imagei(read_only image1d_t, int);
+uint4 __ovld __purefn read_imageui(read_only image1d_t, int);
-float4 __purefn __ovld read_imagef(read_only image1d_buffer_t image, int coord);
-int4 __purefn __ovld read_imagei(read_only image1d_buffer_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_buffer_t image, int coord);
+float4 __ovld __purefn read_imagef(read_only image1d_buffer_t, int);
+int4 __ovld __purefn read_imagei(read_only image1d_buffer_t, int);
+uint4 __ovld __purefn read_imageui(read_only image1d_buffer_t, int);
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image, int2 coord);
+float4 __ovld __purefn read_imagef(read_only image1d_array_t, int2);
+int4 __ovld __purefn read_imagei(read_only image1d_array_t, int2);
+uint4 __ovld __purefn read_imageui(read_only image1d_array_t, int2);
-float4 __purefn __ovld read_imagef(read_only image2d_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, int2 coord);
+float4 __ovld __purefn read_imagef(read_only image2d_t, int2);
+int4 __ovld __purefn read_imagei(read_only image2d_t, int2);
+uint4 __ovld __purefn read_imageui(read_only image2d_t, int2);
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image, int4 coord);
+float4 __ovld __purefn read_imagef(read_only image2d_array_t, int4);
+int4 __ovld __purefn read_imagei(read_only image2d_array_t, int4);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_t, int4);
#ifdef cl_khr_depth_images
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, int2 coord);
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, int4 coord);
+float __ovld __purefn read_imagef(read_only image2d_depth_t, int2);
+float __ovld __purefn read_imagef(read_only image2d_array_depth_t, int4);
#endif //cl_khr_depth_images
-float4 __purefn __ovld read_imagef(read_only image3d_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, int4 coord);
+float4 __ovld __purefn read_imagef(read_only image3d_t, int4);
+int4 __ovld __purefn read_imagei(read_only image3d_t, int4);
+uint4 __ovld __purefn read_imageui(read_only image3d_t, int4);
#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
// Image read functions returning half4 type
#ifdef cl_khr_fp16
-half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, int coord);
-half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, float coord);
-half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, float2 coord);
-half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, float4 coord);
+half4 __ovld __purefn read_imageh(read_only image1d_t, sampler_t, int);
+half4 __ovld __purefn read_imageh(read_only image1d_t, sampler_t, float);
+half4 __ovld __purefn read_imageh(read_only image2d_t, sampler_t, int2);
+half4 __ovld __purefn read_imageh(read_only image2d_t, sampler_t, float2);
+half4 __ovld __purefn read_imageh(read_only image3d_t, sampler_t, int4);
+half4 __ovld __purefn read_imageh(read_only image3d_t, sampler_t, float4);
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, float2 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, float4 coord);
+half4 __ovld __purefn read_imageh(read_only image1d_array_t, sampler_t, int2);
+half4 __ovld __purefn read_imageh(read_only image1d_array_t, sampler_t, float2);
+half4 __ovld __purefn read_imageh(read_only image2d_array_t, sampler_t, int4);
+half4 __ovld __purefn read_imageh(read_only image2d_array_t, sampler_t, float4);
/**
* Sampler-less Image Access
*/
-half4 __purefn __ovld read_imageh(read_only image1d_t image, int coord);
-half4 __purefn __ovld read_imageh(read_only image2d_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image3d_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image1d_array_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_array_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image1d_buffer_t image, int coord);
+half4 __ovld __purefn read_imageh(read_only image1d_t, int);
+half4 __ovld __purefn read_imageh(read_only image2d_t, int2);
+half4 __ovld __purefn read_imageh(read_only image3d_t, int4);
+half4 __ovld __purefn read_imageh(read_only image1d_array_t, int2);
+half4 __ovld __purefn read_imageh(read_only image2d_array_t, int4);
+half4 __ovld __purefn read_imageh(read_only image1d_buffer_t, int);
#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
#endif //cl_khr_fp16
// Image read functions for read_write images
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-float4 __purefn __ovld read_imagef(read_write image1d_t image, int coord);
-int4 __purefn __ovld read_imagei(read_write image1d_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_write image1d_t image, int coord);
+#if defined(__opencl_c_read_write_images)
+float4 __ovld __purefn read_imagef(read_write image1d_t, int);
+int4 __ovld __purefn read_imagei(read_write image1d_t, int);
+uint4 __ovld __purefn read_imageui(read_write image1d_t, int);
-float4 __purefn __ovld read_imagef(read_write image1d_buffer_t image, int coord);
-int4 __purefn __ovld read_imagei(read_write image1d_buffer_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_write image1d_buffer_t image, int coord);
+float4 __ovld __purefn read_imagef(read_write image1d_buffer_t, int);
+int4 __ovld __purefn read_imagei(read_write image1d_buffer_t, int);
+uint4 __ovld __purefn read_imageui(read_write image1d_buffer_t, int);
-float4 __purefn __ovld read_imagef(read_write image1d_array_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_write image1d_array_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_write image1d_array_t image, int2 coord);
+float4 __ovld __purefn read_imagef(read_write image1d_array_t, int2);
+int4 __ovld __purefn read_imagei(read_write image1d_array_t, int2);
+uint4 __ovld __purefn read_imageui(read_write image1d_array_t, int2);
-float4 __purefn __ovld read_imagef(read_write image2d_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_write image2d_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_write image2d_t image, int2 coord);
+float4 __ovld __purefn read_imagef(read_write image2d_t, int2);
+int4 __ovld __purefn read_imagei(read_write image2d_t, int2);
+uint4 __ovld __purefn read_imageui(read_write image2d_t, int2);
-float4 __purefn __ovld read_imagef(read_write image2d_array_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_write image2d_array_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_t image, int4 coord);
+float4 __ovld __purefn read_imagef(read_write image2d_array_t, int4);
+int4 __ovld __purefn read_imagei(read_write image2d_array_t, int4);
+uint4 __ovld __purefn read_imageui(read_write image2d_array_t, int4);
-float4 __purefn __ovld read_imagef(read_write image3d_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_write image3d_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_write image3d_t image, int4 coord);
+#ifdef cl_khr_3d_image_writes
+float4 __ovld __purefn read_imagef(read_write image3d_t, int4);
+int4 __ovld __purefn read_imagei(read_write image3d_t, int4);
+uint4 __ovld __purefn read_imageui(read_write image3d_t, int4);
+#endif // cl_khr_3d_image_writes
#ifdef cl_khr_depth_images
-float __purefn __ovld read_imagef(read_write image2d_depth_t image, int2 coord);
-float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, int4 coord);
+float __ovld __purefn read_imagef(read_write image2d_depth_t, int2);
+float __ovld __purefn read_imagef(read_write image2d_array_depth_t, int4);
#endif //cl_khr_depth_images
#if cl_khr_gl_msaa_sharing
-float4 __purefn __ovld read_imagef(read_write image2d_msaa_t image, int2 coord, int sample);
-int4 __purefn __ovld read_imagei(read_write image2d_msaa_t image, int2 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_write image2d_msaa_t image, int2 coord, int sample);
+float4 __ovld __purefn read_imagef(read_write image2d_msaa_t, int2, int);
+int4 __ovld __purefn read_imagei(read_write image2d_msaa_t, int2, int);
+uint4 __ovld __purefn read_imageui(read_write image2d_msaa_t, int2, int);
-float4 __purefn __ovld read_imagef(read_write image2d_array_msaa_t image, int4 coord, int sample);
-int4 __purefn __ovld read_imagei(read_write image2d_array_msaa_t image, int4 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_msaa_t image, int4 coord, int sample);
+float4 __ovld __purefn read_imagef(read_write image2d_array_msaa_t, int4, int);
+int4 __ovld __purefn read_imagei(read_write image2d_array_msaa_t, int4, int);
+uint4 __ovld __purefn read_imageui(read_write image2d_array_msaa_t, int4, int);
-float __purefn __ovld read_imagef(read_write image2d_msaa_depth_t image, int2 coord, int sample);
-float __purefn __ovld read_imagef(read_write image2d_array_msaa_depth_t image, int4 coord, int sample);
+float __ovld __purefn read_imagef(read_write image2d_msaa_depth_t, int2, int);
+float __ovld __purefn read_imagef(read_write image2d_array_msaa_depth_t, int4, int);
#endif //cl_khr_gl_msaa_sharing
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
#ifdef cl_khr_mipmap_image
-float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float lod);
+float4 __ovld __purefn read_imagef(read_write image1d_t, sampler_t, float, float);
+int4 __ovld __purefn read_imagei(read_write image1d_t, sampler_t, float, float);
+uint4 __ovld __purefn read_imageui(read_write image1d_t, sampler_t, float, float);
-float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
+float4 __ovld __purefn read_imagef(read_write image1d_array_t, sampler_t, float2, float);
+int4 __ovld __purefn read_imagei(read_write image1d_array_t, sampler_t, float2, float);
+uint4 __ovld __purefn read_imageui(read_write image1d_array_t, sampler_t, float2, float);
-float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
+float4 __ovld __purefn read_imagef(read_write image2d_t, sampler_t, float2, float);
+int4 __ovld __purefn read_imagei(read_write image2d_t, sampler_t, float2, float);
+uint4 __ovld __purefn read_imageui(read_write image2d_t, sampler_t, float2, float);
-float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float lod);
+float __ovld __purefn read_imagef(read_write image2d_depth_t, sampler_t, float2, float);
-float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
+float4 __ovld __purefn read_imagef(read_write image2d_array_t, sampler_t, float4, float);
+int4 __ovld __purefn read_imagei(read_write image2d_array_t, sampler_t, float4, float);
+uint4 __ovld __purefn read_imageui(read_write image2d_array_t, sampler_t, float4, float);
-float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod);
+float __ovld __purefn read_imagef(read_write image2d_array_depth_t, sampler_t, float4, float);
-float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
+#ifdef cl_khr_3d_image_writes
+float4 __ovld __purefn read_imagef(read_write image3d_t, sampler_t, float4, float);
+int4 __ovld __purefn read_imagei(read_write image3d_t, sampler_t, float4, float);
+uint4 __ovld __purefn read_imageui(read_write image3d_t, sampler_t, float4, float);
+#endif // cl_khr_3d_image_writes
-float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
+float4 __ovld __purefn read_imagef(read_write image1d_t, sampler_t, float, float, float);
+int4 __ovld __purefn read_imagei(read_write image1d_t, sampler_t, float, float, float);
+uint4 __ovld __purefn read_imageui(read_write image1d_t, sampler_t, float, float, float);
-float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
+float4 __ovld __purefn read_imagef(read_write image1d_array_t, sampler_t, float2, float, float);
+int4 __ovld __purefn read_imagei(read_write image1d_array_t, sampler_t, float2, float, float);
+uint4 __ovld __purefn read_imageui(read_write image1d_array_t, sampler_t, float2, float, float);
-float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+float4 __ovld __purefn read_imagef(read_write image2d_t, sampler_t, float2, float2, float2);
+int4 __ovld __purefn read_imagei(read_write image2d_t, sampler_t, float2, float2, float2);
+uint4 __ovld __purefn read_imageui(read_write image2d_t, sampler_t, float2, float2, float2);
-float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+float __ovld __purefn read_imagef(read_write image2d_depth_t, sampler_t, float2, float2, float2);
-float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+float4 __ovld __purefn read_imagef(read_write image2d_array_t, sampler_t, float4, float2, float2);
+int4 __ovld __purefn read_imagei(read_write image2d_array_t, sampler_t, float4, float2, float2);
+uint4 __ovld __purefn read_imageui(read_write image2d_array_t, sampler_t, float4, float2, float2);
-float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+float __ovld __purefn read_imagef(read_write image2d_array_depth_t, sampler_t, float4, float2, float2);
-float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
+#ifdef cl_khr_3d_image_writes
+float4 __ovld __purefn read_imagef(read_write image3d_t, sampler_t, float4, float4, float4);
+int4 __ovld __purefn read_imagei(read_write image3d_t, sampler_t, float4, float4, float4);
+uint4 __ovld __purefn read_imageui(read_write image3d_t, sampler_t, float4, float4, float4);
+#endif // cl_khr_3d_image_writes
#endif //cl_khr_mipmap_image
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// Image read functions returning half4 type
#ifdef cl_khr_fp16
-half4 __purefn __ovld read_imageh(read_write image1d_t image, int coord);
-half4 __purefn __ovld read_imageh(read_write image2d_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_write image3d_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_write image1d_array_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_write image2d_array_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_write image1d_buffer_t image, int coord);
+half4 __ovld __purefn read_imageh(read_write image1d_t, int);
+half4 __ovld __purefn read_imageh(read_write image2d_t, int2);
+#ifdef cl_khr_3d_image_writes
+half4 __ovld __purefn read_imageh(read_write image3d_t, int4);
+#endif // cl_khr_3d_image_writes
+half4 __ovld __purefn read_imageh(read_write image1d_array_t, int2);
+half4 __ovld __purefn read_imageh(read_write image2d_array_t, int4);
+half4 __ovld __purefn read_imageh(read_write image1d_buffer_t, int);
#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
/**
* Write color value to location specified by coordinate
@@ -14573,63 +15547,63 @@ half4 __purefn __ovld read_imageh(read_write image1d_buffer_t image, int coord);
* values that are not in the range (0 ... image width -1,
* 0 ... image height - 1), respectively, is undefined.
*/
-void __ovld write_imagef(write_only image2d_t image, int2 coord, float4 color);
-void __ovld write_imagei(write_only image2d_t image, int2 coord, int4 color);
-void __ovld write_imageui(write_only image2d_t image, int2 coord, uint4 color);
+void __ovld write_imagef(write_only image2d_t, int2, float4);
+void __ovld write_imagei(write_only image2d_t, int2, int4);
+void __ovld write_imageui(write_only image2d_t, int2, uint4);
-void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, float4 color);
-void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int4 color);
-void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, uint4 color);
+void __ovld write_imagef(write_only image2d_array_t, int4, float4);
+void __ovld write_imagei(write_only image2d_array_t, int4, int4);
+void __ovld write_imageui(write_only image2d_array_t, int4, uint4);
-void __ovld write_imagef(write_only image1d_t image, int coord, float4 color);
-void __ovld write_imagei(write_only image1d_t image, int coord, int4 color);
-void __ovld write_imageui(write_only image1d_t image, int coord, uint4 color);
+void __ovld write_imagef(write_only image1d_t, int, float4);
+void __ovld write_imagei(write_only image1d_t, int, int4);
+void __ovld write_imageui(write_only image1d_t, int, uint4);
-void __ovld write_imagef(write_only image1d_buffer_t image, int coord, float4 color);
-void __ovld write_imagei(write_only image1d_buffer_t image, int coord, int4 color);
-void __ovld write_imageui(write_only image1d_buffer_t image, int coord, uint4 color);
+void __ovld write_imagef(write_only image1d_buffer_t, int, float4);
+void __ovld write_imagei(write_only image1d_buffer_t, int, int4);
+void __ovld write_imageui(write_only image1d_buffer_t, int, uint4);
-void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, float4 color);
-void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int4 color);
-void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, uint4 color);
+void __ovld write_imagef(write_only image1d_array_t, int2, float4);
+void __ovld write_imagei(write_only image1d_array_t, int2, int4);
+void __ovld write_imageui(write_only image1d_array_t, int2, uint4);
#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(write_only image3d_t image, int4 coord, float4 color);
-void __ovld write_imagei(write_only image3d_t image, int4 coord, int4 color);
-void __ovld write_imageui(write_only image3d_t image, int4 coord, uint4 color);
+void __ovld write_imagef(write_only image3d_t, int4, float4);
+void __ovld write_imagei(write_only image3d_t, int4, int4);
+void __ovld write_imageui(write_only image3d_t, int4, uint4);
#endif
#ifdef cl_khr_depth_images
-void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, float color);
-void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, float color);
+void __ovld write_imagef(write_only image2d_depth_t, int2, float);
+void __ovld write_imagef(write_only image2d_array_depth_t, int4, float);
#endif //cl_khr_depth_images
// OpenCL Extension v2.0 s9.18 - Mipmaps
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
#if defined(cl_khr_mipmap_image_writes)
-void __ovld write_imagef(write_only image1d_t image, int coord, int lod, float4 color);
-void __ovld write_imagei(write_only image1d_t image, int coord, int lod, int4 color);
-void __ovld write_imageui(write_only image1d_t image, int coord, int lod, uint4 color);
+void __ovld write_imagef(write_only image1d_t, int, int, float4);
+void __ovld write_imagei(write_only image1d_t, int, int, int4);
+void __ovld write_imageui(write_only image1d_t, int, int, uint4);
-void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, int lod, uint4 color);
+void __ovld write_imagef(write_only image1d_array_t, int2, int, float4);
+void __ovld write_imagei(write_only image1d_array_t, int2, int, int4);
+void __ovld write_imageui(write_only image1d_array_t, int2, int, uint4);
-void __ovld write_imagef(write_only image2d_t image, int2 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image2d_t image, int2 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image2d_t image, int2 coord, int lod, uint4 color);
+void __ovld write_imagef(write_only image2d_t, int2, int, float4);
+void __ovld write_imagei(write_only image2d_t, int2, int, int4);
+void __ovld write_imageui(write_only image2d_t, int2, int, uint4);
-void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, int lod, uint4 color);
+void __ovld write_imagef(write_only image2d_array_t, int4, int, float4);
+void __ovld write_imagei(write_only image2d_array_t, int4, int, int4);
+void __ovld write_imageui(write_only image2d_array_t, int4, int, uint4);
-void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, int lod, float depth);
-void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, int lod, float depth);
+void __ovld write_imagef(write_only image2d_depth_t, int2, int, float);
+void __ovld write_imagef(write_only image2d_array_depth_t, int4, int, float);
#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(write_only image3d_t image, int4 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image3d_t image, int4 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image3d_t image, int4 coord, int lod, uint4 color);
+void __ovld write_imagef(write_only image3d_t, int4, int, float4);
+void __ovld write_imagei(write_only image3d_t, int4, int, int4);
+void __ovld write_imageui(write_only image3d_t, int4, int, uint4);
#endif //cl_khr_3d_image_writes
#endif //defined(cl_khr_mipmap_image_writes)
@@ -14637,91 +15611,89 @@ void __ovld write_imageui(write_only image3d_t image, int4 coord, int lod, uint4
// Image write functions for half4 type
#ifdef cl_khr_fp16
-void __ovld write_imageh(write_only image1d_t image, int coord, half4 color);
-void __ovld write_imageh(write_only image2d_t image, int2 coord, half4 color);
+void __ovld write_imageh(write_only image1d_t, int, half4);
+void __ovld write_imageh(write_only image2d_t, int2, half4);
#ifdef cl_khr_3d_image_writes
-void __ovld write_imageh(write_only image3d_t image, int4 coord, half4 color);
+void __ovld write_imageh(write_only image3d_t, int4, half4);
#endif
-void __ovld write_imageh(write_only image1d_array_t image, int2 coord, half4 color);
-void __ovld write_imageh(write_only image2d_array_t image, int4 coord, half4 color);
-void __ovld write_imageh(write_only image1d_buffer_t image, int coord, half4 color);
+void __ovld write_imageh(write_only image1d_array_t, int2, half4);
+void __ovld write_imageh(write_only image2d_array_t, int4, half4);
+void __ovld write_imageh(write_only image1d_buffer_t, int, half4);
#endif //cl_khr_fp16
// Image write functions for read_write images
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void __ovld write_imagef(read_write image2d_t image, int2 coord, float4 color);
-void __ovld write_imagei(read_write image2d_t image, int2 coord, int4 color);
-void __ovld write_imageui(read_write image2d_t image, int2 coord, uint4 color);
+#if defined(__opencl_c_read_write_images)
+void __ovld write_imagef(read_write image2d_t, int2, float4);
+void __ovld write_imagei(read_write image2d_t, int2, int4);
+void __ovld write_imageui(read_write image2d_t, int2, uint4);
-void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, float4 color);
-void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int4 color);
-void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, uint4 color);
+void __ovld write_imagef(read_write image2d_array_t, int4, float4);
+void __ovld write_imagei(read_write image2d_array_t, int4, int4);
+void __ovld write_imageui(read_write image2d_array_t, int4, uint4);
-void __ovld write_imagef(read_write image1d_t image, int coord, float4 color);
-void __ovld write_imagei(read_write image1d_t image, int coord, int4 color);
-void __ovld write_imageui(read_write image1d_t image, int coord, uint4 color);
+void __ovld write_imagef(read_write image1d_t, int, float4);
+void __ovld write_imagei(read_write image1d_t, int, int4);
+void __ovld write_imageui(read_write image1d_t, int, uint4);
-void __ovld write_imagef(read_write image1d_buffer_t image, int coord, float4 color);
-void __ovld write_imagei(read_write image1d_buffer_t image, int coord, int4 color);
-void __ovld write_imageui(read_write image1d_buffer_t image, int coord, uint4 color);
+void __ovld write_imagef(read_write image1d_buffer_t, int, float4);
+void __ovld write_imagei(read_write image1d_buffer_t, int, int4);
+void __ovld write_imageui(read_write image1d_buffer_t, int, uint4);
-void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, float4 color);
-void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int4 color);
-void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, uint4 color);
+void __ovld write_imagef(read_write image1d_array_t, int2, float4);
+void __ovld write_imagei(read_write image1d_array_t, int2, int4);
+void __ovld write_imageui(read_write image1d_array_t, int2, uint4);
#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(read_write image3d_t image, int4 coord, float4 color);
-void __ovld write_imagei(read_write image3d_t image, int4 coord, int4 color);
-void __ovld write_imageui(read_write image3d_t image, int4 coord, uint4 color);
+void __ovld write_imagef(read_write image3d_t, int4, float4);
+void __ovld write_imagei(read_write image3d_t, int4, int4);
+void __ovld write_imageui(read_write image3d_t, int4, uint4);
#endif
#ifdef cl_khr_depth_images
-void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, float color);
-void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, float color);
+void __ovld write_imagef(read_write image2d_depth_t, int2, float);
+void __ovld write_imagef(read_write image2d_array_depth_t, int4, float);
#endif //cl_khr_depth_images
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
#if defined(cl_khr_mipmap_image_writes)
-void __ovld write_imagef(read_write image1d_t image, int coord, int lod, float4 color);
-void __ovld write_imagei(read_write image1d_t image, int coord, int lod, int4 color);
-void __ovld write_imageui(read_write image1d_t image, int coord, int lod, uint4 color);
+void __ovld write_imagef(read_write image1d_t, int, int, float4);
+void __ovld write_imagei(read_write image1d_t, int, int, int4);
+void __ovld write_imageui(read_write image1d_t, int, int, uint4);
-void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, int lod, uint4 color);
+void __ovld write_imagef(read_write image1d_array_t, int2, int, float4);
+void __ovld write_imagei(read_write image1d_array_t, int2, int, int4);
+void __ovld write_imageui(read_write image1d_array_t, int2, int, uint4);
-void __ovld write_imagef(read_write image2d_t image, int2 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image2d_t image, int2 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image2d_t image, int2 coord, int lod, uint4 color);
+void __ovld write_imagef(read_write image2d_t, int2, int, float4);
+void __ovld write_imagei(read_write image2d_t, int2, int, int4);
+void __ovld write_imageui(read_write image2d_t, int2, int, uint4);
-void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, int lod, uint4 color);
+void __ovld write_imagef(read_write image2d_array_t, int4, int, float4);
+void __ovld write_imagei(read_write image2d_array_t, int4, int, int4);
+void __ovld write_imageui(read_write image2d_array_t, int4, int, uint4);
-void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, int lod, float color);
-void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, int lod, float color);
+void __ovld write_imagef(read_write image2d_depth_t, int2, int, float);
+void __ovld write_imagef(read_write image2d_array_depth_t, int4, int, float);
#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(read_write image3d_t image, int4 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image3d_t image, int4 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image3d_t image, int4 coord, int lod, uint4 color);
+void __ovld write_imagef(read_write image3d_t, int4, int, float4);
+void __ovld write_imagei(read_write image3d_t, int4, int, int4);
+void __ovld write_imageui(read_write image3d_t, int4, int, uint4);
#endif //cl_khr_3d_image_writes
#endif //cl_khr_mipmap_image_writes
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// Image write functions for half4 type
#ifdef cl_khr_fp16
-void __ovld write_imageh(read_write image1d_t image, int coord, half4 color);
-void __ovld write_imageh(read_write image2d_t image, int2 coord, half4 color);
+void __ovld write_imageh(read_write image1d_t, int, half4);
+void __ovld write_imageh(read_write image2d_t, int2, half4);
#ifdef cl_khr_3d_image_writes
-void __ovld write_imageh(read_write image3d_t image, int4 coord, half4 color);
+void __ovld write_imageh(read_write image3d_t, int4, half4);
#endif
-void __ovld write_imageh(read_write image1d_array_t image, int2 coord, half4 color);
-void __ovld write_imageh(read_write image2d_array_t image, int4 coord, half4 color);
-void __ovld write_imageh(read_write image1d_buffer_t image, int coord, half4 color);
+void __ovld write_imageh(read_write image1d_array_t, int2, half4);
+void __ovld write_imageh(read_write image2d_array_t, int4, half4);
+void __ovld write_imageh(read_write image1d_buffer_t, int, half4);
#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
// Note: In OpenCL v1.0/1.1/1.2, image argument of image query builtin functions does not have
// access qualifier, which by default assume read_only access qualifier. Image query builtin
@@ -14731,124 +15703,126 @@ void __ovld write_imageh(read_write image1d_buffer_t image, int coord, half4 col
* Return the image width in pixels.
*
*/
-int __ovld __cnfn get_image_width(read_only image1d_t image);
-int __ovld __cnfn get_image_width(read_only image1d_buffer_t image);
-int __ovld __cnfn get_image_width(read_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_width(read_only image3d_t image);
-#endif
-int __ovld __cnfn get_image_width(read_only image1d_array_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_t image);
+int __ovld __cnfn get_image_width(read_only image1d_t);
+int __ovld __cnfn get_image_width(read_only image1d_buffer_t);
+int __ovld __cnfn get_image_width(read_only image2d_t);
+int __ovld __cnfn get_image_width(read_only image3d_t);
+int __ovld __cnfn get_image_width(read_only image1d_array_t);
+int __ovld __cnfn get_image_width(read_only image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_width(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_depth_t image);
+int __ovld __cnfn get_image_width(read_only image2d_depth_t);
+int __ovld __cnfn get_image_width(read_only image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_width(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_width(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_width(read_only image2d_msaa_t);
+int __ovld __cnfn get_image_width(read_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_width(read_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_width(read_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-int __ovld __cnfn get_image_width(write_only image1d_t image);
-int __ovld __cnfn get_image_width(write_only image1d_buffer_t image);
-int __ovld __cnfn get_image_width(write_only image2d_t image);
+int __ovld __cnfn get_image_width(write_only image1d_t);
+int __ovld __cnfn get_image_width(write_only image1d_buffer_t);
+int __ovld __cnfn get_image_width(write_only image2d_t);
#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_width(write_only image3d_t image);
+int __ovld __cnfn get_image_width(write_only image3d_t);
#endif
-int __ovld __cnfn get_image_width(write_only image1d_array_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_t image);
+int __ovld __cnfn get_image_width(write_only image1d_array_t);
+int __ovld __cnfn get_image_width(write_only image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_width(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_depth_t image);
+int __ovld __cnfn get_image_width(write_only image2d_depth_t);
+int __ovld __cnfn get_image_width(write_only image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_width(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_width(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_width(write_only image2d_msaa_t);
+int __ovld __cnfn get_image_width(write_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_width(write_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_width(write_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_width(read_write image1d_t image);
-int __ovld __cnfn get_image_width(read_write image1d_buffer_t image);
-int __ovld __cnfn get_image_width(read_write image2d_t image);
-int __ovld __cnfn get_image_width(read_write image3d_t image);
-int __ovld __cnfn get_image_width(read_write image1d_array_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_t image);
+#if defined(__opencl_c_read_write_images)
+int __ovld __cnfn get_image_width(read_write image1d_t);
+int __ovld __cnfn get_image_width(read_write image1d_buffer_t);
+int __ovld __cnfn get_image_width(read_write image2d_t);
+#ifdef cl_khr_3d_image_writes
+int __ovld __cnfn get_image_width(read_write image3d_t);
+#endif // cl_khr_3d_image_writes
+int __ovld __cnfn get_image_width(read_write image1d_array_t);
+int __ovld __cnfn get_image_width(read_write image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_width(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_depth_t image);
+int __ovld __cnfn get_image_width(read_write image2d_depth_t);
+int __ovld __cnfn get_image_width(read_write image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_width(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_width(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_width(read_write image2d_msaa_t);
+int __ovld __cnfn get_image_width(read_write image2d_msaa_depth_t);
+int __ovld __cnfn get_image_width(read_write image2d_array_msaa_t);
+int __ovld __cnfn get_image_width(read_write image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
/**
* Return the image height in pixels.
*/
-int __ovld __cnfn get_image_height(read_only image2d_t image);
-int __ovld __cnfn get_image_height(read_only image3d_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_t image);
+int __ovld __cnfn get_image_height(read_only image2d_t);
+int __ovld __cnfn get_image_height(read_only image3d_t);
+int __ovld __cnfn get_image_height(read_only image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_height(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_depth_t image);
+int __ovld __cnfn get_image_height(read_only image2d_depth_t);
+int __ovld __cnfn get_image_height(read_only image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_height(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_height(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_height(read_only image2d_msaa_t);
+int __ovld __cnfn get_image_height(read_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_height(read_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_height(read_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-int __ovld __cnfn get_image_height(write_only image2d_t image);
+int __ovld __cnfn get_image_height(write_only image2d_t);
#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_height(write_only image3d_t image);
+int __ovld __cnfn get_image_height(write_only image3d_t);
#endif
-int __ovld __cnfn get_image_height(write_only image2d_array_t image);
+int __ovld __cnfn get_image_height(write_only image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_height(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_height(write_only image2d_array_depth_t image);
+int __ovld __cnfn get_image_height(write_only image2d_depth_t);
+int __ovld __cnfn get_image_height(write_only image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_height(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_height(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_height(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_height(write_only image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_height(write_only image2d_msaa_t);
+int __ovld __cnfn get_image_height(write_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_height(write_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_height(write_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_height(read_write image2d_t image);
-int __ovld __cnfn get_image_height(read_write image3d_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_t image);
+#if defined(__opencl_c_read_write_images)
+int __ovld __cnfn get_image_height(read_write image2d_t);
+#ifdef cl_khr_3d_image_writes
+int __ovld __cnfn get_image_height(read_write image3d_t);
+#endif // cl_khr_3d_image_writes
+int __ovld __cnfn get_image_height(read_write image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_height(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_depth_t image);
+int __ovld __cnfn get_image_height(read_write image2d_depth_t);
+int __ovld __cnfn get_image_height(read_write image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_height(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_height(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_height(read_write image2d_msaa_t);
+int __ovld __cnfn get_image_height(read_write image2d_msaa_depth_t);
+int __ovld __cnfn get_image_height(read_write image2d_array_msaa_t);
+int __ovld __cnfn get_image_height(read_write image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
/**
* Return the image depth in pixels.
*/
-int __ovld __cnfn get_image_depth(read_only image3d_t image);
+int __ovld __cnfn get_image_depth(read_only image3d_t);
#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_depth(write_only image3d_t image);
-#endif
+int __ovld __cnfn get_image_depth(write_only image3d_t);
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_depth(read_write image3d_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
+int __ovld __cnfn get_image_depth(read_write image3d_t);
+#endif //defined(__opencl_c_read_write_images)
+#endif // cl_khr_3d_image_writes
// OpenCL Extension v2.0 s9.18 - Mipmaps
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
@@ -14857,34 +15831,46 @@ int __ovld __cnfn get_image_depth(read_write image3d_t image);
* Return the image miplevels.
*/
-int __ovld get_image_num_mip_levels(read_only image1d_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_t image);
-int __ovld get_image_num_mip_levels(read_only image3d_t image);
+int __ovld get_image_num_mip_levels(read_only image1d_t);
+int __ovld get_image_num_mip_levels(read_only image2d_t);
+int __ovld get_image_num_mip_levels(read_only image3d_t);
-int __ovld get_image_num_mip_levels(write_only image1d_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_t image);
+int __ovld get_image_num_mip_levels(write_only image1d_t);
+int __ovld get_image_num_mip_levels(write_only image2d_t);
#ifdef cl_khr_3d_image_writes
-int __ovld get_image_num_mip_levels(write_only image3d_t image);
+int __ovld get_image_num_mip_levels(write_only image3d_t);
#endif
-int __ovld get_image_num_mip_levels(read_write image1d_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_t image);
-int __ovld get_image_num_mip_levels(read_write image3d_t image);
+#if defined(__opencl_c_read_write_images)
+int __ovld get_image_num_mip_levels(read_write image1d_t);
+int __ovld get_image_num_mip_levels(read_write image2d_t);
+#ifdef cl_khr_3d_image_writes
+int __ovld get_image_num_mip_levels(read_write image3d_t);
+#endif // cl_khr_3d_image_writes
+#endif //defined(__opencl_c_read_write_images)
-int __ovld get_image_num_mip_levels(read_only image1d_array_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_array_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_array_depth_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_depth_t image);
+int __ovld get_image_num_mip_levels(read_only image1d_array_t);
+int __ovld get_image_num_mip_levels(read_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld get_image_num_mip_levels(read_only image2d_array_depth_t);
+int __ovld get_image_num_mip_levels(read_only image2d_depth_t);
+#endif // cl_khr_depth_images
-int __ovld get_image_num_mip_levels(write_only image1d_array_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_array_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_array_depth_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_depth_t image);
+int __ovld get_image_num_mip_levels(write_only image1d_array_t);
+int __ovld get_image_num_mip_levels(write_only image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld get_image_num_mip_levels(write_only image2d_array_depth_t);
+int __ovld get_image_num_mip_levels(write_only image2d_depth_t);
+#endif // cl_khr_depth_images
-int __ovld get_image_num_mip_levels(read_write image1d_array_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_array_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_depth_t image);
+#if defined(__opencl_c_read_write_images)
+int __ovld get_image_num_mip_levels(read_write image1d_array_t);
+int __ovld get_image_num_mip_levels(read_write image2d_array_t);
+#ifdef cl_khr_depth_images
+int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t);
+int __ovld get_image_num_mip_levels(read_write image2d_depth_t);
+#endif // cl_khr_depth_images
+#endif //defined(__opencl_c_read_write_images)
#endif //cl_khr_mipmap_image
#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
@@ -14908,60 +15894,62 @@ int __ovld get_image_num_mip_levels(read_write image2d_depth_t image);
* CLK_FLOAT
*/
-int __ovld __cnfn get_image_channel_data_type(read_only image1d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image3d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image1d_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image1d_buffer_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image3d_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image1d_array_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_depth_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-int __ovld __cnfn get_image_channel_data_type(write_only image1d_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image1d_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image1d_buffer_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_t);
#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_channel_data_type(write_only image3d_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image3d_t);
#endif
-int __ovld __cnfn get_image_channel_data_type(write_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image1d_array_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_depth_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_channel_data_type(read_write image1d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image3d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image1d_array_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_t image);
+#if defined(__opencl_c_read_write_images)
+int __ovld __cnfn get_image_channel_data_type(read_write image1d_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image1d_buffer_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_t);
+#ifdef cl_khr_3d_image_writes
+int __ovld __cnfn get_image_channel_data_type(read_write image3d_t);
+#endif // cl_khr_3d_image_writes
+int __ovld __cnfn get_image_channel_data_type(read_write image1d_array_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_depth_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
/**
* Return the image channel order. Valid values are:
@@ -14980,106 +15968,108 @@ int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_dept
* CLK_LUMINANCE
*/
-int __ovld __cnfn get_image_channel_order(read_only image1d_t image);
-int __ovld __cnfn get_image_channel_order(read_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_t image);
-int __ovld __cnfn get_image_channel_order(read_only image3d_t image);
-int __ovld __cnfn get_image_channel_order(read_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_t image);
+int __ovld __cnfn get_image_channel_order(read_only image1d_t);
+int __ovld __cnfn get_image_channel_order(read_only image1d_buffer_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_t);
+int __ovld __cnfn get_image_channel_order(read_only image3d_t);
+int __ovld __cnfn get_image_channel_order(read_only image1d_array_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_order(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_depth_t image);
+int __ovld __cnfn get_image_channel_order(read_only image2d_depth_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-int __ovld __cnfn get_image_channel_order(write_only image1d_t image);
-int __ovld __cnfn get_image_channel_order(write_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_t image);
+int __ovld __cnfn get_image_channel_order(write_only image1d_t);
+int __ovld __cnfn get_image_channel_order(write_only image1d_buffer_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_t);
#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_channel_order(write_only image3d_t image);
+int __ovld __cnfn get_image_channel_order(write_only image3d_t);
#endif
-int __ovld __cnfn get_image_channel_order(write_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_t image);
+int __ovld __cnfn get_image_channel_order(write_only image1d_array_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_order(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_depth_t image);
+int __ovld __cnfn get_image_channel_order(write_only image2d_depth_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld __cnfn get_image_channel_order(read_write image1d_t image);
-int __ovld __cnfn get_image_channel_order(read_write image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_t image);
-int __ovld __cnfn get_image_channel_order(read_write image3d_t image);
-int __ovld __cnfn get_image_channel_order(read_write image1d_array_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_t image);
+#if defined(__opencl_c_read_write_images)
+int __ovld __cnfn get_image_channel_order(read_write image1d_t);
+int __ovld __cnfn get_image_channel_order(read_write image1d_buffer_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_t);
+#ifdef cl_khr_3d_image_writes
+int __ovld __cnfn get_image_channel_order(read_write image3d_t);
+#endif // cl_khr_3d_image_writes
+int __ovld __cnfn get_image_channel_order(read_write image1d_array_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_order(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_depth_t image);
+int __ovld __cnfn get_image_channel_order(read_write image2d_depth_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
/**
* Return the 2D image width and height as an int2
* type. The width is returned in the x component, and
* the height in the y component.
*/
-int2 __ovld __cnfn get_image_dim(read_only image2d_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_t image);
+int2 __ovld __cnfn get_image_dim(read_only image2d_t);
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_t);
#ifdef cl_khr_depth_images
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_depth_t image);
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_depth_t);
+int2 __ovld __cnfn get_image_dim(read_only image2d_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_depth_t image);
+int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_t);
+int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_depth_t);
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_t);
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-int2 __ovld __cnfn get_image_dim(write_only image2d_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_t image);
+int2 __ovld __cnfn get_image_dim(write_only image2d_t);
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_t);
#ifdef cl_khr_depth_images
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_depth_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_depth_t image);
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_depth_t);
+int2 __ovld __cnfn get_image_dim(write_only image2d_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_depth_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_depth_t image);
+int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_t);
+int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_depth_t);
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_t);
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int2 __ovld __cnfn get_image_dim(read_write image2d_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_t image);
+#if defined(__opencl_c_read_write_images)
+int2 __ovld __cnfn get_image_dim(read_write image2d_t);
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_t);
#ifdef cl_khr_depth_images
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_depth_t image);
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_depth_t);
+int2 __ovld __cnfn get_image_dim(read_write image2d_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_depth_t image);
+int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_t);
+int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_depth_t);
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_t);
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
/**
* Return the 3D image width, height, and depth as an
@@ -15087,183 +16077,184 @@ int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_depth_t image);
* component, height in the y component, depth in the z
* component and the w component is 0.
*/
-int4 __ovld __cnfn get_image_dim(read_only image3d_t image);
+int4 __ovld __cnfn get_image_dim(read_only image3d_t);
#ifdef cl_khr_3d_image_writes
-int4 __ovld __cnfn get_image_dim(write_only image3d_t image);
-#endif
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int4 __ovld __cnfn get_image_dim(read_write image3d_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+int4 __ovld __cnfn get_image_dim(write_only image3d_t);
+#if defined(__opencl_c_read_write_images)
+int4 __ovld __cnfn get_image_dim(read_write image3d_t);
+#endif //defined(__opencl_c_read_write_images)
+#endif // cl_khr_3d_image_writes
/**
* Return the image array size.
*/
-size_t __ovld __cnfn get_image_array_size(read_only image1d_array_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_t image_array);
+size_t __ovld __cnfn get_image_array_size(read_only image1d_array_t);
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_t);
#ifdef cl_khr_depth_images
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_depth_t image_array);
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_depth_t image_array);
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_t);
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-size_t __ovld __cnfn get_image_array_size(write_only image1d_array_t image_array);
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_t image_array);
+size_t __ovld __cnfn get_image_array_size(write_only image1d_array_t);
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_t);
#ifdef cl_khr_depth_images
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_depth_t image_array);
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_t image_array);
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_depth_t image_array);
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_t);
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-size_t __ovld __cnfn get_image_array_size(read_write image1d_array_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_t image_array);
+#if defined(__opencl_c_read_write_images)
+size_t __ovld __cnfn get_image_array_size(read_write image1d_array_t);
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_t);
#ifdef cl_khr_depth_images
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_depth_t image_array);
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_depth_t image_array);
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_t);
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_read_write_images)
/**
* Return the number of samples associated with image
*/
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld get_image_num_samples(read_only image2d_msaa_t image);
-int __ovld get_image_num_samples(read_only image2d_msaa_depth_t image);
-int __ovld get_image_num_samples(read_only image2d_array_msaa_t image);
-int __ovld get_image_num_samples(read_only image2d_array_msaa_depth_t image);
-
-int __ovld get_image_num_samples(write_only image2d_msaa_t image);
-int __ovld get_image_num_samples(write_only image2d_msaa_depth_t image);
-int __ovld get_image_num_samples(write_only image2d_array_msaa_t image);
-int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image);
-
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-int __ovld get_image_num_samples(read_write image2d_msaa_t image);
-int __ovld get_image_num_samples(read_write image2d_msaa_depth_t image);
-int __ovld get_image_num_samples(read_write image2d_array_msaa_t image);
-int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+int __ovld __cnfn get_image_num_samples(read_only image2d_msaa_t);
+int __ovld __cnfn get_image_num_samples(read_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_num_samples(read_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_num_samples(read_only image2d_array_msaa_depth_t);
+
+int __ovld __cnfn get_image_num_samples(write_only image2d_msaa_t);
+int __ovld __cnfn get_image_num_samples(write_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_num_samples(write_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_num_samples(write_only image2d_array_msaa_depth_t);
+
+#if defined(__opencl_c_read_write_images)
+int __ovld __cnfn get_image_num_samples(read_write image2d_msaa_t);
+int __ovld __cnfn get_image_num_samples(read_write image2d_msaa_depth_t);
+int __ovld __cnfn get_image_num_samples(read_write image2d_array_msaa_t);
+int __ovld __cnfn get_image_num_samples(read_write image2d_array_msaa_depth_t);
+#endif //defined(__opencl_c_read_write_images)
#endif
// OpenCL v2.0 s6.13.15 - Work-group Functions
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_work_group_collective_functions)
int __ovld __conv work_group_all(int predicate);
int __ovld __conv work_group_any(int predicate);
#ifdef cl_khr_fp16
-half __ovld __conv work_group_broadcast(half a, size_t local_id);
-half __ovld __conv work_group_broadcast(half a, size_t x, size_t y);
-half __ovld __conv work_group_broadcast(half a, size_t x, size_t y, size_t z);
+half __ovld __conv work_group_broadcast(half, size_t local_id);
+half __ovld __conv work_group_broadcast(half, size_t, size_t);
+half __ovld __conv work_group_broadcast(half, size_t, size_t, size_t);
#endif
-int __ovld __conv work_group_broadcast(int a, size_t local_id);
-int __ovld __conv work_group_broadcast(int a, size_t x, size_t y);
-int __ovld __conv work_group_broadcast(int a, size_t x, size_t y, size_t z);
-uint __ovld __conv work_group_broadcast(uint a, size_t local_id);
-uint __ovld __conv work_group_broadcast(uint a, size_t x, size_t y);
-uint __ovld __conv work_group_broadcast(uint a, size_t x, size_t y, size_t z);
-long __ovld __conv work_group_broadcast(long a, size_t local_id);
-long __ovld __conv work_group_broadcast(long a, size_t x, size_t y);
-long __ovld __conv work_group_broadcast(long a, size_t x, size_t y, size_t z);
-ulong __ovld __conv work_group_broadcast(ulong a, size_t local_id);
-ulong __ovld __conv work_group_broadcast(ulong a, size_t x, size_t y);
-ulong __ovld __conv work_group_broadcast(ulong a, size_t x, size_t y, size_t z);
-float __ovld __conv work_group_broadcast(float a, size_t local_id);
-float __ovld __conv work_group_broadcast(float a, size_t x, size_t y);
-float __ovld __conv work_group_broadcast(float a, size_t x, size_t y, size_t z);
+int __ovld __conv work_group_broadcast(int, size_t local_id);
+int __ovld __conv work_group_broadcast(int, size_t, size_t);
+int __ovld __conv work_group_broadcast(int, size_t, size_t, size_t);
+uint __ovld __conv work_group_broadcast(uint, size_t local_id);
+uint __ovld __conv work_group_broadcast(uint, size_t, size_t);
+uint __ovld __conv work_group_broadcast(uint, size_t, size_t, size_t);
+long __ovld __conv work_group_broadcast(long, size_t local_id);
+long __ovld __conv work_group_broadcast(long, size_t, size_t);
+long __ovld __conv work_group_broadcast(long, size_t, size_t, size_t);
+ulong __ovld __conv work_group_broadcast(ulong, size_t local_id);
+ulong __ovld __conv work_group_broadcast(ulong, size_t, size_t);
+ulong __ovld __conv work_group_broadcast(ulong, size_t, size_t, size_t);
+float __ovld __conv work_group_broadcast(float, size_t local_id);
+float __ovld __conv work_group_broadcast(float, size_t, size_t);
+float __ovld __conv work_group_broadcast(float, size_t, size_t, size_t);
#ifdef cl_khr_fp64
-double __ovld __conv work_group_broadcast(double a, size_t local_id);
-double __ovld __conv work_group_broadcast(double a, size_t x, size_t y);
-double __ovld __conv work_group_broadcast(double a, size_t x, size_t y, size_t z);
+double __ovld __conv work_group_broadcast(double, size_t local_id);
+double __ovld __conv work_group_broadcast(double, size_t, size_t);
+double __ovld __conv work_group_broadcast(double, size_t, size_t, size_t);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __conv work_group_reduce_add(half x);
-half __ovld __conv work_group_reduce_min(half x);
-half __ovld __conv work_group_reduce_max(half x);
-half __ovld __conv work_group_scan_exclusive_add(half x);
-half __ovld __conv work_group_scan_exclusive_min(half x);
-half __ovld __conv work_group_scan_exclusive_max(half x);
-half __ovld __conv work_group_scan_inclusive_add(half x);
-half __ovld __conv work_group_scan_inclusive_min(half x);
-half __ovld __conv work_group_scan_inclusive_max(half x);
+half __ovld __conv work_group_reduce_add(half);
+half __ovld __conv work_group_reduce_min(half);
+half __ovld __conv work_group_reduce_max(half);
+half __ovld __conv work_group_scan_exclusive_add(half);
+half __ovld __conv work_group_scan_exclusive_min(half);
+half __ovld __conv work_group_scan_exclusive_max(half);
+half __ovld __conv work_group_scan_inclusive_add(half);
+half __ovld __conv work_group_scan_inclusive_min(half);
+half __ovld __conv work_group_scan_inclusive_max(half);
#endif
-int __ovld __conv work_group_reduce_add(int x);
-int __ovld __conv work_group_reduce_min(int x);
-int __ovld __conv work_group_reduce_max(int x);
-int __ovld __conv work_group_scan_exclusive_add(int x);
-int __ovld __conv work_group_scan_exclusive_min(int x);
-int __ovld __conv work_group_scan_exclusive_max(int x);
-int __ovld __conv work_group_scan_inclusive_add(int x);
-int __ovld __conv work_group_scan_inclusive_min(int x);
-int __ovld __conv work_group_scan_inclusive_max(int x);
-uint __ovld __conv work_group_reduce_add(uint x);
-uint __ovld __conv work_group_reduce_min(uint x);
-uint __ovld __conv work_group_reduce_max(uint x);
-uint __ovld __conv work_group_scan_exclusive_add(uint x);
-uint __ovld __conv work_group_scan_exclusive_min(uint x);
-uint __ovld __conv work_group_scan_exclusive_max(uint x);
-uint __ovld __conv work_group_scan_inclusive_add(uint x);
-uint __ovld __conv work_group_scan_inclusive_min(uint x);
-uint __ovld __conv work_group_scan_inclusive_max(uint x);
-long __ovld __conv work_group_reduce_add(long x);
-long __ovld __conv work_group_reduce_min(long x);
-long __ovld __conv work_group_reduce_max(long x);
-long __ovld __conv work_group_scan_exclusive_add(long x);
-long __ovld __conv work_group_scan_exclusive_min(long x);
-long __ovld __conv work_group_scan_exclusive_max(long x);
-long __ovld __conv work_group_scan_inclusive_add(long x);
-long __ovld __conv work_group_scan_inclusive_min(long x);
-long __ovld __conv work_group_scan_inclusive_max(long x);
-ulong __ovld __conv work_group_reduce_add(ulong x);
-ulong __ovld __conv work_group_reduce_min(ulong x);
-ulong __ovld __conv work_group_reduce_max(ulong x);
-ulong __ovld __conv work_group_scan_exclusive_add(ulong x);
-ulong __ovld __conv work_group_scan_exclusive_min(ulong x);
-ulong __ovld __conv work_group_scan_exclusive_max(ulong x);
-ulong __ovld __conv work_group_scan_inclusive_add(ulong x);
-ulong __ovld __conv work_group_scan_inclusive_min(ulong x);
-ulong __ovld __conv work_group_scan_inclusive_max(ulong x);
-float __ovld __conv work_group_reduce_add(float x);
-float __ovld __conv work_group_reduce_min(float x);
-float __ovld __conv work_group_reduce_max(float x);
-float __ovld __conv work_group_scan_exclusive_add(float x);
-float __ovld __conv work_group_scan_exclusive_min(float x);
-float __ovld __conv work_group_scan_exclusive_max(float x);
-float __ovld __conv work_group_scan_inclusive_add(float x);
-float __ovld __conv work_group_scan_inclusive_min(float x);
-float __ovld __conv work_group_scan_inclusive_max(float x);
+int __ovld __conv work_group_reduce_add(int);
+int __ovld __conv work_group_reduce_min(int);
+int __ovld __conv work_group_reduce_max(int);
+int __ovld __conv work_group_scan_exclusive_add(int);
+int __ovld __conv work_group_scan_exclusive_min(int);
+int __ovld __conv work_group_scan_exclusive_max(int);
+int __ovld __conv work_group_scan_inclusive_add(int);
+int __ovld __conv work_group_scan_inclusive_min(int);
+int __ovld __conv work_group_scan_inclusive_max(int);
+uint __ovld __conv work_group_reduce_add(uint);
+uint __ovld __conv work_group_reduce_min(uint);
+uint __ovld __conv work_group_reduce_max(uint);
+uint __ovld __conv work_group_scan_exclusive_add(uint);
+uint __ovld __conv work_group_scan_exclusive_min(uint);
+uint __ovld __conv work_group_scan_exclusive_max(uint);
+uint __ovld __conv work_group_scan_inclusive_add(uint);
+uint __ovld __conv work_group_scan_inclusive_min(uint);
+uint __ovld __conv work_group_scan_inclusive_max(uint);
+long __ovld __conv work_group_reduce_add(long);
+long __ovld __conv work_group_reduce_min(long);
+long __ovld __conv work_group_reduce_max(long);
+long __ovld __conv work_group_scan_exclusive_add(long);
+long __ovld __conv work_group_scan_exclusive_min(long);
+long __ovld __conv work_group_scan_exclusive_max(long);
+long __ovld __conv work_group_scan_inclusive_add(long);
+long __ovld __conv work_group_scan_inclusive_min(long);
+long __ovld __conv work_group_scan_inclusive_max(long);
+ulong __ovld __conv work_group_reduce_add(ulong);
+ulong __ovld __conv work_group_reduce_min(ulong);
+ulong __ovld __conv work_group_reduce_max(ulong);
+ulong __ovld __conv work_group_scan_exclusive_add(ulong);
+ulong __ovld __conv work_group_scan_exclusive_min(ulong);
+ulong __ovld __conv work_group_scan_exclusive_max(ulong);
+ulong __ovld __conv work_group_scan_inclusive_add(ulong);
+ulong __ovld __conv work_group_scan_inclusive_min(ulong);
+ulong __ovld __conv work_group_scan_inclusive_max(ulong);
+float __ovld __conv work_group_reduce_add(float);
+float __ovld __conv work_group_reduce_min(float);
+float __ovld __conv work_group_reduce_max(float);
+float __ovld __conv work_group_scan_exclusive_add(float);
+float __ovld __conv work_group_scan_exclusive_min(float);
+float __ovld __conv work_group_scan_exclusive_max(float);
+float __ovld __conv work_group_scan_inclusive_add(float);
+float __ovld __conv work_group_scan_inclusive_min(float);
+float __ovld __conv work_group_scan_inclusive_max(float);
#ifdef cl_khr_fp64
-double __ovld __conv work_group_reduce_add(double x);
-double __ovld __conv work_group_reduce_min(double x);
-double __ovld __conv work_group_reduce_max(double x);
-double __ovld __conv work_group_scan_exclusive_add(double x);
-double __ovld __conv work_group_scan_exclusive_min(double x);
-double __ovld __conv work_group_scan_exclusive_max(double x);
-double __ovld __conv work_group_scan_inclusive_add(double x);
-double __ovld __conv work_group_scan_inclusive_min(double x);
-double __ovld __conv work_group_scan_inclusive_max(double x);
+double __ovld __conv work_group_reduce_add(double);
+double __ovld __conv work_group_reduce_min(double);
+double __ovld __conv work_group_reduce_max(double);
+double __ovld __conv work_group_scan_exclusive_add(double);
+double __ovld __conv work_group_scan_exclusive_min(double);
+double __ovld __conv work_group_scan_exclusive_max(double);
+double __ovld __conv work_group_scan_inclusive_add(double);
+double __ovld __conv work_group_scan_inclusive_min(double);
+double __ovld __conv work_group_scan_inclusive_max(double);
#endif //cl_khr_fp64
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_work_group_collective_functions)
// OpenCL v2.0 s6.13.16 - Pipe Functions
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_pipes)
bool __ovld is_valid_reserve_id(reserve_id_t reserve_id);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_pipes)
// OpenCL v2.0 s6.13.17 - Enqueue Kernels
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#ifdef __opencl_c_device_enqueue
ndrange_t __ovld ndrange_1D(size_t);
ndrange_t __ovld ndrange_1D(size_t, size_t);
ndrange_t __ovld ndrange_1D(size_t, size_t, size_t);
@@ -15288,14 +16279,15 @@ void __ovld set_user_event_status(clk_event_t e, int state);
bool __ovld is_valid_event (clk_event_t event);
-void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void* value);
+void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void*);
queue_t __ovld get_default_queue(void);
+#endif //__opencl_c_device_enqueue
#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// OpenCL Extension v2.0 s9.17 - Sub-groups
-#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups)
+#if defined(__opencl_subgroup_builtins)
// Shared Sub Group Functions
uint __ovld get_sub_group_size(void);
uint __ovld get_max_sub_group_size(void);
@@ -15306,95 +16298,95 @@ uint __ovld get_enqueued_num_sub_groups(void);
uint __ovld get_sub_group_id(void);
uint __ovld get_sub_group_local_id(void);
-void __ovld __conv sub_group_barrier(cl_mem_fence_flags flags);
+void __ovld __conv sub_group_barrier(cl_mem_fence_flags);
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void __ovld __conv sub_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
+void __ovld __conv sub_group_barrier(cl_mem_fence_flags, memory_scope);
#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
int __ovld __conv sub_group_all(int predicate);
int __ovld __conv sub_group_any(int predicate);
-int __ovld __conv sub_group_broadcast(int x, uint sub_group_local_id);
-uint __ovld __conv sub_group_broadcast(uint x, uint sub_group_local_id);
-long __ovld __conv sub_group_broadcast(long x, uint sub_group_local_id);
-ulong __ovld __conv sub_group_broadcast(ulong x, uint sub_group_local_id);
-float __ovld __conv sub_group_broadcast(float x, uint sub_group_local_id);
-
-int __ovld __conv sub_group_reduce_add(int x);
-uint __ovld __conv sub_group_reduce_add(uint x);
-long __ovld __conv sub_group_reduce_add(long x);
-ulong __ovld __conv sub_group_reduce_add(ulong x);
-float __ovld __conv sub_group_reduce_add(float x);
-int __ovld __conv sub_group_reduce_min(int x);
-uint __ovld __conv sub_group_reduce_min(uint x);
-long __ovld __conv sub_group_reduce_min(long x);
-ulong __ovld __conv sub_group_reduce_min(ulong x);
-float __ovld __conv sub_group_reduce_min(float x);
-int __ovld __conv sub_group_reduce_max(int x);
-uint __ovld __conv sub_group_reduce_max(uint x);
-long __ovld __conv sub_group_reduce_max(long x);
-ulong __ovld __conv sub_group_reduce_max(ulong x);
-float __ovld __conv sub_group_reduce_max(float x);
-
-int __ovld __conv sub_group_scan_exclusive_add(int x);
-uint __ovld __conv sub_group_scan_exclusive_add(uint x);
-long __ovld __conv sub_group_scan_exclusive_add(long x);
-ulong __ovld __conv sub_group_scan_exclusive_add(ulong x);
-float __ovld __conv sub_group_scan_exclusive_add(float x);
-int __ovld __conv sub_group_scan_exclusive_min(int x);
-uint __ovld __conv sub_group_scan_exclusive_min(uint x);
-long __ovld __conv sub_group_scan_exclusive_min(long x);
-ulong __ovld __conv sub_group_scan_exclusive_min(ulong x);
-float __ovld __conv sub_group_scan_exclusive_min(float x);
-int __ovld __conv sub_group_scan_exclusive_max(int x);
-uint __ovld __conv sub_group_scan_exclusive_max(uint x);
-long __ovld __conv sub_group_scan_exclusive_max(long x);
-ulong __ovld __conv sub_group_scan_exclusive_max(ulong x);
-float __ovld __conv sub_group_scan_exclusive_max(float x);
-
-int __ovld __conv sub_group_scan_inclusive_add(int x);
-uint __ovld __conv sub_group_scan_inclusive_add(uint x);
-long __ovld __conv sub_group_scan_inclusive_add(long x);
-ulong __ovld __conv sub_group_scan_inclusive_add(ulong x);
-float __ovld __conv sub_group_scan_inclusive_add(float x);
-int __ovld __conv sub_group_scan_inclusive_min(int x);
-uint __ovld __conv sub_group_scan_inclusive_min(uint x);
-long __ovld __conv sub_group_scan_inclusive_min(long x);
-ulong __ovld __conv sub_group_scan_inclusive_min(ulong x);
-float __ovld __conv sub_group_scan_inclusive_min(float x);
-int __ovld __conv sub_group_scan_inclusive_max(int x);
-uint __ovld __conv sub_group_scan_inclusive_max(uint x);
-long __ovld __conv sub_group_scan_inclusive_max(long x);
-ulong __ovld __conv sub_group_scan_inclusive_max(ulong x);
-float __ovld __conv sub_group_scan_inclusive_max(float x);
+int __ovld __conv sub_group_broadcast(int , uint sub_group_local_id);
+uint __ovld __conv sub_group_broadcast(uint , uint sub_group_local_id);
+long __ovld __conv sub_group_broadcast(long , uint sub_group_local_id);
+ulong __ovld __conv sub_group_broadcast(ulong, uint sub_group_local_id);
+float __ovld __conv sub_group_broadcast(float, uint sub_group_local_id);
+
+int __ovld __conv sub_group_reduce_add(int );
+uint __ovld __conv sub_group_reduce_add(uint );
+long __ovld __conv sub_group_reduce_add(long );
+ulong __ovld __conv sub_group_reduce_add(ulong);
+float __ovld __conv sub_group_reduce_add(float);
+int __ovld __conv sub_group_reduce_min(int );
+uint __ovld __conv sub_group_reduce_min(uint );
+long __ovld __conv sub_group_reduce_min(long );
+ulong __ovld __conv sub_group_reduce_min(ulong);
+float __ovld __conv sub_group_reduce_min(float);
+int __ovld __conv sub_group_reduce_max(int );
+uint __ovld __conv sub_group_reduce_max(uint );
+long __ovld __conv sub_group_reduce_max(long );
+ulong __ovld __conv sub_group_reduce_max(ulong);
+float __ovld __conv sub_group_reduce_max(float);
+
+int __ovld __conv sub_group_scan_exclusive_add(int );
+uint __ovld __conv sub_group_scan_exclusive_add(uint );
+long __ovld __conv sub_group_scan_exclusive_add(long );
+ulong __ovld __conv sub_group_scan_exclusive_add(ulong);
+float __ovld __conv sub_group_scan_exclusive_add(float);
+int __ovld __conv sub_group_scan_exclusive_min(int );
+uint __ovld __conv sub_group_scan_exclusive_min(uint );
+long __ovld __conv sub_group_scan_exclusive_min(long );
+ulong __ovld __conv sub_group_scan_exclusive_min(ulong);
+float __ovld __conv sub_group_scan_exclusive_min(float);
+int __ovld __conv sub_group_scan_exclusive_max(int );
+uint __ovld __conv sub_group_scan_exclusive_max(uint );
+long __ovld __conv sub_group_scan_exclusive_max(long );
+ulong __ovld __conv sub_group_scan_exclusive_max(ulong);
+float __ovld __conv sub_group_scan_exclusive_max(float);
+
+int __ovld __conv sub_group_scan_inclusive_add(int );
+uint __ovld __conv sub_group_scan_inclusive_add(uint );
+long __ovld __conv sub_group_scan_inclusive_add(long );
+ulong __ovld __conv sub_group_scan_inclusive_add(ulong);
+float __ovld __conv sub_group_scan_inclusive_add(float);
+int __ovld __conv sub_group_scan_inclusive_min(int );
+uint __ovld __conv sub_group_scan_inclusive_min(uint );
+long __ovld __conv sub_group_scan_inclusive_min(long );
+ulong __ovld __conv sub_group_scan_inclusive_min(ulong);
+float __ovld __conv sub_group_scan_inclusive_min(float);
+int __ovld __conv sub_group_scan_inclusive_max(int );
+uint __ovld __conv sub_group_scan_inclusive_max(uint );
+long __ovld __conv sub_group_scan_inclusive_max(long );
+ulong __ovld __conv sub_group_scan_inclusive_max(ulong);
+float __ovld __conv sub_group_scan_inclusive_max(float);
#ifdef cl_khr_fp16
-half __ovld __conv sub_group_broadcast(half x, uint sub_group_local_id);
-half __ovld __conv sub_group_reduce_add(half x);
-half __ovld __conv sub_group_reduce_min(half x);
-half __ovld __conv sub_group_reduce_max(half x);
-half __ovld __conv sub_group_scan_exclusive_add(half x);
-half __ovld __conv sub_group_scan_exclusive_min(half x);
-half __ovld __conv sub_group_scan_exclusive_max(half x);
-half __ovld __conv sub_group_scan_inclusive_add(half x);
-half __ovld __conv sub_group_scan_inclusive_min(half x);
-half __ovld __conv sub_group_scan_inclusive_max(half x);
+half __ovld __conv sub_group_broadcast(half, uint sub_group_local_id);
+half __ovld __conv sub_group_reduce_add(half);
+half __ovld __conv sub_group_reduce_min(half);
+half __ovld __conv sub_group_reduce_max(half);
+half __ovld __conv sub_group_scan_exclusive_add(half);
+half __ovld __conv sub_group_scan_exclusive_min(half);
+half __ovld __conv sub_group_scan_exclusive_max(half);
+half __ovld __conv sub_group_scan_inclusive_add(half);
+half __ovld __conv sub_group_scan_inclusive_min(half);
+half __ovld __conv sub_group_scan_inclusive_max(half);
#endif //cl_khr_fp16
#ifdef cl_khr_fp64
-double __ovld __conv sub_group_broadcast(double x, uint sub_group_local_id);
-double __ovld __conv sub_group_reduce_add(double x);
-double __ovld __conv sub_group_reduce_min(double x);
-double __ovld __conv sub_group_reduce_max(double x);
-double __ovld __conv sub_group_scan_exclusive_add(double x);
-double __ovld __conv sub_group_scan_exclusive_min(double x);
-double __ovld __conv sub_group_scan_exclusive_max(double x);
-double __ovld __conv sub_group_scan_inclusive_add(double x);
-double __ovld __conv sub_group_scan_inclusive_min(double x);
-double __ovld __conv sub_group_scan_inclusive_max(double x);
+double __ovld __conv sub_group_broadcast(double, uint sub_group_local_id);
+double __ovld __conv sub_group_reduce_add(double);
+double __ovld __conv sub_group_reduce_min(double);
+double __ovld __conv sub_group_reduce_max(double);
+double __ovld __conv sub_group_scan_exclusive_add(double);
+double __ovld __conv sub_group_scan_exclusive_min(double);
+double __ovld __conv sub_group_scan_exclusive_max(double);
+double __ovld __conv sub_group_scan_inclusive_add(double);
+double __ovld __conv sub_group_scan_inclusive_min(double);
+double __ovld __conv sub_group_scan_inclusive_max(double);
#endif //cl_khr_fp64
-#endif //cl_khr_subgroups cl_intel_subgroups
+#endif // __opencl_subgroup_builtins
#if defined(cl_khr_subgroup_extended_types)
char __ovld __conv sub_group_broadcast( char value, uint index );
@@ -16288,132 +17280,170 @@ int __ovld __cnfn dot_acc_sat_4x8packed_us_int(uint, uint, int);
int __ovld __cnfn dot_acc_sat_4x8packed_su_int(uint, uint, int);
#endif // __opencl_c_integer_dot_product_input_4x8bit_packed
+#if defined(cl_khr_subgroup_rotate)
+char __ovld __conv sub_group_rotate(char, int);
+uchar __ovld __conv sub_group_rotate(uchar, int);
+short __ovld __conv sub_group_rotate(short, int);
+ushort __ovld __conv sub_group_rotate(ushort, int);
+int __ovld __conv sub_group_rotate(int, int);
+uint __ovld __conv sub_group_rotate(uint, int);
+long __ovld __conv sub_group_rotate(long, int);
+ulong __ovld __conv sub_group_rotate(ulong, int);
+float __ovld __conv sub_group_rotate(float, int);
+#if defined(cl_khr_fp64)
+double __ovld __conv sub_group_rotate(double, int);
+#endif // cl_khr_fp64
+#if defined(cl_khr_fp16)
+half __ovld __conv sub_group_rotate(half, int);
+#endif // cl_khr_fp16
+
+char __ovld __conv sub_group_clustered_rotate(char, int, uint);
+uchar __ovld __conv sub_group_clustered_rotate(uchar, int, uint);
+short __ovld __conv sub_group_clustered_rotate(short, int, uint);
+ushort __ovld __conv sub_group_clustered_rotate(ushort, int, uint);
+int __ovld __conv sub_group_clustered_rotate(int, int, uint);
+uint __ovld __conv sub_group_clustered_rotate(uint, int, uint);
+long __ovld __conv sub_group_clustered_rotate(long, int, uint);
+ulong __ovld __conv sub_group_clustered_rotate(ulong, int, uint);
+float __ovld __conv sub_group_clustered_rotate(float, int, uint);
+#if defined(cl_khr_fp64)
+double __ovld __conv sub_group_clustered_rotate(double, int, uint);
+#endif // cl_khr_fp64
+#if defined(cl_khr_fp16)
+half __ovld __conv sub_group_clustered_rotate(half, int, uint);
+#endif // cl_khr_fp16
+#endif // cl_khr_subgroup_rotate
+
#if defined(cl_intel_subgroups)
// Intel-Specific Sub Group Functions
-float __ovld __conv intel_sub_group_shuffle( float x, uint c );
-float2 __ovld __conv intel_sub_group_shuffle( float2 x, uint c );
-float3 __ovld __conv intel_sub_group_shuffle( float3 x, uint c );
-float4 __ovld __conv intel_sub_group_shuffle( float4 x, uint c );
-float8 __ovld __conv intel_sub_group_shuffle( float8 x, uint c );
-float16 __ovld __conv intel_sub_group_shuffle( float16 x, uint c );
-
-int __ovld __conv intel_sub_group_shuffle( int x, uint c );
-int2 __ovld __conv intel_sub_group_shuffle( int2 x, uint c );
-int3 __ovld __conv intel_sub_group_shuffle( int3 x, uint c );
-int4 __ovld __conv intel_sub_group_shuffle( int4 x, uint c );
-int8 __ovld __conv intel_sub_group_shuffle( int8 x, uint c );
-int16 __ovld __conv intel_sub_group_shuffle( int16 x, uint c );
-
-uint __ovld __conv intel_sub_group_shuffle( uint x, uint c );
-uint2 __ovld __conv intel_sub_group_shuffle( uint2 x, uint c );
-uint3 __ovld __conv intel_sub_group_shuffle( uint3 x, uint c );
-uint4 __ovld __conv intel_sub_group_shuffle( uint4 x, uint c );
-uint8 __ovld __conv intel_sub_group_shuffle( uint8 x, uint c );
-uint16 __ovld __conv intel_sub_group_shuffle( uint16 x, uint c );
-
-long __ovld __conv intel_sub_group_shuffle( long x, uint c );
-ulong __ovld __conv intel_sub_group_shuffle( ulong x, uint c );
-
-float __ovld __conv intel_sub_group_shuffle_down( float cur, float next, uint c );
-float2 __ovld __conv intel_sub_group_shuffle_down( float2 cur, float2 next, uint c );
-float3 __ovld __conv intel_sub_group_shuffle_down( float3 cur, float3 next, uint c );
-float4 __ovld __conv intel_sub_group_shuffle_down( float4 cur, float4 next, uint c );
-float8 __ovld __conv intel_sub_group_shuffle_down( float8 cur, float8 next, uint c );
-float16 __ovld __conv intel_sub_group_shuffle_down( float16 cur, float16 next, uint c );
-
-int __ovld __conv intel_sub_group_shuffle_down( int cur, int next, uint c );
-int2 __ovld __conv intel_sub_group_shuffle_down( int2 cur, int2 next, uint c );
-int3 __ovld __conv intel_sub_group_shuffle_down( int3 cur, int3 next, uint c );
-int4 __ovld __conv intel_sub_group_shuffle_down( int4 cur, int4 next, uint c );
-int8 __ovld __conv intel_sub_group_shuffle_down( int8 cur, int8 next, uint c );
-int16 __ovld __conv intel_sub_group_shuffle_down( int16 cur, int16 next, uint c );
-
-uint __ovld __conv intel_sub_group_shuffle_down( uint cur, uint next, uint c );
-uint2 __ovld __conv intel_sub_group_shuffle_down( uint2 cur, uint2 next, uint c );
-uint3 __ovld __conv intel_sub_group_shuffle_down( uint3 cur, uint3 next, uint c );
-uint4 __ovld __conv intel_sub_group_shuffle_down( uint4 cur, uint4 next, uint c );
-uint8 __ovld __conv intel_sub_group_shuffle_down( uint8 cur, uint8 next, uint c );
-uint16 __ovld __conv intel_sub_group_shuffle_down( uint16 cur, uint16 next, uint c );
-
-long __ovld __conv intel_sub_group_shuffle_down( long prev, long cur, uint c );
-ulong __ovld __conv intel_sub_group_shuffle_down( ulong prev, ulong cur, uint c );
-
-float __ovld __conv intel_sub_group_shuffle_up( float prev, float cur, uint c );
-float2 __ovld __conv intel_sub_group_shuffle_up( float2 prev, float2 cur, uint c );
-float3 __ovld __conv intel_sub_group_shuffle_up( float3 prev, float3 cur, uint c );
-float4 __ovld __conv intel_sub_group_shuffle_up( float4 prev, float4 cur, uint c );
-float8 __ovld __conv intel_sub_group_shuffle_up( float8 prev, float8 cur, uint c );
-float16 __ovld __conv intel_sub_group_shuffle_up( float16 prev, float16 cur, uint c );
-
-int __ovld __conv intel_sub_group_shuffle_up( int prev, int cur, uint c );
-int2 __ovld __conv intel_sub_group_shuffle_up( int2 prev, int2 cur, uint c );
-int3 __ovld __conv intel_sub_group_shuffle_up( int3 prev, int3 cur, uint c );
-int4 __ovld __conv intel_sub_group_shuffle_up( int4 prev, int4 cur, uint c );
-int8 __ovld __conv intel_sub_group_shuffle_up( int8 prev, int8 cur, uint c );
-int16 __ovld __conv intel_sub_group_shuffle_up( int16 prev, int16 cur, uint c );
-
-uint __ovld __conv intel_sub_group_shuffle_up( uint prev, uint cur, uint c );
-uint2 __ovld __conv intel_sub_group_shuffle_up( uint2 prev, uint2 cur, uint c );
-uint3 __ovld __conv intel_sub_group_shuffle_up( uint3 prev, uint3 cur, uint c );
-uint4 __ovld __conv intel_sub_group_shuffle_up( uint4 prev, uint4 cur, uint c );
-uint8 __ovld __conv intel_sub_group_shuffle_up( uint8 prev, uint8 cur, uint c );
-uint16 __ovld __conv intel_sub_group_shuffle_up( uint16 prev, uint16 cur, uint c );
-
-long __ovld __conv intel_sub_group_shuffle_up( long prev, long cur, uint c );
-ulong __ovld __conv intel_sub_group_shuffle_up( ulong prev, ulong cur, uint c );
-
-float __ovld __conv intel_sub_group_shuffle_xor( float x, uint c );
-float2 __ovld __conv intel_sub_group_shuffle_xor( float2 x, uint c );
-float3 __ovld __conv intel_sub_group_shuffle_xor( float3 x, uint c );
-float4 __ovld __conv intel_sub_group_shuffle_xor( float4 x, uint c );
-float8 __ovld __conv intel_sub_group_shuffle_xor( float8 x, uint c );
-float16 __ovld __conv intel_sub_group_shuffle_xor( float16 x, uint c );
-
-int __ovld __conv intel_sub_group_shuffle_xor( int x, uint c );
-int2 __ovld __conv intel_sub_group_shuffle_xor( int2 x, uint c );
-int3 __ovld __conv intel_sub_group_shuffle_xor( int3 x, uint c );
-int4 __ovld __conv intel_sub_group_shuffle_xor( int4 x, uint c );
-int8 __ovld __conv intel_sub_group_shuffle_xor( int8 x, uint c );
-int16 __ovld __conv intel_sub_group_shuffle_xor( int16 x, uint c );
-
-uint __ovld __conv intel_sub_group_shuffle_xor( uint x, uint c );
-uint2 __ovld __conv intel_sub_group_shuffle_xor( uint2 x, uint c );
-uint3 __ovld __conv intel_sub_group_shuffle_xor( uint3 x, uint c );
-uint4 __ovld __conv intel_sub_group_shuffle_xor( uint4 x, uint c );
-uint8 __ovld __conv intel_sub_group_shuffle_xor( uint8 x, uint c );
-uint16 __ovld __conv intel_sub_group_shuffle_xor( uint16 x, uint c );
-
-long __ovld __conv intel_sub_group_shuffle_xor( long x, uint c );
-ulong __ovld __conv intel_sub_group_shuffle_xor( ulong x, uint c );
-
-uint __ovld __conv intel_sub_group_block_read( read_only image2d_t image, int2 coord );
-uint2 __ovld __conv intel_sub_group_block_read2( read_only image2d_t image, int2 coord );
-uint4 __ovld __conv intel_sub_group_block_read4( read_only image2d_t image, int2 coord );
-uint8 __ovld __conv intel_sub_group_block_read8( read_only image2d_t image, int2 coord );
+float __ovld __conv intel_sub_group_shuffle( float , uint );
+float2 __ovld __conv intel_sub_group_shuffle( float2, uint );
+float3 __ovld __conv intel_sub_group_shuffle( float3, uint );
+float4 __ovld __conv intel_sub_group_shuffle( float4, uint );
+float8 __ovld __conv intel_sub_group_shuffle( float8, uint );
+float16 __ovld __conv intel_sub_group_shuffle( float16, uint );
+
+int __ovld __conv intel_sub_group_shuffle( int , uint );
+int2 __ovld __conv intel_sub_group_shuffle( int2, uint );
+int3 __ovld __conv intel_sub_group_shuffle( int3, uint );
+int4 __ovld __conv intel_sub_group_shuffle( int4, uint );
+int8 __ovld __conv intel_sub_group_shuffle( int8, uint );
+int16 __ovld __conv intel_sub_group_shuffle( int16, uint );
+
+uint __ovld __conv intel_sub_group_shuffle( uint , uint );
+uint2 __ovld __conv intel_sub_group_shuffle( uint2, uint );
+uint3 __ovld __conv intel_sub_group_shuffle( uint3, uint );
+uint4 __ovld __conv intel_sub_group_shuffle( uint4, uint );
+uint8 __ovld __conv intel_sub_group_shuffle( uint8, uint );
+uint16 __ovld __conv intel_sub_group_shuffle( uint16, uint );
+
+long __ovld __conv intel_sub_group_shuffle( long, uint );
+ulong __ovld __conv intel_sub_group_shuffle( ulong, uint );
+
+float __ovld __conv intel_sub_group_shuffle_down( float cur, float next, uint );
+float2 __ovld __conv intel_sub_group_shuffle_down( float2 cur, float2 next, uint );
+float3 __ovld __conv intel_sub_group_shuffle_down( float3 cur, float3 next, uint );
+float4 __ovld __conv intel_sub_group_shuffle_down( float4 cur, float4 next, uint );
+float8 __ovld __conv intel_sub_group_shuffle_down( float8 cur, float8 next, uint );
+float16 __ovld __conv intel_sub_group_shuffle_down( float16 cur, float16 next, uint );
+
+int __ovld __conv intel_sub_group_shuffle_down( int cur, int next, uint );
+int2 __ovld __conv intel_sub_group_shuffle_down( int2 cur, int2 next, uint );
+int3 __ovld __conv intel_sub_group_shuffle_down( int3 cur, int3 next, uint );
+int4 __ovld __conv intel_sub_group_shuffle_down( int4 cur, int4 next, uint );
+int8 __ovld __conv intel_sub_group_shuffle_down( int8 cur, int8 next, uint );
+int16 __ovld __conv intel_sub_group_shuffle_down( int16 cur, int16 next, uint );
+
+uint __ovld __conv intel_sub_group_shuffle_down( uint cur, uint next, uint );
+uint2 __ovld __conv intel_sub_group_shuffle_down( uint2 cur, uint2 next, uint );
+uint3 __ovld __conv intel_sub_group_shuffle_down( uint3 cur, uint3 next, uint );
+uint4 __ovld __conv intel_sub_group_shuffle_down( uint4 cur, uint4 next, uint );
+uint8 __ovld __conv intel_sub_group_shuffle_down( uint8 cur, uint8 next, uint );
+uint16 __ovld __conv intel_sub_group_shuffle_down( uint16 cur, uint16 next, uint );
+
+long __ovld __conv intel_sub_group_shuffle_down( long prev, long cur, uint );
+ulong __ovld __conv intel_sub_group_shuffle_down( ulong prev, ulong cur, uint );
+
+float __ovld __conv intel_sub_group_shuffle_up( float prev, float cur, uint );
+float2 __ovld __conv intel_sub_group_shuffle_up( float2 prev, float2 cur, uint );
+float3 __ovld __conv intel_sub_group_shuffle_up( float3 prev, float3 cur, uint );
+float4 __ovld __conv intel_sub_group_shuffle_up( float4 prev, float4 cur, uint );
+float8 __ovld __conv intel_sub_group_shuffle_up( float8 prev, float8 cur, uint );
+float16 __ovld __conv intel_sub_group_shuffle_up( float16 prev, float16 cur, uint );
+
+int __ovld __conv intel_sub_group_shuffle_up( int prev, int cur, uint );
+int2 __ovld __conv intel_sub_group_shuffle_up( int2 prev, int2 cur, uint );
+int3 __ovld __conv intel_sub_group_shuffle_up( int3 prev, int3 cur, uint );
+int4 __ovld __conv intel_sub_group_shuffle_up( int4 prev, int4 cur, uint );
+int8 __ovld __conv intel_sub_group_shuffle_up( int8 prev, int8 cur, uint );
+int16 __ovld __conv intel_sub_group_shuffle_up( int16 prev, int16 cur, uint );
+
+uint __ovld __conv intel_sub_group_shuffle_up( uint prev, uint cur, uint );
+uint2 __ovld __conv intel_sub_group_shuffle_up( uint2 prev, uint2 cur, uint );
+uint3 __ovld __conv intel_sub_group_shuffle_up( uint3 prev, uint3 cur, uint );
+uint4 __ovld __conv intel_sub_group_shuffle_up( uint4 prev, uint4 cur, uint );
+uint8 __ovld __conv intel_sub_group_shuffle_up( uint8 prev, uint8 cur, uint );
+uint16 __ovld __conv intel_sub_group_shuffle_up( uint16 prev, uint16 cur, uint );
+
+long __ovld __conv intel_sub_group_shuffle_up( long prev, long cur, uint );
+ulong __ovld __conv intel_sub_group_shuffle_up( ulong prev, ulong cur, uint );
+
+float __ovld __conv intel_sub_group_shuffle_xor( float , uint );
+float2 __ovld __conv intel_sub_group_shuffle_xor( float2, uint );
+float3 __ovld __conv intel_sub_group_shuffle_xor( float3, uint );
+float4 __ovld __conv intel_sub_group_shuffle_xor( float4, uint );
+float8 __ovld __conv intel_sub_group_shuffle_xor( float8, uint );
+float16 __ovld __conv intel_sub_group_shuffle_xor( float16, uint );
+
+int __ovld __conv intel_sub_group_shuffle_xor( int , uint );
+int2 __ovld __conv intel_sub_group_shuffle_xor( int2, uint );
+int3 __ovld __conv intel_sub_group_shuffle_xor( int3, uint );
+int4 __ovld __conv intel_sub_group_shuffle_xor( int4, uint );
+int8 __ovld __conv intel_sub_group_shuffle_xor( int8, uint );
+int16 __ovld __conv intel_sub_group_shuffle_xor( int16, uint );
+
+uint __ovld __conv intel_sub_group_shuffle_xor( uint , uint );
+uint2 __ovld __conv intel_sub_group_shuffle_xor( uint2, uint );
+uint3 __ovld __conv intel_sub_group_shuffle_xor( uint3, uint );
+uint4 __ovld __conv intel_sub_group_shuffle_xor( uint4, uint );
+uint8 __ovld __conv intel_sub_group_shuffle_xor( uint8, uint );
+uint16 __ovld __conv intel_sub_group_shuffle_xor( uint16, uint );
+
+long __ovld __conv intel_sub_group_shuffle_xor( long, uint );
+ulong __ovld __conv intel_sub_group_shuffle_xor( ulong, uint );
+
+#if defined(__opencl_c_images)
+uint __ovld __conv intel_sub_group_block_read(read_only image2d_t, int2);
+uint2 __ovld __conv intel_sub_group_block_read2(read_only image2d_t, int2);
+uint4 __ovld __conv intel_sub_group_block_read4(read_only image2d_t, int2);
+uint8 __ovld __conv intel_sub_group_block_read8(read_only image2d_t, int2);
+#endif
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-uint __ovld __conv intel_sub_group_block_read(read_write image2d_t image, int2 coord);
-uint2 __ovld __conv intel_sub_group_block_read2(read_write image2d_t image, int2 coord);
-uint4 __ovld __conv intel_sub_group_block_read4(read_write image2d_t image, int2 coord);
-uint8 __ovld __conv intel_sub_group_block_read8(read_write image2d_t image, int2 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
+uint __ovld __conv intel_sub_group_block_read(read_write image2d_t, int2);
+uint2 __ovld __conv intel_sub_group_block_read2(read_write image2d_t, int2);
+uint4 __ovld __conv intel_sub_group_block_read4(read_write image2d_t, int2);
+uint8 __ovld __conv intel_sub_group_block_read8(read_write image2d_t, int2);
+#endif // defined(__opencl_c_read_write_images)
uint __ovld __conv intel_sub_group_block_read( const __global uint* p );
uint2 __ovld __conv intel_sub_group_block_read2( const __global uint* p );
uint4 __ovld __conv intel_sub_group_block_read4( const __global uint* p );
uint8 __ovld __conv intel_sub_group_block_read8( const __global uint* p );
-void __ovld __conv intel_sub_group_block_write(write_only image2d_t image, int2 coord, uint data);
-void __ovld __conv intel_sub_group_block_write2(write_only image2d_t image, int2 coord, uint2 data);
-void __ovld __conv intel_sub_group_block_write4(write_only image2d_t image, int2 coord, uint4 data);
-void __ovld __conv intel_sub_group_block_write8(write_only image2d_t image, int2 coord, uint8 data);
+#if defined(__opencl_c_images)
+void __ovld __conv intel_sub_group_block_write(write_only image2d_t, int2, uint);
+void __ovld __conv intel_sub_group_block_write2(write_only image2d_t, int2, uint2);
+void __ovld __conv intel_sub_group_block_write4(write_only image2d_t, int2, uint4);
+void __ovld __conv intel_sub_group_block_write8(write_only image2d_t, int2, uint8);
+#endif // defined(__opencl_c_images)
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void __ovld __conv intel_sub_group_block_write(read_write image2d_t image, int2 coord, uint data);
-void __ovld __conv intel_sub_group_block_write2(read_write image2d_t image, int2 coord, uint2 data);
-void __ovld __conv intel_sub_group_block_write4(read_write image2d_t image, int2 coord, uint4 data);
-void __ovld __conv intel_sub_group_block_write8(read_write image2d_t image, int2 coord, uint8 data);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
+void __ovld __conv intel_sub_group_block_write(read_write image2d_t, int2, uint);
+void __ovld __conv intel_sub_group_block_write2(read_write image2d_t, int2, uint2);
+void __ovld __conv intel_sub_group_block_write4(read_write image2d_t, int2, uint4);
+void __ovld __conv intel_sub_group_block_write8(read_write image2d_t, int2, uint8);
+#endif // defined(__opencl_c_read_write_images)
void __ovld __conv intel_sub_group_block_write( __global uint* p, uint data );
void __ovld __conv intel_sub_group_block_write2( __global uint* p, uint2 data );
@@ -16421,89 +17451,89 @@ void __ovld __conv intel_sub_group_block_write4( __global uint* p, uint4 data
void __ovld __conv intel_sub_group_block_write8( __global uint* p, uint8 data );
#ifdef cl_khr_fp16
-half __ovld __conv intel_sub_group_shuffle( half x, uint c );
-half __ovld __conv intel_sub_group_shuffle_down( half prev, half cur, uint c );
-half __ovld __conv intel_sub_group_shuffle_up( half prev, half cur, uint c );
-half __ovld __conv intel_sub_group_shuffle_xor( half x, uint c );
+half __ovld __conv intel_sub_group_shuffle( half, uint );
+half __ovld __conv intel_sub_group_shuffle_down( half prev, half cur, uint );
+half __ovld __conv intel_sub_group_shuffle_up( half prev, half cur, uint );
+half __ovld __conv intel_sub_group_shuffle_xor( half, uint );
#endif
#if defined(cl_khr_fp64)
-double __ovld __conv intel_sub_group_shuffle( double x, uint c );
-double __ovld __conv intel_sub_group_shuffle_down( double prev, double cur, uint c );
-double __ovld __conv intel_sub_group_shuffle_up( double prev, double cur, uint c );
-double __ovld __conv intel_sub_group_shuffle_xor( double x, uint c );
+double __ovld __conv intel_sub_group_shuffle( double, uint );
+double __ovld __conv intel_sub_group_shuffle_down( double prev, double cur, uint );
+double __ovld __conv intel_sub_group_shuffle_up( double prev, double cur, uint );
+double __ovld __conv intel_sub_group_shuffle_xor( double, uint );
#endif
#endif //cl_intel_subgroups
#if defined(cl_intel_subgroups_short)
-short __ovld __conv intel_sub_group_broadcast( short x, uint sub_group_local_id );
-short2 __ovld __conv intel_sub_group_broadcast( short2 x, uint sub_group_local_id );
-short3 __ovld __conv intel_sub_group_broadcast( short3 x, uint sub_group_local_id );
-short4 __ovld __conv intel_sub_group_broadcast( short4 x, uint sub_group_local_id );
-short8 __ovld __conv intel_sub_group_broadcast( short8 x, uint sub_group_local_id );
-
-ushort __ovld __conv intel_sub_group_broadcast( ushort x, uint sub_group_local_id );
-ushort2 __ovld __conv intel_sub_group_broadcast( ushort2 x, uint sub_group_local_id );
-ushort3 __ovld __conv intel_sub_group_broadcast( ushort3 x, uint sub_group_local_id );
-ushort4 __ovld __conv intel_sub_group_broadcast( ushort4 x, uint sub_group_local_id );
-ushort8 __ovld __conv intel_sub_group_broadcast( ushort8 x, uint sub_group_local_id );
-
-short __ovld __conv intel_sub_group_shuffle( short x, uint c );
-short2 __ovld __conv intel_sub_group_shuffle( short2 x, uint c );
-short3 __ovld __conv intel_sub_group_shuffle( short3 x, uint c );
-short4 __ovld __conv intel_sub_group_shuffle( short4 x, uint c );
-short8 __ovld __conv intel_sub_group_shuffle( short8 x, uint c );
-short16 __ovld __conv intel_sub_group_shuffle( short16 x, uint c);
-
-ushort __ovld __conv intel_sub_group_shuffle( ushort x, uint c );
-ushort2 __ovld __conv intel_sub_group_shuffle( ushort2 x, uint c );
-ushort3 __ovld __conv intel_sub_group_shuffle( ushort3 x, uint c );
-ushort4 __ovld __conv intel_sub_group_shuffle( ushort4 x, uint c );
-ushort8 __ovld __conv intel_sub_group_shuffle( ushort8 x, uint c );
-ushort16 __ovld __conv intel_sub_group_shuffle( ushort16 x, uint c );
-
-short __ovld __conv intel_sub_group_shuffle_down( short cur, short next, uint c );
-short2 __ovld __conv intel_sub_group_shuffle_down( short2 cur, short2 next, uint c );
-short3 __ovld __conv intel_sub_group_shuffle_down( short3 cur, short3 next, uint c );
-short4 __ovld __conv intel_sub_group_shuffle_down( short4 cur, short4 next, uint c );
-short8 __ovld __conv intel_sub_group_shuffle_down( short8 cur, short8 next, uint c );
-short16 __ovld __conv intel_sub_group_shuffle_down( short16 cur, short16 next, uint c );
-
-ushort __ovld __conv intel_sub_group_shuffle_down( ushort cur, ushort next, uint c );
-ushort2 __ovld __conv intel_sub_group_shuffle_down( ushort2 cur, ushort2 next, uint c );
-ushort3 __ovld __conv intel_sub_group_shuffle_down( ushort3 cur, ushort3 next, uint c );
-ushort4 __ovld __conv intel_sub_group_shuffle_down( ushort4 cur, ushort4 next, uint c );
-ushort8 __ovld __conv intel_sub_group_shuffle_down( ushort8 cur, ushort8 next, uint c );
-ushort16 __ovld __conv intel_sub_group_shuffle_down( ushort16 cur, ushort16 next, uint c );
-
-short __ovld __conv intel_sub_group_shuffle_up( short cur, short next, uint c );
-short2 __ovld __conv intel_sub_group_shuffle_up( short2 cur, short2 next, uint c );
-short3 __ovld __conv intel_sub_group_shuffle_up( short3 cur, short3 next, uint c );
-short4 __ovld __conv intel_sub_group_shuffle_up( short4 cur, short4 next, uint c );
-short8 __ovld __conv intel_sub_group_shuffle_up( short8 cur, short8 next, uint c );
-short16 __ovld __conv intel_sub_group_shuffle_up( short16 cur, short16 next, uint c );
-
-ushort __ovld __conv intel_sub_group_shuffle_up( ushort cur, ushort next, uint c );
-ushort2 __ovld __conv intel_sub_group_shuffle_up( ushort2 cur, ushort2 next, uint c );
-ushort3 __ovld __conv intel_sub_group_shuffle_up( ushort3 cur, ushort3 next, uint c );
-ushort4 __ovld __conv intel_sub_group_shuffle_up( ushort4 cur, ushort4 next, uint c );
-ushort8 __ovld __conv intel_sub_group_shuffle_up( ushort8 cur, ushort8 next, uint c );
-ushort16 __ovld __conv intel_sub_group_shuffle_up( ushort16 cur, ushort16 next, uint c );
-
-short __ovld __conv intel_sub_group_shuffle_xor( short x, uint c );
-short2 __ovld __conv intel_sub_group_shuffle_xor( short2 x, uint c );
-short3 __ovld __conv intel_sub_group_shuffle_xor( short3 x, uint c );
-short4 __ovld __conv intel_sub_group_shuffle_xor( short4 x, uint c );
-short8 __ovld __conv intel_sub_group_shuffle_xor( short8 x, uint c );
-short16 __ovld __conv intel_sub_group_shuffle_xor( short16 x, uint c );
-
-ushort __ovld __conv intel_sub_group_shuffle_xor( ushort x, uint c );
-ushort2 __ovld __conv intel_sub_group_shuffle_xor( ushort2 x, uint c );
-ushort3 __ovld __conv intel_sub_group_shuffle_xor( ushort3 x, uint c );
-ushort4 __ovld __conv intel_sub_group_shuffle_xor( ushort4 x, uint c );
-ushort8 __ovld __conv intel_sub_group_shuffle_xor( ushort8 x, uint c );
-ushort16 __ovld __conv intel_sub_group_shuffle_xor( ushort16 x, uint c );
+short __ovld __conv intel_sub_group_broadcast( short , uint sub_group_local_id );
+short2 __ovld __conv intel_sub_group_broadcast( short2, uint sub_group_local_id );
+short3 __ovld __conv intel_sub_group_broadcast( short3, uint sub_group_local_id );
+short4 __ovld __conv intel_sub_group_broadcast( short4, uint sub_group_local_id );
+short8 __ovld __conv intel_sub_group_broadcast( short8, uint sub_group_local_id );
+
+ushort __ovld __conv intel_sub_group_broadcast( ushort , uint sub_group_local_id );
+ushort2 __ovld __conv intel_sub_group_broadcast( ushort2, uint sub_group_local_id );
+ushort3 __ovld __conv intel_sub_group_broadcast( ushort3, uint sub_group_local_id );
+ushort4 __ovld __conv intel_sub_group_broadcast( ushort4, uint sub_group_local_id );
+ushort8 __ovld __conv intel_sub_group_broadcast( ushort8, uint sub_group_local_id );
+
+short __ovld __conv intel_sub_group_shuffle( short , uint );
+short2 __ovld __conv intel_sub_group_shuffle( short2 , uint );
+short3 __ovld __conv intel_sub_group_shuffle( short3 , uint );
+short4 __ovld __conv intel_sub_group_shuffle( short4 , uint );
+short8 __ovld __conv intel_sub_group_shuffle( short8 , uint );
+short16 __ovld __conv intel_sub_group_shuffle( short16, uint);
+
+ushort __ovld __conv intel_sub_group_shuffle( ushort , uint );
+ushort2 __ovld __conv intel_sub_group_shuffle( ushort2 , uint );
+ushort3 __ovld __conv intel_sub_group_shuffle( ushort3 , uint );
+ushort4 __ovld __conv intel_sub_group_shuffle( ushort4 , uint );
+ushort8 __ovld __conv intel_sub_group_shuffle( ushort8 , uint );
+ushort16 __ovld __conv intel_sub_group_shuffle( ushort16, uint );
+
+short __ovld __conv intel_sub_group_shuffle_down( short cur, short next, uint );
+short2 __ovld __conv intel_sub_group_shuffle_down( short2 cur, short2 next, uint );
+short3 __ovld __conv intel_sub_group_shuffle_down( short3 cur, short3 next, uint );
+short4 __ovld __conv intel_sub_group_shuffle_down( short4 cur, short4 next, uint );
+short8 __ovld __conv intel_sub_group_shuffle_down( short8 cur, short8 next, uint );
+short16 __ovld __conv intel_sub_group_shuffle_down( short16 cur, short16 next, uint );
+
+ushort __ovld __conv intel_sub_group_shuffle_down( ushort cur, ushort next, uint );
+ushort2 __ovld __conv intel_sub_group_shuffle_down( ushort2 cur, ushort2 next, uint );
+ushort3 __ovld __conv intel_sub_group_shuffle_down( ushort3 cur, ushort3 next, uint );
+ushort4 __ovld __conv intel_sub_group_shuffle_down( ushort4 cur, ushort4 next, uint );
+ushort8 __ovld __conv intel_sub_group_shuffle_down( ushort8 cur, ushort8 next, uint );
+ushort16 __ovld __conv intel_sub_group_shuffle_down( ushort16 cur, ushort16 next, uint );
+
+short __ovld __conv intel_sub_group_shuffle_up( short cur, short next, uint );
+short2 __ovld __conv intel_sub_group_shuffle_up( short2 cur, short2 next, uint );
+short3 __ovld __conv intel_sub_group_shuffle_up( short3 cur, short3 next, uint );
+short4 __ovld __conv intel_sub_group_shuffle_up( short4 cur, short4 next, uint );
+short8 __ovld __conv intel_sub_group_shuffle_up( short8 cur, short8 next, uint );
+short16 __ovld __conv intel_sub_group_shuffle_up( short16 cur, short16 next, uint );
+
+ushort __ovld __conv intel_sub_group_shuffle_up( ushort cur, ushort next, uint );
+ushort2 __ovld __conv intel_sub_group_shuffle_up( ushort2 cur, ushort2 next, uint );
+ushort3 __ovld __conv intel_sub_group_shuffle_up( ushort3 cur, ushort3 next, uint );
+ushort4 __ovld __conv intel_sub_group_shuffle_up( ushort4 cur, ushort4 next, uint );
+ushort8 __ovld __conv intel_sub_group_shuffle_up( ushort8 cur, ushort8 next, uint );
+ushort16 __ovld __conv intel_sub_group_shuffle_up( ushort16 cur, ushort16 next, uint );
+
+short __ovld __conv intel_sub_group_shuffle_xor( short , uint );
+short2 __ovld __conv intel_sub_group_shuffle_xor( short2 , uint );
+short3 __ovld __conv intel_sub_group_shuffle_xor( short3 , uint );
+short4 __ovld __conv intel_sub_group_shuffle_xor( short4 , uint );
+short8 __ovld __conv intel_sub_group_shuffle_xor( short8 , uint );
+short16 __ovld __conv intel_sub_group_shuffle_xor( short16, uint );
+
+ushort __ovld __conv intel_sub_group_shuffle_xor( ushort , uint );
+ushort2 __ovld __conv intel_sub_group_shuffle_xor( ushort2 , uint );
+ushort3 __ovld __conv intel_sub_group_shuffle_xor( ushort3 , uint );
+ushort4 __ovld __conv intel_sub_group_shuffle_xor( ushort4 , uint );
+ushort8 __ovld __conv intel_sub_group_shuffle_xor( ushort8 , uint );
+ushort16 __ovld __conv intel_sub_group_shuffle_xor( ushort16, uint );
short __ovld __conv intel_sub_group_reduce_add( short x );
ushort __ovld __conv intel_sub_group_reduce_add( ushort x );
@@ -16526,68 +17556,76 @@ ushort __ovld __conv intel_sub_group_scan_inclusive_min( ushort x );
short __ovld __conv intel_sub_group_scan_inclusive_max( short x );
ushort __ovld __conv intel_sub_group_scan_inclusive_max( ushort x );
-uint __ovld __conv intel_sub_group_block_read_ui( read_only image2d_t image, int2 byte_coord );
-uint2 __ovld __conv intel_sub_group_block_read_ui2( read_only image2d_t image, int2 byte_coord );
-uint4 __ovld __conv intel_sub_group_block_read_ui4( read_only image2d_t image, int2 byte_coord );
-uint8 __ovld __conv intel_sub_group_block_read_ui8( read_only image2d_t image, int2 byte_coord );
+#if defined(__opencl_c_images)
+uint __ovld __conv intel_sub_group_block_read_ui(read_only image2d_t, int2);
+uint2 __ovld __conv intel_sub_group_block_read_ui2(read_only image2d_t, int2);
+uint4 __ovld __conv intel_sub_group_block_read_ui4(read_only image2d_t, int2);
+uint8 __ovld __conv intel_sub_group_block_read_ui8(read_only image2d_t, int2);
+#endif // defined(__opencl_c_images)
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-uint __ovld __conv intel_sub_group_block_read_ui( read_write image2d_t image, int2 byte_coord );
-uint2 __ovld __conv intel_sub_group_block_read_ui2( read_write image2d_t image, int2 byte_coord );
-uint4 __ovld __conv intel_sub_group_block_read_ui4( read_write image2d_t image, int2 byte_coord );
-uint8 __ovld __conv intel_sub_group_block_read_ui8( read_write image2d_t image, int2 byte_coord );
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
+uint __ovld __conv intel_sub_group_block_read_ui(read_write image2d_t, int2);
+uint2 __ovld __conv intel_sub_group_block_read_ui2(read_write image2d_t, int2);
+uint4 __ovld __conv intel_sub_group_block_read_ui4(read_write image2d_t, int2);
+uint8 __ovld __conv intel_sub_group_block_read_ui8(read_write image2d_t, int2);
+#endif // defined(__opencl_c_read_write_images)
uint __ovld __conv intel_sub_group_block_read_ui( const __global uint* p );
uint2 __ovld __conv intel_sub_group_block_read_ui2( const __global uint* p );
uint4 __ovld __conv intel_sub_group_block_read_ui4( const __global uint* p );
uint8 __ovld __conv intel_sub_group_block_read_ui8( const __global uint* p );
-void __ovld __conv intel_sub_group_block_write_ui( read_only image2d_t image, int2 byte_coord, uint data );
-void __ovld __conv intel_sub_group_block_write_ui2( read_only image2d_t image, int2 byte_coord, uint2 data );
-void __ovld __conv intel_sub_group_block_write_ui4( read_only image2d_t image, int2 byte_coord, uint4 data );
-void __ovld __conv intel_sub_group_block_write_ui8( read_only image2d_t image, int2 byte_coord, uint8 data );
+#if defined(__opencl_c_images)
+void __ovld __conv intel_sub_group_block_write_ui(read_only image2d_t, int2, uint);
+void __ovld __conv intel_sub_group_block_write_ui2(read_only image2d_t, int2, uint2);
+void __ovld __conv intel_sub_group_block_write_ui4(read_only image2d_t, int2, uint4);
+void __ovld __conv intel_sub_group_block_write_ui8(read_only image2d_t, int2, uint8);
+#endif //defined(__opencl_c_images)
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void __ovld __conv intel_sub_group_block_write_ui( read_write image2d_t image, int2 byte_coord, uint data );
-void __ovld __conv intel_sub_group_block_write_ui2( read_write image2d_t image, int2 byte_coord, uint2 data );
-void __ovld __conv intel_sub_group_block_write_ui4( read_write image2d_t image, int2 byte_coord, uint4 data );
-void __ovld __conv intel_sub_group_block_write_ui8( read_write image2d_t image, int2 byte_coord, uint8 data );
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
+void __ovld __conv intel_sub_group_block_write_ui(read_write image2d_t, int2, uint);
+void __ovld __conv intel_sub_group_block_write_ui2(read_write image2d_t, int2, uint2);
+void __ovld __conv intel_sub_group_block_write_ui4(read_write image2d_t, int2, uint4);
+void __ovld __conv intel_sub_group_block_write_ui8(read_write image2d_t, int2, uint8);
+#endif // defined(__opencl_c_read_write_images)
void __ovld __conv intel_sub_group_block_write_ui( __global uint* p, uint data );
void __ovld __conv intel_sub_group_block_write_ui2( __global uint* p, uint2 data );
void __ovld __conv intel_sub_group_block_write_ui4( __global uint* p, uint4 data );
void __ovld __conv intel_sub_group_block_write_ui8( __global uint* p, uint8 data );
-ushort __ovld __conv intel_sub_group_block_read_us( read_only image2d_t image, int2 coord );
-ushort2 __ovld __conv intel_sub_group_block_read_us2( read_only image2d_t image, int2 coord );
-ushort4 __ovld __conv intel_sub_group_block_read_us4( read_only image2d_t image, int2 coord );
-ushort8 __ovld __conv intel_sub_group_block_read_us8( read_only image2d_t image, int2 coord );
+#if defined(__opencl_c_images)
+ushort __ovld __conv intel_sub_group_block_read_us(read_only image2d_t, int2);
+ushort2 __ovld __conv intel_sub_group_block_read_us2(read_only image2d_t, int2);
+ushort4 __ovld __conv intel_sub_group_block_read_us4(read_only image2d_t, int2);
+ushort8 __ovld __conv intel_sub_group_block_read_us8(read_only image2d_t, int2);
+#endif // defined(__opencl_c_images)
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-ushort __ovld __conv intel_sub_group_block_read_us(read_write image2d_t image, int2 coord);
-ushort2 __ovld __conv intel_sub_group_block_read_us2(read_write image2d_t image, int2 coord);
-ushort4 __ovld __conv intel_sub_group_block_read_us4(read_write image2d_t image, int2 coord);
-ushort8 __ovld __conv intel_sub_group_block_read_us8(read_write image2d_t image, int2 coord);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
+ushort __ovld __conv intel_sub_group_block_read_us(read_write image2d_t, int2);
+ushort2 __ovld __conv intel_sub_group_block_read_us2(read_write image2d_t, int2);
+ushort4 __ovld __conv intel_sub_group_block_read_us4(read_write image2d_t, int2);
+ushort8 __ovld __conv intel_sub_group_block_read_us8(read_write image2d_t, int2);
+#endif // defined(__opencl_c_read_write_images)
ushort __ovld __conv intel_sub_group_block_read_us( const __global ushort* p );
ushort2 __ovld __conv intel_sub_group_block_read_us2( const __global ushort* p );
ushort4 __ovld __conv intel_sub_group_block_read_us4( const __global ushort* p );
ushort8 __ovld __conv intel_sub_group_block_read_us8( const __global ushort* p );
-void __ovld __conv intel_sub_group_block_write_us(write_only image2d_t image, int2 coord, ushort data);
-void __ovld __conv intel_sub_group_block_write_us2(write_only image2d_t image, int2 coord, ushort2 data);
-void __ovld __conv intel_sub_group_block_write_us4(write_only image2d_t image, int2 coord, ushort4 data);
-void __ovld __conv intel_sub_group_block_write_us8(write_only image2d_t image, int2 coord, ushort8 data);
+#if defined(__opencl_c_images)
+void __ovld __conv intel_sub_group_block_write_us(write_only image2d_t, int2, ushort);
+void __ovld __conv intel_sub_group_block_write_us2(write_only image2d_t, int2, ushort2);
+void __ovld __conv intel_sub_group_block_write_us4(write_only image2d_t, int2, ushort4);
+void __ovld __conv intel_sub_group_block_write_us8(write_only image2d_t, int2, ushort8);
+#endif // defined(__opencl_c_images)
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void __ovld __conv intel_sub_group_block_write_us(read_write image2d_t image, int2 coord, ushort data);
-void __ovld __conv intel_sub_group_block_write_us2(read_write image2d_t image, int2 coord, ushort2 data);
-void __ovld __conv intel_sub_group_block_write_us4(read_write image2d_t image, int2 coord, ushort4 data);
-void __ovld __conv intel_sub_group_block_write_us8(read_write image2d_t image, int2 coord, ushort8 data);
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_read_write_images)
+void __ovld __conv intel_sub_group_block_write_us(read_write image2d_t, int2, ushort);
+void __ovld __conv intel_sub_group_block_write_us2(read_write image2d_t, int2, ushort2);
+void __ovld __conv intel_sub_group_block_write_us4(read_write image2d_t, int2, ushort4);
+void __ovld __conv intel_sub_group_block_write_us8(read_write image2d_t, int2, ushort8);
+#endif // defined(__opencl_c_read_write_images)
void __ovld __conv intel_sub_group_block_write_us( __global ushort* p, ushort data );
void __ovld __conv intel_sub_group_block_write_us2( __global ushort* p, ushort2 data );
@@ -16705,6 +17743,7 @@ short2 __ovld intel_sub_group_avc_ime_adjust_ref_offset(
short2 ref_offset, ushort2 src_coord, ushort2 ref_window_size,
ushort2 image_size);
+#if defined(__opencl_c_images)
intel_sub_group_avc_ime_result_t __ovld
intel_sub_group_avc_ime_evaluate_with_single_reference(
read_only image2d_t src_image, read_only image2d_t ref_image,
@@ -16745,6 +17784,7 @@ intel_sub_group_avc_ime_evaluate_with_dual_reference_streaminout(
read_only image2d_t bwd_ref_image, sampler_t vme_media_sampler,
intel_sub_group_avc_ime_payload_t payload,
intel_sub_group_avc_ime_dual_reference_streamin_t streamin_components);
+#endif
intel_sub_group_avc_ime_single_reference_streamin_t __ovld
intel_sub_group_avc_ime_get_single_reference_streamin(
@@ -16809,6 +17849,7 @@ intel_sub_group_avc_ref_payload_t __ovld
intel_sub_group_avc_ref_set_bilinear_filter_enable(
intel_sub_group_avc_ref_payload_t payload);
+#if defined(__opencl_c_images)
intel_sub_group_avc_ref_result_t __ovld
intel_sub_group_avc_ref_evaluate_with_single_reference(
read_only image2d_t src_image, read_only image2d_t ref_image,
@@ -16827,6 +17868,7 @@ intel_sub_group_avc_ref_evaluate_with_multi_reference(
read_only image2d_t src_image, uint packed_reference_ids,
uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
intel_sub_group_avc_ref_payload_t payload);
+#endif //defined(__opencl_c_images)
// SIC built-in functions
intel_sub_group_avc_sic_payload_t __ovld
@@ -16837,15 +17879,13 @@ intel_sub_group_avc_sic_configure_skc(
uint skip_block_partition_type, uint skip_motion_vector_mask,
ulong motion_vectors, uchar bidirectional_weight, uchar skip_sad_adjustment,
intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_configure_ipe(
- uchar luma_intra_partition_mask, uchar intra_neighbour_availabilty,
+intel_sub_group_avc_sic_payload_t __ovld intel_sub_group_avc_sic_configure_ipe(
+ uchar luma_intra_partition_mask, uchar intra_neighbour_availability,
uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel,
uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels,
uchar intra_sad_adjustment, intel_sub_group_avc_sic_payload_t payload);
-intel_sub_group_avc_sic_payload_t __ovld
-intel_sub_group_avc_sic_configure_ipe(
- uchar luma_intra_partition_mask, uchar intra_neighbour_availabilty,
+intel_sub_group_avc_sic_payload_t __ovld intel_sub_group_avc_sic_configure_ipe(
+ uchar luma_intra_partition_mask, uchar intra_neighbour_availability,
uchar left_edge_luma_pixels, uchar upper_left_corner_luma_pixel,
uchar upper_edge_luma_pixels, uchar upper_right_edge_luma_pixels,
ushort left_edge_chroma_pixels, ushort upper_left_corner_chroma_pixel,
@@ -16877,6 +17917,7 @@ intel_sub_group_avc_sic_set_block_based_raw_skip_sad(
uchar block_based_skip_type,
intel_sub_group_avc_sic_payload_t payload);
+#if defined(__opencl_c_images)
intel_sub_group_avc_sic_result_t __ovld
intel_sub_group_avc_sic_evaluate_ipe(
read_only image2d_t src_image, sampler_t vme_media_sampler,
@@ -16899,6 +17940,7 @@ intel_sub_group_avc_sic_evaluate_with_multi_reference(
read_only image2d_t src_image, uint packed_reference_ids,
uchar packed_reference_field_polarities, sampler_t vme_media_sampler,
intel_sub_group_avc_sic_payload_t payload);
+#endif //defined(__opencl_c_images)
uchar __ovld intel_sub_group_avc_sic_get_ipe_luma_shape(
intel_sub_group_avc_sic_result_t result);
@@ -17095,72 +18137,72 @@ intel_sub_group_avc_mce_convert_to_sic_result(
#endif // cl_intel_device_side_avc_motion_estimation
#ifdef cl_amd_media_ops
-uint __ovld amd_bitalign(uint a, uint b, uint c);
-uint2 __ovld amd_bitalign(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_bitalign(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_bitalign(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_bitalign(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_bitalign(uint16 a, uint16 b, uint16 c);
-
-uint __ovld amd_bytealign(uint a, uint b, uint c);
-uint2 __ovld amd_bytealign(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_bytealign(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_bytealign(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_bytealign(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_bytealign(uint16 a, uint16 b, uint16 c);
-
-uint __ovld amd_lerp(uint a, uint b, uint c);
-uint2 __ovld amd_lerp(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_lerp(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_lerp(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_lerp(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_lerp(uint16 a, uint16 b, uint16 c);
+uint __ovld amd_bitalign(uint, uint, uint);
+uint2 __ovld amd_bitalign(uint2, uint2, uint2);
+uint3 __ovld amd_bitalign(uint3, uint3, uint3);
+uint4 __ovld amd_bitalign(uint4, uint4, uint4);
+uint8 __ovld amd_bitalign(uint8, uint8, uint8);
+uint16 __ovld amd_bitalign(uint16, uint16, uint16);
+
+uint __ovld amd_bytealign(uint, uint, uint);
+uint2 __ovld amd_bytealign(uint2, uint2, uint2);
+uint3 __ovld amd_bytealign(uint3, uint3, uint3);
+uint4 __ovld amd_bytealign(uint4, uint4, uint4);
+uint8 __ovld amd_bytealign(uint8, uint8, uint8);
+uint16 __ovld amd_bytealign(uint16, uint16, uint16);
+
+uint __ovld amd_lerp(uint, uint, uint);
+uint2 __ovld amd_lerp(uint2, uint2, uint2);
+uint3 __ovld amd_lerp(uint3, uint3, uint3);
+uint4 __ovld amd_lerp(uint4, uint4, uint4);
+uint8 __ovld amd_lerp(uint8, uint8, uint8);
+uint16 __ovld amd_lerp(uint16, uint16, uint16);
uint __ovld amd_pack(float4 v);
-uint __ovld amd_sad4(uint4 x, uint4 y, uint z);
-
-uint __ovld amd_sadhi(uint a, uint b, uint c);
-uint2 __ovld amd_sadhi(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_sadhi(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_sadhi(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_sadhi(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_sadhi(uint16 a, uint16 b, uint16 c);
-
-uint __ovld amd_sad(uint a, uint b, uint c);
-uint2 __ovld amd_sad(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_sad(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_sad(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_sad(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_sad(uint16 a, uint16 b, uint16 c);
-
-float __ovld amd_unpack0(uint a);
-float2 __ovld amd_unpack0(uint2 a);
-float3 __ovld amd_unpack0(uint3 a);
-float4 __ovld amd_unpack0(uint4 a);
-float8 __ovld amd_unpack0(uint8 a);
-float16 __ovld amd_unpack0(uint16 a);
-
-float __ovld amd_unpack1(uint a);
-float2 __ovld amd_unpack1(uint2 a);
-float3 __ovld amd_unpack1(uint3 a);
-float4 __ovld amd_unpack1(uint4 a);
-float8 __ovld amd_unpack1(uint8 a);
-float16 __ovld amd_unpack1(uint16 a);
-
-float __ovld amd_unpack2(uint a);
-float2 __ovld amd_unpack2(uint2 a);
-float3 __ovld amd_unpack2(uint3 a);
-float4 __ovld amd_unpack2(uint4 a);
-float8 __ovld amd_unpack2(uint8 a);
-float16 __ovld amd_unpack2(uint16 a);
-
-float __ovld amd_unpack3(uint a);
-float2 __ovld amd_unpack3(uint2 a);
-float3 __ovld amd_unpack3(uint3 a);
-float4 __ovld amd_unpack3(uint4 a);
-float8 __ovld amd_unpack3(uint8 a);
-float16 __ovld amd_unpack3(uint16 a);
+uint __ovld amd_sad4(uint4, uint4, uint);
+
+uint __ovld amd_sadhi(uint, uint, uint);
+uint2 __ovld amd_sadhi(uint2, uint2, uint2);
+uint3 __ovld amd_sadhi(uint3, uint3, uint3);
+uint4 __ovld amd_sadhi(uint4, uint4, uint4);
+uint8 __ovld amd_sadhi(uint8, uint8, uint8);
+uint16 __ovld amd_sadhi(uint16, uint16, uint16);
+
+uint __ovld amd_sad(uint, uint, uint);
+uint2 __ovld amd_sad(uint2, uint2, uint2);
+uint3 __ovld amd_sad(uint3, uint3, uint3);
+uint4 __ovld amd_sad(uint4, uint4, uint4);
+uint8 __ovld amd_sad(uint8, uint8, uint8);
+uint16 __ovld amd_sad(uint16, uint16, uint16);
+
+float __ovld amd_unpack0(uint);
+float2 __ovld amd_unpack0(uint2);
+float3 __ovld amd_unpack0(uint3);
+float4 __ovld amd_unpack0(uint4);
+float8 __ovld amd_unpack0(uint8);
+float16 __ovld amd_unpack0(uint16);
+
+float __ovld amd_unpack1(uint);
+float2 __ovld amd_unpack1(uint2);
+float3 __ovld amd_unpack1(uint3);
+float4 __ovld amd_unpack1(uint4);
+float8 __ovld amd_unpack1(uint8);
+float16 __ovld amd_unpack1(uint16);
+
+float __ovld amd_unpack2(uint);
+float2 __ovld amd_unpack2(uint2);
+float3 __ovld amd_unpack2(uint3);
+float4 __ovld amd_unpack2(uint4);
+float8 __ovld amd_unpack2(uint8);
+float16 __ovld amd_unpack2(uint16);
+
+float __ovld amd_unpack3(uint);
+float2 __ovld amd_unpack3(uint2);
+float3 __ovld amd_unpack3(uint3);
+float4 __ovld amd_unpack3(uint4);
+float8 __ovld amd_unpack3(uint8);
+float16 __ovld amd_unpack3(uint16);
#endif // cl_amd_media_ops
#ifdef cl_amd_media_ops2
@@ -17285,28 +18327,30 @@ uint16 __ovld amd_sadw(uint16 src0, uint16 src1, uint16 src2);
#endif // cl_amd_media_ops2
#if defined(cl_arm_integer_dot_product_int8)
-uint __ovld arm_dot(uchar4 a, uchar4 b);
-int __ovld arm_dot(char4 a, char4 b);
+uint __ovld arm_dot(uchar4, uchar4);
+int __ovld arm_dot(char4, char4);
#endif // defined(cl_arm_integer_dot_product_int8)
#if defined(cl_arm_integer_dot_product_accumulate_int8)
-uint __ovld arm_dot_acc(uchar4 a, uchar4 b, uint c);
-int __ovld arm_dot_acc(char4 a, char4 b, int c);
+uint __ovld arm_dot_acc(uchar4, uchar4, uint);
+int __ovld arm_dot_acc(char4, char4, int);
#endif // defined(cl_arm_integer_dot_product_accumulate_int8)
#if defined(cl_arm_integer_dot_product_accumulate_int16)
-uint __ovld arm_dot_acc(ushort2 a, ushort2 b, uint c);
-int __ovld arm_dot_acc(short2 a, short2 b, int c);
+uint __ovld arm_dot_acc(ushort2, ushort2, uint);
+int __ovld arm_dot_acc(short2, short2, int);
#endif // defined(cl_arm_integer_dot_product_accumulate_int16)
#if defined(cl_arm_integer_dot_product_accumulate_saturate_int8)
-uint __ovld arm_dot_acc_sat(uchar4 a, uchar4 b, uint c);
-int __ovld arm_dot_acc_sat(char4 a, char4 b, int c);
+uint __ovld arm_dot_acc_sat(uchar4, uchar4, uint);
+int __ovld arm_dot_acc_sat(char4, char4, int);
#endif // defined(cl_arm_integer_dot_product_accumulate_saturate_int8)
// Disable any extensions we may have enabled previously.
#pragma OPENCL EXTENSION all : disable
+#undef __opencl_c_named_address_space_builtins
+
#undef __cnfn
#undef __ovld
#endif //_OPENCL_H_
diff --git a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h
index 279fb26fbaf7..d5b6846b0348 100644
--- a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h
+++ b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h
@@ -40,7 +40,6 @@ extern "C" {
// Import types which will be used by __clang_hip_libdevice_declares.h
#ifndef __cplusplus
-#include <stdbool.h>
#include <stdint.h>
#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/cmath b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/cmath
index 22a720aca956..e1b71516e72c 100644
--- a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/cmath
+++ b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/cmath
@@ -1,4 +1,4 @@
-/*===-- __clang_openmp_device_functions.h - OpenMP math declares ------ c++ -===
+/*===-- __clang_openmp_device_functions.h - OpenMP math declares -*- c++ -*-===
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex
index eb1ead207d58..1ceecc1af8ae 100644
--- a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex
+++ b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex
@@ -17,9 +17,18 @@
// We require std::math functions in the complex builtins below.
#include <cmath>
+#ifdef __NVPTX__
#define __OPENMP_NVPTX__
#include <__clang_cuda_complex_builtins.h>
#undef __OPENMP_NVPTX__
+#endif // __NVPTX__
+
+#ifdef __AMDGCN__
+#define __OPENMP_AMDGCN__
+#include <__clang_cuda_complex_builtins.h>
+#undef __OPENMP_AMDGCN__
+#endif // __AMDGCN__
+
#endif
// Grab the host header too.
@@ -43,4 +52,4 @@
#pragma omp end declare variant
-#endif
+#endif // _LIBCPP_STD_VER
diff --git a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex.h b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex.h
index 15dc415b8126..7e7c0866426b 100644
--- a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex.h
+++ b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/complex.h
@@ -17,10 +17,19 @@
// We require math functions in the complex builtins below.
#include <math.h>
+#ifdef __NVPTX__
#define __OPENMP_NVPTX__
#include <__clang_cuda_complex_builtins.h>
#undef __OPENMP_NVPTX__
#endif
+#ifdef __AMDGCN__
+#define __OPENMP_AMDGCN__
+#include <__clang_cuda_complex_builtins.h>
+#undef __OPENMP_AMDGCN__
+#endif
+
+#endif
+
// Grab the host header too.
#include_next <complex.h>
diff --git a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/new b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/new
index 985ddc567f49..8bad3f19d625 100644
--- a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/new
+++ b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/new
@@ -13,7 +13,7 @@
// which do not use nothrow_t are provided without the <new> header.
#include_next <new>
-#if defined(__NVPTX__) && defined(_OPENMP)
+#if (defined(__NVPTX__) || defined(__AMDGPU__)) && defined(_OPENMP)
#include <cstdlib>
diff --git a/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/stdlib.h b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/stdlib.h
new file mode 100644
index 000000000000..d607469e04f7
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/openmp_wrappers/stdlib.h
@@ -0,0 +1,29 @@
+/*===---- openmp_wrapper/stdlib.h ------ OpenMP math.h intercept ----- c++ -===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_OPENMP_STDLIB_H__
+#define __CLANG_OPENMP_STDLIB_H__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+#include_next <stdlib.h>
+
+#ifdef __AMDGCN__
+#pragma omp begin declare variant match(device = {arch(amdgcn)})
+
+#define __OPENMP_AMDGCN__
+#include <__clang_hip_stdlib.h>
+#undef __OPENMP_AMDGCN__
+
+#pragma omp end declare variant
+#endif
+
+#endif // __CLANG_OPENMP_STDLIB_H__
diff --git a/contrib/llvm-project/clang/lib/Headers/pmmintrin.h b/contrib/llvm-project/clang/lib/Headers/pmmintrin.h
index a83b2eb6d8e2..91cee1edda30 100644
--- a/contrib/llvm-project/clang/lib/Headers/pmmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/pmmintrin.h
@@ -10,11 +10,16 @@
#ifndef __PMMINTRIN_H
#define __PMMINTRIN_H
+#if !defined(__i386__) && !defined(__x86_64__)
+#error "This header is only meant to be used on x86 and x64 architecture"
+#endif
+
#include <emmintrin.h>
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS \
- __attribute__((__always_inline__, __nodebug__, __target__("sse3"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("sse3,no-evex512"), __min_vector_width__(128)))
/// Loads data from an unaligned memory location to elements in a 128-bit
/// vector.
@@ -31,7 +36,7 @@
/// A pointer to a 128-bit integer vector containing integer values.
/// \returns A 128-bit vector containing the moved values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_lddqu_si128(__m128i const *__p)
+_mm_lddqu_si128(__m128i_u const *__p)
{
return (__m128i)__builtin_ia32_lddqu((char const *)__p);
}
@@ -249,9 +254,12 @@ _mm_movedup_pd(__m128d __a)
/// the processor in the monitor event pending state. Data stored in the
/// monitored address range causes the processor to exit the pending state.
///
+/// The \c MONITOR instruction can be used in kernel mode, and in other modes
+/// if MSR <c> C001_0015h[MonMwaitUserEn] </c> is set.
+///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> MONITOR </c> instruction.
+/// This intrinsic corresponds to the \c MONITOR instruction.
///
/// \param __p
/// The memory range to be monitored. The size of the range is determined by
@@ -266,19 +274,22 @@ _mm_monitor(void const *__p, unsigned __extensions, unsigned __hints)
__builtin_ia32_monitor(__p, __extensions, __hints);
}
-/// Used with the MONITOR instruction to wait while the processor is in
+/// Used with the \c MONITOR instruction to wait while the processor is in
/// the monitor event pending state. Data stored in the monitored address
-/// range causes the processor to exit the pending state.
+/// range, or an interrupt, causes the processor to exit the pending state.
+///
+/// The \c MWAIT instruction can be used in kernel mode, and in other modes if
+/// MSR <c> C001_0015h[MonMwaitUserEn] </c> is set.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> MWAIT </c> instruction.
+/// This intrinsic corresponds to the \c MWAIT instruction.
///
/// \param __extensions
-/// Optional extensions for the monitoring state, which may vary by
+/// Optional extensions for the monitoring state, which can vary by
/// processor.
/// \param __hints
-/// Optional hints for the monitoring state, which may vary by processor.
+/// Optional hints for the monitoring state, which can vary by processor.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_mwait(unsigned __extensions, unsigned __hints)
{
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/bmi2intrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/bmi2intrin.h
new file mode 100644
index 000000000000..0dc0d14ad480
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/bmi2intrin.h
@@ -0,0 +1,134 @@
+/*===---- bmiintrin.h - Implementation of BMI2 intrinsics on PowerPC -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined X86GPRINTRIN_H_
+#error "Never use <bmi2intrin.h> directly; include <x86gprintrin.h> instead."
+#endif
+
+#ifndef BMI2INTRIN_H_
+#define BMI2INTRIN_H_
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _bzhi_u32(unsigned int __X, unsigned int __Y) {
+ return ((__X << (32 - __Y)) >> (32 - __Y));
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mulx_u32(unsigned int __X, unsigned int __Y, unsigned int *__P) {
+ unsigned long long __res = (unsigned long long)__X * __Y;
+ *__P = (unsigned int)(__res >> 32);
+ return (unsigned int)__res;
+}
+
+#ifdef __PPC64__
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _bzhi_u64(unsigned long long __X, unsigned long long __Y) {
+ return ((__X << (64 - __Y)) >> (64 - __Y));
+}
+
+/* __int128 requires base 64-bit. */
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mulx_u64(unsigned long long __X, unsigned long long __Y,
+ unsigned long long *__P) {
+ unsigned __int128 __res = (unsigned __int128)__X * __Y;
+ *__P = (unsigned long long)(__res >> 64);
+ return (unsigned long long)__res;
+}
+
+#ifdef _ARCH_PWR7
+/* popcount and bpermd require power7 minimum. */
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _pdep_u64(unsigned long long __X, unsigned long long __M) {
+ unsigned long __result = 0x0UL;
+ const unsigned long __mask = 0x8000000000000000UL;
+ unsigned long __m = __M;
+ unsigned long __c, __t;
+ unsigned long __p;
+
+ /* The pop-count of the mask gives the number of the bits from
+ source to process. This is also needed to shift bits from the
+ source into the correct position for the result. */
+ __p = 64 - __builtin_popcountl(__M);
+
+ /* The loop is for the number of '1' bits in the mask and clearing
+ each mask bit as it is processed. */
+ while (__m != 0) {
+ __c = __builtin_clzl(__m);
+ __t = __X << (__p - __c);
+ __m ^= (__mask >> __c);
+ __result |= (__t & (__mask >> __c));
+ __p++;
+ }
+ return __result;
+}
+
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _pext_u64(unsigned long long __X, unsigned long long __M) {
+ unsigned long __p = 0x4040404040404040UL; // initial bit permute control
+ const unsigned long __mask = 0x8000000000000000UL;
+ unsigned long __m = __M;
+ unsigned long __c;
+ unsigned long __result;
+
+ /* if the mask is constant and selects 8 bits or less we can use
+ the Power8 Bit permute instruction. */
+ if (__builtin_constant_p(__M) && (__builtin_popcountl(__M) <= 8)) {
+ /* Also if the pext mask is constant, then the popcount is
+ constant, we can evaluate the following loop at compile
+ time and use a constant bit permute vector. */
+ long __i;
+ for (__i = 0; __i < __builtin_popcountl(__M); __i++) {
+ __c = __builtin_clzl(__m);
+ __p = (__p << 8) | __c;
+ __m ^= (__mask >> __c);
+ }
+ __result = __builtin_bpermd(__p, __X);
+ } else {
+ __p = 64 - __builtin_popcountl(__M);
+ __result = 0;
+ /* We could a use a for loop here, but that combined with
+ -funroll-loops can expand to a lot of code. The while
+ loop avoids unrolling and the compiler commons the xor
+ from clearing the mask bit with the (m != 0) test. The
+ result is a more compact loop setup and body. */
+ while (__m != 0) {
+ unsigned long __t;
+ __c = __builtin_clzl(__m);
+ __t = (__X & (__mask >> __c)) >> (__p - __c);
+ __m ^= (__mask >> __c);
+ __result |= (__t);
+ __p++;
+ }
+ }
+ return __result;
+}
+
+/* these 32-bit implementations depend on 64-bit pdep/pext
+ which depend on _ARCH_PWR7. */
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _pdep_u32(unsigned int __X, unsigned int __Y) {
+ return _pdep_u64(__X, __Y);
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _pext_u32(unsigned int __X, unsigned int __Y) {
+ return _pext_u64(__X, __Y);
+}
+#endif /* _ARCH_PWR7 */
+#endif /* __PPC64__ */
+
+#endif /* BMI2INTRIN_H_ */
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/bmiintrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/bmiintrin.h
new file mode 100644
index 000000000000..7d3315958c7b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/bmiintrin.h
@@ -0,0 +1,165 @@
+/*===---- bmiintrin.h - Implementation of BMI intrinsics on PowerPC --------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined X86GPRINTRIN_H_
+#error "Never use <bmiintrin.h> directly; include <x86gprintrin.h> instead."
+#endif
+
+#ifndef BMIINTRIN_H_
+#define BMIINTRIN_H_
+
+extern __inline unsigned short
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __tzcnt_u16(unsigned short __X) {
+ return __builtin_ctz(__X);
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __andn_u32(unsigned int __X, unsigned int __Y) {
+ return (~__X & __Y);
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _bextr_u32(unsigned int __X, unsigned int __P, unsigned int __L) {
+ return ((__X << (32 - (__L + __P))) >> (32 - __L));
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __bextr_u32(unsigned int __X, unsigned int __Y) {
+ unsigned int __P, __L;
+ __P = __Y & 0xFF;
+ __L = (__Y >> 8) & 0xFF;
+ return (_bextr_u32(__X, __P, __L));
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __blsi_u32(unsigned int __X) {
+ return (__X & -__X);
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _blsi_u32(unsigned int __X) {
+ return __blsi_u32(__X);
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __blsmsk_u32(unsigned int __X) {
+ return (__X ^ (__X - 1));
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _blsmsk_u32(unsigned int __X) {
+ return __blsmsk_u32(__X);
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __blsr_u32(unsigned int __X) {
+ return (__X & (__X - 1));
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _blsr_u32(unsigned int __X) {
+ return __blsr_u32(__X);
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __tzcnt_u32(unsigned int __X) {
+ return __builtin_ctz(__X);
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _tzcnt_u32(unsigned int __X) {
+ return __builtin_ctz(__X);
+}
+
+/* use the 64-bit shift, rotate, and count leading zeros instructions
+ for long long. */
+#ifdef __PPC64__
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __andn_u64(unsigned long long __X, unsigned long long __Y) {
+ return (~__X & __Y);
+}
+
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _bextr_u64(unsigned long long __X, unsigned int __P, unsigned int __L) {
+ return ((__X << (64 - (__L + __P))) >> (64 - __L));
+}
+
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __bextr_u64(unsigned long long __X, unsigned long long __Y) {
+ unsigned int __P, __L;
+ __P = __Y & 0xFF;
+ __L = (__Y & 0xFF00) >> 8;
+ return (_bextr_u64(__X, __P, __L));
+}
+
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __blsi_u64(unsigned long long __X) {
+ return __X & -__X;
+}
+
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _blsi_u64(unsigned long long __X) {
+ return __blsi_u64(__X);
+}
+
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __blsmsk_u64(unsigned long long __X) {
+ return (__X ^ (__X - 1));
+}
+
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _blsmsk_u64(unsigned long long __X) {
+ return __blsmsk_u64(__X);
+}
+
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __blsr_u64(unsigned long long __X) {
+ return (__X & (__X - 1));
+}
+
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _blsr_u64(unsigned long long __X) {
+ return __blsr_u64(__X);
+}
+
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __tzcnt_u64(unsigned long long __X) {
+ return __builtin_ctzll(__X);
+}
+
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _tzcnt_u64(unsigned long long __X) {
+ return __builtin_ctzll(__X);
+}
+#endif /* __PPC64__ */
+
+#endif /* BMIINTRIN_H_ */
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/emmintrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/emmintrin.h
index 4dcb8485e2e9..fc18ab9d43b1 100644
--- a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/emmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/emmintrin.h
@@ -29,13 +29,15 @@
efficiently as C language float scalar operations or optimized to
use vector SIMD operations. We recommend this for new applications.
*/
-#error "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#error \
+ "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
#endif
#ifndef EMMINTRIN_H_
#define EMMINTRIN_H_
-#if defined(__linux__) && defined(__ppc64__)
+#if defined(__powerpc64__) && \
+ (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX))
#include <altivec.h>
@@ -44,6 +46,7 @@
/* SSE2 */
typedef __vector double __v2df;
+typedef __vector float __v4f;
typedef __vector long long __v2di;
typedef __vector unsigned long long __v2du;
typedef __vector int __v4si;
@@ -55,523 +58,515 @@ typedef __vector unsigned char __v16qu;
/* The Intel API is flexible enough that we must allow aliasing with other
vector types, and their scalar components. */
-typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__));
-typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__));
+typedef long long __m128i __attribute__((__vector_size__(16), __may_alias__));
+typedef double __m128d __attribute__((__vector_size__(16), __may_alias__));
/* Unaligned version of the same types. */
-typedef long long __m128i_u __attribute__ ((__vector_size__ (16), __may_alias__, __aligned__ (1)));
-typedef double __m128d_u __attribute__ ((__vector_size__ (16), __may_alias__, __aligned__ (1)));
+typedef long long __m128i_u
+ __attribute__((__vector_size__(16), __may_alias__, __aligned__(1)));
+typedef double __m128d_u
+ __attribute__((__vector_size__(16), __may_alias__, __aligned__(1)));
/* Define two value permute mask. */
-#define _MM_SHUFFLE2(x,y) (((x) << 1) | (y))
+#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
/* Create a vector with element 0 as F and the rest zero. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_sd (double __F)
-{
- return __extension__ (__m128d){ __F, 0.0 };
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_sd(double __F) {
+ return __extension__(__m128d){__F, 0.0};
}
/* Create a vector with both elements equal to F. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_pd (double __F)
-{
- return __extension__ (__m128d){ __F, __F };
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_pd(double __F) {
+ return __extension__(__m128d){__F, __F};
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_pd1 (double __F)
-{
- return _mm_set1_pd (__F);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_pd1(double __F) {
+ return _mm_set1_pd(__F);
}
/* Create a vector with the lower value X and upper value W. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_pd (double __W, double __X)
-{
- return __extension__ (__m128d){ __X, __W };
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_pd(double __W, double __X) {
+ return __extension__(__m128d){__X, __W};
}
/* Create a vector with the lower value W and upper value X. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_pd (double __W, double __X)
-{
- return __extension__ (__m128d){ __W, __X };
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_pd(double __W, double __X) {
+ return __extension__(__m128d){__W, __X};
}
/* Create an undefined vector. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_undefined_pd (void)
-{
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_undefined_pd(void) {
__m128d __Y = __Y;
return __Y;
}
/* Create a vector of zeros. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setzero_pd (void)
-{
- return (__m128d) vec_splats (0);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setzero_pd(void) {
+ return (__m128d)vec_splats(0);
}
/* Sets the low DPFP value of A from the low value of B. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_move_sd (__m128d __A, __m128d __B)
-{
- __v2df result = (__v2df) __A;
- result [0] = ((__v2df) __B)[0];
- return (__m128d) result;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_move_sd(__m128d __A, __m128d __B) {
+ __v2df __result = (__v2df)__A;
+ __result[0] = ((__v2df)__B)[0];
+ return (__m128d)__result;
}
/* Load two DPFP values from P. The address must be 16-byte aligned. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_pd (double const *__P)
-{
- return ((__m128d)vec_ld(0, (__v16qu*)__P));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_load_pd(double const *__P) {
+ return ((__m128d)vec_ld(0, (__v16qu *)__P));
}
/* Load two DPFP values from P. The address need not be 16-byte aligned. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadu_pd (double const *__P)
-{
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_loadu_pd(double const *__P) {
return (vec_vsx_ld(0, __P));
}
/* Create a vector with all two elements equal to *P. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load1_pd (double const *__P)
-{
- return (vec_splats (*__P));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_load1_pd(double const *__P) {
+ return (vec_splats(*__P));
}
/* Create a vector with element 0 as *P and the rest zero. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_sd (double const *__P)
-{
- return _mm_set_sd (*__P);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_load_sd(double const *__P) {
+ return _mm_set_sd(*__P);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_pd1 (double const *__P)
-{
- return _mm_load1_pd (__P);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_load_pd1(double const *__P) {
+ return _mm_load1_pd(__P);
}
/* Load two DPFP values in reverse order. The address must be aligned. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadr_pd (double const *__P)
-{
- __v2df __tmp = _mm_load_pd (__P);
- return (__m128d)vec_xxpermdi (__tmp, __tmp, 2);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_loadr_pd(double const *__P) {
+ __v2df __tmp = _mm_load_pd(__P);
+ return (__m128d)vec_xxpermdi(__tmp, __tmp, 2);
}
/* Store two DPFP values. The address must be 16-byte aligned. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_pd (double *__P, __m128d __A)
-{
- vec_st((__v16qu)__A, 0, (__v16qu*)__P);
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_store_pd(double *__P, __m128d __A) {
+ vec_st((__v16qu)__A, 0, (__v16qu *)__P);
}
/* Store two DPFP values. The address need not be 16-byte aligned. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storeu_pd (double *__P, __m128d __A)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_storeu_pd(double *__P, __m128d __A) {
*(__m128d_u *)__P = __A;
}
/* Stores the lower DPFP value. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_sd (double *__P, __m128d __A)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_store_sd(double *__P, __m128d __A) {
*__P = ((__v2df)__A)[0];
}
-extern __inline double __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_f64 (__m128d __A)
-{
+extern __inline double
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsd_f64(__m128d __A) {
return ((__v2df)__A)[0];
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storel_pd (double *__P, __m128d __A)
-{
- _mm_store_sd (__P, __A);
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_storel_pd(double *__P, __m128d __A) {
+ _mm_store_sd(__P, __A);
}
/* Stores the upper DPFP value. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storeh_pd (double *__P, __m128d __A)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_storeh_pd(double *__P, __m128d __A) {
*__P = ((__v2df)__A)[1];
}
/* Store the lower DPFP value across two words.
The address must be 16-byte aligned. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store1_pd (double *__P, __m128d __A)
-{
- _mm_store_pd (__P, vec_splat (__A, 0));
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_store1_pd(double *__P, __m128d __A) {
+ _mm_store_pd(__P, vec_splat(__A, 0));
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_pd1 (double *__P, __m128d __A)
-{
- _mm_store1_pd (__P, __A);
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_store_pd1(double *__P, __m128d __A) {
+ _mm_store1_pd(__P, __A);
}
/* Store two DPFP values in reverse order. The address must be aligned. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storer_pd (double *__P, __m128d __A)
-{
- _mm_store_pd (__P, vec_xxpermdi (__A, __A, 2));
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_storer_pd(double *__P, __m128d __A) {
+ _mm_store_pd(__P, vec_xxpermdi(__A, __A, 2));
}
/* Intel intrinsic. */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi128_si64 (__m128i __A)
-{
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi128_si64(__m128i __A) {
return ((__v2di)__A)[0];
}
/* Microsoft intrinsic. */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi128_si64x (__m128i __A)
-{
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi128_si64x(__m128i __A) {
return ((__v2di)__A)[0];
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_pd (__m128d __A, __m128d __B)
-{
- return (__m128d) ((__v2df)__A + (__v2df)__B);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_pd(__m128d __A, __m128d __B) {
+ return (__m128d)((__v2df)__A + (__v2df)__B);
}
/* Add the lower double-precision (64-bit) floating-point element in
a and b, store the result in the lower element of dst, and copy
the upper element from a to the upper element of dst. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_sd (__m128d __A, __m128d __B)
-{
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_sd(__m128d __A, __m128d __B) {
__A[0] = __A[0] + __B[0];
return (__A);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_pd (__m128d __A, __m128d __B)
-{
- return (__m128d) ((__v2df)__A - (__v2df)__B);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_pd(__m128d __A, __m128d __B) {
+ return (__m128d)((__v2df)__A - (__v2df)__B);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_sd (__m128d __A, __m128d __B)
-{
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_sd(__m128d __A, __m128d __B) {
__A[0] = __A[0] - __B[0];
return (__A);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_pd (__m128d __A, __m128d __B)
-{
- return (__m128d) ((__v2df)__A * (__v2df)__B);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mul_pd(__m128d __A, __m128d __B) {
+ return (__m128d)((__v2df)__A * (__v2df)__B);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_sd (__m128d __A, __m128d __B)
-{
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mul_sd(__m128d __A, __m128d __B) {
__A[0] = __A[0] * __B[0];
return (__A);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_div_pd (__m128d __A, __m128d __B)
-{
- return (__m128d) ((__v2df)__A / (__v2df)__B);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_div_pd(__m128d __A, __m128d __B) {
+ return (__m128d)((__v2df)__A / (__v2df)__B);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_div_sd (__m128d __A, __m128d __B)
-{
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_div_sd(__m128d __A, __m128d __B) {
__A[0] = __A[0] / __B[0];
return (__A);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sqrt_pd (__m128d __A)
-{
- return (vec_sqrt (__A));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sqrt_pd(__m128d __A) {
+ return (vec_sqrt(__A));
}
/* Return pair {sqrt (B[0]), A[1]}. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sqrt_sd (__m128d __A, __m128d __B)
-{
- __v2df c;
- c = vec_sqrt ((__v2df) _mm_set1_pd (__B[0]));
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sqrt_sd(__m128d __A, __m128d __B) {
+ __v2df __c;
+ __c = vec_sqrt((__v2df)_mm_set1_pd(__B[0]));
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_pd (__m128d __A, __m128d __B)
-{
- return (vec_min (__A, __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_min_pd(__m128d __A, __m128d __B) {
+ return (vec_min(__A, __B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_sd (__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
- c = vec_min (a, b);
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_min_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
+ __c = vec_min(__a, __b);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_pd (__m128d __A, __m128d __B)
-{
- return (vec_max (__A, __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_max_pd(__m128d __A, __m128d __B) {
+ return (vec_max(__A, __B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_sd (__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
- c = vec_max (a, b);
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_max_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
+ __c = vec_max(__a, __b);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_pd (__m128d __A, __m128d __B)
-{
- return ((__m128d)vec_cmpeq ((__v2df) __A, (__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_pd(__m128d __A, __m128d __B) {
+ return ((__m128d)vec_cmpeq((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_pd (__m128d __A, __m128d __B)
-{
- return ((__m128d)vec_cmplt ((__v2df) __A, (__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmplt_pd(__m128d __A, __m128d __B) {
+ return ((__m128d)vec_cmplt((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmple_pd (__m128d __A, __m128d __B)
-{
- return ((__m128d)vec_cmple ((__v2df) __A, (__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmple_pd(__m128d __A, __m128d __B) {
+ return ((__m128d)vec_cmple((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_pd (__m128d __A, __m128d __B)
-{
- return ((__m128d)vec_cmpgt ((__v2df) __A, (__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_pd(__m128d __A, __m128d __B) {
+ return ((__m128d)vec_cmpgt((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpge_pd (__m128d __A, __m128d __B)
-{
- return ((__m128d)vec_cmpge ((__v2df) __A,(__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpge_pd(__m128d __A, __m128d __B) {
+ return ((__m128d)vec_cmpge((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpneq_pd (__m128d __A, __m128d __B)
-{
- __v2df temp = (__v2df) vec_cmpeq ((__v2df) __A, (__v2df)__B);
- return ((__m128d)vec_nor (temp, temp));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpneq_pd(__m128d __A, __m128d __B) {
+ __v2df __temp = (__v2df)vec_cmpeq((__v2df)__A, (__v2df)__B);
+ return ((__m128d)vec_nor(__temp, __temp));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnlt_pd (__m128d __A, __m128d __B)
-{
- return ((__m128d)vec_cmpge ((__v2df) __A, (__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnlt_pd(__m128d __A, __m128d __B) {
+ return ((__m128d)vec_cmpge((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnle_pd (__m128d __A, __m128d __B)
-{
- return ((__m128d)vec_cmpgt ((__v2df) __A, (__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnle_pd(__m128d __A, __m128d __B) {
+ return ((__m128d)vec_cmpgt((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpngt_pd (__m128d __A, __m128d __B)
-{
- return ((__m128d)vec_cmple ((__v2df) __A, (__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpngt_pd(__m128d __A, __m128d __B) {
+ return ((__m128d)vec_cmple((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnge_pd (__m128d __A, __m128d __B)
-{
- return ((__m128d)vec_cmplt ((__v2df) __A, (__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnge_pd(__m128d __A, __m128d __B) {
+ return ((__m128d)vec_cmplt((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpord_pd (__m128d __A, __m128d __B)
-{
-#if _ARCH_PWR8
- __v2du c, d;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpord_pd(__m128d __A, __m128d __B) {
+ __v2du __c, __d;
/* Compare against self will return false (0's) if NAN. */
- c = (__v2du)vec_cmpeq (__A, __A);
- d = (__v2du)vec_cmpeq (__B, __B);
-#else
- __v2du a, b;
- __v2du c, d;
- const __v2du double_exp_mask = {0x7ff0000000000000, 0x7ff0000000000000};
- a = (__v2du)vec_abs ((__v2df)__A);
- b = (__v2du)vec_abs ((__v2df)__B);
- c = (__v2du)vec_cmpgt (double_exp_mask, a);
- d = (__v2du)vec_cmpgt (double_exp_mask, b);
-#endif
+ __c = (__v2du)vec_cmpeq(__A, __A);
+ __d = (__v2du)vec_cmpeq(__B, __B);
/* A != NAN and B != NAN. */
- return ((__m128d)vec_and(c, d));
+ return ((__m128d)vec_and(__c, __d));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpunord_pd (__m128d __A, __m128d __B)
-{
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpunord_pd(__m128d __A, __m128d __B) {
#if _ARCH_PWR8
- __v2du c, d;
+ __v2du __c, __d;
/* Compare against self will return false (0's) if NAN. */
- c = (__v2du)vec_cmpeq ((__v2df)__A, (__v2df)__A);
- d = (__v2du)vec_cmpeq ((__v2df)__B, (__v2df)__B);
+ __c = (__v2du)vec_cmpeq((__v2df)__A, (__v2df)__A);
+ __d = (__v2du)vec_cmpeq((__v2df)__B, (__v2df)__B);
/* A == NAN OR B == NAN converts too:
NOT(A != NAN) OR NOT(B != NAN). */
- c = vec_nor (c, c);
- return ((__m128d)vec_orc(c, d));
+ __c = vec_nor(__c, __c);
+ return ((__m128d)vec_orc(__c, __d));
#else
- __v2du c, d;
+ __v2du __c, __d;
/* Compare against self will return false (0's) if NAN. */
- c = (__v2du)vec_cmpeq ((__v2df)__A, (__v2df)__A);
- d = (__v2du)vec_cmpeq ((__v2df)__B, (__v2df)__B);
+ __c = (__v2du)vec_cmpeq((__v2df)__A, (__v2df)__A);
+ __d = (__v2du)vec_cmpeq((__v2df)__B, (__v2df)__B);
/* Convert the true ('1's) is NAN. */
- c = vec_nor (c, c);
- d = vec_nor (d, d);
- return ((__m128d)vec_or(c, d));
+ __c = vec_nor(__c, __c);
+ __d = vec_nor(__d, __d);
+ return ((__m128d)vec_or(__c, __d));
#endif
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_sd(__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
/* PowerISA VSX does not allow partial (for just lower double)
results. So to insure we don't generate spurious exceptions
(from the upper double values) we splat the lower double
before we do the operation. */
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
- c = (__v2df) vec_cmpeq(a, b);
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
+ __c = (__v2df)vec_cmpeq(__a, __b);
/* Then we merge the lower double result with the original upper
double from __A. */
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_sd (__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
- c = (__v2df) vec_cmplt(a, b);
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmple_sd (__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
- c = (__v2df) vec_cmple(a, b);
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_sd (__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
- c = (__v2df) vec_cmpgt(a, b);
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpge_sd (__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
- c = (__v2df) vec_cmpge(a, b);
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpneq_sd (__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
- c = (__v2df) vec_cmpeq(a, b);
- c = vec_nor (c, c);
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnlt_sd (__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmplt_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
+ __c = (__v2df)vec_cmplt(__a, __b);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmple_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
+ __c = (__v2df)vec_cmple(__a, __b);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
+ __c = (__v2df)vec_cmpgt(__a, __b);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpge_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
+ __c = (__v2df)vec_cmpge(__a, __b);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpneq_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
+ __c = (__v2df)vec_cmpeq(__a, __b);
+ __c = vec_nor(__c, __c);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnlt_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
/* Not less than is just greater than or equal. */
- c = (__v2df) vec_cmpge(a, b);
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
+ __c = (__v2df)vec_cmpge(__a, __b);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnle_sd (__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnle_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
/* Not less than or equal is just greater than. */
- c = (__v2df) vec_cmpge(a, b);
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
+ __c = (__v2df)vec_cmpge(__a, __b);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpngt_sd (__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpngt_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
/* Not greater than is just less than or equal. */
- c = (__v2df) vec_cmple(a, b);
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
+ __c = (__v2df)vec_cmple(__a, __b);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnge_sd (__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnge_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
/* Not greater than or equal is just less than. */
- c = (__v2df) vec_cmplt(a, b);
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
+ __c = (__v2df)vec_cmplt(__a, __b);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpord_sd (__m128d __A, __m128d __B)
-{
- __v2df r;
- r = (__v2df)_mm_cmpord_pd (vec_splats (__A[0]), vec_splats (__B[0]));
- return (__m128d) _mm_setr_pd (r[0], ((__v2df)__A)[1]);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpord_sd(__m128d __A, __m128d __B) {
+ __v2df __r;
+ __r = (__v2df)_mm_cmpord_pd(vec_splats(__A[0]), vec_splats(__B[0]));
+ return (__m128d)_mm_setr_pd(__r[0], ((__v2df)__A)[1]);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpunord_sd (__m128d __A, __m128d __B)
-{
- __v2df r;
- r = _mm_cmpunord_pd (vec_splats (__A[0]), vec_splats (__B[0]));
- return (__m128d) _mm_setr_pd (r[0], __A[1]);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpunord_sd(__m128d __A, __m128d __B) {
+ __v2df __r;
+ __r = _mm_cmpunord_pd(vec_splats(__A[0]), vec_splats(__B[0]));
+ return (__m128d)_mm_setr_pd(__r[0], __A[1]);
}
/* FIXME
@@ -581,1744 +576,1694 @@ _mm_cmpunord_sd (__m128d __A, __m128d __B)
Technically __mm_comieq_sp et all should be using the ordered
compare and signal for QNaNs. The __mm_ucomieq_sd et all should
be OK. */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comieq_sd (__m128d __A, __m128d __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comieq_sd(__m128d __A, __m128d __B) {
return (__A[0] == __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comilt_sd (__m128d __A, __m128d __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comilt_sd(__m128d __A, __m128d __B) {
return (__A[0] < __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comile_sd (__m128d __A, __m128d __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comile_sd(__m128d __A, __m128d __B) {
return (__A[0] <= __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comigt_sd (__m128d __A, __m128d __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comigt_sd(__m128d __A, __m128d __B) {
return (__A[0] > __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comige_sd (__m128d __A, __m128d __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comige_sd(__m128d __A, __m128d __B) {
return (__A[0] >= __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comineq_sd (__m128d __A, __m128d __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comineq_sd(__m128d __A, __m128d __B) {
return (__A[0] != __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomieq_sd (__m128d __A, __m128d __B)
-{
- return (__A[0] == __B[0]);
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomieq_sd(__m128d __A, __m128d __B) {
+ return (__A[0] == __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomilt_sd (__m128d __A, __m128d __B)
-{
- return (__A[0] < __B[0]);
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomilt_sd(__m128d __A, __m128d __B) {
+ return (__A[0] < __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomile_sd (__m128d __A, __m128d __B)
-{
- return (__A[0] <= __B[0]);
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomile_sd(__m128d __A, __m128d __B) {
+ return (__A[0] <= __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomigt_sd (__m128d __A, __m128d __B)
-{
- return (__A[0] > __B[0]);
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomigt_sd(__m128d __A, __m128d __B) {
+ return (__A[0] > __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomige_sd (__m128d __A, __m128d __B)
-{
- return (__A[0] >= __B[0]);
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomige_sd(__m128d __A, __m128d __B) {
+ return (__A[0] >= __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomineq_sd (__m128d __A, __m128d __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomineq_sd(__m128d __A, __m128d __B) {
return (__A[0] != __B[0]);
}
/* Create a vector of Qi, where i is the element number. */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_epi64x (long long __q1, long long __q0)
-{
- return __extension__ (__m128i)(__v2di){ __q0, __q1 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_epi64x(long long __q1, long long __q0) {
+ return __extension__(__m128i)(__v2di){__q0, __q1};
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_epi64 (__m64 __q1, __m64 __q0)
-{
- return _mm_set_epi64x ((long long)__q1, (long long)__q0);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_epi64(__m64 __q1, __m64 __q0) {
+ return _mm_set_epi64x((long long)__q1, (long long)__q0);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_epi32 (int __q3, int __q2, int __q1, int __q0)
-{
- return __extension__ (__m128i)(__v4si){ __q0, __q1, __q2, __q3 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_epi32(int __q3, int __q2, int __q1, int __q0) {
+ return __extension__(__m128i)(__v4si){__q0, __q1, __q2, __q3};
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_epi16 (short __q7, short __q6, short __q5, short __q4,
- short __q3, short __q2, short __q1, short __q0)
-{
- return __extension__ (__m128i)(__v8hi){
- __q0, __q1, __q2, __q3, __q4, __q5, __q6, __q7 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_epi16(short __q7, short __q6, short __q5, short __q4, short __q3,
+ short __q2, short __q1, short __q0) {
+ return __extension__(__m128i)(__v8hi){__q0, __q1, __q2, __q3,
+ __q4, __q5, __q6, __q7};
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_epi8 (char __q15, char __q14, char __q13, char __q12,
- char __q11, char __q10, char __q09, char __q08,
- char __q07, char __q06, char __q05, char __q04,
- char __q03, char __q02, char __q01, char __q00)
-{
- return __extension__ (__m128i)(__v16qi){
- __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07,
- __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15
- };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_epi8(char __q15, char __q14, char __q13, char __q12, char __q11,
+ char __q10, char __q09, char __q08, char __q07, char __q06,
+ char __q05, char __q04, char __q03, char __q02, char __q01,
+ char __q00) {
+ return __extension__(__m128i)(__v16qi){
+ __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07,
+ __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15};
}
/* Set all of the elements of the vector to A. */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_epi64x (long long __A)
-{
- return _mm_set_epi64x (__A, __A);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_epi64x(long long __A) {
+ return _mm_set_epi64x(__A, __A);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_epi64 (__m64 __A)
-{
- return _mm_set_epi64 (__A, __A);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_epi64(__m64 __A) {
+ return _mm_set_epi64(__A, __A);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_epi32 (int __A)
-{
- return _mm_set_epi32 (__A, __A, __A, __A);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_epi32(int __A) {
+ return _mm_set_epi32(__A, __A, __A, __A);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_epi16 (short __A)
-{
- return _mm_set_epi16 (__A, __A, __A, __A, __A, __A, __A, __A);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_epi16(short __A) {
+ return _mm_set_epi16(__A, __A, __A, __A, __A, __A, __A, __A);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_epi8 (char __A)
-{
- return _mm_set_epi8 (__A, __A, __A, __A, __A, __A, __A, __A,
- __A, __A, __A, __A, __A, __A, __A, __A);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_epi8(char __A) {
+ return _mm_set_epi8(__A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A,
+ __A, __A, __A, __A, __A);
}
/* Create a vector of Qi, where i is the element number.
The parameter order is reversed from the _mm_set_epi* functions. */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_epi64 (__m64 __q0, __m64 __q1)
-{
- return _mm_set_epi64 (__q1, __q0);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_epi64(__m64 __q0, __m64 __q1) {
+ return _mm_set_epi64(__q1, __q0);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_epi32 (int __q0, int __q1, int __q2, int __q3)
-{
- return _mm_set_epi32 (__q3, __q2, __q1, __q0);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_epi32(int __q0, int __q1, int __q2, int __q3) {
+ return _mm_set_epi32(__q3, __q2, __q1, __q0);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_epi16 (short __q0, short __q1, short __q2, short __q3,
- short __q4, short __q5, short __q6, short __q7)
-{
- return _mm_set_epi16 (__q7, __q6, __q5, __q4, __q3, __q2, __q1, __q0);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_epi16(short __q0, short __q1, short __q2, short __q3, short __q4,
+ short __q5, short __q6, short __q7) {
+ return _mm_set_epi16(__q7, __q6, __q5, __q4, __q3, __q2, __q1, __q0);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_epi8 (char __q00, char __q01, char __q02, char __q03,
- char __q04, char __q05, char __q06, char __q07,
- char __q08, char __q09, char __q10, char __q11,
- char __q12, char __q13, char __q14, char __q15)
-{
- return _mm_set_epi8 (__q15, __q14, __q13, __q12, __q11, __q10, __q09, __q08,
- __q07, __q06, __q05, __q04, __q03, __q02, __q01, __q00);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_epi8(char __q00, char __q01, char __q02, char __q03, char __q04,
+ char __q05, char __q06, char __q07, char __q08, char __q09,
+ char __q10, char __q11, char __q12, char __q13, char __q14,
+ char __q15) {
+ return _mm_set_epi8(__q15, __q14, __q13, __q12, __q11, __q10, __q09, __q08,
+ __q07, __q06, __q05, __q04, __q03, __q02, __q01, __q00);
}
/* Create a vector with element 0 as *P and the rest zero. */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_si128 (__m128i const *__P)
-{
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_load_si128(__m128i const *__P) {
return *__P;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadu_si128 (__m128i_u const *__P)
-{
- return (__m128i) (vec_vsx_ld(0, (signed int const *)__P));
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_loadu_si128(__m128i_u const *__P) {
+ return (__m128i)(vec_vsx_ld(0, (signed int const *)__P));
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadl_epi64 (__m128i_u const *__P)
-{
- return _mm_set_epi64 ((__m64)0LL, *(__m64 *)__P);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_loadl_epi64(__m128i_u const *__P) {
+ return _mm_set_epi64((__m64)0LL, *(__m64 *)__P);
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_si128 (__m128i *__P, __m128i __B)
-{
- vec_st ((__v16qu) __B, 0, (__v16qu*)__P);
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_store_si128(__m128i *__P, __m128i __B) {
+ vec_st((__v16qu)__B, 0, (__v16qu *)__P);
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storeu_si128 (__m128i_u *__P, __m128i __B)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_storeu_si128(__m128i_u *__P, __m128i __B) {
*__P = __B;
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storel_epi64 (__m128i_u *__P, __m128i __B)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_storel_epi64(__m128i_u *__P, __m128i __B) {
*(long long *)__P = ((__v2di)__B)[0];
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movepi64_pi64 (__m128i_u __B)
-{
- return (__m64) ((__v2di)__B)[0];
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_movepi64_pi64(__m128i_u __B) {
+ return (__m64)((__v2di)__B)[0];
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movpi64_epi64 (__m64 __A)
-{
- return _mm_set_epi64 ((__m64)0LL, __A);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_movpi64_epi64(__m64 __A) {
+ return _mm_set_epi64((__m64)0LL, __A);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_move_epi64 (__m128i __A)
-{
- return _mm_set_epi64 ((__m64)0LL, (__m64)__A[0]);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_move_epi64(__m128i __A) {
+ return _mm_set_epi64((__m64)0LL, (__m64)__A[0]);
}
/* Create an undefined vector. */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_undefined_si128 (void)
-{
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_undefined_si128(void) {
__m128i __Y = __Y;
return __Y;
}
/* Create a vector of zeros. */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setzero_si128 (void)
-{
- return __extension__ (__m128i)(__v4si){ 0, 0, 0, 0 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setzero_si128(void) {
+ return __extension__(__m128i)(__v4si){0, 0, 0, 0};
}
#ifdef _ARCH_PWR8
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtepi32_pd (__m128i __A)
-{
- __v2di val;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepi32_pd(__m128i __A) {
+ __v2di __val;
/* For LE need to generate Vector Unpack Low Signed Word.
Which is generated from unpackh. */
- val = (__v2di)vec_unpackh ((__v4si)__A);
+ __val = (__v2di)vec_unpackh((__v4si)__A);
- return (__m128d)vec_ctf (val, 0);
+ return (__m128d)vec_ctf(__val, 0);
}
#endif
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtepi32_ps (__m128i __A)
-{
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepi32_ps(__m128i __A) {
return ((__m128)vec_ctf((__v4si)__A, 0));
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpd_epi32 (__m128d __A)
-{
- __v2df rounded = vec_rint (__A);
- __v4si result, temp;
- const __v4si vzero =
- { 0, 0, 0, 0 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtpd_epi32(__m128d __A) {
+ __v2df __rounded = vec_rint(__A);
+ __v4si __result, __temp;
+ const __v4si __vzero = {0, 0, 0, 0};
/* VSX Vector truncate Double-Precision to integer and Convert to
Signed Integer Word format with Saturate. */
- __asm__(
- "xvcvdpsxws %x0,%x1"
- : "=wa" (temp)
- : "wa" (rounded)
- : );
+ __asm__("xvcvdpsxws %x0,%x1" : "=wa"(__temp) : "wa"(__rounded) :);
#ifdef _ARCH_PWR8
- temp = vec_mergeo (temp, temp);
- result = (__v4si) vec_vpkudum ((__vector long long) temp,
- (__vector long long) vzero);
+#ifdef __LITTLE_ENDIAN__
+ __temp = vec_mergeo(__temp, __temp);
+#else
+ __temp = vec_mergee(__temp, __temp);
+#endif
+ __result = (__v4si)vec_vpkudum((__vector long long)__temp,
+ (__vector long long)__vzero);
#else
{
- const __v16qu pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
- 0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f };
- result = (__v4si) vec_perm ((__v16qu) temp, (__v16qu) vzero, pkperm);
+ const __v16qu __pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
+ 0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f};
+ __result = (__v4si)vec_perm((__v16qu)__temp, (__v16qu)__vzero, __pkperm);
}
#endif
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpd_pi32 (__m128d __A)
-{
- __m128i result = _mm_cvtpd_epi32(__A);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtpd_pi32(__m128d __A) {
+ __m128i __result = _mm_cvtpd_epi32(__A);
- return (__m64) result[0];
+ return (__m64)__result[0];
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpd_ps (__m128d __A)
-{
- __v4sf result;
- __v4si temp;
- const __v4si vzero = { 0, 0, 0, 0 };
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtpd_ps(__m128d __A) {
+ __v4sf __result;
+ __v4si __temp;
+ const __v4si __vzero = {0, 0, 0, 0};
- __asm__(
- "xvcvdpsp %x0,%x1"
- : "=wa" (temp)
- : "wa" (__A)
- : );
+ __asm__("xvcvdpsp %x0,%x1" : "=wa"(__temp) : "wa"(__A) :);
#ifdef _ARCH_PWR8
- temp = vec_mergeo (temp, temp);
- result = (__v4sf) vec_vpkudum ((__vector long long) temp,
- (__vector long long) vzero);
+#ifdef __LITTLE_ENDIAN__
+ __temp = vec_mergeo(__temp, __temp);
+#else
+ __temp = vec_mergee(__temp, __temp);
+#endif
+ __result = (__v4sf)vec_vpkudum((__vector long long)__temp,
+ (__vector long long)__vzero);
#else
{
- const __v16qu pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
- 0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f };
- result = (__v4sf) vec_perm ((__v16qu) temp, (__v16qu) vzero, pkperm);
+ const __v16qu __pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
+ 0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f};
+ __result = (__v4sf)vec_perm((__v16qu)__temp, (__v16qu)__vzero, __pkperm);
}
#endif
- return ((__m128)result);
+ return ((__m128)__result);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttpd_epi32 (__m128d __A)
-{
- __v4si result;
- __v4si temp;
- const __v4si vzero = { 0, 0, 0, 0 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvttpd_epi32(__m128d __A) {
+ __v4si __result;
+ __v4si __temp;
+ const __v4si __vzero = {0, 0, 0, 0};
/* VSX Vector truncate Double-Precision to integer and Convert to
Signed Integer Word format with Saturate. */
- __asm__(
- "xvcvdpsxws %x0,%x1"
- : "=wa" (temp)
- : "wa" (__A)
- : );
+ __asm__("xvcvdpsxws %x0,%x1" : "=wa"(__temp) : "wa"(__A) :);
#ifdef _ARCH_PWR8
- temp = vec_mergeo (temp, temp);
- result = (__v4si) vec_vpkudum ((__vector long long) temp,
- (__vector long long) vzero);
+#ifdef __LITTLE_ENDIAN__
+ __temp = vec_mergeo(__temp, __temp);
+#else
+ __temp = vec_mergee(__temp, __temp);
+#endif
+ __result = (__v4si)vec_vpkudum((__vector long long)__temp,
+ (__vector long long)__vzero);
#else
{
- const __v16qu pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
- 0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f };
- result = (__v4si) vec_perm ((__v16qu) temp, (__v16qu) vzero, pkperm);
+ const __v16qu __pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
+ 0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f};
+ __result = (__v4si)vec_perm((__v16qu)__temp, (__v16qu)__vzero, __pkperm);
}
#endif
- return ((__m128i) result);
+ return ((__m128i)__result);
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttpd_pi32 (__m128d __A)
-{
- __m128i result = _mm_cvttpd_epi32 (__A);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvttpd_pi32(__m128d __A) {
+ __m128i __result = _mm_cvttpd_epi32(__A);
- return (__m64) result[0];
+ return (__m64)__result[0];
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi128_si32 (__m128i __A)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi128_si32(__m128i __A) {
return ((__v4si)__A)[0];
}
#ifdef _ARCH_PWR8
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi32_pd (__m64 __A)
-{
- __v4si temp;
- __v2di tmp2;
- __v2df result;
-
- temp = (__v4si)vec_splats (__A);
- tmp2 = (__v2di)vec_unpackl (temp);
- result = vec_ctf ((__vector signed long long) tmp2, 0);
- return (__m128d)result;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtpi32_pd(__m64 __A) {
+ __v4si __temp;
+ __v2di __tmp2;
+ __v4f __result;
+
+ __temp = (__v4si)vec_splats(__A);
+ __tmp2 = (__v2di)vec_unpackl(__temp);
+ __result = vec_ctf((__vector signed long long)__tmp2, 0);
+ return (__m128d)__result;
}
#endif
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_epi32 (__m128 __A)
-{
- __v4sf rounded;
- __v4si result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtps_epi32(__m128 __A) {
+ __v4sf __rounded;
+ __v4si __result;
- rounded = vec_rint((__v4sf) __A);
- result = vec_cts (rounded, 0);
- return (__m128i) result;
+ __rounded = vec_rint((__v4sf)__A);
+ __result = vec_cts(__rounded, 0);
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttps_epi32 (__m128 __A)
-{
- __v4si result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvttps_epi32(__m128 __A) {
+ __v4si __result;
- result = vec_cts ((__v4sf) __A, 0);
- return (__m128i) result;
+ __result = vec_cts((__v4sf)__A, 0);
+ return (__m128i)__result;
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pd (__m128 __A)
-{
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtps_pd(__m128 __A) {
/* Check if vec_doubleh is defined by <altivec.h>. If so use that. */
#ifdef vec_doubleh
- return (__m128d) vec_doubleh ((__v4sf)__A);
+ return (__m128d)vec_doubleh((__v4sf)__A);
#else
/* Otherwise the compiler is not current and so need to generate the
equivalent code. */
- __v4sf a = (__v4sf)__A;
- __v4sf temp;
- __v2df result;
+ __v4sf __a = (__v4sf)__A;
+ __v4sf __temp;
+ __v2df __result;
#ifdef __LITTLE_ENDIAN__
/* The input float values are in elements {[0], [1]} but the convert
instruction needs them in elements {[1], [3]}, So we use two
shift left double vector word immediates to get the elements
lined up. */
- temp = __builtin_vsx_xxsldwi (a, a, 3);
- temp = __builtin_vsx_xxsldwi (a, temp, 2);
+ __temp = __builtin_vsx_xxsldwi(__a, __a, 3);
+ __temp = __builtin_vsx_xxsldwi(__a, __temp, 2);
#else
/* The input float values are in elements {[0], [1]} but the convert
instruction needs them in elements {[0], [2]}, So we use two
shift left double vector word immediates to get the elements
lined up. */
- temp = vec_vmrghw (a, a);
+ __temp = vec_vmrghw(__a, __a);
#endif
- __asm__(
- " xvcvspdp %x0,%x1"
- : "=wa" (result)
- : "wa" (temp)
- : );
- return (__m128d) result;
+ __asm__(" xvcvspdp %x0,%x1" : "=wa"(__result) : "wa"(__temp) :);
+ return (__m128d)__result;
#endif
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_si32 (__m128d __A)
-{
- __v2df rounded = vec_rint((__v2df) __A);
- int result = ((__v2df)rounded)[0];
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsd_si32(__m128d __A) {
+ __v2df __rounded = vec_rint((__v2df)__A);
+ int __result = ((__v2df)__rounded)[0];
- return result;
+ return __result;
}
/* Intel intrinsic. */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_si64 (__m128d __A)
-{
- __v2df rounded = vec_rint ((__v2df) __A );
- long long result = ((__v2df) rounded)[0];
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsd_si64(__m128d __A) {
+ __v2df __rounded = vec_rint((__v2df)__A);
+ long long __result = ((__v2df)__rounded)[0];
- return result;
+ return __result;
}
/* Microsoft intrinsic. */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_si64x (__m128d __A)
-{
- return _mm_cvtsd_si64 ((__v2df)__A);
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsd_si64x(__m128d __A) {
+ return _mm_cvtsd_si64((__v2df)__A);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsd_si32 (__m128d __A)
-{
- int result = ((__v2df)__A)[0];
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvttsd_si32(__m128d __A) {
+ int __result = ((__v2df)__A)[0];
- return result;
+ return __result;
}
/* Intel intrinsic. */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsd_si64 (__m128d __A)
-{
- long long result = ((__v2df)__A)[0];
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvttsd_si64(__m128d __A) {
+ long long __result = ((__v2df)__A)[0];
- return result;
+ return __result;
}
/* Microsoft intrinsic. */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsd_si64x (__m128d __A)
-{
- return _mm_cvttsd_si64 (__A);
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvttsd_si64x(__m128d __A) {
+ return _mm_cvttsd_si64(__A);
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_ss (__m128 __A, __m128d __B)
-{
- __v4sf result = (__v4sf)__A;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsd_ss(__m128 __A, __m128d __B) {
+ __v4sf __result = (__v4sf)__A;
#ifdef __LITTLE_ENDIAN__
- __v4sf temp_s;
+ __v4sf __temp_s;
/* Copy double element[0] to element [1] for conversion. */
- __v2df temp_b = vec_splat((__v2df)__B, 0);
+ __v2df __temp_b = vec_splat((__v2df)__B, 0);
/* Pre-rotate __A left 3 (logically right 1) elements. */
- result = __builtin_vsx_xxsldwi (result, result, 3);
+ __result = __builtin_vsx_xxsldwi(__result, __result, 3);
/* Convert double to single float scalar in a vector. */
- __asm__(
- "xscvdpsp %x0,%x1"
- : "=wa" (temp_s)
- : "wa" (temp_b)
- : );
+ __asm__("xscvdpsp %x0,%x1" : "=wa"(__temp_s) : "wa"(__temp_b) :);
/* Shift the resulting scalar into vector element [0]. */
- result = __builtin_vsx_xxsldwi (result, temp_s, 1);
+ __result = __builtin_vsx_xxsldwi(__result, __temp_s, 1);
#else
- result [0] = ((__v2df)__B)[0];
+ __result[0] = ((__v2df)__B)[0];
#endif
- return (__m128) result;
+ return (__m128)__result;
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi32_sd (__m128d __A, int __B)
-{
- __v2df result = (__v2df)__A;
- double db = __B;
- result [0] = db;
- return (__m128d)result;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi32_sd(__m128d __A, int __B) {
+ __v2df __result = (__v2df)__A;
+ double __db = __B;
+ __result[0] = __db;
+ return (__m128d)__result;
}
/* Intel intrinsic. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64_sd (__m128d __A, long long __B)
-{
- __v2df result = (__v2df)__A;
- double db = __B;
- result [0] = db;
- return (__m128d)result;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64_sd(__m128d __A, long long __B) {
+ __v2df __result = (__v2df)__A;
+ double __db = __B;
+ __result[0] = __db;
+ return (__m128d)__result;
}
/* Microsoft intrinsic. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64x_sd (__m128d __A, long long __B)
-{
- return _mm_cvtsi64_sd (__A, __B);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64x_sd(__m128d __A, long long __B) {
+ return _mm_cvtsi64_sd(__A, __B);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_sd (__m128d __A, __m128 __B)
-{
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtss_sd(__m128d __A, __m128 __B) {
#ifdef __LITTLE_ENDIAN__
/* Use splat to move element [0] into position for the convert. */
- __v4sf temp = vec_splat ((__v4sf)__B, 0);
- __v2df res;
+ __v4sf __temp = vec_splat((__v4sf)__B, 0);
+ __v2df __res;
/* Convert single float scalar to double in a vector. */
- __asm__(
- "xscvspdp %x0,%x1"
- : "=wa" (res)
- : "wa" (temp)
- : );
- return (__m128d) vec_mergel (res, (__v2df)__A);
+ __asm__("xscvspdp %x0,%x1" : "=wa"(__res) : "wa"(__temp) :);
+ return (__m128d)vec_mergel(__res, (__v2df)__A);
#else
- __v2df res = (__v2df)__A;
- res [0] = ((__v4sf)__B) [0];
- return (__m128d) res;
+ __v2df __res = (__v2df)__A;
+ __res[0] = ((__v4sf)__B)[0];
+ return (__m128d)__res;
#endif
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shuffle_pd(__m128d __A, __m128d __B, const int __mask)
-{
- __vector double result;
- const int litmsk = __mask & 0x3;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_shuffle_pd(__m128d __A, __m128d __B, const int __mask) {
+ __vector double __result;
+ const int __litmsk = __mask & 0x3;
- if (litmsk == 0)
- result = vec_mergeh (__A, __B);
+ if (__litmsk == 0)
+ __result = vec_mergeh(__A, __B);
#if __GNUC__ < 6
- else if (litmsk == 1)
- result = vec_xxpermdi (__B, __A, 2);
- else if (litmsk == 2)
- result = vec_xxpermdi (__B, __A, 1);
+ else if (__litmsk == 1)
+ __result = vec_xxpermdi(__B, __A, 2);
+ else if (__litmsk == 2)
+ __result = vec_xxpermdi(__B, __A, 1);
#else
- else if (litmsk == 1)
- result = vec_xxpermdi (__A, __B, 2);
- else if (litmsk == 2)
- result = vec_xxpermdi (__A, __B, 1);
+ else if (__litmsk == 1)
+ __result = vec_xxpermdi(__A, __B, 2);
+ else if (__litmsk == 2)
+ __result = vec_xxpermdi(__A, __B, 1);
#endif
else
- result = vec_mergel (__A, __B);
+ __result = vec_mergel(__A, __B);
- return result;
+ return __result;
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_pd (__m128d __A, __m128d __B)
-{
- return (__m128d) vec_mergel ((__v2df)__A, (__v2df)__B);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_pd(__m128d __A, __m128d __B) {
+ return (__m128d)vec_mergel((__v2df)__A, (__v2df)__B);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_pd (__m128d __A, __m128d __B)
-{
- return (__m128d) vec_mergeh ((__v2df)__A, (__v2df)__B);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_pd(__m128d __A, __m128d __B) {
+ return (__m128d)vec_mergeh((__v2df)__A, (__v2df)__B);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadh_pd (__m128d __A, double const *__B)
-{
- __v2df result = (__v2df)__A;
- result [1] = *__B;
- return (__m128d)result;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_loadh_pd(__m128d __A, double const *__B) {
+ __v2df __result = (__v2df)__A;
+ __result[1] = *__B;
+ return (__m128d)__result;
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadl_pd (__m128d __A, double const *__B)
-{
- __v2df result = (__v2df)__A;
- result [0] = *__B;
- return (__m128d)result;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_loadl_pd(__m128d __A, double const *__B) {
+ __v2df __result = (__v2df)__A;
+ __result[0] = *__B;
+ return (__m128d)__result;
}
#ifdef _ARCH_PWR8
/* Intrinsic functions that require PowerISA 2.07 minimum. */
/* Creates a 2-bit mask from the most significant bits of the DPFP values. */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movemask_pd (__m128d __A)
-{
- __vector unsigned long long result;
- static const __vector unsigned int perm_mask =
- {
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_movemask_pd(__m128d __A) {
+#ifdef _ARCH_PWR10
+ return vec_extractm((__v2du)__A);
+#else
+ __vector unsigned long long __result;
+ static const __vector unsigned int __perm_mask = {
#ifdef __LITTLE_ENDIAN__
- 0x80800040, 0x80808080, 0x80808080, 0x80808080
+ 0x80800040, 0x80808080, 0x80808080, 0x80808080
#else
0x80808080, 0x80808080, 0x80808080, 0x80804000
#endif
- };
+ };
- result = ((__vector unsigned long long)
- vec_vbpermq ((__vector unsigned char) __A,
- (__vector unsigned char) perm_mask));
+ __result = ((__vector unsigned long long)vec_vbpermq(
+ (__vector unsigned char)__A, (__vector unsigned char)__perm_mask));
#ifdef __LITTLE_ENDIAN__
- return result[1];
+ return __result[1];
#else
- return result[0];
+ return __result[0];
#endif
+#endif /* !_ARCH_PWR10 */
}
#endif /* _ARCH_PWR8 */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_packs_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_packs ((__v8hi) __A, (__v8hi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_packs_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_packs((__v8hi)__A, (__v8hi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_packs_epi32 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_packs ((__v4si)__A, (__v4si)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_packs_epi32(__m128i __A, __m128i __B) {
+ return (__m128i)vec_packs((__v4si)__A, (__v4si)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_packus_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_packsu ((__v8hi) __A, (__v8hi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_packus_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_packsu((__v8hi)__A, (__v8hi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_epi8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_mergel ((__v16qu)__A, (__v16qu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_epi8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_mergel((__v16qu)__A, (__v16qu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_mergel ((__v8hu)__A, (__v8hu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_mergel((__v8hu)__A, (__v8hu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_epi32 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_mergel ((__v4su)__A, (__v4su)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_epi32(__m128i __A, __m128i __B) {
+ return (__m128i)vec_mergel((__v4su)__A, (__v4su)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_epi64 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_mergel ((__vector long long) __A,
- (__vector long long) __B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_epi64(__m128i __A, __m128i __B) {
+ return (__m128i)vec_mergel((__vector long long)__A, (__vector long long)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_epi8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_mergeh ((__v16qu)__A, (__v16qu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_epi8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_mergeh((__v16qu)__A, (__v16qu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_mergeh ((__v8hi)__A, (__v8hi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_mergeh((__v8hi)__A, (__v8hi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_epi32 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_mergeh ((__v4si)__A, (__v4si)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_epi32(__m128i __A, __m128i __B) {
+ return (__m128i)vec_mergeh((__v4si)__A, (__v4si)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_epi64 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_mergeh ((__vector long long) __A,
- (__vector long long) __B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_epi64(__m128i __A, __m128i __B) {
+ return (__m128i)vec_mergeh((__vector long long)__A, (__vector long long)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_epi8 (__m128i __A, __m128i __B)
-{
- return (__m128i) ((__v16qu)__A + (__v16qu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_epi8(__m128i __A, __m128i __B) {
+ return (__m128i)((__v16qu)__A + (__v16qu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) ((__v8hu)__A + (__v8hu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)((__v8hu)__A + (__v8hu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_epi32 (__m128i __A, __m128i __B)
-{
- return (__m128i) ((__v4su)__A + (__v4su)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_epi32(__m128i __A, __m128i __B) {
+ return (__m128i)((__v4su)__A + (__v4su)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_epi64 (__m128i __A, __m128i __B)
-{
- return (__m128i) ((__v2du)__A + (__v2du)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_epi64(__m128i __A, __m128i __B) {
+ return (__m128i)((__v2du)__A + (__v2du)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_adds_epi8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_adds ((__v16qi)__A, (__v16qi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_epi8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_adds((__v16qi)__A, (__v16qi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_adds_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_adds ((__v8hi)__A, (__v8hi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_adds((__v8hi)__A, (__v8hi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_adds_epu8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_adds ((__v16qu)__A, (__v16qu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_epu8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_adds((__v16qu)__A, (__v16qu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_adds_epu16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_adds ((__v8hu)__A, (__v8hu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_epu16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_adds((__v8hu)__A, (__v8hu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_epi8 (__m128i __A, __m128i __B)
-{
- return (__m128i) ((__v16qu)__A - (__v16qu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_epi8(__m128i __A, __m128i __B) {
+ return (__m128i)((__v16qu)__A - (__v16qu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) ((__v8hu)__A - (__v8hu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)((__v8hu)__A - (__v8hu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_epi32 (__m128i __A, __m128i __B)
-{
- return (__m128i) ((__v4su)__A - (__v4su)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_epi32(__m128i __A, __m128i __B) {
+ return (__m128i)((__v4su)__A - (__v4su)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_epi64 (__m128i __A, __m128i __B)
-{
- return (__m128i) ((__v2du)__A - (__v2du)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_epi64(__m128i __A, __m128i __B) {
+ return (__m128i)((__v2du)__A - (__v2du)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_subs_epi8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_subs ((__v16qi)__A, (__v16qi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_epi8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_subs((__v16qi)__A, (__v16qi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_subs_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_subs ((__v8hi)__A, (__v8hi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_subs((__v8hi)__A, (__v8hi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_subs_epu8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_subs ((__v16qu)__A, (__v16qu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_epu8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_subs((__v16qu)__A, (__v16qu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_subs_epu16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_subs ((__v8hu)__A, (__v8hu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_epu16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_subs((__v8hu)__A, (__v8hu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_madd_epi16 (__m128i __A, __m128i __B)
-{
- __vector signed int zero = {0, 0, 0, 0};
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_madd_epi16(__m128i __A, __m128i __B) {
+ __vector signed int __zero = {0, 0, 0, 0};
- return (__m128i) vec_vmsumshm ((__v8hi)__A, (__v8hi)__B, zero);
+ return (__m128i)vec_vmsumshm((__v8hi)__A, (__v8hi)__B, __zero);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mulhi_epi16 (__m128i __A, __m128i __B)
-{
- __vector signed int w0, w1;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mulhi_epi16(__m128i __A, __m128i __B) {
+ __vector signed int __w0, __w1;
- __vector unsigned char xform1 = {
+ __vector unsigned char __xform1 = {
#ifdef __LITTLE_ENDIAN__
- 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17,
- 0x0A, 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
+ 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, 0x0A,
+ 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
#else
- 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15,
- 0x08, 0x09, 0x18, 0x19, 0x0C, 0x0D, 0x1C, 0x1D
+ 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15, 0x08,
+ 0x09, 0x18, 0x19, 0x0C, 0x0D, 0x1C, 0x1D
#endif
- };
+ };
- w0 = vec_vmulesh ((__v8hi)__A, (__v8hi)__B);
- w1 = vec_vmulosh ((__v8hi)__A, (__v8hi)__B);
- return (__m128i) vec_perm (w0, w1, xform1);
+ __w0 = vec_vmulesh((__v8hi)__A, (__v8hi)__B);
+ __w1 = vec_vmulosh((__v8hi)__A, (__v8hi)__B);
+ return (__m128i)vec_perm(__w0, __w1, __xform1);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mullo_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) ((__v8hi)__A * (__v8hi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mullo_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)((__v8hi)__A * (__v8hi)__B);
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_su32 (__m64 __A, __m64 __B)
-{
- unsigned int a = __A;
- unsigned int b = __B;
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mul_su32(__m64 __A, __m64 __B) {
+ unsigned int __a = __A;
+ unsigned int __b = __B;
- return ((__m64)a * (__m64)b);
+ return ((__m64)__a * (__m64)__b);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_epu32 (__m128i __A, __m128i __B)
-{
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mul_epu32(__m128i __A, __m128i __B) {
#if __GNUC__ < 8
- __v2du result;
+ __v2du __result;
#ifdef __LITTLE_ENDIAN__
/* VMX Vector Multiply Odd Unsigned Word. */
- __asm__(
- "vmulouw %0,%1,%2"
- : "=v" (result)
- : "v" (__A), "v" (__B)
- : );
+ __asm__("vmulouw %0,%1,%2" : "=v"(__result) : "v"(__A), "v"(__B) :);
#else
/* VMX Vector Multiply Even Unsigned Word. */
- __asm__(
- "vmuleuw %0,%1,%2"
- : "=v" (result)
- : "v" (__A), "v" (__B)
- : );
+ __asm__("vmuleuw %0,%1,%2" : "=v"(__result) : "v"(__A), "v"(__B) :);
#endif
- return (__m128i) result;
+ return (__m128i)__result;
#else
- return (__m128i) vec_mule ((__v4su)__A, (__v4su)__B);
+ return (__m128i)vec_mule((__v4su)__A, (__v4su)__B);
#endif
}
+#endif
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_slli_epi16 (__m128i __A, int __B)
-{
- __v8hu lshift;
- __v8hi result = { 0, 0, 0, 0, 0, 0, 0, 0 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_slli_epi16(__m128i __A, int __B) {
+ __v8hu __lshift;
+ __v8hi __result = {0, 0, 0, 0, 0, 0, 0, 0};
- if (__B >= 0 && __B < 16)
- {
- if (__builtin_constant_p(__B))
- lshift = (__v8hu) vec_splat_s16(__B);
- else
- lshift = vec_splats ((unsigned short) __B);
+ if (__B >= 0 && __B < 16) {
+ if (__builtin_constant_p(__B))
+ __lshift = (__v8hu)vec_splat_s16(__B);
+ else
+ __lshift = vec_splats((unsigned short)__B);
- result = vec_sl ((__v8hi) __A, lshift);
- }
+ __result = vec_sl((__v8hi)__A, __lshift);
+ }
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_slli_epi32 (__m128i __A, int __B)
-{
- __v4su lshift;
- __v4si result = { 0, 0, 0, 0 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_slli_epi32(__m128i __A, int __B) {
+ __v4su __lshift;
+ __v4si __result = {0, 0, 0, 0};
- if (__B >= 0 && __B < 32)
- {
- if (__builtin_constant_p(__B) && __B < 16)
- lshift = (__v4su) vec_splat_s32(__B);
- else
- lshift = vec_splats ((unsigned int) __B);
+ if (__B >= 0 && __B < 32) {
+ if (__builtin_constant_p(__B) && __B < 16)
+ __lshift = (__v4su)vec_splat_s32(__B);
+ else
+ __lshift = vec_splats((unsigned int)__B);
- result = vec_sl ((__v4si) __A, lshift);
- }
+ __result = vec_sl((__v4si)__A, __lshift);
+ }
- return (__m128i) result;
+ return (__m128i)__result;
}
#ifdef _ARCH_PWR8
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_slli_epi64 (__m128i __A, int __B)
-{
- __v2du lshift;
- __v2di result = { 0, 0 };
-
- if (__B >= 0 && __B < 64)
- {
- if (__builtin_constant_p(__B) && __B < 16)
- lshift = (__v2du) vec_splat_s32(__B);
- else
- lshift = (__v2du) vec_splats ((unsigned int) __B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_slli_epi64(__m128i __A, int __B) {
+ __v2du __lshift;
+ __v2di __result = {0, 0};
+
+ if (__B >= 0 && __B < 64) {
+ if (__builtin_constant_p(__B) && __B < 16)
+ __lshift = (__v2du)vec_splat_s32(__B);
+ else
+ __lshift = (__v2du)vec_splats((unsigned int)__B);
- result = vec_sl ((__v2di) __A, lshift);
- }
+ __result = vec_sl((__v2di)__A, __lshift);
+ }
- return (__m128i) result;
+ return (__m128i)__result;
}
#endif
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srai_epi16 (__m128i __A, int __B)
-{
- __v8hu rshift = { 15, 15, 15, 15, 15, 15, 15, 15 };
- __v8hi result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srai_epi16(__m128i __A, int __B) {
+ __v8hu __rshift = {15, 15, 15, 15, 15, 15, 15, 15};
+ __v8hi __result;
- if (__B < 16)
- {
- if (__builtin_constant_p(__B))
- rshift = (__v8hu) vec_splat_s16(__B);
- else
- rshift = vec_splats ((unsigned short) __B);
- }
- result = vec_sra ((__v8hi) __A, rshift);
+ if (__B < 16) {
+ if (__builtin_constant_p(__B))
+ __rshift = (__v8hu)vec_splat_s16(__B);
+ else
+ __rshift = vec_splats((unsigned short)__B);
+ }
+ __result = vec_sra((__v8hi)__A, __rshift);
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srai_epi32 (__m128i __A, int __B)
-{
- __v4su rshift = { 31, 31, 31, 31 };
- __v4si result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srai_epi32(__m128i __A, int __B) {
+ __v4su __rshift = {31, 31, 31, 31};
+ __v4si __result;
- if (__B < 32)
- {
- if (__builtin_constant_p(__B))
- {
- if (__B < 16)
- rshift = (__v4su) vec_splat_s32(__B);
- else
- rshift = (__v4su) vec_splats((unsigned int)__B);
- }
+ if (__B < 32) {
+ if (__builtin_constant_p(__B)) {
+ if (__B < 16)
+ __rshift = (__v4su)vec_splat_s32(__B);
else
- rshift = vec_splats ((unsigned int) __B);
- }
- result = vec_sra ((__v4si) __A, rshift);
+ __rshift = (__v4su)vec_splats((unsigned int)__B);
+ } else
+ __rshift = vec_splats((unsigned int)__B);
+ }
+ __result = vec_sra((__v4si)__A, __rshift);
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_bslli_si128 (__m128i __A, const int __N)
-{
- __v16qu result;
- const __v16qu zeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_bslli_si128(__m128i __A, const int __N) {
+ __v16qu __result;
+ const __v16qu __zeros = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
if (__N < 16)
- result = vec_sld ((__v16qu) __A, zeros, __N);
+ __result = vec_sld((__v16qu)__A, __zeros, __N);
else
- result = zeros;
+ __result = __zeros;
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_bsrli_si128 (__m128i __A, const int __N)
-{
- __v16qu result;
- const __v16qu zeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_bsrli_si128(__m128i __A, const int __N) {
+ __v16qu __result;
+ const __v16qu __zeros = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
if (__N < 16)
#ifdef __LITTLE_ENDIAN__
if (__builtin_constant_p(__N))
/* Would like to use Vector Shift Left Double by Octet
- Immediate here to use the immediate form and avoid
- load of __N * 8 value into a separate VR. */
- result = vec_sld (zeros, (__v16qu) __A, (16 - __N));
+ Immediate here to use the immediate form and avoid
+ load of __N * 8 value into a separate VR. */
+ __result = vec_sld(__zeros, (__v16qu)__A, (16 - __N));
else
#endif
- {
- __v16qu shift = vec_splats((unsigned char)(__N*8));
+ {
+ __v16qu __shift = vec_splats((unsigned char)(__N * 8));
#ifdef __LITTLE_ENDIAN__
- result = vec_sro ((__v16qu)__A, shift);
+ __result = vec_sro((__v16qu)__A, __shift);
#else
- result = vec_slo ((__v16qu)__A, shift);
+ __result = vec_slo((__v16qu)__A, __shift);
#endif
- }
+ }
else
- result = zeros;
+ __result = __zeros;
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srli_si128 (__m128i __A, const int __N)
-{
- return _mm_bsrli_si128 (__A, __N);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srli_si128(__m128i __A, const int __N) {
+ return _mm_bsrli_si128(__A, __N);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_slli_si128 (__m128i __A, const int _imm5)
-{
- __v16qu result;
- const __v16qu zeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_slli_si128(__m128i __A, const int _imm5) {
+ __v16qu __result;
+ const __v16qu __zeros = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
if (_imm5 < 16)
#ifdef __LITTLE_ENDIAN__
- result = vec_sld ((__v16qu) __A, zeros, _imm5);
+ __result = vec_sld((__v16qu)__A, __zeros, _imm5);
#else
- result = vec_sld (zeros, (__v16qu) __A, (16 - _imm5));
+ __result = vec_sld(__zeros, (__v16qu)__A, (16 - _imm5));
#endif
else
- result = zeros;
+ __result = __zeros;
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srli_epi16 (__m128i __A, int __B)
-{
- __v8hu rshift;
- __v8hi result = { 0, 0, 0, 0, 0, 0, 0, 0 };
+ _mm_srli_epi16(__m128i __A, int __B) {
+ __v8hu __rshift;
+ __v8hi __result = {0, 0, 0, 0, 0, 0, 0, 0};
- if (__B < 16)
- {
- if (__builtin_constant_p(__B))
- rshift = (__v8hu) vec_splat_s16(__B);
- else
- rshift = vec_splats ((unsigned short) __B);
+ if (__B < 16) {
+ if (__builtin_constant_p(__B))
+ __rshift = (__v8hu)vec_splat_s16(__B);
+ else
+ __rshift = vec_splats((unsigned short)__B);
- result = vec_sr ((__v8hi) __A, rshift);
- }
+ __result = vec_sr((__v8hi)__A, __rshift);
+ }
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srli_epi32 (__m128i __A, int __B)
-{
- __v4su rshift;
- __v4si result = { 0, 0, 0, 0 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srli_epi32(__m128i __A, int __B) {
+ __v4su __rshift;
+ __v4si __result = {0, 0, 0, 0};
- if (__B < 32)
- {
- if (__builtin_constant_p(__B))
- {
- if (__B < 16)
- rshift = (__v4su) vec_splat_s32(__B);
- else
- rshift = (__v4su) vec_splats((unsigned int)__B);
- }
+ if (__B < 32) {
+ if (__builtin_constant_p(__B)) {
+ if (__B < 16)
+ __rshift = (__v4su)vec_splat_s32(__B);
else
- rshift = vec_splats ((unsigned int) __B);
+ __rshift = (__v4su)vec_splats((unsigned int)__B);
+ } else
+ __rshift = vec_splats((unsigned int)__B);
- result = vec_sr ((__v4si) __A, rshift);
- }
+ __result = vec_sr((__v4si)__A, __rshift);
+ }
- return (__m128i) result;
+ return (__m128i)__result;
}
#ifdef _ARCH_PWR8
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srli_epi64 (__m128i __A, int __B)
-{
- __v2du rshift;
- __v2di result = { 0, 0 };
-
- if (__B < 64)
- {
- if (__builtin_constant_p(__B))
- {
- if (__B < 16)
- rshift = (__v2du) vec_splat_s32(__B);
- else
- rshift = (__v2du) vec_splats((unsigned long long)__B);
- }
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srli_epi64(__m128i __A, int __B) {
+ __v2du __rshift;
+ __v2di __result = {0, 0};
+
+ if (__B < 64) {
+ if (__builtin_constant_p(__B)) {
+ if (__B < 16)
+ __rshift = (__v2du)vec_splat_s32(__B);
else
- rshift = (__v2du) vec_splats ((unsigned int) __B);
+ __rshift = (__v2du)vec_splats((unsigned long long)__B);
+ } else
+ __rshift = (__v2du)vec_splats((unsigned int)__B);
- result = vec_sr ((__v2di) __A, rshift);
- }
+ __result = vec_sr((__v2di)__A, __rshift);
+ }
- return (__m128i) result;
+ return (__m128i)__result;
}
#endif
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sll_epi16 (__m128i __A, __m128i __B)
-{
- __v8hu lshift;
- __vector __bool short shmask;
- const __v8hu shmax = { 15, 15, 15, 15, 15, 15, 15, 15 };
- __v8hu result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sll_epi16(__m128i __A, __m128i __B) {
+ __v8hu __lshift;
+ __vector __bool short __shmask;
+ const __v8hu __shmax = {15, 15, 15, 15, 15, 15, 15, 15};
+ __v8hu __result;
#ifdef __LITTLE_ENDIAN__
- lshift = vec_splat ((__v8hu) __B, 0);
+ __lshift = vec_splat((__v8hu)__B, 0);
#else
- lshift = vec_splat ((__v8hu) __B, 3);
+ __lshift = vec_splat((__v8hu)__B, 3);
#endif
- shmask = vec_cmple (lshift, shmax);
- result = vec_sl ((__v8hu) __A, lshift);
- result = vec_sel ((__v8hu) shmask, result, shmask);
+ __shmask = vec_cmple(__lshift, __shmax);
+ __result = vec_sl((__v8hu)__A, __lshift);
+ __result = vec_sel((__v8hu)__shmask, __result, __shmask);
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sll_epi32 (__m128i __A, __m128i __B)
-{
- __v4su lshift;
- __vector __bool int shmask;
- const __v4su shmax = { 32, 32, 32, 32 };
- __v4su result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sll_epi32(__m128i __A, __m128i __B) {
+ __v4su __lshift;
+ __vector __bool int __shmask;
+ const __v4su __shmax = {32, 32, 32, 32};
+ __v4su __result;
#ifdef __LITTLE_ENDIAN__
- lshift = vec_splat ((__v4su) __B, 0);
+ __lshift = vec_splat((__v4su)__B, 0);
#else
- lshift = vec_splat ((__v4su) __B, 1);
+ __lshift = vec_splat((__v4su)__B, 1);
#endif
- shmask = vec_cmplt (lshift, shmax);
- result = vec_sl ((__v4su) __A, lshift);
- result = vec_sel ((__v4su) shmask, result, shmask);
+ __shmask = vec_cmplt(__lshift, __shmax);
+ __result = vec_sl((__v4su)__A, __lshift);
+ __result = vec_sel((__v4su)__shmask, __result, __shmask);
- return (__m128i) result;
+ return (__m128i)__result;
}
#ifdef _ARCH_PWR8
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sll_epi64 (__m128i __A, __m128i __B)
-{
- __v2du lshift;
- __vector __bool long long shmask;
- const __v2du shmax = { 64, 64 };
- __v2du result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sll_epi64(__m128i __A, __m128i __B) {
+ __v2du __lshift;
+ __vector __bool long long __shmask;
+ const __v2du __shmax = {64, 64};
+ __v2du __result;
- lshift = vec_splat ((__v2du) __B, 0);
- shmask = vec_cmplt (lshift, shmax);
- result = vec_sl ((__v2du) __A, lshift);
- result = (__v2du)vec_sel ((__v2df) shmask, (__v2df)result, shmask);
+ __lshift = vec_splat((__v2du)__B, 0);
+ __shmask = vec_cmplt(__lshift, __shmax);
+ __result = vec_sl((__v2du)__A, __lshift);
+ __result = vec_sel((__v2du)__shmask, __result, __shmask);
- return (__m128i) result;
+ return (__m128i)__result;
}
#endif
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sra_epi16 (__m128i __A, __m128i __B)
-{
- const __v8hu rshmax = { 15, 15, 15, 15, 15, 15, 15, 15 };
- __v8hu rshift;
- __v8hi result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sra_epi16(__m128i __A, __m128i __B) {
+ const __v8hu __rshmax = {15, 15, 15, 15, 15, 15, 15, 15};
+ __v8hu __rshift;
+ __v8hi __result;
#ifdef __LITTLE_ENDIAN__
- rshift = vec_splat ((__v8hu)__B, 0);
+ __rshift = vec_splat((__v8hu)__B, 0);
#else
- rshift = vec_splat ((__v8hu)__B, 3);
+ __rshift = vec_splat((__v8hu)__B, 3);
#endif
- rshift = vec_min (rshift, rshmax);
- result = vec_sra ((__v8hi) __A, rshift);
+ __rshift = vec_min(__rshift, __rshmax);
+ __result = vec_sra((__v8hi)__A, __rshift);
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sra_epi32 (__m128i __A, __m128i __B)
-{
- const __v4su rshmax = { 31, 31, 31, 31 };
- __v4su rshift;
- __v4si result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sra_epi32(__m128i __A, __m128i __B) {
+ const __v4su __rshmax = {31, 31, 31, 31};
+ __v4su __rshift;
+ __v4si __result;
#ifdef __LITTLE_ENDIAN__
- rshift = vec_splat ((__v4su)__B, 0);
+ __rshift = vec_splat((__v4su)__B, 0);
#else
- rshift = vec_splat ((__v4su)__B, 1);
+ __rshift = vec_splat((__v4su)__B, 1);
#endif
- rshift = vec_min (rshift, rshmax);
- result = vec_sra ((__v4si) __A, rshift);
+ __rshift = vec_min(__rshift, __rshmax);
+ __result = vec_sra((__v4si)__A, __rshift);
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srl_epi16 (__m128i __A, __m128i __B)
-{
- __v8hu rshift;
- __vector __bool short shmask;
- const __v8hu shmax = { 15, 15, 15, 15, 15, 15, 15, 15 };
- __v8hu result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srl_epi16(__m128i __A, __m128i __B) {
+ __v8hu __rshift;
+ __vector __bool short __shmask;
+ const __v8hu __shmax = {15, 15, 15, 15, 15, 15, 15, 15};
+ __v8hu __result;
#ifdef __LITTLE_ENDIAN__
- rshift = vec_splat ((__v8hu) __B, 0);
+ __rshift = vec_splat((__v8hu)__B, 0);
#else
- rshift = vec_splat ((__v8hu) __B, 3);
+ __rshift = vec_splat((__v8hu)__B, 3);
#endif
- shmask = vec_cmple (rshift, shmax);
- result = vec_sr ((__v8hu) __A, rshift);
- result = vec_sel ((__v8hu) shmask, result, shmask);
+ __shmask = vec_cmple(__rshift, __shmax);
+ __result = vec_sr((__v8hu)__A, __rshift);
+ __result = vec_sel((__v8hu)__shmask, __result, __shmask);
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srl_epi32 (__m128i __A, __m128i __B)
-{
- __v4su rshift;
- __vector __bool int shmask;
- const __v4su shmax = { 32, 32, 32, 32 };
- __v4su result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srl_epi32(__m128i __A, __m128i __B) {
+ __v4su __rshift;
+ __vector __bool int __shmask;
+ const __v4su __shmax = {32, 32, 32, 32};
+ __v4su __result;
#ifdef __LITTLE_ENDIAN__
- rshift = vec_splat ((__v4su) __B, 0);
+ __rshift = vec_splat((__v4su)__B, 0);
#else
- rshift = vec_splat ((__v4su) __B, 1);
+ __rshift = vec_splat((__v4su)__B, 1);
#endif
- shmask = vec_cmplt (rshift, shmax);
- result = vec_sr ((__v4su) __A, rshift);
- result = vec_sel ((__v4su) shmask, result, shmask);
+ __shmask = vec_cmplt(__rshift, __shmax);
+ __result = vec_sr((__v4su)__A, __rshift);
+ __result = vec_sel((__v4su)__shmask, __result, __shmask);
- return (__m128i) result;
+ return (__m128i)__result;
}
#ifdef _ARCH_PWR8
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srl_epi64 (__m128i __A, __m128i __B)
-{
- __v2du rshift;
- __vector __bool long long shmask;
- const __v2du shmax = { 64, 64 };
- __v2du result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srl_epi64(__m128i __A, __m128i __B) {
+ __v2du __rshift;
+ __vector __bool long long __shmask;
+ const __v2du __shmax = {64, 64};
+ __v2du __result;
- rshift = vec_splat ((__v2du) __B, 0);
- shmask = vec_cmplt (rshift, shmax);
- result = vec_sr ((__v2du) __A, rshift);
- result = (__v2du)vec_sel ((__v2df) shmask, (__v2df)result, shmask);
+ __rshift = vec_splat((__v2du)__B, 0);
+ __shmask = vec_cmplt(__rshift, __shmax);
+ __result = vec_sr((__v2du)__A, __rshift);
+ __result = vec_sel((__v2du)__shmask, __result, __shmask);
- return (__m128i) result;
+ return (__m128i)__result;
}
#endif
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_and_pd (__m128d __A, __m128d __B)
-{
- return (vec_and ((__v2df) __A, (__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_and_pd(__m128d __A, __m128d __B) {
+ return (vec_and((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_andnot_pd (__m128d __A, __m128d __B)
-{
- return (vec_andc ((__v2df) __B, (__v2df) __A));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_andnot_pd(__m128d __A, __m128d __B) {
+ return (vec_andc((__v2df)__B, (__v2df)__A));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_or_pd (__m128d __A, __m128d __B)
-{
- return (vec_or ((__v2df) __A, (__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_or_pd(__m128d __A, __m128d __B) {
+ return (vec_or((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_xor_pd (__m128d __A, __m128d __B)
-{
- return (vec_xor ((__v2df) __A, (__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_xor_pd(__m128d __A, __m128d __B) {
+ return (vec_xor((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_and_si128 (__m128i __A, __m128i __B)
-{
- return (__m128i)vec_and ((__v2di) __A, (__v2di) __B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_and_si128(__m128i __A, __m128i __B) {
+ return (__m128i)vec_and((__v2di)__A, (__v2di)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_andnot_si128 (__m128i __A, __m128i __B)
-{
- return (__m128i)vec_andc ((__v2di) __B, (__v2di) __A);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_andnot_si128(__m128i __A, __m128i __B) {
+ return (__m128i)vec_andc((__v2di)__B, (__v2di)__A);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_or_si128 (__m128i __A, __m128i __B)
-{
- return (__m128i)vec_or ((__v2di) __A, (__v2di) __B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_or_si128(__m128i __A, __m128i __B) {
+ return (__m128i)vec_or((__v2di)__A, (__v2di)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_xor_si128 (__m128i __A, __m128i __B)
-{
- return (__m128i)vec_xor ((__v2di) __A, (__v2di) __B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_xor_si128(__m128i __A, __m128i __B) {
+ return (__m128i)vec_xor((__v2di)__A, (__v2di)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_epi8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_cmpeq ((__v16qi) __A, (__v16qi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_epi8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_cmpeq((__v16qi)__A, (__v16qi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_cmpeq ((__v8hi) __A, (__v8hi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_cmpeq((__v8hi)__A, (__v8hi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_epi32 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_cmpeq ((__v4si) __A, (__v4si)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_epi32(__m128i __A, __m128i __B) {
+ return (__m128i)vec_cmpeq((__v4si)__A, (__v4si)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_epi8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_cmplt ((__v16qi) __A, (__v16qi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmplt_epi8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_cmplt((__v16qi)__A, (__v16qi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_cmplt ((__v8hi) __A, (__v8hi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmplt_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_cmplt((__v8hi)__A, (__v8hi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_epi32 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_cmplt ((__v4si) __A, (__v4si)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmplt_epi32(__m128i __A, __m128i __B) {
+ return (__m128i)vec_cmplt((__v4si)__A, (__v4si)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_epi8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_cmpgt ((__v16qi) __A, (__v16qi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_epi8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_cmpgt((__v16qi)__A, (__v16qi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_cmpgt ((__v8hi) __A, (__v8hi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_cmpgt((__v8hi)__A, (__v8hi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_epi32 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_cmpgt ((__v4si) __A, (__v4si)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_epi32(__m128i __A, __m128i __B) {
+ return (__m128i)vec_cmpgt((__v4si)__A, (__v4si)__B);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_extract_epi16 (__m128i const __A, int const __N)
-{
- return (unsigned short) ((__v8hi)__A)[__N & 7];
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_extract_epi16(__m128i const __A, int const __N) {
+ return (unsigned short)((__v8hi)__A)[__N & 7];
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_insert_epi16 (__m128i const __A, int const __D, int const __N)
-{
- __v8hi result = (__v8hi)__A;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_insert_epi16(__m128i const __A, int const __D, int const __N) {
+ __v8hi __result = (__v8hi)__A;
- result [(__N & 7)] = __D;
+ __result[(__N & 7)] = __D;
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_max ((__v8hi)__A, (__v8hi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_max_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_max((__v8hi)__A, (__v8hi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_epu8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_max ((__v16qu) __A, (__v16qu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_max_epu8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_max((__v16qu)__A, (__v16qu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_min ((__v8hi) __A, (__v8hi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_min_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_min((__v8hi)__A, (__v8hi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_epu8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_min ((__v16qu) __A, (__v16qu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_min_epu8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_min((__v16qu)__A, (__v16qu)__B);
}
-
#ifdef _ARCH_PWR8
/* Intrinsic functions that require PowerISA 2.07 minimum. */
-/* Creates a 4-bit mask from the most significant bits of the SPFP values. */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movemask_epi8 (__m128i __A)
-{
- __vector unsigned long long result;
- static const __vector unsigned char perm_mask =
- {
- 0x78, 0x70, 0x68, 0x60, 0x58, 0x50, 0x48, 0x40,
- 0x38, 0x30, 0x28, 0x20, 0x18, 0x10, 0x08, 0x00
- };
+/* Return a mask created from the most significant bit of each 8-bit
+ element in A. */
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_movemask_epi8(__m128i __A) {
+#ifdef _ARCH_PWR10
+ return vec_extractm((__v16qu)__A);
+#else
+ __vector unsigned long long __result;
+ static const __vector unsigned char __perm_mask = {
+ 0x78, 0x70, 0x68, 0x60, 0x58, 0x50, 0x48, 0x40,
+ 0x38, 0x30, 0x28, 0x20, 0x18, 0x10, 0x08, 0x00};
- result = ((__vector unsigned long long)
- vec_vbpermq ((__vector unsigned char) __A,
- (__vector unsigned char) perm_mask));
+ __result = ((__vector unsigned long long)vec_vbpermq(
+ (__vector unsigned char)__A, (__vector unsigned char)__perm_mask));
#ifdef __LITTLE_ENDIAN__
- return result[1];
+ return __result[1];
#else
- return result[0];
+ return __result[0];
#endif
+#endif /* !_ARCH_PWR10 */
}
#endif /* _ARCH_PWR8 */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mulhi_epu16 (__m128i __A, __m128i __B)
-{
- __v4su w0, w1;
- __v16qu xform1 = {
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mulhi_epu16(__m128i __A, __m128i __B) {
+ __v4su __w0, __w1;
+ __v16qu __xform1 = {
#ifdef __LITTLE_ENDIAN__
- 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17,
- 0x0A, 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
+ 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, 0x0A,
+ 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
#else
- 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15,
- 0x08, 0x09, 0x18, 0x19, 0x0C, 0x0D, 0x1C, 0x1D
+ 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15, 0x08,
+ 0x09, 0x18, 0x19, 0x0C, 0x0D, 0x1C, 0x1D
#endif
- };
+ };
- w0 = vec_vmuleuh ((__v8hu)__A, (__v8hu)__B);
- w1 = vec_vmulouh ((__v8hu)__A, (__v8hu)__B);
- return (__m128i) vec_perm (w0, w1, xform1);
+ __w0 = vec_vmuleuh((__v8hu)__A, (__v8hu)__B);
+ __w1 = vec_vmulouh((__v8hu)__A, (__v8hu)__B);
+ return (__m128i)vec_perm(__w0, __w1, __xform1);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shufflehi_epi16 (__m128i __A, const int __mask)
-{
- unsigned long element_selector_98 = __mask & 0x03;
- unsigned long element_selector_BA = (__mask >> 2) & 0x03;
- unsigned long element_selector_DC = (__mask >> 4) & 0x03;
- unsigned long element_selector_FE = (__mask >> 6) & 0x03;
- static const unsigned short permute_selectors[4] =
- {
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_shufflehi_epi16(__m128i __A, const int __mask) {
+ unsigned long __element_selector_98 = __mask & 0x03;
+ unsigned long __element_selector_BA = (__mask >> 2) & 0x03;
+ unsigned long __element_selector_DC = (__mask >> 4) & 0x03;
+ unsigned long __element_selector_FE = (__mask >> 6) & 0x03;
+ static const unsigned short __permute_selectors[4] = {
#ifdef __LITTLE_ENDIAN__
- 0x0908, 0x0B0A, 0x0D0C, 0x0F0E
+ 0x0908, 0x0B0A, 0x0D0C, 0x0F0E
#else
- 0x0809, 0x0A0B, 0x0C0D, 0x0E0F
+ 0x0809, 0x0A0B, 0x0C0D, 0x0E0F
#endif
- };
- __v2du pmask =
+ };
+ __v2du __pmask =
#ifdef __LITTLE_ENDIAN__
- { 0x1716151413121110UL, 0UL};
+ {0x1716151413121110UL, 0UL};
#else
- { 0x1011121314151617UL, 0UL};
+ {0x1011121314151617UL, 0UL};
#endif
- __m64_union t;
- __v2du a, r;
-
- t.as_short[0] = permute_selectors[element_selector_98];
- t.as_short[1] = permute_selectors[element_selector_BA];
- t.as_short[2] = permute_selectors[element_selector_DC];
- t.as_short[3] = permute_selectors[element_selector_FE];
- pmask[1] = t.as_m64;
- a = (__v2du)__A;
- r = vec_perm (a, a, (__vector unsigned char)pmask);
- return (__m128i) r;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shufflelo_epi16 (__m128i __A, const int __mask)
-{
- unsigned long element_selector_10 = __mask & 0x03;
- unsigned long element_selector_32 = (__mask >> 2) & 0x03;
- unsigned long element_selector_54 = (__mask >> 4) & 0x03;
- unsigned long element_selector_76 = (__mask >> 6) & 0x03;
- static const unsigned short permute_selectors[4] =
- {
+ __m64_union __t;
+ __v2du __a, __r;
+
+ __t.as_short[0] = __permute_selectors[__element_selector_98];
+ __t.as_short[1] = __permute_selectors[__element_selector_BA];
+ __t.as_short[2] = __permute_selectors[__element_selector_DC];
+ __t.as_short[3] = __permute_selectors[__element_selector_FE];
+ __pmask[1] = __t.as_m64;
+ __a = (__v2du)__A;
+ __r = vec_perm(__a, __a, (__vector unsigned char)__pmask);
+ return (__m128i)__r;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_shufflelo_epi16(__m128i __A, const int __mask) {
+ unsigned long __element_selector_10 = __mask & 0x03;
+ unsigned long __element_selector_32 = (__mask >> 2) & 0x03;
+ unsigned long __element_selector_54 = (__mask >> 4) & 0x03;
+ unsigned long __element_selector_76 = (__mask >> 6) & 0x03;
+ static const unsigned short __permute_selectors[4] = {
#ifdef __LITTLE_ENDIAN__
- 0x0100, 0x0302, 0x0504, 0x0706
+ 0x0100, 0x0302, 0x0504, 0x0706
#else
- 0x0001, 0x0203, 0x0405, 0x0607
+ 0x0001, 0x0203, 0x0405, 0x0607
#endif
- };
- __v2du pmask =
+ };
+ __v2du __pmask =
#ifdef __LITTLE_ENDIAN__
- { 0UL, 0x1f1e1d1c1b1a1918UL};
+ {0UL, 0x1f1e1d1c1b1a1918UL};
#else
- { 0UL, 0x18191a1b1c1d1e1fUL};
+ {0UL, 0x18191a1b1c1d1e1fUL};
#endif
- __m64_union t;
- __v2du a, r;
- t.as_short[0] = permute_selectors[element_selector_10];
- t.as_short[1] = permute_selectors[element_selector_32];
- t.as_short[2] = permute_selectors[element_selector_54];
- t.as_short[3] = permute_selectors[element_selector_76];
- pmask[0] = t.as_m64;
- a = (__v2du)__A;
- r = vec_perm (a, a, (__vector unsigned char)pmask);
- return (__m128i) r;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shuffle_epi32 (__m128i __A, const int __mask)
-{
- unsigned long element_selector_10 = __mask & 0x03;
- unsigned long element_selector_32 = (__mask >> 2) & 0x03;
- unsigned long element_selector_54 = (__mask >> 4) & 0x03;
- unsigned long element_selector_76 = (__mask >> 6) & 0x03;
- static const unsigned int permute_selectors[4] =
- {
+ __m64_union __t;
+ __v2du __a, __r;
+ __t.as_short[0] = __permute_selectors[__element_selector_10];
+ __t.as_short[1] = __permute_selectors[__element_selector_32];
+ __t.as_short[2] = __permute_selectors[__element_selector_54];
+ __t.as_short[3] = __permute_selectors[__element_selector_76];
+ __pmask[0] = __t.as_m64;
+ __a = (__v2du)__A;
+ __r = vec_perm(__a, __a, (__vector unsigned char)__pmask);
+ return (__m128i)__r;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_shuffle_epi32(__m128i __A, const int __mask) {
+ unsigned long __element_selector_10 = __mask & 0x03;
+ unsigned long __element_selector_32 = (__mask >> 2) & 0x03;
+ unsigned long __element_selector_54 = (__mask >> 4) & 0x03;
+ unsigned long __element_selector_76 = (__mask >> 6) & 0x03;
+ static const unsigned int __permute_selectors[4] = {
#ifdef __LITTLE_ENDIAN__
- 0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C
+ 0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C
#else
0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F
#endif
- };
- __v4su t;
-
- t[0] = permute_selectors[element_selector_10];
- t[1] = permute_selectors[element_selector_32];
- t[2] = permute_selectors[element_selector_54] + 0x10101010;
- t[3] = permute_selectors[element_selector_76] + 0x10101010;
- return (__m128i)vec_perm ((__v4si) __A, (__v4si)__A, (__vector unsigned char)t);
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C)
-{
- __v2du hibit = { 0x7f7f7f7f7f7f7f7fUL, 0x7f7f7f7f7f7f7f7fUL};
- __v16qu mask, tmp;
- __m128i_u *p = (__m128i_u*)__C;
-
- tmp = (__v16qu)_mm_loadu_si128(p);
- mask = (__v16qu)vec_cmpgt ((__v16qu)__B, (__v16qu)hibit);
- tmp = vec_sel (tmp, (__v16qu)__A, mask);
- _mm_storeu_si128 (p, (__m128i)tmp);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_avg_epu8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_avg ((__v16qu)__A, (__v16qu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_avg_epu16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_avg ((__v8hu)__A, (__v8hu)__B);
-}
-
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sad_epu8 (__m128i __A, __m128i __B)
-{
- __v16qu a, b;
- __v16qu vmin, vmax, vabsdiff;
- __v4si vsum;
- const __v4su zero = { 0, 0, 0, 0 };
- __v4si result;
-
- a = (__v16qu) __A;
- b = (__v16qu) __B;
- vmin = vec_min (a, b);
- vmax = vec_max (a, b);
- vabsdiff = vec_sub (vmax, vmin);
+ };
+ __v4su __t;
+
+ __t[0] = __permute_selectors[__element_selector_10];
+ __t[1] = __permute_selectors[__element_selector_32];
+ __t[2] = __permute_selectors[__element_selector_54] + 0x10101010;
+ __t[3] = __permute_selectors[__element_selector_76] + 0x10101010;
+ return (__m128i)vec_perm((__v4si)__A, (__v4si)__A,
+ (__vector unsigned char)__t);
+}
+
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_maskmoveu_si128(__m128i __A, __m128i __B, char *__C) {
+ __v2du __hibit = {0x7f7f7f7f7f7f7f7fUL, 0x7f7f7f7f7f7f7f7fUL};
+ __v16qu __mask, __tmp;
+ __m128i_u *__p = (__m128i_u *)__C;
+
+ __tmp = (__v16qu)_mm_loadu_si128(__p);
+ __mask = (__v16qu)vec_cmpgt((__v16qu)__B, (__v16qu)__hibit);
+ __tmp = vec_sel(__tmp, (__v16qu)__A, __mask);
+ _mm_storeu_si128(__p, (__m128i)__tmp);
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_avg_epu8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_avg((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_avg_epu16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_avg((__v8hu)__A, (__v8hu)__B);
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sad_epu8(__m128i __A, __m128i __B) {
+ __v16qu __a, __b;
+ __v16qu __vabsdiff;
+ __v4si __vsum;
+ const __v4su __zero = {0, 0, 0, 0};
+ __v4si __result;
+
+ __a = (__v16qu)__A;
+ __b = (__v16qu)__B;
+#ifndef _ARCH_PWR9
+ __v16qu __vmin = vec_min(__a, __b);
+ __v16qu __vmax = vec_max(__a, __b);
+ __vabsdiff = vec_sub(__vmax, __vmin);
+#else
+ __vabsdiff = vec_absd(__a, __b);
+#endif
/* Sum four groups of bytes into integers. */
- vsum = (__vector signed int) vec_sum4s (vabsdiff, zero);
- /* Sum across four integers with two integer results. */
- result = vec_sum2s (vsum, (__vector signed int) zero);
- /* Rotate the sums into the correct position. */
+ __vsum = (__vector signed int)vec_sum4s(__vabsdiff, __zero);
#ifdef __LITTLE_ENDIAN__
- result = vec_sld (result, result, 4);
+ /* Sum across four integers with two integer results. */
+ __asm__("vsum2sws %0,%1,%2" : "=v"(__result) : "v"(__vsum), "v"(__zero));
+ /* Note: vec_sum2s could be used here, but on little-endian, vector
+ shifts are added that are not needed for this use-case.
+ A vector shift to correctly position the 32-bit integer results
+ (currently at [0] and [2]) to [1] and [3] would then need to be
+ swapped back again since the desired results are two 64-bit
+ integers ([1]|[0] and [3]|[2]). Thus, no shift is performed. */
#else
- result = vec_sld (result, result, 6);
-#endif
+ /* Sum across four integers with two integer results. */
+ __result = vec_sum2s(__vsum, (__vector signed int)__zero);
/* Rotate the sums into the correct position. */
- return (__m128i) result;
+ __result = vec_sld(__result, __result, 6);
+#endif
+ return (__m128i)__result;
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_si32 (int *__A, int __B)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_stream_si32(int *__A, int __B) {
/* Use the data cache block touch for store transient. */
- __asm__ (
- "dcbtstt 0,%0"
- :
- : "b" (__A)
- : "memory"
- );
+ __asm__("dcbtstt 0,%0" : : "b"(__A) : "memory");
*__A = __B;
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_si64 (long long int *__A, long long int __B)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_stream_si64(long long int *__A, long long int __B) {
/* Use the data cache block touch for store transient. */
- __asm__ (
- " dcbtstt 0,%0"
- :
- : "b" (__A)
- : "memory"
- );
+ __asm__(" dcbtstt 0,%0" : : "b"(__A) : "memory");
*__A = __B;
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_si128 (__m128i *__A, __m128i __B)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_stream_si128(__m128i *__A, __m128i __B) {
/* Use the data cache block touch for store transient. */
- __asm__ (
- "dcbtstt 0,%0"
- :
- : "b" (__A)
- : "memory"
- );
+ __asm__("dcbtstt 0,%0" : : "b"(__A) : "memory");
*__A = __B;
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_pd (double *__A, __m128d __B)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_stream_pd(double *__A, __m128d __B) {
/* Use the data cache block touch for store transient. */
- __asm__ (
- "dcbtstt 0,%0"
- :
- : "b" (__A)
- : "memory"
- );
- *(__m128d*)__A = __B;
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_clflush (void const *__A)
-{
+ __asm__("dcbtstt 0,%0" : : "b"(__A) : "memory");
+ *(__m128d *)__A = __B;
+}
+
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_clflush(void const *__A) {
/* Use the data cache block flush. */
- __asm__ (
- "dcbf 0,%0"
- :
- : "b" (__A)
- : "memory"
- );
+ __asm__("dcbf 0,%0" : : "b"(__A) : "memory");
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_lfence (void)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_lfence(void) {
/* Use light weight sync for load to load ordering. */
- __atomic_thread_fence (__ATOMIC_RELEASE);
+ __atomic_thread_fence(__ATOMIC_RELEASE);
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mfence (void)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mfence(void) {
/* Use heavy weight sync for any to any ordering. */
- __atomic_thread_fence (__ATOMIC_SEQ_CST);
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi32_si128 (int __A)
-{
- return _mm_set_epi32 (0, 0, 0, __A);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi32_si128(int __A) {
+ return _mm_set_epi32(0, 0, 0, __A);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64_si128 (long long __A)
-{
- return __extension__ (__m128i)(__v2di){ __A, 0LL };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64_si128(long long __A) {
+ return __extension__(__m128i)(__v2di){__A, 0LL};
}
/* Microsoft intrinsic. */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64x_si128 (long long __A)
-{
- return __extension__ (__m128i)(__v2di){ __A, 0LL };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64x_si128(long long __A) {
+ return __extension__(__m128i)(__v2di){__A, 0LL};
}
/* Casts between various SP, DP, INT vector types. Note that these do no
conversion of values, they just change the type. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castpd_ps(__m128d __A)
-{
- return (__m128) __A;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_castpd_ps(__m128d __A) {
+ return (__m128)__A;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castpd_si128(__m128d __A)
-{
- return (__m128i) __A;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_castpd_si128(__m128d __A) {
+ return (__m128i)__A;
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castps_pd(__m128 __A)
-{
- return (__m128d) __A;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_castps_pd(__m128 __A) {
+ return (__m128d)__A;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castps_si128(__m128 __A)
-{
- return (__m128i) __A;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_castps_si128(__m128 __A) {
+ return (__m128i)__A;
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castsi128_ps(__m128i __A)
-{
- return (__m128) __A;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_castsi128_ps(__m128i __A) {
+ return (__m128)__A;
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castsi128_pd(__m128i __A)
-{
- return (__m128d) __A;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_castsi128_pd(__m128i __A) {
+ return (__m128d)__A;
}
#else
#include_next <emmintrin.h>
-#endif /* defined(__linux__) && defined(__ppc64__) */
+#endif /* defined(__powerpc64__) && \
+ * (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */
#endif /* EMMINTRIN_H_ */
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/immintrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/immintrin.h
new file mode 100644
index 000000000000..c1ada9889d4a
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/immintrin.h
@@ -0,0 +1,27 @@
+/*===---- immintrin.h - Implementation of Intel intrinsics on PowerPC ------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef IMMINTRIN_H_
+#define IMMINTRIN_H_
+
+#include <x86gprintrin.h>
+
+#include <mmintrin.h>
+
+#include <xmmintrin.h>
+
+#include <emmintrin.h>
+
+#include <pmmintrin.h>
+
+#include <tmmintrin.h>
+
+#include <smmintrin.h>
+
+#endif /* IMMINTRIN_H_ */
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/mm_malloc.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/mm_malloc.h
index 24b14c8e07c0..7c1e625e44d5 100644
--- a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/mm_malloc.h
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/mm_malloc.h
@@ -10,38 +10,33 @@
#ifndef _MM_MALLOC_H_INCLUDED
#define _MM_MALLOC_H_INCLUDED
-#if defined(__linux__) && defined(__ppc64__)
+#if defined(__powerpc64__) && \
+ (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX))
#include <stdlib.h>
/* We can't depend on <stdlib.h> since the prototype of posix_memalign
may not be visible. */
#ifndef __cplusplus
-extern int posix_memalign (void **, size_t, size_t);
+extern int posix_memalign(void **, size_t, size_t);
#else
-extern "C" int posix_memalign (void **, size_t, size_t) throw ();
+extern "C" int posix_memalign(void **, size_t, size_t);
#endif
-static __inline void *
-_mm_malloc (size_t size, size_t alignment)
-{
+static __inline void *_mm_malloc(size_t __size, size_t __alignment) {
/* PowerPC64 ELF V2 ABI requires quadword alignment. */
- size_t vec_align = sizeof (__vector float);
- void *ptr;
+ size_t __vec_align = sizeof(__vector float);
+ void *__ptr;
- if (alignment < vec_align)
- alignment = vec_align;
- if (posix_memalign (&ptr, alignment, size) == 0)
- return ptr;
+ if (__alignment < __vec_align)
+ __alignment = __vec_align;
+ if (posix_memalign(&__ptr, __alignment, __size) == 0)
+ return __ptr;
else
return NULL;
}
-static __inline void
-_mm_free (void * ptr)
-{
- free (ptr);
-}
+static __inline void _mm_free(void *__ptr) { free(__ptr); }
#else
#include_next <mm_malloc.h>
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/mmintrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/mmintrin.h
index c55c44726f00..0be3af2b0bd7 100644
--- a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/mmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/mmintrin.h
@@ -35,7 +35,8 @@
#ifndef _MMINTRIN_H_INCLUDED
#define _MMINTRIN_H_INCLUDED
-#if defined(__linux__) && defined(__ppc64__)
+#if defined(__powerpc64__) && \
+ (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX))
#include <altivec.h>
/* The Intel API is flexible enough that we must allow aliasing with other
@@ -149,17 +150,17 @@ extern __inline long long
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_packs_pi16(__m64 __m1, __m64 __m2) {
- __vector signed short vm1;
- __vector signed char vresult;
+ __vector signed short __vm1;
+ __vector signed char __vresult;
- vm1 = (__vector signed short)(__vector unsigned long long)
+ __vm1 = (__vector signed short)(__vector unsigned long long)
#ifdef __LITTLE_ENDIAN__
{__m1, __m2};
#else
{__m2, __m1};
#endif
- vresult = vec_packs(vm1, vm1);
- return (__m64)((__vector long long)vresult)[0];
+ __vresult = vec_packs(__vm1, __vm1);
+ return (__m64)((__vector long long)__vresult)[0];
}
extern __inline __m64
@@ -174,17 +175,17 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_packs_pi32(__m64 __m1, __m64 __m2) {
- __vector signed int vm1;
- __vector signed short vresult;
+ __vector signed int __vm1;
+ __vector signed short __vresult;
- vm1 = (__vector signed int)(__vector unsigned long long)
+ __vm1 = (__vector signed int)(__vector unsigned long long)
#ifdef __LITTLE_ENDIAN__
{__m1, __m2};
#else
{__m2, __m1};
#endif
- vresult = vec_packs(vm1, vm1);
- return (__m64)((__vector long long)vresult)[0];
+ __vresult = vec_packs(__vm1, __vm1);
+ return (__m64)((__vector long long)__vresult)[0];
}
extern __inline __m64
@@ -199,19 +200,20 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_packs_pu16(__m64 __m1, __m64 __m2) {
- __vector unsigned char r;
- __vector signed short vm1 = (__vector signed short)(__vector long long)
+ __vector unsigned char __r;
+ __vector signed short __vm1 = (__vector signed short)(__vector long long)
#ifdef __LITTLE_ENDIAN__
{__m1, __m2};
#else
{__m2, __m1};
#endif
const __vector signed short __zero = {0};
- __vector __bool short __select = vec_cmplt(vm1, __zero);
- r = vec_packs((__vector unsigned short)vm1, (__vector unsigned short)vm1);
- __vector __bool char packsel = vec_pack(__select, __select);
- r = vec_sel(r, (const __vector unsigned char)__zero, packsel);
- return (__m64)((__vector long long)r)[0];
+ __vector __bool short __select = vec_cmplt(__vm1, __zero);
+ __r =
+ vec_packs((__vector unsigned short)__vm1, (__vector unsigned short)__vm1);
+ __vector __bool char __packsel = vec_pack(__select, __select);
+ __r = vec_sel(__r, (const __vector unsigned char)__zero, __packsel);
+ return (__m64)((__vector long long)__r)[0];
}
extern __inline __m64
@@ -227,28 +229,28 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpackhi_pi8(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
- __vector unsigned char a, b, c;
+ __vector unsigned char __a, __b, __c;
- a = (__vector unsigned char)vec_splats(__m1);
- b = (__vector unsigned char)vec_splats(__m2);
- c = vec_mergel(a, b);
- return (__m64)((__vector long long)c)[1];
+ __a = (__vector unsigned char)vec_splats(__m1);
+ __b = (__vector unsigned char)vec_splats(__m2);
+ __c = vec_mergel(__a, __b);
+ return (__m64)((__vector long long)__c)[1];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_char[0] = m1.as_char[4];
- res.as_char[1] = m2.as_char[4];
- res.as_char[2] = m1.as_char[5];
- res.as_char[3] = m2.as_char[5];
- res.as_char[4] = m1.as_char[6];
- res.as_char[5] = m2.as_char[6];
- res.as_char[6] = m1.as_char[7];
- res.as_char[7] = m2.as_char[7];
+ __res.as_char[0] = __mu1.as_char[4];
+ __res.as_char[1] = __mu2.as_char[4];
+ __res.as_char[2] = __mu1.as_char[5];
+ __res.as_char[3] = __mu2.as_char[5];
+ __res.as_char[4] = __mu1.as_char[6];
+ __res.as_char[5] = __mu2.as_char[6];
+ __res.as_char[6] = __mu1.as_char[7];
+ __res.as_char[7] = __mu2.as_char[7];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -263,17 +265,17 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpackhi_pi16(__m64 __m1, __m64 __m2) {
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_short[0] = m1.as_short[2];
- res.as_short[1] = m2.as_short[2];
- res.as_short[2] = m1.as_short[3];
- res.as_short[3] = m2.as_short[3];
+ __res.as_short[0] = __mu1.as_short[2];
+ __res.as_short[1] = __mu2.as_short[2];
+ __res.as_short[2] = __mu1.as_short[3];
+ __res.as_short[3] = __mu2.as_short[3];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
}
extern __inline __m64
@@ -286,15 +288,15 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpackhi_pi32(__m64 __m1, __m64 __m2) {
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_int[0] = m1.as_int[1];
- res.as_int[1] = m2.as_int[1];
+ __res.as_int[0] = __mu1.as_int[1];
+ __res.as_int[1] = __mu2.as_int[1];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
}
extern __inline __m64
@@ -308,28 +310,28 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpacklo_pi8(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
- __vector unsigned char a, b, c;
+ __vector unsigned char __a, __b, __c;
- a = (__vector unsigned char)vec_splats(__m1);
- b = (__vector unsigned char)vec_splats(__m2);
- c = vec_mergel(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector unsigned char)vec_splats(__m1);
+ __b = (__vector unsigned char)vec_splats(__m2);
+ __c = vec_mergel(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_char[0] = m1.as_char[0];
- res.as_char[1] = m2.as_char[0];
- res.as_char[2] = m1.as_char[1];
- res.as_char[3] = m2.as_char[1];
- res.as_char[4] = m1.as_char[2];
- res.as_char[5] = m2.as_char[2];
- res.as_char[6] = m1.as_char[3];
- res.as_char[7] = m2.as_char[3];
+ __res.as_char[0] = __mu1.as_char[0];
+ __res.as_char[1] = __mu2.as_char[0];
+ __res.as_char[2] = __mu1.as_char[1];
+ __res.as_char[3] = __mu2.as_char[1];
+ __res.as_char[4] = __mu1.as_char[2];
+ __res.as_char[5] = __mu2.as_char[2];
+ __res.as_char[6] = __mu1.as_char[3];
+ __res.as_char[7] = __mu2.as_char[3];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -343,17 +345,17 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpacklo_pi16(__m64 __m1, __m64 __m2) {
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_short[0] = m1.as_short[0];
- res.as_short[1] = m2.as_short[0];
- res.as_short[2] = m1.as_short[1];
- res.as_short[3] = m2.as_short[1];
+ __res.as_short[0] = __mu1.as_short[0];
+ __res.as_short[1] = __mu2.as_short[0];
+ __res.as_short[2] = __mu1.as_short[1];
+ __res.as_short[3] = __mu2.as_short[1];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
}
extern __inline __m64
@@ -367,15 +369,15 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpacklo_pi32(__m64 __m1, __m64 __m2) {
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_int[0] = m1.as_int[0];
- res.as_int[1] = m2.as_int[0];
+ __res.as_int[0] = __mu1.as_int[0];
+ __res.as_int[1] = __mu2.as_int[0];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
}
extern __inline __m64
@@ -389,28 +391,28 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_pi8(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
- __vector signed char a, b, c;
+ __vector signed char __a, __b, __c;
- a = (__vector signed char)vec_splats(__m1);
- b = (__vector signed char)vec_splats(__m2);
- c = vec_add(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed char)vec_splats(__m1);
+ __b = (__vector signed char)vec_splats(__m2);
+ __c = vec_add(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_char[0] = m1.as_char[0] + m2.as_char[0];
- res.as_char[1] = m1.as_char[1] + m2.as_char[1];
- res.as_char[2] = m1.as_char[2] + m2.as_char[2];
- res.as_char[3] = m1.as_char[3] + m2.as_char[3];
- res.as_char[4] = m1.as_char[4] + m2.as_char[4];
- res.as_char[5] = m1.as_char[5] + m2.as_char[5];
- res.as_char[6] = m1.as_char[6] + m2.as_char[6];
- res.as_char[7] = m1.as_char[7] + m2.as_char[7];
+ __res.as_char[0] = __mu1.as_char[0] + __mu2.as_char[0];
+ __res.as_char[1] = __mu1.as_char[1] + __mu2.as_char[1];
+ __res.as_char[2] = __mu1.as_char[2] + __mu2.as_char[2];
+ __res.as_char[3] = __mu1.as_char[3] + __mu2.as_char[3];
+ __res.as_char[4] = __mu1.as_char[4] + __mu2.as_char[4];
+ __res.as_char[5] = __mu1.as_char[5] + __mu2.as_char[5];
+ __res.as_char[6] = __mu1.as_char[6] + __mu2.as_char[6];
+ __res.as_char[7] = __mu1.as_char[7] + __mu2.as_char[7];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -425,24 +427,24 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_pi16(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
- __vector signed short a, b, c;
+ __vector signed short __a, __b, __c;
- a = (__vector signed short)vec_splats(__m1);
- b = (__vector signed short)vec_splats(__m2);
- c = vec_add(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed short)vec_splats(__m1);
+ __b = (__vector signed short)vec_splats(__m2);
+ __c = vec_add(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_short[0] = m1.as_short[0] + m2.as_short[0];
- res.as_short[1] = m1.as_short[1] + m2.as_short[1];
- res.as_short[2] = m1.as_short[2] + m2.as_short[2];
- res.as_short[3] = m1.as_short[3] + m2.as_short[3];
+ __res.as_short[0] = __mu1.as_short[0] + __mu2.as_short[0];
+ __res.as_short[1] = __mu1.as_short[1] + __mu2.as_short[1];
+ __res.as_short[2] = __mu1.as_short[2] + __mu2.as_short[2];
+ __res.as_short[3] = __mu1.as_short[3] + __mu2.as_short[3];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -457,22 +459,22 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_pi32(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR9
- __vector signed int a, b, c;
+ __vector signed int __a, __b, __c;
- a = (__vector signed int)vec_splats(__m1);
- b = (__vector signed int)vec_splats(__m2);
- c = vec_add(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed int)vec_splats(__m1);
+ __b = (__vector signed int)vec_splats(__m2);
+ __c = vec_add(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_int[0] = m1.as_int[0] + m2.as_int[0];
- res.as_int[1] = m1.as_int[1] + m2.as_int[1];
+ __res.as_int[0] = __mu1.as_int[0] + __mu2.as_int[0];
+ __res.as_int[1] = __mu1.as_int[1] + __mu2.as_int[1];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -487,28 +489,28 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_pi8(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
- __vector signed char a, b, c;
+ __vector signed char __a, __b, __c;
- a = (__vector signed char)vec_splats(__m1);
- b = (__vector signed char)vec_splats(__m2);
- c = vec_sub(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed char)vec_splats(__m1);
+ __b = (__vector signed char)vec_splats(__m2);
+ __c = vec_sub(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_char[0] = m1.as_char[0] - m2.as_char[0];
- res.as_char[1] = m1.as_char[1] - m2.as_char[1];
- res.as_char[2] = m1.as_char[2] - m2.as_char[2];
- res.as_char[3] = m1.as_char[3] - m2.as_char[3];
- res.as_char[4] = m1.as_char[4] - m2.as_char[4];
- res.as_char[5] = m1.as_char[5] - m2.as_char[5];
- res.as_char[6] = m1.as_char[6] - m2.as_char[6];
- res.as_char[7] = m1.as_char[7] - m2.as_char[7];
+ __res.as_char[0] = __mu1.as_char[0] - __mu2.as_char[0];
+ __res.as_char[1] = __mu1.as_char[1] - __mu2.as_char[1];
+ __res.as_char[2] = __mu1.as_char[2] - __mu2.as_char[2];
+ __res.as_char[3] = __mu1.as_char[3] - __mu2.as_char[3];
+ __res.as_char[4] = __mu1.as_char[4] - __mu2.as_char[4];
+ __res.as_char[5] = __mu1.as_char[5] - __mu2.as_char[5];
+ __res.as_char[6] = __mu1.as_char[6] - __mu2.as_char[6];
+ __res.as_char[7] = __mu1.as_char[7] - __mu2.as_char[7];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -523,24 +525,24 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_pi16(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
- __vector signed short a, b, c;
+ __vector signed short __a, __b, __c;
- a = (__vector signed short)vec_splats(__m1);
- b = (__vector signed short)vec_splats(__m2);
- c = vec_sub(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed short)vec_splats(__m1);
+ __b = (__vector signed short)vec_splats(__m2);
+ __c = vec_sub(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_short[0] = m1.as_short[0] - m2.as_short[0];
- res.as_short[1] = m1.as_short[1] - m2.as_short[1];
- res.as_short[2] = m1.as_short[2] - m2.as_short[2];
- res.as_short[3] = m1.as_short[3] - m2.as_short[3];
+ __res.as_short[0] = __mu1.as_short[0] - __mu2.as_short[0];
+ __res.as_short[1] = __mu1.as_short[1] - __mu2.as_short[1];
+ __res.as_short[2] = __mu1.as_short[2] - __mu2.as_short[2];
+ __res.as_short[3] = __mu1.as_short[3] - __mu2.as_short[3];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -555,22 +557,22 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_pi32(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR9
- __vector signed int a, b, c;
+ __vector signed int __a, __b, __c;
- a = (__vector signed int)vec_splats(__m1);
- b = (__vector signed int)vec_splats(__m2);
- c = vec_sub(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed int)vec_splats(__m1);
+ __b = (__vector signed int)vec_splats(__m2);
+ __c = vec_sub(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_int[0] = m1.as_int[0] - m2.as_int[0];
- res.as_int[1] = m1.as_int[1] - m2.as_int[1];
+ __res.as_int[0] = __mu1.as_int[0] - __mu2.as_int[0];
+ __res.as_int[1] = __mu1.as_int[1] - __mu2.as_int[1];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -708,25 +710,25 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_pi8(__m64 __m1, __m64 __m2) {
#if defined(_ARCH_PWR6) && defined(__powerpc64__)
- __m64 res;
- __asm__("cmpb %0,%1,%2;\n" : "=r"(res) : "r"(__m1), "r"(__m2) :);
- return (res);
+ __m64 __res;
+ __asm__("cmpb %0,%1,%2;\n" : "=r"(__res) : "r"(__m1), "r"(__m2) :);
+ return (__res);
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_char[0] = (m1.as_char[0] == m2.as_char[0]) ? -1 : 0;
- res.as_char[1] = (m1.as_char[1] == m2.as_char[1]) ? -1 : 0;
- res.as_char[2] = (m1.as_char[2] == m2.as_char[2]) ? -1 : 0;
- res.as_char[3] = (m1.as_char[3] == m2.as_char[3]) ? -1 : 0;
- res.as_char[4] = (m1.as_char[4] == m2.as_char[4]) ? -1 : 0;
- res.as_char[5] = (m1.as_char[5] == m2.as_char[5]) ? -1 : 0;
- res.as_char[6] = (m1.as_char[6] == m2.as_char[6]) ? -1 : 0;
- res.as_char[7] = (m1.as_char[7] == m2.as_char[7]) ? -1 : 0;
+ __res.as_char[0] = (__mu1.as_char[0] == __mu2.as_char[0]) ? -1 : 0;
+ __res.as_char[1] = (__mu1.as_char[1] == __mu2.as_char[1]) ? -1 : 0;
+ __res.as_char[2] = (__mu1.as_char[2] == __mu2.as_char[2]) ? -1 : 0;
+ __res.as_char[3] = (__mu1.as_char[3] == __mu2.as_char[3]) ? -1 : 0;
+ __res.as_char[4] = (__mu1.as_char[4] == __mu2.as_char[4]) ? -1 : 0;
+ __res.as_char[5] = (__mu1.as_char[5] == __mu2.as_char[5]) ? -1 : 0;
+ __res.as_char[6] = (__mu1.as_char[6] == __mu2.as_char[6]) ? -1 : 0;
+ __res.as_char[7] = (__mu1.as_char[7] == __mu2.as_char[7]) ? -1 : 0;
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -740,28 +742,28 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_pi8(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
- __vector signed char a, b, c;
+ __vector signed char __a, __b, __c;
- a = (__vector signed char)vec_splats(__m1);
- b = (__vector signed char)vec_splats(__m2);
- c = (__vector signed char)vec_cmpgt(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed char)vec_splats(__m1);
+ __b = (__vector signed char)vec_splats(__m2);
+ __c = (__vector signed char)vec_cmpgt(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_char[0] = (m1.as_char[0] > m2.as_char[0]) ? -1 : 0;
- res.as_char[1] = (m1.as_char[1] > m2.as_char[1]) ? -1 : 0;
- res.as_char[2] = (m1.as_char[2] > m2.as_char[2]) ? -1 : 0;
- res.as_char[3] = (m1.as_char[3] > m2.as_char[3]) ? -1 : 0;
- res.as_char[4] = (m1.as_char[4] > m2.as_char[4]) ? -1 : 0;
- res.as_char[5] = (m1.as_char[5] > m2.as_char[5]) ? -1 : 0;
- res.as_char[6] = (m1.as_char[6] > m2.as_char[6]) ? -1 : 0;
- res.as_char[7] = (m1.as_char[7] > m2.as_char[7]) ? -1 : 0;
+ __res.as_char[0] = (__mu1.as_char[0] > __mu2.as_char[0]) ? -1 : 0;
+ __res.as_char[1] = (__mu1.as_char[1] > __mu2.as_char[1]) ? -1 : 0;
+ __res.as_char[2] = (__mu1.as_char[2] > __mu2.as_char[2]) ? -1 : 0;
+ __res.as_char[3] = (__mu1.as_char[3] > __mu2.as_char[3]) ? -1 : 0;
+ __res.as_char[4] = (__mu1.as_char[4] > __mu2.as_char[4]) ? -1 : 0;
+ __res.as_char[5] = (__mu1.as_char[5] > __mu2.as_char[5]) ? -1 : 0;
+ __res.as_char[6] = (__mu1.as_char[6] > __mu2.as_char[6]) ? -1 : 0;
+ __res.as_char[7] = (__mu1.as_char[7] > __mu2.as_char[7]) ? -1 : 0;
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -777,24 +779,24 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_pi16(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
- __vector signed short a, b, c;
+ __vector signed short __a, __b, __c;
- a = (__vector signed short)vec_splats(__m1);
- b = (__vector signed short)vec_splats(__m2);
- c = (__vector signed short)vec_cmpeq(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed short)vec_splats(__m1);
+ __b = (__vector signed short)vec_splats(__m2);
+ __c = (__vector signed short)vec_cmpeq(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_short[0] = (m1.as_short[0] == m2.as_short[0]) ? -1 : 0;
- res.as_short[1] = (m1.as_short[1] == m2.as_short[1]) ? -1 : 0;
- res.as_short[2] = (m1.as_short[2] == m2.as_short[2]) ? -1 : 0;
- res.as_short[3] = (m1.as_short[3] == m2.as_short[3]) ? -1 : 0;
+ __res.as_short[0] = (__mu1.as_short[0] == __mu2.as_short[0]) ? -1 : 0;
+ __res.as_short[1] = (__mu1.as_short[1] == __mu2.as_short[1]) ? -1 : 0;
+ __res.as_short[2] = (__mu1.as_short[2] == __mu2.as_short[2]) ? -1 : 0;
+ __res.as_short[3] = (__mu1.as_short[3] == __mu2.as_short[3]) ? -1 : 0;
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -808,24 +810,24 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_pi16(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
- __vector signed short a, b, c;
+ __vector signed short __a, __b, __c;
- a = (__vector signed short)vec_splats(__m1);
- b = (__vector signed short)vec_splats(__m2);
- c = (__vector signed short)vec_cmpgt(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed short)vec_splats(__m1);
+ __b = (__vector signed short)vec_splats(__m2);
+ __c = (__vector signed short)vec_cmpgt(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_short[0] = (m1.as_short[0] > m2.as_short[0]) ? -1 : 0;
- res.as_short[1] = (m1.as_short[1] > m2.as_short[1]) ? -1 : 0;
- res.as_short[2] = (m1.as_short[2] > m2.as_short[2]) ? -1 : 0;
- res.as_short[3] = (m1.as_short[3] > m2.as_short[3]) ? -1 : 0;
+ __res.as_short[0] = (__mu1.as_short[0] > __mu2.as_short[0]) ? -1 : 0;
+ __res.as_short[1] = (__mu1.as_short[1] > __mu2.as_short[1]) ? -1 : 0;
+ __res.as_short[2] = (__mu1.as_short[2] > __mu2.as_short[2]) ? -1 : 0;
+ __res.as_short[3] = (__mu1.as_short[3] > __mu2.as_short[3]) ? -1 : 0;
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -841,22 +843,22 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_pi32(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR9
- __vector signed int a, b, c;
+ __vector signed int __a, __b, __c;
- a = (__vector signed int)vec_splats(__m1);
- b = (__vector signed int)vec_splats(__m2);
- c = (__vector signed int)vec_cmpeq(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed int)vec_splats(__m1);
+ __b = (__vector signed int)vec_splats(__m2);
+ __c = (__vector signed int)vec_cmpeq(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_int[0] = (m1.as_int[0] == m2.as_int[0]) ? -1 : 0;
- res.as_int[1] = (m1.as_int[1] == m2.as_int[1]) ? -1 : 0;
+ __res.as_int[0] = (__mu1.as_int[0] == __mu2.as_int[0]) ? -1 : 0;
+ __res.as_int[1] = (__mu1.as_int[1] == __mu2.as_int[1]) ? -1 : 0;
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -870,22 +872,22 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_pi32(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR9
- __vector signed int a, b, c;
+ __vector signed int __a, __b, __c;
- a = (__vector signed int)vec_splats(__m1);
- b = (__vector signed int)vec_splats(__m2);
- c = (__vector signed int)vec_cmpgt(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed int)vec_splats(__m1);
+ __b = (__vector signed int)vec_splats(__m2);
+ __c = (__vector signed int)vec_cmpgt(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_int[0] = (m1.as_int[0] > m2.as_int[0]) ? -1 : 0;
- res.as_int[1] = (m1.as_int[1] > m2.as_int[1]) ? -1 : 0;
+ __res.as_int[0] = (__mu1.as_int[0] > __mu2.as_int[0]) ? -1 : 0;
+ __res.as_int[1] = (__mu1.as_int[1] > __mu2.as_int[1]) ? -1 : 0;
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -901,12 +903,12 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_adds_pi8(__m64 __m1, __m64 __m2) {
- __vector signed char a, b, c;
+ __vector signed char __a, __b, __c;
- a = (__vector signed char)vec_splats(__m1);
- b = (__vector signed char)vec_splats(__m2);
- c = vec_adds(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed char)vec_splats(__m1);
+ __b = (__vector signed char)vec_splats(__m2);
+ __c = vec_adds(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
}
extern __inline __m64
@@ -919,12 +921,12 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_adds_pi16(__m64 __m1, __m64 __m2) {
- __vector signed short a, b, c;
+ __vector signed short __a, __b, __c;
- a = (__vector signed short)vec_splats(__m1);
- b = (__vector signed short)vec_splats(__m2);
- c = vec_adds(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed short)vec_splats(__m1);
+ __b = (__vector signed short)vec_splats(__m2);
+ __c = vec_adds(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
}
extern __inline __m64
@@ -937,12 +939,12 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_adds_pu8(__m64 __m1, __m64 __m2) {
- __vector unsigned char a, b, c;
+ __vector unsigned char __a, __b, __c;
- a = (__vector unsigned char)vec_splats(__m1);
- b = (__vector unsigned char)vec_splats(__m2);
- c = vec_adds(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector unsigned char)vec_splats(__m1);
+ __b = (__vector unsigned char)vec_splats(__m2);
+ __c = vec_adds(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
}
extern __inline __m64
@@ -956,12 +958,12 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_adds_pu16(__m64 __m1, __m64 __m2) {
- __vector unsigned short a, b, c;
+ __vector unsigned short __a, __b, __c;
- a = (__vector unsigned short)vec_splats(__m1);
- b = (__vector unsigned short)vec_splats(__m2);
- c = vec_adds(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector unsigned short)vec_splats(__m1);
+ __b = (__vector unsigned short)vec_splats(__m2);
+ __c = vec_adds(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
}
extern __inline __m64
@@ -975,12 +977,12 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_subs_pi8(__m64 __m1, __m64 __m2) {
- __vector signed char a, b, c;
+ __vector signed char __a, __b, __c;
- a = (__vector signed char)vec_splats(__m1);
- b = (__vector signed char)vec_splats(__m2);
- c = vec_subs(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed char)vec_splats(__m1);
+ __b = (__vector signed char)vec_splats(__m2);
+ __c = vec_subs(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
}
extern __inline __m64
@@ -994,12 +996,12 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_subs_pi16(__m64 __m1, __m64 __m2) {
- __vector signed short a, b, c;
+ __vector signed short __a, __b, __c;
- a = (__vector signed short)vec_splats(__m1);
- b = (__vector signed short)vec_splats(__m2);
- c = vec_subs(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed short)vec_splats(__m1);
+ __b = (__vector signed short)vec_splats(__m2);
+ __c = vec_subs(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
}
extern __inline __m64
@@ -1013,12 +1015,12 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_subs_pu8(__m64 __m1, __m64 __m2) {
- __vector unsigned char a, b, c;
+ __vector unsigned char __a, __b, __c;
- a = (__vector unsigned char)vec_splats(__m1);
- b = (__vector unsigned char)vec_splats(__m2);
- c = vec_subs(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector unsigned char)vec_splats(__m1);
+ __b = (__vector unsigned char)vec_splats(__m2);
+ __c = vec_subs(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
}
extern __inline __m64
@@ -1032,12 +1034,12 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_subs_pu16(__m64 __m1, __m64 __m2) {
- __vector unsigned short a, b, c;
+ __vector unsigned short __a, __b, __c;
- a = (__vector unsigned short)vec_splats(__m1);
- b = (__vector unsigned short)vec_splats(__m2);
- c = vec_subs(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector unsigned short)vec_splats(__m1);
+ __b = (__vector unsigned short)vec_splats(__m2);
+ __c = vec_subs(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
}
extern __inline __m64
@@ -1052,14 +1054,14 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_madd_pi16(__m64 __m1, __m64 __m2) {
- __vector signed short a, b;
- __vector signed int c;
- __vector signed int zero = {0, 0, 0, 0};
+ __vector signed short __a, __b;
+ __vector signed int __c;
+ __vector signed int __zero = {0, 0, 0, 0};
- a = (__vector signed short)vec_splats(__m1);
- b = (__vector signed short)vec_splats(__m2);
- c = vec_vmsumshm(a, b, zero);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed short)vec_splats(__m1);
+ __b = (__vector signed short)vec_splats(__m2);
+ __c = vec_vmsumshm(__a, __b, __zero);
+ return (__m64)((__vector long long)__c)[0];
}
extern __inline __m64
@@ -1072,10 +1074,10 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mulhi_pi16(__m64 __m1, __m64 __m2) {
- __vector signed short a, b;
- __vector signed short c;
- __vector signed int w0, w1;
- __vector unsigned char xform1 = {
+ __vector signed short __a, __b;
+ __vector signed short __c;
+ __vector signed int __w0, __w1;
+ __vector unsigned char __xform1 = {
#ifdef __LITTLE_ENDIAN__
0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, 0x0A,
0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
@@ -1085,14 +1087,14 @@ extern __inline __m64
#endif
};
- a = (__vector signed short)vec_splats(__m1);
- b = (__vector signed short)vec_splats(__m2);
+ __a = (__vector signed short)vec_splats(__m1);
+ __b = (__vector signed short)vec_splats(__m2);
- w0 = vec_vmulesh(a, b);
- w1 = vec_vmulosh(a, b);
- c = (__vector signed short)vec_perm(w0, w1, xform1);
+ __w0 = vec_vmulesh(__a, __b);
+ __w1 = vec_vmulosh(__a, __b);
+ __c = (__vector signed short)vec_perm(__w0, __w1, __xform1);
- return (__m64)((__vector long long)c)[0];
+ return (__m64)((__vector long long)__c)[0];
}
extern __inline __m64
@@ -1106,12 +1108,12 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mullo_pi16(__m64 __m1, __m64 __m2) {
- __vector signed short a, b, c;
+ __vector signed short __a, __b, __c;
- a = (__vector signed short)vec_splats(__m1);
- b = (__vector signed short)vec_splats(__m2);
- c = a * b;
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed short)vec_splats(__m1);
+ __b = (__vector signed short)vec_splats(__m2);
+ __c = __a * __b;
+ return (__m64)((__vector long long)__c)[0];
}
extern __inline __m64
@@ -1124,14 +1126,14 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sll_pi16(__m64 __m, __m64 __count) {
- __vector signed short m, r;
- __vector unsigned short c;
+ __vector signed short __r;
+ __vector unsigned short __c;
if (__count <= 15) {
- m = (__vector signed short)vec_splats(__m);
- c = (__vector unsigned short)vec_splats((unsigned short)__count);
- r = vec_sl(m, (__vector unsigned short)c);
- return (__m64)((__vector long long)r)[0];
+ __r = (__vector signed short)vec_splats(__m);
+ __c = (__vector unsigned short)vec_splats((unsigned short)__count);
+ __r = vec_sl(__r, (__vector unsigned short)__c);
+ return (__m64)((__vector long long)__r)[0];
} else
return (0);
}
@@ -1159,13 +1161,13 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sll_pi32(__m64 __m, __m64 __count) {
- __m64_union m, res;
+ __m64_union __res;
- m.as_m64 = __m;
+ __res.as_m64 = __m;
- res.as_int[0] = m.as_int[0] << __count;
- res.as_int[1] = m.as_int[1] << __count;
- return (res.as_m64);
+ __res.as_int[0] = __res.as_int[0] << __count;
+ __res.as_int[1] = __res.as_int[1] << __count;
+ return (__res.as_m64);
}
extern __inline __m64
@@ -1191,14 +1193,14 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sra_pi16(__m64 __m, __m64 __count) {
- __vector signed short m, r;
- __vector unsigned short c;
+ __vector signed short __r;
+ __vector unsigned short __c;
if (__count <= 15) {
- m = (__vector signed short)vec_splats(__m);
- c = (__vector unsigned short)vec_splats((unsigned short)__count);
- r = vec_sra(m, (__vector unsigned short)c);
- return (__m64)((__vector long long)r)[0];
+ __r = (__vector signed short)vec_splats(__m);
+ __c = (__vector unsigned short)vec_splats((unsigned short)__count);
+ __r = vec_sra(__r, (__vector unsigned short)__c);
+ return (__m64)((__vector long long)__r)[0];
} else
return (0);
}
@@ -1226,13 +1228,13 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sra_pi32(__m64 __m, __m64 __count) {
- __m64_union m, res;
+ __m64_union __res;
- m.as_m64 = __m;
+ __res.as_m64 = __m;
- res.as_int[0] = m.as_int[0] >> __count;
- res.as_int[1] = m.as_int[1] >> __count;
- return (res.as_m64);
+ __res.as_int[0] = __res.as_int[0] >> __count;
+ __res.as_int[1] = __res.as_int[1] >> __count;
+ return (__res.as_m64);
}
extern __inline __m64
@@ -1258,14 +1260,14 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_srl_pi16(__m64 __m, __m64 __count) {
- __vector unsigned short m, r;
- __vector unsigned short c;
+ __vector unsigned short __r;
+ __vector unsigned short __c;
if (__count <= 15) {
- m = (__vector unsigned short)vec_splats(__m);
- c = (__vector unsigned short)vec_splats((unsigned short)__count);
- r = vec_sr(m, (__vector unsigned short)c);
- return (__m64)((__vector long long)r)[0];
+ __r = (__vector unsigned short)vec_splats(__m);
+ __c = (__vector unsigned short)vec_splats((unsigned short)__count);
+ __r = vec_sr(__r, (__vector unsigned short)__c);
+ return (__m64)((__vector long long)__r)[0];
} else
return (0);
}
@@ -1293,13 +1295,13 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_srl_pi32(__m64 __m, __m64 __count) {
- __m64_union m, res;
+ __m64_union __res;
- m.as_m64 = __m;
+ __res.as_m64 = __m;
- res.as_int[0] = (unsigned int)m.as_int[0] >> __count;
- res.as_int[1] = (unsigned int)m.as_int[1] >> __count;
- return (res.as_m64);
+ __res.as_int[0] = (unsigned int)__res.as_int[0] >> __count;
+ __res.as_int[1] = (unsigned int)__res.as_int[1] >> __count;
+ return (__res.as_m64);
}
extern __inline __m64
@@ -1326,24 +1328,24 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_pi32(int __i1, int __i0) {
- __m64_union res;
+ __m64_union __res;
- res.as_int[0] = __i0;
- res.as_int[1] = __i1;
- return (res.as_m64);
+ __res.as_int[0] = __i0;
+ __res.as_int[1] = __i1;
+ return (__res.as_m64);
}
/* Creates a vector of four 16-bit values; W0 is least significant. */
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_pi16(short __w3, short __w2, short __w1, short __w0) {
- __m64_union res;
+ __m64_union __res;
- res.as_short[0] = __w0;
- res.as_short[1] = __w1;
- res.as_short[2] = __w2;
- res.as_short[3] = __w3;
- return (res.as_m64);
+ __res.as_short[0] = __w0;
+ __res.as_short[1] = __w1;
+ __res.as_short[2] = __w2;
+ __res.as_short[3] = __w3;
+ return (__res.as_m64);
}
/* Creates a vector of eight 8-bit values; B0 is least significant. */
@@ -1351,28 +1353,28 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3,
char __b2, char __b1, char __b0) {
- __m64_union res;
+ __m64_union __res;
- res.as_char[0] = __b0;
- res.as_char[1] = __b1;
- res.as_char[2] = __b2;
- res.as_char[3] = __b3;
- res.as_char[4] = __b4;
- res.as_char[5] = __b5;
- res.as_char[6] = __b6;
- res.as_char[7] = __b7;
- return (res.as_m64);
+ __res.as_char[0] = __b0;
+ __res.as_char[1] = __b1;
+ __res.as_char[2] = __b2;
+ __res.as_char[3] = __b3;
+ __res.as_char[4] = __b4;
+ __res.as_char[5] = __b5;
+ __res.as_char[6] = __b6;
+ __res.as_char[7] = __b7;
+ return (__res.as_m64);
}
/* Similar, but with the arguments in reverse order. */
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setr_pi32(int __i0, int __i1) {
- __m64_union res;
+ __m64_union __res;
- res.as_int[0] = __i0;
- res.as_int[1] = __i1;
- return (res.as_m64);
+ __res.as_int[0] = __i0;
+ __res.as_int[1] = __i1;
+ return (__res.as_m64);
}
extern __inline __m64
@@ -1392,11 +1394,11 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set1_pi32(int __i) {
- __m64_union res;
+ __m64_union __res;
- res.as_int[0] = __i;
- res.as_int[1] = __i;
- return (res.as_m64);
+ __res.as_int[0] = __i;
+ __res.as_int[1] = __i;
+ return (__res.as_m64);
}
/* Creates a vector of four 16-bit values, all elements containing W. */
@@ -1409,13 +1411,13 @@ extern __inline __m64
w = (__vector signed short)vec_splats(__w);
return (__m64)((__vector long long)w)[0];
#else
- __m64_union res;
+ __m64_union __res;
- res.as_short[0] = __w;
- res.as_short[1] = __w;
- res.as_short[2] = __w;
- res.as_short[3] = __w;
- return (res.as_m64);
+ __res.as_short[0] = __w;
+ __res.as_short[1] = __w;
+ __res.as_short[2] = __w;
+ __res.as_short[3] = __w;
+ return (__res.as_m64);
#endif
}
@@ -1424,27 +1426,28 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set1_pi8(signed char __b) {
#if _ARCH_PWR8
- __vector signed char b;
+ __vector signed char __res;
- b = (__vector signed char)vec_splats(__b);
- return (__m64)((__vector long long)b)[0];
+ __res = (__vector signed char)vec_splats(__b);
+ return (__m64)((__vector long long)__res)[0];
#else
- __m64_union res;
-
- res.as_char[0] = __b;
- res.as_char[1] = __b;
- res.as_char[2] = __b;
- res.as_char[3] = __b;
- res.as_char[4] = __b;
- res.as_char[5] = __b;
- res.as_char[6] = __b;
- res.as_char[7] = __b;
- return (res.as_m64);
+ __m64_union __res;
+
+ __res.as_char[0] = __b;
+ __res.as_char[1] = __b;
+ __res.as_char[2] = __b;
+ __res.as_char[3] = __b;
+ __res.as_char[4] = __b;
+ __res.as_char[5] = __b;
+ __res.as_char[6] = __b;
+ __res.as_char[7] = __b;
+ return (__res.as_m64);
#endif
}
#else
#include_next <mmintrin.h>
-#endif /* defined(__linux__) && defined(__ppc64__) */
+#endif /* defined(__powerpc64__) && \
+ * (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */
#endif /* _MMINTRIN_H_INCLUDED */
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/nmmintrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/nmmintrin.h
new file mode 100644
index 000000000000..789bba6bc0d3
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/nmmintrin.h
@@ -0,0 +1,26 @@
+/*===---- nmmintrin.h - Implementation of SSE4 intrinsics on PowerPC -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header is distributed to simplify porting x86_64 code that
+ makes explicit use of Intel intrinsics to powerpc64le.
+ It is the user's responsibility to determine if the results are
+ acceptable and make additional changes as necessary.
+ Note that much code that uses Intel intrinsics can be rewritten in
+ standard C or GNU C extensions, which are more portable and better
+ optimized across multiple targets. */
+#endif
+
+#ifndef NMMINTRIN_H_
+#define NMMINTRIN_H_
+
+/* We just include SSE4.1 header file. */
+#include <smmintrin.h>
+
+#endif /* NMMINTRIN_H_ */
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/pmmintrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/pmmintrin.h
index 6d93383d5412..db128192abfb 100644
--- a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/pmmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/pmmintrin.h
@@ -32,119 +32,114 @@
In the specific case of the monitor and mwait instructions there are
no direct equivalent in the PowerISA at this time. So those
intrinsics are not implemented. */
-#error "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this warning."
+#error \
+ "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this warning."
#endif
#ifndef PMMINTRIN_H_
#define PMMINTRIN_H_
-#if defined(__linux__) && defined(__ppc64__)
+#if defined(__powerpc64__) && \
+ (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX))
/* We need definitions from the SSE2 and SSE header files*/
#include <emmintrin.h>
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_addsub_ps (__m128 __X, __m128 __Y)
-{
- const __v4sf even_n0 = {-0.0, 0.0, -0.0, 0.0};
- __v4sf even_neg_Y = vec_xor(__Y, even_n0);
- return (__m128) vec_add (__X, even_neg_Y);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_addsub_ps(__m128 __X, __m128 __Y) {
+ const __v4sf __even_n0 = {-0.0, 0.0, -0.0, 0.0};
+ __v4sf __even_neg_Y = vec_xor(__Y, __even_n0);
+ return (__m128)vec_add(__X, __even_neg_Y);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_addsub_pd (__m128d __X, __m128d __Y)
-{
- const __v2df even_n0 = {-0.0, 0.0};
- __v2df even_neg_Y = vec_xor(__Y, even_n0);
- return (__m128d) vec_add (__X, even_neg_Y);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_addsub_pd(__m128d __X, __m128d __Y) {
+ const __v2df __even_n0 = {-0.0, 0.0};
+ __v2df __even_neg_Y = vec_xor(__Y, __even_n0);
+ return (__m128d)vec_add(__X, __even_neg_Y);
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_ps (__m128 __X, __m128 __Y)
-{
- __vector unsigned char xform2 = {
- 0x00, 0x01, 0x02, 0x03,
- 0x08, 0x09, 0x0A, 0x0B,
- 0x10, 0x11, 0x12, 0x13,
- 0x18, 0x19, 0x1A, 0x1B
- };
- __vector unsigned char xform1 = {
- 0x04, 0x05, 0x06, 0x07,
- 0x0C, 0x0D, 0x0E, 0x0F,
- 0x14, 0x15, 0x16, 0x17,
- 0x1C, 0x1D, 0x1E, 0x1F
- };
- return (__m128) vec_add (vec_perm ((__v4sf) __X, (__v4sf) __Y, xform2),
- vec_perm ((__v4sf) __X, (__v4sf) __Y, xform1));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hadd_ps(__m128 __X, __m128 __Y) {
+ __vector unsigned char __xform2 = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09,
+ 0x0A, 0x0B, 0x10, 0x11, 0x12, 0x13,
+ 0x18, 0x19, 0x1A, 0x1B};
+ __vector unsigned char __xform1 = {0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x14, 0x15, 0x16, 0x17,
+ 0x1C, 0x1D, 0x1E, 0x1F};
+ return (__m128)vec_add(vec_perm((__v4sf)__X, (__v4sf)__Y, __xform2),
+ vec_perm((__v4sf)__X, (__v4sf)__Y, __xform1));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_ps (__m128 __X, __m128 __Y)
-{
- __vector unsigned char xform2 = {
- 0x00, 0x01, 0x02, 0x03,
- 0x08, 0x09, 0x0A, 0x0B,
- 0x10, 0x11, 0x12, 0x13,
- 0x18, 0x19, 0x1A, 0x1B
- };
- __vector unsigned char xform1 = {
- 0x04, 0x05, 0x06, 0x07,
- 0x0C, 0x0D, 0x0E, 0x0F,
- 0x14, 0x15, 0x16, 0x17,
- 0x1C, 0x1D, 0x1E, 0x1F
- };
- return (__m128) vec_sub (vec_perm ((__v4sf) __X, (__v4sf) __Y, xform2),
- vec_perm ((__v4sf) __X, (__v4sf) __Y, xform1));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hsub_ps(__m128 __X, __m128 __Y) {
+ __vector unsigned char __xform2 = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09,
+ 0x0A, 0x0B, 0x10, 0x11, 0x12, 0x13,
+ 0x18, 0x19, 0x1A, 0x1B};
+ __vector unsigned char __xform1 = {0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x14, 0x15, 0x16, 0x17,
+ 0x1C, 0x1D, 0x1E, 0x1F};
+ return (__m128)vec_sub(vec_perm((__v4sf)__X, (__v4sf)__Y, __xform2),
+ vec_perm((__v4sf)__X, (__v4sf)__Y, __xform1));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_pd (__m128d __X, __m128d __Y)
-{
- return (__m128d) vec_add (vec_mergeh ((__v2df) __X, (__v2df)__Y),
- vec_mergel ((__v2df) __X, (__v2df)__Y));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hadd_pd(__m128d __X, __m128d __Y) {
+ return (__m128d)vec_add(vec_mergeh((__v2df)__X, (__v2df)__Y),
+ vec_mergel((__v2df)__X, (__v2df)__Y));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_pd (__m128d __X, __m128d __Y)
-{
- return (__m128d) vec_sub (vec_mergeh ((__v2df) __X, (__v2df)__Y),
- vec_mergel ((__v2df) __X, (__v2df)__Y));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hsub_pd(__m128d __X, __m128d __Y) {
+ return (__m128d)vec_sub(vec_mergeh((__v2df)__X, (__v2df)__Y),
+ vec_mergel((__v2df)__X, (__v2df)__Y));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movehdup_ps (__m128 __X)
-{
- return (__m128)vec_mergeo ((__v4su)__X, (__v4su)__X);
+#ifdef _ARCH_PWR8
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_movehdup_ps(__m128 __X) {
+ return (__m128)vec_mergeo((__v4su)__X, (__v4su)__X);
}
+#endif
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_moveldup_ps (__m128 __X)
-{
- return (__m128)vec_mergee ((__v4su)__X, (__v4su)__X);
+#ifdef _ARCH_PWR8
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_moveldup_ps(__m128 __X) {
+ return (__m128)vec_mergee((__v4su)__X, (__v4su)__X);
}
+#endif
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loaddup_pd (double const *__P)
-{
- return (__m128d) vec_splats (*__P);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_loaddup_pd(double const *__P) {
+ return (__m128d)vec_splats(*__P);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movedup_pd (__m128d __X)
-{
- return _mm_shuffle_pd (__X, __X, _MM_SHUFFLE2 (0,0));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_movedup_pd(__m128d __X) {
+ return _mm_shuffle_pd(__X, __X, _MM_SHUFFLE2(0, 0));
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_lddqu_si128 (__m128i const *__P)
-{
- return (__m128i) (vec_vsx_ld(0, (signed int const *)__P));
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_lddqu_si128(__m128i const *__P) {
+ return (__m128i)(vec_vsx_ld(0, (signed int const *)__P));
}
/* POWER8 / POWER9 have no equivalent for _mm_monitor nor _mm_wait. */
#else
#include_next <pmmintrin.h>
-#endif /* defined(__linux__) && defined(__ppc64__) */
+#endif /* defined(__powerpc64__) && \
+ * (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */
#endif /* PMMINTRIN_H_ */
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/smmintrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/smmintrin.h
index 64f0c761994d..19cdecb18d2b 100644
--- a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/smmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/smmintrin.h
@@ -14,7 +14,7 @@
#ifndef NO_WARN_X86_INTRINSICS
/* This header is distributed to simplify porting x86_64 code that
- makes explicit use of Intel intrinsics to powerp64/powerpc64le.
+ makes explicit use of Intel intrinsics to powerpc64/powerpc64le.
It is the user's responsibility to determine if the results are
acceptable and make additional changes as necessary.
@@ -29,10 +29,273 @@
#ifndef SMMINTRIN_H_
#define SMMINTRIN_H_
-#if defined(__linux__) && defined(__ppc64__)
+#if defined(__powerpc64__) && \
+ (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX))
#include <altivec.h>
-#include <emmintrin.h>
+#include <tmmintrin.h>
+
+/* Rounding mode macros. */
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_ZERO 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_NEG_INF 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
+
+#define _MM_FROUND_NINT (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_FLOOR (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_CEIL (_MM_FROUND_TO_POS_INF | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_TRUNC (_MM_FROUND_TO_ZERO | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_RINT (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_NEARBYINT (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC)
+
+#define _MM_FROUND_RAISE_EXC 0x00
+#define _MM_FROUND_NO_EXC 0x08
+
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_round_pd(__m128d __A, int __rounding) {
+ __v2df __r;
+ union {
+ double __fr;
+ long long __fpscr;
+ } __enables_save, __fpscr_save;
+
+ if (__rounding & _MM_FROUND_NO_EXC) {
+ /* Save enabled exceptions, disable all exceptions,
+ and preserve the rounding mode. */
+#ifdef _ARCH_PWR9
+ __asm__("mffsce %0" : "=f"(__fpscr_save.__fr));
+ __enables_save.__fpscr = __fpscr_save.__fpscr & 0xf8;
+#else
+ __fpscr_save.__fr = __builtin_ppc_mffs();
+ __enables_save.__fpscr = __fpscr_save.__fpscr & 0xf8;
+ __fpscr_save.__fpscr &= ~0xf8;
+ __builtin_ppc_mtfsf(0b00000011, __fpscr_save.__fr);
+#endif
+ /* Insert an artificial "read/write" reference to the variable
+ read below, to ensure the compiler does not schedule
+ a read/use of the variable before the FPSCR is modified, above.
+ This can be removed if and when GCC PR102783 is fixed.
+ */
+ __asm__("" : "+wa"(__A));
+ }
+
+ switch (__rounding) {
+ case _MM_FROUND_TO_NEAREST_INT:
+#ifdef _ARCH_PWR9
+ __fpscr_save.__fr = __builtin_ppc_mffsl();
+#else
+ __fpscr_save.__fr = __builtin_ppc_mffs();
+ __fpscr_save.__fpscr &= 0x70007f0ffL;
+#endif
+ __attribute__((fallthrough));
+ case _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC:
+ __builtin_ppc_set_fpscr_rn(0b00);
+ /* Insert an artificial "read/write" reference to the variable
+ read below, to ensure the compiler does not schedule
+ a read/use of the variable before the FPSCR is modified, above.
+ This can be removed if and when GCC PR102783 is fixed.
+ */
+ __asm__("" : "+wa"(__A));
+
+ __r = vec_rint((__v2df)__A);
+
+ /* Insert an artificial "read" reference to the variable written
+ above, to ensure the compiler does not schedule the computation
+ of the value after the manipulation of the FPSCR, below.
+ This can be removed if and when GCC PR102783 is fixed.
+ */
+ __asm__("" : : "wa"(__r));
+ __builtin_ppc_set_fpscr_rn(__fpscr_save.__fpscr);
+ break;
+ case _MM_FROUND_TO_NEG_INF:
+ case _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC:
+ __r = vec_floor((__v2df)__A);
+ break;
+ case _MM_FROUND_TO_POS_INF:
+ case _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC:
+ __r = vec_ceil((__v2df)__A);
+ break;
+ case _MM_FROUND_TO_ZERO:
+ case _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC:
+ __r = vec_trunc((__v2df)__A);
+ break;
+ case _MM_FROUND_CUR_DIRECTION:
+ __r = vec_rint((__v2df)__A);
+ break;
+ }
+ if (__rounding & _MM_FROUND_NO_EXC) {
+ /* Insert an artificial "read" reference to the variable written
+ above, to ensure the compiler does not schedule the computation
+ of the value after the manipulation of the FPSCR, below.
+ This can be removed if and when GCC PR102783 is fixed.
+ */
+ __asm__("" : : "wa"(__r));
+ /* Restore enabled exceptions. */
+#ifdef _ARCH_PWR9
+ __fpscr_save.__fr = __builtin_ppc_mffsl();
+#else
+ __fpscr_save.__fr = __builtin_ppc_mffs();
+ __fpscr_save.__fpscr &= 0x70007f0ffL;
+#endif
+ __fpscr_save.__fpscr |= __enables_save.__fpscr;
+ __builtin_ppc_mtfsf(0b00000011, __fpscr_save.__fr);
+ }
+ return (__m128d)__r;
+}
+
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_round_sd(__m128d __A, __m128d __B, int __rounding) {
+ __B = _mm_round_pd(__B, __rounding);
+ __v2df __r = {((__v2df)__B)[0], ((__v2df)__A)[1]};
+ return (__m128d)__r;
+}
+
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_round_ps(__m128 __A, int __rounding) {
+ __v4sf __r;
+ union {
+ double __fr;
+ long long __fpscr;
+ } __enables_save, __fpscr_save;
+
+ if (__rounding & _MM_FROUND_NO_EXC) {
+ /* Save enabled exceptions, disable all exceptions,
+ and preserve the rounding mode. */
+#ifdef _ARCH_PWR9
+ __asm__("mffsce %0" : "=f"(__fpscr_save.__fr));
+ __enables_save.__fpscr = __fpscr_save.__fpscr & 0xf8;
+#else
+ __fpscr_save.__fr = __builtin_ppc_mffs();
+ __enables_save.__fpscr = __fpscr_save.__fpscr & 0xf8;
+ __fpscr_save.__fpscr &= ~0xf8;
+ __builtin_ppc_mtfsf(0b00000011, __fpscr_save.__fr);
+#endif
+ /* Insert an artificial "read/write" reference to the variable
+ read below, to ensure the compiler does not schedule
+ a read/use of the variable before the FPSCR is modified, above.
+ This can be removed if and when GCC PR102783 is fixed.
+ */
+ __asm__("" : "+wa"(__A));
+ }
+
+ switch (__rounding) {
+ case _MM_FROUND_TO_NEAREST_INT:
+#ifdef _ARCH_PWR9
+ __fpscr_save.__fr = __builtin_ppc_mffsl();
+#else
+ __fpscr_save.__fr = __builtin_ppc_mffs();
+ __fpscr_save.__fpscr &= 0x70007f0ffL;
+#endif
+ __attribute__((fallthrough));
+ case _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC:
+ __builtin_ppc_set_fpscr_rn(0b00);
+ /* Insert an artificial "read/write" reference to the variable
+ read below, to ensure the compiler does not schedule
+ a read/use of the variable before the FPSCR is modified, above.
+ This can be removed if and when GCC PR102783 is fixed.
+ */
+ __asm__("" : "+wa"(__A));
+
+ __r = vec_rint((__v4sf)__A);
+
+ /* Insert an artificial "read" reference to the variable written
+ above, to ensure the compiler does not schedule the computation
+ of the value after the manipulation of the FPSCR, below.
+ This can be removed if and when GCC PR102783 is fixed.
+ */
+ __asm__("" : : "wa"(__r));
+ __builtin_ppc_set_fpscr_rn(__fpscr_save.__fpscr);
+ break;
+ case _MM_FROUND_TO_NEG_INF:
+ case _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC:
+ __r = vec_floor((__v4sf)__A);
+ break;
+ case _MM_FROUND_TO_POS_INF:
+ case _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC:
+ __r = vec_ceil((__v4sf)__A);
+ break;
+ case _MM_FROUND_TO_ZERO:
+ case _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC:
+ __r = vec_trunc((__v4sf)__A);
+ break;
+ case _MM_FROUND_CUR_DIRECTION:
+ __r = vec_rint((__v4sf)__A);
+ break;
+ }
+ if (__rounding & _MM_FROUND_NO_EXC) {
+ /* Insert an artificial "read" reference to the variable written
+ above, to ensure the compiler does not schedule the computation
+ of the value after the manipulation of the FPSCR, below.
+ This can be removed if and when GCC PR102783 is fixed.
+ */
+ __asm__("" : : "wa"(__r));
+ /* Restore enabled exceptions. */
+#ifdef _ARCH_PWR9
+ __fpscr_save.__fr = __builtin_ppc_mffsl();
+#else
+ __fpscr_save.__fr = __builtin_ppc_mffs();
+ __fpscr_save.__fpscr &= 0x70007f0ffL;
+#endif
+ __fpscr_save.__fpscr |= __enables_save.__fpscr;
+ __builtin_ppc_mtfsf(0b00000011, __fpscr_save.__fr);
+ }
+ return (__m128)__r;
+}
+
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_round_ss(__m128 __A, __m128 __B, int __rounding) {
+ __B = _mm_round_ps(__B, __rounding);
+ __v4sf __r = (__v4sf)__A;
+ __r[0] = ((__v4sf)__B)[0];
+ return (__m128)__r;
+}
+
+#define _mm_ceil_pd(V) _mm_round_pd((V), _MM_FROUND_CEIL)
+#define _mm_ceil_sd(D, V) _mm_round_sd((D), (V), _MM_FROUND_CEIL)
+
+#define _mm_floor_pd(V) _mm_round_pd((V), _MM_FROUND_FLOOR)
+#define _mm_floor_sd(D, V) _mm_round_sd((D), (V), _MM_FROUND_FLOOR)
+
+#define _mm_ceil_ps(V) _mm_round_ps((V), _MM_FROUND_CEIL)
+#define _mm_ceil_ss(D, V) _mm_round_ss((D), (V), _MM_FROUND_CEIL)
+
+#define _mm_floor_ps(V) _mm_round_ps((V), _MM_FROUND_FLOOR)
+#define _mm_floor_ss(D, V) _mm_round_ss((D), (V), _MM_FROUND_FLOOR)
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_insert_epi8(__m128i const __A, int const __D, int const __N) {
+ __v16qi __result = (__v16qi)__A;
+
+ __result[__N & 0xf] = __D;
+
+ return (__m128i)__result;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_insert_epi32(__m128i const __A, int const __D, int const __N) {
+ __v4si __result = (__v4si)__A;
+
+ __result[__N & 3] = __D;
+
+ return (__m128i)__result;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_insert_epi64(__m128i const __A, long long const __D, int const __N) {
+ __v2di __result = (__v2di)__A;
+
+ __result[__N & 1] = __D;
+
+ return (__m128i)__result;
+}
extern __inline int
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
@@ -58,52 +321,363 @@ extern __inline int
return ((__v4si)__X)[__N & 3];
}
+#ifdef _ARCH_PWR8
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blend_epi16(__m128i __A, __m128i __B, const int __imm8) {
- __v16qi __charmask = vec_splats((signed char)__imm8);
+ __v16qu __charmask = vec_splats((unsigned char)__imm8);
__charmask = vec_gb(__charmask);
- __v8hu __shortmask = (__v8hu)vec_unpackh(__charmask);
+ __v8hu __shortmask = (__v8hu)vec_unpackh((__v16qi)__charmask);
#ifdef __BIG_ENDIAN__
__shortmask = vec_reve(__shortmask);
#endif
return (__m128i)vec_sel((__v8hu)__A, (__v8hu)__B, __shortmask);
}
+#endif
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blendv_epi8(__m128i __A, __m128i __B, __m128i __mask) {
+#ifdef _ARCH_PWR10
+ return (__m128i)vec_blendv((__v16qi)__A, (__v16qi)__B, (__v16qu)__mask);
+#else
const __v16qu __seven = vec_splats((unsigned char)0x07);
__v16qu __lmask = vec_sra((__v16qu)__mask, __seven);
- return (__m128i)vec_sel((__v16qu)__A, (__v16qu)__B, __lmask);
+ return (__m128i)vec_sel((__v16qi)__A, (__v16qi)__B, __lmask);
+#endif
+}
+
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_blend_ps(__m128 __A, __m128 __B, const int __imm8) {
+ __v16qu __pcv[] = {
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ {16, 17, 18, 19, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ {0, 1, 2, 3, 20, 21, 22, 23, 8, 9, 10, 11, 12, 13, 14, 15},
+ {16, 17, 18, 19, 20, 21, 22, 23, 8, 9, 10, 11, 12, 13, 14, 15},
+ {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 12, 13, 14, 15},
+ {16, 17, 18, 19, 4, 5, 6, 7, 24, 25, 26, 27, 12, 13, 14, 15},
+ {0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 12, 13, 14, 15},
+ {16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 12, 13, 14, 15},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 28, 29, 30, 31},
+ {16, 17, 18, 19, 4, 5, 6, 7, 8, 9, 10, 11, 28, 29, 30, 31},
+ {0, 1, 2, 3, 20, 21, 22, 23, 8, 9, 10, 11, 28, 29, 30, 31},
+ {16, 17, 18, 19, 20, 21, 22, 23, 8, 9, 10, 11, 28, 29, 30, 31},
+ {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31},
+ {16, 17, 18, 19, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31},
+ {0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
+ {16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
+ };
+ __v16qu __r = vec_perm((__v16qu)__A, (__v16qu)__B, __pcv[__imm8]);
+ return (__m128)__r;
+}
+
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_blendv_ps(__m128 __A, __m128 __B, __m128 __mask) {
+#ifdef _ARCH_PWR10
+ return (__m128)vec_blendv((__v4sf)__A, (__v4sf)__B, (__v4su)__mask);
+#else
+ const __v4si __zero = {0};
+ const __vector __bool int __boolmask = vec_cmplt((__v4si)__mask, __zero);
+ return (__m128)vec_sel((__v4su)__A, (__v4su)__B, (__v4su)__boolmask);
+#endif
+}
+
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_blend_pd(__m128d __A, __m128d __B, const int __imm8) {
+ __v16qu __pcv[] = {
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ {16, 17, 18, 19, 20, 21, 22, 23, 8, 9, 10, 11, 12, 13, 14, 15},
+ {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31},
+ {16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}};
+ __v16qu __r = vec_perm((__v16qu)__A, (__v16qu)__B, __pcv[__imm8]);
+ return (__m128d)__r;
}
+#ifdef _ARCH_PWR8
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_blendv_pd(__m128d __A, __m128d __B, __m128d __mask) {
+#ifdef _ARCH_PWR10
+ return (__m128d)vec_blendv((__v2df)__A, (__v2df)__B, (__v2du)__mask);
+#else
+ const __v2di __zero = {0};
+ const __vector __bool long long __boolmask =
+ vec_cmplt((__v2di)__mask, __zero);
+ return (__m128d)vec_sel((__v2du)__A, (__v2du)__B, (__v2du)__boolmask);
+#endif
+}
+#endif
+
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_testz_si128(__m128i __A, __m128i __B) {
+ /* Note: This implementation does NOT set "zero" or "carry" flags. */
+ const __v16qu __zero = {0};
+ return vec_all_eq(vec_and((__v16qu)__A, (__v16qu)__B), __zero);
+}
+
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_testc_si128(__m128i __A, __m128i __B) {
+ /* Note: This implementation does NOT set "zero" or "carry" flags. */
+ const __v16qu __zero = {0};
+ const __v16qu __notA = vec_nor((__v16qu)__A, (__v16qu)__A);
+ return vec_all_eq(vec_and((__v16qu)__notA, (__v16qu)__B), __zero);
+}
+
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_testnzc_si128(__m128i __A, __m128i __B) {
+ /* Note: This implementation does NOT set "zero" or "carry" flags. */
+ return _mm_testz_si128(__A, __B) == 0 && _mm_testc_si128(__A, __B) == 0;
+}
+
+#define _mm_test_all_zeros(M, V) _mm_testz_si128((M), (V))
+
+#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V)))
+
+#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V))
+
+#ifdef _ARCH_PWR8
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
- _mm_insert_epi8(__m128i const __A, int const __D, int const __N) {
- __v16qi result = (__v16qi)__A;
- result[__N & 0xf] = __D;
- return (__m128i)result;
+ _mm_cmpeq_epi64(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_cmpeq((__v2di)__X, (__v2di)__Y);
}
+#endif
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
- _mm_insert_epi32(__m128i const __A, int const __D, int const __N) {
- __v4si result = (__v4si)__A;
- result[__N & 3] = __D;
- return (__m128i)result;
+ _mm_min_epi8(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_min((__v16qi)__X, (__v16qi)__Y);
}
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
- _mm_insert_epi64(__m128i const __A, long long const __D, int const __N) {
- __v2di result = (__v2di)__A;
- result[__N & 1] = __D;
- return (__m128i)result;
+ _mm_min_epu16(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_min((__v8hu)__X, (__v8hu)__Y);
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_min_epi32(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_min((__v4si)__X, (__v4si)__Y);
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_min_epu32(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_min((__v4su)__X, (__v4su)__Y);
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_max_epi8(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_max((__v16qi)__X, (__v16qi)__Y);
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_max_epu16(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_max((__v8hu)__X, (__v8hu)__Y);
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_max_epi32(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_max((__v4si)__X, (__v4si)__Y);
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_max_epu32(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_max((__v4su)__X, (__v4su)__Y);
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mullo_epi32(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_mul((__v4su)__X, (__v4su)__Y);
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mul_epi32(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_mule((__v4si)__X, (__v4si)__Y);
+}
+#endif
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepi8_epi16(__m128i __A) {
+ return (__m128i)vec_unpackh((__v16qi)__A);
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepi8_epi32(__m128i __A) {
+ __A = (__m128i)vec_unpackh((__v16qi)__A);
+ return (__m128i)vec_unpackh((__v8hi)__A);
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepi8_epi64(__m128i __A) {
+ __A = (__m128i)vec_unpackh((__v16qi)__A);
+ __A = (__m128i)vec_unpackh((__v8hi)__A);
+ return (__m128i)vec_unpackh((__v4si)__A);
+}
+#endif
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepi16_epi32(__m128i __A) {
+ return (__m128i)vec_unpackh((__v8hi)__A);
}
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepi16_epi64(__m128i __A) {
+ __A = (__m128i)vec_unpackh((__v8hi)__A);
+ return (__m128i)vec_unpackh((__v4si)__A);
+}
+#endif
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepi32_epi64(__m128i __A) {
+ return (__m128i)vec_unpackh((__v4si)__A);
+}
+#endif
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepu8_epi16(__m128i __A) {
+ const __v16qu __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+ __A = (__m128i)vec_mergeh((__v16qu)__A, __zero);
+#else /* __BIG_ENDIAN__. */
+ __A = (__m128i)vec_mergeh(__zero, (__v16qu)__A);
+#endif /* __BIG_ENDIAN__. */
+ return __A;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepu8_epi32(__m128i __A) {
+ const __v16qu __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+ __A = (__m128i)vec_mergeh((__v16qu)__A, __zero);
+ __A = (__m128i)vec_mergeh((__v8hu)__A, (__v8hu)__zero);
+#else /* __BIG_ENDIAN__. */
+ __A = (__m128i)vec_mergeh(__zero, (__v16qu)__A);
+ __A = (__m128i)vec_mergeh((__v8hu)__zero, (__v8hu)__A);
+#endif /* __BIG_ENDIAN__. */
+ return __A;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepu8_epi64(__m128i __A) {
+ const __v16qu __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+ __A = (__m128i)vec_mergeh((__v16qu)__A, __zero);
+ __A = (__m128i)vec_mergeh((__v8hu)__A, (__v8hu)__zero);
+ __A = (__m128i)vec_mergeh((__v4su)__A, (__v4su)__zero);
+#else /* __BIG_ENDIAN__. */
+ __A = (__m128i)vec_mergeh(__zero, (__v16qu)__A);
+ __A = (__m128i)vec_mergeh((__v8hu)__zero, (__v8hu)__A);
+ __A = (__m128i)vec_mergeh((__v4su)__zero, (__v4su)__A);
+#endif /* __BIG_ENDIAN__. */
+ return __A;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepu16_epi32(__m128i __A) {
+ const __v8hu __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+ __A = (__m128i)vec_mergeh((__v8hu)__A, __zero);
+#else /* __BIG_ENDIAN__. */
+ __A = (__m128i)vec_mergeh(__zero, (__v8hu)__A);
+#endif /* __BIG_ENDIAN__. */
+ return __A;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepu16_epi64(__m128i __A) {
+ const __v8hu __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+ __A = (__m128i)vec_mergeh((__v8hu)__A, __zero);
+ __A = (__m128i)vec_mergeh((__v4su)__A, (__v4su)__zero);
+#else /* __BIG_ENDIAN__. */
+ __A = (__m128i)vec_mergeh(__zero, (__v8hu)__A);
+ __A = (__m128i)vec_mergeh((__v4su)__zero, (__v4su)__A);
+#endif /* __BIG_ENDIAN__. */
+ return __A;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepu32_epi64(__m128i __A) {
+ const __v4su __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+ __A = (__m128i)vec_mergeh((__v4su)__A, __zero);
+#else /* __BIG_ENDIAN__. */
+ __A = (__m128i)vec_mergeh(__zero, (__v4su)__A);
+#endif /* __BIG_ENDIAN__. */
+ return __A;
+}
+
+/* Return horizontal packed word minimum and its index in bits [15:0]
+ and bits [18:16] respectively. */
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_minpos_epu16(__m128i __A) {
+ union __u {
+ __m128i __m;
+ __v8hu __uh;
+ };
+ union __u __u = {.__m = __A}, __r = {.__m = {0}};
+ unsigned short __ridx = 0;
+ unsigned short __rmin = __u.__uh[__ridx];
+ unsigned long __i;
+ for (__i = 1; __i < 8; __i++) {
+ if (__u.__uh[__i] < __rmin) {
+ __rmin = __u.__uh[__i];
+ __ridx = __i;
+ }
+ }
+ __r.__uh[0] = __rmin;
+ __r.__uh[1] = __ridx;
+ return __r.__m;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_packus_epi32(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_packsu((__v4si)__X, (__v4si)__Y);
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_epi64(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_cmpgt((__v2di)__X, (__v2di)__Y);
+}
+#endif
+
#else
#include_next <smmintrin.h>
-#endif /* defined(__linux__) && defined(__ppc64__) */
+#endif /* defined(__powerpc64__) && \
+ * (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */
-#endif /* _SMMINTRIN_H_ */
+#endif /* SMMINTRIN_H_ */
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/tmmintrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/tmmintrin.h
index b5a935d5e47e..92f08676d2df 100644
--- a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/tmmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/tmmintrin.h
@@ -25,7 +25,8 @@
#ifndef TMMINTRIN_H_
#define TMMINTRIN_H_
-#if defined(__linux__) && defined(__ppc64__)
+#if defined(__powerpc64__) && \
+ (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX))
#include <altivec.h>
@@ -33,463 +34,420 @@
#include <pmmintrin.h>
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_epi16 (__m128i __A)
-{
- return (__m128i) vec_abs ((__v8hi) __A);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_abs_epi16(__m128i __A) {
+ return (__m128i)vec_abs((__v8hi)__A);
}
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_epi32 (__m128i __A)
-{
- return (__m128i) vec_abs ((__v4si) __A);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_abs_epi32(__m128i __A) {
+ return (__m128i)vec_abs((__v4si)__A);
}
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_epi8 (__m128i __A)
-{
- return (__m128i) vec_abs ((__v16qi) __A);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_abs_epi8(__m128i __A) {
+ return (__m128i)vec_abs((__v16qi)__A);
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_pi16 (__m64 __A)
-{
- __v8hi __B = (__v8hi) (__v2du) { __A, __A };
- return (__m64) ((__v2du) vec_abs (__B))[0];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_abs_pi16(__m64 __A) {
+ __v8hi __B = (__v8hi)(__v2du){__A, __A};
+ return (__m64)((__v2du)vec_abs(__B))[0];
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_pi32 (__m64 __A)
-{
- __v4si __B = (__v4si) (__v2du) { __A, __A };
- return (__m64) ((__v2du) vec_abs (__B))[0];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_abs_pi32(__m64 __A) {
+ __v4si __B = (__v4si)(__v2du){__A, __A};
+ return (__m64)((__v2du)vec_abs(__B))[0];
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_pi8 (__m64 __A)
-{
- __v16qi __B = (__v16qi) (__v2du) { __A, __A };
- return (__m64) ((__v2du) vec_abs (__B))[0];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_abs_pi8(__m64 __A) {
+ __v16qi __B = (__v16qi)(__v2du){__A, __A};
+ return (__m64)((__v2du)vec_abs(__B))[0];
}
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_alignr_epi8 (__m128i __A, __m128i __B, const unsigned int __count)
-{
- if (__builtin_constant_p (__count) && __count < 16)
- {
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_alignr_epi8(__m128i __A, __m128i __B, const unsigned int __count) {
+ if (__builtin_constant_p(__count) && __count < 16) {
#ifdef __LITTLE_ENDIAN__
- __A = (__m128i) vec_reve ((__v16qu) __A);
- __B = (__m128i) vec_reve ((__v16qu) __B);
+ __A = (__m128i)vec_reve((__v16qu)__A);
+ __B = (__m128i)vec_reve((__v16qu)__B);
#endif
- __A = (__m128i) vec_sld ((__v16qu) __B, (__v16qu) __A, __count);
+ __A = (__m128i)vec_sld((__v16qu)__B, (__v16qu)__A, __count);
#ifdef __LITTLE_ENDIAN__
- __A = (__m128i) vec_reve ((__v16qu) __A);
+ __A = (__m128i)vec_reve((__v16qu)__A);
#endif
- return __A;
- }
+ return __A;
+ }
if (__count == 0)
return __B;
- if (__count >= 16)
- {
- if (__count >= 32)
- {
- const __v16qu zero = { 0 };
- return (__m128i) zero;
- }
- else
- {
- const __v16qu __shift =
- vec_splats ((unsigned char) ((__count - 16) * 8));
+ if (__count >= 16) {
+ if (__count >= 32) {
+ const __v16qu __zero = {0};
+ return (__m128i)__zero;
+ } else {
+ const __v16qu __shift = vec_splats((unsigned char)((__count - 16) * 8));
#ifdef __LITTLE_ENDIAN__
- return (__m128i) vec_sro ((__v16qu) __A, __shift);
+ return (__m128i)vec_sro((__v16qu)__A, __shift);
#else
- return (__m128i) vec_slo ((__v16qu) __A, __shift);
+ return (__m128i)vec_slo((__v16qu)__A, __shift);
#endif
- }
}
- else
- {
- const __v16qu __shiftA =
- vec_splats ((unsigned char) ((16 - __count) * 8));
- const __v16qu __shiftB = vec_splats ((unsigned char) (__count * 8));
+ } else {
+ const __v16qu __shiftA = vec_splats((unsigned char)((16 - __count) * 8));
+ const __v16qu __shiftB = vec_splats((unsigned char)(__count * 8));
#ifdef __LITTLE_ENDIAN__
- __A = (__m128i) vec_slo ((__v16qu) __A, __shiftA);
- __B = (__m128i) vec_sro ((__v16qu) __B, __shiftB);
+ __A = (__m128i)vec_slo((__v16qu)__A, __shiftA);
+ __B = (__m128i)vec_sro((__v16qu)__B, __shiftB);
#else
- __A = (__m128i) vec_sro ((__v16qu) __A, __shiftA);
- __B = (__m128i) vec_slo ((__v16qu) __B, __shiftB);
+ __A = (__m128i)vec_sro((__v16qu)__A, __shiftA);
+ __B = (__m128i)vec_slo((__v16qu)__B, __shiftB);
#endif
- return (__m128i) vec_or ((__v16qu) __A, (__v16qu) __B);
- }
+ return (__m128i)vec_or((__v16qu)__A, (__v16qu)__B);
+ }
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_alignr_pi8 (__m64 __A, __m64 __B, unsigned int __count)
-{
- if (__count < 16)
- {
- __v2du __C = { __B, __A };
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_alignr_pi8(__m64 __A, __m64 __B, unsigned int __count) {
+ if (__count < 16) {
+ __v2du __C = {__B, __A};
#ifdef __LITTLE_ENDIAN__
- const __v4su __shift = { __count << 3, 0, 0, 0 };
- __C = (__v2du) vec_sro ((__v16qu) __C, (__v16qu) __shift);
+ const __v4su __shift = {__count << 3, 0, 0, 0};
+ __C = (__v2du)vec_sro((__v16qu)__C, (__v16qu)__shift);
#else
- const __v4su __shift = { 0, 0, 0, __count << 3 };
- __C = (__v2du) vec_slo ((__v16qu) __C, (__v16qu) __shift);
+ const __v4su __shift = {0, 0, 0, __count << 3};
+ __C = (__v2du)vec_slo((__v16qu)__C, (__v16qu)__shift);
#endif
- return (__m64) __C[0];
- }
- else
- {
- const __m64 __zero = { 0 };
- return __zero;
- }
+ return (__m64)__C[0];
+ } else {
+ const __m64 __zero = {0};
+ return __zero;
+ }
}
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_epi16 (__m128i __A, __m128i __B)
-{
- const __v16qu __P =
- { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
- const __v16qu __Q =
- { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
- __v8hi __C = vec_perm ((__v8hi) __A, (__v8hi) __B, __P);
- __v8hi __D = vec_perm ((__v8hi) __A, (__v8hi) __B, __Q);
- return (__m128i) vec_add (__C, __D);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hadd_epi16(__m128i __A, __m128i __B) {
+ const __v16qu __P = {0, 1, 4, 5, 8, 9, 12, 13,
+ 16, 17, 20, 21, 24, 25, 28, 29};
+ const __v16qu __Q = {2, 3, 6, 7, 10, 11, 14, 15,
+ 18, 19, 22, 23, 26, 27, 30, 31};
+ __v8hi __C = vec_perm((__v8hi)__A, (__v8hi)__B, __P);
+ __v8hi __D = vec_perm((__v8hi)__A, (__v8hi)__B, __Q);
+ return (__m128i)vec_add(__C, __D);
}
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_epi32 (__m128i __A, __m128i __B)
-{
- const __v16qu __P =
- { 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 };
- const __v16qu __Q =
- { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 };
- __v4si __C = vec_perm ((__v4si) __A, (__v4si) __B, __P);
- __v4si __D = vec_perm ((__v4si) __A, (__v4si) __B, __Q);
- return (__m128i) vec_add (__C, __D);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hadd_epi32(__m128i __A, __m128i __B) {
+ const __v16qu __P = {0, 1, 2, 3, 8, 9, 10, 11,
+ 16, 17, 18, 19, 24, 25, 26, 27};
+ const __v16qu __Q = {4, 5, 6, 7, 12, 13, 14, 15,
+ 20, 21, 22, 23, 28, 29, 30, 31};
+ __v4si __C = vec_perm((__v4si)__A, (__v4si)__B, __P);
+ __v4si __D = vec_perm((__v4si)__A, (__v4si)__B, __Q);
+ return (__m128i)vec_add(__C, __D);
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_pi16 (__m64 __A, __m64 __B)
-{
- __v8hi __C = (__v8hi) (__v2du) { __A, __B };
- const __v16qu __P =
- { 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13 };
- const __v16qu __Q =
- { 2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15 };
- __v8hi __D = vec_perm (__C, __C, __Q);
- __C = vec_perm (__C, __C, __P);
- __C = vec_add (__C, __D);
- return (__m64) ((__v2du) __C)[1];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hadd_pi16(__m64 __A, __m64 __B) {
+ __v8hi __C = (__v8hi)(__v2du){__A, __B};
+ const __v16qu __P = {0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13};
+ const __v16qu __Q = {2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15};
+ __v8hi __D = vec_perm(__C, __C, __Q);
+ __C = vec_perm(__C, __C, __P);
+ __C = vec_add(__C, __D);
+ return (__m64)((__v2du)__C)[1];
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_pi32 (__m64 __A, __m64 __B)
-{
- __v4si __C = (__v4si) (__v2du) { __A, __B };
- const __v16qu __P =
- { 0, 1, 2, 3, 8, 9, 10, 11, 0, 1, 2, 3, 8, 9, 10, 11 };
- const __v16qu __Q =
- { 4, 5, 6, 7, 12, 13, 14, 15, 4, 5, 6, 7, 12, 13, 14, 15 };
- __v4si __D = vec_perm (__C, __C, __Q);
- __C = vec_perm (__C, __C, __P);
- __C = vec_add (__C, __D);
- return (__m64) ((__v2du) __C)[1];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hadd_pi32(__m64 __A, __m64 __B) {
+ __v4si __C = (__v4si)(__v2du){__A, __B};
+ const __v16qu __P = {0, 1, 2, 3, 8, 9, 10, 11, 0, 1, 2, 3, 8, 9, 10, 11};
+ const __v16qu __Q = {4, 5, 6, 7, 12, 13, 14, 15, 4, 5, 6, 7, 12, 13, 14, 15};
+ __v4si __D = vec_perm(__C, __C, __Q);
+ __C = vec_perm(__C, __C, __P);
+ __C = vec_add(__C, __D);
+ return (__m64)((__v2du)__C)[1];
}
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadds_epi16 (__m128i __A, __m128i __B)
-{
- __v4si __C = { 0 }, __D = { 0 };
- __C = vec_sum4s ((__v8hi) __A, __C);
- __D = vec_sum4s ((__v8hi) __B, __D);
- __C = (__v4si) vec_packs (__C, __D);
- return (__m128i) __C;
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hadds_epi16(__m128i __A, __m128i __B) {
+ __v4si __C = {0}, __D = {0};
+ __C = vec_sum4s((__v8hi)__A, __C);
+ __D = vec_sum4s((__v8hi)__B, __D);
+ __C = (__v4si)vec_packs(__C, __D);
+ return (__m128i)__C;
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadds_pi16 (__m64 __A, __m64 __B)
-{
- const __v4si __zero = { 0 };
- __v8hi __C = (__v8hi) (__v2du) { __A, __B };
- __v4si __D = vec_sum4s (__C, __zero);
- __C = vec_packs (__D, __D);
- return (__m64) ((__v2du) __C)[1];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hadds_pi16(__m64 __A, __m64 __B) {
+ const __v4si __zero = {0};
+ __v8hi __C = (__v8hi)(__v2du){__A, __B};
+ __v4si __D = vec_sum4s(__C, __zero);
+ __C = vec_packs(__D, __D);
+ return (__m64)((__v2du)__C)[1];
}
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_epi16 (__m128i __A, __m128i __B)
-{
- const __v16qu __P =
- { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
- const __v16qu __Q =
- { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
- __v8hi __C = vec_perm ((__v8hi) __A, (__v8hi) __B, __P);
- __v8hi __D = vec_perm ((__v8hi) __A, (__v8hi) __B, __Q);
- return (__m128i) vec_sub (__C, __D);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hsub_epi16(__m128i __A, __m128i __B) {
+ const __v16qu __P = {0, 1, 4, 5, 8, 9, 12, 13,
+ 16, 17, 20, 21, 24, 25, 28, 29};
+ const __v16qu __Q = {2, 3, 6, 7, 10, 11, 14, 15,
+ 18, 19, 22, 23, 26, 27, 30, 31};
+ __v8hi __C = vec_perm((__v8hi)__A, (__v8hi)__B, __P);
+ __v8hi __D = vec_perm((__v8hi)__A, (__v8hi)__B, __Q);
+ return (__m128i)vec_sub(__C, __D);
}
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_epi32 (__m128i __A, __m128i __B)
-{
- const __v16qu __P =
- { 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 };
- const __v16qu __Q =
- { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 };
- __v4si __C = vec_perm ((__v4si) __A, (__v4si) __B, __P);
- __v4si __D = vec_perm ((__v4si) __A, (__v4si) __B, __Q);
- return (__m128i) vec_sub (__C, __D);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hsub_epi32(__m128i __A, __m128i __B) {
+ const __v16qu __P = {0, 1, 2, 3, 8, 9, 10, 11,
+ 16, 17, 18, 19, 24, 25, 26, 27};
+ const __v16qu __Q = {4, 5, 6, 7, 12, 13, 14, 15,
+ 20, 21, 22, 23, 28, 29, 30, 31};
+ __v4si __C = vec_perm((__v4si)__A, (__v4si)__B, __P);
+ __v4si __D = vec_perm((__v4si)__A, (__v4si)__B, __Q);
+ return (__m128i)vec_sub(__C, __D);
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_pi16 (__m64 __A, __m64 __B)
-{
- const __v16qu __P =
- { 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13 };
- const __v16qu __Q =
- { 2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15 };
- __v8hi __C = (__v8hi) (__v2du) { __A, __B };
- __v8hi __D = vec_perm (__C, __C, __Q);
- __C = vec_perm (__C, __C, __P);
- __C = vec_sub (__C, __D);
- return (__m64) ((__v2du) __C)[1];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hsub_pi16(__m64 __A, __m64 __B) {
+ const __v16qu __P = {0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13};
+ const __v16qu __Q = {2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15};
+ __v8hi __C = (__v8hi)(__v2du){__A, __B};
+ __v8hi __D = vec_perm(__C, __C, __Q);
+ __C = vec_perm(__C, __C, __P);
+ __C = vec_sub(__C, __D);
+ return (__m64)((__v2du)__C)[1];
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_pi32 (__m64 __A, __m64 __B)
-{
- const __v16qu __P =
- { 0, 1, 2, 3, 8, 9, 10, 11, 0, 1, 2, 3, 8, 9, 10, 11 };
- const __v16qu __Q =
- { 4, 5, 6, 7, 12, 13, 14, 15, 4, 5, 6, 7, 12, 13, 14, 15 };
- __v4si __C = (__v4si) (__v2du) { __A, __B };
- __v4si __D = vec_perm (__C, __C, __Q);
- __C = vec_perm (__C, __C, __P);
- __C = vec_sub (__C, __D);
- return (__m64) ((__v2du) __C)[1];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hsub_pi32(__m64 __A, __m64 __B) {
+ const __v16qu __P = {0, 1, 2, 3, 8, 9, 10, 11, 0, 1, 2, 3, 8, 9, 10, 11};
+ const __v16qu __Q = {4, 5, 6, 7, 12, 13, 14, 15, 4, 5, 6, 7, 12, 13, 14, 15};
+ __v4si __C = (__v4si)(__v2du){__A, __B};
+ __v4si __D = vec_perm(__C, __C, __Q);
+ __C = vec_perm(__C, __C, __P);
+ __C = vec_sub(__C, __D);
+ return (__m64)((__v2du)__C)[1];
}
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsubs_epi16 (__m128i __A, __m128i __B)
-{
- const __v16qu __P =
- { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
- const __v16qu __Q =
- { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
- __v8hi __C = vec_perm ((__v8hi) __A, (__v8hi) __B, __P);
- __v8hi __D = vec_perm ((__v8hi) __A, (__v8hi) __B, __Q);
- return (__m128i) vec_subs (__C, __D);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hsubs_epi16(__m128i __A, __m128i __B) {
+ const __v16qu __P = {0, 1, 4, 5, 8, 9, 12, 13,
+ 16, 17, 20, 21, 24, 25, 28, 29};
+ const __v16qu __Q = {2, 3, 6, 7, 10, 11, 14, 15,
+ 18, 19, 22, 23, 26, 27, 30, 31};
+ __v8hi __C = vec_perm((__v8hi)__A, (__v8hi)__B, __P);
+ __v8hi __D = vec_perm((__v8hi)__A, (__v8hi)__B, __Q);
+ return (__m128i)vec_subs(__C, __D);
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsubs_pi16 (__m64 __A, __m64 __B)
-{
- const __v16qu __P =
- { 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13 };
- const __v16qu __Q =
- { 2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15 };
- __v8hi __C = (__v8hi) (__v2du) { __A, __B };
- __v8hi __D = vec_perm (__C, __C, __P);
- __v8hi __E = vec_perm (__C, __C, __Q);
- __C = vec_subs (__D, __E);
- return (__m64) ((__v2du) __C)[1];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hsubs_pi16(__m64 __A, __m64 __B) {
+ const __v16qu __P = {0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13};
+ const __v16qu __Q = {2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15};
+ __v8hi __C = (__v8hi)(__v2du){__A, __B};
+ __v8hi __D = vec_perm(__C, __C, __P);
+ __v8hi __E = vec_perm(__C, __C, __Q);
+ __C = vec_subs(__D, __E);
+ return (__m64)((__v2du)__C)[1];
}
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shuffle_epi8 (__m128i __A, __m128i __B)
-{
- const __v16qi __zero = { 0 };
- __vector __bool char __select = vec_cmplt ((__v16qi) __B, __zero);
- __v16qi __C = vec_perm ((__v16qi) __A, (__v16qi) __A, (__v16qu) __B);
- return (__m128i) vec_sel (__C, __zero, __select);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_shuffle_epi8(__m128i __A, __m128i __B) {
+ const __v16qi __zero = {0};
+ __vector __bool char __select = vec_cmplt((__v16qi)__B, __zero);
+ __v16qi __C = vec_perm((__v16qi)__A, (__v16qi)__A, (__v16qu)__B);
+ return (__m128i)vec_sel(__C, __zero, __select);
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shuffle_pi8 (__m64 __A, __m64 __B)
-{
- const __v16qi __zero = { 0 };
- __v16qi __C = (__v16qi) (__v2du) { __A, __A };
- __v16qi __D = (__v16qi) (__v2du) { __B, __B };
- __vector __bool char __select = vec_cmplt ((__v16qi) __D, __zero);
- __C = vec_perm ((__v16qi) __C, (__v16qi) __C, (__v16qu) __D);
- __C = vec_sel (__C, __zero, __select);
- return (__m64) ((__v2du) (__C))[0];
-}
-
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_shuffle_pi8(__m64 __A, __m64 __B) {
+ const __v16qi __zero = {0};
+ __v16qi __C = (__v16qi)(__v2du){__A, __A};
+ __v16qi __D = (__v16qi)(__v2du){__B, __B};
+ __vector __bool char __select = vec_cmplt((__v16qi)__D, __zero);
+ __C = vec_perm((__v16qi)__C, (__v16qi)__C, (__v16qu)__D);
+ __C = vec_sel(__C, __zero, __select);
+ return (__m64)((__v2du)(__C))[0];
+}
+
+#ifdef _ARCH_PWR8
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_epi8 (__m128i __A, __m128i __B)
-{
- const __v16qi __zero = { 0 };
- __v16qi __selectneg = (__v16qi) vec_cmplt ((__v16qi) __B, __zero);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sign_epi8(__m128i __A, __m128i __B) {
+ const __v16qi __zero = {0};
+ __v16qi __selectneg = (__v16qi)vec_cmplt((__v16qi)__B, __zero);
__v16qi __selectpos =
- (__v16qi) vec_neg ((__v16qi) vec_cmpgt ((__v16qi) __B, __zero));
- __v16qi __conv = vec_add (__selectneg, __selectpos);
- return (__m128i) vec_mul ((__v16qi) __A, (__v16qi) __conv);
+ (__v16qi)vec_neg((__v16qi)vec_cmpgt((__v16qi)__B, __zero));
+ __v16qi __conv = vec_add(__selectneg, __selectpos);
+ return (__m128i)vec_mul((__v16qi)__A, (__v16qi)__conv);
}
+#endif
+#ifdef _ARCH_PWR8
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_epi16 (__m128i __A, __m128i __B)
-{
- const __v8hi __zero = { 0 };
- __v8hi __selectneg = (__v8hi) vec_cmplt ((__v8hi) __B, __zero);
- __v8hi __selectpos =
- (__v8hi) vec_neg ((__v8hi) vec_cmpgt ((__v8hi) __B, __zero));
- __v8hi __conv = vec_add (__selectneg, __selectpos);
- return (__m128i) vec_mul ((__v8hi) __A, (__v8hi) __conv);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sign_epi16(__m128i __A, __m128i __B) {
+ const __v8hi __zero = {0};
+ __v8hi __selectneg = (__v8hi)vec_cmplt((__v8hi)__B, __zero);
+ __v8hi __selectpos = (__v8hi)vec_neg((__v8hi)vec_cmpgt((__v8hi)__B, __zero));
+ __v8hi __conv = vec_add(__selectneg, __selectpos);
+ return (__m128i)vec_mul((__v8hi)__A, (__v8hi)__conv);
}
+#endif
+#ifdef _ARCH_PWR8
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_epi32 (__m128i __A, __m128i __B)
-{
- const __v4si __zero = { 0 };
- __v4si __selectneg = (__v4si) vec_cmplt ((__v4si) __B, __zero);
- __v4si __selectpos =
- (__v4si) vec_neg ((__v4si) vec_cmpgt ((__v4si) __B, __zero));
- __v4si __conv = vec_add (__selectneg, __selectpos);
- return (__m128i) vec_mul ((__v4si) __A, (__v4si) __conv);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sign_epi32(__m128i __A, __m128i __B) {
+ const __v4si __zero = {0};
+ __v4si __selectneg = (__v4si)vec_cmplt((__v4si)__B, __zero);
+ __v4si __selectpos = (__v4si)vec_neg((__v4si)vec_cmpgt((__v4si)__B, __zero));
+ __v4si __conv = vec_add(__selectneg, __selectpos);
+ return (__m128i)vec_mul((__v4si)__A, (__v4si)__conv);
}
+#endif
+#ifdef _ARCH_PWR8
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_pi8 (__m64 __A, __m64 __B)
-{
- const __v16qi __zero = { 0 };
- __v16qi __C = (__v16qi) (__v2du) { __A, __A };
- __v16qi __D = (__v16qi) (__v2du) { __B, __B };
- __C = (__v16qi) _mm_sign_epi8 ((__m128i) __C, (__m128i) __D);
- return (__m64) ((__v2du) (__C))[0];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sign_pi8(__m64 __A, __m64 __B) {
+ const __v16qi __zero = {0};
+ __v16qi __C = (__v16qi)(__v2du){__A, __A};
+ __v16qi __D = (__v16qi)(__v2du){__B, __B};
+ __C = (__v16qi)_mm_sign_epi8((__m128i)__C, (__m128i)__D);
+ return (__m64)((__v2du)(__C))[0];
}
+#endif
+#ifdef _ARCH_PWR8
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_pi16 (__m64 __A, __m64 __B)
-{
- const __v8hi __zero = { 0 };
- __v8hi __C = (__v8hi) (__v2du) { __A, __A };
- __v8hi __D = (__v8hi) (__v2du) { __B, __B };
- __C = (__v8hi) _mm_sign_epi16 ((__m128i) __C, (__m128i) __D);
- return (__m64) ((__v2du) (__C))[0];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sign_pi16(__m64 __A, __m64 __B) {
+ const __v8hi __zero = {0};
+ __v8hi __C = (__v8hi)(__v2du){__A, __A};
+ __v8hi __D = (__v8hi)(__v2du){__B, __B};
+ __C = (__v8hi)_mm_sign_epi16((__m128i)__C, (__m128i)__D);
+ return (__m64)((__v2du)(__C))[0];
}
+#endif
+#ifdef _ARCH_PWR8
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_pi32 (__m64 __A, __m64 __B)
-{
- const __v4si __zero = { 0 };
- __v4si __C = (__v4si) (__v2du) { __A, __A };
- __v4si __D = (__v4si) (__v2du) { __B, __B };
- __C = (__v4si) _mm_sign_epi32 ((__m128i) __C, (__m128i) __D);
- return (__m64) ((__v2du) (__C))[0];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sign_pi32(__m64 __A, __m64 __B) {
+ const __v4si __zero = {0};
+ __v4si __C = (__v4si)(__v2du){__A, __A};
+ __v4si __D = (__v4si)(__v2du){__B, __B};
+ __C = (__v4si)_mm_sign_epi32((__m128i)__C, (__m128i)__D);
+ return (__m64)((__v2du)(__C))[0];
}
+#endif
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maddubs_epi16 (__m128i __A, __m128i __B)
-{
- __v8hi __unsigned = vec_splats ((signed short) 0x00ff);
- __v8hi __C = vec_and (vec_unpackh ((__v16qi) __A), __unsigned);
- __v8hi __D = vec_and (vec_unpackl ((__v16qi) __A), __unsigned);
- __v8hi __E = vec_unpackh ((__v16qi) __B);
- __v8hi __F = vec_unpackl ((__v16qi) __B);
- __C = vec_mul (__C, __E);
- __D = vec_mul (__D, __F);
- const __v16qu __odds =
- { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
- const __v16qu __evens =
- { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
- __E = vec_perm (__C, __D, __odds);
- __F = vec_perm (__C, __D, __evens);
- return (__m128i) vec_adds (__E, __F);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_maddubs_epi16(__m128i __A, __m128i __B) {
+ __v8hi __unsigned = vec_splats((signed short)0x00ff);
+ __v8hi __C = vec_and(vec_unpackh((__v16qi)__A), __unsigned);
+ __v8hi __D = vec_and(vec_unpackl((__v16qi)__A), __unsigned);
+ __v8hi __E = vec_unpackh((__v16qi)__B);
+ __v8hi __F = vec_unpackl((__v16qi)__B);
+ __C = vec_mul(__C, __E);
+ __D = vec_mul(__D, __F);
+ const __v16qu __odds = {0, 1, 4, 5, 8, 9, 12, 13,
+ 16, 17, 20, 21, 24, 25, 28, 29};
+ const __v16qu __evens = {2, 3, 6, 7, 10, 11, 14, 15,
+ 18, 19, 22, 23, 26, 27, 30, 31};
+ __E = vec_perm(__C, __D, __odds);
+ __F = vec_perm(__C, __D, __evens);
+ return (__m128i)vec_adds(__E, __F);
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maddubs_pi16 (__m64 __A, __m64 __B)
-{
- __v8hi __C = (__v8hi) (__v2du) { __A, __A };
- __C = vec_unpackl ((__v16qi) __C);
- const __v8hi __unsigned = vec_splats ((signed short) 0x00ff);
- __C = vec_and (__C, __unsigned);
- __v8hi __D = (__v8hi) (__v2du) { __B, __B };
- __D = vec_unpackl ((__v16qi) __D);
- __D = vec_mul (__C, __D);
- const __v16qu __odds =
- { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
- const __v16qu __evens =
- { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
- __C = vec_perm (__D, __D, __odds);
- __D = vec_perm (__D, __D, __evens);
- __C = vec_adds (__C, __D);
- return (__m64) ((__v2du) (__C))[0];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_maddubs_pi16(__m64 __A, __m64 __B) {
+ __v8hi __C = (__v8hi)(__v2du){__A, __A};
+ __C = vec_unpackl((__v16qi)__C);
+ const __v8hi __unsigned = vec_splats((signed short)0x00ff);
+ __C = vec_and(__C, __unsigned);
+ __v8hi __D = (__v8hi)(__v2du){__B, __B};
+ __D = vec_unpackl((__v16qi)__D);
+ __D = vec_mul(__C, __D);
+ const __v16qu __odds = {0, 1, 4, 5, 8, 9, 12, 13,
+ 16, 17, 20, 21, 24, 25, 28, 29};
+ const __v16qu __evens = {2, 3, 6, 7, 10, 11, 14, 15,
+ 18, 19, 22, 23, 26, 27, 30, 31};
+ __C = vec_perm(__D, __D, __odds);
+ __D = vec_perm(__D, __D, __evens);
+ __C = vec_adds(__C, __D);
+ return (__m64)((__v2du)(__C))[0];
}
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mulhrs_epi16 (__m128i __A, __m128i __B)
-{
- __v4si __C = vec_unpackh ((__v8hi) __A);
- __v4si __D = vec_unpackh ((__v8hi) __B);
- __C = vec_mul (__C, __D);
- __D = vec_unpackl ((__v8hi) __A);
- __v4si __E = vec_unpackl ((__v8hi) __B);
- __D = vec_mul (__D, __E);
- const __v4su __shift = vec_splats ((unsigned int) 14);
- __C = vec_sr (__C, __shift);
- __D = vec_sr (__D, __shift);
- const __v4si __ones = vec_splats ((signed int) 1);
- __C = vec_add (__C, __ones);
- __C = vec_sr (__C, (__v4su) __ones);
- __D = vec_add (__D, __ones);
- __D = vec_sr (__D, (__v4su) __ones);
- return (__m128i) vec_pack (__C, __D);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mulhrs_epi16(__m128i __A, __m128i __B) {
+ __v4si __C = vec_unpackh((__v8hi)__A);
+ __v4si __D = vec_unpackh((__v8hi)__B);
+ __C = vec_mul(__C, __D);
+ __D = vec_unpackl((__v8hi)__A);
+ __v4si __E = vec_unpackl((__v8hi)__B);
+ __D = vec_mul(__D, __E);
+ const __v4su __shift = vec_splats((unsigned int)14);
+ __C = vec_sr(__C, __shift);
+ __D = vec_sr(__D, __shift);
+ const __v4si __ones = vec_splats((signed int)1);
+ __C = vec_add(__C, __ones);
+ __C = vec_sr(__C, (__v4su)__ones);
+ __D = vec_add(__D, __ones);
+ __D = vec_sr(__D, (__v4su)__ones);
+ return (__m128i)vec_pack(__C, __D);
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mulhrs_pi16 (__m64 __A, __m64 __B)
-{
- __v4si __C = (__v4si) (__v2du) { __A, __A };
- __C = vec_unpackh ((__v8hi) __C);
- __v4si __D = (__v4si) (__v2du) { __B, __B };
- __D = vec_unpackh ((__v8hi) __D);
- __C = vec_mul (__C, __D);
- const __v4su __shift = vec_splats ((unsigned int) 14);
- __C = vec_sr (__C, __shift);
- const __v4si __ones = vec_splats ((signed int) 1);
- __C = vec_add (__C, __ones);
- __C = vec_sr (__C, (__v4su) __ones);
- __v8hi __E = vec_pack (__C, __D);
- return (__m64) ((__v2du) (__E))[0];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mulhrs_pi16(__m64 __A, __m64 __B) {
+ __v4si __C = (__v4si)(__v2du){__A, __A};
+ __C = vec_unpackh((__v8hi)__C);
+ __v4si __D = (__v4si)(__v2du){__B, __B};
+ __D = vec_unpackh((__v8hi)__D);
+ __C = vec_mul(__C, __D);
+ const __v4su __shift = vec_splats((unsigned int)14);
+ __C = vec_sr(__C, __shift);
+ const __v4si __ones = vec_splats((signed int)1);
+ __C = vec_add(__C, __ones);
+ __C = vec_sr(__C, (__v4su)__ones);
+ __v8hi __E = vec_pack(__C, __D);
+ return (__m64)((__v2du)(__E))[0];
}
#else
#include_next <tmmintrin.h>
-#endif /* defined(__linux__) && defined(__ppc64__) */
+#endif /* defined(__powerpc64__) && \
+ * (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */
#endif /* TMMINTRIN_H_ */
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/x86gprintrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/x86gprintrin.h
new file mode 100644
index 000000000000..cbfac262395c
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/x86gprintrin.h
@@ -0,0 +1,17 @@
+/*===--- x86gprintrin.h - Implementation of X86 GPR intrinsics on PowerPC --===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef X86GPRINTRIN_H_
+#define X86GPRINTRIN_H_
+
+#include <bmiintrin.h>
+
+#include <bmi2intrin.h>
+
+#endif /* X86GPRINTRIN_H_ */
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/x86intrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/x86intrin.h
new file mode 100644
index 000000000000..f5c201262e69
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/x86intrin.h
@@ -0,0 +1,28 @@
+/*===---- x86intrin.h - Implementation of X86 intrinsics on PowerPC --------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header is distributed to simplify porting x86_64 code that
+ makes explicit use of Intel intrinsics to powerpc64le.
+ It is the user's responsibility to determine if the results are
+ acceptable and make additional changes as necessary.
+ Note that much code that uses Intel intrinsics can be rewritten in
+ standard C or GNU C extensions, which are more portable and better
+ optimized across multiple targets. */
+#error "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
+
+#ifndef X86INTRIN_H_
+#define X86INTRIN_H_
+
+#ifdef __ALTIVEC__
+#include <immintrin.h>
+#endif /* __ALTIVEC__ */
+
+#endif /* X86INTRIN_H_ */
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/xmmintrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/xmmintrin.h
index 0e45b96769f8..9dd21b65c2f7 100644
--- a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/xmmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/xmmintrin.h
@@ -28,25 +28,27 @@
Most SSE scalar float intrinsic operations can be performed more
efficiently as C language float scalar operations or optimized to
use vector SIMD operations. We recommend this for new applications. */
-#error "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#error \
+ "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
#endif
-#ifndef _XMMINTRIN_H_INCLUDED
-#define _XMMINTRIN_H_INCLUDED
+#ifndef XMMINTRIN_H_
+#define XMMINTRIN_H_
-#if defined(__linux__) && defined(__ppc64__)
+#if defined(__powerpc64__) && \
+ (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX))
/* Define four value permute mask */
-#define _MM_SHUFFLE(w,x,y,z) (((w) << 6) | ((x) << 4) | ((y) << 2) | (z))
+#define _MM_SHUFFLE(w, x, y, z) (((w) << 6) | ((x) << 4) | ((y) << 2) | (z))
#include <altivec.h>
/* Avoid collisions between altivec.h and strict adherence to C++ and
C11 standards. This should eventually be done inside altivec.h itself,
but only after testing a full distro build. */
-#if defined(__STRICT_ANSI__) && (defined(__cplusplus) || \
- (defined(__STDC_VERSION__) && \
- __STDC_VERSION__ >= 201112L))
+#if defined(__STRICT_ANSI__) && \
+ (defined(__cplusplus) || \
+ (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L))
#undef vector
#undef pixel
#undef bool
@@ -71,145 +73,145 @@ typedef vector float __m128_u __attribute__((__may_alias__, __aligned__(1)));
typedef vector float __v4sf;
/* Create an undefined vector. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_undefined_ps (void)
-{
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_undefined_ps(void) {
__m128 __Y = __Y;
return __Y;
}
/* Create a vector of zeros. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setzero_ps (void)
-{
- return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setzero_ps(void) {
+ return __extension__(__m128){0.0f, 0.0f, 0.0f, 0.0f};
}
/* Load four SPFP values from P. The address must be 16-byte aligned. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_ps (float const *__P)
-{
- return ((__m128)vec_ld(0, (__v4sf*)__P));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_load_ps(float const *__P) {
+ return ((__m128)vec_ld(0, (__v4sf *)__P));
}
/* Load four SPFP values from P. The address need not be 16-byte aligned. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadu_ps (float const *__P)
-{
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_loadu_ps(float const *__P) {
return (vec_vsx_ld(0, __P));
}
/* Load four SPFP values in reverse order. The address must be aligned. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadr_ps (float const *__P)
-{
- __v4sf __tmp;
- __m128 result;
- static const __vector unsigned char permute_vector =
- { 0x1C, 0x1D, 0x1E, 0x1F, 0x18, 0x19, 0x1A, 0x1B, 0x14, 0x15, 0x16,
- 0x17, 0x10, 0x11, 0x12, 0x13 };
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_loadr_ps(float const *__P) {
+ __v4sf __tmp;
+ __m128 __result;
+ static const __vector unsigned char __permute_vector = {
+ 0x1C, 0x1D, 0x1E, 0x1F, 0x18, 0x19, 0x1A, 0x1B,
+ 0x14, 0x15, 0x16, 0x17, 0x10, 0x11, 0x12, 0x13};
- __tmp = vec_ld (0, (__v4sf *) __P);
- result = (__m128) vec_perm (__tmp, __tmp, permute_vector);
- return result;
+ __tmp = vec_ld(0, (__v4sf *)__P);
+ __result = (__m128)vec_perm(__tmp, __tmp, __permute_vector);
+ return __result;
}
/* Create a vector with all four elements equal to F. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_ps (float __F)
-{
- return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_ps(float __F) {
+ return __extension__(__m128)(__v4sf){__F, __F, __F, __F};
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_ps1 (float __F)
-{
- return _mm_set1_ps (__F);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_ps1(float __F) {
+ return _mm_set1_ps(__F);
}
/* Create the vector [Z Y X W]. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
-{
- return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__,
+ __artificial__))
+_mm_set_ps(const float __Z, const float __Y, const float __X, const float __W) {
+ return __extension__(__m128)(__v4sf){__W, __X, __Y, __Z};
}
/* Create the vector [W X Y Z]. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_ps (float __Z, float __Y, float __X, float __W)
-{
- return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_ps(float __Z, float __Y, float __X, float __W) {
+ return __extension__(__m128)(__v4sf){__Z, __Y, __X, __W};
}
/* Store four SPFP values. The address must be 16-byte aligned. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_ps (float *__P, __m128 __A)
-{
- vec_st((__v4sf)__A, 0, (__v4sf*)__P);
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_store_ps(float *__P, __m128 __A) {
+ vec_st((__v4sf)__A, 0, (__v4sf *)__P);
}
/* Store four SPFP values. The address need not be 16-byte aligned. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storeu_ps (float *__P, __m128 __A)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_storeu_ps(float *__P, __m128 __A) {
*(__m128_u *)__P = __A;
}
/* Store four SPFP values in reverse order. The address must be aligned. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storer_ps (float *__P, __m128 __A)
-{
- __v4sf __tmp;
- static const __vector unsigned char permute_vector =
- { 0x1C, 0x1D, 0x1E, 0x1F, 0x18, 0x19, 0x1A, 0x1B, 0x14, 0x15, 0x16,
- 0x17, 0x10, 0x11, 0x12, 0x13 };
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_storer_ps(float *__P, __m128 __A) {
+ __v4sf __tmp;
+ static const __vector unsigned char __permute_vector = {
+ 0x1C, 0x1D, 0x1E, 0x1F, 0x18, 0x19, 0x1A, 0x1B,
+ 0x14, 0x15, 0x16, 0x17, 0x10, 0x11, 0x12, 0x13};
- __tmp = (__m128) vec_perm (__A, __A, permute_vector);
+ __tmp = (__m128)vec_perm(__A, __A, __permute_vector);
- _mm_store_ps (__P, __tmp);
+ _mm_store_ps(__P, __tmp);
}
/* Store the lower SPFP value across four words. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store1_ps (float *__P, __m128 __A)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_store1_ps(float *__P, __m128 __A) {
__v4sf __va = vec_splat((__v4sf)__A, 0);
- _mm_store_ps (__P, __va);
+ _mm_store_ps(__P, __va);
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_ps1 (float *__P, __m128 __A)
-{
- _mm_store1_ps (__P, __A);
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_store_ps1(float *__P, __m128 __A) {
+ _mm_store1_ps(__P, __A);
}
/* Create a vector with element 0 as F and the rest zero. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_ss (float __F)
-{
- return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f };
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_ss(float __F) {
+ return __extension__(__m128)(__v4sf){__F, 0.0f, 0.0f, 0.0f};
}
/* Sets the low SPFP value of A from the low value of B. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_move_ss (__m128 __A, __m128 __B)
-{
- static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_move_ss(__m128 __A, __m128 __B) {
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
- return (vec_sel ((__v4sf)__A, (__v4sf)__B, mask));
+ return (vec_sel((__v4sf)__A, (__v4sf)__B, __mask));
}
/* Create a vector with element 0 as *P and the rest zero. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_ss (float const *__P)
-{
- return _mm_set_ss (*__P);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_load_ss(float const *__P) {
+ return _mm_set_ss(*__P);
}
/* Stores the lower SPFP value. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_ss (float *__P, __m128 __A)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_store_ss(float *__P, __m128 __A) {
*__P = ((__v4sf)__A)[0];
}
@@ -217,612 +219,600 @@ _mm_store_ss (float *__P, __m128 __A)
floating-point) values of A and B; the upper three SPFP values are
passed through from A. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_ss (__m128 __A, __m128 __B)
-{
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_ss(__m128 __A, __m128 __B) {
#ifdef _ARCH_PWR7
- __m128 a, b, c;
- static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+ __m128 __a, __b, __c;
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
/* PowerISA VSX does not allow partial (for just lower double)
results. So to insure we don't generate spurious exceptions
(from the upper double values) we splat the lower double
before we to the operation. */
- a = vec_splat (__A, 0);
- b = vec_splat (__B, 0);
- c = a + b;
+ __a = vec_splat(__A, 0);
+ __b = vec_splat(__B, 0);
+ __c = __a + __b;
/* Then we merge the lower float result with the original upper
float elements from __A. */
- return (vec_sel (__A, c, mask));
+ return (vec_sel(__A, __c, __mask));
#else
__A[0] = __A[0] + __B[0];
return (__A);
#endif
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_ss (__m128 __A, __m128 __B)
-{
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_ss(__m128 __A, __m128 __B) {
#ifdef _ARCH_PWR7
- __m128 a, b, c;
- static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+ __m128 __a, __b, __c;
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
/* PowerISA VSX does not allow partial (for just lower double)
results. So to insure we don't generate spurious exceptions
(from the upper double values) we splat the lower double
before we to the operation. */
- a = vec_splat (__A, 0);
- b = vec_splat (__B, 0);
- c = a - b;
+ __a = vec_splat(__A, 0);
+ __b = vec_splat(__B, 0);
+ __c = __a - __b;
/* Then we merge the lower float result with the original upper
float elements from __A. */
- return (vec_sel (__A, c, mask));
+ return (vec_sel(__A, __c, __mask));
#else
__A[0] = __A[0] - __B[0];
return (__A);
#endif
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_ss (__m128 __A, __m128 __B)
-{
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mul_ss(__m128 __A, __m128 __B) {
#ifdef _ARCH_PWR7
- __m128 a, b, c;
- static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+ __m128 __a, __b, __c;
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
/* PowerISA VSX does not allow partial (for just lower double)
results. So to insure we don't generate spurious exceptions
(from the upper double values) we splat the lower double
before we to the operation. */
- a = vec_splat (__A, 0);
- b = vec_splat (__B, 0);
- c = a * b;
+ __a = vec_splat(__A, 0);
+ __b = vec_splat(__B, 0);
+ __c = __a * __b;
/* Then we merge the lower float result with the original upper
float elements from __A. */
- return (vec_sel (__A, c, mask));
+ return (vec_sel(__A, __c, __mask));
#else
__A[0] = __A[0] * __B[0];
return (__A);
#endif
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_div_ss (__m128 __A, __m128 __B)
-{
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_div_ss(__m128 __A, __m128 __B) {
#ifdef _ARCH_PWR7
- __m128 a, b, c;
- static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+ __m128 __a, __b, __c;
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
/* PowerISA VSX does not allow partial (for just lower double)
results. So to insure we don't generate spurious exceptions
(from the upper double values) we splat the lower double
before we to the operation. */
- a = vec_splat (__A, 0);
- b = vec_splat (__B, 0);
- c = a / b;
+ __a = vec_splat(__A, 0);
+ __b = vec_splat(__B, 0);
+ __c = __a / __b;
/* Then we merge the lower float result with the original upper
float elements from __A. */
- return (vec_sel (__A, c, mask));
+ return (vec_sel(__A, __c, __mask));
#else
__A[0] = __A[0] / __B[0];
return (__A);
#endif
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sqrt_ss (__m128 __A)
-{
- __m128 a, c;
- static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sqrt_ss(__m128 __A) {
+ __m128 __a, __c;
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
/* PowerISA VSX does not allow partial (for just lower double)
* results. So to insure we don't generate spurious exceptions
* (from the upper double values) we splat the lower double
* before we to the operation. */
- a = vec_splat (__A, 0);
- c = vec_sqrt (a);
+ __a = vec_splat(__A, 0);
+ __c = vec_sqrt(__a);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return (vec_sel (__A, c, mask));
+ return (vec_sel(__A, __c, __mask));
}
/* Perform the respective operation on the four SPFP values in A and B. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_ps (__m128 __A, __m128 __B)
-{
- return (__m128) ((__v4sf)__A + (__v4sf)__B);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_ps(__m128 __A, __m128 __B) {
+ return (__m128)((__v4sf)__A + (__v4sf)__B);
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_ps (__m128 __A, __m128 __B)
-{
- return (__m128) ((__v4sf)__A - (__v4sf)__B);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_ps(__m128 __A, __m128 __B) {
+ return (__m128)((__v4sf)__A - (__v4sf)__B);
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_ps (__m128 __A, __m128 __B)
-{
- return (__m128) ((__v4sf)__A * (__v4sf)__B);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mul_ps(__m128 __A, __m128 __B) {
+ return (__m128)((__v4sf)__A * (__v4sf)__B);
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_div_ps (__m128 __A, __m128 __B)
-{
- return (__m128) ((__v4sf)__A / (__v4sf)__B);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_div_ps(__m128 __A, __m128 __B) {
+ return (__m128)((__v4sf)__A / (__v4sf)__B);
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sqrt_ps (__m128 __A)
-{
- return (vec_sqrt ((__v4sf)__A));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sqrt_ps(__m128 __A) {
+ return (vec_sqrt((__v4sf)__A));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_rcp_ps (__m128 __A)
-{
- return (vec_re ((__v4sf)__A));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_rcp_ps(__m128 __A) {
+ return (vec_re((__v4sf)__A));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_rsqrt_ps (__m128 __A)
-{
- return (vec_rsqrte (__A));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_rsqrt_ps(__m128 __A) {
+ return (vec_rsqrte(__A));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_rcp_ss (__m128 __A)
-{
- __m128 a, c;
- static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_rcp_ss(__m128 __A) {
+ __m128 __a, __c;
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
/* PowerISA VSX does not allow partial (for just lower double)
* results. So to insure we don't generate spurious exceptions
* (from the upper double values) we splat the lower double
* before we to the operation. */
- a = vec_splat (__A, 0);
- c = _mm_rcp_ps (a);
+ __a = vec_splat(__A, 0);
+ __c = _mm_rcp_ps(__a);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return (vec_sel (__A, c, mask));
+ return (vec_sel(__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_rsqrt_ss (__m128 __A)
-{
- __m128 a, c;
- static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_rsqrt_ss(__m128 __A) {
+ __m128 __a, __c;
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
/* PowerISA VSX does not allow partial (for just lower double)
* results. So to insure we don't generate spurious exceptions
* (from the upper double values) we splat the lower double
* before we to the operation. */
- a = vec_splat (__A, 0);
- c = vec_rsqrte (a);
+ __a = vec_splat(__A, 0);
+ __c = vec_rsqrte(__a);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return (vec_sel (__A, c, mask));
+ return (vec_sel(__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_ss (__m128 __A, __m128 __B)
-{
- __v4sf a, b, c;
- static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_min_ss(__m128 __A, __m128 __B) {
+ __v4sf __a, __b, __c;
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
/* PowerISA VSX does not allow partial (for just lower float)
* results. So to insure we don't generate spurious exceptions
* (from the upper float values) we splat the lower float
* before we to the operation. */
- a = vec_splat ((__v4sf)__A, 0);
- b = vec_splat ((__v4sf)__B, 0);
- c = vec_min (a, b);
+ __a = vec_splat((__v4sf)__A, 0);
+ __b = vec_splat((__v4sf)__B, 0);
+ __c = vec_min(__a, __b);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return (vec_sel ((__v4sf)__A, c, mask));
+ return (vec_sel((__v4sf)__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_ss (__m128 __A, __m128 __B)
-{
- __v4sf a, b, c;
- static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_max_ss(__m128 __A, __m128 __B) {
+ __v4sf __a, __b, __c;
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
/* PowerISA VSX does not allow partial (for just lower float)
* results. So to insure we don't generate spurious exceptions
* (from the upper float values) we splat the lower float
* before we to the operation. */
- a = vec_splat (__A, 0);
- b = vec_splat (__B, 0);
- c = vec_max (a, b);
+ __a = vec_splat(__A, 0);
+ __b = vec_splat(__B, 0);
+ __c = vec_max(__a, __b);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return (vec_sel ((__v4sf)__A, c, mask));
+ return (vec_sel((__v4sf)__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_ps (__m128 __A, __m128 __B)
-{
- __vector __bool int m = vec_cmpgt ((__v4sf) __B, (__v4sf) __A);
- return vec_sel (__B, __A, m);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_min_ps(__m128 __A, __m128 __B) {
+ __vector __bool int __m = vec_cmpgt((__v4sf)__B, (__v4sf)__A);
+ return vec_sel(__B, __A, __m);
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_ps (__m128 __A, __m128 __B)
-{
- __vector __bool int m = vec_cmpgt ((__v4sf) __A, (__v4sf) __B);
- return vec_sel (__B, __A, m);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_max_ps(__m128 __A, __m128 __B) {
+ __vector __bool int __m = vec_cmpgt((__v4sf)__A, (__v4sf)__B);
+ return vec_sel(__B, __A, __m);
}
/* Perform logical bit-wise operations on 128-bit values. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_and_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_and ((__v4sf)__A, (__v4sf)__B));
-// return __builtin_ia32_andps (__A, __B);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_and_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_and((__v4sf)__A, (__v4sf)__B));
+ // return __builtin_ia32_andps (__A, __B);
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_andnot_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_andc ((__v4sf)__B, (__v4sf)__A));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_andnot_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_andc((__v4sf)__B, (__v4sf)__A));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_or_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_or ((__v4sf)__A, (__v4sf)__B));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_or_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_or((__v4sf)__A, (__v4sf)__B));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_xor_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_xor ((__v4sf)__A, (__v4sf)__B));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_xor_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_xor((__v4sf)__A, (__v4sf)__B));
}
/* Perform a comparison on the four SPFP values of A and B. For each
element, if the comparison is true, place a mask of all ones in the
result, otherwise a mask of zeros. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_cmpeq ((__v4sf)__A,(__v4sf) __B));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_cmpeq((__v4sf)__A, (__v4sf)__B));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_cmplt ((__v4sf)__A, (__v4sf)__B));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmplt_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_cmplt((__v4sf)__A, (__v4sf)__B));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmple_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_cmple ((__v4sf)__A, (__v4sf)__B));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmple_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_cmple((__v4sf)__A, (__v4sf)__B));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_cmpgt ((__v4sf)__A, (__v4sf)__B));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_cmpgt((__v4sf)__A, (__v4sf)__B));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpge_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_cmpge ((__v4sf)__A, (__v4sf)__B));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpge_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_cmpge((__v4sf)__A, (__v4sf)__B));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpneq_ps (__m128 __A, __m128 __B)
-{
- __v4sf temp = (__v4sf ) vec_cmpeq ((__v4sf) __A, (__v4sf)__B);
- return ((__m128)vec_nor (temp, temp));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpneq_ps(__m128 __A, __m128 __B) {
+ __v4sf __temp = (__v4sf)vec_cmpeq((__v4sf)__A, (__v4sf)__B);
+ return ((__m128)vec_nor(__temp, __temp));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnlt_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_cmpge ((__v4sf)__A, (__v4sf)__B));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnlt_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_cmpge((__v4sf)__A, (__v4sf)__B));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnle_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_cmpgt ((__v4sf)__A, (__v4sf)__B));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnle_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_cmpgt((__v4sf)__A, (__v4sf)__B));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpngt_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_cmple ((__v4sf)__A, (__v4sf)__B));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpngt_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_cmple((__v4sf)__A, (__v4sf)__B));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnge_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_cmplt ((__v4sf)__A, (__v4sf)__B));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnge_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_cmplt((__v4sf)__A, (__v4sf)__B));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpord_ps (__m128 __A, __m128 __B)
-{
- __vector unsigned int a, b;
- __vector unsigned int c, d;
- static const __vector unsigned int float_exp_mask =
- { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpord_ps(__m128 __A, __m128 __B) {
+ __vector unsigned int __a, __b;
+ __vector unsigned int __c, __d;
+ static const __vector unsigned int __float_exp_mask = {
+ 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000};
- a = (__vector unsigned int) vec_abs ((__v4sf)__A);
- b = (__vector unsigned int) vec_abs ((__v4sf)__B);
- c = (__vector unsigned int) vec_cmpgt (float_exp_mask, a);
- d = (__vector unsigned int) vec_cmpgt (float_exp_mask, b);
- return ((__m128 ) vec_and (c, d));
+ __a = (__vector unsigned int)vec_abs((__v4sf)__A);
+ __b = (__vector unsigned int)vec_abs((__v4sf)__B);
+ __c = (__vector unsigned int)vec_cmpgt(__float_exp_mask, __a);
+ __d = (__vector unsigned int)vec_cmpgt(__float_exp_mask, __b);
+ return ((__m128)vec_and(__c, __d));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpunord_ps (__m128 __A, __m128 __B)
-{
- __vector unsigned int a, b;
- __vector unsigned int c, d;
- static const __vector unsigned int float_exp_mask =
- { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpunord_ps(__m128 __A, __m128 __B) {
+ __vector unsigned int __a, __b;
+ __vector unsigned int __c, __d;
+ static const __vector unsigned int __float_exp_mask = {
+ 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000};
- a = (__vector unsigned int) vec_abs ((__v4sf)__A);
- b = (__vector unsigned int) vec_abs ((__v4sf)__B);
- c = (__vector unsigned int) vec_cmpgt (a, float_exp_mask);
- d = (__vector unsigned int) vec_cmpgt (b, float_exp_mask);
- return ((__m128 ) vec_or (c, d));
+ __a = (__vector unsigned int)vec_abs((__v4sf)__A);
+ __b = (__vector unsigned int)vec_abs((__v4sf)__B);
+ __c = (__vector unsigned int)vec_cmpgt(__a, __float_exp_mask);
+ __d = (__vector unsigned int)vec_cmpgt(__b, __float_exp_mask);
+ return ((__m128)vec_or(__c, __d));
}
/* Perform a comparison on the lower SPFP values of A and B. If the
comparison is true, place a mask of all ones in the result, otherwise a
mask of zeros. The upper three SPFP values are passed through from A. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_ss (__m128 __A, __m128 __B)
-{
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
- __v4sf a, b, c;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_ss(__m128 __A, __m128 __B) {
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+ __v4sf __a, __b, __c;
/* PowerISA VMX does not allow partial (for just element 0)
* results. So to insure we don't generate spurious exceptions
* (from the upper elements) we splat the lower float
* before we to the operation. */
- a = vec_splat ((__v4sf) __A, 0);
- b = vec_splat ((__v4sf) __B, 0);
- c = (__v4sf) vec_cmpeq(a, b);
+ __a = vec_splat((__v4sf)__A, 0);
+ __b = vec_splat((__v4sf)__B, 0);
+ __c = (__v4sf)vec_cmpeq(__a, __b);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+ return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_ss (__m128 __A, __m128 __B)
-{
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
- __v4sf a, b, c;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmplt_ss(__m128 __A, __m128 __B) {
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+ __v4sf __a, __b, __c;
/* PowerISA VMX does not allow partial (for just element 0)
* results. So to insure we don't generate spurious exceptions
* (from the upper elements) we splat the lower float
* before we to the operation. */
- a = vec_splat ((__v4sf) __A, 0);
- b = vec_splat ((__v4sf) __B, 0);
- c = (__v4sf) vec_cmplt(a, b);
+ __a = vec_splat((__v4sf)__A, 0);
+ __b = vec_splat((__v4sf)__B, 0);
+ __c = (__v4sf)vec_cmplt(__a, __b);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+ return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmple_ss (__m128 __A, __m128 __B)
-{
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
- __v4sf a, b, c;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmple_ss(__m128 __A, __m128 __B) {
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+ __v4sf __a, __b, __c;
/* PowerISA VMX does not allow partial (for just element 0)
* results. So to insure we don't generate spurious exceptions
* (from the upper elements) we splat the lower float
* before we to the operation. */
- a = vec_splat ((__v4sf) __A, 0);
- b = vec_splat ((__v4sf) __B, 0);
- c = (__v4sf) vec_cmple(a, b);
+ __a = vec_splat((__v4sf)__A, 0);
+ __b = vec_splat((__v4sf)__B, 0);
+ __c = (__v4sf)vec_cmple(__a, __b);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+ return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_ss (__m128 __A, __m128 __B)
-{
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
- __v4sf a, b, c;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_ss(__m128 __A, __m128 __B) {
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+ __v4sf __a, __b, __c;
/* PowerISA VMX does not allow partial (for just element 0)
* results. So to insure we don't generate spurious exceptions
* (from the upper elements) we splat the lower float
* before we to the operation. */
- a = vec_splat ((__v4sf) __A, 0);
- b = vec_splat ((__v4sf) __B, 0);
- c = (__v4sf) vec_cmpgt(a, b);
+ __a = vec_splat((__v4sf)__A, 0);
+ __b = vec_splat((__v4sf)__B, 0);
+ __c = (__v4sf)vec_cmpgt(__a, __b);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+ return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpge_ss (__m128 __A, __m128 __B)
-{
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
- __v4sf a, b, c;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpge_ss(__m128 __A, __m128 __B) {
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+ __v4sf __a, __b, __c;
/* PowerISA VMX does not allow partial (for just element 0)
* results. So to insure we don't generate spurious exceptions
* (from the upper elements) we splat the lower float
* before we to the operation. */
- a = vec_splat ((__v4sf) __A, 0);
- b = vec_splat ((__v4sf) __B, 0);
- c = (__v4sf) vec_cmpge(a, b);
+ __a = vec_splat((__v4sf)__A, 0);
+ __b = vec_splat((__v4sf)__B, 0);
+ __c = (__v4sf)vec_cmpge(__a, __b);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+ return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpneq_ss (__m128 __A, __m128 __B)
-{
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
- __v4sf a, b, c;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpneq_ss(__m128 __A, __m128 __B) {
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+ __v4sf __a, __b, __c;
/* PowerISA VMX does not allow partial (for just element 0)
* results. So to insure we don't generate spurious exceptions
* (from the upper elements) we splat the lower float
* before we to the operation. */
- a = vec_splat ((__v4sf) __A, 0);
- b = vec_splat ((__v4sf) __B, 0);
- c = (__v4sf) vec_cmpeq(a, b);
- c = vec_nor (c, c);
+ __a = vec_splat((__v4sf)__A, 0);
+ __b = vec_splat((__v4sf)__B, 0);
+ __c = (__v4sf)vec_cmpeq(__a, __b);
+ __c = vec_nor(__c, __c);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+ return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnlt_ss (__m128 __A, __m128 __B)
-{
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
- __v4sf a, b, c;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnlt_ss(__m128 __A, __m128 __B) {
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+ __v4sf __a, __b, __c;
/* PowerISA VMX does not allow partial (for just element 0)
* results. So to insure we don't generate spurious exceptions
* (from the upper elements) we splat the lower float
* before we to the operation. */
- a = vec_splat ((__v4sf) __A, 0);
- b = vec_splat ((__v4sf) __B, 0);
- c = (__v4sf) vec_cmpge(a, b);
+ __a = vec_splat((__v4sf)__A, 0);
+ __b = vec_splat((__v4sf)__B, 0);
+ __c = (__v4sf)vec_cmpge(__a, __b);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+ return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnle_ss (__m128 __A, __m128 __B)
-{
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
- __v4sf a, b, c;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnle_ss(__m128 __A, __m128 __B) {
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+ __v4sf __a, __b, __c;
/* PowerISA VMX does not allow partial (for just element 0)
* results. So to insure we don't generate spurious exceptions
* (from the upper elements) we splat the lower float
* before we to the operation. */
- a = vec_splat ((__v4sf) __A, 0);
- b = vec_splat ((__v4sf) __B, 0);
- c = (__v4sf) vec_cmpgt(a, b);
+ __a = vec_splat((__v4sf)__A, 0);
+ __b = vec_splat((__v4sf)__B, 0);
+ __c = (__v4sf)vec_cmpgt(__a, __b);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+ return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpngt_ss (__m128 __A, __m128 __B)
-{
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
- __v4sf a, b, c;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpngt_ss(__m128 __A, __m128 __B) {
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+ __v4sf __a, __b, __c;
/* PowerISA VMX does not allow partial (for just element 0)
* results. So to insure we don't generate spurious exceptions
* (from the upper elements) we splat the lower float
* before we to the operation. */
- a = vec_splat ((__v4sf) __A, 0);
- b = vec_splat ((__v4sf) __B, 0);
- c = (__v4sf) vec_cmple(a, b);
+ __a = vec_splat((__v4sf)__A, 0);
+ __b = vec_splat((__v4sf)__B, 0);
+ __c = (__v4sf)vec_cmple(__a, __b);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+ return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnge_ss (__m128 __A, __m128 __B)
-{
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
- __v4sf a, b, c;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnge_ss(__m128 __A, __m128 __B) {
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+ __v4sf __a, __b, __c;
/* PowerISA VMX does not allow partial (for just element 0)
* results. So to insure we don't generate spurious exceptions
* (from the upper elements) we splat the lower float
* before we do the operation. */
- a = vec_splat ((__v4sf) __A, 0);
- b = vec_splat ((__v4sf) __B, 0);
- c = (__v4sf) vec_cmplt(a, b);
+ __a = vec_splat((__v4sf)__A, 0);
+ __b = vec_splat((__v4sf)__B, 0);
+ __c = (__v4sf)vec_cmplt(__a, __b);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpord_ss (__m128 __A, __m128 __B)
-{
- __vector unsigned int a, b;
- __vector unsigned int c, d;
- static const __vector unsigned int float_exp_mask =
- { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
-
- a = (__vector unsigned int) vec_abs ((__v4sf)__A);
- b = (__vector unsigned int) vec_abs ((__v4sf)__B);
- c = (__vector unsigned int) vec_cmpgt (float_exp_mask, a);
- d = (__vector unsigned int) vec_cmpgt (float_exp_mask, b);
- c = vec_and (c, d);
+ return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpord_ss(__m128 __A, __m128 __B) {
+ __vector unsigned int __a, __b;
+ __vector unsigned int __c, __d;
+ static const __vector unsigned int __float_exp_mask = {
+ 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000};
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+
+ __a = (__vector unsigned int)vec_abs((__v4sf)__A);
+ __b = (__vector unsigned int)vec_abs((__v4sf)__B);
+ __c = (__vector unsigned int)vec_cmpgt(__float_exp_mask, __a);
+ __d = (__vector unsigned int)vec_cmpgt(__float_exp_mask, __b);
+ __c = vec_and(__c, __d);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, (__v4sf)c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpunord_ss (__m128 __A, __m128 __B)
-{
- __vector unsigned int a, b;
- __vector unsigned int c, d;
- static const __vector unsigned int float_exp_mask =
- { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
-
- a = (__vector unsigned int) vec_abs ((__v4sf)__A);
- b = (__vector unsigned int) vec_abs ((__v4sf)__B);
- c = (__vector unsigned int) vec_cmpgt (a, float_exp_mask);
- d = (__vector unsigned int) vec_cmpgt (b, float_exp_mask);
- c = vec_or (c, d);
+ return ((__m128)vec_sel((__v4sf)__A, (__v4sf)__c, __mask));
+}
+
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpunord_ss(__m128 __A, __m128 __B) {
+ __vector unsigned int __a, __b;
+ __vector unsigned int __c, __d;
+ static const __vector unsigned int __float_exp_mask = {
+ 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000};
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+
+ __a = (__vector unsigned int)vec_abs((__v4sf)__A);
+ __b = (__vector unsigned int)vec_abs((__v4sf)__B);
+ __c = (__vector unsigned int)vec_cmpgt(__a, __float_exp_mask);
+ __d = (__vector unsigned int)vec_cmpgt(__b, __float_exp_mask);
+ __c = vec_or(__c, __d);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, (__v4sf)c, mask));
+ return ((__m128)vec_sel((__v4sf)__A, (__v4sf)__c, __mask));
}
/* Compare the lower SPFP values of A and B and return 1 if true
and 0 if false. */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comieq_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comieq_ss(__m128 __A, __m128 __B) {
return (__A[0] == __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comilt_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comilt_ss(__m128 __A, __m128 __B) {
return (__A[0] < __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comile_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comile_ss(__m128 __A, __m128 __B) {
return (__A[0] <= __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comigt_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comigt_ss(__m128 __A, __m128 __B) {
return (__A[0] > __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comige_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comige_ss(__m128 __A, __m128 __B) {
return (__A[0] >= __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comineq_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comineq_ss(__m128 __A, __m128 __B) {
return (__A[0] != __B[0]);
}
@@ -834,56 +824,56 @@ _mm_comineq_ss (__m128 __A, __m128 __B)
* compare and signal for QNaNs.
* The __mm_ucomieq_sd et all should be OK, as is.
*/
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomieq_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomieq_ss(__m128 __A, __m128 __B) {
return (__A[0] == __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomilt_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomilt_ss(__m128 __A, __m128 __B) {
return (__A[0] < __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomile_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomile_ss(__m128 __A, __m128 __B) {
return (__A[0] <= __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomigt_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomigt_ss(__m128 __A, __m128 __B) {
return (__A[0] > __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomige_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomige_ss(__m128 __A, __m128 __B) {
return (__A[0] >= __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomineq_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomineq_ss(__m128 __A, __m128 __B) {
return (__A[0] != __B[0]);
}
-extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_f32 (__m128 __A)
-{
+extern __inline float
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtss_f32(__m128 __A) {
return ((__v4sf)__A)[0];
}
/* Convert the lower SPFP value to a 32-bit integer according to the current
rounding mode. */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_si32 (__m128 __A)
-{
- __m64 res = 0;
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtss_si32(__m128 __A) {
+ int __res;
#ifdef _ARCH_PWR8
- double dtmp;
+ double __dtmp;
__asm__(
#ifdef __LITTLE_ENDIAN__
"xxsldwi %x0,%x0,%x0,3;\n"
@@ -891,32 +881,30 @@ _mm_cvtss_si32 (__m128 __A)
"xscvspdp %x2,%x0;\n"
"fctiw %2,%2;\n"
"mfvsrd %1,%x2;\n"
- : "+wa" (__A),
- "=r" (res),
- "=f" (dtmp)
- : );
+ : "+wa"(__A), "=r"(__res), "=f"(__dtmp)
+ :);
#else
- res = __builtin_rint(__A[0]);
+ __res = __builtin_rint(__A[0]);
#endif
- return (res);
+ return __res;
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_ss2si (__m128 __A)
-{
- return _mm_cvtss_si32 (__A);
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvt_ss2si(__m128 __A) {
+ return _mm_cvtss_si32(__A);
}
/* Convert the lower SPFP value to a 32-bit integer according to the
current rounding mode. */
/* Intel intrinsic. */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_si64 (__m128 __A)
-{
- __m64 res = 0;
-#ifdef _ARCH_PWR8
- double dtmp;
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtss_si64(__m128 __A) {
+ long long __res;
+#if defined(_ARCH_PWR8) && defined(__powerpc64__)
+ double __dtmp;
__asm__(
#ifdef __LITTLE_ENDIAN__
"xxsldwi %x0,%x0,%x0,3;\n"
@@ -924,26 +912,23 @@ _mm_cvtss_si64 (__m128 __A)
"xscvspdp %x2,%x0;\n"
"fctid %2,%2;\n"
"mfvsrd %1,%x2;\n"
- : "+wa" (__A),
- "=r" (res),
- "=f" (dtmp)
- : );
+ : "+wa"(__A), "=r"(__res), "=f"(__dtmp)
+ :);
#else
- res = __builtin_llrint(__A[0]);
+ __res = __builtin_llrint(__A[0]);
#endif
- return (res);
+ return __res;
}
/* Microsoft intrinsic. */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_si64x (__m128 __A)
-{
- return _mm_cvtss_si64 ((__v4sf) __A);
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtss_si64x(__m128 __A) {
+ return _mm_cvtss_si64((__v4sf)__A);
}
/* Constants for use with _mm_prefetch. */
-enum _mm_hint
-{
+enum _mm_hint {
/* _MM_HINT_ET is _MM_HINT_T with set 3rd bit. */
_MM_HINT_ET0 = 7,
_MM_HINT_ET1 = 6,
@@ -955,368 +940,365 @@ enum _mm_hint
/* Loads one cache line from address P to a location "closer" to the
processor. The selector I specifies the type of prefetch operation. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_prefetch (const void *__P, enum _mm_hint __I)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_prefetch(const void *__P, enum _mm_hint __I) {
/* Current PowerPC will ignores the hint parameters. */
- __builtin_prefetch (__P);
+ __builtin_prefetch(__P);
}
/* Convert the two lower SPFP values to 32-bit integers according to the
current rounding mode. Return the integers in packed form. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pi32 (__m128 __A)
-{
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtps_pi32(__m128 __A) {
/* Splat two lower SPFP values to both halves. */
- __v4sf temp, rounded;
- __vector unsigned long long result;
+ __v4sf __temp, __rounded;
+ __vector unsigned long long __result;
/* Splat two lower SPFP values to both halves. */
- temp = (__v4sf) vec_splat ((__vector long long)__A, 0);
- rounded = vec_rint(temp);
- result = (__vector unsigned long long) vec_cts (rounded, 0);
+ __temp = (__v4sf)vec_splat((__vector long long)__A, 0);
+ __rounded = vec_rint(__temp);
+ __result = (__vector unsigned long long)vec_cts(__rounded, 0);
- return (__m64) ((__vector long long) result)[0];
+ return (__m64)((__vector long long)__result)[0];
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_ps2pi (__m128 __A)
-{
- return _mm_cvtps_pi32 (__A);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvt_ps2pi(__m128 __A) {
+ return _mm_cvtps_pi32(__A);
}
/* Truncate the lower SPFP value to a 32-bit integer. */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttss_si32 (__m128 __A)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvttss_si32(__m128 __A) {
/* Extract the lower float element. */
- float temp = __A[0];
+ float __temp = __A[0];
/* truncate to 32-bit integer and return. */
- return temp;
+ return __temp;
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_ss2si (__m128 __A)
-{
- return _mm_cvttss_si32 (__A);
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtt_ss2si(__m128 __A) {
+ return _mm_cvttss_si32(__A);
}
/* Intel intrinsic. */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttss_si64 (__m128 __A)
-{
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvttss_si64(__m128 __A) {
/* Extract the lower float element. */
- float temp = __A[0];
+ float __temp = __A[0];
/* truncate to 32-bit integer and return. */
- return temp;
+ return __temp;
}
/* Microsoft intrinsic. */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttss_si64x (__m128 __A)
-{
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvttss_si64x(__m128 __A) {
/* Extract the lower float element. */
- float temp = __A[0];
+ float __temp = __A[0];
/* truncate to 32-bit integer and return. */
- return temp;
+ return __temp;
}
/* Truncate the two lower SPFP values to 32-bit integers. Return the
integers in packed form. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttps_pi32 (__m128 __A)
-{
- __v4sf temp;
- __vector unsigned long long result;
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvttps_pi32(__m128 __A) {
+ __v4sf __temp;
+ __vector unsigned long long __result;
/* Splat two lower SPFP values to both halves. */
- temp = (__v4sf) vec_splat ((__vector long long)__A, 0);
- result = (__vector unsigned long long) vec_cts (temp, 0);
+ __temp = (__v4sf)vec_splat((__vector long long)__A, 0);
+ __result = (__vector unsigned long long)vec_cts(__temp, 0);
- return (__m64) ((__vector long long) result)[0];
+ return (__m64)((__vector long long)__result)[0];
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_ps2pi (__m128 __A)
-{
- return _mm_cvttps_pi32 (__A);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtt_ps2pi(__m128 __A) {
+ return _mm_cvttps_pi32(__A);
}
/* Convert B to a SPFP value and insert it as element zero in A. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi32_ss (__m128 __A, int __B)
-{
- float temp = __B;
- __A[0] = temp;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi32_ss(__m128 __A, int __B) {
+ float __temp = __B;
+ __A[0] = __temp;
return __A;
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_si2ss (__m128 __A, int __B)
-{
- return _mm_cvtsi32_ss (__A, __B);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvt_si2ss(__m128 __A, int __B) {
+ return _mm_cvtsi32_ss(__A, __B);
}
/* Convert B to a SPFP value and insert it as element zero in A. */
/* Intel intrinsic. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64_ss (__m128 __A, long long __B)
-{
- float temp = __B;
- __A[0] = temp;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64_ss(__m128 __A, long long __B) {
+ float __temp = __B;
+ __A[0] = __temp;
return __A;
}
/* Microsoft intrinsic. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64x_ss (__m128 __A, long long __B)
-{
- return _mm_cvtsi64_ss (__A, __B);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64x_ss(__m128 __A, long long __B) {
+ return _mm_cvtsi64_ss(__A, __B);
}
/* Convert the two 32-bit values in B to SPFP form and insert them
as the two lower elements in A. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi32_ps (__m128 __A, __m64 __B)
-{
- __vector signed int vm1;
- __vector float vf1;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtpi32_ps(__m128 __A, __m64 __B) {
+ __vector signed int __vm1;
+ __vector float __vf1;
- vm1 = (__vector signed int) (__vector unsigned long long) {__B, __B};
- vf1 = (__vector float) vec_ctf (vm1, 0);
+ __vm1 = (__vector signed int)(__vector unsigned long long){__B, __B};
+ __vf1 = (__vector float)vec_ctf(__vm1, 0);
- return ((__m128) (__vector unsigned long long)
- { ((__vector unsigned long long)vf1) [0],
- ((__vector unsigned long long)__A) [1]});
+ return ((__m128)(__vector unsigned long long){
+ ((__vector unsigned long long)__vf1)[0],
+ ((__vector unsigned long long)__A)[1]});
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_pi2ps (__m128 __A, __m64 __B)
-{
- return _mm_cvtpi32_ps (__A, __B);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvt_pi2ps(__m128 __A, __m64 __B) {
+ return _mm_cvtpi32_ps(__A, __B);
}
/* Convert the four signed 16-bit values in A to SPFP form. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi16_ps (__m64 __A)
-{
- __vector signed short vs8;
- __vector signed int vi4;
- __vector float vf1;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtpi16_ps(__m64 __A) {
+ __vector signed short __vs8;
+ __vector signed int __vi4;
+ __vector float __vf1;
- vs8 = (__vector signed short) (__vector unsigned long long) { __A, __A };
- vi4 = vec_vupklsh (vs8);
- vf1 = (__vector float) vec_ctf (vi4, 0);
+ __vs8 = (__vector signed short)(__vector unsigned long long){__A, __A};
+ __vi4 = vec_vupklsh(__vs8);
+ __vf1 = (__vector float)vec_ctf(__vi4, 0);
- return (__m128) vf1;
+ return (__m128)__vf1;
}
/* Convert the four unsigned 16-bit values in A to SPFP form. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpu16_ps (__m64 __A)
-{
- const __vector unsigned short zero =
- { 0, 0, 0, 0, 0, 0, 0, 0 };
- __vector unsigned short vs8;
- __vector unsigned int vi4;
- __vector float vf1;
-
- vs8 = (__vector unsigned short) (__vector unsigned long long) { __A, __A };
- vi4 = (__vector unsigned int) vec_mergel
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtpu16_ps(__m64 __A) {
+ const __vector unsigned short __zero = {0, 0, 0, 0, 0, 0, 0, 0};
+ __vector unsigned short __vs8;
+ __vector unsigned int __vi4;
+ __vector float __vf1;
+
+ __vs8 = (__vector unsigned short)(__vector unsigned long long){__A, __A};
+ __vi4 = (__vector unsigned int)vec_mergel
#ifdef __LITTLE_ENDIAN__
- (vs8, zero);
+ (__vs8, __zero);
#else
- (zero, vs8);
+ (__zero, __vs8);
#endif
- vf1 = (__vector float) vec_ctf (vi4, 0);
+ __vf1 = (__vector float)vec_ctf(__vi4, 0);
- return (__m128) vf1;
+ return (__m128)__vf1;
}
/* Convert the low four signed 8-bit values in A to SPFP form. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi8_ps (__m64 __A)
-{
- __vector signed char vc16;
- __vector signed short vs8;
- __vector signed int vi4;
- __vector float vf1;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtpi8_ps(__m64 __A) {
+ __vector signed char __vc16;
+ __vector signed short __vs8;
+ __vector signed int __vi4;
+ __vector float __vf1;
- vc16 = (__vector signed char) (__vector unsigned long long) { __A, __A };
- vs8 = vec_vupkhsb (vc16);
- vi4 = vec_vupkhsh (vs8);
- vf1 = (__vector float) vec_ctf (vi4, 0);
+ __vc16 = (__vector signed char)(__vector unsigned long long){__A, __A};
+ __vs8 = vec_vupkhsb(__vc16);
+ __vi4 = vec_vupkhsh(__vs8);
+ __vf1 = (__vector float)vec_ctf(__vi4, 0);
- return (__m128) vf1;
+ return (__m128)__vf1;
}
/* Convert the low four unsigned 8-bit values in A to SPFP form. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-
-_mm_cvtpu8_ps (__m64 __A)
-{
- const __vector unsigned char zero =
- { 0, 0, 0, 0, 0, 0, 0, 0 };
- __vector unsigned char vc16;
- __vector unsigned short vs8;
- __vector unsigned int vi4;
- __vector float vf1;
-
- vc16 = (__vector unsigned char) (__vector unsigned long long) { __A, __A };
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+
+ _mm_cvtpu8_ps(__m64 __A) {
+ const __vector unsigned char __zero = {0, 0, 0, 0, 0, 0, 0, 0};
+ __vector unsigned char __vc16;
+ __vector unsigned short __vs8;
+ __vector unsigned int __vi4;
+ __vector float __vf1;
+
+ __vc16 = (__vector unsigned char)(__vector unsigned long long){__A, __A};
#ifdef __LITTLE_ENDIAN__
- vs8 = (__vector unsigned short) vec_mergel (vc16, zero);
- vi4 = (__vector unsigned int) vec_mergeh (vs8,
- (__vector unsigned short) zero);
+ __vs8 = (__vector unsigned short)vec_mergel(__vc16, __zero);
+ __vi4 =
+ (__vector unsigned int)vec_mergeh(__vs8, (__vector unsigned short)__zero);
#else
- vs8 = (__vector unsigned short) vec_mergel (zero, vc16);
- vi4 = (__vector unsigned int) vec_mergeh ((__vector unsigned short) zero,
- vs8);
+ __vs8 = (__vector unsigned short)vec_mergel(__zero, __vc16);
+ __vi4 =
+ (__vector unsigned int)vec_mergeh((__vector unsigned short)__zero, __vs8);
#endif
- vf1 = (__vector float) vec_ctf (vi4, 0);
+ __vf1 = (__vector float)vec_ctf(__vi4, 0);
- return (__m128) vf1;
+ return (__m128)__vf1;
}
/* Convert the four signed 32-bit values in A and B to SPFP form. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi32x2_ps (__m64 __A, __m64 __B)
-{
- __vector signed int vi4;
- __vector float vf4;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtpi32x2_ps(__m64 __A, __m64 __B) {
+ __vector signed int __vi4;
+ __vector float __vf4;
- vi4 = (__vector signed int) (__vector unsigned long long) { __A, __B };
- vf4 = (__vector float) vec_ctf (vi4, 0);
- return (__m128) vf4;
+ __vi4 = (__vector signed int)(__vector unsigned long long){__A, __B};
+ __vf4 = (__vector float)vec_ctf(__vi4, 0);
+ return (__m128)__vf4;
}
/* Convert the four SPFP values in A to four signed 16-bit integers. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pi16 (__m128 __A)
-{
- __v4sf rounded;
- __vector signed int temp;
- __vector unsigned long long result;
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtps_pi16(__m128 __A) {
+ __v4sf __rounded;
+ __vector signed int __temp;
+ __vector unsigned long long __result;
- rounded = vec_rint(__A);
- temp = vec_cts (rounded, 0);
- result = (__vector unsigned long long) vec_pack (temp, temp);
+ __rounded = vec_rint(__A);
+ __temp = vec_cts(__rounded, 0);
+ __result = (__vector unsigned long long)vec_pack(__temp, __temp);
- return (__m64) ((__vector long long) result)[0];
+ return (__m64)((__vector long long)__result)[0];
}
/* Convert the four SPFP values in A to four signed 8-bit integers. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pi8 (__m128 __A)
-{
- __v4sf rounded;
- __vector signed int tmp_i;
- static const __vector signed int zero = {0, 0, 0, 0};
- __vector signed short tmp_s;
- __vector signed char res_v;
-
- rounded = vec_rint(__A);
- tmp_i = vec_cts (rounded, 0);
- tmp_s = vec_pack (tmp_i, zero);
- res_v = vec_pack (tmp_s, tmp_s);
- return (__m64) ((__vector long long) res_v)[0];
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtps_pi8(__m128 __A) {
+ __v4sf __rounded;
+ __vector signed int __tmp_i;
+ static const __vector signed int __zero = {0, 0, 0, 0};
+ __vector signed short __tmp_s;
+ __vector signed char __res_v;
+
+ __rounded = vec_rint(__A);
+ __tmp_i = vec_cts(__rounded, 0);
+ __tmp_s = vec_pack(__tmp_i, __zero);
+ __res_v = vec_pack(__tmp_s, __tmp_s);
+ return (__m64)((__vector long long)__res_v)[0];
}
/* Selects four specific SPFP values from A and B based on MASK. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-
-_mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask)
-{
- unsigned long element_selector_10 = __mask & 0x03;
- unsigned long element_selector_32 = (__mask >> 2) & 0x03;
- unsigned long element_selector_54 = (__mask >> 4) & 0x03;
- unsigned long element_selector_76 = (__mask >> 6) & 0x03;
- static const unsigned int permute_selectors[4] =
- {
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+
+ _mm_shuffle_ps(__m128 __A, __m128 __B, int const __mask) {
+ unsigned long __element_selector_10 = __mask & 0x03;
+ unsigned long __element_selector_32 = (__mask >> 2) & 0x03;
+ unsigned long __element_selector_54 = (__mask >> 4) & 0x03;
+ unsigned long __element_selector_76 = (__mask >> 6) & 0x03;
+ static const unsigned int __permute_selectors[4] = {
#ifdef __LITTLE_ENDIAN__
0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C
#else
0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F
#endif
- };
- __vector unsigned int t;
+ };
+ __vector unsigned int __t;
- t[0] = permute_selectors[element_selector_10];
- t[1] = permute_selectors[element_selector_32];
- t[2] = permute_selectors[element_selector_54] + 0x10101010;
- t[3] = permute_selectors[element_selector_76] + 0x10101010;
- return vec_perm ((__v4sf) __A, (__v4sf)__B, (__vector unsigned char)t);
+ __t[0] = __permute_selectors[__element_selector_10];
+ __t[1] = __permute_selectors[__element_selector_32];
+ __t[2] = __permute_selectors[__element_selector_54] + 0x10101010;
+ __t[3] = __permute_selectors[__element_selector_76] + 0x10101010;
+ return vec_perm((__v4sf)__A, (__v4sf)__B, (__vector unsigned char)__t);
}
/* Selects and interleaves the upper two SPFP values from A and B. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_ps (__m128 __A, __m128 __B)
-{
- return (__m128) vec_vmrglw ((__v4sf) __A, (__v4sf)__B);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_ps(__m128 __A, __m128 __B) {
+ return (__m128)vec_vmrglw((__v4sf)__A, (__v4sf)__B);
}
/* Selects and interleaves the lower two SPFP values from A and B. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_ps (__m128 __A, __m128 __B)
-{
- return (__m128) vec_vmrghw ((__v4sf) __A, (__v4sf)__B);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_ps(__m128 __A, __m128 __B) {
+ return (__m128)vec_vmrghw((__v4sf)__A, (__v4sf)__B);
}
/* Sets the upper two SPFP values with 64-bits of data loaded from P;
the lower two values are passed through from A. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadh_pi (__m128 __A, __m64 const *__P)
-{
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_loadh_pi(__m128 __A, __m64 const *__P) {
__vector unsigned long long __a = (__vector unsigned long long)__A;
__vector unsigned long long __p = vec_splats(*__P);
- __a [1] = __p [1];
+ __a[1] = __p[1];
return (__m128)__a;
}
/* Stores the upper two SPFP values of A into P. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storeh_pi (__m64 *__P, __m128 __A)
-{
- __vector unsigned long long __a = (__vector unsigned long long) __A;
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_storeh_pi(__m64 *__P, __m128 __A) {
+ __vector unsigned long long __a = (__vector unsigned long long)__A;
*__P = __a[1];
}
/* Moves the upper two values of B into the lower two values of A. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movehl_ps (__m128 __A, __m128 __B)
-{
- return (__m128) vec_mergel ((__vector unsigned long long)__B,
- (__vector unsigned long long)__A);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_movehl_ps(__m128 __A, __m128 __B) {
+ return (__m128)vec_mergel((__vector unsigned long long)__B,
+ (__vector unsigned long long)__A);
}
/* Moves the lower two values of B into the upper two values of A. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movelh_ps (__m128 __A, __m128 __B)
-{
- return (__m128) vec_mergeh ((__vector unsigned long long)__A,
- (__vector unsigned long long)__B);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_movelh_ps(__m128 __A, __m128 __B) {
+ return (__m128)vec_mergeh((__vector unsigned long long)__A,
+ (__vector unsigned long long)__B);
}
/* Sets the lower two SPFP values with 64-bits of data loaded from P;
the upper two values are passed through from A. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadl_pi (__m128 __A, __m64 const *__P)
-{
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_loadl_pi(__m128 __A, __m64 const *__P) {
__vector unsigned long long __a = (__vector unsigned long long)__A;
__vector unsigned long long __p = vec_splats(*__P);
- __a [0] = __p [0];
+ __a[0] = __p[0];
return (__m128)__a;
}
/* Stores the lower two SPFP values of A into P. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storel_pi (__m64 *__P, __m128 __A)
-{
- __vector unsigned long long __a = (__vector unsigned long long) __A;
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_storel_pi(__m64 *__P, __m128 __A) {
+ __vector unsigned long long __a = (__vector unsigned long long)__A;
*__P = __a[0];
}
@@ -1325,453 +1307,456 @@ _mm_storel_pi (__m64 *__P, __m128 __A)
/* Intrinsic functions that require PowerISA 2.07 minimum. */
/* Creates a 4-bit mask from the most significant bits of the SPFP values. */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movemask_ps (__m128 __A)
-{
- __vector unsigned long long result;
- static const __vector unsigned int perm_mask =
- {
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_movemask_ps(__m128 __A) {
+#ifdef _ARCH_PWR10
+ return vec_extractm((__vector unsigned int)__A);
+#else
+ __vector unsigned long long __result;
+ static const __vector unsigned int __perm_mask = {
#ifdef __LITTLE_ENDIAN__
- 0x00204060, 0x80808080, 0x80808080, 0x80808080
+ 0x00204060, 0x80808080, 0x80808080, 0x80808080
#else
0x80808080, 0x80808080, 0x80808080, 0x00204060
#endif
- };
+ };
- result = ((__vector unsigned long long)
- vec_vbpermq ((__vector unsigned char) __A,
- (__vector unsigned char) perm_mask));
+ __result = ((__vector unsigned long long)vec_vbpermq(
+ (__vector unsigned char)__A, (__vector unsigned char)__perm_mask));
#ifdef __LITTLE_ENDIAN__
- return result[1];
+ return __result[1];
#else
- return result[0];
+ return __result[0];
#endif
+#endif /* !_ARCH_PWR10 */
}
#endif /* _ARCH_PWR8 */
/* Create a vector with all four elements equal to *P. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load1_ps (float const *__P)
-{
- return _mm_set1_ps (*__P);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_load1_ps(float const *__P) {
+ return _mm_set1_ps(*__P);
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_ps1 (float const *__P)
-{
- return _mm_load1_ps (__P);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_load_ps1(float const *__P) {
+ return _mm_load1_ps(__P);
}
/* Extracts one of the four words of A. The selector N must be immediate. */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_extract_pi16 (__m64 const __A, int const __N)
-{
- unsigned int shiftr = __N & 3;
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_extract_pi16(__m64 const __A, int const __N) {
+ unsigned int __shiftr = __N & 3;
#ifdef __BIG_ENDIAN__
- shiftr = 3 - shiftr;
+ __shiftr = 3 - __shiftr;
#endif
- return ((__A >> (shiftr * 16)) & 0xffff);
+ return ((__A >> (__shiftr * 16)) & 0xffff);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pextrw (__m64 const __A, int const __N)
-{
- return _mm_extract_pi16 (__A, __N);
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pextrw(__m64 const __A, int const __N) {
+ return _mm_extract_pi16(__A, __N);
}
/* Inserts word D into one of four words of A. The selector N must be
immediate. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
-{
- const int shiftl = (__N & 3) * 16;
- const __m64 shiftD = (const __m64) __D << shiftl;
- const __m64 mask = 0xffffUL << shiftl;
- __m64 result = (__A & (~mask)) | (shiftD & mask);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_insert_pi16(__m64 const __A, int const __D, int const __N) {
+ const int __shiftl = (__N & 3) * 16;
+ const __m64 __shiftD = (const __m64)__D << __shiftl;
+ const __m64 __mask = 0xffffUL << __shiftl;
+ __m64 __result = (__A & (~__mask)) | (__shiftD & __mask);
- return (result);
+ return __result;
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pinsrw (__m64 const __A, int const __D, int const __N)
-{
- return _mm_insert_pi16 (__A, __D, __N);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pinsrw(__m64 const __A, int const __D, int const __N) {
+ return _mm_insert_pi16(__A, __D, __N);
}
/* Compute the element-wise maximum of signed 16-bit values. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_pi16 (__m64 __A, __m64 __B)
-{
+ _mm_max_pi16(__m64 __A, __m64 __B) {
#if _ARCH_PWR8
- __vector signed short a, b, r;
- __vector __bool short c;
-
- a = (__vector signed short)vec_splats (__A);
- b = (__vector signed short)vec_splats (__B);
- c = (__vector __bool short)vec_cmpgt (a, b);
- r = vec_sel (b, a, c);
- return (__m64) ((__vector long long) r)[0];
+ __vector signed short __a, __b, __r;
+ __vector __bool short __c;
+
+ __a = (__vector signed short)vec_splats(__A);
+ __b = (__vector signed short)vec_splats(__B);
+ __c = (__vector __bool short)vec_cmpgt(__a, __b);
+ __r = vec_sel(__b, __a, __c);
+ return (__m64)((__vector long long)__r)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __m1, __m2, __res;
- m1.as_m64 = __A;
- m2.as_m64 = __B;
+ __m1.as_m64 = __A;
+ __m2.as_m64 = __B;
- res.as_short[0] =
- (m1.as_short[0] > m2.as_short[0]) ? m1.as_short[0] : m2.as_short[0];
- res.as_short[1] =
- (m1.as_short[1] > m2.as_short[1]) ? m1.as_short[1] : m2.as_short[1];
- res.as_short[2] =
- (m1.as_short[2] > m2.as_short[2]) ? m1.as_short[2] : m2.as_short[2];
- res.as_short[3] =
- (m1.as_short[3] > m2.as_short[3]) ? m1.as_short[3] : m2.as_short[3];
+ __res.as_short[0] = (__m1.as_short[0] > __m2.as_short[0]) ? __m1.as_short[0]
+ : __m2.as_short[0];
+ __res.as_short[1] = (__m1.as_short[1] > __m2.as_short[1]) ? __m1.as_short[1]
+ : __m2.as_short[1];
+ __res.as_short[2] = (__m1.as_short[2] > __m2.as_short[2]) ? __m1.as_short[2]
+ : __m2.as_short[2];
+ __res.as_short[3] = (__m1.as_short[3] > __m2.as_short[3]) ? __m1.as_short[3]
+ : __m2.as_short[3];
- return (__m64) res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pmaxsw (__m64 __A, __m64 __B)
-{
- return _mm_max_pi16 (__A, __B);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pmaxsw(__m64 __A, __m64 __B) {
+ return _mm_max_pi16(__A, __B);
}
/* Compute the element-wise maximum of unsigned 8-bit values. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_pu8 (__m64 __A, __m64 __B)
-{
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_max_pu8(__m64 __A, __m64 __B) {
#if _ARCH_PWR8
- __vector unsigned char a, b, r;
- __vector __bool char c;
-
- a = (__vector unsigned char)vec_splats (__A);
- b = (__vector unsigned char)vec_splats (__B);
- c = (__vector __bool char)vec_cmpgt (a, b);
- r = vec_sel (b, a, c);
- return (__m64) ((__vector long long) r)[0];
+ __vector unsigned char __a, __b, __r;
+ __vector __bool char __c;
+
+ __a = (__vector unsigned char)vec_splats(__A);
+ __b = (__vector unsigned char)vec_splats(__B);
+ __c = (__vector __bool char)vec_cmpgt(__a, __b);
+ __r = vec_sel(__b, __a, __c);
+ return (__m64)((__vector long long)__r)[0];
#else
- __m64_union m1, m2, res;
- long i;
-
- m1.as_m64 = __A;
- m2.as_m64 = __B;
+ __m64_union __m1, __m2, __res;
+ long __i;
+ __m1.as_m64 = __A;
+ __m2.as_m64 = __B;
- for (i = 0; i < 8; i++)
- res.as_char[i] =
- ((unsigned char) m1.as_char[i] > (unsigned char) m2.as_char[i]) ?
- m1.as_char[i] : m2.as_char[i];
+ for (__i = 0; __i < 8; __i++)
+ __res.as_char[__i] =
+ ((unsigned char)__m1.as_char[__i] > (unsigned char)__m2.as_char[__i])
+ ? __m1.as_char[__i]
+ : __m2.as_char[__i];
- return (__m64) res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pmaxub (__m64 __A, __m64 __B)
-{
- return _mm_max_pu8 (__A, __B);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pmaxub(__m64 __A, __m64 __B) {
+ return _mm_max_pu8(__A, __B);
}
/* Compute the element-wise minimum of signed 16-bit values. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_pi16 (__m64 __A, __m64 __B)
-{
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_min_pi16(__m64 __A, __m64 __B) {
#if _ARCH_PWR8
- __vector signed short a, b, r;
- __vector __bool short c;
-
- a = (__vector signed short)vec_splats (__A);
- b = (__vector signed short)vec_splats (__B);
- c = (__vector __bool short)vec_cmplt (a, b);
- r = vec_sel (b, a, c);
- return (__m64) ((__vector long long) r)[0];
+ __vector signed short __a, __b, __r;
+ __vector __bool short __c;
+
+ __a = (__vector signed short)vec_splats(__A);
+ __b = (__vector signed short)vec_splats(__B);
+ __c = (__vector __bool short)vec_cmplt(__a, __b);
+ __r = vec_sel(__b, __a, __c);
+ return (__m64)((__vector long long)__r)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __m1, __m2, __res;
- m1.as_m64 = __A;
- m2.as_m64 = __B;
+ __m1.as_m64 = __A;
+ __m2.as_m64 = __B;
- res.as_short[0] =
- (m1.as_short[0] < m2.as_short[0]) ? m1.as_short[0] : m2.as_short[0];
- res.as_short[1] =
- (m1.as_short[1] < m2.as_short[1]) ? m1.as_short[1] : m2.as_short[1];
- res.as_short[2] =
- (m1.as_short[2] < m2.as_short[2]) ? m1.as_short[2] : m2.as_short[2];
- res.as_short[3] =
- (m1.as_short[3] < m2.as_short[3]) ? m1.as_short[3] : m2.as_short[3];
+ __res.as_short[0] = (__m1.as_short[0] < __m2.as_short[0]) ? __m1.as_short[0]
+ : __m2.as_short[0];
+ __res.as_short[1] = (__m1.as_short[1] < __m2.as_short[1]) ? __m1.as_short[1]
+ : __m2.as_short[1];
+ __res.as_short[2] = (__m1.as_short[2] < __m2.as_short[2]) ? __m1.as_short[2]
+ : __m2.as_short[2];
+ __res.as_short[3] = (__m1.as_short[3] < __m2.as_short[3]) ? __m1.as_short[3]
+ : __m2.as_short[3];
- return (__m64) res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pminsw (__m64 __A, __m64 __B)
-{
- return _mm_min_pi16 (__A, __B);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pminsw(__m64 __A, __m64 __B) {
+ return _mm_min_pi16(__A, __B);
}
/* Compute the element-wise minimum of unsigned 8-bit values. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_pu8 (__m64 __A, __m64 __B)
-{
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_min_pu8(__m64 __A, __m64 __B) {
#if _ARCH_PWR8
- __vector unsigned char a, b, r;
- __vector __bool char c;
-
- a = (__vector unsigned char)vec_splats (__A);
- b = (__vector unsigned char)vec_splats (__B);
- c = (__vector __bool char)vec_cmplt (a, b);
- r = vec_sel (b, a, c);
- return (__m64) ((__vector long long) r)[0];
+ __vector unsigned char __a, __b, __r;
+ __vector __bool char __c;
+
+ __a = (__vector unsigned char)vec_splats(__A);
+ __b = (__vector unsigned char)vec_splats(__B);
+ __c = (__vector __bool char)vec_cmplt(__a, __b);
+ __r = vec_sel(__b, __a, __c);
+ return (__m64)((__vector long long)__r)[0];
#else
- __m64_union m1, m2, res;
- long i;
+ __m64_union __m1, __m2, __res;
+ long __i;
- m1.as_m64 = __A;
- m2.as_m64 = __B;
+ __m1.as_m64 = __A;
+ __m2.as_m64 = __B;
+ for (__i = 0; __i < 8; __i++)
+ __res.as_char[__i] =
+ ((unsigned char)__m1.as_char[__i] < (unsigned char)__m2.as_char[__i])
+ ? __m1.as_char[__i]
+ : __m2.as_char[__i];
- for (i = 0; i < 8; i++)
- res.as_char[i] =
- ((unsigned char) m1.as_char[i] < (unsigned char) m2.as_char[i]) ?
- m1.as_char[i] : m2.as_char[i];
-
- return (__m64) res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pminub (__m64 __A, __m64 __B)
-{
- return _mm_min_pu8 (__A, __B);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pminub(__m64 __A, __m64 __B) {
+ return _mm_min_pu8(__A, __B);
}
/* Create an 8-bit mask of the signs of 8-bit values. */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movemask_pi8 (__m64 __A)
-{
- unsigned long long p =
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_movemask_pi8(__m64 __A) {
+#ifdef __powerpc64__
+ unsigned long long __p =
+#ifdef __LITTLE_ENDIAN__
+ 0x0008101820283038UL; // permute control for sign bits
+#else
+ 0x3830282018100800UL; // permute control for sign bits
+#endif
+ return __builtin_bpermd(__p, __A);
+#else
#ifdef __LITTLE_ENDIAN__
- 0x0008101820283038UL; // permute control for sign bits
+ unsigned int __mask = 0x20283038UL;
+ unsigned int __r1 = __builtin_bpermd(__mask, __A) & 0xf;
+ unsigned int __r2 = __builtin_bpermd(__mask, __A >> 32) & 0xf;
#else
- 0x3830282018100800UL; // permute control for sign bits
+ unsigned int __mask = 0x38302820UL;
+ unsigned int __r1 = __builtin_bpermd(__mask, __A >> 32) & 0xf;
+ unsigned int __r2 = __builtin_bpermd(__mask, __A) & 0xf;
+#endif
+ return (__r2 << 4) | __r1;
#endif
- return __builtin_bpermd (p, __A);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pmovmskb (__m64 __A)
-{
- return _mm_movemask_pi8 (__A);
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pmovmskb(__m64 __A) {
+ return _mm_movemask_pi8(__A);
}
/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
in B and produce the high 16 bits of the 32-bit results. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mulhi_pu16 (__m64 __A, __m64 __B)
-{
- __vector unsigned short a, b;
- __vector unsigned short c;
- __vector unsigned int w0, w1;
- __vector unsigned char xform1 = {
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mulhi_pu16(__m64 __A, __m64 __B) {
+ __vector unsigned short __a, __b;
+ __vector unsigned short __c;
+ __vector unsigned int __w0, __w1;
+ __vector unsigned char __xform1 = {
#ifdef __LITTLE_ENDIAN__
- 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17,
- 0x0A, 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
+ 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, 0x0A,
+ 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
#else
- 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15,
- 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15
+ 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15, 0x00,
+ 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15
#endif
- };
+ };
- a = (__vector unsigned short)vec_splats (__A);
- b = (__vector unsigned short)vec_splats (__B);
+ __a = (__vector unsigned short)vec_splats(__A);
+ __b = (__vector unsigned short)vec_splats(__B);
- w0 = vec_vmuleuh (a, b);
- w1 = vec_vmulouh (a, b);
- c = (__vector unsigned short)vec_perm (w0, w1, xform1);
+ __w0 = vec_vmuleuh(__a, __b);
+ __w1 = vec_vmulouh(__a, __b);
+ __c = (__vector unsigned short)vec_perm(__w0, __w1, __xform1);
- return (__m64) ((__vector long long) c)[0];
+ return (__m64)((__vector long long)__c)[0];
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pmulhuw (__m64 __A, __m64 __B)
-{
- return _mm_mulhi_pu16 (__A, __B);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pmulhuw(__m64 __A, __m64 __B) {
+ return _mm_mulhi_pu16(__A, __B);
}
/* Return a combination of the four 16-bit values in A. The selector
must be an immediate. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shuffle_pi16 (__m64 __A, int const __N)
-{
- unsigned long element_selector_10 = __N & 0x03;
- unsigned long element_selector_32 = (__N >> 2) & 0x03;
- unsigned long element_selector_54 = (__N >> 4) & 0x03;
- unsigned long element_selector_76 = (__N >> 6) & 0x03;
- static const unsigned short permute_selectors[4] =
- {
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_shuffle_pi16(__m64 __A, int const __N) {
+ unsigned long __element_selector_10 = __N & 0x03;
+ unsigned long __element_selector_32 = (__N >> 2) & 0x03;
+ unsigned long __element_selector_54 = (__N >> 4) & 0x03;
+ unsigned long __element_selector_76 = (__N >> 6) & 0x03;
+ static const unsigned short __permute_selectors[4] = {
#ifdef __LITTLE_ENDIAN__
- 0x0908, 0x0B0A, 0x0D0C, 0x0F0E
+ 0x0908, 0x0B0A, 0x0D0C, 0x0F0E
#else
- 0x0607, 0x0405, 0x0203, 0x0001
+ 0x0607, 0x0405, 0x0203, 0x0001
#endif
- };
- __m64_union t;
- __vector unsigned long long a, p, r;
+ };
+ __m64_union __t;
+ __vector unsigned long long __a, __p, __r;
#ifdef __LITTLE_ENDIAN__
- t.as_short[0] = permute_selectors[element_selector_10];
- t.as_short[1] = permute_selectors[element_selector_32];
- t.as_short[2] = permute_selectors[element_selector_54];
- t.as_short[3] = permute_selectors[element_selector_76];
+ __t.as_short[0] = __permute_selectors[__element_selector_10];
+ __t.as_short[1] = __permute_selectors[__element_selector_32];
+ __t.as_short[2] = __permute_selectors[__element_selector_54];
+ __t.as_short[3] = __permute_selectors[__element_selector_76];
#else
- t.as_short[3] = permute_selectors[element_selector_10];
- t.as_short[2] = permute_selectors[element_selector_32];
- t.as_short[1] = permute_selectors[element_selector_54];
- t.as_short[0] = permute_selectors[element_selector_76];
+ __t.as_short[3] = __permute_selectors[__element_selector_10];
+ __t.as_short[2] = __permute_selectors[__element_selector_32];
+ __t.as_short[1] = __permute_selectors[__element_selector_54];
+ __t.as_short[0] = __permute_selectors[__element_selector_76];
#endif
- p = vec_splats (t.as_m64);
- a = vec_splats (__A);
- r = vec_perm (a, a, (__vector unsigned char)p);
- return (__m64) ((__vector long long) r)[0];
+ __p = vec_splats(__t.as_m64);
+ __a = vec_splats(__A);
+ __r = vec_perm(__a, __a, (__vector unsigned char)__p);
+ return (__m64)((__vector long long)__r)[0];
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pshufw (__m64 __A, int const __N)
-{
- return _mm_shuffle_pi16 (__A, __N);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pshufw(__m64 __A, int const __N) {
+ return _mm_shuffle_pi16(__A, __N);
}
/* Conditionally store byte elements of A into P. The high bit of each
byte in the selector N determines whether the corresponding byte from
A is stored. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
-{
- __m64 hibit = 0x8080808080808080UL;
- __m64 mask, tmp;
- __m64 *p = (__m64*)__P;
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_maskmove_si64(__m64 __A, __m64 __N, char *__P) {
+ __m64 __hibit = 0x8080808080808080UL;
+ __m64 __mask, __tmp;
+ __m64 *__p = (__m64 *)__P;
- tmp = *p;
- mask = _mm_cmpeq_pi8 ((__N & hibit), hibit);
- tmp = (tmp & (~mask)) | (__A & mask);
- *p = tmp;
+ __tmp = *__p;
+ __mask = _mm_cmpeq_pi8((__N & __hibit), __hibit);
+ __tmp = (__tmp & (~__mask)) | (__A & __mask);
+ *__p = __tmp;
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_maskmovq (__m64 __A, __m64 __N, char *__P)
-{
- _mm_maskmove_si64 (__A, __N, __P);
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_maskmovq(__m64 __A, __m64 __N, char *__P) {
+ _mm_maskmove_si64(__A, __N, __P);
}
/* Compute the rounded averages of the unsigned 8-bit values in A and B. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_avg_pu8 (__m64 __A, __m64 __B)
-{
- __vector unsigned char a, b, c;
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_avg_pu8(__m64 __A, __m64 __B) {
+ __vector unsigned char __a, __b, __c;
- a = (__vector unsigned char)vec_splats (__A);
- b = (__vector unsigned char)vec_splats (__B);
- c = vec_avg (a, b);
- return (__m64) ((__vector long long) c)[0];
+ __a = (__vector unsigned char)vec_splats(__A);
+ __b = (__vector unsigned char)vec_splats(__B);
+ __c = vec_avg(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pavgb (__m64 __A, __m64 __B)
-{
- return _mm_avg_pu8 (__A, __B);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pavgb(__m64 __A, __m64 __B) {
+ return _mm_avg_pu8(__A, __B);
}
/* Compute the rounded averages of the unsigned 16-bit values in A and B. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_avg_pu16 (__m64 __A, __m64 __B)
-{
- __vector unsigned short a, b, c;
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_avg_pu16(__m64 __A, __m64 __B) {
+ __vector unsigned short __a, __b, __c;
- a = (__vector unsigned short)vec_splats (__A);
- b = (__vector unsigned short)vec_splats (__B);
- c = vec_avg (a, b);
- return (__m64) ((__vector long long) c)[0];
+ __a = (__vector unsigned short)vec_splats(__A);
+ __b = (__vector unsigned short)vec_splats(__B);
+ __c = vec_avg(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pavgw (__m64 __A, __m64 __B)
-{
- return _mm_avg_pu16 (__A, __B);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pavgw(__m64 __A, __m64 __B) {
+ return _mm_avg_pu16(__A, __B);
}
/* Compute the sum of the absolute differences of the unsigned 8-bit
values in A and B. Return the value in the lower 16-bit word; the
upper words are cleared. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sad_pu8 (__m64 __A, __m64 __B)
-{
- __vector unsigned char a, b;
- __vector unsigned char vmin, vmax, vabsdiff;
- __vector signed int vsum;
- const __vector unsigned int zero =
- { 0, 0, 0, 0 };
- __m64_union result = {0};
-
- a = (__vector unsigned char) (__vector unsigned long long) { 0UL, __A };
- b = (__vector unsigned char) (__vector unsigned long long) { 0UL, __B };
- vmin = vec_min (a, b);
- vmax = vec_max (a, b);
- vabsdiff = vec_sub (vmax, vmin);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sad_pu8(__m64 __A, __m64 __B) {
+ __vector unsigned char __a, __b;
+ __vector unsigned char __vmin, __vmax, __vabsdiff;
+ __vector signed int __vsum;
+ const __vector unsigned int __zero = {0, 0, 0, 0};
+ __m64_union __result = {0};
+
+ __a = (__vector unsigned char)(__vector unsigned long long){0UL, __A};
+ __b = (__vector unsigned char)(__vector unsigned long long){0UL, __B};
+ __vmin = vec_min(__a, __b);
+ __vmax = vec_max(__a, __b);
+ __vabsdiff = vec_sub(__vmax, __vmin);
/* Sum four groups of bytes into integers. */
- vsum = (__vector signed int) vec_sum4s (vabsdiff, zero);
+ __vsum = (__vector signed int)vec_sum4s(__vabsdiff, __zero);
/* Sum across four integers with integer result. */
- vsum = vec_sums (vsum, (__vector signed int) zero);
+ __vsum = vec_sums(__vsum, (__vector signed int)__zero);
/* The sum is in the right most 32-bits of the vector result.
Transfer to a GPR and truncate to 16 bits. */
- result.as_short[0] = vsum[3];
- return result.as_m64;
+ __result.as_short[0] = __vsum[3];
+ return __result.as_m64;
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_psadbw (__m64 __A, __m64 __B)
-{
- return _mm_sad_pu8 (__A, __B);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psadbw(__m64 __A, __m64 __B) {
+ return _mm_sad_pu8(__A, __B);
}
/* Stores the data in A to the address P without polluting the caches. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_pi (__m64 *__P, __m64 __A)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_stream_pi(__m64 *__P, __m64 __A) {
/* Use the data cache block touch for store transient. */
- __asm__ (
- " dcbtstt 0,%0"
- :
- : "b" (__P)
- : "memory"
- );
+ __asm__(" dcbtstt 0,%0" : : "b"(__P) : "memory");
*__P = __A;
}
/* Likewise. The address must be 16-byte aligned. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_ps (float *__P, __m128 __A)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_stream_ps(float *__P, __m128 __A) {
/* Use the data cache block touch for store transient. */
- __asm__ (
- " dcbtstt 0,%0"
- :
- : "b" (__P)
- : "memory"
- );
- _mm_store_ps (__P, __A);
+ __asm__(" dcbtstt 0,%0" : : "b"(__P) : "memory");
+ _mm_store_ps(__P, __A);
}
/* Guarantees that every preceding store is globally visible before
any subsequent store. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sfence (void)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sfence(void) {
/* Generate a light weight sync. */
- __atomic_thread_fence (__ATOMIC_RELEASE);
+ __atomic_thread_fence(__ATOMIC_RELEASE);
}
/* The execution of the next instruction is delayed by an implementation
@@ -1779,9 +1764,9 @@ _mm_sfence (void)
architectural state. This is after the pop_options pragma because
it does not require SSE support in the processor--the encoding is a
nop on processors that do not support it. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_pause (void)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_pause(void) {
/* There is no exact match with this construct, but the following is
close to the desired effect. */
#if _ARCH_PWR8
@@ -1797,47 +1782,46 @@ _mm_pause (void)
PRI and continue execution. */
unsigned long __PPR;
- __asm__ volatile (
- " mfppr %0;"
- " or 31,31,31;"
- " isync;"
- " lwsync;"
- " isync;"
- " mtppr %0;"
- : "=r" (__PPR)
- :
- : "memory"
- );
+ __asm__ volatile(" mfppr %0;"
+ " or 31,31,31;"
+ " isync;"
+ " lwsync;"
+ " isync;"
+ " mtppr %0;"
+ : "=r"(__PPR)
+ :
+ : "memory");
#else
/* For older processor where we may not even have Program Priority
controls we can only depend on Heavy Weight Sync. */
- __atomic_thread_fence (__ATOMIC_SEQ_CST);
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
#endif
}
/* Transpose the 4x4 matrix composed of row[0-3]. */
-#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
-do { \
- __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \
- __v4sf __t0 = vec_vmrghw (__r0, __r1); \
- __v4sf __t1 = vec_vmrghw (__r2, __r3); \
- __v4sf __t2 = vec_vmrglw (__r0, __r1); \
- __v4sf __t3 = vec_vmrglw (__r2, __r3); \
- (row0) = (__v4sf)vec_mergeh ((__vector long long)__t0, \
- (__vector long long)__t1); \
- (row1) = (__v4sf)vec_mergel ((__vector long long)__t0, \
- (__vector long long)__t1); \
- (row2) = (__v4sf)vec_mergeh ((__vector long long)__t2, \
- (__vector long long)__t3); \
- (row3) = (__v4sf)vec_mergel ((__vector long long)__t2, \
- (__vector long long)__t3); \
-} while (0)
+#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
+ do { \
+ __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \
+ __v4sf __t0 = vec_vmrghw(__r0, __r1); \
+ __v4sf __t1 = vec_vmrghw(__r2, __r3); \
+ __v4sf __t2 = vec_vmrglw(__r0, __r1); \
+ __v4sf __t3 = vec_vmrglw(__r2, __r3); \
+ (row0) = (__v4sf)vec_mergeh((__vector long long)__t0, \
+ (__vector long long)__t1); \
+ (row1) = (__v4sf)vec_mergel((__vector long long)__t0, \
+ (__vector long long)__t1); \
+ (row2) = (__v4sf)vec_mergeh((__vector long long)__t2, \
+ (__vector long long)__t3); \
+ (row3) = (__v4sf)vec_mergel((__vector long long)__t2, \
+ (__vector long long)__t3); \
+ } while (0)
/* For backward source compatibility. */
//# include <emmintrin.h>
#else
#include_next <xmmintrin.h>
-#endif /* defined(__linux__) && defined(__ppc64__) */
+#endif /* defined(__powerpc64__) && \
+ * (defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)) */
-#endif /* _XMMINTRIN_H_INCLUDED */
+#endif /* XMMINTRIN_H_ */
diff --git a/contrib/llvm-project/clang/lib/Headers/prfchiintrin.h b/contrib/llvm-project/clang/lib/Headers/prfchiintrin.h
new file mode 100644
index 000000000000..36600b25aa1d
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/prfchiintrin.h
@@ -0,0 +1,61 @@
+/*===---- prfchiintrin.h - PREFETCHI intrinsic -----------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __PRFCHIINTRIN_H
+#define __PRFCHIINTRIN_H
+
+#ifdef __x86_64__
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("prefetchi")))
+
+/// Loads an instruction sequence containing the specified memory address into
+/// all level cache.
+///
+/// Note that the effect of this intrinsic is dependent on the processor
+/// implementation.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PREFETCHIT0 instruction.
+///
+/// \param __P
+/// A pointer specifying the memory address to be prefetched.
+static __inline__ void __DEFAULT_FN_ATTRS
+_m_prefetchit0(volatile const void *__P) {
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wcast-qual"
+ __builtin_ia32_prefetchi((const void *)__P, 3 /* _MM_HINT_T0 */);
+#pragma clang diagnostic pop
+}
+
+/// Loads an instruction sequence containing the specified memory address into
+/// all but the first-level cache.
+///
+/// Note that the effect of this intrinsic is dependent on the processor
+/// implementation.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c PREFETCHIT1 instruction.
+///
+/// \param __P
+/// A pointer specifying the memory address to be prefetched.
+static __inline__ void __DEFAULT_FN_ATTRS
+_m_prefetchit1(volatile const void *__P) {
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wcast-qual"
+ __builtin_ia32_prefetchi((const void *)__P, 2 /* _MM_HINT_T1 */);
+#pragma clang diagnostic pop
+}
+#endif /* __x86_64__ */
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __PRFCHWINTRIN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/prfchwintrin.h b/contrib/llvm-project/clang/lib/Headers/prfchwintrin.h
index 6e8a4ef2ec97..d2f91aa0123e 100644
--- a/contrib/llvm-project/clang/lib/Headers/prfchwintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/prfchwintrin.h
@@ -47,9 +47,12 @@ _m_prefetch(void *__P)
/// \param __P
/// A pointer specifying the memory address to be prefetched.
static __inline__ void __attribute__((__always_inline__, __nodebug__))
-_m_prefetchw(void *__P)
+_m_prefetchw(volatile const void *__P)
{
- __builtin_prefetch (__P, 1, 3 /* _MM_HINT_T0 */);
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wcast-qual"
+ __builtin_prefetch ((const void*)__P, 1, 3 /* _MM_HINT_T0 */);
+#pragma clang diagnostic pop
}
#endif /* __PRFCHWINTRIN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/raointintrin.h b/contrib/llvm-project/clang/lib/Headers/raointintrin.h
new file mode 100644
index 000000000000..d3290eb62abf
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/raointintrin.h
@@ -0,0 +1,203 @@
+/*===----------------------- raointintrin.h - RAOINT ------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86GPRINTRIN_H
+#error "Never use <raointintrin.h> directly; include <x86gprintrin.h> instead."
+#endif // __X86GPRINTRIN_H
+
+#ifndef __RAOINTINTRIN_H
+#define __RAOINTINTRIN_H
+
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("raoint")))
+
+/// Atomically add a 32-bit value at memory operand \a __A and a 32-bit \a __B,
+/// and store the result to the same memory location.
+///
+/// This intrinsic should be used for contention or weak ordering. It may
+/// result in bad performance for hot data used by single thread only.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c AADD instruction.
+///
+/// \param __A
+/// A pointer to a 32-bit memory location.
+/// \param __B
+/// A 32-bit integer value.
+///
+/// \code{.operation}
+/// MEM[__A+31:__A] := MEM[__A+31:__A] + __B[31:0]
+/// \endcode
+static __inline__ void __DEFAULT_FN_ATTRS _aadd_i32(int *__A, int __B) {
+ __builtin_ia32_aadd32((int *)__A, __B);
+}
+
+/// Atomically and a 32-bit value at memory operand \a __A and a 32-bit \a __B,
+/// and store the result to the same memory location.
+///
+/// This intrinsic should be used for contention or weak ordering. It may
+/// result in bad performance for hot data used by single thread only.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c AAND instruction.
+///
+/// \param __A
+/// A pointer to a 32-bit memory location.
+/// \param __B
+/// A 32-bit integer value.
+///
+/// \code{.operation}
+/// MEM[__A+31:__A] := MEM[__A+31:__A] AND __B[31:0]
+/// \endcode
+static __inline__ void __DEFAULT_FN_ATTRS _aand_i32(int *__A, int __B) {
+ __builtin_ia32_aand32((int *)__A, __B);
+}
+
+/// Atomically or a 32-bit value at memory operand \a __A and a 32-bit \a __B,
+/// and store the result to the same memory location.
+///
+/// This intrinsic should be used for contention or weak ordering. It may
+/// result in bad performance for hot data used by single thread only.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c AOR instruction.
+///
+/// \param __A
+/// A pointer to a 32-bit memory location.
+/// \param __B
+/// A 32-bit integer value.
+///
+/// \code{.operation}
+/// MEM[__A+31:__A] := MEM[__A+31:__A] OR __B[31:0]
+/// \endcode
+static __inline__ void __DEFAULT_FN_ATTRS _aor_i32(int *__A, int __B) {
+ __builtin_ia32_aor32((int *)__A, __B);
+}
+
+/// Atomically xor a 32-bit value at memory operand \a __A and a 32-bit \a __B,
+/// and store the result to the same memory location.
+///
+/// This intrinsic should be used for contention or weak ordering. It may
+/// result in bad performance for hot data used by single thread only.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c AXOR instruction.
+///
+/// \param __A
+/// A pointer to a 32-bit memory location.
+/// \param __B
+/// A 32-bit integer value.
+///
+/// \code{.operation}
+/// MEM[__A+31:__A] := MEM[__A+31:__A] XOR __B[31:0]
+/// \endcode
+static __inline__ void __DEFAULT_FN_ATTRS _axor_i32(int *__A, int __B) {
+ __builtin_ia32_axor32((int *)__A, __B);
+}
+
+#ifdef __x86_64__
+/// Atomically add a 64-bit value at memory operand \a __A and a 64-bit \a __B,
+/// and store the result to the same memory location.
+///
+/// This intrinsic should be used for contention or weak ordering. It may
+/// result in bad performance for hot data used by single thread only.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c AADD instruction.
+///
+/// \param __A
+/// A pointer to a 64-bit memory location.
+/// \param __B
+/// A 64-bit integer value.
+///
+/// \code{.operation}
+/// MEM[__A+63:__A] := MEM[__A+63:__A] + __B[63:0]
+/// \endcode
+static __inline__ void __DEFAULT_FN_ATTRS _aadd_i64(long long *__A,
+ long long __B) {
+ __builtin_ia32_aadd64((long long *)__A, __B);
+}
+
+/// Atomically and a 64-bit value at memory operand \a __A and a 64-bit \a __B,
+/// and store the result to the same memory location.
+///
+/// This intrinsic should be used for contention or weak ordering. It may
+/// result in bad performance for hot data used by single thread only.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c AAND instruction.
+///
+/// \param __A
+/// A pointer to a 64-bit memory location.
+/// \param __B
+/// A 64-bit integer value.
+///
+/// \code{.operation}
+/// MEM[__A+63:__A] := MEM[__A+63:__A] AND __B[63:0]
+/// \endcode
+static __inline__ void __DEFAULT_FN_ATTRS _aand_i64(long long *__A,
+ long long __B) {
+ __builtin_ia32_aand64((long long *)__A, __B);
+}
+
+/// Atomically or a 64-bit value at memory operand \a __A and a 64-bit \a __B,
+/// and store the result to the same memory location.
+///
+/// This intrinsic should be used for contention or weak ordering. It may
+/// result in bad performance for hot data used by single thread only.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c AOR instruction.
+///
+/// \param __A
+/// A pointer to a 64-bit memory location.
+/// \param __B
+/// A 64-bit integer value.
+///
+/// \code{.operation}
+/// MEM[__A+63:__A] := MEM[__A+63:__A] OR __B[63:0]
+/// \endcode
+static __inline__ void __DEFAULT_FN_ATTRS _aor_i64(long long *__A,
+ long long __B) {
+ __builtin_ia32_aor64((long long *)__A, __B);
+}
+
+/// Atomically xor a 64-bit value at memory operand \a __A and a 64-bit \a __B,
+/// and store the result to the same memory location.
+///
+/// This intrinsic should be used for contention or weak ordering. It may
+/// result in bad performance for hot data used by single thread only.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c AXOR instruction.
+///
+/// \param __A
+/// A pointer to a 64-bit memory location.
+/// \param __B
+/// A 64-bit integer value.
+///
+/// \code{.operation}
+/// MEM[__A+63:__A] := MEM[__A+63:__A] XOR __B[63:0]
+/// \endcode
+static __inline__ void __DEFAULT_FN_ATTRS _axor_i64(long long *__A,
+ long long __B) {
+ __builtin_ia32_axor64((long long *)__A, __B);
+}
+#endif // __x86_64__
+
+#undef __DEFAULT_FN_ATTRS
+#endif // __RAOINTINTRIN_H
diff --git a/contrib/llvm-project/clang/lib/Headers/rdpruintrin.h b/contrib/llvm-project/clang/lib/Headers/rdpruintrin.h
new file mode 100644
index 000000000000..89732bb8b3cf
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/rdpruintrin.h
@@ -0,0 +1,57 @@
+/*===---- rdpruintrin.h - RDPRU intrinsics ---------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H
+#error "Never use <rdpruintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __RDPRUINTRIN_H
+#define __RDPRUINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("rdpru")))
+
+
+/// Reads the content of a processor register.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> RDPRU </c> instruction.
+///
+/// \param reg_id
+/// A processor register identifier.
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+__rdpru (int reg_id)
+{
+ return __builtin_ia32_rdpru(reg_id);
+}
+
+#define __RDPRU_MPERF 0
+#define __RDPRU_APERF 1
+
+/// Reads the content of processor register MPERF.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic generates instruction <c> RDPRU </c> to read the value of
+/// register MPERF.
+#define __mperf() __builtin_ia32_rdpru(__RDPRU_MPERF)
+
+/// Reads the content of processor register APERF.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic generates instruction <c> RDPRU </c> to read the value of
+/// register APERF.
+#define __aperf() __builtin_ia32_rdpru(__RDPRU_APERF)
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* __RDPRUINTRIN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/rdseedintrin.h b/contrib/llvm-project/clang/lib/Headers/rdseedintrin.h
index ccb3d2dd2294..8a4fe093055b 100644
--- a/contrib/llvm-project/clang/lib/Headers/rdseedintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/rdseedintrin.h
@@ -7,8 +7,8 @@
*===-----------------------------------------------------------------------===
*/
-#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
-#error "Never use <rdseedintrin.h> directly; include <x86intrin.h> instead."
+#ifndef __IMMINTRIN_H
+#error "Never use <rdseedintrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef __RDSEEDINTRIN_H
@@ -17,23 +17,86 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("rdseed")))
+/// Stores a hardware-generated 16-bit random value in the memory at \a __p.
+///
+/// The random number generator complies with NIST SP800-90B and SP800-90C.
+///
+/// \code{.operation}
+/// IF HW_NRND_GEN.ready == 1
+/// Store16(__p, HW_NRND_GEN.data)
+/// result := 1
+/// ELSE
+/// Store16(__p, 0)
+/// result := 0
+/// END
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c RDSEED instruction.
+///
+/// \param __p
+/// Pointer to memory for storing the 16-bit random number.
+/// \returns 1 if a random number was generated, 0 if not.
static __inline__ int __DEFAULT_FN_ATTRS
_rdseed16_step(unsigned short *__p)
{
- return __builtin_ia32_rdseed16_step(__p);
+ return (int) __builtin_ia32_rdseed16_step(__p);
}
+/// Stores a hardware-generated 32-bit random value in the memory at \a __p.
+///
+/// The random number generator complies with NIST SP800-90B and SP800-90C.
+///
+/// \code{.operation}
+/// IF HW_NRND_GEN.ready == 1
+/// Store32(__p, HW_NRND_GEN.data)
+/// result := 1
+/// ELSE
+/// Store32(__p, 0)
+/// result := 0
+/// END
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c RDSEED instruction.
+///
+/// \param __p
+/// Pointer to memory for storing the 32-bit random number.
+/// \returns 1 if a random number was generated, 0 if not.
static __inline__ int __DEFAULT_FN_ATTRS
_rdseed32_step(unsigned int *__p)
{
- return __builtin_ia32_rdseed32_step(__p);
+ return (int) __builtin_ia32_rdseed32_step(__p);
}
#ifdef __x86_64__
+/// Stores a hardware-generated 64-bit random value in the memory at \a __p.
+///
+/// The random number generator complies with NIST SP800-90B and SP800-90C.
+///
+/// \code{.operation}
+/// IF HW_NRND_GEN.ready == 1
+/// Store64(__p, HW_NRND_GEN.data)
+/// result := 1
+/// ELSE
+/// Store64(__p, 0)
+/// result := 0
+/// END
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c RDSEED instruction.
+///
+/// \param __p
+/// Pointer to memory for storing the 64-bit random number.
+/// \returns 1 if a random number was generated, 0 if not.
static __inline__ int __DEFAULT_FN_ATTRS
_rdseed64_step(unsigned long long *__p)
{
- return __builtin_ia32_rdseed64_step(__p);
+ return (int) __builtin_ia32_rdseed64_step(__p);
}
#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/riscv_bitmanip.h b/contrib/llvm-project/clang/lib/Headers/riscv_bitmanip.h
new file mode 100644
index 000000000000..2bc7ee022a96
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/riscv_bitmanip.h
@@ -0,0 +1,195 @@
+/*===---- riscv_bitmanip.h - RISC-V Zb* intrinsics --------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __RISCV_BITMANIP_H
+#define __RISCV_BITMANIP_H
+
+#include <stdint.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#if defined(__riscv_zbb)
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_orc_b_32(uint32_t __x) {
+ return __builtin_riscv_orc_b_32(__x);
+}
+
+static __inline__ unsigned __attribute__((__always_inline__, __nodebug__))
+__riscv_clz_32(uint32_t __x) {
+ return __builtin_riscv_clz_32(__x);
+}
+
+static __inline__ unsigned __attribute__((__always_inline__, __nodebug__))
+__riscv_ctz_32(uint32_t __x) {
+ return __builtin_riscv_ctz_32(__x);
+}
+
+static __inline__ unsigned __attribute__((__always_inline__, __nodebug__))
+__riscv_cpop_32(uint32_t __x) {
+ return __builtin_popcount(__x);
+}
+
+#if __riscv_xlen == 64
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_orc_b_64(uint64_t __x) {
+ return __builtin_riscv_orc_b_64(__x);
+}
+
+static __inline__ unsigned __attribute__((__always_inline__, __nodebug__))
+__riscv_clz_64(uint64_t __x) {
+ return __builtin_riscv_clz_64(__x);
+}
+
+static __inline__ unsigned __attribute__((__always_inline__, __nodebug__))
+__riscv_ctz_64(uint64_t __x) {
+ return __builtin_riscv_ctz_64(__x);
+}
+
+static __inline__ unsigned __attribute__((__always_inline__, __nodebug__))
+__riscv_cpop_64(uint64_t __x) {
+ return __builtin_popcountll(__x);
+}
+#endif
+#endif // defined(__riscv_zbb)
+
+#if defined(__riscv_zbb) || defined(__riscv_zbkb)
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_rev8_32(uint32_t __x) {
+ return __builtin_bswap32(__x);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_rol_32(uint32_t __x, uint32_t __y) {
+ return __builtin_rotateleft32(__x, __y);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_ror_32(uint32_t __x, uint32_t __y) {
+ return __builtin_rotateright32(__x, __y);
+}
+
+#if __riscv_xlen == 64
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_rev8_64(uint64_t __x) {
+ return __builtin_bswap64(__x);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_rol_64(uint64_t __x, uint32_t __y) {
+ return __builtin_rotateleft64(__x, __y);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_ror_64(uint64_t __x, uint32_t __y) {
+ return __builtin_rotateright64(__x, __y);
+}
+#endif
+#endif // defined(__riscv_zbb) || defined(__riscv_zbkb)
+
+#if defined(__riscv_zbkb)
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_brev8_32(uint32_t __x) {
+ return __builtin_riscv_brev8_32(__x);
+}
+
+#if __riscv_xlen == 64
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_brev8_64(uint64_t __x) {
+ return __builtin_riscv_brev8_64(__x);
+}
+#endif
+
+#if __riscv_xlen == 32
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_unzip_32(uint32_t __x) {
+ return __builtin_riscv_unzip_32(__x);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_zip_32(uint32_t __x) {
+ return __builtin_riscv_zip_32(__x);
+}
+#endif
+#endif // defined(__riscv_zbkb)
+
+#if defined(__riscv_zbc)
+#if __riscv_xlen == 32
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_clmulr_32(uint32_t __x, uint32_t __y) {
+ return __builtin_riscv_clmulr_32(__x, __y);
+}
+#endif
+
+#if __riscv_xlen == 64
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_clmulr_64(uint64_t __x, uint64_t __y) {
+ return __builtin_riscv_clmulr_64(__x, __y);
+}
+#endif
+#endif // defined(__riscv_zbc)
+
+#if defined(__riscv_zbkc) || defined(__riscv_zbc)
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_clmul_32(uint32_t __x, uint32_t __y) {
+ return __builtin_riscv_clmul_32(__x, __y);
+}
+
+#if __riscv_xlen == 32
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_clmulh_32(uint32_t __x, uint32_t __y) {
+ return __builtin_riscv_clmulh_32(__x, __y);
+}
+#endif
+
+#if __riscv_xlen == 64
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_clmul_64(uint64_t __x, uint64_t __y) {
+ return __builtin_riscv_clmul_64(__x, __y);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_clmulh_64(uint64_t __x, uint64_t __y) {
+ return __builtin_riscv_clmulh_64(__x, __y);
+}
+#endif
+#endif // defined(__riscv_zbkc) || defined(__riscv_zbc)
+
+#if defined(__riscv_zbkx)
+#if __riscv_xlen == 32
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_xperm4_32(uint32_t __x, uint32_t __y) {
+ return __builtin_riscv_xperm4_32(__x, __y);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_xperm8_32(uint32_t __x, uint32_t __y) {
+ return __builtin_riscv_xperm8_32(__x, __y);
+}
+#endif
+
+#if __riscv_xlen == 64
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_xperm4_64(uint64_t __x, uint64_t __y) {
+ return __builtin_riscv_xperm4_64(__x, __y);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_xperm8_64(uint64_t __x, uint64_t __y) {
+ return __builtin_riscv_xperm8_64(__x, __y);
+}
+#endif
+#endif // defined(__riscv_zbkx)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/riscv_crypto.h b/contrib/llvm-project/clang/lib/Headers/riscv_crypto.h
new file mode 100644
index 000000000000..7cd2a708f557
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/riscv_crypto.h
@@ -0,0 +1,170 @@
+/*===---- riscv_crypto.h - RISC-V Zk* intrinsics ---------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __RISCV_CRYPTO_H
+#define __RISCV_CRYPTO_H
+
+#include <stdint.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#if defined(__riscv_zknd)
+#if __riscv_xlen == 32
+#define __riscv_aes32dsi(x, y, bs) __builtin_riscv_aes32dsi(x, y, bs)
+#define __riscv_aes32dsmi(x, y, bs) __builtin_riscv_aes32dsmi(x, y, bs)
+#endif
+
+#if __riscv_xlen == 64
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_aes64ds(uint64_t __x, uint64_t __y) {
+ return __builtin_riscv_aes64ds(__x, __y);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_aes64dsm(uint64_t __x, uint64_t __y) {
+ return __builtin_riscv_aes64dsm(__x, __y);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_aes64im(uint64_t __x) {
+ return __builtin_riscv_aes64im(__x);
+}
+#endif
+#endif // defined(__riscv_zknd)
+
+#if defined(__riscv_zkne)
+#if __riscv_xlen == 32
+#define __riscv_aes32esi(x, y, bs) __builtin_riscv_aes32esi(x, y, bs)
+#define __riscv_aes32esmi(x, y, bs) __builtin_riscv_aes32esmi(x, y, bs)
+#endif
+
+#if __riscv_xlen == 64
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_aes64es(uint64_t __x, uint64_t __y) {
+ return __builtin_riscv_aes64es(__x, __y);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_aes64esm(uint64_t __x, uint64_t __y) {
+ return __builtin_riscv_aes64esm(__x, __y);
+}
+#endif
+#endif // defined(__riscv_zkne)
+
+#if defined(__riscv_zknd) || defined(__riscv_zkne)
+#if __riscv_xlen == 64
+#define __riscv_aes64ks1i(x, rnum) __builtin_riscv_aes64ks1i(x, rnum)
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_aes64ks2(uint64_t __x, uint64_t __y) {
+ return __builtin_riscv_aes64ks2(__x, __y);
+}
+#endif
+#endif // defined(__riscv_zknd) || defined(__riscv_zkne)
+
+#if defined(__riscv_zknh)
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha256sig0(uint32_t __x) {
+ return __builtin_riscv_sha256sig0(__x);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha256sig1(uint32_t __x) {
+ return __builtin_riscv_sha256sig1(__x);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha256sum0(uint32_t __x) {
+ return __builtin_riscv_sha256sum0(__x);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha256sum1(uint32_t __x) {
+ return __builtin_riscv_sha256sum1(__x);
+}
+
+#if __riscv_xlen == 32
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha512sig0h(uint32_t __x, uint32_t __y) {
+ return __builtin_riscv_sha512sig0h(__x, __y);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha512sig0l(uint32_t __x, uint32_t __y) {
+ return __builtin_riscv_sha512sig0l(__x, __y);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha512sig1h(uint32_t __x, uint32_t __y) {
+ return __builtin_riscv_sha512sig1h(__x, __y);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha512sig1l(uint32_t __x, uint32_t __y) {
+ return __builtin_riscv_sha512sig1l(__x, __y);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha512sum0r(uint32_t __x, uint32_t __y) {
+ return __builtin_riscv_sha512sum0r(__x, __y);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha512sum1r(uint32_t __x, uint32_t __y) {
+ return __builtin_riscv_sha512sum1r(__x, __y);
+}
+#endif
+
+#if __riscv_xlen == 64
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha512sig0(uint64_t __x) {
+ return __builtin_riscv_sha512sig0(__x);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha512sig1(uint64_t __x) {
+ return __builtin_riscv_sha512sig1(__x);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha512sum0(uint64_t __x) {
+ return __builtin_riscv_sha512sum0(__x);
+}
+
+static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sha512sum1(uint64_t __x) {
+ return __builtin_riscv_sha512sum1(__x);
+}
+#endif
+#endif // defined(__riscv_zknh)
+
+#if defined(__riscv_zksh)
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sm3p0(uint32_t __x) {
+ return __builtin_riscv_sm3p0(__x);
+}
+
+static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__))
+__riscv_sm3p1(uint32_t __x) {
+ return __builtin_riscv_sm3p1(__x);
+}
+#endif // defined(__riscv_zksh)
+
+#if defined(__riscv_zksed)
+#define __riscv_sm4ed(x, y, bs) __builtin_riscv_sm4ed(x, y, bs);
+#define __riscv_sm4ks(x, y, bs) __builtin_riscv_sm4ks(x, y, bs);
+#endif // defined(__riscv_zksed)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/riscv_ntlh.h b/contrib/llvm-project/clang/lib/Headers/riscv_ntlh.h
new file mode 100644
index 000000000000..c92e580a0a63
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/riscv_ntlh.h
@@ -0,0 +1,26 @@
+/*===---- riscv_ntlh.h - RISC-V NTLH intrinsics ----------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __RISCV_NTLH_H
+#define __RISCV_NTLH_H
+
+#ifndef __riscv_zihintntl
+#error "NTLH intrinsics require the NTLH extension."
+#endif
+
+enum {
+ __RISCV_NTLH_INNERMOST_PRIVATE = 2,
+ __RISCV_NTLH_ALL_PRIVATE,
+ __RISCV_NTLH_INNERMOST_SHARED,
+ __RISCV_NTLH_ALL
+};
+
+#define __riscv_ntl_load __builtin_riscv_ntl_load
+#define __riscv_ntl_store __builtin_riscv_ntl_store
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/rtmintrin.h b/contrib/llvm-project/clang/lib/Headers/rtmintrin.h
index 36ff5835173f..a3ec81e3f740 100644
--- a/contrib/llvm-project/clang/lib/Headers/rtmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/rtmintrin.h
@@ -29,7 +29,7 @@
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_xbegin(void)
{
- return __builtin_ia32_xbegin();
+ return (unsigned int)__builtin_ia32_xbegin();
}
static __inline__ void __DEFAULT_FN_ATTRS
diff --git a/contrib/llvm-project/clang/lib/Headers/sha512intrin.h b/contrib/llvm-project/clang/lib/Headers/sha512intrin.h
new file mode 100644
index 000000000000..065ef5dac25a
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/sha512intrin.h
@@ -0,0 +1,200 @@
+/*===--------------- sha512intrin.h - SHA512 intrinsics -----------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <sha512intrin.h> directly; include <immintrin.h> instead."
+#endif // __IMMINTRIN_H
+
+#ifndef __SHA512INTRIN_H
+#define __SHA512INTRIN_H
+
+#define __DEFAULT_FN_ATTRS256 \
+ __attribute__((__always_inline__, __nodebug__, __target__("sha512"), \
+ __min_vector_width__(256)))
+
+/// This intrinisc is one of the two SHA512 message scheduling instructions.
+/// The intrinsic performs an intermediate calculation for the next four
+/// SHA512 message qwords. The calculated results are stored in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_sha512msg1_epi64(__m256i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSHA512MSG1 instruction.
+///
+/// \param __A
+/// A 256-bit vector of [4 x long long].
+/// \param __B
+/// A 128-bit vector of [2 x long long].
+/// \returns
+/// A 256-bit vector of [4 x long long].
+///
+/// \code{.operation}
+/// DEFINE ROR64(qword, n) {
+/// count := n % 64
+/// dest := (qword >> count) | (qword << (64 - count))
+/// RETURN dest
+/// }
+/// DEFINE SHR64(qword, n) {
+/// RETURN qword >> n
+/// }
+/// DEFINE s0(qword):
+/// RETURN ROR64(qword,1) ^ ROR64(qword, 8) ^ SHR64(qword, 7)
+/// }
+/// W[4] := __B.qword[0]
+/// W[3] := __A.qword[3]
+/// W[2] := __A.qword[2]
+/// W[1] := __A.qword[1]
+/// W[0] := __A.qword[0]
+/// dst.qword[3] := W[3] + s0(W[4])
+/// dst.qword[2] := W[2] + s0(W[3])
+/// dst.qword[1] := W[1] + s0(W[2])
+/// dst.qword[0] := W[0] + s0(W[1])
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sha512msg1_epi64(__m256i __A, __m128i __B) {
+ return (__m256i)__builtin_ia32_vsha512msg1((__v4du)__A, (__v2du)__B);
+}
+
+/// This intrinisc is one of the two SHA512 message scheduling instructions.
+/// The intrinsic performs the final calculation for the next four SHA512
+/// message qwords. The calculated results are stored in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_sha512msg2_epi64(__m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSHA512MSG2 instruction.
+///
+/// \param __A
+/// A 256-bit vector of [4 x long long].
+/// \param __B
+/// A 256-bit vector of [4 x long long].
+/// \returns
+/// A 256-bit vector of [4 x long long].
+///
+/// \code{.operation}
+/// DEFINE ROR64(qword, n) {
+/// count := n % 64
+/// dest := (qword >> count) | (qword << (64 - count))
+/// RETURN dest
+/// }
+/// DEFINE SHR64(qword, n) {
+/// RETURN qword >> n
+/// }
+/// DEFINE s1(qword) {
+/// RETURN ROR64(qword,19) ^ ROR64(qword, 61) ^ SHR64(qword, 6)
+/// }
+/// W[14] := __B.qword[2]
+/// W[15] := __B.qword[3]
+/// W[16] := __A.qword[0] + s1(W[14])
+/// W[17] := __A.qword[1] + s1(W[15])
+/// W[18] := __A.qword[2] + s1(W[16])
+/// W[19] := __A.qword[3] + s1(W[17])
+/// dst.qword[3] := W[19]
+/// dst.qword[2] := W[18]
+/// dst.qword[1] := W[17]
+/// dst.qword[0] := W[16]
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sha512msg2_epi64(__m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_vsha512msg2((__v4du)__A, (__v4du)__B);
+}
+
+/// This intrinisc performs two rounds of SHA512 operation using initial SHA512
+/// state (C,D,G,H) from \a __A, an initial SHA512 state (A,B,E,F) from
+/// \a __A, and a pre-computed sum of the next two round message qwords and
+/// the corresponding round constants from \a __C (only the two lower qwords
+/// of the third operand). The updated SHA512 state (A,B,E,F) is written to
+/// \a __A, and \a __A can be used as the updated state (C,D,G,H) in later
+/// rounds.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_sha512rnds2_epi64(__m256i __A, __m256i __B, __m128i __C)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSHA512RNDS2 instruction.
+///
+/// \param __A
+/// A 256-bit vector of [4 x long long].
+/// \param __B
+/// A 256-bit vector of [4 x long long].
+/// \param __C
+/// A 128-bit vector of [2 x long long].
+/// \returns
+/// A 256-bit vector of [4 x long long].
+///
+/// \code{.operation}
+/// DEFINE ROR64(qword, n) {
+/// count := n % 64
+/// dest := (qword >> count) | (qword << (64 - count))
+/// RETURN dest
+/// }
+/// DEFINE SHR64(qword, n) {
+/// RETURN qword >> n
+/// }
+/// DEFINE cap_sigma0(qword) {
+/// RETURN ROR64(qword,28) ^ ROR64(qword, 34) ^ ROR64(qword, 39)
+/// }
+/// DEFINE cap_sigma1(qword) {
+/// RETURN ROR64(qword,14) ^ ROR64(qword, 18) ^ ROR64(qword, 41)
+/// }
+/// DEFINE MAJ(a,b,c) {
+/// RETURN (a & b) ^ (a & c) ^ (b & c)
+/// }
+/// DEFINE CH(e,f,g) {
+/// RETURN (e & f) ^ (g & ~e)
+/// }
+/// A[0] := __B.qword[3]
+/// B[0] := __B.qword[2]
+/// C[0] := __C.qword[3]
+/// D[0] := __C.qword[2]
+/// E[0] := __B.qword[1]
+/// F[0] := __B.qword[0]
+/// G[0] := __C.qword[1]
+/// H[0] := __C.qword[0]
+/// WK[0]:= __A.qword[0]
+/// WK[1]:= __A.qword[1]
+/// FOR i := 0 to 1:
+/// A[i+1] := CH(E[i], F[i], G[i]) +
+/// cap_sigma1(E[i]) + WK[i] + H[i] +
+/// MAJ(A[i], B[i], C[i]) +
+/// cap_sigma0(A[i])
+/// B[i+1] := A[i]
+/// C[i+1] := B[i]
+/// D[i+1] := C[i]
+/// E[i+1] := CH(E[i], F[i], G[i]) +
+/// cap_sigma1(E[i]) + WK[i] + H[i] + D[i]
+/// F[i+1] := E[i]
+/// G[i+1] := F[i]
+/// H[i+1] := G[i]
+/// ENDFOR
+/// dst.qword[3] := A[2]
+/// dst.qword[2] := B[2]
+/// dst.qword[1] := E[2]
+/// dst.qword[0] := F[2]
+/// dst[MAX:256] := 0
+/// \endcode
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_sha512rnds2_epi64(__m256i __A, __m256i __B, __m128i __C) {
+ return (__m256i)__builtin_ia32_vsha512rnds2((__v4du)__A, (__v4du)__B,
+ (__v2du)__C);
+}
+
+#undef __DEFAULT_FN_ATTRS256
+
+#endif // __SHA512INTRIN_H
diff --git a/contrib/llvm-project/clang/lib/Headers/shaintrin.h b/contrib/llvm-project/clang/lib/Headers/shaintrin.h
index 08b1fb1dc16a..232e1fa29823 100644
--- a/contrib/llvm-project/clang/lib/Headers/shaintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/shaintrin.h
@@ -17,39 +17,167 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sha"), __min_vector_width__(128)))
+/// Performs four iterations of the inner loop of the SHA-1 message digest
+/// algorithm using the starting SHA-1 state (A, B, C, D) from the 128-bit
+/// vector of [4 x i32] in \a V1 and the next four 32-bit elements of the
+/// message from the 128-bit vector of [4 x i32] in \a V2. Note that the
+/// SHA-1 state variable E must have already been added to \a V2
+/// (\c _mm_sha1nexte_epu32() can perform this step). Returns the updated
+/// SHA-1 state (A, B, C, D) as a 128-bit vector of [4 x i32].
+///
+/// The SHA-1 algorithm has an inner loop of 80 iterations, twenty each
+/// with a different combining function and rounding constant. This
+/// intrinsic performs four iterations using a combining function and
+/// rounding constant selected by \a M[1:0].
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_sha1rnds4_epu32(__m128i V1, __m128i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the \c SHA1RNDS4 instruction.
+///
+/// \param V1
+/// A 128-bit vector of [4 x i32] containing the initial SHA-1 state.
+/// \param V2
+/// A 128-bit vector of [4 x i32] containing the next four elements of
+/// the message, plus SHA-1 state variable E.
+/// \param M
+/// An immediate value where bits [1:0] select among four possible
+/// combining functions and rounding constants (not specified here).
+/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-1 state.
#define _mm_sha1rnds4_epu32(V1, V2, M) \
__builtin_ia32_sha1rnds4((__v4si)(__m128i)(V1), (__v4si)(__m128i)(V2), (M))
+/// Calculates the SHA-1 state variable E from the SHA-1 state variables in
+/// the 128-bit vector of [4 x i32] in \a __X, adds that to the next set of
+/// four message elements in the 128-bit vector of [4 x i32] in \a __Y, and
+/// returns the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c SHA1NEXTE instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] containing the current SHA-1 state.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing the next four elements of the
+/// message.
+/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-1
+/// values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha1nexte_epu32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_sha1nexte((__v4si)__X, (__v4si)__Y);
}
+/// Performs an intermediate calculation for deriving the next four SHA-1
+/// message elements using previous message elements from the 128-bit
+/// vectors of [4 x i32] in \a __X and \a __Y, and returns the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c SHA1MSG1 instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] containing previous message elements.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing previous message elements.
+/// \returns A 128-bit vector of [4 x i32] containing the derived SHA-1
+/// elements.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha1msg1_epu32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_sha1msg1((__v4si)__X, (__v4si)__Y);
}
+/// Performs the final calculation for deriving the next four SHA-1 message
+/// elements using previous message elements from the 128-bit vectors of
+/// [4 x i32] in \a __X and \a __Y, and returns the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c SHA1MSG2 instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] containing an intermediate result.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing previous message values.
+/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-1
+/// values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha1msg2_epu32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_sha1msg2((__v4si)__X, (__v4si)__Y);
}
+/// Performs two rounds of SHA-256 operation using the following inputs: a
+/// starting SHA-256 state (C, D, G, H) from the 128-bit vector of
+/// [4 x i32] in \a __X; a starting SHA-256 state (A, B, E, F) from the
+/// 128-bit vector of [4 x i32] in \a __Y; and a pre-computed sum of the
+/// next two message elements (unsigned 32-bit integers) and corresponding
+/// rounding constants from the 128-bit vector of [4 x i32] in \a __Z.
+/// Returns the updated SHA-256 state (A, B, E, F) as a 128-bit vector of
+/// [4 x i32].
+///
+/// The SHA-256 algorithm has a core loop of 64 iterations. This intrinsic
+/// performs two of those iterations.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c SHA256RNDS2 instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] containing part of the initial SHA-256
+/// state.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing part of the initial SHA-256
+/// state.
+/// \param __Z
+/// A 128-bit vector of [4 x i32] containing additional input to the
+/// SHA-256 operation.
+/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-1 state.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha256rnds2_epu32(__m128i __X, __m128i __Y, __m128i __Z)
{
return (__m128i)__builtin_ia32_sha256rnds2((__v4si)__X, (__v4si)__Y, (__v4si)__Z);
}
+/// Performs an intermediate calculation for deriving the next four SHA-256
+/// message elements using previous message elements from the 128-bit
+/// vectors of [4 x i32] in \a __X and \a __Y, and returns the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c SHA256MSG1 instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] containing previous message elements.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing previous message elements.
+/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-256
+/// values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha256msg1_epu32(__m128i __X, __m128i __Y)
{
return (__m128i)__builtin_ia32_sha256msg1((__v4si)__X, (__v4si)__Y);
}
+/// Performs the final calculation for deriving the next four SHA-256 message
+/// elements using previous message elements from the 128-bit vectors of
+/// [4 x i32] in \a __X and \a __Y, and returns the result.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c SHA256MSG2 instruction.
+///
+/// \param __X
+/// A 128-bit vector of [4 x i32] containing an intermediate result.
+/// \param __Y
+/// A 128-bit vector of [4 x i32] containing previous message values.
+/// \returns A 128-bit vector of [4 x i32] containing the updated SHA-256
+/// values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sha256msg2_epu32(__m128i __X, __m128i __Y)
{
diff --git a/contrib/llvm-project/clang/include/clang/Analysis/AnalysisDiagnostic.h b/contrib/llvm-project/clang/lib/Headers/sifive_vector.h
index fd5f2ffe6483..42d7224db614 100644
--- a/contrib/llvm-project/clang/include/clang/Analysis/AnalysisDiagnostic.h
+++ b/contrib/llvm-project/clang/lib/Headers/sifive_vector.h
@@ -1,4 +1,4 @@
-//===--- DiagnosticAnalysis.h - Diagnostics for libanalysis -----*- C++ -*-===//
+//===----- sifive_vector.h - SiFive Vector definitions --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,9 +6,11 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_ANALYSIS_ANALYSISDIAGNOSTIC_H
-#define LLVM_CLANG_ANALYSIS_ANALYSISDIAGNOSTIC_H
+#ifndef _SIFIVE_VECTOR_H_
+#define _SIFIVE_VECTOR_H_
-#include "clang/Basic/DiagnosticAnalysis.h"
+#include "riscv_vector.h"
-#endif
+#pragma clang riscv intrinsic sifive_vector
+
+#endif //_SIFIVE_VECTOR_H_
diff --git a/contrib/llvm-project/clang/lib/Headers/sm3intrin.h b/contrib/llvm-project/clang/lib/Headers/sm3intrin.h
new file mode 100644
index 000000000000..8a3d8bc9ef01
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/sm3intrin.h
@@ -0,0 +1,238 @@
+/*===-------------------- sm3intrin.h - SM3 intrinsics ---------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <sm3intrin.h> directly; include <immintrin.h> instead."
+#endif // __IMMINTRIN_H
+
+#ifndef __SM3INTRIN_H
+#define __SM3INTRIN_H
+
+#define __DEFAULT_FN_ATTRS128 \
+ __attribute__((__always_inline__, __nodebug__, __target__("sm3"), \
+ __min_vector_width__(128)))
+
+/// This intrinisc is one of the two SM3 message scheduling intrinsics. The
+/// intrinsic performs an initial calculation for the next four SM3 message
+/// words. The calculated results are stored in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_sm3msg1_epi32(__m128i __A, __m128i __B, __m128i __C)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSM3MSG1 instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x int].
+/// \param __B
+/// A 128-bit vector of [4 x int].
+/// \param __C
+/// A 128-bit vector of [4 x int].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// DEFINE ROL32(dword, n) {
+/// count := n % 32
+/// dest := (dword << count) | (dword >> (32 - count))
+/// RETURN dest
+/// }
+/// DEFINE P1(x) {
+/// RETURN x ^ ROL32(x, 15) ^ ROL32(x, 23)
+/// }
+/// W[0] := __C.dword[0]
+/// W[1] := __C.dword[1]
+/// W[2] := __C.dword[2]
+/// W[3] := __C.dword[3]
+/// W[7] := __A.dword[0]
+/// W[8] := __A.dword[1]
+/// W[9] := __A.dword[2]
+/// W[10] := __A.dword[3]
+/// W[13] := __B.dword[0]
+/// W[14] := __B.dword[1]
+/// W[15] := __B.dword[2]
+/// TMP0 := W[7] ^ W[0] ^ ROL32(W[13], 15)
+/// TMP1 := W[8] ^ W[1] ^ ROL32(W[14], 15)
+/// TMP2 := W[9] ^ W[2] ^ ROL32(W[15], 15)
+/// TMP3 := W[10] ^ W[3]
+/// dst.dword[0] := P1(TMP0)
+/// dst.dword[1] := P1(TMP1)
+/// dst.dword[2] := P1(TMP2)
+/// dst.dword[3] := P1(TMP3)
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_sm3msg1_epi32(__m128i __A,
+ __m128i __B,
+ __m128i __C) {
+ return (__m128i)__builtin_ia32_vsm3msg1((__v4su)__A, (__v4su)__B,
+ (__v4su)__C);
+}
+
+/// This intrinisc is one of the two SM3 message scheduling intrinsics. The
+/// intrinsic performs the final calculation for the next four SM3 message
+/// words. The calculated results are stored in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_sm3msg2_epi32(__m128i __A, __m128i __B, __m128i __C)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSM3MSG2 instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x int].
+/// \param __B
+/// A 128-bit vector of [4 x int].
+/// \param __C
+/// A 128-bit vector of [4 x int].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// DEFINE ROL32(dword, n) {
+/// count := n % 32
+/// dest := (dword << count) | (dword >> (32-count))
+/// RETURN dest
+/// }
+/// WTMP[0] := __A.dword[0]
+/// WTMP[1] := __A.dword[1]
+/// WTMP[2] := __A.dword[2]
+/// WTMP[3] := __A.dword[3]
+/// W[3] := __B.dword[0]
+/// W[4] := __B.dword[1]
+/// W[5] := __B.dword[2]
+/// W[6] := __B.dword[3]
+/// W[10] := __C.dword[0]
+/// W[11] := __C.dword[1]
+/// W[12] := __C.dword[2]
+/// W[13] := __C.dword[3]
+/// W[16] := ROL32(W[3], 7) ^ W[10] ^ WTMP[0]
+/// W[17] := ROL32(W[4], 7) ^ W[11] ^ WTMP[1]
+/// W[18] := ROL32(W[5], 7) ^ W[12] ^ WTMP[2]
+/// W[19] := ROL32(W[6], 7) ^ W[13] ^ WTMP[3]
+/// W[19] := W[19] ^ ROL32(W[16], 6) ^ ROL32(W[16], 15) ^ ROL32(W[16], 30)
+/// dst.dword[0] := W[16]
+/// dst.dword[1] := W[17]
+/// dst.dword[2] := W[18]
+/// dst.dword[3] := W[19]
+/// dst[MAX:128] := 0
+/// \endcode
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_sm3msg2_epi32(__m128i __A,
+ __m128i __B,
+ __m128i __C) {
+ return (__m128i)__builtin_ia32_vsm3msg2((__v4su)__A, (__v4su)__B,
+ (__v4su)__C);
+}
+
+/// This intrinsic performs two rounds of SM3 operation using initial SM3 state
+/// (C, D, G, H) from \a __A, an initial SM3 states (A, B, E, F)
+/// from \a __B and a pre-computed words from the \a __C. \a __A with
+/// initial SM3 state of (C, D, G, H) assumes input of non-rotated left
+/// variables from previous state. The updated SM3 state (A, B, E, F) is
+/// written to \a __A. The \a imm8 should contain the even round number
+/// for the first of the two rounds computed by this instruction. The
+/// computation masks the \a imm8 value by AND’ing it with 0x3E so that only
+/// even round numbers from 0 through 62 are used for this operation. The
+/// calculated results are stored in \a dst.
+///
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_sm3rnds2_epi32(__m128i __A, __m128i __B, __m128i __C, const int
+/// imm8) \endcode
+///
+/// This intrinsic corresponds to the \c VSM3RNDS2 instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x int].
+/// \param __B
+/// A 128-bit vector of [4 x int].
+/// \param __C
+/// A 128-bit vector of [4 x int].
+/// \param imm8
+/// A 8-bit constant integer.
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// DEFINE ROL32(dword, n) {
+/// count := n % 32
+/// dest := (dword << count) | (dword >> (32-count))
+/// RETURN dest
+/// }
+/// DEFINE P0(dword) {
+/// RETURN dword ^ ROL32(dword, 9) ^ ROL32(dword, 17)
+/// }
+/// DEFINE FF(x,y,z, round){
+/// IF round < 16
+/// RETURN (x ^ y ^ z)
+/// ELSE
+/// RETURN (x & y) | (x & z) | (y & z)
+/// FI
+/// }
+/// DEFINE GG(x, y, z, round){
+/// IF round < 16
+/// RETURN (x ^ y ^ z)
+/// ELSE
+/// RETURN (x & y) | (~x & z)
+/// FI
+/// }
+/// A[0] := __B.dword[3]
+/// B[0] := __B.dword[2]
+/// C[0] := __A.dword[3]
+/// D[0] := __A.dword[2]
+/// E[0] := __B.dword[1]
+/// F[0] := __B.dword[0]
+/// G[0] := __A.dword[1]
+/// H[0] := __A.dword[0]
+/// W[0] := __C.dword[0]
+/// W[1] := __C.dword[1]
+/// W[4] := __C.dword[2]
+/// W[5] := __C.dword[3]
+/// C[0] := ROL32(C[0], 9)
+/// D[0] := ROL32(D[0], 9)
+/// G[0] := ROL32(G[0], 19)
+/// H[0] := ROL32(H[0], 19)
+/// ROUND := __D & 0x3E
+/// IF ROUND < 16
+/// CONST := 0x79CC4519
+/// ELSE
+/// CONST := 0x7A879D8A
+/// FI
+/// CONST := ROL32(CONST,ROUND)
+/// FOR i:= 0 to 1
+/// S1 := ROL32((ROL32(A[i], 12) + E[i] + CONST), 7)
+/// S2 := S1 ^ ROL32(A[i], 12)
+/// T1 := FF(A[i], B[i], C[i], ROUND) + D[i] + S2 + (W[i] ^ W[i+4])
+/// T2 := GG(E[i], F[i], G[i], ROUND) + H[i] + S1 + W[i]
+/// D[i+1] := C[i]
+/// C[i+1] := ROL32(B[i],9)
+/// B[i+1] := A[i]
+/// A[i+1] := T1
+/// H[i+1] := G[i]
+/// G[i+1] := ROL32(F[i], 19)
+/// F[i+1] := E[i]
+/// E[i+1] := P0(T2)
+/// CONST := ROL32(CONST, 1)
+/// ENDFOR
+/// dst.dword[3] := A[2]
+/// dst.dword[2] := B[2]
+/// dst.dword[1] := E[2]
+/// dst.dword[0] := F[2]
+/// dst[MAX:128] := 0
+/// \endcode
+#define _mm_sm3rnds2_epi32(A, B, C, D) \
+ (__m128i) __builtin_ia32_vsm3rnds2((__v4su)A, (__v4su)B, (__v4su)C, (int)D)
+
+#undef __DEFAULT_FN_ATTRS128
+
+#endif // __SM3INTRIN_H
diff --git a/contrib/llvm-project/clang/lib/Headers/sm4intrin.h b/contrib/llvm-project/clang/lib/Headers/sm4intrin.h
new file mode 100644
index 000000000000..47aeec46a6fc
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/sm4intrin.h
@@ -0,0 +1,269 @@
+/*===--------------- sm4intrin.h - SM4 intrinsics -----------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <sm4intrin.h> directly; include <immintrin.h> instead."
+#endif // __IMMINTRIN_H
+
+#ifndef __SM4INTRIN_H
+#define __SM4INTRIN_H
+
+/// This intrinsic performs four rounds of SM4 key expansion. The intrinsic
+/// operates on independent 128-bit lanes. The calculated results are
+/// stored in \a dst.
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_sm4key4_epi32(__m128i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSM4KEY4 instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x int].
+/// \param __B
+/// A 128-bit vector of [4 x int].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// DEFINE ROL32(dword, n) {
+/// count := n % 32
+/// dest := (dword << count) | (dword >> (32-count))
+/// RETURN dest
+/// }
+/// DEFINE SBOX_BYTE(dword, i) {
+/// RETURN sbox[dword.byte[i]]
+/// }
+/// DEFINE lower_t(dword) {
+/// tmp.byte[0] := SBOX_BYTE(dword, 0)
+/// tmp.byte[1] := SBOX_BYTE(dword, 1)
+/// tmp.byte[2] := SBOX_BYTE(dword, 2)
+/// tmp.byte[3] := SBOX_BYTE(dword, 3)
+/// RETURN tmp
+/// }
+/// DEFINE L_KEY(dword) {
+/// RETURN dword ^ ROL32(dword, 13) ^ ROL32(dword, 23)
+/// }
+/// DEFINE T_KEY(dword) {
+/// RETURN L_KEY(lower_t(dword))
+/// }
+/// DEFINE F_KEY(X0, X1, X2, X3, round_key) {
+/// RETURN X0 ^ T_KEY(X1 ^ X2 ^ X3 ^ round_key)
+/// }
+/// FOR i:= 0 to 0
+/// P[0] := __B.xmm[i].dword[0]
+/// P[1] := __B.xmm[i].dword[1]
+/// P[2] := __B.xmm[i].dword[2]
+/// P[3] := __B.xmm[i].dword[3]
+/// C[0] := F_KEY(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0])
+/// C[1] := F_KEY(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1])
+/// C[2] := F_KEY(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2])
+/// C[3] := F_KEY(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3])
+/// DEST.xmm[i].dword[0] := C[0]
+/// DEST.xmm[i].dword[1] := C[1]
+/// DEST.xmm[i].dword[2] := C[2]
+/// DEST.xmm[i].dword[3] := C[3]
+/// ENDFOR
+/// DEST[MAX:128] := 0
+/// \endcode
+#define _mm_sm4key4_epi32(A, B) \
+ (__m128i) __builtin_ia32_vsm4key4128((__v4su)A, (__v4su)B)
+
+/// This intrinsic performs four rounds of SM4 key expansion. The intrinsic
+/// operates on independent 128-bit lanes. The calculated results are
+/// stored in \a dst.
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_sm4key4_epi32(__m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSM4KEY4 instruction.
+///
+/// \param __A
+/// A 256-bit vector of [8 x int].
+/// \param __B
+/// A 256-bit vector of [8 x int].
+/// \returns
+/// A 256-bit vector of [8 x int].
+///
+/// \code{.operation}
+/// DEFINE ROL32(dword, n) {
+/// count := n % 32
+/// dest := (dword << count) | (dword >> (32-count))
+/// RETURN dest
+/// }
+/// DEFINE SBOX_BYTE(dword, i) {
+/// RETURN sbox[dword.byte[i]]
+/// }
+/// DEFINE lower_t(dword) {
+/// tmp.byte[0] := SBOX_BYTE(dword, 0)
+/// tmp.byte[1] := SBOX_BYTE(dword, 1)
+/// tmp.byte[2] := SBOX_BYTE(dword, 2)
+/// tmp.byte[3] := SBOX_BYTE(dword, 3)
+/// RETURN tmp
+/// }
+/// DEFINE L_KEY(dword) {
+/// RETURN dword ^ ROL32(dword, 13) ^ ROL32(dword, 23)
+/// }
+/// DEFINE T_KEY(dword) {
+/// RETURN L_KEY(lower_t(dword))
+/// }
+/// DEFINE F_KEY(X0, X1, X2, X3, round_key) {
+/// RETURN X0 ^ T_KEY(X1 ^ X2 ^ X3 ^ round_key)
+/// }
+/// FOR i:= 0 to 1
+/// P[0] := __B.xmm[i].dword[0]
+/// P[1] := __B.xmm[i].dword[1]
+/// P[2] := __B.xmm[i].dword[2]
+/// P[3] := __B.xmm[i].dword[3]
+/// C[0] := F_KEY(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0])
+/// C[1] := F_KEY(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1])
+/// C[2] := F_KEY(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2])
+/// C[3] := F_KEY(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3])
+/// DEST.xmm[i].dword[0] := C[0]
+/// DEST.xmm[i].dword[1] := C[1]
+/// DEST.xmm[i].dword[2] := C[2]
+/// DEST.xmm[i].dword[3] := C[3]
+/// ENDFOR
+/// DEST[MAX:256] := 0
+/// \endcode
+#define _mm256_sm4key4_epi32(A, B) \
+ (__m256i) __builtin_ia32_vsm4key4256((__v8su)A, (__v8su)B)
+
+/// This intrinisc performs four rounds of SM4 encryption. The intrinisc
+/// operates on independent 128-bit lanes. The calculated results are
+/// stored in \a dst.
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m128i _mm_sm4rnds4_epi32(__m128i __A, __m128i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSM4RNDS4 instruction.
+///
+/// \param __A
+/// A 128-bit vector of [4 x int].
+/// \param __B
+/// A 128-bit vector of [4 x int].
+/// \returns
+/// A 128-bit vector of [4 x int].
+///
+/// \code{.operation}
+/// DEFINE ROL32(dword, n) {
+/// count := n % 32
+/// dest := (dword << count) | (dword >> (32-count))
+/// RETURN dest
+/// }
+/// DEFINE lower_t(dword) {
+/// tmp.byte[0] := SBOX_BYTE(dword, 0)
+/// tmp.byte[1] := SBOX_BYTE(dword, 1)
+/// tmp.byte[2] := SBOX_BYTE(dword, 2)
+/// tmp.byte[3] := SBOX_BYTE(dword, 3)
+/// RETURN tmp
+/// }
+/// DEFINE L_RND(dword) {
+/// tmp := dword
+/// tmp := tmp ^ ROL32(dword, 2)
+/// tmp := tmp ^ ROL32(dword, 10)
+/// tmp := tmp ^ ROL32(dword, 18)
+/// tmp := tmp ^ ROL32(dword, 24)
+/// RETURN tmp
+/// }
+/// DEFINE T_RND(dword) {
+/// RETURN L_RND(lower_t(dword))
+/// }
+/// DEFINE F_RND(X0, X1, X2, X3, round_key) {
+/// RETURN X0 ^ T_RND(X1 ^ X2 ^ X3 ^ round_key)
+/// }
+/// FOR i:= 0 to 0
+/// P[0] := __B.xmm[i].dword[0]
+/// P[1] := __B.xmm[i].dword[1]
+/// P[2] := __B.xmm[i].dword[2]
+/// P[3] := __B.xmm[i].dword[3]
+/// C[0] := F_RND(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0])
+/// C[1] := F_RND(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1])
+/// C[2] := F_RND(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2])
+/// C[3] := F_RND(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3])
+/// DEST.xmm[i].dword[0] := C[0]
+/// DEST.xmm[i].dword[1] := C[1]
+/// DEST.xmm[i].dword[2] := C[2]
+/// DEST.xmm[i].dword[3] := C[3]
+/// ENDFOR
+/// DEST[MAX:128] := 0
+/// \endcode
+#define _mm_sm4rnds4_epi32(A, B) \
+ (__m128i) __builtin_ia32_vsm4rnds4128((__v4su)A, (__v4su)B)
+
+/// This intrinisc performs four rounds of SM4 encryption. The intrinisc
+/// operates on independent 128-bit lanes. The calculated results are
+/// stored in \a dst.
+/// \headerfile <immintrin.h>
+///
+/// \code
+/// __m256i _mm256_sm4rnds4_epi32(__m256i __A, __m256i __B)
+/// \endcode
+///
+/// This intrinsic corresponds to the \c VSM4RNDS4 instruction.
+///
+/// \param __A
+/// A 256-bit vector of [8 x int].
+/// \param __B
+/// A 256-bit vector of [8 x int].
+/// \returns
+/// A 256-bit vector of [8 x int].
+///
+/// \code{.operation}
+/// DEFINE ROL32(dword, n) {
+/// count := n % 32
+/// dest := (dword << count) | (dword >> (32-count))
+/// RETURN dest
+/// }
+/// DEFINE lower_t(dword) {
+/// tmp.byte[0] := SBOX_BYTE(dword, 0)
+/// tmp.byte[1] := SBOX_BYTE(dword, 1)
+/// tmp.byte[2] := SBOX_BYTE(dword, 2)
+/// tmp.byte[3] := SBOX_BYTE(dword, 3)
+/// RETURN tmp
+/// }
+/// DEFINE L_RND(dword) {
+/// tmp := dword
+/// tmp := tmp ^ ROL32(dword, 2)
+/// tmp := tmp ^ ROL32(dword, 10)
+/// tmp := tmp ^ ROL32(dword, 18)
+/// tmp := tmp ^ ROL32(dword, 24)
+/// RETURN tmp
+/// }
+/// DEFINE T_RND(dword) {
+/// RETURN L_RND(lower_t(dword))
+/// }
+/// DEFINE F_RND(X0, X1, X2, X3, round_key) {
+/// RETURN X0 ^ T_RND(X1 ^ X2 ^ X3 ^ round_key)
+/// }
+/// FOR i:= 0 to 0
+/// P[0] := __B.xmm[i].dword[0]
+/// P[1] := __B.xmm[i].dword[1]
+/// P[2] := __B.xmm[i].dword[2]
+/// P[3] := __B.xmm[i].dword[3]
+/// C[0] := F_RND(P[0], P[1], P[2], P[3], __A.xmm[i].dword[0])
+/// C[1] := F_RND(P[1], P[2], P[3], C[0], __A.xmm[i].dword[1])
+/// C[2] := F_RND(P[2], P[3], C[0], C[1], __A.xmm[i].dword[2])
+/// C[3] := F_RND(P[3], C[0], C[1], C[2], __A.xmm[i].dword[3])
+/// DEST.xmm[i].dword[0] := C[0]
+/// DEST.xmm[i].dword[1] := C[1]
+/// DEST.xmm[i].dword[2] := C[2]
+/// DEST.xmm[i].dword[3] := C[3]
+/// ENDFOR
+/// DEST[MAX:256] := 0
+/// \endcode
+#define _mm256_sm4rnds4_epi32(A, B) \
+ (__m256i) __builtin_ia32_vsm4rnds4256((__v8su)A, (__v8su)B)
+
+#endif // __SM4INTRIN_H
diff --git a/contrib/llvm-project/clang/lib/Headers/smmintrin.h b/contrib/llvm-project/clang/lib/Headers/smmintrin.h
index 025830a74280..005d7db9c3c3 100644
--- a/contrib/llvm-project/clang/lib/Headers/smmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/smmintrin.h
@@ -10,26 +10,32 @@
#ifndef __SMMINTRIN_H
#define __SMMINTRIN_H
+#if !defined(__i386__) && !defined(__x86_64__)
+#error "This header is only meant to be used on x86 and x64 architecture"
+#endif
+
#include <tmmintrin.h>
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.1"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("sse4.1,no-evex512"), __min_vector_width__(128)))
/* SSE4 Rounding macros. */
-#define _MM_FROUND_TO_NEAREST_INT 0x00
-#define _MM_FROUND_TO_NEG_INF 0x01
-#define _MM_FROUND_TO_POS_INF 0x02
-#define _MM_FROUND_TO_ZERO 0x03
-#define _MM_FROUND_CUR_DIRECTION 0x04
-
-#define _MM_FROUND_RAISE_EXC 0x00
-#define _MM_FROUND_NO_EXC 0x08
-
-#define _MM_FROUND_NINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
-#define _MM_FROUND_FLOOR (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
-#define _MM_FROUND_CEIL (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
-#define _MM_FROUND_TRUNC (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
-#define _MM_FROUND_RINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_NEG_INF 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_ZERO 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
+
+#define _MM_FROUND_RAISE_EXC 0x00
+#define _MM_FROUND_NO_EXC 0x08
+
+#define _MM_FROUND_NINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
+#define _MM_FROUND_FLOOR (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
+#define _MM_FROUND_CEIL (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
+#define _MM_FROUND_TRUNC (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
+#define _MM_FROUND_RINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
#define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)
/// Rounds up each element of the 128-bit vector of [4 x float] to an
@@ -47,7 +53,7 @@
/// \param X
/// A 128-bit vector of [4 x float] values to be rounded up.
/// \returns A 128-bit vector of [4 x float] containing the rounded values.
-#define _mm_ceil_ps(X) _mm_round_ps((X), _MM_FROUND_CEIL)
+#define _mm_ceil_ps(X) _mm_round_ps((X), _MM_FROUND_CEIL)
/// Rounds up each element of the 128-bit vector of [2 x double] to an
/// integer and returns the rounded values in a 128-bit vector of
@@ -64,7 +70,7 @@
/// \param X
/// A 128-bit vector of [2 x double] values to be rounded up.
/// \returns A 128-bit vector of [2 x double] containing the rounded values.
-#define _mm_ceil_pd(X) _mm_round_pd((X), _MM_FROUND_CEIL)
+#define _mm_ceil_pd(X) _mm_round_pd((X), _MM_FROUND_CEIL)
/// Copies three upper elements of the first 128-bit vector operand to
/// the corresponding three upper elements of the 128-bit result vector of
@@ -89,7 +95,7 @@
/// of the result.
/// \returns A 128-bit vector of [4 x float] containing the copied and rounded
/// values.
-#define _mm_ceil_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
+#define _mm_ceil_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
/// Copies the upper element of the first 128-bit vector operand to the
/// corresponding upper element of the 128-bit result vector of [2 x double].
@@ -114,7 +120,7 @@
/// of the result.
/// \returns A 128-bit vector of [2 x double] containing the copied and rounded
/// values.
-#define _mm_ceil_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_CEIL)
+#define _mm_ceil_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_CEIL)
/// Rounds down each element of the 128-bit vector of [4 x float] to an
/// an integer and returns the rounded values in a 128-bit vector of
@@ -131,7 +137,7 @@
/// \param X
/// A 128-bit vector of [4 x float] values to be rounded down.
/// \returns A 128-bit vector of [4 x float] containing the rounded values.
-#define _mm_floor_ps(X) _mm_round_ps((X), _MM_FROUND_FLOOR)
+#define _mm_floor_ps(X) _mm_round_ps((X), _MM_FROUND_FLOOR)
/// Rounds down each element of the 128-bit vector of [2 x double] to an
/// integer and returns the rounded values in a 128-bit vector of
@@ -148,7 +154,7 @@
/// \param X
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector of [2 x double] containing the rounded values.
-#define _mm_floor_pd(X) _mm_round_pd((X), _MM_FROUND_FLOOR)
+#define _mm_floor_pd(X) _mm_round_pd((X), _MM_FROUND_FLOOR)
/// Copies three upper elements of the first 128-bit vector operand to
/// the corresponding three upper elements of the 128-bit result vector of
@@ -173,7 +179,7 @@
/// of the result.
/// \returns A 128-bit vector of [4 x float] containing the copied and rounded
/// values.
-#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
+#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
/// Copies the upper element of the first 128-bit vector operand to the
/// corresponding upper element of the 128-bit result vector of [2 x double].
@@ -198,7 +204,7 @@
/// of the result.
/// \returns A 128-bit vector of [2 x double] containing the copied and rounded
/// values.
-#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
+#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
/// Rounds each element of the 128-bit vector of [4 x float] to an
/// integer value according to the rounding control specified by the second
@@ -230,8 +236,8 @@
/// 10: Upward (toward positive infinity) \n
/// 11: Truncated
/// \returns A 128-bit vector of [4 x float] containing the rounded values.
-#define _mm_round_ps(X, M) \
- (__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M))
+#define _mm_round_ps(X, M) \
+ ((__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M)))
/// Copies three upper elements of the first 128-bit vector operand to
/// the corresponding three upper elements of the 128-bit result vector of
@@ -271,9 +277,9 @@
/// 11: Truncated
/// \returns A 128-bit vector of [4 x float] containing the copied and rounded
/// values.
-#define _mm_round_ss(X, Y, M) \
- (__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (M))
+#define _mm_round_ss(X, Y, M) \
+ ((__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \
+ (M)))
/// Rounds each element of the 128-bit vector of [2 x double] to an
/// integer value according to the rounding control specified by the second
@@ -305,8 +311,8 @@
/// 10: Upward (toward positive infinity) \n
/// 11: Truncated
/// \returns A 128-bit vector of [2 x double] containing the rounded values.
-#define _mm_round_pd(X, M) \
- (__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M))
+#define _mm_round_pd(X, M) \
+ ((__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M)))
/// Copies the upper element of the first 128-bit vector operand to the
/// corresponding upper element of the 128-bit result vector of [2 x double].
@@ -346,9 +352,9 @@
/// 11: Truncated
/// \returns A 128-bit vector of [2 x double] containing the copied and rounded
/// values.
-#define _mm_round_sd(X, Y, M) \
- (__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (M))
+#define _mm_round_sd(X, Y, M) \
+ ((__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), \
+ (M)))
/* SSE4 Packed Blending Intrinsics. */
/// Returns a 128-bit vector of [2 x double] where the values are
@@ -375,9 +381,9 @@
/// When a mask bit is 1, the corresponding 64-bit element in operand \a V2
/// is copied to the same position in the result.
/// \returns A 128-bit vector of [2 x double] containing the copied values.
-#define _mm_blend_pd(V1, V2, M) \
- (__m128d) __builtin_ia32_blendpd ((__v2df)(__m128d)(V1), \
- (__v2df)(__m128d)(V2), (int)(M))
+#define _mm_blend_pd(V1, V2, M) \
+ ((__m128d)__builtin_ia32_blendpd((__v2df)(__m128d)(V1), \
+ (__v2df)(__m128d)(V2), (int)(M)))
/// Returns a 128-bit vector of [4 x float] where the values are selected
/// from either the first or second operand as specified by the third
@@ -403,9 +409,9 @@
/// When a mask bit is 1, the corresponding 32-bit element in operand \a V2
/// is copied to the same position in the result.
/// \returns A 128-bit vector of [4 x float] containing the copied values.
-#define _mm_blend_ps(V1, V2, M) \
- (__m128) __builtin_ia32_blendps ((__v4sf)(__m128)(V1), \
- (__v4sf)(__m128)(V2), (int)(M))
+#define _mm_blend_ps(V1, V2, M) \
+ ((__m128)__builtin_ia32_blendps((__v4sf)(__m128)(V1), (__v4sf)(__m128)(V2), \
+ (int)(M)))
/// Returns a 128-bit vector of [2 x double] where the values are
/// selected from either the first or second operand as specified by the
@@ -427,11 +433,11 @@
/// position in the result. When a mask bit is 1, the corresponding 64-bit
/// element in operand \a __V2 is copied to the same position in the result.
/// \returns A 128-bit vector of [2 x double] containing the copied values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
-{
- return (__m128d) __builtin_ia32_blendvpd ((__v2df)__V1, (__v2df)__V2,
- (__v2df)__M);
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_blendv_pd(__m128d __V1,
+ __m128d __V2,
+ __m128d __M) {
+ return (__m128d)__builtin_ia32_blendvpd((__v2df)__V1, (__v2df)__V2,
+ (__v2df)__M);
}
/// Returns a 128-bit vector of [4 x float] where the values are
@@ -454,11 +460,11 @@ _mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
/// position in the result. When a mask bit is 1, the corresponding 32-bit
/// element in operand \a __V2 is copied to the same position in the result.
/// \returns A 128-bit vector of [4 x float] containing the copied values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
-{
- return (__m128) __builtin_ia32_blendvps ((__v4sf)__V1, (__v4sf)__V2,
- (__v4sf)__M);
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_blendv_ps(__m128 __V1,
+ __m128 __V2,
+ __m128 __M) {
+ return (__m128)__builtin_ia32_blendvps((__v4sf)__V1, (__v4sf)__V2,
+ (__v4sf)__M);
}
/// Returns a 128-bit vector of [16 x i8] where the values are selected
@@ -481,11 +487,11 @@ _mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
/// position in the result. When a mask bit is 1, the corresponding 8-bit
/// element in operand \a __V2 is copied to the same position in the result.
/// \returns A 128-bit vector of [16 x i8] containing the copied values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
-{
- return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__V1, (__v16qi)__V2,
- (__v16qi)__M);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_blendv_epi8(__m128i __V1,
+ __m128i __V2,
+ __m128i __M) {
+ return (__m128i)__builtin_ia32_pblendvb128((__v16qi)__V1, (__v16qi)__V2,
+ (__v16qi)__M);
}
/// Returns a 128-bit vector of [8 x i16] where the values are selected
@@ -512,9 +518,9 @@ _mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
/// When a mask bit is 1, the corresponding 16-bit element in operand \a V2
/// is copied to the same position in the result.
/// \returns A 128-bit vector of [8 x i16] containing the copied values.
-#define _mm_blend_epi16(V1, V2, M) \
- (__m128i) __builtin_ia32_pblendw128 ((__v8hi)(__m128i)(V1), \
- (__v8hi)(__m128i)(V2), (int)(M))
+#define _mm_blend_epi16(V1, V2, M) \
+ ((__m128i)__builtin_ia32_pblendw128((__v8hi)(__m128i)(V1), \
+ (__v8hi)(__m128i)(V2), (int)(M)))
/* SSE4 Dword Multiply Instructions. */
/// Multiples corresponding elements of two 128-bit vectors of [4 x i32]
@@ -530,10 +536,9 @@ _mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
/// \param __V2
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the products of both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mullo_epi32 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) ((__v4su)__V1 * (__v4su)__V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi32(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)((__v4su)__V1 * (__v4su)__V2);
}
/// Multiplies corresponding even-indexed elements of two 128-bit
@@ -550,10 +555,9 @@ _mm_mullo_epi32 (__m128i __V1, __m128i __V2)
/// A 128-bit vector of [4 x i32].
/// \returns A 128-bit vector of [2 x i64] containing the products of both
/// operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mul_epi32 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__V1, (__v4si)__V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epi32(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_ia32_pmuldq128((__v4si)__V1, (__v4si)__V2);
}
/* SSE4 Floating Point Dot Product Instructions. */
@@ -589,9 +593,8 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
/// each [4 x float] subvector. If a bit is set, the dot product is returned
/// in the corresponding element; otherwise that element is set to zero.
/// \returns A 128-bit vector of [4 x float] containing the dot product.
-#define _mm_dp_ps(X, Y, M) \
- (__m128) __builtin_ia32_dpps((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (M))
+#define _mm_dp_ps(X, Y, M) \
+ ((__m128)__builtin_ia32_dpps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (M)))
/// Computes the dot product of the two 128-bit vectors of [2 x double]
/// and returns it in the elements of the 128-bit result vector of
@@ -624,9 +627,9 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
/// to the lowest element and bit [1] corresponding to the highest element of
/// each [2 x double] vector. If a bit is set, the dot product is returned in
/// the corresponding element; otherwise that element is set to zero.
-#define _mm_dp_pd(X, Y, M) \
- (__m128d) __builtin_ia32_dppd((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (M))
+#define _mm_dp_pd(X, Y, M) \
+ ((__m128d)__builtin_ia32_dppd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), \
+ (M)))
/* SSE4 Streaming Load Hint Instruction. */
/// Loads integer values from a 128-bit aligned memory location to a
@@ -641,10 +644,9 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
/// values.
/// \returns A 128-bit integer vector containing the data stored at the
/// specified memory location.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_stream_load_si128 (__m128i const *__V)
-{
- return (__m128i) __builtin_nontemporal_load ((const __v2di *) __V);
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_stream_load_si128(const void *__V) {
+ return (__m128i)__builtin_nontemporal_load((const __v2di *)__V);
}
/* SSE4 Packed Integer Min/Max Instructions. */
@@ -661,10 +663,9 @@ _mm_stream_load_si128 (__m128i const *__V)
/// \param __V2
/// A 128-bit vector of [16 x i8]
/// \returns A 128-bit vector of [16 x i8] containing the lesser values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epi8 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_ia32_pminsb128 ((__v16qi) __V1, (__v16qi) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi8(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_min((__v16qs)__V1, (__v16qs)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -680,10 +681,9 @@ _mm_min_epi8 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [16 x i8].
/// \returns A 128-bit vector of [16 x i8] containing the greater values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epi8 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi) __V1, (__v16qi) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi8(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_max((__v16qs)__V1, (__v16qs)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -699,10 +699,9 @@ _mm_max_epi8 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [8 x u16].
/// \returns A 128-bit vector of [8 x u16] containing the lesser values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epu16 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_ia32_pminuw128 ((__v8hi) __V1, (__v8hi) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu16(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_min((__v8hu)__V1, (__v8hu)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -718,10 +717,9 @@ _mm_min_epu16 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [8 x u16].
/// \returns A 128-bit vector of [8 x u16] containing the greater values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epu16 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi) __V1, (__v8hi) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu16(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_max((__v8hu)__V1, (__v8hu)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -737,10 +735,9 @@ _mm_max_epu16 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [4 x i32].
/// \returns A 128-bit vector of [4 x i32] containing the lesser values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epi32 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_ia32_pminsd128 ((__v4si) __V1, (__v4si) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi32(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_min((__v4si)__V1, (__v4si)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -756,10 +753,9 @@ _mm_min_epi32 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [4 x i32].
/// \returns A 128-bit vector of [4 x i32] containing the greater values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epi32 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si) __V1, (__v4si) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi32(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_max((__v4si)__V1, (__v4si)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -775,10 +771,9 @@ _mm_max_epi32 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [4 x u32].
/// \returns A 128-bit vector of [4 x u32] containing the lesser values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epu32 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_ia32_pminud128((__v4si) __V1, (__v4si) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu32(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_min((__v4su)__V1, (__v4su)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -794,10 +789,9 @@ _mm_min_epu32 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [4 x u32].
/// \returns A 128-bit vector of [4 x u32] containing the greater values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epu32 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_ia32_pmaxud128((__v4si) __V1, (__v4si) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu32(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_max((__v4su)__V1, (__v4su)__V2);
}
/* SSE4 Insertion and Extraction from XMM Register Instructions. */
@@ -824,7 +818,7 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// parameter, is copied to the result.
/// \param N
/// Specifies which bits from operand \a Y will be copied, which bits in the
-/// result they will be be copied to, and which bits in the result will be
+/// result they will be copied to, and which bits in the result will be
/// cleared. The following assignments are made: \n
/// Bits [7:6] specify the bits to copy from operand \a Y: \n
/// 00: Selects bits [31:0] from operand \a Y. \n
@@ -865,23 +859,24 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 10: Bits [95:64] of parameter \a X are returned. \n
/// 11: Bits [127:96] of parameter \a X are returned.
/// \returns A 32-bit integer containing the extracted 32 bits of float data.
-#define _mm_extract_ps(X, N) (__extension__ \
- ({ union { int __i; float __f; } __t; \
- __t.__f = __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)); \
- __t.__i;}))
+#define _mm_extract_ps(X, N) \
+ __builtin_bit_cast( \
+ int, __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)))
/* Miscellaneous insert and extract macros. */
/* Extract a single-precision float from X at index N into D. */
-#define _MM_EXTRACT_FLOAT(D, X, N) \
- { (D) = __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)); }
+#define _MM_EXTRACT_FLOAT(D, X, N) \
+ do { \
+ (D) = __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)); \
+ } while (0)
/* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create
an index suitable for _mm_insert_ps. */
#define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z))
/* Extract a float from X at index N into the first index of the return. */
-#define _MM_PICK_OUT_PS(X, N) _mm_insert_ps (_mm_setzero_ps(), (X), \
- _MM_MK_INSERTPS_NDX((N), 0, 0x0e))
+#define _MM_PICK_OUT_PS(X, N) \
+ _mm_insert_ps(_mm_setzero_ps(), (X), _MM_MK_INSERTPS_NDX((N), 0, 0x0e))
/* Insert int into packed integer array at index. */
/// Constructs a 128-bit vector of [16 x i8] by first making a copy of
@@ -924,9 +919,9 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 1110: Bits [119:112] of the result are used for insertion. \n
/// 1111: Bits [127:120] of the result are used for insertion.
/// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi8(X, I, N) \
- (__m128i)__builtin_ia32_vec_set_v16qi((__v16qi)(__m128i)(X), \
- (int)(I), (int)(N))
+#define _mm_insert_epi8(X, I, N) \
+ ((__m128i)__builtin_ia32_vec_set_v16qi((__v16qi)(__m128i)(X), (int)(I), \
+ (int)(N)))
/// Constructs a 128-bit vector of [4 x i32] by first making a copy of
/// the 128-bit integer vector parameter, and then inserting the 32-bit
@@ -956,9 +951,9 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 10: Bits [95:64] of the result are used for insertion. \n
/// 11: Bits [127:96] of the result are used for insertion.
/// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi32(X, I, N) \
- (__m128i)__builtin_ia32_vec_set_v4si((__v4si)(__m128i)(X), \
- (int)(I), (int)(N))
+#define _mm_insert_epi32(X, I, N) \
+ ((__m128i)__builtin_ia32_vec_set_v4si((__v4si)(__m128i)(X), (int)(I), \
+ (int)(N)))
#ifdef __x86_64__
/// Constructs a 128-bit vector of [2 x i64] by first making a copy of
@@ -987,9 +982,9 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 0: Bits [63:0] of the result are used for insertion. \n
/// 1: Bits [127:64] of the result are used for insertion. \n
/// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi64(X, I, N) \
- (__m128i)__builtin_ia32_vec_set_v2di((__v2di)(__m128i)(X), \
- (long long)(I), (int)(N))
+#define _mm_insert_epi64(X, I, N) \
+ ((__m128i)__builtin_ia32_vec_set_v2di((__v2di)(__m128i)(X), (long long)(I), \
+ (int)(N)))
#endif /* __x86_64__ */
/* Extract int from packed integer array at index. This returns the element
@@ -1030,9 +1025,9 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// \returns An unsigned integer, whose lower 8 bits are selected from the
/// 128-bit integer vector parameter and the remaining bits are assigned
/// zeros.
-#define _mm_extract_epi8(X, N) \
- (int)(unsigned char)__builtin_ia32_vec_ext_v16qi((__v16qi)(__m128i)(X), \
- (int)(N))
+#define _mm_extract_epi8(X, N) \
+ ((int)(unsigned char)__builtin_ia32_vec_ext_v16qi((__v16qi)(__m128i)(X), \
+ (int)(N)))
/// Extracts a 32-bit element from the 128-bit integer vector of
/// [4 x i32], using the immediate value parameter \a N as a selector.
@@ -1056,10 +1051,9 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 11: Bits [127:96] of the parameter \a X are exracted.
/// \returns An integer, whose lower 32 bits are selected from the 128-bit
/// integer vector parameter and the remaining bits are assigned zeros.
-#define _mm_extract_epi32(X, N) \
- (int)__builtin_ia32_vec_ext_v4si((__v4si)(__m128i)(X), (int)(N))
+#define _mm_extract_epi32(X, N) \
+ ((int)__builtin_ia32_vec_ext_v4si((__v4si)(__m128i)(X), (int)(N)))
-#ifdef __x86_64__
/// Extracts a 64-bit element from the 128-bit integer vector of
/// [2 x i64], using the immediate value parameter \a N as a selector.
///
@@ -1069,7 +1063,8 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// long long _mm_extract_epi64(__m128i X, const int N);
/// \endcode
///
-/// This intrinsic corresponds to the <c> VPEXTRQ / PEXTRQ </c> instruction.
+/// This intrinsic corresponds to the <c> VPEXTRQ / PEXTRQ </c> instruction
+/// in 64-bit mode.
///
/// \param X
/// A 128-bit integer vector.
@@ -1079,9 +1074,8 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 0: Bits [63:0] are returned. \n
/// 1: Bits [127:64] are returned. \n
/// \returns A 64-bit integer.
-#define _mm_extract_epi64(X, N) \
- (long long)__builtin_ia32_vec_ext_v2di((__v2di)(__m128i)(X), (int)(N))
-#endif /* __x86_64 */
+#define _mm_extract_epi64(X, N) \
+ ((long long)__builtin_ia32_vec_ext_v2di((__v2di)(__m128i)(X), (int)(N)))
/* SSE4 128-bit Packed Integer Comparisons. */
/// Tests whether the specified bits in a 128-bit integer vector are all
@@ -1096,9 +1090,8 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// \param __V
/// A 128-bit integer vector selecting which bits to test in operand \a __M.
/// \returns TRUE if the specified bits are all zeros; FALSE otherwise.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_testz_si128(__m128i __M, __m128i __V)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_testz_si128(__m128i __M,
+ __m128i __V) {
return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V);
}
@@ -1114,9 +1107,8 @@ _mm_testz_si128(__m128i __M, __m128i __V)
/// \param __V
/// A 128-bit integer vector selecting which bits to test in operand \a __M.
/// \returns TRUE if the specified bits are all ones; FALSE otherwise.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_testc_si128(__m128i __M, __m128i __V)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_testc_si128(__m128i __M,
+ __m128i __V) {
return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V);
}
@@ -1133,9 +1125,8 @@ _mm_testc_si128(__m128i __M, __m128i __V)
/// A 128-bit integer vector selecting which bits to test in operand \a __M.
/// \returns TRUE if the specified bits are neither all zeros nor all ones;
/// FALSE otherwise.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_testnzc_si128(__m128i __M, __m128i __V)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_testnzc_si128(__m128i __M,
+ __m128i __V) {
return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V);
}
@@ -1154,7 +1145,7 @@ _mm_testnzc_si128(__m128i __M, __m128i __V)
/// A 128-bit integer vector containing the bits to be tested.
/// \returns TRUE if the bits specified in the operand are all set to 1; FALSE
/// otherwise.
-#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V)))
+#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_set1_epi32(-1))
/// Tests whether the specified bits in a 128-bit integer vector are
/// neither all zeros nor all ones.
@@ -1191,7 +1182,7 @@ _mm_testnzc_si128(__m128i __M, __m128i __V)
/// \param V
/// A 128-bit integer vector selecting which bits to test in operand \a M.
/// \returns TRUE if the specified bits are all zeros; FALSE otherwise.
-#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))
+#define _mm_test_all_zeros(M, V) _mm_testz_si128((M), (V))
/* SSE4 64-bit Packed Integer Comparisons. */
/// Compares each of the corresponding 64-bit values of the 128-bit
@@ -1206,9 +1197,8 @@ _mm_testnzc_si128(__m128i __M, __m128i __V)
/// \param __V2
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi64(__m128i __V1,
+ __m128i __V2) {
return (__m128i)((__v2di)__V1 == (__v2di)__V2);
}
@@ -1223,15 +1213,16 @@ _mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
/// This intrinsic corresponds to the <c> VPMOVSXBW / PMOVSXBW </c> instruction.
///
/// \param __V
-/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are sign-
-/// extended to 16-bit values.
+/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are
+/// sign-extended to 16-bit values.
/// \returns A 128-bit vector of [8 x i16] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi8_epi16(__m128i __V)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi16(__m128i __V) {
/* This function always performs a signed extension, but __v16qi is a char
which may be signed or unsigned, so use __v16qs. */
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi);
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6,
+ 7),
+ __v8hi);
}
/// Sign-extends each of the lower four 8-bit integer elements of a
@@ -1247,12 +1238,11 @@ _mm_cvtepi8_epi16(__m128i __V)
/// A 128-bit vector of [16 x i8]. The lower four 8-bit elements are
/// sign-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi8_epi32(__m128i __V)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi32(__m128i __V) {
/* This function always performs a signed extension, but __v16qi is a char
which may be signed or unsigned, so use __v16qs. */
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si);
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si);
}
/// Sign-extends each of the lower two 8-bit integer elements of a
@@ -1268,12 +1258,11 @@ _mm_cvtepi8_epi32(__m128i __V)
/// A 128-bit vector of [16 x i8]. The lower two 8-bit elements are
/// sign-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi8_epi64(__m128i __V)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi64(__m128i __V) {
/* This function always performs a signed extension, but __v16qi is a char
which may be signed or unsigned, so use __v16qs. */
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di);
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di);
}
/// Sign-extends each of the lower four 16-bit integer elements of a
@@ -1289,10 +1278,9 @@ _mm_cvtepi8_epi64(__m128i __V)
/// A 128-bit vector of [8 x i16]. The lower four 16-bit elements are
/// sign-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi16_epi32(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi32(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si);
}
/// Sign-extends each of the lower two 16-bit integer elements of a
@@ -1308,10 +1296,9 @@ _mm_cvtepi16_epi32(__m128i __V)
/// A 128-bit vector of [8 x i16]. The lower two 16-bit elements are
/// sign-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi16_epi64(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi64(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di);
}
/// Sign-extends each of the lower two 32-bit integer elements of a
@@ -1327,10 +1314,9 @@ _mm_cvtepi16_epi64(__m128i __V)
/// A 128-bit vector of [4 x i32]. The lower two 32-bit elements are
/// sign-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi32_epi64(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi32_epi64(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di);
}
/* SSE4 Packed Integer Zero-Extension. */
@@ -1347,10 +1333,11 @@ _mm_cvtepi32_epi64(__m128i __V)
/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are
/// zero-extended to 16-bit values.
/// \returns A 128-bit vector of [8 x i16] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu8_epi16(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi16(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6,
+ 7),
+ __v8hi);
}
/// Zero-extends each of the lower four 8-bit integer elements of a
@@ -1366,10 +1353,9 @@ _mm_cvtepu8_epi16(__m128i __V)
/// A 128-bit vector of [16 x i8]. The lower four 8-bit elements are
/// zero-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu8_epi32(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4si);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi32(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4si);
}
/// Zero-extends each of the lower two 8-bit integer elements of a
@@ -1385,10 +1371,9 @@ _mm_cvtepu8_epi32(__m128i __V)
/// A 128-bit vector of [16 x i8]. The lower two 8-bit elements are
/// zero-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu8_epi64(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi64(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1), __v2di);
}
/// Zero-extends each of the lower four 16-bit integer elements of a
@@ -1404,10 +1389,9 @@ _mm_cvtepu8_epi64(__m128i __V)
/// A 128-bit vector of [8 x i16]. The lower four 16-bit elements are
/// zero-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu16_epi32(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4si);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi32(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4si);
}
/// Zero-extends each of the lower two 16-bit integer elements of a
@@ -1423,10 +1407,9 @@ _mm_cvtepu16_epi32(__m128i __V)
/// A 128-bit vector of [8 x i16]. The lower two 16-bit elements are
/// zero-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu16_epi64(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi64(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1), __v2di);
}
/// Zero-extends each of the lower two 32-bit integer elements of a
@@ -1442,10 +1425,9 @@ _mm_cvtepu16_epi64(__m128i __V)
/// A 128-bit vector of [4 x i32]. The lower two 32-bit elements are
/// zero-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu32_epi64(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4su)__V, (__v4su)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu32_epi64(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v4su)__V, (__v4su)__V, 0, 1), __v2di);
}
/* SSE4 Pack with Unsigned Saturation. */
@@ -1471,10 +1453,9 @@ _mm_cvtepu32_epi64(__m128i __V)
/// less than 0x0000 are saturated to 0x0000. The converted [4 x i16] values
/// are written to the higher 64 bits of the result.
/// \returns A 128-bit vector of [8 x i16] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packus_epi32(__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi32(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
}
/* SSE4 Multiple Packed Sums of Absolute Difference. */
@@ -1513,9 +1494,9 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
/// \endcode
/// \returns A 128-bit integer vector containing the sums of the sets of
/// absolute differences between both operands.
-#define _mm_mpsadbw_epu8(X, Y, M) \
- (__m128i) __builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X), \
- (__v16qi)(__m128i)(Y), (M))
+#define _mm_mpsadbw_epu8(X, Y, M) \
+ ((__m128i)__builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X), \
+ (__v16qi)(__m128i)(Y), (M)))
/// Finds the minimum unsigned 16-bit element in the input 128-bit
/// vector of [8 x u16] and returns it and along with its index.
@@ -1530,10 +1511,8 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
/// \returns A 128-bit value where bits [15:0] contain the minimum value found
/// in parameter \a __V, bits [18:16] contain the index of the minimum value
/// and the remaining bits are set to 0.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_minpos_epu16(__m128i __V)
-{
- return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_minpos_epu16(__m128i __V) {
+ return (__m128i)__builtin_ia32_phminposuw128((__v8hi)__V);
}
/* Handle the sse4.2 definitions here. */
@@ -1542,33 +1521,34 @@ _mm_minpos_epu16(__m128i __V)
so we'll do the same. */
#undef __DEFAULT_FN_ATTRS
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
/* These specify the type of data that we're comparing. */
-#define _SIDD_UBYTE_OPS 0x00
-#define _SIDD_UWORD_OPS 0x01
-#define _SIDD_SBYTE_OPS 0x02
-#define _SIDD_SWORD_OPS 0x03
+#define _SIDD_UBYTE_OPS 0x00
+#define _SIDD_UWORD_OPS 0x01
+#define _SIDD_SBYTE_OPS 0x02
+#define _SIDD_SWORD_OPS 0x03
/* These specify the type of comparison operation. */
-#define _SIDD_CMP_EQUAL_ANY 0x00
-#define _SIDD_CMP_RANGES 0x04
-#define _SIDD_CMP_EQUAL_EACH 0x08
-#define _SIDD_CMP_EQUAL_ORDERED 0x0c
+#define _SIDD_CMP_EQUAL_ANY 0x00
+#define _SIDD_CMP_RANGES 0x04
+#define _SIDD_CMP_EQUAL_EACH 0x08
+#define _SIDD_CMP_EQUAL_ORDERED 0x0c
/* These macros specify the polarity of the operation. */
-#define _SIDD_POSITIVE_POLARITY 0x00
-#define _SIDD_NEGATIVE_POLARITY 0x10
-#define _SIDD_MASKED_POSITIVE_POLARITY 0x20
-#define _SIDD_MASKED_NEGATIVE_POLARITY 0x30
+#define _SIDD_POSITIVE_POLARITY 0x00
+#define _SIDD_NEGATIVE_POLARITY 0x10
+#define _SIDD_MASKED_POSITIVE_POLARITY 0x20
+#define _SIDD_MASKED_NEGATIVE_POLARITY 0x30
/* These macros are used in _mm_cmpXstri() to specify the return. */
-#define _SIDD_LEAST_SIGNIFICANT 0x00
-#define _SIDD_MOST_SIGNIFICANT 0x40
+#define _SIDD_LEAST_SIGNIFICANT 0x00
+#define _SIDD_MOST_SIGNIFICANT 0x40
/* These macros are used in _mm_cmpXstri() to specify the return. */
-#define _SIDD_BIT_MASK 0x00
-#define _SIDD_UNIT_MASK 0x40
+#define _SIDD_BIT_MASK 0x00
+#define _SIDD_UNIT_MASK 0x40
/* SSE4.2 Packed Comparison Intrinsics. */
/// Uses the immediate operand \a M to perform a comparison of string
@@ -1623,9 +1603,9 @@ _mm_minpos_epu16(__m128i __V)
/// repeating each bit 8 or 16 times).
/// \returns Returns a 128-bit integer vector representing the result mask of
/// the comparison.
-#define _mm_cmpistrm(A, B, M) \
- (__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistrm(A, B, M) \
+ ((__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
/// data with implicitly defined lengths that is contained in source operands
@@ -1677,9 +1657,9 @@ _mm_minpos_epu16(__m128i __V)
/// 0: The index of the least significant set bit. \n
/// 1: The index of the most significant set bit. \n
/// \returns Returns an integer representing the result index of the comparison.
-#define _mm_cmpistri(A, B, M) \
- (int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistri(A, B, M) \
+ ((int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
/// data with explicitly defined lengths that is contained in source operands
@@ -1737,10 +1717,10 @@ _mm_minpos_epu16(__m128i __V)
/// repeating each bit 8 or 16 times). \n
/// \returns Returns a 128-bit integer vector representing the result mask of
/// the comparison.
-#define _mm_cmpestrm(A, LA, B, LB, M) \
- (__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(A), (int)(LA), \
- (__v16qi)(__m128i)(B), (int)(LB), \
- (int)(M))
+#define _mm_cmpestrm(A, LA, B, LB, M) \
+ ((__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
/// data with explicitly defined lengths that is contained in source operands
@@ -1796,10 +1776,10 @@ _mm_minpos_epu16(__m128i __V)
/// 0: The index of the least significant set bit. \n
/// 1: The index of the most significant set bit. \n
/// \returns Returns an integer representing the result index of the comparison.
-#define _mm_cmpestri(A, LA, B, LB, M) \
- (int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(A), (int)(LA), \
- (__v16qi)(__m128i)(B), (int)(LB), \
- (int)(M))
+#define _mm_cmpestri(A, LA, B, LB, M) \
+ ((int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M)))
/* SSE4.2 Packed Comparison Intrinsics and EFlag Reading. */
/// Uses the immediate operand \a M to perform a comparison of string
@@ -1848,9 +1828,9 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B. \n
/// \returns Returns 1 if the bit mask is zero and the length of the string in
/// \a B is the maximum; otherwise, returns 0.
-#define _mm_cmpistra(A, B, M) \
- (int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistra(A, B, M) \
+ ((int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
/// data with implicitly defined lengths that is contained in source operands
@@ -1897,9 +1877,9 @@ _mm_minpos_epu16(__m128i __V)
/// 11: Negate the bit mask only for bits with an index less than or equal
/// to the size of \a A or \a B.
/// \returns Returns 1 if the bit mask is non-zero, otherwise, returns 0.
-#define _mm_cmpistrc(A, B, M) \
- (int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistrc(A, B, M) \
+ ((int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
/// data with implicitly defined lengths that is contained in source operands
@@ -1945,9 +1925,9 @@ _mm_minpos_epu16(__m128i __V)
/// 11: Negate the bit mask only for bits with an index less than or equal
/// to the size of \a A or \a B. \n
/// \returns Returns bit 0 of the resulting bit mask.
-#define _mm_cmpistro(A, B, M) \
- (int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistro(A, B, M) \
+ ((int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
/// data with implicitly defined lengths that is contained in source operands
@@ -1995,9 +1975,9 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B. \n
/// \returns Returns 1 if the length of the string in \a A is less than the
/// maximum, otherwise, returns 0.
-#define _mm_cmpistrs(A, B, M) \
- (int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistrs(A, B, M) \
+ ((int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
/// data with implicitly defined lengths that is contained in source operands
@@ -2045,9 +2025,9 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B.
/// \returns Returns 1 if the length of the string in \a B is less than the
/// maximum, otherwise, returns 0.
-#define _mm_cmpistrz(A, B, M) \
- (int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), (int)(M))
+#define _mm_cmpistrz(A, B, M) \
+ ((int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
/// data with explicitly defined lengths that is contained in source operands
@@ -2099,10 +2079,10 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B.
/// \returns Returns 1 if the bit mask is zero and the length of the string in
/// \a B is the maximum, otherwise, returns 0.
-#define _mm_cmpestra(A, LA, B, LB, M) \
- (int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(A), (int)(LA), \
- (__v16qi)(__m128i)(B), (int)(LB), \
- (int)(M))
+#define _mm_cmpestra(A, LA, B, LB, M) \
+ ((int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
/// data with explicitly defined lengths that is contained in source operands
@@ -2153,10 +2133,10 @@ _mm_minpos_epu16(__m128i __V)
/// 11: Negate the bit mask only for bits with an index less than or equal
/// to the size of \a A or \a B. \n
/// \returns Returns 1 if the resulting mask is non-zero, otherwise, returns 0.
-#define _mm_cmpestrc(A, LA, B, LB, M) \
- (int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA), \
- (__v16qi)(__m128i)(B), (int)(LB), \
- (int)(M))
+#define _mm_cmpestrc(A, LA, B, LB, M) \
+ ((int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
/// data with explicitly defined lengths that is contained in source operands
@@ -2206,10 +2186,10 @@ _mm_minpos_epu16(__m128i __V)
/// 11: Negate the bit mask only for bits with an index less than or equal
/// to the size of \a A or \a B.
/// \returns Returns bit 0 of the resulting bit mask.
-#define _mm_cmpestro(A, LA, B, LB, M) \
- (int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(A), (int)(LA), \
- (__v16qi)(__m128i)(B), (int)(LB), \
- (int)(M))
+#define _mm_cmpestro(A, LA, B, LB, M) \
+ ((int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
/// data with explicitly defined lengths that is contained in source operands
@@ -2261,10 +2241,10 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B. \n
/// \returns Returns 1 if the length of the string in \a A is less than the
/// maximum, otherwise, returns 0.
-#define _mm_cmpestrs(A, LA, B, LB, M) \
- (int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(A), (int)(LA), \
- (__v16qi)(__m128i)(B), (int)(LB), \
- (int)(M))
+#define _mm_cmpestrs(A, LA, B, LB, M) \
+ ((int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
/// data with explicitly defined lengths that is contained in source operands
@@ -2315,10 +2295,10 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B.
/// \returns Returns 1 if the length of the string in \a B is less than the
/// maximum, otherwise, returns 0.
-#define _mm_cmpestrz(A, LA, B, LB, M) \
- (int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(A), (int)(LA), \
- (__v16qi)(__m128i)(B), (int)(LB), \
- (int)(M))
+#define _mm_cmpestrz(A, LA, B, LB, M) \
+ ((int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
+ (int)(M)))
/* SSE4.2 Compare Packed Data -- Greater Than. */
/// Compares each of the corresponding 64-bit values of the 128-bit
@@ -2334,97 +2314,15 @@ _mm_minpos_epu16(__m128i __V)
/// \param __V2
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi64(__m128i __V1,
+ __m128i __V2) {
return (__m128i)((__v2di)__V1 > (__v2di)__V2);
}
-/* SSE4.2 Accumulate CRC32. */
-/// Adds the unsigned integer operand to the CRC-32C checksum of the
-/// unsigned char operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CRC32B </c> instruction.
-///
-/// \param __C
-/// An unsigned integer operand to add to the CRC-32C checksum of operand
-/// \a __D.
-/// \param __D
-/// An unsigned 8-bit integer operand used to compute the CRC-32C checksum.
-/// \returns The result of adding operand \a __C to the CRC-32C checksum of
-/// operand \a __D.
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_mm_crc32_u8(unsigned int __C, unsigned char __D)
-{
- return __builtin_ia32_crc32qi(__C, __D);
-}
-
-/// Adds the unsigned integer operand to the CRC-32C checksum of the
-/// unsigned short operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CRC32W </c> instruction.
-///
-/// \param __C
-/// An unsigned integer operand to add to the CRC-32C checksum of operand
-/// \a __D.
-/// \param __D
-/// An unsigned 16-bit integer operand used to compute the CRC-32C checksum.
-/// \returns The result of adding operand \a __C to the CRC-32C checksum of
-/// operand \a __D.
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_mm_crc32_u16(unsigned int __C, unsigned short __D)
-{
- return __builtin_ia32_crc32hi(__C, __D);
-}
-
-/// Adds the first unsigned integer operand to the CRC-32C checksum of
-/// the second unsigned integer operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CRC32L </c> instruction.
-///
-/// \param __C
-/// An unsigned integer operand to add to the CRC-32C checksum of operand
-/// \a __D.
-/// \param __D
-/// An unsigned 32-bit integer operand used to compute the CRC-32C checksum.
-/// \returns The result of adding operand \a __C to the CRC-32C checksum of
-/// operand \a __D.
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_mm_crc32_u32(unsigned int __C, unsigned int __D)
-{
- return __builtin_ia32_crc32si(__C, __D);
-}
-
-#ifdef __x86_64__
-/// Adds the unsigned integer operand to the CRC-32C checksum of the
-/// unsigned 64-bit integer operand.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> CRC32Q </c> instruction.
-///
-/// \param __C
-/// An unsigned integer operand to add to the CRC-32C checksum of operand
-/// \a __D.
-/// \param __D
-/// An unsigned 64-bit integer operand used to compute the CRC-32C checksum.
-/// \returns The result of adding operand \a __C to the CRC-32C checksum of
-/// operand \a __D.
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS
-_mm_crc32_u64(unsigned long long __C, unsigned long long __D)
-{
- return __builtin_ia32_crc32di(__C, __D);
-}
-#endif /* __x86_64__ */
-
#undef __DEFAULT_FN_ATTRS
#include <popcntintrin.h>
+#include <crc32intrin.h>
+
#endif /* __SMMINTRIN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/stdalign.h b/contrib/llvm-project/clang/lib/Headers/stdalign.h
index 6ad25db4539a..158508e65d2b 100644
--- a/contrib/llvm-project/clang/lib/Headers/stdalign.h
+++ b/contrib/llvm-project/clang/lib/Headers/stdalign.h
@@ -10,6 +10,8 @@
#ifndef __STDALIGN_H
#define __STDALIGN_H
+#if defined(__cplusplus) || \
+ (defined(__STDC_VERSION__) && __STDC_VERSION__ < 202311L)
#ifndef __cplusplus
#define alignas _Alignas
#define alignof _Alignof
@@ -17,5 +19,6 @@
#define __alignas_is_defined 1
#define __alignof_is_defined 1
+#endif /* __STDC_VERSION__ */
#endif /* __STDALIGN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/stdarg.h b/contrib/llvm-project/clang/lib/Headers/stdarg.h
index 0bc39408c1e5..94b066566f08 100644
--- a/contrib/llvm-project/clang/lib/Headers/stdarg.h
+++ b/contrib/llvm-project/clang/lib/Headers/stdarg.h
@@ -7,29 +7,73 @@
*===-----------------------------------------------------------------------===
*/
-#ifndef __STDARG_H
-#define __STDARG_H
-
-#ifndef _VA_LIST
-typedef __builtin_va_list va_list;
-#define _VA_LIST
-#endif
-#define va_start(ap, param) __builtin_va_start(ap, param)
-#define va_end(ap) __builtin_va_end(ap)
-#define va_arg(ap, type) __builtin_va_arg(ap, type)
+/*
+ * This header is designed to be included multiple times. If any of the __need_
+ * macros are defined, then only that subset of interfaces are provided. This
+ * can be useful for POSIX headers that need to not expose all of stdarg.h, but
+ * need to use some of its interfaces. Otherwise this header provides all of
+ * the expected interfaces.
+ *
+ * When clang modules are enabled, this header is a textual header. It ignores
+ * its header guard so that multiple submodules can export its interfaces.
+ * Take module SM with submodules A and B, whose headers both include stdarg.h
+ * When SM.A builds, __STDARG_H will be defined. When SM.B builds, the
+ * definition from SM.A will leak when building without local submodule
+ * visibility. stdarg.h wouldn't include any of its implementation headers, and
+ * SM.B wouldn't import any of the stdarg modules, and SM.B's `export *`
+ * wouldn't export any stdarg interfaces as expected. However, since stdarg.h
+ * ignores its header guard when building with modules, it all works as
+ * expected.
+ *
+ * When clang modules are not enabled, the header guards can function in the
+ * normal simple fashion.
+ */
+#if !defined(__STDARG_H) || __has_feature(modules) || \
+ defined(__need___va_list) || defined(__need_va_list) || \
+ defined(__need_va_arg) || defined(__need___va_copy) || \
+ defined(__need_va_copy)
+#if !defined(__need___va_list) && !defined(__need_va_list) && \
+ !defined(__need_va_arg) && !defined(__need___va_copy) && \
+ !defined(__need_va_copy)
+#define __STDARG_H
+#define __need___va_list
+#define __need_va_list
+#define __need_va_arg
+#define __need___va_copy
/* GCC always defines __va_copy, but does not define va_copy unless in c99 mode
* or -ansi is not specified, since it was not part of C90.
*/
-#define __va_copy(d,s) __builtin_va_copy(d,s)
-
-#if __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L || !defined(__STRICT_ANSI__)
-#define va_copy(dest, src) __builtin_va_copy(dest, src)
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || \
+ (defined(__cplusplus) && __cplusplus >= 201103L) || \
+ !defined(__STRICT_ANSI__)
+#define __need_va_copy
#endif
-
-#ifndef __GNUC_VA_LIST
-#define __GNUC_VA_LIST 1
-typedef __builtin_va_list __gnuc_va_list;
#endif
-#endif /* __STDARG_H */
+#ifdef __need___va_list
+#include <__stdarg___gnuc_va_list.h>
+#undef __need___va_list
+#endif /* defined(__need___va_list) */
+
+#ifdef __need_va_list
+#include <__stdarg_va_list.h>
+#undef __need_va_list
+#endif /* defined(__need_va_list) */
+
+#ifdef __need_va_arg
+#include <__stdarg_va_arg.h>
+#undef __need_va_arg
+#endif /* defined(__need_va_arg) */
+
+#ifdef __need___va_copy
+#include <__stdarg___va_copy.h>
+#undef __need___va_copy
+#endif /* defined(__need___va_copy) */
+
+#ifdef __need_va_copy
+#include <__stdarg_va_copy.h>
+#undef __need_va_copy
+#endif /* defined(__need_va_copy) */
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/stdatomic.h b/contrib/llvm-project/clang/lib/Headers/stdatomic.h
index 665551ea69a4..521c473dd169 100644
--- a/contrib/llvm-project/clang/lib/Headers/stdatomic.h
+++ b/contrib/llvm-project/clang/lib/Headers/stdatomic.h
@@ -12,8 +12,15 @@
/* If we're hosted, fall back to the system's stdatomic.h. FreeBSD, for
* example, already has a Clang-compatible stdatomic.h header.
+ *
+ * Exclude the MSVC path as well as the MSVC header as of the 14.31.30818
+ * explicitly disallows `stdatomic.h` in the C mode via an `#error`. Fallback
+ * to the clang resource header until that is fully supported. The
+ * `stdatomic.h` header requires C++ 23 or newer.
*/
-#if __STDC_HOSTED__ && __has_include_next(<stdatomic.h>)
+#if __STDC_HOSTED__ && \
+ __has_include_next(<stdatomic.h>) && \
+ (!defined(_MSC_VER) || (defined(__cplusplus) && __cplusplus >= 202002L))
# include_next <stdatomic.h>
#else
@@ -38,8 +45,19 @@ extern "C" {
#define ATOMIC_POINTER_LOCK_FREE __CLANG_ATOMIC_POINTER_LOCK_FREE
/* 7.17.2 Initialization */
-
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ < 202311L) || \
+ defined(__cplusplus)
+/* ATOMIC_VAR_INIT was removed in C23, but still remains in C++23. */
#define ATOMIC_VAR_INIT(value) (value)
+#endif
+
+#if ((defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201710L && \
+ __STDC_VERSION__ < 202311L) || \
+ (defined(__cplusplus) && __cplusplus >= 202002L)) && \
+ !defined(_CLANG_DISABLE_CRT_DEPRECATION_WARNINGS)
+/* ATOMIC_VAR_INIT was deprecated in C17 and C++20. */
+#pragma clang deprecated(ATOMIC_VAR_INIT)
+#endif
#define atomic_init __c11_atomic_init
/* 7.17.3 Order and consistency */
diff --git a/contrib/llvm-project/clang/lib/Headers/stdbool.h b/contrib/llvm-project/clang/lib/Headers/stdbool.h
index 2525363dd02a..9406aab0ca72 100644
--- a/contrib/llvm-project/clang/lib/Headers/stdbool.h
+++ b/contrib/llvm-project/clang/lib/Headers/stdbool.h
@@ -10,22 +10,25 @@
#ifndef __STDBOOL_H
#define __STDBOOL_H
-/* Don't define bool, true, and false in C++, except as a GNU extension. */
-#ifndef __cplusplus
+#define __bool_true_false_are_defined 1
+
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ > 201710L
+/* FIXME: We should be issuing a deprecation warning here, but cannot yet due
+ * to system headers which include this header file unconditionally.
+ */
+#elif !defined(__cplusplus)
#define bool _Bool
#define true 1
#define false 0
#elif defined(__GNUC__) && !defined(__STRICT_ANSI__)
/* Define _Bool as a GNU extension. */
#define _Bool bool
-#if __cplusplus < 201103L
+#if defined(__cplusplus) && __cplusplus < 201103L
/* For C++98, define bool, false, true as a GNU extension. */
-#define bool bool
+#define bool bool
#define false false
-#define true true
+#define true true
#endif
#endif
-#define __bool_true_false_are_defined 1
-
#endif /* __STDBOOL_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/stdckdint.h b/contrib/llvm-project/clang/lib/Headers/stdckdint.h
new file mode 100644
index 000000000000..20bc34ffb302
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/stdckdint.h
@@ -0,0 +1,42 @@
+/*===---- stdckdint.h - Standard header for checking integer----------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDCKDINT_H
+#define __STDCKDINT_H
+
+/* If we're hosted, fall back to the system's stdckdint.h. FreeBSD, for
+ * example, already has a Clang-compatible stdckdint.h header.
+ *
+ * The `stdckdint.h` header requires C 23 or newer.
+ */
+#if __STDC_HOSTED__ && __has_include_next(<stdckdint.h>)
+#include_next <stdckdint.h>
+#else
+
+/* C23 7.20.1 Defines several macros for performing checked integer arithmetic*/
+
+#define __STDC_VERSION_STDCKDINT_H__ 202311L
+
+// Both A and B shall be any integer type other than "plain" char, bool, a bit-
+// precise integer type, or an enumerated type, and they need not be the same.
+
+// R shall be a modifiable lvalue of any integer type other than "plain" char,
+// bool, a bit-precise integer type, or an enumerated type. It shouldn't be
+// short type, either. Otherwise, it may be unable to hold two the result of
+// operating two 'int's.
+
+// A diagnostic message will be produced if A or B are not suitable integer
+// types, or if R is not a modifiable lvalue of a suitable integer type or R
+// is short type.
+#define ckd_add(R, A, B) __builtin_add_overflow((A), (B), (R))
+#define ckd_sub(R, A, B) __builtin_sub_overflow((A), (B), (R))
+#define ckd_mul(R, A, B) __builtin_mul_overflow((A), (B), (R))
+
+#endif /* __STDC_HOSTED__ */
+#endif /* __STDCKDINT_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/stddef.h b/contrib/llvm-project/clang/lib/Headers/stddef.h
index 15acd4427ca1..e0ad7b8d17af 100644
--- a/contrib/llvm-project/clang/lib/Headers/stddef.h
+++ b/contrib/llvm-project/clang/lib/Headers/stddef.h
@@ -7,114 +7,116 @@
*===-----------------------------------------------------------------------===
*/
-#if !defined(__STDDEF_H) || defined(__need_ptrdiff_t) || \
- defined(__need_size_t) || defined(__need_wchar_t) || \
- defined(__need_NULL) || defined(__need_wint_t)
+/*
+ * This header is designed to be included multiple times. If any of the __need_
+ * macros are defined, then only that subset of interfaces are provided. This
+ * can be useful for POSIX headers that need to not expose all of stddef.h, but
+ * need to use some of its interfaces. Otherwise this header provides all of
+ * the expected interfaces.
+ *
+ * When clang modules are enabled, this header is a textual header. It ignores
+ * its header guard so that multiple submodules can export its interfaces.
+ * Take module SM with submodules A and B, whose headers both include stddef.h
+ * When SM.A builds, __STDDEF_H will be defined. When SM.B builds, the
+ * definition from SM.A will leak when building without local submodule
+ * visibility. stddef.h wouldn't include any of its implementation headers, and
+ * SM.B wouldn't import any of the stddef modules, and SM.B's `export *`
+ * wouldn't export any stddef interfaces as expected. However, since stddef.h
+ * ignores its header guard when building with modules, it all works as
+ * expected.
+ *
+ * When clang modules are not enabled, the header guards can function in the
+ * normal simple fashion.
+ */
+#if !defined(__STDDEF_H) || __has_feature(modules) || \
+ (defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1) || \
+ defined(__need_ptrdiff_t) || defined(__need_size_t) || \
+ defined(__need_rsize_t) || defined(__need_wchar_t) || \
+ defined(__need_NULL) || defined(__need_nullptr_t) || \
+ defined(__need_unreachable) || defined(__need_max_align_t) || \
+ defined(__need_offsetof) || defined(__need_wint_t)
#if !defined(__need_ptrdiff_t) && !defined(__need_size_t) && \
- !defined(__need_wchar_t) && !defined(__need_NULL) && \
- !defined(__need_wint_t)
-/* Always define miscellaneous pieces when modules are available. */
-#if !__has_feature(modules)
+ !defined(__need_rsize_t) && !defined(__need_wchar_t) && \
+ !defined(__need_NULL) && !defined(__need_nullptr_t) && \
+ !defined(__need_unreachable) && !defined(__need_max_align_t) && \
+ !defined(__need_offsetof) && !defined(__need_wint_t)
#define __STDDEF_H
-#endif
#define __need_ptrdiff_t
#define __need_size_t
+/* ISO9899:2011 7.20 (C11 Annex K): Define rsize_t if __STDC_WANT_LIB_EXT1__ is
+ * enabled. */
+#if defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1
+#define __need_rsize_t
+#endif
#define __need_wchar_t
#define __need_NULL
-#define __need_STDDEF_H_misc
-/* __need_wint_t is intentionally not defined here. */
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \
+ defined(__cplusplus)
+#define __need_nullptr_t
#endif
-
-#if defined(__need_ptrdiff_t)
-#if !defined(_PTRDIFF_T) || __has_feature(modules)
-/* Always define ptrdiff_t when modules are available. */
-#if !__has_feature(modules)
-#define _PTRDIFF_T
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
+#define __need_unreachable
+#endif
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \
+ (defined(__cplusplus) && __cplusplus >= 201103L)
+#define __need_max_align_t
#endif
-typedef __PTRDIFF_TYPE__ ptrdiff_t;
+#define __need_offsetof
+/* wint_t is provided by <wchar.h> and not <stddef.h>. It's here
+ * for compatibility, but must be explicitly requested. Therefore
+ * __need_wint_t is intentionally not defined here. */
#endif
+
+#if defined(__need_ptrdiff_t)
+#include <__stddef_ptrdiff_t.h>
#undef __need_ptrdiff_t
#endif /* defined(__need_ptrdiff_t) */
#if defined(__need_size_t)
-#if !defined(_SIZE_T) || __has_feature(modules)
-/* Always define size_t when modules are available. */
-#if !__has_feature(modules)
-#define _SIZE_T
-#endif
-typedef __SIZE_TYPE__ size_t;
-#endif
+#include <__stddef_size_t.h>
#undef __need_size_t
#endif /*defined(__need_size_t) */
-#if defined(__need_STDDEF_H_misc)
-/* ISO9899:2011 7.20 (C11 Annex K): Define rsize_t if __STDC_WANT_LIB_EXT1__ is
- * enabled. */
-#if (defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1 && \
- !defined(_RSIZE_T)) || __has_feature(modules)
-/* Always define rsize_t when modules are available. */
-#if !__has_feature(modules)
-#define _RSIZE_T
-#endif
-typedef __SIZE_TYPE__ rsize_t;
-#endif
-#endif /* defined(__need_STDDEF_H_misc) */
+#if defined(__need_rsize_t)
+#include <__stddef_rsize_t.h>
+#undef __need_rsize_t
+#endif /* defined(__need_rsize_t) */
#if defined(__need_wchar_t)
-#ifndef __cplusplus
-/* Always define wchar_t when modules are available. */
-#if !defined(_WCHAR_T) || __has_feature(modules)
-#if !__has_feature(modules)
-#define _WCHAR_T
-#if defined(_MSC_EXTENSIONS)
-#define _WCHAR_T_DEFINED
-#endif
-#endif
-typedef __WCHAR_TYPE__ wchar_t;
-#endif
-#endif
+#include <__stddef_wchar_t.h>
#undef __need_wchar_t
#endif /* defined(__need_wchar_t) */
#if defined(__need_NULL)
-#undef NULL
-#ifdef __cplusplus
-# if !defined(__MINGW32__) && !defined(_MSC_VER)
-# define NULL __null
-# else
-# define NULL 0
-# endif
-#else
-# define NULL ((void*)0)
-#endif
-#ifdef __cplusplus
-#if defined(_MSC_EXTENSIONS) && defined(_NATIVE_NULLPTR_SUPPORTED)
-namespace std { typedef decltype(nullptr) nullptr_t; }
-using ::std::nullptr_t;
-#endif
-#endif
+#include <__stddef_null.h>
#undef __need_NULL
#endif /* defined(__need_NULL) */
-#if defined(__need_STDDEF_H_misc)
-#if __STDC_VERSION__ >= 201112L || __cplusplus >= 201103L
-#include "__stddef_max_align_t.h"
-#endif
-#define offsetof(t, d) __builtin_offsetof(t, d)
-#undef __need_STDDEF_H_misc
-#endif /* defined(__need_STDDEF_H_misc) */
+#if defined(__need_nullptr_t)
+#include <__stddef_nullptr_t.h>
+#undef __need_nullptr_t
+#endif /* defined(__need_nullptr_t) */
+
+#if defined(__need_unreachable)
+#include <__stddef_unreachable.h>
+#undef __need_unreachable
+#endif /* defined(__need_unreachable) */
+
+#if defined(__need_max_align_t)
+#include <__stddef_max_align_t.h>
+#undef __need_max_align_t
+#endif /* defined(__need_max_align_t) */
+
+#if defined(__need_offsetof)
+#include <__stddef_offsetof.h>
+#undef __need_offsetof
+#endif /* defined(__need_offsetof) */
/* Some C libraries expect to see a wint_t here. Others (notably MinGW) will use
__WINT_TYPE__ directly; accommodate both by requiring __need_wint_t */
#if defined(__need_wint_t)
-/* Always define wint_t when modules are available. */
-#if !defined(_WINT_T) || __has_feature(modules)
-#if !__has_feature(modules)
-#define _WINT_T
-#endif
-typedef __WINT_TYPE__ wint_t;
-#endif
+#include <__stddef_wint_t.h>
#undef __need_wint_t
#endif /* __need_wint_t */
diff --git a/contrib/llvm-project/clang/lib/Headers/stdint.h b/contrib/llvm-project/clang/lib/Headers/stdint.h
index 192f653e95a1..b6699b6ca3d4 100644
--- a/contrib/llvm-project/clang/lib/Headers/stdint.h
+++ b/contrib/llvm-project/clang/lib/Headers/stdint.h
@@ -96,13 +96,21 @@
typedef __INT64_TYPE__ int64_t;
# endif /* __int8_t_defined */
typedef __UINT64_TYPE__ uint64_t;
+# undef __int_least64_t
# define __int_least64_t int64_t
+# undef __uint_least64_t
# define __uint_least64_t uint64_t
+# undef __int_least32_t
# define __int_least32_t int64_t
+# undef __uint_least32_t
# define __uint_least32_t uint64_t
+# undef __int_least16_t
# define __int_least16_t int64_t
+# undef __uint_least16_t
# define __uint_least16_t uint64_t
+# undef __int_least8_t
# define __int_least8_t int64_t
+# undef __uint_least8_t
# define __uint_least8_t uint64_t
#endif /* __INT64_TYPE__ */
@@ -120,11 +128,17 @@ typedef int56_t int_least56_t;
typedef uint56_t uint_least56_t;
typedef int56_t int_fast56_t;
typedef uint56_t uint_fast56_t;
+# undef __int_least32_t
# define __int_least32_t int56_t
+# undef __uint_least32_t
# define __uint_least32_t uint56_t
+# undef __int_least16_t
# define __int_least16_t int56_t
+# undef __uint_least16_t
# define __uint_least16_t uint56_t
+# undef __int_least8_t
# define __int_least8_t int56_t
+# undef __uint_least8_t
# define __uint_least8_t uint56_t
#endif /* __INT56_TYPE__ */
@@ -136,11 +150,17 @@ typedef int48_t int_least48_t;
typedef uint48_t uint_least48_t;
typedef int48_t int_fast48_t;
typedef uint48_t uint_fast48_t;
+# undef __int_least32_t
# define __int_least32_t int48_t
+# undef __uint_least32_t
# define __uint_least32_t uint48_t
+# undef __int_least16_t
# define __int_least16_t int48_t
+# undef __uint_least16_t
# define __uint_least16_t uint48_t
+# undef __int_least8_t
# define __int_least8_t int48_t
+# undef __uint_least8_t
# define __uint_least8_t uint48_t
#endif /* __INT48_TYPE__ */
@@ -152,11 +172,17 @@ typedef int40_t int_least40_t;
typedef uint40_t uint_least40_t;
typedef int40_t int_fast40_t;
typedef uint40_t uint_fast40_t;
+# undef __int_least32_t
# define __int_least32_t int40_t
+# undef __uint_least32_t
# define __uint_least32_t uint40_t
+# undef __int_least16_t
# define __int_least16_t int40_t
+# undef __uint_least16_t
# define __uint_least16_t uint40_t
+# undef __int_least8_t
# define __int_least8_t int40_t
+# undef __uint_least8_t
# define __uint_least8_t uint40_t
#endif /* __INT40_TYPE__ */
@@ -172,11 +198,17 @@ typedef __INT32_TYPE__ int32_t;
typedef __UINT32_TYPE__ uint32_t;
# endif /* __uint32_t_defined */
+# undef __int_least32_t
# define __int_least32_t int32_t
+# undef __uint_least32_t
# define __uint_least32_t uint32_t
+# undef __int_least16_t
# define __int_least16_t int32_t
+# undef __uint_least16_t
# define __uint_least16_t uint32_t
+# undef __int_least8_t
# define __int_least8_t int32_t
+# undef __uint_least8_t
# define __uint_least8_t uint32_t
#endif /* __INT32_TYPE__ */
@@ -194,9 +226,13 @@ typedef int24_t int_least24_t;
typedef uint24_t uint_least24_t;
typedef int24_t int_fast24_t;
typedef uint24_t uint_fast24_t;
+# undef __int_least16_t
# define __int_least16_t int24_t
+# undef __uint_least16_t
# define __uint_least16_t uint24_t
+# undef __int_least8_t
# define __int_least8_t int24_t
+# undef __uint_least8_t
# define __uint_least8_t uint24_t
#endif /* __INT24_TYPE__ */
@@ -205,9 +241,13 @@ typedef uint24_t uint_fast24_t;
typedef __INT16_TYPE__ int16_t;
#endif /* __int8_t_defined */
typedef __UINT16_TYPE__ uint16_t;
+# undef __int_least16_t
# define __int_least16_t int16_t
+# undef __uint_least16_t
# define __uint_least16_t uint16_t
+# undef __int_least8_t
# define __int_least8_t int16_t
+# undef __uint_least8_t
# define __uint_least8_t uint16_t
#endif /* __INT16_TYPE__ */
@@ -224,7 +264,9 @@ typedef __uint_least16_t uint_fast16_t;
typedef __INT8_TYPE__ int8_t;
#endif /* __int8_t_defined */
typedef __UINT8_TYPE__ uint8_t;
+# undef __int_least8_t
# define __int_least8_t int8_t
+# undef __uint_least8_t
# define __uint_least8_t uint8_t
#endif /* __INT8_TYPE__ */
@@ -285,16 +327,15 @@ typedef __UINTMAX_TYPE__ uintmax_t;
#ifdef __INT64_TYPE__
+# undef __int64_c_suffix
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
# ifdef __INT64_C_SUFFIX__
# define __int64_c_suffix __INT64_C_SUFFIX__
# define __int32_c_suffix __INT64_C_SUFFIX__
# define __int16_c_suffix __INT64_C_SUFFIX__
# define __int8_c_suffix __INT64_C_SUFFIX__
-# else
-# undef __int64_c_suffix
-# undef __int32_c_suffix
-# undef __int16_c_suffix
-# undef __int8_c_suffix
# endif /* __INT64_C_SUFFIX__ */
#endif /* __INT64_TYPE__ */
@@ -310,6 +351,9 @@ typedef __UINTMAX_TYPE__ uintmax_t;
#ifdef __INT56_TYPE__
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
# ifdef __INT56_C_SUFFIX__
# define INT56_C(v) __int_c(v, __INT56_C_SUFFIX__)
# define UINT56_C(v) __uint_c(v, __INT56_C_SUFFIX__)
@@ -319,14 +363,14 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# else
# define INT56_C(v) v
# define UINT56_C(v) v ## U
-# undef __int32_c_suffix
-# undef __int16_c_suffix
-# undef __int8_c_suffix
# endif /* __INT56_C_SUFFIX__ */
#endif /* __INT56_TYPE__ */
#ifdef __INT48_TYPE__
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
# ifdef __INT48_C_SUFFIX__
# define INT48_C(v) __int_c(v, __INT48_C_SUFFIX__)
# define UINT48_C(v) __uint_c(v, __INT48_C_SUFFIX__)
@@ -336,14 +380,14 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# else
# define INT48_C(v) v
# define UINT48_C(v) v ## U
-# undef __int32_c_suffix
-# undef __int16_c_suffix
-# undef __int8_c_suffix
# endif /* __INT48_C_SUFFIX__ */
#endif /* __INT48_TYPE__ */
#ifdef __INT40_TYPE__
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
# ifdef __INT40_C_SUFFIX__
# define INT40_C(v) __int_c(v, __INT40_C_SUFFIX__)
# define UINT40_C(v) __uint_c(v, __INT40_C_SUFFIX__)
@@ -353,22 +397,18 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# else
# define INT40_C(v) v
# define UINT40_C(v) v ## U
-# undef __int32_c_suffix
-# undef __int16_c_suffix
-# undef __int8_c_suffix
# endif /* __INT40_C_SUFFIX__ */
#endif /* __INT40_TYPE__ */
#ifdef __INT32_TYPE__
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
# ifdef __INT32_C_SUFFIX__
# define __int32_c_suffix __INT32_C_SUFFIX__
# define __int16_c_suffix __INT32_C_SUFFIX__
# define __int8_c_suffix __INT32_C_SUFFIX__
-#else
-# undef __int32_c_suffix
-# undef __int16_c_suffix
-# undef __int8_c_suffix
# endif /* __INT32_C_SUFFIX__ */
#endif /* __INT32_TYPE__ */
@@ -384,6 +424,8 @@ typedef __UINTMAX_TYPE__ uintmax_t;
#ifdef __INT24_TYPE__
+# undef __int16_c_suffix
+# undef __int8_c_suffix
# ifdef __INT24_C_SUFFIX__
# define INT24_C(v) __int_c(v, __INT24_C_SUFFIX__)
# define UINT24_C(v) __uint_c(v, __INT24_C_SUFFIX__)
@@ -392,19 +434,16 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# else
# define INT24_C(v) v
# define UINT24_C(v) v ## U
-# undef __int16_c_suffix
-# undef __int8_c_suffix
# endif /* __INT24_C_SUFFIX__ */
#endif /* __INT24_TYPE__ */
#ifdef __INT16_TYPE__
+# undef __int16_c_suffix
+# undef __int8_c_suffix
# ifdef __INT16_C_SUFFIX__
# define __int16_c_suffix __INT16_C_SUFFIX__
# define __int8_c_suffix __INT16_C_SUFFIX__
-#else
-# undef __int16_c_suffix
-# undef __int8_c_suffix
# endif /* __INT16_C_SUFFIX__ */
#endif /* __INT16_TYPE__ */
@@ -420,10 +459,9 @@ typedef __UINTMAX_TYPE__ uintmax_t;
#ifdef __INT8_TYPE__
+# undef __int8_c_suffix
# ifdef __INT8_C_SUFFIX__
# define __int8_c_suffix __INT8_C_SUFFIX__
-#else
-# undef __int8_c_suffix
# endif /* __INT8_C_SUFFIX__ */
#endif /* __INT8_TYPE__ */
@@ -461,17 +499,40 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define INT64_MAX INT64_C( 9223372036854775807)
# define INT64_MIN (-INT64_C( 9223372036854775807)-1)
# define UINT64_MAX UINT64_C(18446744073709551615)
+
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
+# define UINT64_WIDTH 64
+# define INT64_WIDTH UINT64_WIDTH
+
+# define __UINT_LEAST64_WIDTH UINT64_WIDTH
+# undef __UINT_LEAST32_WIDTH
+# define __UINT_LEAST32_WIDTH UINT64_WIDTH
+# undef __UINT_LEAST16_WIDTH
+# define __UINT_LEAST16_WIDTH UINT64_WIDTH
+# undef __UINT_LEAST8_MAX
+# define __UINT_LEAST8_MAX UINT64_MAX
+#endif /* __STDC_VERSION__ */
+
# define __INT_LEAST64_MIN INT64_MIN
# define __INT_LEAST64_MAX INT64_MAX
# define __UINT_LEAST64_MAX UINT64_MAX
+# undef __INT_LEAST32_MIN
# define __INT_LEAST32_MIN INT64_MIN
+# undef __INT_LEAST32_MAX
# define __INT_LEAST32_MAX INT64_MAX
+# undef __UINT_LEAST32_MAX
# define __UINT_LEAST32_MAX UINT64_MAX
+# undef __INT_LEAST16_MIN
# define __INT_LEAST16_MIN INT64_MIN
+# undef __INT_LEAST16_MAX
# define __INT_LEAST16_MAX INT64_MAX
+# undef __UINT_LEAST16_MAX
# define __UINT_LEAST16_MAX UINT64_MAX
+# undef __INT_LEAST8_MIN
# define __INT_LEAST8_MIN INT64_MIN
+# undef __INT_LEAST8_MAX
# define __INT_LEAST8_MAX INT64_MAX
+# undef __UINT_LEAST8_MAX
# define __UINT_LEAST8_MAX UINT64_MAX
#endif /* __INT64_TYPE__ */
@@ -482,6 +543,13 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define INT_FAST64_MIN __INT_LEAST64_MIN
# define INT_FAST64_MAX __INT_LEAST64_MAX
# define UINT_FAST64_MAX __UINT_LEAST64_MAX
+
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
+# define UINT_LEAST64_WIDTH __UINT_LEAST64_WIDTH
+# define INT_LEAST64_WIDTH UINT_LEAST64_WIDTH
+# define UINT_FAST64_WIDTH __UINT_LEAST64_WIDTH
+# define INT_FAST64_WIDTH UINT_FAST64_WIDTH
+#endif /* __STDC_VERSION__ */
#endif /* __INT_LEAST64_MIN */
@@ -495,15 +563,40 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define INT_FAST56_MIN INT56_MIN
# define INT_FAST56_MAX INT56_MAX
# define UINT_FAST56_MAX UINT56_MAX
+
+# undef __INT_LEAST32_MIN
# define __INT_LEAST32_MIN INT56_MIN
+# undef __INT_LEAST32_MAX
# define __INT_LEAST32_MAX INT56_MAX
+# undef __UINT_LEAST32_MAX
# define __UINT_LEAST32_MAX UINT56_MAX
+# undef __INT_LEAST16_MIN
# define __INT_LEAST16_MIN INT56_MIN
+# undef __INT_LEAST16_MAX
# define __INT_LEAST16_MAX INT56_MAX
+# undef __UINT_LEAST16_MAX
# define __UINT_LEAST16_MAX UINT56_MAX
+# undef __INT_LEAST8_MIN
# define __INT_LEAST8_MIN INT56_MIN
+# undef __INT_LEAST8_MAX
# define __INT_LEAST8_MAX INT56_MAX
+# undef __UINT_LEAST8_MAX
# define __UINT_LEAST8_MAX UINT56_MAX
+
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
+# define UINT56_WIDTH 56
+# define INT56_WIDTH UINT56_WIDTH
+# define UINT_LEAST56_WIDTH UINT56_WIDTH
+# define INT_LEAST56_WIDTH UINT_LEAST56_WIDTH
+# define UINT_FAST56_WIDTH UINT56_WIDTH
+# define INT_FAST56_WIDTH UINT_FAST56_WIDTH
+# undef __UINT_LEAST32_WIDTH
+# define __UINT_LEAST32_WIDTH UINT56_WIDTH
+# undef __UINT_LEAST16_WIDTH
+# define __UINT_LEAST16_WIDTH UINT56_WIDTH
+# undef __UINT_LEAST8_WIDTH
+# define __UINT_LEAST8_WIDTH UINT56_WIDTH
+#endif /* __STDC_VERSION__ */
#endif /* __INT56_TYPE__ */
@@ -517,15 +610,40 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define INT_FAST48_MIN INT48_MIN
# define INT_FAST48_MAX INT48_MAX
# define UINT_FAST48_MAX UINT48_MAX
+
+# undef __INT_LEAST32_MIN
# define __INT_LEAST32_MIN INT48_MIN
+# undef __INT_LEAST32_MAX
# define __INT_LEAST32_MAX INT48_MAX
+# undef __UINT_LEAST32_MAX
# define __UINT_LEAST32_MAX UINT48_MAX
+# undef __INT_LEAST16_MIN
# define __INT_LEAST16_MIN INT48_MIN
+# undef __INT_LEAST16_MAX
# define __INT_LEAST16_MAX INT48_MAX
+# undef __UINT_LEAST16_MAX
# define __UINT_LEAST16_MAX UINT48_MAX
+# undef __INT_LEAST8_MIN
# define __INT_LEAST8_MIN INT48_MIN
+# undef __INT_LEAST8_MAX
# define __INT_LEAST8_MAX INT48_MAX
+# undef __UINT_LEAST8_MAX
# define __UINT_LEAST8_MAX UINT48_MAX
+
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
+#define UINT48_WIDTH 48
+#define INT48_WIDTH UINT48_WIDTH
+#define UINT_LEAST48_WIDTH UINT48_WIDTH
+#define INT_LEAST48_WIDTH UINT_LEAST48_WIDTH
+#define UINT_FAST48_WIDTH UINT48_WIDTH
+#define INT_FAST48_WIDTH UINT_FAST48_WIDTH
+#undef __UINT_LEAST32_WIDTH
+#define __UINT_LEAST32_WIDTH UINT48_WIDTH
+# undef __UINT_LEAST16_WIDTH
+#define __UINT_LEAST16_WIDTH UINT48_WIDTH
+# undef __UINT_LEAST8_WIDTH
+#define __UINT_LEAST8_WIDTH UINT48_WIDTH
+#endif /* __STDC_VERSION__ */
#endif /* __INT48_TYPE__ */
@@ -539,15 +657,40 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define INT_FAST40_MIN INT40_MIN
# define INT_FAST40_MAX INT40_MAX
# define UINT_FAST40_MAX UINT40_MAX
+
+# undef __INT_LEAST32_MIN
# define __INT_LEAST32_MIN INT40_MIN
+# undef __INT_LEAST32_MAX
# define __INT_LEAST32_MAX INT40_MAX
+# undef __UINT_LEAST32_MAX
# define __UINT_LEAST32_MAX UINT40_MAX
+# undef __INT_LEAST16_MIN
# define __INT_LEAST16_MIN INT40_MIN
+# undef __INT_LEAST16_MAX
# define __INT_LEAST16_MAX INT40_MAX
+# undef __UINT_LEAST16_MAX
# define __UINT_LEAST16_MAX UINT40_MAX
+# undef __INT_LEAST8_MIN
# define __INT_LEAST8_MIN INT40_MIN
+# undef __INT_LEAST8_MAX
# define __INT_LEAST8_MAX INT40_MAX
+# undef __UINT_LEAST8_MAX
# define __UINT_LEAST8_MAX UINT40_MAX
+
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
+# define UINT40_WIDTH 40
+# define INT40_WIDTH UINT40_WIDTH
+# define UINT_LEAST40_WIDTH UINT40_WIDTH
+# define INT_LEAST40_WIDTH UINT_LEAST40_WIDTH
+# define UINT_FAST40_WIDTH UINT40_WIDTH
+# define INT_FAST40_WIDTH UINT_FAST40_WIDTH
+# undef __UINT_LEAST32_WIDTH
+# define __UINT_LEAST32_WIDTH UINT40_WIDTH
+# undef __UINT_LEAST16_WIDTH
+# define __UINT_LEAST16_WIDTH UINT40_WIDTH
+# undef __UINT_LEAST8_WIDTH
+# define __UINT_LEAST8_WIDTH UINT40_WIDTH
+#endif /* __STDC_VERSION__ */
#endif /* __INT40_TYPE__ */
@@ -555,15 +698,36 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define INT32_MAX INT32_C(2147483647)
# define INT32_MIN (-INT32_C(2147483647)-1)
# define UINT32_MAX UINT32_C(4294967295)
+
+# undef __INT_LEAST32_MIN
# define __INT_LEAST32_MIN INT32_MIN
+# undef __INT_LEAST32_MAX
# define __INT_LEAST32_MAX INT32_MAX
+# undef __UINT_LEAST32_MAX
# define __UINT_LEAST32_MAX UINT32_MAX
+# undef __INT_LEAST16_MIN
# define __INT_LEAST16_MIN INT32_MIN
+# undef __INT_LEAST16_MAX
# define __INT_LEAST16_MAX INT32_MAX
+# undef __UINT_LEAST16_MAX
# define __UINT_LEAST16_MAX UINT32_MAX
+# undef __INT_LEAST8_MIN
# define __INT_LEAST8_MIN INT32_MIN
+# undef __INT_LEAST8_MAX
# define __INT_LEAST8_MAX INT32_MAX
+# undef __UINT_LEAST8_MAX
# define __UINT_LEAST8_MAX UINT32_MAX
+
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
+# define UINT32_WIDTH 32
+# define INT32_WIDTH UINT32_WIDTH
+# undef __UINT_LEAST32_WIDTH
+# define __UINT_LEAST32_WIDTH UINT32_WIDTH
+# undef __UINT_LEAST16_WIDTH
+# define __UINT_LEAST16_WIDTH UINT32_WIDTH
+# undef __UINT_LEAST8_WIDTH
+# define __UINT_LEAST8_WIDTH UINT32_WIDTH
+#endif /* __STDC_VERSION__ */
#endif /* __INT32_TYPE__ */
#ifdef __INT_LEAST32_MIN
@@ -573,6 +737,13 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define INT_FAST32_MIN __INT_LEAST32_MIN
# define INT_FAST32_MAX __INT_LEAST32_MAX
# define UINT_FAST32_MAX __UINT_LEAST32_MAX
+
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
+# define UINT_LEAST32_WIDTH __UINT_LEAST32_WIDTH
+# define INT_LEAST32_WIDTH UINT_LEAST32_WIDTH
+# define UINT_FAST32_WIDTH __UINT_LEAST32_WIDTH
+# define INT_FAST32_WIDTH UINT_FAST32_WIDTH
+#endif /* __STDC_VERSION__ */
#endif /* __INT_LEAST32_MIN */
@@ -586,12 +757,32 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define INT_FAST24_MIN INT24_MIN
# define INT_FAST24_MAX INT24_MAX
# define UINT_FAST24_MAX UINT24_MAX
+
+# undef __INT_LEAST16_MIN
# define __INT_LEAST16_MIN INT24_MIN
+# undef __INT_LEAST16_MAX
# define __INT_LEAST16_MAX INT24_MAX
+# undef __UINT_LEAST16_MAX
# define __UINT_LEAST16_MAX UINT24_MAX
+# undef __INT_LEAST8_MIN
# define __INT_LEAST8_MIN INT24_MIN
+# undef __INT_LEAST8_MAX
# define __INT_LEAST8_MAX INT24_MAX
+# undef __UINT_LEAST8_MAX
# define __UINT_LEAST8_MAX UINT24_MAX
+
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
+# define UINT24_WIDTH 24
+# define INT24_WIDTH UINT24_WIDTH
+# define UINT_LEAST24_WIDTH UINT24_WIDTH
+# define INT_LEAST24_WIDTH UINT_LEAST24_WIDTH
+# define UINT_FAST24_WIDTH UINT24_WIDTH
+# define INT_FAST24_WIDTH UINT_FAST24_WIDTH
+# undef __UINT_LEAST16_WIDTH
+# define __UINT_LEAST16_WIDTH UINT24_WIDTH
+# undef __UINT_LEAST8_WIDTH
+# define __UINT_LEAST8_WIDTH UINT24_WIDTH
+#endif /* __STDC_VERSION__ */
#endif /* __INT24_TYPE__ */
@@ -599,12 +790,28 @@ typedef __UINTMAX_TYPE__ uintmax_t;
#define INT16_MAX INT16_C(32767)
#define INT16_MIN (-INT16_C(32767)-1)
#define UINT16_MAX UINT16_C(65535)
+
+# undef __INT_LEAST16_MIN
# define __INT_LEAST16_MIN INT16_MIN
+# undef __INT_LEAST16_MAX
# define __INT_LEAST16_MAX INT16_MAX
+# undef __UINT_LEAST16_MAX
# define __UINT_LEAST16_MAX UINT16_MAX
+# undef __INT_LEAST8_MIN
# define __INT_LEAST8_MIN INT16_MIN
+# undef __INT_LEAST8_MAX
# define __INT_LEAST8_MAX INT16_MAX
+# undef __UINT_LEAST8_MAX
# define __UINT_LEAST8_MAX UINT16_MAX
+
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
+# define UINT16_WIDTH 16
+# define INT16_WIDTH UINT16_WIDTH
+# undef __UINT_LEAST16_WIDTH
+# define __UINT_LEAST16_WIDTH UINT16_WIDTH
+# undef __UINT_LEAST8_WIDTH
+# define __UINT_LEAST8_WIDTH UINT16_WIDTH
+#endif /* __STDC_VERSION__ */
#endif /* __INT16_TYPE__ */
#ifdef __INT_LEAST16_MIN
@@ -614,6 +821,13 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define INT_FAST16_MIN __INT_LEAST16_MIN
# define INT_FAST16_MAX __INT_LEAST16_MAX
# define UINT_FAST16_MAX __UINT_LEAST16_MAX
+
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
+# define UINT_LEAST16_WIDTH __UINT_LEAST16_WIDTH
+# define INT_LEAST16_WIDTH UINT_LEAST16_WIDTH
+# define UINT_FAST16_WIDTH __UINT_LEAST16_WIDTH
+# define INT_FAST16_WIDTH UINT_FAST16_WIDTH
+#endif /* __STDC_VERSION__ */
#endif /* __INT_LEAST16_MIN */
@@ -621,9 +835,20 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define INT8_MAX INT8_C(127)
# define INT8_MIN (-INT8_C(127)-1)
# define UINT8_MAX UINT8_C(255)
+
+# undef __INT_LEAST8_MIN
# define __INT_LEAST8_MIN INT8_MIN
+# undef __INT_LEAST8_MAX
# define __INT_LEAST8_MAX INT8_MAX
+# undef __UINT_LEAST8_MAX
# define __UINT_LEAST8_MAX UINT8_MAX
+
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
+# define UINT8_WIDTH 8
+# define INT8_WIDTH UINT8_WIDTH
+# undef __UINT_LEAST8_WIDTH
+# define __UINT_LEAST8_WIDTH UINT8_WIDTH
+#endif /* __STDC_VERSION__ */
#endif /* __INT8_TYPE__ */
#ifdef __INT_LEAST8_MIN
@@ -633,6 +858,13 @@ typedef __UINTMAX_TYPE__ uintmax_t;
# define INT_FAST8_MIN __INT_LEAST8_MIN
# define INT_FAST8_MAX __INT_LEAST8_MAX
# define UINT_FAST8_MAX __UINT_LEAST8_MAX
+
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
+# define UINT_LEAST8_WIDTH __UINT_LEAST8_WIDTH
+# define INT_LEAST8_WIDTH UINT_LEAST8_WIDTH
+# define UINT_FAST8_WIDTH __UINT_LEAST8_WIDTH
+# define INT_FAST8_WIDTH UINT_FAST8_WIDTH
+#endif /* __STDC_VERSION__ */
#endif /* __INT_LEAST8_MIN */
/* Some utility macros */
@@ -652,6 +884,14 @@ typedef __UINTMAX_TYPE__ uintmax_t;
#define PTRDIFF_MAX __PTRDIFF_MAX__
#define SIZE_MAX __SIZE_MAX__
+/* C23 7.22.2.4 Width of integer types capable of holding object pointers. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
+/* NB: The C standard requires that these be the same value, but the compiler
+ exposes separate internal width macros. */
+#define INTPTR_WIDTH __INTPTR_WIDTH__
+#define UINTPTR_WIDTH __UINTPTR_WIDTH__
+#endif
+
/* ISO9899:2011 7.20 (C11 Annex K): Define RSIZE_MAX if __STDC_WANT_LIB_EXT1__
* is enabled. */
#if defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1
@@ -663,6 +903,14 @@ typedef __UINTMAX_TYPE__ uintmax_t;
#define INTMAX_MAX __INTMAX_MAX__
#define UINTMAX_MAX __UINTMAX_MAX__
+/* C23 7.22.2.5 Width of greatest-width integer types. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
+/* NB: The C standard requires that these be the same value, but the compiler
+ exposes separate internal width macros. */
+#define INTMAX_WIDTH __INTMAX_WIDTH__
+#define UINTMAX_WIDTH __UINTMAX_WIDTH__
+#endif
+
/* C99 7.18.3 Limits of other integer types. */
#define SIG_ATOMIC_MIN __INTN_MIN(__SIG_ATOMIC_WIDTH__)
#define SIG_ATOMIC_MAX __INTN_MAX(__SIG_ATOMIC_WIDTH__)
@@ -689,5 +937,14 @@ typedef __UINTMAX_TYPE__ uintmax_t;
#define INTMAX_C(v) __int_c(v, __INTMAX_C_SUFFIX__)
#define UINTMAX_C(v) __int_c(v, __UINTMAX_C_SUFFIX__)
+/* C23 7.22.3.x Width of other integer types. */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L
+#define PTRDIFF_WIDTH __PTRDIFF_WIDTH__
+#define SIG_ATOMIC_WIDTH __SIG_ATOMIC_WIDTH__
+#define SIZE_WIDTH __SIZE_WIDTH__
+#define WCHAR_WIDTH __WCHAR_WIDTH__
+#define WINT_WIDTH __WINT_WIDTH__
+#endif
+
#endif /* __STDC_HOSTED__ */
#endif /* __CLANG_STDINT_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/stdnoreturn.h b/contrib/llvm-project/clang/lib/Headers/stdnoreturn.h
index e83cd8153752..c90bf77e840e 100644
--- a/contrib/llvm-project/clang/lib/Headers/stdnoreturn.h
+++ b/contrib/llvm-project/clang/lib/Headers/stdnoreturn.h
@@ -13,4 +13,17 @@
#define noreturn _Noreturn
#define __noreturn_is_defined 1
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ > 201710L) && \
+ !defined(_CLANG_DISABLE_CRT_DEPRECATION_WARNINGS)
+/* The noreturn macro is deprecated in C23. We do not mark it as such because
+ including the header file in C23 is also deprecated and we do not want to
+ issue a confusing diagnostic for code which includes <stdnoreturn.h>
+ followed by code that writes [[noreturn]]. The issue with such code is not
+ with the attribute, or the use of 'noreturn', but the inclusion of the
+ header. */
+/* FIXME: We should be issuing a deprecation warning here, but cannot yet due
+ * to system headers which include this header file unconditionally.
+ */
+#endif
+
#endif /* __STDNORETURN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/tmmintrin.h b/contrib/llvm-project/clang/lib/Headers/tmmintrin.h
index 35533e115c7d..7d8dc46c57bf 100644
--- a/contrib/llvm-project/clang/lib/Headers/tmmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/tmmintrin.h
@@ -10,11 +10,20 @@
#ifndef __TMMINTRIN_H
#define __TMMINTRIN_H
+#if !defined(__i386__) && !defined(__x86_64__)
+#error "This header is only meant to be used on x86 and x64 architecture"
+#endif
+
#include <pmmintrin.h>
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("ssse3"), __min_vector_width__(64)))
-#define __DEFAULT_FN_ATTRS_MMX __attribute__((__always_inline__, __nodebug__, __target__("mmx,ssse3"), __min_vector_width__(64)))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("ssse3,no-evex512"), __min_vector_width__(64)))
+#define __DEFAULT_FN_ATTRS_MMX \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("mmx,ssse3,no-evex512"), \
+ __min_vector_width__(64)))
/// Computes the absolute value of each of the packed 8-bit signed
/// integers in the source operand and stores the 8-bit unsigned integer
@@ -49,7 +58,7 @@ _mm_abs_pi8(__m64 __a)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_abs_epi8(__m128i __a)
{
- return (__m128i)__builtin_ia32_pabsb128((__v16qi)__a);
+ return (__m128i)__builtin_elementwise_abs((__v16qs)__a);
}
/// Computes the absolute value of each of the packed 16-bit signed
@@ -85,7 +94,7 @@ _mm_abs_pi16(__m64 __a)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_abs_epi16(__m128i __a)
{
- return (__m128i)__builtin_ia32_pabsw128((__v8hi)__a);
+ return (__m128i)__builtin_elementwise_abs((__v8hi)__a);
}
/// Computes the absolute value of each of the packed 32-bit signed
@@ -121,7 +130,7 @@ _mm_abs_pi32(__m64 __a)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_abs_epi32(__m128i __a)
{
- return (__m128i)__builtin_ia32_pabsd128((__v4si)__a);
+ return (__m128i)__builtin_elementwise_abs((__v4si)__a);
}
/// Concatenates the two 128-bit integer vector operands, and
@@ -145,8 +154,8 @@ _mm_abs_epi32(__m128i __a)
/// \returns A 128-bit integer vector containing the concatenated right-shifted
/// value.
#define _mm_alignr_epi8(a, b, n) \
- (__m128i)__builtin_ia32_palignr128((__v16qi)(__m128i)(a), \
- (__v16qi)(__m128i)(b), (n))
+ ((__m128i)__builtin_ia32_palignr128((__v16qi)(__m128i)(a), \
+ (__v16qi)(__m128i)(b), (n)))
/// Concatenates the two 64-bit integer vector operands, and right-shifts
/// the result by the number of bytes specified in the immediate operand.
@@ -168,7 +177,7 @@ _mm_abs_epi32(__m128i __a)
/// \returns A 64-bit integer vector containing the concatenated right-shifted
/// value.
#define _mm_alignr_pi8(a, b, n) \
- (__m64)__builtin_ia32_palignr((__v8qi)(__m64)(a), (__v8qi)(__m64)(b), (n))
+ ((__m64)__builtin_ia32_palignr((__v8qi)(__m64)(a), (__v8qi)(__m64)(b), (n)))
/// Horizontally adds the adjacent pairs of values contained in 2 packed
/// 128-bit vectors of [8 x i16].
diff --git a/contrib/llvm-project/clang/lib/Headers/uintrintrin.h b/contrib/llvm-project/clang/lib/Headers/uintrintrin.h
index e3839dcebe1e..135dc814c72e 100644
--- a/contrib/llvm-project/clang/lib/Headers/uintrintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/uintrintrin.h
@@ -39,9 +39,9 @@ struct __uintr_frame
///
/// This intrinsic corresponds to the <c> CLUI </c> instruction.
///
-/// \operation
+/// \code{.operation}
/// UIF := 0
-/// \endoperation
+/// \endcode
static __inline__ void __DEFAULT_FN_ATTRS
_clui (void)
{
@@ -60,9 +60,9 @@ _clui (void)
///
/// This intrinsic corresponds to the <c> STUI </c> instruction.
///
-/// \operation
+/// \code{.operation}
/// UIF := 1
-/// \endoperation
+/// \endcode
static __inline__ void __DEFAULT_FN_ATTRS
_stui (void)
{
@@ -81,7 +81,7 @@ _stui (void)
///
/// \returns The current value of the user interrupt flag (UIF).
///
-/// \operation
+/// \code{.operation}
/// CF := UIF
/// ZF := 0
/// AF := 0
@@ -89,7 +89,7 @@ _stui (void)
/// PF := 0
/// SF := 0
/// dst := CF
-/// \endoperation
+/// \endcode
static __inline__ unsigned char __DEFAULT_FN_ATTRS
_testui (void)
{
@@ -110,7 +110,7 @@ _testui (void)
/// Index of user-interrupt target table entry in user-interrupt target
/// table.
///
-/// \operation
+/// \code{.operation}
/// IF __a > UITTSZ
/// GP (0)
/// FI
@@ -143,7 +143,7 @@ _testui (void)
/// SendOrdinaryIPI(tempUPID.NV, tempUPID.NDST[15:8])
/// FI
/// FI
-/// \endoperation
+/// \endcode
static __inline__ void __DEFAULT_FN_ATTRS
_senduipi (unsigned long long __a)
{
diff --git a/contrib/llvm-project/clang/lib/Headers/unwind.h b/contrib/llvm-project/clang/lib/Headers/unwind.h
index 029524b7bc84..33e1792cd1fb 100644
--- a/contrib/llvm-project/clang/lib/Headers/unwind.h
+++ b/contrib/llvm-project/clang/lib/Headers/unwind.h
@@ -62,9 +62,11 @@ typedef intptr_t _sleb128_t;
typedef uintptr_t _uleb128_t;
struct _Unwind_Context;
-#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || defined(__ARM_DWARF_EH__))
+#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || \
+ defined(__ARM_DWARF_EH__) || defined(__SEH__))
struct _Unwind_Control_Block;
-typedef struct _Unwind_Control_Block _Unwind_Exception; /* Alias */
+typedef struct _Unwind_Control_Block _Unwind_Control_Block;
+#define _Unwind_Exception _Unwind_Control_Block /* Alias */
#else
struct _Unwind_Exception;
typedef struct _Unwind_Exception _Unwind_Exception;
@@ -72,7 +74,7 @@ typedef struct _Unwind_Exception _Unwind_Exception;
typedef enum {
_URC_NO_REASON = 0,
#if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
- !defined(__ARM_DWARF_EH__)
+ !defined(__ARM_DWARF_EH__) && !defined(__SEH__)
_URC_OK = 0, /* used by ARM EHABI */
#endif
_URC_FOREIGN_EXCEPTION_CAUGHT = 1,
@@ -86,7 +88,7 @@ typedef enum {
_URC_INSTALL_CONTEXT = 7,
_URC_CONTINUE_UNWIND = 8,
#if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
- !defined(__ARM_DWARF_EH__)
+ !defined(__ARM_DWARF_EH__) && !defined(__SEH__)
_URC_FAILURE = 9 /* used by ARM EHABI */
#endif
} _Unwind_Reason_Code;
@@ -103,7 +105,8 @@ typedef enum {
typedef void (*_Unwind_Exception_Cleanup_Fn)(_Unwind_Reason_Code,
_Unwind_Exception *);
-#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || defined(__ARM_DWARF_EH__))
+#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || \
+ defined(__ARM_DWARF_EH__) || defined(__SEH__))
typedef struct _Unwind_Control_Block _Unwind_Control_Block;
typedef uint32_t _Unwind_EHT_Header;
@@ -167,12 +170,14 @@ typedef _Unwind_Personality_Fn __personality_routine;
typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn)(struct _Unwind_Context *,
void *);
-#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || defined(__ARM_DWARF_EH__))
+#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || \
+ defined(__ARM_DWARF_EH__) || defined(__SEH__))
typedef enum {
_UVRSC_CORE = 0, /* integer register */
_UVRSC_VFP = 1, /* vfp */
_UVRSC_WMMXD = 3, /* Intel WMMX data register */
- _UVRSC_WMMXC = 4 /* Intel WMMX control register */
+ _UVRSC_WMMXC = 4, /* Intel WMMX control register */
+ _UVRSC_PSEUDO = 5 /* Special purpose pseudo register */
} _Unwind_VRS_RegClass;
typedef enum {
diff --git a/contrib/llvm-project/clang/lib/Headers/usermsrintrin.h b/contrib/llvm-project/clang/lib/Headers/usermsrintrin.h
new file mode 100644
index 000000000000..61388376706d
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/usermsrintrin.h
@@ -0,0 +1,51 @@
+/*===--------------- usermsrintrin.h - USERMSR intrinsics -----------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __X86GPRINTRIN_H
+#error "Never use <usermsrintrin.h> directly; include <x86gprintrin.h> instead."
+#endif // __X86GPRINTRIN_H
+
+#ifndef __USERMSRINTRIN_H
+#define __USERMSRINTRIN_H
+#ifdef __x86_64__
+
+/// Reads the contents of a 64-bit MSR specified in \a __A into \a dst.
+///
+/// This intrinsic corresponds to the <c> URDMSR </c> instruction.
+/// \param __A
+/// An unsigned long long.
+///
+/// \code{.operation}
+/// DEST := MSR[__A]
+/// \endcode
+static __inline__ unsigned long long
+ __attribute__((__always_inline__, __nodebug__, __target__("usermsr")))
+ _urdmsr(unsigned long long __A) {
+ return __builtin_ia32_urdmsr(__A);
+}
+
+/// Writes the contents of \a __B into the 64-bit MSR specified in \a __A.
+///
+/// This intrinsic corresponds to the <c> UWRMSR </c> instruction.
+///
+/// \param __A
+/// An unsigned long long.
+/// \param __B
+/// An unsigned long long.
+///
+/// \code{.operation}
+/// MSR[__A] := __B
+/// \endcode
+static __inline__ void
+ __attribute__((__always_inline__, __nodebug__, __target__("usermsr")))
+ _uwrmsr(unsigned long long __A, unsigned long long __B) {
+ return __builtin_ia32_uwrmsr(__A, __B);
+}
+
+#endif // __x86_64__
+#endif // __USERMSRINTRIN_H
diff --git a/contrib/llvm-project/clang/lib/Headers/vaesintrin.h b/contrib/llvm-project/clang/lib/Headers/vaesintrin.h
index f3c0807bb94a..d7c162f5c0b1 100644
--- a/contrib/llvm-project/clang/lib/Headers/vaesintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/vaesintrin.h
@@ -18,8 +18,10 @@
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("vaes"), __min_vector_width__(256)))
/* Default attributes for ZMM forms. */
-#define __DEFAULT_FN_ATTRS_F __attribute__((__always_inline__, __nodebug__, __target__("avx512f,vaes"), __min_vector_width__(512)))
-
+#define __DEFAULT_FN_ATTRS_F \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512f,evex512,vaes"), \
+ __min_vector_width__(512)))
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_aesenc_epi128(__m256i __A, __m256i __B)
@@ -82,4 +84,4 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS_F
#undef __DEFAULT_FN_ATTRS
#undef __DEFAULT_FN_ATTRS_F
-#endif
+#endif // __VAESINTRIN_H
diff --git a/contrib/llvm-project/clang/lib/Headers/vecintrin.h b/contrib/llvm-project/clang/lib/Headers/vecintrin.h
index ec1dbfd015f6..1f51e32c0d13 100644
--- a/contrib/llvm-project/clang/lib/Headers/vecintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/vecintrin.h
@@ -1543,7 +1543,7 @@ vec_load_len(const double *__ptr, unsigned int __len) {
#if __ARCH__ >= 12
static inline __ATTRS_ai __vector unsigned char
vec_load_len_r(const unsigned char *__ptr, unsigned int __len) {
- return (__vector unsigned char)__builtin_s390_vlrl(__len, __ptr);
+ return (__vector unsigned char)__builtin_s390_vlrlr(__len, __ptr);
}
#endif
@@ -1617,7 +1617,7 @@ vec_store_len(__vector double __vec, double *__ptr,
static inline __ATTRS_ai void
vec_store_len_r(__vector unsigned char __vec, unsigned char *__ptr,
unsigned int __len) {
- __builtin_s390_vstrl((__vector signed char)__vec, __len, __ptr);
+ __builtin_s390_vstrlr((__vector signed char)__vec, __len, __ptr);
}
#endif
@@ -2689,7 +2689,8 @@ vec_cmplt(__vector double __a, __vector double __b) {
static inline __ATTRS_o_ai int
vec_all_eq(__vector signed char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vceqbs(__a, __b, &__cc);
+ __builtin_s390_vceqbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc == 0;
}
@@ -2697,7 +2698,8 @@ vec_all_eq(__vector signed char __a, __vector signed char __b) {
static inline __ATTRS_o_ai int
vec_all_eq(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc == 0;
}
@@ -2705,15 +2707,15 @@ vec_all_eq(__vector signed char __a, __vector __bool char __b) {
static inline __ATTRS_o_ai int
vec_all_eq(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc);
+ __builtin_s390_vceqbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
vec_all_eq(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((__vector signed char)__a,
- (__vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs(__a, __b, &__cc);
return __cc == 0;
}
@@ -2721,8 +2723,7 @@ vec_all_eq(__vector unsigned char __a, __vector unsigned char __b) {
static inline __ATTRS_o_ai int
vec_all_eq(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((__vector signed char)__a,
- (__vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs(__a, (__vector unsigned char)__b, &__cc);
return __cc == 0;
}
@@ -2730,23 +2731,23 @@ vec_all_eq(__vector unsigned char __a, __vector __bool char __b) {
static inline __ATTRS_o_ai int
vec_all_eq(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((__vector signed char)__a,
- (__vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector unsigned char)__a, __b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
vec_all_eq(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((__vector signed char)__a,
- (__vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
vec_all_eq(__vector signed short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vceqhs(__a, __b, &__cc);
+ __builtin_s390_vceqhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc == 0;
}
@@ -2754,7 +2755,8 @@ vec_all_eq(__vector signed short __a, __vector signed short __b) {
static inline __ATTRS_o_ai int
vec_all_eq(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc == 0;
}
@@ -2762,15 +2764,15 @@ vec_all_eq(__vector signed short __a, __vector __bool short __b) {
static inline __ATTRS_o_ai int
vec_all_eq(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc);
+ __builtin_s390_vceqhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
vec_all_eq(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((__vector signed short)__a,
- (__vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs(__a, __b, &__cc);
return __cc == 0;
}
@@ -2778,8 +2780,7 @@ vec_all_eq(__vector unsigned short __a, __vector unsigned short __b) {
static inline __ATTRS_o_ai int
vec_all_eq(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((__vector signed short)__a,
- (__vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs(__a, (__vector unsigned short)__b, &__cc);
return __cc == 0;
}
@@ -2787,23 +2788,23 @@ vec_all_eq(__vector unsigned short __a, __vector __bool short __b) {
static inline __ATTRS_o_ai int
vec_all_eq(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((__vector signed short)__a,
- (__vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector unsigned short)__a, __b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
vec_all_eq(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((__vector signed short)__a,
- (__vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
vec_all_eq(__vector signed int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vceqfs(__a, __b, &__cc);
+ __builtin_s390_vceqfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc == 0;
}
@@ -2811,7 +2812,8 @@ vec_all_eq(__vector signed int __a, __vector signed int __b) {
static inline __ATTRS_o_ai int
vec_all_eq(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc == 0;
}
@@ -2819,15 +2821,15 @@ vec_all_eq(__vector signed int __a, __vector __bool int __b) {
static inline __ATTRS_o_ai int
vec_all_eq(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc);
+ __builtin_s390_vceqfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
vec_all_eq(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((__vector signed int)__a,
- (__vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs(__a, __b, &__cc);
return __cc == 0;
}
@@ -2835,8 +2837,7 @@ vec_all_eq(__vector unsigned int __a, __vector unsigned int __b) {
static inline __ATTRS_o_ai int
vec_all_eq(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((__vector signed int)__a,
- (__vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs(__a, (__vector unsigned int)__b, &__cc);
return __cc == 0;
}
@@ -2844,23 +2845,23 @@ vec_all_eq(__vector unsigned int __a, __vector __bool int __b) {
static inline __ATTRS_o_ai int
vec_all_eq(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((__vector signed int)__a,
- (__vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector unsigned int)__a, __b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
vec_all_eq(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((__vector signed int)__a,
- (__vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
vec_all_eq(__vector signed long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vceqgs(__a, __b, &__cc);
+ __builtin_s390_vceqgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc == 0;
}
@@ -2868,7 +2869,8 @@ vec_all_eq(__vector signed long long __a, __vector signed long long __b) {
static inline __ATTRS_o_ai int
vec_all_eq(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc == 0;
}
@@ -2876,15 +2878,15 @@ vec_all_eq(__vector signed long long __a, __vector __bool long long __b) {
static inline __ATTRS_o_ai int
vec_all_eq(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc);
+ __builtin_s390_vceqgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
vec_all_eq(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((__vector signed long long)__a,
- (__vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs(__a, __b, &__cc);
return __cc == 0;
}
@@ -2892,8 +2894,7 @@ vec_all_eq(__vector unsigned long long __a, __vector unsigned long long __b) {
static inline __ATTRS_o_ai int
vec_all_eq(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((__vector signed long long)__a,
- (__vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs(__a, (__vector unsigned long long)__b, &__cc);
return __cc == 0;
}
@@ -2901,16 +2902,15 @@ vec_all_eq(__vector unsigned long long __a, __vector __bool long long __b) {
static inline __ATTRS_o_ai int
vec_all_eq(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((__vector signed long long)__a,
- (__vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector unsigned long long)__a, __b, &__cc);
return __cc == 0;
}
static inline __ATTRS_o_ai int
vec_all_eq(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((__vector signed long long)__a,
- (__vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc == 0;
}
@@ -2935,7 +2935,8 @@ vec_all_eq(__vector double __a, __vector double __b) {
static inline __ATTRS_o_ai int
vec_all_ne(__vector signed char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vceqbs(__a, __b, &__cc);
+ __builtin_s390_vceqbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc == 3;
}
@@ -2943,7 +2944,8 @@ vec_all_ne(__vector signed char __a, __vector signed char __b) {
static inline __ATTRS_o_ai int
vec_all_ne(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc == 3;
}
@@ -2951,15 +2953,16 @@ vec_all_ne(__vector signed char __a, __vector __bool char __b) {
static inline __ATTRS_o_ai int
vec_all_ne(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc);
+ __builtin_s390_vceqbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
vec_all_ne(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((__vector signed char)__a,
- (__vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc == 3;
}
@@ -2967,8 +2970,7 @@ vec_all_ne(__vector unsigned char __a, __vector unsigned char __b) {
static inline __ATTRS_o_ai int
vec_all_ne(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((__vector signed char)__a,
- (__vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs(__a, (__vector unsigned char)__b, &__cc);
return __cc == 3;
}
@@ -2976,23 +2978,23 @@ vec_all_ne(__vector unsigned char __a, __vector __bool char __b) {
static inline __ATTRS_o_ai int
vec_all_ne(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((__vector signed char)__a,
- (__vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector unsigned char)__a, __b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
vec_all_ne(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((__vector signed char)__a,
- (__vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
vec_all_ne(__vector signed short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vceqhs(__a, __b, &__cc);
+ __builtin_s390_vceqhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc == 3;
}
@@ -3000,7 +3002,8 @@ vec_all_ne(__vector signed short __a, __vector signed short __b) {
static inline __ATTRS_o_ai int
vec_all_ne(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc == 3;
}
@@ -3008,15 +3011,15 @@ vec_all_ne(__vector signed short __a, __vector __bool short __b) {
static inline __ATTRS_o_ai int
vec_all_ne(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc);
+ __builtin_s390_vceqhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
vec_all_ne(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((__vector signed short)__a,
- (__vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs(__a, __b, &__cc);
return __cc == 3;
}
@@ -3024,8 +3027,7 @@ vec_all_ne(__vector unsigned short __a, __vector unsigned short __b) {
static inline __ATTRS_o_ai int
vec_all_ne(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((__vector signed short)__a,
- (__vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs(__a, (__vector unsigned short)__b, &__cc);
return __cc == 3;
}
@@ -3033,23 +3035,23 @@ vec_all_ne(__vector unsigned short __a, __vector __bool short __b) {
static inline __ATTRS_o_ai int
vec_all_ne(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((__vector signed short)__a,
- (__vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector unsigned short)__a, __b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
vec_all_ne(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((__vector signed short)__a,
- (__vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
vec_all_ne(__vector signed int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vceqfs(__a, __b, &__cc);
+ __builtin_s390_vceqfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc == 3;
}
@@ -3057,7 +3059,8 @@ vec_all_ne(__vector signed int __a, __vector signed int __b) {
static inline __ATTRS_o_ai int
vec_all_ne(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc == 3;
}
@@ -3065,15 +3068,15 @@ vec_all_ne(__vector signed int __a, __vector __bool int __b) {
static inline __ATTRS_o_ai int
vec_all_ne(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc);
+ __builtin_s390_vceqfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
vec_all_ne(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((__vector signed int)__a,
- (__vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs(__a, __b, &__cc);
return __cc == 3;
}
@@ -3081,8 +3084,7 @@ vec_all_ne(__vector unsigned int __a, __vector unsigned int __b) {
static inline __ATTRS_o_ai int
vec_all_ne(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((__vector signed int)__a,
- (__vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs(__a, (__vector unsigned int)__b, &__cc);
return __cc == 3;
}
@@ -3090,23 +3092,23 @@ vec_all_ne(__vector unsigned int __a, __vector __bool int __b) {
static inline __ATTRS_o_ai int
vec_all_ne(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((__vector signed int)__a,
- (__vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector unsigned int)__a, __b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
vec_all_ne(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((__vector signed int)__a,
- (__vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
vec_all_ne(__vector signed long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vceqgs(__a, __b, &__cc);
+ __builtin_s390_vceqgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc == 3;
}
@@ -3114,7 +3116,8 @@ vec_all_ne(__vector signed long long __a, __vector signed long long __b) {
static inline __ATTRS_o_ai int
vec_all_ne(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc == 3;
}
@@ -3122,15 +3125,15 @@ vec_all_ne(__vector signed long long __a, __vector __bool long long __b) {
static inline __ATTRS_o_ai int
vec_all_ne(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc);
+ __builtin_s390_vceqgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
vec_all_ne(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((__vector signed long long)__a,
- (__vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs(__a, __b, &__cc);
return __cc == 3;
}
@@ -3138,8 +3141,7 @@ vec_all_ne(__vector unsigned long long __a, __vector unsigned long long __b) {
static inline __ATTRS_o_ai int
vec_all_ne(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((__vector signed long long)__a,
- (__vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs(__a, (__vector unsigned long long)__b, &__cc);
return __cc == 3;
}
@@ -3147,16 +3149,15 @@ vec_all_ne(__vector unsigned long long __a, __vector __bool long long __b) {
static inline __ATTRS_o_ai int
vec_all_ne(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((__vector signed long long)__a,
- (__vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector unsigned long long)__a, __b, &__cc);
return __cc == 3;
}
static inline __ATTRS_o_ai int
vec_all_ne(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((__vector signed long long)__a,
- (__vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc == 3;
}
@@ -4241,7 +4242,8 @@ vec_all_numeric(__vector double __a) {
static inline __ATTRS_o_ai int
vec_any_eq(__vector signed char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vceqbs(__a, __b, &__cc);
+ __builtin_s390_vceqbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc <= 1;
}
@@ -4249,7 +4251,8 @@ vec_any_eq(__vector signed char __a, __vector signed char __b) {
static inline __ATTRS_o_ai int
vec_any_eq(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc <= 1;
}
@@ -4257,15 +4260,15 @@ vec_any_eq(__vector signed char __a, __vector __bool char __b) {
static inline __ATTRS_o_ai int
vec_any_eq(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc);
+ __builtin_s390_vceqbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
vec_any_eq(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((__vector signed char)__a,
- (__vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs(__a, __b, &__cc);
return __cc <= 1;
}
@@ -4273,8 +4276,7 @@ vec_any_eq(__vector unsigned char __a, __vector unsigned char __b) {
static inline __ATTRS_o_ai int
vec_any_eq(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((__vector signed char)__a,
- (__vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs(__a, (__vector unsigned char)__b, &__cc);
return __cc <= 1;
}
@@ -4282,23 +4284,23 @@ vec_any_eq(__vector unsigned char __a, __vector __bool char __b) {
static inline __ATTRS_o_ai int
vec_any_eq(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((__vector signed char)__a,
- (__vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector unsigned char)__a, __b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
vec_any_eq(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((__vector signed char)__a,
- (__vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
vec_any_eq(__vector signed short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vceqhs(__a, __b, &__cc);
+ __builtin_s390_vceqhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc <= 1;
}
@@ -4306,7 +4308,8 @@ vec_any_eq(__vector signed short __a, __vector signed short __b) {
static inline __ATTRS_o_ai int
vec_any_eq(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc <= 1;
}
@@ -4314,15 +4317,15 @@ vec_any_eq(__vector signed short __a, __vector __bool short __b) {
static inline __ATTRS_o_ai int
vec_any_eq(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc);
+ __builtin_s390_vceqhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
vec_any_eq(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((__vector signed short)__a,
- (__vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs(__a, __b, &__cc);
return __cc <= 1;
}
@@ -4330,8 +4333,7 @@ vec_any_eq(__vector unsigned short __a, __vector unsigned short __b) {
static inline __ATTRS_o_ai int
vec_any_eq(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((__vector signed short)__a,
- (__vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs(__a, (__vector unsigned short)__b, &__cc);
return __cc <= 1;
}
@@ -4339,23 +4341,23 @@ vec_any_eq(__vector unsigned short __a, __vector __bool short __b) {
static inline __ATTRS_o_ai int
vec_any_eq(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((__vector signed short)__a,
- (__vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector unsigned short)__a, __b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
vec_any_eq(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((__vector signed short)__a,
- (__vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
vec_any_eq(__vector signed int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vceqfs(__a, __b, &__cc);
+ __builtin_s390_vceqfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc <= 1;
}
@@ -4363,7 +4365,8 @@ vec_any_eq(__vector signed int __a, __vector signed int __b) {
static inline __ATTRS_o_ai int
vec_any_eq(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc <= 1;
}
@@ -4371,15 +4374,15 @@ vec_any_eq(__vector signed int __a, __vector __bool int __b) {
static inline __ATTRS_o_ai int
vec_any_eq(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc);
+ __builtin_s390_vceqfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
vec_any_eq(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((__vector signed int)__a,
- (__vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs(__a, __b, &__cc);
return __cc <= 1;
}
@@ -4387,8 +4390,7 @@ vec_any_eq(__vector unsigned int __a, __vector unsigned int __b) {
static inline __ATTRS_o_ai int
vec_any_eq(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((__vector signed int)__a,
- (__vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs(__a, (__vector unsigned int)__b, &__cc);
return __cc <= 1;
}
@@ -4396,23 +4398,23 @@ vec_any_eq(__vector unsigned int __a, __vector __bool int __b) {
static inline __ATTRS_o_ai int
vec_any_eq(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((__vector signed int)__a,
- (__vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector unsigned int)__a, __b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
vec_any_eq(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((__vector signed int)__a,
- (__vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
vec_any_eq(__vector signed long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vceqgs(__a, __b, &__cc);
+ __builtin_s390_vceqgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc <= 1;
}
@@ -4420,7 +4422,8 @@ vec_any_eq(__vector signed long long __a, __vector signed long long __b) {
static inline __ATTRS_o_ai int
vec_any_eq(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc <= 1;
}
@@ -4428,15 +4431,15 @@ vec_any_eq(__vector signed long long __a, __vector __bool long long __b) {
static inline __ATTRS_o_ai int
vec_any_eq(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc);
+ __builtin_s390_vceqgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
vec_any_eq(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((__vector signed long long)__a,
- (__vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs(__a, __b, &__cc);
return __cc <= 1;
}
@@ -4444,8 +4447,7 @@ vec_any_eq(__vector unsigned long long __a, __vector unsigned long long __b) {
static inline __ATTRS_o_ai int
vec_any_eq(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((__vector signed long long)__a,
- (__vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs(__a, (__vector unsigned long long)__b, &__cc);
return __cc <= 1;
}
@@ -4453,16 +4455,15 @@ vec_any_eq(__vector unsigned long long __a, __vector __bool long long __b) {
static inline __ATTRS_o_ai int
vec_any_eq(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((__vector signed long long)__a,
- (__vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector unsigned long long)__a, __b, &__cc);
return __cc <= 1;
}
static inline __ATTRS_o_ai int
vec_any_eq(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((__vector signed long long)__a,
- (__vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc <= 1;
}
@@ -4487,7 +4488,8 @@ vec_any_eq(__vector double __a, __vector double __b) {
static inline __ATTRS_o_ai int
vec_any_ne(__vector signed char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vceqbs(__a, __b, &__cc);
+ __builtin_s390_vceqbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc != 0;
}
@@ -4495,7 +4497,8 @@ vec_any_ne(__vector signed char __a, __vector signed char __b) {
static inline __ATTRS_o_ai int
vec_any_ne(__vector signed char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs(__a, (__vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc != 0;
}
@@ -4503,15 +4506,15 @@ vec_any_ne(__vector signed char __a, __vector __bool char __b) {
static inline __ATTRS_o_ai int
vec_any_ne(__vector __bool char __a, __vector signed char __b) {
int __cc;
- __builtin_s390_vceqbs((__vector signed char)__a, __b, &__cc);
+ __builtin_s390_vceqbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
vec_any_ne(__vector unsigned char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((__vector signed char)__a,
- (__vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs(__a, __b, &__cc);
return __cc != 0;
}
@@ -4519,8 +4522,7 @@ vec_any_ne(__vector unsigned char __a, __vector unsigned char __b) {
static inline __ATTRS_o_ai int
vec_any_ne(__vector unsigned char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((__vector signed char)__a,
- (__vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs(__a, (__vector unsigned char)__b, &__cc);
return __cc != 0;
}
@@ -4528,23 +4530,23 @@ vec_any_ne(__vector unsigned char __a, __vector __bool char __b) {
static inline __ATTRS_o_ai int
vec_any_ne(__vector __bool char __a, __vector unsigned char __b) {
int __cc;
- __builtin_s390_vceqbs((__vector signed char)__a,
- (__vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector unsigned char)__a, __b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
vec_any_ne(__vector __bool char __a, __vector __bool char __b) {
int __cc;
- __builtin_s390_vceqbs((__vector signed char)__a,
- (__vector signed char)__b, &__cc);
+ __builtin_s390_vceqbs((__vector unsigned char)__a,
+ (__vector unsigned char)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
vec_any_ne(__vector signed short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vceqhs(__a, __b, &__cc);
+ __builtin_s390_vceqhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc != 0;
}
@@ -4552,7 +4554,8 @@ vec_any_ne(__vector signed short __a, __vector signed short __b) {
static inline __ATTRS_o_ai int
vec_any_ne(__vector signed short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs(__a, (__vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc != 0;
}
@@ -4560,15 +4563,15 @@ vec_any_ne(__vector signed short __a, __vector __bool short __b) {
static inline __ATTRS_o_ai int
vec_any_ne(__vector __bool short __a, __vector signed short __b) {
int __cc;
- __builtin_s390_vceqhs((__vector signed short)__a, __b, &__cc);
+ __builtin_s390_vceqhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
vec_any_ne(__vector unsigned short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((__vector signed short)__a,
- (__vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs(__a, __b, &__cc);
return __cc != 0;
}
@@ -4576,8 +4579,7 @@ vec_any_ne(__vector unsigned short __a, __vector unsigned short __b) {
static inline __ATTRS_o_ai int
vec_any_ne(__vector unsigned short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((__vector signed short)__a,
- (__vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs(__a, (__vector unsigned short)__b, &__cc);
return __cc != 0;
}
@@ -4585,23 +4587,23 @@ vec_any_ne(__vector unsigned short __a, __vector __bool short __b) {
static inline __ATTRS_o_ai int
vec_any_ne(__vector __bool short __a, __vector unsigned short __b) {
int __cc;
- __builtin_s390_vceqhs((__vector signed short)__a,
- (__vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector unsigned short)__a, __b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
vec_any_ne(__vector __bool short __a, __vector __bool short __b) {
int __cc;
- __builtin_s390_vceqhs((__vector signed short)__a,
- (__vector signed short)__b, &__cc);
+ __builtin_s390_vceqhs((__vector unsigned short)__a,
+ (__vector unsigned short)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
vec_any_ne(__vector signed int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vceqfs(__a, __b, &__cc);
+ __builtin_s390_vceqfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc != 0;
}
@@ -4609,7 +4611,8 @@ vec_any_ne(__vector signed int __a, __vector signed int __b) {
static inline __ATTRS_o_ai int
vec_any_ne(__vector signed int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs(__a, (__vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc != 0;
}
@@ -4617,15 +4620,15 @@ vec_any_ne(__vector signed int __a, __vector __bool int __b) {
static inline __ATTRS_o_ai int
vec_any_ne(__vector __bool int __a, __vector signed int __b) {
int __cc;
- __builtin_s390_vceqfs((__vector signed int)__a, __b, &__cc);
+ __builtin_s390_vceqfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
vec_any_ne(__vector unsigned int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((__vector signed int)__a,
- (__vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs(__a, __b, &__cc);
return __cc != 0;
}
@@ -4633,8 +4636,7 @@ vec_any_ne(__vector unsigned int __a, __vector unsigned int __b) {
static inline __ATTRS_o_ai int
vec_any_ne(__vector unsigned int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((__vector signed int)__a,
- (__vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs(__a, (__vector unsigned int)__b, &__cc);
return __cc != 0;
}
@@ -4642,23 +4644,23 @@ vec_any_ne(__vector unsigned int __a, __vector __bool int __b) {
static inline __ATTRS_o_ai int
vec_any_ne(__vector __bool int __a, __vector unsigned int __b) {
int __cc;
- __builtin_s390_vceqfs((__vector signed int)__a,
- (__vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector unsigned int)__a, __b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
vec_any_ne(__vector __bool int __a, __vector __bool int __b) {
int __cc;
- __builtin_s390_vceqfs((__vector signed int)__a,
- (__vector signed int)__b, &__cc);
+ __builtin_s390_vceqfs((__vector unsigned int)__a,
+ (__vector unsigned int)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
vec_any_ne(__vector signed long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vceqgs(__a, __b, &__cc);
+ __builtin_s390_vceqgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc != 0;
}
@@ -4666,7 +4668,8 @@ vec_any_ne(__vector signed long long __a, __vector signed long long __b) {
static inline __ATTRS_o_ai int
vec_any_ne(__vector signed long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs(__a, (__vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc != 0;
}
@@ -4674,15 +4677,15 @@ vec_any_ne(__vector signed long long __a, __vector __bool long long __b) {
static inline __ATTRS_o_ai int
vec_any_ne(__vector __bool long long __a, __vector signed long long __b) {
int __cc;
- __builtin_s390_vceqgs((__vector signed long long)__a, __b, &__cc);
+ __builtin_s390_vceqgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
vec_any_ne(__vector unsigned long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((__vector signed long long)__a,
- (__vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs(__a, __b, &__cc);
return __cc != 0;
}
@@ -4690,8 +4693,7 @@ vec_any_ne(__vector unsigned long long __a, __vector unsigned long long __b) {
static inline __ATTRS_o_ai int
vec_any_ne(__vector unsigned long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((__vector signed long long)__a,
- (__vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs(__a, (__vector unsigned long long)__b, &__cc);
return __cc != 0;
}
@@ -4699,16 +4701,15 @@ vec_any_ne(__vector unsigned long long __a, __vector __bool long long __b) {
static inline __ATTRS_o_ai int
vec_any_ne(__vector __bool long long __a, __vector unsigned long long __b) {
int __cc;
- __builtin_s390_vceqgs((__vector signed long long)__a,
- (__vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector unsigned long long)__a, __b, &__cc);
return __cc != 0;
}
static inline __ATTRS_o_ai int
vec_any_ne(__vector __bool long long __a, __vector __bool long long __b) {
int __cc;
- __builtin_s390_vceqgs((__vector signed long long)__a,
- (__vector signed long long)__b, &__cc);
+ __builtin_s390_vceqgs((__vector unsigned long long)__a,
+ (__vector unsigned long long)__b, &__cc);
return __cc != 0;
}
@@ -6565,45 +6566,45 @@ vec_rl(__vector unsigned long long __a, __vector unsigned long long __b) {
static inline __ATTRS_o_ai __vector signed char
vec_rli(__vector signed char __a, unsigned long __b) {
return (__vector signed char)__builtin_s390_verllb(
- (__vector unsigned char)__a, (int)__b);
+ (__vector unsigned char)__a, (unsigned char)__b);
}
static inline __ATTRS_o_ai __vector unsigned char
vec_rli(__vector unsigned char __a, unsigned long __b) {
- return __builtin_s390_verllb(__a, (int)__b);
+ return __builtin_s390_verllb(__a, (unsigned char)__b);
}
static inline __ATTRS_o_ai __vector signed short
vec_rli(__vector signed short __a, unsigned long __b) {
return (__vector signed short)__builtin_s390_verllh(
- (__vector unsigned short)__a, (int)__b);
+ (__vector unsigned short)__a, (unsigned char)__b);
}
static inline __ATTRS_o_ai __vector unsigned short
vec_rli(__vector unsigned short __a, unsigned long __b) {
- return __builtin_s390_verllh(__a, (int)__b);
+ return __builtin_s390_verllh(__a, (unsigned char)__b);
}
static inline __ATTRS_o_ai __vector signed int
vec_rli(__vector signed int __a, unsigned long __b) {
return (__vector signed int)__builtin_s390_verllf(
- (__vector unsigned int)__a, (int)__b);
+ (__vector unsigned int)__a, (unsigned char)__b);
}
static inline __ATTRS_o_ai __vector unsigned int
vec_rli(__vector unsigned int __a, unsigned long __b) {
- return __builtin_s390_verllf(__a, (int)__b);
+ return __builtin_s390_verllf(__a, (unsigned char)__b);
}
static inline __ATTRS_o_ai __vector signed long long
vec_rli(__vector signed long long __a, unsigned long __b) {
return (__vector signed long long)__builtin_s390_verllg(
- (__vector unsigned long long)__a, (int)__b);
+ (__vector unsigned long long)__a, (unsigned char)__b);
}
static inline __ATTRS_o_ai __vector unsigned long long
vec_rli(__vector unsigned long long __a, unsigned long __b) {
- return __builtin_s390_verllg(__a, (int)__b);
+ return __builtin_s390_verllg(__a, (unsigned char)__b);
}
/*-- vec_rl_mask ------------------------------------------------------------*/
@@ -8358,7 +8359,7 @@ vec_min(__vector double __a, __vector double __b) {
static inline __ATTRS_ai __vector unsigned char
vec_add_u128(__vector unsigned char __a, __vector unsigned char __b) {
- return __builtin_s390_vaq(__a, __b);
+ return (__vector unsigned char)((__int128)__a + (__int128)__b);
}
/*-- vec_addc ---------------------------------------------------------------*/
@@ -8387,7 +8388,8 @@ vec_addc(__vector unsigned long long __a, __vector unsigned long long __b) {
static inline __ATTRS_ai __vector unsigned char
vec_addc_u128(__vector unsigned char __a, __vector unsigned char __b) {
- return __builtin_s390_vaccq(__a, __b);
+ return (__vector unsigned char)
+ __builtin_s390_vaccq((unsigned __int128)__a, (unsigned __int128)__b);
}
/*-- vec_adde_u128 ----------------------------------------------------------*/
@@ -8395,7 +8397,9 @@ vec_addc_u128(__vector unsigned char __a, __vector unsigned char __b) {
static inline __ATTRS_ai __vector unsigned char
vec_adde_u128(__vector unsigned char __a, __vector unsigned char __b,
__vector unsigned char __c) {
- return __builtin_s390_vacq(__a, __b, __c);
+ return (__vector unsigned char)
+ __builtin_s390_vacq((unsigned __int128)__a, (unsigned __int128)__b,
+ (unsigned __int128)__c);
}
/*-- vec_addec_u128 ---------------------------------------------------------*/
@@ -8403,7 +8407,9 @@ vec_adde_u128(__vector unsigned char __a, __vector unsigned char __b,
static inline __ATTRS_ai __vector unsigned char
vec_addec_u128(__vector unsigned char __a, __vector unsigned char __b,
__vector unsigned char __c) {
- return __builtin_s390_vacccq(__a, __b, __c);
+ return (__vector unsigned char)
+ __builtin_s390_vacccq((unsigned __int128)__a, (unsigned __int128)__b,
+ (unsigned __int128)__c);
}
/*-- vec_avg ----------------------------------------------------------------*/
@@ -8477,7 +8483,7 @@ vec_gfmsum(__vector unsigned int __a, __vector unsigned int __b) {
static inline __ATTRS_o_ai __vector unsigned char
vec_gfmsum_128(__vector unsigned long long __a,
__vector unsigned long long __b) {
- return __builtin_s390_vgfmg(__a, __b);
+ return (__vector unsigned char)__builtin_s390_vgfmg(__a, __b);
}
/*-- vec_gfmsum_accum -------------------------------------------------------*/
@@ -8506,7 +8512,8 @@ static inline __ATTRS_o_ai __vector unsigned char
vec_gfmsum_accum_128(__vector unsigned long long __a,
__vector unsigned long long __b,
__vector unsigned char __c) {
- return __builtin_s390_vgfmag(__a, __b, __c);
+ return (__vector unsigned char)
+ __builtin_s390_vgfmag(__a, __b, (unsigned __int128)__c);
}
/*-- vec_mladd --------------------------------------------------------------*/
@@ -8796,15 +8803,21 @@ vec_mulo(__vector unsigned int __a, __vector unsigned int __b) {
/*-- vec_msum_u128 ----------------------------------------------------------*/
#if __ARCH__ >= 12
+extern __ATTRS_o __vector unsigned char
+vec_msum_u128(__vector unsigned long long __a, __vector unsigned long long __b,
+ __vector unsigned char __c, int __d)
+ __constant_range(__d, 0, 15);
+
#define vec_msum_u128(X, Y, Z, W) \
- ((__vector unsigned char)__builtin_s390_vmslg((X), (Y), (Z), (W)));
+ ((__typeof__((vec_msum_u128)((X), (Y), (Z), (W)))) \
+ __builtin_s390_vmslg((X), (Y), (unsigned __int128)(Z), (W)))
#endif
/*-- vec_sub_u128 -----------------------------------------------------------*/
static inline __ATTRS_ai __vector unsigned char
vec_sub_u128(__vector unsigned char __a, __vector unsigned char __b) {
- return __builtin_s390_vsq(__a, __b);
+ return (__vector unsigned char)((__int128)__a - (__int128)__b);
}
/*-- vec_subc ---------------------------------------------------------------*/
@@ -8833,7 +8846,8 @@ vec_subc(__vector unsigned long long __a, __vector unsigned long long __b) {
static inline __ATTRS_ai __vector unsigned char
vec_subc_u128(__vector unsigned char __a, __vector unsigned char __b) {
- return __builtin_s390_vscbiq(__a, __b);
+ return (__vector unsigned char)
+ __builtin_s390_vscbiq((unsigned __int128)__a, (unsigned __int128)__b);
}
/*-- vec_sube_u128 ----------------------------------------------------------*/
@@ -8841,7 +8855,9 @@ vec_subc_u128(__vector unsigned char __a, __vector unsigned char __b) {
static inline __ATTRS_ai __vector unsigned char
vec_sube_u128(__vector unsigned char __a, __vector unsigned char __b,
__vector unsigned char __c) {
- return __builtin_s390_vsbiq(__a, __b, __c);
+ return (__vector unsigned char)
+ __builtin_s390_vsbiq((unsigned __int128)__a, (unsigned __int128)__b,
+ (unsigned __int128)__c);
}
/*-- vec_subec_u128 ---------------------------------------------------------*/
@@ -8849,7 +8865,9 @@ vec_sube_u128(__vector unsigned char __a, __vector unsigned char __b,
static inline __ATTRS_ai __vector unsigned char
vec_subec_u128(__vector unsigned char __a, __vector unsigned char __b,
__vector unsigned char __c) {
- return __builtin_s390_vsbcbiq(__a, __b, __c);
+ return (__vector unsigned char)
+ __builtin_s390_vsbcbiq((unsigned __int128)__a, (unsigned __int128)__b,
+ (unsigned __int128)__c);
}
/*-- vec_sum2 ---------------------------------------------------------------*/
@@ -8868,12 +8886,12 @@ vec_sum2(__vector unsigned int __a, __vector unsigned int __b) {
static inline __ATTRS_o_ai __vector unsigned char
vec_sum_u128(__vector unsigned int __a, __vector unsigned int __b) {
- return __builtin_s390_vsumqf(__a, __b);
+ return (__vector unsigned char)__builtin_s390_vsumqf(__a, __b);
}
static inline __ATTRS_o_ai __vector unsigned char
vec_sum_u128(__vector unsigned long long __a, __vector unsigned long long __b) {
- return __builtin_s390_vsumqg(__a, __b);
+ return (__vector unsigned char)__builtin_s390_vsumqg(__a, __b);
}
/*-- vec_sum4 ---------------------------------------------------------------*/
diff --git a/contrib/llvm-project/clang/lib/Headers/velintrin.h b/contrib/llvm-project/clang/lib/Headers/velintrin.h
new file mode 100644
index 000000000000..3f2bc00442e7
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/velintrin.h
@@ -0,0 +1,71 @@
+/*===---- velintrin.h - VEL intrinsics for VE ------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __VEL_INTRIN_H__
+#define __VEL_INTRIN_H__
+
+// Vector registers
+typedef double __vr __attribute__((__vector_size__(2048)));
+
+// Vector mask registers
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+// For C99
+typedef _Bool __vm __attribute__((ext_vector_type(256)));
+typedef _Bool __vm256 __attribute__((ext_vector_type(256)));
+typedef _Bool __vm512 __attribute__((ext_vector_type(512)));
+#else
+#ifdef __cplusplus
+// For C++
+typedef bool __vm __attribute__((ext_vector_type(256)));
+typedef bool __vm256 __attribute__((ext_vector_type(256)));
+typedef bool __vm512 __attribute__((ext_vector_type(512)));
+#else
+#error need C++ or C99 to use vector intrinsics for VE
+#endif
+#endif
+
+enum VShuffleCodes {
+ VE_VSHUFFLE_YUYU = 0,
+ VE_VSHUFFLE_YUYL = 1,
+ VE_VSHUFFLE_YUZU = 2,
+ VE_VSHUFFLE_YUZL = 3,
+ VE_VSHUFFLE_YLYU = 4,
+ VE_VSHUFFLE_YLYL = 5,
+ VE_VSHUFFLE_YLZU = 6,
+ VE_VSHUFFLE_YLZL = 7,
+ VE_VSHUFFLE_ZUYU = 8,
+ VE_VSHUFFLE_ZUYL = 9,
+ VE_VSHUFFLE_ZUZU = 10,
+ VE_VSHUFFLE_ZUZL = 11,
+ VE_VSHUFFLE_ZLYU = 12,
+ VE_VSHUFFLE_ZLYL = 13,
+ VE_VSHUFFLE_ZLZU = 14,
+ VE_VSHUFFLE_ZLZL = 15,
+};
+
+// Use generated intrinsic name definitions
+#include <velintrin_gen.h>
+
+// Use helper functions
+#include <velintrin_approx.h>
+
+// pack
+
+#define _vel_pack_f32p __builtin_ve_vl_pack_f32p
+#define _vel_pack_f32a __builtin_ve_vl_pack_f32a
+
+static inline unsigned long int _vel_pack_i32(unsigned int a, unsigned int b) {
+ return (((unsigned long int)a) << 32) | b;
+}
+
+#define _vel_extract_vm512u(vm) __builtin_ve_vl_extract_vm512u(vm)
+#define _vel_extract_vm512l(vm) __builtin_ve_vl_extract_vm512l(vm)
+#define _vel_insert_vm512u(vm512, vm) __builtin_ve_vl_insert_vm512u(vm512, vm)
+#define _vel_insert_vm512l(vm512, vm) __builtin_ve_vl_insert_vm512l(vm512, vm)
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/velintrin_approx.h b/contrib/llvm-project/clang/lib/Headers/velintrin_approx.h
new file mode 100644
index 000000000000..89d270fef3c7
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/velintrin_approx.h
@@ -0,0 +1,120 @@
+/*===---- velintrin_approx.h - VEL intrinsics helper for VE ----------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __VEL_INTRIN_APPROX_H__
+#define __VEL_INTRIN_APPROX_H__
+
+static inline __vr _vel_approx_vfdivs_vvvl(__vr v0, __vr v1, int l) {
+ float s0;
+ __vr v2, v3, v4, v5;
+ v5 = _vel_vrcps_vvl(v1, l);
+ s0 = 1.0;
+ v4 = _vel_vfnmsbs_vsvvl(s0, v1, v5, l);
+ v3 = _vel_vfmads_vvvvl(v5, v5, v4, l);
+ v2 = _vel_vfmuls_vvvl(v0, v3, l);
+ v4 = _vel_vfnmsbs_vvvvl(v0, v2, v1, l);
+ v2 = _vel_vfmads_vvvvl(v2, v5, v4, l);
+ v0 = _vel_vfnmsbs_vvvvl(v0, v2, v1, l);
+ v0 = _vel_vfmads_vvvvl(v2, v3, v0, l);
+ return v0;
+}
+
+static inline __vr _vel_approx_pvfdiv_vvvl(__vr v0, __vr v1, int l) {
+ float s0;
+ __vr v2, v3, v4, v5;
+ v5 = _vel_pvrcp_vvl(v1, l);
+ s0 = 1.0;
+ v4 = _vel_pvfnmsb_vsvvl(s0, v1, v5, l);
+ v3 = _vel_pvfmad_vvvvl(v5, v5, v4, l);
+ v2 = _vel_pvfmul_vvvl(v0, v3, l);
+ v4 = _vel_pvfnmsb_vvvvl(v0, v2, v1, l);
+ v2 = _vel_pvfmad_vvvvl(v2, v5, v4, l);
+ v0 = _vel_pvfnmsb_vvvvl(v0, v2, v1, l);
+ v0 = _vel_pvfmad_vvvvl(v2, v3, v0, l);
+ return v0;
+}
+
+static inline __vr _vel_approx_vfdivs_vsvl(float s0, __vr v0, int l) {
+ float s1;
+ __vr v1, v2, v3, v4;
+ v4 = _vel_vrcps_vvl(v0, l);
+ s1 = 1.0;
+ v2 = _vel_vfnmsbs_vsvvl(s1, v0, v4, l);
+ v2 = _vel_vfmads_vvvvl(v4, v4, v2, l);
+ v1 = _vel_vfmuls_vsvl(s0, v2, l);
+ v3 = _vel_vfnmsbs_vsvvl(s0, v1, v0, l);
+ v1 = _vel_vfmads_vvvvl(v1, v4, v3, l);
+ v3 = _vel_vfnmsbs_vsvvl(s0, v1, v0, l);
+ v0 = _vel_vfmads_vvvvl(v1, v2, v3, l);
+ return v0;
+}
+
+static inline __vr _vel_approx_vfdivs_vvsl(__vr v0, float s0, int l) {
+ float s1;
+ __vr v1, v2;
+ s1 = 1.0f / s0;
+ v1 = _vel_vfmuls_vsvl(s1, v0, l);
+ v2 = _vel_vfnmsbs_vvsvl(v0, s0, v1, l);
+ v0 = _vel_vfmads_vvsvl(v1, s1, v2, l);
+ return v0;
+}
+
+static inline __vr _vel_approx_vfdivd_vsvl(double s0, __vr v0, int l) {
+ __vr v1, v2, v3;
+ v2 = _vel_vrcpd_vvl(v0, l);
+ double s1 = 1.0;
+ v3 = _vel_vfnmsbd_vsvvl(s1, v0, v2, l);
+ v2 = _vel_vfmadd_vvvvl(v2, v2, v3, l);
+ v1 = _vel_vfnmsbd_vsvvl(s1, v0, v2, l);
+ v1 = _vel_vfmadd_vvvvl(v2, v2, v1, l);
+ v1 = _vel_vaddul_vsvl(1, v1, l);
+ v3 = _vel_vfnmsbd_vsvvl(s1, v0, v1, l);
+ v3 = _vel_vfmadd_vvvvl(v1, v1, v3, l);
+ v1 = _vel_vfmuld_vsvl(s0, v3, l);
+ v0 = _vel_vfnmsbd_vsvvl(s0, v1, v0, l);
+ v0 = _vel_vfmadd_vvvvl(v1, v3, v0, l);
+ return v0;
+}
+
+static inline __vr _vel_approx_vfsqrtd_vvl(__vr v0, int l) {
+ double s0, s1;
+ __vr v1, v2, v3;
+ v2 = _vel_vrsqrtdnex_vvl(v0, l);
+ v1 = _vel_vfmuld_vvvl(v0, v2, l);
+ s0 = 1.0;
+ s1 = 0.5;
+ v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);
+ v3 = _vel_vfmuld_vsvl(s1, v3, l);
+ v2 = _vel_vfmadd_vvvvl(v2, v2, v3, l);
+ v1 = _vel_vfmuld_vvvl(v0, v2, l);
+ v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);
+ v3 = _vel_vfmuld_vsvl(s1, v3, l);
+ v0 = _vel_vfmadd_vvvvl(v1, v1, v3, l);
+ return v0;
+}
+
+static inline __vr _vel_approx_vfsqrts_vvl(__vr v0, int l) {
+ float s0, s1;
+ __vr v1, v2, v3;
+ v0 = _vel_vcvtds_vvl(v0, l);
+ v2 = _vel_vrsqrtdnex_vvl(v0, l);
+ v1 = _vel_vfmuld_vvvl(v0, v2, l);
+ s0 = 1.0;
+ s1 = 0.5;
+ v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);
+ v3 = _vel_vfmuld_vsvl(s1, v3, l);
+ v2 = _vel_vfmadd_vvvvl(v2, v2, v3, l);
+ v1 = _vel_vfmuld_vvvl(v0, v2, l);
+ v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);
+ v3 = _vel_vfmuld_vsvl(s1, v3, l);
+ v0 = _vel_vfmadd_vvvvl(v1, v1, v3, l);
+ v0 = _vel_vcvtsd_vvl(v0, l);
+ return v0;
+}
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/velintrin_gen.h b/contrib/llvm-project/clang/lib/Headers/velintrin_gen.h
new file mode 100644
index 000000000000..845c0da2ffa2
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/velintrin_gen.h
@@ -0,0 +1,1257 @@
+#define _vel_vld_vssl __builtin_ve_vl_vld_vssl
+#define _vel_vld_vssvl __builtin_ve_vl_vld_vssvl
+#define _vel_vldnc_vssl __builtin_ve_vl_vldnc_vssl
+#define _vel_vldnc_vssvl __builtin_ve_vl_vldnc_vssvl
+#define _vel_vldu_vssl __builtin_ve_vl_vldu_vssl
+#define _vel_vldu_vssvl __builtin_ve_vl_vldu_vssvl
+#define _vel_vldunc_vssl __builtin_ve_vl_vldunc_vssl
+#define _vel_vldunc_vssvl __builtin_ve_vl_vldunc_vssvl
+#define _vel_vldlsx_vssl __builtin_ve_vl_vldlsx_vssl
+#define _vel_vldlsx_vssvl __builtin_ve_vl_vldlsx_vssvl
+#define _vel_vldlsxnc_vssl __builtin_ve_vl_vldlsxnc_vssl
+#define _vel_vldlsxnc_vssvl __builtin_ve_vl_vldlsxnc_vssvl
+#define _vel_vldlzx_vssl __builtin_ve_vl_vldlzx_vssl
+#define _vel_vldlzx_vssvl __builtin_ve_vl_vldlzx_vssvl
+#define _vel_vldlzxnc_vssl __builtin_ve_vl_vldlzxnc_vssl
+#define _vel_vldlzxnc_vssvl __builtin_ve_vl_vldlzxnc_vssvl
+#define _vel_vld2d_vssl __builtin_ve_vl_vld2d_vssl
+#define _vel_vld2d_vssvl __builtin_ve_vl_vld2d_vssvl
+#define _vel_vld2dnc_vssl __builtin_ve_vl_vld2dnc_vssl
+#define _vel_vld2dnc_vssvl __builtin_ve_vl_vld2dnc_vssvl
+#define _vel_vldu2d_vssl __builtin_ve_vl_vldu2d_vssl
+#define _vel_vldu2d_vssvl __builtin_ve_vl_vldu2d_vssvl
+#define _vel_vldu2dnc_vssl __builtin_ve_vl_vldu2dnc_vssl
+#define _vel_vldu2dnc_vssvl __builtin_ve_vl_vldu2dnc_vssvl
+#define _vel_vldl2dsx_vssl __builtin_ve_vl_vldl2dsx_vssl
+#define _vel_vldl2dsx_vssvl __builtin_ve_vl_vldl2dsx_vssvl
+#define _vel_vldl2dsxnc_vssl __builtin_ve_vl_vldl2dsxnc_vssl
+#define _vel_vldl2dsxnc_vssvl __builtin_ve_vl_vldl2dsxnc_vssvl
+#define _vel_vldl2dzx_vssl __builtin_ve_vl_vldl2dzx_vssl
+#define _vel_vldl2dzx_vssvl __builtin_ve_vl_vldl2dzx_vssvl
+#define _vel_vldl2dzxnc_vssl __builtin_ve_vl_vldl2dzxnc_vssl
+#define _vel_vldl2dzxnc_vssvl __builtin_ve_vl_vldl2dzxnc_vssvl
+#define _vel_vst_vssl __builtin_ve_vl_vst_vssl
+#define _vel_vst_vssml __builtin_ve_vl_vst_vssml
+#define _vel_vstnc_vssl __builtin_ve_vl_vstnc_vssl
+#define _vel_vstnc_vssml __builtin_ve_vl_vstnc_vssml
+#define _vel_vstot_vssl __builtin_ve_vl_vstot_vssl
+#define _vel_vstot_vssml __builtin_ve_vl_vstot_vssml
+#define _vel_vstncot_vssl __builtin_ve_vl_vstncot_vssl
+#define _vel_vstncot_vssml __builtin_ve_vl_vstncot_vssml
+#define _vel_vstu_vssl __builtin_ve_vl_vstu_vssl
+#define _vel_vstu_vssml __builtin_ve_vl_vstu_vssml
+#define _vel_vstunc_vssl __builtin_ve_vl_vstunc_vssl
+#define _vel_vstunc_vssml __builtin_ve_vl_vstunc_vssml
+#define _vel_vstuot_vssl __builtin_ve_vl_vstuot_vssl
+#define _vel_vstuot_vssml __builtin_ve_vl_vstuot_vssml
+#define _vel_vstuncot_vssl __builtin_ve_vl_vstuncot_vssl
+#define _vel_vstuncot_vssml __builtin_ve_vl_vstuncot_vssml
+#define _vel_vstl_vssl __builtin_ve_vl_vstl_vssl
+#define _vel_vstl_vssml __builtin_ve_vl_vstl_vssml
+#define _vel_vstlnc_vssl __builtin_ve_vl_vstlnc_vssl
+#define _vel_vstlnc_vssml __builtin_ve_vl_vstlnc_vssml
+#define _vel_vstlot_vssl __builtin_ve_vl_vstlot_vssl
+#define _vel_vstlot_vssml __builtin_ve_vl_vstlot_vssml
+#define _vel_vstlncot_vssl __builtin_ve_vl_vstlncot_vssl
+#define _vel_vstlncot_vssml __builtin_ve_vl_vstlncot_vssml
+#define _vel_vst2d_vssl __builtin_ve_vl_vst2d_vssl
+#define _vel_vst2d_vssml __builtin_ve_vl_vst2d_vssml
+#define _vel_vst2dnc_vssl __builtin_ve_vl_vst2dnc_vssl
+#define _vel_vst2dnc_vssml __builtin_ve_vl_vst2dnc_vssml
+#define _vel_vst2dot_vssl __builtin_ve_vl_vst2dot_vssl
+#define _vel_vst2dot_vssml __builtin_ve_vl_vst2dot_vssml
+#define _vel_vst2dncot_vssl __builtin_ve_vl_vst2dncot_vssl
+#define _vel_vst2dncot_vssml __builtin_ve_vl_vst2dncot_vssml
+#define _vel_vstu2d_vssl __builtin_ve_vl_vstu2d_vssl
+#define _vel_vstu2d_vssml __builtin_ve_vl_vstu2d_vssml
+#define _vel_vstu2dnc_vssl __builtin_ve_vl_vstu2dnc_vssl
+#define _vel_vstu2dnc_vssml __builtin_ve_vl_vstu2dnc_vssml
+#define _vel_vstu2dot_vssl __builtin_ve_vl_vstu2dot_vssl
+#define _vel_vstu2dot_vssml __builtin_ve_vl_vstu2dot_vssml
+#define _vel_vstu2dncot_vssl __builtin_ve_vl_vstu2dncot_vssl
+#define _vel_vstu2dncot_vssml __builtin_ve_vl_vstu2dncot_vssml
+#define _vel_vstl2d_vssl __builtin_ve_vl_vstl2d_vssl
+#define _vel_vstl2d_vssml __builtin_ve_vl_vstl2d_vssml
+#define _vel_vstl2dnc_vssl __builtin_ve_vl_vstl2dnc_vssl
+#define _vel_vstl2dnc_vssml __builtin_ve_vl_vstl2dnc_vssml
+#define _vel_vstl2dot_vssl __builtin_ve_vl_vstl2dot_vssl
+#define _vel_vstl2dot_vssml __builtin_ve_vl_vstl2dot_vssml
+#define _vel_vstl2dncot_vssl __builtin_ve_vl_vstl2dncot_vssl
+#define _vel_vstl2dncot_vssml __builtin_ve_vl_vstl2dncot_vssml
+#define _vel_pfchv_ssl __builtin_ve_vl_pfchv_ssl
+#define _vel_pfchvnc_ssl __builtin_ve_vl_pfchvnc_ssl
+#define _vel_lsv_vvss __builtin_ve_vl_lsv_vvss
+#define _vel_lvsl_svs __builtin_ve_vl_lvsl_svs
+#define _vel_lvsd_svs __builtin_ve_vl_lvsd_svs
+#define _vel_lvss_svs __builtin_ve_vl_lvss_svs
+#define _vel_lvm_mmss __builtin_ve_vl_lvm_mmss
+#define _vel_lvm_MMss __builtin_ve_vl_lvm_MMss
+#define _vel_svm_sms __builtin_ve_vl_svm_sms
+#define _vel_svm_sMs __builtin_ve_vl_svm_sMs
+#define _vel_vbrdd_vsl __builtin_ve_vl_vbrdd_vsl
+#define _vel_vbrdd_vsvl __builtin_ve_vl_vbrdd_vsvl
+#define _vel_vbrdd_vsmvl __builtin_ve_vl_vbrdd_vsmvl
+#define _vel_vbrdl_vsl __builtin_ve_vl_vbrdl_vsl
+#define _vel_vbrdl_vsvl __builtin_ve_vl_vbrdl_vsvl
+#define _vel_vbrdl_vsmvl __builtin_ve_vl_vbrdl_vsmvl
+#define _vel_vbrds_vsl __builtin_ve_vl_vbrds_vsl
+#define _vel_vbrds_vsvl __builtin_ve_vl_vbrds_vsvl
+#define _vel_vbrds_vsmvl __builtin_ve_vl_vbrds_vsmvl
+#define _vel_vbrdw_vsl __builtin_ve_vl_vbrdw_vsl
+#define _vel_vbrdw_vsvl __builtin_ve_vl_vbrdw_vsvl
+#define _vel_vbrdw_vsmvl __builtin_ve_vl_vbrdw_vsmvl
+#define _vel_pvbrd_vsl __builtin_ve_vl_pvbrd_vsl
+#define _vel_pvbrd_vsvl __builtin_ve_vl_pvbrd_vsvl
+#define _vel_pvbrd_vsMvl __builtin_ve_vl_pvbrd_vsMvl
+#define _vel_vmv_vsvl __builtin_ve_vl_vmv_vsvl
+#define _vel_vmv_vsvvl __builtin_ve_vl_vmv_vsvvl
+#define _vel_vmv_vsvmvl __builtin_ve_vl_vmv_vsvmvl
+#define _vel_vaddul_vvvl __builtin_ve_vl_vaddul_vvvl
+#define _vel_vaddul_vvvvl __builtin_ve_vl_vaddul_vvvvl
+#define _vel_vaddul_vsvl __builtin_ve_vl_vaddul_vsvl
+#define _vel_vaddul_vsvvl __builtin_ve_vl_vaddul_vsvvl
+#define _vel_vaddul_vvvmvl __builtin_ve_vl_vaddul_vvvmvl
+#define _vel_vaddul_vsvmvl __builtin_ve_vl_vaddul_vsvmvl
+#define _vel_vadduw_vvvl __builtin_ve_vl_vadduw_vvvl
+#define _vel_vadduw_vvvvl __builtin_ve_vl_vadduw_vvvvl
+#define _vel_vadduw_vsvl __builtin_ve_vl_vadduw_vsvl
+#define _vel_vadduw_vsvvl __builtin_ve_vl_vadduw_vsvvl
+#define _vel_vadduw_vvvmvl __builtin_ve_vl_vadduw_vvvmvl
+#define _vel_vadduw_vsvmvl __builtin_ve_vl_vadduw_vsvmvl
+#define _vel_pvaddu_vvvl __builtin_ve_vl_pvaddu_vvvl
+#define _vel_pvaddu_vvvvl __builtin_ve_vl_pvaddu_vvvvl
+#define _vel_pvaddu_vsvl __builtin_ve_vl_pvaddu_vsvl
+#define _vel_pvaddu_vsvvl __builtin_ve_vl_pvaddu_vsvvl
+#define _vel_pvaddu_vvvMvl __builtin_ve_vl_pvaddu_vvvMvl
+#define _vel_pvaddu_vsvMvl __builtin_ve_vl_pvaddu_vsvMvl
+#define _vel_vaddswsx_vvvl __builtin_ve_vl_vaddswsx_vvvl
+#define _vel_vaddswsx_vvvvl __builtin_ve_vl_vaddswsx_vvvvl
+#define _vel_vaddswsx_vsvl __builtin_ve_vl_vaddswsx_vsvl
+#define _vel_vaddswsx_vsvvl __builtin_ve_vl_vaddswsx_vsvvl
+#define _vel_vaddswsx_vvvmvl __builtin_ve_vl_vaddswsx_vvvmvl
+#define _vel_vaddswsx_vsvmvl __builtin_ve_vl_vaddswsx_vsvmvl
+#define _vel_vaddswzx_vvvl __builtin_ve_vl_vaddswzx_vvvl
+#define _vel_vaddswzx_vvvvl __builtin_ve_vl_vaddswzx_vvvvl
+#define _vel_vaddswzx_vsvl __builtin_ve_vl_vaddswzx_vsvl
+#define _vel_vaddswzx_vsvvl __builtin_ve_vl_vaddswzx_vsvvl
+#define _vel_vaddswzx_vvvmvl __builtin_ve_vl_vaddswzx_vvvmvl
+#define _vel_vaddswzx_vsvmvl __builtin_ve_vl_vaddswzx_vsvmvl
+#define _vel_pvadds_vvvl __builtin_ve_vl_pvadds_vvvl
+#define _vel_pvadds_vvvvl __builtin_ve_vl_pvadds_vvvvl
+#define _vel_pvadds_vsvl __builtin_ve_vl_pvadds_vsvl
+#define _vel_pvadds_vsvvl __builtin_ve_vl_pvadds_vsvvl
+#define _vel_pvadds_vvvMvl __builtin_ve_vl_pvadds_vvvMvl
+#define _vel_pvadds_vsvMvl __builtin_ve_vl_pvadds_vsvMvl
+#define _vel_vaddsl_vvvl __builtin_ve_vl_vaddsl_vvvl
+#define _vel_vaddsl_vvvvl __builtin_ve_vl_vaddsl_vvvvl
+#define _vel_vaddsl_vsvl __builtin_ve_vl_vaddsl_vsvl
+#define _vel_vaddsl_vsvvl __builtin_ve_vl_vaddsl_vsvvl
+#define _vel_vaddsl_vvvmvl __builtin_ve_vl_vaddsl_vvvmvl
+#define _vel_vaddsl_vsvmvl __builtin_ve_vl_vaddsl_vsvmvl
+#define _vel_vsubul_vvvl __builtin_ve_vl_vsubul_vvvl
+#define _vel_vsubul_vvvvl __builtin_ve_vl_vsubul_vvvvl
+#define _vel_vsubul_vsvl __builtin_ve_vl_vsubul_vsvl
+#define _vel_vsubul_vsvvl __builtin_ve_vl_vsubul_vsvvl
+#define _vel_vsubul_vvvmvl __builtin_ve_vl_vsubul_vvvmvl
+#define _vel_vsubul_vsvmvl __builtin_ve_vl_vsubul_vsvmvl
+#define _vel_vsubuw_vvvl __builtin_ve_vl_vsubuw_vvvl
+#define _vel_vsubuw_vvvvl __builtin_ve_vl_vsubuw_vvvvl
+#define _vel_vsubuw_vsvl __builtin_ve_vl_vsubuw_vsvl
+#define _vel_vsubuw_vsvvl __builtin_ve_vl_vsubuw_vsvvl
+#define _vel_vsubuw_vvvmvl __builtin_ve_vl_vsubuw_vvvmvl
+#define _vel_vsubuw_vsvmvl __builtin_ve_vl_vsubuw_vsvmvl
+#define _vel_pvsubu_vvvl __builtin_ve_vl_pvsubu_vvvl
+#define _vel_pvsubu_vvvvl __builtin_ve_vl_pvsubu_vvvvl
+#define _vel_pvsubu_vsvl __builtin_ve_vl_pvsubu_vsvl
+#define _vel_pvsubu_vsvvl __builtin_ve_vl_pvsubu_vsvvl
+#define _vel_pvsubu_vvvMvl __builtin_ve_vl_pvsubu_vvvMvl
+#define _vel_pvsubu_vsvMvl __builtin_ve_vl_pvsubu_vsvMvl
+#define _vel_vsubswsx_vvvl __builtin_ve_vl_vsubswsx_vvvl
+#define _vel_vsubswsx_vvvvl __builtin_ve_vl_vsubswsx_vvvvl
+#define _vel_vsubswsx_vsvl __builtin_ve_vl_vsubswsx_vsvl
+#define _vel_vsubswsx_vsvvl __builtin_ve_vl_vsubswsx_vsvvl
+#define _vel_vsubswsx_vvvmvl __builtin_ve_vl_vsubswsx_vvvmvl
+#define _vel_vsubswsx_vsvmvl __builtin_ve_vl_vsubswsx_vsvmvl
+#define _vel_vsubswzx_vvvl __builtin_ve_vl_vsubswzx_vvvl
+#define _vel_vsubswzx_vvvvl __builtin_ve_vl_vsubswzx_vvvvl
+#define _vel_vsubswzx_vsvl __builtin_ve_vl_vsubswzx_vsvl
+#define _vel_vsubswzx_vsvvl __builtin_ve_vl_vsubswzx_vsvvl
+#define _vel_vsubswzx_vvvmvl __builtin_ve_vl_vsubswzx_vvvmvl
+#define _vel_vsubswzx_vsvmvl __builtin_ve_vl_vsubswzx_vsvmvl
+#define _vel_pvsubs_vvvl __builtin_ve_vl_pvsubs_vvvl
+#define _vel_pvsubs_vvvvl __builtin_ve_vl_pvsubs_vvvvl
+#define _vel_pvsubs_vsvl __builtin_ve_vl_pvsubs_vsvl
+#define _vel_pvsubs_vsvvl __builtin_ve_vl_pvsubs_vsvvl
+#define _vel_pvsubs_vvvMvl __builtin_ve_vl_pvsubs_vvvMvl
+#define _vel_pvsubs_vsvMvl __builtin_ve_vl_pvsubs_vsvMvl
+#define _vel_vsubsl_vvvl __builtin_ve_vl_vsubsl_vvvl
+#define _vel_vsubsl_vvvvl __builtin_ve_vl_vsubsl_vvvvl
+#define _vel_vsubsl_vsvl __builtin_ve_vl_vsubsl_vsvl
+#define _vel_vsubsl_vsvvl __builtin_ve_vl_vsubsl_vsvvl
+#define _vel_vsubsl_vvvmvl __builtin_ve_vl_vsubsl_vvvmvl
+#define _vel_vsubsl_vsvmvl __builtin_ve_vl_vsubsl_vsvmvl
+#define _vel_vmulul_vvvl __builtin_ve_vl_vmulul_vvvl
+#define _vel_vmulul_vvvvl __builtin_ve_vl_vmulul_vvvvl
+#define _vel_vmulul_vsvl __builtin_ve_vl_vmulul_vsvl
+#define _vel_vmulul_vsvvl __builtin_ve_vl_vmulul_vsvvl
+#define _vel_vmulul_vvvmvl __builtin_ve_vl_vmulul_vvvmvl
+#define _vel_vmulul_vsvmvl __builtin_ve_vl_vmulul_vsvmvl
+#define _vel_vmuluw_vvvl __builtin_ve_vl_vmuluw_vvvl
+#define _vel_vmuluw_vvvvl __builtin_ve_vl_vmuluw_vvvvl
+#define _vel_vmuluw_vsvl __builtin_ve_vl_vmuluw_vsvl
+#define _vel_vmuluw_vsvvl __builtin_ve_vl_vmuluw_vsvvl
+#define _vel_vmuluw_vvvmvl __builtin_ve_vl_vmuluw_vvvmvl
+#define _vel_vmuluw_vsvmvl __builtin_ve_vl_vmuluw_vsvmvl
+#define _vel_vmulswsx_vvvl __builtin_ve_vl_vmulswsx_vvvl
+#define _vel_vmulswsx_vvvvl __builtin_ve_vl_vmulswsx_vvvvl
+#define _vel_vmulswsx_vsvl __builtin_ve_vl_vmulswsx_vsvl
+#define _vel_vmulswsx_vsvvl __builtin_ve_vl_vmulswsx_vsvvl
+#define _vel_vmulswsx_vvvmvl __builtin_ve_vl_vmulswsx_vvvmvl
+#define _vel_vmulswsx_vsvmvl __builtin_ve_vl_vmulswsx_vsvmvl
+#define _vel_vmulswzx_vvvl __builtin_ve_vl_vmulswzx_vvvl
+#define _vel_vmulswzx_vvvvl __builtin_ve_vl_vmulswzx_vvvvl
+#define _vel_vmulswzx_vsvl __builtin_ve_vl_vmulswzx_vsvl
+#define _vel_vmulswzx_vsvvl __builtin_ve_vl_vmulswzx_vsvvl
+#define _vel_vmulswzx_vvvmvl __builtin_ve_vl_vmulswzx_vvvmvl
+#define _vel_vmulswzx_vsvmvl __builtin_ve_vl_vmulswzx_vsvmvl
+#define _vel_vmulsl_vvvl __builtin_ve_vl_vmulsl_vvvl
+#define _vel_vmulsl_vvvvl __builtin_ve_vl_vmulsl_vvvvl
+#define _vel_vmulsl_vsvl __builtin_ve_vl_vmulsl_vsvl
+#define _vel_vmulsl_vsvvl __builtin_ve_vl_vmulsl_vsvvl
+#define _vel_vmulsl_vvvmvl __builtin_ve_vl_vmulsl_vvvmvl
+#define _vel_vmulsl_vsvmvl __builtin_ve_vl_vmulsl_vsvmvl
+#define _vel_vmulslw_vvvl __builtin_ve_vl_vmulslw_vvvl
+#define _vel_vmulslw_vvvvl __builtin_ve_vl_vmulslw_vvvvl
+#define _vel_vmulslw_vsvl __builtin_ve_vl_vmulslw_vsvl
+#define _vel_vmulslw_vsvvl __builtin_ve_vl_vmulslw_vsvvl
+#define _vel_vdivul_vvvl __builtin_ve_vl_vdivul_vvvl
+#define _vel_vdivul_vvvvl __builtin_ve_vl_vdivul_vvvvl
+#define _vel_vdivul_vsvl __builtin_ve_vl_vdivul_vsvl
+#define _vel_vdivul_vsvvl __builtin_ve_vl_vdivul_vsvvl
+#define _vel_vdivul_vvvmvl __builtin_ve_vl_vdivul_vvvmvl
+#define _vel_vdivul_vsvmvl __builtin_ve_vl_vdivul_vsvmvl
+#define _vel_vdivuw_vvvl __builtin_ve_vl_vdivuw_vvvl
+#define _vel_vdivuw_vvvvl __builtin_ve_vl_vdivuw_vvvvl
+#define _vel_vdivuw_vsvl __builtin_ve_vl_vdivuw_vsvl
+#define _vel_vdivuw_vsvvl __builtin_ve_vl_vdivuw_vsvvl
+#define _vel_vdivuw_vvvmvl __builtin_ve_vl_vdivuw_vvvmvl
+#define _vel_vdivuw_vsvmvl __builtin_ve_vl_vdivuw_vsvmvl
+#define _vel_vdivul_vvsl __builtin_ve_vl_vdivul_vvsl
+#define _vel_vdivul_vvsvl __builtin_ve_vl_vdivul_vvsvl
+#define _vel_vdivul_vvsmvl __builtin_ve_vl_vdivul_vvsmvl
+#define _vel_vdivuw_vvsl __builtin_ve_vl_vdivuw_vvsl
+#define _vel_vdivuw_vvsvl __builtin_ve_vl_vdivuw_vvsvl
+#define _vel_vdivuw_vvsmvl __builtin_ve_vl_vdivuw_vvsmvl
+#define _vel_vdivswsx_vvvl __builtin_ve_vl_vdivswsx_vvvl
+#define _vel_vdivswsx_vvvvl __builtin_ve_vl_vdivswsx_vvvvl
+#define _vel_vdivswsx_vsvl __builtin_ve_vl_vdivswsx_vsvl
+#define _vel_vdivswsx_vsvvl __builtin_ve_vl_vdivswsx_vsvvl
+#define _vel_vdivswsx_vvvmvl __builtin_ve_vl_vdivswsx_vvvmvl
+#define _vel_vdivswsx_vsvmvl __builtin_ve_vl_vdivswsx_vsvmvl
+#define _vel_vdivswzx_vvvl __builtin_ve_vl_vdivswzx_vvvl
+#define _vel_vdivswzx_vvvvl __builtin_ve_vl_vdivswzx_vvvvl
+#define _vel_vdivswzx_vsvl __builtin_ve_vl_vdivswzx_vsvl
+#define _vel_vdivswzx_vsvvl __builtin_ve_vl_vdivswzx_vsvvl
+#define _vel_vdivswzx_vvvmvl __builtin_ve_vl_vdivswzx_vvvmvl
+#define _vel_vdivswzx_vsvmvl __builtin_ve_vl_vdivswzx_vsvmvl
+#define _vel_vdivswsx_vvsl __builtin_ve_vl_vdivswsx_vvsl
+#define _vel_vdivswsx_vvsvl __builtin_ve_vl_vdivswsx_vvsvl
+#define _vel_vdivswsx_vvsmvl __builtin_ve_vl_vdivswsx_vvsmvl
+#define _vel_vdivswzx_vvsl __builtin_ve_vl_vdivswzx_vvsl
+#define _vel_vdivswzx_vvsvl __builtin_ve_vl_vdivswzx_vvsvl
+#define _vel_vdivswzx_vvsmvl __builtin_ve_vl_vdivswzx_vvsmvl
+#define _vel_vdivsl_vvvl __builtin_ve_vl_vdivsl_vvvl
+#define _vel_vdivsl_vvvvl __builtin_ve_vl_vdivsl_vvvvl
+#define _vel_vdivsl_vsvl __builtin_ve_vl_vdivsl_vsvl
+#define _vel_vdivsl_vsvvl __builtin_ve_vl_vdivsl_vsvvl
+#define _vel_vdivsl_vvvmvl __builtin_ve_vl_vdivsl_vvvmvl
+#define _vel_vdivsl_vsvmvl __builtin_ve_vl_vdivsl_vsvmvl
+#define _vel_vdivsl_vvsl __builtin_ve_vl_vdivsl_vvsl
+#define _vel_vdivsl_vvsvl __builtin_ve_vl_vdivsl_vvsvl
+#define _vel_vdivsl_vvsmvl __builtin_ve_vl_vdivsl_vvsmvl
+#define _vel_vcmpul_vvvl __builtin_ve_vl_vcmpul_vvvl
+#define _vel_vcmpul_vvvvl __builtin_ve_vl_vcmpul_vvvvl
+#define _vel_vcmpul_vsvl __builtin_ve_vl_vcmpul_vsvl
+#define _vel_vcmpul_vsvvl __builtin_ve_vl_vcmpul_vsvvl
+#define _vel_vcmpul_vvvmvl __builtin_ve_vl_vcmpul_vvvmvl
+#define _vel_vcmpul_vsvmvl __builtin_ve_vl_vcmpul_vsvmvl
+#define _vel_vcmpuw_vvvl __builtin_ve_vl_vcmpuw_vvvl
+#define _vel_vcmpuw_vvvvl __builtin_ve_vl_vcmpuw_vvvvl
+#define _vel_vcmpuw_vsvl __builtin_ve_vl_vcmpuw_vsvl
+#define _vel_vcmpuw_vsvvl __builtin_ve_vl_vcmpuw_vsvvl
+#define _vel_vcmpuw_vvvmvl __builtin_ve_vl_vcmpuw_vvvmvl
+#define _vel_vcmpuw_vsvmvl __builtin_ve_vl_vcmpuw_vsvmvl
+#define _vel_pvcmpu_vvvl __builtin_ve_vl_pvcmpu_vvvl
+#define _vel_pvcmpu_vvvvl __builtin_ve_vl_pvcmpu_vvvvl
+#define _vel_pvcmpu_vsvl __builtin_ve_vl_pvcmpu_vsvl
+#define _vel_pvcmpu_vsvvl __builtin_ve_vl_pvcmpu_vsvvl
+#define _vel_pvcmpu_vvvMvl __builtin_ve_vl_pvcmpu_vvvMvl
+#define _vel_pvcmpu_vsvMvl __builtin_ve_vl_pvcmpu_vsvMvl
+#define _vel_vcmpswsx_vvvl __builtin_ve_vl_vcmpswsx_vvvl
+#define _vel_vcmpswsx_vvvvl __builtin_ve_vl_vcmpswsx_vvvvl
+#define _vel_vcmpswsx_vsvl __builtin_ve_vl_vcmpswsx_vsvl
+#define _vel_vcmpswsx_vsvvl __builtin_ve_vl_vcmpswsx_vsvvl
+#define _vel_vcmpswsx_vvvmvl __builtin_ve_vl_vcmpswsx_vvvmvl
+#define _vel_vcmpswsx_vsvmvl __builtin_ve_vl_vcmpswsx_vsvmvl
+#define _vel_vcmpswzx_vvvl __builtin_ve_vl_vcmpswzx_vvvl
+#define _vel_vcmpswzx_vvvvl __builtin_ve_vl_vcmpswzx_vvvvl
+#define _vel_vcmpswzx_vsvl __builtin_ve_vl_vcmpswzx_vsvl
+#define _vel_vcmpswzx_vsvvl __builtin_ve_vl_vcmpswzx_vsvvl
+#define _vel_vcmpswzx_vvvmvl __builtin_ve_vl_vcmpswzx_vvvmvl
+#define _vel_vcmpswzx_vsvmvl __builtin_ve_vl_vcmpswzx_vsvmvl
+#define _vel_pvcmps_vvvl __builtin_ve_vl_pvcmps_vvvl
+#define _vel_pvcmps_vvvvl __builtin_ve_vl_pvcmps_vvvvl
+#define _vel_pvcmps_vsvl __builtin_ve_vl_pvcmps_vsvl
+#define _vel_pvcmps_vsvvl __builtin_ve_vl_pvcmps_vsvvl
+#define _vel_pvcmps_vvvMvl __builtin_ve_vl_pvcmps_vvvMvl
+#define _vel_pvcmps_vsvMvl __builtin_ve_vl_pvcmps_vsvMvl
+#define _vel_vcmpsl_vvvl __builtin_ve_vl_vcmpsl_vvvl
+#define _vel_vcmpsl_vvvvl __builtin_ve_vl_vcmpsl_vvvvl
+#define _vel_vcmpsl_vsvl __builtin_ve_vl_vcmpsl_vsvl
+#define _vel_vcmpsl_vsvvl __builtin_ve_vl_vcmpsl_vsvvl
+#define _vel_vcmpsl_vvvmvl __builtin_ve_vl_vcmpsl_vvvmvl
+#define _vel_vcmpsl_vsvmvl __builtin_ve_vl_vcmpsl_vsvmvl
+#define _vel_vmaxswsx_vvvl __builtin_ve_vl_vmaxswsx_vvvl
+#define _vel_vmaxswsx_vvvvl __builtin_ve_vl_vmaxswsx_vvvvl
+#define _vel_vmaxswsx_vsvl __builtin_ve_vl_vmaxswsx_vsvl
+#define _vel_vmaxswsx_vsvvl __builtin_ve_vl_vmaxswsx_vsvvl
+#define _vel_vmaxswsx_vvvmvl __builtin_ve_vl_vmaxswsx_vvvmvl
+#define _vel_vmaxswsx_vsvmvl __builtin_ve_vl_vmaxswsx_vsvmvl
+#define _vel_vmaxswzx_vvvl __builtin_ve_vl_vmaxswzx_vvvl
+#define _vel_vmaxswzx_vvvvl __builtin_ve_vl_vmaxswzx_vvvvl
+#define _vel_vmaxswzx_vsvl __builtin_ve_vl_vmaxswzx_vsvl
+#define _vel_vmaxswzx_vsvvl __builtin_ve_vl_vmaxswzx_vsvvl
+#define _vel_vmaxswzx_vvvmvl __builtin_ve_vl_vmaxswzx_vvvmvl
+#define _vel_vmaxswzx_vsvmvl __builtin_ve_vl_vmaxswzx_vsvmvl
+#define _vel_pvmaxs_vvvl __builtin_ve_vl_pvmaxs_vvvl
+#define _vel_pvmaxs_vvvvl __builtin_ve_vl_pvmaxs_vvvvl
+#define _vel_pvmaxs_vsvl __builtin_ve_vl_pvmaxs_vsvl
+#define _vel_pvmaxs_vsvvl __builtin_ve_vl_pvmaxs_vsvvl
+#define _vel_pvmaxs_vvvMvl __builtin_ve_vl_pvmaxs_vvvMvl
+#define _vel_pvmaxs_vsvMvl __builtin_ve_vl_pvmaxs_vsvMvl
+#define _vel_vminswsx_vvvl __builtin_ve_vl_vminswsx_vvvl
+#define _vel_vminswsx_vvvvl __builtin_ve_vl_vminswsx_vvvvl
+#define _vel_vminswsx_vsvl __builtin_ve_vl_vminswsx_vsvl
+#define _vel_vminswsx_vsvvl __builtin_ve_vl_vminswsx_vsvvl
+#define _vel_vminswsx_vvvmvl __builtin_ve_vl_vminswsx_vvvmvl
+#define _vel_vminswsx_vsvmvl __builtin_ve_vl_vminswsx_vsvmvl
+#define _vel_vminswzx_vvvl __builtin_ve_vl_vminswzx_vvvl
+#define _vel_vminswzx_vvvvl __builtin_ve_vl_vminswzx_vvvvl
+#define _vel_vminswzx_vsvl __builtin_ve_vl_vminswzx_vsvl
+#define _vel_vminswzx_vsvvl __builtin_ve_vl_vminswzx_vsvvl
+#define _vel_vminswzx_vvvmvl __builtin_ve_vl_vminswzx_vvvmvl
+#define _vel_vminswzx_vsvmvl __builtin_ve_vl_vminswzx_vsvmvl
+#define _vel_pvmins_vvvl __builtin_ve_vl_pvmins_vvvl
+#define _vel_pvmins_vvvvl __builtin_ve_vl_pvmins_vvvvl
+#define _vel_pvmins_vsvl __builtin_ve_vl_pvmins_vsvl
+#define _vel_pvmins_vsvvl __builtin_ve_vl_pvmins_vsvvl
+#define _vel_pvmins_vvvMvl __builtin_ve_vl_pvmins_vvvMvl
+#define _vel_pvmins_vsvMvl __builtin_ve_vl_pvmins_vsvMvl
+#define _vel_vmaxsl_vvvl __builtin_ve_vl_vmaxsl_vvvl
+#define _vel_vmaxsl_vvvvl __builtin_ve_vl_vmaxsl_vvvvl
+#define _vel_vmaxsl_vsvl __builtin_ve_vl_vmaxsl_vsvl
+#define _vel_vmaxsl_vsvvl __builtin_ve_vl_vmaxsl_vsvvl
+#define _vel_vmaxsl_vvvmvl __builtin_ve_vl_vmaxsl_vvvmvl
+#define _vel_vmaxsl_vsvmvl __builtin_ve_vl_vmaxsl_vsvmvl
+#define _vel_vminsl_vvvl __builtin_ve_vl_vminsl_vvvl
+#define _vel_vminsl_vvvvl __builtin_ve_vl_vminsl_vvvvl
+#define _vel_vminsl_vsvl __builtin_ve_vl_vminsl_vsvl
+#define _vel_vminsl_vsvvl __builtin_ve_vl_vminsl_vsvvl
+#define _vel_vminsl_vvvmvl __builtin_ve_vl_vminsl_vvvmvl
+#define _vel_vminsl_vsvmvl __builtin_ve_vl_vminsl_vsvmvl
+#define _vel_vand_vvvl __builtin_ve_vl_vand_vvvl
+#define _vel_vand_vvvvl __builtin_ve_vl_vand_vvvvl
+#define _vel_vand_vsvl __builtin_ve_vl_vand_vsvl
+#define _vel_vand_vsvvl __builtin_ve_vl_vand_vsvvl
+#define _vel_vand_vvvmvl __builtin_ve_vl_vand_vvvmvl
+#define _vel_vand_vsvmvl __builtin_ve_vl_vand_vsvmvl
+#define _vel_pvand_vvvl __builtin_ve_vl_pvand_vvvl
+#define _vel_pvand_vvvvl __builtin_ve_vl_pvand_vvvvl
+#define _vel_pvand_vsvl __builtin_ve_vl_pvand_vsvl
+#define _vel_pvand_vsvvl __builtin_ve_vl_pvand_vsvvl
+#define _vel_pvand_vvvMvl __builtin_ve_vl_pvand_vvvMvl
+#define _vel_pvand_vsvMvl __builtin_ve_vl_pvand_vsvMvl
+#define _vel_vor_vvvl __builtin_ve_vl_vor_vvvl
+#define _vel_vor_vvvvl __builtin_ve_vl_vor_vvvvl
+#define _vel_vor_vsvl __builtin_ve_vl_vor_vsvl
+#define _vel_vor_vsvvl __builtin_ve_vl_vor_vsvvl
+#define _vel_vor_vvvmvl __builtin_ve_vl_vor_vvvmvl
+#define _vel_vor_vsvmvl __builtin_ve_vl_vor_vsvmvl
+#define _vel_pvor_vvvl __builtin_ve_vl_pvor_vvvl
+#define _vel_pvor_vvvvl __builtin_ve_vl_pvor_vvvvl
+#define _vel_pvor_vsvl __builtin_ve_vl_pvor_vsvl
+#define _vel_pvor_vsvvl __builtin_ve_vl_pvor_vsvvl
+#define _vel_pvor_vvvMvl __builtin_ve_vl_pvor_vvvMvl
+#define _vel_pvor_vsvMvl __builtin_ve_vl_pvor_vsvMvl
+#define _vel_vxor_vvvl __builtin_ve_vl_vxor_vvvl
+#define _vel_vxor_vvvvl __builtin_ve_vl_vxor_vvvvl
+#define _vel_vxor_vsvl __builtin_ve_vl_vxor_vsvl
+#define _vel_vxor_vsvvl __builtin_ve_vl_vxor_vsvvl
+#define _vel_vxor_vvvmvl __builtin_ve_vl_vxor_vvvmvl
+#define _vel_vxor_vsvmvl __builtin_ve_vl_vxor_vsvmvl
+#define _vel_pvxor_vvvl __builtin_ve_vl_pvxor_vvvl
+#define _vel_pvxor_vvvvl __builtin_ve_vl_pvxor_vvvvl
+#define _vel_pvxor_vsvl __builtin_ve_vl_pvxor_vsvl
+#define _vel_pvxor_vsvvl __builtin_ve_vl_pvxor_vsvvl
+#define _vel_pvxor_vvvMvl __builtin_ve_vl_pvxor_vvvMvl
+#define _vel_pvxor_vsvMvl __builtin_ve_vl_pvxor_vsvMvl
+#define _vel_veqv_vvvl __builtin_ve_vl_veqv_vvvl
+#define _vel_veqv_vvvvl __builtin_ve_vl_veqv_vvvvl
+#define _vel_veqv_vsvl __builtin_ve_vl_veqv_vsvl
+#define _vel_veqv_vsvvl __builtin_ve_vl_veqv_vsvvl
+#define _vel_veqv_vvvmvl __builtin_ve_vl_veqv_vvvmvl
+#define _vel_veqv_vsvmvl __builtin_ve_vl_veqv_vsvmvl
+#define _vel_pveqv_vvvl __builtin_ve_vl_pveqv_vvvl
+#define _vel_pveqv_vvvvl __builtin_ve_vl_pveqv_vvvvl
+#define _vel_pveqv_vsvl __builtin_ve_vl_pveqv_vsvl
+#define _vel_pveqv_vsvvl __builtin_ve_vl_pveqv_vsvvl
+#define _vel_pveqv_vvvMvl __builtin_ve_vl_pveqv_vvvMvl
+#define _vel_pveqv_vsvMvl __builtin_ve_vl_pveqv_vsvMvl
+#define _vel_vldz_vvl __builtin_ve_vl_vldz_vvl
+#define _vel_vldz_vvvl __builtin_ve_vl_vldz_vvvl
+#define _vel_vldz_vvmvl __builtin_ve_vl_vldz_vvmvl
+#define _vel_pvldzlo_vvl __builtin_ve_vl_pvldzlo_vvl
+#define _vel_pvldzlo_vvvl __builtin_ve_vl_pvldzlo_vvvl
+#define _vel_pvldzlo_vvmvl __builtin_ve_vl_pvldzlo_vvmvl
+#define _vel_pvldzup_vvl __builtin_ve_vl_pvldzup_vvl
+#define _vel_pvldzup_vvvl __builtin_ve_vl_pvldzup_vvvl
+#define _vel_pvldzup_vvmvl __builtin_ve_vl_pvldzup_vvmvl
+#define _vel_pvldz_vvl __builtin_ve_vl_pvldz_vvl
+#define _vel_pvldz_vvvl __builtin_ve_vl_pvldz_vvvl
+#define _vel_pvldz_vvMvl __builtin_ve_vl_pvldz_vvMvl
+#define _vel_vpcnt_vvl __builtin_ve_vl_vpcnt_vvl
+#define _vel_vpcnt_vvvl __builtin_ve_vl_vpcnt_vvvl
+#define _vel_vpcnt_vvmvl __builtin_ve_vl_vpcnt_vvmvl
+#define _vel_pvpcntlo_vvl __builtin_ve_vl_pvpcntlo_vvl
+#define _vel_pvpcntlo_vvvl __builtin_ve_vl_pvpcntlo_vvvl
+#define _vel_pvpcntlo_vvmvl __builtin_ve_vl_pvpcntlo_vvmvl
+#define _vel_pvpcntup_vvl __builtin_ve_vl_pvpcntup_vvl
+#define _vel_pvpcntup_vvvl __builtin_ve_vl_pvpcntup_vvvl
+#define _vel_pvpcntup_vvmvl __builtin_ve_vl_pvpcntup_vvmvl
+#define _vel_pvpcnt_vvl __builtin_ve_vl_pvpcnt_vvl
+#define _vel_pvpcnt_vvvl __builtin_ve_vl_pvpcnt_vvvl
+#define _vel_pvpcnt_vvMvl __builtin_ve_vl_pvpcnt_vvMvl
+#define _vel_vbrv_vvl __builtin_ve_vl_vbrv_vvl
+#define _vel_vbrv_vvvl __builtin_ve_vl_vbrv_vvvl
+#define _vel_vbrv_vvmvl __builtin_ve_vl_vbrv_vvmvl
+#define _vel_pvbrvlo_vvl __builtin_ve_vl_pvbrvlo_vvl
+#define _vel_pvbrvlo_vvvl __builtin_ve_vl_pvbrvlo_vvvl
+#define _vel_pvbrvlo_vvmvl __builtin_ve_vl_pvbrvlo_vvmvl
+#define _vel_pvbrvup_vvl __builtin_ve_vl_pvbrvup_vvl
+#define _vel_pvbrvup_vvvl __builtin_ve_vl_pvbrvup_vvvl
+#define _vel_pvbrvup_vvmvl __builtin_ve_vl_pvbrvup_vvmvl
+#define _vel_pvbrv_vvl __builtin_ve_vl_pvbrv_vvl
+#define _vel_pvbrv_vvvl __builtin_ve_vl_pvbrv_vvvl
+#define _vel_pvbrv_vvMvl __builtin_ve_vl_pvbrv_vvMvl
+#define _vel_vseq_vl __builtin_ve_vl_vseq_vl
+#define _vel_vseq_vvl __builtin_ve_vl_vseq_vvl
+#define _vel_pvseqlo_vl __builtin_ve_vl_pvseqlo_vl
+#define _vel_pvseqlo_vvl __builtin_ve_vl_pvseqlo_vvl
+#define _vel_pvsequp_vl __builtin_ve_vl_pvsequp_vl
+#define _vel_pvsequp_vvl __builtin_ve_vl_pvsequp_vvl
+#define _vel_pvseq_vl __builtin_ve_vl_pvseq_vl
+#define _vel_pvseq_vvl __builtin_ve_vl_pvseq_vvl
+#define _vel_vsll_vvvl __builtin_ve_vl_vsll_vvvl
+#define _vel_vsll_vvvvl __builtin_ve_vl_vsll_vvvvl
+#define _vel_vsll_vvsl __builtin_ve_vl_vsll_vvsl
+#define _vel_vsll_vvsvl __builtin_ve_vl_vsll_vvsvl
+#define _vel_vsll_vvvmvl __builtin_ve_vl_vsll_vvvmvl
+#define _vel_vsll_vvsmvl __builtin_ve_vl_vsll_vvsmvl
+#define _vel_pvsll_vvvl __builtin_ve_vl_pvsll_vvvl
+#define _vel_pvsll_vvvvl __builtin_ve_vl_pvsll_vvvvl
+#define _vel_pvsll_vvsl __builtin_ve_vl_pvsll_vvsl
+#define _vel_pvsll_vvsvl __builtin_ve_vl_pvsll_vvsvl
+#define _vel_pvsll_vvvMvl __builtin_ve_vl_pvsll_vvvMvl
+#define _vel_pvsll_vvsMvl __builtin_ve_vl_pvsll_vvsMvl
+#define _vel_vsrl_vvvl __builtin_ve_vl_vsrl_vvvl
+#define _vel_vsrl_vvvvl __builtin_ve_vl_vsrl_vvvvl
+#define _vel_vsrl_vvsl __builtin_ve_vl_vsrl_vvsl
+#define _vel_vsrl_vvsvl __builtin_ve_vl_vsrl_vvsvl
+#define _vel_vsrl_vvvmvl __builtin_ve_vl_vsrl_vvvmvl
+#define _vel_vsrl_vvsmvl __builtin_ve_vl_vsrl_vvsmvl
+#define _vel_pvsrl_vvvl __builtin_ve_vl_pvsrl_vvvl
+#define _vel_pvsrl_vvvvl __builtin_ve_vl_pvsrl_vvvvl
+#define _vel_pvsrl_vvsl __builtin_ve_vl_pvsrl_vvsl
+#define _vel_pvsrl_vvsvl __builtin_ve_vl_pvsrl_vvsvl
+#define _vel_pvsrl_vvvMvl __builtin_ve_vl_pvsrl_vvvMvl
+#define _vel_pvsrl_vvsMvl __builtin_ve_vl_pvsrl_vvsMvl
+#define _vel_vslawsx_vvvl __builtin_ve_vl_vslawsx_vvvl
+#define _vel_vslawsx_vvvvl __builtin_ve_vl_vslawsx_vvvvl
+#define _vel_vslawsx_vvsl __builtin_ve_vl_vslawsx_vvsl
+#define _vel_vslawsx_vvsvl __builtin_ve_vl_vslawsx_vvsvl
+#define _vel_vslawsx_vvvmvl __builtin_ve_vl_vslawsx_vvvmvl
+#define _vel_vslawsx_vvsmvl __builtin_ve_vl_vslawsx_vvsmvl
+#define _vel_vslawzx_vvvl __builtin_ve_vl_vslawzx_vvvl
+#define _vel_vslawzx_vvvvl __builtin_ve_vl_vslawzx_vvvvl
+#define _vel_vslawzx_vvsl __builtin_ve_vl_vslawzx_vvsl
+#define _vel_vslawzx_vvsvl __builtin_ve_vl_vslawzx_vvsvl
+#define _vel_vslawzx_vvvmvl __builtin_ve_vl_vslawzx_vvvmvl
+#define _vel_vslawzx_vvsmvl __builtin_ve_vl_vslawzx_vvsmvl
+#define _vel_pvsla_vvvl __builtin_ve_vl_pvsla_vvvl
+#define _vel_pvsla_vvvvl __builtin_ve_vl_pvsla_vvvvl
+#define _vel_pvsla_vvsl __builtin_ve_vl_pvsla_vvsl
+#define _vel_pvsla_vvsvl __builtin_ve_vl_pvsla_vvsvl
+#define _vel_pvsla_vvvMvl __builtin_ve_vl_pvsla_vvvMvl
+#define _vel_pvsla_vvsMvl __builtin_ve_vl_pvsla_vvsMvl
+#define _vel_vslal_vvvl __builtin_ve_vl_vslal_vvvl
+#define _vel_vslal_vvvvl __builtin_ve_vl_vslal_vvvvl
+#define _vel_vslal_vvsl __builtin_ve_vl_vslal_vvsl
+#define _vel_vslal_vvsvl __builtin_ve_vl_vslal_vvsvl
+#define _vel_vslal_vvvmvl __builtin_ve_vl_vslal_vvvmvl
+#define _vel_vslal_vvsmvl __builtin_ve_vl_vslal_vvsmvl
+#define _vel_vsrawsx_vvvl __builtin_ve_vl_vsrawsx_vvvl
+#define _vel_vsrawsx_vvvvl __builtin_ve_vl_vsrawsx_vvvvl
+#define _vel_vsrawsx_vvsl __builtin_ve_vl_vsrawsx_vvsl
+#define _vel_vsrawsx_vvsvl __builtin_ve_vl_vsrawsx_vvsvl
+#define _vel_vsrawsx_vvvmvl __builtin_ve_vl_vsrawsx_vvvmvl
+#define _vel_vsrawsx_vvsmvl __builtin_ve_vl_vsrawsx_vvsmvl
+#define _vel_vsrawzx_vvvl __builtin_ve_vl_vsrawzx_vvvl
+#define _vel_vsrawzx_vvvvl __builtin_ve_vl_vsrawzx_vvvvl
+#define _vel_vsrawzx_vvsl __builtin_ve_vl_vsrawzx_vvsl
+#define _vel_vsrawzx_vvsvl __builtin_ve_vl_vsrawzx_vvsvl
+#define _vel_vsrawzx_vvvmvl __builtin_ve_vl_vsrawzx_vvvmvl
+#define _vel_vsrawzx_vvsmvl __builtin_ve_vl_vsrawzx_vvsmvl
+#define _vel_pvsra_vvvl __builtin_ve_vl_pvsra_vvvl
+#define _vel_pvsra_vvvvl __builtin_ve_vl_pvsra_vvvvl
+#define _vel_pvsra_vvsl __builtin_ve_vl_pvsra_vvsl
+#define _vel_pvsra_vvsvl __builtin_ve_vl_pvsra_vvsvl
+#define _vel_pvsra_vvvMvl __builtin_ve_vl_pvsra_vvvMvl
+#define _vel_pvsra_vvsMvl __builtin_ve_vl_pvsra_vvsMvl
+#define _vel_vsral_vvvl __builtin_ve_vl_vsral_vvvl
+#define _vel_vsral_vvvvl __builtin_ve_vl_vsral_vvvvl
+#define _vel_vsral_vvsl __builtin_ve_vl_vsral_vvsl
+#define _vel_vsral_vvsvl __builtin_ve_vl_vsral_vvsvl
+#define _vel_vsral_vvvmvl __builtin_ve_vl_vsral_vvvmvl
+#define _vel_vsral_vvsmvl __builtin_ve_vl_vsral_vvsmvl
+#define _vel_vsfa_vvssl __builtin_ve_vl_vsfa_vvssl
+#define _vel_vsfa_vvssvl __builtin_ve_vl_vsfa_vvssvl
+#define _vel_vsfa_vvssmvl __builtin_ve_vl_vsfa_vvssmvl
+#define _vel_vfaddd_vvvl __builtin_ve_vl_vfaddd_vvvl
+#define _vel_vfaddd_vvvvl __builtin_ve_vl_vfaddd_vvvvl
+#define _vel_vfaddd_vsvl __builtin_ve_vl_vfaddd_vsvl
+#define _vel_vfaddd_vsvvl __builtin_ve_vl_vfaddd_vsvvl
+#define _vel_vfaddd_vvvmvl __builtin_ve_vl_vfaddd_vvvmvl
+#define _vel_vfaddd_vsvmvl __builtin_ve_vl_vfaddd_vsvmvl
+#define _vel_vfadds_vvvl __builtin_ve_vl_vfadds_vvvl
+#define _vel_vfadds_vvvvl __builtin_ve_vl_vfadds_vvvvl
+#define _vel_vfadds_vsvl __builtin_ve_vl_vfadds_vsvl
+#define _vel_vfadds_vsvvl __builtin_ve_vl_vfadds_vsvvl
+#define _vel_vfadds_vvvmvl __builtin_ve_vl_vfadds_vvvmvl
+#define _vel_vfadds_vsvmvl __builtin_ve_vl_vfadds_vsvmvl
+#define _vel_pvfadd_vvvl __builtin_ve_vl_pvfadd_vvvl
+#define _vel_pvfadd_vvvvl __builtin_ve_vl_pvfadd_vvvvl
+#define _vel_pvfadd_vsvl __builtin_ve_vl_pvfadd_vsvl
+#define _vel_pvfadd_vsvvl __builtin_ve_vl_pvfadd_vsvvl
+#define _vel_pvfadd_vvvMvl __builtin_ve_vl_pvfadd_vvvMvl
+#define _vel_pvfadd_vsvMvl __builtin_ve_vl_pvfadd_vsvMvl
+#define _vel_vfsubd_vvvl __builtin_ve_vl_vfsubd_vvvl
+#define _vel_vfsubd_vvvvl __builtin_ve_vl_vfsubd_vvvvl
+#define _vel_vfsubd_vsvl __builtin_ve_vl_vfsubd_vsvl
+#define _vel_vfsubd_vsvvl __builtin_ve_vl_vfsubd_vsvvl
+#define _vel_vfsubd_vvvmvl __builtin_ve_vl_vfsubd_vvvmvl
+#define _vel_vfsubd_vsvmvl __builtin_ve_vl_vfsubd_vsvmvl
+#define _vel_vfsubs_vvvl __builtin_ve_vl_vfsubs_vvvl
+#define _vel_vfsubs_vvvvl __builtin_ve_vl_vfsubs_vvvvl
+#define _vel_vfsubs_vsvl __builtin_ve_vl_vfsubs_vsvl
+#define _vel_vfsubs_vsvvl __builtin_ve_vl_vfsubs_vsvvl
+#define _vel_vfsubs_vvvmvl __builtin_ve_vl_vfsubs_vvvmvl
+#define _vel_vfsubs_vsvmvl __builtin_ve_vl_vfsubs_vsvmvl
+#define _vel_pvfsub_vvvl __builtin_ve_vl_pvfsub_vvvl
+#define _vel_pvfsub_vvvvl __builtin_ve_vl_pvfsub_vvvvl
+#define _vel_pvfsub_vsvl __builtin_ve_vl_pvfsub_vsvl
+#define _vel_pvfsub_vsvvl __builtin_ve_vl_pvfsub_vsvvl
+#define _vel_pvfsub_vvvMvl __builtin_ve_vl_pvfsub_vvvMvl
+#define _vel_pvfsub_vsvMvl __builtin_ve_vl_pvfsub_vsvMvl
+#define _vel_vfmuld_vvvl __builtin_ve_vl_vfmuld_vvvl
+#define _vel_vfmuld_vvvvl __builtin_ve_vl_vfmuld_vvvvl
+#define _vel_vfmuld_vsvl __builtin_ve_vl_vfmuld_vsvl
+#define _vel_vfmuld_vsvvl __builtin_ve_vl_vfmuld_vsvvl
+#define _vel_vfmuld_vvvmvl __builtin_ve_vl_vfmuld_vvvmvl
+#define _vel_vfmuld_vsvmvl __builtin_ve_vl_vfmuld_vsvmvl
+#define _vel_vfmuls_vvvl __builtin_ve_vl_vfmuls_vvvl
+#define _vel_vfmuls_vvvvl __builtin_ve_vl_vfmuls_vvvvl
+#define _vel_vfmuls_vsvl __builtin_ve_vl_vfmuls_vsvl
+#define _vel_vfmuls_vsvvl __builtin_ve_vl_vfmuls_vsvvl
+#define _vel_vfmuls_vvvmvl __builtin_ve_vl_vfmuls_vvvmvl
+#define _vel_vfmuls_vsvmvl __builtin_ve_vl_vfmuls_vsvmvl
+#define _vel_pvfmul_vvvl __builtin_ve_vl_pvfmul_vvvl
+#define _vel_pvfmul_vvvvl __builtin_ve_vl_pvfmul_vvvvl
+#define _vel_pvfmul_vsvl __builtin_ve_vl_pvfmul_vsvl
+#define _vel_pvfmul_vsvvl __builtin_ve_vl_pvfmul_vsvvl
+#define _vel_pvfmul_vvvMvl __builtin_ve_vl_pvfmul_vvvMvl
+#define _vel_pvfmul_vsvMvl __builtin_ve_vl_pvfmul_vsvMvl
+#define _vel_vfdivd_vvvl __builtin_ve_vl_vfdivd_vvvl
+#define _vel_vfdivd_vvvvl __builtin_ve_vl_vfdivd_vvvvl
+#define _vel_vfdivd_vsvl __builtin_ve_vl_vfdivd_vsvl
+#define _vel_vfdivd_vsvvl __builtin_ve_vl_vfdivd_vsvvl
+#define _vel_vfdivd_vvvmvl __builtin_ve_vl_vfdivd_vvvmvl
+#define _vel_vfdivd_vsvmvl __builtin_ve_vl_vfdivd_vsvmvl
+#define _vel_vfdivs_vvvl __builtin_ve_vl_vfdivs_vvvl
+#define _vel_vfdivs_vvvvl __builtin_ve_vl_vfdivs_vvvvl
+#define _vel_vfdivs_vsvl __builtin_ve_vl_vfdivs_vsvl
+#define _vel_vfdivs_vsvvl __builtin_ve_vl_vfdivs_vsvvl
+#define _vel_vfdivs_vvvmvl __builtin_ve_vl_vfdivs_vvvmvl
+#define _vel_vfdivs_vsvmvl __builtin_ve_vl_vfdivs_vsvmvl
+#define _vel_vfsqrtd_vvl __builtin_ve_vl_vfsqrtd_vvl
+#define _vel_vfsqrtd_vvvl __builtin_ve_vl_vfsqrtd_vvvl
+#define _vel_vfsqrts_vvl __builtin_ve_vl_vfsqrts_vvl
+#define _vel_vfsqrts_vvvl __builtin_ve_vl_vfsqrts_vvvl
+#define _vel_vfcmpd_vvvl __builtin_ve_vl_vfcmpd_vvvl
+#define _vel_vfcmpd_vvvvl __builtin_ve_vl_vfcmpd_vvvvl
+#define _vel_vfcmpd_vsvl __builtin_ve_vl_vfcmpd_vsvl
+#define _vel_vfcmpd_vsvvl __builtin_ve_vl_vfcmpd_vsvvl
+#define _vel_vfcmpd_vvvmvl __builtin_ve_vl_vfcmpd_vvvmvl
+#define _vel_vfcmpd_vsvmvl __builtin_ve_vl_vfcmpd_vsvmvl
+#define _vel_vfcmps_vvvl __builtin_ve_vl_vfcmps_vvvl
+#define _vel_vfcmps_vvvvl __builtin_ve_vl_vfcmps_vvvvl
+#define _vel_vfcmps_vsvl __builtin_ve_vl_vfcmps_vsvl
+#define _vel_vfcmps_vsvvl __builtin_ve_vl_vfcmps_vsvvl
+#define _vel_vfcmps_vvvmvl __builtin_ve_vl_vfcmps_vvvmvl
+#define _vel_vfcmps_vsvmvl __builtin_ve_vl_vfcmps_vsvmvl
+#define _vel_pvfcmp_vvvl __builtin_ve_vl_pvfcmp_vvvl
+#define _vel_pvfcmp_vvvvl __builtin_ve_vl_pvfcmp_vvvvl
+#define _vel_pvfcmp_vsvl __builtin_ve_vl_pvfcmp_vsvl
+#define _vel_pvfcmp_vsvvl __builtin_ve_vl_pvfcmp_vsvvl
+#define _vel_pvfcmp_vvvMvl __builtin_ve_vl_pvfcmp_vvvMvl
+#define _vel_pvfcmp_vsvMvl __builtin_ve_vl_pvfcmp_vsvMvl
+#define _vel_vfmaxd_vvvl __builtin_ve_vl_vfmaxd_vvvl
+#define _vel_vfmaxd_vvvvl __builtin_ve_vl_vfmaxd_vvvvl
+#define _vel_vfmaxd_vsvl __builtin_ve_vl_vfmaxd_vsvl
+#define _vel_vfmaxd_vsvvl __builtin_ve_vl_vfmaxd_vsvvl
+#define _vel_vfmaxd_vvvmvl __builtin_ve_vl_vfmaxd_vvvmvl
+#define _vel_vfmaxd_vsvmvl __builtin_ve_vl_vfmaxd_vsvmvl
+#define _vel_vfmaxs_vvvl __builtin_ve_vl_vfmaxs_vvvl
+#define _vel_vfmaxs_vvvvl __builtin_ve_vl_vfmaxs_vvvvl
+#define _vel_vfmaxs_vsvl __builtin_ve_vl_vfmaxs_vsvl
+#define _vel_vfmaxs_vsvvl __builtin_ve_vl_vfmaxs_vsvvl
+#define _vel_vfmaxs_vvvmvl __builtin_ve_vl_vfmaxs_vvvmvl
+#define _vel_vfmaxs_vsvmvl __builtin_ve_vl_vfmaxs_vsvmvl
+#define _vel_pvfmax_vvvl __builtin_ve_vl_pvfmax_vvvl
+#define _vel_pvfmax_vvvvl __builtin_ve_vl_pvfmax_vvvvl
+#define _vel_pvfmax_vsvl __builtin_ve_vl_pvfmax_vsvl
+#define _vel_pvfmax_vsvvl __builtin_ve_vl_pvfmax_vsvvl
+#define _vel_pvfmax_vvvMvl __builtin_ve_vl_pvfmax_vvvMvl
+#define _vel_pvfmax_vsvMvl __builtin_ve_vl_pvfmax_vsvMvl
+#define _vel_vfmind_vvvl __builtin_ve_vl_vfmind_vvvl
+#define _vel_vfmind_vvvvl __builtin_ve_vl_vfmind_vvvvl
+#define _vel_vfmind_vsvl __builtin_ve_vl_vfmind_vsvl
+#define _vel_vfmind_vsvvl __builtin_ve_vl_vfmind_vsvvl
+#define _vel_vfmind_vvvmvl __builtin_ve_vl_vfmind_vvvmvl
+#define _vel_vfmind_vsvmvl __builtin_ve_vl_vfmind_vsvmvl
+#define _vel_vfmins_vvvl __builtin_ve_vl_vfmins_vvvl
+#define _vel_vfmins_vvvvl __builtin_ve_vl_vfmins_vvvvl
+#define _vel_vfmins_vsvl __builtin_ve_vl_vfmins_vsvl
+#define _vel_vfmins_vsvvl __builtin_ve_vl_vfmins_vsvvl
+#define _vel_vfmins_vvvmvl __builtin_ve_vl_vfmins_vvvmvl
+#define _vel_vfmins_vsvmvl __builtin_ve_vl_vfmins_vsvmvl
+#define _vel_pvfmin_vvvl __builtin_ve_vl_pvfmin_vvvl
+#define _vel_pvfmin_vvvvl __builtin_ve_vl_pvfmin_vvvvl
+#define _vel_pvfmin_vsvl __builtin_ve_vl_pvfmin_vsvl
+#define _vel_pvfmin_vsvvl __builtin_ve_vl_pvfmin_vsvvl
+#define _vel_pvfmin_vvvMvl __builtin_ve_vl_pvfmin_vvvMvl
+#define _vel_pvfmin_vsvMvl __builtin_ve_vl_pvfmin_vsvMvl
+#define _vel_vfmadd_vvvvl __builtin_ve_vl_vfmadd_vvvvl
+#define _vel_vfmadd_vvvvvl __builtin_ve_vl_vfmadd_vvvvvl
+#define _vel_vfmadd_vsvvl __builtin_ve_vl_vfmadd_vsvvl
+#define _vel_vfmadd_vsvvvl __builtin_ve_vl_vfmadd_vsvvvl
+#define _vel_vfmadd_vvsvl __builtin_ve_vl_vfmadd_vvsvl
+#define _vel_vfmadd_vvsvvl __builtin_ve_vl_vfmadd_vvsvvl
+#define _vel_vfmadd_vvvvmvl __builtin_ve_vl_vfmadd_vvvvmvl
+#define _vel_vfmadd_vsvvmvl __builtin_ve_vl_vfmadd_vsvvmvl
+#define _vel_vfmadd_vvsvmvl __builtin_ve_vl_vfmadd_vvsvmvl
+#define _vel_vfmads_vvvvl __builtin_ve_vl_vfmads_vvvvl
+#define _vel_vfmads_vvvvvl __builtin_ve_vl_vfmads_vvvvvl
+#define _vel_vfmads_vsvvl __builtin_ve_vl_vfmads_vsvvl
+#define _vel_vfmads_vsvvvl __builtin_ve_vl_vfmads_vsvvvl
+#define _vel_vfmads_vvsvl __builtin_ve_vl_vfmads_vvsvl
+#define _vel_vfmads_vvsvvl __builtin_ve_vl_vfmads_vvsvvl
+#define _vel_vfmads_vvvvmvl __builtin_ve_vl_vfmads_vvvvmvl
+#define _vel_vfmads_vsvvmvl __builtin_ve_vl_vfmads_vsvvmvl
+#define _vel_vfmads_vvsvmvl __builtin_ve_vl_vfmads_vvsvmvl
+#define _vel_pvfmad_vvvvl __builtin_ve_vl_pvfmad_vvvvl
+#define _vel_pvfmad_vvvvvl __builtin_ve_vl_pvfmad_vvvvvl
+#define _vel_pvfmad_vsvvl __builtin_ve_vl_pvfmad_vsvvl
+#define _vel_pvfmad_vsvvvl __builtin_ve_vl_pvfmad_vsvvvl
+#define _vel_pvfmad_vvsvl __builtin_ve_vl_pvfmad_vvsvl
+#define _vel_pvfmad_vvsvvl __builtin_ve_vl_pvfmad_vvsvvl
+#define _vel_pvfmad_vvvvMvl __builtin_ve_vl_pvfmad_vvvvMvl
+#define _vel_pvfmad_vsvvMvl __builtin_ve_vl_pvfmad_vsvvMvl
+#define _vel_pvfmad_vvsvMvl __builtin_ve_vl_pvfmad_vvsvMvl
+#define _vel_vfmsbd_vvvvl __builtin_ve_vl_vfmsbd_vvvvl
+#define _vel_vfmsbd_vvvvvl __builtin_ve_vl_vfmsbd_vvvvvl
+#define _vel_vfmsbd_vsvvl __builtin_ve_vl_vfmsbd_vsvvl
+#define _vel_vfmsbd_vsvvvl __builtin_ve_vl_vfmsbd_vsvvvl
+#define _vel_vfmsbd_vvsvl __builtin_ve_vl_vfmsbd_vvsvl
+#define _vel_vfmsbd_vvsvvl __builtin_ve_vl_vfmsbd_vvsvvl
+#define _vel_vfmsbd_vvvvmvl __builtin_ve_vl_vfmsbd_vvvvmvl
+#define _vel_vfmsbd_vsvvmvl __builtin_ve_vl_vfmsbd_vsvvmvl
+#define _vel_vfmsbd_vvsvmvl __builtin_ve_vl_vfmsbd_vvsvmvl
+#define _vel_vfmsbs_vvvvl __builtin_ve_vl_vfmsbs_vvvvl
+#define _vel_vfmsbs_vvvvvl __builtin_ve_vl_vfmsbs_vvvvvl
+#define _vel_vfmsbs_vsvvl __builtin_ve_vl_vfmsbs_vsvvl
+#define _vel_vfmsbs_vsvvvl __builtin_ve_vl_vfmsbs_vsvvvl
+#define _vel_vfmsbs_vvsvl __builtin_ve_vl_vfmsbs_vvsvl
+#define _vel_vfmsbs_vvsvvl __builtin_ve_vl_vfmsbs_vvsvvl
+#define _vel_vfmsbs_vvvvmvl __builtin_ve_vl_vfmsbs_vvvvmvl
+#define _vel_vfmsbs_vsvvmvl __builtin_ve_vl_vfmsbs_vsvvmvl
+#define _vel_vfmsbs_vvsvmvl __builtin_ve_vl_vfmsbs_vvsvmvl
+#define _vel_pvfmsb_vvvvl __builtin_ve_vl_pvfmsb_vvvvl
+#define _vel_pvfmsb_vvvvvl __builtin_ve_vl_pvfmsb_vvvvvl
+#define _vel_pvfmsb_vsvvl __builtin_ve_vl_pvfmsb_vsvvl
+#define _vel_pvfmsb_vsvvvl __builtin_ve_vl_pvfmsb_vsvvvl
+#define _vel_pvfmsb_vvsvl __builtin_ve_vl_pvfmsb_vvsvl
+#define _vel_pvfmsb_vvsvvl __builtin_ve_vl_pvfmsb_vvsvvl
+#define _vel_pvfmsb_vvvvMvl __builtin_ve_vl_pvfmsb_vvvvMvl
+#define _vel_pvfmsb_vsvvMvl __builtin_ve_vl_pvfmsb_vsvvMvl
+#define _vel_pvfmsb_vvsvMvl __builtin_ve_vl_pvfmsb_vvsvMvl
+#define _vel_vfnmadd_vvvvl __builtin_ve_vl_vfnmadd_vvvvl
+#define _vel_vfnmadd_vvvvvl __builtin_ve_vl_vfnmadd_vvvvvl
+#define _vel_vfnmadd_vsvvl __builtin_ve_vl_vfnmadd_vsvvl
+#define _vel_vfnmadd_vsvvvl __builtin_ve_vl_vfnmadd_vsvvvl
+#define _vel_vfnmadd_vvsvl __builtin_ve_vl_vfnmadd_vvsvl
+#define _vel_vfnmadd_vvsvvl __builtin_ve_vl_vfnmadd_vvsvvl
+#define _vel_vfnmadd_vvvvmvl __builtin_ve_vl_vfnmadd_vvvvmvl
+#define _vel_vfnmadd_vsvvmvl __builtin_ve_vl_vfnmadd_vsvvmvl
+#define _vel_vfnmadd_vvsvmvl __builtin_ve_vl_vfnmadd_vvsvmvl
+#define _vel_vfnmads_vvvvl __builtin_ve_vl_vfnmads_vvvvl
+#define _vel_vfnmads_vvvvvl __builtin_ve_vl_vfnmads_vvvvvl
+#define _vel_vfnmads_vsvvl __builtin_ve_vl_vfnmads_vsvvl
+#define _vel_vfnmads_vsvvvl __builtin_ve_vl_vfnmads_vsvvvl
+#define _vel_vfnmads_vvsvl __builtin_ve_vl_vfnmads_vvsvl
+#define _vel_vfnmads_vvsvvl __builtin_ve_vl_vfnmads_vvsvvl
+#define _vel_vfnmads_vvvvmvl __builtin_ve_vl_vfnmads_vvvvmvl
+#define _vel_vfnmads_vsvvmvl __builtin_ve_vl_vfnmads_vsvvmvl
+#define _vel_vfnmads_vvsvmvl __builtin_ve_vl_vfnmads_vvsvmvl
+#define _vel_pvfnmad_vvvvl __builtin_ve_vl_pvfnmad_vvvvl
+#define _vel_pvfnmad_vvvvvl __builtin_ve_vl_pvfnmad_vvvvvl
+#define _vel_pvfnmad_vsvvl __builtin_ve_vl_pvfnmad_vsvvl
+#define _vel_pvfnmad_vsvvvl __builtin_ve_vl_pvfnmad_vsvvvl
+#define _vel_pvfnmad_vvsvl __builtin_ve_vl_pvfnmad_vvsvl
+#define _vel_pvfnmad_vvsvvl __builtin_ve_vl_pvfnmad_vvsvvl
+#define _vel_pvfnmad_vvvvMvl __builtin_ve_vl_pvfnmad_vvvvMvl
+#define _vel_pvfnmad_vsvvMvl __builtin_ve_vl_pvfnmad_vsvvMvl
+#define _vel_pvfnmad_vvsvMvl __builtin_ve_vl_pvfnmad_vvsvMvl
+#define _vel_vfnmsbd_vvvvl __builtin_ve_vl_vfnmsbd_vvvvl
+#define _vel_vfnmsbd_vvvvvl __builtin_ve_vl_vfnmsbd_vvvvvl
+#define _vel_vfnmsbd_vsvvl __builtin_ve_vl_vfnmsbd_vsvvl
+#define _vel_vfnmsbd_vsvvvl __builtin_ve_vl_vfnmsbd_vsvvvl
+#define _vel_vfnmsbd_vvsvl __builtin_ve_vl_vfnmsbd_vvsvl
+#define _vel_vfnmsbd_vvsvvl __builtin_ve_vl_vfnmsbd_vvsvvl
+#define _vel_vfnmsbd_vvvvmvl __builtin_ve_vl_vfnmsbd_vvvvmvl
+#define _vel_vfnmsbd_vsvvmvl __builtin_ve_vl_vfnmsbd_vsvvmvl
+#define _vel_vfnmsbd_vvsvmvl __builtin_ve_vl_vfnmsbd_vvsvmvl
+#define _vel_vfnmsbs_vvvvl __builtin_ve_vl_vfnmsbs_vvvvl
+#define _vel_vfnmsbs_vvvvvl __builtin_ve_vl_vfnmsbs_vvvvvl
+#define _vel_vfnmsbs_vsvvl __builtin_ve_vl_vfnmsbs_vsvvl
+#define _vel_vfnmsbs_vsvvvl __builtin_ve_vl_vfnmsbs_vsvvvl
+#define _vel_vfnmsbs_vvsvl __builtin_ve_vl_vfnmsbs_vvsvl
+#define _vel_vfnmsbs_vvsvvl __builtin_ve_vl_vfnmsbs_vvsvvl
+#define _vel_vfnmsbs_vvvvmvl __builtin_ve_vl_vfnmsbs_vvvvmvl
+#define _vel_vfnmsbs_vsvvmvl __builtin_ve_vl_vfnmsbs_vsvvmvl
+#define _vel_vfnmsbs_vvsvmvl __builtin_ve_vl_vfnmsbs_vvsvmvl
+#define _vel_pvfnmsb_vvvvl __builtin_ve_vl_pvfnmsb_vvvvl
+#define _vel_pvfnmsb_vvvvvl __builtin_ve_vl_pvfnmsb_vvvvvl
+#define _vel_pvfnmsb_vsvvl __builtin_ve_vl_pvfnmsb_vsvvl
+#define _vel_pvfnmsb_vsvvvl __builtin_ve_vl_pvfnmsb_vsvvvl
+#define _vel_pvfnmsb_vvsvl __builtin_ve_vl_pvfnmsb_vvsvl
+#define _vel_pvfnmsb_vvsvvl __builtin_ve_vl_pvfnmsb_vvsvvl
+#define _vel_pvfnmsb_vvvvMvl __builtin_ve_vl_pvfnmsb_vvvvMvl
+#define _vel_pvfnmsb_vsvvMvl __builtin_ve_vl_pvfnmsb_vsvvMvl
+#define _vel_pvfnmsb_vvsvMvl __builtin_ve_vl_pvfnmsb_vvsvMvl
+#define _vel_vrcpd_vvl __builtin_ve_vl_vrcpd_vvl
+#define _vel_vrcpd_vvvl __builtin_ve_vl_vrcpd_vvvl
+#define _vel_vrcps_vvl __builtin_ve_vl_vrcps_vvl
+#define _vel_vrcps_vvvl __builtin_ve_vl_vrcps_vvvl
+#define _vel_pvrcp_vvl __builtin_ve_vl_pvrcp_vvl
+#define _vel_pvrcp_vvvl __builtin_ve_vl_pvrcp_vvvl
+#define _vel_vrsqrtd_vvl __builtin_ve_vl_vrsqrtd_vvl
+#define _vel_vrsqrtd_vvvl __builtin_ve_vl_vrsqrtd_vvvl
+#define _vel_vrsqrts_vvl __builtin_ve_vl_vrsqrts_vvl
+#define _vel_vrsqrts_vvvl __builtin_ve_vl_vrsqrts_vvvl
+#define _vel_pvrsqrt_vvl __builtin_ve_vl_pvrsqrt_vvl
+#define _vel_pvrsqrt_vvvl __builtin_ve_vl_pvrsqrt_vvvl
+#define _vel_vrsqrtdnex_vvl __builtin_ve_vl_vrsqrtdnex_vvl
+#define _vel_vrsqrtdnex_vvvl __builtin_ve_vl_vrsqrtdnex_vvvl
+#define _vel_vrsqrtsnex_vvl __builtin_ve_vl_vrsqrtsnex_vvl
+#define _vel_vrsqrtsnex_vvvl __builtin_ve_vl_vrsqrtsnex_vvvl
+#define _vel_pvrsqrtnex_vvl __builtin_ve_vl_pvrsqrtnex_vvl
+#define _vel_pvrsqrtnex_vvvl __builtin_ve_vl_pvrsqrtnex_vvvl
+#define _vel_vcvtwdsx_vvl __builtin_ve_vl_vcvtwdsx_vvl
+#define _vel_vcvtwdsx_vvvl __builtin_ve_vl_vcvtwdsx_vvvl
+#define _vel_vcvtwdsx_vvmvl __builtin_ve_vl_vcvtwdsx_vvmvl
+#define _vel_vcvtwdsxrz_vvl __builtin_ve_vl_vcvtwdsxrz_vvl
+#define _vel_vcvtwdsxrz_vvvl __builtin_ve_vl_vcvtwdsxrz_vvvl
+#define _vel_vcvtwdsxrz_vvmvl __builtin_ve_vl_vcvtwdsxrz_vvmvl
+#define _vel_vcvtwdzx_vvl __builtin_ve_vl_vcvtwdzx_vvl
+#define _vel_vcvtwdzx_vvvl __builtin_ve_vl_vcvtwdzx_vvvl
+#define _vel_vcvtwdzx_vvmvl __builtin_ve_vl_vcvtwdzx_vvmvl
+#define _vel_vcvtwdzxrz_vvl __builtin_ve_vl_vcvtwdzxrz_vvl
+#define _vel_vcvtwdzxrz_vvvl __builtin_ve_vl_vcvtwdzxrz_vvvl
+#define _vel_vcvtwdzxrz_vvmvl __builtin_ve_vl_vcvtwdzxrz_vvmvl
+#define _vel_vcvtwssx_vvl __builtin_ve_vl_vcvtwssx_vvl
+#define _vel_vcvtwssx_vvvl __builtin_ve_vl_vcvtwssx_vvvl
+#define _vel_vcvtwssx_vvmvl __builtin_ve_vl_vcvtwssx_vvmvl
+#define _vel_vcvtwssxrz_vvl __builtin_ve_vl_vcvtwssxrz_vvl
+#define _vel_vcvtwssxrz_vvvl __builtin_ve_vl_vcvtwssxrz_vvvl
+#define _vel_vcvtwssxrz_vvmvl __builtin_ve_vl_vcvtwssxrz_vvmvl
+#define _vel_vcvtwszx_vvl __builtin_ve_vl_vcvtwszx_vvl
+#define _vel_vcvtwszx_vvvl __builtin_ve_vl_vcvtwszx_vvvl
+#define _vel_vcvtwszx_vvmvl __builtin_ve_vl_vcvtwszx_vvmvl
+#define _vel_vcvtwszxrz_vvl __builtin_ve_vl_vcvtwszxrz_vvl
+#define _vel_vcvtwszxrz_vvvl __builtin_ve_vl_vcvtwszxrz_vvvl
+#define _vel_vcvtwszxrz_vvmvl __builtin_ve_vl_vcvtwszxrz_vvmvl
+#define _vel_pvcvtws_vvl __builtin_ve_vl_pvcvtws_vvl
+#define _vel_pvcvtws_vvvl __builtin_ve_vl_pvcvtws_vvvl
+#define _vel_pvcvtws_vvMvl __builtin_ve_vl_pvcvtws_vvMvl
+#define _vel_pvcvtwsrz_vvl __builtin_ve_vl_pvcvtwsrz_vvl
+#define _vel_pvcvtwsrz_vvvl __builtin_ve_vl_pvcvtwsrz_vvvl
+#define _vel_pvcvtwsrz_vvMvl __builtin_ve_vl_pvcvtwsrz_vvMvl
+#define _vel_vcvtld_vvl __builtin_ve_vl_vcvtld_vvl
+#define _vel_vcvtld_vvvl __builtin_ve_vl_vcvtld_vvvl
+#define _vel_vcvtld_vvmvl __builtin_ve_vl_vcvtld_vvmvl
+#define _vel_vcvtldrz_vvl __builtin_ve_vl_vcvtldrz_vvl
+#define _vel_vcvtldrz_vvvl __builtin_ve_vl_vcvtldrz_vvvl
+#define _vel_vcvtldrz_vvmvl __builtin_ve_vl_vcvtldrz_vvmvl
+#define _vel_vcvtdw_vvl __builtin_ve_vl_vcvtdw_vvl
+#define _vel_vcvtdw_vvvl __builtin_ve_vl_vcvtdw_vvvl
+#define _vel_vcvtsw_vvl __builtin_ve_vl_vcvtsw_vvl
+#define _vel_vcvtsw_vvvl __builtin_ve_vl_vcvtsw_vvvl
+#define _vel_pvcvtsw_vvl __builtin_ve_vl_pvcvtsw_vvl
+#define _vel_pvcvtsw_vvvl __builtin_ve_vl_pvcvtsw_vvvl
+#define _vel_vcvtdl_vvl __builtin_ve_vl_vcvtdl_vvl
+#define _vel_vcvtdl_vvvl __builtin_ve_vl_vcvtdl_vvvl
+#define _vel_vcvtds_vvl __builtin_ve_vl_vcvtds_vvl
+#define _vel_vcvtds_vvvl __builtin_ve_vl_vcvtds_vvvl
+#define _vel_vcvtsd_vvl __builtin_ve_vl_vcvtsd_vvl
+#define _vel_vcvtsd_vvvl __builtin_ve_vl_vcvtsd_vvvl
+#define _vel_vmrg_vvvml __builtin_ve_vl_vmrg_vvvml
+#define _vel_vmrg_vvvmvl __builtin_ve_vl_vmrg_vvvmvl
+#define _vel_vmrg_vsvml __builtin_ve_vl_vmrg_vsvml
+#define _vel_vmrg_vsvmvl __builtin_ve_vl_vmrg_vsvmvl
+#define _vel_vmrgw_vvvMl __builtin_ve_vl_vmrgw_vvvMl
+#define _vel_vmrgw_vvvMvl __builtin_ve_vl_vmrgw_vvvMvl
+#define _vel_vmrgw_vsvMl __builtin_ve_vl_vmrgw_vsvMl
+#define _vel_vmrgw_vsvMvl __builtin_ve_vl_vmrgw_vsvMvl
+#define _vel_vshf_vvvsl __builtin_ve_vl_vshf_vvvsl
+#define _vel_vshf_vvvsvl __builtin_ve_vl_vshf_vvvsvl
+#define _vel_vcp_vvmvl __builtin_ve_vl_vcp_vvmvl
+#define _vel_vex_vvmvl __builtin_ve_vl_vex_vvmvl
+#define _vel_vfmklat_ml __builtin_ve_vl_vfmklat_ml
+#define _vel_vfmklaf_ml __builtin_ve_vl_vfmklaf_ml
+#define _vel_pvfmkat_Ml __builtin_ve_vl_pvfmkat_Ml
+#define _vel_pvfmkaf_Ml __builtin_ve_vl_pvfmkaf_Ml
+#define _vel_vfmklgt_mvl __builtin_ve_vl_vfmklgt_mvl
+#define _vel_vfmklgt_mvml __builtin_ve_vl_vfmklgt_mvml
+#define _vel_vfmkllt_mvl __builtin_ve_vl_vfmkllt_mvl
+#define _vel_vfmkllt_mvml __builtin_ve_vl_vfmkllt_mvml
+#define _vel_vfmklne_mvl __builtin_ve_vl_vfmklne_mvl
+#define _vel_vfmklne_mvml __builtin_ve_vl_vfmklne_mvml
+#define _vel_vfmkleq_mvl __builtin_ve_vl_vfmkleq_mvl
+#define _vel_vfmkleq_mvml __builtin_ve_vl_vfmkleq_mvml
+#define _vel_vfmklge_mvl __builtin_ve_vl_vfmklge_mvl
+#define _vel_vfmklge_mvml __builtin_ve_vl_vfmklge_mvml
+#define _vel_vfmklle_mvl __builtin_ve_vl_vfmklle_mvl
+#define _vel_vfmklle_mvml __builtin_ve_vl_vfmklle_mvml
+#define _vel_vfmklnum_mvl __builtin_ve_vl_vfmklnum_mvl
+#define _vel_vfmklnum_mvml __builtin_ve_vl_vfmklnum_mvml
+#define _vel_vfmklnan_mvl __builtin_ve_vl_vfmklnan_mvl
+#define _vel_vfmklnan_mvml __builtin_ve_vl_vfmklnan_mvml
+#define _vel_vfmklgtnan_mvl __builtin_ve_vl_vfmklgtnan_mvl
+#define _vel_vfmklgtnan_mvml __builtin_ve_vl_vfmklgtnan_mvml
+#define _vel_vfmklltnan_mvl __builtin_ve_vl_vfmklltnan_mvl
+#define _vel_vfmklltnan_mvml __builtin_ve_vl_vfmklltnan_mvml
+#define _vel_vfmklnenan_mvl __builtin_ve_vl_vfmklnenan_mvl
+#define _vel_vfmklnenan_mvml __builtin_ve_vl_vfmklnenan_mvml
+#define _vel_vfmkleqnan_mvl __builtin_ve_vl_vfmkleqnan_mvl
+#define _vel_vfmkleqnan_mvml __builtin_ve_vl_vfmkleqnan_mvml
+#define _vel_vfmklgenan_mvl __builtin_ve_vl_vfmklgenan_mvl
+#define _vel_vfmklgenan_mvml __builtin_ve_vl_vfmklgenan_mvml
+#define _vel_vfmkllenan_mvl __builtin_ve_vl_vfmkllenan_mvl
+#define _vel_vfmkllenan_mvml __builtin_ve_vl_vfmkllenan_mvml
+#define _vel_vfmkwgt_mvl __builtin_ve_vl_vfmkwgt_mvl
+#define _vel_vfmkwgt_mvml __builtin_ve_vl_vfmkwgt_mvml
+#define _vel_vfmkwlt_mvl __builtin_ve_vl_vfmkwlt_mvl
+#define _vel_vfmkwlt_mvml __builtin_ve_vl_vfmkwlt_mvml
+#define _vel_vfmkwne_mvl __builtin_ve_vl_vfmkwne_mvl
+#define _vel_vfmkwne_mvml __builtin_ve_vl_vfmkwne_mvml
+#define _vel_vfmkweq_mvl __builtin_ve_vl_vfmkweq_mvl
+#define _vel_vfmkweq_mvml __builtin_ve_vl_vfmkweq_mvml
+#define _vel_vfmkwge_mvl __builtin_ve_vl_vfmkwge_mvl
+#define _vel_vfmkwge_mvml __builtin_ve_vl_vfmkwge_mvml
+#define _vel_vfmkwle_mvl __builtin_ve_vl_vfmkwle_mvl
+#define _vel_vfmkwle_mvml __builtin_ve_vl_vfmkwle_mvml
+#define _vel_vfmkwnum_mvl __builtin_ve_vl_vfmkwnum_mvl
+#define _vel_vfmkwnum_mvml __builtin_ve_vl_vfmkwnum_mvml
+#define _vel_vfmkwnan_mvl __builtin_ve_vl_vfmkwnan_mvl
+#define _vel_vfmkwnan_mvml __builtin_ve_vl_vfmkwnan_mvml
+#define _vel_vfmkwgtnan_mvl __builtin_ve_vl_vfmkwgtnan_mvl
+#define _vel_vfmkwgtnan_mvml __builtin_ve_vl_vfmkwgtnan_mvml
+#define _vel_vfmkwltnan_mvl __builtin_ve_vl_vfmkwltnan_mvl
+#define _vel_vfmkwltnan_mvml __builtin_ve_vl_vfmkwltnan_mvml
+#define _vel_vfmkwnenan_mvl __builtin_ve_vl_vfmkwnenan_mvl
+#define _vel_vfmkwnenan_mvml __builtin_ve_vl_vfmkwnenan_mvml
+#define _vel_vfmkweqnan_mvl __builtin_ve_vl_vfmkweqnan_mvl
+#define _vel_vfmkweqnan_mvml __builtin_ve_vl_vfmkweqnan_mvml
+#define _vel_vfmkwgenan_mvl __builtin_ve_vl_vfmkwgenan_mvl
+#define _vel_vfmkwgenan_mvml __builtin_ve_vl_vfmkwgenan_mvml
+#define _vel_vfmkwlenan_mvl __builtin_ve_vl_vfmkwlenan_mvl
+#define _vel_vfmkwlenan_mvml __builtin_ve_vl_vfmkwlenan_mvml
+#define _vel_pvfmkwlogt_mvl __builtin_ve_vl_pvfmkwlogt_mvl
+#define _vel_pvfmkwupgt_mvl __builtin_ve_vl_pvfmkwupgt_mvl
+#define _vel_pvfmkwlogt_mvml __builtin_ve_vl_pvfmkwlogt_mvml
+#define _vel_pvfmkwupgt_mvml __builtin_ve_vl_pvfmkwupgt_mvml
+#define _vel_pvfmkwlolt_mvl __builtin_ve_vl_pvfmkwlolt_mvl
+#define _vel_pvfmkwuplt_mvl __builtin_ve_vl_pvfmkwuplt_mvl
+#define _vel_pvfmkwlolt_mvml __builtin_ve_vl_pvfmkwlolt_mvml
+#define _vel_pvfmkwuplt_mvml __builtin_ve_vl_pvfmkwuplt_mvml
+#define _vel_pvfmkwlone_mvl __builtin_ve_vl_pvfmkwlone_mvl
+#define _vel_pvfmkwupne_mvl __builtin_ve_vl_pvfmkwupne_mvl
+#define _vel_pvfmkwlone_mvml __builtin_ve_vl_pvfmkwlone_mvml
+#define _vel_pvfmkwupne_mvml __builtin_ve_vl_pvfmkwupne_mvml
+#define _vel_pvfmkwloeq_mvl __builtin_ve_vl_pvfmkwloeq_mvl
+#define _vel_pvfmkwupeq_mvl __builtin_ve_vl_pvfmkwupeq_mvl
+#define _vel_pvfmkwloeq_mvml __builtin_ve_vl_pvfmkwloeq_mvml
+#define _vel_pvfmkwupeq_mvml __builtin_ve_vl_pvfmkwupeq_mvml
+#define _vel_pvfmkwloge_mvl __builtin_ve_vl_pvfmkwloge_mvl
+#define _vel_pvfmkwupge_mvl __builtin_ve_vl_pvfmkwupge_mvl
+#define _vel_pvfmkwloge_mvml __builtin_ve_vl_pvfmkwloge_mvml
+#define _vel_pvfmkwupge_mvml __builtin_ve_vl_pvfmkwupge_mvml
+#define _vel_pvfmkwlole_mvl __builtin_ve_vl_pvfmkwlole_mvl
+#define _vel_pvfmkwuple_mvl __builtin_ve_vl_pvfmkwuple_mvl
+#define _vel_pvfmkwlole_mvml __builtin_ve_vl_pvfmkwlole_mvml
+#define _vel_pvfmkwuple_mvml __builtin_ve_vl_pvfmkwuple_mvml
+#define _vel_pvfmkwlonum_mvl __builtin_ve_vl_pvfmkwlonum_mvl
+#define _vel_pvfmkwupnum_mvl __builtin_ve_vl_pvfmkwupnum_mvl
+#define _vel_pvfmkwlonum_mvml __builtin_ve_vl_pvfmkwlonum_mvml
+#define _vel_pvfmkwupnum_mvml __builtin_ve_vl_pvfmkwupnum_mvml
+#define _vel_pvfmkwlonan_mvl __builtin_ve_vl_pvfmkwlonan_mvl
+#define _vel_pvfmkwupnan_mvl __builtin_ve_vl_pvfmkwupnan_mvl
+#define _vel_pvfmkwlonan_mvml __builtin_ve_vl_pvfmkwlonan_mvml
+#define _vel_pvfmkwupnan_mvml __builtin_ve_vl_pvfmkwupnan_mvml
+#define _vel_pvfmkwlogtnan_mvl __builtin_ve_vl_pvfmkwlogtnan_mvl
+#define _vel_pvfmkwupgtnan_mvl __builtin_ve_vl_pvfmkwupgtnan_mvl
+#define _vel_pvfmkwlogtnan_mvml __builtin_ve_vl_pvfmkwlogtnan_mvml
+#define _vel_pvfmkwupgtnan_mvml __builtin_ve_vl_pvfmkwupgtnan_mvml
+#define _vel_pvfmkwloltnan_mvl __builtin_ve_vl_pvfmkwloltnan_mvl
+#define _vel_pvfmkwupltnan_mvl __builtin_ve_vl_pvfmkwupltnan_mvl
+#define _vel_pvfmkwloltnan_mvml __builtin_ve_vl_pvfmkwloltnan_mvml
+#define _vel_pvfmkwupltnan_mvml __builtin_ve_vl_pvfmkwupltnan_mvml
+#define _vel_pvfmkwlonenan_mvl __builtin_ve_vl_pvfmkwlonenan_mvl
+#define _vel_pvfmkwupnenan_mvl __builtin_ve_vl_pvfmkwupnenan_mvl
+#define _vel_pvfmkwlonenan_mvml __builtin_ve_vl_pvfmkwlonenan_mvml
+#define _vel_pvfmkwupnenan_mvml __builtin_ve_vl_pvfmkwupnenan_mvml
+#define _vel_pvfmkwloeqnan_mvl __builtin_ve_vl_pvfmkwloeqnan_mvl
+#define _vel_pvfmkwupeqnan_mvl __builtin_ve_vl_pvfmkwupeqnan_mvl
+#define _vel_pvfmkwloeqnan_mvml __builtin_ve_vl_pvfmkwloeqnan_mvml
+#define _vel_pvfmkwupeqnan_mvml __builtin_ve_vl_pvfmkwupeqnan_mvml
+#define _vel_pvfmkwlogenan_mvl __builtin_ve_vl_pvfmkwlogenan_mvl
+#define _vel_pvfmkwupgenan_mvl __builtin_ve_vl_pvfmkwupgenan_mvl
+#define _vel_pvfmkwlogenan_mvml __builtin_ve_vl_pvfmkwlogenan_mvml
+#define _vel_pvfmkwupgenan_mvml __builtin_ve_vl_pvfmkwupgenan_mvml
+#define _vel_pvfmkwlolenan_mvl __builtin_ve_vl_pvfmkwlolenan_mvl
+#define _vel_pvfmkwuplenan_mvl __builtin_ve_vl_pvfmkwuplenan_mvl
+#define _vel_pvfmkwlolenan_mvml __builtin_ve_vl_pvfmkwlolenan_mvml
+#define _vel_pvfmkwuplenan_mvml __builtin_ve_vl_pvfmkwuplenan_mvml
+#define _vel_pvfmkwgt_Mvl __builtin_ve_vl_pvfmkwgt_Mvl
+#define _vel_pvfmkwgt_MvMl __builtin_ve_vl_pvfmkwgt_MvMl
+#define _vel_pvfmkwlt_Mvl __builtin_ve_vl_pvfmkwlt_Mvl
+#define _vel_pvfmkwlt_MvMl __builtin_ve_vl_pvfmkwlt_MvMl
+#define _vel_pvfmkwne_Mvl __builtin_ve_vl_pvfmkwne_Mvl
+#define _vel_pvfmkwne_MvMl __builtin_ve_vl_pvfmkwne_MvMl
+#define _vel_pvfmkweq_Mvl __builtin_ve_vl_pvfmkweq_Mvl
+#define _vel_pvfmkweq_MvMl __builtin_ve_vl_pvfmkweq_MvMl
+#define _vel_pvfmkwge_Mvl __builtin_ve_vl_pvfmkwge_Mvl
+#define _vel_pvfmkwge_MvMl __builtin_ve_vl_pvfmkwge_MvMl
+#define _vel_pvfmkwle_Mvl __builtin_ve_vl_pvfmkwle_Mvl
+#define _vel_pvfmkwle_MvMl __builtin_ve_vl_pvfmkwle_MvMl
+#define _vel_pvfmkwnum_Mvl __builtin_ve_vl_pvfmkwnum_Mvl
+#define _vel_pvfmkwnum_MvMl __builtin_ve_vl_pvfmkwnum_MvMl
+#define _vel_pvfmkwnan_Mvl __builtin_ve_vl_pvfmkwnan_Mvl
+#define _vel_pvfmkwnan_MvMl __builtin_ve_vl_pvfmkwnan_MvMl
+#define _vel_pvfmkwgtnan_Mvl __builtin_ve_vl_pvfmkwgtnan_Mvl
+#define _vel_pvfmkwgtnan_MvMl __builtin_ve_vl_pvfmkwgtnan_MvMl
+#define _vel_pvfmkwltnan_Mvl __builtin_ve_vl_pvfmkwltnan_Mvl
+#define _vel_pvfmkwltnan_MvMl __builtin_ve_vl_pvfmkwltnan_MvMl
+#define _vel_pvfmkwnenan_Mvl __builtin_ve_vl_pvfmkwnenan_Mvl
+#define _vel_pvfmkwnenan_MvMl __builtin_ve_vl_pvfmkwnenan_MvMl
+#define _vel_pvfmkweqnan_Mvl __builtin_ve_vl_pvfmkweqnan_Mvl
+#define _vel_pvfmkweqnan_MvMl __builtin_ve_vl_pvfmkweqnan_MvMl
+#define _vel_pvfmkwgenan_Mvl __builtin_ve_vl_pvfmkwgenan_Mvl
+#define _vel_pvfmkwgenan_MvMl __builtin_ve_vl_pvfmkwgenan_MvMl
+#define _vel_pvfmkwlenan_Mvl __builtin_ve_vl_pvfmkwlenan_Mvl
+#define _vel_pvfmkwlenan_MvMl __builtin_ve_vl_pvfmkwlenan_MvMl
+#define _vel_vfmkdgt_mvl __builtin_ve_vl_vfmkdgt_mvl
+#define _vel_vfmkdgt_mvml __builtin_ve_vl_vfmkdgt_mvml
+#define _vel_vfmkdlt_mvl __builtin_ve_vl_vfmkdlt_mvl
+#define _vel_vfmkdlt_mvml __builtin_ve_vl_vfmkdlt_mvml
+#define _vel_vfmkdne_mvl __builtin_ve_vl_vfmkdne_mvl
+#define _vel_vfmkdne_mvml __builtin_ve_vl_vfmkdne_mvml
+#define _vel_vfmkdeq_mvl __builtin_ve_vl_vfmkdeq_mvl
+#define _vel_vfmkdeq_mvml __builtin_ve_vl_vfmkdeq_mvml
+#define _vel_vfmkdge_mvl __builtin_ve_vl_vfmkdge_mvl
+#define _vel_vfmkdge_mvml __builtin_ve_vl_vfmkdge_mvml
+#define _vel_vfmkdle_mvl __builtin_ve_vl_vfmkdle_mvl
+#define _vel_vfmkdle_mvml __builtin_ve_vl_vfmkdle_mvml
+#define _vel_vfmkdnum_mvl __builtin_ve_vl_vfmkdnum_mvl
+#define _vel_vfmkdnum_mvml __builtin_ve_vl_vfmkdnum_mvml
+#define _vel_vfmkdnan_mvl __builtin_ve_vl_vfmkdnan_mvl
+#define _vel_vfmkdnan_mvml __builtin_ve_vl_vfmkdnan_mvml
+#define _vel_vfmkdgtnan_mvl __builtin_ve_vl_vfmkdgtnan_mvl
+#define _vel_vfmkdgtnan_mvml __builtin_ve_vl_vfmkdgtnan_mvml
+#define _vel_vfmkdltnan_mvl __builtin_ve_vl_vfmkdltnan_mvl
+#define _vel_vfmkdltnan_mvml __builtin_ve_vl_vfmkdltnan_mvml
+#define _vel_vfmkdnenan_mvl __builtin_ve_vl_vfmkdnenan_mvl
+#define _vel_vfmkdnenan_mvml __builtin_ve_vl_vfmkdnenan_mvml
+#define _vel_vfmkdeqnan_mvl __builtin_ve_vl_vfmkdeqnan_mvl
+#define _vel_vfmkdeqnan_mvml __builtin_ve_vl_vfmkdeqnan_mvml
+#define _vel_vfmkdgenan_mvl __builtin_ve_vl_vfmkdgenan_mvl
+#define _vel_vfmkdgenan_mvml __builtin_ve_vl_vfmkdgenan_mvml
+#define _vel_vfmkdlenan_mvl __builtin_ve_vl_vfmkdlenan_mvl
+#define _vel_vfmkdlenan_mvml __builtin_ve_vl_vfmkdlenan_mvml
+#define _vel_vfmksgt_mvl __builtin_ve_vl_vfmksgt_mvl
+#define _vel_vfmksgt_mvml __builtin_ve_vl_vfmksgt_mvml
+#define _vel_vfmkslt_mvl __builtin_ve_vl_vfmkslt_mvl
+#define _vel_vfmkslt_mvml __builtin_ve_vl_vfmkslt_mvml
+#define _vel_vfmksne_mvl __builtin_ve_vl_vfmksne_mvl
+#define _vel_vfmksne_mvml __builtin_ve_vl_vfmksne_mvml
+#define _vel_vfmkseq_mvl __builtin_ve_vl_vfmkseq_mvl
+#define _vel_vfmkseq_mvml __builtin_ve_vl_vfmkseq_mvml
+#define _vel_vfmksge_mvl __builtin_ve_vl_vfmksge_mvl
+#define _vel_vfmksge_mvml __builtin_ve_vl_vfmksge_mvml
+#define _vel_vfmksle_mvl __builtin_ve_vl_vfmksle_mvl
+#define _vel_vfmksle_mvml __builtin_ve_vl_vfmksle_mvml
+#define _vel_vfmksnum_mvl __builtin_ve_vl_vfmksnum_mvl
+#define _vel_vfmksnum_mvml __builtin_ve_vl_vfmksnum_mvml
+#define _vel_vfmksnan_mvl __builtin_ve_vl_vfmksnan_mvl
+#define _vel_vfmksnan_mvml __builtin_ve_vl_vfmksnan_mvml
+#define _vel_vfmksgtnan_mvl __builtin_ve_vl_vfmksgtnan_mvl
+#define _vel_vfmksgtnan_mvml __builtin_ve_vl_vfmksgtnan_mvml
+#define _vel_vfmksltnan_mvl __builtin_ve_vl_vfmksltnan_mvl
+#define _vel_vfmksltnan_mvml __builtin_ve_vl_vfmksltnan_mvml
+#define _vel_vfmksnenan_mvl __builtin_ve_vl_vfmksnenan_mvl
+#define _vel_vfmksnenan_mvml __builtin_ve_vl_vfmksnenan_mvml
+#define _vel_vfmkseqnan_mvl __builtin_ve_vl_vfmkseqnan_mvl
+#define _vel_vfmkseqnan_mvml __builtin_ve_vl_vfmkseqnan_mvml
+#define _vel_vfmksgenan_mvl __builtin_ve_vl_vfmksgenan_mvl
+#define _vel_vfmksgenan_mvml __builtin_ve_vl_vfmksgenan_mvml
+#define _vel_vfmkslenan_mvl __builtin_ve_vl_vfmkslenan_mvl
+#define _vel_vfmkslenan_mvml __builtin_ve_vl_vfmkslenan_mvml
+#define _vel_pvfmkslogt_mvl __builtin_ve_vl_pvfmkslogt_mvl
+#define _vel_pvfmksupgt_mvl __builtin_ve_vl_pvfmksupgt_mvl
+#define _vel_pvfmkslogt_mvml __builtin_ve_vl_pvfmkslogt_mvml
+#define _vel_pvfmksupgt_mvml __builtin_ve_vl_pvfmksupgt_mvml
+#define _vel_pvfmkslolt_mvl __builtin_ve_vl_pvfmkslolt_mvl
+#define _vel_pvfmksuplt_mvl __builtin_ve_vl_pvfmksuplt_mvl
+#define _vel_pvfmkslolt_mvml __builtin_ve_vl_pvfmkslolt_mvml
+#define _vel_pvfmksuplt_mvml __builtin_ve_vl_pvfmksuplt_mvml
+#define _vel_pvfmkslone_mvl __builtin_ve_vl_pvfmkslone_mvl
+#define _vel_pvfmksupne_mvl __builtin_ve_vl_pvfmksupne_mvl
+#define _vel_pvfmkslone_mvml __builtin_ve_vl_pvfmkslone_mvml
+#define _vel_pvfmksupne_mvml __builtin_ve_vl_pvfmksupne_mvml
+#define _vel_pvfmksloeq_mvl __builtin_ve_vl_pvfmksloeq_mvl
+#define _vel_pvfmksupeq_mvl __builtin_ve_vl_pvfmksupeq_mvl
+#define _vel_pvfmksloeq_mvml __builtin_ve_vl_pvfmksloeq_mvml
+#define _vel_pvfmksupeq_mvml __builtin_ve_vl_pvfmksupeq_mvml
+#define _vel_pvfmksloge_mvl __builtin_ve_vl_pvfmksloge_mvl
+#define _vel_pvfmksupge_mvl __builtin_ve_vl_pvfmksupge_mvl
+#define _vel_pvfmksloge_mvml __builtin_ve_vl_pvfmksloge_mvml
+#define _vel_pvfmksupge_mvml __builtin_ve_vl_pvfmksupge_mvml
+#define _vel_pvfmkslole_mvl __builtin_ve_vl_pvfmkslole_mvl
+#define _vel_pvfmksuple_mvl __builtin_ve_vl_pvfmksuple_mvl
+#define _vel_pvfmkslole_mvml __builtin_ve_vl_pvfmkslole_mvml
+#define _vel_pvfmksuple_mvml __builtin_ve_vl_pvfmksuple_mvml
+#define _vel_pvfmkslonum_mvl __builtin_ve_vl_pvfmkslonum_mvl
+#define _vel_pvfmksupnum_mvl __builtin_ve_vl_pvfmksupnum_mvl
+#define _vel_pvfmkslonum_mvml __builtin_ve_vl_pvfmkslonum_mvml
+#define _vel_pvfmksupnum_mvml __builtin_ve_vl_pvfmksupnum_mvml
+#define _vel_pvfmkslonan_mvl __builtin_ve_vl_pvfmkslonan_mvl
+#define _vel_pvfmksupnan_mvl __builtin_ve_vl_pvfmksupnan_mvl
+#define _vel_pvfmkslonan_mvml __builtin_ve_vl_pvfmkslonan_mvml
+#define _vel_pvfmksupnan_mvml __builtin_ve_vl_pvfmksupnan_mvml
+#define _vel_pvfmkslogtnan_mvl __builtin_ve_vl_pvfmkslogtnan_mvl
+#define _vel_pvfmksupgtnan_mvl __builtin_ve_vl_pvfmksupgtnan_mvl
+#define _vel_pvfmkslogtnan_mvml __builtin_ve_vl_pvfmkslogtnan_mvml
+#define _vel_pvfmksupgtnan_mvml __builtin_ve_vl_pvfmksupgtnan_mvml
+#define _vel_pvfmksloltnan_mvl __builtin_ve_vl_pvfmksloltnan_mvl
+#define _vel_pvfmksupltnan_mvl __builtin_ve_vl_pvfmksupltnan_mvl
+#define _vel_pvfmksloltnan_mvml __builtin_ve_vl_pvfmksloltnan_mvml
+#define _vel_pvfmksupltnan_mvml __builtin_ve_vl_pvfmksupltnan_mvml
+#define _vel_pvfmkslonenan_mvl __builtin_ve_vl_pvfmkslonenan_mvl
+#define _vel_pvfmksupnenan_mvl __builtin_ve_vl_pvfmksupnenan_mvl
+#define _vel_pvfmkslonenan_mvml __builtin_ve_vl_pvfmkslonenan_mvml
+#define _vel_pvfmksupnenan_mvml __builtin_ve_vl_pvfmksupnenan_mvml
+#define _vel_pvfmksloeqnan_mvl __builtin_ve_vl_pvfmksloeqnan_mvl
+#define _vel_pvfmksupeqnan_mvl __builtin_ve_vl_pvfmksupeqnan_mvl
+#define _vel_pvfmksloeqnan_mvml __builtin_ve_vl_pvfmksloeqnan_mvml
+#define _vel_pvfmksupeqnan_mvml __builtin_ve_vl_pvfmksupeqnan_mvml
+#define _vel_pvfmkslogenan_mvl __builtin_ve_vl_pvfmkslogenan_mvl
+#define _vel_pvfmksupgenan_mvl __builtin_ve_vl_pvfmksupgenan_mvl
+#define _vel_pvfmkslogenan_mvml __builtin_ve_vl_pvfmkslogenan_mvml
+#define _vel_pvfmksupgenan_mvml __builtin_ve_vl_pvfmksupgenan_mvml
+#define _vel_pvfmkslolenan_mvl __builtin_ve_vl_pvfmkslolenan_mvl
+#define _vel_pvfmksuplenan_mvl __builtin_ve_vl_pvfmksuplenan_mvl
+#define _vel_pvfmkslolenan_mvml __builtin_ve_vl_pvfmkslolenan_mvml
+#define _vel_pvfmksuplenan_mvml __builtin_ve_vl_pvfmksuplenan_mvml
+#define _vel_pvfmksgt_Mvl __builtin_ve_vl_pvfmksgt_Mvl
+#define _vel_pvfmksgt_MvMl __builtin_ve_vl_pvfmksgt_MvMl
+#define _vel_pvfmkslt_Mvl __builtin_ve_vl_pvfmkslt_Mvl
+#define _vel_pvfmkslt_MvMl __builtin_ve_vl_pvfmkslt_MvMl
+#define _vel_pvfmksne_Mvl __builtin_ve_vl_pvfmksne_Mvl
+#define _vel_pvfmksne_MvMl __builtin_ve_vl_pvfmksne_MvMl
+#define _vel_pvfmkseq_Mvl __builtin_ve_vl_pvfmkseq_Mvl
+#define _vel_pvfmkseq_MvMl __builtin_ve_vl_pvfmkseq_MvMl
+#define _vel_pvfmksge_Mvl __builtin_ve_vl_pvfmksge_Mvl
+#define _vel_pvfmksge_MvMl __builtin_ve_vl_pvfmksge_MvMl
+#define _vel_pvfmksle_Mvl __builtin_ve_vl_pvfmksle_Mvl
+#define _vel_pvfmksle_MvMl __builtin_ve_vl_pvfmksle_MvMl
+#define _vel_pvfmksnum_Mvl __builtin_ve_vl_pvfmksnum_Mvl
+#define _vel_pvfmksnum_MvMl __builtin_ve_vl_pvfmksnum_MvMl
+#define _vel_pvfmksnan_Mvl __builtin_ve_vl_pvfmksnan_Mvl
+#define _vel_pvfmksnan_MvMl __builtin_ve_vl_pvfmksnan_MvMl
+#define _vel_pvfmksgtnan_Mvl __builtin_ve_vl_pvfmksgtnan_Mvl
+#define _vel_pvfmksgtnan_MvMl __builtin_ve_vl_pvfmksgtnan_MvMl
+#define _vel_pvfmksltnan_Mvl __builtin_ve_vl_pvfmksltnan_Mvl
+#define _vel_pvfmksltnan_MvMl __builtin_ve_vl_pvfmksltnan_MvMl
+#define _vel_pvfmksnenan_Mvl __builtin_ve_vl_pvfmksnenan_Mvl
+#define _vel_pvfmksnenan_MvMl __builtin_ve_vl_pvfmksnenan_MvMl
+#define _vel_pvfmkseqnan_Mvl __builtin_ve_vl_pvfmkseqnan_Mvl
+#define _vel_pvfmkseqnan_MvMl __builtin_ve_vl_pvfmkseqnan_MvMl
+#define _vel_pvfmksgenan_Mvl __builtin_ve_vl_pvfmksgenan_Mvl
+#define _vel_pvfmksgenan_MvMl __builtin_ve_vl_pvfmksgenan_MvMl
+#define _vel_pvfmkslenan_Mvl __builtin_ve_vl_pvfmkslenan_Mvl
+#define _vel_pvfmkslenan_MvMl __builtin_ve_vl_pvfmkslenan_MvMl
+#define _vel_vsumwsx_vvl __builtin_ve_vl_vsumwsx_vvl
+#define _vel_vsumwsx_vvml __builtin_ve_vl_vsumwsx_vvml
+#define _vel_vsumwzx_vvl __builtin_ve_vl_vsumwzx_vvl
+#define _vel_vsumwzx_vvml __builtin_ve_vl_vsumwzx_vvml
+#define _vel_vsuml_vvl __builtin_ve_vl_vsuml_vvl
+#define _vel_vsuml_vvml __builtin_ve_vl_vsuml_vvml
+#define _vel_vfsumd_vvl __builtin_ve_vl_vfsumd_vvl
+#define _vel_vfsumd_vvml __builtin_ve_vl_vfsumd_vvml
+#define _vel_vfsums_vvl __builtin_ve_vl_vfsums_vvl
+#define _vel_vfsums_vvml __builtin_ve_vl_vfsums_vvml
+#define _vel_vrmaxswfstsx_vvl __builtin_ve_vl_vrmaxswfstsx_vvl
+#define _vel_vrmaxswfstsx_vvvl __builtin_ve_vl_vrmaxswfstsx_vvvl
+#define _vel_vrmaxswlstsx_vvl __builtin_ve_vl_vrmaxswlstsx_vvl
+#define _vel_vrmaxswlstsx_vvvl __builtin_ve_vl_vrmaxswlstsx_vvvl
+#define _vel_vrmaxswfstzx_vvl __builtin_ve_vl_vrmaxswfstzx_vvl
+#define _vel_vrmaxswfstzx_vvvl __builtin_ve_vl_vrmaxswfstzx_vvvl
+#define _vel_vrmaxswlstzx_vvl __builtin_ve_vl_vrmaxswlstzx_vvl
+#define _vel_vrmaxswlstzx_vvvl __builtin_ve_vl_vrmaxswlstzx_vvvl
+#define _vel_vrminswfstsx_vvl __builtin_ve_vl_vrminswfstsx_vvl
+#define _vel_vrminswfstsx_vvvl __builtin_ve_vl_vrminswfstsx_vvvl
+#define _vel_vrminswlstsx_vvl __builtin_ve_vl_vrminswlstsx_vvl
+#define _vel_vrminswlstsx_vvvl __builtin_ve_vl_vrminswlstsx_vvvl
+#define _vel_vrminswfstzx_vvl __builtin_ve_vl_vrminswfstzx_vvl
+#define _vel_vrminswfstzx_vvvl __builtin_ve_vl_vrminswfstzx_vvvl
+#define _vel_vrminswlstzx_vvl __builtin_ve_vl_vrminswlstzx_vvl
+#define _vel_vrminswlstzx_vvvl __builtin_ve_vl_vrminswlstzx_vvvl
+#define _vel_vrmaxslfst_vvl __builtin_ve_vl_vrmaxslfst_vvl
+#define _vel_vrmaxslfst_vvvl __builtin_ve_vl_vrmaxslfst_vvvl
+#define _vel_vrmaxsllst_vvl __builtin_ve_vl_vrmaxsllst_vvl
+#define _vel_vrmaxsllst_vvvl __builtin_ve_vl_vrmaxsllst_vvvl
+#define _vel_vrminslfst_vvl __builtin_ve_vl_vrminslfst_vvl
+#define _vel_vrminslfst_vvvl __builtin_ve_vl_vrminslfst_vvvl
+#define _vel_vrminsllst_vvl __builtin_ve_vl_vrminsllst_vvl
+#define _vel_vrminsllst_vvvl __builtin_ve_vl_vrminsllst_vvvl
+#define _vel_vfrmaxdfst_vvl __builtin_ve_vl_vfrmaxdfst_vvl
+#define _vel_vfrmaxdfst_vvvl __builtin_ve_vl_vfrmaxdfst_vvvl
+#define _vel_vfrmaxdlst_vvl __builtin_ve_vl_vfrmaxdlst_vvl
+#define _vel_vfrmaxdlst_vvvl __builtin_ve_vl_vfrmaxdlst_vvvl
+#define _vel_vfrmaxsfst_vvl __builtin_ve_vl_vfrmaxsfst_vvl
+#define _vel_vfrmaxsfst_vvvl __builtin_ve_vl_vfrmaxsfst_vvvl
+#define _vel_vfrmaxslst_vvl __builtin_ve_vl_vfrmaxslst_vvl
+#define _vel_vfrmaxslst_vvvl __builtin_ve_vl_vfrmaxslst_vvvl
+#define _vel_vfrmindfst_vvl __builtin_ve_vl_vfrmindfst_vvl
+#define _vel_vfrmindfst_vvvl __builtin_ve_vl_vfrmindfst_vvvl
+#define _vel_vfrmindlst_vvl __builtin_ve_vl_vfrmindlst_vvl
+#define _vel_vfrmindlst_vvvl __builtin_ve_vl_vfrmindlst_vvvl
+#define _vel_vfrminsfst_vvl __builtin_ve_vl_vfrminsfst_vvl
+#define _vel_vfrminsfst_vvvl __builtin_ve_vl_vfrminsfst_vvvl
+#define _vel_vfrminslst_vvl __builtin_ve_vl_vfrminslst_vvl
+#define _vel_vfrminslst_vvvl __builtin_ve_vl_vfrminslst_vvvl
+#define _vel_vrand_vvl __builtin_ve_vl_vrand_vvl
+#define _vel_vrand_vvml __builtin_ve_vl_vrand_vvml
+#define _vel_vror_vvl __builtin_ve_vl_vror_vvl
+#define _vel_vror_vvml __builtin_ve_vl_vror_vvml
+#define _vel_vrxor_vvl __builtin_ve_vl_vrxor_vvl
+#define _vel_vrxor_vvml __builtin_ve_vl_vrxor_vvml
+#define _vel_vgt_vvssl __builtin_ve_vl_vgt_vvssl
+#define _vel_vgt_vvssvl __builtin_ve_vl_vgt_vvssvl
+#define _vel_vgt_vvssml __builtin_ve_vl_vgt_vvssml
+#define _vel_vgt_vvssmvl __builtin_ve_vl_vgt_vvssmvl
+#define _vel_vgtnc_vvssl __builtin_ve_vl_vgtnc_vvssl
+#define _vel_vgtnc_vvssvl __builtin_ve_vl_vgtnc_vvssvl
+#define _vel_vgtnc_vvssml __builtin_ve_vl_vgtnc_vvssml
+#define _vel_vgtnc_vvssmvl __builtin_ve_vl_vgtnc_vvssmvl
+#define _vel_vgtu_vvssl __builtin_ve_vl_vgtu_vvssl
+#define _vel_vgtu_vvssvl __builtin_ve_vl_vgtu_vvssvl
+#define _vel_vgtu_vvssml __builtin_ve_vl_vgtu_vvssml
+#define _vel_vgtu_vvssmvl __builtin_ve_vl_vgtu_vvssmvl
+#define _vel_vgtunc_vvssl __builtin_ve_vl_vgtunc_vvssl
+#define _vel_vgtunc_vvssvl __builtin_ve_vl_vgtunc_vvssvl
+#define _vel_vgtunc_vvssml __builtin_ve_vl_vgtunc_vvssml
+#define _vel_vgtunc_vvssmvl __builtin_ve_vl_vgtunc_vvssmvl
+#define _vel_vgtlsx_vvssl __builtin_ve_vl_vgtlsx_vvssl
+#define _vel_vgtlsx_vvssvl __builtin_ve_vl_vgtlsx_vvssvl
+#define _vel_vgtlsx_vvssml __builtin_ve_vl_vgtlsx_vvssml
+#define _vel_vgtlsx_vvssmvl __builtin_ve_vl_vgtlsx_vvssmvl
+#define _vel_vgtlsxnc_vvssl __builtin_ve_vl_vgtlsxnc_vvssl
+#define _vel_vgtlsxnc_vvssvl __builtin_ve_vl_vgtlsxnc_vvssvl
+#define _vel_vgtlsxnc_vvssml __builtin_ve_vl_vgtlsxnc_vvssml
+#define _vel_vgtlsxnc_vvssmvl __builtin_ve_vl_vgtlsxnc_vvssmvl
+#define _vel_vgtlzx_vvssl __builtin_ve_vl_vgtlzx_vvssl
+#define _vel_vgtlzx_vvssvl __builtin_ve_vl_vgtlzx_vvssvl
+#define _vel_vgtlzx_vvssml __builtin_ve_vl_vgtlzx_vvssml
+#define _vel_vgtlzx_vvssmvl __builtin_ve_vl_vgtlzx_vvssmvl
+#define _vel_vgtlzxnc_vvssl __builtin_ve_vl_vgtlzxnc_vvssl
+#define _vel_vgtlzxnc_vvssvl __builtin_ve_vl_vgtlzxnc_vvssvl
+#define _vel_vgtlzxnc_vvssml __builtin_ve_vl_vgtlzxnc_vvssml
+#define _vel_vgtlzxnc_vvssmvl __builtin_ve_vl_vgtlzxnc_vvssmvl
+#define _vel_vsc_vvssl __builtin_ve_vl_vsc_vvssl
+#define _vel_vsc_vvssml __builtin_ve_vl_vsc_vvssml
+#define _vel_vscnc_vvssl __builtin_ve_vl_vscnc_vvssl
+#define _vel_vscnc_vvssml __builtin_ve_vl_vscnc_vvssml
+#define _vel_vscot_vvssl __builtin_ve_vl_vscot_vvssl
+#define _vel_vscot_vvssml __builtin_ve_vl_vscot_vvssml
+#define _vel_vscncot_vvssl __builtin_ve_vl_vscncot_vvssl
+#define _vel_vscncot_vvssml __builtin_ve_vl_vscncot_vvssml
+#define _vel_vscu_vvssl __builtin_ve_vl_vscu_vvssl
+#define _vel_vscu_vvssml __builtin_ve_vl_vscu_vvssml
+#define _vel_vscunc_vvssl __builtin_ve_vl_vscunc_vvssl
+#define _vel_vscunc_vvssml __builtin_ve_vl_vscunc_vvssml
+#define _vel_vscuot_vvssl __builtin_ve_vl_vscuot_vvssl
+#define _vel_vscuot_vvssml __builtin_ve_vl_vscuot_vvssml
+#define _vel_vscuncot_vvssl __builtin_ve_vl_vscuncot_vvssl
+#define _vel_vscuncot_vvssml __builtin_ve_vl_vscuncot_vvssml
+#define _vel_vscl_vvssl __builtin_ve_vl_vscl_vvssl
+#define _vel_vscl_vvssml __builtin_ve_vl_vscl_vvssml
+#define _vel_vsclnc_vvssl __builtin_ve_vl_vsclnc_vvssl
+#define _vel_vsclnc_vvssml __builtin_ve_vl_vsclnc_vvssml
+#define _vel_vsclot_vvssl __builtin_ve_vl_vsclot_vvssl
+#define _vel_vsclot_vvssml __builtin_ve_vl_vsclot_vvssml
+#define _vel_vsclncot_vvssl __builtin_ve_vl_vsclncot_vvssl
+#define _vel_vsclncot_vvssml __builtin_ve_vl_vsclncot_vvssml
+#define _vel_andm_mmm __builtin_ve_vl_andm_mmm
+#define _vel_andm_MMM __builtin_ve_vl_andm_MMM
+#define _vel_orm_mmm __builtin_ve_vl_orm_mmm
+#define _vel_orm_MMM __builtin_ve_vl_orm_MMM
+#define _vel_xorm_mmm __builtin_ve_vl_xorm_mmm
+#define _vel_xorm_MMM __builtin_ve_vl_xorm_MMM
+#define _vel_eqvm_mmm __builtin_ve_vl_eqvm_mmm
+#define _vel_eqvm_MMM __builtin_ve_vl_eqvm_MMM
+#define _vel_nndm_mmm __builtin_ve_vl_nndm_mmm
+#define _vel_nndm_MMM __builtin_ve_vl_nndm_MMM
+#define _vel_negm_mm __builtin_ve_vl_negm_mm
+#define _vel_negm_MM __builtin_ve_vl_negm_MM
+#define _vel_pcvm_sml __builtin_ve_vl_pcvm_sml
+#define _vel_lzvm_sml __builtin_ve_vl_lzvm_sml
+#define _vel_tovm_sml __builtin_ve_vl_tovm_sml
+#define _vel_lcr_sss __builtin_ve_vl_lcr_sss
+#define _vel_scr_sss __builtin_ve_vl_scr_sss
+#define _vel_tscr_ssss __builtin_ve_vl_tscr_ssss
+#define _vel_fidcr_sss __builtin_ve_vl_fidcr_sss
+#define _vel_fencei __builtin_ve_vl_fencei
+#define _vel_fencem_s __builtin_ve_vl_fencem_s
+#define _vel_fencec_s __builtin_ve_vl_fencec_s
+#define _vel_svob __builtin_ve_vl_svob
diff --git a/contrib/llvm-project/clang/lib/Headers/vpclmulqdqintrin.h b/contrib/llvm-project/clang/lib/Headers/vpclmulqdqintrin.h
index 44daadb07d57..485692ea2b5b 100644
--- a/contrib/llvm-project/clang/lib/Headers/vpclmulqdqintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/vpclmulqdqintrin.h
@@ -15,15 +15,15 @@
#define __VPCLMULQDQINTRIN_H
#define _mm256_clmulepi64_epi128(A, B, I) \
- (__m256i)__builtin_ia32_pclmulqdq256((__v4di)(__m256i)(A), \
- (__v4di)(__m256i)(B), \
- (char)(I))
+ ((__m256i)__builtin_ia32_pclmulqdq256((__v4di)(__m256i)(A), \
+ (__v4di)(__m256i)(B), \
+ (char)(I)))
#ifdef __AVX512FINTRIN_H
#define _mm512_clmulepi64_epi128(A, B, I) \
- (__m512i)__builtin_ia32_pclmulqdq512((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), \
- (char)(I))
+ ((__m512i)__builtin_ia32_pclmulqdq512((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), \
+ (char)(I)))
#endif // __AVX512FINTRIN_H
#endif /* __VPCLMULQDQINTRIN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/wasm_simd128.h b/contrib/llvm-project/clang/lib/Headers/wasm_simd128.h
index 712fa0378098..2327bec52522 100644
--- a/contrib/llvm-project/clang/lib/Headers/wasm_simd128.h
+++ b/contrib/llvm-project/clang/lib/Headers/wasm_simd128.h
@@ -277,11 +277,27 @@ wasm_i8x16_make(int8_t __c0, int8_t __c1, int8_t __c2, int8_t __c3, int8_t __c4,
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u8x16_make(uint8_t __c0, uint8_t __c1, uint8_t __c2, uint8_t __c3,
+ uint8_t __c4, uint8_t __c5, uint8_t __c6, uint8_t __c7,
+ uint8_t __c8, uint8_t __c9, uint8_t __c10, uint8_t __c11,
+ uint8_t __c12, uint8_t __c13, uint8_t __c14, uint8_t __c15) {
+ return (v128_t)(__u8x16){__c0, __c1, __c2, __c3, __c4, __c5,
+ __c6, __c7, __c8, __c9, __c10, __c11,
+ __c12, __c13, __c14, __c15};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_i16x8_make(int16_t __c0, int16_t __c1, int16_t __c2, int16_t __c3,
int16_t __c4, int16_t __c5, int16_t __c6, int16_t __c7) {
return (v128_t)(__i16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};
}
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_make(uint16_t __c0, uint16_t __c1, uint16_t __c2, uint16_t __c3,
+ uint16_t __c4, uint16_t __c5, uint16_t __c6, uint16_t __c7) {
+ return (v128_t)(__u16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};
+}
+
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_make(int32_t __c0,
int32_t __c1,
int32_t __c2,
@@ -289,11 +305,23 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_make(int32_t __c0,
return (v128_t)(__i32x4){__c0, __c1, __c2, __c3};
}
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_make(uint32_t __c0,
+ uint32_t __c1,
+ uint32_t __c2,
+ uint32_t __c3) {
+ return (v128_t)(__u32x4){__c0, __c1, __c2, __c3};
+}
+
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_make(int64_t __c0,
int64_t __c1) {
return (v128_t)(__i64x2){__c0, __c1};
}
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_make(uint64_t __c0,
+ uint64_t __c1) {
+ return (v128_t)(__u64x2){__c0, __c1};
+}
+
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_make(float __c0,
float __c1,
float __c2,
@@ -325,6 +353,24 @@ wasm_i8x16_const(int8_t __c0, int8_t __c1, int8_t __c2, int8_t __c3,
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u8x16_const(uint8_t __c0, uint8_t __c1, uint8_t __c2, uint8_t __c3,
+ uint8_t __c4, uint8_t __c5, uint8_t __c6, uint8_t __c7,
+ uint8_t __c8, uint8_t __c9, uint8_t __c10, uint8_t __c11,
+ uint8_t __c12, uint8_t __c13, uint8_t __c14, uint8_t __c15)
+ __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)
+ __REQUIRE_CONSTANT(__c3) __REQUIRE_CONSTANT(__c4)
+ __REQUIRE_CONSTANT(__c5) __REQUIRE_CONSTANT(__c6)
+ __REQUIRE_CONSTANT(__c7) __REQUIRE_CONSTANT(__c8)
+ __REQUIRE_CONSTANT(__c9) __REQUIRE_CONSTANT(__c10)
+ __REQUIRE_CONSTANT(__c11) __REQUIRE_CONSTANT(__c12)
+ __REQUIRE_CONSTANT(__c13) __REQUIRE_CONSTANT(__c14)
+ __REQUIRE_CONSTANT(__c15) {
+ return (v128_t)(__u8x16){__c0, __c1, __c2, __c3, __c4, __c5,
+ __c6, __c7, __c8, __c9, __c10, __c11,
+ __c12, __c13, __c14, __c15};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_i16x8_const(int16_t __c0, int16_t __c1, int16_t __c2, int16_t __c3,
int16_t __c4, int16_t __c5, int16_t __c6, int16_t __c7)
__REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)
@@ -335,18 +381,41 @@ wasm_i16x8_const(int16_t __c0, int16_t __c1, int16_t __c2, int16_t __c3,
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_const(uint16_t __c0, uint16_t __c1, uint16_t __c2, uint16_t __c3,
+ uint16_t __c4, uint16_t __c5, uint16_t __c6, uint16_t __c7)
+ __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)
+ __REQUIRE_CONSTANT(__c3) __REQUIRE_CONSTANT(__c4)
+ __REQUIRE_CONSTANT(__c5) __REQUIRE_CONSTANT(__c6)
+ __REQUIRE_CONSTANT(__c7) {
+ return (v128_t)(__u16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_i32x4_const(int32_t __c0, int32_t __c1, int32_t __c2, int32_t __c3)
__REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)
__REQUIRE_CONSTANT(__c3) {
return (v128_t)(__i32x4){__c0, __c1, __c2, __c3};
}
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u32x4_const(uint32_t __c0, uint32_t __c1, uint32_t __c2, uint32_t __c3)
+ __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)
+ __REQUIRE_CONSTANT(__c3) {
+ return (v128_t)(__u32x4){__c0, __c1, __c2, __c3};
+}
+
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_const(int64_t __c0,
int64_t __c1)
__REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) {
return (v128_t)(__i64x2){__c0, __c1};
}
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_const(uint64_t __c0,
+ uint64_t __c1)
+ __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) {
+ return (v128_t)(__u64x2){__c0, __c1};
+}
+
static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_f32x4_const(float __c0, float __c1, float __c2, float __c3)
__REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)
@@ -366,21 +435,42 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_const_splat(int8_t __c)
__c, __c, __c, __c, __c, __c, __c, __c};
}
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_const_splat(uint8_t __c)
+ __REQUIRE_CONSTANT(__c) {
+ return (v128_t)(__u8x16){__c, __c, __c, __c, __c, __c, __c, __c,
+ __c, __c, __c, __c, __c, __c, __c, __c};
+}
+
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_const_splat(int16_t __c)
__REQUIRE_CONSTANT(__c) {
return (v128_t)(__i16x8){__c, __c, __c, __c, __c, __c, __c, __c};
}
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_const_splat(uint16_t __c)
+ __REQUIRE_CONSTANT(__c) {
+ return (v128_t)(__u16x8){__c, __c, __c, __c, __c, __c, __c, __c};
+}
+
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_const_splat(int32_t __c)
__REQUIRE_CONSTANT(__c) {
return (v128_t)(__i32x4){__c, __c, __c, __c};
}
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_const_splat(uint32_t __c)
+ __REQUIRE_CONSTANT(__c) {
+ return (v128_t)(__u32x4){__c, __c, __c, __c};
+}
+
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_const_splat(int64_t __c)
__REQUIRE_CONSTANT(__c) {
return (v128_t)(__i64x2){__c, __c};
}
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_const_splat(uint64_t __c)
+ __REQUIRE_CONSTANT(__c) {
+ return (v128_t)(__u64x2){__c, __c};
+}
+
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_const_splat(float __c)
__REQUIRE_CONSTANT(__c) {
return (v128_t)(__f32x4){__c, __c, __c, __c};
@@ -396,6 +486,11 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_splat(int8_t __a) {
__a, __a, __a, __a, __a, __a, __a, __a};
}
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_splat(uint8_t __a) {
+ return (v128_t)(__u8x16){__a, __a, __a, __a, __a, __a, __a, __a,
+ __a, __a, __a, __a, __a, __a, __a, __a};
+}
+
static __inline__ int8_t __DEFAULT_FN_ATTRS wasm_i8x16_extract_lane(v128_t __a,
int __i)
__REQUIRE_CONSTANT(__i) {
@@ -417,10 +512,23 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_replace_lane(v128_t __a,
return (v128_t)__v;
}
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_replace_lane(v128_t __a,
+ int __i,
+ uint8_t __b)
+ __REQUIRE_CONSTANT(__i) {
+ __u8x16 __v = (__u8x16)__a;
+ __v[__i] = __b;
+ return (v128_t)__v;
+}
+
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_splat(int16_t __a) {
return (v128_t)(__i16x8){__a, __a, __a, __a, __a, __a, __a, __a};
}
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_splat(uint16_t __a) {
+ return (v128_t)(__u16x8){__a, __a, __a, __a, __a, __a, __a, __a};
+}
+
static __inline__ int16_t __DEFAULT_FN_ATTRS wasm_i16x8_extract_lane(v128_t __a,
int __i)
__REQUIRE_CONSTANT(__i) {
@@ -441,16 +549,32 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_replace_lane(v128_t __a,
return (v128_t)__v;
}
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_replace_lane(
+ v128_t __a, int __i, uint16_t __b) __REQUIRE_CONSTANT(__i) {
+ __u16x8 __v = (__u16x8)__a;
+ __v[__i] = __b;
+ return (v128_t)__v;
+}
+
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_splat(int32_t __a) {
return (v128_t)(__i32x4){__a, __a, __a, __a};
}
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_splat(uint32_t __a) {
+ return (v128_t)(__u32x4){__a, __a, __a, __a};
+}
+
static __inline__ int32_t __DEFAULT_FN_ATTRS wasm_i32x4_extract_lane(v128_t __a,
int __i)
__REQUIRE_CONSTANT(__i) {
return ((__i32x4)__a)[__i];
}
+static __inline__ uint32_t __DEFAULT_FN_ATTRS
+wasm_u32x4_extract_lane(v128_t __a, int __i) __REQUIRE_CONSTANT(__i) {
+ return ((__u32x4)__a)[__i];
+}
+
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_replace_lane(v128_t __a,
int __i,
int32_t __b)
@@ -460,16 +584,32 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_replace_lane(v128_t __a,
return (v128_t)__v;
}
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_replace_lane(
+ v128_t __a, int __i, uint32_t __b) __REQUIRE_CONSTANT(__i) {
+ __u32x4 __v = (__u32x4)__a;
+ __v[__i] = __b;
+ return (v128_t)__v;
+}
+
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_splat(int64_t __a) {
return (v128_t)(__i64x2){__a, __a};
}
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_splat(uint64_t __a) {
+ return (v128_t)(__u64x2){__a, __a};
+}
+
static __inline__ int64_t __DEFAULT_FN_ATTRS wasm_i64x2_extract_lane(v128_t __a,
int __i)
__REQUIRE_CONSTANT(__i) {
return ((__i64x2)__a)[__i];
}
+static __inline__ uint64_t __DEFAULT_FN_ATTRS
+wasm_u64x2_extract_lane(v128_t __a, int __i) __REQUIRE_CONSTANT(__i) {
+ return ((__u64x2)__a)[__i];
+}
+
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_replace_lane(v128_t __a,
int __i,
int64_t __b)
@@ -479,6 +619,13 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_replace_lane(v128_t __a,
return (v128_t)__v;
}
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_replace_lane(
+ v128_t __a, int __i, uint64_t __b) __REQUIRE_CONSTANT(__i) {
+ __u64x2 __v = (__u64x2)__a;
+ __v[__i] = __b;
+ return (v128_t)__v;
+}
+
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_splat(float __a) {
return (v128_t)(__f32x4){__a, __a, __a, __a};
}
@@ -804,7 +951,7 @@ static __inline__ bool __DEFAULT_FN_ATTRS wasm_i8x16_all_true(v128_t __a) {
return __builtin_wasm_all_true_i8x16((__i8x16)__a);
}
-static __inline__ int32_t __DEFAULT_FN_ATTRS wasm_i8x16_bitmask(v128_t __a) {
+static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i8x16_bitmask(v128_t __a) {
return __builtin_wasm_bitmask_i8x16((__i8x16)__a);
}
@@ -813,18 +960,18 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_popcnt(v128_t __a) {
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shl(v128_t __a,
- int32_t __b) {
- return (v128_t)((__i8x16)__a << __b);
+ uint32_t __b) {
+ return (v128_t)((__i8x16)__a << (__b & 0x7));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shr(v128_t __a,
- int32_t __b) {
- return (v128_t)((__i8x16)__a >> __b);
+ uint32_t __b) {
+ return (v128_t)((__i8x16)__a >> (__b & 0x7));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_shr(v128_t __a,
- int32_t __b) {
- return (v128_t)((__u8x16)__a >> __b);
+ uint32_t __b) {
+ return (v128_t)((__u8x16)__a >> (__b & 0x7));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_add(v128_t __a,
@@ -894,23 +1041,23 @@ static __inline__ bool __DEFAULT_FN_ATTRS wasm_i16x8_all_true(v128_t __a) {
return __builtin_wasm_all_true_i16x8((__i16x8)__a);
}
-static __inline__ int32_t __DEFAULT_FN_ATTRS wasm_i16x8_bitmask(v128_t __a) {
+static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i16x8_bitmask(v128_t __a) {
return __builtin_wasm_bitmask_i16x8((__i16x8)__a);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shl(v128_t __a,
- int32_t __b) {
- return (v128_t)((__i16x8)__a << __b);
+ uint32_t __b) {
+ return (v128_t)((__i16x8)__a << (__b & 0xF));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shr(v128_t __a,
- int32_t __b) {
- return (v128_t)((__i16x8)__a >> __b);
+ uint32_t __b) {
+ return (v128_t)((__i16x8)__a >> (__b & 0xF));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_shr(v128_t __a,
- int32_t __b) {
- return (v128_t)((__u16x8)__a >> __b);
+ uint32_t __b) {
+ return (v128_t)((__u16x8)__a >> (__b & 0xF));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_add(v128_t __a,
@@ -985,23 +1132,23 @@ static __inline__ bool __DEFAULT_FN_ATTRS wasm_i32x4_all_true(v128_t __a) {
return __builtin_wasm_all_true_i32x4((__i32x4)__a);
}
-static __inline__ int32_t __DEFAULT_FN_ATTRS wasm_i32x4_bitmask(v128_t __a) {
+static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i32x4_bitmask(v128_t __a) {
return __builtin_wasm_bitmask_i32x4((__i32x4)__a);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shl(v128_t __a,
- int32_t __b) {
- return (v128_t)((__i32x4)__a << __b);
+ uint32_t __b) {
+ return (v128_t)((__i32x4)__a << (__b & 0x1F));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shr(v128_t __a,
- int32_t __b) {
- return (v128_t)((__i32x4)__a >> __b);
+ uint32_t __b) {
+ return (v128_t)((__i32x4)__a >> (__b & 0x1F));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_shr(v128_t __a,
- int32_t __b) {
- return (v128_t)((__u32x4)__a >> __b);
+ uint32_t __b) {
+ return (v128_t)((__u32x4)__a >> (__b & 0x1F));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_add(v128_t __a,
@@ -1056,23 +1203,23 @@ static __inline__ bool __DEFAULT_FN_ATTRS wasm_i64x2_all_true(v128_t __a) {
return __builtin_wasm_all_true_i64x2((__i64x2)__a);
}
-static __inline__ int32_t __DEFAULT_FN_ATTRS wasm_i64x2_bitmask(v128_t __a) {
+static __inline__ uint32_t __DEFAULT_FN_ATTRS wasm_i64x2_bitmask(v128_t __a) {
return __builtin_wasm_bitmask_i64x2((__i64x2)__a);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shl(v128_t __a,
- int32_t __b) {
- return (v128_t)((__i64x2)__a << (int64_t)__b);
+ uint32_t __b) {
+ return (v128_t)((__i64x2)__a << ((int64_t)__b & 0x3F));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shr(v128_t __a,
- int32_t __b) {
- return (v128_t)((__i64x2)__a >> (int64_t)__b);
+ uint32_t __b) {
+ return (v128_t)((__i64x2)__a >> ((int64_t)__b & 0x3F));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_shr(v128_t __a,
- int32_t __b) {
- return (v128_t)((__u64x2)__a >> (int64_t)__b);
+ uint32_t __b) {
+ return (v128_t)((__u64x2)__a >> ((int64_t)__b & 0x3F));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_add(v128_t __a,
@@ -1150,14 +1297,12 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_max(v128_t __a,
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_pmin(v128_t __a,
v128_t __b) {
- __i32x4 __mask = (__i32x4)((__f32x4)__b < (__f32x4)__a);
- return (v128_t)((((__i32x4)__b) & __mask) | (((__i32x4)__a) & ~__mask));
+ return (v128_t)__builtin_wasm_pmin_f32x4((__f32x4)__a, (__f32x4)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_pmax(v128_t __a,
v128_t __b) {
- __i32x4 __mask = (__i32x4)((__f32x4)__a < (__f32x4)__b);
- return (v128_t)((((__i32x4)__b) & __mask) | (((__i32x4)__a) & ~__mask));
+ return (v128_t)__builtin_wasm_pmax_f32x4((__f32x4)__a, (__f32x4)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_abs(v128_t __a) {
@@ -1220,14 +1365,12 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_max(v128_t __a,
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmin(v128_t __a,
v128_t __b) {
- __i64x2 __mask = (__i64x2)((__f64x2)__b < (__f64x2)__a);
- return (v128_t)((((__i64x2)__b) & __mask) | (((__i64x2)__a) & ~__mask));
+ return (v128_t)__builtin_wasm_pmin_f64x2((__f64x2)__a, (__f64x2)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmax(v128_t __a,
v128_t __b) {
- __i64x2 __mask = (__i64x2)((__f64x2)__a < (__f64x2)__b);
- return (v128_t)((((__i64x2)__b) & __mask) | (((__i64x2)__a) & ~__mask));
+ return (v128_t)__builtin_wasm_pmax_f64x2((__f64x2)__a, (__f64x2)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
@@ -1262,12 +1405,12 @@ wasm_f64x2_convert_low_u32x4(v128_t __a) {
static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_i32x4_trunc_sat_f64x2_zero(v128_t __a) {
- return (v128_t)__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4((__f64x2)__a);
+ return (v128_t)__builtin_wasm_trunc_sat_s_zero_f64x2_i32x4((__f64x2)__a);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_u32x4_trunc_sat_f64x2_zero(v128_t __a) {
- return (v128_t)__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4((__f64x2)__a);
+ return (v128_t)__builtin_wasm_trunc_sat_u_zero_f64x2_i32x4((__f64x2)__a);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
@@ -1617,6 +1760,126 @@ wasm_u64x2_load_32x2(const void *__mem) {
__DEPRECATED_WASM_MACRO("wasm_v64x2_shuffle", "wasm_i64x2_shuffle") \
wasm_i64x2_shuffle(__a, __b, __c0, __c1)
+// Relaxed SIMD intrinsics
+
+#define __RELAXED_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("relaxed-simd"), \
+ __min_vector_width__(128)))
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_f32x4_relaxed_madd(v128_t __a, v128_t __b, v128_t __c) {
+ return (v128_t)__builtin_wasm_relaxed_madd_f32x4((__f32x4)__a, (__f32x4)__b,
+ (__f32x4)__c);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_f32x4_relaxed_nmadd(v128_t __a, v128_t __b, v128_t __c) {
+ return (v128_t)__builtin_wasm_relaxed_nmadd_f32x4((__f32x4)__a, (__f32x4)__b,
+ (__f32x4)__c);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_f64x2_relaxed_madd(v128_t __a, v128_t __b, v128_t __c) {
+ return (v128_t)__builtin_wasm_relaxed_madd_f64x2((__f64x2)__a, (__f64x2)__b,
+ (__f64x2)__c);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_f64x2_relaxed_nmadd(v128_t __a, v128_t __b, v128_t __c) {
+ return (v128_t)__builtin_wasm_relaxed_nmadd_f64x2((__f64x2)__a, (__f64x2)__b,
+ (__f64x2)__c);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_i8x16_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m) {
+ return (v128_t)__builtin_wasm_relaxed_laneselect_i8x16(
+ (__i8x16)__a, (__i8x16)__b, (__i8x16)__m);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_i16x8_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m) {
+ return (v128_t)__builtin_wasm_relaxed_laneselect_i16x8(
+ (__i16x8)__a, (__i16x8)__b, (__i16x8)__m);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_i32x4_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m) {
+ return (v128_t)__builtin_wasm_relaxed_laneselect_i32x4(
+ (__i32x4)__a, (__i32x4)__b, (__i32x4)__m);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_i64x2_relaxed_laneselect(v128_t __a, v128_t __b, v128_t __m) {
+ return (v128_t)__builtin_wasm_relaxed_laneselect_i64x2(
+ (__i64x2)__a, (__i64x2)__b, (__i64x2)__m);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_i8x16_relaxed_swizzle(v128_t __a, v128_t __s) {
+ return (v128_t)__builtin_wasm_relaxed_swizzle_i8x16((__i8x16)__a,
+ (__i8x16)__s);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f32x4_relaxed_min(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_relaxed_min_f32x4((__f32x4)__a, (__f32x4)__b);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f32x4_relaxed_max(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_relaxed_max_f32x4((__f32x4)__a, (__f32x4)__b);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f64x2_relaxed_min(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_relaxed_min_f64x2((__f64x2)__a, (__f64x2)__b);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS wasm_f64x2_relaxed_max(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_relaxed_max_f64x2((__f64x2)__a, (__f64x2)__b);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_i32x4_relaxed_trunc_f32x4(v128_t __a) {
+ return (v128_t)__builtin_wasm_relaxed_trunc_s_i32x4_f32x4((__f32x4)__a);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_u32x4_relaxed_trunc_f32x4(v128_t __a) {
+ return (v128_t)__builtin_wasm_relaxed_trunc_u_i32x4_f32x4((__f32x4)__a);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_i32x4_relaxed_trunc_f64x2_zero(v128_t __a) {
+ return (v128_t)__builtin_wasm_relaxed_trunc_s_zero_i32x4_f64x2((__f64x2)__a);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_u32x4_relaxed_trunc_f64x2_zero(v128_t __a) {
+ return (v128_t)__builtin_wasm_relaxed_trunc_u_zero_i32x4_f64x2((__f64x2)__a);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_i16x8_relaxed_q15mulr(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_relaxed_q15mulr_s_i16x8((__i16x8)__a,
+ (__i16x8)__b);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_i16x8_relaxed_dot_i8x16_i7x16(v128_t __a, v128_t __b) {
+ return (v128_t)__builtin_wasm_relaxed_dot_i8x16_i7x16_s_i16x8((__i8x16)__a,
+ (__i8x16)__b);
+}
+
+static __inline__ v128_t __RELAXED_FN_ATTRS
+wasm_i32x4_relaxed_dot_i8x16_i7x16_add(v128_t __a, v128_t __b, v128_t __c) {
+ return (v128_t)__builtin_wasm_relaxed_dot_i8x16_i7x16_add_s_i32x4(
+ (__i8x16)__a, (__i8x16)__b, (__i32x4)__c);
+}
+
+// Deprecated intrinsics
+
static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i8x16_swizzle")
wasm_v8x16_swizzle(v128_t __a, v128_t __b) {
return wasm_i8x16_swizzle(__a, __b);
diff --git a/contrib/llvm-project/clang/lib/Headers/wmmintrin.h b/contrib/llvm-project/clang/lib/Headers/wmmintrin.h
index f932ca81089c..49148dbf3ac6 100644
--- a/contrib/llvm-project/clang/lib/Headers/wmmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/wmmintrin.h
@@ -10,6 +10,10 @@
#ifndef __WMMINTRIN_H
#define __WMMINTRIN_H
+#if !defined(__i386__) && !defined(__x86_64__)
+#error "This header is only meant to be used on x86 and x64 architecture"
+#endif
+
#include <emmintrin.h>
#include <__wmmintrin_aes.h>
diff --git a/contrib/llvm-project/clang/lib/Headers/x86gprintrin.h b/contrib/llvm-project/clang/lib/Headers/x86gprintrin.h
index 1fc6cab4b28f..ed141879fbc7 100644
--- a/contrib/llvm-project/clang/lib/Headers/x86gprintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/x86gprintrin.h
@@ -20,4 +20,51 @@
#include <uintrintrin.h>
#endif
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__USERMSR__)
+#include <usermsrintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__CRC32__)
+#include <crc32intrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__PRFCHI__)
+#include <prfchiintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__RAOINT__)
+#include <raointintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__CMPCCXADD__)
+#include <cmpccxaddintrin.h>
+#endif
+
+#if defined(__i386__)
+#define __SAVE_GPRBX "mov {%%ebx, %%eax |eax, ebx};"
+#define __RESTORE_GPRBX "mov {%%eax, %%ebx |ebx, eax};"
+#define __TMPGPR "eax"
+#else
+// When in 64-bit target, the 32-bit operands generate a 32-bit result,
+// zero-extended to a 64-bit result in the destination general-purpose,
+// It means "mov x %ebx" will clobber the higher 32 bits of rbx, so we
+// should preserve the 64-bit register rbx.
+#define __SAVE_GPRBX "mov {%%rbx, %%rax |rax, rbx};"
+#define __RESTORE_GPRBX "mov {%%rax, %%rbx |rbx, rax};"
+#define __TMPGPR "rax"
+#endif
+
+#define __SSC_MARK(__Tag) \
+ __asm__ __volatile__( __SAVE_GPRBX \
+ "mov {%0, %%ebx|ebx, %0}; " \
+ ".byte 0x64, 0x67, 0x90; " \
+ __RESTORE_GPRBX \
+ ::"i"(__Tag) \
+ : __TMPGPR );
+
#endif /* __X86GPRINTRIN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/x86intrin.h b/contrib/llvm-project/clang/lib/Headers/x86intrin.h
index 768d0e56ab05..450fd008dab9 100644
--- a/contrib/llvm-project/clang/lib/Headers/x86intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/x86intrin.h
@@ -59,5 +59,9 @@
#include <clzerointrin.h>
#endif
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__RDPRU__)
+#include <rdpruintrin.h>
+#endif
#endif /* __X86INTRIN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/xmmintrin.h b/contrib/llvm-project/clang/lib/Headers/xmmintrin.h
index f4686691c7ed..47368f3c23d2 100644
--- a/contrib/llvm-project/clang/lib/Headers/xmmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/xmmintrin.h
@@ -10,6 +10,10 @@
#ifndef __XMMINTRIN_H
#define __XMMINTRIN_H
+#if !defined(__i386__) && !defined(__x86_64__)
+#error "This header is only meant to be used on x86 and x64 architecture"
+#endif
+
#include <mmintrin.h>
typedef int __v4si __attribute__((__vector_size__(16)));
@@ -28,8 +32,12 @@ typedef unsigned int __v4su __attribute__((__vector_size__(16)));
#endif
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS_MMX __attribute__((__always_inline__, __nodebug__, __target__("mmx,sse"), __min_vector_width__(64)))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("sse,no-evex512"), \
+ __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS_MMX \
+ __attribute__((__always_inline__, __nodebug__, \
+ __target__("mmx,sse,no-evex512"), __min_vector_width__(64)))
/// Adds the 32-bit float values in the low-order bits of the operands.
///
@@ -1902,7 +1910,7 @@ _mm_setr_ps(float __z, float __y, float __x, float __w)
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_setzero_ps(void)
{
- return __extension__ (__m128){ 0, 0, 0, 0 };
+ return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
}
/// Stores the upper 64 bits of a 128-bit vector of [4 x float] to a
@@ -2082,7 +2090,7 @@ _mm_storer_ps(float *__p, __m128 __a)
/// \headerfile <x86intrin.h>
///
/// \code
-/// void _mm_prefetch(const void * a, const int sel);
+/// void _mm_prefetch(const void *a, const int sel);
/// \endcode
///
/// This intrinsic corresponds to the <c> PREFETCHNTA </c> instruction.
@@ -2117,9 +2125,9 @@ _mm_storer_ps(float *__p, __m128 __a)
/// \param __a
/// A 64-bit integer containing the value to be stored.
static __inline__ void __DEFAULT_FN_ATTRS_MMX
-_mm_stream_pi(__m64 *__p, __m64 __a)
+_mm_stream_pi(void *__p, __m64 __a)
{
- __builtin_ia32_movntq(__p, __a);
+ __builtin_ia32_movntq((__m64 *)__p, __a);
}
/// Moves packed float values from a 128-bit vector of [4 x float] to a
@@ -2136,7 +2144,7 @@ _mm_stream_pi(__m64 *__p, __m64 __a)
/// \param __a
/// A 128-bit vector of [4 x float] containing the values to be moved.
static __inline__ void __DEFAULT_FN_ATTRS
-_mm_stream_ps(float *__p, __m128 __a)
+_mm_stream_ps(void *__p, __m128 __a)
{
__builtin_nontemporal_store((__v4sf)__a, (__v4sf*)__p);
}
@@ -2181,7 +2189,7 @@ void _mm_sfence(void);
/// 3: Bits [63:48] are copied to the destination.
/// \returns A 16-bit integer containing the extracted 16 bits of packed data.
#define _mm_extract_pi16(a, n) \
- (int)__builtin_ia32_vec_ext_v4hi((__v4hi)a, (int)n)
+ ((int)__builtin_ia32_vec_ext_v4hi((__v4hi)a, (int)n))
/// Copies data from the 64-bit vector of [4 x i16] to the destination,
/// and inserts the lower 16-bits of an integer operand at the 16-bit offset
@@ -2212,7 +2220,7 @@ void _mm_sfence(void);
/// \returns A 64-bit integer vector containing the copied packed data from the
/// operands.
#define _mm_insert_pi16(a, d, n) \
- (__m64)__builtin_ia32_vec_set_v4hi((__v4hi)a, (int)d, (int)n)
+ ((__m64)__builtin_ia32_vec_set_v4hi((__v4hi)a, (int)d, (int)n))
/// Compares each of the corresponding packed 16-bit integer values of
/// the 64-bit integer vectors, and writes the greater value to the
@@ -2356,10 +2364,13 @@ _mm_mulhi_pu16(__m64 __a, __m64 __b)
/// 00: assigned from bits [15:0] of \a a. \n
/// 01: assigned from bits [31:16] of \a a. \n
/// 10: assigned from bits [47:32] of \a a. \n
-/// 11: assigned from bits [63:48] of \a a.
+/// 11: assigned from bits [63:48] of \a a. \n
+/// Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+/// <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+/// <c>[b6, b4, b2, b0]</c>.
/// \returns A 64-bit integer vector containing the shuffled values.
#define _mm_shuffle_pi16(a, n) \
- (__m64)__builtin_ia32_pshufw((__v4hi)(__m64)(a), (n))
+ ((__m64)__builtin_ia32_pshufw((__v4hi)(__m64)(a), (n)))
/// Conditionally copies the values from each 8-bit element in the first
/// 64-bit integer vector operand to the specified memory location, as
@@ -2598,11 +2609,14 @@ void _mm_setcsr(unsigned int __i);
/// 00: Bits [31:0] copied from the specified operand. \n
/// 01: Bits [63:32] copied from the specified operand. \n
/// 10: Bits [95:64] copied from the specified operand. \n
-/// 11: Bits [127:96] copied from the specified operand.
+/// 11: Bits [127:96] copied from the specified operand. \n
+/// Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+/// <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+/// <c>[b6, b4, b2, b0]</c>.
/// \returns A 128-bit vector of [4 x float] containing the shuffled values.
#define _mm_shuffle_ps(a, b, mask) \
- (__m128)__builtin_ia32_shufps((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), \
- (int)(mask))
+ ((__m128)__builtin_ia32_shufps((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), \
+ (int)(mask)))
/// Unpacks the high-order (index 2,3) values from two 128-bit vectors of
/// [4 x float] and interleaves them into a 128-bit vector of [4 x float].
@@ -2995,7 +3009,6 @@ do { \
#define _m_pavgw _mm_avg_pu16
#define _m_psadbw _mm_sad_pu8
#define _m_ _mm_
-#define _m_ _mm_
#undef __DEFAULT_FN_ATTRS
#undef __DEFAULT_FN_ATTRS_MMX
diff --git a/contrib/llvm-project/clang/lib/Headers/xopintrin.h b/contrib/llvm-project/clang/lib/Headers/xopintrin.h
index 5cedde41b625..976cdf4902a4 100644
--- a/contrib/llvm-project/clang/lib/Headers/xopintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/xopintrin.h
@@ -225,16 +225,16 @@ _mm_rot_epi64(__m128i __A, __m128i __B)
}
#define _mm_roti_epi8(A, N) \
- (__m128i)__builtin_ia32_vprotbi((__v16qi)(__m128i)(A), (N))
+ ((__m128i)__builtin_ia32_vprotbi((__v16qi)(__m128i)(A), (N)))
#define _mm_roti_epi16(A, N) \
- (__m128i)__builtin_ia32_vprotwi((__v8hi)(__m128i)(A), (N))
+ ((__m128i)__builtin_ia32_vprotwi((__v8hi)(__m128i)(A), (N)))
#define _mm_roti_epi32(A, N) \
- (__m128i)__builtin_ia32_vprotdi((__v4si)(__m128i)(A), (N))
+ ((__m128i)__builtin_ia32_vprotdi((__v4si)(__m128i)(A), (N)))
#define _mm_roti_epi64(A, N) \
- (__m128i)__builtin_ia32_vprotqi((__v2di)(__m128i)(A), (N))
+ ((__m128i)__builtin_ia32_vprotqi((__v2di)(__m128i)(A), (N)))
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_shl_epi8(__m128i __A, __m128i __B)
@@ -285,36 +285,36 @@ _mm_sha_epi64(__m128i __A, __m128i __B)
}
#define _mm_com_epu8(A, B, N) \
- (__m128i)__builtin_ia32_vpcomub((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), (N))
+ ((__m128i)__builtin_ia32_vpcomub((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (N)))
#define _mm_com_epu16(A, B, N) \
- (__m128i)__builtin_ia32_vpcomuw((__v8hi)(__m128i)(A), \
- (__v8hi)(__m128i)(B), (N))
+ ((__m128i)__builtin_ia32_vpcomuw((__v8hi)(__m128i)(A), \
+ (__v8hi)(__m128i)(B), (N)))
#define _mm_com_epu32(A, B, N) \
- (__m128i)__builtin_ia32_vpcomud((__v4si)(__m128i)(A), \
- (__v4si)(__m128i)(B), (N))
+ ((__m128i)__builtin_ia32_vpcomud((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), (N)))
#define _mm_com_epu64(A, B, N) \
- (__m128i)__builtin_ia32_vpcomuq((__v2di)(__m128i)(A), \
- (__v2di)(__m128i)(B), (N))
+ ((__m128i)__builtin_ia32_vpcomuq((__v2di)(__m128i)(A), \
+ (__v2di)(__m128i)(B), (N)))
#define _mm_com_epi8(A, B, N) \
- (__m128i)__builtin_ia32_vpcomb((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), (N))
+ ((__m128i)__builtin_ia32_vpcomb((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (N)))
#define _mm_com_epi16(A, B, N) \
- (__m128i)__builtin_ia32_vpcomw((__v8hi)(__m128i)(A), \
- (__v8hi)(__m128i)(B), (N))
+ ((__m128i)__builtin_ia32_vpcomw((__v8hi)(__m128i)(A), \
+ (__v8hi)(__m128i)(B), (N)))
#define _mm_com_epi32(A, B, N) \
- (__m128i)__builtin_ia32_vpcomd((__v4si)(__m128i)(A), \
- (__v4si)(__m128i)(B), (N))
+ ((__m128i)__builtin_ia32_vpcomd((__v4si)(__m128i)(A), \
+ (__v4si)(__m128i)(B), (N)))
#define _mm_com_epi64(A, B, N) \
- (__m128i)__builtin_ia32_vpcomq((__v2di)(__m128i)(A), \
- (__v2di)(__m128i)(B), (N))
+ ((__m128i)__builtin_ia32_vpcomq((__v2di)(__m128i)(A), \
+ (__v2di)(__m128i)(B), (N)))
#define _MM_PCOMCTRL_LT 0
#define _MM_PCOMCTRL_LE 1
@@ -710,23 +710,23 @@ _mm_comtrue_epi64(__m128i __A, __m128i __B)
}
#define _mm_permute2_pd(X, Y, C, I) \
- (__m128d)__builtin_ia32_vpermil2pd((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), \
- (__v2di)(__m128i)(C), (I))
+ ((__m128d)__builtin_ia32_vpermil2pd((__v2df)(__m128d)(X), \
+ (__v2df)(__m128d)(Y), \
+ (__v2di)(__m128i)(C), (I)))
#define _mm256_permute2_pd(X, Y, C, I) \
- (__m256d)__builtin_ia32_vpermil2pd256((__v4df)(__m256d)(X), \
- (__v4df)(__m256d)(Y), \
- (__v4di)(__m256i)(C), (I))
+ ((__m256d)__builtin_ia32_vpermil2pd256((__v4df)(__m256d)(X), \
+ (__v4df)(__m256d)(Y), \
+ (__v4di)(__m256i)(C), (I)))
#define _mm_permute2_ps(X, Y, C, I) \
- (__m128)__builtin_ia32_vpermil2ps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \
- (__v4si)(__m128i)(C), (I))
+ ((__m128)__builtin_ia32_vpermil2ps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \
+ (__v4si)(__m128i)(C), (I)))
#define _mm256_permute2_ps(X, Y, C, I) \
- (__m256)__builtin_ia32_vpermil2ps256((__v8sf)(__m256)(X), \
- (__v8sf)(__m256)(Y), \
- (__v8si)(__m256i)(C), (I))
+ ((__m256)__builtin_ia32_vpermil2ps256((__v8sf)(__m256)(X), \
+ (__v8sf)(__m256)(Y), \
+ (__v8si)(__m256i)(C), (I)))
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_frcz_ss(__m128 __A)
diff --git a/contrib/llvm-project/clang/lib/Headers/xsavecintrin.h b/contrib/llvm-project/clang/lib/Headers/xsavecintrin.h
index 5524947fa98e..1f2d001207e7 100644
--- a/contrib/llvm-project/clang/lib/Headers/xsavecintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/xsavecintrin.h
@@ -17,12 +17,62 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsavec")))
+/// Performs a full or partial save of processor state to the memory at
+/// \a __p. The exact state saved depends on the 64-bit mask \a __m and
+/// processor control register \c XCR0.
+///
+/// \code{.operation}
+/// mask[62:0] := __m[62:0] AND XCR0[62:0]
+/// FOR i := 0 TO 62
+/// IF mask[i] == 1
+/// CASE (i) OF
+/// 0: save X87 FPU state
+/// 1: save SSE state
+/// DEFAULT: __p.Ext_Save_Area[i] := ProcessorState[i]
+/// FI
+/// ENDFOR
+/// __p.Header.XSTATE_BV[62:0] := INIT_FUNCTION(mask[62:0])
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c XSAVEC instruction.
+///
+/// \param __p
+/// Pointer to the save area; must be 64-byte aligned.
+/// \param __m
+/// A 64-bit mask indicating what state should be saved.
static __inline__ void __DEFAULT_FN_ATTRS
_xsavec(void *__p, unsigned long long __m) {
__builtin_ia32_xsavec(__p, __m);
}
#ifdef __x86_64__
+/// Performs a full or partial save of processor state to the memory at
+/// \a __p. The exact state saved depends on the 64-bit mask \a __m and
+/// processor control register \c XCR0.
+///
+/// \code{.operation}
+/// mask[62:0] := __m[62:0] AND XCR0[62:0]
+/// FOR i := 0 TO 62
+/// IF mask[i] == 1
+/// CASE (i) OF
+/// 0: save X87 FPU state
+/// 1: save SSE state
+/// DEFAULT: __p.Ext_Save_Area[i] := ProcessorState[i]
+/// FI
+/// ENDFOR
+/// __p.Header.XSTATE_BV[62:0] := INIT_FUNCTION(mask[62:0])
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c XSAVEC64 instruction.
+///
+/// \param __p
+/// Pointer to the save area; must be 64-byte aligned.
+/// \param __m
+/// A 64-bit mask indicating what state should be saved.
static __inline__ void __DEFAULT_FN_ATTRS
_xsavec64(void *__p, unsigned long long __m) {
__builtin_ia32_xsavec64(__p, __m);
diff --git a/contrib/llvm-project/clang/lib/Index/CommentToXML.cpp b/contrib/llvm-project/clang/lib/Index/CommentToXML.cpp
index 1cbd14cd326c..295f3f228ff7 100644
--- a/contrib/llvm-project/clang/lib/Index/CommentToXML.cpp
+++ b/contrib/llvm-project/clang/lib/Index/CommentToXML.cpp
@@ -103,10 +103,10 @@ FullCommentParts::FullCommentParts(const FullComment *C,
if (!Child)
continue;
switch (Child->getCommentKind()) {
- case Comment::NoCommentKind:
+ case CommentKind::None:
continue;
- case Comment::ParagraphCommentKind: {
+ case CommentKind::ParagraphComment: {
const ParagraphComment *PC = cast<ParagraphComment>(Child);
if (PC->isWhitespace())
break;
@@ -117,7 +117,7 @@ FullCommentParts::FullCommentParts(const FullComment *C,
break;
}
- case Comment::BlockCommandCommentKind: {
+ case CommentKind::BlockCommandComment: {
const BlockCommandComment *BCC = cast<BlockCommandComment>(Child);
const CommandInfo *Info = Traits.getCommandInfo(BCC->getCommandID());
if (!Brief && Info->IsBriefCommand) {
@@ -140,7 +140,7 @@ FullCommentParts::FullCommentParts(const FullComment *C,
break;
}
- case Comment::ParamCommandCommentKind: {
+ case CommentKind::ParamCommandComment: {
const ParamCommandComment *PCC = cast<ParamCommandComment>(Child);
if (!PCC->hasParamName())
break;
@@ -152,7 +152,7 @@ FullCommentParts::FullCommentParts(const FullComment *C,
break;
}
- case Comment::TParamCommandCommentKind: {
+ case CommentKind::TParamCommandComment: {
const TParamCommandComment *TPCC = cast<TParamCommandComment>(Child);
if (!TPCC->hasParamName())
break;
@@ -164,11 +164,11 @@ FullCommentParts::FullCommentParts(const FullComment *C,
break;
}
- case Comment::VerbatimBlockCommentKind:
+ case CommentKind::VerbatimBlockComment:
MiscBlocks.push_back(cast<BlockCommandComment>(Child));
break;
- case Comment::VerbatimLineCommentKind: {
+ case CommentKind::VerbatimLineComment: {
const VerbatimLineComment *VLC = cast<VerbatimLineComment>(Child);
const CommandInfo *Info = Traits.getCommandInfo(VLC->getCommandID());
if (!Info->IsDeclarationCommand)
@@ -176,12 +176,12 @@ FullCommentParts::FullCommentParts(const FullComment *C,
break;
}
- case Comment::TextCommentKind:
- case Comment::InlineCommandCommentKind:
- case Comment::HTMLStartTagCommentKind:
- case Comment::HTMLEndTagCommentKind:
- case Comment::VerbatimBlockLineCommentKind:
- case Comment::FullCommentKind:
+ case CommentKind::TextComment:
+ case CommentKind::InlineCommandComment:
+ case CommentKind::HTMLStartTagComment:
+ case CommentKind::HTMLEndTagComment:
+ case CommentKind::VerbatimBlockLineComment:
+ case CommentKind::FullComment:
llvm_unreachable("AST node of this kind can't be a child of "
"a FullComment");
}
@@ -274,32 +274,32 @@ void CommentASTToHTMLConverter::visitInlineCommandComment(
return;
switch (C->getRenderKind()) {
- case InlineCommandComment::RenderNormal:
+ case InlineCommandRenderKind::Normal:
for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i) {
appendToResultWithHTMLEscaping(C->getArgText(i));
Result << " ";
}
return;
- case InlineCommandComment::RenderBold:
+ case InlineCommandRenderKind::Bold:
assert(C->getNumArgs() == 1);
Result << "<b>";
appendToResultWithHTMLEscaping(Arg0);
Result << "</b>";
return;
- case InlineCommandComment::RenderMonospaced:
+ case InlineCommandRenderKind::Monospaced:
assert(C->getNumArgs() == 1);
Result << "<tt>";
appendToResultWithHTMLEscaping(Arg0);
Result<< "</tt>";
return;
- case InlineCommandComment::RenderEmphasized:
+ case InlineCommandRenderKind::Emphasized:
assert(C->getNumArgs() == 1);
Result << "<em>";
appendToResultWithHTMLEscaping(Arg0);
Result << "</em>";
return;
- case InlineCommandComment::RenderAnchor:
+ case InlineCommandRenderKind::Anchor:
assert(C->getNumArgs() == 1);
Result << "<span id=\"" << Arg0 << "\"></span>";
return;
@@ -623,31 +623,31 @@ void CommentASTToXMLConverter::visitInlineCommandComment(
return;
switch (C->getRenderKind()) {
- case InlineCommandComment::RenderNormal:
+ case InlineCommandRenderKind::Normal:
for (unsigned i = 0, e = C->getNumArgs(); i != e; ++i) {
appendToResultWithXMLEscaping(C->getArgText(i));
Result << " ";
}
return;
- case InlineCommandComment::RenderBold:
+ case InlineCommandRenderKind::Bold:
assert(C->getNumArgs() == 1);
Result << "<bold>";
appendToResultWithXMLEscaping(Arg0);
Result << "</bold>";
return;
- case InlineCommandComment::RenderMonospaced:
+ case InlineCommandRenderKind::Monospaced:
assert(C->getNumArgs() == 1);
Result << "<monospaced>";
appendToResultWithXMLEscaping(Arg0);
Result << "</monospaced>";
return;
- case InlineCommandComment::RenderEmphasized:
+ case InlineCommandRenderKind::Emphasized:
assert(C->getNumArgs() == 1);
Result << "<emphasized>";
appendToResultWithXMLEscaping(Arg0);
Result << "</emphasized>";
return;
- case InlineCommandComment::RenderAnchor:
+ case InlineCommandRenderKind::Anchor:
assert(C->getNumArgs() == 1);
Result << "<anchor id=\"" << Arg0 << "\"></anchor>";
return;
@@ -751,13 +751,13 @@ void CommentASTToXMLConverter::visitParamCommandComment(
Result << "<Direction isExplicit=\"" << C->isDirectionExplicit() << "\">";
switch (C->getDirection()) {
- case ParamCommandComment::In:
+ case ParamCommandPassDirection::In:
Result << "in";
break;
- case ParamCommandComment::Out:
+ case ParamCommandPassDirection::Out:
Result << "out";
break;
- case ParamCommandComment::InOut:
+ case ParamCommandPassDirection::InOut:
Result << "in,out";
break;
}
@@ -891,7 +891,7 @@ void CommentASTToXMLConverter::visitFullComment(const FullComment *C) {
unsigned FileOffset = LocInfo.second;
if (FID.isValid()) {
- if (const FileEntry *FE = SM.getFileEntryForID(FID)) {
+ if (OptionalFileEntryRef FE = SM.getFileEntryRefForID(FID)) {
Result << " file=\"";
appendToResultWithXMLEscaping(FE->getName());
Result << "\"";
diff --git a/contrib/llvm-project/clang/lib/Index/FileIndexRecord.cpp b/contrib/llvm-project/clang/lib/Index/FileIndexRecord.cpp
index d392a2bedeba..f3a5e6b63bbc 100644
--- a/contrib/llvm-project/clang/lib/Index/FileIndexRecord.cpp
+++ b/contrib/llvm-project/clang/lib/Index/FileIndexRecord.cpp
@@ -1,9 +1,8 @@
//===--- FileIndexRecord.cpp - Index data per file --------------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -46,13 +45,11 @@ void FileIndexRecord::addMacroOccurence(SymbolRoleSet Roles, unsigned Offset,
}
void FileIndexRecord::removeHeaderGuardMacros() {
- auto It =
- std::remove_if(Decls.begin(), Decls.end(), [](const DeclOccurrence &D) {
- if (const auto *MI = D.DeclOrMacro.dyn_cast<const MacroInfo *>())
- return MI->isUsedForHeaderGuard();
- return false;
- });
- Decls.erase(It, Decls.end());
+ llvm::erase_if(Decls, [](const DeclOccurrence &D) {
+ if (const auto *MI = D.DeclOrMacro.dyn_cast<const MacroInfo *>())
+ return MI->isUsedForHeaderGuard();
+ return false;
+ });
}
void FileIndexRecord::print(llvm::raw_ostream &OS, SourceManager &SM) const {
diff --git a/contrib/llvm-project/clang/lib/Index/IndexBody.cpp b/contrib/llvm-project/clang/lib/Index/IndexBody.cpp
index fa35f749d028..08136baa5d40 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexBody.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexBody.cpp
@@ -7,8 +7,12 @@
//===----------------------------------------------------------------------===//
#include "IndexingContext.h"
-#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTLambda.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprConcepts.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/Type.h"
using namespace clang;
using namespace clang::index;
@@ -73,9 +77,15 @@ public:
const Stmt *Parent = *It;
if (auto BO = dyn_cast<BinaryOperator>(Parent)) {
- if (BO->getOpcode() == BO_Assign && BO->getLHS()->IgnoreParenCasts() == E)
- Roles |= (unsigned)SymbolRole::Write;
-
+ if (BO->getOpcode() == BO_Assign) {
+ if (BO->getLHS()->IgnoreParenCasts() == E)
+ Roles |= (unsigned)SymbolRole::Write;
+ } else if (auto CA = dyn_cast<CompoundAssignOperator>(Parent)) {
+ if (CA->getLHS()->IgnoreParenCasts() == E) {
+ Roles |= (unsigned)SymbolRole::Read;
+ Roles |= (unsigned)SymbolRole::Write;
+ }
+ }
} else if (auto UO = dyn_cast<UnaryOperator>(Parent)) {
if (UO->isIncrementDecrementOp()) {
Roles |= (unsigned)SymbolRole::Read;
@@ -84,12 +94,6 @@ public:
Roles |= (unsigned)SymbolRole::AddressOf;
}
- } else if (auto CA = dyn_cast<CompoundAssignOperator>(Parent)) {
- if (CA->getLHS()->IgnoreParenCasts() == E) {
- Roles |= (unsigned)SymbolRole::Read;
- Roles |= (unsigned)SymbolRole::Write;
- }
-
} else if (auto CE = dyn_cast<CallExpr>(Parent)) {
if (CE->getCallee()->IgnoreParenCasts() == E) {
addCallRole(Roles, Relations);
@@ -140,6 +144,17 @@ public:
Parent, ParentDC, Roles, Relations, E);
}
+ bool VisitGotoStmt(GotoStmt *S) {
+ return IndexCtx.handleReference(S->getLabel(), S->getLabelLoc(), Parent,
+ ParentDC);
+ }
+
+ bool VisitLabelStmt(LabelStmt *S) {
+ if (IndexCtx.shouldIndexFunctionLocalSymbols())
+ return IndexCtx.handleDecl(S->getDecl());
+ return true;
+ }
+
bool VisitMemberExpr(MemberExpr *E) {
SourceLocation Loc = E->getMemberLoc();
if (Loc.isInvalid())
@@ -199,9 +214,12 @@ public:
bool VisitDesignatedInitExpr(DesignatedInitExpr *E) {
for (DesignatedInitExpr::Designator &D : llvm::reverse(E->designators())) {
- if (D.isFieldDesignator() && D.getField())
- return IndexCtx.handleReference(D.getField(), D.getFieldLoc(), Parent,
- ParentDC, SymbolRoleSet(), {}, E);
+ if (D.isFieldDesignator()) {
+ if (const FieldDecl *FD = D.getFieldDecl()) {
+ return IndexCtx.handleReference(FD, D.getFieldLoc(), Parent,
+ ParentDC, SymbolRoleSet(), {}, E);
+ }
+ }
}
return true;
}
@@ -413,10 +431,13 @@ public:
auto visitSyntacticDesignatedInitExpr = [&](DesignatedInitExpr *E) -> bool {
for (DesignatedInitExpr::Designator &D : llvm::reverse(E->designators())) {
- if (D.isFieldDesignator() && D.getField())
- return IndexCtx.handleReference(D.getField(), D.getFieldLoc(),
- Parent, ParentDC, SymbolRoleSet(),
- {}, E);
+ if (D.isFieldDesignator()) {
+ if (const FieldDecl *FD = D.getFieldDecl()) {
+ return IndexCtx.handleReference(FD, D.getFieldLoc(), Parent,
+ ParentDC, SymbolRoleSet(),
+ /*Relations=*/{}, E);
+ }
+ }
}
return true;
};
@@ -455,16 +476,16 @@ public:
}
bool VisitParmVarDecl(ParmVarDecl* D) {
- // Index the parameters of lambda expression.
+ // Index the parameters of lambda expression and requires expression.
if (IndexCtx.shouldIndexFunctionLocalSymbols()) {
const auto *DC = D->getDeclContext();
- if (DC && isLambdaCallOperator(DC))
+ if (DC && (isLambdaCallOperator(DC) || isa<RequiresExprBodyDecl>(DC)))
IndexCtx.handleDecl(D);
}
return true;
}
- bool VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
+ bool VisitOverloadExpr(OverloadExpr *E) {
SmallVector<SymbolRelation, 4> Relations;
SymbolRoleSet Roles = getRolesForRef(E, Relations);
for (auto *D : E->decls())
@@ -472,6 +493,18 @@ public:
Relations, E);
return true;
}
+
+ bool VisitConceptSpecializationExpr(ConceptSpecializationExpr *R) {
+ IndexCtx.handleReference(R->getNamedConcept(), R->getConceptNameLoc(),
+ Parent, ParentDC);
+ return true;
+ }
+
+ bool TraverseTypeConstraint(const TypeConstraint *C) {
+ IndexCtx.handleReference(C->getNamedConcept(), C->getConceptNameLoc(),
+ Parent, ParentDC);
+ return RecursiveASTVisitor::TraverseTypeConstraint(C);
+ }
};
} // anonymous namespace
diff --git a/contrib/llvm-project/clang/lib/Index/IndexDecl.cpp b/contrib/llvm-project/clang/lib/Index/IndexDecl.cpp
index 00adb3644ff2..1c04aa17d53f 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexDecl.cpp
@@ -7,9 +7,13 @@
//===----------------------------------------------------------------------===//
#include "IndexingContext.h"
+#include "clang/AST/ASTConcept.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclVisitor.h"
#include "clang/Index/IndexDataConsumer.h"
+#include "clang/Index/IndexSymbol.h"
using namespace clang;
using namespace index;
@@ -128,6 +132,8 @@ public:
}
}
}
+ if (auto *C = D->getTrailingRequiresClause())
+ IndexCtx.indexBody(C, Parent);
}
bool handleObjCMethod(const ObjCMethodDecl *D,
@@ -372,6 +378,15 @@ public:
return true;
}
+ bool VisitEnumDecl(const EnumDecl *ED) {
+ TRY_TO(VisitTagDecl(ED));
+ // Indexing for enumdecl itself is handled inside TagDecl, we just want to
+ // visit integer-base here, which is different than other TagDecl bases.
+ if (auto *TSI = ED->getIntegerTypeSourceInfo())
+ IndexCtx.indexTypeSourceInfo(TSI, ED, ED, /*isBase=*/true);
+ return true;
+ }
+
bool handleReferencedProtocols(const ObjCProtocolList &ProtList,
const ObjCContainerDecl *ContD,
SourceLocation SuperLoc) {
@@ -595,9 +610,16 @@ public:
const NamedDecl *Parent = dyn_cast<NamedDecl>(DC);
IndexCtx.indexNestedNameSpecifierLoc(D->getQualifierLoc(), Parent,
D->getLexicalDeclContext());
- for (const auto *I : D->shadows())
+ for (const auto *I : D->shadows()) {
+ // Skip unresolved using decls - we already have a decl for the using
+ // itself, so there's not much point adding another decl or reference to
+ // refer to the same location.
+ if (isa<UnresolvedUsingIfExistsDecl>(I->getUnderlyingDecl()))
+ continue;
+
IndexCtx.handleReference(I->getUnderlyingDecl(), D->getLocation(), Parent,
D->getLexicalDeclContext(), SymbolRoleSet());
+ }
return true;
}
@@ -671,36 +693,53 @@ public:
return true;
}
- bool VisitTemplateDecl(const TemplateDecl *D) {
+ void indexTemplateParameters(TemplateParameterList *Params,
+ const NamedDecl *Parent) {
+ for (const NamedDecl *TP : *Params) {
+ if (IndexCtx.shouldIndexTemplateParameters())
+ IndexCtx.handleDecl(TP);
+ if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(TP)) {
+ if (TTP->hasDefaultArgument())
+ IndexCtx.indexTypeSourceInfo(TTP->getDefaultArgumentInfo(), Parent);
+ if (auto *C = TTP->getTypeConstraint())
+ IndexCtx.handleReference(C->getNamedConcept(), C->getConceptNameLoc(),
+ Parent, TTP->getLexicalDeclContext());
+ } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(TP)) {
+ IndexCtx.indexTypeSourceInfo(NTTP->getTypeSourceInfo(), Parent);
+ if (NTTP->hasDefaultArgument())
+ IndexCtx.indexBody(NTTP->getDefaultArgument(), Parent);
+ } else if (const auto *TTPD = dyn_cast<TemplateTemplateParmDecl>(TP)) {
+ if (TTPD->hasDefaultArgument())
+ handleTemplateArgumentLoc(TTPD->getDefaultArgument(), Parent,
+ TP->getLexicalDeclContext());
+ }
+ }
+ if (auto *R = Params->getRequiresClause())
+ IndexCtx.indexBody(R, Parent);
+ }
+ bool VisitTemplateDecl(const TemplateDecl *D) {
const NamedDecl *Parent = D->getTemplatedDecl();
if (!Parent)
return true;
// Index the default values for the template parameters.
- if (D->getTemplateParameters() &&
- shouldIndexTemplateParameterDefaultValue(Parent)) {
- const TemplateParameterList *Params = D->getTemplateParameters();
- for (const NamedDecl *TP : *Params) {
- if (IndexCtx.shouldIndexTemplateParameters())
- IndexCtx.handleDecl(TP);
- if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(TP)) {
- if (TTP->hasDefaultArgument())
- IndexCtx.indexTypeSourceInfo(TTP->getDefaultArgumentInfo(), Parent);
- } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(TP)) {
- if (NTTP->hasDefaultArgument())
- IndexCtx.indexBody(NTTP->getDefaultArgument(), Parent);
- } else if (const auto *TTPD = dyn_cast<TemplateTemplateParmDecl>(TP)) {
- if (TTPD->hasDefaultArgument())
- handleTemplateArgumentLoc(TTPD->getDefaultArgument(), Parent,
- TP->getLexicalDeclContext());
- }
- }
+ auto *Params = D->getTemplateParameters();
+ if (Params && shouldIndexTemplateParameterDefaultValue(Parent)) {
+ indexTemplateParameters(Params, Parent);
}
return Visit(Parent);
}
+ bool VisitConceptDecl(const ConceptDecl *D) {
+ if (auto *Params = D->getTemplateParameters())
+ indexTemplateParameters(Params, D);
+ if (auto *E = D->getConstraintExpr())
+ IndexCtx.indexBody(E, D);
+ return IndexCtx.handleDecl(D);
+ }
+
bool VisitFriendDecl(const FriendDecl *D) {
if (auto ND = D->getFriendDecl()) {
// FIXME: Ignore a class template in a dependent context, these are not
diff --git a/contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp b/contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp
index 68e457de5265..0f79694d1faa 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp
@@ -36,7 +36,7 @@ static bool isUnitTest(const ObjCMethodDecl *D) {
return false;
if (!D->getReturnType()->isVoidType())
return false;
- if (!D->getSelector().getNameForSlot(0).startswith("test"))
+ if (!D->getSelector().getNameForSlot(0).starts_with("test"))
return false;
return isUnitTestCase(D->getClassInterface());
}
@@ -66,16 +66,17 @@ bool index::isFunctionLocalSymbol(const Decl *D) {
if (const NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
switch (ND->getFormalLinkage()) {
- case NoLinkage:
- case InternalLinkage:
- return true;
- case VisibleNoLinkage:
- case UniqueExternalLinkage:
- case ModuleInternalLinkage:
- llvm_unreachable("Not a sema linkage");
- case ModuleLinkage:
- case ExternalLinkage:
- return false;
+ case Linkage::Invalid:
+ llvm_unreachable("Linkage hasn't been computed!");
+ case Linkage::None:
+ case Linkage::Internal:
+ return true;
+ case Linkage::VisibleNone:
+ case Linkage::UniqueExternal:
+ llvm_unreachable("Not a sema linkage");
+ case Linkage::Module:
+ case Linkage::External:
+ return false;
}
}
@@ -106,19 +107,19 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
switch (TD->getTagKind()) {
- case TTK_Struct:
+ case TagTypeKind::Struct:
Info.Kind = SymbolKind::Struct; break;
- case TTK_Union:
+ case TagTypeKind::Union:
Info.Kind = SymbolKind::Union; break;
- case TTK_Class:
+ case TagTypeKind::Class:
Info.Kind = SymbolKind::Class;
Info.Lang = SymbolLanguage::CXX;
break;
- case TTK_Interface:
+ case TagTypeKind::Interface:
Info.Kind = SymbolKind::Protocol;
Info.Lang = SymbolLanguage::CXX;
break;
- case TTK_Enum:
+ case TagTypeKind::Enum:
Info.Kind = SymbolKind::Enum; break;
}
@@ -347,7 +348,6 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
}
break;
case Decl::ClassTemplatePartialSpecialization:
- case Decl::ClassScopeFunctionSpecialization:
case Decl::ClassTemplateSpecialization:
case Decl::CXXRecord:
case Decl::Enum:
@@ -371,6 +371,9 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
case Decl::NonTypeTemplateParm:
Info.Kind = SymbolKind::NonTypeTemplateParm;
break;
+ case Decl::Concept:
+ Info.Kind = SymbolKind::Concept;
+ break;
// Other decls get the 'unknown' kind.
default:
break;
@@ -534,6 +537,8 @@ StringRef index::getSymbolKindString(SymbolKind K) {
case SymbolKind::TemplateTypeParm: return "template-type-param";
case SymbolKind::TemplateTemplateParm: return "template-template-param";
case SymbolKind::NonTypeTemplateParm: return "non-type-template-param";
+ case SymbolKind::Concept:
+ return "concept";
}
llvm_unreachable("invalid symbol kind");
}
diff --git a/contrib/llvm-project/clang/lib/Index/IndexTypeSourceInfo.cpp b/contrib/llvm-project/clang/lib/Index/IndexTypeSourceInfo.cpp
index ec4ca23942ca..b986ccde5745 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexTypeSourceInfo.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexTypeSourceInfo.cpp
@@ -7,7 +7,10 @@
//===----------------------------------------------------------------------===//
#include "IndexingContext.h"
+#include "clang/AST/ASTConcept.h"
+#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/TypeLoc.h"
#include "llvm/ADT/ScopeExit.h"
using namespace clang;
@@ -77,6 +80,13 @@ public:
return true;
}
+ bool VisitAutoTypeLoc(AutoTypeLoc TL) {
+ if (auto *C = TL.getNamedConcept())
+ return IndexCtx.handleReference(C, TL.getConceptNameLoc(), Parent,
+ ParentDC);
+ return true;
+ }
+
bool traverseParamVarHelper(ParmVarDecl *D) {
TRY_TO(TraverseNestedNameSpecifierLoc(D->getQualifierLoc()));
if (D->getTypeSourceInfo())
diff --git a/contrib/llvm-project/clang/lib/Index/IndexingContext.cpp b/contrib/llvm-project/clang/lib/Index/IndexingContext.cpp
index 8a962a055bac..2dd68dfcc5a7 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexingContext.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexingContext.cpp
@@ -76,8 +76,7 @@ bool IndexingContext::handleReference(const NamedDecl *D, SourceLocation Loc,
const DeclContext *DC,
SymbolRoleSet Roles,
ArrayRef<SymbolRelation> Relations,
- const Expr *RefE,
- const Decl *RefD) {
+ const Expr *RefE) {
if (!shouldIndexFunctionLocalSymbols() && isFunctionLocalSymbol(D))
return true;
@@ -86,9 +85,8 @@ bool IndexingContext::handleReference(const NamedDecl *D, SourceLocation Loc,
isa<TemplateTemplateParmDecl>(D))) {
return true;
}
-
return handleDeclOccurrence(D, Loc, /*IsRef=*/true, Parent, Roles, Relations,
- RefE, RefD, DC);
+ RefE, nullptr, DC);
}
static void reportModuleReferences(const Module *Mod,
@@ -259,12 +257,9 @@ static bool isDeclADefinition(const Decl *D, const DeclContext *ContainerDC, AST
if (auto MD = dyn_cast<ObjCMethodDecl>(D))
return MD->isThisDeclarationADefinition() || isa<ObjCImplDecl>(ContainerDC);
- if (isa<TypedefNameDecl>(D) ||
- isa<EnumConstantDecl>(D) ||
- isa<FieldDecl>(D) ||
- isa<MSPropertyDecl>(D) ||
- isa<ObjCImplDecl>(D) ||
- isa<ObjCPropertyImplDecl>(D))
+ if (isa<TypedefNameDecl>(D) || isa<EnumConstantDecl>(D) ||
+ isa<FieldDecl>(D) || isa<MSPropertyDecl>(D) || isa<ObjCImplDecl>(D) ||
+ isa<ObjCPropertyImplDecl>(D) || isa<ConceptDecl>(D))
return true;
return false;
diff --git a/contrib/llvm-project/clang/lib/Index/IndexingContext.h b/contrib/llvm-project/clang/lib/Index/IndexingContext.h
index 626d81f003e9..89363b529fe9 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexingContext.h
+++ b/contrib/llvm-project/clang/lib/Index/IndexingContext.h
@@ -68,20 +68,18 @@ public:
static bool isTemplateImplicitInstantiation(const Decl *D);
bool handleDecl(const Decl *D, SymbolRoleSet Roles = SymbolRoleSet(),
- ArrayRef<SymbolRelation> Relations = None);
+ ArrayRef<SymbolRelation> Relations = std::nullopt);
bool handleDecl(const Decl *D, SourceLocation Loc,
SymbolRoleSet Roles = SymbolRoleSet(),
- ArrayRef<SymbolRelation> Relations = None,
+ ArrayRef<SymbolRelation> Relations = std::nullopt,
const DeclContext *DC = nullptr);
bool handleReference(const NamedDecl *D, SourceLocation Loc,
- const NamedDecl *Parent,
- const DeclContext *DC,
+ const NamedDecl *Parent, const DeclContext *DC,
SymbolRoleSet Roles = SymbolRoleSet(),
- ArrayRef<SymbolRelation> Relations = None,
- const Expr *RefE = nullptr,
- const Decl *RefD = nullptr);
+ ArrayRef<SymbolRelation> Relations = std::nullopt,
+ const Expr *RefE = nullptr);
void handleMacroDefined(const IdentifierInfo &Name, SourceLocation Loc,
const MacroInfo &MI);
@@ -97,7 +95,7 @@ public:
bool indexDecl(const Decl *D);
void indexTagDecl(const TagDecl *D,
- ArrayRef<SymbolRelation> Relations = None);
+ ArrayRef<SymbolRelation> Relations = std::nullopt);
void indexTypeSourceInfo(TypeSourceInfo *TInfo, const NamedDecl *Parent,
const DeclContext *DC = nullptr,
diff --git a/contrib/llvm-project/clang/lib/Index/USRGeneration.cpp b/contrib/llvm-project/clang/lib/Index/USRGeneration.cpp
index 6db763ca6f2b..5acc86191f8f 100644
--- a/contrib/llvm-project/clang/lib/Index/USRGeneration.cpp
+++ b/contrib/llvm-project/clang/lib/Index/USRGeneration.cpp
@@ -9,8 +9,10 @@
#include "clang/Index/USRGeneration.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/ODRHash.h"
#include "clang/Basic/FileManager.h"
#include "clang/Lex/PreprocessingRecord.h"
#include "llvm/Support/Path.h"
@@ -31,7 +33,7 @@ static bool printLoc(llvm::raw_ostream &OS, SourceLocation Loc,
}
Loc = SM.getExpansionLoc(Loc);
const std::pair<FileID, unsigned> &Decomposed = SM.getDecomposedLoc(Loc);
- const FileEntry *FE = SM.getFileEntryForID(Decomposed.first);
+ OptionalFileEntryRef FE = SM.getFileEntryRefForID(Decomposed.first);
if (FE) {
OS << llvm::sys::path::filename(FE->getName());
} else {
@@ -103,6 +105,7 @@ public:
void VisitTemplateTemplateParmDecl(const TemplateTemplateParmDecl *D);
void VisitUnresolvedUsingValueDecl(const UnresolvedUsingValueDecl *D);
void VisitUnresolvedUsingTypenameDecl(const UnresolvedUsingTypenameDecl *D);
+ void VisitConceptDecl(const ConceptDecl *D);
void VisitLinkageSpecDecl(const LinkageSpecDecl *D) {
IgnoreResults = true; // No USRs for linkage specs themselves.
@@ -167,6 +170,8 @@ public:
void VisitTemplateName(TemplateName Name);
void VisitTemplateArgument(const TemplateArgument &Arg);
+ void VisitMSGuidDecl(const MSGuidDecl *D);
+
/// Emit a Decl's name using NamedDecl::printName() and return true if
/// the decl had no name.
bool EmitDeclName(const NamedDecl *D);
@@ -178,10 +183,11 @@ public:
//===----------------------------------------------------------------------===//
bool USRGenerator::EmitDeclName(const NamedDecl *D) {
- const unsigned startSize = Buf.size();
- D->printName(Out);
- const unsigned endSize = Buf.size();
- return startSize == endSize;
+ DeclarationName N = D->getDeclName();
+ if (N.isEmpty())
+ return true;
+ Out << N;
+ return false;
}
bool USRGenerator::ShouldGenerateLocation(const NamedDecl *D) {
@@ -222,6 +228,11 @@ void USRGenerator::VisitFunctionDecl(const FunctionDecl *D) {
if (ShouldGenerateLocation(D) && GenLoc(D, /*IncludeOffset=*/isLocal(D)))
return;
+ if (D->getType().isNull()) {
+ IgnoreResults = true;
+ return;
+ }
+
const unsigned StartSize = Buf.size();
VisitDeclContext(D->getDeclContext());
if (Buf.size() == StartSize)
@@ -257,7 +268,7 @@ void USRGenerator::VisitFunctionDecl(const FunctionDecl *D) {
}
// Mangle in type information for the arguments.
- for (auto PD : D->parameters()) {
+ for (auto *PD : D->parameters()) {
Out << '#';
VisitType(PD->getType());
}
@@ -359,14 +370,14 @@ void USRGenerator::VisitTemplateTemplateParmDecl(
}
void USRGenerator::VisitNamespaceDecl(const NamespaceDecl *D) {
+ if (IgnoreResults)
+ return;
+ VisitDeclContext(D->getDeclContext());
if (D->isAnonymousNamespace()) {
Out << "@aN";
return;
}
-
- VisitDeclContext(D->getDeclContext());
- if (!IgnoreResults)
- Out << "@N@" << D->getName();
+ Out << "@N@" << D->getName();
}
void USRGenerator::VisitFunctionTemplateDecl(const FunctionTemplateDecl *D) {
@@ -509,11 +520,16 @@ void USRGenerator::VisitTagDecl(const TagDecl *D) {
AlreadyStarted = true;
switch (D->getTagKind()) {
- case TTK_Interface:
- case TTK_Class:
- case TTK_Struct: Out << "@ST"; break;
- case TTK_Union: Out << "@UT"; break;
- case TTK_Enum: llvm_unreachable("enum template");
+ case TagTypeKind::Interface:
+ case TagTypeKind::Class:
+ case TagTypeKind::Struct:
+ Out << "@ST";
+ break;
+ case TagTypeKind::Union:
+ Out << "@UT";
+ break;
+ case TagTypeKind::Enum:
+ llvm_unreachable("enum template");
}
VisitTemplateParameterList(ClassTmpl->getTemplateParameters());
} else if (const ClassTemplatePartialSpecializationDecl *PartialSpec
@@ -521,11 +537,16 @@ void USRGenerator::VisitTagDecl(const TagDecl *D) {
AlreadyStarted = true;
switch (D->getTagKind()) {
- case TTK_Interface:
- case TTK_Class:
- case TTK_Struct: Out << "@SP"; break;
- case TTK_Union: Out << "@UP"; break;
- case TTK_Enum: llvm_unreachable("enum partial specialization");
+ case TagTypeKind::Interface:
+ case TagTypeKind::Class:
+ case TagTypeKind::Struct:
+ Out << "@SP";
+ break;
+ case TagTypeKind::Union:
+ Out << "@UP";
+ break;
+ case TagTypeKind::Enum:
+ llvm_unreachable("enum partial specialization");
}
VisitTemplateParameterList(PartialSpec->getTemplateParameters());
}
@@ -533,11 +554,17 @@ void USRGenerator::VisitTagDecl(const TagDecl *D) {
if (!AlreadyStarted) {
switch (D->getTagKind()) {
- case TTK_Interface:
- case TTK_Class:
- case TTK_Struct: Out << "@S"; break;
- case TTK_Union: Out << "@U"; break;
- case TTK_Enum: Out << "@E"; break;
+ case TagTypeKind::Interface:
+ case TagTypeKind::Class:
+ case TagTypeKind::Struct:
+ Out << "@S";
+ break;
+ case TagTypeKind::Union:
+ Out << "@U";
+ break;
+ case TagTypeKind::Enum:
+ Out << "@E";
+ break;
}
}
@@ -549,22 +576,22 @@ void USRGenerator::VisitTagDecl(const TagDecl *D) {
if (const TypedefNameDecl *TD = D->getTypedefNameForAnonDecl()) {
Buf[off] = 'A';
Out << '@' << *TD;
- }
- else {
- if (D->isEmbeddedInDeclarator() && !D->isFreeStanding()) {
- printLoc(Out, D->getLocation(), Context->getSourceManager(), true);
} else {
- Buf[off] = 'a';
- if (auto *ED = dyn_cast<EnumDecl>(D)) {
- // Distinguish USRs of anonymous enums by using their first enumerator.
- auto enum_range = ED->enumerators();
- if (enum_range.begin() != enum_range.end()) {
- Out << '@' << **enum_range.begin();
+ if (D->isEmbeddedInDeclarator() && !D->isFreeStanding()) {
+ printLoc(Out, D->getLocation(), Context->getSourceManager(), true);
+ } else {
+ Buf[off] = 'a';
+ if (auto *ED = dyn_cast<EnumDecl>(D)) {
+ // Distinguish USRs of anonymous enums by using their first
+ // enumerator.
+ auto enum_range = ED->enumerators();
+ if (enum_range.begin() != enum_range.end()) {
+ Out << '@' << **enum_range.begin();
+ }
}
}
}
}
- }
// For a class template specialization, mangle the template arguments.
if (const ClassTemplateSpecializationDecl *Spec
@@ -656,119 +683,159 @@ void USRGenerator::VisitType(QualType T) {
}
if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
- unsigned char c = '\0';
switch (BT->getKind()) {
case BuiltinType::Void:
- c = 'v'; break;
+ Out << 'v'; break;
case BuiltinType::Bool:
- c = 'b'; break;
+ Out << 'b'; break;
case BuiltinType::UChar:
- c = 'c'; break;
+ Out << 'c'; break;
case BuiltinType::Char8:
- c = 'u'; break; // FIXME: Check this doesn't collide
+ Out << 'u'; break;
case BuiltinType::Char16:
- c = 'q'; break;
+ Out << 'q'; break;
case BuiltinType::Char32:
- c = 'w'; break;
+ Out << 'w'; break;
case BuiltinType::UShort:
- c = 's'; break;
+ Out << 's'; break;
case BuiltinType::UInt:
- c = 'i'; break;
+ Out << 'i'; break;
case BuiltinType::ULong:
- c = 'l'; break;
+ Out << 'l'; break;
case BuiltinType::ULongLong:
- c = 'k'; break;
+ Out << 'k'; break;
case BuiltinType::UInt128:
- c = 'j'; break;
+ Out << 'j'; break;
case BuiltinType::Char_U:
case BuiltinType::Char_S:
- c = 'C'; break;
+ Out << 'C'; break;
case BuiltinType::SChar:
- c = 'r'; break;
+ Out << 'r'; break;
case BuiltinType::WChar_S:
case BuiltinType::WChar_U:
- c = 'W'; break;
+ Out << 'W'; break;
case BuiltinType::Short:
- c = 'S'; break;
+ Out << 'S'; break;
case BuiltinType::Int:
- c = 'I'; break;
+ Out << 'I'; break;
case BuiltinType::Long:
- c = 'L'; break;
+ Out << 'L'; break;
case BuiltinType::LongLong:
- c = 'K'; break;
+ Out << 'K'; break;
case BuiltinType::Int128:
- c = 'J'; break;
+ Out << 'J'; break;
case BuiltinType::Float16:
case BuiltinType::Half:
- c = 'h'; break;
+ Out << 'h'; break;
case BuiltinType::Float:
- c = 'f'; break;
+ Out << 'f'; break;
case BuiltinType::Double:
- c = 'd'; break;
+ Out << 'd'; break;
case BuiltinType::LongDouble:
- c = 'D'; break;
+ Out << 'D'; break;
case BuiltinType::Float128:
- c = 'Q'; break;
+ Out << 'Q'; break;
case BuiltinType::NullPtr:
- c = 'n'; break;
-#define BUILTIN_TYPE(Id, SingletonId)
-#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
-#include "clang/AST/BuiltinTypes.def"
- case BuiltinType::Dependent:
+ Out << 'n'; break;
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
- case BuiltinType::Id:
+ case BuiltinType::Id: \
+ Out << "@BT@" << #Suffix << "_" << #ImgType; break;
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
- case BuiltinType::Id:
+ case BuiltinType::Id: \
+ Out << "@BT@" << #ExtType; break;
#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::OCLEvent:
+ Out << "@BT@OCLEvent"; break;
case BuiltinType::OCLClkEvent:
+ Out << "@BT@OCLClkEvent"; break;
case BuiltinType::OCLQueue:
+ Out << "@BT@OCLQueue"; break;
case BuiltinType::OCLReserveID:
+ Out << "@BT@OCLReserveID"; break;
case BuiltinType::OCLSampler:
+ Out << "@BT@OCLSampler"; break;
#define SVE_TYPE(Name, Id, SingletonId) \
- case BuiltinType::Id:
+ case BuiltinType::Id: \
+ Out << "@BT@" << Name; break;
#include "clang/Basic/AArch64SVEACLETypes.def"
#define PPC_VECTOR_TYPE(Name, Id, Size) \
- case BuiltinType::Id:
+ case BuiltinType::Id: \
+ Out << "@BT@" << #Name; break;
#include "clang/Basic/PPCTypes.def"
-#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#define RVV_TYPE(Name, Id, SingletonId) \
+ case BuiltinType::Id: \
+ Out << "@BT@" << Name; break;
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
case BuiltinType::ShortAccum:
+ Out << "@BT@ShortAccum"; break;
case BuiltinType::Accum:
+ Out << "@BT@Accum"; break;
case BuiltinType::LongAccum:
+ Out << "@BT@LongAccum"; break;
case BuiltinType::UShortAccum:
+ Out << "@BT@UShortAccum"; break;
case BuiltinType::UAccum:
+ Out << "@BT@UAccum"; break;
case BuiltinType::ULongAccum:
+ Out << "@BT@ULongAccum"; break;
case BuiltinType::ShortFract:
+ Out << "@BT@ShortFract"; break;
case BuiltinType::Fract:
+ Out << "@BT@Fract"; break;
case BuiltinType::LongFract:
+ Out << "@BT@LongFract"; break;
case BuiltinType::UShortFract:
+ Out << "@BT@UShortFract"; break;
case BuiltinType::UFract:
+ Out << "@BT@UFract"; break;
case BuiltinType::ULongFract:
+ Out << "@BT@ULongFract"; break;
case BuiltinType::SatShortAccum:
+ Out << "@BT@SatShortAccum"; break;
case BuiltinType::SatAccum:
+ Out << "@BT@SatAccum"; break;
case BuiltinType::SatLongAccum:
+ Out << "@BT@SatLongAccum"; break;
case BuiltinType::SatUShortAccum:
+ Out << "@BT@SatUShortAccum"; break;
case BuiltinType::SatUAccum:
+ Out << "@BT@SatUAccum"; break;
case BuiltinType::SatULongAccum:
+ Out << "@BT@SatULongAccum"; break;
case BuiltinType::SatShortFract:
+ Out << "@BT@SatShortFract"; break;
case BuiltinType::SatFract:
+ Out << "@BT@SatFract"; break;
case BuiltinType::SatLongFract:
+ Out << "@BT@SatLongFract"; break;
case BuiltinType::SatUShortFract:
+ Out << "@BT@SatUShortFract"; break;
case BuiltinType::SatUFract:
+ Out << "@BT@SatUFract"; break;
case BuiltinType::SatULongFract:
+ Out << "@BT@SatULongFract"; break;
case BuiltinType::BFloat16:
- IgnoreResults = true;
- return;
+ Out << "@BT@__bf16"; break;
+ case BuiltinType::Ibm128:
+ Out << "@BT@__ibm128"; break;
case BuiltinType::ObjCId:
- c = 'o'; break;
+ Out << 'o'; break;
case BuiltinType::ObjCClass:
- c = 'O'; break;
+ Out << 'O'; break;
case BuiltinType::ObjCSel:
- c = 'e'; break;
+ Out << 'e'; break;
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+ case BuiltinType::Dependent:
+ // If you're adding a new builtin type, please add its name prefixed
+ // with "@BT@" to `Out` (see cases above).
+ IgnoreResults = true;
+ break;
}
- Out << c;
return;
}
@@ -853,9 +920,9 @@ void USRGenerator::VisitType(QualType T) {
= T->getAs<TemplateSpecializationType>()) {
Out << '>';
VisitTemplateName(Spec->getTemplateName());
- Out << Spec->getNumArgs();
- for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I)
- VisitTemplateArgument(Spec->getArg(I));
+ Out << Spec->template_arguments().size();
+ for (const auto &Arg : Spec->template_arguments())
+ VisitTemplateArgument(Arg);
return;
}
if (const DependentNameType *DNT = T->getAs<DependentNameType>()) {
@@ -877,13 +944,13 @@ void USRGenerator::VisitType(QualType T) {
if (const auto *const AT = dyn_cast<ArrayType>(T)) {
Out << '{';
switch (AT->getSizeModifier()) {
- case ArrayType::Static:
+ case ArraySizeModifier::Static:
Out << 's';
break;
- case ArrayType::Star:
+ case ArraySizeModifier::Star:
Out << '*';
break;
- case ArrayType::Normal:
+ case ArraySizeModifier::Normal:
Out << 'n';
break;
}
@@ -961,7 +1028,7 @@ void USRGenerator::VisitTemplateArgument(const TemplateArgument &Arg) {
case TemplateArgument::TemplateExpansion:
Out << 'P'; // pack expansion of...
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case TemplateArgument::Template:
VisitTemplateName(Arg.getAsTemplateOrTemplatePattern());
break;
@@ -985,6 +1052,15 @@ void USRGenerator::VisitTemplateArgument(const TemplateArgument &Arg) {
VisitType(Arg.getIntegralType());
Out << Arg.getAsIntegral();
break;
+
+ case TemplateArgument::StructuralValue: {
+ Out << 'S';
+ VisitType(Arg.getStructuralValueType());
+ ODRHash Hash{};
+ Hash.AddStructuralValue(Arg.getAsStructuralValue());
+ Out << Hash.CalculateHash();
+ break;
+ }
}
}
@@ -1006,7 +1082,19 @@ void USRGenerator::VisitUnresolvedUsingTypenameDecl(const UnresolvedUsingTypenam
Out << D->getName(); // Simple name.
}
+void USRGenerator::VisitConceptDecl(const ConceptDecl *D) {
+ if (ShouldGenerateLocation(D) && GenLoc(D, /*IncludeOffset=*/isLocal(D)))
+ return;
+ VisitDeclContext(D->getDeclContext());
+ Out << "@CT@";
+ EmitDeclName(D);
+}
+void USRGenerator::VisitMSGuidDecl(const MSGuidDecl *D) {
+ VisitDeclContext(D->getDeclContext());
+ Out << "@MG@";
+ D->NamedDecl::printName(Out);
+}
//===----------------------------------------------------------------------===//
// USR generation functions.
@@ -1085,6 +1173,15 @@ bool clang::index::generateUSRForDecl(const Decl *D,
// C++'s operator new function, can have invalid locations but it is fine to
// create USRs that can identify them.
+ // Check if the declaration has explicit external USR specified.
+ auto *CD = D->getCanonicalDecl();
+ if (auto *ExternalSymAttr = CD->getAttr<ExternalSourceSymbolAttr>()) {
+ if (!ExternalSymAttr->getUSR().empty()) {
+ llvm::raw_svector_ostream Out(Buf);
+ Out << ExternalSymAttr->getUSR();
+ return false;
+ }
+ }
USRGenerator UG(&D->getASTContext(), Buf);
UG.Visit(D);
return UG.ignoreResults();
diff --git a/contrib/llvm-project/clang/lib/IndexSerialization/SerializablePathCollection.cpp b/contrib/llvm-project/clang/lib/IndexSerialization/SerializablePathCollection.cpp
index 34663738088e..74ed18a4f612 100644
--- a/contrib/llvm-project/clang/lib/IndexSerialization/SerializablePathCollection.cpp
+++ b/contrib/llvm-project/clang/lib/IndexSerialization/SerializablePathCollection.cpp
@@ -45,8 +45,8 @@ SerializablePathCollection::SerializablePathCollection(
SysRootPath(Paths.addDirPath(SysRoot)),
OutputFilePath(Paths.addDirPath(OutputFile)) {}
-size_t SerializablePathCollection::tryStoreFilePath(const FileEntry &FE) {
- auto FileIt = UniqueFiles.find(&FE);
+size_t SerializablePathCollection::tryStoreFilePath(FileEntryRef FE) {
+ auto FileIt = UniqueFiles.find(FE);
if (FileIt != UniqueFiles.end())
return FileIt->second;
@@ -54,7 +54,7 @@ size_t SerializablePathCollection::tryStoreFilePath(const FileEntry &FE) {
const auto FileIdx =
Paths.addFilePath(Dir.Root, Dir.Path, sys::path::filename(FE.getName()));
- UniqueFiles.try_emplace(&FE, FileIdx);
+ UniqueFiles.try_emplace(FE, FileIdx);
return FileIdx;
}
@@ -70,11 +70,11 @@ PathPool::DirPath SerializablePathCollection::tryStoreDirPath(StringRef Dir) {
const std::string OrigDir = Dir.str();
PathPool::RootDirKind Root = PathPool::RootDirKind::Regular;
- if (!SysRoot.empty() && Dir.startswith(SysRoot) &&
+ if (!SysRoot.empty() && Dir.starts_with(SysRoot) &&
llvm::sys::path::is_separator(Dir[SysRoot.size()])) {
Root = PathPool::RootDirKind::SysRoot;
Dir = Dir.drop_front(SysRoot.size());
- } else if (!WorkDir.empty() && Dir.startswith(WorkDir) &&
+ } else if (!WorkDir.empty() && Dir.starts_with(WorkDir) &&
llvm::sys::path::is_separator(Dir[WorkDir.size()])) {
Root = PathPool::RootDirKind::CurrentWorkDir;
Dir = Dir.drop_front(WorkDir.size());
diff --git a/contrib/llvm-project/clang/lib/Interpreter/CodeCompletion.cpp b/contrib/llvm-project/clang/lib/Interpreter/CodeCompletion.cpp
new file mode 100644
index 000000000000..25183ae9eeb9
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Interpreter/CodeCompletion.cpp
@@ -0,0 +1,388 @@
+//===------ CodeCompletion.cpp - Code Completion for ClangRepl -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the classes which performs code completion at the REPL.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Interpreter/CodeCompletion.h"
+#include "clang/AST/ASTImporter.h"
+#include "clang/AST/DeclLookups.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/ExternalASTSource.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Frontend/ASTUnit.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/Interpreter/Interpreter.h"
+#include "clang/Lex/PreprocessorOptions.h"
+#include "clang/Sema/CodeCompleteConsumer.h"
+#include "clang/Sema/CodeCompleteOptions.h"
+#include "clang/Sema/Sema.h"
+#include "llvm/Support/Debug.h"
+#define DEBUG_TYPE "REPLCC"
+
+namespace clang {
+
+const std::string CodeCompletionFileName = "input_line_[Completion]";
+
+clang::CodeCompleteOptions getClangCompleteOpts() {
+ clang::CodeCompleteOptions Opts;
+ Opts.IncludeCodePatterns = true;
+ Opts.IncludeMacros = true;
+ Opts.IncludeGlobals = true;
+ Opts.IncludeBriefComments = true;
+ return Opts;
+}
+
+class ReplCompletionConsumer : public CodeCompleteConsumer {
+public:
+ ReplCompletionConsumer(std::vector<std::string> &Results,
+ ReplCodeCompleter &CC)
+ : CodeCompleteConsumer(getClangCompleteOpts()),
+ CCAllocator(std::make_shared<GlobalCodeCompletionAllocator>()),
+ CCTUInfo(CCAllocator), Results(Results), CC(CC) {}
+
+ // The entry of handling code completion. When the function is called, we
+ // create a `Context`-based handler (see classes defined below) to handle each
+ // completion result.
+ void ProcessCodeCompleteResults(class Sema &S, CodeCompletionContext Context,
+ CodeCompletionResult *InResults,
+ unsigned NumResults) final;
+
+ CodeCompletionAllocator &getAllocator() override { return *CCAllocator; }
+
+ CodeCompletionTUInfo &getCodeCompletionTUInfo() override { return CCTUInfo; }
+
+private:
+ std::shared_ptr<GlobalCodeCompletionAllocator> CCAllocator;
+ CodeCompletionTUInfo CCTUInfo;
+ std::vector<std::string> &Results;
+ ReplCodeCompleter &CC;
+};
+
+/// The class CompletionContextHandler contains four interfaces, each of
+/// which handles one type of completion result.
+/// Its derived classes are used to create concrete handlers based on
+/// \c CodeCompletionContext.
+class CompletionContextHandler {
+protected:
+ CodeCompletionContext CCC;
+ std::vector<std::string> &Results;
+
+private:
+ Sema &S;
+
+public:
+ CompletionContextHandler(Sema &S, CodeCompletionContext CCC,
+ std::vector<std::string> &Results)
+ : CCC(CCC), Results(Results), S(S) {}
+
+ virtual ~CompletionContextHandler() = default;
+ /// Converts a Declaration completion result to a completion string, and then
+ /// stores it in Results.
+ virtual void handleDeclaration(const CodeCompletionResult &Result) {
+ auto PreferredType = CCC.getPreferredType();
+ if (PreferredType.isNull()) {
+ Results.push_back(Result.Declaration->getName().str());
+ return;
+ }
+
+ if (auto *VD = dyn_cast<VarDecl>(Result.Declaration)) {
+ auto ArgumentType = VD->getType();
+ if (PreferredType->isReferenceType()) {
+ QualType RT = PreferredType->castAs<ReferenceType>()->getPointeeType();
+ Sema::ReferenceConversions RefConv;
+ Sema::ReferenceCompareResult RefRelationship =
+ S.CompareReferenceRelationship(SourceLocation(), RT, ArgumentType,
+ &RefConv);
+ switch (RefRelationship) {
+ case Sema::Ref_Compatible:
+ case Sema::Ref_Related:
+ Results.push_back(VD->getName().str());
+ break;
+ case Sema::Ref_Incompatible:
+ break;
+ }
+ } else if (S.Context.hasSameType(ArgumentType, PreferredType)) {
+ Results.push_back(VD->getName().str());
+ }
+ }
+ }
+
+ /// Converts a Keyword completion result to a completion string, and then
+ /// stores it in Results.
+ virtual void handleKeyword(const CodeCompletionResult &Result) {
+ auto Prefix = S.getPreprocessor().getCodeCompletionFilter();
+ // Add keyword to the completion results only if we are in a type-aware
+ // situation.
+ if (!CCC.getBaseType().isNull() || !CCC.getPreferredType().isNull())
+ return;
+ if (StringRef(Result.Keyword).starts_with(Prefix))
+ Results.push_back(Result.Keyword);
+ }
+
+ /// Converts a Pattern completion result to a completion string, and then
+ /// stores it in Results.
+ virtual void handlePattern(const CodeCompletionResult &Result) {}
+
+ /// Converts a Macro completion result to a completion string, and then stores
+ /// it in Results.
+ virtual void handleMacro(const CodeCompletionResult &Result) {}
+};
+
+class DotMemberAccessHandler : public CompletionContextHandler {
+public:
+ DotMemberAccessHandler(Sema &S, CodeCompletionContext CCC,
+ std::vector<std::string> &Results)
+ : CompletionContextHandler(S, CCC, Results) {}
+ void handleDeclaration(const CodeCompletionResult &Result) override {
+ auto *ID = Result.Declaration->getIdentifier();
+ if (!ID)
+ return;
+ if (!isa<CXXMethodDecl>(Result.Declaration))
+ return;
+ const auto *Fun = cast<CXXMethodDecl>(Result.Declaration);
+ if (Fun->getParent()->getCanonicalDecl() ==
+ CCC.getBaseType()->getAsCXXRecordDecl()->getCanonicalDecl()) {
+ LLVM_DEBUG(llvm::dbgs() << "[In HandleCodeCompleteDOT] Name : "
+ << ID->getName() << "\n");
+ Results.push_back(ID->getName().str());
+ }
+ }
+
+ void handleKeyword(const CodeCompletionResult &Result) override {}
+};
+
+void ReplCompletionConsumer::ProcessCodeCompleteResults(
+ class Sema &S, CodeCompletionContext Context,
+ CodeCompletionResult *InResults, unsigned NumResults) {
+
+ auto Prefix = S.getPreprocessor().getCodeCompletionFilter();
+ CC.Prefix = Prefix;
+
+ std::unique_ptr<CompletionContextHandler> CCH;
+
+ // initialize fine-grained code completion handler based on the code
+ // completion context.
+ switch (Context.getKind()) {
+ case CodeCompletionContext::CCC_DotMemberAccess:
+ CCH.reset(new DotMemberAccessHandler(S, Context, this->Results));
+ break;
+ default:
+ CCH.reset(new CompletionContextHandler(S, Context, this->Results));
+ };
+
+ for (unsigned I = 0; I < NumResults; I++) {
+ auto &Result = InResults[I];
+ switch (Result.Kind) {
+ case CodeCompletionResult::RK_Declaration:
+ if (Result.Hidden) {
+ break;
+ }
+ if (!Result.Declaration->getDeclName().isIdentifier() ||
+ !Result.Declaration->getName().starts_with(Prefix)) {
+ break;
+ }
+ CCH->handleDeclaration(Result);
+ break;
+ case CodeCompletionResult::RK_Keyword:
+ CCH->handleKeyword(Result);
+ break;
+ case CodeCompletionResult::RK_Macro:
+ CCH->handleMacro(Result);
+ break;
+ case CodeCompletionResult::RK_Pattern:
+ CCH->handlePattern(Result);
+ break;
+ }
+ }
+
+ std::sort(Results.begin(), Results.end());
+}
+
+class IncrementalSyntaxOnlyAction : public SyntaxOnlyAction {
+ const CompilerInstance *ParentCI;
+
+public:
+ IncrementalSyntaxOnlyAction(const CompilerInstance *ParentCI)
+ : ParentCI(ParentCI) {}
+
+protected:
+ void ExecuteAction() override;
+};
+
+class ExternalSource : public clang::ExternalASTSource {
+ TranslationUnitDecl *ChildTUDeclCtxt;
+ ASTContext &ParentASTCtxt;
+ TranslationUnitDecl *ParentTUDeclCtxt;
+
+ std::unique_ptr<ASTImporter> Importer;
+
+public:
+ ExternalSource(ASTContext &ChildASTCtxt, FileManager &ChildFM,
+ ASTContext &ParentASTCtxt, FileManager &ParentFM);
+ bool FindExternalVisibleDeclsByName(const DeclContext *DC,
+ DeclarationName Name) override;
+ void
+ completeVisibleDeclsMap(const clang::DeclContext *childDeclContext) override;
+};
+
+// This method is intended to set up `ExternalASTSource` to the running
+// compiler instance before the super `ExecuteAction` triggers parsing
+void IncrementalSyntaxOnlyAction::ExecuteAction() {
+ CompilerInstance &CI = getCompilerInstance();
+ ExternalSource *myExternalSource =
+ new ExternalSource(CI.getASTContext(), CI.getFileManager(),
+ ParentCI->getASTContext(), ParentCI->getFileManager());
+ llvm::IntrusiveRefCntPtr<clang::ExternalASTSource> astContextExternalSource(
+ myExternalSource);
+ CI.getASTContext().setExternalSource(astContextExternalSource);
+ CI.getASTContext().getTranslationUnitDecl()->setHasExternalVisibleStorage(
+ true);
+
+ // Load all external decls into current context. Under the hood, it calls
+ // ExternalSource::completeVisibleDeclsMap, which make all decls on the redecl
+ // chain visible.
+ //
+ // This is crucial to code completion on dot members, since a bound variable
+ // before "." would be otherwise treated out-of-scope.
+ //
+ // clang-repl> Foo f1;
+ // clang-repl> f1.<tab>
+ CI.getASTContext().getTranslationUnitDecl()->lookups();
+ SyntaxOnlyAction::ExecuteAction();
+}
+
+ExternalSource::ExternalSource(ASTContext &ChildASTCtxt, FileManager &ChildFM,
+ ASTContext &ParentASTCtxt, FileManager &ParentFM)
+ : ChildTUDeclCtxt(ChildASTCtxt.getTranslationUnitDecl()),
+ ParentASTCtxt(ParentASTCtxt),
+ ParentTUDeclCtxt(ParentASTCtxt.getTranslationUnitDecl()) {
+ ASTImporter *importer =
+ new ASTImporter(ChildASTCtxt, ChildFM, ParentASTCtxt, ParentFM,
+ /*MinimalImport : ON*/ true);
+ Importer.reset(importer);
+}
+
+bool ExternalSource::FindExternalVisibleDeclsByName(const DeclContext *DC,
+ DeclarationName Name) {
+
+ IdentifierTable &ParentIdTable = ParentASTCtxt.Idents;
+
+ auto ParentDeclName =
+ DeclarationName(&(ParentIdTable.get(Name.getAsString())));
+
+ DeclContext::lookup_result lookup_result =
+ ParentTUDeclCtxt->lookup(ParentDeclName);
+
+ if (!lookup_result.empty()) {
+ return true;
+ }
+ return false;
+}
+
+void ExternalSource::completeVisibleDeclsMap(
+ const DeclContext *ChildDeclContext) {
+ assert(ChildDeclContext && ChildDeclContext == ChildTUDeclCtxt &&
+ "No child decl context!");
+
+ if (!ChildDeclContext->hasExternalVisibleStorage())
+ return;
+
+ for (auto *DeclCtxt = ParentTUDeclCtxt; DeclCtxt != nullptr;
+ DeclCtxt = DeclCtxt->getPreviousDecl()) {
+ for (auto &IDeclContext : DeclCtxt->decls()) {
+ if (!llvm::isa<NamedDecl>(IDeclContext))
+ continue;
+
+ NamedDecl *Decl = llvm::cast<NamedDecl>(IDeclContext);
+
+ auto DeclOrErr = Importer->Import(Decl);
+ if (!DeclOrErr) {
+ // if an error happens, it usually means the decl has already been
+ // imported or the decl is a result of a failed import. But in our
+ // case, every import is fresh each time code completion is
+ // triggered. So Import usually doesn't fail. If it does, it just means
+ // the related decl can't be used in code completion and we can safely
+ // drop it.
+ llvm::consumeError(DeclOrErr.takeError());
+ continue;
+ }
+
+ if (!llvm::isa<NamedDecl>(*DeclOrErr))
+ continue;
+
+ NamedDecl *importedNamedDecl = llvm::cast<NamedDecl>(*DeclOrErr);
+
+ SetExternalVisibleDeclsForName(ChildDeclContext,
+ importedNamedDecl->getDeclName(),
+ importedNamedDecl);
+
+ if (!llvm::isa<CXXRecordDecl>(importedNamedDecl))
+ continue;
+
+ auto *Record = llvm::cast<CXXRecordDecl>(importedNamedDecl);
+
+ if (auto Err = Importer->ImportDefinition(Decl)) {
+ // the same as above
+ consumeError(std::move(Err));
+ continue;
+ }
+
+ Record->setHasLoadedFieldsFromExternalStorage(true);
+ LLVM_DEBUG(llvm::dbgs()
+ << "\nCXXRecrod : " << Record->getName() << " size(methods): "
+ << std::distance(Record->method_begin(), Record->method_end())
+ << " has def?: " << Record->hasDefinition()
+ << " # (methods): "
+ << std::distance(Record->getDefinition()->method_begin(),
+ Record->getDefinition()->method_end())
+ << "\n");
+ for (auto *Meth : Record->methods())
+ SetExternalVisibleDeclsForName(ChildDeclContext, Meth->getDeclName(),
+ Meth);
+ }
+ ChildDeclContext->setHasExternalLexicalStorage(false);
+ }
+}
+
+void ReplCodeCompleter::codeComplete(CompilerInstance *InterpCI,
+ llvm::StringRef Content, unsigned Line,
+ unsigned Col,
+ const CompilerInstance *ParentCI,
+ std::vector<std::string> &CCResults) {
+ auto DiagOpts = DiagnosticOptions();
+ auto consumer = ReplCompletionConsumer(CCResults, *this);
+
+ auto diag = InterpCI->getDiagnosticsPtr();
+ std::unique_ptr<ASTUnit> AU(ASTUnit::LoadFromCompilerInvocationAction(
+ InterpCI->getInvocationPtr(), std::make_shared<PCHContainerOperations>(),
+ diag));
+ llvm::SmallVector<clang::StoredDiagnostic, 8> sd = {};
+ llvm::SmallVector<const llvm::MemoryBuffer *, 1> tb = {};
+ InterpCI->getFrontendOpts().Inputs[0] = FrontendInputFile(
+ CodeCompletionFileName, Language::CXX, InputKind::Source);
+ auto Act = std::unique_ptr<IncrementalSyntaxOnlyAction>(
+ new IncrementalSyntaxOnlyAction(ParentCI));
+ std::unique_ptr<llvm::MemoryBuffer> MB =
+ llvm::MemoryBuffer::getMemBufferCopy(Content, CodeCompletionFileName);
+ llvm::SmallVector<ASTUnit::RemappedFile, 4> RemappedFiles;
+
+ RemappedFiles.push_back(std::make_pair(CodeCompletionFileName, MB.get()));
+ // we don't want the AU destructor to release the memory buffer that MB
+ // owns twice, because MB handles its resource on its own.
+ AU->setOwnsRemappedFileBuffers(false);
+ AU->CodeComplete(CodeCompletionFileName, 1, Col, RemappedFiles, false, false,
+ false, consumer,
+ std::make_shared<clang::PCHContainerOperations>(), *diag,
+ InterpCI->getLangOpts(), InterpCI->getSourceManager(),
+ InterpCI->getFileManager(), sd, tb, std::move(Act));
+}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Interpreter/DeviceOffload.cpp b/contrib/llvm-project/clang/lib/Interpreter/DeviceOffload.cpp
new file mode 100644
index 000000000000..fb42964e4936
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Interpreter/DeviceOffload.cpp
@@ -0,0 +1,176 @@
+//===---------- DeviceOffload.cpp - Device Offloading------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements offloading to CUDA devices.
+//
+//===----------------------------------------------------------------------===//
+
+#include "DeviceOffload.h"
+
+#include "clang/Basic/TargetOptions.h"
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "clang/Frontend/CompilerInstance.h"
+
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace clang {
+
+IncrementalCUDADeviceParser::IncrementalCUDADeviceParser(
+ Interpreter &Interp, std::unique_ptr<CompilerInstance> Instance,
+ IncrementalParser &HostParser, llvm::LLVMContext &LLVMCtx,
+ llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> FS,
+ llvm::Error &Err)
+ : IncrementalParser(Interp, std::move(Instance), LLVMCtx, Err),
+ HostParser(HostParser), VFS(FS) {
+ if (Err)
+ return;
+ StringRef Arch = CI->getTargetOpts().CPU;
+ if (!Arch.starts_with("sm_") || Arch.substr(3).getAsInteger(10, SMVersion)) {
+ Err = llvm::joinErrors(std::move(Err), llvm::make_error<llvm::StringError>(
+ "Invalid CUDA architecture",
+ llvm::inconvertibleErrorCode()));
+ return;
+ }
+}
+
+llvm::Expected<PartialTranslationUnit &>
+IncrementalCUDADeviceParser::Parse(llvm::StringRef Input) {
+ auto PTU = IncrementalParser::Parse(Input);
+ if (!PTU)
+ return PTU.takeError();
+
+ auto PTX = GeneratePTX();
+ if (!PTX)
+ return PTX.takeError();
+
+ auto Err = GenerateFatbinary();
+ if (Err)
+ return std::move(Err);
+
+ std::string FatbinFileName =
+ "/incr_module_" + std::to_string(PTUs.size()) + ".fatbin";
+ VFS->addFile(FatbinFileName, 0,
+ llvm::MemoryBuffer::getMemBuffer(
+ llvm::StringRef(FatbinContent.data(), FatbinContent.size()),
+ "", false));
+
+ HostParser.getCI()->getCodeGenOpts().CudaGpuBinaryFileName = FatbinFileName;
+
+ FatbinContent.clear();
+
+ return PTU;
+}
+
+llvm::Expected<llvm::StringRef> IncrementalCUDADeviceParser::GeneratePTX() {
+ auto &PTU = PTUs.back();
+ std::string Error;
+
+ const llvm::Target *Target = llvm::TargetRegistry::lookupTarget(
+ PTU.TheModule->getTargetTriple(), Error);
+ if (!Target)
+ return llvm::make_error<llvm::StringError>(std::move(Error),
+ std::error_code());
+ llvm::TargetOptions TO = llvm::TargetOptions();
+ llvm::TargetMachine *TargetMachine = Target->createTargetMachine(
+ PTU.TheModule->getTargetTriple(), getCI()->getTargetOpts().CPU, "", TO,
+ llvm::Reloc::Model::PIC_);
+ PTU.TheModule->setDataLayout(TargetMachine->createDataLayout());
+
+ PTXCode.clear();
+ llvm::raw_svector_ostream dest(PTXCode);
+
+ llvm::legacy::PassManager PM;
+ if (TargetMachine->addPassesToEmitFile(PM, dest, nullptr,
+ llvm::CodeGenFileType::AssemblyFile)) {
+ return llvm::make_error<llvm::StringError>(
+ "NVPTX backend cannot produce PTX code.",
+ llvm::inconvertibleErrorCode());
+ }
+
+ if (!PM.run(*PTU.TheModule))
+ return llvm::make_error<llvm::StringError>("Failed to emit PTX code.",
+ llvm::inconvertibleErrorCode());
+
+ PTXCode += '\0';
+ while (PTXCode.size() % 8)
+ PTXCode += '\0';
+ return PTXCode.str();
+}
+
+llvm::Error IncrementalCUDADeviceParser::GenerateFatbinary() {
+ enum FatBinFlags {
+ AddressSize64 = 0x01,
+ HasDebugInfo = 0x02,
+ ProducerCuda = 0x04,
+ HostLinux = 0x10,
+ HostMac = 0x20,
+ HostWindows = 0x40
+ };
+
+ struct FatBinInnerHeader {
+ uint16_t Kind; // 0x00
+ uint16_t unknown02; // 0x02
+ uint32_t HeaderSize; // 0x04
+ uint32_t DataSize; // 0x08
+ uint32_t unknown0c; // 0x0c
+ uint32_t CompressedSize; // 0x10
+ uint32_t SubHeaderSize; // 0x14
+ uint16_t VersionMinor; // 0x18
+ uint16_t VersionMajor; // 0x1a
+ uint32_t CudaArch; // 0x1c
+ uint32_t unknown20; // 0x20
+ uint32_t unknown24; // 0x24
+ uint32_t Flags; // 0x28
+ uint32_t unknown2c; // 0x2c
+ uint32_t unknown30; // 0x30
+ uint32_t unknown34; // 0x34
+ uint32_t UncompressedSize; // 0x38
+ uint32_t unknown3c; // 0x3c
+ uint32_t unknown40; // 0x40
+ uint32_t unknown44; // 0x44
+ FatBinInnerHeader(uint32_t DataSize, uint32_t CudaArch, uint32_t Flags)
+ : Kind(1 /*PTX*/), unknown02(0x0101), HeaderSize(sizeof(*this)),
+ DataSize(DataSize), unknown0c(0), CompressedSize(0),
+ SubHeaderSize(HeaderSize - 8), VersionMinor(2), VersionMajor(4),
+ CudaArch(CudaArch), unknown20(0), unknown24(0), Flags(Flags),
+ unknown2c(0), unknown30(0), unknown34(0), UncompressedSize(0),
+ unknown3c(0), unknown40(0), unknown44(0) {}
+ };
+
+ struct FatBinHeader {
+ uint32_t Magic; // 0x00
+ uint16_t Version; // 0x04
+ uint16_t HeaderSize; // 0x06
+ uint32_t DataSize; // 0x08
+ uint32_t unknown0c; // 0x0c
+ public:
+ FatBinHeader(uint32_t DataSize)
+ : Magic(0xba55ed50), Version(1), HeaderSize(sizeof(*this)),
+ DataSize(DataSize), unknown0c(0) {}
+ };
+
+ FatBinHeader OuterHeader(sizeof(FatBinInnerHeader) + PTXCode.size());
+ FatbinContent.append((char *)&OuterHeader,
+ ((char *)&OuterHeader) + OuterHeader.HeaderSize);
+
+ FatBinInnerHeader InnerHeader(PTXCode.size(), SMVersion,
+ FatBinFlags::AddressSize64 |
+ FatBinFlags::HostLinux);
+ FatbinContent.append((char *)&InnerHeader,
+ ((char *)&InnerHeader) + InnerHeader.HeaderSize);
+
+ FatbinContent.append(PTXCode.begin(), PTXCode.end());
+
+ return llvm::Error::success();
+}
+
+IncrementalCUDADeviceParser::~IncrementalCUDADeviceParser() {}
+
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Interpreter/DeviceOffload.h b/contrib/llvm-project/clang/lib/Interpreter/DeviceOffload.h
new file mode 100644
index 000000000000..ce4f218c94c7
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Interpreter/DeviceOffload.h
@@ -0,0 +1,51 @@
+//===----------- DeviceOffload.h - Device Offloading ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements classes required for offloading to CUDA devices.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_INTERPRETER_DEVICE_OFFLOAD_H
+#define LLVM_CLANG_LIB_INTERPRETER_DEVICE_OFFLOAD_H
+
+#include "IncrementalParser.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/VirtualFileSystem.h"
+
+namespace clang {
+
+class IncrementalCUDADeviceParser : public IncrementalParser {
+public:
+ IncrementalCUDADeviceParser(
+ Interpreter &Interp, std::unique_ptr<CompilerInstance> Instance,
+ IncrementalParser &HostParser, llvm::LLVMContext &LLVMCtx,
+ llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> VFS,
+ llvm::Error &Err);
+
+ llvm::Expected<PartialTranslationUnit &>
+ Parse(llvm::StringRef Input) override;
+
+ // Generate PTX for the last PTU
+ llvm::Expected<llvm::StringRef> GeneratePTX();
+
+ // Generate fatbinary contents in memory
+ llvm::Error GenerateFatbinary();
+
+ ~IncrementalCUDADeviceParser();
+
+protected:
+ IncrementalParser &HostParser;
+ int SMVersion;
+ llvm::SmallString<1024> PTXCode;
+ llvm::SmallVector<char, 1024> FatbinContent;
+ llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> VFS;
+};
+
+} // namespace clang
+
+#endif // LLVM_CLANG_LIB_INTERPRETER_DEVICE_OFFLOAD_H
diff --git a/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.cpp b/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.cpp
index 9a368d9122bc..40bcef94797d 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.cpp
+++ b/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.cpp
@@ -12,52 +12,105 @@
#include "IncrementalExecutor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
+#include "clang/Interpreter/PartialTranslationUnit.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
+#include "llvm/ExecutionEngine/Orc/Debugging/DebuggerSupport.h"
#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
#include "llvm/ExecutionEngine/Orc/LLJIT.h"
#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
+#include "llvm/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/TargetSelect.h"
+// Force linking some of the runtimes that helps attaching to a debugger.
+LLVM_ATTRIBUTE_USED void linkComponents() {
+ llvm::errs() << (void *)&llvm_orc_registerJITLoaderGDBWrapper
+ << (void *)&llvm_orc_registerJITLoaderGDBAllocAction;
+}
+
namespace clang {
IncrementalExecutor::IncrementalExecutor(llvm::orc::ThreadSafeContext &TSC,
llvm::Error &Err,
- const llvm::Triple &Triple)
+ const clang::TargetInfo &TI)
: TSCtx(TSC) {
using namespace llvm::orc;
llvm::ErrorAsOutParameter EAO(&Err);
- auto JTMB = JITTargetMachineBuilder(Triple);
- if (auto JitOrErr = LLJITBuilder().setJITTargetMachineBuilder(JTMB).create())
+ auto JTMB = JITTargetMachineBuilder(TI.getTriple());
+ JTMB.addFeatures(TI.getTargetOpts().Features);
+ LLJITBuilder Builder;
+ Builder.setJITTargetMachineBuilder(JTMB);
+ Builder.setPrePlatformSetup(
+ [](LLJIT &J) {
+ // Try to enable debugging of JIT'd code (only works with JITLink for
+ // ELF and MachO).
+ consumeError(enableDebuggerSupport(J));
+ return llvm::Error::success();
+ });
+
+ if (auto JitOrErr = Builder.create())
Jit = std::move(*JitOrErr);
else {
Err = JitOrErr.takeError();
return;
}
-
- const char Pref = Jit->getDataLayout().getGlobalPrefix();
- // Discover symbols from the process as a fallback.
- if (auto PSGOrErr = DynamicLibrarySearchGenerator::GetForCurrentProcess(Pref))
- Jit->getMainJITDylib().addGenerator(std::move(*PSGOrErr));
- else {
- Err = PSGOrErr.takeError();
- return;
- }
}
IncrementalExecutor::~IncrementalExecutor() {}
-llvm::Error IncrementalExecutor::addModule(std::unique_ptr<llvm::Module> M) {
- return Jit->addIRModule(llvm::orc::ThreadSafeModule(std::move(M), TSCtx));
+llvm::Error IncrementalExecutor::addModule(PartialTranslationUnit &PTU) {
+ llvm::orc::ResourceTrackerSP RT =
+ Jit->getMainJITDylib().createResourceTracker();
+ ResourceTrackers[&PTU] = RT;
+
+ return Jit->addIRModule(RT, {std::move(PTU.TheModule), TSCtx});
+}
+
+llvm::Error IncrementalExecutor::removeModule(PartialTranslationUnit &PTU) {
+
+ llvm::orc::ResourceTrackerSP RT = std::move(ResourceTrackers[&PTU]);
+ if (!RT)
+ return llvm::Error::success();
+
+ ResourceTrackers.erase(&PTU);
+ if (llvm::Error Err = RT->remove())
+ return Err;
+ return llvm::Error::success();
+}
+
+// Clean up the JIT instance.
+llvm::Error IncrementalExecutor::cleanUp() {
+ // This calls the global dtors of registered modules.
+ return Jit->deinitialize(Jit->getMainJITDylib());
}
llvm::Error IncrementalExecutor::runCtors() const {
return Jit->initialize(Jit->getMainJITDylib());
}
+llvm::Expected<llvm::orc::ExecutorAddr>
+IncrementalExecutor::getSymbolAddress(llvm::StringRef Name,
+ SymbolNameKind NameKind) const {
+ using namespace llvm::orc;
+ auto SO = makeJITDylibSearchOrder({&Jit->getMainJITDylib(),
+ Jit->getPlatformJITDylib().get(),
+ Jit->getProcessSymbolsJITDylib().get()});
+
+ ExecutionSession &ES = Jit->getExecutionSession();
+
+ auto SymOrErr =
+ ES.lookup(SO, (NameKind == LinkerName) ? ES.intern(Name)
+ : Jit->mangleAndIntern(Name));
+ if (auto Err = SymOrErr.takeError())
+ return std::move(Err);
+ return SymOrErr->getAddress();
+}
+
} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h b/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h
index b4c6ddec1047..dd0a210a0614 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h
+++ b/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h
@@ -13,15 +13,15 @@
#ifndef LLVM_CLANG_LIB_INTERPRETER_INCREMENTALEXECUTOR_H
#define LLVM_CLANG_LIB_INTERPRETER_INCREMENTALEXECUTOR_H
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include <memory>
namespace llvm {
class Error;
-class Module;
namespace orc {
class LLJIT;
class ThreadSafeContext;
@@ -29,18 +29,33 @@ class ThreadSafeContext;
} // namespace llvm
namespace clang {
+
+struct PartialTranslationUnit;
+class TargetInfo;
+
class IncrementalExecutor {
using CtorDtorIterator = llvm::orc::CtorDtorIterator;
std::unique_ptr<llvm::orc::LLJIT> Jit;
llvm::orc::ThreadSafeContext &TSCtx;
+ llvm::DenseMap<const PartialTranslationUnit *, llvm::orc::ResourceTrackerSP>
+ ResourceTrackers;
+
public:
+ enum SymbolNameKind { IRName, LinkerName };
+
IncrementalExecutor(llvm::orc::ThreadSafeContext &TSC, llvm::Error &Err,
- const llvm::Triple &Triple);
+ const clang::TargetInfo &TI);
~IncrementalExecutor();
- llvm::Error addModule(std::unique_ptr<llvm::Module> M);
+ llvm::Error addModule(PartialTranslationUnit &PTU);
+ llvm::Error removeModule(PartialTranslationUnit &PTU);
llvm::Error runCtors() const;
+ llvm::Error cleanUp();
+ llvm::Expected<llvm::orc::ExecutorAddr>
+ getSymbolAddress(llvm::StringRef Name, SymbolNameKind NameKind) const;
+
+ llvm::orc::LLJIT &GetExecutionEngine() { return *Jit; }
};
} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp b/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp
index 897e2cd1aaed..370bcbfee8b0 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp
+++ b/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp
@@ -19,9 +19,9 @@
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendAction.h"
#include "clang/FrontendTool/Utils.h"
+#include "clang/Interpreter/Interpreter.h"
#include "clang/Parse/Parser.h"
#include "clang/Sema/Sema.h"
-
#include "llvm/Option/ArgList.h"
#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/Error.h"
@@ -31,6 +31,79 @@
namespace clang {
+class IncrementalASTConsumer final : public ASTConsumer {
+ Interpreter &Interp;
+ std::unique_ptr<ASTConsumer> Consumer;
+
+public:
+ IncrementalASTConsumer(Interpreter &InterpRef, std::unique_ptr<ASTConsumer> C)
+ : Interp(InterpRef), Consumer(std::move(C)) {}
+
+ bool HandleTopLevelDecl(DeclGroupRef DGR) override final {
+ if (DGR.isNull())
+ return true;
+ if (!Consumer)
+ return true;
+
+ for (Decl *D : DGR)
+ if (auto *TSD = llvm::dyn_cast<TopLevelStmtDecl>(D);
+ TSD && TSD->isSemiMissing())
+ TSD->setStmt(Interp.SynthesizeExpr(cast<Expr>(TSD->getStmt())));
+
+ return Consumer->HandleTopLevelDecl(DGR);
+ }
+ void HandleTranslationUnit(ASTContext &Ctx) override final {
+ Consumer->HandleTranslationUnit(Ctx);
+ }
+ void HandleInlineFunctionDefinition(FunctionDecl *D) override final {
+ Consumer->HandleInlineFunctionDefinition(D);
+ }
+ void HandleInterestingDecl(DeclGroupRef D) override final {
+ Consumer->HandleInterestingDecl(D);
+ }
+ void HandleTagDeclDefinition(TagDecl *D) override final {
+ Consumer->HandleTagDeclDefinition(D);
+ }
+ void HandleTagDeclRequiredDefinition(const TagDecl *D) override final {
+ Consumer->HandleTagDeclRequiredDefinition(D);
+ }
+ void HandleCXXImplicitFunctionInstantiation(FunctionDecl *D) override final {
+ Consumer->HandleCXXImplicitFunctionInstantiation(D);
+ }
+ void HandleTopLevelDeclInObjCContainer(DeclGroupRef D) override final {
+ Consumer->HandleTopLevelDeclInObjCContainer(D);
+ }
+ void HandleImplicitImportDecl(ImportDecl *D) override final {
+ Consumer->HandleImplicitImportDecl(D);
+ }
+ void CompleteTentativeDefinition(VarDecl *D) override final {
+ Consumer->CompleteTentativeDefinition(D);
+ }
+ void CompleteExternalDeclaration(VarDecl *D) override final {
+ Consumer->CompleteExternalDeclaration(D);
+ }
+ void AssignInheritanceModel(CXXRecordDecl *RD) override final {
+ Consumer->AssignInheritanceModel(RD);
+ }
+ void HandleCXXStaticMemberVarInstantiation(VarDecl *D) override final {
+ Consumer->HandleCXXStaticMemberVarInstantiation(D);
+ }
+ void HandleVTable(CXXRecordDecl *RD) override final {
+ Consumer->HandleVTable(RD);
+ }
+ ASTMutationListener *GetASTMutationListener() override final {
+ return Consumer->GetASTMutationListener();
+ }
+ ASTDeserializationListener *GetASTDeserializationListener() override final {
+ return Consumer->GetASTDeserializationListener();
+ }
+ void PrintStats() override final { Consumer->PrintStats(); }
+ bool shouldSkipFunctionBody(Decl *D) override final {
+ return Consumer->shouldSkipFunctionBody(D);
+ }
+ static bool classof(const clang::ASTConsumer *) { return true; }
+};
+
/// A custom action enabling the incremental processing functionality.
///
/// The usual \p FrontendAction expects one call to ExecuteAction and once it
@@ -59,16 +132,22 @@ public:
CI.getFrontendOpts().ProgramAction);
return Act;
case frontend::ASTDump:
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case frontend::ASTPrint:
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case frontend::ParseSyntaxOnly:
Act = CreateFrontendAction(CI);
break;
+ case frontend::PluginAction:
+ [[fallthrough]];
case frontend::EmitAssembly:
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
+ case frontend::EmitBC:
+ [[fallthrough]];
case frontend::EmitObj:
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
+ case frontend::PrintPreprocessedInput:
+ [[fallthrough]];
case frontend::EmitLLVMOnly:
Act.reset(new EmitLLVMOnlyAction(&LLVMCtx));
break;
@@ -79,23 +158,17 @@ public:
TranslationUnitKind getTranslationUnitKind() override {
return TU_Incremental;
}
+
void ExecuteAction() override {
CompilerInstance &CI = getCompilerInstance();
assert(CI.hasPreprocessor() && "No PP!");
- // FIXME: Move the truncation aspect of this into Sema, we delayed this till
- // here so the source manager would be initialized.
- if (hasCodeCompletionSupport() &&
- !CI.getFrontendOpts().CodeCompletionAt.FileName.empty())
- CI.createCodeCompletionConsumer();
-
// Use a code completion consumer?
CodeCompleteConsumer *CompletionConsumer = nullptr;
if (CI.hasCodeCompletionConsumer())
CompletionConsumer = &CI.getCodeCompletionConsumer();
Preprocessor &PP = CI.getPreprocessor();
- PP.enableIncrementalProcessing();
PP.EnterMainSourceFile();
if (!CI.hasSema())
@@ -117,7 +190,17 @@ public:
}
};
-IncrementalParser::IncrementalParser(std::unique_ptr<CompilerInstance> Instance,
+CodeGenerator *IncrementalParser::getCodeGen() const {
+ FrontendAction *WrappedAct = Act->getWrapped();
+ if (!WrappedAct->hasIRSupport())
+ return nullptr;
+ return static_cast<CodeGenAction *>(WrappedAct)->getCodeGenerator();
+}
+
+IncrementalParser::IncrementalParser() {}
+
+IncrementalParser::IncrementalParser(Interpreter &Interp,
+ std::unique_ptr<CompilerInstance> Instance,
llvm::LLVMContext &LLVMCtx,
llvm::Error &Err)
: CI(std::move(Instance)) {
@@ -126,13 +209,34 @@ IncrementalParser::IncrementalParser(std::unique_ptr<CompilerInstance> Instance,
if (Err)
return;
CI->ExecuteAction(*Act);
+ std::unique_ptr<ASTConsumer> IncrConsumer =
+ std::make_unique<IncrementalASTConsumer>(Interp, CI->takeASTConsumer());
+ CI->setASTConsumer(std::move(IncrConsumer));
Consumer = &CI->getASTConsumer();
P.reset(
new Parser(CI->getPreprocessor(), CI->getSema(), /*SkipBodies=*/false));
P->Initialize();
+
+ // An initial PTU is needed as CUDA includes some headers automatically
+ auto PTU = ParseOrWrapTopLevelDecl();
+ if (auto E = PTU.takeError()) {
+ consumeError(std::move(E)); // FIXME
+ return; // PTU.takeError();
+ }
+
+ if (CodeGenerator *CG = getCodeGen()) {
+ std::unique_ptr<llvm::Module> M(CG->ReleaseModule());
+ CG->StartModule("incr_module_" + std::to_string(PTUs.size()),
+ M->getContext());
+ PTU->TheModule = std::move(M);
+ assert(PTU->TheModule && "Failed to create initial PTU");
+ }
}
-IncrementalParser::~IncrementalParser() { Act->FinalizeAction(); }
+IncrementalParser::~IncrementalParser() {
+ P.reset();
+ Act->FinalizeAction();
+}
llvm::Expected<PartialTranslationUnit &>
IncrementalParser::ParseOrWrapTopLevelDecl() {
@@ -150,8 +254,8 @@ IncrementalParser::ParseOrWrapTopLevelDecl() {
LastPTU.TUPart = C.getTranslationUnitDecl();
// Skip previous eof due to last incremental input.
- if (P->getCurToken().is(tok::eof)) {
- P->ConsumeToken();
+ if (P->getCurToken().is(tok::annot_repl_input_end)) {
+ P->ConsumeAnyToken();
// FIXME: Clang does not call ExitScope on finalizing the regular TU, we
// might want to do that around HandleEndOfTranslationUnit.
P->ExitScope();
@@ -162,11 +266,9 @@ IncrementalParser::ParseOrWrapTopLevelDecl() {
}
Parser::DeclGroupPtrTy ADecl;
- for (bool AtEOF = P->ParseFirstTopLevelDecl(ADecl); !AtEOF;
- AtEOF = P->ParseTopLevelDecl(ADecl)) {
- // If we got a null return and something *was* parsed, ignore it. This
- // is due to a top-level semicolon, an action override, or a parse error
- // skipping something.
+ Sema::ModuleImportState ImportState;
+ for (bool AtEOF = P->ParseFirstTopLevelDecl(ADecl, ImportState); !AtEOF;
+ AtEOF = P->ParseTopLevelDecl(ADecl, ImportState)) {
if (ADecl && !Consumer->HandleTopLevelDecl(ADecl.get()))
return llvm::make_error<llvm::StringError>("Parsing failed. "
"The consumer rejected a decl",
@@ -175,30 +277,12 @@ IncrementalParser::ParseOrWrapTopLevelDecl() {
DiagnosticsEngine &Diags = getCI()->getDiagnostics();
if (Diags.hasErrorOccurred()) {
- TranslationUnitDecl *MostRecentTU = C.getTranslationUnitDecl();
- TranslationUnitDecl *PreviousTU = MostRecentTU->getPreviousDecl();
- assert(PreviousTU && "Must have a TU from the ASTContext initialization!");
- TranslationUnitDecl *FirstTU = MostRecentTU->getFirstDecl();
- assert(FirstTU);
- FirstTU->RedeclLink.setLatest(PreviousTU);
- C.TUDecl = PreviousTU;
- S.TUScope->setEntity(PreviousTU);
-
- // Clean up the lookup table
- if (StoredDeclsMap *Map = PreviousTU->getLookupPtr()) {
- for (auto I = Map->begin(); I != Map->end(); ++I) {
- StoredDeclsList &List = I->second;
- DeclContextLookupResult R = List.getLookupResult();
- for (NamedDecl *D : R)
- if (D->getTranslationUnitDecl() == MostRecentTU)
- List.remove(D);
- if (List.isNull())
- Map->erase(I);
- }
- }
+ PartialTranslationUnit MostRecentPTU = {C.getTranslationUnitDecl(),
+ nullptr};
+ CleanUpPTU(MostRecentPTU);
- // FIXME: Do not reset the pragma handlers.
- Diags.Reset();
+ Diags.Reset(/*soft=*/true);
+ Diags.getClient()->clear();
return llvm::make_error<llvm::StringError>("Parsing failed.",
std::error_code());
}
@@ -217,14 +301,6 @@ IncrementalParser::ParseOrWrapTopLevelDecl() {
return LastPTU;
}
-static CodeGenerator *getCodeGen(FrontendAction *Act) {
- IncrementalAction *IncrAct = static_cast<IncrementalAction *>(Act);
- FrontendAction *WrappedAct = IncrAct->getWrapped();
- if (!WrappedAct->hasIRSupport())
- return nullptr;
- return static_cast<CodeGenAction *>(WrappedAct)->getCodeGenerator();
-}
-
llvm::Expected<PartialTranslationUnit &>
IncrementalParser::Parse(llvm::StringRef input) {
Preprocessor &PP = CI->getPreprocessor();
@@ -254,7 +330,7 @@ IncrementalParser::Parse(llvm::StringRef input) {
/*LoadedOffset=*/0, NewLoc);
// NewLoc only used for diags.
- if (PP.EnterSourceFile(FID, /*DirLookup=*/0, NewLoc))
+ if (PP.EnterSourceFile(FID, /*DirLookup=*/nullptr, NewLoc))
return llvm::make_error<llvm::StringError>("Parsing failed. "
"Cannot enter source file.",
std::error_code());
@@ -271,22 +347,51 @@ IncrementalParser::Parse(llvm::StringRef input) {
Token Tok;
do {
PP.Lex(Tok);
- } while (Tok.isNot(tok::eof));
+ } while (Tok.isNot(tok::annot_repl_input_end));
+ } else {
+ Token AssertTok;
+ PP.Lex(AssertTok);
+ assert(AssertTok.is(tok::annot_repl_input_end) &&
+ "Lexer must be EOF when starting incremental parse!");
}
- Token AssertTok;
- PP.Lex(AssertTok);
- assert(AssertTok.is(tok::eof) &&
- "Lexer must be EOF when starting incremental parse!");
+ if (std::unique_ptr<llvm::Module> M = GenModule())
+ PTU->TheModule = std::move(M);
+
+ return PTU;
+}
- if (CodeGenerator *CG = getCodeGen(Act.get())) {
+std::unique_ptr<llvm::Module> IncrementalParser::GenModule() {
+ static unsigned ID = 0;
+ if (CodeGenerator *CG = getCodeGen()) {
std::unique_ptr<llvm::Module> M(CG->ReleaseModule());
- CG->StartModule("incr_module_" + std::to_string(PTUs.size()),
- M->getContext());
+ CG->StartModule("incr_module_" + std::to_string(ID++), M->getContext());
+ return M;
+ }
+ return nullptr;
+}
- PTU->TheModule = std::move(M);
+void IncrementalParser::CleanUpPTU(PartialTranslationUnit &PTU) {
+ TranslationUnitDecl *MostRecentTU = PTU.TUPart;
+ TranslationUnitDecl *FirstTU = MostRecentTU->getFirstDecl();
+ if (StoredDeclsMap *Map = FirstTU->getPrimaryContext()->getLookupPtr()) {
+ for (auto I = Map->begin(); I != Map->end(); ++I) {
+ StoredDeclsList &List = I->second;
+ DeclContextLookupResult R = List.getLookupResult();
+ for (NamedDecl *D : R) {
+ if (D->getTranslationUnitDecl() == MostRecentTU) {
+ List.remove(D);
+ }
+ }
+ if (List.isNull())
+ Map->erase(I);
+ }
}
+}
- return PTU;
+llvm::StringRef IncrementalParser::GetMangledName(GlobalDecl GD) const {
+ CodeGenerator *CG = getCodeGen();
+ assert(CG);
+ return CG->GetMangledName(GD);
}
} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h b/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h
index aa8142cbe493..e13b74c7f659 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h
+++ b/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h
@@ -13,6 +13,7 @@
#ifndef LLVM_CLANG_LIB_INTERPRETER_INCREMENTALPARSER_H
#define LLVM_CLANG_LIB_INTERPRETER_INCREMENTALPARSER_H
+#include "clang/AST/GlobalDecl.h"
#include "clang/Interpreter/PartialTranslationUnit.h"
#include "llvm/ADT/ArrayRef.h"
@@ -23,21 +24,20 @@
#include <memory>
namespace llvm {
class LLVMContext;
-}
+} // namespace llvm
namespace clang {
class ASTConsumer;
-class CompilerInstance;
class CodeGenerator;
-class DeclGroupRef;
-class FrontendAction;
+class CompilerInstance;
class IncrementalAction;
+class Interpreter;
class Parser;
-
/// Provides support for incremental compilation. Keeps track of the state
/// changes between the subsequent incremental input.
///
class IncrementalParser {
+protected:
/// Long-lived, incremental parsing action.
std::unique_ptr<IncrementalAction> Act;
@@ -57,17 +57,31 @@ class IncrementalParser {
/// of code.
std::list<PartialTranslationUnit> PTUs;
+ IncrementalParser();
+
public:
- IncrementalParser(std::unique_ptr<CompilerInstance> Instance,
+ IncrementalParser(Interpreter &Interp,
+ std::unique_ptr<CompilerInstance> Instance,
llvm::LLVMContext &LLVMCtx, llvm::Error &Err);
- ~IncrementalParser();
+ virtual ~IncrementalParser();
- const CompilerInstance *getCI() const { return CI.get(); }
+ CompilerInstance *getCI() { return CI.get(); }
+ CodeGenerator *getCodeGen() const;
/// Parses incremental input by creating an in-memory file.
///\returns a \c PartialTranslationUnit which holds information about the
/// \c TranslationUnitDecl and \c llvm::Module corresponding to the input.
- llvm::Expected<PartialTranslationUnit &> Parse(llvm::StringRef Input);
+ virtual llvm::Expected<PartialTranslationUnit &> Parse(llvm::StringRef Input);
+
+ /// Uses the CodeGenModule mangled name cache and avoids recomputing.
+ ///\returns the mangled name of a \c GD.
+ llvm::StringRef GetMangledName(GlobalDecl GD) const;
+
+ void CleanUpPTU(PartialTranslationUnit &PTU);
+
+ std::list<PartialTranslationUnit> &getPTUs() { return PTUs; }
+
+ std::unique_ptr<llvm::Module> GenModule();
private:
llvm::Expected<PartialTranslationUnit &> ParseOrWrapTopLevelDecl();
diff --git a/contrib/llvm-project/clang/lib/Interpreter/Interpreter.cpp b/contrib/llvm-project/clang/lib/Interpreter/Interpreter.cpp
index 937504f34739..9f97a3c6b0be 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/Interpreter.cpp
+++ b/contrib/llvm-project/clang/lib/Interpreter/Interpreter.cpp
@@ -11,13 +11,17 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Interpreter/Interpreter.h"
-
+#include "DeviceOffload.h"
#include "IncrementalExecutor.h"
#include "IncrementalParser.h"
+#include "InterpreterUtils.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/Mangle.h"
+#include "clang/AST/TypeVisitor.h"
+#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/CodeGen/CodeGenAction.h"
#include "clang/CodeGen/ModuleBuilder.h"
#include "clang/CodeGen/ObjectFilePCHContainerOperations.h"
#include "clang/Driver/Compilation.h"
@@ -27,16 +31,21 @@
#include "clang/Driver/Tool.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/TextDiagnosticBuffer.h"
+#include "clang/Interpreter/Interpreter.h"
+#include "clang/Interpreter/Value.h"
#include "clang/Lex/PreprocessorOptions.h"
-
+#include "clang/Sema/Lookup.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/ExecutionEngine/Orc/LLJIT.h"
#include "llvm/IR/Module.h"
-#include "llvm/Support/Host.h"
-
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Host.h"
using namespace clang;
// FIXME: Figure out how to unify with namespace init_convenience from
-// tools/clang-import-test/clang-import-test.cpp and
-// examples/clang-interpreter/main.cpp
+// tools/clang-import-test/clang-import-test.cpp
namespace {
/// Retrieves the clang CC1 specific flags out of the compilation's jobs.
/// \returns NULL on error.
@@ -47,14 +56,14 @@ GetCC1Arguments(DiagnosticsEngine *Diagnostics,
// failed. Extract that job from the Compilation.
const driver::JobList &Jobs = Compilation->getJobs();
if (!Jobs.size() || !isa<driver::Command>(*Jobs.begin()))
- return llvm::createStringError(std::errc::state_not_recoverable,
+ return llvm::createStringError(llvm::errc::not_supported,
"Driver initialization failed. "
"Unable to create a driver job");
// The one job we find should be to invoke clang again.
const driver::Command *Cmd = cast<driver::Command>(&(*Jobs.begin()));
if (llvm::StringRef(Cmd->getCreator().getName()) != "clang")
- return llvm::createStringError(std::errc::state_not_recoverable,
+ return llvm::createStringError(llvm::errc::not_supported,
"Driver initialization failed");
return &Cmd->getArguments();
@@ -77,8 +86,7 @@ CreateCI(const llvm::opt::ArgStringList &Argv) {
TextDiagnosticBuffer *DiagsBuffer = new TextDiagnosticBuffer;
DiagnosticsEngine Diags(DiagID, &*DiagOpts, DiagsBuffer);
bool Success = CompilerInvocation::CreateFromArgs(
- Clang->getInvocation(), llvm::makeArrayRef(Argv.begin(), Argv.size()),
- Diags);
+ Clang->getInvocation(), llvm::ArrayRef(Argv.begin(), Argv.size()), Diags);
// Infer the builtin include path if unspecified.
if (Clang->getHeaderSearchOpts().UseBuiltinIncludes &&
@@ -89,13 +97,13 @@ CreateCI(const llvm::opt::ArgStringList &Argv) {
// Create the actual diagnostics engine.
Clang->createDiagnostics();
if (!Clang->hasDiagnostics())
- return llvm::createStringError(std::errc::state_not_recoverable,
+ return llvm::createStringError(llvm::errc::not_supported,
"Initialization failed. "
"Unable to create diagnostics engine");
DiagsBuffer->FlushDiagnostics(Clang->getDiagnostics());
if (!Success)
- return llvm::createStringError(std::errc::state_not_recoverable,
+ return llvm::createStringError(llvm::errc::not_supported,
"Initialization failed. "
"Unable to flush diagnostics");
@@ -106,12 +114,18 @@ CreateCI(const llvm::opt::ArgStringList &Argv) {
Clang->setTarget(TargetInfo::CreateTargetInfo(
Clang->getDiagnostics(), Clang->getInvocation().TargetOpts));
if (!Clang->hasTarget())
- return llvm::createStringError(std::errc::state_not_recoverable,
+ return llvm::createStringError(llvm::errc::not_supported,
"Initialization failed. "
"Target is missing");
Clang->getTarget().adjust(Clang->getDiagnostics(), Clang->getLangOpts());
+ // Don't clear the AST before backend codegen since we do codegen multiple
+ // times, reusing the same AST.
+ Clang->getCodeGenOpts().ClearASTBeforeBackend = false;
+
+ Clang->getFrontendOpts().DisableFree = false;
+ Clang->getCodeGenOpts().DisableFree = false;
return std::move(Clang);
}
@@ -131,36 +145,27 @@ IncrementalCompilerBuilder::create(std::vector<const char *> &ClangArgv) {
// specified. By prepending we allow users to override the default
// action and use other actions in incremental mode.
// FIXME: Print proper driver diagnostics if the driver flags are wrong.
- ClangArgv.insert(ClangArgv.begin() + 1, "-c");
-
- if (!llvm::is_contained(ClangArgv, " -x")) {
- // We do C++ by default; append right after argv[0] if no "-x" given
- ClangArgv.push_back("-x");
- ClangArgv.push_back("c++");
- }
+ // We do C++ by default; append right after argv[0] if no "-x" given
+ ClangArgv.insert(ClangArgv.end(), "-Xclang");
+ ClangArgv.insert(ClangArgv.end(), "-fincremental-extensions");
+ ClangArgv.insert(ClangArgv.end(), "-c");
// Put a dummy C++ file on to ensure there's at least one compile job for the
// driver to construct.
ClangArgv.push_back("<<< inputs >>>");
- CompilerInvocation Invocation;
// Buffer diagnostics from argument parsing so that we can output them using a
// well formed diagnostic object.
IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
- IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts =
+ CreateAndPopulateDiagOpts(ClangArgv);
TextDiagnosticBuffer *DiagsBuffer = new TextDiagnosticBuffer;
DiagnosticsEngine Diags(DiagID, &*DiagOpts, DiagsBuffer);
- unsigned MissingArgIndex, MissingArgCount;
- const llvm::opt::OptTable &Opts = driver::getDriverOptTable();
- llvm::opt::InputArgList ParsedArgs =
- Opts.ParseArgs(ArrayRef<const char *>(ClangArgv).slice(1),
- MissingArgIndex, MissingArgCount);
- ParseDiagnosticArgs(*DiagOpts, ParsedArgs, &Diags);
driver::Driver Driver(/*MainBinaryName=*/ClangArgv[0],
llvm::sys::getProcessTriple(), Diags);
Driver.setCheckInputsExist(false); // the input comes from mem buffers
- llvm::ArrayRef<const char *> RF = llvm::makeArrayRef(ClangArgv);
+ llvm::ArrayRef<const char *> RF = llvm::ArrayRef(ClangArgv);
std::unique_ptr<driver::Compilation> Compilation(Driver.BuildCompilation(RF));
if (Compilation->getArgs().hasArg(driver::options::OPT_v))
@@ -173,16 +178,97 @@ IncrementalCompilerBuilder::create(std::vector<const char *> &ClangArgv) {
return CreateCI(**ErrOrCC1Args);
}
+llvm::Expected<std::unique_ptr<CompilerInstance>>
+IncrementalCompilerBuilder::CreateCpp() {
+ std::vector<const char *> Argv;
+ Argv.reserve(5 + 1 + UserArgs.size());
+ Argv.push_back("-xc++");
+ Argv.insert(Argv.end(), UserArgs.begin(), UserArgs.end());
+
+ return IncrementalCompilerBuilder::create(Argv);
+}
+
+llvm::Expected<std::unique_ptr<CompilerInstance>>
+IncrementalCompilerBuilder::createCuda(bool device) {
+ std::vector<const char *> Argv;
+ Argv.reserve(5 + 4 + UserArgs.size());
+
+ Argv.push_back("-xcuda");
+ if (device)
+ Argv.push_back("--cuda-device-only");
+ else
+ Argv.push_back("--cuda-host-only");
+
+ std::string SDKPathArg = "--cuda-path=";
+ if (!CudaSDKPath.empty()) {
+ SDKPathArg += CudaSDKPath;
+ Argv.push_back(SDKPathArg.c_str());
+ }
+
+ std::string ArchArg = "--offload-arch=";
+ if (!OffloadArch.empty()) {
+ ArchArg += OffloadArch;
+ Argv.push_back(ArchArg.c_str());
+ }
+
+ Argv.insert(Argv.end(), UserArgs.begin(), UserArgs.end());
+
+ return IncrementalCompilerBuilder::create(Argv);
+}
+
+llvm::Expected<std::unique_ptr<CompilerInstance>>
+IncrementalCompilerBuilder::CreateCudaDevice() {
+ return IncrementalCompilerBuilder::createCuda(true);
+}
+
+llvm::Expected<std::unique_ptr<CompilerInstance>>
+IncrementalCompilerBuilder::CreateCudaHost() {
+ return IncrementalCompilerBuilder::createCuda(false);
+}
+
Interpreter::Interpreter(std::unique_ptr<CompilerInstance> CI,
llvm::Error &Err) {
llvm::ErrorAsOutParameter EAO(&Err);
auto LLVMCtx = std::make_unique<llvm::LLVMContext>();
TSCtx = std::make_unique<llvm::orc::ThreadSafeContext>(std::move(LLVMCtx));
- IncrParser = std::make_unique<IncrementalParser>(std::move(CI),
+ IncrParser = std::make_unique<IncrementalParser>(*this, std::move(CI),
*TSCtx->getContext(), Err);
}
-Interpreter::~Interpreter() {}
+Interpreter::~Interpreter() {
+ if (IncrExecutor) {
+ if (llvm::Error Err = IncrExecutor->cleanUp())
+ llvm::report_fatal_error(
+ llvm::Twine("Failed to clean up IncrementalExecutor: ") +
+ toString(std::move(Err)));
+ }
+}
+
+// These better to put in a runtime header but we can't. This is because we
+// can't find the precise resource directory in unittests so we have to hard
+// code them.
+const char *const Runtimes = R"(
+#ifdef __cplusplus
+ void *__clang_Interpreter_SetValueWithAlloc(void*, void*, void*);
+ void __clang_Interpreter_SetValueNoAlloc(void*, void*, void*);
+ void __clang_Interpreter_SetValueNoAlloc(void*, void*, void*, void*);
+ void __clang_Interpreter_SetValueNoAlloc(void*, void*, void*, float);
+ void __clang_Interpreter_SetValueNoAlloc(void*, void*, void*, double);
+ void __clang_Interpreter_SetValueNoAlloc(void*, void*, void*, long double);
+ void __clang_Interpreter_SetValueNoAlloc(void*,void*,void*,unsigned long long);
+ struct __clang_Interpreter_NewTag{} __ci_newtag;
+ void* operator new(__SIZE_TYPE__, void* __p, __clang_Interpreter_NewTag) noexcept;
+ template <class T, class = T (*)() /*disable for arrays*/>
+ void __clang_Interpreter_SetValueCopyArr(T* Src, void* Placement, unsigned long Size) {
+ for (auto Idx = 0; Idx < Size; ++Idx)
+ new ((void*)(((T*)Placement) + Idx), __ci_newtag) T(Src[Idx]);
+ }
+ template <class T, unsigned long N>
+ void __clang_Interpreter_SetValueCopyArr(const T (*Src)[N], void* Placement, unsigned long Size) {
+ __clang_Interpreter_SetValueCopyArr(Src[0], Placement, Size);
+ }
+#endif // __cplusplus
+)";
llvm::Expected<std::unique_ptr<Interpreter>>
Interpreter::create(std::unique_ptr<CompilerInstance> CI) {
@@ -191,31 +277,115 @@ Interpreter::create(std::unique_ptr<CompilerInstance> CI) {
std::unique_ptr<Interpreter>(new Interpreter(std::move(CI), Err));
if (Err)
return std::move(Err);
+
+ auto PTU = Interp->Parse(Runtimes);
+ if (!PTU)
+ return PTU.takeError();
+
+ Interp->ValuePrintingInfo.resize(4);
+ // FIXME: This is a ugly hack. Undo command checks its availability by looking
+ // at the size of the PTU list. However we have parsed something in the
+ // beginning of the REPL so we have to mark them as 'Irrevocable'.
+ Interp->InitPTUSize = Interp->IncrParser->getPTUs().size();
return std::move(Interp);
}
+llvm::Expected<std::unique_ptr<Interpreter>>
+Interpreter::createWithCUDA(std::unique_ptr<CompilerInstance> CI,
+ std::unique_ptr<CompilerInstance> DCI) {
+ // avoid writing fat binary to disk using an in-memory virtual file system
+ llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> IMVFS =
+ std::make_unique<llvm::vfs::InMemoryFileSystem>();
+ llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> OverlayVFS =
+ std::make_unique<llvm::vfs::OverlayFileSystem>(
+ llvm::vfs::getRealFileSystem());
+ OverlayVFS->pushOverlay(IMVFS);
+ CI->createFileManager(OverlayVFS);
+
+ auto Interp = Interpreter::create(std::move(CI));
+ if (auto E = Interp.takeError())
+ return std::move(E);
+
+ llvm::Error Err = llvm::Error::success();
+ auto DeviceParser = std::make_unique<IncrementalCUDADeviceParser>(
+ **Interp, std::move(DCI), *(*Interp)->IncrParser.get(),
+ *(*Interp)->TSCtx->getContext(), IMVFS, Err);
+ if (Err)
+ return std::move(Err);
+
+ (*Interp)->DeviceParser = std::move(DeviceParser);
+
+ return Interp;
+}
+
const CompilerInstance *Interpreter::getCompilerInstance() const {
return IncrParser->getCI();
}
+CompilerInstance *Interpreter::getCompilerInstance() {
+ return IncrParser->getCI();
+}
+
+llvm::Expected<llvm::orc::LLJIT &> Interpreter::getExecutionEngine() {
+ if (!IncrExecutor) {
+ if (auto Err = CreateExecutor())
+ return std::move(Err);
+ }
+
+ return IncrExecutor->GetExecutionEngine();
+}
+
+ASTContext &Interpreter::getASTContext() {
+ return getCompilerInstance()->getASTContext();
+}
+
+const ASTContext &Interpreter::getASTContext() const {
+ return getCompilerInstance()->getASTContext();
+}
+
+size_t Interpreter::getEffectivePTUSize() const {
+ std::list<PartialTranslationUnit> &PTUs = IncrParser->getPTUs();
+ assert(PTUs.size() >= InitPTUSize && "empty PTU list?");
+ return PTUs.size() - InitPTUSize;
+}
+
llvm::Expected<PartialTranslationUnit &>
Interpreter::Parse(llvm::StringRef Code) {
+ // If we have a device parser, parse it first.
+ // The generated code will be included in the host compilation
+ if (DeviceParser) {
+ auto DevicePTU = DeviceParser->Parse(Code);
+ if (auto E = DevicePTU.takeError())
+ return std::move(E);
+ }
+
+ // Tell the interpreter sliently ignore unused expressions since value
+ // printing could cause it.
+ getCompilerInstance()->getDiagnostics().setSeverity(
+ clang::diag::warn_unused_expr, diag::Severity::Ignored, SourceLocation());
return IncrParser->Parse(Code);
}
+llvm::Error Interpreter::CreateExecutor() {
+ const clang::TargetInfo &TI =
+ getCompilerInstance()->getASTContext().getTargetInfo();
+ llvm::Error Err = llvm::Error::success();
+ auto Executor = std::make_unique<IncrementalExecutor>(*TSCtx, Err, TI);
+ if (!Err)
+ IncrExecutor = std::move(Executor);
+
+ return Err;
+}
+
llvm::Error Interpreter::Execute(PartialTranslationUnit &T) {
assert(T.TheModule);
if (!IncrExecutor) {
- const llvm::Triple &Triple =
- getCompilerInstance()->getASTContext().getTargetInfo().getTriple();
- llvm::Error Err = llvm::Error::success();
- IncrExecutor = std::make_unique<IncrementalExecutor>(*TSCtx, Err, Triple);
-
+ auto Err = CreateExecutor();
if (Err)
return Err;
}
// FIXME: Add a callback to retain the llvm::Module once the JIT is done.
- if (auto Err = IncrExecutor->addModule(std::move(T.TheModule)))
+ if (auto Err = IncrExecutor->addModule(T))
return Err;
if (auto Err = IncrExecutor->runCtors())
@@ -223,3 +393,445 @@ llvm::Error Interpreter::Execute(PartialTranslationUnit &T) {
return llvm::Error::success();
}
+
+llvm::Error Interpreter::ParseAndExecute(llvm::StringRef Code, Value *V) {
+
+ auto PTU = Parse(Code);
+ if (!PTU)
+ return PTU.takeError();
+ if (PTU->TheModule)
+ if (llvm::Error Err = Execute(*PTU))
+ return Err;
+
+ if (LastValue.isValid()) {
+ if (!V) {
+ LastValue.dump();
+ LastValue.clear();
+ } else
+ *V = std::move(LastValue);
+ }
+ return llvm::Error::success();
+}
+
+llvm::Expected<llvm::orc::ExecutorAddr>
+Interpreter::getSymbolAddress(GlobalDecl GD) const {
+ if (!IncrExecutor)
+ return llvm::make_error<llvm::StringError>("Operation failed. "
+ "No execution engine",
+ std::error_code());
+ llvm::StringRef MangledName = IncrParser->GetMangledName(GD);
+ return getSymbolAddress(MangledName);
+}
+
+llvm::Expected<llvm::orc::ExecutorAddr>
+Interpreter::getSymbolAddress(llvm::StringRef IRName) const {
+ if (!IncrExecutor)
+ return llvm::make_error<llvm::StringError>("Operation failed. "
+ "No execution engine",
+ std::error_code());
+
+ return IncrExecutor->getSymbolAddress(IRName, IncrementalExecutor::IRName);
+}
+
+llvm::Expected<llvm::orc::ExecutorAddr>
+Interpreter::getSymbolAddressFromLinkerName(llvm::StringRef Name) const {
+ if (!IncrExecutor)
+ return llvm::make_error<llvm::StringError>("Operation failed. "
+ "No execution engine",
+ std::error_code());
+
+ return IncrExecutor->getSymbolAddress(Name, IncrementalExecutor::LinkerName);
+}
+
+llvm::Error Interpreter::Undo(unsigned N) {
+
+ std::list<PartialTranslationUnit> &PTUs = IncrParser->getPTUs();
+ if (N > getEffectivePTUSize())
+ return llvm::make_error<llvm::StringError>("Operation failed. "
+ "Too many undos",
+ std::error_code());
+ for (unsigned I = 0; I < N; I++) {
+ if (IncrExecutor) {
+ if (llvm::Error Err = IncrExecutor->removeModule(PTUs.back()))
+ return Err;
+ }
+
+ IncrParser->CleanUpPTU(PTUs.back());
+ PTUs.pop_back();
+ }
+ return llvm::Error::success();
+}
+
+llvm::Error Interpreter::LoadDynamicLibrary(const char *name) {
+ auto EE = getExecutionEngine();
+ if (!EE)
+ return EE.takeError();
+
+ auto &DL = EE->getDataLayout();
+
+ if (auto DLSG = llvm::orc::DynamicLibrarySearchGenerator::Load(
+ name, DL.getGlobalPrefix()))
+ EE->getMainJITDylib().addGenerator(std::move(*DLSG));
+ else
+ return DLSG.takeError();
+
+ return llvm::Error::success();
+}
+
+llvm::Expected<llvm::orc::ExecutorAddr>
+Interpreter::CompileDtorCall(CXXRecordDecl *CXXRD) {
+ assert(CXXRD && "Cannot compile a destructor for a nullptr");
+ if (auto Dtor = Dtors.find(CXXRD); Dtor != Dtors.end())
+ return Dtor->getSecond();
+
+ if (CXXRD->hasIrrelevantDestructor())
+ return llvm::orc::ExecutorAddr{};
+
+ CXXDestructorDecl *DtorRD =
+ getCompilerInstance()->getSema().LookupDestructor(CXXRD);
+
+ llvm::StringRef Name =
+ IncrParser->GetMangledName(GlobalDecl(DtorRD, Dtor_Base));
+ auto AddrOrErr = getSymbolAddress(Name);
+ if (!AddrOrErr)
+ return AddrOrErr.takeError();
+
+ Dtors[CXXRD] = *AddrOrErr;
+ return AddrOrErr;
+}
+
+static constexpr llvm::StringRef MagicRuntimeInterface[] = {
+ "__clang_Interpreter_SetValueNoAlloc",
+ "__clang_Interpreter_SetValueWithAlloc",
+ "__clang_Interpreter_SetValueCopyArr", "__ci_newtag"};
+
+bool Interpreter::FindRuntimeInterface() {
+ if (llvm::all_of(ValuePrintingInfo, [](Expr *E) { return E != nullptr; }))
+ return true;
+
+ Sema &S = getCompilerInstance()->getSema();
+ ASTContext &Ctx = S.getASTContext();
+
+ auto LookupInterface = [&](Expr *&Interface, llvm::StringRef Name) {
+ LookupResult R(S, &Ctx.Idents.get(Name), SourceLocation(),
+ Sema::LookupOrdinaryName, Sema::ForVisibleRedeclaration);
+ S.LookupQualifiedName(R, Ctx.getTranslationUnitDecl());
+ if (R.empty())
+ return false;
+
+ CXXScopeSpec CSS;
+ Interface = S.BuildDeclarationNameExpr(CSS, R, /*ADL=*/false).get();
+ return true;
+ };
+
+ if (!LookupInterface(ValuePrintingInfo[NoAlloc],
+ MagicRuntimeInterface[NoAlloc]))
+ return false;
+ if (!LookupInterface(ValuePrintingInfo[WithAlloc],
+ MagicRuntimeInterface[WithAlloc]))
+ return false;
+ if (!LookupInterface(ValuePrintingInfo[CopyArray],
+ MagicRuntimeInterface[CopyArray]))
+ return false;
+ if (!LookupInterface(ValuePrintingInfo[NewTag],
+ MagicRuntimeInterface[NewTag]))
+ return false;
+ return true;
+}
+
+namespace {
+
+class RuntimeInterfaceBuilder
+ : public TypeVisitor<RuntimeInterfaceBuilder, Interpreter::InterfaceKind> {
+ clang::Interpreter &Interp;
+ ASTContext &Ctx;
+ Sema &S;
+ Expr *E;
+ llvm::SmallVector<Expr *, 3> Args;
+
+public:
+ RuntimeInterfaceBuilder(clang::Interpreter &In, ASTContext &C, Sema &SemaRef,
+ Expr *VE, ArrayRef<Expr *> FixedArgs)
+ : Interp(In), Ctx(C), S(SemaRef), E(VE) {
+ // The Interpreter* parameter and the out parameter `OutVal`.
+ for (Expr *E : FixedArgs)
+ Args.push_back(E);
+
+ // Get rid of ExprWithCleanups.
+ if (auto *EWC = llvm::dyn_cast_if_present<ExprWithCleanups>(E))
+ E = EWC->getSubExpr();
+ }
+
+ ExprResult getCall() {
+ QualType Ty = E->getType();
+ QualType DesugaredTy = Ty.getDesugaredType(Ctx);
+
+ // For lvalue struct, we treat it as a reference.
+ if (DesugaredTy->isRecordType() && E->isLValue()) {
+ DesugaredTy = Ctx.getLValueReferenceType(DesugaredTy);
+ Ty = Ctx.getLValueReferenceType(Ty);
+ }
+
+ Expr *TypeArg =
+ CStyleCastPtrExpr(S, Ctx.VoidPtrTy, (uintptr_t)Ty.getAsOpaquePtr());
+ // The QualType parameter `OpaqueType`, represented as `void*`.
+ Args.push_back(TypeArg);
+
+ // We push the last parameter based on the type of the Expr. Note we need
+ // special care for rvalue struct.
+ Interpreter::InterfaceKind Kind = Visit(&*DesugaredTy);
+ switch (Kind) {
+ case Interpreter::InterfaceKind::WithAlloc:
+ case Interpreter::InterfaceKind::CopyArray: {
+ // __clang_Interpreter_SetValueWithAlloc.
+ ExprResult AllocCall = S.ActOnCallExpr(
+ /*Scope=*/nullptr,
+ Interp.getValuePrintingInfo()[Interpreter::InterfaceKind::WithAlloc],
+ E->getBeginLoc(), Args, E->getEndLoc());
+ assert(!AllocCall.isInvalid() && "Can't create runtime interface call!");
+
+ TypeSourceInfo *TSI = Ctx.getTrivialTypeSourceInfo(Ty, SourceLocation());
+
+ // Force CodeGen to emit destructor.
+ if (auto *RD = Ty->getAsCXXRecordDecl()) {
+ auto *Dtor = S.LookupDestructor(RD);
+ Dtor->addAttr(UsedAttr::CreateImplicit(Ctx));
+ Interp.getCompilerInstance()->getASTConsumer().HandleTopLevelDecl(
+ DeclGroupRef(Dtor));
+ }
+
+ // __clang_Interpreter_SetValueCopyArr.
+ if (Kind == Interpreter::InterfaceKind::CopyArray) {
+ const auto *ConstantArrTy =
+ cast<ConstantArrayType>(DesugaredTy.getTypePtr());
+ size_t ArrSize = Ctx.getConstantArrayElementCount(ConstantArrTy);
+ Expr *ArrSizeExpr = IntegerLiteralExpr(Ctx, ArrSize);
+ Expr *Args[] = {E, AllocCall.get(), ArrSizeExpr};
+ return S.ActOnCallExpr(
+ /*Scope *=*/nullptr,
+ Interp
+ .getValuePrintingInfo()[Interpreter::InterfaceKind::CopyArray],
+ SourceLocation(), Args, SourceLocation());
+ }
+ Expr *Args[] = {
+ AllocCall.get(),
+ Interp.getValuePrintingInfo()[Interpreter::InterfaceKind::NewTag]};
+ ExprResult CXXNewCall = S.BuildCXXNew(
+ E->getSourceRange(),
+ /*UseGlobal=*/true, /*PlacementLParen=*/SourceLocation(), Args,
+ /*PlacementRParen=*/SourceLocation(),
+ /*TypeIdParens=*/SourceRange(), TSI->getType(), TSI, std::nullopt,
+ E->getSourceRange(), E);
+
+ assert(!CXXNewCall.isInvalid() &&
+ "Can't create runtime placement new call!");
+
+ return S.ActOnFinishFullExpr(CXXNewCall.get(),
+ /*DiscardedValue=*/false);
+ }
+ // __clang_Interpreter_SetValueNoAlloc.
+ case Interpreter::InterfaceKind::NoAlloc: {
+ return S.ActOnCallExpr(
+ /*Scope=*/nullptr,
+ Interp.getValuePrintingInfo()[Interpreter::InterfaceKind::NoAlloc],
+ E->getBeginLoc(), Args, E->getEndLoc());
+ }
+ default:
+ llvm_unreachable("Unhandled Interpreter::InterfaceKind");
+ }
+ }
+
+ Interpreter::InterfaceKind VisitRecordType(const RecordType *Ty) {
+ return Interpreter::InterfaceKind::WithAlloc;
+ }
+
+ Interpreter::InterfaceKind
+ VisitMemberPointerType(const MemberPointerType *Ty) {
+ return Interpreter::InterfaceKind::WithAlloc;
+ }
+
+ Interpreter::InterfaceKind
+ VisitConstantArrayType(const ConstantArrayType *Ty) {
+ return Interpreter::InterfaceKind::CopyArray;
+ }
+
+ Interpreter::InterfaceKind
+ VisitFunctionProtoType(const FunctionProtoType *Ty) {
+ HandlePtrType(Ty);
+ return Interpreter::InterfaceKind::NoAlloc;
+ }
+
+ Interpreter::InterfaceKind VisitPointerType(const PointerType *Ty) {
+ HandlePtrType(Ty);
+ return Interpreter::InterfaceKind::NoAlloc;
+ }
+
+ Interpreter::InterfaceKind VisitReferenceType(const ReferenceType *Ty) {
+ ExprResult AddrOfE = S.CreateBuiltinUnaryOp(SourceLocation(), UO_AddrOf, E);
+ assert(!AddrOfE.isInvalid() && "Can not create unary expression");
+ Args.push_back(AddrOfE.get());
+ return Interpreter::InterfaceKind::NoAlloc;
+ }
+
+ Interpreter::InterfaceKind VisitBuiltinType(const BuiltinType *Ty) {
+ if (Ty->isNullPtrType())
+ Args.push_back(E);
+ else if (Ty->isFloatingType())
+ Args.push_back(E);
+ else if (Ty->isIntegralOrEnumerationType())
+ HandleIntegralOrEnumType(Ty);
+ else if (Ty->isVoidType()) {
+ // Do we need to still run `E`?
+ }
+
+ return Interpreter::InterfaceKind::NoAlloc;
+ }
+
+ Interpreter::InterfaceKind VisitEnumType(const EnumType *Ty) {
+ HandleIntegralOrEnumType(Ty);
+ return Interpreter::InterfaceKind::NoAlloc;
+ }
+
+private:
+ // Force cast these types to uint64 to reduce the number of overloads of
+ // `__clang_Interpreter_SetValueNoAlloc`.
+ void HandleIntegralOrEnumType(const Type *Ty) {
+ TypeSourceInfo *TSI = Ctx.getTrivialTypeSourceInfo(Ctx.UnsignedLongLongTy);
+ ExprResult CastedExpr =
+ S.BuildCStyleCastExpr(SourceLocation(), TSI, SourceLocation(), E);
+ assert(!CastedExpr.isInvalid() && "Cannot create cstyle cast expr");
+ Args.push_back(CastedExpr.get());
+ }
+
+ void HandlePtrType(const Type *Ty) {
+ TypeSourceInfo *TSI = Ctx.getTrivialTypeSourceInfo(Ctx.VoidPtrTy);
+ ExprResult CastedExpr =
+ S.BuildCStyleCastExpr(SourceLocation(), TSI, SourceLocation(), E);
+ assert(!CastedExpr.isInvalid() && "Can not create cstyle cast expression");
+ Args.push_back(CastedExpr.get());
+ }
+};
+} // namespace
+
+// This synthesizes a call expression to a speciall
+// function that is responsible for generating the Value.
+// In general, we transform:
+// clang-repl> x
+// To:
+// // 1. If x is a built-in type like int, float.
+// __clang_Interpreter_SetValueNoAlloc(ThisInterp, OpaqueValue, xQualType, x);
+// // 2. If x is a struct, and a lvalue.
+// __clang_Interpreter_SetValueNoAlloc(ThisInterp, OpaqueValue, xQualType,
+// &x);
+// // 3. If x is a struct, but a rvalue.
+// new (__clang_Interpreter_SetValueWithAlloc(ThisInterp, OpaqueValue,
+// xQualType)) (x);
+
+Expr *Interpreter::SynthesizeExpr(Expr *E) {
+ Sema &S = getCompilerInstance()->getSema();
+ ASTContext &Ctx = S.getASTContext();
+
+ if (!FindRuntimeInterface())
+ llvm_unreachable("We can't find the runtime iterface for pretty print!");
+
+ // Create parameter `ThisInterp`.
+ auto *ThisInterp = CStyleCastPtrExpr(S, Ctx.VoidPtrTy, (uintptr_t)this);
+
+ // Create parameter `OutVal`.
+ auto *OutValue = CStyleCastPtrExpr(S, Ctx.VoidPtrTy, (uintptr_t)&LastValue);
+
+ // Build `__clang_Interpreter_SetValue*` call.
+ RuntimeInterfaceBuilder Builder(*this, Ctx, S, E, {ThisInterp, OutValue});
+
+ ExprResult Result = Builder.getCall();
+ // It could fail, like printing an array type in C. (not supported)
+ if (Result.isInvalid())
+ return E;
+ return Result.get();
+}
+
+// Temporary rvalue struct that need special care.
+REPL_EXTERNAL_VISIBILITY void *
+__clang_Interpreter_SetValueWithAlloc(void *This, void *OutVal,
+ void *OpaqueType) {
+ Value &VRef = *(Value *)OutVal;
+ VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
+ return VRef.getPtr();
+}
+
+// Pointers, lvalue struct that can take as a reference.
+REPL_EXTERNAL_VISIBILITY void
+__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType,
+ void *Val) {
+ Value &VRef = *(Value *)OutVal;
+ VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
+ VRef.setPtr(Val);
+}
+
+REPL_EXTERNAL_VISIBILITY void
+__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal,
+ void *OpaqueType) {
+ Value &VRef = *(Value *)OutVal;
+ VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
+}
+
+static void SetValueDataBasedOnQualType(Value &V, unsigned long long Data) {
+ QualType QT = V.getType();
+ if (const auto *ET = QT->getAs<EnumType>())
+ QT = ET->getDecl()->getIntegerType();
+
+ switch (QT->castAs<BuiltinType>()->getKind()) {
+ default:
+ llvm_unreachable("unknown type kind!");
+#define X(type, name) \
+ case BuiltinType::name: \
+ V.set##name(Data); \
+ break;
+ REPL_BUILTIN_TYPES
+#undef X
+ }
+}
+
+REPL_EXTERNAL_VISIBILITY void
+__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType,
+ unsigned long long Val) {
+ Value &VRef = *(Value *)OutVal;
+ VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
+ SetValueDataBasedOnQualType(VRef, Val);
+}
+
+REPL_EXTERNAL_VISIBILITY void
+__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType,
+ float Val) {
+ Value &VRef = *(Value *)OutVal;
+ VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
+ VRef.setFloat(Val);
+}
+
+REPL_EXTERNAL_VISIBILITY void
+__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType,
+ double Val) {
+ Value &VRef = *(Value *)OutVal;
+ VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
+ VRef.setDouble(Val);
+}
+
+REPL_EXTERNAL_VISIBILITY void
+__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType,
+ long double Val) {
+ Value &VRef = *(Value *)OutVal;
+ VRef = Value(static_cast<Interpreter *>(This), OpaqueType);
+ VRef.setLongDouble(Val);
+}
+
+// A trampoline to work around the fact that operator placement new cannot
+// really be forward declared due to libc++ and libstdc++ declaration mismatch.
+// FIXME: __clang_Interpreter_NewTag is ODR violation because we get the same
+// definition in the interpreter runtime. We should move it in a runtime header
+// which gets included by the interpreter and here.
+struct __clang_Interpreter_NewTag {};
+REPL_EXTERNAL_VISIBILITY void *
+operator new(size_t __sz, void *__p, __clang_Interpreter_NewTag) noexcept {
+ // Just forward to the standard operator placement new.
+ return operator new(__sz, __p);
+}
diff --git a/contrib/llvm-project/clang/lib/Interpreter/InterpreterUtils.cpp b/contrib/llvm-project/clang/lib/Interpreter/InterpreterUtils.cpp
new file mode 100644
index 000000000000..c19cf6aa3156
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Interpreter/InterpreterUtils.cpp
@@ -0,0 +1,111 @@
+//===--- InterpreterUtils.cpp - Incremental Utils --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements some common utils used in the incremental library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "InterpreterUtils.h"
+
+namespace clang {
+
+IntegerLiteral *IntegerLiteralExpr(ASTContext &C, uint64_t Val) {
+ return IntegerLiteral::Create(C, llvm::APSInt::getUnsigned(Val),
+ C.UnsignedLongLongTy, SourceLocation());
+}
+
+Expr *CStyleCastPtrExpr(Sema &S, QualType Ty, Expr *E) {
+ ASTContext &Ctx = S.getASTContext();
+ if (!Ty->isPointerType())
+ Ty = Ctx.getPointerType(Ty);
+
+ TypeSourceInfo *TSI = Ctx.getTrivialTypeSourceInfo(Ty, SourceLocation());
+ Expr *Result =
+ S.BuildCStyleCastExpr(SourceLocation(), TSI, SourceLocation(), E).get();
+ assert(Result && "Cannot create CStyleCastPtrExpr");
+ return Result;
+}
+
+Expr *CStyleCastPtrExpr(Sema &S, QualType Ty, uintptr_t Ptr) {
+ ASTContext &Ctx = S.getASTContext();
+ return CStyleCastPtrExpr(S, Ty, IntegerLiteralExpr(Ctx, (uint64_t)Ptr));
+}
+
+Sema::DeclGroupPtrTy CreateDGPtrFrom(Sema &S, Decl *D) {
+ SmallVector<Decl *, 1> DeclsInGroup;
+ DeclsInGroup.push_back(D);
+ Sema::DeclGroupPtrTy DeclGroupPtr = S.BuildDeclaratorGroup(DeclsInGroup);
+ return DeclGroupPtr;
+}
+
+NamespaceDecl *LookupNamespace(Sema &S, llvm::StringRef Name,
+ const DeclContext *Within) {
+ DeclarationName DName = &S.Context.Idents.get(Name);
+ LookupResult R(S, DName, SourceLocation(),
+ Sema::LookupNestedNameSpecifierName);
+ R.suppressDiagnostics();
+ if (!Within)
+ S.LookupName(R, S.TUScope);
+ else {
+ if (const auto *TD = dyn_cast<clang::TagDecl>(Within);
+ TD && !TD->getDefinition())
+ // No definition, no lookup result.
+ return nullptr;
+
+ S.LookupQualifiedName(R, const_cast<DeclContext *>(Within));
+ }
+
+ if (R.empty())
+ return nullptr;
+
+ R.resolveKind();
+
+ return dyn_cast<NamespaceDecl>(R.getFoundDecl());
+}
+
+NamedDecl *LookupNamed(Sema &S, llvm::StringRef Name,
+ const DeclContext *Within) {
+ DeclarationName DName = &S.Context.Idents.get(Name);
+ LookupResult R(S, DName, SourceLocation(), Sema::LookupOrdinaryName,
+ Sema::ForVisibleRedeclaration);
+
+ R.suppressDiagnostics();
+
+ if (!Within)
+ S.LookupName(R, S.TUScope);
+ else {
+ const DeclContext *PrimaryWithin = nullptr;
+ if (const auto *TD = dyn_cast<TagDecl>(Within))
+ PrimaryWithin = llvm::dyn_cast_or_null<DeclContext>(TD->getDefinition());
+ else
+ PrimaryWithin = Within->getPrimaryContext();
+
+ // No definition, no lookup result.
+ if (!PrimaryWithin)
+ return nullptr;
+
+ S.LookupQualifiedName(R, const_cast<DeclContext *>(PrimaryWithin));
+ }
+
+ if (R.empty())
+ return nullptr;
+ R.resolveKind();
+
+ if (R.isSingleResult())
+ return llvm::dyn_cast<NamedDecl>(R.getFoundDecl());
+
+ return nullptr;
+}
+
+std::string GetFullTypeName(ASTContext &Ctx, QualType QT) {
+ PrintingPolicy Policy(Ctx.getPrintingPolicy());
+ Policy.SuppressScope = false;
+ Policy.AnonymousTagLocations = false;
+ return QT.getAsString(Policy);
+}
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Interpreter/InterpreterUtils.h b/contrib/llvm-project/clang/lib/Interpreter/InterpreterUtils.h
new file mode 100644
index 000000000000..8df158c17d49
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Interpreter/InterpreterUtils.h
@@ -0,0 +1,54 @@
+//===--- InterpreterUtils.h - Incremental Utils --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements some common utils used in the incremental library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INTERPRETER_UTILS_H
+#define LLVM_CLANG_INTERPRETER_UTILS_H
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Mangle.h"
+#include "clang/AST/TypeVisitor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "clang/CodeGen/ObjectFilePCHContainerOperations.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Job.h"
+#include "clang/Driver/Options.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/TextDiagnosticBuffer.h"
+#include "clang/Lex/PreprocessorOptions.h"
+
+#include "clang/Sema/Lookup.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/TargetParser/Host.h"
+
+namespace clang {
+IntegerLiteral *IntegerLiteralExpr(ASTContext &C, uint64_t Val);
+
+Expr *CStyleCastPtrExpr(Sema &S, QualType Ty, Expr *E);
+
+Expr *CStyleCastPtrExpr(Sema &S, QualType Ty, uintptr_t Ptr);
+
+Sema::DeclGroupPtrTy CreateDGPtrFrom(Sema &S, Decl *D);
+
+NamespaceDecl *LookupNamespace(Sema &S, llvm::StringRef Name,
+ const DeclContext *Within = nullptr);
+
+NamedDecl *LookupNamed(Sema &S, llvm::StringRef Name,
+ const DeclContext *Within);
+
+std::string GetFullTypeName(ASTContext &Ctx, QualType QT);
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Interpreter/Value.cpp b/contrib/llvm-project/clang/lib/Interpreter/Value.cpp
new file mode 100644
index 000000000000..1d6b2da087e9
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Interpreter/Value.cpp
@@ -0,0 +1,267 @@
+//===--- Interpreter.h - Incremental Compiation and Execution---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the class that used to represent a value in incremental
+// C++.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Interpreter/Value.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Type.h"
+#include "clang/Interpreter/Interpreter.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_os_ostream.h"
+#include <cassert>
+#include <cstdint>
+#include <utility>
+
+using namespace clang;
+
+namespace {
+
+// This is internal buffer maintained by Value, used to hold temporaries.
+class ValueStorage {
+public:
+ using DtorFunc = void (*)(void *);
+
+ static unsigned char *CreatePayload(void *DtorF, size_t AllocSize,
+ size_t ElementsSize) {
+ if (AllocSize < sizeof(Canary))
+ AllocSize = sizeof(Canary);
+ unsigned char *Buf =
+ new unsigned char[ValueStorage::getPayloadOffset() + AllocSize];
+ ValueStorage *VS = new (Buf) ValueStorage(DtorF, AllocSize, ElementsSize);
+ std::memcpy(VS->getPayload(), Canary, sizeof(Canary));
+ return VS->getPayload();
+ }
+
+ unsigned char *getPayload() { return Storage; }
+ const unsigned char *getPayload() const { return Storage; }
+
+ static unsigned getPayloadOffset() {
+ static ValueStorage Dummy(nullptr, 0, 0);
+ return Dummy.getPayload() - reinterpret_cast<unsigned char *>(&Dummy);
+ }
+
+ static ValueStorage *getFromPayload(void *Payload) {
+ ValueStorage *R = reinterpret_cast<ValueStorage *>(
+ (unsigned char *)Payload - getPayloadOffset());
+ return R;
+ }
+
+ void Retain() { ++RefCnt; }
+
+ void Release() {
+ assert(RefCnt > 0 && "Can't release if reference count is already zero");
+ if (--RefCnt == 0) {
+ // We hace a non-trivial dtor.
+ if (Dtor && IsAlive()) {
+ assert(Elements && "We at least should have 1 element in Value");
+ size_t Stride = AllocSize / Elements;
+ for (size_t Idx = 0; Idx < Elements; ++Idx)
+ (*Dtor)(getPayload() + Idx * Stride);
+ }
+ delete[] reinterpret_cast<unsigned char *>(this);
+ }
+ }
+
+ // Check whether the storage is valid by validating the canary bits.
+ // If someone accidentally write some invalid bits in the storage, the canary
+ // will be changed first, and `IsAlive` will return false then.
+ bool IsAlive() const {
+ return std::memcmp(getPayload(), Canary, sizeof(Canary)) != 0;
+ }
+
+private:
+ ValueStorage(void *DtorF, size_t AllocSize, size_t ElementsNum)
+ : RefCnt(1), Dtor(reinterpret_cast<DtorFunc>(DtorF)),
+ AllocSize(AllocSize), Elements(ElementsNum) {}
+
+ mutable unsigned RefCnt;
+ DtorFunc Dtor = nullptr;
+ size_t AllocSize = 0;
+ size_t Elements = 0;
+ unsigned char Storage[1];
+
+ // These are some canary bits that are used for protecting the storage been
+ // damaged.
+ static constexpr unsigned char Canary[8] = {0x4c, 0x37, 0xad, 0x8f,
+ 0x2d, 0x23, 0x95, 0x91};
+};
+} // namespace
+
+static Value::Kind ConvertQualTypeToKind(const ASTContext &Ctx, QualType QT) {
+ if (Ctx.hasSameType(QT, Ctx.VoidTy))
+ return Value::K_Void;
+
+ if (const auto *ET = QT->getAs<EnumType>())
+ QT = ET->getDecl()->getIntegerType();
+
+ const auto *BT = QT->getAs<BuiltinType>();
+ if (!BT || BT->isNullPtrType())
+ return Value::K_PtrOrObj;
+
+ switch (QT->castAs<BuiltinType>()->getKind()) {
+ default:
+ assert(false && "Type not supported");
+ return Value::K_Unspecified;
+#define X(type, name) \
+ case BuiltinType::name: \
+ return Value::K_##name;
+ REPL_BUILTIN_TYPES
+#undef X
+ }
+}
+
+Value::Value(Interpreter *In, void *Ty) : Interp(In), OpaqueType(Ty) {
+ setKind(ConvertQualTypeToKind(getASTContext(), getType()));
+ if (ValueKind == K_PtrOrObj) {
+ QualType Canon = getType().getCanonicalType();
+ if ((Canon->isPointerType() || Canon->isObjectType() ||
+ Canon->isReferenceType()) &&
+ (Canon->isRecordType() || Canon->isConstantArrayType() ||
+ Canon->isMemberPointerType())) {
+ IsManuallyAlloc = true;
+ // Compile dtor function.
+ Interpreter &Interp = getInterpreter();
+ void *DtorF = nullptr;
+ size_t ElementsSize = 1;
+ QualType DtorTy = getType();
+
+ if (const auto *ArrTy =
+ llvm::dyn_cast<ConstantArrayType>(DtorTy.getTypePtr())) {
+ DtorTy = ArrTy->getElementType();
+ llvm::APInt ArrSize(sizeof(size_t) * 8, 1);
+ do {
+ ArrSize *= ArrTy->getSize();
+ ArrTy = llvm::dyn_cast<ConstantArrayType>(
+ ArrTy->getElementType().getTypePtr());
+ } while (ArrTy);
+ ElementsSize = static_cast<size_t>(ArrSize.getZExtValue());
+ }
+ if (const auto *RT = DtorTy->getAs<RecordType>()) {
+ if (CXXRecordDecl *CXXRD =
+ llvm::dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (llvm::Expected<llvm::orc::ExecutorAddr> Addr =
+ Interp.CompileDtorCall(CXXRD))
+ DtorF = reinterpret_cast<void *>(Addr->getValue());
+ else
+ llvm::logAllUnhandledErrors(Addr.takeError(), llvm::errs());
+ }
+ }
+
+ size_t AllocSize =
+ getASTContext().getTypeSizeInChars(getType()).getQuantity();
+ unsigned char *Payload =
+ ValueStorage::CreatePayload(DtorF, AllocSize, ElementsSize);
+ setPtr((void *)Payload);
+ }
+ }
+}
+
+Value::Value(const Value &RHS)
+ : Interp(RHS.Interp), OpaqueType(RHS.OpaqueType), Data(RHS.Data),
+ ValueKind(RHS.ValueKind), IsManuallyAlloc(RHS.IsManuallyAlloc) {
+ if (IsManuallyAlloc)
+ ValueStorage::getFromPayload(getPtr())->Retain();
+}
+
+Value::Value(Value &&RHS) noexcept {
+ Interp = std::exchange(RHS.Interp, nullptr);
+ OpaqueType = std::exchange(RHS.OpaqueType, nullptr);
+ Data = RHS.Data;
+ ValueKind = std::exchange(RHS.ValueKind, K_Unspecified);
+ IsManuallyAlloc = std::exchange(RHS.IsManuallyAlloc, false);
+
+ if (IsManuallyAlloc)
+ ValueStorage::getFromPayload(getPtr())->Release();
+}
+
+Value &Value::operator=(const Value &RHS) {
+ if (IsManuallyAlloc)
+ ValueStorage::getFromPayload(getPtr())->Release();
+
+ Interp = RHS.Interp;
+ OpaqueType = RHS.OpaqueType;
+ Data = RHS.Data;
+ ValueKind = RHS.ValueKind;
+ IsManuallyAlloc = RHS.IsManuallyAlloc;
+
+ if (IsManuallyAlloc)
+ ValueStorage::getFromPayload(getPtr())->Retain();
+
+ return *this;
+}
+
+Value &Value::operator=(Value &&RHS) noexcept {
+ if (this != &RHS) {
+ if (IsManuallyAlloc)
+ ValueStorage::getFromPayload(getPtr())->Release();
+
+ Interp = std::exchange(RHS.Interp, nullptr);
+ OpaqueType = std::exchange(RHS.OpaqueType, nullptr);
+ ValueKind = std::exchange(RHS.ValueKind, K_Unspecified);
+ IsManuallyAlloc = std::exchange(RHS.IsManuallyAlloc, false);
+
+ Data = RHS.Data;
+ }
+ return *this;
+}
+
+void Value::clear() {
+ if (IsManuallyAlloc)
+ ValueStorage::getFromPayload(getPtr())->Release();
+ ValueKind = K_Unspecified;
+ OpaqueType = nullptr;
+ Interp = nullptr;
+ IsManuallyAlloc = false;
+}
+
+Value::~Value() { clear(); }
+
+void *Value::getPtr() const {
+ assert(ValueKind == K_PtrOrObj);
+ return Data.m_Ptr;
+}
+
+QualType Value::getType() const {
+ return QualType::getFromOpaquePtr(OpaqueType);
+}
+
+Interpreter &Value::getInterpreter() {
+ assert(Interp != nullptr &&
+ "Can't get interpreter from a default constructed value");
+ return *Interp;
+}
+
+const Interpreter &Value::getInterpreter() const {
+ assert(Interp != nullptr &&
+ "Can't get interpreter from a default constructed value");
+ return *Interp;
+}
+
+ASTContext &Value::getASTContext() { return getInterpreter().getASTContext(); }
+
+const ASTContext &Value::getASTContext() const {
+ return getInterpreter().getASTContext();
+}
+
+void Value::dump() const { print(llvm::outs()); }
+
+void Value::printType(llvm::raw_ostream &Out) const {
+ Out << "Not implement yet.\n";
+}
+void Value::printData(llvm::raw_ostream &Out) const {
+ Out << "Not implement yet.\n";
+}
+void Value::print(llvm::raw_ostream &Out) const {
+ assert(OpaqueType != nullptr && "Can't print default Value");
+ Out << "Not implement yet.\n";
+}
diff --git a/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesScanner.cpp b/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesScanner.cpp
new file mode 100644
index 000000000000..980f865cf24c
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesScanner.cpp
@@ -0,0 +1,994 @@
+//===- DependencyDirectivesScanner.cpp ------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This is the interface for scanning header and source files to get the
+/// minimum necessary preprocessor directives for evaluating includes. It
+/// reduces the source down to #define, #include, #import, @import, and any
+/// conditional preprocessor logic that contains one of those.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/DependencyDirectivesScanner.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/Pragma.h"
+#include "llvm/ADT/ScopeExit.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSwitch.h"
+#include <optional>
+
+using namespace clang;
+using namespace clang::dependency_directives_scan;
+using namespace llvm;
+
+namespace {
+
+struct DirectiveWithTokens {
+ DirectiveKind Kind;
+ unsigned NumTokens;
+
+ DirectiveWithTokens(DirectiveKind Kind, unsigned NumTokens)
+ : Kind(Kind), NumTokens(NumTokens) {}
+};
+
+/// Does an efficient "scan" of the sources to detect the presence of
+/// preprocessor (or module import) directives and collects the raw lexed tokens
+/// for those directives so that the \p Lexer can "replay" them when the file is
+/// included.
+///
+/// Note that the behavior of the raw lexer is affected by the language mode,
+/// while at this point we want to do a scan and collect tokens once,
+/// irrespective of the language mode that the file will get included in. To
+/// compensate for that the \p Lexer, while "replaying", will adjust a token
+/// where appropriate, when it could affect the preprocessor's state.
+/// For example in a directive like
+///
+/// \code
+/// #if __has_cpp_attribute(clang::fallthrough)
+/// \endcode
+///
+/// The preprocessor needs to see '::' as 'tok::coloncolon' instead of 2
+/// 'tok::colon'. The \p Lexer will adjust if it sees consecutive 'tok::colon'
+/// while in C++ mode.
+struct Scanner {
+ Scanner(StringRef Input,
+ SmallVectorImpl<dependency_directives_scan::Token> &Tokens,
+ DiagnosticsEngine *Diags, SourceLocation InputSourceLoc)
+ : Input(Input), Tokens(Tokens), Diags(Diags),
+ InputSourceLoc(InputSourceLoc), LangOpts(getLangOptsForDepScanning()),
+ TheLexer(InputSourceLoc, LangOpts, Input.begin(), Input.begin(),
+ Input.end()) {}
+
+ static LangOptions getLangOptsForDepScanning() {
+ LangOptions LangOpts;
+ // Set the lexer to use 'tok::at' for '@', instead of 'tok::unknown'.
+ LangOpts.ObjC = true;
+ LangOpts.LineComment = true;
+ // FIXME: we do not enable C11 or C++11, so we are missing u/u8/U"" and
+ // R"()" literals.
+ return LangOpts;
+ }
+
+ /// Lex the provided source and emit the directive tokens.
+ ///
+ /// \returns True on error.
+ bool scan(SmallVectorImpl<Directive> &Directives);
+
+private:
+ /// Lexes next token and advances \p First and the \p Lexer.
+ [[nodiscard]] dependency_directives_scan::Token &
+ lexToken(const char *&First, const char *const End);
+
+ dependency_directives_scan::Token &lexIncludeFilename(const char *&First,
+ const char *const End);
+
+ void skipLine(const char *&First, const char *const End);
+ void skipDirective(StringRef Name, const char *&First, const char *const End);
+
+ /// Returns the spelling of a string literal or identifier after performing
+ /// any processing needed to handle \c clang::Token::NeedsCleaning.
+ StringRef cleanStringIfNeeded(const dependency_directives_scan::Token &Tok);
+
+ /// Lexes next token and if it is identifier returns its string, otherwise
+ /// it skips the current line and returns \p std::nullopt.
+ ///
+ /// In any case (whatever the token kind) \p First and the \p Lexer will
+ /// advance beyond the token.
+ [[nodiscard]] std::optional<StringRef>
+ tryLexIdentifierOrSkipLine(const char *&First, const char *const End);
+
+ /// Used when it is certain that next token is an identifier.
+ [[nodiscard]] StringRef lexIdentifier(const char *&First,
+ const char *const End);
+
+ /// Lexes next token and returns true iff it is an identifier that matches \p
+ /// Id, otherwise it skips the current line and returns false.
+ ///
+ /// In any case (whatever the token kind) \p First and the \p Lexer will
+ /// advance beyond the token.
+ [[nodiscard]] bool isNextIdentifierOrSkipLine(StringRef Id,
+ const char *&First,
+ const char *const End);
+
+ /// Lexes next token and returns true iff it matches the kind \p K.
+ /// Otherwise it skips the current line and returns false.
+ ///
+ /// In any case (whatever the token kind) \p First and the \p Lexer will
+ /// advance beyond the token.
+ [[nodiscard]] bool isNextTokenOrSkipLine(tok::TokenKind K, const char *&First,
+ const char *const End);
+
+ /// Lexes next token and if it is string literal, returns its string.
+ /// Otherwise, it skips the current line and returns \p std::nullopt.
+ ///
+ /// In any case (whatever the token kind) \p First and the \p Lexer will
+ /// advance beyond the token.
+ [[nodiscard]] std::optional<StringRef>
+ tryLexStringLiteralOrSkipLine(const char *&First, const char *const End);
+
+ [[nodiscard]] bool scanImpl(const char *First, const char *const End);
+ [[nodiscard]] bool lexPPLine(const char *&First, const char *const End);
+ [[nodiscard]] bool lexAt(const char *&First, const char *const End);
+ [[nodiscard]] bool lexModule(const char *&First, const char *const End);
+ [[nodiscard]] bool lexDefine(const char *HashLoc, const char *&First,
+ const char *const End);
+ [[nodiscard]] bool lexPragma(const char *&First, const char *const End);
+ [[nodiscard]] bool lex_Pragma(const char *&First, const char *const End);
+ [[nodiscard]] bool lexEndif(const char *&First, const char *const End);
+ [[nodiscard]] bool lexDefault(DirectiveKind Kind, const char *&First,
+ const char *const End);
+ [[nodiscard]] bool lexModuleDirectiveBody(DirectiveKind Kind,
+ const char *&First,
+ const char *const End);
+ void lexPPDirectiveBody(const char *&First, const char *const End);
+
+ DirectiveWithTokens &pushDirective(DirectiveKind Kind) {
+ Tokens.append(CurDirToks);
+ DirsWithToks.emplace_back(Kind, CurDirToks.size());
+ CurDirToks.clear();
+ return DirsWithToks.back();
+ }
+ void popDirective() {
+ Tokens.pop_back_n(DirsWithToks.pop_back_val().NumTokens);
+ }
+ DirectiveKind topDirective() const {
+ return DirsWithToks.empty() ? pp_none : DirsWithToks.back().Kind;
+ }
+
+ unsigned getOffsetAt(const char *CurPtr) const {
+ return CurPtr - Input.data();
+ }
+
+ /// Reports a diagnostic if the diagnostic engine is provided. Always returns
+ /// true at the end.
+ bool reportError(const char *CurPtr, unsigned Err);
+
+ StringMap<char> SplitIds;
+ StringRef Input;
+ SmallVectorImpl<dependency_directives_scan::Token> &Tokens;
+ DiagnosticsEngine *Diags;
+ SourceLocation InputSourceLoc;
+
+ const char *LastTokenPtr = nullptr;
+ /// Keeps track of the tokens for the currently lexed directive. Once a
+ /// directive is fully lexed and "committed" then the tokens get appended to
+ /// \p Tokens and \p CurDirToks is cleared for the next directive.
+ SmallVector<dependency_directives_scan::Token, 32> CurDirToks;
+ /// The directives that were lexed along with the number of tokens that each
+ /// directive contains. The tokens of all the directives are kept in \p Tokens
+ /// vector, in the same order as the directives order in \p DirsWithToks.
+ SmallVector<DirectiveWithTokens, 64> DirsWithToks;
+ LangOptions LangOpts;
+ Lexer TheLexer;
+};
+
+} // end anonymous namespace
+
+bool Scanner::reportError(const char *CurPtr, unsigned Err) {
+ if (!Diags)
+ return true;
+ assert(CurPtr >= Input.data() && "invalid buffer ptr");
+ Diags->Report(InputSourceLoc.getLocWithOffset(getOffsetAt(CurPtr)), Err);
+ return true;
+}
+
+static void skipOverSpaces(const char *&First, const char *const End) {
+ while (First != End && isHorizontalWhitespace(*First))
+ ++First;
+}
+
+[[nodiscard]] static bool isRawStringLiteral(const char *First,
+ const char *Current) {
+ assert(First <= Current);
+
+ // Check if we can even back up.
+ if (*Current != '"' || First == Current)
+ return false;
+
+ // Check for an "R".
+ --Current;
+ if (*Current != 'R')
+ return false;
+ if (First == Current || !isAsciiIdentifierContinue(*--Current))
+ return true;
+
+ // Check for a prefix of "u", "U", or "L".
+ if (*Current == 'u' || *Current == 'U' || *Current == 'L')
+ return First == Current || !isAsciiIdentifierContinue(*--Current);
+
+ // Check for a prefix of "u8".
+ if (*Current != '8' || First == Current || *Current-- != 'u')
+ return false;
+ return First == Current || !isAsciiIdentifierContinue(*--Current);
+}
+
+static void skipRawString(const char *&First, const char *const End) {
+ assert(First[0] == '"');
+ assert(First[-1] == 'R');
+
+ const char *Last = ++First;
+ while (Last != End && *Last != '(')
+ ++Last;
+ if (Last == End) {
+ First = Last; // Hit the end... just give up.
+ return;
+ }
+
+ StringRef Terminator(First, Last - First);
+ for (;;) {
+ // Move First to just past the next ")".
+ First = Last;
+ while (First != End && *First != ')')
+ ++First;
+ if (First == End)
+ return;
+ ++First;
+
+ // Look ahead for the terminator sequence.
+ Last = First;
+ while (Last != End && size_t(Last - First) < Terminator.size() &&
+ Terminator[Last - First] == *Last)
+ ++Last;
+
+ // Check if we hit it (or the end of the file).
+ if (Last == End) {
+ First = Last;
+ return;
+ }
+ if (size_t(Last - First) < Terminator.size())
+ continue;
+ if (*Last != '"')
+ continue;
+ First = Last + 1;
+ return;
+ }
+}
+
+// Returns the length of EOL, either 0 (no end-of-line), 1 (\n) or 2 (\r\n)
+static unsigned isEOL(const char *First, const char *const End) {
+ if (First == End)
+ return 0;
+ if (End - First > 1 && isVerticalWhitespace(First[0]) &&
+ isVerticalWhitespace(First[1]) && First[0] != First[1])
+ return 2;
+ return !!isVerticalWhitespace(First[0]);
+}
+
+static void skipString(const char *&First, const char *const End) {
+ assert(*First == '\'' || *First == '"' || *First == '<');
+ const char Terminator = *First == '<' ? '>' : *First;
+ for (++First; First != End && *First != Terminator; ++First) {
+ // String and character literals don't extend past the end of the line.
+ if (isVerticalWhitespace(*First))
+ return;
+ if (*First != '\\')
+ continue;
+ // Skip past backslash to the next character. This ensures that the
+ // character right after it is skipped as well, which matters if it's
+ // the terminator.
+ if (++First == End)
+ return;
+ if (!isWhitespace(*First))
+ continue;
+ // Whitespace after the backslash might indicate a line continuation.
+ const char *FirstAfterBackslashPastSpace = First;
+ skipOverSpaces(FirstAfterBackslashPastSpace, End);
+ if (unsigned NLSize = isEOL(FirstAfterBackslashPastSpace, End)) {
+ // Advance the character pointer to the next line for the next
+ // iteration.
+ First = FirstAfterBackslashPastSpace + NLSize - 1;
+ }
+ }
+ if (First != End)
+ ++First; // Finish off the string.
+}
+
+// Returns the length of the skipped newline
+static unsigned skipNewline(const char *&First, const char *End) {
+ if (First == End)
+ return 0;
+ assert(isVerticalWhitespace(*First));
+ unsigned Len = isEOL(First, End);
+ assert(Len && "expected newline");
+ First += Len;
+ return Len;
+}
+
+static bool wasLineContinuation(const char *First, unsigned EOLLen) {
+ return *(First - (int)EOLLen - 1) == '\\';
+}
+
+static void skipToNewlineRaw(const char *&First, const char *const End) {
+ for (;;) {
+ if (First == End)
+ return;
+
+ unsigned Len = isEOL(First, End);
+ if (Len)
+ return;
+
+ do {
+ if (++First == End)
+ return;
+ Len = isEOL(First, End);
+ } while (!Len);
+
+ if (First[-1] != '\\')
+ return;
+
+ First += Len;
+ // Keep skipping lines...
+ }
+}
+
+static void skipLineComment(const char *&First, const char *const End) {
+ assert(First[0] == '/' && First[1] == '/');
+ First += 2;
+ skipToNewlineRaw(First, End);
+}
+
+static void skipBlockComment(const char *&First, const char *const End) {
+ assert(First[0] == '/' && First[1] == '*');
+ if (End - First < 4) {
+ First = End;
+ return;
+ }
+ for (First += 3; First != End; ++First)
+ if (First[-1] == '*' && First[0] == '/') {
+ ++First;
+ return;
+ }
+}
+
+/// \returns True if the current single quotation mark character is a C++ 14
+/// digit separator.
+static bool isQuoteCppDigitSeparator(const char *const Start,
+ const char *const Cur,
+ const char *const End) {
+ assert(*Cur == '\'' && "expected quotation character");
+ // skipLine called in places where we don't expect a valid number
+ // body before `start` on the same line, so always return false at the start.
+ if (Start == Cur)
+ return false;
+ // The previous character must be a valid PP number character.
+ // Make sure that the L, u, U, u8 prefixes don't get marked as a
+ // separator though.
+ char Prev = *(Cur - 1);
+ if (Prev == 'L' || Prev == 'U' || Prev == 'u')
+ return false;
+ if (Prev == '8' && (Cur - 1 != Start) && *(Cur - 2) == 'u')
+ return false;
+ if (!isPreprocessingNumberBody(Prev))
+ return false;
+ // The next character should be a valid identifier body character.
+ return (Cur + 1) < End && isAsciiIdentifierContinue(*(Cur + 1));
+}
+
+void Scanner::skipLine(const char *&First, const char *const End) {
+ for (;;) {
+ assert(First <= End);
+ if (First == End)
+ return;
+
+ if (isVerticalWhitespace(*First)) {
+ skipNewline(First, End);
+ return;
+ }
+ const char *Start = First;
+ while (First != End && !isVerticalWhitespace(*First)) {
+ // Iterate over strings correctly to avoid comments and newlines.
+ if (*First == '"' ||
+ (*First == '\'' && !isQuoteCppDigitSeparator(Start, First, End))) {
+ LastTokenPtr = First;
+ if (isRawStringLiteral(Start, First))
+ skipRawString(First, End);
+ else
+ skipString(First, End);
+ continue;
+ }
+
+ // Iterate over comments correctly.
+ if (*First != '/' || End - First < 2) {
+ LastTokenPtr = First;
+ ++First;
+ continue;
+ }
+
+ if (First[1] == '/') {
+ // "//...".
+ skipLineComment(First, End);
+ continue;
+ }
+
+ if (First[1] != '*') {
+ LastTokenPtr = First;
+ ++First;
+ continue;
+ }
+
+ // "/*...*/".
+ skipBlockComment(First, End);
+ }
+ if (First == End)
+ return;
+
+ // Skip over the newline.
+ unsigned Len = skipNewline(First, End);
+ if (!wasLineContinuation(First, Len)) // Continue past line-continuations.
+ break;
+ }
+}
+
+void Scanner::skipDirective(StringRef Name, const char *&First,
+ const char *const End) {
+ if (llvm::StringSwitch<bool>(Name)
+ .Case("warning", true)
+ .Case("error", true)
+ .Default(false))
+ // Do not process quotes or comments.
+ skipToNewlineRaw(First, End);
+ else
+ skipLine(First, End);
+}
+
+static void skipWhitespace(const char *&First, const char *const End) {
+ for (;;) {
+ assert(First <= End);
+ skipOverSpaces(First, End);
+
+ if (End - First < 2)
+ return;
+
+ if (First[0] == '\\' && isVerticalWhitespace(First[1])) {
+ skipNewline(++First, End);
+ continue;
+ }
+
+ // Check for a non-comment character.
+ if (First[0] != '/')
+ return;
+
+ // "// ...".
+ if (First[1] == '/') {
+ skipLineComment(First, End);
+ return;
+ }
+
+ // Cannot be a comment.
+ if (First[1] != '*')
+ return;
+
+ // "/*...*/".
+ skipBlockComment(First, End);
+ }
+}
+
+bool Scanner::lexModuleDirectiveBody(DirectiveKind Kind, const char *&First,
+ const char *const End) {
+ const char *DirectiveLoc = Input.data() + CurDirToks.front().Offset;
+ for (;;) {
+ const dependency_directives_scan::Token &Tok = lexToken(First, End);
+ if (Tok.is(tok::eof))
+ return reportError(
+ DirectiveLoc,
+ diag::err_dep_source_scanner_missing_semi_after_at_import);
+ if (Tok.is(tok::semi))
+ break;
+ }
+ pushDirective(Kind);
+ skipWhitespace(First, End);
+ if (First == End)
+ return false;
+ if (!isVerticalWhitespace(*First))
+ return reportError(
+ DirectiveLoc, diag::err_dep_source_scanner_unexpected_tokens_at_import);
+ skipNewline(First, End);
+ return false;
+}
+
+dependency_directives_scan::Token &Scanner::lexToken(const char *&First,
+ const char *const End) {
+ clang::Token Tok;
+ TheLexer.LexFromRawLexer(Tok);
+ First = Input.data() + TheLexer.getCurrentBufferOffset();
+ assert(First <= End);
+
+ unsigned Offset = TheLexer.getCurrentBufferOffset() - Tok.getLength();
+ CurDirToks.emplace_back(Offset, Tok.getLength(), Tok.getKind(),
+ Tok.getFlags());
+ return CurDirToks.back();
+}
+
+dependency_directives_scan::Token &
+Scanner::lexIncludeFilename(const char *&First, const char *const End) {
+ clang::Token Tok;
+ TheLexer.LexIncludeFilename(Tok);
+ First = Input.data() + TheLexer.getCurrentBufferOffset();
+ assert(First <= End);
+
+ unsigned Offset = TheLexer.getCurrentBufferOffset() - Tok.getLength();
+ CurDirToks.emplace_back(Offset, Tok.getLength(), Tok.getKind(),
+ Tok.getFlags());
+ return CurDirToks.back();
+}
+
+void Scanner::lexPPDirectiveBody(const char *&First, const char *const End) {
+ while (true) {
+ const dependency_directives_scan::Token &Tok = lexToken(First, End);
+ if (Tok.is(tok::eod))
+ break;
+ }
+}
+
+StringRef
+Scanner::cleanStringIfNeeded(const dependency_directives_scan::Token &Tok) {
+ bool NeedsCleaning = Tok.Flags & clang::Token::NeedsCleaning;
+ if (LLVM_LIKELY(!NeedsCleaning))
+ return Input.slice(Tok.Offset, Tok.getEnd());
+
+ SmallString<64> Spelling;
+ Spelling.resize(Tok.Length);
+
+ // FIXME: C++11 raw string literals need special handling (see getSpellingSlow
+ // in the Lexer). Currently we cannot see them due to our LangOpts.
+
+ unsigned SpellingLength = 0;
+ const char *BufPtr = Input.begin() + Tok.Offset;
+ const char *AfterIdent = Input.begin() + Tok.getEnd();
+ while (BufPtr < AfterIdent) {
+ auto [Char, Size] = Lexer::getCharAndSizeNoWarn(BufPtr, LangOpts);
+ Spelling[SpellingLength++] = Char;
+ BufPtr += Size;
+ }
+
+ return SplitIds.try_emplace(StringRef(Spelling.begin(), SpellingLength), 0)
+ .first->first();
+}
+
+std::optional<StringRef>
+Scanner::tryLexIdentifierOrSkipLine(const char *&First, const char *const End) {
+ const dependency_directives_scan::Token &Tok = lexToken(First, End);
+ if (Tok.isNot(tok::raw_identifier)) {
+ if (!Tok.is(tok::eod))
+ skipLine(First, End);
+ return std::nullopt;
+ }
+
+ return cleanStringIfNeeded(Tok);
+}
+
+StringRef Scanner::lexIdentifier(const char *&First, const char *const End) {
+ std::optional<StringRef> Id = tryLexIdentifierOrSkipLine(First, End);
+ assert(Id && "expected identifier token");
+ return *Id;
+}
+
+bool Scanner::isNextIdentifierOrSkipLine(StringRef Id, const char *&First,
+ const char *const End) {
+ if (std::optional<StringRef> FoundId =
+ tryLexIdentifierOrSkipLine(First, End)) {
+ if (*FoundId == Id)
+ return true;
+ skipLine(First, End);
+ }
+ return false;
+}
+
+bool Scanner::isNextTokenOrSkipLine(tok::TokenKind K, const char *&First,
+ const char *const End) {
+ const dependency_directives_scan::Token &Tok = lexToken(First, End);
+ if (Tok.is(K))
+ return true;
+ skipLine(First, End);
+ return false;
+}
+
+std::optional<StringRef>
+Scanner::tryLexStringLiteralOrSkipLine(const char *&First,
+ const char *const End) {
+ const dependency_directives_scan::Token &Tok = lexToken(First, End);
+ if (!tok::isStringLiteral(Tok.Kind)) {
+ if (!Tok.is(tok::eod))
+ skipLine(First, End);
+ return std::nullopt;
+ }
+
+ return cleanStringIfNeeded(Tok);
+}
+
+bool Scanner::lexAt(const char *&First, const char *const End) {
+ // Handle "@import".
+
+ // Lex '@'.
+ const dependency_directives_scan::Token &AtTok = lexToken(First, End);
+ assert(AtTok.is(tok::at));
+ (void)AtTok;
+
+ if (!isNextIdentifierOrSkipLine("import", First, End))
+ return false;
+ return lexModuleDirectiveBody(decl_at_import, First, End);
+}
+
+bool Scanner::lexModule(const char *&First, const char *const End) {
+ StringRef Id = lexIdentifier(First, End);
+ bool Export = false;
+ if (Id == "export") {
+ Export = true;
+ std::optional<StringRef> NextId = tryLexIdentifierOrSkipLine(First, End);
+ if (!NextId)
+ return false;
+ Id = *NextId;
+ }
+
+ if (Id != "module" && Id != "import") {
+ skipLine(First, End);
+ return false;
+ }
+
+ skipWhitespace(First, End);
+
+ // Ignore this as a module directive if the next character can't be part of
+ // an import.
+
+ switch (*First) {
+ case ':':
+ case '<':
+ case '"':
+ break;
+ default:
+ if (!isAsciiIdentifierContinue(*First)) {
+ skipLine(First, End);
+ return false;
+ }
+ }
+
+ TheLexer.seek(getOffsetAt(First), /*IsAtStartOfLine*/ false);
+
+ DirectiveKind Kind;
+ if (Id == "module")
+ Kind = Export ? cxx_export_module_decl : cxx_module_decl;
+ else
+ Kind = Export ? cxx_export_import_decl : cxx_import_decl;
+
+ return lexModuleDirectiveBody(Kind, First, End);
+}
+
+bool Scanner::lex_Pragma(const char *&First, const char *const End) {
+ if (!isNextTokenOrSkipLine(tok::l_paren, First, End))
+ return false;
+
+ std::optional<StringRef> Str = tryLexStringLiteralOrSkipLine(First, End);
+
+ if (!Str || !isNextTokenOrSkipLine(tok::r_paren, First, End))
+ return false;
+
+ SmallString<64> Buffer(*Str);
+ prepare_PragmaString(Buffer);
+
+ // Use a new scanner instance since the tokens will be inside the allocated
+ // string. We should already have captured all the relevant tokens in the
+ // current scanner.
+ SmallVector<dependency_directives_scan::Token> DiscardTokens;
+ const char *Begin = Buffer.c_str();
+ Scanner PragmaScanner{StringRef(Begin, Buffer.size()), DiscardTokens, Diags,
+ InputSourceLoc};
+
+ PragmaScanner.TheLexer.setParsingPreprocessorDirective(true);
+ if (PragmaScanner.lexPragma(Begin, Buffer.end()))
+ return true;
+
+ DirectiveKind K = PragmaScanner.topDirective();
+ if (K == pp_none) {
+ skipLine(First, End);
+ return false;
+ }
+
+ assert(Begin == Buffer.end());
+ pushDirective(K);
+ return false;
+}
+
+bool Scanner::lexPragma(const char *&First, const char *const End) {
+ std::optional<StringRef> FoundId = tryLexIdentifierOrSkipLine(First, End);
+ if (!FoundId)
+ return false;
+
+ StringRef Id = *FoundId;
+ auto Kind = llvm::StringSwitch<DirectiveKind>(Id)
+ .Case("once", pp_pragma_once)
+ .Case("push_macro", pp_pragma_push_macro)
+ .Case("pop_macro", pp_pragma_pop_macro)
+ .Case("include_alias", pp_pragma_include_alias)
+ .Default(pp_none);
+ if (Kind != pp_none) {
+ lexPPDirectiveBody(First, End);
+ pushDirective(Kind);
+ return false;
+ }
+
+ if (Id != "clang") {
+ skipLine(First, End);
+ return false;
+ }
+
+ FoundId = tryLexIdentifierOrSkipLine(First, End);
+ if (!FoundId)
+ return false;
+ Id = *FoundId;
+
+ // #pragma clang system_header
+ if (Id == "system_header") {
+ lexPPDirectiveBody(First, End);
+ pushDirective(pp_pragma_system_header);
+ return false;
+ }
+
+ if (Id != "module") {
+ skipLine(First, End);
+ return false;
+ }
+
+ // #pragma clang module.
+ if (!isNextIdentifierOrSkipLine("import", First, End))
+ return false;
+
+ // #pragma clang module import.
+ lexPPDirectiveBody(First, End);
+ pushDirective(pp_pragma_import);
+ return false;
+}
+
+bool Scanner::lexEndif(const char *&First, const char *const End) {
+ // Strip out "#else" if it's empty.
+ if (topDirective() == pp_else)
+ popDirective();
+
+ // If "#ifdef" is empty, strip it and skip the "#endif".
+ //
+ // FIXME: Once/if Clang starts disallowing __has_include in macro expansions,
+ // we can skip empty `#if` and `#elif` blocks as well after scanning for a
+ // literal __has_include in the condition. Even without that rule we could
+ // drop the tokens if we scan for identifiers in the condition and find none.
+ if (topDirective() == pp_ifdef || topDirective() == pp_ifndef) {
+ popDirective();
+ skipLine(First, End);
+ return false;
+ }
+
+ return lexDefault(pp_endif, First, End);
+}
+
+bool Scanner::lexDefault(DirectiveKind Kind, const char *&First,
+ const char *const End) {
+ lexPPDirectiveBody(First, End);
+ pushDirective(Kind);
+ return false;
+}
+
+static bool isStartOfRelevantLine(char First) {
+ switch (First) {
+ case '#':
+ case '@':
+ case 'i':
+ case 'e':
+ case 'm':
+ case '_':
+ return true;
+ }
+ return false;
+}
+
+bool Scanner::lexPPLine(const char *&First, const char *const End) {
+ assert(First != End);
+
+ skipWhitespace(First, End);
+ assert(First <= End);
+ if (First == End)
+ return false;
+
+ if (!isStartOfRelevantLine(*First)) {
+ skipLine(First, End);
+ assert(First <= End);
+ return false;
+ }
+
+ LastTokenPtr = First;
+
+ TheLexer.seek(getOffsetAt(First), /*IsAtStartOfLine*/ true);
+
+ auto ScEx1 = make_scope_exit([&]() {
+ /// Clear Scanner's CurDirToks before returning, in case we didn't push a
+ /// new directive.
+ CurDirToks.clear();
+ });
+
+ // Handle "@import".
+ if (*First == '@')
+ return lexAt(First, End);
+
+ if (*First == 'i' || *First == 'e' || *First == 'm')
+ return lexModule(First, End);
+
+ if (*First == '_') {
+ if (isNextIdentifierOrSkipLine("_Pragma", First, End))
+ return lex_Pragma(First, End);
+ return false;
+ }
+
+ // Handle preprocessing directives.
+
+ TheLexer.setParsingPreprocessorDirective(true);
+ auto ScEx2 = make_scope_exit(
+ [&]() { TheLexer.setParsingPreprocessorDirective(false); });
+
+ // Lex '#'.
+ const dependency_directives_scan::Token &HashTok = lexToken(First, End);
+ if (HashTok.is(tok::hashhash)) {
+ // A \p tok::hashhash at this location is passed by the preprocessor to the
+ // parser to interpret, like any other token. So for dependency scanning
+ // skip it like a normal token not affecting the preprocessor.
+ skipLine(First, End);
+ assert(First <= End);
+ return false;
+ }
+ assert(HashTok.is(tok::hash));
+ (void)HashTok;
+
+ std::optional<StringRef> FoundId = tryLexIdentifierOrSkipLine(First, End);
+ if (!FoundId)
+ return false;
+
+ StringRef Id = *FoundId;
+
+ if (Id == "pragma")
+ return lexPragma(First, End);
+
+ auto Kind = llvm::StringSwitch<DirectiveKind>(Id)
+ .Case("include", pp_include)
+ .Case("__include_macros", pp___include_macros)
+ .Case("define", pp_define)
+ .Case("undef", pp_undef)
+ .Case("import", pp_import)
+ .Case("include_next", pp_include_next)
+ .Case("if", pp_if)
+ .Case("ifdef", pp_ifdef)
+ .Case("ifndef", pp_ifndef)
+ .Case("elif", pp_elif)
+ .Case("elifdef", pp_elifdef)
+ .Case("elifndef", pp_elifndef)
+ .Case("else", pp_else)
+ .Case("endif", pp_endif)
+ .Default(pp_none);
+ if (Kind == pp_none) {
+ skipDirective(Id, First, End);
+ return false;
+ }
+
+ if (Kind == pp_endif)
+ return lexEndif(First, End);
+
+ switch (Kind) {
+ case pp_include:
+ case pp___include_macros:
+ case pp_include_next:
+ case pp_import:
+ lexIncludeFilename(First, End);
+ break;
+ default:
+ break;
+ }
+
+ // Everything else.
+ return lexDefault(Kind, First, End);
+}
+
+static void skipUTF8ByteOrderMark(const char *&First, const char *const End) {
+ if ((End - First) >= 3 && First[0] == '\xef' && First[1] == '\xbb' &&
+ First[2] == '\xbf')
+ First += 3;
+}
+
+bool Scanner::scanImpl(const char *First, const char *const End) {
+ skipUTF8ByteOrderMark(First, End);
+ while (First != End)
+ if (lexPPLine(First, End))
+ return true;
+ return false;
+}
+
+bool Scanner::scan(SmallVectorImpl<Directive> &Directives) {
+ bool Error = scanImpl(Input.begin(), Input.end());
+
+ if (!Error) {
+ // Add an EOF on success.
+ if (LastTokenPtr &&
+ (Tokens.empty() || LastTokenPtr > Input.begin() + Tokens.back().Offset))
+ pushDirective(tokens_present_before_eof);
+ pushDirective(pp_eof);
+ }
+
+ ArrayRef<dependency_directives_scan::Token> RemainingTokens = Tokens;
+ for (const DirectiveWithTokens &DirWithToks : DirsWithToks) {
+ assert(RemainingTokens.size() >= DirWithToks.NumTokens);
+ Directives.emplace_back(DirWithToks.Kind,
+ RemainingTokens.take_front(DirWithToks.NumTokens));
+ RemainingTokens = RemainingTokens.drop_front(DirWithToks.NumTokens);
+ }
+ assert(RemainingTokens.empty());
+
+ return Error;
+}
+
+bool clang::scanSourceForDependencyDirectives(
+ StringRef Input, SmallVectorImpl<dependency_directives_scan::Token> &Tokens,
+ SmallVectorImpl<Directive> &Directives, DiagnosticsEngine *Diags,
+ SourceLocation InputSourceLoc) {
+ return Scanner(Input, Tokens, Diags, InputSourceLoc).scan(Directives);
+}
+
+void clang::printDependencyDirectivesAsSource(
+ StringRef Source,
+ ArrayRef<dependency_directives_scan::Directive> Directives,
+ llvm::raw_ostream &OS) {
+ // Add a space separator where it is convenient for testing purposes.
+ auto needsSpaceSeparator =
+ [](tok::TokenKind Prev,
+ const dependency_directives_scan::Token &Tok) -> bool {
+ if (Prev == Tok.Kind)
+ return !Tok.isOneOf(tok::l_paren, tok::r_paren, tok::l_square,
+ tok::r_square);
+ if (Prev == tok::raw_identifier &&
+ Tok.isOneOf(tok::hash, tok::numeric_constant, tok::string_literal,
+ tok::char_constant, tok::header_name))
+ return true;
+ if (Prev == tok::r_paren &&
+ Tok.isOneOf(tok::raw_identifier, tok::hash, tok::string_literal,
+ tok::char_constant, tok::unknown))
+ return true;
+ if (Prev == tok::comma &&
+ Tok.isOneOf(tok::l_paren, tok::string_literal, tok::less))
+ return true;
+ return false;
+ };
+
+ for (const dependency_directives_scan::Directive &Directive : Directives) {
+ if (Directive.Kind == tokens_present_before_eof)
+ OS << "<TokBeforeEOF>";
+ std::optional<tok::TokenKind> PrevTokenKind;
+ for (const dependency_directives_scan::Token &Tok : Directive.Tokens) {
+ if (PrevTokenKind && needsSpaceSeparator(*PrevTokenKind, Tok))
+ OS << ' ';
+ PrevTokenKind = Tok.Kind;
+ OS << Source.slice(Tok.Offset, Tok.getEnd());
+ }
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp b/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp
deleted file mode 100644
index cfca167f8bf1..000000000000
--- a/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp
+++ /dev/null
@@ -1,961 +0,0 @@
-//===- DependencyDirectivesSourceMinimizer.cpp - -------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// \file
-/// This is the implementation for minimizing header and source files to the
-/// minimum necessary preprocessor directives for evaluating includes. It
-/// reduces the source down to #define, #include, #import, @import, and any
-/// conditional preprocessor logic that contains one of those.
-///
-//===----------------------------------------------------------------------===//
-
-#include "clang/Lex/DependencyDirectivesSourceMinimizer.h"
-#include "clang/Basic/CharInfo.h"
-#include "clang/Basic/Diagnostic.h"
-#include "clang/Lex/LexDiagnostic.h"
-#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/MemoryBuffer.h"
-
-using namespace llvm;
-using namespace clang;
-using namespace clang::minimize_source_to_dependency_directives;
-
-namespace {
-
-struct Minimizer {
- /// Minimized output.
- SmallVectorImpl<char> &Out;
- /// The known tokens encountered during the minimization.
- SmallVectorImpl<Token> &Tokens;
-
- Minimizer(SmallVectorImpl<char> &Out, SmallVectorImpl<Token> &Tokens,
- StringRef Input, DiagnosticsEngine *Diags,
- SourceLocation InputSourceLoc)
- : Out(Out), Tokens(Tokens), Input(Input), Diags(Diags),
- InputSourceLoc(InputSourceLoc) {}
-
- /// Lex the provided source and emit the minimized output.
- ///
- /// \returns True on error.
- bool minimize();
-
-private:
- struct IdInfo {
- const char *Last;
- StringRef Name;
- };
-
- /// Lex an identifier.
- ///
- /// \pre First points at a valid identifier head.
- LLVM_NODISCARD IdInfo lexIdentifier(const char *First, const char *const End);
- LLVM_NODISCARD bool isNextIdentifier(StringRef Id, const char *&First,
- const char *const End);
- LLVM_NODISCARD bool minimizeImpl(const char *First, const char *const End);
- LLVM_NODISCARD bool lexPPLine(const char *&First, const char *const End);
- LLVM_NODISCARD bool lexAt(const char *&First, const char *const End);
- LLVM_NODISCARD bool lexModule(const char *&First, const char *const End);
- LLVM_NODISCARD bool lexDefine(const char *&First, const char *const End);
- LLVM_NODISCARD bool lexPragma(const char *&First, const char *const End);
- LLVM_NODISCARD bool lexEndif(const char *&First, const char *const End);
- LLVM_NODISCARD bool lexDefault(TokenKind Kind, StringRef Directive,
- const char *&First, const char *const End);
- Token &makeToken(TokenKind K) {
- Tokens.emplace_back(K, Out.size());
- return Tokens.back();
- }
- void popToken() {
- Out.resize(Tokens.back().Offset);
- Tokens.pop_back();
- }
- TokenKind top() const { return Tokens.empty() ? pp_none : Tokens.back().K; }
-
- Minimizer &put(char Byte) {
- Out.push_back(Byte);
- return *this;
- }
- Minimizer &append(StringRef S) { return append(S.begin(), S.end()); }
- Minimizer &append(const char *First, const char *Last) {
- Out.append(First, Last);
- return *this;
- }
-
- void printToNewline(const char *&First, const char *const End);
- void printAdjacentModuleNameParts(const char *&First, const char *const End);
- LLVM_NODISCARD bool printAtImportBody(const char *&First,
- const char *const End);
- void printDirectiveBody(const char *&First, const char *const End);
- void printAdjacentMacroArgs(const char *&First, const char *const End);
- LLVM_NODISCARD bool printMacroArgs(const char *&First, const char *const End);
-
- /// Reports a diagnostic if the diagnostic engine is provided. Always returns
- /// true at the end.
- bool reportError(const char *CurPtr, unsigned Err);
-
- StringMap<char> SplitIds;
- StringRef Input;
- DiagnosticsEngine *Diags;
- SourceLocation InputSourceLoc;
-};
-
-} // end anonymous namespace
-
-bool Minimizer::reportError(const char *CurPtr, unsigned Err) {
- if (!Diags)
- return true;
- assert(CurPtr >= Input.data() && "invalid buffer ptr");
- Diags->Report(InputSourceLoc.getLocWithOffset(CurPtr - Input.data()), Err);
- return true;
-}
-
-static void skipOverSpaces(const char *&First, const char *const End) {
- while (First != End && isHorizontalWhitespace(*First))
- ++First;
-}
-
-LLVM_NODISCARD static bool isRawStringLiteral(const char *First,
- const char *Current) {
- assert(First <= Current);
-
- // Check if we can even back up.
- if (*Current != '"' || First == Current)
- return false;
-
- // Check for an "R".
- --Current;
- if (*Current != 'R')
- return false;
- if (First == Current || !isIdentifierBody(*--Current))
- return true;
-
- // Check for a prefix of "u", "U", or "L".
- if (*Current == 'u' || *Current == 'U' || *Current == 'L')
- return First == Current || !isIdentifierBody(*--Current);
-
- // Check for a prefix of "u8".
- if (*Current != '8' || First == Current || *Current-- != 'u')
- return false;
- return First == Current || !isIdentifierBody(*--Current);
-}
-
-static void skipRawString(const char *&First, const char *const End) {
- assert(First[0] == '"');
- assert(First[-1] == 'R');
-
- const char *Last = ++First;
- while (Last != End && *Last != '(')
- ++Last;
- if (Last == End) {
- First = Last; // Hit the end... just give up.
- return;
- }
-
- StringRef Terminator(First, Last - First);
- for (;;) {
- // Move First to just past the next ")".
- First = Last;
- while (First != End && *First != ')')
- ++First;
- if (First == End)
- return;
- ++First;
-
- // Look ahead for the terminator sequence.
- Last = First;
- while (Last != End && size_t(Last - First) < Terminator.size() &&
- Terminator[Last - First] == *Last)
- ++Last;
-
- // Check if we hit it (or the end of the file).
- if (Last == End) {
- First = Last;
- return;
- }
- if (size_t(Last - First) < Terminator.size())
- continue;
- if (*Last != '"')
- continue;
- First = Last + 1;
- return;
- }
-}
-
-// Returns the length of EOL, either 0 (no end-of-line), 1 (\n) or 2 (\r\n)
-static unsigned isEOL(const char *First, const char *const End) {
- if (First == End)
- return 0;
- if (End - First > 1 && isVerticalWhitespace(First[0]) &&
- isVerticalWhitespace(First[1]) && First[0] != First[1])
- return 2;
- return !!isVerticalWhitespace(First[0]);
-}
-
-static void skipString(const char *&First, const char *const End) {
- assert(*First == '\'' || *First == '"' || *First == '<');
- const char Terminator = *First == '<' ? '>' : *First;
- for (++First; First != End && *First != Terminator; ++First) {
- // String and character literals don't extend past the end of the line.
- if (isVerticalWhitespace(*First))
- return;
- if (*First != '\\')
- continue;
- // Skip past backslash to the next character. This ensures that the
- // character right after it is skipped as well, which matters if it's
- // the terminator.
- if (++First == End)
- return;
- if (!isWhitespace(*First))
- continue;
- // Whitespace after the backslash might indicate a line continuation.
- const char *FirstAfterBackslashPastSpace = First;
- skipOverSpaces(FirstAfterBackslashPastSpace, End);
- if (unsigned NLSize = isEOL(FirstAfterBackslashPastSpace, End)) {
- // Advance the character pointer to the next line for the next
- // iteration.
- First = FirstAfterBackslashPastSpace + NLSize - 1;
- }
- }
- if (First != End)
- ++First; // Finish off the string.
-}
-
-// Returns the length of the skipped newline
-static unsigned skipNewline(const char *&First, const char *End) {
- if (First == End)
- return 0;
- assert(isVerticalWhitespace(*First));
- unsigned Len = isEOL(First, End);
- assert(Len && "expected newline");
- First += Len;
- return Len;
-}
-
-static bool wasLineContinuation(const char *First, unsigned EOLLen) {
- return *(First - (int)EOLLen - 1) == '\\';
-}
-
-static void skipToNewlineRaw(const char *&First, const char *const End) {
- for (;;) {
- if (First == End)
- return;
-
- unsigned Len = isEOL(First, End);
- if (Len)
- return;
-
- do {
- if (++First == End)
- return;
- Len = isEOL(First, End);
- } while (!Len);
-
- if (First[-1] != '\\')
- return;
-
- First += Len;
- // Keep skipping lines...
- }
-}
-
-static const char *findLastNonSpace(const char *First, const char *Last) {
- assert(First <= Last);
- while (First != Last && isHorizontalWhitespace(Last[-1]))
- --Last;
- return Last;
-}
-
-static const char *findFirstTrailingSpace(const char *First,
- const char *Last) {
- const char *LastNonSpace = findLastNonSpace(First, Last);
- if (Last == LastNonSpace)
- return Last;
- assert(isHorizontalWhitespace(LastNonSpace[0]));
- return LastNonSpace + 1;
-}
-
-static void skipLineComment(const char *&First, const char *const End) {
- assert(First[0] == '/' && First[1] == '/');
- First += 2;
- skipToNewlineRaw(First, End);
-}
-
-static void skipBlockComment(const char *&First, const char *const End) {
- assert(First[0] == '/' && First[1] == '*');
- if (End - First < 4) {
- First = End;
- return;
- }
- for (First += 3; First != End; ++First)
- if (First[-1] == '*' && First[0] == '/') {
- ++First;
- return;
- }
-}
-
-/// \returns True if the current single quotation mark character is a C++ 14
-/// digit separator.
-static bool isQuoteCppDigitSeparator(const char *const Start,
- const char *const Cur,
- const char *const End) {
- assert(*Cur == '\'' && "expected quotation character");
- // skipLine called in places where we don't expect a valid number
- // body before `start` on the same line, so always return false at the start.
- if (Start == Cur)
- return false;
- // The previous character must be a valid PP number character.
- // Make sure that the L, u, U, u8 prefixes don't get marked as a
- // separator though.
- char Prev = *(Cur - 1);
- if (Prev == 'L' || Prev == 'U' || Prev == 'u')
- return false;
- if (Prev == '8' && (Cur - 1 != Start) && *(Cur - 2) == 'u')
- return false;
- if (!isPreprocessingNumberBody(Prev))
- return false;
- // The next character should be a valid identifier body character.
- return (Cur + 1) < End && isIdentifierBody(*(Cur + 1));
-}
-
-static void skipLine(const char *&First, const char *const End) {
- for (;;) {
- assert(First <= End);
- if (First == End)
- return;
-
- if (isVerticalWhitespace(*First)) {
- skipNewline(First, End);
- return;
- }
- const char *Start = First;
- while (First != End && !isVerticalWhitespace(*First)) {
- // Iterate over strings correctly to avoid comments and newlines.
- if (*First == '"' ||
- (*First == '\'' && !isQuoteCppDigitSeparator(Start, First, End))) {
- if (isRawStringLiteral(Start, First))
- skipRawString(First, End);
- else
- skipString(First, End);
- continue;
- }
-
- // Iterate over comments correctly.
- if (*First != '/' || End - First < 2) {
- ++First;
- continue;
- }
-
- if (First[1] == '/') {
- // "//...".
- skipLineComment(First, End);
- continue;
- }
-
- if (First[1] != '*') {
- ++First;
- continue;
- }
-
- // "/*...*/".
- skipBlockComment(First, End);
- }
- if (First == End)
- return;
-
- // Skip over the newline.
- unsigned Len = skipNewline(First, End);
- if (!wasLineContinuation(First, Len)) // Continue past line-continuations.
- break;
- }
-}
-
-static void skipDirective(StringRef Name, const char *&First,
- const char *const End) {
- if (llvm::StringSwitch<bool>(Name)
- .Case("warning", true)
- .Case("error", true)
- .Default(false))
- // Do not process quotes or comments.
- skipToNewlineRaw(First, End);
- else
- skipLine(First, End);
-}
-
-void Minimizer::printToNewline(const char *&First, const char *const End) {
- while (First != End && !isVerticalWhitespace(*First)) {
- const char *Last = First;
- do {
- // Iterate over strings correctly to avoid comments and newlines.
- if (*Last == '"' || *Last == '\'' ||
- (*Last == '<' && top() == pp_include)) {
- if (LLVM_UNLIKELY(isRawStringLiteral(First, Last)))
- skipRawString(Last, End);
- else
- skipString(Last, End);
- continue;
- }
- if (*Last != '/' || End - Last < 2) {
- ++Last;
- continue; // Gather the rest up to print verbatim.
- }
-
- if (Last[1] != '/' && Last[1] != '*') {
- ++Last;
- continue;
- }
-
- // Deal with "//..." and "/*...*/".
- append(First, findFirstTrailingSpace(First, Last));
- First = Last;
-
- if (Last[1] == '/') {
- skipLineComment(First, End);
- return;
- }
-
- put(' ');
- skipBlockComment(First, End);
- skipOverSpaces(First, End);
- Last = First;
- } while (Last != End && !isVerticalWhitespace(*Last));
-
- // Print out the string.
- const char *LastBeforeTrailingSpace = findLastNonSpace(First, Last);
- if (Last == End || LastBeforeTrailingSpace == First ||
- LastBeforeTrailingSpace[-1] != '\\') {
- append(First, LastBeforeTrailingSpace);
- First = Last;
- skipNewline(First, End);
- return;
- }
-
- // Print up to the backslash, backing up over spaces. Preserve at least one
- // space, as the space matters when tokens are separated by a line
- // continuation.
- append(First, findFirstTrailingSpace(
- First, LastBeforeTrailingSpace - 1));
-
- First = Last;
- skipNewline(First, End);
- skipOverSpaces(First, End);
- }
-}
-
-static void skipWhitespace(const char *&First, const char *const End) {
- for (;;) {
- assert(First <= End);
- skipOverSpaces(First, End);
-
- if (End - First < 2)
- return;
-
- if (First[0] == '\\' && isVerticalWhitespace(First[1])) {
- skipNewline(++First, End);
- continue;
- }
-
- // Check for a non-comment character.
- if (First[0] != '/')
- return;
-
- // "// ...".
- if (First[1] == '/') {
- skipLineComment(First, End);
- return;
- }
-
- // Cannot be a comment.
- if (First[1] != '*')
- return;
-
- // "/*...*/".
- skipBlockComment(First, End);
- }
-}
-
-void Minimizer::printAdjacentModuleNameParts(const char *&First,
- const char *const End) {
- // Skip over parts of the body.
- const char *Last = First;
- do
- ++Last;
- while (Last != End && (isIdentifierBody(*Last) || *Last == '.'));
- append(First, Last);
- First = Last;
-}
-
-bool Minimizer::printAtImportBody(const char *&First, const char *const End) {
- for (;;) {
- skipWhitespace(First, End);
- if (First == End)
- return true;
-
- if (isVerticalWhitespace(*First)) {
- skipNewline(First, End);
- continue;
- }
-
- // Found a semicolon.
- if (*First == ';') {
- put(*First++).put('\n');
- return false;
- }
-
- // Don't handle macro expansions inside @import for now.
- if (!isIdentifierBody(*First) && *First != '.')
- return true;
-
- printAdjacentModuleNameParts(First, End);
- }
-}
-
-void Minimizer::printDirectiveBody(const char *&First, const char *const End) {
- skipWhitespace(First, End); // Skip initial whitespace.
- printToNewline(First, End);
- while (Out.back() == ' ')
- Out.pop_back();
- put('\n');
-}
-
-LLVM_NODISCARD static const char *lexRawIdentifier(const char *First,
- const char *const End) {
- assert(isIdentifierBody(*First) && "invalid identifer");
- const char *Last = First + 1;
- while (Last != End && isIdentifierBody(*Last))
- ++Last;
- return Last;
-}
-
-LLVM_NODISCARD static const char *
-getIdentifierContinuation(const char *First, const char *const End) {
- if (End - First < 3 || First[0] != '\\' || !isVerticalWhitespace(First[1]))
- return nullptr;
-
- ++First;
- skipNewline(First, End);
- if (First == End)
- return nullptr;
- return isIdentifierBody(First[0]) ? First : nullptr;
-}
-
-Minimizer::IdInfo Minimizer::lexIdentifier(const char *First,
- const char *const End) {
- const char *Last = lexRawIdentifier(First, End);
- const char *Next = getIdentifierContinuation(Last, End);
- if (LLVM_LIKELY(!Next))
- return IdInfo{Last, StringRef(First, Last - First)};
-
- // Slow path, where identifiers are split over lines.
- SmallVector<char, 64> Id(First, Last);
- while (Next) {
- Last = lexRawIdentifier(Next, End);
- Id.append(Next, Last);
- Next = getIdentifierContinuation(Last, End);
- }
- return IdInfo{
- Last,
- SplitIds.try_emplace(StringRef(Id.begin(), Id.size()), 0).first->first()};
-}
-
-void Minimizer::printAdjacentMacroArgs(const char *&First,
- const char *const End) {
- // Skip over parts of the body.
- const char *Last = First;
- do
- ++Last;
- while (Last != End &&
- (isIdentifierBody(*Last) || *Last == '.' || *Last == ','));
- append(First, Last);
- First = Last;
-}
-
-bool Minimizer::printMacroArgs(const char *&First, const char *const End) {
- assert(*First == '(');
- put(*First++);
- for (;;) {
- skipWhitespace(First, End);
- if (First == End)
- return true;
-
- if (*First == ')') {
- put(*First++);
- return false;
- }
-
- // This is intentionally fairly liberal.
- if (!(isIdentifierBody(*First) || *First == '.' || *First == ','))
- return true;
-
- printAdjacentMacroArgs(First, End);
- }
-}
-
-/// Looks for an identifier starting from Last.
-///
-/// Updates "First" to just past the next identifier, if any. Returns true iff
-/// the identifier matches "Id".
-bool Minimizer::isNextIdentifier(StringRef Id, const char *&First,
- const char *const End) {
- skipWhitespace(First, End);
- if (First == End || !isIdentifierHead(*First))
- return false;
-
- IdInfo FoundId = lexIdentifier(First, End);
- First = FoundId.Last;
- return FoundId.Name == Id;
-}
-
-bool Minimizer::lexAt(const char *&First, const char *const End) {
- // Handle "@import".
- const char *ImportLoc = First++;
- if (!isNextIdentifier("import", First, End)) {
- skipLine(First, End);
- return false;
- }
- makeToken(decl_at_import);
- append("@import ");
- if (printAtImportBody(First, End))
- return reportError(
- ImportLoc, diag::err_dep_source_minimizer_missing_sema_after_at_import);
- skipWhitespace(First, End);
- if (First == End)
- return false;
- if (!isVerticalWhitespace(*First))
- return reportError(
- ImportLoc, diag::err_dep_source_minimizer_unexpected_tokens_at_import);
- skipNewline(First, End);
- return false;
-}
-
-bool Minimizer::lexModule(const char *&First, const char *const End) {
- IdInfo Id = lexIdentifier(First, End);
- First = Id.Last;
- bool Export = false;
- if (Id.Name == "export") {
- Export = true;
- skipWhitespace(First, End);
- if (!isIdentifierBody(*First)) {
- skipLine(First, End);
- return false;
- }
- Id = lexIdentifier(First, End);
- First = Id.Last;
- }
-
- if (Id.Name != "module" && Id.Name != "import") {
- skipLine(First, End);
- return false;
- }
-
- skipWhitespace(First, End);
-
- // Ignore this as a module directive if the next character can't be part of
- // an import.
-
- switch (*First) {
- case ':':
- case '<':
- case '"':
- break;
- default:
- if (!isIdentifierBody(*First)) {
- skipLine(First, End);
- return false;
- }
- }
-
- if (Export) {
- makeToken(cxx_export_decl);
- append("export ");
- }
-
- if (Id.Name == "module")
- makeToken(cxx_module_decl);
- else
- makeToken(cxx_import_decl);
- append(Id.Name);
- append(" ");
- printToNewline(First, End);
- append("\n");
- return false;
-}
-
-bool Minimizer::lexDefine(const char *&First, const char *const End) {
- makeToken(pp_define);
- append("#define ");
- skipWhitespace(First, End);
-
- if (!isIdentifierHead(*First))
- return reportError(First, diag::err_pp_macro_not_identifier);
-
- IdInfo Id = lexIdentifier(First, End);
- const char *Last = Id.Last;
- append(Id.Name);
- if (Last == End)
- return false;
- if (*Last == '(') {
- size_t Size = Out.size();
- if (printMacroArgs(Last, End)) {
- // Be robust to bad macro arguments, since they can show up in disabled
- // code.
- Out.resize(Size);
- append("(/* invalid */\n");
- skipLine(Last, End);
- return false;
- }
- }
- skipWhitespace(Last, End);
- if (Last == End)
- return false;
- if (!isVerticalWhitespace(*Last))
- put(' ');
- printDirectiveBody(Last, End);
- First = Last;
- return false;
-}
-
-bool Minimizer::lexPragma(const char *&First, const char *const End) {
- // #pragma.
- skipWhitespace(First, End);
- if (First == End || !isIdentifierHead(*First))
- return false;
-
- IdInfo FoundId = lexIdentifier(First, End);
- First = FoundId.Last;
- if (FoundId.Name == "once") {
- // #pragma once
- skipLine(First, End);
- makeToken(pp_pragma_once);
- append("#pragma once\n");
- return false;
- }
-
- if (FoundId.Name != "clang") {
- skipLine(First, End);
- return false;
- }
-
- // #pragma clang.
- if (!isNextIdentifier("module", First, End)) {
- skipLine(First, End);
- return false;
- }
-
- // #pragma clang module.
- if (!isNextIdentifier("import", First, End)) {
- skipLine(First, End);
- return false;
- }
-
- // #pragma clang module import.
- makeToken(pp_pragma_import);
- append("#pragma clang module import ");
- printDirectiveBody(First, End);
- return false;
-}
-
-bool Minimizer::lexEndif(const char *&First, const char *const End) {
- // Strip out "#else" if it's empty.
- if (top() == pp_else)
- popToken();
-
- // If "#ifdef" is empty, strip it and skip the "#endif".
- //
- // FIXME: Once/if Clang starts disallowing __has_include in macro expansions,
- // we can skip empty `#if` and `#elif` blocks as well after scanning for a
- // literal __has_include in the condition. Even without that rule we could
- // drop the tokens if we scan for identifiers in the condition and find none.
- if (top() == pp_ifdef || top() == pp_ifndef) {
- popToken();
- skipLine(First, End);
- return false;
- }
-
- return lexDefault(pp_endif, "endif", First, End);
-}
-
-bool Minimizer::lexDefault(TokenKind Kind, StringRef Directive,
- const char *&First, const char *const End) {
- makeToken(Kind);
- put('#').append(Directive).put(' ');
- printDirectiveBody(First, End);
- return false;
-}
-
-static bool isStartOfRelevantLine(char First) {
- switch (First) {
- case '#':
- case '@':
- case 'i':
- case 'e':
- case 'm':
- return true;
- }
- return false;
-}
-
-bool Minimizer::lexPPLine(const char *&First, const char *const End) {
- assert(First != End);
-
- skipWhitespace(First, End);
- assert(First <= End);
- if (First == End)
- return false;
-
- if (!isStartOfRelevantLine(*First)) {
- skipLine(First, End);
- assert(First <= End);
- return false;
- }
-
- // Handle "@import".
- if (*First == '@')
- return lexAt(First, End);
-
- if (*First == 'i' || *First == 'e' || *First == 'm')
- return lexModule(First, End);
-
- // Handle preprocessing directives.
- ++First; // Skip over '#'.
- skipWhitespace(First, End);
-
- if (First == End)
- return reportError(First, diag::err_pp_expected_eol);
-
- if (!isIdentifierHead(*First)) {
- skipLine(First, End);
- return false;
- }
-
- // Figure out the token.
- IdInfo Id = lexIdentifier(First, End);
- First = Id.Last;
- auto Kind = llvm::StringSwitch<TokenKind>(Id.Name)
- .Case("include", pp_include)
- .Case("__include_macros", pp___include_macros)
- .Case("define", pp_define)
- .Case("undef", pp_undef)
- .Case("import", pp_import)
- .Case("include_next", pp_include_next)
- .Case("if", pp_if)
- .Case("ifdef", pp_ifdef)
- .Case("ifndef", pp_ifndef)
- .Case("elif", pp_elif)
- .Case("elifdef", pp_elifdef)
- .Case("elifndef", pp_elifndef)
- .Case("else", pp_else)
- .Case("endif", pp_endif)
- .Case("pragma", pp_pragma_import)
- .Default(pp_none);
- if (Kind == pp_none) {
- skipDirective(Id.Name, First, End);
- return false;
- }
-
- if (Kind == pp_endif)
- return lexEndif(First, End);
-
- if (Kind == pp_define)
- return lexDefine(First, End);
-
- if (Kind == pp_pragma_import)
- return lexPragma(First, End);
-
- // Everything else.
- return lexDefault(Kind, Id.Name, First, End);
-}
-
-static void skipUTF8ByteOrderMark(const char *&First, const char *const End) {
- if ((End - First) >= 3 && First[0] == '\xef' && First[1] == '\xbb' &&
- First[2] == '\xbf')
- First += 3;
-}
-
-bool Minimizer::minimizeImpl(const char *First, const char *const End) {
- skipUTF8ByteOrderMark(First, End);
- while (First != End)
- if (lexPPLine(First, End))
- return true;
- return false;
-}
-
-bool Minimizer::minimize() {
- bool Error = minimizeImpl(Input.begin(), Input.end());
-
- if (!Error) {
- // Add a trailing newline and an EOF on success.
- if (!Out.empty() && Out.back() != '\n')
- Out.push_back('\n');
- makeToken(pp_eof);
- }
-
- // Null-terminate the output. This way the memory buffer that's passed to
- // Clang will not have to worry about the terminating '\0'.
- Out.push_back(0);
- Out.pop_back();
- return Error;
-}
-
-bool clang::minimize_source_to_dependency_directives::computeSkippedRanges(
- ArrayRef<Token> Input, llvm::SmallVectorImpl<SkippedRange> &Range) {
- struct Directive {
- enum DirectiveKind {
- If, // if/ifdef/ifndef
- Else // elif/elifdef/elifndef, else
- };
- int Offset;
- DirectiveKind Kind;
- };
- llvm::SmallVector<Directive, 32> Offsets;
- for (const Token &T : Input) {
- switch (T.K) {
- case pp_if:
- case pp_ifdef:
- case pp_ifndef:
- Offsets.push_back({T.Offset, Directive::If});
- break;
-
- case pp_elif:
- case pp_elifdef:
- case pp_elifndef:
- case pp_else: {
- if (Offsets.empty())
- return true;
- int PreviousOffset = Offsets.back().Offset;
- Range.push_back({PreviousOffset, T.Offset - PreviousOffset});
- Offsets.push_back({T.Offset, Directive::Else});
- break;
- }
-
- case pp_endif: {
- if (Offsets.empty())
- return true;
- int PreviousOffset = Offsets.back().Offset;
- Range.push_back({PreviousOffset, T.Offset - PreviousOffset});
- do {
- Directive::DirectiveKind Kind = Offsets.pop_back_val().Kind;
- if (Kind == Directive::If)
- break;
- } while (!Offsets.empty());
- break;
- }
- default:
- break;
- }
- }
- return false;
-}
-
-bool clang::minimizeSourceToDependencyDirectives(
- StringRef Input, SmallVectorImpl<char> &Output,
- SmallVectorImpl<Token> &Tokens, DiagnosticsEngine *Diags,
- SourceLocation InputSourceLoc) {
- Output.clear();
- Tokens.clear();
- return Minimizer(Output, Tokens, Input, Diags, InputSourceLoc).minimize();
-}
diff --git a/contrib/llvm-project/clang/lib/Lex/HeaderMap.cpp b/contrib/llvm-project/clang/lib/Lex/HeaderMap.cpp
index ae5e6b221953..00bf880726ee 100644
--- a/contrib/llvm-project/clang/lib/Lex/HeaderMap.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/HeaderMap.cpp
@@ -11,18 +11,20 @@
//===----------------------------------------------------------------------===//
#include "clang/Lex/HeaderMap.h"
-#include "clang/Lex/HeaderMapTypes.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Lex/HeaderMapTypes.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SwapByteOrder.h"
-#include "llvm/Support/Debug.h"
+#include "llvm/Support/SystemZ/zOSSupport.h"
#include <cstring>
#include <memory>
+#include <optional>
using namespace clang;
/// HashHMapKey - This is the 'well known' hash function required by the file
@@ -47,10 +49,9 @@ static inline unsigned HashHMapKey(StringRef Str) {
/// map. If it doesn't look like a HeaderMap, it gives up and returns null.
/// If it looks like a HeaderMap but is obviously corrupted, it puts a reason
/// into the string error argument and returns null.
-std::unique_ptr<HeaderMap> HeaderMap::Create(const FileEntry *FE,
- FileManager &FM) {
+std::unique_ptr<HeaderMap> HeaderMap::Create(FileEntryRef FE, FileManager &FM) {
// If the file is too small to be a header map, ignore it.
- unsigned FileSize = FE->getSize();
+ unsigned FileSize = FE.getSize();
if (FileSize <= sizeof(HMapHeader)) return nullptr;
auto FileBuffer = FM.getBufferForFile(FE);
@@ -76,8 +77,8 @@ bool HeaderMapImpl::checkHeader(const llvm::MemoryBuffer &File,
if (Header->Magic == HMAP_HeaderMagicNumber &&
Header->Version == HMAP_HeaderVersion)
NeedsByteSwap = false;
- else if (Header->Magic == llvm::ByteSwap_32(HMAP_HeaderMagicNumber) &&
- Header->Version == llvm::ByteSwap_16(HMAP_HeaderVersion))
+ else if (Header->Magic == llvm::byteswap<uint32_t>(HMAP_HeaderMagicNumber) &&
+ Header->Version == llvm::byteswap<uint16_t>(HMAP_HeaderVersion))
NeedsByteSwap = true; // Mixed endianness headermap.
else
return false; // Not a header map.
@@ -87,9 +88,8 @@ bool HeaderMapImpl::checkHeader(const llvm::MemoryBuffer &File,
// Check the number of buckets. It should be a power of two, and there
// should be enough space in the file for all of them.
- uint32_t NumBuckets = NeedsByteSwap
- ? llvm::sys::getSwappedBytes(Header->NumBuckets)
- : Header->NumBuckets;
+ uint32_t NumBuckets =
+ NeedsByteSwap ? llvm::byteswap(Header->NumBuckets) : Header->NumBuckets;
if (!llvm::isPowerOf2_32(NumBuckets))
return false;
if (File.getBufferSize() <
@@ -112,7 +112,7 @@ StringRef HeaderMapImpl::getFileName() const {
unsigned HeaderMapImpl::getEndianAdjustedWord(unsigned X) const {
if (!NeedsBSwap) return X;
- return llvm::ByteSwap_32(X);
+ return llvm::byteswap<uint32_t>(X);
}
/// getHeader - Return a reference to the file header, in unbyte-swapped form.
@@ -145,13 +145,13 @@ HMapBucket HeaderMapImpl::getBucket(unsigned BucketNo) const {
return Result;
}
-Optional<StringRef> HeaderMapImpl::getString(unsigned StrTabIdx) const {
+std::optional<StringRef> HeaderMapImpl::getString(unsigned StrTabIdx) const {
// Add the start of the string table to the idx.
StrTabIdx += getEndianAdjustedWord(getHeader().StringsOffset);
// Check for invalid index.
if (StrTabIdx >= FileBuffer->getBufferSize())
- return None;
+ return std::nullopt;
const char *Data = FileBuffer->getBufferStart() + StrTabIdx;
unsigned MaxLen = FileBuffer->getBufferSize() - StrTabIdx;
@@ -159,7 +159,7 @@ Optional<StringRef> HeaderMapImpl::getString(unsigned StrTabIdx) const {
// Check whether the buffer is null-terminated.
if (Len == MaxLen && Data[Len - 1])
- return None;
+ return std::nullopt;
return StringRef(Data, Len);
}
@@ -177,7 +177,7 @@ LLVM_DUMP_METHOD void HeaderMapImpl::dump() const {
<< ", " << getEndianAdjustedWord(Hdr.NumEntries) << "\n";
auto getStringOrInvalid = [this](unsigned Id) -> StringRef {
- if (Optional<StringRef> S = getString(Id))
+ if (std::optional<StringRef> S = getString(Id))
return *S;
return "<invalid>";
};
@@ -194,19 +194,6 @@ LLVM_DUMP_METHOD void HeaderMapImpl::dump() const {
}
}
-/// LookupFile - Check to see if the specified relative filename is located in
-/// this HeaderMap. If so, open it and return its FileEntry.
-Optional<FileEntryRef> HeaderMap::LookupFile(StringRef Filename,
- FileManager &FM) const {
-
- SmallString<1024> Path;
- StringRef Dest = HeaderMapImpl::lookupFilename(Filename, Path);
- if (Dest.empty())
- return None;
-
- return FM.getOptionalFileRef(Dest);
-}
-
StringRef HeaderMapImpl::lookupFilename(StringRef Filename,
SmallVectorImpl<char> &DestPath) const {
const HMapHeader &Hdr = getHeader();
@@ -221,7 +208,7 @@ StringRef HeaderMapImpl::lookupFilename(StringRef Filename,
if (B.Key == HMAP_EmptyBucketKey) return StringRef(); // Hash miss.
// See if the key matches. If not, probe on.
- Optional<StringRef> Key = getString(B.Key);
+ std::optional<StringRef> Key = getString(B.Key);
if (LLVM_UNLIKELY(!Key))
continue;
if (!Filename.equals_insensitive(*Key))
@@ -229,8 +216,8 @@ StringRef HeaderMapImpl::lookupFilename(StringRef Filename,
// If so, we have a match in the hash table. Construct the destination
// path.
- Optional<StringRef> Prefix = getString(B.Prefix);
- Optional<StringRef> Suffix = getString(B.Suffix);
+ std::optional<StringRef> Prefix = getString(B.Prefix);
+ std::optional<StringRef> Suffix = getString(B.Suffix);
DestPath.clear();
if (LLVM_LIKELY(Prefix && Suffix)) {
@@ -253,9 +240,9 @@ StringRef HeaderMapImpl::reverseLookupFilename(StringRef DestPath) const {
if (B.Key == HMAP_EmptyBucketKey)
continue;
- Optional<StringRef> Key = getString(B.Key);
- Optional<StringRef> Prefix = getString(B.Prefix);
- Optional<StringRef> Suffix = getString(B.Suffix);
+ std::optional<StringRef> Key = getString(B.Key);
+ std::optional<StringRef> Prefix = getString(B.Prefix);
+ std::optional<StringRef> Suffix = getString(B.Suffix);
if (LLVM_LIKELY(Key && Prefix && Suffix)) {
SmallVector<char, 1024> Buf;
Buf.append(Prefix->begin(), Prefix->end());
diff --git a/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp b/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp
index d5adbcf62cbc..dfa974e9a67e 100644
--- a/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp
@@ -29,6 +29,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/Errc.h"
@@ -89,16 +90,10 @@ HeaderSearch::HeaderSearch(std::shared_ptr<HeaderSearchOptions> HSOpts,
void HeaderSearch::PrintStats() {
llvm::errs() << "\n*** HeaderSearch Stats:\n"
<< FileInfo.size() << " files tracked.\n";
- unsigned NumOnceOnlyFiles = 0, MaxNumIncludes = 0, NumSingleIncludedFiles = 0;
- for (unsigned i = 0, e = FileInfo.size(); i != e; ++i) {
- NumOnceOnlyFiles += FileInfo[i].isImport;
- if (MaxNumIncludes < FileInfo[i].NumIncludes)
- MaxNumIncludes = FileInfo[i].NumIncludes;
- NumSingleIncludedFiles += FileInfo[i].NumIncludes == 1;
- }
- llvm::errs() << " " << NumOnceOnlyFiles << " #import/#pragma once files.\n"
- << " " << NumSingleIncludedFiles << " included exactly once.\n"
- << " " << MaxNumIncludes << " max times a file is included.\n";
+ unsigned NumOnceOnlyFiles = 0;
+ for (unsigned i = 0, e = FileInfo.size(); i != e; ++i)
+ NumOnceOnlyFiles += (FileInfo[i].isPragmaOnce || FileInfo[i].isImport);
+ llvm::errs() << " " << NumOnceOnlyFiles << " #import/#pragma once files.\n";
llvm::errs() << " " << NumIncluded << " #include/#include_next/#import.\n"
<< " " << NumMultiIncludeFileOptzn
@@ -108,9 +103,47 @@ void HeaderSearch::PrintStats() {
<< NumSubFrameworkLookups << " subframework lookups.\n";
}
+void HeaderSearch::SetSearchPaths(
+ std::vector<DirectoryLookup> dirs, unsigned int angledDirIdx,
+ unsigned int systemDirIdx,
+ llvm::DenseMap<unsigned int, unsigned int> searchDirToHSEntry) {
+ assert(angledDirIdx <= systemDirIdx && systemDirIdx <= dirs.size() &&
+ "Directory indices are unordered");
+ SearchDirs = std::move(dirs);
+ SearchDirsUsage.assign(SearchDirs.size(), false);
+ AngledDirIdx = angledDirIdx;
+ SystemDirIdx = systemDirIdx;
+ SearchDirToHSEntry = std::move(searchDirToHSEntry);
+ //LookupFileCache.clear();
+ indexInitialHeaderMaps();
+}
+
+void HeaderSearch::AddSearchPath(const DirectoryLookup &dir, bool isAngled) {
+ unsigned idx = isAngled ? SystemDirIdx : AngledDirIdx;
+ SearchDirs.insert(SearchDirs.begin() + idx, dir);
+ SearchDirsUsage.insert(SearchDirsUsage.begin() + idx, false);
+ if (!isAngled)
+ AngledDirIdx++;
+ SystemDirIdx++;
+}
+
+std::vector<bool> HeaderSearch::computeUserEntryUsage() const {
+ std::vector<bool> UserEntryUsage(HSOpts->UserEntries.size());
+ for (unsigned I = 0, E = SearchDirsUsage.size(); I < E; ++I) {
+ // Check whether this DirectoryLookup has been successfully used.
+ if (SearchDirsUsage[I]) {
+ auto UserEntryIdxIt = SearchDirToHSEntry.find(I);
+ // Check whether this DirectoryLookup maps to a HeaderSearch::UserEntry.
+ if (UserEntryIdxIt != SearchDirToHSEntry.end())
+ UserEntryUsage[UserEntryIdxIt->second] = true;
+ }
+ }
+ return UserEntryUsage;
+}
+
/// CreateHeaderMap - This method returns a HeaderMap for the specified
/// FileEntry, uniquing them through the 'HeaderMaps' datastructure.
-const HeaderMap *HeaderSearch::CreateHeaderMap(const FileEntry *FE) {
+const HeaderMap *HeaderSearch::CreateHeaderMap(FileEntryRef FE) {
// We expect the number of headermaps to be small, and almost always empty.
// If it ever grows, use of a linear search should be re-evaluated.
if (!HeaderMaps.empty()) {
@@ -133,13 +166,17 @@ const HeaderMap *HeaderSearch::CreateHeaderMap(const FileEntry *FE) {
void HeaderSearch::getHeaderMapFileNames(
SmallVectorImpl<std::string> &Names) const {
for (auto &HM : HeaderMaps)
- Names.push_back(std::string(HM.first->getName()));
+ Names.push_back(std::string(HM.first.getName()));
}
std::string HeaderSearch::getCachedModuleFileName(Module *Module) {
- const FileEntry *ModuleMap =
+ OptionalFileEntryRef ModuleMap =
getModuleMap().getModuleMapFileForUniquing(Module);
- return getCachedModuleFileName(Module->Name, ModuleMap->getName());
+ // The ModuleMap maybe a nullptr, when we load a cached C++ module without
+ // *.modulemap file. In this case, just return an empty string.
+ if (!ModuleMap)
+ return {};
+ return getCachedModuleFileName(Module->Name, ModuleMap->getNameAsRequested());
}
std::string HeaderSearch::getPrebuiltModuleFileName(StringRef ModuleName,
@@ -157,15 +194,24 @@ std::string HeaderSearch::getPrebuiltModuleFileName(StringRef ModuleName,
for (const std::string &Dir : HSOpts->PrebuiltModulePaths) {
SmallString<256> Result(Dir);
llvm::sys::fs::make_absolute(Result);
- llvm::sys::path::append(Result, ModuleName + ".pcm");
+ if (ModuleName.contains(':'))
+ // The separator of C++20 modules partitions (':') is not good for file
+ // systems, here clang and gcc choose '-' by default since it is not a
+ // valid character of C++ indentifiers. So we could avoid conflicts.
+ llvm::sys::path::append(Result, ModuleName.split(':').first + "-" +
+ ModuleName.split(':').second +
+ ".pcm");
+ else
+ llvm::sys::path::append(Result, ModuleName + ".pcm");
if (getFileMgr().getFile(Result.str()))
return std::string(Result);
}
+
return {};
}
std::string HeaderSearch::getPrebuiltImplicitModuleFileName(Module *Module) {
- const FileEntry *ModuleMap =
+ OptionalFileEntryRef ModuleMap =
getModuleMap().getModuleMapFileForUniquing(Module);
StringRef ModuleName = Module->Name;
StringRef ModuleMapPath = ModuleMap->getName();
@@ -209,18 +255,11 @@ std::string HeaderSearch::getCachedModuleFileNameImpl(StringRef ModuleName,
//
// To avoid false-negatives, we form as canonical a path as we can, and map
// to lower-case in case we're on a case-insensitive file system.
- std::string Parent =
- std::string(llvm::sys::path::parent_path(ModuleMapPath));
- if (Parent.empty())
- Parent = ".";
- auto Dir = FileMgr.getDirectory(Parent);
- if (!Dir)
+ SmallString<128> CanonicalPath(ModuleMapPath);
+ if (getModuleMap().canonicalizeModuleMapPath(CanonicalPath))
return {};
- auto DirName = FileMgr.getCanonicalName(*Dir);
- auto FileName = llvm::sys::path::filename(ModuleMapPath);
- llvm::hash_code Hash =
- llvm::hash_combine(DirName.lower(), FileName.lower());
+ llvm::hash_code Hash = llvm::hash_combine(CanonicalPath.str().lower());
SmallString<128> HashStr;
llvm::APInt(64, size_t(Hash)).toStringUnsigned(HashStr, /*Radix*/36);
@@ -229,7 +268,8 @@ std::string HeaderSearch::getCachedModuleFileNameImpl(StringRef ModuleName,
return Result.str().str();
}
-Module *HeaderSearch::lookupModule(StringRef ModuleName, bool AllowSearch,
+Module *HeaderSearch::lookupModule(StringRef ModuleName,
+ SourceLocation ImportLoc, bool AllowSearch,
bool AllowExtraModuleMapSearch) {
// Look in the module map to determine if there is a module by this name.
Module *Module = ModMap.findModule(ModuleName);
@@ -237,7 +277,8 @@ Module *HeaderSearch::lookupModule(StringRef ModuleName, bool AllowSearch,
return Module;
StringRef SearchName = ModuleName;
- Module = lookupModule(ModuleName, SearchName, AllowExtraModuleMapSearch);
+ Module = lookupModule(ModuleName, SearchName, ImportLoc,
+ AllowExtraModuleMapSearch);
// The facility for "private modules" -- adjacent, optional module maps named
// module.private.modulemap that are supposed to define private submodules --
@@ -248,29 +289,32 @@ Module *HeaderSearch::lookupModule(StringRef ModuleName, bool AllowSearch,
// could force building unwanted dependencies into the parent module and cause
// dependency cycles.
if (!Module && SearchName.consume_back("_Private"))
- Module = lookupModule(ModuleName, SearchName, AllowExtraModuleMapSearch);
+ Module = lookupModule(ModuleName, SearchName, ImportLoc,
+ AllowExtraModuleMapSearch);
if (!Module && SearchName.consume_back("Private"))
- Module = lookupModule(ModuleName, SearchName, AllowExtraModuleMapSearch);
+ Module = lookupModule(ModuleName, SearchName, ImportLoc,
+ AllowExtraModuleMapSearch);
return Module;
}
Module *HeaderSearch::lookupModule(StringRef ModuleName, StringRef SearchName,
+ SourceLocation ImportLoc,
bool AllowExtraModuleMapSearch) {
Module *Module = nullptr;
// Look through the various header search paths to load any available module
// maps, searching for a module map that describes this module.
- for (unsigned Idx = 0, N = SearchDirs.size(); Idx != N; ++Idx) {
- if (SearchDirs[Idx].isFramework()) {
+ for (DirectoryLookup &Dir : search_dir_range()) {
+ if (Dir.isFramework()) {
// Search for or infer a module map for a framework. Here we use
// SearchName rather than ModuleName, to permit finding private modules
// named FooPrivate in buggy frameworks named Foo.
SmallString<128> FrameworkDirName;
- FrameworkDirName += SearchDirs[Idx].getFrameworkDir()->getName();
+ FrameworkDirName += Dir.getFrameworkDirRef()->getName();
llvm::sys::path::append(FrameworkDirName, SearchName + ".framework");
- if (auto FrameworkDir = FileMgr.getDirectory(FrameworkDirName)) {
- bool IsSystem
- = SearchDirs[Idx].getDirCharacteristic() != SrcMgr::C_User;
+ if (auto FrameworkDir =
+ FileMgr.getOptionalDirectoryRef(FrameworkDirName)) {
+ bool IsSystem = Dir.getDirCharacteristic() != SrcMgr::C_User;
Module = loadFrameworkModule(ModuleName, *FrameworkDir, IsSystem);
if (Module)
break;
@@ -280,12 +324,15 @@ Module *HeaderSearch::lookupModule(StringRef ModuleName, StringRef SearchName,
// FIXME: Figure out how header maps and module maps will work together.
// Only deal with normal search directories.
- if (!SearchDirs[Idx].isNormalDir())
+ if (!Dir.isNormalDir())
continue;
- bool IsSystem = SearchDirs[Idx].isSystemHeaderDirectory();
+ bool IsSystem = Dir.isSystemHeaderDirectory();
+ // Only returns std::nullopt if not a normal directory, which we just
+ // checked
+ DirectoryEntryRef NormalDir = *Dir.getDirRef();
// Search for a module map file in this directory.
- if (loadModuleMapFile(SearchDirs[Idx].getDir(), IsSystem,
+ if (loadModuleMapFile(NormalDir, IsSystem,
/*IsFramework*/false) == LMM_NewlyLoaded) {
// We just loaded a module map file; check whether the module is
// available now.
@@ -297,7 +344,7 @@ Module *HeaderSearch::lookupModule(StringRef ModuleName, StringRef SearchName,
// Search for a module map in a subdirectory with the same name as the
// module.
SmallString<128> NestedModuleMapDirName;
- NestedModuleMapDirName = SearchDirs[Idx].getDir()->getName();
+ NestedModuleMapDirName = Dir.getDirRef()->getName();
llvm::sys::path::append(NestedModuleMapDirName, ModuleName);
if (loadModuleMapFile(NestedModuleMapDirName, IsSystem,
/*IsFramework*/false) == LMM_NewlyLoaded){
@@ -309,13 +356,13 @@ Module *HeaderSearch::lookupModule(StringRef ModuleName, StringRef SearchName,
// If we've already performed the exhaustive search for module maps in this
// search directory, don't do it again.
- if (SearchDirs[Idx].haveSearchedAllModuleMaps())
+ if (Dir.haveSearchedAllModuleMaps())
continue;
// Load all module maps in the immediate subdirectories of this search
// directory if ModuleName was from @import.
if (AllowExtraModuleMapSearch)
- loadSubdirectoryModuleMaps(SearchDirs[Idx]);
+ loadSubdirectoryModuleMaps(Dir);
// Look again for the module.
Module = ModMap.findModule(ModuleName);
@@ -326,6 +373,30 @@ Module *HeaderSearch::lookupModule(StringRef ModuleName, StringRef SearchName,
return Module;
}
+void HeaderSearch::indexInitialHeaderMaps() {
+ llvm::StringMap<unsigned, llvm::BumpPtrAllocator> Index(SearchDirs.size());
+
+ // Iterate over all filename keys and associate them with the index i.
+ for (unsigned i = 0; i != SearchDirs.size(); ++i) {
+ auto &Dir = SearchDirs[i];
+
+ // We're concerned with only the initial contiguous run of header
+ // maps within SearchDirs, which can be 99% of SearchDirs when
+ // SearchDirs.size() is ~10000.
+ if (!Dir.isHeaderMap()) {
+ SearchDirHeaderMapIndex = std::move(Index);
+ FirstNonHeaderMapSearchDirIdx = i;
+ break;
+ }
+
+ // Give earlier keys precedence over identical later keys.
+ auto Callback = [&](StringRef Filename) {
+ Index.try_emplace(Filename.lower(), i);
+ };
+ Dir.getHeaderMap()->forEachKey(Callback);
+ }
+}
+
//===----------------------------------------------------------------------===//
// File lookup within a DirectoryLookup scope
//===----------------------------------------------------------------------===//
@@ -333,22 +404,22 @@ Module *HeaderSearch::lookupModule(StringRef ModuleName, StringRef SearchName,
/// getName - Return the directory or filename corresponding to this lookup
/// object.
StringRef DirectoryLookup::getName() const {
- // FIXME: Use the name from \c DirectoryEntryRef.
if (isNormalDir())
- return getDir()->getName();
+ return getDirRef()->getName();
if (isFramework())
- return getFrameworkDir()->getName();
+ return getFrameworkDirRef()->getName();
assert(isHeaderMap() && "Unknown DirectoryLookup");
return getHeaderMap()->getFileName();
}
-Optional<FileEntryRef> HeaderSearch::getFileAndSuggestModule(
+OptionalFileEntryRef HeaderSearch::getFileAndSuggestModule(
StringRef FileName, SourceLocation IncludeLoc, const DirectoryEntry *Dir,
bool IsSystemHeaderDir, Module *RequestingModule,
- ModuleMap::KnownHeader *SuggestedModule) {
+ ModuleMap::KnownHeader *SuggestedModule, bool OpenFile /*=true*/,
+ bool CacheFailures /*=true*/) {
// If we have a module map that might map this header, load it and
// check whether we'll have a suggestion for a module.
- auto File = getFileMgr().getFileRef(FileName, /*OpenFile=*/true);
+ auto File = getFileMgr().getFileRef(FileName, OpenFile, CacheFailures);
if (!File) {
// For rare, surprising errors (e.g. "out of file handles"), diag the EC
// message.
@@ -359,26 +430,27 @@ Optional<FileEntryRef> HeaderSearch::getFileAndSuggestModule(
Diags.Report(IncludeLoc, diag::err_cannot_open_file)
<< FileName << EC.message();
}
- return None;
+ return std::nullopt;
}
// If there is a module that corresponds to this header, suggest it.
if (!findUsableModuleForHeader(
- &File->getFileEntry(), Dir ? Dir : File->getFileEntry().getDir(),
- RequestingModule, SuggestedModule, IsSystemHeaderDir))
- return None;
+ *File, Dir ? Dir : File->getFileEntry().getDir(), RequestingModule,
+ SuggestedModule, IsSystemHeaderDir))
+ return std::nullopt;
return *File;
}
/// LookupFile - Lookup the specified file in this search path, returning it
/// if it exists or returning null if not.
-Optional<FileEntryRef> DirectoryLookup::LookupFile(
+OptionalFileEntryRef DirectoryLookup::LookupFile(
StringRef &Filename, HeaderSearch &HS, SourceLocation IncludeLoc,
SmallVectorImpl<char> *SearchPath, SmallVectorImpl<char> *RelativePath,
Module *RequestingModule, ModuleMap::KnownHeader *SuggestedModule,
bool &InUserSpecifiedSystemFramework, bool &IsFrameworkFound,
- bool &IsInHeaderMap, SmallVectorImpl<char> &MappedName) const {
+ bool &IsInHeaderMap, SmallVectorImpl<char> &MappedName,
+ bool OpenFile) const {
InUserSpecifiedSystemFramework = false;
IsInHeaderMap = false;
MappedName.clear();
@@ -386,10 +458,10 @@ Optional<FileEntryRef> DirectoryLookup::LookupFile(
SmallString<1024> TmpDir;
if (isNormalDir()) {
// Concatenate the requested file onto the directory.
- TmpDir = getDir()->getName();
+ TmpDir = getDirRef()->getName();
llvm::sys::path::append(TmpDir, Filename);
if (SearchPath) {
- StringRef SearchPathRef(getDir()->getName());
+ StringRef SearchPathRef(getDirRef()->getName());
SearchPath->clear();
SearchPath->append(SearchPathRef.begin(), SearchPathRef.end());
}
@@ -398,9 +470,9 @@ Optional<FileEntryRef> DirectoryLookup::LookupFile(
RelativePath->append(Filename.begin(), Filename.end());
}
- return HS.getFileAndSuggestModule(TmpDir, IncludeLoc, getDir(),
- isSystemHeaderDirectory(),
- RequestingModule, SuggestedModule);
+ return HS.getFileAndSuggestModule(
+ TmpDir, IncludeLoc, getDir(), isSystemHeaderDirectory(),
+ RequestingModule, SuggestedModule, OpenFile);
}
if (isFramework())
@@ -413,11 +485,12 @@ Optional<FileEntryRef> DirectoryLookup::LookupFile(
SmallString<1024> Path;
StringRef Dest = HM->lookupFilename(Filename, Path);
if (Dest.empty())
- return None;
+ return std::nullopt;
IsInHeaderMap = true;
- auto FixupSearchPath = [&]() {
+ auto FixupSearchPathAndFindUsableModule =
+ [&](FileEntryRef File) -> OptionalFileEntryRef {
if (SearchPath) {
StringRef SearchPathRef(getName());
SearchPath->clear();
@@ -427,6 +500,12 @@ Optional<FileEntryRef> DirectoryLookup::LookupFile(
RelativePath->clear();
RelativePath->append(Filename.begin(), Filename.end());
}
+ if (!HS.findUsableModuleForHeader(File, File.getFileEntry().getDir(),
+ RequestingModule, SuggestedModule,
+ isSystemHeaderDirectory())) {
+ return std::nullopt;
+ }
+ return File;
};
// Check if the headermap maps the filename to a framework include
@@ -435,17 +514,19 @@ Optional<FileEntryRef> DirectoryLookup::LookupFile(
if (llvm::sys::path::is_relative(Dest)) {
MappedName.append(Dest.begin(), Dest.end());
Filename = StringRef(MappedName.begin(), MappedName.size());
- Optional<FileEntryRef> Result = HM->LookupFile(Filename, HS.getFileMgr());
- if (Result) {
- FixupSearchPath();
- return *Result;
- }
- } else if (auto Res = HS.getFileMgr().getOptionalFileRef(Dest)) {
- FixupSearchPath();
- return *Res;
+ Dest = HM->lookupFilename(Filename, Path);
+ }
+
+ if (auto Res = HS.getFileMgr().getOptionalFileRef(Dest, OpenFile)) {
+ return FixupSearchPathAndFindUsableModule(*Res);
}
- return None;
+ // Header maps need to be marked as used whenever the filename matches.
+ // The case where the target file **exists** is handled by callee of this
+ // function as part of the regular logic that applies to include search paths.
+ // The case where the target file **does not exist** is handled here:
+ HS.noteLookupUsage(HS.searchDirIdx(*this), IncludeLoc);
+ return std::nullopt;
}
/// Given a framework directory, find the top-most framework directory.
@@ -454,7 +535,7 @@ Optional<FileEntryRef> DirectoryLookup::LookupFile(
/// \param DirName The name of the framework directory.
/// \param SubmodulePath Will be populated with the submodule path from the
/// returned top-level module to the originally named framework.
-static const DirectoryEntry *
+static OptionalDirectoryEntryRef
getTopFrameworkDir(FileManager &FileMgr, StringRef DirName,
SmallVectorImpl<std::string> &SubmodulePath) {
assert(llvm::sys::path::extension(DirName) == ".framework" &&
@@ -474,12 +555,10 @@ getTopFrameworkDir(FileManager &FileMgr, StringRef DirName,
//
// Similar issues occur when a top-level framework has moved into an
// embedded framework.
- const DirectoryEntry *TopFrameworkDir = nullptr;
- if (auto TopFrameworkDirOrErr = FileMgr.getDirectory(DirName))
- TopFrameworkDir = *TopFrameworkDirOrErr;
+ auto TopFrameworkDir = FileMgr.getOptionalDirectoryRef(DirName);
if (TopFrameworkDir)
- DirName = FileMgr.getCanonicalName(TopFrameworkDir);
+ DirName = FileMgr.getCanonicalName(*TopFrameworkDir);
do {
// Get the parent directory name.
DirName = llvm::sys::path::parent_path(DirName);
@@ -487,7 +566,7 @@ getTopFrameworkDir(FileManager &FileMgr, StringRef DirName,
break;
// Determine whether this directory exists.
- auto Dir = FileMgr.getDirectory(DirName);
+ auto Dir = FileMgr.getOptionalDirectoryRef(DirName);
if (!Dir)
break;
@@ -510,7 +589,7 @@ static bool needModuleLookup(Module *RequestingModule,
/// DoFrameworkLookup - Do a lookup of the specified file in the current
/// DirectoryLookup, which is a framework directory.
-Optional<FileEntryRef> DirectoryLookup::DoFrameworkLookup(
+OptionalFileEntryRef DirectoryLookup::DoFrameworkLookup(
StringRef Filename, HeaderSearch &HS, SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath, Module *RequestingModule,
ModuleMap::KnownHeader *SuggestedModule,
@@ -520,7 +599,7 @@ Optional<FileEntryRef> DirectoryLookup::DoFrameworkLookup(
// Framework names must have a '/' in the filename.
size_t SlashPos = Filename.find('/');
if (SlashPos == StringRef::npos)
- return None;
+ return std::nullopt;
// Find out if this is the home for the specified framework, by checking
// HeaderSearch. Possible answers are yes/no and unknown.
@@ -528,8 +607,8 @@ Optional<FileEntryRef> DirectoryLookup::DoFrameworkLookup(
HS.LookupFrameworkCache(Filename.substr(0, SlashPos));
// If it is known and in some other directory, fail.
- if (CacheEntry.Directory && CacheEntry.Directory != getFrameworkDir())
- return None;
+ if (CacheEntry.Directory && CacheEntry.Directory != getFrameworkDirRef())
+ return std::nullopt;
// Otherwise, construct the path to this framework dir.
@@ -553,11 +632,11 @@ Optional<FileEntryRef> DirectoryLookup::DoFrameworkLookup(
// If the framework dir doesn't exist, we fail.
auto Dir = FileMgr.getDirectory(FrameworkName);
if (!Dir)
- return None;
+ return std::nullopt;
// Otherwise, if it does, remember that this is the right direntry for this
// framework.
- CacheEntry.Directory = getFrameworkDir();
+ CacheEntry.Directory = getFrameworkDirRef();
// If this is a user search directory, check if the framework has been
// user-specified as a system framework.
@@ -572,7 +651,7 @@ Optional<FileEntryRef> DirectoryLookup::DoFrameworkLookup(
// Set out flags.
InUserSpecifiedSystemFramework = CacheEntry.IsUserSpecifiedSystemFramework;
- IsFrameworkFound = CacheEntry.Directory;
+ IsFrameworkFound = CacheEntry.Directory.has_value();
if (RelativePath) {
RelativePath->clear();
@@ -610,7 +689,7 @@ Optional<FileEntryRef> DirectoryLookup::DoFrameworkLookup(
// If we found the header and are allowed to suggest a module, do so now.
if (File && needModuleLookup(RequestingModule, SuggestedModule)) {
// Find the framework in which this header occurs.
- StringRef FrameworkPath = File->getFileEntry().getDir()->getName();
+ StringRef FrameworkPath = File->getDir().getName();
bool FoundFramework = false;
do {
// Determine whether this directory exists.
@@ -633,20 +712,35 @@ Optional<FileEntryRef> DirectoryLookup::DoFrameworkLookup(
bool IsSystem = getDirCharacteristic() != SrcMgr::C_User;
if (FoundFramework) {
- if (!HS.findUsableModuleForFrameworkHeader(
- &File->getFileEntry(), FrameworkPath, RequestingModule,
- SuggestedModule, IsSystem))
- return None;
+ if (!HS.findUsableModuleForFrameworkHeader(*File, FrameworkPath,
+ RequestingModule,
+ SuggestedModule, IsSystem))
+ return std::nullopt;
} else {
- if (!HS.findUsableModuleForHeader(&File->getFileEntry(), getDir(),
- RequestingModule, SuggestedModule,
- IsSystem))
- return None;
+ if (!HS.findUsableModuleForHeader(*File, getDir(), RequestingModule,
+ SuggestedModule, IsSystem))
+ return std::nullopt;
}
}
if (File)
return *File;
- return None;
+ return std::nullopt;
+}
+
+void HeaderSearch::cacheLookupSuccess(LookupFileCacheInfo &CacheLookup,
+ ConstSearchDirIterator HitIt,
+ SourceLocation Loc) {
+ CacheLookup.HitIt = HitIt;
+ noteLookupUsage(HitIt.Idx, Loc);
+}
+
+void HeaderSearch::noteLookupUsage(unsigned HitIdx, SourceLocation Loc) {
+ SearchDirsUsage[HitIdx] = true;
+
+ auto UserEntryIdxIt = SearchDirToHSEntry.find(HitIdx);
+ if (UserEntryIdxIt != SearchDirToHSEntry.end())
+ Diags.Report(Loc, diag::remark_pp_search_path_usage)
+ << HSOpts->UserEntries[UserEntryIdxIt->second].Path;
}
void HeaderSearch::setTarget(const TargetInfo &Target) {
@@ -661,9 +755,10 @@ void HeaderSearch::setTarget(const TargetInfo &Target) {
/// fails to match the one that Clang would have found with MSVC header search
/// disabled.
static bool checkMSVCHeaderSearch(DiagnosticsEngine &Diags,
- const FileEntry *MSFE, const FileEntry *FE,
+ OptionalFileEntryRef MSFE,
+ const FileEntry *FE,
SourceLocation IncludeLoc) {
- if (MSFE && FE != MSFE) {
+ if (MSFE && FE != *MSFE) {
Diags.Report(IncludeLoc, diag::ext_pp_include_search_ms) << MSFE->getName();
return true;
}
@@ -679,7 +774,8 @@ static const char *copyString(StringRef Str, llvm::BumpPtrAllocator &Alloc) {
}
static bool isFrameworkStylePath(StringRef Path, bool &IsPrivateHeader,
- SmallVectorImpl<char> &FrameworkName) {
+ SmallVectorImpl<char> &FrameworkName,
+ SmallVectorImpl<char> &IncludeSpelling) {
using namespace llvm::sys;
path::const_iterator I = path::begin(Path);
path::const_iterator E = path::end(Path);
@@ -695,15 +791,22 @@ static bool isFrameworkStylePath(StringRef Path, bool &IsPrivateHeader,
// and some other variations among these lines.
int FoundComp = 0;
while (I != E) {
- if (*I == "Headers")
+ if (*I == "Headers") {
++FoundComp;
- if (I->endswith(".framework")) {
- FrameworkName.append(I->begin(), I->end());
- ++FoundComp;
- }
- if (*I == "PrivateHeaders") {
+ } else if (*I == "PrivateHeaders") {
++FoundComp;
IsPrivateHeader = true;
+ } else if (I->ends_with(".framework")) {
+ StringRef Name = I->drop_back(10); // Drop .framework
+ // Need to reset the strings and counter to support nested frameworks.
+ FrameworkName.clear();
+ FrameworkName.append(Name.begin(), Name.end());
+ IncludeSpelling.clear();
+ IncludeSpelling.append(Name.begin(), Name.end());
+ FoundComp = 1;
+ } else if (FoundComp >= 2) {
+ IncludeSpelling.push_back('/');
+ IncludeSpelling.append(I->begin(), I->end());
}
++I;
}
@@ -714,24 +817,28 @@ static bool isFrameworkStylePath(StringRef Path, bool &IsPrivateHeader,
static void
diagnoseFrameworkInclude(DiagnosticsEngine &Diags, SourceLocation IncludeLoc,
StringRef Includer, StringRef IncludeFilename,
- const FileEntry *IncludeFE, bool isAngled = false,
+ FileEntryRef IncludeFE, bool isAngled = false,
bool FoundByHeaderMap = false) {
bool IsIncluderPrivateHeader = false;
SmallString<128> FromFramework, ToFramework;
- if (!isFrameworkStylePath(Includer, IsIncluderPrivateHeader, FromFramework))
+ SmallString<128> FromIncludeSpelling, ToIncludeSpelling;
+ if (!isFrameworkStylePath(Includer, IsIncluderPrivateHeader, FromFramework,
+ FromIncludeSpelling))
return;
bool IsIncludeePrivateHeader = false;
- bool IsIncludeeInFramework = isFrameworkStylePath(
- IncludeFE->getName(), IsIncludeePrivateHeader, ToFramework);
+ bool IsIncludeeInFramework =
+ isFrameworkStylePath(IncludeFE.getName(), IsIncludeePrivateHeader,
+ ToFramework, ToIncludeSpelling);
if (!isAngled && !FoundByHeaderMap) {
SmallString<128> NewInclude("<");
if (IsIncludeeInFramework) {
- NewInclude += ToFramework.str().drop_back(10); // drop .framework
- NewInclude += "/";
+ NewInclude += ToIncludeSpelling;
+ NewInclude += ">";
+ } else {
+ NewInclude += IncludeFilename;
+ NewInclude += ">";
}
- NewInclude += IncludeFilename;
- NewInclude += ">";
Diags.Report(IncludeLoc, diag::warn_quoted_include_in_framework_header)
<< IncludeFilename
<< FixItHint::CreateReplacement(IncludeLoc, NewInclude);
@@ -751,14 +858,17 @@ diagnoseFrameworkInclude(DiagnosticsEngine &Diags, SourceLocation IncludeLoc,
/// for system \#include's or not (i.e. using <> instead of ""). Includers, if
/// non-empty, indicates where the \#including file(s) are, in case a relative
/// search is needed. Microsoft mode will pass all \#including files.
-Optional<FileEntryRef> HeaderSearch::LookupFile(
+OptionalFileEntryRef HeaderSearch::LookupFile(
StringRef Filename, SourceLocation IncludeLoc, bool isAngled,
- const DirectoryLookup *FromDir, const DirectoryLookup *&CurDir,
- ArrayRef<std::pair<const FileEntry *, const DirectoryEntry *>> Includers,
+ ConstSearchDirIterator FromDir, ConstSearchDirIterator *CurDirArg,
+ ArrayRef<std::pair<OptionalFileEntryRef, DirectoryEntryRef>> Includers,
SmallVectorImpl<char> *SearchPath, SmallVectorImpl<char> *RelativePath,
Module *RequestingModule, ModuleMap::KnownHeader *SuggestedModule,
bool *IsMapped, bool *IsFrameworkFound, bool SkipCache,
- bool BuildSystemModule) {
+ bool BuildSystemModule, bool OpenFile, bool CacheFailures) {
+ ConstSearchDirIterator CurDirLocal = nullptr;
+ ConstSearchDirIterator &CurDir = CurDirArg ? *CurDirArg : CurDirLocal;
+
if (IsMapped)
*IsMapped = false;
@@ -774,7 +884,7 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
// If this was an #include_next "/absolute/file", fail.
if (FromDir)
- return None;
+ return std::nullopt;
if (SearchPath)
SearchPath->clear();
@@ -784,30 +894,29 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
}
// Otherwise, just return the file.
return getFileAndSuggestModule(Filename, IncludeLoc, nullptr,
- /*IsSystemHeaderDir*/false,
- RequestingModule, SuggestedModule);
+ /*IsSystemHeaderDir*/ false,
+ RequestingModule, SuggestedModule, OpenFile,
+ CacheFailures);
}
// This is the header that MSVC's header search would have found.
ModuleMap::KnownHeader MSSuggestedModule;
- Optional<FileEntryRef> MSFE;
-
- // Unless disabled, check to see if the file is in the #includer's
- // directory. This cannot be based on CurDir, because each includer could be
- // a #include of a subdirectory (#include "foo/bar.h") and a subsequent
- // include of "baz.h" should resolve to "whatever/foo/baz.h".
- // This search is not done for <> headers.
- if (!Includers.empty() && !isAngled && !NoCurDirSearch) {
+ OptionalFileEntryRef MSFE;
+
+ // Check to see if the file is in the #includer's directory. This cannot be
+ // based on CurDir, because each includer could be a #include of a
+ // subdirectory (#include "foo/bar.h") and a subsequent include of "baz.h"
+ // should resolve to "whatever/foo/baz.h". This search is not done for <>
+ // headers.
+ if (!Includers.empty() && !isAngled) {
SmallString<1024> TmpDir;
bool First = true;
for (const auto &IncluderAndDir : Includers) {
- const FileEntry *Includer = IncluderAndDir.first;
+ OptionalFileEntryRef Includer = IncluderAndDir.first;
// Concatenate the requested file onto the directory.
- // FIXME: Portability. Filename concatenation should be in sys::Path.
- TmpDir = IncluderAndDir.second->getName();
- TmpDir.push_back('/');
- TmpDir.append(Filename.begin(), Filename.end());
+ TmpDir = IncluderAndDir.second.getName();
+ llvm::sys::path::append(TmpDir, Filename);
// FIXME: We don't cache the result of getFileInfo across the call to
// getFileAndSuggestModule, because it's a reference to an element of
@@ -817,9 +926,9 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
// from a module build. We should treat this as a system header if we're
// building a [system] module.
bool IncluderIsSystemHeader =
- Includer ? getFileInfo(Includer).DirInfo != SrcMgr::C_User :
+ Includer ? getFileInfo(*Includer).DirInfo != SrcMgr::C_User :
BuildSystemModule;
- if (Optional<FileEntryRef> FE = getFileAndSuggestModule(
+ if (OptionalFileEntryRef FE = getFileAndSuggestModule(
TmpDir, IncludeLoc, IncluderAndDir.second, IncluderIsSystemHeader,
RequestingModule, SuggestedModule)) {
if (!Includer) {
@@ -833,18 +942,18 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
// Note that we only use one of FromHFI/ToHFI at once, due to potential
// reallocation of the underlying vector potentially making the first
// reference binding dangling.
- HeaderFileInfo &FromHFI = getFileInfo(Includer);
+ HeaderFileInfo &FromHFI = getFileInfo(*Includer);
unsigned DirInfo = FromHFI.DirInfo;
bool IndexHeaderMapHeader = FromHFI.IndexHeaderMapHeader;
StringRef Framework = FromHFI.Framework;
- HeaderFileInfo &ToHFI = getFileInfo(&FE->getFileEntry());
+ HeaderFileInfo &ToHFI = getFileInfo(*FE);
ToHFI.DirInfo = DirInfo;
ToHFI.IndexHeaderMapHeader = IndexHeaderMapHeader;
ToHFI.Framework = Framework;
if (SearchPath) {
- StringRef SearchPathRef(IncluderAndDir.second->getName());
+ StringRef SearchPathRef(IncluderAndDir.second.getName());
SearchPath->clear();
SearchPath->append(SearchPathRef.begin(), SearchPathRef.end());
}
@@ -854,8 +963,8 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
}
if (First) {
diagnoseFrameworkInclude(Diags, IncludeLoc,
- IncluderAndDir.second->getName(), Filename,
- &FE->getFileEntry());
+ IncluderAndDir.second.getName(), Filename,
+ *FE);
return FE;
}
@@ -880,12 +989,13 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
CurDir = nullptr;
// If this is a system #include, ignore the user #include locs.
- unsigned i = isAngled ? AngledDirIdx : 0;
+ ConstSearchDirIterator It =
+ isAngled ? angled_dir_begin() : search_dir_begin();
// If this is a #include_next request, start searching after the directory the
// file was found in.
if (FromDir)
- i = FromDir-&SearchDirs[0];
+ It = FromDir;
// Cache all of the lookups performed by this method. Many headers are
// multiply included, and the "pragma once" optimization prevents them from
@@ -893,35 +1003,53 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
// (potentially huge) series of SearchDirs to find it.
LookupFileCacheInfo &CacheLookup = LookupFileCache[Filename];
- // If the entry has been previously looked up, the first value will be
- // non-zero. If the value is equal to i (the start point of our search), then
- // this is a matching hit.
- if (!SkipCache && CacheLookup.StartIdx == i+1) {
- // Skip querying potentially lots of directories for this lookup.
- i = CacheLookup.HitIdx;
- if (CacheLookup.MappedName) {
- Filename = CacheLookup.MappedName;
- if (IsMapped)
- *IsMapped = true;
+ ConstSearchDirIterator NextIt = std::next(It);
+
+ if (!SkipCache) {
+ if (CacheLookup.StartIt == NextIt &&
+ CacheLookup.RequestingModule == RequestingModule) {
+ // HIT: Skip querying potentially lots of directories for this lookup.
+ if (CacheLookup.HitIt)
+ It = CacheLookup.HitIt;
+ if (CacheLookup.MappedName) {
+ Filename = CacheLookup.MappedName;
+ if (IsMapped)
+ *IsMapped = true;
+ }
+ } else {
+ // MISS: This is the first query, or the previous query didn't match
+ // our search start. We will fill in our found location below, so prime
+ // the start point value.
+ CacheLookup.reset(RequestingModule, /*NewStartIt=*/NextIt);
+
+ if (It == search_dir_begin() && FirstNonHeaderMapSearchDirIdx > 0) {
+ // Handle cold misses of user includes in the presence of many header
+ // maps. We avoid searching perhaps thousands of header maps by
+ // jumping directly to the correct one or jumping beyond all of them.
+ auto Iter = SearchDirHeaderMapIndex.find(Filename.lower());
+ if (Iter == SearchDirHeaderMapIndex.end())
+ // Not in index => Skip to first SearchDir after initial header maps
+ It = search_dir_nth(FirstNonHeaderMapSearchDirIdx);
+ else
+ // In index => Start with a specific header map
+ It = search_dir_nth(Iter->second);
+ }
}
} else {
- // Otherwise, this is the first query, or the previous query didn't match
- // our search start. We will fill in our found location below, so prime the
- // start point value.
- CacheLookup.reset(/*StartIdx=*/i+1);
+ CacheLookup.reset(RequestingModule, /*NewStartIt=*/NextIt);
}
SmallString<64> MappedName;
// Check each directory in sequence to see if it contains this file.
- for (; i != SearchDirs.size(); ++i) {
+ for (; It != search_dir_end(); ++It) {
bool InUserSpecifiedSystemFramework = false;
bool IsInHeaderMap = false;
bool IsFrameworkFoundInDir = false;
- Optional<FileEntryRef> File = SearchDirs[i].LookupFile(
+ OptionalFileEntryRef File = It->LookupFile(
Filename, *this, IncludeLoc, SearchPath, RelativePath, RequestingModule,
SuggestedModule, InUserSpecifiedSystemFramework, IsFrameworkFoundInDir,
- IsInHeaderMap, MappedName);
+ IsInHeaderMap, MappedName, OpenFile);
if (!MappedName.empty()) {
assert(IsInHeaderMap && "MappedName should come from a header map");
CacheLookup.MappedName =
@@ -940,10 +1068,12 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
if (!File)
continue;
- CurDir = &SearchDirs[i];
+ CurDir = It;
+
+ IncludeNames[*File] = Filename;
// This file is a system header or C++ unfriendly if the dir is.
- HeaderFileInfo &HFI = getFileInfo(&File->getFileEntry());
+ HeaderFileInfo &HFI = getFileInfo(*File);
HFI.DirInfo = CurDir->getDirCharacteristic();
// If the directory characteristic is User but this framework was
@@ -955,26 +1085,31 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
// If the filename matches a known system header prefix, override
// whether the file is a system header.
for (unsigned j = SystemHeaderPrefixes.size(); j; --j) {
- if (Filename.startswith(SystemHeaderPrefixes[j-1].first)) {
+ if (Filename.starts_with(SystemHeaderPrefixes[j - 1].first)) {
HFI.DirInfo = SystemHeaderPrefixes[j-1].second ? SrcMgr::C_System
: SrcMgr::C_User;
break;
}
}
- // If this file is found in a header map and uses the framework style of
- // includes, then this header is part of a framework we're building.
- if (CurDir->isIndexHeaderMap()) {
+ // Set the `Framework` info if this file is in a header map with framework
+ // style include spelling or found in a framework dir. The header map case
+ // is possible when building frameworks which use header maps.
+ if (CurDir->isHeaderMap() && isAngled) {
size_t SlashPos = Filename.find('/');
- if (SlashPos != StringRef::npos) {
+ if (SlashPos != StringRef::npos)
+ HFI.Framework =
+ getUniqueFrameworkName(StringRef(Filename.begin(), SlashPos));
+ if (CurDir->isIndexHeaderMap())
HFI.IndexHeaderMapHeader = 1;
- HFI.Framework = getUniqueFrameworkName(StringRef(Filename.begin(),
- SlashPos));
- }
+ } else if (CurDir->isFramework()) {
+ size_t SlashPos = Filename.find('/');
+ if (SlashPos != StringRef::npos)
+ HFI.Framework =
+ getUniqueFrameworkName(StringRef(Filename.begin(), SlashPos));
}
- if (checkMSVCHeaderSearch(Diags, MSFE ? &MSFE->getFileEntry() : nullptr,
- &File->getFileEntry(), IncludeLoc)) {
+ if (checkMSVCHeaderSearch(Diags, MSFE, &File->getFileEntry(), IncludeLoc)) {
if (SuggestedModule)
*SuggestedModule = MSSuggestedModule;
return MSFE;
@@ -982,12 +1117,12 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
bool FoundByHeaderMap = !IsMapped ? false : *IsMapped;
if (!Includers.empty())
- diagnoseFrameworkInclude(
- Diags, IncludeLoc, Includers.front().second->getName(), Filename,
- &File->getFileEntry(), isAngled, FoundByHeaderMap);
+ diagnoseFrameworkInclude(Diags, IncludeLoc,
+ Includers.front().second.getName(), Filename,
+ *File, isAngled, FoundByHeaderMap);
// Remember this location for the next lookup we do.
- CacheLookup.HitIdx = i;
+ cacheLookupSuccess(CacheLookup, It, IncludeLoc);
return File;
}
@@ -996,20 +1131,20 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
// resolve "foo.h" any other way, change the include to <Foo/foo.h>, where
// "Foo" is the name of the framework in which the including header was found.
if (!Includers.empty() && Includers.front().first && !isAngled &&
- Filename.find('/') == StringRef::npos) {
- HeaderFileInfo &IncludingHFI = getFileInfo(Includers.front().first);
+ !Filename.contains('/')) {
+ HeaderFileInfo &IncludingHFI = getFileInfo(*Includers.front().first);
if (IncludingHFI.IndexHeaderMapHeader) {
SmallString<128> ScratchFilename;
ScratchFilename += IncludingHFI.Framework;
ScratchFilename += '/';
ScratchFilename += Filename;
- Optional<FileEntryRef> File = LookupFile(
- ScratchFilename, IncludeLoc, /*isAngled=*/true, FromDir, CurDir,
+ OptionalFileEntryRef File = LookupFile(
+ ScratchFilename, IncludeLoc, /*isAngled=*/true, FromDir, &CurDir,
Includers.front(), SearchPath, RelativePath, RequestingModule,
SuggestedModule, IsMapped, /*IsFrameworkFound=*/nullptr);
- if (checkMSVCHeaderSearch(Diags, MSFE ? &MSFE->getFileEntry() : nullptr,
+ if (checkMSVCHeaderSearch(Diags, MSFE,
File ? &File->getFileEntry() : nullptr,
IncludeLoc)) {
if (SuggestedModule)
@@ -1017,23 +1152,22 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
return MSFE;
}
- LookupFileCacheInfo &CacheLookup = LookupFileCache[Filename];
- CacheLookup.HitIdx = LookupFileCache[ScratchFilename].HitIdx;
+ cacheLookupSuccess(LookupFileCache[Filename],
+ LookupFileCache[ScratchFilename].HitIt, IncludeLoc);
// FIXME: SuggestedModule.
return File;
}
}
- if (checkMSVCHeaderSearch(Diags, MSFE ? &MSFE->getFileEntry() : nullptr,
- nullptr, IncludeLoc)) {
+ if (checkMSVCHeaderSearch(Diags, MSFE, nullptr, IncludeLoc)) {
if (SuggestedModule)
*SuggestedModule = MSSuggestedModule;
return MSFE;
}
// Otherwise, didn't find it. Remember we didn't find this.
- CacheLookup.HitIdx = SearchDirs.size();
- return None;
+ CacheLookup.HitIt = search_dir_end();
+ return std::nullopt;
}
/// LookupSubframeworkHeader - Look up a subframework for the specified
@@ -1041,20 +1175,18 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
/// within ".../Carbon.framework/Headers/Carbon.h", check to see if HIToolbox
/// is a subframework within Carbon.framework. If so, return the FileEntry
/// for the designated file, otherwise return null.
-Optional<FileEntryRef> HeaderSearch::LookupSubframeworkHeader(
- StringRef Filename, const FileEntry *ContextFileEnt,
+OptionalFileEntryRef HeaderSearch::LookupSubframeworkHeader(
+ StringRef Filename, FileEntryRef ContextFileEnt,
SmallVectorImpl<char> *SearchPath, SmallVectorImpl<char> *RelativePath,
Module *RequestingModule, ModuleMap::KnownHeader *SuggestedModule) {
- assert(ContextFileEnt && "No context file?");
-
// Framework names must have a '/' in the filename. Find it.
// FIXME: Should we permit '\' on Windows?
size_t SlashPos = Filename.find('/');
if (SlashPos == StringRef::npos)
- return None;
+ return std::nullopt;
// Look up the base framework name of the ContextFileEnt.
- StringRef ContextName = ContextFileEnt->getName();
+ StringRef ContextName = ContextFileEnt.getName();
// If the context info wasn't a framework, couldn't be a subframework.
const unsigned DotFrameworkLen = 10;
@@ -1062,7 +1194,7 @@ Optional<FileEntryRef> HeaderSearch::LookupSubframeworkHeader(
if (FrameworkPos == StringRef::npos ||
(ContextName[FrameworkPos + DotFrameworkLen] != '/' &&
ContextName[FrameworkPos + DotFrameworkLen] != '\\'))
- return None;
+ return std::nullopt;
SmallString<1024> FrameworkName(ContextName.data(), ContextName.data() +
FrameworkPos +
@@ -1082,20 +1214,20 @@ Optional<FileEntryRef> HeaderSearch::LookupSubframeworkHeader(
CacheLookup.first().size() == FrameworkName.size() &&
memcmp(CacheLookup.first().data(), &FrameworkName[0],
CacheLookup.first().size()) != 0)
- return None;
+ return std::nullopt;
// Cache subframework.
if (!CacheLookup.second.Directory) {
++NumSubFrameworkLookups;
// If the framework dir doesn't exist, we fail.
- auto Dir = FileMgr.getDirectory(FrameworkName);
+ auto Dir = FileMgr.getOptionalDirectoryRef(FrameworkName);
if (!Dir)
- return None;
+ return std::nullopt;
// Otherwise, if it does, remember that this is the right direntry for this
// framework.
- CacheLookup.second.Directory = *Dir;
+ CacheLookup.second.Directory = Dir;
}
@@ -1129,7 +1261,7 @@ Optional<FileEntryRef> HeaderSearch::LookupSubframeworkHeader(
File = FileMgr.getOptionalFileRef(HeadersFilename, /*OpenFile=*/true);
if (!File)
- return None;
+ return std::nullopt;
}
// This file is a system header or C++ unfriendly if the old file is.
@@ -1138,13 +1270,13 @@ Optional<FileEntryRef> HeaderSearch::LookupSubframeworkHeader(
// getFileInfo could resize the vector and we don't want to rely on order
// of evaluation.
unsigned DirInfo = getFileInfo(ContextFileEnt).DirInfo;
- getFileInfo(&File->getFileEntry()).DirInfo = DirInfo;
+ getFileInfo(*File).DirInfo = DirInfo;
FrameworkName.pop_back(); // remove the trailing '/'
- if (!findUsableModuleForFrameworkHeader(&File->getFileEntry(), FrameworkName,
+ if (!findUsableModuleForFrameworkHeader(*File, FrameworkName,
RequestingModule, SuggestedModule,
/*IsSystem*/ false))
- return None;
+ return std::nullopt;
return *File;
}
@@ -1162,7 +1294,6 @@ static void mergeHeaderFileInfo(HeaderFileInfo &HFI,
HFI.isImport |= OtherHFI.isImport;
HFI.isPragmaOnce |= OtherHFI.isPragmaOnce;
HFI.isModuleHeader |= OtherHFI.isModuleHeader;
- HFI.NumIncludes += OtherHFI.NumIncludes;
if (!HFI.ControllingMacro && !HFI.ControllingMacroID) {
HFI.ControllingMacro = OtherHFI.ControllingMacro;
@@ -1180,11 +1311,11 @@ static void mergeHeaderFileInfo(HeaderFileInfo &HFI,
/// getFileInfo - Return the HeaderFileInfo structure for the specified
/// FileEntry.
-HeaderFileInfo &HeaderSearch::getFileInfo(const FileEntry *FE) {
- if (FE->getUID() >= FileInfo.size())
- FileInfo.resize(FE->getUID() + 1);
+HeaderFileInfo &HeaderSearch::getFileInfo(FileEntryRef FE) {
+ if (FE.getUID() >= FileInfo.size())
+ FileInfo.resize(FE.getUID() + 1);
- HeaderFileInfo *HFI = &FileInfo[FE->getUID()];
+ HeaderFileInfo *HFI = &FileInfo[FE.getUID()];
// FIXME: Use a generation count to check whether this is really up to date.
if (ExternalSource && !HFI->Resolved) {
auto ExternalHFI = ExternalSource->GetHeaderFileInfo(FE);
@@ -1203,19 +1334,18 @@ HeaderFileInfo &HeaderSearch::getFileInfo(const FileEntry *FE) {
}
const HeaderFileInfo *
-HeaderSearch::getExistingFileInfo(const FileEntry *FE,
- bool WantExternal) const {
+HeaderSearch::getExistingFileInfo(FileEntryRef FE, bool WantExternal) const {
// If we have an external source, ensure we have the latest information.
// FIXME: Use a generation count to check whether this is really up to date.
HeaderFileInfo *HFI;
if (ExternalSource) {
- if (FE->getUID() >= FileInfo.size()) {
+ if (FE.getUID() >= FileInfo.size()) {
if (!WantExternal)
return nullptr;
- FileInfo.resize(FE->getUID() + 1);
+ FileInfo.resize(FE.getUID() + 1);
}
- HFI = &FileInfo[FE->getUID()];
+ HFI = &FileInfo[FE.getUID()];
if (!WantExternal && (!HFI->IsValid || HFI->External))
return nullptr;
if (!HFI->Resolved) {
@@ -1226,10 +1356,10 @@ HeaderSearch::getExistingFileInfo(const FileEntry *FE,
mergeHeaderFileInfo(*HFI, ExternalHFI);
}
}
- } else if (FE->getUID() >= FileInfo.size()) {
+ } else if (FE.getUID() >= FileInfo.size()) {
return nullptr;
} else {
- HFI = &FileInfo[FE->getUID()];
+ HFI = &FileInfo[FE.getUID()];
}
if (!HFI->IsValid || (HFI->External && !WantExternal))
@@ -1238,7 +1368,7 @@ HeaderSearch::getExistingFileInfo(const FileEntry *FE,
return HFI;
}
-bool HeaderSearch::isFileMultipleIncludeGuarded(const FileEntry *File) {
+bool HeaderSearch::isFileMultipleIncludeGuarded(FileEntryRef File) const {
// Check if we've entered this file and found an include guard or #pragma
// once. Note that we dor't check for #import, because that's not a property
// of the file itself.
@@ -1248,10 +1378,10 @@ bool HeaderSearch::isFileMultipleIncludeGuarded(const FileEntry *File) {
return false;
}
-void HeaderSearch::MarkFileModuleHeader(const FileEntry *FE,
+void HeaderSearch::MarkFileModuleHeader(FileEntryRef FE,
ModuleMap::ModuleHeaderRole Role,
bool isCompilingModuleHeader) {
- bool isModularHeader = !(Role & ModuleMap::TextualHeader);
+ bool isModularHeader = ModuleMap::isModular(Role);
// Don't mark the file info as non-external if there's nothing to change.
if (!isCompilingModuleHeader) {
@@ -1268,64 +1398,67 @@ void HeaderSearch::MarkFileModuleHeader(const FileEntry *FE,
}
bool HeaderSearch::ShouldEnterIncludeFile(Preprocessor &PP,
- const FileEntry *File, bool isImport,
- bool ModulesEnabled, Module *M) {
+ FileEntryRef File, bool isImport,
+ bool ModulesEnabled, Module *M,
+ bool &IsFirstIncludeOfFile) {
++NumIncluded; // Count # of attempted #includes.
+ IsFirstIncludeOfFile = false;
+
// Get information about this file.
HeaderFileInfo &FileInfo = getFileInfo(File);
- // FIXME: this is a workaround for the lack of proper modules-aware support
- // for #import / #pragma once
- auto TryEnterImported = [&]() -> bool {
- if (!ModulesEnabled)
- return false;
- // Ensure FileInfo bits are up to date.
- ModMap.resolveHeaderDirectives(File);
- // Modules with builtins are special; multiple modules use builtins as
- // modular headers, example:
- //
- // module stddef { header "stddef.h" export * }
- //
- // After module map parsing, this expands to:
- //
- // module stddef {
- // header "/path_to_builtin_dirs/stddef.h"
- // textual "stddef.h"
- // }
- //
- // It's common that libc++ and system modules will both define such
- // submodules. Make sure cached results for a builtin header won't
- // prevent other builtin modules from potentially entering the builtin
- // header. Note that builtins are header guarded and the decision to
- // actually enter them is postponed to the controlling macros logic below.
- bool TryEnterHdr = false;
- if (FileInfo.isCompilingModuleHeader && FileInfo.isModuleHeader)
- TryEnterHdr = ModMap.isBuiltinHeader(File);
-
- // Textual headers can be #imported from different modules. Since ObjC
- // headers find in the wild might rely only on #import and do not contain
- // controlling macros, be conservative and only try to enter textual headers
- // if such macro is present.
- if (!FileInfo.isModuleHeader &&
- FileInfo.getControllingMacro(ExternalLookup))
- TryEnterHdr = true;
- return TryEnterHdr;
- };
-
// If this is a #import directive, check that we have not already imported
// this header.
if (isImport) {
// If this has already been imported, don't import it again.
FileInfo.isImport = true;
+ // FIXME: this is a workaround for the lack of proper modules-aware support
+ // for #import / #pragma once
+ auto TryEnterImported = [&]() -> bool {
+ if (!ModulesEnabled)
+ return false;
+ // Ensure FileInfo bits are up to date.
+ ModMap.resolveHeaderDirectives(File);
+ // Modules with builtins are special; multiple modules use builtins as
+ // modular headers, example:
+ //
+ // module stddef { header "stddef.h" export * }
+ //
+ // After module map parsing, this expands to:
+ //
+ // module stddef {
+ // header "/path_to_builtin_dirs/stddef.h"
+ // textual "stddef.h"
+ // }
+ //
+ // It's common that libc++ and system modules will both define such
+ // submodules. Make sure cached results for a builtin header won't
+ // prevent other builtin modules from potentially entering the builtin
+ // header. Note that builtins are header guarded and the decision to
+ // actually enter them is postponed to the controlling macros logic below.
+ bool TryEnterHdr = false;
+ if (FileInfo.isCompilingModuleHeader && FileInfo.isModuleHeader)
+ TryEnterHdr = ModMap.isBuiltinHeader(File);
+
+ // Textual headers can be #imported from different modules. Since ObjC
+ // headers find in the wild might rely only on #import and do not contain
+ // controlling macros, be conservative and only try to enter textual
+ // headers if such macro is present.
+ if (!FileInfo.isModuleHeader &&
+ FileInfo.getControllingMacro(ExternalLookup))
+ TryEnterHdr = true;
+ return TryEnterHdr;
+ };
+
// Has this already been #import'ed or #include'd?
- if (FileInfo.NumIncludes && !TryEnterImported())
+ if (PP.alreadyIncluded(File) && !TryEnterImported())
return false;
} else {
// Otherwise, if this is a #include of a file that was previously #import'd
// or if this is the second #include of a #pragma once file, ignore it.
- if (FileInfo.isImport && !TryEnterImported())
+ if (FileInfo.isPragmaOnce || FileInfo.isImport)
return false;
}
@@ -1343,8 +1476,7 @@ bool HeaderSearch::ShouldEnterIncludeFile(Preprocessor &PP,
}
}
- // Increment the number of times this file has been included.
- ++FileInfo.NumIncludes;
+ IsFirstIncludeOfFile = PP.markIncluded(File);
return true;
}
@@ -1357,10 +1489,21 @@ size_t HeaderSearch::getTotalMemory() const {
+ FrameworkMap.getAllocator().getTotalMemory();
}
+unsigned HeaderSearch::searchDirIdx(const DirectoryLookup &DL) const {
+ return &DL - &*SearchDirs.begin();
+}
+
StringRef HeaderSearch::getUniqueFrameworkName(StringRef Framework) {
return FrameworkNames.insert(Framework).first->first();
}
+StringRef HeaderSearch::getIncludeNameForHeader(const FileEntry *File) const {
+ auto It = IncludeNames.find(File);
+ if (It == IncludeNames.end())
+ return {};
+ return It->second;
+}
+
bool HeaderSearch::hasModuleMap(StringRef FileName,
const DirectoryEntry *Root,
bool IsSystem) {
@@ -1377,13 +1520,13 @@ bool HeaderSearch::hasModuleMap(StringRef FileName,
return false;
// Determine whether this directory exists.
- auto Dir = FileMgr.getDirectory(DirName);
+ auto Dir = FileMgr.getOptionalDirectoryRef(DirName);
if (!Dir)
return false;
// Try to load the module map file in this directory.
switch (loadModuleMapFile(*Dir, IsSystem,
- llvm::sys::path::extension((*Dir)->getName()) ==
+ llvm::sys::path::extension(Dir->getName()) ==
".framework")) {
case LMM_NewlyLoaded:
case LMM_AlreadyLoaded:
@@ -1409,18 +1552,18 @@ bool HeaderSearch::hasModuleMap(StringRef FileName,
}
ModuleMap::KnownHeader
-HeaderSearch::findModuleForHeader(const FileEntry *File,
- bool AllowTextual) const {
+HeaderSearch::findModuleForHeader(FileEntryRef File, bool AllowTextual,
+ bool AllowExcluded) const {
if (ExternalSource) {
// Make sure the external source has handled header info about this file,
// which includes whether the file is part of a module.
(void)getExistingFileInfo(File);
}
- return ModMap.findModuleForHeader(File, AllowTextual);
+ return ModMap.findModuleForHeader(File, AllowTextual, AllowExcluded);
}
ArrayRef<ModuleMap::KnownHeader>
-HeaderSearch::findAllModulesForHeader(const FileEntry *File) const {
+HeaderSearch::findAllModulesForHeader(FileEntryRef File) const {
if (ExternalSource) {
// Make sure the external source has handled header info about this file,
// which includes whether the file is part of a module.
@@ -1429,7 +1572,17 @@ HeaderSearch::findAllModulesForHeader(const FileEntry *File) const {
return ModMap.findAllModulesForHeader(File);
}
-static bool suggestModule(HeaderSearch &HS, const FileEntry *File,
+ArrayRef<ModuleMap::KnownHeader>
+HeaderSearch::findResolvedModulesForHeader(FileEntryRef File) const {
+ if (ExternalSource) {
+ // Make sure the external source has handled header info about this file,
+ // which includes whether the file is part of a module.
+ (void)getExistingFileInfo(File);
+ }
+ return ModMap.findResolvedModulesForHeader(File);
+}
+
+static bool suggestModule(HeaderSearch &HS, FileEntryRef File,
Module *RequestingModule,
ModuleMap::KnownHeader *SuggestedModule) {
ModuleMap::KnownHeader Module =
@@ -1450,6 +1603,8 @@ static bool suggestModule(HeaderSearch &HS, const FileEntry *File,
*SuggestedModule = ModuleMap::KnownHeader();
return true;
}
+ // TODO: Add this module (or just its module map file) into something like
+ // `RequestingModule->AffectingClangModules`.
return false;
}
}
@@ -1463,32 +1618,33 @@ static bool suggestModule(HeaderSearch &HS, const FileEntry *File,
}
bool HeaderSearch::findUsableModuleForHeader(
- const FileEntry *File, const DirectoryEntry *Root, Module *RequestingModule,
+ FileEntryRef File, const DirectoryEntry *Root, Module *RequestingModule,
ModuleMap::KnownHeader *SuggestedModule, bool IsSystemHeaderDir) {
- if (File && needModuleLookup(RequestingModule, SuggestedModule)) {
+ if (needModuleLookup(RequestingModule, SuggestedModule)) {
// If there is a module that corresponds to this header, suggest it.
- hasModuleMap(File->getName(), Root, IsSystemHeaderDir);
+ hasModuleMap(File.getNameAsRequested(), Root, IsSystemHeaderDir);
return suggestModule(*this, File, RequestingModule, SuggestedModule);
}
return true;
}
bool HeaderSearch::findUsableModuleForFrameworkHeader(
- const FileEntry *File, StringRef FrameworkName, Module *RequestingModule,
+ FileEntryRef File, StringRef FrameworkName, Module *RequestingModule,
ModuleMap::KnownHeader *SuggestedModule, bool IsSystemFramework) {
// If we're supposed to suggest a module, look for one now.
if (needModuleLookup(RequestingModule, SuggestedModule)) {
// Find the top-level framework based on this framework.
SmallVector<std::string, 4> SubmodulePath;
- const DirectoryEntry *TopFrameworkDir
- = ::getTopFrameworkDir(FileMgr, FrameworkName, SubmodulePath);
+ OptionalDirectoryEntryRef TopFrameworkDir =
+ ::getTopFrameworkDir(FileMgr, FrameworkName, SubmodulePath);
+ assert(TopFrameworkDir && "Could not find the top-most framework dir");
// Determine the name of the top-level framework.
StringRef ModuleName = llvm::sys::path::stem(TopFrameworkDir->getName());
// Load this framework module. If that succeeds, find the suggested module
// for this header, if any.
- loadFrameworkModule(ModuleName, TopFrameworkDir, IsSystemFramework);
+ loadFrameworkModule(ModuleName, *TopFrameworkDir, IsSystemFramework);
// FIXME: This can find a module not part of ModuleName, which is
// important so that we're consistent about whether this header
@@ -1499,59 +1655,64 @@ bool HeaderSearch::findUsableModuleForFrameworkHeader(
return true;
}
-static const FileEntry *getPrivateModuleMap(const FileEntry *File,
- FileManager &FileMgr) {
- StringRef Filename = llvm::sys::path::filename(File->getName());
- SmallString<128> PrivateFilename(File->getDir()->getName());
+static OptionalFileEntryRef getPrivateModuleMap(FileEntryRef File,
+ FileManager &FileMgr,
+ DiagnosticsEngine &Diags) {
+ StringRef Filename = llvm::sys::path::filename(File.getName());
+ SmallString<128> PrivateFilename(File.getDir().getName());
if (Filename == "module.map")
llvm::sys::path::append(PrivateFilename, "module_private.map");
else if (Filename == "module.modulemap")
llvm::sys::path::append(PrivateFilename, "module.private.modulemap");
else
- return nullptr;
- if (auto File = FileMgr.getFile(PrivateFilename))
- return *File;
- return nullptr;
+ return std::nullopt;
+ auto PMMFile = FileMgr.getOptionalFileRef(PrivateFilename);
+ if (PMMFile) {
+ if (Filename == "module.map")
+ Diags.Report(diag::warn_deprecated_module_dot_map)
+ << PrivateFilename << 1
+ << File.getDir().getName().ends_with(".framework");
+ }
+ return PMMFile;
}
-bool HeaderSearch::loadModuleMapFile(const FileEntry *File, bool IsSystem,
+bool HeaderSearch::loadModuleMapFile(FileEntryRef File, bool IsSystem,
FileID ID, unsigned *Offset,
StringRef OriginalModuleMapFile) {
// Find the directory for the module. For frameworks, that may require going
// up from the 'Modules' directory.
- const DirectoryEntry *Dir = nullptr;
+ OptionalDirectoryEntryRef Dir;
if (getHeaderSearchOpts().ModuleMapFileHomeIsCwd) {
- if (auto DirOrErr = FileMgr.getDirectory("."))
- Dir = *DirOrErr;
+ Dir = FileMgr.getOptionalDirectoryRef(".");
} else {
if (!OriginalModuleMapFile.empty()) {
// We're building a preprocessed module map. Find or invent the directory
// that it originally occupied.
- auto DirOrErr = FileMgr.getDirectory(
+ Dir = FileMgr.getOptionalDirectoryRef(
llvm::sys::path::parent_path(OriginalModuleMapFile));
- if (DirOrErr) {
- Dir = *DirOrErr;
- } else {
- auto *FakeFile = FileMgr.getVirtualFile(OriginalModuleMapFile, 0, 0);
- Dir = FakeFile->getDir();
+ if (!Dir) {
+ auto FakeFile = FileMgr.getVirtualFileRef(OriginalModuleMapFile, 0, 0);
+ Dir = FakeFile.getDir();
}
} else {
- Dir = File->getDir();
+ Dir = File.getDir();
}
+ assert(Dir && "parent must exist");
StringRef DirName(Dir->getName());
if (llvm::sys::path::filename(DirName) == "Modules") {
DirName = llvm::sys::path::parent_path(DirName);
- if (DirName.endswith(".framework"))
- if (auto DirOrErr = FileMgr.getDirectory(DirName))
- Dir = *DirOrErr;
+ if (DirName.ends_with(".framework"))
+ if (auto MaybeDir = FileMgr.getOptionalDirectoryRef(DirName))
+ Dir = *MaybeDir;
// FIXME: This assert can fail if there's a race between the above check
// and the removal of the directory.
assert(Dir && "parent must exist");
}
}
- switch (loadModuleMapFileImpl(File, IsSystem, Dir, ID, Offset)) {
+ assert(Dir && "module map home directory must exist");
+ switch (loadModuleMapFileImpl(File, IsSystem, *Dir, ID, Offset)) {
case LMM_AlreadyLoaded:
case LMM_NewlyLoaded:
return false;
@@ -1563,11 +1724,9 @@ bool HeaderSearch::loadModuleMapFile(const FileEntry *File, bool IsSystem,
}
HeaderSearch::LoadModuleMapResult
-HeaderSearch::loadModuleMapFileImpl(const FileEntry *File, bool IsSystem,
- const DirectoryEntry *Dir, FileID ID,
+HeaderSearch::loadModuleMapFileImpl(FileEntryRef File, bool IsSystem,
+ DirectoryEntryRef Dir, FileID ID,
unsigned *Offset) {
- assert(File && "expected FileEntry");
-
// Check whether we've already loaded this module map, and mark it as being
// loaded in case we recursively try to load it from itself.
auto AddResult = LoadedModuleMaps.insert(std::make_pair(File, true));
@@ -1580,8 +1739,9 @@ HeaderSearch::loadModuleMapFileImpl(const FileEntry *File, bool IsSystem,
}
// Try to load a corresponding private module map.
- if (const FileEntry *PMMFile = getPrivateModuleMap(File, FileMgr)) {
- if (ModMap.parseModuleMapFile(PMMFile, IsSystem, Dir)) {
+ if (OptionalFileEntryRef PMMFile =
+ getPrivateModuleMap(File, FileMgr, Diags)) {
+ if (ModMap.parseModuleMapFile(*PMMFile, IsSystem, Dir)) {
LoadedModuleMaps[File] = false;
return LMM_InvalidModuleMap;
}
@@ -1591,43 +1751,42 @@ HeaderSearch::loadModuleMapFileImpl(const FileEntry *File, bool IsSystem,
return LMM_NewlyLoaded;
}
-const FileEntry *
-HeaderSearch::lookupModuleMapFile(const DirectoryEntry *Dir, bool IsFramework) {
+OptionalFileEntryRef
+HeaderSearch::lookupModuleMapFile(DirectoryEntryRef Dir, bool IsFramework) {
if (!HSOpts->ImplicitModuleMaps)
- return nullptr;
+ return std::nullopt;
// For frameworks, the preferred spelling is Modules/module.modulemap, but
// module.map at the framework root is also accepted.
- SmallString<128> ModuleMapFileName(Dir->getName());
+ SmallString<128> ModuleMapFileName(Dir.getName());
if (IsFramework)
llvm::sys::path::append(ModuleMapFileName, "Modules");
llvm::sys::path::append(ModuleMapFileName, "module.modulemap");
- if (auto F = FileMgr.getFile(ModuleMapFileName))
+ if (auto F = FileMgr.getOptionalFileRef(ModuleMapFileName))
return *F;
- // Continue to allow module.map
- ModuleMapFileName = Dir->getName();
+ // Continue to allow module.map, but warn it's deprecated.
+ ModuleMapFileName = Dir.getName();
llvm::sys::path::append(ModuleMapFileName, "module.map");
- if (auto F = FileMgr.getFile(ModuleMapFileName))
+ if (auto F = FileMgr.getOptionalFileRef(ModuleMapFileName)) {
+ Diags.Report(diag::warn_deprecated_module_dot_map)
+ << ModuleMapFileName << 0 << IsFramework;
return *F;
+ }
// For frameworks, allow to have a private module map with a preferred
// spelling when a public module map is absent.
if (IsFramework) {
- ModuleMapFileName = Dir->getName();
+ ModuleMapFileName = Dir.getName();
llvm::sys::path::append(ModuleMapFileName, "Modules",
"module.private.modulemap");
- if (auto F = FileMgr.getFile(ModuleMapFileName))
+ if (auto F = FileMgr.getOptionalFileRef(ModuleMapFileName))
return *F;
}
- return nullptr;
+ return std::nullopt;
}
-Module *HeaderSearch::loadFrameworkModule(StringRef Name,
- const DirectoryEntry *Dir,
+Module *HeaderSearch::loadFrameworkModule(StringRef Name, DirectoryEntryRef Dir,
bool IsSystem) {
- if (Module *Module = ModMap.findModule(Name))
- return Module;
-
// Try to load a module map file.
switch (loadModuleMapFile(Dir, IsSystem, /*IsFramework*/true)) {
case LMM_InvalidModuleMap:
@@ -1636,10 +1795,10 @@ Module *HeaderSearch::loadFrameworkModule(StringRef Name,
ModMap.inferFrameworkModule(Dir, IsSystem, /*Parent=*/nullptr);
break;
- case LMM_AlreadyLoaded:
case LMM_NoDirectory:
return nullptr;
+ case LMM_AlreadyLoaded:
case LMM_NewlyLoaded:
break;
}
@@ -1650,22 +1809,23 @@ Module *HeaderSearch::loadFrameworkModule(StringRef Name,
HeaderSearch::LoadModuleMapResult
HeaderSearch::loadModuleMapFile(StringRef DirName, bool IsSystem,
bool IsFramework) {
- if (auto Dir = FileMgr.getDirectory(DirName))
+ if (auto Dir = FileMgr.getOptionalDirectoryRef(DirName))
return loadModuleMapFile(*Dir, IsSystem, IsFramework);
return LMM_NoDirectory;
}
HeaderSearch::LoadModuleMapResult
-HeaderSearch::loadModuleMapFile(const DirectoryEntry *Dir, bool IsSystem,
+HeaderSearch::loadModuleMapFile(DirectoryEntryRef Dir, bool IsSystem,
bool IsFramework) {
auto KnownDir = DirectoryHasModuleMap.find(Dir);
if (KnownDir != DirectoryHasModuleMap.end())
return KnownDir->second ? LMM_AlreadyLoaded : LMM_InvalidModuleMap;
- if (const FileEntry *ModuleMapFile = lookupModuleMapFile(Dir, IsFramework)) {
+ if (OptionalFileEntryRef ModuleMapFile =
+ lookupModuleMapFile(Dir, IsFramework)) {
LoadModuleMapResult Result =
- loadModuleMapFileImpl(ModuleMapFile, IsSystem, Dir);
+ loadModuleMapFileImpl(*ModuleMapFile, IsSystem, Dir);
// Add Dir explicitly in case ModuleMapFile is in a subdirectory.
// E.g. Foo.framework/Modules/module.modulemap
// ^Dir ^ModuleMapFile
@@ -1683,13 +1843,12 @@ void HeaderSearch::collectAllModules(SmallVectorImpl<Module *> &Modules) {
if (HSOpts->ImplicitModuleMaps) {
// Load module maps for each of the header search directories.
- for (unsigned Idx = 0, N = SearchDirs.size(); Idx != N; ++Idx) {
- bool IsSystem = SearchDirs[Idx].isSystemHeaderDirectory();
- if (SearchDirs[Idx].isFramework()) {
+ for (DirectoryLookup &DL : search_dir_range()) {
+ bool IsSystem = DL.isSystemHeaderDirectory();
+ if (DL.isFramework()) {
std::error_code EC;
SmallString<128> DirNative;
- llvm::sys::path::native(SearchDirs[Idx].getFrameworkDir()->getName(),
- DirNative);
+ llvm::sys::path::native(DL.getFrameworkDirRef()->getName(), DirNative);
// Search each of the ".framework" directories to load them as modules.
llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
@@ -1699,8 +1858,7 @@ void HeaderSearch::collectAllModules(SmallVectorImpl<Module *> &Modules) {
if (llvm::sys::path::extension(Dir->path()) != ".framework")
continue;
- auto FrameworkDir =
- FileMgr.getDirectory(Dir->path());
+ auto FrameworkDir = FileMgr.getOptionalDirectoryRef(Dir->path());
if (!FrameworkDir)
continue;
@@ -1712,25 +1870,21 @@ void HeaderSearch::collectAllModules(SmallVectorImpl<Module *> &Modules) {
}
// FIXME: Deal with header maps.
- if (SearchDirs[Idx].isHeaderMap())
+ if (DL.isHeaderMap())
continue;
// Try to load a module map file for the search directory.
- loadModuleMapFile(SearchDirs[Idx].getDir(), IsSystem,
- /*IsFramework*/ false);
+ loadModuleMapFile(*DL.getDirRef(), IsSystem, /*IsFramework*/ false);
// Try to load module map files for immediate subdirectories of this
// search directory.
- loadSubdirectoryModuleMaps(SearchDirs[Idx]);
+ loadSubdirectoryModuleMaps(DL);
}
}
// Populate the list of modules.
- for (ModuleMap::module_iterator M = ModMap.module_begin(),
- MEnd = ModMap.module_end();
- M != MEnd; ++M) {
- Modules.push_back(M->getValue());
- }
+ llvm::transform(ModMap.modules(), std::back_inserter(Modules),
+ [](const auto &NameAndMod) { return NameAndMod.second; });
}
void HeaderSearch::loadTopLevelSystemModules() {
@@ -1738,16 +1892,14 @@ void HeaderSearch::loadTopLevelSystemModules() {
return;
// Load module maps for each of the header search directories.
- for (unsigned Idx = 0, N = SearchDirs.size(); Idx != N; ++Idx) {
+ for (const DirectoryLookup &DL : search_dir_range()) {
// We only care about normal header directories.
- if (!SearchDirs[Idx].isNormalDir()) {
+ if (!DL.isNormalDir())
continue;
- }
// Try to load a module map file for the search directory.
- loadModuleMapFile(SearchDirs[Idx].getDir(),
- SearchDirs[Idx].isSystemHeaderDirectory(),
- SearchDirs[Idx].isFramework());
+ loadModuleMapFile(*DL.getDirRef(), DL.isSystemHeaderDirectory(),
+ DL.isFramework());
}
}
@@ -1759,13 +1911,15 @@ void HeaderSearch::loadSubdirectoryModuleMaps(DirectoryLookup &SearchDir) {
return;
std::error_code EC;
- SmallString<128> Dir = SearchDir.getDir()->getName();
+ SmallString<128> Dir = SearchDir.getDirRef()->getName();
FileMgr.makeAbsolutePath(Dir);
SmallString<128> DirNative;
llvm::sys::path::native(Dir, DirNative);
llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
for (llvm::vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
+ if (Dir->type() == llvm::sys::fs::file_type::regular_file)
+ continue;
bool IsFramework = llvm::sys::path::extension(Dir->path()) == ".framework";
if (IsFramework == SearchDir.isFramework())
loadModuleMapFile(Dir->path(), SearchDir.isSystemHeaderDirectory(),
@@ -1776,44 +1930,37 @@ void HeaderSearch::loadSubdirectoryModuleMaps(DirectoryLookup &SearchDir) {
}
std::string HeaderSearch::suggestPathToFileForDiagnostics(
- const FileEntry *File, llvm::StringRef MainFile, bool *IsSystem) {
- // FIXME: We assume that the path name currently cached in the FileEntry is
- // the most appropriate one for this analysis (and that it's spelled the
- // same way as the corresponding header search path).
- return suggestPathToFileForDiagnostics(File->getName(), /*WorkingDir=*/"",
- MainFile, IsSystem);
+ FileEntryRef File, llvm::StringRef MainFile, bool *IsAngled) const {
+ return suggestPathToFileForDiagnostics(File.getName(), /*WorkingDir=*/"",
+ MainFile, IsAngled);
}
std::string HeaderSearch::suggestPathToFileForDiagnostics(
llvm::StringRef File, llvm::StringRef WorkingDir, llvm::StringRef MainFile,
- bool *IsSystem) {
+ bool *IsAngled) const {
using namespace llvm::sys;
+ llvm::SmallString<32> FilePath = File;
+ // remove_dots switches to backslashes on windows as a side-effect!
+ // We always want to suggest forward slashes for includes.
+ // (not remove_dots(..., posix) as that misparses windows paths).
+ path::remove_dots(FilePath, /*remove_dot_dot=*/true);
+ path::native(FilePath, path::Style::posix);
+ File = FilePath;
+
unsigned BestPrefixLength = 0;
- // Checks whether Dir and File shares a common prefix, if they do and that's
- // the longest prefix we've seen so for it returns true and updates the
- // BestPrefixLength accordingly.
- auto CheckDir = [&](llvm::StringRef Dir) -> bool {
- llvm::SmallString<32> DirPath(Dir.begin(), Dir.end());
+ // Checks whether `Dir` is a strict path prefix of `File`. If so and that's
+ // the longest prefix we've seen so for it, returns true and updates the
+ // `BestPrefixLength` accordingly.
+ auto CheckDir = [&](llvm::SmallString<32> Dir) -> bool {
if (!WorkingDir.empty() && !path::is_absolute(Dir))
- fs::make_absolute(WorkingDir, DirPath);
- path::remove_dots(DirPath, /*remove_dot_dot=*/true);
- Dir = DirPath;
+ fs::make_absolute(WorkingDir, Dir);
+ path::remove_dots(Dir, /*remove_dot_dot=*/true);
for (auto NI = path::begin(File), NE = path::end(File),
DI = path::begin(Dir), DE = path::end(Dir);
- /*termination condition in loop*/; ++NI, ++DI) {
- // '.' components in File are ignored.
- while (NI != NE && *NI == ".")
- ++NI;
- if (NI == NE)
- break;
-
- // '.' components in Dir are ignored.
- while (DI != DE && *DI == ".")
- ++DI;
+ NI != NE; ++NI, ++DI) {
if (DI == DE) {
- // Dir is a prefix of File, up to '.' components and choice of path
- // separators.
+ // Dir is a prefix of File, up to choice of path separators.
unsigned PrefixLength = NI - path::begin(File);
if (PrefixLength > BestPrefixLength) {
BestPrefixLength = PrefixLength;
@@ -1827,40 +1974,74 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(
path::is_separator(NI->front()) && path::is_separator(DI->front()))
continue;
+ // Special case Apple .sdk folders since the search path is typically a
+ // symlink like `iPhoneSimulator14.5.sdk` while the file is instead
+ // located in `iPhoneSimulator.sdk` (the real folder).
+ if (NI->ends_with(".sdk") && DI->ends_with(".sdk")) {
+ StringRef NBasename = path::stem(*NI);
+ StringRef DBasename = path::stem(*DI);
+ if (DBasename.starts_with(NBasename))
+ continue;
+ }
+
if (*NI != *DI)
break;
}
return false;
};
- for (unsigned I = 0; I != SearchDirs.size(); ++I) {
- // FIXME: Support this search within frameworks.
- if (!SearchDirs[I].isNormalDir())
- continue;
-
- StringRef Dir = SearchDirs[I].getDir()->getName();
- if (CheckDir(Dir) && IsSystem)
- *IsSystem = BestPrefixLength ? I >= SystemDirIdx : false;
+ bool BestPrefixIsFramework = false;
+ for (const DirectoryLookup &DL : search_dir_range()) {
+ if (DL.isNormalDir()) {
+ StringRef Dir = DL.getDirRef()->getName();
+ if (CheckDir(Dir)) {
+ if (IsAngled)
+ *IsAngled = BestPrefixLength && isSystem(DL.getDirCharacteristic());
+ BestPrefixIsFramework = false;
+ }
+ } else if (DL.isFramework()) {
+ StringRef Dir = DL.getFrameworkDirRef()->getName();
+ if (CheckDir(Dir)) {
+ // Framework includes by convention use <>.
+ if (IsAngled)
+ *IsAngled = BestPrefixLength;
+ BestPrefixIsFramework = true;
+ }
+ }
}
// Try to shorten include path using TUs directory, if we couldn't find any
// suitable prefix in include search paths.
- if (!BestPrefixLength && CheckDir(path::parent_path(MainFile)) && IsSystem)
- *IsSystem = false;
+ if (!BestPrefixLength && CheckDir(path::parent_path(MainFile))) {
+ if (IsAngled)
+ *IsAngled = false;
+ BestPrefixIsFramework = false;
+ }
// Try resolving resulting filename via reverse search in header maps,
- // key from header name is user prefered name for the include file.
+ // key from header name is user preferred name for the include file.
StringRef Filename = File.drop_front(BestPrefixLength);
- for (unsigned I = 0; I != SearchDirs.size(); ++I) {
- if (!SearchDirs[I].isHeaderMap())
+ for (const DirectoryLookup &DL : search_dir_range()) {
+ if (!DL.isHeaderMap())
continue;
StringRef SpelledFilename =
- SearchDirs[I].getHeaderMap()->reverseLookupFilename(Filename);
+ DL.getHeaderMap()->reverseLookupFilename(Filename);
if (!SpelledFilename.empty()) {
Filename = SpelledFilename;
+ BestPrefixIsFramework = false;
break;
}
}
+
+ // If the best prefix is a framework path, we need to compute the proper
+ // include spelling for the framework header.
+ bool IsPrivateHeader;
+ SmallString<128> FrameworkName, IncludeSpelling;
+ if (BestPrefixIsFramework &&
+ isFrameworkStylePath(Filename, IsPrivateHeader, FrameworkName,
+ IncludeSpelling)) {
+ Filename = IncludeSpelling;
+ }
return path::convert_to_slash(Filename);
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/InitHeaderSearch.cpp b/contrib/llvm-project/clang/lib/Lex/InitHeaderSearch.cpp
index ba9f96384f81..2218db15013d 100644
--- a/contrib/llvm-project/clang/lib/Frontend/InitHeaderSearch.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/InitHeaderSearch.cpp
@@ -10,11 +10,10 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Basic/DiagnosticFrontend.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Config/config.h" // C_INCLUDE_DIRS
-#include "clang/Frontend/FrontendDiagnostic.h"
-#include "clang/Frontend/Utils.h"
#include "clang/Lex/HeaderMap.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/HeaderSearchOptions.h"
@@ -22,11 +21,12 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Triple.h"
+#include <optional>
using namespace clang;
using namespace clang::frontend;
@@ -36,14 +36,16 @@ namespace {
struct DirectoryLookupInfo {
IncludeDirGroup Group;
DirectoryLookup Lookup;
+ std::optional<unsigned> UserEntryIdx;
- DirectoryLookupInfo(IncludeDirGroup Group, DirectoryLookup Lookup)
- : Group(Group), Lookup(Lookup) {}
+ DirectoryLookupInfo(IncludeDirGroup Group, DirectoryLookup Lookup,
+ std::optional<unsigned> UserEntryIdx)
+ : Group(Group), Lookup(Lookup), UserEntryIdx(UserEntryIdx) {}
};
-/// InitHeaderSearch - This class makes it easier to set the search paths of
-/// a HeaderSearch object. InitHeaderSearch stores several search path lists
-/// internally, which can be sent to a HeaderSearch object in one swoop.
+/// This class makes it easier to set the search paths of a HeaderSearch object.
+/// InitHeaderSearch stores several search path lists internally, which can be
+/// sent to a HeaderSearch object in one swoop.
class InitHeaderSearch {
std::vector<DirectoryLookupInfo> IncludePath;
std::vector<std::pair<std::string, bool> > SystemHeaderPrefixes;
@@ -57,54 +59,48 @@ public:
: Headers(HS), Verbose(verbose), IncludeSysroot(std::string(sysroot)),
HasSysroot(!(sysroot.empty() || sysroot == "/")) {}
- /// AddPath - Add the specified path to the specified group list, prefixing
- /// the sysroot if used.
+ /// Add the specified path to the specified group list, prefixing the sysroot
+ /// if used.
/// Returns true if the path exists, false if it was ignored.
- bool AddPath(const Twine &Path, IncludeDirGroup Group, bool isFramework);
+ bool AddPath(const Twine &Path, IncludeDirGroup Group, bool isFramework,
+ std::optional<unsigned> UserEntryIdx = std::nullopt);
- /// AddUnmappedPath - Add the specified path to the specified group list,
- /// without performing any sysroot remapping.
+ /// Add the specified path to the specified group list, without performing any
+ /// sysroot remapping.
/// Returns true if the path exists, false if it was ignored.
bool AddUnmappedPath(const Twine &Path, IncludeDirGroup Group,
- bool isFramework);
+ bool isFramework,
+ std::optional<unsigned> UserEntryIdx = std::nullopt);
- /// AddSystemHeaderPrefix - Add the specified prefix to the system header
- /// prefix list.
+ /// Add the specified prefix to the system header prefix list.
void AddSystemHeaderPrefix(StringRef Prefix, bool IsSystemHeader) {
SystemHeaderPrefixes.emplace_back(std::string(Prefix), IsSystemHeader);
}
- /// AddGnuCPlusPlusIncludePaths - Add the necessary paths to support a gnu
- /// libstdc++.
- /// Returns true if the \p Base path was found, false if it does not exist.
- bool AddGnuCPlusPlusIncludePaths(StringRef Base, StringRef ArchDir,
- StringRef Dir32, StringRef Dir64,
- const llvm::Triple &triple);
-
- /// AddMinGWCPlusPlusIncludePaths - Add the necessary paths to support a MinGW
- /// libstdc++.
+ /// Add the necessary paths to support a MinGW libstdc++.
void AddMinGWCPlusPlusIncludePaths(StringRef Base,
StringRef Arch,
StringRef Version);
- // AddDefaultCIncludePaths - Add paths that should always be searched.
+ /// Add paths that should always be searched.
void AddDefaultCIncludePaths(const llvm::Triple &triple,
const HeaderSearchOptions &HSOpts);
- // AddDefaultCPlusPlusIncludePaths - Add paths that should be searched when
- // compiling c++.
+ /// Add paths that should be searched when compiling c++.
void AddDefaultCPlusPlusIncludePaths(const LangOptions &LangOpts,
const llvm::Triple &triple,
const HeaderSearchOptions &HSOpts);
- /// AddDefaultSystemIncludePaths - Adds the default system include paths so
- /// that e.g. stdio.h is found.
+ /// Returns true iff AddDefaultIncludePaths should do anything. If this
+ /// returns false, include paths should instead be handled in the driver.
+ bool ShouldAddDefaultIncludePaths(const llvm::Triple &triple);
+
+ /// Adds the default system include paths so that e.g. stdio.h is found.
void AddDefaultIncludePaths(const LangOptions &Lang,
const llvm::Triple &triple,
const HeaderSearchOptions &HSOpts);
- /// Realize - Merges all search path lists into one list and send it to
- /// HeaderSearch.
+ /// Merges all search path lists into one list and send it to HeaderSearch.
void Realize(const LangOptions &Lang);
};
@@ -119,22 +115,25 @@ static bool CanPrefixSysroot(StringRef Path) {
}
bool InitHeaderSearch::AddPath(const Twine &Path, IncludeDirGroup Group,
- bool isFramework) {
+ bool isFramework,
+ std::optional<unsigned> UserEntryIdx) {
// Add the path with sysroot prepended, if desired and this is a system header
// group.
if (HasSysroot) {
SmallString<256> MappedPathStorage;
StringRef MappedPathStr = Path.toStringRef(MappedPathStorage);
if (CanPrefixSysroot(MappedPathStr)) {
- return AddUnmappedPath(IncludeSysroot + Path, Group, isFramework);
+ return AddUnmappedPath(IncludeSysroot + Path, Group, isFramework,
+ UserEntryIdx);
}
}
- return AddUnmappedPath(Path, Group, isFramework);
+ return AddUnmappedPath(Path, Group, isFramework, UserEntryIdx);
}
bool InitHeaderSearch::AddUnmappedPath(const Twine &Path, IncludeDirGroup Group,
- bool isFramework) {
+ bool isFramework,
+ std::optional<unsigned> UserEntryIdx) {
assert(!Path.isTriviallyEmpty() && "can't handle empty path here");
FileManager &FM = Headers.getFileMgr();
@@ -142,8 +141,8 @@ bool InitHeaderSearch::AddUnmappedPath(const Twine &Path, IncludeDirGroup Group,
StringRef MappedPathStr = Path.toStringRef(MappedPathStorage);
// If use system headers while cross-compiling, emit the warning.
- if (HasSysroot && (MappedPathStr.startswith("/usr/include") ||
- MappedPathStr.startswith("/usr/local/include"))) {
+ if (HasSysroot && (MappedPathStr.starts_with("/usr/include") ||
+ MappedPathStr.starts_with("/usr/local/include"))) {
Headers.getDiags().Report(diag::warn_poison_system_directories)
<< MappedPathStr;
}
@@ -160,18 +159,20 @@ bool InitHeaderSearch::AddUnmappedPath(const Twine &Path, IncludeDirGroup Group,
// If the directory exists, add it.
if (auto DE = FM.getOptionalDirectoryRef(MappedPathStr)) {
- IncludePath.emplace_back(Group, DirectoryLookup(*DE, Type, isFramework));
+ IncludePath.emplace_back(Group, DirectoryLookup(*DE, Type, isFramework),
+ UserEntryIdx);
return true;
}
// Check to see if this is an apple-style headermap (which are not allowed to
// be frameworks).
if (!isFramework) {
- if (auto FE = FM.getFile(MappedPathStr)) {
+ if (auto FE = FM.getOptionalFileRef(MappedPathStr)) {
if (const HeaderMap *HM = Headers.CreateHeaderMap(*FE)) {
// It is a headermap, add it to the search path.
IncludePath.emplace_back(
- Group, DirectoryLookup(HM, Type, Group == IndexHeaderMap));
+ Group, DirectoryLookup(HM, Type, Group == IndexHeaderMap),
+ UserEntryIdx);
return true;
}
}
@@ -183,27 +184,6 @@ bool InitHeaderSearch::AddUnmappedPath(const Twine &Path, IncludeDirGroup Group,
return false;
}
-bool InitHeaderSearch::AddGnuCPlusPlusIncludePaths(StringRef Base,
- StringRef ArchDir,
- StringRef Dir32,
- StringRef Dir64,
- const llvm::Triple &triple) {
- // Add the base dir
- bool IsBaseFound = AddPath(Base, CXXSystem, false);
-
- // Add the multilib dirs
- llvm::Triple::ArchType arch = triple.getArch();
- bool is64bit = arch == llvm::Triple::ppc64 || arch == llvm::Triple::x86_64;
- if (is64bit)
- AddPath(Base + "/" + ArchDir + "/" + Dir64, CXXSystem, false);
- else
- AddPath(Base + "/" + ArchDir + "/" + Dir32, CXXSystem, false);
-
- // Add the backward dir
- AddPath(Base + "/backward", CXXSystem, false);
- return IsBaseFound;
-}
-
void InitHeaderSearch::AddMinGWCPlusPlusIncludePaths(StringRef Base,
StringRef Arch,
StringRef Version) {
@@ -217,27 +197,17 @@ void InitHeaderSearch::AddMinGWCPlusPlusIncludePaths(StringRef Base,
void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
const HeaderSearchOptions &HSOpts) {
- llvm::Triple::OSType os = triple.getOS();
-
- if (triple.isOSDarwin()) {
+ if (!ShouldAddDefaultIncludePaths(triple))
llvm_unreachable("Include management is handled in the driver.");
- }
+
+ llvm::Triple::OSType os = triple.getOS();
if (HSOpts.UseStandardSystemIncludes) {
switch (os) {
- case llvm::Triple::CloudABI:
- case llvm::Triple::FreeBSD:
- case llvm::Triple::NetBSD:
- case llvm::Triple::OpenBSD:
- case llvm::Triple::NaCl:
- case llvm::Triple::PS4:
- case llvm::Triple::ELFIAMCU:
- case llvm::Triple::Fuchsia:
- break;
case llvm::Triple::Win32:
if (triple.getEnvironment() != llvm::Triple::Cygnus)
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
// FIXME: temporary hack: hard-coded paths.
AddPath("/usr/local/include", System, false);
@@ -271,57 +241,6 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
}
switch (os) {
- case llvm::Triple::Linux:
- case llvm::Triple::Hurd:
- case llvm::Triple::Solaris:
- case llvm::Triple::OpenBSD:
- llvm_unreachable("Include management is handled in the driver.");
-
- case llvm::Triple::CloudABI: {
- // <sysroot>/<triple>/include
- SmallString<128> P = StringRef(HSOpts.ResourceDir);
- llvm::sys::path::append(P, "../../..", triple.str(), "include");
- AddPath(P, System, false);
- break;
- }
-
- case llvm::Triple::Haiku:
- AddPath("/boot/system/non-packaged/develop/headers", System, false);
- AddPath("/boot/system/develop/headers/os", System, false);
- AddPath("/boot/system/develop/headers/os/app", System, false);
- AddPath("/boot/system/develop/headers/os/arch", System, false);
- AddPath("/boot/system/develop/headers/os/device", System, false);
- AddPath("/boot/system/develop/headers/os/drivers", System, false);
- AddPath("/boot/system/develop/headers/os/game", System, false);
- AddPath("/boot/system/develop/headers/os/interface", System, false);
- AddPath("/boot/system/develop/headers/os/kernel", System, false);
- AddPath("/boot/system/develop/headers/os/locale", System, false);
- AddPath("/boot/system/develop/headers/os/mail", System, false);
- AddPath("/boot/system/develop/headers/os/media", System, false);
- AddPath("/boot/system/develop/headers/os/midi", System, false);
- AddPath("/boot/system/develop/headers/os/midi2", System, false);
- AddPath("/boot/system/develop/headers/os/net", System, false);
- AddPath("/boot/system/develop/headers/os/opengl", System, false);
- AddPath("/boot/system/develop/headers/os/storage", System, false);
- AddPath("/boot/system/develop/headers/os/support", System, false);
- AddPath("/boot/system/develop/headers/os/translation", System, false);
- AddPath("/boot/system/develop/headers/os/add-ons/graphics", System, false);
- AddPath("/boot/system/develop/headers/os/add-ons/input_server", System, false);
- AddPath("/boot/system/develop/headers/os/add-ons/mail_daemon", System, false);
- AddPath("/boot/system/develop/headers/os/add-ons/registrar", System, false);
- AddPath("/boot/system/develop/headers/os/add-ons/screen_saver", System, false);
- AddPath("/boot/system/develop/headers/os/add-ons/tracker", System, false);
- AddPath("/boot/system/develop/headers/os/be_apps/Deskbar", System, false);
- AddPath("/boot/system/develop/headers/os/be_apps/NetPositive", System, false);
- AddPath("/boot/system/develop/headers/os/be_apps/Tracker", System, false);
- AddPath("/boot/system/develop/headers/3rdparty", System, false);
- AddPath("/boot/system/develop/headers/bsd", System, false);
- AddPath("/boot/system/develop/headers/glibc", System, false);
- AddPath("/boot/system/develop/headers/posix", System, false);
- AddPath("/boot/system/develop/headers", System, false);
- break;
- case llvm::Triple::RTEMS:
- break;
case llvm::Triple::Win32:
switch (triple.getEnvironment()) {
default: llvm_unreachable("Include management is handled in the driver.");
@@ -336,59 +255,18 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
break;
}
- switch (os) {
- case llvm::Triple::CloudABI:
- case llvm::Triple::RTEMS:
- case llvm::Triple::NaCl:
- case llvm::Triple::ELFIAMCU:
- case llvm::Triple::Fuchsia:
- break;
- case llvm::Triple::PS4: {
- // <isysroot> gets prepended later in AddPath().
- std::string BaseSDKPath = "";
- if (!HasSysroot) {
- const char *envValue = getenv("SCE_ORBIS_SDK_DIR");
- if (envValue)
- BaseSDKPath = envValue;
- else {
- // HSOpts.ResourceDir variable contains the location of Clang's
- // resource files.
- // Assuming that Clang is configured for PS4 without
- // --with-clang-resource-dir option, the location of Clang's resource
- // files is <SDK_DIR>/host_tools/lib/clang
- SmallString<128> P = StringRef(HSOpts.ResourceDir);
- llvm::sys::path::append(P, "../../..");
- BaseSDKPath = std::string(P.str());
- }
- }
- AddPath(BaseSDKPath + "/target/include", System, false);
- if (triple.isPS4CPU())
- AddPath(BaseSDKPath + "/target/include_common", System, false);
- LLVM_FALLTHROUGH;
- }
- default:
- AddPath("/usr/include", ExternCSystem, false);
- break;
- }
+ AddPath("/usr/include", ExternCSystem, false);
}
void InitHeaderSearch::AddDefaultCPlusPlusIncludePaths(
const LangOptions &LangOpts, const llvm::Triple &triple,
const HeaderSearchOptions &HSOpts) {
- llvm::Triple::OSType os = triple.getOS();
- // FIXME: temporary hack: hard-coded paths.
-
- if (triple.isOSDarwin()) {
+ if (!ShouldAddDefaultIncludePaths(triple))
llvm_unreachable("Include management is handled in the driver.");
- }
+ // FIXME: temporary hack: hard-coded paths.
+ llvm::Triple::OSType os = triple.getOS();
switch (os) {
- case llvm::Triple::Linux:
- case llvm::Triple::Hurd:
- case llvm::Triple::Solaris:
- case llvm::Triple::AIX:
- llvm_unreachable("Include management is handled in the driver.");
- break;
case llvm::Triple::Win32:
switch (triple.getEnvironment()) {
default: llvm_unreachable("Include management is handled in the driver.");
@@ -402,56 +280,75 @@ void InitHeaderSearch::AddDefaultCPlusPlusIncludePaths(
break;
}
break;
- case llvm::Triple::DragonFly:
- AddPath("/usr/include/c++/5.0", CXXSystem, false);
- break;
- case llvm::Triple::Minix:
- AddGnuCPlusPlusIncludePaths("/usr/gnu/include/c++/4.4.3",
- "", "", "", triple);
- break;
default:
break;
}
}
-void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang,
- const llvm::Triple &triple,
- const HeaderSearchOptions &HSOpts) {
- // NB: This code path is going away. All of the logic is moving into the
- // driver which has the information necessary to do target-specific
- // selections of default include paths. Each target which moves there will be
- // exempted from this logic here until we can delete the entire pile of code.
+bool InitHeaderSearch::ShouldAddDefaultIncludePaths(
+ const llvm::Triple &triple) {
switch (triple.getOS()) {
- default:
- break; // Everything else continues to use this routine's logic.
-
+ case llvm::Triple::AIX:
+ case llvm::Triple::DragonFly:
+ case llvm::Triple::ELFIAMCU:
case llvm::Triple::Emscripten:
- case llvm::Triple::Linux:
+ case llvm::Triple::FreeBSD:
+ case llvm::Triple::Fuchsia:
+ case llvm::Triple::Haiku:
case llvm::Triple::Hurd:
+ case llvm::Triple::Linux:
+ case llvm::Triple::LiteOS:
+ case llvm::Triple::NaCl:
+ case llvm::Triple::NetBSD:
case llvm::Triple::OpenBSD:
+ case llvm::Triple::PS4:
+ case llvm::Triple::PS5:
+ case llvm::Triple::RTEMS:
case llvm::Triple::Solaris:
case llvm::Triple::WASI:
- case llvm::Triple::AIX:
- return;
+ case llvm::Triple::ZOS:
+ return false;
case llvm::Triple::Win32:
if (triple.getEnvironment() != llvm::Triple::Cygnus ||
triple.isOSBinFormatMachO())
- return;
+ return false;
break;
case llvm::Triple::UnknownOS:
if (triple.isWasm())
- return;
+ return false;
+ break;
+
+ default:
break;
}
- // All header search logic is handled in the Driver for Darwin.
+ return true; // Everything else uses AddDefaultIncludePaths().
+}
+
+void InitHeaderSearch::AddDefaultIncludePaths(
+ const LangOptions &Lang, const llvm::Triple &triple,
+ const HeaderSearchOptions &HSOpts) {
+ // NB: This code path is going away. All of the logic is moving into the
+ // driver which has the information necessary to do target-specific
+ // selections of default include paths. Each target which moves there will be
+ // exempted from this logic in ShouldAddDefaultIncludePaths() until we can
+ // delete the entire pile of code.
+ if (!ShouldAddDefaultIncludePaths(triple))
+ return;
+
+ // NOTE: some additional header search logic is handled in the driver for
+ // Darwin.
if (triple.isOSDarwin()) {
if (HSOpts.UseStandardSystemIncludes) {
// Add the default framework include paths on Darwin.
- AddPath("/System/Library/Frameworks", System, true);
- AddPath("/Library/Frameworks", System, true);
+ if (triple.isDriverKit()) {
+ AddPath("/System/DriverKit/System/Library/Frameworks", System, true);
+ } else {
+ AddPath("/System/Library/Frameworks", System, true);
+ AddPath("/Library/Frameworks", System, true);
+ }
}
return;
}
@@ -468,10 +365,10 @@ void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang,
AddDefaultCIncludePaths(triple, HSOpts);
}
-/// RemoveDuplicates - If there are duplicate directory entries in the specified
-/// search list, remove the later (dead) ones. Returns the number of non-system
-/// headers removed, which is used to update NumAngled.
-static unsigned RemoveDuplicates(std::vector<DirectoryLookup> &SearchList,
+/// If there are duplicate directory entries in the specified search list,
+/// remove the later (dead) ones. Returns the number of non-system headers
+/// removed, which is used to update NumAngled.
+static unsigned RemoveDuplicates(std::vector<DirectoryLookupInfo> &SearchList,
unsigned First, bool Verbose) {
llvm::SmallPtrSet<const DirectoryEntry *, 8> SeenDirs;
llvm::SmallPtrSet<const DirectoryEntry *, 8> SeenFrameworkDirs;
@@ -480,7 +377,7 @@ static unsigned RemoveDuplicates(std::vector<DirectoryLookup> &SearchList,
for (unsigned i = First; i != SearchList.size(); ++i) {
unsigned DirToRemove = i;
- const DirectoryLookup &CurEntry = SearchList[i];
+ const DirectoryLookup &CurEntry = SearchList[i].Lookup;
if (CurEntry.isNormalDir()) {
// If this isn't the first time we've seen this dir, remove it.
@@ -510,7 +407,7 @@ static unsigned RemoveDuplicates(std::vector<DirectoryLookup> &SearchList,
for (FirstDir = First;; ++FirstDir) {
assert(FirstDir != i && "Didn't find dupe?");
- const DirectoryLookup &SearchEntry = SearchList[FirstDir];
+ const DirectoryLookup &SearchEntry = SearchList[FirstDir].Lookup;
// If these are different lookup types, then they can't be the dupe.
if (SearchEntry.getLookupType() != CurEntry.getLookupType())
@@ -532,7 +429,7 @@ static unsigned RemoveDuplicates(std::vector<DirectoryLookup> &SearchList,
// If the first dir in the search path is a non-system dir, zap it
// instead of the system one.
- if (SearchList[FirstDir].getDirCharacteristic() == SrcMgr::C_User)
+ if (SearchList[FirstDir].Lookup.getDirCharacteristic() == SrcMgr::C_User)
DirToRemove = FirstDir;
}
@@ -554,16 +451,37 @@ static unsigned RemoveDuplicates(std::vector<DirectoryLookup> &SearchList,
return NonSystemRemoved;
}
+/// Extract DirectoryLookups from DirectoryLookupInfos.
+static std::vector<DirectoryLookup>
+extractLookups(const std::vector<DirectoryLookupInfo> &Infos) {
+ std::vector<DirectoryLookup> Lookups;
+ Lookups.reserve(Infos.size());
+ llvm::transform(Infos, std::back_inserter(Lookups),
+ [](const DirectoryLookupInfo &Info) { return Info.Lookup; });
+ return Lookups;
+}
+
+/// Collect the mapping between indices of DirectoryLookups and UserEntries.
+static llvm::DenseMap<unsigned, unsigned>
+mapToUserEntries(const std::vector<DirectoryLookupInfo> &Infos) {
+ llvm::DenseMap<unsigned, unsigned> LookupsToUserEntries;
+ for (unsigned I = 0, E = Infos.size(); I < E; ++I) {
+ // Check whether this DirectoryLookup maps to a HeaderSearch::UserEntry.
+ if (Infos[I].UserEntryIdx)
+ LookupsToUserEntries.insert({I, *Infos[I].UserEntryIdx});
+ }
+ return LookupsToUserEntries;
+}
void InitHeaderSearch::Realize(const LangOptions &Lang) {
// Concatenate ANGLE+SYSTEM+AFTER chains together into SearchList.
- std::vector<DirectoryLookup> SearchList;
+ std::vector<DirectoryLookupInfo> SearchList;
SearchList.reserve(IncludePath.size());
// Quoted arguments go first.
for (auto &Include : IncludePath)
if (Include.Group == Quoted)
- SearchList.push_back(Include.Lookup);
+ SearchList.push_back(Include);
// Deduplicate and remember index.
RemoveDuplicates(SearchList, 0, Verbose);
@@ -571,7 +489,7 @@ void InitHeaderSearch::Realize(const LangOptions &Lang) {
for (auto &Include : IncludePath)
if (Include.Group == Angled || Include.Group == IndexHeaderMap)
- SearchList.push_back(Include.Lookup);
+ SearchList.push_back(Include);
RemoveDuplicates(SearchList, NumQuoted, Verbose);
unsigned NumAngled = SearchList.size();
@@ -583,11 +501,11 @@ void InitHeaderSearch::Realize(const LangOptions &Lang) {
Include.Group == CXXSystem) ||
(Lang.ObjC && !Lang.CPlusPlus && Include.Group == ObjCSystem) ||
(Lang.ObjC && Lang.CPlusPlus && Include.Group == ObjCXXSystem))
- SearchList.push_back(Include.Lookup);
+ SearchList.push_back(Include);
for (auto &Include : IncludePath)
if (Include.Group == After)
- SearchList.push_back(Include.Lookup);
+ SearchList.push_back(Include);
// Remove duplicates across both the Angled and System directories. GCC does
// this and failing to remove duplicates across these two groups breaks
@@ -595,8 +513,8 @@ void InitHeaderSearch::Realize(const LangOptions &Lang) {
unsigned NonSystemRemoved = RemoveDuplicates(SearchList, NumQuoted, Verbose);
NumAngled -= NonSystemRemoved;
- bool DontSearchCurDir = false; // TODO: set to true if -I- is set?
- Headers.SetSearchPaths(SearchList, NumQuoted, NumAngled, DontSearchCurDir);
+ Headers.SetSearchPaths(extractLookups(SearchList), NumQuoted, NumAngled,
+ mapToUserEntries(SearchList));
Headers.SetSystemHeaderPrefixes(SystemHeaderPrefixes);
@@ -606,14 +524,14 @@ void InitHeaderSearch::Realize(const LangOptions &Lang) {
for (unsigned i = 0, e = SearchList.size(); i != e; ++i) {
if (i == NumQuoted)
llvm::errs() << "#include <...> search starts here:\n";
- StringRef Name = SearchList[i].getName();
+ StringRef Name = SearchList[i].Lookup.getName();
const char *Suffix;
- if (SearchList[i].isNormalDir())
+ if (SearchList[i].Lookup.isNormalDir())
Suffix = "";
- else if (SearchList[i].isFramework())
+ else if (SearchList[i].Lookup.isFramework())
Suffix = " (framework directory)";
else {
- assert(SearchList[i].isHeaderMap() && "Unknown DirectoryLookup");
+ assert(SearchList[i].Lookup.isHeaderMap() && "Unknown DirectoryLookup");
Suffix = " (headermap)";
}
llvm::errs() << " " << Name << Suffix << "\n";
@@ -632,9 +550,9 @@ void clang::ApplyHeaderSearchOptions(HeaderSearch &HS,
for (unsigned i = 0, e = HSOpts.UserEntries.size(); i != e; ++i) {
const HeaderSearchOptions::Entry &E = HSOpts.UserEntries[i];
if (E.IgnoreSysRoot) {
- Init.AddUnmappedPath(E.Path, E.Group, E.IsFramework);
+ Init.AddUnmappedPath(E.Path, E.Group, E.IsFramework, i);
} else {
- Init.AddPath(E.Path, E.Group, E.IsFramework);
+ Init.AddPath(E.Path, E.Group, E.IsFramework, i);
}
}
@@ -648,7 +566,7 @@ void clang::ApplyHeaderSearchOptions(HeaderSearch &HS,
// Set up the builtin include directory in the module map.
SmallString<128> P = StringRef(HSOpts.ResourceDir);
llvm::sys::path::append(P, "include");
- if (auto Dir = HS.getFileMgr().getDirectory(P))
+ if (auto Dir = HS.getFileMgr().getOptionalDirectoryRef(P))
HS.getModuleMap().setBuiltinIncludeDir(*Dir);
}
diff --git a/contrib/llvm-project/clang/lib/Lex/Lexer.cpp b/contrib/llvm-project/clang/lib/Lex/Lexer.cpp
index 64944492eb99..50b56265f6e1 100644
--- a/contrib/llvm-project/clang/lib/Lex/Lexer.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/Lexer.cpp
@@ -26,8 +26,6 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Lex/Token.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
@@ -37,16 +35,22 @@
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemoryBufferRef.h"
#include "llvm/Support/NativeFormatting.h"
+#include "llvm/Support/Unicode.h"
#include "llvm/Support/UnicodeCharRanges.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
+#include <optional>
#include <string>
#include <tuple>
#include <utility>
+#ifdef __SSE4_2__
+#include <nmmintrin.h>
+#endif
+
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -57,7 +61,7 @@ using namespace clang;
bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const {
if (isAnnotation())
return false;
- if (IdentifierInfo *II = getIdentifierInfo())
+ if (const IdentifierInfo *II = getIdentifierInfo())
return II->getObjCKeywordID() == objcKey;
return false;
}
@@ -66,7 +70,7 @@ bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const {
tok::ObjCKeywordKind Token::getObjCKeywordID() const {
if (isAnnotation())
return tok::objc_not_keyword;
- IdentifierInfo *specId = getIdentifierInfo();
+ const IdentifierInfo *specId = getIdentifierInfo();
return specId ? specId->getObjCKeywordID() : tok::objc_not_keyword;
}
@@ -133,10 +137,11 @@ void Lexer::InitLexer(const char *BufStart, const char *BufPtr,
/// assumes that the associated file buffer and Preprocessor objects will
/// outlive it, so it doesn't take ownership of either of them.
Lexer::Lexer(FileID FID, const llvm::MemoryBufferRef &InputFile,
- Preprocessor &PP)
+ Preprocessor &PP, bool IsFirstIncludeOfFile)
: PreprocessorLexer(&PP, FID),
FileLoc(PP.getSourceManager().getLocForStartOfFile(FID)),
- LangOpts(PP.getLangOpts()) {
+ LangOpts(PP.getLangOpts()), LineComment(LangOpts.LineComment),
+ IsFirstTimeLexingFile(IsFirstIncludeOfFile) {
InitLexer(InputFile.getBufferStart(), InputFile.getBufferStart(),
InputFile.getBufferEnd());
@@ -147,8 +152,10 @@ Lexer::Lexer(FileID FID, const llvm::MemoryBufferRef &InputFile,
/// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
Lexer::Lexer(SourceLocation fileloc, const LangOptions &langOpts,
- const char *BufStart, const char *BufPtr, const char *BufEnd)
- : FileLoc(fileloc), LangOpts(langOpts) {
+ const char *BufStart, const char *BufPtr, const char *BufEnd,
+ bool IsFirstIncludeOfFile)
+ : FileLoc(fileloc), LangOpts(langOpts), LineComment(LangOpts.LineComment),
+ IsFirstTimeLexingFile(IsFirstIncludeOfFile) {
InitLexer(BufStart, BufPtr, BufEnd);
// We *are* in raw mode.
@@ -159,9 +166,11 @@ Lexer::Lexer(SourceLocation fileloc, const LangOptions &langOpts,
/// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the text
/// range will outlive it, so it doesn't take ownership of it.
Lexer::Lexer(FileID FID, const llvm::MemoryBufferRef &FromFile,
- const SourceManager &SM, const LangOptions &langOpts)
+ const SourceManager &SM, const LangOptions &langOpts,
+ bool IsFirstIncludeOfFile)
: Lexer(SM.getLocForStartOfFile(FID), langOpts, FromFile.getBufferStart(),
- FromFile.getBufferStart(), FromFile.getBufferEnd()) {}
+ FromFile.getBufferStart(), FromFile.getBufferEnd(),
+ IsFirstIncludeOfFile) {}
void Lexer::resetExtendedTokenMode() {
assert(PP && "Cannot reset token mode without a preprocessor");
@@ -221,13 +230,11 @@ Lexer *Lexer::Create_PragmaLexer(SourceLocation SpellingLoc,
return L;
}
-bool Lexer::skipOver(unsigned NumBytes) {
- IsAtPhysicalStartOfLine = true;
- IsAtStartOfLine = true;
- if ((BufferPtr + NumBytes) > BufferEnd)
- return true;
- BufferPtr += NumBytes;
- return false;
+void Lexer::seek(unsigned Offset, bool IsAtStartOfLine) {
+ this->IsAtPhysicalStartOfLine = IsAtStartOfLine;
+ this->IsAtStartOfLine = IsAtStartOfLine;
+ assert((BufferStart + Offset) <= BufferEnd);
+ BufferPtr = BufferStart + Offset;
}
template <typename T> static void StringifyImpl(T &Str, char Quote) {
@@ -280,9 +287,9 @@ static size_t getSpellingSlow(const Token &Tok, const char *BufPtr,
if (tok::isStringLiteral(Tok.getKind())) {
// Munch the encoding-prefix and opening double-quote.
while (BufPtr < BufEnd) {
- unsigned Size;
- Spelling[Length++] = Lexer::getCharAndSizeNoWarn(BufPtr, Size, LangOpts);
- BufPtr += Size;
+ auto CharAndSize = Lexer::getCharAndSizeNoWarn(BufPtr, LangOpts);
+ Spelling[Length++] = CharAndSize.Char;
+ BufPtr += CharAndSize.Size;
if (Spelling[Length - 1] == '"')
break;
@@ -309,9 +316,9 @@ static size_t getSpellingSlow(const Token &Tok, const char *BufPtr,
}
while (BufPtr < BufEnd) {
- unsigned Size;
- Spelling[Length++] = Lexer::getCharAndSizeNoWarn(BufPtr, Size, LangOpts);
- BufPtr += Size;
+ auto CharAndSize = Lexer::getCharAndSizeNoWarn(BufPtr, LangOpts);
+ Spelling[Length++] = CharAndSize.Char;
+ BufPtr += CharAndSize.Size;
}
assert(Length < Tok.getLength() &&
@@ -702,6 +709,22 @@ PreambleBounds Lexer::ComputePreamble(StringRef Buffer,
// directive or it was one that can't occur in the preamble at this
// point. Roll back the current token to the location of the '#'.
TheTok = HashTok;
+ } else if (TheTok.isAtStartOfLine() &&
+ TheTok.getKind() == tok::raw_identifier &&
+ TheTok.getRawIdentifier() == "module" &&
+ LangOpts.CPlusPlusModules) {
+ // The initial global module fragment introducer "module;" is part of
+ // the preamble, which runs up to the module declaration "module foo;".
+ Token ModuleTok = TheTok;
+ do {
+ TheLexer.LexFromRawLexer(TheTok);
+ } while (TheTok.getKind() == tok::comment);
+ if (TheTok.getKind() != tok::semi) {
+ // Not global module fragment, roll back.
+ TheTok = ModuleTok;
+ break;
+ }
+ continue;
}
// We hit a token that we don't recognize as being in the
@@ -749,10 +772,9 @@ unsigned Lexer::getTokenPrefixLength(SourceLocation TokStart, unsigned CharNo,
// If we have a character that may be a trigraph or escaped newline, use a
// lexer to parse it correctly.
for (; CharNo; --CharNo) {
- unsigned Size;
- Lexer::getCharAndSizeNoWarn(TokPtr, Size, LangOpts);
- TokPtr += Size;
- PhysOffset += Size;
+ auto CharAndSize = Lexer::getCharAndSizeNoWarn(TokPtr, LangOpts);
+ TokPtr += CharAndSize.Size;
+ PhysOffset += CharAndSize.Size;
}
// Final detail: if we end up on an escaped newline, we want to return the
@@ -1044,9 +1066,11 @@ StringRef Lexer::getImmediateMacroNameForDiagnostics(
while (SM.isMacroArgExpansion(Loc))
Loc = SM.getImmediateExpansionRange(Loc).getBegin();
- // If the macro's spelling has no FileID, then it's actually a token paste
- // or stringization (or similar) and not a macro at all.
- if (!SM.getFileEntryForID(SM.getFileID(SM.getSpellingLoc(Loc))))
+ // If the macro's spelling isn't FileID or from scratch space, then it's
+ // actually a token paste or stringization (or similar) and not a macro at
+ // all.
+ SourceLocation SpellLoc = SM.getSpellingLoc(Loc);
+ if (!SpellLoc.isFileID() || SM.isWrittenInScratchSpace(SpellLoc))
return {};
// Find the spelling location of the start of the non-argument expansion
@@ -1062,8 +1086,8 @@ StringRef Lexer::getImmediateMacroNameForDiagnostics(
return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
}
-bool Lexer::isIdentifierBodyChar(char c, const LangOptions &LangOpts) {
- return isIdentifierBody(c, LangOpts.DollarIdents);
+bool Lexer::isAsciiIdentifierContinueChar(char c, const LangOptions &LangOpts) {
+ return isAsciiIdentifierContinue(c, LangOpts.DollarIdents);
}
bool Lexer::isNewLineEscaped(const char *BufferStart, const char *Str) {
@@ -1189,17 +1213,18 @@ static char GetTrigraphCharForLetter(char Letter) {
/// prefixed with ??, emit a trigraph warning. If trigraphs are enabled,
/// return the result character. Finally, emit a warning about trigraph use
/// whether trigraphs are enabled or not.
-static char DecodeTrigraphChar(const char *CP, Lexer *L) {
+static char DecodeTrigraphChar(const char *CP, Lexer *L, bool Trigraphs) {
char Res = GetTrigraphCharForLetter(*CP);
- if (!Res || !L) return Res;
+ if (!Res)
+ return Res;
- if (!L->getLangOpts().Trigraphs) {
- if (!L->isLexingRawMode())
+ if (!Trigraphs) {
+ if (L && !L->isLexingRawMode())
L->Diag(CP-2, diag::trigraph_ignored);
return 0;
}
- if (!L->isLexingRawMode())
+ if (L && !L->isLexingRawMode())
L->Diag(CP-2, diag::trigraph_converted) << StringRef(&Res, 1);
return Res;
}
@@ -1252,12 +1277,12 @@ const char *Lexer::SkipEscapedNewLines(const char *P) {
}
}
-Optional<Token> Lexer::findNextToken(SourceLocation Loc,
- const SourceManager &SM,
- const LangOptions &LangOpts) {
+std::optional<Token> Lexer::findNextToken(SourceLocation Loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
if (Loc.isMacroID()) {
if (!Lexer::isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
- return None;
+ return std::nullopt;
}
Loc = Lexer::getLocForEndOfToken(Loc, 0, SM, LangOpts);
@@ -1268,7 +1293,7 @@ Optional<Token> Lexer::findNextToken(SourceLocation Loc,
bool InvalidTemp = false;
StringRef File = SM.getBufferData(LocInfo.first, &InvalidTemp);
if (InvalidTemp)
- return None;
+ return std::nullopt;
const char *TokenBegin = File.data() + LocInfo.second;
@@ -1288,7 +1313,7 @@ Optional<Token> Lexer::findNextToken(SourceLocation Loc,
SourceLocation Lexer::findLocationAfterToken(
SourceLocation Loc, tok::TokenKind TKind, const SourceManager &SM,
const LangOptions &LangOpts, bool SkipTrailingWhitespaceAndNewLine) {
- Optional<Token> Tok = findNextToken(Loc, SM, LangOpts);
+ std::optional<Token> Tok = findNextToken(Loc, SM, LangOpts);
if (!Tok || Tok->isNot(TKind))
return {};
SourceLocation TokenLoc = Tok->getLocation();
@@ -1331,15 +1356,16 @@ SourceLocation Lexer::findLocationAfterToken(
///
/// NOTE: When this method is updated, getCharAndSizeSlowNoWarn (below) should
/// be updated to match.
-char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
- Token *Tok) {
+Lexer::SizedChar Lexer::getCharAndSizeSlow(const char *Ptr, Token *Tok) {
+ unsigned Size = 0;
// If we have a slash, look for an escaped newline.
if (Ptr[0] == '\\') {
++Size;
++Ptr;
Slash:
// Common case, backslash-char where the char is not whitespace.
- if (!isWhitespace(Ptr[0])) return '\\';
+ if (!isWhitespace(Ptr[0]))
+ return {'\\', Size};
// See if we have optional whitespace characters between the slash and
// newline.
@@ -1356,31 +1382,33 @@ Slash:
Ptr += EscapedNewLineSize;
// Use slow version to accumulate a correct size field.
- return getCharAndSizeSlow(Ptr, Size, Tok);
+ auto CharAndSize = getCharAndSizeSlow(Ptr, Tok);
+ CharAndSize.Size += Size;
+ return CharAndSize;
}
// Otherwise, this is not an escaped newline, just return the slash.
- return '\\';
+ return {'\\', Size};
}
// If this is a trigraph, process it.
if (Ptr[0] == '?' && Ptr[1] == '?') {
// If this is actually a legal trigraph (not something like "??x"), emit
// a trigraph warning. If so, and if trigraphs are enabled, return it.
- if (char C = DecodeTrigraphChar(Ptr+2, Tok ? this : nullptr)) {
+ if (char C = DecodeTrigraphChar(Ptr + 2, Tok ? this : nullptr,
+ LangOpts.Trigraphs)) {
// Remember that this token needs to be cleaned.
if (Tok) Tok->setFlag(Token::NeedsCleaning);
Ptr += 3;
Size += 3;
if (C == '\\') goto Slash;
- return C;
+ return {C, Size};
}
}
// If this is neither, return a single character.
- ++Size;
- return *Ptr;
+ return {*Ptr, Size + 1u};
}
/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the
@@ -1389,15 +1417,18 @@ Slash:
///
/// NOTE: When this method is updated, getCharAndSizeSlow (above) should
/// be updated to match.
-char Lexer::getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
- const LangOptions &LangOpts) {
+Lexer::SizedChar Lexer::getCharAndSizeSlowNoWarn(const char *Ptr,
+ const LangOptions &LangOpts) {
+
+ unsigned Size = 0;
// If we have a slash, look for an escaped newline.
if (Ptr[0] == '\\') {
++Size;
++Ptr;
Slash:
// Common case, backslash-char where the char is not whitespace.
- if (!isWhitespace(Ptr[0])) return '\\';
+ if (!isWhitespace(Ptr[0]))
+ return {'\\', Size};
// See if we have optional whitespace characters followed by a newline.
if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
@@ -1406,11 +1437,13 @@ Slash:
Ptr += EscapedNewLineSize;
// Use slow version to accumulate a correct size field.
- return getCharAndSizeSlowNoWarn(Ptr, Size, LangOpts);
+ auto CharAndSize = getCharAndSizeSlowNoWarn(Ptr, LangOpts);
+ CharAndSize.Size += Size;
+ return CharAndSize;
}
// Otherwise, this is not an escaped newline, just return the slash.
- return '\\';
+ return {'\\', Size};
}
// If this is a trigraph, process it.
@@ -1421,13 +1454,12 @@ Slash:
Ptr += 3;
Size += 3;
if (C == '\\') goto Slash;
- return C;
+ return {C, Size};
}
}
// If this is neither, return a single character.
- ++Size;
- return *Ptr;
+ return {*Ptr, Size + 1u};
}
//===----------------------------------------------------------------------===//
@@ -1446,19 +1478,60 @@ void Lexer::SetByteOffset(unsigned Offset, bool StartOfLine) {
IsAtPhysicalStartOfLine = StartOfLine;
}
-static bool isAllowedIDChar(uint32_t C, const LangOptions &LangOpts) {
+static bool isUnicodeWhitespace(uint32_t Codepoint) {
+ static const llvm::sys::UnicodeCharSet UnicodeWhitespaceChars(
+ UnicodeWhitespaceCharRanges);
+ return UnicodeWhitespaceChars.contains(Codepoint);
+}
+
+static llvm::SmallString<5> codepointAsHexString(uint32_t C) {
+ llvm::SmallString<5> CharBuf;
+ llvm::raw_svector_ostream CharOS(CharBuf);
+ llvm::write_hex(CharOS, C, llvm::HexPrintStyle::Upper, 4);
+ return CharBuf;
+}
+
+// To mitigate https://github.com/llvm/llvm-project/issues/54732,
+// we allow "Mathematical Notation Characters" in identifiers.
+// This is a proposed profile that extends the XID_Start/XID_continue
+// with mathematical symbols, superscipts and subscripts digits
+// found in some production software.
+// https://www.unicode.org/L2/L2022/22230-math-profile.pdf
+static bool isMathematicalExtensionID(uint32_t C, const LangOptions &LangOpts,
+ bool IsStart, bool &IsExtension) {
+ static const llvm::sys::UnicodeCharSet MathStartChars(
+ MathematicalNotationProfileIDStartRanges);
+ static const llvm::sys::UnicodeCharSet MathContinueChars(
+ MathematicalNotationProfileIDContinueRanges);
+ if (MathStartChars.contains(C) ||
+ (!IsStart && MathContinueChars.contains(C))) {
+ IsExtension = true;
+ return true;
+ }
+ return false;
+}
+
+static bool isAllowedIDChar(uint32_t C, const LangOptions &LangOpts,
+ bool &IsExtension) {
if (LangOpts.AsmPreprocessor) {
return false;
} else if (LangOpts.DollarIdents && '$' == C) {
return true;
- } else if (LangOpts.CPlusPlus11 || LangOpts.C11) {
+ } else if (LangOpts.CPlusPlus || LangOpts.C23) {
+ // A non-leading codepoint must have the XID_Continue property.
+ // XIDContinueRanges doesn't contains characters also in XIDStartRanges,
+ // so we need to check both tables.
+ // '_' doesn't have the XID_Continue property but is allowed in C and C++.
+ static const llvm::sys::UnicodeCharSet XIDStartChars(XIDStartRanges);
+ static const llvm::sys::UnicodeCharSet XIDContinueChars(XIDContinueRanges);
+ if (C == '_' || XIDStartChars.contains(C) || XIDContinueChars.contains(C))
+ return true;
+ return isMathematicalExtensionID(C, LangOpts, /*IsStart=*/false,
+ IsExtension);
+ } else if (LangOpts.C11) {
static const llvm::sys::UnicodeCharSet C11AllowedIDChars(
C11AllowedIDCharRanges);
return C11AllowedIDChars.contains(C);
- } else if (LangOpts.CPlusPlus) {
- static const llvm::sys::UnicodeCharSet CXX03AllowedIDChars(
- CXX03AllowedIDCharRanges);
- return CXX03AllowedIDChars.contains(C);
} else {
static const llvm::sys::UnicodeCharSet C99AllowedIDChars(
C99AllowedIDCharRanges);
@@ -1466,21 +1539,46 @@ static bool isAllowedIDChar(uint32_t C, const LangOptions &LangOpts) {
}
}
-static bool isAllowedInitiallyIDChar(uint32_t C, const LangOptions &LangOpts) {
- assert(isAllowedIDChar(C, LangOpts));
+static bool isAllowedInitiallyIDChar(uint32_t C, const LangOptions &LangOpts,
+ bool &IsExtension) {
+ assert(C > 0x7F && "isAllowedInitiallyIDChar called with an ASCII codepoint");
+ IsExtension = false;
if (LangOpts.AsmPreprocessor) {
return false;
- } else if (LangOpts.CPlusPlus11 || LangOpts.C11) {
+ }
+ if (LangOpts.CPlusPlus || LangOpts.C23) {
+ static const llvm::sys::UnicodeCharSet XIDStartChars(XIDStartRanges);
+ if (XIDStartChars.contains(C))
+ return true;
+ return isMathematicalExtensionID(C, LangOpts, /*IsStart=*/true,
+ IsExtension);
+ }
+ if (!isAllowedIDChar(C, LangOpts, IsExtension))
+ return false;
+ if (LangOpts.C11) {
static const llvm::sys::UnicodeCharSet C11DisallowedInitialIDChars(
C11DisallowedInitialIDCharRanges);
return !C11DisallowedInitialIDChars.contains(C);
- } else if (LangOpts.CPlusPlus) {
- return true;
- } else {
- static const llvm::sys::UnicodeCharSet C99DisallowedInitialIDChars(
- C99DisallowedInitialIDCharRanges);
- return !C99DisallowedInitialIDChars.contains(C);
}
+ static const llvm::sys::UnicodeCharSet C99DisallowedInitialIDChars(
+ C99DisallowedInitialIDCharRanges);
+ return !C99DisallowedInitialIDChars.contains(C);
+}
+
+static void diagnoseExtensionInIdentifier(DiagnosticsEngine &Diags, uint32_t C,
+ CharSourceRange Range) {
+
+ static const llvm::sys::UnicodeCharSet MathStartChars(
+ MathematicalNotationProfileIDStartRanges);
+ static const llvm::sys::UnicodeCharSet MathContinueChars(
+ MathematicalNotationProfileIDContinueRanges);
+
+ (void)MathStartChars;
+ (void)MathContinueChars;
+ assert((MathStartChars.contains(C) || MathContinueChars.contains(C)) &&
+ "Unexpected mathematical notation codepoint");
+ Diags.Report(Range.getBegin(), diag::ext_mathematical_notation)
+ << codepointAsHexString(C) << Range;
}
static inline CharSourceRange makeCharRange(Lexer &L, const char *Begin,
@@ -1512,16 +1610,6 @@ static void maybeDiagnoseIDCharCompat(DiagnosticsEngine &Diags, uint32_t C,
<< CannotStartIdentifier;
}
}
-
- // Check C++98 compatibility.
- if (!Diags.isIgnored(diag::warn_cxx98_compat_unicode_id, Range.getBegin())) {
- static const llvm::sys::UnicodeCharSet CXX03AllowedIDChars(
- CXX03AllowedIDCharRanges);
- if (!CXX03AllowedIDChars.contains(C)) {
- Diags.Report(Range.getBegin(), diag::warn_cxx98_compat_unicode_id)
- << Range;
- }
- }
}
/// After encountering UTF-8 character C and interpreting it as an identifier
@@ -1592,33 +1680,74 @@ static void maybeDiagnoseUTF8Homoglyph(DiagnosticsEngine &Diags, uint32_t C,
std::lower_bound(std::begin(SortedHomoglyphs),
std::end(SortedHomoglyphs) - 1, HomoglyphPair{C, '\0'});
if (Homoglyph->Character == C) {
- llvm::SmallString<5> CharBuf;
- {
- llvm::raw_svector_ostream CharOS(CharBuf);
- llvm::write_hex(CharOS, C, llvm::HexPrintStyle::Upper, 4);
- }
if (Homoglyph->LooksLike) {
const char LooksLikeStr[] = {Homoglyph->LooksLike, 0};
Diags.Report(Range.getBegin(), diag::warn_utf8_symbol_homoglyph)
- << Range << CharBuf << LooksLikeStr;
+ << Range << codepointAsHexString(C) << LooksLikeStr;
} else {
Diags.Report(Range.getBegin(), diag::warn_utf8_symbol_zero_width)
- << Range << CharBuf;
+ << Range << codepointAsHexString(C);
}
}
}
+static void diagnoseInvalidUnicodeCodepointInIdentifier(
+ DiagnosticsEngine &Diags, const LangOptions &LangOpts, uint32_t CodePoint,
+ CharSourceRange Range, bool IsFirst) {
+ if (isASCII(CodePoint))
+ return;
+
+ bool IsExtension;
+ bool IsIDStart = isAllowedInitiallyIDChar(CodePoint, LangOpts, IsExtension);
+ bool IsIDContinue =
+ IsIDStart || isAllowedIDChar(CodePoint, LangOpts, IsExtension);
+
+ if ((IsFirst && IsIDStart) || (!IsFirst && IsIDContinue))
+ return;
+
+ bool InvalidOnlyAtStart = IsFirst && !IsIDStart && IsIDContinue;
+
+ if (!IsFirst || InvalidOnlyAtStart) {
+ Diags.Report(Range.getBegin(), diag::err_character_not_allowed_identifier)
+ << Range << codepointAsHexString(CodePoint) << int(InvalidOnlyAtStart)
+ << FixItHint::CreateRemoval(Range);
+ } else {
+ Diags.Report(Range.getBegin(), diag::err_character_not_allowed)
+ << Range << codepointAsHexString(CodePoint)
+ << FixItHint::CreateRemoval(Range);
+ }
+}
+
bool Lexer::tryConsumeIdentifierUCN(const char *&CurPtr, unsigned Size,
Token &Result) {
const char *UCNPtr = CurPtr + Size;
uint32_t CodePoint = tryReadUCN(UCNPtr, CurPtr, /*Token=*/nullptr);
- if (CodePoint == 0 || !isAllowedIDChar(CodePoint, LangOpts))
+ if (CodePoint == 0) {
return false;
+ }
+ bool IsExtension = false;
+ if (!isAllowedIDChar(CodePoint, LangOpts, IsExtension)) {
+ if (isASCII(CodePoint) || isUnicodeWhitespace(CodePoint))
+ return false;
+ if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
+ !PP->isPreprocessedOutput())
+ diagnoseInvalidUnicodeCodepointInIdentifier(
+ PP->getDiagnostics(), LangOpts, CodePoint,
+ makeCharRange(*this, CurPtr, UCNPtr),
+ /*IsFirst=*/false);
+
+ // We got a unicode codepoint that is neither a space nor a
+ // a valid identifier part.
+ // Carry on as if the codepoint was valid for recovery purposes.
+ } else if (!isLexingRawMode()) {
+ if (IsExtension)
+ diagnoseExtensionInIdentifier(PP->getDiagnostics(), CodePoint,
+ makeCharRange(*this, CurPtr, UCNPtr));
- if (!isLexingRawMode())
maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
makeCharRange(*this, CurPtr, UCNPtr),
/*IsFirst=*/false);
+ }
Result.setFlag(Token::HasUCN);
if ((UCNPtr - CurPtr == 6 && CurPtr[1] == 'u') ||
@@ -1630,137 +1759,224 @@ bool Lexer::tryConsumeIdentifierUCN(const char *&CurPtr, unsigned Size,
return true;
}
-bool Lexer::tryConsumeIdentifierUTF8Char(const char *&CurPtr) {
- const char *UnicodePtr = CurPtr;
+bool Lexer::tryConsumeIdentifierUTF8Char(const char *&CurPtr, Token &Result) {
llvm::UTF32 CodePoint;
- llvm::ConversionResult Result =
- llvm::convertUTF8Sequence((const llvm::UTF8 **)&UnicodePtr,
- (const llvm::UTF8 *)BufferEnd,
- &CodePoint,
- llvm::strictConversion);
- if (Result != llvm::conversionOK ||
- !isAllowedIDChar(static_cast<uint32_t>(CodePoint), LangOpts))
+
+ // If a UTF-8 codepoint appears immediately after an escaped new line,
+ // CurPtr may point to the splicing \ on the preceding line,
+ // so we need to skip it.
+ unsigned FirstCodeUnitSize;
+ getCharAndSize(CurPtr, FirstCodeUnitSize);
+ const char *CharStart = CurPtr + FirstCodeUnitSize - 1;
+ const char *UnicodePtr = CharStart;
+
+ llvm::ConversionResult ConvResult = llvm::convertUTF8Sequence(
+ (const llvm::UTF8 **)&UnicodePtr, (const llvm::UTF8 *)BufferEnd,
+ &CodePoint, llvm::strictConversion);
+ if (ConvResult != llvm::conversionOK)
return false;
- if (!isLexingRawMode()) {
+ bool IsExtension = false;
+ if (!isAllowedIDChar(static_cast<uint32_t>(CodePoint), LangOpts,
+ IsExtension)) {
+ if (isASCII(CodePoint) || isUnicodeWhitespace(CodePoint))
+ return false;
+
+ if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
+ !PP->isPreprocessedOutput())
+ diagnoseInvalidUnicodeCodepointInIdentifier(
+ PP->getDiagnostics(), LangOpts, CodePoint,
+ makeCharRange(*this, CharStart, UnicodePtr), /*IsFirst=*/false);
+ // We got a unicode codepoint that is neither a space nor a
+ // a valid identifier part. Carry on as if the codepoint was
+ // valid for recovery purposes.
+ } else if (!isLexingRawMode()) {
+ if (IsExtension)
+ diagnoseExtensionInIdentifier(
+ PP->getDiagnostics(), CodePoint,
+ makeCharRange(*this, CharStart, UnicodePtr));
maybeDiagnoseIDCharCompat(PP->getDiagnostics(), CodePoint,
- makeCharRange(*this, CurPtr, UnicodePtr),
+ makeCharRange(*this, CharStart, UnicodePtr),
/*IsFirst=*/false);
maybeDiagnoseUTF8Homoglyph(PP->getDiagnostics(), CodePoint,
- makeCharRange(*this, CurPtr, UnicodePtr));
+ makeCharRange(*this, CharStart, UnicodePtr));
}
+ // Once we sucessfully parsed some UTF-8,
+ // calling ConsumeChar ensures the NeedsCleaning flag is set on the token
+ // being lexed, and that warnings about trailing spaces are emitted.
+ ConsumeChar(CurPtr, FirstCodeUnitSize, Result);
CurPtr = UnicodePtr;
return true;
}
-bool Lexer::LexIdentifier(Token &Result, const char *CurPtr) {
- // Match [_A-Za-z0-9]*, we have already matched [_A-Za-z$]
- unsigned Size;
- unsigned char C = *CurPtr++;
- while (isIdentifierBody(C))
- C = *CurPtr++;
+bool Lexer::LexUnicodeIdentifierStart(Token &Result, uint32_t C,
+ const char *CurPtr) {
+ bool IsExtension = false;
+ if (isAllowedInitiallyIDChar(C, LangOpts, IsExtension)) {
+ if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
+ !PP->isPreprocessedOutput()) {
+ if (IsExtension)
+ diagnoseExtensionInIdentifier(PP->getDiagnostics(), C,
+ makeCharRange(*this, BufferPtr, CurPtr));
+ maybeDiagnoseIDCharCompat(PP->getDiagnostics(), C,
+ makeCharRange(*this, BufferPtr, CurPtr),
+ /*IsFirst=*/true);
+ maybeDiagnoseUTF8Homoglyph(PP->getDiagnostics(), C,
+ makeCharRange(*this, BufferPtr, CurPtr));
+ }
- --CurPtr; // Back up over the skipped character.
+ MIOpt.ReadToken();
+ return LexIdentifierContinue(Result, CurPtr);
+ }
- // Fast path, no $,\,? in identifier found. '\' might be an escaped newline
- // or UCN, and ? might be a trigraph for '\', an escaped newline or UCN.
- //
- // TODO: Could merge these checks into an InfoTable flag to make the
- // comparison cheaper
- if (isASCII(C) && C != '\\' && C != '?' &&
- (C != '$' || !LangOpts.DollarIdents)) {
-FinishIdentifier:
- const char *IdStart = BufferPtr;
- FormTokenWithChars(Result, CurPtr, tok::raw_identifier);
- Result.setRawIdentifierData(IdStart);
-
- // If we are in raw mode, return this identifier raw. There is no need to
- // look up identifier information or attempt to macro expand it.
- if (LexingRawMode)
- return true;
+ if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
+ !PP->isPreprocessedOutput() && !isASCII(*BufferPtr) &&
+ !isUnicodeWhitespace(C)) {
+ // Non-ASCII characters tend to creep into source code unintentionally.
+ // Instead of letting the parser complain about the unknown token,
+ // just drop the character.
+ // Note that we can /only/ do this when the non-ASCII character is actually
+ // spelled as Unicode, not written as a UCN. The standard requires that
+ // we not throw away any possible preprocessor tokens, but there's a
+ // loophole in the mapping of Unicode characters to basic character set
+ // characters that allows us to map these particular characters to, say,
+ // whitespace.
+ diagnoseInvalidUnicodeCodepointInIdentifier(
+ PP->getDiagnostics(), LangOpts, C,
+ makeCharRange(*this, BufferPtr, CurPtr), /*IsStart*/ true);
+ BufferPtr = CurPtr;
+ return false;
+ }
- // Fill in Result.IdentifierInfo and update the token kind,
- // looking up the identifier in the identifier table.
- IdentifierInfo *II = PP->LookUpIdentifierInfo(Result);
- // Note that we have to call PP->LookUpIdentifierInfo() even for code
- // completion, it writes IdentifierInfo into Result, and callers rely on it.
+ // Otherwise, we have an explicit UCN or a character that's unlikely to show
+ // up by accident.
+ MIOpt.ReadToken();
+ FormTokenWithChars(Result, CurPtr, tok::unknown);
+ return true;
+}
- // If the completion point is at the end of an identifier, we want to treat
- // the identifier as incomplete even if it resolves to a macro or a keyword.
- // This allows e.g. 'class^' to complete to 'classifier'.
- if (isCodeCompletionPoint(CurPtr)) {
- // Return the code-completion token.
- Result.setKind(tok::code_completion);
- // Skip the code-completion char and all immediate identifier characters.
- // This ensures we get consistent behavior when completing at any point in
- // an identifier (i.e. at the start, in the middle, at the end). Note that
- // only simple cases (i.e. [a-zA-Z0-9_]) are supported to keep the code
- // simpler.
- assert(*CurPtr == 0 && "Completion character must be 0");
- ++CurPtr;
- // Note that code completion token is not added as a separate character
- // when the completion point is at the end of the buffer. Therefore, we need
- // to check if the buffer has ended.
- if (CurPtr < BufferEnd) {
- while (isIdentifierBody(*CurPtr))
- ++CurPtr;
- }
- BufferPtr = CurPtr;
- return true;
- }
+static const char *
+fastParseASCIIIdentifier(const char *CurPtr,
+ [[maybe_unused]] const char *BufferEnd) {
+#ifdef __SSE4_2__
+ alignas(16) static constexpr char AsciiIdentifierRange[16] = {
+ '_', '_', 'A', 'Z', 'a', 'z', '0', '9',
+ };
+ constexpr ssize_t BytesPerRegister = 16;
- // Finally, now that we know we have an identifier, pass this off to the
- // preprocessor, which may macro expand it or something.
- if (II->isHandleIdentifierCase())
- return PP->HandleIdentifier(Result);
+ __m128i AsciiIdentifierRangeV =
+ _mm_load_si128((const __m128i *)AsciiIdentifierRange);
- return true;
+ while (LLVM_LIKELY(BufferEnd - CurPtr >= BytesPerRegister)) {
+ __m128i Cv = _mm_loadu_si128((const __m128i *)(CurPtr));
+
+ int Consumed = _mm_cmpistri(AsciiIdentifierRangeV, Cv,
+ _SIDD_LEAST_SIGNIFICANT | _SIDD_CMP_RANGES |
+ _SIDD_UBYTE_OPS | _SIDD_NEGATIVE_POLARITY);
+ CurPtr += Consumed;
+ if (Consumed == BytesPerRegister)
+ continue;
+ return CurPtr;
}
+#endif
- // Otherwise, $,\,? in identifier found. Enter slower path.
+ unsigned char C = *CurPtr;
+ while (isAsciiIdentifierContinue(C))
+ C = *++CurPtr;
+ return CurPtr;
+}
+
+bool Lexer::LexIdentifierContinue(Token &Result, const char *CurPtr) {
+ // Match [_A-Za-z0-9]*, we have already matched an identifier start.
- C = getCharAndSize(CurPtr, Size);
while (true) {
+
+ CurPtr = fastParseASCIIIdentifier(CurPtr, BufferEnd);
+
+ unsigned Size;
+ // Slow path: handle trigraph, unicode codepoints, UCNs.
+ unsigned char C = getCharAndSize(CurPtr, Size);
+ if (isAsciiIdentifierContinue(C)) {
+ CurPtr = ConsumeChar(CurPtr, Size, Result);
+ continue;
+ }
if (C == '$') {
// If we hit a $ and they are not supported in identifiers, we are done.
- if (!LangOpts.DollarIdents) goto FinishIdentifier;
-
+ if (!LangOpts.DollarIdents)
+ break;
// Otherwise, emit a diagnostic and continue.
if (!isLexingRawMode())
Diag(CurPtr, diag::ext_dollar_in_identifier);
CurPtr = ConsumeChar(CurPtr, Size, Result);
- C = getCharAndSize(CurPtr, Size);
continue;
- } else if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result)) {
- C = getCharAndSize(CurPtr, Size);
+ }
+ if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
continue;
- } else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr)) {
- C = getCharAndSize(CurPtr, Size);
+ if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr, Result))
continue;
- } else if (!isIdentifierBody(C)) {
- goto FinishIdentifier;
- }
+ // Neither an expected Unicode codepoint nor a UCN.
+ break;
+ }
- // Otherwise, this character is good, consume it.
- CurPtr = ConsumeChar(CurPtr, Size, Result);
+ const char *IdStart = BufferPtr;
+ FormTokenWithChars(Result, CurPtr, tok::raw_identifier);
+ Result.setRawIdentifierData(IdStart);
- C = getCharAndSize(CurPtr, Size);
- while (isIdentifierBody(C)) {
- CurPtr = ConsumeChar(CurPtr, Size, Result);
- C = getCharAndSize(CurPtr, Size);
+ // If we are in raw mode, return this identifier raw. There is no need to
+ // look up identifier information or attempt to macro expand it.
+ if (LexingRawMode)
+ return true;
+
+ // Fill in Result.IdentifierInfo and update the token kind,
+ // looking up the identifier in the identifier table.
+ const IdentifierInfo *II = PP->LookUpIdentifierInfo(Result);
+ // Note that we have to call PP->LookUpIdentifierInfo() even for code
+ // completion, it writes IdentifierInfo into Result, and callers rely on it.
+
+ // If the completion point is at the end of an identifier, we want to treat
+ // the identifier as incomplete even if it resolves to a macro or a keyword.
+ // This allows e.g. 'class^' to complete to 'classifier'.
+ if (isCodeCompletionPoint(CurPtr)) {
+ // Return the code-completion token.
+ Result.setKind(tok::code_completion);
+ // Skip the code-completion char and all immediate identifier characters.
+ // This ensures we get consistent behavior when completing at any point in
+ // an identifier (i.e. at the start, in the middle, at the end). Note that
+ // only simple cases (i.e. [a-zA-Z0-9_]) are supported to keep the code
+ // simpler.
+ assert(*CurPtr == 0 && "Completion character must be 0");
+ ++CurPtr;
+ // Note that code completion token is not added as a separate character
+ // when the completion point is at the end of the buffer. Therefore, we need
+ // to check if the buffer has ended.
+ if (CurPtr < BufferEnd) {
+ while (isAsciiIdentifierContinue(*CurPtr))
+ ++CurPtr;
}
+ BufferPtr = CurPtr;
+ return true;
}
+
+ // Finally, now that we know we have an identifier, pass this off to the
+ // preprocessor, which may macro expand it or something.
+ if (II->isHandleIdentifierCase())
+ return PP->HandleIdentifier(Result);
+
+ return true;
}
/// isHexaLiteral - Return true if Start points to a hex constant.
/// in microsoft mode (where this is supposed to be several different tokens).
bool Lexer::isHexaLiteral(const char *Start, const LangOptions &LangOpts) {
- unsigned Size;
- char C1 = Lexer::getCharAndSizeNoWarn(Start, Size, LangOpts);
+ auto CharAndSize1 = Lexer::getCharAndSizeNoWarn(Start, LangOpts);
+ char C1 = CharAndSize1.Char;
if (C1 != '0')
return false;
- char C2 = Lexer::getCharAndSizeNoWarn(Start + Size, Size, LangOpts);
+
+ auto CharAndSize2 =
+ Lexer::getCharAndSizeNoWarn(Start + CharAndSize1.Size, LangOpts);
+ char C2 = CharAndSize2.Char;
return (C2 == 'x' || C2 == 'X');
}
@@ -1774,6 +1990,10 @@ bool Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
while (isPreprocessingNumberBody(C)) {
CurPtr = ConsumeChar(CurPtr, Size, Result);
PrevCh = C;
+ if (LangOpts.HLSL && C == '.' && (*CurPtr == 'x' || *CurPtr == 'r')) {
+ CurPtr -= Size;
+ break;
+ }
C = getCharAndSize(CurPtr, Size);
}
@@ -1794,7 +2014,7 @@ bool Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
if (!LangOpts.C99) {
if (!isHexaLiteral(BufferPtr, LangOpts))
IsHexFloat = false;
- else if (!getLangOpts().CPlusPlus17 &&
+ else if (!LangOpts.CPlusPlus17 &&
std::find(BufferPtr, CurPtr, '_') != CurPtr)
IsHexFloat = false;
}
@@ -1803,14 +2023,13 @@ bool Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
}
// If we have a digit separator, continue.
- if (C == '\'' && (getLangOpts().CPlusPlus14 || getLangOpts().C2x)) {
- unsigned NextSize;
- char Next = getCharAndSizeNoWarn(CurPtr + Size, NextSize, getLangOpts());
- if (isIdentifierBody(Next)) {
+ if (C == '\'' && (LangOpts.CPlusPlus14 || LangOpts.C23)) {
+ auto [Next, NextSize] = getCharAndSizeNoWarn(CurPtr + Size, LangOpts);
+ if (isAsciiIdentifierContinue(Next)) {
if (!isLexingRawMode())
- Diag(CurPtr, getLangOpts().CPlusPlus
+ Diag(CurPtr, LangOpts.CPlusPlus
? diag::warn_cxx11_compat_digit_separator
- : diag::warn_c2x_compat_digit_separator);
+ : diag::warn_c23_compat_digit_separator);
CurPtr = ConsumeChar(CurPtr, Size, Result);
CurPtr = ConsumeChar(CurPtr, NextSize, Result);
return LexNumericConstant(Result, CurPtr);
@@ -1820,7 +2039,7 @@ bool Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
// If we have a UCN or UTF-8 character (perhaps in a ud-suffix), continue.
if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
return LexNumericConstant(Result, CurPtr);
- if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
+ if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr, Result))
return LexNumericConstant(Result, CurPtr);
// Update the location of token as well as BufferPtr.
@@ -1834,23 +2053,23 @@ bool Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
/// in C++11, or warn on a ud-suffix in C++98.
const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
bool IsStringLiteral) {
- assert(getLangOpts().CPlusPlus);
+ assert(LangOpts.CPlusPlus);
// Maximally munch an identifier.
unsigned Size;
char C = getCharAndSize(CurPtr, Size);
bool Consumed = false;
- if (!isIdentifierHead(C)) {
+ if (!isAsciiIdentifierStart(C)) {
if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result))
Consumed = true;
- else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr))
+ else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr, Result))
Consumed = true;
else
return CurPtr;
}
- if (!getLangOpts().CPlusPlus11) {
+ if (!LangOpts.CPlusPlus11) {
if (!isLexingRawMode())
Diag(CurPtr,
C == '_' ? diag::warn_cxx11_compat_user_defined_literal
@@ -1868,7 +2087,7 @@ const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
bool IsUDSuffix = false;
if (C == '_')
IsUDSuffix = true;
- else if (IsStringLiteral && getLangOpts().CPlusPlus14) {
+ else if (IsStringLiteral && LangOpts.CPlusPlus14) {
// In C++1y, we need to look ahead a few characters to see if this is a
// valid suffix for a string literal or a numeric literal (this could be
// the 'operator""if' defining a numeric literal operator).
@@ -1877,14 +2096,13 @@ const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
unsigned Consumed = Size;
unsigned Chars = 1;
while (true) {
- unsigned NextSize;
- char Next = getCharAndSizeNoWarn(CurPtr + Consumed, NextSize,
- getLangOpts());
- if (!isIdentifierBody(Next)) {
+ auto [Next, NextSize] =
+ getCharAndSizeNoWarn(CurPtr + Consumed, LangOpts);
+ if (!isAsciiIdentifierContinue(Next)) {
// End of suffix. Check whether this is on the allowed list.
const StringRef CompleteSuffix(Buffer, Chars);
- IsUDSuffix = StringLiteralParser::isValidUDSuffix(getLangOpts(),
- CompleteSuffix);
+ IsUDSuffix =
+ StringLiteralParser::isValidUDSuffix(LangOpts, CompleteSuffix);
break;
}
@@ -1899,10 +2117,10 @@ const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
if (!IsUDSuffix) {
if (!isLexingRawMode())
- Diag(CurPtr, getLangOpts().MSVCCompat
+ Diag(CurPtr, LangOpts.MSVCCompat
? diag::ext_ms_reserved_user_defined_literal
: diag::ext_reserved_user_defined_literal)
- << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
+ << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
return CurPtr;
}
@@ -1912,10 +2130,12 @@ const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
Result.setFlag(Token::HasUDSuffix);
while (true) {
C = getCharAndSize(CurPtr, Size);
- if (isIdentifierBody(C)) { CurPtr = ConsumeChar(CurPtr, Size, Result); }
- else if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result)) {}
- else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr)) {}
- else break;
+ if (isAsciiIdentifierContinue(C)) {
+ CurPtr = ConsumeChar(CurPtr, Size, Result);
+ } else if (C == '\\' && tryConsumeIdentifierUCN(CurPtr, Size, Result)) {
+ } else if (!isASCII(C) && tryConsumeIdentifierUTF8Char(CurPtr, Result)) {
+ } else
+ break;
}
return CurPtr;
@@ -1933,9 +2153,8 @@ bool Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
(Kind == tok::utf8_string_literal ||
Kind == tok::utf16_string_literal ||
Kind == tok::utf32_string_literal))
- Diag(BufferPtr, getLangOpts().CPlusPlus
- ? diag::warn_cxx98_compat_unicode_literal
- : diag::warn_c99_compat_unicode_literal);
+ Diag(BufferPtr, LangOpts.CPlusPlus ? diag::warn_cxx98_compat_unicode_literal
+ : diag::warn_c99_compat_unicode_literal);
char C = getAndAdvanceChar(CurPtr, Result);
while (C != '"') {
@@ -1969,7 +2188,7 @@ bool Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
}
// If we are in C++11, lex the optional ud-suffix.
- if (getLangOpts().CPlusPlus)
+ if (LangOpts.CPlusPlus)
CurPtr = LexUDSuffix(Result, CurPtr, true);
// If a nul character existed in the string, warn about it.
@@ -2053,7 +2272,7 @@ bool Lexer::LexRawStringLiteral(Token &Result, const char *CurPtr,
}
// If we are in C++11, lex the optional ud-suffix.
- if (getLangOpts().CPlusPlus)
+ if (LangOpts.CPlusPlus)
CurPtr = LexUDSuffix(Result, CurPtr, true);
// Update the location of token as well as BufferPtr.
@@ -2130,7 +2349,7 @@ void Lexer::codeCompleteIncludedFile(const char *PathStart,
++CompletionPoint;
if (Next == (IsAngled ? '>' : '"'))
break;
- if (llvm::is_contained(SlashChars, Next))
+ if (SlashChars.contains(Next))
break;
}
@@ -2149,7 +2368,7 @@ bool Lexer::LexCharConstant(Token &Result, const char *CurPtr,
if (!isLexingRawMode()) {
if (Kind == tok::utf16_char_constant || Kind == tok::utf32_char_constant)
- Diag(BufferPtr, getLangOpts().CPlusPlus
+ Diag(BufferPtr, LangOpts.CPlusPlus
? diag::warn_cxx98_compat_unicode_literal
: diag::warn_c99_compat_unicode_literal);
else if (Kind == tok::utf8_char_constant)
@@ -2191,7 +2410,7 @@ bool Lexer::LexCharConstant(Token &Result, const char *CurPtr,
}
// If we are in C++11, lex the optional ud-suffix.
- if (getLangOpts().CPlusPlus)
+ if (LangOpts.CPlusPlus)
CurPtr = LexUDSuffix(Result, CurPtr, false);
// If a nul character existed in the character, warn about it.
@@ -2289,12 +2508,13 @@ bool Lexer::SkipLineComment(Token &Result, const char *CurPtr,
bool &TokAtPhysicalStartOfLine) {
// If Line comments aren't explicitly enabled for this language, emit an
// extension warning.
- if (!LangOpts.LineComment && !isLexingRawMode()) {
- Diag(BufferPtr, diag::ext_line_comment);
+ if (!LineComment) {
+ if (!isLexingRawMode()) // There's no PP in raw mode, so can't emit diags.
+ Diag(BufferPtr, diag::ext_line_comment);
// Mark them enabled so we only emit one warning for this translation
// unit.
- LangOpts.LineComment = true;
+ LineComment = true;
}
// Scan over the body of the comment. The common case, when scanning, is that
@@ -2303,13 +2523,37 @@ bool Lexer::SkipLineComment(Token &Result, const char *CurPtr,
//
// This loop terminates with CurPtr pointing at the newline (or end of buffer)
// character that ends the line comment.
+
+ // C++23 [lex.phases] p1
+ // Diagnose invalid UTF-8 if the corresponding warning is enabled, emitting a
+ // diagnostic only once per entire ill-formed subsequence to avoid
+ // emiting to many diagnostics (see http://unicode.org/review/pr-121.html).
+ bool UnicodeDecodingAlreadyDiagnosed = false;
+
char C;
while (true) {
C = *CurPtr;
// Skip over characters in the fast loop.
- while (C != 0 && // Potentially EOF.
- C != '\n' && C != '\r') // Newline or DOS-style newline.
+ while (isASCII(C) && C != 0 && // Potentially EOF.
+ C != '\n' && C != '\r') { // Newline or DOS-style newline.
C = *++CurPtr;
+ UnicodeDecodingAlreadyDiagnosed = false;
+ }
+
+ if (!isASCII(C)) {
+ unsigned Length = llvm::getUTF8SequenceSize(
+ (const llvm::UTF8 *)CurPtr, (const llvm::UTF8 *)BufferEnd);
+ if (Length == 0) {
+ if (!UnicodeDecodingAlreadyDiagnosed && !isLexingRawMode())
+ Diag(CurPtr, diag::warn_invalid_utf8_in_comment);
+ UnicodeDecodingAlreadyDiagnosed = true;
+ ++CurPtr;
+ } else {
+ UnicodeDecodingAlreadyDiagnosed = false;
+ CurPtr += Length;
+ }
+ continue;
+ }
const char *NextLine = CurPtr;
if (C != 0) {
@@ -2412,7 +2656,7 @@ bool Lexer::SkipLineComment(Token &Result, const char *CurPtr,
// \r\n sequence. This is an efficiency hack (because we know the \n can't
// contribute to another token), it isn't needed for correctness. Note that
// this is ok even in KeepWhitespaceMode, because we would have returned the
- /// comment above in that mode.
+ // comment above in that mode.
NewLinePtr = CurPtr++;
// The next returned token is at the start of the line.
@@ -2454,14 +2698,14 @@ bool Lexer::SaveLineComment(Token &Result, const char *CurPtr) {
/// isBlockCommentEndOfEscapedNewLine - Return true if the specified newline
/// character (either \\n or \\r) is part of an escaped newline sequence. Issue
/// a diagnostic if so. We know that the newline is inside of a block comment.
-static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
- Lexer *L) {
+static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr, Lexer *L,
+ bool Trigraphs) {
assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');
// Position of the first trigraph in the ending sequence.
- const char *TrigraphPos = 0;
+ const char *TrigraphPos = nullptr;
// Position of the first whitespace after a '\' in the ending sequence.
- const char *SpacePos = 0;
+ const char *SpacePos = nullptr;
while (true) {
// Back up off the newline.
@@ -2506,7 +2750,7 @@ static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
if (TrigraphPos) {
// If no trigraphs are enabled, warn that we ignored this trigraph and
// ignore this * character.
- if (!L->getLangOpts().Trigraphs) {
+ if (!Trigraphs) {
if (!L->isLexingRawMode())
L->Diag(TrigraphPos, diag::trigraph_ignored_block_comment);
return false;
@@ -2576,6 +2820,12 @@ bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr,
if (C == '/')
C = *CurPtr++;
+ // C++23 [lex.phases] p1
+ // Diagnose invalid UTF-8 if the corresponding warning is enabled, emitting a
+ // diagnostic only once per entire ill-formed subsequence to avoid
+ // emiting to many diagnostics (see http://unicode.org/review/pr-121.html).
+ bool UnicodeDecodingAlreadyDiagnosed = false;
+
while (true) {
// Skip over all non-interesting characters until we find end of buffer or a
// (probably ending) '/' character.
@@ -2584,41 +2834,65 @@ bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr,
// doesn't check for '\0'.
!(PP && PP->getCodeCompletionFileLoc() == FileLoc)) {
// While not aligned to a 16-byte boundary.
- while (C != '/' && ((intptr_t)CurPtr & 0x0F) != 0)
+ while (C != '/' && (intptr_t)CurPtr % 16 != 0) {
+ if (!isASCII(C))
+ goto MultiByteUTF8;
C = *CurPtr++;
-
+ }
if (C == '/') goto FoundSlash;
#ifdef __SSE2__
__m128i Slashes = _mm_set1_epi8('/');
- while (CurPtr+16 <= BufferEnd) {
+ while (CurPtr + 16 < BufferEnd) {
+ int Mask = _mm_movemask_epi8(*(const __m128i *)CurPtr);
+ if (LLVM_UNLIKELY(Mask != 0)) {
+ goto MultiByteUTF8;
+ }
+ // look for slashes
int cmp = _mm_movemask_epi8(_mm_cmpeq_epi8(*(const __m128i*)CurPtr,
Slashes));
if (cmp != 0) {
// Adjust the pointer to point directly after the first slash. It's
// not necessary to set C here, it will be overwritten at the end of
// the outer loop.
- CurPtr += llvm::countTrailingZeros<unsigned>(cmp) + 1;
+ CurPtr += llvm::countr_zero<unsigned>(cmp) + 1;
goto FoundSlash;
}
CurPtr += 16;
}
#elif __ALTIVEC__
+ __vector unsigned char LongUTF = {0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80};
__vector unsigned char Slashes = {
'/', '/', '/', '/', '/', '/', '/', '/',
'/', '/', '/', '/', '/', '/', '/', '/'
};
- while (CurPtr + 16 <= BufferEnd &&
- !vec_any_eq(*(const __vector unsigned char *)CurPtr, Slashes))
+ while (CurPtr + 16 < BufferEnd) {
+ if (LLVM_UNLIKELY(
+ vec_any_ge(*(const __vector unsigned char *)CurPtr, LongUTF)))
+ goto MultiByteUTF8;
+ if (vec_any_eq(*(const __vector unsigned char *)CurPtr, Slashes)) {
+ break;
+ }
CurPtr += 16;
+ }
+
#else
- // Scan for '/' quickly. Many block comments are very large.
- while (CurPtr[0] != '/' &&
- CurPtr[1] != '/' &&
- CurPtr[2] != '/' &&
- CurPtr[3] != '/' &&
- CurPtr+4 < BufferEnd) {
- CurPtr += 4;
+ while (CurPtr + 16 < BufferEnd) {
+ bool HasNonASCII = false;
+ for (unsigned I = 0; I < 16; ++I)
+ HasNonASCII |= !isASCII(CurPtr[I]);
+
+ if (LLVM_UNLIKELY(HasNonASCII))
+ goto MultiByteUTF8;
+
+ bool HasSlash = false;
+ for (unsigned I = 0; I < 16; ++I)
+ HasSlash |= CurPtr[I] == '/';
+ if (HasSlash)
+ break;
+ CurPtr += 16;
}
#endif
@@ -2626,9 +2900,30 @@ bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr,
C = *CurPtr++;
}
- // Loop to scan the remainder.
- while (C != '/' && C != '\0')
+ // Loop to scan the remainder, warning on invalid UTF-8
+ // if the corresponding warning is enabled, emitting a diagnostic only once
+ // per sequence that cannot be decoded.
+ while (C != '/' && C != '\0') {
+ if (isASCII(C)) {
+ UnicodeDecodingAlreadyDiagnosed = false;
+ C = *CurPtr++;
+ continue;
+ }
+ MultiByteUTF8:
+ // CurPtr is 1 code unit past C, so to decode
+ // the codepoint, we need to read from the previous position.
+ unsigned Length = llvm::getUTF8SequenceSize(
+ (const llvm::UTF8 *)CurPtr - 1, (const llvm::UTF8 *)BufferEnd);
+ if (Length == 0) {
+ if (!UnicodeDecodingAlreadyDiagnosed && !isLexingRawMode())
+ Diag(CurPtr - 1, diag::warn_invalid_utf8_in_comment);
+ UnicodeDecodingAlreadyDiagnosed = true;
+ } else {
+ UnicodeDecodingAlreadyDiagnosed = false;
+ CurPtr += Length - 1;
+ }
C = *CurPtr++;
+ }
if (C == '/') {
FoundSlash:
@@ -2636,7 +2931,8 @@ bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr,
break;
if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) {
- if (isEndOfBlockCommentWithEscapedNewLine(CurPtr-2, this)) {
+ if (isEndOfBlockCommentWithEscapedNewLine(CurPtr - 2, this,
+ LangOpts.Trigraphs)) {
// We found the final */, though it had an escaped newline between the
// * and /. We're done!
break;
@@ -2740,7 +3036,7 @@ void Lexer::ReadToEndOfLine(SmallVectorImpl<char> *Result) {
break;
}
// FALL THROUGH.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case '\r':
case '\n':
// Okay, we found the end of the line. First, back up past the \0, \r, \n.
@@ -2811,11 +3107,11 @@ bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
ConditionalStack.pop_back();
}
- SourceLocation EndLoc = getSourceLocation(BufferEnd);
// C99 5.1.1.2p2: If the file is non-empty and didn't end in a newline, issue
// a pedwarn.
if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r')) {
DiagnosticsEngine &Diags = PP->getDiagnostics();
+ SourceLocation EndLoc = getSourceLocation(BufferEnd);
unsigned DiagID;
if (LangOpts.CPlusPlus11) {
@@ -2838,7 +3134,7 @@ bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
BufferPtr = CurPtr;
// Finally, let the preprocessor handle this.
- return PP->HandleEndOfFile(Result, EndLoc, isPragmaLexer());
+ return PP->HandleEndOfFile(Result, isPragmaLexer());
}
/// isNextPPTokenLParen - Return 1 if the next unexpanded token lexed from
@@ -2848,6 +3144,13 @@ bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
unsigned Lexer::isNextPPTokenLParen() {
assert(!LexingRawMode && "How can we expand a macro from a skipping buffer?");
+ if (isDependencyDirectivesLexer()) {
+ if (NextDepDirectiveTokenIndex == DepDirectives.front().Tokens.size())
+ return 2;
+ return DepDirectives.front().Tokens[NextDepDirectiveTokenIndex].is(
+ tok::l_paren);
+ }
+
// Switch to 'skipping' mode. This will ensure that we can lex a token
// without emitting diagnostics, disables macro expansion, and will cause EOF
// to return an EOF token instead of popping the include stack.
@@ -2909,8 +3212,8 @@ bool Lexer::IsStartOfConflictMarker(const char *CurPtr) {
return false;
// Check to see if we have <<<<<<< or >>>>.
- if (!StringRef(CurPtr, BufferEnd - CurPtr).startswith("<<<<<<<") &&
- !StringRef(CurPtr, BufferEnd - CurPtr).startswith(">>>> "))
+ if (!StringRef(CurPtr, BufferEnd - CurPtr).starts_with("<<<<<<<") &&
+ !StringRef(CurPtr, BufferEnd - CurPtr).starts_with(">>>> "))
return false;
// If we have a situation where we don't care about conflict markers, ignore
@@ -3023,62 +3326,204 @@ bool Lexer::isCodeCompletionPoint(const char *CurPtr) const {
return false;
}
-uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc,
- Token *Result) {
+std::optional<uint32_t> Lexer::tryReadNumericUCN(const char *&StartPtr,
+ const char *SlashLoc,
+ Token *Result) {
unsigned CharSize;
char Kind = getCharAndSize(StartPtr, CharSize);
+ assert((Kind == 'u' || Kind == 'U') && "expected a UCN");
unsigned NumHexDigits;
if (Kind == 'u')
NumHexDigits = 4;
else if (Kind == 'U')
NumHexDigits = 8;
- else
- return 0;
+
+ bool Delimited = false;
+ bool FoundEndDelimiter = false;
+ unsigned Count = 0;
+ bool Diagnose = Result && !isLexingRawMode();
if (!LangOpts.CPlusPlus && !LangOpts.C99) {
- if (Result && !isLexingRawMode())
+ if (Diagnose)
Diag(SlashLoc, diag::warn_ucn_not_valid_in_c89);
- return 0;
+ return std::nullopt;
}
const char *CurPtr = StartPtr + CharSize;
const char *KindLoc = &CurPtr[-1];
uint32_t CodePoint = 0;
- for (unsigned i = 0; i < NumHexDigits; ++i) {
+ while (Count != NumHexDigits || Delimited) {
char C = getCharAndSize(CurPtr, CharSize);
+ if (!Delimited && Count == 0 && C == '{') {
+ Delimited = true;
+ CurPtr += CharSize;
+ continue;
+ }
+
+ if (Delimited && C == '}') {
+ CurPtr += CharSize;
+ FoundEndDelimiter = true;
+ break;
+ }
unsigned Value = llvm::hexDigitValue(C);
if (Value == -1U) {
- if (Result && !isLexingRawMode()) {
- if (i == 0) {
- Diag(BufferPtr, diag::warn_ucn_escape_no_digits)
+ if (!Delimited)
+ break;
+ if (Diagnose)
+ Diag(SlashLoc, diag::warn_delimited_ucn_incomplete)
<< StringRef(KindLoc, 1);
- } else {
- Diag(BufferPtr, diag::warn_ucn_escape_incomplete);
-
- // If the user wrote \U1234, suggest a fixit to \u.
- if (i == 4 && NumHexDigits == 8) {
- CharSourceRange URange = makeCharRange(*this, KindLoc, KindLoc + 1);
- Diag(KindLoc, diag::note_ucn_four_not_eight)
- << FixItHint::CreateReplacement(URange, "u");
- }
- }
- }
+ return std::nullopt;
+ }
- return 0;
+ if (CodePoint & 0xF000'0000) {
+ if (Diagnose)
+ Diag(KindLoc, diag::err_escape_too_large) << 0;
+ return std::nullopt;
}
CodePoint <<= 4;
- CodePoint += Value;
+ CodePoint |= Value;
+ CurPtr += CharSize;
+ Count++;
+ }
+ if (Count == 0) {
+ if (Diagnose)
+ Diag(SlashLoc, FoundEndDelimiter ? diag::warn_delimited_ucn_empty
+ : diag::warn_ucn_escape_no_digits)
+ << StringRef(KindLoc, 1);
+ return std::nullopt;
+ }
+
+ if (Delimited && Kind == 'U') {
+ if (Diagnose)
+ Diag(SlashLoc, diag::err_hex_escape_no_digits) << StringRef(KindLoc, 1);
+ return std::nullopt;
+ }
+
+ if (!Delimited && Count != NumHexDigits) {
+ if (Diagnose) {
+ Diag(SlashLoc, diag::warn_ucn_escape_incomplete);
+ // If the user wrote \U1234, suggest a fixit to \u.
+ if (Count == 4 && NumHexDigits == 8) {
+ CharSourceRange URange = makeCharRange(*this, KindLoc, KindLoc + 1);
+ Diag(KindLoc, diag::note_ucn_four_not_eight)
+ << FixItHint::CreateReplacement(URange, "u");
+ }
+ }
+ return std::nullopt;
+ }
+
+ if (Delimited && PP) {
+ Diag(SlashLoc, PP->getLangOpts().CPlusPlus23
+ ? diag::warn_cxx23_delimited_escape_sequence
+ : diag::ext_delimited_escape_sequence)
+ << /*delimited*/ 0 << (PP->getLangOpts().CPlusPlus ? 1 : 0);
+ }
+
+ if (Result) {
+ Result->setFlag(Token::HasUCN);
+ // If the UCN contains either a trigraph or a line splicing,
+ // we need to call getAndAdvanceChar again to set the appropriate flags
+ // on Result.
+ if (CurPtr - StartPtr == (ptrdiff_t)(Count + 1 + (Delimited ? 2 : 0)))
+ StartPtr = CurPtr;
+ else
+ while (StartPtr != CurPtr)
+ (void)getAndAdvanceChar(StartPtr, *Result);
+ } else {
+ StartPtr = CurPtr;
+ }
+ return CodePoint;
+}
+
+std::optional<uint32_t> Lexer::tryReadNamedUCN(const char *&StartPtr,
+ const char *SlashLoc,
+ Token *Result) {
+ unsigned CharSize;
+ bool Diagnose = Result && !isLexingRawMode();
+
+ char C = getCharAndSize(StartPtr, CharSize);
+ assert(C == 'N' && "expected \\N{...}");
+
+ const char *CurPtr = StartPtr + CharSize;
+ const char *KindLoc = &CurPtr[-1];
+
+ C = getCharAndSize(CurPtr, CharSize);
+ if (C != '{') {
+ if (Diagnose)
+ Diag(SlashLoc, diag::warn_ucn_escape_incomplete);
+ return std::nullopt;
+ }
+ CurPtr += CharSize;
+ const char *StartName = CurPtr;
+ bool FoundEndDelimiter = false;
+ llvm::SmallVector<char, 30> Buffer;
+ while (C) {
+ C = getCharAndSize(CurPtr, CharSize);
CurPtr += CharSize;
+ if (C == '}') {
+ FoundEndDelimiter = true;
+ break;
+ }
+
+ if (isVerticalWhitespace(C))
+ break;
+ Buffer.push_back(C);
+ }
+
+ if (!FoundEndDelimiter || Buffer.empty()) {
+ if (Diagnose)
+ Diag(SlashLoc, FoundEndDelimiter ? diag::warn_delimited_ucn_empty
+ : diag::warn_delimited_ucn_incomplete)
+ << StringRef(KindLoc, 1);
+ return std::nullopt;
+ }
+
+ StringRef Name(Buffer.data(), Buffer.size());
+ std::optional<char32_t> Match =
+ llvm::sys::unicode::nameToCodepointStrict(Name);
+ std::optional<llvm::sys::unicode::LooseMatchingResult> LooseMatch;
+ if (!Match) {
+ LooseMatch = llvm::sys::unicode::nameToCodepointLooseMatching(Name);
+ if (Diagnose) {
+ Diag(StartName, diag::err_invalid_ucn_name)
+ << StringRef(Buffer.data(), Buffer.size())
+ << makeCharRange(*this, StartName, CurPtr - CharSize);
+ if (LooseMatch) {
+ Diag(StartName, diag::note_invalid_ucn_name_loose_matching)
+ << FixItHint::CreateReplacement(
+ makeCharRange(*this, StartName, CurPtr - CharSize),
+ LooseMatch->Name);
+ }
+ }
+ // We do not offer misspelled character names suggestions here
+ // as the set of what would be a valid suggestion depends on context,
+ // and we should not make invalid suggestions.
}
+ if (Diagnose && Match)
+ Diag(SlashLoc, PP->getLangOpts().CPlusPlus23
+ ? diag::warn_cxx23_delimited_escape_sequence
+ : diag::ext_delimited_escape_sequence)
+ << /*named*/ 1 << (PP->getLangOpts().CPlusPlus ? 1 : 0);
+
+ // If no diagnostic has been emitted yet, likely because we are doing a
+ // tentative lexing, we do not want to recover here to make sure the token
+ // will not be incorrectly considered valid. This function will be called
+ // again and a diagnostic emitted then.
+ if (LooseMatch && Diagnose)
+ Match = LooseMatch->CodePoint;
+
if (Result) {
Result->setFlag(Token::HasUCN);
- if (CurPtr - StartPtr == (ptrdiff_t)NumHexDigits + 2)
+ // If the UCN contains either a trigraph or a line splicing,
+ // we need to call getAndAdvanceChar again to set the appropriate flags
+ // on Result.
+ if (CurPtr - StartPtr == (ptrdiff_t)(Buffer.size() + 3))
StartPtr = CurPtr;
else
while (StartPtr != CurPtr)
@@ -3086,14 +3531,37 @@ uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc,
} else {
StartPtr = CurPtr;
}
+ return Match ? std::optional<uint32_t>(*Match) : std::nullopt;
+}
+
+uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc,
+ Token *Result) {
+
+ unsigned CharSize;
+ std::optional<uint32_t> CodePointOpt;
+ char Kind = getCharAndSize(StartPtr, CharSize);
+ if (Kind == 'u' || Kind == 'U')
+ CodePointOpt = tryReadNumericUCN(StartPtr, SlashLoc, Result);
+ else if (Kind == 'N')
+ CodePointOpt = tryReadNamedUCN(StartPtr, SlashLoc, Result);
+
+ if (!CodePointOpt)
+ return 0;
+
+ uint32_t CodePoint = *CodePointOpt;
// Don't apply C family restrictions to UCNs in assembly mode
if (LangOpts.AsmPreprocessor)
return CodePoint;
- // C99 6.4.3p2: A universal character name shall not specify a character whose
- // short identifier is less than 00A0 other than 0024 ($), 0040 (@), or
- // 0060 (`), nor one in the range D800 through DFFF inclusive.)
+ // C23 6.4.3p2: A universal character name shall not designate a code point
+ // where the hexadecimal value is:
+ // - in the range D800 through DFFF inclusive; or
+ // - greater than 10FFFF.
+ // A universal-character-name outside the c-char-sequence of a character
+ // constant, or the s-char-sequence of a string-literal shall not designate
+ // a control character or a character in the basic character set.
+
// C++11 [lex.charset]p2: If the hexadecimal value for a
// universal-character-name corresponds to a surrogate code point (in the
// range 0xD800-0xDFFF, inclusive), the program is ill-formed. Additionally,
@@ -3103,9 +3571,6 @@ uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc,
// ranges 0x00-0x1F or 0x7F-0x9F, both inclusive) or to a character in the
// basic source character set, the program is ill-formed.
if (CodePoint < 0xA0) {
- if (CodePoint == 0x24 || CodePoint == 0x40 || CodePoint == 0x60)
- return CodePoint;
-
// We don't use isLexingRawMode() here because we need to warn about bad
// UCNs even when skipping preprocessing tokens in a #if block.
if (Result && PP) {
@@ -3136,10 +3601,8 @@ uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc,
bool Lexer::CheckUnicodeWhitespace(Token &Result, uint32_t C,
const char *CurPtr) {
- static const llvm::sys::UnicodeCharSet UnicodeWhitespaceChars(
- UnicodeWhitespaceCharRanges);
if (!isLexingRawMode() && !PP->isPreprocessedOutput() &&
- UnicodeWhitespaceChars.contains(C)) {
+ isUnicodeWhitespace(C)) {
Diag(BufferPtr, diag::ext_unicode_whitespace)
<< makeCharRange(*this, BufferPtr, CurPtr);
@@ -3149,47 +3612,6 @@ bool Lexer::CheckUnicodeWhitespace(Token &Result, uint32_t C,
return false;
}
-bool Lexer::LexUnicode(Token &Result, uint32_t C, const char *CurPtr) {
- if (isAllowedIDChar(C, LangOpts) && isAllowedInitiallyIDChar(C, LangOpts)) {
- if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
- !PP->isPreprocessedOutput()) {
- maybeDiagnoseIDCharCompat(PP->getDiagnostics(), C,
- makeCharRange(*this, BufferPtr, CurPtr),
- /*IsFirst=*/true);
- maybeDiagnoseUTF8Homoglyph(PP->getDiagnostics(), C,
- makeCharRange(*this, BufferPtr, CurPtr));
- }
-
- MIOpt.ReadToken();
- return LexIdentifier(Result, CurPtr);
- }
-
- if (!isLexingRawMode() && !ParsingPreprocessorDirective &&
- !PP->isPreprocessedOutput() &&
- !isASCII(*BufferPtr) && !isAllowedIDChar(C, LangOpts)) {
- // Non-ASCII characters tend to creep into source code unintentionally.
- // Instead of letting the parser complain about the unknown token,
- // just drop the character.
- // Note that we can /only/ do this when the non-ASCII character is actually
- // spelled as Unicode, not written as a UCN. The standard requires that
- // we not throw away any possible preprocessor tokens, but there's a
- // loophole in the mapping of Unicode characters to basic character set
- // characters that allows us to map these particular characters to, say,
- // whitespace.
- Diag(BufferPtr, diag::err_non_ascii)
- << FixItHint::CreateRemoval(makeCharRange(*this, BufferPtr, CurPtr));
-
- BufferPtr = CurPtr;
- return false;
- }
-
- // Otherwise, we have an explicit UCN or a character that's unlikely to show
- // up by accident.
- MIOpt.ReadToken();
- FormTokenWithChars(Result, CurPtr, tok::unknown);
- return true;
-}
-
void Lexer::PropagateLineStartLeadingSpaceInfo(Token &Result) {
IsAtStartOfLine = Result.isAtStartOfLine();
HasLeadingSpace = Result.hasLeadingSpace();
@@ -3198,6 +3620,8 @@ void Lexer::PropagateLineStartLeadingSpaceInfo(Token &Result) {
}
bool Lexer::Lex(Token &Result) {
+ assert(!isDependencyDirectivesLexer());
+
// Start a new token.
Result.startToken();
@@ -3233,10 +3657,9 @@ bool Lexer::Lex(Token &Result) {
/// token, not a normal token, as such, it is an internal interface. It assumes
/// that the Flags of result have been cleared before calling this.
bool Lexer::LexTokenInternal(Token &Result, bool TokAtPhysicalStartOfLine) {
-LexNextToken:
- // New token, can't need cleaning yet.
- Result.clearFlag(Token::NeedsCleaning);
- Result.setIdentifierInfo(nullptr);
+LexStart:
+ assert(!Result.needsCleaning() && "Result needs cleaning");
+ assert(!Result.hasPtrData() && "Result has not been reset");
// CurPtr - Cache BufferPtr in an automatic variable.
const char *CurPtr = BufferPtr;
@@ -3308,7 +3731,7 @@ LexNextToken:
case '\r':
if (CurPtr[0] == '\n')
(void)getAndAdvanceChar(CurPtr, Result);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case '\n':
// If we are inside a preprocessor directive and we see the end of line,
// we know we are done with the directive, so return an EOD token.
@@ -3353,8 +3776,7 @@ LexNextToken:
// If the next token is obviously a // or /* */ comment, skip it efficiently
// too (without going through the big switch stmt).
if (CurPtr[0] == '/' && CurPtr[1] == '/' && !inKeepCommentMode() &&
- LangOpts.LineComment &&
- (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP)) {
+ LineComment && (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP)) {
if (SkipLineComment(Result, CurPtr+2, TokAtPhysicalStartOfLine))
return true; // There is a token to return.
goto SkipIgnoredUnits;
@@ -3377,7 +3799,10 @@ LexNextToken:
MIOpt.ReadToken();
return LexNumericConstant(Result, CurPtr);
- case 'u': // Identifier (uber) or C11/C++11 UTF-8 or UTF-16 string literal
+ // Identifier (e.g., uber), or
+ // UTF-8 (C23/C++17) or UTF-16 (C11/C++11) character literal, or
+ // UTF-8 or UTF-16 string literal (C11/C++11).
+ case 'u':
// Notify MIOpt that we read a non-whitespace/non-comment token.
MIOpt.ReadToken();
@@ -3411,7 +3836,7 @@ LexNextToken:
ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result),
tok::utf8_string_literal);
- if (Char2 == '\'' && LangOpts.CPlusPlus17)
+ if (Char2 == '\'' && (LangOpts.CPlusPlus17 || LangOpts.C23))
return LexCharConstant(
Result, ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result),
@@ -3433,9 +3858,9 @@ LexNextToken:
}
// treat u like the start of an identifier.
- return LexIdentifier(Result, CurPtr);
+ return LexIdentifierContinue(Result, CurPtr);
- case 'U': // Identifier (Uber) or C11/C++11 UTF-32 string literal
+ case 'U': // Identifier (e.g. Uber) or C11/C++11 UTF-32 string literal
// Notify MIOpt that we read a non-whitespace/non-comment token.
MIOpt.ReadToken();
@@ -3462,7 +3887,7 @@ LexNextToken:
}
// treat U like the start of an identifier.
- return LexIdentifier(Result, CurPtr);
+ return LexIdentifierContinue(Result, CurPtr);
case 'R': // Identifier or C++0x raw string literal
// Notify MIOpt that we read a non-whitespace/non-comment token.
@@ -3478,7 +3903,7 @@ LexNextToken:
}
// treat R like the start of an identifier.
- return LexIdentifier(Result, CurPtr);
+ return LexIdentifierContinue(Result, CurPtr);
case 'L': // Identifier (Loony) or wide literal (L'x' or L"xyz").
// Notify MIOpt that we read a non-whitespace/non-comment token.
@@ -3503,7 +3928,7 @@ LexNextToken:
return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
tok::wide_char_constant);
// FALL THROUGH, treating L like the start of an identifier.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
// C99 6.4.2: Identifiers.
case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G':
@@ -3517,7 +3942,7 @@ LexNextToken:
case '_':
// Notify MIOpt that we read a non-whitespace/non-comment token.
MIOpt.ReadToken();
- return LexIdentifier(Result, CurPtr);
+ return LexIdentifierContinue(Result, CurPtr);
case '$': // $ in identifiers.
if (LangOpts.DollarIdents) {
@@ -3525,7 +3950,7 @@ LexNextToken:
Diag(CurPtr-1, diag::ext_dollar_in_identifier);
// Notify MIOpt that we read a non-whitespace/non-comment token.
MIOpt.ReadToken();
- return LexIdentifier(Result, CurPtr);
+ return LexIdentifierContinue(Result, CurPtr);
}
Kind = tok::unknown;
@@ -3661,8 +4086,8 @@ LexNextToken:
// "foo". Check to see if the character after the second slash is a '*'.
// If so, we will lex that as a "/" instead of the start of a comment.
// However, we never do this if we are just preprocessing.
- bool TreatAsComment = LangOpts.LineComment &&
- (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP);
+ bool TreatAsComment =
+ LineComment && (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP);
if (!TreatAsComment)
if (!(PP && PP->isPreprocessedOutput()))
TreatAsComment = getCharAndSize(CurPtr+SizeTmp, SizeTmp2) != '*';
@@ -3759,7 +4184,7 @@ LexNextToken:
} else if (Char == '=') {
char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
if (After == '>') {
- if (getLangOpts().CPlusPlus20) {
+ if (LangOpts.CPlusPlus20) {
if (!isLexingRawMode())
Diag(BufferPtr, diag::warn_cxx17_compat_spaceship);
CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
@@ -3769,7 +4194,7 @@ LexNextToken:
}
// Suggest adding a space between the '<=' and the '>' to avoid a
// change in semantics if this turns up in C++ <=17 mode.
- if (getLangOpts().CPlusPlus && !isLexingRawMode()) {
+ if (LangOpts.CPlusPlus && !isLexingRawMode()) {
Diag(BufferPtr, diag::warn_cxx20_compat_spaceship)
<< FixItHint::CreateInsertion(
getSourceLocation(CurPtr + SizeTmp, SizeTmp2), " ");
@@ -3869,9 +4294,7 @@ LexNextToken:
if (LangOpts.Digraphs && Char == '>') {
Kind = tok::r_square; // ':>' -> ']'
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
- } else if ((LangOpts.CPlusPlus ||
- LangOpts.DoubleSquareBracketAttributes) &&
- Char == ':') {
+ } else if (Char == ':') {
Kind = tok::coloncolon;
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
} else {
@@ -3940,7 +4363,7 @@ LexNextToken:
goto LexNextToken;
}
- return LexUnicode(Result, CodePoint, CurPtr);
+ return LexUnicodeIdentifierStart(Result, CodePoint, CurPtr);
}
}
@@ -3972,7 +4395,7 @@ LexNextToken:
// (We manually eliminate the tail call to avoid recursion.)
goto LexNextToken;
}
- return LexUnicode(Result, CodePoint, CurPtr);
+ return LexUnicodeIdentifierStart(Result, CodePoint, CurPtr);
}
if (isLexingRawMode() || ParsingPreprocessorDirective ||
@@ -4008,12 +4431,164 @@ HandleDirective:
FormTokenWithChars(Result, CurPtr, tok::hash);
PP->HandleDirective(Result);
- if (PP->hadModuleLoaderFatalFailure()) {
+ if (PP->hadModuleLoaderFatalFailure())
// With a fatal failure in the module loader, we abort parsing.
- assert(Result.is(tok::eof) && "Preprocessor did not set tok:eof");
return true;
- }
// We parsed the directive; lex a token with the new state.
return false;
+
+LexNextToken:
+ Result.clearFlag(Token::NeedsCleaning);
+ goto LexStart;
+}
+
+const char *Lexer::convertDependencyDirectiveToken(
+ const dependency_directives_scan::Token &DDTok, Token &Result) {
+ const char *TokPtr = BufferStart + DDTok.Offset;
+ Result.startToken();
+ Result.setLocation(getSourceLocation(TokPtr));
+ Result.setKind(DDTok.Kind);
+ Result.setFlag((Token::TokenFlags)DDTok.Flags);
+ Result.setLength(DDTok.Length);
+ BufferPtr = TokPtr + DDTok.Length;
+ return TokPtr;
+}
+
+bool Lexer::LexDependencyDirectiveToken(Token &Result) {
+ assert(isDependencyDirectivesLexer());
+
+ using namespace dependency_directives_scan;
+
+ while (NextDepDirectiveTokenIndex == DepDirectives.front().Tokens.size()) {
+ if (DepDirectives.front().Kind == pp_eof)
+ return LexEndOfFile(Result, BufferEnd);
+ if (DepDirectives.front().Kind == tokens_present_before_eof)
+ MIOpt.ReadToken();
+ NextDepDirectiveTokenIndex = 0;
+ DepDirectives = DepDirectives.drop_front();
+ }
+
+ const dependency_directives_scan::Token &DDTok =
+ DepDirectives.front().Tokens[NextDepDirectiveTokenIndex++];
+ if (NextDepDirectiveTokenIndex > 1 || DDTok.Kind != tok::hash) {
+ // Read something other than a preprocessor directive hash.
+ MIOpt.ReadToken();
+ }
+
+ if (ParsingFilename && DDTok.is(tok::less)) {
+ BufferPtr = BufferStart + DDTok.Offset;
+ LexAngledStringLiteral(Result, BufferPtr + 1);
+ if (Result.isNot(tok::header_name))
+ return true;
+ // Advance the index of lexed tokens.
+ while (true) {
+ const dependency_directives_scan::Token &NextTok =
+ DepDirectives.front().Tokens[NextDepDirectiveTokenIndex];
+ if (BufferStart + NextTok.Offset >= BufferPtr)
+ break;
+ ++NextDepDirectiveTokenIndex;
+ }
+ return true;
+ }
+
+ const char *TokPtr = convertDependencyDirectiveToken(DDTok, Result);
+
+ if (Result.is(tok::hash) && Result.isAtStartOfLine()) {
+ PP->HandleDirective(Result);
+ return false;
+ }
+ if (Result.is(tok::raw_identifier)) {
+ Result.setRawIdentifierData(TokPtr);
+ if (!isLexingRawMode()) {
+ const IdentifierInfo *II = PP->LookUpIdentifierInfo(Result);
+ if (II->isHandleIdentifierCase())
+ return PP->HandleIdentifier(Result);
+ }
+ return true;
+ }
+ if (Result.isLiteral()) {
+ Result.setLiteralData(TokPtr);
+ return true;
+ }
+ if (Result.is(tok::colon)) {
+ // Convert consecutive colons to 'tok::coloncolon'.
+ if (*BufferPtr == ':') {
+ assert(DepDirectives.front().Tokens[NextDepDirectiveTokenIndex].is(
+ tok::colon));
+ ++NextDepDirectiveTokenIndex;
+ Result.setKind(tok::coloncolon);
+ }
+ return true;
+ }
+ if (Result.is(tok::eod))
+ ParsingPreprocessorDirective = false;
+
+ return true;
+}
+
+bool Lexer::LexDependencyDirectiveTokenWhileSkipping(Token &Result) {
+ assert(isDependencyDirectivesLexer());
+
+ using namespace dependency_directives_scan;
+
+ bool Stop = false;
+ unsigned NestedIfs = 0;
+ do {
+ DepDirectives = DepDirectives.drop_front();
+ switch (DepDirectives.front().Kind) {
+ case pp_none:
+ llvm_unreachable("unexpected 'pp_none'");
+ case pp_include:
+ case pp___include_macros:
+ case pp_define:
+ case pp_undef:
+ case pp_import:
+ case pp_pragma_import:
+ case pp_pragma_once:
+ case pp_pragma_push_macro:
+ case pp_pragma_pop_macro:
+ case pp_pragma_include_alias:
+ case pp_pragma_system_header:
+ case pp_include_next:
+ case decl_at_import:
+ case cxx_module_decl:
+ case cxx_import_decl:
+ case cxx_export_module_decl:
+ case cxx_export_import_decl:
+ case tokens_present_before_eof:
+ break;
+ case pp_if:
+ case pp_ifdef:
+ case pp_ifndef:
+ ++NestedIfs;
+ break;
+ case pp_elif:
+ case pp_elifdef:
+ case pp_elifndef:
+ case pp_else:
+ if (!NestedIfs) {
+ Stop = true;
+ }
+ break;
+ case pp_endif:
+ if (!NestedIfs) {
+ Stop = true;
+ } else {
+ --NestedIfs;
+ }
+ break;
+ case pp_eof:
+ NextDepDirectiveTokenIndex = 0;
+ return LexEndOfFile(Result, BufferEnd);
+ }
+ } while (!Stop);
+
+ const dependency_directives_scan::Token &DDTok =
+ DepDirectives.front().Tokens.front();
+ assert(DDTok.is(tok::hash));
+ NextDepDirectiveTokenIndex = 1;
+
+ convertDependencyDirectiveToken(DDTok, Result);
+ return false;
}
diff --git a/contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp b/contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp
index 85d826ce9c6f..0a78638f6805 100644
--- a/contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp
@@ -27,6 +27,7 @@
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Unicode.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
@@ -56,6 +57,26 @@ static unsigned getCharWidth(tok::TokenKind kind, const TargetInfo &Target) {
}
}
+static unsigned getEncodingPrefixLen(tok::TokenKind kind) {
+ switch (kind) {
+ default:
+ llvm_unreachable("Unknown token type!");
+ case tok::char_constant:
+ case tok::string_literal:
+ return 0;
+ case tok::utf8_char_constant:
+ case tok::utf8_string_literal:
+ return 2;
+ case tok::wide_char_constant:
+ case tok::wide_string_literal:
+ case tok::utf16_char_constant:
+ case tok::utf16_string_literal:
+ case tok::utf32_char_constant:
+ case tok::utf32_string_literal:
+ return 1;
+ }
+}
+
static CharSourceRange MakeCharSourceRange(const LangOptions &Features,
FullSourceLoc TokLoc,
const char *TokBegin,
@@ -86,6 +107,24 @@ static DiagnosticBuilder Diag(DiagnosticsEngine *Diags,
MakeCharSourceRange(Features, TokLoc, TokBegin, TokRangeBegin, TokRangeEnd);
}
+static bool IsEscapeValidInUnevaluatedStringLiteral(char Escape) {
+ switch (Escape) {
+ case '\'':
+ case '"':
+ case '?':
+ case '\\':
+ case 'a':
+ case 'b':
+ case 'f':
+ case 'n':
+ case 'r':
+ case 't':
+ case 'v':
+ return true;
+ }
+ return false;
+}
+
/// ProcessCharEscape - Parse a standard C escape sequence, which can occur in
/// either a character or a string literal.
static unsigned ProcessCharEscape(const char *ThisTokBegin,
@@ -93,8 +132,11 @@ static unsigned ProcessCharEscape(const char *ThisTokBegin,
const char *ThisTokEnd, bool &HadError,
FullSourceLoc Loc, unsigned CharWidth,
DiagnosticsEngine *Diags,
- const LangOptions &Features) {
+ const LangOptions &Features,
+ StringLiteralEvalMethod EvalMethod) {
const char *EscapeBegin = ThisTokBuf;
+ bool Delimited = false;
+ bool EndDelimiterFound = false;
// Skip the '\' char.
++ThisTokBuf;
@@ -102,6 +144,7 @@ static unsigned ProcessCharEscape(const char *ThisTokBegin,
// We know that this character can't be off the end of the buffer, because
// that would have been \", which would not have been the end of string.
unsigned ResultChar = *ThisTokBuf++;
+ char Escape = ResultChar;
switch (ResultChar) {
// These map to themselves.
case '\\': case '\'': case '"': case '?': break;
@@ -143,26 +186,47 @@ static unsigned ProcessCharEscape(const char *ThisTokBegin,
break;
case 'x': { // Hex escape.
ResultChar = 0;
- if (ThisTokBuf == ThisTokEnd || !isHexDigit(*ThisTokBuf)) {
+ if (ThisTokBuf != ThisTokEnd && *ThisTokBuf == '{') {
+ Delimited = true;
+ ThisTokBuf++;
+ if (*ThisTokBuf == '}') {
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::err_delimited_escape_empty);
+ return ResultChar;
+ }
+ } else if (ThisTokBuf == ThisTokEnd || !isHexDigit(*ThisTokBuf)) {
if (Diags)
Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
diag::err_hex_escape_no_digits) << "x";
- HadError = true;
- break;
+ return ResultChar;
}
// Hex escapes are a maximal series of hex digits.
bool Overflow = false;
for (; ThisTokBuf != ThisTokEnd; ++ThisTokBuf) {
- int CharVal = llvm::hexDigitValue(ThisTokBuf[0]);
- if (CharVal == -1) break;
+ if (Delimited && *ThisTokBuf == '}') {
+ ThisTokBuf++;
+ EndDelimiterFound = true;
+ break;
+ }
+ int CharVal = llvm::hexDigitValue(*ThisTokBuf);
+ if (CharVal == -1) {
+ // Non delimited hex escape sequences stop at the first non-hex digit.
+ if (!Delimited)
+ break;
+ HadError = true;
+ if (Diags)
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::err_delimited_escape_invalid)
+ << StringRef(ThisTokBuf, 1);
+ continue;
+ }
// About to shift out a digit?
if (ResultChar & 0xF0000000)
Overflow = true;
ResultChar <<= 4;
ResultChar |= CharVal;
}
-
// See if any bits will be truncated when evaluated as a character.
if (CharWidth != 32 && (ResultChar >> CharWidth) != 0) {
Overflow = true;
@@ -170,9 +234,13 @@ static unsigned ProcessCharEscape(const char *ThisTokBegin,
}
// Check for overflow.
- if (Overflow && Diags) // Too many digits to fit in
- Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
- diag::err_escape_too_large) << 0;
+ if (!HadError && Overflow) { // Too many digits to fit in
+ HadError = true;
+ if (Diags)
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::err_escape_too_large)
+ << 0;
+ }
break;
}
case '0': case '1': case '2': case '3':
@@ -200,7 +268,60 @@ static unsigned ProcessCharEscape(const char *ThisTokBegin,
}
break;
}
+ case 'o': {
+ bool Overflow = false;
+ if (ThisTokBuf == ThisTokEnd || *ThisTokBuf != '{') {
+ HadError = true;
+ if (Diags)
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::err_delimited_escape_missing_brace)
+ << "o";
+
+ break;
+ }
+ ResultChar = 0;
+ Delimited = true;
+ ++ThisTokBuf;
+ if (*ThisTokBuf == '}') {
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::err_delimited_escape_empty);
+ return ResultChar;
+ }
+ while (ThisTokBuf != ThisTokEnd) {
+ if (*ThisTokBuf == '}') {
+ EndDelimiterFound = true;
+ ThisTokBuf++;
+ break;
+ }
+ if (*ThisTokBuf < '0' || *ThisTokBuf > '7') {
+ HadError = true;
+ if (Diags)
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::err_delimited_escape_invalid)
+ << StringRef(ThisTokBuf, 1);
+ ThisTokBuf++;
+ continue;
+ }
+ // Check if one of the top three bits is set before shifting them out.
+ if (ResultChar & 0xE0000000)
+ Overflow = true;
+
+ ResultChar <<= 3;
+ ResultChar |= *ThisTokBuf++ - '0';
+ }
+ // Check for overflow. Reject '\777', but not L'\777'.
+ if (!HadError &&
+ (Overflow || (CharWidth != 32 && (ResultChar >> CharWidth) != 0))) {
+ HadError = true;
+ if (Diags)
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::err_escape_too_large)
+ << 1;
+ ResultChar &= ~0U >> (32 - CharWidth);
+ }
+ break;
+ }
// Otherwise, these are not valid escapes.
case '(': case '{': case '[': case '%':
// GCC accepts these as extensions. We warn about them as such though.
@@ -224,6 +345,27 @@ static unsigned ProcessCharEscape(const char *ThisTokBegin,
break;
}
+ if (Delimited && Diags) {
+ if (!EndDelimiterFound)
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::err_expected)
+ << tok::r_brace;
+ else if (!HadError) {
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ Features.CPlusPlus23 ? diag::warn_cxx23_delimited_escape_sequence
+ : diag::ext_delimited_escape_sequence)
+ << /*delimited*/ 0 << (Features.CPlusPlus ? 1 : 0);
+ }
+ }
+
+ if (EvalMethod == StringLiteralEvalMethod::Unevaluated &&
+ !IsEscapeValidInUnevaluatedStringLiteral(Escape)) {
+ Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
+ diag::err_unevaluated_string_invalid_escape_sequence)
+ << StringRef(EscapeBegin, ThisTokBuf - EscapeBegin);
+ HadError = true;
+ }
+
return ResultChar;
}
@@ -231,10 +373,8 @@ static void appendCodePoint(unsigned Codepoint,
llvm::SmallVectorImpl<char> &Str) {
char ResultBuf[4];
char *ResultPtr = ResultBuf;
- bool Res = llvm::ConvertCodePointToUTF8(Codepoint, ResultPtr);
- (void)Res;
- assert(Res && "Unexpected conversion failure");
- Str.append(ResultBuf, ResultPtr);
+ if (llvm::ConvertCodePointToUTF8(Codepoint, ResultPtr))
+ Str.append(ResultBuf, ResultPtr);
}
void clang::expandUCNs(SmallVectorImpl<char> &Buf, StringRef Input) {
@@ -245,18 +385,48 @@ void clang::expandUCNs(SmallVectorImpl<char> &Buf, StringRef Input) {
}
++I;
- assert(*I == 'u' || *I == 'U');
+ char Kind = *I;
+ ++I;
+
+ assert(Kind == 'u' || Kind == 'U' || Kind == 'N');
+ uint32_t CodePoint = 0;
+
+ if (Kind == 'u' && *I == '{') {
+ for (++I; *I != '}'; ++I) {
+ unsigned Value = llvm::hexDigitValue(*I);
+ assert(Value != -1U);
+ CodePoint <<= 4;
+ CodePoint += Value;
+ }
+ appendCodePoint(CodePoint, Buf);
+ continue;
+ }
+
+ if (Kind == 'N') {
+ assert(*I == '{');
+ ++I;
+ auto Delim = std::find(I, Input.end(), '}');
+ assert(Delim != Input.end());
+ StringRef Name(I, std::distance(I, Delim));
+ std::optional<llvm::sys::unicode::LooseMatchingResult> Res =
+ llvm::sys::unicode::nameToCodepointLooseMatching(Name);
+ assert(Res && "could not find a codepoint that was previously found");
+ CodePoint = Res->CodePoint;
+ assert(CodePoint != 0xFFFFFFFF);
+ appendCodePoint(CodePoint, Buf);
+ I = Delim;
+ continue;
+ }
unsigned NumHexDigits;
- if (*I == 'u')
+ if (Kind == 'u')
NumHexDigits = 4;
else
NumHexDigits = 8;
assert(I + NumHexDigits <= E);
- uint32_t CodePoint = 0;
- for (++I; NumHexDigits != 0; ++I, --NumHexDigits) {
+ for (; NumHexDigits != 0; ++I, --NumHexDigits) {
unsigned Value = llvm::hexDigitValue(*I);
assert(Value != -1U);
@@ -269,40 +439,232 @@ void clang::expandUCNs(SmallVectorImpl<char> &Buf, StringRef Input) {
}
}
-/// ProcessUCNEscape - Read the Universal Character Name, check constraints and
-/// return the UTF32.
-static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
- const char *ThisTokEnd,
- uint32_t &UcnVal, unsigned short &UcnLen,
- FullSourceLoc Loc, DiagnosticsEngine *Diags,
- const LangOptions &Features,
- bool in_char_string_literal = false) {
+bool clang::isFunctionLocalStringLiteralMacro(tok::TokenKind K,
+ const LangOptions &LO) {
+ return LO.MicrosoftExt &&
+ (K == tok::kw___FUNCTION__ || K == tok::kw_L__FUNCTION__ ||
+ K == tok::kw___FUNCSIG__ || K == tok::kw_L__FUNCSIG__ ||
+ K == tok::kw___FUNCDNAME__);
+}
+
+bool clang::tokenIsLikeStringLiteral(const Token &Tok, const LangOptions &LO) {
+ return tok::isStringLiteral(Tok.getKind()) ||
+ isFunctionLocalStringLiteralMacro(Tok.getKind(), LO);
+}
+
+static bool ProcessNumericUCNEscape(const char *ThisTokBegin,
+ const char *&ThisTokBuf,
+ const char *ThisTokEnd, uint32_t &UcnVal,
+ unsigned short &UcnLen, bool &Delimited,
+ FullSourceLoc Loc, DiagnosticsEngine *Diags,
+ const LangOptions &Features,
+ bool in_char_string_literal = false) {
const char *UcnBegin = ThisTokBuf;
+ bool HasError = false;
+ bool EndDelimiterFound = false;
// Skip the '\u' char's.
ThisTokBuf += 2;
-
- if (ThisTokBuf == ThisTokEnd || !isHexDigit(*ThisTokBuf)) {
+ Delimited = false;
+ if (UcnBegin[1] == 'u' && in_char_string_literal &&
+ ThisTokBuf != ThisTokEnd && *ThisTokBuf == '{') {
+ Delimited = true;
+ ThisTokBuf++;
+ } else if (ThisTokBuf == ThisTokEnd || !isHexDigit(*ThisTokBuf)) {
if (Diags)
Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
- diag::err_hex_escape_no_digits) << StringRef(&ThisTokBuf[-1], 1);
+ diag::err_hex_escape_no_digits)
+ << StringRef(&ThisTokBuf[-1], 1);
return false;
}
UcnLen = (ThisTokBuf[-1] == 'u' ? 4 : 8);
- unsigned short UcnLenSave = UcnLen;
- for (; ThisTokBuf != ThisTokEnd && UcnLenSave; ++ThisTokBuf, UcnLenSave--) {
- int CharVal = llvm::hexDigitValue(ThisTokBuf[0]);
- if (CharVal == -1) break;
+
+ bool Overflow = false;
+ unsigned short Count = 0;
+ for (; ThisTokBuf != ThisTokEnd && (Delimited || Count != UcnLen);
+ ++ThisTokBuf) {
+ if (Delimited && *ThisTokBuf == '}') {
+ ++ThisTokBuf;
+ EndDelimiterFound = true;
+ break;
+ }
+ int CharVal = llvm::hexDigitValue(*ThisTokBuf);
+ if (CharVal == -1) {
+ HasError = true;
+ if (!Delimited)
+ break;
+ if (Diags) {
+ Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
+ diag::err_delimited_escape_invalid)
+ << StringRef(ThisTokBuf, 1);
+ }
+ Count++;
+ continue;
+ }
+ if (UcnVal & 0xF0000000) {
+ Overflow = true;
+ continue;
+ }
UcnVal <<= 4;
UcnVal |= CharVal;
+ Count++;
+ }
+
+ if (Overflow) {
+ if (Diags)
+ Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
+ diag::err_escape_too_large)
+ << 0;
+ return false;
+ }
+
+ if (Delimited && !EndDelimiterFound) {
+ if (Diags) {
+ Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
+ diag::err_expected)
+ << tok::r_brace;
+ }
+ return false;
}
+
// If we didn't consume the proper number of digits, there is a problem.
- if (UcnLenSave) {
+ if (Count == 0 || (!Delimited && Count != UcnLen)) {
if (Diags)
Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
- diag::err_ucn_escape_incomplete);
+ Delimited ? diag::err_delimited_escape_empty
+ : diag::err_ucn_escape_incomplete);
+ return false;
+ }
+ return !HasError;
+}
+
+static void DiagnoseInvalidUnicodeCharacterName(
+ DiagnosticsEngine *Diags, const LangOptions &Features, FullSourceLoc Loc,
+ const char *TokBegin, const char *TokRangeBegin, const char *TokRangeEnd,
+ llvm::StringRef Name) {
+
+ Diag(Diags, Features, Loc, TokBegin, TokRangeBegin, TokRangeEnd,
+ diag::err_invalid_ucn_name)
+ << Name;
+
+ namespace u = llvm::sys::unicode;
+
+ std::optional<u::LooseMatchingResult> Res =
+ u::nameToCodepointLooseMatching(Name);
+ if (Res) {
+ Diag(Diags, Features, Loc, TokBegin, TokRangeBegin, TokRangeEnd,
+ diag::note_invalid_ucn_name_loose_matching)
+ << FixItHint::CreateReplacement(
+ MakeCharSourceRange(Features, Loc, TokBegin, TokRangeBegin,
+ TokRangeEnd),
+ Res->Name);
+ return;
+ }
+
+ unsigned Distance = 0;
+ SmallVector<u::MatchForCodepointName> Matches =
+ u::nearestMatchesForCodepointName(Name, 5);
+ assert(!Matches.empty() && "No unicode characters found");
+
+ for (const auto &Match : Matches) {
+ if (Distance == 0)
+ Distance = Match.Distance;
+ if (std::max(Distance, Match.Distance) -
+ std::min(Distance, Match.Distance) >
+ 3)
+ break;
+ Distance = Match.Distance;
+
+ std::string Str;
+ llvm::UTF32 V = Match.Value;
+ bool Converted =
+ llvm::convertUTF32ToUTF8String(llvm::ArrayRef<llvm::UTF32>(&V, 1), Str);
+ (void)Converted;
+ assert(Converted && "Found a match wich is not a unicode character");
+
+ Diag(Diags, Features, Loc, TokBegin, TokRangeBegin, TokRangeEnd,
+ diag::note_invalid_ucn_name_candidate)
+ << Match.Name << llvm::utohexstr(Match.Value)
+ << Str // FIXME: Fix the rendering of non printable characters
+ << FixItHint::CreateReplacement(
+ MakeCharSourceRange(Features, Loc, TokBegin, TokRangeBegin,
+ TokRangeEnd),
+ Match.Name);
+ }
+}
+
+static bool ProcessNamedUCNEscape(const char *ThisTokBegin,
+ const char *&ThisTokBuf,
+ const char *ThisTokEnd, uint32_t &UcnVal,
+ unsigned short &UcnLen, FullSourceLoc Loc,
+ DiagnosticsEngine *Diags,
+ const LangOptions &Features) {
+ const char *UcnBegin = ThisTokBuf;
+ assert(UcnBegin[0] == '\\' && UcnBegin[1] == 'N');
+ ThisTokBuf += 2;
+ if (ThisTokBuf == ThisTokEnd || *ThisTokBuf != '{') {
+ if (Diags) {
+ Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
+ diag::err_delimited_escape_missing_brace)
+ << StringRef(&ThisTokBuf[-1], 1);
+ }
+ return false;
+ }
+ ThisTokBuf++;
+ const char *ClosingBrace = std::find_if(ThisTokBuf, ThisTokEnd, [](char C) {
+ return C == '}' || isVerticalWhitespace(C);
+ });
+ bool Incomplete = ClosingBrace == ThisTokEnd;
+ bool Empty = ClosingBrace == ThisTokBuf;
+ if (Incomplete || Empty) {
+ if (Diags) {
+ Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
+ Incomplete ? diag::err_ucn_escape_incomplete
+ : diag::err_delimited_escape_empty)
+ << StringRef(&UcnBegin[1], 1);
+ }
+ ThisTokBuf = ClosingBrace == ThisTokEnd ? ClosingBrace : ClosingBrace + 1;
return false;
}
+ StringRef Name(ThisTokBuf, ClosingBrace - ThisTokBuf);
+ ThisTokBuf = ClosingBrace + 1;
+ std::optional<char32_t> Res = llvm::sys::unicode::nameToCodepointStrict(Name);
+ if (!Res) {
+ if (Diags)
+ DiagnoseInvalidUnicodeCharacterName(Diags, Features, Loc, ThisTokBegin,
+ &UcnBegin[3], ClosingBrace, Name);
+ return false;
+ }
+ UcnVal = *Res;
+ UcnLen = UcnVal > 0xFFFF ? 8 : 4;
+ return true;
+}
+
+/// ProcessUCNEscape - Read the Universal Character Name, check constraints and
+/// return the UTF32.
+static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
+ const char *ThisTokEnd, uint32_t &UcnVal,
+ unsigned short &UcnLen, FullSourceLoc Loc,
+ DiagnosticsEngine *Diags,
+ const LangOptions &Features,
+ bool in_char_string_literal = false) {
+
+ bool HasError;
+ const char *UcnBegin = ThisTokBuf;
+ bool IsDelimitedEscapeSequence = false;
+ bool IsNamedEscapeSequence = false;
+ if (ThisTokBuf[1] == 'N') {
+ IsNamedEscapeSequence = true;
+ HasError = !ProcessNamedUCNEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd,
+ UcnVal, UcnLen, Loc, Diags, Features);
+ } else {
+ HasError =
+ !ProcessNumericUCNEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd, UcnVal,
+ UcnLen, IsDelimitedEscapeSequence, Loc, Diags,
+ Features, in_char_string_literal);
+ }
+ if (HasError)
+ return false;
// Check UCN constraints (C99 6.4.3p2) [C++11 lex.charset p2]
if ((0xD800 <= UcnVal && UcnVal <= 0xDFFF) || // surrogate codepoints
@@ -313,22 +675,28 @@ static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
return false;
}
- // C++11 allows UCNs that refer to control characters and basic source
- // characters inside character and string literals
+ // C23 and C++11 allow UCNs that refer to control characters
+ // and basic source characters inside character and string literals
if (UcnVal < 0xa0 &&
- (UcnVal != 0x24 && UcnVal != 0x40 && UcnVal != 0x60)) { // $, @, `
- bool IsError = (!Features.CPlusPlus11 || !in_char_string_literal);
+ // $, @, ` are allowed in all language modes
+ (UcnVal != 0x24 && UcnVal != 0x40 && UcnVal != 0x60)) {
+ bool IsError =
+ (!(Features.CPlusPlus11 || Features.C23) || !in_char_string_literal);
if (Diags) {
char BasicSCSChar = UcnVal;
if (UcnVal >= 0x20 && UcnVal < 0x7f)
Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
- IsError ? diag::err_ucn_escape_basic_scs :
- diag::warn_cxx98_compat_literal_ucn_escape_basic_scs)
+ IsError ? diag::err_ucn_escape_basic_scs
+ : Features.CPlusPlus
+ ? diag::warn_cxx98_compat_literal_ucn_escape_basic_scs
+ : diag::warn_c23_compat_literal_ucn_escape_basic_scs)
<< StringRef(&BasicSCSChar, 1);
else
Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
- IsError ? diag::err_ucn_control_character :
- diag::warn_cxx98_compat_literal_ucn_control_character);
+ IsError ? diag::err_ucn_control_character
+ : Features.CPlusPlus
+ ? diag::warn_cxx98_compat_literal_ucn_control_character
+ : diag::warn_c23_compat_literal_ucn_control_character);
}
if (IsError)
return false;
@@ -338,6 +706,12 @@ static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
diag::warn_ucn_not_valid_in_c89_literal);
+ if ((IsDelimitedEscapeSequence || IsNamedEscapeSequence) && Diags)
+ Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
+ Features.CPlusPlus23 ? diag::warn_cxx23_delimited_escape_sequence
+ : diag::ext_delimited_escape_sequence)
+ << (IsNamedEscapeSequence ? 1 : 0) << (Features.CPlusPlus ? 1 : 0);
+
return true;
}
@@ -458,13 +832,13 @@ static void EncodeUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
switch (bytesToWrite) { // note: everything falls through.
case 4:
*--ResultBuf = (UTF8)((UcnVal | byteMark) & byteMask); UcnVal >>= 6;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 3:
*--ResultBuf = (UTF8)((UcnVal | byteMark) & byteMask); UcnVal >>= 6;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 2:
*--ResultBuf = (UTF8)((UcnVal | byteMark) & byteMask); UcnVal >>= 6;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 1:
*--ResultBuf = (UTF8) (UcnVal | firstByteMark[bytesToWrite]);
}
@@ -532,12 +906,6 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
: SM(SM), LangOpts(LangOpts), Diags(Diags),
ThisTokBegin(TokSpelling.begin()), ThisTokEnd(TokSpelling.end()) {
- // This routine assumes that the range begin/end matches the regex for integer
- // and FP constants (specifically, the 'pp-number' regex), and assumes that
- // the byte at "*end" is both valid and not part of the regex. Because of
- // this, it doesn't have to check for 'overscan' in various places.
- assert(!isPreprocessingNumberBody(*ThisTokEnd) && "didn't maximally munch?");
-
s = DigitsBegin = ThisTokBegin;
saw_exponent = false;
saw_period = false;
@@ -556,6 +924,21 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
isFract = false;
isAccum = false;
hadError = false;
+ isBitInt = false;
+
+ // This routine assumes that the range begin/end matches the regex for integer
+ // and FP constants (specifically, the 'pp-number' regex), and assumes that
+ // the byte at "*end" is both valid and not part of the regex. Because of
+ // this, it doesn't have to check for 'overscan' in various places.
+ // Note: For HLSL, the end token is allowed to be '.' which would be in the
+ // 'pp-number' regex. This is required to support vector swizzles on numeric
+ // constants (i.e. 1.xx or 1.5f.rrr).
+ if (isPreprocessingNumberBody(*ThisTokEnd) &&
+ !(LangOpts.HLSL && *ThisTokEnd == '.')) {
+ Diags.Report(TokLoc, diag::err_lexing_numeric);
+ hadError = true;
+ return;
+ }
if (*s == '0') { // parse radix
ParseNumberStartingWithZero(TokLoc);
@@ -632,9 +1015,13 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
// CUDA host and device may have different _Float16 support, therefore
// allows f16 literals to avoid false alarm.
+ // When we compile for OpenMP target offloading on NVPTX, f16 suffix
+ // should also be supported.
// ToDo: more precise check for CUDA.
- if ((Target.hasFloat16Type() || LangOpts.CUDA) && s + 2 < ThisTokEnd &&
- s[1] == '1' && s[2] == '6') {
+ // TODO: AMDGPU might also support it in the future.
+ if ((Target.hasFloat16Type() || LangOpts.CUDA ||
+ (LangOpts.OpenMPIsTargetDevice && Target.getTriple().isNVPTX())) &&
+ s + 2 < ThisTokEnd && s[1] == '1' && s[2] == '6') {
s += 2; // success, eat up 2 characters.
isFloat16 = true;
continue;
@@ -724,12 +1111,30 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
break;
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 'j':
case 'J':
if (isImaginary) break; // Cannot be repeated.
isImaginary = true;
continue; // Success.
+ case 'w':
+ case 'W':
+ if (isFPConstant)
+ break; // Invalid for floats.
+ if (HasSize)
+ break; // Invalid if we already have a size for the literal.
+
+ // wb and WB are allowed, but a mixture of cases like Wb or wB is not. We
+ // explicitly do not support the suffix in C++ as an extension because a
+ // library-based UDL that resolves to a library type may be more
+ // appropriate there.
+ if (!LangOpts.CPlusPlus && ((s[0] == 'w' && s[1] == 'b') ||
+ (s[0] == 'W' && s[1] == 'B'))) {
+ isBitInt = true;
+ HasSize = true;
+ ++s; // Skip both characters (2nd char skipped on continue).
+ continue; // Success.
+ }
}
// If we reached here, there was an error or a ud-suffix.
break;
@@ -751,6 +1156,7 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
isFloat16 = false;
isHalf = false;
isImaginary = false;
+ isBitInt = false;
MicrosoftInteger = 0;
saw_fixed_point_suffix = false;
isFract = false;
@@ -980,8 +1386,14 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
// floating point constant, the radix will change to 10. Octal floating
// point constants are not permitted (only decimal and hexadecimal).
radix = 8;
- DigitsBegin = s;
+ const char *PossibleNewDigitStart = s;
s = SkipOctalDigits(s);
+ // When the value is 0 followed by a suffix (like 0wb), we want to leave 0
+ // as the start of the digits. So if skipping octal digits does not skip
+ // anything, we leave the digit start where it was.
+ if (s != PossibleNewDigitStart)
+ DigitsBegin = PossibleNewDigitStart;
+
if (s == ThisTokEnd)
return; // Done, simple octal number like 01234
@@ -1081,7 +1493,7 @@ NumericLiteralParser::GetFloatValue(llvm::APFloat &Result) {
llvm::SmallString<16> Buffer;
StringRef Str(ThisTokBegin, n);
- if (Str.find('\'') != StringRef::npos) {
+ if (Str.contains('\'')) {
Buffer.reserve(n);
std::remove_copy_if(Str.begin(), Str.end(), std::back_inserter(Buffer),
&isDigitSeparator);
@@ -1196,7 +1608,7 @@ bool NumericLiteralParser::GetFixedPointValue(llvm::APInt &StoreVal, unsigned Sc
Val *= Base;
}
} else if (BaseShift < 0) {
- for (int64_t i = BaseShift; i < 0 && !Val.isNullValue(); ++i)
+ for (int64_t i = BaseShift; i < 0 && !Val.isZero(); ++i)
Val = Val.udiv(Base);
}
@@ -1271,7 +1683,12 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
++begin;
// Skip over the entry quote.
- assert(begin[0] == '\'' && "Invalid token lexed");
+ if (begin[0] != '\'') {
+ PP.Diag(Loc, diag::err_lexing_char);
+ HadError = true;
+ return;
+ }
+
++begin;
// Remove an optional ud-suffix.
@@ -1340,7 +1757,7 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
// If we see bad encoding for unprefixed character literals, warn and
// simply copy the byte values, for compatibility with gcc and
// older versions of clang.
- bool NoErrorOnBadEncoding = isAscii();
+ bool NoErrorOnBadEncoding = isOrdinary();
unsigned Msg = diag::err_bad_character_encoding;
if (NoErrorOnBadEncoding)
Msg = diag::warn_bad_character_encoding;
@@ -1365,7 +1782,7 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
continue;
}
// Is this a Universal Character Name escape?
- if (begin[1] == 'u' || begin[1] == 'U') {
+ if (begin[1] == 'u' || begin[1] == 'U' || begin[1] == 'N') {
unsigned short UcnLen = 0;
if (!ProcessUCNEscape(TokBegin, begin, end, *buffer_begin, UcnLen,
FullSourceLoc(Loc, PP.getSourceManager()),
@@ -1381,23 +1798,24 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
}
unsigned CharWidth = getCharWidth(Kind, PP.getTargetInfo());
uint64_t result =
- ProcessCharEscape(TokBegin, begin, end, HadError,
- FullSourceLoc(Loc,PP.getSourceManager()),
- CharWidth, &PP.getDiagnostics(), PP.getLangOpts());
+ ProcessCharEscape(TokBegin, begin, end, HadError,
+ FullSourceLoc(Loc, PP.getSourceManager()), CharWidth,
+ &PP.getDiagnostics(), PP.getLangOpts(),
+ StringLiteralEvalMethod::Evaluated);
*buffer_begin++ = result;
}
unsigned NumCharsSoFar = buffer_begin - &codepoint_buffer.front();
if (NumCharsSoFar > 1) {
- if (isWide())
- PP.Diag(Loc, diag::warn_extraneous_char_constant);
- else if (isAscii() && NumCharsSoFar == 4)
+ if (isOrdinary() && NumCharsSoFar == 4)
PP.Diag(Loc, diag::warn_four_char_character_literal);
- else if (isAscii())
+ else if (isOrdinary())
PP.Diag(Loc, diag::warn_multichar_character_literal);
- else
- PP.Diag(Loc, diag::err_multichar_utf_character_literal);
+ else {
+ PP.Diag(Loc, diag::err_multichar_character_literal) << (isWide() ? 0 : 1);
+ HadError = true;
+ }
IsMultiChar = true;
} else {
IsMultiChar = false;
@@ -1408,11 +1826,11 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
// Narrow character literals act as though their value is concatenated
// in this implementation, but warn on overflow.
bool multi_char_too_long = false;
- if (isAscii() && isMultiChar()) {
+ if (isOrdinary() && isMultiChar()) {
LitVal = 0;
for (size_t i = 0; i < NumCharsSoFar; ++i) {
// check for enough leading zeros to shift into
- multi_char_too_long |= (LitVal.countLeadingZeros() < 8);
+ multi_char_too_long |= (LitVal.countl_zero() < 8);
LitVal <<= 8;
LitVal = LitVal + (codepoint_buffer[i] & 0xFF);
}
@@ -1432,7 +1850,7 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
// if 'char' is signed for this target (C99 6.4.4.4p10). Note that multiple
// character constants are not sign extended in the this implementation:
// '\xFF\xFF' = 65536 and '\x0\xFF' = 255, which matches GCC.
- if (isAscii() && NumCharsSoFar == 1 && (Value & 128) &&
+ if (isOrdinary() && NumCharsSoFar == 1 && (Value & 128) &&
PP.getLangOpts().CharIsSigned)
Value = (signed char)Value;
}
@@ -1491,13 +1909,14 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
/// hex-digit hex-digit hex-digit hex-digit
/// \endverbatim
///
-StringLiteralParser::
-StringLiteralParser(ArrayRef<Token> StringToks,
- Preprocessor &PP, bool Complain)
- : SM(PP.getSourceManager()), Features(PP.getLangOpts()),
- Target(PP.getTargetInfo()), Diags(Complain ? &PP.getDiagnostics() :nullptr),
- MaxTokenLength(0), SizeBound(0), CharByteWidth(0), Kind(tok::unknown),
- ResultPtr(ResultBuf.data()), hadError(false), Pascal(false) {
+StringLiteralParser::StringLiteralParser(ArrayRef<Token> StringToks,
+ Preprocessor &PP,
+ StringLiteralEvalMethod EvalMethod)
+ : SM(PP.getSourceManager()), Features(PP.getLangOpts()),
+ Target(PP.getTargetInfo()), Diags(&PP.getDiagnostics()),
+ MaxTokenLength(0), SizeBound(0), CharByteWidth(0), Kind(tok::unknown),
+ ResultPtr(ResultBuf.data()), EvalMethod(EvalMethod), hadError(false),
+ Pascal(false) {
init(StringToks);
}
@@ -1514,35 +1933,51 @@ void StringLiteralParser::init(ArrayRef<Token> StringToks){
assert(!StringToks.empty() && "expected at least one token");
MaxTokenLength = StringToks[0].getLength();
assert(StringToks[0].getLength() >= 2 && "literal token is invalid!");
- SizeBound = StringToks[0].getLength()-2; // -2 for "".
- Kind = StringToks[0].getKind();
-
+ SizeBound = StringToks[0].getLength() - 2; // -2 for "".
hadError = false;
- // Implement Translation Phase #6: concatenation of string literals
+ // Determines the kind of string from the prefix
+ Kind = tok::string_literal;
+
/// (C99 5.1.1.2p1). The common case is only one string fragment.
- for (unsigned i = 1; i != StringToks.size(); ++i) {
- if (StringToks[i].getLength() < 2)
- return DiagnoseLexingError(StringToks[i].getLocation());
+ for (const Token &Tok : StringToks) {
+ if (Tok.getLength() < 2)
+ return DiagnoseLexingError(Tok.getLocation());
// The string could be shorter than this if it needs cleaning, but this is a
// reasonable bound, which is all we need.
- assert(StringToks[i].getLength() >= 2 && "literal token is invalid!");
- SizeBound += StringToks[i].getLength()-2; // -2 for "".
+ assert(Tok.getLength() >= 2 && "literal token is invalid!");
+ SizeBound += Tok.getLength() - 2; // -2 for "".
// Remember maximum string piece length.
- if (StringToks[i].getLength() > MaxTokenLength)
- MaxTokenLength = StringToks[i].getLength();
+ if (Tok.getLength() > MaxTokenLength)
+ MaxTokenLength = Tok.getLength();
// Remember if we see any wide or utf-8/16/32 strings.
// Also check for illegal concatenations.
- if (StringToks[i].isNot(Kind) && StringToks[i].isNot(tok::string_literal)) {
- if (isAscii()) {
- Kind = StringToks[i].getKind();
+ if (isUnevaluated() && Tok.getKind() != tok::string_literal) {
+ if (Diags) {
+ SourceLocation PrefixEndLoc = Lexer::AdvanceToTokenCharacter(
+ Tok.getLocation(), getEncodingPrefixLen(Tok.getKind()), SM,
+ Features);
+ CharSourceRange Range =
+ CharSourceRange::getCharRange({Tok.getLocation(), PrefixEndLoc});
+ StringRef Prefix(SM.getCharacterData(Tok.getLocation()),
+ getEncodingPrefixLen(Tok.getKind()));
+ Diags->Report(Tok.getLocation(),
+ Features.CPlusPlus26
+ ? diag::err_unevaluated_string_prefix
+ : diag::warn_unevaluated_string_prefix)
+ << Prefix << Features.CPlusPlus << FixItHint::CreateRemoval(Range);
+ }
+ if (Features.CPlusPlus26)
+ hadError = true;
+ } else if (Tok.isNot(Kind) && Tok.isNot(tok::string_literal)) {
+ if (isOrdinary()) {
+ Kind = Tok.getKind();
} else {
if (Diags)
- Diags->Report(StringToks[i].getLocation(),
- diag::err_unsupported_string_concat);
+ Diags->Report(Tok.getLocation(), diag::err_unsupported_string_concat);
hadError = true;
}
}
@@ -1620,13 +2055,18 @@ void StringLiteralParser::init(ArrayRef<Token> StringToks){
// result of a concatenation involving at least one user-defined-string-
// literal, all the participating user-defined-string-literals shall
// have the same ud-suffix.
- if (UDSuffixBuf != UDSuffix) {
+ bool UnevaluatedStringHasUDL = isUnevaluated() && !UDSuffix.empty();
+ if (UDSuffixBuf != UDSuffix || UnevaluatedStringHasUDL) {
if (Diags) {
SourceLocation TokLoc = StringToks[i].getLocation();
- Diags->Report(TokLoc, diag::err_string_concat_mixed_suffix)
- << UDSuffixBuf << UDSuffix
- << SourceRange(UDSuffixTokLoc, UDSuffixTokLoc)
- << SourceRange(TokLoc, TokLoc);
+ if (UnevaluatedStringHasUDL) {
+ Diags->Report(TokLoc, diag::err_unevaluated_string_udl)
+ << SourceRange(TokLoc, TokLoc);
+ } else {
+ Diags->Report(TokLoc, diag::err_string_concat_mixed_suffix)
+ << UDSuffixBuf << UDSuffix
+ << SourceRange(UDSuffixTokLoc, UDSuffixTokLoc);
+ }
}
hadError = true;
}
@@ -1698,8 +2138,9 @@ void StringLiteralParser::init(ArrayRef<Token> StringToks){
++ThisTokBuf; // skip "
// Check if this is a pascal string
- if (Features.PascalStrings && ThisTokBuf + 1 != ThisTokEnd &&
- ThisTokBuf[0] == '\\' && ThisTokBuf[1] == 'p') {
+ if (!isUnevaluated() && Features.PascalStrings &&
+ ThisTokBuf + 1 != ThisTokEnd && ThisTokBuf[0] == '\\' &&
+ ThisTokBuf[1] == 'p') {
// If the \p sequence is found in the first token, we have a pascal string
// Otherwise, if we already have a pascal string, ignore the first \p
@@ -1725,7 +2166,8 @@ void StringLiteralParser::init(ArrayRef<Token> StringToks){
continue;
}
// Is this a Universal Character Name escape?
- if (ThisTokBuf[1] == 'u' || ThisTokBuf[1] == 'U') {
+ if (ThisTokBuf[1] == 'u' || ThisTokBuf[1] == 'U' ||
+ ThisTokBuf[1] == 'N') {
EncodeUCNEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd,
ResultPtr, hadError,
FullSourceLoc(StringToks[i].getLocation(), SM),
@@ -1734,9 +2176,9 @@ void StringLiteralParser::init(ArrayRef<Token> StringToks){
}
// Otherwise, this is a non-UCN escape character. Process it.
unsigned ResultChar =
- ProcessCharEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd, hadError,
- FullSourceLoc(StringToks[i].getLocation(), SM),
- CharByteWidth*8, Diags, Features);
+ ProcessCharEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd, hadError,
+ FullSourceLoc(StringToks[i].getLocation(), SM),
+ CharByteWidth * 8, Diags, Features, EvalMethod);
if (CharByteWidth == 4) {
// FIXME: Make the type of the result buffer correct instead of
@@ -1758,6 +2200,8 @@ void StringLiteralParser::init(ArrayRef<Token> StringToks){
}
}
+ assert((!Pascal || !isUnevaluated()) &&
+ "Pascal string in unevaluated context");
if (Pascal) {
if (CharByteWidth == 4) {
// FIXME: Make the type of the result buffer correct instead of
@@ -1820,7 +2264,7 @@ bool StringLiteralParser::CopyStringFragment(const Token &Tok,
// If we see bad encoding for unprefixed string literals, warn and
// simply copy the byte values, for compatibility with gcc and older
// versions of clang.
- bool NoErrorOnBadEncoding = isAscii();
+ bool NoErrorOnBadEncoding = isOrdinary();
if (NoErrorOnBadEncoding) {
memcpy(ResultPtr, Fragment.data(), Fragment.size());
ResultPtr += Fragment.size();
@@ -1918,7 +2362,8 @@ unsigned StringLiteralParser::getOffsetOfStringByte(const Token &Tok,
// Otherwise, this is an escape character. Advance over it.
bool HadError = false;
- if (SpellingPtr[1] == 'u' || SpellingPtr[1] == 'U') {
+ if (SpellingPtr[1] == 'u' || SpellingPtr[1] == 'U' ||
+ SpellingPtr[1] == 'N') {
const char *EscapePtr = SpellingPtr;
unsigned Len = MeasureUCNEscape(SpellingStart, SpellingPtr, SpellingEnd,
1, Features, HadError);
@@ -1930,8 +2375,8 @@ unsigned StringLiteralParser::getOffsetOfStringByte(const Token &Tok,
ByteNo -= Len;
} else {
ProcessCharEscape(SpellingStart, SpellingPtr, SpellingEnd, HadError,
- FullSourceLoc(Tok.getLocation(), SM),
- CharByteWidth*8, Diags, Features);
+ FullSourceLoc(Tok.getLocation(), SM), CharByteWidth * 8,
+ Diags, Features, StringLiteralEvalMethod::Evaluated);
--ByteNo;
}
assert(!HadError && "This method isn't valid on erroneous strings");
diff --git a/contrib/llvm-project/clang/lib/Lex/MacroArgs.cpp b/contrib/llvm-project/clang/lib/Lex/MacroArgs.cpp
index 7ede00b4aa64..c54f69bb9ead 100644
--- a/contrib/llvm-project/clang/lib/Lex/MacroArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/MacroArgs.cpp
@@ -62,7 +62,7 @@ MacroArgs *MacroArgs::create(const MacroInfo *MI,
// Copy the actual unexpanded tokens to immediately after the result ptr.
if (!UnexpArgTokens.empty()) {
- static_assert(std::is_trivial<Token>::value,
+ static_assert(std::is_trivial_v<Token>,
"assume trivial copyability if copying into the "
"uninitialized array (as opposed to reusing a cached "
"MacroArgs)");
@@ -94,7 +94,7 @@ MacroArgs *MacroArgs::deallocate() {
// Run the dtor to deallocate the vectors.
this->~MacroArgs();
// Release the memory for the object.
- static_assert(std::is_trivially_destructible<Token>::value,
+ static_assert(std::is_trivially_destructible_v<Token>,
"assume trivially destructible and forego destructors");
free(this);
@@ -169,7 +169,7 @@ const std::vector<Token> &MacroArgs::getPreExpArgument(unsigned Arg,
std::vector<Token> &Result = PreExpArgTokens[Arg];
if (!Result.empty()) return Result;
- SaveAndRestore<bool> PreExpandingMacroArgs(PP.InMacroArgPreExpansion, true);
+ SaveAndRestore PreExpandingMacroArgs(PP.InMacroArgPreExpansion, true);
const Token *AT = getUnexpArgument(Arg);
unsigned NumToks = getArgLength(AT)+1; // Include the EOF.
diff --git a/contrib/llvm-project/clang/lib/Lex/MacroInfo.cpp b/contrib/llvm-project/clang/lib/Lex/MacroInfo.cpp
index 1ccd140364ae..39bb0f44eff2 100644
--- a/contrib/llvm-project/clang/lib/Lex/MacroInfo.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/MacroInfo.cpp
@@ -18,16 +18,35 @@
#include "clang/Basic/TokenKinds.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/Token.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
+#include <optional>
#include <utility>
using namespace clang;
+namespace {
+
+// MacroInfo is expected to take 40 bytes on platforms with an 8 byte pointer
+// and 4 byte SourceLocation.
+template <int> class MacroInfoSizeChecker {
+public:
+ [[maybe_unused]] constexpr static bool AsExpected = true;
+};
+template <> class MacroInfoSizeChecker<8> {
+public:
+ [[maybe_unused]] constexpr static bool AsExpected =
+ sizeof(MacroInfo) == (32 + sizeof(SourceLocation) * 2);
+};
+
+static_assert(MacroInfoSizeChecker<sizeof(void *)>::AsExpected,
+ "Unexpected size of MacroInfo");
+
+} // end namespace
+
MacroInfo::MacroInfo(SourceLocation DefLoc)
: Location(DefLoc), IsDefinitionLengthCached(false), IsFunctionLike(false),
IsC99Varargs(false), IsGNUVarargs(false), IsBuiltinMacro(false),
@@ -39,6 +58,7 @@ unsigned MacroInfo::getDefinitionLengthSlow(const SourceManager &SM) const {
assert(!IsDefinitionLengthCached);
IsDefinitionLengthCached = true;
+ ArrayRef<Token> ReplacementTokens = tokens();
if (ReplacementTokens.empty())
return (DefinitionLength = 0);
@@ -76,7 +96,7 @@ bool MacroInfo::isIdenticalTo(const MacroInfo &Other, Preprocessor &PP,
bool Lexically = !Syntactically;
// Check # tokens in replacement, number of args, and various flags all match.
- if (ReplacementTokens.size() != Other.ReplacementTokens.size() ||
+ if (getNumTokens() != Other.getNumTokens() ||
getNumParams() != Other.getNumParams() ||
isFunctionLike() != Other.isFunctionLike() ||
isC99Varargs() != Other.isC99Varargs() ||
@@ -92,13 +112,13 @@ bool MacroInfo::isIdenticalTo(const MacroInfo &Other, Preprocessor &PP,
}
// Check all the tokens.
- for (unsigned i = 0, e = ReplacementTokens.size(); i != e; ++i) {
+ for (unsigned i = 0; i != NumReplacementTokens; ++i) {
const Token &A = ReplacementTokens[i];
const Token &B = Other.ReplacementTokens[i];
if (A.getKind() != B.getKind())
return false;
- // If this isn't the first first token, check that the whitespace and
+ // If this isn't the first token, check that the whitespace and
// start-of-line characteristics match.
if (i != 0 &&
(A.isAtStartOfLine() != B.isAtStartOfLine() ||
@@ -157,7 +177,7 @@ LLVM_DUMP_METHOD void MacroInfo::dump() const {
}
bool First = true;
- for (const Token &Tok : ReplacementTokens) {
+ for (const Token &Tok : tokens()) {
// Leading space is semantically meaningful in a macro definition,
// so preserve it in the dump output.
if (First || Tok.hasLeadingSpace())
@@ -178,11 +198,10 @@ LLVM_DUMP_METHOD void MacroInfo::dump() const {
MacroDirective::DefInfo MacroDirective::getDefinition() {
MacroDirective *MD = this;
SourceLocation UndefLoc;
- Optional<bool> isPublic;
+ std::optional<bool> isPublic;
for (; MD; MD = MD->getPrevious()) {
if (DefMacroDirective *DefMD = dyn_cast<DefMacroDirective>(MD))
- return DefInfo(DefMD, UndefLoc,
- !isPublic.hasValue() || isPublic.getValue());
+ return DefInfo(DefMD, UndefLoc, !isPublic || *isPublic);
if (UndefMacroDirective *UndefMD = dyn_cast<UndefMacroDirective>(MD)) {
UndefLoc = UndefMD->getLocation();
@@ -190,12 +209,11 @@ MacroDirective::DefInfo MacroDirective::getDefinition() {
}
VisibilityMacroDirective *VisMD = cast<VisibilityMacroDirective>(MD);
- if (!isPublic.hasValue())
+ if (!isPublic)
isPublic = VisMD->isPublic();
}
- return DefInfo(nullptr, UndefLoc,
- !isPublic.hasValue() || isPublic.getValue());
+ return DefInfo(nullptr, UndefLoc, !isPublic || *isPublic);
}
const MacroDirective::DefInfo
diff --git a/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp b/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp
index f9af7c2a24fb..10c475f617d4 100644
--- a/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp
@@ -28,7 +28,6 @@
#include "clang/Lex/LiteralSupport.h"
#include "clang/Lex/Token.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
@@ -47,6 +46,7 @@
#include <cassert>
#include <cstdint>
#include <cstring>
+#include <optional>
#include <string>
#include <system_error>
#include <utility>
@@ -75,7 +75,6 @@ void ModuleMap::addLinkAsDependency(Module *Mod) {
Module::HeaderKind ModuleMap::headerRoleToKind(ModuleHeaderRole Role) {
switch ((int)Role) {
- default: llvm_unreachable("unknown header role");
case NormalHeader:
return Module::HK_Normal;
case PrivateHeader:
@@ -84,7 +83,10 @@ Module::HeaderKind ModuleMap::headerRoleToKind(ModuleHeaderRole Role) {
return Module::HK_Textual;
case PrivateHeader | TextualHeader:
return Module::HK_PrivateTextual;
+ case ExcludedHeader:
+ return Module::HK_Excluded;
}
+ llvm_unreachable("unknown header role");
}
ModuleMap::ModuleHeaderRole
@@ -99,11 +101,15 @@ ModuleMap::headerKindToRole(Module::HeaderKind Kind) {
case Module::HK_PrivateTextual:
return ModuleHeaderRole(PrivateHeader | TextualHeader);
case Module::HK_Excluded:
- llvm_unreachable("unexpected header kind");
+ return ExcludedHeader;
}
llvm_unreachable("unknown header kind");
}
+bool ModuleMap::isModular(ModuleHeaderRole Role) {
+ return !(Role & (ModuleMap::TextualHeader | ModuleMap::ExcludedHeader));
+}
+
Module::ExportDecl
ModuleMap::resolveExport(Module *Mod,
const Module::UnresolvedExportDecl &Unresolved,
@@ -167,27 +173,27 @@ static void appendSubframeworkPaths(Module *Mod,
return;
// Add Frameworks/Name.framework for each subframework.
- for (unsigned I = Paths.size() - 1; I != 0; --I)
- llvm::sys::path::append(Path, "Frameworks", Paths[I-1] + ".framework");
+ for (StringRef Framework : llvm::drop_begin(llvm::reverse(Paths)))
+ llvm::sys::path::append(Path, "Frameworks", Framework + ".framework");
}
-Optional<FileEntryRef> ModuleMap::findHeader(
+OptionalFileEntryRef ModuleMap::findHeader(
Module *M, const Module::UnresolvedHeaderDirective &Header,
SmallVectorImpl<char> &RelativePathName, bool &NeedsFramework) {
// Search for the header file within the module's home directory.
- auto *Directory = M->Directory;
+ auto Directory = M->Directory;
SmallString<128> FullPathName(Directory->getName());
- auto GetFile = [&](StringRef Filename) -> Optional<FileEntryRef> {
+ auto GetFile = [&](StringRef Filename) -> OptionalFileEntryRef {
auto File =
expectedToOptional(SourceMgr.getFileManager().getFileRef(Filename));
if (!File || (Header.Size && File->getSize() != *Header.Size) ||
(Header.ModTime && File->getModificationTime() != *Header.ModTime))
- return None;
+ return std::nullopt;
return *File;
};
- auto GetFrameworkFile = [&]() -> Optional<FileEntryRef> {
+ auto GetFrameworkFile = [&]() -> OptionalFileEntryRef {
unsigned FullPathLength = FullPathName.size();
appendSubframeworkPaths(M, RelativePathName);
unsigned RelativePathLength = RelativePathName.size();
@@ -229,7 +235,7 @@ Optional<FileEntryRef> ModuleMap::findHeader(
llvm::sys::path::append(FullPathName, RelativePathName);
auto NormalHdrFile = GetFile(FullPathName);
- if (!NormalHdrFile && Directory->getName().endswith(".framework")) {
+ if (!NormalHdrFile && Directory->getName().ends_with(".framework")) {
// The lack of 'framework' keyword in a module declaration it's a simple
// mistake we can diagnose when the header exists within the proper
// framework style path.
@@ -241,17 +247,56 @@ Optional<FileEntryRef> ModuleMap::findHeader(
<< Header.FileName << M->getFullModuleName();
NeedsFramework = true;
}
- return None;
+ return std::nullopt;
}
return NormalHdrFile;
}
+/// Determine whether the given file name is the name of a builtin
+/// header, supplied by Clang to replace, override, or augment existing system
+/// headers.
+static bool isBuiltinHeaderName(StringRef FileName) {
+ return llvm::StringSwitch<bool>(FileName)
+ .Case("float.h", true)
+ .Case("iso646.h", true)
+ .Case("limits.h", true)
+ .Case("stdalign.h", true)
+ .Case("stdarg.h", true)
+ .Case("stdatomic.h", true)
+ .Case("stdbool.h", true)
+ .Case("stddef.h", true)
+ .Case("stdint.h", true)
+ .Case("tgmath.h", true)
+ .Case("unwind.h", true)
+ .Default(false);
+}
+
+/// Determine whether the given module name is the name of a builtin
+/// module that is cyclic with a system module on some platforms.
+static bool isBuiltInModuleName(StringRef ModuleName) {
+ return llvm::StringSwitch<bool>(ModuleName)
+ .Case("_Builtin_float", true)
+ .Case("_Builtin_inttypes", true)
+ .Case("_Builtin_iso646", true)
+ .Case("_Builtin_limits", true)
+ .Case("_Builtin_stdalign", true)
+ .Case("_Builtin_stdarg", true)
+ .Case("_Builtin_stdatomic", true)
+ .Case("_Builtin_stdbool", true)
+ .Case("_Builtin_stddef", true)
+ .Case("_Builtin_stdint", true)
+ .Case("_Builtin_stdnoreturn", true)
+ .Case("_Builtin_tgmath", true)
+ .Case("_Builtin_unwind", true)
+ .Default(false);
+}
+
void ModuleMap::resolveHeader(Module *Mod,
const Module::UnresolvedHeaderDirective &Header,
bool &NeedsFramework) {
SmallString<128> RelativePathName;
- if (Optional<FileEntryRef> File =
+ if (OptionalFileEntryRef File =
findHeader(Mod, Header, RelativePathName, NeedsFramework)) {
if (Header.IsUmbrella) {
const DirectoryEntry *UmbrellaDir = &File->getDir().getDirEntry();
@@ -260,14 +305,12 @@ void ModuleMap::resolveHeader(Module *Mod,
<< UmbrellaMod->getFullModuleName();
else
// Record this umbrella header.
- setUmbrellaHeader(Mod, *File, Header.FileName, RelativePathName.str());
+ setUmbrellaHeaderAsWritten(Mod, *File, Header.FileName,
+ RelativePathName.str());
} else {
- Module::Header H = {Header.FileName, std::string(RelativePathName.str()),
+ Module::Header H = {Header.FileName, std::string(RelativePathName),
*File};
- if (Header.Kind == Module::HK_Excluded)
- excludeHeader(Mod, H);
- else
- addHeader(Mod, H, headerKindToRole(Header.Kind));
+ addHeader(Mod, H, headerKindToRole(Header.Kind));
}
} else if (Header.HasBuiltinHeader && !Header.Size && !Header.ModTime) {
// There's a builtin header but no corresponding on-disk header. Assume
@@ -293,7 +336,7 @@ bool ModuleMap::resolveAsBuiltinHeader(
llvm::sys::path::is_absolute(Header.FileName) ||
Mod->isPartOfFramework() || !Mod->IsSystem || Header.IsUmbrella ||
!BuiltinIncludeDir || BuiltinIncludeDir == Mod->Directory ||
- !isBuiltinHeader(Header.FileName))
+ !LangOpts.BuiltinHeadersInSystemModules || !isBuiltinHeaderName(Header.FileName))
return false;
// This is a system module with a top-level header. This header
@@ -301,12 +344,12 @@ bool ModuleMap::resolveAsBuiltinHeader(
// supplied by Clang. Find that builtin header.
SmallString<128> Path;
llvm::sys::path::append(Path, BuiltinIncludeDir->getName(), Header.FileName);
- auto File = SourceMgr.getFileManager().getFile(Path);
+ auto File = SourceMgr.getFileManager().getOptionalFileRef(Path);
if (!File)
return false;
+ Module::Header H = {Header.FileName, Header.FileName, *File};
auto Role = headerKindToRole(Header.Kind);
- Module::Header H = {Header.FileName, std::string(Path.str()), *File};
addHeader(Mod, H, Role);
return true;
}
@@ -338,7 +381,7 @@ static StringRef sanitizeFilenameAsIdentifier(StringRef Name,
if (Name.empty())
return Name;
- if (!isValidIdentifier(Name)) {
+ if (!isValidAsciiIdentifier(Name)) {
// If we don't already have something with the form of an identifier,
// create a buffer with the sanitized name.
Buffer.clear();
@@ -346,7 +389,7 @@ static StringRef sanitizeFilenameAsIdentifier(StringRef Name,
Buffer.push_back('_');
Buffer.reserve(Buffer.size() + Name.size());
for (unsigned I = 0, N = Name.size(); I != N; ++I) {
- if (isIdentifierBody(Name[I]))
+ if (isAsciiIdentifierContinue(Name[I]))
Buffer.push_back(Name[I]);
else
Buffer.push_back('_');
@@ -369,32 +412,19 @@ static StringRef sanitizeFilenameAsIdentifier(StringRef Name,
return Name;
}
-/// Determine whether the given file name is the name of a builtin
-/// header, supplied by Clang to replace, override, or augment existing system
-/// headers.
-bool ModuleMap::isBuiltinHeader(StringRef FileName) {
- return llvm::StringSwitch<bool>(FileName)
- .Case("float.h", true)
- .Case("iso646.h", true)
- .Case("limits.h", true)
- .Case("stdalign.h", true)
- .Case("stdarg.h", true)
- .Case("stdatomic.h", true)
- .Case("stdbool.h", true)
- .Case("stddef.h", true)
- .Case("stdint.h", true)
- .Case("tgmath.h", true)
- .Case("unwind.h", true)
- .Default(false);
+bool ModuleMap::isBuiltinHeader(FileEntryRef File) {
+ return File.getDir() == BuiltinIncludeDir && LangOpts.BuiltinHeadersInSystemModules &&
+ isBuiltinHeaderName(llvm::sys::path::filename(File.getName()));
}
-bool ModuleMap::isBuiltinHeader(const FileEntry *File) {
- return File->getDir() == BuiltinIncludeDir &&
- ModuleMap::isBuiltinHeader(llvm::sys::path::filename(File->getName()));
+bool ModuleMap::shouldImportRelativeToBuiltinIncludeDir(StringRef FileName,
+ Module *Module) const {
+ return LangOpts.BuiltinHeadersInSystemModules && BuiltinIncludeDir &&
+ Module->IsSystem && !Module->isPartOfFramework() &&
+ isBuiltinHeaderName(FileName);
}
-ModuleMap::HeadersMap::iterator
-ModuleMap::findKnownHeader(const FileEntry *File) {
+ModuleMap::HeadersMap::iterator ModuleMap::findKnownHeader(FileEntryRef File) {
resolveHeaderDirectives(File);
HeadersMap::iterator Known = Headers.find(File);
if (HeaderInfo.getHeaderSearchOpts().ImplicitModuleMaps &&
@@ -405,29 +435,27 @@ ModuleMap::findKnownHeader(const FileEntry *File) {
return Known;
}
-ModuleMap::KnownHeader
-ModuleMap::findHeaderInUmbrellaDirs(const FileEntry *File,
- SmallVectorImpl<const DirectoryEntry *> &IntermediateDirs) {
+ModuleMap::KnownHeader ModuleMap::findHeaderInUmbrellaDirs(
+ FileEntryRef File, SmallVectorImpl<DirectoryEntryRef> &IntermediateDirs) {
if (UmbrellaDirs.empty())
return {};
- const DirectoryEntry *Dir = File->getDir();
- assert(Dir && "file in no directory");
+ OptionalDirectoryEntryRef Dir = File.getDir();
// Note: as an egregious but useful hack we use the real path here, because
// frameworks moving from top-level frameworks to embedded frameworks tend
// to be symlinked from the top-level location to the embedded location,
// and we need to resolve lookups as if we had found the embedded location.
- StringRef DirName = SourceMgr.getFileManager().getCanonicalName(Dir);
+ StringRef DirName = SourceMgr.getFileManager().getCanonicalName(*Dir);
// Keep walking up the directory hierarchy, looking for a directory with
// an umbrella header.
do {
- auto KnownDir = UmbrellaDirs.find(Dir);
+ auto KnownDir = UmbrellaDirs.find(*Dir);
if (KnownDir != UmbrellaDirs.end())
return KnownHeader(KnownDir->second, NormalHeader);
- IntermediateDirs.push_back(Dir);
+ IntermediateDirs.push_back(*Dir);
// Retrieve our parent path.
DirName = llvm::sys::path::parent_path(DirName);
@@ -435,10 +463,7 @@ ModuleMap::findHeaderInUmbrellaDirs(const FileEntry *File,
break;
// Resolve the parent path to a directory entry.
- if (auto DirEntry = SourceMgr.getFileManager().getDirectory(DirName))
- Dir = *DirEntry;
- else
- Dir = nullptr;
+ Dir = SourceMgr.getFileManager().getOptionalDirectoryRef(DirName);
} while (Dir);
return {};
}
@@ -456,10 +481,8 @@ static bool violatesPrivateInclude(Module *RequestingModule,
&Header.getModule()->Headers[Module::HK_Private],
&Header.getModule()->Headers[Module::HK_PrivateTextual]};
for (auto *Hs : HeaderList)
- IsPrivate |=
- std::find_if(Hs->begin(), Hs->end(), [&](const Module::Header &H) {
- return H.Entry == IncFileEnt;
- }) != Hs->end();
+ IsPrivate |= llvm::any_of(
+ *Hs, [&](const Module::Header &H) { return H.Entry == IncFileEnt; });
assert(IsPrivate && "inconsistent headers and roles");
}
#endif
@@ -473,8 +496,7 @@ static Module *getTopLevelOrNull(Module *M) {
void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
bool RequestingModuleIsModuleInterface,
SourceLocation FilenameLoc,
- StringRef Filename,
- const FileEntry *File) {
+ StringRef Filename, FileEntryRef File) {
// No errors for indirect modules. This may be a bit of a problem for modules
// with no source files.
if (getTopLevelOrNull(RequestingModule) != getTopLevelOrNull(SourceModule))
@@ -482,7 +504,7 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
if (RequestingModule) {
resolveUses(RequestingModule, /*Complain=*/false);
- resolveHeaderDirectives(RequestingModule);
+ resolveHeaderDirectives(RequestingModule, /*File=*/std::nullopt);
}
bool Excluded = false;
@@ -492,6 +514,12 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
HeadersMap::iterator Known = findKnownHeader(File);
if (Known != Headers.end()) {
for (const KnownHeader &Header : Known->second) {
+ // Excluded headers don't really belong to a module.
+ if (Header.getRole() == ModuleMap::ExcludedHeader) {
+ Excluded = true;
+ continue;
+ }
+
// Remember private headers for later printing of a diagnostic.
if (violatesPrivateInclude(RequestingModule, File, Header)) {
Private = Header.getModule();
@@ -522,8 +550,9 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
// We have found a module, but we don't use it.
if (NotUsed) {
- Diags.Report(FilenameLoc, diag::err_undeclared_use_of_module)
- << RequestingModule->getTopLevelModule()->Name << Filename;
+ Diags.Report(FilenameLoc, diag::err_undeclared_use_of_module_indirect)
+ << RequestingModule->getTopLevelModule()->Name << Filename
+ << NotUsed->Name;
return;
}
@@ -542,7 +571,7 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
diag::warn_non_modular_include_in_framework_module :
diag::warn_non_modular_include_in_module;
Diags.Report(FilenameLoc, DiagID) << RequestingModule->getFullModuleName()
- << File->getName();
+ << File.getName();
}
}
@@ -565,12 +594,18 @@ static bool isBetterKnownHeader(const ModuleMap::KnownHeader &New,
(Old.getRole() & ModuleMap::TextualHeader))
return !(New.getRole() & ModuleMap::TextualHeader);
+ // Prefer a non-excluded header over an excluded header.
+ if ((New.getRole() == ModuleMap::ExcludedHeader) !=
+ (Old.getRole() == ModuleMap::ExcludedHeader))
+ return New.getRole() != ModuleMap::ExcludedHeader;
+
// Don't have a reason to choose between these. Just keep the first one.
return false;
}
-ModuleMap::KnownHeader ModuleMap::findModuleForHeader(const FileEntry *File,
- bool AllowTextual) {
+ModuleMap::KnownHeader ModuleMap::findModuleForHeader(FileEntryRef File,
+ bool AllowTextual,
+ bool AllowExcluded) {
auto MakeResult = [&](ModuleMap::KnownHeader R) -> ModuleMap::KnownHeader {
if (!AllowTextual && R.getRole() & ModuleMap::TextualHeader)
return {};
@@ -582,6 +617,9 @@ ModuleMap::KnownHeader ModuleMap::findModuleForHeader(const FileEntry *File,
ModuleMap::KnownHeader Result;
// Iterate over all modules that 'File' is part of to find the best fit.
for (KnownHeader &H : Known->second) {
+ // Cannot use a module if the header is excluded in it.
+ if (!AllowExcluded && H.getRole() == ModuleMap::ExcludedHeader)
+ continue;
// Prefer a header from the source module over all others.
if (H.getModule()->getTopLevelModule() == SourceModule)
return MakeResult(H);
@@ -595,10 +633,10 @@ ModuleMap::KnownHeader ModuleMap::findModuleForHeader(const FileEntry *File,
}
ModuleMap::KnownHeader
-ModuleMap::findOrCreateModuleForHeaderInUmbrellaDir(const FileEntry *File) {
+ModuleMap::findOrCreateModuleForHeaderInUmbrellaDir(FileEntryRef File) {
assert(!Headers.count(File) && "already have a module for this header");
- SmallVector<const DirectoryEntry *, 2> SkippedDirs;
+ SmallVector<DirectoryEntryRef, 2> SkippedDirs;
KnownHeader H = findHeaderInUmbrellaDirs(File, SkippedDirs);
if (H) {
Module *Result = H.getModule();
@@ -606,11 +644,11 @@ ModuleMap::findOrCreateModuleForHeaderInUmbrellaDir(const FileEntry *File) {
// Search up the module stack until we find a module with an umbrella
// directory.
Module *UmbrellaModule = Result;
- while (!UmbrellaModule->getUmbrellaDir() && UmbrellaModule->Parent)
+ while (!UmbrellaModule->getEffectiveUmbrellaDir() && UmbrellaModule->Parent)
UmbrellaModule = UmbrellaModule->Parent;
if (UmbrellaModule->InferSubmodules) {
- const FileEntry *UmbrellaModuleMap =
+ OptionalFileEntryRef UmbrellaModuleMap =
getModuleMapFileForUniquing(UmbrellaModule);
// Infer submodules for each of the directories we found between
@@ -618,18 +656,18 @@ ModuleMap::findOrCreateModuleForHeaderInUmbrellaDir(const FileEntry *File) {
// the actual header is located.
bool Explicit = UmbrellaModule->InferExplicitSubmodules;
- for (unsigned I = SkippedDirs.size(); I != 0; --I) {
+ for (DirectoryEntryRef SkippedDir : llvm::reverse(SkippedDirs)) {
// Find or create the module that corresponds to this directory name.
SmallString<32> NameBuf;
StringRef Name = sanitizeFilenameAsIdentifier(
- llvm::sys::path::stem(SkippedDirs[I-1]->getName()), NameBuf);
+ llvm::sys::path::stem(SkippedDir.getName()), NameBuf);
Result = findOrCreateModule(Name, Result, /*IsFramework=*/false,
Explicit).first;
InferredModuleAllowedBy[Result] = UmbrellaModuleMap;
Result->IsInferred = true;
// Associate the module and the directory.
- UmbrellaDirs[SkippedDirs[I-1]] = Result;
+ UmbrellaDirs[SkippedDir] = Result;
// If inferred submodules export everything they import, add a
// wildcard to the set of exports.
@@ -640,7 +678,7 @@ ModuleMap::findOrCreateModuleForHeaderInUmbrellaDir(const FileEntry *File) {
// Infer a submodule with the same name as this header file.
SmallString<32> NameBuf;
StringRef Name = sanitizeFilenameAsIdentifier(
- llvm::sys::path::stem(File->getName()), NameBuf);
+ llvm::sys::path::stem(File.getName()), NameBuf);
Result = findOrCreateModule(Name, Result, /*IsFramework=*/false,
Explicit).first;
InferredModuleAllowedBy[Result] = UmbrellaModuleMap;
@@ -667,7 +705,7 @@ ModuleMap::findOrCreateModuleForHeaderInUmbrellaDir(const FileEntry *File) {
}
ArrayRef<ModuleMap::KnownHeader>
-ModuleMap::findAllModulesForHeader(const FileEntry *File) {
+ModuleMap::findAllModulesForHeader(FileEntryRef File) {
HeadersMap::iterator Known = findKnownHeader(File);
if (Known != Headers.end())
return Known->second;
@@ -675,26 +713,25 @@ ModuleMap::findAllModulesForHeader(const FileEntry *File) {
if (findOrCreateModuleForHeaderInUmbrellaDir(File))
return Headers.find(File)->second;
- return None;
+ return std::nullopt;
}
ArrayRef<ModuleMap::KnownHeader>
-ModuleMap::findResolvedModulesForHeader(const FileEntry *File) const {
+ModuleMap::findResolvedModulesForHeader(FileEntryRef File) const {
// FIXME: Is this necessary?
resolveHeaderDirectives(File);
auto It = Headers.find(File);
if (It == Headers.end())
- return None;
+ return std::nullopt;
return It->second;
}
-bool ModuleMap::isHeaderInUnavailableModule(const FileEntry *Header) const {
+bool ModuleMap::isHeaderInUnavailableModule(FileEntryRef Header) const {
return isHeaderUnavailableInModule(Header, nullptr);
}
-bool
-ModuleMap::isHeaderUnavailableInModule(const FileEntry *Header,
- const Module *RequestingModule) const {
+bool ModuleMap::isHeaderUnavailableInModule(
+ FileEntryRef Header, const Module *RequestingModule) const {
resolveHeaderDirectives(Header);
HeadersMap::const_iterator Known = Headers.find(Header);
if (Known != Headers.end()) {
@@ -703,6 +740,9 @@ ModuleMap::isHeaderUnavailableInModule(const FileEntry *Header,
E = Known->second.end();
I != E; ++I) {
+ if (I->getRole() == ModuleMap::ExcludedHeader)
+ continue;
+
if (I->isAvailable() &&
(!RequestingModule ||
I->getModule()->isSubModuleOf(RequestingModule))) {
@@ -719,8 +759,8 @@ ModuleMap::isHeaderUnavailableInModule(const FileEntry *Header,
return true;
}
- const DirectoryEntry *Dir = Header->getDir();
- SmallVector<const DirectoryEntry *, 2> SkippedDirs;
+ OptionalDirectoryEntryRef Dir = Header.getDir();
+ SmallVector<DirectoryEntryRef, 2> SkippedDirs;
StringRef DirName = Dir->getName();
auto IsUnavailable = [&](const Module *M) {
@@ -731,8 +771,7 @@ ModuleMap::isHeaderUnavailableInModule(const FileEntry *Header,
// Keep walking up the directory hierarchy, looking for a directory with
// an umbrella header.
do {
- llvm::DenseMap<const DirectoryEntry *, Module *>::const_iterator KnownDir
- = UmbrellaDirs.find(Dir);
+ auto KnownDir = UmbrellaDirs.find(*Dir);
if (KnownDir != UmbrellaDirs.end()) {
Module *Found = KnownDir->second;
if (IsUnavailable(Found))
@@ -741,16 +780,16 @@ ModuleMap::isHeaderUnavailableInModule(const FileEntry *Header,
// Search up the module stack until we find a module with an umbrella
// directory.
Module *UmbrellaModule = Found;
- while (!UmbrellaModule->getUmbrellaDir() && UmbrellaModule->Parent)
+ while (!UmbrellaModule->getEffectiveUmbrellaDir() &&
+ UmbrellaModule->Parent)
UmbrellaModule = UmbrellaModule->Parent;
if (UmbrellaModule->InferSubmodules) {
- for (unsigned I = SkippedDirs.size(); I != 0; --I) {
+ for (DirectoryEntryRef SkippedDir : llvm::reverse(SkippedDirs)) {
// Find or create the module that corresponds to this directory name.
SmallString<32> NameBuf;
StringRef Name = sanitizeFilenameAsIdentifier(
- llvm::sys::path::stem(SkippedDirs[I-1]->getName()),
- NameBuf);
+ llvm::sys::path::stem(SkippedDir.getName()), NameBuf);
Found = lookupModuleQualified(Name, Found);
if (!Found)
return false;
@@ -761,7 +800,7 @@ ModuleMap::isHeaderUnavailableInModule(const FileEntry *Header,
// Infer a submodule with the same name as this header file.
SmallString<32> NameBuf;
StringRef Name = sanitizeFilenameAsIdentifier(
- llvm::sys::path::stem(Header->getName()),
+ llvm::sys::path::stem(Header.getName()),
NameBuf);
Found = lookupModuleQualified(Name, Found);
if (!Found)
@@ -771,7 +810,7 @@ ModuleMap::isHeaderUnavailableInModule(const FileEntry *Header,
return IsUnavailable(Found);
}
- SkippedDirs.push_back(Dir);
+ SkippedDirs.push_back(*Dir);
// Retrieve our parent path.
DirName = llvm::sys::path::parent_path(DirName);
@@ -779,10 +818,7 @@ ModuleMap::isHeaderUnavailableInModule(const FileEntry *Header,
break;
// Resolve the parent path to a directory entry.
- if (auto DirEntry = SourceMgr.getFileManager().getDirectory(DirName))
- Dir = *DirEntry;
- else
- Dir = nullptr;
+ Dir = SourceMgr.getFileManager().getOptionalDirectoryRef(DirName);
} while (Dir);
return false;
@@ -833,12 +869,31 @@ std::pair<Module *, bool> ModuleMap::findOrCreateModule(StringRef Name,
return std::make_pair(Result, true);
}
-Module *ModuleMap::createGlobalModuleFragmentForModuleUnit(SourceLocation Loc) {
- PendingSubmodules.emplace_back(
- new Module("<global>", Loc, nullptr, /*IsFramework*/ false,
- /*IsExplicit*/ true, NumCreatedModules++));
- PendingSubmodules.back()->Kind = Module::GlobalModuleFragment;
- return PendingSubmodules.back().get();
+Module *ModuleMap::createGlobalModuleFragmentForModuleUnit(SourceLocation Loc,
+ Module *Parent) {
+ auto *Result = new Module("<global>", Loc, Parent, /*IsFramework*/ false,
+ /*IsExplicit*/ true, NumCreatedModules++);
+ Result->Kind = Module::ExplicitGlobalModuleFragment;
+ // If the created module isn't owned by a parent, send it to PendingSubmodules
+ // to wait for its parent.
+ if (!Result->Parent)
+ PendingSubmodules.emplace_back(Result);
+ return Result;
+}
+
+Module *
+ModuleMap::createImplicitGlobalModuleFragmentForModuleUnit(SourceLocation Loc,
+ Module *Parent) {
+ assert(Parent && "We should only create an implicit global module fragment "
+ "in a module purview");
+ // Note: Here the `IsExplicit` parameter refers to the semantics in clang
+ // modules. All the non-explicit submodules in clang modules will be exported
+ // too. Here we simplify the implementation by using the concept.
+ auto *Result =
+ new Module("<implicit global>", Loc, Parent, /*IsFramework=*/false,
+ /*IsExplicit=*/false, NumCreatedModules++);
+ Result->Kind = Module::ImplicitGlobalModuleFragment;
+ return Result;
}
Module *
@@ -851,90 +906,98 @@ ModuleMap::createPrivateModuleFragmentForInterfaceUnit(Module *Parent,
return Result;
}
-Module *ModuleMap::createModuleForInterfaceUnit(SourceLocation Loc,
- StringRef Name,
- Module *GlobalModule) {
- assert(LangOpts.CurrentModule == Name && "module name mismatch");
- assert(!Modules[Name] && "redefining existing module");
-
+Module *ModuleMap::createModuleUnitWithKind(SourceLocation Loc, StringRef Name,
+ Module::ModuleKind Kind) {
auto *Result =
new Module(Name, Loc, nullptr, /*IsFramework*/ false,
/*IsExplicit*/ false, NumCreatedModules++);
- Result->Kind = Module::ModuleInterfaceUnit;
- Modules[Name] = SourceModule = Result;
+ Result->Kind = Kind;
- // Reparent the current global module fragment as a submodule of this module.
+ // Reparent any current global module fragment as a submodule of this module.
for (auto &Submodule : PendingSubmodules) {
Submodule->setParent(Result);
Submodule.release(); // now owned by parent
}
PendingSubmodules.clear();
+ return Result;
+}
+
+Module *ModuleMap::createModuleForInterfaceUnit(SourceLocation Loc,
+ StringRef Name) {
+ assert(LangOpts.CurrentModule == Name && "module name mismatch");
+ assert(!Modules[Name] && "redefining existing module");
+
+ auto *Result =
+ createModuleUnitWithKind(Loc, Name, Module::ModuleInterfaceUnit);
+ Modules[Name] = SourceModule = Result;
// Mark the main source file as being within the newly-created module so that
// declarations and macros are properly visibility-restricted to it.
- auto *MainFile = SourceMgr.getFileEntryForID(SourceMgr.getMainFileID());
+ auto MainFile = SourceMgr.getFileEntryRefForID(SourceMgr.getMainFileID());
assert(MainFile && "no input file for module interface");
- Headers[MainFile].push_back(KnownHeader(Result, PrivateHeader));
+ Headers[*MainFile].push_back(KnownHeader(Result, PrivateHeader));
return Result;
}
-Module *ModuleMap::createHeaderModule(StringRef Name,
- ArrayRef<Module::Header> Headers) {
+Module *ModuleMap::createModuleForImplementationUnit(SourceLocation Loc,
+ StringRef Name) {
assert(LangOpts.CurrentModule == Name && "module name mismatch");
- assert(!Modules[Name] && "redefining existing module");
+ // The interface for this implementation must exist and be loaded.
+ assert(Modules[Name] && Modules[Name]->Kind == Module::ModuleInterfaceUnit &&
+ "creating implementation module without an interface");
+
+ // Create an entry in the modules map to own the implementation unit module.
+ // User module names must not start with a period (so that this cannot clash
+ // with any legal user-defined module name).
+ StringRef IName = ".ImplementationUnit";
+ assert(!Modules[IName] && "multiple implementation units?");
auto *Result =
- new Module(Name, SourceLocation(), nullptr, /*IsFramework*/ false,
- /*IsExplicit*/ false, NumCreatedModules++);
- Result->Kind = Module::ModuleInterfaceUnit;
- Modules[Name] = SourceModule = Result;
+ createModuleUnitWithKind(Loc, Name, Module::ModuleImplementationUnit);
+ Modules[IName] = SourceModule = Result;
- for (const Module::Header &H : Headers) {
- auto *M = new Module(H.NameAsWritten, SourceLocation(), Result,
- /*IsFramework*/ false,
- /*IsExplicit*/ true, NumCreatedModules++);
- // Header modules are implicitly 'export *'.
- M->Exports.push_back(Module::ExportDecl(nullptr, true));
- addHeader(M, H, NormalHeader);
- }
+ // Check that the main file is present.
+ assert(SourceMgr.getFileEntryForID(SourceMgr.getMainFileID()) &&
+ "no input file for module implementation");
+
+ return Result;
+}
+Module *ModuleMap::createHeaderUnit(SourceLocation Loc, StringRef Name,
+ Module::Header H) {
+ assert(LangOpts.CurrentModule == Name && "module name mismatch");
+ assert(!Modules[Name] && "redefining existing module");
+
+ auto *Result = new Module(Name, Loc, nullptr, /*IsFramework*/ false,
+ /*IsExplicit*/ false, NumCreatedModules++);
+ Result->Kind = Module::ModuleHeaderUnit;
+ Modules[Name] = SourceModule = Result;
+ addHeader(Result, H, NormalHeader);
return Result;
}
/// For a framework module, infer the framework against which we
/// should link.
-static void inferFrameworkLink(Module *Mod, const DirectoryEntry *FrameworkDir,
- FileManager &FileMgr) {
+static void inferFrameworkLink(Module *Mod) {
assert(Mod->IsFramework && "Can only infer linking for framework modules");
assert(!Mod->isSubFramework() &&
"Can only infer linking for top-level frameworks");
- SmallString<128> LibName;
- LibName += FrameworkDir->getName();
- llvm::sys::path::append(LibName, Mod->Name);
-
- // The library name of a framework has more than one possible extension since
- // the introduction of the text-based dynamic library format. We need to check
- // for both before we give up.
- for (const char *extension : {"", ".tbd"}) {
- llvm::sys::path::replace_extension(LibName, extension);
- if (FileMgr.getFile(LibName)) {
- Mod->LinkLibraries.push_back(Module::LinkLibrary(Mod->Name,
- /*IsFramework=*/true));
- return;
- }
- }
+ StringRef FrameworkName(Mod->Name);
+ FrameworkName.consume_back("_Private");
+ Mod->LinkLibraries.push_back(Module::LinkLibrary(FrameworkName.str(),
+ /*IsFramework=*/true));
}
-Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
+Module *ModuleMap::inferFrameworkModule(DirectoryEntryRef FrameworkDir,
bool IsSystem, Module *Parent) {
Attributes Attrs;
Attrs.IsSystem = IsSystem;
return inferFrameworkModule(FrameworkDir, Attrs, Parent);
}
-Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
+Module *ModuleMap::inferFrameworkModule(DirectoryEntryRef FrameworkDir,
Attributes Attrs, Module *Parent) {
// Note: as an egregious but useful hack we use the real path here, because
// we might be looking at an embedded framework that symlinks out to a
@@ -958,14 +1021,14 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
// If the framework has a parent path from which we're allowed to infer
// a framework module, do so.
- const FileEntry *ModuleMapFile = nullptr;
+ OptionalFileEntryRef ModuleMapFile;
if (!Parent) {
// Determine whether we're allowed to infer a module map.
bool canInfer = false;
if (llvm::sys::path::has_parent_path(FrameworkDirName)) {
// Figure out the parent path.
StringRef Parent = llvm::sys::path::parent_path(FrameworkDirName);
- if (auto ParentDir = FileMgr.getDirectory(Parent)) {
+ if (auto ParentDir = FileMgr.getOptionalDirectoryRef(Parent)) {
// Check whether we have already looked into the parent directory
// for a module map.
llvm::DenseMap<const DirectoryEntry *, InferredDirectory>::const_iterator
@@ -973,10 +1036,10 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
if (inferred == InferredDirectories.end()) {
// We haven't looked here before. Load a module map, if there is
// one.
- bool IsFrameworkDir = Parent.endswith(".framework");
- if (const FileEntry *ModMapFile =
- HeaderInfo.lookupModuleMapFile(*ParentDir, IsFrameworkDir)) {
- parseModuleMapFile(ModMapFile, Attrs.IsSystem, *ParentDir);
+ bool IsFrameworkDir = Parent.ends_with(".framework");
+ if (OptionalFileEntryRef ModMapFile =
+ HeaderInfo.lookupModuleMapFile(*ParentDir, IsFrameworkDir)) {
+ parseModuleMapFile(*ModMapFile, Attrs.IsSystem, *ParentDir);
inferred = InferredDirectories.find(*ParentDir);
}
@@ -989,9 +1052,8 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
// We're allowed to infer for this directory, but make sure it's okay
// to infer this particular module.
StringRef Name = llvm::sys::path::stem(FrameworkDirName);
- canInfer = std::find(inferred->second.ExcludedModules.begin(),
- inferred->second.ExcludedModules.end(),
- Name) == inferred->second.ExcludedModules.end();
+ canInfer =
+ !llvm::is_contained(inferred->second.ExcludedModules, Name);
Attrs.IsSystem |= inferred->second.Attrs.IsSystem;
Attrs.IsExternC |= inferred->second.Attrs.IsExternC;
@@ -1006,14 +1068,14 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
// If we're not allowed to infer a framework module, don't.
if (!canInfer)
return nullptr;
- } else
+ } else {
ModuleMapFile = getModuleMapFileForUniquing(Parent);
-
+ }
// Look for an umbrella header.
- SmallString<128> UmbrellaName = StringRef(FrameworkDir->getName());
+ SmallString<128> UmbrellaName = FrameworkDir.getName();
llvm::sys::path::append(UmbrellaName, "Headers", ModuleName + ".h");
- auto UmbrellaHeader = FileMgr.getFile(UmbrellaName);
+ auto UmbrellaHeader = FileMgr.getOptionalFileRef(UmbrellaName);
// FIXME: If there's no umbrella header, we could probably scan the
// framework to load *everything*. But, it's not clear that this is a good
@@ -1045,7 +1107,8 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
RelativePath = llvm::sys::path::relative_path(RelativePath);
// umbrella header "umbrella-header-name"
- setUmbrellaHeader(Result, *UmbrellaHeader, ModuleName + ".h", RelativePath);
+ setUmbrellaHeaderAsWritten(Result, *UmbrellaHeader, ModuleName + ".h",
+ RelativePath);
// export *
Result->Exports.push_back(Module::ExportDecl(nullptr, true));
@@ -1056,8 +1119,7 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
// Look for subframeworks.
std::error_code EC;
- SmallString<128> SubframeworksDirName
- = StringRef(FrameworkDir->getName());
+ SmallString<128> SubframeworksDirName = FrameworkDir.getName();
llvm::sys::path::append(SubframeworksDirName, "Frameworks");
llvm::sys::path::native(SubframeworksDirName);
llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
@@ -1065,11 +1127,10 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
Dir = FS.dir_begin(SubframeworksDirName, EC),
DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
- if (!StringRef(Dir->path()).endswith(".framework"))
+ if (!StringRef(Dir->path()).ends_with(".framework"))
continue;
- if (auto SubframeworkDir =
- FileMgr.getDirectory(Dir->path())) {
+ if (auto SubframeworkDir = FileMgr.getOptionalDirectoryRef(Dir->path())) {
// Note: as an egregious but useful hack, we use the real path here and
// check whether it is actually a subdirectory of the parent directory.
// This will not be the case if the 'subframework' is actually a symlink
@@ -1102,9 +1163,8 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
// If the module is a top-level framework, automatically link against the
// framework.
- if (!Result->isSubFramework()) {
- inferFrameworkLink(Result, FrameworkDir, FileMgr);
- }
+ if (!Result->isSubFramework())
+ inferFrameworkLink(Result);
return Result;
}
@@ -1124,24 +1184,24 @@ Module *ModuleMap::createShadowedModule(StringRef Name, bool IsFramework,
return Result;
}
-void ModuleMap::setUmbrellaHeader(
- Module *Mod, const FileEntry *UmbrellaHeader, const Twine &NameAsWritten,
+void ModuleMap::setUmbrellaHeaderAsWritten(
+ Module *Mod, FileEntryRef UmbrellaHeader, const Twine &NameAsWritten,
const Twine &PathRelativeToRootModuleDirectory) {
Headers[UmbrellaHeader].push_back(KnownHeader(Mod, NormalHeader));
Mod->Umbrella = UmbrellaHeader;
Mod->UmbrellaAsWritten = NameAsWritten.str();
Mod->UmbrellaRelativeToRootModuleDirectory =
PathRelativeToRootModuleDirectory.str();
- UmbrellaDirs[UmbrellaHeader->getDir()] = Mod;
+ UmbrellaDirs[UmbrellaHeader.getDir()] = Mod;
// Notify callbacks that we just added a new header.
for (const auto &Cb : Callbacks)
- Cb->moduleMapAddUmbrellaHeader(&SourceMgr.getFileManager(), UmbrellaHeader);
+ Cb->moduleMapAddUmbrellaHeader(UmbrellaHeader);
}
-void ModuleMap::setUmbrellaDir(Module *Mod, const DirectoryEntry *UmbrellaDir,
- const Twine &NameAsWritten,
- const Twine &PathRelativeToRootModuleDirectory) {
+void ModuleMap::setUmbrellaDirAsWritten(
+ Module *Mod, DirectoryEntryRef UmbrellaDir, const Twine &NameAsWritten,
+ const Twine &PathRelativeToRootModuleDirectory) {
Mod->Umbrella = UmbrellaDir;
Mod->UmbrellaAsWritten = NameAsWritten.str();
Mod->UmbrellaRelativeToRootModuleDirectory =
@@ -1189,25 +1249,35 @@ void ModuleMap::resolveHeaderDirectives(const FileEntry *File) const {
auto BySize = LazyHeadersBySize.find(File->getSize());
if (BySize != LazyHeadersBySize.end()) {
for (auto *M : BySize->second)
- resolveHeaderDirectives(M);
+ resolveHeaderDirectives(M, File);
LazyHeadersBySize.erase(BySize);
}
auto ByModTime = LazyHeadersByModTime.find(File->getModificationTime());
if (ByModTime != LazyHeadersByModTime.end()) {
for (auto *M : ByModTime->second)
- resolveHeaderDirectives(M);
+ resolveHeaderDirectives(M, File);
LazyHeadersByModTime.erase(ByModTime);
}
}
-void ModuleMap::resolveHeaderDirectives(Module *Mod) const {
+void ModuleMap::resolveHeaderDirectives(
+ Module *Mod, std::optional<const FileEntry *> File) const {
bool NeedsFramework = false;
- for (auto &Header : Mod->UnresolvedHeaders)
- // This operation is logically const; we're just changing how we represent
- // the header information for this file.
- const_cast<ModuleMap*>(this)->resolveHeader(Mod, Header, NeedsFramework);
- Mod->UnresolvedHeaders.clear();
+ SmallVector<Module::UnresolvedHeaderDirective, 1> NewHeaders;
+ const auto Size = File ? (*File)->getSize() : 0;
+ const auto ModTime = File ? (*File)->getModificationTime() : 0;
+
+ for (auto &Header : Mod->UnresolvedHeaders) {
+ if (File && ((Header.ModTime && Header.ModTime != ModTime) ||
+ (Header.Size && Header.Size != Size)))
+ NewHeaders.push_back(Header);
+ else
+ // This operation is logically const; we're just changing how we represent
+ // the header information for this file.
+ const_cast<ModuleMap *>(this)->resolveHeader(Mod, Header, NeedsFramework);
+ }
+ Mod->UnresolvedHeaders.swap(NewHeaders);
}
void ModuleMap::addHeader(Module *Mod, Module::Header Header,
@@ -1218,15 +1288,13 @@ void ModuleMap::addHeader(Module *Mod, Module::Header Header,
// FIXME: Should we diagnose if a header is listed twice in the
// same module definition?
auto &HeaderList = Headers[Header.Entry];
- for (auto H : HeaderList)
- if (H == KH)
- return;
+ if (llvm::is_contained(HeaderList, KH))
+ return;
HeaderList.push_back(KH);
Mod->Headers[headerRoleToKind(Role)].push_back(Header);
- bool isCompilingModuleHeader =
- LangOpts.isCompilingModule() && Mod->getTopLevelModule() == SourceModule;
+ bool isCompilingModuleHeader = Mod->isForBuilding(LangOpts);
if (!Imported || isCompilingModuleHeader) {
// When we import HeaderFileInfo, the external source is expected to
// set the isModuleHeader flag itself.
@@ -1236,29 +1304,20 @@ void ModuleMap::addHeader(Module *Mod, Module::Header Header,
// Notify callbacks that we just added a new header.
for (const auto &Cb : Callbacks)
- Cb->moduleMapAddHeader(Header.Entry->getName());
-}
-
-void ModuleMap::excludeHeader(Module *Mod, Module::Header Header) {
- // Add this as a known header so we won't implicitly add it to any
- // umbrella directory module.
- // FIXME: Should we only exclude it from umbrella modules within the
- // specified module?
- (void) Headers[Header.Entry];
-
- Mod->Headers[Module::HK_Excluded].push_back(std::move(Header));
+ Cb->moduleMapAddHeader(Header.Entry.getName());
}
-const FileEntry *
+OptionalFileEntryRef
ModuleMap::getContainingModuleMapFile(const Module *Module) const {
if (Module->DefinitionLoc.isInvalid())
- return nullptr;
+ return std::nullopt;
- return SourceMgr.getFileEntryForID(
- SourceMgr.getFileID(Module->DefinitionLoc));
+ return SourceMgr.getFileEntryRefForID(
+ SourceMgr.getFileID(Module->DefinitionLoc));
}
-const FileEntry *ModuleMap::getModuleMapFileForUniquing(const Module *M) const {
+OptionalFileEntryRef
+ModuleMap::getModuleMapFileForUniquing(const Module *M) const {
if (M->IsInferred) {
assert(InferredModuleAllowedBy.count(M) && "missing inferred module map");
return InferredModuleAllowedBy.find(M)->second;
@@ -1266,13 +1325,47 @@ const FileEntry *ModuleMap::getModuleMapFileForUniquing(const Module *M) const {
return getContainingModuleMapFile(M);
}
-void ModuleMap::setInferredModuleAllowedBy(Module *M, const FileEntry *ModMap) {
+void ModuleMap::setInferredModuleAllowedBy(Module *M,
+ OptionalFileEntryRef ModMap) {
assert(M->IsInferred && "module not inferred");
InferredModuleAllowedBy[M] = ModMap;
}
+std::error_code
+ModuleMap::canonicalizeModuleMapPath(SmallVectorImpl<char> &Path) {
+ StringRef Dir = llvm::sys::path::parent_path({Path.data(), Path.size()});
+
+ // Do not canonicalize within the framework; the module map parser expects
+ // Modules/ not Versions/A/Modules.
+ if (llvm::sys::path::filename(Dir) == "Modules") {
+ StringRef Parent = llvm::sys::path::parent_path(Dir);
+ if (Parent.ends_with(".framework"))
+ Dir = Parent;
+ }
+
+ FileManager &FM = SourceMgr.getFileManager();
+ auto DirEntry = FM.getDirectoryRef(Dir.empty() ? "." : Dir);
+ if (!DirEntry)
+ return llvm::errorToErrorCode(DirEntry.takeError());
+
+ // Canonicalize the directory.
+ StringRef CanonicalDir = FM.getCanonicalName(*DirEntry);
+ if (CanonicalDir != Dir)
+ llvm::sys::path::replace_path_prefix(Path, Dir, CanonicalDir);
+
+ // In theory, the filename component should also be canonicalized if it
+ // on a case-insensitive filesystem. However, the extra canonicalization is
+ // expensive and if clang looked up the filename it will always be lowercase.
+
+ // Remove ., remove redundant separators, and switch to native separators.
+ // This is needed for separators between CanonicalDir and the filename.
+ llvm::sys::path::remove_dots(Path);
+
+ return std::error_code();
+}
+
void ModuleMap::addAdditionalModuleMapFile(const Module *M,
- const FileEntry *ModuleMap) {
+ FileEntryRef ModuleMap) {
AdditionalModMaps[M].insert(ModuleMap);
}
@@ -1286,7 +1379,7 @@ LLVM_DUMP_METHOD void ModuleMap::dump() {
llvm::errs() << "Headers:";
for (HeadersMap::iterator H = Headers.begin(), HEnd = Headers.end();
H != HEnd; ++H) {
- llvm::errs() << " \"" << H->first->getName() << "\" -> ";
+ llvm::errs() << " \"" << H->first.getName() << "\" -> ";
for (SmallVectorImpl<KnownHeader>::const_iterator I = H->second.begin(),
E = H->second.end();
I != E; ++I) {
@@ -1312,16 +1405,17 @@ bool ModuleMap::resolveExports(Module *Mod, bool Complain) {
}
bool ModuleMap::resolveUses(Module *Mod, bool Complain) {
- auto Unresolved = std::move(Mod->UnresolvedDirectUses);
- Mod->UnresolvedDirectUses.clear();
+ auto *Top = Mod->getTopLevelModule();
+ auto Unresolved = std::move(Top->UnresolvedDirectUses);
+ Top->UnresolvedDirectUses.clear();
for (auto &UDU : Unresolved) {
- Module *DirectUse = resolveModuleId(UDU, Mod, Complain);
+ Module *DirectUse = resolveModuleId(UDU, Top, Complain);
if (DirectUse)
- Mod->DirectUses.push_back(DirectUse);
+ Top->DirectUses.push_back(DirectUse);
else
- Mod->UnresolvedDirectUses.push_back(UDU);
+ Top->UnresolvedDirectUses.push_back(UDU);
}
- return !Mod->UnresolvedDirectUses.empty();
+ return !Top->UnresolvedDirectUses.empty();
}
bool ModuleMap::resolveConflicts(Module *Mod, bool Complain) {
@@ -1423,14 +1517,14 @@ namespace clang {
ModuleMap &Map;
/// The current module map file.
- const FileEntry *ModuleMapFile;
+ FileEntryRef ModuleMapFile;
/// Source location of most recent parsed module declaration
SourceLocation CurrModuleDeclLoc;
/// The directory that file names in this module map file should
/// be resolved relative to.
- const DirectoryEntry *Directory;
+ DirectoryEntryRef Directory;
/// Whether this module map is in a system header directory.
bool IsSystem;
@@ -1465,8 +1559,6 @@ namespace clang {
/// (or the end of the file).
void skipUntil(MMToken::TokenKind K);
- using ModuleId = SmallVector<std::pair<std::string, SourceLocation>, 2>;
-
bool parseModuleId(ModuleId &Id);
void parseModuleDecl();
void parseExternModuleDecl();
@@ -1495,8 +1587,8 @@ namespace clang {
public:
explicit ModuleMapParser(Lexer &L, SourceManager &SourceMgr,
const TargetInfo *Target, DiagnosticsEngine &Diags,
- ModuleMap &Map, const FileEntry *ModuleMapFile,
- const DirectoryEntry *Directory, bool IsSystem)
+ ModuleMap &Map, FileEntryRef ModuleMapFile,
+ DirectoryEntryRef Directory, bool IsSystem)
: L(L), SourceMgr(SourceMgr), Target(Target), Diags(Diags), Map(Map),
ModuleMapFile(ModuleMapFile), Directory(Directory),
IsSystem(IsSystem) {
@@ -1614,7 +1706,7 @@ retry:
SpellingBuffer.resize(LToken.getLength() + 1);
const char *Start = SpellingBuffer.data();
unsigned Length =
- Lexer::getSpelling(LToken, Start, SourceMgr, L.getLangOpts());
+ Lexer::getSpelling(LToken, Start, SourceMgr, Map.LangOpts);
uint64_t Value;
if (StringRef(Start, Length).getAsInteger(0, Value)) {
Diags.Report(Tok.getLocation(), diag::err_mmap_unknown_token);
@@ -1647,7 +1739,7 @@ retry:
break;
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
Diags.Report(Tok.getLocation(), diag::err_mmap_unknown_token);
@@ -1774,7 +1866,7 @@ void ModuleMapParser::diagnosePrivateModules(SourceLocation ExplicitLoc,
continue;
SmallString<128> FullName(ActiveModule->getFullModuleName());
- if (!FullName.startswith(M->Name) && !FullName.endswith("Private"))
+ if (!FullName.starts_with(M->Name) && !FullName.ends_with("Private"))
continue;
SmallString<128> FixedPrivModDecl;
SmallString<128> Canonical(M->Name);
@@ -1948,10 +2040,28 @@ void ModuleMapParser::parseModuleDecl() {
Module *ShadowingModule = nullptr;
if (Module *Existing = Map.lookupModuleQualified(ModuleName, ActiveModule)) {
// We might see a (re)definition of a module that we already have a
- // definition for in two cases:
+ // definition for in four cases:
// - If we loaded one definition from an AST file and we've just found a
// corresponding definition in a module map file, or
- bool LoadedFromASTFile = Existing->DefinitionLoc.isInvalid();
+ bool LoadedFromASTFile = Existing->IsFromModuleFile;
+ // - If we previously inferred this module from different module map file.
+ bool Inferred = Existing->IsInferred;
+ // - If we're building a framework that vends a module map, we might've
+ // previously seen the one in intermediate products and now the system
+ // one.
+ // FIXME: If we're parsing module map file that looks like this:
+ // framework module FW { ... }
+ // module FW.Sub { ... }
+ // We can't check the framework qualifier, since it's not attached to
+ // the definition of Sub. Checking that qualifier on \c Existing is
+ // not correct either, since we might've previously seen:
+ // module FW { ... }
+ // module FW.Sub { ... }
+ // We should enforce consistency of redefinitions so that we can rely
+ // that \c Existing is part of a framework iff the redefinition of FW
+ // we have just skipped had it too. Once we do that, stop checking
+ // the local framework qualifier and only rely on \c Existing.
+ bool PartOfFramework = Framework || Existing->isPartOfFramework();
// - If we're building a (preprocessed) module and we've just loaded the
// module map file from which it was created.
bool ParsedAsMainInput =
@@ -1959,7 +2069,8 @@ void ModuleMapParser::parseModuleDecl() {
Map.LangOpts.CurrentModule == ModuleName &&
SourceMgr.getDecomposedLoc(ModuleNameLoc).first !=
SourceMgr.getDecomposedLoc(Existing->DefinitionLoc).first;
- if (!ActiveModule && (LoadedFromASTFile || ParsedAsMainInput)) {
+ if (LoadedFromASTFile || Inferred || PartOfFramework || ParsedAsMainInput) {
+ ActiveModule = PreviousActiveModule;
// Skip the module definition.
skipUntil(MMToken::RBrace);
if (Tok.is(MMToken::RBrace))
@@ -2005,14 +2116,13 @@ void ModuleMapParser::parseModuleDecl() {
ActiveModule->IsSystem = true;
if (Attrs.IsExternC)
ActiveModule->IsExternC = true;
- if (Attrs.NoUndeclaredIncludes ||
- (!ActiveModule->Parent && ModuleName == "Darwin"))
+ if (Attrs.NoUndeclaredIncludes)
ActiveModule->NoUndeclaredIncludes = true;
ActiveModule->Directory = Directory;
- StringRef MapFileName(ModuleMapFile->getName());
- if (MapFileName.endswith("module.private.modulemap") ||
- MapFileName.endswith("module_private.map")) {
+ StringRef MapFileName(ModuleMapFile.getName());
+ if (MapFileName.ends_with("module.private.modulemap") ||
+ MapFileName.ends_with("module_private.map")) {
ActiveModule->ModuleMapIsPrivate = true;
}
@@ -2115,9 +2225,8 @@ void ModuleMapParser::parseModuleDecl() {
// If the active module is a top-level framework, and there are no link
// libraries, automatically link against the framework.
if (ActiveModule->IsFramework && !ActiveModule->isSubFramework() &&
- ActiveModule->LinkLibraries.empty()) {
- inferFrameworkLink(ActiveModule, Directory, SourceMgr.getFileManager());
- }
+ ActiveModule->LinkLibraries.empty())
+ inferFrameworkLink(ActiveModule);
// If the module meets all requirements but is still unavailable, mark the
// whole tree as unavailable to prevent it from building.
@@ -2168,16 +2277,16 @@ void ModuleMapParser::parseExternModuleDecl() {
StringRef FileNameRef = FileName;
SmallString<128> ModuleMapFileName;
if (llvm::sys::path::is_relative(FileNameRef)) {
- ModuleMapFileName += Directory->getName();
+ ModuleMapFileName += Directory.getName();
llvm::sys::path::append(ModuleMapFileName, FileName);
FileNameRef = ModuleMapFileName;
}
- if (auto File = SourceMgr.getFileManager().getFile(FileNameRef))
+ if (auto File = SourceMgr.getFileManager().getOptionalFileRef(FileNameRef))
Map.parseModuleMapFile(
- *File, /*IsSystem=*/false,
+ *File, IsSystem,
Map.HeaderInfo.getHeaderSearchOpts().ModuleMapFileHomeIsCwd
? Directory
- : (*File)->getDir(),
+ : File->getDir(),
FileID(), nullptr, ExternLoc);
}
@@ -2279,6 +2388,7 @@ void ModuleMapParser::parseHeaderDecl(MMToken::TokenKind LeadingToken,
SourceLocation LeadingLoc) {
// We've already consumed the first token.
ModuleMap::ModuleHeaderRole Role = ModuleMap::NormalHeader;
+
if (LeadingToken == MMToken::PrivateKeyword) {
Role = ModuleMap::PrivateHeader;
// 'private' may optionally be followed by 'textual'.
@@ -2286,6 +2396,8 @@ void ModuleMapParser::parseHeaderDecl(MMToken::TokenKind LeadingToken,
LeadingToken = Tok.Kind;
consumeToken();
}
+ } else if (LeadingToken == MMToken::ExcludeKeyword) {
+ Role = ModuleMap::ExcludedHeader;
}
if (LeadingToken == MMToken::TextualKeyword)
@@ -2319,12 +2431,11 @@ void ModuleMapParser::parseHeaderDecl(MMToken::TokenKind LeadingToken,
Header.FileName = std::string(Tok.getString());
Header.FileNameLoc = consumeToken();
Header.IsUmbrella = LeadingToken == MMToken::UmbrellaKeyword;
- Header.Kind =
- (LeadingToken == MMToken::ExcludeKeyword ? Module::HK_Excluded
- : Map.headerRoleToKind(Role));
+ Header.Kind = Map.headerRoleToKind(Role);
// Check whether we already have an umbrella.
- if (Header.IsUmbrella && ActiveModule->Umbrella) {
+ if (Header.IsUmbrella &&
+ !std::holds_alternative<std::monostate>(ActiveModule->Umbrella)) {
Diags.Report(Header.FileNameLoc, diag::err_mmap_umbrella_clash)
<< ActiveModule->getFullModuleName();
HadError = true;
@@ -2387,17 +2498,23 @@ void ModuleMapParser::parseHeaderDecl(MMToken::TokenKind LeadingToken,
}
bool NeedsFramework = false;
- Map.addUnresolvedHeader(ActiveModule, std::move(Header), NeedsFramework);
-
- if (NeedsFramework && ActiveModule)
+ // Don't add headers to the builtin modules if the builtin headers belong to
+ // the system modules, with the exception of __stddef_max_align_t.h which
+ // always had its own module.
+ if (!Map.LangOpts.BuiltinHeadersInSystemModules ||
+ !isBuiltInModuleName(ActiveModule->getTopLevelModuleName()) ||
+ ActiveModule->fullModuleNameIs({"_Builtin_stddef", "max_align_t"}))
+ Map.addUnresolvedHeader(ActiveModule, std::move(Header), NeedsFramework);
+
+ if (NeedsFramework)
Diags.Report(CurrModuleDeclLoc, diag::note_mmap_add_framework_keyword)
<< ActiveModule->getFullModuleName()
<< FixItHint::CreateReplacement(CurrModuleDeclLoc, "framework module");
}
-static int compareModuleHeaders(const Module::Header *A,
- const Module::Header *B) {
- return A->NameAsWritten.compare(B->NameAsWritten);
+static bool compareModuleHeaders(const Module::Header &A,
+ const Module::Header &B) {
+ return A.NameAsWritten < B.NameAsWritten;
}
/// Parse an umbrella directory declaration.
@@ -2418,7 +2535,7 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
SourceLocation DirNameLoc = consumeToken();
// Check whether we already have an umbrella.
- if (ActiveModule->Umbrella) {
+ if (!std::holds_alternative<std::monostate>(ActiveModule->Umbrella)) {
Diags.Report(DirNameLoc, diag::err_mmap_umbrella_clash)
<< ActiveModule->getFullModuleName();
HadError = true;
@@ -2426,16 +2543,14 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
}
// Look for this file.
- const DirectoryEntry *Dir = nullptr;
+ OptionalDirectoryEntryRef Dir;
if (llvm::sys::path::is_absolute(DirName)) {
- if (auto D = SourceMgr.getFileManager().getDirectory(DirName))
- Dir = *D;
+ Dir = SourceMgr.getFileManager().getOptionalDirectoryRef(DirName);
} else {
SmallString<128> PathName;
- PathName = Directory->getName();
+ PathName = Directory.getName();
llvm::sys::path::append(PathName, DirName);
- if (auto D = SourceMgr.getFileManager().getDirectory(PathName))
- Dir = *D;
+ Dir = SourceMgr.getFileManager().getOptionalDirectoryRef(PathName);
}
if (!Dir) {
@@ -2455,21 +2570,21 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
SourceMgr.getFileManager().getVirtualFileSystem();
for (llvm::vfs::recursive_directory_iterator I(FS, Dir->getName(), EC), E;
I != E && !EC; I.increment(EC)) {
- if (auto FE = SourceMgr.getFileManager().getFile(I->path())) {
+ if (auto FE = SourceMgr.getFileManager().getOptionalFileRef(I->path())) {
Module::Header Header = {"", std::string(I->path()), *FE};
Headers.push_back(std::move(Header));
}
}
// Sort header paths so that the pcm doesn't depend on iteration order.
- llvm::array_pod_sort(Headers.begin(), Headers.end(), compareModuleHeaders);
+ std::stable_sort(Headers.begin(), Headers.end(), compareModuleHeaders);
for (auto &Header : Headers)
Map.addHeader(ActiveModule, std::move(Header), ModuleMap::TextualHeader);
return;
}
- if (Module *OwningModule = Map.UmbrellaDirs[Dir]) {
+ if (Module *OwningModule = Map.UmbrellaDirs[*Dir]) {
Diags.Report(UmbrellaLoc, diag::err_mmap_umbrella_clash)
<< OwningModule->getFullModuleName();
HadError = true;
@@ -2477,7 +2592,7 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
}
// Record this umbrella directory.
- Map.setUmbrellaDir(ActiveModule, Dir, DirNameAsWritten, DirName);
+ Map.setUmbrellaDirAsWritten(ActiveModule, *Dir, DirNameAsWritten, DirName);
}
/// Parse a module export declaration.
@@ -2741,7 +2856,7 @@ void ModuleMapParser::parseInferredModuleDecl(bool Framework, bool Explicit) {
if (ActiveModule) {
// Inferred modules must have umbrella directories.
if (!Failed && ActiveModule->IsAvailable &&
- !ActiveModule->getUmbrellaDir()) {
+ !ActiveModule->getEffectiveUmbrellaDir()) {
Diags.Report(StarLoc, diag::err_mmap_inferred_no_umbrella);
Failed = true;
}
@@ -2994,8 +3109,8 @@ bool ModuleMapParser::parseModuleMapFile() {
} while (true);
}
-bool ModuleMap::parseModuleMapFile(const FileEntry *File, bool IsSystem,
- const DirectoryEntry *Dir, FileID ID,
+bool ModuleMap::parseModuleMapFile(FileEntryRef File, bool IsSystem,
+ DirectoryEntryRef Dir, FileID ID,
unsigned *Offset,
SourceLocation ExternModuleLoc) {
assert(Target && "Missing target information");
@@ -3012,7 +3127,7 @@ bool ModuleMap::parseModuleMapFile(const FileEntry *File, bool IsSystem,
}
assert(Target && "Missing target information");
- llvm::Optional<llvm::MemoryBufferRef> Buffer = SourceMgr.getBufferOrNone(ID);
+ std::optional<llvm::MemoryBufferRef> Buffer = SourceMgr.getBufferOrNone(ID);
if (!Buffer)
return ParsedModuleMap[File] = true;
assert((!Offset || *Offset <= Buffer->getBufferSize()) &&
@@ -3037,7 +3152,7 @@ bool ModuleMap::parseModuleMapFile(const FileEntry *File, bool IsSystem,
// Notify callbacks that we parsed it.
for (const auto &Cb : Callbacks)
- Cb->moduleMapFileRead(Start, *File, IsSystem);
+ Cb->moduleMapFileRead(Start, File, IsSystem);
return Result;
}
diff --git a/contrib/llvm-project/clang/lib/Lex/PPCaching.cpp b/contrib/llvm-project/clang/lib/Lex/PPCaching.cpp
index e05e52ba9bb5..f38ff62ebf43 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPCaching.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPCaching.cpp
@@ -88,7 +88,7 @@ void Preprocessor::EnterCachingLexMode() {
"entered caching lex mode while lexing something else");
if (InCachingLexMode()) {
- assert(CurLexerKind == CLK_CachingLexer && "Unexpected lexer kind");
+ assert(CurLexerCallback == CLK_CachingLexer && "Unexpected lexer kind");
return;
}
@@ -96,9 +96,9 @@ void Preprocessor::EnterCachingLexMode() {
}
void Preprocessor::EnterCachingLexModeUnchecked() {
- assert(CurLexerKind != CLK_CachingLexer && "already in caching lex mode");
+ assert(CurLexerCallback != CLK_CachingLexer && "already in caching lex mode");
PushIncludeMacroStack();
- CurLexerKind = CLK_CachingLexer;
+ CurLexerCallback = CLK_CachingLexer;
}
diff --git a/contrib/llvm-project/clang/lib/Lex/PPCallbacks.cpp b/contrib/llvm-project/clang/lib/Lex/PPCallbacks.cpp
index b618071590ba..f2b60a728e90 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPCallbacks.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPCallbacks.cpp
@@ -15,16 +15,15 @@ using namespace clang;
PPCallbacks::~PPCallbacks() = default;
void PPCallbacks::HasInclude(SourceLocation Loc, StringRef FileName,
- bool IsAngled, Optional<FileEntryRef> File,
+ bool IsAngled, OptionalFileEntryRef File,
SrcMgr::CharacteristicKind FileType) {}
// Out of line key method.
PPChainedCallbacks::~PPChainedCallbacks() = default;
void PPChainedCallbacks::HasInclude(SourceLocation Loc, StringRef FileName,
- bool IsAngled, Optional<FileEntryRef> File,
+ bool IsAngled, OptionalFileEntryRef File,
SrcMgr::CharacteristicKind FileType) {
First->HasInclude(Loc, FileName, IsAngled, File, FileType);
Second->HasInclude(Loc, FileName, IsAngled, File, FileType);
}
-
diff --git a/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp b/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp
index 3fa8746653b0..a980f4bcbae1 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/DirectoryEntry.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LangOptions.h"
@@ -21,6 +22,7 @@
#include "clang/Basic/TokenKinds.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/LiteralSupport.h"
#include "clang/Lex/MacroInfo.h"
@@ -33,19 +35,21 @@
#include "clang/Lex/Token.h"
#include "clang/Lex/VariadicMacroSupport.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/SaveAndRestore.h"
#include <algorithm>
#include <cassert>
#include <cstring>
#include <new>
+#include <optional>
#include <string>
#include <utility>
@@ -56,9 +60,8 @@ using namespace clang;
//===----------------------------------------------------------------------===//
MacroInfo *Preprocessor::AllocateMacroInfo(SourceLocation L) {
- auto *MIChain = new (BP) MacroInfoChain{L, MIChainHead};
- MIChainHead = MIChain;
- return &MIChain->MI;
+ static_assert(std::is_trivially_destructible_v<MacroInfo>, "");
+ return new (BP) MacroInfo(L);
}
DefMacroDirective *Preprocessor::AllocateDefMacroDirective(MacroInfo *MI,
@@ -108,71 +111,76 @@ enum PPElifDiag {
PED_Elifndef
};
-// The -fmodule-name option tells the compiler to textually include headers in
-// the specified module, meaning clang won't build the specified module. This is
-// useful in a number of situations, for instance, when building a library that
-// vends a module map, one might want to avoid hitting intermediate build
-// products containing the the module map or avoid finding the system installed
-// modulemap for that library.
-static bool isForModuleBuilding(Module *M, StringRef CurrentModule,
- StringRef ModuleName) {
- StringRef TopLevelName = M->getTopLevelModuleName();
-
- // When building framework Foo, we wanna make sure that Foo *and* Foo_Private
- // are textually included and no modules are built for both.
- if (M->getTopLevelModule()->IsFramework && CurrentModule == ModuleName &&
- !CurrentModule.endswith("_Private") && TopLevelName.endswith("_Private"))
- TopLevelName = TopLevelName.drop_back(8);
-
- return TopLevelName == CurrentModule;
+static bool isFeatureTestMacro(StringRef MacroName) {
+ // list from:
+ // * https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_macros.html
+ // * https://docs.microsoft.com/en-us/cpp/c-runtime-library/security-features-in-the-crt?view=msvc-160
+ // * man 7 feature_test_macros
+ // The list must be sorted for correct binary search.
+ static constexpr StringRef ReservedMacro[] = {
+ "_ATFILE_SOURCE",
+ "_BSD_SOURCE",
+ "_CRT_NONSTDC_NO_WARNINGS",
+ "_CRT_SECURE_CPP_OVERLOAD_STANDARD_NAMES",
+ "_CRT_SECURE_NO_WARNINGS",
+ "_FILE_OFFSET_BITS",
+ "_FORTIFY_SOURCE",
+ "_GLIBCXX_ASSERTIONS",
+ "_GLIBCXX_CONCEPT_CHECKS",
+ "_GLIBCXX_DEBUG",
+ "_GLIBCXX_DEBUG_PEDANTIC",
+ "_GLIBCXX_PARALLEL",
+ "_GLIBCXX_PARALLEL_ASSERTIONS",
+ "_GLIBCXX_SANITIZE_VECTOR",
+ "_GLIBCXX_USE_CXX11_ABI",
+ "_GLIBCXX_USE_DEPRECATED",
+ "_GNU_SOURCE",
+ "_ISOC11_SOURCE",
+ "_ISOC95_SOURCE",
+ "_ISOC99_SOURCE",
+ "_LARGEFILE64_SOURCE",
+ "_POSIX_C_SOURCE",
+ "_REENTRANT",
+ "_SVID_SOURCE",
+ "_THREAD_SAFE",
+ "_XOPEN_SOURCE",
+ "_XOPEN_SOURCE_EXTENDED",
+ "__STDCPP_WANT_MATH_SPEC_FUNCS__",
+ "__STDC_FORMAT_MACROS",
+ };
+ return std::binary_search(std::begin(ReservedMacro), std::end(ReservedMacro),
+ MacroName);
+}
+
+static bool isLanguageDefinedBuiltin(const SourceManager &SourceMgr,
+ const MacroInfo *MI,
+ const StringRef MacroName) {
+ // If this is a macro with special handling (like __LINE__) then it's language
+ // defined.
+ if (MI->isBuiltinMacro())
+ return true;
+ // Builtin macros are defined in the builtin file
+ if (!SourceMgr.isWrittenInBuiltinFile(MI->getDefinitionLoc()))
+ return false;
+ // C defines macros starting with __STDC, and C++ defines macros starting with
+ // __STDCPP
+ if (MacroName.starts_with("__STDC"))
+ return true;
+ // C++ defines the __cplusplus macro
+ if (MacroName == "__cplusplus")
+ return true;
+ // C++ defines various feature-test macros starting with __cpp
+ if (MacroName.starts_with("__cpp"))
+ return true;
+ // Anything else isn't language-defined
+ return false;
}
static MacroDiag shouldWarnOnMacroDef(Preprocessor &PP, IdentifierInfo *II) {
const LangOptions &Lang = PP.getLangOpts();
- if (II->isReserved(Lang) != ReservedIdentifierStatus::NotReserved) {
- // list from:
- // - https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_macros.html
- // - https://docs.microsoft.com/en-us/cpp/c-runtime-library/security-features-in-the-crt?view=msvc-160
- // - man 7 feature_test_macros
- // The list must be sorted for correct binary search.
- static constexpr StringRef ReservedMacro[] = {
- "_ATFILE_SOURCE",
- "_BSD_SOURCE",
- "_CRT_NONSTDC_NO_WARNINGS",
- "_CRT_SECURE_CPP_OVERLOAD_STANDARD_NAMES",
- "_CRT_SECURE_NO_WARNINGS",
- "_FILE_OFFSET_BITS",
- "_FORTIFY_SOURCE",
- "_GLIBCXX_ASSERTIONS",
- "_GLIBCXX_CONCEPT_CHECKS",
- "_GLIBCXX_DEBUG",
- "_GLIBCXX_DEBUG_PEDANTIC",
- "_GLIBCXX_PARALLEL",
- "_GLIBCXX_PARALLEL_ASSERTIONS",
- "_GLIBCXX_SANITIZE_VECTOR",
- "_GLIBCXX_USE_CXX11_ABI",
- "_GLIBCXX_USE_DEPRECATED",
- "_GNU_SOURCE",
- "_ISOC11_SOURCE",
- "_ISOC95_SOURCE",
- "_ISOC99_SOURCE",
- "_LARGEFILE64_SOURCE",
- "_POSIX_C_SOURCE",
- "_REENTRANT",
- "_SVID_SOURCE",
- "_THREAD_SAFE",
- "_XOPEN_SOURCE",
- "_XOPEN_SOURCE_EXTENDED",
- "__STDCPP_WANT_MATH_SPEC_FUNCS__",
- "__STDC_FORMAT_MACROS",
- };
- if (std::binary_search(std::begin(ReservedMacro), std::end(ReservedMacro),
- II->getName()))
- return MD_NoWarn;
-
- return MD_ReservedMacro;
- }
StringRef Text = II->getName();
+ if (isReservedInAllContexts(II->isReserved(Lang)))
+ return isFeatureTestMacro(Text) ? MD_NoWarn : MD_ReservedMacro;
if (II->isKeyword(Lang))
return MD_KeywordDef;
if (Lang.CPlusPlus11 && (Text.equals("override") || Text.equals("final")))
@@ -183,7 +191,7 @@ static MacroDiag shouldWarnOnMacroDef(Preprocessor &PP, IdentifierInfo *II) {
static MacroDiag shouldWarnOnMacroUndef(Preprocessor &PP, IdentifierInfo *II) {
const LangOptions &Lang = PP.getLangOpts();
// Do not warn on keyword undef. It is generally harmless and widely used.
- if (II->isReserved(Lang) != ReservedIdentifierStatus::NotReserved)
+ if (isReservedInAllContexts(II->isReserved(Lang)))
return MD_ReservedMacro;
return MD_NoWarn;
}
@@ -225,9 +233,10 @@ static bool warnByDefaultOnWrongCase(StringRef Include) {
.Cases("assert.h", "complex.h", "ctype.h", "errno.h", "fenv.h", true)
.Cases("float.h", "inttypes.h", "iso646.h", "limits.h", "locale.h", true)
.Cases("math.h", "setjmp.h", "signal.h", "stdalign.h", "stdarg.h", true)
- .Cases("stdatomic.h", "stdbool.h", "stddef.h", "stdint.h", "stdio.h", true)
- .Cases("stdlib.h", "stdnoreturn.h", "string.h", "tgmath.h", "threads.h", true)
- .Cases("time.h", "uchar.h", "wchar.h", "wctype.h", true)
+ .Cases("stdatomic.h", "stdbool.h", "stdckdint.h", "stddef.h", true)
+ .Cases("stdint.h", "stdio.h", "stdlib.h", "stdnoreturn.h", true)
+ .Cases("string.h", "tgmath.h", "threads.h", "time.h", "uchar.h", true)
+ .Cases("wchar.h", "wctype.h", true)
// C++ headers for C library facilities
.Cases("cassert", "ccomplex", "cctype", "cerrno", "cfenv", true)
@@ -266,6 +275,51 @@ static bool warnByDefaultOnWrongCase(StringRef Include) {
.Default(false);
}
+/// Find a similar string in `Candidates`.
+///
+/// \param LHS a string for a similar string in `Candidates`
+///
+/// \param Candidates the candidates to find a similar string.
+///
+/// \returns a similar string if exists. If no similar string exists,
+/// returns std::nullopt.
+static std::optional<StringRef>
+findSimilarStr(StringRef LHS, const std::vector<StringRef> &Candidates) {
+ // We need to check if `Candidates` has the exact case-insensitive string
+ // because the Levenshtein distance match does not care about it.
+ for (StringRef C : Candidates) {
+ if (LHS.equals_insensitive(C)) {
+ return C;
+ }
+ }
+
+ // Keep going with the Levenshtein distance match.
+ // If the LHS size is less than 3, use the LHS size minus 1 and if not,
+ // use the LHS size divided by 3.
+ size_t Length = LHS.size();
+ size_t MaxDist = Length < 3 ? Length - 1 : Length / 3;
+
+ std::optional<std::pair<StringRef, size_t>> SimilarStr;
+ for (StringRef C : Candidates) {
+ size_t CurDist = LHS.edit_distance(C, true);
+ if (CurDist <= MaxDist) {
+ if (!SimilarStr) {
+ // The first similar string found.
+ SimilarStr = {C, CurDist};
+ } else if (CurDist < SimilarStr->second) {
+ // More similar string found.
+ SimilarStr = {C, CurDist};
+ }
+ }
+ }
+
+ if (SimilarStr) {
+ return SimilarStr->first;
+ } else {
+ return std::nullopt;
+ }
+}
+
bool Preprocessor::CheckMacroName(Token &MacroNameTok, MacroUse isDefineUndef,
bool *ShadowFlag) {
// Missing macro name?
@@ -292,15 +346,6 @@ bool Preprocessor::CheckMacroName(Token &MacroNameTok, MacroUse isDefineUndef,
return Diag(MacroNameTok, diag::err_defined_macro_name);
}
- if (isDefineUndef == MU_Undef) {
- auto *MI = getMacroInfo(II);
- if (MI && MI->isBuiltinMacro()) {
- // Warn if undefining "__LINE__" and other builtins, per C99 6.10.8/4
- // and C++ [cpp.predefined]p4], but allow it as an extension.
- Diag(MacroNameTok, diag::ext_pp_undef_builtin_macro);
- }
- }
-
// If defining/undefining reserved identifier or a keyword, we need to issue
// a warning.
SourceLocation MacroNameLoc = MacroNameTok.getLocation();
@@ -398,39 +443,29 @@ SourceLocation Preprocessor::CheckEndOfDirective(const char *DirType,
return DiscardUntilEndOfDirective().getEnd();
}
-Optional<unsigned> Preprocessor::getSkippedRangeForExcludedConditionalBlock(
- SourceLocation HashLoc) {
- if (!ExcludedConditionalDirectiveSkipMappings)
- return None;
- if (!HashLoc.isFileID())
- return None;
-
- std::pair<FileID, unsigned> HashFileOffset =
- SourceMgr.getDecomposedLoc(HashLoc);
- Optional<llvm::MemoryBufferRef> Buf =
- SourceMgr.getBufferOrNone(HashFileOffset.first);
- if (!Buf)
- return None;
- auto It =
- ExcludedConditionalDirectiveSkipMappings->find(Buf->getBufferStart());
- if (It == ExcludedConditionalDirectiveSkipMappings->end())
- return None;
-
- const PreprocessorSkippedRangeMapping &SkippedRanges = *It->getSecond();
- // Check if the offset of '#' is mapped in the skipped ranges.
- auto MappingIt = SkippedRanges.find(HashFileOffset.second);
- if (MappingIt == SkippedRanges.end())
- return None;
-
- unsigned BytesToSkip = MappingIt->getSecond();
- unsigned CurLexerBufferOffset = CurLexer->getCurrentBufferOffset();
- assert(CurLexerBufferOffset >= HashFileOffset.second &&
- "lexer is before the hash?");
- // Take into account the fact that the lexer has already advanced, so the
- // number of bytes to skip must be adjusted.
- unsigned LengthDiff = CurLexerBufferOffset - HashFileOffset.second;
- assert(BytesToSkip >= LengthDiff && "lexer is after the skipped range?");
- return BytesToSkip - LengthDiff;
+void Preprocessor::SuggestTypoedDirective(const Token &Tok,
+ StringRef Directive) const {
+ // If this is a `.S` file, treat unknown # directives as non-preprocessor
+ // directives.
+ if (getLangOpts().AsmPreprocessor) return;
+
+ std::vector<StringRef> Candidates = {
+ "if", "ifdef", "ifndef", "elif", "else", "endif"
+ };
+ if (LangOpts.C23 || LangOpts.CPlusPlus23)
+ Candidates.insert(Candidates.end(), {"elifdef", "elifndef"});
+
+ if (std::optional<StringRef> Sugg = findSimilarStr(Directive, Candidates)) {
+ // Directive cannot be coming from macro.
+ assert(Tok.getLocation().isFileID());
+ CharSourceRange DirectiveRange = CharSourceRange::getCharRange(
+ Tok.getLocation(),
+ Tok.getLocation().getLocWithOffset(Directive.size()));
+ StringRef SuggValue = *Sugg;
+
+ auto Hint = FixItHint::CreateReplacement(DirectiveRange, SuggValue);
+ Diag(Tok, diag::warn_pp_invalid_directive) << 1 << SuggValue << Hint;
+ }
}
/// SkipExcludedConditionalBlock - We just read a \#if or related directive and
@@ -446,8 +481,22 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
bool FoundNonSkipPortion,
bool FoundElse,
SourceLocation ElseLoc) {
+ // In SkippingRangeStateTy we are depending on SkipExcludedConditionalBlock()
+ // not getting called recursively by storing the RecordedSkippedRanges
+ // DenseMap lookup pointer (field SkipRangePtr). SkippingRangeStateTy expects
+ // that RecordedSkippedRanges won't get modified and SkipRangePtr won't be
+ // invalidated. If this changes and there is a need to call
+ // SkipExcludedConditionalBlock() recursively, SkippingRangeStateTy should
+ // change to do a second lookup in endLexPass function instead of reusing the
+ // lookup pointer.
+ assert(!SkippingExcludedConditionalBlock &&
+ "calling SkipExcludedConditionalBlock recursively");
+ llvm::SaveAndRestore SARSkipping(SkippingExcludedConditionalBlock, true);
+
++NumSkipped;
- assert(!CurTokenLexer && CurPPLexer && "Lexing a macro, not a file?");
+ assert(!CurTokenLexer && "Conditional PP block cannot appear in a macro!");
+ assert(CurPPLexer && "Conditional PP block must be in a file!");
+ assert(CurLexer && "Conditional PP block but no current lexer set!");
if (PreambleConditionalStack.reachedEOFWhileSkipping())
PreambleConditionalStack.clearSkipInfo();
@@ -459,36 +508,85 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
// disabling warnings, etc.
CurPPLexer->LexingRawMode = true;
Token Tok;
- if (auto SkipLength =
- getSkippedRangeForExcludedConditionalBlock(HashTokenLoc)) {
- // Skip to the next '#endif' / '#else' / '#elif'.
- CurLexer->skipOver(*SkipLength);
- }
SourceLocation endLoc;
- while (true) {
- CurLexer->Lex(Tok);
- if (Tok.is(tok::code_completion)) {
- setCodeCompletionReached();
- if (CodeComplete)
- CodeComplete->CodeCompleteInConditionalExclusion();
- continue;
+ /// Keeps track and caches skipped ranges and also retrieves a prior skipped
+ /// range if the same block is re-visited.
+ struct SkippingRangeStateTy {
+ Preprocessor &PP;
+
+ const char *BeginPtr = nullptr;
+ unsigned *SkipRangePtr = nullptr;
+
+ SkippingRangeStateTy(Preprocessor &PP) : PP(PP) {}
+
+ void beginLexPass() {
+ if (BeginPtr)
+ return; // continue skipping a block.
+
+ // Initiate a skipping block and adjust the lexer if we already skipped it
+ // before.
+ BeginPtr = PP.CurLexer->getBufferLocation();
+ SkipRangePtr = &PP.RecordedSkippedRanges[BeginPtr];
+ if (*SkipRangePtr) {
+ PP.CurLexer->seek(PP.CurLexer->getCurrentBufferOffset() + *SkipRangePtr,
+ /*IsAtStartOfLine*/ true);
+ }
}
- // If this is the end of the buffer, we have an error.
- if (Tok.is(tok::eof)) {
- // We don't emit errors for unterminated conditionals here,
- // Lexer::LexEndOfFile can do that properly.
- // Just return and let the caller lex after this #include.
- if (PreambleConditionalStack.isRecording())
- PreambleConditionalStack.SkipInfo.emplace(
- HashTokenLoc, IfTokenLoc, FoundNonSkipPortion, FoundElse, ElseLoc);
- break;
+ void endLexPass(const char *Hashptr) {
+ if (!BeginPtr) {
+ // Not doing normal lexing.
+ assert(PP.CurLexer->isDependencyDirectivesLexer());
+ return;
+ }
+
+ // Finished skipping a block, record the range if it's first time visited.
+ if (!*SkipRangePtr) {
+ *SkipRangePtr = Hashptr - BeginPtr;
+ }
+ assert(*SkipRangePtr == Hashptr - BeginPtr);
+ BeginPtr = nullptr;
+ SkipRangePtr = nullptr;
}
+ } SkippingRangeState(*this);
- // If this token is not a preprocessor directive, just skip it.
- if (Tok.isNot(tok::hash) || !Tok.isAtStartOfLine())
- continue;
+ while (true) {
+ if (CurLexer->isDependencyDirectivesLexer()) {
+ CurLexer->LexDependencyDirectiveTokenWhileSkipping(Tok);
+ } else {
+ SkippingRangeState.beginLexPass();
+ while (true) {
+ CurLexer->Lex(Tok);
+
+ if (Tok.is(tok::code_completion)) {
+ setCodeCompletionReached();
+ if (CodeComplete)
+ CodeComplete->CodeCompleteInConditionalExclusion();
+ continue;
+ }
+
+ // If this is the end of the buffer, we have an error.
+ if (Tok.is(tok::eof)) {
+ // We don't emit errors for unterminated conditionals here,
+ // Lexer::LexEndOfFile can do that properly.
+ // Just return and let the caller lex after this #include.
+ if (PreambleConditionalStack.isRecording())
+ PreambleConditionalStack.SkipInfo.emplace(HashTokenLoc, IfTokenLoc,
+ FoundNonSkipPortion,
+ FoundElse, ElseLoc);
+ break;
+ }
+
+ // If this token is not a preprocessor directive, just skip it.
+ if (Tok.isNot(tok::hash) || !Tok.isAtStartOfLine())
+ continue;
+
+ break;
+ }
+ }
+ if (Tok.is(tok::eof))
+ break;
// We just parsed a # character at the start of a line, so we're in
// directive mode. Tell the lexer this so any newlines we see will be
@@ -496,6 +594,9 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
CurPPLexer->ParsingPreprocessorDirective = true;
if (CurLexer) CurLexer->SetKeepWhitespaceMode(false);
+ assert(Tok.is(tok::hash));
+ const char *Hashptr = CurLexer->getBufferLocation() - Tok.getLength();
+ assert(CurLexer->getSourceLocation(Hashptr) == Tok.getLocation());
// Read the next token, the directive flavor.
LexUnexpandedToken(Tok);
@@ -545,7 +646,7 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
Directive = StringRef(DirectiveBuf, IdLen);
}
- if (Directive.startswith("if")) {
+ if (Directive.starts_with("if")) {
StringRef Sub = Directive.substr(2);
if (Sub.empty() || // "if"
Sub == "def" || // "ifdef"
@@ -556,6 +657,8 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
CurPPLexer->pushConditionalLevel(Tok.getLocation(), /*wasskipping*/true,
/*foundnonskip*/false,
/*foundelse*/false);
+ } else {
+ SuggestTypoedDirective(Tok, Directive);
}
} else if (Directive[0] == 'e') {
StringRef Sub = Directive.substr(1);
@@ -568,6 +671,7 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
// If we popped the outermost skipping block, we're done skipping!
if (!CondInfo.WasSkipping) {
+ SkippingRangeState.endLexPass(Hashptr);
// Restore the value of LexingRawMode so that trailing comments
// are handled correctly, if we've reached the outermost block.
CurPPLexer->LexingRawMode = false;
@@ -585,6 +689,9 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
// as a non-skipping conditional.
PPConditionalInfo &CondInfo = CurPPLexer->peekConditionalLevel();
+ if (!CondInfo.WasSkipping)
+ SkippingRangeState.endLexPass(Hashptr);
+
// If this is a #else with a #else before it, report the error.
if (CondInfo.FoundElse)
Diag(Tok, diag::pp_err_else_after_else);
@@ -610,6 +717,9 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
} else if (Sub == "lif") { // "elif".
PPConditionalInfo &CondInfo = CurPPLexer->peekConditionalLevel();
+ if (!CondInfo.WasSkipping)
+ SkippingRangeState.endLexPass(Hashptr);
+
// If this is a #elif with a #else before it, report the error.
if (CondInfo.FoundElse)
Diag(Tok, diag::pp_err_elif_after_else) << PED_Elif;
@@ -617,6 +727,10 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
// If this is in a skipping block or if we're already handled this #if
// block, don't bother parsing the condition.
if (CondInfo.WasSkipping || CondInfo.FoundNonSkip) {
+ // FIXME: We should probably do at least some minimal parsing of the
+ // condition to verify that it is well-formed. The current state
+ // allows #elif* directives with completely malformed (or missing)
+ // conditions.
DiscardUntilEndOfDirective();
} else {
// Restore the value of LexingRawMode so that identifiers are
@@ -648,6 +762,20 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
PPConditionalInfo &CondInfo = CurPPLexer->peekConditionalLevel();
Token DirectiveToken = Tok;
+ if (!CondInfo.WasSkipping)
+ SkippingRangeState.endLexPass(Hashptr);
+
+ // Warn if using `#elifdef` & `#elifndef` in not C23 & C++23 mode even
+ // if this branch is in a skipping block.
+ unsigned DiagID;
+ if (LangOpts.CPlusPlus)
+ DiagID = LangOpts.CPlusPlus23 ? diag::warn_cxx23_compat_pp_directive
+ : diag::ext_cxx23_pp_directive;
+ else
+ DiagID = LangOpts.C23 ? diag::warn_c23_compat_pp_directive
+ : diag::ext_c23_pp_directive;
+ Diag(Tok, DiagID) << (IsElifDef ? PED_Elifdef : PED_Elifndef);
+
// If this is a #elif with a #else before it, report the error.
if (CondInfo.FoundElse)
Diag(Tok, diag::pp_err_elif_after_else)
@@ -656,6 +784,10 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
// If this is in a skipping block or if we're already handled this #if
// block, don't bother parsing the condition.
if (CondInfo.WasSkipping || CondInfo.FoundNonSkip) {
+ // FIXME: We should probably do at least some minimal parsing of the
+ // condition to verify that it is well-formed. The current state
+ // allows #elif* directives with completely malformed (or missing)
+ // conditions.
DiscardUntilEndOfDirective();
} else {
// Restore the value of LexingRawMode so that identifiers are
@@ -674,6 +806,8 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
continue;
}
+ emitMacroExpansionWarnings(MacroNameTok);
+
CheckEndOfDirective(IsElifDef ? "elifdef" : "elifndef");
IdentifierInfo *MII = MacroNameTok.getIdentifierInfo();
@@ -695,7 +829,11 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
break;
}
}
+ } else {
+ SuggestTypoedDirective(Tok, Directive);
}
+ } else {
+ SuggestTypoedDirective(Tok, Directive);
}
CurPPLexer->ParsingPreprocessorDirective = false;
@@ -718,15 +856,16 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
Tok.getLocation());
}
-Module *Preprocessor::getModuleForLocation(SourceLocation Loc) {
+Module *Preprocessor::getModuleForLocation(SourceLocation Loc,
+ bool AllowTextual) {
if (!SourceMgr.isInMainFile(Loc)) {
// Try to determine the module of the include directive.
// FIXME: Look into directly passing the FileEntry from LookupFile instead.
FileID IDOfIncl = SourceMgr.getFileID(SourceMgr.getExpansionLoc(Loc));
- if (const FileEntry *EntryOfIncl = SourceMgr.getFileEntryForID(IDOfIncl)) {
+ if (auto EntryOfIncl = SourceMgr.getFileEntryRefForID(IDOfIncl)) {
// The include comes from an included file.
return HeaderInfo.getModuleMap()
- .findModuleForHeader(EntryOfIncl)
+ .findModuleForHeader(*EntryOfIncl, AllowTextual)
.getModule();
}
}
@@ -735,13 +874,14 @@ Module *Preprocessor::getModuleForLocation(SourceLocation Loc) {
// to the current module, if there is one.
return getLangOpts().CurrentModule.empty()
? nullptr
- : HeaderInfo.lookupModule(getLangOpts().CurrentModule);
+ : HeaderInfo.lookupModule(getLangOpts().CurrentModule, Loc);
}
-const FileEntry *
+OptionalFileEntryRef
Preprocessor::getHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
SourceLocation Loc) {
- Module *IncM = getModuleForLocation(IncLoc);
+ Module *IncM = getModuleForLocation(
+ IncLoc, LangOpts.ModulesValidateTextualHeaderIncludes);
// Walk up through the include stack, looking through textual headers of M
// until we hit a non-textual header that we can #include. (We assume textual
@@ -750,7 +890,7 @@ Preprocessor::getHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
auto &SM = getSourceManager();
while (!Loc.isInvalid() && !SM.isInMainFile(Loc)) {
auto ID = SM.getFileID(SM.getExpansionLoc(Loc));
- auto *FE = SM.getFileEntryForID(ID);
+ auto FE = SM.getFileEntryRefForID(ID);
if (!FE)
break;
@@ -760,7 +900,7 @@ Preprocessor::getHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
SourceMgr.isInSystemHeader(Loc));
bool InPrivateHeader = false;
- for (auto Header : HeaderInfo.findAllModulesForHeader(FE)) {
+ for (auto Header : HeaderInfo.findAllModulesForHeader(*FE)) {
if (!Header.isAccessibleFrom(IncM)) {
// It's in a private header; we can't #include it.
// FIXME: If there's a public header in some module that re-exports it,
@@ -770,6 +910,10 @@ Preprocessor::getHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
continue;
}
+ // Don't suggest explicitly excluded headers.
+ if (Header.getRole() == ModuleMap::ExcludedHeader)
+ continue;
+
// We'll suggest including textual headers below if they're
// include-guarded.
if (Header.getRole() & ModuleMap::TextualHeader)
@@ -778,51 +922,52 @@ Preprocessor::getHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
// If we have a module import syntax, we shouldn't include a header to
// make a particular module visible. Let the caller know they should
// suggest an import instead.
- if (getLangOpts().ObjC || getLangOpts().CPlusPlusModules ||
- getLangOpts().ModulesTS)
- return nullptr;
+ if (getLangOpts().ObjC || getLangOpts().CPlusPlusModules)
+ return std::nullopt;
// If this is an accessible, non-textual header of M's top-level module
// that transitively includes the given location and makes the
// corresponding module visible, this is the thing to #include.
- return FE;
+ return *FE;
}
// FIXME: If we're bailing out due to a private header, we shouldn't suggest
// an import either.
if (InPrivateHeader)
- return nullptr;
+ return std::nullopt;
// If the header is includable and has an include guard, assume the
// intended way to expose its contents is by #include, not by importing a
// module that transitively includes it.
- if (getHeaderSearchInfo().isFileMultipleIncludeGuarded(FE))
- return FE;
+ if (getHeaderSearchInfo().isFileMultipleIncludeGuarded(*FE))
+ return *FE;
Loc = SM.getIncludeLoc(ID);
}
- return nullptr;
+ return std::nullopt;
}
-Optional<FileEntryRef> Preprocessor::LookupFile(
+OptionalFileEntryRef Preprocessor::LookupFile(
SourceLocation FilenameLoc, StringRef Filename, bool isAngled,
- const DirectoryLookup *FromDir, const FileEntry *FromFile,
- const DirectoryLookup *&CurDir, SmallVectorImpl<char> *SearchPath,
+ ConstSearchDirIterator FromDir, const FileEntry *FromFile,
+ ConstSearchDirIterator *CurDirArg, SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath,
ModuleMap::KnownHeader *SuggestedModule, bool *IsMapped,
- bool *IsFrameworkFound, bool SkipCache) {
- Module *RequestingModule = getModuleForLocation(FilenameLoc);
- bool RequestingModuleIsModuleInterface = !SourceMgr.isInMainFile(FilenameLoc);
+ bool *IsFrameworkFound, bool SkipCache, bool OpenFile, bool CacheFailures) {
+ ConstSearchDirIterator CurDirLocal = nullptr;
+ ConstSearchDirIterator &CurDir = CurDirArg ? *CurDirArg : CurDirLocal;
+
+ Module *RequestingModule = getModuleForLocation(
+ FilenameLoc, LangOpts.ModulesValidateTextualHeaderIncludes);
// If the header lookup mechanism may be relative to the current inclusion
// stack, record the parent #includes.
- SmallVector<std::pair<const FileEntry *, const DirectoryEntry *>, 16>
- Includers;
+ SmallVector<std::pair<OptionalFileEntryRef, DirectoryEntryRef>, 16> Includers;
bool BuildSystemModule = false;
if (!FromDir && !FromFile) {
FileID FID = getCurrentFileLexer()->getFileID();
- const FileEntry *FileEnt = SourceMgr.getFileEntryForID(FID);
+ OptionalFileEntryRef FileEnt = SourceMgr.getFileEntryRefForID(FID);
// If there is no file entry associated with this file, it must be the
// predefines buffer or the module includes buffer. Any other file is not
@@ -838,13 +983,20 @@ Optional<FileEntryRef> Preprocessor::LookupFile(
// map file.
if (!FileEnt) {
if (FID == SourceMgr.getMainFileID() && MainFileDir) {
- Includers.push_back(std::make_pair(nullptr, MainFileDir));
+ auto IncludeDir =
+ HeaderInfo.getModuleMap().shouldImportRelativeToBuiltinIncludeDir(
+ Filename, getCurrentModule())
+ ? HeaderInfo.getModuleMap().getBuiltinDir()
+ : MainFileDir;
+ Includers.push_back(std::make_pair(std::nullopt, *IncludeDir));
BuildSystemModule = getCurrentModule()->IsSystem;
- } else if ((FileEnt =
- SourceMgr.getFileEntryForID(SourceMgr.getMainFileID())))
- Includers.push_back(std::make_pair(FileEnt, *FileMgr.getDirectory(".")));
+ } else if ((FileEnt = SourceMgr.getFileEntryRefForID(
+ SourceMgr.getMainFileID()))) {
+ auto CWD = FileMgr.getOptionalDirectoryRef(".");
+ Includers.push_back(std::make_pair(*FileEnt, *CWD));
+ }
} else {
- Includers.push_back(std::make_pair(FileEnt, FileEnt->getDir()));
+ Includers.push_back(std::make_pair(*FileEnt, FileEnt->getDir()));
}
// MSVC searches the current include stack from top to bottom for
@@ -854,7 +1006,7 @@ Optional<FileEntryRef> Preprocessor::LookupFile(
for (IncludeStackInfo &ISEntry : llvm::reverse(IncludeMacroStack)) {
if (IsFileLexer(ISEntry))
if ((FileEnt = ISEntry.ThePPLexer->getFileEntry()))
- Includers.push_back(std::make_pair(FileEnt, FileEnt->getDir()));
+ Includers.push_back(std::make_pair(*FileEnt, FileEnt->getDir()));
}
}
}
@@ -864,10 +1016,10 @@ Optional<FileEntryRef> Preprocessor::LookupFile(
if (FromFile) {
// We're supposed to start looking from after a particular file. Search
// the include path until we find that file or run out of files.
- const DirectoryLookup *TmpCurDir = CurDir;
- const DirectoryLookup *TmpFromDir = nullptr;
- while (Optional<FileEntryRef> FE = HeaderInfo.LookupFile(
- Filename, FilenameLoc, isAngled, TmpFromDir, TmpCurDir,
+ ConstSearchDirIterator TmpCurDir = CurDir;
+ ConstSearchDirIterator TmpFromDir = nullptr;
+ while (OptionalFileEntryRef FE = HeaderInfo.LookupFile(
+ Filename, FilenameLoc, isAngled, TmpFromDir, &TmpCurDir,
Includers, SearchPath, RelativePath, RequestingModule,
SuggestedModule, /*IsMapped=*/nullptr,
/*IsFrameworkFound=*/nullptr, SkipCache)) {
@@ -884,31 +1036,22 @@ Optional<FileEntryRef> Preprocessor::LookupFile(
}
// Do a standard file entry lookup.
- Optional<FileEntryRef> FE = HeaderInfo.LookupFile(
- Filename, FilenameLoc, isAngled, FromDir, CurDir, Includers, SearchPath,
+ OptionalFileEntryRef FE = HeaderInfo.LookupFile(
+ Filename, FilenameLoc, isAngled, FromDir, &CurDir, Includers, SearchPath,
RelativePath, RequestingModule, SuggestedModule, IsMapped,
- IsFrameworkFound, SkipCache, BuildSystemModule);
- if (FE) {
- if (SuggestedModule && !LangOpts.AsmPreprocessor)
- HeaderInfo.getModuleMap().diagnoseHeaderInclusion(
- RequestingModule, RequestingModuleIsModuleInterface, FilenameLoc,
- Filename, &FE->getFileEntry());
+ IsFrameworkFound, SkipCache, BuildSystemModule, OpenFile, CacheFailures);
+ if (FE)
return FE;
- }
- const FileEntry *CurFileEnt;
+ OptionalFileEntryRef CurFileEnt;
// Otherwise, see if this is a subframework header. If so, this is relative
// to one of the headers on the #include stack. Walk the list of the current
// headers on the #include stack and pass them to HeaderInfo.
if (IsFileLexer()) {
if ((CurFileEnt = CurPPLexer->getFileEntry())) {
- if (Optional<FileEntryRef> FE = HeaderInfo.LookupSubframeworkHeader(
- Filename, CurFileEnt, SearchPath, RelativePath, RequestingModule,
+ if (OptionalFileEntryRef FE = HeaderInfo.LookupSubframeworkHeader(
+ Filename, *CurFileEnt, SearchPath, RelativePath, RequestingModule,
SuggestedModule)) {
- if (SuggestedModule && !LangOpts.AsmPreprocessor)
- HeaderInfo.getModuleMap().diagnoseHeaderInclusion(
- RequestingModule, RequestingModuleIsModuleInterface, FilenameLoc,
- Filename, &FE->getFileEntry());
return FE;
}
}
@@ -917,13 +1060,9 @@ Optional<FileEntryRef> Preprocessor::LookupFile(
for (IncludeStackInfo &ISEntry : llvm::reverse(IncludeMacroStack)) {
if (IsFileLexer(ISEntry)) {
if ((CurFileEnt = ISEntry.ThePPLexer->getFileEntry())) {
- if (Optional<FileEntryRef> FE = HeaderInfo.LookupSubframeworkHeader(
- Filename, CurFileEnt, SearchPath, RelativePath,
+ if (OptionalFileEntryRef FE = HeaderInfo.LookupSubframeworkHeader(
+ Filename, *CurFileEnt, SearchPath, RelativePath,
RequestingModule, SuggestedModule)) {
- if (SuggestedModule && !LangOpts.AsmPreprocessor)
- HeaderInfo.getModuleMap().diagnoseHeaderInclusion(
- RequestingModule, RequestingModuleIsModuleInterface,
- FilenameLoc, Filename, &FE->getFileEntry());
return FE;
}
}
@@ -931,7 +1070,7 @@ Optional<FileEntryRef> Preprocessor::LookupFile(
}
// Otherwise, we really couldn't find the file.
- return None;
+ return std::nullopt;
}
//===----------------------------------------------------------------------===//
@@ -1050,6 +1189,10 @@ void Preprocessor::HandleDirective(Token &Result) {
switch (Result.getKind()) {
case tok::eod:
+ // Ignore the null directive with regards to the multiple-include
+ // optimization, i.e. allow the null directive to appear outside of the
+ // include guard and still enable the multiple-include optimization.
+ CurPPLexer->MIOpt.SetReadToken(ReadAnyTokensBeforeDirective);
return; // null directive.
case tok::code_completion:
setCodeCompletionReached();
@@ -1058,8 +1201,12 @@ void Preprocessor::HandleDirective(Token &Result) {
CurPPLexer->getConditionalStackDepth() > 0);
return;
case tok::numeric_constant: // # 7 GNU line marker directive.
- if (getLangOpts().AsmPreprocessor)
- break; // # 4 is not a preprocessor directive in .S files.
+ // In a .S file "# 4" may be a comment so don't treat it as a preprocessor
+ // directive. However do permit it in the predefines file, as we use line
+ // markers to mark the builtin macros as being in a system header.
+ if (getLangOpts().AsmPreprocessor &&
+ SourceMgr.getFileID(SavedHash.getLocation()) != getPredefinesFileID())
+ break;
return HandleDigitDirective(Result);
default:
IdentifierInfo *II = Result.getIdentifierInfo();
@@ -1120,7 +1267,16 @@ void Preprocessor::HandleDirective(Token &Result) {
return HandleIncludeNextDirective(SavedHash.getLocation(), Result);
case tok::pp_warning:
- Diag(Result, diag::ext_pp_warning_directive);
+ if (LangOpts.CPlusPlus)
+ Diag(Result, LangOpts.CPlusPlus23
+ ? diag::warn_cxx23_compat_warning_directive
+ : diag::ext_pp_warning_directive)
+ << /*C++23*/ 1;
+ else
+ Diag(Result, LangOpts.C23 ? diag::warn_c23_compat_warning_directive
+ : diag::ext_pp_warning_directive)
+ << /*C23*/ 0;
+
return HandleUserDiagnosticDirective(Result, true);
case tok::pp_ident:
return HandleIdentSCCSDirective(Result);
@@ -1169,7 +1325,8 @@ void Preprocessor::HandleDirective(Token &Result) {
}
// If we reached here, the preprocessing token is not valid!
- Diag(Result, diag::err_pp_invalid_directive);
+ // Start suggesting if a similar directive found.
+ Diag(Result, diag::err_pp_invalid_directive) << 0;
// Read the rest of the PP line.
DiscardUntilEndOfDirective();
@@ -1281,7 +1438,7 @@ void Preprocessor::HandleLineDirective() {
} else {
// Parse and validate the string, converting it into a unique ID.
StringLiteralParser Literal(StrTok, *this);
- assert(Literal.isAscii() && "Didn't allow wide strings in");
+ assert(Literal.isOrdinary() && "Didn't allow wide strings in");
if (Literal.hadError) {
DiscardUntilEndOfDirective();
return;
@@ -1418,6 +1575,7 @@ void Preprocessor::HandleDigitDirective(Token &DigitTok) {
// If the StrTok is "eod", then it wasn't present. Otherwise, it must be a
// string followed by eod.
if (StrTok.is(tok::eod)) {
+ Diag(StrTok, diag::ext_pp_gnu_line_directive);
// Treat this like "#line NN", which doesn't change file characteristics.
FileKind = SourceMgr.getFileCharacteristic(DigitTok.getLocation());
} else if (StrTok.isNot(tok::string_literal)) {
@@ -1431,7 +1589,7 @@ void Preprocessor::HandleDigitDirective(Token &DigitTok) {
} else {
// Parse and validate the string, converting it into a unique ID.
StringLiteralParser Literal(StrTok, *this);
- assert(Literal.isAscii() && "Didn't allow wide strings in");
+ assert(Literal.isOrdinary() && "Didn't allow wide strings in");
if (Literal.hadError) {
DiscardUntilEndOfDirective();
return;
@@ -1441,11 +1599,18 @@ void Preprocessor::HandleDigitDirective(Token &DigitTok) {
DiscardUntilEndOfDirective();
return;
}
- FilenameID = SourceMgr.getLineTableFilenameID(Literal.GetString());
// If a filename was present, read any flags that are present.
if (ReadLineMarkerFlags(IsFileEntry, IsFileExit, FileKind, *this))
return;
+ if (!SourceMgr.isWrittenInBuiltinFile(DigitTok.getLocation()) &&
+ !SourceMgr.isWrittenInCommandLineFile(DigitTok.getLocation()))
+ Diag(StrTok, diag::ext_pp_gnu_line_directive);
+
+ // Exiting to an empty string means pop to the including file, so leave
+ // FilenameID as -1 in that case.
+ if (!(IsFileExit && Literal.GetString().empty()))
+ FilenameID = SourceMgr.getLineTableFilenameID(Literal.GetString());
}
// Create a line note with this information.
@@ -1656,22 +1821,14 @@ static void diagnoseAutoModuleImport(
Preprocessor &PP, SourceLocation HashLoc, Token &IncludeTok,
ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> Path,
SourceLocation PathEnd) {
- StringRef ImportKeyword;
- if (PP.getLangOpts().ObjC)
- ImportKeyword = "@import";
- else if (PP.getLangOpts().ModulesTS || PP.getLangOpts().CPlusPlusModules)
- ImportKeyword = "import";
- else
- return; // no import syntax available
-
SmallString<128> PathString;
for (size_t I = 0, N = Path.size(); I != N; ++I) {
if (I)
PathString += '.';
PathString += Path[I].first->getName();
}
- int IncludeKind = 0;
+ int IncludeKind = 0;
switch (IncludeTok.getIdentifierInfo()->getPPKeywordID()) {
case tok::pp_include:
IncludeKind = 0;
@@ -1693,23 +1850,26 @@ static void diagnoseAutoModuleImport(
llvm_unreachable("unknown include directive kind");
}
- CharSourceRange ReplaceRange(SourceRange(HashLoc, PathEnd),
- /*IsTokenRange=*/false);
- PP.Diag(HashLoc, diag::warn_auto_module_import)
- << IncludeKind << PathString
- << FixItHint::CreateReplacement(
- ReplaceRange, (ImportKeyword + " " + PathString + ";").str());
+ PP.Diag(HashLoc, diag::remark_pp_include_directive_modular_translation)
+ << IncludeKind << PathString;
}
// Given a vector of path components and a string containing the real
// path to the file, build a properly-cased replacement in the vector,
// and return true if the replacement should be suggested.
static bool trySimplifyPath(SmallVectorImpl<StringRef> &Components,
- StringRef RealPathName) {
+ StringRef RealPathName,
+ llvm::sys::path::Style Separator) {
auto RealPathComponentIter = llvm::sys::path::rbegin(RealPathName);
auto RealPathComponentEnd = llvm::sys::path::rend(RealPathName);
int Cnt = 0;
bool SuggestReplacement = false;
+
+ auto IsSep = [Separator](StringRef Component) {
+ return Component.size() == 1 &&
+ llvm::sys::path::is_separator(Component[0], Separator);
+ };
+
// Below is a best-effort to handle ".." in paths. It is admittedly
// not 100% correct in the presence of symlinks.
for (auto &Component : llvm::reverse(Components)) {
@@ -1719,10 +1879,11 @@ static bool trySimplifyPath(SmallVectorImpl<StringRef> &Components,
} else if (Cnt) {
--Cnt;
} else if (RealPathComponentIter != RealPathComponentEnd) {
- if (Component != *RealPathComponentIter) {
- // If these path components differ by more than just case, then we
- // may be looking at symlinked paths. Bail on this diagnostic to avoid
- // noisy false positives.
+ if (!IsSep(Component) && !IsSep(*RealPathComponentIter) &&
+ Component != *RealPathComponentIter) {
+ // If these non-separator path components differ by more than just case,
+ // then we may be looking at symlinked paths. Bail on this diagnostic to
+ // avoid noisy false positives.
SuggestReplacement =
RealPathComponentIter->equals_insensitive(Component);
if (!SuggestReplacement)
@@ -1737,30 +1898,67 @@ static bool trySimplifyPath(SmallVectorImpl<StringRef> &Components,
bool Preprocessor::checkModuleIsAvailable(const LangOptions &LangOpts,
const TargetInfo &TargetInfo,
- DiagnosticsEngine &Diags, Module *M) {
+ const Module &M,
+ DiagnosticsEngine &Diags) {
Module::Requirement Requirement;
Module::UnresolvedHeaderDirective MissingHeader;
Module *ShadowingModule = nullptr;
- if (M->isAvailable(LangOpts, TargetInfo, Requirement, MissingHeader,
- ShadowingModule))
+ if (M.isAvailable(LangOpts, TargetInfo, Requirement, MissingHeader,
+ ShadowingModule))
return false;
if (MissingHeader.FileNameLoc.isValid()) {
Diags.Report(MissingHeader.FileNameLoc, diag::err_module_header_missing)
<< MissingHeader.IsUmbrella << MissingHeader.FileName;
} else if (ShadowingModule) {
- Diags.Report(M->DefinitionLoc, diag::err_module_shadowed) << M->Name;
+ Diags.Report(M.DefinitionLoc, diag::err_module_shadowed) << M.Name;
Diags.Report(ShadowingModule->DefinitionLoc,
diag::note_previous_definition);
} else {
// FIXME: Track the location at which the requirement was specified, and
// use it here.
- Diags.Report(M->DefinitionLoc, diag::err_module_unavailable)
- << M->getFullModuleName() << Requirement.second << Requirement.first;
+ Diags.Report(M.DefinitionLoc, diag::err_module_unavailable)
+ << M.getFullModuleName() << Requirement.second << Requirement.first;
}
return true;
}
+std::pair<ConstSearchDirIterator, const FileEntry *>
+Preprocessor::getIncludeNextStart(const Token &IncludeNextTok) const {
+ // #include_next is like #include, except that we start searching after
+ // the current found directory. If we can't do this, issue a
+ // diagnostic.
+ ConstSearchDirIterator Lookup = CurDirLookup;
+ const FileEntry *LookupFromFile = nullptr;
+
+ if (isInPrimaryFile() && LangOpts.IsHeaderFile) {
+ // If the main file is a header, then it's either for PCH/AST generation,
+ // or libclang opened it. Either way, handle it as a normal include below
+ // and do not complain about include_next.
+ } else if (isInPrimaryFile()) {
+ Lookup = nullptr;
+ Diag(IncludeNextTok, diag::pp_include_next_in_primary);
+ } else if (CurLexerSubmodule) {
+ // Start looking up in the directory *after* the one in which the current
+ // file would be found, if any.
+ assert(CurPPLexer && "#include_next directive in macro?");
+ if (auto FE = CurPPLexer->getFileEntry())
+ LookupFromFile = *FE;
+ Lookup = nullptr;
+ } else if (!Lookup) {
+ // The current file was not found by walking the include path. Either it
+ // is the primary file (handled above), or it was found by absolute path,
+ // or it was found relative to such a file.
+ // FIXME: Track enough information so we know which case we're in.
+ Diag(IncludeNextTok, diag::pp_include_next_absolute_path);
+ } else {
+ // Start looking up in the next directory.
+ ++Lookup;
+ }
+
+ return {Lookup, LookupFromFile};
+}
+
/// HandleIncludeDirective - The "\#include" tokens have just been read, read
/// the file to be included from the lexer, then include it! This is a common
/// routine with functionality shared between \#include, \#include_next and
@@ -1768,7 +1966,7 @@ bool Preprocessor::checkModuleIsAvailable(const LangOptions &LangOpts,
/// specifies the file to start searching from.
void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
Token &IncludeTok,
- const DirectoryLookup *LookupFrom,
+ ConstSearchDirIterator LookupFrom,
const FileEntry *LookupFromFile) {
Token FilenameTok;
if (LexHeaderName(FilenameTok))
@@ -1798,6 +1996,10 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
EnterAnnotationToken(SourceRange(HashLoc, EndLoc),
tok::annot_module_begin, Action.ModuleForHeader);
break;
+ case ImportAction::HeaderUnitImport:
+ EnterAnnotationToken(SourceRange(HashLoc, EndLoc), tok::annot_header_unit,
+ Action.ModuleForHeader);
+ break;
case ImportAction::ModuleImport:
EnterAnnotationToken(SourceRange(HashLoc, EndLoc),
tok::annot_module_include, Action.ModuleForHeader);
@@ -1812,58 +2014,55 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
}
}
-Optional<FileEntryRef> Preprocessor::LookupHeaderIncludeOrImport(
- const DirectoryLookup *&CurDir, StringRef& Filename,
+OptionalFileEntryRef Preprocessor::LookupHeaderIncludeOrImport(
+ ConstSearchDirIterator *CurDir, StringRef &Filename,
SourceLocation FilenameLoc, CharSourceRange FilenameRange,
const Token &FilenameTok, bool &IsFrameworkFound, bool IsImportDecl,
- bool &IsMapped, const DirectoryLookup *LookupFrom,
- const FileEntry *LookupFromFile, StringRef& LookupFilename,
+ bool &IsMapped, ConstSearchDirIterator LookupFrom,
+ const FileEntry *LookupFromFile, StringRef &LookupFilename,
SmallVectorImpl<char> &RelativePath, SmallVectorImpl<char> &SearchPath,
ModuleMap::KnownHeader &SuggestedModule, bool isAngled) {
- Optional<FileEntryRef> File = LookupFile(
- FilenameLoc, LookupFilename,
- isAngled, LookupFrom, LookupFromFile, CurDir,
+ auto DiagnoseHeaderInclusion = [&](FileEntryRef FE) {
+ if (LangOpts.AsmPreprocessor)
+ return;
+
+ Module *RequestingModule = getModuleForLocation(
+ FilenameLoc, LangOpts.ModulesValidateTextualHeaderIncludes);
+ bool RequestingModuleIsModuleInterface =
+ !SourceMgr.isInMainFile(FilenameLoc);
+
+ HeaderInfo.getModuleMap().diagnoseHeaderInclusion(
+ RequestingModule, RequestingModuleIsModuleInterface, FilenameLoc,
+ Filename, FE);
+ };
+
+ OptionalFileEntryRef File = LookupFile(
+ FilenameLoc, LookupFilename, isAngled, LookupFrom, LookupFromFile, CurDir,
Callbacks ? &SearchPath : nullptr, Callbacks ? &RelativePath : nullptr,
&SuggestedModule, &IsMapped, &IsFrameworkFound);
- if (File)
+ if (File) {
+ DiagnoseHeaderInclusion(*File);
return File;
-
- if (Callbacks) {
- // Give the clients a chance to recover.
- SmallString<128> RecoveryPath;
- if (Callbacks->FileNotFound(Filename, RecoveryPath)) {
- if (auto DE = FileMgr.getOptionalDirectoryRef(RecoveryPath)) {
- // Add the recovery path to the list of search paths.
- DirectoryLookup DL(*DE, SrcMgr::C_User, false);
- HeaderInfo.AddSearchPath(DL, isAngled);
-
- // Try the lookup again, skipping the cache.
- Optional<FileEntryRef> File = LookupFile(
- FilenameLoc,
- LookupFilename, isAngled,
- LookupFrom, LookupFromFile, CurDir, nullptr, nullptr,
- &SuggestedModule, &IsMapped, /*IsFrameworkFound=*/nullptr,
- /*SkipCache*/ true);
- if (File)
- return File;
- }
- }
}
+ // Give the clients a chance to silently skip this include.
+ if (Callbacks && Callbacks->FileNotFound(Filename))
+ return std::nullopt;
+
if (SuppressIncludeNotFoundError)
- return None;
+ return std::nullopt;
// If the file could not be located and it was included via angle
// brackets, we can attempt a lookup as though it were a quoted path to
// provide the user with a possible fixit.
if (isAngled) {
- Optional<FileEntryRef> File = LookupFile(
- FilenameLoc, LookupFilename,
- false, LookupFrom, LookupFromFile, CurDir,
+ OptionalFileEntryRef File = LookupFile(
+ FilenameLoc, LookupFilename, false, LookupFrom, LookupFromFile, CurDir,
Callbacks ? &SearchPath : nullptr, Callbacks ? &RelativePath : nullptr,
&SuggestedModule, &IsMapped,
/*IsFrameworkFound=*/nullptr);
if (File) {
+ DiagnoseHeaderInclusion(*File);
Diag(FilenameTok, diag::err_pp_file_not_found_angled_include_not_fatal)
<< Filename << IsImportDecl
<< FixItHint::CreateReplacement(FilenameRange,
@@ -1888,12 +2087,13 @@ Optional<FileEntryRef> Preprocessor::LookupHeaderIncludeOrImport(
StringRef TypoCorrectionName = CorrectTypoFilename(Filename);
StringRef TypoCorrectionLookupName = CorrectTypoFilename(LookupFilename);
- Optional<FileEntryRef> File = LookupFile(
- FilenameLoc, TypoCorrectionLookupName, isAngled, LookupFrom, LookupFromFile,
- CurDir, Callbacks ? &SearchPath : nullptr,
+ OptionalFileEntryRef File = LookupFile(
+ FilenameLoc, TypoCorrectionLookupName, isAngled, LookupFrom,
+ LookupFromFile, CurDir, Callbacks ? &SearchPath : nullptr,
Callbacks ? &RelativePath : nullptr, &SuggestedModule, &IsMapped,
/*IsFrameworkFound=*/nullptr);
if (File) {
+ DiagnoseHeaderInclusion(*File);
auto Hint =
isAngled ? FixItHint::CreateReplacement(
FilenameRange, "<" + TypoCorrectionName.str() + ">")
@@ -1910,7 +2110,7 @@ Optional<FileEntryRef> Preprocessor::LookupHeaderIncludeOrImport(
}
// If the file is still not found, just go with the vanilla diagnostic
- assert(!File.hasValue() && "expected missing file");
+ assert(!File && "expected missing file");
Diag(FilenameTok, diag::err_pp_file_not_found)
<< OriginalFilename << FilenameRange;
if (IsFrameworkFound) {
@@ -1926,7 +2126,7 @@ Optional<FileEntryRef> Preprocessor::LookupHeaderIncludeOrImport(
<< CacheEntry.Directory->getName();
}
- return None;
+ return std::nullopt;
}
/// Handle either a #include-like directive or an import declaration that names
@@ -1943,7 +2143,7 @@ Optional<FileEntryRef> Preprocessor::LookupHeaderIncludeOrImport(
/// lookup.
Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
SourceLocation HashLoc, Token &IncludeTok, Token &FilenameTok,
- SourceLocation EndLoc, const DirectoryLookup *LookupFrom,
+ SourceLocation EndLoc, ConstSearchDirIterator LookupFrom,
const FileEntry *LookupFromFile) {
SmallString<128> FilenameBuffer;
StringRef Filename = getSpelling(FilenameTok, FilenameBuffer);
@@ -1993,7 +2193,7 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
// Search include directories.
bool IsMapped = false;
bool IsFrameworkFound = false;
- const DirectoryLookup *CurDir;
+ ConstSearchDirIterator CurDir = nullptr;
SmallString<1024> SearchPath;
SmallString<1024> RelativePath;
// We get the raw path only if we have 'Callbacks' to which we later pass
@@ -2002,30 +2202,22 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
SourceLocation FilenameLoc = FilenameTok.getLocation();
StringRef LookupFilename = Filename;
-#ifdef _WIN32
- llvm::sys::path::Style BackslashStyle = llvm::sys::path::Style::windows;
-#else
// Normalize slashes when compiling with -fms-extensions on non-Windows. This
// is unnecessary on Windows since the filesystem there handles backslashes.
SmallString<128> NormalizedPath;
- llvm::sys::path::Style BackslashStyle = llvm::sys::path::Style::posix;
- if (LangOpts.MicrosoftExt) {
+ llvm::sys::path::Style BackslashStyle = llvm::sys::path::Style::native;
+ if (is_style_posix(BackslashStyle) && LangOpts.MicrosoftExt) {
NormalizedPath = Filename.str();
llvm::sys::path::native(NormalizedPath);
LookupFilename = NormalizedPath;
BackslashStyle = llvm::sys::path::Style::windows;
}
-#endif
- Optional<FileEntryRef> File = LookupHeaderIncludeOrImport(
- CurDir, Filename, FilenameLoc, FilenameRange, FilenameTok,
+ OptionalFileEntryRef File = LookupHeaderIncludeOrImport(
+ &CurDir, Filename, FilenameLoc, FilenameRange, FilenameTok,
IsFrameworkFound, IsImportDecl, IsMapped, LookupFrom, LookupFromFile,
LookupFilename, RelativePath, SearchPath, SuggestedModule, isAngled);
- // Record the header's filename for later use.
- if (File)
- CurLexer->addInclude(OriginalFilename, File->getFileEntry(), FilenameLoc);
-
if (usingPCHWithThroughHeader() && SkippingUntilPCHThroughHeader) {
if (File && isPCHThroughHeader(&File->getFileEntry()))
SkippingUntilPCHThroughHeader = false;
@@ -2036,6 +2228,17 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
// known to have no effect beyond its effect on module visibility -- that is,
// if it's got an include guard that is already defined, set to Import if it
// is a modular header we've already built and should import.
+
+ // For C++20 Modules
+ // [cpp.include]/7 If the header identified by the header-name denotes an
+ // importable header, it is implementation-defined whether the #include
+ // preprocessing directive is instead replaced by an import directive.
+ // For this implementation, the translation is permitted when we are parsing
+ // the Global Module Fragment, and not otherwise (the cases where it would be
+ // valid to replace an include with an import are highly constrained once in
+ // named module purview; this choice avoids considerable complexity in
+ // determining valid cases).
+
enum { Enter, Import, Skip, IncludeLimitReached } Action = Enter;
if (PPOpts->SingleFileParseMode)
@@ -2045,22 +2248,43 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
// include cycle. Don't enter already processed files again as it can lead to
// reaching the max allowed include depth again.
if (Action == Enter && HasReachedMaxIncludeDepth && File &&
- HeaderInfo.getFileInfo(&File->getFileEntry()).NumIncludes)
+ alreadyIncluded(*File))
Action = IncludeLimitReached;
+ // FIXME: We do not have a good way to disambiguate C++ clang modules from
+ // C++ standard modules (other than use/non-use of Header Units).
+ Module *SM = SuggestedModule.getModule();
+
+ bool MaybeTranslateInclude =
+ Action == Enter && File && SM && !SM->isForBuilding(getLangOpts());
+
+ // Maybe a usable Header Unit
+ bool UsableHeaderUnit = false;
+ if (getLangOpts().CPlusPlusModules && SM && SM->isHeaderUnit()) {
+ if (TrackGMFState.inGMF() || IsImportDecl)
+ UsableHeaderUnit = true;
+ else if (!IsImportDecl) {
+ // This is a Header Unit that we do not include-translate
+ SuggestedModule = ModuleMap::KnownHeader();
+ SM = nullptr;
+ }
+ }
+ // Maybe a usable clang header module.
+ bool UsableClangHeaderModule =
+ (getLangOpts().CPlusPlusModules || getLangOpts().Modules) && SM &&
+ !SM->isHeaderUnit();
+
// Determine whether we should try to import the module for this #include, if
// there is one. Don't do so if precompiled module support is disabled or we
// are processing this module textually (because we're building the module).
- if (Action == Enter && File && SuggestedModule && getLangOpts().Modules &&
- !isForModuleBuilding(SuggestedModule.getModule(),
- getLangOpts().CurrentModule,
- getLangOpts().ModuleName)) {
+ if (MaybeTranslateInclude && (UsableHeaderUnit || UsableClangHeaderModule)) {
// If this include corresponds to a module but that module is
// unavailable, diagnose the situation and bail out.
// FIXME: Remove this; loadModule does the same check (but produces
// slightly worse diagnostics).
- if (checkModuleIsAvailable(getLangOpts(), getTargetInfo(), getDiagnostics(),
- SuggestedModule.getModule())) {
+ if (checkModuleIsAvailable(getLangOpts(), getTargetInfo(),
+ *SuggestedModule.getModule(),
+ getDiagnostics())) {
Diag(FilenameTok.getLocation(),
diag::note_implicit_top_level_module_import_here)
<< SuggestedModule.getModule()->getTopLevelModuleName();
@@ -2071,7 +2295,7 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
// FIXME: Should we have a second loadModule() overload to avoid this
// extra lookup step?
SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Path;
- for (Module *Mod = SuggestedModule.getModule(); Mod; Mod = Mod->Parent)
+ for (Module *Mod = SM; Mod; Mod = Mod->Parent)
Path.push_back(std::make_pair(getIdentifierInfo(Mod->Name),
FilenameTok.getLocation()));
std::reverse(Path.begin(), Path.end());
@@ -2093,11 +2317,14 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
if (Imported) {
Action = Import;
} else if (Imported.isMissingExpected()) {
+ markClangModuleAsAffecting(
+ static_cast<Module *>(Imported)->getTopLevelModule());
// We failed to find a submodule that we assumed would exist (because it
// was in the directory of an umbrella header, for instance), but no
// actual module containing it exists (because the umbrella header is
// incomplete). Treat this as a textual inclusion.
SuggestedModule = ModuleMap::KnownHeader();
+ SM = nullptr;
} else if (Imported.isConfigMismatch()) {
// On a configuration mismatch, enter the header textually. We still know
// that it's part of the corresponding module.
@@ -2121,8 +2348,7 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
SrcMgr::CharacteristicKind FileCharacter =
SourceMgr.getFileCharacteristic(FilenameTok.getLocation());
if (File)
- FileCharacter = std::max(HeaderInfo.getFileDirFlavor(&File->getFileEntry()),
- FileCharacter);
+ FileCharacter = std::max(HeaderInfo.getFileDirFlavor(*File), FileCharacter);
// If this is a '#import' or an import-declaration, don't re-enter the file.
//
@@ -2133,12 +2359,17 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
IsImportDecl ||
IncludeTok.getIdentifierInfo()->getPPKeywordID() == tok::pp_import;
+ bool IsFirstIncludeOfFile = false;
+
// Ask HeaderInfo if we should enter this #include file. If not, #including
// this file will have no effect.
if (Action == Enter && File &&
- !HeaderInfo.ShouldEnterIncludeFile(*this, &File->getFileEntry(),
- EnterOnce, getLangOpts().Modules,
- SuggestedModule.getModule())) {
+ !HeaderInfo.ShouldEnterIncludeFile(*this, *File, EnterOnce,
+ getLangOpts().Modules, SM,
+ IsFirstIncludeOfFile)) {
+ // C++ standard modules:
+ // If we are not in the GMF, then we textually include only
+ // clang modules:
// Even if we've already preprocessed this header once and know that we
// don't need to see its contents again, we still need to import it if it's
// modular because we might not have imported it from this submodule before.
@@ -2146,7 +2377,10 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
// FIXME: We don't do this when compiling a PCH because the AST
// serialization layer can't cope with it. This means we get local
// submodule visibility semantics wrong in that case.
- Action = (SuggestedModule && !getLangOpts().CompilingPCH) ? Import : Skip;
+ if (UsableHeaderUnit && !getLangOpts().CompilingPCH)
+ Action = TrackGMFState.inGMF() ? Import : Skip;
+ else
+ Action = (SuggestedModule && !getLangOpts().CompilingPCH) ? Import : Skip;
}
// Check for circular inclusion of the main file.
@@ -2164,11 +2398,11 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
if (Callbacks && !IsImportDecl) {
// Notify the callback object that we've seen an inclusion directive.
// FIXME: Use a different callback for a pp-import?
- Callbacks->InclusionDirective(
- HashLoc, IncludeTok, LookupFilename, isAngled, FilenameRange,
- File ? &File->getFileEntry() : nullptr, SearchPath, RelativePath,
- Action == Import ? SuggestedModule.getModule() : nullptr,
- FileCharacter);
+ Callbacks->InclusionDirective(HashLoc, IncludeTok, LookupFilename, isAngled,
+ FilenameRange, File, SearchPath, RelativePath,
+ Action == Import ? SuggestedModule.getModule()
+ : nullptr,
+ FileCharacter);
if (Action == Skip && File)
Callbacks->FileSkipped(*File, FilenameTok, FileCharacter);
}
@@ -2225,7 +2459,7 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
}
#endif
- if (trySimplifyPath(Components, RealPathName)) {
+ if (trySimplifyPath(Components, RealPathName, BackslashStyle)) {
SmallString<128> Path;
Path.reserve(Name.size()+2);
Path.push_back(isAngled ? '<' : '"');
@@ -2248,7 +2482,7 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
// got copied when the C: was processed and we want to skip that entry.
if (!(Component.size() == 1 && IsSep(Component[0])))
Path.append(Component);
- else if (!Path.empty())
+ else if (Path.size() != 1)
continue;
// Append the separator(s) the user used, or the close quote
@@ -2283,8 +2517,8 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
switch (Action) {
case Skip:
// If we don't need to enter the file, stop now.
- if (Module *M = SuggestedModule.getModule())
- return {ImportAction::SkippedModuleImport, M};
+ if (SM)
+ return {ImportAction::SkippedModuleImport, SM};
return {ImportAction::None};
case IncludeLimitReached:
@@ -2294,16 +2528,15 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
case Import: {
// If this is a module import, make it visible if needed.
- Module *M = SuggestedModule.getModule();
- assert(M && "no module to import");
+ assert(SM && "no module to import");
- makeModuleVisible(M, EndLoc);
+ makeModuleVisible(SM, EndLoc);
if (IncludeTok.getIdentifierInfo()->getPPKeywordID() ==
tok::pp___include_macros)
return {ImportAction::None};
- return {ImportAction::ModuleImport, M};
+ return {ImportAction::ModuleImport, SM};
}
case Enter:
@@ -2317,6 +2550,10 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
return {ImportAction::None};
}
+ if (isAngled && isInNamedModule())
+ Diag(FilenameTok, diag::warn_pp_include_angled_in_module_purview)
+ << getNamedModuleName();
+
// Look up the file, create a File ID for it.
SourceLocation IncludePos = FilenameTok.getLocation();
// If the filename string was the result of macro expansions, set the include
@@ -2330,17 +2567,19 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
}
// If all is good, enter the new file!
- if (EnterSourceFile(FID, CurDir, FilenameTok.getLocation()))
+ if (EnterSourceFile(FID, CurDir, FilenameTok.getLocation(),
+ IsFirstIncludeOfFile))
return {ImportAction::None};
// Determine if we're switching to building a new submodule, and which one.
- if (auto *M = SuggestedModule.getModule()) {
- if (M->getTopLevelModule()->ShadowingModule) {
+ // This does not apply for C++20 modules header units.
+ if (SM && !SM->isHeaderUnit()) {
+ if (SM->getTopLevelModule()->ShadowingModule) {
// We are building a submodule that belongs to a shadowed module. This
// means we find header files in the shadowed module.
- Diag(M->DefinitionLoc, diag::err_module_build_shadowed_submodule)
- << M->getFullModuleName();
- Diag(M->getTopLevelModule()->ShadowingModule->DefinitionLoc,
+ Diag(SM->DefinitionLoc, diag::err_module_build_shadowed_submodule)
+ << SM->getFullModuleName();
+ Diag(SM->getTopLevelModule()->ShadowingModule->DefinitionLoc,
diag::note_previous_definition);
return {ImportAction::None};
}
@@ -2352,23 +2591,21 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
// that behaves the same as the header would behave in a compilation using
// that PCH, which means we should enter the submodule. We need to teach
// the AST serialization layer to deal with the resulting AST.
- if (getLangOpts().CompilingPCH &&
- isForModuleBuilding(M, getLangOpts().CurrentModule,
- getLangOpts().ModuleName))
+ if (getLangOpts().CompilingPCH && SM->isForBuilding(getLangOpts()))
return {ImportAction::None};
assert(!CurLexerSubmodule && "should not have marked this as a module yet");
- CurLexerSubmodule = M;
+ CurLexerSubmodule = SM;
// Let the macro handling code know that any future macros are within
// the new submodule.
- EnterSubmodule(M, EndLoc, /*ForPragma*/false);
+ EnterSubmodule(SM, EndLoc, /*ForPragma*/ false);
// Let the parser know that any future declarations are within the new
// submodule.
// FIXME: There's no point doing this if we're handling a #__include_macros
// directive.
- return {ImportAction::ModuleBegin, M};
+ return {ImportAction::ModuleBegin, SM};
}
assert(!IsImportDecl && "failed to diagnose missing module for import decl");
@@ -2381,34 +2618,9 @@ void Preprocessor::HandleIncludeNextDirective(SourceLocation HashLoc,
Token &IncludeNextTok) {
Diag(IncludeNextTok, diag::ext_pp_include_next_directive);
- // #include_next is like #include, except that we start searching after
- // the current found directory. If we can't do this, issue a
- // diagnostic.
- const DirectoryLookup *Lookup = CurDirLookup;
- const FileEntry *LookupFromFile = nullptr;
- if (isInPrimaryFile() && LangOpts.IsHeaderFile) {
- // If the main file is a header, then it's either for PCH/AST generation,
- // or libclang opened it. Either way, handle it as a normal include below
- // and do not complain about include_next.
- } else if (isInPrimaryFile()) {
- Lookup = nullptr;
- Diag(IncludeNextTok, diag::pp_include_next_in_primary);
- } else if (CurLexerSubmodule) {
- // Start looking up in the directory *after* the one in which the current
- // file would be found, if any.
- assert(CurPPLexer && "#include_next directive in macro?");
- LookupFromFile = CurPPLexer->getFileEntry();
- Lookup = nullptr;
- } else if (!Lookup) {
- // The current file was not found by walking the include path. Either it
- // is the primary file (handled above), or it was found by absolute path,
- // or it was found relative to such a file.
- // FIXME: Track enough information so we know which case we're in.
- Diag(IncludeNextTok, diag::pp_include_next_absolute_path);
- } else {
- // Start looking up in the next directory.
- ++Lookup;
- }
+ ConstSearchDirIterator Lookup = nullptr;
+ const FileEntry *LookupFromFile;
+ std::tie(Lookup, LookupFromFile) = getIncludeNextStart(IncludeNextTok);
return HandleIncludeDirective(HashLoc, IncludeNextTok, Lookup,
LookupFromFile);
@@ -2479,7 +2691,7 @@ bool Preprocessor::ReadMacroParameterList(MacroInfo *MI, Token &Tok) {
SmallVector<IdentifierInfo*, 32> Parameters;
while (true) {
- LexUnexpandedToken(Tok);
+ LexUnexpandedNonComment(Tok);
switch (Tok.getKind()) {
case tok::r_paren:
// Found the end of the parameter list.
@@ -2500,7 +2712,7 @@ bool Preprocessor::ReadMacroParameterList(MacroInfo *MI, Token &Tok) {
}
// Lex the token after the identifier.
- LexUnexpandedToken(Tok);
+ LexUnexpandedNonComment(Tok);
if (Tok.isNot(tok::r_paren)) {
Diag(Tok, diag::err_pp_missing_rparen_in_macro_def);
return true;
@@ -2525,7 +2737,7 @@ bool Preprocessor::ReadMacroParameterList(MacroInfo *MI, Token &Tok) {
// If this is already used as a parameter, it is used multiple times (e.g.
// #define X(A,A.
- if (llvm::find(Parameters, II) != Parameters.end()) { // C99 6.10.3p6
+ if (llvm::is_contained(Parameters, II)) { // C99 6.10.3p6
Diag(Tok, diag::err_pp_duplicate_name_in_arg_list) << II;
return true;
}
@@ -2534,7 +2746,7 @@ bool Preprocessor::ReadMacroParameterList(MacroInfo *MI, Token &Tok) {
Parameters.push_back(II);
// Lex the token after the identifier.
- LexUnexpandedToken(Tok);
+ LexUnexpandedNonComment(Tok);
switch (Tok.getKind()) {
default: // #define X(A B
@@ -2550,7 +2762,7 @@ bool Preprocessor::ReadMacroParameterList(MacroInfo *MI, Token &Tok) {
Diag(Tok, diag::ext_named_variadic_macro);
// Lex the token after the identifier.
- LexUnexpandedToken(Tok);
+ LexUnexpandedNonComment(Tok);
if (Tok.isNot(tok::r_paren)) {
Diag(Tok, diag::err_pp_missing_rparen_in_macro_def);
return true;
@@ -2584,14 +2796,14 @@ static bool isConfigurationPattern(Token &MacroName, MacroInfo *MI,
return false;
StringRef ValueText = II->getName();
StringRef TrimmedValue = ValueText;
- if (!ValueText.startswith("__")) {
- if (ValueText.startswith("_"))
+ if (!ValueText.starts_with("__")) {
+ if (ValueText.starts_with("_"))
TrimmedValue = TrimmedValue.drop_front(1);
else
return false;
} else {
TrimmedValue = TrimmedValue.drop_front(2);
- if (TrimmedValue.endswith("__"))
+ if (TrimmedValue.ends_with("__"))
TrimmedValue = TrimmedValue.drop_back(2);
}
return TrimmedValue.equals(MacroText);
@@ -2695,12 +2907,14 @@ MacroInfo *Preprocessor::ReadOptionalMacroParameterListAndBody(
if (!Tok.is(tok::eod))
LastTok = Tok;
+ SmallVector<Token, 16> Tokens;
+
// Read the rest of the macro body.
if (MI->isObjectLike()) {
// Object-like macros are very simple, just read their body.
while (Tok.isNot(tok::eod)) {
LastTok = Tok;
- MI->AddTokenToBody(Tok);
+ Tokens.push_back(Tok);
// Get the next token of the macro.
LexUnexpandedToken(Tok);
}
@@ -2715,7 +2929,7 @@ MacroInfo *Preprocessor::ReadOptionalMacroParameterListAndBody(
LastTok = Tok;
if (!Tok.isOneOf(tok::hash, tok::hashat, tok::hashhash)) {
- MI->AddTokenToBody(Tok);
+ Tokens.push_back(Tok);
if (VAOCtx.isVAOptToken(Tok)) {
// If we're already within a VAOPT, emit an error.
@@ -2729,7 +2943,7 @@ MacroInfo *Preprocessor::ReadOptionalMacroParameterListAndBody(
Diag(Tok, diag::err_pp_missing_lparen_in_vaopt_use);
return nullptr;
}
- MI->AddTokenToBody(Tok);
+ Tokens.push_back(Tok);
VAOCtx.sawVAOptFollowedByOpeningParens(Tok.getLocation());
LexUnexpandedToken(Tok);
if (Tok.is(tok::hashhash)) {
@@ -2740,10 +2954,10 @@ MacroInfo *Preprocessor::ReadOptionalMacroParameterListAndBody(
} else if (VAOCtx.isInVAOpt()) {
if (Tok.is(tok::r_paren)) {
if (VAOCtx.sawClosingParen()) {
- const unsigned NumTokens = MI->getNumTokens();
- assert(NumTokens >= 3 && "Must have seen at least __VA_OPT__( "
- "and a subsequent tok::r_paren");
- if (MI->getReplacementToken(NumTokens - 2).is(tok::hashhash)) {
+ assert(Tokens.size() >= 3 &&
+ "Must have seen at least __VA_OPT__( "
+ "and a subsequent tok::r_paren");
+ if (Tokens[Tokens.size() - 2].is(tok::hashhash)) {
Diag(Tok, diag::err_vaopt_paste_at_end);
return nullptr;
}
@@ -2762,7 +2976,7 @@ MacroInfo *Preprocessor::ReadOptionalMacroParameterListAndBody(
// things.
if (getLangOpts().TraditionalCPP) {
Tok.setKind(tok::unknown);
- MI->AddTokenToBody(Tok);
+ Tokens.push_back(Tok);
// Get the next token of the macro.
LexUnexpandedToken(Tok);
@@ -2778,17 +2992,16 @@ MacroInfo *Preprocessor::ReadOptionalMacroParameterListAndBody(
LexUnexpandedToken(Tok);
if (Tok.is(tok::eod)) {
- MI->AddTokenToBody(LastTok);
+ Tokens.push_back(LastTok);
break;
}
- unsigned NumTokens = MI->getNumTokens();
- if (NumTokens && Tok.getIdentifierInfo() == Ident__VA_ARGS__ &&
- MI->getReplacementToken(NumTokens-1).is(tok::comma))
+ if (!Tokens.empty() && Tok.getIdentifierInfo() == Ident__VA_ARGS__ &&
+ Tokens[Tokens.size() - 1].is(tok::comma))
MI->setHasCommaPasting();
// Things look ok, add the '##' token to the macro.
- MI->AddTokenToBody(LastTok);
+ Tokens.push_back(LastTok);
continue;
}
@@ -2807,7 +3020,7 @@ MacroInfo *Preprocessor::ReadOptionalMacroParameterListAndBody(
// confused.
if (getLangOpts().AsmPreprocessor && Tok.isNot(tok::eod)) {
LastTok.setKind(tok::unknown);
- MI->AddTokenToBody(LastTok);
+ Tokens.push_back(LastTok);
continue;
} else {
Diag(Tok, diag::err_pp_stringize_not_parameter)
@@ -2817,13 +3030,13 @@ MacroInfo *Preprocessor::ReadOptionalMacroParameterListAndBody(
}
// Things look ok, add the '#' and param name tokens to the macro.
- MI->AddTokenToBody(LastTok);
+ Tokens.push_back(LastTok);
// If the token following '#' is VAOPT, let the next iteration handle it
// and check it for correctness, otherwise add the token and prime the
// loop with the next one.
if (!VAOCtx.isVAOptToken(Tok)) {
- MI->AddTokenToBody(Tok);
+ Tokens.push_back(Tok);
LastTok = Tok;
// Get the next token of the macro.
@@ -2839,8 +3052,16 @@ MacroInfo *Preprocessor::ReadOptionalMacroParameterListAndBody(
}
}
MI->setDefinitionEndLoc(LastTok.getLocation());
+
+ MI->setTokens(Tokens, BP);
return MI;
}
+
+static bool isObjCProtectedMacro(const IdentifierInfo *II) {
+ return II->isStr("__strong") || II->isStr("__weak") ||
+ II->isStr("__unsafe_unretained") || II->isStr("__autoreleasing");
+}
+
/// HandleDefineDirective - Implements \#define. This consumes the entire macro
/// line then lets the caller lex the next real token.
void Preprocessor::HandleDefineDirective(
@@ -2855,6 +3076,12 @@ void Preprocessor::HandleDefineDirective(
if (MacroNameTok.is(tok::eod))
return;
+ IdentifierInfo *II = MacroNameTok.getIdentifierInfo();
+ // Issue a final pragma warning if we're defining a macro that was has been
+ // undefined and is being redefined.
+ if (!II->hasMacroDefinition() && II->hadMacroDefinition() && II->isFinal())
+ emitFinalMacroWarning(MacroNameTok, /*IsUndef=*/false);
+
// If we are supposed to keep comments in #defines, reenable comment saving
// mode.
if (CurLexer) CurLexer->SetCommentRetentionState(KeepMacroComments);
@@ -2897,18 +3124,18 @@ void Preprocessor::HandleDefineDirective(
// Finally, if this identifier already had a macro defined for it, verify that
// the macro bodies are identical, and issue diagnostics if they are not.
if (const MacroInfo *OtherMI=getMacroInfo(MacroNameTok.getIdentifierInfo())) {
+ // Final macros are hard-mode: they always warn. Even if the bodies are
+ // identical. Even if they are in system headers. Even if they are things we
+ // would silently allow in the past.
+ if (MacroNameTok.getIdentifierInfo()->isFinal())
+ emitFinalMacroWarning(MacroNameTok, /*IsUndef=*/false);
+
// In Objective-C, ignore attempts to directly redefine the builtin
// definitions of the ownership qualifiers. It's still possible to
// #undef them.
- auto isObjCProtectedMacro = [](const IdentifierInfo *II) -> bool {
- return II->isStr("__strong") ||
- II->isStr("__weak") ||
- II->isStr("__unsafe_unretained") ||
- II->isStr("__autoreleasing");
- };
- if (getLangOpts().ObjC &&
- SourceMgr.getFileID(OtherMI->getDefinitionLoc())
- == getPredefinesFileID() &&
+ if (getLangOpts().ObjC &&
+ SourceMgr.getFileID(OtherMI->getDefinitionLoc()) ==
+ getPredefinesFileID() &&
isObjCProtectedMacro(MacroNameTok.getIdentifierInfo())) {
// Warn if it changes the tokens.
if ((!getDiagnostics().getSuppressSystemWarnings() ||
@@ -2926,12 +3153,13 @@ void Preprocessor::HandleDefineDirective(
// then don't bother calling MacroInfo::isIdenticalTo.
if (!getDiagnostics().getSuppressSystemWarnings() ||
!SourceMgr.isInSystemHeader(DefineTok.getLocation())) {
+
if (!OtherMI->isUsed() && OtherMI->isWarnIfUnused())
Diag(OtherMI->getDefinitionLoc(), diag::pp_macro_not_used);
// Warn if defining "__LINE__" and other builtins, per C99 6.10.8/4 and
// C++ [cpp.predefined]p4, but allow it as an extension.
- if (OtherMI->isBuiltinMacro())
+ if (isLanguageDefinedBuiltin(SourceMgr, OtherMI, II->getName()))
Diag(MacroNameTok, diag::ext_pp_redef_builtin_macro);
// Macros must be identical. This means all tokens and whitespace
// separation must be the same. C99 6.10.3p2.
@@ -2978,7 +3206,7 @@ void Preprocessor::HandleDefineDirective(
Tok.startToken();
Tok.setKind(tok::kw__Static_assert);
Tok.setIdentifierInfo(getIdentifierInfo("_Static_assert"));
- MI->AddTokenToBody(Tok);
+ MI->setTokens({Tok}, BP);
(void)appendDefMacroDirective(getIdentifierInfo("static_assert"), MI);
}
}
@@ -3003,11 +3231,19 @@ void Preprocessor::HandleUndefDirective() {
auto MD = getMacroDefinition(II);
UndefMacroDirective *Undef = nullptr;
+ if (II->isFinal())
+ emitFinalMacroWarning(MacroNameTok, /*IsUndef=*/true);
+
// If the macro is not defined, this is a noop undef.
if (const MacroInfo *MI = MD.getMacroInfo()) {
if (!MI->isUsed() && MI->isWarnIfUnused())
Diag(MI->getDefinitionLoc(), diag::pp_macro_not_used);
+ // Warn if undefining "__LINE__" and other builtins, per C99 6.10.8/4 and
+ // C++ [cpp.predefined]p4, but allow it as an extension.
+ if (isLanguageDefinedBuiltin(SourceMgr, MI, II->getName()))
+ Diag(MacroNameTok, diag::ext_pp_undef_builtin_macro);
+
if (MI->isWarnIfUnused())
WarnUnusedMacroLocs.erase(MI->getDefinitionLoc());
@@ -3052,6 +3288,8 @@ void Preprocessor::HandleIfdefDirective(Token &Result,
return;
}
+ emitMacroExpansionWarnings(MacroNameTok, /*IsIfnDef=*/true);
+
// Check to see if this is the last token on the #if[n]def line.
CheckEndOfDirective(isIfndef ? "ifndef" : "ifdef");
@@ -3234,6 +3472,23 @@ void Preprocessor::HandleElifFamilyDirective(Token &ElifToken,
: PED_Elifndef;
++NumElse;
+ // Warn if using `#elifdef` & `#elifndef` in not C23 & C++23 mode.
+ switch (DirKind) {
+ case PED_Elifdef:
+ case PED_Elifndef:
+ unsigned DiagID;
+ if (LangOpts.CPlusPlus)
+ DiagID = LangOpts.CPlusPlus23 ? diag::warn_cxx23_compat_pp_directive
+ : diag::ext_cxx23_pp_directive;
+ else
+ DiagID = LangOpts.C23 ? diag::warn_c23_compat_pp_directive
+ : diag::ext_c23_pp_directive;
+ Diag(ElifToken, DiagID) << DirKind;
+ break;
+ default:
+ break;
+ }
+
// #elif directive in a non-skipping conditional... start skipping.
// We don't care what the condition is, because we will always skip it (since
// the block immediately before it was included).
diff --git a/contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp b/contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp
index cab4bab630dc..8f25c67ec9df 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp
@@ -44,7 +44,7 @@ namespace {
/// conditional and the source range covered by it.
class PPValue {
SourceRange Range;
- IdentifierInfo *II;
+ IdentifierInfo *II = nullptr;
public:
llvm::APSInt Val;
@@ -133,6 +133,10 @@ static bool EvaluateDefined(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
Result.Val.setIsUnsigned(false); // Result is signed intmax_t.
DT.IncludedUndefinedIds = !Macro;
+ PP.emitMacroExpansionWarnings(
+ PeekTok,
+ (II->getName() == "INFINITY" || II->getName() == "NAN") ? true : false);
+
// If there is a macro, mark it used.
if (Result.Val != 0 && ValueLive)
PP.markMacroAsUsed(Macro.getMacroInfo());
@@ -265,7 +269,7 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
const StringRef IdentifierName = II->getName();
if (llvm::any_of(UndefPrefixes,
[&IdentifierName](const std::string &Prefix) {
- return IdentifierName.startswith(Prefix);
+ return IdentifierName.starts_with(Prefix);
}))
PP.Diag(PeekTok, diag::warn_pp_undef_prefix)
<< AddFlagValue{llvm::join(UndefPrefixes, ",")} << II;
@@ -321,13 +325,21 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
PP.Diag(PeekTok, diag::ext_c99_longlong);
}
- // 'z/uz' literals are a C++2b feature.
+ // 'z/uz' literals are a C++23 feature.
if (Literal.isSizeT)
PP.Diag(PeekTok, PP.getLangOpts().CPlusPlus
- ? PP.getLangOpts().CPlusPlus2b
+ ? PP.getLangOpts().CPlusPlus23
? diag::warn_cxx20_compat_size_t_suffix
- : diag::ext_cxx2b_size_t_suffix
- : diag::err_cxx2b_size_t_suffix);
+ : diag::ext_cxx23_size_t_suffix
+ : diag::err_cxx23_size_t_suffix);
+
+ // 'wb/uwb' literals are a C23 feature. We explicitly do not support the
+ // suffix in C++ as an extension because a library-based UDL that resolves
+ // to a library type may be more appropriate there.
+ if (Literal.isBitInt)
+ PP.Diag(PeekTok, PP.getLangOpts().C23
+ ? diag::warn_c23_compat_bitint_suffix
+ : diag::ext_c23_bitint_suffix);
// Parse the integer literal into Result.
if (Literal.GetIntegerValue(Result.Val)) {
@@ -398,9 +410,18 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
// Set the value.
Val = Literal.getValue();
// Set the signedness. UTF-16 and UTF-32 are always unsigned
+ // UTF-8 is unsigned if -fchar8_t is specified.
if (Literal.isWide())
Val.setIsUnsigned(!TargetInfo::isTypeSigned(TI.getWCharType()));
- else if (!Literal.isUTF16() && !Literal.isUTF32())
+ else if (Literal.isUTF16() || Literal.isUTF32())
+ Val.setIsUnsigned(true);
+ else if (Literal.isUTF8()) {
+ if (PP.getLangOpts().CPlusPlus)
+ Val.setIsUnsigned(
+ PP.getLangOpts().Char8 ? true : !PP.getLangOpts().CharIsSigned);
+ else
+ Val.setIsUnsigned(true);
+ } else
Val.setIsUnsigned(!PP.getLangOpts().CharIsSigned);
if (Result.Val.getBitWidth() > Val.getBitWidth()) {
@@ -660,7 +681,7 @@ static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
case tok::ampamp: // Logical && does not do UACs.
break; // No UAC
default:
- Res.setIsUnsigned(LHS.isUnsigned()|RHS.isUnsigned());
+ Res.setIsUnsigned(LHS.isUnsigned() || RHS.isUnsigned());
// If this just promoted something from signed to unsigned, and if the
// value was negative, warn about it.
if (ValueLive && Res.isUnsigned()) {
@@ -820,7 +841,7 @@ static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
// Usual arithmetic conversions (C99 6.3.1.8p1): result is unsigned if
// either operand is unsigned.
- Res.setIsUnsigned(RHS.isUnsigned() | AfterColonVal.isUnsigned());
+ Res.setIsUnsigned(RHS.isUnsigned() || AfterColonVal.isUnsigned());
// Figure out the precedence of the token after the : part.
PeekPrec = getPrecedence(PeekTok.getKind());
@@ -850,7 +871,7 @@ static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
/// to "!defined(X)" return X in IfNDefMacro.
Preprocessor::DirectiveEvalResult
Preprocessor::EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro) {
- SaveAndRestore<bool> PPDir(ParsingIfOrElifDirective, true);
+ SaveAndRestore PPDir(ParsingIfOrElifDirective, true);
// Save the current state of 'DisableMacroExpansion' and reset it to false. If
// 'DisableMacroExpansion' is true, then we must be in a macro argument list
// in which case a directive is undefined behavior. We want macros to be able
diff --git a/contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp b/contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp
index 16170969a322..3b1b6df1dbae 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp
@@ -12,7 +12,6 @@
//===----------------------------------------------------------------------===//
#include "clang/Basic/FileManager.h"
-#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/LexDiagnostic.h"
@@ -23,6 +22,7 @@
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBufferRef.h"
#include "llvm/Support/Path.h"
+#include <optional>
using namespace clang;
@@ -39,8 +39,8 @@ bool Preprocessor::isInPrimaryFile() const {
// If there are any stacked lexers, we're in a #include.
assert(IsFileLexer(IncludeMacroStack[0]) &&
"Top level include stack isn't our primary lexer?");
- return std::none_of(
- IncludeMacroStack.begin() + 1, IncludeMacroStack.end(),
+ return llvm::none_of(
+ llvm::drop_begin(IncludeMacroStack),
[&](const IncludeStackInfo &ISI) -> bool { return IsFileLexer(ISI); });
}
@@ -66,8 +66,9 @@ PreprocessorLexer *Preprocessor::getCurrentFileLexer() const {
/// EnterSourceFile - Add a source file to the top of the include stack and
/// start lexing tokens from it instead of the current buffer.
-bool Preprocessor::EnterSourceFile(FileID FID, const DirectoryLookup *CurDir,
- SourceLocation Loc) {
+bool Preprocessor::EnterSourceFile(FileID FID, ConstSearchDirIterator CurDir,
+ SourceLocation Loc,
+ bool IsFirstIncludeOfFile) {
assert(!CurTokenLexer && "Cannot #include a file inside a macro!");
++NumEnteredSourceFiles;
@@ -75,7 +76,7 @@ bool Preprocessor::EnterSourceFile(FileID FID, const DirectoryLookup *CurDir,
MaxIncludeStackDepth = IncludeMacroStack.size();
// Get the MemoryBuffer for this FID, if it fails, we fail.
- llvm::Optional<llvm::MemoryBufferRef> InputFile =
+ std::optional<llvm::MemoryBufferRef> InputFile =
getSourceManager().getBufferOrNone(FID, Loc);
if (!InputFile) {
SourceLocation FileStart = SourceMgr.getLocForStartOfFile(FID);
@@ -91,14 +92,27 @@ bool Preprocessor::EnterSourceFile(FileID FID, const DirectoryLookup *CurDir,
CodeCompletionFileLoc.getLocWithOffset(CodeCompletionOffset);
}
- EnterSourceFileWithLexer(new Lexer(FID, *InputFile, *this), CurDir);
+ Lexer *TheLexer = new Lexer(FID, *InputFile, *this, IsFirstIncludeOfFile);
+ if (getPreprocessorOpts().DependencyDirectivesForFile &&
+ FID != PredefinesFileID) {
+ if (OptionalFileEntryRef File = SourceMgr.getFileEntryRefForID(FID)) {
+ if (std::optional<ArrayRef<dependency_directives_scan::Directive>>
+ DepDirectives =
+ getPreprocessorOpts().DependencyDirectivesForFile(*File)) {
+ TheLexer->DepDirectives = *DepDirectives;
+ }
+ }
+ }
+
+ EnterSourceFileWithLexer(TheLexer, CurDir);
return false;
}
/// EnterSourceFileWithLexer - Add a source file to the top of the include stack
/// and start lexing tokens from it instead of the current buffer.
void Preprocessor::EnterSourceFileWithLexer(Lexer *TheLexer,
- const DirectoryLookup *CurDir) {
+ ConstSearchDirIterator CurDir) {
+ PreprocessorLexer *PrevPPLexer = CurPPLexer;
// Add the current lexer to the include stack.
if (CurPPLexer || CurTokenLexer)
@@ -108,16 +122,27 @@ void Preprocessor::EnterSourceFileWithLexer(Lexer *TheLexer,
CurPPLexer = TheLexer;
CurDirLookup = CurDir;
CurLexerSubmodule = nullptr;
- if (CurLexerKind != CLK_LexAfterModuleImport)
- CurLexerKind = CLK_Lexer;
+ if (CurLexerCallback != CLK_LexAfterModuleImport)
+ CurLexerCallback = TheLexer->isDependencyDirectivesLexer()
+ ? CLK_DependencyDirectivesLexer
+ : CLK_Lexer;
// Notify the client, if desired, that we are in a new source file.
if (Callbacks && !CurLexer->Is_PragmaLexer) {
SrcMgr::CharacteristicKind FileType =
SourceMgr.getFileCharacteristic(CurLexer->getFileLoc());
- Callbacks->FileChanged(CurLexer->getFileLoc(),
- PPCallbacks::EnterFile, FileType);
+ FileID PrevFID;
+ SourceLocation EnterLoc;
+ if (PrevPPLexer) {
+ PrevFID = PrevPPLexer->getFileID();
+ EnterLoc = PrevPPLexer->getSourceLocation();
+ }
+ Callbacks->FileChanged(CurLexer->getFileLoc(), PPCallbacks::EnterFile,
+ FileType, PrevFID);
+ Callbacks->LexedFileChanged(CurLexer->getFileID(),
+ PPCallbacks::LexedFileChangeReason::EnterFile,
+ FileType, PrevFID, EnterLoc);
}
}
@@ -136,8 +161,8 @@ void Preprocessor::EnterMacro(Token &Tok, SourceLocation ILEnd,
PushIncludeMacroStack();
CurDirLookup = nullptr;
CurTokenLexer = std::move(TokLexer);
- if (CurLexerKind != CLK_LexAfterModuleImport)
- CurLexerKind = CLK_TokenLexer;
+ if (CurLexerCallback != CLK_LexAfterModuleImport)
+ CurLexerCallback = CLK_TokenLexer;
}
/// EnterTokenStream - Add a "macro" context to the top of the include stack,
@@ -155,7 +180,7 @@ void Preprocessor::EnterMacro(Token &Tok, SourceLocation ILEnd,
void Preprocessor::EnterTokenStream(const Token *Toks, unsigned NumToks,
bool DisableMacroExpansion, bool OwnsTokens,
bool IsReinject) {
- if (CurLexerKind == CLK_CachingLexer) {
+ if (CurLexerCallback == CLK_CachingLexer) {
if (CachedLexPos < CachedTokens.size()) {
assert(IsReinject && "new tokens in the middle of cached stream");
// We're entering tokens into the middle of our cached token stream. We
@@ -191,25 +216,24 @@ void Preprocessor::EnterTokenStream(const Token *Toks, unsigned NumToks,
PushIncludeMacroStack();
CurDirLookup = nullptr;
CurTokenLexer = std::move(TokLexer);
- if (CurLexerKind != CLK_LexAfterModuleImport)
- CurLexerKind = CLK_TokenLexer;
+ if (CurLexerCallback != CLK_LexAfterModuleImport)
+ CurLexerCallback = CLK_TokenLexer;
}
/// Compute the relative path that names the given file relative to
/// the given directory.
static void computeRelativePath(FileManager &FM, const DirectoryEntry *Dir,
- const FileEntry *File,
- SmallString<128> &Result) {
+ FileEntryRef File, SmallString<128> &Result) {
Result.clear();
- StringRef FilePath = File->getDir()->getName();
+ StringRef FilePath = File.getDir().getName();
StringRef Path = FilePath;
while (!Path.empty()) {
if (auto CurDir = FM.getDirectory(Path)) {
if (*CurDir == Dir) {
Result = FilePath.substr(Path.size());
llvm::sys::path::append(Result,
- llvm::sys::path::filename(File->getName()));
+ llvm::sys::path::filename(File.getName()));
return;
}
}
@@ -217,7 +241,7 @@ static void computeRelativePath(FileManager &FM, const DirectoryEntry *Dir,
Path = llvm::sys::path::parent_path(Path);
}
- Result = File->getName();
+ Result = File.getName();
}
void Preprocessor::PropagateLineStartLeadingSpaceInfo(Token &Result) {
@@ -257,23 +281,24 @@ const char *Preprocessor::getCurLexerEndPos() {
static void collectAllSubModulesWithUmbrellaHeader(
const Module &Mod, SmallVectorImpl<const Module *> &SubMods) {
- if (Mod.getUmbrellaHeader())
+ if (Mod.getUmbrellaHeaderAsWritten())
SubMods.push_back(&Mod);
for (auto *M : Mod.submodules())
collectAllSubModulesWithUmbrellaHeader(*M, SubMods);
}
void Preprocessor::diagnoseMissingHeaderInUmbrellaDir(const Module &Mod) {
- const Module::Header &UmbrellaHeader = Mod.getUmbrellaHeader();
- assert(UmbrellaHeader.Entry && "Module must use umbrella header");
- const FileID &File = SourceMgr.translateFile(UmbrellaHeader.Entry);
+ std::optional<Module::Header> UmbrellaHeader =
+ Mod.getUmbrellaHeaderAsWritten();
+ assert(UmbrellaHeader && "Module must use umbrella header");
+ const FileID &File = SourceMgr.translateFile(UmbrellaHeader->Entry);
SourceLocation ExpectedHeadersLoc = SourceMgr.getLocForEndOfFile(File);
if (getDiagnostics().isIgnored(diag::warn_uncovered_module_header,
ExpectedHeadersLoc))
return;
ModuleMap &ModMap = getHeaderSearchInfo().getModuleMap();
- const DirectoryEntry *Dir = Mod.getUmbrellaDir().Entry;
+ OptionalDirectoryEntryRef Dir = Mod.getEffectiveUmbrellaDir();
llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
std::error_code EC;
for (llvm::vfs::recursive_directory_iterator Entry(FS, Dir->getName(), EC),
@@ -288,12 +313,12 @@ void Preprocessor::diagnoseMissingHeaderInUmbrellaDir(const Module &Mod) {
.Default(false))
continue;
- if (auto Header = getFileManager().getFile(Entry->path()))
+ if (auto Header = getFileManager().getOptionalFileRef(Entry->path()))
if (!getSourceManager().hasFileInfo(*Header)) {
if (!ModMap.isHeaderInUnavailableModule(*Header)) {
// Find the relative path that would access this header.
SmallString<128> RelativePath;
- computeRelativePath(FileMgr, Dir, *Header, RelativePath);
+ computeRelativePath(FileMgr, *Dir, *Header, RelativePath);
Diag(ExpectedHeadersLoc, diag::warn_uncovered_module_header)
<< Mod.getFullModuleName() << RelativePath;
}
@@ -301,49 +326,22 @@ void Preprocessor::diagnoseMissingHeaderInUmbrellaDir(const Module &Mod) {
}
}
-void Preprocessor::ResolvePragmaIncludeInstead(
- const SourceLocation Location) const {
- assert(Location.isValid());
- if (CurLexer == nullptr)
- return;
-
- if (SourceMgr.isInSystemHeader(Location))
- return;
-
- for (const auto &Include : CurLexer->getIncludeHistory()) {
- StringRef Filename = Include.getKey();
- const PreprocessorLexer::IncludeInfo &Info = Include.getValue();
- ArrayRef<SmallString<32>> Aliases =
- HeaderInfo.getFileInfo(Info.File).Aliases.getArrayRef();
-
- if (Aliases.empty())
- continue;
-
- switch (Aliases.size()) {
- case 1:
- Diag(Info.Location, diag::err_pragma_include_instead_system_reserved)
- << Filename << 0 << Aliases[0];
- continue;
- case 2:
- Diag(Info.Location, diag::err_pragma_include_instead_system_reserved)
- << Filename << 1 << Aliases[0] << Aliases[1];
- continue;
- default: {
- Diag(Info.Location, diag::err_pragma_include_instead_system_reserved)
- << Filename << 2 << ("{'" + llvm::join(Aliases, "', '") + "'}");
- }
- }
- }
-}
-
/// HandleEndOfFile - This callback is invoked when the lexer hits the end of
/// the current file. This either returns the EOF token or pops a level off
/// the include stack and keeps going.
-bool Preprocessor::HandleEndOfFile(Token &Result, SourceLocation EndLoc,
- bool isEndOfMacro) {
+bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
assert(!CurTokenLexer &&
"Ending a file when currently in a macro!");
+ SourceLocation UnclosedSafeBufferOptOutLoc;
+
+ if (IncludeMacroStack.empty() &&
+ isPPInSafeBufferOptOutRegion(UnclosedSafeBufferOptOutLoc)) {
+ // To warn if a "-Wunsafe-buffer-usage" opt-out region is still open by the
+ // end of a file.
+ Diag(UnclosedSafeBufferOptOutLoc,
+ diag::err_pp_unclosed_pragma_unsafe_buffer_usage);
+ }
// If we have an unclosed module region from a pragma at the end of a
// module, complain and close it now.
const bool LeavingSubmodule = CurLexer && CurLexerSubmodule;
@@ -368,8 +366,8 @@ bool Preprocessor::HandleEndOfFile(Token &Result, SourceLocation EndLoc,
if (const IdentifierInfo *ControllingMacro =
CurPPLexer->MIOpt.GetControllingMacroAtEndOfFile()) {
// Okay, this has a controlling macro, remember in HeaderFileInfo.
- if (const FileEntry *FE = CurPPLexer->getFileEntry()) {
- HeaderInfo.SetFileControllingMacro(FE, ControllingMacro);
+ if (OptionalFileEntryRef FE = CurPPLexer->getFileEntry()) {
+ HeaderInfo.SetFileControllingMacro(*FE, ControllingMacro);
if (MacroInfo *MI =
getMacroInfo(const_cast<IdentifierInfo*>(ControllingMacro)))
MI->setUsedForHeaderGuard(true);
@@ -377,7 +375,7 @@ bool Preprocessor::HandleEndOfFile(Token &Result, SourceLocation EndLoc,
CurPPLexer->MIOpt.GetDefinedMacro()) {
if (!isMacroDefined(ControllingMacro) &&
DefinedMacro != ControllingMacro &&
- HeaderInfo.FirstTimeLexingFile(FE)) {
+ CurLexer->isFirstTimeLexingFile()) {
// If the edit distance between the two macros is more than 50%,
// DefinedMacro may not be header guard, or can be header guard of
@@ -410,9 +408,6 @@ bool Preprocessor::HandleEndOfFile(Token &Result, SourceLocation EndLoc,
}
}
- if (EndLoc.isValid())
- ResolvePragmaIncludeInstead(EndLoc);
-
// Complain about reaching a true EOF within arc_cf_code_audited.
// We don't want to complain about reaching the end of a macro
// instantiation or a _Pragma.
@@ -430,8 +425,13 @@ bool Preprocessor::HandleEndOfFile(Token &Result, SourceLocation EndLoc,
// instantiation or a _Pragma.
if (PragmaAssumeNonNullLoc.isValid() &&
!isEndOfMacro && !(CurLexer && CurLexer->Is_PragmaLexer)) {
- Diag(PragmaAssumeNonNullLoc, diag::err_pp_eof_in_assume_nonnull);
-
+ // If we're at the end of generating a preamble, we should record the
+ // unterminated \#pragma clang assume_nonnull so we can restore it later
+ // when the preamble is loaded into the main file.
+ if (isRecordingPreamble() && isInPrimaryFile())
+ PreambleRecordedPragmaAssumeNonNullLoc = PragmaAssumeNonNullLoc;
+ else
+ Diag(PragmaAssumeNonNullLoc, diag::err_pp_eof_in_assume_nonnull);
// Recover by leaving immediately.
PragmaAssumeNonNullLoc = SourceLocation();
}
@@ -506,16 +506,23 @@ bool Preprocessor::HandleEndOfFile(Token &Result, SourceLocation EndLoc,
// Notify the client, if desired, that we are in a new source file.
if (Callbacks && !isEndOfMacro && CurPPLexer) {
+ SourceLocation Loc = CurPPLexer->getSourceLocation();
SrcMgr::CharacteristicKind FileType =
- SourceMgr.getFileCharacteristic(CurPPLexer->getSourceLocation());
- Callbacks->FileChanged(CurPPLexer->getSourceLocation(),
- PPCallbacks::ExitFile, FileType, ExitedFID);
+ SourceMgr.getFileCharacteristic(Loc);
+ Callbacks->FileChanged(Loc, PPCallbacks::ExitFile, FileType, ExitedFID);
+ Callbacks->LexedFileChanged(CurPPLexer->getFileID(),
+ PPCallbacks::LexedFileChangeReason::ExitFile,
+ FileType, ExitedFID, Loc);
}
- // Restore conditional stack from the preamble right after exiting from the
- // predefines file.
- if (ExitedFromPredefinesFile)
+ // Restore conditional stack as well as the recorded
+ // \#pragma clang assume_nonnull from the preamble right after exiting
+ // from the predefines file.
+ if (ExitedFromPredefinesFile) {
replayPreambleConditionalStack();
+ if (PreambleRecordedPragmaAssumeNonNullLoc.isValid())
+ PragmaAssumeNonNullLoc = PreambleRecordedPragmaAssumeNonNullLoc;
+ }
if (!isEndOfMacro && CurPPLexer && FoundPCHThroughHeader &&
(isInPrimaryFile() ||
@@ -528,13 +535,19 @@ bool Preprocessor::HandleEndOfFile(Token &Result, SourceLocation EndLoc,
return LeavingSubmodule;
}
}
-
// If this is the end of the main file, form an EOF token.
assert(CurLexer && "Got EOF but no current lexer set!");
const char *EndPos = getCurLexerEndPos();
Result.startToken();
CurLexer->BufferPtr = EndPos;
- CurLexer->FormTokenWithChars(Result, EndPos, tok::eof);
+
+ if (getLangOpts().IncrementalExtensions) {
+ CurLexer->FormTokenWithChars(Result, EndPos, tok::annot_repl_input_end);
+ Result.setAnnotationEndLoc(Result.getLocation());
+ Result.setAnnotationValue(nullptr);
+ } else {
+ CurLexer->FormTokenWithChars(Result, EndPos, tok::eof);
+ }
if (isCodeCompletionEnabled()) {
// Inserting the code-completion point increases the source buffer by 1,
@@ -601,7 +614,7 @@ bool Preprocessor::HandleEndOfTokenLexer(Token &Result) {
TokenLexerCache[NumCachedTokenLexers++] = std::move(CurTokenLexer);
// Handle this like a #include file being popped off the stack.
- return HandleEndOfFile(Result, {}, true);
+ return HandleEndOfFile(Result, true);
}
/// RemoveTopOfLexerStack - Pop the current lexer/macro exp off the top of the
diff --git a/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp b/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp
index d8ad9d845e7a..ad02f31209b0 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Basic/AttributeCommonInfo.h"
#include "clang/Basic/Attributes.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/FileManager.h"
@@ -36,8 +37,6 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
@@ -53,6 +52,7 @@
#include <cstddef>
#include <cstring>
#include <ctime>
+#include <optional>
#include <string>
#include <tuple>
#include <utility>
@@ -87,7 +87,7 @@ void Preprocessor::appendMacroDirective(IdentifierInfo *II, MacroDirective *MD){
// Set up the identifier as having associated macro history.
II->setHasMacroDefinition(true);
- if (!MD->isDefined() && LeafModuleMacros.find(II) == LeafModuleMacros.end())
+ if (!MD->isDefined() && !LeafModuleMacros.contains(II))
II->setHasMacroDefinition(false);
if (II->isFromAST())
II->setChangedSinceDeserialization();
@@ -125,7 +125,7 @@ void Preprocessor::setLoadedMacroDirective(IdentifierInfo *II,
// Setup the identifier as having associated macro history.
II->setHasMacroDefinition(true);
- if (!MD->isDefined() && LeafModuleMacros.find(II) == LeafModuleMacros.end())
+ if (!MD->isDefined() && !LeafModuleMacros.contains(II))
II->setHasMacroDefinition(false);
}
@@ -155,11 +155,8 @@ ModuleMacro *Preprocessor::addModuleMacro(Module *Mod, IdentifierInfo *II,
// If we were the first overrider for any macro, it's no longer a leaf.
auto &LeafMacros = LeafModuleMacros[II];
if (HidAny) {
- LeafMacros.erase(std::remove_if(LeafMacros.begin(), LeafMacros.end(),
- [](ModuleMacro *MM) {
- return MM->NumOverriddenBy != 0;
- }),
- LeafMacros.end());
+ llvm::erase_if(LeafMacros,
+ [](ModuleMacro *MM) { return MM->NumOverriddenBy != 0; });
}
// The new macro is always a leaf macro.
@@ -287,7 +284,8 @@ void Preprocessor::dumpMacroInfo(const IdentifierInfo *II) {
// Dump module macros.
llvm::DenseSet<ModuleMacro*> Active;
- for (auto *MM : State ? State->getActiveModuleMacros(*this, II) : None)
+ for (auto *MM :
+ State ? State->getActiveModuleMacros(*this, II) : std::nullopt)
Active.insert(MM);
llvm::DenseSet<ModuleMacro*> Visited;
llvm::SmallVector<ModuleMacro *, 16> Worklist(Leaf.begin(), Leaf.end());
@@ -345,6 +343,7 @@ void Preprocessor::RegisterBuiltinMacros() {
Ident__TIME__ = RegisterBuiltinMacro(*this, "__TIME__");
Ident__COUNTER__ = RegisterBuiltinMacro(*this, "__COUNTER__");
Ident_Pragma = RegisterBuiltinMacro(*this, "_Pragma");
+ Ident__FLT_EVAL_METHOD__ = RegisterBuiltinMacro(*this, "__FLT_EVAL_METHOD__");
// C++ Standing Document Extensions.
if (getLangOpts().CPlusPlus)
@@ -372,6 +371,8 @@ void Preprocessor::RegisterBuiltinMacros() {
Ident__has_feature = RegisterBuiltinMacro(*this, "__has_feature");
Ident__has_extension = RegisterBuiltinMacro(*this, "__has_extension");
Ident__has_builtin = RegisterBuiltinMacro(*this, "__has_builtin");
+ Ident__has_constexpr_builtin =
+ RegisterBuiltinMacro(*this, "__has_constexpr_builtin");
Ident__has_attribute = RegisterBuiltinMacro(*this, "__has_attribute");
if (!getLangOpts().CPlusPlus)
Ident__has_c_attribute = RegisterBuiltinMacro(*this, "__has_c_attribute");
@@ -388,6 +389,10 @@ void Preprocessor::RegisterBuiltinMacros() {
Ident__is_target_os = RegisterBuiltinMacro(*this, "__is_target_os");
Ident__is_target_environment =
RegisterBuiltinMacro(*this, "__is_target_environment");
+ Ident__is_target_variant_os =
+ RegisterBuiltinMacro(*this, "__is_target_variant_os");
+ Ident__is_target_variant_environment =
+ RegisterBuiltinMacro(*this, "__is_target_variant_environment");
// Modules.
Ident__building_module = RegisterBuiltinMacro(*this, "__building_module");
@@ -426,7 +431,7 @@ static bool isTrivialSingleTokenExpansion(const MacroInfo *MI,
// If this is a function-like macro invocation, it's safe to trivially expand
// as long as the identifier is not a macro argument.
- return std::find(MI->param_begin(), MI->param_end(), II) == MI->param_end();
+ return !llvm::is_contained(MI->params(), II);
}
/// isNextPPTokenLParen - Determine whether the next preprocessor token to be
@@ -471,6 +476,8 @@ bool Preprocessor::isNextPPTokenLParen() {
/// expanded as a macro, handle it and return the next token as 'Identifier'.
bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
const MacroDefinition &M) {
+ emitMacroExpansionWarnings(Identifier);
+
MacroInfo *MI = M.getMacroInfo();
// If this is a macro expansion in the "#if !defined(x)" line for the file,
@@ -986,7 +993,11 @@ MacroArgs *Preprocessor::ReadMacroCallArgumentList(Token &MacroName,
// If the macro contains the comma pasting extension, the diagnostic
// is suppressed; we know we'll get another diagnostic later.
if (!MI->hasCommaPasting()) {
- Diag(Tok, diag::ext_missing_varargs_arg);
+ // C++20 allows this construct, but standards before C++20 and all C
+ // standards do not allow the construct (we allow it as an extension).
+ Diag(Tok, getLangOpts().CPlusPlus20
+ ? diag::warn_cxx17_compat_missing_varargs_arg
+ : diag::ext_missing_varargs_arg);
Diag(MI->getDefinitionLoc(), diag::note_macro_here)
<< MacroName.getIdentifierInfo();
}
@@ -1076,8 +1087,15 @@ void Preprocessor::removeCachedMacroExpandedTokensOfLastLexer() {
/// the identifier tokens inserted.
static void ComputeDATE_TIME(SourceLocation &DATELoc, SourceLocation &TIMELoc,
Preprocessor &PP) {
- time_t TT = time(nullptr);
- struct tm *TM = localtime(&TT);
+ time_t TT;
+ std::tm *TM;
+ if (PP.getPreprocessorOpts().SourceDateEpoch) {
+ TT = *PP.getPreprocessorOpts().SourceDateEpoch;
+ TM = std::gmtime(&TT);
+ } else {
+ TT = std::time(nullptr);
+ TM = std::localtime(&TT);
+ }
static const char * const Months[] = {
"Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"
@@ -1086,8 +1104,11 @@ static void ComputeDATE_TIME(SourceLocation &DATELoc, SourceLocation &TIMELoc,
{
SmallString<32> TmpBuffer;
llvm::raw_svector_ostream TmpStream(TmpBuffer);
- TmpStream << llvm::format("\"%s %2d %4d\"", Months[TM->tm_mon],
- TM->tm_mday, TM->tm_year + 1900);
+ if (TM)
+ TmpStream << llvm::format("\"%s %2d %4d\"", Months[TM->tm_mon],
+ TM->tm_mday, TM->tm_year + 1900);
+ else
+ TmpStream << "??? ?? ????";
Token TmpTok;
TmpTok.startToken();
PP.CreateString(TmpStream.str(), TmpTok);
@@ -1097,8 +1118,11 @@ static void ComputeDATE_TIME(SourceLocation &DATELoc, SourceLocation &TIMELoc,
{
SmallString<32> TmpBuffer;
llvm::raw_svector_ostream TmpStream(TmpBuffer);
- TmpStream << llvm::format("\"%02d:%02d:%02d\"",
- TM->tm_hour, TM->tm_min, TM->tm_sec);
+ if (TM)
+ TmpStream << llvm::format("\"%02d:%02d:%02d\"", TM->tm_hour, TM->tm_min,
+ TM->tm_sec);
+ else
+ TmpStream << "??:??:??";
Token TmpTok;
TmpTok.startToken();
PP.CreateString(TmpStream.str(), TmpTok);
@@ -1112,7 +1136,8 @@ static bool HasFeature(const Preprocessor &PP, StringRef Feature) {
const LangOptions &LangOpts = PP.getLangOpts();
// Normalize the feature name, __foo__ becomes foo.
- if (Feature.startswith("__") && Feature.endswith("__") && Feature.size() >= 4)
+ if (Feature.starts_with("__") && Feature.ends_with("__") &&
+ Feature.size() >= 4)
Feature = Feature.substr(2, Feature.size() - 4);
#define FEATURE(Name, Predicate) .Case(#Name, Predicate)
@@ -1138,7 +1163,7 @@ static bool HasExtension(const Preprocessor &PP, StringRef Extension) {
const LangOptions &LangOpts = PP.getLangOpts();
// Normalize the extension name, __foo__ becomes foo.
- if (Extension.startswith("__") && Extension.endswith("__") &&
+ if (Extension.starts_with("__") && Extension.ends_with("__") &&
Extension.size() >= 4)
Extension = Extension.substr(2, Extension.size() - 4);
@@ -1154,9 +1179,9 @@ static bool HasExtension(const Preprocessor &PP, StringRef Extension) {
/// EvaluateHasIncludeCommon - Process a '__has_include("path")'
/// or '__has_include_next("path")' expression.
/// Returns true if successful.
-static bool EvaluateHasIncludeCommon(Token &Tok,
- IdentifierInfo *II, Preprocessor &PP,
- const DirectoryLookup *LookupFrom,
+static bool EvaluateHasIncludeCommon(Token &Tok, IdentifierInfo *II,
+ Preprocessor &PP,
+ ConstSearchDirIterator LookupFrom,
const FileEntry *LookupFromFile) {
// Save the location of the current token. If a '(' is later found, use
// that location. If not, use the end of this location instead.
@@ -1224,70 +1249,44 @@ static bool EvaluateHasIncludeCommon(Token &Tok,
if (Filename.empty())
return false;
+ // Passing this to LookupFile forces header search to check whether the found
+ // file belongs to a module. Skipping that check could incorrectly mark
+ // modular header as textual, causing issues down the line.
+ ModuleMap::KnownHeader KH;
+
// Search include directories.
- const DirectoryLookup *CurDir;
- Optional<FileEntryRef> File =
+ OptionalFileEntryRef File =
PP.LookupFile(FilenameLoc, Filename, isAngled, LookupFrom, LookupFromFile,
- CurDir, nullptr, nullptr, nullptr, nullptr, nullptr);
+ nullptr, nullptr, nullptr, &KH, nullptr, nullptr);
if (PPCallbacks *Callbacks = PP.getPPCallbacks()) {
SrcMgr::CharacteristicKind FileType = SrcMgr::C_User;
if (File)
- FileType =
- PP.getHeaderSearchInfo().getFileDirFlavor(&File->getFileEntry());
+ FileType = PP.getHeaderSearchInfo().getFileDirFlavor(*File);
Callbacks->HasInclude(FilenameLoc, Filename, isAngled, File, FileType);
}
// Get the result value. A result of true means the file exists.
- return File.hasValue();
+ return File.has_value();
}
-/// EvaluateHasInclude - Process a '__has_include("path")' expression.
-/// Returns true if successful.
-static bool EvaluateHasInclude(Token &Tok, IdentifierInfo *II,
- Preprocessor &PP) {
- return EvaluateHasIncludeCommon(Tok, II, PP, nullptr, nullptr);
+bool Preprocessor::EvaluateHasInclude(Token &Tok, IdentifierInfo *II) {
+ return EvaluateHasIncludeCommon(Tok, II, *this, nullptr, nullptr);
}
-/// EvaluateHasIncludeNext - Process '__has_include_next("path")' expression.
-/// Returns true if successful.
-static bool EvaluateHasIncludeNext(Token &Tok,
- IdentifierInfo *II, Preprocessor &PP) {
- // __has_include_next is like __has_include, except that we start
- // searching after the current found directory. If we can't do this,
- // issue a diagnostic.
- // FIXME: Factor out duplication with
- // Preprocessor::HandleIncludeNextDirective.
- const DirectoryLookup *Lookup = PP.GetCurDirLookup();
- const FileEntry *LookupFromFile = nullptr;
- if (PP.isInPrimaryFile() && PP.getLangOpts().IsHeaderFile) {
- // If the main file is a header, then it's either for PCH/AST generation,
- // or libclang opened it. Either way, handle it as a normal include below
- // and do not complain about __has_include_next.
- } else if (PP.isInPrimaryFile()) {
- Lookup = nullptr;
- PP.Diag(Tok, diag::pp_include_next_in_primary);
- } else if (PP.getCurrentLexerSubmodule()) {
- // Start looking up in the directory *after* the one in which the current
- // file would be found, if any.
- assert(PP.getCurrentLexer() && "#include_next directive in macro?");
- LookupFromFile = PP.getCurrentLexer()->getFileEntry();
- Lookup = nullptr;
- } else if (!Lookup) {
- PP.Diag(Tok, diag::pp_include_next_absolute_path);
- } else {
- // Start looking up in the next directory.
- ++Lookup;
- }
+bool Preprocessor::EvaluateHasIncludeNext(Token &Tok, IdentifierInfo *II) {
+ ConstSearchDirIterator Lookup = nullptr;
+ const FileEntry *LookupFromFile;
+ std::tie(Lookup, LookupFromFile) = getIncludeNextStart(Tok);
- return EvaluateHasIncludeCommon(Tok, II, PP, Lookup, LookupFromFile);
+ return EvaluateHasIncludeCommon(Tok, II, *this, Lookup, LookupFromFile);
}
/// Process single-argument builtin feature-like macros that return
/// integer values.
static void EvaluateFeatureLikeBuiltinMacro(llvm::raw_svector_ostream& OS,
Token &Tok, IdentifierInfo *II,
- Preprocessor &PP,
+ Preprocessor &PP, bool ExpandArgs,
llvm::function_ref<
int(Token &Tok,
bool &HasLexedNextTok)> Op) {
@@ -1307,13 +1306,16 @@ static void EvaluateFeatureLikeBuiltinMacro(llvm::raw_svector_ostream& OS,
unsigned ParenDepth = 1;
SourceLocation LParenLoc = Tok.getLocation();
- llvm::Optional<int> Result;
+ std::optional<int> Result;
Token ResultTok;
bool SuppressDiagnostic = false;
while (true) {
// Parse next token.
- PP.LexUnexpandedToken(Tok);
+ if (ExpandArgs)
+ PP.Lex(Tok);
+ else
+ PP.LexUnexpandedToken(Tok);
already_lexed:
switch (Tok.getKind()) {
@@ -1333,7 +1335,7 @@ already_lexed:
case tok::l_paren:
++ParenDepth;
- if (Result.hasValue())
+ if (Result)
break;
if (!SuppressDiagnostic) {
PP.Diag(Tok.getLocation(), diag::err_pp_nested_paren) << II;
@@ -1347,11 +1349,11 @@ already_lexed:
// The last ')' has been reached; return the value if one found or
// a diagnostic and a dummy value.
- if (Result.hasValue()) {
- OS << Result.getValue();
+ if (Result) {
+ OS << *Result;
// For strict conformance to __has_cpp_attribute rules, use 'L'
// suffix for dated literals.
- if (Result.getValue() > 1)
+ if (*Result > 1)
OS << 'L';
} else {
OS << 0;
@@ -1363,7 +1365,7 @@ already_lexed:
default: {
// Parse the macro argument, if one not found so far.
- if (Result.hasValue())
+ if (Result)
break;
bool HasLexedNextToken = false;
@@ -1450,9 +1452,47 @@ static bool isTargetEnvironment(const TargetInfo &TI,
const IdentifierInfo *II) {
std::string EnvName = (llvm::Twine("---") + II->getName().lower()).str();
llvm::Triple Env(EnvName);
+ // The unknown environment is matched only if
+ // '__is_target_environment(unknown)' is used.
+ if (Env.getEnvironment() == llvm::Triple::UnknownEnvironment &&
+ EnvName != "---unknown")
+ return false;
return TI.getTriple().getEnvironment() == Env.getEnvironment();
}
+/// Implements the __is_target_variant_os builtin macro.
+static bool isTargetVariantOS(const TargetInfo &TI, const IdentifierInfo *II) {
+ if (TI.getTriple().isOSDarwin()) {
+ const llvm::Triple *VariantTriple = TI.getDarwinTargetVariantTriple();
+ if (!VariantTriple)
+ return false;
+
+ std::string OSName =
+ (llvm::Twine("unknown-unknown-") + II->getName().lower()).str();
+ llvm::Triple OS(OSName);
+ if (OS.getOS() == llvm::Triple::Darwin) {
+ // Darwin matches macos, ios, etc.
+ return VariantTriple->isOSDarwin();
+ }
+ return VariantTriple->getOS() == OS.getOS();
+ }
+ return false;
+}
+
+/// Implements the __is_target_variant_environment builtin macro.
+static bool isTargetVariantEnvironment(const TargetInfo &TI,
+ const IdentifierInfo *II) {
+ if (TI.getTriple().isOSDarwin()) {
+ const llvm::Triple *VariantTriple = TI.getDarwinTargetVariantTriple();
+ if (!VariantTriple)
+ return false;
+ std::string EnvName = (llvm::Twine("---") + II->getName().lower()).str();
+ llvm::Triple Env(EnvName);
+ return VariantTriple->getEnvironment() == Env.getEnvironment();
+ }
+ return false;
+}
+
/// ExpandBuiltinMacro - If an identifier token is read that is to be expanded
/// as a builtin macro, handle it and return the next token as 'Tok'.
void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
@@ -1524,17 +1564,11 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
// __FILE_NAME__ is a Clang-specific extension that expands to the
// the last part of __FILE__.
if (II == Ident__FILE_NAME__) {
- // Try to get the last path component, failing that return the original
- // presumed location.
- StringRef PLFileName = llvm::sys::path::filename(PLoc.getFilename());
- if (PLFileName != "")
- FN += PLFileName;
- else
- FN += PLoc.getFilename();
+ processPathToFileName(FN, PLoc, getLangOpts(), getTargetInfo());
} else {
FN += PLoc.getFilename();
+ processPathForFileMacro(FN, getLangOpts(), getTargetInfo());
}
- getLangOpts().remapPathPrefix(FN);
Lexer::Stringify(FN);
OS << '"' << FN << '"';
}
@@ -1578,46 +1612,59 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
Diag(Tok.getLocation(), diag::warn_pp_date_time);
// MSVC, ICC, GCC, VisualAge C++ extension. The generated string should be
// of the form "Ddd Mmm dd hh::mm::ss yyyy", which is returned by asctime.
-
- // Get the file that we are lexing out of. If we're currently lexing from
- // a macro, dig into the include stack.
- const FileEntry *CurFile = nullptr;
- PreprocessorLexer *TheLexer = getCurrentFileLexer();
-
- if (TheLexer)
- CurFile = SourceMgr.getFileEntryForID(TheLexer->getFileID());
-
const char *Result;
- if (CurFile) {
- time_t TT = CurFile->getModificationTime();
- struct tm *TM = localtime(&TT);
+ if (getPreprocessorOpts().SourceDateEpoch) {
+ time_t TT = *getPreprocessorOpts().SourceDateEpoch;
+ std::tm *TM = std::gmtime(&TT);
Result = asctime(TM);
} else {
- Result = "??? ??? ?? ??:??:?? ????\n";
+ // Get the file that we are lexing out of. If we're currently lexing from
+ // a macro, dig into the include stack.
+ const FileEntry *CurFile = nullptr;
+ if (PreprocessorLexer *TheLexer = getCurrentFileLexer())
+ CurFile = SourceMgr.getFileEntryForID(TheLexer->getFileID());
+ if (CurFile) {
+ time_t TT = CurFile->getModificationTime();
+ struct tm *TM = localtime(&TT);
+ Result = asctime(TM);
+ } else {
+ Result = "??? ??? ?? ??:??:?? ????\n";
+ }
}
// Surround the string with " and strip the trailing newline.
OS << '"' << StringRef(Result).drop_back() << '"';
Tok.setKind(tok::string_literal);
+ } else if (II == Ident__FLT_EVAL_METHOD__) {
+ // __FLT_EVAL_METHOD__ is set to the default value.
+ OS << getTUFPEvalMethod();
+ // __FLT_EVAL_METHOD__ expands to a simple numeric value.
+ Tok.setKind(tok::numeric_constant);
+ if (getLastFPEvalPragmaLocation().isValid()) {
+ // The program is ill-formed. The value of __FLT_EVAL_METHOD__ is altered
+ // by the pragma.
+ Diag(Tok, diag::err_illegal_use_of_flt_eval_macro);
+ Diag(getLastFPEvalPragmaLocation(), diag::note_pragma_entered_here);
+ }
} else if (II == Ident__COUNTER__) {
// __COUNTER__ expands to a simple numeric value.
OS << CounterValue++;
Tok.setKind(tok::numeric_constant);
} else if (II == Ident__has_feature) {
- EvaluateFeatureLikeBuiltinMacro(OS, Tok, II, *this,
+ EvaluateFeatureLikeBuiltinMacro(OS, Tok, II, *this, false,
[this](Token &Tok, bool &HasLexedNextToken) -> int {
IdentifierInfo *II = ExpectFeatureIdentifierInfo(Tok, *this,
diag::err_feature_check_malformed);
return II && HasFeature(*this, II->getName());
});
} else if (II == Ident__has_extension) {
- EvaluateFeatureLikeBuiltinMacro(OS, Tok, II, *this,
+ EvaluateFeatureLikeBuiltinMacro(OS, Tok, II, *this, false,
[this](Token &Tok, bool &HasLexedNextToken) -> int {
IdentifierInfo *II = ExpectFeatureIdentifierInfo(Tok, *this,
diag::err_feature_check_malformed);
return II && HasExtension(*this, II->getName());
});
} else if (II == Ident__has_builtin) {
- EvaluateFeatureLikeBuiltinMacro(OS, Tok, II, *this,
+ EvaluateFeatureLikeBuiltinMacro(OS, Tok, II, *this, false,
[this](Token &Tok, bool &HasLexedNextToken) -> int {
IdentifierInfo *II = ExpectFeatureIdentifierInfo(Tok, *this,
diag::err_feature_check_malformed);
@@ -1631,7 +1678,9 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
// usual allocation and deallocation functions. Required by libc++
return 201802;
default:
- return true;
+ return Builtin::evaluateRequiredTargetFeatures(
+ getBuiltinInfo().getRequiredFeatures(II->getBuiltinID()),
+ getTargetInfo().getTargetOpts().FeatureMap);
}
return true;
} else if (II->getTokenID() != tok::identifier ||
@@ -1643,15 +1692,17 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
// as being "builtin functions", even if the syntax isn't a valid
// function call (for example, because the builtin takes a type
// argument).
- if (II->getName().startswith("__builtin_") ||
- II->getName().startswith("__is_") ||
- II->getName().startswith("__has_"))
+ if (II->getName().starts_with("__builtin_") ||
+ II->getName().starts_with("__is_") ||
+ II->getName().starts_with("__has_"))
return true;
return llvm::StringSwitch<bool>(II->getName())
.Case("__array_rank", true)
.Case("__array_extent", true)
.Case("__reference_binds_to_temporary", true)
- .Case("__underlying_type", true)
+ .Case("__reference_constructs_from_temporary", true)
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) .Case("__" #Trait, true)
+#include "clang/Basic/TransformTypeTraits.def"
.Default(false);
} else {
return llvm::StringSwitch<bool>(II->getName())
@@ -1665,32 +1716,47 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
.Case("__is_target_vendor", true)
.Case("__is_target_os", true)
.Case("__is_target_environment", true)
+ .Case("__is_target_variant_os", true)
+ .Case("__is_target_variant_environment", true)
.Default(false);
}
});
+ } else if (II == Ident__has_constexpr_builtin) {
+ EvaluateFeatureLikeBuiltinMacro(
+ OS, Tok, II, *this, false,
+ [this](Token &Tok, bool &HasLexedNextToken) -> int {
+ IdentifierInfo *II = ExpectFeatureIdentifierInfo(
+ Tok, *this, diag::err_feature_check_malformed);
+ if (!II)
+ return false;
+ unsigned BuiltinOp = II->getBuiltinID();
+ return BuiltinOp != 0 &&
+ this->getBuiltinInfo().isConstantEvaluated(BuiltinOp);
+ });
} else if (II == Ident__is_identifier) {
- EvaluateFeatureLikeBuiltinMacro(OS, Tok, II, *this,
+ EvaluateFeatureLikeBuiltinMacro(OS, Tok, II, *this, false,
[](Token &Tok, bool &HasLexedNextToken) -> int {
return Tok.is(tok::identifier);
});
} else if (II == Ident__has_attribute) {
- EvaluateFeatureLikeBuiltinMacro(OS, Tok, II, *this,
+ EvaluateFeatureLikeBuiltinMacro(OS, Tok, II, *this, true,
[this](Token &Tok, bool &HasLexedNextToken) -> int {
IdentifierInfo *II = ExpectFeatureIdentifierInfo(Tok, *this,
diag::err_feature_check_malformed);
- return II ? hasAttribute(AttrSyntax::GNU, nullptr, II,
- getTargetInfo(), getLangOpts()) : 0;
+ return II ? hasAttribute(AttributeCommonInfo::Syntax::AS_GNU, nullptr,
+ II, getTargetInfo(), getLangOpts())
+ : 0;
});
} else if (II == Ident__has_declspec) {
- EvaluateFeatureLikeBuiltinMacro(OS, Tok, II, *this,
+ EvaluateFeatureLikeBuiltinMacro(OS, Tok, II, *this, true,
[this](Token &Tok, bool &HasLexedNextToken) -> int {
IdentifierInfo *II = ExpectFeatureIdentifierInfo(Tok, *this,
diag::err_feature_check_malformed);
if (II) {
const LangOptions &LangOpts = getLangOpts();
return LangOpts.DeclSpecKeyword &&
- hasAttribute(AttrSyntax::Declspec, nullptr, II,
- getTargetInfo(), LangOpts);
+ hasAttribute(AttributeCommonInfo::Syntax::AS_Declspec, nullptr,
+ II, getTargetInfo(), LangOpts);
}
return false;
@@ -1698,8 +1764,8 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
} else if (II == Ident__has_cpp_attribute ||
II == Ident__has_c_attribute) {
bool IsCXX = II == Ident__has_cpp_attribute;
- EvaluateFeatureLikeBuiltinMacro(
- OS, Tok, II, *this, [&](Token &Tok, bool &HasLexedNextToken) -> int {
+ EvaluateFeatureLikeBuiltinMacro(OS, Tok, II, *this, true,
+ [&](Token &Tok, bool &HasLexedNextToken) -> int {
IdentifierInfo *ScopeII = nullptr;
IdentifierInfo *II = ExpectFeatureIdentifierInfo(
Tok, *this, diag::err_feature_check_malformed);
@@ -1713,12 +1779,15 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
HasLexedNextToken = true;
else {
ScopeII = II;
- LexUnexpandedToken(Tok);
+ // Lex an expanded token for the attribute name.
+ Lex(Tok);
II = ExpectFeatureIdentifierInfo(Tok, *this,
diag::err_feature_check_malformed);
}
- AttrSyntax Syntax = IsCXX ? AttrSyntax::CXX : AttrSyntax::C;
+ AttributeCommonInfo::Syntax Syntax =
+ IsCXX ? AttributeCommonInfo::Syntax::AS_CXX11
+ : AttributeCommonInfo::Syntax::AS_C23;
return II ? hasAttribute(Syntax, ScopeII, II, getTargetInfo(),
getLangOpts())
: 0;
@@ -1730,9 +1799,9 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
// double-quotes ("").
bool Value;
if (II == Ident__has_include)
- Value = EvaluateHasInclude(Tok, II, *this);
+ Value = EvaluateHasInclude(Tok, II);
else
- Value = EvaluateHasIncludeNext(Tok, II, *this);
+ Value = EvaluateHasIncludeNext(Tok, II);
if (Tok.isNot(tok::r_paren))
return;
@@ -1740,7 +1809,7 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
Tok.setKind(tok::numeric_constant);
} else if (II == Ident__has_warning) {
// The argument should be a parenthesized string literal.
- EvaluateFeatureLikeBuiltinMacro(OS, Tok, II, *this,
+ EvaluateFeatureLikeBuiltinMacro(OS, Tok, II, *this, false,
[this](Token &Tok, bool &HasLexedNextToken) -> int {
std::string WarningName;
SourceLocation StrStartLoc = Tok.getLocation();
@@ -1771,7 +1840,7 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
// The argument to this builtin should be an identifier. The
// builtin evaluates to 1 when that identifier names the module we are
// currently building.
- EvaluateFeatureLikeBuiltinMacro(OS, Tok, II, *this,
+ EvaluateFeatureLikeBuiltinMacro(OS, Tok, II, *this, false,
[this](Token &Tok, bool &HasLexedNextToken) -> int {
IdentifierInfo *II = ExpectFeatureIdentifierInfo(Tok, *this,
diag::err_expected_id_building_module);
@@ -1806,7 +1875,8 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
if (!Tok.isAnnotation() && Tok.getIdentifierInfo())
Tok.setKind(tok::identifier);
else if (Tok.is(tok::string_literal) && !Tok.hasUDSuffix()) {
- StringLiteralParser Literal(Tok, *this);
+ StringLiteralParser Literal(Tok, *this,
+ StringLiteralEvalMethod::Unevaluated);
if (Literal.hadError)
return;
@@ -1831,32 +1901,52 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
return;
} else if (II == Ident__is_target_arch) {
EvaluateFeatureLikeBuiltinMacro(
- OS, Tok, II, *this, [this](Token &Tok, bool &HasLexedNextToken) -> int {
+ OS, Tok, II, *this, false,
+ [this](Token &Tok, bool &HasLexedNextToken) -> int {
IdentifierInfo *II = ExpectFeatureIdentifierInfo(
Tok, *this, diag::err_feature_check_malformed);
return II && isTargetArch(getTargetInfo(), II);
});
} else if (II == Ident__is_target_vendor) {
EvaluateFeatureLikeBuiltinMacro(
- OS, Tok, II, *this, [this](Token &Tok, bool &HasLexedNextToken) -> int {
+ OS, Tok, II, *this, false,
+ [this](Token &Tok, bool &HasLexedNextToken) -> int {
IdentifierInfo *II = ExpectFeatureIdentifierInfo(
Tok, *this, diag::err_feature_check_malformed);
return II && isTargetVendor(getTargetInfo(), II);
});
} else if (II == Ident__is_target_os) {
EvaluateFeatureLikeBuiltinMacro(
- OS, Tok, II, *this, [this](Token &Tok, bool &HasLexedNextToken) -> int {
+ OS, Tok, II, *this, false,
+ [this](Token &Tok, bool &HasLexedNextToken) -> int {
IdentifierInfo *II = ExpectFeatureIdentifierInfo(
Tok, *this, diag::err_feature_check_malformed);
return II && isTargetOS(getTargetInfo(), II);
});
} else if (II == Ident__is_target_environment) {
EvaluateFeatureLikeBuiltinMacro(
- OS, Tok, II, *this, [this](Token &Tok, bool &HasLexedNextToken) -> int {
+ OS, Tok, II, *this, false,
+ [this](Token &Tok, bool &HasLexedNextToken) -> int {
IdentifierInfo *II = ExpectFeatureIdentifierInfo(
Tok, *this, diag::err_feature_check_malformed);
return II && isTargetEnvironment(getTargetInfo(), II);
});
+ } else if (II == Ident__is_target_variant_os) {
+ EvaluateFeatureLikeBuiltinMacro(
+ OS, Tok, II, *this, false,
+ [this](Token &Tok, bool &HasLexedNextToken) -> int {
+ IdentifierInfo *II = ExpectFeatureIdentifierInfo(
+ Tok, *this, diag::err_feature_check_malformed);
+ return II && isTargetVariantOS(getTargetInfo(), II);
+ });
+ } else if (II == Ident__is_target_variant_environment) {
+ EvaluateFeatureLikeBuiltinMacro(
+ OS, Tok, II, *this, false,
+ [this](Token &Tok, bool &HasLexedNextToken) -> int {
+ IdentifierInfo *II = ExpectFeatureIdentifierInfo(
+ Tok, *this, diag::err_feature_check_malformed);
+ return II && isTargetVariantEnvironment(getTargetInfo(), II);
+ });
} else {
llvm_unreachable("Unknown identifier!");
}
@@ -1872,3 +1962,29 @@ void Preprocessor::markMacroAsUsed(MacroInfo *MI) {
WarnUnusedMacroLocs.erase(MI->getDefinitionLoc());
MI->setIsUsed(true);
}
+
+void Preprocessor::processPathForFileMacro(SmallVectorImpl<char> &Path,
+ const LangOptions &LangOpts,
+ const TargetInfo &TI) {
+ LangOpts.remapPathPrefix(Path);
+ if (LangOpts.UseTargetPathSeparator) {
+ if (TI.getTriple().isOSWindows())
+ llvm::sys::path::remove_dots(Path, false,
+ llvm::sys::path::Style::windows_backslash);
+ else
+ llvm::sys::path::remove_dots(Path, false, llvm::sys::path::Style::posix);
+ }
+}
+
+void Preprocessor::processPathToFileName(SmallVectorImpl<char> &FileName,
+ const PresumedLoc &PLoc,
+ const LangOptions &LangOpts,
+ const TargetInfo &TI) {
+ // Try to get the last path component, failing that return the original
+ // presumed location.
+ StringRef PLFileName = llvm::sys::path::filename(PLoc.getFilename());
+ if (PLFileName.empty())
+ PLFileName = PLoc.getFilename();
+ FileName.append(PLFileName.begin(), PLFileName.end());
+ processPathForFileMacro(FileName, LangOpts, TI);
+}
diff --git a/contrib/llvm-project/clang/lib/Lex/Pragma.cpp b/contrib/llvm-project/clang/lib/Lex/Pragma.cpp
index 27765af34fed..499813f8ab7d 100644
--- a/contrib/llvm-project/clang/lib/Lex/Pragma.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/Pragma.cpp
@@ -12,8 +12,8 @@
//===----------------------------------------------------------------------===//
#include "clang/Lex/Pragma.h"
+#include "clang/Basic/CLWarnings.h"
#include "clang/Basic/Diagnostic.h"
-#include "clang/Basic/DiagnosticLex.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
@@ -36,12 +36,10 @@
#include "clang/Lex/TokenLexer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Timer.h"
@@ -50,6 +48,7 @@
#include <cstddef>
#include <cstdint>
#include <limits>
+#include <optional>
#include <string>
#include <utility>
#include <vector>
@@ -262,13 +261,49 @@ void Preprocessor::Handle_Pragma(Token &Tok) {
}
SourceLocation RParenLoc = Tok.getLocation();
- std::string StrVal = getSpelling(StrTok);
+ bool Invalid = false;
+ SmallString<64> StrVal;
+ StrVal.resize(StrTok.getLength());
+ StringRef StrValRef = getSpelling(StrTok, StrVal, &Invalid);
+ if (Invalid) {
+ Diag(PragmaLoc, diag::err__Pragma_malformed);
+ return;
+ }
+
+ assert(StrValRef.size() <= StrVal.size());
+
+ // If the token was spelled somewhere else, copy it.
+ if (StrValRef.begin() != StrVal.begin())
+ StrVal.assign(StrValRef);
+ // Truncate if necessary.
+ else if (StrValRef.size() != StrVal.size())
+ StrVal.resize(StrValRef.size());
+
+ // The _Pragma is lexically sound. Destringize according to C11 6.10.9.1.
+ prepare_PragmaString(StrVal);
+
+ // Plop the string (including the newline and trailing null) into a buffer
+ // where we can lex it.
+ Token TmpTok;
+ TmpTok.startToken();
+ CreateString(StrVal, TmpTok);
+ SourceLocation TokLoc = TmpTok.getLocation();
+
+ // Make and enter a lexer object so that we lex and expand the tokens just
+ // like any others.
+ Lexer *TL = Lexer::Create_PragmaLexer(TokLoc, PragmaLoc, RParenLoc,
+ StrVal.size(), *this);
+
+ EnterSourceFileWithLexer(TL, nullptr);
+
+ // With everything set up, lex this as a #pragma directive.
+ HandlePragmaDirective({PIK__Pragma, PragmaLoc});
+
+ // Finally, return whatever came after the pragma directive.
+ return Lex(Tok);
+}
- // The _Pragma is lexically sound. Destringize according to C11 6.10.9.1:
- // "The string literal is destringized by deleting any encoding prefix,
- // deleting the leading and trailing double-quotes, replacing each escape
- // sequence \" by a double-quote, and replacing each escape sequence \\ by a
- // single backslash."
+void clang::prepare_PragmaString(SmallVectorImpl<char> &StrVal) {
if (StrVal[0] == 'L' || StrVal[0] == 'U' ||
(StrVal[0] == 'u' && StrVal[1] != '8'))
StrVal.erase(StrVal.begin());
@@ -292,8 +327,8 @@ void Preprocessor::Handle_Pragma(Token &Tok) {
// Remove 'R " d-char-sequence' and 'd-char-sequence "'. We'll replace the
// parens below.
- StrVal.erase(0, 2 + NumDChars);
- StrVal.erase(StrVal.size() - 1 - NumDChars);
+ StrVal.erase(StrVal.begin(), StrVal.begin() + 2 + NumDChars);
+ StrVal.erase(StrVal.end() - 1 - NumDChars, StrVal.end());
} else {
assert(StrVal[0] == '"' && StrVal[StrVal.size()-1] == '"' &&
"Invalid string token!");
@@ -315,27 +350,7 @@ void Preprocessor::Handle_Pragma(Token &Tok) {
StrVal[0] = ' ';
// Replace the terminating quote with a \n.
- StrVal[StrVal.size()-1] = '\n';
-
- // Plop the string (including the newline and trailing null) into a buffer
- // where we can lex it.
- Token TmpTok;
- TmpTok.startToken();
- CreateString(StrVal, TmpTok);
- SourceLocation TokLoc = TmpTok.getLocation();
-
- // Make and enter a lexer object so that we lex and expand the tokens just
- // like any others.
- Lexer *TL = Lexer::Create_PragmaLexer(TokLoc, PragmaLoc, RParenLoc,
- StrVal.size(), *this);
-
- EnterSourceFileWithLexer(TL, nullptr);
-
- // With everything set up, lex this as a #pragma directive.
- HandlePragmaDirective({PIK__Pragma, PragmaLoc});
-
- // Finally, return whatever came after the pragma directive.
- return Lex(Tok);
+ StrVal[StrVal.size() - 1] = '\n';
}
/// HandleMicrosoft__pragma - Like Handle_Pragma except the pragma text
@@ -411,7 +426,7 @@ void Preprocessor::HandlePragmaOnce(Token &OnceTok) {
// Get the current file lexer we're looking at. Ignore _Pragma 'files' etc.
// Mark the file as a once-only file now.
- HeaderInfo.MarkFileIncludeOnce(getCurrentFileLexer()->getFileEntry());
+ HeaderInfo.MarkFileIncludeOnce(*getCurrentFileLexer()->getFileEntry());
}
void Preprocessor::HandlePragmaMark(Token &MarkTok) {
@@ -476,7 +491,7 @@ void Preprocessor::HandlePragmaSystemHeader(Token &SysHeaderTok) {
PreprocessorLexer *TheLexer = getCurrentFileLexer();
// Mark the file as a system header.
- HeaderInfo.MarkFileSystemHeader(TheLexer->getFileEntry());
+ HeaderInfo.MarkFileSystemHeader(*TheLexer->getFileEntry());
PresumedLoc PLoc = SourceMgr.getPresumedLoc(SysHeaderTok.getLocation());
if (PLoc.isInvalid())
@@ -497,89 +512,43 @@ void Preprocessor::HandlePragmaSystemHeader(Token &SysHeaderTok) {
SrcMgr::C_System);
}
-static llvm::Optional<Token> LexHeader(Preprocessor &PP,
- Optional<FileEntryRef> &File,
- bool SuppressIncludeNotFoundError) {
+/// HandlePragmaDependency - Handle \#pragma GCC dependency "foo" blah.
+void Preprocessor::HandlePragmaDependency(Token &DependencyTok) {
Token FilenameTok;
- if (PP.LexHeaderName(FilenameTok, /*AllowConcatenation*/ false))
- return llvm::None;
+ if (LexHeaderName(FilenameTok, /*AllowConcatenation*/false))
+ return;
// If the next token wasn't a header-name, diagnose the error.
if (FilenameTok.isNot(tok::header_name)) {
- PP.Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
- return llvm::None;
+ Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
+ return;
}
// Reserve a buffer to get the spelling.
SmallString<128> FilenameBuffer;
bool Invalid = false;
- StringRef Filename = PP.getSpelling(FilenameTok, FilenameBuffer, &Invalid);
+ StringRef Filename = getSpelling(FilenameTok, FilenameBuffer, &Invalid);
if (Invalid)
- return llvm::None;
+ return;
bool isAngled =
- PP.GetIncludeFilenameSpelling(FilenameTok.getLocation(), Filename);
+ GetIncludeFilenameSpelling(FilenameTok.getLocation(), Filename);
// If GetIncludeFilenameSpelling set the start ptr to null, there was an
// error.
if (Filename.empty())
- return llvm::None;
+ return;
// Search include directories for this file.
- const DirectoryLookup *CurDir;
- File = PP.LookupFile(FilenameTok.getLocation(), Filename, isAngled, nullptr,
- nullptr, CurDir, nullptr, nullptr, nullptr, nullptr,
- nullptr);
+ OptionalFileEntryRef File =
+ LookupFile(FilenameTok.getLocation(), Filename, isAngled, nullptr,
+ nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr);
if (!File) {
if (!SuppressIncludeNotFoundError)
- PP.Diag(FilenameTok, diag::err_pp_file_not_found) << Filename;
- return llvm::None;
- }
-
- return FilenameTok;
-}
-
-/// HandlePragmaIncludeInstead - Handle \#pragma clang include_instead(header).
-void Preprocessor::HandlePragmaIncludeInstead(Token &Tok) {
- // Get the current file lexer we're looking at. Ignore _Pragma 'files' etc.
- PreprocessorLexer *TheLexer = getCurrentFileLexer();
-
- if (!SourceMgr.isInSystemHeader(Tok.getLocation())) {
- Diag(Tok, diag::err_pragma_include_instead_not_sysheader);
+ Diag(FilenameTok, diag::err_pp_file_not_found) << Filename;
return;
}
- Lex(Tok);
- if (Tok.isNot(tok::l_paren)) {
- Diag(Tok, diag::err_expected) << "(";
- return;
- }
-
- Optional<FileEntryRef> File;
- llvm::Optional<Token> FilenameTok =
- LexHeader(*this, File, SuppressIncludeNotFoundError);
- if (!FilenameTok)
- return;
-
- Lex(Tok);
- if (Tok.isNot(tok::r_paren)) {
- Diag(Tok, diag::err_expected) << ")";
- return;
- }
-
- SmallString<128> FilenameBuffer;
- StringRef Filename = getSpelling(*FilenameTok, FilenameBuffer);
- HeaderInfo.AddFileAlias(TheLexer->getFileEntry(), Filename);
-}
-
-/// HandlePragmaDependency - Handle \#pragma GCC dependency "foo" blah.
-void Preprocessor::HandlePragmaDependency(Token &DependencyTok) {
- Optional<FileEntryRef> File;
- llvm::Optional<Token> FilenameTok =
- LexHeader(*this, File, SuppressIncludeNotFoundError);
- if (!FilenameTok)
- return;
-
- const FileEntry *CurFile = getCurrentFileLexer()->getFileEntry();
+ OptionalFileEntryRef CurFile = getCurrentFileLexer()->getFileEntry();
// If this file is older than the file it depends on, emit a diagnostic.
if (CurFile && CurFile->getModificationTime() < File->getModificationTime()) {
@@ -594,7 +563,7 @@ void Preprocessor::HandlePragmaDependency(Token &DependencyTok) {
// Remove the trailing ' ' if present.
if (!Message.empty())
Message.erase(Message.end()-1);
- Diag(*FilenameTok, diag::pp_out_of_date_dependency) << Message;
+ Diag(FilenameTok, diag::pp_out_of_date_dependency) << Message;
}
}
@@ -1069,18 +1038,6 @@ struct PragmaSystemHeaderHandler : public PragmaHandler {
}
};
-/// PragmaIncludeInsteadHandler - "\#pragma clang include_instead(header)" marks
-/// the current file as non-includable if the including header is not a system
-/// header.
-struct PragmaIncludeInsteadHandler : public PragmaHandler {
- PragmaIncludeInsteadHandler() : PragmaHandler("include_instead") {}
-
- void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
- Token &IIToken) override {
- PP.HandlePragmaIncludeInstead(IIToken);
- }
-};
-
struct PragmaDependencyHandler : public PragmaHandler {
PragmaDependencyHandler() : PragmaHandler("dependency") {}
@@ -1098,7 +1055,7 @@ struct PragmaDebugHandler : public PragmaHandler {
Token Tok;
PP.LexUnexpandedToken(Tok);
if (Tok.isNot(tok::identifier)) {
- PP.Diag(Tok, diag::warn_pragma_diagnostic_invalid);
+ PP.Diag(Tok, diag::warn_pragma_debug_missing_command);
return;
}
IdentifierInfo *II = Tok.getIdentifierInfo();
@@ -1120,28 +1077,19 @@ struct PragmaDebugHandler : public PragmaHandler {
PP.EnterToken(Crasher, /*IsReinject*/ false);
}
} else if (II->isStr("dump")) {
- Token Identifier;
- PP.LexUnexpandedToken(Identifier);
- if (auto *DumpII = Identifier.getIdentifierInfo()) {
- Token DumpAnnot;
- DumpAnnot.startToken();
- DumpAnnot.setKind(tok::annot_pragma_dump);
- DumpAnnot.setAnnotationRange(
- SourceRange(Tok.getLocation(), Identifier.getLocation()));
- DumpAnnot.setAnnotationValue(DumpII);
- PP.DiscardUntilEndOfDirective();
- PP.EnterToken(DumpAnnot, /*IsReinject*/false);
- } else {
- PP.Diag(Identifier, diag::warn_pragma_debug_missing_argument)
- << II->getName();
- }
+ Token DumpAnnot;
+ DumpAnnot.startToken();
+ DumpAnnot.setKind(tok::annot_pragma_dump);
+ DumpAnnot.setAnnotationRange(SourceRange(Tok.getLocation()));
+ PP.EnterToken(DumpAnnot, /*IsReinject*/false);
} else if (II->isStr("diag_mapping")) {
Token DiagName;
PP.LexUnexpandedToken(DiagName);
if (DiagName.is(tok::eod))
PP.getDiagnostics().dump();
else if (DiagName.is(tok::string_literal) && !DiagName.hasUDSuffix()) {
- StringLiteralParser Literal(DiagName, PP);
+ StringLiteralParser Literal(DiagName, PP,
+ StringLiteralEvalMethod::Unevaluated);
if (Literal.hadError)
return;
PP.getDiagnostics().dump(Literal.GetString());
@@ -1236,6 +1184,23 @@ struct PragmaDebugHandler : public PragmaHandler {
PP.Diag(Tok, diag::warn_pragma_debug_unexpected_command)
<< DumpII->getName();
}
+ } else if (II->isStr("sloc_usage")) {
+ // An optional integer literal argument specifies the number of files to
+ // specifically report information about.
+ std::optional<unsigned> MaxNotes;
+ Token ArgToken;
+ PP.Lex(ArgToken);
+ uint64_t Value;
+ if (ArgToken.is(tok::numeric_constant) &&
+ PP.parseSimpleIntegerLiteral(ArgToken, Value)) {
+ MaxNotes = Value;
+ } else if (ArgToken.isNot(tok::eod)) {
+ PP.Diag(ArgToken, diag::warn_pragma_debug_unexpected_argument);
+ }
+
+ PP.Diag(Tok, diag::remark_sloc_usage);
+ PP.getSourceManager().noteSLocAddressSpaceUsage(PP.getDiagnostics(),
+ MaxNotes);
} else {
PP.Diag(Tok, diag::warn_pragma_debug_unexpected_command)
<< II->getName();
@@ -1280,6 +1245,32 @@ struct PragmaDebugHandler : public PragmaHandler {
#endif
};
+struct PragmaUnsafeBufferUsageHandler : public PragmaHandler {
+ PragmaUnsafeBufferUsageHandler() : PragmaHandler("unsafe_buffer_usage") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
+ Token &FirstToken) override {
+ Token Tok;
+
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok, diag::err_pp_pragma_unsafe_buffer_usage_syntax);
+ return;
+ }
+
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ SourceLocation Loc = Tok.getLocation();
+
+ if (II->isStr("begin")) {
+ if (PP.enterOrExitSafeBufferOptOutRegion(true, Loc))
+ PP.Diag(Loc, diag::err_pp_double_begin_pragma_unsafe_buffer_usage);
+ } else if (II->isStr("end")) {
+ if (PP.enterOrExitSafeBufferOptOutRegion(false, Loc))
+ PP.Diag(Loc, diag::err_pp_unmatched_end_begin_pragma_unsafe_buffer_usage);
+ } else
+ PP.Diag(Tok, diag::err_pp_pragma_unsafe_buffer_usage_syntax);
+ }
+};
+
/// PragmaDiagnosticHandler - e.g. '\#pragma GCC diagnostic ignored "-Wformat"'
struct PragmaDiagnosticHandler : public PragmaHandler {
private:
@@ -1301,16 +1292,26 @@ public:
IdentifierInfo *II = Tok.getIdentifierInfo();
PPCallbacks *Callbacks = PP.getPPCallbacks();
+ // Get the next token, which is either an EOD or a string literal. We lex
+ // it now so that we can early return if the previous token was push or pop.
+ PP.LexUnexpandedToken(Tok);
+
if (II->isStr("pop")) {
if (!PP.getDiagnostics().popMappings(DiagLoc))
PP.Diag(Tok, diag::warn_pragma_diagnostic_cannot_pop);
else if (Callbacks)
Callbacks->PragmaDiagnosticPop(DiagLoc, Namespace);
+
+ if (Tok.isNot(tok::eod))
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_diagnostic_invalid_token);
return;
} else if (II->isStr("push")) {
PP.getDiagnostics().pushMappings(DiagLoc);
if (Callbacks)
Callbacks->PragmaDiagnosticPush(DiagLoc, Namespace);
+
+ if (Tok.isNot(tok::eod))
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_diagnostic_invalid_token);
return;
}
@@ -1326,9 +1327,8 @@ public:
return;
}
- PP.LexUnexpandedToken(Tok);
+ // At this point, we expect a string literal.
SourceLocation StringLoc = Tok.getLocation();
-
std::string WarningName;
if (!PP.FinishLexStringLiteral(Tok, WarningName, "pragma diagnostic",
/*AllowMacroExpansion=*/false))
@@ -1413,12 +1413,15 @@ struct PragmaWarningHandler : public PragmaHandler {
return;
}
}
+ PP.getDiagnostics().pushMappings(DiagLoc);
if (Callbacks)
Callbacks->PragmaWarningPush(DiagLoc, Level);
} else if (II && II->isStr("pop")) {
// #pragma warning( pop )
PP.Lex(Tok);
- if (Callbacks)
+ if (!PP.getDiagnostics().popMappings(DiagLoc))
+ PP.Diag(Tok, diag::warn_pragma_diagnostic_cannot_pop);
+ else if (Callbacks)
Callbacks->PragmaWarningPop(DiagLoc);
} else {
// #pragma warning( warning-specifier : warning-number-list
@@ -1432,14 +1435,19 @@ struct PragmaWarningHandler : public PragmaHandler {
// Figure out which warning specifier this is.
bool SpecifierValid;
- StringRef Specifier;
- llvm::SmallString<1> SpecifierBuf;
+ PPCallbacks::PragmaWarningSpecifier Specifier;
if (II) {
- Specifier = II->getName();
- SpecifierValid = llvm::StringSwitch<bool>(Specifier)
- .Cases("default", "disable", "error", "once",
- "suppress", true)
- .Default(false);
+ int SpecifierInt = llvm::StringSwitch<int>(II->getName())
+ .Case("default", PPCallbacks::PWS_Default)
+ .Case("disable", PPCallbacks::PWS_Disable)
+ .Case("error", PPCallbacks::PWS_Error)
+ .Case("once", PPCallbacks::PWS_Once)
+ .Case("suppress", PPCallbacks::PWS_Suppress)
+ .Default(-1);
+ if ((SpecifierValid = SpecifierInt != -1))
+ Specifier =
+ static_cast<PPCallbacks::PragmaWarningSpecifier>(SpecifierInt);
+
// If we read a correct specifier, snatch next token (that should be
// ":", checked later).
if (SpecifierValid)
@@ -1447,9 +1455,10 @@ struct PragmaWarningHandler : public PragmaHandler {
} else {
// Token is a numeric constant. It should be either 1, 2, 3 or 4.
uint64_t Value;
- Specifier = PP.getSpelling(Tok, SpecifierBuf);
if (PP.parseSimpleIntegerLiteral(Tok, Value)) {
- SpecifierValid = (Value >= 1) && (Value <= 4);
+ if ((SpecifierValid = (Value >= 1) && (Value <= 4)))
+ Specifier = static_cast<PPCallbacks::PragmaWarningSpecifier>(
+ PPCallbacks::PWS_Level1 + Value - 1);
} else
SpecifierValid = false;
// Next token already snatched by parseSimpleIntegerLiteral.
@@ -1476,6 +1485,22 @@ struct PragmaWarningHandler : public PragmaHandler {
}
Ids.push_back(int(Value));
}
+
+ // Only act on disable for now.
+ diag::Severity SV = diag::Severity();
+ if (Specifier == PPCallbacks::PWS_Disable)
+ SV = diag::Severity::Ignored;
+ if (SV != diag::Severity())
+ for (int Id : Ids) {
+ if (auto Group = diagGroupFromCLWarningID(Id)) {
+ bool unknownDiag = PP.getDiagnostics().setSeverityForGroup(
+ diag::Flavor::WarningOrError, *Group, SV, DiagLoc);
+ assert(!unknownDiag &&
+ "wd table should only contain known diags");
+ (void)unknownDiag;
+ }
+ }
+
if (Callbacks)
Callbacks->PragmaWarning(DiagLoc, Specifier, Ids);
@@ -1726,7 +1751,7 @@ struct PragmaModuleBeginHandler : public PragmaHandler {
// Find the module we're entering. We require that a module map for it
// be loaded or implicitly loadable.
auto &HSI = PP.getHeaderSearchInfo();
- Module *M = HSI.lookupModule(Current);
+ Module *M = HSI.lookupModule(Current, ModuleName.front().second);
if (!M) {
PP.Diag(ModuleName.front().second,
diag::err_pp_module_begin_no_module_map) << Current;
@@ -1744,7 +1769,7 @@ struct PragmaModuleBeginHandler : public PragmaHandler {
// If the module isn't available, it doesn't make sense to enter it.
if (Preprocessor::checkModuleIsAvailable(
- PP.getLangOpts(), PP.getTargetInfo(), PP.getDiagnostics(), M)) {
+ PP.getLangOpts(), PP.getTargetInfo(), *M, PP.getDiagnostics())) {
PP.Diag(BeginLoc, diag::note_pp_module_begin_here)
<< M->getTopLevelModuleName();
return;
@@ -1970,6 +1995,135 @@ struct PragmaRegionHandler : public PragmaHandler {
}
};
+/// "\#pragma managed"
+/// "\#pragma managed(...)"
+/// "\#pragma unmanaged"
+/// MSVC ignores this pragma when not compiling using /clr, which clang doesn't
+/// support. We parse it and ignore it to avoid -Wunknown-pragma warnings.
+struct PragmaManagedHandler : public EmptyPragmaHandler {
+ PragmaManagedHandler(const char *pragma) : EmptyPragmaHandler(pragma) {}
+};
+
+/// This handles parsing pragmas that take a macro name and optional message
+static IdentifierInfo *HandleMacroAnnotationPragma(Preprocessor &PP, Token &Tok,
+ const char *Pragma,
+ std::string &MessageString) {
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(Tok, diag::err_expected) << "(";
+ return nullptr;
+ }
+
+ PP.LexUnexpandedToken(Tok);
+ if (!Tok.is(tok::identifier)) {
+ PP.Diag(Tok, diag::err_expected) << tok::identifier;
+ return nullptr;
+ }
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+
+ if (!II->hasMacroDefinition()) {
+ PP.Diag(Tok, diag::err_pp_visibility_non_macro) << II;
+ return nullptr;
+ }
+
+ PP.Lex(Tok);
+ if (Tok.is(tok::comma)) {
+ PP.Lex(Tok);
+ if (!PP.FinishLexStringLiteral(Tok, MessageString, Pragma,
+ /*AllowMacroExpansion=*/true))
+ return nullptr;
+ }
+
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok, diag::err_expected) << ")";
+ return nullptr;
+ }
+ return II;
+}
+
+/// "\#pragma clang deprecated(...)"
+///
+/// The syntax is
+/// \code
+/// #pragma clang deprecate(MACRO_NAME [, Message])
+/// \endcode
+struct PragmaDeprecatedHandler : public PragmaHandler {
+ PragmaDeprecatedHandler() : PragmaHandler("deprecated") {}
+
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
+ Token &Tok) override {
+ std::string MessageString;
+
+ if (IdentifierInfo *II = HandleMacroAnnotationPragma(
+ PP, Tok, "#pragma clang deprecated", MessageString)) {
+ II->setIsDeprecatedMacro(true);
+ PP.addMacroDeprecationMsg(II, std::move(MessageString),
+ Tok.getLocation());
+ }
+ }
+};
+
+/// "\#pragma clang restrict_expansion(...)"
+///
+/// The syntax is
+/// \code
+/// #pragma clang restrict_expansion(MACRO_NAME [, Message])
+/// \endcode
+struct PragmaRestrictExpansionHandler : public PragmaHandler {
+ PragmaRestrictExpansionHandler() : PragmaHandler("restrict_expansion") {}
+
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
+ Token &Tok) override {
+ std::string MessageString;
+
+ if (IdentifierInfo *II = HandleMacroAnnotationPragma(
+ PP, Tok, "#pragma clang restrict_expansion", MessageString)) {
+ II->setIsRestrictExpansion(true);
+ PP.addRestrictExpansionMsg(II, std::move(MessageString),
+ Tok.getLocation());
+ }
+ }
+};
+
+/// "\#pragma clang final(...)"
+///
+/// The syntax is
+/// \code
+/// #pragma clang final(MACRO_NAME)
+/// \endcode
+struct PragmaFinalHandler : public PragmaHandler {
+ PragmaFinalHandler() : PragmaHandler("final") {}
+
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
+ Token &Tok) override {
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(Tok, diag::err_expected) << "(";
+ return;
+ }
+
+ PP.LexUnexpandedToken(Tok);
+ if (!Tok.is(tok::identifier)) {
+ PP.Diag(Tok, diag::err_expected) << tok::identifier;
+ return;
+ }
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+
+ if (!II->hasMacroDefinition()) {
+ PP.Diag(Tok, diag::err_pp_visibility_non_macro) << II;
+ return;
+ }
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok, diag::err_expected) << ")";
+ return;
+ }
+ II->setIsFinal(true);
+ PP.addFinalLoc(II, Tok.getLocation());
+ }
+};
+
} // namespace
/// RegisterBuiltinPragmas - Install the standard preprocessor pragmas:
@@ -1993,12 +2147,14 @@ void Preprocessor::RegisterBuiltinPragmas() {
// #pragma clang ...
AddPragmaHandler("clang", new PragmaPoisonHandler());
AddPragmaHandler("clang", new PragmaSystemHeaderHandler());
- AddPragmaHandler("clang", new PragmaIncludeInsteadHandler());
AddPragmaHandler("clang", new PragmaDebugHandler());
AddPragmaHandler("clang", new PragmaDependencyHandler());
AddPragmaHandler("clang", new PragmaDiagnosticHandler("clang"));
AddPragmaHandler("clang", new PragmaARCCFCodeAuditedHandler());
AddPragmaHandler("clang", new PragmaAssumeNonNullHandler());
+ AddPragmaHandler("clang", new PragmaDeprecatedHandler());
+ AddPragmaHandler("clang", new PragmaRestrictExpansionHandler());
+ AddPragmaHandler("clang", new PragmaFinalHandler());
// #pragma clang module ...
auto *ModuleHandler = new PragmaNamespace("module");
@@ -2009,6 +2165,9 @@ void Preprocessor::RegisterBuiltinPragmas() {
ModuleHandler->AddPragma(new PragmaModuleBuildHandler());
ModuleHandler->AddPragma(new PragmaModuleLoadHandler());
+ // Safe Buffers pragmas
+ AddPragmaHandler("clang", new PragmaUnsafeBufferUsageHandler);
+
// Add region pragmas.
AddPragmaHandler(new PragmaRegionHandler("region"));
AddPragmaHandler(new PragmaRegionHandler("endregion"));
@@ -2020,6 +2179,8 @@ void Preprocessor::RegisterBuiltinPragmas() {
AddPragmaHandler(new PragmaIncludeAliasHandler());
AddPragmaHandler(new PragmaHdrstopHandler());
AddPragmaHandler(new PragmaSystemHeaderHandler());
+ AddPragmaHandler(new PragmaManagedHandler("managed"));
+ AddPragmaHandler(new PragmaManagedHandler("unmanaged"));
}
// Pragmas added by plugins
diff --git a/contrib/llvm-project/clang/lib/Lex/PreprocessingRecord.cpp b/contrib/llvm-project/clang/lib/Lex/PreprocessingRecord.cpp
index ed59dbdf018d..aab6a2bed89d 100644
--- a/contrib/llvm-project/clang/lib/Lex/PreprocessingRecord.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PreprocessingRecord.cpp
@@ -20,7 +20,6 @@
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/Token.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Capacity.h"
@@ -31,6 +30,7 @@
#include <cstddef>
#include <cstring>
#include <iterator>
+#include <optional>
#include <utility>
#include <vector>
@@ -42,7 +42,8 @@ ExternalPreprocessingRecordSource::~ExternalPreprocessingRecordSource() =
InclusionDirective::InclusionDirective(PreprocessingRecord &PPRec,
InclusionKind Kind, StringRef FileName,
bool InQuotes, bool ImportedModule,
- const FileEntry *File, SourceRange Range)
+ OptionalFileEntryRef File,
+ SourceRange Range)
: PreprocessingDirective(InclusionDirectiveKind, Range), InQuotes(InQuotes),
Kind(Kind), ImportedModule(ImportedModule), File(File) {
char *Memory = (char *)PPRec.Allocate(FileName.size() + 1, alignof(char));
@@ -111,10 +112,9 @@ bool PreprocessingRecord::isEntityInFileID(iterator PPEI, FileID FID) {
// See if the external source can see if the entity is in the file without
// deserializing it.
- Optional<bool> IsInFile =
- ExternalSource->isPreprocessedEntityInFileID(LoadedIndex, FID);
- if (IsInFile.hasValue())
- return IsInFile.getValue();
+ if (std::optional<bool> IsInFile =
+ ExternalSource->isPreprocessedEntityInFileID(LoadedIndex, FID))
+ return *IsInFile;
// The external source did not provide a definite answer, go and deserialize
// the entity to check it.
@@ -381,12 +381,7 @@ PreprocessingRecord::getLoadedPreprocessedEntity(unsigned Index) {
MacroDefinitionRecord *
PreprocessingRecord::findMacroDefinition(const MacroInfo *MI) {
- llvm::DenseMap<const MacroInfo *, MacroDefinitionRecord *>::iterator Pos =
- MacroDefinitions.find(MI);
- if (Pos == MacroDefinitions.end())
- return nullptr;
-
- return Pos->second;
+ return MacroDefinitions.lookup(MI);
}
void PreprocessingRecord::addMacroExpansion(const Token &Id,
@@ -475,15 +470,9 @@ void PreprocessingRecord::MacroUndefined(const Token &Id,
}
void PreprocessingRecord::InclusionDirective(
- SourceLocation HashLoc,
- const Token &IncludeTok,
- StringRef FileName,
- bool IsAngled,
- CharSourceRange FilenameRange,
- const FileEntry *File,
- StringRef SearchPath,
- StringRef RelativePath,
- const Module *Imported,
+ SourceLocation HashLoc, const Token &IncludeTok, StringRef FileName,
+ bool IsAngled, CharSourceRange FilenameRange, OptionalFileEntryRef File,
+ StringRef SearchPath, StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) {
InclusionDirective::InclusionKind Kind = InclusionDirective::Include;
diff --git a/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp b/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp
index e376fff90432..7fdb5d4c0d7b 100644
--- a/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp
@@ -58,7 +58,6 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -66,6 +65,7 @@
#include <algorithm>
#include <cassert>
#include <memory>
+#include <optional>
#include <string>
#include <utility>
#include <vector>
@@ -77,7 +77,7 @@ LLVM_INSTANTIATE_REGISTRY(PragmaHandlerRegistry)
ExternalPreprocessorSource::~ExternalPreprocessorSource() = default;
Preprocessor::Preprocessor(std::shared_ptr<PreprocessorOptions> PPOpts,
- DiagnosticsEngine &diags, LangOptions &opts,
+ DiagnosticsEngine &diags, const LangOptions &opts,
SourceManager &SM, HeaderSearch &Headers,
ModuleLoader &TheModuleLoader,
IdentifierInfoLookup *IILookup, bool OwnsHeaders,
@@ -146,6 +146,10 @@ Preprocessor::Preprocessor(std::shared_ptr<PreprocessorOptions> PPOpts,
Ident_AbnormalTermination = nullptr;
}
+ // Default incremental processing to -fincremental-extensions, clients can
+ // override with `enableIncrementalProcessing` if desired.
+ IncrementalProcessing = LangOpts.IncrementalExtensions;
+
// If using a PCH where a #pragma hdrstop is expected, start skipping tokens.
if (usingPCHWithPragmaHdrStop())
SkippingUntilPragmaHdrStop = true;
@@ -158,11 +162,6 @@ Preprocessor::Preprocessor(std::shared_ptr<PreprocessorOptions> PPOpts,
if (this->PPOpts->GeneratePreamble)
PreambleConditionalStack.startRecording();
- ExcludedConditionalDirectiveSkipMappings =
- this->PPOpts->ExcludedConditionalDirectiveSkipMappings;
- if (ExcludedConditionalDirectiveSkipMappings)
- ExcludedConditionalDirectiveSkipMappings->clear();
-
MaxTokens = LangOpts.MaxTokens;
}
@@ -171,12 +170,6 @@ Preprocessor::~Preprocessor() {
IncludeMacroStack.clear();
- // Destroy any macro definitions.
- while (MacroInfoChain *I = MIChainHead) {
- MIChainHead = I->Next;
- I->~MacroInfoChain();
- }
-
// Free any cached macro expanders.
// This populates MacroArgCache, so all TokenLexers need to be destroyed
// before the code below that frees up the MacroArgCache list.
@@ -208,6 +201,16 @@ void Preprocessor::Initialize(const TargetInfo &Target,
// Populate the identifier table with info about keywords for the current language.
Identifiers.AddKeywords(LangOpts);
+
+ // Initialize the __FTL_EVAL_METHOD__ macro to the TargetInfo.
+ setTUFPEvalMethod(getTargetInfo().getFPEvalMethod());
+
+ if (getLangOpts().getFPEvalMethod() == LangOptions::FEM_UnsetOnCommandLine)
+ // Use setting from TargetInfo.
+ setCurrentFPEvalMethod(SourceLocation(), Target.getFPEvalMethod());
+ else
+ // Set initial value of __FLT_EVAL_METHOD__ from the command line.
+ setCurrentFPEvalMethod(SourceLocation(), getLangOpts().getFPEvalMethod());
}
void Preprocessor::InitializeForModelFile() {
@@ -229,8 +232,10 @@ void Preprocessor::FinalizeForModelFile() {
}
void Preprocessor::DumpToken(const Token &Tok, bool DumpFlags) const {
- llvm::errs() << tok::getTokenName(Tok.getKind()) << " '"
- << getSpelling(Tok) << "'";
+ llvm::errs() << tok::getTokenName(Tok.getKind());
+
+ if (!Tok.isAnnotation())
+ llvm::errs() << " '" << getSpelling(Tok) << "'";
if (!DumpFlags) return;
@@ -377,22 +382,23 @@ StringRef Preprocessor::getLastMacroWithSpelling(
void Preprocessor::recomputeCurLexerKind() {
if (CurLexer)
- CurLexerKind = CLK_Lexer;
+ CurLexerCallback = CurLexer->isDependencyDirectivesLexer()
+ ? CLK_DependencyDirectivesLexer
+ : CLK_Lexer;
else if (CurTokenLexer)
- CurLexerKind = CLK_TokenLexer;
+ CurLexerCallback = CLK_TokenLexer;
else
- CurLexerKind = CLK_CachingLexer;
+ CurLexerCallback = CLK_CachingLexer;
}
-bool Preprocessor::SetCodeCompletionPoint(const FileEntry *File,
+bool Preprocessor::SetCodeCompletionPoint(FileEntryRef File,
unsigned CompleteLine,
unsigned CompleteColumn) {
- assert(File);
assert(CompleteLine && CompleteColumn && "Starts from 1:1");
assert(!CodeCompletionFile && "Already set");
// Load the actual file's contents.
- Optional<llvm::MemoryBufferRef> Buffer =
+ std::optional<llvm::MemoryBufferRef> Buffer =
SourceMgr.getMemoryBufferForFileOrNone(File);
if (!Buffer)
return true;
@@ -521,6 +527,13 @@ Module *Preprocessor::getCurrentModule() {
return getHeaderSearchInfo().lookupModule(getLangOpts().CurrentModule);
}
+Module *Preprocessor::getCurrentModuleImplementation() {
+ if (!getLangOpts().isCompilingModuleImplementation())
+ return nullptr;
+
+ return getHeaderSearchInfo().lookupModule(getLangOpts().ModuleName);
+}
+
//===----------------------------------------------------------------------===//
// Preprocessor Initialization Methods
//===----------------------------------------------------------------------===//
@@ -548,8 +561,8 @@ void Preprocessor::EnterMainSourceFile() {
// Tell the header info that the main file was entered. If the file is later
// #imported, it won't be re-entered.
- if (const FileEntry *FE = SourceMgr.getFileEntryForID(MainFileID))
- HeaderInfo.IncrementIncludeCount(FE);
+ if (OptionalFileEntryRef FE = SourceMgr.getFileEntryRefForID(MainFileID))
+ markIncluded(*FE);
}
// Preprocess Predefines to populate the initial preprocessor state.
@@ -566,11 +579,10 @@ void Preprocessor::EnterMainSourceFile() {
if (!PPOpts->PCHThroughHeader.empty()) {
// Lookup and save the FileID for the through header. If it isn't found
// in the search path, it's a fatal error.
- const DirectoryLookup *CurDir;
- Optional<FileEntryRef> File = LookupFile(
+ OptionalFileEntryRef File = LookupFile(
SourceLocation(), PPOpts->PCHThroughHeader,
- /*isAngled=*/false, /*FromDir=*/nullptr, /*FromFile=*/nullptr, CurDir,
- /*SearchPath=*/nullptr, /*RelativePath=*/nullptr,
+ /*isAngled=*/false, /*FromDir=*/nullptr, /*FromFile=*/nullptr,
+ /*CurDir=*/nullptr, /*SearchPath=*/nullptr, /*RelativePath=*/nullptr,
/*SuggestedModule=*/nullptr, /*IsMapped=*/nullptr,
/*IsFrameworkFound=*/nullptr);
if (!File) {
@@ -631,20 +643,7 @@ void Preprocessor::SkipTokensWhileUsingPCH() {
while (true) {
bool InPredefines =
(CurLexer && CurLexer->getFileID() == getPredefinesFileID());
- switch (CurLexerKind) {
- case CLK_Lexer:
- CurLexer->Lex(Tok);
- break;
- case CLK_TokenLexer:
- CurTokenLexer->Lex(Tok);
- break;
- case CLK_CachingLexer:
- CachingLex(Tok);
- break;
- case CLK_LexAfterModuleImport:
- LexAfterModuleImport(Tok);
- break;
- }
+ CurLexerCallback(*this, Tok);
if (Tok.is(tok::eof) && !InPredefines) {
ReachedMainFileEOF = true;
break;
@@ -716,12 +715,14 @@ IdentifierInfo *Preprocessor::LookUpIdentifierInfo(Token &Identifier) const {
}
// Update the token info (identifier info and appropriate token kind).
+ // FIXME: the raw_identifier may contain leading whitespace which is removed
+ // from the cleaned identifier token. The SourceLocation should be updated to
+ // refer to the non-whitespace character. For instance, the text "\\\nB" (a
+ // line continuation before 'B') is parsed as a single tok::raw_identifier and
+ // is cleaned to tok::identifier "B". After cleaning the token's length is
+ // still 3 and the SourceLocation refers to the location of the backslash.
Identifier.setIdentifierInfo(II);
- if (getLangOpts().MSVCCompat && II->isCPlusPlusOperatorKeyword() &&
- getSourceManager().isInSystemHeader(Identifier.getLocation()))
- Identifier.setKind(tok::identifier);
- else
- Identifier.setKind(II->getTokenID());
+ Identifier.setKind(II->getTokenID());
return II;
}
@@ -755,29 +756,6 @@ void Preprocessor::HandlePoisonedIdentifier(Token & Identifier) {
Diag(Identifier,it->second) << Identifier.getIdentifierInfo();
}
-/// Returns a diagnostic message kind for reporting a future keyword as
-/// appropriate for the identifier and specified language.
-static diag::kind getFutureCompatDiagKind(const IdentifierInfo &II,
- const LangOptions &LangOpts) {
- assert(II.isFutureCompatKeyword() && "diagnostic should not be needed");
-
- if (LangOpts.CPlusPlus)
- return llvm::StringSwitch<diag::kind>(II.getName())
-#define CXX11_KEYWORD(NAME, FLAGS) \
- .Case(#NAME, diag::warn_cxx11_keyword)
-#define CXX20_KEYWORD(NAME, FLAGS) \
- .Case(#NAME, diag::warn_cxx20_keyword)
-#include "clang/Basic/TokenKinds.def"
- // char8_t is not modeled as a CXX20_KEYWORD because it's not
- // unconditionally enabled in C++20 mode. (It can be disabled
- // by -fno-char8_t.)
- .Case("char8_t", diag::warn_cxx20_keyword)
- ;
-
- llvm_unreachable(
- "Keyword not known to come from a newer Standard or proposed Standard");
-}
-
void Preprocessor::updateOutOfDateIdentifier(IdentifierInfo &II) const {
assert(II.isOutOfDate() && "not out of date");
getExternalSource()->updateOutOfDateIdentifier(II);
@@ -823,8 +801,8 @@ bool Preprocessor::HandleIdentifier(Token &Identifier) {
}
// If this is a macro to be expanded, do it.
- if (MacroDefinition MD = getMacroDefinition(&II)) {
- auto *MI = MD.getMacroInfo();
+ if (const MacroDefinition MD = getMacroDefinition(&II)) {
+ const auto *MI = MD.getMacroInfo();
assert(MI && "macro definition with no macro info?");
if (!DisableMacroExpansion) {
if (!Identifier.isExpandDisabled() && MI->isEnabled()) {
@@ -849,7 +827,7 @@ bool Preprocessor::HandleIdentifier(Token &Identifier) {
// FIXME: This warning is disabled in cases where it shouldn't be, like
// "#define constexpr constexpr", "int constexpr;"
if (II.isFutureCompatKeyword() && !DisableMacroExpansion) {
- Diag(Identifier, getFutureCompatDiagKind(II, getLangOpts()))
+ Diag(Identifier, getIdentifierTable().getFutureCompatDiagKind(II, getLangOpts()))
<< II.getName();
// Don't diagnose this keyword again in this translation unit.
II.setIsFutureCompatKeyword(false);
@@ -869,16 +847,17 @@ bool Preprocessor::HandleIdentifier(Token &Identifier) {
// keyword when we're in a caching lexer, because caching lexers only get
// used in contexts where import declarations are disallowed.
//
- // Likewise if this is the C++ Modules TS import keyword.
+ // Likewise if this is the standard C++ import keyword.
if (((LastTokenWasAt && II.isModulesImport()) ||
Identifier.is(tok::kw_import)) &&
!InMacroArgs && !DisableMacroExpansion &&
(getLangOpts().Modules || getLangOpts().DebuggerSupport) &&
- CurLexerKind != CLK_CachingLexer) {
+ CurLexerCallback != CLK_CachingLexer) {
ModuleImportLoc = Identifier.getLocation();
- ModuleImportPath.clear();
+ NamedModuleImportPath.clear();
+ IsAtImport = true;
ModuleImportExpectsIdentifier = true;
- CurLexerKind = CLK_LexAfterModuleImport;
+ CurLexerCallback = CLK_LexAfterModuleImport;
}
return true;
}
@@ -887,24 +866,8 @@ void Preprocessor::Lex(Token &Result) {
++LexLevel;
// We loop here until a lex function returns a token; this avoids recursion.
- bool ReturnedToken;
- do {
- switch (CurLexerKind) {
- case CLK_Lexer:
- ReturnedToken = CurLexer->Lex(Result);
- break;
- case CLK_TokenLexer:
- ReturnedToken = CurTokenLexer->Lex(Result);
- break;
- case CLK_CachingLexer:
- CachingLex(Result);
- ReturnedToken = true;
- break;
- case CLK_LexAfterModuleImport:
- ReturnedToken = LexAfterModuleImport(Result);
- break;
- }
- } while (!ReturnedToken);
+ while (!CurLexerCallback(*this, Result))
+ ;
if (Result.is(tok::unknown) && TheModuleLoader.HadFatalFailure)
return;
@@ -918,44 +881,75 @@ void Preprocessor::Lex(Token &Result) {
Result.setIdentifierInfo(nullptr);
}
- // Update ImportSeqState to track our position within a C++20 import-seq
+ // Update StdCXXImportSeqState to track our position within a C++20 import-seq
// if this token is being produced as a result of phase 4 of translation.
+ // Update TrackGMFState to decide if we are currently in a Global Module
+ // Fragment. GMF state updates should precede StdCXXImportSeq ones, since GMF state
+ // depends on the prevailing StdCXXImportSeq state in two cases.
if (getLangOpts().CPlusPlusModules && LexLevel == 1 &&
!Result.getFlag(Token::IsReinjected)) {
switch (Result.getKind()) {
case tok::l_paren: case tok::l_square: case tok::l_brace:
- ImportSeqState.handleOpenBracket();
+ StdCXXImportSeqState.handleOpenBracket();
break;
case tok::r_paren: case tok::r_square:
- ImportSeqState.handleCloseBracket();
+ StdCXXImportSeqState.handleCloseBracket();
break;
case tok::r_brace:
- ImportSeqState.handleCloseBrace();
+ StdCXXImportSeqState.handleCloseBrace();
break;
+ // This token is injected to represent the translation of '#include "a.h"'
+ // into "import a.h;". Mimic the notional ';'.
+ case tok::annot_module_include:
case tok::semi:
- ImportSeqState.handleSemi();
+ TrackGMFState.handleSemi();
+ StdCXXImportSeqState.handleSemi();
+ ModuleDeclState.handleSemi();
break;
case tok::header_name:
case tok::annot_header_unit:
- ImportSeqState.handleHeaderName();
+ StdCXXImportSeqState.handleHeaderName();
break;
case tok::kw_export:
- ImportSeqState.handleExport();
+ TrackGMFState.handleExport();
+ StdCXXImportSeqState.handleExport();
+ ModuleDeclState.handleExport();
+ break;
+ case tok::colon:
+ ModuleDeclState.handleColon();
+ break;
+ case tok::period:
+ ModuleDeclState.handlePeriod();
break;
case tok::identifier:
- if (Result.getIdentifierInfo()->isModulesImport()) {
- ImportSeqState.handleImport();
- if (ImportSeqState.afterImportSeq()) {
- ModuleImportLoc = Result.getLocation();
- ModuleImportPath.clear();
- ModuleImportExpectsIdentifier = true;
- CurLexerKind = CLK_LexAfterModuleImport;
+ // Check "import" and "module" when there is no open bracket. The two
+ // identifiers are not meaningful with open brackets.
+ if (StdCXXImportSeqState.atTopLevel()) {
+ if (Result.getIdentifierInfo()->isModulesImport()) {
+ TrackGMFState.handleImport(StdCXXImportSeqState.afterTopLevelSeq());
+ StdCXXImportSeqState.handleImport();
+ if (StdCXXImportSeqState.afterImportSeq()) {
+ ModuleImportLoc = Result.getLocation();
+ NamedModuleImportPath.clear();
+ IsAtImport = false;
+ ModuleImportExpectsIdentifier = true;
+ CurLexerCallback = CLK_LexAfterModuleImport;
+ }
+ break;
+ } else if (Result.getIdentifierInfo() == getIdentifierInfo("module")) {
+ TrackGMFState.handleModule(StdCXXImportSeqState.afterTopLevelSeq());
+ ModuleDeclState.handleModule();
+ break;
}
- break;
}
- LLVM_FALLTHROUGH;
+ ModuleDeclState.handleIdentifier(Result.getIdentifierInfo());
+ if (ModuleDeclState.isModuleCandidate())
+ break;
+ [[fallthrough]];
default:
- ImportSeqState.handleMisc();
+ TrackGMFState.handleMisc();
+ StdCXXImportSeqState.handleMisc();
+ ModuleDeclState.handleMisc();
break;
}
}
@@ -972,6 +966,18 @@ void Preprocessor::Lex(Token &Result) {
}
}
+void Preprocessor::LexTokensUntilEOF(std::vector<Token> *Tokens) {
+ while (1) {
+ Token Tok;
+ Lex(Tok);
+ if (Tok.isOneOf(tok::unknown, tok::eof, tok::eod,
+ tok::annot_repl_input_end))
+ break;
+ if (Tokens != nullptr)
+ Tokens->push_back(Tok);
+ }
+}
+
/// Lex a header-name token (including one formed from header-name-tokens if
/// \p AllowConcatenation is \c true).
///
@@ -1136,9 +1142,18 @@ bool Preprocessor::LexAfterModuleImport(Token &Result) {
// For now, we only support header-name imports in C++20 mode.
// FIXME: Should we allow this in all language modes that support an import
// declaration as an extension?
- if (ModuleImportPath.empty() && getLangOpts().CPlusPlusModules) {
+ if (NamedModuleImportPath.empty() && getLangOpts().CPlusPlusModules) {
if (LexHeaderName(Result))
return true;
+
+ if (Result.is(tok::colon) && ModuleDeclState.isNamedModule()) {
+ std::string Name = ModuleDeclState.getPrimaryName().str();
+ Name += ":";
+ NamedModuleImportPath.push_back(
+ {getIdentifierInfo(Name), Result.getLocation()});
+ CurLexerCallback = CLK_LexAfterModuleImport;
+ return true;
+ }
} else {
Lex(Result);
}
@@ -1152,9 +1167,10 @@ bool Preprocessor::LexAfterModuleImport(Token &Result) {
/*DisableMacroExpansion*/ true, /*IsReinject*/ false);
};
+ bool ImportingHeader = Result.is(tok::header_name);
// Check for a header-name.
SmallVector<Token, 32> Suffix;
- if (Result.is(tok::header_name)) {
+ if (ImportingHeader) {
// Enter the header-name token into the token stream; a Lex action cannot
// both return a token and cache tokens (doing so would corrupt the token
// cache if the call to Lex comes from CachingLex / PeekAhead).
@@ -1198,9 +1214,10 @@ bool Preprocessor::LexAfterModuleImport(Token &Result) {
Suffix.back().setLocation(SemiLoc);
Suffix.back().setAnnotationEndLoc(SemiLoc);
Suffix.back().setAnnotationValue(Action.ModuleForHeader);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ImportAction::ModuleImport:
+ case ImportAction::HeaderUnitImport:
case ImportAction::SkippedModuleImport:
// We chose to import (or textually enter) the file. Convert the
// header-name token into a header unit annotation token.
@@ -1231,24 +1248,24 @@ bool Preprocessor::LexAfterModuleImport(Token &Result) {
if (ModuleImportExpectsIdentifier && Result.getKind() == tok::identifier) {
// We expected to see an identifier here, and we did; continue handling
// identifiers.
- ModuleImportPath.push_back(std::make_pair(Result.getIdentifierInfo(),
- Result.getLocation()));
+ NamedModuleImportPath.push_back(
+ std::make_pair(Result.getIdentifierInfo(), Result.getLocation()));
ModuleImportExpectsIdentifier = false;
- CurLexerKind = CLK_LexAfterModuleImport;
+ CurLexerCallback = CLK_LexAfterModuleImport;
return true;
}
// If we're expecting a '.' or a ';', and we got a '.', then wait until we
// see the next identifier. (We can also see a '[[' that begins an
- // attribute-specifier-seq here under the C++ Modules TS.)
+ // attribute-specifier-seq here under the Standard C++ Modules.)
if (!ModuleImportExpectsIdentifier && Result.getKind() == tok::period) {
ModuleImportExpectsIdentifier = true;
- CurLexerKind = CLK_LexAfterModuleImport;
+ CurLexerCallback = CLK_LexAfterModuleImport;
return true;
}
// If we didn't recognize a module name at all, this is not a (valid) import.
- if (ModuleImportPath.empty() || Result.is(tok::eof))
+ if (NamedModuleImportPath.empty() || Result.is(tok::eof))
return true;
// Consume the pp-import-suffix and expand any macros in it now, if we're not
@@ -1265,34 +1282,37 @@ bool Preprocessor::LexAfterModuleImport(Token &Result) {
SemiLoc = Suffix.back().getLocation();
}
- // Under the Modules TS, the dot is just part of the module name, and not
- // a real hierarchy separator. Flatten such module names now.
+ // Under the standard C++ Modules, the dot is just part of the module name,
+ // and not a real hierarchy separator. Flatten such module names now.
//
// FIXME: Is this the right level to be performing this transformation?
std::string FlatModuleName;
- if (getLangOpts().ModulesTS || getLangOpts().CPlusPlusModules) {
- for (auto &Piece : ModuleImportPath) {
- if (!FlatModuleName.empty())
+ if (getLangOpts().CPlusPlusModules) {
+ for (auto &Piece : NamedModuleImportPath) {
+ // If the FlatModuleName ends with colon, it implies it is a partition.
+ if (!FlatModuleName.empty() && FlatModuleName.back() != ':')
FlatModuleName += ".";
FlatModuleName += Piece.first->getName();
}
- SourceLocation FirstPathLoc = ModuleImportPath[0].second;
- ModuleImportPath.clear();
- ModuleImportPath.push_back(
+ SourceLocation FirstPathLoc = NamedModuleImportPath[0].second;
+ NamedModuleImportPath.clear();
+ NamedModuleImportPath.push_back(
std::make_pair(getIdentifierInfo(FlatModuleName), FirstPathLoc));
}
Module *Imported = nullptr;
- if (getLangOpts().Modules) {
+ // We don't/shouldn't load the standard c++20 modules when preprocessing.
+ if (getLangOpts().Modules && !isInImportingCXXNamedModules()) {
Imported = TheModuleLoader.loadModule(ModuleImportLoc,
- ModuleImportPath,
+ NamedModuleImportPath,
Module::Hidden,
/*IsInclusionDirective=*/false);
if (Imported)
makeModuleVisible(Imported, SemiLoc);
}
+
if (Callbacks)
- Callbacks->moduleImport(ModuleImportLoc, ModuleImportPath, Imported);
+ Callbacks->moduleImport(ModuleImportLoc, NamedModuleImportPath, Imported);
if (!Suffix.empty()) {
EnterTokens(Suffix);
@@ -1344,7 +1364,7 @@ bool Preprocessor::FinishLexStringLiteral(Token &Result, std::string &String,
// Concatenate and parse the strings.
StringLiteralParser Literal(StrToks, *this);
- assert(Literal.isAscii() && "Didn't allow wide strings in");
+ assert(Literal.isOrdinary() && "Didn't allow wide strings in");
if (Literal.hadError)
return false;
@@ -1381,7 +1401,7 @@ bool Preprocessor::parseSimpleIntegerLiteral(Token &Tok, uint64_t &Value) {
void Preprocessor::addCommentHandler(CommentHandler *Handler) {
assert(Handler && "NULL comment handler");
- assert(llvm::find(CommentHandlers, Handler) == CommentHandlers.end() &&
+ assert(!llvm::is_contained(CommentHandlers, Handler) &&
"Comment handler already registered");
CommentHandlers.push_back(Handler);
}
@@ -1407,6 +1427,122 @@ bool Preprocessor::HandleComment(Token &result, SourceRange Comment) {
return true;
}
+void Preprocessor::emitMacroDeprecationWarning(const Token &Identifier) const {
+ const MacroAnnotations &A =
+ getMacroAnnotations(Identifier.getIdentifierInfo());
+ assert(A.DeprecationInfo &&
+ "Macro deprecation warning without recorded annotation!");
+ const MacroAnnotationInfo &Info = *A.DeprecationInfo;
+ if (Info.Message.empty())
+ Diag(Identifier, diag::warn_pragma_deprecated_macro_use)
+ << Identifier.getIdentifierInfo() << 0;
+ else
+ Diag(Identifier, diag::warn_pragma_deprecated_macro_use)
+ << Identifier.getIdentifierInfo() << 1 << Info.Message;
+ Diag(Info.Location, diag::note_pp_macro_annotation) << 0;
+}
+
+void Preprocessor::emitRestrictExpansionWarning(const Token &Identifier) const {
+ const MacroAnnotations &A =
+ getMacroAnnotations(Identifier.getIdentifierInfo());
+ assert(A.RestrictExpansionInfo &&
+ "Macro restricted expansion warning without recorded annotation!");
+ const MacroAnnotationInfo &Info = *A.RestrictExpansionInfo;
+ if (Info.Message.empty())
+ Diag(Identifier, diag::warn_pragma_restrict_expansion_macro_use)
+ << Identifier.getIdentifierInfo() << 0;
+ else
+ Diag(Identifier, diag::warn_pragma_restrict_expansion_macro_use)
+ << Identifier.getIdentifierInfo() << 1 << Info.Message;
+ Diag(Info.Location, diag::note_pp_macro_annotation) << 1;
+}
+
+void Preprocessor::emitRestrictInfNaNWarning(const Token &Identifier,
+ unsigned DiagSelection) const {
+ Diag(Identifier, diag::warn_fp_nan_inf_when_disabled) << DiagSelection << 1;
+}
+
+void Preprocessor::emitFinalMacroWarning(const Token &Identifier,
+ bool IsUndef) const {
+ const MacroAnnotations &A =
+ getMacroAnnotations(Identifier.getIdentifierInfo());
+ assert(A.FinalAnnotationLoc &&
+ "Final macro warning without recorded annotation!");
+
+ Diag(Identifier, diag::warn_pragma_final_macro)
+ << Identifier.getIdentifierInfo() << (IsUndef ? 0 : 1);
+ Diag(*A.FinalAnnotationLoc, diag::note_pp_macro_annotation) << 2;
+}
+
+bool Preprocessor::isSafeBufferOptOut(const SourceManager &SourceMgr,
+ const SourceLocation &Loc) const {
+ // Try to find a region in `SafeBufferOptOutMap` where `Loc` is in:
+ auto FirstRegionEndingAfterLoc = llvm::partition_point(
+ SafeBufferOptOutMap,
+ [&SourceMgr,
+ &Loc](const std::pair<SourceLocation, SourceLocation> &Region) {
+ return SourceMgr.isBeforeInTranslationUnit(Region.second, Loc);
+ });
+
+ if (FirstRegionEndingAfterLoc != SafeBufferOptOutMap.end()) {
+ // To test if the start location of the found region precedes `Loc`:
+ return SourceMgr.isBeforeInTranslationUnit(FirstRegionEndingAfterLoc->first,
+ Loc);
+ }
+ // If we do not find a region whose end location passes `Loc`, we want to
+ // check if the current region is still open:
+ if (!SafeBufferOptOutMap.empty() &&
+ SafeBufferOptOutMap.back().first == SafeBufferOptOutMap.back().second)
+ return SourceMgr.isBeforeInTranslationUnit(SafeBufferOptOutMap.back().first,
+ Loc);
+ return false;
+}
+
+bool Preprocessor::enterOrExitSafeBufferOptOutRegion(
+ bool isEnter, const SourceLocation &Loc) {
+ if (isEnter) {
+ if (isPPInSafeBufferOptOutRegion())
+ return true; // invalid enter action
+ InSafeBufferOptOutRegion = true;
+ CurrentSafeBufferOptOutStart = Loc;
+
+ // To set the start location of a new region:
+
+ if (!SafeBufferOptOutMap.empty()) {
+ [[maybe_unused]] auto *PrevRegion = &SafeBufferOptOutMap.back();
+ assert(PrevRegion->first != PrevRegion->second &&
+ "Shall not begin a safe buffer opt-out region before closing the "
+ "previous one.");
+ }
+ // If the start location equals to the end location, we call the region a
+ // open region or a unclosed region (i.e., end location has not been set
+ // yet).
+ SafeBufferOptOutMap.emplace_back(Loc, Loc);
+ } else {
+ if (!isPPInSafeBufferOptOutRegion())
+ return true; // invalid enter action
+ InSafeBufferOptOutRegion = false;
+
+ // To set the end location of the current open region:
+
+ assert(!SafeBufferOptOutMap.empty() &&
+ "Misordered safe buffer opt-out regions");
+ auto *CurrRegion = &SafeBufferOptOutMap.back();
+ assert(CurrRegion->first == CurrRegion->second &&
+ "Set end location to a closed safe buffer opt-out region");
+ CurrRegion->second = Loc;
+ }
+ return false;
+}
+
+bool Preprocessor::isPPInSafeBufferOptOutRegion() {
+ return InSafeBufferOptOutRegion;
+}
+bool Preprocessor::isPPInSafeBufferOptOutRegion(SourceLocation &StartLoc) {
+ StartLoc = CurrentSafeBufferOptOutStart;
+ return InSafeBufferOptOutRegion;
+}
+
ModuleLoader::~ModuleLoader() = default;
CommentHandler::~CommentHandler() = default;
diff --git a/contrib/llvm-project/clang/lib/Lex/PreprocessorLexer.cpp b/contrib/llvm-project/clang/lib/Lex/PreprocessorLexer.cpp
index 5f6f4a13419b..7551ba235fe9 100644
--- a/contrib/llvm-project/clang/lib/Lex/PreprocessorLexer.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PreprocessorLexer.cpp
@@ -47,6 +47,6 @@ void PreprocessorLexer::LexIncludeFilename(Token &FilenameTok) {
/// getFileEntry - Return the FileEntry corresponding to this FileID. Like
/// getFileID(), this only works for lexers with attached preprocessors.
-const FileEntry *PreprocessorLexer::getFileEntry() const {
- return PP->getSourceManager().getFileEntryForID(getFileID());
+OptionalFileEntryRef PreprocessorLexer::getFileEntry() const {
+ return PP->getSourceManager().getFileEntryRefForID(getFileID());
}
diff --git a/contrib/llvm-project/clang/lib/Lex/TokenConcatenation.cpp b/contrib/llvm-project/clang/lib/Lex/TokenConcatenation.cpp
index f6b005d9e19c..1b3201bd805b 100644
--- a/contrib/llvm-project/clang/lib/Lex/TokenConcatenation.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/TokenConcatenation.cpp
@@ -240,7 +240,7 @@ bool TokenConcatenation::AvoidConcat(const Token &PrevPrevTok,
// it as an identifier.
if (!PrevTok.hasUDSuffix())
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case tok::identifier: // id+id or id+number or id+L"foo".
// id+'.'... will not append.
if (Tok.is(tok::numeric_constant))
diff --git a/contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp b/contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp
index 41e7f3f1dccb..856d5682727f 100644
--- a/contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp
@@ -25,11 +25,13 @@
#include "clang/Lex/Token.h"
#include "clang/Lex/VariadicMacroSupport.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include <cassert>
#include <cstring>
+#include <optional>
using namespace clang;
@@ -203,7 +205,7 @@ void TokenLexer::stringifyVAOPTContents(
assert(CurTokenIdx != 0 &&
"Can not have __VAOPT__ contents begin with a ##");
Token &LHS = VAOPTTokens[CurTokenIdx - 1];
- pasteTokens(LHS, llvm::makeArrayRef(VAOPTTokens, NumVAOptTokens),
+ pasteTokens(LHS, llvm::ArrayRef(VAOPTTokens, NumVAOptTokens),
CurTokenIdx);
// Replace the token prior to the first ## in this iteration.
ConcatenatedVAOPTResultToks.back() = LHS;
@@ -247,7 +249,7 @@ void TokenLexer::ExpandFunctionArguments() {
// we install the newly expanded sequence as the new 'Tokens' list.
bool MadeChange = false;
- Optional<bool> CalledWithVariadicArguments;
+ std::optional<bool> CalledWithVariadicArguments;
VAOptExpansionContext VCtx(PP);
@@ -295,7 +297,7 @@ void TokenLexer::ExpandFunctionArguments() {
// the closing r_paren of the __VA_OPT__.
if (!Tokens[I].is(tok::r_paren) || !VCtx.sawClosingParen()) {
// Lazily expand __VA_ARGS__ when we see the first __VA_OPT__.
- if (!CalledWithVariadicArguments.hasValue()) {
+ if (!CalledWithVariadicArguments) {
CalledWithVariadicArguments =
ActualArgs->invokedWithVariadicArgument(Macro, PP);
}
@@ -472,11 +474,9 @@ void TokenLexer::ExpandFunctionArguments() {
// If the '##' came from expanding an argument, turn it into 'unknown'
// to avoid pasting.
- for (Token &Tok : llvm::make_range(ResultToks.begin() + FirstResult,
- ResultToks.end())) {
+ for (Token &Tok : llvm::drop_begin(ResultToks, FirstResult))
if (Tok.is(tok::hashhash))
Tok.setKind(tok::unknown);
- }
if(ExpandLocStart.isValid()) {
updateLocForMacroArgTokens(CurTok.getLocation(),
@@ -500,8 +500,7 @@ void TokenLexer::ExpandFunctionArguments() {
// the first token in a __VA_OPT__ after a ##, delete the ##.
assert(VCtx.isInVAOpt() && "should only happen inside a __VA_OPT__");
VCtx.hasPlaceholderAfterHashhashAtStart();
- }
- if (RParenAfter)
+ } else if (RParenAfter)
VCtx.hasPlaceholderBeforeRParen();
}
continue;
@@ -567,7 +566,7 @@ void TokenLexer::ExpandFunctionArguments() {
continue;
}
- if (RParenAfter)
+ if (RParenAfter && !NonEmptyPasteBefore)
VCtx.hasPlaceholderBeforeRParen();
// If this is on the RHS of a paste operator, we've already copied the
@@ -723,7 +722,7 @@ bool TokenLexer::Lex(Token &Tok) {
}
bool TokenLexer::pasteTokens(Token &Tok) {
- return pasteTokens(Tok, llvm::makeArrayRef(Tokens, NumTokens), CurTokenIdx);
+ return pasteTokens(Tok, llvm::ArrayRef(Tokens, NumTokens), CurTokenIdx);
}
/// LHSTok is the LHS of a ## operator, and CurTokenIdx is the ##
@@ -986,65 +985,79 @@ TokenLexer::getExpansionLocForMacroDefLoc(SourceLocation loc) const {
/// \arg begin_tokens will be updated to a position past all the found
/// consecutive tokens.
static void updateConsecutiveMacroArgTokens(SourceManager &SM,
- SourceLocation InstLoc,
+ SourceLocation ExpandLoc,
Token *&begin_tokens,
Token * end_tokens) {
- assert(begin_tokens < end_tokens);
-
- SourceLocation FirstLoc = begin_tokens->getLocation();
- SourceLocation CurLoc = FirstLoc;
-
- // Compare the source location offset of tokens and group together tokens that
- // are close, even if their locations point to different FileIDs. e.g.
- //
- // |bar | foo | cake | (3 tokens from 3 consecutive FileIDs)
- // ^ ^
- // |bar foo cake| (one SLocEntry chunk for all tokens)
- //
- // we can perform this "merge" since the token's spelling location depends
- // on the relative offset.
-
- Token *NextTok = begin_tokens + 1;
- for (; NextTok < end_tokens; ++NextTok) {
- SourceLocation NextLoc = NextTok->getLocation();
- if (CurLoc.isFileID() != NextLoc.isFileID())
- break; // Token from different kind of FileID.
-
- SourceLocation::IntTy RelOffs;
- if (!SM.isInSameSLocAddrSpace(CurLoc, NextLoc, &RelOffs))
- break; // Token from different local/loaded location.
- // Check that token is not before the previous token or more than 50
- // "characters" away.
- if (RelOffs < 0 || RelOffs > 50)
- break;
-
- if (CurLoc.isMacroID() && !SM.isWrittenInSameFile(CurLoc, NextLoc))
- break; // Token from a different macro.
-
- CurLoc = NextLoc;
+ assert(begin_tokens + 1 < end_tokens);
+ SourceLocation BeginLoc = begin_tokens->getLocation();
+ llvm::MutableArrayRef<Token> All(begin_tokens, end_tokens);
+ llvm::MutableArrayRef<Token> Partition;
+
+ auto NearLast = [&, Last = BeginLoc](SourceLocation Loc) mutable {
+ // The maximum distance between two consecutive tokens in a partition.
+ // This is an important trick to avoid using too much SourceLocation address
+ // space!
+ static constexpr SourceLocation::IntTy MaxDistance = 50;
+ auto Distance = Loc.getRawEncoding() - Last.getRawEncoding();
+ Last = Loc;
+ return Distance <= MaxDistance;
+ };
+
+ // Partition the tokens by their FileID.
+ // This is a hot function, and calling getFileID can be expensive, the
+ // implementation is optimized by reducing the number of getFileID.
+ if (BeginLoc.isFileID()) {
+ // Consecutive tokens not written in macros must be from the same file.
+ // (Neither #include nor eof can occur inside a macro argument.)
+ Partition = All.take_while([&](const Token &T) {
+ return T.getLocation().isFileID() && NearLast(T.getLocation());
+ });
+ } else {
+ // Call getFileID once to calculate the bounds, and use the cheaper
+ // sourcelocation-against-bounds comparison.
+ FileID BeginFID = SM.getFileID(BeginLoc);
+ SourceLocation Limit =
+ SM.getComposedLoc(BeginFID, SM.getFileIDSize(BeginFID));
+ Partition = All.take_while([&](const Token &T) {
+ // NOTE: the Limit is included! The lexer recovery only ever inserts a
+ // single token past the end of the FileID, specifically the ) when a
+ // macro-arg containing a comma should be guarded by parentheses.
+ //
+ // It is safe to include the Limit here because SourceManager allocates
+ // FileSize + 1 for each SLocEntry.
+ //
+ // See https://github.com/llvm/llvm-project/issues/60722.
+ return T.getLocation() >= BeginLoc && T.getLocation() <= Limit
+ && NearLast(T.getLocation());
+ });
}
+ assert(!Partition.empty());
// For the consecutive tokens, find the length of the SLocEntry to contain
// all of them.
- Token &LastConsecutiveTok = *(NextTok-1);
- SourceLocation::IntTy LastRelOffs = 0;
- SM.isInSameSLocAddrSpace(FirstLoc, LastConsecutiveTok.getLocation(),
- &LastRelOffs);
SourceLocation::UIntTy FullLength =
- LastRelOffs + LastConsecutiveTok.getLength();
-
+ Partition.back().getEndLoc().getRawEncoding() -
+ Partition.front().getLocation().getRawEncoding();
// Create a macro expansion SLocEntry that will "contain" all of the tokens.
SourceLocation Expansion =
- SM.createMacroArgExpansionLoc(FirstLoc, InstLoc,FullLength);
-
+ SM.createMacroArgExpansionLoc(BeginLoc, ExpandLoc, FullLength);
+
+#ifdef EXPENSIVE_CHECKS
+ assert(llvm::all_of(Partition.drop_front(),
+ [&SM, ID = SM.getFileID(Partition.front().getLocation())](
+ const Token &T) {
+ return ID == SM.getFileID(T.getLocation());
+ }) &&
+ "Must have the same FIleID!");
+#endif
// Change the location of the tokens from the spelling location to the new
// expanded location.
- for (; begin_tokens < NextTok; ++begin_tokens) {
- Token &Tok = *begin_tokens;
- SourceLocation::IntTy RelOffs = 0;
- SM.isInSameSLocAddrSpace(FirstLoc, Tok.getLocation(), &RelOffs);
- Tok.setLocation(Expansion.getLocWithOffset(RelOffs));
+ for (Token& T : Partition) {
+ SourceLocation::IntTy RelativeOffset =
+ T.getLocation().getRawEncoding() - BeginLoc.getRawEncoding();
+ T.setLocation(Expansion.getLocWithOffset(RelativeOffset));
}
+ begin_tokens = &Partition.back() + 1;
}
/// Creates SLocEntries and updates the locations of macro argument
@@ -1057,7 +1070,7 @@ void TokenLexer::updateLocForMacroArgTokens(SourceLocation ArgIdSpellLoc,
Token *end_tokens) {
SourceManager &SM = PP.getSourceManager();
- SourceLocation InstLoc =
+ SourceLocation ExpandLoc =
getExpansionLocForMacroDefLoc(ArgIdSpellLoc);
while (begin_tokens < end_tokens) {
@@ -1065,12 +1078,12 @@ void TokenLexer::updateLocForMacroArgTokens(SourceLocation ArgIdSpellLoc,
if (end_tokens - begin_tokens == 1) {
Token &Tok = *begin_tokens;
Tok.setLocation(SM.createMacroArgExpansionLoc(Tok.getLocation(),
- InstLoc,
+ ExpandLoc,
Tok.getLength()));
return;
}
- updateConsecutiveMacroArgTokens(SM, InstLoc, begin_tokens, end_tokens);
+ updateConsecutiveMacroArgTokens(SM, ExpandLoc, begin_tokens, end_tokens);
}
}
diff --git a/contrib/llvm-project/clang/lib/Lex/UnicodeCharSets.h b/contrib/llvm-project/clang/lib/Lex/UnicodeCharSets.h
index 74dd57fdf118..b63908024e5a 100644
--- a/contrib/llvm-project/clang/lib/Lex/UnicodeCharSets.h
+++ b/contrib/llvm-project/clang/lib/Lex/UnicodeCharSets.h
@@ -10,6 +10,395 @@
#include "llvm/Support/UnicodeCharRanges.h"
+// Unicode 15.1 XID_Start
+static const llvm::sys::UnicodeCharRange XIDStartRanges[] = {
+ {0x0041, 0x005A}, {0x0061, 0x007A}, {0x00AA, 0x00AA},
+ {0x00B5, 0x00B5}, {0x00BA, 0x00BA}, {0x00C0, 0x00D6},
+ {0x00D8, 0x00F6}, {0x00F8, 0x02C1}, {0x02C6, 0x02D1},
+ {0x02E0, 0x02E4}, {0x02EC, 0x02EC}, {0x02EE, 0x02EE},
+ {0x0370, 0x0374}, {0x0376, 0x0377}, {0x037B, 0x037D},
+ {0x037F, 0x037F}, {0x0386, 0x0386}, {0x0388, 0x038A},
+ {0x038C, 0x038C}, {0x038E, 0x03A1}, {0x03A3, 0x03F5},
+ {0x03F7, 0x0481}, {0x048A, 0x052F}, {0x0531, 0x0556},
+ {0x0559, 0x0559}, {0x0560, 0x0588}, {0x05D0, 0x05EA},
+ {0x05EF, 0x05F2}, {0x0620, 0x064A}, {0x066E, 0x066F},
+ {0x0671, 0x06D3}, {0x06D5, 0x06D5}, {0x06E5, 0x06E6},
+ {0x06EE, 0x06EF}, {0x06FA, 0x06FC}, {0x06FF, 0x06FF},
+ {0x0710, 0x0710}, {0x0712, 0x072F}, {0x074D, 0x07A5},
+ {0x07B1, 0x07B1}, {0x07CA, 0x07EA}, {0x07F4, 0x07F5},
+ {0x07FA, 0x07FA}, {0x0800, 0x0815}, {0x081A, 0x081A},
+ {0x0824, 0x0824}, {0x0828, 0x0828}, {0x0840, 0x0858},
+ {0x0860, 0x086A}, {0x0870, 0x0887}, {0x0889, 0x088E},
+ {0x08A0, 0x08C9}, {0x0904, 0x0939}, {0x093D, 0x093D},
+ {0x0950, 0x0950}, {0x0958, 0x0961}, {0x0971, 0x0980},
+ {0x0985, 0x098C}, {0x098F, 0x0990}, {0x0993, 0x09A8},
+ {0x09AA, 0x09B0}, {0x09B2, 0x09B2}, {0x09B6, 0x09B9},
+ {0x09BD, 0x09BD}, {0x09CE, 0x09CE}, {0x09DC, 0x09DD},
+ {0x09DF, 0x09E1}, {0x09F0, 0x09F1}, {0x09FC, 0x09FC},
+ {0x0A05, 0x0A0A}, {0x0A0F, 0x0A10}, {0x0A13, 0x0A28},
+ {0x0A2A, 0x0A30}, {0x0A32, 0x0A33}, {0x0A35, 0x0A36},
+ {0x0A38, 0x0A39}, {0x0A59, 0x0A5C}, {0x0A5E, 0x0A5E},
+ {0x0A72, 0x0A74}, {0x0A85, 0x0A8D}, {0x0A8F, 0x0A91},
+ {0x0A93, 0x0AA8}, {0x0AAA, 0x0AB0}, {0x0AB2, 0x0AB3},
+ {0x0AB5, 0x0AB9}, {0x0ABD, 0x0ABD}, {0x0AD0, 0x0AD0},
+ {0x0AE0, 0x0AE1}, {0x0AF9, 0x0AF9}, {0x0B05, 0x0B0C},
+ {0x0B0F, 0x0B10}, {0x0B13, 0x0B28}, {0x0B2A, 0x0B30},
+ {0x0B32, 0x0B33}, {0x0B35, 0x0B39}, {0x0B3D, 0x0B3D},
+ {0x0B5C, 0x0B5D}, {0x0B5F, 0x0B61}, {0x0B71, 0x0B71},
+ {0x0B83, 0x0B83}, {0x0B85, 0x0B8A}, {0x0B8E, 0x0B90},
+ {0x0B92, 0x0B95}, {0x0B99, 0x0B9A}, {0x0B9C, 0x0B9C},
+ {0x0B9E, 0x0B9F}, {0x0BA3, 0x0BA4}, {0x0BA8, 0x0BAA},
+ {0x0BAE, 0x0BB9}, {0x0BD0, 0x0BD0}, {0x0C05, 0x0C0C},
+ {0x0C0E, 0x0C10}, {0x0C12, 0x0C28}, {0x0C2A, 0x0C39},
+ {0x0C3D, 0x0C3D}, {0x0C58, 0x0C5A}, {0x0C5D, 0x0C5D},
+ {0x0C60, 0x0C61}, {0x0C80, 0x0C80}, {0x0C85, 0x0C8C},
+ {0x0C8E, 0x0C90}, {0x0C92, 0x0CA8}, {0x0CAA, 0x0CB3},
+ {0x0CB5, 0x0CB9}, {0x0CBD, 0x0CBD}, {0x0CDD, 0x0CDE},
+ {0x0CE0, 0x0CE1}, {0x0CF1, 0x0CF2}, {0x0D04, 0x0D0C},
+ {0x0D0E, 0x0D10}, {0x0D12, 0x0D3A}, {0x0D3D, 0x0D3D},
+ {0x0D4E, 0x0D4E}, {0x0D54, 0x0D56}, {0x0D5F, 0x0D61},
+ {0x0D7A, 0x0D7F}, {0x0D85, 0x0D96}, {0x0D9A, 0x0DB1},
+ {0x0DB3, 0x0DBB}, {0x0DBD, 0x0DBD}, {0x0DC0, 0x0DC6},
+ {0x0E01, 0x0E30}, {0x0E32, 0x0E32}, {0x0E40, 0x0E46},
+ {0x0E81, 0x0E82}, {0x0E84, 0x0E84}, {0x0E86, 0x0E8A},
+ {0x0E8C, 0x0EA3}, {0x0EA5, 0x0EA5}, {0x0EA7, 0x0EB0},
+ {0x0EB2, 0x0EB2}, {0x0EBD, 0x0EBD}, {0x0EC0, 0x0EC4},
+ {0x0EC6, 0x0EC6}, {0x0EDC, 0x0EDF}, {0x0F00, 0x0F00},
+ {0x0F40, 0x0F47}, {0x0F49, 0x0F6C}, {0x0F88, 0x0F8C},
+ {0x1000, 0x102A}, {0x103F, 0x103F}, {0x1050, 0x1055},
+ {0x105A, 0x105D}, {0x1061, 0x1061}, {0x1065, 0x1066},
+ {0x106E, 0x1070}, {0x1075, 0x1081}, {0x108E, 0x108E},
+ {0x10A0, 0x10C5}, {0x10C7, 0x10C7}, {0x10CD, 0x10CD},
+ {0x10D0, 0x10FA}, {0x10FC, 0x1248}, {0x124A, 0x124D},
+ {0x1250, 0x1256}, {0x1258, 0x1258}, {0x125A, 0x125D},
+ {0x1260, 0x1288}, {0x128A, 0x128D}, {0x1290, 0x12B0},
+ {0x12B2, 0x12B5}, {0x12B8, 0x12BE}, {0x12C0, 0x12C0},
+ {0x12C2, 0x12C5}, {0x12C8, 0x12D6}, {0x12D8, 0x1310},
+ {0x1312, 0x1315}, {0x1318, 0x135A}, {0x1380, 0x138F},
+ {0x13A0, 0x13F5}, {0x13F8, 0x13FD}, {0x1401, 0x166C},
+ {0x166F, 0x167F}, {0x1681, 0x169A}, {0x16A0, 0x16EA},
+ {0x16EE, 0x16F8}, {0x1700, 0x1711}, {0x171F, 0x1731},
+ {0x1740, 0x1751}, {0x1760, 0x176C}, {0x176E, 0x1770},
+ {0x1780, 0x17B3}, {0x17D7, 0x17D7}, {0x17DC, 0x17DC},
+ {0x1820, 0x1878}, {0x1880, 0x18A8}, {0x18AA, 0x18AA},
+ {0x18B0, 0x18F5}, {0x1900, 0x191E}, {0x1950, 0x196D},
+ {0x1970, 0x1974}, {0x1980, 0x19AB}, {0x19B0, 0x19C9},
+ {0x1A00, 0x1A16}, {0x1A20, 0x1A54}, {0x1AA7, 0x1AA7},
+ {0x1B05, 0x1B33}, {0x1B45, 0x1B4C}, {0x1B83, 0x1BA0},
+ {0x1BAE, 0x1BAF}, {0x1BBA, 0x1BE5}, {0x1C00, 0x1C23},
+ {0x1C4D, 0x1C4F}, {0x1C5A, 0x1C7D}, {0x1C80, 0x1C88},
+ {0x1C90, 0x1CBA}, {0x1CBD, 0x1CBF}, {0x1CE9, 0x1CEC},
+ {0x1CEE, 0x1CF3}, {0x1CF5, 0x1CF6}, {0x1CFA, 0x1CFA},
+ {0x1D00, 0x1DBF}, {0x1E00, 0x1F15}, {0x1F18, 0x1F1D},
+ {0x1F20, 0x1F45}, {0x1F48, 0x1F4D}, {0x1F50, 0x1F57},
+ {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B}, {0x1F5D, 0x1F5D},
+ {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4}, {0x1FB6, 0x1FBC},
+ {0x1FBE, 0x1FBE}, {0x1FC2, 0x1FC4}, {0x1FC6, 0x1FCC},
+ {0x1FD0, 0x1FD3}, {0x1FD6, 0x1FDB}, {0x1FE0, 0x1FEC},
+ {0x1FF2, 0x1FF4}, {0x1FF6, 0x1FFC}, {0x2071, 0x2071},
+ {0x207F, 0x207F}, {0x2090, 0x209C}, {0x2102, 0x2102},
+ {0x2107, 0x2107}, {0x210A, 0x2113}, {0x2115, 0x2115},
+ {0x2118, 0x211D}, {0x2124, 0x2124}, {0x2126, 0x2126},
+ {0x2128, 0x2128}, {0x212A, 0x2139}, {0x213C, 0x213F},
+ {0x2145, 0x2149}, {0x214E, 0x214E}, {0x2160, 0x2188},
+ {0x2C00, 0x2CE4}, {0x2CEB, 0x2CEE}, {0x2CF2, 0x2CF3},
+ {0x2D00, 0x2D25}, {0x2D27, 0x2D27}, {0x2D2D, 0x2D2D},
+ {0x2D30, 0x2D67}, {0x2D6F, 0x2D6F}, {0x2D80, 0x2D96},
+ {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE}, {0x2DB0, 0x2DB6},
+ {0x2DB8, 0x2DBE}, {0x2DC0, 0x2DC6}, {0x2DC8, 0x2DCE},
+ {0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE}, {0x3005, 0x3007},
+ {0x3021, 0x3029}, {0x3031, 0x3035}, {0x3038, 0x303C},
+ {0x3041, 0x3096}, {0x309D, 0x309F}, {0x30A1, 0x30FA},
+ {0x30FC, 0x30FF}, {0x3105, 0x312F}, {0x3131, 0x318E},
+ {0x31A0, 0x31BF}, {0x31F0, 0x31FF}, {0x3400, 0x4DBF},
+ {0x4E00, 0xA48C}, {0xA4D0, 0xA4FD}, {0xA500, 0xA60C},
+ {0xA610, 0xA61F}, {0xA62A, 0xA62B}, {0xA640, 0xA66E},
+ {0xA67F, 0xA69D}, {0xA6A0, 0xA6EF}, {0xA717, 0xA71F},
+ {0xA722, 0xA788}, {0xA78B, 0xA7CA}, {0xA7D0, 0xA7D1},
+ {0xA7D3, 0xA7D3}, {0xA7D5, 0xA7D9}, {0xA7F2, 0xA801},
+ {0xA803, 0xA805}, {0xA807, 0xA80A}, {0xA80C, 0xA822},
+ {0xA840, 0xA873}, {0xA882, 0xA8B3}, {0xA8F2, 0xA8F7},
+ {0xA8FB, 0xA8FB}, {0xA8FD, 0xA8FE}, {0xA90A, 0xA925},
+ {0xA930, 0xA946}, {0xA960, 0xA97C}, {0xA984, 0xA9B2},
+ {0xA9CF, 0xA9CF}, {0xA9E0, 0xA9E4}, {0xA9E6, 0xA9EF},
+ {0xA9FA, 0xA9FE}, {0xAA00, 0xAA28}, {0xAA40, 0xAA42},
+ {0xAA44, 0xAA4B}, {0xAA60, 0xAA76}, {0xAA7A, 0xAA7A},
+ {0xAA7E, 0xAAAF}, {0xAAB1, 0xAAB1}, {0xAAB5, 0xAAB6},
+ {0xAAB9, 0xAABD}, {0xAAC0, 0xAAC0}, {0xAAC2, 0xAAC2},
+ {0xAADB, 0xAADD}, {0xAAE0, 0xAAEA}, {0xAAF2, 0xAAF4},
+ {0xAB01, 0xAB06}, {0xAB09, 0xAB0E}, {0xAB11, 0xAB16},
+ {0xAB20, 0xAB26}, {0xAB28, 0xAB2E}, {0xAB30, 0xAB5A},
+ {0xAB5C, 0xAB69}, {0xAB70, 0xABE2}, {0xAC00, 0xD7A3},
+ {0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, {0xF900, 0xFA6D},
+ {0xFA70, 0xFAD9}, {0xFB00, 0xFB06}, {0xFB13, 0xFB17},
+ {0xFB1D, 0xFB1D}, {0xFB1F, 0xFB28}, {0xFB2A, 0xFB36},
+ {0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41},
+ {0xFB43, 0xFB44}, {0xFB46, 0xFBB1}, {0xFBD3, 0xFC5D},
+ {0xFC64, 0xFD3D}, {0xFD50, 0xFD8F}, {0xFD92, 0xFDC7},
+ {0xFDF0, 0xFDF9}, {0xFE71, 0xFE71}, {0xFE73, 0xFE73},
+ {0xFE77, 0xFE77}, {0xFE79, 0xFE79}, {0xFE7B, 0xFE7B},
+ {0xFE7D, 0xFE7D}, {0xFE7F, 0xFEFC}, {0xFF21, 0xFF3A},
+ {0xFF41, 0xFF5A}, {0xFF66, 0xFF9D}, {0xFFA0, 0xFFBE},
+ {0xFFC2, 0xFFC7}, {0xFFCA, 0xFFCF}, {0xFFD2, 0xFFD7},
+ {0xFFDA, 0xFFDC}, {0x10000, 0x1000B}, {0x1000D, 0x10026},
+ {0x10028, 0x1003A}, {0x1003C, 0x1003D}, {0x1003F, 0x1004D},
+ {0x10050, 0x1005D}, {0x10080, 0x100FA}, {0x10140, 0x10174},
+ {0x10280, 0x1029C}, {0x102A0, 0x102D0}, {0x10300, 0x1031F},
+ {0x1032D, 0x1034A}, {0x10350, 0x10375}, {0x10380, 0x1039D},
+ {0x103A0, 0x103C3}, {0x103C8, 0x103CF}, {0x103D1, 0x103D5},
+ {0x10400, 0x1049D}, {0x104B0, 0x104D3}, {0x104D8, 0x104FB},
+ {0x10500, 0x10527}, {0x10530, 0x10563}, {0x10570, 0x1057A},
+ {0x1057C, 0x1058A}, {0x1058C, 0x10592}, {0x10594, 0x10595},
+ {0x10597, 0x105A1}, {0x105A3, 0x105B1}, {0x105B3, 0x105B9},
+ {0x105BB, 0x105BC}, {0x10600, 0x10736}, {0x10740, 0x10755},
+ {0x10760, 0x10767}, {0x10780, 0x10785}, {0x10787, 0x107B0},
+ {0x107B2, 0x107BA}, {0x10800, 0x10805}, {0x10808, 0x10808},
+ {0x1080A, 0x10835}, {0x10837, 0x10838}, {0x1083C, 0x1083C},
+ {0x1083F, 0x10855}, {0x10860, 0x10876}, {0x10880, 0x1089E},
+ {0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x10900, 0x10915},
+ {0x10920, 0x10939}, {0x10980, 0x109B7}, {0x109BE, 0x109BF},
+ {0x10A00, 0x10A00}, {0x10A10, 0x10A13}, {0x10A15, 0x10A17},
+ {0x10A19, 0x10A35}, {0x10A60, 0x10A7C}, {0x10A80, 0x10A9C},
+ {0x10AC0, 0x10AC7}, {0x10AC9, 0x10AE4}, {0x10B00, 0x10B35},
+ {0x10B40, 0x10B55}, {0x10B60, 0x10B72}, {0x10B80, 0x10B91},
+ {0x10C00, 0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2},
+ {0x10D00, 0x10D23}, {0x10E80, 0x10EA9}, {0x10EB0, 0x10EB1},
+ {0x10F00, 0x10F1C}, {0x10F27, 0x10F27}, {0x10F30, 0x10F45},
+ {0x10F70, 0x10F81}, {0x10FB0, 0x10FC4}, {0x10FE0, 0x10FF6},
+ {0x11003, 0x11037}, {0x11071, 0x11072}, {0x11075, 0x11075},
+ {0x11083, 0x110AF}, {0x110D0, 0x110E8}, {0x11103, 0x11126},
+ {0x11144, 0x11144}, {0x11147, 0x11147}, {0x11150, 0x11172},
+ {0x11176, 0x11176}, {0x11183, 0x111B2}, {0x111C1, 0x111C4},
+ {0x111DA, 0x111DA}, {0x111DC, 0x111DC}, {0x11200, 0x11211},
+ {0x11213, 0x1122B}, {0x1123F, 0x11240}, {0x11280, 0x11286},
+ {0x11288, 0x11288}, {0x1128A, 0x1128D}, {0x1128F, 0x1129D},
+ {0x1129F, 0x112A8}, {0x112B0, 0x112DE}, {0x11305, 0x1130C},
+ {0x1130F, 0x11310}, {0x11313, 0x11328}, {0x1132A, 0x11330},
+ {0x11332, 0x11333}, {0x11335, 0x11339}, {0x1133D, 0x1133D},
+ {0x11350, 0x11350}, {0x1135D, 0x11361}, {0x11400, 0x11434},
+ {0x11447, 0x1144A}, {0x1145F, 0x11461}, {0x11480, 0x114AF},
+ {0x114C4, 0x114C5}, {0x114C7, 0x114C7}, {0x11580, 0x115AE},
+ {0x115D8, 0x115DB}, {0x11600, 0x1162F}, {0x11644, 0x11644},
+ {0x11680, 0x116AA}, {0x116B8, 0x116B8}, {0x11700, 0x1171A},
+ {0x11740, 0x11746}, {0x11800, 0x1182B}, {0x118A0, 0x118DF},
+ {0x118FF, 0x11906}, {0x11909, 0x11909}, {0x1190C, 0x11913},
+ {0x11915, 0x11916}, {0x11918, 0x1192F}, {0x1193F, 0x1193F},
+ {0x11941, 0x11941}, {0x119A0, 0x119A7}, {0x119AA, 0x119D0},
+ {0x119E1, 0x119E1}, {0x119E3, 0x119E3}, {0x11A00, 0x11A00},
+ {0x11A0B, 0x11A32}, {0x11A3A, 0x11A3A}, {0x11A50, 0x11A50},
+ {0x11A5C, 0x11A89}, {0x11A9D, 0x11A9D}, {0x11AB0, 0x11AF8},
+ {0x11C00, 0x11C08}, {0x11C0A, 0x11C2E}, {0x11C40, 0x11C40},
+ {0x11C72, 0x11C8F}, {0x11D00, 0x11D06}, {0x11D08, 0x11D09},
+ {0x11D0B, 0x11D30}, {0x11D46, 0x11D46}, {0x11D60, 0x11D65},
+ {0x11D67, 0x11D68}, {0x11D6A, 0x11D89}, {0x11D98, 0x11D98},
+ {0x11EE0, 0x11EF2}, {0x11F02, 0x11F02}, {0x11F04, 0x11F10},
+ {0x11F12, 0x11F33}, {0x11FB0, 0x11FB0}, {0x12000, 0x12399},
+ {0x12400, 0x1246E}, {0x12480, 0x12543}, {0x12F90, 0x12FF0},
+ {0x13000, 0x1342F}, {0x13441, 0x13446}, {0x14400, 0x14646},
+ {0x16800, 0x16A38}, {0x16A40, 0x16A5E}, {0x16A70, 0x16ABE},
+ {0x16AD0, 0x16AED}, {0x16B00, 0x16B2F}, {0x16B40, 0x16B43},
+ {0x16B63, 0x16B77}, {0x16B7D, 0x16B8F}, {0x16E40, 0x16E7F},
+ {0x16F00, 0x16F4A}, {0x16F50, 0x16F50}, {0x16F93, 0x16F9F},
+ {0x16FE0, 0x16FE1}, {0x16FE3, 0x16FE3}, {0x17000, 0x187F7},
+ {0x18800, 0x18CD5}, {0x18D00, 0x18D08}, {0x1AFF0, 0x1AFF3},
+ {0x1AFF5, 0x1AFFB}, {0x1AFFD, 0x1AFFE}, {0x1B000, 0x1B122},
+ {0x1B132, 0x1B132}, {0x1B150, 0x1B152}, {0x1B155, 0x1B155},
+ {0x1B164, 0x1B167}, {0x1B170, 0x1B2FB}, {0x1BC00, 0x1BC6A},
+ {0x1BC70, 0x1BC7C}, {0x1BC80, 0x1BC88}, {0x1BC90, 0x1BC99},
+ {0x1D400, 0x1D454}, {0x1D456, 0x1D49C}, {0x1D49E, 0x1D49F},
+ {0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC},
+ {0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3},
+ {0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, {0x1D50D, 0x1D514},
+ {0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E},
+ {0x1D540, 0x1D544}, {0x1D546, 0x1D546}, {0x1D54A, 0x1D550},
+ {0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D6C0}, {0x1D6C2, 0x1D6DA},
+ {0x1D6DC, 0x1D6FA}, {0x1D6FC, 0x1D714}, {0x1D716, 0x1D734},
+ {0x1D736, 0x1D74E}, {0x1D750, 0x1D76E}, {0x1D770, 0x1D788},
+ {0x1D78A, 0x1D7A8}, {0x1D7AA, 0x1D7C2}, {0x1D7C4, 0x1D7CB},
+ {0x1DF00, 0x1DF1E}, {0x1DF25, 0x1DF2A}, {0x1E030, 0x1E06D},
+ {0x1E100, 0x1E12C}, {0x1E137, 0x1E13D}, {0x1E14E, 0x1E14E},
+ {0x1E290, 0x1E2AD}, {0x1E2C0, 0x1E2EB}, {0x1E4D0, 0x1E4EB},
+ {0x1E7E0, 0x1E7E6}, {0x1E7E8, 0x1E7EB}, {0x1E7ED, 0x1E7EE},
+ {0x1E7F0, 0x1E7FE}, {0x1E800, 0x1E8C4}, {0x1E900, 0x1E943},
+ {0x1E94B, 0x1E94B}, {0x1EE00, 0x1EE03}, {0x1EE05, 0x1EE1F},
+ {0x1EE21, 0x1EE22}, {0x1EE24, 0x1EE24}, {0x1EE27, 0x1EE27},
+ {0x1EE29, 0x1EE32}, {0x1EE34, 0x1EE37}, {0x1EE39, 0x1EE39},
+ {0x1EE3B, 0x1EE3B}, {0x1EE42, 0x1EE42}, {0x1EE47, 0x1EE47},
+ {0x1EE49, 0x1EE49}, {0x1EE4B, 0x1EE4B}, {0x1EE4D, 0x1EE4F},
+ {0x1EE51, 0x1EE52}, {0x1EE54, 0x1EE54}, {0x1EE57, 0x1EE57},
+ {0x1EE59, 0x1EE59}, {0x1EE5B, 0x1EE5B}, {0x1EE5D, 0x1EE5D},
+ {0x1EE5F, 0x1EE5F}, {0x1EE61, 0x1EE62}, {0x1EE64, 0x1EE64},
+ {0x1EE67, 0x1EE6A}, {0x1EE6C, 0x1EE72}, {0x1EE74, 0x1EE77},
+ {0x1EE79, 0x1EE7C}, {0x1EE7E, 0x1EE7E}, {0x1EE80, 0x1EE89},
+ {0x1EE8B, 0x1EE9B}, {0x1EEA1, 0x1EEA3}, {0x1EEA5, 0x1EEA9},
+ {0x1EEAB, 0x1EEBB}, {0x20000, 0x2A6DF}, {0x2A700, 0x2B739},
+ {0x2B740, 0x2B81D}, {0x2B820, 0x2CEA1}, {0x2CEB0, 0x2EBE0},
+ {0x2EBF0, 0x2EE5D}, {0x2F800, 0x2FA1D}, {0x30000, 0x3134A},
+ {0x31350, 0x323AF}};
+
+// Unicode 15.1 XID_Continue, excluding XID_Start
+// The Unicode Property XID_Continue is a super set of XID_Start.
+// To save Space, the table below only contains the codepoints
+// that are not also in XID_Start.
+static const llvm::sys::UnicodeCharRange XIDContinueRanges[] = {
+ {0x0030, 0x0039}, {0x005F, 0x005F}, {0x00B7, 0x00B7},
+ {0x0300, 0x036F}, {0x0387, 0x0387}, {0x0483, 0x0487},
+ {0x0591, 0x05BD}, {0x05BF, 0x05BF}, {0x05C1, 0x05C2},
+ {0x05C4, 0x05C5}, {0x05C7, 0x05C7}, {0x0610, 0x061A},
+ {0x064B, 0x0669}, {0x0670, 0x0670}, {0x06D6, 0x06DC},
+ {0x06DF, 0x06E4}, {0x06E7, 0x06E8}, {0x06EA, 0x06ED},
+ {0x06F0, 0x06F9}, {0x0711, 0x0711}, {0x0730, 0x074A},
+ {0x07A6, 0x07B0}, {0x07C0, 0x07C9}, {0x07EB, 0x07F3},
+ {0x07FD, 0x07FD}, {0x0816, 0x0819}, {0x081B, 0x0823},
+ {0x0825, 0x0827}, {0x0829, 0x082D}, {0x0859, 0x085B},
+ {0x0898, 0x089F}, {0x08CA, 0x08E1}, {0x08E3, 0x0903},
+ {0x093A, 0x093C}, {0x093E, 0x094F}, {0x0951, 0x0957},
+ {0x0962, 0x0963}, {0x0966, 0x096F}, {0x0981, 0x0983},
+ {0x09BC, 0x09BC}, {0x09BE, 0x09C4}, {0x09C7, 0x09C8},
+ {0x09CB, 0x09CD}, {0x09D7, 0x09D7}, {0x09E2, 0x09E3},
+ {0x09E6, 0x09EF}, {0x09FE, 0x09FE}, {0x0A01, 0x0A03},
+ {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42}, {0x0A47, 0x0A48},
+ {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, {0x0A66, 0x0A71},
+ {0x0A75, 0x0A75}, {0x0A81, 0x0A83}, {0x0ABC, 0x0ABC},
+ {0x0ABE, 0x0AC5}, {0x0AC7, 0x0AC9}, {0x0ACB, 0x0ACD},
+ {0x0AE2, 0x0AE3}, {0x0AE6, 0x0AEF}, {0x0AFA, 0x0AFF},
+ {0x0B01, 0x0B03}, {0x0B3C, 0x0B3C}, {0x0B3E, 0x0B44},
+ {0x0B47, 0x0B48}, {0x0B4B, 0x0B4D}, {0x0B55, 0x0B57},
+ {0x0B62, 0x0B63}, {0x0B66, 0x0B6F}, {0x0B82, 0x0B82},
+ {0x0BBE, 0x0BC2}, {0x0BC6, 0x0BC8}, {0x0BCA, 0x0BCD},
+ {0x0BD7, 0x0BD7}, {0x0BE6, 0x0BEF}, {0x0C00, 0x0C04},
+ {0x0C3C, 0x0C3C}, {0x0C3E, 0x0C44}, {0x0C46, 0x0C48},
+ {0x0C4A, 0x0C4D}, {0x0C55, 0x0C56}, {0x0C62, 0x0C63},
+ {0x0C66, 0x0C6F}, {0x0C81, 0x0C83}, {0x0CBC, 0x0CBC},
+ {0x0CBE, 0x0CC4}, {0x0CC6, 0x0CC8}, {0x0CCA, 0x0CCD},
+ {0x0CD5, 0x0CD6}, {0x0CE2, 0x0CE3}, {0x0CE6, 0x0CEF},
+ {0x0CF3, 0x0CF3}, {0x0D00, 0x0D03}, {0x0D3B, 0x0D3C},
+ {0x0D3E, 0x0D44}, {0x0D46, 0x0D48}, {0x0D4A, 0x0D4D},
+ {0x0D57, 0x0D57}, {0x0D62, 0x0D63}, {0x0D66, 0x0D6F},
+ {0x0D81, 0x0D83}, {0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD4},
+ {0x0DD6, 0x0DD6}, {0x0DD8, 0x0DDF}, {0x0DE6, 0x0DEF},
+ {0x0DF2, 0x0DF3}, {0x0E31, 0x0E31}, {0x0E33, 0x0E3A},
+ {0x0E47, 0x0E4E}, {0x0E50, 0x0E59}, {0x0EB1, 0x0EB1},
+ {0x0EB3, 0x0EBC}, {0x0EC8, 0x0ECE}, {0x0ED0, 0x0ED9},
+ {0x0F18, 0x0F19}, {0x0F20, 0x0F29}, {0x0F35, 0x0F35},
+ {0x0F37, 0x0F37}, {0x0F39, 0x0F39}, {0x0F3E, 0x0F3F},
+ {0x0F71, 0x0F84}, {0x0F86, 0x0F87}, {0x0F8D, 0x0F97},
+ {0x0F99, 0x0FBC}, {0x0FC6, 0x0FC6}, {0x102B, 0x103E},
+ {0x1040, 0x1049}, {0x1056, 0x1059}, {0x105E, 0x1060},
+ {0x1062, 0x1064}, {0x1067, 0x106D}, {0x1071, 0x1074},
+ {0x1082, 0x108D}, {0x108F, 0x109D}, {0x135D, 0x135F},
+ {0x1369, 0x1371}, {0x1712, 0x1715}, {0x1732, 0x1734},
+ {0x1752, 0x1753}, {0x1772, 0x1773}, {0x17B4, 0x17D3},
+ {0x17DD, 0x17DD}, {0x17E0, 0x17E9}, {0x180B, 0x180D},
+ {0x180F, 0x1819}, {0x18A9, 0x18A9}, {0x1920, 0x192B},
+ {0x1930, 0x193B}, {0x1946, 0x194F}, {0x19D0, 0x19DA},
+ {0x1A17, 0x1A1B}, {0x1A55, 0x1A5E}, {0x1A60, 0x1A7C},
+ {0x1A7F, 0x1A89}, {0x1A90, 0x1A99}, {0x1AB0, 0x1ABD},
+ {0x1ABF, 0x1ACE}, {0x1B00, 0x1B04}, {0x1B34, 0x1B44},
+ {0x1B50, 0x1B59}, {0x1B6B, 0x1B73}, {0x1B80, 0x1B82},
+ {0x1BA1, 0x1BAD}, {0x1BB0, 0x1BB9}, {0x1BE6, 0x1BF3},
+ {0x1C24, 0x1C37}, {0x1C40, 0x1C49}, {0x1C50, 0x1C59},
+ {0x1CD0, 0x1CD2}, {0x1CD4, 0x1CE8}, {0x1CED, 0x1CED},
+ {0x1CF4, 0x1CF4}, {0x1CF7, 0x1CF9}, {0x1DC0, 0x1DFF},
+ {0x200C, 0x200D}, {0x203F, 0x2040}, {0x2054, 0x2054},
+ {0x20D0, 0x20DC}, {0x20E1, 0x20E1}, {0x20E5, 0x20F0},
+ {0x2CEF, 0x2CF1}, {0x2D7F, 0x2D7F}, {0x2DE0, 0x2DFF},
+ {0x302A, 0x302F}, {0x3099, 0x309A}, {0x30FB, 0x30FB},
+ {0xA620, 0xA629}, {0xA66F, 0xA66F}, {0xA674, 0xA67D},
+ {0xA69E, 0xA69F}, {0xA6F0, 0xA6F1}, {0xA802, 0xA802},
+ {0xA806, 0xA806}, {0xA80B, 0xA80B}, {0xA823, 0xA827},
+ {0xA82C, 0xA82C}, {0xA880, 0xA881}, {0xA8B4, 0xA8C5},
+ {0xA8D0, 0xA8D9}, {0xA8E0, 0xA8F1}, {0xA8FF, 0xA909},
+ {0xA926, 0xA92D}, {0xA947, 0xA953}, {0xA980, 0xA983},
+ {0xA9B3, 0xA9C0}, {0xA9D0, 0xA9D9}, {0xA9E5, 0xA9E5},
+ {0xA9F0, 0xA9F9}, {0xAA29, 0xAA36}, {0xAA43, 0xAA43},
+ {0xAA4C, 0xAA4D}, {0xAA50, 0xAA59}, {0xAA7B, 0xAA7D},
+ {0xAAB0, 0xAAB0}, {0xAAB2, 0xAAB4}, {0xAAB7, 0xAAB8},
+ {0xAABE, 0xAABF}, {0xAAC1, 0xAAC1}, {0xAAEB, 0xAAEF},
+ {0xAAF5, 0xAAF6}, {0xABE3, 0xABEA}, {0xABEC, 0xABED},
+ {0xABF0, 0xABF9}, {0xFB1E, 0xFB1E}, {0xFE00, 0xFE0F},
+ {0xFE20, 0xFE2F}, {0xFE33, 0xFE34}, {0xFE4D, 0xFE4F},
+ {0xFF10, 0xFF19}, {0xFF3F, 0xFF3F}, {0xFF65, 0xFF65},
+ {0xFF9E, 0xFF9F}, {0x101FD, 0x101FD}, {0x102E0, 0x102E0},
+ {0x10376, 0x1037A}, {0x104A0, 0x104A9}, {0x10A01, 0x10A03},
+ {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F}, {0x10A38, 0x10A3A},
+ {0x10A3F, 0x10A3F}, {0x10AE5, 0x10AE6}, {0x10D24, 0x10D27},
+ {0x10D30, 0x10D39}, {0x10EAB, 0x10EAC}, {0x10EFD, 0x10EFF},
+ {0x10F46, 0x10F50}, {0x10F82, 0x10F85}, {0x11000, 0x11002},
+ {0x11038, 0x11046}, {0x11066, 0x11070}, {0x11073, 0x11074},
+ {0x1107F, 0x11082}, {0x110B0, 0x110BA}, {0x110C2, 0x110C2},
+ {0x110F0, 0x110F9}, {0x11100, 0x11102}, {0x11127, 0x11134},
+ {0x11136, 0x1113F}, {0x11145, 0x11146}, {0x11173, 0x11173},
+ {0x11180, 0x11182}, {0x111B3, 0x111C0}, {0x111C9, 0x111CC},
+ {0x111CE, 0x111D9}, {0x1122C, 0x11237}, {0x1123E, 0x1123E},
+ {0x11241, 0x11241}, {0x112DF, 0x112EA}, {0x112F0, 0x112F9},
+ {0x11300, 0x11303}, {0x1133B, 0x1133C}, {0x1133E, 0x11344},
+ {0x11347, 0x11348}, {0x1134B, 0x1134D}, {0x11357, 0x11357},
+ {0x11362, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374},
+ {0x11435, 0x11446}, {0x11450, 0x11459}, {0x1145E, 0x1145E},
+ {0x114B0, 0x114C3}, {0x114D0, 0x114D9}, {0x115AF, 0x115B5},
+ {0x115B8, 0x115C0}, {0x115DC, 0x115DD}, {0x11630, 0x11640},
+ {0x11650, 0x11659}, {0x116AB, 0x116B7}, {0x116C0, 0x116C9},
+ {0x1171D, 0x1172B}, {0x11730, 0x11739}, {0x1182C, 0x1183A},
+ {0x118E0, 0x118E9}, {0x11930, 0x11935}, {0x11937, 0x11938},
+ {0x1193B, 0x1193E}, {0x11940, 0x11940}, {0x11942, 0x11943},
+ {0x11950, 0x11959}, {0x119D1, 0x119D7}, {0x119DA, 0x119E0},
+ {0x119E4, 0x119E4}, {0x11A01, 0x11A0A}, {0x11A33, 0x11A39},
+ {0x11A3B, 0x11A3E}, {0x11A47, 0x11A47}, {0x11A51, 0x11A5B},
+ {0x11A8A, 0x11A99}, {0x11C2F, 0x11C36}, {0x11C38, 0x11C3F},
+ {0x11C50, 0x11C59}, {0x11C92, 0x11CA7}, {0x11CA9, 0x11CB6},
+ {0x11D31, 0x11D36}, {0x11D3A, 0x11D3A}, {0x11D3C, 0x11D3D},
+ {0x11D3F, 0x11D45}, {0x11D47, 0x11D47}, {0x11D50, 0x11D59},
+ {0x11D8A, 0x11D8E}, {0x11D90, 0x11D91}, {0x11D93, 0x11D97},
+ {0x11DA0, 0x11DA9}, {0x11EF3, 0x11EF6}, {0x11F00, 0x11F01},
+ {0x11F03, 0x11F03}, {0x11F34, 0x11F3A}, {0x11F3E, 0x11F42},
+ {0x11F50, 0x11F59}, {0x13440, 0x13440}, {0x13447, 0x13455},
+ {0x16A60, 0x16A69}, {0x16AC0, 0x16AC9}, {0x16AF0, 0x16AF4},
+ {0x16B30, 0x16B36}, {0x16B50, 0x16B59}, {0x16F4F, 0x16F4F},
+ {0x16F51, 0x16F87}, {0x16F8F, 0x16F92}, {0x16FE4, 0x16FE4},
+ {0x16FF0, 0x16FF1}, {0x1BC9D, 0x1BC9E}, {0x1CF00, 0x1CF2D},
+ {0x1CF30, 0x1CF46}, {0x1D165, 0x1D169}, {0x1D16D, 0x1D172},
+ {0x1D17B, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD},
+ {0x1D242, 0x1D244}, {0x1D7CE, 0x1D7FF}, {0x1DA00, 0x1DA36},
+ {0x1DA3B, 0x1DA6C}, {0x1DA75, 0x1DA75}, {0x1DA84, 0x1DA84},
+ {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006},
+ {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024},
+ {0x1E026, 0x1E02A}, {0x1E08F, 0x1E08F}, {0x1E130, 0x1E136},
+ {0x1E140, 0x1E149}, {0x1E2AE, 0x1E2AE}, {0x1E2EC, 0x1E2F9},
+ {0x1E4EC, 0x1E4F9}, {0x1E8D0, 0x1E8D6}, {0x1E944, 0x1E94A},
+ {0x1E950, 0x1E959}, {0x1FBF0, 0x1FBF9}, {0xE0100, 0xE01EF},
+};
+
+// Clang supports the "Mathematical notation profile" as an extension,
+// as described in https://www.unicode.org/L2/L2022/22230-math-profile.pdf
+// Math_Start
+static const llvm::sys::UnicodeCharRange
+ MathematicalNotationProfileIDStartRanges[] = {
+ {0x02202, 0x02202}, // ∂
+ {0x02207, 0x02207}, // ∇
+ {0x0221E, 0x0221E}, // ∞
+ {0x1D6C1, 0x1D6C1}, // 𝛁
+ {0x1D6DB, 0x1D6DB}, // 𝛛
+ {0x1D6FB, 0x1D6FB}, // 𝛻
+ {0x1D715, 0x1D715}, // 𝜕
+ {0x1D735, 0x1D735}, // 𝜵
+ {0x1D74F, 0x1D74F}, // 𝝏
+ {0x1D76F, 0x1D76F}, // 𝝯
+ {0x1D789, 0x1D789}, // 𝞉
+ {0x1D7A9, 0x1D7A9}, // 𝞩
+ {0x1D7C3, 0x1D7C3}, // 𝟃
+};
+
+// Math_Continue
+static const llvm::sys::UnicodeCharRange
+ MathematicalNotationProfileIDContinueRanges[] = {
+ {0x000B2, 0x000B3}, // ²-³
+ {0x000B9, 0x000B9}, // ¹
+ {0x02070, 0x02070}, // ⁰
+ {0x02074, 0x0207E}, // ⁴-⁾
+ {0x02080, 0x0208E}, // ₀-₎
+};
+
// C11 D.1, C++11 [charname.allowed]
static const llvm::sys::UnicodeCharRange C11AllowedIDCharRanges[] = {
// 1
@@ -40,127 +429,6 @@ static const llvm::sys::UnicodeCharRange C11AllowedIDCharRanges[] = {
{ 0xD0000, 0xDFFFD }, { 0xE0000, 0xEFFFD }
};
-// C++03 [extendid]
-// Note that this is not the same as C++98, but we don't distinguish C++98
-// and C++03 in Clang.
-static const llvm::sys::UnicodeCharRange CXX03AllowedIDCharRanges[] = {
- // Latin
- { 0x00C0, 0x00D6 }, { 0x00D8, 0x00F6 }, { 0x00F8, 0x01F5 },
- { 0x01FA, 0x0217 }, { 0x0250, 0x02A8 },
-
- // Greek
- { 0x0384, 0x0384 }, { 0x0388, 0x038A }, { 0x038C, 0x038C },
- { 0x038E, 0x03A1 }, { 0x03A3, 0x03CE }, { 0x03D0, 0x03D6 },
- { 0x03DA, 0x03DA }, { 0x03DC, 0x03DC }, { 0x03DE, 0x03DE },
- { 0x03E0, 0x03E0 }, { 0x03E2, 0x03F3 },
-
- // Cyrillic
- { 0x0401, 0x040D }, { 0x040F, 0x044F }, { 0x0451, 0x045C },
- { 0x045E, 0x0481 }, { 0x0490, 0x04C4 }, { 0x04C7, 0x04C8 },
- { 0x04CB, 0x04CC }, { 0x04D0, 0x04EB }, { 0x04EE, 0x04F5 },
- { 0x04F8, 0x04F9 },
-
- // Armenian
- { 0x0531, 0x0556 }, { 0x0561, 0x0587 },
-
- // Hebrew
- { 0x05D0, 0x05EA }, { 0x05F0, 0x05F4 },
-
- // Arabic
- { 0x0621, 0x063A }, { 0x0640, 0x0652 }, { 0x0670, 0x06B7 },
- { 0x06BA, 0x06BE }, { 0x06C0, 0x06CE }, { 0x06E5, 0x06E7 },
-
- // Devanagari
- { 0x0905, 0x0939 }, { 0x0958, 0x0962 },
-
- // Bengali
- { 0x0985, 0x098C }, { 0x098F, 0x0990 }, { 0x0993, 0x09A8 },
- { 0x09AA, 0x09B0 }, { 0x09B2, 0x09B2 }, { 0x09B6, 0x09B9 },
- { 0x09DC, 0x09DD }, { 0x09DF, 0x09E1 }, { 0x09F0, 0x09F1 },
-
- // Gurmukhi
- { 0x0A05, 0x0A0A }, { 0x0A0F, 0x0A10 }, { 0x0A13, 0x0A28 },
- { 0x0A2A, 0x0A30 }, { 0x0A32, 0x0A33 }, { 0x0A35, 0x0A36 },
- { 0x0A38, 0x0A39 }, { 0x0A59, 0x0A5C }, { 0x0A5E, 0x0A5E },
-
- // Gujarti
- { 0x0A85, 0x0A8B }, { 0x0A8D, 0x0A8D }, { 0x0A8F, 0x0A91 },
- { 0x0A93, 0x0AA8 }, { 0x0AAA, 0x0AB0 }, { 0x0AB2, 0x0AB3 },
- { 0x0AB5, 0x0AB9 }, { 0x0AE0, 0x0AE0 },
-
- // Oriya
- { 0x0B05, 0x0B0C }, { 0x0B0F, 0x0B10 }, { 0x0B13, 0x0B28 },
- { 0x0B2A, 0x0B30 }, { 0x0B32, 0x0B33 }, { 0x0B36, 0x0B39 },
- { 0x0B5C, 0x0B5D }, { 0x0B5F, 0x0B61 },
-
- // Tamil
- { 0x0B85, 0x0B8A }, { 0x0B8E, 0x0B90 }, { 0x0B92, 0x0B95 },
- { 0x0B99, 0x0B9A }, { 0x0B9C, 0x0B9C }, { 0x0B9E, 0x0B9F },
- { 0x0BA3, 0x0BA4 }, { 0x0BA8, 0x0BAA }, { 0x0BAE, 0x0BB5 },
- { 0x0BB7, 0x0BB9 },
-
- // Telugu
- { 0x0C05, 0x0C0C }, { 0x0C0E, 0x0C10 }, { 0x0C12, 0x0C28 },
- { 0x0C2A, 0x0C33 }, { 0x0C35, 0x0C39 }, { 0x0C60, 0x0C61 },
-
- // Kannada
- { 0x0C85, 0x0C8C }, { 0x0C8E, 0x0C90 }, { 0x0C92, 0x0CA8 },
- { 0x0CAA, 0x0CB3 }, { 0x0CB5, 0x0CB9 }, { 0x0CE0, 0x0CE1 },
-
- // Malayam
- { 0x0D05, 0x0D0C }, { 0x0D0E, 0x0D10 }, { 0x0D12, 0x0D28 },
- { 0x0D2A, 0x0D39 }, { 0x0D60, 0x0D61 },
-
- // Thai
- { 0x0E01, 0x0E30 }, { 0x0E32, 0x0E33 }, { 0x0E40, 0x0E46 },
- { 0x0E4F, 0x0E5B },
-
- // Lao
- { 0x0E81, 0x0E82 }, { 0x0E84, 0x0E84 }, { 0x0E87, 0x0E87 },
- { 0x0E88, 0x0E88 }, { 0x0E8A, 0x0E8A }, { 0x0E8D, 0x0E8D },
- { 0x0E94, 0x0E97 }, { 0x0E99, 0x0E9F }, { 0x0EA1, 0x0EA3 },
- { 0x0EA5, 0x0EA5 }, { 0x0EA7, 0x0EA7 }, { 0x0EAA, 0x0EAA },
- { 0x0EAB, 0x0EAB }, { 0x0EAD, 0x0EB0 }, { 0x0EB2, 0x0EB2 },
- { 0x0EB3, 0x0EB3 }, { 0x0EBD, 0x0EBD }, { 0x0EC0, 0x0EC4 },
- { 0x0EC6, 0x0EC6 },
-
- // Georgian
- { 0x10A0, 0x10C5 }, { 0x10D0, 0x10F6 },
-
- // Hangul
- { 0x1100, 0x1159 }, { 0x1161, 0x11A2 }, { 0x11A8, 0x11F9 },
-
- // Latin (2)
- { 0x1E00, 0x1E9A }, { 0x1EA0, 0x1EF9 },
-
- // Greek (2)
- { 0x1F00, 0x1F15 }, { 0x1F18, 0x1F1D }, { 0x1F20, 0x1F45 },
- { 0x1F48, 0x1F4D }, { 0x1F50, 0x1F57 }, { 0x1F59, 0x1F59 },
- { 0x1F5B, 0x1F5B }, { 0x1F5D, 0x1F5D }, { 0x1F5F, 0x1F7D },
- { 0x1F80, 0x1FB4 }, { 0x1FB6, 0x1FBC }, { 0x1FC2, 0x1FC4 },
- { 0x1FC6, 0x1FCC }, { 0x1FD0, 0x1FD3 }, { 0x1FD6, 0x1FDB },
- { 0x1FE0, 0x1FEC }, { 0x1FF2, 0x1FF4 }, { 0x1FF6, 0x1FFC },
-
- // Hiragana
- { 0x3041, 0x3094 }, { 0x309B, 0x309E },
-
- // Katakana
- { 0x30A1, 0x30FE },
-
- // Bopmofo [sic]
- { 0x3105, 0x312C },
-
- // CJK Unified Ideographs
- { 0x4E00, 0x9FA5 }, { 0xF900, 0xFA2D }, { 0xFB1F, 0xFB36 },
- { 0xFB38, 0xFB3C }, { 0xFB3E, 0xFB3E }, { 0xFB40, 0xFB41 },
- { 0xFB42, 0xFB44 }, { 0xFB46, 0xFBB1 }, { 0xFBD3, 0xFD3F },
- { 0xFD50, 0xFD8F }, { 0xFD92, 0xFDC7 }, { 0xFDF0, 0xFDFB },
- { 0xFE70, 0xFE72 }, { 0xFE74, 0xFE74 }, { 0xFE76, 0xFEFC },
- { 0xFF21, 0xFF3A }, { 0xFF41, 0xFF5A }, { 0xFF66, 0xFFBE },
- { 0xFFC2, 0xFFC7 }, { 0xFFCA, 0xFFCF }, { 0xFFD2, 0xFFD7 },
- { 0xFFDA, 0xFFDC }
-};
-
// C99 Annex D
static const llvm::sys::UnicodeCharRange C99AllowedIDCharRanges[] = {
// Latin (1)
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseAST.cpp b/contrib/llvm-project/clang/lib/Parse/ParseAST.cpp
index 01510e8caf3b..77ab3b556da5 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseAST.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseAST.cpp
@@ -18,6 +18,7 @@
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
#include "clang/Sema/CodeCompleteConsumer.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaConsumer.h"
#include "clang/Sema/TemplateInstCallback.h"
@@ -154,8 +155,12 @@ void clang::ParseAST(Sema &S, bool PrintStats, bool SkipFunctionBodies) {
llvm::TimeTraceScope TimeScope("Frontend");
P.Initialize();
Parser::DeclGroupPtrTy ADecl;
- for (bool AtEOF = P.ParseFirstTopLevelDecl(ADecl); !AtEOF;
- AtEOF = P.ParseTopLevelDecl(ADecl)) {
+ Sema::ModuleImportState ImportState;
+ EnterExpressionEvaluationContext PotentiallyEvaluated(
+ S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
+
+ for (bool AtEOF = P.ParseFirstTopLevelDecl(ADecl, ImportState); !AtEOF;
+ AtEOF = P.ParseTopLevelDecl(ADecl, ImportState)) {
// If we got a null return and something *was* parsed, ignore it. This
// is due to a top-level semicolon, an action override, or a parse error
// skipping something.
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp b/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp
index 116724a0d50b..573c90a36eea 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp
@@ -10,21 +10,23 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Parse/Parser.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Scope.h"
+
using namespace clang;
/// ParseCXXInlineMethodDef - We parsed and verified that the specified
/// Declarator is a well formed C++ inline method definition. Now lex its body
/// and store its tokens for parsing after the C++ class is complete.
NamedDecl *Parser::ParseCXXInlineMethodDef(
- AccessSpecifier AS, ParsedAttributes &AccessAttrs, ParsingDeclarator &D,
- const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers &VS,
- SourceLocation PureSpecLoc) {
+ AccessSpecifier AS, const ParsedAttributesView &AccessAttrs,
+ ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo,
+ const VirtSpecifiers &VS, SourceLocation PureSpecLoc) {
assert(D.isFunctionDeclarator() && "This isn't a function declarator!");
assert(Tok.isOneOf(tok::l_brace, tok::colon, tok::kw_try, tok::equal) &&
"Current token not a '{', ':', '=', or 'try'!");
@@ -140,8 +142,22 @@ NamedDecl *Parser::ParseCXXInlineMethodDef(
// function body.
if (ConsumeAndStoreFunctionPrologue(Toks)) {
// We didn't find the left-brace we expected after the
- // constructor initializer; we already printed an error, and it's likely
- // impossible to recover, so don't try to parse this method later.
+ // constructor initializer.
+
+ // If we're code-completing and the completion point was in the broken
+ // initializer, we want to parse it even though that will fail.
+ if (PP.isCodeCompletionEnabled() &&
+ llvm::any_of(Toks, [](const Token &Tok) {
+ return Tok.is(tok::code_completion);
+ })) {
+ // If we gave up at the completion point, the initializer list was
+ // likely truncated, so don't eat more tokens. We'll hit some extra
+ // errors, but they should be ignored in code completion.
+ return FnD;
+ }
+
+ // We already printed an error, and it's likely impossible to recover,
+ // so don't try to parse this method later.
// Skip over the rest of the decl and back to somewhere that looks
// reasonable.
SkipMalformedDecl();
@@ -379,9 +395,10 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
DefArgResult = ParseBraceInitializer();
} else
DefArgResult = ParseAssignmentExpression();
- DefArgResult = Actions.CorrectDelayedTyposInExpr(DefArgResult);
+ DefArgResult = Actions.CorrectDelayedTyposInExpr(DefArgResult, Param);
if (DefArgResult.isInvalid()) {
- Actions.ActOnParamDefaultArgumentError(Param, EqualLoc);
+ Actions.ActOnParamDefaultArgumentError(Param, EqualLoc,
+ /*DefaultArg=*/nullptr);
} else {
if (Tok.isNot(tok::eof) || Tok.getEofData() != Param) {
// The last two tokens are the terminator and the saved value of
@@ -452,13 +469,14 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
CXXMethodDecl *Method;
if (FunctionTemplateDecl *FunTmpl
= dyn_cast<FunctionTemplateDecl>(LM.Method))
- Method = cast<CXXMethodDecl>(FunTmpl->getTemplatedDecl());
+ Method = dyn_cast<CXXMethodDecl>(FunTmpl->getTemplatedDecl());
else
- Method = cast<CXXMethodDecl>(LM.Method);
+ Method = dyn_cast<CXXMethodDecl>(LM.Method);
- Sema::CXXThisScopeRAII ThisScope(Actions, Method->getParent(),
- Method->getMethodQualifiers(),
- getLangOpts().CPlusPlus11);
+ Sema::CXXThisScopeRAII ThisScope(
+ Actions, Method ? Method->getParent() : nullptr,
+ Method ? Method->getMethodQualifiers() : Qualifiers{},
+ Method && getLangOpts().CPlusPlus11);
// Parse the exception-specification.
SourceRange SpecificationRange;
@@ -633,6 +651,11 @@ void Parser::ParseLexedMemberInitializer(LateParsedMemberInitializer &MI) {
Actions.ActOnStartCXXInClassMemberInitializer();
+ // The initializer isn't actually potentially evaluated unless it is
+ // used.
+ EnterExpressionEvaluationContext Eval(
+ Actions, Sema::ExpressionEvaluationContext::PotentiallyEvaluatedIfUsed);
+
ExprResult Init = ParseCXXMemberInitializer(MI.Field, /*IsFunction=*/false,
EqualLoc);
@@ -705,7 +728,6 @@ void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
ConsumeAnyToken(/*ConsumeCodeCompletionTok=*/true);
ParsedAttributes Attrs(AttrFactory);
- SourceLocation endLoc;
if (LA.Decls.size() > 0) {
Decl *D = LA.Decls[0];
@@ -728,8 +750,8 @@ void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
Actions.ActOnReenterFunctionContext(Actions.CurScope, D);
}
- ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc,
- nullptr, SourceLocation(), ParsedAttr::AS_GNU,
+ ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, nullptr,
+ nullptr, SourceLocation(), ParsedAttr::Form::GNU(),
nullptr);
if (HasFunScope)
@@ -737,8 +759,8 @@ void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
} else {
// If there are multiple decls, then the decl cannot be within the
// function scope.
- ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc,
- nullptr, SourceLocation(), ParsedAttr::AS_GNU,
+ ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, nullptr,
+ nullptr, SourceLocation(), ParsedAttr::Form::GNU(),
nullptr);
}
} else {
@@ -781,7 +803,7 @@ void Parser::ParseLexedPragma(LateParsedPragma &LP) {
case tok::annot_attr_openmp:
case tok::annot_pragma_openmp: {
AccessSpecifier AS = LP.getAccessSpecifier();
- ParsedAttributesWithRange Attrs(AttrFactory);
+ ParsedAttributes Attrs(AttrFactory);
(void)ParseOpenMPDeclarativeDirectiveWithExtDecl(AS, Attrs);
break;
}
@@ -802,7 +824,7 @@ bool Parser::ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
// We always want this function to consume at least one token if the first
// token isn't T and if not at EOF.
bool isFirstTokenConsumed = true;
- while (1) {
+ while (true) {
// If we found one of the tokens, stop and return true.
if (Tok.is(T1) || Tok.is(T2)) {
if (ConsumeFinalToken) {
@@ -817,6 +839,7 @@ bool Parser::ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
case tok::annot_module_begin:
case tok::annot_module_end:
case tok::annot_module_include:
+ case tok::annot_repl_input_end:
// Ran out of tokens.
return false;
@@ -866,7 +889,7 @@ bool Parser::ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
case tok::semi:
if (StopAtSemi)
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
// consume this token.
Toks.push_back(Tok);
@@ -1158,7 +1181,7 @@ bool Parser::ConsumeAndStoreInitializer(CachedTokens &Toks,
unsigned AngleCount = 0;
unsigned KnownTemplateCount = 0;
- while (1) {
+ while (true) {
switch (Tok.getKind()) {
case tok::comma:
// If we might be in a template, perform a tentative parse to check.
@@ -1223,6 +1246,7 @@ bool Parser::ConsumeAndStoreInitializer(CachedTokens &Toks,
case tok::annot_module_begin:
case tok::annot_module_end:
case tok::annot_module_include:
+ case tok::annot_repl_input_end:
// Ran out of tokens.
return false;
@@ -1244,13 +1268,13 @@ bool Parser::ConsumeAndStoreInitializer(CachedTokens &Toks,
goto consume_token;
if (AngleCount) --AngleCount;
if (KnownTemplateCount) --KnownTemplateCount;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case tok::greatergreater:
if (!getLangOpts().CPlusPlus11)
goto consume_token;
if (AngleCount) --AngleCount;
if (KnownTemplateCount) --KnownTemplateCount;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case tok::greater:
if (AngleCount) --AngleCount;
if (KnownTemplateCount) --KnownTemplateCount;
@@ -1355,7 +1379,7 @@ bool Parser::ConsumeAndStoreInitializer(CachedTokens &Toks,
case tok::semi:
if (CIK == CIK_DefaultInitializer)
return true; // End of the default initializer.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
consume_token:
Toks.push_back(Tok);
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp b/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
index 939323517b4d..356e7851ec63 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
@@ -10,24 +10,27 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Parse/Parser.h"
-#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/Basic/AddressSpaces.h"
+#include "clang/Basic/AttributeCommonInfo.h"
#include "clang/Basic/Attributes.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TokenKinds.h"
#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
+#include "clang/Parse/RAIIObjectsForParser.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaDiagnostic.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
+#include <optional>
using namespace clang;
@@ -40,10 +43,8 @@ using namespace clang;
/// specifier-qualifier-list abstract-declarator[opt]
///
/// Called type-id in C++.
-TypeResult Parser::ParseTypeName(SourceRange *Range,
- DeclaratorContext Context,
- AccessSpecifier AS,
- Decl **OwnedType,
+TypeResult Parser::ParseTypeName(SourceRange *Range, DeclaratorContext Context,
+ AccessSpecifier AS, Decl **OwnedType,
ParsedAttributes *Attrs) {
DeclSpecContext DSC = getDeclSpecContextFromDeclaratorContext(Context);
if (DSC == DeclSpecContext::DSC_normal)
@@ -57,8 +58,20 @@ TypeResult Parser::ParseTypeName(SourceRange *Range,
if (OwnedType)
*OwnedType = DS.isTypeSpecOwned() ? DS.getRepAsDecl() : nullptr;
+ // Move declspec attributes to ParsedAttributes
+ if (Attrs) {
+ llvm::SmallVector<ParsedAttr *, 1> ToBeMoved;
+ for (ParsedAttr &AL : DS.getAttributes()) {
+ if (AL.isDeclspecAttribute())
+ ToBeMoved.push_back(&AL);
+ }
+
+ for (ParsedAttr *AL : ToBeMoved)
+ Attrs->takeOneFrom(DS.getAttributes(), AL);
+ }
+
// Parse the abstract-declarator, if present.
- Declarator DeclaratorInfo(DS, Context);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(), Context);
ParseDeclarator(DeclaratorInfo);
if (Range)
*Range = DeclaratorInfo.getSourceRange();
@@ -66,12 +79,12 @@ TypeResult Parser::ParseTypeName(SourceRange *Range,
if (DeclaratorInfo.isInvalidType())
return true;
- return Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ return Actions.ActOnTypeName(DeclaratorInfo);
}
/// Normalizes an attribute name by dropping prefixed and suffixed __.
static StringRef normalizeAttrName(StringRef Name) {
- if (Name.size() >= 4 && Name.startswith("__") && Name.endswith("__"))
+ if (Name.size() >= 4 && Name.starts_with("__") && Name.ends_with("__"))
return Name.drop_front(2).drop_back(2);
return Name;
}
@@ -103,9 +116,7 @@ static bool FindLocsWithCommonFileID(Preprocessor &PP, SourceLocation StartLoc,
return AttrStartIsInMacro && AttrEndIsInMacro;
}
-void Parser::ParseAttributes(unsigned WhichAttrKinds,
- ParsedAttributesWithRange &Attrs,
- SourceLocation *End,
+void Parser::ParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
LateParsedAttrList *LateAttrs) {
bool MoreToParse;
do {
@@ -113,11 +124,11 @@ void Parser::ParseAttributes(unsigned WhichAttrKinds,
// parsed, loop to ensure all specified attribute combinations are parsed.
MoreToParse = false;
if (WhichAttrKinds & PAKM_CXX11)
- MoreToParse |= MaybeParseCXX11Attributes(Attrs, End);
+ MoreToParse |= MaybeParseCXX11Attributes(Attrs);
if (WhichAttrKinds & PAKM_GNU)
- MoreToParse |= MaybeParseGNUAttributes(Attrs, End, LateAttrs);
+ MoreToParse |= MaybeParseGNUAttributes(Attrs, LateAttrs);
if (WhichAttrKinds & PAKM_Declspec)
- MoreToParse |= MaybeParseMicrosoftDeclSpecs(Attrs, End);
+ MoreToParse |= MaybeParseMicrosoftDeclSpecs(Attrs);
} while (MoreToParse);
}
@@ -162,15 +173,12 @@ void Parser::ParseAttributes(unsigned WhichAttrKinds,
/// ',' or ')' are ignored, otherwise they produce a parse error.
///
/// We follow the C++ model, but don't allow junk after the identifier.
-void Parser::ParseGNUAttributes(ParsedAttributesWithRange &Attrs,
- SourceLocation *EndLoc,
+void Parser::ParseGNUAttributes(ParsedAttributes &Attrs,
LateParsedAttrList *LateAttrs, Declarator *D) {
assert(Tok.is(tok::kw___attribute) && "Not a GNU attribute list!");
- SourceLocation StartLoc = Tok.getLocation(), Loc;
-
- if (!EndLoc)
- EndLoc = &Loc;
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation EndLoc = StartLoc;
while (Tok.is(tok::kw___attribute)) {
SourceLocation AttrTokLoc = ConsumeToken();
@@ -195,6 +203,11 @@ void Parser::ParseGNUAttributes(ParsedAttributesWithRange &Attrs,
// Expect an identifier or declaration specifier (const, int, etc.)
if (Tok.isAnnotation())
break;
+ if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
+ Actions.CodeCompleteAttribute(AttributeCommonInfo::Syntax::AS_GNU);
+ break;
+ }
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
if (!AttrName)
break;
@@ -203,14 +216,14 @@ void Parser::ParseGNUAttributes(ParsedAttributesWithRange &Attrs,
if (Tok.isNot(tok::l_paren)) {
Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- ParsedAttr::AS_GNU);
+ ParsedAttr::Form::GNU());
continue;
}
// Handle "parameterized" attributes
if (!LateAttrs || !isAttributeLateParsed(*AttrName)) {
- ParseGNUAttributeArgs(AttrName, AttrNameLoc, Attrs, EndLoc, nullptr,
- SourceLocation(), ParsedAttr::AS_GNU, D);
+ ParseGNUAttributeArgs(AttrName, AttrNameLoc, Attrs, &EndLoc, nullptr,
+ SourceLocation(), ParsedAttr::Form::GNU(), D);
continue;
}
@@ -242,8 +255,7 @@ void Parser::ParseGNUAttributes(ParsedAttributesWithRange &Attrs,
SourceLocation Loc = Tok.getLocation();
if (ExpectAndConsume(tok::r_paren))
SkipUntil(tok::r_paren, StopAtSemi);
- if (EndLoc)
- *EndLoc = Loc;
+ EndLoc = Loc;
// If this was declared in a macro, attach the macro IdentifierInfo to the
// parsed attribute.
@@ -265,7 +277,7 @@ void Parser::ParseGNUAttributes(ParsedAttributesWithRange &Attrs,
}
}
- Attrs.Range = SourceRange(StartLoc, *EndLoc);
+ Attrs.Range = SourceRange(StartLoc, EndLoc);
}
/// Determine whether the given attribute has an identifier argument.
@@ -277,6 +289,16 @@ static bool attributeHasIdentifierArg(const IdentifierInfo &II) {
#undef CLANG_ATTR_IDENTIFIER_ARG_LIST
}
+/// Determine whether the given attribute has an identifier argument.
+static ParsedAttributeArgumentsProperties
+attributeStringLiteralListArg(const IdentifierInfo &II) {
+#define CLANG_ATTR_STRING_LITERAL_ARG_LIST
+ return llvm::StringSwitch<uint32_t>(normalizeAttrName(II.getName()))
+#include "clang/Parse/AttrParserStringSwitches.inc"
+ .Default(0);
+#undef CLANG_ATTR_STRING_LITERAL_ARG_LIST
+}
+
/// Determine whether the given attribute has a variadic identifier argument.
static bool attributeHasVariadicIdentifierArg(const IdentifierInfo &II) {
#define CLANG_ATTR_VARIADIC_IDENTIFIER_ARG_LIST
@@ -295,6 +317,15 @@ static bool attributeTreatsKeywordThisAsIdentifier(const IdentifierInfo &II) {
#undef CLANG_ATTR_THIS_ISA_IDENTIFIER_ARG_LIST
}
+/// Determine if an attribute accepts parameter packs.
+static bool attributeAcceptsExprPack(const IdentifierInfo &II) {
+#define CLANG_ATTR_ACCEPTS_EXPR_PACK
+ return llvm::StringSwitch<bool>(normalizeAttrName(II.getName()))
+#include "clang/Parse/AttrParserStringSwitches.inc"
+ .Default(false);
+#undef CLANG_ATTR_ACCEPTS_EXPR_PACK
+}
+
/// Determine whether the given attribute parses a type argument.
static bool attributeIsTypeArgAttr(const IdentifierInfo &II) {
#define CLANG_ATTR_TYPE_ARG_LIST
@@ -326,10 +357,9 @@ IdentifierLoc *Parser::ParseIdentifierLoc() {
void Parser::ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
- SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax) {
+ ParsedAttr::Form Form) {
BalancedDelimiterTracker Parens(*this, tok::l_paren);
Parens.consumeOpen();
@@ -346,21 +376,98 @@ void Parser::ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
if (T.isUsable())
Attrs.addNewTypeAttr(&AttrName,
SourceRange(AttrNameLoc, Parens.getCloseLocation()),
- ScopeName, ScopeLoc, T.get(), Syntax);
+ ScopeName, ScopeLoc, T.get(), Form);
else
Attrs.addNew(&AttrName, SourceRange(AttrNameLoc, Parens.getCloseLocation()),
- ScopeName, ScopeLoc, nullptr, 0, Syntax);
+ ScopeName, ScopeLoc, nullptr, 0, Form);
+}
+
+ExprResult
+Parser::ParseUnevaluatedStringInAttribute(const IdentifierInfo &AttrName) {
+ if (Tok.is(tok::l_paren)) {
+ BalancedDelimiterTracker Paren(*this, tok::l_paren);
+ Paren.consumeOpen();
+ ExprResult Res = ParseUnevaluatedStringInAttribute(AttrName);
+ Paren.consumeClose();
+ return Res;
+ }
+ if (!isTokenStringLiteral()) {
+ Diag(Tok.getLocation(), diag::err_expected_string_literal)
+ << /*in attribute...*/ 4 << AttrName.getName();
+ return ExprError();
+ }
+ return ParseUnevaluatedStringLiteralExpression();
+}
+
+bool Parser::ParseAttributeArgumentList(
+ const IdentifierInfo &AttrName, SmallVectorImpl<Expr *> &Exprs,
+ ParsedAttributeArgumentsProperties ArgsProperties) {
+ bool SawError = false;
+ unsigned Arg = 0;
+ while (true) {
+ ExprResult Expr;
+ if (ArgsProperties.isStringLiteralArg(Arg)) {
+ Expr = ParseUnevaluatedStringInAttribute(AttrName);
+ } else if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+ Expr = ParseBraceInitializer();
+ } else {
+ Expr = ParseAssignmentExpression();
+ }
+ Expr = Actions.CorrectDelayedTyposInExpr(Expr);
+
+ if (Tok.is(tok::ellipsis))
+ Expr = Actions.ActOnPackExpansion(Expr.get(), ConsumeToken());
+ else if (Tok.is(tok::code_completion)) {
+ // There's nothing to suggest in here as we parsed a full expression.
+ // Instead fail and propagate the error since caller might have something
+ // the suggest, e.g. signature help in function call. Note that this is
+ // performed before pushing the \p Expr, so that signature help can report
+ // current argument correctly.
+ SawError = true;
+ cutOffParsing();
+ break;
+ }
+
+ if (Expr.isInvalid()) {
+ SawError = true;
+ break;
+ }
+
+ Exprs.push_back(Expr.get());
+
+ if (Tok.isNot(tok::comma))
+ break;
+ // Move to the next argument, remember where the comma was.
+ Token Comma = Tok;
+ ConsumeToken();
+ checkPotentialAngleBracketDelimiter(Comma);
+ Arg++;
+ }
+
+ if (SawError) {
+ // Ensure typos get diagnosed when errors were encountered while parsing the
+ // expression list.
+ for (auto &E : Exprs) {
+ ExprResult Expr = Actions.CorrectDelayedTyposInExpr(E);
+ if (Expr.isUsable())
+ E = Expr.get();
+ }
+ }
+ return SawError;
}
unsigned Parser::ParseAttributeArgsCommon(
IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax) {
+ SourceLocation ScopeLoc, ParsedAttr::Form Form) {
// Ignore the left paren location for now.
ConsumeParen();
bool ChangeKWThisToIdent = attributeTreatsKeywordThisAsIdentifier(*AttrName);
bool AttributeIsTypeArgAttr = attributeIsTypeArgAttr(*AttrName);
+ bool AttributeHasVariadicIdentifierArg =
+ attributeHasVariadicIdentifierArg(*AttrName);
// Interpret "kw_this" as an identifier if the attributed requests it.
if (ChangeKWThisToIdent && Tok.is(tok::kw_this))
@@ -369,10 +476,10 @@ unsigned Parser::ParseAttributeArgsCommon(
ArgsVector ArgExprs;
if (Tok.is(tok::identifier)) {
// If this attribute wants an 'identifier' argument, make it so.
- bool IsIdentifierArg = attributeHasIdentifierArg(*AttrName) ||
- attributeHasVariadicIdentifierArg(*AttrName);
+ bool IsIdentifierArg = AttributeHasVariadicIdentifierArg ||
+ attributeHasIdentifierArg(*AttrName);
ParsedAttr::Kind AttrKind =
- ParsedAttr::getParsedKind(AttrName, ScopeName, Syntax);
+ ParsedAttr::getParsedKind(AttrName, ScopeName, Form.getSyntax());
// If we don't know how to parse this attribute, but this is the only
// token in this argument, assume it's meant to be an identifier.
@@ -392,42 +499,79 @@ unsigned Parser::ParseAttributeArgsCommon(
if (!ArgExprs.empty())
ConsumeToken();
- // Parse the non-empty comma-separated list of expressions.
- do {
- // Interpret "kw_this" as an identifier if the attributed requests it.
- if (ChangeKWThisToIdent && Tok.is(tok::kw_this))
- Tok.setKind(tok::identifier);
+ if (AttributeIsTypeArgAttr) {
+ // FIXME: Multiple type arguments are not implemented.
+ TypeResult T = ParseTypeName();
+ if (T.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return 0;
+ }
+ if (T.isUsable())
+ TheParsedType = T.get();
+ } else if (AttributeHasVariadicIdentifierArg) {
+ // Parse variadic identifier arg. This can either consume identifiers or
+ // expressions. Variadic identifier args do not support parameter packs
+ // because those are typically used for attributes with enumeration
+ // arguments, and those enumerations are not something the user could
+ // express via a pack.
+ do {
+ // Interpret "kw_this" as an identifier if the attributed requests it.
+ if (ChangeKWThisToIdent && Tok.is(tok::kw_this))
+ Tok.setKind(tok::identifier);
+
+ ExprResult ArgExpr;
+ if (Tok.is(tok::identifier)) {
+ ArgExprs.push_back(ParseIdentifierLoc());
+ } else {
+ bool Uneval = attributeParsedArgsUnevaluated(*AttrName);
+ EnterExpressionEvaluationContext Unevaluated(
+ Actions,
+ Uneval ? Sema::ExpressionEvaluationContext::Unevaluated
+ : Sema::ExpressionEvaluationContext::ConstantEvaluated);
- ExprResult ArgExpr;
- if (AttributeIsTypeArgAttr) {
- TypeResult T = ParseTypeName();
- if (T.isInvalid()) {
- SkipUntil(tok::r_paren, StopAtSemi);
- return 0;
+ ExprResult ArgExpr(
+ Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression()));
+
+ if (ArgExpr.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return 0;
+ }
+ ArgExprs.push_back(ArgExpr.get());
}
- if (T.isUsable())
- TheParsedType = T.get();
- break; // FIXME: Multiple type arguments are not implemented.
- } else if (Tok.is(tok::identifier) &&
- attributeHasVariadicIdentifierArg(*AttrName)) {
- ArgExprs.push_back(ParseIdentifierLoc());
- } else {
- bool Uneval = attributeParsedArgsUnevaluated(*AttrName);
- EnterExpressionEvaluationContext Unevaluated(
- Actions,
- Uneval ? Sema::ExpressionEvaluationContext::Unevaluated
- : Sema::ExpressionEvaluationContext::ConstantEvaluated);
-
- ExprResult ArgExpr(
- Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression()));
- if (ArgExpr.isInvalid()) {
+ // Eat the comma, move to the next argument
+ } while (TryConsumeToken(tok::comma));
+ } else {
+ // General case. Parse all available expressions.
+ bool Uneval = attributeParsedArgsUnevaluated(*AttrName);
+ EnterExpressionEvaluationContext Unevaluated(
+ Actions, Uneval
+ ? Sema::ExpressionEvaluationContext::Unevaluated
+ : Sema::ExpressionEvaluationContext::ConstantEvaluated);
+
+ ExprVector ParsedExprs;
+ ParsedAttributeArgumentsProperties ArgProperties =
+ attributeStringLiteralListArg(*AttrName);
+ if (ParseAttributeArgumentList(*AttrName, ParsedExprs, ArgProperties)) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return 0;
+ }
+
+ // Pack expansion must currently be explicitly supported by an attribute.
+ for (size_t I = 0; I < ParsedExprs.size(); ++I) {
+ if (!isa<PackExpansionExpr>(ParsedExprs[I]))
+ continue;
+
+ if (!attributeAcceptsExprPack(*AttrName)) {
+ Diag(Tok.getLocation(),
+ diag::err_attribute_argument_parm_pack_not_supported)
+ << AttrName;
SkipUntil(tok::r_paren, StopAtSemi);
return 0;
}
- ArgExprs.push_back(ArgExpr.get());
}
- // Eat the comma, move to the next argument
- } while (TryConsumeToken(tok::comma));
+
+ ArgExprs.insert(ArgExprs.end(), ParsedExprs.begin(), ParsedExprs.end());
+ }
}
SourceLocation RParen = Tok.getLocation();
@@ -436,10 +580,10 @@ unsigned Parser::ParseAttributeArgsCommon(
if (AttributeIsTypeArgAttr && !TheParsedType.get().isNull()) {
Attrs.addNewTypeAttr(AttrName, SourceRange(AttrNameLoc, RParen),
- ScopeName, ScopeLoc, TheParsedType, Syntax);
+ ScopeName, ScopeLoc, TheParsedType, Form);
} else {
Attrs.addNew(AttrName, SourceRange(AttrLoc, RParen), ScopeName, ScopeLoc,
- ArgExprs.data(), ArgExprs.size(), Syntax);
+ ArgExprs.data(), ArgExprs.size(), Form);
}
}
@@ -451,49 +595,45 @@ unsigned Parser::ParseAttributeArgsCommon(
/// Parse the arguments to a parameterized GNU attribute or
/// a C++11 attribute in "gnu" namespace.
-void Parser::ParseGNUAttributeArgs(IdentifierInfo *AttrName,
- SourceLocation AttrNameLoc,
- ParsedAttributes &Attrs,
- SourceLocation *EndLoc,
- IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax,
- Declarator *D) {
+void Parser::ParseGNUAttributeArgs(
+ IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
+ SourceLocation ScopeLoc, ParsedAttr::Form Form, Declarator *D) {
assert(Tok.is(tok::l_paren) && "Attribute arg list not starting with '('");
ParsedAttr::Kind AttrKind =
- ParsedAttr::getParsedKind(AttrName, ScopeName, Syntax);
+ ParsedAttr::getParsedKind(AttrName, ScopeName, Form.getSyntax());
if (AttrKind == ParsedAttr::AT_Availability) {
ParseAvailabilityAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
- ScopeLoc, Syntax);
+ ScopeLoc, Form);
return;
} else if (AttrKind == ParsedAttr::AT_ExternalSourceSymbol) {
ParseExternalSourceSymbolAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
- ScopeName, ScopeLoc, Syntax);
+ ScopeName, ScopeLoc, Form);
return;
} else if (AttrKind == ParsedAttr::AT_ObjCBridgeRelated) {
ParseObjCBridgeRelatedAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
- ScopeName, ScopeLoc, Syntax);
+ ScopeName, ScopeLoc, Form);
return;
} else if (AttrKind == ParsedAttr::AT_SwiftNewType) {
ParseSwiftNewTypeAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
- ScopeLoc, Syntax);
+ ScopeLoc, Form);
return;
} else if (AttrKind == ParsedAttr::AT_TypeTagForDatatype) {
ParseTypeTagForDatatypeAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
- ScopeName, ScopeLoc, Syntax);
+ ScopeName, ScopeLoc, Form);
return;
} else if (attributeIsTypeArgAttr(*AttrName)) {
- ParseAttributeWithTypeArg(*AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
- ScopeLoc, Syntax);
+ ParseAttributeWithTypeArg(*AttrName, AttrNameLoc, Attrs, ScopeName,
+ ScopeLoc, Form);
return;
}
// These may refer to the function arguments, but need to be parsed early to
// participate in determining whether it's a redeclaration.
- llvm::Optional<ParseScope> PrototypeScope;
+ std::optional<ParseScope> PrototypeScope;
if (normalizeAttrName(AttrName->getName()) == "enable_if" &&
D && D->isFunctionDeclarator()) {
DeclaratorChunk::FunctionTypeInfo FTI = D->getFunctionTypeInfo();
@@ -507,41 +647,41 @@ void Parser::ParseGNUAttributeArgs(IdentifierInfo *AttrName,
}
ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
- ScopeLoc, Syntax);
+ ScopeLoc, Form);
}
unsigned Parser::ParseClangAttributeArgs(
IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax) {
+ SourceLocation ScopeLoc, ParsedAttr::Form Form) {
assert(Tok.is(tok::l_paren) && "Attribute arg list not starting with '('");
ParsedAttr::Kind AttrKind =
- ParsedAttr::getParsedKind(AttrName, ScopeName, Syntax);
+ ParsedAttr::getParsedKind(AttrName, ScopeName, Form.getSyntax());
switch (AttrKind) {
default:
return ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, EndLoc,
- ScopeName, ScopeLoc, Syntax);
+ ScopeName, ScopeLoc, Form);
case ParsedAttr::AT_ExternalSourceSymbol:
ParseExternalSourceSymbolAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
- ScopeName, ScopeLoc, Syntax);
+ ScopeName, ScopeLoc, Form);
break;
case ParsedAttr::AT_Availability:
ParseAvailabilityAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
- ScopeLoc, Syntax);
+ ScopeLoc, Form);
break;
case ParsedAttr::AT_ObjCBridgeRelated:
ParseObjCBridgeRelatedAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
- ScopeName, ScopeLoc, Syntax);
+ ScopeName, ScopeLoc, Form);
break;
case ParsedAttr::AT_SwiftNewType:
ParseSwiftNewTypeAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
- ScopeLoc, Syntax);
+ ScopeLoc, Form);
break;
case ParsedAttr::AT_TypeTagForDatatype:
ParseTypeTagForDatatypeAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc,
- ScopeName, ScopeLoc, Syntax);
+ ScopeName, ScopeLoc, Form);
break;
}
return !Attrs.empty() ? Attrs.begin()->getNumArgs() : 0;
@@ -550,9 +690,11 @@ unsigned Parser::ParseClangAttributeArgs(
bool Parser::ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs) {
+ unsigned ExistingAttrs = Attrs.size();
+
// If the attribute isn't known, we will not attempt to parse any
// arguments.
- if (!hasAttribute(AttrSyntax::Declspec, nullptr, AttrName,
+ if (!hasAttribute(AttributeCommonInfo::Syntax::AS_Declspec, nullptr, AttrName,
getTargetInfo(), getLangOpts())) {
// Eat the left paren, then skip to the ending right paren.
ConsumeParen();
@@ -671,18 +813,18 @@ bool Parser::ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
if (!HasInvalidAccessor)
Attrs.addNewPropertyAttr(AttrName, AttrNameLoc, nullptr, SourceLocation(),
AccessorNames[AK_Get], AccessorNames[AK_Put],
- ParsedAttr::AS_Declspec);
+ ParsedAttr::Form::Declspec());
T.skipToEnd();
return !HasInvalidAccessor;
}
unsigned NumArgs =
ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, nullptr, nullptr,
- SourceLocation(), ParsedAttr::AS_Declspec);
+ SourceLocation(), ParsedAttr::Form::Declspec());
// If this attribute's args were parsed, and it was expected to have
// arguments but none were provided, emit a diagnostic.
- if (!Attrs.empty() && Attrs.begin()->getMaxArgs() && !NumArgs) {
+ if (ExistingAttrs < Attrs.size() && Attrs.back().getMaxArgs() && !NumArgs) {
Diag(OpenParenLoc, diag::err_attribute_requires_arguments) << AttrName;
return false;
}
@@ -695,11 +837,13 @@ bool Parser::ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
/// [MS] extended-decl-modifier-seq:
/// extended-decl-modifier[opt]
/// extended-decl-modifier extended-decl-modifier-seq
-void Parser::ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
- SourceLocation *End) {
+void Parser::ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs) {
assert(getLangOpts().DeclSpecKeyword && "__declspec keyword is not enabled");
assert(Tok.is(tok::kw___declspec) && "Not a declspec!");
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation EndLoc = StartLoc;
+
while (Tok.is(tok::kw___declspec)) {
ConsumeToken();
BalancedDelimiterTracker T(*this, tok::l_paren);
@@ -714,6 +858,12 @@ void Parser::ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
if (TryConsumeToken(tok::comma))
continue;
+ if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
+ Actions.CodeCompleteAttribute(AttributeCommonInfo::AS_Declspec);
+ return;
+ }
+
// We expect either a well-known identifier or a generic string. Anything
// else is a malformed declspec.
bool IsString = Tok.getKind() == tok::string_literal;
@@ -753,18 +903,20 @@ void Parser::ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
if (!AttrHandled)
Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- ParsedAttr::AS_Declspec);
+ ParsedAttr::Form::Declspec());
}
T.consumeClose();
- if (End)
- *End = T.getCloseLocation();
+ EndLoc = T.getCloseLocation();
}
+
+ Attrs.Range = SourceRange(StartLoc, EndLoc);
}
void Parser::ParseMicrosoftTypeAttributes(ParsedAttributes &attrs) {
// Treat these like attributes
while (true) {
- switch (Tok.getKind()) {
+ auto Kind = Tok.getKind();
+ switch (Kind) {
case tok::kw___fastcall:
case tok::kw___stdcall:
case tok::kw___thiscall:
@@ -779,7 +931,7 @@ void Parser::ParseMicrosoftTypeAttributes(ParsedAttributes &attrs) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = ConsumeToken();
attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- ParsedAttr::AS_Keyword);
+ Kind);
break;
}
default:
@@ -788,6 +940,22 @@ void Parser::ParseMicrosoftTypeAttributes(ParsedAttributes &attrs) {
}
}
+void Parser::ParseWebAssemblyFuncrefTypeAttribute(ParsedAttributes &attrs) {
+ assert(Tok.is(tok::kw___funcref));
+ SourceLocation StartLoc = Tok.getLocation();
+ if (!getTargetInfo().getTriple().isWasm()) {
+ ConsumeToken();
+ Diag(StartLoc, diag::err_wasm_funcref_not_wasm);
+ return;
+ }
+
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = ConsumeToken();
+ attrs.addNew(AttrName, AttrNameLoc, /*ScopeName=*/nullptr,
+ /*ScopeLoc=*/SourceLocation{}, /*Args=*/nullptr, /*numArgs=*/0,
+ tok::kw___funcref);
+}
+
void Parser::DiagnoseAndSkipExtendedMicrosoftTypeAttributes() {
SourceLocation StartLoc = Tok.getLocation();
SourceLocation EndLoc = SkipExtendedMicrosoftTypeAttributes();
@@ -830,7 +998,7 @@ void Parser::ParseBorlandTypeAttributes(ParsedAttributes &attrs) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = ConsumeToken();
attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- ParsedAttr::AS_Keyword);
+ tok::kw___pascal);
}
}
@@ -840,7 +1008,16 @@ void Parser::ParseOpenCLKernelAttributes(ParsedAttributes &attrs) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = ConsumeToken();
attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- ParsedAttr::AS_Keyword);
+ tok::kw___kernel);
+ }
+}
+
+void Parser::ParseCUDAFunctionAttributes(ParsedAttributes &attrs) {
+ while (Tok.is(tok::kw___noinline__)) {
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = ConsumeToken();
+ attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
+ tok::kw___noinline__);
}
}
@@ -848,13 +1025,25 @@ void Parser::ParseOpenCLQualifiers(ParsedAttributes &Attrs) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = Tok.getLocation();
Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- ParsedAttr::AS_Keyword);
+ Tok.getKind());
+}
+
+bool Parser::isHLSLQualifier(const Token &Tok) const {
+ return Tok.is(tok::kw_groupshared);
+}
+
+void Parser::ParseHLSLQualifiers(ParsedAttributes &Attrs) {
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ auto Kind = Tok.getKind();
+ SourceLocation AttrNameLoc = ConsumeToken();
+ Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0, Kind);
}
void Parser::ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs) {
// Treat these like attributes, even though they're type specifiers.
while (true) {
- switch (Tok.getKind()) {
+ auto Kind = Tok.getKind();
+ switch (Kind) {
case tok::kw__Nonnull:
case tok::kw__Nullable:
case tok::kw__Nullable_result:
@@ -865,7 +1054,7 @@ void Parser::ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs) {
Diag(AttrNameLoc, diag::ext_nullability)
<< AttrName;
attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- ParsedAttr::AS_Keyword);
+ Kind);
break;
}
default:
@@ -1022,13 +1211,10 @@ VersionTuple Parser::ParseVersionTuple(SourceRange &Range) {
/// 'replacement' '=' <string>
/// opt-message:
/// 'message' '=' <string>
-void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
- SourceLocation AvailabilityLoc,
- ParsedAttributes &attrs,
- SourceLocation *endLoc,
- IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax) {
+void Parser::ParseAvailabilityAttribute(
+ IdentifierInfo &Availability, SourceLocation AvailabilityLoc,
+ ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName,
+ SourceLocation ScopeLoc, ParsedAttr::Form Form) {
enum { Introduced, Deprecated, Obsoleted, Unknown };
AvailabilityChange Changes[Unknown];
ExprResult MessageExpr, ReplacementExpr;
@@ -1130,31 +1316,19 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
}
ConsumeToken();
if (Keyword == Ident_message || Keyword == Ident_replacement) {
- if (Tok.isNot(tok::string_literal)) {
+ if (!isTokenStringLiteral()) {
Diag(Tok, diag::err_expected_string_literal)
<< /*Source='availability attribute'*/2;
SkipUntil(tok::r_paren, StopAtSemi);
return;
}
- if (Keyword == Ident_message)
- MessageExpr = ParseStringLiteralExpression();
- else
- ReplacementExpr = ParseStringLiteralExpression();
- // Also reject wide string literals.
- if (StringLiteral *MessageStringLiteral =
- cast_or_null<StringLiteral>(MessageExpr.get())) {
- if (!MessageStringLiteral->isAscii()) {
- Diag(MessageStringLiteral->getSourceRange().getBegin(),
- diag::err_expected_string_literal)
- << /*Source='availability attribute'*/ 2;
- SkipUntil(tok::r_paren, StopAtSemi);
- return;
- }
- }
- if (Keyword == Ident_message)
+ if (Keyword == Ident_message) {
+ MessageExpr = ParseUnevaluatedStringLiteralExpression();
break;
- else
+ } else {
+ ReplacementExpr = ParseUnevaluatedStringLiteralExpression();
continue;
+ }
}
// Special handling of 'NA' only when applied to introduced or
@@ -1234,14 +1408,10 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
// Record this attribute
attrs.addNew(&Availability,
- SourceRange(AvailabilityLoc, T.getCloseLocation()),
- ScopeName, ScopeLoc,
- Platform,
- Changes[Introduced],
- Changes[Deprecated],
- Changes[Obsoleted],
- UnavailableLoc, MessageExpr.get(),
- Syntax, StrictLoc, ReplacementExpr.get());
+ SourceRange(AvailabilityLoc, T.getCloseLocation()), ScopeName,
+ ScopeLoc, Platform, Changes[Introduced], Changes[Deprecated],
+ Changes[Obsoleted], UnavailableLoc, MessageExpr.get(), Form,
+ StrictLoc, ReplacementExpr.get());
}
/// Parse the contents of the "external_source_symbol" attribute.
@@ -1256,11 +1426,12 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
/// keyword-arg:
/// 'language' '=' <string>
/// 'defined_in' '=' <string>
+/// 'USR' '=' <string>
/// 'generated_declaration'
void Parser::ParseExternalSourceSymbolAttribute(
IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc,
ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax) {
+ SourceLocation ScopeLoc, ParsedAttr::Form Form) {
// Opening '('.
BalancedDelimiterTracker T(*this, tok::l_paren);
if (T.expectAndConsume())
@@ -1271,6 +1442,7 @@ void Parser::ParseExternalSourceSymbolAttribute(
Ident_language = PP.getIdentifierInfo("language");
Ident_defined_in = PP.getIdentifierInfo("defined_in");
Ident_generated_declaration = PP.getIdentifierInfo("generated_declaration");
+ Ident_USR = PP.getIdentifierInfo("USR");
}
ExprResult Language;
@@ -1278,6 +1450,8 @@ void Parser::ParseExternalSourceSymbolAttribute(
ExprResult DefinedInExpr;
bool HasDefinedIn = false;
IdentifierLoc *GeneratedDeclaration = nullptr;
+ ExprResult USR;
+ bool HasUSR = false;
// Parse the language/defined_in/generated_declaration keywords
do {
@@ -1299,7 +1473,8 @@ void Parser::ParseExternalSourceSymbolAttribute(
continue;
}
- if (Keyword != Ident_language && Keyword != Ident_defined_in) {
+ if (Keyword != Ident_language && Keyword != Ident_defined_in &&
+ Keyword != Ident_USR) {
Diag(Tok, diag::err_external_source_symbol_expected_keyword);
SkipUntil(tok::r_paren, StopAtSemi);
return;
@@ -1312,16 +1487,22 @@ void Parser::ParseExternalSourceSymbolAttribute(
return;
}
- bool HadLanguage = HasLanguage, HadDefinedIn = HasDefinedIn;
+ bool HadLanguage = HasLanguage, HadDefinedIn = HasDefinedIn,
+ HadUSR = HasUSR;
if (Keyword == Ident_language)
HasLanguage = true;
+ else if (Keyword == Ident_USR)
+ HasUSR = true;
else
HasDefinedIn = true;
- if (Tok.isNot(tok::string_literal)) {
+ if (!isTokenStringLiteral()) {
Diag(Tok, diag::err_expected_string_literal)
<< /*Source='external_source_symbol attribute'*/ 3
- << /*language | source container*/ (Keyword != Ident_language);
+ << /*language | source container | USR*/ (
+ Keyword == Ident_language
+ ? 0
+ : (Keyword == Ident_defined_in ? 1 : 2));
SkipUntil(tok::comma, tok::r_paren, StopAtSemi | StopBeforeMatch);
continue;
}
@@ -1329,19 +1510,27 @@ void Parser::ParseExternalSourceSymbolAttribute(
if (HadLanguage) {
Diag(KeywordLoc, diag::err_external_source_symbol_duplicate_clause)
<< Keyword;
- ParseStringLiteralExpression();
+ ParseUnevaluatedStringLiteralExpression();
continue;
}
- Language = ParseStringLiteralExpression();
+ Language = ParseUnevaluatedStringLiteralExpression();
+ } else if (Keyword == Ident_USR) {
+ if (HadUSR) {
+ Diag(KeywordLoc, diag::err_external_source_symbol_duplicate_clause)
+ << Keyword;
+ ParseUnevaluatedStringLiteralExpression();
+ continue;
+ }
+ USR = ParseUnevaluatedStringLiteralExpression();
} else {
assert(Keyword == Ident_defined_in && "Invalid clause keyword!");
if (HadDefinedIn) {
Diag(KeywordLoc, diag::err_external_source_symbol_duplicate_clause)
<< Keyword;
- ParseStringLiteralExpression();
+ ParseUnevaluatedStringLiteralExpression();
continue;
}
- DefinedInExpr = ParseStringLiteralExpression();
+ DefinedInExpr = ParseUnevaluatedStringLiteralExpression();
}
} while (TryConsumeToken(tok::comma));
@@ -1351,10 +1540,10 @@ void Parser::ParseExternalSourceSymbolAttribute(
if (EndLoc)
*EndLoc = T.getCloseLocation();
- ArgsUnion Args[] = {Language.get(), DefinedInExpr.get(),
- GeneratedDeclaration};
+ ArgsUnion Args[] = {Language.get(), DefinedInExpr.get(), GeneratedDeclaration,
+ USR.get()};
Attrs.addNew(&ExternalSourceSymbol, SourceRange(Loc, T.getCloseLocation()),
- ScopeName, ScopeLoc, Args, llvm::array_lengthof(Args), Syntax);
+ ScopeName, ScopeLoc, Args, std::size(Args), Form);
}
/// Parse the contents of the "objc_bridge_related" attribute.
@@ -1368,13 +1557,10 @@ void Parser::ParseExternalSourceSymbolAttribute(
/// opt-instance_method:
/// Identifier | <empty>
///
-void Parser::ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
- SourceLocation ObjCBridgeRelatedLoc,
- ParsedAttributes &attrs,
- SourceLocation *endLoc,
- IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax) {
+void Parser::ParseObjCBridgeRelatedAttribute(
+ IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc,
+ ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
+ SourceLocation ScopeLoc, ParsedAttr::Form Form) {
// Opening '('.
BalancedDelimiterTracker T(*this, tok::l_paren);
if (T.consumeOpen()) {
@@ -1430,24 +1616,20 @@ void Parser::ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
if (T.consumeClose())
return;
- if (endLoc)
- *endLoc = T.getCloseLocation();
+ if (EndLoc)
+ *EndLoc = T.getCloseLocation();
// Record this attribute
- attrs.addNew(&ObjCBridgeRelated,
+ Attrs.addNew(&ObjCBridgeRelated,
SourceRange(ObjCBridgeRelatedLoc, T.getCloseLocation()),
- ScopeName, ScopeLoc,
- RelatedClass,
- ClassMethod,
- InstanceMethod,
- Syntax);
+ ScopeName, ScopeLoc, RelatedClass, ClassMethod, InstanceMethod,
+ Form);
}
-
void Parser::ParseSwiftNewTypeAttribute(
IdentifierInfo &AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax) {
+ SourceLocation ScopeLoc, ParsedAttr::Form Form) {
BalancedDelimiterTracker T(*this, tok::l_paren);
// Opening '('
@@ -1482,17 +1664,13 @@ void Parser::ParseSwiftNewTypeAttribute(
ArgsUnion Args[] = {SwiftType};
Attrs.addNew(&AttrName, SourceRange(AttrNameLoc, T.getCloseLocation()),
- ScopeName, ScopeLoc, Args, llvm::array_lengthof(Args), Syntax);
+ ScopeName, ScopeLoc, Args, std::size(Args), Form);
}
-
-void Parser::ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
- SourceLocation AttrNameLoc,
- ParsedAttributes &Attrs,
- SourceLocation *EndLoc,
- IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax) {
+void Parser::ParseTypeTagForDatatypeAttribute(
+ IdentifierInfo &AttrName, SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
+ SourceLocation ScopeLoc, ParsedAttr::Form Form) {
assert(Tok.is(tok::l_paren) && "Attribute arg list not starting with '('");
BalancedDelimiterTracker T(*this, tok::l_paren);
@@ -1541,7 +1719,7 @@ void Parser::ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
if (!T.consumeClose()) {
Attrs.addNewTypeTagForDatatype(&AttrName, AttrNameLoc, ScopeName, ScopeLoc,
ArgumentKind, MatchingCType.get(),
- LayoutCompatible, MustBeNull, Syntax);
+ LayoutCompatible, MustBeNull, Form);
}
if (EndLoc)
@@ -1586,34 +1764,48 @@ bool Parser::DiagnoseProhibitedCXX11Attribute() {
/// attribute-specifier in a location where an attribute is not permitted, but
/// we know where the attributes ought to be written. Parse them anyway, and
/// provide a fixit moving them to the right place.
-void Parser::DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
+void Parser::DiagnoseMisplacedCXX11Attribute(ParsedAttributes &Attrs,
SourceLocation CorrectLocation) {
assert((Tok.is(tok::l_square) && NextToken().is(tok::l_square)) ||
- Tok.is(tok::kw_alignas));
+ Tok.is(tok::kw_alignas) || Tok.isRegularKeywordAttribute());
// Consume the attributes.
+ auto Keyword =
+ Tok.isRegularKeywordAttribute() ? Tok.getIdentifierInfo() : nullptr;
SourceLocation Loc = Tok.getLocation();
ParseCXX11Attributes(Attrs);
CharSourceRange AttrRange(SourceRange(Loc, Attrs.Range.getEnd()), true);
// FIXME: use err_attributes_misplaced
- Diag(Loc, diag::err_attributes_not_allowed)
- << FixItHint::CreateInsertionFromRange(CorrectLocation, AttrRange)
- << FixItHint::CreateRemoval(AttrRange);
+ (Keyword ? Diag(Loc, diag::err_keyword_not_allowed) << Keyword
+ : Diag(Loc, diag::err_attributes_not_allowed))
+ << FixItHint::CreateInsertionFromRange(CorrectLocation, AttrRange)
+ << FixItHint::CreateRemoval(AttrRange);
}
void Parser::DiagnoseProhibitedAttributes(
- const SourceRange &Range, const SourceLocation CorrectLocation) {
+ const ParsedAttributesView &Attrs, const SourceLocation CorrectLocation) {
+ auto *FirstAttr = Attrs.empty() ? nullptr : &Attrs.front();
if (CorrectLocation.isValid()) {
- CharSourceRange AttrRange(Range, true);
- Diag(CorrectLocation, diag::err_attributes_misplaced)
+ CharSourceRange AttrRange(Attrs.Range, true);
+ (FirstAttr && FirstAttr->isRegularKeywordAttribute()
+ ? Diag(CorrectLocation, diag::err_keyword_misplaced) << FirstAttr
+ : Diag(CorrectLocation, diag::err_attributes_misplaced))
<< FixItHint::CreateInsertionFromRange(CorrectLocation, AttrRange)
<< FixItHint::CreateRemoval(AttrRange);
- } else
- Diag(Range.getBegin(), diag::err_attributes_not_allowed) << Range;
+ } else {
+ const SourceRange &Range = Attrs.Range;
+ (FirstAttr && FirstAttr->isRegularKeywordAttribute()
+ ? Diag(Range.getBegin(), diag::err_keyword_not_allowed) << FirstAttr
+ : Diag(Range.getBegin(), diag::err_attributes_not_allowed))
+ << Range;
+ }
}
-void Parser::ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
- unsigned DiagID, bool DiagnoseEmptyAttrs) {
+void Parser::ProhibitCXX11Attributes(ParsedAttributes &Attrs,
+ unsigned AttrDiagID,
+ unsigned KeywordDiagID,
+ bool DiagnoseEmptyAttrs,
+ bool WarnOnUnknownAttrs) {
if (DiagnoseEmptyAttrs && Attrs.empty() && Attrs.Range.isValid()) {
// An attribute list has been parsed, but it was empty.
@@ -1624,36 +1816,43 @@ void Parser::ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
Lexer::getRawToken(Attrs.Range.getBegin(), FirstLSquare, SM, LangOpts);
if (FirstLSquare.is(tok::l_square)) {
- llvm::Optional<Token> SecondLSquare =
+ std::optional<Token> SecondLSquare =
Lexer::findNextToken(FirstLSquare.getLocation(), SM, LangOpts);
if (SecondLSquare && SecondLSquare->is(tok::l_square)) {
// The attribute range starts with [[, but is empty. So this must
// be [[]], which we are supposed to diagnose because
// DiagnoseEmptyAttrs is true.
- Diag(Attrs.Range.getBegin(), DiagID) << Attrs.Range;
+ Diag(Attrs.Range.getBegin(), AttrDiagID) << Attrs.Range;
return;
}
}
}
for (const ParsedAttr &AL : Attrs) {
- if (!AL.isCXX11Attribute() && !AL.isC2xAttribute())
+ if (AL.isRegularKeywordAttribute()) {
+ Diag(AL.getLoc(), KeywordDiagID) << AL;
+ AL.setInvalid();
continue;
- if (AL.getKind() == ParsedAttr::UnknownAttribute)
- Diag(AL.getLoc(), diag::warn_unknown_attribute_ignored)
- << AL << AL.getRange();
- else {
- Diag(AL.getLoc(), DiagID) << AL;
+ }
+ if (!AL.isStandardAttributeSyntax())
+ continue;
+ if (AL.getKind() == ParsedAttr::UnknownAttribute) {
+ if (WarnOnUnknownAttrs)
+ Diag(AL.getLoc(), diag::warn_unknown_attribute_ignored)
+ << AL << AL.getRange();
+ } else {
+ Diag(AL.getLoc(), AttrDiagID) << AL;
AL.setInvalid();
}
}
}
-void Parser::DiagnoseCXX11AttributeExtension(ParsedAttributesWithRange &Attrs) {
+void Parser::DiagnoseCXX11AttributeExtension(ParsedAttributes &Attrs) {
for (const ParsedAttr &PA : Attrs) {
- if (PA.isCXX11Attribute() || PA.isC2xAttribute())
- Diag(PA.getLoc(), diag::ext_cxx11_attr_placement) << PA << PA.getRange();
+ if (PA.isStandardAttributeSyntax() || PA.isRegularKeywordAttribute())
+ Diag(PA.getLoc(), diag::ext_cxx11_attr_placement)
+ << PA << PA.isRegularKeywordAttribute() << PA.getRange();
}
}
@@ -1664,7 +1863,7 @@ void Parser::DiagnoseCXX11AttributeExtension(ParsedAttributesWithRange &Attrs) {
// Also, Microsoft-style [attributes] seem to affect the type instead of the
// variable.
// This function moves attributes that should apply to the type off DS to Attrs.
-void Parser::stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
+void Parser::stripTypeAttributesOffDeclSpec(ParsedAttributes &Attrs,
DeclSpec &DS,
Sema::TagUseKind TUK) {
if (TUK == Sema::TUK_Reference)
@@ -1701,10 +1900,11 @@ void Parser::stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
/// [C++11/C11] static_assert-declaration
/// others... [FIXME]
///
-Parser::DeclGroupPtrTy
-Parser::ParseDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
- ParsedAttributesWithRange &attrs,
- SourceLocation *DeclSpecStart) {
+Parser::DeclGroupPtrTy Parser::ParseDeclaration(DeclaratorContext Context,
+ SourceLocation &DeclEnd,
+ ParsedAttributes &DeclAttrs,
+ ParsedAttributes &DeclSpecAttrs,
+ SourceLocation *DeclSpecStart) {
ParenBraceBracketBalancer BalancerRAIIObj(*this);
// Must temporarily exit the objective-c container scope for
// parsing c none objective-c decls.
@@ -1714,32 +1914,45 @@ Parser::ParseDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
switch (Tok.getKind()) {
case tok::kw_template:
case tok::kw_export:
- ProhibitAttributes(attrs);
- SingleDecl = ParseDeclarationStartingWithTemplate(Context, DeclEnd, attrs);
+ ProhibitAttributes(DeclAttrs);
+ ProhibitAttributes(DeclSpecAttrs);
+ SingleDecl =
+ ParseDeclarationStartingWithTemplate(Context, DeclEnd, DeclAttrs);
break;
case tok::kw_inline:
// Could be the start of an inline namespace. Allowed as an ext in C++03.
if (getLangOpts().CPlusPlus && NextToken().is(tok::kw_namespace)) {
- ProhibitAttributes(attrs);
+ ProhibitAttributes(DeclAttrs);
+ ProhibitAttributes(DeclSpecAttrs);
SourceLocation InlineLoc = ConsumeToken();
return ParseNamespace(Context, DeclEnd, InlineLoc);
}
- return ParseSimpleDeclaration(Context, DeclEnd, attrs, true, nullptr,
- DeclSpecStart);
+ return ParseSimpleDeclaration(Context, DeclEnd, DeclAttrs, DeclSpecAttrs,
+ true, nullptr, DeclSpecStart);
+
+ case tok::kw_cbuffer:
+ case tok::kw_tbuffer:
+ SingleDecl = ParseHLSLBuffer(DeclEnd);
+ break;
case tok::kw_namespace:
- ProhibitAttributes(attrs);
+ ProhibitAttributes(DeclAttrs);
+ ProhibitAttributes(DeclSpecAttrs);
return ParseNamespace(Context, DeclEnd);
- case tok::kw_using:
+ case tok::kw_using: {
+ ParsedAttributes Attrs(AttrFactory);
+ takeAndConcatenateAttrs(DeclAttrs, DeclSpecAttrs, Attrs);
return ParseUsingDirectiveOrDeclaration(Context, ParsedTemplateInfo(),
- DeclEnd, attrs);
+ DeclEnd, Attrs);
+ }
case tok::kw_static_assert:
case tok::kw__Static_assert:
- ProhibitAttributes(attrs);
+ ProhibitAttributes(DeclAttrs);
+ ProhibitAttributes(DeclSpecAttrs);
SingleDecl = ParseStaticAssertDeclaration(DeclEnd);
break;
default:
- return ParseSimpleDeclaration(Context, DeclEnd, attrs, true, nullptr,
- DeclSpecStart);
+ return ParseSimpleDeclaration(Context, DeclEnd, DeclAttrs, DeclSpecAttrs,
+ true, nullptr, DeclSpecStart);
}
// This routine returns a DeclGroup, if the thing we parsed only contains a
@@ -1770,10 +1983,16 @@ Parser::ParseDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
/// DeclSpecStart if DeclSpecStart is non-null.
Parser::DeclGroupPtrTy Parser::ParseSimpleDeclaration(
DeclaratorContext Context, SourceLocation &DeclEnd,
- ParsedAttributesWithRange &Attrs, bool RequireSemi, ForRangeInit *FRI,
- SourceLocation *DeclSpecStart) {
+ ParsedAttributes &DeclAttrs, ParsedAttributes &DeclSpecAttrs,
+ bool RequireSemi, ForRangeInit *FRI, SourceLocation *DeclSpecStart) {
+ // Need to retain these for diagnostics before we add them to the DeclSepc.
+ ParsedAttributesView OriginalDeclSpecAttrs;
+ OriginalDeclSpecAttrs.addAll(DeclSpecAttrs.begin(), DeclSpecAttrs.end());
+ OriginalDeclSpecAttrs.Range = DeclSpecAttrs.Range;
+
// Parse the common declaration-specifiers piece.
ParsingDeclSpec DS(*this);
+ DS.takeAttributesFrom(DeclSpecAttrs);
DeclSpecContext DSContext = getDeclSpecContextFromDeclaratorContext(Context);
ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS_none, DSContext);
@@ -1787,12 +2006,13 @@ Parser::DeclGroupPtrTy Parser::ParseSimpleDeclaration(
// C99 6.7.2.3p6: Handle "struct-or-union identifier;", "enum { X };"
// declaration-specifiers init-declarator-list[opt] ';'
if (Tok.is(tok::semi)) {
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(DeclAttrs);
DeclEnd = Tok.getLocation();
if (RequireSemi) ConsumeToken();
RecordDecl *AnonRecord = nullptr;
- Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS_none,
- DS, AnonRecord);
+ Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(
+ getCurScope(), AS_none, DS, ParsedAttributesView::none(), AnonRecord);
+ Actions.ActOnDefinedDeclarationSpecifier(TheDecl);
DS.complete(TheDecl);
if (AnonRecord) {
Decl* decls[] = {AnonRecord, TheDecl};
@@ -1801,11 +2021,13 @@ Parser::DeclGroupPtrTy Parser::ParseSimpleDeclaration(
return Actions.ConvertDeclToDeclGroup(TheDecl);
}
+ if (DS.hasTagDefinition())
+ Actions.ActOnDefinedDeclarationSpecifier(DS.getRepAsDecl());
+
if (DeclSpecStart)
DS.SetRangeStart(*DeclSpecStart);
- DS.takeAttributesFrom(Attrs);
- return ParseDeclGroup(DS, Context, &DeclEnd, FRI);
+ return ParseDeclGroup(DS, Context, DeclAttrs, &DeclEnd, FRI);
}
/// Returns true if this might be the start of a declarator, or a common typo
@@ -1866,11 +2088,11 @@ bool Parser::MightBeDeclarator(DeclaratorContext Context) {
return getLangOpts().CPlusPlus11 && isCXX11VirtSpecifier(NextToken());
default:
- return false;
+ return Tok.isRegularKeywordAttribute();
}
default:
- return false;
+ return Tok.isRegularKeywordAttribute();
}
}
@@ -1945,6 +2167,7 @@ void Parser::SkipMalformedDecl() {
case tok::annot_module_begin:
case tok::annot_module_end:
case tok::annot_module_include:
+ case tok::annot_repl_input_end:
return;
default:
@@ -1960,10 +2183,16 @@ void Parser::SkipMalformedDecl() {
/// result.
Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
DeclaratorContext Context,
+ ParsedAttributes &Attrs,
SourceLocation *DeclEnd,
ForRangeInit *FRI) {
// Parse the first declarator.
- ParsingDeclarator D(*this, DS, Context);
+ // Consume all of the attributes from `Attrs` by moving them to our own local
+ // list. This ensures that we will not attempt to interpret them as statement
+ // attributes higher up the callchain.
+ ParsedAttributes LocalAttrs(AttrFactory);
+ LocalAttrs.takeAllFrom(Attrs);
+ ParsingDeclarator D(*this, DS, LocalAttrs, Context);
ParseDeclarator(D);
// Bail out if the first declarator didn't seem well-formed.
@@ -1972,6 +2201,9 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
return nullptr;
}
+ if (getLangOpts().HLSL)
+ MaybeParseHLSLSemantics(D);
+
if (Tok.is(tok::kw_requires))
ParseTrailingRequiresClause(D);
@@ -2001,15 +2233,25 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
<< (Fixit ? FixItHint::CreateInsertion(D.getBeginLoc(), "_Noreturn ")
: FixItHint());
}
- }
- // Check to see if we have a function *definition* which must have a body.
- if (D.isFunctionDeclarator()) {
+ // Check to see if we have a function *definition* which must have a body.
if (Tok.is(tok::equal) && NextToken().is(tok::code_completion)) {
cutOffParsing();
Actions.CodeCompleteAfterFunctionEquals(D);
return nullptr;
}
+ // We're at the point where the parsing of function declarator is finished.
+ //
+ // A common error is that users accidently add a virtual specifier
+ // (e.g. override) in an out-line method definition.
+ // We attempt to recover by stripping all these specifiers coming after
+ // the declarator.
+ while (auto Specifier = isCXX11VirtSpecifier()) {
+ Diag(Tok, diag::err_virt_specifier_outside_class)
+ << VirtSpecifiers::getSpecifierName(Specifier)
+ << FixItHint::CreateRemoval(Tok.getLocation());
+ ConsumeToken();
+ }
// Look at the next token to make sure that this isn't a function
// declaration. We have to check this because __attribute__ might be the
// start of a function definition in GCC-extended K&R C.
@@ -2032,13 +2274,16 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
return Actions.ConvertDeclToDeclGroup(TheDecl);
}
- if (isDeclarationSpecifier()) {
- // If there is an invalid declaration specifier right after the
- // function prototype, then we must be in a missing semicolon case
- // where this isn't actually a body. Just fall through into the code
- // that handles it as a prototype, and let the top-level code handle
- // the erroneous declspec where it would otherwise expect a comma or
- // semicolon.
+ if (isDeclarationSpecifier(ImplicitTypenameContext::No) ||
+ Tok.is(tok::kw_namespace)) {
+ // If there is an invalid declaration specifier or a namespace
+ // definition right after the function prototype, then we must be in a
+ // missing semicolon case where this isn't actually a body. Just fall
+ // through into the code that handles it as a prototype, and let the
+ // top-level code handle the erroneous declspec where it would
+ // otherwise expect a comma or semicolon. Note that
+ // isDeclarationSpecifier already covers 'inline namespace', since
+ // 'inline' can be a declaration specifier.
} else {
Diag(Tok, diag::err_expected_fn_body);
SkipUntil(tok::semi);
@@ -2131,6 +2376,10 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
ParseDeclarator(D);
+
+ if (getLangOpts().HLSL)
+ MaybeParseHLSLSemantics(D);
+
if (!D.isInvalidType()) {
// C++2a [dcl.decl]p1
// init-declarator:
@@ -2155,10 +2404,8 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
// Okay, there was no semicolon and one was expected. If we see a
// declaration specifier, just assume it was missing and continue parsing.
// Otherwise things are very confused and we skip to recover.
- if (!isDeclarationSpecifier()) {
- SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
- TryConsumeToken(tok::semi);
- }
+ if (!isDeclarationSpecifier(ImplicitTypenameContext::No))
+ SkipMalformedDecl();
}
return Actions.FinalizeDeclaratorGroup(getCurScope(), DS, DeclsInGroup);
@@ -2313,8 +2560,8 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
// Recover as if it were an explicit specialization.
TemplateParameterLists FakedParamLists;
FakedParamLists.push_back(Actions.ActOnTemplateParameterList(
- 0, SourceLocation(), TemplateInfo.TemplateLoc, LAngleLoc, None,
- LAngleLoc, nullptr));
+ 0, SourceLocation(), TemplateInfo.TemplateLoc, LAngleLoc,
+ std::nullopt, LAngleLoc, nullptr));
ThisDecl =
Actions.ActOnTemplateDeclarator(getCurScope(), FakedParamLists, D);
@@ -2324,6 +2571,7 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
}
}
+ Sema::CUDATargetContextRAII X(Actions, Sema::CTCK_InitGlobalVar, ThisDecl);
switch (TheInitKind) {
// Parse declarator '=' initializer.
case InitKind::Equal: {
@@ -2389,15 +2637,15 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
T.consumeOpen();
ExprVector Exprs;
- CommaLocsTy CommaLocs;
InitializerScopeRAII InitScope(*this, D, ThisDecl);
auto ThisVarDecl = dyn_cast_or_null<VarDecl>(ThisDecl);
auto RunSignatureHelp = [&]() {
QualType PreferredType = Actions.ProduceConstructorSignatureHelp(
- getCurScope(), ThisVarDecl->getType()->getCanonicalTypeInternal(),
- ThisDecl->getLocation(), Exprs, T.getOpenLocation());
+ ThisVarDecl->getType()->getCanonicalTypeInternal(),
+ ThisDecl->getLocation(), Exprs, T.getOpenLocation(),
+ /*Braced=*/false);
CalledSignatureHelp = true;
return PreferredType;
};
@@ -2413,11 +2661,17 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
// ProduceConstructorSignatureHelp only on VarDecls.
ExpressionStarts = SetPreferredType;
}
- if (ParseExpressionList(Exprs, CommaLocs, ExpressionStarts)) {
+
+ bool SawError = ParseExpressionList(Exprs, ExpressionStarts);
+
+ InitScope.pop();
+
+ if (SawError) {
if (ThisVarDecl && PP.isCodeCompletionReached() && !CalledSignatureHelp) {
Actions.ProduceConstructorSignatureHelp(
- getCurScope(), ThisVarDecl->getType()->getCanonicalTypeInternal(),
- ThisDecl->getLocation(), Exprs, T.getOpenLocation());
+ ThisVarDecl->getType()->getCanonicalTypeInternal(),
+ ThisDecl->getLocation(), Exprs, T.getOpenLocation(),
+ /*Braced=*/false);
CalledSignatureHelp = true;
}
Actions.ActOnInitializerError(ThisDecl);
@@ -2426,11 +2680,6 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
// Match the ')'.
T.consumeClose();
- assert(!Exprs.empty() && Exprs.size()-1 == CommaLocs.size() &&
- "Unexpected number of commas!");
-
- InitScope.pop();
-
ExprResult Initializer = Actions.ActOnParenListExpr(T.getOpenLocation(),
T.getCloseLocation(),
Exprs);
@@ -2472,12 +2721,14 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
/// type-qualifier specifier-qualifier-list[opt]
/// [GNU] attributes specifier-qualifier-list[opt]
///
-void Parser::ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS,
- DeclSpecContext DSC) {
+void Parser::ParseSpecifierQualifierList(
+ DeclSpec &DS, ImplicitTypenameContext AllowImplicitTypename,
+ AccessSpecifier AS, DeclSpecContext DSC) {
/// specifier-qualifier-list is a subset of declaration-specifiers. Just
/// parse declaration-specifiers and complain about extra stuff.
/// TODO: diagnose attribute-specifiers and alignment-specifiers.
- ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS, DSC);
+ ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS, DSC, nullptr,
+ AllowImplicitTypename);
// Validate declspec for type-name.
unsigned Specs = DS.getParsedSpecifiers();
@@ -2508,6 +2759,8 @@ void Parser::ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS,
Diag(DS.getVirtualSpecLoc(), diag::err_typename_invalid_functionspec);
if (DS.hasExplicitSpecifier())
Diag(DS.getExplicitSpecLoc(), diag::err_typename_invalid_functionspec);
+ if (DS.isNoreturnSpecified())
+ Diag(DS.getNoreturnSpecLoc(), diag::err_typename_invalid_functionspec);
DS.ClearFunctionSpecs();
}
@@ -2555,7 +2808,7 @@ static bool isValidAfterIdentifierInDeclarator(const Token &T) {
bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
- ParsedAttributesWithRange &Attrs) {
+ ParsedAttributes &Attrs) {
assert(Tok.is(tok::identifier) && "should have identifier");
SourceLocation Loc = Tok.getLocation();
@@ -2577,8 +2830,8 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
// error, do lookahead to try to do better recovery. This never applies
// within a type specifier. Outside of C++, we allow this even if the
// language doesn't "officially" support implicit int -- we support
- // implicit int as an extension in C99 and C11.
- if (!isTypeSpecifier(DSC) && !getLangOpts().CPlusPlus &&
+ // implicit int as an extension in some language modes.
+ if (!isTypeSpecifier(DSC) && getLangOpts().isImplicitIntAllowed() &&
isValidAfterIdentifierInDeclarator(NextToken())) {
// If this token is valid for implicit int, e.g. "static x = 4", then
// we just avoid eating the identifier, so it will be parsed as the
@@ -2709,7 +2962,7 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
}
}
// Fall through.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case tok::comma:
case tok::equal:
@@ -2785,44 +3038,75 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
/// DeclaratorContext enumerator values.
Parser::DeclSpecContext
Parser::getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context) {
- if (Context == DeclaratorContext::Member)
+ switch (Context) {
+ case DeclaratorContext::Member:
return DeclSpecContext::DSC_class;
- if (Context == DeclaratorContext::File)
+ case DeclaratorContext::File:
return DeclSpecContext::DSC_top_level;
- if (Context == DeclaratorContext::TemplateParam)
+ case DeclaratorContext::TemplateParam:
return DeclSpecContext::DSC_template_param;
- if (Context == DeclaratorContext::TemplateArg ||
- Context == DeclaratorContext::TemplateTypeArg)
+ case DeclaratorContext::TemplateArg:
+ return DeclSpecContext::DSC_template_arg;
+ case DeclaratorContext::TemplateTypeArg:
return DeclSpecContext::DSC_template_type_arg;
- if (Context == DeclaratorContext::TrailingReturn ||
- Context == DeclaratorContext::TrailingReturnVar)
+ case DeclaratorContext::TrailingReturn:
+ case DeclaratorContext::TrailingReturnVar:
return DeclSpecContext::DSC_trailing;
- if (Context == DeclaratorContext::AliasDecl ||
- Context == DeclaratorContext::AliasTemplate)
+ case DeclaratorContext::AliasDecl:
+ case DeclaratorContext::AliasTemplate:
return DeclSpecContext::DSC_alias_declaration;
- return DeclSpecContext::DSC_normal;
+ case DeclaratorContext::Association:
+ return DeclSpecContext::DSC_association;
+ case DeclaratorContext::TypeName:
+ return DeclSpecContext::DSC_type_specifier;
+ case DeclaratorContext::Condition:
+ return DeclSpecContext::DSC_condition;
+ case DeclaratorContext::ConversionId:
+ return DeclSpecContext::DSC_conv_operator;
+ case DeclaratorContext::CXXNew:
+ return DeclSpecContext::DSC_new;
+ case DeclaratorContext::Prototype:
+ case DeclaratorContext::ObjCResult:
+ case DeclaratorContext::ObjCParameter:
+ case DeclaratorContext::KNRTypeList:
+ case DeclaratorContext::FunctionalCast:
+ case DeclaratorContext::Block:
+ case DeclaratorContext::ForInit:
+ case DeclaratorContext::SelectionInit:
+ case DeclaratorContext::CXXCatch:
+ case DeclaratorContext::ObjCCatch:
+ case DeclaratorContext::BlockLiteral:
+ case DeclaratorContext::LambdaExpr:
+ case DeclaratorContext::LambdaExprParameter:
+ case DeclaratorContext::RequiresExpr:
+ return DeclSpecContext::DSC_normal;
+ }
+
+ llvm_unreachable("Missing DeclaratorContext case");
}
/// ParseAlignArgument - Parse the argument to an alignment-specifier.
///
-/// FIXME: Simply returns an alignof() expression if the argument is a
-/// type. Ideally, the type should be propagated directly into Sema.
-///
/// [C11] type-id
/// [C11] constant-expression
/// [C++0x] type-id ...[opt]
/// [C++0x] assignment-expression ...[opt]
-ExprResult Parser::ParseAlignArgument(SourceLocation Start,
- SourceLocation &EllipsisLoc) {
+ExprResult Parser::ParseAlignArgument(StringRef KWName, SourceLocation Start,
+ SourceLocation &EllipsisLoc, bool &IsType,
+ ParsedType &TypeResult) {
ExprResult ER;
if (isTypeIdInParens()) {
SourceLocation TypeLoc = Tok.getLocation();
ParsedType Ty = ParseTypeName().get();
SourceRange TypeRange(Start, Tok.getLocation());
- ER = Actions.ActOnUnaryExprOrTypeTraitExpr(TypeLoc, UETT_AlignOf, true,
- Ty.getAsOpaquePtr(), TypeRange);
- } else
+ if (Actions.ActOnAlignasTypeArgument(KWName, Ty, TypeLoc, TypeRange))
+ return ExprError();
+ TypeResult = Ty;
+ IsType = true;
+ } else {
ER = ParseConstantExpression();
+ IsType = false;
+ }
if (getLangOpts().CPlusPlus11)
TryConsumeToken(tok::ellipsis, EllipsisLoc);
@@ -2842,16 +3126,21 @@ void Parser::ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *EndLoc) {
assert(Tok.isOneOf(tok::kw_alignas, tok::kw__Alignas) &&
"Not an alignment-specifier!");
-
- IdentifierInfo *KWName = Tok.getIdentifierInfo();
+ Token KWTok = Tok;
+ IdentifierInfo *KWName = KWTok.getIdentifierInfo();
+ auto Kind = KWTok.getKind();
SourceLocation KWLoc = ConsumeToken();
BalancedDelimiterTracker T(*this, tok::l_paren);
if (T.expectAndConsume())
return;
+ bool IsType;
+ ParsedType TypeResult;
SourceLocation EllipsisLoc;
- ExprResult ArgExpr = ParseAlignArgument(T.getOpenLocation(), EllipsisLoc);
+ ExprResult ArgExpr =
+ ParseAlignArgument(PP.getSpelling(KWTok), T.getOpenLocation(),
+ EllipsisLoc, IsType, TypeResult);
if (ArgExpr.isInvalid()) {
T.skipToEnd();
return;
@@ -2861,14 +3150,20 @@ void Parser::ParseAlignmentSpecifier(ParsedAttributes &Attrs,
if (EndLoc)
*EndLoc = T.getCloseLocation();
- ArgsVector ArgExprs;
- ArgExprs.push_back(ArgExpr.get());
- Attrs.addNew(KWName, KWLoc, nullptr, KWLoc, ArgExprs.data(), 1,
- ParsedAttr::AS_Keyword, EllipsisLoc);
+ if (IsType) {
+ Attrs.addNewTypeAttr(KWName, KWLoc, nullptr, KWLoc, TypeResult, Kind,
+ EllipsisLoc);
+ } else {
+ ArgsVector ArgExprs;
+ ArgExprs.push_back(ArgExpr.get());
+ Attrs.addNew(KWName, KWLoc, nullptr, KWLoc, ArgExprs.data(), 1, Kind,
+ EllipsisLoc);
+ }
}
ExprResult Parser::ParseExtIntegerArgument() {
- assert(Tok.is(tok::kw__ExtInt) && "Not an extended int type");
+ assert(Tok.isOneOf(tok::kw__ExtInt, tok::kw__BitInt) &&
+ "Not an extended int type");
ConsumeToken();
BalancedDelimiterTracker T(*this, tok::l_paren);
@@ -2997,17 +3292,6 @@ Parser::DiagnoseMissingSemiAfterTagDefinition(DeclSpec &DS, AccessSpecifier AS,
return false;
}
-// Choose the apprpriate diagnostic error for why fixed point types are
-// disabled, set the previous specifier, and mark as invalid.
-static void SetupFixedPointError(const LangOptions &LangOpts,
- const char *&PrevSpec, unsigned &DiagID,
- bool &isInvalid) {
- assert(!LangOpts.FixedPoint);
- DiagID = diag::err_fixed_point_not_enabled;
- PrevSpec = ""; // Not used by diagnostic
- isInvalid = true;
-}
-
/// ParseDeclarationSpecifiers
/// declaration-specifiers: [C99 6.7]
/// storage-class-specifier declaration-specifiers[opt]
@@ -3035,11 +3319,10 @@ static void SetupFixedPointError(const LangOptions &LangOpts,
/// [OpenCL] '__kernel'
/// 'friend': [C++ dcl.friend]
/// 'constexpr': [C++0x dcl.constexpr]
-void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
- const ParsedTemplateInfo &TemplateInfo,
- AccessSpecifier AS,
- DeclSpecContext DSContext,
- LateParsedAttrList *LateAttrs) {
+void Parser::ParseDeclarationSpecifiers(
+ DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS,
+ DeclSpecContext DSContext, LateParsedAttrList *LateAttrs,
+ ImplicitTypenameContext AllowImplicitTypename) {
if (DS.getSourceRange().isInvalid()) {
// Start the range at the current token but make the end of the range
// invalid. This will make the entire range invalid unless we successfully
@@ -3048,13 +3331,21 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
DS.SetRangeEnd(SourceLocation());
}
+ // If we are in a operator context, convert it back into a type specifier
+ // context for better error handling later on.
+ if (DSContext == DeclSpecContext::DSC_conv_operator) {
+ // No implicit typename here.
+ AllowImplicitTypename = ImplicitTypenameContext::No;
+ DSContext = DeclSpecContext::DSC_type_specifier;
+ }
+
bool EnteringContext = (DSContext == DeclSpecContext::DSC_class ||
DSContext == DeclSpecContext::DSC_top_level);
bool AttrsLastTime = false;
- ParsedAttributesWithRange attrs(AttrFactory);
+ ParsedAttributes attrs(AttrFactory);
// We use Sema's policy to get bool macros right.
PrintingPolicy Policy = Actions.getPrintingPolicy();
- while (1) {
+ while (true) {
bool isInvalid = false;
bool isStorageClass = false;
const char *PrevSpec = nullptr;
@@ -3091,16 +3382,47 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
return true;
};
+ // Turn off usual access checking for template specializations and
+ // instantiations.
+ bool IsTemplateSpecOrInst =
+ (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation ||
+ TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization);
+
switch (Tok.getKind()) {
default:
+ if (Tok.isRegularKeywordAttribute())
+ goto Attribute;
+
DoneWithDeclSpec:
if (!AttrsLastTime)
ProhibitAttributes(attrs);
else {
- // Reject C++11 attributes that appertain to decl specifiers as
- // we don't support any C++11 attributes that appertain to decl
- // specifiers. This also conforms to what g++ 4.8 is doing.
- ProhibitCXX11Attributes(attrs, diag::err_attribute_not_type_attr);
+ // Reject C++11 / C23 attributes that aren't type attributes.
+ for (const ParsedAttr &PA : attrs) {
+ if (!PA.isCXX11Attribute() && !PA.isC23Attribute() &&
+ !PA.isRegularKeywordAttribute())
+ continue;
+ if (PA.getKind() == ParsedAttr::UnknownAttribute)
+ // We will warn about the unknown attribute elsewhere (in
+ // SemaDeclAttr.cpp)
+ continue;
+ // GCC ignores this attribute when placed on the DeclSpec in [[]]
+ // syntax, so we do the same.
+ if (PA.getKind() == ParsedAttr::AT_VectorSize) {
+ Diag(PA.getLoc(), diag::warn_attribute_ignored) << PA;
+ PA.setInvalid();
+ continue;
+ }
+ // We reject AT_LifetimeBound and AT_AnyX86NoCfCheck, even though they
+ // are type attributes, because we historically haven't allowed these
+ // to be used as type attributes in C++11 / C23 syntax.
+ if (PA.isTypeAttr() && PA.getKind() != ParsedAttr::AT_LifetimeBound &&
+ PA.getKind() != ParsedAttr::AT_AnyX86NoCfCheck)
+ continue;
+ Diag(PA.getLoc(), diag::err_attribute_not_type_attr)
+ << PA << PA.isRegularKeywordAttribute();
+ PA.setInvalid();
+ }
DS.takeAttributesFrom(attrs);
}
@@ -3112,9 +3434,10 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
case tok::l_square:
case tok::kw_alignas:
- if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
+ if (!isAllowedCXX11AttributeSpecifier())
goto DoneWithDeclSpec;
+ Attribute:
ProhibitAttributes(attrs);
// FIXME: It would be good to recover by accepting the attributes,
// but attempting to do that now would cause serious
@@ -3146,13 +3469,14 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
return;
}
- if (getCurScope()->getFnParent() || getCurScope()->getBlockParent())
- CCC = Sema::PCC_LocalDeclarationSpecifiers;
- else if (TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate)
+ // Class context can appear inside a function/block, so prioritise that.
+ if (TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate)
CCC = DSContext == DeclSpecContext::DSC_class ? Sema::PCC_MemberTemplate
: Sema::PCC_Template;
else if (DSContext == DeclSpecContext::DSC_class)
CCC = Sema::PCC_Class;
+ else if (getCurScope()->getFnParent() || getCurScope()->getBlockParent())
+ CCC = Sema::PCC_LocalDeclarationSpecifiers;
else if (CurParsedObjCImpl)
CCC = Sema::PCC_ObjCImplementation;
@@ -3163,7 +3487,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
case tok::coloncolon: // ::foo::bar
// C++ scope specifier. Annotate and loop, or bail out on error.
- if (TryAnnotateCXXScopeToken(EnteringContext)) {
+ if (getLangOpts().CPlusPlus &&
+ TryAnnotateCXXScopeToken(EnteringContext)) {
if (!DS.hasTypeSpecifier())
DS.SetTypeSpecError();
goto DoneWithDeclSpec;
@@ -3177,6 +3502,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
goto DoneWithDeclSpec;
CXXScopeSpec SS;
+ if (TemplateInfo.TemplateParams)
+ SS.setTemplateParamLists(*TemplateInfo.TemplateParams);
Actions.RestoreNestedNameSpecifierAnnotation(Tok.getAnnotationValue(),
Tok.getAnnotationRange(),
SS);
@@ -3209,7 +3536,9 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
DSContext == DeclSpecContext::DSC_class) &&
TemplateId->Name &&
Actions.isCurrentClassName(*TemplateId->Name, getCurScope(), &SS) &&
- isConstructorDeclarator(/*Unqualified=*/false)) {
+ isConstructorDeclarator(/*Unqualified=*/false,
+ /*DeductionGuide=*/false,
+ DS.isFriendSpecified())) {
// The user meant this to be an out-of-line constructor
// definition, but template arguments are not allowed
// there. Just allow this as a constructor; we'll
@@ -3221,16 +3550,16 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
ConsumeAnnotationToken(); // The C++ scope.
assert(Tok.is(tok::annot_template_id) &&
"ParseOptionalCXXScopeSpecifier not working");
- AnnotateTemplateIdTokenAsType(SS);
+ AnnotateTemplateIdTokenAsType(SS, AllowImplicitTypename);
continue;
}
- if (TemplateId && TemplateId->Kind == TNK_Concept_template &&
- GetLookAheadToken(2).isOneOf(tok::kw_auto, tok::kw_decltype)) {
+ if (TemplateId && TemplateId->Kind == TNK_Concept_template) {
DS.getTypeSpecScope() = SS;
- // This is a qualified placeholder-specifier, e.g., ::C<int> auto ...
- // Consume the scope annotation and continue to consume the template-id
- // as a placeholder-specifier.
+ // This is probably a qualified placeholder-specifier, e.g., ::C<int>
+ // auto ... Consume the scope annotation and continue to consume the
+ // template-id as a placeholder-specifier. Let the next iteration
+ // diagnose a missing auto.
ConsumeAnnotationToken();
continue;
}
@@ -3248,6 +3577,16 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
ConsumeAnnotationToken(); // The typename
}
+ if (AllowImplicitTypename == ImplicitTypenameContext::Yes &&
+ Next.is(tok::annot_template_id) &&
+ static_cast<TemplateIdAnnotation *>(Next.getAnnotationValue())
+ ->Kind == TNK_Dependent_template_name) {
+ DS.getTypeSpecScope() = SS;
+ ConsumeAnnotationToken(); // The C++ scope.
+ AnnotateTemplateIdTokenAsType(SS, AllowImplicitTypename);
+ continue;
+ }
+
if (Next.isNot(tok::identifier))
goto DoneWithDeclSpec;
@@ -3258,15 +3597,27 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
DSContext == DeclSpecContext::DSC_class) &&
Actions.isCurrentClassName(*Next.getIdentifierInfo(), getCurScope(),
&SS) &&
- isConstructorDeclarator(/*Unqualified*/ false))
+ isConstructorDeclarator(/*Unqualified=*/false,
+ /*DeductionGuide=*/false,
+ DS.isFriendSpecified(),
+ &TemplateInfo))
goto DoneWithDeclSpec;
- ParsedType TypeRep =
- Actions.getTypeName(*Next.getIdentifierInfo(), Next.getLocation(),
- getCurScope(), &SS, false, false, nullptr,
- /*IsCtorOrDtorName=*/false,
- /*WantNontrivialTypeSourceInfo=*/true,
- isClassTemplateDeductionContext(DSContext));
+ // C++20 [temp.spec] 13.9/6.
+ // This disables the access checking rules for function template explicit
+ // instantiation and explicit specialization:
+ // - `return type`.
+ SuppressAccessChecks SAC(*this, IsTemplateSpecOrInst);
+
+ ParsedType TypeRep = Actions.getTypeName(
+ *Next.getIdentifierInfo(), Next.getLocation(), getCurScope(), &SS,
+ false, false, nullptr,
+ /*IsCtorOrDtorName=*/false,
+ /*WantNontrivialTypeSourceInfo=*/true,
+ isClassTemplateDeductionContext(DSContext), AllowImplicitTypename);
+
+ if (IsTemplateSpecOrInst)
+ SAC.done();
// If the referenced identifier is not a type, then this declspec is
// erroneous: We already checked about that it has no type specifier, and
@@ -3280,7 +3631,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
continue;
// Eat the scope spec so the identifier is current.
ConsumeAnnotationToken();
- ParsedAttributesWithRange Attrs(AttrFactory);
+ ParsedAttributes Attrs(AttrFactory);
if (ParseImplicitInt(DS, &SS, TemplateInfo, AS, DSContext, Attrs)) {
if (!Attrs.empty()) {
AttrsLastTime = true;
@@ -3342,7 +3693,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// typedef-name
case tok::kw___super:
case tok::kw_decltype:
- case tok::identifier: {
+ case tok::identifier:
+ ParseIdentifier: {
// This identifier can only be a typedef name if we haven't already seen
// a type-specifier. Without this check we misparse:
// typedef int X; struct Y { short X; }; as 'short int'.
@@ -3377,10 +3729,24 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// In C++, check to see if this is a scope specifier like foo::bar::, if
// so handle it as such. This is important for ctor parsing.
if (getLangOpts().CPlusPlus) {
- if (TryAnnotateCXXScopeToken(EnteringContext)) {
+ // C++20 [temp.spec] 13.9/6.
+ // This disables the access checking rules for function template
+ // explicit instantiation and explicit specialization:
+ // - `return type`.
+ SuppressAccessChecks SAC(*this, IsTemplateSpecOrInst);
+
+ const bool Success = TryAnnotateCXXScopeToken(EnteringContext);
+
+ if (IsTemplateSpecOrInst)
+ SAC.done();
+
+ if (Success) {
+ if (IsTemplateSpecOrInst)
+ SAC.redelay();
DS.SetTypeSpecError();
goto DoneWithDeclSpec;
}
+
if (!Tok.is(tok::identifier))
continue;
}
@@ -3412,7 +3778,9 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// check whether this is a constructor declaration.
if (getLangOpts().CPlusPlus && DSContext == DeclSpecContext::DSC_class &&
Actions.isCurrentClassName(*Tok.getIdentifierInfo(), getCurScope()) &&
- isConstructorDeclarator(/*Unqualified*/true))
+ isConstructorDeclarator(/*Unqualified=*/true,
+ /*DeductionGuide=*/false,
+ DS.isFriendSpecified()))
goto DoneWithDeclSpec;
ParsedType TypeRep = Actions.getTypeName(
@@ -3427,7 +3795,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
goto DoneWithDeclSpec;
if (Tok.isNot(tok::identifier))
continue;
- ParsedAttributesWithRange Attrs(AttrFactory);
+ ParsedAttributes Attrs(AttrFactory);
if (ParseImplicitInt(DS, nullptr, TemplateInfo, AS, DSContext, Attrs)) {
if (!Attrs.empty()) {
AttrsLastTime = true;
@@ -3440,11 +3808,12 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// Likewise, if this is a context where the identifier could be a template
// name, check whether this is a deduction guide declaration.
+ CXXScopeSpec SS;
if (getLangOpts().CPlusPlus17 &&
(DSContext == DeclSpecContext::DSC_class ||
DSContext == DeclSpecContext::DSC_top_level) &&
Actions.isDeductionGuideName(getCurScope(), *Tok.getIdentifierInfo(),
- Tok.getLocation()) &&
+ Tok.getLocation(), SS) &&
isConstructorDeclarator(/*Unqualified*/ true,
/*DeductionGuide*/ true))
goto DoneWithDeclSpec;
@@ -3487,11 +3856,18 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
if (TemplateId->Kind == TNK_Concept_template) {
// If we've already diagnosed that this type-constraint has invalid
- // arguemnts, drop it and just form 'auto' or 'decltype(auto)'.
+ // arguments, drop it and just form 'auto' or 'decltype(auto)'.
if (TemplateId->hasInvalidArgs())
TemplateId = nullptr;
- if (NextToken().is(tok::identifier)) {
+ // Any of the following tokens are likely the start of the user
+ // forgetting 'auto' or 'decltype(auto)', so diagnose.
+ // Note: if updating this list, please make sure we update
+ // isCXXDeclarationSpecifier's check for IsPlaceholderSpecifier to have
+ // a matching list.
+ if (NextToken().isOneOf(tok::identifier, tok::kw_const,
+ tok::kw_volatile, tok::kw_restrict, tok::amp,
+ tok::ampamp)) {
Diag(Loc, diag::err_placeholder_expected_auto_or_decltype_auto)
<< FixItHint::CreateInsertion(NextToken().getLocation(), "auto");
// Attempt to continue as if 'auto' was placed here.
@@ -3501,6 +3877,10 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
}
if (!NextToken().isOneOf(tok::kw_auto, tok::kw_decltype))
goto DoneWithDeclSpec;
+
+ if (TemplateId && !isInvalid && Actions.CheckTypeConstraint(TemplateId))
+ TemplateId = nullptr;
+
ConsumeAnnotationToken();
SourceLocation AutoLoc = Tok.getLocation();
if (TryConsumeToken(tok::kw_decltype)) {
@@ -3521,12 +3901,13 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
}
}
ConsumedEnd = Tok.getLocation();
+ DS.setTypeArgumentRange(Tracker.getRange());
// Even if something went wrong above, continue as if we've seen
// `decltype(auto)`.
isInvalid = DS.SetTypeSpecType(TST_decltype_auto, Loc, PrevSpec,
DiagID, TemplateId, Policy);
} else {
- isInvalid = DS.SetTypeSpecType(TST_auto, Loc, PrevSpec, DiagID,
+ isInvalid = DS.SetTypeSpecType(TST_auto, AutoLoc, PrevSpec, DiagID,
TemplateId, Policy);
}
break;
@@ -3544,21 +3925,22 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// constructor declaration.
if (getLangOpts().CPlusPlus && DSContext == DeclSpecContext::DSC_class &&
Actions.isCurrentClassName(*TemplateId->Name, getCurScope()) &&
- isConstructorDeclarator(/*Unqualified=*/true))
+ isConstructorDeclarator(/*Unqualified=*/true,
+ /*DeductionGuide=*/false,
+ DS.isFriendSpecified()))
goto DoneWithDeclSpec;
// Turn the template-id annotation token into a type annotation
// token, then try again to parse it as a type-specifier.
CXXScopeSpec SS;
- AnnotateTemplateIdTokenAsType(SS);
+ AnnotateTemplateIdTokenAsType(SS, AllowImplicitTypename);
continue;
}
// Attributes support.
case tok::kw___attribute:
case tok::kw___declspec:
- ParseAttributes(PAKM_GNU | PAKM_Declspec, DS.getAttributes(), nullptr,
- LateAttrs);
+ ParseAttributes(PAKM_GNU | PAKM_Declspec, DS.getAttributes(), LateAttrs);
continue;
// Microsoft single token adornments.
@@ -3567,7 +3949,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = Tok.getLocation();
DS.getAttributes().addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc,
- nullptr, 0, ParsedAttr::AS_Keyword);
+ nullptr, 0, tok::kw___forceinline);
break;
}
@@ -3590,6 +3972,10 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
ParseMicrosoftTypeAttributes(DS.getAttributes());
continue;
+ case tok::kw___funcref:
+ ParseWebAssemblyFuncrefTypeAttribute(DS.getAttributes());
+ continue;
+
// Borland single token adornments.
case tok::kw___pascal:
ParseBorlandTypeAttributes(DS.getAttributes());
@@ -3600,6 +3986,11 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
ParseOpenCLKernelAttributes(DS.getAttributes());
continue;
+ // CUDA/HIP single token adornments.
+ case tok::kw___noinline__:
+ ParseCUDAFunctionAttributes(DS.getAttributes());
+ continue;
+
// Nullability type specifiers.
case tok::kw__Nonnull:
case tok::kw__Nullable:
@@ -3611,7 +4002,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// Objective-C 'kindof' types.
case tok::kw___kindof:
DS.getAttributes().addNew(Tok.getIdentifierInfo(), Loc, nullptr, Loc,
- nullptr, 0, ParsedAttr::AS_Keyword);
+ nullptr, 0, tok::kw___kindof);
(void)ConsumeToken();
continue;
@@ -3641,11 +4032,11 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
isStorageClass = true;
break;
case tok::kw_auto:
- if (getLangOpts().CPlusPlus11) {
+ if (getLangOpts().CPlusPlus11 || getLangOpts().C23) {
if (isKnownToBeTypeSpecifier(GetLookAheadToken(1))) {
isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_auto, Loc,
PrevSpec, DiagID, Policy);
- if (!isInvalid)
+ if (!isInvalid && !getLangOpts().C23)
Diag(Tok, diag::ext_auto_storage_class)
<< FixItHint::CreateRemoval(DS.getStorageClassSpecLoc());
} else
@@ -3677,8 +4068,17 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
isStorageClass = true;
break;
case tok::kw_thread_local:
- isInvalid = DS.SetStorageClassSpecThread(DeclSpec::TSCS_thread_local, Loc,
- PrevSpec, DiagID);
+ if (getLangOpts().C23)
+ Diag(Tok, diag::warn_c23_compat_keyword) << Tok.getName();
+ // We map thread_local to _Thread_local in C23 mode so it retains the C
+ // semantics rather than getting the C++ semantics.
+ // FIXME: diagnostics will show _Thread_local when the user wrote
+ // thread_local in source in C23 mode; we need some general way to
+ // identify which way the user spelled the keyword in source.
+ isInvalid = DS.SetStorageClassSpecThread(
+ getLangOpts().C23 ? DeclSpec::TSCS__Thread_local
+ : DeclSpec::TSCS_thread_local,
+ Loc, PrevSpec, DiagID);
isStorageClass = true;
break;
case tok::kw__Thread_local:
@@ -3721,7 +4121,11 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
ExprResult ExplicitExpr(static_cast<Expr *>(nullptr));
BalancedDelimiterTracker Tracker(*this, tok::l_paren);
Tracker.consumeOpen();
- ExplicitExpr = ParseConstantExpression();
+
+ EnterExpressionEvaluationContext ConstantEvaluated(
+ Actions, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+
+ ExplicitExpr = ParseConstantExpressionInExprEvalContext();
ConsumedEnd = Tok.getLocation();
if (ExplicitExpr.isUsable()) {
CloseParenLoc = Tok.getLocation();
@@ -3830,11 +4234,13 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_int, Loc, PrevSpec,
DiagID, Policy);
break;
- case tok::kw__ExtInt: {
+ case tok::kw__ExtInt:
+ case tok::kw__BitInt: {
+ DiagnoseBitIntUse(Tok);
ExprResult ER = ParseExtIntegerArgument();
if (ER.isInvalid())
continue;
- isInvalid = DS.SetExtIntType(Loc, ER.get(), PrevSpec, DiagID, Policy);
+ isInvalid = DS.SetBitIntType(Loc, ER.get(), PrevSpec, DiagID, Policy);
ConsumedEnd = PrevTokLocation;
break;
}
@@ -3863,32 +4269,33 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
DiagID, Policy);
break;
case tok::kw__Accum:
- if (!getLangOpts().FixedPoint) {
- SetupFixedPointError(getLangOpts(), PrevSpec, DiagID, isInvalid);
- } else {
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_accum, Loc, PrevSpec,
- DiagID, Policy);
- }
+ assert(getLangOpts().FixedPoint &&
+ "This keyword is only used when fixed point types are enabled "
+ "with `-ffixed-point`");
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_accum, Loc, PrevSpec, DiagID,
+ Policy);
break;
case tok::kw__Fract:
- if (!getLangOpts().FixedPoint) {
- SetupFixedPointError(getLangOpts(), PrevSpec, DiagID, isInvalid);
- } else {
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_fract, Loc, PrevSpec,
- DiagID, Policy);
- }
+ assert(getLangOpts().FixedPoint &&
+ "This keyword is only used when fixed point types are enabled "
+ "with `-ffixed-point`");
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_fract, Loc, PrevSpec, DiagID,
+ Policy);
break;
case tok::kw__Sat:
- if (!getLangOpts().FixedPoint) {
- SetupFixedPointError(getLangOpts(), PrevSpec, DiagID, isInvalid);
- } else {
- isInvalid = DS.SetTypeSpecSat(Loc, PrevSpec, DiagID);
- }
+ assert(getLangOpts().FixedPoint &&
+ "This keyword is only used when fixed point types are enabled "
+ "with `-ffixed-point`");
+ isInvalid = DS.SetTypeSpecSat(Loc, PrevSpec, DiagID);
break;
case tok::kw___float128:
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_float128, Loc, PrevSpec,
DiagID, Policy);
break;
+ case tok::kw___ibm128:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_ibm128, Loc, PrevSpec,
+ DiagID, Policy);
+ break;
case tok::kw_wchar_t:
isInvalid = DS.SetTypeSpecType(DeclSpec::TST_wchar, Loc, PrevSpec,
DiagID, Policy);
@@ -3906,6 +4313,9 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
DiagID, Policy);
break;
case tok::kw_bool:
+ if (getLangOpts().C23)
+ Diag(Tok, diag::warn_c23_compat_keyword) << Tok.getName();
+ [[fallthrough]];
case tok::kw__Bool:
if (Tok.is(tok::kw__Bool) && !getLangOpts().C99)
Diag(Tok, diag::ext_c99_feature) << Tok.getName();
@@ -3945,8 +4355,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
isInvalid = DS.SetTypeAltiVecBool(true, Loc, PrevSpec, DiagID, Policy);
break;
case tok::kw_pipe:
- if (!getLangOpts().OpenCL || (getLangOpts().OpenCLVersion < 200 &&
- !getLangOpts().OpenCLCPlusPlus)) {
+ if (!getLangOpts().OpenCL ||
+ getLangOpts().getOpenCLCompatibleVersion() < 200) {
// OpenCL 2.0 and later define this keyword. OpenCL 1.2 and earlier
// should support the "pipe" word as identifier.
Tok.getIdentifierInfo()->revertTokenIDToIdentifier();
@@ -3984,7 +4394,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// These are attributes following class specifiers.
// To produce better diagnostic, we parse them when
// parsing class specifier.
- ParsedAttributesWithRange Attributes(AttrFactory);
+ ParsedAttributes Attributes(AttrFactory);
ParseClassSpecifier(Kind, Loc, DS, TemplateInfo, AS,
EnteringContext, DSContext, Attributes);
@@ -4027,8 +4437,9 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
continue;
break;
- // GNU typeof support.
+ // C23/GNU typeof support.
case tok::kw_typeof:
+ case tok::kw_typeof_unqual:
ParseTypeofSpecifier(DS);
continue;
@@ -4052,8 +4463,13 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
HandlePragmaMSPointersToMembers();
continue;
- case tok::kw___underlying_type:
- ParseUnderlyingTypeSpecifier(DS);
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) case tok::kw___##Trait:
+#include "clang/Basic/TransformTypeTraits.def"
+ // HACK: libstdc++ already uses '__remove_cv' as an alias template so we
+ // work around this by expecting all transform type traits to be suffixed
+ // with '('. They're an identifier otherwise.
+ if (!MaybeParseTypeTransformTypeSpecifier(DS))
+ goto ParseIdentifier;
continue;
case tok::kw__Atomic:
@@ -4084,13 +4500,13 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
isInvalid = true;
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case tok::kw_private:
// It's fine (but redundant) to check this for __generic on the
// fallthrough path; we only form the __generic token in OpenCL mode.
if (!getLangOpts().OpenCL)
goto DoneWithDeclSpec;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case tok::kw___private:
case tok::kw___global:
case tok::kw___local:
@@ -4102,6 +4518,14 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
ParseOpenCLQualifiers(DS.getAttributes());
break;
+ case tok::kw_groupshared:
+ case tok::kw_in:
+ case tok::kw_inout:
+ case tok::kw_out:
+ // NOTE: ParseHLSLQualifiers will consume the qualifier token.
+ ParseHLSLQualifiers(DS.getAttributes());
+ continue;
+
case tok::less:
// GCC ObjC supports types like "<SomeProtocol>" as a synonym for
// "id<SomeProtocol>". This is hopelessly old fashioned and dangerous,
@@ -4142,9 +4566,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
<< FixItHint::CreateRemoval(
SourceRange(Loc, DS.getEndLoc()));
else if (DiagID == diag::err_opencl_unknown_type_specifier) {
- Diag(Loc, DiagID) << getLangOpts().OpenCLCPlusPlus
- << getLangOpts().getOpenCLVersionTuple().getAsString()
- << PrevSpec << isStorageClass;
+ Diag(Loc, DiagID) << getLangOpts().getOpenCLVersionString() << PrevSpec
+ << isStorageClass;
} else
Diag(Loc, DiagID) << PrevSpec;
}
@@ -4164,7 +4587,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
/// not to the declaration of a struct.
///
/// struct-declaration:
-/// [C2x] attributes-specifier-seq[opt]
+/// [C23] attributes-specifier-seq[opt]
/// specifier-qualifier-list struct-declarator-list
/// [GNU] __extension__ struct-declaration
/// [GNU] specifier-qualifier-list
@@ -4190,9 +4613,8 @@ void Parser::ParseStructDeclaration(
}
// Parse leading attributes.
- ParsedAttributesWithRange Attrs(AttrFactory);
+ ParsedAttributes Attrs(AttrFactory);
MaybeParseCXX11Attributes(Attrs);
- DS.takeAttributesFrom(Attrs);
// Parse the common specifier-qualifiers-list piece.
ParseSpecifierQualifierList(DS);
@@ -4200,9 +4622,14 @@ void Parser::ParseStructDeclaration(
// If there are no declarators, this is a free-standing declaration
// specifier. Let the actions module cope with it.
if (Tok.is(tok::semi)) {
+ // C23 6.7.2.1p9 : "The optional attribute specifier sequence in a
+ // member declaration appertains to each of the members declared by the
+ // member declarator list; it shall not appear if the optional member
+ // declarator list is omitted."
+ ProhibitAttributes(Attrs);
RecordDecl *AnonRecord = nullptr;
- Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS_none,
- DS, AnonRecord);
+ Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(
+ getCurScope(), AS_none, DS, ParsedAttributesView::none(), AnonRecord);
assert(!AnonRecord && "Did not expect anonymous struct or union here");
DS.complete(TheDecl);
return;
@@ -4211,8 +4638,8 @@ void Parser::ParseStructDeclaration(
// Read struct-declarators until we find the semicolon.
bool FirstDeclarator = true;
SourceLocation CommaLoc;
- while (1) {
- ParsingFieldDeclarator DeclaratorInfo(*this, DS);
+ while (true) {
+ ParsingFieldDeclarator DeclaratorInfo(*this, DS, Attrs);
DeclaratorInfo.D.setCommaLoc(CommaLoc);
// Attributes are only allowed here on successive declarators.
@@ -4310,11 +4737,16 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
if (Tok.isOneOf(tok::annot_pragma_openmp, tok::annot_attr_openmp)) {
// Result can be ignored, because it must be always empty.
AccessSpecifier AS = AS_none;
- ParsedAttributesWithRange Attrs(AttrFactory);
+ ParsedAttributes Attrs(AttrFactory);
(void)ParseOpenMPDeclarativeDirectiveWithExtDecl(AS, Attrs);
continue;
}
+ if (Tok.is(tok::annot_pragma_openacc)) {
+ ParseOpenACCDirectiveDecl();
+ continue;
+ }
+
if (tok::isPragmaAnnotation(Tok.getKind())) {
Diag(Tok.getLocation(), diag::err_pragma_misplaced_in_decl)
<< DeclSpec::getSpecifierName(
@@ -4378,8 +4810,7 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
// If attributes exist after struct contents, parse them.
MaybeParseGNUAttributes(attrs);
- SmallVector<Decl *, 32> FieldDecls(TagDecl->field_begin(),
- TagDecl->field_end());
+ SmallVector<Decl *, 32> FieldDecls(TagDecl->fields());
Actions.ActOnFields(getCurScope(), RecordLoc, TagDecl, FieldDecls,
T.getOpenLocation(), T.getCloseLocation(), attrs);
@@ -4425,18 +4856,19 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// Code completion for an enum name.
cutOffParsing();
Actions.CodeCompleteTag(getCurScope(), DeclSpec::TST_enum);
+ DS.SetTypeSpecError(); // Needed by ActOnUsingDeclaration.
return;
}
// If attributes exist after tag, parse them.
- ParsedAttributesWithRange attrs(AttrFactory);
+ ParsedAttributes attrs(AttrFactory);
MaybeParseAttributes(PAKM_GNU | PAKM_Declspec | PAKM_CXX11, attrs);
SourceLocation ScopedEnumKWLoc;
bool IsScopedUsingClassTag = false;
// In C++11, recognize 'enum class' and 'enum struct'.
- if (Tok.isOneOf(tok::kw_class, tok::kw_struct)) {
+ if (Tok.isOneOf(tok::kw_class, tok::kw_struct) && getLangOpts().CPlusPlus) {
Diag(Tok, getLangOpts().CPlusPlus11 ? diag::warn_cxx98_compat_scoped_enum
: diag::ext_scoped_enum);
IsScopedUsingClassTag = Tok.is(tok::kw_class);
@@ -4463,7 +4895,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// Determine whether this declaration is permitted to have an enum-base.
AllowDefiningTypeSpec AllowEnumSpecifier =
- isDefiningTypeSpecifierContext(DSC);
+ isDefiningTypeSpecifierContext(DSC, getLangOpts().CPlusPlus);
bool CanBeOpaqueEnumDeclaration =
DS.isEmpty() && isOpaqueEnumDeclarationContext(DSC);
bool CanHaveEnumBase = (getLangOpts().CPlusPlus11 || getLangOpts().ObjC ||
@@ -4478,12 +4910,13 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
CXXScopeSpec Spec;
if (ParseOptionalCXXScopeSpecifier(Spec, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/true))
return;
if (Spec.isSet() && Tok.isNot(tok::identifier)) {
Diag(Tok, diag::err_expected) << tok::identifier;
+ DS.SetTypeSpecError();
if (Tok.isNot(tok::l_brace)) {
// Has no name and is not a definition.
// Skip the rest of this declarator, up until the comma or semicolon.
@@ -4500,6 +4933,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
Tok.isNot(tok::colon)) {
Diag(Tok, diag::err_expected_either) << tok::identifier << tok::l_brace;
+ DS.SetTypeSpecError();
// Skip the rest of this declarator, up until the comma or semicolon.
SkipUntil(tok::comma, StopAtSemi);
return;
@@ -4529,8 +4963,8 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
TypeResult BaseType;
SourceRange BaseRange;
- bool CanBeBitfield = (getCurScope()->getFlags() & Scope::ClassScope) &&
- ScopedEnumKWLoc.isInvalid() && Name;
+ bool CanBeBitfield =
+ getCurScope()->isClassScope() && ScopedEnumKWLoc.isInvalid() && Name;
// Parse the fixed underlying type.
if (Tok.is(tok::colon)) {
@@ -4571,13 +5005,17 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// enum E : int *p;
// declares 'enum E : int; E *p;' not 'enum E : int*; E p;'.
DeclSpec DS(AttrFactory);
- ParseSpecifierQualifierList(DS, AS, DeclSpecContext::DSC_type_specifier);
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
- BaseType = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ // enum-base is not assumed to be a type and therefore requires the
+ // typename keyword [p0634r3].
+ ParseSpecifierQualifierList(DS, ImplicitTypenameContext::No, AS,
+ DeclSpecContext::DSC_type_specifier);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
+ BaseType = Actions.ActOnTypeName(DeclaratorInfo);
BaseRange = SourceRange(ColonLoc, DeclaratorInfo.getSourceRange().getEnd());
- if (!getLangOpts().ObjC) {
+ if (!getLangOpts().ObjC && !getLangOpts().C23) {
if (getLangOpts().CPlusPlus11)
Diag(ColonLoc, diag::warn_cxx98_compat_enum_fixed_underlying_type)
<< BaseRange;
@@ -4669,11 +5107,13 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
assert(TemplateInfo.TemplateParams && "no template parameters");
TParams = MultiTemplateParamsArg(TemplateInfo.TemplateParams->data(),
TemplateInfo.TemplateParams->size());
+ SS.setTemplateParamLists(TParams);
}
if (!Name && TUK != Sema::TUK_Definition) {
Diag(Tok, diag::err_enumerator_unnamed_no_def);
+ DS.SetTypeSpecError();
// Skip the rest of this declarator, up until the comma or semicolon.
SkipUntil(tok::comma, StopAtSemi);
return;
@@ -4690,6 +5130,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
if (IsElaboratedTypeSpecifier && !getLangOpts().MicrosoftExt &&
!getLangOpts().ObjC) {
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
+ diag::err_keyword_not_allowed,
/*DiagnoseEmptyAttrs=*/true);
if (BaseType.isUsable())
Diag(BaseRange.getBegin(), diag::ext_enum_base_in_type_specifier)
@@ -4712,14 +5153,15 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
bool IsDependent = false;
const char *PrevSpec = nullptr;
unsigned DiagID;
- Decl *TagDecl = Actions.ActOnTag(
- getCurScope(), DeclSpec::TST_enum, TUK, StartLoc, SS, Name, NameLoc,
- attrs, AS, DS.getModulePrivateSpecLoc(), TParams, Owned, IsDependent,
- ScopedEnumKWLoc, IsScopedUsingClassTag, BaseType,
- DSC == DeclSpecContext::DSC_type_specifier,
- DSC == DeclSpecContext::DSC_template_param ||
- DSC == DeclSpecContext::DSC_template_type_arg,
- &SkipBody);
+ Decl *TagDecl =
+ Actions.ActOnTag(getCurScope(), DeclSpec::TST_enum, TUK, StartLoc, SS,
+ Name, NameLoc, attrs, AS, DS.getModulePrivateSpecLoc(),
+ TParams, Owned, IsDependent, ScopedEnumKWLoc,
+ IsScopedUsingClassTag,
+ BaseType, DSC == DeclSpecContext::DSC_type_specifier,
+ DSC == DeclSpecContext::DSC_template_param ||
+ DSC == DeclSpecContext::DSC_template_type_arg,
+ OffsetOfState, &SkipBody).get();
if (SkipBody.ShouldSkip) {
assert(TUK == Sema::TUK_Definition && "can only skip a definition");
@@ -4777,7 +5219,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
Decl *D = SkipBody.CheckSameAsPrevious ? SkipBody.New : TagDecl;
ParseEnumBody(StartLoc, D);
if (SkipBody.CheckSameAsPrevious &&
- !Actions.ActOnDuplicateDefinition(DS, TagDecl, SkipBody)) {
+ !Actions.ActOnDuplicateDefinition(TagDecl, SkipBody)) {
DS.SetTypeSpecError();
return;
}
@@ -4832,9 +5274,9 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
SourceLocation IdentLoc = ConsumeToken();
// If attributes exist after the enumerator, parse them.
- ParsedAttributesWithRange attrs(AttrFactory);
+ ParsedAttributes attrs(AttrFactory);
MaybeParseGNUAttributes(attrs);
- if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
+ if (isAllowedCXX11AttributeSpecifier()) {
if (getLangOpts().CPlusPlus)
Diag(Tok.getLocation(), getLangOpts().CPlusPlus17
? diag::warn_cxx14_compat_ns_enum_attribute
@@ -4926,7 +5368,7 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
// The next token must be valid after an enum definition. If not, a ';'
// was probably forgotten.
- bool CanBeBitfield = getCurScope()->getFlags() & Scope::ClassScope;
+ bool CanBeBitfield = getCurScope()->isClassScope();
if (!isValidAfterTypeSpecifier(CanBeBitfield)) {
ExpectAndConsume(tok::semi, diag::err_expected_after, "enum");
// Push this token back into the preprocessor and change our current token
@@ -4960,6 +5402,7 @@ bool Parser::isKnownToBeTypeSpecifier(const Token &Tok) const {
case tok::kw_char32_t:
case tok::kw_int:
case tok::kw__ExtInt:
+ case tok::kw__BitInt:
case tok::kw___bf16:
case tok::kw_half:
case tok::kw_float:
@@ -4968,6 +5411,7 @@ bool Parser::isKnownToBeTypeSpecifier(const Token &Tok) const {
case tok::kw__Fract:
case tok::kw__Float16:
case tok::kw___float128:
+ case tok::kw___ibm128:
case tok::kw_bool:
case tok::kw__Bool:
case tok::kw__Decimal32:
@@ -5000,7 +5444,7 @@ bool Parser::isTypeSpecifierQualifier() {
case tok::identifier: // foo::bar
if (TryAltiVecVectorToken())
return true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case tok::kw_typename: // typename T::type
// Annotate typenames and C++ scope specifiers. If we get one, just
// recurse to handle whatever we get.
@@ -5021,8 +5465,9 @@ bool Parser::isTypeSpecifierQualifier() {
// GNU attributes support.
case tok::kw___attribute:
- // GNU typeof support.
+ // C23/GNU typeof support.
case tok::kw_typeof:
+ case tok::kw_typeof_unqual:
// type-specifiers
case tok::kw_short:
@@ -5041,6 +5486,7 @@ bool Parser::isTypeSpecifierQualifier() {
case tok::kw_char32_t:
case tok::kw_int:
case tok::kw__ExtInt:
+ case tok::kw__BitInt:
case tok::kw_half:
case tok::kw___bf16:
case tok::kw_float:
@@ -5049,6 +5495,7 @@ bool Parser::isTypeSpecifierQualifier() {
case tok::kw__Fract:
case tok::kw__Float16:
case tok::kw___float128:
+ case tok::kw___ibm128:
case tok::kw_bool:
case tok::kw__Bool:
case tok::kw__Decimal32:
@@ -5110,6 +5557,7 @@ bool Parser::isTypeSpecifierQualifier() {
case tok::kw___read_only:
case tok::kw___read_write:
case tok::kw___write_only:
+ case tok::kw___funcref:
return true;
case tok::kw_private:
@@ -5118,22 +5566,61 @@ bool Parser::isTypeSpecifierQualifier() {
// C11 _Atomic
case tok::kw__Atomic:
return true;
+
+ // HLSL type qualifiers
+ case tok::kw_groupshared:
+ case tok::kw_in:
+ case tok::kw_inout:
+ case tok::kw_out:
+ return getLangOpts().HLSL;
}
}
+Parser::DeclGroupPtrTy Parser::ParseTopLevelStmtDecl() {
+ assert(PP.isIncrementalProcessingEnabled() && "Not in incremental mode");
+
+ // Parse a top-level-stmt.
+ Parser::StmtVector Stmts;
+ ParsedStmtContext SubStmtCtx = ParsedStmtContext();
+ Actions.PushFunctionScope();
+ StmtResult R = ParseStatementOrDeclaration(Stmts, SubStmtCtx);
+ Actions.PopFunctionScopeInfo();
+ if (!R.isUsable())
+ return nullptr;
+
+ SmallVector<Decl *, 2> DeclsInGroup;
+ DeclsInGroup.push_back(Actions.ActOnTopLevelStmtDecl(R.get()));
+
+ if (Tok.is(tok::annot_repl_input_end) &&
+ Tok.getAnnotationValue() != nullptr) {
+ ConsumeAnnotationToken();
+ cast<TopLevelStmtDecl>(DeclsInGroup.back())->setSemiMissing();
+ }
+
+ // Currently happens for things like -fms-extensions and use `__if_exists`.
+ for (Stmt *S : Stmts)
+ DeclsInGroup.push_back(Actions.ActOnTopLevelStmtDecl(S));
+
+ return Actions.BuildDeclaratorGroup(DeclsInGroup);
+}
+
/// isDeclarationSpecifier() - Return true if the current token is part of a
/// declaration specifier.
///
+/// \param AllowImplicitTypename whether this is a context where T::type [T
+/// dependent] can appear.
/// \param DisambiguatingWithExpression True to indicate that the purpose of
/// this check is to disambiguate between an expression and a declaration.
-bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
+bool Parser::isDeclarationSpecifier(
+ ImplicitTypenameContext AllowImplicitTypename,
+ bool DisambiguatingWithExpression) {
switch (Tok.getKind()) {
default: return false;
// OpenCL 2.0 and later define this keyword.
case tok::kw_pipe:
- return (getLangOpts().OpenCL && getLangOpts().OpenCLVersion >= 200) ||
- getLangOpts().OpenCLCPlusPlus;
+ return getLangOpts().OpenCL &&
+ getLangOpts().getOpenCLCompatibleVersion() >= 200;
case tok::identifier: // foo::bar
// Unfortunate hack to support "Class.factoryMethod" notation.
@@ -5141,12 +5628,12 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
return false;
if (TryAltiVecVectorToken())
return true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case tok::kw_decltype: // decltype(T())::type
case tok::kw_typename: // typename T::type
// Annotate typenames and C++ scope specifiers. If we get one, just
// recurse to handle whatever we get.
- if (TryAnnotateTypeOrScopeToken())
+ if (TryAnnotateTypeOrScopeToken(AllowImplicitTypename))
return true;
if (TryAnnotateTypeConstraint())
return true;
@@ -5162,9 +5649,11 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
isStartOfObjCClassMessageMissingOpenBracket())
return false;
- return isDeclarationSpecifier();
+ return isDeclarationSpecifier(AllowImplicitTypename);
case tok::coloncolon: // ::foo::bar
+ if (!getLangOpts().CPlusPlus)
+ return false;
if (NextToken().is(tok::kw_new) || // ::new
NextToken().is(tok::kw_delete)) // ::delete
return false;
@@ -5173,7 +5662,7 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
// recurse to handle whatever we get.
if (TryAnnotateTypeOrScopeToken())
return true;
- return isDeclarationSpecifier();
+ return isDeclarationSpecifier(ImplicitTypenameContext::No);
// storage-class-specifier
case tok::kw_typedef:
@@ -5211,6 +5700,7 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::kw_int:
case tok::kw__ExtInt:
+ case tok::kw__BitInt:
case tok::kw_half:
case tok::kw___bf16:
case tok::kw_float:
@@ -5219,6 +5709,7 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::kw__Fract:
case tok::kw__Float16:
case tok::kw___float128:
+ case tok::kw___ibm128:
case tok::kw_bool:
case tok::kw__Bool:
case tok::kw__Decimal32:
@@ -5256,8 +5747,9 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::kw_static_assert:
case tok::kw__Static_assert:
- // GNU typeof support.
+ // C23/GNU typeof support.
case tok::kw_typeof:
+ case tok::kw_typeof_unqual:
// GNU attributes.
case tok::kw___attribute:
@@ -5343,6 +5835,8 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
#define GENERIC_IMAGE_TYPE(ImgType, Id) case tok::kw_##ImgType##_t:
#include "clang/Basic/OpenCLImageTypes.def"
+ case tok::kw___funcref:
+ case tok::kw_groupshared:
return true;
case tok::kw_private:
@@ -5350,15 +5844,18 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
}
}
-bool Parser::isConstructorDeclarator(bool IsUnqualified, bool DeductionGuide) {
- TentativeParsingAction TPA(*this);
-
+bool Parser::isConstructorDeclarator(bool IsUnqualified, bool DeductionGuide,
+ DeclSpec::FriendSpecified IsFriend,
+ const ParsedTemplateInfo *TemplateInfo) {
+ RevertingTentativeParsingAction TPA(*this);
// Parse the C++ scope specifier.
CXXScopeSpec SS;
+ if (TemplateInfo && TemplateInfo->TemplateParams)
+ SS.setTemplateParamLists(*TemplateInfo->TemplateParams);
+
if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/true)) {
- TPA.Revert();
return false;
}
@@ -5370,7 +5867,6 @@ bool Parser::isConstructorDeclarator(bool IsUnqualified, bool DeductionGuide) {
} else if (Tok.is(tok::annot_template_id)) {
ConsumeAnnotationToken();
} else {
- TPA.Revert();
return false;
}
@@ -5380,7 +5876,6 @@ bool Parser::isConstructorDeclarator(bool IsUnqualified, bool DeductionGuide) {
// Current class name must be followed by a left parenthesis.
if (Tok.isNot(tok::l_paren)) {
- TPA.Revert();
return false;
}
ConsumeParen();
@@ -5389,7 +5884,6 @@ bool Parser::isConstructorDeclarator(bool IsUnqualified, bool DeductionGuide) {
// that we have a constructor.
if (Tok.is(tok::r_paren) ||
(Tok.is(tok::ellipsis) && NextToken().is(tok::r_paren))) {
- TPA.Revert();
return true;
}
@@ -5398,7 +5892,6 @@ bool Parser::isConstructorDeclarator(bool IsUnqualified, bool DeductionGuide) {
if (getLangOpts().CPlusPlus11 &&
isCXX11AttributeSpecifier(/*Disambiguate*/ false,
/*OuterMightBeMessageSend*/ true)) {
- TPA.Revert();
return true;
}
@@ -5414,8 +5907,22 @@ bool Parser::isConstructorDeclarator(bool IsUnqualified, bool DeductionGuide) {
// Check whether the next token(s) are part of a declaration
// specifier, in which case we have the start of a parameter and,
// therefore, we know that this is a constructor.
+ // Due to an ambiguity with implicit typename, the above is not enough.
+ // Additionally, check to see if we are a friend.
+ // If we parsed a scope specifier as well as friend,
+ // we might be parsing a friend constructor.
bool IsConstructor = false;
- if (isDeclarationSpecifier())
+ ImplicitTypenameContext ITC = IsFriend && !SS.isSet()
+ ? ImplicitTypenameContext::No
+ : ImplicitTypenameContext::Yes;
+ // Constructors cannot have this parameters, but we support that scenario here
+ // to improve diagnostic.
+ if (Tok.is(tok::kw_this)) {
+ ConsumeToken();
+ return isDeclarationSpecifier(ITC);
+ }
+
+ if (isDeclarationSpecifier(ITC))
IsConstructor = true;
else if (Tok.is(tok::identifier) ||
(Tok.is(tok::annot_cxxscope) && NextToken().is(tok::identifier))) {
@@ -5484,8 +5991,6 @@ bool Parser::isConstructorDeclarator(bool IsUnqualified, bool DeductionGuide) {
break;
}
}
-
- TPA.Revert();
return IsConstructor;
}
@@ -5504,17 +6009,17 @@ bool Parser::isConstructorDeclarator(bool IsUnqualified, bool DeductionGuide) {
void Parser::ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs, bool AtomicAllowed,
bool IdentifierRequired,
- Optional<llvm::function_ref<void()>> CodeCompletionHandler) {
- if (standardAttributesAllowed() && (AttrReqs & AR_CXX11AttributesParsed) &&
- isCXX11AttributeSpecifier()) {
- ParsedAttributesWithRange attrs(AttrFactory);
- ParseCXX11Attributes(attrs);
- DS.takeAttributesFrom(attrs);
+ std::optional<llvm::function_ref<void()>> CodeCompletionHandler) {
+ if ((AttrReqs & AR_CXX11AttributesParsed) &&
+ isAllowedCXX11AttributeSpecifier()) {
+ ParsedAttributes Attrs(AttrFactory);
+ ParseCXX11Attributes(Attrs);
+ DS.takeAttributesFrom(Attrs);
}
SourceLocation EndLoc;
- while (1) {
+ while (true) {
bool isInvalid = false;
const char *PrevSpec = nullptr;
unsigned DiagID = 0;
@@ -5554,7 +6059,7 @@ void Parser::ParseTypeQualifierListOpt(
case tok::kw_private:
if (!getLangOpts().OpenCL)
goto DoneWithTypeQuals;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case tok::kw___private:
case tok::kw___global:
case tok::kw___local:
@@ -5566,6 +6071,14 @@ void Parser::ParseTypeQualifierListOpt(
ParseOpenCLQualifiers(DS.getAttributes());
break;
+ case tok::kw_groupshared:
+ case tok::kw_in:
+ case tok::kw_inout:
+ case tok::kw_out:
+ // NOTE: ParseHLSLQualifiers will consume the qualifier token.
+ ParseHLSLQualifiers(DS.getAttributes());
+ continue;
+
case tok::kw___unaligned:
isInvalid = DS.SetTypeQual(DeclSpec::TQ_unaligned, Loc, PrevSpec, DiagID,
getLangOpts());
@@ -5578,7 +6091,7 @@ void Parser::ParseTypeQualifierListOpt(
if (TryKeywordIdentFallback(false))
continue;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case tok::kw___sptr:
case tok::kw___w64:
case tok::kw___ptr64:
@@ -5594,6 +6107,12 @@ void Parser::ParseTypeQualifierListOpt(
continue;
}
goto DoneWithTypeQuals;
+
+ case tok::kw___funcref:
+ ParseWebAssemblyFuncrefTypeAttribute(DS.getAttributes());
+ continue;
+ goto DoneWithTypeQuals;
+
case tok::kw___pascal:
if (AttrReqs & AR_VendorAttributesParsed) {
ParseBorlandTypeAttributes(DS.getAttributes());
@@ -5612,7 +6131,7 @@ void Parser::ParseTypeQualifierListOpt(
// Objective-C 'kindof' types.
case tok::kw___kindof:
DS.getAttributes().addNew(Tok.getIdentifierInfo(), Loc, nullptr, Loc,
- nullptr, 0, ParsedAttr::AS_Keyword);
+ nullptr, 0, tok::kw___kindof);
(void)ConsumeToken();
continue;
@@ -5629,7 +6148,7 @@ void Parser::ParseTypeQualifierListOpt(
continue; // do *not* consume the next token!
}
// otherwise, FALL THROUGH!
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
DoneWithTypeQuals:
// If this is not a type-qualifier token, we're done reading type
@@ -5650,11 +6169,12 @@ void Parser::ParseTypeQualifierListOpt(
}
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
-///
void Parser::ParseDeclarator(Declarator &D) {
/// This implements the 'declarator' production in the C grammar, then checks
/// for well-formedness and issues diagnostics.
- ParseDeclaratorInternal(D, &Parser::ParseDirectDeclarator);
+ Actions.runWithSufficientStackSpace(D.getBeginLoc(), [&] {
+ ParseDeclaratorInternal(D, &Parser::ParseDirectDeclarator);
+ });
}
static bool isPtrOperatorToken(tok::TokenKind Kind, const LangOptions &Lang,
@@ -5663,8 +6183,8 @@ static bool isPtrOperatorToken(tok::TokenKind Kind, const LangOptions &Lang,
return true;
// OpenCL 2.0 and later define this keyword.
- if (Kind == tok::kw_pipe &&
- ((Lang.OpenCL && Lang.OpenCLVersion >= 200) || Lang.OpenCLCPlusPlus))
+ if (Kind == tok::kw_pipe && Lang.OpenCL &&
+ Lang.getOpenCLCompatibleVersion() >= 200)
return true;
if (!Lang.CPlusPlus)
@@ -5686,7 +6206,7 @@ static bool isPtrOperatorToken(tok::TokenKind Kind, const LangOptions &Lang,
}
// Indicates whether the given declarator is a pipe declarator.
-static bool isPipeDeclerator(const Declarator &D) {
+static bool isPipeDeclarator(const Declarator &D) {
const unsigned NumTypes = D.getNumTypeObjects();
for (unsigned Idx = 0; Idx != NumTypes; ++Idx)
@@ -5737,8 +6257,9 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
bool EnteringContext = D.getContext() == DeclaratorContext::File ||
D.getContext() == DeclaratorContext::Member;
CXXScopeSpec SS;
+ SS.setTemplateParamLists(D.getTemplateParameterLists());
ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false, EnteringContext);
+ /*ObjectHasErrors=*/false, EnteringContext);
if (SS.isNotEmpty()) {
if (Tok.isNot(tok::star)) {
@@ -5765,7 +6286,9 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
D.ExtendWithDeclSpec(DS);
// Recurse to parse whatever is left.
- ParseDeclaratorInternal(D, DirectDeclParser);
+ Actions.runWithSufficientStackSpace(D.getBeginLoc(), [&] {
+ ParseDeclaratorInternal(D, DirectDeclParser);
+ });
// Sema will have to catch (syntactically invalid) pointers into global
// scope. It has to catch pointers into namespace scope anyway.
@@ -5779,7 +6302,7 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
tok::TokenKind Kind = Tok.getKind();
- if (D.getDeclSpec().isTypeSpecPipe() && !isPipeDeclerator(D)) {
+ if (D.getDeclSpec().isTypeSpecPipe() && !isPipeDeclarator(D)) {
DeclSpec DS(AttrFactory);
ParseTypeQualifierListOpt(DS);
@@ -5814,7 +6337,8 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
D.ExtendWithDeclSpec(DS);
// Recursively parse the declarator.
- ParseDeclaratorInternal(D, DirectDeclParser);
+ Actions.runWithSufficientStackSpace(
+ D.getBeginLoc(), [&] { ParseDeclaratorInternal(D, DirectDeclParser); });
if (Kind == tok::star)
// Remember that we parsed a pointer type, and remember the type-quals.
D.AddTypeInfo(DeclaratorChunk::getPointer(
@@ -5859,7 +6383,8 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
}
// Recursively parse the declarator.
- ParseDeclaratorInternal(D, DirectDeclParser);
+ Actions.runWithSufficientStackSpace(
+ D.getBeginLoc(), [&] { ParseDeclaratorInternal(D, DirectDeclParser); });
if (D.getNumTypeObjects() > 0) {
// C++ [dcl.ref]p4: There shall be no references to references.
@@ -5967,7 +6492,7 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
D.getContext() == DeclaratorContext::Member;
ParseOptionalCXXScopeSpecifier(
D.getCXXScopeSpec(), /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false, EnteringContext);
+ /*ObjectHasErrors=*/false, EnteringContext);
}
if (D.getCXXScopeSpec().isValid()) {
@@ -6116,8 +6641,9 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
// that it's an initializer instead.
if (D.mayOmitIdentifier() && D.mayBeFollowedByCXXDirectInit()) {
RevertingTentativeParsingAction PA(*this);
- if (TryParseDeclarator(true, D.mayHaveIdentifier(), true) ==
- TPResult::False) {
+ if (TryParseDeclarator(true, D.mayHaveIdentifier(), true,
+ D.getDeclSpec().getTypeSpecType() == TST_auto) ==
+ TPResult::False) {
D.SetIdentifier(nullptr, Tok.getLocation());
goto PastIdentifier;
}
@@ -6160,7 +6686,7 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
// Objective-C++: Detect C++ keywords and try to prevent further errors by
// treating these keyword as valid member names.
if (getLangOpts().ObjC && getLangOpts().CPlusPlus &&
- Tok.getIdentifierInfo() &&
+ !Tok.isAnnotation() && Tok.getIdentifierInfo() &&
Tok.getIdentifierInfo()->isCPlusPlusKeyword(getLangOpts())) {
Diag(getMissingDeclaratorIdLoc(D, Tok.getLocation()),
diag::err_expected_member_name_or_semi_objcxx_keyword)
@@ -6176,23 +6702,27 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
diag::err_expected_member_name_or_semi)
<< (D.getDeclSpec().isEmpty() ? SourceRange()
: D.getDeclSpec().getSourceRange());
- } else if (getLangOpts().CPlusPlus) {
- if (Tok.isOneOf(tok::period, tok::arrow))
- Diag(Tok, diag::err_invalid_operator_on_type) << Tok.is(tok::arrow);
- else {
- SourceLocation Loc = D.getCXXScopeSpec().getEndLoc();
- if (Tok.isAtStartOfLine() && Loc.isValid())
- Diag(PP.getLocForEndOfToken(Loc), diag::err_expected_unqualified_id)
- << getLangOpts().CPlusPlus;
- else
- Diag(getMissingDeclaratorIdLoc(D, Tok.getLocation()),
- diag::err_expected_unqualified_id)
- << getLangOpts().CPlusPlus;
- }
} else {
- Diag(getMissingDeclaratorIdLoc(D, Tok.getLocation()),
- diag::err_expected_either)
- << tok::identifier << tok::l_paren;
+ if (Tok.getKind() == tok::TokenKind::kw_while) {
+ Diag(Tok, diag::err_while_loop_outside_of_a_function);
+ } else if (getLangOpts().CPlusPlus) {
+ if (Tok.isOneOf(tok::period, tok::arrow))
+ Diag(Tok, diag::err_invalid_operator_on_type) << Tok.is(tok::arrow);
+ else {
+ SourceLocation Loc = D.getCXXScopeSpec().getEndLoc();
+ if (Tok.isAtStartOfLine() && Loc.isValid())
+ Diag(PP.getLocForEndOfToken(Loc), diag::err_expected_unqualified_id)
+ << getLangOpts().CPlusPlus;
+ else
+ Diag(getMissingDeclaratorIdLoc(D, Tok.getLocation()),
+ diag::err_expected_unqualified_id)
+ << getLangOpts().CPlusPlus;
+ }
+ } else {
+ Diag(getMissingDeclaratorIdLoc(D, Tok.getLocation()),
+ diag::err_expected_either)
+ << tok::identifier << tok::l_paren;
+ }
}
D.SetIdentifier(nullptr, Tok.getLocation());
D.setInvalidType(true);
@@ -6206,7 +6736,7 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
if (D.hasName() && !D.getNumTypeObjects())
MaybeParseCXX11Attributes(D);
- while (1) {
+ while (true) {
if (Tok.is(tok::l_paren)) {
bool IsFunctionDeclaration = D.isFunctionDeclaratorAFunctionDeclaration();
// Enter function-declaration scope, limiting any declarators to the
@@ -6221,10 +6751,27 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
// is not, the declarator has been fully parsed.
bool IsAmbiguous = false;
if (getLangOpts().CPlusPlus && D.mayBeFollowedByCXXDirectInit()) {
+ // C++2a [temp.res]p5
+ // A qualified-id is assumed to name a type if
+ // - [...]
+ // - it is a decl-specifier of the decl-specifier-seq of a
+ // - [...]
+ // - parameter-declaration in a member-declaration [...]
+ // - parameter-declaration in a declarator of a function or function
+ // template declaration whose declarator-id is qualified [...]
+ auto AllowImplicitTypename = ImplicitTypenameContext::No;
+ if (D.getCXXScopeSpec().isSet())
+ AllowImplicitTypename =
+ (ImplicitTypenameContext)Actions.isDeclaratorFunctionLike(D);
+ else if (D.getContext() == DeclaratorContext::Member) {
+ AllowImplicitTypename = ImplicitTypenameContext::Yes;
+ }
+
// The name of the declarator, if any, is tentatively declared within
// a possible direct initializer.
TentativelyDeclaredIdentifiers.push_back(D.getIdentifier());
- bool IsFunctionDecl = isCXXFunctionDeclarator(&IsAmbiguous);
+ bool IsFunctionDecl =
+ isCXXFunctionDeclarator(&IsAmbiguous, AllowImplicitTypename);
TentativelyDeclaredIdentifiers.pop_back();
if (!IsFunctionDecl)
break;
@@ -6241,6 +6788,16 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
PrototypeScope.Exit();
} else if (Tok.is(tok::l_square)) {
ParseBracketDeclarator(D);
+ } else if (Tok.isRegularKeywordAttribute()) {
+ // For consistency with attribute parsing.
+ Diag(Tok, diag::err_keyword_not_allowed) << Tok.getIdentifierInfo();
+ bool TakesArgs = doesKeywordAttributeTakeArgs(Tok.getKind());
+ ConsumeToken();
+ if (TakesArgs) {
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (!T.consumeOpen())
+ T.skipToEnd();
+ }
} else if (Tok.is(tok::kw_requires) && D.hasGroupingParens()) {
// This declarator is declaring a function, but the requires clause is
// in the wrong place:
@@ -6383,11 +6940,12 @@ void Parser::ParseParenDeclarator(Declarator &D) {
// If this can't be an abstract-declarator, this *must* be a grouping
// paren, because we haven't seen the identifier yet.
isGrouping = true;
- } else if (Tok.is(tok::r_paren) || // 'int()' is a function.
+ } else if (Tok.is(tok::r_paren) || // 'int()' is a function.
(getLangOpts().CPlusPlus && Tok.is(tok::ellipsis) &&
NextToken().is(tok::r_paren)) || // C++ int(...)
- isDeclarationSpecifier() || // 'int(int)' is a function.
- isCXX11AttributeSpecifier()) { // 'int([[]]int)' is a function.
+ isDeclarationSpecifier(
+ ImplicitTypenameContext::No) || // 'int(int)' is a function.
+ isCXX11AttributeSpecifier()) { // 'int([[]]int)' is a function.
// This handles C99 6.7.5.3p11: in "typedef int X; void foo(X)", X is
// considered to be a type, not a K&R identifier-list.
isGrouping = false;
@@ -6439,7 +6997,7 @@ void Parser::ParseParenDeclarator(Declarator &D) {
void Parser::InitCXXThisScopeForDeclaratorIfRelevant(
const Declarator &D, const DeclSpec &DS,
- llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope) {
+ std::optional<Sema::CXXThisScopeRAII> &ThisScope) {
// C++11 [expr.prim.general]p3:
// If a declaration declares a member function or member function
// template of a class X, the expression this is a prvalue of type
@@ -6483,9 +7041,9 @@ void Parser::InitCXXThisScopeForDeclaratorIfRelevant(
/// declarator D up to a paren, which indicates that we are parsing function
/// arguments.
///
-/// If FirstArgAttrs is non-null, then the caller parsed those arguments
-/// immediately after the open paren - they should be considered to be the
-/// first argument of a parameter.
+/// If FirstArgAttrs is non-null, then the caller parsed those attributes
+/// immediately after the open paren - they will be applied to the DeclSpec
+/// of the first parameter.
///
/// If RequiresArg is true, then the first argument of the function is required
/// to be present and required to not be an identifier list.
@@ -6526,7 +7084,7 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
SmallVector<SourceRange, 2> DynamicExceptionRanges;
ExprResult NoexceptExpr;
CachedTokens *ExceptionSpecTokens = nullptr;
- ParsedAttributesWithRange FnAttrs(AttrFactory);
+ ParsedAttributes FnAttrs(AttrFactory);
TypeResult TrailingReturnType;
SourceLocation TrailingReturnTypeLoc;
@@ -6555,13 +7113,15 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
ProhibitAttributes(FnAttrs);
} else {
if (Tok.isNot(tok::r_paren))
- ParseParameterDeclarationClause(D.getContext(), FirstArgAttrs, ParamInfo,
- EllipsisLoc);
+ ParseParameterDeclarationClause(D, FirstArgAttrs, ParamInfo, EllipsisLoc);
else if (RequiresArg)
Diag(Tok, diag::err_argument_required_after_attribute);
- HasProto = ParamInfo.size() || getLangOpts().CPlusPlus
- || getLangOpts().OpenCL;
+ // OpenCL disallows functions without a prototype, but it doesn't enforce
+ // strict prototypes as in C23 because it allows a function definition to
+ // have an identifier list. See OpenCL 3.0 6.11/g for more details.
+ HasProto = ParamInfo.size() || getLangOpts().requiresStrictPrototypes() ||
+ getLangOpts().OpenCL;
// If we have the closing ')', eat it.
Tracker.consumeClose();
@@ -6589,7 +7149,7 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
if (ParseRefQualifier(RefQualifierIsLValueRef, RefQualifierLoc))
EndLoc = RefQualifierLoc;
- llvm::Optional<Sema::CXXThisScopeRAII> ThisScope;
+ std::optional<Sema::CXXThisScopeRAII> ThisScope;
InitCXXThisScopeForDeclaratorIfRelevant(D, DS, ThisScope);
// Parse exception-specification[opt].
@@ -6642,7 +7202,7 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
TrailingReturnTypeLoc = Range.getBegin();
EndLoc = Range.getEnd();
}
- } else if (standardAttributesAllowed()) {
+ } else {
MaybeParseCXX11Attributes(FnAttrs);
}
}
@@ -6652,14 +7212,22 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
// this in C and not C++, where the decls will continue to live in the
// surrounding context.
SmallVector<NamedDecl *, 0> DeclsInPrototype;
- if (getCurScope()->getFlags() & Scope::FunctionDeclarationScope &&
- !getLangOpts().CPlusPlus) {
+ if (getCurScope()->isFunctionDeclarationScope() && !getLangOpts().CPlusPlus) {
for (Decl *D : getCurScope()->decls()) {
NamedDecl *ND = dyn_cast<NamedDecl>(D);
if (!ND || isa<ParmVarDecl>(ND))
continue;
DeclsInPrototype.push_back(ND);
}
+ // Sort DeclsInPrototype based on raw encoding of the source location.
+ // Scope::decls() is iterating over a SmallPtrSet so sort the Decls before
+ // moving to DeclContext. This provides a stable ordering for traversing
+ // Decls in DeclContext, which is important for tasks like ASTWriter for
+ // deterministic output.
+ llvm::sort(DeclsInPrototype, [](Decl *D1, Decl *D2) {
+ return D1->getLocation().getRawEncoding() <
+ D2->getLocation().getRawEncoding();
+ });
}
// Remember that we parsed a function type, and remember the attributes.
@@ -6699,7 +7267,7 @@ bool Parser::ParseRefQualifier(bool &RefQualifierIsLValueRef,
/// Note that identifier-lists are only allowed for normal declarators, not for
/// abstract-declarators.
bool Parser::isFunctionDeclaratorIdentifierList() {
- return !getLangOpts().CPlusPlus
+ return !getLangOpts().requiresStrictPrototypes()
&& Tok.is(tok::identifier)
&& !TryAltiVecVectorToken()
// K&R identifier lists can't have typedefs as identifiers, per C99
@@ -6733,6 +7301,10 @@ bool Parser::isFunctionDeclaratorIdentifierList() {
void Parser::ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo) {
+ // We should never reach this point in C23 or C++.
+ assert(!getLangOpts().requiresStrictPrototypes() &&
+ "Cannot parse an identifier list in C23 or C++");
+
// If there was no identifier specified for the declarator, either we are in
// an abstract-declarator, or we are in a parameter declarator which was found
// to be abstract. In abstract-declarators, identifier lists are not valid:
@@ -6781,7 +7353,7 @@ void Parser::ParseFunctionDeclaratorIdentifierList(
///
/// DeclContext is the context of the declarator being parsed. If FirstArgAttrs
/// is non-null, then the caller parsed those attributes immediately after the
-/// open paren - they should be considered to be part of the first parameter.
+/// open paren - they will be applied to the DeclSpec of the first parameter.
///
/// After returning, ParamInfo will hold the parsed parameters. EllipsisLoc will
/// be the location of the ellipsis, if any was parsed.
@@ -6805,12 +7377,12 @@ void Parser::ParseFunctionDeclaratorIdentifierList(
/// '=' assignment-expression
/// [GNU] declaration-specifiers abstract-declarator[opt] attributes
/// [C++11] attribute-specifier-seq parameter-declaration
+/// [C++2b] attribute-specifier-seq 'this' parameter-declaration
///
void Parser::ParseParameterDeclarationClause(
- DeclaratorContext DeclaratorCtx,
- ParsedAttributes &FirstArgAttrs,
- SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
- SourceLocation &EllipsisLoc) {
+ DeclaratorContext DeclaratorCtx, ParsedAttributes &FirstArgAttrs,
+ SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
+ SourceLocation &EllipsisLoc, bool IsACXXFunctionDeclaration) {
// Avoid exceeding the maximum function scope depth.
// See https://bugs.llvm.org/show_bug.cgi?id=19607
@@ -6824,6 +7396,23 @@ void Parser::ParseParameterDeclarationClause(
return;
}
+ // C++2a [temp.res]p5
+ // A qualified-id is assumed to name a type if
+ // - [...]
+ // - it is a decl-specifier of the decl-specifier-seq of a
+ // - [...]
+ // - parameter-declaration in a member-declaration [...]
+ // - parameter-declaration in a declarator of a function or function
+ // template declaration whose declarator-id is qualified [...]
+ // - parameter-declaration in a lambda-declarator [...]
+ auto AllowImplicitTypename = ImplicitTypenameContext::No;
+ if (DeclaratorCtx == DeclaratorContext::Member ||
+ DeclaratorCtx == DeclaratorContext::LambdaExpr ||
+ DeclaratorCtx == DeclaratorContext::RequiresExpr ||
+ IsACXXFunctionDeclaration) {
+ AllowImplicitTypename = ImplicitTypenameContext::Yes;
+ }
+
do {
// FIXME: Issue a diagnostic if we parsed an attribute-specifier-seq
// before deciding this was a parameter-declaration-clause.
@@ -6834,37 +7423,55 @@ void Parser::ParseParameterDeclarationClause(
// Just use the ParsingDeclaration "scope" of the declarator.
DeclSpec DS(AttrFactory);
- // Parse any C++11 attributes.
- MaybeParseCXX11Attributes(DS.getAttributes());
+ ParsedAttributes ArgDeclAttrs(AttrFactory);
+ ParsedAttributes ArgDeclSpecAttrs(AttrFactory);
+
+ if (FirstArgAttrs.Range.isValid()) {
+ // If the caller parsed attributes for the first argument, add them now.
+ // Take them so that we only apply the attributes to the first parameter.
+ // We have already started parsing the decl-specifier sequence, so don't
+ // parse any parameter-declaration pieces that precede it.
+ ArgDeclSpecAttrs.takeAllFrom(FirstArgAttrs);
+ } else {
+ // Parse any C++11 attributes.
+ MaybeParseCXX11Attributes(ArgDeclAttrs);
- // Skip any Microsoft attributes before a param.
- MaybeParseMicrosoftAttributes(DS.getAttributes());
+ // Skip any Microsoft attributes before a param.
+ MaybeParseMicrosoftAttributes(ArgDeclSpecAttrs);
+ }
SourceLocation DSStart = Tok.getLocation();
- // If the caller parsed attributes for the first argument, add them now.
- // Take them so that we only apply the attributes to the first parameter.
- // FIXME: If we can leave the attributes in the token stream somehow, we can
- // get rid of a parameter (FirstArgAttrs) and this statement. It might be
- // too much hassle.
- DS.takeAttributesFrom(FirstArgAttrs);
+ // Parse a C++23 Explicit Object Parameter
+ // We do that in all language modes to produce a better diagnostic.
+ SourceLocation ThisLoc;
+ if (getLangOpts().CPlusPlus && Tok.is(tok::kw_this))
+ ThisLoc = ConsumeToken();
- ParseDeclarationSpecifiers(DS);
+ ParseDeclarationSpecifiers(DS, /*TemplateInfo=*/ParsedTemplateInfo(),
+ AS_none, DeclSpecContext::DSC_normal,
+ /*LateAttrs=*/nullptr, AllowImplicitTypename);
+ DS.takeAttributesFrom(ArgDeclSpecAttrs);
// Parse the declarator. This is "PrototypeContext" or
// "LambdaExprParameterContext", because we must accept either
// 'declarator' or 'abstract-declarator' here.
- Declarator ParmDeclarator(
- DS, DeclaratorCtx == DeclaratorContext::RequiresExpr
- ? DeclaratorContext::RequiresExpr
- : DeclaratorCtx == DeclaratorContext::LambdaExpr
- ? DeclaratorContext::LambdaExprParameter
- : DeclaratorContext::Prototype);
+ Declarator ParmDeclarator(DS, ArgDeclAttrs,
+ DeclaratorCtx == DeclaratorContext::RequiresExpr
+ ? DeclaratorContext::RequiresExpr
+ : DeclaratorCtx == DeclaratorContext::LambdaExpr
+ ? DeclaratorContext::LambdaExprParameter
+ : DeclaratorContext::Prototype);
ParseDeclarator(ParmDeclarator);
+ if (ThisLoc.isValid())
+ ParmDeclarator.SetRangeBegin(ThisLoc);
+
// Parse GNU attributes, if present.
MaybeParseGNUAttributes(ParmDeclarator);
+ if (getLangOpts().HLSL)
+ MaybeParseHLSLSemantics(DS.getAttributes());
if (Tok.is(tok::kw_requires)) {
// User tried to define a requires clause in a parameter declaration,
@@ -6920,17 +7527,18 @@ void Parser::ParseParameterDeclarationClause(
//
// We care about case 1) where the declarator type should be known, and
// the identifier should be null.
- if (!ParmDeclarator.isInvalidType() && !ParmDeclarator.hasName()) {
- if (Tok.getIdentifierInfo() &&
- Tok.getIdentifierInfo()->isKeyword(getLangOpts())) {
- Diag(Tok, diag::err_keyword_as_parameter) << PP.getSpelling(Tok);
- // Consume the keyword.
- ConsumeToken();
- }
+ if (!ParmDeclarator.isInvalidType() && !ParmDeclarator.hasName() &&
+ Tok.isNot(tok::raw_identifier) && !Tok.isAnnotation() &&
+ Tok.getIdentifierInfo() &&
+ Tok.getIdentifierInfo()->isKeyword(getLangOpts())) {
+ Diag(Tok, diag::err_keyword_as_parameter) << PP.getSpelling(Tok);
+ // Consume the keyword.
+ ConsumeToken();
}
// Inform the actions module about the parameter declarator, so it gets
// added to the current scope.
- Decl *Param = Actions.ActOnParamDeclarator(getCurScope(), ParmDeclarator);
+ Decl *Param =
+ Actions.ActOnParamDeclarator(getCurScope(), ParmDeclarator, ThisLoc);
// Parse the default argument, if any. We parse the default
// arguments in all dialects; the semantic analysis in
// ActOnParamDefaultArgument will reject the default argument in
@@ -6946,13 +7554,9 @@ void Parser::ParseParameterDeclarationClause(
DefArgToks.reset(new CachedTokens);
SourceLocation ArgStartLoc = NextToken().getLocation();
- if (!ConsumeAndStoreInitializer(*DefArgToks, CIK_DefaultArgument)) {
- DefArgToks.reset();
- Actions.ActOnParamDefaultArgumentError(Param, EqualLoc);
- } else {
- Actions.ActOnParamUnparsedDefaultArgument(Param, EqualLoc,
- ArgStartLoc);
- }
+ ConsumeAndStoreInitializer(*DefArgToks, CIK_DefaultArgument);
+ Actions.ActOnParamUnparsedDefaultArgument(Param, EqualLoc,
+ ArgStartLoc);
} else {
// Consume the '='.
ConsumeToken();
@@ -6968,11 +7572,21 @@ void Parser::ParseParameterDeclarationClause(
if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
DefArgResult = ParseBraceInitializer();
- } else
+ } else {
+ if (Tok.is(tok::l_paren) && NextToken().is(tok::l_brace)) {
+ Diag(Tok, diag::err_stmt_expr_in_default_arg) << 0;
+ Actions.ActOnParamDefaultArgumentError(Param, EqualLoc,
+ /*DefaultArg=*/nullptr);
+ // Skip the statement expression and continue parsing
+ SkipUntil(tok::comma, StopBeforeMatch);
+ continue;
+ }
DefArgResult = ParseAssignmentExpression();
+ }
DefArgResult = Actions.CorrectDelayedTyposInExpr(DefArgResult);
if (DefArgResult.isInvalid()) {
- Actions.ActOnParamDefaultArgumentError(Param, EqualLoc);
+ Actions.ActOnParamDefaultArgumentError(Param, EqualLoc,
+ /*DefaultArg=*/nullptr);
SkipUntil(tok::comma, tok::r_paren, StopAtSemi | StopBeforeMatch);
} else {
// Inform the actions module about the default argument
@@ -7110,7 +7724,7 @@ void Parser::ParseBracketDeclarator(Declarator &D) {
// Parse the constant-expression or assignment-expression now (depending
// on dialect).
if (getLangOpts().CPlusPlus) {
- NumElements = ParseConstantExpression();
+ NumElements = ParseArrayBoundExpression();
} else {
EnterExpressionEvaluationContext Unevaluated(
Actions, Sema::ExpressionEvaluationContext::ConstantEvaluated);
@@ -7150,7 +7764,8 @@ void Parser::ParseMisplacedBracketDeclarator(Declarator &D) {
assert(!D.mayOmitIdentifier() && "Declarator cannot omit identifier");
SourceLocation StartBracketLoc = Tok.getLocation();
- Declarator TempDeclarator(D.getDeclSpec(), D.getContext());
+ Declarator TempDeclarator(D.getDeclSpec(), ParsedAttributesView::none(),
+ D.getContext());
while (Tok.is(tok::l_square)) {
ParseBracketDeclarator(TempDeclarator);
@@ -7236,13 +7851,26 @@ void Parser::ParseMisplacedBracketDeclarator(Declarator &D) {
/// typeof ( expressions )
/// typeof ( type-name )
/// [GNU/C++] typeof unary-expression
+/// [C23] typeof-specifier:
+/// typeof '(' typeof-specifier-argument ')'
+/// typeof_unqual '(' typeof-specifier-argument ')'
+///
+/// typeof-specifier-argument:
+/// expression
+/// type-name
///
void Parser::ParseTypeofSpecifier(DeclSpec &DS) {
- assert(Tok.is(tok::kw_typeof) && "Not a typeof specifier");
+ assert(Tok.isOneOf(tok::kw_typeof, tok::kw_typeof_unqual) &&
+ "Not a typeof specifier");
+
+ bool IsUnqual = Tok.is(tok::kw_typeof_unqual);
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (getLangOpts().C23 && !II->getName().starts_with("__"))
+ Diag(Tok.getLocation(), diag::warn_c23_compat_keyword) << Tok.getName();
+
Token OpTok = Tok;
SourceLocation StartLoc = ConsumeToken();
-
- const bool hasParens = Tok.is(tok::l_paren);
+ bool HasParens = Tok.is(tok::l_paren);
EnterExpressionEvaluationContext Unevaluated(
Actions, Sema::ExpressionEvaluationContext::Unevaluated,
@@ -7253,8 +7881,8 @@ void Parser::ParseTypeofSpecifier(DeclSpec &DS) {
SourceRange CastRange;
ExprResult Operand = Actions.CorrectDelayedTyposInExpr(
ParseExprAfterUnaryExprOrTypeTrait(OpTok, isCastExpr, CastTy, CastRange));
- if (hasParens)
- DS.setTypeofParensRange(CastRange);
+ if (HasParens)
+ DS.setTypeArgumentRange(CastRange);
if (CastRange.getEnd().isInvalid())
// FIXME: Not accurate, the range gets one token more than it should.
@@ -7271,7 +7899,9 @@ void Parser::ParseTypeofSpecifier(DeclSpec &DS) {
const char *PrevSpec = nullptr;
unsigned DiagID;
// Check for duplicate type specifiers (e.g. "int typeof(int)").
- if (DS.SetTypeSpecType(DeclSpec::TST_typeofType, StartLoc, PrevSpec,
+ if (DS.SetTypeSpecType(IsUnqual ? DeclSpec::TST_typeof_unqualType
+ : DeclSpec::TST_typeofType,
+ StartLoc, PrevSpec,
DiagID, CastTy,
Actions.getASTContext().getPrintingPolicy()))
Diag(StartLoc, DiagID) << PrevSpec;
@@ -7294,7 +7924,9 @@ void Parser::ParseTypeofSpecifier(DeclSpec &DS) {
const char *PrevSpec = nullptr;
unsigned DiagID;
// Check for duplicate type specifiers (e.g. "int typeof(int)").
- if (DS.SetTypeSpecType(DeclSpec::TST_typeofExpr, StartLoc, PrevSpec,
+ if (DS.SetTypeSpecType(IsUnqual ? DeclSpec::TST_typeof_unqualExpr
+ : DeclSpec::TST_typeofExpr,
+ StartLoc, PrevSpec,
DiagID, Operand.get(),
Actions.getASTContext().getPrintingPolicy()))
Diag(StartLoc, DiagID) << PrevSpec;
@@ -7324,7 +7956,7 @@ void Parser::ParseAtomicSpecifier(DeclSpec &DS) {
if (T.getCloseLocation().isInvalid())
return;
- DS.setTypeofParensRange(T.getRange());
+ DS.setTypeArgumentRange(T.getRange());
DS.SetRangeEnd(T.getCloseLocation());
const char *PrevSpec = nullptr;
@@ -7418,3 +8050,89 @@ bool Parser::TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
}
return false;
}
+
+TypeResult Parser::ParseTypeFromString(StringRef TypeStr, StringRef Context,
+ SourceLocation IncludeLoc) {
+ // Consume (unexpanded) tokens up to the end-of-directive.
+ SmallVector<Token, 4> Tokens;
+ {
+ // Create a new buffer from which we will parse the type.
+ auto &SourceMgr = PP.getSourceManager();
+ FileID FID = SourceMgr.createFileID(
+ llvm::MemoryBuffer::getMemBufferCopy(TypeStr, Context), SrcMgr::C_User,
+ 0, 0, IncludeLoc);
+
+ // Form a new lexer that references the buffer.
+ Lexer L(FID, SourceMgr.getBufferOrFake(FID), PP);
+ L.setParsingPreprocessorDirective(true);
+
+ // Lex the tokens from that buffer.
+ Token Tok;
+ do {
+ L.Lex(Tok);
+ Tokens.push_back(Tok);
+ } while (Tok.isNot(tok::eod));
+ }
+
+ // Replace the "eod" token with an "eof" token identifying the end of
+ // the provided string.
+ Token &EndToken = Tokens.back();
+ EndToken.startToken();
+ EndToken.setKind(tok::eof);
+ EndToken.setLocation(Tok.getLocation());
+ EndToken.setEofData(TypeStr.data());
+
+ // Add the current token back.
+ Tokens.push_back(Tok);
+
+ // Enter the tokens into the token stream.
+ PP.EnterTokenStream(Tokens, /*DisableMacroExpansion=*/false,
+ /*IsReinject=*/false);
+
+ // Consume the current token so that we'll start parsing the tokens we
+ // added to the stream.
+ ConsumeAnyToken();
+
+ // Enter a new scope.
+ ParseScope LocalScope(this, 0);
+
+ // Parse the type.
+ TypeResult Result = ParseTypeName(nullptr);
+
+ // Check if we parsed the whole thing.
+ if (Result.isUsable() &&
+ (Tok.isNot(tok::eof) || Tok.getEofData() != TypeStr.data())) {
+ Diag(Tok.getLocation(), diag::err_type_unparsed);
+ }
+
+ // There could be leftover tokens (e.g. because of an error).
+ // Skip through until we reach the 'end of directive' token.
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+
+ // Consume the end token.
+ if (Tok.is(tok::eof) && Tok.getEofData() == TypeStr.data())
+ ConsumeAnyToken();
+ return Result;
+}
+
+void Parser::DiagnoseBitIntUse(const Token &Tok) {
+ // If the token is for _ExtInt, diagnose it as being deprecated. Otherwise,
+ // the token is about _BitInt and gets (potentially) diagnosed as use of an
+ // extension.
+ assert(Tok.isOneOf(tok::kw__ExtInt, tok::kw__BitInt) &&
+ "expected either an _ExtInt or _BitInt token!");
+
+ SourceLocation Loc = Tok.getLocation();
+ if (Tok.is(tok::kw__ExtInt)) {
+ Diag(Loc, diag::warn_ext_int_deprecated)
+ << FixItHint::CreateReplacement(Loc, "_BitInt");
+ } else {
+ // In C23 mode, diagnose that the use is not compatible with pre-C23 modes.
+ // Otherwise, diagnose that the use is a Clang extension.
+ if (getLangOpts().C23)
+ Diag(Loc, diag::warn_c23_compat_keyword) << Tok.getName();
+ else
+ Diag(Loc, diag::ext_bit_int) << getLangOpts().CPlusPlus;
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp b/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
index ca5c013a51fe..c0d771dc93da 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
@@ -10,21 +10,26 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Parse/Parser.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/PrettyDeclStackTrace.h"
+#include "clang/Basic/AttributeCommonInfo.h"
#include "clang/Basic/Attributes.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TokenKinds.h"
+#include "clang/Lex/LiteralSupport.h"
#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/TimeProfiler.h"
+#include <optional>
using namespace clang;
@@ -59,7 +64,7 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc) {
assert(Tok.is(tok::kw_namespace) && "Not a namespace!");
- SourceLocation NamespaceLoc = ConsumeToken(); // eat the 'namespace'.
+ SourceLocation NamespaceLoc = ConsumeToken(); // eat the 'namespace'.
ObjCDeclContextSwitch ObjCDC(*this);
if (Tok.is(tok::code_completion)) {
@@ -73,20 +78,32 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
InnerNamespaceInfoList ExtraNSs;
SourceLocation FirstNestedInlineLoc;
- ParsedAttributesWithRange attrs(AttrFactory);
- SourceLocation attrLoc;
- if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier()) {
- Diag(Tok.getLocation(), getLangOpts().CPlusPlus17
- ? diag::warn_cxx14_compat_ns_enum_attribute
- : diag::ext_ns_enum_attribute)
- << 0 /*namespace*/;
- attrLoc = Tok.getLocation();
- ParseCXX11Attributes(attrs);
- }
+ ParsedAttributes attrs(AttrFactory);
+
+ auto ReadAttributes = [&] {
+ bool MoreToParse;
+ do {
+ MoreToParse = false;
+ if (Tok.is(tok::kw___attribute)) {
+ ParseGNUAttributes(attrs);
+ MoreToParse = true;
+ }
+ if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier()) {
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus17
+ ? diag::warn_cxx14_compat_ns_enum_attribute
+ : diag::ext_ns_enum_attribute)
+ << 0 /*namespace*/;
+ ParseCXX11Attributes(attrs);
+ MoreToParse = true;
+ }
+ } while (MoreToParse);
+ };
+
+ ReadAttributes();
if (Tok.is(tok::identifier)) {
Ident = Tok.getIdentifierInfo();
- IdentLoc = ConsumeToken(); // eat the identifier.
+ IdentLoc = ConsumeToken(); // eat the identifier.
while (Tok.is(tok::coloncolon) &&
(NextToken().is(tok::identifier) ||
(NextToken().is(tok::kw_inline) &&
@@ -108,16 +125,14 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
}
}
+ ReadAttributes();
+
+ SourceLocation attrLoc = attrs.Range.getBegin();
+
// A nested namespace definition cannot have attributes.
if (!ExtraNSs.empty() && attrLoc.isValid())
Diag(attrLoc, diag::err_unexpected_nested_namespace_attribute);
- // Read label attributes, if present.
- if (Tok.is(tok::kw___attribute)) {
- attrLoc = Tok.getLocation();
- ParseGNUAttributes(attrs);
- }
-
if (Tok.is(tok::equal)) {
if (!Ident) {
Diag(Tok, diag::err_expected) << tok::identifier;
@@ -188,7 +203,7 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
std::string RBraces;
for (unsigned i = 0, e = ExtraNSs.size(); i != e; ++i)
- RBraces += "} ";
+ RBraces += "} ";
Diag(ExtraNSs[0].NamespaceLoc, diag::ext_nested_namespace_definition)
<< FixItHint::CreateReplacement(
@@ -205,8 +220,9 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
// If we're still good, complain about inline namespaces in non-C++0x now.
if (InlineLoc.isValid())
- Diag(InlineLoc, getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_inline_namespace : diag::ext_inline_namespace);
+ Diag(InlineLoc, getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_inline_namespace
+ : diag::ext_inline_namespace);
// Enter a scope for the namespace.
ParseScope NamespaceScope(this, Scope::DeclScope);
@@ -214,7 +230,7 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
UsingDirectiveDecl *ImplicitUsingDirectiveDecl = nullptr;
Decl *NamespcDecl = Actions.ActOnStartNamespaceDef(
getCurScope(), InlineLoc, NamespaceLoc, IdentLoc, Ident,
- T.getOpenLocation(), attrs, ImplicitUsingDirectiveDecl);
+ T.getOpenLocation(), attrs, ImplicitUsingDirectiveDecl, false);
PrettyDeclStackTraceEntry CrashInfo(Actions.Context, NamespcDecl,
NamespaceLoc, "parsing namespace");
@@ -241,9 +257,10 @@ void Parser::ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
if (index == InnerNSs.size()) {
while (!tryParseMisplacedModuleImport() && Tok.isNot(tok::r_brace) &&
Tok.isNot(tok::eof)) {
- ParsedAttributesWithRange attrs(AttrFactory);
- MaybeParseCXX11Attributes(attrs);
- ParseExternalDeclaration(attrs);
+ ParsedAttributes DeclAttrs(AttrFactory);
+ MaybeParseCXX11Attributes(DeclAttrs);
+ ParsedAttributes EmptyDeclSpecAttrs(AttrFactory);
+ ParseExternalDeclaration(DeclAttrs, EmptyDeclSpecAttrs);
}
// The caller is what called check -- we are simply calling
@@ -261,7 +278,7 @@ void Parser::ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
Decl *NamespcDecl = Actions.ActOnStartNamespaceDef(
getCurScope(), InnerNSs[index].InlineLoc, InnerNSs[index].NamespaceLoc,
InnerNSs[index].IdentLoc, InnerNSs[index].Ident,
- Tracker.getOpenLocation(), attrs, ImplicitUsingDirectiveDecl);
+ Tracker.getOpenLocation(), attrs, ImplicitUsingDirectiveDecl, true);
assert(!ImplicitUsingDirectiveDecl &&
"nested namespace definition cannot define anonymous namespace");
@@ -291,7 +308,7 @@ Decl *Parser::ParseNamespaceAlias(SourceLocation NamespaceLoc,
CXXScopeSpec SS;
// Parse (optional) nested-name-specifier.
ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false,
/*MayBePseudoDestructor=*/nullptr,
/*IsTypename=*/false,
@@ -334,7 +351,7 @@ Decl *Parser::ParseNamespaceAlias(SourceLocation NamespaceLoc,
///
Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context) {
assert(isTokenStringLiteral() && "Not a string literal!");
- ExprResult Lang = ParseStringLiteralExpression(false);
+ ExprResult Lang = ParseUnevaluatedStringLiteralExpression();
ParseScope LinkageScope(this, Scope::DeclScope);
Decl *LinkageSpec =
@@ -344,8 +361,12 @@ Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context) {
getCurScope(), DS.getSourceRange().getBegin(), Lang.get(),
Tok.is(tok::l_brace) ? Tok.getLocation() : SourceLocation());
- ParsedAttributesWithRange attrs(AttrFactory);
- MaybeParseCXX11Attributes(attrs);
+ ParsedAttributes DeclAttrs(AttrFactory);
+ ParsedAttributes DeclSpecAttrs(AttrFactory);
+
+ while (MaybeParseCXX11Attributes(DeclAttrs) ||
+ MaybeParseGNUAttributes(DeclSpecAttrs))
+ ;
if (Tok.isNot(tok::l_brace)) {
// Reset the source range in DS, as the leading "extern"
@@ -354,7 +375,7 @@ Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context) {
DS.SetRangeEnd(SourceLocation());
// ... but anyway remember that such an "extern" was seen.
DS.setExternInLinkageSpec(true);
- ParseExternalDeclaration(attrs, &DS);
+ ParseExternalDeclaration(DeclAttrs, DeclSpecAttrs, &DS);
return LinkageSpec ? Actions.ActOnFinishLinkageSpecification(
getCurScope(), LinkageSpec, SourceLocation())
: nullptr;
@@ -362,7 +383,7 @@ Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context) {
DS.abort();
- ProhibitAttributes(attrs);
+ ProhibitAttributes(DeclAttrs);
BalancedDelimiterTracker T(*this, tok::l_brace);
T.consumeOpen();
@@ -392,11 +413,11 @@ Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context) {
case tok::r_brace:
if (!NestedModules)
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
- ParsedAttributesWithRange attrs(AttrFactory);
- MaybeParseCXX11Attributes(attrs);
- ParseExternalDeclaration(attrs);
+ ParsedAttributes DeclAttrs(AttrFactory);
+ MaybeParseCXX11Attributes(DeclAttrs);
+ ParseExternalDeclaration(DeclAttrs, DeclSpecAttrs);
continue;
}
@@ -409,7 +430,7 @@ Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context) {
: nullptr;
}
-/// Parse a C++ Modules TS export-declaration.
+/// Parse a standard C++ Modules export-declaration.
///
/// export-declaration:
/// 'export' declaration
@@ -426,10 +447,10 @@ Decl *Parser::ParseExportDeclaration() {
if (Tok.isNot(tok::l_brace)) {
// FIXME: Factor out a ParseExternalDeclarationWithAttrs.
- ParsedAttributesWithRange Attrs(AttrFactory);
- MaybeParseCXX11Attributes(Attrs);
- MaybeParseMicrosoftAttributes(Attrs);
- ParseExternalDeclaration(Attrs);
+ ParsedAttributes DeclAttrs(AttrFactory);
+ MaybeParseCXX11Attributes(DeclAttrs);
+ ParsedAttributes EmptyDeclSpecAttrs(AttrFactory);
+ ParseExternalDeclaration(DeclAttrs, EmptyDeclSpecAttrs);
return Actions.ActOnFinishExportDecl(getCurScope(), ExportDecl,
SourceLocation());
}
@@ -437,19 +458,12 @@ Decl *Parser::ParseExportDeclaration() {
BalancedDelimiterTracker T(*this, tok::l_brace);
T.consumeOpen();
- // The Modules TS draft says "An export-declaration shall declare at least one
- // entity", but the intent is that it shall contain at least one declaration.
- if (Tok.is(tok::r_brace) && getLangOpts().ModulesTS) {
- Diag(ExportLoc, diag::err_export_empty)
- << SourceRange(ExportLoc, Tok.getLocation());
- }
-
while (!tryParseMisplacedModuleImport() && Tok.isNot(tok::r_brace) &&
Tok.isNot(tok::eof)) {
- ParsedAttributesWithRange Attrs(AttrFactory);
- MaybeParseCXX11Attributes(Attrs);
- MaybeParseMicrosoftAttributes(Attrs);
- ParseExternalDeclaration(Attrs);
+ ParsedAttributes DeclAttrs(AttrFactory);
+ MaybeParseCXX11Attributes(DeclAttrs);
+ ParsedAttributes EmptyDeclSpecAttrs(AttrFactory);
+ ParseExternalDeclaration(DeclAttrs, EmptyDeclSpecAttrs);
}
T.consumeClose();
@@ -459,11 +473,9 @@ Decl *Parser::ParseExportDeclaration() {
/// ParseUsingDirectiveOrDeclaration - Parse C++ using using-declaration or
/// using-directive. Assumes that current token is 'using'.
-Parser::DeclGroupPtrTy
-Parser::ParseUsingDirectiveOrDeclaration(DeclaratorContext Context,
- const ParsedTemplateInfo &TemplateInfo,
- SourceLocation &DeclEnd,
- ParsedAttributesWithRange &attrs) {
+Parser::DeclGroupPtrTy Parser::ParseUsingDirectiveOrDeclaration(
+ DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
+ SourceLocation &DeclEnd, ParsedAttributes &Attrs) {
assert(Tok.is(tok::kw_using) && "Not using token");
ObjCDeclContextSwitch ObjCDC(*this);
@@ -489,15 +501,15 @@ Parser::ParseUsingDirectiveOrDeclaration(DeclaratorContext Context,
if (TemplateInfo.Kind) {
SourceRange R = TemplateInfo.getSourceRange();
Diag(UsingLoc, diag::err_templated_using_directive_declaration)
- << 0 /* directive */ << R << FixItHint::CreateRemoval(R);
+ << 0 /* directive */ << R << FixItHint::CreateRemoval(R);
}
- Decl *UsingDir = ParseUsingDirective(Context, UsingLoc, DeclEnd, attrs);
+ Decl *UsingDir = ParseUsingDirective(Context, UsingLoc, DeclEnd, Attrs);
return Actions.ConvertDeclToDeclGroup(UsingDir);
}
// Otherwise, it must be a using-declaration or an alias-declaration.
- return ParseUsingDeclaration(Context, TemplateInfo, UsingLoc, DeclEnd, attrs,
+ return ParseUsingDeclaration(Context, TemplateInfo, UsingLoc, DeclEnd, Attrs,
AS_none);
}
@@ -529,7 +541,7 @@ Decl *Parser::ParseUsingDirective(DeclaratorContext Context,
CXXScopeSpec SS;
// Parse (optional) nested-name-specifier.
ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false,
/*MayBePseudoDestructor=*/nullptr,
/*IsTypename=*/false,
@@ -598,7 +610,7 @@ bool Parser::ParseUsingDeclarator(DeclaratorContext Context,
// Parse nested-name-specifier.
IdentifierInfo *LastII = nullptr;
if (ParseOptionalCXXScopeSpecifier(D.SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false,
/*MayBePseudoDtor=*/nullptr,
/*IsTypename=*/false,
@@ -624,6 +636,7 @@ bool Parser::ParseUsingDeclarator(DeclaratorContext Context,
Tok.is(tok::identifier) &&
(NextToken().is(tok::semi) || NextToken().is(tok::comma) ||
NextToken().is(tok::ellipsis) || NextToken().is(tok::l_square) ||
+ NextToken().isRegularKeywordAttribute() ||
NextToken().is(tok::kw___attribute)) &&
D.SS.isNotEmpty() && LastII == Tok.getIdentifierInfo() &&
!D.SS.getScopeRep()->getAsNamespace() &&
@@ -644,9 +657,9 @@ bool Parser::ParseUsingDeclarator(DeclaratorContext Context,
}
if (TryConsumeToken(tok::ellipsis, D.EllipsisLoc))
- Diag(Tok.getLocation(), getLangOpts().CPlusPlus17 ?
- diag::warn_cxx17_compat_using_declaration_pack :
- diag::ext_using_declaration_pack);
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus17
+ ? diag::warn_cxx17_compat_using_declaration_pack
+ : diag::ext_using_declaration_pack);
return false;
}
@@ -669,16 +682,20 @@ bool Parser::ParseUsingDeclarator(DeclaratorContext Context,
///
/// using-enum-declaration: [C++20, dcl.enum]
/// 'using' elaborated-enum-specifier ;
+/// The terminal name of the elaborated-enum-specifier undergoes
+/// ordinary lookup
///
/// elaborated-enum-specifier:
/// 'enum' nested-name-specifier[opt] identifier
-Parser::DeclGroupPtrTy
-Parser::ParseUsingDeclaration(
+Parser::DeclGroupPtrTy Parser::ParseUsingDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc, SourceLocation &DeclEnd,
- ParsedAttributesWithRange &PrefixAttrs, AccessSpecifier AS) {
+ ParsedAttributes &PrefixAttrs, AccessSpecifier AS) {
SourceLocation UELoc;
- if (TryConsumeToken(tok::kw_enum, UELoc)) {
+ bool InInitStatement = Context == DeclaratorContext::SelectionInit ||
+ Context == DeclaratorContext::ForInit;
+
+ if (TryConsumeToken(tok::kw_enum, UELoc) && !InInitStatement) {
// C++20 using-enum
Diag(UELoc, getLangOpts().CPlusPlus20
? diag::warn_cxx17_compat_using_enum_declaration
@@ -686,21 +703,47 @@ Parser::ParseUsingDeclaration(
DiagnoseCXX11AttributeExtension(PrefixAttrs);
- DeclSpec DS(AttrFactory);
- ParseEnumSpecifier(UELoc, DS, TemplateInfo, AS,
- // DSC_trailing has the semantics we desire
- DeclSpecContext::DSC_trailing);
-
if (TemplateInfo.Kind) {
SourceRange R = TemplateInfo.getSourceRange();
Diag(UsingLoc, diag::err_templated_using_directive_declaration)
<< 1 /* declaration */ << R << FixItHint::CreateRemoval(R);
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
+ CXXScopeSpec SS;
+ if (ParseOptionalCXXScopeSpecifier(SS, /*ParsedType=*/nullptr,
+ /*ObectHasErrors=*/false,
+ /*EnteringConttext=*/false,
+ /*MayBePseudoDestructor=*/nullptr,
+ /*IsTypename=*/false,
+ /*IdentifierInfo=*/nullptr,
+ /*OnlyNamespace=*/false,
+ /*InUsingDeclaration=*/true)) {
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
+
+ if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
+ Actions.CodeCompleteUsing(getCurScope());
+ return nullptr;
+ }
+ if (!Tok.is(tok::identifier)) {
+ Diag(Tok.getLocation(), diag::err_using_enum_expect_identifier)
+ << Tok.is(tok::kw_enum);
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
+ IdentifierInfo *IdentInfo = Tok.getIdentifierInfo();
+ SourceLocation IdentLoc = ConsumeToken();
+ Decl *UED = Actions.ActOnUsingEnumDeclaration(
+ getCurScope(), AS, UsingLoc, UELoc, IdentLoc, *IdentInfo, &SS);
+ if (!UED) {
+ SkipUntil(tok::semi);
return nullptr;
}
- Decl *UED = Actions.ActOnUsingEnumDeclaration(getCurScope(), AS, UsingLoc,
- UELoc, DS);
DeclEnd = Tok.getLocation();
if (ExpectAndConsume(tok::semi, diag::err_expected_after,
"using-enum declaration"))
@@ -711,28 +754,35 @@ Parser::ParseUsingDeclaration(
// Check for misplaced attributes before the identifier in an
// alias-declaration.
- ParsedAttributesWithRange MisplacedAttrs(AttrFactory);
+ ParsedAttributes MisplacedAttrs(AttrFactory);
MaybeParseCXX11Attributes(MisplacedAttrs);
+ if (InInitStatement && Tok.isNot(tok::identifier))
+ return nullptr;
+
UsingDeclarator D;
bool InvalidDeclarator = ParseUsingDeclarator(Context, D);
- ParsedAttributesWithRange Attrs(AttrFactory);
+ ParsedAttributes Attrs(AttrFactory);
MaybeParseAttributes(PAKM_GNU | PAKM_CXX11, Attrs);
// If we had any misplaced attributes from earlier, this is where they
// should have been written.
if (MisplacedAttrs.Range.isValid()) {
- Diag(MisplacedAttrs.Range.getBegin(), diag::err_attributes_not_allowed)
+ auto *FirstAttr =
+ MisplacedAttrs.empty() ? nullptr : &MisplacedAttrs.front();
+ auto &Range = MisplacedAttrs.Range;
+ (FirstAttr && FirstAttr->isRegularKeywordAttribute()
+ ? Diag(Range.getBegin(), diag::err_keyword_not_allowed) << FirstAttr
+ : Diag(Range.getBegin(), diag::err_attributes_not_allowed))
<< FixItHint::CreateInsertionFromRange(
- Tok.getLocation(),
- CharSourceRange::getTokenRange(MisplacedAttrs.Range))
- << FixItHint::CreateRemoval(MisplacedAttrs.Range);
+ Tok.getLocation(), CharSourceRange::getTokenRange(Range))
+ << FixItHint::CreateRemoval(Range);
Attrs.takeAllFrom(MisplacedAttrs);
}
// Maybe this is an alias-declaration.
- if (Tok.is(tok::equal)) {
+ if (Tok.is(tok::equal) || InInitStatement) {
if (InvalidDeclarator) {
SkipUntil(tok::semi);
return nullptr;
@@ -754,7 +804,7 @@ Parser::ParseUsingDeclaration(
if (TemplateInfo.Kind) {
SourceRange R = TemplateInfo.getSourceRange();
Diag(UsingLoc, diag::err_templated_using_directive_declaration)
- << 1 /* declaration */ << R << FixItHint::CreateRemoval(R);
+ << 1 /* declaration */ << R << FixItHint::CreateRemoval(R);
// Unfortunately, we have to bail out instead of recovering by
// ignoring the parameters, just in case the nested name specifier
@@ -799,9 +849,10 @@ Parser::ParseUsingDeclaration(
}
if (DeclsInGroup.size() > 1)
- Diag(Tok.getLocation(), getLangOpts().CPlusPlus17 ?
- diag::warn_cxx17_compat_multi_using_declaration :
- diag::ext_multi_using_declaration);
+ Diag(Tok.getLocation(),
+ getLangOpts().CPlusPlus17
+ ? diag::warn_cxx17_compat_multi_using_declaration
+ : diag::ext_multi_using_declaration);
// Eat ';'.
DeclEnd = Tok.getLocation();
@@ -823,9 +874,9 @@ Decl *Parser::ParseAliasDeclarationAfterDeclarator(
return nullptr;
}
- Diag(Tok.getLocation(), getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_alias_declaration :
- diag::ext_alias_declaration);
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_alias_declaration
+ : diag::ext_alias_declaration);
// Type alias templates cannot be specialized.
int SpecKind = -1;
@@ -844,7 +895,7 @@ Decl *Parser::ParseAliasDeclarationAfterDeclarator(
else
Range = TemplateInfo.getSourceRange();
Diag(Range.getBegin(), diag::err_alias_declaration_specialization)
- << SpecKind << Range;
+ << SpecKind << Range;
SkipUntil(tok::semi);
return nullptr;
}
@@ -857,15 +908,15 @@ Decl *Parser::ParseAliasDeclarationAfterDeclarator(
return nullptr;
} else if (D.TypenameLoc.isValid())
Diag(D.TypenameLoc, diag::err_alias_declaration_not_identifier)
- << FixItHint::CreateRemoval(SourceRange(
- D.TypenameLoc,
- D.SS.isNotEmpty() ? D.SS.getEndLoc() : D.TypenameLoc));
+ << FixItHint::CreateRemoval(
+ SourceRange(D.TypenameLoc, D.SS.isNotEmpty() ? D.SS.getEndLoc()
+ : D.TypenameLoc));
else if (D.SS.isNotEmpty())
Diag(D.SS.getBeginLoc(), diag::err_alias_declaration_not_identifier)
- << FixItHint::CreateRemoval(D.SS.getRange());
+ << FixItHint::CreateRemoval(D.SS.getRange());
if (D.EllipsisLoc.isValid())
Diag(D.EllipsisLoc, diag::err_alias_declaration_pack_expansion)
- << FixItHint::CreateRemoval(SourceRange(D.EllipsisLoc));
+ << FixItHint::CreateRemoval(SourceRange(D.EllipsisLoc));
Decl *DeclFromDeclSpec = nullptr;
TypeResult TypeAlias =
@@ -885,8 +936,8 @@ Decl *Parser::ParseAliasDeclarationAfterDeclarator(
TemplateParameterLists *TemplateParams = TemplateInfo.TemplateParams;
MultiTemplateParamsArg TemplateParamsArg(
- TemplateParams ? TemplateParams->data() : nullptr,
- TemplateParams ? TemplateParams->size() : 0);
+ TemplateParams ? TemplateParams->data() : nullptr,
+ TemplateParams ? TemplateParams->size() : 0);
return Actions.ActOnAliasDeclaration(getCurScope(), AS, TemplateParamsArg,
UsingLoc, D.Name, Attrs, TypeAlias,
DeclFromDeclSpec);
@@ -910,17 +961,23 @@ static FixItHint getStaticAssertNoMessageFixIt(const Expr *AssertExpr,
/// [C11] static_assert-declaration:
/// _Static_assert ( constant-expression , string-literal ) ;
///
-Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd){
+Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd) {
assert(Tok.isOneOf(tok::kw_static_assert, tok::kw__Static_assert) &&
"Not a static_assert declaration");
+ // Save the token name used for static assertion.
+ const char *TokName = Tok.getName();
+
if (Tok.is(tok::kw__Static_assert) && !getLangOpts().C11)
Diag(Tok, diag::ext_c11_feature) << Tok.getName();
if (Tok.is(tok::kw_static_assert)) {
- if (!getLangOpts().CPlusPlus)
- Diag(Tok, diag::ext_ms_static_assert)
- << FixItHint::CreateReplacement(Tok.getLocation(), "_Static_assert");
- else
+ if (!getLangOpts().CPlusPlus) {
+ if (getLangOpts().C23)
+ Diag(Tok, diag::warn_c23_compat_keyword) << Tok.getName();
+ else
+ Diag(Tok, diag::ext_ms_static_assert) << FixItHint::CreateReplacement(
+ Tok.getLocation(), "_Static_assert");
+ } else
Diag(Tok, diag::warn_cxx98_compat_static_assert);
}
@@ -948,7 +1005,7 @@ Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd){
DiagVal = diag::warn_cxx14_compat_static_assert_no_message;
else if (getLangOpts().CPlusPlus)
DiagVal = diag::ext_cxx_static_assert_no_message;
- else if (getLangOpts().C2x)
+ else if (getLangOpts().C23)
DiagVal = diag::warn_c17_compat_static_assert_no_message;
else
DiagVal = diag::ext_c_static_assert_no_message;
@@ -960,14 +1017,30 @@ Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd){
return nullptr;
}
- if (!isTokenStringLiteral()) {
+ bool ParseAsExpression = false;
+ if (getLangOpts().CPlusPlus26) {
+ for (unsigned I = 0;; ++I) {
+ const Token &T = GetLookAheadToken(I);
+ if (T.is(tok::r_paren))
+ break;
+ if (!tokenIsLikeStringLiteral(T, getLangOpts()) || T.hasUDSuffix()) {
+ ParseAsExpression = true;
+ break;
+ }
+ }
+ }
+
+ if (ParseAsExpression)
+ AssertMessage = ParseConstantExpressionInExprEvalContext();
+ else if (tokenIsLikeStringLiteral(Tok, getLangOpts()))
+ AssertMessage = ParseUnevaluatedStringLiteralExpression();
+ else {
Diag(Tok, diag::err_expected_string_literal)
- << /*Source='static_assert'*/1;
+ << /*Source='static_assert'*/ 1;
SkipMalformedDecl();
return nullptr;
}
- AssertMessage = ParseStringLiteralExpression();
if (AssertMessage.isInvalid()) {
SkipMalformedDecl();
return nullptr;
@@ -977,10 +1050,9 @@ Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd){
T.consumeClose();
DeclEnd = Tok.getLocation();
- ExpectAndConsumeSemi(diag::err_expected_semi_after_static_assert);
+ ExpectAndConsumeSemi(diag::err_expected_semi_after_static_assert, TokName);
- return Actions.ActOnStaticAssertDeclaration(StaticAssertLoc,
- AssertExpr.get(),
+ return Actions.ActOnStaticAssertDeclaration(StaticAssertLoc, AssertExpr.get(),
AssertMessage.get(),
T.getCloseLocation());
}
@@ -991,8 +1063,8 @@ Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd){
/// 'decltype' ( 'auto' ) [C++1y]
///
SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
- assert(Tok.isOneOf(tok::kw_decltype, tok::annot_decltype)
- && "Not a decltype specifier");
+ assert(Tok.isOneOf(tok::kw_decltype, tok::annot_decltype) &&
+ "Not a decltype specifier");
ExprResult Result;
SourceLocation StartLoc = Tok.getLocation();
@@ -1001,6 +1073,9 @@ SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
if (Tok.is(tok::annot_decltype)) {
Result = getExprAnnotation(Tok);
EndLoc = Tok.getAnnotationEndLoc();
+ // Unfortunately, we don't know the LParen source location as the annotated
+ // token doesn't have it.
+ DS.setTypeArgumentRange(SourceRange(SourceLocation(), EndLoc));
ConsumeAnnotationToken();
if (Result.isInvalid()) {
DS.SetTypeSpecError();
@@ -1013,22 +1088,21 @@ SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
ConsumeToken();
BalancedDelimiterTracker T(*this, tok::l_paren);
- if (T.expectAndConsume(diag::err_expected_lparen_after,
- "decltype", tok::r_paren)) {
+ if (T.expectAndConsume(diag::err_expected_lparen_after, "decltype",
+ tok::r_paren)) {
DS.SetTypeSpecError();
- return T.getOpenLocation() == Tok.getLocation() ?
- StartLoc : T.getOpenLocation();
+ return T.getOpenLocation() == Tok.getLocation() ? StartLoc
+ : T.getOpenLocation();
}
// Check for C++1y 'decltype(auto)'.
- if (Tok.is(tok::kw_auto)) {
- // No need to disambiguate here: an expression can't start with 'auto',
- // because the typename-specifier in a function-style cast operation can't
- // be 'auto'.
+ if (Tok.is(tok::kw_auto) && NextToken().is(tok::r_paren)) {
+ // the typename-specifier in a function-style cast expression may
+ // be 'auto' since C++23.
Diag(Tok.getLocation(),
getLangOpts().CPlusPlus14
- ? diag::warn_cxx11_compat_decltype_auto_type_specifier
- : diag::ext_decltype_auto_type_specifier);
+ ? diag::warn_cxx11_compat_decltype_auto_type_specifier
+ : diag::ext_decltype_auto_type_specifier);
ConsumeToken();
} else {
// Parse the expression
@@ -1065,6 +1139,7 @@ SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
// Match the ')'
T.consumeClose();
+ DS.setTypeArgumentRange(T.getRange());
if (T.getCloseLocation().isInvalid()) {
DS.SetTypeSpecError();
// FIXME: this should return the location of the last token
@@ -1085,18 +1160,17 @@ SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
unsigned DiagID;
const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
// Check for duplicate type specifiers (e.g. "int decltype(a)").
- if (Result.get()
- ? DS.SetTypeSpecType(DeclSpec::TST_decltype, StartLoc, PrevSpec,
- DiagID, Result.get(), Policy)
- : DS.SetTypeSpecType(DeclSpec::TST_decltype_auto, StartLoc, PrevSpec,
- DiagID, Policy)) {
+ if (Result.get() ? DS.SetTypeSpecType(DeclSpec::TST_decltype, StartLoc,
+ PrevSpec, DiagID, Result.get(), Policy)
+ : DS.SetTypeSpecType(DeclSpec::TST_decltype_auto, StartLoc,
+ PrevSpec, DiagID, Policy)) {
Diag(StartLoc, DiagID) << PrevSpec;
DS.SetTypeSpecError();
}
return EndLoc;
}
-void Parser::AnnotateExistingDecltypeSpecifier(const DeclSpec& DS,
+void Parser::AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc) {
// make sure we have a token we can turn into an annotation token
@@ -1109,49 +1183,61 @@ void Parser::AnnotateExistingDecltypeSpecifier(const DeclSpec& DS,
// semi-colon.
EndLoc = PP.getLastCachedTokenLocation();
}
- }
- else
- PP.EnterToken(Tok, /*IsReinject*/true);
+ } else
+ PP.EnterToken(Tok, /*IsReinject*/ true);
Tok.setKind(tok::annot_decltype);
setExprAnnotation(Tok,
- DS.getTypeSpecType() == TST_decltype ? DS.getRepAsExpr() :
- DS.getTypeSpecType() == TST_decltype_auto ? ExprResult() :
- ExprError());
+ DS.getTypeSpecType() == TST_decltype ? DS.getRepAsExpr()
+ : DS.getTypeSpecType() == TST_decltype_auto ? ExprResult()
+ : ExprError());
Tok.setAnnotationEndLoc(EndLoc);
Tok.setLocation(StartLoc);
PP.AnnotateCachedTokens(Tok);
}
-void Parser::ParseUnderlyingTypeSpecifier(DeclSpec &DS) {
- assert(Tok.is(tok::kw___underlying_type) &&
- "Not an underlying type specifier");
+DeclSpec::TST Parser::TypeTransformTokToDeclSpec() {
+ switch (Tok.getKind()) {
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) \
+ case tok::kw___##Trait: \
+ return DeclSpec::TST_##Trait;
+#include "clang/Basic/TransformTypeTraits.def"
+ default:
+ llvm_unreachable("passed in an unhandled type transformation built-in");
+ }
+}
+bool Parser::MaybeParseTypeTransformTypeSpecifier(DeclSpec &DS) {
+ if (!NextToken().is(tok::l_paren)) {
+ Tok.setKind(tok::identifier);
+ return false;
+ }
+ DeclSpec::TST TypeTransformTST = TypeTransformTokToDeclSpec();
SourceLocation StartLoc = ConsumeToken();
+
BalancedDelimiterTracker T(*this, tok::l_paren);
- if (T.expectAndConsume(diag::err_expected_lparen_after,
- "__underlying_type", tok::r_paren)) {
- return;
- }
+ if (T.expectAndConsume(diag::err_expected_lparen_after, Tok.getName(),
+ tok::r_paren))
+ return true;
TypeResult Result = ParseTypeName();
if (Result.isInvalid()) {
SkipUntil(tok::r_paren, StopAtSemi);
- return;
+ return true;
}
- // Match the ')'
T.consumeClose();
if (T.getCloseLocation().isInvalid())
- return;
+ return true;
const char *PrevSpec = nullptr;
unsigned DiagID;
- if (DS.SetTypeSpecType(DeclSpec::TST_underlyingType, StartLoc, PrevSpec,
- DiagID, Result.get(),
+ if (DS.SetTypeSpecType(TypeTransformTST, StartLoc, PrevSpec, DiagID,
+ Result.get(),
Actions.getASTContext().getPrintingPolicy()))
Diag(StartLoc, DiagID) << PrevSpec;
- DS.setTypeofParensRange(T.getRange());
+ DS.setTypeArgumentRange(T.getRange());
+ return true;
}
/// ParseBaseTypeSpecifier - Parse a C++ base-type-specifier which is either a
@@ -1177,14 +1263,14 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
// Ignore attempts to use typename
if (Tok.is(tok::kw_typename)) {
Diag(Tok, diag::err_expected_class_name_not_template)
- << FixItHint::CreateRemoval(Tok.getLocation());
+ << FixItHint::CreateRemoval(Tok.getLocation());
ConsumeToken();
}
// Parse optional nested-name-specifier
CXXScopeSpec SS;
if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false))
return true;
@@ -1196,21 +1282,23 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
if (Tok.isOneOf(tok::kw_decltype, tok::annot_decltype)) {
if (SS.isNotEmpty())
Diag(SS.getBeginLoc(), diag::err_unexpected_scope_on_base_decltype)
- << FixItHint::CreateRemoval(SS.getRange());
+ << FixItHint::CreateRemoval(SS.getRange());
// Fake up a Declarator to use with ActOnTypeName.
DeclSpec DS(AttrFactory);
EndLocation = ParseDecltypeSpecifier(DS);
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
- return Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
+ return Actions.ActOnTypeName(DeclaratorInfo);
}
// Check whether we have a template-id that names a type.
if (Tok.is(tok::annot_template_id)) {
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
if (TemplateId->mightBeType()) {
- AnnotateTemplateIdTokenAsType(SS, /*IsClassName*/true);
+ AnnotateTemplateIdTokenAsType(SS, ImplicitTypenameContext::No,
+ /*IsClassName=*/true);
assert(Tok.is(tok::annot_typename) && "template-id -> type failed");
TypeResult Type = getTypeAnnotation(Tok);
@@ -1237,10 +1325,9 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
// required nor permitted" mode, and do this there.
TemplateNameKind TNK = TNK_Non_template;
TemplateTy Template;
- if (!Actions.DiagnoseUnknownTemplateName(*Id, IdLoc, getCurScope(),
- &SS, Template, TNK)) {
- Diag(IdLoc, diag::err_unknown_template_name)
- << Id;
+ if (!Actions.DiagnoseUnknownTemplateName(*Id, IdLoc, getCurScope(), &SS,
+ Template, TNK)) {
+ Diag(IdLoc, diag::err_unknown_template_name) << Id;
}
// Form the template name
@@ -1253,7 +1340,8 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
return true;
if (Tok.is(tok::annot_template_id) &&
takeTemplateIdAnnotation(Tok)->mightBeType())
- AnnotateTemplateIdTokenAsType(SS, /*IsClassName*/true);
+ AnnotateTemplateIdTokenAsType(SS, ImplicitTypenameContext::No,
+ /*IsClassName=*/true);
// If we didn't end up with a typename token, there's nothing more we
// can do.
@@ -1274,7 +1362,8 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
*Id, IdLoc, getCurScope(), &SS, /*isClassName=*/true, false, nullptr,
/*IsCtorOrDtorName=*/false,
/*WantNontrivialTypeSourceInfo=*/true,
- /*IsClassTemplateDeductionContext*/ false, &CorrectedII);
+ /*IsClassTemplateDeductionContext=*/false, ImplicitTypenameContext::No,
+ &CorrectedII);
if (!Type) {
Diag(IdLoc, diag::err_expected_class_name);
return true;
@@ -1294,8 +1383,9 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
DS.SetTypeSpecType(TST_typename, IdLoc, PrevSpec, DiagID, Type,
Actions.getASTContext().getPrintingPolicy());
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
- return Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
+ return Actions.ActOnTypeName(DeclaratorInfo);
}
void Parser::ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs) {
@@ -1303,9 +1393,9 @@ void Parser::ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs) {
tok::kw___multiple_inheritance,
tok::kw___virtual_inheritance)) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ auto Kind = Tok.getKind();
SourceLocation AttrNameLoc = ConsumeToken();
- attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- ParsedAttr::AS_Keyword);
+ attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0, Kind);
}
}
@@ -1315,28 +1405,31 @@ void Parser::ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs) {
bool Parser::isValidAfterTypeSpecifier(bool CouldBeBitfield) {
// This switch enumerates the valid "follow" set for type-specifiers.
switch (Tok.getKind()) {
- default: break;
- case tok::semi: // struct foo {...} ;
- case tok::star: // struct foo {...} * P;
- case tok::amp: // struct foo {...} & R = ...
- case tok::ampamp: // struct foo {...} && R = ...
- case tok::identifier: // struct foo {...} V ;
- case tok::r_paren: //(struct foo {...} ) {4}
- case tok::coloncolon: // struct foo {...} :: a::b;
- case tok::annot_cxxscope: // struct foo {...} a:: b;
- case tok::annot_typename: // struct foo {...} a ::b;
- case tok::annot_template_id: // struct foo {...} a<int> ::b;
- case tok::kw_decltype: // struct foo {...} decltype (a)::b;
- case tok::l_paren: // struct foo {...} ( x);
- case tok::comma: // __builtin_offsetof(struct foo{...} ,
- case tok::kw_operator: // struct foo operator ++() {...}
- case tok::kw___declspec: // struct foo {...} __declspec(...)
- case tok::l_square: // void f(struct f [ 3])
- case tok::ellipsis: // void f(struct f ... [Ns])
+ default:
+ if (Tok.isRegularKeywordAttribute())
+ return true;
+ break;
+ case tok::semi: // struct foo {...} ;
+ case tok::star: // struct foo {...} * P;
+ case tok::amp: // struct foo {...} & R = ...
+ case tok::ampamp: // struct foo {...} && R = ...
+ case tok::identifier: // struct foo {...} V ;
+ case tok::r_paren: //(struct foo {...} ) {4}
+ case tok::coloncolon: // struct foo {...} :: a::b;
+ case tok::annot_cxxscope: // struct foo {...} a:: b;
+ case tok::annot_typename: // struct foo {...} a ::b;
+ case tok::annot_template_id: // struct foo {...} a<int> ::b;
+ case tok::kw_decltype: // struct foo {...} decltype (a)::b;
+ case tok::l_paren: // struct foo {...} ( x);
+ case tok::comma: // __builtin_offsetof(struct foo{...} ,
+ case tok::kw_operator: // struct foo operator ++() {...}
+ case tok::kw___declspec: // struct foo {...} __declspec(...)
+ case tok::l_square: // void f(struct f [ 3])
+ case tok::ellipsis: // void f(struct f ... [Ns])
// FIXME: we should emit semantic diagnostic when declaration
// attribute is in type attribute position.
- case tok::kw___attribute: // struct foo __attribute__((used)) x;
- case tok::annot_pragma_pack: // struct foo {...} _Pragma(pack(pop));
+ case tok::kw___attribute: // struct foo __attribute__((used)) x;
+ case tok::annot_pragma_pack: // struct foo {...} _Pragma(pack(pop));
// struct foo {...} _Pragma(section(...));
case tok::annot_pragma_ms_pragma:
// struct foo {...} _Pragma(vtordisp(pop));
@@ -1345,40 +1438,40 @@ bool Parser::isValidAfterTypeSpecifier(bool CouldBeBitfield) {
case tok::annot_pragma_ms_pointers_to_members:
return true;
case tok::colon:
- return CouldBeBitfield || // enum E { ... } : 2;
- ColonIsSacred; // _Generic(..., enum E : 2);
+ return CouldBeBitfield || // enum E { ... } : 2;
+ ColonIsSacred; // _Generic(..., enum E : 2);
// Microsoft compatibility
- case tok::kw___cdecl: // struct foo {...} __cdecl x;
- case tok::kw___fastcall: // struct foo {...} __fastcall x;
- case tok::kw___stdcall: // struct foo {...} __stdcall x;
- case tok::kw___thiscall: // struct foo {...} __thiscall x;
- case tok::kw___vectorcall: // struct foo {...} __vectorcall x;
+ case tok::kw___cdecl: // struct foo {...} __cdecl x;
+ case tok::kw___fastcall: // struct foo {...} __fastcall x;
+ case tok::kw___stdcall: // struct foo {...} __stdcall x;
+ case tok::kw___thiscall: // struct foo {...} __thiscall x;
+ case tok::kw___vectorcall: // struct foo {...} __vectorcall x;
// We will diagnose these calling-convention specifiers on non-function
// declarations later, so claim they are valid after a type specifier.
return getLangOpts().MicrosoftExt;
// Type qualifiers
- case tok::kw_const: // struct foo {...} const x;
- case tok::kw_volatile: // struct foo {...} volatile x;
- case tok::kw_restrict: // struct foo {...} restrict x;
- case tok::kw__Atomic: // struct foo {...} _Atomic x;
- case tok::kw___unaligned: // struct foo {...} __unaligned *x;
+ case tok::kw_const: // struct foo {...} const x;
+ case tok::kw_volatile: // struct foo {...} volatile x;
+ case tok::kw_restrict: // struct foo {...} restrict x;
+ case tok::kw__Atomic: // struct foo {...} _Atomic x;
+ case tok::kw___unaligned: // struct foo {...} __unaligned *x;
// Function specifiers
// Note, no 'explicit'. An explicit function must be either a conversion
// operator or a constructor. Either way, it can't have a return type.
- case tok::kw_inline: // struct foo inline f();
- case tok::kw_virtual: // struct foo virtual f();
- case tok::kw_friend: // struct foo friend f();
+ case tok::kw_inline: // struct foo inline f();
+ case tok::kw_virtual: // struct foo virtual f();
+ case tok::kw_friend: // struct foo friend f();
// Storage-class specifiers
- case tok::kw_static: // struct foo {...} static x;
- case tok::kw_extern: // struct foo {...} extern x;
- case tok::kw_typedef: // struct foo {...} typedef x;
- case tok::kw_register: // struct foo {...} register x;
- case tok::kw_auto: // struct foo {...} auto x;
- case tok::kw_mutable: // struct foo {...} mutable x;
- case tok::kw_thread_local: // struct foo {...} thread_local x;
- case tok::kw_constexpr: // struct foo {...} constexpr x;
- case tok::kw_consteval: // struct foo {...} consteval x;
- case tok::kw_constinit: // struct foo {...} constinit x;
+ case tok::kw_static: // struct foo {...} static x;
+ case tok::kw_extern: // struct foo {...} extern x;
+ case tok::kw_typedef: // struct foo {...} typedef x;
+ case tok::kw_register: // struct foo {...} register x;
+ case tok::kw_auto: // struct foo {...} auto x;
+ case tok::kw_mutable: // struct foo {...} mutable x;
+ case tok::kw_thread_local: // struct foo {...} thread_local x;
+ case tok::kw_constexpr: // struct foo {...} constexpr x;
+ case tok::kw_consteval: // struct foo {...} consteval x;
+ case tok::kw_constinit: // struct foo {...} constinit x;
// As shown above, type qualifiers and storage class specifiers absolutely
// can occur after class specifiers according to the grammar. However,
// almost no one actually writes code like this. If we see one of these,
@@ -1397,7 +1490,7 @@ bool Parser::isValidAfterTypeSpecifier(bool CouldBeBitfield) {
if (!isKnownToBeTypeSpecifier(NextToken()))
return true;
break;
- case tok::r_brace: // struct bar { struct foo {...} }
+ case tok::r_brace: // struct bar { struct foo {...} }
// Missing ';' at end of struct is accepted as an extension in C mode.
if (!getLangOpts().CPlusPlus)
return true;
@@ -1452,9 +1545,9 @@ bool Parser::isValidAfterTypeSpecifier(bool CouldBeBitfield) {
void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
SourceLocation StartLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
- AccessSpecifier AS,
- bool EnteringContext, DeclSpecContext DSC,
- ParsedAttributesWithRange &Attributes) {
+ AccessSpecifier AS, bool EnteringContext,
+ DeclSpecContext DSC,
+ ParsedAttributes &Attributes) {
DeclSpec::TST TagType;
if (TagTokKind == tok::kw_struct)
TagType = DeclSpec::TST_struct;
@@ -1474,28 +1567,23 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
return;
}
- // C++03 [temp.explicit] 14.7.2/8:
- // The usual access checking rules do not apply to names used to specify
- // explicit instantiations.
- //
- // As an extension we do not perform access checking on the names used to
- // specify explicit specializations either. This is important to allow
- // specializing traits classes for private types.
- //
- // Note that we don't suppress if this turns out to be an elaborated
- // type specifier.
- bool shouldDelayDiagsInTag =
- (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation ||
- TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization);
+ // C++20 [temp.class.spec] 13.7.5/10
+ // The usual access checking rules do not apply to non-dependent names
+ // used to specify template arguments of the simple-template-id of the
+ // partial specialization.
+ // C++20 [temp.spec] 13.9/6:
+ // The usual access checking rules do not apply to names in a declaration
+ // of an explicit instantiation or explicit specialization...
+ const bool shouldDelayDiagsInTag =
+ (TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate);
SuppressAccessChecks diagsFromTag(*this, shouldDelayDiagsInTag);
- ParsedAttributesWithRange attrs(AttrFactory);
+ ParsedAttributes attrs(AttrFactory);
// If attributes exist after tag, parse them.
MaybeParseAttributes(PAKM_CXX11 | PAKM_Declspec | PAKM_GNU, attrs);
// Parse inheritance specifiers.
- if (Tok.isOneOf(tok::kw___single_inheritance,
- tok::kw___multiple_inheritance,
+ if (Tok.isOneOf(tok::kw___single_inheritance, tok::kw___multiple_inheritance,
tok::kw___virtual_inheritance))
ParseMicrosoftInheritanceClassAttributes(attrs);
@@ -1506,61 +1594,70 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// C++11 attributes
SourceLocation AttrFixitLoc = Tok.getLocation();
- if (TagType == DeclSpec::TST_struct &&
- Tok.isNot(tok::identifier) &&
- !Tok.isAnnotation() &&
- Tok.getIdentifierInfo() &&
- Tok.isOneOf(tok::kw___is_abstract,
- tok::kw___is_aggregate,
- tok::kw___is_arithmetic,
- tok::kw___is_array,
- tok::kw___is_assignable,
- tok::kw___is_base_of,
- tok::kw___is_class,
- tok::kw___is_complete_type,
- tok::kw___is_compound,
- tok::kw___is_const,
- tok::kw___is_constructible,
- tok::kw___is_convertible,
- tok::kw___is_convertible_to,
- tok::kw___is_destructible,
- tok::kw___is_empty,
- tok::kw___is_enum,
- tok::kw___is_floating_point,
- tok::kw___is_final,
- tok::kw___is_function,
- tok::kw___is_fundamental,
- tok::kw___is_integral,
- tok::kw___is_interface_class,
- tok::kw___is_literal,
- tok::kw___is_lvalue_expr,
- tok::kw___is_lvalue_reference,
- tok::kw___is_member_function_pointer,
- tok::kw___is_member_object_pointer,
- tok::kw___is_member_pointer,
- tok::kw___is_nothrow_assignable,
- tok::kw___is_nothrow_constructible,
- tok::kw___is_nothrow_destructible,
- tok::kw___is_object,
- tok::kw___is_pod,
- tok::kw___is_pointer,
- tok::kw___is_polymorphic,
- tok::kw___is_reference,
- tok::kw___is_rvalue_expr,
- tok::kw___is_rvalue_reference,
- tok::kw___is_same,
- tok::kw___is_scalar,
- tok::kw___is_sealed,
- tok::kw___is_signed,
- tok::kw___is_standard_layout,
- tok::kw___is_trivial,
- tok::kw___is_trivially_assignable,
- tok::kw___is_trivially_constructible,
- tok::kw___is_trivially_copyable,
- tok::kw___is_union,
- tok::kw___is_unsigned,
- tok::kw___is_void,
- tok::kw___is_volatile))
+ if (TagType == DeclSpec::TST_struct && Tok.isNot(tok::identifier) &&
+ !Tok.isAnnotation() && Tok.getIdentifierInfo() &&
+ Tok.isOneOf(
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) tok::kw___##Trait,
+#include "clang/Basic/TransformTypeTraits.def"
+ tok::kw___is_abstract,
+ tok::kw___is_aggregate,
+ tok::kw___is_arithmetic,
+ tok::kw___is_array,
+ tok::kw___is_assignable,
+ tok::kw___is_base_of,
+ tok::kw___is_bounded_array,
+ tok::kw___is_class,
+ tok::kw___is_complete_type,
+ tok::kw___is_compound,
+ tok::kw___is_const,
+ tok::kw___is_constructible,
+ tok::kw___is_convertible,
+ tok::kw___is_convertible_to,
+ tok::kw___is_destructible,
+ tok::kw___is_empty,
+ tok::kw___is_enum,
+ tok::kw___is_floating_point,
+ tok::kw___is_final,
+ tok::kw___is_function,
+ tok::kw___is_fundamental,
+ tok::kw___is_integral,
+ tok::kw___is_interface_class,
+ tok::kw___is_literal,
+ tok::kw___is_lvalue_expr,
+ tok::kw___is_lvalue_reference,
+ tok::kw___is_member_function_pointer,
+ tok::kw___is_member_object_pointer,
+ tok::kw___is_member_pointer,
+ tok::kw___is_nothrow_assignable,
+ tok::kw___is_nothrow_constructible,
+ tok::kw___is_nothrow_destructible,
+ tok::kw___is_nullptr,
+ tok::kw___is_object,
+ tok::kw___is_pod,
+ tok::kw___is_pointer,
+ tok::kw___is_polymorphic,
+ tok::kw___is_reference,
+ tok::kw___is_referenceable,
+ tok::kw___is_rvalue_expr,
+ tok::kw___is_rvalue_reference,
+ tok::kw___is_same,
+ tok::kw___is_scalar,
+ tok::kw___is_scoped_enum,
+ tok::kw___is_sealed,
+ tok::kw___is_signed,
+ tok::kw___is_standard_layout,
+ tok::kw___is_trivial,
+ tok::kw___is_trivially_equality_comparable,
+ tok::kw___is_trivially_assignable,
+ tok::kw___is_trivially_constructible,
+ tok::kw___is_trivially_copyable,
+ tok::kw___is_unbounded_array,
+ tok::kw___is_union,
+ tok::kw___is_unsigned,
+ tok::kw___is_void,
+ tok::kw___is_volatile,
+ tok::kw___reference_binds_to_temporary,
+ tok::kw___reference_constructs_from_temporary))
// GNU libstdc++ 4.2 and libc++ use certain intrinsic names as the
// name of struct templates, but some are keywords in GCC >= 4.3
// and Clang. Therefore, when we see the token sequence "struct
@@ -1605,9 +1702,12 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
ColonProtectionRAIIObject X(*this);
CXXScopeSpec Spec;
+ if (TemplateInfo.TemplateParams)
+ Spec.setTemplateParamLists(*TemplateInfo.TemplateParams);
+
bool HasValidSpec = true;
if (ParseOptionalCXXScopeSpecifier(Spec, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
EnteringContext)) {
DS.SetTypeSpecError();
HasValidSpec = false;
@@ -1741,18 +1841,20 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
Sema::TagUseKind TUK;
- if (isDefiningTypeSpecifierContext(DSC) == AllowDefiningTypeSpec::No ||
+ if (isDefiningTypeSpecifierContext(DSC, getLangOpts().CPlusPlus) ==
+ AllowDefiningTypeSpec::No ||
(getLangOpts().OpenMP && OpenMPDirectiveParsing))
TUK = Sema::TUK_Reference;
else if (Tok.is(tok::l_brace) ||
- (getLangOpts().CPlusPlus && Tok.is(tok::colon)) ||
+ (DSC != DeclSpecContext::DSC_association &&
+ getLangOpts().CPlusPlus && Tok.is(tok::colon)) ||
(isClassCompatibleKeyword() &&
(NextToken().is(tok::l_brace) || NextToken().is(tok::colon)))) {
if (DS.isFriendSpecified()) {
// C++ [class.friend]p2:
// A class shall not be defined in a friend declaration.
Diag(Tok.getLocation(), diag::err_friend_decl_defines_type)
- << SourceRange(DS.getFriendSpecLoc());
+ << SourceRange(DS.getFriendSpecLoc());
// Skip everything up to the semicolon, so that this looks like a proper
// friend class (or template thereof) declaration.
@@ -1765,6 +1867,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
} else if (isClassCompatibleKeyword() &&
(NextToken().is(tok::l_square) ||
NextToken().is(tok::kw_alignas) ||
+ NextToken().isRegularKeywordAttribute() ||
isCXX11VirtSpecifier(NextToken()) != VirtSpecifiers::VS_None)) {
// We can't tell if this is a definition or reference
// until we skipped the 'final' and C++11 attribute specifiers.
@@ -1786,6 +1889,14 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
ConsumeParen();
if (!SkipUntil(tok::r_paren, StopAtSemi))
break;
+ } else if (Tok.isRegularKeywordAttribute()) {
+ bool TakesArgs = doesKeywordAttributeTakeArgs(Tok.getKind());
+ ConsumeToken();
+ if (TakesArgs) {
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (!T.consumeOpen())
+ T.skipToEnd();
+ }
} else {
break;
}
@@ -1806,7 +1917,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// A semicolon was missing after this declaration. Diagnose and recover.
ExpectAndConsume(tok::semi, diag::err_expected_after,
DeclSpec::getSpecifierName(TagType, PPol));
- PP.EnterToken(Tok, /*IsReinject*/true);
+ PP.EnterToken(Tok, /*IsReinject*/ true);
Tok.setKind(tok::semi);
}
} else
@@ -1822,11 +1933,15 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// them to the right place.
SourceRange AttrRange = Attributes.Range;
if (AttrRange.isValid()) {
- Diag(AttrRange.getBegin(), diag::err_attributes_not_allowed)
- << AttrRange
- << FixItHint::CreateInsertionFromRange(AttrFixitLoc,
- CharSourceRange(AttrRange, true))
- << FixItHint::CreateRemoval(AttrRange);
+ auto *FirstAttr = Attributes.empty() ? nullptr : &Attributes.front();
+ auto Loc = AttrRange.getBegin();
+ (FirstAttr && FirstAttr->isRegularKeywordAttribute()
+ ? Diag(Loc, diag::err_keyword_not_allowed) << FirstAttr
+ : Diag(Loc, diag::err_attributes_not_allowed))
+ << AttrRange
+ << FixItHint::CreateInsertionFromRange(
+ AttrFixitLoc, CharSourceRange(AttrRange, true))
+ << FixItHint::CreateRemoval(AttrRange);
// Recover by adding misplaced attributes to the attribute list
// of the class so they can be applied on the class later.
@@ -1834,20 +1949,13 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
}
}
- // If this is an elaborated type specifier, and we delayed
- // diagnostics before, just merge them into the current pool.
- if (shouldDelayDiagsInTag) {
- diagsFromTag.done();
- if (TUK == Sema::TUK_Reference)
- diagsFromTag.redelay();
- }
-
- if (!Name && !TemplateId && (DS.getTypeSpecType() == DeclSpec::TST_error ||
- TUK != Sema::TUK_Definition)) {
+ if (!Name && !TemplateId &&
+ (DS.getTypeSpecType() == DeclSpec::TST_error ||
+ TUK != Sema::TUK_Definition)) {
if (DS.getTypeSpecType() != DeclSpec::TST_error) {
// We have a declaration or reference to an anonymous class.
Diag(StartLoc, diag::err_anon_type_definition)
- << DeclSpec::getSpecifierName(TagType, Policy);
+ << DeclSpec::getSpecifierName(TagType, Policy);
}
// If we are parsing a definition and stop at a base-clause, continue on
@@ -1862,7 +1970,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// Create the tag portion of the class or class template.
DeclResult TagOrTempResult = true; // invalid
- TypeResult TypeResult = true; // invalid
+ TypeResult TypeResult = true; // invalid
bool Owned = false;
Sema::SkipBodyInfo SkipBody;
@@ -1874,9 +1982,10 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
if (TemplateId->isInvalid()) {
// Can't build the declaration.
} else if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
- TUK == Sema::TUK_Declaration) {
+ TUK == Sema::TUK_Declaration) {
// This is an explicit instantiation of a class template.
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
+ diag::err_keyword_not_allowed,
/*DiagnoseEmptyAttrs=*/true);
TagOrTempResult = Actions.ActOnExplicitInstantiation(
@@ -1893,15 +2002,12 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
(TUK == Sema::TUK_Friend &&
TemplateInfo.Kind == ParsedTemplateInfo::NonTemplate)) {
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
+ diag::err_keyword_not_allowed,
/*DiagnoseEmptyAttrs=*/true);
- TypeResult = Actions.ActOnTagTemplateIdType(TUK, TagType, StartLoc,
- SS,
- TemplateId->TemplateKWLoc,
- TemplateId->Template,
- TemplateId->TemplateNameLoc,
- TemplateId->LAngleLoc,
- TemplateArgsPtr,
- TemplateId->RAngleLoc);
+ TypeResult = Actions.ActOnTagTemplateIdType(
+ TUK, TagType, StartLoc, SS, TemplateId->TemplateKWLoc,
+ TemplateId->Template, TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc, TemplateArgsPtr, TemplateId->RAngleLoc);
} else {
// This is an explicit specialization or a class template
// partial specialization.
@@ -1936,8 +2042,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// "template<>", so that we treat this construct as a class
// template specialization.
FakedParamLists.push_back(Actions.ActOnTemplateParameterList(
- 0, SourceLocation(), TemplateInfo.TemplateLoc, LAngleLoc, None,
- LAngleLoc, nullptr));
+ 0, SourceLocation(), TemplateInfo.TemplateLoc, LAngleLoc,
+ std::nullopt, LAngleLoc, nullptr));
TemplateParams = &FakedParamLists;
}
}
@@ -1966,6 +2072,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
} else if (TUK == Sema::TUK_Friend &&
TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate) {
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
+ diag::err_keyword_not_allowed,
/*DiagnoseEmptyAttrs=*/true);
TagOrTempResult = Actions.ActOnTemplatedFriendTag(
@@ -1976,6 +2083,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
} else {
if (TUK != Sema::TUK_Declaration && TUK != Sema::TUK_Definition)
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
+ diag::err_keyword_not_allowed,
/* DiagnoseEmptyAttrs=*/true);
if (TUK == Sema::TUK_Definition &&
@@ -1983,7 +2091,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// If the declarator-id is not a template-id, issue a diagnostic and
// recover by ignoring the 'template' keyword.
Diag(Tok, diag::err_template_defn_explicit_instantiation)
- << 1 << FixItHint::CreateRemoval(TemplateInfo.TemplateLoc);
+ << 1 << FixItHint::CreateRemoval(TemplateInfo.TemplateLoc);
TemplateParams = nullptr;
}
@@ -1995,7 +2103,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
MultiTemplateParamsArg TParams;
if (TUK != Sema::TUK_Reference && TemplateParams)
TParams =
- MultiTemplateParamsArg(&(*TemplateParams)[0], TemplateParams->size());
+ MultiTemplateParamsArg(&(*TemplateParams)[0], TemplateParams->size());
stripTypeAttributesOffDeclSpec(attrs, DS, TUK);
@@ -2007,17 +2115,27 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
DSC == DeclSpecContext::DSC_type_specifier,
DSC == DeclSpecContext::DSC_template_param ||
DSC == DeclSpecContext::DSC_template_type_arg,
- &SkipBody);
+ OffsetOfState, &SkipBody);
// If ActOnTag said the type was dependent, try again with the
// less common call.
if (IsDependent) {
assert(TUK == Sema::TUK_Reference || TUK == Sema::TUK_Friend);
- TypeResult = Actions.ActOnDependentTag(getCurScope(), TagType, TUK,
- SS, Name, StartLoc, NameLoc);
+ TypeResult = Actions.ActOnDependentTag(getCurScope(), TagType, TUK, SS,
+ Name, StartLoc, NameLoc);
}
}
+ // If this is an elaborated type specifier in function template,
+ // and we delayed diagnostics before,
+ // just merge them into the current pool.
+ if (shouldDelayDiagsInTag) {
+ diagsFromTag.done();
+ if (TUK == Sema::TUK_Reference &&
+ TemplateInfo.Kind == ParsedTemplateInfo::Template)
+ diagsFromTag.redelay();
+ }
+
// If there is a body, parse it and inform the actions module.
if (TUK == Sema::TUK_Definition) {
assert(Tok.is(tok::l_brace) ||
@@ -2035,8 +2153,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// Parse the definition body.
ParseStructUnionBody(StartLoc, TagType, cast<RecordDecl>(D));
if (SkipBody.CheckSameAsPrevious &&
- !Actions.ActOnDuplicateDefinition(DS, TagOrTempResult.get(),
- SkipBody)) {
+ !Actions.ActOnDuplicateDefinition(TagOrTempResult.get(), SkipBody)) {
DS.SetTypeSpecError();
return;
}
@@ -2055,10 +2172,9 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
NameLoc.isValid() ? NameLoc : StartLoc,
PrevSpec, DiagID, TypeResult.get(), Policy);
} else if (!TagOrTempResult.isInvalid()) {
- Result = DS.SetTypeSpecType(TagType, StartLoc,
- NameLoc.isValid() ? NameLoc : StartLoc,
- PrevSpec, DiagID, TagOrTempResult.get(), Owned,
- Policy);
+ Result = DS.SetTypeSpecType(
+ TagType, StartLoc, NameLoc.isValid() ? NameLoc : StartLoc, PrevSpec,
+ DiagID, TagOrTempResult.get(), Owned, Policy);
} else {
DS.SetTypeSpecError();
return;
@@ -2146,7 +2262,7 @@ BaseResult Parser::ParseBaseSpecifier(Decl *ClassDecl) {
bool IsVirtual = false;
SourceLocation StartLoc = Tok.getLocation();
- ParsedAttributesWithRange Attributes(AttrFactory);
+ ParsedAttributes Attributes(AttrFactory);
MaybeParseCXX11Attributes(Attributes);
// Parse the 'virtual' keyword.
@@ -2157,19 +2273,22 @@ BaseResult Parser::ParseBaseSpecifier(Decl *ClassDecl) {
// Parse an (optional) access specifier.
AccessSpecifier Access = getAccessSpecifierIfPresent();
- if (Access != AS_none)
+ if (Access != AS_none) {
ConsumeToken();
+ if (getLangOpts().HLSL)
+ Diag(Tok.getLocation(), diag::ext_hlsl_access_specifiers);
+ }
CheckMisplacedCXX11Attribute(Attributes, StartLoc);
// Parse the 'virtual' keyword (again!), in case it came after the
// access specifier.
- if (Tok.is(tok::kw_virtual)) {
+ if (Tok.is(tok::kw_virtual)) {
SourceLocation VirtualLoc = ConsumeToken();
if (IsVirtual) {
// Complain about duplicate 'virtual'
Diag(VirtualLoc, diag::err_dup_virtual)
- << FixItHint::CreateRemoval(VirtualLoc);
+ << FixItHint::CreateRemoval(VirtualLoc);
}
IsVirtual = true;
@@ -2218,10 +2337,14 @@ BaseResult Parser::ParseBaseSpecifier(Decl *ClassDecl) {
/// 'public'
AccessSpecifier Parser::getAccessSpecifierIfPresent() const {
switch (Tok.getKind()) {
- default: return AS_none;
- case tok::kw_private: return AS_private;
- case tok::kw_protected: return AS_protected;
- case tok::kw_public: return AS_public;
+ default:
+ return AS_none;
+ case tok::kw_private:
+ return AS_private;
+ case tok::kw_protected:
+ return AS_protected;
+ case tok::kw_public:
+ return AS_public;
}
}
@@ -2229,10 +2352,9 @@ AccessSpecifier Parser::getAccessSpecifierIfPresent() const {
/// delayed, e.g., default arguments or an exception-specification, create a
/// late-parsed method declaration record to handle the parsing at the end of
/// the class definition.
-void Parser::HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
+void Parser::HandleMemberFunctionDeclDelays(Declarator &DeclaratorInfo,
Decl *ThisDecl) {
- DeclaratorChunk::FunctionTypeInfo &FTI
- = DeclaratorInfo.getFunctionTypeInfo();
+ DeclaratorChunk::FunctionTypeInfo &FTI = DeclaratorInfo.getFunctionTypeInfo();
// If there was a late-parsed exception-specification, we'll need a
// late parse
bool NeedLateParse = FTI.getExceptionSpecType() == EST_Unparsed;
@@ -2329,9 +2451,9 @@ void Parser::ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS,
if (FriendLoc.isValid()) {
Diag(Tok.getLocation(), diag::err_friend_decl_spec)
- << VirtSpecifiers::getSpecifierName(Specifier)
- << FixItHint::CreateRemoval(Tok.getLocation())
- << SourceRange(FriendLoc, FriendLoc);
+ << VirtSpecifiers::getSpecifierName(Specifier)
+ << FixItHint::CreateRemoval(Tok.getLocation())
+ << SourceRange(FriendLoc, FriendLoc);
ConsumeToken();
continue;
}
@@ -2341,13 +2463,12 @@ void Parser::ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS,
const char *PrevSpec = nullptr;
if (VS.SetSpecifier(Specifier, Tok.getLocation(), PrevSpec))
Diag(Tok.getLocation(), diag::err_duplicate_virt_specifier)
- << PrevSpec
- << FixItHint::CreateRemoval(Tok.getLocation());
+ << PrevSpec << FixItHint::CreateRemoval(Tok.getLocation());
if (IsInterface && (Specifier == VirtSpecifiers::VS_Final ||
Specifier == VirtSpecifiers::VS_Sealed)) {
Diag(Tok.getLocation(), diag::err_override_control_interface)
- << VirtSpecifiers::getSpecifierName(Specifier);
+ << VirtSpecifiers::getSpecifierName(Specifier);
} else if (Specifier == VirtSpecifiers::VS_Sealed) {
Diag(Tok.getLocation(), diag::ext_ms_sealed_keyword);
} else if (Specifier == VirtSpecifiers::VS_Abstract) {
@@ -2417,7 +2538,8 @@ bool Parser::ParseCXXMemberDeclaratorBeforeInitializer(
VS, getCurrentClass().IsInterface,
DeclaratorInfo.getDeclSpec().getFriendSpecLoc());
if (!VS.isUnset())
- MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(DeclaratorInfo, VS);
+ MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(DeclaratorInfo,
+ VS);
}
// If a simple-asm-expr is present, parse it.
@@ -2451,7 +2573,8 @@ bool Parser::ParseCXXMemberDeclaratorBeforeInitializer(
if (AL.isKnownToGCC() && !AL.isCXX11Attribute())
Diag(AL.getLoc(), diag::warn_gcc_attribute_location);
- MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(DeclaratorInfo, VS);
+ MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(DeclaratorInfo,
+ VS);
}
}
@@ -2468,8 +2591,7 @@ bool Parser::ParseCXXMemberDeclaratorBeforeInitializer(
/// Look for declaration specifiers possibly occurring after C++11
/// virt-specifier-seq and diagnose them.
void Parser::MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(
- Declarator &D,
- VirtSpecifiers &VS) {
+ Declarator &D, VirtSpecifiers &VS) {
DeclSpec DS(AttrFactory);
// GNU-style and C++11 attributes are not allowed here, but they will be
@@ -2507,15 +2629,15 @@ void Parser::MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(
SourceLocation RefQualifierLoc;
if (ParseRefQualifier(RefQualifierIsLValueRef, RefQualifierLoc)) {
const char *Name = (RefQualifierIsLValueRef ? "& " : "&& ");
- FixItHint Insertion = FixItHint::CreateInsertion(VS.getFirstLocation(), Name);
+ FixItHint Insertion =
+ FixItHint::CreateInsertion(VS.getFirstLocation(), Name);
Function.RefQualifierIsLValueRef = RefQualifierIsLValueRef;
Function.RefQualifierLoc = RefQualifierLoc;
Diag(RefQualifierLoc, diag::err_declspec_after_virtspec)
- << (RefQualifierIsLValueRef ? "&" : "&&")
- << VirtSpecifiers::getSpecifierName(VS.getLastSpecifier())
- << FixItHint::CreateRemoval(RefQualifierLoc)
- << Insertion;
+ << (RefQualifierIsLValueRef ? "&" : "&&")
+ << VirtSpecifiers::getSpecifierName(VS.getLastSpecifier())
+ << FixItHint::CreateRemoval(RefQualifierLoc) << Insertion;
D.SetRangeEnd(RefQualifierLoc);
}
}
@@ -2563,6 +2685,8 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject *TemplateDiags) {
+ assert(getLangOpts().CPlusPlus &&
+ "ParseCXXClassMemberDeclaration should only be called in C++ mode");
if (Tok.is(tok::at)) {
if (getLangOpts().ObjC && NextToken().isObjCAtKeyword(tok::objc_defs))
Diag(Tok, diag::err_at_defs_cxx);
@@ -2601,7 +2725,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// Collect the scope specifier token we annotated earlier.
CXXScopeSpec SS;
ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false);
if (SS.isInvalid()) {
@@ -2657,29 +2781,21 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// Handle: member-declaration ::= '__extension__' member-declaration
if (Tok.is(tok::kw___extension__)) {
// __extension__ silences extension warnings in the subexpression.
- ExtensionRAIIObject O(Diags); // Use RAII to do this.
+ ExtensionRAIIObject O(Diags); // Use RAII to do this.
ConsumeToken();
- return ParseCXXClassMemberDeclaration(AS, AccessAttrs,
- TemplateInfo, TemplateDiags);
+ return ParseCXXClassMemberDeclaration(AS, AccessAttrs, TemplateInfo,
+ TemplateDiags);
}
- ParsedAttributesWithRange attrs(AttrFactory);
- ParsedAttributesViewWithRange FnAttrs;
+ ParsedAttributes DeclAttrs(AttrFactory);
// Optional C++11 attribute-specifier
- MaybeParseCXX11Attributes(attrs);
+ MaybeParseCXX11Attributes(DeclAttrs);
// The next token may be an OpenMP pragma annotation token. That would
// normally be handled from ParseCXXClassMemberDeclarationWithPragmas, but in
// this case, it came from an *attribute* rather than a pragma. Handle it now.
if (Tok.is(tok::annot_attr_openmp))
- return ParseOpenMPDeclarativeDirectiveWithExtDecl(AS, attrs);
-
- // We need to keep these attributes for future diagnostic
- // before they are taken over by declaration specifier.
- FnAttrs.addAll(attrs.begin(), attrs.end());
- FnAttrs.Range = attrs.Range;
-
- MaybeParseMicrosoftAttributes(attrs);
+ return ParseOpenMPDeclarativeDirectiveWithExtDecl(AS, DeclAttrs);
if (Tok.is(tok::kw_using)) {
// Eat 'using'.
@@ -2700,22 +2816,39 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
SourceLocation DeclEnd;
// Otherwise, it must be a using-declaration or an alias-declaration.
return ParseUsingDeclaration(DeclaratorContext::Member, TemplateInfo,
- UsingLoc, DeclEnd, attrs, AS);
+ UsingLoc, DeclEnd, DeclAttrs, AS);
}
+ ParsedAttributes DeclSpecAttrs(AttrFactory);
+ MaybeParseMicrosoftAttributes(DeclSpecAttrs);
+
// Hold late-parsed attributes so we can attach a Decl to them later.
LateParsedAttrList CommonLateParsedAttrs;
// decl-specifier-seq:
// Parse the common declaration-specifiers piece.
ParsingDeclSpec DS(*this, TemplateDiags);
- DS.takeAttributesFrom(attrs);
+ DS.takeAttributesFrom(DeclSpecAttrs);
+
if (MalformedTypeSpec)
DS.SetTypeSpecError();
+ // Turn off usual access checking for templates explicit specialization
+ // and instantiation.
+ // C++20 [temp.spec] 13.9/6.
+ // This disables the access checking rules for member function template
+ // explicit instantiation and explicit specialization.
+ bool IsTemplateSpecOrInst =
+ (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation ||
+ TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization);
+ SuppressAccessChecks diagsFromTag(*this, IsTemplateSpecOrInst);
+
ParseDeclarationSpecifiers(DS, TemplateInfo, AS, DeclSpecContext::DSC_class,
&CommonLateParsedAttrs);
+ if (IsTemplateSpecOrInst)
+ diagsFromTag.done();
+
// Turn off colon protection that was set for declspec.
X.restore();
@@ -2728,26 +2861,31 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
return nullptr;
MultiTemplateParamsArg TemplateParams(
- TemplateInfo.TemplateParams? TemplateInfo.TemplateParams->data()
- : nullptr,
- TemplateInfo.TemplateParams? TemplateInfo.TemplateParams->size() : 0);
+ TemplateInfo.TemplateParams ? TemplateInfo.TemplateParams->data()
+ : nullptr,
+ TemplateInfo.TemplateParams ? TemplateInfo.TemplateParams->size() : 0);
if (TryConsumeToken(tok::semi)) {
if (DS.isFriendSpecified())
- ProhibitAttributes(FnAttrs);
+ ProhibitAttributes(DeclAttrs);
RecordDecl *AnonRecord = nullptr;
Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(
- getCurScope(), AS, DS, TemplateParams, false, AnonRecord);
+ getCurScope(), AS, DS, DeclAttrs, TemplateParams, false, AnonRecord);
+ Actions.ActOnDefinedDeclarationSpecifier(TheDecl);
DS.complete(TheDecl);
if (AnonRecord) {
- Decl* decls[] = {AnonRecord, TheDecl};
+ Decl *decls[] = {AnonRecord, TheDecl};
return Actions.BuildDeclaratorGroup(decls);
}
return Actions.ConvertDeclToDeclGroup(TheDecl);
}
- ParsingDeclarator DeclaratorInfo(*this, DS, DeclaratorContext::Member);
+ if (DS.hasTagDefinition())
+ Actions.ActOnDefinedDeclarationSpecifier(DS.getRepAsDecl());
+
+ ParsingDeclarator DeclaratorInfo(*this, DS, DeclAttrs,
+ DeclaratorContext::Member);
if (TemplateInfo.TemplateParams)
DeclaratorInfo.setTemplateParameterLists(TemplateParams);
VirtSpecifiers VS;
@@ -2758,7 +2896,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
SourceLocation EqualLoc;
SourceLocation PureSpecLoc;
- auto TryConsumePureSpecifier = [&] (bool AllowDefinition) {
+ auto TryConsumePureSpecifier = [&](bool AllowDefinition) {
if (Tok.isNot(tok::equal))
return false;
@@ -2784,6 +2922,11 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
ExprResult TrailingRequiresClause;
bool ExpectSemi = true;
+ // C++20 [temp.spec] 13.9/6.
+ // This disables the access checking rules for member function template
+ // explicit instantiation and explicit specialization.
+ SuppressAccessChecks SAC(*this, IsTemplateSpecOrInst);
+
// Parse the first declarator.
if (ParseCXXMemberDeclaratorBeforeInitializer(
DeclaratorInfo, VS, BitfieldSize, LateParsedAttrs)) {
@@ -2791,6 +2934,9 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
return nullptr;
}
+ if (IsTemplateSpecOrInst)
+ SAC.done();
+
// Check for a member function definition.
if (BitfieldSize.isUnset()) {
// MSVC permits pure specifier on inline functions defined at class scope.
@@ -2831,7 +2977,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
DS.isFriendSpecified()) {
// Diagnose attributes that appear before decl specifier:
// [[]] friend int foo();
- ProhibitAttributes(FnAttrs);
+ ProhibitAttributes(DeclAttrs);
}
if (DefinitionKind != FunctionDefinitionKind::Declaration) {
@@ -2854,9 +3000,8 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
DS.ClearStorageClassSpecs();
}
- Decl *FunDecl =
- ParseCXXInlineMethodDef(AS, AccessAttrs, DeclaratorInfo, TemplateInfo,
- VS, PureSpecLoc);
+ Decl *FunDecl = ParseCXXInlineMethodDef(AS, AccessAttrs, DeclaratorInfo,
+ TemplateInfo, VS, PureSpecLoc);
if (FunDecl) {
for (unsigned i = 0, ni = CommonLateParsedAttrs.size(); i < ni; ++i) {
@@ -2880,7 +3025,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// member-declarator
// member-declarator-list ',' member-declarator
- while (1) {
+ while (true) {
InClassInitStyle HasInClassInit = ICIS_NoInit;
bool HasStaticInitializer = false;
if (Tok.isOneOf(tok::equal, tok::l_brace) && PureSpecLoc.isInvalid()) {
@@ -2921,20 +3066,21 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
//
// Diagnose attributes that appear in a friend member function declarator:
// friend int foo [[]] ();
- SmallVector<SourceRange, 4> Ranges;
- DeclaratorInfo.getCXX11AttributeRanges(Ranges);
- for (SmallVectorImpl<SourceRange>::iterator I = Ranges.begin(),
- E = Ranges.end(); I != E; ++I)
- Diag((*I).getBegin(), diag::err_attributes_not_allowed) << *I;
+ for (const ParsedAttr &AL : DeclaratorInfo.getAttributes())
+ if (AL.isCXX11Attribute() || AL.isRegularKeywordAttribute()) {
+ auto Loc = AL.getRange().getBegin();
+ (AL.isRegularKeywordAttribute()
+ ? Diag(Loc, diag::err_keyword_not_allowed) << AL
+ : Diag(Loc, diag::err_attributes_not_allowed))
+ << AL.getRange();
+ }
ThisDecl = Actions.ActOnFriendFunctionDecl(getCurScope(), DeclaratorInfo,
TemplateParams);
} else {
- ThisDecl = Actions.ActOnCXXMemberDeclarator(getCurScope(), AS,
- DeclaratorInfo,
- TemplateParams,
- BitfieldSize.get(),
- VS, HasInClassInit);
+ ThisDecl = Actions.ActOnCXXMemberDeclarator(
+ getCurScope(), AS, DeclaratorInfo, TemplateParams, BitfieldSize.get(),
+ VS, HasInClassInit);
if (VarTemplateDecl *VT =
ThisDecl ? dyn_cast<VarTemplateDecl>(ThisDecl) : nullptr)
@@ -2989,10 +3135,13 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
ExprResult Init = ParseCXXMemberInitializer(
ThisDecl, DeclaratorInfo.isDeclarationOfFunction(), EqualLoc);
- if (Init.isInvalid())
+ if (Init.isInvalid()) {
+ if (ThisDecl)
+ Actions.ActOnUninitializedDecl(ThisDecl);
SkipUntil(tok::comma, StopAtSemi | StopBeforeMatch);
- else if (ThisDecl)
- Actions.AddInitializerToDecl(ThisDecl, Init.get(), EqualLoc.isInvalid());
+ } else if (ThisDecl)
+ Actions.AddInitializerToDecl(ThisDecl, Init.get(),
+ EqualLoc.isInvalid());
} else if (ThisDecl && DS.getStorageClassSpec() == DeclSpec::SCS_static)
// No initializer.
Actions.ActOnUninitializedDecl(ThisDecl);
@@ -3030,7 +3179,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// the start of a declarator. The comma was probably a typo for a
// semicolon.
Diag(CommaLoc, diag::err_expected_semi_declaration)
- << FixItHint::CreateReplacement(CommaLoc, ";");
+ << FixItHint::CreateReplacement(CommaLoc, ";");
ExpectSemi = false;
break;
}
@@ -3088,23 +3237,36 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
/// be a constant-expression.
ExprResult Parser::ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc) {
- assert(Tok.isOneOf(tok::equal, tok::l_brace)
- && "Data member initializer not starting with '=' or '{'");
+ assert(Tok.isOneOf(tok::equal, tok::l_brace) &&
+ "Data member initializer not starting with '=' or '{'");
+
+ bool IsFieldInitialization = isa_and_present<FieldDecl>(D);
EnterExpressionEvaluationContext Context(
- Actions, Sema::ExpressionEvaluationContext::PotentiallyEvaluated, D);
+ Actions,
+ IsFieldInitialization
+ ? Sema::ExpressionEvaluationContext::PotentiallyEvaluatedIfUsed
+ : Sema::ExpressionEvaluationContext::PotentiallyEvaluated,
+ D);
+
+ // CWG2760
+ // Default member initializers used to initialize a base or member subobject
+ // [...] are considered to be part of the function body
+ Actions.ExprEvalContexts.back().InImmediateEscalatingFunctionContext =
+ IsFieldInitialization;
+
if (TryConsumeToken(tok::equal, EqualLoc)) {
if (Tok.is(tok::kw_delete)) {
// In principle, an initializer of '= delete p;' is legal, but it will
- // never type-check. It's better to diagnose it as an ill-formed expression
- // than as an ill-formed deleted non-function member.
- // An initializer of '= delete p, foo' will never be parsed, because
- // a top-level comma always ends the initializer expression.
+ // never type-check. It's better to diagnose it as an ill-formed
+ // expression than as an ill-formed deleted non-function member. An
+ // initializer of '= delete p, foo' will never be parsed, because a
+ // top-level comma always ends the initializer expression.
const Token &Next = NextToken();
if (IsFunction || Next.isOneOf(tok::semi, tok::comma, tok::eof)) {
if (IsFunction)
Diag(ConsumeToken(), diag::err_default_delete_in_multiple_declaration)
- << 1 /* delete */;
+ << 1 /* delete */;
else
Diag(ConsumeToken(), diag::err_deleted_non_function);
return ExprError();
@@ -3112,7 +3274,7 @@ ExprResult Parser::ParseCXXMemberInitializer(Decl *D, bool IsFunction,
} else if (Tok.is(tok::kw_default)) {
if (IsFunction)
Diag(Tok, diag::err_default_delete_in_multiple_declaration)
- << 0 /* default */;
+ << 0 /* default */;
else
Diag(ConsumeToken(), diag::err_default_special_members)
<< getLangOpts().CPlusPlus20;
@@ -3136,7 +3298,7 @@ void Parser::SkipCXXMemberSpecification(SourceLocation RecordLoc,
// Diagnose any C++11 attributes after 'final' keyword.
// We deliberately discard these attributes.
- ParsedAttributesWithRange Attrs(AttrFactory);
+ ParsedAttributes Attrs(AttrFactory);
CheckMisplacedCXX11Attribute(Attrs, AttrFixitLoc);
// This can only happen if we had malformed misplaced attributes;
@@ -3151,7 +3313,7 @@ void Parser::SkipCXXMemberSpecification(SourceLocation RecordLoc,
// within a template argument).
if (Tok.is(tok::colon)) {
// Enter the scope of the class so that we can correctly parse its bases.
- ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope);
+ ParseScope ClassScope(this, Scope::ClassScope | Scope::DeclScope);
ParsingClassDefinition ParsingDef(*this, TagDecl, /*NonNestedClass*/ true,
TagType == DeclSpec::TST_interface);
auto OldContext =
@@ -3176,14 +3338,15 @@ void Parser::SkipCXXMemberSpecification(SourceLocation RecordLoc,
T.skipToEnd();
// Parse and discard any trailing attributes.
- ParsedAttributes Attrs(AttrFactory);
- if (Tok.is(tok::kw___attribute))
+ if (Tok.is(tok::kw___attribute)) {
+ ParsedAttributes Attrs(AttrFactory);
MaybeParseGNUAttributes(Attrs);
+ }
}
Parser::DeclGroupPtrTy Parser::ParseCXXClassMemberDeclarationWithPragmas(
- AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
- DeclSpec::TST TagType, Decl *TagDecl) {
+ AccessSpecifier &AS, ParsedAttributes &AccessAttrs, DeclSpec::TST TagType,
+ Decl *TagDecl) {
ParenBraceBracketBalancer BalancerRAIIObj(*this);
switch (Tok.getKind()) {
@@ -3230,9 +3393,11 @@ Parser::DeclGroupPtrTy Parser::ParseCXXClassMemberDeclarationWithPragmas(
// yet.
if (getLangOpts().OpenCL && !NextToken().is(tok::colon))
return ParseCXXClassMemberDeclaration(AS, AccessAttrs);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case tok::kw_public:
case tok::kw_protected: {
+ if (getLangOpts().HLSL)
+ Diag(Tok.getLocation(), diag::ext_hlsl_access_specifiers);
AccessSpecifier NewAS = getAccessSpecifierIfPresent();
assert(NewAS != AS_none);
// Current token is a C++ access specifier.
@@ -3272,12 +3437,14 @@ Parser::DeclGroupPtrTy Parser::ParseCXXClassMemberDeclarationWithPragmas(
case tok::annot_pragma_openmp:
return ParseOpenMPDeclarativeDirectiveWithExtDecl(
AS, AccessAttrs, /*Delayed=*/true, TagType, TagDecl);
+ case tok::annot_pragma_openacc:
+ return ParseOpenACCDirectiveDecl();
default:
if (tok::isPragmaAnnotation(Tok.getKind())) {
Diag(Tok.getLocation(), diag::err_pragma_misplaced_in_decl)
- << DeclSpec::getSpecifierName(TagType,
- Actions.getASTContext().getPrintingPolicy());
+ << DeclSpec::getSpecifierName(
+ TagType, Actions.getASTContext().getPrintingPolicy());
ConsumeAnnotationToken();
return nullptr;
}
@@ -3293,12 +3460,12 @@ Parser::DeclGroupPtrTy Parser::ParseCXXClassMemberDeclarationWithPragmas(
///
void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
SourceLocation AttrFixitLoc,
- ParsedAttributesWithRange &Attrs,
+ ParsedAttributes &Attrs,
unsigned TagType, Decl *TagDecl) {
assert((TagType == DeclSpec::TST_struct ||
- TagType == DeclSpec::TST_interface ||
- TagType == DeclSpec::TST_union ||
- TagType == DeclSpec::TST_class) && "Invalid TagType!");
+ TagType == DeclSpec::TST_interface ||
+ TagType == DeclSpec::TST_union || TagType == DeclSpec::TST_class) &&
+ "Invalid TagType!");
llvm::TimeTraceScope TimeScope("ParseClass", [&]() {
if (auto *TD = dyn_cast_or_null<NamedDecl>(TagDecl))
@@ -3321,15 +3488,15 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
// The Microsoft extension __interface does not permit nested classes.
if (getCurrentClass().IsInterface) {
Diag(RecordLoc, diag::err_invalid_member_in_interface)
- << /*ErrorType=*/6
- << (isa<NamedDecl>(TagDecl)
- ? cast<NamedDecl>(TagDecl)->getQualifiedNameAsString()
- : "(anonymous)");
+ << /*ErrorType=*/6
+ << (isa<NamedDecl>(TagDecl)
+ ? cast<NamedDecl>(TagDecl)->getQualifiedNameAsString()
+ : "(anonymous)");
}
break;
}
- if ((S->getFlags() & Scope::FnScope))
+ if (S->isFunctionScope())
// If we're in a function or function template then this is a local
// class rather than a nested class.
break;
@@ -3337,7 +3504,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
}
// Enter a scope for the class.
- ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope);
+ ParseScope ClassScope(this, Scope::ClassScope | Scope::DeclScope);
// Note that we are parsing a new (potentially-nested) class definition.
ParsingClassDefinition ParsingDef(*this, TagDecl, NonNestedClass,
@@ -3450,7 +3617,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
if (SuggestFixIt) {
LBraceDiag << FixItHint::CreateInsertion(BraceLoc, " {");
// Try recovering from missing { after base-clause.
- PP.EnterToken(Tok, /*IsReinject*/true);
+ PP.EnterToken(Tok, /*IsReinject*/ true);
Tok.setKind(tok::l_brace);
} else {
if (TagDecl)
@@ -3472,12 +3639,13 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
// C++ 11p3: Members of a class defined with the keyword class are private
// by default. Members of a class defined with the keywords struct or union
// are public by default.
+ // HLSL: In HLSL members of a class are public by default.
AccessSpecifier CurAS;
- if (TagType == DeclSpec::TST_class)
+ if (TagType == DeclSpec::TST_class && !getLangOpts().HLSL)
CurAS = AS_private;
else
CurAS = AS_public;
- ParsedAttributesWithRange AccessAttrs(AttrFactory);
+ ParsedAttributes AccessAttrs(AttrFactory);
if (TagDecl) {
// While we still have something to read, read the member-declarations.
@@ -3543,10 +3711,8 @@ void Parser::DiagnoseUnexpectedNamespace(NamedDecl *D) {
// FIXME: Suggest where the close brace should have gone by looking
// at indentation changes within the definition body.
- Diag(D->getLocation(),
- diag::err_missing_end_of_definition) << D;
- Diag(Tok.getLocation(),
- diag::note_missing_end_of_definition_before) << D;
+ Diag(D->getLocation(), diag::err_missing_end_of_definition) << D;
+ Diag(Tok.getLocation(), diag::note_missing_end_of_definition_before) << D;
// Push '};' onto the token stream to recover.
PP.EnterToken(Tok, /*IsReinject*/ true);
@@ -3589,7 +3755,7 @@ void Parser::ParseConstructorInitializer(Decl *ConstructorDecl) {
PoisonSEHIdentifiersRAIIObject PoisonSEHIdentifiers(*this, true);
SourceLocation ColonLoc = ConsumeToken();
- SmallVector<CXXCtorInitializer*, 4> MemInitializers;
+ SmallVector<CXXCtorInitializer *, 4> MemInitializers;
bool AnyErrors = false;
do {
@@ -3616,12 +3782,12 @@ void Parser::ParseConstructorInitializer(Decl *ConstructorDecl) {
Tok.isOneOf(tok::identifier, tok::coloncolon)) {
SourceLocation Loc = PP.getLocForEndOfToken(PrevTokLocation);
Diag(Loc, diag::err_ctor_init_missing_comma)
- << FixItHint::CreateInsertion(Loc, ", ");
+ << FixItHint::CreateInsertion(Loc, ", ");
} else {
// Skip over garbage, until we get to '{'. Don't eat the '{'.
if (!MemInit.isInvalid())
- Diag(Tok.getLocation(), diag::err_expected_either) << tok::l_brace
- << tok::comma;
+ Diag(Tok.getLocation(), diag::err_expected_either)
+ << tok::l_brace << tok::comma;
SkipUntil(tok::l_brace, StopAtSemi | StopBeforeMatch);
break;
}
@@ -3647,7 +3813,7 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
// parse '::'[opt] nested-name-specifier[opt]
CXXScopeSpec SS;
if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false))
return true;
@@ -3675,7 +3841,8 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
? takeTemplateIdAnnotation(Tok)
: nullptr;
if (TemplateId && TemplateId->mightBeType()) {
- AnnotateTemplateIdTokenAsType(SS, /*IsClassName*/true);
+ AnnotateTemplateIdTokenAsType(SS, ImplicitTypenameContext::No,
+ /*IsClassName=*/true);
assert(Tok.is(tok::annot_typename) && "template-id -> type failed");
TemplateTypeTy = getTypeAnnotation(Tok);
ConsumeAnnotationToken();
@@ -3702,24 +3869,22 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
return Actions.ActOnMemInitializer(ConstructorDecl, getCurScope(), SS, II,
TemplateTypeTy.get(), DS, IdLoc,
InitList.get(), EllipsisLoc);
- } else if(Tok.is(tok::l_paren)) {
+ } else if (Tok.is(tok::l_paren)) {
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
// Parse the optional expression-list.
ExprVector ArgExprs;
- CommaLocsTy CommaLocs;
auto RunSignatureHelp = [&] {
if (TemplateTypeTy.isInvalid())
return QualType();
QualType PreferredType = Actions.ProduceCtorInitMemberSignatureHelp(
- getCurScope(), ConstructorDecl, SS, TemplateTypeTy.get(), ArgExprs, II,
- T.getOpenLocation());
+ ConstructorDecl, SS, TemplateTypeTy.get(), ArgExprs, II,
+ T.getOpenLocation(), /*Braced=*/false);
CalledSignatureHelp = true;
return PreferredType;
};
- if (Tok.isNot(tok::r_paren) &&
- ParseExpressionList(ArgExprs, CommaLocs, [&] {
+ if (Tok.isNot(tok::r_paren) && ParseExpressionList(ArgExprs, [&] {
PreferredType.enterFunctionArgument(Tok.getLocation(),
RunSignatureHelp);
})) {
@@ -3736,10 +3901,9 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
if (TemplateTypeTy.isInvalid())
return true;
- return Actions.ActOnMemInitializer(ConstructorDecl, getCurScope(), SS, II,
- TemplateTypeTy.get(), DS, IdLoc,
- T.getOpenLocation(), ArgExprs,
- T.getCloseLocation(), EllipsisLoc);
+ return Actions.ActOnMemInitializer(
+ ConstructorDecl, getCurScope(), SS, II, TemplateTypeTy.get(), DS, IdLoc,
+ T.getOpenLocation(), ArgExprs, T.getCloseLocation(), EllipsisLoc);
}
if (TemplateTypeTy.isInvalid())
@@ -3760,13 +3924,11 @@ MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
/// noexcept-specification:
/// 'noexcept'
/// 'noexcept' '(' constant-expression ')'
-ExceptionSpecificationType
-Parser::tryParseExceptionSpecification(bool Delayed,
- SourceRange &SpecificationRange,
- SmallVectorImpl<ParsedType> &DynamicExceptions,
- SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
- ExprResult &NoexceptExpr,
- CachedTokens *&ExceptionSpecTokens) {
+ExceptionSpecificationType Parser::tryParseExceptionSpecification(
+ bool Delayed, SourceRange &SpecificationRange,
+ SmallVectorImpl<ParsedType> &DynamicExceptions,
+ SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
+ ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens) {
ExceptionSpecificationType Result = EST_None;
ExceptionSpecTokens = nullptr;
@@ -3795,8 +3957,8 @@ Parser::tryParseExceptionSpecification(bool Delayed,
// Cache the tokens for the exception-specification.
ExceptionSpecTokens = new CachedTokens;
- ExceptionSpecTokens->push_back(StartTok); // 'throw' or 'noexcept'
- ExceptionSpecTokens->push_back(Tok); // '('
+ ExceptionSpecTokens->push_back(StartTok); // 'throw' or 'noexcept'
+ ExceptionSpecTokens->push_back(Tok); // '('
SpecificationRange.setEnd(ConsumeParen()); // '('
ConsumeAndStoreUntil(tok::r_paren, *ExceptionSpecTokens,
@@ -3809,9 +3971,8 @@ Parser::tryParseExceptionSpecification(bool Delayed,
// See if there's a dynamic specification.
if (Tok.is(tok::kw_throw)) {
- Result = ParseDynamicExceptionSpecification(SpecificationRange,
- DynamicExceptions,
- DynamicExceptionRanges);
+ Result = ParseDynamicExceptionSpecification(
+ SpecificationRange, DynamicExceptions, DynamicExceptionRanges);
assert(DynamicExceptions.size() == DynamicExceptionRanges.size() &&
"Produced different number of exception types and ranges.");
}
@@ -3832,11 +3993,15 @@ Parser::tryParseExceptionSpecification(bool Delayed,
// There is an argument.
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
- NoexceptExpr = ParseConstantExpression();
+
+ EnterExpressionEvaluationContext ConstantEvaluated(
+ Actions, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+ NoexceptExpr = ParseConstantExpressionInExprEvalContext();
+
T.consumeClose();
if (!NoexceptExpr.isInvalid()) {
- NoexceptExpr = Actions.ActOnNoexceptSpec(KeywordLoc, NoexceptExpr.get(),
- NoexceptType);
+ NoexceptExpr =
+ Actions.ActOnNoexceptSpec(NoexceptExpr.get(), NoexceptType);
NoexceptRange = SourceRange(KeywordLoc, T.getCloseLocation());
} else {
NoexceptType = EST_BasicNoexcept;
@@ -3865,17 +4030,16 @@ Parser::tryParseExceptionSpecification(bool Delayed,
return Result;
}
-static void diagnoseDynamicExceptionSpecification(
- Parser &P, SourceRange Range, bool IsNoexcept) {
+static void diagnoseDynamicExceptionSpecification(Parser &P, SourceRange Range,
+ bool IsNoexcept) {
if (P.getLangOpts().CPlusPlus11) {
const char *Replacement = IsNoexcept ? "noexcept" : "noexcept(false)";
- P.Diag(Range.getBegin(),
- P.getLangOpts().CPlusPlus17 && !IsNoexcept
- ? diag::ext_dynamic_exception_spec
- : diag::warn_exception_spec_deprecated)
+ P.Diag(Range.getBegin(), P.getLangOpts().CPlusPlus17 && !IsNoexcept
+ ? diag::ext_dynamic_exception_spec
+ : diag::warn_exception_spec_deprecated)
<< Range;
P.Diag(Range.getBegin(), diag::note_exception_spec_deprecated)
- << Replacement << FixItHint::CreateReplacement(Range, Replacement);
+ << Replacement << FixItHint::CreateReplacement(Range, Replacement);
}
}
@@ -3891,9 +4055,8 @@ static void diagnoseDynamicExceptionSpecification(
/// type-id-list ',' type-id ... [opt]
///
ExceptionSpecificationType Parser::ParseDynamicExceptionSpecification(
- SourceRange &SpecificationRange,
- SmallVectorImpl<ParsedType> &Exceptions,
- SmallVectorImpl<SourceRange> &Ranges) {
+ SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions,
+ SmallVectorImpl<SourceRange> &Ranges) {
assert(Tok.is(tok::kw_throw) && "expected throw");
SpecificationRange.setBegin(ConsumeToken());
@@ -3967,14 +4130,13 @@ void Parser::ParseTrailingRequiresClause(Declarator &D) {
SourceLocation RequiresKWLoc = ConsumeToken();
ExprResult TrailingRequiresClause;
- ParseScope ParamScope(this,
- Scope::DeclScope |
- Scope::FunctionDeclarationScope |
- Scope::FunctionPrototypeScope);
+ ParseScope ParamScope(this, Scope::DeclScope |
+ Scope::FunctionDeclarationScope |
+ Scope::FunctionPrototypeScope);
Actions.ActOnStartTrailingRequiresClause(getCurScope(), D);
- llvm::Optional<Sema::CXXThisScopeRAII> ThisScope;
+ std::optional<Sema::CXXThisScopeRAII> ThisScope;
InitCXXThisScopeForDeclaratorIfRelevant(D, D.getDeclSpec(), ThisScope);
TrailingRequiresClause =
@@ -4020,9 +4182,9 @@ void Parser::ParseTrailingRequiresClause(Declarator &D) {
/// We have just started parsing the definition of a new class,
/// so push that class onto our stack of classes that is currently
/// being parsed.
-Sema::ParsingClassState
-Parser::PushParsingClass(Decl *ClassDecl, bool NonNestedClass,
- bool IsInterface) {
+Sema::ParsingClassState Parser::PushParsingClass(Decl *ClassDecl,
+ bool NonNestedClass,
+ bool IsInterface) {
assert((NonNestedClass || !ClassStack.empty()) &&
"Nested class without outer class");
ClassStack.push(new ParsingClass(ClassDecl, NonNestedClass, IsInterface));
@@ -4070,7 +4232,8 @@ void Parser::PopParsingClass(Sema::ParsingClassState state) {
// This nested class has some members that will need to be processed
// after the top-level class is completely defined. Therefore, add
// it to the list of nested classes within its parent.
- assert(getCurScope()->isClassScope() && "Nested class outside of class scope?");
+ assert(getCurScope()->isClassScope() &&
+ "Nested class outside of class scope?");
ClassStack.top()->LateParsedDeclarations.push_back(
new LateParsedClass(this, Victim));
}
@@ -4084,7 +4247,10 @@ void Parser::PopParsingClass(Sema::ParsingClassState state) {
/// If a keyword or an alternative token that satisfies the syntactic
/// requirements of an identifier is contained in an attribute-token,
/// it is considered an identifier.
-IdentifierInfo *Parser::TryParseCXX11AttributeIdentifier(SourceLocation &Loc) {
+IdentifierInfo *
+Parser::TryParseCXX11AttributeIdentifier(SourceLocation &Loc,
+ Sema::AttributeCompletion Completion,
+ const IdentifierInfo *Scope) {
switch (Tok.getKind()) {
default:
// Identifiers and keywords have identifier info attached.
@@ -4096,6 +4262,13 @@ IdentifierInfo *Parser::TryParseCXX11AttributeIdentifier(SourceLocation &Loc) {
}
return nullptr;
+ case tok::code_completion:
+ cutOffParsing();
+ Actions.CodeCompleteAttribute(getLangOpts().CPlusPlus ? ParsedAttr::AS_CXX11
+ : ParsedAttr::AS_C23,
+ Completion, Scope);
+ return nullptr;
+
case tok::numeric_constant: {
// If we got a numeric constant, check to see if it comes from a macro that
// corresponds to the predefined __clang__ macro. If it does, warn the user
@@ -4143,7 +4316,7 @@ IdentifierInfo *Parser::TryParseCXX11AttributeIdentifier(SourceLocation &Loc) {
}
}
-void Parser::ParseOpenMPAttributeArgs(IdentifierInfo *AttrName,
+void Parser::ParseOpenMPAttributeArgs(const IdentifierInfo *AttrName,
CachedTokens &OpenMPTokens) {
// Both 'sequence' and 'directive' attributes require arguments, so parse the
// open paren for the argument list.
@@ -4181,7 +4354,7 @@ void Parser::ParseOpenMPAttributeArgs(IdentifierInfo *AttrName,
// * An identifier (omp) for the attribute namespace followed by ::
// * An identifier (directive) or an identifier (sequence).
SourceLocation IdentLoc;
- IdentifierInfo *Ident = TryParseCXX11AttributeIdentifier(IdentLoc);
+ const IdentifierInfo *Ident = TryParseCXX11AttributeIdentifier(IdentLoc);
// If there is an identifier and it is 'omp', a double colon is required
// followed by the actual identifier we're after.
@@ -4242,23 +4415,29 @@ static bool IsBuiltInOrStandardCXX11Attribute(IdentifierInfo *AttrName,
/// '[' balanced-token-seq ']'
/// '{' balanced-token-seq '}'
/// any token but '(', ')', '[', ']', '{', or '}'
-bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
- SourceLocation AttrNameLoc,
- ParsedAttributes &Attrs,
- SourceLocation *EndLoc,
- IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc,
- CachedTokens &OpenMPTokens) {
+bool Parser::ParseCXX11AttributeArgs(
+ IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
+ SourceLocation ScopeLoc, CachedTokens &OpenMPTokens) {
assert(Tok.is(tok::l_paren) && "Not a C++11 attribute argument list");
SourceLocation LParenLoc = Tok.getLocation();
const LangOptions &LO = getLangOpts();
- ParsedAttr::Syntax Syntax =
- LO.CPlusPlus ? ParsedAttr::AS_CXX11 : ParsedAttr::AS_C2x;
+ ParsedAttr::Form Form =
+ LO.CPlusPlus ? ParsedAttr::Form::CXX11() : ParsedAttr::Form::C23();
+
+ // Try parsing microsoft attributes
+ if (getLangOpts().MicrosoftExt || getLangOpts().HLSL) {
+ if (hasAttribute(AttributeCommonInfo::Syntax::AS_Microsoft, ScopeName,
+ AttrName, getTargetInfo(), getLangOpts()))
+ Form = ParsedAttr::Form::Microsoft();
+ }
// If the attribute isn't known, we will not attempt to parse any
// arguments.
- if (!hasAttribute(LO.CPlusPlus ? AttrSyntax::CXX : AttrSyntax::C, ScopeName,
- AttrName, getTargetInfo(), getLangOpts())) {
+ if (Form.getSyntax() != ParsedAttr::AS_Microsoft &&
+ !hasAttribute(LO.CPlusPlus ? AttributeCommonInfo::Syntax::AS_CXX11
+ : AttributeCommonInfo::Syntax::AS_C23,
+ ScopeName, AttrName, getTargetInfo(), getLangOpts())) {
// Eat the left paren, then skip to the ending right paren.
ConsumeParen();
SkipUntil(tok::r_paren);
@@ -4269,14 +4448,14 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
// GNU-scoped attributes have some special cases to handle GNU-specific
// behaviors.
ParseGNUAttributeArgs(AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
- ScopeLoc, Syntax, nullptr);
+ ScopeLoc, Form, nullptr);
return true;
}
if (ScopeName && ScopeName->isStr("omp")) {
Diag(AttrNameLoc, getLangOpts().OpenMP >= 51
? diag::warn_omp51_compat_attributes
- : diag::ext_omp_attributes);
+ : diag::ext_omp_attributes);
ParseOpenMPAttributeArgs(AttrName, OpenMPTokens);
@@ -4289,15 +4468,22 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
// Some Clang-scoped attributes have some special parsing behavior.
if (ScopeName && (ScopeName->isStr("clang") || ScopeName->isStr("_Clang")))
NumArgs = ParseClangAttributeArgs(AttrName, AttrNameLoc, Attrs, EndLoc,
- ScopeName, ScopeLoc, Syntax);
+ ScopeName, ScopeLoc, Form);
else
- NumArgs =
- ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, EndLoc,
- ScopeName, ScopeLoc, Syntax);
+ NumArgs = ParseAttributeArgsCommon(AttrName, AttrNameLoc, Attrs, EndLoc,
+ ScopeName, ScopeLoc, Form);
if (!Attrs.empty() &&
IsBuiltInOrStandardCXX11Attribute(AttrName, ScopeName)) {
ParsedAttr &Attr = Attrs.back();
+
+ // Ignore attributes that don't exist for the target.
+ if (!Attr.existsInTarget(getTargetInfo())) {
+ Diag(LParenLoc, diag::warn_unknown_attribute_ignored) << AttrName;
+ Attr.setInvalid(true);
+ return true;
+ }
+
// If the attribute is a standard or built-in attribute and we are
// parsing an argument list, we need to determine whether this attribute
// was allowed to have an argument list (such as [[deprecated]]), and how
@@ -4320,7 +4506,7 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
return true;
}
-/// ParseCXX11AttributeSpecifier - Parse a C++11 or C2x attribute-specifier.
+/// Parse a C++11 or C23 attribute-specifier.
///
/// [C++11] attribute-specifier:
/// '[' '[' attribute-list ']' ']'
@@ -4348,16 +4534,43 @@ void Parser::ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
CachedTokens &OpenMPTokens,
SourceLocation *EndLoc) {
if (Tok.is(tok::kw_alignas)) {
- Diag(Tok.getLocation(), diag::warn_cxx98_compat_alignas);
+ if (getLangOpts().C23)
+ Diag(Tok, diag::warn_c23_compat_keyword) << Tok.getName();
+ else
+ Diag(Tok.getLocation(), diag::warn_cxx98_compat_alignas);
ParseAlignmentSpecifier(Attrs, EndLoc);
return;
}
+ if (Tok.isRegularKeywordAttribute()) {
+ SourceLocation Loc = Tok.getLocation();
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ ParsedAttr::Form Form = ParsedAttr::Form(Tok.getKind());
+ bool TakesArgs = doesKeywordAttributeTakeArgs(Tok.getKind());
+ ConsumeToken();
+ if (TakesArgs) {
+ if (!Tok.is(tok::l_paren))
+ Diag(Tok.getLocation(), diag::err_expected_lparen_after) << AttrName;
+ else
+ ParseAttributeArgsCommon(AttrName, Loc, Attrs, EndLoc,
+ /*ScopeName*/ nullptr,
+ /*ScopeLoc*/ Loc, Form);
+ } else
+ Attrs.addNew(AttrName, Loc, nullptr, Loc, nullptr, 0, Form);
+ return;
+ }
+
assert(Tok.is(tok::l_square) && NextToken().is(tok::l_square) &&
"Not a double square bracket attribute list");
SourceLocation OpenLoc = Tok.getLocation();
- Diag(OpenLoc, diag::warn_cxx98_compat_attribute);
+ if (getLangOpts().CPlusPlus) {
+ Diag(OpenLoc, getLangOpts().CPlusPlus11 ? diag::warn_cxx98_compat_attribute
+ : diag::warn_ext_cxx11_attributes);
+ } else {
+ Diag(OpenLoc, getLangOpts().C23 ? diag::warn_pre_c23_compat_attributes
+ : diag::warn_ext_c23_attributes);
+ }
ConsumeBracket();
checkCompoundToken(OpenLoc, tok::l_square, CompoundToken::AttrBegin);
@@ -4371,7 +4584,8 @@ void Parser::ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
: diag::ext_using_attribute_ns);
ConsumeToken();
- CommonScopeName = TryParseCXX11AttributeIdentifier(CommonScopeLoc);
+ CommonScopeName = TryParseCXX11AttributeIdentifier(
+ CommonScopeLoc, Sema::AttributeCompletion::Scope);
if (!CommonScopeName) {
Diag(Tok.getLocation(), diag::err_expected) << tok::identifier;
SkipUntil(tok::r_square, tok::colon, StopBeforeMatch);
@@ -4380,10 +4594,8 @@ void Parser::ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
Diag(Tok.getLocation(), diag::err_expected) << tok::colon;
}
- llvm::SmallDenseMap<IdentifierInfo*, SourceLocation, 4> SeenAttrs;
-
bool AttrParsed = false;
- while (!Tok.isOneOf(tok::r_square, tok::semi)) {
+ while (!Tok.isOneOf(tok::r_square, tok::semi, tok::eof)) {
if (AttrParsed) {
// If we parsed an attribute, a comma is required before parsing any
// additional attributes.
@@ -4401,7 +4613,8 @@ void Parser::ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
SourceLocation ScopeLoc, AttrLoc;
IdentifierInfo *ScopeName = nullptr, *AttrName = nullptr;
- AttrName = TryParseCXX11AttributeIdentifier(AttrLoc);
+ AttrName = TryParseCXX11AttributeIdentifier(
+ AttrLoc, Sema::AttributeCompletion::Attribute, CommonScopeName);
if (!AttrName)
// Break out to the "expected ']'" diagnostic.
break;
@@ -4411,7 +4624,8 @@ void Parser::ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
ScopeName = AttrName;
ScopeLoc = AttrLoc;
- AttrName = TryParseCXX11AttributeIdentifier(AttrLoc);
+ AttrName = TryParseCXX11AttributeIdentifier(
+ AttrLoc, Sema::AttributeCompletion::Attribute, ScopeName);
if (!AttrName) {
Diag(Tok.getLocation(), diag::err_expected) << tok::identifier;
SkipUntil(tok::r_square, tok::comma, StopAtSemi | StopBeforeMatch);
@@ -4439,13 +4653,13 @@ void Parser::ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
AttrName,
SourceRange(ScopeLoc.isValid() ? ScopeLoc : AttrLoc, AttrLoc),
ScopeName, ScopeLoc, nullptr, 0,
- getLangOpts().CPlusPlus ? ParsedAttr::AS_CXX11 : ParsedAttr::AS_C2x);
+ getLangOpts().CPlusPlus ? ParsedAttr::Form::CXX11()
+ : ParsedAttr::Form::C23());
AttrParsed = true;
}
if (TryConsumeToken(tok::ellipsis))
- Diag(Tok, diag::err_cxx11_attribute_forbids_ellipsis)
- << AttrName;
+ Diag(Tok, diag::err_cxx11_attribute_forbids_ellipsis) << AttrName;
}
// If we hit an error and recovered by parsing up to a semicolon, eat the
@@ -4466,34 +4680,33 @@ void Parser::ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
SkipUntil(tok::r_square);
}
-/// ParseCXX11Attributes - Parse a C++11 or C2x attribute-specifier-seq.
+/// ParseCXX11Attributes - Parse a C++11 or C23 attribute-specifier-seq.
///
/// attribute-specifier-seq:
/// attribute-specifier-seq[opt] attribute-specifier
-void Parser::ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
- SourceLocation *endLoc) {
- assert(standardAttributesAllowed());
-
- SourceLocation StartLoc = Tok.getLocation(), Loc;
- if (!endLoc)
- endLoc = &Loc;
+void Parser::ParseCXX11Attributes(ParsedAttributes &Attrs) {
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation EndLoc = StartLoc;
do {
- ParseCXX11AttributeSpecifier(attrs, endLoc);
- } while (isCXX11AttributeSpecifier());
+ ParseCXX11AttributeSpecifier(Attrs, &EndLoc);
+ } while (isAllowedCXX11AttributeSpecifier());
- attrs.Range = SourceRange(StartLoc, *endLoc);
+ Attrs.Range = SourceRange(StartLoc, EndLoc);
}
void Parser::DiagnoseAndSkipCXX11Attributes() {
+ auto Keyword =
+ Tok.isRegularKeywordAttribute() ? Tok.getIdentifierInfo() : nullptr;
// Start and end location of an attribute or an attribute list.
SourceLocation StartLoc = Tok.getLocation();
SourceLocation EndLoc = SkipCXX11Attributes();
if (EndLoc.isValid()) {
SourceRange Range(StartLoc, EndLoc);
- Diag(StartLoc, diag::err_attributes_not_allowed)
- << Range;
+ (Keyword ? Diag(StartLoc, diag::err_keyword_not_allowed) << Keyword
+ : Diag(StartLoc, diag::err_attributes_not_allowed))
+ << Range;
}
}
@@ -4509,8 +4722,13 @@ SourceLocation Parser::SkipCXX11Attributes() {
T.consumeOpen();
T.skipToEnd();
EndLoc = T.getCloseLocation();
+ } else if (Tok.isRegularKeywordAttribute() &&
+ !doesKeywordAttributeTakeArgs(Tok.getKind())) {
+ EndLoc = Tok.getLocation();
+ ConsumeToken();
} else {
- assert(Tok.is(tok::kw_alignas) && "not an attribute specifier");
+ assert((Tok.is(tok::kw_alignas) || Tok.isRegularKeywordAttribute()) &&
+ "not an attribute specifier");
ConsumeToken();
BalancedDelimiterTracker T(*this, tok::l_paren);
if (!T.consumeOpen())
@@ -4539,9 +4757,9 @@ void Parser::ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs) {
}
ArgsVector ArgExprs;
- if (Tok.is(tok::string_literal)) {
+ if (isTokenStringLiteral()) {
// Easy case: uuid("...") -- quoted string.
- ExprResult StringResult = ParseStringLiteralExpression();
+ ExprResult StringResult = ParseUnevaluatedStringLiteralExpression();
if (StringResult.isInvalid())
return;
ArgExprs.push_back(StringResult.get());
@@ -4596,14 +4814,14 @@ void Parser::ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs) {
Toks[0].setLiteralData(StrBuffer.data());
Toks[0].setLength(StrBuffer.size());
StringLiteral *UuidString =
- cast<StringLiteral>(Actions.ActOnStringLiteral(Toks, nullptr).get());
+ cast<StringLiteral>(Actions.ActOnUnevaluatedStringLiteral(Toks).get());
ArgExprs.push_back(UuidString);
}
if (!T.consumeClose()) {
Attrs.addNew(UuidIdent, SourceRange(UuidLoc, T.getCloseLocation()), nullptr,
SourceLocation(), ArgExprs.data(), ArgExprs.size(),
- ParsedAttr::AS_Microsoft);
+ ParsedAttr::Form::Microsoft());
}
}
@@ -4615,10 +4833,11 @@ void Parser::ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs) {
/// [MS] ms-attribute-seq:
/// ms-attribute[opt]
/// ms-attribute ms-attribute-seq
-void Parser::ParseMicrosoftAttributes(ParsedAttributes &attrs,
- SourceLocation *endLoc) {
+void Parser::ParseMicrosoftAttributes(ParsedAttributes &Attrs) {
assert(Tok.is(tok::l_square) && "Not a Microsoft attribute list");
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation EndLoc = StartLoc;
do {
// FIXME: If this is actually a C++11 attribute, parse it as one.
BalancedDelimiterTracker T(*this, tok::l_square);
@@ -4626,19 +4845,49 @@ void Parser::ParseMicrosoftAttributes(ParsedAttributes &attrs,
// Skip most ms attributes except for a specific list.
while (true) {
- SkipUntil(tok::r_square, tok::identifier, StopAtSemi | StopBeforeMatch);
+ SkipUntil(tok::r_square, tok::identifier,
+ StopAtSemi | StopBeforeMatch | StopAtCodeCompletion);
+ if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
+ Actions.CodeCompleteAttribute(AttributeCommonInfo::AS_Microsoft,
+ Sema::AttributeCompletion::Attribute,
+ /*Scope=*/nullptr);
+ break;
+ }
if (Tok.isNot(tok::identifier)) // ']', but also eof
break;
if (Tok.getIdentifierInfo()->getName() == "uuid")
- ParseMicrosoftUuidAttributeArgs(attrs);
- else
+ ParseMicrosoftUuidAttributeArgs(Attrs);
+ else {
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ SourceLocation NameLoc = Tok.getLocation();
ConsumeToken();
+ ParsedAttr::Kind AttrKind =
+ ParsedAttr::getParsedKind(II, nullptr, ParsedAttr::AS_Microsoft);
+ // For HLSL we want to handle all attributes, but for MSVC compat, we
+ // silently ignore unknown Microsoft attributes.
+ if (getLangOpts().HLSL || AttrKind != ParsedAttr::UnknownAttribute) {
+ bool AttrParsed = false;
+ if (Tok.is(tok::l_paren)) {
+ CachedTokens OpenMPTokens;
+ AttrParsed =
+ ParseCXX11AttributeArgs(II, NameLoc, Attrs, &EndLoc, nullptr,
+ SourceLocation(), OpenMPTokens);
+ ReplayOpenMPAttributeTokens(OpenMPTokens);
+ }
+ if (!AttrParsed) {
+ Attrs.addNew(II, NameLoc, nullptr, SourceLocation(), nullptr, 0,
+ ParsedAttr::Form::Microsoft());
+ }
+ }
+ }
}
T.consumeClose();
- if (endLoc)
- *endLoc = T.getCloseLocation();
+ EndLoc = T.getCloseLocation();
} while (Tok.is(tok::l_square));
+
+ Attrs.Range = SourceRange(StartLoc, EndLoc);
}
void Parser::ParseMicrosoftIfExistsClassDeclaration(
@@ -4661,9 +4910,9 @@ void Parser::ParseMicrosoftIfExistsClassDeclaration(
case IEB_Dependent:
Diag(Result.KeywordLoc, diag::warn_microsoft_dependent_exists)
- << Result.IsIfExists;
+ << Result.IsIfExists;
// Fall through to skip.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case IEB_Skip:
Braces.skipToEnd();
@@ -4673,8 +4922,7 @@ void Parser::ParseMicrosoftIfExistsClassDeclaration(
while (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
// __if_exists, __if_not_exists can nest.
if (Tok.isOneOf(tok::kw___if_exists, tok::kw___if_not_exists)) {
- ParseMicrosoftIfExistsClassDeclaration(TagType,
- AccessAttrs, CurAS);
+ ParseMicrosoftIfExistsClassDeclaration(TagType, AccessAttrs, CurAS);
continue;
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp b/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
index 22f3b7624c45..e862856a08ca 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
@@ -20,16 +20,19 @@
///
//===----------------------------------------------------------------------===//
-#include "clang/Parse/Parser.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "llvm/ADT/SmallVector.h"
+#include <optional>
using namespace clang;
/// Simple precedence-based parser for binary/ternary operators.
@@ -208,14 +211,23 @@ Parser::ParseConstantExpressionInExprEvalContext(TypeCastState isTypeCast) {
return Actions.ActOnConstantExpression(Res);
}
-ExprResult Parser::ParseConstantExpression(TypeCastState isTypeCast) {
+ExprResult Parser::ParseConstantExpression() {
// C++03 [basic.def.odr]p2:
// An expression is potentially evaluated unless it appears where an
// integral constant expression is required (see 5.19) [...].
// C++98 and C++11 have no such rule, but this is only a defect in C++98.
EnterExpressionEvaluationContext ConstantEvaluated(
Actions, Sema::ExpressionEvaluationContext::ConstantEvaluated);
- return ParseConstantExpressionInExprEvalContext(isTypeCast);
+ return ParseConstantExpressionInExprEvalContext(NotTypeCast);
+}
+
+ExprResult Parser::ParseArrayBoundExpression() {
+ EnterExpressionEvaluationContext ConstantEvaluated(
+ Actions, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+ // If we parse the bound of a VLA... we parse a non-constant
+ // constant-expression!
+ Actions.ExprEvalContexts.back().InConditionallyConstantEvaluateContext = true;
+ return ParseConstantExpressionInExprEvalContext(NotTypeCast);
}
ExprResult Parser::ParseCaseExpression(SourceLocation CaseLoc) {
@@ -400,7 +412,7 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
SourceLocation ColonLoc;
auto SavedType = PreferredType;
- while (1) {
+ while (true) {
// Every iteration may rely on a preferred type for the whole expression.
PreferredType = SavedType;
// If this token has a lower precedence than we are allowed to parse (e.g.
@@ -788,9 +800,12 @@ class CastExpressionIdValidator final : public CorrectionCandidateCallback {
/// [GNU] '__builtin_choose_expr' '(' assign-expr ',' assign-expr ','
/// assign-expr ')'
/// [GNU] '__builtin_FILE' '(' ')'
+/// [CLANG] '__builtin_FILE_NAME' '(' ')'
/// [GNU] '__builtin_FUNCTION' '(' ')'
+/// [MS] '__builtin_FUNCSIG' '(' ')'
/// [GNU] '__builtin_LINE' '(' ')'
/// [CLANG] '__builtin_COLUMN' '(' ')'
+/// [GNU] '__builtin_source_location' '(' ')'
/// [GNU] '__builtin_types_compatible_p' '(' type-name ',' type-name ')'
/// [GNU] '__null'
/// [OBJC] '[' objc-message-expr ']'
@@ -942,9 +957,8 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
ParenParseOption ParenExprType;
switch (ParseKind) {
case CastParseKind::UnaryExprOnly:
- if (!getLangOpts().CPlusPlus)
- ParenExprType = CompoundLiteral;
- LLVM_FALLTHROUGH;
+ assert(getLangOpts().CPlusPlus && "not possible to get here in C");
+ [[fallthrough]];
case CastParseKind::AnyCastExpr:
ParenExprType = ParenParseOption::CastExpr;
break;
@@ -1003,7 +1017,12 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
break;
case tok::kw_nullptr:
- Diag(Tok, diag::warn_cxx98_compat_nullptr);
+ if (getLangOpts().CPlusPlus)
+ Diag(Tok, diag::warn_cxx98_compat_nullptr);
+ else
+ Diag(Tok, getLangOpts().C23 ? diag::warn_c23_compat_keyword
+ : diag::ext_c_nullptr) << Tok.getName();
+
Res = Actions.ActOnCXXNullPtrLiteral(ConsumeToken());
break;
@@ -1037,9 +1056,10 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
return ParseCastExpression(ParseKind, isAddressOfOperand, isTypeCast,
isVectorLiteral, NotPrimaryExpression);
- case tok::identifier: { // primary-expression: identifier
- // unqualified-id: identifier
- // constant: enumeration-constant
+ case tok::identifier:
+ ParseIdentifier: { // primary-expression: identifier
+ // unqualified-id: identifier
+ // constant: enumeration-constant
// Turn a potentially qualified name into a annot_typename or
// annot_cxxscope if it would be valid. This handles things like x::y, etc.
if (getLangOpts().CPlusPlus) {
@@ -1067,6 +1087,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
REVERTIBLE_TYPE_TRAIT(__is_array);
REVERTIBLE_TYPE_TRAIT(__is_assignable);
REVERTIBLE_TYPE_TRAIT(__is_base_of);
+ REVERTIBLE_TYPE_TRAIT(__is_bounded_array);
REVERTIBLE_TYPE_TRAIT(__is_class);
REVERTIBLE_TYPE_TRAIT(__is_complete_type);
REVERTIBLE_TYPE_TRAIT(__is_compound);
@@ -1092,15 +1113,18 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
REVERTIBLE_TYPE_TRAIT(__is_nothrow_assignable);
REVERTIBLE_TYPE_TRAIT(__is_nothrow_constructible);
REVERTIBLE_TYPE_TRAIT(__is_nothrow_destructible);
+ REVERTIBLE_TYPE_TRAIT(__is_nullptr);
REVERTIBLE_TYPE_TRAIT(__is_object);
REVERTIBLE_TYPE_TRAIT(__is_pod);
REVERTIBLE_TYPE_TRAIT(__is_pointer);
REVERTIBLE_TYPE_TRAIT(__is_polymorphic);
REVERTIBLE_TYPE_TRAIT(__is_reference);
+ REVERTIBLE_TYPE_TRAIT(__is_referenceable);
REVERTIBLE_TYPE_TRAIT(__is_rvalue_expr);
REVERTIBLE_TYPE_TRAIT(__is_rvalue_reference);
REVERTIBLE_TYPE_TRAIT(__is_same);
REVERTIBLE_TYPE_TRAIT(__is_scalar);
+ REVERTIBLE_TYPE_TRAIT(__is_scoped_enum);
REVERTIBLE_TYPE_TRAIT(__is_sealed);
REVERTIBLE_TYPE_TRAIT(__is_signed);
REVERTIBLE_TYPE_TRAIT(__is_standard_layout);
@@ -1108,10 +1132,16 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
REVERTIBLE_TYPE_TRAIT(__is_trivially_assignable);
REVERTIBLE_TYPE_TRAIT(__is_trivially_constructible);
REVERTIBLE_TYPE_TRAIT(__is_trivially_copyable);
+ REVERTIBLE_TYPE_TRAIT(__is_unbounded_array);
REVERTIBLE_TYPE_TRAIT(__is_union);
REVERTIBLE_TYPE_TRAIT(__is_unsigned);
REVERTIBLE_TYPE_TRAIT(__is_void);
REVERTIBLE_TYPE_TRAIT(__is_volatile);
+ REVERTIBLE_TYPE_TRAIT(__reference_binds_to_temporary);
+ REVERTIBLE_TYPE_TRAIT(__reference_constructs_from_temporary);
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) \
+ REVERTIBLE_TYPE_TRAIT(RTT_JOIN(__, Trait));
+#include "clang/Basic/TransformTypeTraits.def"
#undef REVERTIBLE_TYPE_TRAIT
#undef RTT_JOIN
}
@@ -1211,9 +1241,9 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
DS.SetTypeSpecType(TST_typename, ILoc, PrevSpec, DiagID, Typ,
Actions.getASTContext().getPrintingPolicy());
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
- TypeResult Ty = Actions.ActOnTypeName(getCurScope(),
- DeclaratorInfo);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
+ TypeResult Ty = Actions.ActOnTypeName(DeclaratorInfo);
if (Ty.isInvalid())
break;
@@ -1278,9 +1308,17 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw_L__FUNCTION__: // primary-expression: L__FUNCTION__ [MS]
case tok::kw_L__FUNCSIG__: // primary-expression: L__FUNCSIG__ [MS]
case tok::kw___PRETTY_FUNCTION__: // primary-expression: __P..Y_F..N__ [GNU]
- Res = Actions.ActOnPredefinedExpr(Tok.getLocation(), SavedKind);
- ConsumeToken();
- break;
+ // Function local predefined macros are represented by PredefinedExpr except
+ // when Microsoft extensions are enabled and one of these macros is adjacent
+ // to a string literal or another one of these macros.
+ if (!(getLangOpts().MicrosoftExt &&
+ tokenIsLikeStringLiteral(Tok, getLangOpts()) &&
+ tokenIsLikeStringLiteral(NextToken(), getLangOpts()))) {
+ Res = Actions.ActOnPredefinedExpr(Tok.getLocation(), SavedKind);
+ ConsumeToken();
+ break;
+ }
+ [[fallthrough]]; // treat MS function local macros as concatenable strings
case tok::string_literal: // primary-expression: string-literal
case tok::wide_string_literal:
case tok::utf8_string_literal:
@@ -1301,8 +1339,11 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw___builtin_convertvector:
case tok::kw___builtin_COLUMN:
case tok::kw___builtin_FILE:
+ case tok::kw___builtin_FILE_NAME:
case tok::kw___builtin_FUNCTION:
+ case tok::kw___builtin_FUNCSIG:
case tok::kw___builtin_LINE:
+ case tok::kw___builtin_source_location:
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
// This parses the complete suffix; we can return early.
@@ -1354,7 +1395,8 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
// Special treatment because of member pointers
SourceLocation SavedLoc = ConsumeToken();
PreferredType.enterUnary(Actions, Tok.getLocation(), tok::amp, SavedLoc);
- Res = ParseCastExpression(AnyCastExpr, true);
+
+ Res = ParseCastExpression(AnyCastExpr, /*isAddressOfOperand=*/true);
if (!Res.isInvalid()) {
Expr *Arg = Res.get();
Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Arg);
@@ -1379,7 +1421,8 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
Res = ParseCastExpression(AnyCastExpr);
if (!Res.isInvalid()) {
Expr *Arg = Res.get();
- Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Arg);
+ Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Arg,
+ isAddressOfOperand);
if (Res.isInvalid())
Res = Actions.CreateRecoveryExpr(SavedLoc, Arg->getEndLoc(), Arg);
}
@@ -1410,15 +1453,19 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw__Alignof: // unary-expression: '_Alignof' '(' type-name ')'
if (!getLangOpts().C11)
Diag(Tok, diag::ext_c11_feature) << Tok.getName();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case tok::kw_alignof: // unary-expression: 'alignof' '(' type-id ')'
case tok::kw___alignof: // unary-expression: '__alignof' unary-expression
// unary-expression: '__alignof' '(' type-name ')'
case tok::kw_sizeof: // unary-expression: 'sizeof' unary-expression
// unary-expression: 'sizeof' '(' type-name ')'
+ // unary-expression: '__datasizeof' unary-expression
+ // unary-expression: '__datasizeof' '(' type-name ')'
+ case tok::kw___datasizeof:
case tok::kw_vec_step: // unary-expression: OpenCL 'vec_step' expression
// unary-expression: '__builtin_omp_required_simd_align' '(' type-name ')'
case tok::kw___builtin_omp_required_simd_align:
+ case tok::kw___builtin_vectorelements:
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
AllowSuffix = false;
@@ -1488,8 +1535,9 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
PrevSpec, DiagID, Type,
Actions.getASTContext().getPrintingPolicy());
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
- TypeResult Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
+ TypeResult Ty = Actions.ActOnTypeName(DeclaratorInfo);
if (Ty.isInvalid())
break;
@@ -1498,7 +1546,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
Ty.get(), nullptr);
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case tok::annot_decltype:
case tok::kw_char:
@@ -1513,6 +1561,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw___int64:
case tok::kw___int128:
case tok::kw__ExtInt:
+ case tok::kw__BitInt:
case tok::kw_signed:
case tok::kw_unsigned:
case tok::kw_half:
@@ -1521,10 +1570,15 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw___bf16:
case tok::kw__Float16:
case tok::kw___float128:
+ case tok::kw___ibm128:
case tok::kw_void:
+ case tok::kw_auto:
case tok::kw_typename:
case tok::kw_typeof:
case tok::kw___vector:
+ case tok::kw__Accum:
+ case tok::kw__Fract:
+ case tok::kw__Sat:
#define GENERIC_IMAGE_TYPE(ImgType, Id) case tok::kw_##ImgType##_t:
#include "clang/Basic/OpenCLImageTypes.def"
{
@@ -1586,9 +1640,9 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
// cast expression.
CXXScopeSpec SS;
ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false);
- AnnotateTemplateIdTokenAsType(SS);
+ AnnotateTemplateIdTokenAsType(SS, ImplicitTypenameContext::Yes);
return ParseCastExpression(ParseKind, isAddressOfOperand, NotCastExpr,
isTypeCast, isVectorLiteral,
NotPrimaryExpression);
@@ -1607,14 +1661,14 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
// translate it into a type and continue parsing as a cast
// expression.
CXXScopeSpec SS;
- AnnotateTemplateIdTokenAsType(SS);
+ AnnotateTemplateIdTokenAsType(SS, ImplicitTypenameContext::Yes);
return ParseCastExpression(ParseKind, isAddressOfOperand,
NotCastExpr, isTypeCast, isVectorLiteral,
NotPrimaryExpression);
}
// Fall through to treat the template-id as an id-expression.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case tok::kw_operator: // [C++] id-expression: operator/conversion-function-id
@@ -1732,6 +1786,17 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
PreferredType.get(Tok.getLocation()));
return ExprError();
}
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) case tok::kw___##Trait:
+#include "clang/Basic/TransformTypeTraits.def"
+ // HACK: libstdc++ uses some of the transform-type-traits as alias
+ // templates, so we need to work around this.
+ if (!NextToken().is(tok::l_paren)) {
+ Tok.setKind(tok::identifier);
+ Diag(Tok, diag::ext_keyword_as_ident)
+ << Tok.getIdentifierInfo()->getName() << 0;
+ goto ParseIdentifier;
+ }
+ goto ExpectedExpression;
case tok::l_square:
if (getLangOpts().CPlusPlus11) {
if (getLangOpts().ObjC) {
@@ -1757,8 +1822,9 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
Res = ParseObjCMessageExpression();
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
+ ExpectedExpression:
NotCastExpr = true;
return ExprError();
}
@@ -1788,7 +1854,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
if (Tok.isAtStartOfLine())
return Res;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case tok::period:
case tok::arrow:
break;
@@ -1833,6 +1899,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
/// primary-expression
/// postfix-expression '[' expression ']'
/// postfix-expression '[' braced-init-list ']'
+/// postfix-expression '[' expression-list [opt] ']' [C++23 12.4.5]
/// postfix-expression '(' argument-expression-list[opt] ')'
/// postfix-expression '.' identifier
/// postfix-expression '->' identifier
@@ -1851,7 +1918,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
// parsed, see if there are any postfix-expression pieces here.
SourceLocation Loc;
auto SavedType = PreferredType;
- while (1) {
+ while (true) {
// Each iteration relies on preferred type for the whole expression.
PreferredType = SavedType;
switch (Tok.getKind()) {
@@ -1875,7 +1942,7 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
break;
}
// Fall through; this isn't a message send.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default: // Not a postfix-expression suffix.
return LHS;
@@ -1896,30 +1963,66 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
(void)Actions.CorrectDelayedTyposInExpr(LHS);
return ExprError();
}
-
BalancedDelimiterTracker T(*this, tok::l_square);
T.consumeOpen();
Loc = T.getOpenLocation();
- ExprResult Idx, Length, Stride;
+ ExprResult Length, Stride;
SourceLocation ColonLocFirst, ColonLocSecond;
+ ExprVector ArgExprs;
+ bool HasError = false;
PreferredType.enterSubscript(Actions, Tok.getLocation(), LHS.get());
- if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
- Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
- Idx = ParseBraceInitializer();
- } else if (getLangOpts().OpenMP) {
+
+ // We try to parse a list of indexes in all language mode first
+ // and, in we find 0 or one index, we try to parse an OpenMP/OpenACC array
+ // section. This allow us to support C++23 multi dimensional subscript and
+ // OpenMP/OpenACC sections in the same language mode.
+ if ((!getLangOpts().OpenMP && !AllowOpenACCArraySections) ||
+ Tok.isNot(tok::colon)) {
+ if (!getLangOpts().CPlusPlus23) {
+ ExprResult Idx;
+ if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+ Idx = ParseBraceInitializer();
+ } else {
+ Idx = ParseExpression(); // May be a comma expression
+ }
+ LHS = Actions.CorrectDelayedTyposInExpr(LHS);
+ Idx = Actions.CorrectDelayedTyposInExpr(Idx);
+ if (Idx.isInvalid()) {
+ HasError = true;
+ } else {
+ ArgExprs.push_back(Idx.get());
+ }
+ } else if (Tok.isNot(tok::r_square)) {
+ if (ParseExpressionList(ArgExprs)) {
+ LHS = Actions.CorrectDelayedTyposInExpr(LHS);
+ HasError = true;
+ }
+ }
+ }
+
+ // Handle OpenACC first, since 'AllowOpenACCArraySections' is only enabled
+ // when actively parsing a 'var' in a 'var-list' during clause/'cache'
+ // parsing, so it is the most specific, and best allows us to handle
+ // OpenACC and OpenMP at the same time.
+ if (ArgExprs.size() <= 1 && AllowOpenACCArraySections) {
ColonProtectionRAIIObject RAII(*this);
- // Parse [: or [ expr or [ expr :
- if (!Tok.is(tok::colon)) {
- // [ expr
- Idx = ParseExpression();
+ if (Tok.is(tok::colon)) {
+ // Consume ':'
+ ColonLocFirst = ConsumeToken();
+ Length = Actions.CorrectDelayedTyposInExpr(ParseExpression());
}
+ } else if (ArgExprs.size() <= 1 && getLangOpts().OpenMP) {
+ ColonProtectionRAIIObject RAII(*this);
if (Tok.is(tok::colon)) {
// Consume ':'
ColonLocFirst = ConsumeToken();
if (Tok.isNot(tok::r_square) &&
(getLangOpts().OpenMP < 50 ||
- ((Tok.isNot(tok::colon) && getLangOpts().OpenMP >= 50))))
+ ((Tok.isNot(tok::colon) && getLangOpts().OpenMP >= 50)))) {
Length = ParseExpression();
+ Length = Actions.CorrectDelayedTyposInExpr(Length);
+ }
}
if (getLangOpts().OpenMP >= 50 &&
(OMPClauseKind == llvm::omp::Clause::OMPC_to ||
@@ -1931,27 +2034,29 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
Stride = ParseExpression();
}
}
- } else
- Idx = ParseExpression();
+ }
SourceLocation RLoc = Tok.getLocation();
-
LHS = Actions.CorrectDelayedTyposInExpr(LHS);
- Idx = Actions.CorrectDelayedTyposInExpr(Idx);
- Length = Actions.CorrectDelayedTyposInExpr(Length);
- if (!LHS.isInvalid() && !Idx.isInvalid() && !Length.isInvalid() &&
+
+ if (!LHS.isInvalid() && !HasError && !Length.isInvalid() &&
!Stride.isInvalid() && Tok.is(tok::r_square)) {
if (ColonLocFirst.isValid() || ColonLocSecond.isValid()) {
+ // FIXME: OpenACC hasn't implemented Sema/Array section handling at a
+ // semantic level yet. For now, just reuse the OpenMP implementation
+ // as it gets the parsing/type management mostly right, and we can
+ // replace this call to ActOnOpenACCArraySectionExpr in the future.
+ // Eventually we'll genericize the OPenMPArraySectionExpr type as
+ // well.
LHS = Actions.ActOnOMPArraySectionExpr(
- LHS.get(), Loc, Idx.get(), ColonLocFirst, ColonLocSecond,
- Length.get(), Stride.get(), RLoc);
+ LHS.get(), Loc, ArgExprs.empty() ? nullptr : ArgExprs[0],
+ ColonLocFirst, ColonLocSecond, Length.get(), Stride.get(), RLoc);
} else {
LHS = Actions.ActOnArraySubscriptExpr(getCurScope(), LHS.get(), Loc,
- Idx.get(), RLoc);
+ ArgExprs, RLoc);
}
} else {
LHS = ExprError();
- Idx = ExprError();
}
// Match the ']'.
@@ -1971,10 +2076,9 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
if (OpKind == tok::lesslessless) {
ExprVector ExecConfigExprs;
- CommaLocsTy ExecConfigCommaLocs;
SourceLocation OpenLoc = ConsumeToken();
- if (ParseSimpleExpressionList(ExecConfigExprs, ExecConfigCommaLocs)) {
+ if (ParseSimpleExpressionList(ExecConfigExprs)) {
(void)Actions.CorrectDelayedTyposInExpr(LHS);
LHS = ExprError();
}
@@ -2014,16 +2118,15 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
}
ExprVector ArgExprs;
- CommaLocsTy CommaLocs;
auto RunSignatureHelp = [&]() -> QualType {
QualType PreferredType = Actions.ProduceCallSignatureHelp(
- getCurScope(), LHS.get(), ArgExprs, PT.getOpenLocation());
+ LHS.get(), ArgExprs, PT.getOpenLocation());
CalledSignatureHelp = true;
return PreferredType;
};
if (OpKind == tok::l_paren || !LHS.isInvalid()) {
if (Tok.isNot(tok::r_paren)) {
- if (ParseExpressionList(ArgExprs, CommaLocs, [&] {
+ if (ParseExpressionList(ArgExprs, [&] {
PreferredType.enterFunctionArgument(Tok.getLocation(),
RunSignatureHelp);
})) {
@@ -2061,9 +2164,6 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
PT.consumeClose();
LHS = ExprError();
} else {
- assert(
- (ArgExprs.size() == 0 || ArgExprs.size() - 1 == CommaLocs.size()) &&
- "Unexpected number of commas!");
Expr *Fn = LHS.get();
SourceLocation RParLoc = Tok.getLocation();
LHS = Actions.ActOnCallExpr(getCurScope(), Fn, Loc, ArgExprs, RParLoc,
@@ -2230,6 +2330,8 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
/// unary-expression: [C99 6.5.3]
/// 'sizeof' unary-expression
/// 'sizeof' '(' type-name ')'
+/// [Clang] '__datasizeof' unary-expression
+/// [Clang] '__datasizeof' '(' type-name ')'
/// [GNU] '__alignof' unary-expression
/// [GNU] '__alignof' '(' type-name ')'
/// [C11] '_Alignof' '(' type-name ')'
@@ -2239,6 +2341,13 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
/// typeof ( expressions )
/// typeof ( type-name )
/// [GNU/C++] typeof unary-expression
+/// [C23] typeof-specifier:
+/// typeof '(' typeof-specifier-argument ')'
+/// typeof_unqual '(' typeof-specifier-argument ')'
+///
+/// typeof-specifier-argument:
+/// expression
+/// type-name
///
/// [OpenCL 1.1 6.11.12] vec_step built-in function:
/// vec_step ( expressions )
@@ -2250,9 +2359,11 @@ Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
ParsedType &CastTy,
SourceRange &CastRange) {
- assert(OpTok.isOneOf(tok::kw_typeof, tok::kw_sizeof, tok::kw___alignof,
- tok::kw_alignof, tok::kw__Alignof, tok::kw_vec_step,
- tok::kw___builtin_omp_required_simd_align) &&
+ assert(OpTok.isOneOf(tok::kw_typeof, tok::kw_typeof_unqual, tok::kw_sizeof,
+ tok::kw___datasizeof, tok::kw___alignof, tok::kw_alignof,
+ tok::kw__Alignof, tok::kw_vec_step,
+ tok::kw___builtin_omp_required_simd_align,
+ tok::kw___builtin_vectorelements) &&
"Not a typeof/sizeof/alignof/vec_step expression!");
ExprResult Operand;
@@ -2261,12 +2372,13 @@ Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
if (Tok.isNot(tok::l_paren)) {
// If construct allows a form without parenthesis, user may forget to put
// pathenthesis around type name.
- if (OpTok.isOneOf(tok::kw_sizeof, tok::kw___alignof, tok::kw_alignof,
- tok::kw__Alignof)) {
+ if (OpTok.isOneOf(tok::kw_sizeof, tok::kw___datasizeof, tok::kw___alignof,
+ tok::kw_alignof, tok::kw__Alignof)) {
if (isTypeIdUnambiguously()) {
DeclSpec DS(AttrFactory);
ParseSpecifierQualifierList(DS);
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
ParseDeclarator(DeclaratorInfo);
SourceLocation LParenLoc = PP.getLocForEndOfToken(OpTok.getLocation());
@@ -2286,7 +2398,8 @@ Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
}
isCastExpr = false;
- if (OpTok.is(tok::kw_typeof) && !getLangOpts().CPlusPlus) {
+ if (OpTok.isOneOf(tok::kw_typeof, tok::kw_typeof_unqual) &&
+ !getLangOpts().CPlusPlus) {
Diag(Tok, diag::err_expected_after) << OpTok.getIdentifierInfo()
<< tok::l_paren;
return ExprError();
@@ -2312,7 +2425,8 @@ Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
return ExprEmpty();
}
- if (getLangOpts().CPlusPlus || OpTok.isNot(tok::kw_typeof)) {
+ if (getLangOpts().CPlusPlus ||
+ !OpTok.isOneOf(tok::kw_typeof, tok::kw_typeof_unqual)) {
// GNU typeof in C requires the expression to be parenthesized. Not so for
// sizeof/alignof or in C++. Therefore, the parenthesized expression is
// the start of a unary-expression, but doesn't include any postfix
@@ -2331,7 +2445,7 @@ Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
/// a parameter.
ExprResult Parser::ParseSYCLUniqueStableNameExpression() {
assert(Tok.is(tok::kw___builtin_sycl_unique_stable_name) &&
- "Not __bulitin_sycl_unique_stable_name");
+ "Not __builtin_sycl_unique_stable_name");
SourceLocation OpLoc = ConsumeToken();
BalancedDelimiterTracker T(*this, tok::l_paren);
@@ -2362,15 +2476,18 @@ ExprResult Parser::ParseSYCLUniqueStableNameExpression() {
/// 'sizeof' unary-expression
/// 'sizeof' '(' type-name ')'
/// [C++11] 'sizeof' '...' '(' identifier ')'
+/// [Clang] '__datasizeof' unary-expression
+/// [Clang] '__datasizeof' '(' type-name ')'
/// [GNU] '__alignof' unary-expression
/// [GNU] '__alignof' '(' type-name ')'
/// [C11] '_Alignof' '(' type-name ')'
/// [C++11] 'alignof' '(' type-id ')'
/// \endverbatim
ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
- assert(Tok.isOneOf(tok::kw_sizeof, tok::kw___alignof, tok::kw_alignof,
- tok::kw__Alignof, tok::kw_vec_step,
- tok::kw___builtin_omp_required_simd_align) &&
+ assert(Tok.isOneOf(tok::kw_sizeof, tok::kw___datasizeof, tok::kw___alignof,
+ tok::kw_alignof, tok::kw__Alignof, tok::kw_vec_step,
+ tok::kw___builtin_omp_required_simd_align,
+ tok::kw___builtin_vectorelements) &&
"Not a sizeof/alignof/vec_step expression!");
Token OpTok = Tok;
ConsumeToken();
@@ -2422,8 +2539,11 @@ ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
RParenLoc);
}
- if (OpTok.isOneOf(tok::kw_alignof, tok::kw__Alignof))
+ if (getLangOpts().CPlusPlus &&
+ OpTok.isOneOf(tok::kw_alignof, tok::kw__Alignof))
Diag(OpTok, diag::warn_cxx98_compat_alignof);
+ else if (getLangOpts().C23 && OpTok.is(tok::kw_alignof))
+ Diag(OpTok, diag::warn_c23_compat_keyword) << OpTok.getName();
EnterExpressionEvaluationContext Unevaluated(
Actions, Sema::ExpressionEvaluationContext::Unevaluated,
@@ -2438,14 +2558,29 @@ ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
CastRange);
UnaryExprOrTypeTrait ExprKind = UETT_SizeOf;
- if (OpTok.isOneOf(tok::kw_alignof, tok::kw__Alignof))
+ switch (OpTok.getKind()) {
+ case tok::kw_alignof:
+ case tok::kw__Alignof:
ExprKind = UETT_AlignOf;
- else if (OpTok.is(tok::kw___alignof))
+ break;
+ case tok::kw___alignof:
ExprKind = UETT_PreferredAlignOf;
- else if (OpTok.is(tok::kw_vec_step))
+ break;
+ case tok::kw_vec_step:
ExprKind = UETT_VecStep;
- else if (OpTok.is(tok::kw___builtin_omp_required_simd_align))
+ break;
+ case tok::kw___builtin_omp_required_simd_align:
ExprKind = UETT_OpenMPRequiredSimdAlign;
+ break;
+ case tok::kw___datasizeof:
+ ExprKind = UETT_DataSizeOf;
+ break;
+ case tok::kw___builtin_vectorelements:
+ ExprKind = UETT_VectorElements;
+ break;
+ default:
+ break;
+ }
if (isCastExpr)
return Actions.ActOnUnaryExprOrTypeTraitExpr(OpTok.getLocation(),
@@ -2477,9 +2612,12 @@ ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
/// assign-expr ')'
/// [GNU] '__builtin_types_compatible_p' '(' type-name ',' type-name ')'
/// [GNU] '__builtin_FILE' '(' ')'
+/// [CLANG] '__builtin_FILE_NAME' '(' ')'
/// [GNU] '__builtin_FUNCTION' '(' ')'
+/// [MS] '__builtin_FUNCSIG' '(' ')'
/// [GNU] '__builtin_LINE' '(' ')'
/// [CLANG] '__builtin_COLUMN' '(' ')'
+/// [GNU] '__builtin_source_location' '(' ')'
/// [OCL] '__builtin_astype' '(' assignment-expression ',' type-name ')'
///
/// [GNU] offsetof-member-designator:
@@ -2529,10 +2667,21 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
}
case tok::kw___builtin_offsetof: {
SourceLocation TypeLoc = Tok.getLocation();
- TypeResult Ty = ParseTypeName();
- if (Ty.isInvalid()) {
- SkipUntil(tok::r_paren, StopAtSemi);
- return ExprError();
+ auto OOK = Sema::OffsetOfKind::OOK_Builtin;
+ if (Tok.getLocation().isMacroID()) {
+ StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics(
+ Tok.getLocation(), PP.getSourceManager(), getLangOpts());
+ if (MacroName == "offsetof")
+ OOK = Sema::OffsetOfKind::OOK_Macro;
+ }
+ TypeResult Ty;
+ {
+ OffsetOfStateRAIIObject InOffsetof(*this, OOK);
+ Ty = ParseTypeName();
+ if (Ty.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
}
if (ExpectAndConsume(tok::comma)) {
@@ -2556,7 +2705,7 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
Comps.back().LocStart = Comps.back().LocEnd = ConsumeToken();
// FIXME: This loop leaks the index expressions on error.
- while (1) {
+ while (true) {
if (Tok.is(tok::period)) {
// offsetof-member-designator: offsetof-member-designator '.' identifier
Comps.push_back(Sema::OffsetOfComponent());
@@ -2570,7 +2719,6 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
}
Comps.back().U.IdentInfo = Tok.getIdentifierInfo();
Comps.back().LocEnd = ConsumeToken();
-
} else if (Tok.is(tok::l_square)) {
if (CheckProhibitedCXX11Attribute())
return ExprError();
@@ -2701,24 +2849,33 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
}
case tok::kw___builtin_COLUMN:
case tok::kw___builtin_FILE:
+ case tok::kw___builtin_FILE_NAME:
case tok::kw___builtin_FUNCTION:
- case tok::kw___builtin_LINE: {
+ case tok::kw___builtin_FUNCSIG:
+ case tok::kw___builtin_LINE:
+ case tok::kw___builtin_source_location: {
// Attempt to consume the r-paren.
if (Tok.isNot(tok::r_paren)) {
Diag(Tok, diag::err_expected) << tok::r_paren;
SkipUntil(tok::r_paren, StopAtSemi);
return ExprError();
}
- SourceLocExpr::IdentKind Kind = [&] {
+ SourceLocIdentKind Kind = [&] {
switch (T) {
case tok::kw___builtin_FILE:
- return SourceLocExpr::File;
+ return SourceLocIdentKind::File;
+ case tok::kw___builtin_FILE_NAME:
+ return SourceLocIdentKind::FileName;
case tok::kw___builtin_FUNCTION:
- return SourceLocExpr::Function;
+ return SourceLocIdentKind::Function;
+ case tok::kw___builtin_FUNCSIG:
+ return SourceLocIdentKind::FuncSig;
case tok::kw___builtin_LINE:
- return SourceLocExpr::Line;
+ return SourceLocIdentKind::Line;
case tok::kw___builtin_COLUMN:
- return SourceLocExpr::Column;
+ return SourceLocIdentKind::Column;
+ case tok::kw___builtin_source_location:
+ return SourceLocIdentKind::SourceLocStruct;
default:
llvm_unreachable("invalid keyword");
}
@@ -2836,7 +2993,8 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
// None of these cases should fall through with an invalid Result
// unless they've already reported an error.
if (ExprType >= CompoundStmt && Tok.is(tok::l_brace)) {
- Diag(Tok, diag::ext_gnu_statement_expr);
+ Diag(Tok, OpenLoc.isMacroID() ? diag::ext_gnu_statement_expr_macro
+ : diag::ext_gnu_statement_expr);
checkCompoundToken(OpenLoc, tok::l_paren, CompoundToken::StmtExprBegin);
@@ -2924,7 +3082,8 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
// Parse the type declarator.
DeclSpec DS(AttrFactory);
ParseSpecifierQualifierList(DS);
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
ParseDeclarator(DeclaratorInfo);
// If our type is followed by an identifier and either ':' or ']', then
@@ -2936,7 +3095,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
TypeResult Ty;
{
InMessageExpressionRAIIObject InMessage(*this, false);
- Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ Ty = Actions.ActOnTypeName(DeclaratorInfo);
}
Result = ParseObjCMessageExpressionBody(SourceLocation(),
SourceLocation(),
@@ -2951,7 +3110,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
TypeResult Ty;
{
InMessageExpressionRAIIObject InMessage(*this, false);
- Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ Ty = Actions.ActOnTypeName(DeclaratorInfo);
}
return ParseCompoundLiteralExpression(Ty.get(), OpenLoc, RParenLoc);
}
@@ -2963,7 +3122,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
TypeResult Ty;
{
InMessageExpressionRAIIObject InMessage(*this, false);
- Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ Ty = Actions.ActOnTypeName(DeclaratorInfo);
}
if(Ty.isInvalid())
{
@@ -3010,7 +3169,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
TypeResult Ty;
{
InMessageExpressionRAIIObject InMessage(*this, false);
- Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ Ty = Actions.ActOnTypeName(DeclaratorInfo);
}
CastTy = Ty.get();
return ExprResult();
@@ -3050,11 +3209,9 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
} else if (isTypeCast) {
// Parse the expression-list.
InMessageExpressionRAIIObject InMessage(*this, false);
-
ExprVector ArgExprs;
- CommaLocsTy CommaLocs;
- if (!ParseSimpleExpressionList(ArgExprs, CommaLocs)) {
+ if (!ParseSimpleExpressionList(ArgExprs)) {
// FIXME: If we ever support comma expressions as operands to
// fold-expressions, we'll need to allow multiple ArgExprs here.
if (ExprType >= FoldExpr && ArgExprs.size() == 1 &&
@@ -3103,7 +3260,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
InMessageExpressionRAIIObject InMessage(*this, false);
Result = ParseExpression(MaybeTypeCast);
- if (!getLangOpts().CPlusPlus && MaybeTypeCast && Result.isUsable()) {
+ if (!getLangOpts().CPlusPlus && Result.isUsable()) {
// Correct typos in non-C++ code earlier so that implicit-cast-like
// expressions are parsed correctly.
Result = Actions.CorrectDelayedTyposInExpr(Result);
@@ -3164,16 +3321,34 @@ Parser::ParseCompoundLiteralExpression(ParsedType Ty,
/// string-literal
/// \verbatim
ExprResult Parser::ParseStringLiteralExpression(bool AllowUserDefinedLiteral) {
- assert(isTokenStringLiteral() && "Not a string literal!");
+ return ParseStringLiteralExpression(AllowUserDefinedLiteral,
+ /*Unevaluated=*/false);
+}
- // String concat. Note that keywords like __func__ and __FUNCTION__ are not
- // considered to be strings for concatenation purposes.
+ExprResult Parser::ParseUnevaluatedStringLiteralExpression() {
+ return ParseStringLiteralExpression(/*AllowUserDefinedLiteral=*/false,
+ /*Unevaluated=*/true);
+}
+
+ExprResult Parser::ParseStringLiteralExpression(bool AllowUserDefinedLiteral,
+ bool Unevaluated) {
+ assert(tokenIsLikeStringLiteral(Tok, getLangOpts()) &&
+ "Not a string-literal-like token!");
+
+ // String concatenation.
+ // Note: some keywords like __FUNCTION__ are not considered to be strings
+ // for concatenation purposes, unless Microsoft extensions are enabled.
SmallVector<Token, 4> StringToks;
do {
StringToks.push_back(Tok);
- ConsumeStringToken();
- } while (isTokenStringLiteral());
+ ConsumeAnyToken();
+ } while (tokenIsLikeStringLiteral(Tok, getLangOpts()));
+
+ if (Unevaluated) {
+ assert(!AllowUserDefinedLiteral && "UDL are always evaluated");
+ return Actions.ActOnUnevaluatedStringLiteral(StringToks);
+ }
// Pass the set of string tokens, ready for concatenation, to the actions.
return Actions.ActOnStringLiteral(StringToks,
@@ -3194,6 +3369,12 @@ ExprResult Parser::ParseStringLiteralExpression(bool AllowUserDefinedLiteral) {
/// type-name : assignment-expression
/// default : assignment-expression
/// \endverbatim
+///
+/// As an extension, Clang also accepts:
+/// \verbatim
+/// generic-selection:
+/// _Generic ( type-name, generic-assoc-list )
+/// \endverbatim
ExprResult Parser::ParseGenericSelectionExpression() {
assert(Tok.is(tok::kw__Generic) && "_Generic keyword expected");
if (!getLangOpts().C11)
@@ -3204,8 +3385,20 @@ ExprResult Parser::ParseGenericSelectionExpression() {
if (T.expectAndConsume())
return ExprError();
+ // We either have a controlling expression or we have a controlling type, and
+ // we need to figure out which it is.
+ TypeResult ControllingType;
ExprResult ControllingExpr;
- {
+ if (isTypeIdForGenericSelection()) {
+ ControllingType = ParseTypeName();
+ if (ControllingType.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return ExprError();
+ }
+ const auto *LIT = cast<LocInfoType>(ControllingType.get().get());
+ SourceLocation Loc = LIT->getTypeSourceInfo()->getTypeLoc().getBeginLoc();
+ Diag(Loc, diag::ext_generic_with_type_arg);
+ } else {
// C11 6.5.1.1p3 "The controlling expression of a generic selection is
// not evaluated."
EnterExpressionEvaluationContext Unevaluated(
@@ -3224,7 +3417,7 @@ ExprResult Parser::ParseGenericSelectionExpression() {
}
SourceLocation DefaultLoc;
- TypeVector Types;
+ SmallVector<ParsedType, 12> Types;
ExprVector Exprs;
do {
ParsedType Ty;
@@ -3241,7 +3434,7 @@ ExprResult Parser::ParseGenericSelectionExpression() {
Ty = nullptr;
} else {
ColonProtectionRAIIObject X(*this);
- TypeResult TR = ParseTypeName();
+ TypeResult TR = ParseTypeName(nullptr, DeclaratorContext::Association);
if (TR.isInvalid()) {
SkipUntil(tok::r_paren, StopAtSemi);
return ExprError();
@@ -3270,10 +3463,13 @@ ExprResult Parser::ParseGenericSelectionExpression() {
if (T.getCloseLocation().isInvalid())
return ExprError();
- return Actions.ActOnGenericSelectionExpr(KeyLoc, DefaultLoc,
- T.getCloseLocation(),
- ControllingExpr.get(),
- Types, Exprs);
+ void *ExprOrTy = ControllingExpr.isUsable()
+ ? ControllingExpr.get()
+ : ControllingType.get().getAsOpaquePtr();
+
+ return Actions.ActOnGenericSelectionExpr(
+ KeyLoc, DefaultLoc, T.getCloseLocation(), ControllingExpr.isUsable(),
+ ExprOrTy, Types, Exprs);
}
/// Parse A C++1z fold-expression after the opening paren and optional
@@ -3353,10 +3549,11 @@ ExprResult Parser::ParseFoldExpression(ExprResult LHS,
/// [C++0x] braced-init-list
/// \endverbatim
bool Parser::ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
- SmallVectorImpl<SourceLocation> &CommaLocs,
- llvm::function_ref<void()> ExpressionStarts) {
+ llvm::function_ref<void()> ExpressionStarts,
+ bool FailImmediatelyOnInvalidExpr,
+ bool EarlyTypoCorrection) {
bool SawError = false;
- while (1) {
+ while (true) {
if (ExpressionStarts)
ExpressionStarts();
@@ -3367,11 +3564,14 @@ bool Parser::ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
} else
Expr = ParseAssignmentExpression();
+ if (EarlyTypoCorrection)
+ Expr = Actions.CorrectDelayedTyposInExpr(Expr);
+
if (Tok.is(tok::ellipsis))
Expr = Actions.ActOnPackExpansion(Expr.get(), ConsumeToken());
else if (Tok.is(tok::code_completion)) {
// There's nothing to suggest in here as we parsed a full expression.
- // Instead fail and propogate the error since caller might have something
+ // Instead fail and propagate the error since caller might have something
// the suggest, e.g. signature help in function call. Note that this is
// performed before pushing the \p Expr, so that signature help can report
// current argument correctly.
@@ -3380,8 +3580,10 @@ bool Parser::ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
break;
}
if (Expr.isInvalid()) {
- SkipUntil(tok::comma, tok::r_paren, StopBeforeMatch);
SawError = true;
+ if (FailImmediatelyOnInvalidExpr)
+ break;
+ SkipUntil(tok::comma, tok::r_paren, StopBeforeMatch);
} else {
Exprs.push_back(Expr.get());
}
@@ -3390,8 +3592,7 @@ bool Parser::ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
break;
// Move to the next argument, remember where the comma was.
Token Comma = Tok;
- CommaLocs.push_back(ConsumeToken());
-
+ ConsumeToken();
checkPotentialAngleBracketDelimiter(Comma);
}
if (SawError) {
@@ -3413,23 +3614,22 @@ bool Parser::ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
/// assignment-expression
/// simple-expression-list , assignment-expression
/// \endverbatim
-bool
-Parser::ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
- SmallVectorImpl<SourceLocation> &CommaLocs) {
- while (1) {
+bool Parser::ParseSimpleExpressionList(SmallVectorImpl<Expr *> &Exprs) {
+ while (true) {
ExprResult Expr = ParseAssignmentExpression();
if (Expr.isInvalid())
return true;
Exprs.push_back(Expr.get());
- if (Tok.isNot(tok::comma))
+ // We might be parsing the LHS of a fold-expression. If we reached the fold
+ // operator, stop.
+ if (Tok.isNot(tok::comma) || NextToken().is(tok::ellipsis))
return false;
// Move to the next argument, remember where the comma was.
Token Comma = Tok;
- CommaLocs.push_back(ConsumeToken());
-
+ ConsumeToken();
checkPotentialAngleBracketDelimiter(Comma);
}
}
@@ -3452,7 +3652,8 @@ void Parser::ParseBlockId(SourceLocation CaretLoc) {
ParseSpecifierQualifierList(DS);
// Parse the block-declarator.
- Declarator DeclaratorInfo(DS, DeclaratorContext::BlockLiteral);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::BlockLiteral);
DeclaratorInfo.setFunctionDefinitionKind(FunctionDefinitionKind::Definition);
ParseDeclarator(DeclaratorInfo);
@@ -3491,7 +3692,8 @@ ExprResult Parser::ParseBlockLiteralExpression() {
// Parse the return type if present.
DeclSpec DS(AttrFactory);
- Declarator ParamInfo(DS, DeclaratorContext::BlockLiteral);
+ Declarator ParamInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::BlockLiteral);
ParamInfo.setFunctionDefinitionKind(FunctionDefinitionKind::Definition);
// FIXME: Since the return type isn't actually parsed, it can't be used to
// fill ParamInfo with an initial valid range, so do it manually.
@@ -3541,8 +3743,8 @@ ExprResult Parser::ParseBlockLiteralExpression() {
/*NumExceptions=*/0,
/*NoexceptExpr=*/nullptr,
/*ExceptionSpecTokens=*/nullptr,
- /*DeclsInPrototype=*/None, CaretLoc,
- CaretLoc, ParamInfo),
+ /*DeclsInPrototype=*/std::nullopt,
+ CaretLoc, CaretLoc, ParamInfo),
CaretLoc);
MaybeParseGNUAttributes(ParamInfo);
@@ -3623,7 +3825,7 @@ static bool CheckAvailabilitySpecList(Parser &P,
/// availability-spec:
/// '*'
/// identifier version-tuple
-Optional<AvailabilitySpec> Parser::ParseAvailabilitySpec() {
+std::optional<AvailabilitySpec> Parser::ParseAvailabilitySpec() {
if (Tok.is(tok::star)) {
return AvailabilitySpec(ConsumeToken());
} else {
@@ -3631,11 +3833,11 @@ Optional<AvailabilitySpec> Parser::ParseAvailabilitySpec() {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
Actions.CodeCompleteAvailabilityPlatformName();
- return None;
+ return std::nullopt;
}
if (Tok.isNot(tok::identifier)) {
Diag(Tok, diag::err_avail_query_expected_platform_name);
- return None;
+ return std::nullopt;
}
IdentifierLoc *PlatformIdentifier = ParseIdentifierLoc();
@@ -3643,7 +3845,7 @@ Optional<AvailabilitySpec> Parser::ParseAvailabilitySpec() {
VersionTuple Version = ParseVersionTuple(VersionRange);
if (Version.empty())
- return None;
+ return std::nullopt;
StringRef GivenPlatform = PlatformIdentifier->Ident->getName();
StringRef Platform =
@@ -3653,7 +3855,7 @@ Optional<AvailabilitySpec> Parser::ParseAvailabilitySpec() {
Diag(PlatformIdentifier->Loc,
diag::err_avail_query_unrecognized_platform_name)
<< GivenPlatform;
- return None;
+ return std::nullopt;
}
return AvailabilitySpec(Version, Platform, PlatformIdentifier->Loc,
@@ -3675,7 +3877,7 @@ ExprResult Parser::ParseAvailabilityCheckExpr(SourceLocation BeginLoc) {
SmallVector<AvailabilitySpec, 4> AvailSpecs;
bool HasError = false;
while (true) {
- Optional<AvailabilitySpec> Spec = ParseAvailabilitySpec();
+ std::optional<AvailabilitySpec> Spec = ParseAvailabilitySpec();
if (!Spec)
HasError = true;
else
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp b/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp
index f3d10b4a0889..d61f414406f0 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp
@@ -14,13 +14,16 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/TokenKinds.h"
#include "clang/Lex/LiteralSupport.h"
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <numeric>
@@ -453,8 +456,8 @@ bool Parser::ParseOptionalCXXScopeSpecifier(
bool IsCorrectedToColon = false;
bool *CorrectionFlagPtr = ColonIsSacred ? &IsCorrectedToColon : nullptr;
if (Actions.ActOnCXXNestedNameSpecifier(
- getCurScope(), IdInfo, EnteringContext, SS, false,
- CorrectionFlagPtr, OnlyNamespace)) {
+ getCurScope(), IdInfo, EnteringContext, SS, CorrectionFlagPtr,
+ OnlyNamespace)) {
// Identifier is not recognized as a nested name, but we can have
// mistyped '::' instead of ':'.
if (CorrectionFlagPtr && IsCorrectedToColon) {
@@ -668,7 +671,7 @@ ExprResult Parser::ParseCXXIdExpression(bool isAddressOfOperand) {
//
CXXScopeSpec SS;
ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false);
Token Replacement;
@@ -722,7 +725,7 @@ ExprResult Parser::ParseCXXIdExpression(bool isAddressOfOperand) {
/// '&' identifier initializer
///
/// lambda-declarator:
-/// lambda-specifiers [C++2b]
+/// lambda-specifiers [C++23]
/// '(' parameter-declaration-clause ')' lambda-specifiers
/// requires-clause[opt]
///
@@ -979,11 +982,10 @@ bool Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
InitKind = LambdaCaptureInitKind::DirectInit;
ExprVector Exprs;
- CommaLocsTy Commas;
if (Tentative) {
Parens.skipToEnd();
*Tentative = LambdaIntroducerTentativeParse::Incomplete;
- } else if (ParseExpressionList(Exprs, Commas)) {
+ } else if (ParseExpressionList(Exprs)) {
Parens.skipToEnd();
Init = ExprError();
} else {
@@ -1068,8 +1070,8 @@ bool Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
// Ensure that any ellipsis was in the right place.
SourceLocation EllipsisLoc;
- if (std::any_of(std::begin(EllipsisLocs), std::end(EllipsisLocs),
- [](SourceLocation Loc) { return Loc.isValid(); })) {
+ if (llvm::any_of(EllipsisLocs,
+ [](SourceLocation Loc) { return Loc.isValid(); })) {
// The '...' should appear before the identifier in an init-capture, and
// after the identifier otherwise.
bool InitCapture = InitKind != LambdaCaptureInitKind::NoInit;
@@ -1156,51 +1158,66 @@ bool Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
static void tryConsumeLambdaSpecifierToken(Parser &P,
SourceLocation &MutableLoc,
+ SourceLocation &StaticLoc,
SourceLocation &ConstexprLoc,
SourceLocation &ConstevalLoc,
SourceLocation &DeclEndLoc) {
assert(MutableLoc.isInvalid());
+ assert(StaticLoc.isInvalid());
assert(ConstexprLoc.isInvalid());
+ assert(ConstevalLoc.isInvalid());
// Consume constexpr-opt mutable-opt in any sequence, and set the DeclEndLoc
// to the final of those locations. Emit an error if we have multiple
// copies of those keywords and recover.
+ auto ConsumeLocation = [&P, &DeclEndLoc](SourceLocation &SpecifierLoc,
+ int DiagIndex) {
+ if (SpecifierLoc.isValid()) {
+ P.Diag(P.getCurToken().getLocation(),
+ diag::err_lambda_decl_specifier_repeated)
+ << DiagIndex
+ << FixItHint::CreateRemoval(P.getCurToken().getLocation());
+ }
+ SpecifierLoc = P.ConsumeToken();
+ DeclEndLoc = SpecifierLoc;
+ };
+
while (true) {
switch (P.getCurToken().getKind()) {
- case tok::kw_mutable: {
- if (MutableLoc.isValid()) {
- P.Diag(P.getCurToken().getLocation(),
- diag::err_lambda_decl_specifier_repeated)
- << 0 << FixItHint::CreateRemoval(P.getCurToken().getLocation());
- }
- MutableLoc = P.ConsumeToken();
- DeclEndLoc = MutableLoc;
- break /*switch*/;
- }
+ case tok::kw_mutable:
+ ConsumeLocation(MutableLoc, 0);
+ break;
+ case tok::kw_static:
+ ConsumeLocation(StaticLoc, 1);
+ break;
case tok::kw_constexpr:
- if (ConstexprLoc.isValid()) {
- P.Diag(P.getCurToken().getLocation(),
- diag::err_lambda_decl_specifier_repeated)
- << 1 << FixItHint::CreateRemoval(P.getCurToken().getLocation());
- }
- ConstexprLoc = P.ConsumeToken();
- DeclEndLoc = ConstexprLoc;
- break /*switch*/;
+ ConsumeLocation(ConstexprLoc, 2);
+ break;
case tok::kw_consteval:
- if (ConstevalLoc.isValid()) {
- P.Diag(P.getCurToken().getLocation(),
- diag::err_lambda_decl_specifier_repeated)
- << 2 << FixItHint::CreateRemoval(P.getCurToken().getLocation());
- }
- ConstevalLoc = P.ConsumeToken();
- DeclEndLoc = ConstevalLoc;
- break /*switch*/;
+ ConsumeLocation(ConstevalLoc, 3);
+ break;
default:
return;
}
}
}
+static void addStaticToLambdaDeclSpecifier(Parser &P, SourceLocation StaticLoc,
+ DeclSpec &DS) {
+ if (StaticLoc.isValid()) {
+ P.Diag(StaticLoc, !P.getLangOpts().CPlusPlus23
+ ? diag::err_static_lambda
+ : diag::warn_cxx20_compat_static_lambda);
+ const char *PrevSpec = nullptr;
+ unsigned DiagID = 0;
+ DS.SetStorageClassSpec(P.getActions(), DeclSpec::SCS_static, StaticLoc,
+ PrevSpec, DiagID,
+ P.getActions().getASTContext().getPrintingPolicy());
+ assert(PrevSpec == nullptr && DiagID == 0 &&
+ "Static cannot have been set previously!");
+ }
+}
+
static void
addConstexprToLambdaDeclSpecifier(Parser &P, SourceLocation ConstexprLoc,
DeclSpec &DS) {
@@ -1231,6 +1248,24 @@ static void addConstevalToLambdaDeclSpecifier(Parser &P,
}
}
+static void DiagnoseStaticSpecifierRestrictions(Parser &P,
+ SourceLocation StaticLoc,
+ SourceLocation MutableLoc,
+ const LambdaIntroducer &Intro) {
+ if (StaticLoc.isInvalid())
+ return;
+
+ // [expr.prim.lambda.general] p4
+ // The lambda-specifier-seq shall not contain both mutable and static.
+ // If the lambda-specifier-seq contains static, there shall be no
+ // lambda-capture.
+ if (MutableLoc.isValid())
+ P.Diag(StaticLoc, diag::err_static_mutable_lambda);
+ if (Intro.hasLambdaCapture()) {
+ P.Diag(StaticLoc, diag::err_static_lambda_captures);
+ }
+}
+
/// ParseLambdaExpressionAfterIntroducer - Parse the rest of a lambda
/// expression.
ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
@@ -1241,35 +1276,40 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
PrettyStackTraceLoc CrashInfo(PP.getSourceManager(), LambdaBeginLoc,
"lambda expression parsing");
-
-
- // FIXME: Call into Actions to add any init-capture declarations to the
- // scope while parsing the lambda-declarator and compound-statement.
-
// Parse lambda-declarator[opt].
DeclSpec DS(AttrFactory);
- Declarator D(DS, DeclaratorContext::LambdaExpr);
+ Declarator D(DS, ParsedAttributesView::none(), DeclaratorContext::LambdaExpr);
TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
+
+ ParseScope LambdaScope(this, Scope::LambdaScope | Scope::DeclScope |
+ Scope::FunctionDeclarationScope |
+ Scope::FunctionPrototypeScope);
+
Actions.PushLambdaScope();
+ Actions.ActOnLambdaExpressionAfterIntroducer(Intro, getCurScope());
- ParsedAttributes Attr(AttrFactory);
+ ParsedAttributes Attributes(AttrFactory);
if (getLangOpts().CUDA) {
// In CUDA code, GNU attributes are allowed to appear immediately after the
// "[...]", even if there is no "(...)" before the lambda body.
- MaybeParseGNUAttributes(D);
- }
+ //
+ // Note that we support __noinline__ as a keyword in this mode and thus
+ // it has to be separately handled.
+ while (true) {
+ if (Tok.is(tok::kw___noinline__)) {
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = ConsumeToken();
+ Attributes.addNew(AttrName, AttrNameLoc, /*ScopeName=*/nullptr,
+ AttrNameLoc, /*ArgsUnion=*/nullptr,
+ /*numArgs=*/0, tok::kw___noinline__);
+ } else if (Tok.is(tok::kw___attribute))
+ ParseGNUAttributes(Attributes, /*LatePArsedAttrList=*/nullptr, &D);
+ else
+ break;
+ }
- // Helper to emit a warning if we see a CUDA host/device/global attribute
- // after '(...)'. nvcc doesn't accept this.
- auto WarnIfHasCUDATargetAttr = [&] {
- if (getLangOpts().CUDA)
- for (const ParsedAttr &A : Attr)
- if (A.getKind() == ParsedAttr::AT_CUDADevice ||
- A.getKind() == ParsedAttr::AT_CUDAHost ||
- A.getKind() == ParsedAttr::AT_CUDAGlobal)
- Diag(A.getLoc(), diag::warn_cuda_attr_lambda_position)
- << A.getAttrName()->getName();
- };
+ D.takeAttributes(Attributes);
+ }
MultiParseScope TemplateParamScope(*this);
if (Tok.is(tok::less)) {
@@ -1300,7 +1340,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
}
Actions.ActOnLambdaExplicitTemplateParameterList(
- LAngleLoc, TemplateParams, RAngleLoc, RequiresClause);
+ Intro, LAngleLoc, TemplateParams, RAngleLoc, RequiresClause);
++CurTemplateDepthTracker;
}
}
@@ -1310,109 +1350,39 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
// or operator template declaration. We accept this as a conforming extension
// in all language modes that support lambdas.
if (isCXX11AttributeSpecifier()) {
- Diag(Tok, getLangOpts().CPlusPlus2b
+ Diag(Tok, getLangOpts().CPlusPlus23
? diag::warn_cxx20_compat_decl_attrs_on_lambda
- : diag::ext_decl_attrs_on_lambda);
+ : diag::ext_decl_attrs_on_lambda)
+ << Tok.getIdentifierInfo() << Tok.isRegularKeywordAttribute();
MaybeParseCXX11Attributes(D);
}
TypeResult TrailingReturnType;
SourceLocation TrailingReturnTypeLoc;
+ SourceLocation LParenLoc, RParenLoc;
+ SourceLocation DeclEndLoc;
+ bool HasParentheses = false;
+ bool HasSpecifiers = false;
+ SourceLocation MutableLoc;
- auto ParseLambdaSpecifiers =
- [&](SourceLocation LParenLoc, SourceLocation RParenLoc,
- MutableArrayRef<DeclaratorChunk::ParamInfo> ParamInfo,
- SourceLocation EllipsisLoc) {
- SourceLocation DeclEndLoc = RParenLoc;
-
- // GNU-style attributes must be parsed before the mutable specifier to
- // be compatible with GCC. MSVC-style attributes must be parsed before
- // the mutable specifier to be compatible with MSVC.
- MaybeParseAttributes(PAKM_GNU | PAKM_Declspec, Attr);
-
- // Parse mutable-opt and/or constexpr-opt or consteval-opt, and update
- // the DeclEndLoc.
- SourceLocation MutableLoc;
- SourceLocation ConstexprLoc;
- SourceLocation ConstevalLoc;
- tryConsumeLambdaSpecifierToken(*this, MutableLoc, ConstexprLoc,
- ConstevalLoc, DeclEndLoc);
-
- addConstexprToLambdaDeclSpecifier(*this, ConstexprLoc, DS);
- addConstevalToLambdaDeclSpecifier(*this, ConstevalLoc, DS);
- // Parse exception-specification[opt].
- ExceptionSpecificationType ESpecType = EST_None;
- SourceRange ESpecRange;
- SmallVector<ParsedType, 2> DynamicExceptions;
- SmallVector<SourceRange, 2> DynamicExceptionRanges;
- ExprResult NoexceptExpr;
- CachedTokens *ExceptionSpecTokens;
- ESpecType = tryParseExceptionSpecification(
- /*Delayed=*/false, ESpecRange, DynamicExceptions,
- DynamicExceptionRanges, NoexceptExpr, ExceptionSpecTokens);
-
- if (ESpecType != EST_None)
- DeclEndLoc = ESpecRange.getEnd();
-
- // Parse attribute-specifier[opt].
- MaybeParseCXX11Attributes(Attr, &DeclEndLoc);
-
- // Parse OpenCL addr space attribute.
- if (Tok.isOneOf(tok::kw___private, tok::kw___global, tok::kw___local,
- tok::kw___constant, tok::kw___generic)) {
- ParseOpenCLQualifiers(DS.getAttributes());
- ConsumeToken();
- }
-
- SourceLocation FunLocalRangeEnd = DeclEndLoc;
-
- // Parse trailing-return-type[opt].
- if (Tok.is(tok::arrow)) {
- FunLocalRangeEnd = Tok.getLocation();
- SourceRange Range;
- TrailingReturnType = ParseTrailingReturnType(
- Range, /*MayBeFollowedByDirectInit*/ false);
- TrailingReturnTypeLoc = Range.getBegin();
- if (Range.getEnd().isValid())
- DeclEndLoc = Range.getEnd();
- }
+ ParseScope Prototype(this, Scope::FunctionPrototypeScope |
+ Scope::FunctionDeclarationScope |
+ Scope::DeclScope);
- SourceLocation NoLoc;
- D.AddTypeInfo(
- DeclaratorChunk::getFunction(
- /*HasProto=*/true,
- /*IsAmbiguous=*/false, LParenLoc, ParamInfo.data(),
- ParamInfo.size(), EllipsisLoc, RParenLoc,
- /*RefQualifierIsLvalueRef=*/true,
- /*RefQualifierLoc=*/NoLoc, MutableLoc, ESpecType, ESpecRange,
- DynamicExceptions.data(), DynamicExceptionRanges.data(),
- DynamicExceptions.size(),
- NoexceptExpr.isUsable() ? NoexceptExpr.get() : nullptr,
- /*ExceptionSpecTokens*/ nullptr,
- /*DeclsInPrototype=*/None, LParenLoc, FunLocalRangeEnd, D,
- TrailingReturnType, TrailingReturnTypeLoc, &DS),
- std::move(Attr), DeclEndLoc);
- };
+ // Parse parameter-declaration-clause.
+ SmallVector<DeclaratorChunk::ParamInfo, 16> ParamInfo;
+ SourceLocation EllipsisLoc;
if (Tok.is(tok::l_paren)) {
- ParseScope PrototypeScope(this, Scope::FunctionPrototypeScope |
- Scope::FunctionDeclarationScope |
- Scope::DeclScope);
-
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
- SourceLocation LParenLoc = T.getOpenLocation();
-
- // Parse parameter-declaration-clause.
- SmallVector<DeclaratorChunk::ParamInfo, 16> ParamInfo;
- SourceLocation EllipsisLoc;
+ LParenLoc = T.getOpenLocation();
if (Tok.isNot(tok::r_paren)) {
Actions.RecordParsingTemplateParameterDepth(
CurTemplateDepthTracker.getOriginalDepth());
- ParseParameterDeclarationClause(D.getContext(), Attr, ParamInfo,
- EllipsisLoc);
+ ParseParameterDeclarationClause(D, Attributes, ParamInfo, EllipsisLoc);
// For a generic lambda, each 'auto' within the parameter declaration
// clause creates a template type parameter, so increment the depth.
// If we've parsed any explicit template parameters, then the depth will
@@ -1423,35 +1393,127 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
}
T.consumeClose();
+ DeclEndLoc = RParenLoc = T.getCloseLocation();
+ HasParentheses = true;
+ }
- // Parse lambda-specifiers.
- ParseLambdaSpecifiers(LParenLoc, /*DeclEndLoc=*/T.getCloseLocation(),
- ParamInfo, EllipsisLoc);
+ HasSpecifiers =
+ Tok.isOneOf(tok::kw_mutable, tok::arrow, tok::kw___attribute,
+ tok::kw_constexpr, tok::kw_consteval, tok::kw_static,
+ tok::kw___private, tok::kw___global, tok::kw___local,
+ tok::kw___constant, tok::kw___generic, tok::kw_groupshared,
+ tok::kw_requires, tok::kw_noexcept) ||
+ Tok.isRegularKeywordAttribute() ||
+ (Tok.is(tok::l_square) && NextToken().is(tok::l_square));
+
+ if (HasSpecifiers && !HasParentheses && !getLangOpts().CPlusPlus23) {
+ // It's common to forget that one needs '()' before 'mutable', an
+ // attribute specifier, the result type, or the requires clause. Deal with
+ // this.
+ Diag(Tok, diag::ext_lambda_missing_parens)
+ << FixItHint::CreateInsertion(Tok.getLocation(), "() ");
+ }
- // Parse requires-clause[opt].
- if (Tok.is(tok::kw_requires))
- ParseTrailingRequiresClause(D);
- } else if (Tok.isOneOf(tok::kw_mutable, tok::arrow, tok::kw___attribute,
- tok::kw_constexpr, tok::kw_consteval,
- tok::kw___private, tok::kw___global, tok::kw___local,
- tok::kw___constant, tok::kw___generic,
- tok::kw_requires, tok::kw_noexcept) ||
- (Tok.is(tok::l_square) && NextToken().is(tok::l_square))) {
- if (!getLangOpts().CPlusPlus2b)
- // It's common to forget that one needs '()' before 'mutable', an
- // attribute specifier, the result type, or the requires clause. Deal with
- // this.
- Diag(Tok, diag::ext_lambda_missing_parens)
- << FixItHint::CreateInsertion(Tok.getLocation(), "() ");
+ if (HasParentheses || HasSpecifiers) {
+ // GNU-style attributes must be parsed before the mutable specifier to
+ // be compatible with GCC. MSVC-style attributes must be parsed before
+ // the mutable specifier to be compatible with MSVC.
+ MaybeParseAttributes(PAKM_GNU | PAKM_Declspec, Attributes);
+ // Parse mutable-opt and/or constexpr-opt or consteval-opt, and update
+ // the DeclEndLoc.
+ SourceLocation ConstexprLoc;
+ SourceLocation ConstevalLoc;
+ SourceLocation StaticLoc;
+
+ tryConsumeLambdaSpecifierToken(*this, MutableLoc, StaticLoc, ConstexprLoc,
+ ConstevalLoc, DeclEndLoc);
+
+ DiagnoseStaticSpecifierRestrictions(*this, StaticLoc, MutableLoc, Intro);
+
+ addStaticToLambdaDeclSpecifier(*this, StaticLoc, DS);
+ addConstexprToLambdaDeclSpecifier(*this, ConstexprLoc, DS);
+ addConstevalToLambdaDeclSpecifier(*this, ConstevalLoc, DS);
+ }
+
+ Actions.ActOnLambdaClosureParameters(getCurScope(), ParamInfo);
+
+ if (!HasParentheses)
+ Actions.ActOnLambdaClosureQualifiers(Intro, MutableLoc);
+
+ if (HasSpecifiers || HasParentheses) {
+ // Parse exception-specification[opt].
+ ExceptionSpecificationType ESpecType = EST_None;
+ SourceRange ESpecRange;
+ SmallVector<ParsedType, 2> DynamicExceptions;
+ SmallVector<SourceRange, 2> DynamicExceptionRanges;
+ ExprResult NoexceptExpr;
+ CachedTokens *ExceptionSpecTokens;
+
+ ESpecType = tryParseExceptionSpecification(
+ /*Delayed=*/false, ESpecRange, DynamicExceptions,
+ DynamicExceptionRanges, NoexceptExpr, ExceptionSpecTokens);
+
+ if (ESpecType != EST_None)
+ DeclEndLoc = ESpecRange.getEnd();
+
+ // Parse attribute-specifier[opt].
+ if (MaybeParseCXX11Attributes(Attributes))
+ DeclEndLoc = Attributes.Range.getEnd();
+
+ // Parse OpenCL addr space attribute.
+ if (Tok.isOneOf(tok::kw___private, tok::kw___global, tok::kw___local,
+ tok::kw___constant, tok::kw___generic)) {
+ ParseOpenCLQualifiers(DS.getAttributes());
+ ConsumeToken();
+ }
+
+ SourceLocation FunLocalRangeEnd = DeclEndLoc;
+
+ // Parse trailing-return-type[opt].
+ if (Tok.is(tok::arrow)) {
+ FunLocalRangeEnd = Tok.getLocation();
+ SourceRange Range;
+ TrailingReturnType =
+ ParseTrailingReturnType(Range, /*MayBeFollowedByDirectInit=*/false);
+ TrailingReturnTypeLoc = Range.getBegin();
+ if (Range.getEnd().isValid())
+ DeclEndLoc = Range.getEnd();
+ }
SourceLocation NoLoc;
- // Parse lambda-specifiers.
- std::vector<DeclaratorChunk::ParamInfo> EmptyParamInfo;
- ParseLambdaSpecifiers(/*LParenLoc=*/NoLoc, /*RParenLoc=*/NoLoc,
- EmptyParamInfo, /*EllipsisLoc=*/NoLoc);
+ D.AddTypeInfo(DeclaratorChunk::getFunction(
+ /*HasProto=*/true,
+ /*IsAmbiguous=*/false, LParenLoc, ParamInfo.data(),
+ ParamInfo.size(), EllipsisLoc, RParenLoc,
+ /*RefQualifierIsLvalueRef=*/true,
+ /*RefQualifierLoc=*/NoLoc, MutableLoc, ESpecType,
+ ESpecRange, DynamicExceptions.data(),
+ DynamicExceptionRanges.data(), DynamicExceptions.size(),
+ NoexceptExpr.isUsable() ? NoexceptExpr.get() : nullptr,
+ /*ExceptionSpecTokens*/ nullptr,
+ /*DeclsInPrototype=*/std::nullopt, LParenLoc,
+ FunLocalRangeEnd, D, TrailingReturnType,
+ TrailingReturnTypeLoc, &DS),
+ std::move(Attributes), DeclEndLoc);
+
+ Actions.ActOnLambdaClosureQualifiers(Intro, MutableLoc);
+
+ if (HasParentheses && Tok.is(tok::kw_requires))
+ ParseTrailingRequiresClause(D);
+ }
+
+ // Emit a warning if we see a CUDA host/device/global attribute
+ // after '(...)'. nvcc doesn't accept this.
+ if (getLangOpts().CUDA) {
+ for (const ParsedAttr &A : Attributes)
+ if (A.getKind() == ParsedAttr::AT_CUDADevice ||
+ A.getKind() == ParsedAttr::AT_CUDAHost ||
+ A.getKind() == ParsedAttr::AT_CUDAGlobal)
+ Diag(A.getLoc(), diag::warn_cuda_attr_lambda_position)
+ << A.getAttrName()->getName();
}
- WarnIfHasCUDATargetAttr();
+ Prototype.Exit();
// FIXME: Rename BlockScope -> ClosureScope if we decide to continue using
// it.
@@ -1459,7 +1521,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
Scope::CompoundStmtScope;
ParseScope BodyScope(this, ScopeFlags);
- Actions.ActOnStartOfLambdaDefinition(Intro, D, getCurScope());
+ Actions.ActOnStartOfLambdaDefinition(Intro, D, DS);
// Parse compound-statement.
if (!Tok.is(tok::l_brace)) {
@@ -1471,9 +1533,11 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
StmtResult Stmt(ParseCompoundStatementBody());
BodyScope.Exit();
TemplateParamScope.Exit();
+ LambdaScope.Exit();
- if (!Stmt.isInvalid() && !TrailingReturnType.isInvalid())
- return Actions.ActOnLambdaExpr(LambdaBeginLoc, Stmt.get(), getCurScope());
+ if (!Stmt.isInvalid() && !TrailingReturnType.isInvalid() &&
+ !D.isInvalidType())
+ return Actions.ActOnLambdaExpr(LambdaBeginLoc, Stmt.get());
Actions.ActOnLambdaError(LambdaBeginLoc, getCurScope());
return ExprError();
@@ -1519,10 +1583,12 @@ ExprResult Parser::ParseCXXCasts() {
// Parse the common declaration-specifiers piece.
DeclSpec DS(AttrFactory);
- ParseSpecifierQualifierList(DS);
+ ParseSpecifierQualifierList(DS, /*AccessSpecifier=*/AS_none,
+ DeclSpecContext::DSC_type_specifier);
// Parse the abstract-declarator, if present.
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
ParseDeclarator(DeclaratorInfo);
SourceLocation RAngleBracketLoc = Tok.getLocation();
@@ -1848,8 +1914,9 @@ ExprResult Parser::ParseCXXThis() {
/// In C++1z onwards, the type specifier can also be a template-name.
ExprResult
Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
- Declarator DeclaratorInfo(DS, DeclaratorContext::FunctionalCast);
- ParsedType TypeRep = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo).get();
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::FunctionalCast);
+ ParsedType TypeRep = Actions.ActOnTypeName(DeclaratorInfo).get();
assert((Tok.is(tok::l_paren) ||
(getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)))
@@ -1871,20 +1938,19 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
PreferredType.enterTypeCast(Tok.getLocation(), TypeRep.get());
ExprVector Exprs;
- CommaLocsTy CommaLocs;
auto RunSignatureHelp = [&]() {
QualType PreferredType;
if (TypeRep)
PreferredType = Actions.ProduceConstructorSignatureHelp(
- getCurScope(), TypeRep.get()->getCanonicalTypeInternal(),
- DS.getEndLoc(), Exprs, T.getOpenLocation());
+ TypeRep.get()->getCanonicalTypeInternal(), DS.getEndLoc(), Exprs,
+ T.getOpenLocation(), /*Braced=*/false);
CalledSignatureHelp = true;
return PreferredType;
};
if (Tok.isNot(tok::r_paren)) {
- if (ParseExpressionList(Exprs, CommaLocs, [&] {
+ if (ParseExpressionList(Exprs, [&] {
PreferredType.enterFunctionArgument(Tok.getLocation(),
RunSignatureHelp);
})) {
@@ -1902,14 +1968,34 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
if (!TypeRep)
return ExprError();
- assert((Exprs.size() == 0 || Exprs.size()-1 == CommaLocs.size())&&
- "Unexpected number of commas!");
return Actions.ActOnCXXTypeConstructExpr(TypeRep, T.getOpenLocation(),
Exprs, T.getCloseLocation(),
/*ListInitialization=*/false);
}
}
+Parser::DeclGroupPtrTy
+Parser::ParseAliasDeclarationInInitStatement(DeclaratorContext Context,
+ ParsedAttributes &Attrs) {
+ assert(Tok.is(tok::kw_using) && "Expected using");
+ assert((Context == DeclaratorContext::ForInit ||
+ Context == DeclaratorContext::SelectionInit) &&
+ "Unexpected Declarator Context");
+ DeclGroupPtrTy DG;
+ SourceLocation DeclStart = ConsumeToken(), DeclEnd;
+
+ DG = ParseUsingDeclaration(Context, {}, DeclStart, DeclEnd, Attrs, AS_none);
+ if (!DG)
+ return DG;
+
+ Diag(DeclStart, !getLangOpts().CPlusPlus23
+ ? diag::ext_alias_in_init_statement
+ : diag::warn_cxx20_alias_in_init_statement)
+ << SourceRange(DeclStart, DeclEnd);
+
+ return DG;
+}
+
/// ParseCXXCondition - if/switch/while condition expression.
///
/// condition:
@@ -1931,6 +2017,9 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
/// \param Loc The location of the start of the statement that requires this
/// condition, e.g., the "for" in a for loop.
///
+/// \param MissingOK Whether an empty condition is acceptable here. Otherwise
+/// it is considered an error to be recovered from.
+///
/// \param FRI If non-null, a for range declaration is permitted, and if
/// present will be parsed and stored here, and a null result will be returned.
///
@@ -1938,11 +2027,10 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
/// appropriate moment for a 'for' loop.
///
/// \returns The parsed condition.
-Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
- SourceLocation Loc,
- Sema::ConditionKind CK,
- ForRangeInfo *FRI,
- bool EnterForConditionScope) {
+Sema::ConditionResult
+Parser::ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc,
+ Sema::ConditionKind CK, bool MissingOK,
+ ForRangeInfo *FRI, bool EnterForConditionScope) {
// Helper to ensure we always enter a continue/break scope if requested.
struct ForConditionScopeRAII {
Scope *S;
@@ -1967,7 +2055,7 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
return Sema::ConditionError();
}
- ParsedAttributesWithRange attrs(AttrFactory);
+ ParsedAttributes attrs(AttrFactory);
MaybeParseCXX11Attributes(attrs);
const auto WarnOnInit = [this, &CK] {
@@ -1997,7 +2085,7 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
}
ConsumeToken();
*InitStmt = Actions.ActOnNullStmt(SemiLoc);
- return ParseCXXCondition(nullptr, Loc, CK);
+ return ParseCXXCondition(nullptr, Loc, CK, MissingOK);
}
// Parse the expression.
@@ -2009,19 +2097,27 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
WarnOnInit();
*InitStmt = Actions.ActOnExprStmt(Expr.get());
ConsumeToken();
- return ParseCXXCondition(nullptr, Loc, CK);
+ return ParseCXXCondition(nullptr, Loc, CK, MissingOK);
}
- return Actions.ActOnCondition(getCurScope(), Loc, Expr.get(), CK);
+ return Actions.ActOnCondition(getCurScope(), Loc, Expr.get(), CK,
+ MissingOK);
}
case ConditionOrInitStatement::InitStmtDecl: {
WarnOnInit();
+ DeclGroupPtrTy DG;
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
- DeclGroupPtrTy DG = ParseSimpleDeclaration(
- DeclaratorContext::SelectionInit, DeclEnd, attrs, /*RequireSemi=*/true);
+ if (Tok.is(tok::kw_using))
+ DG = ParseAliasDeclarationInInitStatement(
+ DeclaratorContext::SelectionInit, attrs);
+ else {
+ ParsedAttributes DeclSpecAttrs(AttrFactory);
+ DG = ParseSimpleDeclaration(DeclaratorContext::SelectionInit, DeclEnd,
+ attrs, DeclSpecAttrs, /*RequireSemi=*/true);
+ }
*InitStmt = Actions.ActOnDeclStmt(DG, DeclStart, DeclEnd);
- return ParseCXXCondition(nullptr, Loc, CK);
+ return ParseCXXCondition(nullptr, Loc, CK, MissingOK);
}
case ConditionOrInitStatement::ForRangeDecl: {
@@ -2030,11 +2126,10 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
// permitted here.
assert(FRI && "should not parse a for range declaration here");
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
- DeclGroupPtrTy DG = ParseSimpleDeclaration(DeclaratorContext::ForInit,
- DeclEnd, attrs, false, FRI);
+ ParsedAttributes DeclSpecAttrs(AttrFactory);
+ DeclGroupPtrTy DG = ParseSimpleDeclaration(
+ DeclaratorContext::ForInit, DeclEnd, attrs, DeclSpecAttrs, false, FRI);
FRI->LoopVar = Actions.ActOnDeclStmt(DG, DeclStart, Tok.getLocation());
- assert((FRI->ColonLoc.isValid() || !DG) &&
- "cannot find for range declaration");
return Sema::ConditionResult();
}
@@ -2048,11 +2143,10 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
// type-specifier-seq
DeclSpec DS(AttrFactory);
- DS.takeAttributesFrom(attrs);
ParseSpecifierQualifierList(DS, AS_none, DeclSpecContext::DSC_condition);
// declarator
- Declarator DeclaratorInfo(DS, DeclaratorContext::Condition);
+ Declarator DeclaratorInfo(DS, attrs, DeclaratorContext::Condition);
ParseDeclarator(DeclaratorInfo);
// simple-asm-expr[opt]
@@ -2164,12 +2258,14 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
return;
}
- case tok::kw__ExtInt: {
+ case tok::kw__ExtInt:
+ case tok::kw__BitInt: {
+ DiagnoseBitIntUse(Tok);
ExprResult ER = ParseExtIntegerArgument();
if (ER.isInvalid())
DS.SetTypeSpecError();
else
- DS.SetExtIntType(Loc, ER.get(), PrevSpec, DiagID, Policy);
+ DS.SetBitIntType(Loc, ER.get(), PrevSpec, DiagID, Policy);
// Do this here because we have already consumed the close paren.
DS.SetRangeEnd(PrevTokLocation);
@@ -2199,6 +2295,9 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
case tok::kw_void:
DS.SetTypeSpecType(DeclSpec::TST_void, Loc, PrevSpec, DiagID, Policy);
break;
+ case tok::kw_auto:
+ DS.SetTypeSpecType(DeclSpec::TST_auto, Loc, PrevSpec, DiagID, Policy);
+ break;
case tok::kw_char:
DS.SetTypeSpecType(DeclSpec::TST_char, Loc, PrevSpec, DiagID, Policy);
break;
@@ -2226,6 +2325,9 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
case tok::kw___float128:
DS.SetTypeSpecType(DeclSpec::TST_float128, Loc, PrevSpec, DiagID, Policy);
break;
+ case tok::kw___ibm128:
+ DS.SetTypeSpecType(DeclSpec::TST_ibm128, Loc, PrevSpec, DiagID, Policy);
+ break;
case tok::kw_wchar_t:
DS.SetTypeSpecType(DeclSpec::TST_wchar, Loc, PrevSpec, DiagID, Policy);
break;
@@ -2241,6 +2343,15 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
case tok::kw_bool:
DS.SetTypeSpecType(DeclSpec::TST_bool, Loc, PrevSpec, DiagID, Policy);
break;
+ case tok::kw__Accum:
+ DS.SetTypeSpecType(DeclSpec::TST_accum, Loc, PrevSpec, DiagID, Policy);
+ break;
+ case tok::kw__Fract:
+ DS.SetTypeSpecType(DeclSpec::TST_fract, Loc, PrevSpec, DiagID, Policy);
+ break;
+ case tok::kw__Sat:
+ DS.SetTypeSpecSat(Loc, PrevSpec, DiagID);
+ break;
#define GENERIC_IMAGE_TYPE(ImgType, Id) \
case tok::kw_##ImgType##_t: \
DS.SetTypeSpecType(DeclSpec::TST_##ImgType##_t, Loc, PrevSpec, DiagID, \
@@ -2275,8 +2386,9 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
/// type-specifier-seq: [C++ 8.1]
/// type-specifier type-specifier-seq[opt]
///
-bool Parser::ParseCXXTypeSpecifierSeq(DeclSpec &DS) {
- ParseSpecifierQualifierList(DS, AS_none, DeclSpecContext::DSC_type_specifier);
+bool Parser::ParseCXXTypeSpecifierSeq(DeclSpec &DS, DeclaratorContext Context) {
+ ParseSpecifierQualifierList(DS, AS_none,
+ getDeclSpecContextFromDeclaratorContext(Context));
DS.Finish(Actions, Actions.getASTContext().getPrintingPolicy());
return false;
}
@@ -2422,8 +2534,8 @@ bool Parser::ParseUnqualifiedIdTemplateId(
// Parse the enclosed template argument list.
SourceLocation LAngleLoc, RAngleLoc;
TemplateArgList TemplateArgs;
- if (ParseTemplateIdAfterTemplateName(true, LAngleLoc, TemplateArgs,
- RAngleLoc))
+ if (ParseTemplateIdAfterTemplateName(true, LAngleLoc, TemplateArgs, RAngleLoc,
+ Template))
return true;
// If this is a non-template, we already issued a diagnostic.
@@ -2692,16 +2804,18 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
// Parse the type-specifier-seq.
DeclSpec DS(AttrFactory);
- if (ParseCXXTypeSpecifierSeq(DS)) // FIXME: ObjectType?
+ if (ParseCXXTypeSpecifierSeq(
+ DS, DeclaratorContext::ConversionId)) // FIXME: ObjectType?
return true;
// Parse the conversion-declarator, which is merely a sequence of
// ptr-operators.
- Declarator D(DS, DeclaratorContext::ConversionId);
+ Declarator D(DS, ParsedAttributesView::none(),
+ DeclaratorContext::ConversionId);
ParseDeclaratorInternal(D, /*DirectDeclParser=*/nullptr);
// Finish up the type.
- TypeResult Ty = Actions.ActOnTypeName(getCurScope(), D);
+ TypeResult Ty = Actions.ActOnTypeName(D);
if (Ty.isInvalid())
return true;
@@ -2775,6 +2889,7 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
// identifier
// template-id (when it hasn't already been annotated)
if (Tok.is(tok::identifier)) {
+ ParseIdentifier:
// Consume the identifier.
IdentifierInfo *Id = Tok.getIdentifierInfo();
SourceLocation IdLoc = ConsumeToken();
@@ -2795,9 +2910,9 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
if (!Ty)
return true;
Result.setConstructorName(Ty, IdLoc, IdLoc);
- } else if (getLangOpts().CPlusPlus17 &&
- AllowDeductionGuide && SS.isEmpty() &&
- Actions.isDeductionGuideName(getCurScope(), *Id, IdLoc,
+ } else if (getLangOpts().CPlusPlus17 && AllowDeductionGuide &&
+ SS.isEmpty() &&
+ Actions.isDeductionGuideName(getCurScope(), *Id, IdLoc, SS,
&TemplateName)) {
// We have parsed a template-name naming a deduction guide.
Result.setDeductionGuideName(TemplateName, IdLoc);
@@ -2998,10 +3113,9 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
}
// Note that this is a destructor name.
- ParsedType Ty = Actions.getDestructorName(TildeLoc, *ClassName,
- ClassNameLoc, getCurScope(),
- SS, ObjectType,
- EnteringContext);
+ ParsedType Ty =
+ Actions.getDestructorName(*ClassName, ClassNameLoc, getCurScope(), SS,
+ ObjectType, EnteringContext);
if (!Ty)
return true;
@@ -3009,9 +3123,20 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
return false;
}
- Diag(Tok, diag::err_expected_unqualified_id)
- << getLangOpts().CPlusPlus;
- return true;
+ switch (Tok.getKind()) {
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) case tok::kw___##Trait:
+#include "clang/Basic/TransformTypeTraits.def"
+ if (!NextToken().is(tok::l_paren)) {
+ Tok.setKind(tok::identifier);
+ Diag(Tok, diag::ext_keyword_as_ident)
+ << Tok.getIdentifierInfo()->getName() << 0;
+ goto ParseIdentifier;
+ }
+ [[fallthrough]];
+ default:
+ Diag(Tok, diag::err_expected_unqualified_id) << getLangOpts().CPlusPlus;
+ return true;
+ }
}
/// ParseCXXNewExpression - Parse a C++ new-expression. New is used to allocate
@@ -3055,7 +3180,8 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
SourceRange TypeIdParens;
DeclSpec DS(AttrFactory);
- Declarator DeclaratorInfo(DS, DeclaratorContext::CXXNew);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::CXXNew);
if (Tok.is(tok::l_paren)) {
// If it turns out to be a placement, we change the type location.
BalancedDelimiterTracker T(*this, tok::l_paren);
@@ -3103,7 +3229,7 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
// A new-type-id is a simplified type-id, where essentially the
// direct-declarator is replaced by a direct-new-declarator.
MaybeParseGNUAttributes(DeclaratorInfo);
- if (ParseCXXTypeSpecifierSeq(DS))
+ if (ParseCXXTypeSpecifierSeq(DS, DeclaratorContext::CXXNew))
DeclaratorInfo.setInvalidType(true);
else {
DeclaratorInfo.SetSourceRange(DS.getSourceRange());
@@ -3125,22 +3251,21 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
T.consumeOpen();
ConstructorLParen = T.getOpenLocation();
if (Tok.isNot(tok::r_paren)) {
- CommaLocsTy CommaLocs;
auto RunSignatureHelp = [&]() {
- ParsedType TypeRep =
- Actions.ActOnTypeName(getCurScope(), DeclaratorInfo).get();
+ ParsedType TypeRep = Actions.ActOnTypeName(DeclaratorInfo).get();
QualType PreferredType;
// ActOnTypeName might adjust DeclaratorInfo and return a null type even
// the passing DeclaratorInfo is valid, e.g. running SignatureHelp on
// `new decltype(invalid) (^)`.
if (TypeRep)
PreferredType = Actions.ProduceConstructorSignatureHelp(
- getCurScope(), TypeRep.get()->getCanonicalTypeInternal(),
- DeclaratorInfo.getEndLoc(), ConstructorArgs, ConstructorLParen);
+ TypeRep.get()->getCanonicalTypeInternal(),
+ DeclaratorInfo.getEndLoc(), ConstructorArgs, ConstructorLParen,
+ /*Braced=*/false);
CalledSignatureHelp = true;
return PreferredType;
};
- if (ParseExpressionList(ConstructorArgs, CommaLocs, [&] {
+ if (ParseExpressionList(ConstructorArgs, [&] {
PreferredType.enterFunctionArgument(Tok.getLocation(),
RunSignatureHelp);
})) {
@@ -3239,9 +3364,7 @@ bool Parser::ParseExpressionListOrTypeId(
}
// It's not a type, it has to be an expression list.
- // Discard the comma locations - ActOnCXXNew has enough parameters.
- CommaLocsTy CommaLocs;
- return ParseExpressionList(PlacementArgs, CommaLocs);
+ return ParseExpressionList(PlacementArgs);
}
/// ParseCXXDeleteExpression - Parse a C++ delete-expression. Delete is used
@@ -3367,11 +3490,11 @@ ExprResult Parser::ParseRequiresExpression() {
SourceLocation RequiresKWLoc = ConsumeToken(); // Consume 'requires'
llvm::SmallVector<ParmVarDecl *, 2> LocalParameterDecls;
+ BalancedDelimiterTracker Parens(*this, tok::l_paren);
if (Tok.is(tok::l_paren)) {
// requirement parameter list is present.
ParseScope LocalParametersScope(this, Scope::FunctionPrototypeScope |
Scope::DeclScope);
- BalancedDelimiterTracker Parens(*this, tok::l_paren);
Parens.consumeOpen();
if (!Tok.is(tok::r_paren)) {
ParsedAttributes FirstArgAttrs(getAttrFactory());
@@ -3401,6 +3524,10 @@ ExprResult Parser::ParseRequiresExpression() {
Actions, Sema::ExpressionEvaluationContext::Unevaluated);
ParseScope BodyScope(this, Scope::DeclScope);
+ // Create a separate diagnostic pool for RequiresExprBodyDecl.
+ // Dependent diagnostics are attached to this Decl and non-depenedent
+ // diagnostics are surfaced after this parse.
+ ParsingDeclRAIIObject ParsingBodyDecl(*this, ParsingDeclRAIIObject::NoParent);
RequiresExprBodyDecl *Body = Actions.ActOnStartRequiresExpr(
RequiresKWLoc, LocalParameterDecls, getCurScope());
@@ -3496,10 +3623,12 @@ ExprResult Parser::ParseRequiresExpression() {
auto Res = TryParseParameterDeclarationClause();
if (Res != TPResult::False) {
// Skip to the closing parenthesis
- // FIXME: Don't traverse these tokens twice (here and in
- // TryParseParameterDeclarationClause).
unsigned Depth = 1;
while (Depth != 0) {
+ bool FoundParen = SkipUntil(tok::l_paren, tok::r_paren,
+ SkipUntilFlags::StopBeforeMatch);
+ if (!FoundParen)
+ break;
if (Tok.is(tok::l_paren))
Depth++;
else if (Tok.is(tok::r_paren))
@@ -3553,7 +3682,7 @@ ExprResult Parser::ParseRequiresExpression() {
// We need to consume the typename to allow 'requires { typename a; }'
SourceLocation TypenameKWLoc = ConsumeToken();
- if (TryAnnotateCXXScopeToken()) {
+ if (TryAnnotateOptionalCXXScopeToken()) {
TPA.Commit();
SkipUntil(tok::semi, tok::r_brace, SkipUntilFlags::StopBeforeMatch);
break;
@@ -3602,7 +3731,7 @@ ExprResult Parser::ParseRequiresExpression() {
break;
}
if (!Expression.isInvalid() && PossibleRequiresExprInSimpleRequirement)
- Diag(StartLoc, diag::warn_requires_expr_in_simple_requirement)
+ Diag(StartLoc, diag::err_requires_expr_in_simple_requirement)
<< FixItHint::CreateInsertion(StartLoc, "requires");
if (auto *Req = Actions.ActOnSimpleRequirement(Expression.get()))
Requirements.push_back(Req);
@@ -3638,8 +3767,10 @@ ExprResult Parser::ParseRequiresExpression() {
}
Braces.consumeClose();
Actions.ActOnFinishRequiresExpr();
- return Actions.ActOnRequiresExpr(RequiresKWLoc, Body, LocalParameterDecls,
- Requirements, Braces.getCloseLocation());
+ ParsingBodyDecl.complete(Body);
+ return Actions.ActOnRequiresExpr(
+ RequiresKWLoc, Body, Parens.getOpenLocation(), LocalParameterDecls,
+ Parens.getCloseLocation(), Requirements, Braces.getCloseLocation());
}
static TypeTrait TypeTraitFromTokKind(tok::TokenKind kind) {
@@ -3678,14 +3809,6 @@ static ExpressionTrait ExpressionTraitFromTokKind(tok::TokenKind kind) {
}
}
-static unsigned TypeTraitArity(tok::TokenKind kind) {
- switch (kind) {
- default: llvm_unreachable("Not a known type trait");
-#define TYPE_TRAIT(N,Spelling,K) case tok::kw_##Spelling: return N;
-#include "clang/Basic/TokenKinds.def"
- }
-}
-
/// Parse the built-in type-trait pseudo-functions that allow
/// implementation of the TR1/C++11 type traits templates.
///
@@ -3699,7 +3822,6 @@ static unsigned TypeTraitArity(tok::TokenKind kind) {
///
ExprResult Parser::ParseTypeTrait() {
tok::TokenKind Kind = Tok.getKind();
- unsigned Arity = TypeTraitArity(Kind);
SourceLocation Loc = ConsumeToken();
@@ -3734,18 +3856,6 @@ ExprResult Parser::ParseTypeTrait() {
SourceLocation EndLoc = Parens.getCloseLocation();
- if (Arity && Args.size() != Arity) {
- Diag(EndLoc, diag::err_type_trait_arity)
- << Arity << 0 << (Arity > 1) << (int)Args.size() << SourceRange(Loc);
- return ExprError();
- }
-
- if (!Arity && Args.empty()) {
- Diag(EndLoc, diag::err_type_trait_arity)
- << 1 << 1 << 1 << (int)Args.size() << SourceRange(Loc);
- return ExprError();
- }
-
return Actions.ActOnTypeTrait(TypeTraitFromTokKind(Kind), Loc, Args, EndLoc);
}
@@ -3905,7 +4015,8 @@ Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType,
if (ParseAs >= CompoundLiteral) {
// Parse the type declarator.
DeclSpec DS(AttrFactory);
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
{
ColonProtectionRAIIObject InnerColonProtection(*this);
ParseSpecifierQualifierList(DS);
@@ -3925,7 +4036,7 @@ Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType,
if (DeclaratorInfo.isInvalidType())
return ExprError();
- TypeResult Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ TypeResult Ty = Actions.ActOnTypeName(DeclaratorInfo);
return ParseCompoundLiteralExpression(Ty.get(),
Tracker.getOpenLocation(),
Tracker.getCloseLocation());
@@ -3983,7 +4094,8 @@ ExprResult Parser::ParseBuiltinBitCast() {
ParseSpecifierQualifierList(DS);
// Parse the abstract-declarator, if present.
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
ParseDeclarator(DeclaratorInfo);
if (ExpectAndConsume(tok::comma)) {
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseHLSL.cpp b/contrib/llvm-project/clang/lib/Parse/ParseHLSL.cpp
new file mode 100644
index 000000000000..4fc6a2203cec
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Parse/ParseHLSL.cpp
@@ -0,0 +1,200 @@
+//===--- ParseHLSL.cpp - HLSL-specific parsing support --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the parsing logic for HLSL language features.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Attr.h"
+#include "clang/Basic/AttributeCommonInfo.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
+#include "clang/Parse/RAIIObjectsForParser.h"
+
+using namespace clang;
+
+static bool validateDeclsInsideHLSLBuffer(Parser::DeclGroupPtrTy DG,
+ SourceLocation BufferLoc,
+ bool IsCBuffer, Parser &P) {
+ // The parse is failed, just return false.
+ if (!DG)
+ return false;
+ DeclGroupRef Decls = DG.get();
+ bool IsValid = true;
+ // Only allow function, variable, record decls inside HLSLBuffer.
+ for (DeclGroupRef::iterator I = Decls.begin(), E = Decls.end(); I != E; ++I) {
+ Decl *D = *I;
+ if (isa<CXXRecordDecl, RecordDecl, FunctionDecl, VarDecl>(D))
+ continue;
+
+ // FIXME: support nested HLSLBuffer and namespace inside HLSLBuffer.
+ if (isa<HLSLBufferDecl, NamespaceDecl>(D)) {
+ P.Diag(D->getLocation(), diag::err_invalid_declaration_in_hlsl_buffer)
+ << IsCBuffer;
+ IsValid = false;
+ continue;
+ }
+
+ IsValid = false;
+ P.Diag(D->getLocation(), diag::err_invalid_declaration_in_hlsl_buffer)
+ << IsCBuffer;
+ }
+ return IsValid;
+}
+
+Decl *Parser::ParseHLSLBuffer(SourceLocation &DeclEnd) {
+ assert((Tok.is(tok::kw_cbuffer) || Tok.is(tok::kw_tbuffer)) &&
+ "Not a cbuffer or tbuffer!");
+ bool IsCBuffer = Tok.is(tok::kw_cbuffer);
+ SourceLocation BufferLoc = ConsumeToken(); // Eat the 'cbuffer' or 'tbuffer'.
+
+ if (!Tok.is(tok::identifier)) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ return nullptr;
+ }
+
+ IdentifierInfo *Identifier = Tok.getIdentifierInfo();
+ SourceLocation IdentifierLoc = ConsumeToken();
+
+ ParsedAttributes Attrs(AttrFactory);
+ MaybeParseHLSLSemantics(Attrs, nullptr);
+
+ ParseScope BufferScope(this, Scope::DeclScope);
+ BalancedDelimiterTracker T(*this, tok::l_brace);
+ if (T.consumeOpen()) {
+ Diag(Tok, diag::err_expected) << tok::l_brace;
+ return nullptr;
+ }
+
+ Decl *D = Actions.ActOnStartHLSLBuffer(getCurScope(), IsCBuffer, BufferLoc,
+ Identifier, IdentifierLoc,
+ T.getOpenLocation());
+
+ while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+ // FIXME: support attribute on constants inside cbuffer/tbuffer.
+ ParsedAttributes DeclAttrs(AttrFactory);
+ ParsedAttributes EmptyDeclSpecAttrs(AttrFactory);
+
+ DeclGroupPtrTy Result =
+ ParseExternalDeclaration(DeclAttrs, EmptyDeclSpecAttrs);
+ if (!validateDeclsInsideHLSLBuffer(Result, IdentifierLoc, IsCBuffer,
+ *this)) {
+ T.skipToEnd();
+ DeclEnd = T.getCloseLocation();
+ BufferScope.Exit();
+ Actions.ActOnFinishHLSLBuffer(D, DeclEnd);
+ return nullptr;
+ }
+ }
+
+ T.consumeClose();
+ DeclEnd = T.getCloseLocation();
+ BufferScope.Exit();
+ Actions.ActOnFinishHLSLBuffer(D, DeclEnd);
+
+ Actions.ProcessDeclAttributeList(Actions.CurScope, D, Attrs);
+ return D;
+}
+
+static void fixSeparateAttrArgAndNumber(StringRef ArgStr, SourceLocation ArgLoc,
+ Token Tok, ArgsVector &ArgExprs,
+ Parser &P, ASTContext &Ctx,
+ Preprocessor &PP) {
+ StringRef Num = StringRef(Tok.getLiteralData(), Tok.getLength());
+ SourceLocation EndNumLoc = Tok.getEndLoc();
+
+ P.ConsumeToken(); // consume constant.
+ std::string FixedArg = ArgStr.str() + Num.str();
+ P.Diag(ArgLoc, diag::err_hlsl_separate_attr_arg_and_number)
+ << FixedArg
+ << FixItHint::CreateReplacement(SourceRange(ArgLoc, EndNumLoc), FixedArg);
+ ArgsUnion &Slot = ArgExprs.back();
+ Slot = IdentifierLoc::create(Ctx, ArgLoc, PP.getIdentifierInfo(FixedArg));
+}
+
+void Parser::ParseHLSLSemantics(ParsedAttributes &Attrs,
+ SourceLocation *EndLoc) {
+ // FIXME: HLSLSemantic is shared for Semantic and resource binding which is
+ // confusing. Need a better name to avoid misunderstanding. Issue
+ // https://github.com/llvm/llvm-project/issues/57882
+ assert(Tok.is(tok::colon) && "Not a HLSL Semantic");
+ ConsumeToken();
+
+ IdentifierInfo *II = nullptr;
+ if (Tok.is(tok::kw_register))
+ II = PP.getIdentifierInfo("register");
+ else if (Tok.is(tok::identifier))
+ II = Tok.getIdentifierInfo();
+
+ if (!II) {
+ Diag(Tok.getLocation(), diag::err_expected_semantic_identifier);
+ return;
+ }
+
+ SourceLocation Loc = ConsumeToken();
+ if (EndLoc)
+ *EndLoc = Tok.getLocation();
+ ParsedAttr::Kind AttrKind =
+ ParsedAttr::getParsedKind(II, nullptr, ParsedAttr::AS_HLSLSemantic);
+
+ ArgsVector ArgExprs;
+ switch (AttrKind) {
+ case ParsedAttr::AT_HLSLResourceBinding: {
+ if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen_after)) {
+ SkipUntil(tok::r_paren, StopAtSemi); // skip through )
+ return;
+ }
+ if (!Tok.is(tok::identifier)) {
+ Diag(Tok.getLocation(), diag::err_expected) << tok::identifier;
+ SkipUntil(tok::r_paren, StopAtSemi); // skip through )
+ return;
+ }
+ StringRef SlotStr = Tok.getIdentifierInfo()->getName();
+ SourceLocation SlotLoc = Tok.getLocation();
+ ArgExprs.push_back(ParseIdentifierLoc());
+
+ // Add numeric_constant for fix-it.
+ if (SlotStr.size() == 1 && Tok.is(tok::numeric_constant))
+ fixSeparateAttrArgAndNumber(SlotStr, SlotLoc, Tok, ArgExprs, *this,
+ Actions.Context, PP);
+
+ if (Tok.is(tok::comma)) {
+ ConsumeToken(); // consume comma
+ if (!Tok.is(tok::identifier)) {
+ Diag(Tok.getLocation(), diag::err_expected) << tok::identifier;
+ SkipUntil(tok::r_paren, StopAtSemi); // skip through )
+ return;
+ }
+ StringRef SpaceStr = Tok.getIdentifierInfo()->getName();
+ SourceLocation SpaceLoc = Tok.getLocation();
+ ArgExprs.push_back(ParseIdentifierLoc());
+
+ // Add numeric_constant for fix-it.
+ if (SpaceStr.equals("space") && Tok.is(tok::numeric_constant))
+ fixSeparateAttrArgAndNumber(SpaceStr, SpaceLoc, Tok, ArgExprs, *this,
+ Actions.Context, PP);
+ }
+ if (ExpectAndConsume(tok::r_paren, diag::err_expected)) {
+ SkipUntil(tok::r_paren, StopAtSemi); // skip through )
+ return;
+ }
+ } break;
+ case ParsedAttr::UnknownAttribute:
+ Diag(Loc, diag::err_unknown_hlsl_semantic) << II;
+ return;
+ case ParsedAttr::AT_HLSLSV_GroupIndex:
+ case ParsedAttr::AT_HLSLSV_DispatchThreadID:
+ break;
+ default:
+ llvm_unreachable("invalid HLSL Semantic");
+ break;
+ }
+
+ Attrs.addNew(II, Loc, nullptr, SourceLocation(), ArgExprs.data(),
+ ArgExprs.size(), ParsedAttr::Form::HLSLSemantic());
+}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseInit.cpp b/contrib/llvm-project/clang/lib/Parse/ParseInit.cpp
index 9d9c03d28a97..637f21176792 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseInit.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseInit.cpp
@@ -15,6 +15,7 @@
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/Designator.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "llvm/ADT/STLExtras.h"
@@ -181,7 +182,8 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator(
NewSyntax);
Designation D;
- D.AddDesignator(Designator::getField(FieldName, SourceLocation(), NameLoc));
+ D.AddDesignator(Designator::CreateFieldDesignator(
+ FieldName, SourceLocation(), NameLoc));
PreferredType.enterDesignatedInitializer(
Tok.getLocation(), DesignatorCompletion.PreferredBaseType, D);
return Actions.ActOnDesignatedInitializer(D, ColonLoc, true,
@@ -210,8 +212,8 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator(
return ExprError();
}
- Desig.AddDesignator(Designator::getField(Tok.getIdentifierInfo(), DotLoc,
- Tok.getLocation()));
+ Desig.AddDesignator(Designator::CreateFieldDesignator(
+ Tok.getIdentifierInfo(), DotLoc, Tok.getLocation()));
ConsumeToken(); // Eat the identifier.
continue;
}
@@ -360,7 +362,8 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator(
// If this is a normal array designator, remember it.
if (Tok.isNot(tok::ellipsis)) {
- Desig.AddDesignator(Designator::getArray(Idx.get(), StartLoc));
+ Desig.AddDesignator(Designator::CreateArrayDesignator(Idx.get(),
+ StartLoc));
} else {
// Handle the gnu array range extension.
Diag(Tok, diag::ext_gnu_array_range);
@@ -371,9 +374,8 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator(
SkipUntil(tok::r_square, StopAtSemi);
return RHS;
}
- Desig.AddDesignator(Designator::getArrayRange(Idx.get(),
- RHS.get(),
- StartLoc, EllipsisLoc));
+ Desig.AddDesignator(Designator::CreateArrayRangeDesignator(
+ Idx.get(), RHS.get(), StartLoc, EllipsisLoc));
}
T.consumeClose();
@@ -429,7 +431,7 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator(
/// initializer: [C99 6.7.8]
/// '{' initializer-list '}'
/// '{' initializer-list ',' '}'
-/// [GNU] '{' '}'
+/// [C23] '{' '}'
///
/// initializer-list:
/// designation[opt] initializer ...[opt]
@@ -447,11 +449,14 @@ ExprResult Parser::ParseBraceInitializer() {
ExprVector InitExprs;
if (Tok.is(tok::r_brace)) {
- // Empty initializers are a C++ feature and a GNU extension to C.
- if (!getLangOpts().CPlusPlus)
- Diag(LBraceLoc, diag::ext_gnu_empty_initializer);
+ // Empty initializers are a C++ feature and a GNU extension to C before C23.
+ if (!getLangOpts().CPlusPlus) {
+ Diag(LBraceLoc, getLangOpts().C23
+ ? diag::warn_c23_compat_empty_initializer
+ : diag::ext_c_empty_initializer);
+ }
// Match the '}'.
- return Actions.ActOnInitList(LBraceLoc, None, ConsumeBrace());
+ return Actions.ActOnInitList(LBraceLoc, std::nullopt, ConsumeBrace());
}
// Enter an appropriate expression evaluation context for an initializer list.
@@ -459,12 +464,22 @@ ExprResult Parser::ParseBraceInitializer() {
Actions, EnterExpressionEvaluationContext::InitList);
bool InitExprsOk = true;
- DesignatorCompletionInfo DesignatorCompletion{
- InitExprs,
- PreferredType.get(T.getOpenLocation()),
+ QualType LikelyType = PreferredType.get(T.getOpenLocation());
+ DesignatorCompletionInfo DesignatorCompletion{InitExprs, LikelyType};
+ bool CalledSignatureHelp = false;
+ auto RunSignatureHelp = [&] {
+ QualType PreferredType;
+ if (!LikelyType.isNull())
+ PreferredType = Actions.ProduceConstructorSignatureHelp(
+ LikelyType->getCanonicalTypeInternal(), T.getOpenLocation(),
+ InitExprs, T.getOpenLocation(), /*Braced=*/true);
+ CalledSignatureHelp = true;
+ return PreferredType;
};
- while (1) {
+ while (true) {
+ PreferredType.enterFunctionArgument(Tok.getLocation(), RunSignatureHelp);
+
// Handle Microsoft __if_exists/if_not_exists if necessary.
if (getLangOpts().MicrosoftExt && (Tok.is(tok::kw___if_exists) ||
Tok.is(tok::kw___if_not_exists))) {
@@ -555,7 +570,7 @@ bool Parser::ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
Diag(Result.KeywordLoc, diag::warn_microsoft_dependent_exists)
<< Result.IsIfExists;
// Fall through to skip.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case IEB_Skip:
Braces.skipToEnd();
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp b/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp
index 9e145f57d61f..849fd1ac95a4 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ODRDiagsEmitter.h"
#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/TargetInfo.h"
@@ -46,7 +47,10 @@ void Parser::MaybeSkipAttributes(tok::ObjCKeywordKind Kind) {
/// [OBJC] objc-method-definition
/// [OBJC] '@' 'end'
Parser::DeclGroupPtrTy
-Parser::ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs) {
+Parser::ParseObjCAtDirectives(ParsedAttributes &DeclAttrs,
+ ParsedAttributes &DeclSpecAttrs) {
+ DeclAttrs.takeAllFrom(DeclSpecAttrs);
+
SourceLocation AtLoc = ConsumeToken(); // the "@"
if (Tok.is(tok::code_completion)) {
@@ -55,17 +59,29 @@ Parser::ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs) {
return nullptr;
}
+ switch (Tok.getObjCKeywordID()) {
+ case tok::objc_interface:
+ case tok::objc_protocol:
+ case tok::objc_implementation:
+ break;
+ default:
+ for (const auto &Attr : DeclAttrs) {
+ if (Attr.isGNUAttribute())
+ Diag(Tok.getLocation(), diag::err_objc_unexpected_attr);
+ }
+ }
+
Decl *SingleDecl = nullptr;
switch (Tok.getObjCKeywordID()) {
case tok::objc_class:
return ParseObjCAtClassDeclaration(AtLoc);
case tok::objc_interface:
- SingleDecl = ParseObjCAtInterfaceDeclaration(AtLoc, Attrs);
+ SingleDecl = ParseObjCAtInterfaceDeclaration(AtLoc, DeclAttrs);
break;
case tok::objc_protocol:
- return ParseObjCAtProtocolDeclaration(AtLoc, Attrs);
+ return ParseObjCAtProtocolDeclaration(AtLoc, DeclAttrs);
case tok::objc_implementation:
- return ParseObjCAtImplementationDeclaration(AtLoc, Attrs);
+ return ParseObjCAtImplementationDeclaration(AtLoc, DeclAttrs);
case tok::objc_end:
return ParseObjCAtEndDeclaration(AtLoc);
case tok::objc_compatibility_alias:
@@ -79,7 +95,8 @@ Parser::ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs) {
break;
case tok::objc_import:
if (getLangOpts().Modules || getLangOpts().DebuggerSupport) {
- SingleDecl = ParseModuleImport(AtLoc);
+ Sema::ModuleImportState IS = Sema::ModuleImportState::NotACXX20Module;
+ SingleDecl = ParseModuleImport(AtLoc, IS);
break;
}
Diag(AtLoc, diag::err_atimport);
@@ -134,8 +151,13 @@ Parser::ParseObjCAtClassDeclaration(SourceLocation atLoc) {
SmallVector<SourceLocation, 8> ClassLocs;
SmallVector<ObjCTypeParamList *, 8> ClassTypeParams;
- while (1) {
+ while (true) {
MaybeSkipAttributes(tok::objc_class);
+ if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
+ Actions.CodeCompleteObjCClassForwardDecl(getCurScope());
+ return Actions.ConvertDeclToDeclGroup(nullptr);
+ }
if (expectIdentifier()) {
SkipUntil(tok::semi);
return Actions.ConvertDeclToDeclGroup(nullptr);
@@ -283,7 +305,7 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
/*consumeLastToken=*/true))
return nullptr;
- Decl *CategoryType = Actions.ActOnStartCategoryInterface(
+ ObjCCategoryDecl *CategoryType = Actions.ActOnStartCategoryInterface(
AtLoc, nameId, nameLoc, typeParameterList, categoryId, categoryLoc,
ProtocolRefs.data(), ProtocolRefs.size(), ProtocolLocs.data(),
EndProtoLoc, attrs);
@@ -353,17 +375,30 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
Actions.ActOnTypedefedProtocols(protocols, protocolLocs,
superClassId, superClassLoc);
- Decl *ClsType = Actions.ActOnStartClassInterface(
+ Sema::SkipBodyInfo SkipBody;
+ ObjCInterfaceDecl *ClsType = Actions.ActOnStartClassInterface(
getCurScope(), AtLoc, nameId, nameLoc, typeParameterList, superClassId,
superClassLoc, typeArgs,
SourceRange(typeArgsLAngleLoc, typeArgsRAngleLoc), protocols.data(),
- protocols.size(), protocolLocs.data(), EndProtoLoc, attrs);
+ protocols.size(), protocolLocs.data(), EndProtoLoc, attrs, &SkipBody);
if (Tok.is(tok::l_brace))
ParseObjCClassInstanceVariables(ClsType, tok::objc_protected, AtLoc);
ParseObjCInterfaceDeclList(tok::objc_interface, ClsType);
+ if (SkipBody.CheckSameAsPrevious) {
+ auto *PreviousDef = cast<ObjCInterfaceDecl>(SkipBody.Previous);
+ if (Actions.ActOnDuplicateODRHashDefinition(ClsType, PreviousDef)) {
+ ClsType->mergeDuplicateDefinitionWithCommon(PreviousDef->getDefinition());
+ } else {
+ ODRDiagsEmitter DiagsEmitter(Diags, Actions.getASTContext(),
+ getPreprocessor().getLangOpts());
+ DiagsEmitter.diagnoseMismatch(PreviousDef, ClsType);
+ ClsType->setInvalidDecl();
+ }
+ }
+
return ClsType;
}
@@ -378,7 +413,7 @@ static void addContextSensitiveTypeNullability(Parser &P,
auto getNullabilityAttr = [&](AttributePool &Pool) -> ParsedAttr * {
return Pool.create(P.getNullabilityKeyword(nullability),
SourceRange(nullabilityLoc), nullptr, SourceLocation(),
- nullptr, 0, ParsedAttr::AS_ContextSensitiveKeyword);
+ nullptr, 0, ParsedAttr::Form::ContextSensitiveKeyword());
};
if (D.getNumTypeObjects() > 0) {
@@ -578,6 +613,19 @@ ObjCTypeParamList *Parser::parseObjCTypeParamList() {
/*mayBeProtocolList=*/false);
}
+static bool isTopLevelObjCKeyword(tok::ObjCKeywordKind DirectiveKind) {
+ switch (DirectiveKind) {
+ case tok::objc_class:
+ case tok::objc_compatibility_alias:
+ case tok::objc_interface:
+ case tok::objc_implementation:
+ case tok::objc_protocol:
+ return true;
+ default:
+ return false;
+ }
+}
+
/// objc-interface-decl-list:
/// empty
/// objc-interface-decl-list objc-property-decl [OBJC2]
@@ -598,7 +646,7 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
SourceRange AtEnd;
- while (1) {
+ while (true) {
// If this is a method prototype, parse it.
if (Tok.isOneOf(tok::minus, tok::plus)) {
if (Decl *methodPrototype =
@@ -650,43 +698,54 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
if (Tok.is(tok::r_brace))
break;
- ParsedAttributesWithRange attrs(AttrFactory);
+ ParsedAttributes EmptyDeclAttrs(AttrFactory);
+ ParsedAttributes EmptyDeclSpecAttrs(AttrFactory);
// Since we call ParseDeclarationOrFunctionDefinition() instead of
// ParseExternalDeclaration() below (so that this doesn't parse nested
// @interfaces), this needs to duplicate some code from the latter.
if (Tok.isOneOf(tok::kw_static_assert, tok::kw__Static_assert)) {
SourceLocation DeclEnd;
- allTUVariables.push_back(
- ParseDeclaration(DeclaratorContext::File, DeclEnd, attrs));
+ ParsedAttributes EmptyDeclSpecAttrs(AttrFactory);
+ allTUVariables.push_back(ParseDeclaration(DeclaratorContext::File,
+ DeclEnd, EmptyDeclAttrs,
+ EmptyDeclSpecAttrs));
continue;
}
- allTUVariables.push_back(ParseDeclarationOrFunctionDefinition(attrs));
+ allTUVariables.push_back(ParseDeclarationOrFunctionDefinition(
+ EmptyDeclAttrs, EmptyDeclSpecAttrs));
continue;
}
- // Otherwise, we have an @ directive, eat the @.
- SourceLocation AtLoc = ConsumeToken(); // the "@"
- if (Tok.is(tok::code_completion)) {
+ // Otherwise, we have an @ directive, peak at the next token
+ SourceLocation AtLoc = Tok.getLocation();
+ const auto &NextTok = NextToken();
+ if (NextTok.is(tok::code_completion)) {
cutOffParsing();
Actions.CodeCompleteObjCAtDirective(getCurScope());
return;
}
- tok::ObjCKeywordKind DirectiveKind = Tok.getObjCKeywordID();
-
+ tok::ObjCKeywordKind DirectiveKind = NextTok.getObjCKeywordID();
if (DirectiveKind == tok::objc_end) { // @end -> terminate list
+ ConsumeToken(); // the "@"
AtEnd.setBegin(AtLoc);
AtEnd.setEnd(Tok.getLocation());
break;
} else if (DirectiveKind == tok::objc_not_keyword) {
- Diag(Tok, diag::err_objc_unknown_at);
+ Diag(NextTok, diag::err_objc_unknown_at);
SkipUntil(tok::semi);
continue;
}
- // Eat the identifier.
+ // If we see something like '@interface' that's only allowed at the top
+ // level, bail out as if we saw an '@end'. We'll diagnose this below.
+ if (isTopLevelObjCKeyword(DirectiveKind))
+ break;
+
+ // Otherwise parse it as part of the current declaration. Eat "@identifier".
+ ConsumeToken();
ConsumeToken();
switch (DirectiveKind) {
@@ -700,15 +759,6 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
SkipUntil(tok::r_brace, tok::at, StopAtSemi);
break;
- case tok::objc_implementation:
- case tok::objc_interface:
- Diag(AtLoc, diag::err_objc_missing_end)
- << FixItHint::CreateInsertion(AtLoc, "@end\n");
- Diag(CDecl->getBeginLoc(), diag::note_objc_container_start)
- << (int)Actions.getObjCContainerKind();
- ConsumeToken();
- break;
-
case tok::objc_required:
case tok::objc_optional:
// This is only valid on protocols.
@@ -777,13 +827,10 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
}
}
- // We break out of the big loop in two cases: when we see @end or when we see
- // EOF. In the former case, eat the @end. In the later case, emit an error.
- if (Tok.is(tok::code_completion)) {
- cutOffParsing();
- Actions.CodeCompleteObjCAtDirective(getCurScope());
- return;
- } else if (Tok.isObjCAtKeyword(tok::objc_end)) {
+ // We break out of the big loop in 3 cases: when we see @end or when we see
+ // top-level ObjC keyword or EOF. In the former case, eat the @end. In the
+ // later cases, emit an error.
+ if (Tok.isObjCAtKeyword(tok::objc_end)) {
ConsumeToken(); // the "end" identifier
} else {
Diag(Tok, diag::err_objc_missing_end)
@@ -848,7 +895,7 @@ void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS) {
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
- while (1) {
+ while (true) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
Actions.CodeCompleteObjCPropertyFlags(getCurScope(), DS);
@@ -1149,7 +1196,7 @@ void Parser::ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
assert(Context == DeclaratorContext::ObjCParameter ||
Context == DeclaratorContext::ObjCResult);
- while (1) {
+ while (true) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
Actions.CodeCompleteObjCPassingType(
@@ -1225,6 +1272,10 @@ static void takeDeclAttributes(ParsedAttributesView &attrs,
/// declarator and add them to the given list.
static void takeDeclAttributes(ParsedAttributes &attrs,
Declarator &D) {
+ // This gets called only from Parser::ParseObjCTypeName(), and that should
+ // never add declaration attributes to the Declarator.
+ assert(D.getDeclarationAttributes().empty());
+
// First, take ownership of all attributes.
attrs.getPool().takeAllFrom(D.getAttributePool());
attrs.getPool().takeAllFrom(D.getDeclSpec().getAttributePool());
@@ -1268,7 +1319,7 @@ ParsedType Parser::ParseObjCTypeName(ObjCDeclSpec &DS,
if (context == DeclaratorContext::ObjCResult)
dsContext = DeclSpecContext::DSC_objc_method_result;
ParseSpecifierQualifierList(declSpec, AS_none, dsContext);
- Declarator declarator(declSpec, context);
+ Declarator declarator(declSpec, ParsedAttributesView::none(), context);
ParseDeclarator(declarator);
// If that's not invalid, extract a type.
@@ -1281,7 +1332,7 @@ ParsedType Parser::ParseObjCTypeName(ObjCDeclSpec &DS,
DS.getNullabilityLoc(),
addedToDeclSpec);
- TypeResult type = Actions.ActOnTypeName(getCurScope(), declarator);
+ TypeResult type = Actions.ActOnTypeName(declarator);
if (!type.isInvalid())
Ty = type.get();
@@ -1401,7 +1452,7 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
Scope::FunctionDeclarationScope | Scope::DeclScope);
AttributePool allParamAttrs(AttrFactory);
- while (1) {
+ while (true) {
ParsedAttributes paramAttrs(AttrFactory);
Sema::ObjCArgInfo ArgInfo;
@@ -1487,7 +1538,8 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
DeclSpec DS(AttrFactory);
ParseDeclarationSpecifiers(DS);
// Parse the declarator.
- Declarator ParmDecl(DS, DeclaratorContext::Prototype);
+ Declarator ParmDecl(DS, ParsedAttributesView::none(),
+ DeclaratorContext::Prototype);
ParseDeclarator(ParmDecl);
IdentifierInfo *ParmII = ParmDecl.getIdentifier();
Decl *Param = Actions.ActOnParamDeclarator(getCurScope(), ParmDecl);
@@ -1531,7 +1583,7 @@ ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &Protocols,
SmallVector<IdentifierLocPair, 8> ProtocolIdents;
- while (1) {
+ while (true) {
if (Tok.is(tok::code_completion)) {
cutOffParsing();
Actions.CodeCompleteObjCProtocolReferences(ProtocolIdents);
@@ -1693,8 +1745,9 @@ void Parser::parseObjCTypeArgsOrProtocolQualifiers(
typeArg, Actions.getASTContext().getPrintingPolicy());
// Form a declarator to turn this into a type.
- Declarator D(DS, DeclaratorContext::TypeName);
- TypeResult fullTypeArg = Actions.ActOnTypeName(getCurScope(), D);
+ Declarator D(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
+ TypeResult fullTypeArg = Actions.ActOnTypeName(D);
if (fullTypeArg.isUsable()) {
typeArgs.push_back(fullTypeArg.get());
if (!foundValidTypeId) {
@@ -1864,16 +1917,16 @@ TypeResult Parser::parseObjCTypeArgsAndProtocolQualifiers(
protocolRAngleLoc);
}
-void Parser::HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
- BalancedDelimiterTracker &T,
- SmallVectorImpl<Decl *> &AllIvarDecls,
- bool RBraceMissing) {
+void Parser::HelperActionsForIvarDeclarations(
+ ObjCContainerDecl *interfaceDecl, SourceLocation atLoc,
+ BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls,
+ bool RBraceMissing) {
if (!RBraceMissing)
T.consumeClose();
- Actions.ActOnObjCContainerStartDefinition(interfaceDecl);
+ assert(getObjCDeclContext() == interfaceDecl &&
+ "Ivars should have interfaceDecl as their decl context");
Actions.ActOnLastBitfield(T.getCloseLocation(), AllIvarDecls);
- Actions.ActOnObjCContainerFinishDefinition();
// Call ActOnFields() even if we don't have any decls. This is useful
// for code rewriting tools that need to be aware of the empty list.
Actions.ActOnFields(getCurScope(), atLoc, interfaceDecl, AllIvarDecls,
@@ -1902,14 +1955,13 @@ void Parser::HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocatio
/// objc-instance-variable-decl:
/// struct-declaration
///
-void Parser::ParseObjCClassInstanceVariables(Decl *interfaceDecl,
+void Parser::ParseObjCClassInstanceVariables(ObjCContainerDecl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc) {
assert(Tok.is(tok::l_brace) && "expected {");
SmallVector<Decl *, 32> AllIvarDecls;
- ParseScope ClassScope(this, Scope::DeclScope|Scope::ClassScope);
- ObjCDeclContextSwitch ObjCDC(*this);
+ ParseScope ClassScope(this, Scope::DeclScope | Scope::ClassScope);
BalancedDelimiterTracker T(*this, tok::l_brace);
T.consumeOpen();
@@ -1973,13 +2025,13 @@ void Parser::ParseObjCClassInstanceVariables(Decl *interfaceDecl,
}
auto ObjCIvarCallback = [&](ParsingFieldDeclarator &FD) {
- Actions.ActOnObjCContainerStartDefinition(interfaceDecl);
+ assert(getObjCDeclContext() == interfaceDecl &&
+ "Ivar should have interfaceDecl as its decl context");
// Install the declarator into the interface decl.
FD.D.setObjCIvar(true);
Decl *Field = Actions.ActOnIvar(
getCurScope(), FD.D.getDeclSpec().getSourceRange().getBegin(), FD.D,
FD.BitfieldSize, visibility);
- Actions.ActOnObjCContainerFinishDefinition();
if (Field)
AllIvarDecls.push_back(Field);
FD.complete(Field);
@@ -2050,7 +2102,7 @@ Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
ProtocolRefs.push_back(std::make_pair(protocolName, nameLoc));
// Parse the list of forward declarations.
- while (1) {
+ while (true) {
ConsumeToken(); // the ','
if (expectIdentifier()) {
SkipUntil(tok::semi);
@@ -2081,11 +2133,23 @@ Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
/*consumeLastToken=*/true))
return nullptr;
- Decl *ProtoType = Actions.ActOnStartProtocolInterface(
+ Sema::SkipBodyInfo SkipBody;
+ ObjCProtocolDecl *ProtoType = Actions.ActOnStartProtocolInterface(
AtLoc, protocolName, nameLoc, ProtocolRefs.data(), ProtocolRefs.size(),
- ProtocolLocs.data(), EndProtoLoc, attrs);
+ ProtocolLocs.data(), EndProtoLoc, attrs, &SkipBody);
ParseObjCInterfaceDeclList(tok::objc_protocol, ProtoType);
+ if (SkipBody.CheckSameAsPrevious) {
+ auto *PreviousDef = cast<ObjCProtocolDecl>(SkipBody.Previous);
+ if (Actions.ActOnDuplicateODRHashDefinition(ProtoType, PreviousDef)) {
+ ProtoType->mergeDuplicateDefinitionWithCommon(
+ PreviousDef->getDefinition());
+ } else {
+ ODRDiagsEmitter DiagsEmitter(Diags, Actions.getASTContext(),
+ getPreprocessor().getLangOpts());
+ DiagsEmitter.diagnoseMismatch(PreviousDef, ProtoType);
+ }
+ }
return Actions.ConvertDeclToDeclGroup(ProtoType);
}
@@ -2121,7 +2185,7 @@ Parser::ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
// We have a class or category name - consume it.
IdentifierInfo *nameId = Tok.getIdentifierInfo();
SourceLocation nameLoc = ConsumeToken(); // consume class or category name
- Decl *ObjCImpDecl = nullptr;
+ ObjCImplDecl *ObjCImpDecl = nullptr;
// Neither a type parameter list nor a list of protocol references is
// permitted here. Parse and diagnose them.
@@ -2216,9 +2280,11 @@ Parser::ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
{
ObjCImplParsingDataRAII ObjCImplParsing(*this, ObjCImpDecl);
while (!ObjCImplParsing.isFinished() && !isEofOrEom()) {
- ParsedAttributesWithRange attrs(AttrFactory);
- MaybeParseCXX11Attributes(attrs);
- if (DeclGroupPtrTy DGP = ParseExternalDeclaration(attrs)) {
+ ParsedAttributes DeclAttrs(AttrFactory);
+ MaybeParseCXX11Attributes(DeclAttrs);
+ ParsedAttributes EmptyDeclSpecAttrs(AttrFactory);
+ if (DeclGroupPtrTy DGP =
+ ParseExternalDeclaration(DeclAttrs, EmptyDeclSpecAttrs)) {
DeclGroupRef DG = DGP.get();
DeclsInGroup.append(DG.begin(), DG.end());
}
@@ -2539,7 +2605,8 @@ StmtResult Parser::ParseObjCTryStmt(SourceLocation atLoc) {
if (Tok.isNot(tok::ellipsis)) {
DeclSpec DS(AttrFactory);
ParseDeclarationSpecifiers(DS);
- Declarator ParmDecl(DS, DeclaratorContext::ObjCCatch);
+ Declarator ParmDecl(DS, ParsedAttributesView::none(),
+ DeclaratorContext::ObjCCatch);
ParseDeclarator(ParmDecl);
// Inform the actions module about the declarator, so it
@@ -2955,8 +3022,9 @@ bool Parser::ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr) {
// We have a class message. Turn the simple-type-specifier or
// typename-specifier we parsed into a type and parse the
// remainder of the class message.
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
- TypeResult Type = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
+ TypeResult Type = Actions.ActOnTypeName(DeclaratorInfo);
if (Type.isInvalid())
return true;
@@ -3159,14 +3227,14 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
if (Tok.is(tok::code_completion)) {
cutOffParsing();
if (SuperLoc.isValid())
- Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc, None,
- false);
+ Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc,
+ std::nullopt, false);
else if (ReceiverType)
- Actions.CodeCompleteObjCClassMessage(getCurScope(), ReceiverType, None,
- false);
+ Actions.CodeCompleteObjCClassMessage(getCurScope(), ReceiverType,
+ std::nullopt, false);
else
Actions.CodeCompleteObjCInstanceMessage(getCurScope(), ReceiverExpr,
- None, false);
+ std::nullopt, false);
return ExprError();
}
@@ -3179,7 +3247,7 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
ExprVector KeyExprs;
if (Tok.is(tok::colon)) {
- while (1) {
+ while (true) {
// Each iteration parses a single keyword argument.
KeyIdents.push_back(selIdent);
KeyLocs.push_back(Loc);
@@ -3498,9 +3566,8 @@ ExprResult Parser::ParseObjCDictionaryLiteral(SourceLocation AtLoc) {
// We have a valid expression. Collect it in a vector so we can
// build the argument list.
- ObjCDictionaryElement Element = {
- KeyExpr.get(), ValueExpr.get(), EllipsisLoc, None
- };
+ ObjCDictionaryElement Element = {KeyExpr.get(), ValueExpr.get(),
+ EllipsisLoc, std::nullopt};
Elements.push_back(Element);
if (!TryConsumeToken(tok::comma) && Tok.isNot(tok::r_brace))
@@ -3599,7 +3666,7 @@ ExprResult Parser::ParseObjCSelectorExpression(SourceLocation AtLoc) {
unsigned nColons = 0;
if (Tok.isNot(tok::r_paren)) {
- while (1) {
+ while (true) {
if (TryConsumeToken(tok::coloncolon)) { // Handle :: in C++.
++nColons;
KeyIdents.push_back(nullptr);
@@ -3697,6 +3764,8 @@ void Parser::ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod) {
while (Tok.getLocation() != OrigLoc && Tok.isNot(tok::eof))
ConsumeAnyToken();
}
- // Clean up the remaining EOF token.
- ConsumeAnyToken();
+ // Clean up the remaining EOF token, only if it's inserted by us. Otherwise
+ // this might be code-completion token, which must be propagated to callers.
+ if (Tok.is(tok::eof) && Tok.getEofData() == MCDecl)
+ ConsumeAnyToken();
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseOpenACC.cpp b/contrib/llvm-project/clang/lib/Parse/ParseOpenACC.cpp
new file mode 100644
index 000000000000..9f7e63ecdc95
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Parse/ParseOpenACC.cpp
@@ -0,0 +1,1044 @@
+//===--- ParseOpenACC.cpp - OpenACC-specific parsing support --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the parsing logic for OpenACC language features.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/OpenACCKinds.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
+#include "clang/Parse/RAIIObjectsForParser.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+
+using namespace clang;
+using namespace llvm;
+
+namespace {
+// An enum that contains the extended 'partial' parsed variants. This type
+// should never escape the initial parse functionality, but is useful for
+// simplifying the implementation.
+enum class OpenACCDirectiveKindEx {
+ Invalid = static_cast<int>(OpenACCDirectiveKind::Invalid),
+ // 'enter data' and 'exit data'
+ Enter,
+ Exit,
+};
+
+// Translate single-token string representations to the OpenACC Directive Kind.
+// This doesn't completely comprehend 'Compound Constructs' (as it just
+// identifies the first token), and doesn't fully handle 'enter data', 'exit
+// data', nor any of the 'atomic' variants, just the first token of each. So
+// this should only be used by `ParseOpenACCDirectiveKind`.
+OpenACCDirectiveKindEx getOpenACCDirectiveKind(Token Tok) {
+ if (!Tok.is(tok::identifier))
+ return OpenACCDirectiveKindEx::Invalid;
+ OpenACCDirectiveKind DirKind =
+ llvm::StringSwitch<OpenACCDirectiveKind>(
+ Tok.getIdentifierInfo()->getName())
+ .Case("parallel", OpenACCDirectiveKind::Parallel)
+ .Case("serial", OpenACCDirectiveKind::Serial)
+ .Case("kernels", OpenACCDirectiveKind::Kernels)
+ .Case("data", OpenACCDirectiveKind::Data)
+ .Case("host_data", OpenACCDirectiveKind::HostData)
+ .Case("loop", OpenACCDirectiveKind::Loop)
+ .Case("cache", OpenACCDirectiveKind::Cache)
+ .Case("atomic", OpenACCDirectiveKind::Atomic)
+ .Case("routine", OpenACCDirectiveKind::Routine)
+ .Case("declare", OpenACCDirectiveKind::Declare)
+ .Case("init", OpenACCDirectiveKind::Init)
+ .Case("shutdown", OpenACCDirectiveKind::Shutdown)
+ .Case("set", OpenACCDirectiveKind::Shutdown)
+ .Case("update", OpenACCDirectiveKind::Update)
+ .Case("wait", OpenACCDirectiveKind::Wait)
+ .Default(OpenACCDirectiveKind::Invalid);
+
+ if (DirKind != OpenACCDirectiveKind::Invalid)
+ return static_cast<OpenACCDirectiveKindEx>(DirKind);
+
+ return llvm::StringSwitch<OpenACCDirectiveKindEx>(
+ Tok.getIdentifierInfo()->getName())
+ .Case("enter", OpenACCDirectiveKindEx::Enter)
+ .Case("exit", OpenACCDirectiveKindEx::Exit)
+ .Default(OpenACCDirectiveKindEx::Invalid);
+}
+
+// Translate single-token string representations to the OpenCC Clause Kind.
+OpenACCClauseKind getOpenACCClauseKind(Token Tok) {
+ // auto is a keyword in some language modes, so make sure we parse it
+ // correctly.
+ if (Tok.is(tok::kw_auto))
+ return OpenACCClauseKind::Auto;
+
+ // default is a keyword, so make sure we parse it correctly.
+ if (Tok.is(tok::kw_default))
+ return OpenACCClauseKind::Default;
+
+ // if is also a keyword, make sure we parse it correctly.
+ if (Tok.is(tok::kw_if))
+ return OpenACCClauseKind::If;
+
+ if (!Tok.is(tok::identifier))
+ return OpenACCClauseKind::Invalid;
+
+ return llvm::StringSwitch<OpenACCClauseKind>(
+ Tok.getIdentifierInfo()->getName())
+ .Case("attach", OpenACCClauseKind::Attach)
+ .Case("auto", OpenACCClauseKind::Auto)
+ .Case("bind", OpenACCClauseKind::Bind)
+ .Case("create", OpenACCClauseKind::Create)
+ .Case("collapse", OpenACCClauseKind::Collapse)
+ .Case("copy", OpenACCClauseKind::Copy)
+ .Case("copyin", OpenACCClauseKind::CopyIn)
+ .Case("copyout", OpenACCClauseKind::CopyOut)
+ .Case("default", OpenACCClauseKind::Default)
+ .Case("default_async", OpenACCClauseKind::DefaultAsync)
+ .Case("delete", OpenACCClauseKind::Delete)
+ .Case("detach", OpenACCClauseKind::Detach)
+ .Case("device", OpenACCClauseKind::Device)
+ .Case("device_num", OpenACCClauseKind::DeviceNum)
+ .Case("device_resident", OpenACCClauseKind::DeviceResident)
+ .Case("device_type", OpenACCClauseKind::DeviceType)
+ .Case("deviceptr", OpenACCClauseKind::DevicePtr)
+ .Case("dtype", OpenACCClauseKind::DType)
+ .Case("finalize", OpenACCClauseKind::Finalize)
+ .Case("firstprivate", OpenACCClauseKind::FirstPrivate)
+ .Case("host", OpenACCClauseKind::Host)
+ .Case("if", OpenACCClauseKind::If)
+ .Case("if_present", OpenACCClauseKind::IfPresent)
+ .Case("independent", OpenACCClauseKind::Independent)
+ .Case("link", OpenACCClauseKind::Link)
+ .Case("no_create", OpenACCClauseKind::NoCreate)
+ .Case("num_gangs", OpenACCClauseKind::NumGangs)
+ .Case("num_workers", OpenACCClauseKind::NumWorkers)
+ .Case("nohost", OpenACCClauseKind::NoHost)
+ .Case("present", OpenACCClauseKind::Present)
+ .Case("private", OpenACCClauseKind::Private)
+ .Case("reduction", OpenACCClauseKind::Reduction)
+ .Case("self", OpenACCClauseKind::Self)
+ .Case("seq", OpenACCClauseKind::Seq)
+ .Case("use_device", OpenACCClauseKind::UseDevice)
+ .Case("vector", OpenACCClauseKind::Vector)
+ .Case("vector_length", OpenACCClauseKind::VectorLength)
+ .Case("worker", OpenACCClauseKind::Worker)
+ .Default(OpenACCClauseKind::Invalid);
+}
+
+// Since 'atomic' is effectively a compound directive, this will decode the
+// second part of the directive.
+OpenACCAtomicKind getOpenACCAtomicKind(Token Tok) {
+ if (!Tok.is(tok::identifier))
+ return OpenACCAtomicKind::Invalid;
+ return llvm::StringSwitch<OpenACCAtomicKind>(
+ Tok.getIdentifierInfo()->getName())
+ .Case("read", OpenACCAtomicKind::Read)
+ .Case("write", OpenACCAtomicKind::Write)
+ .Case("update", OpenACCAtomicKind::Update)
+ .Case("capture", OpenACCAtomicKind::Capture)
+ .Default(OpenACCAtomicKind::Invalid);
+}
+
+OpenACCDefaultClauseKind getOpenACCDefaultClauseKind(Token Tok) {
+ if (!Tok.is(tok::identifier))
+ return OpenACCDefaultClauseKind::Invalid;
+
+ return llvm::StringSwitch<OpenACCDefaultClauseKind>(
+ Tok.getIdentifierInfo()->getName())
+ .Case("none", OpenACCDefaultClauseKind::None)
+ .Case("present", OpenACCDefaultClauseKind::Present)
+ .Default(OpenACCDefaultClauseKind::Invalid);
+}
+
+enum class OpenACCSpecialTokenKind {
+ ReadOnly,
+ DevNum,
+ Queues,
+ Zero,
+ Force,
+ Num,
+ Length,
+};
+
+bool isOpenACCSpecialToken(OpenACCSpecialTokenKind Kind, Token Tok) {
+ if (!Tok.is(tok::identifier))
+ return false;
+
+ switch (Kind) {
+ case OpenACCSpecialTokenKind::ReadOnly:
+ return Tok.getIdentifierInfo()->isStr("readonly");
+ case OpenACCSpecialTokenKind::DevNum:
+ return Tok.getIdentifierInfo()->isStr("devnum");
+ case OpenACCSpecialTokenKind::Queues:
+ return Tok.getIdentifierInfo()->isStr("queues");
+ case OpenACCSpecialTokenKind::Zero:
+ return Tok.getIdentifierInfo()->isStr("zero");
+ case OpenACCSpecialTokenKind::Force:
+ return Tok.getIdentifierInfo()->isStr("force");
+ case OpenACCSpecialTokenKind::Num:
+ return Tok.getIdentifierInfo()->isStr("num");
+ case OpenACCSpecialTokenKind::Length:
+ return Tok.getIdentifierInfo()->isStr("length");
+ }
+ llvm_unreachable("Unknown 'Kind' Passed");
+}
+
+/// Used for cases where we have a token we want to check against an
+/// 'identifier-like' token, but don't want to give awkward error messages in
+/// cases where it is accidentially a keyword.
+bool isTokenIdentifierOrKeyword(Parser &P, Token Tok) {
+ if (Tok.is(tok::identifier))
+ return true;
+
+ if (!Tok.isAnnotation() && Tok.getIdentifierInfo() &&
+ Tok.getIdentifierInfo()->isKeyword(P.getLangOpts()))
+ return true;
+
+ return false;
+}
+
+/// Parses and consumes an identifer followed immediately by a single colon, and
+/// diagnoses if it is not the 'special token' kind that we require. Used when
+/// the tag is the only valid value.
+/// Return 'true' if the special token was matched, false if no special token,
+/// or an invalid special token was found.
+template <typename DirOrClauseTy>
+bool tryParseAndConsumeSpecialTokenKind(Parser &P, OpenACCSpecialTokenKind Kind,
+ DirOrClauseTy DirOrClause) {
+ Token IdentTok = P.getCurToken();
+ // If this is an identifier-like thing followed by ':', it is one of the
+ // OpenACC 'special' name tags, so consume it.
+ if (isTokenIdentifierOrKeyword(P, IdentTok) && P.NextToken().is(tok::colon)) {
+ P.ConsumeToken();
+ P.ConsumeToken();
+
+ if (!isOpenACCSpecialToken(Kind, IdentTok)) {
+ P.Diag(IdentTok, diag::err_acc_invalid_tag_kind)
+ << IdentTok.getIdentifierInfo() << DirOrClause
+ << std::is_same_v<DirOrClauseTy, OpenACCClauseKind>;
+ return false;
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+bool isOpenACCDirectiveKind(OpenACCDirectiveKind Kind, Token Tok) {
+ if (!Tok.is(tok::identifier))
+ return false;
+
+ switch (Kind) {
+ case OpenACCDirectiveKind::Parallel:
+ return Tok.getIdentifierInfo()->isStr("parallel");
+ case OpenACCDirectiveKind::Serial:
+ return Tok.getIdentifierInfo()->isStr("serial");
+ case OpenACCDirectiveKind::Kernels:
+ return Tok.getIdentifierInfo()->isStr("kernels");
+ case OpenACCDirectiveKind::Data:
+ return Tok.getIdentifierInfo()->isStr("data");
+ case OpenACCDirectiveKind::HostData:
+ return Tok.getIdentifierInfo()->isStr("host_data");
+ case OpenACCDirectiveKind::Loop:
+ return Tok.getIdentifierInfo()->isStr("loop");
+ case OpenACCDirectiveKind::Cache:
+ return Tok.getIdentifierInfo()->isStr("cache");
+
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::SerialLoop:
+ case OpenACCDirectiveKind::KernelsLoop:
+ case OpenACCDirectiveKind::EnterData:
+ case OpenACCDirectiveKind::ExitData:
+ return false;
+
+ case OpenACCDirectiveKind::Atomic:
+ return Tok.getIdentifierInfo()->isStr("atomic");
+ case OpenACCDirectiveKind::Routine:
+ return Tok.getIdentifierInfo()->isStr("routine");
+ case OpenACCDirectiveKind::Declare:
+ return Tok.getIdentifierInfo()->isStr("declare");
+ case OpenACCDirectiveKind::Init:
+ return Tok.getIdentifierInfo()->isStr("init");
+ case OpenACCDirectiveKind::Shutdown:
+ return Tok.getIdentifierInfo()->isStr("shutdown");
+ case OpenACCDirectiveKind::Set:
+ return Tok.getIdentifierInfo()->isStr("set");
+ case OpenACCDirectiveKind::Update:
+ return Tok.getIdentifierInfo()->isStr("update");
+ case OpenACCDirectiveKind::Wait:
+ return Tok.getIdentifierInfo()->isStr("wait");
+ case OpenACCDirectiveKind::Invalid:
+ return false;
+ }
+ llvm_unreachable("Unknown 'Kind' Passed");
+}
+
+OpenACCReductionOperator ParseReductionOperator(Parser &P) {
+ // If there is no colon, treat as if the reduction operator was missing, else
+ // we probably will not recover from it in the case where an expression starts
+ // with one of the operator tokens.
+ if (P.NextToken().isNot(tok::colon)) {
+ P.Diag(P.getCurToken(), diag::err_acc_expected_reduction_operator);
+ return OpenACCReductionOperator::Invalid;
+ }
+ Token ReductionKindTok = P.getCurToken();
+ // Consume both the kind and the colon.
+ P.ConsumeToken();
+ P.ConsumeToken();
+
+ switch (ReductionKindTok.getKind()) {
+ case tok::plus:
+ return OpenACCReductionOperator::Addition;
+ case tok::star:
+ return OpenACCReductionOperator::Multiplication;
+ case tok::amp:
+ return OpenACCReductionOperator::BitwiseAnd;
+ case tok::pipe:
+ return OpenACCReductionOperator::BitwiseOr;
+ case tok::caret:
+ return OpenACCReductionOperator::BitwiseXOr;
+ case tok::ampamp:
+ return OpenACCReductionOperator::And;
+ case tok::pipepipe:
+ return OpenACCReductionOperator::Or;
+ case tok::identifier:
+ if (ReductionKindTok.getIdentifierInfo()->isStr("max"))
+ return OpenACCReductionOperator::Max;
+ if (ReductionKindTok.getIdentifierInfo()->isStr("min"))
+ return OpenACCReductionOperator::Min;
+ LLVM_FALLTHROUGH;
+ default:
+ P.Diag(ReductionKindTok, diag::err_acc_invalid_reduction_operator);
+ return OpenACCReductionOperator::Invalid;
+ }
+ llvm_unreachable("Reduction op token kind not caught by 'default'?");
+}
+
+/// Used for cases where we expect an identifier-like token, but don't want to
+/// give awkward error messages in cases where it is accidentially a keyword.
+bool expectIdentifierOrKeyword(Parser &P) {
+ Token Tok = P.getCurToken();
+
+ if (isTokenIdentifierOrKeyword(P, Tok))
+ return false;
+
+ P.Diag(P.getCurToken(), diag::err_expected) << tok::identifier;
+ return true;
+}
+
+OpenACCDirectiveKind
+ParseOpenACCEnterExitDataDirective(Parser &P, Token FirstTok,
+ OpenACCDirectiveKindEx ExtDirKind) {
+ Token SecondTok = P.getCurToken();
+
+ if (SecondTok.isAnnotation()) {
+ P.Diag(FirstTok, diag::err_acc_invalid_directive)
+ << 0 << FirstTok.getIdentifierInfo();
+ return OpenACCDirectiveKind::Invalid;
+ }
+
+ // Consume the second name anyway, this way we can continue on without making
+ // this oddly look like a clause.
+ P.ConsumeAnyToken();
+
+ if (!isOpenACCDirectiveKind(OpenACCDirectiveKind::Data, SecondTok)) {
+ if (!SecondTok.is(tok::identifier))
+ P.Diag(SecondTok, diag::err_expected) << tok::identifier;
+ else
+ P.Diag(FirstTok, diag::err_acc_invalid_directive)
+ << 1 << FirstTok.getIdentifierInfo()->getName()
+ << SecondTok.getIdentifierInfo()->getName();
+ return OpenACCDirectiveKind::Invalid;
+ }
+
+ return ExtDirKind == OpenACCDirectiveKindEx::Enter
+ ? OpenACCDirectiveKind::EnterData
+ : OpenACCDirectiveKind::ExitData;
+}
+
+OpenACCAtomicKind ParseOpenACCAtomicKind(Parser &P) {
+ Token AtomicClauseToken = P.getCurToken();
+
+ // #pragma acc atomic is equivilent to update:
+ if (AtomicClauseToken.isAnnotation())
+ return OpenACCAtomicKind::Update;
+
+ OpenACCAtomicKind AtomicKind = getOpenACCAtomicKind(AtomicClauseToken);
+
+ // If we don't know what this is, treat it as 'nothing', and treat the rest of
+ // this as a clause list, which, despite being invalid, is likely what the
+ // user was trying to do.
+ if (AtomicKind == OpenACCAtomicKind::Invalid)
+ return OpenACCAtomicKind::Update;
+
+ P.ConsumeToken();
+ return AtomicKind;
+}
+
+// Parse and consume the tokens for OpenACC Directive/Construct kinds.
+OpenACCDirectiveKind ParseOpenACCDirectiveKind(Parser &P) {
+ Token FirstTok = P.getCurToken();
+
+ // Just #pragma acc can get us immediately to the end, make sure we don't
+ // introspect on the spelling before then.
+ if (FirstTok.isNot(tok::identifier)) {
+ P.Diag(FirstTok, diag::err_acc_missing_directive);
+
+ if (P.getCurToken().isNot(tok::annot_pragma_openacc_end))
+ P.ConsumeAnyToken();
+
+ return OpenACCDirectiveKind::Invalid;
+ }
+
+ P.ConsumeToken();
+
+ OpenACCDirectiveKindEx ExDirKind = getOpenACCDirectiveKind(FirstTok);
+
+ // OpenACCDirectiveKindEx is meant to be an extended list
+ // over OpenACCDirectiveKind, so any value below Invalid is one of the
+ // OpenACCDirectiveKind values. This switch takes care of all of the extra
+ // parsing required for the Extended values. At the end of this block,
+ // ExDirKind can be assumed to be a valid OpenACCDirectiveKind, so we can
+ // immediately cast it and use it as that.
+ if (ExDirKind >= OpenACCDirectiveKindEx::Invalid) {
+ switch (ExDirKind) {
+ case OpenACCDirectiveKindEx::Invalid: {
+ P.Diag(FirstTok, diag::err_acc_invalid_directive)
+ << 0 << FirstTok.getIdentifierInfo();
+ return OpenACCDirectiveKind::Invalid;
+ }
+ case OpenACCDirectiveKindEx::Enter:
+ case OpenACCDirectiveKindEx::Exit:
+ return ParseOpenACCEnterExitDataDirective(P, FirstTok, ExDirKind);
+ }
+ }
+
+ OpenACCDirectiveKind DirKind = static_cast<OpenACCDirectiveKind>(ExDirKind);
+
+ // Combined Constructs allows parallel loop, serial loop, or kernels loop. Any
+ // other attempt at a combined construct will be diagnosed as an invalid
+ // clause.
+ Token SecondTok = P.getCurToken();
+ if (!SecondTok.isAnnotation() &&
+ isOpenACCDirectiveKind(OpenACCDirectiveKind::Loop, SecondTok)) {
+ switch (DirKind) {
+ default:
+ // Nothing to do except in the below cases, as they should be diagnosed as
+ // a clause.
+ break;
+ case OpenACCDirectiveKind::Parallel:
+ P.ConsumeToken();
+ return OpenACCDirectiveKind::ParallelLoop;
+ case OpenACCDirectiveKind::Serial:
+ P.ConsumeToken();
+ return OpenACCDirectiveKind::SerialLoop;
+ case OpenACCDirectiveKind::Kernels:
+ P.ConsumeToken();
+ return OpenACCDirectiveKind::KernelsLoop;
+ }
+ }
+
+ return DirKind;
+}
+
+enum ClauseParensKind {
+ None,
+ Optional,
+ Required
+};
+
+ClauseParensKind getClauseParensKind(OpenACCDirectiveKind DirKind,
+ OpenACCClauseKind Kind) {
+ switch (Kind) {
+ case OpenACCClauseKind::Self:
+ return DirKind == OpenACCDirectiveKind::Update ? ClauseParensKind::Required
+ : ClauseParensKind::Optional;
+ case OpenACCClauseKind::Worker:
+ case OpenACCClauseKind::Vector:
+ return ClauseParensKind::Optional;
+
+ case OpenACCClauseKind::Default:
+ case OpenACCClauseKind::If:
+ case OpenACCClauseKind::Create:
+ case OpenACCClauseKind::Copy:
+ case OpenACCClauseKind::CopyIn:
+ case OpenACCClauseKind::CopyOut:
+ case OpenACCClauseKind::UseDevice:
+ case OpenACCClauseKind::NoCreate:
+ case OpenACCClauseKind::Present:
+ case OpenACCClauseKind::DevicePtr:
+ case OpenACCClauseKind::Attach:
+ case OpenACCClauseKind::Detach:
+ case OpenACCClauseKind::Private:
+ case OpenACCClauseKind::FirstPrivate:
+ case OpenACCClauseKind::Delete:
+ case OpenACCClauseKind::DeviceResident:
+ case OpenACCClauseKind::Device:
+ case OpenACCClauseKind::Link:
+ case OpenACCClauseKind::Host:
+ case OpenACCClauseKind::Reduction:
+ case OpenACCClauseKind::Collapse:
+ case OpenACCClauseKind::Bind:
+ case OpenACCClauseKind::VectorLength:
+ case OpenACCClauseKind::NumGangs:
+ case OpenACCClauseKind::NumWorkers:
+ case OpenACCClauseKind::DeviceNum:
+ case OpenACCClauseKind::DefaultAsync:
+ case OpenACCClauseKind::DeviceType:
+ case OpenACCClauseKind::DType:
+ return ClauseParensKind::Required;
+
+ case OpenACCClauseKind::Auto:
+ case OpenACCClauseKind::Finalize:
+ case OpenACCClauseKind::IfPresent:
+ case OpenACCClauseKind::Independent:
+ case OpenACCClauseKind::Invalid:
+ case OpenACCClauseKind::NoHost:
+ case OpenACCClauseKind::Seq:
+ return ClauseParensKind::None;
+ }
+ llvm_unreachable("Unhandled clause kind");
+}
+
+bool ClauseHasOptionalParens(OpenACCDirectiveKind DirKind,
+ OpenACCClauseKind Kind) {
+ return getClauseParensKind(DirKind, Kind) == ClauseParensKind::Optional;
+}
+
+bool ClauseHasRequiredParens(OpenACCDirectiveKind DirKind,
+ OpenACCClauseKind Kind) {
+ return getClauseParensKind(DirKind, Kind) == ClauseParensKind::Required;
+}
+
+ExprResult ParseOpenACCConditionalExpr(Parser &P) {
+ // FIXME: It isn't clear if the spec saying 'condition' means the same as
+ // it does in an if/while/etc (See ParseCXXCondition), however as it was
+ // written with Fortran/C in mind, we're going to assume it just means an
+ // 'expression evaluating to boolean'.
+ return P.getActions().CorrectDelayedTyposInExpr(P.ParseExpression());
+}
+
+// Skip until we see the end of pragma token, but don't consume it. This is us
+// just giving up on the rest of the pragma so we can continue executing. We
+// have to do this because 'SkipUntil' considers paren balancing, which isn't
+// what we want.
+void SkipUntilEndOfDirective(Parser &P) {
+ while (P.getCurToken().isNot(tok::annot_pragma_openacc_end))
+ P.ConsumeAnyToken();
+}
+
+} // namespace
+
+// OpenACC 3.3, section 1.7:
+// To simplify the specification and convey appropriate constraint information,
+// a pqr-list is a comma-separated list of pdr items. The one exception is a
+// clause-list, which is a list of one or more clauses optionally separated by
+// commas.
+void Parser::ParseOpenACCClauseList(OpenACCDirectiveKind DirKind) {
+ bool FirstClause = true;
+ while (getCurToken().isNot(tok::annot_pragma_openacc_end)) {
+ // Comma is optional in a clause-list.
+ if (!FirstClause && getCurToken().is(tok::comma))
+ ConsumeToken();
+ FirstClause = false;
+
+ // Recovering from a bad clause is really difficult, so we just give up on
+ // error.
+ if (ParseOpenACCClause(DirKind)) {
+ SkipUntilEndOfDirective(*this);
+ return;
+ }
+ }
+}
+
+ExprResult Parser::ParseOpenACCIntExpr() {
+ // FIXME: this is required to be an integer expression (or dependent), so we
+ // should ensure that is the case by passing this to SEMA here.
+ return getActions().CorrectDelayedTyposInExpr(ParseAssignmentExpression());
+}
+
+bool Parser::ParseOpenACCClauseVarList(OpenACCClauseKind Kind) {
+ // FIXME: Future clauses will require 'special word' parsing, check for one,
+ // then parse it based on whether it is a clause that requires a 'special
+ // word'.
+ (void)Kind;
+
+ // If the var parsing fails, skip until the end of the directive as this is
+ // an expression and gets messy if we try to continue otherwise.
+ if (ParseOpenACCVar())
+ return true;
+
+ while (!getCurToken().isOneOf(tok::r_paren, tok::annot_pragma_openacc_end)) {
+ ExpectAndConsume(tok::comma);
+
+ // If the var parsing fails, skip until the end of the directive as this is
+ // an expression and gets messy if we try to continue otherwise.
+ if (ParseOpenACCVar())
+ return true;
+ }
+ return false;
+}
+
+/// OpenACC 3.3 Section 2.4:
+/// The argument to the device_type clause is a comma-separated list of one or
+/// more device architecture name identifiers, or an asterisk.
+///
+/// The syntax of the device_type clause is
+/// device_type( * )
+/// device_type( device-type-list )
+///
+/// The device_type clause may be abbreviated to dtype.
+bool Parser::ParseOpenACCDeviceTypeList() {
+
+ if (expectIdentifierOrKeyword(*this)) {
+ SkipUntil(tok::r_paren, tok::annot_pragma_openacc_end,
+ Parser::StopBeforeMatch);
+ return false;
+ }
+ ConsumeToken();
+
+ while (!getCurToken().isOneOf(tok::r_paren, tok::annot_pragma_openacc_end)) {
+ ExpectAndConsume(tok::comma);
+
+ if (expectIdentifierOrKeyword(*this)) {
+ SkipUntil(tok::r_paren, tok::annot_pragma_openacc_end,
+ Parser::StopBeforeMatch);
+ return false;
+ }
+ ConsumeToken();
+ }
+ return false;
+}
+
+// The OpenACC Clause List is a comma or space-delimited list of clauses (see
+// the comment on ParseOpenACCClauseList). The concept of a 'clause' doesn't
+// really have its owner grammar and each individual one has its own definition.
+// However, they all are named with a single-identifier (or auto/default!)
+// token, followed in some cases by either braces or parens.
+bool Parser::ParseOpenACCClause(OpenACCDirectiveKind DirKind) {
+ // A number of clause names are actually keywords, so accept a keyword that
+ // can be converted to a name.
+ if (expectIdentifierOrKeyword(*this))
+ return true;
+
+ OpenACCClauseKind Kind = getOpenACCClauseKind(getCurToken());
+
+ if (Kind == OpenACCClauseKind::Invalid)
+ return Diag(getCurToken(), diag::err_acc_invalid_clause)
+ << getCurToken().getIdentifierInfo();
+
+ // Consume the clause name.
+ ConsumeToken();
+
+ return ParseOpenACCClauseParams(DirKind, Kind);
+}
+
+bool Parser::ParseOpenACCClauseParams(OpenACCDirectiveKind DirKind,
+ OpenACCClauseKind Kind) {
+ BalancedDelimiterTracker Parens(*this, tok::l_paren,
+ tok::annot_pragma_openacc_end);
+
+ if (ClauseHasRequiredParens(DirKind, Kind)) {
+ if (Parens.expectAndConsume()) {
+ // We are missing a paren, so assume that the person just forgot the
+ // parameter. Return 'false' so we try to continue on and parse the next
+ // clause.
+ SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openacc_end,
+ Parser::StopBeforeMatch);
+ return false;
+ }
+
+ switch (Kind) {
+ case OpenACCClauseKind::Default: {
+ Token DefKindTok = getCurToken();
+
+ if (expectIdentifierOrKeyword(*this))
+ break;
+
+ ConsumeToken();
+
+ if (getOpenACCDefaultClauseKind(DefKindTok) ==
+ OpenACCDefaultClauseKind::Invalid)
+ Diag(DefKindTok, diag::err_acc_invalid_default_clause_kind);
+
+ break;
+ }
+ case OpenACCClauseKind::If: {
+ ExprResult CondExpr = ParseOpenACCConditionalExpr(*this);
+ // An invalid expression can be just about anything, so just give up on
+ // this clause list.
+ if (CondExpr.isInvalid())
+ return true;
+ break;
+ }
+ case OpenACCClauseKind::CopyIn:
+ tryParseAndConsumeSpecialTokenKind(
+ *this, OpenACCSpecialTokenKind::ReadOnly, Kind);
+ if (ParseOpenACCClauseVarList(Kind))
+ return true;
+ break;
+ case OpenACCClauseKind::Create:
+ case OpenACCClauseKind::CopyOut:
+ tryParseAndConsumeSpecialTokenKind(*this, OpenACCSpecialTokenKind::Zero,
+ Kind);
+ if (ParseOpenACCClauseVarList(Kind))
+ return true;
+ break;
+ case OpenACCClauseKind::Reduction:
+ // If we're missing a clause-kind (or it is invalid), see if we can parse
+ // the var-list anyway.
+ ParseReductionOperator(*this);
+ if (ParseOpenACCClauseVarList(Kind))
+ return true;
+ break;
+ case OpenACCClauseKind::Self:
+ // The 'self' clause is a var-list instead of a 'condition' in the case of
+ // the 'update' clause, so we have to handle it here. U se an assert to
+ // make sure we get the right differentiator.
+ assert(DirKind == OpenACCDirectiveKind::Update);
+ LLVM_FALLTHROUGH;
+ case OpenACCClauseKind::Attach:
+ case OpenACCClauseKind::Copy:
+ case OpenACCClauseKind::Delete:
+ case OpenACCClauseKind::Detach:
+ case OpenACCClauseKind::Device:
+ case OpenACCClauseKind::DeviceResident:
+ case OpenACCClauseKind::DevicePtr:
+ case OpenACCClauseKind::FirstPrivate:
+ case OpenACCClauseKind::Host:
+ case OpenACCClauseKind::Link:
+ case OpenACCClauseKind::NoCreate:
+ case OpenACCClauseKind::Present:
+ case OpenACCClauseKind::Private:
+ case OpenACCClauseKind::UseDevice:
+ if (ParseOpenACCClauseVarList(Kind))
+ return true;
+ break;
+ case OpenACCClauseKind::Collapse: {
+ tryParseAndConsumeSpecialTokenKind(*this, OpenACCSpecialTokenKind::Force,
+ Kind);
+ ExprResult NumLoops =
+ getActions().CorrectDelayedTyposInExpr(ParseConstantExpression());
+ if (NumLoops.isInvalid())
+ return true;
+ break;
+ }
+ case OpenACCClauseKind::Bind: {
+ ExprResult BindArg = ParseOpenACCBindClauseArgument();
+ if (BindArg.isInvalid())
+ return true;
+ break;
+ }
+ case OpenACCClauseKind::NumGangs:
+ case OpenACCClauseKind::NumWorkers:
+ case OpenACCClauseKind::DeviceNum:
+ case OpenACCClauseKind::DefaultAsync:
+ case OpenACCClauseKind::VectorLength: {
+ ExprResult IntExpr = ParseOpenACCIntExpr();
+ if (IntExpr.isInvalid())
+ return true;
+ break;
+ }
+ case OpenACCClauseKind::DType:
+ case OpenACCClauseKind::DeviceType:
+ if (getCurToken().is(tok::star)) {
+ // FIXME: We want to mark that this is an 'everything else' type of
+ // device_type in Sema.
+ ConsumeToken();
+ } else if (ParseOpenACCDeviceTypeList()) {
+ return true;
+ }
+ break;
+ default:
+ llvm_unreachable("Not a required parens type?");
+ }
+
+ return Parens.consumeClose();
+ } else if (ClauseHasOptionalParens(DirKind, Kind)) {
+ if (!Parens.consumeOpen()) {
+ switch (Kind) {
+ case OpenACCClauseKind::Self: {
+ assert(DirKind != OpenACCDirectiveKind::Update);
+ ExprResult CondExpr = ParseOpenACCConditionalExpr(*this);
+ // An invalid expression can be just about anything, so just give up on
+ // this clause list.
+ if (CondExpr.isInvalid())
+ return true;
+ break;
+ }
+ case OpenACCClauseKind::Vector:
+ case OpenACCClauseKind::Worker: {
+ tryParseAndConsumeSpecialTokenKind(*this,
+ Kind == OpenACCClauseKind::Vector
+ ? OpenACCSpecialTokenKind::Length
+ : OpenACCSpecialTokenKind::Num,
+ Kind);
+ ExprResult IntExpr = ParseOpenACCIntExpr();
+ if (IntExpr.isInvalid())
+ return true;
+ break;
+ }
+ default:
+ llvm_unreachable("Not an optional parens type?");
+ }
+ Parens.consumeClose();
+ }
+ }
+ return false;
+}
+
+/// OpenACC 3.3, section 2.16:
+/// In this section and throughout the specification, the term wait-argument
+/// means:
+/// [ devnum : int-expr : ] [ queues : ] async-argument-list
+bool Parser::ParseOpenACCWaitArgument() {
+ // [devnum : int-expr : ]
+ if (isOpenACCSpecialToken(OpenACCSpecialTokenKind::DevNum, Tok) &&
+ NextToken().is(tok::colon)) {
+ // Consume devnum.
+ ConsumeToken();
+ // Consume colon.
+ ConsumeToken();
+
+ ExprResult IntExpr =
+ getActions().CorrectDelayedTyposInExpr(ParseAssignmentExpression());
+ if (IntExpr.isInvalid())
+ return true;
+
+ if (ExpectAndConsume(tok::colon))
+ return true;
+ }
+
+ // [ queues : ]
+ if (isOpenACCSpecialToken(OpenACCSpecialTokenKind::Queues, Tok) &&
+ NextToken().is(tok::colon)) {
+ // Consume queues.
+ ConsumeToken();
+ // Consume colon.
+ ConsumeToken();
+ }
+
+ // OpenACC 3.3, section 2.16:
+ // the term 'async-argument' means a nonnegative scalar integer expression, or
+ // one of the special values 'acc_async_noval' or 'acc_async_sync', as defined
+ // in the C header file and the Fortran opacc module.
+ //
+ // We are parsing this simply as list of assignment expressions (to avoid
+ // comma being troublesome), and will ensure it is an integral type. The
+ // 'special' types are defined as macros, so we can't really check those
+ // (other than perhaps as values at one point?), but the standard does say it
+ // is implementation-defined to use any other negative value.
+ //
+ //
+ bool FirstArg = true;
+ while (!getCurToken().isOneOf(tok::r_paren, tok::annot_pragma_openacc_end)) {
+ if (!FirstArg) {
+ if (ExpectAndConsume(tok::comma))
+ return true;
+ }
+ FirstArg = false;
+
+ ExprResult CurArg =
+ getActions().CorrectDelayedTyposInExpr(ParseAssignmentExpression());
+
+ if (CurArg.isInvalid())
+ return true;
+ }
+
+ return false;
+}
+
+ExprResult Parser::ParseOpenACCIDExpression() {
+ ExprResult Res;
+ if (getLangOpts().CPlusPlus) {
+ Res = ParseCXXIdExpression(/*isAddressOfOperand=*/true);
+ } else {
+ // There isn't anything quite the same as ParseCXXIdExpression for C, so we
+ // need to get the identifier, then call into Sema ourselves.
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ return ExprError();
+ }
+
+ Token FuncName = getCurToken();
+ UnqualifiedId Name;
+ CXXScopeSpec ScopeSpec;
+ SourceLocation TemplateKWLoc;
+ Name.setIdentifier(FuncName.getIdentifierInfo(), ConsumeToken());
+
+ // Ensure this is a valid identifier. We don't accept causing implicit
+ // function declarations per the spec, so always claim to not have trailing
+ // L Paren.
+ Res = Actions.ActOnIdExpression(getCurScope(), ScopeSpec, TemplateKWLoc,
+ Name, /*HasTrailingLParen=*/false,
+ /*isAddressOfOperand=*/false);
+ }
+
+ return getActions().CorrectDelayedTyposInExpr(Res);
+}
+
+ExprResult Parser::ParseOpenACCBindClauseArgument() {
+ // OpenACC 3.3 section 2.15:
+ // The bind clause specifies the name to use when calling the procedure on a
+ // device other than the host. If the name is specified as an identifier, it
+ // is called as if that name were specified in the language being compiled. If
+ // the name is specified as a string, the string is used for the procedure
+ // name unmodified.
+ if (getCurToken().is(tok::r_paren)) {
+ Diag(getCurToken(), diag::err_acc_incorrect_bind_arg);
+ return ExprError();
+ }
+
+ if (tok::isStringLiteral(getCurToken().getKind()))
+ return getActions().CorrectDelayedTyposInExpr(ParseStringLiteralExpression(
+ /*AllowUserDefinedLiteral=*/false, /*Unevaluated=*/true));
+
+ return ParseOpenACCIDExpression();
+}
+
+/// OpenACC 3.3, section 1.6:
+/// In this spec, a 'var' (in italics) is one of the following:
+/// - a variable name (a scalar, array, or compisite variable name)
+/// - a subarray specification with subscript ranges
+/// - an array element
+/// - a member of a composite variable
+/// - a common block name between slashes (fortran only)
+bool Parser::ParseOpenACCVar() {
+ OpenACCArraySectionRAII ArraySections(*this);
+ ExprResult Res =
+ getActions().CorrectDelayedTyposInExpr(ParseAssignmentExpression());
+ return Res.isInvalid();
+}
+
+/// OpenACC 3.3, section 2.10:
+/// In C and C++, the syntax of the cache directive is:
+///
+/// #pragma acc cache ([readonly:]var-list) new-line
+void Parser::ParseOpenACCCacheVarList() {
+ // If this is the end of the line, just return 'false' and count on the close
+ // paren diagnostic to catch the issue.
+ if (getCurToken().isAnnotation())
+ return;
+
+ // The VarList is an optional `readonly:` followed by a list of a variable
+ // specifications. Consume something that looks like a 'tag', and diagnose if
+ // it isn't 'readonly'.
+ if (tryParseAndConsumeSpecialTokenKind(*this,
+ OpenACCSpecialTokenKind::ReadOnly,
+ OpenACCDirectiveKind::Cache)) {
+ // FIXME: Record that this is a 'readonly' so that we can use that during
+ // Sema/AST generation.
+ }
+
+ bool FirstArray = true;
+ while (!getCurToken().isOneOf(tok::r_paren, tok::annot_pragma_openacc_end)) {
+ if (!FirstArray)
+ ExpectAndConsume(tok::comma);
+ FirstArray = false;
+
+ // OpenACC 3.3, section 2.10:
+ // A 'var' in a cache directive must be a single array element or a simple
+ // subarray. In C and C++, a simple subarray is an array name followed by
+ // an extended array range specification in brackets, with a start and
+ // length such as:
+ //
+ // arr[lower:length]
+ //
+ if (ParseOpenACCVar())
+ SkipUntil(tok::r_paren, tok::annot_pragma_openacc_end, tok::comma,
+ StopBeforeMatch);
+ }
+}
+
+void Parser::ParseOpenACCDirective() {
+ OpenACCDirectiveKind DirKind = ParseOpenACCDirectiveKind(*this);
+
+ // Once we've parsed the construct/directive name, some have additional
+ // specifiers that need to be taken care of. Atomic has an 'atomic-clause'
+ // that needs to be parsed.
+ if (DirKind == OpenACCDirectiveKind::Atomic)
+ ParseOpenACCAtomicKind(*this);
+
+ // We've successfully parsed the construct/directive name, however a few of
+ // the constructs have optional parens that contain further details.
+ BalancedDelimiterTracker T(*this, tok::l_paren,
+ tok::annot_pragma_openacc_end);
+
+ if (!T.consumeOpen()) {
+ switch (DirKind) {
+ default:
+ Diag(T.getOpenLocation(), diag::err_acc_invalid_open_paren);
+ T.skipToEnd();
+ break;
+ case OpenACCDirectiveKind::Routine: {
+ // Routine has an optional paren-wrapped name of a function in the local
+ // scope. We parse the name, emitting any diagnostics
+ ExprResult RoutineName = ParseOpenACCIDExpression();
+ // If the routine name is invalid, just skip until the closing paren to
+ // recover more gracefully.
+ if (RoutineName.isInvalid())
+ T.skipToEnd();
+ else
+ T.consumeClose();
+ break;
+ }
+ case OpenACCDirectiveKind::Cache:
+ ParseOpenACCCacheVarList();
+ // The ParseOpenACCCacheVarList function manages to recover from failures,
+ // so we can always consume the close.
+ T.consumeClose();
+ break;
+ case OpenACCDirectiveKind::Wait:
+ // OpenACC has an optional paren-wrapped 'wait-argument'.
+ if (ParseOpenACCWaitArgument())
+ T.skipToEnd();
+ else
+ T.consumeClose();
+ break;
+ }
+ } else if (DirKind == OpenACCDirectiveKind::Cache) {
+ // Cache's paren var-list is required, so error here if it isn't provided.
+ // We know that the consumeOpen above left the first non-paren here, so
+ // diagnose, then continue as if it was completely omitted.
+ Diag(Tok, diag::err_expected) << tok::l_paren;
+ }
+
+ // Parses the list of clauses, if present.
+ ParseOpenACCClauseList(DirKind);
+
+ Diag(getCurToken(), diag::warn_pragma_acc_unimplemented);
+ assert(Tok.is(tok::annot_pragma_openacc_end) &&
+ "Didn't parse all OpenACC Clauses");
+ ConsumeAnnotationToken();
+}
+
+// Parse OpenACC directive on a declaration.
+Parser::DeclGroupPtrTy Parser::ParseOpenACCDirectiveDecl() {
+ assert(Tok.is(tok::annot_pragma_openacc) && "expected OpenACC Start Token");
+
+ ParsingOpenACCDirectiveRAII DirScope(*this);
+ ConsumeAnnotationToken();
+
+ ParseOpenACCDirective();
+
+ return nullptr;
+}
+
+// Parse OpenACC Directive on a Statement.
+StmtResult Parser::ParseOpenACCDirectiveStmt() {
+ assert(Tok.is(tok::annot_pragma_openacc) && "expected OpenACC Start Token");
+
+ ParsingOpenACCDirectiveRAII DirScope(*this);
+ ConsumeAnnotationToken();
+
+ ParseOpenACCDirective();
+
+ return StmtEmpty();
+}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp b/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp
index 18e43c3734ac..da5f6605c6ff 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp
@@ -19,11 +19,14 @@
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Scope.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/UniqueVector.h"
+#include "llvm/Frontend/OpenMP/OMPAssume.h"
#include "llvm/Frontend/OpenMP/OMPContext.h"
+#include <optional>
using namespace clang;
using namespace llvm::omp;
@@ -149,10 +152,12 @@ static OpenMPDirectiveKindExWrapper parseOpenMPDirectiveKind(Parser &P) {
{OMPD_for, OMPD_simd, OMPD_for_simd},
{OMPD_parallel, OMPD_for, OMPD_parallel_for},
{OMPD_parallel_for, OMPD_simd, OMPD_parallel_for_simd},
+ {OMPD_parallel, OMPD_loop, OMPD_parallel_loop},
{OMPD_parallel, OMPD_sections, OMPD_parallel_sections},
{OMPD_taskloop, OMPD_simd, OMPD_taskloop_simd},
{OMPD_target, OMPD_parallel, OMPD_target_parallel},
{OMPD_target, OMPD_simd, OMPD_target_simd},
+ {OMPD_target_parallel, OMPD_loop, OMPD_target_parallel_loop},
{OMPD_target_parallel, OMPD_for, OMPD_target_parallel_for},
{OMPD_target_parallel_for, OMPD_simd, OMPD_target_parallel_for_simd},
{OMPD_teams, OMPD_distribute, OMPD_teams_distribute},
@@ -162,8 +167,10 @@ static OpenMPDirectiveKindExWrapper parseOpenMPDirectiveKind(Parser &P) {
OMPD_teams_distribute_parallel_for},
{OMPD_teams_distribute_parallel_for, OMPD_simd,
OMPD_teams_distribute_parallel_for_simd},
+ {OMPD_teams, OMPD_loop, OMPD_teams_loop},
{OMPD_target, OMPD_teams, OMPD_target_teams},
{OMPD_target_teams, OMPD_distribute, OMPD_target_teams_distribute},
+ {OMPD_target_teams, OMPD_loop, OMPD_target_teams_loop},
{OMPD_target_teams_distribute, OMPD_parallel,
OMPD_target_teams_distribute_parallel},
{OMPD_target_teams_distribute, OMPD_simd,
@@ -173,11 +180,17 @@ static OpenMPDirectiveKindExWrapper parseOpenMPDirectiveKind(Parser &P) {
{OMPD_target_teams_distribute_parallel_for, OMPD_simd,
OMPD_target_teams_distribute_parallel_for_simd},
{OMPD_master, OMPD_taskloop, OMPD_master_taskloop},
+ {OMPD_masked, OMPD_taskloop, OMPD_masked_taskloop},
{OMPD_master_taskloop, OMPD_simd, OMPD_master_taskloop_simd},
+ {OMPD_masked_taskloop, OMPD_simd, OMPD_masked_taskloop_simd},
{OMPD_parallel, OMPD_master, OMPD_parallel_master},
+ {OMPD_parallel, OMPD_masked, OMPD_parallel_masked},
{OMPD_parallel_master, OMPD_taskloop, OMPD_parallel_master_taskloop},
+ {OMPD_parallel_masked, OMPD_taskloop, OMPD_parallel_masked_taskloop},
{OMPD_parallel_master_taskloop, OMPD_simd,
- OMPD_parallel_master_taskloop_simd}};
+ OMPD_parallel_master_taskloop_simd},
+ {OMPD_parallel_masked_taskloop, OMPD_simd,
+ OMPD_parallel_masked_taskloop_simd}};
enum { CancellationPoint = 0, DeclareReduction = 1, TargetData = 2 };
Token Tok = P.getCurToken();
OpenMPDirectiveKindExWrapper DKind =
@@ -187,8 +200,8 @@ static OpenMPDirectiveKindExWrapper parseOpenMPDirectiveKind(Parser &P) {
if (DKind == OMPD_unknown)
return OMPD_unknown;
- for (unsigned I = 0; I < llvm::array_lengthof(F); ++I) {
- if (DKind != F[I][0])
+ for (const auto &I : F) {
+ if (DKind != I[0])
continue;
Tok = P.getPreprocessor().LookAhead(0);
@@ -199,9 +212,9 @@ static OpenMPDirectiveKindExWrapper parseOpenMPDirectiveKind(Parser &P) {
if (SDKind == OMPD_unknown)
continue;
- if (SDKind == F[I][1]) {
+ if (SDKind == I[1]) {
P.ConsumeToken();
- DKind = F[I][2];
+ DKind = I[2];
}
}
return unsigned(DKind) < llvm::omp::Directive_enumSize
@@ -248,7 +261,7 @@ static DeclarationName parseOpenMPReductionId(Parser &P) {
case tok::identifier: // identifier
if (!WithOperator)
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
P.Diag(Tok.getLocation(), diag::err_omp_expected_reduction_identifier);
P.SkipUntil(tok::colon, tok::r_paren, tok::annot_pragma_openmp_end,
@@ -464,17 +477,16 @@ void Parser::ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm) {
T.consumeOpen();
ExprVector Exprs;
- CommaLocsTy CommaLocs;
SourceLocation LParLoc = T.getOpenLocation();
auto RunSignatureHelp = [this, OmpPrivParm, LParLoc, &Exprs]() {
QualType PreferredType = Actions.ProduceConstructorSignatureHelp(
- getCurScope(), OmpPrivParm->getType()->getCanonicalTypeInternal(),
- OmpPrivParm->getLocation(), Exprs, LParLoc);
+ OmpPrivParm->getType()->getCanonicalTypeInternal(),
+ OmpPrivParm->getLocation(), Exprs, LParLoc, /*Braced=*/false);
CalledSignatureHelp = true;
return PreferredType;
};
- if (ParseExpressionList(Exprs, CommaLocs, [&] {
+ if (ParseExpressionList(Exprs, [&] {
PreferredType.enterFunctionArgument(Tok.getLocation(),
RunSignatureHelp);
})) {
@@ -488,9 +500,6 @@ void Parser::ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm) {
if (!T.consumeClose())
RLoc = T.getCloseLocation();
- assert(!Exprs.empty() && Exprs.size() - 1 == CommaLocs.size() &&
- "Unexpected number of commas!");
-
ExprResult Initializer =
Actions.ActOnParenListExpr(T.getOpenLocation(), RLoc, Exprs);
Actions.AddInitializerToDecl(OmpPrivParm, Initializer.get(),
@@ -634,7 +643,7 @@ TypeResult Parser::parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
// Parse the declarator.
DeclaratorContext Context = DeclaratorContext::Prototype;
- Declarator DeclaratorInfo(DS, Context);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(), Context);
ParseDeclarator(DeclaratorInfo);
Range = DeclaratorInfo.getSourceRange();
if (DeclaratorInfo.getIdentifier() == nullptr) {
@@ -739,7 +748,7 @@ static bool parseDeclareSimdClauses(
OpenMPClauseKind CKind = getOpenMPClauseKind(ClauseName);
if (CKind == OMPC_uniform || CKind == OMPC_aligned ||
CKind == OMPC_linear) {
- Parser::OpenMPVarListDataTy Data;
+ Sema::OpenMPVarListDataTy Data;
SmallVectorImpl<Expr *> *Vars = &Uniforms;
if (CKind == OMPC_aligned) {
Vars = &Aligneds;
@@ -828,7 +837,7 @@ static StringRef stringLiteralParser(Parser &P) {
static StringRef getNameFromIdOrString(Parser &P, Token &Tok,
OMPContextLvl Lvl) {
- if (Tok.is(tok::identifier)) {
+ if (Tok.is(tok::identifier) || Tok.is(tok::kw_for)) {
llvm::SmallString<16> Buffer;
StringRef Name = P.getPreprocessor().getSpelling(Tok, Buffer);
(void)P.ConsumeToken();
@@ -952,6 +961,10 @@ static bool checkExtensionProperty(Parser &P, SourceLocation Loc,
TraitProperty::implementation_extension_allow_templates)
return true;
+ if (TIProperty.Kind ==
+ TraitProperty::implementation_extension_bind_to_declaration)
+ return true;
+
auto IsMatchExtension = [](OMPTraitProperty &TP) {
return (TP.Kind ==
llvm::omp::TraitProperty::implementation_extension_match_all ||
@@ -1402,26 +1415,129 @@ void Parser::ParseOMPDeclareVariantClauses(Parser::DeclGroupPtrTy Ptr,
OMPTraitInfo *ParentTI = Actions.getOMPTraitInfoForSurroundingScope();
ASTContext &ASTCtx = Actions.getASTContext();
OMPTraitInfo &TI = ASTCtx.getNewOMPTraitInfo();
- if (parseOMPDeclareVariantMatchClause(Loc, TI, ParentTI))
- return;
+ SmallVector<Expr *, 6> AdjustNothing;
+ SmallVector<Expr *, 6> AdjustNeedDevicePtr;
+ SmallVector<OMPInteropInfo, 3> AppendArgs;
+ SourceLocation AdjustArgsLoc, AppendArgsLoc;
+
+ // At least one clause is required.
+ if (Tok.is(tok::annot_pragma_openmp_end)) {
+ Diag(Tok.getLocation(), diag::err_omp_declare_variant_wrong_clause)
+ << (getLangOpts().OpenMP < 51 ? 0 : 1);
+ }
+
+ bool IsError = false;
+ while (Tok.isNot(tok::annot_pragma_openmp_end)) {
+ OpenMPClauseKind CKind = Tok.isAnnotation()
+ ? OMPC_unknown
+ : getOpenMPClauseKind(PP.getSpelling(Tok));
+ if (!isAllowedClauseForDirective(OMPD_declare_variant, CKind,
+ getLangOpts().OpenMP)) {
+ Diag(Tok.getLocation(), diag::err_omp_declare_variant_wrong_clause)
+ << (getLangOpts().OpenMP < 51 ? 0 : 1);
+ IsError = true;
+ }
+ if (!IsError) {
+ switch (CKind) {
+ case OMPC_match:
+ IsError = parseOMPDeclareVariantMatchClause(Loc, TI, ParentTI);
+ break;
+ case OMPC_adjust_args: {
+ AdjustArgsLoc = Tok.getLocation();
+ ConsumeToken();
+ Sema::OpenMPVarListDataTy Data;
+ SmallVector<Expr *> Vars;
+ IsError = ParseOpenMPVarList(OMPD_declare_variant, OMPC_adjust_args,
+ Vars, Data);
+ if (!IsError)
+ llvm::append_range(Data.ExtraModifier == OMPC_ADJUST_ARGS_nothing
+ ? AdjustNothing
+ : AdjustNeedDevicePtr,
+ Vars);
+ break;
+ }
+ case OMPC_append_args:
+ if (!AppendArgs.empty()) {
+ Diag(AppendArgsLoc, diag::err_omp_more_one_clause)
+ << getOpenMPDirectiveName(OMPD_declare_variant)
+ << getOpenMPClauseName(CKind) << 0;
+ IsError = true;
+ }
+ if (!IsError) {
+ AppendArgsLoc = Tok.getLocation();
+ ConsumeToken();
+ IsError = parseOpenMPAppendArgs(AppendArgs);
+ }
+ break;
+ default:
+ llvm_unreachable("Unexpected clause for declare variant.");
+ }
+ }
+ if (IsError) {
+ while (!SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch))
+ ;
+ // Skip the last annot_pragma_openmp_end.
+ (void)ConsumeAnnotationToken();
+ return;
+ }
+ // Skip ',' if any.
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+ }
- Optional<std::pair<FunctionDecl *, Expr *>> DeclVarData =
+ std::optional<std::pair<FunctionDecl *, Expr *>> DeclVarData =
Actions.checkOpenMPDeclareVariantFunction(
- Ptr, AssociatedFunction.get(), TI,
+ Ptr, AssociatedFunction.get(), TI, AppendArgs.size(),
SourceRange(Loc, Tok.getLocation()));
- // Skip last tokens.
- while (Tok.isNot(tok::annot_pragma_openmp_end))
- ConsumeAnyToken();
if (DeclVarData && !TI.Sets.empty())
Actions.ActOnOpenMPDeclareVariantDirective(
- DeclVarData->first, DeclVarData->second, TI,
+ DeclVarData->first, DeclVarData->second, TI, AdjustNothing,
+ AdjustNeedDevicePtr, AppendArgs, AdjustArgsLoc, AppendArgsLoc,
SourceRange(Loc, Tok.getLocation()));
// Skip the last annot_pragma_openmp_end.
(void)ConsumeAnnotationToken();
}
+bool Parser::parseOpenMPAppendArgs(
+ SmallVectorImpl<OMPInteropInfo> &InteropInfos) {
+ bool HasError = false;
+ // Parse '('.
+ BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
+ if (T.expectAndConsume(diag::err_expected_lparen_after,
+ getOpenMPClauseName(OMPC_append_args).data()))
+ return true;
+
+ // Parse the list of append-ops, each is;
+ // interop(interop-type[,interop-type]...)
+ while (Tok.is(tok::identifier) && Tok.getIdentifierInfo()->isStr("interop")) {
+ ConsumeToken();
+ BalancedDelimiterTracker IT(*this, tok::l_paren,
+ tok::annot_pragma_openmp_end);
+ if (IT.expectAndConsume(diag::err_expected_lparen_after, "interop"))
+ return true;
+
+ OMPInteropInfo InteropInfo;
+ if (ParseOMPInteropInfo(InteropInfo, OMPC_append_args))
+ HasError = true;
+ else
+ InteropInfos.push_back(InteropInfo);
+
+ IT.consumeClose();
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+ }
+ if (!HasError && InteropInfos.empty()) {
+ HasError = true;
+ Diag(Tok.getLocation(), diag::err_omp_unexpected_append_op);
+ SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ }
+ HasError = T.consumeClose() || HasError;
+ return HasError;
+}
+
bool Parser::parseOMPDeclareVariantMatchClause(SourceLocation Loc,
OMPTraitInfo &TI,
OMPTraitInfo *ParentTI) {
@@ -1431,24 +1547,15 @@ bool Parser::parseOMPDeclareVariantMatchClause(SourceLocation Loc,
: getOpenMPClauseKind(PP.getSpelling(Tok));
if (CKind != OMPC_match) {
Diag(Tok.getLocation(), diag::err_omp_declare_variant_wrong_clause)
- << getOpenMPClauseName(OMPC_match);
- while (!SkipUntil(tok::annot_pragma_openmp_end, Parser::StopBeforeMatch))
- ;
- // Skip the last annot_pragma_openmp_end.
- (void)ConsumeAnnotationToken();
+ << (getLangOpts().OpenMP < 51 ? 0 : 1);
return true;
}
(void)ConsumeToken();
// Parse '('.
BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
if (T.expectAndConsume(diag::err_expected_lparen_after,
- getOpenMPClauseName(OMPC_match).data())) {
- while (!SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch))
- ;
- // Skip the last annot_pragma_openmp_end.
- (void)ConsumeAnnotationToken();
+ getOpenMPClauseName(OMPC_match).data()))
return true;
- }
// Parse inner context selectors.
parseOMPContextSelectors(Loc, TI);
@@ -1518,6 +1625,42 @@ bool Parser::parseOMPDeclareVariantMatchClause(SourceLocation Loc,
return false;
}
+/// <clause> [clause[ [,] clause] ... ]
+///
+/// clauses: for error directive
+/// 'at' '(' compilation | execution ')'
+/// 'severity' '(' fatal | warning ')'
+/// 'message' '(' msg-string ')'
+/// ....
+void Parser::ParseOpenMPClauses(OpenMPDirectiveKind DKind,
+ SmallVectorImpl<OMPClause *> &Clauses,
+ SourceLocation Loc) {
+ SmallVector<llvm::PointerIntPair<OMPClause *, 1, bool>,
+ llvm::omp::Clause_enumSize + 1>
+ FirstClauses(llvm::omp::Clause_enumSize + 1);
+ while (Tok.isNot(tok::annot_pragma_openmp_end)) {
+ OpenMPClauseKind CKind = Tok.isAnnotation()
+ ? OMPC_unknown
+ : getOpenMPClauseKind(PP.getSpelling(Tok));
+ Actions.StartOpenMPClause(CKind);
+ OMPClause *Clause = ParseOpenMPClause(
+ DKind, CKind, !FirstClauses[unsigned(CKind)].getInt());
+ SkipUntil(tok::comma, tok::identifier, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ FirstClauses[unsigned(CKind)].setInt(true);
+ if (Clause != nullptr)
+ Clauses.push_back(Clause);
+ if (Tok.is(tok::annot_pragma_openmp_end)) {
+ Actions.EndOpenMPClause();
+ break;
+ }
+ // Skip ',' if any.
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+ Actions.EndOpenMPClause();
+ }
+}
+
/// `omp assumes` or `omp begin/end assumes` <clause> [[,]<clause>]...
/// where
///
@@ -1532,7 +1675,7 @@ bool Parser::parseOMPDeclareVariantMatchClause(SourceLocation Loc,
///
void Parser::ParseOpenMPAssumesDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc) {
- SmallVector<StringRef, 4> Assumptions;
+ SmallVector<std::string, 4> Assumptions;
bool SkippedClauses = false;
auto SkipBraces = [&](llvm::StringRef Spelling, bool IssueNote) {
@@ -1599,9 +1742,11 @@ void Parser::ParseOpenMPAssumesDirective(OpenMPDirectiveKind DKind,
}
assert(II && "Expected an identifier clause!");
- StringRef Assumption = II->getName();
+ std::string Assumption = II->getName().str();
if (ACMI.StartsWith)
- Assumption = Assumption.substr(ACMI.Identifier.size());
+ Assumption = "ompx_" + Assumption.substr(ACMI.Identifier.size());
+ else
+ Assumption = "omp_" + Assumption;
Assumptions.push_back(Assumption);
}
@@ -1618,7 +1763,7 @@ void Parser::ParseOpenMPEndAssumesDirective(SourceLocation Loc) {
/// Parsing of simple OpenMP clauses like 'default' or 'proc_bind'.
///
/// default-clause:
-/// 'default' '(' 'none' | 'shared' | 'firstprivate' ')
+/// 'default' '(' 'none' | 'shared' | 'private' | 'firstprivate' ')
///
/// proc_bind-clause:
/// 'proc_bind' '(' 'master' | 'close' | 'spread' ')
@@ -1638,7 +1783,7 @@ struct SimpleClauseData {
};
} // anonymous namespace
-static Optional<SimpleClauseData>
+static std::optional<SimpleClauseData>
parseOpenMPSimpleClause(Parser &P, OpenMPClauseKind Kind) {
const Token &Tok = P.getCurToken();
SourceLocation Loc = Tok.getLocation();
@@ -1647,11 +1792,11 @@ parseOpenMPSimpleClause(Parser &P, OpenMPClauseKind Kind) {
BalancedDelimiterTracker T(P, tok::l_paren, tok::annot_pragma_openmp_end);
if (T.expectAndConsume(diag::err_expected_lparen_after,
getOpenMPClauseName(Kind).data()))
- return llvm::None;
+ return std::nullopt;
unsigned Type = getOpenMPSimpleClauseType(
Kind, Tok.isAnnotation() ? "" : P.getPreprocessor().getSpelling(Tok),
- P.getLangOpts().OpenMP);
+ P.getLangOpts());
SourceLocation TypeLoc = Tok.getLocation();
if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::comma) &&
Tok.isNot(tok::annot_pragma_openmp_end))
@@ -1668,50 +1813,81 @@ parseOpenMPSimpleClause(Parser &P, OpenMPClauseKind Kind) {
void Parser::ParseOMPDeclareTargetClauses(
Sema::DeclareTargetContextInfo &DTCI) {
SourceLocation DeviceTypeLoc;
- bool RequiresToOrLinkClause = false;
- bool HasToOrLinkClause = false;
+ bool RequiresToOrLinkOrIndirectClause = false;
+ bool HasToOrLinkOrIndirectClause = false;
while (Tok.isNot(tok::annot_pragma_openmp_end)) {
OMPDeclareTargetDeclAttr::MapTypeTy MT = OMPDeclareTargetDeclAttr::MT_To;
bool HasIdentifier = Tok.is(tok::identifier);
if (HasIdentifier) {
// If we see any clause we need a to or link clause.
- RequiresToOrLinkClause = true;
+ RequiresToOrLinkOrIndirectClause = true;
IdentifierInfo *II = Tok.getIdentifierInfo();
StringRef ClauseName = II->getName();
bool IsDeviceTypeClause =
getLangOpts().OpenMP >= 50 &&
getOpenMPClauseKind(ClauseName) == OMPC_device_type;
- bool IsToOrLinkClause =
+ bool IsIndirectClause = getLangOpts().OpenMP >= 51 &&
+ getOpenMPClauseKind(ClauseName) == OMPC_indirect;
+ if (DTCI.Indirect && IsIndirectClause) {
+ Diag(Tok, diag::err_omp_more_one_clause)
+ << getOpenMPDirectiveName(OMPD_declare_target)
+ << getOpenMPClauseName(OMPC_indirect) << 0;
+ break;
+ }
+ bool IsToEnterOrLinkClause =
OMPDeclareTargetDeclAttr::ConvertStrToMapTypeTy(ClauseName, MT);
- assert((!IsDeviceTypeClause || !IsToOrLinkClause) && "Cannot be both!");
+ assert((!IsDeviceTypeClause || !IsToEnterOrLinkClause) &&
+ "Cannot be both!");
- if (!IsDeviceTypeClause && DTCI.Kind == OMPD_begin_declare_target) {
- Diag(Tok, diag::err_omp_declare_target_unexpected_clause)
- << ClauseName << 0;
+ // Starting with OpenMP 5.2 the `to` clause has been replaced by the
+ // `enter` clause.
+ if (getLangOpts().OpenMP >= 52 && ClauseName == "to") {
+ Diag(Tok, diag::err_omp_declare_target_unexpected_to_clause);
+ break;
+ }
+ if (getLangOpts().OpenMP <= 51 && ClauseName == "enter") {
+ Diag(Tok, diag::err_omp_declare_target_unexpected_enter_clause);
break;
}
- if (!IsDeviceTypeClause && !IsToOrLinkClause) {
+
+ if (!IsDeviceTypeClause && !IsIndirectClause &&
+ DTCI.Kind == OMPD_begin_declare_target) {
Diag(Tok, diag::err_omp_declare_target_unexpected_clause)
- << ClauseName << (getLangOpts().OpenMP >= 50 ? 2 : 1);
+ << ClauseName << (getLangOpts().OpenMP >= 51 ? 3 : 0);
+ break;
+ }
+ if (!IsDeviceTypeClause && !IsToEnterOrLinkClause && !IsIndirectClause) {
+ Diag(Tok, getLangOpts().OpenMP >= 52
+ ? diag::err_omp_declare_target_unexpected_clause_52
+ : diag::err_omp_declare_target_unexpected_clause)
+ << ClauseName
+ << (getLangOpts().OpenMP >= 51
+ ? 4
+ : getLangOpts().OpenMP >= 50 ? 2 : 1);
break;
}
- if (IsToOrLinkClause)
- HasToOrLinkClause = true;
+ if (IsToEnterOrLinkClause || IsIndirectClause)
+ HasToOrLinkOrIndirectClause = true;
+ if (IsIndirectClause) {
+ if (!ParseOpenMPIndirectClause(DTCI, /*ParseOnly*/ false))
+ break;
+ continue;
+ }
// Parse 'device_type' clause and go to next clause if any.
if (IsDeviceTypeClause) {
- Optional<SimpleClauseData> DevTypeData =
+ std::optional<SimpleClauseData> DevTypeData =
parseOpenMPSimpleClause(*this, OMPC_device_type);
- if (DevTypeData.hasValue()) {
+ if (DevTypeData) {
if (DeviceTypeLoc.isValid()) {
// We already saw another device_type clause, diagnose it.
- Diag(DevTypeData.getValue().Loc,
+ Diag(DevTypeData->Loc,
diag::warn_omp_more_one_device_type_clause);
break;
}
- switch (static_cast<OpenMPDeviceType>(DevTypeData.getValue().Type)) {
+ switch (static_cast<OpenMPDeviceType>(DevTypeData->Type)) {
case OMPC_DEVICE_TYPE_any:
DTCI.DT = OMPDeclareTargetDeclAttr::DT_Any;
break;
@@ -1724,7 +1900,7 @@ void Parser::ParseOMPDeclareTargetClauses(
case OMPC_DEVICE_TYPE_unknown:
llvm_unreachable("Unexpected device_type");
}
- DeviceTypeLoc = DevTypeData.getValue().Loc;
+ DeviceTypeLoc = DevTypeData->Loc;
}
continue;
}
@@ -1756,7 +1932,9 @@ void Parser::ParseOMPDeclareTargetClauses(
}
if (!HasIdentifier && Tok.isNot(tok::annot_pragma_openmp_end)) {
Diag(Tok,
- diag::err_omp_declare_target_unexpected_clause_after_implicit_to);
+ getLangOpts().OpenMP >= 52
+ ? diag::err_omp_declare_target_wrong_clause_after_implicit_enter
+ : diag::err_omp_declare_target_wrong_clause_after_implicit_to);
break;
}
@@ -1765,10 +1943,17 @@ void Parser::ParseOMPDeclareTargetClauses(
ConsumeToken();
}
+ if (DTCI.Indirect && DTCI.DT != OMPDeclareTargetDeclAttr::DT_Any)
+ Diag(DeviceTypeLoc, diag::err_omp_declare_target_indirect_device_type);
+
// For declare target require at least 'to' or 'link' to be present.
- if (DTCI.Kind == OMPD_declare_target && RequiresToOrLinkClause &&
- !HasToOrLinkClause)
- Diag(DTCI.Loc, diag::err_omp_declare_target_missing_to_or_link_clause);
+ if (DTCI.Kind == OMPD_declare_target && RequiresToOrLinkOrIndirectClause &&
+ !HasToOrLinkOrIndirectClause)
+ Diag(DTCI.Loc,
+ getLangOpts().OpenMP >= 52
+ ? diag::err_omp_declare_target_missing_enter_or_link_clause
+ : diag::err_omp_declare_target_missing_to_or_link_clause)
+ << (getLangOpts().OpenMP >= 51 ? 1 : 0);
SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
}
@@ -1855,7 +2040,7 @@ void Parser::ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind BeginDKind,
/// annot_pragma_openmp_end
///
Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
- AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, bool Delayed,
+ AccessSpecifier &AS, ParsedAttributes &Attrs, bool Delayed,
DeclSpec::TST TagType, Decl *Tag) {
assert(Tok.isOneOf(tok::annot_pragma_openmp, tok::annot_attr_openmp) &&
"Not an OpenMP directive!");
@@ -1992,6 +2177,14 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
ConsumeAnnotationToken();
return Actions.ActOnOpenMPRequiresDirective(StartLoc, Clauses);
}
+ case OMPD_error: {
+ SmallVector<OMPClause *, 1> Clauses;
+ SourceLocation StartLoc = ConsumeToken();
+ ParseOpenMPClauses(DKind, Clauses, StartLoc);
+ Actions.ActOnOpenMPErrorDirective(Clauses, StartLoc, SourceLocation(),
+ /*InExContext = */ false);
+ break;
+ }
case OMPD_assumes:
case OMPD_begin_assumes:
ParseOpenMPAssumesDirective(DKind, ConsumeToken());
@@ -2027,8 +2220,13 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
OMPTraitInfo *ParentTI = Actions.getOMPTraitInfoForSurroundingScope();
ASTContext &ASTCtx = Actions.getASTContext();
OMPTraitInfo &TI = ASTCtx.getNewOMPTraitInfo();
- if (parseOMPDeclareVariantMatchClause(Loc, TI, ParentTI))
+ if (parseOMPDeclareVariantMatchClause(Loc, TI, ParentTI)) {
+ while (!SkipUntil(tok::annot_pragma_openmp_end, Parser::StopBeforeMatch))
+ ;
+ // Skip the last annot_pragma_openmp_end.
+ (void)ConsumeAnnotationToken();
break;
+ }
// Skip last tokens.
skipUntilPragmaOpenMPEnd(OMPD_begin_declare_variant);
@@ -2038,14 +2236,16 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
VariantMatchInfo VMI;
TI.getAsVariantMatchInfo(ASTCtx, VMI);
- std::function<void(StringRef)> DiagUnknownTrait = [this, Loc](
- StringRef ISATrait) {
- // TODO Track the selector locations in a way that is accessible here to
- // improve the diagnostic location.
- Diag(Loc, diag::warn_unknown_begin_declare_variant_isa_trait) << ISATrait;
- };
- TargetOMPContext OMPCtx(ASTCtx, std::move(DiagUnknownTrait),
- /* CurrentFunctionDecl */ nullptr);
+ std::function<void(StringRef)> DiagUnknownTrait =
+ [this, Loc](StringRef ISATrait) {
+ // TODO Track the selector locations in a way that is accessible here
+ // to improve the diagnostic location.
+ Diag(Loc, diag::warn_unknown_declare_variant_isa_trait) << ISATrait;
+ };
+ TargetOMPContext OMPCtx(
+ ASTCtx, std::move(DiagUnknownTrait),
+ /* CurrentFunctionDecl */ nullptr,
+ /* ConstructTraits */ ArrayRef<llvm::omp::TraitProperty>());
if (isVariantApplicableInContext(VMI, OMPCtx, /* DeviceSetOnly */ true)) {
Actions.ActOnOpenMPBeginDeclareVariant(Loc, TI);
@@ -2106,9 +2306,10 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
// Here we expect to see some function declaration.
if (AS == AS_none) {
assert(TagType == DeclSpec::TST_unspecified);
+ ParsedAttributes EmptyDeclSpecAttrs(AttrFactory);
MaybeParseCXX11Attributes(Attrs);
ParsingDeclSpec PDS(*this);
- Ptr = ParseExternalDeclaration(Attrs, &PDS);
+ Ptr = ParseExternalDeclaration(Attrs, EmptyDeclSpecAttrs, &PDS);
} else {
Ptr =
ParseCXXClassMemberDeclarationWithPragmas(AS, Attrs, TagType, Tag);
@@ -2130,11 +2331,12 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_declare_target: {
SourceLocation DTLoc = ConsumeAnyToken();
bool HasClauses = Tok.isNot(tok::annot_pragma_openmp_end);
- bool HasImplicitMappings =
- DKind == OMPD_begin_declare_target || !HasClauses;
Sema::DeclareTargetContextInfo DTCI(DKind, DTLoc);
if (HasClauses)
ParseOMPDeclareTargetClauses(DTCI);
+ bool HasImplicitMappings = DKind == OMPD_begin_declare_target ||
+ !HasClauses ||
+ (DTCI.ExplicitlyMapped.empty() && DTCI.Indirect);
// Skip the last annot_pragma_openmp_end.
ConsumeAnyToken();
@@ -2188,6 +2390,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_parallel_for_simd:
case OMPD_parallel_sections:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
case OMPD_atomic:
case OMPD_target:
case OMPD_teams:
@@ -2204,6 +2407,10 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_master_taskloop_simd:
case OMPD_parallel_master_taskloop:
case OMPD_parallel_master_taskloop_simd:
+ case OMPD_masked_taskloop:
+ case OMPD_masked_taskloop_simd:
+ case OMPD_parallel_masked_taskloop:
+ case OMPD_parallel_masked_taskloop_simd:
case OMPD_distribute:
case OMPD_target_update:
case OMPD_distribute_parallel_for:
@@ -2211,6 +2418,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_distribute_simd:
case OMPD_target_parallel_for_simd:
case OMPD_target_simd:
+ case OMPD_scope:
case OMPD_teams_distribute:
case OMPD_teams_distribute_simd:
case OMPD_teams_distribute_parallel_for_simd:
@@ -2222,6 +2430,12 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_target_teams_distribute_simd:
case OMPD_dispatch:
case OMPD_masked:
+ case OMPD_metadirective:
+ case OMPD_loop:
+ case OMPD_teams_loop:
+ case OMPD_target_teams_loop:
+ case OMPD_parallel_loop:
+ case OMPD_target_parallel_loop:
Diag(Tok, diag::err_omp_unexpected_directive)
<< 1 << getOpenMPDirectiveName(DKind);
break;
@@ -2259,8 +2473,8 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
/// annot_pragma_openmp 'parallel' | 'simd' | 'for' | 'sections' |
/// 'section' | 'single' | 'master' | 'critical' [ '(' <name> ')' ] |
/// 'parallel for' | 'parallel sections' | 'parallel master' | 'task' |
-/// 'taskyield' | 'barrier' | 'taskwait' | 'flush' | 'ordered' |
-/// 'atomic' | 'for simd' | 'parallel for simd' | 'target' | 'target
+/// 'taskyield' | 'barrier' | 'taskwait' | 'flush' | 'ordered' | 'error'
+/// | 'atomic' | 'for simd' | 'parallel for simd' | 'target' | 'target
/// data' | 'taskgroup' | 'teams' | 'taskloop' | 'taskloop simd' |
/// 'master taskloop' | 'master taskloop simd' | 'parallel master
/// taskloop' | 'parallel master taskloop simd' | 'distribute' | 'target
@@ -2271,13 +2485,14 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
/// simd' | 'teams distribute parallel for simd' | 'teams distribute
/// parallel for' | 'target teams' | 'target teams distribute' | 'target
/// teams distribute parallel for' | 'target teams distribute parallel
-/// for simd' | 'target teams distribute simd' | 'masked' {clause}
-/// annot_pragma_openmp_end
+/// for simd' | 'target teams distribute simd' | 'masked' |
+/// 'parallel masked' {clause} annot_pragma_openmp_end
///
-StmtResult
-Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
- assert(Tok.isOneOf(tok::annot_pragma_openmp, tok::annot_attr_openmp) &&
- "Not an OpenMP directive!");
+StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
+ ParsedStmtContext StmtCtx, bool ReadDirectiveWithinMetadirective) {
+ if (!ReadDirectiveWithinMetadirective)
+ assert(Tok.isOneOf(tok::annot_pragma_openmp, tok::annot_attr_openmp) &&
+ "Not an OpenMP directive!");
ParsingOpenMPDirectiveRAII DirScope(*this);
ParenBraceBracketBalancer BalancerRAIIObj(*this);
SmallVector<OMPClause *, 5> Clauses;
@@ -2286,8 +2501,15 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
FirstClauses(llvm::omp::Clause_enumSize + 1);
unsigned ScopeFlags = Scope::FnScope | Scope::DeclScope |
Scope::CompoundStmtScope | Scope::OpenMPDirectiveScope;
- SourceLocation Loc = ConsumeAnnotationToken(), EndLoc;
+ SourceLocation Loc = ReadDirectiveWithinMetadirective
+ ? Tok.getLocation()
+ : ConsumeAnnotationToken(),
+ EndLoc;
OpenMPDirectiveKind DKind = parseOpenMPDirectiveKind(*this);
+ if (ReadDirectiveWithinMetadirective && DKind == OMPD_unknown) {
+ Diag(Tok, diag::err_omp_unknown_directive);
+ return StmtError();
+ }
OpenMPDirectiveKind CancelRegion = OMPD_unknown;
// Name of critical directive.
DeclarationNameInfo DirName;
@@ -2295,9 +2517,163 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
bool HasAssociatedStatement = true;
switch (DKind) {
+ case OMPD_nothing:
+ ConsumeToken();
+ // If we are parsing the directive within a metadirective, the directive
+ // ends with a ')'.
+ if (ReadDirectiveWithinMetadirective && Tok.is(tok::r_paren))
+ while (Tok.isNot(tok::annot_pragma_openmp_end))
+ ConsumeAnyToken();
+ else
+ skipUntilPragmaOpenMPEnd(DKind);
+ if (Tok.is(tok::annot_pragma_openmp_end))
+ ConsumeAnnotationToken();
+ // return an empty statement
+ return StmtEmpty();
+ case OMPD_metadirective: {
+ ConsumeToken();
+ SmallVector<VariantMatchInfo, 4> VMIs;
+
+ // First iteration of parsing all clauses of metadirective.
+ // This iteration only parses and collects all context selector ignoring the
+ // associated directives.
+ TentativeParsingAction TPA(*this);
+ ASTContext &ASTContext = Actions.getASTContext();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren,
+ tok::annot_pragma_openmp_end);
+ while (Tok.isNot(tok::annot_pragma_openmp_end)) {
+ OpenMPClauseKind CKind = Tok.isAnnotation()
+ ? OMPC_unknown
+ : getOpenMPClauseKind(PP.getSpelling(Tok));
+ SourceLocation Loc = ConsumeToken();
+
+ // Parse '('.
+ if (T.expectAndConsume(diag::err_expected_lparen_after,
+ getOpenMPClauseName(CKind).data()))
+ return Directive;
+
+ OMPTraitInfo &TI = Actions.getASTContext().getNewOMPTraitInfo();
+ if (CKind == OMPC_when) {
+ // parse and get OMPTraitInfo to pass to the When clause
+ parseOMPContextSelectors(Loc, TI);
+ if (TI.Sets.size() == 0) {
+ Diag(Tok, diag::err_omp_expected_context_selector) << "when clause";
+ TPA.Commit();
+ return Directive;
+ }
+
+ // Parse ':'
+ if (Tok.is(tok::colon))
+ ConsumeAnyToken();
+ else {
+ Diag(Tok, diag::err_omp_expected_colon) << "when clause";
+ TPA.Commit();
+ return Directive;
+ }
+ }
+ // Skip Directive for now. We will parse directive in the second iteration
+ int paren = 0;
+ while (Tok.isNot(tok::r_paren) || paren != 0) {
+ if (Tok.is(tok::l_paren))
+ paren++;
+ if (Tok.is(tok::r_paren))
+ paren--;
+ if (Tok.is(tok::annot_pragma_openmp_end)) {
+ Diag(Tok, diag::err_omp_expected_punc)
+ << getOpenMPClauseName(CKind) << 0;
+ TPA.Commit();
+ return Directive;
+ }
+ ConsumeAnyToken();
+ }
+ // Parse ')'
+ if (Tok.is(tok::r_paren))
+ T.consumeClose();
+
+ VariantMatchInfo VMI;
+ TI.getAsVariantMatchInfo(ASTContext, VMI);
+
+ VMIs.push_back(VMI);
+ }
+
+ TPA.Revert();
+ // End of the first iteration. Parser is reset to the start of metadirective
+
+ std::function<void(StringRef)> DiagUnknownTrait =
+ [this, Loc](StringRef ISATrait) {
+ // TODO Track the selector locations in a way that is accessible here
+ // to improve the diagnostic location.
+ Diag(Loc, diag::warn_unknown_declare_variant_isa_trait) << ISATrait;
+ };
+ TargetOMPContext OMPCtx(ASTContext, std::move(DiagUnknownTrait),
+ /* CurrentFunctionDecl */ nullptr,
+ ArrayRef<llvm::omp::TraitProperty>());
+
+ // A single match is returned for OpenMP 5.0
+ int BestIdx = getBestVariantMatchForContext(VMIs, OMPCtx);
+
+ int Idx = 0;
+ // In OpenMP 5.0 metadirective is either replaced by another directive or
+ // ignored.
+ // TODO: In OpenMP 5.1 generate multiple directives based upon the matches
+ // found by getBestWhenMatchForContext.
+ while (Tok.isNot(tok::annot_pragma_openmp_end)) {
+ // OpenMP 5.0 implementation - Skip to the best index found.
+ if (Idx++ != BestIdx) {
+ ConsumeToken(); // Consume clause name
+ T.consumeOpen(); // Consume '('
+ int paren = 0;
+ // Skip everything inside the clause
+ while (Tok.isNot(tok::r_paren) || paren != 0) {
+ if (Tok.is(tok::l_paren))
+ paren++;
+ if (Tok.is(tok::r_paren))
+ paren--;
+ ConsumeAnyToken();
+ }
+ // Parse ')'
+ if (Tok.is(tok::r_paren))
+ T.consumeClose();
+ continue;
+ }
+
+ OpenMPClauseKind CKind = Tok.isAnnotation()
+ ? OMPC_unknown
+ : getOpenMPClauseKind(PP.getSpelling(Tok));
+ SourceLocation Loc = ConsumeToken();
+
+ // Parse '('.
+ T.consumeOpen();
+
+ // Skip ContextSelectors for when clause
+ if (CKind == OMPC_when) {
+ OMPTraitInfo &TI = Actions.getASTContext().getNewOMPTraitInfo();
+ // parse and skip the ContextSelectors
+ parseOMPContextSelectors(Loc, TI);
+
+ // Parse ':'
+ ConsumeAnyToken();
+ }
+
+ // If no directive is passed, skip in OpenMP 5.0.
+ // TODO: Generate nothing directive from OpenMP 5.1.
+ if (Tok.is(tok::r_paren)) {
+ SkipUntil(tok::annot_pragma_openmp_end);
+ break;
+ }
+
+ // Parse Directive
+ Directive = ParseOpenMPDeclarativeOrExecutableDirective(
+ StmtCtx,
+ /*ReadDirectiveWithinMetadirective=*/true);
+ break;
+ }
+ break;
+ }
case OMPD_threadprivate: {
// FIXME: Should this be permitted in C++?
- if ((StmtCtx & ParsedStmtContext::AllowDeclarationsInC) ==
+ if ((StmtCtx & ParsedStmtContext::AllowStandaloneOpenMPDirectives) ==
ParsedStmtContext()) {
Diag(Tok, diag::err_omp_immediate_directive)
<< getOpenMPDirectiveName(DKind) << 0;
@@ -2316,7 +2692,7 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
}
case OMPD_allocate: {
// FIXME: Should this be permitted in C++?
- if ((StmtCtx & ParsedStmtContext::AllowDeclarationsInC) ==
+ if ((StmtCtx & ParsedStmtContext::AllowStandaloneOpenMPDirectives) ==
ParsedStmtContext()) {
Diag(Tok, diag::err_omp_immediate_directive)
<< getOpenMPDirectiveName(DKind) << 0;
@@ -2387,6 +2763,7 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
case OMPD_depobj:
case OMPD_scan:
case OMPD_taskyield:
+ case OMPD_error:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_cancellation_point:
@@ -2399,10 +2776,14 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
ParsedStmtContext()) {
Diag(Tok, diag::err_omp_immediate_directive)
<< getOpenMPDirectiveName(DKind) << 0;
+ if (DKind == OMPD_error) {
+ SkipUntil(tok::annot_pragma_openmp_end);
+ break;
+ }
}
HasAssociatedStatement = false;
// Fall through for further analysis.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case OMPD_parallel:
case OMPD_simd:
case OMPD_tile:
@@ -2418,6 +2799,7 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
case OMPD_parallel_for_simd:
case OMPD_parallel_sections:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
case OMPD_task:
case OMPD_ordered:
case OMPD_atomic:
@@ -2427,12 +2809,22 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
case OMPD_target_data:
case OMPD_target_parallel:
case OMPD_target_parallel_for:
+ case OMPD_loop:
+ case OMPD_teams_loop:
+ case OMPD_target_teams_loop:
+ case OMPD_parallel_loop:
+ case OMPD_target_parallel_loop:
+ case OMPD_scope:
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
+ case OMPD_masked_taskloop:
case OMPD_master_taskloop_simd:
+ case OMPD_masked_taskloop_simd:
case OMPD_parallel_master_taskloop:
+ case OMPD_parallel_masked_taskloop:
case OMPD_parallel_master_taskloop_simd:
+ case OMPD_parallel_masked_taskloop_simd:
case OMPD_distribute:
case OMPD_distribute_parallel_for:
case OMPD_distribute_parallel_for_simd:
@@ -2486,6 +2878,13 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
Actions.StartOpenMPDSABlock(DKind, DirName, Actions.getCurScope(), Loc);
while (Tok.isNot(tok::annot_pragma_openmp_end)) {
+ // If we are parsing for a directive within a metadirective, the directive
+ // ends with a ')'.
+ if (ReadDirectiveWithinMetadirective && Tok.is(tok::r_paren)) {
+ while (Tok.isNot(tok::annot_pragma_openmp_end))
+ ConsumeAnyToken();
+ break;
+ }
bool HasImplicitClause = false;
if (ImplicitClauseAllowed && Tok.is(tok::l_paren)) {
HasImplicitClause = true;
@@ -2530,17 +2929,20 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
// Consume final annot_pragma_openmp_end.
ConsumeAnnotationToken();
- // OpenMP [2.13.8, ordered Construct, Syntax]
- // If the depend clause is specified, the ordered construct is a stand-alone
- // directive.
- if (DKind == OMPD_ordered && FirstClauses[unsigned(OMPC_depend)].getInt()) {
- if ((StmtCtx & ParsedStmtContext::AllowStandaloneOpenMPDirectives) ==
- ParsedStmtContext()) {
- Diag(Loc, diag::err_omp_immediate_directive)
- << getOpenMPDirectiveName(DKind) << 1
- << getOpenMPClauseName(OMPC_depend);
+ if (DKind == OMPD_ordered) {
+ // If the depend or doacross clause is specified, the ordered construct
+ // is a stand-alone directive.
+ for (auto CK : {OMPC_depend, OMPC_doacross}) {
+ if (FirstClauses[unsigned(CK)].getInt()) {
+ if ((StmtCtx & ParsedStmtContext::AllowStandaloneOpenMPDirectives) ==
+ ParsedStmtContext()) {
+ Diag(Loc, diag::err_omp_immediate_directive)
+ << getOpenMPDirectiveName(DKind) << 1
+ << getOpenMPClauseName(CK);
+ }
+ HasAssociatedStatement = false;
+ }
}
- HasAssociatedStatement = false;
}
if (DKind == OMPD_tile && !FirstClauses[unsigned(OMPC_sizes)].getInt()) {
@@ -2562,15 +2964,14 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
if (AssociatedStmt.isUsable() && isOpenMPLoopDirective(DKind) &&
getLangOpts().OpenMPIRBuilder)
- AssociatedStmt =
- Actions.ActOnOpenMPCanonicalLoop(AssociatedStmt.get());
+ AssociatedStmt = Actions.ActOnOpenMPLoopnest(AssociatedStmt.get());
}
AssociatedStmt = Actions.ActOnOpenMPRegionEnd(AssociatedStmt, Clauses);
} else if (DKind == OMPD_target_update || DKind == OMPD_target_enter_data ||
DKind == OMPD_target_exit_data) {
Actions.ActOnOpenMPRegionStart(DKind, getCurScope());
AssociatedStmt = (Sema::CompoundScopeRAII(Actions),
- Actions.ActOnCompoundStmt(Loc, Loc, llvm::None,
+ Actions.ActOnCompoundStmt(Loc, Loc, std::nullopt,
/*isStmtExpr=*/false));
AssociatedStmt = Actions.ActOnOpenMPRegionEnd(AssociatedStmt, Clauses);
}
@@ -2631,7 +3032,7 @@ bool Parser::ParseOpenMPSimpleVarList(
if (AllowScopeSpecifier && getLangOpts().CPlusPlus &&
ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false, false)) {
+ /*ObjectHasErrors=*/false, false)) {
IsCorrect = false;
SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
StopBeforeMatch);
@@ -2710,8 +3111,13 @@ OMPClause *Parser::ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind) {
return nullptr;
SmallVector<Sema::UsesAllocatorsData, 4> Data;
do {
+ CXXScopeSpec SS;
+ Token Replacement;
ExprResult Allocator =
- getLangOpts().CPlusPlus ? ParseCXXIdExpression() : ParseExpression();
+ getLangOpts().CPlusPlus
+ ? ParseCXXIdExpression()
+ : tryParseCXXIdExpression(SS, /*isAddressOfOperand=*/false,
+ Replacement);
if (Allocator.isInvalid()) {
SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
StopBeforeMatch);
@@ -2751,7 +3157,7 @@ OMPClause *Parser::ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind) {
/// clause:
/// if-clause | final-clause | num_threads-clause | safelen-clause |
/// default-clause | private-clause | firstprivate-clause | shared-clause
-/// | linear-clause | aligned-clause | collapse-clause |
+/// | linear-clause | aligned-clause | collapse-clause | bind-clause |
/// lastprivate-clause | reduction-clause | proc_bind-clause |
/// schedule-clause | copyin-clause | copyprivate-clause | untied-clause |
/// mergeable-clause | flush-clause | read-clause | write-clause |
@@ -2763,7 +3169,8 @@ OMPClause *Parser::ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind) {
/// in_reduction-clause | allocator-clause | allocate-clause |
/// acq_rel-clause | acquire-clause | release-clause | relaxed-clause |
/// depobj-clause | destroy-clause | detach-clause | inclusive-clause |
-/// exclusive-clause | uses_allocators-clause | use_device_addr-clause
+/// exclusive-clause | uses_allocators-clause | use_device_addr-clause |
+/// has_device_addr
///
OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause) {
@@ -2800,6 +3207,9 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_nocontext:
case OMPC_filter:
case OMPC_partial:
+ case OMPC_align:
+ case OMPC_message:
+ case OMPC_ompx_dyn_cgroup_mem:
// OpenMP [2.5, Restrictions]
// At most one num_threads clause can appear on the directive.
// OpenMP [2.8.1, simd construct, Restrictions]
@@ -2825,6 +3235,8 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
// OpenMP 5.1, 2.3.6 dispatch Construct, Restrictions.
// At most one novariants clause can appear on a dispatch directive.
// At most one nocontext clause can appear on a dispatch directive.
+ // OpenMP [5.1, error directive, Restrictions]
+ // At most one message clause can appear on the directive
if (!FirstClause) {
Diag(Tok, diag::err_omp_more_one_clause)
<< getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
@@ -2834,13 +3246,18 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
if ((CKind == OMPC_ordered || CKind == OMPC_partial) &&
PP.LookAhead(/*N=*/0).isNot(tok::l_paren))
Clause = ParseOpenMPClause(CKind, WrongDirective);
+ else if (CKind == OMPC_grainsize || CKind == OMPC_num_tasks)
+ Clause = ParseOpenMPSingleExprWithArgClause(DKind, CKind, WrongDirective);
else
Clause = ParseOpenMPSingleExprClause(CKind, WrongDirective);
break;
+ case OMPC_fail:
case OMPC_default:
case OMPC_proc_bind:
case OMPC_atomic_default_mem_order:
- case OMPC_order:
+ case OMPC_at:
+ case OMPC_severity:
+ case OMPC_bind:
// OpenMP [2.14.3.1, Restrictions]
// Only a single default clause may be specified on a parallel, task or
// teams directive.
@@ -2849,7 +3266,12 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
// OpenMP [5.0, Requires directive, Restrictions]
// At most one atomic_default_mem_order clause can appear
// on the directive
- if (!FirstClause && CKind != OMPC_order) {
+ // OpenMP [5.1, error directive, Restrictions]
+ // At most one at clause can appear on the directive
+ // At most one severity clause can appear on the directive
+ // OpenMP 5.1, 2.11.7 loop Construct, Restrictions.
+ // At most one bind clause can appear on a loop directive.
+ if (!FirstClause) {
Diag(Tok, diag::err_omp_more_one_clause)
<< getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
ErrorFound = true;
@@ -2861,19 +3283,22 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_schedule:
case OMPC_dist_schedule:
case OMPC_defaultmap:
+ case OMPC_order:
// OpenMP [2.7.1, Restrictions, p. 3]
// Only one schedule clause can appear on a loop directive.
// OpenMP 4.5 [2.10.4, Restrictions, p. 106]
// At most one defaultmap clause can appear on the directive.
// OpenMP 5.0 [2.12.5, target construct, Restrictions]
// At most one device clause can appear on the directive.
+ // OpenMP 5.1 [2.11.3, order clause, Restrictions]
+ // At most one order clause may appear on a construct.
if ((getLangOpts().OpenMP < 50 || CKind != OMPC_defaultmap) &&
- !FirstClause) {
+ (CKind != OMPC_order || getLangOpts().OpenMP >= 51) && !FirstClause) {
Diag(Tok, diag::err_omp_more_one_clause)
<< getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
ErrorFound = true;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case OMPC_if:
Clause = ParseOpenMPSingleExprWithArgClause(DKind, CKind, WrongDirective);
break;
@@ -2883,6 +3308,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_read:
case OMPC_write:
case OMPC_capture:
+ case OMPC_compare:
case OMPC_seq_cst:
case OMPC_acq_rel:
case OMPC_acquire:
@@ -2940,11 +3366,17 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_use_device_ptr:
case OMPC_use_device_addr:
case OMPC_is_device_ptr:
+ case OMPC_has_device_addr:
case OMPC_allocate:
case OMPC_nontemporal:
case OMPC_inclusive:
case OMPC_exclusive:
case OMPC_affinity:
+ case OMPC_doacross:
+ case OMPC_enter:
+ if (getLangOpts().OpenMP >= 52 && DKind == OMPD_ordered &&
+ CKind == OMPC_depend)
+ Diag(Tok, diag::warn_omp_depend_in_ordered_deprecated);
Clause = ParseOpenMPVarListClause(DKind, CKind, WrongDirective);
break;
case OMPC_sizes:
@@ -2969,7 +3401,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
Clause = ParseOpenMPClause(CKind, WrongDirective);
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case OMPC_init:
case OMPC_use:
Clause = ParseOpenMPInteropClause(CKind, WrongDirective);
@@ -2986,6 +3418,20 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
<< getOpenMPClauseName(CKind) << getOpenMPDirectiveName(DKind);
SkipUntil(tok::comma, tok::annot_pragma_openmp_end, StopBeforeMatch);
break;
+ case OMPC_ompx_attribute:
+ Clause = ParseOpenMPOMPXAttributesClause(WrongDirective);
+ break;
+ case OMPC_ompx_bare:
+ if (WrongDirective)
+ Diag(Tok, diag::note_ompx_bare_clause)
+ << getOpenMPClauseName(CKind) << "target teams";
+ if (!ErrorFound && !getLangOpts().OpenMPExtensions) {
+ Diag(Tok, diag::err_omp_unexpected_clause_extension_only)
+ << getOpenMPClauseName(CKind) << getOpenMPDirectiveName(DKind);
+ ErrorFound = true;
+ }
+ Clause = ParseOpenMPClause(CKind, WrongDirective);
+ break;
default:
break;
}
@@ -3054,6 +3500,9 @@ ExprResult Parser::ParseOpenMPParensExpr(StringRef ClauseName,
/// detach-clause:
/// 'detach' '(' event-handler-expression ')'
///
+/// align-clause
+/// 'align' '(' positive-integer-constant ')'
+///
OMPClause *Parser::ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly) {
SourceLocation Loc = ConsumeToken();
@@ -3070,6 +3519,131 @@ OMPClause *Parser::ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
return Actions.ActOnOpenMPSingleExprClause(Kind, Val.get(), Loc, LLoc, RLoc);
}
+/// Parse indirect clause for '#pragma omp declare target' directive.
+/// 'indirect' '[' '(' invoked-by-fptr ')' ']'
+/// where invoked-by-fptr is a constant boolean expression that evaluates to
+/// true or false at compile time.
+bool Parser::ParseOpenMPIndirectClause(Sema::DeclareTargetContextInfo &DTCI,
+ bool ParseOnly) {
+ SourceLocation Loc = ConsumeToken();
+ SourceLocation RLoc;
+
+ if (Tok.isNot(tok::l_paren)) {
+ if (ParseOnly)
+ return false;
+ DTCI.Indirect = nullptr;
+ return true;
+ }
+
+ ExprResult Val =
+ ParseOpenMPParensExpr(getOpenMPClauseName(OMPC_indirect), RLoc);
+ if (Val.isInvalid())
+ return false;
+
+ if (ParseOnly)
+ return false;
+
+ if (!Val.get()->isValueDependent() && !Val.get()->isTypeDependent() &&
+ !Val.get()->isInstantiationDependent() &&
+ !Val.get()->containsUnexpandedParameterPack()) {
+ ExprResult Ret = Actions.CheckBooleanCondition(Loc, Val.get());
+ if (Ret.isInvalid())
+ return false;
+ llvm::APSInt Result;
+ Ret = Actions.VerifyIntegerConstantExpression(Val.get(), &Result,
+ Sema::AllowFold);
+ if (Ret.isInvalid())
+ return false;
+ DTCI.Indirect = Val.get();
+ return true;
+ }
+ return false;
+}
+
+/// Parses a comma-separated list of interop-types and a prefer_type list.
+///
+bool Parser::ParseOMPInteropInfo(OMPInteropInfo &InteropInfo,
+ OpenMPClauseKind Kind) {
+ const Token &Tok = getCurToken();
+ bool HasError = false;
+ bool IsTarget = false;
+ bool IsTargetSync = false;
+
+ while (Tok.is(tok::identifier)) {
+ // Currently prefer_type is only allowed with 'init' and it must be first.
+ bool PreferTypeAllowed = Kind == OMPC_init &&
+ InteropInfo.PreferTypes.empty() && !IsTarget &&
+ !IsTargetSync;
+ if (Tok.getIdentifierInfo()->isStr("target")) {
+ // OpenMP 5.1 [2.15.1, interop Construct, Restrictions]
+ // Each interop-type may be specified on an action-clause at most
+ // once.
+ if (IsTarget)
+ Diag(Tok, diag::warn_omp_more_one_interop_type) << "target";
+ IsTarget = true;
+ ConsumeToken();
+ } else if (Tok.getIdentifierInfo()->isStr("targetsync")) {
+ if (IsTargetSync)
+ Diag(Tok, diag::warn_omp_more_one_interop_type) << "targetsync";
+ IsTargetSync = true;
+ ConsumeToken();
+ } else if (Tok.getIdentifierInfo()->isStr("prefer_type") &&
+ PreferTypeAllowed) {
+ ConsumeToken();
+ BalancedDelimiterTracker PT(*this, tok::l_paren,
+ tok::annot_pragma_openmp_end);
+ if (PT.expectAndConsume(diag::err_expected_lparen_after, "prefer_type"))
+ HasError = true;
+
+ while (Tok.isNot(tok::r_paren)) {
+ SourceLocation Loc = Tok.getLocation();
+ ExprResult LHS = ParseCastExpression(AnyCastExpr);
+ ExprResult PTExpr = Actions.CorrectDelayedTyposInExpr(
+ ParseRHSOfBinaryExpression(LHS, prec::Conditional));
+ PTExpr = Actions.ActOnFinishFullExpr(PTExpr.get(), Loc,
+ /*DiscardedValue=*/false);
+ if (PTExpr.isUsable()) {
+ InteropInfo.PreferTypes.push_back(PTExpr.get());
+ } else {
+ HasError = true;
+ SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ }
+
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+ }
+ PT.consumeClose();
+ } else {
+ HasError = true;
+ Diag(Tok, diag::err_omp_expected_interop_type);
+ ConsumeToken();
+ }
+ if (!Tok.is(tok::comma))
+ break;
+ ConsumeToken();
+ }
+
+ if (!HasError && !IsTarget && !IsTargetSync) {
+ Diag(Tok, diag::err_omp_expected_interop_type);
+ HasError = true;
+ }
+
+ if (Kind == OMPC_init) {
+ if (Tok.isNot(tok::colon) && (IsTarget || IsTargetSync))
+ Diag(Tok, diag::warn_pragma_expected_colon) << "interop types";
+ if (Tok.is(tok::colon))
+ ConsumeToken();
+ }
+
+ // As of OpenMP 5.1,there are two interop-types, "target" and
+ // "targetsync". Either or both are allowed for a single interop.
+ InteropInfo.IsTarget = IsTarget;
+ InteropInfo.IsTargetSync = IsTargetSync;
+
+ return HasError;
+}
+
/// Parsing of OpenMP clauses that use an interop-var.
///
/// init-clause:
@@ -3102,78 +3676,10 @@ OMPClause *Parser::ParseOpenMPInteropClause(OpenMPClauseKind Kind,
getOpenMPClauseName(Kind).data()))
return nullptr;
- bool IsTarget = false;
- bool IsTargetSync = false;
- SmallVector<Expr *, 4> Prefs;
-
- if (Kind == OMPC_init) {
-
- // Parse optional interop-modifier.
- if (Tok.is(tok::identifier) && PP.getSpelling(Tok) == "prefer_type") {
- ConsumeToken();
- BalancedDelimiterTracker PT(*this, tok::l_paren,
- tok::annot_pragma_openmp_end);
- if (PT.expectAndConsume(diag::err_expected_lparen_after, "prefer_type"))
- return nullptr;
-
- while (Tok.isNot(tok::r_paren)) {
- SourceLocation Loc = Tok.getLocation();
- ExprResult LHS = ParseCastExpression(AnyCastExpr);
- ExprResult PTExpr = Actions.CorrectDelayedTyposInExpr(
- ParseRHSOfBinaryExpression(LHS, prec::Conditional));
- PTExpr = Actions.ActOnFinishFullExpr(PTExpr.get(), Loc,
- /*DiscardedValue=*/false);
- if (PTExpr.isUsable())
- Prefs.push_back(PTExpr.get());
- else
- SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
- StopBeforeMatch);
-
- if (Tok.is(tok::comma))
- ConsumeToken();
- }
- PT.consumeClose();
- }
-
- if (!Prefs.empty()) {
- if (Tok.is(tok::comma))
- ConsumeToken();
- else
- Diag(Tok, diag::err_omp_expected_punc_after_interop_mod);
- }
-
- // Parse the interop-types.
- bool HasError = false;
- while (Tok.is(tok::identifier)) {
- if (PP.getSpelling(Tok) == "target") {
- // OpenMP 5.1 [2.15.1, interop Construct, Restrictions]
- // Each interop-type may be specified on an action-clause at most
- // once.
- if (IsTarget)
- Diag(Tok, diag::warn_omp_more_one_interop_type) << "target";
- IsTarget = true;
- } else if (PP.getSpelling(Tok) == "targetsync") {
- if (IsTargetSync)
- Diag(Tok, diag::warn_omp_more_one_interop_type) << "targetsync";
- IsTargetSync = true;
- } else {
- HasError = true;
- Diag(Tok, diag::err_omp_expected_interop_type);
- }
- ConsumeToken();
-
- if (!Tok.is(tok::comma))
- break;
- ConsumeToken();
- }
- if (!HasError && !IsTarget && !IsTargetSync)
- Diag(Tok, diag::err_omp_expected_interop_type);
-
- if (Tok.is(tok::colon))
- ConsumeToken();
- else if (IsTarget || IsTargetSync)
- Diag(Tok, diag::warn_pragma_expected_colon) << "interop types";
- }
+ bool InteropError = false;
+ OMPInteropInfo InteropInfo;
+ if (Kind == OMPC_init)
+ InteropError = ParseOMPInteropInfo(InteropInfo, OMPC_init);
// Parse the variable.
SourceLocation VarLoc = Tok.getLocation();
@@ -3189,14 +3695,12 @@ OMPClause *Parser::ParseOpenMPInteropClause(OpenMPClauseKind Kind,
if (!T.consumeClose())
RLoc = T.getCloseLocation();
- if (ParseOnly || !InteropVarExpr.isUsable() ||
- (Kind == OMPC_init && !IsTarget && !IsTargetSync))
+ if (ParseOnly || !InteropVarExpr.isUsable() || InteropError)
return nullptr;
if (Kind == OMPC_init)
- return Actions.ActOnOpenMPInitClause(InteropVarExpr.get(), Prefs, IsTarget,
- IsTargetSync, Loc, T.getOpenLocation(),
- VarLoc, RLoc);
+ return Actions.ActOnOpenMPInitClause(InteropVarExpr.get(), InteropInfo, Loc,
+ T.getOpenLocation(), VarLoc, RLoc);
if (Kind == OMPC_use)
return Actions.ActOnOpenMPUseClause(InteropVarExpr.get(), Loc,
T.getOpenLocation(), VarLoc, RLoc);
@@ -3208,33 +3712,98 @@ OMPClause *Parser::ParseOpenMPInteropClause(OpenMPClauseKind Kind,
llvm_unreachable("Unexpected interop variable clause.");
}
+OMPClause *Parser::ParseOpenMPOMPXAttributesClause(bool ParseOnly) {
+ SourceLocation Loc = ConsumeToken();
+ // Parse '('.
+ BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
+ if (T.expectAndConsume(diag::err_expected_lparen_after,
+ getOpenMPClauseName(OMPC_ompx_attribute).data()))
+ return nullptr;
+
+ ParsedAttributes ParsedAttrs(AttrFactory);
+ ParseAttributes(PAKM_GNU | PAKM_CXX11, ParsedAttrs);
+
+ // Parse ')'.
+ if (T.consumeClose())
+ return nullptr;
+
+ if (ParseOnly)
+ return nullptr;
+
+ SmallVector<Attr *> Attrs;
+ for (const ParsedAttr &PA : ParsedAttrs) {
+ switch (PA.getKind()) {
+ case ParsedAttr::AT_AMDGPUFlatWorkGroupSize:
+ if (!PA.checkExactlyNumArgs(Actions, 2))
+ continue;
+ if (auto *A = Actions.CreateAMDGPUFlatWorkGroupSizeAttr(
+ PA, PA.getArgAsExpr(0), PA.getArgAsExpr(1)))
+ Attrs.push_back(A);
+ continue;
+ case ParsedAttr::AT_AMDGPUWavesPerEU:
+ if (!PA.checkAtLeastNumArgs(Actions, 1) ||
+ !PA.checkAtMostNumArgs(Actions, 2))
+ continue;
+ if (auto *A = Actions.CreateAMDGPUWavesPerEUAttr(
+ PA, PA.getArgAsExpr(0),
+ PA.getNumArgs() > 1 ? PA.getArgAsExpr(1) : nullptr))
+ Attrs.push_back(A);
+ continue;
+ case ParsedAttr::AT_CUDALaunchBounds:
+ if (!PA.checkAtLeastNumArgs(Actions, 1) ||
+ !PA.checkAtMostNumArgs(Actions, 2))
+ continue;
+ if (auto *A = Actions.CreateLaunchBoundsAttr(
+ PA, PA.getArgAsExpr(0),
+ PA.getNumArgs() > 1 ? PA.getArgAsExpr(1) : nullptr,
+ PA.getNumArgs() > 2 ? PA.getArgAsExpr(2) : nullptr))
+ Attrs.push_back(A);
+ continue;
+ default:
+ Diag(Loc, diag::warn_omp_invalid_attribute_for_ompx_attributes) << PA;
+ continue;
+ };
+ }
+
+ return Actions.ActOnOpenMPXAttributeClause(Attrs, Loc, T.getOpenLocation(),
+ T.getCloseLocation());
+}
+
/// Parsing of simple OpenMP clauses like 'default' or 'proc_bind'.
///
/// default-clause:
-/// 'default' '(' 'none' | 'shared' | 'firstprivate' ')'
+/// 'default' '(' 'none' | 'shared' | 'private' | 'firstprivate' ')'
///
/// proc_bind-clause:
/// 'proc_bind' '(' 'master' | 'close' | 'spread' ')'
///
+/// bind-clause:
+/// 'bind' '(' 'teams' | 'parallel' | 'thread' ')'
+///
/// update-clause:
-/// 'update' '(' 'in' | 'out' | 'inout' | 'mutexinoutset' ')'
+/// 'update' '(' 'in' | 'out' | 'inout' | 'mutexinoutset' |
+/// 'inoutset' ')'
///
OMPClause *Parser::ParseOpenMPSimpleClause(OpenMPClauseKind Kind,
bool ParseOnly) {
- llvm::Optional<SimpleClauseData> Val = parseOpenMPSimpleClause(*this, Kind);
+ std::optional<SimpleClauseData> Val = parseOpenMPSimpleClause(*this, Kind);
if (!Val || ParseOnly)
return nullptr;
if (getLangOpts().OpenMP < 51 && Kind == OMPC_default &&
- static_cast<DefaultKind>(Val.getValue().Type) ==
- OMP_DEFAULT_firstprivate) {
- Diag(Val.getValue().LOpen, diag::err_omp_invalid_dsa)
- << getOpenMPClauseName(OMPC_firstprivate)
+ (static_cast<DefaultKind>(Val->Type) == OMP_DEFAULT_private ||
+ static_cast<DefaultKind>(Val->Type) ==
+ OMP_DEFAULT_firstprivate)) {
+ Diag(Val->LOpen, diag::err_omp_invalid_dsa)
+ << getOpenMPClauseName(static_cast<DefaultKind>(Val->Type) ==
+ OMP_DEFAULT_private
+ ? OMPC_private
+ : OMPC_firstprivate)
<< getOpenMPClauseName(OMPC_default) << "5.1";
return nullptr;
}
- return Actions.ActOnOpenMPSimpleClause(
- Kind, Val.getValue().Type, Val.getValue().TypeLoc, Val.getValue().LOpen,
- Val.getValue().Loc, Val.getValue().RLoc);
+ return Actions.ActOnOpenMPSimpleClause(Kind, Val->Type,
+ Val->TypeLoc, Val->LOpen,
+ Val->Loc, Val->RLoc);
}
/// Parsing of OpenMP clauses like 'ordered'.
@@ -3310,8 +3879,7 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
Arg[Modifier2] = OMPC_SCHEDULE_MODIFIER_unknown;
Arg[ScheduleKind] = OMPC_SCHEDULE_unknown;
unsigned KindModifier = getOpenMPSimpleClauseType(
- Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok),
- getLangOpts().OpenMP);
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok), getLangOpts());
if (KindModifier > OMPC_SCHEDULE_unknown) {
// Parse 'modifier'
Arg[Modifier1] = KindModifier;
@@ -3323,8 +3891,7 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
// Parse ',' 'modifier'
ConsumeAnyToken();
KindModifier = getOpenMPSimpleClauseType(
- Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok),
- getLangOpts().OpenMP);
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok), getLangOpts());
Arg[Modifier2] = KindModifier > OMPC_SCHEDULE_unknown
? KindModifier
: (unsigned)OMPC_SCHEDULE_unknown;
@@ -3339,8 +3906,7 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
else
Diag(Tok, diag::warn_pragma_expected_colon) << "schedule modifier";
KindModifier = getOpenMPSimpleClauseType(
- Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok),
- getLangOpts().OpenMP);
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok), getLangOpts());
}
Arg[ScheduleKind] = KindModifier;
KLoc[ScheduleKind] = Tok.getLocation();
@@ -3354,8 +3920,7 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
DelimLoc = ConsumeAnyToken();
} else if (Kind == OMPC_dist_schedule) {
Arg.push_back(getOpenMPSimpleClauseType(
- Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok),
- getLangOpts().OpenMP));
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok), getLangOpts()));
KLoc.push_back(Tok.getLocation());
if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::comma) &&
Tok.isNot(tok::annot_pragma_openmp_end))
@@ -3365,8 +3930,7 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
} else if (Kind == OMPC_defaultmap) {
// Get a defaultmap modifier
unsigned Modifier = getOpenMPSimpleClauseType(
- Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok),
- getLangOpts().OpenMP);
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok), getLangOpts());
// Set defaultmap modifier to unknown if it is either scalar, aggregate, or
// pointer
if (Modifier < OMPC_DEFAULTMAP_MODIFIER_unknown)
@@ -3384,8 +3948,7 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
Diag(Tok, diag::warn_pragma_expected_colon) << "defaultmap modifier";
// Get a defaultmap kind
Arg.push_back(getOpenMPSimpleClauseType(
- Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok),
- getLangOpts().OpenMP));
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok), getLangOpts()));
KLoc.push_back(Tok.getLocation());
if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::comma) &&
Tok.isNot(tok::annot_pragma_openmp_end))
@@ -3394,14 +3957,41 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
Arg.push_back(OMPC_DEFAULTMAP_unknown);
KLoc.push_back(SourceLocation());
}
+ } else if (Kind == OMPC_order) {
+ enum { Modifier, OrderKind, NumberOfElements };
+ Arg.resize(NumberOfElements);
+ KLoc.resize(NumberOfElements);
+ Arg[Modifier] = OMPC_ORDER_MODIFIER_unknown;
+ Arg[OrderKind] = OMPC_ORDER_unknown;
+ unsigned KindModifier = getOpenMPSimpleClauseType(
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok), getLangOpts());
+ if (KindModifier > OMPC_ORDER_unknown) {
+ // Parse 'modifier'
+ Arg[Modifier] = KindModifier;
+ KLoc[Modifier] = Tok.getLocation();
+ if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::comma) &&
+ Tok.isNot(tok::annot_pragma_openmp_end))
+ ConsumeAnyToken();
+ // Parse ':'
+ if (Tok.is(tok::colon))
+ ConsumeAnyToken();
+ else
+ Diag(Tok, diag::warn_pragma_expected_colon) << "order modifier";
+ KindModifier = getOpenMPSimpleClauseType(
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok), getLangOpts());
+ }
+ Arg[OrderKind] = KindModifier;
+ KLoc[OrderKind] = Tok.getLocation();
+ if (Tok.isNot(tok::r_paren) && Tok.isNot(tok::comma) &&
+ Tok.isNot(tok::annot_pragma_openmp_end))
+ ConsumeAnyToken();
} else if (Kind == OMPC_device) {
// Only target executable directives support extended device construct.
if (isOpenMPTargetExecutionDirective(DKind) && getLangOpts().OpenMP >= 50 &&
NextToken().is(tok::colon)) {
// Parse optional <device modifier> ':'
Arg.push_back(getOpenMPSimpleClauseType(
- Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok),
- getLangOpts().OpenMP));
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok), getLangOpts()));
KLoc.push_back(Tok.getLocation());
ConsumeAnyToken();
// Parse ':'
@@ -3410,6 +4000,60 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
Arg.push_back(OMPC_DEVICE_unknown);
KLoc.emplace_back();
}
+ } else if (Kind == OMPC_grainsize) {
+ // Parse optional <grainsize modifier> ':'
+ OpenMPGrainsizeClauseModifier Modifier =
+ static_cast<OpenMPGrainsizeClauseModifier>(getOpenMPSimpleClauseType(
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok),
+ getLangOpts()));
+ if (getLangOpts().OpenMP >= 51) {
+ if (NextToken().is(tok::colon)) {
+ Arg.push_back(Modifier);
+ KLoc.push_back(Tok.getLocation());
+ // Parse modifier
+ ConsumeAnyToken();
+ // Parse ':'
+ ConsumeAnyToken();
+ } else {
+ if (Modifier == OMPC_GRAINSIZE_strict) {
+ Diag(Tok, diag::err_modifier_expected_colon) << "strict";
+ // Parse modifier
+ ConsumeAnyToken();
+ }
+ Arg.push_back(OMPC_GRAINSIZE_unknown);
+ KLoc.emplace_back();
+ }
+ } else {
+ Arg.push_back(OMPC_GRAINSIZE_unknown);
+ KLoc.emplace_back();
+ }
+ } else if (Kind == OMPC_num_tasks) {
+ // Parse optional <num_tasks modifier> ':'
+ OpenMPNumTasksClauseModifier Modifier =
+ static_cast<OpenMPNumTasksClauseModifier>(getOpenMPSimpleClauseType(
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok),
+ getLangOpts()));
+ if (getLangOpts().OpenMP >= 51) {
+ if (NextToken().is(tok::colon)) {
+ Arg.push_back(Modifier);
+ KLoc.push_back(Tok.getLocation());
+ // Parse modifier
+ ConsumeAnyToken();
+ // Parse ':'
+ ConsumeAnyToken();
+ } else {
+ if (Modifier == OMPC_NUMTASKS_strict) {
+ Diag(Tok, diag::err_modifier_expected_colon) << "strict";
+ // Parse modifier
+ ConsumeAnyToken();
+ }
+ Arg.push_back(OMPC_NUMTASKS_unknown);
+ KLoc.emplace_back();
+ }
+ } else {
+ Arg.push_back(OMPC_NUMTASKS_unknown);
+ KLoc.emplace_back();
+ }
} else {
assert(Kind == OMPC_if);
KLoc.push_back(Tok.getLocation());
@@ -3432,7 +4076,8 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
bool NeedAnExpression = (Kind == OMPC_schedule && DelimLoc.isValid()) ||
(Kind == OMPC_dist_schedule && DelimLoc.isValid()) ||
- Kind == OMPC_if || Kind == OMPC_device;
+ Kind == OMPC_if || Kind == OMPC_device ||
+ Kind == OMPC_grainsize || Kind == OMPC_num_tasks;
if (NeedAnExpression) {
SourceLocation ELoc = Tok.getLocation();
ExprResult LHS(ParseCastExpression(AnyCastExpr, false, NotTypeCast));
@@ -3512,12 +4157,12 @@ static OpenMPMapModifierKind isMapModifier(Parser &P) {
Preprocessor &PP = P.getPreprocessor();
OpenMPMapModifierKind TypeModifier =
static_cast<OpenMPMapModifierKind>(getOpenMPSimpleClauseType(
- OMPC_map, PP.getSpelling(Tok), P.getLangOpts().OpenMP));
+ OMPC_map, PP.getSpelling(Tok), P.getLangOpts()));
return TypeModifier;
}
/// Parse the mapper modifier in map, to, and from clauses.
-bool Parser::parseMapperModifier(OpenMPVarListDataTy &Data) {
+bool Parser::parseMapperModifier(Sema::OpenMPVarListDataTy &Data) {
// Parse '('.
BalancedDelimiterTracker T(*this, tok::l_paren, tok::colon);
if (T.expectAndConsume(diag::err_expected_lparen_after, "mapper")) {
@@ -3529,7 +4174,7 @@ bool Parser::parseMapperModifier(OpenMPVarListDataTy &Data) {
if (getLangOpts().CPlusPlus)
ParseOptionalCXXScopeSpecifier(Data.ReductionOrMapperIdScopeSpec,
/*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false);
if (Tok.isNot(tok::identifier) && Tok.isNot(tok::kw_default)) {
Diag(Tok.getLocation(), diag::err_omp_mapper_illegal_identifier);
@@ -3549,14 +4194,19 @@ bool Parser::parseMapperModifier(OpenMPVarListDataTy &Data) {
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier) |
/// present
-bool Parser::parseMapTypeModifiers(OpenMPVarListDataTy &Data) {
+bool Parser::parseMapTypeModifiers(Sema::OpenMPVarListDataTy &Data) {
while (getCurToken().isNot(tok::colon)) {
OpenMPMapModifierKind TypeModifier = isMapModifier(*this);
if (TypeModifier == OMPC_MAP_MODIFIER_always ||
TypeModifier == OMPC_MAP_MODIFIER_close ||
- TypeModifier == OMPC_MAP_MODIFIER_present) {
+ TypeModifier == OMPC_MAP_MODIFIER_present ||
+ TypeModifier == OMPC_MAP_MODIFIER_ompx_hold) {
Data.MapTypeModifiers.push_back(TypeModifier);
Data.MapTypeModifiersLoc.push_back(Tok.getLocation());
+ if (PP.LookAhead(0).isNot(tok::comma) &&
+ PP.LookAhead(0).isNot(tok::colon) && getLangOpts().OpenMP >= 52)
+ Diag(Tok.getLocation(), diag::err_omp_missing_comma)
+ << "map type modifier";
ConsumeToken();
} else if (TypeModifier == OMPC_MAP_MODIFIER_mapper) {
Data.MapTypeModifiers.push_back(TypeModifier);
@@ -3564,6 +4214,11 @@ bool Parser::parseMapTypeModifiers(OpenMPVarListDataTy &Data) {
ConsumeToken();
if (parseMapperModifier(Data))
return true;
+ if (Tok.isNot(tok::comma) && Tok.isNot(tok::colon) &&
+ getLangOpts().OpenMP >= 52)
+ Diag(Data.MapTypeModifiersLoc.back(), diag::err_omp_missing_comma)
+ << "map type modifier";
+
} else {
// For the case of unknown map-type-modifier or a map-type.
// Map-type is followed by a colon; the function returns when it
@@ -3577,7 +4232,9 @@ bool Parser::parseMapTypeModifiers(OpenMPVarListDataTy &Data) {
if (PP.LookAhead(0).is(tok::colon))
return false;
Diag(Tok, diag::err_omp_unknown_map_type_modifier)
- << (getLangOpts().OpenMP >= 51 ? 1 : 0);
+ << (getLangOpts().OpenMP >= 51 ? (getLangOpts().OpenMP >= 52 ? 2 : 1)
+ : 0)
+ << getLangOpts().OpenMPExtensions;
ConsumeToken();
}
if (getCurToken().is(tok::comma))
@@ -3596,14 +4253,14 @@ static OpenMPMapClauseKind isMapType(Parser &P) {
Preprocessor &PP = P.getPreprocessor();
OpenMPMapClauseKind MapType =
static_cast<OpenMPMapClauseKind>(getOpenMPSimpleClauseType(
- OMPC_map, PP.getSpelling(Tok), P.getLangOpts().OpenMP));
+ OMPC_map, PP.getSpelling(Tok), P.getLangOpts()));
return MapType;
}
/// Parse map-type in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type ::= to | from | tofrom | alloc | release | delete
-static void parseMapType(Parser &P, Parser::OpenMPVarListDataTy &Data) {
+static void parseMapType(Parser &P, Sema::OpenMPVarListDataTy &Data) {
Token Tok = P.getCurToken();
if (Tok.is(tok::colon)) {
P.Diag(Tok, diag::err_omp_map_type_missing);
@@ -3722,11 +4379,57 @@ ExprResult Parser::ParseOpenMPIteratorsExpr() {
Data);
}
+bool Parser::ParseOpenMPReservedLocator(OpenMPClauseKind Kind,
+ Sema::OpenMPVarListDataTy &Data,
+ const LangOptions &LangOpts) {
+ // Currently the only reserved locator is 'omp_all_memory' which is only
+ // allowed on a depend clause.
+ if (Kind != OMPC_depend || LangOpts.OpenMP < 51)
+ return false;
+
+ if (Tok.is(tok::identifier) &&
+ Tok.getIdentifierInfo()->isStr("omp_all_memory")) {
+
+ if (Data.ExtraModifier == OMPC_DEPEND_outallmemory ||
+ Data.ExtraModifier == OMPC_DEPEND_inoutallmemory)
+ Diag(Tok, diag::warn_omp_more_one_omp_all_memory);
+ else if (Data.ExtraModifier != OMPC_DEPEND_out &&
+ Data.ExtraModifier != OMPC_DEPEND_inout)
+ Diag(Tok, diag::err_omp_requires_out_inout_depend_type);
+ else
+ Data.ExtraModifier = Data.ExtraModifier == OMPC_DEPEND_out
+ ? OMPC_DEPEND_outallmemory
+ : OMPC_DEPEND_inoutallmemory;
+ ConsumeToken();
+ return true;
+ }
+ return false;
+}
+
+/// Parse step size expression. Returns true if parsing is successfull,
+/// otherwise returns false.
+static bool parseStepSize(Parser &P, Sema::OpenMPVarListDataTy &Data,
+ OpenMPClauseKind CKind, SourceLocation ELoc) {
+ ExprResult Tail = P.ParseAssignmentExpression();
+ Sema &Actions = P.getActions();
+ Tail = Actions.ActOnFinishFullExpr(Tail.get(), ELoc,
+ /*DiscardedValue*/ false);
+ if (Tail.isUsable()) {
+ Data.DepModOrTailExpr = Tail.get();
+ Token CurTok = P.getCurToken();
+ if (CurTok.isNot(tok::r_paren) && CurTok.isNot(tok::comma)) {
+ P.Diag(CurTok, diag::err_expected_punc) << "step expression";
+ }
+ return true;
+ }
+ return false;
+}
+
/// Parses clauses with list.
bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
- OpenMPVarListDataTy &Data) {
+ Sema::OpenMPVarListDataTy &Data) {
UnqualifiedId UnqualifiedReductionId;
bool InvalidReductionId = false;
bool IsInvalidMapperModifier = false;
@@ -3738,6 +4441,7 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
return true;
bool HasIterator = false;
+ bool InvalidIterator = false;
bool NeedRParenForLinear = false;
BalancedDelimiterTracker LinearT(*this, tok::l_paren,
tok::annot_pragma_openmp_end);
@@ -3749,8 +4453,8 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
(Tok.is(tok::identifier) || Tok.is(tok::kw_default)) &&
NextToken().is(tok::comma)) {
// Parse optional reduction modifier.
- Data.ExtraModifier = getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok),
- getLangOpts().OpenMP);
+ Data.ExtraModifier =
+ getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok), getLangOpts());
Data.ExtraModifierLoc = Tok.getLocation();
ConsumeToken();
assert(Tok.is(tok::comma) && "Expected comma.");
@@ -3760,7 +4464,7 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
if (getLangOpts().CPlusPlus)
ParseOptionalCXXScopeSpecifier(Data.ReductionOrMapperIdScopeSpec,
/*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false);
InvalidReductionId = ParseReductionId(
*this, Data.ReductionOrMapperIdScopeSpec, UnqualifiedReductionId);
@@ -3775,7 +4479,7 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
if (!InvalidReductionId)
Data.ReductionOrMapperId =
Actions.GetNameFromUnqualifiedId(UnqualifiedReductionId);
- } else if (Kind == OMPC_depend) {
+ } else if (Kind == OMPC_depend || Kind == OMPC_doacross) {
if (getLangOpts().OpenMP >= 50) {
if (Tok.is(tok::identifier) && PP.getSpelling(Tok) == "iterator") {
// Handle optional dependence modifier.
@@ -3796,15 +4500,18 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
ColonProtectionRAIIObject ColonRAII(*this);
Data.ExtraModifier = getOpenMPSimpleClauseType(
Kind, Tok.is(tok::identifier) ? PP.getSpelling(Tok) : "",
- getLangOpts().OpenMP);
+ getLangOpts());
Data.ExtraModifierLoc = Tok.getLocation();
- if (Data.ExtraModifier == OMPC_DEPEND_unknown) {
+ if ((Kind == OMPC_depend && Data.ExtraModifier == OMPC_DEPEND_unknown) ||
+ (Kind == OMPC_doacross &&
+ Data.ExtraModifier == OMPC_DOACROSS_unknown)) {
SkipUntil(tok::colon, tok::r_paren, tok::annot_pragma_openmp_end,
StopBeforeMatch);
} else {
ConsumeToken();
// Special processing for depend(source) clause.
- if (DKind == OMPD_ordered && Data.ExtraModifier == OMPC_DEPEND_source) {
+ if (DKind == OMPD_ordered && Kind == OMPC_depend &&
+ Data.ExtraModifier == OMPC_DEPEND_source) {
// Parse ')'.
T.consumeClose();
return false;
@@ -3812,20 +4519,69 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
}
if (Tok.is(tok::colon)) {
Data.ColonLoc = ConsumeToken();
- } else {
+ } else if (Kind != OMPC_doacross || Tok.isNot(tok::r_paren)) {
Diag(Tok, DKind == OMPD_ordered ? diag::warn_pragma_expected_colon_r_paren
: diag::warn_pragma_expected_colon)
- << "dependency type";
+ << (Kind == OMPC_depend ? "dependency type" : "dependence-type");
+ }
+ if (Kind == OMPC_doacross) {
+ if (Tok.is(tok::identifier) &&
+ Tok.getIdentifierInfo()->isStr("omp_cur_iteration")) {
+ Data.ExtraModifier = Data.ExtraModifier == OMPC_DOACROSS_source
+ ? OMPC_DOACROSS_source_omp_cur_iteration
+ : OMPC_DOACROSS_sink_omp_cur_iteration;
+ ConsumeToken();
+ }
+ if (Data.ExtraModifier == OMPC_DOACROSS_sink_omp_cur_iteration) {
+ if (Tok.isNot(tok::minus)) {
+ Diag(Tok, diag::err_omp_sink_and_source_iteration_not_allowd)
+ << getOpenMPClauseName(Kind) << 0 << 0;
+ SkipUntil(tok::r_paren);
+ return false;
+ } else {
+ ConsumeToken();
+ SourceLocation Loc = Tok.getLocation();
+ uint64_t Value = 0;
+ if (Tok.isNot(tok::numeric_constant) ||
+ (PP.parseSimpleIntegerLiteral(Tok, Value) && Value != 1)) {
+ Diag(Loc, diag::err_omp_sink_and_source_iteration_not_allowd)
+ << getOpenMPClauseName(Kind) << 0 << 0;
+ SkipUntil(tok::r_paren);
+ return false;
+ }
+ }
+ }
+ if (Data.ExtraModifier == OMPC_DOACROSS_source_omp_cur_iteration) {
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_omp_sink_and_source_iteration_not_allowd)
+ << getOpenMPClauseName(Kind) << 1 << 1;
+ SkipUntil(tok::r_paren);
+ return false;
+ }
+ }
+ // Only the 'sink' case has the expression list.
+ if (Kind == OMPC_doacross &&
+ (Data.ExtraModifier == OMPC_DOACROSS_source ||
+ Data.ExtraModifier == OMPC_DOACROSS_source_omp_cur_iteration ||
+ Data.ExtraModifier == OMPC_DOACROSS_sink_omp_cur_iteration)) {
+ // Parse ')'.
+ T.consumeClose();
+ return false;
+ }
}
} else if (Kind == OMPC_linear) {
// Try to parse modifier if any.
Data.ExtraModifier = OMPC_LINEAR_val;
if (Tok.is(tok::identifier) && PP.LookAhead(0).is(tok::l_paren)) {
- Data.ExtraModifier = getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok),
- getLangOpts().OpenMP);
+ Data.ExtraModifier =
+ getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok), getLangOpts());
Data.ExtraModifierLoc = ConsumeToken();
LinearT.consumeOpen();
NeedRParenForLinear = true;
+ if (getLangOpts().OpenMP >= 52)
+ Diag(Data.ExtraModifierLoc, diag::err_omp_deprecate_old_syntax)
+ << "linear-modifier(list)" << getOpenMPClauseName(Kind)
+ << "linear(list: [linear-modifier,] step(step-size))";
}
} else if (Kind == OMPC_lastprivate) {
// Try to parse modifier if any.
@@ -3835,14 +4591,31 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
if ((getLangOpts().OpenMP >= 50 && !isOpenMPDistributeDirective(DKind) &&
!isOpenMPTaskLoopDirective(DKind)) &&
Tok.is(tok::identifier) && PP.LookAhead(0).is(tok::colon)) {
- Data.ExtraModifier = getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok),
- getLangOpts().OpenMP);
+ Data.ExtraModifier =
+ getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok), getLangOpts());
Data.ExtraModifierLoc = Tok.getLocation();
ConsumeToken();
assert(Tok.is(tok::colon) && "Expected colon.");
Data.ColonLoc = ConsumeToken();
}
} else if (Kind == OMPC_map) {
+ // Handle optional iterator map modifier.
+ if (Tok.is(tok::identifier) && PP.getSpelling(Tok) == "iterator") {
+ HasIterator = true;
+ EnterScope(Scope::OpenMPDirectiveScope | Scope::DeclScope);
+ Data.MapTypeModifiers.push_back(OMPC_MAP_MODIFIER_iterator);
+ Data.MapTypeModifiersLoc.push_back(Tok.getLocation());
+ ExprResult IteratorRes = ParseOpenMPIteratorsExpr();
+ Data.IteratorExpr = IteratorRes.get();
+ // Parse ','
+ ExpectAndConsume(tok::comma);
+ if (getLangOpts().OpenMP < 52) {
+ Diag(Tok, diag::err_omp_unknown_map_type_modifier)
+ << (getLangOpts().OpenMP >= 51 ? 1 : 0)
+ << getLangOpts().OpenMPExtensions;
+ InvalidIterator = true;
+ }
+ }
// Handle map type for map clause.
ColonProtectionRAIIObject ColonRAII(*this);
@@ -3872,6 +4645,12 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
}
if (Data.ExtraModifier == OMPC_MAP_unknown) {
Data.ExtraModifier = OMPC_MAP_tofrom;
+ if (getLangOpts().OpenMP >= 52) {
+ if (DKind == OMPD_target_enter_data)
+ Data.ExtraModifier = OMPC_MAP_to;
+ else if (DKind == OMPD_target_exit_data)
+ Data.ExtraModifier = OMPC_MAP_from;
+ }
Data.IsMapTypeImplicit = true;
}
@@ -3879,9 +4658,8 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
Data.ColonLoc = ConsumeToken();
} else if (Kind == OMPC_to || Kind == OMPC_from) {
while (Tok.is(tok::identifier)) {
- auto Modifier =
- static_cast<OpenMPMotionModifierKind>(getOpenMPSimpleClauseType(
- Kind, PP.getSpelling(Tok), getLangOpts().OpenMP));
+ auto Modifier = static_cast<OpenMPMotionModifierKind>(
+ getOpenMPSimpleClauseType(Kind, PP.getSpelling(Tok), getLangOpts()));
if (Modifier == OMPC_MOTION_MODIFIER_unknown)
break;
Data.MotionModifiers.push_back(Modifier);
@@ -3951,27 +4729,50 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
StopBeforeMatch);
}
+ } else if (Kind == OMPC_adjust_args) {
+ // Handle adjust-op for adjust_args clause.
+ ColonProtectionRAIIObject ColonRAII(*this);
+ Data.ExtraModifier = getOpenMPSimpleClauseType(
+ Kind, Tok.is(tok::identifier) ? PP.getSpelling(Tok) : "",
+ getLangOpts());
+ Data.ExtraModifierLoc = Tok.getLocation();
+ if (Data.ExtraModifier == OMPC_ADJUST_ARGS_unknown) {
+ SkipUntil(tok::colon, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ } else {
+ ConsumeToken();
+ if (Tok.is(tok::colon))
+ Data.ColonLoc = Tok.getLocation();
+ ExpectAndConsume(tok::colon, diag::warn_pragma_expected_colon,
+ "adjust-op");
+ }
}
bool IsComma =
(Kind != OMPC_reduction && Kind != OMPC_task_reduction &&
- Kind != OMPC_in_reduction && Kind != OMPC_depend && Kind != OMPC_map) ||
+ Kind != OMPC_in_reduction && Kind != OMPC_depend &&
+ Kind != OMPC_doacross && Kind != OMPC_map) ||
(Kind == OMPC_reduction && !InvalidReductionId) ||
(Kind == OMPC_map && Data.ExtraModifier != OMPC_MAP_unknown) ||
- (Kind == OMPC_depend && Data.ExtraModifier != OMPC_DEPEND_unknown);
+ (Kind == OMPC_depend && Data.ExtraModifier != OMPC_DEPEND_unknown) ||
+ (Kind == OMPC_doacross && Data.ExtraModifier != OMPC_DOACROSS_unknown) ||
+ (Kind == OMPC_adjust_args &&
+ Data.ExtraModifier != OMPC_ADJUST_ARGS_unknown);
const bool MayHaveTail = (Kind == OMPC_linear || Kind == OMPC_aligned);
while (IsComma || (Tok.isNot(tok::r_paren) && Tok.isNot(tok::colon) &&
Tok.isNot(tok::annot_pragma_openmp_end))) {
ParseScope OMPListScope(this, Scope::OpenMPDirectiveScope);
ColonProtectionRAIIObject ColonRAII(*this, MayHaveTail);
- // Parse variable
- ExprResult VarExpr =
- Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
- if (VarExpr.isUsable()) {
- Vars.push_back(VarExpr.get());
- } else {
- SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
- StopBeforeMatch);
+ if (!ParseOpenMPReservedLocator(Kind, Data, getLangOpts())) {
+ // Parse variable
+ ExprResult VarExpr =
+ Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
+ if (VarExpr.isUsable()) {
+ Vars.push_back(VarExpr.get());
+ } else {
+ SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ }
}
// Skip ',' if any
IsComma = Tok.is(tok::comma);
@@ -3990,19 +4791,76 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
if (NeedRParenForLinear)
LinearT.consumeClose();
- // Parse ':' linear-step (or ':' alignment).
+ // Parse ':' linear modifiers (val, uval, ref or step(step-size))
+ // or parse ':' alignment.
const bool MustHaveTail = MayHaveTail && Tok.is(tok::colon);
+ bool StepFound = false;
+ bool ModifierFound = false;
if (MustHaveTail) {
Data.ColonLoc = Tok.getLocation();
SourceLocation ELoc = ConsumeToken();
- ExprResult Tail = ParseAssignmentExpression();
- Tail =
- Actions.ActOnFinishFullExpr(Tail.get(), ELoc, /*DiscardedValue*/ false);
- if (Tail.isUsable())
- Data.DepModOrTailExpr = Tail.get();
- else
- SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
- StopBeforeMatch);
+
+ if (getLangOpts().OpenMP >= 52 && Kind == OMPC_linear) {
+ while (Tok.isNot(tok::r_paren)) {
+ if (Tok.is(tok::identifier)) {
+ // identifier could be a linear kind (val, uval, ref) or step
+ // modifier or step size
+ OpenMPLinearClauseKind LinKind =
+ static_cast<OpenMPLinearClauseKind>(getOpenMPSimpleClauseType(
+ Kind, Tok.isAnnotation() ? "" : PP.getSpelling(Tok),
+ getLangOpts()));
+
+ if (LinKind == OMPC_LINEAR_step) {
+ if (StepFound)
+ Diag(Tok, diag::err_omp_multiple_step_or_linear_modifier) << 0;
+
+ BalancedDelimiterTracker StepT(*this, tok::l_paren,
+ tok::annot_pragma_openmp_end);
+ SourceLocation StepModifierLoc = ConsumeToken();
+ // parse '('
+ if (StepT.consumeOpen())
+ Diag(StepModifierLoc, diag::err_expected_lparen_after) << "step";
+
+ // parse step size expression
+ StepFound = parseStepSize(*this, Data, Kind, Tok.getLocation());
+ if (StepFound)
+ Data.StepModifierLoc = StepModifierLoc;
+
+ // parse ')'
+ StepT.consumeClose();
+ } else if (LinKind >= 0 && LinKind < OMPC_LINEAR_step) {
+ if (ModifierFound)
+ Diag(Tok, diag::err_omp_multiple_step_or_linear_modifier) << 1;
+
+ Data.ExtraModifier = LinKind;
+ Data.ExtraModifierLoc = ConsumeToken();
+ ModifierFound = true;
+ } else {
+ StepFound = parseStepSize(*this, Data, Kind, Tok.getLocation());
+ }
+ } else {
+ // parse an integer expression as step size
+ StepFound = parseStepSize(*this, Data, Kind, Tok.getLocation());
+ }
+
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+ if (Tok.is(tok::r_paren) || Tok.is(tok::annot_pragma_openmp_end))
+ break;
+ }
+ if (!StepFound && !ModifierFound)
+ Diag(ELoc, diag::err_expected_expression);
+ } else {
+ // for OMPC_aligned and OMPC_linear (with OpenMP <= 5.1)
+ ExprResult Tail = ParseAssignmentExpression();
+ Tail = Actions.ActOnFinishFullExpr(Tail.get(), ELoc,
+ /*DiscardedValue*/ false);
+ if (Tail.isUsable())
+ Data.DepModOrTailExpr = Tail.get();
+ else
+ SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ }
}
// Parse ')'.
@@ -4012,9 +4870,10 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
// Exit from scope when the iterator is used in depend clause.
if (HasIterator)
ExitScope();
- return (Kind != OMPC_depend && Kind != OMPC_map && Vars.empty()) ||
- (MustHaveTail && !Data.DepModOrTailExpr) || InvalidReductionId ||
- IsInvalidMapperModifier;
+ return (Kind != OMPC_depend && Kind != OMPC_doacross && Kind != OMPC_map &&
+ Vars.empty()) ||
+ (MustHaveTail && !Data.DepModOrTailExpr && StepFound) ||
+ InvalidReductionId || IsInvalidMapperModifier || InvalidIterator;
}
/// Parsing of OpenMP clause 'private', 'firstprivate', 'lastprivate',
@@ -4059,6 +4918,8 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
/// 'use_device_addr' '(' list ')'
/// is_device_ptr-clause:
/// 'is_device_ptr' '(' list ')'
+/// has_device_addr-clause:
+/// 'has_device_addr' '(' list ')'
/// allocate-clause:
/// 'allocate' '(' [ allocator ':' ] list ')'
/// nontemporal-clause:
@@ -4078,7 +4939,7 @@ OMPClause *Parser::ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
SourceLocation Loc = Tok.getLocation();
SourceLocation LOpen = ConsumeToken();
SmallVector<Expr *, 4> Vars;
- OpenMPVarListDataTy Data;
+ Sema::OpenMPVarListDataTy Data;
if (ParseOpenMPVarList(DKind, Kind, Vars, Data))
return nullptr;
@@ -4086,10 +4947,5 @@ OMPClause *Parser::ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
if (ParseOnly)
return nullptr;
OMPVarListLocTy Locs(Loc, LOpen, Data.RLoc);
- return Actions.ActOnOpenMPVarListClause(
- Kind, Vars, Data.DepModOrTailExpr, Locs, Data.ColonLoc,
- Data.ReductionOrMapperIdScopeSpec, Data.ReductionOrMapperId,
- Data.ExtraModifier, Data.MapTypeModifiers, Data.MapTypeModifiersLoc,
- Data.IsMapTypeImplicit, Data.ExtraModifierLoc, Data.MotionModifiers,
- Data.MotionModifiersLoc);
+ return Actions.ActOnOpenMPVarListClause(Kind, Vars, Locs, Data);
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp b/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp
index 42072fe63fc8..730ac1a0fee5 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp
@@ -19,9 +19,11 @@
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Scope.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSwitch.h"
+#include <optional>
using namespace clang;
namespace {
@@ -135,7 +137,20 @@ struct PragmaSTDC_CX_LIMITED_RANGEHandler : public PragmaHandler {
void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &Tok) override {
tok::OnOffSwitch OOS;
- PP.LexOnOffSwitch(OOS);
+ if (PP.LexOnOffSwitch(OOS))
+ return;
+
+ MutableArrayRef<Token> Toks(
+ PP.getPreprocessorAllocator().Allocate<Token>(1), 1);
+
+ Toks[0].startToken();
+ Toks[0].setKind(tok::annot_pragma_cx_limited_range);
+ Toks[0].setLocation(Tok.getLocation());
+ Toks[0].setAnnotationEndLoc(Tok.getLocation());
+ Toks[0].setAnnotationValue(
+ reinterpret_cast<void *>(static_cast<uintptr_t>(OOS)));
+ PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true,
+ /*IsReinject=*/false);
}
};
@@ -164,18 +179,51 @@ struct PragmaFPHandler : public PragmaHandler {
Token &FirstToken) override;
};
-struct PragmaNoOpenMPHandler : public PragmaHandler {
- PragmaNoOpenMPHandler() : PragmaHandler("omp") { }
+// A pragma handler to be the base of the NoOpenMPHandler and NoOpenACCHandler,
+// which are identical other than the name given to them, and the diagnostic
+// emitted.
+template <diag::kind IgnoredDiag>
+struct PragmaNoSupportHandler : public PragmaHandler {
+ PragmaNoSupportHandler(StringRef Name) : PragmaHandler(Name) {}
void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
};
-struct PragmaOpenMPHandler : public PragmaHandler {
- PragmaOpenMPHandler() : PragmaHandler("omp") { }
+struct PragmaNoOpenMPHandler
+ : public PragmaNoSupportHandler<diag::warn_pragma_omp_ignored> {
+ PragmaNoOpenMPHandler() : PragmaNoSupportHandler("omp") {}
+};
+
+struct PragmaNoOpenACCHandler
+ : public PragmaNoSupportHandler<diag::warn_pragma_acc_ignored> {
+ PragmaNoOpenACCHandler() : PragmaNoSupportHandler("acc") {}
+};
+
+// A pragma handler to be the base for the OpenMPHandler and OpenACCHandler,
+// which are identical other than the tokens used for the start/end of a pragma
+// section, and some diagnostics.
+template <tok::TokenKind StartTok, tok::TokenKind EndTok,
+ diag::kind UnexpectedDiag>
+struct PragmaSupportHandler : public PragmaHandler {
+ PragmaSupportHandler(StringRef Name) : PragmaHandler(Name) {}
void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &FirstToken) override;
};
+struct PragmaOpenMPHandler
+ : public PragmaSupportHandler<tok::annot_pragma_openmp,
+ tok::annot_pragma_openmp_end,
+ diag::err_omp_unexpected_directive> {
+ PragmaOpenMPHandler() : PragmaSupportHandler("omp") {}
+};
+
+struct PragmaOpenACCHandler
+ : public PragmaSupportHandler<tok::annot_pragma_openacc,
+ tok::annot_pragma_openacc_end,
+ diag::err_acc_unexpected_directive> {
+ PragmaOpenACCHandler() : PragmaSupportHandler("acc") {}
+};
+
/// PragmaCommentHandler - "\#pragma comment ...".
struct PragmaCommentHandler : public PragmaHandler {
PragmaCommentHandler(Sema &Actions)
@@ -255,10 +303,66 @@ struct PragmaMSIntrinsicHandler : public PragmaHandler {
Token &FirstToken) override;
};
-struct PragmaMSOptimizeHandler : public PragmaHandler {
- PragmaMSOptimizeHandler() : PragmaHandler("optimize") {}
+// "\#pragma fenv_access (on)".
+struct PragmaMSFenvAccessHandler : public PragmaHandler {
+ PragmaMSFenvAccessHandler() : PragmaHandler("fenv_access") {}
void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
- Token &FirstToken) override;
+ Token &FirstToken) override {
+ StringRef PragmaName = FirstToken.getIdentifierInfo()->getName();
+ if (!PP.getTargetInfo().hasStrictFP() && !PP.getLangOpts().ExpStrictFP) {
+ PP.Diag(FirstToken.getLocation(), diag::warn_pragma_fp_ignored)
+ << PragmaName;
+ return;
+ }
+
+ Token Tok;
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_lparen)
+ << PragmaName;
+ return;
+ }
+ PP.Lex(Tok); // Consume the l_paren.
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_ms_fenv_access);
+ return;
+ }
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+ tok::OnOffSwitch OOS;
+ if (II->isStr("on")) {
+ OOS = tok::OOS_ON;
+ PP.Lex(Tok);
+ } else if (II->isStr("off")) {
+ OOS = tok::OOS_OFF;
+ PP.Lex(Tok);
+ } else {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_ms_fenv_access);
+ return;
+ }
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_rparen)
+ << PragmaName;
+ return;
+ }
+ PP.Lex(Tok); // Consume the r_paren.
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << PragmaName;
+ return;
+ }
+
+ MutableArrayRef<Token> Toks(
+ PP.getPreprocessorAllocator().Allocate<Token>(1), 1);
+ Toks[0].startToken();
+ Toks[0].setKind(tok::annot_pragma_fenv_access_ms);
+ Toks[0].setLocation(FirstToken.getLocation());
+ Toks[0].setAnnotationEndLoc(Tok.getLocation());
+ Toks[0].setAnnotationValue(
+ reinterpret_cast<void*>(static_cast<uintptr_t>(OOS)));
+ PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true,
+ /*IsReinject=*/false);
+ }
};
struct PragmaForceCUDAHostDeviceHandler : public PragmaHandler {
@@ -294,6 +398,16 @@ struct PragmaMaxTokensTotalHandler : public PragmaHandler {
Token &FirstToken) override;
};
+struct PragmaRISCVHandler : public PragmaHandler {
+ PragmaRISCVHandler(Sema &Actions)
+ : PragmaHandler("riscv"), Actions(Actions) {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
+ Token &FirstToken) override;
+
+private:
+ Sema &Actions;
+};
+
void markAsReinjectedForRelexing(llvm::MutableArrayRef<clang::Token> Toks) {
for (auto &T : Toks)
T.setFlag(clang::Token::IsReinjected);
@@ -355,6 +469,12 @@ void Parser::initializePragmaHandlers() {
OpenMPHandler = std::make_unique<PragmaNoOpenMPHandler>();
PP.AddPragmaHandler(OpenMPHandler.get());
+ if (getLangOpts().OpenACC)
+ OpenACCHandler = std::make_unique<PragmaOpenACCHandler>();
+ else
+ OpenACCHandler = std::make_unique<PragmaNoOpenACCHandler>();
+ PP.AddPragmaHandler(OpenACCHandler.get());
+
if (getLangOpts().MicrosoftExt ||
getTargetInfo().getTriple().isOSBinFormatELF()) {
MSCommentHandler = std::make_unique<PragmaCommentHandler>(Actions);
@@ -383,12 +503,21 @@ void Parser::initializePragmaHandlers() {
PP.AddPragmaHandler(MSCodeSeg.get());
MSSection = std::make_unique<PragmaMSPragma>("section");
PP.AddPragmaHandler(MSSection.get());
+ MSStrictGuardStackCheck =
+ std::make_unique<PragmaMSPragma>("strict_gs_check");
+ PP.AddPragmaHandler(MSStrictGuardStackCheck.get());
+ MSFunction = std::make_unique<PragmaMSPragma>("function");
+ PP.AddPragmaHandler(MSFunction.get());
+ MSAllocText = std::make_unique<PragmaMSPragma>("alloc_text");
+ PP.AddPragmaHandler(MSAllocText.get());
+ MSOptimize = std::make_unique<PragmaMSPragma>("optimize");
+ PP.AddPragmaHandler(MSOptimize.get());
MSRuntimeChecks = std::make_unique<PragmaMSRuntimeChecksHandler>();
PP.AddPragmaHandler(MSRuntimeChecks.get());
MSIntrinsic = std::make_unique<PragmaMSIntrinsicHandler>();
PP.AddPragmaHandler(MSIntrinsic.get());
- MSOptimize = std::make_unique<PragmaMSOptimizeHandler>();
- PP.AddPragmaHandler(MSOptimize.get());
+ MSFenvAccess = std::make_unique<PragmaMSFenvAccessHandler>();
+ PP.AddPragmaHandler(MSFenvAccess.get());
}
if (getLangOpts().CUDA) {
@@ -431,6 +560,11 @@ void Parser::initializePragmaHandlers() {
MaxTokensTotalPragmaHandler = std::make_unique<PragmaMaxTokensTotalHandler>();
PP.AddPragmaHandler("clang", MaxTokensTotalPragmaHandler.get());
+
+ if (getTargetInfo().getTriple().isRISCV()) {
+ RISCVPragmaHandler = std::make_unique<PragmaRISCVHandler>(Actions);
+ PP.AddPragmaHandler("clang", RISCVPragmaHandler.get());
+ }
}
void Parser::resetPragmaHandlers() {
@@ -460,6 +594,9 @@ void Parser::resetPragmaHandlers() {
PP.RemovePragmaHandler(OpenMPHandler.get());
OpenMPHandler.reset();
+ PP.RemovePragmaHandler(OpenACCHandler.get());
+ OpenACCHandler.reset();
+
if (getLangOpts().MicrosoftExt ||
getTargetInfo().getTriple().isOSBinFormatELF()) {
PP.RemovePragmaHandler(MSCommentHandler.get());
@@ -490,12 +627,20 @@ void Parser::resetPragmaHandlers() {
MSCodeSeg.reset();
PP.RemovePragmaHandler(MSSection.get());
MSSection.reset();
+ PP.RemovePragmaHandler(MSStrictGuardStackCheck.get());
+ MSStrictGuardStackCheck.reset();
+ PP.RemovePragmaHandler(MSFunction.get());
+ MSFunction.reset();
+ PP.RemovePragmaHandler(MSAllocText.get());
+ MSAllocText.reset();
PP.RemovePragmaHandler(MSRuntimeChecks.get());
MSRuntimeChecks.reset();
PP.RemovePragmaHandler(MSIntrinsic.get());
MSIntrinsic.reset();
PP.RemovePragmaHandler(MSOptimize.get());
MSOptimize.reset();
+ PP.RemovePragmaHandler(MSFenvAccess.get());
+ MSFenvAccess.reset();
}
if (getLangOpts().CUDA) {
@@ -549,6 +694,11 @@ void Parser::resetPragmaHandlers() {
PP.RemovePragmaHandler("clang", MaxTokensTotalPragmaHandler.get());
MaxTokensTotalPragmaHandler.reset();
+
+ if (getTargetInfo().getTriple().isRISCV()) {
+ PP.RemovePragmaHandler("clang", RISCVPragmaHandler.get());
+ RISCVPragmaHandler.reset();
+ }
}
/// Handle the annotation token produced for #pragma unused(...)
@@ -571,18 +721,10 @@ void Parser::HandlePragmaVisibility() {
Actions.ActOnPragmaVisibility(VisType, VisLoc);
}
-namespace {
-struct PragmaPackInfo {
- Sema::PragmaMsStackAction Action;
- StringRef SlotLabel;
- Token Alignment;
-};
-} // end anonymous namespace
-
void Parser::HandlePragmaPack() {
assert(Tok.is(tok::annot_pragma_pack));
- PragmaPackInfo *Info =
- static_cast<PragmaPackInfo *>(Tok.getAnnotationValue());
+ Sema::PragmaPackInfo *Info =
+ static_cast<Sema::PragmaPackInfo *>(Tok.getAnnotationValue());
SourceLocation PragmaLoc = Tok.getLocation();
ExprResult Alignment;
if (Info->Alignment.is(tok::numeric_constant)) {
@@ -620,10 +762,36 @@ void Parser::HandlePragmaAlign() {
void Parser::HandlePragmaDump() {
assert(Tok.is(tok::annot_pragma_dump));
- IdentifierInfo *II =
- reinterpret_cast<IdentifierInfo *>(Tok.getAnnotationValue());
- Actions.ActOnPragmaDump(getCurScope(), Tok.getLocation(), II);
ConsumeAnnotationToken();
+ if (Tok.is(tok::eod)) {
+ PP.Diag(Tok, diag::warn_pragma_debug_missing_argument) << "dump";
+ } else if (NextToken().is(tok::eod)) {
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok, diag::warn_pragma_debug_unexpected_argument);
+ ConsumeAnyToken();
+ ExpectAndConsume(tok::eod);
+ return;
+ }
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ Actions.ActOnPragmaDump(getCurScope(), Tok.getLocation(), II);
+ ConsumeToken();
+ } else {
+ SourceLocation StartLoc = Tok.getLocation();
+ EnterExpressionEvaluationContext Ctx(
+ Actions, Sema::ExpressionEvaluationContext::Unevaluated);
+ ExprResult E = ParseExpression();
+ if (!E.isUsable() || E.get()->containsErrors()) {
+ // Diagnostics were emitted during parsing. No action needed.
+ } else if (E.get()->getDependence() != ExprDependence::None) {
+ PP.Diag(StartLoc, diag::warn_pragma_debug_dependent_argument)
+ << E.get()->isTypeDependent()
+ << SourceRange(StartLoc, Tok.getLocation());
+ } else {
+ Actions.ActOnPragmaDump(E.get());
+ }
+ SkipUntil(tok::eod, StopBeforeMatch);
+ }
+ ExpectAndConsume(tok::eod);
}
void Parser::HandlePragmaWeak() {
@@ -701,7 +869,8 @@ void Parser::HandlePragmaFloatControl() {
}
void Parser::HandlePragmaFEnvAccess() {
- assert(Tok.is(tok::annot_pragma_fenv_access));
+ assert(Tok.is(tok::annot_pragma_fenv_access) ||
+ Tok.is(tok::annot_pragma_fenv_access_ms));
tok::OnOffSwitch OOS =
static_cast<tok::OnOffSwitch>(
reinterpret_cast<uintptr_t>(Tok.getAnnotationValue()));
@@ -729,7 +898,32 @@ void Parser::HandlePragmaFEnvRound() {
reinterpret_cast<uintptr_t>(Tok.getAnnotationValue()));
SourceLocation PragmaLoc = ConsumeAnnotationToken();
- Actions.setRoundingMode(PragmaLoc, RM);
+ Actions.ActOnPragmaFEnvRound(PragmaLoc, RM);
+}
+
+void Parser::HandlePragmaCXLimitedRange() {
+ assert(Tok.is(tok::annot_pragma_cx_limited_range));
+ tok::OnOffSwitch OOS = static_cast<tok::OnOffSwitch>(
+ reinterpret_cast<uintptr_t>(Tok.getAnnotationValue()));
+
+ LangOptions::ComplexRangeKind Range;
+ switch (OOS) {
+ case tok::OOS_ON:
+ Range = LangOptions::CX_Limited;
+ break;
+ case tok::OOS_OFF:
+ Range = LangOptions::CX_Full;
+ break;
+ case tok::OOS_DEFAULT:
+ // According to ISO C99 standard chapter 7.3.4, the default value
+ // for the pragma is ``off'. -fcx-limited-range and -fcx-fortran-rules
+ // control the default value of these pragmas.
+ Range = getLangOpts().getComplexRange();
+ break;
+ }
+
+ SourceLocation PragmaLoc = ConsumeAnnotationToken();
+ Actions.ActOnPragmaCXLimitedRange(PragmaLoc, Range);
}
StmtResult Parser::HandlePragmaCaptured()
@@ -839,13 +1033,18 @@ void Parser::HandlePragmaMSPragma() {
// Figure out which #pragma we're dealing with. The switch has no default
// because lex shouldn't emit the annotation token for unrecognized pragmas.
typedef bool (Parser::*PragmaHandler)(StringRef, SourceLocation);
- PragmaHandler Handler = llvm::StringSwitch<PragmaHandler>(PragmaName)
- .Case("data_seg", &Parser::HandlePragmaMSSegment)
- .Case("bss_seg", &Parser::HandlePragmaMSSegment)
- .Case("const_seg", &Parser::HandlePragmaMSSegment)
- .Case("code_seg", &Parser::HandlePragmaMSSegment)
- .Case("section", &Parser::HandlePragmaMSSection)
- .Case("init_seg", &Parser::HandlePragmaMSInitSeg);
+ PragmaHandler Handler =
+ llvm::StringSwitch<PragmaHandler>(PragmaName)
+ .Case("data_seg", &Parser::HandlePragmaMSSegment)
+ .Case("bss_seg", &Parser::HandlePragmaMSSegment)
+ .Case("const_seg", &Parser::HandlePragmaMSSegment)
+ .Case("code_seg", &Parser::HandlePragmaMSSegment)
+ .Case("section", &Parser::HandlePragmaMSSection)
+ .Case("init_seg", &Parser::HandlePragmaMSInitSeg)
+ .Case("strict_gs_check", &Parser::HandlePragmaMSStrictGuardStackCheck)
+ .Case("function", &Parser::HandlePragmaMSFunction)
+ .Case("alloc_text", &Parser::HandlePragmaMSAllocText)
+ .Case("optimize", &Parser::HandlePragmaMSOptimize);
if (!(this->*Handler)(PragmaName, PragmaLocation)) {
// Pragma handling failed, and has been diagnosed. Slurp up the tokens
@@ -1082,17 +1281,123 @@ bool Parser::HandlePragmaMSInitSeg(StringRef PragmaName,
return true;
}
-namespace {
-struct PragmaLoopHintInfo {
- Token PragmaName;
- Token Option;
- ArrayRef<Token> Toks;
-};
-} // end anonymous namespace
+// #pragma strict_gs_check(pop)
+// #pragma strict_gs_check(push, "on" | "off")
+// #pragma strict_gs_check("on" | "off")
+bool Parser::HandlePragmaMSStrictGuardStackCheck(
+ StringRef PragmaName, SourceLocation PragmaLocation) {
+ if (ExpectAndConsume(tok::l_paren, diag::warn_pragma_expected_lparen,
+ PragmaName))
+ return false;
+
+ Sema::PragmaMsStackAction Action = Sema::PSK_Set;
+ if (Tok.is(tok::identifier)) {
+ StringRef PushPop = Tok.getIdentifierInfo()->getName();
+ if (PushPop == "push") {
+ PP.Lex(Tok);
+ Action = Sema::PSK_Push;
+ if (ExpectAndConsume(tok::comma, diag::warn_pragma_expected_punc,
+ PragmaName))
+ return false;
+ } else if (PushPop == "pop") {
+ PP.Lex(Tok);
+ Action = Sema::PSK_Pop;
+ }
+ }
+
+ bool Value = false;
+ if (Action & Sema::PSK_Push || Action & Sema::PSK_Set) {
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (II && II->isStr("off")) {
+ PP.Lex(Tok);
+ Value = false;
+ } else if (II && II->isStr("on")) {
+ PP.Lex(Tok);
+ Value = true;
+ } else {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_invalid_action)
+ << PragmaName;
+ return false;
+ }
+ }
+
+ // Finish the pragma: ')' $
+ if (ExpectAndConsume(tok::r_paren, diag::warn_pragma_expected_rparen,
+ PragmaName))
+ return false;
+
+ if (ExpectAndConsume(tok::eof, diag::warn_pragma_extra_tokens_at_eol,
+ PragmaName))
+ return false;
+
+ Actions.ActOnPragmaMSStrictGuardStackCheck(PragmaLocation, Action, Value);
+ return true;
+}
+
+bool Parser::HandlePragmaMSAllocText(StringRef PragmaName,
+ SourceLocation PragmaLocation) {
+ Token FirstTok = Tok;
+ if (ExpectAndConsume(tok::l_paren, diag::warn_pragma_expected_lparen,
+ PragmaName))
+ return false;
+
+ StringRef Section;
+ if (Tok.is(tok::string_literal)) {
+ ExprResult StringResult = ParseStringLiteralExpression();
+ if (StringResult.isInvalid())
+ return false; // Already diagnosed.
+ StringLiteral *SegmentName = cast<StringLiteral>(StringResult.get());
+ if (SegmentName->getCharByteWidth() != 1) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_expected_non_wide_string)
+ << PragmaName;
+ return false;
+ }
+ Section = SegmentName->getString();
+ } else if (Tok.is(tok::identifier)) {
+ Section = Tok.getIdentifierInfo()->getName();
+ PP.Lex(Tok);
+ } else {
+ PP.Diag(PragmaLocation, diag::warn_pragma_expected_section_name)
+ << PragmaName;
+ return false;
+ }
+
+ if (ExpectAndConsume(tok::comma, diag::warn_pragma_expected_comma,
+ PragmaName))
+ return false;
+
+ SmallVector<std::tuple<IdentifierInfo *, SourceLocation>> Functions;
+ while (true) {
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier)
+ << PragmaName;
+ return false;
+ }
+
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ Functions.emplace_back(II, Tok.getLocation());
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::comma))
+ break;
+ PP.Lex(Tok);
+ }
+
+ if (ExpectAndConsume(tok::r_paren, diag::warn_pragma_expected_rparen,
+ PragmaName) ||
+ ExpectAndConsume(tok::eof, diag::warn_pragma_extra_tokens_at_eol,
+ PragmaName))
+ return false;
+
+ Actions.ActOnPragmaMSAllocText(FirstTok.getLocation(), Section, Functions);
+ return true;
+}
static std::string PragmaLoopHintString(Token PragmaName, Token Option) {
StringRef Str = PragmaName.getIdentifierInfo()->getName();
- std::string ClangLoopStr = (llvm::Twine("clang loop ") + Str).str();
+ std::string ClangLoopStr("clang loop ");
+ if (Str == "loop" && Option.getIdentifierInfo())
+ ClangLoopStr += Option.getIdentifierInfo()->getName();
return std::string(llvm::StringSwitch<StringRef>(Str)
.Case("loop", ClangLoopStr)
.Case("unroll_and_jam", Str)
@@ -1376,8 +1681,8 @@ bool Parser::ParsePragmaAttributeSubjectMatchRuleSet(
Diag(Tok, diag::err_pragma_attribute_expected_subject_identifier);
return true;
}
- std::pair<Optional<attr::SubjectMatchRule>,
- Optional<attr::SubjectMatchRule> (*)(StringRef, bool)>
+ std::pair<std::optional<attr::SubjectMatchRule>,
+ std::optional<attr::SubjectMatchRule> (*)(StringRef, bool)>
Rule = isAttributeSubjectMatchRule(Name);
if (!Rule.first) {
Diag(Tok, diag::err_pragma_attribute_unknown_subject_rule) << Name;
@@ -1493,7 +1798,7 @@ getAttributeSubjectRulesRecoveryPointForToken(const Token &Tok) {
/// suggests the possible attribute subject rules in a fix-it together with
/// any other missing tokens.
DiagnosticBuilder createExpectedAttributeSubjectRulesTokenDiagnostic(
- unsigned DiagID, ParsedAttr &Attribute,
+ unsigned DiagID, ParsedAttributes &Attrs,
MissingAttributeSubjectRulesRecoveryPoint Point, Parser &PRef) {
SourceLocation Loc = PRef.getEndOfPreviousToken();
if (Loc.isInvalid())
@@ -1513,25 +1818,38 @@ DiagnosticBuilder createExpectedAttributeSubjectRulesTokenDiagnostic(
SourceRange FixItRange(Loc);
if (EndPoint == MissingAttributeSubjectRulesRecoveryPoint::None) {
// Gather the subject match rules that are supported by the attribute.
- SmallVector<std::pair<attr::SubjectMatchRule, bool>, 4> SubjectMatchRuleSet;
- Attribute.getMatchRules(PRef.getLangOpts(), SubjectMatchRuleSet);
- if (SubjectMatchRuleSet.empty()) {
+ // Add all the possible rules initially.
+ llvm::BitVector IsMatchRuleAvailable(attr::SubjectMatchRule_Last + 1, true);
+ // Remove the ones that are not supported by any of the attributes.
+ for (const ParsedAttr &Attribute : Attrs) {
+ SmallVector<std::pair<attr::SubjectMatchRule, bool>, 4> MatchRules;
+ Attribute.getMatchRules(PRef.getLangOpts(), MatchRules);
+ llvm::BitVector IsSupported(attr::SubjectMatchRule_Last + 1);
+ for (const auto &Rule : MatchRules) {
+ // Ensure that the missing rule is reported in the fix-it only when it's
+ // supported in the current language mode.
+ if (!Rule.second)
+ continue;
+ IsSupported[Rule.first] = true;
+ }
+ IsMatchRuleAvailable &= IsSupported;
+ }
+ if (IsMatchRuleAvailable.count() == 0) {
// FIXME: We can emit a "fix-it" with a subject list placeholder when
// placeholders will be supported by the fix-its.
return Diagnostic;
}
FixIt += "any(";
bool NeedsComma = false;
- for (const auto &I : SubjectMatchRuleSet) {
- // Ensure that the missing rule is reported in the fix-it only when it's
- // supported in the current language mode.
- if (!I.second)
+ for (unsigned I = 0; I <= attr::SubjectMatchRule_Last; I++) {
+ if (!IsMatchRuleAvailable[I])
continue;
if (NeedsComma)
FixIt += ", ";
else
NeedsComma = true;
- FixIt += attr::getSubjectMatchRuleSpelling(I.first);
+ FixIt += attr::getSubjectMatchRuleSpelling(
+ static_cast<attr::SubjectMatchRule>(I));
}
FixIt += ")";
// Check if we need to remove the range
@@ -1581,7 +1899,8 @@ void Parser::HandlePragmaAttribute() {
ConsumeToken();
};
- if (Tok.is(tok::l_square) && NextToken().is(tok::l_square)) {
+ if ((Tok.is(tok::l_square) && NextToken().is(tok::l_square)) ||
+ Tok.isRegularKeywordAttribute()) {
// Parse the CXX11 style attribute.
ParseCXX11AttributeSpecifier(Attrs);
} else if (Tok.is(tok::kw___attribute)) {
@@ -1592,22 +1911,35 @@ void Parser::HandlePragmaAttribute() {
if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen_after, "("))
return SkipToEnd();
- if (Tok.isNot(tok::identifier)) {
- Diag(Tok, diag::err_pragma_attribute_expected_attribute_name);
- SkipToEnd();
- return;
+ // FIXME: The practical usefulness of completion here is limited because
+ // we only get here if the line has balanced parens.
+ if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
+ // FIXME: suppress completion of unsupported attributes?
+ Actions.CodeCompleteAttribute(AttributeCommonInfo::Syntax::AS_GNU);
+ return SkipToEnd();
}
- IdentifierInfo *AttrName = Tok.getIdentifierInfo();
- SourceLocation AttrNameLoc = ConsumeToken();
- if (Tok.isNot(tok::l_paren))
- Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- ParsedAttr::AS_GNU);
- else
- ParseGNUAttributeArgs(AttrName, AttrNameLoc, Attrs, /*EndLoc=*/nullptr,
- /*ScopeName=*/nullptr,
- /*ScopeLoc=*/SourceLocation(), ParsedAttr::AS_GNU,
- /*Declarator=*/nullptr);
+ // Parse the comma-separated list of attributes.
+ do {
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_pragma_attribute_expected_attribute_name);
+ SkipToEnd();
+ return;
+ }
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::l_paren))
+ Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
+ ParsedAttr::Form::GNU());
+ else
+ ParseGNUAttributeArgs(AttrName, AttrNameLoc, Attrs, /*EndLoc=*/nullptr,
+ /*ScopeName=*/nullptr,
+ /*ScopeLoc=*/SourceLocation(),
+ ParsedAttr::Form::GNU(),
+ /*Declarator=*/nullptr);
+ } while (TryConsumeToken(tok::comma));
if (ExpectAndConsume(tok::r_paren))
return SkipToEnd();
@@ -1645,26 +1977,19 @@ void Parser::HandlePragmaAttribute() {
return;
}
- // Ensure that we don't have more than one attribute.
- if (Attrs.size() > 1) {
- SourceLocation Loc = Attrs[1].getLoc();
- Diag(Loc, diag::err_pragma_attribute_multiple_attributes);
- SkipToEnd();
- return;
- }
-
- ParsedAttr &Attribute = *Attrs.begin();
- if (!Attribute.isSupportedByPragmaAttribute()) {
- Diag(PragmaLoc, diag::err_pragma_attribute_unsupported_attribute)
- << Attribute;
- SkipToEnd();
- return;
+ for (const ParsedAttr &Attribute : Attrs) {
+ if (!Attribute.isSupportedByPragmaAttribute()) {
+ Diag(PragmaLoc, diag::err_pragma_attribute_unsupported_attribute)
+ << Attribute;
+ SkipToEnd();
+ return;
+ }
}
// Parse the subject-list.
if (!TryConsumeToken(tok::comma)) {
createExpectedAttributeSubjectRulesTokenDiagnostic(
- diag::err_expected, Attribute,
+ diag::err_expected, Attrs,
MissingAttributeSubjectRulesRecoveryPoint::Comma, *this)
<< tok::comma;
SkipToEnd();
@@ -1673,7 +1998,7 @@ void Parser::HandlePragmaAttribute() {
if (Tok.isNot(tok::identifier)) {
createExpectedAttributeSubjectRulesTokenDiagnostic(
- diag::err_pragma_attribute_invalid_subject_set_specifier, Attribute,
+ diag::err_pragma_attribute_invalid_subject_set_specifier, Attrs,
MissingAttributeSubjectRulesRecoveryPoint::ApplyTo, *this);
SkipToEnd();
return;
@@ -1681,7 +2006,7 @@ void Parser::HandlePragmaAttribute() {
const IdentifierInfo *II = Tok.getIdentifierInfo();
if (!II->isStr("apply_to")) {
createExpectedAttributeSubjectRulesTokenDiagnostic(
- diag::err_pragma_attribute_invalid_subject_set_specifier, Attribute,
+ diag::err_pragma_attribute_invalid_subject_set_specifier, Attrs,
MissingAttributeSubjectRulesRecoveryPoint::ApplyTo, *this);
SkipToEnd();
return;
@@ -1690,7 +2015,7 @@ void Parser::HandlePragmaAttribute() {
if (!TryConsumeToken(tok::equal)) {
createExpectedAttributeSubjectRulesTokenDiagnostic(
- diag::err_expected, Attribute,
+ diag::err_expected, Attrs,
MissingAttributeSubjectRulesRecoveryPoint::Equals, *this)
<< tok::equal;
SkipToEnd();
@@ -1720,8 +2045,10 @@ void Parser::HandlePragmaAttribute() {
if (Info->Action == PragmaAttributeInfo::Push)
Actions.ActOnPragmaAttributeEmptyPush(PragmaLoc, Info->Namespace);
- Actions.ActOnPragmaAttributeAttribute(Attribute, PragmaLoc,
- std::move(SubjectMatchRules));
+ for (ParsedAttr &Attribute : Attrs) {
+ Actions.ActOnPragmaAttributeAttribute(Attribute, PragmaLoc,
+ SubjectMatchRules);
+ }
}
// #pragma GCC visibility comes in two variants:
@@ -1884,8 +2211,8 @@ void PragmaPackHandler::HandlePragma(Preprocessor &PP,
return;
}
- PragmaPackInfo *Info =
- PP.getPreprocessorAllocator().Allocate<PragmaPackInfo>(1);
+ Sema::PragmaPackInfo *Info =
+ PP.getPreprocessorAllocator().Allocate<Sema::PragmaPackInfo>(1);
Info->Action = Action;
Info->SlotLabel = SlotLabel;
Info->Alignment = Alignment;
@@ -2363,42 +2690,42 @@ void PragmaOpenCLExtensionHandler::HandlePragma(Preprocessor &PP,
StateLoc, State);
}
-/// Handle '#pragma omp ...' when OpenMP is disabled.
-///
-void PragmaNoOpenMPHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducer Introducer,
- Token &FirstTok) {
- if (!PP.getDiagnostics().isIgnored(diag::warn_pragma_omp_ignored,
- FirstTok.getLocation())) {
- PP.Diag(FirstTok, diag::warn_pragma_omp_ignored);
- PP.getDiagnostics().setSeverity(diag::warn_pragma_omp_ignored,
- diag::Severity::Ignored, SourceLocation());
+/// Handle '#pragma omp ...' when OpenMP is disabled and '#pragma acc ...' when
+/// OpenACC is disabled.
+template <diag::kind IgnoredDiag>
+void PragmaNoSupportHandler<IgnoredDiag>::HandlePragma(
+ Preprocessor &PP, PragmaIntroducer Introducer, Token &FirstTok) {
+ if (!PP.getDiagnostics().isIgnored(IgnoredDiag, FirstTok.getLocation())) {
+ PP.Diag(FirstTok, IgnoredDiag);
+ PP.getDiagnostics().setSeverity(IgnoredDiag, diag::Severity::Ignored,
+ SourceLocation());
}
PP.DiscardUntilEndOfDirective();
}
-/// Handle '#pragma omp ...' when OpenMP is enabled.
-///
-void PragmaOpenMPHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducer Introducer,
- Token &FirstTok) {
+/// Handle '#pragma omp ...' when OpenMP is enabled, and handle '#pragma acc...'
+/// when OpenACC is enabled.
+template <tok::TokenKind StartTok, tok::TokenKind EndTok,
+ diag::kind UnexpectedDiag>
+void PragmaSupportHandler<StartTok, EndTok, UnexpectedDiag>::HandlePragma(
+ Preprocessor &PP, PragmaIntroducer Introducer, Token &FirstTok) {
SmallVector<Token, 16> Pragma;
Token Tok;
Tok.startToken();
- Tok.setKind(tok::annot_pragma_openmp);
+ Tok.setKind(StartTok);
Tok.setLocation(Introducer.Loc);
while (Tok.isNot(tok::eod) && Tok.isNot(tok::eof)) {
Pragma.push_back(Tok);
PP.Lex(Tok);
- if (Tok.is(tok::annot_pragma_openmp)) {
- PP.Diag(Tok, diag::err_omp_unexpected_directive) << 0;
+ if (Tok.is(StartTok)) {
+ PP.Diag(Tok, UnexpectedDiag) << 0;
unsigned InnerPragmaCnt = 1;
while (InnerPragmaCnt != 0) {
PP.Lex(Tok);
- if (Tok.is(tok::annot_pragma_openmp))
+ if (Tok.is(StartTok))
++InnerPragmaCnt;
- else if (Tok.is(tok::annot_pragma_openmp_end))
+ else if (Tok.is(EndTok))
--InnerPragmaCnt;
}
PP.Lex(Tok);
@@ -2406,7 +2733,7 @@ void PragmaOpenMPHandler::HandlePragma(Preprocessor &PP,
}
SourceLocation EodLoc = Tok.getLocation();
Tok.startToken();
- Tok.setKind(tok::annot_pragma_openmp_end);
+ Tok.setKind(EndTok);
Tok.setLocation(EodLoc);
Pragma.push_back(Tok);
@@ -2870,14 +3197,6 @@ void PragmaCommentHandler::HandlePragma(Preprocessor &PP,
return;
}
- // On PS4, issue a warning about any pragma comments other than
- // #pragma comment lib.
- if (PP.getTargetInfo().getTriple().isPS4() && Kind != PCK_Lib) {
- PP.Diag(Tok.getLocation(), diag::warn_pragma_comment_ignored)
- << II->getName();
- return;
- }
-
// Read the optional string if present.
PP.Lex(Tok);
std::string ArgumentString;
@@ -2952,12 +3271,13 @@ void PragmaOptimizeHandler::HandlePragma(Preprocessor &PP,
namespace {
/// Used as the annotation value for tok::annot_pragma_fp.
struct TokFPAnnotValue {
- enum FlagKinds { Contract, Reassociate, Exceptions };
enum FlagValues { On, Off, Fast };
- llvm::Optional<LangOptions::FPModeKind> ContractValue;
- llvm::Optional<LangOptions::FPModeKind> ReassociateValue;
- llvm::Optional<LangOptions::FPExceptionModeKind> ExceptionsValue;
+ std::optional<LangOptions::FPModeKind> ContractValue;
+ std::optional<LangOptions::FPModeKind> ReassociateValue;
+ std::optional<LangOptions::FPModeKind> ReciprocalValue;
+ std::optional<LangOptions::FPExceptionModeKind> ExceptionsValue;
+ std::optional<LangOptions::FPEvalMethodKind> EvalMethodValue;
};
} // end anonymous namespace
@@ -2979,12 +3299,13 @@ void PragmaFPHandler::HandlePragma(Preprocessor &PP,
IdentifierInfo *OptionInfo = Tok.getIdentifierInfo();
auto FlagKind =
- llvm::StringSwitch<llvm::Optional<TokFPAnnotValue::FlagKinds>>(
- OptionInfo->getName())
- .Case("contract", TokFPAnnotValue::Contract)
- .Case("reassociate", TokFPAnnotValue::Reassociate)
- .Case("exceptions", TokFPAnnotValue::Exceptions)
- .Default(None);
+ llvm::StringSwitch<std::optional<PragmaFPKind>>(OptionInfo->getName())
+ .Case("contract", PFK_Contract)
+ .Case("reassociate", PFK_Reassociate)
+ .Case("exceptions", PFK_Exceptions)
+ .Case("eval_method", PFK_EvalMethod)
+ .Case("reciprocal", PFK_Reciprocal)
+ .Default(std::nullopt);
if (!FlagKind) {
PP.Diag(Tok.getLocation(), diag::err_pragma_fp_invalid_option)
<< /*MissingOption=*/false << OptionInfo;
@@ -2998,8 +3319,11 @@ void PragmaFPHandler::HandlePragma(Preprocessor &PP,
return;
}
PP.Lex(Tok);
+ bool isEvalMethodDouble =
+ Tok.is(tok::kw_double) && FlagKind == PFK_EvalMethod;
- if (Tok.isNot(tok::identifier)) {
+ // Don't diagnose if we have an eval_metod pragma with "double" kind.
+ if (Tok.isNot(tok::identifier) && !isEvalMethodDouble) {
PP.Diag(Tok.getLocation(), diag::err_pragma_fp_invalid_argument)
<< PP.getSpelling(Tok) << OptionInfo->getName()
<< static_cast<int>(*FlagKind);
@@ -3007,44 +3331,58 @@ void PragmaFPHandler::HandlePragma(Preprocessor &PP,
}
const IdentifierInfo *II = Tok.getIdentifierInfo();
- if (FlagKind == TokFPAnnotValue::Contract) {
+ if (FlagKind == PFK_Contract) {
AnnotValue->ContractValue =
- llvm::StringSwitch<llvm::Optional<LangOptions::FPModeKind>>(
+ llvm::StringSwitch<std::optional<LangOptions::FPModeKind>>(
II->getName())
.Case("on", LangOptions::FPModeKind::FPM_On)
.Case("off", LangOptions::FPModeKind::FPM_Off)
.Case("fast", LangOptions::FPModeKind::FPM_Fast)
- .Default(llvm::None);
+ .Default(std::nullopt);
if (!AnnotValue->ContractValue) {
PP.Diag(Tok.getLocation(), diag::err_pragma_fp_invalid_argument)
<< PP.getSpelling(Tok) << OptionInfo->getName() << *FlagKind;
return;
}
- } else if (FlagKind == TokFPAnnotValue::Reassociate) {
- AnnotValue->ReassociateValue =
- llvm::StringSwitch<llvm::Optional<LangOptions::FPModeKind>>(
- II->getName())
- .Case("on", LangOptions::FPModeKind::FPM_On)
- .Case("off", LangOptions::FPModeKind::FPM_Off)
- .Default(llvm::None);
- if (!AnnotValue->ReassociateValue) {
+ } else if (FlagKind == PFK_Reassociate || FlagKind == PFK_Reciprocal) {
+ auto &Value = FlagKind == PFK_Reassociate ? AnnotValue->ReassociateValue
+ : AnnotValue->ReciprocalValue;
+ Value = llvm::StringSwitch<std::optional<LangOptions::FPModeKind>>(
+ II->getName())
+ .Case("on", LangOptions::FPModeKind::FPM_On)
+ .Case("off", LangOptions::FPModeKind::FPM_Off)
+ .Default(std::nullopt);
+ if (!Value) {
PP.Diag(Tok.getLocation(), diag::err_pragma_fp_invalid_argument)
<< PP.getSpelling(Tok) << OptionInfo->getName() << *FlagKind;
return;
}
- } else if (FlagKind == TokFPAnnotValue::Exceptions) {
+ } else if (FlagKind == PFK_Exceptions) {
AnnotValue->ExceptionsValue =
- llvm::StringSwitch<llvm::Optional<LangOptions::FPExceptionModeKind>>(
+ llvm::StringSwitch<std::optional<LangOptions::FPExceptionModeKind>>(
II->getName())
.Case("ignore", LangOptions::FPE_Ignore)
.Case("maytrap", LangOptions::FPE_MayTrap)
.Case("strict", LangOptions::FPE_Strict)
- .Default(llvm::None);
+ .Default(std::nullopt);
if (!AnnotValue->ExceptionsValue) {
PP.Diag(Tok.getLocation(), diag::err_pragma_fp_invalid_argument)
<< PP.getSpelling(Tok) << OptionInfo->getName() << *FlagKind;
return;
}
+ } else if (FlagKind == PFK_EvalMethod) {
+ AnnotValue->EvalMethodValue =
+ llvm::StringSwitch<std::optional<LangOptions::FPEvalMethodKind>>(
+ II->getName())
+ .Case("source", LangOptions::FPEvalMethodKind::FEM_Source)
+ .Case("double", LangOptions::FPEvalMethodKind::FEM_Double)
+ .Case("extended", LangOptions::FPEvalMethodKind::FEM_Extended)
+ .Default(std::nullopt);
+ if (!AnnotValue->EvalMethodValue) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_fp_invalid_argument)
+ << PP.getSpelling(Tok) << OptionInfo->getName() << *FlagKind;
+ return;
+ }
}
PP.Lex(Tok);
@@ -3138,15 +3476,24 @@ void Parser::HandlePragmaFP() {
reinterpret_cast<TokFPAnnotValue *>(Tok.getAnnotationValue());
if (AnnotValue->ReassociateValue)
- Actions.ActOnPragmaFPReassociate(Tok.getLocation(),
- *AnnotValue->ReassociateValue ==
- LangOptions::FPModeKind::FPM_On);
+ Actions.ActOnPragmaFPValueChangingOption(
+ Tok.getLocation(), PFK_Reassociate,
+ *AnnotValue->ReassociateValue == LangOptions::FPModeKind::FPM_On);
+
+ if (AnnotValue->ReciprocalValue)
+ Actions.ActOnPragmaFPValueChangingOption(
+ Tok.getLocation(), PFK_Reciprocal,
+ *AnnotValue->ReciprocalValue == LangOptions::FPModeKind::FPM_On);
+
if (AnnotValue->ContractValue)
Actions.ActOnPragmaFPContract(Tok.getLocation(),
*AnnotValue->ContractValue);
if (AnnotValue->ExceptionsValue)
Actions.ActOnPragmaFPExceptions(Tok.getLocation(),
*AnnotValue->ExceptionsValue);
+ if (AnnotValue->EvalMethodValue)
+ Actions.ActOnPragmaFPEvalMethod(Tok.getLocation(),
+ *AnnotValue->EvalMethodValue);
ConsumeAnnotationToken();
}
@@ -3186,7 +3533,7 @@ static bool ParseLoopHintValue(Preprocessor &PP, Token &Tok, Token PragmaName,
ValueList.push_back(EOFTok); // Terminates expression for parsing.
markAsReinjectedForRelexing(ValueList);
- Info.Toks = llvm::makeArrayRef(ValueList).copy(PP.getPreprocessorAllocator());
+ Info.Toks = llvm::ArrayRef(ValueList).copy(PP.getPreprocessorAllocator());
Info.PragmaName = PragmaName;
Info.Option = Option;
@@ -3439,58 +3786,100 @@ void PragmaMSIntrinsicHandler::HandlePragma(Preprocessor &PP,
<< "intrinsic";
}
-// #pragma optimize("gsty", on|off)
-void PragmaMSOptimizeHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducer Introducer,
- Token &Tok) {
- SourceLocation StartLoc = Tok.getLocation();
- PP.Lex(Tok);
+bool Parser::HandlePragmaMSFunction(StringRef PragmaName,
+ SourceLocation PragmaLocation) {
+ Token FirstTok = Tok;
- if (Tok.isNot(tok::l_paren)) {
- PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_lparen) << "optimize";
- return;
+ if (ExpectAndConsume(tok::l_paren, diag::warn_pragma_expected_lparen,
+ PragmaName))
+ return false;
+
+ bool SuggestIntrinH = !PP.isMacroDefined("__INTRIN_H");
+
+ llvm::SmallVector<StringRef> NoBuiltins;
+ while (Tok.is(tok::identifier)) {
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (!II->getBuiltinID())
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_intrinsic_builtin)
+ << II << SuggestIntrinH;
+ else
+ NoBuiltins.emplace_back(II->getName());
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::comma))
+ break;
+ PP.Lex(Tok); // ,
}
- PP.Lex(Tok);
+
+ if (ExpectAndConsume(tok::r_paren, diag::warn_pragma_expected_rparen,
+ PragmaName) ||
+ ExpectAndConsume(tok::eof, diag::warn_pragma_extra_tokens_at_eol,
+ PragmaName))
+ return false;
+
+ Actions.ActOnPragmaMSFunction(FirstTok.getLocation(), NoBuiltins);
+ return true;
+}
+
+// #pragma optimize("gsty", on|off)
+bool Parser::HandlePragmaMSOptimize(StringRef PragmaName,
+ SourceLocation PragmaLocation) {
+ Token FirstTok = Tok;
+ if (ExpectAndConsume(tok::l_paren, diag::warn_pragma_expected_lparen,
+ PragmaName))
+ return false;
if (Tok.isNot(tok::string_literal)) {
- PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_string) << "optimize";
- return;
+ PP.Diag(PragmaLocation, diag::warn_pragma_expected_string) << PragmaName;
+ return false;
}
- // We could syntax check the string but it's probably not worth the effort.
- PP.Lex(Tok);
-
- if (Tok.isNot(tok::comma)) {
- PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_comma) << "optimize";
- return;
+ ExprResult StringResult = ParseStringLiteralExpression();
+ if (StringResult.isInvalid())
+ return false; // Already diagnosed.
+ StringLiteral *OptimizationList = cast<StringLiteral>(StringResult.get());
+ if (OptimizationList->getCharByteWidth() != 1) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_expected_non_wide_string)
+ << PragmaName;
+ return false;
}
- PP.Lex(Tok);
- if (Tok.is(tok::eod) || Tok.is(tok::r_paren)) {
- PP.Diag(Tok.getLocation(), diag::warn_pragma_missing_argument)
- << "optimize" << /*Expected=*/true << "'on' or 'off'";
- return;
+ if (ExpectAndConsume(tok::comma, diag::warn_pragma_expected_comma,
+ PragmaName))
+ return false;
+
+ if (Tok.is(tok::eof) || Tok.is(tok::r_paren)) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_missing_argument)
+ << PragmaName << /*Expected=*/true << "'on' or 'off'";
+ return false;
}
IdentifierInfo *II = Tok.getIdentifierInfo();
if (!II || (!II->isStr("on") && !II->isStr("off"))) {
- PP.Diag(Tok.getLocation(), diag::warn_pragma_invalid_argument)
- << PP.getSpelling(Tok) << "optimize" << /*Expected=*/true
+ PP.Diag(PragmaLocation, diag::warn_pragma_invalid_argument)
+ << PP.getSpelling(Tok) << PragmaName << /*Expected=*/true
<< "'on' or 'off'";
- return;
+ return false;
}
+ bool IsOn = II->isStr("on");
PP.Lex(Tok);
- if (Tok.isNot(tok::r_paren)) {
- PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_rparen) << "optimize";
- return;
- }
- PP.Lex(Tok);
+ if (ExpectAndConsume(tok::r_paren, diag::warn_pragma_expected_rparen,
+ PragmaName))
+ return false;
- if (Tok.isNot(tok::eod)) {
- PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
- << "optimize";
- return;
+ // TODO: Add support for "sgty"
+ if (!OptimizationList->getString().empty()) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_invalid_argument)
+ << OptimizationList->getString() << PragmaName << /*Expected=*/true
+ << "\"\"";
+ return false;
}
- PP.Diag(StartLoc, diag::warn_pragma_optimize);
+
+ if (ExpectAndConsume(tok::eof, diag::warn_pragma_extra_tokens_at_eol,
+ PragmaName))
+ return false;
+
+ Actions.ActOnPragmaMSOptimize(FirstTok.getLocation(), IsOn);
+ return true;
}
void PragmaForceCUDAHostDeviceHandler::HandlePragma(
@@ -3644,7 +4033,7 @@ void PragmaAttributeHandler::HandlePragma(Preprocessor &PP,
markAsReinjectedForRelexing(AttributeTokens);
Info->Tokens =
- llvm::makeArrayRef(AttributeTokens).copy(PP.getPreprocessorAllocator());
+ llvm::ArrayRef(AttributeTokens).copy(PP.getPreprocessorAllocator());
}
if (Tok.isNot(tok::eod))
@@ -3722,3 +4111,40 @@ void PragmaMaxTokensTotalHandler::HandlePragma(Preprocessor &PP,
PP.overrideMaxTokens(MaxTokens, Loc);
}
+
+// Handle '#pragma clang riscv intrinsic vector'.
+// '#pragma clang riscv intrinsic sifive_vector'.
+void PragmaRISCVHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducer Introducer,
+ Token &FirstToken) {
+ Token Tok;
+ PP.Lex(Tok);
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+
+ if (!II || !II->isStr("intrinsic")) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_invalid_argument)
+ << PP.getSpelling(Tok) << "riscv" << /*Expected=*/true << "'intrinsic'";
+ return;
+ }
+
+ PP.Lex(Tok);
+ II = Tok.getIdentifierInfo();
+ if (!II || !(II->isStr("vector") || II->isStr("sifive_vector"))) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_invalid_argument)
+ << PP.getSpelling(Tok) << "riscv" << /*Expected=*/true
+ << "'vector' or 'sifive_vector'";
+ return;
+ }
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "clang riscv intrinsic";
+ return;
+ }
+
+ if (II->isStr("vector"))
+ Actions.DeclareRISCVVBuiltins = true;
+ else if (II->isStr("sifive_vector"))
+ Actions.DeclareRISCVSiFiveVectorBuiltins = true;
+}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp b/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp
index ebfe048513b1..d0ff33bd1379 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp
@@ -14,13 +14,17 @@
#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/Basic/Attributes.h"
#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TokenKinds.h"
#include "clang/Parse/LoopHint.h"
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "llvm/ADT/STLExtras.h"
+#include <optional>
using namespace clang;
@@ -36,8 +40,8 @@ StmtResult Parser::ParseStatement(SourceLocation *TrailingElseLoc,
// We may get back a null statement if we found a #pragma. Keep going until
// we get an actual statement.
+ StmtVector Stmts;
do {
- StmtVector Stmts;
Res = ParseStatementOrDeclaration(Stmts, StmtCtx, TrailingElseLoc);
} while (!Res.isInvalid() && !Res.get());
@@ -105,15 +109,21 @@ Parser::ParseStatementOrDeclaration(StmtVector &Stmts,
// statement are different from [[]] attributes that follow an __attribute__
// at the start of the statement. Thus, we're not using MaybeParseAttributes
// here because we don't want to allow arbitrary orderings.
- ParsedAttributesWithRange Attrs(AttrFactory);
- MaybeParseCXX11Attributes(Attrs, nullptr, /*MightBeObjCMessageSend*/ true);
+ ParsedAttributes CXX11Attrs(AttrFactory);
+ MaybeParseCXX11Attributes(CXX11Attrs, /*MightBeObjCMessageSend*/ true);
+ ParsedAttributes GNUAttrs(AttrFactory);
if (getLangOpts().OpenCL)
- MaybeParseGNUAttributes(Attrs);
+ MaybeParseGNUAttributes(GNUAttrs);
StmtResult Res = ParseStatementOrDeclarationAfterAttributes(
- Stmts, StmtCtx, TrailingElseLoc, Attrs);
+ Stmts, StmtCtx, TrailingElseLoc, CXX11Attrs, GNUAttrs);
MaybeDestroyTemplateIds();
+ // Attributes that are left should all go on the statement, so concatenate the
+ // two lists.
+ ParsedAttributes Attrs(AttrFactory);
+ takeAndConcatenateAttrs(CXX11Attrs, GNUAttrs, Attrs);
+
assert((Attrs.empty() || Res.isInvalid() || Res.isUsable()) &&
"attributes on empty statement");
@@ -158,7 +168,8 @@ private:
StmtResult Parser::ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
- SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs) {
+ SourceLocation *TrailingElseLoc, ParsedAttributes &CXX11Attrs,
+ ParsedAttributes &GNUAttrs) {
const char *SemiError = nullptr;
StmtResult Res;
SourceLocation GNUAttributeLoc;
@@ -181,9 +192,16 @@ Retry:
Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Statement);
return StmtError();
- case tok::identifier: {
+ case tok::identifier:
+ ParseIdentifier: {
Token Next = NextToken();
if (Next.is(tok::colon)) { // C99 6.8.1: labeled-statement
+ // Both C++11 and GNU attributes preceding the label appertain to the
+ // label, so put them in a single list to pass on to
+ // ParseLabeledStatement().
+ ParsedAttributes Attrs(AttrFactory);
+ takeAndConcatenateAttrs(CXX11Attrs, GNUAttrs, Attrs);
+
// identifier ':' statement
return ParseLabeledStatement(Attrs, StmtCtx);
}
@@ -209,29 +227,34 @@ Retry:
}
// Fall through
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
default: {
- if ((getLangOpts().CPlusPlus || getLangOpts().MicrosoftExt ||
- (StmtCtx & ParsedStmtContext::AllowDeclarationsInC) !=
- ParsedStmtContext()) &&
- ((GNUAttributeLoc.isValid() &&
- !(!Attrs.empty() &&
- llvm::all_of(
- Attrs, [](ParsedAttr &Attr) { return Attr.isStmtAttr(); }))) ||
+ bool HaveAttrs = !CXX11Attrs.empty() || !GNUAttrs.empty();
+ auto IsStmtAttr = [](ParsedAttr &Attr) { return Attr.isStmtAttr(); };
+ bool AllAttrsAreStmtAttrs = llvm::all_of(CXX11Attrs, IsStmtAttr) &&
+ llvm::all_of(GNUAttrs, IsStmtAttr);
+ if (((GNUAttributeLoc.isValid() && !(HaveAttrs && AllAttrsAreStmtAttrs)) ||
isDeclarationStatement())) {
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
DeclGroupPtrTy Decl;
if (GNUAttributeLoc.isValid()) {
DeclStart = GNUAttributeLoc;
- Decl = ParseDeclaration(DeclaratorContext::Block, DeclEnd, Attrs,
- &GNUAttributeLoc);
+ Decl = ParseDeclaration(DeclaratorContext::Block, DeclEnd, CXX11Attrs,
+ GNUAttrs, &GNUAttributeLoc);
} else {
- Decl = ParseDeclaration(DeclaratorContext::Block, DeclEnd, Attrs);
+ Decl = ParseDeclaration(DeclaratorContext::Block, DeclEnd, CXX11Attrs,
+ GNUAttrs);
}
- if (Attrs.Range.getBegin().isValid())
- DeclStart = Attrs.Range.getBegin();
+ if (CXX11Attrs.Range.getBegin().isValid()) {
+ // The caller must guarantee that the CXX11Attrs appear before the
+ // GNUAttrs, and we rely on that here.
+ assert(GNUAttrs.Range.getBegin().isInvalid() ||
+ GNUAttrs.Range.getBegin() > CXX11Attrs.Range.getBegin());
+ DeclStart = CXX11Attrs.Range.getBegin();
+ } else if (GNUAttrs.Range.getBegin().isValid())
+ DeclStart = GNUAttrs.Range.getBegin();
return Actions.ActOnDeclStmt(Decl, DeclStart, DeclEnd);
}
@@ -240,12 +263,24 @@ Retry:
return StmtError();
}
- return ParseExprStatement(StmtCtx);
+ switch (Tok.getKind()) {
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) case tok::kw___##Trait:
+#include "clang/Basic/TransformTypeTraits.def"
+ if (NextToken().is(tok::less)) {
+ Tok.setKind(tok::identifier);
+ Diag(Tok, diag::ext_keyword_as_ident)
+ << Tok.getIdentifierInfo()->getName() << 0;
+ goto ParseIdentifier;
+ }
+ [[fallthrough]];
+ default:
+ return ParseExprStatement(StmtCtx);
+ }
}
case tok::kw___attribute: {
GNUAttributeLoc = Tok.getLocation();
- ParseGNUAttributes(Attrs);
+ ParseGNUAttributes(GNUAttrs);
goto Retry;
}
@@ -297,10 +332,18 @@ Retry:
break;
case tok::kw_asm: {
- ProhibitAttributes(Attrs);
+ for (const ParsedAttr &AL : CXX11Attrs)
+ // Could be relaxed if asm-related regular keyword attributes are
+ // added later.
+ (AL.isRegularKeywordAttribute()
+ ? Diag(AL.getRange().getBegin(), diag::err_keyword_not_allowed)
+ : Diag(AL.getRange().getBegin(), diag::warn_attribute_ignored))
+ << AL;
+ // Prevent these from being interpreted as statement attributes later on.
+ CXX11Attrs.clear();
+ ProhibitAttributes(GNUAttrs);
bool msAsm = false;
Res = ParseAsmStatement(msAsm);
- Res = Actions.ActOnFinishFullStmt(Res.get());
if (msAsm) return Res;
SemiError = "asm";
break;
@@ -308,7 +351,8 @@ Retry:
case tok::kw___if_exists:
case tok::kw___if_not_exists:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
ParseMicrosoftIfExistsStatement(Stmts);
// An __if_exists block is like a compound statement, but it doesn't create
// a new scope.
@@ -318,7 +362,8 @@ Retry:
return ParseCXXTryBlock();
case tok::kw___try:
- ProhibitAttributes(Attrs); // TODO: is it correct?
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
return ParseSEHTryBlock();
case tok::kw___leave:
@@ -327,106 +372,139 @@ Retry:
break;
case tok::annot_pragma_vis:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
HandlePragmaVisibility();
return StmtEmpty();
case tok::annot_pragma_pack:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
HandlePragmaPack();
return StmtEmpty();
case tok::annot_pragma_msstruct:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
HandlePragmaMSStruct();
return StmtEmpty();
case tok::annot_pragma_align:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
HandlePragmaAlign();
return StmtEmpty();
case tok::annot_pragma_weak:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
HandlePragmaWeak();
return StmtEmpty();
case tok::annot_pragma_weakalias:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
HandlePragmaWeakAlias();
return StmtEmpty();
case tok::annot_pragma_redefine_extname:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
HandlePragmaRedefineExtname();
return StmtEmpty();
case tok::annot_pragma_fp_contract:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
Diag(Tok, diag::err_pragma_file_or_compound_scope) << "fp_contract";
ConsumeAnnotationToken();
return StmtError();
case tok::annot_pragma_fp:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
Diag(Tok, diag::err_pragma_file_or_compound_scope) << "clang fp";
ConsumeAnnotationToken();
return StmtError();
case tok::annot_pragma_fenv_access:
- ProhibitAttributes(Attrs);
- Diag(Tok, diag::err_pragma_stdc_fenv_access_scope);
+ case tok::annot_pragma_fenv_access_ms:
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
+ Diag(Tok, diag::err_pragma_file_or_compound_scope)
+ << (Kind == tok::annot_pragma_fenv_access ? "STDC FENV_ACCESS"
+ : "fenv_access");
ConsumeAnnotationToken();
return StmtEmpty();
case tok::annot_pragma_fenv_round:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
Diag(Tok, diag::err_pragma_file_or_compound_scope) << "STDC FENV_ROUND";
ConsumeAnnotationToken();
return StmtError();
+ case tok::annot_pragma_cx_limited_range:
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
+ Diag(Tok, diag::err_pragma_file_or_compound_scope)
+ << "STDC CX_LIMITED_RANGE";
+ ConsumeAnnotationToken();
+ return StmtError();
+
case tok::annot_pragma_float_control:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
Diag(Tok, diag::err_pragma_file_or_compound_scope) << "float_control";
ConsumeAnnotationToken();
return StmtError();
case tok::annot_pragma_opencl_extension:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
HandlePragmaOpenCLExtension();
return StmtEmpty();
case tok::annot_pragma_captured:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
return HandlePragmaCaptured();
case tok::annot_pragma_openmp:
// Prohibit attributes that are not OpenMP attributes, but only before
// processing a #pragma omp clause.
- ProhibitAttributes(Attrs);
- LLVM_FALLTHROUGH;
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
+ [[fallthrough]];
case tok::annot_attr_openmp:
// Do not prohibit attributes if they were OpenMP attributes.
return ParseOpenMPDeclarativeOrExecutableDirective(StmtCtx);
+ case tok::annot_pragma_openacc:
+ return ParseOpenACCDirectiveStmt();
+
case tok::annot_pragma_ms_pointers_to_members:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
HandlePragmaMSPointersToMembers();
return StmtEmpty();
case tok::annot_pragma_ms_pragma:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
HandlePragmaMSPragma();
return StmtEmpty();
case tok::annot_pragma_ms_vtordisp:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
HandlePragmaMSVtorDisp();
return StmtEmpty();
case tok::annot_pragma_loop_hint:
- ProhibitAttributes(Attrs);
- return ParsePragmaLoopHint(Stmts, StmtCtx, TrailingElseLoc, Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
+ return ParsePragmaLoopHint(Stmts, StmtCtx, TrailingElseLoc, CXX11Attrs);
case tok::annot_pragma_dump:
HandlePragmaDump();
@@ -480,9 +558,22 @@ StmtResult Parser::ParseExprStatement(ParsedStmtContext StmtCtx) {
return ParseCaseStatement(StmtCtx, /*MissingCase=*/true, Expr);
}
- // Otherwise, eat the semicolon.
- ExpectAndConsumeSemi(diag::err_expected_semi_after_expr);
- return handleExprStmt(Expr, StmtCtx);
+ Token *CurTok = nullptr;
+ // If the semicolon is missing at the end of REPL input, consider if
+ // we want to do value printing. Note this is only enabled in C++ mode
+ // since part of the implementation requires C++ language features.
+ // Note we shouldn't eat the token since the callback needs it.
+ if (Tok.is(tok::annot_repl_input_end) && Actions.getLangOpts().CPlusPlus)
+ CurTok = &Tok;
+ else
+ // Otherwise, eat the semicolon.
+ ExpectAndConsumeSemi(diag::err_expected_semi_after_expr);
+
+ StmtResult R = handleExprStmt(Expr, StmtCtx);
+ if (CurTok && !R.isInvalid())
+ CurTok->setAnnotationValue(R.get());
+
+ return R;
}
/// ParseSEHTryBlockCommon
@@ -615,20 +706,36 @@ StmtResult Parser::ParseSEHLeaveStatement() {
return Actions.ActOnSEHLeaveStmt(LeaveLoc, getCurScope());
}
+static void DiagnoseLabelFollowedByDecl(Parser &P, const Stmt *SubStmt) {
+ // When in C mode (but not Microsoft extensions mode), diagnose use of a
+ // label that is followed by a declaration rather than a statement.
+ if (!P.getLangOpts().CPlusPlus && !P.getLangOpts().MicrosoftExt &&
+ isa<DeclStmt>(SubStmt)) {
+ P.Diag(SubStmt->getBeginLoc(),
+ P.getLangOpts().C23
+ ? diag::warn_c23_compat_label_followed_by_declaration
+ : diag::ext_c_label_followed_by_declaration);
+ }
+}
+
/// ParseLabeledStatement - We have an identifier and a ':' after it.
///
+/// label:
+/// identifier ':'
+/// [GNU] identifier ':' attributes[opt]
+///
/// labeled-statement:
-/// identifier ':' statement
-/// [GNU] identifier ':' attributes[opt] statement
+/// label statement
///
-StmtResult Parser::ParseLabeledStatement(ParsedAttributesWithRange &attrs,
+StmtResult Parser::ParseLabeledStatement(ParsedAttributes &Attrs,
ParsedStmtContext StmtCtx) {
assert(Tok.is(tok::identifier) && Tok.getIdentifierInfo() &&
"Not an identifier!");
- // The substatement is always a 'statement', not a 'declaration', but is
- // otherwise in the same context as the labeled-statement.
- StmtCtx &= ~ParsedStmtContext::AllowDeclarationsInC;
+ // [OpenMP 5.1] 2.1.3: A stand-alone directive may not be used in place of a
+ // substatement in a selection statement, in place of the loop body in an
+ // iteration statement, or in place of the statement that follows a label.
+ StmtCtx &= ~ParsedStmtContext::AllowStandaloneOpenMPDirectives;
Token IdentTok = Tok; // Save the whole token.
ConsumeToken(); // eat the identifier.
@@ -641,7 +748,7 @@ StmtResult Parser::ParseLabeledStatement(ParsedAttributesWithRange &attrs,
// Read label attributes, if present.
StmtResult SubStmt;
if (Tok.is(tok::kw___attribute)) {
- ParsedAttributesWithRange TempAttrs(AttrFactory);
+ ParsedAttributes TempAttrs(AttrFactory);
ParseGNUAttributes(TempAttrs);
// In C++, GNU attributes only apply to the label if they are followed by a
@@ -652,16 +759,23 @@ StmtResult Parser::ParseLabeledStatement(ParsedAttributesWithRange &attrs,
// and followed by a semicolon, GCC will reject (it appears to parse the
// attributes as part of a statement in that case). That looks like a bug.
if (!getLangOpts().CPlusPlus || Tok.is(tok::semi))
- attrs.takeAllFrom(TempAttrs);
+ Attrs.takeAllFrom(TempAttrs);
else {
StmtVector Stmts;
- SubStmt = ParseStatementOrDeclarationAfterAttributes(Stmts, StmtCtx,
- nullptr, TempAttrs);
+ ParsedAttributes EmptyCXX11Attrs(AttrFactory);
+ SubStmt = ParseStatementOrDeclarationAfterAttributes(
+ Stmts, StmtCtx, nullptr, EmptyCXX11Attrs, TempAttrs);
if (!TempAttrs.empty() && !SubStmt.isInvalid())
SubStmt = Actions.ActOnAttributedStmt(TempAttrs, SubStmt.get());
}
}
+ // The label may have no statement following it
+ if (SubStmt.isUnset() && Tok.is(tok::r_brace)) {
+ DiagnoseLabelAtEndOfCompoundStatement();
+ SubStmt = Actions.ActOnNullStmt(ColonLoc);
+ }
+
// If we've not parsed a statement yet, parse one now.
if (!SubStmt.isInvalid() && !SubStmt.isUsable())
SubStmt = ParseStatement(nullptr, StmtCtx);
@@ -670,10 +784,12 @@ StmtResult Parser::ParseLabeledStatement(ParsedAttributesWithRange &attrs,
if (SubStmt.isInvalid())
SubStmt = Actions.ActOnNullStmt(ColonLoc);
+ DiagnoseLabelFollowedByDecl(*this, SubStmt.get());
+
LabelDecl *LD = Actions.LookupOrCreateLabel(IdentTok.getIdentifierInfo(),
IdentTok.getLocation());
- Actions.ProcessDeclAttributeList(Actions.CurScope, LD, attrs);
- attrs.clear();
+ Actions.ProcessDeclAttributeList(Actions.CurScope, LD, Attrs);
+ Attrs.clear();
return Actions.ActOnLabelStmt(IdentTok.getLocation(), LD, ColonLoc,
SubStmt.get());
@@ -688,11 +804,12 @@ StmtResult Parser::ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase, ExprResult Expr) {
assert((MissingCase || Tok.is(tok::kw_case)) && "Not a case stmt!");
- // The substatement is always a 'statement', not a 'declaration', but is
- // otherwise in the same context as the labeled-statement.
- StmtCtx &= ~ParsedStmtContext::AllowDeclarationsInC;
+ // [OpenMP 5.1] 2.1.3: A stand-alone directive may not be used in place of a
+ // substatement in a selection statement, in place of the loop body in an
+ // iteration statement, or in place of the statement that follows a label.
+ StmtCtx &= ~ParsedStmtContext::AllowStandaloneOpenMPDirectives;
- // It is very very common for code to contain many case statements recursively
+ // It is very common for code to contain many case statements recursively
// nested, as in (but usually without indentation):
// case 1:
// case 2:
@@ -802,18 +919,13 @@ StmtResult Parser::ParseCaseStatement(ParsedStmtContext StmtCtx,
// If we found a non-case statement, start by parsing it.
StmtResult SubStmt;
- if (Tok.isNot(tok::r_brace)) {
- SubStmt = ParseStatement(/*TrailingElseLoc=*/nullptr, StmtCtx);
+ if (Tok.is(tok::r_brace)) {
+ // "switch (X) { case 4: }", is valid and is treated as if label was
+ // followed by a null statement.
+ DiagnoseLabelAtEndOfCompoundStatement();
+ SubStmt = Actions.ActOnNullStmt(ColonLoc);
} else {
- // Nicely diagnose the common error "switch (X) { case 4: }", which is
- // not valid. If ColonLoc doesn't point to a valid text location, there was
- // another parsing error, so avoid producing extra diagnostics.
- if (ColonLoc.isValid()) {
- SourceLocation AfterColonLoc = PP.getLocForEndOfToken(ColonLoc);
- Diag(AfterColonLoc, diag::err_label_end_of_compound_statement)
- << FixItHint::CreateInsertion(AfterColonLoc, " ;");
- }
- SubStmt = StmtError();
+ SubStmt = ParseStatement(/*TrailingElseLoc=*/nullptr, StmtCtx);
}
// Install the body into the most deeply-nested case.
@@ -821,6 +933,7 @@ StmtResult Parser::ParseCaseStatement(ParsedStmtContext StmtCtx,
// Broken sub-stmt shouldn't prevent forming the case statement properly.
if (SubStmt.isInvalid())
SubStmt = Actions.ActOnNullStmt(SourceLocation());
+ DiagnoseLabelFollowedByDecl(*this, SubStmt.get());
Actions.ActOnCaseStmtBody(DeepestParsedCaseStmt, SubStmt.get());
}
@@ -836,9 +949,10 @@ StmtResult Parser::ParseCaseStatement(ParsedStmtContext StmtCtx,
StmtResult Parser::ParseDefaultStatement(ParsedStmtContext StmtCtx) {
assert(Tok.is(tok::kw_default) && "Not a default stmt!");
- // The substatement is always a 'statement', not a 'declaration', but is
- // otherwise in the same context as the labeled-statement.
- StmtCtx &= ~ParsedStmtContext::AllowDeclarationsInC;
+ // [OpenMP 5.1] 2.1.3: A stand-alone directive may not be used in place of a
+ // substatement in a selection statement, in place of the loop body in an
+ // iteration statement, or in place of the statement that follows a label.
+ StmtCtx &= ~ParsedStmtContext::AllowStandaloneOpenMPDirectives;
SourceLocation DefaultLoc = ConsumeToken(); // eat the 'default'.
@@ -859,21 +973,20 @@ StmtResult Parser::ParseDefaultStatement(ParsedStmtContext StmtCtx) {
StmtResult SubStmt;
- if (Tok.isNot(tok::r_brace)) {
- SubStmt = ParseStatement(/*TrailingElseLoc=*/nullptr, StmtCtx);
+ if (Tok.is(tok::r_brace)) {
+ // "switch (X) {... default: }", is valid and is treated as if label was
+ // followed by a null statement.
+ DiagnoseLabelAtEndOfCompoundStatement();
+ SubStmt = Actions.ActOnNullStmt(ColonLoc);
} else {
- // Diagnose the common error "switch (X) {... default: }", which is
- // not valid.
- SourceLocation AfterColonLoc = PP.getLocForEndOfToken(ColonLoc);
- Diag(AfterColonLoc, diag::err_label_end_of_compound_statement)
- << FixItHint::CreateInsertion(AfterColonLoc, " ;");
- SubStmt = true;
+ SubStmt = ParseStatement(/*TrailingElseLoc=*/nullptr, StmtCtx);
}
// Broken sub-stmt shouldn't prevent forming the case statement properly.
if (SubStmt.isInvalid())
SubStmt = Actions.ActOnNullStmt(ColonLoc);
+ DiagnoseLabelFollowedByDecl(*this, SubStmt.get());
return Actions.ActOnDefaultStmt(DefaultLoc, ColonLoc,
SubStmt.get(), getCurScope());
}
@@ -907,7 +1020,7 @@ StmtResult Parser::ParseCompoundStatement(bool isStmtExpr) {
///
StmtResult Parser::ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags) {
- assert(Tok.is(tok::l_brace) && "Not a compount stmt!");
+ assert(Tok.is(tok::l_brace) && "Not a compound stmt!");
// Enter a scope to hold everything within the compound stmt. Compound
// statements can always hold declarations.
@@ -955,11 +1068,15 @@ void Parser::ParseCompoundStatementLeadingPragmas() {
HandlePragmaFP();
break;
case tok::annot_pragma_fenv_access:
+ case tok::annot_pragma_fenv_access_ms:
HandlePragmaFEnvAccess();
break;
case tok::annot_pragma_fenv_round:
HandlePragmaFEnvRound();
break;
+ case tok::annot_pragma_cx_limited_range:
+ HandlePragmaCXLimitedRange();
+ break;
case tok::annot_pragma_float_control:
HandlePragmaFloatControl();
break;
@@ -983,6 +1100,18 @@ void Parser::ParseCompoundStatementLeadingPragmas() {
}
+void Parser::DiagnoseLabelAtEndOfCompoundStatement() {
+ if (getLangOpts().CPlusPlus) {
+ Diag(Tok, getLangOpts().CPlusPlus23
+ ? diag::warn_cxx20_compat_label_end_of_compound_statement
+ : diag::ext_cxx_label_end_of_compound_statement);
+ } else {
+ Diag(Tok, getLangOpts().C23
+ ? diag::warn_c23_compat_label_end_of_compound_statement
+ : diag::ext_c_label_end_of_compound_statement);
+ }
+}
+
/// Consume any extra semi-colons resulting in null statements,
/// returning true if any tok::semi were consumed.
bool Parser::ConsumeNullStmt(StmtVector &Stmts) {
@@ -1021,7 +1150,7 @@ StmtResult Parser::handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx) {
++LookAhead;
}
// Then look to see if the next two tokens close the statement expression;
- // if so, this expression statement is the last statement in a statment
+ // if so, this expression statement is the last statement in a statement
// expression.
IsStmtExprResult = GetLookAheadToken(LookAhead).is(tok::r_brace) &&
GetLookAheadToken(LookAhead + 1).is(tok::r_paren);
@@ -1032,10 +1161,10 @@ StmtResult Parser::handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx) {
return Actions.ActOnExprStmt(E, /*DiscardedValue=*/!IsStmtExprResult);
}
-/// ParseCompoundStatementBody - Parse a sequence of statements and invoke the
-/// ActOnCompoundStmt action. This expects the '{' to be the current token, and
-/// consume the '}' at the end of the block. It does not manipulate the scope
-/// stack.
+/// ParseCompoundStatementBody - Parse a sequence of statements optionally
+/// followed by a label and invoke the ActOnCompoundStmt action. This expects
+/// the '{' to be the current token, and consume the '}' at the end of the
+/// block. It does not manipulate the scope stack.
StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
PrettyStackTraceLoc CrashInfo(PP.getSourceManager(),
Tok.getLocation(),
@@ -1064,7 +1193,7 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
SourceLocation LabelLoc = ConsumeToken();
SmallVector<Decl *, 8> DeclsInGroup;
- while (1) {
+ while (true) {
if (Tok.isNot(tok::identifier)) {
Diag(Tok, diag::err_expected) << tok::identifier;
break;
@@ -1114,9 +1243,8 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
while (Tok.is(tok::kw___extension__))
ConsumeToken();
- ParsedAttributesWithRange attrs(AttrFactory);
- MaybeParseCXX11Attributes(attrs, nullptr,
- /*MightBeObjCMessageSend*/ true);
+ ParsedAttributes attrs(AttrFactory);
+ MaybeParseCXX11Attributes(attrs, /*MightBeObjCMessageSend*/ true);
// If this is the start of a declaration, parse it as such.
if (isDeclarationStatement()) {
@@ -1125,8 +1253,9 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
ExtensionRAIIObject O(Diags);
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
- DeclGroupPtrTy Res =
- ParseDeclaration(DeclaratorContext::Block, DeclEnd, attrs);
+ ParsedAttributes DeclSpecAttrs(AttrFactory);
+ DeclGroupPtrTy Res = ParseDeclaration(DeclaratorContext::Block, DeclEnd,
+ attrs, DeclSpecAttrs);
R = Actions.ActOnDeclStmt(Res, DeclStart, DeclEnd);
} else {
// Otherwise this was a unary __extension__ marker.
@@ -1149,6 +1278,16 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
if (R.isUsable())
Stmts.push_back(R.get());
}
+ // Warn the user that using option `-ffp-eval-method=source` on a
+ // 32-bit target and feature `sse` disabled, or using
+ // `pragma clang fp eval_method=source` and feature `sse` disabled, is not
+ // supported.
+ if (!PP.getTargetInfo().supportSourceEvalMethod() &&
+ (PP.getLastFPEvalPragmaLocation().isValid() ||
+ PP.getCurrentFPEvalMethod() ==
+ LangOptions::FPEvalMethodKind::FEM_Source))
+ Diag(Tok.getLocation(),
+ diag::warn_no_support_for_eval_method_source_on_m32);
SourceLocation CloseLoc = Tok.getLocation();
@@ -1182,27 +1321,29 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
/// should try to recover harder. It returns false if the condition is
/// successfully parsed. Note that a successful parse can still have semantic
/// errors in the condition.
-/// Additionally, if LParenLoc and RParenLoc are non-null, it will assign
-/// the location of the outer-most '(' and ')', respectively, to them.
+/// Additionally, it will assign the location of the outer-most '(' and ')',
+/// to LParenLoc and RParenLoc, respectively.
bool Parser::ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &Cond,
SourceLocation Loc,
Sema::ConditionKind CK,
- SourceLocation *LParenLoc,
- SourceLocation *RParenLoc) {
+ SourceLocation &LParenLoc,
+ SourceLocation &RParenLoc) {
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
+ SourceLocation Start = Tok.getLocation();
- if (getLangOpts().CPlusPlus)
- Cond = ParseCXXCondition(InitStmt, Loc, CK);
- else {
+ if (getLangOpts().CPlusPlus) {
+ Cond = ParseCXXCondition(InitStmt, Loc, CK, false);
+ } else {
ExprResult CondExpr = ParseExpression();
// If required, convert to a boolean value.
if (CondExpr.isInvalid())
Cond = Sema::ConditionError();
else
- Cond = Actions.ActOnCondition(getCurScope(), Loc, CondExpr.get(), CK);
+ Cond = Actions.ActOnCondition(getCurScope(), Loc, CondExpr.get(), CK,
+ /*MissingOK=*/false);
}
// If the parser was confused by the condition and we don't have a ')', try to
@@ -1216,16 +1357,20 @@ bool Parser::ParseParenExprOrCondition(StmtResult *InitStmt,
return true;
}
- // Otherwise the condition is valid or the rparen is present.
- T.consumeClose();
-
- if (LParenLoc != nullptr) {
- *LParenLoc = T.getOpenLocation();
- }
- if (RParenLoc != nullptr) {
- *RParenLoc = T.getCloseLocation();
+ if (Cond.isInvalid()) {
+ ExprResult CondExpr = Actions.CreateRecoveryExpr(
+ Start, Tok.getLocation() == Start ? Start : PrevTokLocation, {},
+ Actions.PreferredConditionType(CK));
+ if (!CondExpr.isInvalid())
+ Cond = Actions.ActOnCondition(getCurScope(), Loc, CondExpr.get(), CK,
+ /*MissingOK=*/false);
}
+ // Either the condition is valid or the rparen is present.
+ T.consumeClose();
+ LParenLoc = T.getOpenLocation();
+ RParenLoc = T.getCloseLocation();
+
// Check for extraneous ')'s to catch things like "if (foo())) {". We know
// that all callers are looking for a statement after the condition, so ")"
// isn't valid.
@@ -1338,20 +1483,36 @@ struct MisleadingIndentationChecker {
/// 'if' '(' expression ')' statement 'else' statement
/// [C++] 'if' '(' condition ')' statement
/// [C++] 'if' '(' condition ')' statement 'else' statement
+/// [C++23] 'if' '!' [opt] consteval compound-statement
+/// [C++23] 'if' '!' [opt] consteval compound-statement 'else' statement
///
StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
assert(Tok.is(tok::kw_if) && "Not an if stmt!");
SourceLocation IfLoc = ConsumeToken(); // eat the 'if'.
bool IsConstexpr = false;
+ bool IsConsteval = false;
+ SourceLocation NotLocation;
+ SourceLocation ConstevalLoc;
+
if (Tok.is(tok::kw_constexpr)) {
Diag(Tok, getLangOpts().CPlusPlus17 ? diag::warn_cxx14_compat_constexpr_if
: diag::ext_constexpr_if);
IsConstexpr = true;
ConsumeToken();
- }
+ } else {
+ if (Tok.is(tok::exclaim)) {
+ NotLocation = ConsumeToken();
+ }
- if (Tok.isNot(tok::l_paren)) {
+ if (Tok.is(tok::kw_consteval)) {
+ Diag(Tok, getLangOpts().CPlusPlus23 ? diag::warn_cxx20_compat_consteval_if
+ : diag::ext_consteval_if);
+ IsConsteval = true;
+ ConstevalLoc = ConsumeToken();
+ }
+ }
+ if (!IsConsteval && (NotLocation.isValid() || Tok.isNot(tok::l_paren))) {
Diag(Tok, diag::err_expected_lparen_after) << "if";
SkipUntil(tok::semi);
return StmtError();
@@ -1378,15 +1539,18 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
Sema::ConditionResult Cond;
SourceLocation LParen;
SourceLocation RParen;
- if (ParseParenExprOrCondition(&InitStmt, Cond, IfLoc,
- IsConstexpr ? Sema::ConditionKind::ConstexprIf
- : Sema::ConditionKind::Boolean,
- &LParen, &RParen))
- return StmtError();
+ std::optional<bool> ConstexprCondition;
+ if (!IsConsteval) {
- llvm::Optional<bool> ConstexprCondition;
- if (IsConstexpr)
- ConstexprCondition = Cond.getKnownValue();
+ if (ParseParenExprOrCondition(&InitStmt, Cond, IfLoc,
+ IsConstexpr ? Sema::ConditionKind::ConstexprIf
+ : Sema::ConditionKind::Boolean,
+ LParen, RParen))
+ return StmtError();
+
+ if (IsConstexpr)
+ ConstexprCondition = Cond.getKnownValue();
+ }
bool IsBracedThen = Tok.is(tok::l_brace);
@@ -1418,10 +1582,17 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
SourceLocation InnerStatementTrailingElseLoc;
StmtResult ThenStmt;
{
+ bool ShouldEnter = ConstexprCondition && !*ConstexprCondition;
+ Sema::ExpressionEvaluationContext Context =
+ Sema::ExpressionEvaluationContext::DiscardedStatement;
+ if (NotLocation.isInvalid() && IsConsteval) {
+ Context = Sema::ExpressionEvaluationContext::ImmediateFunctionContext;
+ ShouldEnter = true;
+ }
+
EnterExpressionEvaluationContext PotentiallyDiscarded(
- Actions, Sema::ExpressionEvaluationContext::DiscardedStatement, nullptr,
- Sema::ExpressionEvaluationContextRecord::EK_Other,
- /*ShouldEnter=*/ConstexprCondition && !*ConstexprCondition);
+ Actions, Context, nullptr,
+ Sema::ExpressionEvaluationContextRecord::EK_Other, ShouldEnter);
ThenStmt = ParseStatement(&InnerStatementTrailingElseLoc);
}
@@ -1456,11 +1627,17 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
Tok.is(tok::l_brace));
MisleadingIndentationChecker MIChecker(*this, MSK_else, ElseLoc);
+ bool ShouldEnter = ConstexprCondition && *ConstexprCondition;
+ Sema::ExpressionEvaluationContext Context =
+ Sema::ExpressionEvaluationContext::DiscardedStatement;
+ if (NotLocation.isValid() && IsConsteval) {
+ Context = Sema::ExpressionEvaluationContext::ImmediateFunctionContext;
+ ShouldEnter = true;
+ }
EnterExpressionEvaluationContext PotentiallyDiscarded(
- Actions, Sema::ExpressionEvaluationContext::DiscardedStatement, nullptr,
- Sema::ExpressionEvaluationContextRecord::EK_Other,
- /*ShouldEnter=*/ConstexprCondition && *ConstexprCondition);
+ Actions, Context, nullptr,
+ Sema::ExpressionEvaluationContextRecord::EK_Other, ShouldEnter);
ElseStmt = ParseStatement();
if (ElseStmt.isUsable())
@@ -1479,7 +1656,7 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
IfScope.Exit();
// If the then or else stmt is invalid and the other is valid (and present),
- // make turn the invalid one into a null stmt to avoid dropping the other
+ // turn the invalid one into a null stmt to avoid dropping the other
// part. If both are invalid, return error.
if ((ThenStmt.isInvalid() && ElseStmt.isInvalid()) ||
(ThenStmt.isInvalid() && ElseStmt.get() == nullptr) ||
@@ -1488,14 +1665,40 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
return StmtError();
}
+ if (IsConsteval) {
+ auto IsCompoundStatement = [](const Stmt *S) {
+ if (const auto *Outer = dyn_cast_if_present<AttributedStmt>(S))
+ S = Outer->getSubStmt();
+ return isa_and_nonnull<clang::CompoundStmt>(S);
+ };
+
+ if (!IsCompoundStatement(ThenStmt.get())) {
+ Diag(ConstevalLoc, diag::err_expected_after) << "consteval"
+ << "{";
+ return StmtError();
+ }
+ if (!ElseStmt.isUnset() && !IsCompoundStatement(ElseStmt.get())) {
+ Diag(ElseLoc, diag::err_expected_after) << "else"
+ << "{";
+ return StmtError();
+ }
+ }
+
// Now if either are invalid, replace with a ';'.
if (ThenStmt.isInvalid())
ThenStmt = Actions.ActOnNullStmt(ThenStmtLoc);
if (ElseStmt.isInvalid())
ElseStmt = Actions.ActOnNullStmt(ElseStmtLoc);
- return Actions.ActOnIfStmt(IfLoc, IsConstexpr, LParen, InitStmt.get(), Cond,
- RParen, ThenStmt.get(), ElseLoc, ElseStmt.get());
+ IfStatementKind Kind = IfStatementKind::Ordinary;
+ if (IsConstexpr)
+ Kind = IfStatementKind::Constexpr;
+ else if (IsConsteval)
+ Kind = NotLocation.isValid() ? IfStatementKind::ConstevalNegated
+ : IfStatementKind::ConstevalNonNegated;
+
+ return Actions.ActOnIfStmt(IfLoc, Kind, LParen, InitStmt.get(), Cond, RParen,
+ ThenStmt.get(), ElseLoc, ElseStmt.get());
}
/// ParseSwitchStatement
@@ -1537,7 +1740,7 @@ StmtResult Parser::ParseSwitchStatement(SourceLocation *TrailingElseLoc) {
SourceLocation LParen;
SourceLocation RParen;
if (ParseParenExprOrCondition(&InitStmt, Cond, SwitchLoc,
- Sema::ConditionKind::Switch, &LParen, &RParen))
+ Sema::ConditionKind::Switch, LParen, RParen))
return StmtError();
StmtResult Switch = Actions.ActOnStartOfSwitchStmt(
@@ -1627,7 +1830,7 @@ StmtResult Parser::ParseWhileStatement(SourceLocation *TrailingElseLoc) {
SourceLocation LParen;
SourceLocation RParen;
if (ParseParenExprOrCondition(nullptr, Cond, WhileLoc,
- Sema::ConditionKind::Boolean, &LParen, &RParen))
+ Sema::ConditionKind::Boolean, LParen, RParen))
return StmtError();
// C99 6.8.5p5 - In C99, the body of the while statement is a scope, even if
@@ -1718,10 +1921,19 @@ StmtResult Parser::ParseDoStatement() {
// A do-while expression is not a condition, so can't have attributes.
DiagnoseAndSkipCXX11Attributes();
+ SourceLocation Start = Tok.getLocation();
ExprResult Cond = ParseExpression();
// Correct the typos in condition before closing the scope.
if (Cond.isUsable())
- Cond = Actions.CorrectDelayedTyposInExpr(Cond);
+ Cond = Actions.CorrectDelayedTyposInExpr(Cond, /*InitDecl=*/nullptr,
+ /*RecoverUncorrectedTypos=*/true);
+ else {
+ if (!Tok.isOneOf(tok::r_paren, tok::r_square, tok::r_brace))
+ SkipUntil(tok::semi);
+ Cond = Actions.CreateRecoveryExpr(
+ Start, Start == Tok.getLocation() ? Start : PrevTokLocation, {},
+ Actions.getASTContext().BoolTy);
+ }
T.consumeClose();
DoScope.Exit();
@@ -1767,6 +1979,7 @@ bool Parser::isForRangeIdentifier() {
/// [C++] for-init-statement:
/// [C++] expression-statement
/// [C++] simple-declaration
+/// [C++23] alias-declaration
///
/// [C++0x] for-range-declaration:
/// [C++0x] attribute-specifier-seq[opt] type-specifier-seq declarator
@@ -1831,7 +2044,7 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
return StmtError();
}
- ParsedAttributesWithRange attrs(AttrFactory);
+ ParsedAttributes attrs(AttrFactory);
MaybeParseCXX11Attributes(attrs);
SourceLocation EmptyInitStmtSemiLoc;
@@ -1862,8 +2075,8 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
? FixItHint::CreateInsertion(Loc, "auto &&")
: FixItHint());
- ForRangeInfo.LoopVar = Actions.ActOnCXXForRangeIdentifier(
- getCurScope(), Loc, Name, attrs, attrs.Range.getEnd());
+ ForRangeInfo.LoopVar =
+ Actions.ActOnCXXForRangeIdentifier(getCurScope(), Loc, Name, attrs);
} else if (isForInitDeclaration()) { // for (int X = 4;
ParenBraceBracketBalancer BalancerRAIIObj(*this);
@@ -1872,36 +2085,43 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
Diag(Tok, diag::ext_c99_variable_decl_in_for_loop);
Diag(Tok, diag::warn_gcc_variable_decl_in_for_loop);
}
-
- // In C++0x, "for (T NS:a" might not be a typo for ::
- bool MightBeForRangeStmt = getLangOpts().CPlusPlus;
- ColonProtectionRAIIObject ColonProtection(*this, MightBeForRangeStmt);
-
+ DeclGroupPtrTy DG;
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
- DeclGroupPtrTy DG = ParseSimpleDeclaration(
- DeclaratorContext::ForInit, DeclEnd, attrs, false,
- MightBeForRangeStmt ? &ForRangeInfo : nullptr);
- FirstPart = Actions.ActOnDeclStmt(DG, DeclStart, Tok.getLocation());
- if (ForRangeInfo.ParsedForRangeDecl()) {
- Diag(ForRangeInfo.ColonLoc, getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_for_range : diag::ext_for_range);
- ForRangeInfo.LoopVar = FirstPart;
- FirstPart = StmtResult();
- } else if (Tok.is(tok::semi)) { // for (int x = 4;
- ConsumeToken();
- } else if ((ForEach = isTokIdentifier_in())) {
- Actions.ActOnForEachDeclStmt(DG);
- // ObjC: for (id x in expr)
- ConsumeToken(); // consume 'in'
-
- if (Tok.is(tok::code_completion)) {
- cutOffParsing();
- Actions.CodeCompleteObjCForCollection(getCurScope(), DG);
- return StmtError();
- }
- Collection = ParseExpression();
+ if (Tok.is(tok::kw_using)) {
+ DG = ParseAliasDeclarationInInitStatement(DeclaratorContext::ForInit,
+ attrs);
+ FirstPart = Actions.ActOnDeclStmt(DG, DeclStart, Tok.getLocation());
} else {
- Diag(Tok, diag::err_expected_semi_for);
+ // In C++0x, "for (T NS:a" might not be a typo for ::
+ bool MightBeForRangeStmt = getLangOpts().CPlusPlus;
+ ColonProtectionRAIIObject ColonProtection(*this, MightBeForRangeStmt);
+ ParsedAttributes DeclSpecAttrs(AttrFactory);
+ DG = ParseSimpleDeclaration(
+ DeclaratorContext::ForInit, DeclEnd, attrs, DeclSpecAttrs, false,
+ MightBeForRangeStmt ? &ForRangeInfo : nullptr);
+ FirstPart = Actions.ActOnDeclStmt(DG, DeclStart, Tok.getLocation());
+ if (ForRangeInfo.ParsedForRangeDecl()) {
+ Diag(ForRangeInfo.ColonLoc, getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_for_range
+ : diag::ext_for_range);
+ ForRangeInfo.LoopVar = FirstPart;
+ FirstPart = StmtResult();
+ } else if (Tok.is(tok::semi)) { // for (int x = 4;
+ ConsumeToken();
+ } else if ((ForEach = isTokIdentifier_in())) {
+ Actions.ActOnForEachDeclStmt(DG);
+ // ObjC: for (id x in expr)
+ ConsumeToken(); // consume 'in'
+
+ if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
+ Actions.CodeCompleteObjCForCollection(getCurScope(), DG);
+ return StmtError();
+ }
+ Collection = ParseExpression();
+ } else {
+ Diag(Tok, diag::err_expected_semi_for);
+ }
}
} else {
ProhibitAttributes(attrs);
@@ -1969,10 +2189,13 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
// for-range-declaration next.
bool MightBeForRangeStmt = !ForRangeInfo.ParsedForRangeDecl();
ColonProtectionRAIIObject ColonProtection(*this, MightBeForRangeStmt);
- SecondPart =
- ParseCXXCondition(nullptr, ForLoc, Sema::ConditionKind::Boolean,
- MightBeForRangeStmt ? &ForRangeInfo : nullptr,
- /*EnterForConditionScope*/ true);
+ SourceLocation SecondPartStart = Tok.getLocation();
+ Sema::ConditionKind CK = Sema::ConditionKind::Boolean;
+ SecondPart = ParseCXXCondition(
+ /*InitStmt=*/nullptr, ForLoc, CK,
+ // FIXME: recovery if we don't see another semi!
+ /*MissingOK=*/true, MightBeForRangeStmt ? &ForRangeInfo : nullptr,
+ /*EnterForConditionScope=*/true);
if (ForRangeInfo.ParsedForRangeDecl()) {
Diag(FirstPart.get() ? FirstPart.get()->getBeginLoc()
@@ -1988,6 +2211,19 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
<< FixItHint::CreateRemoval(EmptyInitStmtSemiLoc);
}
}
+
+ if (SecondPart.isInvalid()) {
+ ExprResult CondExpr = Actions.CreateRecoveryExpr(
+ SecondPartStart,
+ Tok.getLocation() == SecondPartStart ? SecondPartStart
+ : PrevTokLocation,
+ {}, Actions.PreferredConditionType(CK));
+ if (!CondExpr.isInvalid())
+ SecondPart = Actions.ActOnCondition(getCurScope(), ForLoc,
+ CondExpr.get(), CK,
+ /*MissingOK=*/false);
+ }
+
} else {
// We permit 'continue' and 'break' in the condition of a for loop.
getCurScope()->AddFlags(Scope::BreakScope | Scope::ContinueScope);
@@ -1996,16 +2232,16 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
if (SecondExpr.isInvalid())
SecondPart = Sema::ConditionError();
else
- SecondPart =
- Actions.ActOnCondition(getCurScope(), ForLoc, SecondExpr.get(),
- Sema::ConditionKind::Boolean);
+ SecondPart = Actions.ActOnCondition(
+ getCurScope(), ForLoc, SecondExpr.get(),
+ Sema::ConditionKind::Boolean, /*MissingOK=*/true);
}
}
}
// Enter a break / continue scope, if we didn't already enter one while
// parsing the second part.
- if (!(getCurScope()->getFlags() & Scope::ContinueScope))
+ if (!getCurScope()->isContinueScope())
getCurScope()->AddFlags(Scope::BreakScope | Scope::ContinueScope);
// Parse the third part of the for statement.
@@ -2013,9 +2249,7 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
if (Tok.isNot(tok::semi)) {
if (!SecondPart.isInvalid())
Diag(Tok, diag::err_expected_semi_for);
- else
- // Skip until semicolon or rparen, don't consume it.
- SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);
+ SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);
}
if (Tok.is(tok::semi)) {
@@ -2039,6 +2273,9 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
CoawaitLoc = SourceLocation();
}
+ if (CoawaitLoc.isValid() && getLangOpts().CPlusPlus20)
+ Diag(CoawaitLoc, diag::warn_deprecated_for_co_await);
+
// We need to perform most of the semantic analysis for a C++0x for-range
// statememt before parsing the body, in order to be able to deduce the type
// of an auto-typed loop variable.
@@ -2222,9 +2459,9 @@ StmtResult Parser::ParseReturnStatement() {
StmtResult Parser::ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
- ParsedAttributesWithRange &Attrs) {
+ ParsedAttributes &Attrs) {
// Create temporary attribute list.
- ParsedAttributesWithRange TempAttrs(AttrFactory);
+ ParsedAttributes TempAttrs(AttrFactory);
SourceLocation StartLoc = Tok.getLocation();
@@ -2238,14 +2475,15 @@ StmtResult Parser::ParsePragmaLoopHint(StmtVector &Stmts,
ArgsUnion(Hint.ValueExpr)};
TempAttrs.addNew(Hint.PragmaNameLoc->Ident, Hint.Range, nullptr,
Hint.PragmaNameLoc->Loc, ArgHints, 4,
- ParsedAttr::AS_Pragma);
+ ParsedAttr::Form::Pragma());
}
// Get the next statement.
MaybeParseCXX11Attributes(Attrs);
+ ParsedAttributes EmptyDeclSpecAttrs(AttrFactory);
StmtResult S = ParseStatementOrDeclarationAfterAttributes(
- Stmts, StmtCtx, TrailingElseLoc, Attrs);
+ Stmts, StmtCtx, TrailingElseLoc, Attrs, EmptyDeclSpecAttrs);
Attrs.takeAllFrom(TempAttrs);
@@ -2278,7 +2516,8 @@ Decl *Parser::ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope) {
// If the function body could not be parsed, make a bogus compoundstmt.
if (FnBody.isInvalid()) {
Sema::CompoundScopeRAII CompoundScope(Actions);
- FnBody = Actions.ActOnCompoundStmt(LBraceLoc, LBraceLoc, None, false);
+ FnBody =
+ Actions.ActOnCompoundStmt(LBraceLoc, LBraceLoc, std::nullopt, false);
}
BodyScope.Exit();
@@ -2315,7 +2554,8 @@ Decl *Parser::ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope) {
// compound statement as the body.
if (FnBody.isInvalid()) {
Sema::CompoundScopeRAII CompoundScope(Actions);
- FnBody = Actions.ActOnCompoundStmt(LBraceLoc, LBraceLoc, None, false);
+ FnBody =
+ Actions.ActOnCompoundStmt(LBraceLoc, LBraceLoc, std::nullopt, false);
}
BodyScope.Exit();
@@ -2477,16 +2717,15 @@ StmtResult Parser::ParseCXXCatchBlock(bool FnCatch) {
// without default arguments.
Decl *ExceptionDecl = nullptr;
if (Tok.isNot(tok::ellipsis)) {
- ParsedAttributesWithRange Attributes(AttrFactory);
+ ParsedAttributes Attributes(AttrFactory);
MaybeParseCXX11Attributes(Attributes);
DeclSpec DS(AttrFactory);
- DS.takeAttributesFrom(Attributes);
if (ParseCXXTypeSpecifierSeq(DS))
return StmtError();
- Declarator ExDecl(DS, DeclaratorContext::CXXCatch);
+ Declarator ExDecl(DS, Attributes, DeclaratorContext::CXXCatch);
ParseDeclarator(ExDecl);
ExceptionDecl = Actions.ActOnExceptionDeclarator(getCurScope(), ExDecl);
} else
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseStmtAsm.cpp b/contrib/llvm-project/clang/lib/Parse/ParseStmtAsm.cpp
index e520151dcad7..04c3a8700c10 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseStmtAsm.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseStmtAsm.cpp
@@ -10,10 +10,10 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Parse/Parser.h"
#include "clang/AST/ASTContext.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
@@ -28,8 +28,8 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetOptions.h"
+#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/SourceMgr.h"
-#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
using namespace clang;
@@ -222,7 +222,7 @@ ExprResult Parser::ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
CXXScopeSpec SS;
if (getLangOpts().CPlusPlus)
ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false);
// Require an identifier here.
@@ -508,7 +508,7 @@ StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
TokLoc = Tok.getLocation();
++NumTokensRead;
SkippedStartOfLine = false;
- } while (1);
+ } while (true);
if (BraceNesting && BraceCount != savedBraceCount) {
// __asm without closing brace (this can happen at EOF).
@@ -681,7 +681,7 @@ StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
/// asm-qualifier
/// asm-qualifier-list asm-qualifier
bool Parser::parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ) {
- while (1) {
+ while (true) {
const GNUAsmQualifiers::AQ A = getGNUAsmQualifier(Tok);
if (A == GNUAsmQualifiers::AQ_unspecified) {
if (Tok.isNot(tok::l_paren)) {
@@ -810,7 +810,7 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
}
// Parse the asm-string list for clobbers if present.
if (!AteExtraColon && isTokenStringLiteral()) {
- while (1) {
+ while (true) {
ExprResult Clobber(ParseAsmStringLiteral(/*ForAsmLabel*/ false));
if (Clobber.isInvalid())
@@ -888,7 +888,7 @@ bool Parser::ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
if (!isTokenStringLiteral() && Tok.isNot(tok::l_square))
return false;
- while (1) {
+ while (true) {
// Read the [id] if present.
if (Tok.is(tok::l_square)) {
BalancedDelimiterTracker T(*this, tok::l_square);
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp b/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp
index 828b9b2277ff..64fe4d50bba2 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp
@@ -17,8 +17,10 @@
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
+#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/Support/TimeProfiler.h"
using namespace clang;
@@ -199,13 +201,24 @@ Decl *Parser::ParseSingleDeclarationAfterTemplate(
if (Context == DeclaratorContext::Member) {
// We are parsing a member template.
- ParseCXXClassMemberDeclaration(AS, AccessAttrs, TemplateInfo,
- &DiagsFromTParams);
- return nullptr;
+ DeclGroupPtrTy D = ParseCXXClassMemberDeclaration(
+ AS, AccessAttrs, TemplateInfo, &DiagsFromTParams);
+
+ if (!D || !D.get().isSingleDecl())
+ return nullptr;
+ return D.get().getSingleDecl();
}
- ParsedAttributesWithRange prefixAttrs(AttrFactory);
- MaybeParseCXX11Attributes(prefixAttrs);
+ ParsedAttributes prefixAttrs(AttrFactory);
+ ParsedAttributes DeclSpecAttrs(AttrFactory);
+
+ // GNU attributes are applied to the declaration specification while the
+ // standard attributes are applied to the declaration. We parse the two
+ // attribute sets into different containters so we can apply them during
+ // the regular parsing process.
+ while (MaybeParseCXX11Attributes(prefixAttrs) ||
+ MaybeParseGNUAttributes(DeclSpecAttrs))
+ ;
if (Tok.is(tok::kw_using)) {
auto usingDeclPtr = ParseUsingDirectiveOrDeclaration(Context, TemplateInfo, DeclEnd,
@@ -218,6 +231,9 @@ Decl *Parser::ParseSingleDeclarationAfterTemplate(
// Parse the declaration specifiers, stealing any diagnostics from
// the template parameters.
ParsingDeclSpec DS(*this, &DiagsFromTParams);
+ DS.SetRangeStart(DeclSpecAttrs.Range.getBegin());
+ DS.SetRangeEnd(DeclSpecAttrs.Range.getEnd());
+ DS.takeAttributesFrom(DeclSpecAttrs);
ParseDeclarationSpecifiers(DS, TemplateInfo, AS,
getDeclSpecContextFromDeclaratorContext(Context));
@@ -227,47 +243,67 @@ Decl *Parser::ParseSingleDeclarationAfterTemplate(
DeclEnd = ConsumeToken();
RecordDecl *AnonRecord = nullptr;
Decl *Decl = Actions.ParsedFreeStandingDeclSpec(
- getCurScope(), AS, DS,
+ getCurScope(), AS, DS, ParsedAttributesView::none(),
TemplateInfo.TemplateParams ? *TemplateInfo.TemplateParams
: MultiTemplateParamsArg(),
TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation,
AnonRecord);
+ Actions.ActOnDefinedDeclarationSpecifier(Decl);
assert(!AnonRecord &&
"Anonymous unions/structs should not be valid with template");
DS.complete(Decl);
return Decl;
}
+ if (DS.hasTagDefinition())
+ Actions.ActOnDefinedDeclarationSpecifier(DS.getRepAsDecl());
+
// Move the attributes from the prefix into the DS.
if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation)
ProhibitAttributes(prefixAttrs);
- else
- DS.takeAttributesFrom(prefixAttrs);
// Parse the declarator.
- ParsingDeclarator DeclaratorInfo(*this, DS, (DeclaratorContext)Context);
+ ParsingDeclarator DeclaratorInfo(*this, DS, prefixAttrs,
+ (DeclaratorContext)Context);
if (TemplateInfo.TemplateParams)
DeclaratorInfo.setTemplateParameterLists(*TemplateInfo.TemplateParams);
+
+ // Turn off usual access checking for template specializations and
+ // instantiations.
+ // C++20 [temp.spec] 13.9/6.
+ // This disables the access checking rules for function template explicit
+ // instantiation and explicit specialization:
+ // - parameter-list;
+ // - template-argument-list;
+ // - noexcept-specifier;
+ // - dynamic-exception-specifications (deprecated in C++11, removed since
+ // C++17).
+ bool IsTemplateSpecOrInst =
+ (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation ||
+ TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization);
+ SuppressAccessChecks SAC(*this, IsTemplateSpecOrInst);
+
ParseDeclarator(DeclaratorInfo);
+
+ if (IsTemplateSpecOrInst)
+ SAC.done();
+
// Error parsing the declarator?
if (!DeclaratorInfo.hasName()) {
- // If so, skip until the semi-colon or a }.
- SkipUntil(tok::r_brace, StopAtSemi | StopBeforeMatch);
- if (Tok.is(tok::semi))
- ConsumeToken();
+ SkipMalformedDecl();
return nullptr;
}
- llvm::TimeTraceScope TimeScope("ParseTemplate", [&]() {
- return std::string(DeclaratorInfo.getIdentifier() != nullptr
- ? DeclaratorInfo.getIdentifier()->getName()
- : "<unknown>");
- });
-
LateParsedAttrList LateParsedAttrs(true);
if (DeclaratorInfo.isFunctionDeclarator()) {
- if (Tok.is(tok::kw_requires))
+ if (Tok.is(tok::kw_requires)) {
+ CXXScopeSpec &ScopeSpec = DeclaratorInfo.getCXXScopeSpec();
+ DeclaratorScopeObj DeclScopeObj(*this, ScopeSpec);
+ if (ScopeSpec.isValid() &&
+ Actions.ShouldEnterDeclaratorScope(getCurScope(), ScopeSpec))
+ DeclScopeObj.EnterDeclaratorScope();
ParseTrailingRequiresClause(DeclaratorInfo);
+ }
MaybeParseGNUAttributes(DeclaratorInfo, &LateParsedAttrs);
}
@@ -312,8 +348,8 @@ Decl *Parser::ParseSingleDeclarationAfterTemplate(
// Recover as if it were an explicit specialization.
TemplateParameterLists FakedParamLists;
FakedParamLists.push_back(Actions.ActOnTemplateParameterList(
- 0, SourceLocation(), TemplateInfo.TemplateLoc, LAngleLoc, None,
- LAngleLoc, nullptr));
+ 0, SourceLocation(), TemplateInfo.TemplateLoc, LAngleLoc,
+ std::nullopt, LAngleLoc, nullptr));
return ParseFunctionDefinition(
DeclaratorInfo, ParsedTemplateInfo(&FakedParamLists,
@@ -363,7 +399,7 @@ Parser::ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation BoolKWLoc;
if (TryConsumeToken(tok::kw_bool, BoolKWLoc))
- Diag(Tok.getLocation(), diag::ext_concept_legacy_bool_keyword) <<
+ Diag(Tok.getLocation(), diag::err_concept_legacy_bool_keyword) <<
FixItHint::CreateRemoval(SourceLocation(BoolKWLoc));
DiagnoseAndSkipCXX11Attributes();
@@ -371,7 +407,7 @@ Parser::ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
CXXScopeSpec SS;
if (ParseOptionalCXXScopeSpecifier(
SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false, /*EnteringContext=*/false,
+ /*ObjectHasErrors=*/false, /*EnteringContext=*/false,
/*MayBePseudoDestructor=*/nullptr,
/*IsTypename=*/false, /*LastII=*/nullptr, /*OnlyNamespace=*/true) ||
SS.isInvalid()) {
@@ -480,7 +516,7 @@ bool Parser::ParseTemplateParameters(
bool
Parser::ParseTemplateParameterList(const unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams) {
- while (1) {
+ while (true) {
if (NamedDecl *TmpParam
= ParseTemplateParameter(Depth, TemplateParams.size())) {
@@ -645,7 +681,8 @@ NamedDecl *Parser::ParseTemplateParameter(unsigned Depth, unsigned Position) {
// probably meant to write the type of a NTTP.
DeclSpec DS(getAttrFactory());
DS.SetTypeSpecError();
- Declarator D(DS, DeclaratorContext::TemplateParam);
+ Declarator D(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TemplateParam);
D.SetIdentifier(nullptr, Tok.getLocation());
D.setInvalidType(true);
NamedDecl *ErrorParam = Actions.ActOnNonTypeTemplateParameter(
@@ -695,7 +732,7 @@ bool Parser::TryAnnotateTypeConstraint() {
CXXScopeSpec SS;
bool WasScopeAnnotation = Tok.is(tok::annot_cxxscope);
if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false,
/*MayBePseudoDestructor=*/nullptr,
// If this is not a type-constraint, then
@@ -767,7 +804,7 @@ NamedDecl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
bool TypenameKeyword = false;
SourceLocation KeyLoc;
ParseOptionalCXXScopeSpecifier(TypeConstraintSS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext*/ false);
if (Tok.is(tok::annot_template_id)) {
// Consume the 'type-constraint'.
@@ -819,10 +856,17 @@ NamedDecl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
// we introduce the type parameter into the local scope.
SourceLocation EqualLoc;
ParsedType DefaultArg;
- if (TryConsumeToken(tok::equal, EqualLoc))
+ if (TryConsumeToken(tok::equal, EqualLoc)) {
+ // The default argument may declare template parameters, notably
+ // if it contains a generic lambda, so we need to increase
+ // the template depth as these parameters would not be instantiated
+ // at the current level.
+ TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
+ ++CurTemplateDepthTracker;
DefaultArg =
ParseTypeName(/*Range=*/nullptr, DeclaratorContext::TemplateTypeArg)
.get();
+ }
NamedDecl *NewDecl = Actions.ActOnTypeParameter(getCurScope(),
TypenameKeyword, EllipsisLoc,
@@ -844,27 +888,39 @@ NamedDecl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
/// template parameters.
///
/// type-parameter: [C++ temp.param]
-/// 'template' '<' template-parameter-list '>' type-parameter-key
-/// ...[opt] identifier[opt]
-/// 'template' '<' template-parameter-list '>' type-parameter-key
-/// identifier[opt] = id-expression
+/// template-head type-parameter-key ...[opt] identifier[opt]
+/// template-head type-parameter-key identifier[opt] = id-expression
/// type-parameter-key:
/// 'class'
/// 'typename' [C++1z]
-NamedDecl *
-Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
+/// template-head: [C++2a]
+/// 'template' '<' template-parameter-list '>'
+/// requires-clause[opt]
+NamedDecl *Parser::ParseTemplateTemplateParameter(unsigned Depth,
+ unsigned Position) {
assert(Tok.is(tok::kw_template) && "Expected 'template' keyword");
// Handle the template <...> part.
SourceLocation TemplateLoc = ConsumeToken();
SmallVector<NamedDecl*,8> TemplateParams;
SourceLocation LAngleLoc, RAngleLoc;
+ ExprResult OptionalRequiresClauseConstraintER;
{
MultiParseScope TemplateParmScope(*this);
if (ParseTemplateParameters(TemplateParmScope, Depth + 1, TemplateParams,
LAngleLoc, RAngleLoc)) {
return nullptr;
}
+ if (TryConsumeToken(tok::kw_requires)) {
+ OptionalRequiresClauseConstraintER =
+ Actions.ActOnRequiresClause(ParseConstraintLogicalOrExpression(
+ /*IsTrailingRequiresClause=*/false));
+ if (!OptionalRequiresClauseConstraintER.isUsable()) {
+ SkipUntil(tok::comma, tok::greater, tok::greatergreater,
+ StopAtSemi | StopBeforeMatch);
+ return nullptr;
+ }
+ }
}
// Provide an ExtWarn if the C++1z feature of using 'typename' here is used.
@@ -886,10 +942,13 @@ Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
} else if (Next.isOneOf(tok::identifier, tok::comma, tok::greater,
tok::greatergreater, tok::ellipsis)) {
Diag(Tok.getLocation(), diag::err_class_on_template_template_param)
- << (Replace ? FixItHint::CreateReplacement(Tok.getLocation(), "class")
- : FixItHint::CreateInsertion(Tok.getLocation(), "class "));
+ << getLangOpts().CPlusPlus17
+ << (Replace
+ ? FixItHint::CreateReplacement(Tok.getLocation(), "class")
+ : FixItHint::CreateInsertion(Tok.getLocation(), "class "));
} else
- Diag(Tok.getLocation(), diag::err_class_on_template_template_param);
+ Diag(Tok.getLocation(), diag::err_class_on_template_template_param)
+ << getLangOpts().CPlusPlus17;
if (Replace)
ConsumeToken();
@@ -923,11 +982,9 @@ Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
if (TryConsumeToken(tok::ellipsis, EllipsisLoc))
DiagnoseMisplacedEllipsis(EllipsisLoc, NameLoc, AlreadyHasEllipsis, true);
- TemplateParameterList *ParamList =
- Actions.ActOnTemplateParameterList(Depth, SourceLocation(),
- TemplateLoc, LAngleLoc,
- TemplateParams,
- RAngleLoc, nullptr);
+ TemplateParameterList *ParamList = Actions.ActOnTemplateParameterList(
+ Depth, SourceLocation(), TemplateLoc, LAngleLoc, TemplateParams,
+ RAngleLoc, OptionalRequiresClauseConstraintER.get());
// Grab a default argument (if available).
// Per C++0x [basic.scope.pdecl]p9, we parse the default argument before
@@ -966,7 +1023,8 @@ Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) {
DeclSpecContext::DSC_template_param);
// Parse this as a typename.
- Declarator ParamDecl(DS, DeclaratorContext::TemplateParam);
+ Declarator ParamDecl(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TemplateParam);
ParseDeclarator(ParamDecl);
if (DS.getTypeSpecType() == DeclSpec::TST_unspecified) {
Diag(Tok.getLocation(), diag::err_expected_template_parameter);
@@ -984,18 +1042,30 @@ Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) {
SourceLocation EqualLoc;
ExprResult DefaultArg;
if (TryConsumeToken(tok::equal, EqualLoc)) {
- // C++ [temp.param]p15:
- // When parsing a default template-argument for a non-type
- // template-parameter, the first non-nested > is taken as the
- // end of the template-parameter-list rather than a greater-than
- // operator.
- GreaterThanIsOperatorScope G(GreaterThanIsOperator, false);
- EnterExpressionEvaluationContext ConstantEvaluated(
- Actions, Sema::ExpressionEvaluationContext::ConstantEvaluated);
-
- DefaultArg = Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
- if (DefaultArg.isInvalid())
+ if (Tok.is(tok::l_paren) && NextToken().is(tok::l_brace)) {
+ Diag(Tok.getLocation(), diag::err_stmt_expr_in_default_arg) << 1;
SkipUntil(tok::comma, tok::greater, StopAtSemi | StopBeforeMatch);
+ } else {
+ // C++ [temp.param]p15:
+ // When parsing a default template-argument for a non-type
+ // template-parameter, the first non-nested > is taken as the
+ // end of the template-parameter-list rather than a greater-than
+ // operator.
+ GreaterThanIsOperatorScope G(GreaterThanIsOperator, false);
+
+ // The default argument may declare template parameters, notably
+ // if it contains a generic lambda, so we need to increase
+ // the template depth as these parameters would not be instantiated
+ // at the current level.
+ TemplateParameterDepthRAII CurTemplateDepthTracker(
+ TemplateParameterDepth);
+ ++CurTemplateDepthTracker;
+ EnterExpressionEvaluationContext ConstantEvaluated(
+ Actions, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+ DefaultArg = Actions.CorrectDelayedTyposInExpr(ParseInitializer());
+ if (DefaultArg.isInvalid())
+ SkipUntil(tok::comma, tok::greater, StopAtSemi | StopBeforeMatch);
+ }
}
// Create the parameter.
@@ -1199,7 +1269,6 @@ bool Parser::ParseGreaterThanInTemplateList(SourceLocation LAngleLoc,
return false;
}
-
/// Parses a template-id that after the template name has
/// already been parsed.
///
@@ -1211,11 +1280,11 @@ bool Parser::ParseGreaterThanInTemplateList(SourceLocation LAngleLoc,
/// token that forms the template-id. Otherwise, we will leave the
/// last token in the stream (e.g., so that it can be replaced with an
/// annotation token).
-bool
-Parser::ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
- SourceLocation &LAngleLoc,
- TemplateArgList &TemplateArgs,
- SourceLocation &RAngleLoc) {
+bool Parser::ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
+ SourceLocation &LAngleLoc,
+ TemplateArgList &TemplateArgs,
+ SourceLocation &RAngleLoc,
+ TemplateTy Template) {
assert(Tok.is(tok::less) && "Must have already parsed the template-name");
// Consume the '<'.
@@ -1228,7 +1297,7 @@ Parser::ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
if (!Tok.isOneOf(tok::greater, tok::greatergreater,
tok::greatergreatergreater, tok::greaterequal,
tok::greatergreaterequal))
- Invalid = ParseTemplateArgumentList(TemplateArgs);
+ Invalid = ParseTemplateArgumentList(TemplateArgs, Template, LAngleLoc);
if (Invalid) {
// Try to find the closing '>'.
@@ -1309,8 +1378,8 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
TemplateArgList TemplateArgs;
bool ArgsInvalid = false;
if (!TypeConstraint || Tok.is(tok::less)) {
- ArgsInvalid = ParseTemplateIdAfterTemplateName(false, LAngleLoc,
- TemplateArgs, RAngleLoc);
+ ArgsInvalid = ParseTemplateIdAfterTemplateName(
+ false, LAngleLoc, TemplateArgs, RAngleLoc, Template);
// If we couldn't recover from invalid arguments, don't form an annotation
// token -- we don't know how much to annotate.
// FIXME: This can lead to duplicate diagnostics if we retry parsing this
@@ -1382,12 +1451,15 @@ bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
///
/// \param SS The scope specifier appearing before the template-id, if any.
///
+/// \param AllowImplicitTypename whether this is a context where T::type
+/// denotes a dependent type.
/// \param IsClassName Is this template-id appearing in a context where we
/// know it names a class, such as in an elaborated-type-specifier or
/// base-specifier? ('typename' and 'template' are unneeded and disallowed
/// in those contexts.)
-void Parser::AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
- bool IsClassName) {
+void Parser::AnnotateTemplateIdTokenAsType(
+ CXXScopeSpec &SS, ImplicitTypenameContext AllowImplicitTypename,
+ bool IsClassName) {
assert(Tok.is(tok::annot_template_id) && "Requires template-id tokens");
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
@@ -1405,7 +1477,7 @@ void Parser::AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
TemplateId->Template, TemplateId->Name,
TemplateId->TemplateNameLoc, TemplateId->LAngleLoc,
TemplateArgsPtr, TemplateId->RAngleLoc,
- /*IsCtorOrDtorName*/ false, IsClassName);
+ /*IsCtorOrDtorName=*/false, IsClassName, AllowImplicitTypename);
// Create the new "type" annotation token.
Tok.setKind(tok::annot_typename);
setTypeAnnotation(Tok, Type);
@@ -1444,7 +1516,7 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
// '>', or (in some cases) '>>'.
CXXScopeSpec SS; // nested-name-specifier, if present
ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false);
ParsedTemplateArgument Result;
@@ -1509,6 +1581,8 @@ ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
/// constant-expression
/// type-id
/// id-expression
+/// braced-init-list [C++26, DR]
+///
ParsedTemplateArgument Parser::ParseTemplateArgument() {
// C++ [temp.arg]p2:
// In a template-argument, an ambiguity between a type-id and an
@@ -1546,8 +1620,12 @@ ParsedTemplateArgument Parser::ParseTemplateArgument() {
}
// Parse a non-type template argument.
+ ExprResult ExprArg;
SourceLocation Loc = Tok.getLocation();
- ExprResult ExprArg = ParseConstantExpressionInExprEvalContext(MaybeTypeCast);
+ if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace))
+ ExprArg = ParseBraceInitializer();
+ else
+ ExprArg = ParseConstantExpressionInExprEvalContext(MaybeTypeCast);
if (ExprArg.isInvalid() || !ExprArg.get()) {
return ParsedTemplateArgument();
}
@@ -1562,19 +1640,34 @@ ParsedTemplateArgument Parser::ParseTemplateArgument() {
/// template-argument-list: [C++ 14.2]
/// template-argument
/// template-argument-list ',' template-argument
-bool
-Parser::ParseTemplateArgumentList(TemplateArgList &TemplateArgs) {
+///
+/// \param Template is only used for code completion, and may be null.
+bool Parser::ParseTemplateArgumentList(TemplateArgList &TemplateArgs,
+ TemplateTy Template,
+ SourceLocation OpenLoc) {
ColonProtectionRAIIObject ColonProtection(*this, false);
+ auto RunSignatureHelp = [&] {
+ if (!Template)
+ return QualType();
+ CalledSignatureHelp = true;
+ return Actions.ProduceTemplateArgumentSignatureHelp(Template, TemplateArgs,
+ OpenLoc);
+ };
+
do {
+ PreferredType.enterFunctionArgument(Tok.getLocation(), RunSignatureHelp);
ParsedTemplateArgument Arg = ParseTemplateArgument();
SourceLocation EllipsisLoc;
if (TryConsumeToken(tok::ellipsis, EllipsisLoc))
Arg = Actions.ActOnPackExpansion(Arg, EllipsisLoc);
- if (Arg.isInvalid())
+ if (Arg.isInvalid()) {
+ if (PP.isCodeCompletionReached() && !CalledSignatureHelp)
+ RunSignatureHelp();
return true;
+ }
// Save this template argument.
TemplateArgs.push_back(Arg);
@@ -1658,6 +1751,11 @@ void Parser::ParseLateTemplatedFuncDef(LateParsedTemplate &LPT) {
Actions.PushDeclContext(Actions.getCurScope(), DC);
}
+ // Parsing should occur with empty FP pragma stack and FP options used in the
+ // point of the template definition.
+ Sema::FpPragmaStackSaveRAII SavedStack(Actions);
+ Actions.resetFPOptions(LPT.FPO);
+
assert(!LPT.Toks.empty() && "Empty body!");
// Append the current token at the end of the new token stream so that it
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp b/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp
index c0bfbbde40ac..5bfabf55f50c 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseTentative.cpp
@@ -46,7 +46,10 @@ using namespace clang;
/// 'using' 'namespace' '::'[opt] nested-name-specifier[opt]
/// namespace-name ';'
///
-bool Parser::isCXXDeclarationStatement() {
+bool Parser::isCXXDeclarationStatement(
+ bool DisambiguatingWithExpression /*=false*/) {
+ assert(getLangOpts().CPlusPlus && "Must be called for C++ only.");
+
switch (Tok.getKind()) {
// asm-definition
case tok::kw_asm:
@@ -59,6 +62,51 @@ bool Parser::isCXXDeclarationStatement() {
case tok::kw_static_assert:
case tok::kw__Static_assert:
return true;
+ case tok::coloncolon:
+ case tok::identifier: {
+ if (DisambiguatingWithExpression) {
+ RevertingTentativeParsingAction TPA(*this);
+ // Parse the C++ scope specifier.
+ CXXScopeSpec SS;
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
+ /*ObjectHasErrors=*/false,
+ /*EnteringContext=*/true);
+
+ switch (Tok.getKind()) {
+ case tok::identifier: {
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ bool isDeductionGuide = Actions.isDeductionGuideName(
+ getCurScope(), *II, Tok.getLocation(), SS, /*Template=*/nullptr);
+ if (Actions.isCurrentClassName(*II, getCurScope(), &SS) ||
+ isDeductionGuide) {
+ if (isConstructorDeclarator(/*Unqualified=*/SS.isEmpty(),
+ isDeductionGuide,
+ DeclSpec::FriendSpecified::No))
+ return true;
+ } else if (SS.isNotEmpty()) {
+ // If the scope is not empty, it could alternatively be something like
+ // a typedef or using declaration. That declaration might be private
+ // in the global context, which would be diagnosed by calling into
+ // isCXXSimpleDeclaration, but may actually be fine in the context of
+ // member functions and static variable definitions. Check if the next
+ // token is also an identifier and assume a declaration.
+ // We cannot check if the scopes match because the declarations could
+ // involve namespaces and friend declarations.
+ if (NextToken().is(tok::identifier))
+ return true;
+ }
+ break;
+ }
+ case tok::kw_operator:
+ return true;
+ case tok::tilde:
+ return true;
+ default:
+ break;
+ }
+ }
+ }
+ [[fallthrough]];
// simple-declaration
default:
return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/false);
@@ -111,8 +159,8 @@ bool Parser::isCXXSimpleDeclaration(bool AllowForRangeDecl) {
// a case.
bool InvalidAsDeclaration = false;
- TPResult TPR = isCXXDeclarationSpecifier(TPResult::False,
- &InvalidAsDeclaration);
+ TPResult TPR = isCXXDeclarationSpecifier(
+ ImplicitTypenameContext::No, TPResult::False, &InvalidAsDeclaration);
if (TPR != TPResult::Ambiguous)
return TPR != TPResult::False; // Returns true for TPResult::True or
// TPResult::Error.
@@ -158,10 +206,12 @@ Parser::TPResult Parser::TryConsumeDeclarationSpecifier() {
ConsumeToken();
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case tok::kw_typeof:
case tok::kw___attribute:
- case tok::kw___underlying_type: {
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) case tok::kw___##Trait:
+#include "clang/Basic/TransformTypeTraits.def"
+ {
ConsumeToken();
if (Tok.isNot(tok::l_paren))
return TPResult::Error;
@@ -203,7 +253,7 @@ Parser::TPResult Parser::TryConsumeDeclarationSpecifier() {
case tok::annot_cxxscope:
ConsumeAnnotationToken();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
ConsumeAnyToken();
@@ -224,6 +274,7 @@ Parser::TPResult Parser::TryConsumeDeclarationSpecifier() {
/// attribute-specifier-seqopt type-specifier-seq declarator
///
Parser::TPResult Parser::TryParseSimpleDeclaration(bool AllowForRangeDecl) {
+ bool DeclSpecifierIsAuto = Tok.is(tok::kw_auto);
if (TryConsumeDeclarationSpecifier() == TPResult::Error)
return TPResult::Error;
@@ -231,7 +282,7 @@ Parser::TPResult Parser::TryParseSimpleDeclaration(bool AllowForRangeDecl) {
// simple-declaration. Don't bother calling isCXXDeclarationSpecifier in the
// overwhelmingly common case that the next token is a '('.
if (Tok.isNot(tok::l_paren)) {
- TPResult TPR = isCXXDeclarationSpecifier();
+ TPResult TPR = isCXXDeclarationSpecifier(ImplicitTypenameContext::No);
if (TPR == TPResult::Ambiguous)
return TPResult::True;
if (TPR == TPResult::True || TPR == TPResult::Error)
@@ -239,7 +290,8 @@ Parser::TPResult Parser::TryParseSimpleDeclaration(bool AllowForRangeDecl) {
assert(TPR == TPResult::False);
}
- TPResult TPR = TryParseInitDeclaratorList();
+ TPResult TPR = TryParseInitDeclaratorList(
+ /*mayHaveTrailingReturnType=*/DeclSpecifierIsAuto);
if (TPR != TPResult::Ambiguous)
return TPR;
@@ -276,10 +328,15 @@ Parser::TPResult Parser::TryParseSimpleDeclaration(bool AllowForRangeDecl) {
/// '{' initializer-list ','[opt] '}'
/// '{' '}'
///
-Parser::TPResult Parser::TryParseInitDeclaratorList() {
- while (1) {
+Parser::TPResult
+Parser::TryParseInitDeclaratorList(bool MayHaveTrailingReturnType) {
+ while (true) {
// declarator
- TPResult TPR = TryParseDeclarator(false/*mayBeAbstract*/);
+ TPResult TPR = TryParseDeclarator(
+ /*mayBeAbstract=*/false,
+ /*mayHaveIdentifier=*/true,
+ /*mayHaveDirectInit=*/false,
+ /*mayHaveTrailingReturnType=*/MayHaveTrailingReturnType);
if (TPR != TPResult::Ambiguous)
return TPR;
@@ -440,7 +497,8 @@ bool Parser::isEnumBase(bool AllowSemi) {
// FIXME: We could disallow non-type decl-specifiers here, but it makes no
// difference: those specifiers are ill-formed regardless of the
// interpretation.
- TPResult R = isCXXDeclarationSpecifier(/*BracedCastResult*/ TPResult::True,
+ TPResult R = isCXXDeclarationSpecifier(ImplicitTypenameContext::No,
+ /*BracedCastResult=*/TPResult::True,
&InvalidAsDeclSpec);
if (R == TPResult::Ambiguous) {
// We either have a decl-specifier followed by '(' or an undeclared
@@ -454,7 +512,8 @@ bool Parser::isEnumBase(bool AllowSemi) {
return true;
// A second decl-specifier unambiguously indicatges an enum-base.
- R = isCXXDeclarationSpecifier(TPResult::True, &InvalidAsDeclSpec);
+ R = isCXXDeclarationSpecifier(ImplicitTypenameContext::No, TPResult::True,
+ &InvalidAsDeclSpec);
}
return R != TPResult::False;
@@ -483,20 +542,27 @@ Parser::isCXXConditionDeclarationOrInitStatement(bool CanBeInitStatement,
ConditionDeclarationOrInitStatementState State(*this, CanBeInitStatement,
CanBeForRangeDecl);
- if (State.update(isCXXDeclarationSpecifier()))
+ if (CanBeInitStatement && Tok.is(tok::kw_using))
+ return ConditionOrInitStatement::InitStmtDecl;
+ if (State.update(isCXXDeclarationSpecifier(ImplicitTypenameContext::No)))
return State.result();
// It might be a declaration; we need tentative parsing.
RevertingTentativeParsingAction PA(*this);
// FIXME: A tag definition unambiguously tells us this is an init-statement.
+ bool MayHaveTrailingReturnType = Tok.is(tok::kw_auto);
if (State.update(TryConsumeDeclarationSpecifier()))
return State.result();
assert(Tok.is(tok::l_paren) && "Expected '('");
while (true) {
// Consume a declarator.
- if (State.update(TryParseDeclarator(false/*mayBeAbstract*/)))
+ if (State.update(TryParseDeclarator(
+ /*mayBeAbstract=*/false,
+ /*mayHaveIdentifier=*/true,
+ /*mayHaveDirectInit=*/false,
+ /*mayHaveTrailingReturnType=*/MayHaveTrailingReturnType)))
return State.result();
// Attributes, asm label, or an initializer imply this is not an expression.
@@ -569,7 +635,7 @@ bool Parser::isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous) {
// type. The resolution is that any construct that could possibly be a type-id
// in its syntactic context shall be considered a type-id.
- TPResult TPR = isCXXDeclarationSpecifier();
+ TPResult TPR = isCXXDeclarationSpecifier(ImplicitTypenameContext::No);
if (TPR != TPResult::Ambiguous)
return TPR != TPResult::False; // Returns true for TPResult::True or
// TPResult::Error.
@@ -581,13 +647,16 @@ bool Parser::isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous) {
// We need tentative parsing...
RevertingTentativeParsingAction PA(*this);
+ bool MayHaveTrailingReturnType = Tok.is(tok::kw_auto);
// type-specifier-seq
TryConsumeDeclarationSpecifier();
assert(Tok.is(tok::l_paren) && "Expected '('");
// declarator
- TPR = TryParseDeclarator(true/*mayBeAbstract*/, false/*mayHaveIdentifier*/);
+ TPR = TryParseDeclarator(true /*mayBeAbstract*/, false /*mayHaveIdentifier*/,
+ /*mayHaveDirectInit=*/false,
+ MayHaveTrailingReturnType);
// In case of an error, let the declaration parsing code handle it.
if (TPR == TPResult::Error)
@@ -599,7 +668,12 @@ bool Parser::isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous) {
if (Context == TypeIdInParens && Tok.is(tok::r_paren)) {
TPR = TPResult::True;
isAmbiguous = true;
-
+ // We are supposed to be inside the first operand to a _Generic selection
+ // expression, so if we find a comma after the declarator, we've found a
+ // type and not an expression.
+ } else if (Context == TypeIdAsGenericSelectionArgument && Tok.is(tok::comma)) {
+ TPR = TPResult::True;
+ isAmbiguous = true;
// We are supposed to be inside a template argument, so if after
// the abstract declarator we encounter a '>', '>>' (in C++0x), or
// ','; or, in C++0x, an ellipsis immediately preceding such, this
@@ -616,6 +690,9 @@ bool Parser::isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous) {
TPR = TPResult::True;
isAmbiguous = true;
+ } else if (Context == TypeIdInTrailingReturnType) {
+ TPR = TPResult::True;
+ isAmbiguous = true;
} else
TPR = TPResult::False;
}
@@ -663,6 +740,9 @@ Parser::isCXX11AttributeSpecifier(bool Disambiguate,
if (Tok.is(tok::kw_alignas))
return CAK_AttributeSpecifier;
+ if (Tok.isRegularKeywordAttribute())
+ return CAK_AttributeSpecifier;
+
if (Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square))
return CAK_NotAttributeSpecifier;
@@ -802,7 +882,8 @@ Parser::isCXX11AttributeSpecifier(bool Disambiguate,
bool Parser::TrySkipAttributes() {
while (Tok.isOneOf(tok::l_square, tok::kw___attribute, tok::kw___declspec,
- tok::kw_alignas)) {
+ tok::kw_alignas) ||
+ Tok.isRegularKeywordAttribute()) {
if (Tok.is(tok::l_square)) {
ConsumeBracket();
if (Tok.isNot(tok::l_square))
@@ -813,6 +894,9 @@ bool Parser::TrySkipAttributes() {
// Note that explicitly checking for `[[` and `]]` allows to fail as
// expected in the case of the Objective-C message send syntax.
ConsumeBracket();
+ } else if (Tok.isRegularKeywordAttribute() &&
+ !doesKeywordAttributeTakeArgs(Tok.getKind())) {
+ ConsumeToken();
} else {
ConsumeToken();
if (Tok.isNot(tok::l_paren))
@@ -930,7 +1014,7 @@ Parser::TPResult Parser::TryParseOperatorId() {
// Maybe this is a conversion-function-id.
bool AnyDeclSpecifiers = false;
while (true) {
- TPResult TPR = isCXXDeclarationSpecifier();
+ TPResult TPR = isCXXDeclarationSpecifier(ImplicitTypenameContext::No);
if (TPR == TPResult::Error)
return TPR;
if (TPR == TPResult::False) {
@@ -1000,7 +1084,8 @@ Parser::TPResult Parser::TryParseOperatorId() {
///
Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
bool mayHaveIdentifier,
- bool mayHaveDirectInit) {
+ bool mayHaveDirectInit,
+ bool mayHaveTrailingReturnType) {
// declarator:
// direct-declarator
// ptr-operator declarator
@@ -1035,13 +1120,14 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
} else if (Tok.is(tok::l_paren)) {
ConsumeParen();
if (mayBeAbstract &&
- (Tok.is(tok::r_paren) || // 'int()' is a function.
- // 'int(...)' is a function.
+ (Tok.is(tok::r_paren) || // 'int()' is a function.
+ // 'int(...)' is a function.
(Tok.is(tok::ellipsis) && NextToken().is(tok::r_paren)) ||
- isDeclarationSpecifier())) { // 'int(int)' is a function.
+ isDeclarationSpecifier(
+ ImplicitTypenameContext::No))) { // 'int(int)' is a function.
// '(' parameter-declaration-clause ')' cv-qualifier-seq[opt]
// exception-specification[opt]
- TPResult TPR = TryParseFunctionDeclarator();
+ TPResult TPR = TryParseFunctionDeclarator(mayHaveTrailingReturnType);
if (TPR != TPResult::Ambiguous)
return TPR;
} else {
@@ -1066,7 +1152,7 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
if (mayHaveDirectInit)
return TPResult::Ambiguous;
- while (1) {
+ while (true) {
TPResult TPR(TPResult::Ambiguous);
if (Tok.is(tok::l_paren)) {
@@ -1080,7 +1166,7 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
// direct-declarator '(' parameter-declaration-clause ')'
// cv-qualifier-seq[opt] exception-specification[opt]
ConsumeParen();
- TPR = TryParseFunctionDeclarator();
+ TPR = TryParseFunctionDeclarator(mayHaveTrailingReturnType);
} else if (Tok.is(tok::l_square)) {
// direct-declarator '[' constant-expression[opt] ']'
// direct-abstract-declarator[opt] '[' constant-expression[opt] ']'
@@ -1101,9 +1187,7 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
}
bool Parser::isTentativelyDeclared(IdentifierInfo *II) {
- return std::find(TentativelyDeclaredIdentifiers.begin(),
- TentativelyDeclaredIdentifiers.end(), II)
- != TentativelyDeclaredIdentifiers.end();
+ return llvm::is_contained(TentativelyDeclaredIdentifiers, II);
}
namespace {
@@ -1245,19 +1329,37 @@ public:
/// [GNU] restrict
///
Parser::TPResult
-Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
+Parser::isCXXDeclarationSpecifier(ImplicitTypenameContext AllowImplicitTypename,
+ Parser::TPResult BracedCastResult,
bool *InvalidAsDeclSpec) {
- auto IsPlaceholderSpecifier = [&] (TemplateIdAnnotation *TemplateId,
- int Lookahead) {
+ auto IsPlaceholderSpecifier = [&](TemplateIdAnnotation *TemplateId,
+ int Lookahead) {
// We have a placeholder-constraint (we check for 'auto' or 'decltype' to
// distinguish 'C<int>;' from 'C<int> auto c = 1;')
return TemplateId->Kind == TNK_Concept_template &&
- GetLookAheadToken(Lookahead + 1).isOneOf(tok::kw_auto, tok::kw_decltype,
- // If we have an identifier here, the user probably forgot the
- // 'auto' in the placeholder constraint, e.g. 'C<int> x = 2;'
- // This will be diagnosed nicely later, so disambiguate as a
- // declaration.
- tok::identifier);
+ (GetLookAheadToken(Lookahead + 1)
+ .isOneOf(tok::kw_auto, tok::kw_decltype,
+ // If we have an identifier here, the user probably
+ // forgot the 'auto' in the placeholder constraint,
+ // e.g. 'C<int> x = 2;' This will be diagnosed nicely
+ // later, so disambiguate as a declaration.
+ tok::identifier,
+ // CVR qualifierslikely the same situation for the
+ // user, so let this be diagnosed nicely later. We
+ // cannot handle references here, as `C<int> & Other`
+ // and `C<int> && Other` are both legal.
+ tok::kw_const, tok::kw_volatile, tok::kw_restrict) ||
+ // While `C<int> && Other` is legal, doing so while not specifying a
+ // template argument is NOT, so see if we can fix up in that case at
+ // minimum. Concepts require at least 1 template parameter, so we
+ // can count on the argument count.
+ // FIXME: In the future, we migth be able to have SEMA look up the
+ // declaration for this concept, and see how many template
+ // parameters it has. If the concept isn't fully specified, it is
+ // possibly a situation where we want deduction, such as:
+ // `BinaryConcept<int> auto f = bar();`
+ (TemplateId->NumArgs == 0 &&
+ GetLookAheadToken(Lookahead + 1).isOneOf(tok::amp, tok::ampamp)));
};
switch (Tok.getKind()) {
case tok::identifier: {
@@ -1288,7 +1390,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
// template template argument, we'll undo this when checking the
// validity of the argument.
if (getLangOpts().CPlusPlus17) {
- if (TryAnnotateTypeOrScopeToken())
+ if (TryAnnotateTypeOrScopeToken(AllowImplicitTypename))
return TPResult::Error;
if (Tok.isNot(tok::identifier))
break;
@@ -1309,7 +1411,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
// a missing 'typename' keyword. Don't use TryAnnotateName in this case,
// since it will annotate as a primary expression, and we want to use the
// "missing 'typename'" logic.
- if (TryAnnotateTypeOrScopeToken())
+ if (TryAnnotateTypeOrScopeToken(AllowImplicitTypename))
return TPResult::Error;
// If annotation failed, assume it's a non-type.
// FIXME: If this happens due to an undeclared identifier, treat it as
@@ -1319,30 +1421,43 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
}
// We annotated this token as something. Recurse to handle whatever we got.
- return isCXXDeclarationSpecifier(BracedCastResult, InvalidAsDeclSpec);
+ return isCXXDeclarationSpecifier(AllowImplicitTypename, BracedCastResult,
+ InvalidAsDeclSpec);
}
case tok::kw_typename: // typename T::type
// Annotate typenames and C++ scope specifiers. If we get one, just
// recurse to handle whatever we get.
- if (TryAnnotateTypeOrScopeToken())
+ if (TryAnnotateTypeOrScopeToken(ImplicitTypenameContext::Yes))
return TPResult::Error;
- return isCXXDeclarationSpecifier(BracedCastResult, InvalidAsDeclSpec);
+ return isCXXDeclarationSpecifier(ImplicitTypenameContext::Yes,
+ BracedCastResult, InvalidAsDeclSpec);
+
+ case tok::kw_auto: {
+ if (!getLangOpts().CPlusPlus23)
+ return TPResult::True;
+ if (NextToken().is(tok::l_brace))
+ return TPResult::False;
+ if (NextToken().is(tok::l_paren))
+ return TPResult::Ambiguous;
+ return TPResult::True;
+ }
case tok::coloncolon: { // ::foo::bar
const Token &Next = NextToken();
if (Next.isOneOf(tok::kw_new, // ::new
tok::kw_delete)) // ::delete
return TPResult::False;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case tok::kw___super:
case tok::kw_decltype:
// Annotate typenames and C++ scope specifiers. If we get one, just
// recurse to handle whatever we get.
- if (TryAnnotateTypeOrScopeToken())
+ if (TryAnnotateTypeOrScopeToken(AllowImplicitTypename))
return TPResult::Error;
- return isCXXDeclarationSpecifier(BracedCastResult, InvalidAsDeclSpec);
+ return isCXXDeclarationSpecifier(AllowImplicitTypename, BracedCastResult,
+ InvalidAsDeclSpec);
// decl-specifier:
// storage-class-specifier
@@ -1361,7 +1476,6 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
case tok::kw_static:
case tok::kw_extern:
case tok::kw_mutable:
- case tok::kw_auto:
case tok::kw___thread:
case tok::kw_thread_local:
case tok::kw__Thread_local:
@@ -1401,7 +1515,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
case tok::kw_private:
if (!getLangOpts().OpenCL)
return TPResult::False;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case tok::kw___private:
case tok::kw___local:
case tok::kw___global:
@@ -1414,6 +1528,12 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
// OpenCL pipe
case tok::kw_pipe:
+ // HLSL address space qualifiers
+ case tok::kw_groupshared:
+ case tok::kw_in:
+ case tok::kw_inout:
+ case tok::kw_out:
+
// GNU
case tok::kw_restrict:
case tok::kw__Complex:
@@ -1443,6 +1563,10 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
case tok::kw___kindof:
return TPResult::True;
+ // WebAssemblyFuncref
+ case tok::kw___funcref:
+ return TPResult::True;
+
// Borland
case tok::kw___pascal:
return TPResult::True;
@@ -1451,6 +1575,17 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
case tok::kw___vector:
return TPResult::True;
+ case tok::kw_this: {
+ // Try to parse a C++23 Explicit Object Parameter
+ // We do that in all language modes to produce a better diagnostic.
+ if (getLangOpts().CPlusPlus) {
+ RevertingTentativeParsingAction PA(*this);
+ ConsumeToken();
+ return isCXXDeclarationSpecifier(AllowImplicitTypename, BracedCastResult,
+ InvalidAsDeclSpec);
+ }
+ return TPResult::False;
+ }
case tok::annot_template_id: {
TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
// If lookup for the template-name found nothing, don't assume we have a
@@ -1472,14 +1607,14 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
if (TemplateId->Kind != TNK_Type_template)
return TPResult::False;
CXXScopeSpec SS;
- AnnotateTemplateIdTokenAsType(SS);
+ AnnotateTemplateIdTokenAsType(SS, AllowImplicitTypename);
assert(Tok.is(tok::annot_typename));
goto case_typename;
}
case tok::annot_cxxscope: // foo::bar or ::foo::bar, but already parsed
// We've already annotated a scope; try to annotate a type.
- if (TryAnnotateTypeOrScopeToken())
+ if (TryAnnotateTypeOrScopeToken(AllowImplicitTypename))
return TPResult::Error;
if (!Tok.is(tok::annot_typename)) {
if (Tok.is(tok::annot_cxxscope) &&
@@ -1510,8 +1645,8 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
bool isIdentifier = Tok.is(tok::identifier);
TPResult TPR = TPResult::False;
if (!isIdentifier)
- TPR = isCXXDeclarationSpecifier(BracedCastResult,
- InvalidAsDeclSpec);
+ TPR = isCXXDeclarationSpecifier(
+ AllowImplicitTypename, BracedCastResult, InvalidAsDeclSpec);
if (isIdentifier ||
TPR == TPResult::True || TPR == TPResult::Error)
@@ -1537,7 +1672,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
} else {
// Try to resolve the name. If it doesn't exist, assume it was
// intended to name a type and keep disambiguating.
- switch (TryAnnotateName()) {
+ switch (TryAnnotateName(/*CCC=*/nullptr, AllowImplicitTypename)) {
case ANK_Error:
return TPResult::Error;
case ANK_TentativeDecl:
@@ -1548,7 +1683,10 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
if (getLangOpts().CPlusPlus17) {
if (TryAnnotateTypeOrScopeToken())
return TPResult::Error;
- if (Tok.isNot(tok::identifier))
+ // If we annotated then the current token should not still be ::
+ // FIXME we may want to also check for tok::annot_typename but
+ // currently don't have a test case.
+ if (Tok.isNot(tok::annot_cxxscope))
break;
}
@@ -1567,13 +1705,14 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
// Annotated it, check again.
assert(Tok.isNot(tok::annot_cxxscope) ||
NextToken().isNot(tok::identifier));
- return isCXXDeclarationSpecifier(BracedCastResult, InvalidAsDeclSpec);
+ return isCXXDeclarationSpecifier(AllowImplicitTypename,
+ BracedCastResult, InvalidAsDeclSpec);
}
}
return TPResult::False;
}
// If that succeeded, fallthrough into the generic simple-type-id case.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
// The ambiguity resides in a simple-type-specifier/typename-specifier
// followed by a '('. The '(' could either be the start of:
@@ -1616,7 +1755,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
return TPResult::True;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case tok::kw_char:
case tok::kw_wchar_t:
@@ -1637,8 +1776,12 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
case tok::kw___bf16:
case tok::kw__Float16:
case tok::kw___float128:
+ case tok::kw___ibm128:
case tok::kw_void:
case tok::annot_decltype:
+ case tok::kw__Accum:
+ case tok::kw__Fract:
+ case tok::kw__Sat:
#define GENERIC_IMAGE_TYPE(ImgType, Id) case tok::kw_##ImgType##_t:
#include "clang/Basic/OpenCLImageTypes.def"
if (NextToken().is(tok::l_paren))
@@ -1681,14 +1824,15 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
return TPResult::True;
}
- // C++0x type traits support
- case tok::kw___underlying_type:
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) case tok::kw___##Trait:
+#include "clang/Basic/TransformTypeTraits.def"
return TPResult::True;
// C11 _Atomic
case tok::kw__Atomic:
return TPResult::True;
+ case tok::kw__BitInt:
case tok::kw__ExtInt: {
if (NextToken().isNot(tok::l_paren))
return TPResult::Error;
@@ -1719,7 +1863,8 @@ bool Parser::isCXXDeclarationSpecifierAType() {
case tok::annot_template_id:
case tok::annot_typename:
case tok::kw_typeof:
- case tok::kw___underlying_type:
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) case tok::kw___##Trait:
+#include "clang/Basic/TransformTypeTraits.def"
return true;
// elaborated-type-specifier
@@ -1740,6 +1885,7 @@ bool Parser::isCXXDeclarationSpecifierAType() {
case tok::kw_short:
case tok::kw_int:
case tok::kw__ExtInt:
+ case tok::kw__BitInt:
case tok::kw_long:
case tok::kw___int64:
case tok::kw___int128:
@@ -1751,9 +1897,13 @@ bool Parser::isCXXDeclarationSpecifierAType() {
case tok::kw___bf16:
case tok::kw__Float16:
case tok::kw___float128:
+ case tok::kw___ibm128:
case tok::kw_void:
case tok::kw___unknown_anytype:
case tok::kw___auto_type:
+ case tok::kw__Accum:
+ case tok::kw__Fract:
+ case tok::kw__Sat:
#define GENERIC_IMAGE_TYPE(ImgType, Id) case tok::kw_##ImgType##_t:
#include "clang/Basic/OpenCLImageTypes.def"
return true;
@@ -1821,7 +1971,8 @@ Parser::TPResult Parser::TryParseProtocolQualifiers() {
/// '(' parameter-declaration-clause ')' cv-qualifier-seq[opt]
/// exception-specification[opt]
///
-bool Parser::isCXXFunctionDeclarator(bool *IsAmbiguous) {
+bool Parser::isCXXFunctionDeclarator(
+ bool *IsAmbiguous, ImplicitTypenameContext AllowImplicitTypename) {
// C++ 8.2p1:
// The ambiguity arising from the similarity between a function-style cast and
@@ -1836,7 +1987,9 @@ bool Parser::isCXXFunctionDeclarator(bool *IsAmbiguous) {
ConsumeParen();
bool InvalidAsDeclaration = false;
- TPResult TPR = TryParseParameterDeclarationClause(&InvalidAsDeclaration);
+ TPResult TPR = TryParseParameterDeclarationClause(
+ &InvalidAsDeclaration, /*VersusTemplateArgument=*/false,
+ AllowImplicitTypename);
if (TPR == TPResult::Ambiguous) {
if (Tok.isNot(tok::r_paren))
TPR = TPResult::False;
@@ -1880,9 +2033,9 @@ bool Parser::isCXXFunctionDeclarator(bool *IsAmbiguous) {
/// attribute-specifier-seq[opt] decl-specifier-seq abstract-declarator[opt]
/// attributes[opt] '=' assignment-expression
///
-Parser::TPResult
-Parser::TryParseParameterDeclarationClause(bool *InvalidAsDeclaration,
- bool VersusTemplateArgument) {
+Parser::TPResult Parser::TryParseParameterDeclarationClause(
+ bool *InvalidAsDeclaration, bool VersusTemplateArgument,
+ ImplicitTypenameContext AllowImplicitTypename) {
if (Tok.is(tok::r_paren))
return TPResult::Ambiguous;
@@ -1894,7 +2047,7 @@ Parser::TryParseParameterDeclarationClause(bool *InvalidAsDeclaration,
// parameter-declaration
// parameter-declaration-list ',' parameter-declaration
//
- while (1) {
+ while (true) {
// '...'[opt]
if (Tok.is(tok::ellipsis)) {
ConsumeToken();
@@ -1915,8 +2068,8 @@ Parser::TryParseParameterDeclarationClause(bool *InvalidAsDeclaration,
// decl-specifier-seq
// A parameter-declaration's initializer must be preceded by an '=', so
// decl-specifier-seq '{' is not a parameter in C++11.
- TPResult TPR = isCXXDeclarationSpecifier(TPResult::False,
- InvalidAsDeclaration);
+ TPResult TPR = isCXXDeclarationSpecifier(
+ AllowImplicitTypename, TPResult::False, InvalidAsDeclaration);
// A declaration-specifier (not followed by '(' or '{') means this can't be
// an expression, but it could still be a template argument.
if (TPR != TPResult::Ambiguous &&
@@ -1924,6 +2077,7 @@ Parser::TryParseParameterDeclarationClause(bool *InvalidAsDeclaration,
return TPR;
bool SeenType = false;
+ bool DeclarationSpecifierIsAuto = Tok.is(tok::kw_auto);
do {
SeenType |= isCXXDeclarationSpecifierAType();
if (TryConsumeDeclarationSpecifier() == TPResult::Error)
@@ -1933,7 +2087,7 @@ Parser::TryParseParameterDeclarationClause(bool *InvalidAsDeclaration,
if (SeenType && Tok.is(tok::identifier))
return TPResult::True;
- TPR = isCXXDeclarationSpecifier(TPResult::False,
+ TPR = isCXXDeclarationSpecifier(AllowImplicitTypename, TPResult::False,
InvalidAsDeclaration);
if (TPR == TPResult::Error)
return TPR;
@@ -1945,7 +2099,11 @@ Parser::TryParseParameterDeclarationClause(bool *InvalidAsDeclaration,
// declarator
// abstract-declarator[opt]
- TPR = TryParseDeclarator(true/*mayBeAbstract*/);
+ TPR = TryParseDeclarator(
+ /*mayBeAbstract=*/true,
+ /*mayHaveIdentifier=*/true,
+ /*mayHaveDirectInit=*/false,
+ /*mayHaveTrailingReturnType=*/DeclarationSpecifierIsAuto);
if (TPR != TPResult::Ambiguous)
return TPR;
@@ -1999,7 +2157,8 @@ Parser::TryParseParameterDeclarationClause(bool *InvalidAsDeclaration,
/// exception-specification:
/// 'throw' '(' type-id-list[opt] ')'
///
-Parser::TPResult Parser::TryParseFunctionDeclarator() {
+Parser::TPResult
+Parser::TryParseFunctionDeclarator(bool MayHaveTrailingReturnType) {
// The '(' is already parsed.
TPResult TPR = TryParseParameterDeclarationClause();
@@ -2044,9 +2203,52 @@ Parser::TPResult Parser::TryParseFunctionDeclarator() {
}
}
+ // attribute-specifier-seq
+ if (!TrySkipAttributes())
+ return TPResult::Ambiguous;
+
+ // trailing-return-type
+ if (Tok.is(tok::arrow) && MayHaveTrailingReturnType) {
+ if (TPR == TPResult::True)
+ return TPR;
+ ConsumeToken();
+ if (Tok.is(tok::identifier) && NameAfterArrowIsNonType()) {
+ return TPResult::False;
+ }
+ if (isCXXTypeId(TentativeCXXTypeIdContext::TypeIdInTrailingReturnType))
+ return TPResult::True;
+ }
+
return TPResult::Ambiguous;
}
+// When parsing an identifier after an arrow it may be a member expression,
+// in which case we should not annotate it as an independant expression
+// so we just lookup that name, if it's not a type the construct is not
+// a function declaration.
+bool Parser::NameAfterArrowIsNonType() {
+ assert(Tok.is(tok::identifier));
+ Token Next = NextToken();
+ if (Next.is(tok::coloncolon))
+ return false;
+ IdentifierInfo *Name = Tok.getIdentifierInfo();
+ SourceLocation NameLoc = Tok.getLocation();
+ CXXScopeSpec SS;
+ TentativeParseCCC CCC(Next);
+ Sema::NameClassification Classification =
+ Actions.ClassifyName(getCurScope(), SS, Name, NameLoc, Next, &CCC);
+ switch (Classification.getKind()) {
+ case Sema::NC_OverloadSet:
+ case Sema::NC_NonType:
+ case Sema::NC_VarTemplate:
+ case Sema::NC_FunctionTemplate:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
/// '[' constant-expression[opt] ']'
///
Parser::TPResult Parser::TryParseBracketDeclarator() {
@@ -2095,9 +2297,9 @@ Parser::TPResult Parser::isTemplateArgumentList(unsigned TokensToSkip) {
// but one good distinguishing factor is that a "decl-specifier" not
// followed by '(' or '{' can't appear in an expression.
bool InvalidAsTemplateArgumentList = false;
- if (isCXXDeclarationSpecifier(TPResult::False,
- &InvalidAsTemplateArgumentList) ==
- TPResult::True)
+ if (isCXXDeclarationSpecifier(ImplicitTypenameContext::No, TPResult::False,
+ &InvalidAsTemplateArgumentList) ==
+ TPResult::True)
return TPResult::True;
if (InvalidAsTemplateArgumentList)
return TPResult::False;
diff --git a/contrib/llvm-project/clang/lib/Parse/Parser.cpp b/contrib/llvm-project/clang/lib/Parse/Parser.cpp
index c81dd03ffaaa..0b092181bca7 100644
--- a/contrib/llvm-project/clang/lib/Parse/Parser.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/Parser.cpp
@@ -13,6 +13,7 @@
#include "clang/Parse/Parser.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTLambda.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/Basic/FileManager.h"
#include "clang/Parse/ParseDiagnostic.h"
@@ -21,6 +22,7 @@
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/TimeProfiler.h"
using namespace clang;
@@ -68,6 +70,11 @@ Parser::Parser(Preprocessor &pp, Sema &actions, bool skipFunctionBodies)
PP.addCommentHandler(CommentSemaHandler.get());
PP.setCodeCompletionHandler(*this);
+
+ Actions.ParseTypeFromStringCallback =
+ [this](StringRef TypeStr, StringRef Context, SourceLocation IncludeLoc) {
+ return this->ParseTypeFromString(TypeStr, Context, IncludeLoc);
+ };
}
DiagnosticBuilder Parser::Diag(SourceLocation Loc, unsigned DiagID) {
@@ -153,7 +160,7 @@ bool Parser::ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned DiagID,
return true;
}
-bool Parser::ExpectAndConsumeSemi(unsigned DiagID) {
+bool Parser::ExpectAndConsumeSemi(unsigned DiagID, StringRef TokenUsed) {
if (TryConsumeToken(tok::semi))
return false;
@@ -172,7 +179,7 @@ bool Parser::ExpectAndConsumeSemi(unsigned DiagID) {
return false;
}
- return ExpectAndConsume(tok::semi, DiagID);
+ return ExpectAndConsume(tok::semi, DiagID , TokenUsed);
}
void Parser::ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST TST) {
@@ -279,7 +286,7 @@ bool Parser::SkipUntil(ArrayRef<tok::TokenKind> Toks, SkipUntilFlags Flags) {
// We always want this function to skip at least one token if the first token
// isn't T and if not at EOF.
bool isFirstTokenSkipped = true;
- while (1) {
+ while (true) {
// If we found one of the tokens, stop and return true.
for (unsigned i = 0, NumToks = Toks.size(); i != NumToks; ++i) {
if (Tok.is(Toks[i])) {
@@ -316,9 +323,17 @@ bool Parser::SkipUntil(ArrayRef<tok::TokenKind> Toks, SkipUntilFlags Flags) {
return false;
ConsumeAnnotationToken();
break;
+ case tok::annot_pragma_openacc:
+ case tok::annot_pragma_openacc_end:
+ // Stop before an OpenACC pragma boundary.
+ if (OpenACCDirectiveParsing)
+ return false;
+ ConsumeAnnotationToken();
+ break;
case tok::annot_module_begin:
case tok::annot_module_end:
case tok::annot_module_include:
+ case tok::annot_repl_input_end:
// Stop before we change submodules. They generally indicate a "good"
// place to pick up parsing again (except in the special case where
// we're trying to skip to EOF).
@@ -386,7 +401,7 @@ bool Parser::SkipUntil(ArrayRef<tok::TokenKind> Toks, SkipUntilFlags Flags) {
case tok::semi:
if (HasFlagsSet(Flags, StopAtSemi))
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
// Skip this token.
ConsumeAnyToken();
@@ -522,7 +537,8 @@ void Parser::Initialize() {
Ident_strict = nullptr;
Ident_replacement = nullptr;
- Ident_language = Ident_defined_in = Ident_generated_declaration = nullptr;
+ Ident_language = Ident_defined_in = Ident_generated_declaration = Ident_USR =
+ nullptr;
Ident__except = nullptr;
@@ -581,15 +597,20 @@ void Parser::DestroyTemplateIds() {
/// top-level-declaration-seq[opt] private-module-fragment[opt]
///
/// Note that in C, it is an error if there is no first declaration.
-bool Parser::ParseFirstTopLevelDecl(DeclGroupPtrTy &Result) {
+bool Parser::ParseFirstTopLevelDecl(DeclGroupPtrTy &Result,
+ Sema::ModuleImportState &ImportState) {
Actions.ActOnStartOfTranslationUnit();
+ // For C++20 modules, a module decl must be the first in the TU. We also
+ // need to track module imports.
+ ImportState = Sema::ModuleImportState::FirstDecl;
+ bool NoTopLevelDecls = ParseTopLevelDecl(Result, ImportState);
+
// C11 6.9p1 says translation units must have at least one top-level
// declaration. C++ doesn't have this restriction. We also don't want to
// complain if we have a precompiled header, although technically if the PCH
// is empty we should still emit the (pedantic) diagnostic.
// If the main file is a header, we're only pretending it's a TU; don't warn.
- bool NoTopLevelDecls = ParseTopLevelDecl(Result, true);
if (NoTopLevelDecls && !Actions.getASTContext().getExternalSource() &&
!getLangOpts().CPlusPlus && !getLangOpts().IsHeaderFile)
Diag(diag::ext_empty_translation_unit);
@@ -603,7 +624,8 @@ bool Parser::ParseFirstTopLevelDecl(DeclGroupPtrTy &Result) {
/// top-level-declaration:
/// declaration
/// [C++20] module-import-declaration
-bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl) {
+bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result,
+ Sema::ModuleImportState &ImportState) {
DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(*this);
// Skip over the EOF token, flagging end of previous input for incremental
@@ -623,8 +645,8 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl) {
goto module_decl;
// Note: no need to handle kw_import here. We only form kw_import under
- // the Modules TS, and in that case 'export import' is parsed as an
- // export-declaration containing an import-declaration.
+ // the Standard C++ Modules, and in that case 'export import' is parsed as
+ // an export-declaration containing an import-declaration.
// Recognize context-sensitive C++20 'export module' and 'export import'
// declarations.
@@ -647,37 +669,49 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl) {
case tok::kw_module:
module_decl:
- Result = ParseModuleDecl(IsFirstDecl);
+ Result = ParseModuleDecl(ImportState);
return false;
- // tok::kw_import is handled by ParseExternalDeclaration. (Under the Modules
- // TS, an import can occur within an export block.)
+ case tok::kw_import:
import_decl: {
- Decl *ImportDecl = ParseModuleImport(SourceLocation());
+ Decl *ImportDecl = ParseModuleImport(SourceLocation(), ImportState);
Result = Actions.ConvertDeclToDeclGroup(ImportDecl);
return false;
}
- case tok::annot_module_include:
- Actions.ActOnModuleInclude(Tok.getLocation(),
- reinterpret_cast<Module *>(
- Tok.getAnnotationValue()));
+ case tok::annot_module_include: {
+ auto Loc = Tok.getLocation();
+ Module *Mod = reinterpret_cast<Module *>(Tok.getAnnotationValue());
+ // FIXME: We need a better way to disambiguate C++ clang modules and
+ // standard C++ modules.
+ if (!getLangOpts().CPlusPlusModules || !Mod->isHeaderUnit())
+ Actions.ActOnModuleInclude(Loc, Mod);
+ else {
+ DeclResult Import =
+ Actions.ActOnModuleImport(Loc, SourceLocation(), Loc, Mod);
+ Decl *ImportDecl = Import.isInvalid() ? nullptr : Import.get();
+ Result = Actions.ConvertDeclToDeclGroup(ImportDecl);
+ }
ConsumeAnnotationToken();
return false;
+ }
case tok::annot_module_begin:
Actions.ActOnModuleBegin(Tok.getLocation(), reinterpret_cast<Module *>(
Tok.getAnnotationValue()));
ConsumeAnnotationToken();
+ ImportState = Sema::ModuleImportState::NotACXX20Module;
return false;
case tok::annot_module_end:
Actions.ActOnModuleEnd(Tok.getLocation(), reinterpret_cast<Module *>(
Tok.getAnnotationValue()));
ConsumeAnnotationToken();
+ ImportState = Sema::ModuleImportState::NotACXX20Module;
return false;
case tok::eof:
+ case tok::annot_repl_input_end:
// Check whether -fmax-tokens= was reached.
if (PP.getMaxTokens() != 0 && PP.getTokenCount() > PP.getMaxTokens()) {
PP.Diag(Tok.getLocation(), diag::warn_max_tokens_total)
@@ -690,8 +724,7 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl) {
// Late template parsing can begin.
Actions.SetLateTemplateParser(LateTemplateParserCallback, nullptr, this);
- if (!PP.isIncrementalProcessingEnabled())
- Actions.ActOnEndOfTranslationUnit();
+ Actions.ActOnEndOfTranslationUnit();
//else don't tell Sema that we ended parsing: more input might come.
return true;
@@ -714,15 +747,39 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl) {
break;
}
- ParsedAttributesWithRange attrs(AttrFactory);
- MaybeParseCXX11Attributes(attrs);
-
- Result = ParseExternalDeclaration(attrs);
+ ParsedAttributes DeclAttrs(AttrFactory);
+ ParsedAttributes DeclSpecAttrs(AttrFactory);
+ // GNU attributes are applied to the declaration specification while the
+ // standard attributes are applied to the declaration. We parse the two
+ // attribute sets into different containters so we can apply them during
+ // the regular parsing process.
+ while (MaybeParseCXX11Attributes(DeclAttrs) ||
+ MaybeParseGNUAttributes(DeclSpecAttrs))
+ ;
+
+ Result = ParseExternalDeclaration(DeclAttrs, DeclSpecAttrs);
+ // An empty Result might mean a line with ';' or some parsing error, ignore
+ // it.
+ if (Result) {
+ if (ImportState == Sema::ModuleImportState::FirstDecl)
+ // First decl was not modular.
+ ImportState = Sema::ModuleImportState::NotACXX20Module;
+ else if (ImportState == Sema::ModuleImportState::ImportAllowed)
+ // Non-imports disallow further imports.
+ ImportState = Sema::ModuleImportState::ImportFinished;
+ else if (ImportState ==
+ Sema::ModuleImportState::PrivateFragmentImportAllowed)
+ // Non-imports disallow further imports.
+ ImportState = Sema::ModuleImportState::PrivateFragmentImportFinished;
+ }
return false;
}
/// ParseExternalDeclaration:
///
+/// The `Attrs` that are passed in are C++11 attributes and appertain to the
+/// declaration.
+///
/// external-declaration: [C99 6.9], declaration: [C++ dcl.dcl]
/// function-definition
/// declaration
@@ -745,10 +802,11 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl) {
///
/// [C++0x/GNU] 'extern' 'template' declaration
///
-/// [Modules-TS] module-import-declaration
+/// [C++20] module-import-declaration
///
Parser::DeclGroupPtrTy
-Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
+Parser::ParseExternalDeclaration(ParsedAttributes &Attrs,
+ ParsedAttributes &DeclSpecAttrs,
ParsingDeclSpec *DS) {
DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(*this);
ParenBraceBracketBalancer BalancerRAIIObj(*this);
@@ -785,11 +843,15 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
HandlePragmaFPContract();
return nullptr;
case tok::annot_pragma_fenv_access:
+ case tok::annot_pragma_fenv_access_ms:
HandlePragmaFEnvAccess();
return nullptr;
case tok::annot_pragma_fenv_round:
HandlePragmaFEnvRound();
return nullptr;
+ case tok::annot_pragma_cx_limited_range:
+ HandlePragmaCXLimitedRange();
+ return nullptr;
case tok::annot_pragma_float_control:
HandlePragmaFloatControl();
return nullptr;
@@ -802,8 +864,10 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
case tok::annot_attr_openmp:
case tok::annot_pragma_openmp: {
AccessSpecifier AS = AS_none;
- return ParseOpenMPDeclarativeDirectiveWithExtDecl(AS, attrs);
+ return ParseOpenMPDeclarativeDirectiveWithExtDecl(AS, Attrs);
}
+ case tok::annot_pragma_openacc:
+ return ParseOpenACCDirectiveDecl();
case tok::annot_pragma_ms_pointers_to_members:
HandlePragmaMSPointersToMembers();
return nullptr;
@@ -822,7 +886,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
case tok::semi:
// Either a C++11 empty-declaration or attribute-declaration.
SingleDecl =
- Actions.ActOnEmptyDeclaration(getCurScope(), attrs, Tok.getLocation());
+ Actions.ActOnEmptyDeclaration(getCurScope(), Attrs, Tok.getLocation());
ConsumeExtraSemi(OutsideFunction);
break;
case tok::r_brace:
@@ -836,10 +900,10 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
// __extension__ silences extension warnings in the subexpression.
ExtensionRAIIObject O(Diags); // Use RAII to do this.
ConsumeToken();
- return ParseExternalDeclaration(attrs);
+ return ParseExternalDeclaration(Attrs, DeclSpecAttrs);
}
case tok::kw_asm: {
- ProhibitAttributes(attrs);
+ ProhibitAttributes(Attrs);
SourceLocation StartLoc = Tok.getLocation();
SourceLocation EndLoc;
@@ -864,7 +928,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
break;
}
case tok::at:
- return ParseObjCAtDirectives(attrs);
+ return ParseObjCAtDirectives(Attrs, DeclSpecAttrs);
case tok::minus:
case tok::plus:
if (!getLangOpts().ObjC) {
@@ -879,24 +943,37 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
if (CurParsedObjCImpl) {
// Code-complete Objective-C methods even without leading '-'/'+' prefix.
Actions.CodeCompleteObjCMethodDecl(getCurScope(),
- /*IsInstanceMethod=*/None,
+ /*IsInstanceMethod=*/std::nullopt,
/*ReturnType=*/nullptr);
}
- Actions.CodeCompleteOrdinaryName(
- getCurScope(),
- CurParsedObjCImpl ? Sema::PCC_ObjCImplementation : Sema::PCC_Namespace);
+
+ Sema::ParserCompletionContext PCC;
+ if (CurParsedObjCImpl) {
+ PCC = Sema::PCC_ObjCImplementation;
+ } else if (PP.isIncrementalProcessingEnabled()) {
+ PCC = Sema::PCC_TopLevelOrExpression;
+ } else {
+ PCC = Sema::PCC_Namespace;
+ };
+ Actions.CodeCompleteOrdinaryName(getCurScope(), PCC);
return nullptr;
- case tok::kw_import:
- SingleDecl = ParseModuleImport(SourceLocation());
- break;
+ case tok::kw_import: {
+ Sema::ModuleImportState IS = Sema::ModuleImportState::NotACXX20Module;
+ if (getLangOpts().CPlusPlusModules) {
+ llvm_unreachable("not expecting a c++20 import here");
+ ProhibitAttributes(Attrs);
+ }
+ SingleDecl = ParseModuleImport(SourceLocation(), IS);
+ } break;
case tok::kw_export:
- if (getLangOpts().CPlusPlusModules || getLangOpts().ModulesTS) {
+ if (getLangOpts().CPlusPlusModules) {
+ ProhibitAttributes(Attrs);
SingleDecl = ParseExportDeclaration();
break;
}
// This must be 'export template'. Parse it so we can diagnose our lack
// of support.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case tok::kw_using:
case tok::kw_namespace:
case tok::kw_typedef:
@@ -906,9 +983,19 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
// A function definition cannot start with any of these keywords.
{
SourceLocation DeclEnd;
- return ParseDeclaration(DeclaratorContext::File, DeclEnd, attrs);
+ return ParseDeclaration(DeclaratorContext::File, DeclEnd, Attrs,
+ DeclSpecAttrs);
}
+ case tok::kw_cbuffer:
+ case tok::kw_tbuffer:
+ if (getLangOpts().HLSL) {
+ SourceLocation DeclEnd;
+ return ParseDeclaration(DeclaratorContext::File, DeclEnd, Attrs,
+ DeclSpecAttrs);
+ }
+ goto dont_know;
+
case tok::kw_static:
// Parse (then ignore) 'static' prior to a template instantiation. This is
// a GCC extension that we intentionally do not support.
@@ -916,7 +1003,8 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
Diag(ConsumeToken(), diag::warn_static_inline_explicit_inst_ignored)
<< 0;
SourceLocation DeclEnd;
- return ParseDeclaration(DeclaratorContext::File, DeclEnd, attrs);
+ return ParseDeclaration(DeclaratorContext::File, DeclEnd, Attrs,
+ DeclSpecAttrs);
}
goto dont_know;
@@ -927,7 +1015,8 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
// Inline namespaces. Allowed as an extension even in C++03.
if (NextKind == tok::kw_namespace) {
SourceLocation DeclEnd;
- return ParseDeclaration(DeclaratorContext::File, DeclEnd, attrs);
+ return ParseDeclaration(DeclaratorContext::File, DeclEnd, Attrs,
+ DeclSpecAttrs);
}
// Parse (then ignore) 'inline' prior to a template instantiation. This is
@@ -936,7 +1025,8 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
Diag(ConsumeToken(), diag::warn_static_inline_explicit_inst_ignored)
<< 1;
SourceLocation DeclEnd;
- return ParseDeclaration(DeclaratorContext::File, DeclEnd, attrs);
+ return ParseDeclaration(DeclaratorContext::File, DeclEnd, Attrs,
+ DeclSpecAttrs);
}
}
goto dont_know;
@@ -951,7 +1041,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
diag::ext_extern_template) << SourceRange(ExternLoc, TemplateLoc);
SourceLocation DeclEnd;
return Actions.ConvertDeclToDeclGroup(ParseExplicitInstantiation(
- DeclaratorContext::File, ExternLoc, TemplateLoc, DeclEnd, attrs));
+ DeclaratorContext::File, ExternLoc, TemplateLoc, DeclEnd, Attrs));
}
goto dont_know;
@@ -971,8 +1061,13 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ConsumeToken();
return nullptr;
}
+ if (getLangOpts().IncrementalExtensions &&
+ !isDeclarationStatement(/*DisambiguatingWithExpression=*/true))
+ return ParseTopLevelStmtDecl();
+
// We can't tell whether this is a function-definition or declaration yet.
- return ParseDeclarationOrFunctionDefinition(attrs, DS);
+ if (!SingleDecl)
+ return ParseDeclarationOrFunctionDefinition(Attrs, DeclSpecAttrs, DS);
}
// This routine returns a DeclGroup, if the thing we parsed only contains a
@@ -1009,7 +1104,7 @@ bool Parser::isStartOfFunctionDefinition(const ParsingDeclarator &Declarator) {
// Handle K&R C argument lists: int X(f) int f; {}
if (!getLangOpts().CPlusPlus &&
Declarator.getFunctionTypeInfo().isKNRPrototype())
- return isDeclarationSpecifier();
+ return isDeclarationSpecifier(ImplicitTypenameContext::No);
if (getLangOpts().CPlusPlus && Tok.is(tok::equal)) {
const Token &KW = NextToken();
@@ -1036,10 +1131,18 @@ bool Parser::isStartOfFunctionDefinition(const ParsingDeclarator &Declarator) {
/// [OMP] threadprivate-directive
/// [OMP] allocate-directive [TODO]
///
-Parser::DeclGroupPtrTy
-Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
- ParsingDeclSpec &DS,
- AccessSpecifier AS) {
+Parser::DeclGroupPtrTy Parser::ParseDeclOrFunctionDefInternal(
+ ParsedAttributes &Attrs, ParsedAttributes &DeclSpecAttrs,
+ ParsingDeclSpec &DS, AccessSpecifier AS) {
+ // Because we assume that the DeclSpec has not yet been initialised, we simply
+ // overwrite the source range and attribute the provided leading declspec
+ // attributes.
+ assert(DS.getSourceRange().isInvalid() &&
+ "expected uninitialised source range");
+ DS.SetRangeStart(DeclSpecAttrs.Range.getBegin());
+ DS.SetRangeEnd(DeclSpecAttrs.Range.getEnd());
+ DS.takeAttributesFrom(DeclSpecAttrs);
+
MaybeParseMicrosoftAttributes(DS.getAttributes());
// Parse the common declaration-specifiers piece.
ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS,
@@ -1078,12 +1181,13 @@ Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
? DS.getTypeSpecTypeLoc().getLocWithOffset(
LengthOfTSTToken(DS.getTypeSpecType()))
: SourceLocation();
- ProhibitAttributes(attrs, CorrectLocationForAttributes);
+ ProhibitAttributes(Attrs, CorrectLocationForAttributes);
ConsumeToken();
RecordDecl *AnonRecord = nullptr;
- Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS_none,
- DS, AnonRecord);
+ Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(
+ getCurScope(), AS_none, DS, ParsedAttributesView::none(), AnonRecord);
DS.complete(TheDecl);
+ Actions.ActOnDefinedDeclarationSpecifier(TheDecl);
if (AnonRecord) {
Decl* decls[] = {AnonRecord, TheDecl};
return Actions.BuildDeclaratorGroup(decls);
@@ -1091,7 +1195,8 @@ Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
return Actions.ConvertDeclToDeclGroup(TheDecl);
}
- DS.takeAttributesFrom(attrs);
+ if (DS.hasTagDefinition())
+ Actions.ActOnDefinedDeclarationSpecifier(DS.getRepAsDecl());
// ObjC2 allows prefix attributes on class interfaces and protocols.
// FIXME: This still needs better diagnostics. We should only accept
@@ -1107,6 +1212,7 @@ Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
}
DS.abort();
+ DS.takeAttributesFrom(Attrs);
const char *PrevSpec = nullptr;
unsigned DiagID;
@@ -1130,19 +1236,26 @@ Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
if (getLangOpts().CPlusPlus && isTokenStringLiteral() &&
DS.getStorageClassSpec() == DeclSpec::SCS_extern &&
DS.getParsedSpecifiers() == DeclSpec::PQ_StorageClassSpecifier) {
+ ProhibitAttributes(Attrs);
Decl *TheDecl = ParseLinkage(DS, DeclaratorContext::File);
return Actions.ConvertDeclToDeclGroup(TheDecl);
}
- return ParseDeclGroup(DS, DeclaratorContext::File);
+ return ParseDeclGroup(DS, DeclaratorContext::File, Attrs);
}
-Parser::DeclGroupPtrTy
-Parser::ParseDeclarationOrFunctionDefinition(ParsedAttributesWithRange &attrs,
- ParsingDeclSpec *DS,
- AccessSpecifier AS) {
+Parser::DeclGroupPtrTy Parser::ParseDeclarationOrFunctionDefinition(
+ ParsedAttributes &Attrs, ParsedAttributes &DeclSpecAttrs,
+ ParsingDeclSpec *DS, AccessSpecifier AS) {
+ // Add an enclosing time trace scope for a bunch of small scopes with
+ // "EvaluateAsConstExpr".
+ llvm::TimeTraceScope TimeScope("ParseDeclarationOrFunctionDefinition", [&]() {
+ return Tok.getLocation().printToString(
+ Actions.getASTContext().getSourceManager());
+ });
+
if (DS) {
- return ParseDeclOrFunctionDefInternal(attrs, *DS, AS);
+ return ParseDeclOrFunctionDefInternal(Attrs, DeclSpecAttrs, *DS, AS);
} else {
ParsingDeclSpec PDS(*this);
// Must temporarily exit the objective-c container scope for
@@ -1150,7 +1263,7 @@ Parser::ParseDeclarationOrFunctionDefinition(ParsedAttributesWithRange &attrs,
// afterwards.
ObjCDeclContextSwitch ObjCDC(*this);
- return ParseDeclOrFunctionDefInternal(attrs, PDS, AS);
+ return ParseDeclOrFunctionDefInternal(Attrs, DeclSpecAttrs, PDS, AS);
}
}
@@ -1171,15 +1284,21 @@ Parser::ParseDeclarationOrFunctionDefinition(ParsedAttributesWithRange &attrs,
Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
LateParsedAttrList *LateParsedAttrs) {
+ llvm::TimeTraceScope TimeScope("ParseFunctionDefinition", [&]() {
+ return Actions.GetNameForDeclarator(D).getName().getAsString();
+ });
+
// Poison SEH identifiers so they are flagged as illegal in function bodies.
PoisonSEHIdentifiersRAIIObject PoisonSEHIdentifiers(*this, true);
const DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
- // If this is C90 and the declspecs were completely missing, fudge in an
+ // If this is C89 and the declspecs were completely missing, fudge in an
// implicit int. We do this here because this is the only place where
// declaration-specifiers are completely optional in the grammar.
- if (getLangOpts().ImplicitInt && D.getDeclSpec().isEmpty()) {
+ if (getLangOpts().isImplicitIntRequired() && D.getDeclSpec().isEmpty()) {
+ Diag(D.getIdentifierLoc(), diag::warn_missing_type_specifier)
+ << D.getDeclSpec().getSourceRange();
const char *PrevSpec;
unsigned DiagID;
const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
@@ -1280,72 +1399,92 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
ParseScope BodyScope(this, Scope::FnScope | Scope::DeclScope |
Scope::CompoundStmtScope);
- // Tell the actions module that we have entered a function definition with the
- // specified Declarator for the function.
- Sema::SkipBodyInfo SkipBody;
- Decl *Res = Actions.ActOnStartOfFunctionDef(getCurScope(), D,
- TemplateInfo.TemplateParams
- ? *TemplateInfo.TemplateParams
- : MultiTemplateParamsArg(),
- &SkipBody);
-
- if (SkipBody.ShouldSkip) {
- SkipFunctionBody();
- return Res;
- }
-
- // Break out of the ParsingDeclarator context before we parse the body.
- D.complete(Res);
-
- // Break out of the ParsingDeclSpec context, too. This const_cast is
- // safe because we're always the sole owner.
- D.getMutableDeclSpec().abort();
-
- // With abbreviated function templates - we need to explicitly add depth to
- // account for the implicit template parameter list induced by the template.
- if (auto *Template = dyn_cast_or_null<FunctionTemplateDecl>(Res))
- if (Template->isAbbreviated() &&
- Template->getTemplateParameters()->getParam(0)->isImplicit())
- // First template parameter is implicit - meaning no explicit template
- // parameter list was specified.
- CurTemplateDepthTracker.addDepth(1);
-
+ // Parse function body eagerly if it is either '= delete;' or '= default;' as
+ // ActOnStartOfFunctionDef needs to know whether the function is deleted.
+ Sema::FnBodyKind BodyKind = Sema::FnBodyKind::Other;
+ SourceLocation KWLoc;
if (TryConsumeToken(tok::equal)) {
assert(getLangOpts().CPlusPlus && "Only C++ function definitions have '='");
- bool Delete = false;
- SourceLocation KWLoc;
if (TryConsumeToken(tok::kw_delete, KWLoc)) {
Diag(KWLoc, getLangOpts().CPlusPlus11
? diag::warn_cxx98_compat_defaulted_deleted_function
: diag::ext_defaulted_deleted_function)
- << 1 /* deleted */;
- Actions.SetDeclDeleted(Res, KWLoc);
- Delete = true;
+ << 1 /* deleted */;
+ BodyKind = Sema::FnBodyKind::Delete;
} else if (TryConsumeToken(tok::kw_default, KWLoc)) {
Diag(KWLoc, getLangOpts().CPlusPlus11
? diag::warn_cxx98_compat_defaulted_deleted_function
: diag::ext_defaulted_deleted_function)
- << 0 /* defaulted */;
- Actions.SetDeclDefaulted(Res, KWLoc);
+ << 0 /* defaulted */;
+ BodyKind = Sema::FnBodyKind::Default;
} else {
llvm_unreachable("function definition after = not 'delete' or 'default'");
}
if (Tok.is(tok::comma)) {
Diag(KWLoc, diag::err_default_delete_in_multiple_declaration)
- << Delete;
+ << (BodyKind == Sema::FnBodyKind::Delete);
SkipUntil(tok::semi);
} else if (ExpectAndConsume(tok::semi, diag::err_expected_after,
- Delete ? "delete" : "default")) {
+ BodyKind == Sema::FnBodyKind::Delete
+ ? "delete"
+ : "default")) {
SkipUntil(tok::semi);
}
+ }
+ // Tell the actions module that we have entered a function definition with the
+ // specified Declarator for the function.
+ Sema::SkipBodyInfo SkipBody;
+ Decl *Res = Actions.ActOnStartOfFunctionDef(getCurScope(), D,
+ TemplateInfo.TemplateParams
+ ? *TemplateInfo.TemplateParams
+ : MultiTemplateParamsArg(),
+ &SkipBody, BodyKind);
+
+ if (SkipBody.ShouldSkip) {
+ // Do NOT enter SkipFunctionBody if we already consumed the tokens.
+ if (BodyKind == Sema::FnBodyKind::Other)
+ SkipFunctionBody();
+
+ // ExpressionEvaluationContext is pushed in ActOnStartOfFunctionDef
+ // and it would be popped in ActOnFinishFunctionBody.
+ // We pop it explcitly here since ActOnFinishFunctionBody won't get called.
+ //
+ // Do not call PopExpressionEvaluationContext() if it is a lambda because
+ // one is already popped when finishing the lambda in BuildLambdaExpr().
+ //
+ // FIXME: It looks not easy to balance PushExpressionEvaluationContext()
+ // and PopExpressionEvaluationContext().
+ if (!isLambdaCallOperator(dyn_cast_if_present<FunctionDecl>(Res)))
+ Actions.PopExpressionEvaluationContext();
+ return Res;
+ }
+
+ // Break out of the ParsingDeclarator context before we parse the body.
+ D.complete(Res);
+
+ // Break out of the ParsingDeclSpec context, too. This const_cast is
+ // safe because we're always the sole owner.
+ D.getMutableDeclSpec().abort();
+
+ if (BodyKind != Sema::FnBodyKind::Other) {
+ Actions.SetFunctionBodyKind(Res, KWLoc, BodyKind);
Stmt *GeneratedBody = Res ? Res->getBody() : nullptr;
Actions.ActOnFinishFunctionBody(Res, GeneratedBody, false);
return Res;
}
+ // With abbreviated function templates - we need to explicitly add depth to
+ // account for the implicit template parameter list induced by the template.
+ if (const auto *Template = dyn_cast_if_present<FunctionTemplateDecl>(Res);
+ Template && Template->isAbbreviated() &&
+ Template->getTemplateParameters()->getParam(0)->isImplicit())
+ // First template parameter is implicit - meaning no explicit template
+ // parameter list was specified.
+ CurTemplateDepthTracker.addDepth(1);
+
if (SkipFunctionBodies && (!Res || Actions.canSkipFunctionBody(Res)) &&
trySkippingFunctionBody()) {
BodyScope.Exit();
@@ -1411,7 +1550,7 @@ void Parser::ParseKNRParamDeclarations(Declarator &D) {
Scope::FunctionDeclarationScope | Scope::DeclScope);
// Read all the argument declarations.
- while (isDeclarationSpecifier()) {
+ while (isDeclarationSpecifier(ImplicitTypenameContext::No)) {
SourceLocation DSStart = Tok.getLocation();
// Parse the common declaration-specifiers piece.
@@ -1443,11 +1582,12 @@ void Parser::ParseKNRParamDeclarations(Declarator &D) {
}
// Parse the first declarator attached to this declspec.
- Declarator ParmDeclarator(DS, DeclaratorContext::KNRTypeList);
+ Declarator ParmDeclarator(DS, ParsedAttributesView::none(),
+ DeclaratorContext::KNRTypeList);
ParseDeclarator(ParmDeclarator);
// Handle the full declarator list.
- while (1) {
+ while (true) {
// If attributes are present, parse them.
MaybeParseGNUAttributes(ParmDeclarator);
@@ -1532,7 +1672,7 @@ ExprResult Parser::ParseAsmStringLiteral(bool ForAsmLabel) {
ExprResult AsmString(ParseStringLiteralExpression());
if (!AsmString.isInvalid()) {
const auto *SL = cast<StringLiteral>(AsmString.get());
- if (!SL->isAscii()) {
+ if (!SL->isOrdinary()) {
Diag(Tok, diag::err_asm_operand_wide_string_literal)
<< SL->isWide()
<< SL->getSourceRange();
@@ -1623,8 +1763,12 @@ void Parser::AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation) {
///
/// \param CCC Indicates how to perform typo-correction for this name. If NULL,
/// no typo correction will be performed.
+/// \param AllowImplicitTypename Whether we are in a context where a dependent
+/// nested-name-specifier without typename is treated as a type (e.g.
+/// T::type).
Parser::AnnotatedNameKind
-Parser::TryAnnotateName(CorrectionCandidateCallback *CCC) {
+Parser::TryAnnotateName(CorrectionCandidateCallback *CCC,
+ ImplicitTypenameContext AllowImplicitTypename) {
assert(Tok.is(tok::identifier) || Tok.is(tok::annot_cxxscope));
const bool EnteringContext = false;
@@ -1633,12 +1777,13 @@ Parser::TryAnnotateName(CorrectionCandidateCallback *CCC) {
CXXScopeSpec SS;
if (getLangOpts().CPlusPlus &&
ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
EnteringContext))
return ANK_Error;
if (Tok.isNot(tok::identifier) || SS.isInvalid()) {
- if (TryAnnotateTypeOrScopeTokenAfterScopeSpec(SS, !WasScopeAnnotation))
+ if (TryAnnotateTypeOrScopeTokenAfterScopeSpec(SS, !WasScopeAnnotation,
+ AllowImplicitTypename))
return ANK_Error;
return ANK_Unresolved;
}
@@ -1648,10 +1793,11 @@ Parser::TryAnnotateName(CorrectionCandidateCallback *CCC) {
// FIXME: Move the tentative declaration logic into ClassifyName so we can
// typo-correct to tentatively-declared identifiers.
- if (isTentativelyDeclared(Name)) {
+ if (isTentativelyDeclared(Name) && SS.isEmpty()) {
// Identifier has been tentatively declared, and thus cannot be resolved as
// an expression. Fall back to annotating it as a type.
- if (TryAnnotateTypeOrScopeTokenAfterScopeSpec(SS, !WasScopeAnnotation))
+ if (TryAnnotateTypeOrScopeTokenAfterScopeSpec(SS, !WasScopeAnnotation,
+ AllowImplicitTypename))
return ANK_Error;
return Tok.is(tok::annot_typename) ? ANK_Success : ANK_TentativeDecl;
}
@@ -1778,32 +1924,26 @@ Parser::TryAnnotateName(CorrectionCandidateCallback *CCC) {
AnnotateScopeToken(SS, !WasScopeAnnotation);
return ANK_TemplateName;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
+ case Sema::NC_Concept:
case Sema::NC_VarTemplate:
case Sema::NC_FunctionTemplate:
case Sema::NC_UndeclaredTemplate: {
- // We have a type, variable or function template followed by '<'.
- ConsumeToken();
- UnqualifiedId Id;
- Id.setIdentifier(Name, NameLoc);
- if (AnnotateTemplateIdToken(
- TemplateTy::make(Classification.getTemplateName()),
- Classification.getTemplateNameKind(), SS, SourceLocation(), Id))
- return ANK_Error;
- return ANK_Success;
- }
- case Sema::NC_Concept: {
- UnqualifiedId Id;
- Id.setIdentifier(Name, NameLoc);
+ bool IsConceptName = Classification.getKind() == Sema::NC_Concept;
+ // We have a template name followed by '<'. Consume the identifier token so
+ // we reach the '<' and annotate it.
if (Next.is(tok::less))
- // We have a concept name followed by '<'. Consume the identifier token so
- // we reach the '<' and annotate it.
ConsumeToken();
+ UnqualifiedId Id;
+ Id.setIdentifier(Name, NameLoc);
if (AnnotateTemplateIdToken(
TemplateTy::make(Classification.getTemplateName()),
Classification.getTemplateNameKind(), SS, SourceLocation(), Id,
- /*AllowTypeAnnotation=*/false, /*TypeConstraint=*/true))
+ /*AllowTypeAnnotation=*/!IsConceptName,
+ /*TypeConstraint=*/IsConceptName))
return ANK_Error;
+ if (SS.isNotEmpty())
+ AnnotateScopeToken(SS, !WasScopeAnnotation);
return ANK_Success;
}
}
@@ -1847,11 +1987,12 @@ bool Parser::TryKeywordIdentFallback(bool DisableKeyword) {
///
/// Note that this routine emits an error if you call it with ::new or ::delete
/// as the current tokens, so only call it in contexts where these are invalid.
-bool Parser::TryAnnotateTypeOrScopeToken() {
+bool Parser::TryAnnotateTypeOrScopeToken(
+ ImplicitTypenameContext AllowImplicitTypename) {
assert((Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
Tok.is(tok::kw_typename) || Tok.is(tok::annot_cxxscope) ||
Tok.is(tok::kw_decltype) || Tok.is(tok::annot_template_id) ||
- Tok.is(tok::kw___super)) &&
+ Tok.is(tok::kw___super) || Tok.is(tok::kw_auto)) &&
"Cannot be a type or scope token!");
if (Tok.is(tok::kw_typename)) {
@@ -1864,7 +2005,7 @@ bool Parser::TryAnnotateTypeOrScopeToken() {
if (getLangOpts().MSVCCompat && NextToken().is(tok::kw_typedef)) {
Token TypedefToken;
PP.Lex(TypedefToken);
- bool Result = TryAnnotateTypeOrScopeToken();
+ bool Result = TryAnnotateTypeOrScopeToken(AllowImplicitTypename);
PP.EnterToken(Tok, /*IsReinject=*/true);
Tok = TypedefToken;
if (!Result)
@@ -1881,7 +2022,7 @@ bool Parser::TryAnnotateTypeOrScopeToken() {
SourceLocation TypenameLoc = ConsumeToken();
CXXScopeSpec SS;
if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false, nullptr,
/*IsTypename*/ true))
return true;
@@ -1890,7 +2031,8 @@ bool Parser::TryAnnotateTypeOrScopeToken() {
Tok.is(tok::annot_decltype)) {
// Attempt to recover by skipping the invalid 'typename'
if (Tok.is(tok::annot_decltype) ||
- (!TryAnnotateTypeOrScopeToken() && Tok.isAnnotation())) {
+ (!TryAnnotateTypeOrScopeToken(AllowImplicitTypename) &&
+ Tok.isAnnotation())) {
unsigned DiagID = diag::err_expected_qualified_after_typename;
// MS compatibility: MSVC permits using known types with typename.
// e.g. "typedef typename T* pointer_type"
@@ -1952,26 +2094,28 @@ bool Parser::TryAnnotateTypeOrScopeToken() {
CXXScopeSpec SS;
if (getLangOpts().CPlusPlus)
if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext*/ false))
return true;
- return TryAnnotateTypeOrScopeTokenAfterScopeSpec(SS, !WasScopeAnnotation);
+ return TryAnnotateTypeOrScopeTokenAfterScopeSpec(SS, !WasScopeAnnotation,
+ AllowImplicitTypename);
}
/// Try to annotate a type or scope token, having already parsed an
/// optional scope specifier. \p IsNewScope should be \c true unless the scope
/// specifier was extracted from an existing tok::annot_cxxscope annotation.
-bool Parser::TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
- bool IsNewScope) {
+bool Parser::TryAnnotateTypeOrScopeTokenAfterScopeSpec(
+ CXXScopeSpec &SS, bool IsNewScope,
+ ImplicitTypenameContext AllowImplicitTypename) {
if (Tok.is(tok::identifier)) {
// Determine whether the identifier is a type name.
if (ParsedType Ty = Actions.getTypeName(
*Tok.getIdentifierInfo(), Tok.getLocation(), getCurScope(), &SS,
false, NextToken().is(tok::period), nullptr,
/*IsCtorOrDtorName=*/false,
- /*NonTrivialTypeSourceInfo*/true,
- /*IsClassTemplateDeductionContext*/true)) {
+ /*NonTrivialTypeSourceInfo=*/true,
+ /*IsClassTemplateDeductionContext=*/true, AllowImplicitTypename)) {
SourceLocation BeginLoc = Tok.getLocation();
if (SS.isNotEmpty()) // it was a C++ qualified type name.
BeginLoc = SS.getBeginLoc();
@@ -2008,9 +2152,9 @@ bool Parser::TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
}
if (!getLangOpts().CPlusPlus) {
- // If we're in C, we can't have :: tokens at all (the lexer won't return
- // them). If the identifier is not a type, then it can't be scope either,
- // just early exit.
+ // If we're in C, the only place we can have :: tokens is C23
+ // attribute which is parsed elsewhere. If the identifier is not a type,
+ // then it can't be scope either, just early exit.
return false;
}
@@ -2057,7 +2201,7 @@ bool Parser::TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
// template-id annotation in a context where we weren't allowed
// to produce a type annotation token. Update the template-id
// annotation token to a type annotation token now.
- AnnotateTemplateIdTokenAsType(SS);
+ AnnotateTemplateIdTokenAsType(SS, AllowImplicitTypename);
return false;
}
}
@@ -2083,7 +2227,7 @@ bool Parser::TryAnnotateCXXScopeToken(bool EnteringContext) {
CXXScopeSpec SS;
if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
EnteringContext))
return true;
if (SS.isEmpty())
@@ -2115,7 +2259,7 @@ bool Parser::isTokenEqualOrEqualTypo() {
Diag(Tok, diag::err_invalid_token_after_declarator_suggest_equal)
<< Kind
<< FixItHint::CreateReplacement(SourceRange(Tok.getLocation()), "=");
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case tok::equal:
return true;
}
@@ -2126,14 +2270,14 @@ SourceLocation Parser::handleUnexpectedCodeCompletionToken() {
PrevTokLocation = Tok.getLocation();
for (Scope *S = getCurScope(); S; S = S->getParent()) {
- if (S->getFlags() & Scope::FnScope) {
+ if (S->isFunctionScope()) {
cutOffParsing();
Actions.CodeCompleteOrdinaryName(getCurScope(),
Sema::PCC_RecoveryInFunction);
return PrevTokLocation;
}
- if (S->getFlags() & Scope::ClassScope) {
+ if (S->isClassScope()) {
cutOffParsing();
Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Class);
return PrevTokLocation;
@@ -2194,7 +2338,7 @@ bool Parser::ParseMicrosoftIfExistsCondition(IfExistsCondition& Result) {
// Parse nested-name-specifier.
if (getLangOpts().CPlusPlus)
ParseOptionalCXXScopeSpecifier(Result.SS, /*ObjectType=*/nullptr,
- /*ObjectHadErrors=*/false,
+ /*ObjectHasErrors=*/false,
/*EnteringContext=*/false);
// Check nested-name specifier.
@@ -2268,9 +2412,10 @@ void Parser::ParseMicrosoftIfExistsExternalDeclaration() {
// Parse the declarations.
// FIXME: Support module import within __if_exists?
while (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
- ParsedAttributesWithRange attrs(AttrFactory);
- MaybeParseCXX11Attributes(attrs);
- DeclGroupPtrTy Result = ParseExternalDeclaration(attrs);
+ ParsedAttributes Attrs(AttrFactory);
+ MaybeParseCXX11Attributes(Attrs);
+ ParsedAttributes EmptyDeclSpecAttrs(AttrFactory);
+ DeclGroupPtrTy Result = ParseExternalDeclaration(Attrs, EmptyDeclSpecAttrs);
if (Result && !getCurScope()->getParent())
Actions.getASTConsumer().HandleTopLevelDecl(Result.get());
}
@@ -2280,7 +2425,7 @@ void Parser::ParseMicrosoftIfExistsExternalDeclaration() {
/// Parse a declaration beginning with the 'module' keyword or C++20
/// context-sensitive keyword (optionally preceded by 'export').
///
-/// module-declaration: [Modules TS + P0629R0]
+/// module-declaration: [C++20]
/// 'export'[opt] 'module' module-name attribute-specifier-seq[opt] ';'
///
/// global-module-fragment: [C++2a]
@@ -2290,7 +2435,8 @@ void Parser::ParseMicrosoftIfExistsExternalDeclaration() {
/// attribute-specifier-seq[opt] ';'
/// private-module-fragment: [C++2a]
/// 'module' ':' 'private' ';' top-level-declaration-seq[opt]
-Parser::DeclGroupPtrTy Parser::ParseModuleDecl(bool IsFirstDecl) {
+Parser::DeclGroupPtrTy
+Parser::ParseModuleDecl(Sema::ModuleImportState &ImportState) {
SourceLocation StartLoc = Tok.getLocation();
Sema::ModuleDeclKind MDK = TryConsumeToken(tok::kw_export)
@@ -2310,7 +2456,7 @@ Parser::DeclGroupPtrTy Parser::ParseModuleDecl(bool IsFirstDecl) {
// Parse a global-module-fragment, if present.
if (getLangOpts().CPlusPlusModules && Tok.is(tok::semi)) {
SourceLocation SemiLoc = ConsumeToken();
- if (!IsFirstDecl) {
+ if (ImportState != Sema::ModuleImportState::FirstDecl) {
Diag(StartLoc, diag::err_global_module_introducer_not_at_start)
<< SourceRange(StartLoc, SemiLoc);
return nullptr;
@@ -2319,6 +2465,7 @@ Parser::DeclGroupPtrTy Parser::ParseModuleDecl(bool IsFirstDecl) {
Diag(StartLoc, diag::err_module_fragment_exported)
<< /*global*/0 << FixItHint::CreateRemoval(StartLoc);
}
+ ImportState = Sema::ModuleImportState::GlobalFragment;
return Actions.ActOnGlobalModuleFragmentDecl(ModuleLoc);
}
@@ -2333,52 +2480,59 @@ Parser::DeclGroupPtrTy Parser::ParseModuleDecl(bool IsFirstDecl) {
SourceLocation PrivateLoc = ConsumeToken();
DiagnoseAndSkipCXX11Attributes();
ExpectAndConsumeSemi(diag::err_private_module_fragment_expected_semi);
+ ImportState = ImportState == Sema::ModuleImportState::ImportAllowed
+ ? Sema::ModuleImportState::PrivateFragmentImportAllowed
+ : Sema::ModuleImportState::PrivateFragmentImportFinished;
return Actions.ActOnPrivateModuleFragmentDecl(ModuleLoc, PrivateLoc);
}
SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Path;
- if (ParseModuleName(ModuleLoc, Path, /*IsImport*/false))
+ if (ParseModuleName(ModuleLoc, Path, /*IsImport*/ false))
return nullptr;
// Parse the optional module-partition.
+ SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Partition;
if (Tok.is(tok::colon)) {
SourceLocation ColonLoc = ConsumeToken();
- SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Partition;
- if (ParseModuleName(ModuleLoc, Partition, /*IsImport*/false))
+ if (!getLangOpts().CPlusPlusModules)
+ Diag(ColonLoc, diag::err_unsupported_module_partition)
+ << SourceRange(ColonLoc, Partition.back().second);
+ // Recover by ignoring the partition name.
+ else if (ParseModuleName(ModuleLoc, Partition, /*IsImport*/ false))
return nullptr;
-
- // FIXME: Support module partition declarations.
- Diag(ColonLoc, diag::err_unsupported_module_partition)
- << SourceRange(ColonLoc, Partition.back().second);
- // Recover by parsing as a non-partition.
}
// We don't support any module attributes yet; just parse them and diagnose.
- ParsedAttributesWithRange Attrs(AttrFactory);
+ ParsedAttributes Attrs(AttrFactory);
MaybeParseCXX11Attributes(Attrs);
- ProhibitCXX11Attributes(Attrs, diag::err_attribute_not_module_attr);
+ ProhibitCXX11Attributes(Attrs, diag::err_attribute_not_module_attr,
+ diag::err_keyword_not_module_attr,
+ /*DiagnoseEmptyAttrs=*/false,
+ /*WarnOnUnknownAttrs=*/true);
ExpectAndConsumeSemi(diag::err_module_expected_semi);
- return Actions.ActOnModuleDecl(StartLoc, ModuleLoc, MDK, Path, IsFirstDecl);
+ return Actions.ActOnModuleDecl(StartLoc, ModuleLoc, MDK, Path, Partition,
+ ImportState);
}
/// Parse a module import declaration. This is essentially the same for
-/// Objective-C and the C++ Modules TS, except for the leading '@' (in ObjC)
-/// and the trailing optional attributes (in C++).
+/// Objective-C and C++20 except for the leading '@' (in ObjC) and the
+/// trailing optional attributes (in C++).
///
/// [ObjC] @import declaration:
/// '@' 'import' module-name ';'
/// [ModTS] module-import-declaration:
/// 'import' module-name attribute-specifier-seq[opt] ';'
-/// [C++2a] module-import-declaration:
+/// [C++20] module-import-declaration:
/// 'export'[opt] 'import' module-name
/// attribute-specifier-seq[opt] ';'
/// 'export'[opt] 'import' module-partition
/// attribute-specifier-seq[opt] ';'
/// 'export'[opt] 'import' header-name
/// attribute-specifier-seq[opt] ';'
-Decl *Parser::ParseModuleImport(SourceLocation AtLoc) {
+Decl *Parser::ParseModuleImport(SourceLocation AtLoc,
+ Sema::ModuleImportState &ImportState) {
SourceLocation StartLoc = AtLoc.isInvalid() ? Tok.getLocation() : AtLoc;
SourceLocation ExportLoc;
@@ -2390,9 +2544,10 @@ Decl *Parser::ParseModuleImport(SourceLocation AtLoc) {
bool IsObjCAtImport = Tok.isObjCAtKeyword(tok::objc_import);
SourceLocation ImportLoc = ConsumeToken();
+ // For C++20 modules, we can have "name" or ":Partition name" as valid input.
SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Path;
+ bool IsPartition = false;
Module *HeaderUnit = nullptr;
-
if (Tok.is(tok::header_name)) {
// This is a header import that the preprocessor decided we should skip
// because it was malformed in some way. Parse and ignore it; it's already
@@ -2402,24 +2557,28 @@ Decl *Parser::ParseModuleImport(SourceLocation AtLoc) {
// This is a header import that the preprocessor mapped to a module import.
HeaderUnit = reinterpret_cast<Module *>(Tok.getAnnotationValue());
ConsumeAnnotationToken();
- } else if (getLangOpts().CPlusPlusModules && Tok.is(tok::colon)) {
+ } else if (Tok.is(tok::colon)) {
SourceLocation ColonLoc = ConsumeToken();
- if (ParseModuleName(ImportLoc, Path, /*IsImport*/true))
+ if (!getLangOpts().CPlusPlusModules)
+ Diag(ColonLoc, diag::err_unsupported_module_partition)
+ << SourceRange(ColonLoc, Path.back().second);
+ // Recover by leaving partition empty.
+ else if (ParseModuleName(ColonLoc, Path, /*IsImport*/ true))
return nullptr;
-
- // FIXME: Support module partition import.
- Diag(ColonLoc, diag::err_unsupported_module_partition)
- << SourceRange(ColonLoc, Path.back().second);
- return nullptr;
+ else
+ IsPartition = true;
} else {
- if (ParseModuleName(ImportLoc, Path, /*IsImport*/true))
+ if (ParseModuleName(ImportLoc, Path, /*IsImport*/ true))
return nullptr;
}
- ParsedAttributesWithRange Attrs(AttrFactory);
+ ParsedAttributes Attrs(AttrFactory);
MaybeParseCXX11Attributes(Attrs);
// We don't support any module import attributes yet.
- ProhibitCXX11Attributes(Attrs, diag::err_attribute_not_import_attr);
+ ProhibitCXX11Attributes(Attrs, diag::err_attribute_not_import_attr,
+ diag::err_keyword_not_import_attr,
+ /*DiagnoseEmptyAttrs=*/false,
+ /*WarnOnUnknownAttrs=*/true);
if (PP.hadModuleLoaderFatalFailure()) {
// With a fatal failure in the module loader, we abort parsing.
@@ -2427,12 +2586,60 @@ Decl *Parser::ParseModuleImport(SourceLocation AtLoc) {
return nullptr;
}
+ // Diagnose mis-imports.
+ bool SeenError = true;
+ switch (ImportState) {
+ case Sema::ModuleImportState::ImportAllowed:
+ SeenError = false;
+ break;
+ case Sema::ModuleImportState::FirstDecl:
+ // If we found an import decl as the first declaration, we must be not in
+ // a C++20 module unit or we are in an invalid state.
+ ImportState = Sema::ModuleImportState::NotACXX20Module;
+ [[fallthrough]];
+ case Sema::ModuleImportState::NotACXX20Module:
+ // We can only import a partition within a module purview.
+ if (IsPartition)
+ Diag(ImportLoc, diag::err_partition_import_outside_module);
+ else
+ SeenError = false;
+ break;
+ case Sema::ModuleImportState::GlobalFragment:
+ case Sema::ModuleImportState::PrivateFragmentImportAllowed:
+ // We can only have pre-processor directives in the global module fragment
+ // which allows pp-import, but not of a partition (since the global module
+ // does not have partitions).
+ // We cannot import a partition into a private module fragment, since
+ // [module.private.frag]/1 disallows private module fragments in a multi-
+ // TU module.
+ if (IsPartition || (HeaderUnit && HeaderUnit->Kind !=
+ Module::ModuleKind::ModuleHeaderUnit))
+ Diag(ImportLoc, diag::err_import_in_wrong_fragment)
+ << IsPartition
+ << (ImportState == Sema::ModuleImportState::GlobalFragment ? 0 : 1);
+ else
+ SeenError = false;
+ break;
+ case Sema::ModuleImportState::ImportFinished:
+ case Sema::ModuleImportState::PrivateFragmentImportFinished:
+ if (getLangOpts().CPlusPlusModules)
+ Diag(ImportLoc, diag::err_import_not_allowed_here);
+ else
+ SeenError = false;
+ break;
+ }
+ if (SeenError) {
+ ExpectAndConsumeSemi(diag::err_module_expected_semi);
+ return nullptr;
+ }
+
DeclResult Import;
if (HeaderUnit)
Import =
Actions.ActOnModuleImport(StartLoc, ExportLoc, ImportLoc, HeaderUnit);
else if (!Path.empty())
- Import = Actions.ActOnModuleImport(StartLoc, ExportLoc, ImportLoc, Path);
+ Import = Actions.ActOnModuleImport(StartLoc, ExportLoc, ImportLoc, Path,
+ IsPartition);
ExpectAndConsumeSemi(diag::err_module_expected_semi);
if (Import.isInvalid())
return nullptr;
@@ -2441,16 +2648,16 @@ Decl *Parser::ParseModuleImport(SourceLocation AtLoc) {
// the header is parseable. Emit a warning to make the user aware.
if (IsObjCAtImport && AtLoc.isValid()) {
auto &SrcMgr = PP.getSourceManager();
- auto *FE = SrcMgr.getFileEntryForID(SrcMgr.getFileID(AtLoc));
- if (FE && llvm::sys::path::parent_path(FE->getDir()->getName())
- .endswith(".framework"))
+ auto FE = SrcMgr.getFileEntryRefForID(SrcMgr.getFileID(AtLoc));
+ if (FE && llvm::sys::path::parent_path(FE->getDir().getName())
+ .ends_with(".framework"))
Diags.Report(AtLoc, diag::warn_atimport_in_framework_header);
}
return Import.get();
}
-/// Parse a C++ Modules TS / Objective-C module name (both forms use the same
+/// Parse a C++ / Objective-C module name (both forms use the same
/// grammar).
///
/// module-name:
diff --git a/contrib/llvm-project/clang/lib/Rewrite/HTMLRewrite.cpp b/contrib/llvm-project/clang/lib/Rewrite/HTMLRewrite.cpp
index 2f5f2734aa46..083a9c09297e 100644
--- a/contrib/llvm-project/clang/lib/Rewrite/HTMLRewrite.cpp
+++ b/contrib/llvm-project/clang/lib/Rewrite/HTMLRewrite.cpp
@@ -203,7 +203,7 @@ std::string html::EscapeText(StringRef s, bool EscapeSpaces, bool ReplaceTabs) {
}
}
- return os.str();
+ return Str;
}
static void AddLineNumber(RewriteBuffer &RB, unsigned LineNo,
@@ -371,6 +371,7 @@ h1 { font-size:14pt }
.msg { border-radius:5px }
.msg { font-family:Helvetica, sans-serif; font-size:8pt }
.msg { float:left }
+.msg { position:relative }
.msg { padding:0.25em 1ex 0.25em 1ex }
.msg { margin-top:10px; margin-bottom:10px }
.msg { font-weight:bold }
@@ -391,7 +392,7 @@ h1 { font-size:14pt }
.CodeInsertionHint { font-weight: bold; background-color: #10dd10 }
.CodeRemovalHint { background-color:#de1010 }
.CodeRemovalHint { border-bottom:1px solid #6F9DBE }
-.selected{ background-color:orange !important; }
+.msg.selected{ background-color:orange !important; }
table.simpletable {
padding: 5px;
@@ -487,14 +488,14 @@ void html::SyntaxHighlight(Rewriter &R, FileID FID, const Preprocessor &PP) {
++TokOffs;
--TokLen;
// FALL THROUGH to chop the 8
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case tok::wide_string_literal:
case tok::utf16_string_literal:
case tok::utf32_string_literal:
// Chop off the L, u, U or 8 prefix
++TokOffs;
--TokLen;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case tok::string_literal:
// FIXME: Exclude the optional ud-suffix from the highlighted range.
HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
@@ -541,7 +542,7 @@ void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
// Lex all the tokens in raw mode, to avoid entering #includes or expanding
// macros.
- while (1) {
+ while (true) {
Token Tok;
L.LexFromRawLexer(Tok);
diff --git a/contrib/llvm-project/clang/lib/Rewrite/Rewriter.cpp b/contrib/llvm-project/clang/lib/Rewrite/Rewriter.cpp
index 040e1c284253..0e6ae3650644 100644
--- a/contrib/llvm-project/clang/lib/Rewrite/Rewriter.cpp
+++ b/contrib/llvm-project/clang/lib/Rewrite/Rewriter.cpp
@@ -14,22 +14,18 @@
#include "clang/Rewrite/Core/Rewriter.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticIDs.h"
-#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
#include "clang/Rewrite/Core/RewriteBuffer.h"
#include "clang/Rewrite/Core/RewriteRope.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <iterator>
#include <map>
-#include <memory>
-#include <system_error>
#include <utility>
using namespace clang;
@@ -223,6 +219,7 @@ std::string Rewriter::getRewrittenText(CharSourceRange Range) const {
RewriteBuffer::iterator Start = RB.begin();
std::advance(Start, StartOff);
RewriteBuffer::iterator End = Start;
+ assert(EndOff >= StartOff && "Invalid iteration distance");
std::advance(End, EndOff-StartOff);
return std::string(Start, End);
@@ -259,7 +256,7 @@ bool Rewriter::InsertText(SourceLocation Loc, StringRef Str,
unsigned StartOffs = getLocationOffsetAndFileID(Loc, FID);
SmallString<128> indentedStr;
- if (indentNewLines && Str.find('\n') != StringRef::npos) {
+ if (indentNewLines && Str.contains('\n')) {
StringRef MB = SourceMgr->getBufferData(FID);
unsigned lineNo = SourceMgr->getLineNumber(FID, StartOffs) - 1;
@@ -389,7 +386,7 @@ bool Rewriter::IncreaseIndentation(CharSourceRange range,
}
if (parentSpace.size() >= startSpace.size())
return true;
- if (!startSpace.startswith(parentSpace))
+ if (!startSpace.starts_with(parentSpace))
return true;
StringRef indent = startSpace.substr(parentSpace.size());
@@ -402,75 +399,29 @@ bool Rewriter::IncreaseIndentation(CharSourceRange range,
while (isWhitespaceExceptNL(MB[i]))
++i;
StringRef origIndent = MB.substr(offs, i-offs);
- if (origIndent.startswith(startSpace))
+ if (origIndent.starts_with(startSpace))
RB.InsertText(offs, indent, /*InsertAfter=*/false);
}
return false;
}
-namespace {
-
-// A wrapper for a file stream that atomically overwrites the target.
-//
-// Creates a file output stream for a temporary file in the constructor,
-// which is later accessible via getStream() if ok() return true.
-// Flushes the stream and moves the temporary file to the target location
-// in the destructor.
-class AtomicallyMovedFile {
-public:
- AtomicallyMovedFile(DiagnosticsEngine &Diagnostics, StringRef Filename,
- bool &AllWritten)
- : Diagnostics(Diagnostics), Filename(Filename), AllWritten(AllWritten) {
- TempFilename = Filename;
- TempFilename += "-%%%%%%%%";
- int FD;
- if (llvm::sys::fs::createUniqueFile(TempFilename, FD, TempFilename)) {
- AllWritten = false;
- Diagnostics.Report(clang::diag::err_unable_to_make_temp)
- << TempFilename;
- } else {
- FileStream.reset(new llvm::raw_fd_ostream(FD, /*shouldClose=*/true));
- }
- }
-
- ~AtomicallyMovedFile() {
- if (!ok()) return;
-
- // Close (will also flush) theFileStream.
- FileStream->close();
- if (std::error_code ec = llvm::sys::fs::rename(TempFilename, Filename)) {
- AllWritten = false;
- Diagnostics.Report(clang::diag::err_unable_to_rename_temp)
- << TempFilename << Filename << ec.message();
- // If the remove fails, there's not a lot we can do - this is already an
- // error.
- llvm::sys::fs::remove(TempFilename);
- }
- }
-
- bool ok() { return (bool)FileStream; }
- raw_ostream &getStream() { return *FileStream; }
-
-private:
- DiagnosticsEngine &Diagnostics;
- StringRef Filename;
- SmallString<128> TempFilename;
- std::unique_ptr<llvm::raw_fd_ostream> FileStream;
- bool &AllWritten;
-};
-
-} // namespace
-
bool Rewriter::overwriteChangedFiles() {
bool AllWritten = true;
+ auto& Diag = getSourceMgr().getDiagnostics();
+ unsigned OverwriteFailure = Diag.getCustomDiagID(
+ DiagnosticsEngine::Error, "unable to overwrite file %0: %1");
for (buffer_iterator I = buffer_begin(), E = buffer_end(); I != E; ++I) {
- const FileEntry *Entry =
- getSourceMgr().getFileEntryForID(I->first);
- AtomicallyMovedFile File(getSourceMgr().getDiagnostics(), Entry->getName(),
- AllWritten);
- if (File.ok()) {
- I->second.write(File.getStream());
+ OptionalFileEntryRef Entry = getSourceMgr().getFileEntryRefForID(I->first);
+ llvm::SmallString<128> Path(Entry->getName());
+ getSourceMgr().getFileManager().makeAbsolutePath(Path);
+ if (auto Error = llvm::writeToOutput(Path, [&](llvm::raw_ostream &OS) {
+ I->second.write(OS);
+ return llvm::Error::success();
+ })) {
+ Diag.Report(OverwriteFailure)
+ << Entry->getName() << llvm::toString(std::move(Error));
+ AllWritten = false;
}
}
return !AllWritten;
diff --git a/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp b/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp
index aa2602c8d925..9e9294572df9 100644
--- a/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp
@@ -13,25 +13,32 @@
//===----------------------------------------------------------------------===//
#include "clang/Sema/AnalysisBasedWarnings.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/AST/OperationKinds.h"
#include "clang/AST/ParentMap.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
#include "clang/Analysis/Analyses/CalledOnceCheck.h"
#include "clang/Analysis/Analyses/Consumed.h"
#include "clang/Analysis/Analyses/ReachableCode.h"
#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/Analysis/Analyses/UninitializedValues.h"
+#include "clang/Analysis/Analyses/UnsafeBufferUsage.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
+#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Preprocessor.h"
@@ -40,6 +47,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
@@ -47,6 +55,7 @@
#include <algorithm>
#include <deque>
#include <iterator>
+#include <optional>
using namespace clang;
@@ -62,11 +71,17 @@ namespace {
public:
UnreachableCodeHandler(Sema &s) : S(s) {}
- void HandleUnreachable(reachable_code::UnreachableKind UK,
- SourceLocation L,
- SourceRange SilenceableCondVal,
- SourceRange R1,
- SourceRange R2) override {
+ void HandleUnreachable(reachable_code::UnreachableKind UK, SourceLocation L,
+ SourceRange SilenceableCondVal, SourceRange R1,
+ SourceRange R2, bool HasFallThroughAttr) override {
+ // If the diagnosed code is `[[fallthrough]];` and
+ // `-Wunreachable-code-fallthrough` is enabled, suppress `code will never
+ // be executed` warning to avoid generating diagnostic twice
+ if (HasFallThroughAttr &&
+ !S.getDiagnostics().isIgnored(diag::warn_unreachable_fallthrough_attr,
+ SourceLocation()))
+ return;
+
// Avoid reporting multiple unreachable code diagnostics that are
// triggered by the same conditional value.
if (PreviousSilenceableCondVal.isValid() &&
@@ -128,7 +143,7 @@ class LogicalErrorHandler : public CFGCallback {
Sema &S;
public:
- LogicalErrorHandler(Sema &S) : CFGCallback(), S(S) {}
+ LogicalErrorHandler(Sema &S) : S(S) {}
static bool HasMacroID(const Expr *E) {
if (E->getExprLoc().isMacroID())
@@ -143,6 +158,17 @@ public:
return false;
}
+ void logicAlwaysTrue(const BinaryOperator *B, bool isAlwaysTrue) override {
+ if (HasMacroID(B))
+ return;
+
+ unsigned DiagID = isAlwaysTrue
+ ? diag::warn_tautological_negation_or_compare
+ : diag::warn_tautological_negation_and_compare;
+ SourceRange DiagRange = B->getSourceRange();
+ S.Diag(B->getExprLoc(), DiagID) << DiagRange;
+ }
+
void compareAlwaysTrue(const BinaryOperator *B, bool isAlwaysTrue) override {
if (HasMacroID(B))
return;
@@ -173,7 +199,8 @@ public:
static bool hasActiveDiagnostics(DiagnosticsEngine &Diags,
SourceLocation Loc) {
return !Diags.isIgnored(diag::warn_tautological_overlap_comparison, Loc) ||
- !Diags.isIgnored(diag::warn_comparison_bitwise_or, Loc);
+ !Diags.isIgnored(diag::warn_comparison_bitwise_or, Loc) ||
+ !Diags.isIgnored(diag::warn_tautological_negation_and_compare, Loc);
}
};
} // anonymous namespace
@@ -326,7 +353,7 @@ static void visitReachableThrows(
if (!Reachable[B->getBlockID()])
continue;
for (CFGElement &E : *B) {
- Optional<CFGStmt> S = E.getAs<CFGStmt>();
+ std::optional<CFGStmt> S = E.getAs<CFGStmt>();
if (!S)
continue;
if (auto *Throw = dyn_cast<CXXThrowExpr>(S->getStmt()))
@@ -464,7 +491,7 @@ static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) {
// No more CFGElements in the block?
if (ri == re) {
const Stmt *Term = B.getTerminatorStmt();
- if (Term && isa<CXXTryStmt>(Term)) {
+ if (Term && (isa<CXXTryStmt>(Term) || isa<ObjCAtTryStmt>(Term))) {
HasAbnormalEdge = true;
continue;
}
@@ -497,8 +524,7 @@ static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) {
HasAbnormalEdge = true;
continue;
}
- if (std::find(B.succ_begin(), B.succ_end(), &cfg->getExit())
- == B.succ_end()) {
+ if (!llvm::is_contained(B.succs(), &cfg->getExit())) {
HasAbnormalEdge = true;
continue;
}
@@ -571,6 +597,7 @@ struct CheckFallThroughDiagnostics {
D.diag_AlwaysFallThrough_HasNoReturn = 0;
D.diag_AlwaysFallThrough_ReturnsNonVoid =
diag::warn_falloff_nonvoid_coroutine;
+ D.diag_NeverFallThroughOrReturn = 0;
D.funMode = Coroutine;
return D;
}
@@ -1080,11 +1107,9 @@ namespace {
while (!BlockQueue.empty()) {
const CFGBlock *P = BlockQueue.front();
BlockQueue.pop_front();
- for (CFGBlock::const_succ_iterator I = P->succ_begin(),
- E = P->succ_end();
- I != E; ++I) {
- if (*I && ReachableBlocks.insert(*I).second)
- BlockQueue.push_back(*I);
+ for (const CFGBlock *B : P->succs()) {
+ if (B && ReachableBlocks.insert(B).second)
+ BlockQueue.push_back(B);
}
}
}
@@ -1115,22 +1140,20 @@ namespace {
continue; // Case label is preceded with a normal label, good.
if (!ReachableBlocks.count(P)) {
- for (CFGBlock::const_reverse_iterator ElemIt = P->rbegin(),
- ElemEnd = P->rend();
- ElemIt != ElemEnd; ++ElemIt) {
- if (Optional<CFGStmt> CS = ElemIt->getAs<CFGStmt>()) {
- if (const AttributedStmt *AS = asFallThroughAttr(CS->getStmt())) {
- // Don't issue a warning for an unreachable fallthrough
- // attribute in template instantiations as it may not be
- // unreachable in all instantiations of the template.
- if (!IsTemplateInstantiation)
- S.Diag(AS->getBeginLoc(),
- diag::warn_fallthrough_attr_unreachable);
- markFallthroughVisited(AS);
- ++AnnotatedCnt;
- break;
- }
- // Don't care about other unreachable statements.
+ for (const CFGElement &Elem : llvm::reverse(*P)) {
+ if (std::optional<CFGStmt> CS = Elem.getAs<CFGStmt>()) {
+ if (const AttributedStmt *AS = asFallThroughAttr(CS->getStmt())) {
+ // Don't issue a warning for an unreachable fallthrough
+ // attribute in template instantiations as it may not be
+ // unreachable in all instantiations of the template.
+ if (!IsTemplateInstantiation)
+ S.Diag(AS->getBeginLoc(),
+ diag::warn_unreachable_fallthrough_attr);
+ markFallthroughVisited(AS);
+ ++AnnotatedCnt;
+ break;
+ }
+ // Don't care about other unreachable statements.
}
}
// If there are no unreachable statements, this may be a special
@@ -1202,12 +1225,9 @@ namespace {
static const Stmt *getLastStmt(const CFGBlock &B) {
if (const Stmt *Term = B.getTerminatorStmt())
return Term;
- for (CFGBlock::const_reverse_iterator ElemIt = B.rbegin(),
- ElemEnd = B.rend();
- ElemIt != ElemEnd; ++ElemIt) {
- if (Optional<CFGStmt> CS = ElemIt->getAs<CFGStmt>())
+ for (const CFGElement &Elem : llvm::reverse(B))
+ if (std::optional<CFGStmt> CS = Elem.getAs<CFGStmt>())
return CS->getStmt();
- }
// Workaround to detect a statement thrown out by CFGBuilder:
// case X: {} case Y:
// case X: ; case Y:
@@ -1239,7 +1259,7 @@ static StringRef getFallthroughAttrSpelling(Preprocessor &PP,
tok::r_square, tok::r_square
};
- bool PreferClangAttr = !PP.getLangOpts().CPlusPlus17 && !PP.getLangOpts().C2x;
+ bool PreferClangAttr = !PP.getLangOpts().CPlusPlus17 && !PP.getLangOpts().C23;
StringRef MacroName;
if (PreferClangAttr)
@@ -1280,7 +1300,7 @@ static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC,
for (const CFGBlock *B : llvm::reverse(*Cfg)) {
const Stmt *Label = B->getLabel();
- if (!Label || !isa<SwitchCase>(Label))
+ if (!isa_and_nonnull<SwitchCase>(Label))
continue;
int AnnotatedCnt;
@@ -1583,8 +1603,7 @@ public:
// Sort the uses by their SourceLocations. While not strictly
// guaranteed to produce them in line/column order, this will provide
// a stable ordering.
- llvm::sort(vec->begin(), vec->end(),
- [](const UninitUse &a, const UninitUse &b) {
+ llvm::sort(*vec, [](const UninitUse &a, const UninitUse &b) {
// Prefer a more confident report over a less confident one.
if (a.getKind() != b.getKind())
return a.getKind() > b.getKind();
@@ -1637,7 +1656,7 @@ public:
private:
static bool hasAlwaysUninitializedUse(const UsesVec* vec) {
- return std::any_of(vec->begin(), vec->end(), [](const UninitUse &U) {
+ return llvm::any_of(*vec, [](const UninitUse &U) {
return U.getKind() == UninitUse::Always ||
U.getKind() == UninitUse::AfterCall ||
U.getKind() == UninitUse::AfterDecl;
@@ -1852,7 +1871,7 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
}
}
- void handleInvalidLockExp(StringRef Kind, SourceLocation Loc) override {
+ void handleInvalidLockExp(SourceLocation Loc) override {
PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_cannot_resolve_lock)
<< Loc);
Warnings.emplace_back(std::move(Warning), getNotes());
@@ -1930,9 +1949,8 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
Warnings.emplace_back(std::move(Warning), getNotes(Note));
}
- void handleNoMutexHeld(StringRef Kind, const NamedDecl *D,
- ProtectedOperationKind POK, AccessKind AK,
- SourceLocation Loc) override {
+ void handleNoMutexHeld(const NamedDecl *D, ProtectedOperationKind POK,
+ AccessKind AK, SourceLocation Loc) override {
assert((POK == POK_VarAccess || POK == POK_VarDereference) &&
"Only works for variables");
unsigned DiagID = POK == POK_VarAccess?
@@ -1965,6 +1983,12 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
case POK_PtPassByRef:
DiagID = diag::warn_pt_guarded_pass_by_reference;
break;
+ case POK_ReturnByRef:
+ DiagID = diag::warn_guarded_return_by_reference;
+ break;
+ case POK_PtReturnByRef:
+ DiagID = diag::warn_pt_guarded_return_by_reference;
+ break;
}
PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind
<< D
@@ -1995,6 +2019,12 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
case POK_PtPassByRef:
DiagID = diag::warn_pt_guarded_pass_by_reference;
break;
+ case POK_ReturnByRef:
+ DiagID = diag::warn_guarded_return_by_reference;
+ break;
+ case POK_PtReturnByRef:
+ DiagID = diag::warn_pt_guarded_return_by_reference;
+ break;
}
PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind
<< D
@@ -2149,6 +2179,189 @@ public:
} // namespace clang
//===----------------------------------------------------------------------===//
+// Unsafe buffer usage analysis.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class UnsafeBufferUsageReporter : public UnsafeBufferUsageHandler {
+ Sema &S;
+ bool SuggestSuggestions; // Recommend -fsafe-buffer-usage-suggestions?
+
+ // Lists as a string the names of variables in `VarGroupForVD` except for `VD`
+ // itself:
+ std::string listVariableGroupAsString(
+ const VarDecl *VD, const ArrayRef<const VarDecl *> &VarGroupForVD) const {
+ if (VarGroupForVD.size() <= 1)
+ return "";
+
+ std::vector<StringRef> VarNames;
+ auto PutInQuotes = [](StringRef S) -> std::string {
+ return "'" + S.str() + "'";
+ };
+
+ for (auto *V : VarGroupForVD) {
+ if (V == VD)
+ continue;
+ VarNames.push_back(V->getName());
+ }
+ if (VarNames.size() == 1) {
+ return PutInQuotes(VarNames[0]);
+ }
+ if (VarNames.size() == 2) {
+ return PutInQuotes(VarNames[0]) + " and " + PutInQuotes(VarNames[1]);
+ }
+ assert(VarGroupForVD.size() > 3);
+ const unsigned N = VarNames.size() -
+ 2; // need to print the last two names as "..., X, and Y"
+ std::string AllVars = "";
+
+ for (unsigned I = 0; I < N; ++I)
+ AllVars.append(PutInQuotes(VarNames[I]) + ", ");
+ AllVars.append(PutInQuotes(VarNames[N]) + ", and " +
+ PutInQuotes(VarNames[N + 1]));
+ return AllVars;
+ }
+
+public:
+ UnsafeBufferUsageReporter(Sema &S, bool SuggestSuggestions)
+ : S(S), SuggestSuggestions(SuggestSuggestions) {}
+
+ void handleUnsafeOperation(const Stmt *Operation, bool IsRelatedToDecl,
+ ASTContext &Ctx) override {
+ SourceLocation Loc;
+ SourceRange Range;
+ unsigned MsgParam = 0;
+ if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Operation)) {
+ Loc = ASE->getBase()->getExprLoc();
+ Range = ASE->getBase()->getSourceRange();
+ MsgParam = 2;
+ } else if (const auto *BO = dyn_cast<BinaryOperator>(Operation)) {
+ BinaryOperator::Opcode Op = BO->getOpcode();
+ if (Op == BO_Add || Op == BO_AddAssign || Op == BO_Sub ||
+ Op == BO_SubAssign) {
+ if (BO->getRHS()->getType()->isIntegerType()) {
+ Loc = BO->getLHS()->getExprLoc();
+ Range = BO->getLHS()->getSourceRange();
+ } else {
+ Loc = BO->getRHS()->getExprLoc();
+ Range = BO->getRHS()->getSourceRange();
+ }
+ MsgParam = 1;
+ }
+ } else if (const auto *UO = dyn_cast<UnaryOperator>(Operation)) {
+ UnaryOperator::Opcode Op = UO->getOpcode();
+ if (Op == UO_PreInc || Op == UO_PreDec || Op == UO_PostInc ||
+ Op == UO_PostDec) {
+ Loc = UO->getSubExpr()->getExprLoc();
+ Range = UO->getSubExpr()->getSourceRange();
+ MsgParam = 1;
+ }
+ } else {
+ if (isa<CallExpr>(Operation)) {
+ // note_unsafe_buffer_operation doesn't have this mode yet.
+ assert(!IsRelatedToDecl && "Not implemented yet!");
+ MsgParam = 3;
+ } else if (const auto *ECE = dyn_cast<ExplicitCastExpr>(Operation)) {
+ QualType destType = ECE->getType();
+ if (!isa<PointerType>(destType))
+ return;
+
+ const uint64_t dSize =
+ Ctx.getTypeSize(destType.getTypePtr()->getPointeeType());
+
+ QualType srcType = ECE->getSubExpr()->getType();
+ const uint64_t sSize =
+ Ctx.getTypeSize(srcType.getTypePtr()->getPointeeType());
+ if (sSize >= dSize)
+ return;
+
+ MsgParam = 4;
+ }
+ Loc = Operation->getBeginLoc();
+ Range = Operation->getSourceRange();
+ }
+ if (IsRelatedToDecl) {
+ assert(!SuggestSuggestions &&
+ "Variables blamed for unsafe buffer usage without suggestions!");
+ S.Diag(Loc, diag::note_unsafe_buffer_operation) << MsgParam << Range;
+ } else {
+ S.Diag(Loc, diag::warn_unsafe_buffer_operation) << MsgParam << Range;
+ if (SuggestSuggestions) {
+ S.Diag(Loc, diag::note_safe_buffer_usage_suggestions_disabled);
+ }
+ }
+ }
+
+ void handleUnsafeVariableGroup(const VarDecl *Variable,
+ const VariableGroupsManager &VarGrpMgr,
+ FixItList &&Fixes, const Decl *D) override {
+ assert(!SuggestSuggestions &&
+ "Unsafe buffer usage fixits displayed without suggestions!");
+ S.Diag(Variable->getLocation(), diag::warn_unsafe_buffer_variable)
+ << Variable << (Variable->getType()->isPointerType() ? 0 : 1)
+ << Variable->getSourceRange();
+ if (!Fixes.empty()) {
+ assert(isa<NamedDecl>(D) &&
+ "Fix-its are generated only for `NamedDecl`s");
+ const NamedDecl *ND = cast<NamedDecl>(D);
+ bool BriefMsg = false;
+ // If the variable group involves parameters, the diagnostic message will
+ // NOT explain how the variables are grouped as the reason is non-trivial
+ // and irrelavant to users' experience:
+ const auto VarGroupForVD = VarGrpMgr.getGroupOfVar(Variable, &BriefMsg);
+ unsigned FixItStrategy = 0; // For now we only have 'std::span' strategy
+ const auto &FD =
+ S.Diag(Variable->getLocation(),
+ BriefMsg ? diag::note_unsafe_buffer_variable_fixit_together
+ : diag::note_unsafe_buffer_variable_fixit_group);
+
+ FD << Variable << FixItStrategy;
+ FD << listVariableGroupAsString(Variable, VarGroupForVD)
+ << (VarGroupForVD.size() > 1) << ND;
+ for (const auto &F : Fixes) {
+ FD << F;
+ }
+ }
+
+#ifndef NDEBUG
+ if (areDebugNotesRequested())
+ for (const DebugNote &Note: DebugNotesByVar[Variable])
+ S.Diag(Note.first, diag::note_safe_buffer_debug_mode) << Note.second;
+#endif
+ }
+
+ bool isSafeBufferOptOut(const SourceLocation &Loc) const override {
+ return S.PP.isSafeBufferOptOut(S.getSourceManager(), Loc);
+ }
+
+ // Returns the text representation of clang::unsafe_buffer_usage attribute.
+ // `WSSuffix` holds customized "white-space"s, e.g., newline or whilespace
+ // characters.
+ std::string
+ getUnsafeBufferUsageAttributeTextAt(SourceLocation Loc,
+ StringRef WSSuffix = "") const override {
+ Preprocessor &PP = S.getPreprocessor();
+ TokenValue ClangUnsafeBufferUsageTokens[] = {
+ tok::l_square,
+ tok::l_square,
+ PP.getIdentifierInfo("clang"),
+ tok::coloncolon,
+ PP.getIdentifierInfo("unsafe_buffer_usage"),
+ tok::r_square,
+ tok::r_square};
+
+ StringRef MacroName;
+
+ // The returned macro (it returns) is guaranteed not to be function-like:
+ MacroName = PP.getLastMacroWithSpelling(Loc, ClangUnsafeBufferUsageTokens);
+ if (MacroName.empty())
+ MacroName = "[[clang::unsafe_buffer_usage]]";
+ return MacroName.str() + WSSuffix.str();
+ }
+};
+} // namespace
+
+//===----------------------------------------------------------------------===//
// AnalysisBasedWarnings - Worker object used by Sema to execute analysis-based
// warnings on a function, method, or block.
//===----------------------------------------------------------------------===//
@@ -2212,6 +2425,94 @@ static void flushDiagnostics(Sema &S, const sema::FunctionScopeInfo *fscope) {
S.Diag(D.Loc, D.PD);
}
+// An AST Visitor that calls a callback function on each callable DEFINITION
+// that is NOT in a dependent context:
+class CallableVisitor : public RecursiveASTVisitor<CallableVisitor> {
+private:
+ llvm::function_ref<void(const Decl *)> Callback;
+
+public:
+ CallableVisitor(llvm::function_ref<void(const Decl *)> Callback)
+ : Callback(Callback) {}
+
+ bool VisitFunctionDecl(FunctionDecl *Node) {
+ if (cast<DeclContext>(Node)->isDependentContext())
+ return true; // Not to analyze dependent decl
+ // `FunctionDecl->hasBody()` returns true if the function has a body
+ // somewhere defined. But we want to know if this `Node` has a body
+ // child. So we use `doesThisDeclarationHaveABody`:
+ if (Node->doesThisDeclarationHaveABody())
+ Callback(Node);
+ return true;
+ }
+
+ bool VisitBlockDecl(BlockDecl *Node) {
+ if (cast<DeclContext>(Node)->isDependentContext())
+ return true; // Not to analyze dependent decl
+ Callback(Node);
+ return true;
+ }
+
+ bool VisitObjCMethodDecl(ObjCMethodDecl *Node) {
+ if (cast<DeclContext>(Node)->isDependentContext())
+ return true; // Not to analyze dependent decl
+ if (Node->hasBody())
+ Callback(Node);
+ return true;
+ }
+
+ bool VisitLambdaExpr(LambdaExpr *Node) {
+ return VisitFunctionDecl(Node->getCallOperator());
+ }
+
+ bool shouldVisitTemplateInstantiations() const { return true; }
+ bool shouldVisitImplicitCode() const { return false; }
+};
+
+void clang::sema::AnalysisBasedWarnings::IssueWarnings(
+ TranslationUnitDecl *TU) {
+ if (!TU)
+ return; // This is unexpected, give up quietly.
+
+ DiagnosticsEngine &Diags = S.getDiagnostics();
+
+ if (S.hasUncompilableErrorOccurred() || Diags.getIgnoreAllWarnings())
+ // exit if having uncompilable errors or ignoring all warnings:
+ return;
+
+ DiagnosticOptions &DiagOpts = Diags.getDiagnosticOptions();
+
+ // UnsafeBufferUsage analysis settings.
+ bool UnsafeBufferUsageCanEmitSuggestions = S.getLangOpts().CPlusPlus20;
+ bool UnsafeBufferUsageShouldEmitSuggestions = // Should != Can.
+ UnsafeBufferUsageCanEmitSuggestions &&
+ DiagOpts.ShowSafeBufferUsageSuggestions;
+ bool UnsafeBufferUsageShouldSuggestSuggestions =
+ UnsafeBufferUsageCanEmitSuggestions &&
+ !DiagOpts.ShowSafeBufferUsageSuggestions;
+ UnsafeBufferUsageReporter R(S, UnsafeBufferUsageShouldSuggestSuggestions);
+
+ // The Callback function that performs analyses:
+ auto CallAnalyzers = [&](const Decl *Node) -> void {
+ // Perform unsafe buffer usage analysis:
+ if (!Diags.isIgnored(diag::warn_unsafe_buffer_operation,
+ Node->getBeginLoc()) ||
+ !Diags.isIgnored(diag::warn_unsafe_buffer_variable,
+ Node->getBeginLoc())) {
+ clang::checkUnsafeBufferUsage(Node, R,
+ UnsafeBufferUsageShouldEmitSuggestions);
+ }
+
+ // More analysis ...
+ };
+ // Emit per-function analysis-based warnings that require the whole-TU
+ // reasoning. Check if any of them is enabled at all before scanning the AST:
+ if (!Diags.isIgnored(diag::warn_unsafe_buffer_operation, SourceLocation()) ||
+ !Diags.isIgnored(diag::warn_unsafe_buffer_variable, SourceLocation())) {
+ CallableVisitor(CallAnalyzers).TraverseTranslationUnitDecl(TU);
+ }
+}
+
void clang::sema::AnalysisBasedWarnings::IssueWarnings(
sema::AnalysisBasedWarnings::Policy P, sema::FunctionScopeInfo *fscope,
const Decl *D, QualType BlockType) {
@@ -2275,12 +2576,11 @@ void clang::sema::AnalysisBasedWarnings::IssueWarnings(
.setAlwaysAdd(Stmt::CStyleCastExprClass)
.setAlwaysAdd(Stmt::DeclRefExprClass)
.setAlwaysAdd(Stmt::ImplicitCastExprClass)
- .setAlwaysAdd(Stmt::UnaryOperatorClass)
- .setAlwaysAdd(Stmt::AttributedStmtClass);
+ .setAlwaysAdd(Stmt::UnaryOperatorClass);
}
// Install the logical handler.
- llvm::Optional<LogicalErrorHandler> LEH;
+ std::optional<LogicalErrorHandler> LEH;
if (LogicalErrorHandler::hasActiveDiagnostics(Diags, D->getBeginLoc())) {
LEH.emplace(S);
AC.getCFGBuildOptions().Observer = &*LEH;
@@ -2438,7 +2738,7 @@ void clang::sema::AnalysisBasedWarnings::IssueWarnings(
// Check for throw out of non-throwing function.
if (!Diags.isIgnored(diag::warn_throw_in_noexcept_func, D->getBeginLoc()))
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
- if (S.getLangOpts().CPlusPlus && isNoexcept(FD))
+ if (S.getLangOpts().CPlusPlus && !fscope->isCoroutine() && isNoexcept(FD))
checkThrowInNonThrowingFunc(S, FD, AC);
// If none of the previous checks caused a CFG build, trigger one here
diff --git a/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp b/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp
index 3ab2a18f5e8d..350bd78b5710 100644
--- a/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp
@@ -51,6 +51,7 @@ bool CodeCompletionContext::wantConstructorResults() const {
case CCC_ParenthesizedExpression:
case CCC_Symbol:
case CCC_SymbolOrNewName:
+ case CCC_TopLevelOrExpression:
return true;
case CCC_TopLevel:
@@ -82,6 +83,8 @@ bool CodeCompletionContext::wantConstructorResults() const {
case CCC_ObjCInterfaceName:
case CCC_ObjCCategoryName:
case CCC_IncludedFile:
+ case CCC_Attribute:
+ case CCC_ObjCClassForwardDecl:
return false;
}
@@ -161,8 +164,14 @@ StringRef clang::getCompletionKindString(CodeCompletionContext::Kind Kind) {
return "ObjCCategoryName";
case CCKind::CCC_IncludedFile:
return "IncludedFile";
+ case CCKind::CCC_Attribute:
+ return "Attribute";
case CCKind::CCC_Recovery:
return "Recovery";
+ case CCKind::CCC_ObjCClassForwardDecl:
+ return "ObjCClassForwardDecl";
+ case CCKind::CCC_TopLevelOrExpression:
+ return "ReplTopLevel";
}
llvm_unreachable("Invalid CodeCompletionContext::Kind!");
}
@@ -332,7 +341,7 @@ std::string CodeCompletionString::getAsString() const {
break;
}
}
- return OS.str();
+ return Result;
}
const char *CodeCompletionString::getTypedText() const {
@@ -343,6 +352,15 @@ const char *CodeCompletionString::getTypedText() const {
return nullptr;
}
+std::string CodeCompletionString::getAllTypedText() const {
+ std::string Res;
+ for (const Chunk &C : *this)
+ if (C.Kind == CK_TypedText)
+ Res += C.Text;
+
+ return Res;
+}
+
const char *CodeCompletionAllocator::CopyString(const Twine &String) {
SmallString<128> Data;
StringRef Ref = String.toStringRef(Data);
@@ -384,14 +402,13 @@ StringRef CodeCompletionTUInfo::getParentName(const DeclContext *DC) {
SmallString<128> S;
llvm::raw_svector_ostream OS(S);
bool First = true;
- for (unsigned I = Contexts.size(); I != 0; --I) {
+ for (const DeclContext *CurDC : llvm::reverse(Contexts)) {
if (First)
First = false;
else {
OS << "::";
}
- const DeclContext *CurDC = Contexts[I - 1];
if (const auto *CatImpl = dyn_cast<ObjCCategoryImplDecl>(CurDC))
CurDC = CatImpl->getCategoryDecl();
@@ -504,11 +521,105 @@ CodeCompleteConsumer::OverloadCandidate::getFunctionType() const {
case CK_FunctionType:
return Type;
+ case CK_FunctionProtoTypeLoc:
+ return ProtoTypeLoc.getTypePtr();
+ case CK_Template:
+ case CK_Aggregate:
+ return nullptr;
}
llvm_unreachable("Invalid CandidateKind!");
}
+const FunctionProtoTypeLoc
+CodeCompleteConsumer::OverloadCandidate::getFunctionProtoTypeLoc() const {
+ if (Kind == CK_FunctionProtoTypeLoc)
+ return ProtoTypeLoc;
+ return FunctionProtoTypeLoc();
+}
+
+unsigned CodeCompleteConsumer::OverloadCandidate::getNumParams() const {
+ if (Kind == CK_Template)
+ return Template->getTemplateParameters()->size();
+
+ if (Kind == CK_Aggregate) {
+ unsigned Count =
+ std::distance(AggregateType->field_begin(), AggregateType->field_end());
+ if (const auto *CRD = dyn_cast<CXXRecordDecl>(AggregateType))
+ Count += CRD->getNumBases();
+ return Count;
+ }
+
+ if (const auto *FT = getFunctionType())
+ if (const auto *FPT = dyn_cast<FunctionProtoType>(FT))
+ return FPT->getNumParams();
+
+ return 0;
+}
+
+QualType
+CodeCompleteConsumer::OverloadCandidate::getParamType(unsigned N) const {
+ if (Kind == CK_Aggregate) {
+ if (const auto *CRD = dyn_cast<CXXRecordDecl>(AggregateType)) {
+ if (N < CRD->getNumBases())
+ return std::next(CRD->bases_begin(), N)->getType();
+ N -= CRD->getNumBases();
+ }
+ for (const auto *Field : AggregateType->fields())
+ if (N-- == 0)
+ return Field->getType();
+ return QualType();
+ }
+
+ if (Kind == CK_Template) {
+ TemplateParameterList *TPL = getTemplate()->getTemplateParameters();
+ if (N < TPL->size())
+ if (const auto *D = dyn_cast<NonTypeTemplateParmDecl>(TPL->getParam(N)))
+ return D->getType();
+ return QualType();
+ }
+
+ if (const auto *FT = getFunctionType())
+ if (const auto *FPT = dyn_cast<FunctionProtoType>(FT))
+ if (N < FPT->getNumParams())
+ return FPT->getParamType(N);
+ return QualType();
+}
+
+const NamedDecl *
+CodeCompleteConsumer::OverloadCandidate::getParamDecl(unsigned N) const {
+ if (Kind == CK_Aggregate) {
+ if (const auto *CRD = dyn_cast<CXXRecordDecl>(AggregateType)) {
+ if (N < CRD->getNumBases())
+ return std::next(CRD->bases_begin(), N)->getType()->getAsTagDecl();
+ N -= CRD->getNumBases();
+ }
+ for (const auto *Field : AggregateType->fields())
+ if (N-- == 0)
+ return Field;
+ return nullptr;
+ }
+
+ if (Kind == CK_Template) {
+ TemplateParameterList *TPL = getTemplate()->getTemplateParameters();
+ if (N < TPL->size())
+ return TPL->getParam(N);
+ return nullptr;
+ }
+
+ // Note that if we only have a FunctionProtoType, we don't have param decls.
+ if (const auto *FD = getFunction()) {
+ if (N < FD->param_size())
+ return FD->getParamDecl(N);
+ } else if (Kind == CK_FunctionProtoTypeLoc) {
+ if (N < ProtoTypeLoc.getNumParams()) {
+ return ProtoTypeLoc.getParam(N);
+ }
+ }
+
+ return nullptr;
+}
+
//===----------------------------------------------------------------------===//
// Code completion consumer implementation
//===----------------------------------------------------------------------===//
@@ -519,15 +630,16 @@ bool PrintingCodeCompleteConsumer::isResultFilteredOut(
StringRef Filter, CodeCompletionResult Result) {
switch (Result.Kind) {
case CodeCompletionResult::RK_Declaration:
- return !(Result.Declaration->getIdentifier() &&
- Result.Declaration->getIdentifier()->getName().startswith(Filter));
+ return !(
+ Result.Declaration->getIdentifier() &&
+ Result.Declaration->getIdentifier()->getName().starts_with(Filter));
case CodeCompletionResult::RK_Keyword:
- return !StringRef(Result.Keyword).startswith(Filter);
+ return !StringRef(Result.Keyword).starts_with(Filter);
case CodeCompletionResult::RK_Macro:
- return !Result.Macro->getName().startswith(Filter);
+ return !Result.Macro->getName().starts_with(Filter);
case CodeCompletionResult::RK_Pattern:
return !(Result.Pattern->getTypedText() &&
- StringRef(Result.Pattern->getTypedText()).startswith(Filter));
+ StringRef(Result.Pattern->getTypedText()).starts_with(Filter));
}
llvm_unreachable("Unknown code completion result Kind.");
}
@@ -538,8 +650,7 @@ void PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(
std::stable_sort(Results, Results + NumResults);
if (!Context.getPreferredType().isNull())
- OS << "PREFERRED-TYPE: " << Context.getPreferredType().getAsString()
- << "\n";
+ OS << "PREFERRED-TYPE: " << Context.getPreferredType() << '\n';
StringRef Filter = SemaRef.getPreprocessor().getCodeCompletionFilter();
// Print the completions.
@@ -638,12 +749,12 @@ static std::string getOverloadAsString(const CodeCompletionString &CCS) {
break;
}
}
- return OS.str();
+ return Result;
}
void PrintingCodeCompleteConsumer::ProcessOverloadCandidates(
Sema &SemaRef, unsigned CurrentArg, OverloadCandidate *Candidates,
- unsigned NumCandidates, SourceLocation OpenParLoc) {
+ unsigned NumCandidates, SourceLocation OpenParLoc, bool Braced) {
OS << "OPENING_PAREN_LOC: ";
OpenParLoc.print(OS, SemaRef.getSourceManager());
OS << "\n";
@@ -651,7 +762,7 @@ void PrintingCodeCompleteConsumer::ProcessOverloadCandidates(
for (unsigned I = 0; I != NumCandidates; ++I) {
if (CodeCompletionString *CCS = Candidates[I].CreateSignatureString(
CurrentArg, SemaRef, getAllocator(), CCTUInfo,
- includeBriefComments())) {
+ includeBriefComments(), Braced)) {
OS << "OVERLOAD: " << getOverloadAsString(*CCS) << "\n";
}
}
@@ -672,7 +783,7 @@ void CodeCompletionResult::computeCursorKindAndAvailability(bool Accessible) {
// Do nothing: Patterns can come with cursor kinds!
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case RK_Declaration: {
// Set the availability based on attributes.
diff --git a/contrib/llvm-project/clang/lib/Sema/DeclSpec.cpp b/contrib/llvm-project/clang/lib/Sema/DeclSpec.cpp
index 72d9ea6dd3bf..781f24cb71ae 100644
--- a/contrib/llvm-project/clang/lib/Sema/DeclSpec.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/DeclSpec.cpp
@@ -18,6 +18,7 @@
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Sema.h"
@@ -238,7 +239,7 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
// is already used (consider a function returning a function pointer) or too
// small (function with too many parameters), go to the heap.
if (!TheDeclarator.InlineStorageUsed &&
- NumParams <= llvm::array_lengthof(TheDeclarator.InlineParams)) {
+ NumParams <= std::size(TheDeclarator.InlineParams)) {
I.Fun.Params = TheDeclarator.InlineParams;
new (I.Fun.Params) ParamInfo[NumParams];
I.Fun.DeleteParams = false;
@@ -307,8 +308,7 @@ void Declarator::setDecompositionBindings(
// Allocate storage for bindings and stash them away.
if (Bindings.size()) {
- if (!InlineStorageUsed &&
- Bindings.size() <= llvm::array_lengthof(InlineBindings)) {
+ if (!InlineStorageUsed && Bindings.size() <= std::size(InlineBindings)) {
BindingGroup.Bindings = InlineBindings;
BindingGroup.DeleteBindings = false;
InlineStorageUsed = true;
@@ -358,13 +358,14 @@ bool Declarator::isDeclarationOfFunction() const {
case TST_Fract:
case TST_Float16:
case TST_float128:
+ case TST_ibm128:
case TST_enum:
case TST_error:
case TST_float:
case TST_half:
case TST_int:
case TST_int128:
- case TST_extint:
+ case TST_bitint:
case TST_struct:
case TST_interface:
case TST_union:
@@ -383,13 +384,16 @@ bool Declarator::isDeclarationOfFunction() const {
return false;
case TST_decltype:
+ case TST_typeof_unqualExpr:
case TST_typeofExpr:
if (Expr *E = DS.getRepAsExpr())
return E->getType()->isFunctionType();
return false;
- case TST_underlyingType:
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) case TST_##Trait:
+#include "clang/Basic/TransformTypeTraits.def"
case TST_typename:
+ case TST_typeof_unqualType:
case TST_typeofType: {
QualType QT = DS.getRepAsType().get();
if (QT.isNull())
@@ -411,11 +415,23 @@ bool Declarator::isDeclarationOfFunction() const {
bool Declarator::isStaticMember() {
assert(getContext() == DeclaratorContext::Member);
return getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static ||
- (getName().Kind == UnqualifiedIdKind::IK_OperatorFunctionId &&
+ (getName().getKind() == UnqualifiedIdKind::IK_OperatorFunctionId &&
CXXMethodDecl::isStaticOverloadedOperator(
getName().OperatorFunctionId.Operator));
}
+bool Declarator::isExplicitObjectMemberFunction() {
+ if (!isFunctionDeclarator())
+ return false;
+ DeclaratorChunk::FunctionTypeInfo &Fun = getFunctionTypeInfo();
+ if (Fun.NumParams) {
+ auto *P = dyn_cast_or_null<ParmVarDecl>(Fun.Params[0].Param);
+ if (P && P->isExplicitObjectParameter())
+ return true;
+ }
+ return false;
+}
+
bool Declarator::isCtorOrDtor() {
return (getName().getKind() == UnqualifiedIdKind::IK_ConstructorName) ||
(getName().getKind() == UnqualifiedIdKind::IK_DestructorName);
@@ -550,7 +566,7 @@ const char *DeclSpec::getSpecifierName(DeclSpec::TST T,
case DeclSpec::TST_char32: return "char32_t";
case DeclSpec::TST_int: return "int";
case DeclSpec::TST_int128: return "__int128";
- case DeclSpec::TST_extint: return "_ExtInt";
+ case DeclSpec::TST_bitint: return "_BitInt";
case DeclSpec::TST_half: return "half";
case DeclSpec::TST_float: return "float";
case DeclSpec::TST_double: return "double";
@@ -558,6 +574,7 @@ const char *DeclSpec::getSpecifierName(DeclSpec::TST T,
case DeclSpec::TST_fract: return "_Fract";
case DeclSpec::TST_float16: return "_Float16";
case DeclSpec::TST_float128: return "__float128";
+ case DeclSpec::TST_ibm128: return "__ibm128";
case DeclSpec::TST_bool: return Policy.Bool ? "bool" : "_Bool";
case DeclSpec::TST_decimal32: return "_Decimal32";
case DeclSpec::TST_decimal64: return "_Decimal64";
@@ -570,11 +587,16 @@ const char *DeclSpec::getSpecifierName(DeclSpec::TST T,
case DeclSpec::TST_typename: return "type-name";
case DeclSpec::TST_typeofType:
case DeclSpec::TST_typeofExpr: return "typeof";
+ case DeclSpec::TST_typeof_unqualType:
+ case DeclSpec::TST_typeof_unqualExpr: return "typeof_unqual";
case DeclSpec::TST_auto: return "auto";
case DeclSpec::TST_auto_type: return "__auto_type";
case DeclSpec::TST_decltype: return "(decltype)";
case DeclSpec::TST_decltype_auto: return "decltype(auto)";
- case DeclSpec::TST_underlyingType: return "__underlying_type";
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) \
+ case DeclSpec::TST_##Trait: \
+ return "__" #Trait;
+#include "clang/Basic/TransformTypeTraits.def"
case DeclSpec::TST_unknown_anytype: return "__unknown_anytype";
case DeclSpec::TST_atomic: return "_Atomic";
case DeclSpec::TST_BFloat16: return "__bf16";
@@ -631,8 +653,7 @@ bool DeclSpec::SetStorageClassSpec(Sema &S, SCS SC, SourceLocation Loc,
case SCS_extern:
case SCS_private_extern:
case SCS_static:
- if (S.getLangOpts().OpenCLVersion < 120 &&
- !S.getLangOpts().OpenCLCPlusPlus) {
+ if (S.getLangOpts().getOpenCLCompatibleVersion() < 120) {
DiagID = diag::err_opencl_unknown_type_specifier;
PrevSpec = getSpecifierName(SC);
return true;
@@ -931,7 +952,7 @@ bool DeclSpec::SetTypeSpecError() {
return false;
}
-bool DeclSpec::SetExtIntType(SourceLocation KWLoc, Expr *BitsExpr,
+bool DeclSpec::SetBitIntType(SourceLocation KWLoc, Expr *BitsExpr,
const char *&PrevSpec, unsigned &DiagID,
const PrintingPolicy &Policy) {
assert(BitsExpr && "no expression provided!");
@@ -944,7 +965,7 @@ bool DeclSpec::SetExtIntType(SourceLocation KWLoc, Expr *BitsExpr,
return true;
}
- TypeSpecType = TST_extint;
+ TypeSpecType = TST_bitint;
ExprRep = BitsExpr;
TSTLoc = KWLoc;
TSTNameLoc = KWLoc;
@@ -1108,9 +1129,8 @@ void DeclSpec::SaveWrittenBuiltinSpecs() {
}
/// Finish - This does final analysis of the declspec, rejecting things like
-/// "_Imaginary" (lacking an FP type). This returns a diagnostic to issue or
-/// diag::NUM_DIAGNOSTICS if there is no error. After calling this method,
-/// DeclSpec is guaranteed self-consistent, even if an error occurred.
+/// "_Imaginary" (lacking an FP type). After calling this method, DeclSpec is
+/// guaranteed to be self-consistent, even if an error occurred.
void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
// Before possibly changing their values, save specs as written.
SaveWrittenBuiltinSpecs();
@@ -1155,6 +1175,17 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
// Validate and finalize AltiVec vector declspec.
if (TypeAltiVecVector) {
+ // No vector long long without VSX (or ZVector).
+ if ((getTypeSpecWidth() == TypeSpecifierWidth::LongLong) &&
+ !S.Context.getTargetInfo().hasFeature("vsx") &&
+ !S.getLangOpts().ZVector)
+ S.Diag(TSWRange.getBegin(), diag::err_invalid_vector_long_long_decl_spec);
+
+ // No vector __int128 prior to Power8.
+ if ((TypeSpecType == TST_int128) &&
+ !S.Context.getTargetInfo().hasFeature("power8-vector"))
+ S.Diag(TSTLoc, diag::err_invalid_vector_int128_decl_spec);
+
if (TypeAltiVecBool) {
// Sign specifiers are not allowed with vector bool. (PIM 2.1)
if (getTypeSpecSign() != TypeSpecifierSign::Unspecified) {
@@ -1183,13 +1214,6 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
S.Diag(TSWRange.getBegin(), diag::err_invalid_vector_bool_decl_spec)
<< getSpecifierName(getTypeSpecWidth());
- // vector bool long long requires VSX support or ZVector.
- if ((getTypeSpecWidth() == TypeSpecifierWidth::LongLong) &&
- (!S.Context.getTargetInfo().hasFeature("vsx")) &&
- (!S.Context.getTargetInfo().hasFeature("power8-vector")) &&
- !S.getLangOpts().ZVector)
- S.Diag(TSTLoc, diag::err_invalid_vector_long_long_decl_spec);
-
// Elements of vector bool are interpreted as unsigned. (PIM 2.1)
if ((TypeSpecType == TST_char) || (TypeSpecType == TST_int) ||
(TypeSpecType == TST_int128) ||
@@ -1212,13 +1236,15 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
!S.Context.getTargetInfo().hasFeature("arch12"))
S.Diag(TSTLoc, diag::err_invalid_vector_float_decl_spec);
} else if (getTypeSpecWidth() == TypeSpecifierWidth::Long) {
- // vector long is unsupported for ZVector and deprecated for AltiVec.
+ // Vector long is unsupported for ZVector, or without VSX, and deprecated
+ // for AltiVec.
// It has also been historically deprecated on AIX (as an alias for
// "vector int" in both 32-bit and 64-bit modes). It was then made
// unsupported in the Clang-based XL compiler since the deprecated type
// has a number of conflicting semantics and continuing to support it
// is a disservice to users.
if (S.getLangOpts().ZVector ||
+ !S.Context.getTargetInfo().hasFeature("vsx") ||
S.Context.getTargetInfo().getTriple().isOSAIX())
S.Diag(TSWRange.getBegin(), diag::err_invalid_vector_long_decl_spec);
else
@@ -1245,7 +1271,7 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
TypeSpecType = TST_int; // unsigned -> unsigned int, signed -> signed int.
else if (TypeSpecType != TST_int && TypeSpecType != TST_int128 &&
TypeSpecType != TST_char && TypeSpecType != TST_wchar &&
- !IsFixedPointType && TypeSpecType != TST_extint) {
+ !IsFixedPointType && TypeSpecType != TST_bitint) {
S.Diag(TSSLoc, diag::err_invalid_sign_spec)
<< getSpecifierName((TST)TypeSpecType, Policy);
// signed double -> double.
@@ -1295,13 +1321,14 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
" double");
TypeSpecType = TST_double; // _Complex -> _Complex double.
} else if (TypeSpecType == TST_int || TypeSpecType == TST_char ||
- TypeSpecType == TST_extint) {
+ TypeSpecType == TST_bitint) {
// Note that this intentionally doesn't include _Complex _Bool.
if (!S.getLangOpts().CPlusPlus)
S.Diag(TSTLoc, diag::ext_integer_complex);
} else if (TypeSpecType != TST_float && TypeSpecType != TST_double &&
- TypeSpecType != TST_float128) {
- // FIXME: _Float16, __fp16?
+ TypeSpecType != TST_float128 && TypeSpecType != TST_float16 &&
+ TypeSpecType != TST_ibm128) {
+ // FIXME: __fp16?
S.Diag(TSCLoc, diag::err_invalid_complex_spec)
<< getSpecifierName((TST)TypeSpecType, Policy);
TypeSpecComplex = TSC_unspecified;
@@ -1348,8 +1375,9 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
StorageClassSpecLoc = SourceLocation();
}
// Diagnose if we've recovered from an ill-formed 'auto' storage class
- // specifier in a pre-C++11 dialect of C++.
- if (!S.getLangOpts().CPlusPlus11 && TypeSpecType == TST_auto)
+ // specifier in a pre-C++11 dialect of C++ or in a pre-C23 dialect of C.
+ if (!S.getLangOpts().CPlusPlus11 && !S.getLangOpts().C23 &&
+ TypeSpecType == TST_auto)
S.Diag(TSTLoc, diag::ext_auto_type_specifier);
if (S.getLangOpts().CPlusPlus && !S.getLangOpts().CPlusPlus11 &&
StorageClassSpec == SCS_auto)
diff --git a/contrib/llvm-project/clang/lib/Sema/HLSLExternalSemaSource.cpp b/contrib/llvm-project/clang/lib/Sema/HLSLExternalSemaSource.cpp
new file mode 100644
index 000000000000..1a1febf7a352
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/HLSLExternalSemaSource.cpp
@@ -0,0 +1,532 @@
+//===--- HLSLExternalSemaSource.cpp - HLSL Sema Source --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/HLSLExternalSemaSource.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/Basic/AttrKinds.h"
+#include "clang/Basic/HLSLRuntime.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Sema.h"
+#include "llvm/Frontend/HLSL/HLSLResource.h"
+
+#include <functional>
+
+using namespace clang;
+using namespace llvm::hlsl;
+
+namespace {
+
+struct TemplateParameterListBuilder;
+
+struct BuiltinTypeDeclBuilder {
+ CXXRecordDecl *Record = nullptr;
+ ClassTemplateDecl *Template = nullptr;
+ ClassTemplateDecl *PrevTemplate = nullptr;
+ NamespaceDecl *HLSLNamespace = nullptr;
+ llvm::StringMap<FieldDecl *> Fields;
+
+ BuiltinTypeDeclBuilder(CXXRecordDecl *R) : Record(R) {
+ Record->startDefinition();
+ Template = Record->getDescribedClassTemplate();
+ }
+
+ BuiltinTypeDeclBuilder(Sema &S, NamespaceDecl *Namespace, StringRef Name)
+ : HLSLNamespace(Namespace) {
+ ASTContext &AST = S.getASTContext();
+ IdentifierInfo &II = AST.Idents.get(Name, tok::TokenKind::identifier);
+
+ LookupResult Result(S, &II, SourceLocation(), Sema::LookupTagName);
+ CXXRecordDecl *PrevDecl = nullptr;
+ if (S.LookupQualifiedName(Result, HLSLNamespace)) {
+ NamedDecl *Found = Result.getFoundDecl();
+ if (auto *TD = dyn_cast<ClassTemplateDecl>(Found)) {
+ PrevDecl = TD->getTemplatedDecl();
+ PrevTemplate = TD;
+ } else
+ PrevDecl = dyn_cast<CXXRecordDecl>(Found);
+ assert(PrevDecl && "Unexpected lookup result type.");
+ }
+
+ if (PrevDecl && PrevDecl->isCompleteDefinition()) {
+ Record = PrevDecl;
+ return;
+ }
+
+ Record = CXXRecordDecl::Create(AST, TagDecl::TagKind::Class, HLSLNamespace,
+ SourceLocation(), SourceLocation(), &II,
+ PrevDecl, true);
+ Record->setImplicit(true);
+ Record->setLexicalDeclContext(HLSLNamespace);
+ Record->setHasExternalLexicalStorage();
+
+ // Don't let anyone derive from built-in types.
+ Record->addAttr(FinalAttr::CreateImplicit(AST, SourceRange(),
+ FinalAttr::Keyword_final));
+ }
+
+ ~BuiltinTypeDeclBuilder() {
+ if (HLSLNamespace && !Template && Record->getDeclContext() == HLSLNamespace)
+ HLSLNamespace->addDecl(Record);
+ }
+
+ BuiltinTypeDeclBuilder &
+ addMemberVariable(StringRef Name, QualType Type,
+ AccessSpecifier Access = AccessSpecifier::AS_private) {
+ if (Record->isCompleteDefinition())
+ return *this;
+ assert(Record->isBeingDefined() &&
+ "Definition must be started before adding members!");
+ ASTContext &AST = Record->getASTContext();
+
+ IdentifierInfo &II = AST.Idents.get(Name, tok::TokenKind::identifier);
+ TypeSourceInfo *MemTySource =
+ AST.getTrivialTypeSourceInfo(Type, SourceLocation());
+ auto *Field = FieldDecl::Create(
+ AST, Record, SourceLocation(), SourceLocation(), &II, Type, MemTySource,
+ nullptr, false, InClassInitStyle::ICIS_NoInit);
+ Field->setAccess(Access);
+ Field->setImplicit(true);
+ Record->addDecl(Field);
+ Fields[Name] = Field;
+ return *this;
+ }
+
+ BuiltinTypeDeclBuilder &
+ addHandleMember(AccessSpecifier Access = AccessSpecifier::AS_private) {
+ if (Record->isCompleteDefinition())
+ return *this;
+ QualType Ty = Record->getASTContext().VoidPtrTy;
+ if (Template) {
+ if (const auto *TTD = dyn_cast<TemplateTypeParmDecl>(
+ Template->getTemplateParameters()->getParam(0)))
+ Ty = Record->getASTContext().getPointerType(
+ QualType(TTD->getTypeForDecl(), 0));
+ }
+ return addMemberVariable("h", Ty, Access);
+ }
+
+ BuiltinTypeDeclBuilder &annotateResourceClass(ResourceClass RC,
+ ResourceKind RK, bool IsROV) {
+ if (Record->isCompleteDefinition())
+ return *this;
+ Record->addAttr(HLSLResourceAttr::CreateImplicit(Record->getASTContext(),
+ RC, RK, IsROV));
+ return *this;
+ }
+
+ static DeclRefExpr *lookupBuiltinFunction(ASTContext &AST, Sema &S,
+ StringRef Name) {
+ CXXScopeSpec SS;
+ IdentifierInfo &II = AST.Idents.get(Name, tok::TokenKind::identifier);
+ DeclarationNameInfo NameInfo =
+ DeclarationNameInfo(DeclarationName(&II), SourceLocation());
+ LookupResult R(S, NameInfo, Sema::LookupOrdinaryName);
+ S.LookupParsedName(R, S.getCurScope(), &SS, false);
+ assert(R.isSingleResult() &&
+ "Since this is a builtin it should always resolve!");
+ auto *VD = cast<ValueDecl>(R.getFoundDecl());
+ QualType Ty = VD->getType();
+ return DeclRefExpr::Create(AST, NestedNameSpecifierLoc(), SourceLocation(),
+ VD, false, NameInfo, Ty, VK_PRValue);
+ }
+
+ static Expr *emitResourceClassExpr(ASTContext &AST, ResourceClass RC) {
+ return IntegerLiteral::Create(
+ AST,
+ llvm::APInt(AST.getIntWidth(AST.UnsignedCharTy),
+ static_cast<uint8_t>(RC)),
+ AST.UnsignedCharTy, SourceLocation());
+ }
+
+ BuiltinTypeDeclBuilder &addDefaultHandleConstructor(Sema &S,
+ ResourceClass RC) {
+ if (Record->isCompleteDefinition())
+ return *this;
+ ASTContext &AST = Record->getASTContext();
+
+ QualType ConstructorType =
+ AST.getFunctionType(AST.VoidTy, {}, FunctionProtoType::ExtProtoInfo());
+
+ CanQualType CanTy = Record->getTypeForDecl()->getCanonicalTypeUnqualified();
+ DeclarationName Name = AST.DeclarationNames.getCXXConstructorName(CanTy);
+ CXXConstructorDecl *Constructor = CXXConstructorDecl::Create(
+ AST, Record, SourceLocation(),
+ DeclarationNameInfo(Name, SourceLocation()), ConstructorType,
+ AST.getTrivialTypeSourceInfo(ConstructorType, SourceLocation()),
+ ExplicitSpecifier(), false, true, false,
+ ConstexprSpecKind::Unspecified);
+
+ DeclRefExpr *Fn =
+ lookupBuiltinFunction(AST, S, "__builtin_hlsl_create_handle");
+
+ Expr *RCExpr = emitResourceClassExpr(AST, RC);
+ Expr *Call = CallExpr::Create(AST, Fn, {RCExpr}, AST.VoidPtrTy, VK_PRValue,
+ SourceLocation(), FPOptionsOverride());
+
+ CXXThisExpr *This = CXXThisExpr::Create(
+ AST, SourceLocation(), Constructor->getFunctionObjectParameterType(),
+ true);
+ Expr *Handle = MemberExpr::CreateImplicit(AST, This, false, Fields["h"],
+ Fields["h"]->getType(), VK_LValue,
+ OK_Ordinary);
+
+ // If the handle isn't a void pointer, cast the builtin result to the
+ // correct type.
+ if (Handle->getType().getCanonicalType() != AST.VoidPtrTy) {
+ Call = CXXStaticCastExpr::Create(
+ AST, Handle->getType(), VK_PRValue, CK_Dependent, Call, nullptr,
+ AST.getTrivialTypeSourceInfo(Handle->getType(), SourceLocation()),
+ FPOptionsOverride(), SourceLocation(), SourceLocation(),
+ SourceRange());
+ }
+
+ BinaryOperator *Assign = BinaryOperator::Create(
+ AST, Handle, Call, BO_Assign, Handle->getType(), VK_LValue, OK_Ordinary,
+ SourceLocation(), FPOptionsOverride());
+
+ Constructor->setBody(
+ CompoundStmt::Create(AST, {Assign}, FPOptionsOverride(),
+ SourceLocation(), SourceLocation()));
+ Constructor->setAccess(AccessSpecifier::AS_public);
+ Record->addDecl(Constructor);
+ return *this;
+ }
+
+ BuiltinTypeDeclBuilder &addArraySubscriptOperators() {
+ if (Record->isCompleteDefinition())
+ return *this;
+ addArraySubscriptOperator(true);
+ addArraySubscriptOperator(false);
+ return *this;
+ }
+
+ BuiltinTypeDeclBuilder &addArraySubscriptOperator(bool IsConst) {
+ if (Record->isCompleteDefinition())
+ return *this;
+ assert(Fields.count("h") > 0 &&
+ "Subscript operator must be added after the handle.");
+
+ FieldDecl *Handle = Fields["h"];
+ ASTContext &AST = Record->getASTContext();
+
+ assert(Handle->getType().getCanonicalType() != AST.VoidPtrTy &&
+ "Not yet supported for void pointer handles.");
+
+ QualType ElemTy =
+ QualType(Handle->getType()->getPointeeOrArrayElementType(), 0);
+ QualType ReturnTy = ElemTy;
+
+ FunctionProtoType::ExtProtoInfo ExtInfo;
+
+ // Subscript operators return references to elements, const makes the
+ // reference and method const so that the underlying data is not mutable.
+ ReturnTy = AST.getLValueReferenceType(ReturnTy);
+ if (IsConst) {
+ ExtInfo.TypeQuals.addConst();
+ ReturnTy.addConst();
+ }
+
+ QualType MethodTy =
+ AST.getFunctionType(ReturnTy, {AST.UnsignedIntTy}, ExtInfo);
+ auto *TSInfo = AST.getTrivialTypeSourceInfo(MethodTy, SourceLocation());
+ auto *MethodDecl = CXXMethodDecl::Create(
+ AST, Record, SourceLocation(),
+ DeclarationNameInfo(
+ AST.DeclarationNames.getCXXOperatorName(OO_Subscript),
+ SourceLocation()),
+ MethodTy, TSInfo, SC_None, false, false, ConstexprSpecKind::Unspecified,
+ SourceLocation());
+
+ IdentifierInfo &II = AST.Idents.get("Idx", tok::TokenKind::identifier);
+ auto *IdxParam = ParmVarDecl::Create(
+ AST, MethodDecl->getDeclContext(), SourceLocation(), SourceLocation(),
+ &II, AST.UnsignedIntTy,
+ AST.getTrivialTypeSourceInfo(AST.UnsignedIntTy, SourceLocation()),
+ SC_None, nullptr);
+ MethodDecl->setParams({IdxParam});
+
+ // Also add the parameter to the function prototype.
+ auto FnProtoLoc = TSInfo->getTypeLoc().getAs<FunctionProtoTypeLoc>();
+ FnProtoLoc.setParam(0, IdxParam);
+
+ auto *This =
+ CXXThisExpr::Create(AST, SourceLocation(),
+ MethodDecl->getFunctionObjectParameterType(), true);
+ auto *HandleAccess = MemberExpr::CreateImplicit(
+ AST, This, false, Handle, Handle->getType(), VK_LValue, OK_Ordinary);
+
+ auto *IndexExpr = DeclRefExpr::Create(
+ AST, NestedNameSpecifierLoc(), SourceLocation(), IdxParam, false,
+ DeclarationNameInfo(IdxParam->getDeclName(), SourceLocation()),
+ AST.UnsignedIntTy, VK_PRValue);
+
+ auto *Array =
+ new (AST) ArraySubscriptExpr(HandleAccess, IndexExpr, ElemTy, VK_LValue,
+ OK_Ordinary, SourceLocation());
+
+ auto *Return = ReturnStmt::Create(AST, SourceLocation(), Array, nullptr);
+
+ MethodDecl->setBody(CompoundStmt::Create(AST, {Return}, FPOptionsOverride(),
+ SourceLocation(),
+ SourceLocation()));
+ MethodDecl->setLexicalDeclContext(Record);
+ MethodDecl->setAccess(AccessSpecifier::AS_public);
+ MethodDecl->addAttr(AlwaysInlineAttr::CreateImplicit(
+ AST, SourceRange(), AlwaysInlineAttr::CXX11_clang_always_inline));
+ Record->addDecl(MethodDecl);
+
+ return *this;
+ }
+
+ BuiltinTypeDeclBuilder &startDefinition() {
+ if (Record->isCompleteDefinition())
+ return *this;
+ Record->startDefinition();
+ return *this;
+ }
+
+ BuiltinTypeDeclBuilder &completeDefinition() {
+ if (Record->isCompleteDefinition())
+ return *this;
+ assert(Record->isBeingDefined() &&
+ "Definition must be started before completing it.");
+
+ Record->completeDefinition();
+ return *this;
+ }
+
+ TemplateParameterListBuilder addTemplateArgumentList();
+ BuiltinTypeDeclBuilder &addSimpleTemplateParams(ArrayRef<StringRef> Names);
+};
+
+struct TemplateParameterListBuilder {
+ BuiltinTypeDeclBuilder &Builder;
+ ASTContext &AST;
+ llvm::SmallVector<NamedDecl *> Params;
+
+ TemplateParameterListBuilder(BuiltinTypeDeclBuilder &RB)
+ : Builder(RB), AST(RB.Record->getASTContext()) {}
+
+ ~TemplateParameterListBuilder() { finalizeTemplateArgs(); }
+
+ TemplateParameterListBuilder &
+ addTypeParameter(StringRef Name, QualType DefaultValue = QualType()) {
+ if (Builder.Record->isCompleteDefinition())
+ return *this;
+ unsigned Position = static_cast<unsigned>(Params.size());
+ auto *Decl = TemplateTypeParmDecl::Create(
+ AST, Builder.Record->getDeclContext(), SourceLocation(),
+ SourceLocation(), /* TemplateDepth */ 0, Position,
+ &AST.Idents.get(Name, tok::TokenKind::identifier), /* Typename */ false,
+ /* ParameterPack */ false);
+ if (!DefaultValue.isNull())
+ Decl->setDefaultArgument(AST.getTrivialTypeSourceInfo(DefaultValue));
+
+ Params.emplace_back(Decl);
+ return *this;
+ }
+
+ BuiltinTypeDeclBuilder &finalizeTemplateArgs() {
+ if (Params.empty())
+ return Builder;
+ auto *ParamList =
+ TemplateParameterList::Create(AST, SourceLocation(), SourceLocation(),
+ Params, SourceLocation(), nullptr);
+ Builder.Template = ClassTemplateDecl::Create(
+ AST, Builder.Record->getDeclContext(), SourceLocation(),
+ DeclarationName(Builder.Record->getIdentifier()), ParamList,
+ Builder.Record);
+ Builder.Record->setDescribedClassTemplate(Builder.Template);
+ Builder.Template->setImplicit(true);
+ Builder.Template->setLexicalDeclContext(Builder.Record->getDeclContext());
+ // NOTE: setPreviousDecl before addDecl so new decl replace old decl when
+ // make visible.
+ Builder.Template->setPreviousDecl(Builder.PrevTemplate);
+ Builder.Record->getDeclContext()->addDecl(Builder.Template);
+ Params.clear();
+
+ QualType T = Builder.Template->getInjectedClassNameSpecialization();
+ T = AST.getInjectedClassNameType(Builder.Record, T);
+
+ return Builder;
+ }
+};
+} // namespace
+
+TemplateParameterListBuilder BuiltinTypeDeclBuilder::addTemplateArgumentList() {
+ return TemplateParameterListBuilder(*this);
+}
+
+BuiltinTypeDeclBuilder &
+BuiltinTypeDeclBuilder::addSimpleTemplateParams(ArrayRef<StringRef> Names) {
+ TemplateParameterListBuilder Builder = this->addTemplateArgumentList();
+ for (StringRef Name : Names)
+ Builder.addTypeParameter(Name);
+ return Builder.finalizeTemplateArgs();
+}
+
+HLSLExternalSemaSource::~HLSLExternalSemaSource() {}
+
+void HLSLExternalSemaSource::InitializeSema(Sema &S) {
+ SemaPtr = &S;
+ ASTContext &AST = SemaPtr->getASTContext();
+ // If the translation unit has external storage force external decls to load.
+ if (AST.getTranslationUnitDecl()->hasExternalLexicalStorage())
+ (void)AST.getTranslationUnitDecl()->decls_begin();
+
+ IdentifierInfo &HLSL = AST.Idents.get("hlsl", tok::TokenKind::identifier);
+ LookupResult Result(S, &HLSL, SourceLocation(), Sema::LookupNamespaceName);
+ NamespaceDecl *PrevDecl = nullptr;
+ if (S.LookupQualifiedName(Result, AST.getTranslationUnitDecl()))
+ PrevDecl = Result.getAsSingle<NamespaceDecl>();
+ HLSLNamespace = NamespaceDecl::Create(
+ AST, AST.getTranslationUnitDecl(), /*Inline=*/false, SourceLocation(),
+ SourceLocation(), &HLSL, PrevDecl, /*Nested=*/false);
+ HLSLNamespace->setImplicit(true);
+ HLSLNamespace->setHasExternalLexicalStorage();
+ AST.getTranslationUnitDecl()->addDecl(HLSLNamespace);
+
+ // Force external decls in the HLSL namespace to load from the PCH.
+ (void)HLSLNamespace->getCanonicalDecl()->decls_begin();
+ defineTrivialHLSLTypes();
+ defineHLSLTypesWithForwardDeclarations();
+
+ // This adds a `using namespace hlsl` directive. In DXC, we don't put HLSL's
+ // built in types inside a namespace, but we are planning to change that in
+ // the near future. In order to be source compatible older versions of HLSL
+ // will need to implicitly use the hlsl namespace. For now in clang everything
+ // will get added to the namespace, and we can remove the using directive for
+ // future language versions to match HLSL's evolution.
+ auto *UsingDecl = UsingDirectiveDecl::Create(
+ AST, AST.getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
+ NestedNameSpecifierLoc(), SourceLocation(), HLSLNamespace,
+ AST.getTranslationUnitDecl());
+
+ AST.getTranslationUnitDecl()->addDecl(UsingDecl);
+}
+
+void HLSLExternalSemaSource::defineHLSLVectorAlias() {
+ ASTContext &AST = SemaPtr->getASTContext();
+
+ llvm::SmallVector<NamedDecl *> TemplateParams;
+
+ auto *TypeParam = TemplateTypeParmDecl::Create(
+ AST, HLSLNamespace, SourceLocation(), SourceLocation(), 0, 0,
+ &AST.Idents.get("element", tok::TokenKind::identifier), false, false);
+ TypeParam->setDefaultArgument(AST.getTrivialTypeSourceInfo(AST.FloatTy));
+
+ TemplateParams.emplace_back(TypeParam);
+
+ auto *SizeParam = NonTypeTemplateParmDecl::Create(
+ AST, HLSLNamespace, SourceLocation(), SourceLocation(), 0, 1,
+ &AST.Idents.get("element_count", tok::TokenKind::identifier), AST.IntTy,
+ false, AST.getTrivialTypeSourceInfo(AST.IntTy));
+ Expr *LiteralExpr =
+ IntegerLiteral::Create(AST, llvm::APInt(AST.getIntWidth(AST.IntTy), 4),
+ AST.IntTy, SourceLocation());
+ SizeParam->setDefaultArgument(LiteralExpr);
+ TemplateParams.emplace_back(SizeParam);
+
+ auto *ParamList =
+ TemplateParameterList::Create(AST, SourceLocation(), SourceLocation(),
+ TemplateParams, SourceLocation(), nullptr);
+
+ IdentifierInfo &II = AST.Idents.get("vector", tok::TokenKind::identifier);
+
+ QualType AliasType = AST.getDependentSizedExtVectorType(
+ AST.getTemplateTypeParmType(0, 0, false, TypeParam),
+ DeclRefExpr::Create(
+ AST, NestedNameSpecifierLoc(), SourceLocation(), SizeParam, false,
+ DeclarationNameInfo(SizeParam->getDeclName(), SourceLocation()),
+ AST.IntTy, VK_LValue),
+ SourceLocation());
+
+ auto *Record = TypeAliasDecl::Create(AST, HLSLNamespace, SourceLocation(),
+ SourceLocation(), &II,
+ AST.getTrivialTypeSourceInfo(AliasType));
+ Record->setImplicit(true);
+
+ auto *Template =
+ TypeAliasTemplateDecl::Create(AST, HLSLNamespace, SourceLocation(),
+ Record->getIdentifier(), ParamList, Record);
+
+ Record->setDescribedAliasTemplate(Template);
+ Template->setImplicit(true);
+ Template->setLexicalDeclContext(Record->getDeclContext());
+ HLSLNamespace->addDecl(Template);
+}
+
+void HLSLExternalSemaSource::defineTrivialHLSLTypes() {
+ defineHLSLVectorAlias();
+
+ ResourceDecl = BuiltinTypeDeclBuilder(*SemaPtr, HLSLNamespace, "Resource")
+ .startDefinition()
+ .addHandleMember(AccessSpecifier::AS_public)
+ .completeDefinition()
+ .Record;
+}
+
+/// Set up common members and attributes for buffer types
+static BuiltinTypeDeclBuilder setupBufferType(CXXRecordDecl *Decl, Sema &S,
+ ResourceClass RC, ResourceKind RK,
+ bool IsROV) {
+ return BuiltinTypeDeclBuilder(Decl)
+ .addHandleMember()
+ .addDefaultHandleConstructor(S, RC)
+ .annotateResourceClass(RC, RK, IsROV);
+}
+
+void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
+ CXXRecordDecl *Decl;
+ Decl = BuiltinTypeDeclBuilder(*SemaPtr, HLSLNamespace, "RWBuffer")
+ .addSimpleTemplateParams({"element_type"})
+ .Record;
+ onCompletion(Decl, [this](CXXRecordDecl *Decl) {
+ setupBufferType(Decl, *SemaPtr, ResourceClass::UAV,
+ ResourceKind::TypedBuffer, /*IsROV=*/false)
+ .addArraySubscriptOperators()
+ .completeDefinition();
+ });
+
+ Decl =
+ BuiltinTypeDeclBuilder(*SemaPtr, HLSLNamespace, "RasterizerOrderedBuffer")
+ .addSimpleTemplateParams({"element_type"})
+ .Record;
+ onCompletion(Decl, [this](CXXRecordDecl *Decl) {
+ setupBufferType(Decl, *SemaPtr, ResourceClass::UAV,
+ ResourceKind::TypedBuffer, /*IsROV=*/true)
+ .addArraySubscriptOperators()
+ .completeDefinition();
+ });
+}
+
+void HLSLExternalSemaSource::onCompletion(CXXRecordDecl *Record,
+ CompletionFunction Fn) {
+ Completions.insert(std::make_pair(Record->getCanonicalDecl(), Fn));
+}
+
+void HLSLExternalSemaSource::CompleteType(TagDecl *Tag) {
+ if (!isa<CXXRecordDecl>(Tag))
+ return;
+ auto Record = cast<CXXRecordDecl>(Tag);
+
+ // If this is a specialization, we need to get the underlying templated
+ // declaration and complete that.
+ if (auto TDecl = dyn_cast<ClassTemplateSpecializationDecl>(Record))
+ Record = TDecl->getSpecializedTemplate()->getTemplatedDecl();
+ Record = Record->getCanonicalDecl();
+ auto It = Completions.find(Record);
+ if (It == Completions.end())
+ return;
+ It->second(Record);
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/IdentifierResolver.cpp b/contrib/llvm-project/clang/lib/Sema/IdentifierResolver.cpp
index 333f4d70986a..2213c3c83724 100644
--- a/contrib/llvm-project/clang/lib/Sema/IdentifierResolver.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/IdentifierResolver.cpp
@@ -60,6 +60,9 @@ public:
}
}
+ IdDeclInfoMap(const IdDeclInfoMap &) = delete;
+ IdDeclInfoMap &operator=(const IdDeclInfoMap &) = delete;
+
/// Returns the IdDeclInfo associated to the DeclarationName.
/// It creates a new IdDeclInfo if one was not created before for this id.
IdDeclInfo &operator[](DeclarationName Name);
@@ -99,10 +102,16 @@ IdentifierResolver::~IdentifierResolver() {
bool IdentifierResolver::isDeclInScope(Decl *D, DeclContext *Ctx, Scope *S,
bool AllowInlineNamespace) const {
Ctx = Ctx->getRedeclContext();
-
+ // The names for HLSL cbuffer/tbuffers only used by the CPU-side
+ // reflection API which supports querying bindings. It will not have name
+ // conflict with other Decls.
+ if (LangOpt.HLSL && isa<HLSLBufferDecl>(D))
+ return false;
if (Ctx->isFunctionOrMethod() || (S && S->isFunctionPrototypeScope())) {
// Ignore the scopes associated within transparent declaration contexts.
- while (S->getEntity() && S->getEntity()->isTransparentContext())
+ while (S->getEntity() &&
+ (S->getEntity()->isTransparentContext() ||
+ (!LangOpt.CPlusPlus && isa<RecordDecl>(S->getEntity()))))
S = S->getParent();
if (S->isDeclScope(D))
@@ -121,12 +130,14 @@ bool IdentifierResolver::isDeclInScope(Decl *D, DeclContext *Ctx, Scope *S,
// of the controlled statement.
//
assert(S->getParent() && "No TUScope?");
- if (S->getParent()->getFlags() & Scope::ControlScope) {
+ // If the current decl is in a lambda, we shouldn't consider this is a
+ // redefinition as lambda has its own scope.
+ if (S->getParent()->isControlScope() && !S->isFunctionScope()) {
S = S->getParent();
if (S->isDeclScope(D))
return true;
}
- if (S->getFlags() & Scope::FnTryCatchScope)
+ if (S->isFnTryCatchScope())
return S->getParent()->isDeclScope(D);
}
return false;
@@ -225,9 +236,12 @@ void IdentifierResolver::RemoveDecl(NamedDecl *D) {
return toIdDeclInfo(Ptr)->RemoveDecl(D);
}
-/// begin - Returns an iterator for decls with name 'Name'.
-IdentifierResolver::iterator
-IdentifierResolver::begin(DeclarationName Name) {
+llvm::iterator_range<IdentifierResolver::iterator>
+IdentifierResolver::decls(DeclarationName Name) {
+ return {begin(Name), end()};
+}
+
+IdentifierResolver::iterator IdentifierResolver::begin(DeclarationName Name) {
if (IdentifierInfo *II = Name.getAsIdentifierInfo())
readingIdentifier(*II);
@@ -285,7 +299,7 @@ static DeclMatchKind compareDeclarations(NamedDecl *Existing, NamedDecl *New) {
// If the existing declaration is somewhere in the previous declaration
// chain of the new declaration, then prefer the new declaration.
- for (auto RD : New->redecls()) {
+ for (auto *RD : New->redecls()) {
if (RD == Existing)
return DMK_Replace;
diff --git a/contrib/llvm-project/clang/lib/Sema/JumpDiagnostics.cpp b/contrib/llvm-project/clang/lib/Sema/JumpDiagnostics.cpp
index 999c2a481459..45ff36d5fe23 100644
--- a/contrib/llvm-project/clang/lib/Sema/JumpDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/JumpDiagnostics.cpp
@@ -72,10 +72,9 @@ class JumpScopeChecker {
SmallVector<Stmt*, 16> Jumps;
SmallVector<Stmt*, 4> IndirectJumps;
- SmallVector<Stmt*, 4> AsmJumps;
+ SmallVector<LabelDecl *, 4> IndirectJumpTargets;
SmallVector<AttributedStmt *, 4> MustTailStmts;
- SmallVector<LabelDecl*, 4> IndirectJumpTargets;
- SmallVector<LabelDecl*, 4> AsmJumpTargets;
+
public:
JumpScopeChecker(Stmt *Body, Sema &S);
private:
@@ -86,7 +85,7 @@ private:
void BuildScopeInformation(Stmt *S, unsigned &origParentScope);
void VerifyJumps();
- void VerifyIndirectOrAsmJumps(bool IsAsmGoto);
+ void VerifyIndirectJumps();
void VerifyMustTailStmts();
void NoteJumpIntoScopes(ArrayRef<unsigned> ToScopes);
void DiagnoseIndirectOrAsmJump(Stmt *IG, unsigned IGScope, LabelDecl *Target,
@@ -115,8 +114,7 @@ JumpScopeChecker::JumpScopeChecker(Stmt *Body, Sema &s)
// Check that all jumps we saw are kosher.
VerifyJumps();
- VerifyIndirectOrAsmJumps(false);
- VerifyIndirectOrAsmJumps(true);
+ VerifyIndirectJumps();
VerifyMustTailStmts();
}
@@ -333,11 +331,8 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
// operand (to avoid recording the address-of-label use), which
// works only because of the restricted set of expressions which
// we detect as constant targets.
- if (cast<IndirectGotoStmt>(S)->getConstantTarget()) {
- LabelAndGotoScopes[S] = ParentScope;
- Jumps.push_back(S);
- return;
- }
+ if (cast<IndirectGotoStmt>(S)->getConstantTarget())
+ goto RecordJumpScope;
LabelAndGotoScopes[S] = ParentScope;
IndirectJumps.push_back(S);
@@ -354,34 +349,32 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
BuildScopeInformation(Var, ParentScope);
++StmtsToSkip;
}
- LLVM_FALLTHROUGH;
+ goto RecordJumpScope;
+
+ case Stmt::GCCAsmStmtClass:
+ if (!cast<GCCAsmStmt>(S)->isAsmGoto())
+ break;
+ [[fallthrough]];
case Stmt::GotoStmtClass:
+ RecordJumpScope:
// Remember both what scope a goto is in as well as the fact that we have
// it. This makes the second scan not have to walk the AST again.
LabelAndGotoScopes[S] = ParentScope;
Jumps.push_back(S);
break;
- case Stmt::GCCAsmStmtClass:
- if (auto *GS = dyn_cast<GCCAsmStmt>(S))
- if (GS->isAsmGoto()) {
- // Remember both what scope a goto is in as well as the fact that we
- // have it. This makes the second scan not have to walk the AST again.
- LabelAndGotoScopes[S] = ParentScope;
- AsmJumps.push_back(GS);
- for (auto *E : GS->labels())
- AsmJumpTargets.push_back(E->getLabel());
- }
- break;
-
case Stmt::IfStmtClass: {
IfStmt *IS = cast<IfStmt>(S);
- if (!(IS->isConstexpr() || IS->isObjCAvailabilityCheck()))
+ if (!(IS->isConstexpr() || IS->isConsteval() ||
+ IS->isObjCAvailabilityCheck()))
break;
- unsigned Diag = IS->isConstexpr() ? diag::note_protected_by_constexpr_if
- : diag::note_protected_by_if_available;
+ unsigned Diag = diag::note_protected_by_if_available;
+ if (IS->isConstexpr())
+ Diag = diag::note_protected_by_constexpr_if;
+ else if (IS->isConsteval())
+ Diag = diag::note_protected_by_consteval_if;
if (VarDecl *Var = IS->getConditionVariable())
BuildScopeInformation(Var, ParentScope);
@@ -389,7 +382,9 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
// Cannot jump into the middle of the condition.
unsigned NewParentScope = Scopes.size();
Scopes.push_back(GotoScope(ParentScope, Diag, 0, IS->getBeginLoc()));
- BuildScopeInformation(IS->getCond(), NewParentScope);
+
+ if (!IS->isConsteval())
+ BuildScopeInformation(IS->getCond(), NewParentScope);
// Jumps into either arm of an 'if constexpr' are not allowed.
NewParentScope = Scopes.size();
@@ -471,6 +466,21 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
return;
}
+ case Stmt::StmtExprClass: {
+ // [GNU]
+ // Jumping into a statement expression with goto or using
+ // a switch statement outside the statement expression with
+ // a case or default label inside the statement expression is not permitted.
+ // Jumping out of a statement expression is permitted.
+ StmtExpr *SE = cast<StmtExpr>(S);
+ unsigned NewParentScope = Scopes.size();
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_enters_statement_expression,
+ /*OutDiag=*/0, SE->getBeginLoc()));
+ BuildScopeInformation(SE->getSubStmt(), NewParentScope);
+ return;
+ }
+
case Stmt::ObjCAtTryStmtClass: {
// Disallow jumps into any part of an @try statement by pushing a scope and
// walking all sub-stmts in that scope.
@@ -487,8 +497,7 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
}
// Jump from the catch to the finally or try is not valid.
- for (unsigned I = 0, N = AT->getNumCatchStmts(); I != N; ++I) {
- ObjCAtCatchStmt *AC = AT->getCatchStmt(I);
+ for (ObjCAtCatchStmt *AC : AT->catch_stmts()) {
unsigned NewParentScope = Scopes.size();
Scopes.push_back(GotoScope(ParentScope,
diag::note_protected_by_objc_catch,
@@ -661,6 +670,22 @@ void JumpScopeChecker::VerifyJumps() {
continue;
}
+ // If an asm goto jumps to a different scope, things like destructors or
+ // initializers might not be run which may be suprising to users. Perhaps
+ // this behavior can be changed in the future, but today Clang will not
+ // generate such code. Produce a diagnostic instead. See also the
+ // discussion here: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=110728.
+ if (auto *G = dyn_cast<GCCAsmStmt>(Jump)) {
+ for (AddrLabelExpr *L : G->labels()) {
+ LabelDecl *LD = L->getLabel();
+ unsigned JumpScope = LabelAndGotoScopes[G];
+ unsigned TargetScope = LabelAndGotoScopes[LD->getStmt()];
+ if (JumpScope != TargetScope)
+ DiagnoseIndirectOrAsmJump(G, JumpScope, LD, TargetScope);
+ }
+ continue;
+ }
+
// We only get indirect gotos here when they have a constant target.
if (IndirectGotoStmt *IGS = dyn_cast<IndirectGotoStmt>(Jump)) {
LabelDecl *Target = IGS->getConstantTarget();
@@ -689,17 +714,16 @@ void JumpScopeChecker::VerifyJumps() {
}
}
-/// VerifyIndirectOrAsmJumps - Verify whether any possible indirect goto or
-/// asm goto jump might cross a protection boundary. Unlike direct jumps,
-/// indirect or asm goto jumps count cleanups as protection boundaries:
-/// since there's no way to know where the jump is going, we can't implicitly
-/// run the right cleanups the way we can with direct jumps.
-/// Thus, an indirect/asm jump is "trivial" if it bypasses no
-/// initializations and no teardowns. More formally, an indirect/asm jump
-/// from A to B is trivial if the path out from A to DCA(A,B) is
-/// trivial and the path in from DCA(A,B) to B is trivial, where
-/// DCA(A,B) is the deepest common ancestor of A and B.
-/// Jump-triviality is transitive but asymmetric.
+/// VerifyIndirectJumps - Verify whether any possible indirect goto jump might
+/// cross a protection boundary. Unlike direct jumps, indirect goto jumps
+/// count cleanups as protection boundaries: since there's no way to know where
+/// the jump is going, we can't implicitly run the right cleanups the way we
+/// can with direct jumps. Thus, an indirect/asm jump is "trivial" if it
+/// bypasses no initializations and no teardowns. More formally, an
+/// indirect/asm jump from A to B is trivial if the path out from A to DCA(A,B)
+/// is trivial and the path in from DCA(A,B) to B is trivial, where DCA(A,B) is
+/// the deepest common ancestor of A and B. Jump-triviality is transitive but
+/// asymmetric.
///
/// A path in is trivial if none of the entered scopes have an InDiag.
/// A path out is trivial is none of the exited scopes have an OutDiag.
@@ -707,57 +731,45 @@ void JumpScopeChecker::VerifyJumps() {
/// Under these definitions, this function checks that the indirect
/// jump between A and B is trivial for every indirect goto statement A
/// and every label B whose address was taken in the function.
-void JumpScopeChecker::VerifyIndirectOrAsmJumps(bool IsAsmGoto) {
- SmallVector<Stmt*, 4> GotoJumps = IsAsmGoto ? AsmJumps : IndirectJumps;
- if (GotoJumps.empty())
+void JumpScopeChecker::VerifyIndirectJumps() {
+ if (IndirectJumps.empty())
return;
- SmallVector<LabelDecl *, 4> JumpTargets =
- IsAsmGoto ? AsmJumpTargets : IndirectJumpTargets;
// If there aren't any address-of-label expressions in this function,
// complain about the first indirect goto.
- if (JumpTargets.empty()) {
- assert(!IsAsmGoto &&"only indirect goto can get here");
- S.Diag(GotoJumps[0]->getBeginLoc(),
+ if (IndirectJumpTargets.empty()) {
+ S.Diag(IndirectJumps[0]->getBeginLoc(),
diag::err_indirect_goto_without_addrlabel);
return;
}
- // Collect a single representative of every scope containing an
- // indirect or asm goto. For most code bases, this substantially cuts
- // down on the number of jump sites we'll have to consider later.
- typedef std::pair<unsigned, Stmt*> JumpScope;
+ // Collect a single representative of every scope containing an indirect
+ // goto. For most code bases, this substantially cuts down on the number of
+ // jump sites we'll have to consider later.
+ using JumpScope = std::pair<unsigned, Stmt *>;
SmallVector<JumpScope, 32> JumpScopes;
{
llvm::DenseMap<unsigned, Stmt*> JumpScopesMap;
- for (SmallVectorImpl<Stmt *>::iterator I = GotoJumps.begin(),
- E = GotoJumps.end();
- I != E; ++I) {
- Stmt *IG = *I;
+ for (Stmt *IG : IndirectJumps) {
if (CHECK_PERMISSIVE(!LabelAndGotoScopes.count(IG)))
continue;
unsigned IGScope = LabelAndGotoScopes[IG];
- Stmt *&Entry = JumpScopesMap[IGScope];
- if (!Entry) Entry = IG;
+ if (!JumpScopesMap.contains(IGScope))
+ JumpScopesMap[IGScope] = IG;
}
JumpScopes.reserve(JumpScopesMap.size());
- for (llvm::DenseMap<unsigned, Stmt *>::iterator I = JumpScopesMap.begin(),
- E = JumpScopesMap.end();
- I != E; ++I)
- JumpScopes.push_back(*I);
+ for (auto &Pair : JumpScopesMap)
+ JumpScopes.emplace_back(Pair);
}
// Collect a single representative of every scope containing a
// label whose address was taken somewhere in the function.
// For most code bases, there will be only one such scope.
llvm::DenseMap<unsigned, LabelDecl*> TargetScopes;
- for (SmallVectorImpl<LabelDecl *>::iterator I = JumpTargets.begin(),
- E = JumpTargets.end();
- I != E; ++I) {
- LabelDecl *TheLabel = *I;
+ for (LabelDecl *TheLabel : IndirectJumpTargets) {
if (CHECK_PERMISSIVE(!LabelAndGotoScopes.count(TheLabel->getStmt())))
continue;
unsigned LabelScope = LabelAndGotoScopes[TheLabel->getStmt()];
- LabelDecl *&Target = TargetScopes[LabelScope];
- if (!Target) Target = TheLabel;
+ if (!TargetScopes.contains(LabelScope))
+ TargetScopes[LabelScope] = TheLabel;
}
// For each target scope, make sure it's trivially reachable from
@@ -769,11 +781,7 @@ void JumpScopeChecker::VerifyIndirectOrAsmJumps(bool IsAsmGoto) {
// entered, then verify that every jump scope can be trivially
// exitted to reach a scope in S.
llvm::BitVector Reachable(Scopes.size(), false);
- for (llvm::DenseMap<unsigned,LabelDecl*>::iterator
- TI = TargetScopes.begin(), TE = TargetScopes.end(); TI != TE; ++TI) {
- unsigned TargetScope = TI->first;
- LabelDecl *TargetLabel = TI->second;
-
+ for (auto [TargetScope, TargetLabel] : TargetScopes) {
Reachable.reset();
// Mark all the enclosing scopes from which you can safely jump
@@ -794,10 +802,8 @@ void JumpScopeChecker::VerifyIndirectOrAsmJumps(bool IsAsmGoto) {
// Walk through all the jump sites, checking that they can trivially
// reach this label scope.
- for (SmallVectorImpl<JumpScope>::iterator
- I = JumpScopes.begin(), E = JumpScopes.end(); I != E; ++I) {
- unsigned Scope = I->first;
-
+ for (auto [JumpScope, JumpStmt] : JumpScopes) {
+ unsigned Scope = JumpScope;
// Walk out the "scope chain" for this scope, looking for a scope
// we've marked reachable. For well-formed code this amortizes
// to O(JumpScopes.size() / Scopes.size()): we only iterate
@@ -808,7 +814,7 @@ void JumpScopeChecker::VerifyIndirectOrAsmJumps(bool IsAsmGoto) {
if (Reachable.test(Scope)) {
// If we find something reachable, mark all the scopes we just
// walked through as reachable.
- for (unsigned S = I->first; S != Scope; S = Scopes[S].ParentScope)
+ for (unsigned S = JumpScope; S != Scope; S = Scopes[S].ParentScope)
Reachable.set(S);
IsReachable = true;
break;
@@ -827,7 +833,7 @@ void JumpScopeChecker::VerifyIndirectOrAsmJumps(bool IsAsmGoto) {
// Only diagnose if we didn't find something.
if (IsReachable) continue;
- DiagnoseIndirectOrAsmJump(I->second, I->first, TargetLabel, TargetScope);
+ DiagnoseIndirectOrAsmJump(JumpStmt, JumpScope, TargetLabel, TargetScope);
}
}
}
diff --git a/contrib/llvm-project/clang/lib/Sema/MultiplexExternalSemaSource.cpp b/contrib/llvm-project/clang/lib/Sema/MultiplexExternalSemaSource.cpp
index 072775642d75..058e22cb2b81 100644
--- a/contrib/llvm-project/clang/lib/Sema/MultiplexExternalSemaSource.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/MultiplexExternalSemaSource.cpp
@@ -16,24 +16,30 @@ using namespace clang;
char MultiplexExternalSemaSource::ID;
-///Constructs a new multiplexing external sema source and appends the
+/// Constructs a new multiplexing external sema source and appends the
/// given element to it.
///
-MultiplexExternalSemaSource::MultiplexExternalSemaSource(ExternalSemaSource &s1,
- ExternalSemaSource &s2){
- Sources.push_back(&s1);
- Sources.push_back(&s2);
+MultiplexExternalSemaSource::MultiplexExternalSemaSource(
+ ExternalSemaSource *S1, ExternalSemaSource *S2) {
+ S1->Retain();
+ S2->Retain();
+ Sources.push_back(S1);
+ Sources.push_back(S2);
}
// pin the vtable here.
-MultiplexExternalSemaSource::~MultiplexExternalSemaSource() {}
+MultiplexExternalSemaSource::~MultiplexExternalSemaSource() {
+ for (auto *S : Sources)
+ S->Release();
+}
-///Appends new source to the source list.
+/// Appends new source to the source list.
///
///\param[in] source - An ExternalSemaSource.
///
-void MultiplexExternalSemaSource::addSource(ExternalSemaSource &source) {
- Sources.push_back(&source);
+void MultiplexExternalSemaSource::AddSource(ExternalSemaSource *Source) {
+ Source->Retain();
+ Sources.push_back(Source);
}
//===----------------------------------------------------------------------===//
@@ -335,3 +341,9 @@ bool MultiplexExternalSemaSource::MaybeDiagnoseMissingCompleteType(
}
return false;
}
+
+void MultiplexExternalSemaSource::AssignedLambdaNumbering(
+ const CXXRecordDecl *Lambda) {
+ for (auto *Source : Sources)
+ Source->AssignedLambdaNumbering(Lambda);
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td b/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td
index cd704fe395a9..0cceba090bd8 100644
--- a/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td
+++ b/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td
@@ -57,14 +57,33 @@ class FunctionExtension<string _Ext> : AbstractExtension<_Ext>;
// disabled.
class TypeExtension<string _Ext> : AbstractExtension<_Ext>;
+// Concatenate zero or more space-separated extensions in NewExts to Base and
+// return the resulting FunctionExtension in ret.
+class concatExtension<FunctionExtension Base, string NewExts> {
+ FunctionExtension ret = FunctionExtension<
+ !cond(
+ // Return Base extension if NewExts is empty,
+ !empty(NewExts) : Base.ExtName,
+
+ // otherwise, return NewExts if Base extension is empty,
+ !empty(Base.ExtName) : NewExts,
+
+ // otherwise, concatenate NewExts to Base.
+ true : Base.ExtName # " " # NewExts
+ )
+ >;
+}
+
// TypeExtension definitions.
def NoTypeExt : TypeExtension<"">;
def Fp16TypeExt : TypeExtension<"cl_khr_fp16">;
def Fp64TypeExt : TypeExtension<"cl_khr_fp64">;
+def Atomic64TypeExt : TypeExtension<"cl_khr_int64_base_atomics cl_khr_int64_extended_atomics">;
+def AtomicFp64TypeExt : TypeExtension<"cl_khr_int64_base_atomics cl_khr_int64_extended_atomics cl_khr_fp64">;
// FunctionExtension definitions.
def FuncExtNone : FunctionExtension<"">;
-def FuncExtKhrSubgroups : FunctionExtension<"cl_khr_subgroups">;
+def FuncExtKhrSubgroups : FunctionExtension<"__opencl_subgroup_builtins">;
def FuncExtKhrSubgroupExtendedTypes : FunctionExtension<"cl_khr_subgroup_extended_types">;
def FuncExtKhrSubgroupNonUniformVote : FunctionExtension<"cl_khr_subgroup_non_uniform_vote">;
def FuncExtKhrSubgroupBallot : FunctionExtension<"cl_khr_subgroup_ballot">;
@@ -83,12 +102,37 @@ def FuncExtKhrMipmapImage : FunctionExtension<"cl_khr_mipmap_imag
def FuncExtKhrMipmapImageWrites : FunctionExtension<"cl_khr_mipmap_image_writes">;
def FuncExtKhrGlMsaaSharing : FunctionExtension<"cl_khr_gl_msaa_sharing">;
+def FuncExtOpenCLCDeviceEnqueue : FunctionExtension<"__opencl_c_device_enqueue">;
+def FuncExtOpenCLCGenericAddressSpace : FunctionExtension<"__opencl_c_generic_address_space">;
+def FuncExtOpenCLCNamedAddressSpaceBuiltins : FunctionExtension<"__opencl_c_named_address_space_builtins">;
+def FuncExtOpenCLCPipes : FunctionExtension<"__opencl_c_pipes">;
+def FuncExtOpenCLCWGCollectiveFunctions : FunctionExtension<"__opencl_c_work_group_collective_functions">;
+def FuncExtOpenCLCReadWriteImages : FunctionExtension<"__opencl_c_read_write_images">;
+def FuncExtFloatAtomicsFp16GlobalASLoadStore : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp16_global_atomic_load_store">;
+def FuncExtFloatAtomicsFp16LocalASLoadStore : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp16_local_atomic_load_store">;
+def FuncExtFloatAtomicsFp16GenericASLoadStore : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp16_global_atomic_load_store __opencl_c_ext_fp16_local_atomic_load_store">;
+def FuncExtFloatAtomicsFp16GlobalASAdd : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp16_global_atomic_add">;
+def FuncExtFloatAtomicsFp32GlobalASAdd : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp32_global_atomic_add">;
+def FuncExtFloatAtomicsFp64GlobalASAdd : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp64_global_atomic_add">;
+def FuncExtFloatAtomicsFp16LocalASAdd : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp16_local_atomic_add">;
+def FuncExtFloatAtomicsFp32LocalASAdd : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp32_local_atomic_add">;
+def FuncExtFloatAtomicsFp64LocalASAdd : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp64_local_atomic_add">;
+def FuncExtFloatAtomicsFp16GenericASAdd : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp16_local_atomic_add __opencl_c_ext_fp16_global_atomic_add">;
+def FuncExtFloatAtomicsFp32GenericASAdd : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp32_local_atomic_add __opencl_c_ext_fp32_global_atomic_add">;
+def FuncExtFloatAtomicsFp64GenericASAdd : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp64_local_atomic_add __opencl_c_ext_fp64_global_atomic_add">;
+def FuncExtFloatAtomicsFp16GlobalASMinMax : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp16_global_atomic_min_max">;
+def FuncExtFloatAtomicsFp32GlobalASMinMax : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp32_global_atomic_min_max">;
+def FuncExtFloatAtomicsFp64GlobalASMinMax : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp64_global_atomic_min_max">;
+def FuncExtFloatAtomicsFp16LocalASMinMax : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp16_local_atomic_min_max">;
+def FuncExtFloatAtomicsFp32LocalASMinMax : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp32_local_atomic_min_max">;
+def FuncExtFloatAtomicsFp64LocalASMinMax : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp64_local_atomic_min_max">;
+def FuncExtFloatAtomicsFp16GenericASMinMax : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp16_local_atomic_min_max __opencl_c_ext_fp16_global_atomic_min_max">;
+def FuncExtFloatAtomicsFp32GenericASMinMax : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp32_local_atomic_min_max __opencl_c_ext_fp32_global_atomic_min_max">;
+def FuncExtFloatAtomicsFp64GenericASMinMax : FunctionExtension<"cl_ext_float_atomics __opencl_c_ext_fp64_local_atomic_min_max __opencl_c_ext_fp64_global_atomic_min_max">;
+
// Not a real extension, but a workaround to add C++ for OpenCL specific builtins.
def FuncExtOpenCLCxx : FunctionExtension<"__cplusplus">;
-// Multiple extensions
-def FuncExtKhrMipmapWritesAndWrite3d : FunctionExtension<"cl_khr_mipmap_image_writes cl_khr_3d_image_writes">;
-
// Arm extensions.
def ArmIntegerDotProductInt8 : FunctionExtension<"cl_arm_integer_dot_product_int8">;
def ArmIntegerDotProductAccumulateInt8 : FunctionExtension<"cl_arm_integer_dot_product_accumulate_int8">;
@@ -198,7 +242,13 @@ class ImageType<Type _Ty, string _AccessQualifier> :
let IsConst = _Ty.IsConst;
let IsVolatile = _Ty.IsVolatile;
let AddrSpace = _Ty.AddrSpace;
- let Extension = _Ty.Extension;
+ // Add TypeExtensions for writable "image3d_t" and "read_write" image types.
+ let Extension = !cond(
+ !and(!eq(_Ty.Name, "image3d_t"), !eq(_AccessQualifier, "WO")) : TypeExtension<"cl_khr_3d_image_writes">,
+ !and(!eq(_Ty.Name, "image3d_t"), !eq(_AccessQualifier, "RW")) : TypeExtension<"cl_khr_3d_image_writes __opencl_c_read_write_images">,
+ !or(!eq(_Ty.Name, "image2d_depth_t"), !eq(_Ty.Name, "image2d_array_depth_t")) : TypeExtension<"cl_khr_depth_images">,
+ !eq(_AccessQualifier, "RW") : TypeExtension<"__opencl_c_read_write_images">,
+ true : _Ty.Extension);
}
// OpenCL enum type (e.g. memory_scope).
@@ -303,9 +353,22 @@ def Float : Type<"float", QualType<"Context.FloatTy">>;
let Extension = Fp64TypeExt in {
def Double : Type<"double", QualType<"Context.DoubleTy">>;
}
+
+// The half type for builtins that require the cl_khr_fp16 extension.
let Extension = Fp16TypeExt in {
def Half : Type<"half", QualType<"Context.HalfTy">>;
}
+
+// Without the cl_khr_fp16 extension, the half type can only be used to declare
+// a pointer. Define const and non-const pointer types in all address spaces.
+// Use the "__half" alias to allow the TableGen emitter to distinguish the
+// (extensionless) pointee type of these pointer-to-half types from the "half"
+// type defined above that already carries the cl_khr_fp16 extension.
+foreach AS = [PrivateAS, GlobalAS, ConstantAS, LocalAS, GenericAS] in {
+ def "HalfPtr" # AS : PointerType<Type<"__half", QualType<"Context.HalfTy">>, AS>;
+ def "HalfPtrConst" # AS : PointerType<ConstType<Type<"__half", QualType<"Context.HalfTy">>>, AS>;
+}
+
def Size : Type<"size_t", QualType<"Context.getSizeType()">>;
def PtrDiff : Type<"ptrdiff_t", QualType<"Context.getPointerDiffType()">>;
def IntPtr : Type<"intptr_t", QualType<"Context.getIntPtrType()">>;
@@ -343,10 +406,15 @@ def NDRange : TypedefType<"ndrange_t">;
// OpenCL v2.0 s6.13.11: Atomic integer and floating-point types.
def AtomicInt : Type<"atomic_int", QualType<"Context.getAtomicType(Context.IntTy)">>;
def AtomicUInt : Type<"atomic_uint", QualType<"Context.getAtomicType(Context.UnsignedIntTy)">>;
-def AtomicLong : Type<"atomic_long", QualType<"Context.getAtomicType(Context.LongTy)">>;
-def AtomicULong : Type<"atomic_ulong", QualType<"Context.getAtomicType(Context.UnsignedLongTy)">>;
+let Extension = Atomic64TypeExt in {
+ def AtomicLong : Type<"atomic_long", QualType<"Context.getAtomicType(Context.LongTy)">>;
+ def AtomicULong : Type<"atomic_ulong", QualType<"Context.getAtomicType(Context.UnsignedLongTy)">>;
+}
def AtomicFloat : Type<"atomic_float", QualType<"Context.getAtomicType(Context.FloatTy)">>;
-def AtomicDouble : Type<"atomic_double", QualType<"Context.getAtomicType(Context.DoubleTy)">>;
+let Extension = AtomicFp64TypeExt in {
+ def AtomicDouble : Type<"atomic_double", QualType<"Context.getAtomicType(Context.DoubleTy)">>;
+}
+def AtomicHalf : Type<"atomic_half", QualType<"Context.getAtomicType(Context.HalfTy)">>;
def AtomicIntPtr : Type<"atomic_intptr_t", QualType<"Context.getAtomicType(Context.getIntPtrType())">>;
def AtomicUIntPtr : Type<"atomic_uintptr_t", QualType<"Context.getAtomicType(Context.getUIntPtrType())">>;
def AtomicSize : Type<"atomic_size_t", QualType<"Context.getAtomicType(Context.getSizeType())">>;
@@ -543,9 +611,10 @@ foreach name = ["fma", "mad"] in {
def : Builtin<name, [FGenTypeN, FGenTypeN, FGenTypeN, FGenTypeN], Attr.Const>;
}
-// --- Version dependent ---
-let MaxVersion = CL20 in {
- foreach AS = [GlobalAS, LocalAS, PrivateAS] in {
+// The following math builtins take pointer arguments. Which overloads are
+// available depends on whether the generic address space feature is enabled.
+multiclass MathWithPointer<list<AddressSpace> addrspaces> {
+ foreach AS = addrspaces in {
foreach name = ["fract", "modf", "sincos"] in {
def : Builtin<name, [FGenTypeN, FGenTypeN, PointerType<FGenTypeN, AS>]>;
}
@@ -561,19 +630,12 @@ let MaxVersion = CL20 in {
}
}
}
-let MinVersion = CL20 in {
- foreach name = ["fract", "modf", "sincos"] in {
- def : Builtin<name, [FGenTypeN, FGenTypeN, PointerType<FGenTypeN, GenericAS>]>;
- }
- foreach name = ["frexp", "lgamma_r"] in {
- foreach Type = [GenTypeFloatVecAndScalar, GenTypeDoubleVecAndScalar, GenTypeHalfVecAndScalar] in {
- def : Builtin<name, [Type, Type, PointerType<GenTypeIntVecAndScalar, GenericAS>]>;
- } }
- foreach name = ["remquo"] in {
- foreach Type = [GenTypeFloatVecAndScalar, GenTypeDoubleVecAndScalar, GenTypeHalfVecAndScalar] in {
- def : Builtin<name, [Type, Type, Type, PointerType<GenTypeIntVecAndScalar, GenericAS>]>;
- }
- }
+
+let Extension = FuncExtOpenCLCNamedAddressSpaceBuiltins in {
+ defm : MathWithPointer<[GlobalAS, LocalAS, PrivateAS]>;
+}
+let Extension = FuncExtOpenCLCGenericAddressSpace in {
+ defm : MathWithPointer<[GenericAS]>;
}
// --- Table 9 ---
@@ -783,165 +845,83 @@ foreach name = ["select"] in {
// OpenCL v1.1 s6.11.7, v1.2 s6.12.7, v2.0 s6.13.7 - Vector Data Load and Store Functions
// OpenCL Extension v1.1 s9.3.6 and s9.6.6, v1.2 s9.5.6, v2.0 s5.1.6 and s6.1.6 - Vector Data Load and Store Functions
// --- Table 15 ---
-// Variants for OpenCL versions below 2.0, using pointers to the global, local
-// and private address spaces.
-let MaxVersion = CL20 in {
- foreach AS = [GlobalAS, LocalAS, PrivateAS] in {
+multiclass VloadVstore<list<AddressSpace> addrspaces, bit defStores> {
+ foreach AS = addrspaces in {
foreach VSize = [2, 3, 4, 8, 16] in {
foreach name = ["vload" # VSize] in {
- def : Builtin<name, [VectorType<Char, VSize>, Size, PointerType<ConstType<Char>, AS>]>;
- def : Builtin<name, [VectorType<UChar, VSize>, Size, PointerType<ConstType<UChar>, AS>]>;
- def : Builtin<name, [VectorType<Short, VSize>, Size, PointerType<ConstType<Short>, AS>]>;
- def : Builtin<name, [VectorType<UShort, VSize>, Size, PointerType<ConstType<UShort>, AS>]>;
- def : Builtin<name, [VectorType<Int, VSize>, Size, PointerType<ConstType<Int>, AS>]>;
- def : Builtin<name, [VectorType<UInt, VSize>, Size, PointerType<ConstType<UInt>, AS>]>;
- def : Builtin<name, [VectorType<Long, VSize>, Size, PointerType<ConstType<Long>, AS>]>;
- def : Builtin<name, [VectorType<ULong, VSize>, Size, PointerType<ConstType<ULong>, AS>]>;
- def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Float>, AS>]>;
- def : Builtin<name, [VectorType<Double, VSize>, Size, PointerType<ConstType<Double>, AS>]>;
- def : Builtin<name, [VectorType<Half, VSize>, Size, PointerType<ConstType<Half>, AS>]>;
- }
- foreach name = ["vstore" # VSize] in {
- def : Builtin<name, [Void, VectorType<Char, VSize>, Size, PointerType<Char, AS>]>;
- def : Builtin<name, [Void, VectorType<UChar, VSize>, Size, PointerType<UChar, AS>]>;
- def : Builtin<name, [Void, VectorType<Short, VSize>, Size, PointerType<Short, AS>]>;
- def : Builtin<name, [Void, VectorType<UShort, VSize>, Size, PointerType<UShort, AS>]>;
- def : Builtin<name, [Void, VectorType<Int, VSize>, Size, PointerType<Int, AS>]>;
- def : Builtin<name, [Void, VectorType<UInt, VSize>, Size, PointerType<UInt, AS>]>;
- def : Builtin<name, [Void, VectorType<Long, VSize>, Size, PointerType<Long, AS>]>;
- def : Builtin<name, [Void, VectorType<ULong, VSize>, Size, PointerType<ULong, AS>]>;
- def : Builtin<name, [Void, VectorType<Float, VSize>, Size, PointerType<Float, AS>]>;
- def : Builtin<name, [Void, VectorType<Double, VSize>, Size, PointerType<Double, AS>]>;
- def : Builtin<name, [Void, VectorType<Half, VSize>, Size, PointerType<Half, AS>]>;
- }
- foreach name = ["vloada_half" # VSize] in {
- def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Half>, AS>]>;
+ def : Builtin<name, [VectorType<Char, VSize>, Size, PointerType<ConstType<Char>, AS>], Attr.Pure>;
+ def : Builtin<name, [VectorType<UChar, VSize>, Size, PointerType<ConstType<UChar>, AS>], Attr.Pure>;
+ def : Builtin<name, [VectorType<Short, VSize>, Size, PointerType<ConstType<Short>, AS>], Attr.Pure>;
+ def : Builtin<name, [VectorType<UShort, VSize>, Size, PointerType<ConstType<UShort>, AS>], Attr.Pure>;
+ def : Builtin<name, [VectorType<Int, VSize>, Size, PointerType<ConstType<Int>, AS>], Attr.Pure>;
+ def : Builtin<name, [VectorType<UInt, VSize>, Size, PointerType<ConstType<UInt>, AS>], Attr.Pure>;
+ def : Builtin<name, [VectorType<Long, VSize>, Size, PointerType<ConstType<Long>, AS>], Attr.Pure>;
+ def : Builtin<name, [VectorType<ULong, VSize>, Size, PointerType<ConstType<ULong>, AS>], Attr.Pure>;
+ def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Float>, AS>], Attr.Pure>;
+ def : Builtin<name, [VectorType<Double, VSize>, Size, PointerType<ConstType<Double>, AS>], Attr.Pure>;
+ def : Builtin<name, [VectorType<Half, VSize>, Size, PointerType<ConstType<Half>, AS>], Attr.Pure>;
}
- foreach rnd = ["", "_rte", "_rtz", "_rtp", "_rtn"] in {
- foreach name = ["vstorea_half" # VSize # rnd] in {
- def : Builtin<name, [Void, VectorType<Float, VSize>, Size, PointerType<Half, AS>]>;
- def : Builtin<name, [Void, VectorType<Double, VSize>, Size, PointerType<Half, AS>]>;
+ if defStores then {
+ foreach name = ["vstore" # VSize] in {
+ def : Builtin<name, [Void, VectorType<Char, VSize>, Size, PointerType<Char, AS>]>;
+ def : Builtin<name, [Void, VectorType<UChar, VSize>, Size, PointerType<UChar, AS>]>;
+ def : Builtin<name, [Void, VectorType<Short, VSize>, Size, PointerType<Short, AS>]>;
+ def : Builtin<name, [Void, VectorType<UShort, VSize>, Size, PointerType<UShort, AS>]>;
+ def : Builtin<name, [Void, VectorType<Int, VSize>, Size, PointerType<Int, AS>]>;
+ def : Builtin<name, [Void, VectorType<UInt, VSize>, Size, PointerType<UInt, AS>]>;
+ def : Builtin<name, [Void, VectorType<Long, VSize>, Size, PointerType<Long, AS>]>;
+ def : Builtin<name, [Void, VectorType<ULong, VSize>, Size, PointerType<ULong, AS>]>;
+ def : Builtin<name, [Void, VectorType<Float, VSize>, Size, PointerType<Float, AS>]>;
+ def : Builtin<name, [Void, VectorType<Double, VSize>, Size, PointerType<Double, AS>]>;
+ def : Builtin<name, [Void, VectorType<Half, VSize>, Size, PointerType<Half, AS>]>;
}
}
}
}
}
-// Variants for OpenCL versions above 2.0, using pointers to the generic
-// address space.
-let MinVersion = CL20 in {
- foreach VSize = [2, 3, 4, 8, 16] in {
- foreach name = ["vload" # VSize] in {
- def : Builtin<name, [VectorType<Char, VSize>, Size, PointerType<ConstType<Char>, GenericAS>]>;
- def : Builtin<name, [VectorType<UChar, VSize>, Size, PointerType<ConstType<UChar>, GenericAS>]>;
- def : Builtin<name, [VectorType<Short, VSize>, Size, PointerType<ConstType<Short>, GenericAS>]>;
- def : Builtin<name, [VectorType<UShort, VSize>, Size, PointerType<ConstType<UShort>, GenericAS>]>;
- def : Builtin<name, [VectorType<Int, VSize>, Size, PointerType<ConstType<Int>, GenericAS>]>;
- def : Builtin<name, [VectorType<UInt, VSize>, Size, PointerType<ConstType<UInt>, GenericAS>]>;
- def : Builtin<name, [VectorType<Long, VSize>, Size, PointerType<ConstType<Long>, GenericAS>]>;
- def : Builtin<name, [VectorType<ULong, VSize>, Size, PointerType<ConstType<ULong>, GenericAS>]>;
- def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Float>, GenericAS>]>;
- def : Builtin<name, [VectorType<Double, VSize>, Size, PointerType<ConstType<Double>, GenericAS>]>;
- def : Builtin<name, [VectorType<Half, VSize>, Size, PointerType<ConstType<Half>, GenericAS>]>;
- }
- foreach name = ["vstore" # VSize] in {
- def : Builtin<name, [Void, VectorType<Char, VSize>, Size, PointerType<Char, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<UChar, VSize>, Size, PointerType<UChar, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<Short, VSize>, Size, PointerType<Short, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<UShort, VSize>, Size, PointerType<UShort, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<Int, VSize>, Size, PointerType<Int, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<UInt, VSize>, Size, PointerType<UInt, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<Long, VSize>, Size, PointerType<Long, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<ULong, VSize>, Size, PointerType<ULong, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<Float, VSize>, Size, PointerType<Float, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<Double, VSize>, Size, PointerType<Double, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<Half, VSize>, Size, PointerType<Half, GenericAS>]>;
- }
- foreach name = ["vloada_half" # VSize] in {
- def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Half>, GenericAS>]>;
- }
- foreach rnd = ["", "_rte", "_rtz", "_rtp", "_rtn"] in {
- foreach name = ["vstorea_half" # VSize # rnd] in {
- def : Builtin<name, [Void, VectorType<Float, VSize>, Size, PointerType<Half, GenericAS>]>;
- def : Builtin<name, [Void, VectorType<Double, VSize>, Size, PointerType<Half, GenericAS>]>;
- }
- }
- }
+
+let Extension = FuncExtOpenCLCNamedAddressSpaceBuiltins in {
+ defm : VloadVstore<[GlobalAS, LocalAS, PrivateAS], 1>;
}
-// Variants using pointers to the constant address space.
-foreach VSize = [2, 3, 4, 8, 16] in {
- foreach name = ["vload" # VSize] in {
- def : Builtin<name, [VectorType<Char, VSize>, Size, PointerType<ConstType<Char>, ConstantAS>]>;
- def : Builtin<name, [VectorType<UChar, VSize>, Size, PointerType<ConstType<UChar>, ConstantAS>]>;
- def : Builtin<name, [VectorType<Short, VSize>, Size, PointerType<ConstType<Short>, ConstantAS>]>;
- def : Builtin<name, [VectorType<UShort, VSize>, Size, PointerType<ConstType<UShort>, ConstantAS>]>;
- def : Builtin<name, [VectorType<Int, VSize>, Size, PointerType<ConstType<Int>, ConstantAS>]>;
- def : Builtin<name, [VectorType<UInt, VSize>, Size, PointerType<ConstType<UInt>, ConstantAS>]>;
- def : Builtin<name, [VectorType<Long, VSize>, Size, PointerType<ConstType<Long>, ConstantAS>]>;
- def : Builtin<name, [VectorType<ULong, VSize>, Size, PointerType<ConstType<ULong>, ConstantAS>]>;
- def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Float>, ConstantAS>]>;
- def : Builtin<name, [VectorType<Double, VSize>, Size, PointerType<ConstType<Double>, ConstantAS>]>;
- def : Builtin<name, [VectorType<Half, VSize>, Size, PointerType<ConstType<Half>, ConstantAS>]>;
- }
- foreach name = ["vloada_half" # VSize] in {
- def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Half>, ConstantAS>]>;
- }
-}
-let MaxVersion = CL20 in {
- foreach AS = [GlobalAS, LocalAS, PrivateAS] in {
- def : Builtin<"vload_half", [Float, Size, PointerType<ConstType<Half>, AS>]>;
- def : Builtin<"vloada_half", [Float, Size, PointerType<ConstType<Half>, AS>]>;
- foreach VSize = [2, 3, 4, 8, 16] in {
- foreach name = ["vload_half" # VSize] in {
- def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Half>, AS>]>;
- }
- }
- foreach rnd = ["", "_rte", "_rtz", "_rtp", "_rtn"] in {
- foreach name = ["vstore_half" # rnd, "vstorea_half" # rnd] in {
- def : Builtin<name, [Void, Float, Size, PointerType<Half, AS>]>;
- def : Builtin<name, [Void, Double, Size, PointerType<Half, AS>]>;
- }
- foreach VSize = [2, 3, 4, 8, 16] in {
- foreach name = ["vstore_half" # VSize # rnd] in {
- def : Builtin<name, [Void, VectorType<Float, VSize>, Size, PointerType<Half, AS>]>;
- def : Builtin<name, [Void, VectorType<Double, VSize>, Size, PointerType<Half, AS>]>;
- }
- }
- }
- }
+let Extension = FuncExtOpenCLCGenericAddressSpace in {
+ defm : VloadVstore<[GenericAS], 1>;
}
-let MinVersion = CL20 in {
- foreach AS = [GenericAS] in {
- def : Builtin<"vload_half", [Float, Size, PointerType<ConstType<Half>, AS>]>;
- def : Builtin<"vloada_half", [Float, Size, PointerType<ConstType<Half>, AS>]>;
+// vload with constant address space is available regardless of version.
+defm : VloadVstore<[ConstantAS], 0>;
+
+multiclass VloadVstoreHalf<list<AddressSpace> addrspaces, bit defStores> {
+ foreach AS = addrspaces in {
+ def : Builtin<"vload_half", [Float, Size, !cast<Type>("HalfPtrConst" # AS)], Attr.Pure>;
foreach VSize = [2, 3, 4, 8, 16] in {
- foreach name = ["vload_half" # VSize] in {
- def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Half>, AS>]>;
+ foreach name = ["vload_half" # VSize, "vloada_half" # VSize] in {
+ def : Builtin<name, [VectorType<Float, VSize>, Size, !cast<Type>("HalfPtrConst" # AS)], Attr.Pure>;
}
}
- foreach rnd = ["", "_rte", "_rtz", "_rtp", "_rtn"] in {
- foreach name = ["vstore_half" # rnd, "vstorea_half" # rnd] in {
- def : Builtin<name, [Void, Float, Size, PointerType<Half, AS>]>;
- def : Builtin<name, [Void, Double, Size, PointerType<Half, AS>]>;
- }
- foreach VSize = [2, 3, 4, 8, 16] in {
- foreach name = ["vstore_half" # VSize # rnd] in {
- def : Builtin<name, [Void, VectorType<Float, VSize>, Size, PointerType<Half, AS>]>;
- def : Builtin<name, [Void, VectorType<Double, VSize>, Size, PointerType<Half, AS>]>;
+ if defStores then {
+ foreach rnd = ["", "_rte", "_rtz", "_rtp", "_rtn"] in {
+ foreach name = ["vstore_half" # rnd] in {
+ def : Builtin<name, [Void, Float, Size, !cast<Type>("HalfPtr" # AS)]>;
+ def : Builtin<name, [Void, Double, Size, !cast<Type>("HalfPtr" # AS)]>;
+ }
+ foreach VSize = [2, 3, 4, 8, 16] in {
+ foreach name = ["vstore_half" # VSize # rnd, "vstorea_half" # VSize # rnd] in {
+ def : Builtin<name, [Void, VectorType<Float, VSize>, Size, !cast<Type>("HalfPtr" # AS)]>;
+ def : Builtin<name, [Void, VectorType<Double, VSize>, Size, !cast<Type>("HalfPtr" # AS)]>;
+ }
}
}
}
}
}
-foreach AS = [ConstantAS] in {
- def : Builtin<"vload_half", [Float, Size, PointerType<ConstType<Half>, AS>]>;
- def : Builtin<"vloada_half", [Float, Size, PointerType<ConstType<Half>, AS>]>;
- foreach VSize = [2, 3, 4, 8, 16] in {
- foreach name = ["vload_half" # VSize] in {
- def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Half>, AS>]>;
- }
- }
+let Extension = FuncExtOpenCLCNamedAddressSpaceBuiltins in {
+ defm : VloadVstoreHalf<[GlobalAS, LocalAS, PrivateAS], 1>;
+}
+let Extension = FuncExtOpenCLCGenericAddressSpace in {
+ defm : VloadVstoreHalf<[GenericAS], 1>;
}
+// vload_half and vloada_half with constant address space are available regardless of version.
+defm : VloadVstoreHalf<[ConstantAS], 0>;
// OpenCL v3.0 s6.15.8 - Synchronization Functions.
def : Builtin<"barrier", [Void, MemFenceFlags], Attr.Convergent>;
@@ -958,7 +938,7 @@ def : Builtin<"write_mem_fence", [Void, MemFenceFlags]>;
// OpenCL v3.0 s6.15.10 - Address Space Qualifier Functions.
// to_global, to_local, to_private are declared in Builtins.def.
-let MinVersion = CL20 in {
+let Extension = FuncExtOpenCLCGenericAddressSpace in {
// The OpenCL 3.0 specification defines these with a "gentype" argument indicating any builtin
// type or user-defined type, which cannot be represented currently. Hence we slightly diverge
// by providing only the following overloads with a void pointer.
@@ -1099,42 +1079,61 @@ let Extension = FuncExtOpenCLCxx in {
}
// OpenCL v2.0 s6.13.11 - Atomic Functions.
-let MinVersion = CL20 in {
- def : Builtin<"atomic_work_item_fence", [Void, MemFenceFlags, MemoryOrder, MemoryScope]>;
+// An atomic builtin with 2 additional _explicit variants.
+multiclass BuiltinAtomicExplicit<string Name, list<Type> Types, FunctionExtension BaseExt> {
+ // Without explicit MemoryOrder or MemoryScope.
+ let Extension = concatExtension<BaseExt, "__opencl_c_atomic_order_seq_cst __opencl_c_atomic_scope_device">.ret in {
+ def : Builtin<Name, Types>;
+ }
+
+ // With an explicit MemoryOrder argument.
+ let Extension = concatExtension<BaseExt, "__opencl_c_atomic_scope_device">.ret in {
+ def : Builtin<Name # "_explicit", !listconcat(Types, [MemoryOrder])>;
+ }
+
+ // With explicit MemoryOrder and MemoryScope arguments.
+ let Extension = BaseExt in {
+ def : Builtin<Name # "_explicit", !listconcat(Types, [MemoryOrder, MemoryScope])>;
+ }
+}
+
+// OpenCL 2.0 atomic functions that have a pointer argument in a given address space.
+multiclass OpenCL2Atomics<AddressSpace addrspace, FunctionExtension BaseExt> {
foreach TypePair = [[AtomicInt, Int], [AtomicUInt, UInt],
[AtomicLong, Long], [AtomicULong, ULong],
[AtomicFloat, Float], [AtomicDouble, Double]] in {
- def : Builtin<"atomic_init",
- [Void, PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[1]]>;
- def : Builtin<"atomic_store",
- [Void, PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[1]]>;
- def : Builtin<"atomic_store_explicit",
- [Void, PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[1], MemoryOrder]>;
- def : Builtin<"atomic_store_explicit",
- [Void, PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[1], MemoryOrder, MemoryScope]>;
- def : Builtin<"atomic_load",
- [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>]>;
- def : Builtin<"atomic_load_explicit",
- [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, MemoryOrder]>;
- def : Builtin<"atomic_load_explicit",
- [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, MemoryOrder, MemoryScope]>;
- def : Builtin<"atomic_exchange",
- [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[1]]>;
- def : Builtin<"atomic_exchange_explicit",
- [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[1], MemoryOrder]>;
- def : Builtin<"atomic_exchange_explicit",
- [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[1], MemoryOrder, MemoryScope]>;
+ let Extension = BaseExt in {
+ def : Builtin<"atomic_init",
+ [Void, PointerType<VolatileType<TypePair[0]>, addrspace>, TypePair[1]]>;
+ }
+ defm : BuiltinAtomicExplicit<"atomic_store",
+ [Void, PointerType<VolatileType<TypePair[0]>, addrspace>, TypePair[1]], BaseExt>;
+ defm : BuiltinAtomicExplicit<"atomic_load",
+ [TypePair[1], PointerType<VolatileType<TypePair[0]>, addrspace>], BaseExt>;
+ defm : BuiltinAtomicExplicit<"atomic_exchange",
+ [TypePair[1], PointerType<VolatileType<TypePair[0]>, addrspace>, TypePair[1]], BaseExt>;
foreach Variant = ["weak", "strong"] in {
- def : Builtin<"atomic_compare_exchange_" # Variant,
- [Bool, PointerType<VolatileType<TypePair[0]>, GenericAS>,
- PointerType<TypePair[1], GenericAS>, TypePair[1]]>;
- def : Builtin<"atomic_compare_exchange_" # Variant # "_explicit",
- [Bool, PointerType<VolatileType<TypePair[0]>, GenericAS>,
- PointerType<TypePair[1], GenericAS>, TypePair[1], MemoryOrder, MemoryOrder]>;
- def : Builtin<"atomic_compare_exchange_" # Variant # "_explicit",
- [Bool, PointerType<VolatileType<TypePair[0]>, GenericAS>,
- PointerType<TypePair[1], GenericAS>, TypePair[1], MemoryOrder, MemoryOrder, MemoryScope]>;
+ foreach exp_ptr_addrspace = !cond(
+ !eq(BaseExt, FuncExtOpenCLCGenericAddressSpace): [GenericAS],
+ !eq(BaseExt, FuncExtOpenCLCNamedAddressSpaceBuiltins): [GlobalAS, LocalAS, PrivateAS])
+ in {
+ let Extension = concatExtension<BaseExt, "__opencl_c_atomic_order_seq_cst __opencl_c_atomic_scope_device">.ret in {
+ def : Builtin<"atomic_compare_exchange_" # Variant,
+ [Bool, PointerType<VolatileType<TypePair[0]>, addrspace>,
+ PointerType<TypePair[1], exp_ptr_addrspace>, TypePair[1]]>;
+ }
+ let Extension = concatExtension<BaseExt, "__opencl_c_atomic_scope_device">.ret in {
+ def : Builtin<"atomic_compare_exchange_" # Variant # "_explicit",
+ [Bool, PointerType<VolatileType<TypePair[0]>, addrspace>,
+ PointerType<TypePair[1], exp_ptr_addrspace>, TypePair[1], MemoryOrder, MemoryOrder]>;
+ }
+ let Extension = BaseExt in {
+ def : Builtin<"atomic_compare_exchange_" # Variant # "_explicit",
+ [Bool, PointerType<VolatileType<TypePair[0]>, addrspace>,
+ PointerType<TypePair[1], exp_ptr_addrspace>, TypePair[1], MemoryOrder, MemoryOrder, MemoryScope]>;
+ }
+ }
}
}
@@ -1142,39 +1141,71 @@ let MinVersion = CL20 in {
[AtomicLong, Long, Long], [AtomicULong, ULong, ULong],
[AtomicUIntPtr, UIntPtr, PtrDiff]] in {
foreach ModOp = ["add", "sub"] in {
- def : Builtin<"atomic_fetch_" # ModOp,
- [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[2]]>;
- def : Builtin<"atomic_fetch_" # ModOp # "_explicit",
- [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[2], MemoryOrder]>;
- def : Builtin<"atomic_fetch_" # ModOp # "_explicit",
- [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[2], MemoryOrder, MemoryScope]>;
+ defm : BuiltinAtomicExplicit<"atomic_fetch_" # ModOp,
+ [TypePair[1], PointerType<VolatileType<TypePair[0]>, addrspace>, TypePair[2]], BaseExt>;
}
}
foreach TypePair = [[AtomicInt, Int, Int], [AtomicUInt, UInt, UInt],
[AtomicLong, Long, Long], [AtomicULong, ULong, ULong]] in {
foreach ModOp = ["or", "xor", "and", "min", "max"] in {
- def : Builtin<"atomic_fetch_" # ModOp,
- [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[2]]>;
- def : Builtin<"atomic_fetch_" # ModOp # "_explicit",
- [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[2], MemoryOrder]>;
- def : Builtin<"atomic_fetch_" # ModOp # "_explicit",
- [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[2], MemoryOrder, MemoryScope]>;
+ defm : BuiltinAtomicExplicit<"atomic_fetch_" # ModOp,
+ [TypePair[1], PointerType<VolatileType<TypePair[0]>, addrspace>, TypePair[2]], BaseExt>;
}
}
- def : Builtin<"atomic_flag_clear",
- [Void, PointerType<VolatileType<AtomicFlag>, GenericAS>]>;
- def : Builtin<"atomic_flag_clear_explicit",
- [Void, PointerType<VolatileType<AtomicFlag>, GenericAS>, MemoryOrder]>;
- def : Builtin<"atomic_flag_clear_explicit",
- [Void, PointerType<VolatileType<AtomicFlag>, GenericAS>, MemoryOrder, MemoryScope]>;
+ defm : BuiltinAtomicExplicit<"atomic_flag_clear",
+ [Void, PointerType<VolatileType<AtomicFlag>, addrspace>], BaseExt>;
+
+ defm : BuiltinAtomicExplicit<"atomic_flag_test_and_set",
+ [Bool, PointerType<VolatileType<AtomicFlag>, addrspace>], BaseExt>;
+}
+
+let MinVersion = CL20 in {
+ def : Builtin<"atomic_work_item_fence", [Void, MemFenceFlags, MemoryOrder, MemoryScope]>;
+
+ defm : OpenCL2Atomics<GenericAS, FuncExtOpenCLCGenericAddressSpace>;
+ defm : OpenCL2Atomics<GlobalAS, FuncExtOpenCLCNamedAddressSpaceBuiltins>;
+ defm : OpenCL2Atomics<LocalAS, FuncExtOpenCLCNamedAddressSpaceBuiltins>;
+}
+
+// The functionality added by cl_ext_float_atomics extension
+let MinVersion = CL20 in {
+ foreach addrspace = [GlobalAS, LocalAS, GenericAS] in {
+ defvar extension_fp16 = !cast<FunctionExtension>("FuncExtFloatAtomicsFp16" # addrspace # "LoadStore");
+
+ defm : BuiltinAtomicExplicit<"atomic_store",
+ [Void, PointerType<VolatileType<AtomicHalf>, addrspace>, AtomicHalf], extension_fp16>;
+ defm : BuiltinAtomicExplicit<"atomic_load",
+ [Half, PointerType<VolatileType<AtomicHalf>, addrspace>], extension_fp16>;
+ defm : BuiltinAtomicExplicit<"atomic_exchange",
+ [Half, PointerType<VolatileType<AtomicHalf>, addrspace>, Half], extension_fp16>;
+
+ foreach ModOp = ["add", "sub"] in {
+ defvar extension_fp16 = !cast<FunctionExtension>("FuncExtFloatAtomicsFp16" # addrspace # "Add");
+ defvar extension_fp32 = !cast<FunctionExtension>("FuncExtFloatAtomicsFp32" # addrspace # "Add");
+ defvar extension_fp64 = !cast<FunctionExtension>("FuncExtFloatAtomicsFp64" # addrspace # "Add");
+
+ defm : BuiltinAtomicExplicit<"atomic_fetch_" # ModOp,
+ [Half, PointerType<VolatileType<AtomicHalf>, addrspace>, Half], extension_fp16>;
+ defm : BuiltinAtomicExplicit<"atomic_fetch_" # ModOp,
+ [Float, PointerType<VolatileType<AtomicFloat>, addrspace>, Float], extension_fp32>;
+ defm : BuiltinAtomicExplicit<"atomic_fetch_" # ModOp,
+ [Double, PointerType<VolatileType<AtomicDouble>, addrspace>, Double], extension_fp64>;
+ }
+
+ foreach ModOp = ["min", "max"] in {
+ defvar extension_fp16 = !cast<FunctionExtension>("FuncExtFloatAtomicsFp16" # addrspace # "MinMax");
+ defvar extension_fp32 = !cast<FunctionExtension>("FuncExtFloatAtomicsFp32" # addrspace # "MinMax");
+ defvar extension_fp64 = !cast<FunctionExtension>("FuncExtFloatAtomicsFp64" # addrspace # "MinMax");
- def : Builtin<"atomic_flag_test_and_set",
- [Bool, PointerType<VolatileType<AtomicFlag>, GenericAS>]>;
- def : Builtin<"atomic_flag_test_and_set_explicit",
- [Bool, PointerType<VolatileType<AtomicFlag>, GenericAS>, MemoryOrder]>;
- def : Builtin<"atomic_flag_test_and_set_explicit",
- [Bool, PointerType<VolatileType<AtomicFlag>, GenericAS>, MemoryOrder, MemoryScope]>;
+ defm : BuiltinAtomicExplicit<"atomic_fetch_" # ModOp,
+ [Half, PointerType<VolatileType<AtomicHalf>, addrspace>, Half], extension_fp16>;
+ defm : BuiltinAtomicExplicit<"atomic_fetch_" # ModOp,
+ [Float, PointerType<VolatileType<AtomicFloat>, addrspace>, Float], extension_fp32>;
+ defm : BuiltinAtomicExplicit<"atomic_fetch_" # ModOp,
+ [Double, PointerType<VolatileType<AtomicDouble>, addrspace>, Double], extension_fp64>;
+ }
+ }
}
//--------------------------------------------------------------------
@@ -1241,30 +1272,35 @@ foreach coordTy = [Int, Float] in {
}
// --- Table 23: Sampler-less Read Functions ---
+multiclass ImageReadSamplerless<string aQual> {
+ foreach imgTy = [Image2d, Image1dArray] in {
+ def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>], Attr.Pure>;
+ def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>], Attr.Pure>;
+ def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>], Attr.Pure>;
+ }
+ foreach imgTy = [Image3d, Image2dArray] in {
+ def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>], Attr.Pure>;
+ def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>], Attr.Pure>;
+ def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>], Attr.Pure>;
+ }
+ foreach imgTy = [Image1d, Image1dBuffer] in {
+ def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, Int], Attr.Pure>;
+ def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, Int], Attr.Pure>;
+ def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, Int], Attr.Pure>;
+ }
+ def : Builtin<"read_imagef", [Float, ImageType<Image2dDepth, aQual>, VectorType<Int, 2>], Attr.Pure>;
+ def : Builtin<"read_imagef", [Float, ImageType<Image2dArrayDepth, aQual>, VectorType<Int, 4>], Attr.Pure>;
+}
+
let MinVersion = CL12 in {
- foreach aQual = ["RO", "RW"] in {
- foreach imgTy = [Image2d, Image1dArray] in {
- def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>], Attr.Pure>;
- def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>], Attr.Pure>;
- def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>], Attr.Pure>;
- }
- foreach imgTy = [Image3d, Image2dArray] in {
- def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>], Attr.Pure>;
- def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>], Attr.Pure>;
- def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>], Attr.Pure>;
- }
- foreach imgTy = [Image1d, Image1dBuffer] in {
- def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, Int], Attr.Pure>;
- def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, Int], Attr.Pure>;
- def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, Int], Attr.Pure>;
- }
- def : Builtin<"read_imagef", [Float, ImageType<Image2dDepth, aQual>, VectorType<Int, 2>], Attr.Pure>;
- def : Builtin<"read_imagef", [Float, ImageType<Image2dArrayDepth, aQual>, VectorType<Int, 4>], Attr.Pure>;
+ defm : ImageReadSamplerless<"RO">;
+ let Extension = FuncExtOpenCLCReadWriteImages in {
+ defm : ImageReadSamplerless<"RW">;
}
}
// --- Table 24: Image Write Functions ---
-foreach aQual = ["WO", "RW"] in {
+multiclass ImageWrite<string aQual> {
foreach imgTy = [Image2d] in {
def : Builtin<"write_imagef", [Void, ImageType<imgTy, aQual>, VectorType<Int, 2>, VectorType<Float, 4>]>;
def : Builtin<"write_imagei", [Void, ImageType<imgTy, aQual>, VectorType<Int, 2>, VectorType<Int, 4>]>;
@@ -1294,8 +1330,13 @@ foreach aQual = ["WO", "RW"] in {
def : Builtin<"write_imagef", [Void, ImageType<Image2dArrayDepth, aQual>, VectorType<Int, 4>, Float]>;
}
+defm : ImageWrite<"WO">;
+let Extension = FuncExtOpenCLCReadWriteImages in {
+ defm : ImageWrite<"RW">;
+}
+
// --- Table 25: Image Query Functions ---
-foreach aQual = ["RO", "WO", "RW"] in {
+multiclass ImageQuery<string aQual> {
foreach imgTy = [Image1d, Image1dBuffer, Image2d, Image3d,
Image1dArray, Image2dArray, Image2dDepth,
Image2dArrayDepth] in {
@@ -1319,6 +1360,12 @@ foreach aQual = ["RO", "WO", "RW"] in {
}
}
+defm : ImageQuery<"RO">;
+defm : ImageQuery<"WO">;
+let Extension = FuncExtOpenCLCReadWriteImages in {
+ defm : ImageQuery<"RW">;
+}
+
// OpenCL extension v2.0 s5.1.9: Built-in Image Read Functions
// --- Table 8 ---
foreach aQual = ["RO"] in {
@@ -1339,7 +1386,7 @@ foreach aQual = ["RO"] in {
// OpenCL extension v2.0 s5.1.10: Built-in Image Sampler-less Read Functions
// --- Table 9 ---
let MinVersion = CL12 in {
- foreach aQual = ["RO", "RW"] in {
+ multiclass ImageReadHalf<string aQual> {
foreach name = ["read_imageh"] in {
foreach imgTy = [Image2d, Image1dArray] in {
def : Builtin<name, [VectorType<Half, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>], Attr.Pure>;
@@ -1352,10 +1399,14 @@ let MinVersion = CL12 in {
}
}
}
+ defm : ImageReadHalf<"RO">;
+ let Extension = FuncExtOpenCLCReadWriteImages in {
+ defm : ImageReadHalf<"RW">;
+ }
}
// OpenCL extension v2.0 s5.1.11: Built-in Image Write Functions
// --- Table 10 ---
-foreach aQual = ["WO", "RW"] in {
+multiclass ImageWriteHalf<string aQual> {
foreach name = ["write_imageh"] in {
def : Builtin<name, [Void, ImageType<Image2d, aQual>, VectorType<Int, 2>, VectorType<Half, 4>]>;
def : Builtin<name, [Void, ImageType<Image2dArray, aQual>, VectorType<Int, 4>, VectorType<Half, 4>]>;
@@ -1366,11 +1417,17 @@ foreach aQual = ["WO", "RW"] in {
}
}
+defm : ImageWriteHalf<"WO">;
+let Extension = FuncExtOpenCLCReadWriteImages in {
+ defm : ImageWriteHalf<"RW">;
+}
+
+
//--------------------------------------------------------------------
// OpenCL v2.0 s6.13.15 - Work-group Functions
// --- Table 26 ---
-let MinVersion = CL20 in {
+let Extension = FuncExtOpenCLCWGCollectiveFunctions in {
foreach name = ["work_group_all", "work_group_any"] in {
def : Builtin<name, [Int, Int], Attr.Convergent>;
}
@@ -1395,7 +1452,9 @@ let MinVersion = CL20 in {
// --- Table 28 ---
// Builtins taking pipe arguments are defined in Builtins.def
-def : Builtin<"is_valid_reserve_id", [Bool, ReserveId]>;
+let Extension = FuncExtOpenCLCPipes in {
+ def : Builtin<"is_valid_reserve_id", [Bool, ReserveId]>;
+}
// --- Table 29 ---
// Defined in Builtins.def
@@ -1410,7 +1469,7 @@ def : Builtin<"is_valid_reserve_id", [Bool, ReserveId]>;
// Defined in Builtins.def
// --- Table 33 ---
-let MinVersion = CL20 in {
+let Extension = FuncExtOpenCLCDeviceEnqueue in {
def : Builtin<"enqueue_marker",
[Int, Queue, UInt, PointerType<ConstType<ClkEvent>, GenericAS>, PointerType<ClkEvent, GenericAS>]>;
@@ -1537,14 +1596,21 @@ let Extension = FuncExtKhrMipmapImage in {
}
}
}
- // Added to section 6.13.14.5
- foreach aQual = ["RO", "WO", "RW"] in {
- foreach imgTy = [Image1d, Image2d, Image3d, Image1dArray, Image2dArray, Image2dDepth, Image2dArrayDepth] in {
- def : Builtin<"get_image_num_mip_levels", [Int, ImageType<imgTy, aQual>]>;
- }
+}
+
+// Added to section 6.13.14.5
+multiclass ImageQueryNumMipLevels<string aQual> {
+ foreach imgTy = [Image1d, Image2d, Image3d, Image1dArray, Image2dArray, Image2dDepth, Image2dArrayDepth] in {
+ def : Builtin<"get_image_num_mip_levels", [Int, ImageType<imgTy, aQual>]>;
}
}
+let Extension = FuncExtKhrMipmapImage in {
+ defm : ImageQueryNumMipLevels<"RO">;
+ defm : ImageQueryNumMipLevels<"WO">;
+ defm : ImageQueryNumMipLevels<"RW">;
+}
+
// Write functions are enabled using a separate extension.
let Extension = FuncExtKhrMipmapImageWrites in {
// Added to section 6.13.14.4.
@@ -1571,53 +1637,57 @@ let Extension = FuncExtKhrMipmapImageWrites in {
def : Builtin<"write_imageui", [Void, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int, VectorType<UInt, 4>]>;
}
def : Builtin<"write_imagef", [Void, ImageType<Image2dArrayDepth, aQual>, VectorType<Int, 4>, Int, Float]>;
- let Extension = FuncExtKhrMipmapWritesAndWrite3d in {
- foreach imgTy = [Image3d] in {
- def : Builtin<"write_imagef", [Void, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int, VectorType<Float, 4>]>;
- def : Builtin<"write_imagei", [Void, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int, VectorType<Int, 4>]>;
- def : Builtin<"write_imageui", [Void, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int, VectorType<UInt, 4>]>;
- }
+ foreach imgTy = [Image3d] in {
+ def : Builtin<"write_imagef", [Void, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int, VectorType<Float, 4>]>;
+ def : Builtin<"write_imagei", [Void, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int, VectorType<Int, 4>]>;
+ def : Builtin<"write_imageui", [Void, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int, VectorType<UInt, 4>]>;
}
}
}
//--------------------------------------------------------------------
// OpenCL Extension v2.0 s18.3 - Creating OpenCL Memory Objects from OpenGL MSAA Textures
-let Extension = FuncExtKhrGlMsaaSharing in {
- // --- Table 6.13.14.3 ---
- foreach aQual = ["RO", "RW"] in {
- foreach imgTy = [Image2dMsaa] in {
- def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>, Int], Attr.Pure>;
- def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>, Int], Attr.Pure>;
- def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>, Int], Attr.Pure>;
- }
- foreach imgTy = [Image2dArrayMsaa] in {
- def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int], Attr.Pure>;
- def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int], Attr.Pure>;
- def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int], Attr.Pure>;
- }
- foreach name = ["read_imagef"] in {
- def : Builtin<name, [Float, ImageType<Image2dMsaaDepth, aQual>, VectorType<Int, 2>, Int], Attr.Pure>;
- def : Builtin<name, [Float, ImageType<Image2dArrayMsaaDepth, aQual>, VectorType<Int, 4>, Int], Attr.Pure>;
+// --- Table 6.13.14.3 ---
+multiclass ImageReadMsaa<string aQual> {
+ foreach imgTy = [Image2dMsaa] in {
+ def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>, Int], Attr.Pure>;
+ def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>, Int], Attr.Pure>;
+ def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>, Int], Attr.Pure>;
+ }
+ foreach imgTy = [Image2dArrayMsaa] in {
+ def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int], Attr.Pure>;
+ def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int], Attr.Pure>;
+ def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int], Attr.Pure>;
+ }
+ foreach name = ["read_imagef"] in {
+ def : Builtin<name, [Float, ImageType<Image2dMsaaDepth, aQual>, VectorType<Int, 2>, Int], Attr.Pure>;
+ def : Builtin<name, [Float, ImageType<Image2dArrayMsaaDepth, aQual>, VectorType<Int, 4>, Int], Attr.Pure>;
+ }
+}
+
+// --- Table 6.13.14.5 ---
+multiclass ImageQueryMsaa<string aQual> {
+ foreach imgTy = [Image2dMsaa, Image2dArrayMsaa, Image2dMsaaDepth, Image2dArrayMsaaDepth] in {
+ foreach name = ["get_image_width", "get_image_height",
+ "get_image_channel_data_type", "get_image_channel_order",
+ "get_image_num_samples"] in {
+ def : Builtin<name, [Int, ImageType<imgTy, aQual>], Attr.Const>;
}
+ def : Builtin<"get_image_dim", [VectorType<Int, 2>, ImageType<imgTy, aQual>], Attr.Const>;
}
-
- // --- Table 6.13.14.5 ---
- foreach aQual = ["RO", "WO", "RW"] in {
- foreach imgTy = [Image2dMsaa, Image2dArrayMsaa, Image2dMsaaDepth, Image2dArrayMsaaDepth] in {
- foreach name = ["get_image_width", "get_image_height",
- "get_image_channel_data_type", "get_image_channel_order",
- "get_image_num_samples"] in {
- def : Builtin<name, [Int, ImageType<imgTy, aQual>], Attr.Const>;
- }
- def : Builtin<"get_image_dim", [VectorType<Int, 2>, ImageType<imgTy, aQual>], Attr.Const>;
- }
- foreach imgTy = [Image2dArrayMsaa, Image2dArrayMsaaDepth] in {
- def : Builtin<"get_image_array_size", [Size, ImageType<imgTy, aQual>], Attr.Const>;
- }
+ foreach imgTy = [Image2dArrayMsaa, Image2dArrayMsaaDepth] in {
+ def : Builtin<"get_image_array_size", [Size, ImageType<imgTy, aQual>], Attr.Const>;
}
}
+let Extension = FuncExtKhrGlMsaaSharing in {
+ defm : ImageReadMsaa<"RO">;
+ defm : ImageQueryMsaa<"RO">;
+ defm : ImageQueryMsaa<"WO">;
+ defm : ImageReadMsaa<"RW">;
+ defm : ImageQueryMsaa<"RW">;
+}
+
//--------------------------------------------------------------------
// OpenCL Extension v2.0 s28 - Subgroups
// --- Table 28.2.1 ---
@@ -1637,7 +1707,9 @@ let Extension = FuncExtKhrSubgroups in {
// --- Table 28.2.2 ---
let Extension = FuncExtKhrSubgroups in {
def : Builtin<"sub_group_barrier", [Void, MemFenceFlags], Attr.Convergent>;
- def : Builtin<"sub_group_barrier", [Void, MemFenceFlags, MemoryScope], Attr.Convergent>;
+ let MinVersion = CL20 in {
+ def : Builtin<"sub_group_barrier", [Void, MemFenceFlags, MemoryScope], Attr.Convergent>;
+ }
}
// --- Table 28.2.4 ---
@@ -1774,6 +1846,12 @@ let Extension = FunctionExtension<"__opencl_c_integer_dot_product_input_4x8bit_p
def : Builtin<"dot_acc_sat_4x8packed_su_int", [Int, UInt, UInt, Int], Attr.Const>;
}
+// Section 48.3 - cl_khr_subgroup_rotate
+let Extension = FunctionExtension<"cl_khr_subgroup_rotate"> in {
+ def : Builtin<"sub_group_rotate", [AGenType1, AGenType1, Int], Attr.Convergent>;
+ def : Builtin<"sub_group_clustered_rotate", [AGenType1, AGenType1, Int, UInt], Attr.Convergent>;
+}
+
//--------------------------------------------------------------------
// Arm extensions.
let Extension = ArmIntegerDotProductInt8 in {
diff --git a/contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp b/contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp
index ed03b0c7f688..06c213267c7e 100644
--- a/contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp
@@ -19,15 +19,12 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/ManagedStatic.h"
#include <cassert>
#include <cstddef>
#include <utility>
using namespace clang;
-LLVM_INSTANTIATE_REGISTRY(ParsedAttrInfoRegistry)
-
IdentifierLoc *IdentifierLoc::create(ASTContext &Ctx, SourceLocation Loc,
IdentifierInfo *Ident) {
IdentifierLoc *Result = new (Ctx) IdentifierLoc;
@@ -111,7 +108,7 @@ namespace {
const ParsedAttrInfo &ParsedAttrInfo::get(const AttributeCommonInfo &A) {
// If we have a ParsedAttrInfo for this ParsedAttr then return that.
- if ((size_t)A.getParsedKind() < llvm::array_lengthof(AttrInfoMap))
+ if ((size_t)A.getParsedKind() < std::size(AttrInfoMap))
return *AttrInfoMap[A.getParsedKind()];
// If this is an ignored attribute then return an appropriate ParsedAttrInfo.
@@ -120,13 +117,7 @@ const ParsedAttrInfo &ParsedAttrInfo::get(const AttributeCommonInfo &A) {
if (A.getParsedKind() == AttributeCommonInfo::IgnoredAttribute)
return IgnoredParsedAttrInfo;
- // Otherwise this may be an attribute defined by a plugin. First instantiate
- // all plugin attributes if we haven't already done so.
- static llvm::ManagedStatic<std::list<std::unique_ptr<ParsedAttrInfo>>>
- PluginAttrInstances;
- if (PluginAttrInstances->empty())
- for (auto It : ParsedAttrInfoRegistry::entries())
- PluginAttrInstances->emplace_back(It.instantiate());
+ // Otherwise this may be an attribute defined by a plugin.
// Search for a ParsedAttrInfo whose name and syntax match.
std::string FullName = A.getNormalizedFullName();
@@ -134,10 +125,9 @@ const ParsedAttrInfo &ParsedAttrInfo::get(const AttributeCommonInfo &A) {
if (SyntaxUsed == AttributeCommonInfo::AS_ContextSensitiveKeyword)
SyntaxUsed = AttributeCommonInfo::AS_Keyword;
- for (auto &Ptr : *PluginAttrInstances)
- for (auto &S : Ptr->Spellings)
- if (S.Syntax == SyntaxUsed && S.NormalizedFullName == FullName)
- return *Ptr;
+ for (auto &Ptr : getAttributePluginInstances())
+ if (Ptr->hasSpelling(SyntaxUsed, FullName))
+ return *Ptr;
// If we failed to find a match then return a default ParsedAttrInfo.
static const ParsedAttrInfo DefaultParsedAttrInfo(
@@ -145,12 +135,20 @@ const ParsedAttrInfo &ParsedAttrInfo::get(const AttributeCommonInfo &A) {
return DefaultParsedAttrInfo;
}
+ArrayRef<const ParsedAttrInfo *> ParsedAttrInfo::getAllBuiltin() {
+ return llvm::ArrayRef(AttrInfoMap);
+}
+
unsigned ParsedAttr::getMinArgs() const { return getInfo().NumArgs; }
unsigned ParsedAttr::getMaxArgs() const {
return getMinArgs() + getInfo().OptArgs;
}
+unsigned ParsedAttr::getNumArgMembers() const {
+ return getInfo().NumArgMembers;
+}
+
bool ParsedAttr::hasCustomParsing() const {
return getInfo().HasCustomParsing;
}
@@ -180,7 +178,10 @@ void ParsedAttr::getMatchRules(
}
bool ParsedAttr::diagnoseLangOpts(Sema &S) const {
- return getInfo().diagLangOpts(S, *this);
+ if (getInfo().acceptsLangOpts(S.getLangOpts()))
+ return true;
+ S.Diag(getLoc(), diag::warn_attribute_ignored) << *this;
+ return false;
}
bool ParsedAttr::isTargetSpecificAttr() const {
@@ -192,7 +193,18 @@ bool ParsedAttr::isTypeAttr() const { return getInfo().IsType; }
bool ParsedAttr::isStmtAttr() const { return getInfo().IsStmt; }
bool ParsedAttr::existsInTarget(const TargetInfo &Target) const {
- return getInfo().existsInTarget(Target);
+ Kind K = getParsedKind();
+
+ // If the attribute has a target-specific spelling, check that it exists.
+ // Only call this if the attr is not ignored/unknown. For most targets, this
+ // function just returns true.
+ bool HasSpelling = K != IgnoredAttribute && K != UnknownAttribute &&
+ K != NoSemaHandlerAttribute;
+ bool TargetSpecificSpellingExists =
+ !HasSpelling ||
+ getInfo().spellingExistsInTarget(Target, getAttributeSpellingListIndex());
+
+ return getInfo().existsInTarget(Target) && TargetSpecificSpellingExists;
}
bool ParsedAttr::isKnownToGCC() const { return getInfo().IsKnownToGCC; }
@@ -201,6 +213,46 @@ bool ParsedAttr::isSupportedByPragmaAttribute() const {
return getInfo().IsSupportedByPragmaAttribute;
}
+bool ParsedAttr::slidesFromDeclToDeclSpecLegacyBehavior() const {
+ if (isRegularKeywordAttribute())
+ // The appurtenance rules are applied strictly for all regular keyword
+ // atributes.
+ return false;
+
+ assert(isStandardAttributeSyntax());
+
+ // We have historically allowed some type attributes with standard attribute
+ // syntax to slide to the decl-specifier-seq, so we have to keep supporting
+ // it. This property is consciously not defined as a flag in Attr.td because
+ // we don't want new attributes to specify it.
+ //
+ // Note: No new entries should be added to this list. Entries should be
+ // removed from this list after a suitable deprecation period, provided that
+ // there are no compatibility considerations with other compilers. If
+ // possible, we would like this list to go away entirely.
+ switch (getParsedKind()) {
+ case AT_AddressSpace:
+ case AT_OpenCLPrivateAddressSpace:
+ case AT_OpenCLGlobalAddressSpace:
+ case AT_OpenCLGlobalDeviceAddressSpace:
+ case AT_OpenCLGlobalHostAddressSpace:
+ case AT_OpenCLLocalAddressSpace:
+ case AT_OpenCLConstantAddressSpace:
+ case AT_OpenCLGenericAddressSpace:
+ case AT_NeonPolyVectorType:
+ case AT_NeonVectorType:
+ case AT_ArmMveStrictPolymorphism:
+ case AT_BTFTypeTag:
+ case AT_ObjCGC:
+ case AT_MatrixType:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool ParsedAttr::acceptsExprPack() const { return getInfo().AcceptsExprPack; }
+
unsigned ParsedAttr::getSemanticSpelling() const {
return getInfo().spellingIndexToSemanticSpelling(*this);
}
@@ -213,6 +265,14 @@ bool ParsedAttr::hasVariadicArg() const {
return getInfo().OptArgs == 15;
}
+bool ParsedAttr::isParamExpr(size_t N) const {
+ return getInfo().isParamExpr(N);
+}
+
+void ParsedAttr::handleAttrWithDelayedArgs(Sema &S, Decl *D) const {
+ ::handleAttrWithDelayedArgs(S, D, *this);
+}
+
static unsigned getNumAttributeArgs(const ParsedAttr &AL) {
// FIXME: Include the type in the argument list.
return AL.getNumArgs() + AL.hasParsedType();
@@ -244,3 +304,20 @@ bool ParsedAttr::checkAtMostNumArgs(Sema &S, unsigned Num) const {
diag::err_attribute_too_many_arguments,
std::greater<unsigned>());
}
+
+void clang::takeAndConcatenateAttrs(ParsedAttributes &First,
+ ParsedAttributes &Second,
+ ParsedAttributes &Result) {
+ // Note that takeAllFrom() puts the attributes at the beginning of the list,
+ // so to obtain the correct ordering, we add `Second`, then `First`.
+ Result.takeAllFrom(Second);
+ Result.takeAllFrom(First);
+ if (First.Range.getBegin().isValid())
+ Result.Range.setBegin(First.Range.getBegin());
+ else
+ Result.Range.setBegin(Second.Range.getBegin());
+ if (Second.Range.getEnd().isValid())
+ Result.Range.setEnd(Second.Range.getEnd());
+ else
+ Result.Range.setEnd(First.Range.getEnd());
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/Scope.cpp b/contrib/llvm-project/clang/lib/Sema/Scope.cpp
index 51b0b24e57b7..4570d8c615fe 100644
--- a/contrib/llvm-project/clang/lib/Sema/Scope.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/Scope.cpp
@@ -43,6 +43,9 @@ void Scope::setFlags(Scope *parent, unsigned flags) {
FunctionPrototypeScope | AtCatchScope | ObjCMethodScope)) ==
0)
Flags |= parent->getFlags() & OpenMPSimdDirectiveScope;
+ // transmit the parent's 'order' flag, if exists
+ if (parent->getFlags() & OpenMPOrderClauseScope)
+ Flags |= OpenMPOrderClauseScope;
} else {
Depth = 0;
PrototypeDepth = 0;
@@ -67,8 +70,10 @@ void Scope::setFlags(Scope *parent, unsigned flags) {
if (flags & BlockScope) BlockParent = this;
if (flags & TemplateParamScope) TemplateParamParent = this;
- // If this is a prototype scope, record that.
- if (flags & FunctionPrototypeScope) PrototypeDepth++;
+ // If this is a prototype scope, record that. Lambdas have an extra prototype
+ // scope that doesn't add any depth.
+ if (flags & FunctionPrototypeScope && !(flags & LambdaScope))
+ PrototypeDepth++;
if (flags & DeclScope) {
if (flags & FunctionPrototypeScope)
@@ -91,7 +96,7 @@ void Scope::Init(Scope *parent, unsigned flags) {
UsingDirectives.clear();
Entity = nullptr;
ErrorTrap.reset();
- NRVO.setPointerAndInt(nullptr, 0);
+ NRVO = std::nullopt;
}
bool Scope::containedInPrototypeScope() const {
@@ -118,19 +123,71 @@ void Scope::AddFlags(unsigned FlagsToSet) {
Flags |= FlagsToSet;
}
-void Scope::mergeNRVOIntoParent() {
- if (VarDecl *Candidate = NRVO.getPointer()) {
- if (isDeclScope(Candidate))
- Candidate->setNRVOVariable(true);
+// The algorithm for updating NRVO candidate is as follows:
+// 1. All previous candidates become invalid because a new NRVO candidate is
+// obtained. Therefore, we need to clear return slots for other
+// variables defined before the current return statement in the current
+// scope and in outer scopes.
+// 2. Store the new candidate if its return slot is available. Otherwise,
+// there is no NRVO candidate so far.
+void Scope::updateNRVOCandidate(VarDecl *VD) {
+ auto UpdateReturnSlotsInScopeForVD = [VD](Scope *S) -> bool {
+ bool IsReturnSlotFound = S->ReturnSlots.contains(VD);
+
+ // We found a candidate variable that can be put into a return slot.
+ // Clear the set, because other variables cannot occupy a return
+ // slot in the same scope.
+ S->ReturnSlots.clear();
+
+ if (IsReturnSlotFound)
+ S->ReturnSlots.insert(VD);
+
+ return IsReturnSlotFound;
+ };
+
+ bool CanBePutInReturnSlot = false;
+
+ for (auto *S = this; S; S = S->getParent()) {
+ CanBePutInReturnSlot |= UpdateReturnSlotsInScopeForVD(S);
+
+ if (S->getEntity())
+ break;
}
- if (getEntity())
+ // Consider the variable as NRVO candidate if the return slot is available
+ // for it in the current scope, or if it can be available in outer scopes.
+ NRVO = CanBePutInReturnSlot ? VD : nullptr;
+}
+
+void Scope::applyNRVO() {
+ // There is no NRVO candidate in the current scope.
+ if (!NRVO.has_value())
return;
- if (NRVO.getInt())
- getParent()->setNoNRVO();
- else if (NRVO.getPointer())
- getParent()->addNRVOCandidate(NRVO.getPointer());
+ if (*NRVO && isDeclScope(*NRVO))
+ (*NRVO)->setNRVOVariable(true);
+
+ // It's necessary to propagate NRVO candidate to the parent scope for cases
+ // when the parent scope doesn't contain a return statement.
+ // For example:
+ // X foo(bool b) {
+ // X x;
+ // if (b)
+ // return x;
+ // exit(0);
+ // }
+ // Also, we need to propagate nullptr value that means NRVO is not
+ // allowed in this scope.
+ // For example:
+ // X foo(bool b) {
+ // X x;
+ // if (b)
+ // return x;
+ // else
+ // return X(); // NRVO is not allowed
+ // }
+ if (!getEntity())
+ getParent()->NRVO = *NRVO;
}
LLVM_DUMP_METHOD void Scope::dump() const { dumpImpl(llvm::errs()); }
@@ -193,8 +250,10 @@ void Scope::dumpImpl(raw_ostream &OS) const {
if (const DeclContext *DC = getEntity())
OS << "Entity : (clang::DeclContext*)" << DC << '\n';
- if (NRVO.getInt())
- OS << "NRVO not allowed\n";
- else if (NRVO.getPointer())
- OS << "NRVO candidate : (clang::VarDecl*)" << NRVO.getPointer() << '\n';
+ if (!NRVO)
+ OS << "there is no NRVO candidate\n";
+ else if (*NRVO)
+ OS << "NRVO candidate : (clang::VarDecl*)" << *NRVO << '\n';
+ else
+ OS << "NRVO is not allowed\n";
}
diff --git a/contrib/llvm-project/clang/lib/Sema/ScopeInfo.cpp b/contrib/llvm-project/clang/lib/Sema/ScopeInfo.cpp
index 4857346018ae..ce90451f2613 100644
--- a/contrib/llvm-project/clang/lib/Sema/ScopeInfo.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/ScopeInfo.cpp
@@ -37,8 +37,9 @@ void FunctionScopeInfo::Clear() {
ObjCIsSecondaryInit = false;
ObjCWarnForNoInitDelegation = false;
FirstReturnLoc = SourceLocation();
- FirstCXXTryLoc = SourceLocation();
+ FirstCXXOrObjCTryLoc = SourceLocation();
FirstSEHTryLoc = SourceLocation();
+ FoundImmediateEscalatingExpression = false;
// Coroutine state
FirstCoroutineStmtLoc = SourceLocation();
@@ -56,6 +57,7 @@ void FunctionScopeInfo::Clear() {
ModifiedNonNullParams.clear();
Blocks.clear();
ByrefBlockVars.clear();
+ AddrLabels.clear();
}
static const NamedDecl *getBestPropertyDecl(const ObjCPropertyRefExpr *PropE) {
@@ -231,14 +233,14 @@ bool CapturingScopeInfo::isVLATypeCaptured(const VariableArrayType *VAT) const {
}
void LambdaScopeInfo::visitPotentialCaptures(
- llvm::function_ref<void(VarDecl *, Expr *)> Callback) const {
+ llvm::function_ref<void(ValueDecl *, Expr *)> Callback) const {
for (Expr *E : PotentiallyCapturingExprs) {
if (auto *DRE = dyn_cast<DeclRefExpr>(E)) {
- Callback(cast<VarDecl>(DRE->getFoundDecl()), E);
+ Callback(cast<ValueDecl>(DRE->getFoundDecl()), E);
} else if (auto *ME = dyn_cast<MemberExpr>(E)) {
- Callback(cast<VarDecl>(ME->getMemberDecl()), E);
+ Callback(cast<ValueDecl>(ME->getMemberDecl()), E);
} else if (auto *FP = dyn_cast<FunctionParmPackExpr>(E)) {
- for (VarDecl *VD : *FP)
+ for (ValueDecl *VD : *FP)
Callback(VD, E);
} else {
llvm_unreachable("unexpected expression in potential captures list");
@@ -246,6 +248,14 @@ void LambdaScopeInfo::visitPotentialCaptures(
}
}
+bool LambdaScopeInfo::lambdaCaptureShouldBeConst() const {
+ if (ExplicitObjectParameter)
+ return ExplicitObjectParameter->getType()
+ .getNonReferenceType()
+ .isConstQualified();
+ return !Mutable;
+}
+
FunctionScopeInfo::~FunctionScopeInfo() { }
BlockScopeInfo::~BlockScopeInfo() { }
CapturedRegionScopeInfo::~CapturedRegionScopeInfo() { }
diff --git a/contrib/llvm-project/clang/lib/Sema/Sema.cpp b/contrib/llvm-project/clang/lib/Sema/Sema.cpp
index 191d89ea75c9..2d4e6d1d058c 100644
--- a/contrib/llvm-project/clang/lib/Sema/Sema.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/Sema.cpp
@@ -33,10 +33,12 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/CXXFieldCollector.h"
#include "clang/Sema/DelayedDiagnostic.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/MultiplexExternalSemaSource.h"
#include "clang/Sema/ObjCMethodList.h"
+#include "clang/Sema/RISCVIntrinsicManager.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaConsumer.h"
@@ -45,8 +47,10 @@
#include "clang/Sema/TemplateInstCallback.h"
#include "clang/Sema/TypoCorrection.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/TimeProfiler.h"
+#include <optional>
using namespace clang;
using namespace sema;
@@ -60,6 +64,16 @@ ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); }
DarwinSDKInfo *
Sema::getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
StringRef Platform) {
+ auto *SDKInfo = getDarwinSDKInfoForAvailabilityChecking();
+ if (!SDKInfo && !WarnedDarwinSDKInfoMissing) {
+ Diag(Loc, diag::warn_missing_sdksettings_for_availability_checking)
+ << Platform;
+ WarnedDarwinSDKInfoMissing = true;
+ }
+ return SDKInfo;
+}
+
+DarwinSDKInfo *Sema::getDarwinSDKInfoForAvailabilityChecking() {
if (CachedDarwinSDKInfo)
return CachedDarwinSDKInfo->get();
auto SDKInfo = parseDarwinSDKInfo(
@@ -71,8 +85,6 @@ Sema::getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
}
if (!SDKInfo)
llvm::consumeError(SDKInfo.takeError());
- Diag(Loc, diag::warn_missing_sdksettings_for_availability_checking)
- << Platform;
CachedDarwinSDKInfo = std::unique_ptr<DarwinSDKInfo>();
return nullptr;
}
@@ -106,6 +118,9 @@ PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
}
}
+ // Shorten the data output if needed
+ Policy.EntireContentsOfLargeArray = false;
+
return Policy;
}
@@ -126,9 +141,9 @@ public:
void reset() { S = nullptr; }
- virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
- SrcMgr::CharacteristicKind FileType,
- FileID PrevFID) override {
+ void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType,
+ FileID PrevFID) override {
if (!S)
return;
switch (Reason) {
@@ -137,9 +152,9 @@ public:
SourceLocation IncludeLoc = SM.getIncludeLoc(SM.getFileID(Loc));
if (IncludeLoc.isValid()) {
if (llvm::timeTraceProfilerEnabled()) {
- const FileEntry *FE = SM.getFileEntryForID(SM.getFileID(Loc));
- llvm::timeTraceProfilerBegin(
- "Source", FE != nullptr ? FE->getName() : StringRef("<unknown>"));
+ OptionalFileEntryRef FE = SM.getFileEntryRefForID(SM.getFileID(Loc));
+ llvm::timeTraceProfilerBegin("Source", FE ? FE->getName()
+ : StringRef("<unknown>"));
}
IncludeStack.push_back(IncludeLoc);
@@ -169,14 +184,14 @@ public:
} // end namespace clang
const unsigned Sema::MaxAlignmentExponent;
-const unsigned Sema::MaximumAlignment;
+const uint64_t Sema::MaximumAlignment;
Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter)
- : ExternalSource(nullptr), isMultiplexExternalSource(false),
- CurFPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp),
- Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()),
- SourceMgr(PP.getSourceManager()), CollectStats(false),
+ : ExternalSource(nullptr), CurFPFeatures(pp.getLangOpts()),
+ LangOpts(pp.getLangOpts()), PP(pp), Context(ctxt), Consumer(consumer),
+ Diags(PP.getDiagnostics()), SourceMgr(PP.getSourceManager()),
+ APINotes(SourceMgr, LangOpts), CollectStats(false),
CodeCompleter(CodeCompleter), CurContext(nullptr),
OriginalLexicalContext(nullptr), MSStructPragmaOn(false),
MSPointerToMemberRepresentationMethod(
@@ -184,15 +199,15 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
VtorDispStack(LangOpts.getVtorDispMode()),
AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)),
DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr),
- CodeSegStack(nullptr), FpPragmaStack(FPOptionsOverride()),
- CurInitSeg(nullptr), VisContext(nullptr),
- PragmaAttributeCurrentTargetDecl(nullptr),
- IsBuildingRecoveryCallExpr(false), Cleanup{}, LateTemplateParser(nullptr),
+ CodeSegStack(nullptr), StrictGuardStackCheckStack(false),
+ FpPragmaStack(FPOptionsOverride()), CurInitSeg(nullptr),
+ VisContext(nullptr), PragmaAttributeCurrentTargetDecl(nullptr),
+ IsBuildingRecoveryCallExpr(false), LateTemplateParser(nullptr),
LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp),
- StdExperimentalNamespaceCache(nullptr), StdInitializerList(nullptr),
- StdCoroutineTraitsCache(nullptr), CXXTypeInfoDecl(nullptr),
- MSVCGuidDecl(nullptr), NSNumberDecl(nullptr), NSValueDecl(nullptr),
- NSStringDecl(nullptr), StringWithUTF8StringMethod(nullptr),
+ StdInitializerList(nullptr), StdCoroutineTraitsCache(nullptr),
+ CXXTypeInfoDecl(nullptr), StdSourceLocationImplDecl(nullptr),
+ NSNumberDecl(nullptr), NSValueDecl(nullptr), NSStringDecl(nullptr),
+ StringWithUTF8StringMethod(nullptr),
ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr),
ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr),
DictionaryWithObjectsMethod(nullptr), GlobalNewDeleteDeclared(false),
@@ -204,10 +219,9 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
ArgumentPackSubstitutionIndex(-1), CurrentInstantiationScope(nullptr),
DisableTypoCorrection(false), TyposCorrected(0), AnalysisWarnings(*this),
ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr),
- CurScope(nullptr), Ident_super(nullptr), Ident___float128(nullptr) {
+ CurScope(nullptr), Ident_super(nullptr) {
assert(pp.TUKind == TUKind);
TUScope = nullptr;
- isConstantEvaluatedOverride = false;
LoadedExternalKnownNamespaces = false;
for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
@@ -222,6 +236,9 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
// Tell diagnostics how to render things from the AST library.
Diags.SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context);
+ // This evaluation context exists to ensure that there's always at least one
+ // valid evaluation context available. It is never removed from the
+ // evaluation stack.
ExprEvalContexts.emplace_back(
ExpressionEvaluationContext::PotentiallyEvaluated, 0, CleanupInfo{},
nullptr, ExpressionEvaluationContextRecord::EK_Other);
@@ -234,6 +251,8 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
SemaPPCallbackHandler = Callbacks.get();
PP.addPPCallbacks(std::move(Callbacks));
SemaPPCallbackHandler->set(*this);
+
+ CurFPFeatures.setFPEvalMethod(PP.getCurrentFPEvalMethod());
}
// Anchor Sema's type info to this TU.
@@ -311,8 +330,9 @@ void Sema::Initialize() {
if (getLangOpts().MSVCCompat) {
if (getLangOpts().CPlusPlus &&
IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end())
- PushOnScopeChains(Context.buildImplicitRecord("type_info", TTK_Class),
- TUScope);
+ PushOnScopeChains(
+ Context.buildImplicitRecord("type_info", TagTypeKind::Class),
+ TUScope);
addImplicitTypedef("size_t", Context.getSizeType());
}
@@ -324,9 +344,12 @@ void Sema::Initialize() {
Context.getTargetInfo().getSupportedOpenCLOpts(), getLangOpts());
addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
addImplicitTypedef("event_t", Context.OCLEventTy);
- if (getLangOpts().OpenCLCPlusPlus || getLangOpts().OpenCLVersion >= 200) {
- addImplicitTypedef("clk_event_t", Context.OCLClkEventTy);
- addImplicitTypedef("queue_t", Context.OCLQueueTy);
+ auto OCLCompatibleVersion = getLangOpts().getOpenCLCompatibleVersion();
+ if (OCLCompatibleVersion >= 200) {
+ if (getLangOpts().OpenCLCPlusPlus || getLangOpts().Blocks) {
+ addImplicitTypedef("clk_event_t", Context.OCLClkEventTy);
+ addImplicitTypedef("queue_t", Context.OCLQueueTy);
+ }
if (getLangOpts().OpenCLPipes)
addImplicitTypedef("reserve_id_t", Context.OCLReserveIDTy);
addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy));
@@ -367,6 +390,11 @@ void Sema::Initialize() {
AddPointerSizeDependentTypes();
}
+ if (getOpenCLOptions().isSupported("cl_khr_fp16", getLangOpts())) {
+ auto AtomicHalfT = Context.getAtomicType(Context.HalfTy);
+ addImplicitTypedef("atomic_half", AtomicHalfT);
+ }
+
std::vector<QualType> Atomic64BitTypes;
if (getOpenCLOptions().isSupported("cl_khr_int64_base_atomics",
getLangOpts()) &&
@@ -389,7 +417,6 @@ void Sema::Initialize() {
}
}
-
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
if (getOpenCLOptions().isSupported(#Ext, getLangOpts())) { \
addImplicitTypedef(#ExtType, Context.Id##Ty); \
@@ -403,13 +430,10 @@ void Sema::Initialize() {
#include "clang/Basic/AArch64SVEACLETypes.def"
}
- if (Context.getTargetInfo().getTriple().isPPC64() &&
- Context.getTargetInfo().hasFeature("paired-vector-memops")) {
- if (Context.getTargetInfo().hasFeature("mma")) {
+ if (Context.getTargetInfo().getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
- }
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
@@ -421,6 +445,13 @@ void Sema::Initialize() {
#include "clang/Basic/RISCVVTypes.def"
}
+ if (Context.getTargetInfo().getTriple().isWasm() &&
+ Context.getTargetInfo().hasFeature("reference-types")) {
+#define WASM_TYPE(Name, Id, SingletonId) \
+ addImplicitTypedef(Name, Context.SingletonId);
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
+ }
+
if (Context.getTargetInfo().hasBuiltinMSVaList()) {
DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list");
if (IdResolver.begin(MSVaList) == IdResolver.end())
@@ -451,13 +482,9 @@ Sema::~Sema() {
= dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
ExternalSema->ForgetSema();
- // If Sema's ExternalSource is the multiplexer - we own it.
- if (isMultiplexExternalSource)
- delete ExternalSource;
-
// Delete cached satisfactions.
std::vector<ConstraintSatisfaction *> Satisfactions;
- Satisfactions.reserve(Satisfactions.size());
+ Satisfactions.reserve(SatisfactionCache.size());
for (auto &Node : SatisfactionCache)
Satisfactions.push_back(&Node);
for (auto *Node : Satisfactions)
@@ -528,12 +555,10 @@ void Sema::addExternalSource(ExternalSemaSource *E) {
return;
}
- if (isMultiplexExternalSource)
- static_cast<MultiplexExternalSemaSource*>(ExternalSource)->addSource(*E);
- else {
- ExternalSource = new MultiplexExternalSemaSource(*ExternalSource, *E);
- isMultiplexExternalSource = true;
- }
+ if (auto *Ex = dyn_cast<MultiplexExternalSemaSource>(ExternalSource))
+ Ex->AddSource(E);
+ else
+ ExternalSource = new MultiplexExternalSemaSource(ExternalSource.get(), E);
}
/// Print out statistics about the semantic analysis.
@@ -548,29 +573,34 @@ void Sema::PrintStats() const {
void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
QualType SrcType,
SourceLocation Loc) {
- Optional<NullabilityKind> ExprNullability = SrcType->getNullability(Context);
+ std::optional<NullabilityKind> ExprNullability = SrcType->getNullability();
if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable &&
*ExprNullability != NullabilityKind::NullableResult))
return;
- Optional<NullabilityKind> TypeNullability = DstType->getNullability(Context);
+ std::optional<NullabilityKind> TypeNullability = DstType->getNullability();
if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull)
return;
Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType;
}
-void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr* E) {
- if (Diags.isIgnored(diag::warn_zero_as_null_pointer_constant,
- E->getBeginLoc()))
- return;
+void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E) {
// nullptr only exists from C++11 on, so don't warn on its absence earlier.
if (!getLangOpts().CPlusPlus11)
return;
if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
return;
- if (E->IgnoreParenImpCasts()->getType()->isNullPtrType())
+
+ const Expr *EStripped = E->IgnoreParenImpCasts();
+ if (EStripped->getType()->isNullPtrType())
+ return;
+ if (isa<GNUNullExpr>(EStripped))
+ return;
+
+ if (Diags.isIgnored(diag::warn_zero_as_null_pointer_constant,
+ E->getBeginLoc()))
return;
// Don't diagnose the conversion from a 0 literal to a null pointer argument
@@ -580,8 +610,16 @@ void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr* E) {
CodeSynthesisContext::RewritingOperatorAsSpaceship)
return;
+ // Ignore null pointers in defaulted comparison operators.
+ FunctionDecl *FD = getCurFunctionDecl();
+ if (FD && FD->isDefaulted()) {
+ return;
+ }
+
// If it is a macro from system header, and if the macro name is not "NULL",
// do not warn.
+ // Note that uses of "NULL" will be ignored above on systems that define it
+ // as __null.
SourceLocation MaybeMacroLoc = E->getBeginLoc();
if (Diags.getSuppressSystemWarnings() &&
SourceMgr.isInSystemMacro(MaybeMacroLoc) &&
@@ -754,15 +792,15 @@ static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
return false;
}
-static bool isFunctionOrVarDeclExternC(NamedDecl *ND) {
- if (auto *FD = dyn_cast<FunctionDecl>(ND))
+static bool isFunctionOrVarDeclExternC(const NamedDecl *ND) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(ND))
return FD->isExternC();
return cast<VarDecl>(ND)->isExternC();
}
/// Determine whether ND is an external-linkage function or variable whose
/// type has no linkage.
-bool Sema::isExternalWithNoLinkageType(ValueDecl *VD) {
+bool Sema::isExternalWithNoLinkageType(const ValueDecl *VD) const {
// Note: it's not quite enough to check whether VD has UniqueExternalLinkage,
// because we also want to catch the case where its type has VisibleNoLinkage,
// which does not affect the linkage of VD.
@@ -794,7 +832,7 @@ void Sema::getUndefinedButUsed(
continue;
}
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
if (FD->isDefined())
continue;
if (FD->isExternallyVisible() &&
@@ -805,7 +843,7 @@ void Sema::getUndefinedButUsed(
if (FD->getBuiltinID())
continue;
} else {
- auto *VD = cast<VarDecl>(ND);
+ const auto *VD = cast<VarDecl>(ND);
if (VD->hasDefinition() != VarDecl::DeclarationOnly)
continue;
if (VD->isExternallyVisible() &&
@@ -832,9 +870,10 @@ static void checkUndefinedButUsed(Sema &S) {
// Collect all the still-undefined entities with internal linkage.
SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined;
S.getUndefinedButUsed(Undefined);
+ S.UndefinedButUsed.clear();
if (Undefined.empty()) return;
- for (auto Undef : Undefined) {
+ for (const auto &Undef : Undefined) {
ValueDecl *VD = cast<ValueDecl>(Undef.first);
SourceLocation UseLoc = Undef.second;
@@ -885,8 +924,6 @@ static void checkUndefinedButUsed(Sema &S) {
if (UseLoc.isValid())
S.Diag(UseLoc, diag::note_used_here);
}
-
- S.UndefinedButUsed.clear();
}
void Sema::LoadExternalWeakUndeclaredIdentifiers() {
@@ -896,7 +933,7 @@ void Sema::LoadExternalWeakUndeclaredIdentifiers() {
SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs;
ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs);
for (auto &WeakID : WeakIDs)
- WeakUndeclaredIdentifiers.insert(WeakID);
+ (void)WeakUndeclaredIdentifiers[WeakID.first].insert(WeakID.second);
}
@@ -920,7 +957,7 @@ static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD,
I != E && Complete; ++I) {
if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I))
Complete = M->isDefined() || M->isDefaulted() ||
- (M->isPure() && !isa<CXXDestructorDecl>(M));
+ (M->isPureVirtual() && !isa<CXXDestructorDecl>(M));
else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(*I))
// If the template function is marked as late template parsed at this
// point, it has not been instantiated and therefore we have not
@@ -997,15 +1034,9 @@ void Sema::emitAndClearUnusedLocalTypedefWarnings() {
/// is parsed. Note that the ASTContext may have already injected some
/// declarations.
void Sema::ActOnStartOfTranslationUnit() {
- if (getLangOpts().ModulesTS &&
- (getLangOpts().getCompilingModule() == LangOptions::CMK_ModuleInterface ||
- getLangOpts().getCompilingModule() == LangOptions::CMK_None)) {
- // We start in an implied global module fragment.
- SourceLocation StartOfTU =
- SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
- ActOnGlobalModuleFragmentDecl(StartOfTU);
- ModuleScopes.back().ImplicitGlobalModuleFragment = true;
- }
+ if (getLangOpts().CPlusPlusModules &&
+ getLangOpts().getCompilingModule() == LangOptions::CMK_HeaderUnit)
+ HandleStartOfHeaderUnit();
}
void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) {
@@ -1119,6 +1150,7 @@ void Sema::ActOnEndOfTranslationUnit() {
DiagnoseUnterminatedPragmaAlignPack();
DiagnoseUnterminatedPragmaAttribute();
+ DiagnoseUnterminatedOpenMPDeclareTarget();
// All delayed member exception specs should be checked or we end up accepting
// incompatible declarations.
@@ -1147,19 +1179,21 @@ void Sema::ActOnEndOfTranslationUnit() {
// Check for #pragma weak identifiers that were never declared
LoadExternalWeakUndeclaredIdentifiers();
- for (auto WeakID : WeakUndeclaredIdentifiers) {
- if (WeakID.second.getUsed())
+ for (const auto &WeakIDs : WeakUndeclaredIdentifiers) {
+ if (WeakIDs.second.empty())
continue;
- Decl *PrevDecl = LookupSingleName(TUScope, WeakID.first, SourceLocation(),
+ Decl *PrevDecl = LookupSingleName(TUScope, WeakIDs.first, SourceLocation(),
LookupOrdinaryName);
if (PrevDecl != nullptr &&
!(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl)))
- Diag(WeakID.second.getLocation(), diag::warn_attribute_wrong_decl_type)
- << "'weak'" << ExpectedVariableOrFunction;
+ for (const auto &WI : WeakIDs.second)
+ Diag(WI.getLocation(), diag::warn_attribute_wrong_decl_type)
+ << "'weak'" << /*isRegularKeyword=*/0 << ExpectedVariableOrFunction;
else
- Diag(WeakID.second.getLocation(), diag::warn_weak_identifier_undeclared)
- << WeakID.first;
+ for (const auto &WI : WeakIDs.second)
+ Diag(WI.getLocation(), diag::warn_weak_identifier_undeclared)
+ << WeakIDs.first;
}
if (LangOpts.CPlusPlus11 &&
@@ -1174,9 +1208,8 @@ void Sema::ActOnEndOfTranslationUnit() {
// A global-module-fragment is only permitted within a module unit.
bool DiagnosedMissingModuleDeclaration = false;
- if (!ModuleScopes.empty() &&
- ModuleScopes.back().Module->Kind == Module::GlobalModuleFragment &&
- !ModuleScopes.back().ImplicitGlobalModuleFragment) {
+ if (!ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
+ Module::ExplicitGlobalModuleFragment) {
Diag(ModuleScopes.back().BeginLoc,
diag::err_module_declaration_missing_after_global_module_introducer);
DiagnosedMissingModuleDeclaration = true;
@@ -1187,9 +1220,7 @@ void Sema::ActOnEndOfTranslationUnit() {
// module declaration by now.
if (getLangOpts().getCompilingModule() ==
LangOptions::CMK_ModuleInterface &&
- (ModuleScopes.empty() ||
- !ModuleScopes.back().Module->isModulePurview()) &&
- !DiagnosedMissingModuleDeclaration) {
+ !isCurrentModulePurview() && !DiagnosedMissingModuleDeclaration) {
// FIXME: Make a better guess as to where to put the module declaration.
Diag(getSourceManager().getLocForStartOfFile(
getSourceManager().getMainFileID()),
@@ -1215,15 +1246,63 @@ void Sema::ActOnEndOfTranslationUnit() {
ModMap.resolveConflicts(Mod, /*Complain=*/false);
// Queue the submodules, so their exports will also be resolved.
- Stack.append(Mod->submodule_begin(), Mod->submodule_end());
+ auto SubmodulesRange = Mod->submodules();
+ Stack.append(SubmodulesRange.begin(), SubmodulesRange.end());
}
}
+ // Now we can decide whether the modules we're building need an initializer.
+ if (Module *CurrentModule = getCurrentModule();
+ CurrentModule && CurrentModule->isInterfaceOrPartition()) {
+ auto DoesModNeedInit = [this](Module *M) {
+ if (!getASTContext().getModuleInitializers(M).empty())
+ return true;
+ for (auto [Exported, _] : M->Exports)
+ if (Exported->isNamedModuleInterfaceHasInit())
+ return true;
+ for (Module *I : M->Imports)
+ if (I->isNamedModuleInterfaceHasInit())
+ return true;
+
+ return false;
+ };
+
+ CurrentModule->NamedModuleHasInit =
+ DoesModNeedInit(CurrentModule) ||
+ llvm::any_of(CurrentModule->submodules(),
+ [&](auto *SubM) { return DoesModNeedInit(SubM); });
+ }
+
// Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for
// modules when they are built, not every time they are used.
emitAndClearUnusedLocalTypedefWarnings();
}
+ // C++ standard modules. Diagnose cases where a function is declared inline
+ // in the module purview but has no definition before the end of the TU or
+ // the start of a Private Module Fragment (if one is present).
+ if (!PendingInlineFuncDecls.empty()) {
+ for (auto *D : PendingInlineFuncDecls) {
+ if (auto *FD = dyn_cast<FunctionDecl>(D)) {
+ bool DefInPMF = false;
+ if (auto *FDD = FD->getDefinition()) {
+ DefInPMF = FDD->getOwningModule()->isPrivateModule();
+ if (!DefInPMF)
+ continue;
+ }
+ Diag(FD->getLocation(), diag::err_export_inline_not_defined)
+ << DefInPMF;
+ // If we have a PMF it should be at the end of the ModuleScopes.
+ if (DefInPMF &&
+ ModuleScopes.back().Module->Kind == Module::PrivateModuleFragment) {
+ Diag(ModuleScopes.back().BeginLoc,
+ diag::note_private_module_fragment);
+ }
+ }
+ }
+ PendingInlineFuncDecls.clear();
+ }
+
// C99 6.9.2p2:
// A declaration of an identifier for an object that has file
// scope without an initializer, and without a storage-class
@@ -1237,8 +1316,8 @@ void Sema::ActOnEndOfTranslationUnit() {
// translation unit, with an initializer equal to 0.
llvm::SmallSet<VarDecl *, 32> Seen;
for (TentativeDefinitionsType::iterator
- T = TentativeDefinitions.begin(ExternalSource),
- TEnd = TentativeDefinitions.end();
+ T = TentativeDefinitions.begin(ExternalSource.get()),
+ TEnd = TentativeDefinitions.end();
T != TEnd; ++T) {
VarDecl *VD = (*T)->getActingDefinition();
@@ -1253,8 +1332,8 @@ void Sema::ActOnEndOfTranslationUnit() {
// Set the length of the array to 1 (C99 6.9.2p5).
Diag(VD->getLocation(), diag::warn_tentative_incomplete_array);
llvm::APInt One(Context.getTypeSize(Context.getSizeType()), true);
- QualType T = Context.getConstantArrayType(ArrayT->getElementType(), One,
- nullptr, ArrayType::Normal, 0);
+ QualType T = Context.getConstantArrayType(
+ ArrayT->getElementType(), One, nullptr, ArraySizeModifier::Normal, 0);
VD->setType(T);
} else if (RequireCompleteType(VD->getLocation(), VD->getType(),
diag::err_tentative_def_incomplete_type))
@@ -1268,7 +1347,7 @@ void Sema::ActOnEndOfTranslationUnit() {
Consumer.CompleteTentativeDefinition(VD);
}
- for (auto D : ExternalDeclarations) {
+ for (auto *D : ExternalDeclarations) {
if (!D || D->isInvalidDecl() || D->getPreviousDecl() || !D->isUsed())
continue;
@@ -1282,8 +1361,9 @@ void Sema::ActOnEndOfTranslationUnit() {
if (!Diags.hasErrorOccurred() && TUKind != TU_Module) {
// Output warning for unused file scoped decls.
for (UnusedFileScopedDeclsType::iterator
- I = UnusedFileScopedDecls.begin(ExternalSource),
- E = UnusedFileScopedDecls.end(); I != E; ++I) {
+ I = UnusedFileScopedDecls.begin(ExternalSource.get()),
+ E = UnusedFileScopedDecls.end();
+ I != E; ++I) {
if (ShouldRemoveFromUnused(this, *I))
continue;
@@ -1293,10 +1373,14 @@ void Sema::ActOnEndOfTranslationUnit() {
DiagD = FD;
if (DiagD->isDeleted())
continue; // Deleted functions are supposed to be unused.
+ SourceRange DiagRange = DiagD->getLocation();
+ if (const ASTTemplateArgumentListInfo *ASTTAL =
+ DiagD->getTemplateSpecializationArgsAsWritten())
+ DiagRange.setEnd(ASTTAL->RAngleLoc);
if (DiagD->isReferenced()) {
if (isa<CXXMethodDecl>(DiagD))
Diag(DiagD->getLocation(), diag::warn_unneeded_member_function)
- << DiagD;
+ << DiagD << DiagRange;
else {
if (FD->getStorageClass() == SC_Static &&
!FD->isInlineSpecified() &&
@@ -1304,40 +1388,46 @@ void Sema::ActOnEndOfTranslationUnit() {
SourceMgr.getExpansionLoc(FD->getLocation())))
Diag(DiagD->getLocation(),
diag::warn_unneeded_static_internal_decl)
- << DiagD;
+ << DiagD << DiagRange;
else
Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
- << /*function*/ 0 << DiagD;
+ << /*function=*/0 << DiagD << DiagRange;
}
} else {
if (FD->getDescribedFunctionTemplate())
Diag(DiagD->getLocation(), diag::warn_unused_template)
- << /*function*/ 0 << DiagD;
+ << /*function=*/0 << DiagD << DiagRange;
else
Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD)
? diag::warn_unused_member_function
: diag::warn_unused_function)
- << DiagD;
+ << DiagD << DiagRange;
}
} else {
const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition();
if (!DiagD)
DiagD = cast<VarDecl>(*I);
+ SourceRange DiagRange = DiagD->getLocation();
+ if (const auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(DiagD)) {
+ if (const ASTTemplateArgumentListInfo *ASTTAL =
+ VTSD->getTemplateArgsInfo())
+ DiagRange.setEnd(ASTTAL->RAngleLoc);
+ }
if (DiagD->isReferenced()) {
Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
- << /*variable*/ 1 << DiagD;
+ << /*variable=*/1 << DiagD << DiagRange;
+ } else if (DiagD->getDescribedVarTemplate()) {
+ Diag(DiagD->getLocation(), diag::warn_unused_template)
+ << /*variable=*/1 << DiagD << DiagRange;
} else if (DiagD->getType().isConstQualified()) {
const SourceManager &SM = SourceMgr;
if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) ||
!PP.getLangOpts().IsHeaderFile)
Diag(DiagD->getLocation(), diag::warn_unused_const_variable)
- << DiagD;
+ << DiagD << DiagRange;
} else {
- if (DiagD->getDescribedVarTemplate())
- Diag(DiagD->getLocation(), diag::warn_unused_template)
- << /*variable*/ 1 << DiagD;
- else
- Diag(DiagD->getLocation(), diag::warn_unused_variable) << DiagD;
+ Diag(DiagD->getLocation(), diag::warn_unused_variable)
+ << DiagD << DiagRange;
}
}
}
@@ -1350,9 +1440,7 @@ void Sema::ActOnEndOfTranslationUnit() {
// source.
RecordCompleteMap RecordsComplete;
RecordCompleteMap MNCComplete;
- for (NamedDeclSetType::iterator I = UnusedPrivateFields.begin(),
- E = UnusedPrivateFields.end(); I != E; ++I) {
- const NamedDecl *D = *I;
+ for (const NamedDecl *D : UnusedPrivateFields) {
const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext());
if (RD && !RD->isUnion() &&
IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) {
@@ -1373,6 +1461,8 @@ void Sema::ActOnEndOfTranslationUnit() {
}
}
+ AnalysisWarnings.IssueWarnings(Context.getTranslationUnitDecl());
+
// Check we've noticed that we're no longer parsing the initializer for every
// variable. If we miss cases, then at best we have a performance issue and
// at worst a rejects-valid bug.
@@ -1388,19 +1478,18 @@ void Sema::ActOnEndOfTranslationUnit() {
// Helper functions.
//===----------------------------------------------------------------------===//
-DeclContext *Sema::getFunctionLevelDeclContext() {
+DeclContext *Sema::getFunctionLevelDeclContext(bool AllowLambda) const {
DeclContext *DC = CurContext;
while (true) {
if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC) || isa<CapturedDecl>(DC) ||
isa<RequiresExprBodyDecl>(DC)) {
DC = DC->getParent();
- } else if (isa<CXXMethodDecl>(DC) &&
+ } else if (!AllowLambda && isa<CXXMethodDecl>(DC) &&
cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call &&
cast<CXXRecordDecl>(DC->getParent())->isLambda()) {
DC = DC->getParent()->getParent();
- }
- else break;
+ } else break;
}
return DC;
@@ -1409,8 +1498,8 @@ DeclContext *Sema::getFunctionLevelDeclContext() {
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
-FunctionDecl *Sema::getCurFunctionDecl() {
- DeclContext *DC = getFunctionLevelDeclContext();
+FunctionDecl *Sema::getCurFunctionDecl(bool AllowLambda) const {
+ DeclContext *DC = getFunctionLevelDeclContext(AllowLambda);
return dyn_cast<FunctionDecl>(DC);
}
@@ -1421,7 +1510,7 @@ ObjCMethodDecl *Sema::getCurMethodDecl() {
return dyn_cast<ObjCMethodDecl>(DC);
}
-NamedDecl *Sema::getCurFunctionOrMethodDecl() {
+NamedDecl *Sema::getCurFunctionOrMethodDecl() const {
DeclContext *DC = getFunctionLevelDeclContext();
if (isa<ObjCMethodDecl>(DC) || isa<FunctionDecl>(DC))
return cast<NamedDecl>(DC);
@@ -1430,7 +1519,7 @@ NamedDecl *Sema::getCurFunctionOrMethodDecl() {
LangAS Sema::getDefaultCXXMethodAddrSpace() const {
if (getLangOpts().OpenCL)
- return LangAS::opencl_generic;
+ return getASTContext().getDefaultOpenCLPointeeAddrSpace();
return LangAS::Default;
}
@@ -1442,7 +1531,7 @@ void Sema::EmitCurrentDiagnostic(unsigned DiagID) {
// eliminated. If it truly cannot be (for example, there is some reentrancy
// issue I am not seeing yet), then there should at least be a clarifying
// comment somewhere.
- if (Optional<TemplateDeductionInfo*> Info = isSFINAEContext()) {
+ if (std::optional<TemplateDeductionInfo *> Info = isSFINAEContext()) {
switch (DiagnosticIDs::getDiagnosticSFINAEResponse(
Diags.getCurrentDiagID())) {
case DiagnosticIDs::SFINAE_Report:
@@ -1555,7 +1644,7 @@ bool Sema::hasUncompilableErrorOccurred() const {
// Print notes showing how we can reach FD starting from an a priori
// known-callable function.
-static void emitCallStackNotes(Sema &S, FunctionDecl *FD) {
+static void emitCallStackNotes(Sema &S, const FunctionDecl *FD) {
auto FnIt = S.DeviceKnownEmittedFns.find(FD);
while (FnIt != S.DeviceKnownEmittedFns.end()) {
// Respect error limit.
@@ -1737,7 +1826,7 @@ void Sema::emitDeferredDiags() {
return;
DeferredDiagnosticsEmitter DDE(*this);
- for (auto D : DeclsToCheckForDeferredDiags)
+ for (auto *D : DeclsToCheckForDeferredDiags)
DDE.checkRecordedDecl(D);
}
@@ -1767,7 +1856,8 @@ void Sema::emitDeferredDiags() {
Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc,
unsigned DiagID,
- FunctionDecl *Fn, Sema &S)
+ const FunctionDecl *Fn,
+ Sema &S)
: S(S), Loc(Loc), DiagID(DiagID), Fn(Fn),
ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) {
switch (K) {
@@ -1813,11 +1903,12 @@ Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
}
Sema::SemaDiagnosticBuilder
-Sema::targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD) {
+Sema::targetDiag(SourceLocation Loc, unsigned DiagID, const FunctionDecl *FD) {
FD = FD ? FD : getCurFunctionDecl();
if (LangOpts.OpenMP)
- return LangOpts.OpenMPIsDevice ? diagIfOpenMPDeviceCode(Loc, DiagID, FD)
- : diagIfOpenMPHostCode(Loc, DiagID, FD);
+ return LangOpts.OpenMPIsTargetDevice
+ ? diagIfOpenMPDeviceCode(Loc, DiagID, FD)
+ : diagIfOpenMPHostCode(Loc, DiagID, FD);
if (getLangOpts().CUDA)
return getLangOpts().CUDAIsDevice ? CUDADiagIfDeviceCode(Loc, DiagID)
: CUDADiagIfHostCode(Loc, DiagID);
@@ -1852,10 +1943,19 @@ Sema::SemaDiagnosticBuilder Sema::Diag(SourceLocation Loc, unsigned DiagID,
return DB;
}
-void Sema::checkDeviceDecl(ValueDecl *D, SourceLocation Loc) {
- if (isUnevaluatedContext())
+void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
+ if (isUnevaluatedContext() || Ty.isNull())
return;
+ // The original idea behind checkTypeSupport function is that unused
+ // declarations can be replaced with an array of bytes of the same size during
+ // codegen, such replacement doesn't seem to be possible for types without
+ // constant byte size like zero length arrays. So, do a deep check for SYCL.
+ if (D && LangOpts.SYCLIsDevice) {
+ llvm::DenseSet<QualType> Visited;
+ deepTypeCheckForSYCLDevice(Loc, Visited, D);
+ }
+
Decl *C = cast<Decl>(getCurLexicalContext());
// Memcpy operations for structs containing a member with unsupported type
@@ -1872,46 +1972,131 @@ void Sema::checkDeviceDecl(ValueDecl *D, SourceLocation Loc) {
// Try to associate errors with the lexical context, if that is a function, or
// the value declaration otherwise.
- FunctionDecl *FD =
- isa<FunctionDecl>(C) ? cast<FunctionDecl>(C) : dyn_cast<FunctionDecl>(D);
- auto CheckType = [&](QualType Ty) {
+ const FunctionDecl *FD = isa<FunctionDecl>(C)
+ ? cast<FunctionDecl>(C)
+ : dyn_cast_or_null<FunctionDecl>(D);
+
+ auto CheckDeviceType = [&](QualType Ty) {
if (Ty->isDependentType())
return;
- if (Ty->isExtIntType()) {
- if (!Context.getTargetInfo().hasExtIntType()) {
- targetDiag(Loc, diag::err_device_unsupported_type, FD)
- << D << false /*show bit size*/ << 0 /*bitsize*/
+ if (Ty->isBitIntType()) {
+ if (!Context.getTargetInfo().hasBitIntType()) {
+ PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
+ if (D)
+ PD << D;
+ else
+ PD << "expression";
+ targetDiag(Loc, PD, FD)
+ << false /*show bit size*/ << 0 /*bitsize*/ << false /*return*/
<< Ty << Context.getTargetInfo().getTriple().str();
}
return;
}
+ // Check if we are dealing with two 'long double' but with different
+ // semantics.
+ bool LongDoubleMismatched = false;
+ if (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128) {
+ const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(Ty);
+ if ((&Sem != &llvm::APFloat::PPCDoubleDouble() &&
+ !Context.getTargetInfo().hasFloat128Type()) ||
+ (&Sem == &llvm::APFloat::PPCDoubleDouble() &&
+ !Context.getTargetInfo().hasIbm128Type()))
+ LongDoubleMismatched = true;
+ }
+
if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
- ((Ty->isFloat128Type() ||
- (Ty->isRealFloatingType() && Context.getTypeSize(Ty) == 128)) &&
- !Context.getTargetInfo().hasFloat128Type()) ||
+ (Ty->isFloat128Type() && !Context.getTargetInfo().hasFloat128Type()) ||
+ (Ty->isIbm128Type() && !Context.getTargetInfo().hasIbm128Type()) ||
(Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
- !Context.getTargetInfo().hasInt128Type())) {
- if (targetDiag(Loc, diag::err_device_unsupported_type, FD)
- << D << true /*show bit size*/
+ !Context.getTargetInfo().hasInt128Type()) ||
+ (Ty->isBFloat16Type() && !Context.getTargetInfo().hasBFloat16Type() &&
+ !LangOpts.CUDAIsDevice) ||
+ LongDoubleMismatched) {
+ PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
+ if (D)
+ PD << D;
+ else
+ PD << "expression";
+
+ if (targetDiag(Loc, PD, FD)
+ << true /*show bit size*/
<< static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
- << Context.getTargetInfo().getTriple().str())
- D->setInvalidDecl();
- targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
+ << false /*return*/ << Context.getTargetInfo().getTriple().str()) {
+ if (D)
+ D->setInvalidDecl();
+ }
+ if (D)
+ targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
}
};
- QualType Ty = D->getType();
- CheckType(Ty);
+ auto CheckType = [&](QualType Ty, bool IsRetTy = false) {
+ if (LangOpts.SYCLIsDevice ||
+ (LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice) ||
+ LangOpts.CUDAIsDevice)
+ CheckDeviceType(Ty);
+
+ QualType UnqualTy = Ty.getCanonicalType().getUnqualifiedType();
+ const TargetInfo &TI = Context.getTargetInfo();
+ if (!TI.hasLongDoubleType() && UnqualTy == Context.LongDoubleTy) {
+ PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
+ if (D)
+ PD << D;
+ else
+ PD << "expression";
+
+ if (Diag(Loc, PD, FD)
+ << false /*show bit size*/ << 0 << Ty << false /*return*/
+ << TI.getTriple().str()) {
+ if (D)
+ D->setInvalidDecl();
+ }
+ if (D)
+ targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
+ }
+
+ bool IsDouble = UnqualTy == Context.DoubleTy;
+ bool IsFloat = UnqualTy == Context.FloatTy;
+ if (IsRetTy && !TI.hasFPReturn() && (IsDouble || IsFloat)) {
+ PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
+ if (D)
+ PD << D;
+ else
+ PD << "expression";
+
+ if (Diag(Loc, PD, FD)
+ << false /*show bit size*/ << 0 << Ty << true /*return*/
+ << TI.getTriple().str()) {
+ if (D)
+ D->setInvalidDecl();
+ }
+ if (D)
+ targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
+ }
+
+ if (TI.hasRISCVVTypes() && Ty->isRVVSizelessBuiltinType())
+ checkRVVTypeSupport(Ty, Loc, D);
+
+ // Don't allow SVE types in functions without a SVE target.
+ if (Ty->isSVESizelessBuiltinType() && FD && FD->hasBody()) {
+ llvm::StringMap<bool> CallerFeatureMap;
+ Context.getFunctionFeatureMap(CallerFeatureMap, FD);
+ if (!Builtin::evaluateRequiredTargetFeatures("sve", CallerFeatureMap) &&
+ !Builtin::evaluateRequiredTargetFeatures("sme", CallerFeatureMap))
+ Diag(D->getLocation(), diag::err_sve_vector_in_non_sve_target) << Ty;
+ }
+ };
+ CheckType(Ty);
if (const auto *FPTy = dyn_cast<FunctionProtoType>(Ty)) {
for (const auto &ParamTy : FPTy->param_types())
CheckType(ParamTy);
- CheckType(FPTy->getReturnType());
+ CheckType(FPTy->getReturnType(), /*IsRetTy=*/true);
}
if (const auto *FNPTy = dyn_cast<FunctionNoProtoType>(Ty))
- CheckType(FNPTy->getReturnType());
+ CheckType(FNPTy->getReturnType(), /*IsRetTy=*/true);
}
/// Looks through the macro-expansion chain for the given
@@ -1980,11 +2165,13 @@ void Sema::PushFunctionScope() {
void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(),
BlockScope, Block));
+ CapturingFunctionScopes++;
}
LambdaScopeInfo *Sema::PushLambdaScope() {
LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics());
FunctionScopes.push_back(LSI);
+ CapturingFunctionScopes++;
return LSI;
}
@@ -2011,7 +2198,7 @@ static void checkEscapingByref(VarDecl *VD, Sema &S) {
new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc);
ExprResult Result;
auto IE = InitializedEntity::InitializeBlock(Loc, T);
- if (S.getLangOpts().CPlusPlus2b) {
+ if (S.getLangOpts().CPlusPlus23) {
auto *E = ImplicitCastExpr::Create(S.Context, T, CK_NoOp, VarRef, nullptr,
VK_XValue, FPOptionsOverride());
Result = S.PerformCopyInitialization(IE, SourceLocation(), E);
@@ -2106,6 +2293,8 @@ Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
void Sema::PoppedFunctionScopeDeleter::
operator()(sema::FunctionScopeInfo *Scope) const {
+ if (!Scope->isPlainFunction())
+ Self->CapturingFunctionScopes--;
// Stash the function scope for later reuse if it's for a normal function.
if (Scope->isPlainFunction() && !Self->CachedFunctionScope)
Self->CachedFunctionScope.reset(Scope);
@@ -2114,7 +2303,8 @@ operator()(sema::FunctionScopeInfo *Scope) const {
}
void Sema::PushCompoundScope(bool IsStmtExpr) {
- getCurFunction()->CompoundScopes.push_back(CompoundScopeInfo(IsStmtExpr));
+ getCurFunction()->CompoundScopes.push_back(
+ CompoundScopeInfo(IsStmtExpr, getCurFPFeatures()));
}
void Sema::PopCompoundScope() {
@@ -2180,7 +2370,8 @@ FunctionScopeInfo *Sema::getEnclosingFunction() const {
LambdaScopeInfo *Sema::getEnclosingLambda() const {
for (auto *Scope : llvm::reverse(FunctionScopes)) {
if (auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Scope)) {
- if (LSI->Lambda && !LSI->Lambda->Encloses(CurContext)) {
+ if (LSI->Lambda && !LSI->Lambda->Encloses(CurContext) &&
+ LSI->AfterParameterList) {
// We have switched contexts due to template instantiation.
// FIXME: We should swap out the FunctionScopes during code synthesis
// so that we don't need to check for this.
@@ -2206,8 +2397,8 @@ LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) {
return nullptr;
}
auto *CurLSI = dyn_cast<LambdaScopeInfo>(*I);
- if (CurLSI && CurLSI->Lambda &&
- !CurLSI->Lambda->Encloses(CurContext)) {
+ if (CurLSI && CurLSI->Lambda && CurLSI->CallOperator &&
+ !CurLSI->Lambda->Encloses(CurContext) && CurLSI->AfterParameterList) {
// We have switched contexts due to template instantiation.
assert(!CodeSynthesisContexts.empty());
return nullptr;
@@ -2232,7 +2423,7 @@ void Sema::ActOnComment(SourceRange Comment) {
SourceMgr.isInSystemHeader(Comment.getBegin()))
return;
RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false);
- if (RC.isAlmostTrailingComment()) {
+ if (RC.isAlmostTrailingComment() || RC.hasUnsupportedSplice(SourceMgr)) {
SourceRange MagicMarkerRange(Comment.getBegin(),
Comment.getBegin().getLocWithOffset(3));
StringRef MagicMarkerText;
@@ -2243,6 +2434,11 @@ void Sema::ActOnComment(SourceRange Comment) {
case RawComment::RCK_OrdinaryC:
MagicMarkerText = "/**<";
break;
+ case RawComment::RCK_Invalid:
+ // FIXME: are there other scenarios that could produce an invalid
+ // raw comment here?
+ Diag(Comment.getBegin(), diag::warn_splice_in_doxygen_comment);
+ return;
default:
llvm_unreachable("if this is an almost Doxygen comment, "
"it should be ordinary");
@@ -2341,7 +2537,7 @@ bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
if (IsMemExpr && !E.isTypeDependent()) {
Sema::TentativeAnalysisScope Trap(*this);
ExprResult R = BuildCallToMemberFunction(nullptr, &E, SourceLocation(),
- None, SourceLocation());
+ std::nullopt, SourceLocation());
if (R.isUsable()) {
ZeroArgCallReturnTy = R.get()->getType();
return true;
@@ -2349,8 +2545,8 @@ bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
return false;
}
- if (const DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) {
- if (const FunctionDecl *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) {
+ if (const auto *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) {
+ if (const auto *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) {
if (Fun->getMinRequiredArguments() == 0)
ZeroArgCallReturnTy = Fun->getReturnType();
return true;
@@ -2367,8 +2563,7 @@ bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
if (!FunTy)
FunTy = ExprTy->getAs<FunctionType>();
- if (const FunctionProtoType *FPT =
- dyn_cast_or_null<FunctionProtoType>(FunTy)) {
+ if (const auto *FPT = dyn_cast_if_present<FunctionProtoType>(FunTy)) {
if (FPT->getNumParams() == 0)
ZeroArgCallReturnTy = FunTy->getReturnType();
return true;
@@ -2399,12 +2594,15 @@ static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
continue;
}
- NamedDecl *Fn = (*It)->getUnderlyingDecl();
+ const NamedDecl *Fn = (*It)->getUnderlyingDecl();
// Don't print overloads for non-default multiversioned functions.
if (const auto *FD = Fn->getAsFunction()) {
if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() &&
!FD->getAttr<TargetAttr>()->isDefaultVersion())
continue;
+ if (FD->isMultiVersion() && FD->hasAttr<TargetVersionAttr>() &&
+ !FD->getAttr<TargetVersionAttr>()->isDefaultVersion())
+ continue;
}
S.Diag(Fn->getLocation(), diag::note_possible_target_of_call);
++ShownOverloads;
@@ -2426,7 +2624,7 @@ static void notePlausibleOverloads(Sema &S, SourceLocation Loc,
UnresolvedSet<2> PlausibleOverloads;
for (OverloadExpr::decls_iterator It = Overloads.begin(),
DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
- const FunctionDecl *OverloadDecl = cast<FunctionDecl>(*It);
+ const auto *OverloadDecl = cast<FunctionDecl>(*It);
QualType OverloadResultTy = OverloadDecl->getReturnType();
if (IsPlausibleResult(OverloadResultTy))
PlausibleOverloads.addDecl(It.getDecl());
@@ -2438,7 +2636,7 @@ static void notePlausibleOverloads(Sema &S, SourceLocation Loc,
/// putting parentheses after it. Notably, expressions with unary
/// operators can't be because the unary operator will start parsing
/// outside the call.
-static bool IsCallableWithAppend(Expr *E) {
+static bool IsCallableWithAppend(const Expr *E) {
E = E->IgnoreImplicit();
return (!isa<CStyleCastExpr>(E) &&
!isa<UnaryOperator>(E) &&
@@ -2466,32 +2664,36 @@ bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool (*IsPlausibleResult)(QualType)) {
SourceLocation Loc = E.get()->getExprLoc();
SourceRange Range = E.get()->getSourceRange();
-
- QualType ZeroArgCallTy;
UnresolvedSet<4> Overloads;
- if (tryExprAsCall(*E.get(), ZeroArgCallTy, Overloads) &&
- !ZeroArgCallTy.isNull() &&
- (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) {
- // At this point, we know E is potentially callable with 0
- // arguments and that it returns something of a reasonable type,
- // so we can emit a fixit and carry on pretending that E was
- // actually a CallExpr.
- SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd());
- bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
- Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range
- << (IsCallableWithAppend(E.get())
- ? FixItHint::CreateInsertion(ParenInsertionLoc, "()")
- : FixItHint());
- if (!IsMV)
- notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
-
- // FIXME: Try this before emitting the fixit, and suppress diagnostics
- // while doing so.
- E = BuildCallExpr(nullptr, E.get(), Range.getEnd(), None,
- Range.getEnd().getLocWithOffset(1));
- return true;
- }
+ // If this is a SFINAE context, don't try anything that might trigger ADL
+ // prematurely.
+ if (!isSFINAEContext()) {
+ QualType ZeroArgCallTy;
+ if (tryExprAsCall(*E.get(), ZeroArgCallTy, Overloads) &&
+ !ZeroArgCallTy.isNull() &&
+ (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) {
+ // At this point, we know E is potentially callable with 0
+ // arguments and that it returns something of a reasonable type,
+ // so we can emit a fixit and carry on pretending that E was
+ // actually a CallExpr.
+ SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd());
+ bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
+ Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range
+ << (IsCallableWithAppend(E.get())
+ ? FixItHint::CreateInsertion(ParenInsertionLoc,
+ "()")
+ : FixItHint());
+ if (!IsMV)
+ notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
+
+ // FIXME: Try this before emitting the fixit, and suppress diagnostics
+ // while doing so.
+ E = BuildCallExpr(nullptr, E.get(), Range.getEnd(), std::nullopt,
+ Range.getEnd().getLocWithOffset(1));
+ return true;
+ }
+ }
if (!ForceComplain) return false;
bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
@@ -2508,12 +2710,6 @@ IdentifierInfo *Sema::getSuperIdentifier() const {
return Ident_super;
}
-IdentifierInfo *Sema::getFloat128Identifier() const {
- if (!Ident___float128)
- Ident___float128 = &Context.Idents.get("__float128");
- return Ident___float128;
-}
-
void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD,
CapturedRegionKind K,
unsigned OpenMPCaptureLevel) {
@@ -2523,6 +2719,7 @@ void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD,
OpenMPCaptureLevel);
CSI->ReturnType = Context.VoidTy;
FunctionScopes.push_back(CSI);
+ CapturingFunctionScopes++;
}
CapturedRegionScopeInfo *Sema::getCurCapturedRegion() {
@@ -2536,3 +2733,38 @@ const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> &
Sema::getMismatchingDeleteExpressions() const {
return DeleteExprs;
}
+
+Sema::FPFeaturesStateRAII::FPFeaturesStateRAII(Sema &S)
+ : S(S), OldFPFeaturesState(S.CurFPFeatures),
+ OldOverrides(S.FpPragmaStack.CurrentValue),
+ OldEvalMethod(S.PP.getCurrentFPEvalMethod()),
+ OldFPPragmaLocation(S.PP.getLastFPEvalPragmaLocation()) {}
+
+Sema::FPFeaturesStateRAII::~FPFeaturesStateRAII() {
+ S.CurFPFeatures = OldFPFeaturesState;
+ S.FpPragmaStack.CurrentValue = OldOverrides;
+ S.PP.setCurrentFPEvalMethod(OldFPPragmaLocation, OldEvalMethod);
+}
+
+bool Sema::isDeclaratorFunctionLike(Declarator &D) {
+ assert(D.getCXXScopeSpec().isSet() &&
+ "can only be called for qualified names");
+
+ auto LR = LookupResult(*this, D.getIdentifier(), D.getBeginLoc(),
+ LookupOrdinaryName, forRedeclarationInCurContext());
+ DeclContext *DC = computeDeclContext(D.getCXXScopeSpec(),
+ !D.getDeclSpec().isFriendSpecified());
+ if (!DC)
+ return false;
+
+ LookupQualifiedName(LR, DC);
+ bool Result = llvm::all_of(LR, [](Decl *Dcl) {
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(Dcl)) {
+ ND = ND->getUnderlyingDecl();
+ return isa<FunctionDecl>(ND) || isa<FunctionTemplateDecl>(ND) ||
+ isa<UsingDecl>(ND);
+ }
+ return false;
+ });
+ return Result;
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaAccess.cpp b/contrib/llvm-project/clang/lib/Sema/SemaAccess.cpp
index 580305c1110b..4af3c0f30a8e 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaAccess.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaAccess.cpp
@@ -140,7 +140,7 @@ struct EffectiveContext {
bool includesClass(const CXXRecordDecl *R) const {
R = R->getCanonicalDecl();
- return llvm::find(Records, R) != Records.end();
+ return llvm::is_contained(Records, R);
}
/// Retrieves the innermost "useful" context. Can be null if we're
@@ -199,6 +199,16 @@ struct AccessTarget : public AccessedEntity {
: Target(S.Target), Has(S.Has) {
S.Target = nullptr;
}
+
+ // The move assignment operator is defined as deleted pending further
+ // motivation.
+ SavedInstanceContext &operator=(SavedInstanceContext &&) = delete;
+
+ // The copy constrcutor and copy assignment operator is defined as deleted
+ // pending further motivation.
+ SavedInstanceContext(const SavedInstanceContext &) = delete;
+ SavedInstanceContext &operator=(const SavedInstanceContext &) = delete;
+
~SavedInstanceContext() {
if (Target)
Target->HasInstanceContext = Has;
@@ -1493,6 +1503,8 @@ void Sema::HandleDelayedAccessCheck(DelayedDiagnostic &DD, Decl *D) {
} else if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D)) {
if (isa<DeclContext>(TD->getTemplatedDecl()))
DC = cast<DeclContext>(TD->getTemplatedDecl());
+ } else if (auto *RD = dyn_cast<RequiresExprBodyDecl>(D)) {
+ DC = RD;
}
EffectiveContext EC(DC);
@@ -1649,7 +1661,8 @@ Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc,
<< Entity.getBaseSpecifier()->getType() << getSpecialMember(Constructor);
break;
- case InitializedEntity::EK_Member: {
+ case InitializedEntity::EK_Member:
+ case InitializedEntity::EK_ParenAggInitMember: {
const FieldDecl *Field = cast<FieldDecl>(Entity.getDecl());
PD = PDiag(diag::err_access_field_ctor);
PD << Field->getType() << getSpecialMember(Constructor);
@@ -1761,14 +1774,11 @@ Sema::CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
return CheckAccess(*this, UseLoc, Entity);
}
-/// Checks access to an overloaded member operator, including
-/// conversion operators.
Sema::AccessResult Sema::CheckMemberOperatorAccess(SourceLocation OpLoc,
Expr *ObjectExpr,
- Expr *ArgExpr,
+ const SourceRange &Range,
DeclAccessPair Found) {
- if (!getLangOpts().AccessControl ||
- Found.getAccess() == AS_public)
+ if (!getLangOpts().AccessControl || Found.getAccess() == AS_public)
return AR_accessible;
const RecordType *RT = ObjectExpr->getType()->castAs<RecordType>();
@@ -1776,13 +1786,35 @@ Sema::AccessResult Sema::CheckMemberOperatorAccess(SourceLocation OpLoc,
AccessTarget Entity(Context, AccessTarget::Member, NamingClass, Found,
ObjectExpr->getType());
- Entity.setDiag(diag::err_access)
- << ObjectExpr->getSourceRange()
- << (ArgExpr ? ArgExpr->getSourceRange() : SourceRange());
+ Entity.setDiag(diag::err_access) << ObjectExpr->getSourceRange() << Range;
return CheckAccess(*this, OpLoc, Entity);
}
+/// Checks access to an overloaded member operator, including
+/// conversion operators.
+Sema::AccessResult Sema::CheckMemberOperatorAccess(SourceLocation OpLoc,
+ Expr *ObjectExpr,
+ Expr *ArgExpr,
+ DeclAccessPair Found) {
+ return CheckMemberOperatorAccess(
+ OpLoc, ObjectExpr, ArgExpr ? ArgExpr->getSourceRange() : SourceRange(),
+ Found);
+}
+
+Sema::AccessResult Sema::CheckMemberOperatorAccess(SourceLocation OpLoc,
+ Expr *ObjectExpr,
+ ArrayRef<Expr *> ArgExprs,
+ DeclAccessPair FoundDecl) {
+ SourceRange R;
+ if (!ArgExprs.empty()) {
+ R = SourceRange(ArgExprs.front()->getBeginLoc(),
+ ArgExprs.back()->getEndLoc());
+ }
+
+ return CheckMemberOperatorAccess(OpLoc, ObjectExpr, R, FoundDecl);
+}
+
/// Checks access to the target of a friend declaration.
Sema::AccessResult Sema::CheckFriendAccess(NamedDecl *target) {
assert(isa<CXXMethodDecl>(target->getAsFunction()));
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp
index fe8f02f02368..0dcf42e48997 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp
@@ -18,6 +18,7 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/SemaInternal.h"
+#include <optional>
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -34,6 +35,7 @@ Sema::PragmaStackSentinelRAII::PragmaStackSentinelRAII(Sema &S,
S.BSSSegStack.SentinelAction(PSK_Push, SlotLabel);
S.ConstSegStack.SentinelAction(PSK_Push, SlotLabel);
S.CodeSegStack.SentinelAction(PSK_Push, SlotLabel);
+ S.StrictGuardStackCheckStack.SentinelAction(PSK_Push, SlotLabel);
}
}
@@ -44,6 +46,7 @@ Sema::PragmaStackSentinelRAII::~PragmaStackSentinelRAII() {
S.BSSSegStack.SentinelAction(PSK_Pop, SlotLabel);
S.ConstSegStack.SentinelAction(PSK_Pop, SlotLabel);
S.CodeSegStack.SentinelAction(PSK_Pop, SlotLabel);
+ S.StrictGuardStackCheckStack.SentinelAction(PSK_Pop, SlotLabel);
}
}
@@ -220,8 +223,6 @@ void Sema::ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
switch (Kind) {
// For most of the platforms we support, native and natural are the same.
// With XL, native is the same as power, natural means something else.
- //
- // FIXME: This is not true on Darwin/PPC.
case POAK_Native:
case POAK_Power:
Action = Sema::PSK_Push_Set;
@@ -335,12 +336,12 @@ void Sema::ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
AlignPackInfo::Mode ModeVal = CurVal.getAlignMode();
if (Alignment) {
- Optional<llvm::APSInt> Val;
+ std::optional<llvm::APSInt> Val;
Val = Alignment->getIntegerConstantExpr(Context);
// pack(0) is like pack(), which just works out since that is what
// we use 0 for in PackAttr.
- if (Alignment->isTypeDependent() || Alignment->isValueDependent() || !Val ||
+ if (Alignment->isTypeDependent() || !Val ||
!(*Val == 0 || Val->isPowerOf2()) || Val->getZExtValue() > 16) {
Diag(PragmaLoc, diag::warn_pragma_pack_invalid_alignment);
return; // Ignore
@@ -384,6 +385,54 @@ void Sema::ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
AlignPackStack.Act(PragmaLoc, Action, SlotLabel, Info);
}
+bool Sema::ConstantFoldAttrArgs(const AttributeCommonInfo &CI,
+ MutableArrayRef<Expr *> Args) {
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ for (unsigned Idx = 0; Idx < Args.size(); Idx++) {
+ Expr *&E = Args.begin()[Idx];
+ assert(E && "error are handled before");
+ if (E->isValueDependent() || E->isTypeDependent())
+ continue;
+
+ // FIXME: Use DefaultFunctionArrayLValueConversion() in place of the logic
+ // that adds implicit casts here.
+ if (E->getType()->isArrayType())
+ E = ImpCastExprToType(E, Context.getPointerType(E->getType()),
+ clang::CK_ArrayToPointerDecay)
+ .get();
+ if (E->getType()->isFunctionType())
+ E = ImplicitCastExpr::Create(Context,
+ Context.getPointerType(E->getType()),
+ clang::CK_FunctionToPointerDecay, E, nullptr,
+ VK_PRValue, FPOptionsOverride());
+ if (E->isLValue())
+ E = ImplicitCastExpr::Create(Context, E->getType().getNonReferenceType(),
+ clang::CK_LValueToRValue, E, nullptr,
+ VK_PRValue, FPOptionsOverride());
+
+ Expr::EvalResult Eval;
+ Notes.clear();
+ Eval.Diag = &Notes;
+
+ bool Result = E->EvaluateAsConstantExpr(Eval, Context);
+
+ /// Result means the expression can be folded to a constant.
+ /// Note.empty() means the expression is a valid constant expression in the
+ /// current language mode.
+ if (!Result || !Notes.empty()) {
+ Diag(E->getBeginLoc(), diag::err_attribute_argument_n_type)
+ << CI << (Idx + 1) << AANT_ArgumentConstantExpr;
+ for (auto &Note : Notes)
+ Diag(Note.first, Note.second);
+ return false;
+ }
+ assert(Eval.Val.hasValue());
+ E = ConstantExpr::Create(Context, E, Eval.Val);
+ }
+
+ return true;
+}
+
void Sema::DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind,
SourceLocation IncludeLoc) {
if (Kind == PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude) {
@@ -470,13 +519,41 @@ void Sema::ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
Consumer.HandleTopLevelDecl(DeclGroupRef(PDMD));
}
+void Sema::ActOnPragmaFPEvalMethod(SourceLocation Loc,
+ LangOptions::FPEvalMethodKind Value) {
+ FPOptionsOverride NewFPFeatures = CurFPFeatureOverrides();
+ switch (Value) {
+ default:
+ llvm_unreachable("invalid pragma eval_method kind");
+ case LangOptions::FEM_Source:
+ NewFPFeatures.setFPEvalMethodOverride(LangOptions::FEM_Source);
+ break;
+ case LangOptions::FEM_Double:
+ NewFPFeatures.setFPEvalMethodOverride(LangOptions::FEM_Double);
+ break;
+ case LangOptions::FEM_Extended:
+ NewFPFeatures.setFPEvalMethodOverride(LangOptions::FEM_Extended);
+ break;
+ }
+ if (getLangOpts().ApproxFunc)
+ Diag(Loc, diag::err_setting_eval_method_used_in_unsafe_context) << 0 << 0;
+ if (getLangOpts().AllowFPReassoc)
+ Diag(Loc, diag::err_setting_eval_method_used_in_unsafe_context) << 0 << 1;
+ if (getLangOpts().AllowRecip)
+ Diag(Loc, diag::err_setting_eval_method_used_in_unsafe_context) << 0 << 2;
+ FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewFPFeatures);
+ CurFPFeatures = NewFPFeatures.applyOverrides(getLangOpts());
+ PP.setCurrentFPEvalMethod(Loc, Value);
+}
+
void Sema::ActOnPragmaFloatControl(SourceLocation Loc,
PragmaMsStackAction Action,
PragmaFloatControlKind Value) {
FPOptionsOverride NewFPFeatures = CurFPFeatureOverrides();
if ((Action == PSK_Push_Set || Action == PSK_Push || Action == PSK_Pop) &&
- !(CurContext->isTranslationUnit()) && !CurContext->isNamespace()) {
- // Push and pop can only occur at file or namespace scope.
+ !CurContext->getRedeclContext()->isFileContext()) {
+ // Push and pop can only occur at file or namespace scope, or within a
+ // language linkage declaration.
Diag(Loc, diag::err_pragma_fc_pp_scope);
return;
}
@@ -488,7 +565,7 @@ void Sema::ActOnPragmaFloatControl(SourceLocation Loc,
FpPragmaStack.Act(Loc, Action, StringRef(), NewFPFeatures);
break;
case PFC_NoPrecise:
- if (CurFPFeatures.getFPExceptionMode() == LangOptions::FPE_Strict)
+ if (CurFPFeatures.getExceptionMode() == LangOptions::FPE_Strict)
Diag(Loc, diag::err_pragma_fc_noprecise_requires_noexcept);
else if (CurFPFeatures.getAllowFEnvAccess())
Diag(Loc, diag::err_pragma_fc_noprecise_requires_nofenv);
@@ -500,11 +577,11 @@ void Sema::ActOnPragmaFloatControl(SourceLocation Loc,
if (!isPreciseFPEnabled())
Diag(Loc, diag::err_pragma_fc_except_requires_precise);
else
- NewFPFeatures.setFPExceptionModeOverride(LangOptions::FPE_Strict);
+ NewFPFeatures.setSpecifiedExceptionModeOverride(LangOptions::FPE_Strict);
FpPragmaStack.Act(Loc, Action, StringRef(), NewFPFeatures);
break;
case PFC_NoExcept:
- NewFPFeatures.setFPExceptionModeOverride(LangOptions::FPE_Ignore);
+ NewFPFeatures.setSpecifiedExceptionModeOverride(LangOptions::FPE_Ignore);
FpPragmaStack.Act(Loc, Action, StringRef(), NewFPFeatures);
break;
case PFC_Push:
@@ -681,6 +758,17 @@ void Sema::ActOnPragmaMSSeg(SourceLocation PragmaLocation,
Stack->Act(PragmaLocation, Action, StackSlotLabel, SegmentName);
}
+/// Called on well formed \#pragma strict_gs_check().
+void Sema::ActOnPragmaMSStrictGuardStackCheck(SourceLocation PragmaLocation,
+ PragmaMsStackAction Action,
+ bool Value) {
+ if (Action & PSK_Pop && StrictGuardStackCheckStack.Stack.empty())
+ Diag(PragmaLocation, diag::warn_pragma_pop_failed) << "strict_gs_check"
+ << "stack empty";
+
+ StrictGuardStackCheckStack.Act(PragmaLocation, Action, StringRef(), Value);
+}
+
/// Called on well formed \#pragma bss_seg().
void Sema::ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName) {
@@ -696,6 +784,42 @@ void Sema::ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
CurInitSegLoc = PragmaLocation;
}
+void Sema::ActOnPragmaMSAllocText(
+ SourceLocation PragmaLocation, StringRef Section,
+ const SmallVector<std::tuple<IdentifierInfo *, SourceLocation>>
+ &Functions) {
+ if (!CurContext->getRedeclContext()->isFileContext()) {
+ Diag(PragmaLocation, diag::err_pragma_expected_file_scope) << "alloc_text";
+ return;
+ }
+
+ for (auto &Function : Functions) {
+ IdentifierInfo *II;
+ SourceLocation Loc;
+ std::tie(II, Loc) = Function;
+
+ DeclarationName DN(II);
+ NamedDecl *ND = LookupSingleName(TUScope, DN, Loc, LookupOrdinaryName);
+ if (!ND) {
+ Diag(Loc, diag::err_undeclared_use) << II->getName();
+ return;
+ }
+
+ auto *FD = dyn_cast<FunctionDecl>(ND->getCanonicalDecl());
+ if (!FD) {
+ Diag(Loc, diag::err_pragma_alloc_text_not_function);
+ return;
+ }
+
+ if (getLangOpts().CPlusPlus && !FD->isInExternCContext()) {
+ Diag(Loc, diag::err_pragma_alloc_text_c_linkage);
+ return;
+ }
+
+ FunctionToSectionMap[II->getName()] = std::make_tuple(Section, Loc);
+ }
+}
+
void Sema::ActOnPragmaUnused(const Token &IdTok, Scope *curScope,
SourceLocation PragmaLoc) {
@@ -721,7 +845,6 @@ void Sema::ActOnPragmaUnused(const Token &IdTok, Scope *curScope,
Diag(PragmaLoc, diag::warn_used_but_marked_unused) << Name;
VD->addAttr(UnusedAttr::CreateImplicit(Context, IdTok.getLocation(),
- AttributeCommonInfo::AS_Pragma,
UnusedAttr::GNU_unused));
}
@@ -737,18 +860,18 @@ void Sema::AddCFAuditedAttribute(Decl *D) {
return;
AttributeCommonInfo Info(Ident, SourceRange(Loc),
- AttributeCommonInfo::AS_Pragma);
+ AttributeCommonInfo::Form::Pragma());
D->addAttr(CFAuditedTransferAttr::CreateImplicit(Context, Info));
}
namespace {
-Optional<attr::SubjectMatchRule>
+std::optional<attr::SubjectMatchRule>
getParentAttrMatcherRule(attr::SubjectMatchRule Rule) {
using namespace attr;
switch (Rule) {
default:
- return None;
+ return std::nullopt;
#define ATTR_MATCH_RULE(Value, Spelling, IsAbstract)
#define ATTR_MATCH_SUB_RULE(Value, Spelling, IsAbstract, Parent, IsNegated) \
case Value: \
@@ -791,7 +914,7 @@ attrMatcherRuleListToString(ArrayRef<attr::SubjectMatchRule> Rules) {
OS << (I.index() == Rules.size() - 1 ? ", and " : ", ");
OS << "'" << attr::getSubjectMatchRuleSpelling(I.value()) << "'";
}
- return OS.str();
+ return Result;
}
} // end anonymous namespace
@@ -818,7 +941,7 @@ void Sema::ActOnPragmaAttributeAttribute(
RulesToFirstSpecifiedNegatedSubRule;
for (const auto &Rule : Rules) {
attr::SubjectMatchRule MatchRule = attr::SubjectMatchRule(Rule.first);
- Optional<attr::SubjectMatchRule> ParentRule =
+ std::optional<attr::SubjectMatchRule> ParentRule =
getParentAttrMatcherRule(MatchRule);
if (!ParentRule)
continue;
@@ -842,7 +965,7 @@ void Sema::ActOnPragmaAttributeAttribute(
bool IgnoreNegatedSubRules = false;
for (const auto &Rule : Rules) {
attr::SubjectMatchRule MatchRule = attr::SubjectMatchRule(Rule.first);
- Optional<attr::SubjectMatchRule> ParentRule =
+ std::optional<attr::SubjectMatchRule> ParentRule =
getParentAttrMatcherRule(MatchRule);
if (!ParentRule)
continue;
@@ -1020,6 +1143,25 @@ void Sema::ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc) {
OptimizeOffPragmaLocation = PragmaLoc;
}
+void Sema::ActOnPragmaMSOptimize(SourceLocation Loc, bool IsOn) {
+ if (!CurContext->getRedeclContext()->isFileContext()) {
+ Diag(Loc, diag::err_pragma_expected_file_scope) << "optimize";
+ return;
+ }
+
+ MSPragmaOptimizeIsOn = IsOn;
+}
+
+void Sema::ActOnPragmaMSFunction(
+ SourceLocation Loc, const llvm::SmallVectorImpl<StringRef> &NoBuiltins) {
+ if (!CurContext->getRedeclContext()->isFileContext()) {
+ Diag(Loc, diag::err_pragma_expected_file_scope) << "function";
+ return;
+ }
+
+ MSFunctionNoBuiltins.insert(NoBuiltins.begin(), NoBuiltins.end());
+}
+
void Sema::AddRangeBasedOptnone(FunctionDecl *FD) {
// In the future, check other pragmas if they're implemented (e.g. pragma
// optimize 0 will probably map to this functionality too).
@@ -1027,6 +1169,29 @@ void Sema::AddRangeBasedOptnone(FunctionDecl *FD) {
AddOptnoneAttributeIfNoConflicts(FD, OptimizeOffPragmaLocation);
}
+void Sema::AddSectionMSAllocText(FunctionDecl *FD) {
+ if (!FD->getIdentifier())
+ return;
+
+ StringRef Name = FD->getName();
+ auto It = FunctionToSectionMap.find(Name);
+ if (It != FunctionToSectionMap.end()) {
+ StringRef Section;
+ SourceLocation Loc;
+ std::tie(Section, Loc) = It->second;
+
+ if (!FD->hasAttr<SectionAttr>())
+ FD->addAttr(SectionAttr::CreateImplicit(Context, Section));
+ }
+}
+
+void Sema::ModifyFnAttributesMSPragmaOptimize(FunctionDecl *FD) {
+ // Don't modify the function attributes if it's "on". "on" resets the
+ // optimizations to the ones listed on the command line
+ if (!MSPragmaOptimizeIsOn)
+ AddOptnoneAttributeIfNoConflicts(FD, FD->getBeginLoc());
+}
+
void Sema::AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD,
SourceLocation Loc) {
// Don't add a conflicting attribute. No diagnostic is needed.
@@ -1041,6 +1206,13 @@ void Sema::AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD,
FD->addAttr(NoInlineAttr::CreateImplicit(Context, Loc));
}
+void Sema::AddImplicitMSFunctionNoBuiltinAttr(FunctionDecl *FD) {
+ SmallVector<StringRef> V(MSFunctionNoBuiltins.begin(),
+ MSFunctionNoBuiltins.end());
+ if (!MSFunctionNoBuiltins.empty())
+ FD->addAttr(NoBuiltinAttr::CreateImplicit(Context, V.data(), V.size()));
+}
+
typedef std::vector<std::pair<unsigned, SourceLocation> > VisStack;
enum : unsigned { NoVisibility = ~0U };
@@ -1113,23 +1285,45 @@ void Sema::ActOnPragmaFPContract(SourceLocation Loc,
CurFPFeatures = NewFPFeatures.applyOverrides(getLangOpts());
}
-void Sema::ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled) {
+void Sema::ActOnPragmaFPValueChangingOption(SourceLocation Loc,
+ PragmaFPKind Kind, bool IsEnabled) {
+ if (IsEnabled) {
+ // For value unsafe context, combining this pragma with eval method
+ // setting is not recommended. See comment in function FixupInvocation#506.
+ int Reason = -1;
+ if (getLangOpts().getFPEvalMethod() != LangOptions::FEM_UnsetOnCommandLine)
+ // Eval method set using the option 'ffp-eval-method'.
+ Reason = 1;
+ if (PP.getLastFPEvalPragmaLocation().isValid())
+ // Eval method set using the '#pragma clang fp eval_method'.
+ // We could have both an option and a pragma used to the set the eval
+ // method. The pragma overrides the option in the command line. The Reason
+ // of the diagnostic is overriden too.
+ Reason = 0;
+ if (Reason != -1)
+ Diag(Loc, diag::err_setting_eval_method_used_in_unsafe_context)
+ << Reason << (Kind == PFK_Reassociate ? 4 : 5);
+ }
+
FPOptionsOverride NewFPFeatures = CurFPFeatureOverrides();
- NewFPFeatures.setAllowFPReassociateOverride(IsEnabled);
+ switch (Kind) {
+ case PFK_Reassociate:
+ NewFPFeatures.setAllowFPReassociateOverride(IsEnabled);
+ break;
+ case PFK_Reciprocal:
+ NewFPFeatures.setAllowReciprocalOverride(IsEnabled);
+ break;
+ default:
+ llvm_unreachable("unhandled value changing pragma fp");
+ }
+
FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewFPFeatures);
CurFPFeatures = NewFPFeatures.applyOverrides(getLangOpts());
}
-void Sema::setRoundingMode(SourceLocation Loc, llvm::RoundingMode FPR) {
- // C2x: 7.6.2p3 If the FE_DYNAMIC mode is specified and FENV_ACCESS is "off",
- // the translator may assume that the default rounding mode is in effect.
- if (FPR == llvm::RoundingMode::Dynamic &&
- !CurFPFeatures.getAllowFEnvAccess() &&
- CurFPFeatures.getFPExceptionMode() == LangOptions::FPE_Ignore)
- FPR = llvm::RoundingMode::NearestTiesToEven;
-
+void Sema::ActOnPragmaFEnvRound(SourceLocation Loc, llvm::RoundingMode FPR) {
FPOptionsOverride NewFPFeatures = CurFPFeatureOverrides();
- NewFPFeatures.setRoundingModeOverride(FPR);
+ NewFPFeatures.setConstRoundingModeOverride(FPR);
FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewFPFeatures);
CurFPFeatures = NewFPFeatures.applyOverrides(getLangOpts());
}
@@ -1137,14 +1331,13 @@ void Sema::setRoundingMode(SourceLocation Loc, llvm::RoundingMode FPR) {
void Sema::setExceptionMode(SourceLocation Loc,
LangOptions::FPExceptionModeKind FPE) {
FPOptionsOverride NewFPFeatures = CurFPFeatureOverrides();
- NewFPFeatures.setFPExceptionModeOverride(FPE);
+ NewFPFeatures.setSpecifiedExceptionModeOverride(FPE);
FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewFPFeatures);
CurFPFeatures = NewFPFeatures.applyOverrides(getLangOpts());
}
void Sema::ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled) {
FPOptionsOverride NewFPFeatures = CurFPFeatureOverrides();
- auto LO = getLangOpts();
if (IsEnabled) {
// Verify Microsoft restriction:
// You can't enable fenv_access unless precise semantics are enabled.
@@ -1152,16 +1345,19 @@ void Sema::ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled) {
// pragma, or by using the /fp:precise or /fp:strict compiler options
if (!isPreciseFPEnabled())
Diag(Loc, diag::err_pragma_fenv_requires_precise);
- NewFPFeatures.setAllowFEnvAccessOverride(true);
- // Enabling FENV access sets the RoundingMode to Dynamic.
- // and ExceptionBehavior to Strict
- NewFPFeatures.setRoundingModeOverride(llvm::RoundingMode::Dynamic);
- NewFPFeatures.setFPExceptionModeOverride(LangOptions::FPE_Strict);
- } else {
- NewFPFeatures.setAllowFEnvAccessOverride(false);
}
+ NewFPFeatures.setAllowFEnvAccessOverride(IsEnabled);
+ NewFPFeatures.setRoundingMathOverride(IsEnabled);
+ FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewFPFeatures);
+ CurFPFeatures = NewFPFeatures.applyOverrides(getLangOpts());
+}
+
+void Sema::ActOnPragmaCXLimitedRange(SourceLocation Loc,
+ LangOptions::ComplexRangeKind Range) {
+ FPOptionsOverride NewFPFeatures = CurFPFeatureOverrides();
+ NewFPFeatures.setComplexRangeOverride(Range);
FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewFPFeatures);
- CurFPFeatures = NewFPFeatures.applyOverrides(LO);
+ CurFPFeatures = NewFPFeatures.applyOverrides(getLangOpts());
}
void Sema::ActOnPragmaFPExceptions(SourceLocation Loc,
@@ -1212,8 +1408,9 @@ void Sema::PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc) {
}
template <typename Ty>
-static bool checkCommonAttributeFeatures(Sema& S, const Ty *Node,
- const ParsedAttr& A) {
+static bool checkCommonAttributeFeatures(Sema &S, const Ty *Node,
+ const ParsedAttr &A,
+ bool SkipArgCountCheck) {
// Several attributes carry different semantics than the parsing requires, so
// those are opted out of the common argument checks.
//
@@ -1239,26 +1436,30 @@ static bool checkCommonAttributeFeatures(Sema& S, const Ty *Node,
if (A.hasCustomParsing())
return false;
- if (A.getMinArgs() == A.getMaxArgs()) {
- // If there are no optional arguments, then checking for the argument count
- // is trivial.
- if (!A.checkExactlyNumArgs(S, A.getMinArgs()))
- return true;
- } else {
- // There are optional arguments, so checking is slightly more involved.
- if (A.getMinArgs() && !A.checkAtLeastNumArgs(S, A.getMinArgs()))
- return true;
- else if (!A.hasVariadicArg() && A.getMaxArgs() &&
- !A.checkAtMostNumArgs(S, A.getMaxArgs()))
- return true;
+ if (!SkipArgCountCheck) {
+ if (A.getMinArgs() == A.getMaxArgs()) {
+ // If there are no optional arguments, then checking for the argument
+ // count is trivial.
+ if (!A.checkExactlyNumArgs(S, A.getMinArgs()))
+ return true;
+ } else {
+ // There are optional arguments, so checking is slightly more involved.
+ if (A.getMinArgs() && !A.checkAtLeastNumArgs(S, A.getMinArgs()))
+ return true;
+ else if (!A.hasVariadicArg() && A.getMaxArgs() &&
+ !A.checkAtMostNumArgs(S, A.getMaxArgs()))
+ return true;
+ }
}
return false;
}
-bool Sema::checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A) {
- return ::checkCommonAttributeFeatures(*this, D, A);
+bool Sema::checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A,
+ bool SkipArgCountCheck) {
+ return ::checkCommonAttributeFeatures(*this, D, A, SkipArgCountCheck);
}
-bool Sema::checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A) {
- return ::checkCommonAttributeFeatures(*this, S, A);
+bool Sema::checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A,
+ bool SkipArgCountCheck) {
+ return ::checkCommonAttributeFeatures(*this, S, A, SkipArgCountCheck);
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaAvailability.cpp b/contrib/llvm-project/clang/lib/Sema/SemaAvailability.cpp
index bb704b1066cf..846a31a79673 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaAvailability.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaAvailability.cpp
@@ -19,6 +19,7 @@
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
+#include <optional>
using namespace clang;
using namespace sema;
@@ -57,7 +58,7 @@ static const AvailabilityAttr *getAttrForPlatform(ASTContext &Context,
/// \param D The declaration to check.
/// \param Message If non-null, this will be populated with the message from
/// the availability attribute that is selected.
-/// \param ClassReceiver If we're checking the the method of a class message
+/// \param ClassReceiver If we're checking the method of a class message
/// send, the class. Otherwise nullptr.
static std::pair<AvailabilityResult, const NamedDecl *>
ShouldDiagnoseAvailabilityOfDecl(Sema &S, const NamedDecl *D,
@@ -122,6 +123,18 @@ ShouldDiagnoseAvailabilityInContext(Sema &S, AvailabilityResult K,
const NamedDecl *OffendingDecl) {
assert(K != AR_Available && "Expected an unavailable declaration here!");
+ // If this was defined using CF_OPTIONS, etc. then ignore the diagnostic.
+ auto DeclLoc = Ctx->getBeginLoc();
+ // This is only a problem in Foundation's C++ implementation for CF_OPTIONS.
+ if (DeclLoc.isMacroID() && S.getLangOpts().CPlusPlus &&
+ isa<TypedefDecl>(OffendingDecl)) {
+ StringRef MacroName = S.getPreprocessor().getImmediateMacroName(DeclLoc);
+ if (MacroName == "CF_OPTIONS" || MacroName == "OBJC_OPTIONS" ||
+ MacroName == "SWIFT_OPTIONS" || MacroName == "NS_OPTIONS") {
+ return false;
+ }
+ }
+
// Checks if we should emit the availability diagnostic in the context of C.
auto CheckContext = [&](const Decl *C) {
if (K == AR_NotYetIntroduced) {
@@ -192,6 +205,9 @@ shouldDiagnoseAvailabilityByDefault(const ASTContext &Context,
case llvm::Triple::MacOSX:
ForceAvailabilityFromVersion = VersionTuple(/*Major=*/10, /*Minor=*/13);
break;
+ case llvm::Triple::ShaderModel:
+ // Always enable availability diagnostics for shader models.
+ return true;
default:
// New targets should always warn about availability.
return Triple.getVendor() == llvm::Triple::Apple;
@@ -241,16 +257,16 @@ struct AttributeInsertion {
/// attribute argument.
/// \param SlotNames The vector that will be populated with slot names. In case
/// of unsuccessful parsing can contain invalid data.
-/// \returns A number of method parameters if parsing was successful, None
-/// otherwise.
-static Optional<unsigned>
+/// \returns A number of method parameters if parsing was successful,
+/// std::nullopt otherwise.
+static std::optional<unsigned>
tryParseObjCMethodName(StringRef Name, SmallVectorImpl<StringRef> &SlotNames,
const LangOptions &LangOpts) {
// Accept replacements starting with - or + as valid ObjC method names.
if (!Name.empty() && (Name.front() == '-' || Name.front() == '+'))
Name = Name.drop_front(1);
if (Name.empty())
- return None;
+ return std::nullopt;
Name.split(SlotNames, ':');
unsigned NumParams;
if (Name.back() == ':') {
@@ -260,7 +276,7 @@ tryParseObjCMethodName(StringRef Name, SmallVectorImpl<StringRef> &SlotNames,
} else {
if (SlotNames.size() != 1)
// Not a valid method name, just a colon-separated string.
- return None;
+ return std::nullopt;
NumParams = 0;
}
// Verify all slot names are valid.
@@ -268,29 +284,29 @@ tryParseObjCMethodName(StringRef Name, SmallVectorImpl<StringRef> &SlotNames,
for (StringRef S : SlotNames) {
if (S.empty())
continue;
- if (!isValidIdentifier(S, AllowDollar))
- return None;
+ if (!isValidAsciiIdentifier(S, AllowDollar))
+ return std::nullopt;
}
return NumParams;
}
/// Returns a source location in which it's appropriate to insert a new
/// attribute for the given declaration \D.
-static Optional<AttributeInsertion>
+static std::optional<AttributeInsertion>
createAttributeInsertion(const NamedDecl *D, const SourceManager &SM,
const LangOptions &LangOpts) {
if (isa<ObjCPropertyDecl>(D))
return AttributeInsertion::createInsertionAfter(D);
if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
if (MD->hasBody())
- return None;
+ return std::nullopt;
return AttributeInsertion::createInsertionAfter(D);
}
if (const auto *TD = dyn_cast<TagDecl>(D)) {
SourceLocation Loc =
Lexer::getLocForEndOfToken(TD->getInnerLocStart(), 0, SM, LangOpts);
if (Loc.isInvalid())
- return None;
+ return std::nullopt;
// Insert after the 'struct'/whatever keyword.
return AttributeInsertion::createInsertionAfter(Loc);
}
@@ -397,7 +413,7 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
return;
if (!S.getPreprocessor().isMacroDefined("API_AVAILABLE"))
return;
- Optional<AttributeInsertion> Insertion = createAttributeInsertion(
+ std::optional<AttributeInsertion> Insertion = createAttributeInsertion(
Enclosing, S.getSourceManager(), S.getLangOpts());
if (!Insertion)
return;
@@ -499,9 +515,9 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
if (const auto *MethodDecl = dyn_cast<ObjCMethodDecl>(ReferringDecl)) {
Selector Sel = MethodDecl->getSelector();
SmallVector<StringRef, 12> SelectorSlotNames;
- Optional<unsigned> NumParams = tryParseObjCMethodName(
+ std::optional<unsigned> NumParams = tryParseObjCMethodName(
Replacement, SelectorSlotNames, S.getLangOpts());
- if (NumParams && NumParams.getValue() == Sel.getNumArgs()) {
+ if (NumParams && *NumParams == Sel.getNumArgs()) {
assert(SelectorSlotNames.size() == Locs.size());
for (unsigned I = 0; I < Locs.size(); ++I) {
if (!Sel.getNameForSlot(I).empty()) {
@@ -520,6 +536,29 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
}
}
+ // We emit deprecation warning for deprecated specializations
+ // when their instantiation stacks originate outside
+ // of a system header, even if the diagnostics is suppresed at the
+ // point of definition.
+ SourceLocation InstantiationLoc =
+ S.getTopMostPointOfInstantiation(ReferringDecl);
+ bool ShouldAllowWarningInSystemHeader =
+ InstantiationLoc != Loc &&
+ !S.getSourceManager().isInSystemHeader(InstantiationLoc);
+ struct AllowWarningInSystemHeaders {
+ AllowWarningInSystemHeaders(DiagnosticsEngine &E,
+ bool AllowWarningInSystemHeaders)
+ : Engine(E), Prev(E.getSuppressSystemWarnings()) {
+ E.setSuppressSystemWarnings(!AllowWarningInSystemHeaders);
+ }
+ ~AllowWarningInSystemHeaders() { Engine.setSuppressSystemWarnings(Prev); }
+
+ private:
+ DiagnosticsEngine &Engine;
+ bool Prev;
+ } SystemWarningOverrideRAII(S.getDiagnostics(),
+ ShouldAllowWarningInSystemHeader);
+
if (!Message.empty()) {
S.Diag(Loc, diag_message) << ReferringDecl << Message << FixIts;
if (ObjCProperty)
@@ -630,8 +669,7 @@ public:
const CompoundStmt *Scope) {
LastDeclUSEFinder Visitor;
Visitor.D = D;
- for (auto I = Scope->body_rbegin(), E = Scope->body_rend(); I != E; ++I) {
- const Stmt *S = *I;
+ for (const Stmt *S : llvm::reverse(Scope->body())) {
if (!Visitor.TraverseStmt(const_cast<Stmt *>(S)))
return S;
}
@@ -896,6 +934,11 @@ void Sema::DiagnoseUnguardedAvailabilityViolations(Decl *D) {
return;
Body = FD->getBody();
+
+ if (auto *CD = dyn_cast<CXXConstructorDecl>(FD))
+ for (const CXXCtorInitializer *CI : CD->inits())
+ DiagnoseUnguardedAvailability(*this, D).IssueDiagnostics(CI->getInit());
+
} else if (auto *MD = dyn_cast<ObjCMethodDecl>(D))
Body = MD->getBody();
else if (auto *BD = dyn_cast<BlockDecl>(D))
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp
index 75364c10c154..6a66ecf6f94c 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp
@@ -22,8 +22,8 @@
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
+#include <optional>
using namespace clang;
template <typename AttrT> static bool hasExplicitAttr(const VarDecl *D) {
@@ -105,19 +105,37 @@ Sema::IdentifyCUDATarget(const ParsedAttributesView &Attrs) {
}
template <typename A>
-static bool hasAttr(const FunctionDecl *D, bool IgnoreImplicitAttr) {
+static bool hasAttr(const Decl *D, bool IgnoreImplicitAttr) {
return D->hasAttrs() && llvm::any_of(D->getAttrs(), [&](Attr *Attribute) {
return isa<A>(Attribute) &&
!(IgnoreImplicitAttr && Attribute->isImplicit());
});
}
+Sema::CUDATargetContextRAII::CUDATargetContextRAII(Sema &S_,
+ CUDATargetContextKind K,
+ Decl *D)
+ : S(S_) {
+ SavedCtx = S.CurCUDATargetCtx;
+ assert(K == CTCK_InitGlobalVar);
+ auto *VD = dyn_cast_or_null<VarDecl>(D);
+ if (VD && VD->hasGlobalStorage() && !VD->isStaticLocal()) {
+ auto Target = CFT_Host;
+ if ((hasAttr<CUDADeviceAttr>(VD, /*IgnoreImplicit=*/true) &&
+ !hasAttr<CUDAHostAttr>(VD, /*IgnoreImplicit=*/true)) ||
+ hasAttr<CUDASharedAttr>(VD, /*IgnoreImplicit=*/true) ||
+ hasAttr<CUDAConstantAttr>(VD, /*IgnoreImplicit=*/true))
+ Target = CFT_Device;
+ S.CurCUDATargetCtx = {Target, K, VD};
+ }
+}
+
/// IdentifyCUDATarget - Determine the CUDA compilation target for this function
Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr) {
- // Code that lives outside a function is run on the host.
+ // Code that lives outside a function gets the target from CurCUDATargetCtx.
if (D == nullptr)
- return CFT_Host;
+ return CurCUDATargetCtx.Target;
if (D->hasAttr<CUDAInvalidTargetAttr>())
return CFT_InvalidTarget;
@@ -145,9 +163,11 @@ Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D,
Sema::CUDAVariableTarget Sema::IdentifyCUDATarget(const VarDecl *Var) {
if (Var->hasAttr<HIPManagedAttr>())
return CVT_Unified;
- if (Var->isConstexpr() && !hasExplicitAttr<CUDAConstantAttr>(Var))
- return CVT_Both;
- if (Var->getType().isConstQualified() && Var->hasAttr<CUDAConstantAttr>() &&
+ // Only constexpr and const variabless with implicit constant attribute
+ // are emitted on both sides. Such variables are promoted to device side
+ // only if they have static constant intializers on device side.
+ if ((Var->isConstexpr() || Var->getType().isConstQualified()) &&
+ Var->hasAttr<CUDAConstantAttr>() &&
!hasExplicitAttr<CUDAConstantAttr>(Var))
return CVT_Both;
if (Var->hasAttr<CUDADeviceAttr>() || Var->hasAttr<CUDAConstantAttr>() ||
@@ -205,6 +225,15 @@ Sema::CUDAFunctionPreference
Sema::IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
assert(Callee && "Callee must be valid.");
+
+ // Treat ctor/dtor as host device function in device var initializer to allow
+ // trivial ctor/dtor without device attr to be used. Non-trivial ctor/dtor
+ // will be diagnosed by checkAllowedCUDAInitializer.
+ if (Caller == nullptr && CurCUDATargetCtx.Kind == CTCK_InitGlobalVar &&
+ CurCUDATargetCtx.Target == CFT_Device &&
+ (isa<CXXConstructorDecl>(Callee) || isa<CXXDestructorDecl>(Callee)))
+ return CFP_HostDevice;
+
CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller);
CUDAFunctionTarget CalleeTarget = IdentifyCUDATarget(Callee);
@@ -229,6 +258,15 @@ Sema::IdentifyCUDAPreference(const FunctionDecl *Caller,
(CallerTarget == CFT_Global && CalleeTarget == CFT_Device))
return CFP_Native;
+ // HipStdPar mode is special, in that assessing whether a device side call to
+ // a host target is deferred to a subsequent pass, and cannot unambiguously be
+ // adjudicated in the AST, hence we optimistically allow them to pass here.
+ if (getLangOpts().HIPStdPar &&
+ (CallerTarget == CFT_Global || CallerTarget == CFT_Device ||
+ CallerTarget == CFT_HostDevice) &&
+ CalleeTarget == CFT_Host)
+ return CFP_HostDevice;
+
// (d) HostDevice behavior depends on compilation mode.
if (CallerTarget == CFT_HostDevice) {
// It's OK to call a compilation-mode matching function from an HD one.
@@ -336,7 +374,7 @@ bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
if (!InClass || HasExplicitAttr)
return false;
- llvm::Optional<CUDAFunctionTarget> InferredTarget;
+ std::optional<CUDAFunctionTarget> InferredTarget;
// We're going to invoke special member lookup; mark that these special
// members are called from this one, and not from its caller.
@@ -353,9 +391,7 @@ bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
}
if (!ClassDecl->isAbstract()) {
- for (const auto &VB : ClassDecl->vbases()) {
- Bases.push_back(&VB);
- }
+ llvm::append_range(Bases, llvm::make_pointer_range(ClassDecl->vbases()));
}
for (const auto *B : Bases) {
@@ -377,17 +413,16 @@ bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
continue;
CUDAFunctionTarget BaseMethodTarget = IdentifyCUDATarget(SMOR.getMethod());
- if (!InferredTarget.hasValue()) {
+ if (!InferredTarget) {
InferredTarget = BaseMethodTarget;
} else {
bool ResolutionError = resolveCalleeCUDATargetConflict(
- InferredTarget.getValue(), BaseMethodTarget,
- InferredTarget.getPointer());
+ *InferredTarget, BaseMethodTarget, &*InferredTarget);
if (ResolutionError) {
if (Diagnose) {
Diag(ClassDecl->getLocation(),
diag::note_implicit_member_target_infer_collision)
- << (unsigned)CSM << InferredTarget.getValue() << BaseMethodTarget;
+ << (unsigned)CSM << *InferredTarget << BaseMethodTarget;
}
MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
return true;
@@ -421,18 +456,16 @@ bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CUDAFunctionTarget FieldMethodTarget =
IdentifyCUDATarget(SMOR.getMethod());
- if (!InferredTarget.hasValue()) {
+ if (!InferredTarget) {
InferredTarget = FieldMethodTarget;
} else {
bool ResolutionError = resolveCalleeCUDATargetConflict(
- InferredTarget.getValue(), FieldMethodTarget,
- InferredTarget.getPointer());
+ *InferredTarget, FieldMethodTarget, &*InferredTarget);
if (ResolutionError) {
if (Diagnose) {
Diag(ClassDecl->getLocation(),
diag::note_implicit_member_target_infer_collision)
- << (unsigned)CSM << InferredTarget.getValue()
- << FieldMethodTarget;
+ << (unsigned)CSM << *InferredTarget << FieldMethodTarget;
}
MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context));
return true;
@@ -444,10 +477,10 @@ bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
// If no target was inferred, mark this member as __host__ __device__;
// it's the least restrictive option that can be invoked from any target.
bool NeedsH = true, NeedsD = true;
- if (InferredTarget.hasValue()) {
- if (InferredTarget.getValue() == CFT_Device)
+ if (InferredTarget) {
+ if (*InferredTarget == CFT_Device)
NeedsH = false;
- else if (InferredTarget.getValue() == CFT_Host)
+ else if (*InferredTarget == CFT_Host)
NeedsD = false;
}
@@ -590,6 +623,8 @@ bool HasAllowedCUDADeviceStaticInitializer(Sema &S, VarDecl *VD,
};
auto IsConstantInit = [&](const Expr *Init) {
assert(Init);
+ ASTContext::CUDAConstantEvalContextRAII EvalCtx(S.Context,
+ /*NoWronSidedVars=*/true);
return Init->isConstantInitializer(S.Context,
VD->getType()->isReferenceType());
};
@@ -606,6 +641,13 @@ bool HasAllowedCUDADeviceStaticInitializer(Sema &S, VarDecl *VD,
} // namespace
void Sema::checkAllowedCUDAInitializer(VarDecl *VD) {
+ // Return early if VD is inside a non-instantiated template function since
+ // the implicit constructor is not defined yet.
+ if (const FunctionDecl *FD =
+ dyn_cast_or_null<FunctionDecl>(VD->getDeclContext()))
+ if (FD->isDependentContext())
+ return;
+
// Do not check dependent variables since the ctor/dtor/initializer are not
// determined. Do it after instantiation.
if (VD->isInvalidDecl() || !VD->hasInit() || !VD->hasGlobalStorage() ||
@@ -645,6 +687,27 @@ void Sema::checkAllowedCUDAInitializer(VarDecl *VD) {
}
}
+void Sema::CUDARecordImplicitHostDeviceFuncUsedByDevice(
+ const FunctionDecl *Callee) {
+ FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
+ if (!Caller)
+ return;
+
+ if (!isCUDAImplicitHostDeviceFunction(Callee))
+ return;
+
+ CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller);
+
+ // Record whether an implicit host device function is used on device side.
+ if (CallerTarget != CFT_Device && CallerTarget != CFT_Global &&
+ (CallerTarget != CFT_HostDevice ||
+ (isCUDAImplicitHostDeviceFunction(Caller) &&
+ !getASTContext().CUDAImplicitHostDeviceFunUsedByDevice.count(Caller))))
+ return;
+
+ getASTContext().CUDAImplicitHostDeviceFunUsedByDevice.insert(Callee);
+}
+
// With -fcuda-host-device-constexpr, an unattributed constexpr function is
// treated as implicitly __host__ __device__, unless:
// * it is a variadic function (device-side variadic functions are not
@@ -669,6 +732,18 @@ void Sema::maybeAddCUDAHostDeviceAttrs(FunctionDecl *NewD,
return;
}
+ // If a template function has no host/device/global attributes,
+ // make it implicitly host device function.
+ if (getLangOpts().OffloadImplicitHostDeviceTemplates &&
+ !NewD->hasAttr<CUDAHostAttr>() && !NewD->hasAttr<CUDADeviceAttr>() &&
+ !NewD->hasAttr<CUDAGlobalAttr>() &&
+ (NewD->getDescribedFunctionTemplate() ||
+ NewD->isFunctionTemplateSpecialization())) {
+ NewD->addAttr(CUDAHostAttr::CreateImplicit(Context));
+ NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
+ return;
+ }
+
if (!getLangOpts().CUDAHostDeviceConstexpr || !NewD->isConstexpr() ||
NewD->isVariadic() || NewD->hasAttr<CUDAHostAttr>() ||
NewD->hasAttr<CUDADeviceAttr>() || NewD->hasAttr<CUDAGlobalAttr>())
@@ -713,12 +788,12 @@ void Sema::MaybeAddCUDAConstantAttr(VarDecl *VD) {
// Do not promote dependent variables since the cotr/dtor/initializer are
// not determined. Do it after instantiation.
if (getLangOpts().CUDAIsDevice && !VD->hasAttr<CUDAConstantAttr>() &&
- !VD->hasAttr<CUDAConstantAttr>() && !VD->hasAttr<CUDASharedAttr>() &&
+ !VD->hasAttr<CUDASharedAttr>() &&
(VD->isFileVarDecl() || VD->isStaticDataMember()) &&
!IsDependentVar(VD) &&
- (VD->isConstexpr() || (VD->getType().isConstQualified() &&
- HasAllowedCUDADeviceStaticInitializer(
- *this, VD, CICK_DeviceOrConstant)))) {
+ ((VD->isConstexpr() || VD->getType().isConstQualified()) &&
+ HasAllowedCUDADeviceStaticInitializer(*this, VD,
+ CICK_DeviceOrConstant))) {
VD->addAttr(CUDAConstantAttr::CreateImplicit(getASTContext()));
}
}
@@ -726,8 +801,9 @@ void Sema::MaybeAddCUDAConstantAttr(VarDecl *VD) {
Sema::SemaDiagnosticBuilder Sema::CUDADiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID) {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
+ FunctionDecl *CurFunContext = getCurFunctionDecl(/*AllowLambda=*/true);
SemaDiagnosticBuilder::Kind DiagKind = [&] {
- if (!isa<FunctionDecl>(CurContext))
+ if (!CurFunContext)
return SemaDiagnosticBuilder::K_Nop;
switch (CurrentCUDATarget()) {
case CFT_Global:
@@ -741,7 +817,7 @@ Sema::SemaDiagnosticBuilder Sema::CUDADiagIfDeviceCode(SourceLocation Loc,
return SemaDiagnosticBuilder::K_Nop;
if (IsLastErrorImmediate && Diags.getDiagnosticIDs()->isBuiltinNote(DiagID))
return SemaDiagnosticBuilder::K_Immediate;
- return (getEmissionStatus(cast<FunctionDecl>(CurContext)) ==
+ return (getEmissionStatus(CurFunContext) ==
FunctionEmissionStatus::Emitted)
? SemaDiagnosticBuilder::K_ImmediateWithCallStack
: SemaDiagnosticBuilder::K_Deferred;
@@ -749,15 +825,15 @@ Sema::SemaDiagnosticBuilder Sema::CUDADiagIfDeviceCode(SourceLocation Loc,
return SemaDiagnosticBuilder::K_Nop;
}
}();
- return SemaDiagnosticBuilder(DiagKind, Loc, DiagID,
- dyn_cast<FunctionDecl>(CurContext), *this);
+ return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, CurFunContext, *this);
}
Sema::SemaDiagnosticBuilder Sema::CUDADiagIfHostCode(SourceLocation Loc,
unsigned DiagID) {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
+ FunctionDecl *CurFunContext = getCurFunctionDecl(/*AllowLambda=*/true);
SemaDiagnosticBuilder::Kind DiagKind = [&] {
- if (!isa<FunctionDecl>(CurContext))
+ if (!CurFunContext)
return SemaDiagnosticBuilder::K_Nop;
switch (CurrentCUDATarget()) {
case CFT_Host:
@@ -770,7 +846,7 @@ Sema::SemaDiagnosticBuilder Sema::CUDADiagIfHostCode(SourceLocation Loc,
return SemaDiagnosticBuilder::K_Nop;
if (IsLastErrorImmediate && Diags.getDiagnosticIDs()->isBuiltinNote(DiagID))
return SemaDiagnosticBuilder::K_Immediate;
- return (getEmissionStatus(cast<FunctionDecl>(CurContext)) ==
+ return (getEmissionStatus(CurFunContext) ==
FunctionEmissionStatus::Emitted)
? SemaDiagnosticBuilder::K_ImmediateWithCallStack
: SemaDiagnosticBuilder::K_Deferred;
@@ -778,21 +854,20 @@ Sema::SemaDiagnosticBuilder Sema::CUDADiagIfHostCode(SourceLocation Loc,
return SemaDiagnosticBuilder::K_Nop;
}
}();
- return SemaDiagnosticBuilder(DiagKind, Loc, DiagID,
- dyn_cast<FunctionDecl>(CurContext), *this);
+ return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, CurFunContext, *this);
}
bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
assert(Callee && "Callee may not be null.");
- auto &ExprEvalCtx = ExprEvalContexts.back();
+ const auto &ExprEvalCtx = currentEvaluationContext();
if (ExprEvalCtx.isUnevaluated() || ExprEvalCtx.isConstantEvaluated())
return true;
// FIXME: Is bailing out early correct here? Should we instead assume that
// the caller is a global initializer?
- FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext);
+ FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
if (!Caller)
return true;
@@ -817,8 +892,13 @@ bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
}
}();
- if (DiagKind == SemaDiagnosticBuilder::K_Nop)
+ if (DiagKind == SemaDiagnosticBuilder::K_Nop) {
+ // For -fgpu-rdc, keep track of external kernels used by host functions.
+ if (LangOpts.CUDAIsDevice && LangOpts.GPURelocatableDeviceCode &&
+ Callee->hasAttr<CUDAGlobalAttr>() && !Callee->isDefined())
+ getASTContext().CUDAExternalDeviceDeclODRUsedByHost.insert(Callee);
return true;
+ }
// Avoid emitting this error twice for the same location. Using a hashtable
// like this is unfortunate, but because we must continue parsing as normal
@@ -858,7 +938,7 @@ void Sema::CUDACheckLambdaCapture(CXXMethodDecl *Callee,
// File-scope lambda can only do init captures for global variables, which
// results in passing by value for these global variables.
- FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext);
+ FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
if (!Caller)
return;
@@ -873,15 +953,19 @@ void Sema::CUDACheckLambdaCapture(CXXMethodDecl *Callee,
if (!ShouldCheck || !Capture.isReferenceCapture())
return;
auto DiagKind = SemaDiagnosticBuilder::K_Deferred;
- if (Capture.isVariableCapture()) {
+ if (Capture.isVariableCapture() && !getLangOpts().HIPStdPar) {
SemaDiagnosticBuilder(DiagKind, Capture.getLocation(),
diag::err_capture_bad_target, Callee, *this)
<< Capture.getVariable();
} else if (Capture.isThisCapture()) {
+ // Capture of this pointer is allowed since this pointer may be pointing to
+ // managed memory which is accessible on both device and host sides. It only
+ // results in invalid memory access if this pointer points to memory not
+ // accessible on device side.
SemaDiagnosticBuilder(DiagKind, Capture.getLocation(),
- diag::err_capture_bad_target_this_ptr, Callee, *this);
+ diag::warn_maybe_capture_bad_target_this_ptr, Callee,
+ *this);
}
- return;
}
void Sema::CUDASetLambdaAttrs(CXXMethodDecl *Method) {
@@ -908,7 +992,14 @@ void Sema::checkCUDATargetOverload(FunctionDecl *NewFD,
// HD/global functions "exist" in some sense on both the host and device, so
// should have the same implementation on both sides.
if (NewTarget != OldTarget &&
- ((NewTarget == CFT_HostDevice) || (OldTarget == CFT_HostDevice) ||
+ ((NewTarget == CFT_HostDevice &&
+ !(LangOpts.OffloadImplicitHostDeviceTemplates &&
+ isCUDAImplicitHostDeviceFunction(NewFD) &&
+ OldTarget == CFT_Device)) ||
+ (OldTarget == CFT_HostDevice &&
+ !(LangOpts.OffloadImplicitHostDeviceTemplates &&
+ isCUDAImplicitHostDeviceFunction(OldFD) &&
+ NewTarget == CFT_Device)) ||
(NewTarget == CFT_Global) || (OldTarget == CFT_Global)) &&
!IsOverload(NewFD, OldFD, /* UseMemberUsingDeclRules = */ false,
/* ConsiderCudaAttrs = */ false)) {
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp
index 1c8f6329bd67..44a40215b90d 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp
@@ -99,34 +99,53 @@ DeclContext *Sema::computeDeclContext(const CXXScopeSpec &SS,
if (ClassTemplateDecl *ClassTemplate
= dyn_cast_or_null<ClassTemplateDecl>(
SpecType->getTemplateName().getAsTemplateDecl())) {
- QualType ContextType
- = Context.getCanonicalType(QualType(SpecType, 0));
-
- // If the type of the nested name specifier is the same as the
- // injected class name of the named class template, we're entering
- // into that class template definition.
- QualType Injected
- = ClassTemplate->getInjectedClassNameSpecialization();
- if (Context.hasSameType(Injected, ContextType))
- return ClassTemplate->getTemplatedDecl();
+ QualType ContextType =
+ Context.getCanonicalType(QualType(SpecType, 0));
+
+ // FIXME: The fallback on the search of partial
+ // specialization using ContextType should be eventually removed since
+ // it doesn't handle the case of constrained template parameters
+ // correctly. Currently removing this fallback would change the
+ // diagnostic output for invalid code in a number of tests.
+ ClassTemplatePartialSpecializationDecl *PartialSpec = nullptr;
+ ArrayRef<TemplateParameterList *> TemplateParamLists =
+ SS.getTemplateParamLists();
+ if (!TemplateParamLists.empty()) {
+ unsigned Depth = ClassTemplate->getTemplateParameters()->getDepth();
+ auto L = find_if(TemplateParamLists,
+ [Depth](TemplateParameterList *TPL) {
+ return TPL->getDepth() == Depth;
+ });
+ if (L != TemplateParamLists.end()) {
+ void *Pos = nullptr;
+ PartialSpec = ClassTemplate->findPartialSpecialization(
+ SpecType->template_arguments(), *L, Pos);
+ }
+ } else {
+ PartialSpec = ClassTemplate->findPartialSpecialization(ContextType);
+ }
- // If the type of the nested name specifier is the same as the
- // type of one of the class template's class template partial
- // specializations, we're entering into the definition of that
- // class template partial specialization.
- if (ClassTemplatePartialSpecializationDecl *PartialSpec
- = ClassTemplate->findPartialSpecialization(ContextType)) {
+ if (PartialSpec) {
// A declaration of the partial specialization must be visible.
// We can always recover here, because this only happens when we're
// entering the context, and that can't happen in a SFINAE context.
- assert(!isSFINAEContext() &&
- "partial specialization scope specifier in SFINAE context?");
- if (!hasVisibleDeclaration(PartialSpec))
+ assert(!isSFINAEContext() && "partial specialization scope "
+ "specifier in SFINAE context?");
+ if (PartialSpec->hasDefinition() &&
+ !hasReachableDefinition(PartialSpec))
diagnoseMissingImport(SS.getLastQualifierNameLoc(), PartialSpec,
MissingImportKind::PartialSpecialization,
- /*Recover*/true);
+ true);
return PartialSpec;
}
+
+ // If the type of the nested name specifier is the same as the
+ // injected class name of the named class template, we're entering
+ // into that class template definition.
+ QualType Injected =
+ ClassTemplate->getInjectedClassNameSpecialization();
+ if (Context.hasSameType(Injected, ContextType))
+ return ClassTemplate->getTemplatedDecl();
}
} else if (const RecordType *RecordT = NNSType->getAs<RecordType>()) {
// The nested name specifier refers to a member of a class template.
@@ -243,8 +262,8 @@ bool Sema::RequireCompleteEnumDecl(EnumDecl *EnumD, SourceLocation L,
if (EnumD->isCompleteDefinition()) {
// If we know about the definition but it is not visible, complain.
NamedDecl *SuggestedDef = nullptr;
- if (!hasVisibleDefinition(EnumD, &SuggestedDef,
- /*OnlyNeedComplete*/false)) {
+ if (!hasReachableDefinition(EnumD, &SuggestedDef,
+ /*OnlyNeedComplete*/ false)) {
// If the user is going to see an error here, recover by making the
// definition visible.
bool TreatAsComplete = !isSFINAEContext();
@@ -292,6 +311,11 @@ bool Sema::ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc,
bool Sema::ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc,
CXXScopeSpec &SS) {
+ if (getCurLambda()) {
+ Diag(SuperLoc, diag::err_super_in_lambda_unsupported);
+ return true;
+ }
+
CXXRecordDecl *RD = nullptr;
for (Scope *S = getCurScope(); S; S = S->getParent()) {
if (S->isFunctionScope()) {
@@ -308,9 +332,6 @@ bool Sema::ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
if (!RD) {
Diag(SuperLoc, diag::err_invalid_super_scope);
return true;
- } else if (RD->isLambda()) {
- Diag(SuperLoc, diag::err_super_in_lambda_unsupported);
- return true;
} else if (RD->getNumBases() == 0) {
Diag(SuperLoc, diag::err_no_base_classes) << RD->getName();
return true;
@@ -394,55 +415,10 @@ NamedDecl *Sema::FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS) {
return nullptr;
}
-bool Sema::isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
- NestedNameSpecInfo &IdInfo) {
- QualType ObjectType = GetTypeFromParser(IdInfo.ObjectType);
- LookupResult Found(*this, IdInfo.Identifier, IdInfo.IdentifierLoc,
- LookupNestedNameSpecifierName);
-
- // Determine where to perform name lookup
- DeclContext *LookupCtx = nullptr;
- bool isDependent = false;
- if (!ObjectType.isNull()) {
- // This nested-name-specifier occurs in a member access expression, e.g.,
- // x->B::f, and we are looking into the type of the object.
- assert(!SS.isSet() && "ObjectType and scope specifier cannot coexist");
- LookupCtx = computeDeclContext(ObjectType);
- isDependent = ObjectType->isDependentType();
- } else if (SS.isSet()) {
- // This nested-name-specifier occurs after another nested-name-specifier,
- // so long into the context associated with the prior nested-name-specifier.
- LookupCtx = computeDeclContext(SS, false);
- isDependent = isDependentScopeSpecifier(SS);
- Found.setContextRange(SS.getRange());
- }
-
- if (LookupCtx) {
- // Perform "qualified" name lookup into the declaration context we
- // computed, which is either the type of the base of a member access
- // expression or the declaration context associated with a prior
- // nested-name-specifier.
-
- // The declaration context must be complete.
- if (!LookupCtx->isDependentContext() &&
- RequireCompleteDeclContext(SS, LookupCtx))
- return false;
-
- LookupQualifiedName(Found, LookupCtx);
- } else if (isDependent) {
- return false;
- } else {
- LookupName(Found, S);
- }
- Found.suppressDiagnostics();
-
- return Found.getAsSingle<NamespaceDecl>();
-}
-
namespace {
// Callback to only accept typo corrections that can be a valid C++ member
-// intializer: either a non-static field member or a base class.
+// initializer: either a non-static field member or a base class.
class NestedNameSpecifierValidatorCCC final
: public CorrectionCandidateCallback {
public:
@@ -736,8 +712,15 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
QualType T =
Context.getTypeDeclType(cast<TypeDecl>(SD->getUnderlyingDecl()));
+
+ if (T->isEnumeralType())
+ Diag(IdInfo.IdentifierLoc, diag::warn_cxx98_compat_enum_nested_name_spec);
+
TypeLocBuilder TLB;
- if (isa<InjectedClassNameType>(T)) {
+ if (const auto *USD = dyn_cast<UsingShadowDecl>(SD)) {
+ T = Context.getUsingType(USD, T);
+ TLB.pushTypeSpec(T).setNameLoc(IdInfo.IdentifierLoc);
+ } else if (isa<InjectedClassNameType>(T)) {
InjectedClassNameTypeLoc InjectedTL
= TLB.push<InjectedClassNameTypeLoc>(T);
InjectedTL.setNameLoc(IdInfo.IdentifierLoc);
@@ -770,9 +753,6 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
llvm_unreachable("Unhandled TypeDecl node in nested-name-specifier");
}
- if (T->isEnumeralType())
- Diag(IdInfo.IdentifierLoc, diag::warn_cxx98_compat_enum_nested_name_spec);
-
SS.Extend(Context, SourceLocation(), TLB.getTypeLocInContext(Context, T),
IdInfo.CCLoc);
return false;
@@ -824,10 +804,14 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
}
if (!Found.empty()) {
- if (TypeDecl *TD = Found.getAsSingle<TypeDecl>())
+ if (TypeDecl *TD = Found.getAsSingle<TypeDecl>()) {
Diag(IdInfo.IdentifierLoc, diag::err_expected_class_or_namespace)
<< Context.getTypeDeclType(TD) << getLangOpts().CPlusPlus;
- else {
+ } else if (Found.getAsSingle<TemplateDecl>()) {
+ ParsedType SuggestedType;
+ DiagnoseUnknownTypeName(IdInfo.Identifier, IdInfo.IdentifierLoc, S, &SS,
+ SuggestedType);
+ } else {
Diag(IdInfo.IdentifierLoc, diag::err_expected_class_or_namespace)
<< IdInfo.Identifier << getLangOpts().CPlusPlus;
if (NamedDecl *ND = Found.getAsSingle<NamedDecl>())
@@ -846,7 +830,6 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
bool Sema::ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
bool EnteringContext, CXXScopeSpec &SS,
- bool ErrorRecoveryLookup,
bool *IsCorrectedToColon,
bool OnlyNamespace) {
if (SS.isInvalid())
@@ -865,7 +848,7 @@ bool Sema::ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
assert(DS.getTypeSpecType() == DeclSpec::TST_decltype);
- QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc());
+ QualType T = BuildDecltypeType(DS.getRepAsExpr());
if (T.isNull())
return true;
@@ -877,7 +860,8 @@ bool Sema::ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
TypeLocBuilder TLB;
DecltypeTypeLoc DecltypeTL = TLB.push<DecltypeTypeLoc>(T);
- DecltypeTL.setNameLoc(DS.getTypeSpecTypeLoc());
+ DecltypeTL.setDecltypeLoc(DS.getTypeSpecTypeLoc());
+ DecltypeTL.setRParenLoc(DS.getTypeofParensRange().getEnd());
SS.Extend(Context, SourceLocation(), TLB.getTypeLocInContext(Context, T),
ColonColonLoc);
return false;
@@ -923,10 +907,9 @@ bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
// Handle a dependent template specialization for which we cannot resolve
// the template name.
assert(DTN->getQualifier() == SS.getScopeRep());
- QualType T = Context.getDependentTemplateSpecializationType(ETK_None,
- DTN->getQualifier(),
- DTN->getIdentifier(),
- TemplateArgs);
+ QualType T = Context.getDependentTemplateSpecializationType(
+ ElaboratedTypeKeyword::None, DTN->getQualifier(), DTN->getIdentifier(),
+ TemplateArgs.arguments());
// Create source-location information for this type.
TypeLocBuilder Builder;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCast.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCast.cpp
index cac43075f860..9d85568d97b2 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCast.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCast.cpp
@@ -25,6 +25,7 @@
#include "clang/Sema/Initialization.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
#include <set>
using namespace clang;
@@ -65,9 +66,13 @@ namespace {
// If a pr-value initially has the type cv-T, where T is a
// cv-unqualified non-class, non-array type, the type of the
// expression is adjusted to T prior to any further analysis.
+ // C23 6.5.4p6:
+ // Preceding an expression by a parenthesized type name converts the
+ // value of the expression to the unqualified, non-atomic version of
+ // the named type.
if (!S.Context.getLangOpts().ObjC && !DestType->isRecordType() &&
!DestType->isArrayType()) {
- DestType = DestType.getUnqualifiedType();
+ DestType = DestType.getAtomicUnqualifiedType();
}
if (const BuiltinType *placeholder =
@@ -449,6 +454,25 @@ static bool tryDiagnoseOverloadedCast(Sema &S, CastType CT,
switch (sequence.getFailureKind()) {
default: return false;
+ case InitializationSequence::FK_ParenthesizedListInitFailed:
+ // In C++20, if the underlying destination type is a RecordType, Clang
+ // attempts to perform parentesized aggregate initialization if constructor
+ // overload fails:
+ //
+ // C++20 [expr.static.cast]p4:
+ // An expression E can be explicitly converted to a type T...if overload
+ // resolution for a direct-initialization...would find at least one viable
+ // function ([over.match.viable]), or if T is an aggregate type having a
+ // first element X and there is an implicit conversion sequence from E to
+ // the type of X.
+ //
+ // If that fails, then we'll generate the diagnostics from the failed
+ // previous constructor overload attempt. Array initialization, however, is
+ // not done after attempting constructor overloading, so we exit as there
+ // won't be a failed overload result.
+ if (destType->isArrayType())
+ return false;
+ break;
case InitializationSequence::FK_ConstructorOverloadFailed:
case InitializationSequence::FK_UserConversionOverloadFailed:
break;
@@ -911,6 +935,14 @@ void CastOperation::CheckDynamicCast() {
<< isClangCL;
}
+ // For a dynamic_cast to a final type, IR generation might emit a reference
+ // to the vtable.
+ if (DestRecord) {
+ auto *DestDecl = DestRecord->getAsCXXRecordDecl();
+ if (DestDecl->isEffectivelyFinal())
+ Self.MarkVTableUsed(OpRange.getBegin(), DestDecl);
+ }
+
// Done. Everything else is run-time checks.
Kind = CK_Dynamic;
}
@@ -1059,11 +1091,19 @@ static bool argTypeIsABIEquivalent(QualType SrcType, QualType DestType,
return Context.hasSameUnqualifiedType(SrcType, DestType);
}
-static bool checkCastFunctionType(Sema &Self, const ExprResult &SrcExpr,
- QualType DestType) {
- if (Self.Diags.isIgnored(diag::warn_cast_function_type,
- SrcExpr.get()->getExprLoc()))
- return true;
+static unsigned int checkCastFunctionType(Sema &Self, const ExprResult &SrcExpr,
+ QualType DestType) {
+ unsigned int DiagID = 0;
+ const unsigned int DiagList[] = {diag::warn_cast_function_type_strict,
+ diag::warn_cast_function_type};
+ for (auto ID : DiagList) {
+ if (!Self.Diags.isIgnored(ID, SrcExpr.get()->getExprLoc())) {
+ DiagID = ID;
+ break;
+ }
+ }
+ if (!DiagID)
+ return 0;
QualType SrcType = SrcExpr.get()->getType();
const FunctionType *SrcFTy = nullptr;
@@ -1078,10 +1118,17 @@ static bool checkCastFunctionType(Sema &Self, const ExprResult &SrcExpr,
SrcFTy = SrcType->castAs<FunctionType>();
DstFTy = DestType.getNonReferenceType()->castAs<FunctionType>();
} else {
- return true;
+ return 0;
}
assert(SrcFTy && DstFTy);
+ if (Self.Context.hasSameType(SrcFTy, DstFTy))
+ return 0;
+
+ // For strict checks, ensure we have an exact match.
+ if (DiagID == diag::warn_cast_function_type_strict)
+ return DiagID;
+
auto IsVoidVoid = [](const FunctionType *T) {
if (!T->getReturnType()->isVoidType())
return false;
@@ -1092,16 +1139,16 @@ static bool checkCastFunctionType(Sema &Self, const ExprResult &SrcExpr,
// Skip if either function type is void(*)(void)
if (IsVoidVoid(SrcFTy) || IsVoidVoid(DstFTy))
- return true;
+ return 0;
// Check return type.
if (!argTypeIsABIEquivalent(SrcFTy->getReturnType(), DstFTy->getReturnType(),
Self.Context))
- return false;
+ return DiagID;
// Check if either has unspecified number of parameters
if (SrcFTy->isFunctionNoProtoType() || DstFTy->isFunctionNoProtoType())
- return true;
+ return 0;
// Check parameter types.
@@ -1114,19 +1161,19 @@ static bool checkCastFunctionType(Sema &Self, const ExprResult &SrcExpr,
unsigned DstNumParams = DstFPTy->getNumParams();
if (NumParams > DstNumParams) {
if (!DstFPTy->isVariadic())
- return false;
+ return DiagID;
NumParams = DstNumParams;
} else if (NumParams < DstNumParams) {
if (!SrcFPTy->isVariadic())
- return false;
+ return DiagID;
}
for (unsigned i = 0; i < NumParams; ++i)
if (!argTypeIsABIEquivalent(SrcFPTy->getParamType(i),
DstFPTy->getParamType(i), Self.Context))
- return false;
+ return DiagID;
- return true;
+ return 0;
}
/// CheckReinterpretCast - Check that a reinterpret_cast\<DestType\>(SrcExpr) is
@@ -1167,8 +1214,8 @@ void CastOperation::CheckReinterpretCast() {
checkObjCConversion(Sema::CCK_OtherCast);
DiagnoseReinterpretUpDownCast(Self, SrcExpr.get(), DestType, OpRange);
- if (!checkCastFunctionType(Self, SrcExpr, DestType))
- Self.Diag(OpRange.getBegin(), diag::warn_cast_function_type)
+ if (unsigned DiagID = checkCastFunctionType(Self, SrcExpr, DestType))
+ Self.Diag(OpRange.getBegin(), DiagID)
<< SrcExpr.get()->getType() << DestType << OpRange;
} else {
SrcExpr = ExprError();
@@ -1313,7 +1360,9 @@ static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
// lvalue-to-rvalue, array-to-pointer, function-to-pointer, and boolean
// conversions, subject to further restrictions.
// Also, C++ 5.2.9p1 forbids casting away constness, which makes reversal
- // of qualification conversions impossible.
+ // of qualification conversions impossible. (In C++20, adding an array bound
+ // would be the reverse of a qualification conversion, but adding permission
+ // to add an array bound in a static_cast is a wording oversight.)
// In the CStyle case, the earlier attempt to const_cast should have taken
// care of reverse qualification conversions.
@@ -1354,7 +1403,7 @@ static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
if (SrcType->isIntegralOrEnumerationType()) {
// [expr.static.cast]p10 If the enumeration type has a fixed underlying
// type, the value is first converted to that type by integral conversion
- const EnumType *Enum = DestType->getAs<EnumType>();
+ const EnumType *Enum = DestType->castAs<EnumType>();
Kind = Enum->getDecl()->isFixed() &&
Enum->getDecl()->getIntegerType()->isBooleanType()
? CK_IntegralToBoolean
@@ -2332,6 +2381,12 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
return TC_Success;
}
+ // Allow bitcasting between SVE VLATs and VLSTs, and vice-versa.
+ if (Self.isValidRVVBitcast(SrcType, DestType)) {
+ Kind = CK_BitCast;
+ return TC_Success;
+ }
+
// The non-vector type, if any, must have integral type. This is
// the same rule that C vector casts use; note, however, that enum
// types are not integral in C++.
@@ -2543,7 +2598,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
static TryCastResult TryAddressSpaceCast(Sema &Self, ExprResult &SrcExpr,
QualType DestType, bool CStyle,
unsigned &msg, CastKind &Kind) {
- if (!Self.getLangOpts().OpenCL)
+ if (!Self.getLangOpts().OpenCL && !Self.getLangOpts().SYCLIsDevice)
// FIXME: As compiler doesn't have any information about overlapping addr
// spaces at the moment we have to be permissive here.
return TC_NotApplicable;
@@ -2627,11 +2682,24 @@ void CastOperation::checkAddressSpaceCast(QualType SrcType, QualType DestType) {
bool Sema::ShouldSplatAltivecScalarInCast(const VectorType *VecTy) {
bool SrcCompatXL = this->getLangOpts().getAltivecSrcCompat() ==
LangOptions::AltivecSrcCompatKind::XL;
- VectorType::VectorKind VKind = VecTy->getVectorKind();
+ VectorKind VKind = VecTy->getVectorKind();
+
+ if ((VKind == VectorKind::AltiVecVector) ||
+ (SrcCompatXL && ((VKind == VectorKind::AltiVecBool) ||
+ (VKind == VectorKind::AltiVecPixel)))) {
+ return true;
+ }
+ return false;
+}
- if ((VKind == VectorType::AltiVecVector) ||
- (SrcCompatXL && ((VKind == VectorType::AltiVecBool) ||
- (VKind == VectorType::AltiVecPixel)))) {
+bool Sema::CheckAltivecInitFromScalar(SourceRange R, QualType VecTy,
+ QualType SrcTy) {
+ bool SrcCompatGCC = this->getLangOpts().getAltivecSrcCompat() ==
+ LangOptions::AltivecSrcCompatKind::GCC;
+ if (this->getLangOpts().AltiVec && SrcCompatGCC) {
+ this->Diag(R.getBegin(),
+ diag::err_invalid_conversion_between_vector_and_integer)
+ << VecTy << SrcTy << R;
return true;
}
return false;
@@ -2690,7 +2758,12 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
}
// AltiVec vector initialization with a single literal.
- if (const VectorType *vecTy = DestType->getAs<VectorType>())
+ if (const VectorType *vecTy = DestType->getAs<VectorType>()) {
+ if (Self.CheckAltivecInitFromScalar(OpRange, DestType,
+ SrcExpr.get()->getType())) {
+ SrcExpr = ExprError();
+ return;
+ }
if (Self.ShouldSplatAltivecScalarInCast(vecTy) &&
(SrcExpr.get()->getType()->isIntegerType() ||
SrcExpr.get()->getType()->isFloatingType())) {
@@ -2698,6 +2771,16 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
SrcExpr = Self.prepareVectorSplat(DestType, SrcExpr.get());
return;
}
+ }
+
+ // WebAssembly tables cannot be cast.
+ QualType SrcType = SrcExpr.get()->getType();
+ if (SrcType->isWebAssemblyTableType()) {
+ Self.Diag(OpRange.getBegin(), diag::err_wasm_cast_table)
+ << 1 << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
// C++ [expr.cast]p5: The conversions performed by
// - a const_cast,
@@ -2776,8 +2859,8 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
if (Kind == CK_BitCast)
checkCastAlign();
- if (!checkCastFunctionType(Self, SrcExpr, DestType))
- Self.Diag(OpRange.getBegin(), diag::warn_cast_function_type)
+ if (unsigned DiagID = checkCastFunctionType(Self, SrcExpr, DestType))
+ Self.Diag(OpRange.getBegin(), DiagID)
<< SrcExpr.get()->getType() << DestType << OpRange;
} else {
@@ -2874,6 +2957,13 @@ void CastOperation::CheckCStyleCast() {
return;
QualType SrcType = SrcExpr.get()->getType();
+ if (SrcType->isWebAssemblyTableType()) {
+ Self.Diag(OpRange.getBegin(), diag::err_wasm_cast_table)
+ << 1 << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+
assert(!SrcType->isPlaceholderType());
checkAddressSpaceCast(SrcType, DestType);
@@ -2900,6 +2990,13 @@ void CastOperation::CheckCStyleCast() {
return;
}
+ // Allow bitcasting between compatible RVV vector types.
+ if ((SrcType->isVectorType() || DestType->isVectorType()) &&
+ Self.isValidRVVBitcast(SrcType, DestType)) {
+ Kind = CK_BitCast;
+ return;
+ }
+
if (!DestType->isScalarType() && !DestType->isVectorType() &&
!DestType->isMatrixType()) {
const RecordType *DestRecordTy = DestType->getAs<RecordType>();
@@ -2964,6 +3061,37 @@ void CastOperation::CheckCStyleCast() {
return;
}
+ // C23 6.5.4p4:
+ // The type nullptr_t shall not be converted to any type other than void,
+ // bool, or a pointer type. No type other than nullptr_t shall be converted
+ // to nullptr_t.
+ if (SrcType->isNullPtrType()) {
+ // FIXME: 6.3.2.4p2 says that nullptr_t can be converted to itself, but
+ // 6.5.4p4 is a constraint check and nullptr_t is not void, bool, or a
+ // pointer type. We're not going to diagnose that as a constraint violation.
+ if (!DestType->isVoidType() && !DestType->isBooleanType() &&
+ !DestType->isPointerType() && !DestType->isNullPtrType()) {
+ Self.Diag(SrcExpr.get()->getExprLoc(), diag::err_nullptr_cast)
+ << /*nullptr to type*/ 0 << DestType;
+ SrcExpr = ExprError();
+ return;
+ }
+ if (!DestType->isNullPtrType()) {
+ // Implicitly cast from the null pointer type to the type of the
+ // destination.
+ CastKind CK = DestType->isPointerType() ? CK_NullToPointer : CK_BitCast;
+ SrcExpr = ImplicitCastExpr::Create(Self.Context, DestType, CK,
+ SrcExpr.get(), nullptr, VK_PRValue,
+ Self.CurFPFeatureOverrides());
+ }
+ }
+ if (DestType->isNullPtrType() && !SrcType->isNullPtrType()) {
+ Self.Diag(SrcExpr.get()->getExprLoc(), diag::err_nullptr_cast)
+ << /*type to nullptr*/ 1 << SrcType;
+ SrcExpr = ExprError();
+ return;
+ }
+
if (DestType->isExtVectorType()) {
SrcExpr = Self.CheckExtVectorCast(OpRange, DestType, SrcExpr.get(), Kind);
return;
@@ -2976,6 +3104,10 @@ void CastOperation::CheckCStyleCast() {
}
if (const VectorType *DestVecTy = DestType->getAs<VectorType>()) {
+ if (Self.CheckAltivecInitFromScalar(OpRange, DestType, SrcType)) {
+ SrcExpr = ExprError();
+ return;
+ }
if (Self.ShouldSplatAltivecScalarInCast(DestVecTy) &&
(SrcType->isIntegerType() || SrcType->isFloatingType())) {
Kind = CK_VectorSplat;
@@ -3003,20 +3135,6 @@ void CastOperation::CheckCStyleCast() {
return;
}
- // Can't cast to or from bfloat
- if (DestType->isBFloat16Type() && !SrcType->isBFloat16Type()) {
- Self.Diag(SrcExpr.get()->getExprLoc(), diag::err_cast_to_bfloat16)
- << SrcExpr.get()->getSourceRange();
- SrcExpr = ExprError();
- return;
- }
- if (SrcType->isBFloat16Type() && !DestType->isBFloat16Type()) {
- Self.Diag(SrcExpr.get()->getExprLoc(), diag::err_cast_from_bfloat16)
- << SrcExpr.get()->getSourceRange();
- SrcExpr = ExprError();
- return;
- }
-
// If either type is a pointer, the other type has to be either an
// integer or a pointer.
if (!DestType->isArithmeticType()) {
@@ -3100,9 +3218,25 @@ void CastOperation::CheckCStyleCast() {
}
}
- if (!checkCastFunctionType(Self, SrcExpr, DestType))
- Self.Diag(OpRange.getBegin(), diag::warn_cast_function_type)
- << SrcType << DestType << OpRange;
+ if (unsigned DiagID = checkCastFunctionType(Self, SrcExpr, DestType))
+ Self.Diag(OpRange.getBegin(), DiagID) << SrcType << DestType << OpRange;
+
+ if (isa<PointerType>(SrcType) && isa<PointerType>(DestType)) {
+ QualType SrcTy = cast<PointerType>(SrcType)->getPointeeType();
+ QualType DestTy = cast<PointerType>(DestType)->getPointeeType();
+
+ const RecordDecl *SrcRD = SrcTy->getAsRecordDecl();
+ const RecordDecl *DestRD = DestTy->getAsRecordDecl();
+
+ if (SrcRD && DestRD && SrcRD->hasAttr<RandomizeLayoutAttr>() &&
+ SrcRD != DestRD) {
+ // The struct we are casting the pointer from was randomized.
+ Self.Diag(OpRange.getBegin(), diag::err_cast_from_randomized_struct)
+ << SrcType << DestType;
+ SrcExpr = ExprError();
+ return;
+ }
+ }
DiagnoseCastOfObjCSEL(Self, SrcExpr, DestType);
DiagnoseCallingConvCast(Self, SrcExpr, DestType, OpRange);
@@ -3228,7 +3362,7 @@ ExprResult Sema::BuildCXXFunctionalCastExpr(TypeSourceInfo *CastTypeInfo,
assert(LPLoc.isValid() && "List-initialization shouldn't get here.");
CastOperation Op(*this, Type, CastExpr);
Op.DestRange = CastTypeInfo->getTypeLoc().getSourceRange();
- Op.OpRange = SourceRange(Op.DestRange.getBegin(), CastExpr->getEndLoc());
+ Op.OpRange = SourceRange(Op.DestRange.getBegin(), RPLoc);
Op.CheckCXXCStyleCast(/*FunctionalCast=*/true, /*ListInit=*/false);
if (Op.SrcExpr.isInvalid())
@@ -3240,6 +3374,9 @@ ExprResult Sema::BuildCXXFunctionalCastExpr(TypeSourceInfo *CastTypeInfo,
if (auto *ConstructExpr = dyn_cast<CXXConstructExpr>(SubExpr))
ConstructExpr->setParenOrBraceRange(SourceRange(LPLoc, RPLoc));
+ // -Wcast-qual
+ DiagnoseCastQual(Op.Self, Op.SrcExpr, Op.DestType);
+
return Op.complete(CXXFunctionalCastExpr::Create(
Context, Op.ResultType, Op.ValueKind, CastTypeInfo, Op.Kind,
Op.SrcExpr.get(), &Op.BasePath, CurFPFeatureOverrides(), LPLoc, RPLoc));
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp b/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
index de75c10417e7..09b7e1c62fbd 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
@@ -67,17 +67,15 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
@@ -88,6 +86,8 @@
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/RISCVTargetParser.h"
+#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <bitset>
#include <cassert>
@@ -96,6 +96,7 @@
#include <cstdint>
#include <functional>
#include <limits>
+#include <optional>
#include <string>
#include <tuple>
#include <utility>
@@ -109,24 +110,79 @@ SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
Context.getTargetInfo());
}
+static constexpr unsigned short combineFAPK(Sema::FormatArgumentPassingKind A,
+ Sema::FormatArgumentPassingKind B) {
+ return (A << 8) | B;
+}
+
+/// Checks that a call expression's argument count is at least the desired
+/// number. This is useful when doing custom type-checking on a variadic
+/// function. Returns true on error.
+static bool checkArgCountAtLeast(Sema &S, CallExpr *Call,
+ unsigned MinArgCount) {
+ unsigned ArgCount = Call->getNumArgs();
+ if (ArgCount >= MinArgCount)
+ return false;
+
+ return S.Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args)
+ << 0 /*function call*/ << MinArgCount << ArgCount
+ << /*is non object*/ 0 << Call->getSourceRange();
+}
+
+/// Checks that a call expression's argument count is at most the desired
+/// number. This is useful when doing custom type-checking on a variadic
+/// function. Returns true on error.
+static bool checkArgCountAtMost(Sema &S, CallExpr *Call, unsigned MaxArgCount) {
+ unsigned ArgCount = Call->getNumArgs();
+ if (ArgCount <= MaxArgCount)
+ return false;
+ return S.Diag(Call->getEndLoc(),
+ diag::err_typecheck_call_too_many_args_at_most)
+ << 0 /*function call*/ << MaxArgCount << ArgCount
+ << /*is non object*/ 0 << Call->getSourceRange();
+}
+
+/// Checks that a call expression's argument count is in the desired range. This
+/// is useful when doing custom type-checking on a variadic function. Returns
+/// true on error.
+static bool checkArgCountRange(Sema &S, CallExpr *Call, unsigned MinArgCount,
+ unsigned MaxArgCount) {
+ return checkArgCountAtLeast(S, Call, MinArgCount) ||
+ checkArgCountAtMost(S, Call, MaxArgCount);
+}
+
/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking. Returns true on error.
-static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
- unsigned argCount = call->getNumArgs();
- if (argCount == desiredArgCount) return false;
+static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) {
+ unsigned ArgCount = Call->getNumArgs();
+ if (ArgCount == DesiredArgCount)
+ return false;
- if (argCount < desiredArgCount)
- return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args)
- << 0 /*function call*/ << desiredArgCount << argCount
- << call->getSourceRange();
+ if (checkArgCountAtLeast(S, Call, DesiredArgCount))
+ return true;
+ assert(ArgCount > DesiredArgCount && "should have diagnosed this");
// Highlight all the excess arguments.
- SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(),
- call->getArg(argCount - 1)->getEndLoc());
+ SourceRange Range(Call->getArg(DesiredArgCount)->getBeginLoc(),
+ Call->getArg(ArgCount - 1)->getEndLoc());
- return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
- << 0 /*function call*/ << desiredArgCount << argCount
- << call->getArg(1)->getSourceRange();
+ return S.Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args)
+ << 0 /*function call*/ << DesiredArgCount << ArgCount
+ << /*is non object*/ 0 << Call->getArg(1)->getSourceRange();
+}
+
+static bool convertArgumentToType(Sema &S, Expr *&Value, QualType Ty) {
+ if (Value->isTypeDependent())
+ return false;
+
+ InitializedEntity Entity =
+ InitializedEntity::InitializeParameter(S.Context, Ty, false);
+ ExprResult Result =
+ S.PerformCopyInitialization(Entity, SourceLocation(), Value);
+ if (Result.isInvalid())
+ return true;
+ Value = Result.get();
+ return false;
}
/// Check that the first argument to __builtin_annotation is an integer
@@ -147,7 +203,7 @@ static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
// Second argument should be a constant string.
Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
- if (!Literal || !Literal->isAscii()) {
+ if (!Literal || !Literal->isOrdinary()) {
S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
<< StrArg->getSourceRange();
return true;
@@ -161,7 +217,7 @@ static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
// We need at least one argument.
if (TheCall->getNumArgs() < 1) {
S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
- << 0 << 1 << TheCall->getNumArgs()
+ << 0 << 1 << TheCall->getNumArgs() << /*is non object*/ 0
<< TheCall->getCallee()->getSourceRange();
return true;
}
@@ -195,6 +251,29 @@ static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
return false;
}
+/// Check that the argument to __builtin_function_start is a function.
+static bool SemaBuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
+ if (checkArgCount(S, TheCall, 1))
+ return true;
+
+ ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
+ if (Arg.isInvalid())
+ return true;
+
+ TheCall->setArg(0, Arg.get());
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
+ Arg.get()->getAsBuiltinConstantDeclRef(S.getASTContext()));
+
+ if (!FD) {
+ S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type)
+ << TheCall->getSourceRange();
+ return true;
+ }
+
+ return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
+ TheCall->getBeginLoc());
+}
+
/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
@@ -291,6 +370,32 @@ static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
if (checkArgCount(S, TheCall, 3))
return true;
+ std::pair<unsigned, const char *> Builtins[] = {
+ { Builtin::BI__builtin_add_overflow, "ckd_add" },
+ { Builtin::BI__builtin_sub_overflow, "ckd_sub" },
+ { Builtin::BI__builtin_mul_overflow, "ckd_mul" },
+ };
+
+ bool CkdOperation = llvm::any_of(Builtins, [&](const std::pair<unsigned,
+ const char *> &P) {
+ return BuiltinID == P.first && TheCall->getExprLoc().isMacroID() &&
+ Lexer::getImmediateMacroName(TheCall->getExprLoc(),
+ S.getSourceManager(), S.getLangOpts()) == P.second;
+ });
+
+ auto ValidCkdIntType = [](QualType QT) {
+ // A valid checked integer type is an integer type other than a plain char,
+ // bool, a bit-precise type, or an enumeration type.
+ if (const auto *BT = QT.getCanonicalType()->getAs<BuiltinType>())
+ return (BT->getKind() >= BuiltinType::Short &&
+ BT->getKind() <= BuiltinType::Int128) || (
+ BT->getKind() >= BuiltinType::UShort &&
+ BT->getKind() <= BuiltinType::UInt128) ||
+ BT->getKind() == BuiltinType::UChar ||
+ BT->getKind() == BuiltinType::SChar;
+ return false;
+ };
+
// First two arguments should be integers.
for (unsigned I = 0; I < 2; ++I) {
ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
@@ -298,9 +403,10 @@ static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
TheCall->setArg(I, Arg.get());
QualType Ty = Arg.get()->getType();
- if (!Ty->isIntegerType()) {
+ bool IsValid = CkdOperation ? ValidCkdIntType(Ty) : Ty->isIntegerType();
+ if (!IsValid) {
S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
- << Ty << Arg.get()->getSourceRange();
+ << CkdOperation << Ty << Arg.get()->getSourceRange();
return true;
}
}
@@ -317,25 +423,26 @@ static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
const auto *PtrTy = Ty->getAs<PointerType>();
if (!PtrTy ||
!PtrTy->getPointeeType()->isIntegerType() ||
+ (!ValidCkdIntType(PtrTy->getPointeeType()) && CkdOperation) ||
PtrTy->getPointeeType().isConstQualified()) {
S.Diag(Arg.get()->getBeginLoc(),
diag::err_overflow_builtin_must_be_ptr_int)
- << Ty << Arg.get()->getSourceRange();
+ << CkdOperation << Ty << Arg.get()->getSourceRange();
return true;
}
}
- // Disallow signed ExtIntType args larger than 128 bits to mul function until
- // we improve backend support.
+ // Disallow signed bit-precise integer args larger than 128 bits to mul
+ // function until we improve backend support.
if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
for (unsigned I = 0; I < 3; ++I) {
const auto Arg = TheCall->getArg(I);
// Third argument will be a pointer.
auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
- if (Ty->isExtIntType() && Ty->isSignedIntegerType() &&
+ if (Ty->isBitIntType() && Ty->isSignedIntegerType() &&
S.getASTContext().getIntWidth(Ty) > 128)
return S.Diag(Arg->getBeginLoc(),
- diag::err_overflow_builtin_ext_int_max_size)
+ diag::err_overflow_builtin_bit_int_max_size)
<< 128;
}
}
@@ -343,6 +450,316 @@ static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
return false;
}
+namespace {
+struct BuiltinDumpStructGenerator {
+ Sema &S;
+ CallExpr *TheCall;
+ SourceLocation Loc = TheCall->getBeginLoc();
+ SmallVector<Expr *, 32> Actions;
+ DiagnosticErrorTrap ErrorTracker;
+ PrintingPolicy Policy;
+
+ BuiltinDumpStructGenerator(Sema &S, CallExpr *TheCall)
+ : S(S), TheCall(TheCall), ErrorTracker(S.getDiagnostics()),
+ Policy(S.Context.getPrintingPolicy()) {
+ Policy.AnonymousTagLocations = false;
+ }
+
+ Expr *makeOpaqueValueExpr(Expr *Inner) {
+ auto *OVE = new (S.Context)
+ OpaqueValueExpr(Loc, Inner->getType(), Inner->getValueKind(),
+ Inner->getObjectKind(), Inner);
+ Actions.push_back(OVE);
+ return OVE;
+ }
+
+ Expr *getStringLiteral(llvm::StringRef Str) {
+ Expr *Lit = S.Context.getPredefinedStringLiteralFromCache(Str);
+ // Wrap the literal in parentheses to attach a source location.
+ return new (S.Context) ParenExpr(Loc, Loc, Lit);
+ }
+
+ bool callPrintFunction(llvm::StringRef Format,
+ llvm::ArrayRef<Expr *> Exprs = {}) {
+ SmallVector<Expr *, 8> Args;
+ assert(TheCall->getNumArgs() >= 2);
+ Args.reserve((TheCall->getNumArgs() - 2) + /*Format*/ 1 + Exprs.size());
+ Args.assign(TheCall->arg_begin() + 2, TheCall->arg_end());
+ Args.push_back(getStringLiteral(Format));
+ Args.insert(Args.end(), Exprs.begin(), Exprs.end());
+
+ // Register a note to explain why we're performing the call.
+ Sema::CodeSynthesisContext Ctx;
+ Ctx.Kind = Sema::CodeSynthesisContext::BuildingBuiltinDumpStructCall;
+ Ctx.PointOfInstantiation = Loc;
+ Ctx.CallArgs = Args.data();
+ Ctx.NumCallArgs = Args.size();
+ S.pushCodeSynthesisContext(Ctx);
+
+ ExprResult RealCall =
+ S.BuildCallExpr(/*Scope=*/nullptr, TheCall->getArg(1),
+ TheCall->getBeginLoc(), Args, TheCall->getRParenLoc());
+
+ S.popCodeSynthesisContext();
+ if (!RealCall.isInvalid())
+ Actions.push_back(RealCall.get());
+ // Bail out if we've hit any errors, even if we managed to build the
+ // call. We don't want to produce more than one error.
+ return RealCall.isInvalid() || ErrorTracker.hasErrorOccurred();
+ }
+
+ Expr *getIndentString(unsigned Depth) {
+ if (!Depth)
+ return nullptr;
+
+ llvm::SmallString<32> Indent;
+ Indent.resize(Depth * Policy.Indentation, ' ');
+ return getStringLiteral(Indent);
+ }
+
+ Expr *getTypeString(QualType T) {
+ return getStringLiteral(T.getAsString(Policy));
+ }
+
+ bool appendFormatSpecifier(QualType T, llvm::SmallVectorImpl<char> &Str) {
+ llvm::raw_svector_ostream OS(Str);
+
+ // Format 'bool', 'char', 'signed char', 'unsigned char' as numbers, rather
+ // than trying to print a single character.
+ if (auto *BT = T->getAs<BuiltinType>()) {
+ switch (BT->getKind()) {
+ case BuiltinType::Bool:
+ OS << "%d";
+ return true;
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ OS << "%hhu";
+ return true;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ OS << "%hhd";
+ return true;
+ default:
+ break;
+ }
+ }
+
+ analyze_printf::PrintfSpecifier Specifier;
+ if (Specifier.fixType(T, S.getLangOpts(), S.Context, /*IsObjCLiteral=*/false)) {
+ // We were able to guess how to format this.
+ if (Specifier.getConversionSpecifier().getKind() ==
+ analyze_printf::PrintfConversionSpecifier::sArg) {
+ // Wrap double-quotes around a '%s' specifier and limit its maximum
+ // length. Ideally we'd also somehow escape special characters in the
+ // contents but printf doesn't support that.
+ // FIXME: '%s' formatting is not safe in general.
+ OS << '"';
+ Specifier.setPrecision(analyze_printf::OptionalAmount(32u));
+ Specifier.toString(OS);
+ OS << '"';
+ // FIXME: It would be nice to include a '...' if the string doesn't fit
+ // in the length limit.
+ } else {
+ Specifier.toString(OS);
+ }
+ return true;
+ }
+
+ if (T->isPointerType()) {
+ // Format all pointers with '%p'.
+ OS << "%p";
+ return true;
+ }
+
+ return false;
+ }
+
+ bool dumpUnnamedRecord(const RecordDecl *RD, Expr *E, unsigned Depth) {
+ Expr *IndentLit = getIndentString(Depth);
+ Expr *TypeLit = getTypeString(S.Context.getRecordType(RD));
+ if (IndentLit ? callPrintFunction("%s%s", {IndentLit, TypeLit})
+ : callPrintFunction("%s", {TypeLit}))
+ return true;
+
+ return dumpRecordValue(RD, E, IndentLit, Depth);
+ }
+
+ // Dump a record value. E should be a pointer or lvalue referring to an RD.
+ bool dumpRecordValue(const RecordDecl *RD, Expr *E, Expr *RecordIndent,
+ unsigned Depth) {
+ // FIXME: Decide what to do if RD is a union. At least we should probably
+ // turn off printing `const char*` members with `%s`, because that is very
+ // likely to crash if that's not the active member. Whatever we decide, we
+ // should document it.
+
+ // Build an OpaqueValueExpr so we can refer to E more than once without
+ // triggering re-evaluation.
+ Expr *RecordArg = makeOpaqueValueExpr(E);
+ bool RecordArgIsPtr = RecordArg->getType()->isPointerType();
+
+ if (callPrintFunction(" {\n"))
+ return true;
+
+ // Dump each base class, regardless of whether they're aggregates.
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (const auto &Base : CXXRD->bases()) {
+ QualType BaseType =
+ RecordArgIsPtr ? S.Context.getPointerType(Base.getType())
+ : S.Context.getLValueReferenceType(Base.getType());
+ ExprResult BasePtr = S.BuildCStyleCastExpr(
+ Loc, S.Context.getTrivialTypeSourceInfo(BaseType, Loc), Loc,
+ RecordArg);
+ if (BasePtr.isInvalid() ||
+ dumpUnnamedRecord(Base.getType()->getAsRecordDecl(), BasePtr.get(),
+ Depth + 1))
+ return true;
+ }
+ }
+
+ Expr *FieldIndentArg = getIndentString(Depth + 1);
+
+ // Dump each field.
+ for (auto *D : RD->decls()) {
+ auto *IFD = dyn_cast<IndirectFieldDecl>(D);
+ auto *FD = IFD ? IFD->getAnonField() : dyn_cast<FieldDecl>(D);
+ if (!FD || FD->isUnnamedBitfield() || FD->isAnonymousStructOrUnion())
+ continue;
+
+ llvm::SmallString<20> Format = llvm::StringRef("%s%s %s ");
+ llvm::SmallVector<Expr *, 5> Args = {FieldIndentArg,
+ getTypeString(FD->getType()),
+ getStringLiteral(FD->getName())};
+
+ if (FD->isBitField()) {
+ Format += ": %zu ";
+ QualType SizeT = S.Context.getSizeType();
+ llvm::APInt BitWidth(S.Context.getIntWidth(SizeT),
+ FD->getBitWidthValue(S.Context));
+ Args.push_back(IntegerLiteral::Create(S.Context, BitWidth, SizeT, Loc));
+ }
+
+ Format += "=";
+
+ ExprResult Field =
+ IFD ? S.BuildAnonymousStructUnionMemberReference(
+ CXXScopeSpec(), Loc, IFD,
+ DeclAccessPair::make(IFD, AS_public), RecordArg, Loc)
+ : S.BuildFieldReferenceExpr(
+ RecordArg, RecordArgIsPtr, Loc, CXXScopeSpec(), FD,
+ DeclAccessPair::make(FD, AS_public),
+ DeclarationNameInfo(FD->getDeclName(), Loc));
+ if (Field.isInvalid())
+ return true;
+
+ auto *InnerRD = FD->getType()->getAsRecordDecl();
+ auto *InnerCXXRD = dyn_cast_or_null<CXXRecordDecl>(InnerRD);
+ if (InnerRD && (!InnerCXXRD || InnerCXXRD->isAggregate())) {
+ // Recursively print the values of members of aggregate record type.
+ if (callPrintFunction(Format, Args) ||
+ dumpRecordValue(InnerRD, Field.get(), FieldIndentArg, Depth + 1))
+ return true;
+ } else {
+ Format += " ";
+ if (appendFormatSpecifier(FD->getType(), Format)) {
+ // We know how to print this field.
+ Args.push_back(Field.get());
+ } else {
+ // We don't know how to print this field. Print out its address
+ // with a format specifier that a smart tool will be able to
+ // recognize and treat specially.
+ Format += "*%p";
+ ExprResult FieldAddr =
+ S.BuildUnaryOp(nullptr, Loc, UO_AddrOf, Field.get());
+ if (FieldAddr.isInvalid())
+ return true;
+ Args.push_back(FieldAddr.get());
+ }
+ Format += "\n";
+ if (callPrintFunction(Format, Args))
+ return true;
+ }
+ }
+
+ return RecordIndent ? callPrintFunction("%s}\n", RecordIndent)
+ : callPrintFunction("}\n");
+ }
+
+ Expr *buildWrapper() {
+ auto *Wrapper = PseudoObjectExpr::Create(S.Context, TheCall, Actions,
+ PseudoObjectExpr::NoResult);
+ TheCall->setType(Wrapper->getType());
+ TheCall->setValueKind(Wrapper->getValueKind());
+ return Wrapper;
+ }
+};
+} // namespace
+
+static ExprResult SemaBuiltinDumpStruct(Sema &S, CallExpr *TheCall) {
+ if (checkArgCountAtLeast(S, TheCall, 2))
+ return ExprError();
+
+ ExprResult PtrArgResult = S.DefaultLvalueConversion(TheCall->getArg(0));
+ if (PtrArgResult.isInvalid())
+ return ExprError();
+ TheCall->setArg(0, PtrArgResult.get());
+
+ // First argument should be a pointer to a struct.
+ QualType PtrArgType = PtrArgResult.get()->getType();
+ if (!PtrArgType->isPointerType() ||
+ !PtrArgType->getPointeeType()->isRecordType()) {
+ S.Diag(PtrArgResult.get()->getBeginLoc(),
+ diag::err_expected_struct_pointer_argument)
+ << 1 << TheCall->getDirectCallee() << PtrArgType;
+ return ExprError();
+ }
+ QualType Pointee = PtrArgType->getPointeeType();
+ const RecordDecl *RD = Pointee->getAsRecordDecl();
+ // Try to instantiate the class template as appropriate; otherwise, access to
+ // its data() may lead to a crash.
+ if (S.RequireCompleteType(PtrArgResult.get()->getBeginLoc(), Pointee,
+ diag::err_incomplete_type))
+ return ExprError();
+ // Second argument is a callable, but we can't fully validate it until we try
+ // calling it.
+ QualType FnArgType = TheCall->getArg(1)->getType();
+ if (!FnArgType->isFunctionType() && !FnArgType->isFunctionPointerType() &&
+ !FnArgType->isBlockPointerType() &&
+ !(S.getLangOpts().CPlusPlus && FnArgType->isRecordType())) {
+ auto *BT = FnArgType->getAs<BuiltinType>();
+ switch (BT ? BT->getKind() : BuiltinType::Void) {
+ case BuiltinType::Dependent:
+ case BuiltinType::Overload:
+ case BuiltinType::BoundMember:
+ case BuiltinType::PseudoObject:
+ case BuiltinType::UnknownAny:
+ case BuiltinType::BuiltinFn:
+ // This might be a callable.
+ break;
+
+ default:
+ S.Diag(TheCall->getArg(1)->getBeginLoc(),
+ diag::err_expected_callable_argument)
+ << 2 << TheCall->getDirectCallee() << FnArgType;
+ return ExprError();
+ }
+ }
+
+ BuiltinDumpStructGenerator Generator(S, TheCall);
+
+ // Wrap parentheses around the given pointer. This is not necessary for
+ // correct code generation, but it means that when we pretty-print the call
+ // arguments in our diagnostics we will produce '(&s)->n' instead of the
+ // incorrect '&s->n'.
+ Expr *PtrArg = PtrArgResult.get();
+ PtrArg = new (S.Context)
+ ParenExpr(PtrArg->getBeginLoc(),
+ S.getLocForEndOfToken(PtrArg->getEndLoc()), PtrArg);
+ if (Generator.dumpUnnamedRecord(RD, PtrArg, 0))
+ return ExprError();
+
+ return Generator.buildWrapper();
+}
+
static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
if (checkArgCount(S, BuiltinCall, 2))
return true;
@@ -408,9 +825,71 @@ static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
namespace {
+class ScanfDiagnosticFormatHandler
+ : public analyze_format_string::FormatStringHandler {
+ // Accepts the argument index (relative to the first destination index) of the
+ // argument whose size we want.
+ using ComputeSizeFunction =
+ llvm::function_ref<std::optional<llvm::APSInt>(unsigned)>;
+
+ // Accepts the argument index (relative to the first destination index), the
+ // destination size, and the source size).
+ using DiagnoseFunction =
+ llvm::function_ref<void(unsigned, unsigned, unsigned)>;
+
+ ComputeSizeFunction ComputeSizeArgument;
+ DiagnoseFunction Diagnose;
+
+public:
+ ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument,
+ DiagnoseFunction Diagnose)
+ : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {}
+
+ bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
+ const char *StartSpecifier,
+ unsigned specifierLen) override {
+ if (!FS.consumesDataArgument())
+ return true;
+
+ unsigned NulByte = 0;
+ switch ((FS.getConversionSpecifier().getKind())) {
+ default:
+ return true;
+ case analyze_format_string::ConversionSpecifier::sArg:
+ case analyze_format_string::ConversionSpecifier::ScanListArg:
+ NulByte = 1;
+ break;
+ case analyze_format_string::ConversionSpecifier::cArg:
+ break;
+ }
+
+ analyze_format_string::OptionalAmount FW = FS.getFieldWidth();
+ if (FW.getHowSpecified() !=
+ analyze_format_string::OptionalAmount::HowSpecified::Constant)
+ return true;
+
+ unsigned SourceSize = FW.getConstantAmount() + NulByte;
+
+ std::optional<llvm::APSInt> DestSizeAPS =
+ ComputeSizeArgument(FS.getArgIndex());
+ if (!DestSizeAPS)
+ return true;
+
+ unsigned DestSize = DestSizeAPS->getZExtValue();
+
+ if (DestSize < SourceSize)
+ Diagnose(FS.getArgIndex(), DestSize, SourceSize);
+
+ return true;
+ }
+};
+
class EstimateSizeFormatHandler
: public analyze_format_string::FormatStringHandler {
size_t Size;
+ /// Whether the format string contains Linux kernel's format specifier
+ /// extension.
+ bool IsKernelCompatible = true;
public:
EstimateSizeFormatHandler(StringRef Format)
@@ -418,7 +897,8 @@ public:
1 /* null byte always written by sprintf */) {}
bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
- const char *, unsigned SpecifierLen) override {
+ const char *, unsigned SpecifierLen,
+ const TargetInfo &) override {
const size_t FieldWidth = computeFieldWidth(FS);
const size_t Precision = computePrecision(FS);
@@ -444,9 +924,14 @@ public:
break;
// %g style conversion switches between %f or %e style dynamically.
- // %f always takes less space, so default to it.
+ // %g removes trailing zeros, and does not print decimal point if there are
+ // no digits that follow it. Thus %g can print a single digit.
+ // FIXME: If it is alternative form:
+ // For g and G conversions, trailing zeros are not removed from the result.
case analyze_format_string::ConversionSpecifier::gArg:
case analyze_format_string::ConversionSpecifier::GArg:
+ Size += 1;
+ break;
// Floating point number in the form '[+]ddd.ddd'.
case analyze_format_string::ConversionSpecifier::fArg:
@@ -484,6 +969,10 @@ public:
// Just a pointer in the form '0xddd'.
case analyze_format_string::ConversionSpecifier::pArg:
+ // Linux kernel has its own extesion for `%p` specifier.
+ // Kernel Document:
+ // https://docs.kernel.org/core-api/printk-formats.html#pointer-types
+ IsKernelCompatible = false;
Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
break;
@@ -500,18 +989,26 @@ public:
if (FS.hasAlternativeForm()) {
switch (FS.getConversionSpecifier().getKind()) {
- default:
- break;
- // Force a leading '0'.
+ // For o conversion, it increases the precision, if and only if necessary,
+ // to force the first digit of the result to be a zero
+ // (if the value and precision are both 0, a single 0 is printed)
case analyze_format_string::ConversionSpecifier::oArg:
- Size += 1;
- break;
- // Force a leading '0x'.
+ // For b conversion, a nonzero result has 0b prefixed to it.
+ case analyze_format_string::ConversionSpecifier::bArg:
+ // For x (or X) conversion, a nonzero result has 0x (or 0X) prefixed to
+ // it.
case analyze_format_string::ConversionSpecifier::xArg:
case analyze_format_string::ConversionSpecifier::XArg:
- Size += 2;
+ // Note: even when the prefix is added, if
+ // (prefix_width <= FieldWidth - formatted_length) holds,
+ // the prefix does not increase the format
+ // size. e.g.(("%#3x", 0xf) is "0xf")
+
+ // If the result is zero, o, b, x, X adds nothing.
break;
- // Force a period '.' before decimal, even if precision is 0.
+ // For a, A, e, E, f, F, g, and G conversions,
+ // the result of converting a floating-point number always contains a
+ // decimal-point
case analyze_format_string::ConversionSpecifier::aArg:
case analyze_format_string::ConversionSpecifier::AArg:
case analyze_format_string::ConversionSpecifier::eArg:
@@ -522,6 +1019,9 @@ public:
case analyze_format_string::ConversionSpecifier::GArg:
Size += (Precision ? 0 : 1);
break;
+ // For other conversions, the behavior is undefined.
+ default:
+ break;
}
}
assert(SpecifierLen <= Size && "no underflow");
@@ -530,6 +1030,7 @@ public:
}
size_t getSizeLowerBound() const { return Size; }
+ bool isKernelCompatible() const { return IsKernelCompatible; }
private:
static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
@@ -588,65 +1089,227 @@ private:
} // namespace
-/// Check a call to BuiltinID for buffer overflows. If BuiltinID is a
-/// __builtin_*_chk function, then use the object size argument specified in the
-/// source. Otherwise, infer the object size using __builtin_object_size.
+static bool ProcessFormatStringLiteral(const Expr *FormatExpr,
+ StringRef &FormatStrRef, size_t &StrLen,
+ ASTContext &Context) {
+ if (const auto *Format = dyn_cast<StringLiteral>(FormatExpr);
+ Format && (Format->isOrdinary() || Format->isUTF8())) {
+ FormatStrRef = Format->getString();
+ const ConstantArrayType *T =
+ Context.getAsConstantArrayType(Format->getType());
+ assert(T && "String literal not of constant array type!");
+ size_t TypeSize = T->getSize().getZExtValue();
+ // In case there's a null byte somewhere.
+ StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
+ return true;
+ }
+ return false;
+}
+
void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
CallExpr *TheCall) {
- // FIXME: There are some more useful checks we could be doing here:
- // - Evaluate strlen of strcpy arguments, use as object size.
-
if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
- isConstantEvaluated())
+ isConstantEvaluatedContext())
return;
- unsigned BuiltinID = FD->getBuiltinID(/*ConsiderWrappers=*/true);
+ bool UseDABAttr = false;
+ const FunctionDecl *UseDecl = FD;
+
+ const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>();
+ if (DABAttr) {
+ UseDecl = DABAttr->getFunction();
+ assert(UseDecl && "Missing FunctionDecl in DiagnoseAsBuiltin attribute!");
+ UseDABAttr = true;
+ }
+
+ unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/true);
+
if (!BuiltinID)
return;
const TargetInfo &TI = getASTContext().getTargetInfo();
unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType());
+ auto TranslateIndex = [&](unsigned Index) -> std::optional<unsigned> {
+ // If we refer to a diagnose_as_builtin attribute, we need to change the
+ // argument index to refer to the arguments of the called function. Unless
+ // the index is out of bounds, which presumably means it's a variadic
+ // function.
+ if (!UseDABAttr)
+ return Index;
+ unsigned DABIndices = DABAttr->argIndices_size();
+ unsigned NewIndex = Index < DABIndices
+ ? DABAttr->argIndices_begin()[Index]
+ : Index - DABIndices + FD->getNumParams();
+ if (NewIndex >= TheCall->getNumArgs())
+ return std::nullopt;
+ return NewIndex;
+ };
+
+ auto ComputeExplicitObjectSizeArgument =
+ [&](unsigned Index) -> std::optional<llvm::APSInt> {
+ std::optional<unsigned> IndexOptional = TranslateIndex(Index);
+ if (!IndexOptional)
+ return std::nullopt;
+ unsigned NewIndex = *IndexOptional;
+ Expr::EvalResult Result;
+ Expr *SizeArg = TheCall->getArg(NewIndex);
+ if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
+ return std::nullopt;
+ llvm::APSInt Integer = Result.Val.getInt();
+ Integer.setIsUnsigned(true);
+ return Integer;
+ };
+
+ auto ComputeSizeArgument =
+ [&](unsigned Index) -> std::optional<llvm::APSInt> {
+ // If the parameter has a pass_object_size attribute, then we should use its
+ // (potentially) more strict checking mode. Otherwise, conservatively assume
+ // type 0.
+ int BOSType = 0;
+ // This check can fail for variadic functions.
+ if (Index < FD->getNumParams()) {
+ if (const auto *POS =
+ FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>())
+ BOSType = POS->getType();
+ }
+
+ std::optional<unsigned> IndexOptional = TranslateIndex(Index);
+ if (!IndexOptional)
+ return std::nullopt;
+ unsigned NewIndex = *IndexOptional;
+
+ if (NewIndex >= TheCall->getNumArgs())
+ return std::nullopt;
+
+ const Expr *ObjArg = TheCall->getArg(NewIndex);
+ uint64_t Result;
+ if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
+ return std::nullopt;
+
+ // Get the object size in the target's size_t width.
+ return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
+ };
+
+ auto ComputeStrLenArgument =
+ [&](unsigned Index) -> std::optional<llvm::APSInt> {
+ std::optional<unsigned> IndexOptional = TranslateIndex(Index);
+ if (!IndexOptional)
+ return std::nullopt;
+ unsigned NewIndex = *IndexOptional;
+
+ const Expr *ObjArg = TheCall->getArg(NewIndex);
+ uint64_t Result;
+ if (!ObjArg->tryEvaluateStrLen(Result, getASTContext()))
+ return std::nullopt;
+ // Add 1 for null byte.
+ return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth);
+ };
+
+ std::optional<llvm::APSInt> SourceSize;
+ std::optional<llvm::APSInt> DestinationSize;
unsigned DiagID = 0;
bool IsChkVariant = false;
- Optional<llvm::APSInt> UsedSize;
- unsigned SizeIndex, ObjectIndex;
+
+ auto GetFunctionName = [&]() {
+ StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
+ // Skim off the details of whichever builtin was called to produce a better
+ // diagnostic, as it's unlikely that the user wrote the __builtin
+ // explicitly.
+ if (IsChkVariant) {
+ FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
+ FunctionName = FunctionName.drop_back(std::strlen("_chk"));
+ } else {
+ FunctionName.consume_front("__builtin_");
+ }
+ return FunctionName;
+ };
+
switch (BuiltinID) {
default:
return;
+ case Builtin::BI__builtin_strcpy:
+ case Builtin::BIstrcpy: {
+ DiagID = diag::warn_fortify_strlen_overflow;
+ SourceSize = ComputeStrLenArgument(1);
+ DestinationSize = ComputeSizeArgument(0);
+ break;
+ }
+
+ case Builtin::BI__builtin___strcpy_chk: {
+ DiagID = diag::warn_fortify_strlen_overflow;
+ SourceSize = ComputeStrLenArgument(1);
+ DestinationSize = ComputeExplicitObjectSizeArgument(2);
+ IsChkVariant = true;
+ break;
+ }
+
+ case Builtin::BIscanf:
+ case Builtin::BIfscanf:
+ case Builtin::BIsscanf: {
+ unsigned FormatIndex = 1;
+ unsigned DataIndex = 2;
+ if (BuiltinID == Builtin::BIscanf) {
+ FormatIndex = 0;
+ DataIndex = 1;
+ }
+
+ const auto *FormatExpr =
+ TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();
+
+ StringRef FormatStrRef;
+ size_t StrLen;
+ if (!ProcessFormatStringLiteral(FormatExpr, FormatStrRef, StrLen, Context))
+ return;
+
+ auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize,
+ unsigned SourceSize) {
+ DiagID = diag::warn_fortify_scanf_overflow;
+ unsigned Index = ArgIndex + DataIndex;
+ StringRef FunctionName = GetFunctionName();
+ DiagRuntimeBehavior(TheCall->getArg(Index)->getBeginLoc(), TheCall,
+ PDiag(DiagID) << FunctionName << (Index + 1)
+ << DestSize << SourceSize);
+ };
+
+ auto ShiftedComputeSizeArgument = [&](unsigned Index) {
+ return ComputeSizeArgument(Index + DataIndex);
+ };
+ ScanfDiagnosticFormatHandler H(ShiftedComputeSizeArgument, Diagnose);
+ const char *FormatBytes = FormatStrRef.data();
+ analyze_format_string::ParseScanfString(H, FormatBytes,
+ FormatBytes + StrLen, getLangOpts(),
+ Context.getTargetInfo());
+
+ // Unlike the other cases, in this one we have already issued the diagnostic
+ // here, so no need to continue (because unlike the other cases, here the
+ // diagnostic refers to the argument number).
+ return;
+ }
+
case Builtin::BIsprintf:
case Builtin::BI__builtin___sprintf_chk: {
size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts();
- if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {
-
- if (!Format->isAscii() && !Format->isUTF8())
- return;
-
- StringRef FormatStrRef = Format->getString();
+ StringRef FormatStrRef;
+ size_t StrLen;
+ if (ProcessFormatStringLiteral(FormatExpr, FormatStrRef, StrLen, Context)) {
EstimateSizeFormatHandler H(FormatStrRef);
const char *FormatBytes = FormatStrRef.data();
- const ConstantArrayType *T =
- Context.getAsConstantArrayType(Format->getType());
- assert(T && "String literal not of constant array type!");
- size_t TypeSize = T->getSize().getZExtValue();
-
- // In case there's a null byte somewhere.
- size_t StrLen =
- std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0));
if (!analyze_format_string::ParsePrintfString(
H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
Context.getTargetInfo(), false)) {
- DiagID = diag::warn_fortify_source_format_overflow;
- UsedSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
- .extOrTrunc(SizeTypeWidth);
+ DiagID = H.isKernelCompatible()
+ ? diag::warn_format_overflow
+ : diag::warn_format_overflow_non_kprintf;
+ SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound())
+ .extOrTrunc(SizeTypeWidth);
if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
+ DestinationSize = ComputeExplicitObjectSizeArgument(2);
IsChkVariant = true;
- ObjectIndex = 2;
} else {
- IsChkVariant = false;
- ObjectIndex = 0;
+ DestinationSize = ComputeSizeArgument(0);
}
break;
}
@@ -664,18 +1327,19 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
case Builtin::BI__builtin___memccpy_chk:
case Builtin::BI__builtin___mempcpy_chk: {
DiagID = diag::warn_builtin_chk_overflow;
+ SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2);
+ DestinationSize =
+ ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
IsChkVariant = true;
- SizeIndex = TheCall->getNumArgs() - 2;
- ObjectIndex = TheCall->getNumArgs() - 1;
break;
}
case Builtin::BI__builtin___snprintf_chk:
case Builtin::BI__builtin___vsnprintf_chk: {
DiagID = diag::warn_builtin_chk_overflow;
+ SourceSize = ComputeExplicitObjectSizeArgument(1);
+ DestinationSize = ComputeExplicitObjectSizeArgument(3);
IsChkVariant = true;
- SizeIndex = 1;
- ObjectIndex = 3;
break;
}
@@ -691,8 +1355,8 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
// size larger than the destination buffer though; this is a runtime abort
// in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
DiagID = diag::warn_fortify_source_size_mismatch;
- SizeIndex = TheCall->getNumArgs() - 1;
- ObjectIndex = 0;
+ SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
+ DestinationSize = ComputeSizeArgument(0);
break;
}
@@ -705,8 +1369,8 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
case Builtin::BImempcpy:
case Builtin::BI__builtin_mempcpy: {
DiagID = diag::warn_fortify_source_overflow;
- SizeIndex = TheCall->getNumArgs() - 1;
- ObjectIndex = 0;
+ SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
+ DestinationSize = ComputeSizeArgument(0);
break;
}
case Builtin::BIsnprintf:
@@ -714,66 +1378,52 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
case Builtin::BIvsnprintf:
case Builtin::BI__builtin_vsnprintf: {
DiagID = diag::warn_fortify_source_size_mismatch;
- SizeIndex = 1;
- ObjectIndex = 0;
- break;
- }
- }
-
- llvm::APSInt ObjectSize;
- // For __builtin___*_chk, the object size is explicitly provided by the caller
- // (usually using __builtin_object_size). Use that value to check this call.
- if (IsChkVariant) {
- Expr::EvalResult Result;
- Expr *SizeArg = TheCall->getArg(ObjectIndex);
- if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
- return;
- ObjectSize = Result.Val.getInt();
-
- // Otherwise, try to evaluate an imaginary call to __builtin_object_size.
- } else {
- // If the parameter has a pass_object_size attribute, then we should use its
- // (potentially) more strict checking mode. Otherwise, conservatively assume
- // type 0.
- int BOSType = 0;
- if (const auto *POS =
- FD->getParamDecl(ObjectIndex)->getAttr<PassObjectSizeAttr>())
- BOSType = POS->getType();
-
- Expr *ObjArg = TheCall->getArg(ObjectIndex);
- uint64_t Result;
- if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType))
- return;
- // Get the object size in the target's size_t width.
- ObjectSize = llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth);
+ SourceSize = ComputeExplicitObjectSizeArgument(1);
+ const auto *FormatExpr = TheCall->getArg(2)->IgnoreParenImpCasts();
+ StringRef FormatStrRef;
+ size_t StrLen;
+ if (SourceSize &&
+ ProcessFormatStringLiteral(FormatExpr, FormatStrRef, StrLen, Context)) {
+ EstimateSizeFormatHandler H(FormatStrRef);
+ const char *FormatBytes = FormatStrRef.data();
+ if (!analyze_format_string::ParsePrintfString(
+ H, FormatBytes, FormatBytes + StrLen, getLangOpts(),
+ Context.getTargetInfo(), /*isFreeBSDKPrintf=*/false)) {
+ llvm::APSInt FormatSize =
+ llvm::APSInt::getUnsigned(H.getSizeLowerBound())
+ .extOrTrunc(SizeTypeWidth);
+ if (FormatSize > *SourceSize && *SourceSize != 0) {
+ unsigned TruncationDiagID =
+ H.isKernelCompatible() ? diag::warn_format_truncation
+ : diag::warn_format_truncation_non_kprintf;
+ SmallString<16> SpecifiedSizeStr;
+ SmallString<16> FormatSizeStr;
+ SourceSize->toString(SpecifiedSizeStr, /*Radix=*/10);
+ FormatSize.toString(FormatSizeStr, /*Radix=*/10);
+ DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
+ PDiag(TruncationDiagID)
+ << GetFunctionName() << SpecifiedSizeStr
+ << FormatSizeStr);
+ }
+ }
+ }
+ DestinationSize = ComputeSizeArgument(0);
}
-
- // Evaluate the number of bytes of the object that this call will use.
- if (!UsedSize) {
- Expr::EvalResult Result;
- Expr *UsedSizeArg = TheCall->getArg(SizeIndex);
- if (!UsedSizeArg->EvaluateAsInt(Result, getASTContext()))
- return;
- UsedSize = Result.Val.getInt().extOrTrunc(SizeTypeWidth);
}
- if (UsedSize.getValue().ule(ObjectSize))
+ if (!SourceSize || !DestinationSize ||
+ llvm::APSInt::compareValues(*SourceSize, *DestinationSize) <= 0)
return;
- StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID);
- // Skim off the details of whichever builtin was called to produce a better
- // diagnostic, as it's unlikley that the user wrote the __builtin explicitly.
- if (IsChkVariant) {
- FunctionName = FunctionName.drop_front(std::strlen("__builtin___"));
- FunctionName = FunctionName.drop_back(std::strlen("_chk"));
- } else if (FunctionName.startswith("__builtin_")) {
- FunctionName = FunctionName.drop_front(std::strlen("__builtin_"));
- }
+ StringRef FunctionName = GetFunctionName();
+ SmallString<16> DestinationStr;
+ SmallString<16> SourceStr;
+ DestinationSize->toString(DestinationStr, /*Radix=*/10);
+ SourceSize->toString(SourceStr, /*Radix=*/10);
DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
PDiag(DiagID)
- << FunctionName << toString(ObjectSize, /*Radix=*/10)
- << toString(UsedSize.getValue(), /*Radix=*/10));
+ << FunctionName << DestinationStr << SourceStr);
}
static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
@@ -838,9 +1488,15 @@ static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
}
static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
- if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts())) {
+ // OpenCL device can support extension but not the feature as extension
+ // requires subgroup independent forward progress, but subgroup independent
+ // forward progress is optional in OpenCL C 3.0 __opencl_c_subgroups feature.
+ if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts()) &&
+ !S.getOpenCLOptions().isSupported("__opencl_c_subgroups",
+ S.getLangOpts())) {
S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
- << 1 << Call->getDirectCallee() << "cl_khr_subgroups";
+ << 1 << Call->getDirectCallee()
+ << "cl_khr_subgroups or __opencl_c_subgroups";
return true;
}
return false;
@@ -955,7 +1611,7 @@ static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
if (NumArgs < 4) {
S.Diag(TheCall->getBeginLoc(),
diag::err_typecheck_call_too_few_args_at_least)
- << 0 << 4 << NumArgs;
+ << 0 << 4 << NumArgs << /*is non object*/ 0;
return true;
}
@@ -1336,18 +1992,18 @@ static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
TheCall->setType(ParamTy);
- auto DiagSelect = [&]() -> llvm::Optional<unsigned> {
+ auto DiagSelect = [&]() -> std::optional<unsigned> {
if (!ParamTy->isPointerType())
return 0;
if (ParamTy->isFunctionPointerType())
return 1;
if (ParamTy->isVoidPointerType())
return 2;
- return llvm::Optional<unsigned>{};
+ return std::optional<unsigned>{};
}();
- if (DiagSelect.hasValue()) {
+ if (DiagSelect) {
S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
- << DiagSelect.getValue() << TheCall->getSourceRange();
+ << *DiagSelect << TheCall->getSourceRange();
return ExprError();
}
@@ -1375,11 +2031,26 @@ static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
return TheCall;
}
+// Emit an error and return true if the current object format type is in the
+// list of unsupported types.
+static bool CheckBuiltinTargetNotInUnsupported(
+ Sema &S, unsigned BuiltinID, CallExpr *TheCall,
+ ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) {
+ llvm::Triple::ObjectFormatType CurObjFormat =
+ S.getASTContext().getTargetInfo().getTriple().getObjectFormat();
+ if (llvm::is_contained(UnsupportedObjectFormatTypes, CurObjFormat)) {
+ S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
+ << TheCall->getSourceRange();
+ return true;
+ }
+ return false;
+}
+
// Emit an error and return true if the current architecture is not in the list
// of supported architectures.
static bool
-CheckBuiltinTargetSupport(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
- ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
+CheckBuiltinTargetInSupported(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
+ ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
llvm::Triple::ArchType CurArch =
S.getASTContext().getTargetInfo().getTriple().getArch();
if (llvm::is_contained(SupportedArchs, CurArch))
@@ -1433,7 +2104,43 @@ bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ case llvm::Triple::loongarch32:
+ case llvm::Triple::loongarch64:
+ return CheckLoongArchBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ case llvm::Triple::wasm32:
+ case llvm::Triple::wasm64:
+ return CheckWebAssemblyBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ case llvm::Triple::nvptx:
+ case llvm::Triple::nvptx64:
+ return CheckNVPTXBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ }
+}
+
+// Check if \p Ty is a valid type for the elementwise math builtins. If it is
+// not a valid type, emit an error message and return true. Otherwise return
+// false.
+static bool checkMathBuiltinElementType(Sema &S, SourceLocation Loc,
+ QualType Ty) {
+ if (!Ty->getAs<VectorType>() && !ConstantMatrixType::isValidElementType(Ty)) {
+ return S.Diag(Loc, diag::err_builtin_invalid_arg_type)
+ << 1 << /* vector, integer or float ty*/ 0 << Ty;
+ }
+
+ return false;
+}
+
+static bool checkFPMathBuiltinElementType(Sema &S, SourceLocation Loc,
+ QualType ArgTy, int ArgIndex) {
+ QualType EltTy = ArgTy;
+ if (auto *VecTy = EltTy->getAs<VectorType>())
+ EltTy = VecTy->getElementType();
+
+ if (!EltTy->isRealFloatingType()) {
+ return S.Diag(Loc, diag::err_builtin_invalid_arg_type)
+ << ArgIndex << /* vector or float ty*/ 5 << ArgTy;
}
+
+ return false;
}
ExprResult
@@ -1454,13 +2161,23 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
if ((ICEArguments & (1 << ArgNo)) == 0) continue;
llvm::APSInt Result;
- if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
+ // If we don't have enough arguments, continue so we can issue better
+ // diagnostic in checkArgCount(...)
+ if (ArgNo < TheCall->getNumArgs() &&
+ SemaBuiltinConstantArg(TheCall, ArgNo, Result))
return true;
ICEArguments &= ~(1 << ArgNo);
}
+ FPOptions FPO;
switch (BuiltinID) {
case Builtin::BI__builtin___CFStringMakeConstantString:
+ // CFStringMakeConstantString is currently not implemented for GOFF (i.e.,
+ // on z/OS) and for XCOFF (i.e., on AIX). Emit unsupported
+ if (CheckBuiltinTargetNotInUnsupported(
+ *this, BuiltinID, TheCall,
+ {llvm::Triple::GOFF, llvm::Triple::XCOFF}))
+ return ExprError();
assert(TheCall->getNumArgs() == 1 &&
"Wrong # arguments to builtin CFStringMakeConstantString");
if (CheckObjCString(TheCall->getArg(0)))
@@ -1495,7 +2212,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BI_interlockedbittestandreset_acq:
case Builtin::BI_interlockedbittestandreset_rel:
case Builtin::BI_interlockedbittestandreset_nf:
- if (CheckBuiltinTargetSupport(
+ if (CheckBuiltinTargetInSupported(
*this, BuiltinID, TheCall,
{llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
return ExprError();
@@ -1508,9 +2225,18 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BI_bittestandset64:
case Builtin::BI_interlockedbittestandreset64:
case Builtin::BI_interlockedbittestandset64:
- if (CheckBuiltinTargetSupport(*this, BuiltinID, TheCall,
- {llvm::Triple::x86_64, llvm::Triple::arm,
- llvm::Triple::thumb, llvm::Triple::aarch64}))
+ if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall,
+ {llvm::Triple::x86_64, llvm::Triple::arm,
+ llvm::Triple::thumb,
+ llvm::Triple::aarch64}))
+ return ExprError();
+ break;
+
+ case Builtin::BI__builtin_set_flt_rounds:
+ if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall,
+ {llvm::Triple::x86, llvm::Triple::x86_64,
+ llvm::Triple::arm, llvm::Triple::thumb,
+ llvm::Triple::aarch64}))
return ExprError();
break;
@@ -1520,22 +2246,29 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BI__builtin_islessequal:
case Builtin::BI__builtin_islessgreater:
case Builtin::BI__builtin_isunordered:
- if (SemaBuiltinUnorderedCompare(TheCall))
+ if (SemaBuiltinUnorderedCompare(TheCall, BuiltinID))
return ExprError();
break;
case Builtin::BI__builtin_fpclassify:
- if (SemaBuiltinFPClassification(TheCall, 6))
+ if (SemaBuiltinFPClassification(TheCall, 6, BuiltinID))
+ return ExprError();
+ break;
+ case Builtin::BI__builtin_isfpclass:
+ if (SemaBuiltinFPClassification(TheCall, 2, BuiltinID))
return ExprError();
break;
case Builtin::BI__builtin_isfinite:
case Builtin::BI__builtin_isinf:
case Builtin::BI__builtin_isinf_sign:
case Builtin::BI__builtin_isnan:
+ case Builtin::BI__builtin_issignaling:
case Builtin::BI__builtin_isnormal:
+ case Builtin::BI__builtin_issubnormal:
+ case Builtin::BI__builtin_iszero:
case Builtin::BI__builtin_signbit:
case Builtin::BI__builtin_signbitf:
case Builtin::BI__builtin_signbitl:
- if (SemaBuiltinFPClassification(TheCall, 1))
+ if (SemaBuiltinFPClassification(TheCall, 1, BuiltinID))
return ExprError();
break;
case Builtin::BI__builtin_shufflevector:
@@ -1547,10 +2280,12 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return ExprError();
break;
case Builtin::BI__builtin_alloca_with_align:
+ case Builtin::BI__builtin_alloca_with_align_uninitialized:
if (SemaBuiltinAllocaWithAlign(TheCall))
return ExprError();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Builtin::BI__builtin_alloca:
+ case Builtin::BI__builtin_alloca_uninitialized:
Diag(TheCall->getBeginLoc(), diag::warn_alloca)
<< TheCall->getDirectCallee();
break;
@@ -1715,12 +2450,23 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
// value so we bail out.
if (SizeOp->isValueDependent())
break;
- if (!SizeOp->EvaluateKnownConstInt(Context).isNullValue()) {
+ if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) {
CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc());
CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc());
}
break;
}
+ case Builtin::BI__builtin_memset_inline: {
+ clang::Expr *SizeOp = TheCall->getArg(2);
+ // We warn about filling to `nullptr` pointers when `size` is greater than
+ // 0. When `size` is value dependent we cannot evaluate its value so we bail
+ // out.
+ if (SizeOp->isValueDependent())
+ break;
+ if (!SizeOp->EvaluateKnownConstInt(Context).isZero())
+ CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc());
+ break;
+ }
#define BUILTIN(ID, TYPE, ATTRS)
#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
case Builtin::BI##ID: \
@@ -1738,6 +2484,10 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
if (SemaBuiltinAddressof(*this, TheCall))
return ExprError();
break;
+ case Builtin::BI__builtin_function_start:
+ if (SemaBuiltinFunctionStart(*this, TheCall))
+ return ExprError();
+ break;
case Builtin::BI__builtin_is_aligned:
case Builtin::BI__builtin_align_up:
case Builtin::BI__builtin_align_down:
@@ -1759,62 +2509,8 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
CorrectDelayedTyposInExpr(TheCallResult.get());
return Res;
}
- case Builtin::BI__builtin_dump_struct: {
- // We first want to ensure we are called with 2 arguments
- if (checkArgCount(*this, TheCall, 2))
- return ExprError();
- // Ensure that the first argument is of type 'struct XX *'
- const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts();
- const QualType PtrArgType = PtrArg->getType();
- if (!PtrArgType->isPointerType() ||
- !PtrArgType->getPointeeType()->isRecordType()) {
- Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
- << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType
- << "structure pointer";
- return ExprError();
- }
-
- // Ensure that the second argument is of type 'FunctionType'
- const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts();
- const QualType FnPtrArgType = FnPtrArg->getType();
- if (!FnPtrArgType->isPointerType()) {
- Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
- << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
- << FnPtrArgType << "'int (*)(const char *, ...)'";
- return ExprError();
- }
-
- const auto *FuncType =
- FnPtrArgType->getPointeeType()->getAs<FunctionType>();
-
- if (!FuncType) {
- Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
- << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
- << FnPtrArgType << "'int (*)(const char *, ...)'";
- return ExprError();
- }
-
- if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) {
- if (!FT->getNumParams()) {
- Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
- << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
- << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
- return ExprError();
- }
- QualType PT = FT->getParamType(0);
- if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy ||
- !PT->isPointerType() || !PT->getPointeeType()->isCharType() ||
- !PT->getPointeeType().isConstQualified()) {
- Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
- << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
- << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
- return ExprError();
- }
- }
-
- TheCall->setType(Context.IntTy);
- break;
- }
+ case Builtin::BI__builtin_dump_struct:
+ return SemaBuiltinDumpStruct(*this, TheCall);
case Builtin::BI__builtin_expect_with_probability: {
// We first want to ensure we are called with 3 arguments
if (checkArgCount(*this, TheCall, 3))
@@ -1876,6 +2572,33 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
TheCall->setType(Context.VoidPtrTy);
break;
+ case Builtin::BIaddressof:
+ case Builtin::BI__addressof:
+ case Builtin::BIforward:
+ case Builtin::BIforward_like:
+ case Builtin::BImove:
+ case Builtin::BImove_if_noexcept:
+ case Builtin::BIas_const: {
+ // These are all expected to be of the form
+ // T &/&&/* f(U &/&&)
+ // where T and U only differ in qualification.
+ if (checkArgCount(*this, TheCall, 1))
+ return ExprError();
+ QualType Param = FDecl->getParamDecl(0)->getType();
+ QualType Result = FDecl->getReturnType();
+ bool ReturnsPointer = BuiltinID == Builtin::BIaddressof ||
+ BuiltinID == Builtin::BI__addressof;
+ if (!(Param->isReferenceType() &&
+ (ReturnsPointer ? Result->isAnyPointerType()
+ : Result->isReferenceType()) &&
+ Context.hasSameUnqualifiedType(Param->getPointeeType(),
+ Result->getPointeeType()))) {
+ Diag(TheCall->getBeginLoc(), diag::err_builtin_move_forward_unsupported)
+ << FDecl;
+ return ExprError();
+ }
+ break;
+ }
// OpenCL v2.0, s6.13.16 - Pipe functions
case Builtin::BIread_pipe:
case Builtin::BIwrite_pipe:
@@ -1938,7 +2661,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
break;
case Builtin::BI__builtin_os_log_format:
Cleanup.setExprNeedsCleanups(true);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Builtin::BI__builtin_os_log_format_buffer_size:
if (SemaBuiltinOSLogFormat(TheCall))
return ExprError();
@@ -1962,6 +2685,194 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
break;
}
+ case Builtin::BI__builtin_nondeterministic_value: {
+ if (SemaBuiltinNonDeterministicValue(TheCall))
+ return ExprError();
+ break;
+ }
+
+ // __builtin_elementwise_abs restricts the element type to signed integers or
+ // floating point types only.
+ case Builtin::BI__builtin_elementwise_abs: {
+ if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
+ return ExprError();
+
+ QualType ArgTy = TheCall->getArg(0)->getType();
+ QualType EltTy = ArgTy;
+
+ if (auto *VecTy = EltTy->getAs<VectorType>())
+ EltTy = VecTy->getElementType();
+ if (EltTy->isUnsignedIntegerType()) {
+ Diag(TheCall->getArg(0)->getBeginLoc(),
+ diag::err_builtin_invalid_arg_type)
+ << 1 << /* signed integer or float ty*/ 3 << ArgTy;
+ return ExprError();
+ }
+ break;
+ }
+
+ // These builtins restrict the element type to floating point
+ // types only.
+ case Builtin::BI__builtin_elementwise_ceil:
+ case Builtin::BI__builtin_elementwise_cos:
+ case Builtin::BI__builtin_elementwise_exp:
+ case Builtin::BI__builtin_elementwise_exp2:
+ case Builtin::BI__builtin_elementwise_floor:
+ case Builtin::BI__builtin_elementwise_log:
+ case Builtin::BI__builtin_elementwise_log2:
+ case Builtin::BI__builtin_elementwise_log10:
+ case Builtin::BI__builtin_elementwise_roundeven:
+ case Builtin::BI__builtin_elementwise_round:
+ case Builtin::BI__builtin_elementwise_rint:
+ case Builtin::BI__builtin_elementwise_nearbyint:
+ case Builtin::BI__builtin_elementwise_sin:
+ case Builtin::BI__builtin_elementwise_sqrt:
+ case Builtin::BI__builtin_elementwise_trunc:
+ case Builtin::BI__builtin_elementwise_canonicalize: {
+ if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
+ return ExprError();
+
+ QualType ArgTy = TheCall->getArg(0)->getType();
+ if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(),
+ ArgTy, 1))
+ return ExprError();
+ break;
+ }
+ case Builtin::BI__builtin_elementwise_fma: {
+ if (SemaBuiltinElementwiseTernaryMath(TheCall))
+ return ExprError();
+ break;
+ }
+
+ // These builtins restrict the element type to floating point
+ // types only, and take in two arguments.
+ case Builtin::BI__builtin_elementwise_pow: {
+ if (SemaBuiltinElementwiseMath(TheCall))
+ return ExprError();
+
+ QualType ArgTy = TheCall->getArg(0)->getType();
+ if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(),
+ ArgTy, 1) ||
+ checkFPMathBuiltinElementType(*this, TheCall->getArg(1)->getBeginLoc(),
+ ArgTy, 2))
+ return ExprError();
+ break;
+ }
+
+ // These builtins restrict the element type to integer
+ // types only.
+ case Builtin::BI__builtin_elementwise_add_sat:
+ case Builtin::BI__builtin_elementwise_sub_sat: {
+ if (SemaBuiltinElementwiseMath(TheCall))
+ return ExprError();
+
+ const Expr *Arg = TheCall->getArg(0);
+ QualType ArgTy = Arg->getType();
+ QualType EltTy = ArgTy;
+
+ if (auto *VecTy = EltTy->getAs<VectorType>())
+ EltTy = VecTy->getElementType();
+
+ if (!EltTy->isIntegerType()) {
+ Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
+ << 1 << /* integer ty */ 6 << ArgTy;
+ return ExprError();
+ }
+ break;
+ }
+
+ case Builtin::BI__builtin_elementwise_min:
+ case Builtin::BI__builtin_elementwise_max:
+ if (SemaBuiltinElementwiseMath(TheCall))
+ return ExprError();
+ break;
+
+ case Builtin::BI__builtin_elementwise_bitreverse: {
+ if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
+ return ExprError();
+
+ const Expr *Arg = TheCall->getArg(0);
+ QualType ArgTy = Arg->getType();
+ QualType EltTy = ArgTy;
+
+ if (auto *VecTy = EltTy->getAs<VectorType>())
+ EltTy = VecTy->getElementType();
+
+ if (!EltTy->isIntegerType()) {
+ Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
+ << 1 << /* integer ty */ 6 << ArgTy;
+ return ExprError();
+ }
+ break;
+ }
+
+ case Builtin::BI__builtin_elementwise_copysign: {
+ if (checkArgCount(*this, TheCall, 2))
+ return ExprError();
+
+ ExprResult Magnitude = UsualUnaryConversions(TheCall->getArg(0));
+ ExprResult Sign = UsualUnaryConversions(TheCall->getArg(1));
+ if (Magnitude.isInvalid() || Sign.isInvalid())
+ return ExprError();
+
+ QualType MagnitudeTy = Magnitude.get()->getType();
+ QualType SignTy = Sign.get()->getType();
+ if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(),
+ MagnitudeTy, 1) ||
+ checkFPMathBuiltinElementType(*this, TheCall->getArg(1)->getBeginLoc(),
+ SignTy, 2)) {
+ return ExprError();
+ }
+
+ if (MagnitudeTy.getCanonicalType() != SignTy.getCanonicalType()) {
+ return Diag(Sign.get()->getBeginLoc(),
+ diag::err_typecheck_call_different_arg_types)
+ << MagnitudeTy << SignTy;
+ }
+
+ TheCall->setArg(0, Magnitude.get());
+ TheCall->setArg(1, Sign.get());
+ TheCall->setType(Magnitude.get()->getType());
+ break;
+ }
+ case Builtin::BI__builtin_reduce_max:
+ case Builtin::BI__builtin_reduce_min: {
+ if (PrepareBuiltinReduceMathOneArgCall(TheCall))
+ return ExprError();
+
+ const Expr *Arg = TheCall->getArg(0);
+ const auto *TyA = Arg->getType()->getAs<VectorType>();
+ if (!TyA) {
+ Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
+ << 1 << /* vector ty*/ 4 << Arg->getType();
+ return ExprError();
+ }
+
+ TheCall->setType(TyA->getElementType());
+ break;
+ }
+
+ // These builtins support vectors of integers only.
+ // TODO: ADD/MUL should support floating-point types.
+ case Builtin::BI__builtin_reduce_add:
+ case Builtin::BI__builtin_reduce_mul:
+ case Builtin::BI__builtin_reduce_xor:
+ case Builtin::BI__builtin_reduce_or:
+ case Builtin::BI__builtin_reduce_and: {
+ if (PrepareBuiltinReduceMathOneArgCall(TheCall))
+ return ExprError();
+
+ const Expr *Arg = TheCall->getArg(0);
+ const auto *TyA = Arg->getType()->getAs<VectorType>();
+ if (!TyA || !TyA->getElementType()->isIntegerType()) {
+ Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
+ << 1 << /* vector of integers */ 6 << Arg->getType();
+ return ExprError();
+ }
+ TheCall->setType(TyA->getElementType());
+ break;
+ }
+
case Builtin::BI__builtin_matrix_transpose:
return SemaBuiltinMatrixTranspose(TheCall, TheCallResult);
@@ -2088,25 +2999,36 @@ static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
llvm_unreachable("Invalid NeonTypeFlag!");
}
-bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
- // Range check SVE intrinsics that take immediate values.
- SmallVector<std::tuple<int,int,int>, 3> ImmChecks;
+enum ArmStreamingType {
+ ArmNonStreaming,
+ ArmStreaming,
+ ArmStreamingCompatible,
+ ArmStreamingOrSVE2p1
+};
- switch (BuiltinID) {
- default:
- return false;
-#define GET_SVE_IMMEDIATE_CHECK
-#include "clang/Basic/arm_sve_sema_rangechecks.inc"
-#undef GET_SVE_IMMEDIATE_CHECK
- }
+enum ArmSMEState : unsigned {
+ ArmNoState = 0,
+
+ ArmInZA = 0b01,
+ ArmOutZA = 0b10,
+ ArmInOutZA = 0b11,
+ ArmZAMask = 0b11,
+
+ ArmInZT0 = 0b01 << 2,
+ ArmOutZT0 = 0b10 << 2,
+ ArmInOutZT0 = 0b11 << 2,
+ ArmZT0Mask = 0b11 << 2
+};
+bool Sema::ParseSVEImmChecks(
+ CallExpr *TheCall, SmallVector<std::tuple<int, int, int>, 3> &ImmChecks) {
// Perform all the immediate checks for this builtin call.
bool HasError = false;
for (auto &I : ImmChecks) {
int ArgNum, CheckTy, ElementSizeInBits;
std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;
- typedef bool(*OptionSetCheckFnTy)(int64_t Value);
+ typedef bool (*OptionSetCheckFnTy)(int64_t Value);
// Function that checks whether the operand (ArgNum) is an immediate
// that is one of the predefined values.
@@ -2144,6 +3066,18 @@ bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
HasError = true;
break;
+ case SVETypeFlags::ImmCheck1_1:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 1))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck1_3:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 3))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck1_7:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 7))
+ HasError = true;
+ break;
case SVETypeFlags::ImmCheckExtract:
if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
(2048 / ElementSizeInBits) - 1))
@@ -2203,14 +3137,180 @@ bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
HasError = true;
break;
+ case SVETypeFlags::ImmCheck0_0:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 0))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck0_15:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 15))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck0_255:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 255))
+ HasError = true;
+ break;
+ case SVETypeFlags::ImmCheck2_4_Mul2:
+ if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 2, 4) ||
+ SemaBuiltinConstantArgMultiple(TheCall, ArgNum, 2))
+ HasError = true;
+ break;
}
}
return HasError;
}
+static ArmStreamingType getArmStreamingFnType(const FunctionDecl *FD) {
+ if (FD->hasAttr<ArmLocallyStreamingAttr>())
+ return ArmStreaming;
+ if (const auto *T = FD->getType()->getAs<FunctionProtoType>()) {
+ if (T->getAArch64SMEAttributes() & FunctionType::SME_PStateSMEnabledMask)
+ return ArmStreaming;
+ if (T->getAArch64SMEAttributes() & FunctionType::SME_PStateSMCompatibleMask)
+ return ArmStreamingCompatible;
+ }
+ return ArmNonStreaming;
+}
+
+static void checkArmStreamingBuiltin(Sema &S, CallExpr *TheCall,
+ const FunctionDecl *FD,
+ ArmStreamingType BuiltinType) {
+ ArmStreamingType FnType = getArmStreamingFnType(FD);
+ if (BuiltinType == ArmStreamingOrSVE2p1) {
+ // Check intrinsics that are available in [sve2p1 or sme/sme2].
+ llvm::StringMap<bool> CallerFeatureMap;
+ S.Context.getFunctionFeatureMap(CallerFeatureMap, FD);
+ if (Builtin::evaluateRequiredTargetFeatures("sve2p1", CallerFeatureMap))
+ BuiltinType = ArmStreamingCompatible;
+ else
+ BuiltinType = ArmStreaming;
+ }
+
+ if (FnType == ArmStreaming && BuiltinType == ArmNonStreaming) {
+ S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
+ << TheCall->getSourceRange() << "streaming";
+ }
+
+ if (FnType == ArmStreamingCompatible &&
+ BuiltinType != ArmStreamingCompatible) {
+ S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
+ << TheCall->getSourceRange() << "streaming compatible";
+ return;
+ }
+
+ if (FnType == ArmNonStreaming && BuiltinType == ArmStreaming) {
+ S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
+ << TheCall->getSourceRange() << "non-streaming";
+ }
+}
+
+static bool hasArmZAState(const FunctionDecl *FD) {
+ const auto *T = FD->getType()->getAs<FunctionProtoType>();
+ return (T && FunctionType::getArmZAState(T->getAArch64SMEAttributes()) !=
+ FunctionType::ARM_None) ||
+ (FD->hasAttr<ArmNewAttr>() && FD->getAttr<ArmNewAttr>()->isNewZA());
+}
+
+static bool hasArmZT0State(const FunctionDecl *FD) {
+ const auto *T = FD->getType()->getAs<FunctionProtoType>();
+ return (T && FunctionType::getArmZT0State(T->getAArch64SMEAttributes()) !=
+ FunctionType::ARM_None) ||
+ (FD->hasAttr<ArmNewAttr>() && FD->getAttr<ArmNewAttr>()->isNewZT0());
+}
+
+static ArmSMEState getSMEState(unsigned BuiltinID) {
+ switch (BuiltinID) {
+ default:
+ return ArmNoState;
+#define GET_SME_BUILTIN_GET_STATE
+#include "clang/Basic/arm_sme_builtins_za_state.inc"
+#undef GET_SME_BUILTIN_GET_STATE
+ }
+}
+
+bool Sema::CheckSMEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+ if (const FunctionDecl *FD = getCurFunctionDecl()) {
+ std::optional<ArmStreamingType> BuiltinType;
+
+ switch (BuiltinID) {
+#define GET_SME_STREAMING_ATTRS
+#include "clang/Basic/arm_sme_streaming_attrs.inc"
+#undef GET_SME_STREAMING_ATTRS
+ }
+
+ if (BuiltinType)
+ checkArmStreamingBuiltin(*this, TheCall, FD, *BuiltinType);
+
+ if ((getSMEState(BuiltinID) & ArmZAMask) && !hasArmZAState(FD))
+ Diag(TheCall->getBeginLoc(),
+ diag::warn_attribute_arm_za_builtin_no_za_state)
+ << TheCall->getSourceRange();
+
+ if ((getSMEState(BuiltinID) & ArmZT0Mask) && !hasArmZT0State(FD))
+ Diag(TheCall->getBeginLoc(),
+ diag::warn_attribute_arm_zt0_builtin_no_zt0_state)
+ << TheCall->getSourceRange();
+ }
+
+ // Range check SME intrinsics that take immediate values.
+ SmallVector<std::tuple<int, int, int>, 3> ImmChecks;
+
+ switch (BuiltinID) {
+ default:
+ return false;
+#define GET_SME_IMMEDIATE_CHECK
+#include "clang/Basic/arm_sme_sema_rangechecks.inc"
+#undef GET_SME_IMMEDIATE_CHECK
+ }
+
+ return ParseSVEImmChecks(TheCall, ImmChecks);
+}
+
+bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+ if (const FunctionDecl *FD = getCurFunctionDecl()) {
+ std::optional<ArmStreamingType> BuiltinType;
+
+ switch (BuiltinID) {
+#define GET_SVE_STREAMING_ATTRS
+#include "clang/Basic/arm_sve_streaming_attrs.inc"
+#undef GET_SVE_STREAMING_ATTRS
+ }
+ if (BuiltinType)
+ checkArmStreamingBuiltin(*this, TheCall, FD, *BuiltinType);
+ }
+ // Range check SVE intrinsics that take immediate values.
+ SmallVector<std::tuple<int, int, int>, 3> ImmChecks;
+
+ switch (BuiltinID) {
+ default:
+ return false;
+#define GET_SVE_IMMEDIATE_CHECK
+#include "clang/Basic/arm_sve_sema_rangechecks.inc"
+#undef GET_SVE_IMMEDIATE_CHECK
+ }
+
+ return ParseSVEImmChecks(TheCall, ImmChecks);
+}
+
bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
unsigned BuiltinID, CallExpr *TheCall) {
+ if (const FunctionDecl *FD = getCurFunctionDecl()) {
+
+ switch (BuiltinID) {
+ default:
+ break;
+#define GET_NEON_BUILTINS
+#define TARGET_BUILTIN(id, ...) case NEON::BI##id:
+#define BUILTIN(id, ...) case NEON::BI##id:
+#include "clang/Basic/arm_neon.inc"
+ checkArmStreamingBuiltin(*this, TheCall, FD, ArmNonStreaming);
+ break;
+#undef TARGET_BUILTIN
+#undef BUILTIN
+#undef GET_NEON_BUILTINS
+ }
+ }
+
llvm::APSInt Result;
uint64_t mask = 0;
unsigned TV = 0;
@@ -2303,7 +3403,7 @@ bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI,
const Expr *CoprocArg, bool WantCDE) {
- if (isConstantEvaluated())
+ if (isConstantEvaluatedContext())
return false;
// We can't check the value of a dependent argument.
@@ -2527,13 +3627,15 @@ bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
- SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) ||
- SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
- SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
+ SemaBuiltinConstantArgRange(TheCall, 2, 0, 3) ||
+ SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
+ SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
}
if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
- BuiltinID == AArch64::BI__builtin_arm_wsr64)
+ BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
+ BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
+ BuiltinID == AArch64::BI__builtin_arm_wsr128)
return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
// Memory Tagging Extensions (MTE) Intrinsics
@@ -2562,12 +3664,18 @@ bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
if (BuiltinID == AArch64::BI__getReg)
return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
+ if (BuiltinID == AArch64::BI__break)
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xffff);
+
if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
return true;
if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
return true;
+ if (CheckSMEBuiltinFunctionCall(BuiltinID, TheCall))
+ return true;
+
// For intrinsics which take an immediate value as part of the instruction,
// range check them here.
unsigned i = 0, l = 0, u = 0;
@@ -2591,19 +3699,8 @@ static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) {
// to BPF backend to check whether the access is a
// field access or not.
return (Arg->IgnoreParens()->getObjectKind() == OK_BitField ||
- dyn_cast<MemberExpr>(Arg->IgnoreParens()) ||
- dyn_cast<ArraySubscriptExpr>(Arg->IgnoreParens()));
-}
-
-static bool isEltOfVectorTy(ASTContext &Context, CallExpr *Call, Sema &S,
- QualType VectorTy, QualType EltTy) {
- QualType VectorEltTy = VectorTy->castAs<VectorType>()->getElementType();
- if (!Context.hasSameType(VectorEltTy, EltTy)) {
- S.Diag(Call->getBeginLoc(), diag::err_typecheck_call_different_arg_types)
- << Call->getSourceRange() << VectorEltTy << EltTy;
- return false;
- }
- return true;
+ isa<MemberExpr>(Arg->IgnoreParens()) ||
+ isa<ArraySubscriptExpr>(Arg->IgnoreParens()));
}
static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) {
@@ -2611,13 +3708,13 @@ static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) {
if (ArgType->getAsPlaceholderType())
return false;
- // for TYPE_EXISTENCE/TYPE_SIZEOF reloc type
+ // for TYPE_EXISTENCE/TYPE_MATCH/TYPE_SIZEOF reloc type
// format:
// 1. __builtin_preserve_type_info(*(<type> *)0, flag);
// 2. <type> var;
// __builtin_preserve_type_info(var, flag);
- if (!dyn_cast<DeclRefExpr>(Arg->IgnoreParens()) &&
- !dyn_cast<UnaryOperator>(Arg->IgnoreParens()))
+ if (!isa<DeclRefExpr>(Arg->IgnoreParens()) &&
+ !isa<UnaryOperator>(Arg->IgnoreParens()))
return false;
// Typedef type.
@@ -2674,12 +3771,7 @@ static bool isValidBPFPreserveEnumValueArg(Expr *Arg) {
return false;
// The enum value must be supported.
- for (auto *EDI : ET->getDecl()->enumerators()) {
- if (EDI == Enumerator)
- return true;
- }
-
- return false;
+ return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator);
}
bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
@@ -2695,7 +3787,7 @@ bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
// The second argument needs to be a constant int
Expr *Arg = TheCall->getArg(1);
- Optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context);
+ std::optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context);
diag::kind kind;
if (!Value) {
if (BuiltinID == BPF::BI__builtin_preserve_field_info)
@@ -2942,6 +4034,31 @@ bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
{ Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} },
{ Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B,
{{ 3, false, 1, 0 }} },
+
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10, {{ 2, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_128B,
+ {{ 2, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx,
+ {{ 3, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx_128B,
+ {{ 3, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10, {{ 2, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_128B,
+ {{ 2, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx,
+ {{ 3, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx_128B,
+ {{ 3, false, 2, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, {{ 3, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B,
+ {{ 3, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, {{ 2, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, {{ 3, false, 3, 0 }} },
+ { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B,
+ {{ 3, false, 3, 0 }} },
};
// Use a dynamically initialized static to sort the table exactly once on
@@ -2974,8 +4091,8 @@ bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
unsigned M = 1 << A.Align;
Min *= M;
Max *= M;
- Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max) |
- SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
+ Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max);
+ Error |= SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M);
}
}
return Error;
@@ -2986,6 +4103,499 @@ bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
}
+bool Sema::CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
+ switch (BuiltinID) {
+ default:
+ break;
+ // Basic intrinsics.
+ case LoongArch::BI__builtin_loongarch_cacop_d:
+ case LoongArch::BI__builtin_loongarch_cacop_w: {
+ SemaBuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(5));
+ SemaBuiltinConstantArgRange(TheCall, 2, llvm::minIntN(12),
+ llvm::maxIntN(12));
+ break;
+ }
+ case LoongArch::BI__builtin_loongarch_break:
+ case LoongArch::BI__builtin_loongarch_dbar:
+ case LoongArch::BI__builtin_loongarch_ibar:
+ case LoongArch::BI__builtin_loongarch_syscall:
+ // Check if immediate is in [0, 32767].
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 32767);
+ case LoongArch::BI__builtin_loongarch_csrrd_w:
+ case LoongArch::BI__builtin_loongarch_csrrd_d:
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 16383);
+ case LoongArch::BI__builtin_loongarch_csrwr_w:
+ case LoongArch::BI__builtin_loongarch_csrwr_d:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 16383);
+ case LoongArch::BI__builtin_loongarch_csrxchg_w:
+ case LoongArch::BI__builtin_loongarch_csrxchg_d:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 16383);
+ case LoongArch::BI__builtin_loongarch_lddir_d:
+ case LoongArch::BI__builtin_loongarch_ldpte_d:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
+ case LoongArch::BI__builtin_loongarch_movfcsr2gr:
+ case LoongArch::BI__builtin_loongarch_movgr2fcsr:
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(2));
+
+ // LSX intrinsics.
+ case LoongArch::BI__builtin_lsx_vbitclri_b:
+ case LoongArch::BI__builtin_lsx_vbitrevi_b:
+ case LoongArch::BI__builtin_lsx_vbitseti_b:
+ case LoongArch::BI__builtin_lsx_vsat_b:
+ case LoongArch::BI__builtin_lsx_vsat_bu:
+ case LoongArch::BI__builtin_lsx_vslli_b:
+ case LoongArch::BI__builtin_lsx_vsrai_b:
+ case LoongArch::BI__builtin_lsx_vsrari_b:
+ case LoongArch::BI__builtin_lsx_vsrli_b:
+ case LoongArch::BI__builtin_lsx_vsllwil_h_b:
+ case LoongArch::BI__builtin_lsx_vsllwil_hu_bu:
+ case LoongArch::BI__builtin_lsx_vrotri_b:
+ case LoongArch::BI__builtin_lsx_vsrlri_b:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
+ case LoongArch::BI__builtin_lsx_vbitclri_h:
+ case LoongArch::BI__builtin_lsx_vbitrevi_h:
+ case LoongArch::BI__builtin_lsx_vbitseti_h:
+ case LoongArch::BI__builtin_lsx_vsat_h:
+ case LoongArch::BI__builtin_lsx_vsat_hu:
+ case LoongArch::BI__builtin_lsx_vslli_h:
+ case LoongArch::BI__builtin_lsx_vsrai_h:
+ case LoongArch::BI__builtin_lsx_vsrari_h:
+ case LoongArch::BI__builtin_lsx_vsrli_h:
+ case LoongArch::BI__builtin_lsx_vsllwil_w_h:
+ case LoongArch::BI__builtin_lsx_vsllwil_wu_hu:
+ case LoongArch::BI__builtin_lsx_vrotri_h:
+ case LoongArch::BI__builtin_lsx_vsrlri_h:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
+ case LoongArch::BI__builtin_lsx_vssrarni_b_h:
+ case LoongArch::BI__builtin_lsx_vssrarni_bu_h:
+ case LoongArch::BI__builtin_lsx_vssrani_b_h:
+ case LoongArch::BI__builtin_lsx_vssrani_bu_h:
+ case LoongArch::BI__builtin_lsx_vsrarni_b_h:
+ case LoongArch::BI__builtin_lsx_vsrlni_b_h:
+ case LoongArch::BI__builtin_lsx_vsrlrni_b_h:
+ case LoongArch::BI__builtin_lsx_vssrlni_b_h:
+ case LoongArch::BI__builtin_lsx_vssrlni_bu_h:
+ case LoongArch::BI__builtin_lsx_vssrlrni_b_h:
+ case LoongArch::BI__builtin_lsx_vssrlrni_bu_h:
+ case LoongArch::BI__builtin_lsx_vsrani_b_h:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
+ case LoongArch::BI__builtin_lsx_vslei_bu:
+ case LoongArch::BI__builtin_lsx_vslei_hu:
+ case LoongArch::BI__builtin_lsx_vslei_wu:
+ case LoongArch::BI__builtin_lsx_vslei_du:
+ case LoongArch::BI__builtin_lsx_vslti_bu:
+ case LoongArch::BI__builtin_lsx_vslti_hu:
+ case LoongArch::BI__builtin_lsx_vslti_wu:
+ case LoongArch::BI__builtin_lsx_vslti_du:
+ case LoongArch::BI__builtin_lsx_vmaxi_bu:
+ case LoongArch::BI__builtin_lsx_vmaxi_hu:
+ case LoongArch::BI__builtin_lsx_vmaxi_wu:
+ case LoongArch::BI__builtin_lsx_vmaxi_du:
+ case LoongArch::BI__builtin_lsx_vmini_bu:
+ case LoongArch::BI__builtin_lsx_vmini_hu:
+ case LoongArch::BI__builtin_lsx_vmini_wu:
+ case LoongArch::BI__builtin_lsx_vmini_du:
+ case LoongArch::BI__builtin_lsx_vaddi_bu:
+ case LoongArch::BI__builtin_lsx_vaddi_hu:
+ case LoongArch::BI__builtin_lsx_vaddi_wu:
+ case LoongArch::BI__builtin_lsx_vaddi_du:
+ case LoongArch::BI__builtin_lsx_vbitclri_w:
+ case LoongArch::BI__builtin_lsx_vbitrevi_w:
+ case LoongArch::BI__builtin_lsx_vbitseti_w:
+ case LoongArch::BI__builtin_lsx_vsat_w:
+ case LoongArch::BI__builtin_lsx_vsat_wu:
+ case LoongArch::BI__builtin_lsx_vslli_w:
+ case LoongArch::BI__builtin_lsx_vsrai_w:
+ case LoongArch::BI__builtin_lsx_vsrari_w:
+ case LoongArch::BI__builtin_lsx_vsrli_w:
+ case LoongArch::BI__builtin_lsx_vsllwil_d_w:
+ case LoongArch::BI__builtin_lsx_vsllwil_du_wu:
+ case LoongArch::BI__builtin_lsx_vsrlri_w:
+ case LoongArch::BI__builtin_lsx_vrotri_w:
+ case LoongArch::BI__builtin_lsx_vsubi_bu:
+ case LoongArch::BI__builtin_lsx_vsubi_hu:
+ case LoongArch::BI__builtin_lsx_vbsrl_v:
+ case LoongArch::BI__builtin_lsx_vbsll_v:
+ case LoongArch::BI__builtin_lsx_vsubi_wu:
+ case LoongArch::BI__builtin_lsx_vsubi_du:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
+ case LoongArch::BI__builtin_lsx_vssrarni_h_w:
+ case LoongArch::BI__builtin_lsx_vssrarni_hu_w:
+ case LoongArch::BI__builtin_lsx_vssrani_h_w:
+ case LoongArch::BI__builtin_lsx_vssrani_hu_w:
+ case LoongArch::BI__builtin_lsx_vsrarni_h_w:
+ case LoongArch::BI__builtin_lsx_vsrani_h_w:
+ case LoongArch::BI__builtin_lsx_vfrstpi_b:
+ case LoongArch::BI__builtin_lsx_vfrstpi_h:
+ case LoongArch::BI__builtin_lsx_vsrlni_h_w:
+ case LoongArch::BI__builtin_lsx_vsrlrni_h_w:
+ case LoongArch::BI__builtin_lsx_vssrlni_h_w:
+ case LoongArch::BI__builtin_lsx_vssrlni_hu_w:
+ case LoongArch::BI__builtin_lsx_vssrlrni_h_w:
+ case LoongArch::BI__builtin_lsx_vssrlrni_hu_w:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
+ case LoongArch::BI__builtin_lsx_vbitclri_d:
+ case LoongArch::BI__builtin_lsx_vbitrevi_d:
+ case LoongArch::BI__builtin_lsx_vbitseti_d:
+ case LoongArch::BI__builtin_lsx_vsat_d:
+ case LoongArch::BI__builtin_lsx_vsat_du:
+ case LoongArch::BI__builtin_lsx_vslli_d:
+ case LoongArch::BI__builtin_lsx_vsrai_d:
+ case LoongArch::BI__builtin_lsx_vsrli_d:
+ case LoongArch::BI__builtin_lsx_vsrari_d:
+ case LoongArch::BI__builtin_lsx_vrotri_d:
+ case LoongArch::BI__builtin_lsx_vsrlri_d:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 63);
+ case LoongArch::BI__builtin_lsx_vssrarni_w_d:
+ case LoongArch::BI__builtin_lsx_vssrarni_wu_d:
+ case LoongArch::BI__builtin_lsx_vssrani_w_d:
+ case LoongArch::BI__builtin_lsx_vssrani_wu_d:
+ case LoongArch::BI__builtin_lsx_vsrarni_w_d:
+ case LoongArch::BI__builtin_lsx_vsrlni_w_d:
+ case LoongArch::BI__builtin_lsx_vsrlrni_w_d:
+ case LoongArch::BI__builtin_lsx_vssrlni_w_d:
+ case LoongArch::BI__builtin_lsx_vssrlni_wu_d:
+ case LoongArch::BI__builtin_lsx_vssrlrni_w_d:
+ case LoongArch::BI__builtin_lsx_vssrlrni_wu_d:
+ case LoongArch::BI__builtin_lsx_vsrani_w_d:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 63);
+ case LoongArch::BI__builtin_lsx_vssrarni_d_q:
+ case LoongArch::BI__builtin_lsx_vssrarni_du_q:
+ case LoongArch::BI__builtin_lsx_vssrani_d_q:
+ case LoongArch::BI__builtin_lsx_vssrani_du_q:
+ case LoongArch::BI__builtin_lsx_vsrarni_d_q:
+ case LoongArch::BI__builtin_lsx_vssrlni_d_q:
+ case LoongArch::BI__builtin_lsx_vssrlni_du_q:
+ case LoongArch::BI__builtin_lsx_vssrlrni_d_q:
+ case LoongArch::BI__builtin_lsx_vssrlrni_du_q:
+ case LoongArch::BI__builtin_lsx_vsrani_d_q:
+ case LoongArch::BI__builtin_lsx_vsrlrni_d_q:
+ case LoongArch::BI__builtin_lsx_vsrlni_d_q:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 127);
+ case LoongArch::BI__builtin_lsx_vseqi_b:
+ case LoongArch::BI__builtin_lsx_vseqi_h:
+ case LoongArch::BI__builtin_lsx_vseqi_w:
+ case LoongArch::BI__builtin_lsx_vseqi_d:
+ case LoongArch::BI__builtin_lsx_vslti_b:
+ case LoongArch::BI__builtin_lsx_vslti_h:
+ case LoongArch::BI__builtin_lsx_vslti_w:
+ case LoongArch::BI__builtin_lsx_vslti_d:
+ case LoongArch::BI__builtin_lsx_vslei_b:
+ case LoongArch::BI__builtin_lsx_vslei_h:
+ case LoongArch::BI__builtin_lsx_vslei_w:
+ case LoongArch::BI__builtin_lsx_vslei_d:
+ case LoongArch::BI__builtin_lsx_vmaxi_b:
+ case LoongArch::BI__builtin_lsx_vmaxi_h:
+ case LoongArch::BI__builtin_lsx_vmaxi_w:
+ case LoongArch::BI__builtin_lsx_vmaxi_d:
+ case LoongArch::BI__builtin_lsx_vmini_b:
+ case LoongArch::BI__builtin_lsx_vmini_h:
+ case LoongArch::BI__builtin_lsx_vmini_w:
+ case LoongArch::BI__builtin_lsx_vmini_d:
+ return SemaBuiltinConstantArgRange(TheCall, 1, -16, 15);
+ case LoongArch::BI__builtin_lsx_vandi_b:
+ case LoongArch::BI__builtin_lsx_vnori_b:
+ case LoongArch::BI__builtin_lsx_vori_b:
+ case LoongArch::BI__builtin_lsx_vshuf4i_b:
+ case LoongArch::BI__builtin_lsx_vshuf4i_h:
+ case LoongArch::BI__builtin_lsx_vshuf4i_w:
+ case LoongArch::BI__builtin_lsx_vxori_b:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 255);
+ case LoongArch::BI__builtin_lsx_vbitseli_b:
+ case LoongArch::BI__builtin_lsx_vshuf4i_d:
+ case LoongArch::BI__builtin_lsx_vextrins_b:
+ case LoongArch::BI__builtin_lsx_vextrins_h:
+ case LoongArch::BI__builtin_lsx_vextrins_w:
+ case LoongArch::BI__builtin_lsx_vextrins_d:
+ case LoongArch::BI__builtin_lsx_vpermi_w:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 255);
+ case LoongArch::BI__builtin_lsx_vpickve2gr_b:
+ case LoongArch::BI__builtin_lsx_vpickve2gr_bu:
+ case LoongArch::BI__builtin_lsx_vreplvei_b:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
+ case LoongArch::BI__builtin_lsx_vinsgr2vr_b:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
+ case LoongArch::BI__builtin_lsx_vpickve2gr_h:
+ case LoongArch::BI__builtin_lsx_vpickve2gr_hu:
+ case LoongArch::BI__builtin_lsx_vreplvei_h:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
+ case LoongArch::BI__builtin_lsx_vinsgr2vr_h:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
+ case LoongArch::BI__builtin_lsx_vpickve2gr_w:
+ case LoongArch::BI__builtin_lsx_vpickve2gr_wu:
+ case LoongArch::BI__builtin_lsx_vreplvei_w:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
+ case LoongArch::BI__builtin_lsx_vinsgr2vr_w:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
+ case LoongArch::BI__builtin_lsx_vpickve2gr_d:
+ case LoongArch::BI__builtin_lsx_vpickve2gr_du:
+ case LoongArch::BI__builtin_lsx_vreplvei_d:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
+ case LoongArch::BI__builtin_lsx_vinsgr2vr_d:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
+ case LoongArch::BI__builtin_lsx_vstelm_b:
+ return SemaBuiltinConstantArgRange(TheCall, 2, -128, 127) ||
+ SemaBuiltinConstantArgRange(TheCall, 3, 0, 15);
+ case LoongArch::BI__builtin_lsx_vstelm_h:
+ return SemaBuiltinConstantArgRange(TheCall, 2, -256, 254) ||
+ SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
+ case LoongArch::BI__builtin_lsx_vstelm_w:
+ return SemaBuiltinConstantArgRange(TheCall, 2, -512, 508) ||
+ SemaBuiltinConstantArgRange(TheCall, 3, 0, 3);
+ case LoongArch::BI__builtin_lsx_vstelm_d:
+ return SemaBuiltinConstantArgRange(TheCall, 2, -1024, 1016) ||
+ SemaBuiltinConstantArgRange(TheCall, 3, 0, 1);
+ case LoongArch::BI__builtin_lsx_vldrepl_b:
+ case LoongArch::BI__builtin_lsx_vld:
+ return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2047);
+ case LoongArch::BI__builtin_lsx_vldrepl_h:
+ return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2046);
+ case LoongArch::BI__builtin_lsx_vldrepl_w:
+ return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2044);
+ case LoongArch::BI__builtin_lsx_vldrepl_d:
+ return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2040);
+ case LoongArch::BI__builtin_lsx_vst:
+ return SemaBuiltinConstantArgRange(TheCall, 2, -2048, 2047);
+ case LoongArch::BI__builtin_lsx_vldi:
+ return SemaBuiltinConstantArgRange(TheCall, 0, -4096, 4095);
+ case LoongArch::BI__builtin_lsx_vrepli_b:
+ case LoongArch::BI__builtin_lsx_vrepli_h:
+ case LoongArch::BI__builtin_lsx_vrepli_w:
+ case LoongArch::BI__builtin_lsx_vrepli_d:
+ return SemaBuiltinConstantArgRange(TheCall, 0, -512, 511);
+
+ // LASX intrinsics.
+ case LoongArch::BI__builtin_lasx_xvbitclri_b:
+ case LoongArch::BI__builtin_lasx_xvbitrevi_b:
+ case LoongArch::BI__builtin_lasx_xvbitseti_b:
+ case LoongArch::BI__builtin_lasx_xvsat_b:
+ case LoongArch::BI__builtin_lasx_xvsat_bu:
+ case LoongArch::BI__builtin_lasx_xvslli_b:
+ case LoongArch::BI__builtin_lasx_xvsrai_b:
+ case LoongArch::BI__builtin_lasx_xvsrari_b:
+ case LoongArch::BI__builtin_lasx_xvsrli_b:
+ case LoongArch::BI__builtin_lasx_xvsllwil_h_b:
+ case LoongArch::BI__builtin_lasx_xvsllwil_hu_bu:
+ case LoongArch::BI__builtin_lasx_xvrotri_b:
+ case LoongArch::BI__builtin_lasx_xvsrlri_b:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
+ case LoongArch::BI__builtin_lasx_xvbitclri_h:
+ case LoongArch::BI__builtin_lasx_xvbitrevi_h:
+ case LoongArch::BI__builtin_lasx_xvbitseti_h:
+ case LoongArch::BI__builtin_lasx_xvsat_h:
+ case LoongArch::BI__builtin_lasx_xvsat_hu:
+ case LoongArch::BI__builtin_lasx_xvslli_h:
+ case LoongArch::BI__builtin_lasx_xvsrai_h:
+ case LoongArch::BI__builtin_lasx_xvsrari_h:
+ case LoongArch::BI__builtin_lasx_xvsrli_h:
+ case LoongArch::BI__builtin_lasx_xvsllwil_w_h:
+ case LoongArch::BI__builtin_lasx_xvsllwil_wu_hu:
+ case LoongArch::BI__builtin_lasx_xvrotri_h:
+ case LoongArch::BI__builtin_lasx_xvsrlri_h:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
+ case LoongArch::BI__builtin_lasx_xvssrarni_b_h:
+ case LoongArch::BI__builtin_lasx_xvssrarni_bu_h:
+ case LoongArch::BI__builtin_lasx_xvssrani_b_h:
+ case LoongArch::BI__builtin_lasx_xvssrani_bu_h:
+ case LoongArch::BI__builtin_lasx_xvsrarni_b_h:
+ case LoongArch::BI__builtin_lasx_xvsrlni_b_h:
+ case LoongArch::BI__builtin_lasx_xvsrlrni_b_h:
+ case LoongArch::BI__builtin_lasx_xvssrlni_b_h:
+ case LoongArch::BI__builtin_lasx_xvssrlni_bu_h:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_b_h:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_bu_h:
+ case LoongArch::BI__builtin_lasx_xvsrani_b_h:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
+ case LoongArch::BI__builtin_lasx_xvslei_bu:
+ case LoongArch::BI__builtin_lasx_xvslei_hu:
+ case LoongArch::BI__builtin_lasx_xvslei_wu:
+ case LoongArch::BI__builtin_lasx_xvslei_du:
+ case LoongArch::BI__builtin_lasx_xvslti_bu:
+ case LoongArch::BI__builtin_lasx_xvslti_hu:
+ case LoongArch::BI__builtin_lasx_xvslti_wu:
+ case LoongArch::BI__builtin_lasx_xvslti_du:
+ case LoongArch::BI__builtin_lasx_xvmaxi_bu:
+ case LoongArch::BI__builtin_lasx_xvmaxi_hu:
+ case LoongArch::BI__builtin_lasx_xvmaxi_wu:
+ case LoongArch::BI__builtin_lasx_xvmaxi_du:
+ case LoongArch::BI__builtin_lasx_xvmini_bu:
+ case LoongArch::BI__builtin_lasx_xvmini_hu:
+ case LoongArch::BI__builtin_lasx_xvmini_wu:
+ case LoongArch::BI__builtin_lasx_xvmini_du:
+ case LoongArch::BI__builtin_lasx_xvaddi_bu:
+ case LoongArch::BI__builtin_lasx_xvaddi_hu:
+ case LoongArch::BI__builtin_lasx_xvaddi_wu:
+ case LoongArch::BI__builtin_lasx_xvaddi_du:
+ case LoongArch::BI__builtin_lasx_xvbitclri_w:
+ case LoongArch::BI__builtin_lasx_xvbitrevi_w:
+ case LoongArch::BI__builtin_lasx_xvbitseti_w:
+ case LoongArch::BI__builtin_lasx_xvsat_w:
+ case LoongArch::BI__builtin_lasx_xvsat_wu:
+ case LoongArch::BI__builtin_lasx_xvslli_w:
+ case LoongArch::BI__builtin_lasx_xvsrai_w:
+ case LoongArch::BI__builtin_lasx_xvsrari_w:
+ case LoongArch::BI__builtin_lasx_xvsrli_w:
+ case LoongArch::BI__builtin_lasx_xvsllwil_d_w:
+ case LoongArch::BI__builtin_lasx_xvsllwil_du_wu:
+ case LoongArch::BI__builtin_lasx_xvsrlri_w:
+ case LoongArch::BI__builtin_lasx_xvrotri_w:
+ case LoongArch::BI__builtin_lasx_xvsubi_bu:
+ case LoongArch::BI__builtin_lasx_xvsubi_hu:
+ case LoongArch::BI__builtin_lasx_xvsubi_wu:
+ case LoongArch::BI__builtin_lasx_xvsubi_du:
+ case LoongArch::BI__builtin_lasx_xvbsrl_v:
+ case LoongArch::BI__builtin_lasx_xvbsll_v:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
+ case LoongArch::BI__builtin_lasx_xvssrarni_h_w:
+ case LoongArch::BI__builtin_lasx_xvssrarni_hu_w:
+ case LoongArch::BI__builtin_lasx_xvssrani_h_w:
+ case LoongArch::BI__builtin_lasx_xvssrani_hu_w:
+ case LoongArch::BI__builtin_lasx_xvsrarni_h_w:
+ case LoongArch::BI__builtin_lasx_xvsrani_h_w:
+ case LoongArch::BI__builtin_lasx_xvfrstpi_b:
+ case LoongArch::BI__builtin_lasx_xvfrstpi_h:
+ case LoongArch::BI__builtin_lasx_xvsrlni_h_w:
+ case LoongArch::BI__builtin_lasx_xvsrlrni_h_w:
+ case LoongArch::BI__builtin_lasx_xvssrlni_h_w:
+ case LoongArch::BI__builtin_lasx_xvssrlni_hu_w:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_h_w:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_hu_w:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
+ case LoongArch::BI__builtin_lasx_xvbitclri_d:
+ case LoongArch::BI__builtin_lasx_xvbitrevi_d:
+ case LoongArch::BI__builtin_lasx_xvbitseti_d:
+ case LoongArch::BI__builtin_lasx_xvsat_d:
+ case LoongArch::BI__builtin_lasx_xvsat_du:
+ case LoongArch::BI__builtin_lasx_xvslli_d:
+ case LoongArch::BI__builtin_lasx_xvsrai_d:
+ case LoongArch::BI__builtin_lasx_xvsrli_d:
+ case LoongArch::BI__builtin_lasx_xvsrari_d:
+ case LoongArch::BI__builtin_lasx_xvrotri_d:
+ case LoongArch::BI__builtin_lasx_xvsrlri_d:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 63);
+ case LoongArch::BI__builtin_lasx_xvssrarni_w_d:
+ case LoongArch::BI__builtin_lasx_xvssrarni_wu_d:
+ case LoongArch::BI__builtin_lasx_xvssrani_w_d:
+ case LoongArch::BI__builtin_lasx_xvssrani_wu_d:
+ case LoongArch::BI__builtin_lasx_xvsrarni_w_d:
+ case LoongArch::BI__builtin_lasx_xvsrlni_w_d:
+ case LoongArch::BI__builtin_lasx_xvsrlrni_w_d:
+ case LoongArch::BI__builtin_lasx_xvssrlni_w_d:
+ case LoongArch::BI__builtin_lasx_xvssrlni_wu_d:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_w_d:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_wu_d:
+ case LoongArch::BI__builtin_lasx_xvsrani_w_d:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 63);
+ case LoongArch::BI__builtin_lasx_xvssrarni_d_q:
+ case LoongArch::BI__builtin_lasx_xvssrarni_du_q:
+ case LoongArch::BI__builtin_lasx_xvssrani_d_q:
+ case LoongArch::BI__builtin_lasx_xvssrani_du_q:
+ case LoongArch::BI__builtin_lasx_xvsrarni_d_q:
+ case LoongArch::BI__builtin_lasx_xvssrlni_d_q:
+ case LoongArch::BI__builtin_lasx_xvssrlni_du_q:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_d_q:
+ case LoongArch::BI__builtin_lasx_xvssrlrni_du_q:
+ case LoongArch::BI__builtin_lasx_xvsrani_d_q:
+ case LoongArch::BI__builtin_lasx_xvsrlni_d_q:
+ case LoongArch::BI__builtin_lasx_xvsrlrni_d_q:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 127);
+ case LoongArch::BI__builtin_lasx_xvseqi_b:
+ case LoongArch::BI__builtin_lasx_xvseqi_h:
+ case LoongArch::BI__builtin_lasx_xvseqi_w:
+ case LoongArch::BI__builtin_lasx_xvseqi_d:
+ case LoongArch::BI__builtin_lasx_xvslti_b:
+ case LoongArch::BI__builtin_lasx_xvslti_h:
+ case LoongArch::BI__builtin_lasx_xvslti_w:
+ case LoongArch::BI__builtin_lasx_xvslti_d:
+ case LoongArch::BI__builtin_lasx_xvslei_b:
+ case LoongArch::BI__builtin_lasx_xvslei_h:
+ case LoongArch::BI__builtin_lasx_xvslei_w:
+ case LoongArch::BI__builtin_lasx_xvslei_d:
+ case LoongArch::BI__builtin_lasx_xvmaxi_b:
+ case LoongArch::BI__builtin_lasx_xvmaxi_h:
+ case LoongArch::BI__builtin_lasx_xvmaxi_w:
+ case LoongArch::BI__builtin_lasx_xvmaxi_d:
+ case LoongArch::BI__builtin_lasx_xvmini_b:
+ case LoongArch::BI__builtin_lasx_xvmini_h:
+ case LoongArch::BI__builtin_lasx_xvmini_w:
+ case LoongArch::BI__builtin_lasx_xvmini_d:
+ return SemaBuiltinConstantArgRange(TheCall, 1, -16, 15);
+ case LoongArch::BI__builtin_lasx_xvandi_b:
+ case LoongArch::BI__builtin_lasx_xvnori_b:
+ case LoongArch::BI__builtin_lasx_xvori_b:
+ case LoongArch::BI__builtin_lasx_xvshuf4i_b:
+ case LoongArch::BI__builtin_lasx_xvshuf4i_h:
+ case LoongArch::BI__builtin_lasx_xvshuf4i_w:
+ case LoongArch::BI__builtin_lasx_xvxori_b:
+ case LoongArch::BI__builtin_lasx_xvpermi_d:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 255);
+ case LoongArch::BI__builtin_lasx_xvbitseli_b:
+ case LoongArch::BI__builtin_lasx_xvshuf4i_d:
+ case LoongArch::BI__builtin_lasx_xvextrins_b:
+ case LoongArch::BI__builtin_lasx_xvextrins_h:
+ case LoongArch::BI__builtin_lasx_xvextrins_w:
+ case LoongArch::BI__builtin_lasx_xvextrins_d:
+ case LoongArch::BI__builtin_lasx_xvpermi_q:
+ case LoongArch::BI__builtin_lasx_xvpermi_w:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 255);
+ case LoongArch::BI__builtin_lasx_xvrepl128vei_b:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
+ case LoongArch::BI__builtin_lasx_xvrepl128vei_h:
+ case LoongArch::BI__builtin_lasx_xvpickve2gr_w:
+ case LoongArch::BI__builtin_lasx_xvpickve2gr_wu:
+ case LoongArch::BI__builtin_lasx_xvpickve_w_f:
+ case LoongArch::BI__builtin_lasx_xvpickve_w:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
+ case LoongArch::BI__builtin_lasx_xvinsgr2vr_w:
+ case LoongArch::BI__builtin_lasx_xvinsve0_w:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
+ case LoongArch::BI__builtin_lasx_xvrepl128vei_w:
+ case LoongArch::BI__builtin_lasx_xvpickve2gr_d:
+ case LoongArch::BI__builtin_lasx_xvpickve2gr_du:
+ case LoongArch::BI__builtin_lasx_xvpickve_d_f:
+ case LoongArch::BI__builtin_lasx_xvpickve_d:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
+ case LoongArch::BI__builtin_lasx_xvinsve0_d:
+ case LoongArch::BI__builtin_lasx_xvinsgr2vr_d:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
+ case LoongArch::BI__builtin_lasx_xvstelm_b:
+ return SemaBuiltinConstantArgRange(TheCall, 2, -128, 127) ||
+ SemaBuiltinConstantArgRange(TheCall, 3, 0, 31);
+ case LoongArch::BI__builtin_lasx_xvstelm_h:
+ return SemaBuiltinConstantArgRange(TheCall, 2, -256, 254) ||
+ SemaBuiltinConstantArgRange(TheCall, 3, 0, 15);
+ case LoongArch::BI__builtin_lasx_xvstelm_w:
+ return SemaBuiltinConstantArgRange(TheCall, 2, -512, 508) ||
+ SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
+ case LoongArch::BI__builtin_lasx_xvstelm_d:
+ return SemaBuiltinConstantArgRange(TheCall, 2, -1024, 1016) ||
+ SemaBuiltinConstantArgRange(TheCall, 3, 0, 3);
+ case LoongArch::BI__builtin_lasx_xvrepl128vei_d:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
+ case LoongArch::BI__builtin_lasx_xvldrepl_b:
+ case LoongArch::BI__builtin_lasx_xvld:
+ return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2047);
+ case LoongArch::BI__builtin_lasx_xvldrepl_h:
+ return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2046);
+ case LoongArch::BI__builtin_lasx_xvldrepl_w:
+ return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2044);
+ case LoongArch::BI__builtin_lasx_xvldrepl_d:
+ return SemaBuiltinConstantArgRange(TheCall, 1, -2048, 2040);
+ case LoongArch::BI__builtin_lasx_xvst:
+ return SemaBuiltinConstantArgRange(TheCall, 2, -2048, 2047);
+ case LoongArch::BI__builtin_lasx_xvldi:
+ return SemaBuiltinConstantArgRange(TheCall, 0, -4096, 4095);
+ case LoongArch::BI__builtin_lasx_xvrepli_b:
+ case LoongArch::BI__builtin_lasx_xvrepli_h:
+ case LoongArch::BI__builtin_lasx_xvrepli_w:
+ case LoongArch::BI__builtin_lasx_xvrepli_d:
+ return SemaBuiltinConstantArgRange(TheCall, 0, -512, 511);
+ }
+ return false;
+}
+
bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
unsigned BuiltinID, CallExpr *TheCall) {
return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
@@ -3216,7 +4826,7 @@ static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
switch (*Str++) {
case 'V':
return Context.getVectorType(Context.UnsignedCharTy, 16,
- VectorType::VectorKind::AltiVecVector);
+ VectorKind::AltiVecVector);
case 'i': {
char *End;
unsigned size = strtoul(Str, &End, 10);
@@ -3265,6 +4875,8 @@ static bool isPPC_64Builtin(unsigned BuiltinID) {
case PPC::BI__builtin_divde:
case PPC::BI__builtin_divdeu:
case PPC::BI__builtin_bpermd:
+ case PPC::BI__builtin_pdepd:
+ case PPC::BI__builtin_pextd:
case PPC::BI__builtin_ppc_ldarx:
case PPC::BI__builtin_ppc_stdcx:
case PPC::BI__builtin_ppc_tdw:
@@ -3280,26 +4892,19 @@ static bool isPPC_64Builtin(unsigned BuiltinID) {
case PPC::BI__builtin_ppc_store8r:
case PPC::BI__builtin_ppc_insert_exp:
case PPC::BI__builtin_ppc_extract_sig:
+ case PPC::BI__builtin_ppc_addex:
+ case PPC::BI__builtin_darn:
+ case PPC::BI__builtin_darn_raw:
+ case PPC::BI__builtin_ppc_compare_and_swaplp:
+ case PPC::BI__builtin_ppc_fetch_and_addlp:
+ case PPC::BI__builtin_ppc_fetch_and_andlp:
+ case PPC::BI__builtin_ppc_fetch_and_orlp:
+ case PPC::BI__builtin_ppc_fetch_and_swaplp:
return true;
}
return false;
}
-static bool SemaFeatureCheck(Sema &S, CallExpr *TheCall,
- StringRef FeatureToCheck, unsigned DiagID,
- StringRef DiagArg = "") {
- if (S.Context.getTargetInfo().hasFeature(FeatureToCheck))
- return false;
-
- if (DiagArg.empty())
- S.Diag(TheCall->getBeginLoc(), DiagID) << TheCall->getSourceRange();
- else
- S.Diag(TheCall->getBeginLoc(), DiagID)
- << DiagArg << TheCall->getSourceRange();
-
- return true;
-}
-
/// Returns true if the argument consists of one contiguous run of 1s with any
/// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so
/// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not,
@@ -3343,14 +4948,29 @@ bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
case PPC::BI__builtin_altivec_dss:
return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
case PPC::BI__builtin_tbegin:
- case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break;
- case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break;
+ case PPC::BI__builtin_tend:
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
+ case PPC::BI__builtin_tsr:
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7);
case PPC::BI__builtin_tabortwc:
- case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break;
+ case PPC::BI__builtin_tabortdc:
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
case PPC::BI__builtin_tabortwci:
case PPC::BI__builtin_tabortdci:
return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
+ // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05',
+ // __builtin_(un)pack_longdouble are available only if long double uses IBM
+ // extended double representation.
+ case PPC::BI__builtin_unpack_longdouble:
+ if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 1))
+ return true;
+ [[fallthrough]];
+ case PPC::BI__builtin_pack_longdouble:
+ if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble())
+ return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi)
+ << "ibmlongdouble";
+ return false;
case PPC::BI__builtin_altivec_dst:
case PPC::BI__builtin_altivec_dstt:
case PPC::BI__builtin_altivec_dstst:
@@ -3359,32 +4979,10 @@ bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
case PPC::BI__builtin_vsx_xxpermdi:
case PPC::BI__builtin_vsx_xxsldwi:
return SemaBuiltinVSX(TheCall);
- case PPC::BI__builtin_divwe:
- case PPC::BI__builtin_divweu:
- case PPC::BI__builtin_divde:
- case PPC::BI__builtin_divdeu:
- return SemaFeatureCheck(*this, TheCall, "extdiv",
- diag::err_ppc_builtin_only_on_arch, "7");
- case PPC::BI__builtin_bpermd:
- return SemaFeatureCheck(*this, TheCall, "bpermd",
- diag::err_ppc_builtin_only_on_arch, "7");
case PPC::BI__builtin_unpack_vector_int128:
- return SemaFeatureCheck(*this, TheCall, "vsx",
- diag::err_ppc_builtin_only_on_arch, "7") ||
- SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
- case PPC::BI__builtin_pack_vector_int128:
- return SemaFeatureCheck(*this, TheCall, "vsx",
- diag::err_ppc_builtin_only_on_arch, "7");
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
case PPC::BI__builtin_altivec_vgnb:
return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
- case PPC::BI__builtin_altivec_vec_replace_elt:
- case PPC::BI__builtin_altivec_vec_replace_unaligned: {
- QualType VecTy = TheCall->getArg(0)->getType();
- QualType EltTy = TheCall->getArg(1)->getType();
- unsigned Width = Context.getIntWidth(EltTy);
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, Width == 32 ? 12 : 8) ||
- !isEltOfVectorTy(Context, TheCall, *this, VecTy, EltTy);
- }
case PPC::BI__builtin_vsx_xxeval:
return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255);
case PPC::BI__builtin_altivec_vsldbi:
@@ -3396,31 +4994,27 @@ bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
case PPC::BI__builtin_ppc_tw:
case PPC::BI__builtin_ppc_tdw:
return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31);
- case PPC::BI__builtin_ppc_cmpeqb:
- case PPC::BI__builtin_ppc_setb:
- case PPC::BI__builtin_ppc_maddhd:
- case PPC::BI__builtin_ppc_maddhdu:
- case PPC::BI__builtin_ppc_maddld:
- return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
- diag::err_ppc_builtin_only_on_arch, "9");
case PPC::BI__builtin_ppc_cmprb:
- return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
- diag::err_ppc_builtin_only_on_arch, "9") ||
- SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
// For __rlwnm, __rlwimi and __rldimi, the last parameter mask must
// be a constant that represents a contiguous bit field.
case PPC::BI__builtin_ppc_rlwnm:
- return SemaBuiltinConstantArg(TheCall, 1, Result) ||
- SemaValueIsRunOfOnes(TheCall, 2);
+ return SemaValueIsRunOfOnes(TheCall, 2);
case PPC::BI__builtin_ppc_rlwimi:
case PPC::BI__builtin_ppc_rldimi:
return SemaBuiltinConstantArg(TheCall, 2, Result) ||
SemaValueIsRunOfOnes(TheCall, 3);
- case PPC::BI__builtin_ppc_extract_exp:
- case PPC::BI__builtin_ppc_extract_sig:
- case PPC::BI__builtin_ppc_insert_exp:
- return SemaFeatureCheck(*this, TheCall, "power9-vector",
- diag::err_ppc_builtin_only_on_arch, "9");
+ case PPC::BI__builtin_ppc_addex: {
+ if (SemaBuiltinConstantArgRange(TheCall, 2, 0, 3))
+ return true;
+ // Output warning for reserved values 1 to 3.
+ int ArgValue =
+ TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue();
+ if (ArgValue != 0)
+ Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour)
+ << ArgValue;
+ return false;
+ }
case PPC::BI__builtin_ppc_mtfsb0:
case PPC::BI__builtin_ppc_mtfsb1:
return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
@@ -3433,21 +5027,60 @@ bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
return SemaBuiltinConstantArgPower2(TheCall, 0);
case PPC::BI__builtin_ppc_rdlam:
return SemaValueIsRunOfOnes(TheCall, 2);
- case PPC::BI__builtin_ppc_icbt:
- case PPC::BI__builtin_ppc_sthcx:
- case PPC::BI__builtin_ppc_stbcx:
- case PPC::BI__builtin_ppc_lharx:
- case PPC::BI__builtin_ppc_lbarx:
- return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
- diag::err_ppc_builtin_only_on_arch, "8");
case PPC::BI__builtin_vsx_ldrmb:
case PPC::BI__builtin_vsx_strmb:
- return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
- diag::err_ppc_builtin_only_on_arch, "8") ||
- SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
-#define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \
- case PPC::BI__builtin_##Name: \
- return SemaBuiltinPPCMMACall(TheCall, Types);
+ return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
+ case PPC::BI__builtin_altivec_vcntmbb:
+ case PPC::BI__builtin_altivec_vcntmbh:
+ case PPC::BI__builtin_altivec_vcntmbw:
+ case PPC::BI__builtin_altivec_vcntmbd:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
+ case PPC::BI__builtin_vsx_xxgenpcvbm:
+ case PPC::BI__builtin_vsx_xxgenpcvhm:
+ case PPC::BI__builtin_vsx_xxgenpcvwm:
+ case PPC::BI__builtin_vsx_xxgenpcvdm:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
+ case PPC::BI__builtin_ppc_test_data_class: {
+ // Check if the first argument of the __builtin_ppc_test_data_class call is
+ // valid. The argument must be 'float' or 'double' or '__float128'.
+ QualType ArgType = TheCall->getArg(0)->getType();
+ if (ArgType != QualType(Context.FloatTy) &&
+ ArgType != QualType(Context.DoubleTy) &&
+ ArgType != QualType(Context.Float128Ty))
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_ppc_invalid_test_data_class_type);
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 127);
+ }
+ case PPC::BI__builtin_ppc_maxfe:
+ case PPC::BI__builtin_ppc_minfe:
+ case PPC::BI__builtin_ppc_maxfl:
+ case PPC::BI__builtin_ppc_minfl:
+ case PPC::BI__builtin_ppc_maxfs:
+ case PPC::BI__builtin_ppc_minfs: {
+ if (Context.getTargetInfo().getTriple().isOSAIX() &&
+ (BuiltinID == PPC::BI__builtin_ppc_maxfe ||
+ BuiltinID == PPC::BI__builtin_ppc_minfe))
+ return Diag(TheCall->getBeginLoc(), diag::err_target_unsupported_type)
+ << "builtin" << true << 128 << QualType(Context.LongDoubleTy)
+ << false << Context.getTargetInfo().getTriple().str();
+ // Argument type should be exact.
+ QualType ArgType = QualType(Context.LongDoubleTy);
+ if (BuiltinID == PPC::BI__builtin_ppc_maxfl ||
+ BuiltinID == PPC::BI__builtin_ppc_minfl)
+ ArgType = QualType(Context.DoubleTy);
+ else if (BuiltinID == PPC::BI__builtin_ppc_maxfs ||
+ BuiltinID == PPC::BI__builtin_ppc_minfs)
+ ArgType = QualType(Context.FloatTy);
+ for (unsigned I = 0, E = TheCall->getNumArgs(); I < E; ++I)
+ if (TheCall->getArg(I)->getType() != ArgType)
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_typecheck_convert_incompatible)
+ << TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0;
+ return false;
+ }
+#define CUSTOM_BUILTIN(Name, Intr, Types, Acc, Feature) \
+ case PPC::BI__builtin_##Name: \
+ return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types);
#include "clang/Basic/BuiltinsPPC.def"
}
return SemaBuiltinConstantArgRange(TheCall, i, l, u);
@@ -3499,19 +5132,19 @@ bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
<< ArgExpr->getType();
auto Ord = ArgResult.Val.getInt().getZExtValue();
- // Check valididty of memory ordering as per C11 / C++11's memody model.
+ // Check validity of memory ordering as per C11 / C++11's memody model.
// Only fence needs check. Atomic dec/inc allow all memory orders.
if (!llvm::isValidAtomicOrderingCABI(Ord))
return Diag(ArgExpr->getBeginLoc(),
diag::warn_atomic_op_has_invalid_memory_order)
- << ArgExpr->getSourceRange();
+ << 0 << ArgExpr->getSourceRange();
switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
case llvm::AtomicOrderingCABI::relaxed:
case llvm::AtomicOrderingCABI::consume:
if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
return Diag(ArgExpr->getBeginLoc(),
diag::warn_atomic_op_has_invalid_memory_order)
- << ArgExpr->getSourceRange();
+ << 0 << ArgExpr->getSourceRange();
break;
case llvm::AtomicOrderingCABI::acquire:
case llvm::AtomicOrderingCABI::release:
@@ -3551,6 +5184,35 @@ bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) {
<< Arg->getSourceRange();
}
+static bool CheckInvalidVLENandLMUL(const TargetInfo &TI, CallExpr *TheCall,
+ Sema &S, QualType Type, int EGW) {
+ assert((EGW == 128 || EGW == 256) && "EGW can only be 128 or 256 bits");
+
+ // LMUL * VLEN >= EGW
+ ASTContext::BuiltinVectorTypeInfo Info =
+ S.Context.getBuiltinVectorTypeInfo(Type->castAs<BuiltinType>());
+ unsigned ElemSize = S.Context.getTypeSize(Info.ElementType);
+ unsigned MinElemCount = Info.EC.getKnownMinValue();
+
+ unsigned EGS = EGW / ElemSize;
+ // If EGS is less than or equal to the minimum number of elements, then the
+ // type is valid.
+ if (EGS <= MinElemCount)
+ return false;
+
+ // Otherwise, we need vscale to be at least EGS / MinElemCont.
+ assert(EGS % MinElemCount == 0);
+ unsigned VScaleFactor = EGS / MinElemCount;
+ // Vscale is VLEN/RVVBitsPerBlock.
+ unsigned MinRequiredVLEN = VScaleFactor * llvm::RISCV::RVVBitsPerBlock;
+ std::string RequiredExt = "zvl" + std::to_string(MinRequiredVLEN) + "b";
+ if (!TI.hasFeature(RequiredExt))
+ return S.Diag(TheCall->getBeginLoc(),
+ diag::err_riscv_type_requires_extension) << Type << RequiredExt;
+
+ return false;
+}
+
bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
unsigned BuiltinID,
CallExpr *TheCall) {
@@ -3559,165 +5221,895 @@ bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
bool FeatureMissing = false;
SmallVector<StringRef> ReqFeatures;
StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID);
- Features.split(ReqFeatures, ',');
+ Features.split(ReqFeatures, ',', -1, false);
// Check if each required feature is included
for (StringRef F : ReqFeatures) {
- if (TI.hasFeature(F))
- continue;
-
- // If the feature is 64bit, alter the string so it will print better in
- // the diagnostic.
- if (F == "64bit")
- F = "RV64";
-
- // Convert features like "zbr" and "experimental-zbr" to "Zbr".
- F.consume_front("experimental-");
- std::string FeatureStr = F.str();
- FeatureStr[0] = std::toupper(FeatureStr[0]);
+ SmallVector<StringRef> ReqOpFeatures;
+ F.split(ReqOpFeatures, '|');
+
+ if (llvm::none_of(ReqOpFeatures,
+ [&TI](StringRef OF) { return TI.hasFeature(OF); })) {
+ std::string FeatureStrs;
+ bool IsExtension = true;
+ for (StringRef OF : ReqOpFeatures) {
+ // If the feature is 64bit, alter the string so it will print better in
+ // the diagnostic.
+ if (OF == "64bit") {
+ assert(ReqOpFeatures.size() == 1 && "Expected '64bit' to be alone");
+ OF = "RV64";
+ IsExtension = false;
+ }
+ if (OF == "32bit") {
+ assert(ReqOpFeatures.size() == 1 && "Expected '32bit' to be alone");
+ OF = "RV32";
+ IsExtension = false;
+ }
- // Error message
- FeatureMissing = true;
- Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension)
- << TheCall->getSourceRange() << StringRef(FeatureStr);
+ // Convert features like "zbr" and "experimental-zbr" to "Zbr".
+ OF.consume_front("experimental-");
+ std::string FeatureStr = OF.str();
+ FeatureStr[0] = std::toupper(FeatureStr[0]);
+ // Combine strings.
+ FeatureStrs += FeatureStrs.empty() ? "" : ", ";
+ FeatureStrs += "'";
+ FeatureStrs += FeatureStr;
+ FeatureStrs += "'";
+ }
+ // Error message
+ FeatureMissing = true;
+ Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension)
+ << IsExtension
+ << TheCall->getSourceRange() << StringRef(FeatureStrs);
+ }
}
if (FeatureMissing)
return true;
+ // vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx,
+ // vsmul.vv, vsmul.vx are not included for EEW=64 in Zve64*.
switch (BuiltinID) {
- case RISCV::BI__builtin_rvv_vsetvli:
+ default:
+ break;
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_m:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_m:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_m:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_m:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulhu_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_m:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_m:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vmulh_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vmulh_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu: {
+ ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(
+ TheCall->getType()->castAs<BuiltinType>());
+
+ if (Context.getTypeSize(Info.ElementType) == 64 && !TI.hasFeature("v"))
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_riscv_builtin_requires_extension)
+ << /* IsExtension */ true << TheCall->getSourceRange() << "v";
+
+ break;
+ }
+ }
+
+ switch (BuiltinID) {
+ case RISCVVector::BI__builtin_rvv_vsetvli:
return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) ||
CheckRISCVLMUL(TheCall, 2);
- case RISCV::BI__builtin_rvv_vsetvlimax:
+ case RISCVVector::BI__builtin_rvv_vsetvlimax:
return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
CheckRISCVLMUL(TheCall, 1);
- case RISCV::BI__builtin_rvv_vget_v_i8m2_i8m1:
- case RISCV::BI__builtin_rvv_vget_v_i16m2_i16m1:
- case RISCV::BI__builtin_rvv_vget_v_i32m2_i32m1:
- case RISCV::BI__builtin_rvv_vget_v_i64m2_i64m1:
- case RISCV::BI__builtin_rvv_vget_v_f32m2_f32m1:
- case RISCV::BI__builtin_rvv_vget_v_f64m2_f64m1:
- case RISCV::BI__builtin_rvv_vget_v_u8m2_u8m1:
- case RISCV::BI__builtin_rvv_vget_v_u16m2_u16m1:
- case RISCV::BI__builtin_rvv_vget_v_u32m2_u32m1:
- case RISCV::BI__builtin_rvv_vget_v_u64m2_u64m1:
- case RISCV::BI__builtin_rvv_vget_v_i8m4_i8m2:
- case RISCV::BI__builtin_rvv_vget_v_i16m4_i16m2:
- case RISCV::BI__builtin_rvv_vget_v_i32m4_i32m2:
- case RISCV::BI__builtin_rvv_vget_v_i64m4_i64m2:
- case RISCV::BI__builtin_rvv_vget_v_f32m4_f32m2:
- case RISCV::BI__builtin_rvv_vget_v_f64m4_f64m2:
- case RISCV::BI__builtin_rvv_vget_v_u8m4_u8m2:
- case RISCV::BI__builtin_rvv_vget_v_u16m4_u16m2:
- case RISCV::BI__builtin_rvv_vget_v_u32m4_u32m2:
- case RISCV::BI__builtin_rvv_vget_v_u64m4_u64m2:
- case RISCV::BI__builtin_rvv_vget_v_i8m8_i8m4:
- case RISCV::BI__builtin_rvv_vget_v_i16m8_i16m4:
- case RISCV::BI__builtin_rvv_vget_v_i32m8_i32m4:
- case RISCV::BI__builtin_rvv_vget_v_i64m8_i64m4:
- case RISCV::BI__builtin_rvv_vget_v_f32m8_f32m4:
- case RISCV::BI__builtin_rvv_vget_v_f64m8_f64m4:
- case RISCV::BI__builtin_rvv_vget_v_u8m8_u8m4:
- case RISCV::BI__builtin_rvv_vget_v_u16m8_u16m4:
- case RISCV::BI__builtin_rvv_vget_v_u32m8_u32m4:
- case RISCV::BI__builtin_rvv_vget_v_u64m8_u64m4:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
- case RISCV::BI__builtin_rvv_vget_v_i8m4_i8m1:
- case RISCV::BI__builtin_rvv_vget_v_i16m4_i16m1:
- case RISCV::BI__builtin_rvv_vget_v_i32m4_i32m1:
- case RISCV::BI__builtin_rvv_vget_v_i64m4_i64m1:
- case RISCV::BI__builtin_rvv_vget_v_f32m4_f32m1:
- case RISCV::BI__builtin_rvv_vget_v_f64m4_f64m1:
- case RISCV::BI__builtin_rvv_vget_v_u8m4_u8m1:
- case RISCV::BI__builtin_rvv_vget_v_u16m4_u16m1:
- case RISCV::BI__builtin_rvv_vget_v_u32m4_u32m1:
- case RISCV::BI__builtin_rvv_vget_v_u64m4_u64m1:
- case RISCV::BI__builtin_rvv_vget_v_i8m8_i8m2:
- case RISCV::BI__builtin_rvv_vget_v_i16m8_i16m2:
- case RISCV::BI__builtin_rvv_vget_v_i32m8_i32m2:
- case RISCV::BI__builtin_rvv_vget_v_i64m8_i64m2:
- case RISCV::BI__builtin_rvv_vget_v_f32m8_f32m2:
- case RISCV::BI__builtin_rvv_vget_v_f64m8_f64m2:
- case RISCV::BI__builtin_rvv_vget_v_u8m8_u8m2:
- case RISCV::BI__builtin_rvv_vget_v_u16m8_u16m2:
- case RISCV::BI__builtin_rvv_vget_v_u32m8_u32m2:
- case RISCV::BI__builtin_rvv_vget_v_u64m8_u64m2:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
- case RISCV::BI__builtin_rvv_vget_v_i8m8_i8m1:
- case RISCV::BI__builtin_rvv_vget_v_i16m8_i16m1:
- case RISCV::BI__builtin_rvv_vget_v_i32m8_i32m1:
- case RISCV::BI__builtin_rvv_vget_v_i64m8_i64m1:
- case RISCV::BI__builtin_rvv_vget_v_f32m8_f32m1:
- case RISCV::BI__builtin_rvv_vget_v_f64m8_f64m1:
- case RISCV::BI__builtin_rvv_vget_v_u8m8_u8m1:
- case RISCV::BI__builtin_rvv_vget_v_u16m8_u16m1:
- case RISCV::BI__builtin_rvv_vget_v_u32m8_u32m1:
- case RISCV::BI__builtin_rvv_vget_v_u64m8_u64m1:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
- case RISCV::BI__builtin_rvv_vset_v_i8m1_i8m2:
- case RISCV::BI__builtin_rvv_vset_v_i16m1_i16m2:
- case RISCV::BI__builtin_rvv_vset_v_i32m1_i32m2:
- case RISCV::BI__builtin_rvv_vset_v_i64m1_i64m2:
- case RISCV::BI__builtin_rvv_vset_v_f32m1_f32m2:
- case RISCV::BI__builtin_rvv_vset_v_f64m1_f64m2:
- case RISCV::BI__builtin_rvv_vset_v_u8m1_u8m2:
- case RISCV::BI__builtin_rvv_vset_v_u16m1_u16m2:
- case RISCV::BI__builtin_rvv_vset_v_u32m1_u32m2:
- case RISCV::BI__builtin_rvv_vset_v_u64m1_u64m2:
- case RISCV::BI__builtin_rvv_vset_v_i8m2_i8m4:
- case RISCV::BI__builtin_rvv_vset_v_i16m2_i16m4:
- case RISCV::BI__builtin_rvv_vset_v_i32m2_i32m4:
- case RISCV::BI__builtin_rvv_vset_v_i64m2_i64m4:
- case RISCV::BI__builtin_rvv_vset_v_f32m2_f32m4:
- case RISCV::BI__builtin_rvv_vset_v_f64m2_f64m4:
- case RISCV::BI__builtin_rvv_vset_v_u8m2_u8m4:
- case RISCV::BI__builtin_rvv_vset_v_u16m2_u16m4:
- case RISCV::BI__builtin_rvv_vset_v_u32m2_u32m4:
- case RISCV::BI__builtin_rvv_vset_v_u64m2_u64m4:
- case RISCV::BI__builtin_rvv_vset_v_i8m4_i8m8:
- case RISCV::BI__builtin_rvv_vset_v_i16m4_i16m8:
- case RISCV::BI__builtin_rvv_vset_v_i32m4_i32m8:
- case RISCV::BI__builtin_rvv_vset_v_i64m4_i64m8:
- case RISCV::BI__builtin_rvv_vset_v_f32m4_f32m8:
- case RISCV::BI__builtin_rvv_vset_v_f64m4_f64m8:
- case RISCV::BI__builtin_rvv_vset_v_u8m4_u8m8:
- case RISCV::BI__builtin_rvv_vset_v_u16m4_u16m8:
- case RISCV::BI__builtin_rvv_vset_v_u32m4_u32m8:
- case RISCV::BI__builtin_rvv_vset_v_u64m4_u64m8:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
- case RISCV::BI__builtin_rvv_vset_v_i8m1_i8m4:
- case RISCV::BI__builtin_rvv_vset_v_i16m1_i16m4:
- case RISCV::BI__builtin_rvv_vset_v_i32m1_i32m4:
- case RISCV::BI__builtin_rvv_vset_v_i64m1_i64m4:
- case RISCV::BI__builtin_rvv_vset_v_f32m1_f32m4:
- case RISCV::BI__builtin_rvv_vset_v_f64m1_f64m4:
- case RISCV::BI__builtin_rvv_vset_v_u8m1_u8m4:
- case RISCV::BI__builtin_rvv_vset_v_u16m1_u16m4:
- case RISCV::BI__builtin_rvv_vset_v_u32m1_u32m4:
- case RISCV::BI__builtin_rvv_vset_v_u64m1_u64m4:
- case RISCV::BI__builtin_rvv_vset_v_i8m2_i8m8:
- case RISCV::BI__builtin_rvv_vset_v_i16m2_i16m8:
- case RISCV::BI__builtin_rvv_vset_v_i32m2_i32m8:
- case RISCV::BI__builtin_rvv_vset_v_i64m2_i64m8:
- case RISCV::BI__builtin_rvv_vset_v_f32m2_f32m8:
- case RISCV::BI__builtin_rvv_vset_v_f64m2_f64m8:
- case RISCV::BI__builtin_rvv_vset_v_u8m2_u8m8:
- case RISCV::BI__builtin_rvv_vset_v_u16m2_u16m8:
- case RISCV::BI__builtin_rvv_vset_v_u32m2_u32m8:
- case RISCV::BI__builtin_rvv_vset_v_u64m2_u64m8:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
- case RISCV::BI__builtin_rvv_vset_v_i8m1_i8m8:
- case RISCV::BI__builtin_rvv_vset_v_i16m1_i16m8:
- case RISCV::BI__builtin_rvv_vset_v_i32m1_i32m8:
- case RISCV::BI__builtin_rvv_vset_v_i64m1_i64m8:
- case RISCV::BI__builtin_rvv_vset_v_f32m1_f32m8:
- case RISCV::BI__builtin_rvv_vset_v_f64m1_f64m8:
- case RISCV::BI__builtin_rvv_vset_v_u8m1_u8m8:
- case RISCV::BI__builtin_rvv_vset_v_u16m1_u16m8:
- case RISCV::BI__builtin_rvv_vset_v_u32m1_u32m8:
- case RISCV::BI__builtin_rvv_vset_v_u64m1_u64m8:
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
+ case RISCVVector::BI__builtin_rvv_vget_v: {
+ ASTContext::BuiltinVectorTypeInfo ResVecInfo =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
+ TheCall->getType().getCanonicalType().getTypePtr()));
+ ASTContext::BuiltinVectorTypeInfo VecInfo =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
+ TheCall->getArg(0)->getType().getCanonicalType().getTypePtr()));
+ unsigned MaxIndex;
+ if (VecInfo.NumVectors != 1) // vget for tuple type
+ MaxIndex = VecInfo.NumVectors;
+ else // vget for non-tuple type
+ MaxIndex = (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) /
+ (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors);
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
+ }
+ case RISCVVector::BI__builtin_rvv_vset_v: {
+ ASTContext::BuiltinVectorTypeInfo ResVecInfo =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
+ TheCall->getType().getCanonicalType().getTypePtr()));
+ ASTContext::BuiltinVectorTypeInfo VecInfo =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
+ TheCall->getArg(2)->getType().getCanonicalType().getTypePtr()));
+ unsigned MaxIndex;
+ if (ResVecInfo.NumVectors != 1) // vset for tuple type
+ MaxIndex = ResVecInfo.NumVectors;
+ else // vset fo non-tuple type
+ MaxIndex = (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) /
+ (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors);
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
+ }
+ // Vector Crypto
+ case RISCVVector::BI__builtin_rvv_vaeskf1_vi_tu:
+ case RISCVVector::BI__builtin_rvv_vaeskf2_vi_tu:
+ case RISCVVector::BI__builtin_rvv_vaeskf2_vi:
+ case RISCVVector::BI__builtin_rvv_vsm4k_vi_tu: {
+ QualType Op1Type = TheCall->getArg(0)->getType();
+ QualType Op2Type = TheCall->getArg(1)->getType();
+ return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 128) ||
+ CheckInvalidVLENandLMUL(TI, TheCall, *this, Op2Type, 128) ||
+ SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
+ }
+ case RISCVVector::BI__builtin_rvv_vsm3c_vi_tu:
+ case RISCVVector::BI__builtin_rvv_vsm3c_vi: {
+ QualType Op1Type = TheCall->getArg(0)->getType();
+ return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 256) ||
+ SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
+ }
+ case RISCVVector::BI__builtin_rvv_vaeskf1_vi:
+ case RISCVVector::BI__builtin_rvv_vsm4k_vi: {
+ QualType Op1Type = TheCall->getArg(0)->getType();
+ return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 128) ||
+ SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
+ }
+ case RISCVVector::BI__builtin_rvv_vaesdf_vv:
+ case RISCVVector::BI__builtin_rvv_vaesdf_vs:
+ case RISCVVector::BI__builtin_rvv_vaesdm_vv:
+ case RISCVVector::BI__builtin_rvv_vaesdm_vs:
+ case RISCVVector::BI__builtin_rvv_vaesef_vv:
+ case RISCVVector::BI__builtin_rvv_vaesef_vs:
+ case RISCVVector::BI__builtin_rvv_vaesem_vv:
+ case RISCVVector::BI__builtin_rvv_vaesem_vs:
+ case RISCVVector::BI__builtin_rvv_vaesz_vs:
+ case RISCVVector::BI__builtin_rvv_vsm4r_vv:
+ case RISCVVector::BI__builtin_rvv_vsm4r_vs:
+ case RISCVVector::BI__builtin_rvv_vaesdf_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaesdf_vs_tu:
+ case RISCVVector::BI__builtin_rvv_vaesdm_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaesdm_vs_tu:
+ case RISCVVector::BI__builtin_rvv_vaesef_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaesef_vs_tu:
+ case RISCVVector::BI__builtin_rvv_vaesem_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaesem_vs_tu:
+ case RISCVVector::BI__builtin_rvv_vaesz_vs_tu:
+ case RISCVVector::BI__builtin_rvv_vsm4r_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsm4r_vs_tu: {
+ QualType Op1Type = TheCall->getArg(0)->getType();
+ QualType Op2Type = TheCall->getArg(1)->getType();
+ return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 128) ||
+ CheckInvalidVLENandLMUL(TI, TheCall, *this, Op2Type, 128);
+ }
+ case RISCVVector::BI__builtin_rvv_vsha2ch_vv:
+ case RISCVVector::BI__builtin_rvv_vsha2cl_vv:
+ case RISCVVector::BI__builtin_rvv_vsha2ms_vv:
+ case RISCVVector::BI__builtin_rvv_vsha2ch_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsha2cl_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsha2ms_vv_tu: {
+ QualType Op1Type = TheCall->getArg(0)->getType();
+ QualType Op2Type = TheCall->getArg(1)->getType();
+ QualType Op3Type = TheCall->getArg(2)->getType();
+ ASTContext::BuiltinVectorTypeInfo Info =
+ Context.getBuiltinVectorTypeInfo(Op1Type->castAs<BuiltinType>());
+ uint64_t ElemSize = Context.getTypeSize(Info.ElementType);
+ if (ElemSize == 64 && !TI.hasFeature("zvknhb"))
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_riscv_builtin_requires_extension)
+ << /* IsExtension */ true << TheCall->getSourceRange() << "zvknb";
+
+ return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, ElemSize * 4) ||
+ CheckInvalidVLENandLMUL(TI, TheCall, *this, Op2Type, ElemSize * 4) ||
+ CheckInvalidVLENandLMUL(TI, TheCall, *this, Op3Type, ElemSize * 4);
+ }
+
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf8:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8mf2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m1:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u8m8:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16mf4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16mf2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m1:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u16m8:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32mf2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m1:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u32m8:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m1:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_i_se_u64m8:
+ // bit_27_26, bit_24_20, bit_11_7, simm5
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
+ SemaBuiltinConstantArgRange(TheCall, 2, 0, 31) ||
+ SemaBuiltinConstantArgRange(TheCall, 3, -16, 15);
+ case RISCVVector::BI__builtin_rvv_sf_vc_iv_se:
+ // bit_27_26, bit_11_7, vs2, simm5
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
+ SemaBuiltinConstantArgRange(TheCall, 3, -16, 15);
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_i:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_i_se:
+ // bit_27_26, bit_24_20, simm5
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
+ SemaBuiltinConstantArgRange(TheCall, 2, -16, 15);
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_iv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_iv_se:
+ // bit_27_26, vs2, simm5
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaBuiltinConstantArgRange(TheCall, 2, -16, 15);
+ case RISCVVector::BI__builtin_rvv_sf_vc_ivv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_ivw_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw_se:
+ // bit_27_26, vd, vs2, simm5
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaBuiltinConstantArgRange(TheCall, 3, -16, 15);
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf8:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8mf2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m1:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u8m8:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16mf4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16mf2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m1:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u16m8:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32mf2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m1:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u32m8:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m1:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m2:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m4:
+ case RISCVVector::BI__builtin_rvv_sf_vc_x_se_u64m8:
+ // bit_27_26, bit_24_20, bit_11_7, xs1
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
+ SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
+ case RISCVVector::BI__builtin_rvv_sf_vc_xv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_vv_se:
+ // bit_27_26, bit_11_7, vs2, xs1/vs1
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_x:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_x_se:
+ // bit_27_26, bit_24-20, xs1
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
+ case RISCVVector::BI__builtin_rvv_sf_vc_vvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_xvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_vvw_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_xvw_se:
+ // bit_27_26, vd, vs2, xs1
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vv_se:
+ // bit_27_26, vs2, xs1/vs1
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw_se:
+ // bit_27_26, vd, vs2, xs1/vs1
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
+ case RISCVVector::BI__builtin_rvv_sf_vc_fv_se:
+ // bit_26, bit_11_7, vs2, fs1
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) ||
+ SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
+ case RISCVVector::BI__builtin_rvv_sf_vc_fvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_fvw_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv_se:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw_se:
+ // bit_26, vd, vs2, fs1
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fv:
+ case RISCVVector::BI__builtin_rvv_sf_vc_v_fv_se:
+ // bit_26, vs2, fs1
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
+ // Check if byteselect is in [0, 3]
+ case RISCV::BI__builtin_riscv_aes32dsi:
+ case RISCV::BI__builtin_riscv_aes32dsmi:
+ case RISCV::BI__builtin_riscv_aes32esi:
+ case RISCV::BI__builtin_riscv_aes32esmi:
+ case RISCV::BI__builtin_riscv_sm4ks:
+ case RISCV::BI__builtin_riscv_sm4ed:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
+ // Check if rnum is in [0, 10]
+ case RISCV::BI__builtin_riscv_aes64ks1i:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 10);
+ // Check if value range for vxrm is in [0, 3]
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx:
+ case RISCVVector::BI__builtin_rvv_vasub_vv:
+ case RISCVVector::BI__builtin_rvv_vasub_vx:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx:
+ case RISCVVector::BI__builtin_rvv_vssra_vv:
+ case RISCVVector::BI__builtin_rvv_vssra_vx:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_tu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_tu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_tu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_tu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_tu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_tu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_m:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_m:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_m:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_m:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_m:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_m:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_m:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_m:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_m:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_m:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_m:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_m:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_m:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_m:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_m:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_m:
+ return SemaBuiltinConstantArgRange(TheCall, 3, 0, 3);
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vaaddu_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vaadd_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vasubu_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vasub_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vasub_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_mu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_mu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_mu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_mu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_mu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_mu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_tum:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_tum:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_tum:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_tum:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_tum:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_tum:
+ case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vssra_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vssra_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vv_tumu:
+ case RISCVVector::BI__builtin_rvv_vssrl_vx_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wv_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclip_wx_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wv_tumu:
+ case RISCVVector::BI__builtin_rvv_vnclipu_wx_tumu:
+ return SemaBuiltinConstantArgRange(TheCall, 4, 0, 3);
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 4);
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm:
+ case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm:
+ case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm:
+ case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_m:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 4);
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_mu:
+ return SemaBuiltinConstantArgRange(TheCall, 3, 0, 4);
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_m:
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tum:
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tumu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_mu:
+ case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_mu:
+ return SemaBuiltinConstantArgRange(TheCall, 4, 0, 4);
+ case RISCV::BI__builtin_riscv_ntl_load:
+ case RISCV::BI__builtin_riscv_ntl_store:
+ DeclRefExpr *DRE =
+ cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+ assert((BuiltinID == RISCV::BI__builtin_riscv_ntl_store ||
+ BuiltinID == RISCV::BI__builtin_riscv_ntl_load) &&
+ "Unexpected RISC-V nontemporal load/store builtin!");
+ bool IsStore = BuiltinID == RISCV::BI__builtin_riscv_ntl_store;
+ unsigned NumArgs = IsStore ? 3 : 2;
+
+ if (checkArgCountAtLeast(*this, TheCall, NumArgs - 1))
+ return true;
+
+ if (checkArgCountAtMost(*this, TheCall, NumArgs))
+ return true;
+
+ // Domain value should be compile-time constant.
+ // 2 <= domain <= 5
+ if (TheCall->getNumArgs() == NumArgs &&
+ SemaBuiltinConstantArgRange(TheCall, NumArgs - 1, 2, 5))
+ return true;
+
+ Expr *PointerArg = TheCall->getArg(0);
+ ExprResult PointerArgResult =
+ DefaultFunctionArrayLvalueConversion(PointerArg);
+
+ if (PointerArgResult.isInvalid())
+ return true;
+ PointerArg = PointerArgResult.get();
+
+ const PointerType *PtrType = PointerArg->getType()->getAs<PointerType>();
+ if (!PtrType) {
+ Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
+ << PointerArg->getType() << PointerArg->getSourceRange();
+ return true;
+ }
+
+ QualType ValType = PtrType->getPointeeType();
+ ValType = ValType.getUnqualifiedType();
+ if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
+ !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
+ !ValType->isVectorType() && !ValType->isRVVSizelessBuiltinType()) {
+ Diag(DRE->getBeginLoc(),
+ diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
+ << PointerArg->getType() << PointerArg->getSourceRange();
+ return true;
+ }
+
+ if (!IsStore) {
+ TheCall->setType(ValType);
+ return false;
+ }
+
+ ExprResult ValArg = TheCall->getArg(1);
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(
+ Context, ValType, /*consume*/ false);
+ ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
+ if (ValArg.isInvalid())
+ return true;
+
+ TheCall->setArg(1, ValArg.get());
+ TheCall->setType(Context.VoidTy);
+ return false;
}
return false;
@@ -3727,7 +6119,8 @@ bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
CallExpr *TheCall) {
if (BuiltinID == SystemZ::BI__builtin_tabort) {
Expr *Arg = TheCall->getArg(0);
- if (Optional<llvm::APSInt> AbortCode = Arg->getIntegerConstantExpr(Context))
+ if (std::optional<llvm::APSInt> AbortCode =
+ Arg->getIntegerConstantExpr(Context))
if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256)
return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
<< Arg->getSourceRange();
@@ -3792,6 +6185,77 @@ bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}
+bool Sema::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
+ switch (BuiltinID) {
+ case WebAssembly::BI__builtin_wasm_ref_null_extern:
+ return BuiltinWasmRefNullExtern(TheCall);
+ case WebAssembly::BI__builtin_wasm_ref_null_func:
+ return BuiltinWasmRefNullFunc(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_get:
+ return BuiltinWasmTableGet(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_set:
+ return BuiltinWasmTableSet(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_size:
+ return BuiltinWasmTableSize(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_grow:
+ return BuiltinWasmTableGrow(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_fill:
+ return BuiltinWasmTableFill(TheCall);
+ case WebAssembly::BI__builtin_wasm_table_copy:
+ return BuiltinWasmTableCopy(TheCall);
+ }
+
+ return false;
+}
+
+void Sema::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D) {
+ const TargetInfo &TI = Context.getTargetInfo();
+
+ ASTContext::BuiltinVectorTypeInfo Info =
+ Context.getBuiltinVectorTypeInfo(Ty->castAs<BuiltinType>());
+ unsigned EltSize = Context.getTypeSize(Info.ElementType);
+ unsigned MinElts = Info.EC.getKnownMinValue();
+
+ // (ELEN, LMUL) pairs of (8, mf8), (16, mf4), (32, mf2), (64, m1) requires at
+ // least zve64x
+ if (((EltSize == 64 && Info.ElementType->isIntegerType()) || MinElts == 1) &&
+ !TI.hasFeature("zve64x"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64x";
+ else if (Info.ElementType->isFloat16Type() && !TI.hasFeature("zvfh") &&
+ !TI.hasFeature("zvfhmin"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D)
+ << Ty << "zvfh or zvfhmin";
+ else if (Info.ElementType->isBFloat16Type() &&
+ !TI.hasFeature("experimental-zvfbfmin"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zvfbfmin";
+ else if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Float) &&
+ !TI.hasFeature("zve32f"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32f";
+ else if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Double) &&
+ !TI.hasFeature("zve64d"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64d";
+ // Given that caller already checked isRVVType() before calling this function,
+ // if we don't have at least zve32x supported, then we need to emit error.
+ else if (!TI.hasFeature("zve32x"))
+ Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32x";
+}
+
+bool Sema::CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
+ switch (BuiltinID) {
+ case NVPTX::BI__nvvm_cp_async_ca_shared_global_4:
+ case NVPTX::BI__nvvm_cp_async_ca_shared_global_8:
+ case NVPTX::BI__nvvm_cp_async_ca_shared_global_16:
+ case NVPTX::BI__nvvm_cp_async_cg_shared_global_16:
+ return checkArgCountAtMost(*this, TheCall, 3);
+ }
+
+ return false;
+}
+
/// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
/// This checks that the target supports __builtin_cpu_supports and
/// that the string argument is constant and valid.
@@ -3850,14 +6314,22 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_vcvttss2si64:
case X86::BI__builtin_ia32_vcvttss2usi32:
case X86::BI__builtin_ia32_vcvttss2usi64:
+ case X86::BI__builtin_ia32_vcvttsh2si32:
+ case X86::BI__builtin_ia32_vcvttsh2si64:
+ case X86::BI__builtin_ia32_vcvttsh2usi32:
+ case X86::BI__builtin_ia32_vcvttsh2usi64:
ArgNum = 1;
break;
case X86::BI__builtin_ia32_maxpd512:
case X86::BI__builtin_ia32_maxps512:
case X86::BI__builtin_ia32_minpd512:
case X86::BI__builtin_ia32_minps512:
+ case X86::BI__builtin_ia32_maxph512:
+ case X86::BI__builtin_ia32_minph512:
ArgNum = 2;
break;
+ case X86::BI__builtin_ia32_vcvtph2pd512_mask:
+ case X86::BI__builtin_ia32_vcvtph2psx512_mask:
case X86::BI__builtin_ia32_cvtps2pd512_mask:
case X86::BI__builtin_ia32_cvttpd2dq512_mask:
case X86::BI__builtin_ia32_cvttpd2qq512_mask:
@@ -3867,16 +6339,24 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_cvttps2qq512_mask:
case X86::BI__builtin_ia32_cvttps2udq512_mask:
case X86::BI__builtin_ia32_cvttps2uqq512_mask:
+ case X86::BI__builtin_ia32_vcvttph2w512_mask:
+ case X86::BI__builtin_ia32_vcvttph2uw512_mask:
+ case X86::BI__builtin_ia32_vcvttph2dq512_mask:
+ case X86::BI__builtin_ia32_vcvttph2udq512_mask:
+ case X86::BI__builtin_ia32_vcvttph2qq512_mask:
+ case X86::BI__builtin_ia32_vcvttph2uqq512_mask:
case X86::BI__builtin_ia32_exp2pd_mask:
case X86::BI__builtin_ia32_exp2ps_mask:
case X86::BI__builtin_ia32_getexppd512_mask:
case X86::BI__builtin_ia32_getexpps512_mask:
+ case X86::BI__builtin_ia32_getexpph512_mask:
case X86::BI__builtin_ia32_rcp28pd_mask:
case X86::BI__builtin_ia32_rcp28ps_mask:
case X86::BI__builtin_ia32_rsqrt28pd_mask:
case X86::BI__builtin_ia32_rsqrt28ps_mask:
case X86::BI__builtin_ia32_vcomisd:
case X86::BI__builtin_ia32_vcomiss:
+ case X86::BI__builtin_ia32_vcomish:
case X86::BI__builtin_ia32_vcvtph2ps512_mask:
ArgNum = 3;
break;
@@ -3884,21 +6364,30 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_cmpps512_mask:
case X86::BI__builtin_ia32_cmpsd_mask:
case X86::BI__builtin_ia32_cmpss_mask:
+ case X86::BI__builtin_ia32_cmpsh_mask:
+ case X86::BI__builtin_ia32_vcvtsh2sd_round_mask:
+ case X86::BI__builtin_ia32_vcvtsh2ss_round_mask:
case X86::BI__builtin_ia32_cvtss2sd_round_mask:
case X86::BI__builtin_ia32_getexpsd128_round_mask:
case X86::BI__builtin_ia32_getexpss128_round_mask:
+ case X86::BI__builtin_ia32_getexpsh128_round_mask:
case X86::BI__builtin_ia32_getmantpd512_mask:
case X86::BI__builtin_ia32_getmantps512_mask:
+ case X86::BI__builtin_ia32_getmantph512_mask:
case X86::BI__builtin_ia32_maxsd_round_mask:
case X86::BI__builtin_ia32_maxss_round_mask:
+ case X86::BI__builtin_ia32_maxsh_round_mask:
case X86::BI__builtin_ia32_minsd_round_mask:
case X86::BI__builtin_ia32_minss_round_mask:
+ case X86::BI__builtin_ia32_minsh_round_mask:
case X86::BI__builtin_ia32_rcp28sd_round_mask:
case X86::BI__builtin_ia32_rcp28ss_round_mask:
case X86::BI__builtin_ia32_reducepd512_mask:
case X86::BI__builtin_ia32_reduceps512_mask:
+ case X86::BI__builtin_ia32_reduceph512_mask:
case X86::BI__builtin_ia32_rndscalepd_mask:
case X86::BI__builtin_ia32_rndscaleps_mask:
+ case X86::BI__builtin_ia32_rndscaleph_mask:
case X86::BI__builtin_ia32_rsqrt28sd_round_mask:
case X86::BI__builtin_ia32_rsqrt28ss_round_mask:
ArgNum = 4;
@@ -3913,14 +6402,17 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_fixupimmss_maskz:
case X86::BI__builtin_ia32_getmantsd_round_mask:
case X86::BI__builtin_ia32_getmantss_round_mask:
+ case X86::BI__builtin_ia32_getmantsh_round_mask:
case X86::BI__builtin_ia32_rangepd512_mask:
case X86::BI__builtin_ia32_rangeps512_mask:
case X86::BI__builtin_ia32_rangesd128_round_mask:
case X86::BI__builtin_ia32_rangess128_round_mask:
case X86::BI__builtin_ia32_reducesd_mask:
case X86::BI__builtin_ia32_reducess_mask:
+ case X86::BI__builtin_ia32_reducesh_mask:
case X86::BI__builtin_ia32_rndscalesd_round_mask:
case X86::BI__builtin_ia32_rndscaless_round_mask:
+ case X86::BI__builtin_ia32_rndscalesh_round_mask:
ArgNum = 5;
break;
case X86::BI__builtin_ia32_vcvtsd2si64:
@@ -3931,11 +6423,20 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_vcvtss2si64:
case X86::BI__builtin_ia32_vcvtss2usi32:
case X86::BI__builtin_ia32_vcvtss2usi64:
+ case X86::BI__builtin_ia32_vcvtsh2si32:
+ case X86::BI__builtin_ia32_vcvtsh2si64:
+ case X86::BI__builtin_ia32_vcvtsh2usi32:
+ case X86::BI__builtin_ia32_vcvtsh2usi64:
case X86::BI__builtin_ia32_sqrtpd512:
case X86::BI__builtin_ia32_sqrtps512:
+ case X86::BI__builtin_ia32_sqrtph512:
ArgNum = 1;
HasRC = true;
break;
+ case X86::BI__builtin_ia32_addph512:
+ case X86::BI__builtin_ia32_divph512:
+ case X86::BI__builtin_ia32_mulph512:
+ case X86::BI__builtin_ia32_subph512:
case X86::BI__builtin_ia32_addpd512:
case X86::BI__builtin_ia32_addps512:
case X86::BI__builtin_ia32_divpd512:
@@ -3950,11 +6451,17 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_cvtusi2sd64:
case X86::BI__builtin_ia32_cvtusi2ss32:
case X86::BI__builtin_ia32_cvtusi2ss64:
+ case X86::BI__builtin_ia32_vcvtusi2sh:
+ case X86::BI__builtin_ia32_vcvtusi642sh:
+ case X86::BI__builtin_ia32_vcvtsi2sh:
+ case X86::BI__builtin_ia32_vcvtsi642sh:
ArgNum = 2;
HasRC = true;
break;
case X86::BI__builtin_ia32_cvtdq2ps512_mask:
case X86::BI__builtin_ia32_cvtudq2ps512_mask:
+ case X86::BI__builtin_ia32_vcvtpd2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtps2phx512_mask:
case X86::BI__builtin_ia32_cvtpd2ps512_mask:
case X86::BI__builtin_ia32_cvtpd2dq512_mask:
case X86::BI__builtin_ia32_cvtpd2qq512_mask:
@@ -3968,30 +6475,54 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_cvtqq2ps512_mask:
case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
+ case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtw2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtph2w512_mask:
+ case X86::BI__builtin_ia32_vcvtph2uw512_mask:
+ case X86::BI__builtin_ia32_vcvtph2dq512_mask:
+ case X86::BI__builtin_ia32_vcvtph2udq512_mask:
+ case X86::BI__builtin_ia32_vcvtph2qq512_mask:
+ case X86::BI__builtin_ia32_vcvtph2uqq512_mask:
+ case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
+ case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
ArgNum = 3;
HasRC = true;
break;
+ case X86::BI__builtin_ia32_addsh_round_mask:
case X86::BI__builtin_ia32_addss_round_mask:
case X86::BI__builtin_ia32_addsd_round_mask:
+ case X86::BI__builtin_ia32_divsh_round_mask:
case X86::BI__builtin_ia32_divss_round_mask:
case X86::BI__builtin_ia32_divsd_round_mask:
+ case X86::BI__builtin_ia32_mulsh_round_mask:
case X86::BI__builtin_ia32_mulss_round_mask:
case X86::BI__builtin_ia32_mulsd_round_mask:
+ case X86::BI__builtin_ia32_subsh_round_mask:
case X86::BI__builtin_ia32_subss_round_mask:
case X86::BI__builtin_ia32_subsd_round_mask:
+ case X86::BI__builtin_ia32_scalefph512_mask:
case X86::BI__builtin_ia32_scalefpd512_mask:
case X86::BI__builtin_ia32_scalefps512_mask:
case X86::BI__builtin_ia32_scalefsd_round_mask:
case X86::BI__builtin_ia32_scalefss_round_mask:
+ case X86::BI__builtin_ia32_scalefsh_round_mask:
case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
+ case X86::BI__builtin_ia32_vcvtss2sh_round_mask:
+ case X86::BI__builtin_ia32_vcvtsd2sh_round_mask:
case X86::BI__builtin_ia32_sqrtsd_round_mask:
case X86::BI__builtin_ia32_sqrtss_round_mask:
+ case X86::BI__builtin_ia32_sqrtsh_round_mask:
case X86::BI__builtin_ia32_vfmaddsd3_mask:
case X86::BI__builtin_ia32_vfmaddsd3_maskz:
case X86::BI__builtin_ia32_vfmaddsd3_mask3:
case X86::BI__builtin_ia32_vfmaddss3_mask:
case X86::BI__builtin_ia32_vfmaddss3_maskz:
case X86::BI__builtin_ia32_vfmaddss3_mask3:
+ case X86::BI__builtin_ia32_vfmaddsh3_mask:
+ case X86::BI__builtin_ia32_vfmaddsh3_maskz:
+ case X86::BI__builtin_ia32_vfmaddsh3_mask3:
case X86::BI__builtin_ia32_vfmaddpd512_mask:
case X86::BI__builtin_ia32_vfmaddpd512_maskz:
case X86::BI__builtin_ia32_vfmaddpd512_mask3:
@@ -4000,6 +6531,10 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_vfmaddps512_maskz:
case X86::BI__builtin_ia32_vfmaddps512_mask3:
case X86::BI__builtin_ia32_vfmsubps512_mask3:
+ case X86::BI__builtin_ia32_vfmaddph512_mask:
+ case X86::BI__builtin_ia32_vfmaddph512_maskz:
+ case X86::BI__builtin_ia32_vfmaddph512_mask3:
+ case X86::BI__builtin_ia32_vfmsubph512_mask3:
case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
@@ -4008,6 +6543,26 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
+ case X86::BI__builtin_ia32_vfmaddsubph512_mask:
+ case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
+ case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
+ case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
+ case X86::BI__builtin_ia32_vfmaddcsh_mask:
+ case X86::BI__builtin_ia32_vfmaddcsh_round_mask:
+ case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
+ case X86::BI__builtin_ia32_vfmaddcph512_mask:
+ case X86::BI__builtin_ia32_vfmaddcph512_maskz:
+ case X86::BI__builtin_ia32_vfmaddcph512_mask3:
+ case X86::BI__builtin_ia32_vfcmaddcsh_mask:
+ case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
+ case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
+ case X86::BI__builtin_ia32_vfcmaddcph512_mask:
+ case X86::BI__builtin_ia32_vfcmaddcph512_maskz:
+ case X86::BI__builtin_ia32_vfcmaddcph512_mask3:
+ case X86::BI__builtin_ia32_vfmulcsh_mask:
+ case X86::BI__builtin_ia32_vfmulcph512_mask:
+ case X86::BI__builtin_ia32_vfcmulcsh_mask:
+ case X86::BI__builtin_ia32_vfcmulcph512_mask:
ArgNum = 4;
HasRC = true;
break;
@@ -4166,7 +6721,7 @@ bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
return true;
int ArgExtValue = Result.getExtValue();
- assert((ArgExtValue >= TileRegLow || ArgExtValue <= TileRegHigh) &&
+ assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
"Incorrect tile register num.");
if (ArgValues.test(ArgExtValue))
return Diag(TheCall->getBeginLoc(),
@@ -4197,6 +6752,9 @@ bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_tdpbusd:
case X86::BI__builtin_ia32_tdpbuud:
case X86::BI__builtin_ia32_tdpbf16ps:
+ case X86::BI__builtin_ia32_tdpfp16ps:
+ case X86::BI__builtin_ia32_tcmmimfp16ps:
+ case X86::BI__builtin_ia32_tcmmrlfp16ps:
return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
}
}
@@ -4359,6 +6917,9 @@ bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
case X86::BI__builtin_ia32_getmantps256_mask:
case X86::BI__builtin_ia32_getmantpd512_mask:
case X86::BI__builtin_ia32_getmantps512_mask:
+ case X86::BI__builtin_ia32_getmantph128_mask:
+ case X86::BI__builtin_ia32_getmantph256_mask:
+ case X86::BI__builtin_ia32_getmantph512_mask:
case X86::BI__builtin_ia32_vec_ext_v16qi:
case X86::BI__builtin_ia32_vec_ext_v16hi:
i = 1; l = 0; u = 15;
@@ -4377,6 +6938,7 @@ bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
case X86::BI__builtin_ia32_rangeps512_mask:
case X86::BI__builtin_ia32_getmantsd_round_mask:
case X86::BI__builtin_ia32_getmantss_round_mask:
+ case X86::BI__builtin_ia32_getmantsh_round_mask:
case X86::BI__builtin_ia32_vec_set_v16qi:
case X86::BI__builtin_ia32_vec_set_v16hi:
i = 2; l = 0; u = 15;
@@ -4429,12 +6991,16 @@ bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
case X86::BI__builtin_ia32_rndscalepd_256_mask:
case X86::BI__builtin_ia32_rndscaleps_mask:
case X86::BI__builtin_ia32_rndscalepd_mask:
+ case X86::BI__builtin_ia32_rndscaleph_mask:
case X86::BI__builtin_ia32_reducepd128_mask:
case X86::BI__builtin_ia32_reducepd256_mask:
case X86::BI__builtin_ia32_reducepd512_mask:
case X86::BI__builtin_ia32_reduceps128_mask:
case X86::BI__builtin_ia32_reduceps256_mask:
case X86::BI__builtin_ia32_reduceps512_mask:
+ case X86::BI__builtin_ia32_reduceph128_mask:
+ case X86::BI__builtin_ia32_reduceph256_mask:
+ case X86::BI__builtin_ia32_reduceph512_mask:
case X86::BI__builtin_ia32_prold512:
case X86::BI__builtin_ia32_prolq512:
case X86::BI__builtin_ia32_prold128:
@@ -4453,8 +7019,12 @@ bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
case X86::BI__builtin_ia32_fpclassps256_mask:
case X86::BI__builtin_ia32_fpclassps512_mask:
case X86::BI__builtin_ia32_fpclasspd512_mask:
+ case X86::BI__builtin_ia32_fpclassph128_mask:
+ case X86::BI__builtin_ia32_fpclassph256_mask:
+ case X86::BI__builtin_ia32_fpclassph512_mask:
case X86::BI__builtin_ia32_fpclasssd_mask:
case X86::BI__builtin_ia32_fpclassss_mask:
+ case X86::BI__builtin_ia32_fpclasssh_mask:
case X86::BI__builtin_ia32_pslldqi128_byteshift:
case X86::BI__builtin_ia32_pslldqi256_byteshift:
case X86::BI__builtin_ia32_pslldqi512_byteshift:
@@ -4549,6 +7119,7 @@ bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
case X86::BI__builtin_ia32_pternlogq128_maskz:
case X86::BI__builtin_ia32_pternlogq256_mask:
case X86::BI__builtin_ia32_pternlogq256_maskz:
+ case X86::BI__builtin_ia32_vsm3rnds2:
i = 3; l = 0; u = 255;
break;
case X86::BI__builtin_ia32_gatherpfdpd:
@@ -4565,8 +7136,14 @@ bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
case X86::BI__builtin_ia32_reducess_mask:
case X86::BI__builtin_ia32_rndscalesd_round_mask:
case X86::BI__builtin_ia32_rndscaless_round_mask:
+ case X86::BI__builtin_ia32_rndscalesh_round_mask:
+ case X86::BI__builtin_ia32_reducesh_mask:
i = 4; l = 0; u = 255;
break;
+ case X86::BI__builtin_ia32_cmpccxadd32:
+ case X86::BI__builtin_ia32_cmpccxadd64:
+ i = 3; l = 0; u = 15;
+ break;
}
// Note that we don't force a hard error on the range check here, allowing
@@ -4581,10 +7158,16 @@ bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
/// Returns true when the format fits the function and the FormatStringInfo has
/// been populated.
bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
- FormatStringInfo *FSI) {
- FSI->HasVAListArg = Format->getFirstArg() == 0;
+ bool IsVariadic, FormatStringInfo *FSI) {
+ if (Format->getFirstArg() == 0)
+ FSI->ArgPassingKind = FAPK_VAList;
+ else if (IsVariadic)
+ FSI->ArgPassingKind = FAPK_Variadic;
+ else
+ FSI->ArgPassingKind = FAPK_Fixed;
FSI->FormatIdx = Format->getFormatIdx() - 1;
- FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;
+ FSI->FirstDataArg =
+ FSI->ArgPassingKind == FAPK_VAList ? 0 : Format->getFirstArg() - 1;
// The way the format attribute works in GCC, the implicit this argument
// of member functions is counted. However, it doesn't appear in our own
@@ -4604,8 +7187,7 @@ bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
/// Returns true if the value evaluates to null.
static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
// If the expression has non-null type, it doesn't evaluate to null.
- if (auto nullability
- = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
+ if (auto nullability = Expr->IgnoreImplicit()->getType()->getNullability()) {
if (*nullability == NullabilityKind::NonNull)
return false;
}
@@ -4639,7 +7221,7 @@ static void CheckNonNullArgument(Sema &S,
bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
FormatStringInfo FSI;
if ((GetFormatStringType(Format) == FST_NSString) &&
- getFormatStringInfo(Format, false, &FSI)) {
+ getFormatStringInfo(Format, false, true, &FSI)) {
Idx = FSI.FormatIdx;
return true;
}
@@ -4689,8 +7271,8 @@ DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
}
/// Determine whether the given type has a non-null nullability annotation.
-static bool isNonNullType(ASTContext &ctx, QualType type) {
- if (auto nullability = type->getNullability(ctx))
+static bool isNonNullType(QualType type) {
+ if (auto nullability = type->getNullability())
return *nullability == NullabilityKind::NonNull;
return false;
@@ -4703,8 +7285,8 @@ static void CheckNonNullArguments(Sema &S,
SourceLocation CallSiteLoc) {
assert((FDecl || Proto) && "Need a function declaration or prototype");
- // Already checked by by constant evaluator.
- if (S.isConstantEvaluated())
+ // Already checked by constant evaluator.
+ if (S.isConstantEvaluatedContext())
return;
// Check the attributes attached to the method/function itself.
llvm::SmallBitVector NonNullArgs;
@@ -4743,8 +7325,7 @@ static void CheckNonNullArguments(Sema &S,
for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
I != E; ++I, ++ParamIndex) {
const ParmVarDecl *PVD = *I;
- if (PVD->hasAttr<NonNullAttr>() ||
- isNonNullType(S.Context, PVD->getType())) {
+ if (PVD->hasAttr<NonNullAttr>() || isNonNullType(PVD->getType())) {
if (NonNullArgs.empty())
NonNullArgs.resize(Args.size());
@@ -4773,7 +7354,7 @@ static void CheckNonNullArguments(Sema &S,
if (Proto) {
unsigned Index = 0;
for (auto paramType : Proto->getParamTypes()) {
- if (isNonNullType(S.Context, paramType)) {
+ if (isNonNullType(paramType)) {
if (NonNullArgs.empty())
NonNullArgs.resize(Args.size());
@@ -4789,7 +7370,41 @@ static void CheckNonNullArguments(Sema &S,
for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
ArgIndex != ArgIndexEnd; ++ArgIndex) {
if (NonNullArgs[ArgIndex])
- CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc);
+ CheckNonNullArgument(S, Args[ArgIndex], Args[ArgIndex]->getExprLoc());
+ }
+}
+
+// 16 byte ByVal alignment not due to a vector member is not honoured by XL
+// on AIX. Emit a warning here that users are generating binary incompatible
+// code to be safe.
+// Here we try to get information about the alignment of the struct member
+// from the struct passed to the caller function. We only warn when the struct
+// is passed byval, hence the series of checks and early returns if we are a not
+// passing a struct byval.
+void Sema::checkAIXMemberAlignment(SourceLocation Loc, const Expr *Arg) {
+ const auto *ICE = dyn_cast<ImplicitCastExpr>(Arg->IgnoreParens());
+ if (!ICE)
+ return;
+
+ const auto *DR = dyn_cast<DeclRefExpr>(ICE->getSubExpr());
+ if (!DR)
+ return;
+
+ const auto *PD = dyn_cast<ParmVarDecl>(DR->getDecl());
+ if (!PD || !PD->getType()->isRecordType())
+ return;
+
+ QualType ArgType = Arg->getType();
+ for (const FieldDecl *FD :
+ ArgType->castAs<RecordType>()->getDecl()->fields()) {
+ if (const auto *AA = FD->getAttr<AlignedAttr>()) {
+ CharUnits Alignment =
+ Context.toCharUnitsFromBits(AA->getAlignment(Context));
+ if (Alignment.getQuantity() == 16) {
+ Diag(FD->getLocation(), diag::warn_not_xl_compatible) << FD;
+ Diag(Loc, diag::note_misaligned_member_used_here) << PD;
+ }
+ }
}
}
@@ -4816,9 +7431,9 @@ void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
// Find expected alignment, and the actual alignment of the passed object.
// getTypeAlignInChars requires complete types
- if (ArgTy.isNull() || ParamTy->isIncompleteType() ||
- ArgTy->isIncompleteType() || ParamTy->isUndeducedType() ||
- ArgTy->isUndeducedType())
+ if (ArgTy.isNull() || ParamTy->isDependentType() ||
+ ParamTy->isIncompleteType() || ArgTy->isIncompleteType() ||
+ ParamTy->isUndeducedType() || ArgTy->isUndeducedType())
return;
CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy);
@@ -4829,12 +7444,12 @@ void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
if (ArgAlign < ParamAlign)
Diag(Loc, diag::warn_param_mismatched_alignment)
<< (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity()
- << ParamName << FDecl;
+ << ParamName << (FDecl != nullptr) << FDecl;
}
/// Handles the checks for format strings, non-POD arguments to vararg
-/// functions, NULL arguments passed to non-NULL parameters, and diagnose_if
-/// attributes.
+/// functions, NULL arguments passed to non-NULL parameters, diagnose_if
+/// attributes and AArch64 SME attributes.
void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc,
@@ -4903,12 +7518,71 @@ void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
if (Arg->containsErrors())
continue;
+ if (Context.getTargetInfo().getTriple().isOSAIX() && FDecl && Arg &&
+ FDecl->hasLinkage() &&
+ FDecl->getFormalLinkage() != Linkage::Internal &&
+ CallType == VariadicDoesNotApply)
+ checkAIXMemberAlignment((Arg->getExprLoc()), Arg);
+
QualType ParamTy = Proto->getParamType(ArgIdx);
QualType ArgTy = Arg->getType();
CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1),
ArgTy, ParamTy);
}
}
+
+ // If the callee has an AArch64 SME attribute to indicate that it is an
+ // __arm_streaming function, then the caller requires SME to be available.
+ FunctionProtoType::ExtProtoInfo ExtInfo = Proto->getExtProtoInfo();
+ if (ExtInfo.AArch64SMEAttributes & FunctionType::SME_PStateSMEnabledMask) {
+ if (auto *CallerFD = dyn_cast<FunctionDecl>(CurContext)) {
+ llvm::StringMap<bool> CallerFeatureMap;
+ Context.getFunctionFeatureMap(CallerFeatureMap, CallerFD);
+ if (!CallerFeatureMap.contains("sme"))
+ Diag(Loc, diag::err_sme_call_in_non_sme_target);
+ } else if (!Context.getTargetInfo().hasFeature("sme")) {
+ Diag(Loc, diag::err_sme_call_in_non_sme_target);
+ }
+ }
+
+ FunctionType::ArmStateValue CalleeArmZAState =
+ FunctionType::getArmZAState(ExtInfo.AArch64SMEAttributes);
+ FunctionType::ArmStateValue CalleeArmZT0State =
+ FunctionType::getArmZT0State(ExtInfo.AArch64SMEAttributes);
+ if (CalleeArmZAState != FunctionType::ARM_None ||
+ CalleeArmZT0State != FunctionType::ARM_None) {
+ bool CallerHasZAState = false;
+ bool CallerHasZT0State = false;
+ if (const auto *CallerFD = dyn_cast<FunctionDecl>(CurContext)) {
+ auto *Attr = CallerFD->getAttr<ArmNewAttr>();
+ if (Attr && Attr->isNewZA())
+ CallerHasZAState = true;
+ if (Attr && Attr->isNewZT0())
+ CallerHasZT0State = true;
+ if (const auto *FPT = CallerFD->getType()->getAs<FunctionProtoType>()) {
+ CallerHasZAState |=
+ FunctionType::getArmZAState(
+ FPT->getExtProtoInfo().AArch64SMEAttributes) !=
+ FunctionType::ARM_None;
+ CallerHasZT0State |=
+ FunctionType::getArmZT0State(
+ FPT->getExtProtoInfo().AArch64SMEAttributes) !=
+ FunctionType::ARM_None;
+ }
+ }
+
+ if (CalleeArmZAState != FunctionType::ARM_None && !CallerHasZAState)
+ Diag(Loc, diag::err_sme_za_call_no_za_state);
+
+ if (CalleeArmZT0State != FunctionType::ARM_None && !CallerHasZT0State)
+ Diag(Loc, diag::err_sme_zt0_call_no_zt0_state);
+
+ if (CallerHasZAState && CalleeArmZAState == FunctionType::ARM_None &&
+ CalleeArmZT0State != FunctionType::ARM_None) {
+ Diag(Loc, diag::err_sme_unimplemented_za_save_restore);
+ Diag(Loc, diag::note_sme_use_preserves_za);
+ }
+ }
}
if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) {
@@ -4943,8 +7617,9 @@ void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;
auto *Ctor = cast<CXXConstructorDecl>(FDecl);
- CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType),
- Context.getPointerType(Ctor->getThisObjectType()));
+ CheckArgAlignment(
+ Loc, FDecl, "'this'", Context.getPointerType(ThisType),
+ Context.getPointerType(Ctor->getFunctionObjectParameterType()));
checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
Loc, SourceRange(), CallType);
@@ -4964,14 +7639,15 @@ bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
unsigned NumArgs = TheCall->getNumArgs();
Expr *ImplicitThis = nullptr;
- if (IsMemberOperatorCall) {
- // If this is a call to a member operator, hide the first argument
- // from checkCall.
+ if (IsMemberOperatorCall && !FDecl->hasCXXExplicitFunctionObjectParameter()) {
+ // If this is a call to a member operator, hide the first
+ // argument from checkCall.
// FIXME: Our choice of AST representation here is less than ideal.
ImplicitThis = Args[0];
++Args;
--NumArgs;
- } else if (IsMemberFunction)
+ } else if (IsMemberFunction && !FDecl->isStatic() &&
+ !FDecl->hasCXXExplicitFunctionObjectParameter())
ImplicitThis =
cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument();
@@ -4984,14 +7660,14 @@ bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
ThisType = Context.getPointerType(ThisType);
}
- QualType ThisTypeFromDecl =
- Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType());
+ QualType ThisTypeFromDecl = Context.getPointerType(
+ cast<CXXMethodDecl>(FDecl)->getFunctionObjectParameterType());
CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType,
ThisTypeFromDecl);
}
- checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs),
+ checkCall(FDecl, Proto, ImplicitThis, llvm::ArrayRef(Args, NumArgs),
IsMemberFunction, TheCall->getRParenLoc(),
TheCall->getCallee()->getSourceRange(), CallType);
@@ -5001,10 +7677,13 @@ bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
if (!FnInfo)
return false;
- CheckTCBEnforcement(TheCall, FDecl);
+ // Enforce TCB except for builtin calls, which are always allowed.
+ if (FDecl->getBuiltinID() == 0)
+ CheckTCBEnforcement(TheCall->getExprLoc(), FDecl);
CheckAbsoluteValueFunction(TheCall, FDecl);
CheckMaxUnsignedZero(TheCall, FDecl);
+ CheckInfNaNFunction(TheCall, FDecl);
if (getLangOpts().ObjC)
DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);
@@ -5041,6 +7720,8 @@ bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
/*IsMemberFunction=*/false, lbrac, Method->getSourceRange(),
CallType);
+ CheckTCBEnforcement(lbrac, Method);
+
return false;
}
@@ -5068,7 +7749,7 @@ bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
}
checkCall(NDecl, Proto, /*ThisArg=*/nullptr,
- llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
+ llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
/*IsMemberFunction=*/false, TheCall->getRParenLoc(),
TheCall->getCallee()->getSourceRange(), CallType);
@@ -5081,7 +7762,7 @@ bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
TheCall->getCallee());
checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr,
- llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
+ llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
/*IsMemberFunction=*/false, TheCall->getRParenLoc(),
TheCall->getCallee()->getSourceRange(), CallType);
@@ -5100,15 +7781,21 @@ static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
case AtomicExpr::AO__c11_atomic_load:
case AtomicExpr::AO__opencl_atomic_load:
+ case AtomicExpr::AO__hip_atomic_load:
case AtomicExpr::AO__atomic_load_n:
case AtomicExpr::AO__atomic_load:
+ case AtomicExpr::AO__scoped_atomic_load_n:
+ case AtomicExpr::AO__scoped_atomic_load:
return OrderingCABI != llvm::AtomicOrderingCABI::release &&
OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__opencl_atomic_store:
+ case AtomicExpr::AO__hip_atomic_store:
case AtomicExpr::AO__atomic_store:
case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__scoped_atomic_store:
+ case AtomicExpr::AO__scoped_atomic_store_n:
return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
@@ -5183,14 +7870,30 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
"need to update code for modified C11 atomics");
bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init &&
Op <= AtomicExpr::AO__opencl_atomic_fetch_max;
+ bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_load &&
+ Op <= AtomicExpr::AO__hip_atomic_fetch_max;
+ bool IsScoped = Op >= AtomicExpr::AO__scoped_atomic_load &&
+ Op <= AtomicExpr::AO__scoped_atomic_fetch_max;
bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init &&
Op <= AtomicExpr::AO__c11_atomic_fetch_min) ||
IsOpenCL;
bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
Op == AtomicExpr::AO__atomic_store_n ||
Op == AtomicExpr::AO__atomic_exchange_n ||
- Op == AtomicExpr::AO__atomic_compare_exchange_n;
- bool IsAddSub = false;
+ Op == AtomicExpr::AO__atomic_compare_exchange_n ||
+ Op == AtomicExpr::AO__scoped_atomic_load_n ||
+ Op == AtomicExpr::AO__scoped_atomic_store_n ||
+ Op == AtomicExpr::AO__scoped_atomic_exchange_n ||
+ Op == AtomicExpr::AO__scoped_atomic_compare_exchange_n;
+ // Bit mask for extra allowed value types other than integers for atomic
+ // arithmetic operations. Add/sub allow pointer and floating point. Min/max
+ // allow floating point.
+ enum ArithOpExtraValueType {
+ AOEVT_None = 0,
+ AOEVT_Pointer = 1,
+ AOEVT_FP = 2,
+ };
+ unsigned ArithAllows = AOEVT_None;
switch (Op) {
case AtomicExpr::AO__c11_atomic_init:
@@ -5200,35 +7903,67 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
case AtomicExpr::AO__c11_atomic_load:
case AtomicExpr::AO__opencl_atomic_load:
+ case AtomicExpr::AO__hip_atomic_load:
case AtomicExpr::AO__atomic_load_n:
+ case AtomicExpr::AO__scoped_atomic_load_n:
Form = Load;
break;
case AtomicExpr::AO__atomic_load:
+ case AtomicExpr::AO__scoped_atomic_load:
Form = LoadCopy;
break;
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__opencl_atomic_store:
+ case AtomicExpr::AO__hip_atomic_store:
case AtomicExpr::AO__atomic_store:
case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__scoped_atomic_store:
+ case AtomicExpr::AO__scoped_atomic_store_n:
Form = Copy;
break;
-
- case AtomicExpr::AO__c11_atomic_fetch_add:
- case AtomicExpr::AO__c11_atomic_fetch_sub:
- case AtomicExpr::AO__opencl_atomic_fetch_add:
- case AtomicExpr::AO__opencl_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_sub:
case AtomicExpr::AO__atomic_add_fetch:
case AtomicExpr::AO__atomic_sub_fetch:
- IsAddSub = true;
+ case AtomicExpr::AO__scoped_atomic_fetch_add:
+ case AtomicExpr::AO__scoped_atomic_fetch_sub:
+ case AtomicExpr::AO__scoped_atomic_add_fetch:
+ case AtomicExpr::AO__scoped_atomic_sub_fetch:
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__opencl_atomic_fetch_add:
+ case AtomicExpr::AO__opencl_atomic_fetch_sub:
+ case AtomicExpr::AO__hip_atomic_fetch_add:
+ case AtomicExpr::AO__hip_atomic_fetch_sub:
+ ArithAllows = AOEVT_Pointer | AOEVT_FP;
+ Form = Arithmetic;
+ break;
+ case AtomicExpr::AO__atomic_fetch_max:
+ case AtomicExpr::AO__atomic_fetch_min:
+ case AtomicExpr::AO__atomic_max_fetch:
+ case AtomicExpr::AO__atomic_min_fetch:
+ case AtomicExpr::AO__scoped_atomic_fetch_max:
+ case AtomicExpr::AO__scoped_atomic_fetch_min:
+ case AtomicExpr::AO__scoped_atomic_max_fetch:
+ case AtomicExpr::AO__scoped_atomic_min_fetch:
+ case AtomicExpr::AO__c11_atomic_fetch_max:
+ case AtomicExpr::AO__c11_atomic_fetch_min:
+ case AtomicExpr::AO__opencl_atomic_fetch_max:
+ case AtomicExpr::AO__opencl_atomic_fetch_min:
+ case AtomicExpr::AO__hip_atomic_fetch_max:
+ case AtomicExpr::AO__hip_atomic_fetch_min:
+ ArithAllows = AOEVT_FP;
Form = Arithmetic;
break;
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__hip_atomic_fetch_and:
+ case AtomicExpr::AO__hip_atomic_fetch_or:
+ case AtomicExpr::AO__hip_atomic_fetch_xor:
+ case AtomicExpr::AO__c11_atomic_fetch_nand:
case AtomicExpr::AO__opencl_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_xor:
@@ -5240,56 +7975,62 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
case AtomicExpr::AO__atomic_or_fetch:
case AtomicExpr::AO__atomic_xor_fetch:
case AtomicExpr::AO__atomic_nand_fetch:
- Form = Arithmetic;
- break;
- case AtomicExpr::AO__c11_atomic_fetch_min:
- case AtomicExpr::AO__c11_atomic_fetch_max:
- case AtomicExpr::AO__opencl_atomic_fetch_min:
- case AtomicExpr::AO__opencl_atomic_fetch_max:
- case AtomicExpr::AO__atomic_min_fetch:
- case AtomicExpr::AO__atomic_max_fetch:
- case AtomicExpr::AO__atomic_fetch_min:
- case AtomicExpr::AO__atomic_fetch_max:
+ case AtomicExpr::AO__scoped_atomic_fetch_and:
+ case AtomicExpr::AO__scoped_atomic_fetch_or:
+ case AtomicExpr::AO__scoped_atomic_fetch_xor:
+ case AtomicExpr::AO__scoped_atomic_fetch_nand:
+ case AtomicExpr::AO__scoped_atomic_and_fetch:
+ case AtomicExpr::AO__scoped_atomic_or_fetch:
+ case AtomicExpr::AO__scoped_atomic_xor_fetch:
+ case AtomicExpr::AO__scoped_atomic_nand_fetch:
Form = Arithmetic;
break;
case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__hip_atomic_exchange:
case AtomicExpr::AO__opencl_atomic_exchange:
case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__scoped_atomic_exchange_n:
Form = Xchg;
break;
case AtomicExpr::AO__atomic_exchange:
+ case AtomicExpr::AO__scoped_atomic_exchange:
Form = GNUXchg;
break;
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
Form = C11CmpXchg;
break;
case AtomicExpr::AO__atomic_compare_exchange:
case AtomicExpr::AO__atomic_compare_exchange_n:
+ case AtomicExpr::AO__scoped_atomic_compare_exchange:
+ case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
Form = GNUCmpXchg;
break;
}
unsigned AdjustedNumArgs = NumArgs[Form];
- if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init)
+ if ((IsOpenCL || IsHIP || IsScoped) &&
+ Op != AtomicExpr::AO__opencl_atomic_init)
++AdjustedNumArgs;
// Check we have the right number of arguments.
if (Args.size() < AdjustedNumArgs) {
Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args)
<< 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
- << ExprRange;
+ << /*is non object*/ 0 << ExprRange;
return ExprError();
} else if (Args.size() > AdjustedNumArgs) {
Diag(Args[AdjustedNumArgs]->getBeginLoc(),
diag::err_typecheck_call_too_many_args)
<< 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
- << ExprRange;
+ << /*is non object*/ 0 << ExprRange;
return ExprError();
}
@@ -5334,14 +8075,15 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
// For an arithmetic operation, the implied arithmetic must be well-formed.
if (Form == Arithmetic) {
- // gcc does not enforce these rules for GNU atomics, but we do so for
- // sanity.
- auto IsAllowedValueType = [&](QualType ValType) {
+ // GCC does not enforce these rules for GNU atomics, but we do to help catch
+ // trivial type errors.
+ auto IsAllowedValueType = [&](QualType ValType,
+ unsigned AllowedType) -> bool {
if (ValType->isIntegerType())
return true;
if (ValType->isPointerType())
- return true;
- if (!ValType->isFloatingType())
+ return AllowedType & AOEVT_Pointer;
+ if (!(ValType->isFloatingType() && (AllowedType & AOEVT_FP)))
return false;
// LLVM Parser does not allow atomicrmw with x86_fp80 type.
if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) &&
@@ -5350,13 +8092,13 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
return false;
return true;
};
- if (IsAddSub && !IsAllowedValueType(ValType)) {
- Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_ptr_or_fp)
- << IsC11 << Ptr->getType() << Ptr->getSourceRange();
- return ExprError();
- }
- if (!IsAddSub && !ValType->isIntegerType()) {
- Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int)
+ if (!IsAllowedValueType(ValType, ArithAllows)) {
+ auto DID = ArithAllows & AOEVT_FP
+ ? (ArithAllows & AOEVT_Pointer
+ ? diag::err_atomic_op_needs_atomic_int_ptr_or_fp
+ : diag::err_atomic_op_needs_atomic_int_or_fp)
+ : diag::err_atomic_op_needs_atomic_int;
+ Diag(ExprRange.getBegin(), DID)
<< IsC11 << Ptr->getType() << Ptr->getSourceRange();
return ExprError();
}
@@ -5376,7 +8118,9 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) &&
!AtomTy->isScalarType()) {
// For GNU atomics, require a trivially-copyable type. This is not part of
- // the GNU atomics specification, but we enforce it for sanity.
+ // the GNU atomics specification but we enforce it for consistency with
+ // other atomics which generally all require a trivially-copyable type. This
+ // is because atomics just copy bits.
Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy)
<< Ptr->getType() << Ptr->getSourceRange();
return ExprError();
@@ -5415,7 +8159,7 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
// arguments are actually passed as pointers.
QualType ByValType = ValType; // 'CP'
bool IsPassedByAddress = false;
- if (!IsC11 && !IsN) {
+ if (!IsC11 && !IsHIP && !IsN) {
ByValType = Ptr->getType();
IsPassedByAddress = true;
}
@@ -5568,18 +8312,36 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
break;
}
+ // If the memory orders are constants, check they are valid.
if (SubExprs.size() >= 2 && Form != Init) {
- if (Optional<llvm::APSInt> Result =
- SubExprs[1]->getIntegerConstantExpr(Context))
- if (!isValidOrderingForOp(Result->getSExtValue(), Op))
- Diag(SubExprs[1]->getBeginLoc(),
- diag::warn_atomic_op_has_invalid_memory_order)
- << SubExprs[1]->getSourceRange();
+ std::optional<llvm::APSInt> Success =
+ SubExprs[1]->getIntegerConstantExpr(Context);
+ if (Success && !isValidOrderingForOp(Success->getSExtValue(), Op)) {
+ Diag(SubExprs[1]->getBeginLoc(),
+ diag::warn_atomic_op_has_invalid_memory_order)
+ << /*success=*/(Form == C11CmpXchg || Form == GNUCmpXchg)
+ << SubExprs[1]->getSourceRange();
+ }
+ if (SubExprs.size() >= 5) {
+ if (std::optional<llvm::APSInt> Failure =
+ SubExprs[3]->getIntegerConstantExpr(Context)) {
+ if (!llvm::is_contained(
+ {llvm::AtomicOrderingCABI::relaxed,
+ llvm::AtomicOrderingCABI::consume,
+ llvm::AtomicOrderingCABI::acquire,
+ llvm::AtomicOrderingCABI::seq_cst},
+ (llvm::AtomicOrderingCABI)Failure->getSExtValue())) {
+ Diag(SubExprs[3]->getBeginLoc(),
+ diag::warn_atomic_op_has_invalid_memory_order)
+ << /*failure=*/2 << SubExprs[3]->getSourceRange();
+ }
+ }
+ }
}
if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) {
auto *Scope = Args[Args.size() - 1];
- if (Optional<llvm::APSInt> Result =
+ if (std::optional<llvm::APSInt> Result =
Scope->getIntegerConstantExpr(Context)) {
if (!ScopeModel->isValid(Result->getZExtValue()))
Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope)
@@ -5594,16 +8356,19 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
if ((Op == AtomicExpr::AO__c11_atomic_load ||
Op == AtomicExpr::AO__c11_atomic_store ||
Op == AtomicExpr::AO__opencl_atomic_load ||
- Op == AtomicExpr::AO__opencl_atomic_store ) &&
+ Op == AtomicExpr::AO__hip_atomic_load ||
+ Op == AtomicExpr::AO__opencl_atomic_store ||
+ Op == AtomicExpr::AO__hip_atomic_store) &&
Context.AtomicUsesUnsupportedLibcall(AE))
Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib)
<< ((Op == AtomicExpr::AO__c11_atomic_load ||
- Op == AtomicExpr::AO__opencl_atomic_load)
+ Op == AtomicExpr::AO__opencl_atomic_load ||
+ Op == AtomicExpr::AO__hip_atomic_load)
? 0
: 1);
- if (ValType->isExtIntType()) {
- Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_ext_int_prohibit);
+ if (ValType->isBitIntType()) {
+ Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_bit_int_prohibit);
return ExprError();
}
@@ -5625,7 +8390,7 @@ static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
InitializedEntity Entity =
InitializedEntity::InitializeParameter(S.Context, Param);
- ExprResult Arg = E->getArg(0);
+ ExprResult Arg = E->getArg(ArgIndex);
Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
if (Arg.isInvalid())
return true;
@@ -5634,6 +8399,35 @@ static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
return false;
}
+bool Sema::BuiltinWasmRefNullExtern(CallExpr *TheCall) {
+ if (TheCall->getNumArgs() != 0)
+ return true;
+
+ TheCall->setType(Context.getWebAssemblyExternrefType());
+
+ return false;
+}
+
+bool Sema::BuiltinWasmRefNullFunc(CallExpr *TheCall) {
+ if (TheCall->getNumArgs() != 0) {
+ Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_many_args)
+ << 0 /*function call*/ << /*expected*/ 0 << TheCall->getNumArgs()
+ << /*is non object*/ 0;
+ return true;
+ }
+
+ // This custom type checking code ensures that the nodes are as expected
+ // in order to later on generate the necessary builtin.
+ QualType Pointee = Context.getFunctionType(Context.VoidTy, {}, {});
+ QualType Type = Context.getPointerType(Pointee);
+ Pointee = Context.getAddrSpaceQualType(Pointee, LangAS::wasm_funcref);
+ Type = Context.getAttributedType(attr::WebAssemblyFuncref, Type,
+ Context.getPointerType(Pointee));
+ TheCall->setType(Type);
+
+ return false;
+}
+
/// We have a call to a function like __sync_fetch_and_add, which is an
/// overloaded function based on the pointer type of its first argument.
/// The main BuildCallExpr routines have already promoted the types of
@@ -5651,7 +8445,8 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
// Ensure that we have at least one argument to do type inference from.
if (TheCall->getNumArgs() < 1) {
Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
- << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange();
+ << 0 << 1 << TheCall->getNumArgs() << /*is non object*/ 0
+ << Callee->getSourceRange();
return ExprError();
}
@@ -5927,7 +8722,7 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
// have at least that many.
if (TheCall->getNumArgs() < 1+NumFixed) {
Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
- << 0 << 1 + NumFixed << TheCall->getNumArgs()
+ << 0 << 1 + NumFixed << TheCall->getNumArgs() << /*is non object*/ 0
<< Callee->getSourceRange();
return ExprError();
}
@@ -5943,7 +8738,7 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
// Get the decl for the concrete builtin from this, we can tell what the
// concrete integer type we should convert to is.
unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
- const char *NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID);
+ StringRef NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID);
FunctionDecl *NewBuiltinDecl;
if (NewBuiltinID == BuiltinID)
NewBuiltinDecl = FDecl;
@@ -6000,11 +8795,11 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
// gracefully.
TheCall->setType(ResultType);
- // Prohibit use of _ExtInt with atomic builtins.
- // The arguments would have already been converted to the first argument's
- // type, so only need to check the first argument.
- const auto *ExtIntValType = ValType->getAs<ExtIntType>();
- if (ExtIntValType && !llvm::isPowerOf2_64(ExtIntValType->getNumBits())) {
+ // Prohibit problematic uses of bit-precise integer types with atomic
+ // builtins. The arguments would have already been converted to the first
+ // argument's type, so only need to check the first argument.
+ const auto *BitIntValType = ValType->getAs<BitIntType>();
+ if (BitIntValType && !llvm::isPowerOf2_64(BitIntValType->getNumBits())) {
Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
return ExprError();
}
@@ -6092,7 +8887,7 @@ bool Sema::CheckObjCString(Expr *Arg) {
Arg = Arg->IgnoreParenCasts();
StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);
- if (!Literal || !Literal->isAscii()) {
+ if (!Literal || !Literal->isOrdinary()) {
Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
<< Arg->getSourceRange();
return true;
@@ -6127,7 +8922,7 @@ ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
}
}
- if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) {
+ if (!Literal || (!Literal->isOrdinary() && !Literal->isUTF8())) {
return ExprError(
Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant)
<< Arg->getSourceRange());
@@ -6225,6 +9020,9 @@ bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
if (checkVAStartABI(*this, BuiltinID, Fn))
return true;
+ // In C23 mode, va_start only needs one argument. However, the builtin still
+ // requires two arguments (which matches the behavior of the GCC builtin),
+ // <stdarg.h> passes `0` as the second argument in C23 mode.
if (checkArgCount(*this, TheCall, 2))
return true;
@@ -6238,9 +9036,15 @@ bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
return true;
// Verify that the second argument to the builtin is the last argument of the
- // current function or method.
+ // current function or method. In C23 mode, if the second argument is an
+ // integer constant expression with value 0, then we don't bother with this
+ // check.
bool SecondArgIsLastNamedArgument = false;
const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts();
+ if (std::optional<llvm::APSInt> Val =
+ TheCall->getArg(1)->getIntegerConstantExpr(Context);
+ Val && LangOpts.C23 && *Val == 0)
+ return false;
// These are valid if SecondArgIsLastNamedArgument is false after the next
// block.
@@ -6266,7 +9070,7 @@ bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
Type->isSpecificBuiltinType(BuiltinType::Float) || [=] {
// Promotable integers are UB, but enumerations need a bit of
// extra checking to see what their promotable type actually is.
- if (!Type->isPromotableIntegerType())
+ if (!Context.isPromotableIntegerType(Type))
return false;
if (!Type->isEnumeralType())
return true;
@@ -6281,11 +9085,25 @@ bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
Diag(ParamLoc, diag::note_parameter_type) << Type;
}
- TheCall->setType(Context.VoidTy);
return false;
}
bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
+ auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool {
+ const LangOptions &LO = getLangOpts();
+
+ if (LO.CPlusPlus)
+ return Arg->getType()
+ .getCanonicalType()
+ .getTypePtr()
+ ->getPointeeType()
+ .withoutLocalFastQualifiers() == Context.CharTy;
+
+ // In C, allow aliasing through `char *`, this is required for AArch64 at
+ // least.
+ return true;
+ };
+
// void __va_start(va_list *ap, const char *named_addr, size_t slot_size,
// const char *named_addr);
@@ -6294,7 +9112,8 @@ bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
if (Call->getNumArgs() < 3)
return Diag(Call->getEndLoc(),
diag::err_typecheck_call_too_few_args_at_least)
- << 0 /*function call*/ << 3 << Call->getNumArgs();
+ << 0 /*function call*/ << 3 << Call->getNumArgs()
+ << /*is non object*/ 0;
// Type-check the first argument normally.
if (checkBuiltinArgument(*this, Call, 0))
@@ -6314,8 +9133,7 @@ bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
const QualType &ConstCharPtrTy =
Context.getPointerType(Context.CharTy.withConst());
- if (!Arg1Ty->isPointerType() ||
- Arg1Ty->getPointeeType().withoutLocalFastQualifiers() != Context.CharTy)
+ if (!Arg1Ty->isPointerType() || !IsSuitablyTypedFormatArgument(Arg1))
Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible)
<< Arg1->getType() << ConstCharPtrTy << 1 /* different class */
<< 0 /* qualifier difference */
@@ -6335,10 +9153,15 @@ bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
/// friends. This is declared to take (...), so we have to check everything.
-bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
+bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall, unsigned BuiltinID) {
if (checkArgCount(*this, TheCall, 2))
return true;
+ if (BuiltinID == Builtin::BI__builtin_isunordered &&
+ TheCall->getFPFeaturesInEffect(getLangOpts()).getNoHonorNaNs())
+ Diag(TheCall->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
+ << 1 << 0 << TheCall->getSourceRange();
+
ExprResult OrigArg0 = TheCall->getArg(0);
ExprResult OrigArg1 = TheCall->getArg(1);
@@ -6372,15 +9195,32 @@ bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
/// SemaBuiltinSemaBuiltinFPClassification - Handle functions like
/// __builtin_isnan and friends. This is declared to take (...), so we have
-/// to check everything. We expect the last argument to be a floating point
-/// value.
-bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
+/// to check everything.
+bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs,
+ unsigned BuiltinID) {
if (checkArgCount(*this, TheCall, NumArgs))
return true;
- // __builtin_fpclassify is the only case where NumArgs != 1, so we can count
- // on all preceding parameters just being int. Try all of those.
- for (unsigned i = 0; i < NumArgs - 1; ++i) {
+ FPOptions FPO = TheCall->getFPFeaturesInEffect(getLangOpts());
+ if (FPO.getNoHonorInfs() && (BuiltinID == Builtin::BI__builtin_isfinite ||
+ BuiltinID == Builtin::BI__builtin_isinf ||
+ BuiltinID == Builtin::BI__builtin_isinf_sign))
+ Diag(TheCall->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
+ << 0 << 0 << TheCall->getSourceRange();
+
+ if (FPO.getNoHonorNaNs() && (BuiltinID == Builtin::BI__builtin_isnan ||
+ BuiltinID == Builtin::BI__builtin_isunordered))
+ Diag(TheCall->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
+ << 1 << 0 << TheCall->getSourceRange();
+
+ bool IsFPClass = NumArgs == 2;
+
+ // Find out position of floating-point argument.
+ unsigned FPArgNo = IsFPClass ? 0 : NumArgs - 1;
+
+ // We can count on all parameters preceding the floating-point just being int.
+ // Try all of those.
+ for (unsigned i = 0; i < FPArgNo; ++i) {
Expr *Arg = TheCall->getArg(i);
if (Arg->isTypeDependent())
@@ -6393,7 +9233,7 @@ bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
TheCall->setArg(i, Res.get());
}
- Expr *OrigArg = TheCall->getArg(NumArgs-1);
+ Expr *OrigArg = TheCall->getArg(FPArgNo);
if (OrigArg->isTypeDependent())
return false;
@@ -6405,14 +9245,39 @@ bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
OrigArg = UsualUnaryConversions(OrigArg).get();
else
OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get();
- TheCall->setArg(NumArgs - 1, OrigArg);
+ TheCall->setArg(FPArgNo, OrigArg);
+
+ QualType VectorResultTy;
+ QualType ElementTy = OrigArg->getType();
+ // TODO: When all classification function are implemented with is_fpclass,
+ // vector argument can be supported in all of them.
+ if (ElementTy->isVectorType() && IsFPClass) {
+ VectorResultTy = GetSignedVectorType(ElementTy);
+ ElementTy = ElementTy->getAs<VectorType>()->getElementType();
+ }
// This operation requires a non-_Complex floating-point number.
- if (!OrigArg->getType()->isRealFloatingType())
+ if (!ElementTy->isRealFloatingType())
return Diag(OrigArg->getBeginLoc(),
diag::err_typecheck_call_invalid_unary_fp)
<< OrigArg->getType() << OrigArg->getSourceRange();
+ // __builtin_isfpclass has integer parameter that specify test mask. It is
+ // passed in (...), so it should be analyzed completely here.
+ if (IsFPClass)
+ if (SemaBuiltinConstantArgRange(TheCall, 1, 0, llvm::fcAllFlags))
+ return true;
+
+ // TODO: enable this code to all classification functions.
+ if (IsFPClass) {
+ QualType ResultTy;
+ if (!VectorResultTy.isNull())
+ ResultTy = VectorResultTy;
+ else
+ ResultTy = Context.IntTy;
+ TheCall->setType(ResultTy);
+ }
+
return false;
}
@@ -6527,7 +9392,7 @@ ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
return ExprError(Diag(TheCall->getEndLoc(),
diag::err_typecheck_call_too_few_args_at_least)
<< 0 /*function call*/ << 2 << TheCall->getNumArgs()
- << TheCall->getSourceRange());
+ << /*is non object*/ 0 << TheCall->getSourceRange());
// Determine which of the following types of shufflevector we're checking:
// 1) unary, vector mask: (lhs, mask)
@@ -6569,8 +9434,8 @@ ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
TheCall->getArg(1)->getEndLoc()));
} else if (numElements != numResElements) {
QualType eltType = LHSType->castAs<VectorType>()->getElementType();
- resType = Context.getVectorType(eltType, numResElements,
- VectorType::GenericVector);
+ resType =
+ Context.getVectorType(eltType, numResElements, VectorKind::Generic);
}
}
@@ -6579,14 +9444,14 @@ ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
TheCall->getArg(i)->isValueDependent())
continue;
- Optional<llvm::APSInt> Result;
+ std::optional<llvm::APSInt> Result;
if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context)))
return ExprError(Diag(TheCall->getBeginLoc(),
diag::err_shufflevector_nonconstant_argument)
<< TheCall->getArg(i)->getSourceRange());
// Allow -1 which will be translated to undef in the IR.
- if (Result->isSigned() && Result->isAllOnesValue())
+ if (Result->isSigned() && Result->isAllOnes())
continue;
if (Result->getActiveBits() > 64 ||
@@ -6622,8 +9487,9 @@ ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
diag::err_convertvector_non_vector)
<< E->getSourceRange());
if (!DstTy->isVectorType() && !DstTy->isDependentType())
- return ExprError(Diag(BuiltinLoc,
- diag::err_convertvector_non_vector_type));
+ return ExprError(Diag(BuiltinLoc, diag::err_builtin_non_vector_type)
+ << "second"
+ << "__builtin_convertvector");
if (!SrcTy->isDependentType() && !DstTy->isDependentType()) {
unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements();
@@ -6647,7 +9513,8 @@ bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) {
if (NumArgs > 3)
return Diag(TheCall->getEndLoc(),
diag::err_typecheck_call_too_many_args_at_most)
- << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange();
+ << 0 /*function call*/ << 3 << NumArgs << /*is non object*/ 0
+ << TheCall->getSourceRange();
// Argument 0 is checked for us and the remaining arguments must be
// constant integers.
@@ -6733,38 +9600,44 @@ bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) {
/// Handle __builtin_assume_aligned. This is declared
/// as (const void*, size_t, ...) and can take one optional constant int arg.
bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
+ if (checkArgCountRange(*this, TheCall, 2, 3))
+ return true;
+
unsigned NumArgs = TheCall->getNumArgs();
+ Expr *FirstArg = TheCall->getArg(0);
- if (NumArgs > 3)
- return Diag(TheCall->getEndLoc(),
- diag::err_typecheck_call_too_many_args_at_most)
- << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange();
+ {
+ ExprResult FirstArgResult =
+ DefaultFunctionArrayLvalueConversion(FirstArg);
+ if (checkBuiltinArgument(*this, TheCall, 0))
+ return true;
+ /// In-place updation of FirstArg by checkBuiltinArgument is ignored.
+ TheCall->setArg(0, FirstArgResult.get());
+ }
// The alignment must be a constant integer.
- Expr *Arg = TheCall->getArg(1);
+ Expr *SecondArg = TheCall->getArg(1);
// We can't check the value of a dependent argument.
- if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
+ if (!SecondArg->isValueDependent()) {
llvm::APSInt Result;
if (SemaBuiltinConstantArg(TheCall, 1, Result))
return true;
if (!Result.isPowerOf2())
return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
- << Arg->getSourceRange();
+ << SecondArg->getSourceRange();
if (Result > Sema::MaximumAlignment)
Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great)
- << Arg->getSourceRange() << Sema::MaximumAlignment;
+ << SecondArg->getSourceRange() << Sema::MaximumAlignment;
}
if (NumArgs > 2) {
- ExprResult Arg(TheCall->getArg(2));
- InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
- Context.getSizeType(), false);
- Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
- if (Arg.isInvalid()) return true;
- TheCall->setArg(2, Arg.get());
+ Expr *ThirdArg = TheCall->getArg(2);
+ if (convertArgumentToType(*this, ThirdArg, Context.getSizeType()))
+ return true;
+ TheCall->setArg(2, ThirdArg);
}
return false;
@@ -6780,13 +9653,13 @@ bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
if (NumArgs < NumRequiredArgs) {
return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
<< 0 /* function call */ << NumRequiredArgs << NumArgs
- << TheCall->getSourceRange();
+ << /*is non object*/ 0 << TheCall->getSourceRange();
}
if (NumArgs >= NumRequiredArgs + 0x100) {
return Diag(TheCall->getEndLoc(),
diag::err_typecheck_call_too_many_args_at_most)
<< 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs
- << TheCall->getSourceRange();
+ << /*is non object*/ 0 << TheCall->getSourceRange();
}
unsigned i = 0;
@@ -6835,7 +9708,7 @@ bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
llvm::SmallBitVector CheckedVarArgs(NumArgs, false);
ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs());
bool Success = CheckFormatArguments(
- Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog,
+ Args, FAPK_Variadic, FormatIdx, FirstDataArg, FST_OSLog,
VariadicFunction, TheCall->getBeginLoc(), SourceRange(),
CheckedVarArgs);
if (!Success)
@@ -6860,7 +9733,7 @@ bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;
- Optional<llvm::APSInt> R;
+ std::optional<llvm::APSInt> R;
if (!(R = Arg->getIntegerConstantExpr(Context)))
return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type)
<< FDecl->getDeclName() << Arg->getSourceRange();
@@ -6872,7 +9745,7 @@ bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
/// TheCall is a constant expression in the range [Low, High].
bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
int Low, int High, bool RangeIsError) {
- if (isConstantEvaluated())
+ if (isConstantEvaluatedContext())
return false;
llvm::APSInt Result;
@@ -7195,6 +10068,8 @@ bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
BuiltinID == ARM::BI__builtin_arm_wsrp;
bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
+ BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
+ BuiltinID == AArch64::BI__builtin_arm_wsr128 ||
BuiltinID == AArch64::BI__builtin_arm_rsr ||
BuiltinID == AArch64::BI__builtin_arm_rsrp ||
BuiltinID == AArch64::BI__builtin_arm_wsr ||
@@ -7229,18 +10104,18 @@ bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
bool ValidString = true;
if (IsARMBuiltin) {
- ValidString &= Fields[0].startswith_insensitive("cp") ||
- Fields[0].startswith_insensitive("p");
+ ValidString &= Fields[0].starts_with_insensitive("cp") ||
+ Fields[0].starts_with_insensitive("p");
if (ValidString)
Fields[0] = Fields[0].drop_front(
- Fields[0].startswith_insensitive("cp") ? 2 : 1);
+ Fields[0].starts_with_insensitive("cp") ? 2 : 1);
- ValidString &= Fields[2].startswith_insensitive("c");
+ ValidString &= Fields[2].starts_with_insensitive("c");
if (ValidString)
Fields[2] = Fields[2].drop_front(1);
if (FiveFields) {
- ValidString &= Fields[3].startswith_insensitive("c");
+ ValidString &= Fields[3].starts_with_insensitive("c");
if (ValidString)
Fields[3] = Fields[3].drop_front(1);
}
@@ -7262,21 +10137,51 @@ bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
<< Arg->getSourceRange();
} else if (IsAArch64Builtin && Fields.size() == 1) {
- // If the register name is one of those that appear in the condition below
- // and the special register builtin being used is one of the write builtins,
- // then we require that the argument provided for writing to the register
- // is an integer constant expression. This is because it will be lowered to
- // an MSR (immediate) instruction, so we need to know the immediate at
- // compile time.
+ // This code validates writes to PSTATE registers.
+
+ // Not a write.
if (TheCall->getNumArgs() != 2)
return false;
- std::string RegLower = Reg.lower();
- if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" &&
- RegLower != "pan" && RegLower != "uao")
+ // The 128-bit system register accesses do not touch PSTATE.
+ if (BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
+ BuiltinID == AArch64::BI__builtin_arm_wsr128)
return false;
- return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
+ // These are the named PSTATE accesses using "MSR (immediate)" instructions,
+ // along with the upper limit on the immediates allowed.
+ auto MaxLimit = llvm::StringSwitch<std::optional<unsigned>>(Reg)
+ .CaseLower("spsel", 15)
+ .CaseLower("daifclr", 15)
+ .CaseLower("daifset", 15)
+ .CaseLower("pan", 15)
+ .CaseLower("uao", 15)
+ .CaseLower("dit", 15)
+ .CaseLower("ssbs", 15)
+ .CaseLower("tco", 15)
+ .CaseLower("allint", 1)
+ .CaseLower("pm", 1)
+ .Default(std::nullopt);
+
+ // If this is not a named PSTATE, just continue without validating, as this
+ // will be lowered to an "MSR (register)" instruction directly
+ if (!MaxLimit)
+ return false;
+
+ // Here we only allow constants in the range for that pstate, as required by
+ // the ACLE.
+ //
+ // While clang also accepts the names of system registers in its ACLE
+ // intrinsics, we prevent this with the PSTATE names used in MSR (immediate)
+ // as the value written via a register is different to the value used as an
+ // immediate to have the same effect. e.g., for the instruction `msr tco,
+ // x0`, it is bit 25 of register x0 that is written into PSTATE.TCO, but
+ // with `msr tco, #imm`, it is bit 0 of xN that is written into PSTATE.TCO.
+ //
+ // If a programmer wants to codegen the MSR (register) form of `msr tco,
+ // xN`, they can still do so by specifying the register using five
+ // colon-separated numbers in a string.
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, *MaxLimit);
}
return false;
@@ -7286,7 +10191,8 @@ bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
/// Emit an error and return true on failure; return false on success.
/// TypeStr is a string containing the type descriptor of the value returned by
/// the builtin and the descriptors of the expected type of the arguments.
-bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, const char *TypeStr) {
+bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
+ const char *TypeStr) {
assert((TypeStr[0] != '\0') &&
"Invalid types in PPC MMA builtin declaration");
@@ -7308,13 +10214,23 @@ bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, const char *TypeStr) {
}
Expr *Arg = TheCall->getArg(ArgNum);
- QualType ArgType = Arg->getType();
-
- if ((ExpectedType->isVoidPointerType() && !ArgType->isPointerType()) ||
- (!ExpectedType->isVoidPointerType() &&
- ArgType.getCanonicalType() != ExpectedType))
- return Diag(Arg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
- << ArgType << ExpectedType << 1 << 0 << 0;
+ QualType PassedType = Arg->getType();
+ QualType StrippedRVType = PassedType.getCanonicalType();
+
+ // Strip Restrict/Volatile qualifiers.
+ if (StrippedRVType.isRestrictQualified() ||
+ StrippedRVType.isVolatileQualified())
+ StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType();
+
+ // The only case where the argument type and expected type are allowed to
+ // mismatch is if the argument type is a non-void pointer (or array) and
+ // expected type is a void pointer.
+ if (StrippedRVType != ExpectedType)
+ if (!(ExpectedType->isVoidPointerType() &&
+ (StrippedRVType->isPointerType() || StrippedRVType->isArrayType())))
+ return Diag(Arg->getBeginLoc(),
+ diag::err_typecheck_convert_incompatible)
+ << PassedType << ExpectedType << 1 << 0 << 0;
// If the value of the Mask is not 0, we have a constraint in the size of
// the integer argument so here we ensure the argument is a constant that
@@ -7492,11 +10408,11 @@ class FormatStringLiteral {
unsigned getLength() const { return FExpr->getLength() - Offset; }
unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); }
- StringLiteral::StringKind getKind() const { return FExpr->getKind(); }
+ StringLiteralKind getKind() const { return FExpr->getKind(); }
QualType getType() const { return FExpr->getType(); }
- bool isAscii() const { return FExpr->isAscii(); }
+ bool isAscii() const { return FExpr->isOrdinary(); }
bool isWide() const { return FExpr->isWide(); }
bool isUTF8() const { return FExpr->isUTF8(); }
bool isUTF16() const { return FExpr->isUTF16(); }
@@ -7518,19 +10434,18 @@ class FormatStringLiteral {
SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); }
};
-} // namespace
+} // namespace
-static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
- const Expr *OrigFormatExpr,
- ArrayRef<const Expr *> Args,
- bool HasVAListArg, unsigned format_idx,
- unsigned firstDataArg,
- Sema::FormatStringType Type,
- bool inFunctionCall,
- Sema::VariadicCallType CallType,
- llvm::SmallBitVector &CheckedVarArgs,
- UncoveredArgHandler &UncoveredArg,
- bool IgnoreStringsWithoutSpecifiers);
+static void CheckFormatString(
+ Sema &S, const FormatStringLiteral *FExpr, const Expr *OrigFormatExpr,
+ ArrayRef<const Expr *> Args, Sema::FormatArgumentPassingKind APK,
+ unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type,
+ bool inFunctionCall, Sema::VariadicCallType CallType,
+ llvm::SmallBitVector &CheckedVarArgs, UncoveredArgHandler &UncoveredArg,
+ bool IgnoreStringsWithoutSpecifiers);
+
+static const Expr *maybeConstEvalStringLiteral(ASTContext &Context,
+ const Expr *E);
// Determine if an expression is a string literal or constant string.
// If this function returns false on the arguments to a function expecting a
@@ -7538,16 +10453,15 @@ static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
// True string literals are then checked by CheckFormatString.
static StringLiteralCheckType
checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
- bool HasVAListArg, unsigned format_idx,
+ Sema::FormatArgumentPassingKind APK, unsigned format_idx,
unsigned firstDataArg, Sema::FormatStringType Type,
Sema::VariadicCallType CallType, bool InFunctionCall,
llvm::SmallBitVector &CheckedVarArgs,
- UncoveredArgHandler &UncoveredArg,
- llvm::APSInt Offset,
+ UncoveredArgHandler &UncoveredArg, llvm::APSInt Offset,
bool IgnoreStringsWithoutSpecifiers = false) {
- if (S.isConstantEvaluated())
+ if (S.isConstantEvaluatedContext())
return SLCT_NotALiteral;
- tryAgain:
+tryAgain:
assert(Offset.isSigned() && "invalid offset");
if (E->isTypeDependent() || E->isValueDependent())
@@ -7563,6 +10477,15 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
return SLCT_UncheckedLiteral;
switch (E->getStmtClass()) {
+ case Stmt::InitListExprClass:
+ // Handle expressions like {"foobar"}.
+ if (const clang::Expr *SLE = maybeConstEvalStringLiteral(S.Context, E)) {
+ return checkFormatStringExpr(S, SLE, Args, APK, format_idx, firstDataArg,
+ Type, CallType, /*InFunctionCall*/ false,
+ CheckedVarArgs, UncoveredArg, Offset,
+ IgnoreStringsWithoutSpecifiers);
+ }
+ return SLCT_NotALiteral;
case Stmt::BinaryConditionalOperatorClass:
case Stmt::ConditionalOperatorClass: {
// The expression is a literal if both sub-expressions were, and it was
@@ -7576,8 +10499,8 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
bool CheckLeft = true, CheckRight = true;
bool Cond;
- if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(),
- S.isConstantEvaluated())) {
+ if (C->getCond()->EvaluateAsBooleanCondition(
+ Cond, S.getASTContext(), S.isConstantEvaluatedContext())) {
if (Cond)
CheckRight = false;
else
@@ -7592,9 +10515,8 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
if (!CheckLeft)
Left = SLCT_UncheckedLiteral;
else {
- Left = checkFormatStringExpr(S, C->getTrueExpr(), Args,
- HasVAListArg, format_idx, firstDataArg,
- Type, CallType, InFunctionCall,
+ Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, APK, format_idx,
+ firstDataArg, Type, CallType, InFunctionCall,
CheckedVarArgs, UncoveredArg, Offset,
IgnoreStringsWithoutSpecifiers);
if (Left == SLCT_NotALiteral || !CheckRight) {
@@ -7603,8 +10525,8 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
}
StringLiteralCheckType Right = checkFormatStringExpr(
- S, C->getFalseExpr(), Args, HasVAListArg, format_idx, firstDataArg,
- Type, CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
+ S, C->getFalseExpr(), Args, APK, format_idx, firstDataArg, Type,
+ CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
IgnoreStringsWithoutSpecifiers);
return (CheckLeft && Left < Right) ? Left : Right;
@@ -7654,42 +10576,85 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
if (InitList->isStringLiteralInit())
Init = InitList->getInit(0)->IgnoreParenImpCasts();
}
- return checkFormatStringExpr(S, Init, Args,
- HasVAListArg, format_idx,
- firstDataArg, Type, CallType,
- /*InFunctionCall*/ false, CheckedVarArgs,
- UncoveredArg, Offset);
+ return checkFormatStringExpr(
+ S, Init, Args, APK, format_idx, firstDataArg, Type, CallType,
+ /*InFunctionCall*/ false, CheckedVarArgs, UncoveredArg, Offset);
}
}
- // For vprintf* functions (i.e., HasVAListArg==true), we add a
- // special check to see if the format string is a function parameter
- // of the function calling the printf function. If the function
- // has an attribute indicating it is a printf-like function, then we
- // should suppress warnings concerning non-literals being used in a call
- // to a vprintf function. For example:
+ // When the format argument is an argument of this function, and this
+ // function also has the format attribute, there are several interactions
+ // for which there shouldn't be a warning. For instance, when calling
+ // v*printf from a function that has the printf format attribute, we
+ // should not emit a warning about using `fmt`, even though it's not
+ // constant, because the arguments have already been checked for the
+ // caller of `logmessage`:
//
- // void
- // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){
- // va_list ap;
- // va_start(ap, fmt);
- // vprintf(fmt, ap); // Do NOT emit a warning about "fmt".
- // ...
+ // __attribute__((format(printf, 1, 2)))
+ // void logmessage(char const *fmt, ...) {
+ // va_list ap;
+ // va_start(ap, fmt);
+ // vprintf(fmt, ap); /* do not emit a warning about "fmt" */
+ // ...
// }
- if (HasVAListArg) {
- if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) {
- if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) {
- int PVIndex = PV->getFunctionScopeIndex() + 1;
- for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) {
- // adjust for implicit parameter
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
- if (MD->isInstance())
- ++PVIndex;
+ //
+ // Another interaction that we need to support is calling a variadic
+ // format function from a format function that has fixed arguments. For
+ // instance:
+ //
+ // __attribute__((format(printf, 1, 2)))
+ // void logstring(char const *fmt, char const *str) {
+ // printf(fmt, str); /* do not emit a warning about "fmt" */
+ // }
+ //
+ // Same (and perhaps more relatably) for the variadic template case:
+ //
+ // template<typename... Args>
+ // __attribute__((format(printf, 1, 2)))
+ // void log(const char *fmt, Args&&... args) {
+ // printf(fmt, forward<Args>(args)...);
+ // /* do not emit a warning about "fmt" */
+ // }
+ //
+ // Due to implementation difficulty, we only check the format, not the
+ // format arguments, in all cases.
+ //
+ if (const auto *PV = dyn_cast<ParmVarDecl>(VD)) {
+ if (const auto *D = dyn_cast<Decl>(PV->getDeclContext())) {
+ for (const auto *PVFormat : D->specific_attrs<FormatAttr>()) {
+ bool IsCXXMember = false;
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(D))
+ IsCXXMember = MD->isInstance();
+
+ bool IsVariadic = false;
+ if (const FunctionType *FnTy = D->getFunctionType())
+ IsVariadic = cast<FunctionProtoType>(FnTy)->isVariadic();
+ else if (const auto *BD = dyn_cast<BlockDecl>(D))
+ IsVariadic = BD->isVariadic();
+ else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(D))
+ IsVariadic = OMD->isVariadic();
+
+ Sema::FormatStringInfo CallerFSI;
+ if (Sema::getFormatStringInfo(PVFormat, IsCXXMember, IsVariadic,
+ &CallerFSI)) {
// We also check if the formats are compatible.
// We can't pass a 'scanf' string to a 'printf' function.
- if (PVIndex == PVFormat->getFormatIdx() &&
- Type == S.GetFormatStringType(PVFormat))
- return SLCT_UncheckedLiteral;
+ if (PV->getFunctionScopeIndex() == CallerFSI.FormatIdx &&
+ Type == S.GetFormatStringType(PVFormat)) {
+ // Lastly, check that argument passing kinds transition in a
+ // way that makes sense:
+ // from a caller with FAPK_VAList, allow FAPK_VAList
+ // from a caller with FAPK_Fixed, allow FAPK_Fixed
+ // from a caller with FAPK_Fixed, allow FAPK_Variadic
+ // from a caller with FAPK_Variadic, allow FAPK_VAList
+ switch (combineFAPK(CallerFSI.ArgPassingKind, APK)) {
+ case combineFAPK(Sema::FAPK_VAList, Sema::FAPK_VAList):
+ case combineFAPK(Sema::FAPK_Fixed, Sema::FAPK_Fixed):
+ case combineFAPK(Sema::FAPK_Fixed, Sema::FAPK_Variadic):
+ case combineFAPK(Sema::FAPK_Variadic, Sema::FAPK_VAList):
+ return SLCT_UncheckedLiteral;
+ }
+ }
}
}
}
@@ -7708,8 +10673,8 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) {
const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex());
StringLiteralCheckType Result = checkFormatStringExpr(
- S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
- CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
+ S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
+ InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
IgnoreStringsWithoutSpecifiers);
if (IsFirst) {
CommonResult = Result;
@@ -7724,16 +10689,18 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString ||
BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) {
const Expr *Arg = CE->getArg(0);
- return checkFormatStringExpr(S, Arg, Args,
- HasVAListArg, format_idx,
- firstDataArg, Type, CallType,
- InFunctionCall, CheckedVarArgs,
- UncoveredArg, Offset,
- IgnoreStringsWithoutSpecifiers);
+ return checkFormatStringExpr(
+ S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
+ InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
+ IgnoreStringsWithoutSpecifiers);
}
}
}
-
+ if (const Expr *SLE = maybeConstEvalStringLiteral(S.Context, E))
+ return checkFormatStringExpr(S, SLE, Args, APK, format_idx, firstDataArg,
+ Type, CallType, /*InFunctionCall*/ false,
+ CheckedVarArgs, UncoveredArg, Offset,
+ IgnoreStringsWithoutSpecifiers);
return SLCT_NotALiteral;
}
case Stmt::ObjCMessageExprClass: {
@@ -7757,8 +10724,8 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
return checkFormatStringExpr(
- S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
- CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
+ S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
+ InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
IgnoreStringsWithoutSpecifiers);
}
}
@@ -7781,9 +10748,8 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
return SLCT_NotALiteral;
}
FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue());
- CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx,
- firstDataArg, Type, InFunctionCall, CallType,
- CheckedVarArgs, UncoveredArg,
+ CheckFormatString(S, &FStr, E, Args, APK, format_idx, firstDataArg, Type,
+ InFunctionCall, CallType, CheckedVarArgs, UncoveredArg,
IgnoreStringsWithoutSpecifiers);
return SLCT_CheckedLiteral;
}
@@ -7798,9 +10764,11 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
Expr::EvalResult LResult, RResult;
bool LIsInt = BinOp->getLHS()->EvaluateAsInt(
- LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated());
+ LResult, S.Context, Expr::SE_NoSideEffects,
+ S.isConstantEvaluatedContext());
bool RIsInt = BinOp->getRHS()->EvaluateAsInt(
- RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated());
+ RResult, S.Context, Expr::SE_NoSideEffects,
+ S.isConstantEvaluatedContext());
if (LIsInt != RIsInt) {
BinaryOperatorKind BinOpKind = BinOp->getOpcode();
@@ -7828,7 +10796,7 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
Expr::EvalResult IndexResult;
if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context,
Expr::SE_NoSideEffects,
- S.isConstantEvaluated())) {
+ S.isConstantEvaluatedContext())) {
sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add,
/*RHS is int*/ true);
E = ASE->getBase();
@@ -7844,6 +10812,20 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
}
}
+// If this expression can be evaluated at compile-time,
+// check if the result is a StringLiteral and return it
+// otherwise return nullptr
+static const Expr *maybeConstEvalStringLiteral(ASTContext &Context,
+ const Expr *E) {
+ Expr::EvalResult Result;
+ if (E->EvaluateAsRValue(Result, Context) && Result.Val.isLValue()) {
+ const auto *LVE = Result.Val.getLValueBase().dyn_cast<const Expr *>();
+ if (isa_and_nonnull<StringLiteral>(LVE))
+ return LVE;
+ }
+ return nullptr;
+}
+
Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) {
return llvm::StringSwitch<FormatStringType>(Format->getType()->getName())
.Case("scanf", FST_Scanf)
@@ -7862,24 +10844,25 @@ Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) {
/// functions) for correct use of format strings.
/// Returns true if a format string has been fully checked.
bool Sema::CheckFormatArguments(const FormatAttr *Format,
- ArrayRef<const Expr *> Args,
- bool IsCXXMember,
- VariadicCallType CallType,
- SourceLocation Loc, SourceRange Range,
+ ArrayRef<const Expr *> Args, bool IsCXXMember,
+ VariadicCallType CallType, SourceLocation Loc,
+ SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs) {
FormatStringInfo FSI;
- if (getFormatStringInfo(Format, IsCXXMember, &FSI))
- return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx,
+ if (getFormatStringInfo(Format, IsCXXMember, CallType != VariadicDoesNotApply,
+ &FSI))
+ return CheckFormatArguments(Args, FSI.ArgPassingKind, FSI.FormatIdx,
FSI.FirstDataArg, GetFormatStringType(Format),
CallType, Loc, Range, CheckedVarArgs);
return false;
}
bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args,
- bool HasVAListArg, unsigned format_idx,
- unsigned firstDataArg, FormatStringType Type,
- VariadicCallType CallType,
- SourceLocation Loc, SourceRange Range,
+ Sema::FormatArgumentPassingKind APK,
+ unsigned format_idx, unsigned firstDataArg,
+ FormatStringType Type,
+ VariadicCallType CallType, SourceLocation Loc,
+ SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs) {
// CHECK: printf/scanf-like function is called with no format string.
if (format_idx >= Args.size()) {
@@ -7902,12 +10885,11 @@ bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args,
// ObjC string uses the same format specifiers as C string, so we can use
// the same format string checking logic for both ObjC and C strings.
UncoveredArgHandler UncoveredArg;
- StringLiteralCheckType CT =
- checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg,
- format_idx, firstDataArg, Type, CallType,
- /*IsFunctionCall*/ true, CheckedVarArgs,
- UncoveredArg,
- /*no string offset*/ llvm::APSInt(64, false) = 0);
+ StringLiteralCheckType CT = checkFormatStringExpr(
+ *this, OrigFormatExpr, Args, APK, format_idx, firstDataArg, Type,
+ CallType,
+ /*IsFunctionCall*/ true, CheckedVarArgs, UncoveredArg,
+ /*no string offset*/ llvm::APSInt(64, false) = 0);
// Generate a diagnostic where an uncovered argument is detected.
if (UncoveredArg.hasUncoveredArg()) {
@@ -7970,7 +10952,7 @@ protected:
const unsigned FirstDataArg;
const unsigned NumDataArgs;
const char *Beg; // Start of format string.
- const bool HasVAListArg;
+ const Sema::FormatArgumentPassingKind ArgPassingKind;
ArrayRef<const Expr *> Args;
unsigned FormatIdx;
llvm::SmallBitVector CoveredArgs;
@@ -7985,14 +10967,15 @@ public:
CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr,
const Expr *origFormatExpr,
const Sema::FormatStringType type, unsigned firstDataArg,
- unsigned numDataArgs, const char *beg, bool hasVAListArg,
+ unsigned numDataArgs, const char *beg,
+ Sema::FormatArgumentPassingKind APK,
ArrayRef<const Expr *> Args, unsigned formatIdx,
bool inFunctionCall, Sema::VariadicCallType callType,
llvm::SmallBitVector &CheckedVarArgs,
UncoveredArgHandler &UncoveredArg)
: S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type),
FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg),
- HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx),
+ ArgPassingKind(APK), Args(Args), FormatIdx(formatIdx),
inFunctionCall(inFunctionCall), CallType(callType),
CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) {
CoveredArgs.resize(numDataArgs);
@@ -8033,7 +11016,7 @@ public:
EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr,
const PartialDiagnostic &PDiag, SourceLocation StringLoc,
bool IsStringLocation, Range StringRange,
- ArrayRef<FixItHint> Fixit = None);
+ ArrayRef<FixItHint> Fixit = std::nullopt);
protected:
bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc,
@@ -8060,7 +11043,7 @@ protected:
template <typename Range>
void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc,
bool IsStringLocation, Range StringRange,
- ArrayRef<FixItHint> Fixit = None);
+ ArrayRef<FixItHint> Fixit = std::nullopt);
};
} // namespace
@@ -8103,7 +11086,7 @@ void CheckFormatHandler::HandleInvalidLengthModifier(
CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());
// See if we know how to fix this length modifier.
- Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
+ std::optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
if (FixedLM) {
EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
getLocationOfByte(LM.getStart()),
@@ -8136,7 +11119,7 @@ void CheckFormatHandler::HandleNonStandardLengthModifier(
CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());
// See if we know how to fix this length modifier.
- Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
+ std::optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
if (FixedLM) {
EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
<< LM.toString() << 0,
@@ -8163,7 +11146,7 @@ void CheckFormatHandler::HandleNonStandardConversionSpecifier(
using namespace analyze_format_string;
// See if we know how to fix this conversion specifier.
- Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier();
+ std::optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier();
if (FixedCS) {
EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
<< CS.toString() << /*conversion specifier*/1,
@@ -8192,13 +11175,13 @@ void CheckFormatHandler::HandlePosition(const char *startPos,
getSpecifierRange(startPos, posLen));
}
-void
-CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen,
- analyze_format_string::PositionContext p) {
- EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier)
- << (unsigned) p,
- getLocationOfByte(startPos), /*IsStringLocation*/true,
- getSpecifierRange(startPos, posLen));
+void CheckFormatHandler::HandleInvalidPosition(
+ const char *startSpecifier, unsigned specifierLen,
+ analyze_format_string::PositionContext p) {
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_format_invalid_positional_specifier) << (unsigned)p,
+ getLocationOfByte(startSpecifier), /*IsStringLocation*/ true,
+ getSpecifierRange(startSpecifier, specifierLen));
}
void CheckFormatHandler::HandleZeroPosition(const char *startPos,
@@ -8228,8 +11211,8 @@ const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
void CheckFormatHandler::DoneProcessing() {
// Does the number of data arguments exceed the number of
// format conversions in the format string?
- if (!HasVAListArg) {
- // Find any arguments that weren't covered.
+ if (ArgPassingKind != Sema::FAPK_VAList) {
+ // Find any arguments that weren't covered.
CoveredArgs.flip();
signed notCoveredArg = CoveredArgs.find_first();
if (notCoveredArg >= 0) {
@@ -8243,7 +11226,7 @@ void CheckFormatHandler::DoneProcessing() {
void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
const Expr *ArgExpr) {
- assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
+ assert(hasUncoveredArg() && !DiagnosticExprs.empty() &&
"Invalid state");
if (!ArgExpr)
@@ -8424,13 +11407,13 @@ public:
const Expr *origFormatExpr,
const Sema::FormatStringType type, unsigned firstDataArg,
unsigned numDataArgs, bool isObjC, const char *beg,
- bool hasVAListArg, ArrayRef<const Expr *> Args,
- unsigned formatIdx, bool inFunctionCall,
- Sema::VariadicCallType CallType,
+ Sema::FormatArgumentPassingKind APK,
+ ArrayRef<const Expr *> Args, unsigned formatIdx,
+ bool inFunctionCall, Sema::VariadicCallType CallType,
llvm::SmallBitVector &CheckedVarArgs,
UncoveredArgHandler &UncoveredArg)
: CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
- numDataArgs, beg, hasVAListArg, Args, formatIdx,
+ numDataArgs, beg, APK, Args, formatIdx,
inFunctionCall, CallType, CheckedVarArgs,
UncoveredArg) {}
@@ -8450,8 +11433,8 @@ public:
void handleInvalidMaskType(StringRef MaskType) override;
bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
- const char *startSpecifier,
- unsigned specifierLen) override;
+ const char *startSpecifier, unsigned specifierLen,
+ const TargetInfo &Target) override;
bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
const char *StartSpecifier,
unsigned SpecifierLen,
@@ -8505,17 +11488,16 @@ void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) {
}
bool CheckPrintfHandler::HandleAmount(
- const analyze_format_string::OptionalAmount &Amt,
- unsigned k, const char *startSpecifier,
- unsigned specifierLen) {
+ const analyze_format_string::OptionalAmount &Amt, unsigned k,
+ const char *startSpecifier, unsigned specifierLen) {
if (Amt.hasDataArgument()) {
- if (!HasVAListArg) {
+ if (ArgPassingKind != Sema::FAPK_VAList) {
unsigned argIndex = Amt.getArgIndex();
if (argIndex >= NumDataArgs) {
EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg)
- << k,
+ << k,
getLocationOfByte(Amt.getStart()),
- /*IsStringLocation*/true,
+ /*IsStringLocation*/ true,
getSpecifierRange(startSpecifier, specifierLen));
// Don't do any more checking. We will just emit
// spurious errors.
@@ -8710,11 +11692,9 @@ bool CheckPrintfHandler::checkForCStrMembers(
return false;
}
-bool
-CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
- &FS,
- const char *startSpecifier,
- unsigned specifierLen) {
+bool CheckPrintfHandler::HandlePrintfSpecifier(
+ const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier,
+ unsigned specifierLen, const TargetInfo &Target) {
using namespace analyze_format_string;
using namespace analyze_printf;
@@ -8846,6 +11826,15 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
}
}
+ const llvm::Triple &Triple = Target.getTriple();
+ if (CS.getKind() == ConversionSpecifier::nArg &&
+ (Triple.isAndroid() || Triple.isOSFuchsia())) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_narg_not_supported),
+ getLocationOfByte(CS.getStart()),
+ /*IsStringLocation*/ false,
+ getSpecifierRange(startSpecifier, specifierLen));
+ }
+
// Check for invalid use of field width
if (!FS.hasValidFieldWidth()) {
HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0,
@@ -8904,7 +11893,7 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
// The remaining checks depend on the data arguments.
- if (HasVAListArg)
+ if (ArgPassingKind == Sema::FAPK_VAList)
return true;
if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
@@ -9019,7 +12008,7 @@ isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) {
// It's an integer promotion if the destination type is the promoted
// source type.
if (ICE->getCastKind() == CK_IntegralCast &&
- From->isPromotableIntegerType() &&
+ S.Context.isPromotableIntegerType(From) &&
S.Context.getPromotedIntegerType(From) == To)
return true;
// Look through vector types, since we do default argument promotion for
@@ -9052,6 +12041,12 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
ExprTy = TET->getUnderlyingExpr()->getType();
}
+ // When using the format attribute in C++, you can receive a function or an
+ // array that will necessarily decay to a pointer when passed to the final
+ // format consumer. Apply decay before type comparison.
+ if (ExprTy->canDecayToPointerType())
+ ExprTy = S.Context.getDecayedType(ExprTy);
+
// Diagnose attempts to print a boolean value as a character. Unlike other
// -Wformat diagnostics, this is fine from a type perspective, but it still
// doesn't make sense.
@@ -9068,10 +12063,14 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
return true;
}
- analyze_printf::ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy);
- if (Match == analyze_printf::ArgType::Match)
+ ArgType::MatchKind ImplicitMatch = ArgType::NoMatch;
+ ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy);
+ if (Match == ArgType::Match)
return true;
+ // NoMatchPromotionTypeConfusion should be only returned in ImplictCastExpr
+ assert(Match != ArgType::NoMatchPromotionTypeConfusion);
+
// Look through argument promotions for our error message's reported type.
// This includes the integral and floating promotions, but excludes array
// and function pointer decay (seeing that an argument intended to be a
@@ -9088,13 +12087,9 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
if (ICE->getType() == S.Context.IntTy ||
ICE->getType() == S.Context.UnsignedIntTy) {
// All further checking is done on the subexpression
- const analyze_printf::ArgType::MatchKind ImplicitMatch =
- AT.matchesType(S.Context, ExprTy);
- if (ImplicitMatch == analyze_printf::ArgType::Match)
+ ImplicitMatch = AT.matchesType(S.Context, ExprTy);
+ if (ImplicitMatch == ArgType::Match)
return true;
- if (ImplicitMatch == ArgType::NoMatchPedantic ||
- ImplicitMatch == ArgType::NoMatchTypeConfusion)
- Match = ImplicitMatch;
}
}
} else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) {
@@ -9105,21 +12100,49 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
// modifier is provided.
if (ExprTy == S.Context.IntTy &&
FS.getLengthModifier().getKind() != LengthModifier::AsChar)
- if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue()))
+ if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) {
ExprTy = S.Context.CharTy;
+ // To improve check results, we consider a character literal in C
+ // to be a 'char' rather than an 'int'. 'printf("%hd", 'a');' is
+ // more likely a type confusion situation, so we will suggest to
+ // use '%hhd' instead by discarding the MatchPromotion.
+ if (Match == ArgType::MatchPromotion)
+ Match = ArgType::NoMatch;
+ }
+ }
+ if (Match == ArgType::MatchPromotion) {
+ // WG14 N2562 only clarified promotions in *printf
+ // For NSLog in ObjC, just preserve -Wformat behavior
+ if (!S.getLangOpts().ObjC &&
+ ImplicitMatch != ArgType::NoMatchPromotionTypeConfusion &&
+ ImplicitMatch != ArgType::NoMatchTypeConfusion)
+ return true;
+ Match = ArgType::NoMatch;
}
+ if (ImplicitMatch == ArgType::NoMatchPedantic ||
+ ImplicitMatch == ArgType::NoMatchTypeConfusion)
+ Match = ImplicitMatch;
+ assert(Match != ArgType::MatchPromotion);
- // Look through enums to their underlying type.
+ // Look through unscoped enums to their underlying type.
bool IsEnum = false;
+ bool IsScopedEnum = false;
+ QualType IntendedTy = ExprTy;
if (auto EnumTy = ExprTy->getAs<EnumType>()) {
- ExprTy = EnumTy->getDecl()->getIntegerType();
- IsEnum = true;
+ IntendedTy = EnumTy->getDecl()->getIntegerType();
+ if (EnumTy->isUnscopedEnumerationType()) {
+ ExprTy = IntendedTy;
+ // This controls whether we're talking about the underlying type or not,
+ // which we only want to do when it's an unscoped enum.
+ IsEnum = true;
+ } else {
+ IsScopedEnum = true;
+ }
}
// %C in an Objective-C context prints a unichar, not a wchar_t.
// If the argument is an integer of some kind, believe the %C and suggest
// a cast instead of changing the conversion specifier.
- QualType IntendedTy = ExprTy;
if (isObjCContext() &&
FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) {
if (ExprTy->isIntegralOrUnscopedEnumerationType() &&
@@ -9155,8 +12178,10 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E);
if (!CastTy.isNull()) {
// %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int
- // (long in ASTContext). Only complain to pedants.
- if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") &&
+ // (long in ASTContext). Only complain to pedants or when they're the
+ // underlying type of a scoped enum (which always needs a cast).
+ if (!IsScopedEnum &&
+ (CastTyName == "NSInteger" || CastTyName == "NSUInteger") &&
(AT.isSizeT() || AT.isPtrdiffT()) &&
AT.matchesType(S.Context, CastTy))
Match = ArgType::NoMatchPedantic;
@@ -9178,10 +12203,13 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen);
- if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) {
+ if (IntendedTy == ExprTy && !ShouldNotPrintDirectly && !IsScopedEnum) {
unsigned Diag;
switch (Match) {
- case ArgType::Match: llvm_unreachable("expected non-matching");
+ case ArgType::Match:
+ case ArgType::MatchPromotion:
+ case ArgType::NoMatchPromotionTypeConfusion:
+ llvm_unreachable("expected non-matching");
case ArgType::NoMatchPedantic:
Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
break;
@@ -9208,15 +12236,16 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
// should be printed as 'long' for 64-bit compatibility.)
// Rather than emitting a normal format/argument mismatch, we want to
// add a cast to the recommended type (and correct the format string
- // if necessary).
+ // if necessary). We should also do so for scoped enumerations.
SmallString<16> CastBuf;
llvm::raw_svector_ostream CastFix(CastBuf);
- CastFix << "(";
+ CastFix << (S.LangOpts.CPlusPlus ? "static_cast<" : "(");
IntendedTy.print(CastFix, S.Context.getPrintingPolicy());
- CastFix << ")";
+ CastFix << (S.LangOpts.CPlusPlus ? ">" : ")");
SmallVector<FixItHint,4> Hints;
- if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly)
+ if (AT.matchesType(S.Context, IntendedTy) != ArgType::Match ||
+ ShouldNotPrintDirectly)
Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str()));
if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) {
@@ -9224,7 +12253,7 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc());
Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str()));
- } else if (!requiresParensToAddCast(E)) {
+ } else if (!requiresParensToAddCast(E) && !S.LangOpts.CPlusPlus) {
// If the expression has high enough precedence,
// just write the C-style cast.
Hints.push_back(
@@ -9235,16 +12264,20 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
Hints.push_back(
FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));
- SourceLocation After = S.getLocForEndOfToken(E->getEndLoc());
+ // We don't use getLocForEndOfToken because it returns invalid source
+ // locations for macro expansions (by design).
+ SourceLocation EndLoc = S.SourceMgr.getSpellingLoc(E->getEndLoc());
+ SourceLocation After = EndLoc.getLocWithOffset(
+ Lexer::MeasureTokenLength(EndLoc, S.SourceMgr, S.LangOpts));
Hints.push_back(FixItHint::CreateInsertion(After, ")"));
}
- if (ShouldNotPrintDirectly) {
+ if (ShouldNotPrintDirectly && !IsScopedEnum) {
// The expression has a type that should not be printed directly.
// We extract the name from the typedef because we don't want to show
// the underlying type in the diagnostic.
StringRef Name;
- if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy))
+ if (const auto *TypedefTy = ExprTy->getAs<TypedefType>())
Name = TypedefTy->getDecl()->getName();
else
Name = CastTyName;
@@ -9272,12 +12305,16 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
// Since the warning for passing non-POD types to variadic functions
// was deferred until now, we emit a warning for non-POD
// arguments here.
+ bool EmitTypeMismatch = false;
switch (S.isValidVarArgType(ExprTy)) {
case Sema::VAK_Valid:
case Sema::VAK_ValidInCXX11: {
unsigned Diag;
switch (Match) {
- case ArgType::Match: llvm_unreachable("expected non-matching");
+ case ArgType::Match:
+ case ArgType::MatchPromotion:
+ case ArgType::NoMatchPromotionTypeConfusion:
+ llvm_unreachable("expected non-matching");
case ArgType::NoMatchPedantic:
Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
break;
@@ -9297,17 +12334,23 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
}
case Sema::VAK_Undefined:
case Sema::VAK_MSVCUndefined:
- EmitFormatDiagnostic(S.PDiag(diag::warn_non_pod_vararg_with_format_string)
- << S.getLangOpts().CPlusPlus11 << ExprTy
- << CallType
- << AT.getRepresentativeTypeName(S.Context) << CSR
- << E->getSourceRange(),
- E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
- checkForCStrMembers(AT, E);
+ if (CallType == Sema::VariadicDoesNotApply) {
+ EmitTypeMismatch = true;
+ } else {
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_non_pod_vararg_with_format_string)
+ << S.getLangOpts().CPlusPlus11 << ExprTy << CallType
+ << AT.getRepresentativeTypeName(S.Context) << CSR
+ << E->getSourceRange(),
+ E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
+ checkForCStrMembers(AT, E);
+ }
break;
case Sema::VAK_Invalid:
- if (ExprTy->isObjCObjectType())
+ if (CallType == Sema::VariadicDoesNotApply)
+ EmitTypeMismatch = true;
+ else if (ExprTy->isObjCObjectType())
EmitFormatDiagnostic(
S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format)
<< S.getLangOpts().CPlusPlus11 << ExprTy << CallType
@@ -9323,6 +12366,19 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
break;
}
+ if (EmitTypeMismatch) {
+ // The function is not variadic, so we do not generate warnings about
+ // being allowed to pass that object as a variadic argument. Instead,
+ // since there are inherently no printf specifiers for types which cannot
+ // be passed as variadic arguments, emit a plain old specifier mismatch
+ // argument.
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
+ << AT.getRepresentativeTypeName(S.Context) << ExprTy << false
+ << E->getSourceRange(),
+ E->getBeginLoc(), false, CSR);
+ }
+
assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
"format string specifier index out of range");
CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
@@ -9340,13 +12396,13 @@ public:
CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
const Expr *origFormatExpr, Sema::FormatStringType type,
unsigned firstDataArg, unsigned numDataArgs,
- const char *beg, bool hasVAListArg,
+ const char *beg, Sema::FormatArgumentPassingKind APK,
ArrayRef<const Expr *> Args, unsigned formatIdx,
bool inFunctionCall, Sema::VariadicCallType CallType,
llvm::SmallBitVector &CheckedVarArgs,
UncoveredArgHandler &UncoveredArg)
: CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
- numDataArgs, beg, hasVAListArg, Args, formatIdx,
+ numDataArgs, beg, APK, Args, formatIdx,
inFunctionCall, CallType, CheckedVarArgs,
UncoveredArg) {}
@@ -9450,7 +12506,7 @@ bool CheckScanfHandler::HandleScanfSpecifier(
HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
// The remaining checks depend on the data arguments.
- if (HasVAListArg)
+ if (ArgPassingKind == Sema::FAPK_VAList)
return true;
if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
@@ -9507,17 +12563,13 @@ bool CheckScanfHandler::HandleScanfSpecifier(
return true;
}
-static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
- const Expr *OrigFormatExpr,
- ArrayRef<const Expr *> Args,
- bool HasVAListArg, unsigned format_idx,
- unsigned firstDataArg,
- Sema::FormatStringType Type,
- bool inFunctionCall,
- Sema::VariadicCallType CallType,
- llvm::SmallBitVector &CheckedVarArgs,
- UncoveredArgHandler &UncoveredArg,
- bool IgnoreStringsWithoutSpecifiers) {
+static void CheckFormatString(
+ Sema &S, const FormatStringLiteral *FExpr, const Expr *OrigFormatExpr,
+ ArrayRef<const Expr *> Args, Sema::FormatArgumentPassingKind APK,
+ unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type,
+ bool inFunctionCall, Sema::VariadicCallType CallType,
+ llvm::SmallBitVector &CheckedVarArgs, UncoveredArgHandler &UncoveredArg,
+ bool IgnoreStringsWithoutSpecifiers) {
// CHECK: is the format string a wide literal?
if (!FExpr->isAscii() && !FExpr->isUTF8()) {
CheckFormatHandler::EmitFormatDiagnostic(
@@ -9545,8 +12597,7 @@ static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
// Emit a warning if the string literal is truncated and does not contain an
// embedded null character.
- if (TypeSize <= StrRef.size() &&
- StrRef.substr(0, TypeSize).find('\0') == StringRef::npos) {
+ if (TypeSize <= StrRef.size() && !StrRef.substr(0, TypeSize).contains('\0')) {
CheckFormatHandler::EmitFormatDiagnostic(
S, inFunctionCall, Args[format_idx],
S.PDiag(diag::warn_printf_format_string_not_null_terminated),
@@ -9569,23 +12620,21 @@ static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
Type == Sema::FST_OSTrace) {
CheckPrintfHandler H(
S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
- (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
- HasVAListArg, Args, format_idx, inFunctionCall, CallType,
- CheckedVarArgs, UncoveredArg);
-
- if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
- S.getLangOpts(),
- S.Context.getTargetInfo(),
- Type == Sema::FST_FreeBSDKPrintf))
+ (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str, APK,
+ Args, format_idx, inFunctionCall, CallType, CheckedVarArgs,
+ UncoveredArg);
+
+ if (!analyze_format_string::ParsePrintfString(
+ H, Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo(),
+ Type == Sema::FST_FreeBSDKPrintf))
H.DoneProcessing();
} else if (Type == Sema::FST_Scanf) {
CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
- numDataArgs, Str, HasVAListArg, Args, format_idx,
- inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);
+ numDataArgs, Str, APK, Args, format_idx, inFunctionCall,
+ CallType, CheckedVarArgs, UncoveredArg);
- if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
- S.getLangOpts(),
- S.Context.getTargetInfo()))
+ if (!analyze_format_string::ParseScanfString(
+ H, Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo()))
H.DoneProcessing();
} // TODO: handle other formats
}
@@ -9819,7 +12868,7 @@ static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range,
unsigned AbsKind, QualType ArgType) {
bool EmitHeaderHint = true;
const char *HeaderName = nullptr;
- const char *FunctionName = nullptr;
+ StringRef FunctionName;
if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) {
FunctionName = "std::abs";
if (ArgType->isIntegralOrEnumerationType()) {
@@ -9910,6 +12959,22 @@ static bool IsStdFunction(const FunctionDecl *FDecl,
return true;
}
+void Sema::CheckInfNaNFunction(const CallExpr *Call,
+ const FunctionDecl *FDecl) {
+ FPOptions FPO = Call->getFPFeaturesInEffect(getLangOpts());
+ if ((IsStdFunction(FDecl, "isnan") || IsStdFunction(FDecl, "isunordered") ||
+ (Call->getBuiltinCallee() == Builtin::BI__builtin_nanf)) &&
+ FPO.getNoHonorNaNs())
+ Diag(Call->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
+ << 1 << 0 << Call->getSourceRange();
+ else if ((IsStdFunction(FDecl, "isinf") ||
+ (IsStdFunction(FDecl, "isfinite") ||
+ (FDecl->getIdentifier() && FDecl->getName() == "infinity"))) &&
+ FPO.getNoHonorInfs())
+ Diag(Call->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
+ << 0 << 0 << Call->getSourceRange();
+}
+
// Warn when using the wrong abs() function.
void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl) {
@@ -9927,7 +12992,7 @@ void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
// Unsigned types cannot be negative. Suggest removing the absolute value
// function call.
if (ArgType->isUnsignedIntegerType()) {
- const char *FunctionName =
+ StringRef FunctionName =
IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
Diag(Call->getExprLoc(), diag::note_remove_abs)
@@ -10278,7 +13343,10 @@ static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) {
Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts();
auto isLiteralZero = [](const Expr *E) {
- return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0;
+ return (isa<IntegerLiteral>(E) &&
+ cast<IntegerLiteral>(E)->getValue() == 0) ||
+ (isa<CharacterLiteral>(E) &&
+ cast<CharacterLiteral>(E)->getValue() == 0);
};
// If we're memsetting or bzeroing 0 bytes, then this is likely an error.
@@ -10800,7 +13868,7 @@ void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName,
/// Alerts the user that they are attempting to free a non-malloc'd object.
void Sema::CheckFreeArguments(const CallExpr *E) {
const std::string CalleeName =
- dyn_cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString();
+ cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString();
{ // Prefer something that doesn't involve a cast to make things simpler.
const Expr *Arg = E->getArg(0)->IgnoreParenCasts();
@@ -10843,7 +13911,7 @@ Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
const FunctionDecl *FD) {
// Check if the return value is null but should not be.
if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) ||
- (!isObjCMethod && isNonNullType(Context, lhsType))) &&
+ (!isObjCMethod && isNonNullType(lhsType))) &&
CheckNonNullExpr(*this, RetValExp))
Diag(ReturnLoc, diag::warn_null_ret)
<< (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange();
@@ -10865,25 +13933,60 @@ Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
}
}
+ if (RetValExp && RetValExp->getType()->isWebAssemblyTableType()) {
+ Diag(ReturnLoc, diag::err_wasm_table_art) << 1;
+ }
+
// PPC MMA non-pointer types are not allowed as return type. Checking the type
// here prevent the user from using a PPC MMA type as trailing return type.
if (Context.getTargetInfo().getTriple().isPPC64())
CheckPPCMMAType(RetValExp->getType(), ReturnLoc);
}
-//===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===//
+/// Check for comparisons of floating-point values using == and !=. Issue a
+/// warning if the comparison is not likely to do what the programmer intended.
+void Sema::CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS,
+ BinaryOperatorKind Opcode) {
+ if (!BinaryOperator::isEqualityOp(Opcode))
+ return;
+
+ // Match and capture subexpressions such as "(float) X == 0.1".
+ FloatingLiteral *FPLiteral;
+ CastExpr *FPCast;
+ auto getCastAndLiteral = [&FPLiteral, &FPCast](Expr *L, Expr *R) {
+ FPLiteral = dyn_cast<FloatingLiteral>(L->IgnoreParens());
+ FPCast = dyn_cast<CastExpr>(R->IgnoreParens());
+ return FPLiteral && FPCast;
+ };
+
+ if (getCastAndLiteral(LHS, RHS) || getCastAndLiteral(RHS, LHS)) {
+ auto *SourceTy = FPCast->getSubExpr()->getType()->getAs<BuiltinType>();
+ auto *TargetTy = FPLiteral->getType()->getAs<BuiltinType>();
+ if (SourceTy && TargetTy && SourceTy->isFloatingPoint() &&
+ TargetTy->isFloatingPoint()) {
+ bool Lossy;
+ llvm::APFloat TargetC = FPLiteral->getValue();
+ TargetC.convert(Context.getFloatTypeSemantics(QualType(SourceTy, 0)),
+ llvm::APFloat::rmNearestTiesToEven, &Lossy);
+ if (Lossy) {
+ // If the literal cannot be represented in the source type, then a
+ // check for == is always false and check for != is always true.
+ Diag(Loc, diag::warn_float_compare_literal)
+ << (Opcode == BO_EQ) << QualType(SourceTy, 0)
+ << LHS->getSourceRange() << RHS->getSourceRange();
+ return;
+ }
+ }
+ }
-/// Check for comparisons of floating point operands using != and ==.
-/// Issue a warning if these are no self-comparisons, as they are not likely
-/// to do what the programmer intended.
-void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) {
+ // Match a more general floating-point equality comparison (-Wfloat-equal).
Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();
// Special case: check for x == x (which is OK).
// Do not emit warnings for such cases.
- if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
- if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
+ if (auto *DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
+ if (auto *DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
if (DRL->getDecl() == DRR->getDecl())
return;
@@ -10985,7 +14088,7 @@ struct IntRange {
false/*NonNegative*/);
}
- if (const auto *EIT = dyn_cast<ExtIntType>(T))
+ if (const auto *EIT = dyn_cast<BitIntType>(T))
return IntRange(EIT->getNumBits(), EIT->isUnsigned());
const BuiltinType *BT = cast<BuiltinType>(T);
@@ -11011,7 +14114,7 @@ struct IntRange {
if (const EnumType *ET = dyn_cast<EnumType>(T))
T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr();
- if (const auto *EIT = dyn_cast<ExtIntType>(T))
+ if (const auto *EIT = dyn_cast<BitIntType>(T))
return IntRange(EIT->getNumBits(), EIT->isUnsigned());
const BuiltinType *BT = cast<BuiltinType>(T);
@@ -11087,7 +14190,7 @@ struct IntRange {
static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value,
unsigned MaxWidth) {
if (value.isSigned() && value.isNegative())
- return IntRange(value.getMinSignedBits(), false);
+ return IntRange(value.getSignificantBits(), false);
if (value.getBitWidth() > MaxWidth)
value = value.trunc(MaxWidth);
@@ -11138,7 +14241,7 @@ static QualType GetExprType(const Expr *E) {
///
/// \param MaxWidth The width to which the value will be truncated.
/// \param Approximate If \c true, return a likely range for the result: in
-/// particular, assume that aritmetic on narrower types doesn't leave
+/// particular, assume that arithmetic on narrower types doesn't leave
/// those types. If \c false, return a range including all possible
/// result values.
static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
@@ -11262,7 +14365,7 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
return IntRange(R.Width, /*NonNegative*/ true);
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case BO_ShlAssign:
return IntRange::forValueOfType(C, GetExprType(E));
@@ -11275,14 +14378,13 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
// If the shift amount is a positive constant, drop the width by
// that much.
- if (Optional<llvm::APSInt> shift =
+ if (std::optional<llvm::APSInt> shift =
BO->getRHS()->getIntegerConstantExpr(C)) {
if (shift->isNonNegative()) {
- unsigned zext = shift->getZExtValue();
- if (zext >= L.Width)
+ if (shift->uge(L.Width))
L.Width = (L.NonNegative ? 0 : 1);
else
- L.Width -= zext;
+ L.Width -= shift->getZExtValue();
}
}
@@ -11320,7 +14422,7 @@ static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
Approximate);
// If the divisor is constant, use that.
- if (Optional<llvm::APSInt> divisor =
+ if (std::optional<llvm::APSInt> divisor =
BO->getRHS()->getIntegerConstantExpr(C)) {
unsigned log2 = divisor->logBase2(); // floor(log_2(divisor))
if (log2 >= L.Width)
@@ -11550,7 +14652,7 @@ struct PromotedRange {
llvm_unreachable("impossible compare result");
}
- static llvm::Optional<StringRef>
+ static std::optional<StringRef>
constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) {
if (Op == BO_Cmp) {
ComparisonResult LTFlag = LT, GTFlag = GT;
@@ -11559,7 +14661,7 @@ struct PromotedRange {
if (R & EQ) return StringRef("'std::strong_ordering::equal'");
if (R & LTFlag) return StringRef("'std::strong_ordering::less'");
if (R & GTFlag) return StringRef("'std::strong_ordering::greater'");
- return llvm::None;
+ return std::nullopt;
}
ComparisonResult TrueFlag, FalseFlag;
@@ -11584,7 +14686,7 @@ struct PromotedRange {
return StringRef("true");
if (R & FalseFlag)
return StringRef("false");
- return llvm::None;
+ return std::nullopt;
}
};
}
@@ -11639,7 +14741,7 @@ static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
return false;
IntRange OtherValueRange = GetExprRange(
- S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false);
+ S.Context, Other, S.isConstantEvaluatedContext(), /*Approximate=*/false);
QualType OtherT = Other->getType();
if (const auto *AT = OtherT->getAs<AtomicType>())
@@ -11794,8 +14896,10 @@ static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
Expr *RHS = E->getRHS();
if (T->isIntegralType(S.Context)) {
- Optional<llvm::APSInt> RHSValue = RHS->getIntegerConstantExpr(S.Context);
- Optional<llvm::APSInt> LHSValue = LHS->getIntegerConstantExpr(S.Context);
+ std::optional<llvm::APSInt> RHSValue =
+ RHS->getIntegerConstantExpr(S.Context);
+ std::optional<llvm::APSInt> LHSValue =
+ LHS->getIntegerConstantExpr(S.Context);
// We don't care about expressions whose result is a constant.
if (RHSValue && LHSValue)
@@ -11852,8 +14956,9 @@ static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
}
// Otherwise, calculate the effective range of the signed operand.
- IntRange signedRange = GetExprRange(
- S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true);
+ IntRange signedRange =
+ GetExprRange(S.Context, signedOperand, S.isConstantEvaluatedContext(),
+ /*Approximate=*/true);
// Go ahead and analyze implicit conversions in the operands. Note
// that we skip the implicit conversions on both sides.
@@ -11871,8 +14976,8 @@ static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
if (E->isEqualityOp()) {
unsigned comparisonWidth = S.Context.getIntWidth(T);
IntRange unsignedRange =
- GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(),
- /*Approximate*/ true);
+ GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluatedContext(),
+ /*Approximate=*/true);
// We should never be unable to prove that the unsigned operand is
// non-negative.
@@ -11916,9 +15021,6 @@ static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
}
}
- if (Bitfield->getType()->isBooleanType())
- return false;
-
// Ignore value- or type-dependent expressions.
if (Bitfield->getBitWidth()->isValueDependent() ||
Bitfield->getBitWidth()->isTypeDependent() ||
@@ -11990,10 +15092,22 @@ static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
unsigned OriginalWidth = Value.getBitWidth();
+ // In C, the macro 'true' from stdbool.h will evaluate to '1'; To reduce
+ // false positives where the user is demonstrating they intend to use the
+ // bit-field as a Boolean, check to see if the value is 1 and we're assigning
+ // to a one-bit bit-field to see if the value came from a macro named 'true'.
+ bool OneAssignedToOneBitBitfield = FieldWidth == 1 && Value == 1;
+ if (OneAssignedToOneBitBitfield && !S.LangOpts.CPlusPlus) {
+ SourceLocation MaybeMacroLoc = OriginalInit->getBeginLoc();
+ if (S.SourceMgr.isInSystemMacro(MaybeMacroLoc) &&
+ S.findMacroSpelling(MaybeMacroLoc, "true"))
+ return false;
+ }
+
if (!Value.isSigned() || Value.isNegative())
if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit))
if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not)
- OriginalWidth = Value.getMinSignedBits();
+ OriginalWidth = Value.getSignificantBits();
if (OriginalWidth <= FieldWidth)
return false;
@@ -12007,17 +15121,14 @@ static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
if (llvm::APSInt::isSameValue(Value, TruncatedValue))
return false;
- // Special-case bitfields of width 1: booleans are naturally 0/1, and
- // therefore don't strictly fit into a signed bitfield of width 1.
- if (FieldWidth == 1 && Value == 1)
- return false;
-
std::string PrettyValue = toString(Value, 10);
std::string PrettyTrunc = toString(TruncatedValue, 10);
- S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant)
- << PrettyValue << PrettyTrunc << OriginalInit->getType()
- << Init->getSourceRange();
+ S.Diag(InitLoc, OneAssignedToOneBitBitfield
+ ? diag::warn_impcast_single_bit_bitield_precision_constant
+ : diag::warn_impcast_bitfield_precision_constant)
+ << PrettyValue << PrettyTrunc << OriginalInit->getType()
+ << Init->getSourceRange();
return true;
}
@@ -12303,9 +15414,10 @@ static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
return;
// Check for NULL (GNUNull) or nullptr (CXX11_nullptr).
- const Expr::NullPointerConstantKind NullKind =
- E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull);
- if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr)
+ const Expr *NewE = E->IgnoreParenImpCasts();
+ bool IsGNUNullExpr = isa<GNUNullExpr>(NewE);
+ bool HasNullPtrType = NewE->getType()->isNullPtrType();
+ if (!IsGNUNullExpr && !HasNullPtrType)
return;
// Return if target type is a safe conversion.
@@ -12322,7 +15434,7 @@ static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
CC = S.SourceMgr.getTopMacroCallerLoc(CC);
// __null is usually wrapped in a macro. Go up a macro if that is the case.
- if (NullKind == Expr::NPCK_GNUNull && Loc.isMacroID()) {
+ if (IsGNUNullExpr && Loc.isMacroID()) {
StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics(
Loc, S.SourceMgr, S.getLangOpts());
if (MacroName == "NULL")
@@ -12334,7 +15446,7 @@ static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
return;
S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
- << (NullKind == Expr::NPCK_CXX11_nullptr) << T << SourceRange(CC)
+ << HasNullPtrType << T << SourceRange(CC)
<< FixItHint::CreateReplacement(Loc,
S.getFixItZeroLiteralForType(T, Loc));
}
@@ -12580,13 +15692,20 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
// Strip vector types.
if (isa<VectorType>(Source)) {
- if (Target->isVLSTBuiltinType() &&
+ if (Target->isSveVLSBuiltinType() &&
(S.Context.areCompatibleSveTypes(QualType(Target, 0),
QualType(Source, 0)) ||
S.Context.areLaxCompatibleSveTypes(QualType(Target, 0),
QualType(Source, 0))))
return;
+ if (Target->isRVVVLSBuiltinType() &&
+ (S.Context.areCompatibleRVVTypes(QualType(Target, 0),
+ QualType(Source, 0)) ||
+ S.Context.areLaxCompatibleRVVTypes(QualType(Target, 0),
+ QualType(Source, 0))))
+ return;
+
if (!isa<VectorType>(Target)) {
if (S.SourceMgr.isInSystemMacro(CC))
return;
@@ -12623,6 +15742,29 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source);
const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target);
+ // Strip SVE vector types
+ if (SourceBT && SourceBT->isSveVLSBuiltinType()) {
+ // Need the original target type for vector type checks
+ const Type *OriginalTarget = S.Context.getCanonicalType(T).getTypePtr();
+ // Handle conversion from scalable to fixed when msve-vector-bits is
+ // specified
+ if (S.Context.areCompatibleSveTypes(QualType(OriginalTarget, 0),
+ QualType(Source, 0)) ||
+ S.Context.areLaxCompatibleSveTypes(QualType(OriginalTarget, 0),
+ QualType(Source, 0)))
+ return;
+
+ // If the vector cast is cast between two vectors of the same size, it is
+ // a bitcast, not a conversion.
+ if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target))
+ return;
+
+ Source = SourceBT->getSveEltType(S.Context).getTypePtr();
+ }
+
+ if (TargetBT && TargetBT->isSveVLSBuiltinType())
+ Target = TargetBT->getSveEltType(S.Context).getTypePtr();
+
// If the source is floating point...
if (SourceBT && SourceBT->isFloatingPoint()) {
// ...and the target is floating point...
@@ -12698,7 +15840,7 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
if (Target->isUnsaturatedFixedPointType()) {
Expr::EvalResult Result;
if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects,
- S.isConstantEvaluated())) {
+ S.isConstantEvaluatedContext())) {
llvm::APFixedPoint Value = Result.Val.getFixedPoint();
llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T);
llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T);
@@ -12713,7 +15855,7 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
}
} else if (Target->isIntegerType()) {
Expr::EvalResult Result;
- if (!S.isConstantEvaluated() &&
+ if (!S.isConstantEvaluatedContext() &&
E->EvaluateAsFixedPoint(Result, S.Context,
Expr::SE_AllowSideEffects)) {
llvm::APFixedPoint FXResult = Result.Val.getFixedPoint();
@@ -12736,7 +15878,7 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
} else if (Target->isUnsaturatedFixedPointType()) {
if (Source->isIntegerType()) {
Expr::EvalResult Result;
- if (!S.isConstantEvaluated() &&
+ if (!S.isConstantEvaluatedContext() &&
E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) {
llvm::APSInt Value = Result.Val.getInt();
@@ -12762,8 +15904,9 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
if (SourceBT && TargetBT && SourceBT->isIntegerType() &&
TargetBT->isFloatingType() && !IsListInit) {
// Determine the number of precision bits in the source integer type.
- IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(),
- /*Approximate*/ true);
+ IntRange SourceRange =
+ GetExprRange(S.Context, E, S.isConstantEvaluatedContext(),
+ /*Approximate=*/true);
unsigned int SourcePrecision = SourceRange.Width;
// Determine the number of precision bits in the
@@ -12774,7 +15917,7 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
if (SourcePrecision > 0 && TargetPrecision > 0 &&
SourcePrecision > TargetPrecision) {
- if (Optional<llvm::APSInt> SourceInt =
+ if (std::optional<llvm::APSInt> SourceInt =
E->getIntegerConstantExpr(S.Context)) {
// If the source integer is a constant, convert it to the target
// floating point type. Issue a warning if the value changes
@@ -12831,8 +15974,8 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
IntRange SourceTypeRange =
IntRange::forTargetOfCanonicalType(S.Context, Source);
- IntRange LikelySourceRange =
- GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true);
+ IntRange LikelySourceRange = GetExprRange(
+ S.Context, E, S.isConstantEvaluatedContext(), /*Approximate=*/true);
IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target);
if (LikelySourceRange.Width > TargetRange.Width) {
@@ -12840,7 +15983,7 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
// TODO: this should happen for bitfield stores, too.
Expr::EvalResult Result;
if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects,
- S.isConstantEvaluated())) {
+ S.isConstantEvaluatedContext())) {
llvm::APSInt Value(32);
Value = Result.Val.getInt();
@@ -12908,12 +16051,19 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
// Fall through for non-constants to give a sign conversion warning.
}
- if ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) ||
- (!TargetRange.NonNegative && LikelySourceRange.NonNegative &&
- LikelySourceRange.Width == TargetRange.Width)) {
+ if ((!isa<EnumType>(Target) || !isa<EnumType>(Source)) &&
+ ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) ||
+ (!TargetRange.NonNegative && LikelySourceRange.NonNegative &&
+ LikelySourceRange.Width == TargetRange.Width))) {
if (S.SourceMgr.isInSystemMacro(CC))
return;
+ if (SourceBT && SourceBT->isInteger() && TargetBT &&
+ TargetBT->isInteger() &&
+ Source->isSignedIntegerType() == Target->isSignedIntegerType()) {
+ return;
+ }
+
unsigned DiagID = diag::warn_impcast_integer_sign;
// Traditionally, gcc has warned about this under -Wsign-compare.
@@ -12961,6 +16111,9 @@ static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
static void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
SourceLocation CC, bool &ICContext) {
E = E->IgnoreParenImpCasts();
+ // Diagnose incomplete type for second or third operand in C.
+ if (!S.getLangOpts().CPlusPlus && E->getType()->isRecordType())
+ S.RequireCompleteExprType(E, diag::err_incomplete_type);
if (auto *CO = dyn_cast<AbstractConditionalOperator>(E))
return CheckConditionalOperator(S, CO, CC, T);
@@ -13059,6 +16212,20 @@ static void AnalyzeImplicitConversions(
<< OrigE->getSourceRange() << T->isBooleanType()
<< FixItHint::CreateReplacement(UO->getBeginLoc(), "!");
+ if (const auto *BO = dyn_cast<BinaryOperator>(SourceExpr))
+ if ((BO->getOpcode() == BO_And || BO->getOpcode() == BO_Or) &&
+ BO->getLHS()->isKnownToHaveBooleanValue() &&
+ BO->getRHS()->isKnownToHaveBooleanValue() &&
+ BO->getLHS()->HasSideEffects(S.Context) &&
+ BO->getRHS()->HasSideEffects(S.Context)) {
+ S.Diag(BO->getBeginLoc(), diag::warn_bitwise_instead_of_logical)
+ << (BO->getOpcode() == BO_And ? "&" : "|") << OrigE->getSourceRange()
+ << FixItHint::CreateReplacement(
+ BO->getOperatorLoc(),
+ (BO->getOpcode() == BO_And ? "&&" : "||"));
+ S.Diag(BO->getBeginLoc(), diag::note_cast_operand_to_int);
+ }
+
// For conditional operators, we analyze the arguments as if they
// were being fed directly into the output.
if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) {
@@ -13127,6 +16294,13 @@ static void AnalyzeImplicitConversions(
if (!ChildExpr)
continue;
+ if (auto *CSE = dyn_cast<CoroutineSuspendExpr>(E))
+ if (ChildExpr == CSE->getOperand())
+ // Do not recurse over a CoroutineSuspendExpr's operand.
+ // The operand is also a subexpression of getCommonExpr(), and
+ // recursing into it directly would produce duplicate diagnostics.
+ continue;
+
if (IsLogicalAndOperator &&
isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts()))
// Ignore checking string literals that are in logical and operators.
@@ -13262,7 +16436,7 @@ void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
bool IsAddressOf = false;
- if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ if (auto *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
if (UO->getOpcode() != UO_AddrOf)
return;
IsAddressOf = true;
@@ -13454,27 +16628,40 @@ void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
/// Diagnose when expression is an integer constant expression and its evaluation
/// results in integer overflow
-void Sema::CheckForIntOverflow (Expr *E) {
+void Sema::CheckForIntOverflow (const Expr *E) {
// Use a work list to deal with nested struct initializers.
- SmallVector<Expr *, 2> Exprs(1, E);
+ SmallVector<const Expr *, 2> Exprs(1, E);
do {
- Expr *OriginalE = Exprs.pop_back_val();
- Expr *E = OriginalE->IgnoreParenCasts();
+ const Expr *OriginalE = Exprs.pop_back_val();
+ const Expr *E = OriginalE->IgnoreParenCasts();
- if (isa<BinaryOperator>(E)) {
+ if (isa<BinaryOperator, UnaryOperator>(E)) {
E->EvaluateForOverflow(Context);
continue;
}
- if (auto InitList = dyn_cast<InitListExpr>(OriginalE))
+ if (const auto *InitList = dyn_cast<InitListExpr>(OriginalE))
Exprs.append(InitList->inits().begin(), InitList->inits().end());
else if (isa<ObjCBoxedExpr>(OriginalE))
E->EvaluateForOverflow(Context);
- else if (auto Call = dyn_cast<CallExpr>(E))
+ else if (const auto *Call = dyn_cast<CallExpr>(E))
Exprs.append(Call->arg_begin(), Call->arg_end());
- else if (auto Message = dyn_cast<ObjCMessageExpr>(E))
+ else if (const auto *Message = dyn_cast<ObjCMessageExpr>(E))
Exprs.append(Message->arg_begin(), Message->arg_end());
+ else if (const auto *Construct = dyn_cast<CXXConstructExpr>(E))
+ Exprs.append(Construct->arg_begin(), Construct->arg_end());
+ else if (const auto *Temporary = dyn_cast<CXXBindTemporaryExpr>(E))
+ Exprs.push_back(Temporary->getSubExpr());
+ else if (const auto *Array = dyn_cast<ArraySubscriptExpr>(E))
+ Exprs.push_back(Array->getIdx());
+ else if (const auto *Compound = dyn_cast<CompoundLiteralExpr>(E))
+ Exprs.push_back(Compound->getInitializer());
+ else if (const auto *New = dyn_cast<CXXNewExpr>(E);
+ New && New->isArray()) {
+ if (auto ArraySize = New->getArraySize())
+ Exprs.push_back(*ArraySize);
+ }
} while (!Exprs.empty());
}
@@ -13575,19 +16762,19 @@ class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> {
/// Bundle together a sequencing region and the expression corresponding
/// to a specific usage. One Usage is stored for each usage kind in UsageInfo.
struct Usage {
- const Expr *UsageExpr;
+ const Expr *UsageExpr = nullptr;
SequenceTree::Seq Seq;
- Usage() : UsageExpr(nullptr), Seq() {}
+ Usage() = default;
};
struct UsageInfo {
Usage Uses[UK_Count];
/// Have we issued a diagnostic for this object already?
- bool Diagnosed;
+ bool Diagnosed = false;
- UsageInfo() : Uses(), Diagnosed(false) {}
+ UsageInfo();
};
using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>;
@@ -13660,7 +16847,8 @@ class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> {
if (!EvalOK || E->isValueDependent())
return false;
EvalOK = E->EvaluateAsBooleanCondition(
- Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated());
+ Result, Self.SemaRef.Context,
+ Self.SemaRef.isConstantEvaluatedContext());
return EvalOK;
}
@@ -13809,6 +16997,23 @@ public:
Base::VisitStmt(E);
}
+ void VisitCoroutineSuspendExpr(const CoroutineSuspendExpr *CSE) {
+ for (auto *Sub : CSE->children()) {
+ const Expr *ChildExpr = dyn_cast_or_null<Expr>(Sub);
+ if (!ChildExpr)
+ continue;
+
+ if (ChildExpr == CSE->getOperand())
+ // Do not recurse over a CoroutineSuspendExpr's operand.
+ // The operand is also a subexpression of getCommonExpr(), and
+ // recursing into it directly could confuse object management
+ // for the sake of sequence tracking.
+ continue;
+
+ Visit(Sub);
+ }
+ }
+
void VisitCastExpr(const CastExpr *E) {
Object O = Object();
if (E->getCastKind() == CK_LValueToRValue)
@@ -14328,6 +17533,8 @@ public:
}
};
+SequenceChecker::UsageInfo::UsageInfo() = default;
+
} // namespace
void Sema::CheckUnsequencedOperations(const Expr *E) {
@@ -14341,8 +17548,8 @@ void Sema::CheckUnsequencedOperations(const Expr *E) {
void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc,
bool IsConstexpr) {
- llvm::SaveAndRestore<bool> ConstantContext(
- isConstantEvaluatedOverride, IsConstexpr || isa<ConstantExpr>(E));
+ llvm::SaveAndRestore ConstantContext(isConstantEvaluatedOverride,
+ IsConstexpr || isa<ConstantExpr>(E));
CheckImplicitConversions(E, CheckLoc);
if (!E->isInstantiationDependent())
CheckUnsequencedOperations(E);
@@ -14378,7 +17585,7 @@ static void diagnoseArrayStarInParamType(Sema &S, QualType PType,
if (!AT)
return;
- if (AT->getSizeModifier() != ArrayType::Star) {
+ if (AT->getSizeModifier() != ArraySizeModifier::Star) {
diagnoseArrayStarInParamType(S, AT->getElementType(), Loc);
return;
}
@@ -14395,14 +17602,21 @@ bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames) {
bool HasInvalidParm = false;
for (ParmVarDecl *Param : Parameters) {
+ assert(Param && "null in a parameter list");
// C99 6.7.5.3p4: the parameters in a parameter type list in a
// function declarator that is part of a function definition of
// that function shall not have incomplete type.
//
- // This is also C++ [dcl.fct]p6.
+ // C++23 [dcl.fct.def.general]/p2
+ // The type of a parameter [...] for a function definition
+ // shall not be a (possibly cv-qualified) class type that is incomplete
+ // or abstract within the function body unless the function is deleted.
if (!Param->isInvalidDecl() &&
- RequireCompleteType(Param->getLocation(), Param->getType(),
- diag::err_typecheck_decl_incomplete_type)) {
+ (RequireCompleteType(Param->getLocation(), Param->getType(),
+ diag::err_typecheck_decl_incomplete_type) ||
+ RequireNonAbstractType(Param->getBeginLoc(), Param->getOriginalType(),
+ diag::err_abstract_type_in_decl,
+ AbstractParamType))) {
Param->setInvalidDecl();
HasInvalidParm = true;
}
@@ -14412,8 +17626,8 @@ bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
if (CheckParameterNames && Param->getIdentifier() == nullptr &&
!Param->isImplicit() && !getLangOpts().CPlusPlus) {
// Diagnose this as an extension in C17 and earlier.
- if (!getLangOpts().C2x)
- Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x);
+ if (!getLangOpts().C23)
+ Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c23);
}
// C99 6.7.5.3p12:
@@ -14462,13 +17676,23 @@ bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
RD, /*DeclIsField*/ false);
}
}
+
+ if (!Param->isInvalidDecl() &&
+ Param->getOriginalType()->isWebAssemblyTableType()) {
+ Param->setInvalidDecl();
+ HasInvalidParm = true;
+ Diag(Param->getLocation(), diag::err_wasm_table_as_function_parameter);
+ }
}
return HasInvalidParm;
}
-Optional<std::pair<CharUnits, CharUnits>>
-static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx);
+std::optional<std::pair<
+ CharUnits, CharUnits>> static getBaseAlignmentAndOffsetFromPtr(const Expr
+ *E,
+ ASTContext
+ &Ctx);
/// Compute the alignment and offset of the base class object given the
/// derived-to-base cast expression and the alignment and offset of the derived
@@ -14502,21 +17726,21 @@ getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType,
}
/// Compute the alignment and offset of a binary additive operator.
-static Optional<std::pair<CharUnits, CharUnits>>
+static std::optional<std::pair<CharUnits, CharUnits>>
getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE,
bool IsSub, ASTContext &Ctx) {
QualType PointeeType = PtrE->getType()->getPointeeType();
if (!PointeeType->isConstantSizeType())
- return llvm::None;
+ return std::nullopt;
auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx);
if (!P)
- return llvm::None;
+ return std::nullopt;
CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType);
- if (Optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) {
+ if (std::optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) {
CharUnits Offset = EltSize * IdxRes->getExtValue();
if (IsSub)
Offset = -Offset;
@@ -14533,8 +17757,10 @@ getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE,
/// This helper function takes an lvalue expression and returns the alignment of
/// a VarDecl and a constant offset from the VarDecl.
-Optional<std::pair<CharUnits, CharUnits>>
-static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) {
+std::optional<std::pair<
+ CharUnits,
+ CharUnits>> static getBaseAlignmentAndOffsetFromLValue(const Expr *E,
+ ASTContext &Ctx) {
E = E->IgnoreParens();
switch (E->getStmtClass()) {
default:
@@ -14569,8 +17795,12 @@ static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) {
if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) {
// FIXME: If VD is captured by copy or is an escaping __block variable,
// use the alignment of VD's type.
- if (!VD->getType()->isReferenceType())
+ if (!VD->getType()->isReferenceType()) {
+ // Dependent alignment cannot be resolved -> bail out.
+ if (VD->hasDependentAlignment())
+ break;
return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero());
+ }
if (VD->hasInit())
return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx);
}
@@ -14582,7 +17812,7 @@ static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) {
if (!FD || FD->getType()->isReferenceType() ||
FD->getParent()->isInvalidDecl())
break;
- Optional<std::pair<CharUnits, CharUnits>> P;
+ std::optional<std::pair<CharUnits, CharUnits>> P;
if (ME->isArrow())
P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx);
else
@@ -14616,13 +17846,16 @@ static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) {
break;
}
}
- return llvm::None;
+ return std::nullopt;
}
/// This helper function takes a pointer expression and returns the alignment of
/// a VarDecl and a constant offset from the VarDecl.
-Optional<std::pair<CharUnits, CharUnits>>
-static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) {
+std::optional<std::pair<
+ CharUnits, CharUnits>> static getBaseAlignmentAndOffsetFromPtr(const Expr
+ *E,
+ ASTContext
+ &Ctx) {
E = E->IgnoreParens();
switch (E->getStmtClass()) {
default:
@@ -14681,12 +17914,12 @@ static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) {
break;
}
}
- return llvm::None;
+ return std::nullopt;
}
static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) {
// See if we can compute the alignment of a VarDecl and an offset from it.
- Optional<std::pair<CharUnits, CharUnits>> P =
+ std::optional<std::pair<CharUnits, CharUnits>> P =
getBaseAlignmentAndOffsetFromPtr(E, S.Context);
if (P)
@@ -14740,58 +17973,11 @@ void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
<< TRange << Op->getSourceRange();
}
-/// Check whether this array fits the idiom of a size-one tail padded
-/// array member of a struct.
-///
-/// We avoid emitting out-of-bounds access warnings for such arrays as they are
-/// commonly used to emulate flexible arrays in C89 code.
-static bool IsTailPaddedMemberArray(Sema &S, const llvm::APInt &Size,
- const NamedDecl *ND) {
- if (Size != 1 || !ND) return false;
-
- const FieldDecl *FD = dyn_cast<FieldDecl>(ND);
- if (!FD) return false;
-
- // Don't consider sizes resulting from macro expansions or template argument
- // substitution to form C89 tail-padded arrays.
-
- TypeSourceInfo *TInfo = FD->getTypeSourceInfo();
- while (TInfo) {
- TypeLoc TL = TInfo->getTypeLoc();
- // Look through typedefs.
- if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) {
- const TypedefNameDecl *TDL = TTL.getTypedefNameDecl();
- TInfo = TDL->getTypeSourceInfo();
- continue;
- }
- if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) {
- const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr());
- if (!SizeExpr || SizeExpr->getExprLoc().isMacroID())
- return false;
- }
- break;
- }
-
- const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext());
- if (!RD) return false;
- if (RD->isUnion()) return false;
- if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
- if (!CRD->isStandardLayout()) return false;
- }
-
- // See if this is the last field decl in the record.
- const Decl *D = FD;
- while ((D = D->getNextDeclInContext()))
- if (isa<FieldDecl>(D))
- return false;
- return true;
-}
-
void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE,
bool AllowOnePastEnd, bool IndexNegated) {
// Already diagnosed by the constant evaluator.
- if (isConstantEvaluated())
+ if (isConstantEvaluatedContext())
return;
IndexExpr = IndexExpr->IgnoreParenImpCasts();
@@ -14804,9 +17990,15 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ConstantArrayType *ArrayTy =
Context.getAsConstantArrayType(BaseExpr->getType());
+ LangOptions::StrictFlexArraysLevelKind
+ StrictFlexArraysLevel = getLangOpts().getStrictFlexArraysLevel();
+
const Type *BaseType =
ArrayTy == nullptr ? nullptr : ArrayTy->getElementType().getTypePtr();
- bool IsUnboundedArray = (BaseType == nullptr);
+ bool IsUnboundedArray =
+ BaseType == nullptr || BaseExpr->isFlexibleArrayMemberLike(
+ Context, StrictFlexArraysLevel,
+ /*IgnoreTemplateOrMacroSubstitution=*/true);
if (EffectiveType->isDependentType() ||
(!IsUnboundedArray && BaseType->isDependentType()))
return;
@@ -14821,25 +18013,20 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
index = -index;
}
- const NamedDecl *ND = nullptr;
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
- ND = DRE->getDecl();
- if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr))
- ND = ME->getMemberDecl();
-
if (IsUnboundedArray) {
+ if (EffectiveType->isFunctionType())
+ return;
if (index.isUnsigned() || !index.isNegative()) {
const auto &ASTC = getASTContext();
- unsigned AddrBits =
- ASTC.getTargetInfo().getPointerWidth(ASTC.getTargetAddressSpace(
- EffectiveType->getCanonicalTypeInternal()));
+ unsigned AddrBits = ASTC.getTargetInfo().getPointerWidth(
+ EffectiveType->getCanonicalTypeInternal().getAddressSpace());
if (index.getBitWidth() < AddrBits)
index = index.zext(AddrBits);
- Optional<CharUnits> ElemCharUnits =
+ std::optional<CharUnits> ElemCharUnits =
ASTC.getTypeSizeInCharsIfKnown(EffectiveType);
// PR50741 - If EffectiveType has unknown size (e.g., if it's a void
// pointer) bounds-checking isn't meaningful.
- if (!ElemCharUnits)
+ if (!ElemCharUnits || ElemCharUnits->isZero())
return;
llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity());
// If index has more active bits than address space, we already know
@@ -14878,15 +18065,14 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
<< (unsigned)MaxElems.getLimitedValue(~0U)
<< IndexExpr->getSourceRange());
- if (!ND) {
- // Try harder to find a NamedDecl to point at in the note.
- while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr))
- BaseExpr = ASE->getBase()->IgnoreParenCasts();
- if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
- ND = DRE->getDecl();
- if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr))
- ND = ME->getMemberDecl();
- }
+ const NamedDecl *ND = nullptr;
+ // Try harder to find a NamedDecl to point at in the note.
+ while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr))
+ BaseExpr = ASE->getBase()->IgnoreParenCasts();
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
+ ND = DRE->getDecl();
+ if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr))
+ ND = ME->getMemberDecl();
if (ND)
DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
@@ -14906,20 +18092,24 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
return;
llvm::APInt size = ArrayTy->getSize();
- if (!size.isStrictlyPositive())
- return;
if (BaseType != EffectiveType) {
- // Make sure we're comparing apples to apples when comparing index to size
+ // Make sure we're comparing apples to apples when comparing index to
+ // size.
uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType);
uint64_t array_typesize = Context.getTypeSize(BaseType);
- // Handle ptrarith_typesize being zero, such as when casting to void*
- if (!ptrarith_typesize) ptrarith_typesize = 1;
+
+ // Handle ptrarith_typesize being zero, such as when casting to void*.
+ // Use the size in bits (what "getTypeSize()" returns) rather than bytes.
+ if (!ptrarith_typesize)
+ ptrarith_typesize = Context.getCharWidth();
+
if (ptrarith_typesize != array_typesize) {
- // There's a cast to a different size type involved
+ // There's a cast to a different size type involved.
uint64_t ratio = array_typesize / ptrarith_typesize;
+
// TODO: Be smarter about handling cases where array_typesize is not a
- // multiple of ptrarith_typesize
+ // multiple of ptrarith_typesize.
if (ptrarith_typesize * ratio == array_typesize)
size *= llvm::APInt(size.getBitWidth(), ratio);
}
@@ -14937,12 +18127,6 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
if (AllowOnePastEnd ? index.ule(size) : index.ult(size))
return;
- // Also don't warn for arrays of size 1 which are members of some
- // structure. These are often used to approximate flexible arrays in C89
- // code.
- if (IsTailPaddedMemberArray(*this, size, ND))
- return;
-
// Suppress the warning if the subscript expression (as identified by the
// ']' location) and the index expression are both from macro expansions
// within a system header.
@@ -14959,12 +18143,13 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
unsigned DiagID = ASE ? diag::warn_array_index_exceeds_bounds
: diag::warn_ptr_arith_exceeds_bounds;
+ unsigned CastMsg = (!ASE || BaseType == EffectiveType) ? 0 : 1;
+ QualType CastMsgTy = ASE ? ASE->getLHS()->getType() : QualType();
- DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
- PDiag(DiagID) << toString(index, 10, true)
- << toString(size, 10, true)
- << (unsigned)size.getLimitedValue(~0U)
- << IndexExpr->getSourceRange());
+ DiagRuntimeBehavior(
+ BaseExpr->getBeginLoc(), BaseExpr,
+ PDiag(DiagID) << toString(index, 10, true) << ArrayTy->desugar()
+ << CastMsg << CastMsgTy << IndexExpr->getSourceRange());
} else {
unsigned DiagID = diag::warn_array_index_precedes_bounds;
if (!ASE) {
@@ -14977,15 +18162,14 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
<< IndexExpr->getSourceRange());
}
- if (!ND) {
- // Try harder to find a NamedDecl to point at in the note.
- while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr))
- BaseExpr = ASE->getBase()->IgnoreParenCasts();
- if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
- ND = DRE->getDecl();
- if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr))
- ND = ME->getMemberDecl();
- }
+ const NamedDecl *ND = nullptr;
+ // Try harder to find a NamedDecl to point at in the note.
+ while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr))
+ BaseExpr = ASE->getBase()->IgnoreParenCasts();
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
+ ND = DRE->getDecl();
+ if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr))
+ ND = ME->getMemberDecl();
if (ND)
DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
@@ -15168,14 +18352,13 @@ static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) {
namespace {
struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> {
- ASTContext &Context;
VarDecl *Variable;
Expr *Capturer = nullptr;
bool VarWillBeReased = false;
FindCaptureVisitor(ASTContext &Context, VarDecl *variable)
: EvaluatedExprVisitor<FindCaptureVisitor>(Context),
- Context(Context), Variable(variable) {}
+ Variable(variable) {}
void VisitDeclRefExpr(DeclRefExpr *ref) {
if (ref->getDecl() == Variable && !Capturer)
@@ -15210,7 +18393,7 @@ namespace {
return;
if (Expr *RHS = BinOp->getRHS()) {
RHS = RHS->IgnoreParenCasts();
- Optional<llvm::APSInt> Value;
+ std::optional<llvm::APSInt> Value;
VarWillBeReased =
(RHS && (Value = RHS->getIntegerConstantExpr(Context)) &&
*Value == 0);
@@ -15275,37 +18458,36 @@ static bool isSetterLikeSelector(Selector sel) {
if (sel.isUnarySelector()) return false;
StringRef str = sel.getNameForSlot(0);
- while (!str.empty() && str.front() == '_') str = str.substr(1);
- if (str.startswith("set"))
+ str = str.ltrim('_');
+ if (str.starts_with("set"))
str = str.substr(3);
- else if (str.startswith("add")) {
+ else if (str.starts_with("add")) {
// Specially allow 'addOperationWithBlock:'.
- if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock"))
+ if (sel.getNumArgs() == 1 && str.starts_with("addOperationWithBlock"))
return false;
str = str.substr(3);
- }
- else
+ } else
return false;
if (str.empty()) return true;
return !isLowercase(str.front());
}
-static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S,
- ObjCMessageExpr *Message) {
+static std::optional<int>
+GetNSMutableArrayArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass(
Message->getReceiverInterface(),
NSAPI::ClassId_NSMutableArray);
if (!IsMutableArray) {
- return None;
+ return std::nullopt;
}
Selector Sel = Message->getSelector();
- Optional<NSAPI::NSArrayMethodKind> MKOpt =
- S.NSAPIObj->getNSArrayMethodKind(Sel);
+ std::optional<NSAPI::NSArrayMethodKind> MKOpt =
+ S.NSAPIObj->getNSArrayMethodKind(Sel);
if (!MKOpt) {
- return None;
+ return std::nullopt;
}
NSAPI::NSArrayMethodKind MK = *MKOpt;
@@ -15319,28 +18501,27 @@ static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S,
return 1;
default:
- return None;
+ return std::nullopt;
}
- return None;
+ return std::nullopt;
}
-static
-Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S,
- ObjCMessageExpr *Message) {
+static std::optional<int>
+GetNSMutableDictionaryArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass(
Message->getReceiverInterface(),
NSAPI::ClassId_NSMutableDictionary);
if (!IsMutableDictionary) {
- return None;
+ return std::nullopt;
}
Selector Sel = Message->getSelector();
- Optional<NSAPI::NSDictionaryMethodKind> MKOpt =
- S.NSAPIObj->getNSDictionaryMethodKind(Sel);
+ std::optional<NSAPI::NSDictionaryMethodKind> MKOpt =
+ S.NSAPIObj->getNSDictionaryMethodKind(Sel);
if (!MKOpt) {
- return None;
+ return std::nullopt;
}
NSAPI::NSDictionaryMethodKind MK = *MKOpt;
@@ -15352,13 +18533,14 @@ Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S,
return 0;
default:
- return None;
+ return std::nullopt;
}
- return None;
+ return std::nullopt;
}
-static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
+static std::optional<int> GetNSSetArgumentIndex(Sema &S,
+ ObjCMessageExpr *Message) {
bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass(
Message->getReceiverInterface(),
NSAPI::ClassId_NSMutableSet);
@@ -15367,14 +18549,15 @@ static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
Message->getReceiverInterface(),
NSAPI::ClassId_NSMutableOrderedSet);
if (!IsMutableSet && !IsMutableOrderedSet) {
- return None;
+ return std::nullopt;
}
Selector Sel = Message->getSelector();
- Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel);
+ std::optional<NSAPI::NSSetMethodKind> MKOpt =
+ S.NSAPIObj->getNSSetMethodKind(Sel);
if (!MKOpt) {
- return None;
+ return std::nullopt;
}
NSAPI::NSSetMethodKind MK = *MKOpt;
@@ -15389,7 +18572,7 @@ static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
return 1;
}
- return None;
+ return std::nullopt;
}
void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
@@ -15397,7 +18580,7 @@ void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
return;
}
- Optional<int> ArgOpt;
+ std::optional<int> ArgOpt;
if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) &&
!(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) &&
@@ -15695,7 +18878,7 @@ void Sema::DiagnoseEmptyLoopBody(const Stmt *S,
Body = FS->getBody();
DiagID = diag::warn_empty_for_body;
} else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) {
- StmtLoc = WS->getCond()->getSourceRange().getEnd();
+ StmtLoc = WS->getRParenLoc();
Body = WS->getBody();
DiagID = diag::warn_empty_while_body;
} else
@@ -15787,9 +18970,15 @@ void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
RHSDeclRef->getDecl()->getCanonicalDecl())
return;
- Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
- << LHSExpr->getSourceRange()
- << RHSExpr->getSourceRange();
+ auto D = Diag(OpLoc, diag::warn_self_move)
+ << LHSExpr->getType() << LHSExpr->getSourceRange()
+ << RHSExpr->getSourceRange();
+ if (const FieldDecl *F =
+ getSelfAssignmentClassMemberCandidate(RHSDeclRef->getDecl()))
+ D << 1 << F
+ << FixItHint::CreateInsertion(LHSDeclRef->getBeginLoc(), "this->");
+ else
+ D << 0;
return;
}
@@ -15824,16 +19013,16 @@ void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
RHSDeclRef->getDecl()->getCanonicalDecl())
return;
- Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
- << LHSExpr->getSourceRange()
- << RHSExpr->getSourceRange();
+ Diag(OpLoc, diag::warn_self_move)
+ << LHSExpr->getType() << 0 << LHSExpr->getSourceRange()
+ << RHSExpr->getSourceRange();
return;
}
if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase))
- Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType()
- << LHSExpr->getSourceRange()
- << RHSExpr->getSourceRange();
+ Diag(OpLoc, diag::warn_self_move)
+ << LHSExpr->getType() << 0 << LHSExpr->getSourceRange()
+ << RHSExpr->getSourceRange();
}
//===--- Layout compatibility ----------------------------------------------//
@@ -15999,7 +19188,7 @@ static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
///
/// \param MagicValue Type tag magic value.
///
-/// \param isConstantEvaluated wether the evalaution should be performed in
+/// \param isConstantEvaluated whether the evalaution should be performed in
/// constant context.
static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
@@ -16079,7 +19268,7 @@ static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
///
/// \param TypeInfo Information about the corresponding C type.
///
-/// \param isConstantEvaluated wether the evalaution should be performed in
+/// \param isConstantEvaluated whether the evalaution should be performed in
/// constant context.
///
/// \returns true if the corresponding C type was found.
@@ -16176,7 +19365,7 @@ void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
TypeTagData TypeInfo;
if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context,
TypeTagForDatatypeMagicValues.get(), FoundWrongKind,
- TypeInfo, isConstantEvaluated())) {
+ TypeInfo, isConstantEvaluatedContext())) {
if (FoundWrongKind)
Diag(TypeTagExpr->getExprLoc(),
diag::warn_type_tag_for_datatype_wrong_kind)
@@ -16272,15 +19461,15 @@ void Sema::DiagnoseMisalignedMembers() {
void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
E = E->IgnoreParens();
- if (!T->isPointerType() && !T->isIntegerType())
+ if (!T->isPointerType() && !T->isIntegerType() && !T->isDependentType())
return;
if (isa<UnaryOperator>(E) &&
cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) {
auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
if (isa<MemberExpr>(Op)) {
- auto MA = llvm::find(MisalignedMembers, MisalignedMember(Op));
+ auto *MA = llvm::find(MisalignedMembers, MisalignedMember(Op));
if (MA != MisalignedMembers.end() &&
- (T->isIntegerType() ||
+ (T->isDependentType() || T->isIntegerType() ||
(T->isPointerType() && (T->getPointeeType()->isIncompleteType() ||
Context.getTypeAlignInChars(
T->getPointeeType()) <= MA->Alignment))))
@@ -16352,10 +19541,8 @@ void Sema::RefersToMemberWithReducedAlignment(
// Synthesize offset of the whole access.
CharUnits Offset;
- for (auto I = ReverseMemberChain.rbegin(); I != ReverseMemberChain.rend();
- I++) {
- Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(*I));
- }
+ for (const FieldDecl *FD : llvm::reverse(ReverseMemberChain))
+ Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(FD));
// Compute the CompleteObjectAlignment as the alignment of the whole chain.
CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
@@ -16408,6 +19595,115 @@ void Sema::CheckAddressOfPackedMember(Expr *rhs) {
_2, _3, _4));
}
+bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) {
+ if (checkArgCount(*this, TheCall, 1))
+ return true;
+
+ ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
+ if (A.isInvalid())
+ return true;
+
+ TheCall->setArg(0, A.get());
+ QualType TyA = A.get()->getType();
+
+ if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA))
+ return true;
+
+ TheCall->setType(TyA);
+ return false;
+}
+
+bool Sema::SemaBuiltinElementwiseMath(CallExpr *TheCall) {
+ if (checkArgCount(*this, TheCall, 2))
+ return true;
+
+ ExprResult A = TheCall->getArg(0);
+ ExprResult B = TheCall->getArg(1);
+ // Do standard promotions between the two arguments, returning their common
+ // type.
+ QualType Res =
+ UsualArithmeticConversions(A, B, TheCall->getExprLoc(), ACK_Comparison);
+ if (A.isInvalid() || B.isInvalid())
+ return true;
+
+ QualType TyA = A.get()->getType();
+ QualType TyB = B.get()->getType();
+
+ if (Res.isNull() || TyA.getCanonicalType() != TyB.getCanonicalType())
+ return Diag(A.get()->getBeginLoc(),
+ diag::err_typecheck_call_different_arg_types)
+ << TyA << TyB;
+
+ if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA))
+ return true;
+
+ TheCall->setArg(0, A.get());
+ TheCall->setArg(1, B.get());
+ TheCall->setType(Res);
+ return false;
+}
+
+bool Sema::SemaBuiltinElementwiseTernaryMath(CallExpr *TheCall) {
+ if (checkArgCount(*this, TheCall, 3))
+ return true;
+
+ Expr *Args[3];
+ for (int I = 0; I < 3; ++I) {
+ ExprResult Converted = UsualUnaryConversions(TheCall->getArg(I));
+ if (Converted.isInvalid())
+ return true;
+ Args[I] = Converted.get();
+ }
+
+ int ArgOrdinal = 1;
+ for (Expr *Arg : Args) {
+ if (checkFPMathBuiltinElementType(*this, Arg->getBeginLoc(), Arg->getType(),
+ ArgOrdinal++))
+ return true;
+ }
+
+ for (int I = 1; I < 3; ++I) {
+ if (Args[0]->getType().getCanonicalType() !=
+ Args[I]->getType().getCanonicalType()) {
+ return Diag(Args[0]->getBeginLoc(),
+ diag::err_typecheck_call_different_arg_types)
+ << Args[0]->getType() << Args[I]->getType();
+ }
+
+ TheCall->setArg(I, Args[I]);
+ }
+
+ TheCall->setType(Args[0]->getType());
+ return false;
+}
+
+bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) {
+ if (checkArgCount(*this, TheCall, 1))
+ return true;
+
+ ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
+ if (A.isInvalid())
+ return true;
+
+ TheCall->setArg(0, A.get());
+ return false;
+}
+
+bool Sema::SemaBuiltinNonDeterministicValue(CallExpr *TheCall) {
+ if (checkArgCount(*this, TheCall, 1))
+ return true;
+
+ ExprResult Arg = TheCall->getArg(0);
+ QualType TyArg = Arg.get()->getType();
+
+ if (!TyArg->isBuiltinType() && !TyArg->isVectorType())
+ return Diag(TheCall->getArg(0)->getBeginLoc(), diag::err_builtin_invalid_arg_type)
+ << 1 << /*vector, integer or floating point ty*/ 0 << TyArg;
+
+ TheCall->setType(TyArg);
+ return false;
+}
+
ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult) {
if (checkArgCount(*this, TheCall, 1))
@@ -16420,7 +19716,8 @@ ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall,
auto *MType = Matrix->getType()->getAs<ConstantMatrixType>();
if (!MType) {
- Diag(Matrix->getBeginLoc(), diag::err_builtin_matrix_arg);
+ Diag(Matrix->getBeginLoc(), diag::err_builtin_invalid_arg_type)
+ << 1 << /* matrix ty*/ 1 << Matrix->getType();
return ExprError();
}
@@ -16438,10 +19735,10 @@ ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall,
}
// Get and verify the matrix dimensions.
-static llvm::Optional<unsigned>
+static std::optional<unsigned>
getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) {
SourceLocation ErrorPos;
- Optional<llvm::APSInt> Value =
+ std::optional<llvm::APSInt> Value =
Expr->getIntegerConstantExpr(S.Context, &ErrorPos);
if (!Value) {
S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg)
@@ -16491,15 +19788,16 @@ ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
QualType ElementTy;
if (!PtrTy) {
- Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg)
- << PtrArgIdx + 1;
+ Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
+ << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType();
ArgError = true;
} else {
ElementTy = PtrTy->getPointeeType().getUnqualifiedType();
if (!ConstantMatrixType::isValidElementType(ElementTy)) {
- Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg)
- << PtrArgIdx + 1;
+ Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
+ << PtrArgIdx + 1 << /* pointer to element ty*/ 2
+ << PtrExpr->getType();
ArgError = true;
}
}
@@ -16528,7 +19826,7 @@ ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
} else
ColumnsExpr = nullptr;
- // If any any part of the result matrix type is still pending, just use
+ // If any part of the result matrix type is still pending, just use
// Context.DependentTy, until all parts are resolved.
if ((RowsExpr && RowsExpr->isTypeDependent()) ||
(ColumnsExpr && ColumnsExpr->isTypeDependent())) {
@@ -16536,12 +19834,12 @@ ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
return CallResult;
}
- // Check row and column dimenions.
- llvm::Optional<unsigned> MaybeRows;
+ // Check row and column dimensions.
+ std::optional<unsigned> MaybeRows;
if (RowsExpr)
MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this);
- llvm::Optional<unsigned> MaybeColumns;
+ std::optional<unsigned> MaybeColumns;
if (ColumnsExpr)
MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this);
@@ -16553,7 +19851,7 @@ ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
TheCall->setArg(3, StrideExpr);
if (MaybeRows) {
- if (Optional<llvm::APSInt> Value =
+ if (std::optional<llvm::APSInt> Value =
StrideExpr->getIntegerConstantExpr(Context)) {
uint64_t Stride = Value->getZExtValue();
if (Stride < *MaybeRows) {
@@ -16598,7 +19896,8 @@ ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>();
if (!MatrixTy) {
- Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_matrix_arg) << 0;
+ Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
+ << 1 << /*matrix ty */ 1 << MatrixExpr->getType();
ArgError = true;
}
@@ -16617,8 +19916,8 @@ ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
// Check pointer argument.
auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
if (!PtrTy) {
- Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_pointer_arg)
- << PtrArgIdx + 1;
+ Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
+ << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType();
ArgError = true;
} else {
QualType ElementTy = PtrTy->getPointeeType();
@@ -16652,7 +19951,7 @@ ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
// Check stride argument.
if (MatrixTy) {
- if (Optional<llvm::APSInt> Value =
+ if (std::optional<llvm::APSInt> Value =
StrideExpr->getIntegerConstantExpr(Context)) {
uint64_t Stride = Value->getZExtValue();
if (Stride < MatrixTy->getNumRows()) {
@@ -16669,37 +19968,198 @@ ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
return CallResult;
}
+/// Checks the argument at the given index is a WebAssembly table and if it
+/// is, sets ElTy to the element type.
+static bool CheckWasmBuiltinArgIsTable(Sema &S, CallExpr *E, unsigned ArgIndex,
+ QualType &ElTy) {
+ Expr *ArgExpr = E->getArg(ArgIndex);
+ const auto *ATy = dyn_cast<ArrayType>(ArgExpr->getType());
+ if (!ATy || !ATy->getElementType().isWebAssemblyReferenceType()) {
+ return S.Diag(ArgExpr->getBeginLoc(),
+ diag::err_wasm_builtin_arg_must_be_table_type)
+ << ArgIndex + 1 << ArgExpr->getSourceRange();
+ }
+ ElTy = ATy->getElementType();
+ return false;
+}
+
+/// Checks the argument at the given index is an integer.
+static bool CheckWasmBuiltinArgIsInteger(Sema &S, CallExpr *E,
+ unsigned ArgIndex) {
+ Expr *ArgExpr = E->getArg(ArgIndex);
+ if (!ArgExpr->getType()->isIntegerType()) {
+ return S.Diag(ArgExpr->getBeginLoc(),
+ diag::err_wasm_builtin_arg_must_be_integer_type)
+ << ArgIndex + 1 << ArgExpr->getSourceRange();
+ }
+ return false;
+}
+
+/// Check that the first argument is a WebAssembly table, and the second
+/// is an index to use as index into the table.
+bool Sema::BuiltinWasmTableGet(CallExpr *TheCall) {
+ if (checkArgCount(*this, TheCall, 2))
+ return true;
+
+ QualType ElTy;
+ if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
+ return true;
+
+ if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
+ return true;
+
+ // If all is well, we set the type of TheCall to be the type of the
+ // element of the table.
+ // i.e. a table.get on an externref table has type externref,
+ // or whatever the type of the table element is.
+ TheCall->setType(ElTy);
+
+ return false;
+}
+
+/// Check that the first argumnet is a WebAssembly table, the second is
+/// an index to use as index into the table and the third is the reference
+/// type to set into the table.
+bool Sema::BuiltinWasmTableSet(CallExpr *TheCall) {
+ if (checkArgCount(*this, TheCall, 3))
+ return true;
+
+ QualType ElTy;
+ if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
+ return true;
+
+ if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
+ return true;
+
+ if (!Context.hasSameType(ElTy, TheCall->getArg(2)->getType()))
+ return true;
+
+ return false;
+}
+
+/// Check that the argument is a WebAssembly table.
+bool Sema::BuiltinWasmTableSize(CallExpr *TheCall) {
+ if (checkArgCount(*this, TheCall, 1))
+ return true;
+
+ QualType ElTy;
+ if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
+ return true;
+
+ return false;
+}
+
+/// Check that the first argument is a WebAssembly table, the second is the
+/// value to use for new elements (of a type matching the table type), the
+/// third value is an integer.
+bool Sema::BuiltinWasmTableGrow(CallExpr *TheCall) {
+ if (checkArgCount(*this, TheCall, 3))
+ return true;
+
+ QualType ElTy;
+ if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
+ return true;
+
+ Expr *NewElemArg = TheCall->getArg(1);
+ if (!Context.hasSameType(ElTy, NewElemArg->getType())) {
+ return Diag(NewElemArg->getBeginLoc(),
+ diag::err_wasm_builtin_arg_must_match_table_element_type)
+ << 2 << 1 << NewElemArg->getSourceRange();
+ }
+
+ if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 2))
+ return true;
+
+ return false;
+}
+
+/// Check that the first argument is a WebAssembly table, the second is an
+/// integer, the third is the value to use to fill the table (of a type
+/// matching the table type), and the fourth is an integer.
+bool Sema::BuiltinWasmTableFill(CallExpr *TheCall) {
+ if (checkArgCount(*this, TheCall, 4))
+ return true;
+
+ QualType ElTy;
+ if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
+ return true;
+
+ if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
+ return true;
+
+ Expr *NewElemArg = TheCall->getArg(2);
+ if (!Context.hasSameType(ElTy, NewElemArg->getType())) {
+ return Diag(NewElemArg->getBeginLoc(),
+ diag::err_wasm_builtin_arg_must_match_table_element_type)
+ << 3 << 1 << NewElemArg->getSourceRange();
+ }
+
+ if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 3))
+ return true;
+
+ return false;
+}
+
+/// Check that the first argument is a WebAssembly table, the second is also a
+/// WebAssembly table (of the same element type), and the third to fifth
+/// arguments are integers.
+bool Sema::BuiltinWasmTableCopy(CallExpr *TheCall) {
+ if (checkArgCount(*this, TheCall, 5))
+ return true;
+
+ QualType XElTy;
+ if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, XElTy))
+ return true;
+
+ QualType YElTy;
+ if (CheckWasmBuiltinArgIsTable(*this, TheCall, 1, YElTy))
+ return true;
+
+ Expr *TableYArg = TheCall->getArg(1);
+ if (!Context.hasSameType(XElTy, YElTy)) {
+ return Diag(TableYArg->getBeginLoc(),
+ diag::err_wasm_builtin_arg_must_match_table_element_type)
+ << 2 << 1 << TableYArg->getSourceRange();
+ }
+
+ for (int I = 2; I <= 4; I++) {
+ if (CheckWasmBuiltinArgIsInteger(*this, TheCall, I))
+ return true;
+ }
+
+ return false;
+}
+
/// \brief Enforce the bounds of a TCB
/// CheckTCBEnforcement - Enforces that every function in a named TCB only
/// directly calls other functions in the same TCB as marked by the enforce_tcb
/// and enforce_tcb_leaf attributes.
-void Sema::CheckTCBEnforcement(const CallExpr *TheCall,
- const FunctionDecl *Callee) {
- const FunctionDecl *Caller = getCurFunctionDecl();
+void Sema::CheckTCBEnforcement(const SourceLocation CallExprLoc,
+ const NamedDecl *Callee) {
+ // This warning does not make sense in code that has no runtime behavior.
+ if (isUnevaluatedContext())
+ return;
- // Calls to builtins are not enforced.
- if (!Caller || !Caller->hasAttr<EnforceTCBAttr>() ||
- Callee->getBuiltinID() != 0)
+ const NamedDecl *Caller = getCurFunctionOrMethodDecl();
+
+ if (!Caller || !Caller->hasAttr<EnforceTCBAttr>())
return;
// Search through the enforce_tcb and enforce_tcb_leaf attributes to find
// all TCBs the callee is a part of.
llvm::StringSet<> CalleeTCBs;
- for_each(Callee->specific_attrs<EnforceTCBAttr>(),
- [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); });
- for_each(Callee->specific_attrs<EnforceTCBLeafAttr>(),
- [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); });
+ for (const auto *A : Callee->specific_attrs<EnforceTCBAttr>())
+ CalleeTCBs.insert(A->getTCBName());
+ for (const auto *A : Callee->specific_attrs<EnforceTCBLeafAttr>())
+ CalleeTCBs.insert(A->getTCBName());
// Go through the TCBs the caller is a part of and emit warnings if Caller
// is in a TCB that the Callee is not.
- for_each(
- Caller->specific_attrs<EnforceTCBAttr>(),
- [&](const auto *A) {
- StringRef CallerTCB = A->getTCBName();
- if (CalleeTCBs.count(CallerTCB) == 0) {
- this->Diag(TheCall->getExprLoc(),
- diag::warn_tcb_enforcement_violation) << Callee
- << CallerTCB;
- }
- });
+ for (const auto *A : Caller->specific_attrs<EnforceTCBAttr>()) {
+ StringRef CallerTCB = A->getTCBName();
+ if (CalleeTCBs.count(CallerTCB) == 0) {
+ this->Diag(CallExprLoc, diag::warn_tcb_enforcement_violation)
+ << Callee << CallerTCB;
+ }
+ }
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp
index e03b671ae61e..c44be0df9b0a 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp
@@ -23,6 +23,7 @@
#include "clang/AST/QualTypeNames.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/Type.h"
+#include "clang/Basic/AttributeCommonInfo.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/Specifiers.h"
@@ -34,6 +35,8 @@
#include "clang/Sema/Designator.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Overload.h"
+#include "clang/Sema/ParsedAttr.h"
+#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
@@ -50,8 +53,10 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
+
#include <list>
#include <map>
+#include <optional>
#include <string>
#include <vector>
@@ -93,10 +98,10 @@ private:
/// When the entry contains a single declaration, this is
/// the index associated with that entry.
- unsigned SingleDeclIndex;
+ unsigned SingleDeclIndex = 0;
public:
- ShadowMapEntry() : DeclOrVector(), SingleDeclIndex(0) {}
+ ShadowMapEntry() = default;
ShadowMapEntry(const ShadowMapEntry &) = delete;
ShadowMapEntry(ShadowMapEntry &&Move) { *this = std::move(Move); }
ShadowMapEntry &operator=(const ShadowMapEntry &) = delete;
@@ -220,6 +225,7 @@ public:
case CodeCompletionContext::CCC_ObjCMessageReceiver:
case CodeCompletionContext::CCC_ParenthesizedExpression:
case CodeCompletionContext::CCC_Statement:
+ case CodeCompletionContext::CCC_TopLevelOrExpression:
case CodeCompletionContext::CCC_Recovery:
if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl())
if (Method->isInstanceMethod())
@@ -304,6 +310,23 @@ public:
bool isInterestingDecl(const NamedDecl *ND,
bool &AsNestedNameSpecifier) const;
+ /// Decide whether or not a use of function Decl can be a call.
+ ///
+ /// \param ND the function declaration.
+ ///
+ /// \param BaseExprType the object type in a member access expression,
+ /// if any.
+ bool canFunctionBeCalled(const NamedDecl *ND, QualType BaseExprType) const;
+
+ /// Decide whether or not a use of member function Decl can be a call.
+ ///
+ /// \param Method the function declaration.
+ ///
+ /// \param BaseExprType the object type in a member access expression,
+ /// if any.
+ bool canCxxMethodBeCalled(const CXXMethodDecl *Method,
+ QualType BaseExprType) const;
+
/// Check whether the result is hidden by the Hiding declaration.
///
/// \returns true if the result is hidden and cannot be found, false if
@@ -333,8 +356,11 @@ public:
///
/// \param InBaseClass whether the result was found in a base
/// class of the searched context.
+ ///
+ /// \param BaseExprType the type of expression that precedes the "." or "->"
+ /// in a member access expression.
void AddResult(Result R, DeclContext *CurContext, NamedDecl *Hiding,
- bool InBaseClass);
+ bool InBaseClass, QualType BaseExprType);
/// Add a new non-declaration result to this result set.
void AddResult(Result R);
@@ -567,7 +593,6 @@ void PreferredTypeBuilder::enterMemAccess(Sema &S, SourceLocation Tok,
return;
// Keep the expected type, only update the location.
ExpectedLoc = Tok;
- return;
}
void PreferredTypeBuilder::enterUnary(Sema &S, SourceLocation Tok,
@@ -741,9 +766,7 @@ getRequiredQualification(ASTContext &Context, const DeclContext *CurContext,
static bool shouldIgnoreDueToReservedName(const NamedDecl *ND, Sema &SemaRef) {
ReservedIdentifierStatus Status = ND->isReserved(SemaRef.getLangOpts());
// Ignore reserved names for compiler provided decls.
- if ((Status != ReservedIdentifierStatus::NotReserved) &&
- (Status != ReservedIdentifierStatus::StartsWithUnderscoreAtGlobalScope) &&
- ND->getLocation().isInvalid())
+ if (isReservedInAllContexts(Status) && ND->getLocation().isInvalid())
return true;
// For system headers ignore only double-underscore names.
@@ -1095,7 +1118,10 @@ void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
if (const UsingShadowDecl *Using = dyn_cast<UsingShadowDecl>(R.Declaration)) {
CodeCompletionResult Result(Using->getTargetDecl(),
getBasePriority(Using->getTargetDecl()),
- R.Qualifier);
+ R.Qualifier, false,
+ (R.Availability == CXAvailability_Available ||
+ R.Availability == CXAvailability_Deprecated),
+ std::move(R.FixIts));
Result.ShadowDecl = Using;
MaybeAddResult(Result, CurContext);
return;
@@ -1209,7 +1235,7 @@ static void setInBaseClass(ResultBuilder::Result &R) {
enum class OverloadCompare { BothViable, Dominates, Dominated };
// Will Candidate ever be called on the object, when overloaded with Incumbent?
// Returns Dominates if Candidate is always called, Dominated if Incumbent is
-// always called, BothViable if either may be called dependending on arguments.
+// always called, BothViable if either may be called depending on arguments.
// Precondition: must actually be overloads!
static OverloadCompare compareOverloads(const CXXMethodDecl &Candidate,
const CXXMethodDecl &Incumbent,
@@ -1227,8 +1253,8 @@ static OverloadCompare compareOverloads(const CXXMethodDecl &Candidate,
if (Candidate.parameters()[I]->getType().getCanonicalType() !=
Incumbent.parameters()[I]->getType().getCanonicalType())
return OverloadCompare::BothViable;
- if (!llvm::empty(Candidate.specific_attrs<EnableIfAttr>()) ||
- !llvm::empty(Incumbent.specific_attrs<EnableIfAttr>()))
+ if (!Candidate.specific_attrs<EnableIfAttr>().empty() ||
+ !Incumbent.specific_attrs<EnableIfAttr>().empty())
return OverloadCompare::BothViable;
// At this point, we know calls can't pick one or the other based on
// arguments, so one of the two must win. (Or both fail, handled elsewhere).
@@ -1256,8 +1282,69 @@ static OverloadCompare compareOverloads(const CXXMethodDecl &Candidate,
: OverloadCompare::Dominated;
}
+bool ResultBuilder::canCxxMethodBeCalled(const CXXMethodDecl *Method,
+ QualType BaseExprType) const {
+ // Find the class scope that we're currently in.
+ // We could e.g. be inside a lambda, so walk up the DeclContext until we
+ // find a CXXMethodDecl.
+ DeclContext *CurContext = SemaRef.CurContext;
+ const auto *CurrentClassScope = [&]() -> const CXXRecordDecl * {
+ for (DeclContext *Ctx = CurContext; Ctx; Ctx = Ctx->getParent()) {
+ const auto *CtxMethod = llvm::dyn_cast<CXXMethodDecl>(Ctx);
+ if (CtxMethod && !CtxMethod->getParent()->isLambda()) {
+ return CtxMethod->getParent();
+ }
+ }
+ return nullptr;
+ }();
+
+ // If we're not inside the scope of the method's class, it can't be a call.
+ bool FunctionCanBeCall =
+ CurrentClassScope &&
+ (CurrentClassScope == Method->getParent() ||
+ CurrentClassScope->isDerivedFrom(Method->getParent()));
+
+ // We skip the following calculation for exceptions if it's already true.
+ if (FunctionCanBeCall)
+ return true;
+
+ // Exception: foo->FooBase::bar() or foo->Foo::bar() *is* a call.
+ if (const CXXRecordDecl *MaybeDerived =
+ BaseExprType.isNull() ? nullptr
+ : BaseExprType->getAsCXXRecordDecl()) {
+ auto *MaybeBase = Method->getParent();
+ FunctionCanBeCall =
+ MaybeDerived == MaybeBase || MaybeDerived->isDerivedFrom(MaybeBase);
+ }
+
+ return FunctionCanBeCall;
+}
+
+bool ResultBuilder::canFunctionBeCalled(const NamedDecl *ND,
+ QualType BaseExprType) const {
+ // We apply heuristics only to CCC_Symbol:
+ // * CCC_{Arrow,Dot}MemberAccess reflect member access expressions:
+ // f.method() and f->method(). These are always calls.
+ // * A qualified name to a member function may *not* be a call. We have to
+ // subdivide the cases: For example, f.Base::method(), which is regarded as
+ // CCC_Symbol, should be a call.
+ // * Non-member functions and static member functions are always considered
+ // calls.
+ if (CompletionContext.getKind() == clang::CodeCompletionContext::CCC_Symbol) {
+ if (const auto *FuncTmpl = dyn_cast<FunctionTemplateDecl>(ND)) {
+ ND = FuncTmpl->getTemplatedDecl();
+ }
+ const auto *Method = dyn_cast<CXXMethodDecl>(ND);
+ if (Method && !Method->isStatic()) {
+ return canCxxMethodBeCalled(Method, BaseExprType);
+ }
+ }
+ return true;
+}
+
void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
- NamedDecl *Hiding, bool InBaseClass = false) {
+ NamedDecl *Hiding, bool InBaseClass = false,
+ QualType BaseExprType = QualType()) {
if (R.Kind != Result::RK_Declaration) {
// For non-declaration results, just add the result.
Results.push_back(R);
@@ -1268,9 +1355,13 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
if (const auto *Using = dyn_cast<UsingShadowDecl>(R.Declaration)) {
CodeCompletionResult Result(Using->getTargetDecl(),
getBasePriority(Using->getTargetDecl()),
- R.Qualifier);
+ R.Qualifier, false,
+ (R.Availability == CXAvailability_Available ||
+ R.Availability == CXAvailability_Deprecated),
+ std::move(R.FixIts));
Result.ShadowDecl = Using;
- AddResult(Result, CurContext, Hiding);
+ AddResult(Result, CurContext, Hiding, /*InBaseClass=*/false,
+ /*BaseExprType=*/BaseExprType);
return;
}
@@ -1372,6 +1463,8 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
OverloadSet.Add(Method, Results.size());
}
+ R.FunctionCanBeCall = canFunctionBeCalled(R.getDeclaration(), BaseExprType);
+
// Insert this result into the set of results.
Results.push_back(R);
@@ -1438,7 +1531,7 @@ bool ResultBuilder::IsOrdinaryNonTypeName(const NamedDecl *ND) const {
bool ResultBuilder::IsIntegralConstantValue(const NamedDecl *ND) const {
if (!IsOrdinaryNonTypeName(ND))
- return 0;
+ return false;
if (const auto *VD = dyn_cast<ValueDecl>(ND->getUnderlyingDecl()))
if (VD->getType()->isIntegralOrEnumerationType())
@@ -1483,8 +1576,9 @@ bool ResultBuilder::IsClassOrStruct(const NamedDecl *ND) const {
// For purposes of this check, interfaces match too.
if (const auto *RD = dyn_cast<RecordDecl>(ND))
- return RD->getTagKind() == TTK_Class || RD->getTagKind() == TTK_Struct ||
- RD->getTagKind() == TTK_Interface;
+ return RD->getTagKind() == TagTypeKind::Class ||
+ RD->getTagKind() == TagTypeKind::Struct ||
+ RD->getTagKind() == TagTypeKind::Interface;
return false;
}
@@ -1496,7 +1590,7 @@ bool ResultBuilder::IsUnion(const NamedDecl *ND) const {
ND = ClassTemplate->getTemplatedDecl();
if (const auto *RD = dyn_cast<RecordDecl>(ND))
- return RD->getTagKind() == TTK_Union;
+ return RD->getTagKind() == TagTypeKind::Union;
return false;
}
@@ -1644,7 +1738,7 @@ public:
bool InBaseClass) override {
ResultBuilder::Result Result(ND, Results.getBasePriority(ND), nullptr,
false, IsAccessible(ND, Ctx), FixIts);
- Results.AddResult(Result, InitialLookupCtx, Hiding, InBaseClass);
+ Results.AddResult(Result, InitialLookupCtx, Hiding, InBaseClass, BaseType);
}
void EnteredContext(DeclContext *Ctx) override {
@@ -1802,7 +1896,7 @@ static void AddFunctionSpecifiers(Sema::ParserCompletionContext CCC,
Results.AddResult(Result("mutable"));
Results.AddResult(Result("virtual"));
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Sema::PCC_ObjCInterface:
case Sema::PCC_ObjCImplementation:
@@ -1815,6 +1909,7 @@ static void AddFunctionSpecifiers(Sema::ParserCompletionContext CCC,
case Sema::PCC_ObjCInstanceVariableList:
case Sema::PCC_Expression:
case Sema::PCC_Statement:
+ case Sema::PCC_TopLevelOrExpression:
case Sema::PCC_ForInit:
case Sema::PCC_Condition:
case Sema::PCC_RecoveryInFunction:
@@ -1872,6 +1967,7 @@ static bool WantTypesInContext(Sema::ParserCompletionContext CCC,
case Sema::PCC_Type:
case Sema::PCC_ParenthesizedExpression:
case Sema::PCC_LocalDeclarationSpecifiers:
+ case Sema::PCC_TopLevelOrExpression:
return true;
case Sema::PCC_Expression:
@@ -1896,6 +1992,7 @@ static PrintingPolicy getCompletionPrintingPolicy(const ASTContext &Context,
Policy.SuppressStrongLifetime = true;
Policy.SuppressUnwrittenScope = true;
Policy.SuppressScope = true;
+ Policy.CleanUglifiedParameters = true;
return Policy;
}
@@ -1922,15 +2019,15 @@ static const char *GetCompletionTypeString(QualType T, ASTContext &Context,
if (TagDecl *Tag = TagT->getDecl())
if (!Tag->hasNameForLinkage()) {
switch (Tag->getTagKind()) {
- case TTK_Struct:
+ case TagTypeKind::Struct:
return "struct <anonymous>";
- case TTK_Interface:
+ case TagTypeKind::Interface:
return "__interface <anonymous>";
- case TTK_Class:
+ case TagTypeKind::Class:
return "class <anonymous>";
- case TTK_Union:
+ case TagTypeKind::Union:
return "union <anonymous>";
- case TTK_Enum:
+ case TagTypeKind::Enum:
return "enum <anonymous>";
}
}
@@ -2089,7 +2186,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
AddObjCTopLevelResults(Results, true);
AddTypedefResult(Results);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Sema::PCC_Class:
if (SemaRef.getLangOpts().CPlusPlus) {
@@ -2121,8 +2218,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
if (CCC == Sema::PCC_Class) {
AddTypedefResult(Results);
- bool IsNotInheritanceScope =
- !(S->getFlags() & Scope::ClassInheritanceScope);
+ bool IsNotInheritanceScope = !S->isClassInheritanceScope();
// public:
Builder.AddTypedTextChunk("public");
if (IsNotInheritanceScope && Results.includeCodePatterns())
@@ -2148,7 +2244,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
Builder);
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Sema::PCC_Template:
case Sema::PCC_MemberTemplate:
@@ -2184,6 +2280,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
break;
case Sema::PCC_RecoveryInFunction:
+ case Sema::PCC_TopLevelOrExpression:
case Sema::PCC_Statement: {
if (SemaRef.getLangOpts().CPlusPlus11)
AddUsingAliasResult(Builder, Results);
@@ -2418,14 +2515,14 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
AddStaticAssertResult(Builder, Results, SemaRef.getLangOpts());
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
// Fall through (for statement expressions).
case Sema::PCC_ForInit:
case Sema::PCC_Condition:
AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
// Fall through: conditions and statements can have expressions.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Sema::PCC_ParenthesizedExpression:
if (SemaRef.getLangOpts().ObjCAutoRefCount &&
@@ -2455,7 +2552,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
Results.AddResult(Result(Builder.TakeString()));
}
// Fall through
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Sema::PCC_Expression: {
if (SemaRef.getLangOpts().CPlusPlus) {
@@ -2638,6 +2735,13 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
Results.AddResult(Result(Builder.TakeString()));
}
+ if (SemaRef.getLangOpts().C23) {
+ // nullptr
+ Builder.AddResultTypeChunk("nullptr_t");
+ Builder.AddTypedTextChunk("nullptr");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
// sizeof expression
Builder.AddResultTypeChunk("size_t");
Builder.AddTypedTextChunk("sizeof");
@@ -2778,7 +2882,7 @@ static void findTypeLocationForBlockDecl(const TypeSourceInfo *TSInfo,
while (true) {
// Look through typedefs.
if (!SuppressBlock) {
- if (TypedefTypeLoc TypedefTL = TL.getAs<TypedefTypeLoc>()) {
+ if (TypedefTypeLoc TypedefTL = TL.getAsAdjusted<TypedefTypeLoc>()) {
if (TypeSourceInfo *InnerTSInfo =
TypedefTL.getTypedefNameDecl()->getTypeSourceInfo()) {
TL = InnerTSInfo->getTypeLoc().getUnqualifiedLoc();
@@ -2809,22 +2913,24 @@ static void findTypeLocationForBlockDecl(const TypeSourceInfo *TSInfo,
}
}
-static std::string
-formatBlockPlaceholder(const PrintingPolicy &Policy, const NamedDecl *BlockDecl,
- FunctionTypeLoc &Block, FunctionProtoTypeLoc &BlockProto,
- bool SuppressBlockName = false,
- bool SuppressBlock = false,
- Optional<ArrayRef<QualType>> ObjCSubsts = None);
+static std::string formatBlockPlaceholder(
+ const PrintingPolicy &Policy, const NamedDecl *BlockDecl,
+ FunctionTypeLoc &Block, FunctionProtoTypeLoc &BlockProto,
+ bool SuppressBlockName = false, bool SuppressBlock = false,
+ std::optional<ArrayRef<QualType>> ObjCSubsts = std::nullopt);
-static std::string
-FormatFunctionParameter(const PrintingPolicy &Policy, const ParmVarDecl *Param,
- bool SuppressName = false, bool SuppressBlock = false,
- Optional<ArrayRef<QualType>> ObjCSubsts = None) {
+static std::string FormatFunctionParameter(
+ const PrintingPolicy &Policy, const DeclaratorDecl *Param,
+ bool SuppressName = false, bool SuppressBlock = false,
+ std::optional<ArrayRef<QualType>> ObjCSubsts = std::nullopt) {
// Params are unavailable in FunctionTypeLoc if the FunctionType is invalid.
- // It would be better to pass in the param Type, which is usually avaliable.
+ // It would be better to pass in the param Type, which is usually available.
// But this case is rare, so just pretend we fell back to int as elsewhere.
if (!Param)
return "int";
+ Decl::ObjCDeclQualifier ObjCQual = Decl::OBJC_TQ_None;
+ if (const auto *PVD = dyn_cast<ParmVarDecl>(Param))
+ ObjCQual = PVD->getObjCDeclQualifier();
bool ObjCMethodParam = isa<ObjCMethodDecl>(Param->getDeclContext());
if (Param->getType()->isDependentType() ||
!Param->getType()->isBlockPointerType()) {
@@ -2833,18 +2939,17 @@ FormatFunctionParameter(const PrintingPolicy &Policy, const ParmVarDecl *Param,
std::string Result;
if (Param->getIdentifier() && !ObjCMethodParam && !SuppressName)
- Result = std::string(Param->getIdentifier()->getName());
+ Result = std::string(Param->getIdentifier()->deuglifiedName());
QualType Type = Param->getType();
if (ObjCSubsts)
Type = Type.substObjCTypeArgs(Param->getASTContext(), *ObjCSubsts,
ObjCSubstitutionContext::Parameter);
if (ObjCMethodParam) {
- Result =
- "(" + formatObjCParamQualifiers(Param->getObjCDeclQualifier(), Type);
+ Result = "(" + formatObjCParamQualifiers(ObjCQual, Type);
Result += Type.getAsString(Policy) + ")";
if (Param->getIdentifier() && !SuppressName)
- Result += Param->getIdentifier()->getName();
+ Result += Param->getIdentifier()->deuglifiedName();
} else {
Type.getAsStringInternal(Result, Policy);
}
@@ -2872,20 +2977,19 @@ FormatFunctionParameter(const PrintingPolicy &Policy, const ParmVarDecl *Param,
// for the block; just use the parameter type as a placeholder.
std::string Result;
if (!ObjCMethodParam && Param->getIdentifier())
- Result = std::string(Param->getIdentifier()->getName());
+ Result = std::string(Param->getIdentifier()->deuglifiedName());
QualType Type = Param->getType().getUnqualifiedType();
if (ObjCMethodParam) {
Result = Type.getAsString(Policy);
- std::string Quals =
- formatObjCParamQualifiers(Param->getObjCDeclQualifier(), Type);
+ std::string Quals = formatObjCParamQualifiers(ObjCQual, Type);
if (!Quals.empty())
Result = "(" + Quals + " " + Result + ")";
if (Result.back() != ')')
Result += " ";
if (Param->getIdentifier())
- Result += Param->getIdentifier()->getName();
+ Result += Param->getIdentifier()->deuglifiedName();
} else {
Type.getAsStringInternal(Result, Policy);
}
@@ -2913,7 +3017,7 @@ static std::string
formatBlockPlaceholder(const PrintingPolicy &Policy, const NamedDecl *BlockDecl,
FunctionTypeLoc &Block, FunctionProtoTypeLoc &BlockProto,
bool SuppressBlockName, bool SuppressBlock,
- Optional<ArrayRef<QualType>> ObjCSubsts) {
+ std::optional<ArrayRef<QualType>> ObjCSubsts) {
std::string Result;
QualType ResultType = Block.getTypePtr()->getReturnType();
if (ObjCSubsts)
@@ -3080,14 +3184,14 @@ static void AddTemplateParameterChunks(
if (TTP->getIdentifier()) {
PlaceholderStr += ' ';
- PlaceholderStr += TTP->getIdentifier()->getName();
+ PlaceholderStr += TTP->getIdentifier()->deuglifiedName();
}
HasDefaultArg = TTP->hasDefaultArgument();
} else if (NonTypeTemplateParmDecl *NTTP =
dyn_cast<NonTypeTemplateParmDecl>(*P)) {
if (NTTP->getIdentifier())
- PlaceholderStr = std::string(NTTP->getIdentifier()->getName());
+ PlaceholderStr = std::string(NTTP->getIdentifier()->deuglifiedName());
NTTP->getType().getAsStringInternal(PlaceholderStr, Policy);
HasDefaultArg = NTTP->hasDefaultArgument();
} else {
@@ -3099,7 +3203,7 @@ static void AddTemplateParameterChunks(
PlaceholderStr = "template<...> class";
if (TTP->getIdentifier()) {
PlaceholderStr += ' ';
- PlaceholderStr += TTP->getIdentifier()->getName();
+ PlaceholderStr += TTP->getIdentifier()->deuglifiedName();
}
HasDefaultArg = TTP->hasDefaultArgument();
@@ -3480,8 +3584,14 @@ CodeCompletionString *CodeCompletionResult::createCodeCompletionStringForDecl(
// Figure out which template parameters are deduced (or have default
// arguments).
- llvm::SmallBitVector Deduced;
- Sema::MarkDeducedTemplateParameters(Ctx, FunTmpl, Deduced);
+ // Note that we're creating a non-empty bit vector so that we can go
+ // through the loop below to omit default template parameters for non-call
+ // cases.
+ llvm::SmallBitVector Deduced(FunTmpl->getTemplateParameters()->size());
+ // Avoid running it if this is not a call: We should emit *all* template
+ // parameters.
+ if (FunctionCanBeCall)
+ Sema::MarkDeducedTemplateParameters(Ctx, FunTmpl, Deduced);
unsigned LastDeducibleArgument;
for (LastDeducibleArgument = Deduced.size(); LastDeducibleArgument > 0;
--LastDeducibleArgument) {
@@ -3508,10 +3618,19 @@ CodeCompletionString *CodeCompletionResult::createCodeCompletionStringForDecl(
}
}
- if (LastDeducibleArgument) {
+ if (LastDeducibleArgument || !FunctionCanBeCall) {
// Some of the function template arguments cannot be deduced from a
// function call, so we introduce an explicit template argument list
// containing all of the arguments up to the first deducible argument.
+ //
+ // Or, if this isn't a call, emit all the template arguments
+ // to disambiguate the (potential) overloads.
+ //
+ // FIXME: Detect cases where the function parameters can be deduced from
+ // the surrounding context, as per [temp.deduct.funcaddr].
+ // e.g.,
+ // template <class T> void foo(T);
+ // void (*f)(int) = foo;
Result.AddChunk(CodeCompletionString::CK_LeftAngle);
AddTemplateParameterChunks(Ctx, Policy, FunTmpl, Result,
LastDeducibleArgument);
@@ -3582,7 +3701,7 @@ CodeCompletionString *CodeCompletionResult::createCodeCompletionStringForDecl(
std::string Arg;
QualType ParamType = (*P)->getType();
- Optional<ArrayRef<QualType>> ObjCSubsts;
+ std::optional<ArrayRef<QualType>> ObjCSubsts;
if (!CCContext.getBaseType().isNull())
ObjCSubsts = CCContext.getBaseType()->getObjCSubstitutions(Method);
@@ -3689,15 +3808,43 @@ const RawComment *clang::getParameterComment(
return nullptr;
}
-/// Add function overload parameter chunks to the given code completion
-/// string.
-static void AddOverloadParameterChunks(ASTContext &Context,
+static void AddOverloadAggregateChunks(const RecordDecl *RD,
const PrintingPolicy &Policy,
- const FunctionDecl *Function,
- const FunctionProtoType *Prototype,
CodeCompletionBuilder &Result,
- unsigned CurrentArg, unsigned Start = 0,
- bool InOptional = false) {
+ unsigned CurrentArg) {
+ unsigned ChunkIndex = 0;
+ auto AddChunk = [&](llvm::StringRef Placeholder) {
+ if (ChunkIndex > 0)
+ Result.AddChunk(CodeCompletionString::CK_Comma);
+ const char *Copy = Result.getAllocator().CopyString(Placeholder);
+ if (ChunkIndex == CurrentArg)
+ Result.AddCurrentParameterChunk(Copy);
+ else
+ Result.AddPlaceholderChunk(Copy);
+ ++ChunkIndex;
+ };
+ // Aggregate initialization has all bases followed by all fields.
+ // (Bases are not legal in C++11 but in that case we never get here).
+ if (auto *CRD = llvm::dyn_cast<CXXRecordDecl>(RD)) {
+ for (const auto &Base : CRD->bases())
+ AddChunk(Base.getType().getAsString(Policy));
+ }
+ for (const auto &Field : RD->fields())
+ AddChunk(FormatFunctionParameter(Policy, Field));
+}
+
+/// Add function overload parameter chunks to the given code completion
+/// string.
+static void AddOverloadParameterChunks(
+ ASTContext &Context, const PrintingPolicy &Policy,
+ const FunctionDecl *Function, const FunctionProtoType *Prototype,
+ FunctionProtoTypeLoc PrototypeLoc, CodeCompletionBuilder &Result,
+ unsigned CurrentArg, unsigned Start = 0, bool InOptional = false) {
+ if (!Function && !Prototype) {
+ Result.AddChunk(CodeCompletionString::CK_CurrentParameter, "...");
+ return;
+ }
+
bool FirstParameter = true;
unsigned NumParams =
Function ? Function->getNumParams() : Prototype->getNumParams();
@@ -3711,8 +3858,9 @@ static void AddOverloadParameterChunks(ASTContext &Context,
if (!FirstParameter)
Opt.AddChunk(CodeCompletionString::CK_Comma);
// Optional sections are nested.
- AddOverloadParameterChunks(Context, Policy, Function, Prototype, Opt,
- CurrentArg, P, /*InOptional=*/true);
+ AddOverloadParameterChunks(Context, Policy, Function, Prototype,
+ PrototypeLoc, Opt, CurrentArg, P,
+ /*InOptional=*/true);
Result.AddOptionalChunk(Opt.TakeString());
return;
}
@@ -3726,8 +3874,10 @@ static void AddOverloadParameterChunks(ASTContext &Context,
// Format the placeholder string.
std::string Placeholder;
- if (Function) {
- const ParmVarDecl *Param = Function->getParamDecl(P);
+ assert(P < Prototype->getNumParams());
+ if (Function || PrototypeLoc) {
+ const ParmVarDecl *Param =
+ Function ? Function->getParamDecl(P) : PrototypeLoc.getParam(P);
Placeholder = FormatFunctionParameter(Policy, Param);
if (Param->hasDefaultArg())
Placeholder += GetDefaultValueString(Param, Context.getSourceManager(),
@@ -3758,10 +3908,83 @@ static void AddOverloadParameterChunks(ASTContext &Context,
}
}
+static std::string
+formatTemplateParameterPlaceholder(const NamedDecl *Param, bool &Optional,
+ const PrintingPolicy &Policy) {
+ if (const auto *Type = dyn_cast<TemplateTypeParmDecl>(Param)) {
+ Optional = Type->hasDefaultArgument();
+ } else if (const auto *NonType = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ Optional = NonType->hasDefaultArgument();
+ } else if (const auto *Template = dyn_cast<TemplateTemplateParmDecl>(Param)) {
+ Optional = Template->hasDefaultArgument();
+ }
+ std::string Result;
+ llvm::raw_string_ostream OS(Result);
+ Param->print(OS, Policy);
+ return Result;
+}
+
+static std::string templateResultType(const TemplateDecl *TD,
+ const PrintingPolicy &Policy) {
+ if (const auto *CTD = dyn_cast<ClassTemplateDecl>(TD))
+ return CTD->getTemplatedDecl()->getKindName().str();
+ if (const auto *VTD = dyn_cast<VarTemplateDecl>(TD))
+ return VTD->getTemplatedDecl()->getType().getAsString(Policy);
+ if (const auto *FTD = dyn_cast<FunctionTemplateDecl>(TD))
+ return FTD->getTemplatedDecl()->getReturnType().getAsString(Policy);
+ if (isa<TypeAliasTemplateDecl>(TD))
+ return "type";
+ if (isa<TemplateTemplateParmDecl>(TD))
+ return "class";
+ if (isa<ConceptDecl>(TD))
+ return "concept";
+ return "";
+}
+
+static CodeCompletionString *createTemplateSignatureString(
+ const TemplateDecl *TD, CodeCompletionBuilder &Builder, unsigned CurrentArg,
+ const PrintingPolicy &Policy) {
+ llvm::ArrayRef<NamedDecl *> Params = TD->getTemplateParameters()->asArray();
+ CodeCompletionBuilder OptionalBuilder(Builder.getAllocator(),
+ Builder.getCodeCompletionTUInfo());
+ std::string ResultType = templateResultType(TD, Policy);
+ if (!ResultType.empty())
+ Builder.AddResultTypeChunk(Builder.getAllocator().CopyString(ResultType));
+ Builder.AddTextChunk(
+ Builder.getAllocator().CopyString(TD->getNameAsString()));
+ Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
+ // Initially we're writing into the main string. Once we see an optional arg
+ // (with default), we're writing into the nested optional chunk.
+ CodeCompletionBuilder *Current = &Builder;
+ for (unsigned I = 0; I < Params.size(); ++I) {
+ bool Optional = false;
+ std::string Placeholder =
+ formatTemplateParameterPlaceholder(Params[I], Optional, Policy);
+ if (Optional)
+ Current = &OptionalBuilder;
+ if (I > 0)
+ Current->AddChunk(CodeCompletionString::CK_Comma);
+ Current->AddChunk(I == CurrentArg
+ ? CodeCompletionString::CK_CurrentParameter
+ : CodeCompletionString::CK_Placeholder,
+ Current->getAllocator().CopyString(Placeholder));
+ }
+ // Add the optional chunk to the main string if we ever used it.
+ if (Current == &OptionalBuilder)
+ Builder.AddOptionalChunk(OptionalBuilder.TakeString());
+ Builder.AddChunk(CodeCompletionString::CK_RightAngle);
+ // For function templates, ResultType was the function's return type.
+ // Give some clue this is a function. (Don't show the possibly-bulky params).
+ if (isa<FunctionTemplateDecl>(TD))
+ Builder.AddInformativeChunk("()");
+ return Builder.TakeString();
+}
+
CodeCompletionString *
CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
unsigned CurrentArg, Sema &S, CodeCompletionAllocator &Allocator,
- CodeCompletionTUInfo &CCTUInfo, bool IncludeBriefComments) const {
+ CodeCompletionTUInfo &CCTUInfo, bool IncludeBriefComments,
+ bool Braced) const {
PrintingPolicy Policy = getCompletionPrintingPolicy(S);
// Show signatures of constructors as they are declared:
// vector(int n) rather than vector<string>(int n)
@@ -3771,22 +3994,20 @@ CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
// FIXME: Set priority, availability appropriately.
CodeCompletionBuilder Result(Allocator, CCTUInfo, 1,
CXAvailability_Available);
+
+ if (getKind() == CK_Template)
+ return createTemplateSignatureString(getTemplate(), Result, CurrentArg,
+ Policy);
+
FunctionDecl *FDecl = getFunction();
const FunctionProtoType *Proto =
- dyn_cast<FunctionProtoType>(getFunctionType());
- if (!FDecl && !Proto) {
- // Function without a prototype. Just give the return type and a
- // highlighted ellipsis.
- const FunctionType *FT = getFunctionType();
- Result.AddResultTypeChunk(Result.getAllocator().CopyString(
- FT->getReturnType().getAsString(Policy)));
- Result.AddChunk(CodeCompletionString::CK_LeftParen);
- Result.AddChunk(CodeCompletionString::CK_CurrentParameter, "...");
- Result.AddChunk(CodeCompletionString::CK_RightParen);
- return Result.TakeString();
- }
+ dyn_cast_or_null<FunctionProtoType>(getFunctionType());
- if (FDecl) {
+ // First, the name/type of the callee.
+ if (getKind() == CK_Aggregate) {
+ Result.AddTextChunk(
+ Result.getAllocator().CopyString(getAggregate()->getName()));
+ } else if (FDecl) {
if (IncludeBriefComments) {
if (auto RC = getParameterComment(S.getASTContext(), *this, CurrentArg))
Result.addBriefComment(RC->getBriefText(S.getASTContext()));
@@ -3798,14 +4019,21 @@ CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
FDecl->getDeclName().print(OS, Policy);
Result.AddTextChunk(Result.getAllocator().CopyString(OS.str()));
} else {
+ // Function without a declaration. Just give the return type.
Result.AddResultTypeChunk(Result.getAllocator().CopyString(
- Proto->getReturnType().getAsString(Policy)));
+ getFunctionType()->getReturnType().getAsString(Policy)));
}
- Result.AddChunk(CodeCompletionString::CK_LeftParen);
- AddOverloadParameterChunks(S.getASTContext(), Policy, FDecl, Proto, Result,
- CurrentArg);
- Result.AddChunk(CodeCompletionString::CK_RightParen);
+ // Next, the brackets and parameters.
+ Result.AddChunk(Braced ? CodeCompletionString::CK_LeftBrace
+ : CodeCompletionString::CK_LeftParen);
+ if (getKind() == CK_Aggregate)
+ AddOverloadAggregateChunks(getAggregate(), Policy, Result, CurrentArg);
+ else
+ AddOverloadParameterChunks(S.getASTContext(), Policy, FDecl, Proto,
+ getFunctionProtoTypeLoc(), Result, CurrentArg);
+ Result.AddChunk(Braced ? CodeCompletionString::CK_RightBrace
+ : CodeCompletionString::CK_RightParen);
return Result.TakeString();
}
@@ -3934,17 +4162,23 @@ CXCursorKind clang::getCursorKindForDecl(const Decl *D) {
case Decl::ObjCTypeParam:
return CXCursor_TemplateTypeParameter;
+ case Decl::Concept:
+ return CXCursor_ConceptDecl;
+
+ case Decl::LinkageSpec:
+ return CXCursor_LinkageSpec;
+
default:
if (const auto *TD = dyn_cast<TagDecl>(D)) {
switch (TD->getTagKind()) {
- case TTK_Interface: // fall through
- case TTK_Struct:
+ case TagTypeKind::Interface: // fall through
+ case TagTypeKind::Struct:
return CXCursor_StructDecl;
- case TTK_Class:
+ case TagTypeKind::Class:
return CXCursor_ClassDecl;
- case TTK_Union:
+ case TagTypeKind::Union:
return CXCursor_UnionDecl;
- case TTK_Enum:
+ case TagTypeKind::Enum:
return CXCursor_EnumDecl;
}
}
@@ -3994,7 +4228,7 @@ static void AddPrettyFunctionResults(const LangOptions &LangOpts,
static void HandleCodeCompleteResults(Sema *S,
CodeCompleteConsumer *CodeCompleter,
- CodeCompletionContext Context,
+ const CodeCompletionContext &Context,
CodeCompletionResult *Results,
unsigned NumResults) {
if (CodeCompleter)
@@ -4054,6 +4288,8 @@ mapCodeCompletionContext(Sema &S, Sema::ParserCompletionContext PCC) {
case Sema::PCC_LocalDeclarationSpecifiers:
return CodeCompletionContext::CCC_Type;
+ case Sema::PCC_TopLevelOrExpression:
+ return CodeCompletionContext::CCC_TopLevelOrExpression;
}
llvm_unreachable("Invalid ParserCompletionContext!");
@@ -4080,7 +4316,7 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
// We need to have names for all of the parameters, if we're going to
// generate a forwarding call.
- for (auto P : Method->parameters())
+ for (auto *P : Method->parameters())
if (!P->getDeclName())
return;
@@ -4108,7 +4344,7 @@ static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
Results.getAllocator().CopyString(Overridden->getNameAsString()));
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
bool FirstParam = true;
- for (auto P : Method->parameters()) {
+ for (auto *P : Method->parameters()) {
if (FirstParam)
FirstParam = false;
else
@@ -4155,16 +4391,13 @@ void Sema::CodeCompleteModuleImport(SourceLocation ImportLoc,
/*IsInclusionDirective=*/false);
// Enumerate submodules.
if (Mod) {
- for (Module::submodule_iterator Sub = Mod->submodule_begin(),
- SubEnd = Mod->submodule_end();
- Sub != SubEnd; ++Sub) {
-
+ for (auto *Submodule : Mod->submodules()) {
Builder.AddTypedTextChunk(
- Builder.getAllocator().CopyString((*Sub)->Name));
+ Builder.getAllocator().CopyString(Submodule->Name));
Results.AddResult(Result(
Builder.TakeString(), CCP_Declaration, CXCursor_ModuleImportDecl,
- (*Sub)->isAvailable() ? CXAvailability_Available
- : CXAvailability_NotAvailable));
+ Submodule->isAvailable() ? CXAvailability_Available
+ : CXAvailability_NotAvailable));
}
}
}
@@ -4197,6 +4430,7 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
break;
case PCC_Statement:
+ case PCC_TopLevelOrExpression:
case PCC_ParenthesizedExpression:
case PCC_Expression:
case PCC_ForInit:
@@ -4234,6 +4468,7 @@ void Sema::CodeCompleteOrdinaryName(Scope *S,
case PCC_ParenthesizedExpression:
case PCC_Expression:
case PCC_Statement:
+ case PCC_TopLevelOrExpression:
case PCC_RecoveryInFunction:
if (S->getFnParent())
AddPrettyFunctionResults(getLangOpts(), Results);
@@ -4325,7 +4560,8 @@ void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
0) {
ParsedType T = DS.getRepAsType();
if (!T.get().isNull() && T.get()->isObjCObjectOrInterfaceType())
- AddClassMessageCompletions(*this, S, T, None, false, false, Results);
+ AddClassMessageCompletions(*this, S, T, std::nullopt, false, false,
+ Results);
}
// Note that we intentionally suppress macro results here, since we do not
@@ -4335,6 +4571,158 @@ void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
Results.data(), Results.size());
}
+static const char *underscoreAttrScope(llvm::StringRef Scope) {
+ if (Scope == "clang")
+ return "_Clang";
+ if (Scope == "gnu")
+ return "__gnu__";
+ return nullptr;
+}
+
+static const char *noUnderscoreAttrScope(llvm::StringRef Scope) {
+ if (Scope == "_Clang")
+ return "clang";
+ if (Scope == "__gnu__")
+ return "gnu";
+ return nullptr;
+}
+
+void Sema::CodeCompleteAttribute(AttributeCommonInfo::Syntax Syntax,
+ AttributeCompletion Completion,
+ const IdentifierInfo *InScope) {
+ if (Completion == AttributeCompletion::None)
+ return;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Attribute);
+
+ // We're going to iterate over the normalized spellings of the attribute.
+ // These don't include "underscore guarding": the normalized spelling is
+ // clang::foo but you can also write _Clang::__foo__.
+ //
+ // (Clang supports a mix like clang::__foo__ but we won't suggest it: either
+ // you care about clashing with macros or you don't).
+ //
+ // So if we're already in a scope, we determine its canonical spellings
+ // (for comparison with normalized attr spelling) and remember whether it was
+ // underscore-guarded (so we know how to spell contained attributes).
+ llvm::StringRef InScopeName;
+ bool InScopeUnderscore = false;
+ if (InScope) {
+ InScopeName = InScope->getName();
+ if (const char *NoUnderscore = noUnderscoreAttrScope(InScopeName)) {
+ InScopeName = NoUnderscore;
+ InScopeUnderscore = true;
+ }
+ }
+ bool SyntaxSupportsGuards = Syntax == AttributeCommonInfo::AS_GNU ||
+ Syntax == AttributeCommonInfo::AS_CXX11 ||
+ Syntax == AttributeCommonInfo::AS_C23;
+
+ llvm::DenseSet<llvm::StringRef> FoundScopes;
+ auto AddCompletions = [&](const ParsedAttrInfo &A) {
+ if (A.IsTargetSpecific && !A.existsInTarget(Context.getTargetInfo()))
+ return;
+ if (!A.acceptsLangOpts(getLangOpts()))
+ return;
+ for (const auto &S : A.Spellings) {
+ if (S.Syntax != Syntax)
+ continue;
+ llvm::StringRef Name = S.NormalizedFullName;
+ llvm::StringRef Scope;
+ if ((Syntax == AttributeCommonInfo::AS_CXX11 ||
+ Syntax == AttributeCommonInfo::AS_C23)) {
+ std::tie(Scope, Name) = Name.split("::");
+ if (Name.empty()) // oops, unscoped
+ std::swap(Name, Scope);
+ }
+
+ // Do we just want a list of scopes rather than attributes?
+ if (Completion == AttributeCompletion::Scope) {
+ // Make sure to emit each scope only once.
+ if (!Scope.empty() && FoundScopes.insert(Scope).second) {
+ Results.AddResult(
+ CodeCompletionResult(Results.getAllocator().CopyString(Scope)));
+ // Include alternate form (__gnu__ instead of gnu).
+ if (const char *Scope2 = underscoreAttrScope(Scope))
+ Results.AddResult(CodeCompletionResult(Scope2));
+ }
+ continue;
+ }
+
+ // If a scope was specified, it must match but we don't need to print it.
+ if (!InScopeName.empty()) {
+ if (Scope != InScopeName)
+ continue;
+ Scope = "";
+ }
+
+ auto Add = [&](llvm::StringRef Scope, llvm::StringRef Name,
+ bool Underscores) {
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ llvm::SmallString<32> Text;
+ if (!Scope.empty()) {
+ Text.append(Scope);
+ Text.append("::");
+ }
+ if (Underscores)
+ Text.append("__");
+ Text.append(Name);
+ if (Underscores)
+ Text.append("__");
+ Builder.AddTypedTextChunk(Results.getAllocator().CopyString(Text));
+
+ if (!A.ArgNames.empty()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen, "(");
+ bool First = true;
+ for (const char *Arg : A.ArgNames) {
+ if (!First)
+ Builder.AddChunk(CodeCompletionString::CK_Comma, ", ");
+ First = false;
+ Builder.AddPlaceholderChunk(Arg);
+ }
+ Builder.AddChunk(CodeCompletionString::CK_RightParen, ")");
+ }
+
+ Results.AddResult(Builder.TakeString());
+ };
+
+ // Generate the non-underscore-guarded result.
+ // Note this is (a suffix of) the NormalizedFullName, no need to copy.
+ // If an underscore-guarded scope was specified, only the
+ // underscore-guarded attribute name is relevant.
+ if (!InScopeUnderscore)
+ Add(Scope, Name, /*Underscores=*/false);
+
+ // Generate the underscore-guarded version, for syntaxes that support it.
+ // We skip this if the scope was already spelled and not guarded, or
+ // we must spell it and can't guard it.
+ if (!(InScope && !InScopeUnderscore) && SyntaxSupportsGuards) {
+ llvm::SmallString<32> Guarded;
+ if (Scope.empty()) {
+ Add(Scope, Name, /*Underscores=*/true);
+ } else {
+ const char *GuardedScope = underscoreAttrScope(Scope);
+ if (!GuardedScope)
+ continue;
+ Add(GuardedScope, Name, /*Underscores=*/true);
+ }
+ }
+
+ // It may be nice to include the Kind so we can look up the docs later.
+ }
+ };
+
+ for (const auto *A : ParsedAttrInfo::getAllBuiltin())
+ AddCompletions(*A);
+ for (const auto &Entry : ParsedAttrInfoRegistry::entries())
+ AddCompletions(*Entry.instantiate());
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
+
struct Sema::CodeCompleteExpressionData {
CodeCompleteExpressionData(QualType PreferredType = QualType(),
bool IsParenthesized = false)
@@ -4386,9 +4774,9 @@ static const FunctionProtoType *TryDeconstructFunctionLike(QualType T) {
// Note we only handle the sugared types, they closely match what users wrote.
// We explicitly choose to not handle ClassTemplateSpecializationDecl.
if (auto *Specialization = T->getAs<TemplateSpecializationType>()) {
- if (Specialization->getNumArgs() != 1)
+ if (Specialization->template_arguments().size() != 1)
return nullptr;
- const TemplateArgument &Argument = Specialization->getArg(0);
+ const TemplateArgument &Argument = Specialization->template_arguments()[0];
if (Argument.getKind() != TemplateArgument::Type)
return nullptr;
return Argument.getAsType()->getAs<FunctionProtoType>();
@@ -4530,7 +4918,7 @@ void Sema::CodeCompletePostfixExpression(Scope *S, ExprResult E,
if (E.isInvalid())
CodeCompleteExpression(S, PreferredType);
else if (getLangOpts().ObjC)
- CodeCompleteObjCInstanceMessage(S, E.get(), None, false);
+ CodeCompleteObjCInstanceMessage(S, E.get(), std::nullopt, false);
}
/// The set of properties that have already been added, referenced by
@@ -4774,9 +5162,11 @@ AddObjCProperties(const CodeCompletionContext &CCContext,
}
}
-static void AddRecordMembersCompletionResults(
- Sema &SemaRef, ResultBuilder &Results, Scope *S, QualType BaseType,
- ExprValueKind BaseKind, RecordDecl *RD, Optional<FixItHint> AccessOpFixIt) {
+static void
+AddRecordMembersCompletionResults(Sema &SemaRef, ResultBuilder &Results,
+ Scope *S, QualType BaseType,
+ ExprValueKind BaseKind, RecordDecl *RD,
+ std::optional<FixItHint> AccessOpFixIt) {
// Indicate that we are performing a member access, and the cv-qualifiers
// for the base object type.
Results.setObjectTypeQualifiers(BaseType.getQualifiers(), BaseKind);
@@ -4785,7 +5175,7 @@ static void AddRecordMembersCompletionResults(
Results.allowNestedNameSpecifiers();
std::vector<FixItHint> FixIts;
if (AccessOpFixIt)
- FixIts.emplace_back(AccessOpFixIt.getValue());
+ FixIts.emplace_back(*AccessOpFixIt);
CodeCompletionDeclConsumer Consumer(Results, RD, BaseType, std::move(FixIts));
SemaRef.LookupVisibleDecls(RD, Sema::LookupMemberName, Consumer,
SemaRef.CodeCompleter->includeGlobals(),
@@ -4815,7 +5205,8 @@ static void AddRecordMembersCompletionResults(
// Returns the RecordDecl inside the BaseType, falling back to primary template
// in case of specializations. Since we might not have a decl for the
// instantiation/specialization yet, e.g. dependent code.
-static RecordDecl *getAsRecordDecl(const QualType BaseType) {
+static RecordDecl *getAsRecordDecl(QualType BaseType) {
+ BaseType = BaseType.getNonReferenceType();
if (auto *RD = BaseType->getAsRecordDecl()) {
if (const auto *CTSD =
llvm::dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
@@ -4874,7 +5265,7 @@ public:
// We don't have the declared parameter types, only the actual types of
// arguments we've seen. These are still valuable, as it's hard to render
// a useful function completion with neither parameter types nor names!
- llvm::Optional<SmallVector<QualType, 1>> ArgTypes;
+ std::optional<SmallVector<QualType, 1>> ArgTypes;
// Whether this is accessed as T.member, T->member, or T::member.
enum AccessOperator {
Colons,
@@ -5093,8 +5484,8 @@ private:
// Overwrite existing if the new member has more info.
// The preference of . vs :: vs -> is fairly arbitrary.
if (/*Inserted*/ R.second ||
- std::make_tuple(M.ArgTypes.hasValue(), M.ResultType != nullptr,
- M.Operator) > std::make_tuple(O.ArgTypes.hasValue(),
+ std::make_tuple(M.ArgTypes.has_value(), M.ResultType != nullptr,
+ M.Operator) > std::make_tuple(O.ArgTypes.has_value(),
O.ResultType != nullptr,
O.Operator))
O = std::move(M);
@@ -5204,11 +5595,16 @@ private:
// We accept some lossiness (like dropping parameters).
// We only try to handle common expressions on the LHS of MemberExpr.
QualType getApproximateType(const Expr *E) {
+ if (E->getType().isNull())
+ return QualType();
+ E = E->IgnoreParenImpCasts();
QualType Unresolved = E->getType();
- if (Unresolved.isNull() ||
- !Unresolved->isSpecificBuiltinType(BuiltinType::Dependent))
- return Unresolved;
- E = E->IgnoreParens();
+ // We only resolve DependentTy, or undeduced autos (including auto* etc).
+ if (!Unresolved->isSpecificBuiltinType(BuiltinType::Dependent)) {
+ AutoType *Auto = Unresolved->getContainedAutoType();
+ if (!Auto || !Auto->isUndeducedAutoType())
+ return Unresolved;
+ }
// A call: approximate-resolve callee to a function type, get its return type
if (const CallExpr *CE = llvm::dyn_cast<CallExpr>(E)) {
QualType Callee = getApproximateType(CE->getCallee());
@@ -5257,11 +5653,25 @@ QualType getApproximateType(const Expr *E) {
: getApproximateType(CDSME->getBase());
if (CDSME->isArrow() && !Base.isNull())
Base = Base->getPointeeType(); // could handle unique_ptr etc here?
- RecordDecl *RD = Base.isNull() ? nullptr : getAsRecordDecl(Base);
+ auto *RD =
+ Base.isNull()
+ ? nullptr
+ : llvm::dyn_cast_or_null<CXXRecordDecl>(getAsRecordDecl(Base));
if (RD && RD->isCompleteDefinition()) {
- for (const auto *Member : RD->lookup(CDSME->getMember()))
- if (const ValueDecl *VD = llvm::dyn_cast<ValueDecl>(Member))
- return VD->getType().getNonReferenceType();
+ // Look up member heuristically, including in bases.
+ for (const auto *Member : RD->lookupDependentName(
+ CDSME->getMember(), [](const NamedDecl *Member) {
+ return llvm::isa<ValueDecl>(Member);
+ })) {
+ return llvm::cast<ValueDecl>(Member)->getType().getNonReferenceType();
+ }
+ }
+ }
+ // A reference to an `auto` variable: approximate-resolve its initializer.
+ if (const auto *DRE = llvm::dyn_cast<DeclRefExpr>(E)) {
+ if (const auto *VD = llvm::dyn_cast<VarDecl>(DRE->getDecl())) {
+ if (VD->hasInit())
+ return getApproximateType(VD->getInit());
}
}
return Unresolved;
@@ -5322,7 +5732,7 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
&ResultBuilder::IsMember);
auto DoCompletion = [&](Expr *Base, bool IsArrow,
- Optional<FixItHint> AccessOpFixIt) -> bool {
+ std::optional<FixItHint> AccessOpFixIt) -> bool {
if (!Base)
return false;
@@ -5369,7 +5779,7 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
// Objective-C property reference. Bail if we're performing fix-it code
// completion since Objective-C properties are normally backed by ivars,
// most Objective-C fix-its here would have little value.
- if (AccessOpFixIt.hasValue()) {
+ if (AccessOpFixIt) {
return false;
}
AddedPropertiesSet AddedProperties;
@@ -5394,7 +5804,7 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
// Objective-C instance variable access. Bail if we're performing fix-it
// code completion since Objective-C properties are normally backed by
// ivars, most Objective-C fix-its here would have little value.
- if (AccessOpFixIt.hasValue()) {
+ if (AccessOpFixIt) {
return false;
}
ObjCInterfaceDecl *Class = nullptr;
@@ -5420,7 +5830,7 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
Results.EnterNewScope();
- bool CompletionSucceded = DoCompletion(Base, IsArrow, None);
+ bool CompletionSucceded = DoCompletion(Base, IsArrow, std::nullopt);
if (CodeCompleter->includeFixIts()) {
const CharSourceRange OpRange =
CharSourceRange::getTokenRange(OpLoc, OpLoc);
@@ -5666,7 +6076,8 @@ static void mergeCandidatesWithResults(
if (Candidate.Function) {
if (Candidate.Function->isDeleted())
continue;
- if (!Candidate.Function->isVariadic() &&
+ if (shouldEnforceArgLimit(/*PartialOverloading=*/true,
+ Candidate.Function) &&
Candidate.Function->getNumParams() <= ArgSize &&
// Having zero args is annoying, normally we don't surface a function
// with 2 params, if you already have 2 params, because you are
@@ -5691,36 +6102,79 @@ static QualType getParamType(Sema &SemaRef,
// overload candidates.
QualType ParamType;
for (auto &Candidate : Candidates) {
- if (const auto *FType = Candidate.getFunctionType())
- if (const auto *Proto = dyn_cast<FunctionProtoType>(FType))
- if (N < Proto->getNumParams()) {
- if (ParamType.isNull())
- ParamType = Proto->getParamType(N);
- else if (!SemaRef.Context.hasSameUnqualifiedType(
- ParamType.getNonReferenceType(),
- Proto->getParamType(N).getNonReferenceType()))
- // Otherwise return a default-constructed QualType.
- return QualType();
- }
+ QualType CandidateParamType = Candidate.getParamType(N);
+ if (CandidateParamType.isNull())
+ continue;
+ if (ParamType.isNull()) {
+ ParamType = CandidateParamType;
+ continue;
+ }
+ if (!SemaRef.Context.hasSameUnqualifiedType(
+ ParamType.getNonReferenceType(),
+ CandidateParamType.getNonReferenceType()))
+ // Two conflicting types, give up.
+ return QualType();
}
return ParamType;
}
static QualType
-ProduceSignatureHelp(Sema &SemaRef, Scope *S,
- MutableArrayRef<ResultCandidate> Candidates,
- unsigned CurrentArg, SourceLocation OpenParLoc) {
+ProduceSignatureHelp(Sema &SemaRef, MutableArrayRef<ResultCandidate> Candidates,
+ unsigned CurrentArg, SourceLocation OpenParLoc,
+ bool Braced) {
if (Candidates.empty())
return QualType();
if (SemaRef.getPreprocessor().isCodeCompletionReached())
SemaRef.CodeCompleter->ProcessOverloadCandidates(
- SemaRef, CurrentArg, Candidates.data(), Candidates.size(), OpenParLoc);
+ SemaRef, CurrentArg, Candidates.data(), Candidates.size(), OpenParLoc,
+ Braced);
return getParamType(SemaRef, Candidates, CurrentArg);
}
-QualType Sema::ProduceCallSignatureHelp(Scope *S, Expr *Fn,
- ArrayRef<Expr *> Args,
+// Given a callee expression `Fn`, if the call is through a function pointer,
+// try to find the declaration of the corresponding function pointer type,
+// so that we can recover argument names from it.
+static FunctionProtoTypeLoc GetPrototypeLoc(Expr *Fn) {
+ TypeLoc Target;
+ if (const auto *T = Fn->getType().getTypePtr()->getAs<TypedefType>()) {
+ Target = T->getDecl()->getTypeSourceInfo()->getTypeLoc();
+
+ } else if (const auto *DR = dyn_cast<DeclRefExpr>(Fn)) {
+ const auto *D = DR->getDecl();
+ if (const auto *const VD = dyn_cast<VarDecl>(D)) {
+ Target = VD->getTypeSourceInfo()->getTypeLoc();
+ }
+ }
+
+ if (!Target)
+ return {};
+
+ // Unwrap types that may be wrapping the function type
+ while (true) {
+ if (auto P = Target.getAs<PointerTypeLoc>()) {
+ Target = P.getPointeeLoc();
+ continue;
+ }
+ if (auto A = Target.getAs<AttributedTypeLoc>()) {
+ Target = A.getModifiedLoc();
+ continue;
+ }
+ if (auto P = Target.getAs<ParenTypeLoc>()) {
+ Target = P.getInnerLoc();
+ continue;
+ }
+ break;
+ }
+
+ if (auto F = Target.getAs<FunctionProtoTypeLoc>()) {
+ return F;
+ }
+
+ return {};
+}
+
+QualType Sema::ProduceCallSignatureHelp(Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc) {
Fn = unwrapParenList(Fn);
if (!CodeCompleter || !Fn)
@@ -5801,6 +6255,8 @@ QualType Sema::ProduceCallSignatureHelp(Scope *S, Expr *Fn,
} else {
// Lastly we check whether expression's type is function pointer or
// function.
+
+ FunctionProtoTypeLoc P = GetPrototypeLoc(NakedFn);
QualType T = NakedFn->getType();
if (!T->getPointeeType().isNull())
T = T->getPointeeType();
@@ -5809,61 +6265,171 @@ QualType Sema::ProduceCallSignatureHelp(Scope *S, Expr *Fn,
if (!TooManyArguments(FP->getNumParams(),
ArgsWithoutDependentTypes.size(),
/*PartialOverloading=*/true) ||
- FP->isVariadic())
- Results.push_back(ResultCandidate(FP));
+ FP->isVariadic()) {
+ if (P) {
+ Results.push_back(ResultCandidate(P));
+ } else {
+ Results.push_back(ResultCandidate(FP));
+ }
+ }
} else if (auto FT = T->getAs<FunctionType>())
// No prototype and declaration, it may be a K & R style function.
Results.push_back(ResultCandidate(FT));
}
}
mergeCandidatesWithResults(*this, Results, CandidateSet, Loc, Args.size());
- QualType ParamType =
- ProduceSignatureHelp(*this, S, Results, Args.size(), OpenParLoc);
+ QualType ParamType = ProduceSignatureHelp(*this, Results, Args.size(),
+ OpenParLoc, /*Braced=*/false);
return !CandidateSet.empty() ? ParamType : QualType();
}
-QualType Sema::ProduceConstructorSignatureHelp(Scope *S, QualType Type,
+// Determine which param to continue aggregate initialization from after
+// a designated initializer.
+//
+// Given struct S { int a,b,c,d,e; }:
+// after `S{.b=1,` we want to suggest c to continue
+// after `S{.b=1, 2,` we continue with d (this is legal C and ext in C++)
+// after `S{.b=1, .a=2,` we continue with b (this is legal C and ext in C++)
+//
+// Possible outcomes:
+// - we saw a designator for a field, and continue from the returned index.
+// Only aggregate initialization is allowed.
+// - we saw a designator, but it was complex or we couldn't find the field.
+// Only aggregate initialization is possible, but we can't assist with it.
+// Returns an out-of-range index.
+// - we saw no designators, just positional arguments.
+// Returns std::nullopt.
+static std::optional<unsigned>
+getNextAggregateIndexAfterDesignatedInit(const ResultCandidate &Aggregate,
+ ArrayRef<Expr *> Args) {
+ static constexpr unsigned Invalid = std::numeric_limits<unsigned>::max();
+ assert(Aggregate.getKind() == ResultCandidate::CK_Aggregate);
+
+ // Look for designated initializers.
+ // They're in their syntactic form, not yet resolved to fields.
+ const IdentifierInfo *DesignatedFieldName = nullptr;
+ unsigned ArgsAfterDesignator = 0;
+ for (const Expr *Arg : Args) {
+ if (const auto *DIE = dyn_cast<DesignatedInitExpr>(Arg)) {
+ if (DIE->size() == 1 && DIE->getDesignator(0)->isFieldDesignator()) {
+ DesignatedFieldName = DIE->getDesignator(0)->getFieldName();
+ ArgsAfterDesignator = 0;
+ } else {
+ return Invalid; // Complicated designator.
+ }
+ } else if (isa<DesignatedInitUpdateExpr>(Arg)) {
+ return Invalid; // Unsupported.
+ } else {
+ ++ArgsAfterDesignator;
+ }
+ }
+ if (!DesignatedFieldName)
+ return std::nullopt;
+
+ // Find the index within the class's fields.
+ // (Probing getParamDecl() directly would be quadratic in number of fields).
+ unsigned DesignatedIndex = 0;
+ const FieldDecl *DesignatedField = nullptr;
+ for (const auto *Field : Aggregate.getAggregate()->fields()) {
+ if (Field->getIdentifier() == DesignatedFieldName) {
+ DesignatedField = Field;
+ break;
+ }
+ ++DesignatedIndex;
+ }
+ if (!DesignatedField)
+ return Invalid; // Designator referred to a missing field, give up.
+
+ // Find the index within the aggregate (which may have leading bases).
+ unsigned AggregateSize = Aggregate.getNumParams();
+ while (DesignatedIndex < AggregateSize &&
+ Aggregate.getParamDecl(DesignatedIndex) != DesignatedField)
+ ++DesignatedIndex;
+
+ // Continue from the index after the last named field.
+ return DesignatedIndex + ArgsAfterDesignator + 1;
+}
+
+QualType Sema::ProduceConstructorSignatureHelp(QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
- SourceLocation OpenParLoc) {
+ SourceLocation OpenParLoc,
+ bool Braced) {
if (!CodeCompleter)
return QualType();
+ SmallVector<ResultCandidate, 8> Results;
// A complete type is needed to lookup for constructors.
- CXXRecordDecl *RD =
- isCompleteType(Loc, Type) ? Type->getAsCXXRecordDecl() : nullptr;
+ RecordDecl *RD =
+ isCompleteType(Loc, Type) ? Type->getAsRecordDecl() : nullptr;
if (!RD)
return Type;
+ CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD);
+
+ // Consider aggregate initialization.
+ // We don't check that types so far are correct.
+ // We also don't handle C99/C++17 brace-elision, we assume init-list elements
+ // are 1:1 with fields.
+ // FIXME: it would be nice to support "unwrapping" aggregates that contain
+ // a single subaggregate, like std::array<T, N> -> T __elements[N].
+ if (Braced && !RD->isUnion() &&
+ (!LangOpts.CPlusPlus || (CRD && CRD->isAggregate()))) {
+ ResultCandidate AggregateSig(RD);
+ unsigned AggregateSize = AggregateSig.getNumParams();
+
+ if (auto NextIndex =
+ getNextAggregateIndexAfterDesignatedInit(AggregateSig, Args)) {
+ // A designator was used, only aggregate init is possible.
+ if (*NextIndex >= AggregateSize)
+ return Type;
+ Results.push_back(AggregateSig);
+ return ProduceSignatureHelp(*this, Results, *NextIndex, OpenParLoc,
+ Braced);
+ }
+
+ // Describe aggregate initialization, but also constructors below.
+ if (Args.size() < AggregateSize)
+ Results.push_back(AggregateSig);
+ }
// FIXME: Provide support for member initializers.
// FIXME: Provide support for variadic template constructors.
- OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
+ if (CRD) {
+ OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
+ for (NamedDecl *C : LookupConstructors(CRD)) {
+ if (auto *FD = dyn_cast<FunctionDecl>(C)) {
+ // FIXME: we can't yet provide correct signature help for initializer
+ // list constructors, so skip them entirely.
+ if (Braced && LangOpts.CPlusPlus && isInitListConstructor(FD))
+ continue;
+ AddOverloadCandidate(FD, DeclAccessPair::make(FD, C->getAccess()), Args,
+ CandidateSet,
+ /*SuppressUserConversions=*/false,
+ /*PartialOverloading=*/true,
+ /*AllowExplicit*/ true);
+ } else if (auto *FTD = dyn_cast<FunctionTemplateDecl>(C)) {
+ if (Braced && LangOpts.CPlusPlus &&
+ isInitListConstructor(FTD->getTemplatedDecl()))
+ continue;
- for (NamedDecl *C : LookupConstructors(RD)) {
- if (auto *FD = dyn_cast<FunctionDecl>(C)) {
- AddOverloadCandidate(FD, DeclAccessPair::make(FD, C->getAccess()), Args,
- CandidateSet,
- /*SuppressUserConversions=*/false,
- /*PartialOverloading=*/true,
- /*AllowExplicit*/ true);
- } else if (auto *FTD = dyn_cast<FunctionTemplateDecl>(C)) {
- AddTemplateOverloadCandidate(
- FTD, DeclAccessPair::make(FTD, C->getAccess()),
- /*ExplicitTemplateArgs=*/nullptr, Args, CandidateSet,
- /*SuppressUserConversions=*/false,
- /*PartialOverloading=*/true);
+ AddTemplateOverloadCandidate(
+ FTD, DeclAccessPair::make(FTD, C->getAccess()),
+ /*ExplicitTemplateArgs=*/nullptr, Args, CandidateSet,
+ /*SuppressUserConversions=*/false,
+ /*PartialOverloading=*/true);
+ }
}
+ mergeCandidatesWithResults(*this, Results, CandidateSet, Loc, Args.size());
}
- SmallVector<ResultCandidate, 8> Results;
- mergeCandidatesWithResults(*this, Results, CandidateSet, Loc, Args.size());
- return ProduceSignatureHelp(*this, S, Results, Args.size(), OpenParLoc);
+ return ProduceSignatureHelp(*this, Results, Args.size(), OpenParLoc, Braced);
}
QualType Sema::ProduceCtorInitMemberSignatureHelp(
- Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy,
- ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc) {
+ Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy,
+ ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc,
+ bool Braced) {
if (!CodeCompleter)
return QualType();
@@ -5874,12 +6440,66 @@ QualType Sema::ProduceCtorInitMemberSignatureHelp(
// FIXME: Add support for Base class constructors as well.
if (ValueDecl *MemberDecl = tryLookupCtorInitMemberDecl(
Constructor->getParent(), SS, TemplateTypeTy, II))
- return ProduceConstructorSignatureHelp(getCurScope(), MemberDecl->getType(),
+ return ProduceConstructorSignatureHelp(MemberDecl->getType(),
MemberDecl->getLocation(), ArgExprs,
- OpenParLoc);
+ OpenParLoc, Braced);
return QualType();
}
+static bool argMatchesTemplateParams(const ParsedTemplateArgument &Arg,
+ unsigned Index,
+ const TemplateParameterList &Params) {
+ const NamedDecl *Param;
+ if (Index < Params.size())
+ Param = Params.getParam(Index);
+ else if (Params.hasParameterPack())
+ Param = Params.asArray().back();
+ else
+ return false; // too many args
+
+ switch (Arg.getKind()) {
+ case ParsedTemplateArgument::Type:
+ return llvm::isa<TemplateTypeParmDecl>(Param); // constraints not checked
+ case ParsedTemplateArgument::NonType:
+ return llvm::isa<NonTypeTemplateParmDecl>(Param); // type not checked
+ case ParsedTemplateArgument::Template:
+ return llvm::isa<TemplateTemplateParmDecl>(Param); // signature not checked
+ }
+ llvm_unreachable("Unhandled switch case");
+}
+
+QualType Sema::ProduceTemplateArgumentSignatureHelp(
+ TemplateTy ParsedTemplate, ArrayRef<ParsedTemplateArgument> Args,
+ SourceLocation LAngleLoc) {
+ if (!CodeCompleter || !ParsedTemplate)
+ return QualType();
+
+ SmallVector<ResultCandidate, 8> Results;
+ auto Consider = [&](const TemplateDecl *TD) {
+ // Only add if the existing args are compatible with the template.
+ bool Matches = true;
+ for (unsigned I = 0; I < Args.size(); ++I) {
+ if (!argMatchesTemplateParams(Args[I], I, *TD->getTemplateParameters())) {
+ Matches = false;
+ break;
+ }
+ }
+ if (Matches)
+ Results.emplace_back(TD);
+ };
+
+ TemplateName Template = ParsedTemplate.get();
+ if (const auto *TD = Template.getAsTemplateDecl()) {
+ Consider(TD);
+ } else if (const auto *OTS = Template.getAsOverloadedTemplate()) {
+ for (const NamedDecl *ND : *OTS)
+ if (const auto *TD = llvm::dyn_cast<TemplateDecl>(ND))
+ Consider(TD);
+ }
+ return ProduceSignatureHelp(*this, Results, Args.size(), LAngleLoc,
+ /*Braced=*/false);
+}
+
static QualType getDesignatedType(QualType BaseType, const Designation &Desig) {
for (unsigned I = 0; I < Desig.getNumDesignators(); ++I) {
if (BaseType.isNull())
@@ -5893,7 +6513,7 @@ static QualType getDesignatedType(QualType BaseType, const Designation &Desig) {
assert(D.isFieldDesignator());
auto *RD = getAsRecordDecl(BaseType);
if (RD && RD->isCompleteDefinition()) {
- for (const auto *Member : RD->lookup(D.getField()))
+ for (const auto *Member : RD->lookup(D.getFieldDecl()))
if (const FieldDecl *FD = llvm::dyn_cast<FieldDecl>(Member)) {
NextType = FD->getType();
break;
@@ -5921,7 +6541,15 @@ void Sema::CodeCompleteDesignator(QualType BaseType,
CodeCompleter->getCodeCompletionTUInfo(), CCC);
Results.EnterNewScope();
- for (const auto *FD : RD->fields()) {
+ for (const Decl *D : RD->decls()) {
+ const FieldDecl *FD;
+ if (auto *IFD = dyn_cast<IndirectFieldDecl>(D))
+ FD = IFD->getAnonField();
+ else if (auto *DFD = dyn_cast<FieldDecl>(D))
+ FD = DFD;
+ else
+ continue;
+
// FIXME: Make use of previous designators to mark any fields before those
// inaccessible, and also compute the next initializer priority.
ResultBuilder::Result Result(FD, Results.getBasePriority(FD));
@@ -6066,7 +6694,7 @@ void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
// The "template" keyword can follow "::" in the grammar, but only
// put it into the grammar if the nested-name-specifier is dependent.
// FIXME: results is always empty, this appears to be dead.
- if (!Results.empty() && NNS->isDependent())
+ if (!Results.empty() && NNS && NNS->isDependent())
Results.AddResult("template");
// If the scope is a concept-constrained type parameter, infer nested
@@ -7050,7 +7678,8 @@ void Sema::CodeCompleteObjCPropertyGetter(Scope *S) {
Results.EnterNewScope();
VisitedSelectorSet Selectors;
- AddObjCMethods(Class, true, MK_ZeroArgSelector, None, CurContext, Selectors,
+ AddObjCMethods(Class, true, MK_ZeroArgSelector, std::nullopt, CurContext,
+ Selectors,
/*AllowSameLength=*/true, Results);
Results.ExitScope();
HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
@@ -7076,7 +7705,8 @@ void Sema::CodeCompleteObjCPropertySetter(Scope *S) {
Results.EnterNewScope();
VisitedSelectorSet Selectors;
- AddObjCMethods(Class, true, MK_OneArgSelector, None, CurContext, Selectors,
+ AddObjCMethods(Class, true, MK_OneArgSelector, std::nullopt, CurContext,
+ Selectors,
/*AllowSameLength=*/true, Results);
Results.ExitScope();
@@ -7372,7 +8002,8 @@ void Sema::CodeCompleteObjCMessageReceiver(Scope *S) {
if (Iface->getSuperClass()) {
Results.AddResult(Result("super"));
- AddSuperSendCompletion(*this, /*NeedSuperKeyword=*/true, None, Results);
+ AddSuperSendCompletion(*this, /*NeedSuperKeyword=*/true, std::nullopt,
+ Results);
}
if (getLangOpts().CPlusPlus11)
@@ -7922,6 +8553,24 @@ void Sema::CodeCompleteObjCInterfaceDecl(Scope *S) {
Results.data(), Results.size());
}
+void Sema::CodeCompleteObjCClassForwardDecl(Scope *S) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_ObjCClassForwardDecl);
+ Results.EnterNewScope();
+
+ if (CodeCompleter->includeGlobals()) {
+ // Add all classes.
+ AddInterfaceResults(Context.getTranslationUnitDecl(), CurContext, false,
+ false, Results);
+ }
+
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
+
void Sema::CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName,
SourceLocation ClassNameLoc) {
ResultBuilder Results(*this, CodeCompleter->getAllocator(),
@@ -8018,7 +8667,7 @@ void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCCategoryName);
- // Add all of the categories that have have corresponding interface
+ // Add all of the categories that have corresponding interface
// declarations in this class and any of its superclasses, except for
// already-implemented categories in the class itself.
llvm::SmallPtrSet<IdentifierInfo *, 16> CategoryNames;
@@ -8182,7 +8831,7 @@ typedef llvm::DenseMap<Selector,
/// indexed by selector so they can be easily found.
static void FindImplementableMethods(ASTContext &Context,
ObjCContainerDecl *Container,
- Optional<bool> WantInstanceMethods,
+ std::optional<bool> WantInstanceMethods,
QualType ReturnType,
KnownMethodsMap &KnownMethods,
bool InOriginalClass = true) {
@@ -8699,8 +9348,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
if (IsInstanceMethod &&
(ReturnType.isNull() ||
(ReturnType->isObjCObjectPointerType() &&
- ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
- ReturnType->getAs<ObjCObjectPointerType>()
+ ReturnType->castAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
+ ReturnType->castAs<ObjCObjectPointerType>()
->getInterfaceDecl()
->getName() == "NSEnumerator"))) {
std::string SelectorName = (Twine("enumeratorOf") + UpperKey).str();
@@ -8906,7 +9555,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
}
}
-void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
+void Sema::CodeCompleteObjCMethodDecl(Scope *S,
+ std::optional<bool> IsInstanceMethod,
ParsedType ReturnTy) {
// Determine the return type of the method we're declaring, if
// provided.
@@ -9066,8 +9716,7 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
IFace = Category->getClassInterface();
if (IFace)
- for (auto *Cat : IFace->visible_categories())
- Containers.push_back(Cat);
+ llvm::append_range(Containers, IFace->visible_categories());
if (IsInstanceMethod) {
for (unsigned I = 0, N = Containers.size(); I != N; ++I)
@@ -9149,7 +9798,7 @@ void Sema::CodeCompleteObjCMethodDeclSelector(
Results.ExitScope();
if (!AtParameterName && !SelIdents.empty() &&
- SelIdents.front()->getName().startswith("init")) {
+ SelIdents.front()->getName().starts_with("init")) {
for (const auto &M : PP.macros()) {
if (M.first->getName() != "NS_DESIGNATED_INITIALIZER")
continue;
@@ -9348,7 +9997,7 @@ void Sema::CodeCompletePreprocessorMacroName(bool IsDefinition) {
CodeCompleter->getCodeCompletionTUInfo(),
IsDefinition ? CodeCompletionContext::CCC_MacroName
: CodeCompletionContext::CCC_MacroNameUse);
- if (!IsDefinition && (!CodeCompleter || CodeCompleter->includeMacros())) {
+ if (!IsDefinition && CodeCompleter->includeMacros()) {
// Add just the names of macros, not their arguments.
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
@@ -9375,9 +10024,8 @@ void Sema::CodeCompletePreprocessorExpression() {
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_PreprocessorExpression);
- if (!CodeCompleter || CodeCompleter->includeMacros())
- AddMacroResults(PP, Results,
- !CodeCompleter || CodeCompleter->loadExternal(), true);
+ if (CodeCompleter->includeMacros())
+ AddMacroResults(PP, Results, CodeCompleter->loadExternal(), true);
// defined (<macro>)
Results.EnterNewScope();
@@ -9461,6 +10109,10 @@ void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
}
}
+ const StringRef &Dirname = llvm::sys::path::filename(Dir);
+ const bool isQt = Dirname.starts_with("Qt") || Dirname == "ActiveQt";
+ const bool ExtensionlessHeaders =
+ IsSystem || isQt || Dir.ends_with(".framework/Headers");
std::error_code EC;
unsigned Count = 0;
for (auto It = FS.dir_begin(Dir, EC);
@@ -9487,18 +10139,19 @@ void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
AddCompletion(Filename, /*IsDirectory=*/true);
break;
- case llvm::sys::fs::file_type::regular_file:
- // Only files that really look like headers. (Except in system dirs).
- if (!IsSystem) {
- // Header extensions from Types.def, which we can't depend on here.
- if (!(Filename.endswith_insensitive(".h") ||
- Filename.endswith_insensitive(".hh") ||
- Filename.endswith_insensitive(".hpp") ||
- Filename.endswith_insensitive(".inc")))
- break;
- }
+ case llvm::sys::fs::file_type::regular_file: {
+ // Only files that really look like headers. (Except in special dirs).
+ const bool IsHeader = Filename.ends_with_insensitive(".h") ||
+ Filename.ends_with_insensitive(".hh") ||
+ Filename.ends_with_insensitive(".hpp") ||
+ Filename.ends_with_insensitive(".hxx") ||
+ Filename.ends_with_insensitive(".inc") ||
+ (ExtensionlessHeaders && !Filename.contains('.'));
+ if (!IsHeader)
+ break;
AddCompletion(Filename, /*IsDirectory=*/false);
break;
+ }
default:
break;
}
@@ -9513,12 +10166,12 @@ void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
// header maps are not (currently) enumerable.
break;
case DirectoryLookup::LT_NormalDir:
- AddFilesFromIncludeDir(IncludeDir.getDir()->getName(), IsSystem,
+ AddFilesFromIncludeDir(IncludeDir.getDirRef()->getName(), IsSystem,
DirectoryLookup::LT_NormalDir);
break;
case DirectoryLookup::LT_Framework:
- AddFilesFromIncludeDir(IncludeDir.getFrameworkDir()->getName(), IsSystem,
- DirectoryLookup::LT_Framework);
+ AddFilesFromIncludeDir(IncludeDir.getFrameworkDirRef()->getName(),
+ IsSystem, DirectoryLookup::LT_Framework);
break;
}
};
@@ -9530,9 +10183,8 @@ void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
using llvm::make_range;
if (!Angled) {
// The current directory is on the include path for "quoted" includes.
- auto *CurFile = PP.getCurrentFileLexer()->getFileEntry();
- if (CurFile && CurFile->getDir())
- AddFilesFromIncludeDir(CurFile->getDir()->getName(), false,
+ if (auto CurFile = PP.getCurrentFileLexer()->getFileEntry())
+ AddFilesFromIncludeDir(CurFile->getDir().getName(), false,
DirectoryLookup::LT_NormalDir);
for (const auto &D : make_range(S.quoted_dir_begin(), S.quoted_dir_end()))
AddFilesFromDirLookup(D, false);
@@ -9558,7 +10210,7 @@ void Sema::CodeCompleteAvailabilityPlatformName() {
CodeCompletionContext::CCC_Other);
Results.EnterNewScope();
static const char *Platforms[] = {"macOS", "iOS", "watchOS", "tvOS"};
- for (const char *Platform : llvm::makeArrayRef(Platforms)) {
+ for (const char *Platform : llvm::ArrayRef(Platforms)) {
Results.AddResult(CodeCompletionResult(Platform));
Results.AddResult(CodeCompletionResult(Results.getAllocator().CopyString(
Twine(Platform) + "ApplicationExtension")));
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp b/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
index 931c9e3e2738..88fc846c89e4 100755
--- a/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
@@ -1,9 +1,8 @@
//===-- SemaConcept.cpp - Semantic Analysis for Constraints and Concepts --===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -12,26 +11,32 @@
//===----------------------------------------------------------------------===//
#include "clang/Sema/SemaConcept.h"
-#include "clang/Sema/Sema.h"
-#include "clang/Sema/SemaInternal.h"
-#include "clang/Sema/SemaDiagnostic.h"
-#include "clang/Sema/TemplateDeduction.h"
-#include "clang/Sema/Template.h"
-#include "clang/Sema/Overload.h"
-#include "clang/Sema/Initialization.h"
-#include "clang/Sema/SemaInternal.h"
+#include "TreeTransform.h"
+#include "clang/AST/ASTLambda.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/OperatorPrecedence.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Overload.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Template.h"
+#include "clang/Sema/TemplateDeduction.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/StringExtras.h"
+#include <optional>
using namespace clang;
using namespace sema;
namespace {
class LogicalBinOp {
+ SourceLocation Loc;
OverloadedOperatorKind Op = OO_None;
const Expr *LHS = nullptr;
const Expr *RHS = nullptr;
@@ -42,12 +47,14 @@ public:
Op = BinaryOperator::getOverloadedOperator(BO->getOpcode());
LHS = BO->getLHS();
RHS = BO->getRHS();
+ Loc = BO->getExprLoc();
} else if (auto *OO = dyn_cast<CXXOperatorCallExpr>(E)) {
// If OO is not || or && it might not have exactly 2 arguments.
if (OO->getNumArgs() == 2) {
Op = OO->getOperator();
LHS = OO->getArg(0);
RHS = OO->getArg(1);
+ Loc = OO->getOperatorLoc();
}
}
}
@@ -58,6 +65,26 @@ public:
const Expr *getLHS() const { return LHS; }
const Expr *getRHS() const { return RHS; }
+
+ ExprResult recreateBinOp(Sema &SemaRef, ExprResult LHS) const {
+ return recreateBinOp(SemaRef, LHS, const_cast<Expr *>(getRHS()));
+ }
+
+ ExprResult recreateBinOp(Sema &SemaRef, ExprResult LHS,
+ ExprResult RHS) const {
+ assert((isAnd() || isOr()) && "Not the right kind of op?");
+ assert((!LHS.isInvalid() && !RHS.isInvalid()) && "not good expressions?");
+
+ if (!LHS.isUsable() || !RHS.isUsable())
+ return ExprEmpty();
+
+ // We should just be able to 'normalize' these to the builtin Binary
+ // Operator, since that is how they are evaluated in constriant checks.
+ return BinaryOperator::Create(SemaRef.Context, LHS.get(), RHS.get(),
+ BinaryOperator::getOverloadedOpcode(Op),
+ SemaRef.Context.BoolTy, VK_PRValue,
+ OK_Ordinary, Loc, FPOptionsOverride{});
+ }
};
}
@@ -81,27 +108,35 @@ bool Sema::CheckConstraintExpression(const Expr *ConstraintExpression,
QualType Type = ConstraintExpression->getType();
auto CheckForNonPrimary = [&] {
- if (PossibleNonPrimary)
- *PossibleNonPrimary =
- // We have the following case:
- // template<typename> requires func(0) struct S { };
- // The user probably isn't aware of the parentheses required around
- // the function call, and we're only going to parse 'func' as the
- // primary-expression, and complain that it is of non-bool type.
- (NextToken.is(tok::l_paren) &&
- (IsTrailingRequiresClause ||
- (Type->isDependentType() &&
- isa<UnresolvedLookupExpr>(ConstraintExpression)) ||
- Type->isFunctionType() ||
- Type->isSpecificBuiltinType(BuiltinType::Overload))) ||
- // We have the following case:
- // template<typename T> requires size_<T> == 0 struct S { };
- // The user probably isn't aware of the parentheses required around
- // the binary operator, and we're only going to parse 'func' as the
- // first operand, and complain that it is of non-bool type.
- getBinOpPrecedence(NextToken.getKind(),
- /*GreaterThanIsOperator=*/true,
- getLangOpts().CPlusPlus11) > prec::LogicalAnd;
+ if (!PossibleNonPrimary)
+ return;
+
+ *PossibleNonPrimary =
+ // We have the following case:
+ // template<typename> requires func(0) struct S { };
+ // The user probably isn't aware of the parentheses required around
+ // the function call, and we're only going to parse 'func' as the
+ // primary-expression, and complain that it is of non-bool type.
+ //
+ // However, if we're in a lambda, this might also be:
+ // []<typename> requires var () {};
+ // Which also looks like a function call due to the lambda parentheses,
+ // but unlike the first case, isn't an error, so this check is skipped.
+ (NextToken.is(tok::l_paren) &&
+ (IsTrailingRequiresClause ||
+ (Type->isDependentType() &&
+ isa<UnresolvedLookupExpr>(ConstraintExpression) &&
+ !dyn_cast_if_present<LambdaScopeInfo>(getCurFunction())) ||
+ Type->isFunctionType() ||
+ Type->isSpecificBuiltinType(BuiltinType::Overload))) ||
+ // We have the following case:
+ // template<typename T> requires size_<T> == 0 struct S { };
+ // The user probably isn't aware of the parentheses required around
+ // the binary operator, and we're only going to parse 'func' as the
+ // first operand, and complain that it is of non-bool type.
+ getBinOpPrecedence(NextToken.getKind(),
+ /*GreaterThanIsOperator=*/true,
+ getLangOpts().CPlusPlus11) > prec::LogicalAnd;
};
// An atomic constraint!
@@ -123,17 +158,39 @@ bool Sema::CheckConstraintExpression(const Expr *ConstraintExpression,
return true;
}
+namespace {
+struct SatisfactionStackRAII {
+ Sema &SemaRef;
+ bool Inserted = false;
+ SatisfactionStackRAII(Sema &SemaRef, const NamedDecl *ND,
+ const llvm::FoldingSetNodeID &FSNID)
+ : SemaRef(SemaRef) {
+ if (ND) {
+ SemaRef.PushSatisfactionStackEntry(ND, FSNID);
+ Inserted = true;
+ }
+ }
+ ~SatisfactionStackRAII() {
+ if (Inserted)
+ SemaRef.PopSatisfactionStackEntry();
+ }
+};
+} // namespace
+
template <typename AtomicEvaluator>
-static bool
+static ExprResult
calculateConstraintSatisfaction(Sema &S, const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction,
AtomicEvaluator &&Evaluator) {
ConstraintExpr = ConstraintExpr->IgnoreParenImpCasts();
if (LogicalBinOp BO = ConstraintExpr) {
- if (calculateConstraintSatisfaction(S, BO.getLHS(), Satisfaction,
- Evaluator))
- return true;
+ size_t EffectiveDetailEndIndex = Satisfaction.Details.size();
+ ExprResult LHSRes = calculateConstraintSatisfaction(
+ S, BO.getLHS(), Satisfaction, Evaluator);
+
+ if (LHSRes.isInvalid())
+ return ExprError();
bool IsLHSSatisfied = Satisfaction.IsSatisfied;
@@ -144,7 +201,8 @@ calculateConstraintSatisfaction(Sema &S, const Expr *ConstraintExpr,
// is checked. If that is satisfied, the disjunction is satisfied.
// Otherwise, the disjunction is satisfied if and only if the second
// operand is satisfied.
- return false;
+ // LHS is instantiated while RHS is not. Skip creating invalid BinaryOp.
+ return LHSRes;
if (BO.isAnd() && !IsLHSSatisfied)
// [temp.constr.op] p2
@@ -153,12 +211,38 @@ calculateConstraintSatisfaction(Sema &S, const Expr *ConstraintExpr,
// is checked. If that is not satisfied, the conjunction is not
// satisfied. Otherwise, the conjunction is satisfied if and only if
// the second operand is satisfied.
- return false;
+ // LHS is instantiated while RHS is not. Skip creating invalid BinaryOp.
+ return LHSRes;
- return calculateConstraintSatisfaction(
+ ExprResult RHSRes = calculateConstraintSatisfaction(
S, BO.getRHS(), Satisfaction, std::forward<AtomicEvaluator>(Evaluator));
- } else if (auto *C = dyn_cast<ExprWithCleanups>(ConstraintExpr)) {
- return calculateConstraintSatisfaction(S, C->getSubExpr(), Satisfaction,
+ if (RHSRes.isInvalid())
+ return ExprError();
+
+ bool IsRHSSatisfied = Satisfaction.IsSatisfied;
+ // Current implementation adds diagnostic information about the falsity
+ // of each false atomic constraint expression when it evaluates them.
+ // When the evaluation results to `false || true`, the information
+ // generated during the evaluation of left-hand side is meaningless
+ // because the whole expression evaluates to true.
+ // The following code removes the irrelevant diagnostic information.
+ // FIXME: We should probably delay the addition of diagnostic information
+ // until we know the entire expression is false.
+ if (BO.isOr() && IsRHSSatisfied) {
+ auto EffectiveDetailEnd = Satisfaction.Details.begin();
+ std::advance(EffectiveDetailEnd, EffectiveDetailEndIndex);
+ Satisfaction.Details.erase(EffectiveDetailEnd,
+ Satisfaction.Details.end());
+ }
+
+ return BO.recreateBinOp(S, LHSRes, RHSRes);
+ }
+
+ if (auto *C = dyn_cast<ExprWithCleanups>(ConstraintExpr)) {
+ // These aren't evaluated, so we don't care about cleanups, so we can just
+ // evaluate these as if the cleanups didn't exist.
+ return calculateConstraintSatisfaction(
+ S, C->getSubExpr(), Satisfaction,
std::forward<AtomicEvaluator>(Evaluator));
}
@@ -166,11 +250,35 @@ calculateConstraintSatisfaction(Sema &S, const Expr *ConstraintExpr,
ExprResult SubstitutedAtomicExpr = Evaluator(ConstraintExpr);
if (SubstitutedAtomicExpr.isInvalid())
- return true;
+ return ExprError();
if (!SubstitutedAtomicExpr.isUsable())
// Evaluator has decided satisfaction without yielding an expression.
- return false;
+ return ExprEmpty();
+
+ // We don't have the ability to evaluate this, since it contains a
+ // RecoveryExpr, so we want to fail overload resolution. Otherwise,
+ // we'd potentially pick up a different overload, and cause confusing
+ // diagnostics. SO, add a failure detail that will cause us to make this
+ // overload set not viable.
+ if (SubstitutedAtomicExpr.get()->containsErrors()) {
+ Satisfaction.IsSatisfied = false;
+ Satisfaction.ContainsErrors = true;
+
+ PartialDiagnostic Msg = S.PDiag(diag::note_constraint_references_error);
+ SmallString<128> DiagString;
+ DiagString = ": ";
+ Msg.EmitToString(S.getDiagnostics(), DiagString);
+ unsigned MessageSize = DiagString.size();
+ char *Mem = new (S.Context) char[MessageSize];
+ memcpy(Mem, DiagString.c_str(), MessageSize);
+ Satisfaction.Details.emplace_back(
+ ConstraintExpr,
+ new (S.Context) ConstraintSatisfaction::SubstitutionDiagnostic{
+ SubstitutedAtomicExpr.get()->getBeginLoc(),
+ StringRef(Mem, MessageSize)});
+ return SubstitutedAtomicExpr;
+ }
EnterExpressionEvaluationContext ConstantEvaluated(
S, Sema::ExpressionEvaluationContext::ConstantEvaluated);
@@ -187,7 +295,7 @@ calculateConstraintSatisfaction(Sema &S, const Expr *ConstraintExpr,
<< SubstitutedAtomicExpr.get()->getSourceRange();
for (const PartialDiagnosticAt &PDiag : EvaluationDiags)
S.Diag(PDiag.first, PDiag.second);
- return true;
+ return ExprError();
}
assert(EvalResult.Val.isInt() &&
@@ -197,17 +305,42 @@ calculateConstraintSatisfaction(Sema &S, const Expr *ConstraintExpr,
Satisfaction.Details.emplace_back(ConstraintExpr,
SubstitutedAtomicExpr.get());
+ return SubstitutedAtomicExpr;
+}
+
+static bool
+DiagRecursiveConstraintEval(Sema &S, llvm::FoldingSetNodeID &ID,
+ const NamedDecl *Templ, const Expr *E,
+ const MultiLevelTemplateArgumentList &MLTAL) {
+ E->Profile(ID, S.Context, /*Canonical=*/true);
+ for (const auto &List : MLTAL)
+ for (const auto &TemplateArg : List.Args)
+ TemplateArg.Profile(ID, S.Context);
+
+ // Note that we have to do this with our own collection, because there are
+ // times where a constraint-expression check can cause us to need to evaluate
+ // other constriants that are unrelated, such as when evaluating a recovery
+ // expression, or when trying to determine the constexpr-ness of special
+ // members. Otherwise we could just use the
+ // Sema::InstantiatingTemplate::isAlreadyBeingInstantiated function.
+ if (S.SatisfactionStackContains(Templ, ID)) {
+ S.Diag(E->getExprLoc(), diag::err_constraint_depends_on_self)
+ << const_cast<Expr *>(E) << E->getSourceRange();
+ return true;
+ }
+
return false;
}
-static bool calculateConstraintSatisfaction(
- Sema &S, const NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs,
- SourceLocation TemplateNameLoc, MultiLevelTemplateArgumentList &MLTAL,
- const Expr *ConstraintExpr, ConstraintSatisfaction &Satisfaction) {
+static ExprResult calculateConstraintSatisfaction(
+ Sema &S, const NamedDecl *Template, SourceLocation TemplateNameLoc,
+ const MultiLevelTemplateArgumentList &MLTAL, const Expr *ConstraintExpr,
+ ConstraintSatisfaction &Satisfaction) {
return calculateConstraintSatisfaction(
S, ConstraintExpr, Satisfaction, [&](const Expr *AtomicExpr) {
EnterExpressionEvaluationContext ConstantEvaluated(
- S, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+ S, Sema::ExpressionEvaluationContext::ConstantEvaluated,
+ Sema::ReuseLambdaContextDecl);
// Atomic constraint - substitute arguments and check satisfaction.
ExprResult SubstitutedExpression;
@@ -219,23 +352,28 @@ static bool calculateConstraintSatisfaction(
AtomicExpr->getSourceRange());
if (Inst.isInvalid())
return ExprError();
+
+ llvm::FoldingSetNodeID ID;
+ if (Template &&
+ DiagRecursiveConstraintEval(S, ID, Template, AtomicExpr, MLTAL)) {
+ Satisfaction.IsSatisfied = false;
+ Satisfaction.ContainsErrors = true;
+ return ExprEmpty();
+ }
+
+ SatisfactionStackRAII StackRAII(S, Template, ID);
+
// We do not want error diagnostics escaping here.
Sema::SFINAETrap Trap(S);
- SubstitutedExpression = S.SubstExpr(const_cast<Expr *>(AtomicExpr),
- MLTAL);
- // Substitution might have stripped off a contextual conversion to
- // bool if this is the operand of an '&&' or '||'. For example, we
- // might lose an lvalue-to-rvalue conversion here. If so, put it back
- // before we try to evaluate.
- if (!SubstitutedExpression.isInvalid())
- SubstitutedExpression =
- S.PerformContextuallyConvertToBool(SubstitutedExpression.get());
+ SubstitutedExpression =
+ S.SubstConstraintExpr(const_cast<Expr *>(AtomicExpr), MLTAL);
+
if (SubstitutedExpression.isInvalid() || Trap.hasErrorOccurred()) {
// C++2a [temp.constr.atomic]p1
// ...If substitution results in an invalid type or expression, the
// constraint is not satisfied.
if (!Trap.hasErrorOccurred())
- // A non-SFINAE error has occured as a result of this
+ // A non-SFINAE error has occurred as a result of this
// substitution.
return ExprError();
@@ -266,109 +404,316 @@ static bool calculateConstraintSatisfaction(
if (!S.CheckConstraintExpression(SubstitutedExpression.get()))
return ExprError();
+ // [temp.constr.atomic]p3: To determine if an atomic constraint is
+ // satisfied, the parameter mapping and template arguments are first
+ // substituted into its expression. If substitution results in an
+ // invalid type or expression, the constraint is not satisfied.
+ // Otherwise, the lvalue-to-rvalue conversion is performed if necessary,
+ // and E shall be a constant expression of type bool.
+ //
+ // Perform the L to R Value conversion if necessary. We do so for all
+ // non-PRValue categories, else we fail to extend the lifetime of
+ // temporaries, and that fails the constant expression check.
+ if (!SubstitutedExpression.get()->isPRValue())
+ SubstitutedExpression = ImplicitCastExpr::Create(
+ S.Context, SubstitutedExpression.get()->getType(),
+ CK_LValueToRValue, SubstitutedExpression.get(),
+ /*BasePath=*/nullptr, VK_PRValue, FPOptionsOverride());
+
return SubstitutedExpression;
});
}
-static bool CheckConstraintSatisfaction(Sema &S, const NamedDecl *Template,
- ArrayRef<const Expr *> ConstraintExprs,
- ArrayRef<TemplateArgument> TemplateArgs,
- SourceRange TemplateIDRange,
- ConstraintSatisfaction &Satisfaction) {
+static bool CheckConstraintSatisfaction(
+ Sema &S, const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
+ llvm::SmallVectorImpl<Expr *> &Converted,
+ const MultiLevelTemplateArgumentList &TemplateArgsLists,
+ SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction) {
if (ConstraintExprs.empty()) {
Satisfaction.IsSatisfied = true;
return false;
}
- for (auto& Arg : TemplateArgs)
- if (Arg.isInstantiationDependent()) {
- // No need to check satisfaction for dependent constraint expressions.
- Satisfaction.IsSatisfied = true;
- return false;
- }
+ if (TemplateArgsLists.isAnyArgInstantiationDependent()) {
+ // No need to check satisfaction for dependent constraint expressions.
+ Satisfaction.IsSatisfied = true;
+ return false;
+ }
+ ArrayRef<TemplateArgument> TemplateArgs =
+ TemplateArgsLists.getNumSubstitutedLevels() > 0
+ ? TemplateArgsLists.getOutermost()
+ : ArrayRef<TemplateArgument> {};
Sema::InstantiatingTemplate Inst(S, TemplateIDRange.getBegin(),
Sema::InstantiatingTemplate::ConstraintsCheck{},
const_cast<NamedDecl *>(Template), TemplateArgs, TemplateIDRange);
if (Inst.isInvalid())
return true;
- MultiLevelTemplateArgumentList MLTAL;
- MLTAL.addOuterTemplateArguments(TemplateArgs);
-
for (const Expr *ConstraintExpr : ConstraintExprs) {
- if (calculateConstraintSatisfaction(S, Template, TemplateArgs,
- TemplateIDRange.getBegin(), MLTAL,
- ConstraintExpr, Satisfaction))
+ ExprResult Res = calculateConstraintSatisfaction(
+ S, Template, TemplateIDRange.getBegin(), TemplateArgsLists,
+ ConstraintExpr, Satisfaction);
+ if (Res.isInvalid())
return true;
- if (!Satisfaction.IsSatisfied)
+
+ Converted.push_back(Res.get());
+ if (!Satisfaction.IsSatisfied) {
+ // Backfill the 'converted' list with nulls so we can keep the Converted
+ // and unconverted lists in sync.
+ Converted.append(ConstraintExprs.size() - Converted.size(), nullptr);
// [temp.constr.op] p2
- // [...] To determine if a conjunction is satisfied, the satisfaction
- // of the first operand is checked. If that is not satisfied, the
- // conjunction is not satisfied. [...]
+ // [...] To determine if a conjunction is satisfied, the satisfaction
+ // of the first operand is checked. If that is not satisfied, the
+ // conjunction is not satisfied. [...]
return false;
+ }
}
return false;
}
bool Sema::CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
- ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange,
- ConstraintSatisfaction &OutSatisfaction) {
+ llvm::SmallVectorImpl<Expr *> &ConvertedConstraints,
+ const MultiLevelTemplateArgumentList &TemplateArgsLists,
+ SourceRange TemplateIDRange, ConstraintSatisfaction &OutSatisfaction) {
if (ConstraintExprs.empty()) {
OutSatisfaction.IsSatisfied = true;
return false;
}
+ if (!Template) {
+ return ::CheckConstraintSatisfaction(
+ *this, nullptr, ConstraintExprs, ConvertedConstraints,
+ TemplateArgsLists, TemplateIDRange, OutSatisfaction);
+ }
+
+ // A list of the template argument list flattened in a predictible manner for
+ // the purposes of caching. The ConstraintSatisfaction type is in AST so it
+ // has no access to the MultiLevelTemplateArgumentList, so this has to happen
+ // here.
+ llvm::SmallVector<TemplateArgument, 4> FlattenedArgs;
+ for (auto List : TemplateArgsLists)
+ FlattenedArgs.insert(FlattenedArgs.end(), List.Args.begin(),
+ List.Args.end());
llvm::FoldingSetNodeID ID;
+ ConstraintSatisfaction::Profile(ID, Context, Template, FlattenedArgs);
void *InsertPos;
- ConstraintSatisfaction *Satisfaction = nullptr;
- bool ShouldCache = LangOpts.ConceptSatisfactionCaching && Template;
- if (ShouldCache) {
- ConstraintSatisfaction::Profile(ID, Context, Template, TemplateArgs);
- Satisfaction = SatisfactionCache.FindNodeOrInsertPos(ID, InsertPos);
- if (Satisfaction) {
- OutSatisfaction = *Satisfaction;
- return false;
- }
- Satisfaction = new ConstraintSatisfaction(Template, TemplateArgs);
- } else {
- Satisfaction = &OutSatisfaction;
+ if (auto *Cached = SatisfactionCache.FindNodeOrInsertPos(ID, InsertPos)) {
+ OutSatisfaction = *Cached;
+ return false;
}
+
+ auto Satisfaction =
+ std::make_unique<ConstraintSatisfaction>(Template, FlattenedArgs);
if (::CheckConstraintSatisfaction(*this, Template, ConstraintExprs,
- TemplateArgs, TemplateIDRange,
- *Satisfaction)) {
- if (ShouldCache)
- delete Satisfaction;
+ ConvertedConstraints, TemplateArgsLists,
+ TemplateIDRange, *Satisfaction)) {
+ OutSatisfaction = *Satisfaction;
return true;
}
- if (ShouldCache) {
- // We cannot use InsertNode here because CheckConstraintSatisfaction might
- // have invalidated it.
- SatisfactionCache.InsertNode(Satisfaction);
- OutSatisfaction = *Satisfaction;
+ if (auto *Cached = SatisfactionCache.FindNodeOrInsertPos(ID, InsertPos)) {
+ // The evaluation of this constraint resulted in us trying to re-evaluate it
+ // recursively. This isn't really possible, except we try to form a
+ // RecoveryExpr as a part of the evaluation. If this is the case, just
+ // return the 'cached' version (which will have the same result), and save
+ // ourselves the extra-insert. If it ever becomes possible to legitimately
+ // recursively check a constraint, we should skip checking the 'inner' one
+ // above, and replace the cached version with this one, as it would be more
+ // specific.
+ OutSatisfaction = *Cached;
+ return false;
}
+
+ // Else we can simply add this satisfaction to the list.
+ OutSatisfaction = *Satisfaction;
+ // We cannot use InsertPos here because CheckConstraintSatisfaction might have
+ // invalidated it.
+ // Note that entries of SatisfactionCache are deleted in Sema's destructor.
+ SatisfactionCache.InsertNode(Satisfaction.release());
return false;
}
bool Sema::CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction) {
return calculateConstraintSatisfaction(
- *this, ConstraintExpr, Satisfaction,
- [](const Expr *AtomicExpr) -> ExprResult {
- return ExprResult(const_cast<Expr *>(AtomicExpr));
- });
+ *this, ConstraintExpr, Satisfaction,
+ [this](const Expr *AtomicExpr) -> ExprResult {
+ // We only do this to immitate lvalue-to-rvalue conversion.
+ return PerformContextuallyConvertToBool(
+ const_cast<Expr *>(AtomicExpr));
+ })
+ .isInvalid();
+}
+
+bool Sema::addInstantiatedCapturesToScope(
+ FunctionDecl *Function, const FunctionDecl *PatternDecl,
+ LocalInstantiationScope &Scope,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ const auto *LambdaClass = cast<CXXMethodDecl>(Function)->getParent();
+ const auto *LambdaPattern = cast<CXXMethodDecl>(PatternDecl)->getParent();
+
+ unsigned Instantiated = 0;
+
+ auto AddSingleCapture = [&](const ValueDecl *CapturedPattern,
+ unsigned Index) {
+ ValueDecl *CapturedVar = LambdaClass->getCapture(Index)->getCapturedVar();
+ if (CapturedVar->isInitCapture())
+ Scope.InstantiatedLocal(CapturedPattern, CapturedVar);
+ };
+
+ for (const LambdaCapture &CapturePattern : LambdaPattern->captures()) {
+ if (!CapturePattern.capturesVariable()) {
+ Instantiated++;
+ continue;
+ }
+ const ValueDecl *CapturedPattern = CapturePattern.getCapturedVar();
+ if (!CapturedPattern->isParameterPack()) {
+ AddSingleCapture(CapturedPattern, Instantiated++);
+ } else {
+ Scope.MakeInstantiatedLocalArgPack(CapturedPattern);
+ std::optional<unsigned> NumArgumentsInExpansion =
+ getNumArgumentsInExpansion(CapturedPattern->getType(), TemplateArgs);
+ if (!NumArgumentsInExpansion)
+ continue;
+ for (unsigned Arg = 0; Arg < *NumArgumentsInExpansion; ++Arg)
+ AddSingleCapture(CapturedPattern, Instantiated++);
+ }
+ }
+ return false;
+}
+
+bool Sema::SetupConstraintScope(
+ FunctionDecl *FD, std::optional<ArrayRef<TemplateArgument>> TemplateArgs,
+ MultiLevelTemplateArgumentList MLTAL, LocalInstantiationScope &Scope) {
+ if (FD->isTemplateInstantiation() && FD->getPrimaryTemplate()) {
+ FunctionTemplateDecl *PrimaryTemplate = FD->getPrimaryTemplate();
+ InstantiatingTemplate Inst(
+ *this, FD->getPointOfInstantiation(),
+ Sema::InstantiatingTemplate::ConstraintsCheck{}, PrimaryTemplate,
+ TemplateArgs ? *TemplateArgs : ArrayRef<TemplateArgument>{},
+ SourceRange());
+ if (Inst.isInvalid())
+ return true;
+
+ // addInstantiatedParametersToScope creates a map of 'uninstantiated' to
+ // 'instantiated' parameters and adds it to the context. For the case where
+ // this function is a template being instantiated NOW, we also need to add
+ // the list of current template arguments to the list so that they also can
+ // be picked out of the map.
+ if (auto *SpecArgs = FD->getTemplateSpecializationArgs()) {
+ MultiLevelTemplateArgumentList JustTemplArgs(FD, SpecArgs->asArray(),
+ /*Final=*/false);
+ if (addInstantiatedParametersToScope(
+ FD, PrimaryTemplate->getTemplatedDecl(), Scope, JustTemplArgs))
+ return true;
+ }
+
+ // If this is a member function, make sure we get the parameters that
+ // reference the original primary template.
+ // We walk up the instantiated template chain so that nested lambdas get
+ // handled properly.
+ for (FunctionTemplateDecl *FromMemTempl =
+ PrimaryTemplate->getInstantiatedFromMemberTemplate();
+ FromMemTempl;
+ FromMemTempl = FromMemTempl->getInstantiatedFromMemberTemplate()) {
+ if (addInstantiatedParametersToScope(FD, FromMemTempl->getTemplatedDecl(),
+ Scope, MLTAL))
+ return true;
+ }
+
+ return false;
+ }
+
+ if (FD->getTemplatedKind() == FunctionDecl::TK_MemberSpecialization ||
+ FD->getTemplatedKind() == FunctionDecl::TK_DependentNonTemplate) {
+ FunctionDecl *InstantiatedFrom =
+ FD->getTemplatedKind() == FunctionDecl::TK_MemberSpecialization
+ ? FD->getInstantiatedFromMemberFunction()
+ : FD->getInstantiatedFromDecl();
+
+ InstantiatingTemplate Inst(
+ *this, FD->getPointOfInstantiation(),
+ Sema::InstantiatingTemplate::ConstraintsCheck{}, InstantiatedFrom,
+ TemplateArgs ? *TemplateArgs : ArrayRef<TemplateArgument>{},
+ SourceRange());
+ if (Inst.isInvalid())
+ return true;
+
+ // Case where this was not a template, but instantiated as a
+ // child-function.
+ if (addInstantiatedParametersToScope(FD, InstantiatedFrom, Scope, MLTAL))
+ return true;
+ }
+
+ return false;
+}
+
+// This function collects all of the template arguments for the purposes of
+// constraint-instantiation and checking.
+std::optional<MultiLevelTemplateArgumentList>
+Sema::SetupConstraintCheckingTemplateArgumentsAndScope(
+ FunctionDecl *FD, std::optional<ArrayRef<TemplateArgument>> TemplateArgs,
+ LocalInstantiationScope &Scope) {
+ MultiLevelTemplateArgumentList MLTAL;
+
+ // Collect the list of template arguments relative to the 'primary' template.
+ // We need the entire list, since the constraint is completely uninstantiated
+ // at this point.
+ MLTAL = getTemplateInstantiationArgs(FD, FD->getLexicalDeclContext(),
+ /*Final=*/false, /*Innermost=*/nullptr,
+ /*RelativeToPrimary=*/true,
+ /*Pattern=*/nullptr,
+ /*ForConstraintInstantiation=*/true);
+ if (SetupConstraintScope(FD, TemplateArgs, MLTAL, Scope))
+ return std::nullopt;
+
+ return MLTAL;
}
bool Sema::CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
- SourceLocation UsageLoc) {
- const Expr *RC = FD->getTrailingRequiresClause();
- if (RC->isInstantiationDependent()) {
+ SourceLocation UsageLoc,
+ bool ForOverloadResolution) {
+ // Don't check constraints if the function is dependent. Also don't check if
+ // this is a function template specialization, as the call to
+ // CheckinstantiatedFunctionTemplateConstraints after this will check it
+ // better.
+ if (FD->isDependentContext() ||
+ FD->getTemplatedKind() ==
+ FunctionDecl::TK_FunctionTemplateSpecialization) {
Satisfaction.IsSatisfied = true;
return false;
}
+
+ // A lambda conversion operator has the same constraints as the call operator
+ // and constraints checking relies on whether we are in a lambda call operator
+ // (and may refer to its parameters), so check the call operator instead.
+ if (const auto *MD = dyn_cast<CXXConversionDecl>(FD);
+ MD && isLambdaConversionOperator(const_cast<CXXConversionDecl *>(MD)))
+ return CheckFunctionConstraints(MD->getParent()->getLambdaCallOperator(),
+ Satisfaction, UsageLoc,
+ ForOverloadResolution);
+
+ DeclContext *CtxToSave = const_cast<FunctionDecl *>(FD);
+
+ while (isLambdaCallOperator(CtxToSave) || FD->isTransparentContext()) {
+ if (isLambdaCallOperator(CtxToSave))
+ CtxToSave = CtxToSave->getParent()->getParent();
+ else
+ CtxToSave = CtxToSave->getNonTransparentContext();
+ }
+
+ ContextRAII SavedContext{*this, CtxToSave};
+ LocalInstantiationScope Scope(*this, !ForOverloadResolution);
+ std::optional<MultiLevelTemplateArgumentList> MLTAL =
+ SetupConstraintCheckingTemplateArgumentsAndScope(
+ const_cast<FunctionDecl *>(FD), {}, Scope);
+
+ if (!MLTAL)
+ return true;
+
Qualifiers ThisQuals;
CXXRecordDecl *Record = nullptr;
if (auto *Method = dyn_cast<CXXMethodDecl>(FD)) {
@@ -376,22 +721,149 @@ bool Sema::CheckFunctionConstraints(const FunctionDecl *FD,
Record = const_cast<CXXRecordDecl *>(Method->getParent());
}
CXXThisScopeRAII ThisScope(*this, Record, ThisQuals, Record != nullptr);
- // We substitute with empty arguments in order to rebuild the atomic
- // constraint in a constant-evaluated context.
- // FIXME: Should this be a dedicated TreeTransform?
+
+ LambdaScopeForCallOperatorInstantiationRAII LambdaScope(
+ *this, const_cast<FunctionDecl *>(FD), *MLTAL, Scope,
+ ForOverloadResolution);
+
return CheckConstraintSatisfaction(
- FD, {RC}, /*TemplateArgs=*/{},
+ FD, {FD->getTrailingRequiresClause()}, *MLTAL,
SourceRange(UsageLoc.isValid() ? UsageLoc : FD->getLocation()),
Satisfaction);
}
+
+// Figure out the to-translation-unit depth for this function declaration for
+// the purpose of seeing if they differ by constraints. This isn't the same as
+// getTemplateDepth, because it includes already instantiated parents.
+static unsigned
+CalculateTemplateDepthForConstraints(Sema &S, const NamedDecl *ND,
+ bool SkipForSpecialization = false) {
+ MultiLevelTemplateArgumentList MLTAL = S.getTemplateInstantiationArgs(
+ ND, ND->getLexicalDeclContext(), /*Final=*/false, /*Innermost=*/nullptr,
+ /*RelativeToPrimary=*/true,
+ /*Pattern=*/nullptr,
+ /*ForConstraintInstantiation=*/true, SkipForSpecialization);
+ return MLTAL.getNumLevels();
+}
+
+namespace {
+ class AdjustConstraintDepth : public TreeTransform<AdjustConstraintDepth> {
+ unsigned TemplateDepth = 0;
+ public:
+ using inherited = TreeTransform<AdjustConstraintDepth>;
+ AdjustConstraintDepth(Sema &SemaRef, unsigned TemplateDepth)
+ : inherited(SemaRef), TemplateDepth(TemplateDepth) {}
+
+ using inherited::TransformTemplateTypeParmType;
+ QualType TransformTemplateTypeParmType(TypeLocBuilder &TLB,
+ TemplateTypeParmTypeLoc TL, bool) {
+ const TemplateTypeParmType *T = TL.getTypePtr();
+
+ TemplateTypeParmDecl *NewTTPDecl = nullptr;
+ if (TemplateTypeParmDecl *OldTTPDecl = T->getDecl())
+ NewTTPDecl = cast_or_null<TemplateTypeParmDecl>(
+ TransformDecl(TL.getNameLoc(), OldTTPDecl));
+
+ QualType Result = getSema().Context.getTemplateTypeParmType(
+ T->getDepth() + TemplateDepth, T->getIndex(), T->isParameterPack(),
+ NewTTPDecl);
+ TemplateTypeParmTypeLoc NewTL = TLB.push<TemplateTypeParmTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+ }
+ };
+} // namespace
+
+static const Expr *SubstituteConstraintExpressionWithoutSatisfaction(
+ Sema &S, const Sema::TemplateCompareNewDeclInfo &DeclInfo,
+ const Expr *ConstrExpr) {
+ MultiLevelTemplateArgumentList MLTAL = S.getTemplateInstantiationArgs(
+ DeclInfo.getDecl(), DeclInfo.getLexicalDeclContext(), /*Final=*/false,
+ /*Innermost=*/nullptr,
+ /*RelativeToPrimary=*/true,
+ /*Pattern=*/nullptr, /*ForConstraintInstantiation=*/true,
+ /*SkipForSpecialization*/ false);
+
+ if (MLTAL.getNumSubstitutedLevels() == 0)
+ return ConstrExpr;
+
+ Sema::SFINAETrap SFINAE(S, /*AccessCheckingSFINAE=*/false);
+
+ Sema::InstantiatingTemplate Inst(
+ S, DeclInfo.getLocation(),
+ Sema::InstantiatingTemplate::ConstraintNormalization{},
+ const_cast<NamedDecl *>(DeclInfo.getDecl()), SourceRange{});
+ if (Inst.isInvalid())
+ return nullptr;
+
+ std::optional<Sema::CXXThisScopeRAII> ThisScope;
+ if (auto *RD = dyn_cast<CXXRecordDecl>(DeclInfo.getDeclContext()))
+ ThisScope.emplace(S, const_cast<CXXRecordDecl *>(RD), Qualifiers());
+ ExprResult SubstConstr = S.SubstConstraintExprWithoutSatisfaction(
+ const_cast<clang::Expr *>(ConstrExpr), MLTAL);
+ if (SFINAE.hasErrorOccurred() || !SubstConstr.isUsable())
+ return nullptr;
+ return SubstConstr.get();
+}
+
+bool Sema::AreConstraintExpressionsEqual(const NamedDecl *Old,
+ const Expr *OldConstr,
+ const TemplateCompareNewDeclInfo &New,
+ const Expr *NewConstr) {
+ if (OldConstr == NewConstr)
+ return true;
+ // C++ [temp.constr.decl]p4
+ if (Old && !New.isInvalid() && !New.ContainsDecl(Old) &&
+ Old->getLexicalDeclContext() != New.getLexicalDeclContext()) {
+ if (const Expr *SubstConstr =
+ SubstituteConstraintExpressionWithoutSatisfaction(*this, Old,
+ OldConstr))
+ OldConstr = SubstConstr;
+ else
+ return false;
+ if (const Expr *SubstConstr =
+ SubstituteConstraintExpressionWithoutSatisfaction(*this, New,
+ NewConstr))
+ NewConstr = SubstConstr;
+ else
+ return false;
+ }
+
+ llvm::FoldingSetNodeID ID1, ID2;
+ OldConstr->Profile(ID1, Context, /*Canonical=*/true);
+ NewConstr->Profile(ID2, Context, /*Canonical=*/true);
+ return ID1 == ID2;
+}
+
+bool Sema::FriendConstraintsDependOnEnclosingTemplate(const FunctionDecl *FD) {
+ assert(FD->getFriendObjectKind() && "Must be a friend!");
+
+ // The logic for non-templates is handled in ASTContext::isSameEntity, so we
+ // don't have to bother checking 'DependsOnEnclosingTemplate' for a
+ // non-function-template.
+ assert(FD->getDescribedFunctionTemplate() &&
+ "Non-function templates don't need to be checked");
+
+ SmallVector<const Expr *, 3> ACs;
+ FD->getDescribedFunctionTemplate()->getAssociatedConstraints(ACs);
+
+ unsigned OldTemplateDepth = CalculateTemplateDepthForConstraints(*this, FD);
+ for (const Expr *Constraint : ACs)
+ if (ConstraintExpressionDependsOnEnclosingTemplate(FD, OldTemplateDepth,
+ Constraint))
+ return true;
+
+ return false;
+}
+
bool Sema::EnsureTemplateArgumentListConstraints(
- TemplateDecl *TD, ArrayRef<TemplateArgument> TemplateArgs,
+ TemplateDecl *TD, const MultiLevelTemplateArgumentList &TemplateArgsLists,
SourceRange TemplateIDRange) {
ConstraintSatisfaction Satisfaction;
llvm::SmallVector<const Expr *, 3> AssociatedConstraints;
TD->getAssociatedConstraints(AssociatedConstraints);
- if (CheckConstraintSatisfaction(TD, AssociatedConstraints, TemplateArgs,
+ if (CheckConstraintSatisfaction(TD, AssociatedConstraints, TemplateArgsLists,
TemplateIDRange, Satisfaction))
return true;
@@ -399,7 +871,8 @@ bool Sema::EnsureTemplateArgumentListConstraints(
SmallString<128> TemplateArgString;
TemplateArgString = " ";
TemplateArgString += getTemplateArgumentBindingsText(
- TD->getTemplateParameters(), TemplateArgs.data(), TemplateArgs.size());
+ TD->getTemplateParameters(), TemplateArgsLists.getInnermost().data(),
+ TemplateArgsLists.getInnermost().size());
Diag(TemplateIDRange.getBegin(),
diag::err_template_arg_list_constraints_not_satisfied)
@@ -411,6 +884,49 @@ bool Sema::EnsureTemplateArgumentListConstraints(
return false;
}
+bool Sema::CheckInstantiatedFunctionTemplateConstraints(
+ SourceLocation PointOfInstantiation, FunctionDecl *Decl,
+ ArrayRef<TemplateArgument> TemplateArgs,
+ ConstraintSatisfaction &Satisfaction) {
+ // In most cases we're not going to have constraints, so check for that first.
+ FunctionTemplateDecl *Template = Decl->getPrimaryTemplate();
+ // Note - code synthesis context for the constraints check is created
+ // inside CheckConstraintsSatisfaction.
+ SmallVector<const Expr *, 3> TemplateAC;
+ Template->getAssociatedConstraints(TemplateAC);
+ if (TemplateAC.empty()) {
+ Satisfaction.IsSatisfied = true;
+ return false;
+ }
+
+ // Enter the scope of this instantiation. We don't use
+ // PushDeclContext because we don't have a scope.
+ Sema::ContextRAII savedContext(*this, Decl);
+ LocalInstantiationScope Scope(*this);
+
+ std::optional<MultiLevelTemplateArgumentList> MLTAL =
+ SetupConstraintCheckingTemplateArgumentsAndScope(Decl, TemplateArgs,
+ Scope);
+
+ if (!MLTAL)
+ return true;
+
+ Qualifiers ThisQuals;
+ CXXRecordDecl *Record = nullptr;
+ if (auto *Method = dyn_cast<CXXMethodDecl>(Decl)) {
+ ThisQuals = Method->getMethodQualifiers();
+ Record = Method->getParent();
+ }
+
+ CXXThisScopeRAII ThisScope(*this, Record, ThisQuals, Record != nullptr);
+ LambdaScopeForCallOperatorInstantiationRAII LambdaScope(
+ *this, const_cast<FunctionDecl *>(Decl), *MLTAL, Scope);
+
+ llvm::SmallVector<Expr *, 1> Converted;
+ return CheckConstraintSatisfaction(Template, TemplateAC, Converted, *MLTAL,
+ PointOfInstantiation, Satisfaction);
+}
+
static void diagnoseUnsatisfiedRequirement(Sema &S,
concepts::ExprRequirement *Req,
bool First) {
@@ -461,7 +977,7 @@ static void diagnoseUnsatisfiedRequirement(Sema &S,
Expr *e = Req->getExpr();
S.Diag(e->getBeginLoc(),
diag::note_expr_requirement_constraints_not_satisfied_simple)
- << (int)First << S.getDecltypeForParenthesizedExpr(e)
+ << (int)First << S.Context.getReferenceQualifiedType(e)
<< ConstraintExpr->getNamedConcept();
} else {
S.Diag(ConstraintExpr->getBeginLoc(),
@@ -502,31 +1018,28 @@ static void diagnoseUnsatisfiedRequirement(Sema &S,
return;
}
}
+static void diagnoseWellFormedUnsatisfiedConstraintExpr(Sema &S,
+ Expr *SubstExpr,
+ bool First = true);
static void diagnoseUnsatisfiedRequirement(Sema &S,
concepts::NestedRequirement *Req,
bool First) {
- if (Req->isSubstitutionFailure()) {
- concepts::Requirement::SubstitutionDiagnostic *SubstDiag =
- Req->getSubstitutionDiagnostic();
- if (!SubstDiag->DiagMessage.empty())
- S.Diag(SubstDiag->DiagLoc,
- diag::note_nested_requirement_substitution_error)
- << (int)First << SubstDiag->SubstitutedEntity
- << SubstDiag->DiagMessage;
+ using SubstitutionDiagnostic = std::pair<SourceLocation, StringRef>;
+ for (auto &Pair : Req->getConstraintSatisfaction()) {
+ if (auto *SubstDiag = Pair.second.dyn_cast<SubstitutionDiagnostic *>())
+ S.Diag(SubstDiag->first, diag::note_nested_requirement_substitution_error)
+ << (int)First << Req->getInvalidConstraintEntity() << SubstDiag->second;
else
- S.Diag(SubstDiag->DiagLoc,
- diag::note_nested_requirement_unknown_substitution_error)
- << (int)First << SubstDiag->SubstitutedEntity;
- return;
+ diagnoseWellFormedUnsatisfiedConstraintExpr(
+ S, Pair.second.dyn_cast<Expr *>(), First);
+ First = false;
}
- S.DiagnoseUnsatisfiedConstraint(Req->getConstraintSatisfaction(), First);
}
-
static void diagnoseWellFormedUnsatisfiedConstraintExpr(Sema &S,
Expr *SubstExpr,
- bool First = true) {
+ bool First) {
SubstExpr = SubstExpr->IgnoreParenImpCasts();
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(SubstExpr)) {
switch (BO->getOpcode()) {
@@ -606,6 +1119,7 @@ static void diagnoseWellFormedUnsatisfiedConstraintExpr(Sema &S,
S.DiagnoseUnsatisfiedConstraint(CSE->getSatisfaction());
return;
} else if (auto *RE = dyn_cast<RequiresExpr>(SubstExpr)) {
+ // FIXME: RequiresExpr should store dependent diagnostics.
for (concepts::Requirement *Req : RE->getRequirements())
if (!Req->isDependent() && !Req->isSatisfied()) {
if (auto *E = dyn_cast<concepts::ExprRequirement>(Req))
@@ -665,6 +1179,11 @@ void Sema::DiagnoseUnsatisfiedConstraint(
const NormalizedConstraint *
Sema::getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints) {
+ // In case the ConstrainedDecl comes from modules, it is necessary to use
+ // the canonical decl to avoid different atomic constraints with the 'same'
+ // declarations.
+ ConstrainedDecl = cast<NamedDecl>(ConstrainedDecl->getCanonicalDecl());
+
auto CacheEntry = NormalizationCache.find(ConstrainedDecl);
if (CacheEntry == NormalizationCache.end()) {
auto Normalized =
@@ -682,34 +1201,33 @@ Sema::getNormalizedAssociatedConstraints(
return CacheEntry->second;
}
-static bool substituteParameterMappings(Sema &S, NormalizedConstraint &N,
- ConceptDecl *Concept, ArrayRef<TemplateArgument> TemplateArgs,
- const ASTTemplateArgumentListInfo *ArgsAsWritten) {
+static bool
+substituteParameterMappings(Sema &S, NormalizedConstraint &N,
+ ConceptDecl *Concept,
+ const MultiLevelTemplateArgumentList &MLTAL,
+ const ASTTemplateArgumentListInfo *ArgsAsWritten) {
if (!N.isAtomic()) {
- if (substituteParameterMappings(S, N.getLHS(), Concept, TemplateArgs,
+ if (substituteParameterMappings(S, N.getLHS(), Concept, MLTAL,
ArgsAsWritten))
return true;
- return substituteParameterMappings(S, N.getRHS(), Concept, TemplateArgs,
+ return substituteParameterMappings(S, N.getRHS(), Concept, MLTAL,
ArgsAsWritten);
}
TemplateParameterList *TemplateParams = Concept->getTemplateParameters();
AtomicConstraint &Atomic = *N.getAtomicConstraint();
TemplateArgumentListInfo SubstArgs;
- MultiLevelTemplateArgumentList MLTAL;
- MLTAL.addOuterTemplateArguments(TemplateArgs);
if (!Atomic.ParameterMapping) {
llvm::SmallBitVector OccurringIndices(TemplateParams->size());
S.MarkUsedTemplateParameters(Atomic.ConstraintExpr, /*OnlyDeduced=*/false,
/*Depth=*/0, OccurringIndices);
- Atomic.ParameterMapping.emplace(
- MutableArrayRef<TemplateArgumentLoc>(
- new (S.Context) TemplateArgumentLoc[OccurringIndices.count()],
- OccurringIndices.count()));
+ TemplateArgumentLoc *TempArgs =
+ new (S.Context) TemplateArgumentLoc[OccurringIndices.count()];
for (unsigned I = 0, J = 0, C = TemplateParams->size(); I != C; ++I)
if (OccurringIndices[I])
- new (&(*Atomic.ParameterMapping)[J++]) TemplateArgumentLoc(
- S.getIdentityTemplateArgumentLoc(TemplateParams->begin()[I],
+ new (&(TempArgs)[J++])
+ TemplateArgumentLoc(S.getIdentityTemplateArgumentLoc(
+ TemplateParams->begin()[I],
// Here we assume we do not support things like
// template<typename A, typename B>
// concept C = ...;
@@ -718,44 +1236,59 @@ static bool substituteParameterMappings(Sema &S, NormalizedConstraint &N,
// struct S { };
// The above currently yields a diagnostic.
// We still might have default arguments for concept parameters.
- ArgsAsWritten->NumTemplateArgs > I ?
- ArgsAsWritten->arguments()[I].getLocation() :
- SourceLocation()));
+ ArgsAsWritten->NumTemplateArgs > I
+ ? ArgsAsWritten->arguments()[I].getLocation()
+ : SourceLocation()));
+ Atomic.ParameterMapping.emplace(TempArgs, OccurringIndices.count());
}
Sema::InstantiatingTemplate Inst(
S, ArgsAsWritten->arguments().front().getSourceRange().getBegin(),
Sema::InstantiatingTemplate::ParameterMappingSubstitution{}, Concept,
- SourceRange(ArgsAsWritten->arguments()[0].getSourceRange().getBegin(),
- ArgsAsWritten->arguments().back().getSourceRange().getEnd()));
+ ArgsAsWritten->arguments().front().getSourceRange());
if (S.SubstTemplateArguments(*Atomic.ParameterMapping, MLTAL, SubstArgs))
return true;
- Atomic.ParameterMapping.emplace(
- MutableArrayRef<TemplateArgumentLoc>(
- new (S.Context) TemplateArgumentLoc[SubstArgs.size()],
- SubstArgs.size()));
+
+ TemplateArgumentLoc *TempArgs =
+ new (S.Context) TemplateArgumentLoc[SubstArgs.size()];
std::copy(SubstArgs.arguments().begin(), SubstArgs.arguments().end(),
- N.getAtomicConstraint()->ParameterMapping->begin());
+ TempArgs);
+ Atomic.ParameterMapping.emplace(TempArgs, SubstArgs.size());
return false;
}
-Optional<NormalizedConstraint>
+static bool substituteParameterMappings(Sema &S, NormalizedConstraint &N,
+ const ConceptSpecializationExpr *CSE) {
+ TemplateArgumentList TAL{TemplateArgumentList::OnStack,
+ CSE->getTemplateArguments()};
+ MultiLevelTemplateArgumentList MLTAL = S.getTemplateInstantiationArgs(
+ CSE->getNamedConcept(), CSE->getNamedConcept()->getLexicalDeclContext(),
+ /*Final=*/false, &TAL,
+ /*RelativeToPrimary=*/true,
+ /*Pattern=*/nullptr,
+ /*ForConstraintInstantiation=*/true);
+
+ return substituteParameterMappings(S, N, CSE->getNamedConcept(), MLTAL,
+ CSE->getTemplateArgsAsWritten());
+}
+
+std::optional<NormalizedConstraint>
NormalizedConstraint::fromConstraintExprs(Sema &S, NamedDecl *D,
ArrayRef<const Expr *> E) {
assert(E.size() != 0);
auto Conjunction = fromConstraintExpr(S, D, E[0]);
if (!Conjunction)
- return None;
+ return std::nullopt;
for (unsigned I = 1; I < E.size(); ++I) {
auto Next = fromConstraintExpr(S, D, E[I]);
if (!Next)
- return None;
+ return std::nullopt;
*Conjunction = NormalizedConstraint(S.Context, std::move(*Conjunction),
std::move(*Next), CCK_Conjunction);
}
return Conjunction;
}
-llvm::Optional<NormalizedConstraint>
+std::optional<NormalizedConstraint>
NormalizedConstraint::fromConstraintExpr(Sema &S, NamedDecl *D, const Expr *E) {
assert(E != nullptr);
@@ -764,13 +1297,19 @@ NormalizedConstraint::fromConstraintExpr(Sema &S, NamedDecl *D, const Expr *E) {
// - The normal form of an expression (E) is the normal form of E.
// [...]
E = E->IgnoreParenImpCasts();
+
+ // C++2a [temp.param]p4:
+ // [...] If T is not a pack, then E is E', otherwise E is (E' && ...).
+ // Fold expression is considered atomic constraints per current wording.
+ // See http://cplusplus.github.io/concepts-ts/ts-active.html#28
+
if (LogicalBinOp BO = E) {
auto LHS = fromConstraintExpr(S, D, BO.getLHS());
if (!LHS)
- return None;
+ return std::nullopt;
auto RHS = fromConstraintExpr(S, D, BO.getRHS());
if (!RHS)
- return None;
+ return std::nullopt;
return NormalizedConstraint(S.Context, std::move(*LHS), std::move(*RHS),
BO.isAnd() ? CCK_Conjunction : CCK_Disjunction);
@@ -794,16 +1333,14 @@ NormalizedConstraint::fromConstraintExpr(Sema &S, NamedDecl *D, const Expr *E) {
SubNF = S.getNormalizedAssociatedConstraints(CD,
{CD->getConstraintExpr()});
if (!SubNF)
- return None;
+ return std::nullopt;
}
- Optional<NormalizedConstraint> New;
+ std::optional<NormalizedConstraint> New;
New.emplace(S.Context, *SubNF);
- if (substituteParameterMappings(
- S, *New, CSE->getNamedConcept(),
- CSE->getTemplateArguments(), CSE->getTemplateArgsAsWritten()))
- return None;
+ if (substituteParameterMappings(S, *New, CSE))
+ return std::nullopt;
return New;
}
@@ -873,7 +1410,7 @@ static NormalForm makeDNF(const NormalizedConstraint &Normalized) {
}
template<typename AtomicSubsumptionEvaluator>
-static bool subsumes(NormalForm PDNF, NormalForm QCNF,
+static bool subsumes(const NormalForm &PDNF, const NormalForm &QCNF,
AtomicSubsumptionEvaluator E) {
// C++ [temp.constr.order] p2
// Then, P subsumes Q if and only if, for every disjunctive clause Pi in the
@@ -926,9 +1463,26 @@ static bool subsumes(Sema &S, NamedDecl *DP, ArrayRef<const Expr *> P,
return false;
}
-bool Sema::IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
- NamedDecl *D2, ArrayRef<const Expr *> AC2,
+bool Sema::IsAtLeastAsConstrained(NamedDecl *D1,
+ MutableArrayRef<const Expr *> AC1,
+ NamedDecl *D2,
+ MutableArrayRef<const Expr *> AC2,
bool &Result) {
+ if (const auto *FD1 = dyn_cast<FunctionDecl>(D1)) {
+ auto IsExpectedEntity = [](const FunctionDecl *FD) {
+ FunctionDecl::TemplatedKind Kind = FD->getTemplatedKind();
+ return Kind == FunctionDecl::TK_NonTemplate ||
+ Kind == FunctionDecl::TK_FunctionTemplate;
+ };
+ const auto *FD2 = dyn_cast<FunctionDecl>(D2);
+ (void)IsExpectedEntity;
+ (void)FD1;
+ (void)FD2;
+ assert(IsExpectedEntity(FD1) && FD2 && IsExpectedEntity(FD2) &&
+ "use non-instantiated function declaration for constraints partial "
+ "ordering");
+ }
+
if (AC1.empty()) {
Result = AC2.empty();
return false;
@@ -946,6 +1500,21 @@ bool Sema::IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
return false;
}
+ unsigned Depth1 = CalculateTemplateDepthForConstraints(*this, D1, true);
+ unsigned Depth2 = CalculateTemplateDepthForConstraints(*this, D2, true);
+
+ for (size_t I = 0; I != AC1.size() && I != AC2.size(); ++I) {
+ if (Depth2 > Depth1) {
+ AC1[I] = AdjustConstraintDepth(*this, Depth2 - Depth1)
+ .TransformExpr(const_cast<Expr *>(AC1[I]))
+ .get();
+ } else if (Depth1 > Depth2) {
+ AC2[I] = AdjustConstraintDepth(*this, Depth1 - Depth2)
+ .TransformExpr(const_cast<Expr *>(AC2[I]))
+ .get();
+ }
+ }
+
if (subsumes(*this, D1, AC1, D2, AC2, Result,
[this] (const AtomicConstraint &A, const AtomicConstraint &B) {
return A.subsumes(Context, B);
@@ -981,8 +1550,8 @@ bool Sema::MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
// Not the same source level expression - are the expressions
// identical?
llvm::FoldingSetNodeID IDA, IDB;
- EA->Profile(IDA, Context, /*Cannonical=*/true);
- EB->Profile(IDB, Context, /*Cannonical=*/true);
+ EA->Profile(IDA, Context, /*Canonical=*/true);
+ EB->Profile(IDB, Context, /*Canonical=*/true);
if (IDA != IDB)
return false;
@@ -1059,20 +1628,19 @@ concepts::ExprRequirement::ExprRequirement(
concepts::ExprRequirement::ReturnTypeRequirement::
ReturnTypeRequirement(TemplateParameterList *TPL) :
- TypeConstraintInfo(TPL, 0) {
+ TypeConstraintInfo(TPL, false) {
assert(TPL->size() == 1);
const TypeConstraint *TC =
cast<TemplateTypeParmDecl>(TPL->getParam(0))->getTypeConstraint();
assert(TC &&
"TPL must have a template type parameter with a type constraint");
auto *Constraint =
- cast_or_null<ConceptSpecializationExpr>(
- TC->getImmediatelyDeclaredConstraint());
+ cast<ConceptSpecializationExpr>(TC->getImmediatelyDeclaredConstraint());
bool Dependent =
Constraint->getTemplateArgsAsWritten() &&
TemplateSpecializationType::anyInstantiationDependentTemplateArguments(
Constraint->getTemplateArgsAsWritten()->arguments().drop_front(1));
- TypeConstraintInfo.setInt(Dependent ? 1 : 0);
+ TypeConstraintInfo.setInt(Dependent ? true : false);
}
concepts::TypeRequirement::TypeRequirement(TypeSourceInfo *T) :
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
index 3d1899a57c72..4e600fd29ee7 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
@@ -16,10 +16,12 @@
#include "CoroutineStmtBuilder.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/Builtins.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Overload.h"
#include "clang/Sema/ScopeInfo.h"
@@ -53,18 +55,11 @@ static QualType lookupPromiseType(Sema &S, const FunctionDecl *FD,
SourceLocation KwLoc) {
const FunctionProtoType *FnType = FD->getType()->castAs<FunctionProtoType>();
const SourceLocation FuncLoc = FD->getLocation();
- // FIXME: Cache std::coroutine_traits once we've found it.
- NamespaceDecl *StdExp = S.lookupStdExperimentalNamespace();
- if (!StdExp) {
- S.Diag(KwLoc, diag::err_implied_coroutine_type_not_found)
- << "std::experimental::coroutine_traits";
- return QualType();
- }
- ClassTemplateDecl *CoroTraits = S.lookupCoroutineTraits(KwLoc, FuncLoc);
- if (!CoroTraits) {
+ ClassTemplateDecl *CoroTraits =
+ S.lookupCoroutineTraits(KwLoc, FuncLoc);
+ if (!CoroTraits)
return QualType();
- }
// Form template argument list for coroutine_traits<R, P1, P2, ...> according
// to [dcl.fct.def.coroutine]3
@@ -77,7 +72,7 @@ static QualType lookupPromiseType(Sema &S, const FunctionDecl *FD,
// If the function is a non-static member function, add the type
// of the implicit object parameter before the formal parameters.
if (auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
- if (MD->isInstance()) {
+ if (MD->isImplicitObjectMemberFunction()) {
// [over.match.funcs]4
// For non-static member functions, the type of the implicit object
// parameter is
@@ -85,7 +80,7 @@ static QualType lookupPromiseType(Sema &S, const FunctionDecl *FD,
// ref-qualifier or with the & ref-qualifier
// -- "rvalue reference to cv X" for functions declared with the &&
// ref-qualifier
- QualType T = MD->getThisType()->castAs<PointerType>()->getPointeeType();
+ QualType T = MD->getFunctionObjectParameterType();
T = FnType->getRefQualifier() == RQ_RValue
? S.Context.getRValueReferenceType(T)
: S.Context.getLValueReferenceType(T, /*SpelledAsLValue*/ true);
@@ -122,10 +117,11 @@ static QualType lookupPromiseType(Sema &S, const FunctionDecl *FD,
QualType PromiseType = S.Context.getTypeDeclType(Promise);
auto buildElaboratedType = [&]() {
- auto *NNS = NestedNameSpecifier::Create(S.Context, nullptr, StdExp);
+ auto *NNS = NestedNameSpecifier::Create(S.Context, nullptr, S.getStdNamespace());
NNS = NestedNameSpecifier::Create(S.Context, NNS, false,
CoroTrait.getTypePtr());
- return S.Context.getElaboratedType(ETK_None, NNS, PromiseType);
+ return S.Context.getElaboratedType(ElaboratedTypeKeyword::None, NNS,
+ PromiseType);
};
if (!PromiseType->getAsCXXRecordDecl()) {
@@ -141,20 +137,20 @@ static QualType lookupPromiseType(Sema &S, const FunctionDecl *FD,
return PromiseType;
}
-/// Look up the std::experimental::coroutine_handle<PromiseType>.
+/// Look up the std::coroutine_handle<PromiseType>.
static QualType lookupCoroutineHandleType(Sema &S, QualType PromiseType,
SourceLocation Loc) {
if (PromiseType.isNull())
return QualType();
- NamespaceDecl *StdExp = S.lookupStdExperimentalNamespace();
- assert(StdExp && "Should already be diagnosed");
+ NamespaceDecl *CoroNamespace = S.getStdNamespace();
+ assert(CoroNamespace && "Should already be diagnosed");
LookupResult Result(S, &S.PP.getIdentifierTable().get("coroutine_handle"),
Loc, Sema::LookupOrdinaryName);
- if (!S.LookupQualifiedName(Result, StdExp)) {
+ if (!S.LookupQualifiedName(Result, CoroNamespace)) {
S.Diag(Loc, diag::err_implied_coroutine_type_not_found)
- << "std::experimental::coroutine_handle";
+ << "std::coroutine_handle";
return QualType();
}
@@ -242,53 +238,31 @@ static bool isValidCoroutineContext(Sema &S, SourceLocation Loc,
// placeholder type shall not be a coroutine."
if (FD->getReturnType()->isUndeducedType())
DiagInvalid(DiagAutoRet);
- // [dcl.fct.def.coroutine]p1: "The parameter-declaration-clause of the
- // coroutine shall not terminate with an ellipsis that is not part of a
- // parameter-declaration."
+ // [dcl.fct.def.coroutine]p1
+ // The parameter-declaration-clause of the coroutine shall not terminate with
+ // an ellipsis that is not part of a parameter-declaration.
if (FD->isVariadic())
DiagInvalid(DiagVarargs);
return !Diagnosed;
}
-static ExprResult buildOperatorCoawaitLookupExpr(Sema &SemaRef, Scope *S,
- SourceLocation Loc) {
- DeclarationName OpName =
- SemaRef.Context.DeclarationNames.getCXXOperatorName(OO_Coawait);
- LookupResult Operators(SemaRef, OpName, SourceLocation(),
- Sema::LookupOperatorName);
- SemaRef.LookupName(Operators, S);
-
- assert(!Operators.isAmbiguous() && "Operator lookup cannot be ambiguous");
- const auto &Functions = Operators.asUnresolvedSet();
- bool IsOverloaded =
- Functions.size() > 1 ||
- (Functions.size() == 1 && isa<FunctionTemplateDecl>(*Functions.begin()));
- Expr *CoawaitOp = UnresolvedLookupExpr::Create(
- SemaRef.Context, /*NamingClass*/ nullptr, NestedNameSpecifierLoc(),
- DeclarationNameInfo(OpName, Loc), /*RequiresADL*/ true, IsOverloaded,
- Functions.begin(), Functions.end());
- assert(CoawaitOp);
- return CoawaitOp;
-}
-
/// Build a call to 'operator co_await' if there is a suitable operator for
/// the given expression.
-static ExprResult buildOperatorCoawaitCall(Sema &SemaRef, SourceLocation Loc,
- Expr *E,
- UnresolvedLookupExpr *Lookup) {
+ExprResult Sema::BuildOperatorCoawaitCall(SourceLocation Loc, Expr *E,
+ UnresolvedLookupExpr *Lookup) {
UnresolvedSet<16> Functions;
Functions.append(Lookup->decls_begin(), Lookup->decls_end());
- return SemaRef.CreateOverloadedUnaryOp(Loc, UO_Coawait, Functions, E);
+ return CreateOverloadedUnaryOp(Loc, UO_Coawait, Functions, E);
}
static ExprResult buildOperatorCoawaitCall(Sema &SemaRef, Scope *S,
SourceLocation Loc, Expr *E) {
- ExprResult R = buildOperatorCoawaitLookupExpr(SemaRef, S, Loc);
+ ExprResult R = SemaRef.BuildOperatorCoawaitLookupExpr(S, Loc);
if (R.isInvalid())
return ExprError();
- return buildOperatorCoawaitCall(SemaRef, Loc, E,
- cast<UnresolvedLookupExpr>(R.get()));
+ return SemaRef.BuildOperatorCoawaitCall(Loc, E,
+ cast<UnresolvedLookupExpr>(R.get()));
}
static ExprResult buildCoroutineHandle(Sema &S, QualType PromiseType,
@@ -347,11 +321,12 @@ static ExprResult buildMemberCall(Sema &S, Expr *Base, SourceLocation Loc,
return ExprError();
}
- return S.BuildCallExpr(nullptr, Result.get(), Loc, Args, Loc, nullptr);
+ auto EndLoc = Args.empty() ? Loc : Args.back()->getEndLoc();
+ return S.BuildCallExpr(nullptr, Result.get(), Loc, Args, EndLoc, nullptr);
}
// See if return type is coroutine-handle and if so, invoke builtin coro-resume
-// on its address. This is to enable experimental support for coroutine-handle
+// on its address. This is to enable the support for coroutine-handle
// returning await_suspend that results in a guaranteed tail call to the target
// coroutine.
static Expr *maybeTailCall(Sema &S, QualType RetType, Expr *E,
@@ -366,12 +341,34 @@ static Expr *maybeTailCall(Sema &S, QualType RetType, Expr *E,
// EvaluateBinaryTypeTrait(BTT_IsConvertible, ...) which is at the moment
// a private function in SemaExprCXX.cpp
- ExprResult AddressExpr = buildMemberCall(S, E, Loc, "address", None);
+ ExprResult AddressExpr = buildMemberCall(S, E, Loc, "address", std::nullopt);
if (AddressExpr.isInvalid())
return nullptr;
Expr *JustAddress = AddressExpr.get();
+ // FIXME: Without optimizations, the temporary result from `await_suspend()`
+ // may be put on the coroutine frame since the coroutine frame constructor
+ // will think the temporary variable will escape from the
+ // `coroutine_handle<>::address()` call. This is problematic since the
+ // coroutine should be considered to be suspended after it enters
+ // `await_suspend` so it shouldn't access/update the coroutine frame after
+ // that.
+ //
+ // See https://github.com/llvm/llvm-project/issues/65054 for the report.
+ //
+ // The long term solution may wrap the whole logic about `await-suspend`
+ // into a standalone function. This is similar to the proposed solution
+ // in tryMarkAwaitSuspendNoInline. See the comments there for details.
+ //
+ // The short term solution here is to mark `coroutine_handle<>::address()`
+ // function as always-inline so that the coroutine frame constructor won't
+ // think the temporary result is escaped incorrectly.
+ if (auto *FD = cast<CallExpr>(JustAddress)->getDirectCallee())
+ if (!FD->hasAttr<AlwaysInlineAttr>() && !FD->hasAttr<NoInlineAttr>())
+ FD->addAttr(AlwaysInlineAttr::CreateImplicit(S.getASTContext(),
+ FD->getLocation()));
+
// Check that the type of AddressExpr is void*
if (!JustAddress->getType().getTypePtr()->isVoidPointerType())
S.Diag(cast<CallExpr>(JustAddress)->getCalleeDecl()->getLocation(),
@@ -388,6 +385,63 @@ static Expr *maybeTailCall(Sema &S, QualType RetType, Expr *E,
JustAddress);
}
+/// The await_suspend call performed by co_await is essentially asynchronous
+/// to the execution of the coroutine. Inlining it normally into an unsplit
+/// coroutine can cause miscompilation because the coroutine CFG misrepresents
+/// the true control flow of the program: things that happen in the
+/// await_suspend are not guaranteed to happen prior to the resumption of the
+/// coroutine, and things that happen after the resumption of the coroutine
+/// (including its exit and the potential deallocation of the coroutine frame)
+/// are not guaranteed to happen only after the end of await_suspend.
+///
+/// See https://github.com/llvm/llvm-project/issues/56301 and
+/// https://reviews.llvm.org/D157070 for the example and the full discussion.
+///
+/// The short-term solution to this problem is to mark the call as uninlinable.
+/// But we don't want to do this if the call is known to be trivial, which is
+/// very common.
+///
+/// The long-term solution may introduce patterns like:
+///
+/// call @llvm.coro.await_suspend(ptr %awaiter, ptr %handle,
+/// ptr @awaitSuspendFn)
+///
+/// Then it is much easier to perform the safety analysis in the middle end.
+/// If it is safe to inline the call to awaitSuspend, we can replace it in the
+/// CoroEarly pass. Otherwise we could replace it in the CoroSplit pass.
+static void tryMarkAwaitSuspendNoInline(Sema &S, OpaqueValueExpr *Awaiter,
+ CallExpr *AwaitSuspend) {
+ // The method here to extract the awaiter decl is not precise.
+ // This is intentional. Since it is hard to perform the analysis in the
+ // frontend due to the complexity of C++'s type systems.
+ // And we prefer to perform such analysis in the middle end since it is
+ // easier to implement and more powerful.
+ CXXRecordDecl *AwaiterDecl =
+ Awaiter->getType().getNonReferenceType()->getAsCXXRecordDecl();
+
+ if (AwaiterDecl && AwaiterDecl->field_empty())
+ return;
+
+ FunctionDecl *FD = AwaitSuspend->getDirectCallee();
+
+ assert(FD);
+
+ // If the `await_suspend()` function is marked as `always_inline` explicitly,
+ // we should give the user the right to control the codegen.
+ if (FD->hasAttr<NoInlineAttr>() || FD->hasAttr<AlwaysInlineAttr>())
+ return;
+
+ // This is problematic if the user calls the await_suspend standalone. But on
+ // the on hand, it is not incorrect semantically since inlining is not part
+ // of the standard. On the other hand, it is relatively rare to call
+ // the await_suspend function standalone.
+ //
+ // And given we've already had the long-term plan, the current workaround
+ // looks relatively tolerant.
+ FD->addAttr(
+ NoInlineAttr::CreateImplicit(S.getASTContext(), FD->getLocation()));
+}
+
/// Build calls to await_ready, await_suspend, and await_resume for a co_await
/// expression.
/// The generated AST tries to clean up temporary objects as early as
@@ -422,8 +476,8 @@ static ReadySuspendResumeResult buildCoawaitCalls(Sema &S, VarDecl *CoroPromise,
return Result.get();
};
- CallExpr *AwaitReady =
- cast_or_null<CallExpr>(BuildSubExpr(ACT::ACT_Ready, "await_ready", None));
+ CallExpr *AwaitReady = cast_or_null<CallExpr>(
+ BuildSubExpr(ACT::ACT_Ready, "await_ready", std::nullopt));
if (!AwaitReady)
return Calls;
if (!AwaitReady->getType()->isDependentType()) {
@@ -459,7 +513,11 @@ static ReadySuspendResumeResult buildCoawaitCalls(Sema &S, VarDecl *CoroPromise,
// type Z.
QualType RetType = AwaitSuspend->getCallReturnType(S.Context);
- // Experimental support for coroutine_handle returning await_suspend.
+ // We need to mark await_suspend as noinline temporarily. See the comment
+ // of tryMarkAwaitSuspendNoInline for details.
+ tryMarkAwaitSuspendNoInline(S, Operand, AwaitSuspend);
+
+ // Support for coroutine_handle returning await_suspend.
if (Expr *TailCallSuspend =
maybeTailCall(S, RetType, AwaitSuspend, Loc))
// Note that we don't wrap the expression with ExprWithCleanups here
@@ -484,7 +542,7 @@ static ReadySuspendResumeResult buildCoawaitCalls(Sema &S, VarDecl *CoroPromise,
}
}
- BuildSubExpr(ACT::ACT_Resume, "await_resume", None);
+ BuildSubExpr(ACT::ACT_Resume, "await_resume", std::nullopt);
// Make sure the awaiter object gets a chance to be cleaned up.
S.Cleanup.setExprNeedsCleanups(true);
@@ -509,10 +567,10 @@ VarDecl *Sema::buildCoroutinePromise(SourceLocation Loc) {
assert(isa<FunctionDecl>(CurContext) && "not in a function scope");
auto *FD = cast<FunctionDecl>(CurContext);
bool IsThisDependentType = [&] {
- if (auto *MD = dyn_cast_or_null<CXXMethodDecl>(FD))
- return MD->isInstance() && MD->getThisType()->isDependentType();
- else
- return false;
+ if (const auto *MD = dyn_cast_if_present<CXXMethodDecl>(FD))
+ return MD->isImplicitObjectMemberFunction() &&
+ MD->getThisType()->isDependentType();
+ return false;
}();
QualType T = FD->getType()->isDependentType() || IsThisDependentType
@@ -537,7 +595,7 @@ VarDecl *Sema::buildCoroutinePromise(SourceLocation Loc) {
// Add implicit object parameter.
if (auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
- if (MD->isInstance() && !isLambdaCallOperator(MD)) {
+ if (MD->isImplicitObjectMemberFunction() && !isLambdaCallOperator(MD)) {
ExprResult ThisExpr = ActOnCXXThis(Loc);
if (ThisExpr.isInvalid())
return nullptr;
@@ -584,8 +642,12 @@ VarDecl *Sema::buildCoroutinePromise(SourceLocation Loc) {
/*TopLevelOfInitList=*/false,
/*TreatUnavailableAsInvalid=*/false);
- // Attempt to initialize the promise type with the arguments.
- // If that fails, fall back to the promise type's default constructor.
+ // [dcl.fct.def.coroutine]5.7
+ // promise-constructor-arguments is determined as follows: overload
+ // resolution is performed on a promise constructor call created by
+ // assembling an argument list q_1 ... q_n . If a viable constructor is
+ // found ([over.match.viable]), then promise-constructor-arguments is ( q_1
+ // , ..., q_n ), otherwise promise-constructor-arguments is empty.
if (InitSeq) {
ExprResult Result = InitSeq.Perform(*this, Entity, Kind, CtorArgExprs);
if (Result.isInvalid()) {
@@ -653,6 +715,10 @@ static void checkNoThrow(Sema &S, const Stmt *E,
return;
}
if (ThrowingDecls.empty()) {
+ // [dcl.fct.def.coroutine]p15
+ // The expression co_await promise.final_suspend() shall not be
+ // potentially-throwing ([except.spec]).
+ //
// First time seeing an error, emit the error message.
S.Diag(cast<FunctionDecl>(S.CurContext)->getLocation(),
diag::err_coroutine_promise_final_suspend_requires_nothrow);
@@ -660,32 +726,32 @@ static void checkNoThrow(Sema &S, const Stmt *E,
ThrowingDecls.insert(D);
}
};
- auto SC = E->getStmtClass();
- if (SC == Expr::CXXConstructExprClass) {
- auto const *Ctor = cast<CXXConstructExpr>(E)->getConstructor();
+
+ if (auto *CE = dyn_cast<CXXConstructExpr>(E)) {
+ CXXConstructorDecl *Ctor = CE->getConstructor();
checkDeclNoexcept(Ctor);
// Check the corresponding destructor of the constructor.
- checkDeclNoexcept(Ctor->getParent()->getDestructor(), true);
- } else if (SC == Expr::CallExprClass || SC == Expr::CXXMemberCallExprClass ||
- SC == Expr::CXXOperatorCallExprClass) {
- if (!cast<CallExpr>(E)->isTypeDependent()) {
- checkDeclNoexcept(cast<CallExpr>(E)->getCalleeDecl());
- auto ReturnType = cast<CallExpr>(E)->getCallReturnType(S.getASTContext());
- // Check the destructor of the call return type, if any.
- if (ReturnType.isDestructedType() ==
- QualType::DestructionKind::DK_cxx_destructor) {
- const auto *T =
- cast<RecordType>(ReturnType.getCanonicalType().getTypePtr());
- checkDeclNoexcept(
- dyn_cast<CXXRecordDecl>(T->getDecl())->getDestructor(), true);
- }
+ checkDeclNoexcept(Ctor->getParent()->getDestructor(), /*IsDtor=*/true);
+ } else if (auto *CE = dyn_cast<CallExpr>(E)) {
+ if (CE->isTypeDependent())
+ return;
+
+ checkDeclNoexcept(CE->getCalleeDecl());
+ QualType ReturnType = CE->getCallReturnType(S.getASTContext());
+ // Check the destructor of the call return type, if any.
+ if (ReturnType.isDestructedType() ==
+ QualType::DestructionKind::DK_cxx_destructor) {
+ const auto *T =
+ cast<RecordType>(ReturnType.getCanonicalType().getTypePtr());
+ checkDeclNoexcept(cast<CXXRecordDecl>(T->getDecl())->getDestructor(),
+ /*IsDtor=*/true);
+ }
+ } else
+ for (const auto *Child : E->children()) {
+ if (!Child)
+ continue;
+ checkNoThrow(S, Child, ThrowingDecls);
}
- }
- for (const auto *Child : E->children()) {
- if (!Child)
- continue;
- checkNoThrow(S, Child, ThrowingDecls);
- }
}
bool Sema::checkFinalSuspendNoThrow(const Stmt *FinalSuspend) {
@@ -708,6 +774,9 @@ bool Sema::checkFinalSuspendNoThrow(const Stmt *FinalSuspend) {
bool Sema::ActOnCoroutineBodyStart(Scope *SC, SourceLocation KWLoc,
StringRef Keyword) {
+ // Ignore previous expr evaluation contexts.
+ EnterExpressionEvaluationContext PotentiallyEvaluated(
+ *this, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (!checkCoroutineContext(*this, KWLoc, Keyword))
return false;
auto *ScopeInfo = getCurFunction();
@@ -724,14 +793,15 @@ bool Sema::ActOnCoroutineBodyStart(Scope *SC, SourceLocation KWLoc,
SourceLocation Loc = Fn->getLocation();
// Build the initial suspend point
auto buildSuspends = [&](StringRef Name) mutable -> StmtResult {
- ExprResult Suspend =
- buildPromiseCall(*this, ScopeInfo->CoroutinePromise, Loc, Name, None);
- if (Suspend.isInvalid())
+ ExprResult Operand = buildPromiseCall(*this, ScopeInfo->CoroutinePromise,
+ Loc, Name, std::nullopt);
+ if (Operand.isInvalid())
return StmtError();
- Suspend = buildOperatorCoawaitCall(*this, SC, Loc, Suspend.get());
+ ExprResult Suspend =
+ buildOperatorCoawaitCall(*this, SC, Loc, Operand.get());
if (Suspend.isInvalid())
return StmtError();
- Suspend = BuildResolvedCoawaitExpr(Loc, Suspend.get(),
+ Suspend = BuildResolvedCoawaitExpr(Loc, Operand.get(), Suspend.get(),
/*IsImplicit*/ true);
Suspend = ActOnFinishFullExpr(Suspend.get(), /*DiscardedValue*/ false);
if (Suspend.isInvalid()) {
@@ -773,8 +843,8 @@ static bool isWithinCatchScope(Scope *S) {
// }();
// }
// }
- while (S && !(S->getFlags() & Scope::FnScope)) {
- if (S->getFlags() & Scope::CatchScope)
+ while (S && !S->isFunctionScope()) {
+ if (S->isCatchScope())
return true;
S = S->getParent();
}
@@ -786,126 +856,158 @@ static bool isWithinCatchScope(Scope *S) {
// function-body *outside of a handler* [...] A context within a function
// where an await-expression can appear is called a suspension context of the
// function."
-static void checkSuspensionContext(Sema &S, SourceLocation Loc,
+static bool checkSuspensionContext(Sema &S, SourceLocation Loc,
StringRef Keyword) {
// First emphasis of [expr.await]p2: must be a potentially evaluated context.
// That is, 'co_await' and 'co_yield' cannot appear in subexpressions of
// \c sizeof.
- if (S.isUnevaluatedContext())
+ if (S.isUnevaluatedContext()) {
S.Diag(Loc, diag::err_coroutine_unevaluated_context) << Keyword;
+ return false;
+ }
// Second emphasis of [expr.await]p2: must be outside of an exception handler.
- if (isWithinCatchScope(S.getCurScope()))
+ if (isWithinCatchScope(S.getCurScope())) {
S.Diag(Loc, diag::err_coroutine_within_handler) << Keyword;
+ return false;
+ }
+
+ return true;
}
ExprResult Sema::ActOnCoawaitExpr(Scope *S, SourceLocation Loc, Expr *E) {
+ if (!checkSuspensionContext(*this, Loc, "co_await"))
+ return ExprError();
+
if (!ActOnCoroutineBodyStart(S, Loc, "co_await")) {
CorrectDelayedTyposInExpr(E);
return ExprError();
}
- checkSuspensionContext(*this, Loc, "co_await");
-
- if (E->getType()->isPlaceholderType()) {
+ if (E->hasPlaceholderType()) {
ExprResult R = CheckPlaceholderExpr(E);
if (R.isInvalid()) return ExprError();
E = R.get();
}
- ExprResult Lookup = buildOperatorCoawaitLookupExpr(*this, S, Loc);
+ ExprResult Lookup = BuildOperatorCoawaitLookupExpr(S, Loc);
if (Lookup.isInvalid())
return ExprError();
return BuildUnresolvedCoawaitExpr(Loc, E,
cast<UnresolvedLookupExpr>(Lookup.get()));
}
-ExprResult Sema::BuildUnresolvedCoawaitExpr(SourceLocation Loc, Expr *E,
+ExprResult Sema::BuildOperatorCoawaitLookupExpr(Scope *S, SourceLocation Loc) {
+ DeclarationName OpName =
+ Context.DeclarationNames.getCXXOperatorName(OO_Coawait);
+ LookupResult Operators(*this, OpName, SourceLocation(),
+ Sema::LookupOperatorName);
+ LookupName(Operators, S);
+
+ assert(!Operators.isAmbiguous() && "Operator lookup cannot be ambiguous");
+ const auto &Functions = Operators.asUnresolvedSet();
+ bool IsOverloaded =
+ Functions.size() > 1 ||
+ (Functions.size() == 1 && isa<FunctionTemplateDecl>(*Functions.begin()));
+ Expr *CoawaitOp = UnresolvedLookupExpr::Create(
+ Context, /*NamingClass*/ nullptr, NestedNameSpecifierLoc(),
+ DeclarationNameInfo(OpName, Loc), /*RequiresADL*/ true, IsOverloaded,
+ Functions.begin(), Functions.end());
+ assert(CoawaitOp);
+ return CoawaitOp;
+}
+
+// Attempts to resolve and build a CoawaitExpr from "raw" inputs, bailing out to
+// DependentCoawaitExpr if needed.
+ExprResult Sema::BuildUnresolvedCoawaitExpr(SourceLocation Loc, Expr *Operand,
UnresolvedLookupExpr *Lookup) {
auto *FSI = checkCoroutineContext(*this, Loc, "co_await");
if (!FSI)
return ExprError();
- if (E->getType()->isPlaceholderType()) {
- ExprResult R = CheckPlaceholderExpr(E);
+ if (Operand->hasPlaceholderType()) {
+ ExprResult R = CheckPlaceholderExpr(Operand);
if (R.isInvalid())
return ExprError();
- E = R.get();
+ Operand = R.get();
}
auto *Promise = FSI->CoroutinePromise;
if (Promise->getType()->isDependentType()) {
- Expr *Res =
- new (Context) DependentCoawaitExpr(Loc, Context.DependentTy, E, Lookup);
+ Expr *Res = new (Context)
+ DependentCoawaitExpr(Loc, Context.DependentTy, Operand, Lookup);
return Res;
}
auto *RD = Promise->getType()->getAsCXXRecordDecl();
+ auto *Transformed = Operand;
if (lookupMember(*this, "await_transform", RD, Loc)) {
- ExprResult R = buildPromiseCall(*this, Promise, Loc, "await_transform", E);
+ ExprResult R =
+ buildPromiseCall(*this, Promise, Loc, "await_transform", Operand);
if (R.isInvalid()) {
Diag(Loc,
diag::note_coroutine_promise_implicit_await_transform_required_here)
- << E->getSourceRange();
+ << Operand->getSourceRange();
return ExprError();
}
- E = R.get();
+ Transformed = R.get();
}
- ExprResult Awaitable = buildOperatorCoawaitCall(*this, Loc, E, Lookup);
- if (Awaitable.isInvalid())
+ ExprResult Awaiter = BuildOperatorCoawaitCall(Loc, Transformed, Lookup);
+ if (Awaiter.isInvalid())
return ExprError();
- return BuildResolvedCoawaitExpr(Loc, Awaitable.get());
+ return BuildResolvedCoawaitExpr(Loc, Operand, Awaiter.get());
}
-ExprResult Sema::BuildResolvedCoawaitExpr(SourceLocation Loc, Expr *E,
- bool IsImplicit) {
+ExprResult Sema::BuildResolvedCoawaitExpr(SourceLocation Loc, Expr *Operand,
+ Expr *Awaiter, bool IsImplicit) {
auto *Coroutine = checkCoroutineContext(*this, Loc, "co_await", IsImplicit);
if (!Coroutine)
return ExprError();
- if (E->getType()->isPlaceholderType()) {
- ExprResult R = CheckPlaceholderExpr(E);
+ if (Awaiter->hasPlaceholderType()) {
+ ExprResult R = CheckPlaceholderExpr(Awaiter);
if (R.isInvalid()) return ExprError();
- E = R.get();
+ Awaiter = R.get();
}
- if (E->getType()->isDependentType()) {
+ if (Awaiter->getType()->isDependentType()) {
Expr *Res = new (Context)
- CoawaitExpr(Loc, Context.DependentTy, E, IsImplicit);
+ CoawaitExpr(Loc, Context.DependentTy, Operand, Awaiter, IsImplicit);
return Res;
}
// If the expression is a temporary, materialize it as an lvalue so that we
// can use it multiple times.
- if (E->isPRValue())
- E = CreateMaterializeTemporaryExpr(E->getType(), E, true);
+ if (Awaiter->isPRValue())
+ Awaiter = CreateMaterializeTemporaryExpr(Awaiter->getType(), Awaiter, true);
// The location of the `co_await` token cannot be used when constructing
// the member call expressions since it's before the location of `Expr`, which
// is used as the start of the member call expression.
- SourceLocation CallLoc = E->getExprLoc();
+ SourceLocation CallLoc = Awaiter->getExprLoc();
// Build the await_ready, await_suspend, await_resume calls.
- ReadySuspendResumeResult RSS = buildCoawaitCalls(
- *this, Coroutine->CoroutinePromise, CallLoc, E);
+ ReadySuspendResumeResult RSS =
+ buildCoawaitCalls(*this, Coroutine->CoroutinePromise, CallLoc, Awaiter);
if (RSS.IsInvalid)
return ExprError();
- Expr *Res =
- new (Context) CoawaitExpr(Loc, E, RSS.Results[0], RSS.Results[1],
- RSS.Results[2], RSS.OpaqueValue, IsImplicit);
+ Expr *Res = new (Context)
+ CoawaitExpr(Loc, Operand, Awaiter, RSS.Results[0], RSS.Results[1],
+ RSS.Results[2], RSS.OpaqueValue, IsImplicit);
return Res;
}
ExprResult Sema::ActOnCoyieldExpr(Scope *S, SourceLocation Loc, Expr *E) {
+ if (!checkSuspensionContext(*this, Loc, "co_yield"))
+ return ExprError();
+
if (!ActOnCoroutineBodyStart(S, Loc, "co_yield")) {
CorrectDelayedTyposInExpr(E);
return ExprError();
}
- checkSuspensionContext(*this, Loc, "co_yield");
-
// Build yield_value call.
ExprResult Awaitable = buildPromiseCall(
*this, getCurFunction()->CoroutinePromise, Loc, "yield_value", E);
@@ -924,14 +1026,16 @@ ExprResult Sema::BuildCoyieldExpr(SourceLocation Loc, Expr *E) {
if (!Coroutine)
return ExprError();
- if (E->getType()->isPlaceholderType()) {
+ if (E->hasPlaceholderType()) {
ExprResult R = CheckPlaceholderExpr(E);
if (R.isInvalid()) return ExprError();
E = R.get();
}
+ Expr *Operand = E;
+
if (E->getType()->isDependentType()) {
- Expr *Res = new (Context) CoyieldExpr(Loc, Context.DependentTy, E);
+ Expr *Res = new (Context) CoyieldExpr(Loc, Context.DependentTy, Operand, E);
return Res;
}
@@ -947,7 +1051,7 @@ ExprResult Sema::BuildCoyieldExpr(SourceLocation Loc, Expr *E) {
return ExprError();
Expr *Res =
- new (Context) CoyieldExpr(Loc, E, RSS.Results[0], RSS.Results[1],
+ new (Context) CoyieldExpr(Loc, Operand, E, RSS.Results[0], RSS.Results[1],
RSS.Results[2], RSS.OpaqueValue);
return Res;
@@ -967,8 +1071,8 @@ StmtResult Sema::BuildCoreturnStmt(SourceLocation Loc, Expr *E,
if (!FSI)
return StmtError();
- if (E && E->getType()->isPlaceholderType() &&
- !E->getType()->isSpecificPlaceholderType(BuiltinType::Overload)) {
+ if (E && E->hasPlaceholderType() &&
+ !E->hasPlaceholderType(BuiltinType::Overload)) {
ExprResult R = CheckPlaceholderExpr(E);
if (R.isInvalid()) return StmtError();
E = R.get();
@@ -981,7 +1085,7 @@ StmtResult Sema::BuildCoreturnStmt(SourceLocation Loc, Expr *E,
PC = buildPromiseCall(*this, Promise, Loc, "return_value", E);
} else {
E = MakeFullDiscardedValueExpr(E).get();
- PC = buildPromiseCall(*this, Promise, Loc, "return_void", None);
+ PC = buildPromiseCall(*this, Promise, Loc, "return_void", std::nullopt);
}
if (PC.isInvalid())
return StmtError();
@@ -1000,9 +1104,8 @@ static Expr *buildStdNoThrowDeclRef(Sema &S, SourceLocation Loc) {
LookupResult Result(S, &S.PP.getIdentifierTable().get("nothrow"), Loc,
Sema::LookupOrdinaryName);
if (!S.LookupQualifiedName(Result, Std)) {
- // FIXME: <experimental/coroutine> should have been included already.
- // If we require it to include <new> then this diagnostic is no longer
- // needed.
+ // <coroutine> is not requred to include <new>, so we couldn't omit
+ // the check here.
S.Diag(Loc, diag::err_implicit_coroutine_std_nothrow_type_not_found);
return nullptr;
}
@@ -1023,29 +1126,55 @@ static Expr *buildStdNoThrowDeclRef(Sema &S, SourceLocation Loc) {
return DR.get();
}
-// Find an appropriate delete for the promise.
-static FunctionDecl *findDeleteForPromise(Sema &S, SourceLocation Loc,
- QualType PromiseType) {
- FunctionDecl *OperatorDelete = nullptr;
+static TypeSourceInfo *getTypeSourceInfoForStdAlignValT(Sema &S,
+ SourceLocation Loc) {
+ EnumDecl *StdAlignValT = S.getStdAlignValT();
+ QualType StdAlignValDecl = S.Context.getTypeDeclType(StdAlignValT);
+ return S.Context.getTrivialTypeSourceInfo(StdAlignValDecl);
+}
+// Find an appropriate delete for the promise.
+static bool findDeleteForPromise(Sema &S, SourceLocation Loc, QualType PromiseType,
+ FunctionDecl *&OperatorDelete) {
DeclarationName DeleteName =
S.Context.DeclarationNames.getCXXOperatorName(OO_Delete);
auto *PointeeRD = PromiseType->getAsCXXRecordDecl();
assert(PointeeRD && "PromiseType must be a CxxRecordDecl type");
- if (S.FindDeallocationFunction(Loc, PointeeRD, DeleteName, OperatorDelete))
- return nullptr;
+ const bool Overaligned = S.getLangOpts().CoroAlignedAllocation;
+
+ // [dcl.fct.def.coroutine]p12
+ // The deallocation function's name is looked up by searching for it in the
+ // scope of the promise type. If nothing is found, a search is performed in
+ // the global scope.
+ if (S.FindDeallocationFunction(Loc, PointeeRD, DeleteName, OperatorDelete,
+ /*Diagnose*/ true, /*WantSize*/ true,
+ /*WantAligned*/ Overaligned))
+ return false;
+ // [dcl.fct.def.coroutine]p12
+ // If both a usual deallocation function with only a pointer parameter and a
+ // usual deallocation function with both a pointer parameter and a size
+ // parameter are found, then the selected deallocation function shall be the
+ // one with two parameters. Otherwise, the selected deallocation function
+ // shall be the function with one parameter.
if (!OperatorDelete) {
// Look for a global declaration.
- const bool CanProvideSize = S.isCompleteType(Loc, PromiseType);
- const bool Overaligned = false;
+ // Coroutines can always provide their required size.
+ const bool CanProvideSize = true;
+ // Sema::FindUsualDeallocationFunction will try to find the one with two
+ // parameters first. It will return the deallocation function with one
+ // parameter if failed.
OperatorDelete = S.FindUsualDeallocationFunction(Loc, CanProvideSize,
Overaligned, DeleteName);
+
+ if (!OperatorDelete)
+ return false;
}
+
S.MarkFunctionReferenced(Loc, OperatorDelete);
- return OperatorDelete;
+ return true;
}
@@ -1067,8 +1196,21 @@ void Sema::CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body) {
return;
}
- // Coroutines [stmt.return]p1:
- // A return statement shall not appear in a coroutine.
+ // The always_inline attribute doesn't reliably apply to a coroutine,
+ // because the coroutine will be split into pieces and some pieces
+ // might be called indirectly, as in a virtual call. Even the ramp
+ // function cannot be inlined at -O0, due to pipeline ordering
+ // problems (see https://llvm.org/PR53413). Tell the user about it.
+ if (FD->hasAttr<AlwaysInlineAttr>())
+ Diag(FD->getLocation(), diag::warn_always_inline_coroutine);
+
+ // The design of coroutines means we cannot allow use of VLAs within one, so
+ // diagnose if we've seen a VLA in the body of this function.
+ if (Fn->FirstVLALoc.isValid())
+ Diag(Fn->FirstVLALoc, diag::err_vla_in_coroutine_unsupported);
+
+ // [stmt.return.coroutine]p1:
+ // A coroutine shall not enclose a return statement ([stmt.return]).
if (Fn->FirstReturnLoc.isValid()) {
assert(Fn->FirstCoroutineStmtLoc.isValid() &&
"first coroutine location not set");
@@ -1076,6 +1218,12 @@ void Sema::CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body) {
Diag(Fn->FirstCoroutineStmtLoc, diag::note_declared_coroutine_here)
<< Fn->getFirstCoroutineStmtKeyword();
}
+
+ // Coroutines will get splitted into pieces. The GNU address of label
+ // extension wouldn't be meaningful in coroutines.
+ for (AddrLabelExpr *ALE : Fn->AddrLabels)
+ Diag(ALE->getBeginLoc(), diag::err_coro_invalid_addr_of_label);
+
CoroutineStmtBuilder Builder(*this, *FD, *Fn, Body);
if (Builder.isInvalid() || !Builder.buildStatements())
return FD->setInvalidDecl();
@@ -1084,6 +1232,18 @@ void Sema::CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body) {
Body = CoroutineBodyStmt::Create(Context, Builder);
}
+static CompoundStmt *buildCoroutineBody(Stmt *Body, ASTContext &Context) {
+ if (auto *CS = dyn_cast<CompoundStmt>(Body))
+ return CS;
+
+ // The body of the coroutine may be a try statement if it is in
+ // 'function-try-block' syntax. Here we wrap it into a compound
+ // statement for consistency.
+ assert(isa<CXXTryStmt>(Body) && "Unimaged coroutine body type");
+ return CompoundStmt::Create(Context, {Body}, FPOptionsOverride(),
+ SourceLocation(), SourceLocation());
+}
+
CoroutineStmtBuilder::CoroutineStmtBuilder(Sema &S, FunctionDecl &FD,
sema::FunctionScopeInfo &Fn,
Stmt *Body)
@@ -1091,7 +1251,7 @@ CoroutineStmtBuilder::CoroutineStmtBuilder(Sema &S, FunctionDecl &FD,
IsPromiseDependentType(
!Fn.CoroutinePromise ||
Fn.CoroutinePromise->getType()->isDependentType()) {
- this->Body = Body;
+ this->Body = buildCoroutineBody(Body, S.getASTContext());
for (auto KV : Fn.CoroutineParameterMoves)
this->ParamMovesVector.push_back(KV.second);
@@ -1169,12 +1329,15 @@ bool CoroutineStmtBuilder::makeReturnOnAllocFailure() {
assert(!IsPromiseDependentType &&
"cannot make statement while the promise type is dependent");
- // [dcl.fct.def.coroutine]/8
- // The unqualified-id get_return_object_on_allocation_failure is looked up in
- // the scope of class P by class member access lookup (3.4.5). ...
- // If an allocation function returns nullptr, ... the coroutine return value
- // is obtained by a call to ... get_return_object_on_allocation_failure().
-
+ // [dcl.fct.def.coroutine]p10
+ // If a search for the name get_return_object_on_allocation_failure in
+ // the scope of the promise type ([class.member.lookup]) finds any
+ // declarations, then the result of a call to an allocation function used to
+ // obtain storage for the coroutine state is assumed to return nullptr if it
+ // fails to obtain storage, ... If the allocation function returns nullptr,
+ // ... and the return value is obtained by a call to
+ // T::get_return_object_on_allocation_failure(), where T is the
+ // promise type.
DeclarationName DN =
S.PP.getIdentifierInfo("get_return_object_on_allocation_failure");
LookupResult Found(S, DN, Loc, Sema::LookupMemberName);
@@ -1209,47 +1372,13 @@ bool CoroutineStmtBuilder::makeReturnOnAllocFailure() {
return true;
}
-bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
- // Form and check allocation and deallocation calls.
- assert(!IsPromiseDependentType &&
- "cannot make statement while the promise type is dependent");
- QualType PromiseType = Fn.CoroutinePromise->getType();
-
- if (S.RequireCompleteType(Loc, PromiseType, diag::err_incomplete_type))
- return false;
-
- const bool RequiresNoThrowAlloc = ReturnStmtOnAllocFailure != nullptr;
-
- // [dcl.fct.def.coroutine]/7
- // Lookup allocation functions using a parameter list composed of the
- // requested size of the coroutine state being allocated, followed by
- // the coroutine function's arguments. If a matching allocation function
- // exists, use it. Otherwise, use an allocation function that just takes
- // the requested size.
-
- FunctionDecl *OperatorNew = nullptr;
- FunctionDecl *OperatorDelete = nullptr;
- FunctionDecl *UnusedResult = nullptr;
- bool PassAlignment = false;
- SmallVector<Expr *, 1> PlacementArgs;
-
- // [dcl.fct.def.coroutine]/7
- // "The allocation function’s name is looked up in the scope of P.
- // [...] If the lookup finds an allocation function in the scope of P,
- // overload resolution is performed on a function call created by assembling
- // an argument list. The first argument is the amount of space requested,
- // and has type std::size_t. The lvalues p1 ... pn are the succeeding
- // arguments."
- //
- // ...where "p1 ... pn" are defined earlier as:
- //
- // [dcl.fct.def.coroutine]/3
- // "For a coroutine f that is a non-static member function, let P1 denote the
- // type of the implicit object parameter (13.3.1) and P2 ... Pn be the types
- // of the function parameters; otherwise let P1 ... Pn be the types of the
- // function parameters. Let p1 ... pn be lvalues denoting those objects."
+// Collect placement arguments for allocation function of coroutine FD.
+// Return true if we collect placement arguments succesfully. Return false,
+// otherwise.
+static bool collectPlacementArgs(Sema &S, FunctionDecl &FD, SourceLocation Loc,
+ SmallVectorImpl<Expr *> &PlacementArgs) {
if (auto *MD = dyn_cast<CXXMethodDecl>(&FD)) {
- if (MD->isInstance() && !isLambdaCallOperator(MD)) {
+ if (MD->isImplicitObjectMemberFunction() && !isLambdaCallOperator(MD)) {
ExprResult ThisExpr = S.ActOnCXXThis(Loc);
if (ThisExpr.isInvalid())
return false;
@@ -1259,6 +1388,7 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
PlacementArgs.push_back(ThisExpr.get());
}
}
+
for (auto *PD : FD.parameters()) {
if (PD->getType()->isDependentType())
continue;
@@ -1273,34 +1403,154 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
PlacementArgs.push_back(PDRefExpr.get());
}
- S.FindAllocationFunctions(Loc, SourceRange(), /*NewScope*/ Sema::AFS_Class,
- /*DeleteScope*/ Sema::AFS_Both, PromiseType,
- /*isArray*/ false, PassAlignment, PlacementArgs,
- OperatorNew, UnusedResult, /*Diagnose*/ false);
- // [dcl.fct.def.coroutine]/7
- // "If no matching function is found, overload resolution is performed again
- // on a function call created by passing just the amount of space required as
- // an argument of type std::size_t."
- if (!OperatorNew && !PlacementArgs.empty()) {
- PlacementArgs.clear();
- S.FindAllocationFunctions(Loc, SourceRange(), /*NewScope*/ Sema::AFS_Class,
+ return true;
+}
+
+bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
+ // Form and check allocation and deallocation calls.
+ assert(!IsPromiseDependentType &&
+ "cannot make statement while the promise type is dependent");
+ QualType PromiseType = Fn.CoroutinePromise->getType();
+
+ if (S.RequireCompleteType(Loc, PromiseType, diag::err_incomplete_type))
+ return false;
+
+ const bool RequiresNoThrowAlloc = ReturnStmtOnAllocFailure != nullptr;
+
+ // According to [dcl.fct.def.coroutine]p9, Lookup allocation functions using a
+ // parameter list composed of the requested size of the coroutine state being
+ // allocated, followed by the coroutine function's arguments. If a matching
+ // allocation function exists, use it. Otherwise, use an allocation function
+ // that just takes the requested size.
+ //
+ // [dcl.fct.def.coroutine]p9
+ // An implementation may need to allocate additional storage for a
+ // coroutine.
+ // This storage is known as the coroutine state and is obtained by calling a
+ // non-array allocation function ([basic.stc.dynamic.allocation]). The
+ // allocation function's name is looked up by searching for it in the scope of
+ // the promise type.
+ // - If any declarations are found, overload resolution is performed on a
+ // function call created by assembling an argument list. The first argument is
+ // the amount of space requested, and has type std::size_t. The
+ // lvalues p1 ... pn are the succeeding arguments.
+ //
+ // ...where "p1 ... pn" are defined earlier as:
+ //
+ // [dcl.fct.def.coroutine]p3
+ // The promise type of a coroutine is `std::coroutine_traits<R, P1, ...,
+ // Pn>`
+ // , where R is the return type of the function, and `P1, ..., Pn` are the
+ // sequence of types of the non-object function parameters, preceded by the
+ // type of the object parameter ([dcl.fct]) if the coroutine is a non-static
+ // member function. [dcl.fct.def.coroutine]p4 In the following, p_i is an
+ // lvalue of type P_i, where p1 denotes the object parameter and p_i+1 denotes
+ // the i-th non-object function parameter for a non-static member function,
+ // and p_i denotes the i-th function parameter otherwise. For a non-static
+ // member function, q_1 is an lvalue that denotes *this; any other q_i is an
+ // lvalue that denotes the parameter copy corresponding to p_i.
+
+ FunctionDecl *OperatorNew = nullptr;
+ SmallVector<Expr *, 1> PlacementArgs;
+
+ const bool PromiseContainsNew = [this, &PromiseType]() -> bool {
+ DeclarationName NewName =
+ S.getASTContext().DeclarationNames.getCXXOperatorName(OO_New);
+ LookupResult R(S, NewName, Loc, Sema::LookupOrdinaryName);
+
+ if (PromiseType->isRecordType())
+ S.LookupQualifiedName(R, PromiseType->getAsCXXRecordDecl());
+
+ return !R.empty() && !R.isAmbiguous();
+ }();
+
+ // Helper function to indicate whether the last lookup found the aligned
+ // allocation function.
+ bool PassAlignment = S.getLangOpts().CoroAlignedAllocation;
+ auto LookupAllocationFunction = [&](Sema::AllocationFunctionScope NewScope =
+ Sema::AFS_Both,
+ bool WithoutPlacementArgs = false,
+ bool ForceNonAligned = false) {
+ // [dcl.fct.def.coroutine]p9
+ // The allocation function's name is looked up by searching for it in the
+ // scope of the promise type.
+ // - If any declarations are found, ...
+ // - If no declarations are found in the scope of the promise type, a search
+ // is performed in the global scope.
+ if (NewScope == Sema::AFS_Both)
+ NewScope = PromiseContainsNew ? Sema::AFS_Class : Sema::AFS_Global;
+
+ PassAlignment = !ForceNonAligned && S.getLangOpts().CoroAlignedAllocation;
+ FunctionDecl *UnusedResult = nullptr;
+ S.FindAllocationFunctions(Loc, SourceRange(), NewScope,
/*DeleteScope*/ Sema::AFS_Both, PromiseType,
- /*isArray*/ false, PassAlignment, PlacementArgs,
+ /*isArray*/ false, PassAlignment,
+ WithoutPlacementArgs ? MultiExprArg{}
+ : PlacementArgs,
OperatorNew, UnusedResult, /*Diagnose*/ false);
- }
+ };
- // [dcl.fct.def.coroutine]/7
- // "The allocation function’s name is looked up in the scope of P. If this
- // lookup fails, the allocation function’s name is looked up in the global
- // scope."
- if (!OperatorNew) {
- S.FindAllocationFunctions(Loc, SourceRange(), /*NewScope*/ Sema::AFS_Global,
- /*DeleteScope*/ Sema::AFS_Both, PromiseType,
- /*isArray*/ false, PassAlignment, PlacementArgs,
- OperatorNew, UnusedResult);
+ // We don't expect to call to global operator new with (size, p0, …, pn).
+ // So if we choose to lookup the allocation function in global scope, we
+ // shouldn't lookup placement arguments.
+ if (PromiseContainsNew && !collectPlacementArgs(S, FD, Loc, PlacementArgs))
+ return false;
+
+ LookupAllocationFunction();
+
+ if (PromiseContainsNew && !PlacementArgs.empty()) {
+ // [dcl.fct.def.coroutine]p9
+ // If no viable function is found ([over.match.viable]), overload
+ // resolution
+ // is performed again on a function call created by passing just the amount
+ // of space required as an argument of type std::size_t.
+ //
+ // Proposed Change of [dcl.fct.def.coroutine]p9 in P2014R0:
+ // Otherwise, overload resolution is performed again on a function call
+ // created
+ // by passing the amount of space requested as an argument of type
+ // std::size_t as the first argument, and the requested alignment as
+ // an argument of type std:align_val_t as the second argument.
+ if (!OperatorNew ||
+ (S.getLangOpts().CoroAlignedAllocation && !PassAlignment))
+ LookupAllocationFunction(/*NewScope*/ Sema::AFS_Class,
+ /*WithoutPlacementArgs*/ true);
}
+ // Proposed Change of [dcl.fct.def.coroutine]p12 in P2014R0:
+ // Otherwise, overload resolution is performed again on a function call
+ // created
+ // by passing the amount of space requested as an argument of type
+ // std::size_t as the first argument, and the lvalues p1 ... pn as the
+ // succeeding arguments. Otherwise, overload resolution is performed again
+ // on a function call created by passing just the amount of space required as
+ // an argument of type std::size_t.
+ //
+ // So within the proposed change in P2014RO, the priority order of aligned
+ // allocation functions wiht promise_type is:
+ //
+ // void* operator new( std::size_t, std::align_val_t, placement_args... );
+ // void* operator new( std::size_t, std::align_val_t);
+ // void* operator new( std::size_t, placement_args... );
+ // void* operator new( std::size_t);
+
+ // Helper variable to emit warnings.
+ bool FoundNonAlignedInPromise = false;
+ if (PromiseContainsNew && S.getLangOpts().CoroAlignedAllocation)
+ if (!OperatorNew || !PassAlignment) {
+ FoundNonAlignedInPromise = OperatorNew;
+
+ LookupAllocationFunction(/*NewScope*/ Sema::AFS_Class,
+ /*WithoutPlacementArgs*/ false,
+ /*ForceNonAligned*/ true);
+
+ if (!OperatorNew && !PlacementArgs.empty())
+ LookupAllocationFunction(/*NewScope*/ Sema::AFS_Class,
+ /*WithoutPlacementArgs*/ true,
+ /*ForceNonAligned*/ true);
+ }
+
bool IsGlobalOverload =
OperatorNew && !isa<CXXRecordDecl>(OperatorNew->getDeclContext());
// If we didn't find a class-local new declaration and non-throwing new
@@ -1312,14 +1562,27 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
return false;
PlacementArgs = {StdNoThrow};
OperatorNew = nullptr;
- S.FindAllocationFunctions(Loc, SourceRange(), /*NewScope*/ Sema::AFS_Both,
- /*DeleteScope*/ Sema::AFS_Both, PromiseType,
- /*isArray*/ false, PassAlignment, PlacementArgs,
- OperatorNew, UnusedResult);
+ LookupAllocationFunction(Sema::AFS_Global);
+ }
+
+ // If we found a non-aligned allocation function in the promise_type,
+ // it indicates the user forgot to update the allocation function. Let's emit
+ // a warning here.
+ if (FoundNonAlignedInPromise) {
+ S.Diag(OperatorNew->getLocation(),
+ diag::warn_non_aligned_allocation_function)
+ << &FD;
}
- if (!OperatorNew)
+ if (!OperatorNew) {
+ if (PromiseContainsNew)
+ S.Diag(Loc, diag::err_coroutine_unusable_new) << PromiseType << &FD;
+ else if (RequiresNoThrowAlloc)
+ S.Diag(Loc, diag::err_coroutine_unfound_nothrow_new)
+ << &FD << S.getLangOpts().CoroAlignedAllocation;
+
return false;
+ }
if (RequiresNoThrowAlloc) {
const auto *FT = OperatorNew->getType()->castAs<FunctionProtoType>();
@@ -1333,8 +1596,13 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
}
}
- if ((OperatorDelete = findDeleteForPromise(S, Loc, PromiseType)) == nullptr)
+ FunctionDecl *OperatorDelete = nullptr;
+ if (!findDeleteForPromise(S, Loc, PromiseType, OperatorDelete)) {
+ // FIXME: We should add an error here. According to:
+ // [dcl.fct.def.coroutine]p12
+ // If no usual deallocation function is found, the program is ill-formed.
return false;
+ }
Expr *FramePtr =
S.BuildBuiltinCallExpr(Loc, Builtin::BI__builtin_coro_frame, {});
@@ -1342,16 +1610,34 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
Expr *FrameSize =
S.BuildBuiltinCallExpr(Loc, Builtin::BI__builtin_coro_size, {});
- // Make new call.
+ Expr *FrameAlignment = nullptr;
+
+ if (S.getLangOpts().CoroAlignedAllocation) {
+ FrameAlignment =
+ S.BuildBuiltinCallExpr(Loc, Builtin::BI__builtin_coro_align, {});
+
+ TypeSourceInfo *AlignValTy = getTypeSourceInfoForStdAlignValT(S, Loc);
+ if (!AlignValTy)
+ return false;
+
+ FrameAlignment = S.BuildCXXNamedCast(Loc, tok::kw_static_cast, AlignValTy,
+ FrameAlignment, SourceRange(Loc, Loc),
+ SourceRange(Loc, Loc))
+ .get();
+ }
+ // Make new call.
ExprResult NewRef =
S.BuildDeclRefExpr(OperatorNew, OperatorNew->getType(), VK_LValue, Loc);
if (NewRef.isInvalid())
return false;
SmallVector<Expr *, 2> NewArgs(1, FrameSize);
- for (auto Arg : PlacementArgs)
- NewArgs.push_back(Arg);
+ if (S.getLangOpts().CoroAlignedAllocation && PassAlignment)
+ NewArgs.push_back(FrameAlignment);
+
+ if (OperatorNew->getNumParams() > NewArgs.size())
+ llvm::append_range(NewArgs, PlacementArgs);
ExprResult NewExpr =
S.BuildCallExpr(S.getCurScope(), NewRef.get(), Loc, NewArgs, Loc);
@@ -1373,12 +1659,36 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
SmallVector<Expr *, 2> DeleteArgs{CoroFree};
- // Check if we need to pass the size.
+ // [dcl.fct.def.coroutine]p12
+ // The selected deallocation function shall be called with the address of
+ // the block of storage to be reclaimed as its first argument. If a
+ // deallocation function with a parameter of type std::size_t is
+ // used, the size of the block is passed as the corresponding argument.
const auto *OpDeleteType =
OpDeleteQualType.getTypePtr()->castAs<FunctionProtoType>();
- if (OpDeleteType->getNumParams() > 1)
+ if (OpDeleteType->getNumParams() > DeleteArgs.size() &&
+ S.getASTContext().hasSameUnqualifiedType(
+ OpDeleteType->getParamType(DeleteArgs.size()), FrameSize->getType()))
DeleteArgs.push_back(FrameSize);
+ // Proposed Change of [dcl.fct.def.coroutine]p12 in P2014R0:
+ // If deallocation function lookup finds a usual deallocation function with
+ // a pointer parameter, size parameter and alignment parameter then this
+ // will be the selected deallocation function, otherwise if lookup finds a
+ // usual deallocation function with both a pointer parameter and a size
+ // parameter, then this will be the selected deallocation function.
+ // Otherwise, if lookup finds a usual deallocation function with only a
+ // pointer parameter, then this will be the selected deallocation
+ // function.
+ //
+ // So we are not forced to pass alignment to the deallocation function.
+ if (S.getLangOpts().CoroAlignedAllocation &&
+ OpDeleteType->getNumParams() > DeleteArgs.size() &&
+ S.getASTContext().hasSameUnqualifiedType(
+ OpDeleteType->getParamType(DeleteArgs.size()),
+ FrameAlignment->getType()))
+ DeleteArgs.push_back(FrameAlignment);
+
ExprResult DeleteExpr =
S.BuildCallExpr(S.getCurScope(), DeleteRef.get(), Loc, DeleteArgs, Loc);
DeleteExpr =
@@ -1396,9 +1706,13 @@ bool CoroutineStmtBuilder::makeOnFallthrough() {
assert(!IsPromiseDependentType &&
"cannot make statement while the promise type is dependent");
- // [dcl.fct.def.coroutine]/4
- // The unqualified-ids 'return_void' and 'return_value' are looked up in
- // the scope of class P. If both are found, the program is ill-formed.
+ // [dcl.fct.def.coroutine]/p6
+ // If searches for the names return_void and return_value in the scope of
+ // the promise type each find any declarations, the program is ill-formed.
+ // [Note 1: If return_void is found, flowing off the end of a coroutine is
+ // equivalent to a co_return with no operand. Otherwise, flowing off the end
+ // of a coroutine results in undefined behavior ([stmt.return.coroutine]). —
+ // end note]
bool HasRVoid, HasRValue;
LookupResult LRVoid =
lookupMember(S, "return_void", PromiseRecordDecl, Loc, HasRVoid);
@@ -1419,18 +1733,20 @@ bool CoroutineStmtBuilder::makeOnFallthrough() {
<< LRValue.getLookupName();
return false;
} else if (!HasRVoid && !HasRValue) {
- // FIXME: The PDTS currently specifies this case as UB, not ill-formed.
- // However we still diagnose this as an error since until the PDTS is fixed.
- S.Diag(FD.getLocation(),
- diag::err_coroutine_promise_requires_return_function)
- << PromiseRecordDecl;
- S.Diag(PromiseRecordDecl->getLocation(), diag::note_defined_here)
- << PromiseRecordDecl;
- return false;
+ // We need to set 'Fallthrough'. Otherwise the other analysis part might
+ // think the coroutine has defined a return_value method. So it might emit
+ // **false** positive warning. e.g.,
+ //
+ // promise_without_return_func foo() {
+ // co_await something();
+ // }
+ //
+ // Then AnalysisBasedWarning would emit a warning about `foo()` lacking a
+ // co_return statements, which isn't correct.
+ Fallthrough = S.ActOnNullStmt(PromiseRecordDecl->getLocation());
+ if (Fallthrough.isInvalid())
+ return false;
} else if (HasRVoid) {
- // If the unqualified-id return_void is found, flowing off the end of a
- // coroutine is equivalent to a co_return with no operand. Otherwise,
- // flowing off the end of a coroutine results in undefined behavior.
Fallthrough = S.BuildCoreturnStmt(FD.getLocation(), nullptr,
/*IsImplicit*/false);
Fallthrough = S.ActOnFinishFullStmt(Fallthrough.get());
@@ -1465,8 +1781,8 @@ bool CoroutineStmtBuilder::makeOnException() {
if (!S.getLangOpts().CXXExceptions)
return true;
- ExprResult UnhandledException = buildPromiseCall(S, Fn.CoroutinePromise, Loc,
- "unhandled_exception", None);
+ ExprResult UnhandledException = buildPromiseCall(
+ S, Fn.CoroutinePromise, Loc, "unhandled_exception", std::nullopt);
UnhandledException = S.ActOnFinishFullExpr(UnhandledException.get(), Loc,
/*DiscardedValue*/ false);
if (UnhandledException.isInvalid())
@@ -1486,10 +1802,11 @@ bool CoroutineStmtBuilder::makeOnException() {
}
bool CoroutineStmtBuilder::makeReturnObject() {
- // Build implicit 'p.get_return_object()' expression and form initialization
- // of return type from it.
- ExprResult ReturnObject =
- buildPromiseCall(S, Fn.CoroutinePromise, Loc, "get_return_object", None);
+ // [dcl.fct.def.coroutine]p7
+ // The expression promise.get_return_object() is used to initialize the
+ // returned reference or prvalue result object of a call to a coroutine.
+ ExprResult ReturnObject = buildPromiseCall(S, Fn.CoroutinePromise, Loc,
+ "get_return_object", std::nullopt);
if (ReturnObject.isInvalid())
return false;
@@ -1520,13 +1837,22 @@ bool CoroutineStmtBuilder::makeGroDeclAndReturnStmt() {
assert(!FnRetType->isDependentType() &&
"get_return_object type must no longer be dependent");
+ // The call to get_­return_­object is sequenced before the call to
+ // initial_­suspend and is invoked at most once, but there are caveats
+ // regarding on whether the prvalue result object may be initialized
+ // directly/eager or delayed, depending on the types involved.
+ //
+ // More info at https://github.com/cplusplus/papers/issues/1414
+ bool GroMatchesRetType = S.getASTContext().hasSameType(GroType, FnRetType);
+
if (FnRetType->isVoidType()) {
ExprResult Res =
S.ActOnFinishFullExpr(this->ReturnValue, Loc, /*DiscardedValue*/ false);
if (Res.isInvalid())
return false;
- this->ResultDecl = Res.get();
+ if (!GroMatchesRetType)
+ this->ResultDecl = Res.get();
return true;
}
@@ -1539,50 +1865,59 @@ bool CoroutineStmtBuilder::makeGroDeclAndReturnStmt() {
return false;
}
- auto *GroDecl = VarDecl::Create(
- S.Context, &FD, FD.getLocation(), FD.getLocation(),
- &S.PP.getIdentifierTable().get("__coro_gro"), GroType,
- S.Context.getTrivialTypeSourceInfo(GroType, Loc), SC_None);
- GroDecl->setImplicit();
+ StmtResult ReturnStmt;
+ clang::VarDecl *GroDecl = nullptr;
+ if (GroMatchesRetType) {
+ ReturnStmt = S.BuildReturnStmt(Loc, ReturnValue);
+ } else {
+ GroDecl = VarDecl::Create(
+ S.Context, &FD, FD.getLocation(), FD.getLocation(),
+ &S.PP.getIdentifierTable().get("__coro_gro"), GroType,
+ S.Context.getTrivialTypeSourceInfo(GroType, Loc), SC_None);
+ GroDecl->setImplicit();
+
+ S.CheckVariableDeclarationType(GroDecl);
+ if (GroDecl->isInvalidDecl())
+ return false;
- S.CheckVariableDeclarationType(GroDecl);
- if (GroDecl->isInvalidDecl())
- return false;
+ InitializedEntity Entity = InitializedEntity::InitializeVariable(GroDecl);
+ ExprResult Res =
+ S.PerformCopyInitialization(Entity, SourceLocation(), ReturnValue);
+ if (Res.isInvalid())
+ return false;
- InitializedEntity Entity = InitializedEntity::InitializeVariable(GroDecl);
- ExprResult Res =
- S.PerformCopyInitialization(Entity, SourceLocation(), ReturnValue);
- if (Res.isInvalid())
- return false;
+ Res = S.ActOnFinishFullExpr(Res.get(), /*DiscardedValue*/ false);
+ if (Res.isInvalid())
+ return false;
- Res = S.ActOnFinishFullExpr(Res.get(), /*DiscardedValue*/ false);
- if (Res.isInvalid())
- return false;
+ S.AddInitializerToDecl(GroDecl, Res.get(),
+ /*DirectInit=*/false);
- S.AddInitializerToDecl(GroDecl, Res.get(),
- /*DirectInit=*/false);
+ S.FinalizeDeclaration(GroDecl);
- S.FinalizeDeclaration(GroDecl);
+ // Form a declaration statement for the return declaration, so that AST
+ // visitors can more easily find it.
+ StmtResult GroDeclStmt =
+ S.ActOnDeclStmt(S.ConvertDeclToDeclGroup(GroDecl), Loc, Loc);
+ if (GroDeclStmt.isInvalid())
+ return false;
- // Form a declaration statement for the return declaration, so that AST
- // visitors can more easily find it.
- StmtResult GroDeclStmt =
- S.ActOnDeclStmt(S.ConvertDeclToDeclGroup(GroDecl), Loc, Loc);
- if (GroDeclStmt.isInvalid())
- return false;
+ this->ResultDecl = GroDeclStmt.get();
- this->ResultDecl = GroDeclStmt.get();
+ ExprResult declRef = S.BuildDeclRefExpr(GroDecl, GroType, VK_LValue, Loc);
+ if (declRef.isInvalid())
+ return false;
- ExprResult declRef = S.BuildDeclRefExpr(GroDecl, GroType, VK_LValue, Loc);
- if (declRef.isInvalid())
- return false;
+ ReturnStmt = S.BuildReturnStmt(Loc, declRef.get());
+ }
- StmtResult ReturnStmt = S.BuildReturnStmt(Loc, declRef.get());
if (ReturnStmt.isInvalid()) {
noteMemberDeclaredHere(S, ReturnValue, Fn);
return false;
}
- if (cast<clang::ReturnStmt>(ReturnStmt.get())->getNRVOCandidate() == GroDecl)
+
+ if (!GroMatchesRetType &&
+ cast<clang::ReturnStmt>(ReturnStmt.get())->getNRVOCandidate() == GroDecl)
GroDecl->setNRVOVariable(true);
this->ReturnStmt = ReturnStmt.get();
@@ -1625,13 +1960,25 @@ bool Sema::buildCoroutineParameterMoves(SourceLocation Loc) {
if (!ScopeInfo->CoroutineParameterMoves.empty())
return false;
+ // [dcl.fct.def.coroutine]p13
+ // When a coroutine is invoked, after initializing its parameters
+ // ([expr.call]), a copy is created for each coroutine parameter. For a
+ // parameter of type cv T, the copy is a variable of type cv T with
+ // automatic storage duration that is direct-initialized from an xvalue of
+ // type T referring to the parameter.
for (auto *PD : FD->parameters()) {
if (PD->getType()->isDependentType())
continue;
+ // Preserve the referenced state for unused parameter diagnostics.
+ bool DeclReferenced = PD->isReferenced();
+
ExprResult PDRefExpr =
BuildDeclRefExpr(PD, PD->getType().getNonReferenceType(),
ExprValueKind::VK_LValue, Loc); // FIXME: scope?
+
+ PD->setReferenced(DeclReferenced);
+
if (PDRefExpr.isInvalid())
return false;
@@ -1641,8 +1988,10 @@ bool Sema::buildCoroutineParameterMoves(SourceLocation Loc) {
CExpr = castForMoving(*this, PDRefExpr.get());
else
CExpr = PDRefExpr.get();
-
- auto D = buildVarDecl(*this, Loc, PD->getType(), PD->getIdentifier());
+ // [dcl.fct.def.coroutine]p13
+ // The initialization and destruction of each parameter copy occurs in the
+ // context of the called coroutine.
+ auto *D = buildVarDecl(*this, Loc, PD->getType(), PD->getIdentifier());
AddInitializerToDecl(D, CExpr, /*DirectInit=*/true);
// Convert decl to a statement.
@@ -1664,24 +2013,31 @@ StmtResult Sema::BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs Args) {
ClassTemplateDecl *Sema::lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc) {
+ if (StdCoroutineTraitsCache)
+ return StdCoroutineTraitsCache;
+
+ IdentifierInfo const &TraitIdent =
+ PP.getIdentifierTable().get("coroutine_traits");
+
+ NamespaceDecl *StdSpace = getStdNamespace();
+ LookupResult Result(*this, &TraitIdent, FuncLoc, LookupOrdinaryName);
+ bool Found = StdSpace && LookupQualifiedName(Result, StdSpace);
+
+ if (!Found) {
+ // The goggles, we found nothing!
+ Diag(KwLoc, diag::err_implied_coroutine_type_not_found)
+ << "std::coroutine_traits";
+ return nullptr;
+ }
+
+ // coroutine_traits is required to be a class template.
+ StdCoroutineTraitsCache = Result.getAsSingle<ClassTemplateDecl>();
if (!StdCoroutineTraitsCache) {
- if (auto StdExp = lookupStdExperimentalNamespace()) {
- LookupResult Result(*this,
- &PP.getIdentifierTable().get("coroutine_traits"),
- FuncLoc, LookupOrdinaryName);
- if (!LookupQualifiedName(Result, StdExp)) {
- Diag(KwLoc, diag::err_implied_coroutine_type_not_found)
- << "std::experimental::coroutine_traits";
- return nullptr;
- }
- if (!(StdCoroutineTraitsCache =
- Result.getAsSingle<ClassTemplateDecl>())) {
- Result.suppressDiagnostics();
- NamedDecl *Found = *Result.begin();
- Diag(Found->getLocation(), diag::err_malformed_std_coroutine_traits);
- return nullptr;
- }
- }
+ Result.suppressDiagnostics();
+ NamedDecl *Found = *Result.begin();
+ Diag(Found->getLocation(), diag::err_malformed_std_coroutine_traits);
+ return nullptr;
}
+
return StdCoroutineTraitsCache;
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
index 205f58000302..f5bb3e0b42e2 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/CommentDiagnostic.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
@@ -24,8 +25,11 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
+#include "clang/AST/Randstruct.h"
#include "clang/AST/StmtCXX.h"
+#include "clang/AST/Type.h"
#include "clang/Basic/Builtins.h"
+#include "clang/Basic/HLSLRuntime.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
@@ -44,10 +48,12 @@
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cstring>
#include <functional>
+#include <optional>
#include <unordered_map>
using namespace clang;
@@ -141,9 +147,14 @@ bool Sema::isSimpleTypeSpecifier(tok::TokenKind Kind) const {
case tok::kw___bf16:
case tok::kw__Float16:
case tok::kw___float128:
+ case tok::kw___ibm128:
case tok::kw_wchar_t:
case tok::kw_bool:
- case tok::kw___underlying_type:
+ case tok::kw__Accum:
+ case tok::kw__Fract:
+ case tok::kw__Sat:
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) case tok::kw___##Trait:
+#include "clang/Basic/TransformTypeTraits.def"
case tok::kw___auto_type:
return true;
@@ -260,7 +271,8 @@ static ParsedType recoverFromTypeInKnownDependentBase(Sema &S,
ASTContext &Context = S.Context;
auto *NNS = NestedNameSpecifier::Create(Context, nullptr, false,
cast<Type>(Context.getRecordType(RD)));
- QualType T = Context.getDependentNameType(ETK_Typename, NNS, &II);
+ QualType T =
+ Context.getDependentNameType(ElaboratedTypeKeyword::Typename, NNS, &II);
CXXScopeSpec SS;
SS.MakeTrivial(Context, NNS, SourceRange(NameLoc));
@@ -273,6 +285,45 @@ static ParsedType recoverFromTypeInKnownDependentBase(Sema &S,
return S.CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
}
+/// Build a ParsedType for a simple-type-specifier with a nested-name-specifier.
+static ParsedType buildNamedType(Sema &S, const CXXScopeSpec *SS, QualType T,
+ SourceLocation NameLoc,
+ bool WantNontrivialTypeSourceInfo = true) {
+ switch (T->getTypeClass()) {
+ case Type::DeducedTemplateSpecialization:
+ case Type::Enum:
+ case Type::InjectedClassName:
+ case Type::Record:
+ case Type::Typedef:
+ case Type::UnresolvedUsing:
+ case Type::Using:
+ break;
+ // These can never be qualified so an ElaboratedType node
+ // would carry no additional meaning.
+ case Type::ObjCInterface:
+ case Type::ObjCTypeParam:
+ case Type::TemplateTypeParm:
+ return ParsedType::make(T);
+ default:
+ llvm_unreachable("Unexpected Type Class");
+ }
+
+ if (!SS || SS->isEmpty())
+ return ParsedType::make(S.Context.getElaboratedType(
+ ElaboratedTypeKeyword::None, nullptr, T, nullptr));
+
+ QualType ElTy = S.getElaboratedType(ElaboratedTypeKeyword::None, *SS, T);
+ if (!WantNontrivialTypeSourceInfo)
+ return ParsedType::make(ElTy);
+
+ TypeLocBuilder Builder;
+ Builder.pushTypeSpec(T).setNameLoc(NameLoc);
+ ElaboratedTypeLoc ElabTL = Builder.push<ElaboratedTypeLoc>(ElTy);
+ ElabTL.setElaboratedKeywordLoc(SourceLocation());
+ ElabTL.setQualifierLoc(SS->getWithLocInContext(S.Context));
+ return S.CreateParsedType(ElTy, Builder.getTypeSourceInfo(S.Context, ElTy));
+}
+
/// If the identifier refers to a type name within this scope,
/// return the declaration of that type.
///
@@ -282,12 +333,12 @@ static ParsedType recoverFromTypeInKnownDependentBase(Sema &S,
/// opaque pointer (actually a QualType) corresponding to that
/// type. Otherwise, returns NULL.
ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
- Scope *S, CXXScopeSpec *SS,
- bool isClassName, bool HasTrailingDot,
- ParsedType ObjectTypePtr,
+ Scope *S, CXXScopeSpec *SS, bool isClassName,
+ bool HasTrailingDot, ParsedType ObjectTypePtr,
bool IsCtorOrDtorName,
bool WantNontrivialTypeSourceInfo,
bool IsClassTemplateDeductionContext,
+ ImplicitTypenameContext AllowImplicitTypename,
IdentifierInfo **CorrectedII) {
// FIXME: Consider allowing this outside C++1z mode as an extension.
bool AllowDeducedTemplate = IsClassTemplateDeductionContext &&
@@ -314,17 +365,34 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
//
// We therefore do not perform any name lookup if the result would
// refer to a member of an unknown specialization.
- if (!isClassName && !IsCtorOrDtorName)
+ // In C++2a, in several contexts a 'typename' is not required. Also
+ // allow this as an extension.
+ if (AllowImplicitTypename == ImplicitTypenameContext::No &&
+ !isClassName && !IsCtorOrDtorName)
return nullptr;
+ bool IsImplicitTypename = !isClassName && !IsCtorOrDtorName;
+ if (IsImplicitTypename) {
+ SourceLocation QualifiedLoc = SS->getRange().getBegin();
+ if (getLangOpts().CPlusPlus20)
+ Diag(QualifiedLoc, diag::warn_cxx17_compat_implicit_typename);
+ else
+ Diag(QualifiedLoc, diag::ext_implicit_typename)
+ << SS->getScopeRep() << II.getName()
+ << FixItHint::CreateInsertion(QualifiedLoc, "typename ");
+ }
// We know from the grammar that this name refers to a type,
// so build a dependent node to describe the type.
if (WantNontrivialTypeSourceInfo)
- return ActOnTypenameType(S, SourceLocation(), *SS, II, NameLoc).get();
+ return ActOnTypenameType(S, SourceLocation(), *SS, II, NameLoc,
+ (ImplicitTypenameContext)IsImplicitTypename)
+ .get();
NestedNameSpecifierLoc QualifierLoc = SS->getWithLocInContext(Context);
- QualType T = CheckTypenameType(ETK_None, SourceLocation(), QualifierLoc,
- II, NameLoc);
+ QualType T = CheckTypenameType(
+ IsImplicitTypename ? ElaboratedTypeKeyword::Typename
+ : ElaboratedTypeKeyword::None,
+ SourceLocation(), QualifierLoc, II, NameLoc);
return ParsedType::make(T);
}
@@ -371,9 +439,9 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
}
NamedDecl *IIDecl = nullptr;
+ UsingShadowDecl *FoundUsingShadow = nullptr;
switch (Result.getResultKind()) {
case LookupResult::NotFound:
- case LookupResult::NotFoundInCurrentInstantiation:
if (CorrectedII) {
TypeNameValidatorCCC CCC(/*AllowInvalid=*/true, isClassName,
AllowDeducedTemplate);
@@ -413,8 +481,20 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
}
}
}
- // If typo correction failed or was not performed, fall through
- LLVM_FALLTHROUGH;
+ Result.suppressDiagnostics();
+ return nullptr;
+ case LookupResult::NotFoundInCurrentInstantiation:
+ if (AllowImplicitTypename == ImplicitTypenameContext::Yes) {
+ QualType T = Context.getDependentNameType(ElaboratedTypeKeyword::None,
+ SS->getScopeRep(), &II);
+ TypeLocBuilder TLB;
+ DependentNameTypeLoc TL = TLB.push<DependentNameTypeLoc>(T);
+ TL.setElaboratedKeywordLoc(SourceLocation());
+ TL.setQualifierLoc(SS->getWithLocInContext(Context));
+ TL.setNameLoc(NameLoc);
+ return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
+ }
+ [[fallthrough]];
case LookupResult::FoundOverloaded:
case LookupResult::FoundUnresolvedValue:
Result.suppressDiagnostics();
@@ -440,8 +520,10 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
(AllowDeducedTemplate && getAsTypeTemplateDecl(RealRes))) {
if (!IIDecl ||
// Make the selection of the recovery decl deterministic.
- RealRes->getLocation() < IIDecl->getLocation())
+ RealRes->getLocation() < IIDecl->getLocation()) {
IIDecl = RealRes;
+ FoundUsingShadow = dyn_cast<UsingShadowDecl>(*Res);
+ }
}
}
@@ -464,6 +546,7 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
case LookupResult::Found:
IIDecl = Result.getFoundDecl();
+ FoundUsingShadow = dyn_cast<UsingShadowDecl>(*Result.begin());
break;
}
@@ -490,14 +573,21 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
(void)DiagnoseUseOfDecl(IDecl, NameLoc);
if (!HasTrailingDot)
T = Context.getObjCInterfaceType(IDecl);
+ FoundUsingShadow = nullptr; // FIXME: Target must be a TypeDecl.
} else if (auto *UD = dyn_cast<UnresolvedUsingIfExistsDecl>(IIDecl)) {
(void)DiagnoseUseOfDecl(UD, NameLoc);
// Recover with 'int'
- T = Context.IntTy;
+ return ParsedType::make(Context.IntTy);
} else if (AllowDeducedTemplate) {
- if (auto *TD = getAsTypeTemplateDecl(IIDecl))
- T = Context.getDeducedTemplateSpecializationType(TemplateName(TD),
- QualType(), false);
+ if (auto *TD = getAsTypeTemplateDecl(IIDecl)) {
+ assert(!FoundUsingShadow || FoundUsingShadow->getTargetDecl() == TD);
+ TemplateName Template =
+ FoundUsingShadow ? TemplateName(FoundUsingShadow) : TemplateName(TD);
+ T = Context.getDeducedTemplateSpecializationType(Template, QualType(),
+ false);
+ // Don't wrap in a further UsingType.
+ FoundUsingShadow = nullptr;
+ }
}
if (T.isNull()) {
@@ -506,27 +596,10 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
return nullptr;
}
- // NOTE: avoid constructing an ElaboratedType(Loc) if this is a
- // constructor or destructor name (in such a case, the scope specifier
- // will be attached to the enclosing Expr or Decl node).
- if (SS && SS->isNotEmpty() && !IsCtorOrDtorName &&
- !isa<ObjCInterfaceDecl, UnresolvedUsingIfExistsDecl>(IIDecl)) {
- if (WantNontrivialTypeSourceInfo) {
- // Construct a type with type-source information.
- TypeLocBuilder Builder;
- Builder.pushTypeSpec(T).setNameLoc(NameLoc);
-
- T = getElaboratedType(ETK_None, *SS, T);
- ElaboratedTypeLoc ElabTL = Builder.push<ElaboratedTypeLoc>(T);
- ElabTL.setElaboratedKeywordLoc(SourceLocation());
- ElabTL.setQualifierLoc(SS->getWithLocInContext(Context));
- return CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
- } else {
- T = getElaboratedType(ETK_None, *SS, T);
- }
- }
+ if (FoundUsingShadow)
+ T = Context.getUsingType(FoundUsingShadow, T);
- return ParsedType::make(T);
+ return buildNamedType(*this, SS, T, NameLoc, WantNontrivialTypeSourceInfo);
}
// Builds a fake NNS for the given decl context.
@@ -593,7 +666,8 @@ ParsedType Sema::ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
return ParsedType();
}
- QualType T = Context.getDependentNameType(ETK_None, NNS, &II);
+ QualType T =
+ Context.getDependentNameType(ElaboratedTypeKeyword::None, NNS, &II);
// Build type location information. We synthesized the qualifier, so we have
// to build a fake NestedNameSpecifierLoc.
@@ -622,11 +696,16 @@ DeclSpec::TST Sema::isTagName(IdentifierInfo &II, Scope *S) {
if (R.getResultKind() == LookupResult::Found)
if (const TagDecl *TD = R.getAsSingle<TagDecl>()) {
switch (TD->getTagKind()) {
- case TTK_Struct: return DeclSpec::TST_struct;
- case TTK_Interface: return DeclSpec::TST_interface;
- case TTK_Union: return DeclSpec::TST_union;
- case TTK_Class: return DeclSpec::TST_class;
- case TTK_Enum: return DeclSpec::TST_enum;
+ case TagTypeKind::Struct:
+ return DeclSpec::TST_struct;
+ case TagTypeKind::Interface:
+ return DeclSpec::TST_interface;
+ case TagTypeKind::Union:
+ return DeclSpec::TST_union;
+ case TagTypeKind::Class:
+ return DeclSpec::TST_class;
+ case TagTypeKind::Enum:
+ return DeclSpec::TST_enum;
}
}
@@ -802,25 +881,25 @@ static bool isTagTypeWithMissingTag(Sema &SemaRef, LookupResult &Result,
if (TagDecl *Tag = R.getAsSingle<TagDecl>()) {
StringRef FixItTagName;
switch (Tag->getTagKind()) {
- case TTK_Class:
- FixItTagName = "class ";
- break;
+ case TagTypeKind::Class:
+ FixItTagName = "class ";
+ break;
- case TTK_Enum:
- FixItTagName = "enum ";
- break;
+ case TagTypeKind::Enum:
+ FixItTagName = "enum ";
+ break;
- case TTK_Struct:
- FixItTagName = "struct ";
- break;
+ case TagTypeKind::Struct:
+ FixItTagName = "struct ";
+ break;
- case TTK_Interface:
- FixItTagName = "__interface ";
- break;
+ case TagTypeKind::Interface:
+ FixItTagName = "__interface ";
+ break;
- case TTK_Union:
- FixItTagName = "union ";
- break;
+ case TagTypeKind::Union:
+ FixItTagName = "union ";
+ break;
}
StringRef TagName = FixItTagName.drop_back();
@@ -842,21 +921,6 @@ static bool isTagTypeWithMissingTag(Sema &SemaRef, LookupResult &Result,
return false;
}
-/// Build a ParsedType for a simple-type-specifier with a nested-name-specifier.
-static ParsedType buildNestedType(Sema &S, CXXScopeSpec &SS,
- QualType T, SourceLocation NameLoc) {
- ASTContext &Context = S.Context;
-
- TypeLocBuilder Builder;
- Builder.pushTypeSpec(T).setNameLoc(NameLoc);
-
- T = S.getElaboratedType(ETK_None, SS, T);
- ElaboratedTypeLoc ElabTL = Builder.push<ElaboratedTypeLoc>(T);
- ElabTL.setElaboratedKeywordLoc(SourceLocation());
- ElabTL.setQualifierLoc(SS.getWithLocInContext(Context));
- return S.CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
-}
-
Sema::NameClassification Sema::ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name,
SourceLocation NameLoc,
@@ -931,9 +995,13 @@ Corrected:
//
// appeared.
//
- // We also allow this in C99 as an extension.
- if (NamedDecl *D = ImplicitlyDefineFunction(NameLoc, *Name, S))
- return NameClassification::NonType(D);
+ // We also allow this in C99 as an extension. However, this is not
+ // allowed in all language modes as functions without prototypes may not
+ // be supported.
+ if (getLangOpts().implicitFunctionsAllowed()) {
+ if (NamedDecl *D = ImplicitlyDefineFunction(NameLoc, *Name, S))
+ return NameClassification::NonType(D);
+ }
}
if (getLangOpts().CPlusPlus20 && SS.isEmpty() && NextToken.is(tok::less)) {
@@ -1107,12 +1175,16 @@ Corrected:
IsFunctionTemplate = isa<FunctionTemplateDecl>(TD);
IsVarTemplate = isa<VarTemplateDecl>(TD);
+ UsingShadowDecl *FoundUsingShadow =
+ dyn_cast<UsingShadowDecl>(*Result.begin());
+ assert(!FoundUsingShadow ||
+ TD == cast<TemplateDecl>(FoundUsingShadow->getTargetDecl()));
+ Template =
+ FoundUsingShadow ? TemplateName(FoundUsingShadow) : TemplateName(TD);
if (SS.isNotEmpty())
- Template =
- Context.getQualifiedTemplateName(SS.getScopeRep(),
- /*TemplateKeyword=*/false, TD);
- else
- Template = TemplateName(TD);
+ Template = Context.getQualifiedTemplateName(SS.getScopeRep(),
+ /*TemplateKeyword=*/false,
+ Template);
} else {
// All results were non-template functions. This is a function template
// name.
@@ -1133,14 +1205,18 @@ Corrected:
: NameClassification::TypeTemplate(Template);
}
+ auto BuildTypeFor = [&](TypeDecl *Type, NamedDecl *Found) {
+ QualType T = Context.getTypeDeclType(Type);
+ if (const auto *USD = dyn_cast<UsingShadowDecl>(Found))
+ T = Context.getUsingType(USD, T);
+ return buildNamedType(*this, &SS, T, NameLoc);
+ };
+
NamedDecl *FirstDecl = (*Result.begin())->getUnderlyingDecl();
if (TypeDecl *Type = dyn_cast<TypeDecl>(FirstDecl)) {
DiagnoseUseOfDecl(Type, NameLoc);
MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false);
- QualType T = Context.getTypeDeclType(Type);
- if (SS.isNotEmpty())
- return buildNestedType(*this, SS, T, NameLoc);
- return ParsedType::make(T);
+ return BuildTypeFor(Type, *Result.begin());
}
ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(FirstDecl);
@@ -1189,10 +1265,7 @@ Corrected:
isTagTypeWithMissingTag(*this, Result, S, SS, Name, NameLoc)) {
TypeDecl *Type = Result.getAsSingle<TypeDecl>();
DiagnoseUseOfDecl(Type, NameLoc);
- QualType T = Context.getTypeDeclType(Type);
- if (SS.isNotEmpty())
- return buildNestedType(*this, SS, T, NameLoc);
- return ParsedType::make(T);
+ return BuildTypeFor(Type, *Result.begin());
}
// If we already know which single declaration is referenced, just annotate
@@ -1200,7 +1273,8 @@ Corrected:
// member accesses, as we need to defer certain access checks until we know
// the context.
bool ADL = UseArgumentDependentLookup(SS, Result, NextToken.is(tok::l_paren));
- if (Result.isSingleResult() && !ADL && !FirstDecl->isCXXClassMember())
+ if (Result.isSingleResult() && !ADL &&
+ (!FirstDecl->isCXXClassMember() || isa<EnumConstantDecl>(FirstDecl)))
return NameClassification::NonType(Result.getRepresentativeDecl());
// Otherwise, this is an overload set that we will need to resolve later.
@@ -1245,7 +1319,7 @@ ExprResult Sema::ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
Result.resolveKind();
bool ADL = UseArgumentDependentLookup(SS, Result, NextToken.is(tok::l_paren));
- return BuildDeclarationNameExpr(SS, Result, ADL);
+ return BuildDeclarationNameExpr(SS, Result, ADL, /*AcceptInvalidDecl=*/true);
}
ExprResult Sema::ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *E) {
@@ -1450,27 +1524,38 @@ void Sema::ActOnExitFunctionContext() {
assert(CurContext && "Popped translation unit!");
}
-/// Determine whether we allow overloading of the function
-/// PrevDecl with another declaration.
+/// Determine whether overloading is allowed for a new function
+/// declaration considering prior declarations of the same name.
///
/// This routine determines whether overloading is possible, not
-/// whether some new function is actually an overload. It will return
-/// true in C++ (where we can always provide overloads) or, as an
-/// extension, in C when the previous function is already an
-/// overloaded function declaration or has the "overloadable"
-/// attribute.
-static bool AllowOverloadingOfFunction(LookupResult &Previous,
+/// whether a new declaration actually overloads a previous one.
+/// It will return true in C++ (where overloads are alway permitted)
+/// or, as a C extension, when either the new declaration or a
+/// previous one is declared with the 'overloadable' attribute.
+static bool AllowOverloadingOfFunction(const LookupResult &Previous,
ASTContext &Context,
const FunctionDecl *New) {
- if (Context.getLangOpts().CPlusPlus)
+ if (Context.getLangOpts().CPlusPlus || New->hasAttr<OverloadableAttr>())
return true;
- if (Previous.getResultKind() == LookupResult::FoundOverloaded)
- return true;
+ // Multiversion function declarations are not overloads in the
+ // usual sense of that term, but lookup will report that an
+ // overload set was found if more than one multiversion function
+ // declaration is present for the same name. It is therefore
+ // inadequate to assume that some prior declaration(s) had
+ // the overloadable attribute; checking is required. Since one
+ // declaration is permitted to omit the attribute, it is necessary
+ // to check at least two; hence the 'any_of' check below. Note that
+ // the overloadable attribute is implicitly added to declarations
+ // that were required to have it but did not.
+ if (Previous.getResultKind() == LookupResult::FoundOverloaded) {
+ return llvm::any_of(Previous, [](const NamedDecl *ND) {
+ return ND->hasAttr<OverloadableAttr>();
+ });
+ } else if (Previous.getResultKind() == LookupResult::Found)
+ return Previous.getFoundDecl()->hasAttr<OverloadableAttr>();
- return Previous.getResultKind() == LookupResult::Found &&
- (Previous.getFoundDecl()->hasAttr<OverloadableAttr>() ||
- New->hasAttr<OverloadableAttr>());
+ return false;
}
/// Add this decl to the scope shadowed decl chains.
@@ -1533,7 +1618,7 @@ void Sema::PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext) {
}
bool Sema::isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S,
- bool AllowInlineNamespace) {
+ bool AllowInlineNamespace) const {
return IdResolver.isDeclInScope(D, Ctx, S, AllowInlineNamespace);
}
@@ -1576,10 +1661,13 @@ void Sema::FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
/// We've determined that \p New is a redeclaration of \p Old. Check that they
/// have compatible owning modules.
bool Sema::CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old) {
- // FIXME: The Modules TS is not clear about how friend declarations are
- // to be treated. It's not meaningful to have different owning modules for
- // linkage in redeclarations of the same entity, so for now allow the
- // redeclaration and change the owning modules to match.
+ // [module.interface]p7:
+ // A declaration is attached to a module as follows:
+ // - If the declaration is a non-dependent friend declaration that nominates a
+ // function with a declarator-id that is a qualified-id or template-id or that
+ // nominates a class other than with an elaborated-type-specifier with neither
+ // a nested-name-specifier nor a simple-template-id, it is attached to the
+ // module to which the friend is attached ([basic.link]).
if (New->getFriendObjectKind() &&
Old->getOwningModuleForLinkage() != New->getOwningModuleForLinkage()) {
New->setLocalOwningModule(Old->getOwningModule());
@@ -1590,16 +1678,30 @@ bool Sema::CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old) {
Module *NewM = New->getOwningModule();
Module *OldM = Old->getOwningModule();
- if (NewM && NewM->Kind == Module::PrivateModuleFragment)
+ if (NewM && NewM->isPrivateModule())
NewM = NewM->Parent;
- if (OldM && OldM->Kind == Module::PrivateModuleFragment)
+ if (OldM && OldM->isPrivateModule())
OldM = OldM->Parent;
if (NewM == OldM)
return false;
- bool NewIsModuleInterface = NewM && NewM->isModulePurview();
- bool OldIsModuleInterface = OldM && OldM->isModulePurview();
+ if (NewM && OldM) {
+ // A module implementation unit has visibility of the decls in its
+ // implicitly imported interface.
+ if (NewM->isModuleImplementation() && OldM == ThePrimaryInterface)
+ return false;
+
+ // Partitions are part of the module, but a partition could import another
+ // module, so verify that the PMIs agree.
+ if ((NewM->isModulePartition() || OldM->isModulePartition()) &&
+ NewM->getPrimaryModuleInterfaceName() ==
+ OldM->getPrimaryModuleInterfaceName())
+ return false;
+ }
+
+ bool NewIsModuleInterface = NewM && NewM->isNamedModule();
+ bool OldIsModuleInterface = OldM && OldM->isNamedModule();
if (NewIsModuleInterface || OldIsModuleInterface) {
// C++ Modules TS [basic.def.odr] 6.2/6.7 [sic]:
// if a declaration of D [...] appears in the purview of a module, all
@@ -1618,17 +1720,147 @@ bool Sema::CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old) {
return false;
}
-static bool isUsingDecl(NamedDecl *D) {
+// [module.interface]p6:
+// A redeclaration of an entity X is implicitly exported if X was introduced by
+// an exported declaration; otherwise it shall not be exported.
+bool Sema::CheckRedeclarationExported(NamedDecl *New, NamedDecl *Old) {
+ // [module.interface]p1:
+ // An export-declaration shall inhabit a namespace scope.
+ //
+ // So it is meaningless to talk about redeclaration which is not at namespace
+ // scope.
+ if (!New->getLexicalDeclContext()
+ ->getNonTransparentContext()
+ ->isFileContext() ||
+ !Old->getLexicalDeclContext()
+ ->getNonTransparentContext()
+ ->isFileContext())
+ return false;
+
+ bool IsNewExported = New->isInExportDeclContext();
+ bool IsOldExported = Old->isInExportDeclContext();
+
+ // It should be irrevelant if both of them are not exported.
+ if (!IsNewExported && !IsOldExported)
+ return false;
+
+ if (IsOldExported)
+ return false;
+
+ assert(IsNewExported);
+
+ auto Lk = Old->getFormalLinkage();
+ int S = 0;
+ if (Lk == Linkage::Internal)
+ S = 1;
+ else if (Lk == Linkage::Module)
+ S = 2;
+ Diag(New->getLocation(), diag::err_redeclaration_non_exported) << New << S;
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ return true;
+}
+
+// A wrapper function for checking the semantic restrictions of
+// a redeclaration within a module.
+bool Sema::CheckRedeclarationInModule(NamedDecl *New, NamedDecl *Old) {
+ if (CheckRedeclarationModuleOwnership(New, Old))
+ return true;
+
+ if (CheckRedeclarationExported(New, Old))
+ return true;
+
+ return false;
+}
+
+// Check the redefinition in C++20 Modules.
+//
+// [basic.def.odr]p14:
+// For any definable item D with definitions in multiple translation units,
+// - if D is a non-inline non-templated function or variable, or
+// - if the definitions in different translation units do not satisfy the
+// following requirements,
+// the program is ill-formed; a diagnostic is required only if the definable
+// item is attached to a named module and a prior definition is reachable at
+// the point where a later definition occurs.
+// - Each such definition shall not be attached to a named module
+// ([module.unit]).
+// - Each such definition shall consist of the same sequence of tokens, ...
+// ...
+//
+// Return true if the redefinition is not allowed. Return false otherwise.
+bool Sema::IsRedefinitionInModule(const NamedDecl *New,
+ const NamedDecl *Old) const {
+ assert(getASTContext().isSameEntity(New, Old) &&
+ "New and Old are not the same definition, we should diagnostic it "
+ "immediately instead of checking it.");
+ assert(const_cast<Sema *>(this)->isReachable(New) &&
+ const_cast<Sema *>(this)->isReachable(Old) &&
+ "We shouldn't see unreachable definitions here.");
+
+ Module *NewM = New->getOwningModule();
+ Module *OldM = Old->getOwningModule();
+
+ // We only checks for named modules here. The header like modules is skipped.
+ // FIXME: This is not right if we import the header like modules in the module
+ // purview.
+ //
+ // For example, assuming "header.h" provides definition for `D`.
+ // ```C++
+ // //--- M.cppm
+ // export module M;
+ // import "header.h"; // or #include "header.h" but import it by clang modules
+ // actually.
+ //
+ // //--- Use.cpp
+ // import M;
+ // import "header.h"; // or uses clang modules.
+ // ```
+ //
+ // In this case, `D` has multiple definitions in multiple TU (M.cppm and
+ // Use.cpp) and `D` is attached to a named module `M`. The compiler should
+ // reject it. But the current implementation couldn't detect the case since we
+ // don't record the information about the importee modules.
+ //
+ // But this might not be painful in practice. Since the design of C++20 Named
+ // Modules suggests us to use headers in global module fragment instead of
+ // module purview.
+ if (NewM && NewM->isHeaderLikeModule())
+ NewM = nullptr;
+ if (OldM && OldM->isHeaderLikeModule())
+ OldM = nullptr;
+
+ if (!NewM && !OldM)
+ return true;
+
+ // [basic.def.odr]p14.3
+ // Each such definition shall not be attached to a named module
+ // ([module.unit]).
+ if ((NewM && NewM->isNamedModule()) || (OldM && OldM->isNamedModule()))
+ return true;
+
+ // Then New and Old lives in the same TU if their share one same module unit.
+ if (NewM)
+ NewM = NewM->getTopLevelModule();
+ if (OldM)
+ OldM = OldM->getTopLevelModule();
+ return OldM == NewM;
+}
+
+static bool isUsingDeclNotAtClassScope(NamedDecl *D) {
+ if (D->getDeclContext()->isFileContext())
+ return false;
+
return isa<UsingShadowDecl>(D) ||
isa<UnresolvedUsingTypenameDecl>(D) ||
isa<UnresolvedUsingValueDecl>(D);
}
-/// Removes using shadow declarations from the lookup results.
+/// Removes using shadow declarations not at class scope from the lookup
+/// results.
static void RemoveUsingDecls(LookupResult &R) {
LookupResult::Filter F = R.makeFilter();
while (F.hasNext())
- if (isUsingDecl(F.next()))
+ if (isUsingDeclNotAtClassScope(F.next()))
F.erase();
F.done();
@@ -1680,7 +1912,7 @@ bool Sema::mightHaveNonExternalLinkage(const DeclaratorDecl *D) {
// FIXME: This needs to be refactored; some other isInMainFile users want
// these semantics.
static bool isMainFileLoc(const Sema &S, SourceLocation Loc) {
- if (S.TUKind != TU_Complete)
+ if (S.TUKind != TU_Complete || S.getLangOpts().IsHeaderFile)
return false;
return S.SourceMgr.isInMainFile(Loc);
}
@@ -1768,24 +2000,34 @@ void Sema::MarkUnusedFileScopedDecl(const DeclaratorDecl *D) {
UnusedFileScopedDecls.push_back(D);
}
-static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
+static bool ShouldDiagnoseUnusedDecl(const LangOptions &LangOpts,
+ const NamedDecl *D) {
if (D->isInvalidDecl())
return false;
- if (auto *DD = dyn_cast<DecompositionDecl>(D)) {
+ if (const auto *DD = dyn_cast<DecompositionDecl>(D)) {
// For a decomposition declaration, warn if none of the bindings are
// referenced, instead of if the variable itself is referenced (which
// it is, by the bindings' expressions).
- for (auto *BD : DD->bindings())
+ bool IsAllPlaceholders = true;
+ for (const auto *BD : DD->bindings()) {
if (BD->isReferenced())
return false;
+ IsAllPlaceholders = IsAllPlaceholders && BD->isPlaceholderVar(LangOpts);
+ }
+ if (IsAllPlaceholders)
+ return false;
} else if (!D->getDeclName()) {
return false;
} else if (D->isReferenced() || D->isUsed()) {
return false;
}
- if (D->hasAttr<UnusedAttr>() || D->hasAttr<ObjCPreciseLifetimeAttr>())
+ if (D->isPlaceholderVar(LangOpts))
+ return false;
+
+ if (D->hasAttr<UnusedAttr>() || D->hasAttr<ObjCPreciseLifetimeAttr>() ||
+ D->hasAttr<CleanupAttr>())
return false;
if (isa<LabelDecl>(D))
@@ -1811,15 +2053,27 @@ static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
// Types of valid local variables should be complete, so this should succeed.
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
- // White-list anything with an __attribute__((unused)) type.
+ const Expr *Init = VD->getInit();
+ if (const auto *Cleanups = dyn_cast_if_present<ExprWithCleanups>(Init))
+ Init = Cleanups->getSubExpr();
+
const auto *Ty = VD->getType().getTypePtr();
// Only look at the outermost level of typedef.
if (const TypedefType *TT = Ty->getAs<TypedefType>()) {
+ // Allow anything marked with __attribute__((unused)).
if (TT->getDecl()->hasAttr<UnusedAttr>())
return false;
}
+ // Warn for reference variables whose initializtion performs lifetime
+ // extension.
+ if (const auto *MTE = dyn_cast_if_present<MaterializeTemporaryExpr>(Init);
+ MTE && MTE->getExtendingDecl()) {
+ Ty = VD->getType().getNonReferenceType().getTypePtr();
+ Init = MTE->getSubExpr()->IgnoreImplicitAsWritten();
+ }
+
// If we failed to complete the type for some reason, or if the type is
// dependent, don't diagnose the variable.
if (Ty->isIncompleteType() || Ty->isDependentType())
@@ -1834,18 +2088,14 @@ static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
if (Tag->hasAttr<UnusedAttr>())
return false;
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Tag)) {
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(Tag)) {
if (!RD->hasTrivialDestructor() && !RD->hasAttr<WarnUnusedAttr>())
return false;
- if (const Expr *Init = VD->getInit()) {
- if (const ExprWithCleanups *Cleanups =
- dyn_cast<ExprWithCleanups>(Init))
- Init = Cleanups->getSubExpr();
- const CXXConstructExpr *Construct =
- dyn_cast<CXXConstructExpr>(Init);
+ if (Init) {
+ const auto *Construct = dyn_cast<CXXConstructExpr>(Init);
if (Construct && !Construct->isElidable()) {
- CXXConstructorDecl *CD = Construct->getConstructor();
+ const CXXConstructorDecl *CD = Construct->getConstructor();
if (!CD->isTrivial() && !RD->hasAttr<WarnUnusedAttr>() &&
(VD->getInit()->isValueDependent() || !VD->evaluateValue()))
return false;
@@ -1853,10 +2103,16 @@ static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
// Suppress the warning if we don't know how this is constructed, and
// it could possibly be non-trivial constructor.
- if (Init->isTypeDependent())
+ if (Init->isTypeDependent()) {
for (const CXXConstructorDecl *Ctor : RD->ctors())
if (!Ctor->isTrivial())
return false;
+ }
+
+ // Suppress the warning if the constructor is unresolved because
+ // its arguments are dependent.
+ if (isa<CXXUnresolvedConstructExpr>(Init))
+ return false;
}
}
}
@@ -1872,7 +2128,7 @@ static void GenerateFixForUnusedDecl(const NamedDecl *D, ASTContext &Ctx,
if (isa<LabelDecl>(D)) {
SourceLocation AfterColon = Lexer::findLocationAfterToken(
D->getEndLoc(), tok::colon, Ctx.getSourceManager(), Ctx.getLangOpts(),
- true);
+ /*SkipTrailingWhitespaceAndNewline=*/false);
if (AfterColon.isInvalid())
return;
Hint = FixItHint::CreateRemoval(
@@ -1881,21 +2137,32 @@ static void GenerateFixForUnusedDecl(const NamedDecl *D, ASTContext &Ctx,
}
void Sema::DiagnoseUnusedNestedTypedefs(const RecordDecl *D) {
+ DiagnoseUnusedNestedTypedefs(
+ D, [this](SourceLocation Loc, PartialDiagnostic PD) { Diag(Loc, PD); });
+}
+
+void Sema::DiagnoseUnusedNestedTypedefs(const RecordDecl *D,
+ DiagReceiverTy DiagReceiver) {
if (D->getTypeForDecl()->isDependentType())
return;
for (auto *TmpD : D->decls()) {
if (const auto *T = dyn_cast<TypedefNameDecl>(TmpD))
- DiagnoseUnusedDecl(T);
+ DiagnoseUnusedDecl(T, DiagReceiver);
else if(const auto *R = dyn_cast<RecordDecl>(TmpD))
- DiagnoseUnusedNestedTypedefs(R);
+ DiagnoseUnusedNestedTypedefs(R, DiagReceiver);
}
}
+void Sema::DiagnoseUnusedDecl(const NamedDecl *D) {
+ DiagnoseUnusedDecl(
+ D, [this](SourceLocation Loc, PartialDiagnostic PD) { Diag(Loc, PD); });
+}
+
/// DiagnoseUnusedDecl - Emit warnings about declarations that are not used
/// unless they are marked attr(unused).
-void Sema::DiagnoseUnusedDecl(const NamedDecl *D) {
- if (!ShouldDiagnoseUnusedDecl(D))
+void Sema::DiagnoseUnusedDecl(const NamedDecl *D, DiagReceiverTy DiagReceiver) {
+ if (!ShouldDiagnoseUnusedDecl(getLangOpts(), D))
return;
if (auto *TD = dyn_cast<TypedefNameDecl>(D)) {
@@ -1916,12 +2183,19 @@ void Sema::DiagnoseUnusedDecl(const NamedDecl *D) {
else
DiagID = diag::warn_unused_variable;
- Diag(D->getLocation(), DiagID) << D << Hint;
+ SourceLocation DiagLoc = D->getLocation();
+ DiagReceiver(DiagLoc, PDiag(DiagID) << D << Hint << SourceRange(DiagLoc));
}
-void Sema::DiagnoseUnusedButSetDecl(const VarDecl *VD) {
- // If it's not referenced, it can't be set.
- if (!VD->isReferenced() || !VD->getDeclName() || VD->hasAttr<UnusedAttr>())
+void Sema::DiagnoseUnusedButSetDecl(const VarDecl *VD,
+ DiagReceiverTy DiagReceiver) {
+ // If it's not referenced, it can't be set. If it has the Cleanup attribute,
+ // it's not really unused.
+ if (!VD->isReferenced() || !VD->getDeclName() || VD->hasAttr<CleanupAttr>())
+ return;
+
+ // In C++, `_` variables behave as if they were maybe_unused
+ if (VD->hasAttr<UnusedAttr>() || VD->isPlaceholderVar(getLangOpts()))
return;
const auto *Ty = VD->getType().getTypePtr()->getBaseElementTypeUnsafe();
@@ -1935,12 +2209,23 @@ void Sema::DiagnoseUnusedButSetDecl(const VarDecl *VD) {
return;
// In C++, don't warn for record types that don't have WarnUnusedAttr, to
// mimic gcc's behavior.
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Tag)) {
- if (!RD->hasAttr<WarnUnusedAttr>())
- return;
- }
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(Tag);
+ RD && !RD->hasAttr<WarnUnusedAttr>())
+ return;
}
+ // Don't warn about __block Objective-C pointer variables, as they might
+ // be assigned in the block but not used elsewhere for the purpose of lifetime
+ // extension.
+ if (VD->hasAttr<BlocksAttr>() && Ty->isObjCObjectPointerType())
+ return;
+
+ // Don't warn about Objective-C pointer variables with precise lifetime
+ // semantics; they can be used to ensure ARC releases the object at a known
+ // time, which may mean assignment but no other references.
+ if (VD->hasAttr<ObjCPreciseLifetimeAttr>() && Ty->isObjCObjectPointerType())
+ return;
+
auto iter = RefsMinusAssignments.find(VD);
if (iter == RefsMinusAssignments.end())
return;
@@ -1951,10 +2236,11 @@ void Sema::DiagnoseUnusedButSetDecl(const VarDecl *VD) {
return;
unsigned DiagID = isa<ParmVarDecl>(VD) ? diag::warn_unused_but_set_parameter
: diag::warn_unused_but_set_variable;
- Diag(VD->getLocation(), DiagID) << VD;
+ DiagReceiver(VD->getLocation(), PDiag(DiagID) << VD);
}
-static void CheckPoppedLabel(LabelDecl *L, Sema &S) {
+static void CheckPoppedLabel(LabelDecl *L, Sema &S,
+ Sema::DiagReceiverTy DiagReceiver) {
// Verify that we have no forward references left. If so, there was a goto
// or address of a label taken, but no definition of it. Label fwd
// definitions are indicated with a null substmt which is also not a resolved
@@ -1965,16 +2251,35 @@ static void CheckPoppedLabel(LabelDecl *L, Sema &S) {
else
Diagnose = L->getStmt() == nullptr;
if (Diagnose)
- S.Diag(L->getLocation(), diag::err_undeclared_label_use) << L;
+ DiagReceiver(L->getLocation(), S.PDiag(diag::err_undeclared_label_use)
+ << L);
}
void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
- S->mergeNRVOIntoParent();
+ S->applyNRVO();
if (S->decl_empty()) return;
assert((S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope)) &&
"Scope shouldn't contain decls!");
+ /// We visit the decls in non-deterministic order, but we want diagnostics
+ /// emitted in deterministic order. Collect any diagnostic that may be emitted
+ /// and sort the diagnostics before emitting them, after we visited all decls.
+ struct LocAndDiag {
+ SourceLocation Loc;
+ std::optional<SourceLocation> PreviousDeclLoc;
+ PartialDiagnostic PD;
+ };
+ SmallVector<LocAndDiag, 16> DeclDiags;
+ auto addDiag = [&DeclDiags](SourceLocation Loc, PartialDiagnostic PD) {
+ DeclDiags.push_back(LocAndDiag{Loc, std::nullopt, std::move(PD)});
+ };
+ auto addDiagWithPrev = [&DeclDiags](SourceLocation Loc,
+ SourceLocation PreviousDeclLoc,
+ PartialDiagnostic PD) {
+ DeclDiags.push_back(LocAndDiag{Loc, PreviousDeclLoc, std::move(PD)});
+ };
+
for (auto *TmpD : S->decls()) {
assert(TmpD && "This decl didn't get pushed??");
@@ -1983,11 +2288,11 @@ void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
// Diagnose unused variables in this scope.
if (!S->hasUnrecoverableErrorOccurred()) {
- DiagnoseUnusedDecl(D);
+ DiagnoseUnusedDecl(D, addDiag);
if (const auto *RD = dyn_cast<RecordDecl>(D))
- DiagnoseUnusedNestedTypedefs(RD);
+ DiagnoseUnusedNestedTypedefs(RD, addDiag);
if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
- DiagnoseUnusedButSetDecl(VD);
+ DiagnoseUnusedButSetDecl(VD, addDiag);
RefsMinusAssignments.erase(VD);
}
}
@@ -1996,7 +2301,7 @@ void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
// If this was a forward reference to a label, verify it was defined.
if (LabelDecl *LD = dyn_cast<LabelDecl>(D))
- CheckPoppedLabel(LD, *this);
+ CheckPoppedLabel(LD, *this, addDiag);
// Remove this name from our lexical scope, and warn on it if we haven't
// already.
@@ -2004,12 +2309,32 @@ void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
auto ShadowI = ShadowingDecls.find(D);
if (ShadowI != ShadowingDecls.end()) {
if (const auto *FD = dyn_cast<FieldDecl>(ShadowI->second)) {
- Diag(D->getLocation(), diag::warn_ctor_parm_shadows_field)
- << D << FD << FD->getParent();
- Diag(FD->getLocation(), diag::note_previous_declaration);
+ addDiagWithPrev(D->getLocation(), FD->getLocation(),
+ PDiag(diag::warn_ctor_parm_shadows_field)
+ << D << FD << FD->getParent());
}
ShadowingDecls.erase(ShadowI);
}
+
+ if (!getLangOpts().CPlusPlus && S->isClassScope()) {
+ if (auto *FD = dyn_cast<FieldDecl>(TmpD);
+ FD && FD->hasAttr<CountedByAttr>())
+ CheckCountedByAttr(S, FD);
+ }
+ }
+
+ llvm::sort(DeclDiags,
+ [](const LocAndDiag &LHS, const LocAndDiag &RHS) -> bool {
+ // The particular order for diagnostics is not important, as long
+ // as the order is deterministic. Using the raw location is going
+ // to generally be in source order unless there are macro
+ // expansions involved.
+ return LHS.Loc.getRawEncoding() < RHS.Loc.getRawEncoding();
+ });
+ for (const LocAndDiag &D : DeclDiags) {
+ Diag(D.Loc, D.PD);
+ if (D.PreviousDeclLoc)
+ Diag(*D.PreviousDeclLoc, diag::note_previous_declaration);
}
}
@@ -2106,15 +2431,16 @@ FunctionDecl *Sema::CreateBuiltin(IdentifierInfo *II, QualType Type,
if (getLangOpts().CPlusPlus) {
LinkageSpecDecl *CLinkageDecl = LinkageSpecDecl::Create(
- Context, Parent, Loc, Loc, LinkageSpecDecl::lang_c, false);
+ Context, Parent, Loc, Loc, LinkageSpecLanguageIDs::C, false);
CLinkageDecl->setImplicit();
Parent->addDecl(CLinkageDecl);
Parent = CLinkageDecl;
}
FunctionDecl *New = FunctionDecl::Create(Context, Parent, Loc, Loc, II, Type,
- /*TInfo=*/nullptr, SC_Extern, false,
- Type->isFunctionProtoType());
+ /*TInfo=*/nullptr, SC_Extern,
+ getCurFPFeatures().isFPConstrained(),
+ false, Type->isFunctionProtoType());
New->setImplicit();
New->addAttr(BuiltinAttr::CreateImplicit(Context, ID));
@@ -2176,7 +2502,8 @@ NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
if (!ForRedeclaration &&
(Context.BuiltinInfo.isPredefinedLibFunction(ID) ||
Context.BuiltinInfo.isHeaderDependentFunction(ID))) {
- Diag(Loc, diag::ext_implicit_lib_function_decl)
+ Diag(Loc, LangOpts.C99 ? diag::ext_implicit_lib_function_decl_c99
+ : diag::ext_implicit_lib_function_decl)
<< Context.BuiltinInfo.getName(ID) << R;
if (const char *Header = Context.BuiltinInfo.getHeaderName(ID))
Diag(Loc, diag::note_include_header_or_declare)
@@ -2627,6 +2954,8 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
NewAttr = S.mergeDLLImportAttr(D, *ImportA);
else if (const auto *ExportA = dyn_cast<DLLExportAttr>(Attr))
NewAttr = S.mergeDLLExportAttr(D, *ExportA);
+ else if (const auto *EA = dyn_cast<ErrorAttr>(Attr))
+ NewAttr = S.mergeErrorAttr(D, *EA, EA->getUserDiagnostic());
else if (const auto *FA = dyn_cast<FormatAttr>(Attr))
NewAttr = S.mergeFormatAttr(D, *FA, FA->getType(), FA->getFormatIdx(),
FA->getFirstArg());
@@ -2673,6 +3002,13 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
NewAttr = S.mergeEnforceTCBAttr(D, *TCBA);
else if (const auto *TCBLA = dyn_cast<EnforceTCBLeafAttr>(Attr))
NewAttr = S.mergeEnforceTCBLeafAttr(D, *TCBLA);
+ else if (const auto *BTFA = dyn_cast<BTFDeclTagAttr>(Attr))
+ NewAttr = S.mergeBTFDeclTagAttr(D, *BTFA);
+ else if (const auto *NT = dyn_cast<HLSLNumThreadsAttr>(Attr))
+ NewAttr =
+ S.mergeHLSLNumThreadsAttr(D, *NT, NT->getX(), NT->getY(), NT->getZ());
+ else if (const auto *SA = dyn_cast<HLSLShaderAttr>(Attr))
+ NewAttr = S.mergeHLSLShaderAttr(D, *SA, SA->getType());
else if (Attr->shouldInheritEvenIfAlreadyPresent() || !DeclHasAttr(D, Attr))
NewAttr = cast<InheritableAttr>(Attr->clone(S.Context));
@@ -2815,7 +3151,7 @@ static void checkNewAttributesAfterDef(Sema &S, Decl *New, const Decl *Old) {
continue;
} else if (isa<OMPDeclareVariantAttr>(NewAttribute)) {
// We allow to add OMP[Begin]DeclareVariantAttr to be added to
- // declarations after defintions.
+ // declarations after definitions.
++I;
continue;
}
@@ -2954,8 +3290,7 @@ void Sema::mergeDeclAttributes(NamedDecl *New, Decl *Old,
if (const auto *NewAbiTagAttr = New->getAttr<AbiTagAttr>()) {
if (const auto *OldAbiTagAttr = Old->getAttr<AbiTagAttr>()) {
for (const auto &NewTag : NewAbiTagAttr->tags()) {
- if (std::find(OldAbiTagAttr->tags_begin(), OldAbiTagAttr->tags_end(),
- NewTag) == OldAbiTagAttr->tags_end()) {
+ if (!llvm::is_contained(OldAbiTagAttr->tags(), NewTag)) {
Diag(NewAbiTagAttr->getLocation(),
diag::err_new_abi_tag_on_redeclaration)
<< NewTag;
@@ -3052,6 +3387,26 @@ static void mergeParamDeclAttributes(ParmVarDecl *newDecl,
diag::note_carries_dependency_missing_first_decl) << 1/*Param*/;
}
+ // HLSL parameter declarations for inout and out must match between
+ // declarations. In HLSL inout and out are ambiguous at the call site, but
+ // have different calling behavior, so you cannot overload a method based on a
+ // difference between inout and out annotations.
+ if (S.getLangOpts().HLSL) {
+ const auto *NDAttr = newDecl->getAttr<HLSLParamModifierAttr>();
+ const auto *ODAttr = oldDecl->getAttr<HLSLParamModifierAttr>();
+ // We don't need to cover the case where one declaration doesn't have an
+ // attribute. The only possible case there is if one declaration has an `in`
+ // attribute and the other declaration has no attribute. This case is
+ // allowed since parameters are `in` by default.
+ if (NDAttr && ODAttr &&
+ NDAttr->getSpellingListIndex() != ODAttr->getSpellingListIndex()) {
+ S.Diag(newDecl->getLocation(), diag::err_hlsl_param_qualifier_mismatch)
+ << NDAttr << newDecl;
+ S.Diag(oldDecl->getLocation(), diag::note_previous_declaration_as)
+ << ODAttr;
+ }
+ }
+
if (!oldDecl->hasAttrs())
return;
@@ -3074,11 +3429,50 @@ static void mergeParamDeclAttributes(ParmVarDecl *newDecl,
if (!foundAny) newDecl->dropAttrs();
}
+static bool EquivalentArrayTypes(QualType Old, QualType New,
+ const ASTContext &Ctx) {
+
+ auto NoSizeInfo = [&Ctx](QualType Ty) {
+ if (Ty->isIncompleteArrayType() || Ty->isPointerType())
+ return true;
+ if (const auto *VAT = Ctx.getAsVariableArrayType(Ty))
+ return VAT->getSizeModifier() == ArraySizeModifier::Star;
+ return false;
+ };
+
+ // `type[]` is equivalent to `type *` and `type[*]`.
+ if (NoSizeInfo(Old) && NoSizeInfo(New))
+ return true;
+
+ // Don't try to compare VLA sizes, unless one of them has the star modifier.
+ if (Old->isVariableArrayType() && New->isVariableArrayType()) {
+ const auto *OldVAT = Ctx.getAsVariableArrayType(Old);
+ const auto *NewVAT = Ctx.getAsVariableArrayType(New);
+ if ((OldVAT->getSizeModifier() == ArraySizeModifier::Star) ^
+ (NewVAT->getSizeModifier() == ArraySizeModifier::Star))
+ return false;
+ return true;
+ }
+
+ // Only compare size, ignore Size modifiers and CVR.
+ if (Old->isConstantArrayType() && New->isConstantArrayType()) {
+ return Ctx.getAsConstantArrayType(Old)->getSize() ==
+ Ctx.getAsConstantArrayType(New)->getSize();
+ }
+
+ // Don't try to compare dependent sized array
+ if (Old->isDependentSizedArrayType() && New->isDependentSizedArrayType()) {
+ return true;
+ }
+
+ return Old == New;
+}
+
static void mergeParamDeclTypes(ParmVarDecl *NewParam,
const ParmVarDecl *OldParam,
Sema &S) {
- if (auto Oldnullability = OldParam->getType()->getNullability(S.Context)) {
- if (auto Newnullability = NewParam->getType()->getNullability(S.Context)) {
+ if (auto Oldnullability = OldParam->getType()->getNullability()) {
+ if (auto Newnullability = NewParam->getType()->getNullability()) {
if (*Oldnullability != *Newnullability) {
S.Diag(NewParam->getLocation(), diag::warn_mismatched_nullability_attr)
<< DiagNullabilityKind(
@@ -3099,6 +3493,19 @@ static void mergeParamDeclTypes(ParmVarDecl *NewParam,
NewParam->setType(NewT);
}
}
+ const auto *OldParamDT = dyn_cast<DecayedType>(OldParam->getType());
+ const auto *NewParamDT = dyn_cast<DecayedType>(NewParam->getType());
+ if (OldParamDT && NewParamDT &&
+ OldParamDT->getPointeeType() == NewParamDT->getPointeeType()) {
+ QualType OldParamOT = OldParamDT->getOriginalType();
+ QualType NewParamOT = NewParamDT->getOriginalType();
+ if (!EquivalentArrayTypes(OldParamOT, NewParamOT, S.getASTContext())) {
+ S.Diag(NewParam->getLocation(), diag::warn_inconsistent_array_form)
+ << NewParam << NewParamOT;
+ S.Diag(OldParam->getLocation(), diag::note_previous_declaration_as)
+ << OldParamOT;
+ }
+ }
}
namespace {
@@ -3124,6 +3531,10 @@ getNoteDiagForInvalidRedeclaration(const T *Old, const T *New) {
PrevDiag = diag::note_previous_definition;
else if (Old->isImplicit()) {
PrevDiag = diag::note_previous_implicit_declaration;
+ if (const auto *FD = dyn_cast<FunctionDecl>(Old)) {
+ if (FD->getBuiltinID())
+ PrevDiag = diag::note_previous_builtin_declaration;
+ }
if (OldLocation.isInvalid())
OldLocation = New->getLocation();
} else
@@ -3275,8 +3686,8 @@ static void adjustDeclContextForDeclaratorDecl(DeclaratorDecl *NewD,
/// merged with.
///
/// Returns true if there was an error, false otherwise.
-bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
- Scope *S, bool MergeTypeWithOld) {
+bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD, Scope *S,
+ bool MergeTypeWithOld, bool NewDeclIsDefn) {
// Verify the old decl was also a function.
FunctionDecl *Old = OldD->getAsFunction();
if (!Old) {
@@ -3344,23 +3755,31 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
!canRedefineFunction(Old, getLangOpts())) {
if (getLangOpts().MicrosoftExt) {
Diag(New->getLocation(), diag::ext_static_non_static) << New;
- Diag(OldLocation, PrevDiag);
+ Diag(OldLocation, PrevDiag) << Old << Old->getType();
} else {
Diag(New->getLocation(), diag::err_static_non_static) << New;
- Diag(OldLocation, PrevDiag);
+ Diag(OldLocation, PrevDiag) << Old << Old->getType();
return true;
}
}
- if (New->hasAttr<InternalLinkageAttr>() &&
- !Old->hasAttr<InternalLinkageAttr>()) {
- Diag(New->getLocation(), diag::err_internal_linkage_redeclaration)
- << New->getDeclName();
- notePreviousDefinition(Old, New->getLocation());
- New->dropAttr<InternalLinkageAttr>();
+ if (const auto *ILA = New->getAttr<InternalLinkageAttr>())
+ if (!Old->hasAttr<InternalLinkageAttr>()) {
+ Diag(New->getLocation(), diag::err_attribute_missing_on_first_decl)
+ << ILA;
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ New->dropAttr<InternalLinkageAttr>();
+ }
+
+ if (auto *EA = New->getAttr<ErrorAttr>()) {
+ if (!Old->hasAttr<ErrorAttr>()) {
+ Diag(EA->getLocation(), diag::err_attribute_missing_on_first_decl) << EA;
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ New->dropAttr<ErrorAttr>();
+ }
}
- if (CheckRedeclarationModuleOwnership(New, Old))
+ if (CheckRedeclarationInModule(New, Old))
return true;
if (!getLangOpts().CPlusPlus) {
@@ -3398,6 +3817,15 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
}
}
+ // It is not permitted to redeclare an SME function with different SME
+ // attributes.
+ if (IsInvalidSMECallConversion(Old->getType(), New->getType())) {
+ Diag(New->getLocation(), diag::err_sme_attr_mismatch)
+ << New->getType() << Old->getType();
+ Diag(OldLocation, diag::note_previous_declaration);
+ return true;
+ }
+
// If a function is first declared with a calling convention, but is later
// declared or defined without one, all following decls assume the calling
// convention of the first.
@@ -3540,18 +3968,6 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
}
if (getLangOpts().CPlusPlus) {
- // C++1z [over.load]p2
- // Certain function declarations cannot be overloaded:
- // -- Function declarations that differ only in the return type,
- // the exception specification, or both cannot be overloaded.
-
- // Check the exception specifications match. This may recompute the type of
- // both Old and New if it resolved exception specifications, so grab the
- // types again after this. Because this updates the type, we do this before
- // any of the other checks below, which may update the "de facto" NewQType
- // but do not necessarily update the type of New.
- if (CheckEquivalentExceptionSpec(Old, New))
- return true;
OldQType = Context.getCanonicalType(Old->getType());
NewQType = Context.getCanonicalType(New->getType());
@@ -3592,14 +4008,14 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
// defined, copy the deduced value from the old declaration.
AutoType *OldAT = Old->getReturnType()->getContainedAutoType();
if (OldAT && OldAT->isDeduced()) {
- New->setType(
- SubstAutoType(New->getType(),
- OldAT->isDependentType() ? Context.DependentTy
- : OldAT->getDeducedType()));
- NewQType = Context.getCanonicalType(
- SubstAutoType(NewQType,
- OldAT->isDependentType() ? Context.DependentTy
- : OldAT->getDeducedType()));
+ QualType DT = OldAT->getDeducedType();
+ if (DT.isNull()) {
+ New->setType(SubstAutoTypeDependent(New->getType()));
+ NewQType = Context.getCanonicalType(SubstAutoTypeDependent(NewQType));
+ } else {
+ New->setType(SubstAutoType(New->getType(), DT));
+ NewQType = Context.getCanonicalType(SubstAutoType(NewQType, DT));
+ }
}
}
@@ -3673,16 +4089,29 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
}
}
+ // C++1z [over.load]p2
+ // Certain function declarations cannot be overloaded:
+ // -- Function declarations that differ only in the return type,
+ // the exception specification, or both cannot be overloaded.
+
+ // Check the exception specifications match. This may recompute the type of
+ // both Old and New if it resolved exception specifications, so grab the
+ // types again after this. Because this updates the type, we do this before
+ // any of the other checks below, which may update the "de facto" NewQType
+ // but do not necessarily update the type of New.
+ if (CheckEquivalentExceptionSpec(Old, New))
+ return true;
+
// C++11 [dcl.attr.noreturn]p1:
// The first declaration of a function shall specify the noreturn
// attribute if any declaration of that function specifies the noreturn
// attribute.
- const CXX11NoReturnAttr *NRA = New->getAttr<CXX11NoReturnAttr>();
- if (NRA && !Old->hasAttr<CXX11NoReturnAttr>()) {
- Diag(NRA->getLocation(), diag::err_noreturn_missing_on_first_decl);
- Diag(Old->getFirstDecl()->getLocation(),
- diag::note_noreturn_missing_first_decl);
- }
+ if (const auto *NRA = New->getAttr<CXX11NoReturnAttr>())
+ if (!Old->hasAttr<CXX11NoReturnAttr>()) {
+ Diag(NRA->getLocation(), diag::err_attribute_missing_on_first_decl)
+ << NRA;
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ }
// C++11 [dcl.attr.depend]p2:
// The first declaration of a function shall specify the
@@ -3749,39 +4178,108 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
// C: Function types need to be compatible, not identical. This handles
// duplicate function decls like "void f(int); void f(enum X);" properly.
- if (!getLangOpts().CPlusPlus &&
- Context.typesAreCompatible(OldQType, NewQType)) {
- const FunctionType *OldFuncType = OldQType->getAs<FunctionType>();
- const FunctionType *NewFuncType = NewQType->getAs<FunctionType>();
- const FunctionProtoType *OldProto = nullptr;
- if (MergeTypeWithOld && isa<FunctionNoProtoType>(NewFuncType) &&
- (OldProto = dyn_cast<FunctionProtoType>(OldFuncType))) {
- // The old declaration provided a function prototype, but the
- // new declaration does not. Merge in the prototype.
- assert(!OldProto->hasExceptionSpec() && "Exception spec in C");
- SmallVector<QualType, 16> ParamTypes(OldProto->param_types());
- NewQType =
- Context.getFunctionType(NewFuncType->getReturnType(), ParamTypes,
- OldProto->getExtProtoInfo());
- New->setType(NewQType);
- New->setHasInheritedPrototype();
-
- // Synthesize parameters with the same types.
- SmallVector<ParmVarDecl*, 16> Params;
- for (const auto &ParamType : OldProto->param_types()) {
- ParmVarDecl *Param = ParmVarDecl::Create(Context, New, SourceLocation(),
- SourceLocation(), nullptr,
- ParamType, /*TInfo=*/nullptr,
- SC_None, nullptr);
- Param->setScopeInfo(0, Params.size());
- Param->setImplicit();
- Params.push_back(Param);
+ if (!getLangOpts().CPlusPlus) {
+ // C99 6.7.5.3p15: ...If one type has a parameter type list and the other
+ // type is specified by a function definition that contains a (possibly
+ // empty) identifier list, both shall agree in the number of parameters
+ // and the type of each parameter shall be compatible with the type that
+ // results from the application of default argument promotions to the
+ // type of the corresponding identifier. ...
+ // This cannot be handled by ASTContext::typesAreCompatible() because that
+ // doesn't know whether the function type is for a definition or not when
+ // eventually calling ASTContext::mergeFunctionTypes(). The only situation
+ // we need to cover here is that the number of arguments agree as the
+ // default argument promotion rules were already checked by
+ // ASTContext::typesAreCompatible().
+ if (Old->hasPrototype() && !New->hasWrittenPrototype() && NewDeclIsDefn &&
+ Old->getNumParams() != New->getNumParams() && !Old->isImplicit()) {
+ if (Old->hasInheritedPrototype())
+ Old = Old->getCanonicalDecl();
+ Diag(New->getLocation(), diag::err_conflicting_types) << New;
+ Diag(Old->getLocation(), PrevDiag) << Old << Old->getType();
+ return true;
+ }
+
+ // If we are merging two functions where only one of them has a prototype,
+ // we may have enough information to decide to issue a diagnostic that the
+ // function without a protoype will change behavior in C23. This handles
+ // cases like:
+ // void i(); void i(int j);
+ // void i(int j); void i();
+ // void i(); void i(int j) {}
+ // See ActOnFinishFunctionBody() for other cases of the behavior change
+ // diagnostic. See GetFullTypeForDeclarator() for handling of a function
+ // type without a prototype.
+ if (New->hasWrittenPrototype() != Old->hasWrittenPrototype() &&
+ !New->isImplicit() && !Old->isImplicit()) {
+ const FunctionDecl *WithProto, *WithoutProto;
+ if (New->hasWrittenPrototype()) {
+ WithProto = New;
+ WithoutProto = Old;
+ } else {
+ WithProto = Old;
+ WithoutProto = New;
}
- New->setParams(Params);
+ if (WithProto->getNumParams() != 0) {
+ if (WithoutProto->getBuiltinID() == 0 && !WithoutProto->isImplicit()) {
+ // The one without the prototype will be changing behavior in C23, so
+ // warn about that one so long as it's a user-visible declaration.
+ bool IsWithoutProtoADef = false, IsWithProtoADef = false;
+ if (WithoutProto == New)
+ IsWithoutProtoADef = NewDeclIsDefn;
+ else
+ IsWithProtoADef = NewDeclIsDefn;
+ Diag(WithoutProto->getLocation(),
+ diag::warn_non_prototype_changes_behavior)
+ << IsWithoutProtoADef << (WithoutProto->getNumParams() ? 0 : 1)
+ << (WithoutProto == Old) << IsWithProtoADef;
+
+ // The reason the one without the prototype will be changing behavior
+ // is because of the one with the prototype, so note that so long as
+ // it's a user-visible declaration. There is one exception to this:
+ // when the new declaration is a definition without a prototype, the
+ // old declaration with a prototype is not the cause of the issue,
+ // and that does not need to be noted because the one with a
+ // prototype will not change behavior in C23.
+ if (WithProto->getBuiltinID() == 0 && !WithProto->isImplicit() &&
+ !IsWithoutProtoADef)
+ Diag(WithProto->getLocation(), diag::note_conflicting_prototype);
+ }
+ }
}
- return MergeCompatibleFunctionDecls(New, Old, S, MergeTypeWithOld);
+ if (Context.typesAreCompatible(OldQType, NewQType)) {
+ const FunctionType *OldFuncType = OldQType->getAs<FunctionType>();
+ const FunctionType *NewFuncType = NewQType->getAs<FunctionType>();
+ const FunctionProtoType *OldProto = nullptr;
+ if (MergeTypeWithOld && isa<FunctionNoProtoType>(NewFuncType) &&
+ (OldProto = dyn_cast<FunctionProtoType>(OldFuncType))) {
+ // The old declaration provided a function prototype, but the
+ // new declaration does not. Merge in the prototype.
+ assert(!OldProto->hasExceptionSpec() && "Exception spec in C");
+ NewQType = Context.getFunctionType(NewFuncType->getReturnType(),
+ OldProto->getParamTypes(),
+ OldProto->getExtProtoInfo());
+ New->setType(NewQType);
+ New->setHasInheritedPrototype();
+
+ // Synthesize parameters with the same types.
+ SmallVector<ParmVarDecl *, 16> Params;
+ for (const auto &ParamType : OldProto->param_types()) {
+ ParmVarDecl *Param = ParmVarDecl::Create(
+ Context, New, SourceLocation(), SourceLocation(), nullptr,
+ ParamType, /*TInfo=*/nullptr, SC_None, nullptr);
+ Param->setScopeInfo(0, Params.size());
+ Param->setImplicit();
+ Params.push_back(Param);
+ }
+
+ New->setParams(Params);
+ }
+
+ return MergeCompatibleFunctionDecls(New, Old, S, MergeTypeWithOld);
+ }
}
// Check if the function types are compatible when pointer size address
@@ -3892,8 +4390,8 @@ bool Sema::MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
mergeDeclAttributes(New, Old);
// Merge "pure" flag.
- if (Old->isPure())
- New->setPure();
+ if (Old->isPureVirtual())
+ New->setIsPureVirtual();
// Merge "used" flag.
if (Old->getMostRecentDecl()->isUsed(false))
@@ -3957,7 +4455,7 @@ static void diagnoseVarDeclTypeMismatch(Sema &S, VarDecl *New, VarDecl* Old) {
SourceLocation OldLocation;
std::tie(PrevDiag, OldLocation)
= getNoteDiagForInvalidRedeclaration(Old, New);
- S.Diag(OldLocation, PrevDiag);
+ S.Diag(OldLocation, PrevDiag) << Old << Old->getType();
New->setInvalidDecl();
}
@@ -3970,7 +4468,7 @@ static void diagnoseVarDeclTypeMismatch(Sema &S, VarDecl *New, VarDecl* Old) {
/// is attached.
void Sema::MergeVarDeclTypes(VarDecl *New, VarDecl *Old,
bool MergeTypeWithOld) {
- if (New->isInvalidDecl() || Old->isInvalidDecl())
+ if (New->isInvalidDecl() || Old->isInvalidDecl() || New->getType()->containsErrors() || Old->getType()->containsErrors())
return;
QualType MergedT;
@@ -4160,18 +4658,18 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
Old->getStorageClass() == SC_None &&
!Old->hasAttr<WeakImportAttr>()) {
Diag(New->getLocation(), diag::warn_weak_import) << New->getDeclName();
- notePreviousDefinition(Old, New->getLocation());
+ Diag(Old->getLocation(), diag::note_previous_declaration);
// Remove weak_import attribute on new declaration.
New->dropAttr<WeakImportAttr>();
}
- if (New->hasAttr<InternalLinkageAttr>() &&
- !Old->hasAttr<InternalLinkageAttr>()) {
- Diag(New->getLocation(), diag::err_internal_linkage_redeclaration)
- << New->getDeclName();
- notePreviousDefinition(Old, New->getLocation());
- New->dropAttr<InternalLinkageAttr>();
- }
+ if (const auto *ILA = New->getAttr<InternalLinkageAttr>())
+ if (!Old->hasAttr<InternalLinkageAttr>()) {
+ Diag(New->getLocation(), diag::err_attribute_missing_on_first_decl)
+ << ILA;
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ New->dropAttr<InternalLinkageAttr>();
+ }
// Merge the types.
VarDecl *MostRecent = Old->getMostRecentDecl();
@@ -4239,7 +4737,7 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
return New->setInvalidDecl();
}
- if (CheckRedeclarationModuleOwnership(New, Old))
+ if (CheckRedeclarationInModule(New, Old))
return;
// Variables with external linkage are analyzed in FinalizeDeclaratorGroup.
@@ -4291,15 +4789,15 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
}
// C++ doesn't have tentative definitions, so go right ahead and check here.
- if (getLangOpts().CPlusPlus &&
- New->isThisDeclarationADefinition() == VarDecl::Definition) {
+ if (getLangOpts().CPlusPlus) {
if (Old->isStaticDataMember() && Old->getCanonicalDecl()->isInline() &&
Old->getCanonicalDecl()->isConstexpr()) {
// This definition won't be a definition any more once it's been merged.
Diag(New->getLocation(),
diag::warn_deprecated_redundant_constexpr_static_def);
- } else if (VarDecl *Def = Old->getDefinition()) {
- if (checkVarDeclRedefinition(Def, New))
+ } else if (New->isThisDeclarationADefinition() == VarDecl::Definition) {
+ VarDecl *Def = Old->getDefinition();
+ if (Def && checkVarDeclRedefinition(Def, New))
return;
}
}
@@ -4334,7 +4832,7 @@ void Sema::notePreviousDefinition(const NamedDecl *Old, SourceLocation New) {
auto FNewDecLoc = SrcMgr.getDecomposedLoc(New);
auto FOldDecLoc = SrcMgr.getDecomposedLoc(Old->getLocation());
auto *FNew = SrcMgr.getFileEntryForID(FNewDecLoc.first);
- auto *FOld = SrcMgr.getFileEntryForID(FOldDecLoc.first);
+ auto FOld = SrcMgr.getFileEntryRefForID(FOldDecLoc.first);
auto &HSI = PP.getHeaderSearchInfo();
StringRef HdrFilename =
SrcMgr.getFilename(SrcMgr.getSpellingLoc(Old->getLocation()));
@@ -4372,7 +4870,7 @@ void Sema::notePreviousDefinition(const NamedDecl *Old, SourceLocation New) {
EmittedDiag |= noteFromModuleOrInclude(getCurrentModule(), NewIncLoc);
// If the header has no guards, emit a note suggesting one.
- if (FOld && !HSI.isFileMultipleIncludeGuarded(FOld))
+ if (FOld && !HSI.isFileMultipleIncludeGuarded(*FOld))
Diag(Old->getLocation(), diag::note_use_ifdef_guards);
if (EmittedDiag)
@@ -4388,10 +4886,9 @@ void Sema::notePreviousDefinition(const NamedDecl *Old, SourceLocation New) {
/// of the same variable. Either diagnose or fix the problem.
bool Sema::checkVarDeclRedefinition(VarDecl *Old, VarDecl *New) {
if (!hasVisibleDefinition(Old) &&
- (New->getFormalLinkage() == InternalLinkage ||
- New->isInline() ||
- New->getDescribedVarTemplate() ||
- New->getNumTemplateParameterLists() ||
+ (New->getFormalLinkage() == Linkage::Internal || New->isInline() ||
+ isa<VarTemplateSpecializationDecl>(New) ||
+ New->getDescribedVarTemplate() || New->getNumTemplateParameterLists() ||
New->getDeclContext()->isDependentContext())) {
// The previous definition is hidden, and multiple definitions are
// permitted (in separate TUs). Demote this to a declaration.
@@ -4412,11 +4909,12 @@ bool Sema::checkVarDeclRedefinition(VarDecl *Old, VarDecl *New) {
/// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
/// no declarator (e.g. "struct foo;") is parsed.
-Decl *
-Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
- RecordDecl *&AnonRecord) {
- return ParsedFreeStandingDeclSpec(S, AS, DS, MultiTemplateParamsArg(), false,
- AnonRecord);
+Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
+ DeclSpec &DS,
+ const ParsedAttributesView &DeclAttrs,
+ RecordDecl *&AnonRecord) {
+ return ParsedFreeStandingDeclSpec(
+ S, AS, DS, DeclAttrs, MultiTemplateParamsArg(), false, AnonRecord);
}
// The MS ABI changed between VS2013 and VS2015 with regard to numbers used to
@@ -4609,7 +5107,8 @@ void Sema::setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TagFromDeclSpec->setTypedefNameForAnonDecl(NewTD);
}
-static unsigned GetDiagnosticTypeSpecifierID(DeclSpec::TST T) {
+static unsigned GetDiagnosticTypeSpecifierID(const DeclSpec &DS) {
+ DeclSpec::TST T = DS.getTypeSpecType();
switch (T) {
case DeclSpec::TST_class:
return 0;
@@ -4620,20 +5119,26 @@ static unsigned GetDiagnosticTypeSpecifierID(DeclSpec::TST T) {
case DeclSpec::TST_union:
return 3;
case DeclSpec::TST_enum:
+ if (const auto *ED = dyn_cast<EnumDecl>(DS.getRepAsDecl())) {
+ if (ED->isScopedUsingClassTag())
+ return 5;
+ if (ED->isScoped())
+ return 6;
+ }
return 4;
default:
llvm_unreachable("unexpected type specifier");
}
}
-
/// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
/// no declarator (e.g. "struct foo;") is parsed. It also accepts template
/// parameters to cope with template friend declarations.
-Decl *
-Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
- MultiTemplateParamsArg TemplateParams,
- bool IsExplicitInstantiation,
- RecordDecl *&AnonRecord) {
+Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
+ DeclSpec &DS,
+ const ParsedAttributesView &DeclAttrs,
+ MultiTemplateParamsArg TemplateParams,
+ bool IsExplicitInstantiation,
+ RecordDecl *&AnonRecord) {
Decl *TagD = nullptr;
TagDecl *Tag = nullptr;
if (DS.getTypeSpecType() == DeclSpec::TST_class ||
@@ -4682,7 +5187,7 @@ Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
// the declaration of a function or function template
if (Tag)
Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_tag)
- << GetDiagnosticTypeSpecifierID(DS.getTypeSpecType())
+ << GetDiagnosticTypeSpecifierID(DS)
<< static_cast<int>(DS.getConstexprSpecifier());
else
Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_wrong_decl_kind)
@@ -4716,7 +5221,7 @@ Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
//
// Per C++ [dcl.enum]p1, an opaque-enum-declaration can't either.
Diag(SS.getBeginLoc(), diag::err_standalone_class_nested_name_specifier)
- << GetDiagnosticTypeSpecifierID(DS.getTypeSpecType()) << SS.getRange();
+ << GetDiagnosticTypeSpecifierID(DS) << SS.getRange();
return nullptr;
}
@@ -4806,8 +5311,8 @@ Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
if (DS.isModulePrivateSpecified() &&
Tag && Tag->getDeclContext()->isFunctionOrMethod())
Diag(DS.getModulePrivateSpecLoc(), diag::err_module_private_local_class)
- << Tag->getTagKind()
- << FixItHint::CreateRemoval(DS.getModulePrivateSpecLoc());
+ << llvm::to_underlying(Tag->getTagKind())
+ << FixItHint::CreateRemoval(DS.getModulePrivateSpecLoc());
ActOnDocumentableDecl(TagD);
@@ -4872,16 +5377,28 @@ Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
// Warn about ignored type attributes, for example:
// __attribute__((aligned)) struct A;
// Attributes should be placed after tag to apply to type declaration.
- if (!DS.getAttributes().empty()) {
+ if (!DS.getAttributes().empty() || !DeclAttrs.empty()) {
DeclSpec::TST TypeSpecType = DS.getTypeSpecType();
if (TypeSpecType == DeclSpec::TST_class ||
TypeSpecType == DeclSpec::TST_struct ||
TypeSpecType == DeclSpec::TST_interface ||
TypeSpecType == DeclSpec::TST_union ||
TypeSpecType == DeclSpec::TST_enum) {
- for (const ParsedAttr &AL : DS.getAttributes())
- Diag(AL.getLoc(), diag::warn_declspec_attribute_ignored)
- << AL << GetDiagnosticTypeSpecifierID(TypeSpecType);
+
+ auto EmitAttributeDiagnostic = [this, &DS](const ParsedAttr &AL) {
+ unsigned DiagnosticId = diag::warn_declspec_attribute_ignored;
+ if (AL.isAlignas() && !getLangOpts().CPlusPlus)
+ DiagnosticId = diag::warn_attribute_ignored;
+ else if (AL.isRegularKeywordAttribute())
+ DiagnosticId = diag::err_declspec_keyword_has_no_effect;
+ else
+ DiagnosticId = diag::warn_declspec_attribute_ignored;
+ Diag(AL.getLoc(), DiagnosticId)
+ << AL << GetDiagnosticTypeSpecifierID(DS);
+ };
+
+ llvm::for_each(DS.getAttributes(), EmitAttributeDiagnostic);
+ llvm::for_each(DeclAttrs, EmitAttributeDiagnostic);
}
}
@@ -4892,13 +5409,14 @@ Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
/// check if there's an existing declaration that can't be overloaded.
///
/// \return true if this is a forbidden redeclaration
-static bool CheckAnonMemberRedeclaration(Sema &SemaRef,
- Scope *S,
+static bool CheckAnonMemberRedeclaration(Sema &SemaRef, Scope *S,
DeclContext *Owner,
DeclarationName Name,
- SourceLocation NameLoc,
- bool IsUnion) {
- LookupResult R(SemaRef, Name, NameLoc, Sema::LookupMemberName,
+ SourceLocation NameLoc, bool IsUnion,
+ StorageClass SC) {
+ LookupResult R(SemaRef, Name, NameLoc,
+ Owner->isRecord() ? Sema::LookupMemberName
+ : Sema::LookupOrdinaryName,
Sema::ForVisibleRedeclaration);
if (!SemaRef.LookupName(R, S)) return false;
@@ -4909,6 +5427,14 @@ static bool CheckAnonMemberRedeclaration(Sema &SemaRef,
if (!SemaRef.isDeclInScope(PrevDecl, Owner, S))
return false;
+ if (SC == StorageClass::SC_None &&
+ PrevDecl->isPlaceholderVar(SemaRef.getLangOpts()) &&
+ (Owner->isFunctionOrMethod() || Owner->isRecord())) {
+ if (!Owner->isRecord())
+ SemaRef.DiagPlaceholderVariableDefinition(NameLoc);
+ return false;
+ }
+
SemaRef.Diag(NameLoc, diag::err_anonymous_record_member_redecl)
<< IsUnion << Name;
SemaRef.Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
@@ -4916,6 +5442,36 @@ static bool CheckAnonMemberRedeclaration(Sema &SemaRef,
return true;
}
+void Sema::ActOnDefinedDeclarationSpecifier(Decl *D) {
+ if (auto *RD = dyn_cast_if_present<RecordDecl>(D))
+ DiagPlaceholderFieldDeclDefinitions(RD);
+}
+
+/// Emit diagnostic warnings for placeholder members.
+/// We can only do that after the class is fully constructed,
+/// as anonymous union/structs can insert placeholders
+/// in their parent scope (which might be a Record).
+void Sema::DiagPlaceholderFieldDeclDefinitions(RecordDecl *Record) {
+ if (!getLangOpts().CPlusPlus)
+ return;
+
+ // This function can be parsed before we have validated the
+ // structure as an anonymous struct
+ if (Record->isAnonymousStructOrUnion())
+ return;
+
+ const NamedDecl *First = 0;
+ for (const Decl *D : Record->decls()) {
+ const NamedDecl *ND = dyn_cast<NamedDecl>(D);
+ if (!ND || !ND->isPlaceholderVar(getLangOpts()))
+ continue;
+ if (!First)
+ First = ND;
+ else
+ DiagPlaceholderVariableDefinition(ND->getLocation());
+ }
+}
+
/// InjectAnonymousStructOrUnionMembers - Inject the members of the
/// anonymous struct or union AnonRecord into the owning context Owner
/// and scope S. This routine will be invoked just after we realize
@@ -4935,6 +5491,7 @@ static bool CheckAnonMemberRedeclaration(Sema &SemaRef,
static bool
InjectAnonymousStructOrUnionMembers(Sema &SemaRef, Scope *S, DeclContext *Owner,
RecordDecl *AnonRecord, AccessSpecifier AS,
+ StorageClass SC,
SmallVectorImpl<NamedDecl *> &Chaining) {
bool Invalid = false;
@@ -4944,8 +5501,8 @@ InjectAnonymousStructOrUnionMembers(Sema &SemaRef, Scope *S, DeclContext *Owner,
cast<NamedDecl>(D)->getDeclName()) {
ValueDecl *VD = cast<ValueDecl>(D);
if (CheckAnonMemberRedeclaration(SemaRef, S, Owner, VD->getDeclName(),
- VD->getLocation(),
- AnonRecord->isUnion())) {
+ VD->getLocation(), AnonRecord->isUnion(),
+ SC)) {
// C++ [class.union]p2:
// The names of the members of an anonymous union shall be
// distinct from the names of any other entity in the
@@ -5244,8 +5801,9 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
Diag(DS.getBeginLoc(), diag::ext_no_declarators) << DS.getSourceRange();
// Mock up a declarator.
- Declarator Dc(DS, DeclaratorContext::Member);
- TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
+ Declarator Dc(DS, ParsedAttributesView::none(), DeclaratorContext::Member);
+ StorageClass SC = StorageClassSpecToVarDeclStorageClass(DS);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc);
assert(TInfo && "couldn't build declarator info for anonymous struct/union");
// Create a declaration for this anonymous struct/union.
@@ -5263,7 +5821,6 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
FieldCollector->Add(cast<FieldDecl>(Anon));
} else {
DeclSpec::SCS SCSpec = DS.getStorageClassSpec();
- StorageClass SC = StorageClassSpecToVarDeclStorageClass(DS);
if (SCSpec == DeclSpec::SCS_mutable) {
// mutable can only appear on non-static class members, so it's always
// an error here
@@ -5272,17 +5829,16 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
SC = SC_None;
}
- assert(DS.getAttributes().empty() && "No attribute expected");
Anon = VarDecl::Create(Context, Owner, DS.getBeginLoc(),
Record->getLocation(), /*IdentifierInfo=*/nullptr,
Context.getTypeDeclType(Record), TInfo, SC);
+ ProcessDeclAttributes(S, Anon, Dc);
// Default-initialize the implicit variable. This initialization will be
// trivial in almost all cases, except if a union member has an in-class
// initializer:
// union { int n = 0; };
- if (!Invalid)
- ActOnUninitializedDecl(Anon);
+ ActOnUninitializedDecl(Anon);
}
Anon->setImplicit();
@@ -5300,7 +5856,8 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
SmallVector<NamedDecl*, 2> Chain;
Chain.push_back(Anon);
- if (InjectAnonymousStructOrUnionMembers(*this, S, Owner, Record, AS, Chain))
+ if (InjectAnonymousStructOrUnionMembers(*this, S, Owner, Record, AS, SC,
+ Chain))
Invalid = true;
if (VarDecl *NewVD = dyn_cast<VarDecl>(Anon)) {
@@ -5342,8 +5899,8 @@ Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
assert(Record && "expected a record!");
// Mock up a declarator.
- Declarator Dc(DS, DeclaratorContext::TypeName);
- TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
+ Declarator Dc(DS, ParsedAttributesView::none(), DeclaratorContext::TypeName);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc);
assert(TInfo && "couldn't build declarator info for anonymous struct");
auto *ParentDecl = cast<RecordDecl>(CurContext);
@@ -5369,8 +5926,9 @@ Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *RecordDef = Record->getDefinition();
if (RequireCompleteSizedType(Anon->getLocation(), RecTy,
diag::err_field_incomplete_or_sizeless) ||
- InjectAnonymousStructOrUnionMembers(*this, S, CurContext, RecordDef,
- AS_none, Chain)) {
+ InjectAnonymousStructOrUnionMembers(
+ *this, S, CurContext, RecordDef, AS_none,
+ StorageClassSpecToVarDeclStorageClass(DS), Chain)) {
Anon->setInvalidDecl();
ParentDecl->setInvalidDecl();
}
@@ -5415,7 +5973,7 @@ Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
diag::err_deduction_guide_name_not_class_template)
<< (int)getTemplateNameKindForDiagnostics(TN) << TN;
if (Template)
- Diag(Template->getLocation(), diag::note_template_decl_here);
+ NoteTemplateLocation(*Template);
return DeclarationNameInfo();
}
@@ -5551,7 +6109,7 @@ static bool hasSimilarParameters(ASTContext &Context,
return true;
}
-/// NeedsRebuildingInCurrentInstantiation - Checks whether the given
+/// RebuildDeclaratorInCurrentInstantiation - Checks whether the given
/// declarator needs to be rebuilt in the current instantiation.
/// Any bits of declarator which appear before the name are valid for
/// consideration here. That's specifically the type in the decl spec
@@ -5569,7 +6127,9 @@ static bool RebuildDeclaratorInCurrentInstantiation(Sema &S, Declarator &D,
switch (DS.getTypeSpecType()) {
case DeclSpec::TST_typename:
case DeclSpec::TST_typeofType:
- case DeclSpec::TST_underlyingType:
+ case DeclSpec::TST_typeof_unqualType:
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) case DeclSpec::TST_##Trait:
+#include "clang/Basic/TransformTypeTraits.def"
case DeclSpec::TST_atomic: {
// Grab the type from the parser.
TypeSourceInfo *TSI = nullptr;
@@ -5593,6 +6153,7 @@ static bool RebuildDeclaratorInCurrentInstantiation(Sema &S, Declarator &D,
}
case DeclSpec::TST_decltype:
+ case DeclSpec::TST_typeof_unqualExpr:
case DeclSpec::TST_typeofExpr: {
Expr *E = DS.getRepAsExpr();
ExprResult Result = S.RebuildExprInCurrentInstantiation(E);
@@ -5625,6 +6186,13 @@ static bool RebuildDeclaratorInCurrentInstantiation(Sema &S, Declarator &D,
return false;
}
+/// Returns true if the declaration is declared in a system header or from a
+/// system macro.
+static bool isFromSystemHeader(SourceManager &SM, const Decl *D) {
+ return SM.isInSystemHeader(D->getLocation()) ||
+ SM.isInSystemMacro(D->getLocation());
+}
+
void Sema::warnOnReservedIdentifier(const NamedDecl *D) {
// Avoid warning twice on the same identifier, and don't warn on redeclaration
// of system decl.
@@ -5632,19 +6200,33 @@ void Sema::warnOnReservedIdentifier(const NamedDecl *D) {
return;
ReservedIdentifierStatus Status = D->isReserved(getLangOpts());
if (Status != ReservedIdentifierStatus::NotReserved &&
- !Context.getSourceManager().isInSystemHeader(D->getLocation()))
+ !isFromSystemHeader(Context.getSourceManager(), D)) {
Diag(D->getLocation(), diag::warn_reserved_extern_symbol)
<< D << static_cast<int>(Status);
+ }
}
Decl *Sema::ActOnDeclarator(Scope *S, Declarator &D) {
D.setFunctionDefinitionKind(FunctionDefinitionKind::Declaration);
+
+ // Check if we are in an `omp begin/end declare variant` scope. Handle this
+ // declaration only if the `bind_to_declaration` extension is set.
+ SmallVector<FunctionDecl *, 4> Bases;
+ if (LangOpts.OpenMP && isInOpenMPDeclareVariantScope())
+ if (getOMPTraitInfoForSurroundingScope()->isExtensionActive(llvm::omp::TraitProperty::
+ implementation_extension_bind_to_declaration))
+ ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
+ S, D, MultiTemplateParamsArg(), Bases);
+
Decl *Dcl = HandleDeclarator(S, D, MultiTemplateParamsArg());
if (OriginalLexicalContext && OriginalLexicalContext->isObjCContainer() &&
Dcl && Dcl->getDeclContext()->isFileContext())
Dcl->setTopLevelDeclInObjCContainer();
+ if (!Bases.empty())
+ ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(Dcl, Bases);
+
return Dcl;
}
@@ -5730,7 +6312,15 @@ bool Sema::diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
else if (isa<BlockDecl>(Cur))
Diag(Loc, diag::err_invalid_declarator_in_block)
<< Name << SS.getRange();
- else
+ else if (isa<ExportDecl>(Cur)) {
+ if (!isa<NamespaceDecl>(DC))
+ Diag(Loc, diag::err_export_non_namespace_scope_name)
+ << Name << SS.getRange();
+ else
+ // The cases that DC is not NamespaceDecl should be handled in
+ // CheckRedeclarationExported.
+ return false;
+ } else
Diag(Loc, diag::err_invalid_declarator_scope)
<< Name << cast<NamedDecl>(Cur) << cast<NamedDecl>(DC) << SS.getRange();
@@ -5761,8 +6351,8 @@ bool Sema::diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
NestedNameSpecifierLoc SpecLoc(SS.getScopeRep(), SS.location_data());
while (SpecLoc.getPrefix())
SpecLoc = SpecLoc.getPrefix();
- if (dyn_cast_or_null<DecltypeType>(
- SpecLoc.getNestedNameSpecifier()->getAsType()))
+ if (isa_and_nonnull<DecltypeType>(
+ SpecLoc.getNestedNameSpecifier()->getAsType()))
Diag(Loc, diag::err_decltype_in_declarator)
<< SpecLoc.getTypeLoc().getSourceRange();
@@ -5848,7 +6438,7 @@ NamedDecl *Sema::HandleDeclarator(Scope *S, Declarator &D,
}
}
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
QualType R = TInfo->getType();
if (DiagnoseUnexpandedParameterPack(D.getIdentifierLoc(), TInfo,
@@ -5913,10 +6503,6 @@ NamedDecl *Sema::HandleDeclarator(Scope *S, Declarator &D,
// containing the two f's declared in X, but neither of them
// matches.
- // C++ [dcl.meaning]p1:
- // [...] the member shall not merely have been introduced by a
- // using-declaration in the scope of the class or namespace nominated by
- // the nested-name-specifier of the declarator-id.
RemoveUsingDecls(Previous);
}
@@ -6058,7 +6644,7 @@ static QualType TryToFixInvalidVariablyModifiedType(QualType T,
}
QualType FoldedArrayType = Context.getConstantArrayType(
- ElemTy, Res, VLATy->getSizeExpr(), ArrayType::Normal, 0);
+ ElemTy, Res, VLATy->getSizeExpr(), ArraySizeModifier::Normal, 0);
return Qs.apply(Context, FoldedArrayType);
}
@@ -6202,8 +6788,8 @@ Sema::ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_invalid_constexpr)
<< 1 << static_cast<int>(D.getDeclSpec().getConstexprSpecifier());
- if (D.getName().Kind != UnqualifiedIdKind::IK_Identifier) {
- if (D.getName().Kind == UnqualifiedIdKind::IK_DeductionGuideName)
+ if (D.getName().getKind() != UnqualifiedIdKind::IK_Identifier) {
+ if (D.getName().getKind() == UnqualifiedIdKind::IK_DeductionGuideName)
Diag(D.getName().StartLocation,
diag::err_deduction_guide_invalid_specifier)
<< "typedef";
@@ -6293,14 +6879,26 @@ Sema::ActOnTypedefNameDecl(Scope *S, DeclContext *DC, TypedefNameDecl *NewTD,
if (IdentifierInfo *II = NewTD->getIdentifier())
if (!NewTD->isInvalidDecl() &&
NewTD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
- if (II->isStr("FILE"))
+ switch (II->getInterestingIdentifierID()) {
+ case tok::InterestingIdentifierKind::FILE:
Context.setFILEDecl(NewTD);
- else if (II->isStr("jmp_buf"))
+ break;
+ case tok::InterestingIdentifierKind::jmp_buf:
Context.setjmp_bufDecl(NewTD);
- else if (II->isStr("sigjmp_buf"))
+ break;
+ case tok::InterestingIdentifierKind::sigjmp_buf:
Context.setsigjmp_bufDecl(NewTD);
- else if (II->isStr("ucontext_t"))
+ break;
+ case tok::InterestingIdentifierKind::ucontext_t:
Context.setucontext_tDecl(NewTD);
+ break;
+ case tok::InterestingIdentifierKind::float_t:
+ case tok::InterestingIdentifierKind::double_t:
+ NewTD->addAttr(AvailableOnlyInDefaultEvalMethodAttr::Create(Context));
+ break;
+ default:
+ break;
+ }
}
return NewTD;
@@ -6470,8 +7068,7 @@ static void checkAttributesAfterMerging(Sema &S, NamedDecl &ND) {
if (WeakRefAttr *Attr = ND.getAttr<WeakRefAttr>()) {
if (ND.isExternallyVisible()) {
S.Diag(Attr->getLocation(), diag::err_attribute_weakref_not_static);
- ND.dropAttr<WeakRefAttr>();
- ND.dropAttr<AliasAttr>();
+ ND.dropAttrs<WeakRefAttr, AliasAttr>();
}
}
@@ -6639,13 +7236,24 @@ static void checkDLLAttributeRedeclaration(Sema &S, NamedDecl *OldDecl,
(!IsInline || (IsMicrosoftABI && IsTemplate)) && !IsStaticDataMember &&
!NewDecl->isLocalExternDecl() && !IsQualifiedFriend) {
if (IsMicrosoftABI && IsDefinition) {
- S.Diag(NewDecl->getLocation(),
- diag::warn_redeclaration_without_import_attribute)
- << NewDecl;
- S.Diag(OldDecl->getLocation(), diag::note_previous_declaration);
- NewDecl->dropAttr<DLLImportAttr>();
- NewDecl->addAttr(
- DLLExportAttr::CreateImplicit(S.Context, NewImportAttr->getRange()));
+ if (IsSpecialization) {
+ S.Diag(
+ NewDecl->getLocation(),
+ diag::err_attribute_dllimport_function_specialization_definition);
+ S.Diag(OldImportAttr->getLocation(), diag::note_attribute);
+ NewDecl->dropAttr<DLLImportAttr>();
+ } else {
+ S.Diag(NewDecl->getLocation(),
+ diag::warn_redeclaration_without_import_attribute)
+ << NewDecl;
+ S.Diag(OldDecl->getLocation(), diag::note_previous_declaration);
+ NewDecl->dropAttr<DLLImportAttr>();
+ NewDecl->addAttr(DLLExportAttr::CreateImplicit(
+ S.Context, NewImportAttr->getRange()));
+ }
+ } else if (IsMicrosoftABI && IsSpecialization) {
+ assert(!IsDefinition);
+ // MSVC allows this. Keep the inherited attribute.
} else {
S.Diag(NewDecl->getLocation(),
diag::warn_redeclaration_without_attribute_prev_attribute_ignored)
@@ -6739,6 +7347,9 @@ static bool shouldConsiderLinkage(const VarDecl *VD) {
return true;
if (DC->isRecord())
return false;
+ if (DC->getDeclKind() == Decl::HLSLBuffer)
+ return false;
+
if (isa<RequiresExprBodyDecl>(DC))
return false;
llvm_unreachable("Unexpected context");
@@ -6768,7 +7379,8 @@ static bool hasParsedAttr(Scope *S, const Declarator &PD,
}
// Finally, check attributes on the decl itself.
- return PD.getAttributes().hasAttribute(Kind);
+ return PD.getAttributes().hasAttribute(Kind) ||
+ PD.getDeclarationAttributes().hasAttribute(Kind);
}
/// Adjust the \c DeclContext for a function or variable that might be a
@@ -6911,6 +7523,36 @@ static void copyAttrFromTypedefToDecl(Sema &S, Decl *D, const TypedefType *TT) {
}
}
+// This function emits warning and a corresponding note based on the
+// ReadOnlyPlacementAttr attribute. The warning checks that all global variable
+// declarations of an annotated type must be const qualified.
+void emitReadOnlyPlacementAttrWarning(Sema &S, const VarDecl *VD) {
+ QualType VarType = VD->getType().getCanonicalType();
+
+ // Ignore local declarations (for now) and those with const qualification.
+ // TODO: Local variables should not be allowed if their type declaration has
+ // ReadOnlyPlacementAttr attribute. To be handled in follow-up patch.
+ if (!VD || VD->hasLocalStorage() || VD->getType().isConstQualified())
+ return;
+
+ if (VarType->isArrayType()) {
+ // Retrieve element type for array declarations.
+ VarType = S.getASTContext().getBaseElementType(VarType);
+ }
+
+ const RecordDecl *RD = VarType->getAsRecordDecl();
+
+ // Check if the record declaration is present and if it has any attributes.
+ if (RD == nullptr)
+ return;
+
+ if (const auto *ConstDecl = RD->getAttr<ReadOnlyPlacementAttr>()) {
+ S.Diag(VD->getLocation(), diag::warn_var_decl_not_read_only) << RD;
+ S.Diag(ConstDecl->getLocation(), diag::note_enforce_read_only_placement);
+ return;
+ }
+}
+
NamedDecl *Sema::ActOnVariableDeclarator(
Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo,
LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists,
@@ -6919,6 +7561,7 @@ NamedDecl *Sema::ActOnVariableDeclarator(
DeclarationName Name = GetNameForDeclarator(D).getName();
IdentifierInfo *II = Name.getAsIdentifierInfo();
+ bool IsPlaceholderVariable = false;
if (D.isDecompositionDeclarator()) {
// Take the name of the first declarator as our name for diagnostic
@@ -6937,6 +7580,18 @@ NamedDecl *Sema::ActOnVariableDeclarator(
DeclSpec::SCS SCSpec = D.getDeclSpec().getStorageClassSpec();
StorageClass SC = StorageClassSpecToVarDeclStorageClass(D.getDeclSpec());
+ if (LangOpts.CPlusPlus && (DC->isClosure() || DC->isFunctionOrMethod()) &&
+ SC != SC_Static && SC != SC_Extern && II && II->isPlaceholder()) {
+ IsPlaceholderVariable = true;
+ if (!Previous.empty()) {
+ NamedDecl *PrevDecl = *Previous.begin();
+ bool SameDC = PrevDecl->getDeclContext()->getRedeclContext()->Equals(
+ DC->getRedeclContext());
+ if (SameDC && isDeclInScope(PrevDecl, CurContext, S, false))
+ DiagPlaceholderVariableDefinition(D.getIdentifierLoc());
+ }
+ }
+
// dllimport globals without explicit storage class are treated as extern. We
// have to change the storage class this early to get the right DeclContext.
if (SC == SC_None && !DC->isRecord() &&
@@ -7059,14 +7714,15 @@ NamedDecl *Sema::ActOnVariableDeclarator(
// members.
Diag(D.getIdentifierLoc(),
diag::err_static_data_member_not_allowed_in_local_class)
- << Name << RD->getDeclName() << RD->getTagKind();
+ << Name << RD->getDeclName()
+ << llvm::to_underlying(RD->getTagKind());
} else if (AnonStruct) {
// C++ [class.static.data]p4: Unnamed classes and classes contained
// directly or indirectly within unnamed classes shall not contain
// static data members.
Diag(D.getIdentifierLoc(),
diag::err_static_data_member_not_allowed_in_anon_struct)
- << Name << AnonStruct->getTagKind();
+ << Name << llvm::to_underlying(AnonStruct->getTagKind());
Invalid = true;
} else if (RD->isUnion()) {
// C++98 [class.union]p1: If a union contains a static data member,
@@ -7176,7 +7832,12 @@ NamedDecl *Sema::ActOnVariableDeclarator(
// If we have any template parameter lists that don't directly belong to
// the variable (matching the scope specifier), store them.
- unsigned VDTemplateParamLists = TemplateParams ? 1 : 0;
+ // An explicit variable template specialization does not own any template
+ // parameter lists.
+ bool IsExplicitSpecialization =
+ IsVariableTemplateSpecialization && !IsPartialSpecialization;
+ unsigned VDTemplateParamLists =
+ (TemplateParams && !IsExplicitSpecialization) ? 1 : 0;
if (TemplateParamLists.size() > VDTemplateParamLists)
NewVD->setTemplateParameterListsInfo(
Context, TemplateParamLists.drop_back(VDTemplateParamLists));
@@ -7229,7 +7890,7 @@ NamedDecl *Sema::ActOnVariableDeclarator(
diag::err_thread_non_global)
<< DeclSpec::getSpecifierName(TSCS);
else if (!Context.getTargetInfo().isTLSSupported()) {
- if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice ||
+ if (getLangOpts().CUDA || getLangOpts().OpenMPIsTargetDevice ||
getLangOpts().SYCLIsDevice) {
// Postpone error emission until we've collected attributes required to
// figure out whether it's a host or device variable and whether the
@@ -7254,7 +7915,7 @@ NamedDecl *Sema::ActOnVariableDeclarator(
Diag(D.getDeclSpec().getConstexprSpecLoc(),
diag::err_constexpr_wrong_decl_kind)
<< static_cast<int>(D.getDeclSpec().getConstexprSpecifier());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case ConstexprSpecKind::Constexpr:
NewVD->setConstexpr(true);
@@ -7272,9 +7933,9 @@ NamedDecl *Sema::ActOnVariableDeclarator(
Diag(D.getDeclSpec().getConstexprSpecLoc(),
diag::err_constinit_local_variable);
else
- NewVD->addAttr(ConstInitAttr::Create(
- Context, D.getDeclSpec().getConstexprSpecLoc(),
- AttributeCommonInfo::AS_Keyword, ConstInitAttr::Keyword_constinit));
+ NewVD->addAttr(
+ ConstInitAttr::Create(Context, D.getDeclSpec().getConstexprSpecLoc(),
+ ConstInitAttr::Keyword_constinit));
break;
}
@@ -7327,15 +7988,26 @@ NamedDecl *Sema::ActOnVariableDeclarator(
DeclSpec::TSCS TSC = D.getDeclSpec().getThreadStorageClassSpec();
if (TSC != TSCS_unspecified) {
- bool IsCXX = getLangOpts().OpenCLCPlusPlus;
Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
diag::err_opencl_unknown_type_specifier)
- << IsCXX << getLangOpts().getOpenCLVersionTuple().getAsString()
+ << getLangOpts().getOpenCLVersionString()
<< DeclSpec::getSpecifierName(TSC) << 1;
NewVD->setInvalidDecl();
}
}
+ // WebAssembly tables are always in address space 1 (wasm_var). Don't apply
+ // address space if the table has local storage (semantic checks elsewhere
+ // will produce an error anyway).
+ if (const auto *ATy = dyn_cast<ArrayType>(NewVD->getType())) {
+ if (ATy && ATy->getElementType().isWebAssemblyReferenceType() &&
+ !NewVD->hasLocalStorage()) {
+ QualType Type = Context.getAddrSpaceQualType(
+ NewVD->getType(), Context.getLangASForBuiltinAddressSpace(1));
+ NewVD->setType(Type);
+ }
+ }
+
// Handle attributes prior to checking for duplicates in MergeVarDecl
ProcessDeclAttributes(S, NewVD, D);
@@ -7347,17 +8019,18 @@ NamedDecl *Sema::ActOnVariableDeclarator(
if (const auto *TT = R->getAs<TypedefType>())
copyAttrFromTypedefToDecl<AllocSizeAttr>(*this, NewVD, TT);
- if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice ||
+ if (getLangOpts().CUDA || getLangOpts().OpenMPIsTargetDevice ||
getLangOpts().SYCLIsDevice) {
if (EmitTLSUnsupportedError &&
((getLangOpts().CUDA && DeclAttrsMatchCUDAMode(getLangOpts(), NewVD)) ||
- (getLangOpts().OpenMPIsDevice &&
+ (getLangOpts().OpenMPIsTargetDevice &&
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(NewVD))))
Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
diag::err_thread_unsupported);
if (EmitTLSUnsupportedError &&
- (LangOpts.SYCLIsDevice || (LangOpts.OpenMP && LangOpts.OpenMPIsDevice)))
+ (LangOpts.SYCLIsDevice ||
+ (LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice)))
targetDiag(D.getIdentifierLoc(), diag::err_thread_unsupported);
// CUDA B.2.5: "__shared__ and __constant__ variables have implied static
// storage [duration]."
@@ -7489,9 +8162,16 @@ NamedDecl *Sema::ActOnVariableDeclarator(
NewVD->setInvalidDecl();
}
- if (!IsVariableTemplateSpecialization)
+ if (!IsVariableTemplateSpecialization && !IsPlaceholderVariable)
D.setRedeclaration(CheckVariableDeclaration(NewVD, Previous));
+ // CheckVariableDeclaration will set NewVD as invalid if something is in
+ // error like WebAssembly tables being declared as arrays with a non-zero
+ // size, but then parsing continues and emits further errors on that line.
+ // To avoid that we check here if it happened and return nullptr.
+ if (NewVD->getType()->isWebAssemblyTableType() && NewVD->isInvalidDecl())
+ return nullptr;
+
if (NewTemplate) {
VarTemplateDecl *PrevVarTemplate =
NewVD->getPreviousDecl()
@@ -7520,7 +8200,7 @@ NamedDecl *Sema::ActOnVariableDeclarator(
}
// Diagnose shadowed variables iff this isn't a redeclaration.
- if (ShadowedDecl && !D.isRedeclaration())
+ if (!IsPlaceholderVariable && ShadowedDecl && !D.isRedeclaration())
CheckShadow(NewVD, ShadowedDecl, Previous);
ProcessPragmaWeak(S, NewVD);
@@ -7576,6 +8256,8 @@ NamedDecl *Sema::ActOnVariableDeclarator(
if (IsMemberSpecialization && !NewVD->isInvalidDecl())
CompleteMemberSpecialization(NewVD, Previous);
+ emitReadOnlyPlacementAttrWarning(*this, NewVD);
+
return NewVD;
}
@@ -7634,7 +8316,7 @@ NamedDecl *Sema::getShadowedDeclaration(const VarDecl *D,
return nullptr;
// Don't diagnose declarations at file scope.
- if (D->hasGlobalStorage())
+ if (D->hasGlobalStorage() && !D->isStaticLocal())
return nullptr;
NamedDecl *ShadowedDecl = R.getFoundDecl();
@@ -7703,7 +8385,7 @@ void Sema::CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
if (shadowedVar->isExternC()) {
// For shadowing external vars, make sure that we point to the global
// declaration, not a locally scoped extern declaration.
- for (auto I : shadowedVar->redecls())
+ for (auto *I : shadowedVar->redecls())
if (I->isFileVarDecl()) {
ShadowedDecl = I;
break;
@@ -7714,28 +8396,40 @@ void Sema::CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
unsigned WarningDiag = diag::warn_decl_shadow;
SourceLocation CaptureLoc;
- if (isa<VarDecl>(D) && isa<VarDecl>(ShadowedDecl) && NewDC &&
- isa<CXXMethodDecl>(NewDC)) {
+ if (isa<VarDecl>(D) && NewDC && isa<CXXMethodDecl>(NewDC)) {
if (const auto *RD = dyn_cast<CXXRecordDecl>(NewDC->getParent())) {
if (RD->isLambda() && OldDC->Encloses(NewDC->getLexicalParent())) {
- if (RD->getLambdaCaptureDefault() == LCD_None) {
- // Try to avoid warnings for lambdas with an explicit capture list.
+ if (const auto *VD = dyn_cast<VarDecl>(ShadowedDecl)) {
const auto *LSI = cast<LambdaScopeInfo>(getCurFunction());
- // Warn only when the lambda captures the shadowed decl explicitly.
- CaptureLoc = getCaptureLocation(LSI, cast<VarDecl>(ShadowedDecl));
- if (CaptureLoc.isInvalid())
- WarningDiag = diag::warn_decl_shadow_uncaptured_local;
- } else {
- // Remember that this was shadowed so we can avoid the warning if the
- // shadowed decl isn't captured and the warning settings allow it.
+ if (RD->getLambdaCaptureDefault() == LCD_None) {
+ // Try to avoid warnings for lambdas with an explicit capture
+ // list. Warn only when the lambda captures the shadowed decl
+ // explicitly.
+ CaptureLoc = getCaptureLocation(LSI, VD);
+ if (CaptureLoc.isInvalid())
+ WarningDiag = diag::warn_decl_shadow_uncaptured_local;
+ } else {
+ // Remember that this was shadowed so we can avoid the warning if
+ // the shadowed decl isn't captured and the warning settings allow
+ // it.
+ cast<LambdaScopeInfo>(getCurFunction())
+ ->ShadowingDecls.push_back({D, VD});
+ return;
+ }
+ }
+ if (isa<FieldDecl>(ShadowedDecl)) {
+ // If lambda can capture this, then emit default shadowing warning,
+ // Otherwise it is not really a shadowing case since field is not
+ // available in lambda's body.
+ // At this point we don't know that lambda can capture this, so
+ // remember that this was shadowed and delay until we know.
cast<LambdaScopeInfo>(getCurFunction())
- ->ShadowingDecls.push_back(
- {cast<VarDecl>(D), cast<VarDecl>(ShadowedDecl)});
+ ->ShadowingDecls.push_back({D, ShadowedDecl});
return;
}
}
-
- if (cast<VarDecl>(ShadowedDecl)->hasLocalStorage()) {
+ if (const auto *VD = dyn_cast<VarDecl>(ShadowedDecl);
+ VD && VD->hasLocalStorage()) {
// A variable can't shadow a local variable in an enclosing scope, if
// they are separated by a non-capturing declaration context.
for (DeclContext *ParentDC = NewDC;
@@ -7752,6 +8446,10 @@ void Sema::CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
}
}
+ // Never warn about shadowing a placeholder variable.
+ if (ShadowedDecl->isPlaceholderVar(getLangOpts()))
+ return;
+
// Only warn about certain kinds of shadowing for class members.
if (NewDC && NewDC->isRecord()) {
// In particular, don't warn about shadowing non-class members.
@@ -7770,8 +8468,6 @@ void Sema::CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
DeclarationName Name = R.getLookupName();
// Emit warning and note.
- if (getSourceManager().isInSystemMacro(R.getNameLoc()))
- return;
ShadowedDeclKind Kind = computeShadowedDeclKind(ShadowedDecl, OldDC);
Diag(R.getNameLoc(), WarningDiag) << Name << Kind << OldDC;
if (!CaptureLoc.isInvalid())
@@ -7784,19 +8480,28 @@ void Sema::CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
/// when these variables are captured by the lambda.
void Sema::DiagnoseShadowingLambdaDecls(const LambdaScopeInfo *LSI) {
for (const auto &Shadow : LSI->ShadowingDecls) {
- const VarDecl *ShadowedDecl = Shadow.ShadowedDecl;
+ const NamedDecl *ShadowedDecl = Shadow.ShadowedDecl;
// Try to avoid the warning when the shadowed decl isn't captured.
- SourceLocation CaptureLoc = getCaptureLocation(LSI, ShadowedDecl);
const DeclContext *OldDC = ShadowedDecl->getDeclContext();
- Diag(Shadow.VD->getLocation(), CaptureLoc.isInvalid()
- ? diag::warn_decl_shadow_uncaptured_local
- : diag::warn_decl_shadow)
- << Shadow.VD->getDeclName()
- << computeShadowedDeclKind(ShadowedDecl, OldDC) << OldDC;
- if (!CaptureLoc.isInvalid())
- Diag(CaptureLoc, diag::note_var_explicitly_captured_here)
- << Shadow.VD->getDeclName() << /*explicitly*/ 0;
- Diag(ShadowedDecl->getLocation(), diag::note_previous_declaration);
+ if (const auto *VD = dyn_cast<VarDecl>(ShadowedDecl)) {
+ SourceLocation CaptureLoc = getCaptureLocation(LSI, VD);
+ Diag(Shadow.VD->getLocation(),
+ CaptureLoc.isInvalid() ? diag::warn_decl_shadow_uncaptured_local
+ : diag::warn_decl_shadow)
+ << Shadow.VD->getDeclName()
+ << computeShadowedDeclKind(ShadowedDecl, OldDC) << OldDC;
+ if (CaptureLoc.isValid())
+ Diag(CaptureLoc, diag::note_var_explicitly_captured_here)
+ << Shadow.VD->getDeclName() << /*explicitly*/ 0;
+ Diag(ShadowedDecl->getLocation(), diag::note_previous_declaration);
+ } else if (isa<FieldDecl>(ShadowedDecl)) {
+ Diag(Shadow.VD->getLocation(),
+ LSI->isCXXThisCaptured() ? diag::warn_decl_shadow
+ : diag::warn_decl_shadow_uncaptured_local)
+ << Shadow.VD->getDeclName()
+ << computeShadowedDeclKind(ShadowedDecl, OldDC) << OldDC;
+ Diag(ShadowedDecl->getLocation(), diag::note_previous_declaration);
+ }
}
}
@@ -8098,6 +8803,28 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
}
}
+ // WebAssembly tables must be static with a zero length and can't be
+ // declared within functions.
+ if (T->isWebAssemblyTableType()) {
+ if (getCurScope()->getParent()) { // Parent is null at top-level
+ Diag(NewVD->getLocation(), diag::err_wasm_table_in_function);
+ NewVD->setInvalidDecl();
+ return;
+ }
+ if (NewVD->getStorageClass() != SC_Static) {
+ Diag(NewVD->getLocation(), diag::err_wasm_table_must_be_static);
+ NewVD->setInvalidDecl();
+ return;
+ }
+ const auto *ATy = dyn_cast<ConstantArrayType>(T.getTypePtr());
+ if (!ATy || ATy->getSize().getSExtValue() != 0) {
+ Diag(NewVD->getLocation(),
+ diag::err_typecheck_wasm_table_must_have_zero_length);
+ NewVD->setInvalidDecl();
+ return;
+ }
+ }
+
bool isVM = T->isVariablyModifiedType();
if (isVM || NewVD->hasAttr<CleanupAttr>() ||
NewVD->hasAttr<BlocksAttr>())
@@ -8168,7 +8895,8 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
return;
}
- if (!NewVD->hasLocalStorage() && T->isSizelessType()) {
+ if (!NewVD->hasLocalStorage() && T->isSizelessType() &&
+ !T.isWebAssemblyReferenceType()) {
Diag(NewVD->getLocation(), diag::err_sizeless_nonlocal) << T;
NewVD->setInvalidDecl();
return;
@@ -8194,6 +8922,22 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
NewVD->setInvalidDecl();
return;
}
+
+ // Check that SVE types are only used in functions with SVE available.
+ if (T->isSVESizelessBuiltinType() && isa<FunctionDecl>(CurContext)) {
+ const FunctionDecl *FD = cast<FunctionDecl>(CurContext);
+ llvm::StringMap<bool> CallerFeatureMap;
+ Context.getFunctionFeatureMap(CallerFeatureMap, FD);
+ if (!Builtin::evaluateRequiredTargetFeatures(
+ "sve", CallerFeatureMap)) {
+ Diag(NewVD->getLocation(), diag::err_sve_vector_in_non_sve_target) << T;
+ NewVD->setInvalidDecl();
+ return;
+ }
+ }
+
+ if (T->isRVVSizelessBuiltinType())
+ checkRVVTypeSupport(T, NewVD->getLocation(), cast<Decl>(CurContext));
}
/// Perform semantic checking on a newly-created variable
@@ -8251,13 +8995,11 @@ bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
CXXMethodDecl *BaseMD =
dyn_cast<CXXMethodDecl>(BaseND->getCanonicalDecl());
if (!BaseMD || !BaseMD->isVirtual() ||
- IsOverload(MD, BaseMD, /*UseMemberUsingDeclRules=*/false,
- /*ConsiderCudaAttrs=*/true,
- // C++2a [class.virtual]p2 does not consider requires
- // clauses when overriding.
- /*ConsiderRequiresClauses=*/false))
+ IsOverride(MD, BaseMD, /*UseMemberUsingDeclRules=*/false,
+ /*ConsiderCudaAttrs=*/true))
+ continue;
+ if (!CheckExplicitObjectOverride(MD, BaseMD))
continue;
-
if (Overridden.insert(BaseMD).second) {
MD->addOverriddenMethod(BaseMD);
CheckOverridingFunctionReturnType(MD, BaseMD);
@@ -8485,7 +9227,14 @@ static NamedDecl *DiagnoseInvalidRedeclaration(
<< NewFD->getParamDecl(Idx - 1)->getType();
} else if (FDisConst != NewFDisConst) {
SemaRef.Diag(FD->getLocation(), diag::note_member_def_close_const_match)
- << NewFDisConst << FD->getSourceRange().getEnd();
+ << NewFDisConst << FD->getSourceRange().getEnd()
+ << (NewFDisConst
+ ? FixItHint::CreateRemoval(ExtraArgs.D.getFunctionTypeInfo()
+ .getConstQualifierLoc())
+ : FixItHint::CreateInsertion(ExtraArgs.D.getFunctionTypeInfo()
+ .getRParenLoc()
+ .getLocWithOffset(1),
+ " const"));
} else
SemaRef.Diag(FD->getLocation(),
IsMember ? diag::note_member_def_close_match
@@ -8542,20 +9291,31 @@ static FunctionDecl *CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
bool isInline = D.getDeclSpec().isInlineSpecified();
if (!SemaRef.getLangOpts().CPlusPlus) {
- // Determine whether the function was written with a
- // prototype. This true when:
+ // Determine whether the function was written with a prototype. This is
+ // true when:
// - there is a prototype in the declarator, or
// - the type R of the function is some kind of typedef or other non-
// attributed reference to a type name (which eventually refers to a
- // function type).
+ // function type). Note, we can't always look at the adjusted type to
+ // check this case because attributes may cause a non-function
+ // declarator to still have a function type. e.g.,
+ // typedef void func(int a);
+ // __attribute__((noreturn)) func other_func; // This has a prototype
bool HasPrototype =
- (D.isFunctionDeclarator() && D.getFunctionTypeInfo().hasPrototype) ||
- (!R->getAsAdjusted<FunctionType>() && R->isFunctionProtoType());
-
- NewFD = FunctionDecl::Create(SemaRef.Context, DC, D.getBeginLoc(), NameInfo,
- R, TInfo, SC, isInline, HasPrototype,
- ConstexprSpecKind::Unspecified,
- /*TrailingRequiresClause=*/nullptr);
+ (D.isFunctionDeclarator() && D.getFunctionTypeInfo().hasPrototype) ||
+ (D.getDeclSpec().isTypeRep() &&
+ SemaRef.GetTypeFromParser(D.getDeclSpec().getRepAsType(), nullptr)
+ ->isFunctionProtoType()) ||
+ (!R->getAsAdjusted<FunctionType>() && R->isFunctionProtoType());
+ assert(
+ (HasPrototype || !SemaRef.getLangOpts().requiresStrictPrototypes()) &&
+ "Strict prototypes are required");
+
+ NewFD = FunctionDecl::Create(
+ SemaRef.Context, DC, D.getBeginLoc(), NameInfo, R, TInfo, SC,
+ SemaRef.getCurFPFeatures().isFPConstrained(), isInline, HasPrototype,
+ ConstexprSpecKind::Unspecified,
+ /*TrailingRequiresClause=*/nullptr);
if (D.isInvalidType())
NewFD->setInvalidDecl();
@@ -8574,14 +9334,7 @@ static FunctionDecl *CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
}
Expr *TrailingRequiresClause = D.getTrailingRequiresClause();
- // Check that the return type is not an abstract class type.
- // For record types, this is done by the AbstractClassUsageDiagnoser once
- // the class has been completely parsed.
- if (!DC->isRecord() &&
- SemaRef.RequireNonAbstractType(
- D.getIdentifierLoc(), R->castAs<FunctionType>()->getReturnType(),
- diag::err_abstract_type_in_decl, SemaRef.AbstractReturnType))
- D.setInvalidType();
+ SemaRef.CheckExplicitObjectMemberFunction(DC, D, Name, R);
if (Name.getNameKind() == DeclarationName::CXXConstructorName) {
// This is a C++ constructor declaration.
@@ -8591,9 +9344,9 @@ static FunctionDecl *CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
R = SemaRef.CheckConstructorDeclarator(D, R, SC);
return CXXConstructorDecl::Create(
SemaRef.Context, cast<CXXRecordDecl>(DC), D.getBeginLoc(), NameInfo, R,
- TInfo, ExplicitSpecifier, isInline,
- /*isImplicitlyDeclared=*/false, ConstexprKind, InheritedConstructor(),
- TrailingRequiresClause);
+ TInfo, ExplicitSpecifier, SemaRef.getCurFPFeatures().isFPConstrained(),
+ isInline, /*isImplicitlyDeclared=*/false, ConstexprKind,
+ InheritedConstructor(), TrailingRequiresClause);
} else if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
// This is a C++ destructor declaration.
@@ -8602,8 +9355,13 @@ static FunctionDecl *CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
CXXRecordDecl *Record = cast<CXXRecordDecl>(DC);
CXXDestructorDecl *NewDD = CXXDestructorDecl::Create(
SemaRef.Context, Record, D.getBeginLoc(), NameInfo, R, TInfo,
- isInline, /*isImplicitlyDeclared=*/false, ConstexprKind,
+ SemaRef.getCurFPFeatures().isFPConstrained(), isInline,
+ /*isImplicitlyDeclared=*/false, ConstexprKind,
TrailingRequiresClause);
+ // User defined destructors start as not selected if the class definition is still
+ // not done.
+ if (Record->isBeingDefined())
+ NewDD->setIneligibleOrNotSelected(true);
// If the destructor needs an implicit exception specification, set it
// now. FIXME: It'd be nice to be able to create the right type to start
@@ -8620,11 +9378,10 @@ static FunctionDecl *CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
// Create a FunctionDecl to satisfy the function definition parsing
// code path.
- return FunctionDecl::Create(SemaRef.Context, DC, D.getBeginLoc(),
- D.getIdentifierLoc(), Name, R, TInfo, SC,
- isInline,
- /*hasPrototype=*/true, ConstexprKind,
- TrailingRequiresClause);
+ return FunctionDecl::Create(
+ SemaRef.Context, DC, D.getBeginLoc(), D.getIdentifierLoc(), Name, R,
+ TInfo, SC, SemaRef.getCurFPFeatures().isFPConstrained(), isInline,
+ /*hasPrototype=*/true, ConstexprKind, TrailingRequiresClause);
}
} else if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName) {
@@ -8641,7 +9398,8 @@ static FunctionDecl *CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
IsVirtualOkay = true;
return CXXConversionDecl::Create(
SemaRef.Context, cast<CXXRecordDecl>(DC), D.getBeginLoc(), NameInfo, R,
- TInfo, isInline, ExplicitSpecifier, ConstexprKind, SourceLocation(),
+ TInfo, SemaRef.getCurFPFeatures().isFPConstrained(), isInline,
+ ExplicitSpecifier, ConstexprKind, SourceLocation(),
TrailingRequiresClause);
} else if (Name.getNameKind() == DeclarationName::CXXDeductionGuideName) {
@@ -8649,8 +9407,8 @@ static FunctionDecl *CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
SemaRef.Diag(TrailingRequiresClause->getBeginLoc(),
diag::err_trailing_requires_clause_on_deduction_guide)
<< TrailingRequiresClause->getSourceRange();
- SemaRef.CheckDeductionGuideDeclarator(D, R, SC);
-
+ if (SemaRef.CheckDeductionGuideDeclarator(D, R, SC))
+ return nullptr;
return CXXDeductionGuideDecl::Create(SemaRef.Context, DC, D.getBeginLoc(),
ExplicitSpecifier, NameInfo, R, TInfo,
D.getEndLoc());
@@ -8670,8 +9428,8 @@ static FunctionDecl *CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
// This is a C++ method declaration.
CXXMethodDecl *Ret = CXXMethodDecl::Create(
SemaRef.Context, cast<CXXRecordDecl>(DC), D.getBeginLoc(), NameInfo, R,
- TInfo, SC, isInline, ConstexprKind, SourceLocation(),
- TrailingRequiresClause);
+ TInfo, SC, SemaRef.getCurFPFeatures().isFPConstrained(), isInline,
+ ConstexprKind, SourceLocation(), TrailingRequiresClause);
IsVirtualOkay = !Ret->isStatic();
return Ret;
} else {
@@ -8683,9 +9441,10 @@ static FunctionDecl *CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
// Determine whether the function was written with a
// prototype. This true when:
// - we're in C++ (where every function has a prototype),
- return FunctionDecl::Create(SemaRef.Context, DC, D.getBeginLoc(), NameInfo,
- R, TInfo, SC, isInline, true /*HasPrototype*/,
- ConstexprKind, TrailingRequiresClause);
+ return FunctionDecl::Create(
+ SemaRef.Context, DC, D.getBeginLoc(), NameInfo, R, TInfo, SC,
+ SemaRef.getCurFPFeatures().isFPConstrained(), isInline,
+ true /*HasPrototype*/, ConstexprKind, TrailingRequiresClause);
}
}
@@ -8739,6 +9498,12 @@ static OpenCLParamType getOpenCLKernelParameterType(Sema &S, QualType PT) {
ParamKind == InvalidKernelParam)
return ParamKind;
+ // OpenCL v3.0 s6.11.a:
+ // A restriction to pass pointers to pointers only applies to OpenCL C
+ // v1.2 or below.
+ if (S.getLangOpts().getOpenCLCompatibleVersion() > 120)
+ return ValidKernelParam;
+
return PtrPtrKernelParam;
}
@@ -8748,10 +9513,28 @@ static OpenCLParamType getOpenCLKernelParameterType(Sema &S, QualType PT) {
// reference if an implementation supports them in kernel parameters.
if (S.getLangOpts().OpenCLCPlusPlus &&
!S.getOpenCLOptions().isAvailableOption(
- "__cl_clang_non_portable_kernel_param_types", S.getLangOpts()) &&
- !PointeeType->isAtomicType() && !PointeeType->isVoidType() &&
- !PointeeType->isStandardLayoutType())
+ "__cl_clang_non_portable_kernel_param_types", S.getLangOpts())) {
+ auto CXXRec = PointeeType.getCanonicalType()->getAsCXXRecordDecl();
+ bool IsStandardLayoutType = true;
+ if (CXXRec) {
+ // If template type is not ODR-used its definition is only available
+ // in the template definition not its instantiation.
+ // FIXME: This logic doesn't work for types that depend on template
+ // parameter (PR58590).
+ if (!CXXRec->hasDefinition())
+ CXXRec = CXXRec->getTemplateInstantiationPattern();
+ if (!CXXRec || !CXXRec->hasDefinition() || !CXXRec->isStandardLayout())
+ IsStandardLayoutType = false;
+ }
+ if (!PointeeType->isAtomicType() && !PointeeType->isVoidType() &&
+ !IsStandardLayoutType)
return InvalidKernelParam;
+ }
+
+ // OpenCL v1.2 s6.9.p:
+ // A restriction to pass pointers only applies to OpenCL C v1.2 or below.
+ if (S.getLangOpts().getOpenCLCompatibleVersion() > 120)
+ return ValidKernelParam;
return PtrKernelParam;
}
@@ -8819,14 +9602,8 @@ static void checkIsValidOpenCLKernelParameter(
// OpenCL v3.0 s6.11.a:
// A kernel function argument cannot be declared as a pointer to a pointer
// type. [...] This restriction only applies to OpenCL C 1.2 or below.
- if (S.getLangOpts().OpenCLVersion <= 120 &&
- !S.getLangOpts().OpenCLCPlusPlus) {
- S.Diag(Param->getLocation(), diag::err_opencl_ptrptr_kernel_param);
- D.setInvalidType();
- return;
- }
-
- ValidTypes.insert(PT.getTypePtr());
+ S.Diag(Param->getLocation(), diag::err_opencl_ptrptr_kernel_param);
+ D.setInvalidType();
return;
case InvalidAddrSpacePtrKernelParam:
@@ -8944,7 +9721,8 @@ static void checkIsValidOpenCLKernelParameter(
// OpenCL v1.2 s6.9.p:
// Arguments to kernel functions that are declared to be a struct or union
// do not allow OpenCL objects to be passed as elements of the struct or
- // union.
+ // union. This restriction was lifted in OpenCL v2.0 with the introduction
+ // of SVM.
if (ParamType == PtrKernelParam || ParamType == PtrPtrKernelParam ||
ParamType == InvalidAddrSpacePtrKernelParam) {
S.Diag(Param->getLocation(),
@@ -9000,6 +9778,33 @@ static Scope *getTagInjectionScope(Scope *S, const LangOptions &LangOpts) {
return S;
}
+/// Determine whether a declaration matches a known function in namespace std.
+static bool isStdBuiltin(ASTContext &Ctx, FunctionDecl *FD,
+ unsigned BuiltinID) {
+ switch (BuiltinID) {
+ case Builtin::BI__GetExceptionInfo:
+ // No type checking whatsoever.
+ return Ctx.getTargetInfo().getCXXABI().isMicrosoft();
+
+ case Builtin::BIaddressof:
+ case Builtin::BI__addressof:
+ case Builtin::BIforward:
+ case Builtin::BIforward_like:
+ case Builtin::BImove:
+ case Builtin::BImove_if_noexcept:
+ case Builtin::BIas_const: {
+ // Ensure that we don't treat the algorithm
+ // OutputIt std::move(InputIt, InputIt, OutputIt)
+ // as the builtin std::move.
+ const auto *FPT = FD->getType()->castAs<FunctionProtoType>();
+ return FPT->getNumParams() == 1 && !FPT->isVariadic();
+ }
+
+ default:
+ return false;
+ }
+}
+
NamedDecl*
Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo, LookupResult &Previous,
@@ -9012,8 +9817,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
Diag(D.getIdentifierLoc(), diag::err_function_decl_cmse_ns_call);
SmallVector<TemplateParameterList *, 4> TemplateParamLists;
- for (TemplateParameterList *TPL : TemplateParamListsRef)
- TemplateParamLists.push_back(TPL);
+ llvm::append_range(TemplateParamLists, TemplateParamListsRef);
if (TemplateParameterList *Invented = D.getInventedTemplateParameterList()) {
if (!TemplateParamLists.empty() &&
Invented->getDepth() == TemplateParamLists.back()->getDepth())
@@ -9033,15 +9837,15 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
<< DeclSpec::getSpecifierName(TSCS);
if (D.isFirstDeclarationOfMember())
- adjustMemberFunctionCC(R, D.isStaticMember(), D.isCtorOrDtor(),
- D.getIdentifierLoc());
+ adjustMemberFunctionCC(
+ R, !(D.isStaticMember() || D.isExplicitObjectMemberFunction()),
+ D.isCtorOrDtor(), D.getIdentifierLoc());
bool isFriend = false;
FunctionTemplateDecl *FunctionTemplate = nullptr;
bool isMemberSpecialization = false;
bool isFunctionTemplateSpecialization = false;
- bool isDependentClassScopeExplicitSpecialization = false;
bool HasExplicitTemplateArgs = false;
TemplateArgumentListInfo TemplateArgs;
@@ -9066,15 +9870,27 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
NewFD->setLocalExternDecl();
if (getLangOpts().CPlusPlus) {
+ // The rules for implicit inlines changed in C++20 for methods and friends
+ // with an in-class definition (when such a definition is not attached to
+ // the global module). User-specified 'inline' overrides this (set when
+ // the function decl is created above).
+ // FIXME: We need a better way to separate C++ standard and clang modules.
+ bool ImplicitInlineCXX20 = !getLangOpts().CPlusPlusModules ||
+ !NewFD->getOwningModule() ||
+ NewFD->getOwningModule()->isGlobalModule() ||
+ NewFD->getOwningModule()->isHeaderLikeModule();
bool isInline = D.getDeclSpec().isInlineSpecified();
bool isVirtual = D.getDeclSpec().isVirtualSpecified();
bool hasExplicit = D.getDeclSpec().hasExplicitSpecifier();
isFriend = D.getDeclSpec().isFriendSpecified();
if (isFriend && !isInline && D.isFunctionDefinition()) {
- // C++ [class.friend]p5
+ // Pre-C++20 [class.friend]p5
// A function can be defined in a friend declaration of a
// class . . . . Such a function is implicitly inline.
- NewFD->setImplicitlyInline();
+ // Post C++20 [class.friend]p7
+ // Such a function is implicitly an inline function if it is attached
+ // to the global module.
+ NewFD->setImplicitlyInline(ImplicitInlineCXX20);
}
// If this is a method defined in an __interface, and is not a constructor
@@ -9083,12 +9899,21 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (const CXXRecordDecl *Parent =
dyn_cast<CXXRecordDecl>(NewFD->getDeclContext())) {
if (Parent->isInterface() && cast<CXXMethodDecl>(NewFD)->isUserProvided())
- NewFD->setPure(true);
+ NewFD->setIsPureVirtual(true);
// C++ [class.union]p2
// A union can have member functions, but not virtual functions.
- if (isVirtual && Parent->isUnion())
+ if (isVirtual && Parent->isUnion()) {
Diag(D.getDeclSpec().getVirtualSpecLoc(), diag::err_virtual_in_union);
+ NewFD->setInvalidDecl();
+ }
+ if ((Parent->isClass() || Parent->isStruct()) &&
+ Parent->hasAttr<SYCLSpecialClassAttr>() &&
+ NewFD->getKind() == Decl::Kind::CXXMethod && NewFD->getIdentifier() &&
+ NewFD->getName() == "__init" && D.isFunctionDefinition()) {
+ if (auto *Def = Parent->getDefinition())
+ Def->setInitMethod(true);
+ }
}
SetNestedNameSpecifier(*this, NewFD, D);
@@ -9100,15 +9925,15 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// Match up the template parameter lists with the scope specifier, then
// determine whether we have a template or a template specialization.
bool Invalid = false;
+ TemplateIdAnnotation *TemplateId =
+ D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId
+ ? D.getName().TemplateId
+ : nullptr;
TemplateParameterList *TemplateParams =
MatchTemplateParametersToScopeSpecifier(
D.getDeclSpec().getBeginLoc(), D.getIdentifierLoc(),
- D.getCXXScopeSpec(),
- D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId
- ? D.getName().TemplateId
- : nullptr,
- TemplateParamLists, isFriend, isMemberSpecialization,
- Invalid);
+ D.getCXXScopeSpec(), TemplateId, TemplateParamLists, isFriend,
+ isMemberSpecialization, Invalid);
if (TemplateParams) {
// Check that we can declare a template here.
if (CheckTemplateDeclScope(S, TemplateParams))
@@ -9121,6 +9946,11 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
Diag(NewFD->getLocation(), diag::err_destructor_template);
NewFD->setInvalidDecl();
+ // Function template with explicit template arguments.
+ } else if (TemplateId) {
+ Diag(D.getIdentifierLoc(), diag::err_function_template_partial_spec)
+ << SourceRange(TemplateId->LAngleLoc, TemplateId->RAngleLoc);
+ NewFD->setInvalidDecl();
}
// If we're adding a template to a dependent context, we may need to
@@ -9172,6 +10002,12 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
<< Name << RemoveRange
<< FixItHint::CreateRemoval(RemoveRange)
<< FixItHint::CreateInsertion(InsertLoc, "<>");
+ Invalid = true;
+
+ // Recover by faking up an empty template argument list.
+ HasExplicitTemplateArgs = true;
+ TemplateArgs.setLAngleLoc(InsertLoc);
+ TemplateArgs.setRAngleLoc(InsertLoc);
}
}
} else {
@@ -9185,6 +10021,33 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (TemplateParamLists.size() > 0)
// For source fidelity, store all the template param lists.
NewFD->setTemplateParameterListsInfo(Context, TemplateParamLists);
+
+ // "friend void foo<>(int);" is an implicit specialization decl.
+ if (isFriend && TemplateId)
+ isFunctionTemplateSpecialization = true;
+ }
+
+ // If this is a function template specialization and the unqualified-id of
+ // the declarator-id is a template-id, convert the template argument list
+ // into our AST format and check for unexpanded packs.
+ if (isFunctionTemplateSpecialization && TemplateId) {
+ HasExplicitTemplateArgs = true;
+
+ TemplateArgs.setLAngleLoc(TemplateId->LAngleLoc);
+ TemplateArgs.setRAngleLoc(TemplateId->RAngleLoc);
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+ translateTemplateArguments(TemplateArgsPtr, TemplateArgs);
+
+ // FIXME: Should we check for unexpanded packs if this was an (invalid)
+ // declaration of a function template partial specialization? Should we
+ // consider the unexpanded pack context to be a partial specialization?
+ for (const TemplateArgumentLoc &ArgLoc : TemplateArgs.arguments()) {
+ if (DiagnoseUnexpandedParameterPack(
+ ArgLoc, isFriend ? UPPC_FriendDeclaration
+ : UPPC_ExplicitSpecialization))
+ NewFD->setInvalidDecl();
+ }
}
if (Invalid) {
@@ -9235,8 +10098,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// a friend yet, so 'isDependentContext' on the FD doesn't work.
const FunctionProtoType *FPT =
NewFD->getType()->castAs<FunctionProtoType>();
- QualType Result =
- SubstAutoType(FPT->getReturnType(), Context.DependentTy);
+ QualType Result = SubstAutoTypeDependent(FPT->getReturnType());
NewFD->setType(Context.getFunctionType(Result, FPT->getParamTypes(),
FPT->getExtProtoInfo()));
}
@@ -9348,11 +10210,14 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
if (isa<CXXMethodDecl>(NewFD) && DC == CurContext &&
- D.isFunctionDefinition()) {
- // C++ [class.mfct]p2:
+ D.isFunctionDefinition() && !isInline) {
+ // Pre C++20 [class.mfct]p2:
// A member function may be defined (8.4) in its class definition, in
// which case it is an inline member function (7.1.2)
- NewFD->setImplicitlyInline();
+ // Post C++20 [class.mfct]p1:
+ // If a member function is attached to the global module and is defined
+ // in its class definition, it is inline.
+ NewFD->setImplicitlyInline(ImplicitInlineCXX20);
}
if (SC == SC_Static && isa<CXXMethodDecl>(NewFD) &&
@@ -9386,6 +10251,18 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
NewFD->setType(Context.getFunctionType(
FPT->getReturnType(), FPT->getParamTypes(),
FPT->getExtProtoInfo().withExceptionSpec(EST_BasicNoexcept)));
+
+ // C++20 [dcl.inline]/7
+ // If an inline function or variable that is attached to a named module
+ // is declared in a definition domain, it shall be defined in that
+ // domain.
+ // So, if the current declaration does not have a definition, we must
+ // check at the end of the TU (or when the PMF starts) to see that we
+ // have a definition at that point.
+ if (isInline && !D.isFunctionDefinition() && getLangOpts().CPlusPlus20 &&
+ NewFD->hasOwningModule() && NewFD->getOwningModule()->isNamedModule()) {
+ PendingInlineFuncDecls.insert(NewFD);
+ }
}
// Filter out previous declarations that don't match the scope.
@@ -9498,9 +10375,8 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
NewFD->setParams(Params);
if (D.getDeclSpec().isNoreturnSpecified())
- NewFD->addAttr(C11NoReturnAttr::Create(Context,
- D.getDeclSpec().getNoreturnSpecLoc(),
- AttributeCommonInfo::AS_Keyword));
+ NewFD->addAttr(
+ C11NoReturnAttr::Create(Context, D.getDeclSpec().getNoreturnSpecLoc()));
// Functions returning a variably modified type violate C99 6.7.5.2p2
// because all functions have linkage.
@@ -9515,15 +10391,14 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
!NewFD->hasAttr<SectionAttr>())
NewFD->addAttr(PragmaClangTextSectionAttr::CreateImplicit(
Context, PragmaClangTextSection.SectionName,
- PragmaClangTextSection.PragmaLocation, AttributeCommonInfo::AS_Pragma));
+ PragmaClangTextSection.PragmaLocation));
// Apply an implicit SectionAttr if #pragma code_seg is active.
if (CodeSegStack.CurrentValue && D.isFunctionDefinition() &&
!NewFD->hasAttr<SectionAttr>()) {
NewFD->addAttr(SectionAttr::CreateImplicit(
Context, CodeSegStack.CurrentValue->getString(),
- CodeSegStack.CurrentPragmaLocation, AttributeCommonInfo::AS_Pragma,
- SectionAttr::Declspec_allocate));
+ CodeSegStack.CurrentPragmaLocation, SectionAttr::Declspec_allocate));
if (UnifySection(CodeSegStack.CurrentValue->getString(),
ASTContext::PSF_Implicit | ASTContext::PSF_Execute |
ASTContext::PSF_Read,
@@ -9531,6 +10406,13 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
NewFD->dropAttr<SectionAttr>();
}
+ // Apply an implicit StrictGuardStackCheckAttr if #pragma strict_gs_check is
+ // active.
+ if (StrictGuardStackCheckStack.CurrentValue && D.isFunctionDefinition() &&
+ !NewFD->hasAttr<StrictGuardStackCheckAttr>())
+ NewFD->addAttr(StrictGuardStackCheckAttr::CreateImplicit(
+ Context, PragmaClangTextSection.PragmaLocation));
+
// Apply an implicit CodeSegAttr from class declspec or
// apply an implicit SectionAttr from #pragma code_seg if active.
if (!NewFD->hasAttr<CodeSegAttr>()) {
@@ -9542,21 +10424,27 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// Handle attributes.
ProcessDeclAttributes(S, NewFD, D);
+ const auto *NewTVA = NewFD->getAttr<TargetVersionAttr>();
+ if (NewTVA && !NewTVA->isDefaultVersion() &&
+ !Context.getTargetInfo().hasFeature("fmv")) {
+ // Don't add to scope fmv functions declarations if fmv disabled
+ AddToScope = false;
+ return NewFD;
+ }
- if (getLangOpts().OpenCL) {
+ if (getLangOpts().OpenCL || getLangOpts().HLSL) {
+ // Neither OpenCL nor HLSL allow an address space qualifyer on a return
+ // type.
+ //
// OpenCL v1.1 s6.5: Using an address space qualifier in a function return
// type declaration will generate a compilation error.
LangAS AddressSpace = NewFD->getReturnType().getAddressSpace();
if (AddressSpace != LangAS::Default) {
- Diag(NewFD->getLocation(),
- diag::err_opencl_return_value_with_address_space);
+ Diag(NewFD->getLocation(), diag::err_return_value_with_address_space);
NewFD->setInvalidDecl();
}
}
- if (LangOpts.SYCLIsDevice || (LangOpts.OpenMP && LangOpts.OpenMPIsDevice))
- checkDeviceDecl(NewFD, D.getBeginLoc());
-
if (!getLangOpts().CPlusPlus) {
// Perform semantic checking on the function declaration.
if (!NewFD->isInvalidDecl() && NewFD->isMain())
@@ -9567,7 +10455,8 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (!NewFD->isInvalidDecl())
D.setRedeclaration(CheckFunctionDeclaration(S, NewFD, Previous,
- isMemberSpecialization));
+ isMemberSpecialization,
+ D.isFunctionDefinition()));
else if (!Previous.empty())
// Recover gracefully from an invalid redeclaration.
D.setRedeclaration(true);
@@ -9611,46 +10500,6 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
diag::ext_operator_new_delete_declared_inline)
<< NewFD->getDeclName();
- // If the declarator is a template-id, translate the parser's template
- // argument list into our AST format.
- if (D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId) {
- TemplateIdAnnotation *TemplateId = D.getName().TemplateId;
- TemplateArgs.setLAngleLoc(TemplateId->LAngleLoc);
- TemplateArgs.setRAngleLoc(TemplateId->RAngleLoc);
- ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
- TemplateId->NumArgs);
- translateTemplateArguments(TemplateArgsPtr,
- TemplateArgs);
-
- HasExplicitTemplateArgs = true;
-
- if (NewFD->isInvalidDecl()) {
- HasExplicitTemplateArgs = false;
- } else if (FunctionTemplate) {
- // Function template with explicit template arguments.
- Diag(D.getIdentifierLoc(), diag::err_function_template_partial_spec)
- << SourceRange(TemplateId->LAngleLoc, TemplateId->RAngleLoc);
-
- HasExplicitTemplateArgs = false;
- } else {
- assert((isFunctionTemplateSpecialization ||
- D.getDeclSpec().isFriendSpecified()) &&
- "should have a 'template<>' for this decl");
- // "friend void foo<>(int);" is an implicit specialization decl.
- isFunctionTemplateSpecialization = true;
- }
- } else if (isFriend && isFunctionTemplateSpecialization) {
- // This combination is only possible in a recovery case; the user
- // wrote something like:
- // template <> friend void foo(int);
- // which we're recovering from as if the user had written:
- // friend void foo<>(int);
- // Go ahead and fake up a template id.
- HasExplicitTemplateArgs = true;
- TemplateArgs.setLAngleLoc(D.getIdentifierLoc());
- TemplateArgs.setRAngleLoc(D.getIdentifierLoc());
- }
-
// We do not add HD attributes to specializations here because
// they may have different constexpr-ness compared to their
// templates and, after maybeAddCUDAHostDeviceAttrs() is applied,
@@ -9660,32 +10509,54 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (getLangOpts().CUDA && !isFunctionTemplateSpecialization)
maybeAddCUDAHostDeviceAttrs(NewFD, Previous);
- // If it's a friend (and only if it's a friend), it's possible
- // that either the specialized function type or the specialized
- // template is dependent, and therefore matching will fail. In
- // this case, don't check the specialization yet.
- if (isFunctionTemplateSpecialization && isFriend &&
- (NewFD->getType()->isDependentType() || DC->isDependentContext() ||
- TemplateSpecializationType::anyInstantiationDependentTemplateArguments(
- TemplateArgs.arguments()))) {
- assert(HasExplicitTemplateArgs &&
- "friend function specialization without template args");
- if (CheckDependentFunctionTemplateSpecialization(NewFD, TemplateArgs,
- Previous))
- NewFD->setInvalidDecl();
- } else if (isFunctionTemplateSpecialization) {
- if (CurContext->isDependentContext() && CurContext->isRecord()
- && !isFriend) {
- isDependentClassScopeExplicitSpecialization = true;
- } else if (!NewFD->isInvalidDecl() &&
- CheckFunctionTemplateSpecialization(
- NewFD, (HasExplicitTemplateArgs ? &TemplateArgs : nullptr),
- Previous))
- NewFD->setInvalidDecl();
+ // Handle explict specializations of function templates
+ // and friend function declarations with an explicit
+ // template argument list.
+ if (isFunctionTemplateSpecialization) {
+ bool isDependentSpecialization = false;
+ if (isFriend) {
+ // For friend function specializations, this is a dependent
+ // specialization if its semantic context is dependent, its
+ // type is dependent, or if its template-id is dependent.
+ isDependentSpecialization =
+ DC->isDependentContext() || NewFD->getType()->isDependentType() ||
+ (HasExplicitTemplateArgs &&
+ TemplateSpecializationType::
+ anyInstantiationDependentTemplateArguments(
+ TemplateArgs.arguments()));
+ assert((!isDependentSpecialization ||
+ (HasExplicitTemplateArgs == isDependentSpecialization)) &&
+ "dependent friend function specialization without template "
+ "args");
+ } else {
+ // For class-scope explicit specializations of function templates,
+ // if the lexical context is dependent, then the specialization
+ // is dependent.
+ isDependentSpecialization =
+ CurContext->isRecord() && CurContext->isDependentContext();
+ }
+
+ TemplateArgumentListInfo *ExplicitTemplateArgs =
+ HasExplicitTemplateArgs ? &TemplateArgs : nullptr;
+ if (isDependentSpecialization) {
+ // If it's a dependent specialization, it may not be possible
+ // to determine the primary template (for explicit specializations)
+ // or befriended declaration (for friends) until the enclosing
+ // template is instantiated. In such cases, we store the declarations
+ // found by name lookup and defer resolution until instantiation.
+ if (CheckDependentFunctionTemplateSpecialization(
+ NewFD, ExplicitTemplateArgs, Previous))
+ NewFD->setInvalidDecl();
+ } else if (!NewFD->isInvalidDecl()) {
+ if (CheckFunctionTemplateSpecialization(NewFD, ExplicitTemplateArgs,
+ Previous))
+ NewFD->setInvalidDecl();
+ }
// C++ [dcl.stc]p1:
// A storage-class-specifier shall not be specified in an explicit
// specialization (14.7.3)
+ // FIXME: We should be checking this for dependent specializations.
FunctionTemplateSpecializationInfo *Info =
NewFD->getTemplateSpecializationInfo();
if (Info && SC != SC_None) {
@@ -9708,22 +10579,22 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
// Perform semantic checking on the function declaration.
- if (!isDependentClassScopeExplicitSpecialization) {
- if (!NewFD->isInvalidDecl() && NewFD->isMain())
- CheckMain(NewFD, D.getDeclSpec());
+ if (!NewFD->isInvalidDecl() && NewFD->isMain())
+ CheckMain(NewFD, D.getDeclSpec());
- if (!NewFD->isInvalidDecl() && NewFD->isMSVCRTEntryPoint())
- CheckMSVCRTEntryPoint(NewFD);
+ if (!NewFD->isInvalidDecl() && NewFD->isMSVCRTEntryPoint())
+ CheckMSVCRTEntryPoint(NewFD);
- if (!NewFD->isInvalidDecl())
- D.setRedeclaration(CheckFunctionDeclaration(S, NewFD, Previous,
- isMemberSpecialization));
- else if (!Previous.empty())
- // Recover gracefully from an invalid redeclaration.
- D.setRedeclaration(true);
- }
+ if (!NewFD->isInvalidDecl())
+ D.setRedeclaration(CheckFunctionDeclaration(S, NewFD, Previous,
+ isMemberSpecialization,
+ D.isFunctionDefinition()));
+ else if (!Previous.empty())
+ // Recover gracefully from an invalid redeclaration.
+ D.setRedeclaration(true);
- assert((NewFD->isInvalidDecl() || !D.isRedeclaration() ||
+ assert((NewFD->isInvalidDecl() || NewFD->isMultiVersion() ||
+ !D.isRedeclaration() ||
Previous.getResultKind() != LookupResult::FoundOverloaded) &&
"previous declaration set still overloaded");
@@ -9843,30 +10714,41 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
}
+ if (getLangOpts().HLSL && D.isFunctionDefinition()) {
+ // Any top level function could potentially be specified as an entry.
+ if (!NewFD->isInvalidDecl() && S->getDepth() == 0 && Name.isIdentifier())
+ ActOnHLSLTopLevelFunction(NewFD);
+
+ if (NewFD->hasAttr<HLSLShaderAttr>())
+ CheckHLSLEntryPoint(NewFD);
+ }
+
// If this is the first declaration of a library builtin function, add
// attributes as appropriate.
- if (!D.isRedeclaration() &&
- NewFD->getDeclContext()->getRedeclContext()->isFileContext()) {
+ if (!D.isRedeclaration()) {
if (IdentifierInfo *II = Previous.getLookupName().getAsIdentifierInfo()) {
if (unsigned BuiltinID = II->getBuiltinID()) {
- if (NewFD->getLanguageLinkage() == CLanguageLinkage) {
- // Validate the type matches unless this builtin is specified as
- // matching regardless of its declared type.
- if (Context.BuiltinInfo.allowTypeMismatch(BuiltinID)) {
- NewFD->addAttr(BuiltinAttr::CreateImplicit(Context, BuiltinID));
- } else {
- ASTContext::GetBuiltinTypeError Error;
- LookupNecessaryTypesForBuiltin(S, BuiltinID);
- QualType BuiltinType = Context.GetBuiltinType(BuiltinID, Error);
-
- if (!Error && !BuiltinType.isNull() &&
- Context.hasSameFunctionTypeIgnoringExceptionSpec(
- NewFD->getType(), BuiltinType))
+ bool InStdNamespace = Context.BuiltinInfo.isInStdNamespace(BuiltinID);
+ if (!InStdNamespace &&
+ NewFD->getDeclContext()->getRedeclContext()->isFileContext()) {
+ if (NewFD->getLanguageLinkage() == CLanguageLinkage) {
+ // Validate the type matches unless this builtin is specified as
+ // matching regardless of its declared type.
+ if (Context.BuiltinInfo.allowTypeMismatch(BuiltinID)) {
NewFD->addAttr(BuiltinAttr::CreateImplicit(Context, BuiltinID));
+ } else {
+ ASTContext::GetBuiltinTypeError Error;
+ LookupNecessaryTypesForBuiltin(S, BuiltinID);
+ QualType BuiltinType = Context.GetBuiltinType(BuiltinID, Error);
+
+ if (!Error && !BuiltinType.isNull() &&
+ Context.hasSameFunctionTypeIgnoringExceptionSpec(
+ NewFD->getType(), BuiltinType))
+ NewFD->addAttr(BuiltinAttr::CreateImplicit(Context, BuiltinID));
+ }
}
- } else if (BuiltinID == Builtin::BI__GetExceptionInfo &&
- Context.getTargetInfo().getCXXABI().isMicrosoft()) {
- // FIXME: We should consider this a builtin only in the std namespace.
+ } else if (InStdNamespace && NewFD->isInStdNamespace() &&
+ isStdBuiltin(Context, NewFD, BuiltinID)) {
NewFD->addAttr(BuiltinAttr::CreateImplicit(Context, BuiltinID));
}
}
@@ -9883,16 +10765,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
Diag(NewFD->getLocation(),
diag::err_attribute_overloadable_no_prototype)
<< NewFD;
-
- // Turn this into a variadic function with no parameters.
- const FunctionType *FT = NewFD->getType()->getAs<FunctionType>();
- FunctionProtoType::ExtProtoInfo EPI(
- Context.getDefaultCallingConvention(true, false));
- EPI.Variadic = true;
- EPI.ExtInfo = FT->getExtInfo();
-
- QualType R = Context.getFunctionType(FT->getReturnType(), None, EPI);
- NewFD->setType(R);
+ NewFD->dropAttr<OverloadableAttr>();
}
// If there's a #pragma GCC visibility in scope, and this isn't a class
@@ -9904,10 +10777,14 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// marking the function.
AddCFAuditedAttribute(NewFD);
- // If this is a function definition, check if we have to apply optnone due to
- // a pragma.
- if(D.isFunctionDefinition())
+ // If this is a function definition, check if we have to apply any
+ // attributes (i.e. optnone and no_builtin) due to a pragma.
+ if (D.isFunctionDefinition()) {
AddRangeBasedOptnone(NewFD);
+ AddImplicitMSFunctionNoBuiltinAttr(NewFD);
+ AddSectionMSAllocText(NewFD);
+ ModifyFnAttributesMSPragmaOptimize(NewFD);
+ }
// If this is the first declaration of an extern C variable, update
// the map of such variables.
@@ -9955,8 +10832,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (getLangOpts().OpenCL && NewFD->hasAttr<OpenCLKernelAttr>()) {
// OpenCL v1.2 s6.8 static is invalid for kernel functions.
- if ((getLangOpts().OpenCLVersion >= 120)
- && (SC == SC_Static)) {
+ if (SC == SC_Static) {
Diag(D.getIdentifierLoc(), diag::err_static_kernel);
D.setInvalidType();
}
@@ -9971,7 +10847,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
llvm::SmallPtrSet<const Type *, 16> ValidTypes;
- for (auto Param : NewFD->parameters())
+ for (auto *Param : NewFD->parameters())
checkIsValidOpenCLKernelParameter(*this, D, Param, ValidTypes);
if (getLangOpts().OpenCLCPlusPlus) {
@@ -9987,6 +10863,22 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
if (getLangOpts().CPlusPlus) {
+ // Precalculate whether this is a friend function template with a constraint
+ // that depends on an enclosing template, per [temp.friend]p9.
+ if (isFriend && FunctionTemplate &&
+ FriendConstraintsDependOnEnclosingTemplate(NewFD)) {
+ NewFD->setFriendConstraintRefersToEnclosingTemplate(true);
+
+ // C++ [temp.friend]p9:
+ // A friend function template with a constraint that depends on a
+ // template parameter from an enclosing template shall be a definition.
+ if (!D.isFunctionDefinition()) {
+ Diag(NewFD->getBeginLoc(),
+ diag::err_friend_decl_with_enclosing_temp_constraint_must_be_def);
+ NewFD->setInvalidDecl();
+ }
+ }
+
if (FunctionTemplate) {
if (NewFD->isInvalidDecl())
FunctionTemplate->setInvalidDecl();
@@ -10002,7 +10894,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// OpenCL 2.0 pipe restrictions forbids pipe packet types to be non-value
// types.
- if (getLangOpts().OpenCLVersion >= 200 || getLangOpts().OpenCLCPlusPlus) {
+ if (getLangOpts().getOpenCLCompatibleVersion() >= 200) {
if(const PipeType *PipeTy = PT->getAs<PipeType>()) {
QualType ElemTy = PipeTy->getElementType();
if (ElemTy->isReferenceType() || ElemTy->isPointerType()) {
@@ -10011,19 +10903,14 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
}
}
- }
-
- // Here we have an function template explicit specialization at class scope.
- // The actual specialization will be postponed to template instatiation
- // time via the ClassScopeFunctionSpecializationDecl node.
- if (isDependentClassScopeExplicitSpecialization) {
- ClassScopeFunctionSpecializationDecl *NewSpec =
- ClassScopeFunctionSpecializationDecl::Create(
- Context, CurContext, NewFD->getLocation(),
- cast<CXXMethodDecl>(NewFD),
- HasExplicitTemplateArgs, TemplateArgs);
- CurContext->addDecl(NewSpec);
- AddToScope = false;
+ // WebAssembly tables can't be used as function parameters.
+ if (Context.getTargetInfo().getTriple().isWasm()) {
+ if (PT->getUnqualifiedDesugaredType()->isWebAssemblyTableType()) {
+ Diag(Param->getTypeSpecStartLoc(),
+ diag::err_wasm_table_as_function_parameter);
+ D.setInvalidType();
+ }
+ }
}
// Diagnose availability attributes. Availability cannot be used on functions
@@ -10109,7 +10996,7 @@ static Attr *getImplicitCodeSegAttrFromClass(Sema &S, const FunctionDecl *FD) {
/// (from the current #pragma code-seg value).
///
/// \param FD Function being declared.
-/// \param IsDefinition Whether it is a definition or just a declarartion.
+/// \param IsDefinition Whether it is a definition or just a declaration.
/// \returns A CodeSegAttr or SectionAttr to apply to the function or
/// nullptr if no attribute should be added.
Attr *Sema::getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
@@ -10120,8 +11007,7 @@ Attr *Sema::getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
CodeSegStack.CurrentValue)
return SectionAttr::CreateImplicit(
getASTContext(), CodeSegStack.CurrentValue->getString(),
- CodeSegStack.CurrentPragmaLocation, AttributeCommonInfo::AS_Pragma,
- SectionAttr::Declspec_allocate);
+ CodeSegStack.CurrentPragmaLocation, SectionAttr::Declspec_allocate);
return nullptr;
}
@@ -10193,37 +11079,53 @@ bool Sema::shouldLinkDependentDeclWithPrevious(Decl *D, Decl *PrevDecl) {
PrevVD->getType());
}
-/// Check the target attribute of the function for MultiVersion
-/// validity.
+/// Check the target or target_version attribute of the function for
+/// MultiVersion validity.
///
/// Returns true if there was an error, false otherwise.
static bool CheckMultiVersionValue(Sema &S, const FunctionDecl *FD) {
const auto *TA = FD->getAttr<TargetAttr>();
- assert(TA && "MultiVersion Candidate requires a target attribute");
- ParsedTargetAttr ParseInfo = TA->parse();
+ const auto *TVA = FD->getAttr<TargetVersionAttr>();
+ assert(
+ (TA || TVA) &&
+ "MultiVersion candidate requires a target or target_version attribute");
const TargetInfo &TargetInfo = S.Context.getTargetInfo();
enum ErrType { Feature = 0, Architecture = 1 };
- if (!ParseInfo.Architecture.empty() &&
- !TargetInfo.validateCpuIs(ParseInfo.Architecture)) {
- S.Diag(FD->getLocation(), diag::err_bad_multiversion_option)
- << Architecture << ParseInfo.Architecture;
- return true;
- }
-
- for (const auto &Feat : ParseInfo.Features) {
- auto BareFeat = StringRef{Feat}.substr(1);
- if (Feat[0] == '-') {
+ if (TA) {
+ ParsedTargetAttr ParseInfo =
+ S.getASTContext().getTargetInfo().parseTargetAttr(TA->getFeaturesStr());
+ if (!ParseInfo.CPU.empty() && !TargetInfo.validateCpuIs(ParseInfo.CPU)) {
S.Diag(FD->getLocation(), diag::err_bad_multiversion_option)
- << Feature << ("no-" + BareFeat).str();
+ << Architecture << ParseInfo.CPU;
return true;
}
+ for (const auto &Feat : ParseInfo.Features) {
+ auto BareFeat = StringRef{Feat}.substr(1);
+ if (Feat[0] == '-') {
+ S.Diag(FD->getLocation(), diag::err_bad_multiversion_option)
+ << Feature << ("no-" + BareFeat).str();
+ return true;
+ }
- if (!TargetInfo.validateCpuSupports(BareFeat) ||
- !TargetInfo.isValidFeatureName(BareFeat)) {
- S.Diag(FD->getLocation(), diag::err_bad_multiversion_option)
- << Feature << BareFeat;
- return true;
+ if (!TargetInfo.validateCpuSupports(BareFeat) ||
+ !TargetInfo.isValidFeatureName(BareFeat)) {
+ S.Diag(FD->getLocation(), diag::err_bad_multiversion_option)
+ << Feature << BareFeat;
+ return true;
+ }
+ }
+ }
+
+ if (TVA) {
+ llvm::SmallVector<StringRef, 8> Feats;
+ TVA->getFeatures(Feats);
+ for (const auto &Feat : Feats) {
+ if (!TargetInfo.validateCpuSupports(Feat)) {
+ S.Diag(FD->getLocation(), diag::err_bad_multiversion_option)
+ << Feature << Feat;
+ return true;
+ }
}
}
return false;
@@ -10232,14 +11134,14 @@ static bool CheckMultiVersionValue(Sema &S, const FunctionDecl *FD) {
// Provide a white-list of attributes that are allowed to be combined with
// multiversion functions.
static bool AttrCompatibleWithMultiVersion(attr::Kind Kind,
- MultiVersionKind MVType) {
+ MultiVersionKind MVKind) {
// Note: this list/diagnosis must match the list in
// checkMultiversionAttributesAllSame.
switch (Kind) {
default:
return false;
case attr::Used:
- return MVType == MultiVersionKind::Target;
+ return MVKind == MultiVersionKind::Target;
case attr::NonNull:
case attr::NoThrow:
return true;
@@ -10249,14 +11151,10 @@ static bool AttrCompatibleWithMultiVersion(attr::Kind Kind,
static bool checkNonMultiVersionCompatAttributes(Sema &S,
const FunctionDecl *FD,
const FunctionDecl *CausedFD,
- MultiVersionKind MVType) {
- bool IsCPUSpecificCPUDispatchMVType =
- MVType == MultiVersionKind::CPUDispatch ||
- MVType == MultiVersionKind::CPUSpecific;
- const auto Diagnose = [FD, CausedFD, IsCPUSpecificCPUDispatchMVType](
- Sema &S, const Attr *A) {
+ MultiVersionKind MVKind) {
+ const auto Diagnose = [FD, CausedFD, MVKind](Sema &S, const Attr *A) {
S.Diag(FD->getLocation(), diag::err_multiversion_disallowed_other_attr)
- << IsCPUSpecificCPUDispatchMVType << A;
+ << static_cast<unsigned>(MVKind) << A;
if (CausedFD)
S.Diag(CausedFD->getLocation(), diag::note_multiversioning_caused_here);
return true;
@@ -10266,16 +11164,24 @@ static bool checkNonMultiVersionCompatAttributes(Sema &S,
switch (A->getKind()) {
case attr::CPUDispatch:
case attr::CPUSpecific:
- if (MVType != MultiVersionKind::CPUDispatch &&
- MVType != MultiVersionKind::CPUSpecific)
+ if (MVKind != MultiVersionKind::CPUDispatch &&
+ MVKind != MultiVersionKind::CPUSpecific)
return Diagnose(S, A);
break;
case attr::Target:
- if (MVType != MultiVersionKind::Target)
+ if (MVKind != MultiVersionKind::Target)
+ return Diagnose(S, A);
+ break;
+ case attr::TargetVersion:
+ if (MVKind != MultiVersionKind::TargetVersion)
+ return Diagnose(S, A);
+ break;
+ case attr::TargetClones:
+ if (MVKind != MultiVersionKind::TargetClones)
return Diagnose(S, A);
break;
default:
- if (!AttrCompatibleWithMultiVersion(A->getKind(), MVType))
+ if (!AttrCompatibleWithMultiVersion(A->getKind(), MVKind))
return Diagnose(S, A);
break;
}
@@ -10300,14 +11206,15 @@ bool Sema::areMultiversionVariantFunctionsCompatible(
DefaultedFuncs = 6,
ConstexprFuncs = 7,
ConstevalFuncs = 8,
+ Lambda = 9,
};
enum Different {
CallingConv = 0,
ReturnType = 1,
ConstexprSpec = 2,
InlineSpec = 3,
- StorageClass = 4,
- Linkage = 5,
+ Linkage = 4,
+ LanguageLinkage = 5,
};
if (NoProtoDiagID.getDiagID() != 0 && OldFD &&
@@ -10381,11 +11288,11 @@ bool Sema::areMultiversionVariantFunctionsCompatible(
if (OldFD->isInlineSpecified() != NewFD->isInlineSpecified())
return Diag(DiffDiagIDAt.first, DiffDiagIDAt.second) << InlineSpec;
- if (OldFD->getStorageClass() != NewFD->getStorageClass())
- return Diag(DiffDiagIDAt.first, DiffDiagIDAt.second) << StorageClass;
+ if (OldFD->getFormalLinkage() != NewFD->getFormalLinkage())
+ return Diag(DiffDiagIDAt.first, DiffDiagIDAt.second) << Linkage;
if (!CLinkageMayDiffer && OldFD->isExternC() != NewFD->isExternC())
- return Diag(DiffDiagIDAt.first, DiffDiagIDAt.second) << Linkage;
+ return Diag(DiffDiagIDAt.first, DiffDiagIDAt.second) << LanguageLinkage;
if (CheckEquivalentExceptionSpec(
OldFD->getType()->getAs<FunctionProtoType>(), OldFD->getLocation(),
@@ -10398,7 +11305,7 @@ bool Sema::areMultiversionVariantFunctionsCompatible(
static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
const FunctionDecl *NewFD,
bool CausesMV,
- MultiVersionKind MVType) {
+ MultiVersionKind MVKind) {
if (!S.getASTContext().getTargetInfo().supportsMultiVersioning()) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_not_supported);
if (OldFD)
@@ -10406,15 +11313,15 @@ static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
return true;
}
- bool IsCPUSpecificCPUDispatchMVType =
- MVType == MultiVersionKind::CPUDispatch ||
- MVType == MultiVersionKind::CPUSpecific;
+ bool IsCPUSpecificCPUDispatchMVKind =
+ MVKind == MultiVersionKind::CPUDispatch ||
+ MVKind == MultiVersionKind::CPUSpecific;
if (CausesMV && OldFD &&
- checkNonMultiVersionCompatAttributes(S, OldFD, NewFD, MVType))
+ checkNonMultiVersionCompatAttributes(S, OldFD, NewFD, MVKind))
return true;
- if (checkNonMultiVersionCompatAttributes(S, NewFD, nullptr, MVType))
+ if (checkNonMultiVersionCompatAttributes(S, NewFD, nullptr, MVKind))
return true;
// Only allow transition to MultiVersion if it hasn't been used.
@@ -10427,11 +11334,11 @@ static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
S.PDiag(diag::note_multiversioning_caused_here)),
PartialDiagnosticAt(NewFD->getLocation(),
S.PDiag(diag::err_multiversion_doesnt_support)
- << IsCPUSpecificCPUDispatchMVType),
+ << static_cast<unsigned>(MVKind)),
PartialDiagnosticAt(NewFD->getLocation(),
S.PDiag(diag::err_multiversion_diff)),
/*TemplatesSupported=*/false,
- /*ConstexprSupported=*/!IsCPUSpecificCPUDispatchMVType,
+ /*ConstexprSupported=*/!IsCPUSpecificCPUDispatchMVKind,
/*CLinkageMayDiffer=*/false);
}
@@ -10441,23 +11348,23 @@ static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
/// This sets NewFD->isInvalidDecl() to true if there was an error.
///
/// Returns true if there was an error, false otherwise.
-static bool CheckMultiVersionFirstFunction(Sema &S, FunctionDecl *FD,
- MultiVersionKind MVType,
- const TargetAttr *TA) {
- assert(MVType != MultiVersionKind::None &&
+static bool CheckMultiVersionFirstFunction(Sema &S, FunctionDecl *FD) {
+ MultiVersionKind MVKind = FD->getMultiVersionKind();
+ assert(MVKind != MultiVersionKind::None &&
"Function lacks multiversion attribute");
-
- // Target only causes MV if it is default, otherwise this is a normal
- // function.
- if (MVType == MultiVersionKind::Target && !TA->isDefaultVersion())
+ const auto *TA = FD->getAttr<TargetAttr>();
+ const auto *TVA = FD->getAttr<TargetVersionAttr>();
+ // Target and target_version only causes MV if it is default, otherwise this
+ // is a normal function.
+ if ((TA && !TA->isDefaultVersion()) || (TVA && !TVA->isDefaultVersion()))
return false;
- if (MVType == MultiVersionKind::Target && CheckMultiVersionValue(S, FD)) {
+ if ((TA || TVA) && CheckMultiVersionValue(S, FD)) {
FD->setInvalidDecl();
return true;
}
- if (CheckMultiVersionAdditionalRules(S, nullptr, FD, true, MVType)) {
+ if (CheckMultiVersionAdditionalRules(S, nullptr, FD, true, MVKind)) {
FD->setInvalidDecl();
return true;
}
@@ -10475,31 +11382,27 @@ static bool PreviousDeclsHaveMultiVersionAttribute(const FunctionDecl *FD) {
return false;
}
-static bool CheckTargetCausesMultiVersioning(
- Sema &S, FunctionDecl *OldFD, FunctionDecl *NewFD, const TargetAttr *NewTA,
- bool &Redeclaration, NamedDecl *&OldDecl, bool &MergeTypeWithPrevious,
- LookupResult &Previous) {
+static bool CheckTargetCausesMultiVersioning(Sema &S, FunctionDecl *OldFD,
+ FunctionDecl *NewFD,
+ bool &Redeclaration,
+ NamedDecl *&OldDecl,
+ LookupResult &Previous) {
+ const auto *NewTA = NewFD->getAttr<TargetAttr>();
+ const auto *NewTVA = NewFD->getAttr<TargetVersionAttr>();
const auto *OldTA = OldFD->getAttr<TargetAttr>();
- ParsedTargetAttr NewParsed = NewTA->parse();
- // Sort order doesn't matter, it just needs to be consistent.
- llvm::sort(NewParsed.Features);
-
+ const auto *OldTVA = OldFD->getAttr<TargetVersionAttr>();
// If the old decl is NOT MultiVersioned yet, and we don't cause that
// to change, this is a simple redeclaration.
- if (!NewTA->isDefaultVersion() &&
- (!OldTA || OldTA->getFeaturesStr() == NewTA->getFeaturesStr()))
+ if ((NewTA && !NewTA->isDefaultVersion() &&
+ (!OldTA || OldTA->getFeaturesStr() == NewTA->getFeaturesStr())) ||
+ (NewTVA && !NewTVA->isDefaultVersion() &&
+ (!OldTVA || OldTVA->getName() == NewTVA->getName())))
return false;
// Otherwise, this decl causes MultiVersioning.
- if (!S.getASTContext().getTargetInfo().supportsMultiVersioning()) {
- S.Diag(NewFD->getLocation(), diag::err_multiversion_not_supported);
- S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
- NewFD->setInvalidDecl();
- return true;
- }
-
if (CheckMultiVersionAdditionalRules(S, OldFD, NewFD, true,
- MultiVersionKind::Target)) {
+ NewTVA ? MultiVersionKind::TargetVersion
+ : MultiVersionKind::Target)) {
NewFD->setInvalidDecl();
return true;
}
@@ -10510,7 +11413,9 @@ static bool CheckTargetCausesMultiVersioning(
}
// If this is 'default', permit the forward declaration.
- if (!OldFD->isMultiVersion() && !OldTA && NewTA->isDefaultVersion()) {
+ if (!OldFD->isMultiVersion() &&
+ ((NewTA && NewTA->isDefaultVersion() && !OldTA) ||
+ (NewTVA && NewTVA->isDefaultVersion() && !OldTVA))) {
Redeclaration = true;
OldDecl = OldFD;
OldFD->setIsMultiVersion();
@@ -10524,23 +11429,50 @@ static bool CheckTargetCausesMultiVersioning(
return true;
}
- ParsedTargetAttr OldParsed = OldTA->parse(std::less<std::string>());
+ if (NewTA) {
+ ParsedTargetAttr OldParsed =
+ S.getASTContext().getTargetInfo().parseTargetAttr(
+ OldTA->getFeaturesStr());
+ llvm::sort(OldParsed.Features);
+ ParsedTargetAttr NewParsed =
+ S.getASTContext().getTargetInfo().parseTargetAttr(
+ NewTA->getFeaturesStr());
+ // Sort order doesn't matter, it just needs to be consistent.
+ llvm::sort(NewParsed.Features);
+ if (OldParsed == NewParsed) {
+ S.Diag(NewFD->getLocation(), diag::err_multiversion_duplicate);
+ S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+ }
- if (OldParsed == NewParsed) {
- S.Diag(NewFD->getLocation(), diag::err_multiversion_duplicate);
- S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
- NewFD->setInvalidDecl();
- return true;
+ if (NewTVA) {
+ llvm::SmallVector<StringRef, 8> Feats;
+ OldTVA->getFeatures(Feats);
+ llvm::sort(Feats);
+ llvm::SmallVector<StringRef, 8> NewFeats;
+ NewTVA->getFeatures(NewFeats);
+ llvm::sort(NewFeats);
+
+ if (Feats == NewFeats) {
+ S.Diag(NewFD->getLocation(), diag::err_multiversion_duplicate);
+ S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ return true;
+ }
}
for (const auto *FD : OldFD->redecls()) {
const auto *CurTA = FD->getAttr<TargetAttr>();
+ const auto *CurTVA = FD->getAttr<TargetVersionAttr>();
// We allow forward declarations before ANY multiversioning attributes, but
// nothing after the fact.
if (PreviousDeclsHaveMultiVersionAttribute(FD) &&
- (!CurTA || CurTA->isInherited())) {
+ ((NewTA && (!CurTA || CurTA->isInherited())) ||
+ (NewTVA && (!CurTVA || CurTVA->isInherited())))) {
S.Diag(FD->getLocation(), diag::err_multiversion_required_in_redecl)
- << 0;
+ << (NewTA ? 0 : 2);
S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
NewFD->setInvalidDecl();
return true;
@@ -10550,27 +11482,35 @@ static bool CheckTargetCausesMultiVersioning(
OldFD->setIsMultiVersion();
NewFD->setIsMultiVersion();
Redeclaration = false;
- MergeTypeWithPrevious = false;
OldDecl = nullptr;
Previous.clear();
return false;
}
+static bool MultiVersionTypesCompatible(MultiVersionKind Old,
+ MultiVersionKind New) {
+ if (Old == New || Old == MultiVersionKind::None ||
+ New == MultiVersionKind::None)
+ return true;
+
+ return (Old == MultiVersionKind::CPUDispatch &&
+ New == MultiVersionKind::CPUSpecific) ||
+ (Old == MultiVersionKind::CPUSpecific &&
+ New == MultiVersionKind::CPUDispatch);
+}
+
/// Check the validity of a new function declaration being added to an existing
/// multiversioned declaration collection.
static bool CheckMultiVersionAdditionalDecl(
Sema &S, FunctionDecl *OldFD, FunctionDecl *NewFD,
- MultiVersionKind NewMVType, const TargetAttr *NewTA,
- const CPUDispatchAttr *NewCPUDisp, const CPUSpecificAttr *NewCPUSpec,
- bool &Redeclaration, NamedDecl *&OldDecl, bool &MergeTypeWithPrevious,
- LookupResult &Previous) {
-
- MultiVersionKind OldMVType = OldFD->getMultiVersionKind();
+ MultiVersionKind NewMVKind, const CPUDispatchAttr *NewCPUDisp,
+ const CPUSpecificAttr *NewCPUSpec, const TargetClonesAttr *NewClones,
+ bool &Redeclaration, NamedDecl *&OldDecl, LookupResult &Previous) {
+ const auto *NewTA = NewFD->getAttr<TargetAttr>();
+ const auto *NewTVA = NewFD->getAttr<TargetVersionAttr>();
+ MultiVersionKind OldMVKind = OldFD->getMultiVersionKind();
// Disallow mixing of multiversioning types.
- if ((OldMVType == MultiVersionKind::Target &&
- NewMVType != MultiVersionKind::Target) ||
- (NewMVType == MultiVersionKind::Target &&
- OldMVType != MultiVersionKind::Target)) {
+ if (!MultiVersionTypesCompatible(OldMVKind, NewMVKind)) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_types_mixed);
S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
@@ -10579,23 +11519,51 @@ static bool CheckMultiVersionAdditionalDecl(
ParsedTargetAttr NewParsed;
if (NewTA) {
- NewParsed = NewTA->parse();
+ NewParsed = S.getASTContext().getTargetInfo().parseTargetAttr(
+ NewTA->getFeaturesStr());
llvm::sort(NewParsed.Features);
}
+ llvm::SmallVector<StringRef, 8> NewFeats;
+ if (NewTVA) {
+ NewTVA->getFeatures(NewFeats);
+ llvm::sort(NewFeats);
+ }
bool UseMemberUsingDeclRules =
S.CurContext->isRecord() && !NewFD->getFriendObjectKind();
- // Next, check ALL non-overloads to see if this is a redeclaration of a
- // previous member of the MultiVersion set.
+ bool MayNeedOverloadableChecks =
+ AllowOverloadingOfFunction(Previous, S.Context, NewFD);
+
+ // Next, check ALL non-invalid non-overloads to see if this is a redeclaration
+ // of a previous member of the MultiVersion set.
for (NamedDecl *ND : Previous) {
FunctionDecl *CurFD = ND->getAsFunction();
- if (!CurFD)
+ if (!CurFD || CurFD->isInvalidDecl())
continue;
- if (S.IsOverload(NewFD, CurFD, UseMemberUsingDeclRules))
+ if (MayNeedOverloadableChecks &&
+ S.IsOverload(NewFD, CurFD, UseMemberUsingDeclRules))
continue;
- if (NewMVType == MultiVersionKind::Target) {
+ if (NewMVKind == MultiVersionKind::None &&
+ OldMVKind == MultiVersionKind::TargetVersion) {
+ NewFD->addAttr(TargetVersionAttr::CreateImplicit(
+ S.Context, "default", NewFD->getSourceRange()));
+ NewFD->setIsMultiVersion();
+ NewMVKind = MultiVersionKind::TargetVersion;
+ if (!NewTVA) {
+ NewTVA = NewFD->getAttr<TargetVersionAttr>();
+ NewTVA->getFeatures(NewFeats);
+ llvm::sort(NewFeats);
+ }
+ }
+
+ switch (NewMVKind) {
+ case MultiVersionKind::None:
+ assert(OldMVKind == MultiVersionKind::TargetClones &&
+ "Only target_clones can be omitted in subsequent declarations");
+ break;
+ case MultiVersionKind::Target: {
const auto *CurTA = CurFD->getAttr<TargetAttr>();
if (CurTA->getFeaturesStr() == NewTA->getFeaturesStr()) {
NewFD->setIsMultiVersion();
@@ -10604,20 +11572,66 @@ static bool CheckMultiVersionAdditionalDecl(
return false;
}
- ParsedTargetAttr CurParsed = CurTA->parse(std::less<std::string>());
+ ParsedTargetAttr CurParsed =
+ S.getASTContext().getTargetInfo().parseTargetAttr(
+ CurTA->getFeaturesStr());
+ llvm::sort(CurParsed.Features);
if (CurParsed == NewParsed) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_duplicate);
S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
return true;
}
- } else {
+ break;
+ }
+ case MultiVersionKind::TargetVersion: {
+ const auto *CurTVA = CurFD->getAttr<TargetVersionAttr>();
+ if (CurTVA->getName() == NewTVA->getName()) {
+ NewFD->setIsMultiVersion();
+ Redeclaration = true;
+ OldDecl = ND;
+ return false;
+ }
+ llvm::SmallVector<StringRef, 8> CurFeats;
+ if (CurTVA) {
+ CurTVA->getFeatures(CurFeats);
+ llvm::sort(CurFeats);
+ }
+ if (CurFeats == NewFeats) {
+ S.Diag(NewFD->getLocation(), diag::err_multiversion_duplicate);
+ S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+ break;
+ }
+ case MultiVersionKind::TargetClones: {
+ const auto *CurClones = CurFD->getAttr<TargetClonesAttr>();
+ Redeclaration = true;
+ OldDecl = CurFD;
+ NewFD->setIsMultiVersion();
+
+ if (CurClones && NewClones &&
+ (CurClones->featuresStrs_size() != NewClones->featuresStrs_size() ||
+ !std::equal(CurClones->featuresStrs_begin(),
+ CurClones->featuresStrs_end(),
+ NewClones->featuresStrs_begin()))) {
+ S.Diag(NewFD->getLocation(), diag::err_target_clone_doesnt_match);
+ S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ return true;
+ }
+
+ return false;
+ }
+ case MultiVersionKind::CPUSpecific:
+ case MultiVersionKind::CPUDispatch: {
const auto *CurCPUSpec = CurFD->getAttr<CPUSpecificAttr>();
const auto *CurCPUDisp = CurFD->getAttr<CPUDispatchAttr>();
// Handle CPUDispatch/CPUSpecific versions.
// Only 1 CPUDispatch function is allowed, this will make it go through
// the redeclaration errors.
- if (NewMVType == MultiVersionKind::CPUDispatch &&
+ if (NewMVKind == MultiVersionKind::CPUDispatch &&
CurFD->hasAttr<CPUDispatchAttr>()) {
if (CurCPUDisp->cpus_size() == NewCPUDisp->cpus_size() &&
std::equal(
@@ -10638,8 +11652,7 @@ static bool CheckMultiVersionAdditionalDecl(
NewFD->setInvalidDecl();
return true;
}
- if (NewMVType == MultiVersionKind::CPUSpecific && CurCPUSpec) {
-
+ if (NewMVKind == MultiVersionKind::CPUSpecific && CurCPUSpec) {
if (CurCPUSpec->cpus_size() == NewCPUSpec->cpus_size() &&
std::equal(
CurCPUSpec->cpus_begin(), CurCPUSpec->cpus_end(),
@@ -10666,22 +11679,23 @@ static bool CheckMultiVersionAdditionalDecl(
}
}
}
- // If the two decls aren't the same MVType, there is no possible error
- // condition.
+ break;
+ }
}
}
// Else, this is simply a non-redecl case. Checking the 'value' is only
// necessary in the Target case, since The CPUSpecific/Dispatch cases are
// handled in the attribute adding step.
- if (NewMVType == MultiVersionKind::Target &&
+ if ((NewMVKind == MultiVersionKind::TargetVersion ||
+ NewMVKind == MultiVersionKind::Target) &&
CheckMultiVersionValue(S, NewFD)) {
NewFD->setInvalidDecl();
return true;
}
if (CheckMultiVersionAdditionalRules(S, OldFD, NewFD,
- !OldFD->isMultiVersion(), NewMVType)) {
+ !OldFD->isMultiVersion(), NewMVKind)) {
NewFD->setInvalidDecl();
return true;
}
@@ -10697,13 +11711,11 @@ static bool CheckMultiVersionAdditionalDecl(
NewFD->setIsMultiVersion();
Redeclaration = false;
- MergeTypeWithPrevious = false;
OldDecl = nullptr;
Previous.clear();
return false;
}
-
/// Check the validity of a mulitversion function declaration.
/// Also sets the multiversion'ness' of the function itself.
///
@@ -10712,28 +11724,22 @@ static bool CheckMultiVersionAdditionalDecl(
/// Returns true if there was an error, false otherwise.
static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
bool &Redeclaration, NamedDecl *&OldDecl,
- bool &MergeTypeWithPrevious,
LookupResult &Previous) {
const auto *NewTA = NewFD->getAttr<TargetAttr>();
+ const auto *NewTVA = NewFD->getAttr<TargetVersionAttr>();
const auto *NewCPUDisp = NewFD->getAttr<CPUDispatchAttr>();
const auto *NewCPUSpec = NewFD->getAttr<CPUSpecificAttr>();
-
- // Mixing Multiversioning types is prohibited.
- if ((NewTA && NewCPUDisp) || (NewTA && NewCPUSpec) ||
- (NewCPUDisp && NewCPUSpec)) {
- S.Diag(NewFD->getLocation(), diag::err_multiversion_types_mixed);
- NewFD->setInvalidDecl();
- return true;
- }
-
- MultiVersionKind MVType = NewFD->getMultiVersionKind();
+ const auto *NewClones = NewFD->getAttr<TargetClonesAttr>();
+ MultiVersionKind MVKind = NewFD->getMultiVersionKind();
// Main isn't allowed to become a multiversion function, however it IS
- // permitted to have 'main' be marked with the 'target' optimization hint.
+ // permitted to have 'main' be marked with the 'target' optimization hint,
+ // for 'target_version' only default is allowed.
if (NewFD->isMain()) {
- if ((MVType == MultiVersionKind::Target && NewTA->isDefaultVersion()) ||
- MVType == MultiVersionKind::CPUDispatch ||
- MVType == MultiVersionKind::CPUSpecific) {
+ if (MVKind != MultiVersionKind::None &&
+ !(MVKind == MultiVersionKind::Target && !NewTA->isDefaultVersion()) &&
+ !(MVKind == MultiVersionKind::TargetVersion &&
+ NewTVA->isDefaultVersion())) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_not_allowed_on_main);
NewFD->setInvalidDecl();
return true;
@@ -10741,40 +11747,105 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
return false;
}
+ // Target attribute on AArch64 is not used for multiversioning
+ if (NewTA && S.getASTContext().getTargetInfo().getTriple().isAArch64())
+ return false;
+
if (!OldDecl || !OldDecl->getAsFunction() ||
OldDecl->getDeclContext()->getRedeclContext() !=
NewFD->getDeclContext()->getRedeclContext()) {
// If there's no previous declaration, AND this isn't attempting to cause
// multiversioning, this isn't an error condition.
- if (MVType == MultiVersionKind::None)
+ if (MVKind == MultiVersionKind::None)
return false;
- return CheckMultiVersionFirstFunction(S, NewFD, MVType, NewTA);
+ return CheckMultiVersionFirstFunction(S, NewFD);
}
FunctionDecl *OldFD = OldDecl->getAsFunction();
- if (!OldFD->isMultiVersion() && MVType == MultiVersionKind::None)
- return false;
+ if (!OldFD->isMultiVersion() && MVKind == MultiVersionKind::None) {
+ if (NewTVA || !OldFD->getAttr<TargetVersionAttr>())
+ return false;
+ if (!NewFD->getType()->getAs<FunctionProtoType>()) {
+ // Multiversion declaration doesn't have prototype.
+ S.Diag(NewFD->getLocation(), diag::err_multiversion_noproto);
+ NewFD->setInvalidDecl();
+ } else {
+ // No "target_version" attribute is equivalent to "default" attribute.
+ NewFD->addAttr(TargetVersionAttr::CreateImplicit(
+ S.Context, "default", NewFD->getSourceRange()));
+ NewFD->setIsMultiVersion();
+ OldFD->setIsMultiVersion();
+ OldDecl = OldFD;
+ Redeclaration = true;
+ }
+ return true;
+ }
- if (OldFD->isMultiVersion() && MVType == MultiVersionKind::None) {
+ // Multiversioned redeclarations aren't allowed to omit the attribute, except
+ // for target_clones and target_version.
+ if (OldFD->isMultiVersion() && MVKind == MultiVersionKind::None &&
+ OldFD->getMultiVersionKind() != MultiVersionKind::TargetClones &&
+ OldFD->getMultiVersionKind() != MultiVersionKind::TargetVersion) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_required_in_redecl)
<< (OldFD->getMultiVersionKind() != MultiVersionKind::Target);
NewFD->setInvalidDecl();
return true;
}
- // Handle the target potentially causes multiversioning case.
- if (!OldFD->isMultiVersion() && MVType == MultiVersionKind::Target)
- return CheckTargetCausesMultiVersioning(S, OldFD, NewFD, NewTA,
- Redeclaration, OldDecl,
- MergeTypeWithPrevious, Previous);
+ if (!OldFD->isMultiVersion()) {
+ switch (MVKind) {
+ case MultiVersionKind::Target:
+ case MultiVersionKind::TargetVersion:
+ return CheckTargetCausesMultiVersioning(S, OldFD, NewFD, Redeclaration,
+ OldDecl, Previous);
+ case MultiVersionKind::TargetClones:
+ if (OldFD->isUsed(false)) {
+ NewFD->setInvalidDecl();
+ return S.Diag(NewFD->getLocation(), diag::err_multiversion_after_used);
+ }
+ OldFD->setIsMultiVersion();
+ break;
+
+ case MultiVersionKind::CPUDispatch:
+ case MultiVersionKind::CPUSpecific:
+ case MultiVersionKind::None:
+ break;
+ }
+ }
// At this point, we have a multiversion function decl (in OldFD) AND an
// appropriate attribute in the current function decl. Resolve that these are
// still compatible with previous declarations.
- return CheckMultiVersionAdditionalDecl(
- S, OldFD, NewFD, MVType, NewTA, NewCPUDisp, NewCPUSpec, Redeclaration,
- OldDecl, MergeTypeWithPrevious, Previous);
+ return CheckMultiVersionAdditionalDecl(S, OldFD, NewFD, MVKind, NewCPUDisp,
+ NewCPUSpec, NewClones, Redeclaration,
+ OldDecl, Previous);
+}
+
+static void CheckConstPureAttributesUsage(Sema &S, FunctionDecl *NewFD) {
+ bool IsPure = NewFD->hasAttr<PureAttr>();
+ bool IsConst = NewFD->hasAttr<ConstAttr>();
+
+ // If there are no pure or const attributes, there's nothing to check.
+ if (!IsPure && !IsConst)
+ return;
+
+ // If the function is marked both pure and const, we retain the const
+ // attribute because it makes stronger guarantees than the pure attribute, and
+ // we drop the pure attribute explicitly to prevent later confusion about
+ // semantics.
+ if (IsPure && IsConst) {
+ S.Diag(NewFD->getLocation(), diag::warn_const_attr_with_pure_attr);
+ NewFD->dropAttrs<PureAttr>();
+ }
+
+ // Constructors and destructors are functions which return void, so are
+ // handled here as well.
+ if (NewFD->getReturnType()->isVoidType()) {
+ S.Diag(NewFD->getLocation(), diag::warn_pure_function_returns_void)
+ << IsConst;
+ NewFD->dropAttrs<PureAttr, ConstAttr>();
+ }
}
/// Perform semantic checking of a new function declaration.
@@ -10796,7 +11867,8 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
/// \returns true if the function declaration is a redeclaration.
bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
LookupResult &Previous,
- bool IsMemberSpecialization) {
+ bool IsMemberSpecialization,
+ bool DeclIsDefn) {
assert(!NewFD->getReturnType()->isVariablyModifiedType() &&
"Variably modified return types are not handled here");
@@ -10864,8 +11936,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
}
}
- if (CheckMultiVersionFunction(*this, NewFD, Redeclaration, OldDecl,
- MergeTypeWithPrevious, Previous))
+ if (CheckMultiVersionFunction(*this, NewFD, Redeclaration, OldDecl, Previous))
return Redeclaration;
// PPC MMA non-pointer types are not allowed as function return types.
@@ -10874,6 +11945,8 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
NewFD->setInvalidDecl();
}
+ CheckConstPureAttributesUsage(*this, NewFD);
+
// C++11 [dcl.constexpr]p8:
// A constexpr specifier for a non-static member function that is not
// a constructor declares that member function to be const.
@@ -10915,7 +11988,8 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
if (Redeclaration) {
// NewFD and OldDecl represent declarations that need to be
// merged.
- if (MergeFunctionDecl(NewFD, OldDecl, S, MergeTypeWithPrevious)) {
+ if (MergeFunctionDecl(NewFD, OldDecl, S, MergeTypeWithPrevious,
+ DeclIsDefn)) {
NewFD->setInvalidDecl();
return Redeclaration;
}
@@ -11001,16 +12075,20 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(NewFD)) {
CheckConstructor(Constructor);
} else if (CXXDestructorDecl *Destructor =
- dyn_cast<CXXDestructorDecl>(NewFD)) {
- CXXRecordDecl *Record = Destructor->getParent();
- QualType ClassType = Context.getTypeDeclType(Record);
-
- // FIXME: Shouldn't we be able to perform this check even when the class
- // type is dependent? Both gcc and edg can handle that.
- if (!ClassType->isDependentType()) {
- DeclarationName Name
- = Context.DeclarationNames.getCXXDestructorName(
- Context.getCanonicalType(ClassType));
+ dyn_cast<CXXDestructorDecl>(NewFD)) {
+ // We check here for invalid destructor names.
+ // If we have a friend destructor declaration that is dependent, we can't
+ // diagnose right away because cases like this are still valid:
+ // template <class T> struct A { friend T::X::~Y(); };
+ // struct B { struct Y { ~Y(); }; using X = Y; };
+ // template struct A<B>;
+ if (NewFD->getFriendObjectKind() == Decl::FriendObjectKind::FOK_None ||
+ !Destructor->getFunctionObjectParameterType()->isDependentType()) {
+ CXXRecordDecl *Record = Destructor->getParent();
+ QualType ClassType = Context.getTypeDeclType(Record);
+
+ DeclarationName Name = Context.DeclarationNames.getCXXDestructorName(
+ Context.getCanonicalType(ClassType));
if (NewFD->getDeclName() != Name) {
Diag(NewFD->getLocation(), diag::err_destructor_name);
NewFD->setInvalidDecl();
@@ -11045,6 +12123,55 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
checkThisInStaticMemberFunctionType(Method);
}
+ if (Expr *TRC = NewFD->getTrailingRequiresClause()) {
+ // C++20: dcl.decl.general p4:
+ // The optional requires-clause ([temp.pre]) in an init-declarator or
+ // member-declarator shall be present only if the declarator declares a
+ // templated function ([dcl.fct]).
+ //
+ // [temp.pre]/8:
+ // An entity is templated if it is
+ // - a template,
+ // - an entity defined ([basic.def]) or created ([class.temporary]) in a
+ // templated entity,
+ // - a member of a templated entity,
+ // - an enumerator for an enumeration that is a templated entity, or
+ // - the closure type of a lambda-expression ([expr.prim.lambda.closure])
+ // appearing in the declaration of a templated entity. [Note 6: A local
+ // class, a local or block variable, or a friend function defined in a
+ // templated entity is a templated entity. — end note]
+ //
+ // A templated function is a function template or a function that is
+ // templated. A templated class is a class template or a class that is
+ // templated. A templated variable is a variable template or a variable
+ // that is templated.
+
+ bool IsTemplate = NewFD->getDescribedFunctionTemplate();
+ bool IsFriend = NewFD->getFriendObjectKind();
+ if (!IsTemplate && // -a template
+ // defined... in a templated entity
+ !(DeclIsDefn && NewFD->isTemplated()) &&
+ // a member of a templated entity
+ !(isa<CXXMethodDecl>(NewFD) && NewFD->isTemplated()) &&
+ // Don't complain about instantiations, they've already had these
+ // rules + others enforced.
+ !NewFD->isTemplateInstantiation() &&
+ // If the function violates [temp.friend]p9 because it is missing
+ // a definition, and adding a definition would make it templated,
+ // then let that error take precedence.
+ !(!DeclIsDefn && IsFriend && NewFD->isTemplated())) {
+ Diag(TRC->getBeginLoc(), diag::err_constrained_non_templated_function);
+ } else if (!DeclIsDefn && !IsTemplate && IsFriend &&
+ !NewFD->isTemplateInstantiation()) {
+ // C++ [temp.friend]p9:
+ // A non-template friend declaration with a requires-clause shall be a
+ // definition.
+ Diag(NewFD->getBeginLoc(),
+ diag::err_non_temp_friend_decl_with_requires_clause_must_be_def);
+ NewFD->setInvalidDecl();
+ }
+ }
+
if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(NewFD))
ActOnConversionDeclarator(Conversion);
@@ -11121,6 +12248,46 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
if (!Redeclaration && LangOpts.CUDA)
checkCUDATargetOverload(NewFD, Previous);
}
+
+ // Check if the function definition uses any AArch64 SME features without
+ // having the '+sme' feature enabled.
+ if (DeclIsDefn) {
+ const auto *Attr = NewFD->getAttr<ArmNewAttr>();
+ bool UsesSM = NewFD->hasAttr<ArmLocallyStreamingAttr>();
+ bool UsesZA = Attr && Attr->isNewZA();
+ bool UsesZT0 = Attr && Attr->isNewZT0();
+ if (const auto *FPT = NewFD->getType()->getAs<FunctionProtoType>()) {
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ UsesSM |=
+ EPI.AArch64SMEAttributes & FunctionType::SME_PStateSMEnabledMask;
+ UsesZA |= FunctionType::getArmZAState(EPI.AArch64SMEAttributes) !=
+ FunctionType::ARM_None;
+ UsesZT0 |= FunctionType::getArmZT0State(EPI.AArch64SMEAttributes) !=
+ FunctionType::ARM_None;
+ }
+
+ if (UsesSM || UsesZA) {
+ llvm::StringMap<bool> FeatureMap;
+ Context.getFunctionFeatureMap(FeatureMap, NewFD);
+ if (!FeatureMap.contains("sme")) {
+ if (UsesSM)
+ Diag(NewFD->getLocation(),
+ diag::err_sme_definition_using_sm_in_non_sme_target);
+ else
+ Diag(NewFD->getLocation(),
+ diag::err_sme_definition_using_za_in_non_sme_target);
+ }
+ }
+ if (UsesZT0) {
+ llvm::StringMap<bool> FeatureMap;
+ Context.getFunctionFeatureMap(FeatureMap, NewFD);
+ if (!FeatureMap.contains("sme2")) {
+ Diag(NewFD->getLocation(),
+ diag::err_sme_definition_using_zt0_in_non_sme2_target);
+ }
+ }
+ }
+
return Redeclaration;
}
@@ -11160,6 +12327,11 @@ void Sema::CheckMain(FunctionDecl* FD, const DeclSpec& DS) {
return;
}
+ // Functions named main in hlsl are default entries, but don't have specific
+ // signatures they are required to conform to.
+ if (getLangOpts().HLSL)
+ return;
+
QualType T = FD->getType();
assert(T->isFunctionType() && "function decl is not of function type");
const FunctionType* FT = T->castAs<FunctionType>();
@@ -11333,6 +12505,125 @@ void Sema::CheckMSVCRTEntryPoint(FunctionDecl *FD) {
}
}
+void Sema::ActOnHLSLTopLevelFunction(FunctionDecl *FD) {
+ auto &TargetInfo = getASTContext().getTargetInfo();
+
+ if (FD->getName() != TargetInfo.getTargetOpts().HLSLEntry)
+ return;
+
+ StringRef Env = TargetInfo.getTriple().getEnvironmentName();
+ HLSLShaderAttr::ShaderType ShaderType;
+ if (HLSLShaderAttr::ConvertStrToShaderType(Env, ShaderType)) {
+ if (const auto *Shader = FD->getAttr<HLSLShaderAttr>()) {
+ // The entry point is already annotated - check that it matches the
+ // triple.
+ if (Shader->getType() != ShaderType) {
+ Diag(Shader->getLocation(), diag::err_hlsl_entry_shader_attr_mismatch)
+ << Shader;
+ FD->setInvalidDecl();
+ }
+ } else {
+ // Implicitly add the shader attribute if the entry function isn't
+ // explicitly annotated.
+ FD->addAttr(HLSLShaderAttr::CreateImplicit(Context, ShaderType,
+ FD->getBeginLoc()));
+ }
+ } else {
+ switch (TargetInfo.getTriple().getEnvironment()) {
+ case llvm::Triple::UnknownEnvironment:
+ case llvm::Triple::Library:
+ break;
+ default:
+ llvm_unreachable("Unhandled environment in triple");
+ }
+ }
+}
+
+void Sema::CheckHLSLEntryPoint(FunctionDecl *FD) {
+ const auto *ShaderAttr = FD->getAttr<HLSLShaderAttr>();
+ assert(ShaderAttr && "Entry point has no shader attribute");
+ HLSLShaderAttr::ShaderType ST = ShaderAttr->getType();
+
+ switch (ST) {
+ case HLSLShaderAttr::Pixel:
+ case HLSLShaderAttr::Vertex:
+ case HLSLShaderAttr::Geometry:
+ case HLSLShaderAttr::Hull:
+ case HLSLShaderAttr::Domain:
+ case HLSLShaderAttr::RayGeneration:
+ case HLSLShaderAttr::Intersection:
+ case HLSLShaderAttr::AnyHit:
+ case HLSLShaderAttr::ClosestHit:
+ case HLSLShaderAttr::Miss:
+ case HLSLShaderAttr::Callable:
+ if (const auto *NT = FD->getAttr<HLSLNumThreadsAttr>()) {
+ DiagnoseHLSLAttrStageMismatch(NT, ST,
+ {HLSLShaderAttr::Compute,
+ HLSLShaderAttr::Amplification,
+ HLSLShaderAttr::Mesh});
+ FD->setInvalidDecl();
+ }
+ break;
+
+ case HLSLShaderAttr::Compute:
+ case HLSLShaderAttr::Amplification:
+ case HLSLShaderAttr::Mesh:
+ if (!FD->hasAttr<HLSLNumThreadsAttr>()) {
+ Diag(FD->getLocation(), diag::err_hlsl_missing_numthreads)
+ << HLSLShaderAttr::ConvertShaderTypeToStr(ST);
+ FD->setInvalidDecl();
+ }
+ break;
+ }
+
+ for (ParmVarDecl *Param : FD->parameters()) {
+ if (const auto *AnnotationAttr = Param->getAttr<HLSLAnnotationAttr>()) {
+ CheckHLSLSemanticAnnotation(FD, Param, AnnotationAttr);
+ } else {
+ // FIXME: Handle struct parameters where annotations are on struct fields.
+ // See: https://github.com/llvm/llvm-project/issues/57875
+ Diag(FD->getLocation(), diag::err_hlsl_missing_semantic_annotation);
+ Diag(Param->getLocation(), diag::note_previous_decl) << Param;
+ FD->setInvalidDecl();
+ }
+ }
+ // FIXME: Verify return type semantic annotation.
+}
+
+void Sema::CheckHLSLSemanticAnnotation(
+ FunctionDecl *EntryPoint, const Decl *Param,
+ const HLSLAnnotationAttr *AnnotationAttr) {
+ auto *ShaderAttr = EntryPoint->getAttr<HLSLShaderAttr>();
+ assert(ShaderAttr && "Entry point has no shader attribute");
+ HLSLShaderAttr::ShaderType ST = ShaderAttr->getType();
+
+ switch (AnnotationAttr->getKind()) {
+ case attr::HLSLSV_DispatchThreadID:
+ case attr::HLSLSV_GroupIndex:
+ if (ST == HLSLShaderAttr::Compute)
+ return;
+ DiagnoseHLSLAttrStageMismatch(AnnotationAttr, ST,
+ {HLSLShaderAttr::Compute});
+ break;
+ default:
+ llvm_unreachable("Unknown HLSLAnnotationAttr");
+ }
+}
+
+void Sema::DiagnoseHLSLAttrStageMismatch(
+ const Attr *A, HLSLShaderAttr::ShaderType Stage,
+ std::initializer_list<HLSLShaderAttr::ShaderType> AllowedStages) {
+ SmallVector<StringRef, 8> StageStrings;
+ llvm::transform(AllowedStages, std::back_inserter(StageStrings),
+ [](HLSLShaderAttr::ShaderType ST) {
+ return StringRef(
+ HLSLShaderAttr::ConvertShaderTypeToStr(ST));
+ });
+ Diag(A->getLoc(), diag::err_hlsl_attr_unsupported_in_stage)
+ << A << HLSLShaderAttr::ConvertShaderTypeToStr(Stage)
+ << (AllowedStages.size() != 1) << join(StageStrings, ", ");
+}
+
bool Sema::CheckForConstantInitializer(Expr *Init, QualType DclT) {
// FIXME: Need strict checking. In C89, we need to check for
// any assignment, increment, decrement, function-calls, or
@@ -11399,7 +12690,7 @@ namespace {
// Track and increment the index here.
isInitList = true;
InitFieldIndex.push_back(0);
- for (auto Child : InitList->children()) {
+ for (auto *Child : InitList->children()) {
CheckExpr(cast<Expr>(Child));
++InitFieldIndex.back();
}
@@ -11482,7 +12773,8 @@ namespace {
}
if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) {
- HandleValue(OVE->getSourceExpr());
+ if (Expr *SE = OVE->getSourceExpr())
+ HandleValue(SE);
return;
}
@@ -11711,6 +13003,15 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
DeducedType *Deduced = Type->getContainedDeducedType();
assert(Deduced && "deduceVarTypeFromInitializer for non-deduced type");
+ // Diagnose auto array declarations in C23, unless it's a supported extension.
+ if (getLangOpts().C23 && Type->isArrayType() &&
+ !isa_and_present<StringLiteral, InitListExpr>(Init)) {
+ Diag(Range.getBegin(), diag::err_auto_not_allowed)
+ << (int)Deduced->getContainedAutoType()->getKeyword()
+ << /*in array decl*/ 23 << Range;
+ return QualType();
+ }
+
// C++11 [dcl.spec.auto]p3
if (!Init) {
assert(VDecl && "no init for init capture deduction?");
@@ -11730,10 +13031,9 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
if (Init)
DeduceInits = Init;
- if (DirectInit) {
- if (auto *PL = dyn_cast_or_null<ParenListExpr>(Init))
- DeduceInits = PL->exprs();
- }
+ auto *PL = dyn_cast_if_present<ParenListExpr>(Init);
+ if (DirectInit && PL)
+ DeduceInits = PL->exprs();
if (isa<DeducedTemplateSpecializationType>(Deduced)) {
assert(VDecl && "non-auto type for init capture deduction?");
@@ -11801,7 +13101,10 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
Type.getQualifiers());
QualType DeducedType;
- if (DeduceAutoType(TSI, DeduceInit, DeducedType) == DAR_Failed) {
+ TemplateDeductionInfo Info(DeduceInit->getExprLoc());
+ TemplateDeductionResult Result =
+ DeduceAutoType(TSI->getTypeLoc(), DeduceInit, DeducedType, Info);
+ if (Result != TDK_Success && Result != TDK_AlreadyDiagnosed) {
if (!IsInitCapture)
DiagnoseAutoDeductionFailure(VDecl, DeduceInit);
else if (isa<InitListExpr>(Init))
@@ -11880,7 +13183,7 @@ void Sema::checkNonTrivialCUnionInInitializer(const Expr *Init,
InitType.hasNonTrivialToPrimitiveCopyCUnion()) &&
"shouldn't be called if type doesn't have a non-trivial C struct");
if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
- for (auto I : ILE->inits()) {
+ for (auto *I : ILE->inits()) {
if (!I->getType().hasNonTrivialToPrimitiveDefaultInitializeCUnion() &&
!I->getType().hasNonTrivialToPrimitiveCopyCUnion())
continue;
@@ -12167,6 +13470,14 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
return;
}
+ // WebAssembly tables can't be used to initialise a variable.
+ if (Init && !Init->getType().isNull() &&
+ Init->getType()->isWebAssemblyTableType()) {
+ Diag(Init->getExprLoc(), diag::err_wasm_table_art) << 0;
+ VDecl->setInvalidDecl();
+ return;
+ }
+
// C++11 [decl.spec.auto]p6. Deduce the type which 'auto' stands in for.
if (VDecl->getType()->isUndeducedType()) {
// Attempt typo correction early so that the type of the init expression can
@@ -12198,8 +13509,11 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
return;
}
+ // C99 6.7.8p5. If the declaration of an identifier has block scope, and
+ // the identifier has external or internal linkage, the declaration shall
+ // have no initializer for the identifier.
+ // C++14 [dcl.init]p5 is the same restriction for C++.
if (VDecl->isLocalVarDecl() && VDecl->hasExternalStorage()) {
- // C99 6.7.8p5. C++ has no such restriction, but that is a defect.
Diag(VDecl->getLocation(), diag::err_block_extern_cant_init);
VDecl->setInvalidDecl();
return;
@@ -12225,6 +13539,16 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
VDecl->setInvalidDecl();
}
+ // C++ [module.import/6] external definitions are not permitted in header
+ // units.
+ if (getLangOpts().CPlusPlusModules && currentModuleIsHeaderUnit() &&
+ !VDecl->isInvalidDecl() && VDecl->isThisDeclarationADefinition() &&
+ VDecl->getFormalLinkage() == Linkage::External && !VDecl->isInline() &&
+ !VDecl->isTemplated() && !isa<VarTemplateSpecializationDecl>(VDecl)) {
+ Diag(VDecl->getLocation(), diag::err_extern_def_in_header_unit);
+ VDecl->setInvalidDecl();
+ }
+
// If adding the initializer will turn this declaration into a definition,
// and we already have a definition for this variable, diagnose or otherwise
// handle the situation.
@@ -12299,6 +13623,7 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
// Perform the initialization.
ParenListExpr *CXXDirectInit = dyn_cast<ParenListExpr>(Init);
+ bool IsParenListInit = false;
if (!VDecl->isInvalidDecl()) {
InitializedEntity Entity = InitializedEntity::InitializeVariable(VDecl);
InitializationKind Kind = InitializationKind::CreateForInit(
@@ -12331,16 +13656,40 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
/*TreatUnavailableAsInvalid=*/false);
ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Args, &DclT);
if (Result.isInvalid()) {
- // If the provied initializer fails to initialize the var decl,
+ // If the provided initializer fails to initialize the var decl,
// we attach a recovery expr for better recovery.
auto RecoveryExpr =
CreateRecoveryExpr(Init->getBeginLoc(), Init->getEndLoc(), Args);
if (RecoveryExpr.get())
VDecl->setInit(RecoveryExpr.get());
+ // In general, for error recovery purposes, the initalizer doesn't play
+ // part in the valid bit of the declaration. There are a few exceptions:
+ // 1) if the var decl has a deduced auto type, and the type cannot be
+ // deduced by an invalid initializer;
+ // 2) if the var decl is decompsition decl with a non-deduced type, and
+ // the initialization fails (e.g. `int [a] = {1, 2};`);
+ // Case 1) was already handled elsewhere.
+ if (isa<DecompositionDecl>(VDecl)) // Case 2)
+ VDecl->setInvalidDecl();
return;
}
Init = Result.getAs<Expr>();
+ IsParenListInit = !InitSeq.steps().empty() &&
+ InitSeq.step_begin()->Kind ==
+ InitializationSequence::SK_ParenthesizedListInit;
+ QualType VDeclType = VDecl->getType();
+ if (Init && !Init->getType().isNull() &&
+ !Init->getType()->isDependentType() && !VDeclType->isDependentType() &&
+ Context.getAsIncompleteArrayType(VDeclType) &&
+ Context.getAsIncompleteArrayType(Init->getType())) {
+ // Bail out if it is not possible to deduce array size from the
+ // initializer.
+ Diag(VDecl->getLocation(), diag::err_typecheck_decl_incomplete_type)
+ << VDeclType;
+ VDecl->setInvalidDecl();
+ return;
+ }
}
// Check for self-references within variable initializers.
@@ -12589,20 +13938,24 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
// class type.
if (CXXDirectInit) {
assert(DirectInit && "Call-style initializer must be direct init.");
- VDecl->setInitStyle(VarDecl::CallInit);
+ VDecl->setInitStyle(IsParenListInit ? VarDecl::ParenListInit
+ : VarDecl::CallInit);
} else if (DirectInit) {
// This must be list-initialization. No other way is direct-initialization.
VDecl->setInitStyle(VarDecl::ListInit);
}
- if (LangOpts.OpenMP && VDecl->isFileVarDecl())
+ if (LangOpts.OpenMP &&
+ (LangOpts.OpenMPIsTargetDevice || !LangOpts.OMPTargetTriples.empty()) &&
+ VDecl->isFileVarDecl())
DeclsToCheckForDeferredDiags.insert(VDecl);
CheckCompleteVariableDeclaration(VDecl);
}
/// ActOnInitializerError - Given that there was an error parsing an
-/// initializer for the given declaration, try to return to some form
-/// of sanity.
+/// initializer for the given declaration, try to at least re-establish
+/// invariants such as whether a variable's type is either dependent or
+/// complete.
void Sema::ActOnInitializerError(Decl *D) {
// Our main concern here is re-establishing invariants like "a
// variable's type is either dependent or complete".
@@ -12751,7 +14104,7 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
// that has an in-class initializer, so we type-check this like
// a declaration.
//
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case VarDecl::DeclarationOnly:
// It's only a declaration.
@@ -12778,7 +14131,7 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
}
if (Context.getTargetInfo().allowDebugInfoForExternalRef() &&
- !Var->isInvalidDecl() && !getLangOpts().CPlusPlus)
+ !Var->isInvalidDecl())
ExternalDeclarations.push_back(Var);
return;
@@ -12821,8 +14174,12 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
// Provide a specific diagnostic for uninitialized variable
// definitions with incomplete array type.
if (Type->isIncompleteArrayType()) {
- Diag(Var->getLocation(),
- diag::err_typecheck_incomplete_array_needs_initializer);
+ if (Var->isConstexpr())
+ Diag(Var->getLocation(), diag::err_constexpr_var_requires_const_init)
+ << Var;
+ else
+ Diag(Var->getLocation(),
+ diag::err_typecheck_incomplete_array_needs_initializer);
Var->setInvalidDecl();
return;
}
@@ -12832,7 +14189,6 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
if (Type->isReferenceType()) {
Diag(Var->getLocation(), diag::err_reference_var_requires_init)
<< Var << SourceRange(Var->getLocation(), Var->getLocation());
- Var->setInvalidDecl();
return;
}
@@ -12907,8 +14263,8 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
InitializationKind Kind
= InitializationKind::CreateDefault(Var->getLocation());
- InitializationSequence InitSeq(*this, Entity, Kind, None);
- ExprResult Init = InitSeq.Perform(*this, Entity, Kind, None);
+ InitializationSequence InitSeq(*this, Entity, Kind, std::nullopt);
+ ExprResult Init = InitSeq.Perform(*this, Entity, Kind, std::nullopt);
if (Init.get()) {
Var->setInit(MaybeCreateExprWithCleanups(Init.get()));
@@ -12981,11 +14337,9 @@ void Sema::ActOnCXXForRangeDecl(Decl *D) {
}
}
-StmtResult
-Sema::ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
- IdentifierInfo *Ident,
- ParsedAttributes &Attrs,
- SourceLocation AttrEnd) {
+StmtResult Sema::ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
+ IdentifierInfo *Ident,
+ ParsedAttributes &Attrs) {
// C++1y [stmt.iter]p1:
// A range-based for statement of the form
// for ( for-range-identifier : for-range-initializer ) statement
@@ -12998,9 +14352,9 @@ Sema::ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
DS.SetTypeSpecType(DeclSpec::TST_auto, IdentLoc, PrevSpec, DiagID,
getPrintingPolicy());
- Declarator D(DS, DeclaratorContext::ForInit);
+ Declarator D(DS, ParsedAttributesView::none(), DeclaratorContext::ForInit);
D.SetIdentifier(Ident, IdentLoc);
- D.takeAttributes(Attrs, AttrEnd);
+ D.takeAttributes(Attrs);
D.AddTypeInfo(DeclaratorChunk::getReference(0, IdentLoc, /*lvalue*/ false),
IdentLoc);
@@ -13008,7 +14362,8 @@ Sema::ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
cast<VarDecl>(Var)->setCXXForRangeDecl(true);
FinalizeDeclaration(Var);
return ActOnDeclStmt(FinalizeDeclaratorGroup(S, DS, Var), IdentLoc,
- AttrEnd.isValid() ? AttrEnd : IdentLoc);
+ Attrs.Range.getEnd().isValid() ? Attrs.Range.getEnd()
+ : IdentLoc);
}
void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
@@ -13058,6 +14413,7 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
var->getDeclContext()->getRedeclContext()->isFileContext() &&
var->isExternallyVisible() && var->hasLinkage() &&
!var->isInline() && !var->getDescribedVarTemplate() &&
+ var->getStorageClass() != SC_Register &&
!isa<VarTemplatePartialSpecializationDecl>(var) &&
!isTemplateInstantiation(var->getTemplateSpecializationKind()) &&
!getDiagnostics().isIgnored(diag::warn_missing_variable_declarations,
@@ -13075,7 +14431,7 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
}
// Cache the result of checking for constant initialization.
- Optional<bool> CacheHasConstInit;
+ std::optional<bool> CacheHasConstInit;
const Expr *CacheCulprit = nullptr;
auto checkConstInit = [&]() mutable {
if (!CacheHasConstInit)
@@ -13246,30 +14602,37 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
!inTemplateInstantiation()) {
PragmaStack<StringLiteral *> *Stack = nullptr;
int SectionFlags = ASTContext::PSF_Read;
- if (var->getType().isConstQualified()) {
- if (HasConstInit)
- Stack = &ConstSegStack;
- else {
- Stack = &BSSSegStack;
- SectionFlags |= ASTContext::PSF_Write;
- }
- } else if (var->hasInit() && HasConstInit) {
- Stack = &DataSegStack;
- SectionFlags |= ASTContext::PSF_Write;
+ bool MSVCEnv =
+ Context.getTargetInfo().getTriple().isWindowsMSVCEnvironment();
+ std::optional<QualType::NonConstantStorageReason> Reason;
+ if (HasConstInit &&
+ !(Reason = var->getType().isNonConstantStorage(Context, true, false))) {
+ Stack = &ConstSegStack;
} else {
- Stack = &BSSSegStack;
SectionFlags |= ASTContext::PSF_Write;
+ Stack = var->hasInit() && HasConstInit ? &DataSegStack : &BSSSegStack;
}
if (const SectionAttr *SA = var->getAttr<SectionAttr>()) {
if (SA->getSyntax() == AttributeCommonInfo::AS_Declspec)
SectionFlags |= ASTContext::PSF_Implicit;
UnifySection(SA->getName(), SectionFlags, var);
} else if (Stack->CurrentValue) {
+ if (Stack != &ConstSegStack && MSVCEnv &&
+ ConstSegStack.CurrentValue != ConstSegStack.DefaultValue &&
+ var->getType().isConstQualified()) {
+ assert((!Reason || Reason != QualType::NonConstantStorageReason::
+ NonConstNonReferenceType) &&
+ "This case should've already been handled elsewhere");
+ Diag(var->getLocation(), diag::warn_section_msvc_compat)
+ << var << ConstSegStack.CurrentValue << (int)(!HasConstInit
+ ? QualType::NonConstantStorageReason::NonTrivialCtor
+ : *Reason);
+ }
SectionFlags |= ASTContext::PSF_Implicit;
auto SectionName = Stack->CurrentValue->getString();
- var->addAttr(SectionAttr::CreateImplicit(
- Context, SectionName, Stack->CurrentPragmaLocation,
- AttributeCommonInfo::AS_Pragma, SectionAttr::Declspec_allocate));
+ var->addAttr(SectionAttr::CreateImplicit(Context, SectionName,
+ Stack->CurrentPragmaLocation,
+ SectionAttr::Declspec_allocate));
if (UnifySection(SectionName, SectionFlags, var))
var->dropAttr<SectionAttr>();
}
@@ -13279,8 +14642,7 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
// attribute.
if (CurInitSeg && var->getInit())
var->addAttr(InitSegAttr::CreateImplicit(Context, CurInitSeg->getString(),
- CurInitSegLoc,
- AttributeCommonInfo::AS_Pragma));
+ CurInitSegLoc));
}
// All the following checks are C++ only.
@@ -13346,6 +14708,26 @@ void Sema::CheckStaticLocalForDllExport(VarDecl *VD) {
}
}
+void Sema::CheckThreadLocalForLargeAlignment(VarDecl *VD) {
+ assert(VD->getTLSKind());
+
+ // Perform TLS alignment check here after attributes attached to the variable
+ // which may affect the alignment have been processed. Only perform the check
+ // if the target has a maximum TLS alignment (zero means no constraints).
+ if (unsigned MaxAlign = Context.getTargetInfo().getMaxTLSAlign()) {
+ // Protect the check so that it's not performed on dependent types and
+ // dependent alignments (we can't determine the alignment in that case).
+ if (!VD->hasDependentAlignment()) {
+ CharUnits MaxAlignChars = Context.toCharUnitsFromBits(MaxAlign);
+ if (Context.getDeclAlign(VD) > MaxAlignChars) {
+ Diag(VD->getLocation(), diag::err_tls_var_aligned_over_maximum)
+ << (unsigned)Context.getDeclAlign(VD).getQuantity() << VD
+ << (unsigned)MaxAlignChars.getQuantity();
+ }
+ }
+ }
+}
+
/// FinalizeDeclaration - called by ParseDeclarationAfterDeclarator to perform
/// any semantic actions necessary after any initializer has been attached.
void Sema::FinalizeDeclaration(Decl *ThisDecl) {
@@ -13362,23 +14744,19 @@ void Sema::FinalizeDeclaration(Decl *ThisDecl) {
if (PragmaClangBSSSection.Valid)
VD->addAttr(PragmaClangBSSSectionAttr::CreateImplicit(
Context, PragmaClangBSSSection.SectionName,
- PragmaClangBSSSection.PragmaLocation,
- AttributeCommonInfo::AS_Pragma));
+ PragmaClangBSSSection.PragmaLocation));
if (PragmaClangDataSection.Valid)
VD->addAttr(PragmaClangDataSectionAttr::CreateImplicit(
Context, PragmaClangDataSection.SectionName,
- PragmaClangDataSection.PragmaLocation,
- AttributeCommonInfo::AS_Pragma));
+ PragmaClangDataSection.PragmaLocation));
if (PragmaClangRodataSection.Valid)
VD->addAttr(PragmaClangRodataSectionAttr::CreateImplicit(
Context, PragmaClangRodataSection.SectionName,
- PragmaClangRodataSection.PragmaLocation,
- AttributeCommonInfo::AS_Pragma));
+ PragmaClangRodataSection.PragmaLocation));
if (PragmaClangRelroSection.Valid)
VD->addAttr(PragmaClangRelroSectionAttr::CreateImplicit(
Context, PragmaClangRelroSection.SectionName,
- PragmaClangRelroSection.PragmaLocation,
- AttributeCommonInfo::AS_Pragma));
+ PragmaClangRelroSection.PragmaLocation));
}
if (auto *DD = dyn_cast<DecompositionDecl>(ThisDecl)) {
@@ -13389,25 +14767,12 @@ void Sema::FinalizeDeclaration(Decl *ThisDecl) {
checkAttributesAfterMerging(*this, *VD);
- // Perform TLS alignment check here after attributes attached to the variable
- // which may affect the alignment have been processed. Only perform the check
- // if the target has a maximum TLS alignment (zero means no constraints).
- if (unsigned MaxAlign = Context.getTargetInfo().getMaxTLSAlign()) {
- // Protect the check so that it's not performed on dependent types and
- // dependent alignments (we can't determine the alignment in that case).
- if (VD->getTLSKind() && !VD->hasDependentAlignment()) {
- CharUnits MaxAlignChars = Context.toCharUnitsFromBits(MaxAlign);
- if (Context.getDeclAlign(VD) > MaxAlignChars) {
- Diag(VD->getLocation(), diag::err_tls_var_aligned_over_maximum)
- << (unsigned)Context.getDeclAlign(VD).getQuantity() << VD
- << (unsigned)MaxAlignChars.getQuantity();
- }
- }
- }
-
if (VD->isStaticLocal())
CheckStaticLocalForDllExport(VD);
+ if (VD->getTLSKind())
+ CheckThreadLocalForLargeAlignment(VD);
+
// Perform check for initializers of device-side global variables.
// CUDA allows empty constructors as initializers (see E.2.3.1, CUDA
// 7.5). We must also apply the same checks to all __shared__
@@ -13493,7 +14858,7 @@ void Sema::FinalizeDeclaration(Decl *ThisDecl) {
if (!MagicValueExpr) {
continue;
}
- Optional<llvm::APSInt> MagicValueInt;
+ std::optional<llvm::APSInt> MagicValueInt;
if (!(MagicValueInt = MagicValueExpr->getIntegerConstantExpr(Context))) {
Diag(I->getRange().getBegin(),
diag::err_type_tag_for_datatype_not_ice)
@@ -13535,6 +14900,12 @@ Sema::DeclGroupPtrTy Sema::FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
for (unsigned i = 0, e = Group.size(); i != e; ++i) {
if (Decl *D = Group[i]) {
+ // Check if the Decl has been declared in '#pragma omp declare target'
+ // directive and has static storage duration.
+ if (auto *VD = dyn_cast<VarDecl>(D);
+ LangOpts.OpenMP && VD && VD->hasAttr<OMPDeclareTargetDeclAttr>() &&
+ VD->hasGlobalStorage())
+ ActOnOpenMPDeclareTargetInitializer(D);
// For declarators, there are some additional syntactic-ish checks we need
// to perform.
if (auto *DD = dyn_cast<DeclaratorDecl>(D)) {
@@ -13705,9 +15076,32 @@ void Sema::CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D) {
}
}
+static void CheckExplicitObjectParameter(Sema &S, ParmVarDecl *P,
+ SourceLocation ExplicitThisLoc) {
+ if (!ExplicitThisLoc.isValid())
+ return;
+ assert(S.getLangOpts().CPlusPlus &&
+ "explicit parameter in non-cplusplus mode");
+ if (!S.getLangOpts().CPlusPlus23)
+ S.Diag(ExplicitThisLoc, diag::err_cxx20_deducing_this)
+ << P->getSourceRange();
+
+ // C++2b [dcl.fct/7] An explicit object parameter shall not be a function
+ // parameter pack.
+ if (P->isParameterPack()) {
+ S.Diag(P->getBeginLoc(), diag::err_explicit_object_parameter_pack)
+ << P->getSourceRange();
+ return;
+ }
+ P->setExplicitObjectParameterLoc(ExplicitThisLoc);
+ if (LambdaScopeInfo *LSI = S.getCurLambda())
+ LSI->ExplicitObjectParameter = P;
+}
+
/// ActOnParamDeclarator - Called from Parser::ParseFunctionDeclarator()
/// to introduce parameters into function prototype scope.
-Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
+Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D,
+ SourceLocation ExplicitThisLoc) {
const DeclSpec &DS = D.getDeclSpec();
// Verify C99 6.7.5.3p2: The only SCS allowed is 'register'.
@@ -13747,7 +15141,7 @@ Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
CheckFunctionOrTemplateParamDeclarator(S, D);
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
QualType parmDeclType = TInfo->getType();
// Check for redeclaration of parameters, e.g. int foo(int x, int x);
@@ -13756,17 +15150,17 @@ Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
LookupResult R(*this, II, D.getIdentifierLoc(), LookupOrdinaryName,
ForVisibleRedeclaration);
LookupName(R, S);
- if (R.isSingleResult()) {
- NamedDecl *PrevDecl = R.getFoundDecl();
- if (PrevDecl->isTemplateParameter()) {
+ if (!R.empty()) {
+ NamedDecl *PrevDecl = *R.begin();
+ if (R.isSingleResult() && PrevDecl->isTemplateParameter()) {
// Maybe we will complain about the shadowed template parameter.
DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl);
// Just pretend that we didn't see the previous declaration.
PrevDecl = nullptr;
- } else if (S->isDeclScope(PrevDecl)) {
+ }
+ if (PrevDecl && S->isDeclScope(PrevDecl)) {
Diag(D.getIdentifierLoc(), diag::err_param_redefinition) << II;
Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
-
// Recover by removing the name
II = nullptr;
D.SetIdentifier(nullptr, D.getIdentifierLoc());
@@ -13785,6 +15179,8 @@ Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
if (D.isInvalidType())
New->setInvalidDecl();
+ CheckExplicitObjectParameter(*this, New, ExplicitThisLoc);
+
assert(S->isFunctionPrototypeScope());
assert(S->getFunctionPrototypeDepth() >= 1);
New->setScopeInfo(S->getFunctionPrototypeDepth() - 1,
@@ -13835,7 +15231,8 @@ void Sema::DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters) {
for (const ParmVarDecl *Parameter : Parameters) {
if (!Parameter->isReferenced() && Parameter->getDeclName() &&
- !Parameter->hasAttr<UnusedAttr>()) {
+ !Parameter->hasAttr<UnusedAttr>() &&
+ !Parameter->getIdentifier()->isPlaceholder()) {
Diag(Parameter->getLocation(), diag::warn_unused_parameter)
<< Parameter->getDeclName();
}
@@ -13868,6 +15265,37 @@ void Sema::DiagnoseSizeOfParametersAndReturnValue(
}
}
+QualType Sema::AdjustParameterTypeForObjCAutoRefCount(QualType T,
+ SourceLocation NameLoc,
+ TypeSourceInfo *TSInfo) {
+ // In ARC, infer a lifetime qualifier for appropriate parameter types.
+ if (!getLangOpts().ObjCAutoRefCount ||
+ T.getObjCLifetime() != Qualifiers::OCL_None || !T->isObjCLifetimeType())
+ return T;
+
+ Qualifiers::ObjCLifetime Lifetime;
+
+ // Special cases for arrays:
+ // - if it's const, use __unsafe_unretained
+ // - otherwise, it's an error
+ if (T->isArrayType()) {
+ if (!T.isConstQualified()) {
+ if (DelayedDiagnostics.shouldDelayDiagnostics())
+ DelayedDiagnostics.add(sema::DelayedDiagnostic::makeForbiddenType(
+ NameLoc, diag::err_arc_array_param_no_ownership, T, false));
+ else
+ Diag(NameLoc, diag::err_arc_array_param_no_ownership)
+ << TSInfo->getTypeLoc().getSourceRange();
+ }
+ Lifetime = Qualifiers::OCL_ExplicitNone;
+ } else {
+ Lifetime = T->getObjCARCImplicitLifetime();
+ }
+ T = Context.getLifetimeQualifiedType(T, Lifetime);
+
+ return T;
+}
+
ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
@@ -13915,14 +15343,6 @@ ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc,
checkNonTrivialCUnion(New->getType(), New->getLocation(),
NTCUC_FunctionParam, NTCUK_Destruct|NTCUK_Copy);
- // Parameters can not be abstract class types.
- // For record types, this is done by the AbstractClassUsageDiagnoser once
- // the class has been completely parsed.
- if (!CurContext->isRecord() &&
- RequireNonAbstractType(NameLoc, T, diag::err_abstract_type_in_decl,
- AbstractParamType))
- New->setInvalidDecl();
-
// Parameter declarators cannot be interface types. All ObjC objects are
// passed by reference.
if (T->isObjCObjectType()) {
@@ -13943,7 +15363,11 @@ ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc,
// OpenCL allows function arguments declared to be an array of a type
// to be qualified with an address space.
!(getLangOpts().OpenCL &&
- (T->isArrayType() || T.getAddressSpace() == LangAS::opencl_private))) {
+ (T->isArrayType() || T.getAddressSpace() == LangAS::opencl_private)) &&
+ // WebAssembly allows reference types as parameters. Funcref in particular
+ // lives in a different address space.
+ !(T->isFunctionPointerType() &&
+ T.getAddressSpace() == LangAS::wasm_funcref)) {
Diag(NameLoc, diag::err_arg_with_address_space);
New->setInvalidDecl();
}
@@ -13961,18 +15385,28 @@ void Sema::ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls) {
DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
- // Verify 6.9.1p6: 'every identifier in the identifier list shall be declared'
- // for a K&R function.
+ // C99 6.9.1p6 "If a declarator includes an identifier list, each declaration
+ // in the declaration list shall have at least one declarator, those
+ // declarators shall only declare identifiers from the identifier list, and
+ // every identifier in the identifier list shall be declared.
+ //
+ // C89 3.7.1p5 "If a declarator includes an identifier list, only the
+ // identifiers it names shall be declared in the declaration list."
+ //
+ // This is why we only diagnose in C99 and later. Note, the other conditions
+ // listed are checked elsewhere.
if (!FTI.hasPrototype) {
for (int i = FTI.NumParams; i != 0; /* decrement in loop */) {
--i;
if (FTI.Params[i].Param == nullptr) {
- SmallString<256> Code;
- llvm::raw_svector_ostream(Code)
- << " int " << FTI.Params[i].Ident->getName() << ";\n";
- Diag(FTI.Params[i].IdentLoc, diag::ext_param_not_declared)
- << FTI.Params[i].Ident
- << FixItHint::CreateInsertion(LocAfterDecls, Code);
+ if (getLangOpts().C99) {
+ SmallString<256> Code;
+ llvm::raw_svector_ostream(Code)
+ << " int " << FTI.Params[i].Ident->getName() << ";\n";
+ Diag(FTI.Params[i].IdentLoc, diag::ext_param_not_declared)
+ << FTI.Params[i].Ident
+ << FixItHint::CreateInsertion(LocAfterDecls, Code);
+ }
// Implicitly declare the argument as type 'int' for lack of a better
// type.
@@ -13985,7 +15419,8 @@ void Sema::ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
// Use the identifier location for the type source range.
DS.SetRangeStart(FTI.Params[i].IdentLoc);
DS.SetRangeEnd(FTI.Params[i].IdentLoc);
- Declarator ParamD(DS, DeclaratorContext::KNRTypeList);
+ Declarator ParamD(DS, ParsedAttributesView::none(),
+ DeclaratorContext::KNRTypeList);
ParamD.SetIdentifier(FTI.Params[i].Ident, FTI.Params[i].IdentLoc);
FTI.Params[i].Param = ActOnParamDeclarator(S, ParamD);
}
@@ -13996,7 +15431,7 @@ void Sema::ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
Decl *
Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
- SkipBodyInfo *SkipBody) {
+ SkipBodyInfo *SkipBody, FnBodyKind BodyKind) {
assert(getCurFunctionDecl() == nullptr && "Function parsing confused");
assert(D.isFunctionDeclarator() && "Not a function declarator!");
Scope *ParentScope = FnBodyScope->getParent();
@@ -14015,7 +15450,7 @@ Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Declarator &D,
D.setFunctionDefinitionKind(FunctionDefinitionKind::Definition);
Decl *DP = HandleDeclarator(ParentScope, D, TemplateParameterLists);
- Decl *Dcl = ActOnStartOfFunctionDef(FnBodyScope, DP, SkipBody);
+ Decl *Dcl = ActOnStartOfFunctionDef(FnBodyScope, DP, SkipBody, BodyKind);
if (!Bases.empty())
ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(Dcl, Bases);
@@ -14027,6 +15462,21 @@ void Sema::ActOnFinishInlineFunctionDef(FunctionDecl *D) {
Consumer.HandleInlineFunctionDefinition(D);
}
+static bool FindPossiblePrototype(const FunctionDecl *FD,
+ const FunctionDecl *&PossiblePrototype) {
+ for (const FunctionDecl *Prev = FD->getPreviousDecl(); Prev;
+ Prev = Prev->getPreviousDecl()) {
+ // Ignore any declarations that occur in function or method
+ // scope, because they aren't visible from the header.
+ if (Prev->getLexicalDeclContext()->isFunctionOrMethod())
+ continue;
+
+ PossiblePrototype = Prev;
+ return Prev->getType()->isFunctionProtoType();
+ }
+ return false;
+}
+
static bool
ShouldWarnAboutMissingPrototype(const FunctionDecl *FD,
const FunctionDecl *&PossiblePrototype) {
@@ -14068,16 +15518,14 @@ ShouldWarnAboutMissingPrototype(const FunctionDecl *FD,
if (FD->isDeleted())
return false;
- for (const FunctionDecl *Prev = FD->getPreviousDecl();
- Prev; Prev = Prev->getPreviousDecl()) {
- // Ignore any declarations that occur in function or method
- // scope, because they aren't visible from the header.
- if (Prev->getLexicalDeclContext()->isFunctionOrMethod())
- continue;
+ // Don't warn on implicitly local functions (such as having local-typed
+ // parameters).
+ if (!FD->isExternallyVisible())
+ return false;
- PossiblePrototype = Prev;
- return Prev->getType()->isFunctionNoProtoType();
- }
+ // If we were able to find a potential prototype, don't warn.
+ if (FindPossiblePrototype(FD, PossiblePrototype))
+ return false;
return true;
}
@@ -14115,9 +15563,8 @@ Sema::CheckForFunctionRedefinition(FunctionDecl *FD,
// If we don't have a visible definition of the function, and it's inline or
// a template, skip the new definition.
if (SkipBody && !hasVisibleDefinition(Definition) &&
- (Definition->getFormalLinkage() == InternalLinkage ||
- Definition->isInlined() ||
- Definition->getDescribedFunctionTemplate() ||
+ (Definition->getFormalLinkage() == Linkage::Internal ||
+ Definition->isInlined() || Definition->getDescribedFunctionTemplate() ||
Definition->getNumTemplateParameterLists())) {
SkipBody->ShouldSkip = true;
SkipBody->Previous = const_cast<FunctionDecl*>(Definition);
@@ -14138,14 +15585,17 @@ Sema::CheckForFunctionRedefinition(FunctionDecl *FD,
FD->setInvalidDecl();
}
-static void RebuildLambdaScopeInfo(CXXMethodDecl *CallOperator,
- Sema &S) {
- CXXRecordDecl *const LambdaClass = CallOperator->getParent();
+LambdaScopeInfo *Sema::RebuildLambdaScopeInfo(CXXMethodDecl *CallOperator) {
+ CXXRecordDecl *LambdaClass = CallOperator->getParent();
- LambdaScopeInfo *LSI = S.PushLambdaScope();
+ LambdaScopeInfo *LSI = PushLambdaScope();
LSI->CallOperator = CallOperator;
LSI->Lambda = LambdaClass;
LSI->ReturnType = CallOperator->getReturnType();
+ // This function in calls in situation where the context of the call operator
+ // is not entered, so we set AfterParameterList to false, so that
+ // `tryCaptureVariable` finds explicit captures in the appropriate context.
+ LSI->AfterParameterList = false;
const LambdaCaptureDefault LCD = LambdaClass->getLambdaCaptureDefault();
if (LCD == LCD_None)
@@ -14158,15 +15608,17 @@ static void RebuildLambdaScopeInfo(CXXMethodDecl *CallOperator,
LSI->IntroducerRange = DNI.getCXXOperatorNameRange();
LSI->Mutable = !CallOperator->isConst();
+ if (CallOperator->isExplicitObjectMemberFunction())
+ LSI->ExplicitObjectParameter = CallOperator->getParamDecl(0);
// Add the captures to the LSI so they can be noted as already
// captured within tryCaptureVar.
auto I = LambdaClass->field_begin();
for (const auto &C : LambdaClass->captures()) {
if (C.capturesVariable()) {
- VarDecl *VD = C.getCapturedVar();
+ ValueDecl *VD = C.getCapturedVar();
if (VD->isInitCapture())
- S.CurrentInstantiationScope->InstantiatedLocal(VD, VD);
+ CurrentInstantiationScope->InstantiatedLocal(VD, VD);
const bool ByRef = C.getCaptureKind() == LCK_ByRef;
LSI->addCapture(VD, /*IsBlock*/false, ByRef,
/*RefersToEnclosingVariableOrCapture*/true, C.getLocation(),
@@ -14183,10 +15635,12 @@ static void RebuildLambdaScopeInfo(CXXMethodDecl *CallOperator,
}
++I;
}
+ return LSI;
}
Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
- SkipBodyInfo *SkipBody) {
+ SkipBodyInfo *SkipBody,
+ FnBodyKind BodyKind) {
if (!D) {
// Parsing the function declaration failed in some way. Push on a fake scope
// anyway so we can try to parse the function body.
@@ -14205,10 +15659,25 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
// Do not push if it is a lambda because one is already pushed when building
// the lambda in ActOnStartOfLambdaDefinition().
if (!isLambdaCallOperator(FD))
+ // [expr.const]/p14.1
+ // An expression or conversion is in an immediate function context if it is
+ // potentially evaluated and either: its innermost enclosing non-block scope
+ // is a function parameter scope of an immediate function.
PushExpressionEvaluationContext(
- FD->isConsteval() ? ExpressionEvaluationContext::ConstantEvaluated
+ FD->isConsteval() ? ExpressionEvaluationContext::ImmediateFunctionContext
: ExprEvalContexts.back().Context);
+ // Each ExpressionEvaluationContextRecord also keeps track of whether the
+ // context is nested in an immediate function context, so smaller contexts
+ // that appear inside immediate functions (like variable initializers) are
+ // considered to be inside an immediate function context even though by
+ // themselves they are not immediate function contexts. But when a new
+ // function is entered, we need to reset this tracking, since the entered
+ // function might be not an immediate function.
+ ExprEvalContexts.back().InImmediateFunctionContext = FD->isConsteval();
+ ExprEvalContexts.back().InImmediateEscalatingFunctionContext =
+ getLangOpts().CPlusPlus20 && FD->isImmediateEscalating();
+
// Check for defining attributes before the check for redefinition.
if (const auto *Attr = FD->getAttr<AliasAttr>()) {
Diag(Attr->getLocation(), diag::err_alias_is_definition) << FD << 0;
@@ -14220,6 +15689,16 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
FD->dropAttr<IFuncAttr>();
FD->setInvalidDecl();
}
+ if (const auto *Attr = FD->getAttr<TargetVersionAttr>()) {
+ if (!Context.getTargetInfo().hasFeature("fmv") &&
+ !Attr->isDefaultVersion()) {
+ // If function multi versioning disabled skip parsing function body
+ // defined with non-default target_version attribute
+ if (SkipBody)
+ SkipBody->ShouldSkip = true;
+ return nullptr;
+ }
+ }
if (auto *Ctor = dyn_cast<CXXConstructorDecl>(FD)) {
if (Ctor->getTemplateSpecializationKind() == TSK_ExplicitSpecialization &&
@@ -14260,7 +15739,7 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
assert(inTemplateInstantiation() &&
"There should be an active template instantiation on the stack "
"when instantiating a generic lambda!");
- RebuildLambdaScopeInfo(cast<CXXMethodDecl>(D), *this);
+ RebuildLambdaScopeInfo(cast<CXXMethodDecl>(D));
} else {
// Enter a new function scope
PushFunctionScope();
@@ -14275,21 +15754,28 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
}
}
- // The return type of a function definition must be complete
- // (C99 6.9.1p3, C++ [dcl.fct]p6).
+ // The return type of a function definition must be complete (C99 6.9.1p3).
+ // C++23 [dcl.fct.def.general]/p2
+ // The type of [...] the return for a function definition
+ // shall not be a (possibly cv-qualified) class type that is incomplete
+ // or abstract within the function body unless the function is deleted.
QualType ResultType = FD->getReturnType();
if (!ResultType->isDependentType() && !ResultType->isVoidType() &&
- !FD->isInvalidDecl() &&
- RequireCompleteType(FD->getLocation(), ResultType,
- diag::err_func_def_incomplete_result))
+ !FD->isInvalidDecl() && BodyKind != FnBodyKind::Delete &&
+ (RequireCompleteType(FD->getLocation(), ResultType,
+ diag::err_func_def_incomplete_result) ||
+ RequireNonAbstractType(FD->getLocation(), FD->getReturnType(),
+ diag::err_abstract_type_in_decl,
+ AbstractReturnType)))
FD->setInvalidDecl();
if (FnBodyScope)
PushDeclContext(FnBodyScope, FD);
// Check the validity of our function parameters
- CheckParmsForFunctionDef(FD->parameters(),
- /*CheckParameterNames=*/true);
+ if (BodyKind != FnBodyKind::Delete)
+ CheckParmsForFunctionDef(FD->parameters(),
+ /*CheckParameterNames=*/true);
// Add non-parameter declarations already in the function to the current
// scope.
@@ -14315,7 +15801,7 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
}
// Introduce our parameters into the function scope
- for (auto Param : FD->parameters()) {
+ for (auto *Param : FD->parameters()) {
Param->setOwningFunction(FD);
// If this has an identifier, add it to the scope stack.
@@ -14326,6 +15812,21 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
}
}
+ // C++ [module.import/6] external definitions are not permitted in header
+ // units. Deleted and Defaulted functions are implicitly inline (but the
+ // inline state is not set at this point, so check the BodyKind explicitly).
+ // FIXME: Consider an alternate location for the test where the inlined()
+ // state is complete.
+ if (getLangOpts().CPlusPlusModules && currentModuleIsHeaderUnit() &&
+ !FD->isInvalidDecl() && !FD->isInlined() &&
+ BodyKind != FnBodyKind::Delete && BodyKind != FnBodyKind::Default &&
+ FD->getFormalLinkage() == Linkage::External && !FD->isTemplated() &&
+ !FD->isTemplateInstantiation()) {
+ assert(FD->isThisDeclarationADefinition());
+ Diag(FD->getLocation(), diag::err_extern_def_in_header_unit);
+ FD->setInvalidDecl();
+ }
+
// Ensure that the function's exception specification is instantiated.
if (const FunctionProtoType *FPT = FD->getType()->getAs<FunctionProtoType>())
ResolveExceptionSpec(D->getLocation(), FPT);
@@ -14425,7 +15926,7 @@ Decl *Sema::ActOnSkippedFunctionBody(Decl *Decl) {
}
Decl *Sema::ActOnFinishFunctionBody(Decl *D, Stmt *BodyArg) {
- return ActOnFinishFunctionBody(D, BodyArg, false);
+ return ActOnFinishFunctionBody(D, BodyArg, /*IsInstantiation=*/false);
}
/// RAII object that pops an ExpressionEvaluationContext when exiting a function
@@ -14472,347 +15973,436 @@ static void diagnoseImplicitlyRetainedSelf(Sema &S) {
<< FixItHint::CreateInsertion(P.first, "self->");
}
+static bool methodHasName(const FunctionDecl *FD, StringRef Name) {
+ return isa<CXXMethodDecl>(FD) && FD->param_empty() &&
+ FD->getDeclName().isIdentifier() && FD->getName().equals(Name);
+}
+
+bool Sema::CanBeGetReturnObject(const FunctionDecl *FD) {
+ return methodHasName(FD, "get_return_object");
+}
+
+bool Sema::CanBeGetReturnTypeOnAllocFailure(const FunctionDecl *FD) {
+ return FD->isStatic() &&
+ methodHasName(FD, "get_return_object_on_allocation_failure");
+}
+
+void Sema::CheckCoroutineWrapper(FunctionDecl *FD) {
+ RecordDecl *RD = FD->getReturnType()->getAsRecordDecl();
+ if (!RD || !RD->getUnderlyingDecl()->hasAttr<CoroReturnTypeAttr>())
+ return;
+ // Allow some_promise_type::get_return_object().
+ if (CanBeGetReturnObject(FD) || CanBeGetReturnTypeOnAllocFailure(FD))
+ return;
+ if (!FD->hasAttr<CoroWrapperAttr>())
+ Diag(FD->getLocation(), diag::err_coroutine_return_type) << RD;
+}
+
Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
bool IsInstantiation) {
FunctionScopeInfo *FSI = getCurFunction();
FunctionDecl *FD = dcl ? dcl->getAsFunction() : nullptr;
- if (FSI->UsesFPIntrin && !FD->hasAttr<StrictFPAttr>())
+ if (FSI->UsesFPIntrin && FD && !FD->hasAttr<StrictFPAttr>())
FD->addAttr(StrictFPAttr::CreateImplicit(Context));
sema::AnalysisBasedWarnings::Policy WP = AnalysisWarnings.getDefaultPolicy();
sema::AnalysisBasedWarnings::Policy *ActivePolicy = nullptr;
- if (getLangOpts().Coroutines && FSI->isCoroutine())
- CheckCompletedCoroutineBody(FD, Body);
-
- // Do not call PopExpressionEvaluationContext() if it is a lambda because one
- // is already popped when finishing the lambda in BuildLambdaExpr(). This is
- // meant to pop the context added in ActOnStartOfFunctionDef().
- ExitFunctionBodyRAII ExitRAII(*this, isLambdaCallOperator(FD));
-
- if (FD) {
- FD->setBody(Body);
- FD->setWillHaveBody(false);
-
- if (getLangOpts().CPlusPlus14) {
- if (!FD->isInvalidDecl() && Body && !FD->isDependentContext() &&
- FD->getReturnType()->isUndeducedType()) {
- // If the function has a deduced result type but contains no 'return'
- // statements, the result type as written must be exactly 'auto', and
- // the deduced result type is 'void'.
- if (!FD->getReturnType()->getAs<AutoType>()) {
- Diag(dcl->getLocation(), diag::err_auto_fn_no_return_but_not_auto)
- << FD->getReturnType();
- FD->setInvalidDecl();
- } else {
- // Substitute 'void' for the 'auto' in the type.
- TypeLoc ResultType = getReturnTypeLoc(FD);
- Context.adjustDeducedFunctionResultType(
- FD, SubstAutoType(ResultType.getType(), Context.VoidTy));
+ // If we skip function body, we can't tell if a function is a coroutine.
+ if (getLangOpts().Coroutines && FD && !FD->hasSkippedBody()) {
+ if (FSI->isCoroutine())
+ CheckCompletedCoroutineBody(FD, Body);
+ else
+ CheckCoroutineWrapper(FD);
+ }
+
+ {
+ // Do not call PopExpressionEvaluationContext() if it is a lambda because
+ // one is already popped when finishing the lambda in BuildLambdaExpr().
+ // This is meant to pop the context added in ActOnStartOfFunctionDef().
+ ExitFunctionBodyRAII ExitRAII(*this, isLambdaCallOperator(FD));
+ if (FD) {
+ FD->setBody(Body);
+ FD->setWillHaveBody(false);
+ CheckImmediateEscalatingFunctionDefinition(FD, FSI);
+
+ if (getLangOpts().CPlusPlus14) {
+ if (!FD->isInvalidDecl() && Body && !FD->isDependentContext() &&
+ FD->getReturnType()->isUndeducedType()) {
+ // For a function with a deduced result type to return void,
+ // the result type as written must be 'auto' or 'decltype(auto)',
+ // possibly cv-qualified or constrained, but not ref-qualified.
+ if (!FD->getReturnType()->getAs<AutoType>()) {
+ Diag(dcl->getLocation(), diag::err_auto_fn_no_return_but_not_auto)
+ << FD->getReturnType();
+ FD->setInvalidDecl();
+ } else {
+ // Falling off the end of the function is the same as 'return;'.
+ Expr *Dummy = nullptr;
+ if (DeduceFunctionTypeFromReturnExpr(
+ FD, dcl->getLocation(), Dummy,
+ FD->getReturnType()->getAs<AutoType>()))
+ FD->setInvalidDecl();
+ }
}
- }
- } else if (getLangOpts().CPlusPlus11 && isLambdaCallOperator(FD)) {
- // In C++11, we don't use 'auto' deduction rules for lambda call
- // operators because we don't support return type deduction.
- auto *LSI = getCurLambda();
- if (LSI->HasImplicitReturnType) {
- deduceClosureReturnType(*LSI);
-
- // C++11 [expr.prim.lambda]p4:
- // [...] if there are no return statements in the compound-statement
- // [the deduced type is] the type void
- QualType RetType =
- LSI->ReturnType.isNull() ? Context.VoidTy : LSI->ReturnType;
-
- // Update the return type to the deduced type.
- const auto *Proto = FD->getType()->castAs<FunctionProtoType>();
- FD->setType(Context.getFunctionType(RetType, Proto->getParamTypes(),
- Proto->getExtProtoInfo()));
- }
- }
-
- // If the function implicitly returns zero (like 'main') or is naked,
- // don't complain about missing return statements.
- if (FD->hasImplicitReturnZero() || FD->hasAttr<NakedAttr>())
- WP.disableCheckFallThrough();
-
- // MSVC permits the use of pure specifier (=0) on function definition,
- // defined at class scope, warn about this non-standard construct.
- if (getLangOpts().MicrosoftExt && FD->isPure() && !FD->isOutOfLine())
- Diag(FD->getLocation(), diag::ext_pure_function_definition);
-
- if (!FD->isInvalidDecl()) {
- // Don't diagnose unused parameters of defaulted or deleted functions.
- if (!FD->isDeleted() && !FD->isDefaulted() && !FD->hasSkippedBody())
- DiagnoseUnusedParameters(FD->parameters());
- DiagnoseSizeOfParametersAndReturnValue(FD->parameters(),
- FD->getReturnType(), FD);
-
- // If this is a structor, we need a vtable.
- if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(FD))
- MarkVTableUsed(FD->getLocation(), Constructor->getParent());
- else if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(FD))
- MarkVTableUsed(FD->getLocation(), Destructor->getParent());
-
- // Try to apply the named return value optimization. We have to check
- // if we can do this here because lambdas keep return statements around
- // to deduce an implicit return type.
- if (FD->getReturnType()->isRecordType() &&
- (!getLangOpts().CPlusPlus || !FD->isDependentContext()))
- computeNRVO(Body, FSI);
- }
-
- // GNU warning -Wmissing-prototypes:
- // Warn if a global function is defined without a previous
- // prototype declaration. This warning is issued even if the
- // definition itself provides a prototype. The aim is to detect
- // global functions that fail to be declared in header files.
- const FunctionDecl *PossiblePrototype = nullptr;
- if (ShouldWarnAboutMissingPrototype(FD, PossiblePrototype)) {
- Diag(FD->getLocation(), diag::warn_missing_prototype) << FD;
-
- if (PossiblePrototype) {
- // We found a declaration that is not a prototype,
- // but that could be a zero-parameter prototype
- if (TypeSourceInfo *TI = PossiblePrototype->getTypeSourceInfo()) {
- TypeLoc TL = TI->getTypeLoc();
- if (FunctionNoProtoTypeLoc FTL = TL.getAs<FunctionNoProtoTypeLoc>())
- Diag(PossiblePrototype->getLocation(),
- diag::note_declaration_not_a_prototype)
- << (FD->getNumParams() != 0)
- << (FD->getNumParams() == 0
- ? FixItHint::CreateInsertion(FTL.getRParenLoc(), "void")
- : FixItHint{});
+ } else if (getLangOpts().CPlusPlus11 && isLambdaCallOperator(FD)) {
+ // In C++11, we don't use 'auto' deduction rules for lambda call
+ // operators because we don't support return type deduction.
+ auto *LSI = getCurLambda();
+ if (LSI->HasImplicitReturnType) {
+ deduceClosureReturnType(*LSI);
+
+ // C++11 [expr.prim.lambda]p4:
+ // [...] if there are no return statements in the compound-statement
+ // [the deduced type is] the type void
+ QualType RetType =
+ LSI->ReturnType.isNull() ? Context.VoidTy : LSI->ReturnType;
+
+ // Update the return type to the deduced type.
+ const auto *Proto = FD->getType()->castAs<FunctionProtoType>();
+ FD->setType(Context.getFunctionType(RetType, Proto->getParamTypes(),
+ Proto->getExtProtoInfo()));
}
- } else {
- // Returns true if the token beginning at this Loc is `const`.
- auto isLocAtConst = [&](SourceLocation Loc, const SourceManager &SM,
- const LangOptions &LangOpts) {
- std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
- if (LocInfo.first.isInvalid())
- return false;
+ }
- bool Invalid = false;
- StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
- if (Invalid)
- return false;
+ // If the function implicitly returns zero (like 'main') or is naked,
+ // don't complain about missing return statements.
+ if (FD->hasImplicitReturnZero() || FD->hasAttr<NakedAttr>())
+ WP.disableCheckFallThrough();
+
+ // MSVC permits the use of pure specifier (=0) on function definition,
+ // defined at class scope, warn about this non-standard construct.
+ if (getLangOpts().MicrosoftExt && FD->isPureVirtual() &&
+ !FD->isOutOfLine())
+ Diag(FD->getLocation(), diag::ext_pure_function_definition);
+
+ if (!FD->isInvalidDecl()) {
+ // Don't diagnose unused parameters of defaulted, deleted or naked
+ // functions.
+ if (!FD->isDeleted() && !FD->isDefaulted() && !FD->hasSkippedBody() &&
+ !FD->hasAttr<NakedAttr>())
+ DiagnoseUnusedParameters(FD->parameters());
+ DiagnoseSizeOfParametersAndReturnValue(FD->parameters(),
+ FD->getReturnType(), FD);
+
+ // If this is a structor, we need a vtable.
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(FD))
+ MarkVTableUsed(FD->getLocation(), Constructor->getParent());
+ else if (CXXDestructorDecl *Destructor =
+ dyn_cast<CXXDestructorDecl>(FD))
+ MarkVTableUsed(FD->getLocation(), Destructor->getParent());
+
+ // Try to apply the named return value optimization. We have to check
+ // if we can do this here because lambdas keep return statements around
+ // to deduce an implicit return type.
+ if (FD->getReturnType()->isRecordType() &&
+ (!getLangOpts().CPlusPlus || !FD->isDependentContext()))
+ computeNRVO(Body, FSI);
+ }
- if (LocInfo.second > Buffer.size())
- return false;
+ // GNU warning -Wmissing-prototypes:
+ // Warn if a global function is defined without a previous
+ // prototype declaration. This warning is issued even if the
+ // definition itself provides a prototype. The aim is to detect
+ // global functions that fail to be declared in header files.
+ const FunctionDecl *PossiblePrototype = nullptr;
+ if (ShouldWarnAboutMissingPrototype(FD, PossiblePrototype)) {
+ Diag(FD->getLocation(), diag::warn_missing_prototype) << FD;
+
+ if (PossiblePrototype) {
+ // We found a declaration that is not a prototype,
+ // but that could be a zero-parameter prototype
+ if (TypeSourceInfo *TI = PossiblePrototype->getTypeSourceInfo()) {
+ TypeLoc TL = TI->getTypeLoc();
+ if (FunctionNoProtoTypeLoc FTL = TL.getAs<FunctionNoProtoTypeLoc>())
+ Diag(PossiblePrototype->getLocation(),
+ diag::note_declaration_not_a_prototype)
+ << (FD->getNumParams() != 0)
+ << (FD->getNumParams() == 0 ? FixItHint::CreateInsertion(
+ FTL.getRParenLoc(), "void")
+ : FixItHint{});
+ }
+ } else {
+ // Returns true if the token beginning at this Loc is `const`.
+ auto isLocAtConst = [&](SourceLocation Loc, const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
+ if (LocInfo.first.isInvalid())
+ return false;
- const char *LexStart = Buffer.data() + LocInfo.second;
- StringRef StartTok(LexStart, Buffer.size() - LocInfo.second);
+ bool Invalid = false;
+ StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
+ if (Invalid)
+ return false;
- return StartTok.consume_front("const") &&
- (StartTok.empty() || isWhitespace(StartTok[0]) ||
- StartTok.startswith("/*") || StartTok.startswith("//"));
- };
+ if (LocInfo.second > Buffer.size())
+ return false;
- auto findBeginLoc = [&]() {
- // If the return type has `const` qualifier, we want to insert
- // `static` before `const` (and not before the typename).
- if ((FD->getReturnType()->isAnyPointerType() &&
- FD->getReturnType()->getPointeeType().isConstQualified()) ||
- FD->getReturnType().isConstQualified()) {
- // But only do this if we can determine where the `const` is.
+ const char *LexStart = Buffer.data() + LocInfo.second;
+ StringRef StartTok(LexStart, Buffer.size() - LocInfo.second);
- if (isLocAtConst(FD->getBeginLoc(), getSourceManager(),
- getLangOpts()))
+ return StartTok.consume_front("const") &&
+ (StartTok.empty() || isWhitespace(StartTok[0]) ||
+ StartTok.starts_with("/*") || StartTok.starts_with("//"));
+ };
- return FD->getBeginLoc();
- }
- return FD->getTypeSpecStartLoc();
- };
- Diag(FD->getTypeSpecStartLoc(), diag::note_static_for_internal_linkage)
- << /* function */ 1
- << (FD->getStorageClass() == SC_None
- ? FixItHint::CreateInsertion(findBeginLoc(), "static ")
- : FixItHint{});
- }
-
- // GNU warning -Wstrict-prototypes
- // Warn if K&R function is defined without a previous declaration.
- // This warning is issued only if the definition itself does not provide
- // a prototype. Only K&R definitions do not provide a prototype.
- if (!FD->hasWrittenPrototype()) {
- TypeSourceInfo *TI = FD->getTypeSourceInfo();
- TypeLoc TL = TI->getTypeLoc();
- FunctionTypeLoc FTL = TL.getAsAdjusted<FunctionTypeLoc>();
- Diag(FTL.getLParenLoc(), diag::warn_strict_prototypes) << 2;
- }
- }
-
- // Warn on CPUDispatch with an actual body.
- if (FD->isMultiVersion() && FD->hasAttr<CPUDispatchAttr>() && Body)
- if (const auto *CmpndBody = dyn_cast<CompoundStmt>(Body))
- if (!CmpndBody->body_empty())
- Diag(CmpndBody->body_front()->getBeginLoc(),
- diag::warn_dispatch_body_ignored);
-
- if (auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
- const CXXMethodDecl *KeyFunction;
- if (MD->isOutOfLine() && (MD = MD->getCanonicalDecl()) &&
- MD->isVirtual() &&
- (KeyFunction = Context.getCurrentKeyFunction(MD->getParent())) &&
- MD == KeyFunction->getCanonicalDecl()) {
- // Update the key-function state if necessary for this ABI.
- if (FD->isInlined() &&
- !Context.getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
- Context.setNonKeyFunction(MD);
-
- // If the newly-chosen key function is already defined, then we
- // need to mark the vtable as used retroactively.
- KeyFunction = Context.getCurrentKeyFunction(MD->getParent());
- const FunctionDecl *Definition;
- if (KeyFunction && KeyFunction->isDefined(Definition))
- MarkVTableUsed(Definition->getLocation(), MD->getParent(), true);
- } else {
- // We just defined they key function; mark the vtable as used.
- MarkVTableUsed(FD->getLocation(), MD->getParent(), true);
+ auto findBeginLoc = [&]() {
+ // If the return type has `const` qualifier, we want to insert
+ // `static` before `const` (and not before the typename).
+ if ((FD->getReturnType()->isAnyPointerType() &&
+ FD->getReturnType()->getPointeeType().isConstQualified()) ||
+ FD->getReturnType().isConstQualified()) {
+ // But only do this if we can determine where the `const` is.
+
+ if (isLocAtConst(FD->getBeginLoc(), getSourceManager(),
+ getLangOpts()))
+
+ return FD->getBeginLoc();
+ }
+ return FD->getTypeSpecStartLoc();
+ };
+ Diag(FD->getTypeSpecStartLoc(),
+ diag::note_static_for_internal_linkage)
+ << /* function */ 1
+ << (FD->getStorageClass() == SC_None
+ ? FixItHint::CreateInsertion(findBeginLoc(), "static ")
+ : FixItHint{});
}
}
- }
- assert((FD == getCurFunctionDecl() || getCurLambda()->CallOperator == FD) &&
- "Function parsing confused");
- } else if (ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(dcl)) {
- assert(MD == getCurMethodDecl() && "Method parsing confused");
- MD->setBody(Body);
- if (!MD->isInvalidDecl()) {
- DiagnoseSizeOfParametersAndReturnValue(MD->parameters(),
- MD->getReturnType(), MD);
+ // We might not have found a prototype because we didn't wish to warn on
+ // the lack of a missing prototype. Try again without the checks for
+ // whether we want to warn on the missing prototype.
+ if (!PossiblePrototype)
+ (void)FindPossiblePrototype(FD, PossiblePrototype);
+
+ // If the function being defined does not have a prototype, then we may
+ // need to diagnose it as changing behavior in C23 because we now know
+ // whether the function accepts arguments or not. This only handles the
+ // case where the definition has no prototype but does have parameters
+ // and either there is no previous potential prototype, or the previous
+ // potential prototype also has no actual prototype. This handles cases
+ // like:
+ // void f(); void f(a) int a; {}
+ // void g(a) int a; {}
+ // See MergeFunctionDecl() for other cases of the behavior change
+ // diagnostic. See GetFullTypeForDeclarator() for handling of a function
+ // type without a prototype.
+ if (!FD->hasWrittenPrototype() && FD->getNumParams() != 0 &&
+ (!PossiblePrototype || (!PossiblePrototype->hasWrittenPrototype() &&
+ !PossiblePrototype->isImplicit()))) {
+ // The function definition has parameters, so this will change behavior
+ // in C23. If there is a possible prototype, it comes before the
+ // function definition.
+ // FIXME: The declaration may have already been diagnosed as being
+ // deprecated in GetFullTypeForDeclarator() if it had no arguments, but
+ // there's no way to test for the "changes behavior" condition in
+ // SemaType.cpp when forming the declaration's function type. So, we do
+ // this awkward dance instead.
+ //
+ // If we have a possible prototype and it declares a function with a
+ // prototype, we don't want to diagnose it; if we have a possible
+ // prototype and it has no prototype, it may have already been
+ // diagnosed in SemaType.cpp as deprecated depending on whether
+ // -Wstrict-prototypes is enabled. If we already warned about it being
+ // deprecated, add a note that it also changes behavior. If we didn't
+ // warn about it being deprecated (because the diagnostic is not
+ // enabled), warn now that it is deprecated and changes behavior.
+
+ // This K&R C function definition definitely changes behavior in C23,
+ // so diagnose it.
+ Diag(FD->getLocation(), diag::warn_non_prototype_changes_behavior)
+ << /*definition*/ 1 << /* not supported in C23 */ 0;
+
+ // If we have a possible prototype for the function which is a user-
+ // visible declaration, we already tested that it has no prototype.
+ // This will change behavior in C23. This gets a warning rather than a
+ // note because it's the same behavior-changing problem as with the
+ // definition.
+ if (PossiblePrototype)
+ Diag(PossiblePrototype->getLocation(),
+ diag::warn_non_prototype_changes_behavior)
+ << /*declaration*/ 0 << /* conflicting */ 1 << /*subsequent*/ 1
+ << /*definition*/ 1;
+ }
- if (Body)
- computeNRVO(Body, FSI);
- }
- if (FSI->ObjCShouldCallSuper) {
- Diag(MD->getEndLoc(), diag::warn_objc_missing_super_call)
- << MD->getSelector().getAsString();
- FSI->ObjCShouldCallSuper = false;
- }
- if (FSI->ObjCWarnForNoDesignatedInitChain) {
- const ObjCMethodDecl *InitMethod = nullptr;
- bool isDesignated =
- MD->isDesignatedInitializerForTheInterface(&InitMethod);
- assert(isDesignated && InitMethod);
- (void)isDesignated;
+ // Warn on CPUDispatch with an actual body.
+ if (FD->isMultiVersion() && FD->hasAttr<CPUDispatchAttr>() && Body)
+ if (const auto *CmpndBody = dyn_cast<CompoundStmt>(Body))
+ if (!CmpndBody->body_empty())
+ Diag(CmpndBody->body_front()->getBeginLoc(),
+ diag::warn_dispatch_body_ignored);
+
+ if (auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ const CXXMethodDecl *KeyFunction;
+ if (MD->isOutOfLine() && (MD = MD->getCanonicalDecl()) &&
+ MD->isVirtual() &&
+ (KeyFunction = Context.getCurrentKeyFunction(MD->getParent())) &&
+ MD == KeyFunction->getCanonicalDecl()) {
+ // Update the key-function state if necessary for this ABI.
+ if (FD->isInlined() &&
+ !Context.getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
+ Context.setNonKeyFunction(MD);
+
+ // If the newly-chosen key function is already defined, then we
+ // need to mark the vtable as used retroactively.
+ KeyFunction = Context.getCurrentKeyFunction(MD->getParent());
+ const FunctionDecl *Definition;
+ if (KeyFunction && KeyFunction->isDefined(Definition))
+ MarkVTableUsed(Definition->getLocation(), MD->getParent(), true);
+ } else {
+ // We just defined they key function; mark the vtable as used.
+ MarkVTableUsed(FD->getLocation(), MD->getParent(), true);
+ }
+ }
+ }
- auto superIsNSObject = [&](const ObjCMethodDecl *MD) {
- auto IFace = MD->getClassInterface();
- if (!IFace)
- return false;
- auto SuperD = IFace->getSuperClass();
- if (!SuperD)
- return false;
- return SuperD->getIdentifier() ==
- NSAPIObj->getNSClassId(NSAPI::ClassId_NSObject);
- };
- // Don't issue this warning for unavailable inits or direct subclasses
- // of NSObject.
- if (!MD->isUnavailable() && !superIsNSObject(MD)) {
- Diag(MD->getLocation(),
- diag::warn_objc_designated_init_missing_super_call);
- Diag(InitMethod->getLocation(),
- diag::note_objc_designated_init_marked_here);
+ assert(
+ (FD == getCurFunctionDecl() || getCurLambda()->CallOperator == FD) &&
+ "Function parsing confused");
+ } else if (ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(dcl)) {
+ assert(MD == getCurMethodDecl() && "Method parsing confused");
+ MD->setBody(Body);
+ if (!MD->isInvalidDecl()) {
+ DiagnoseSizeOfParametersAndReturnValue(MD->parameters(),
+ MD->getReturnType(), MD);
+
+ if (Body)
+ computeNRVO(Body, FSI);
+ }
+ if (FSI->ObjCShouldCallSuper) {
+ Diag(MD->getEndLoc(), diag::warn_objc_missing_super_call)
+ << MD->getSelector().getAsString();
+ FSI->ObjCShouldCallSuper = false;
+ }
+ if (FSI->ObjCWarnForNoDesignatedInitChain) {
+ const ObjCMethodDecl *InitMethod = nullptr;
+ bool isDesignated =
+ MD->isDesignatedInitializerForTheInterface(&InitMethod);
+ assert(isDesignated && InitMethod);
+ (void)isDesignated;
+
+ auto superIsNSObject = [&](const ObjCMethodDecl *MD) {
+ auto IFace = MD->getClassInterface();
+ if (!IFace)
+ return false;
+ auto SuperD = IFace->getSuperClass();
+ if (!SuperD)
+ return false;
+ return SuperD->getIdentifier() ==
+ NSAPIObj->getNSClassId(NSAPI::ClassId_NSObject);
+ };
+ // Don't issue this warning for unavailable inits or direct subclasses
+ // of NSObject.
+ if (!MD->isUnavailable() && !superIsNSObject(MD)) {
+ Diag(MD->getLocation(),
+ diag::warn_objc_designated_init_missing_super_call);
+ Diag(InitMethod->getLocation(),
+ diag::note_objc_designated_init_marked_here);
+ }
+ FSI->ObjCWarnForNoDesignatedInitChain = false;
+ }
+ if (FSI->ObjCWarnForNoInitDelegation) {
+ // Don't issue this warning for unavaialable inits.
+ if (!MD->isUnavailable())
+ Diag(MD->getLocation(),
+ diag::warn_objc_secondary_init_missing_init_call);
+ FSI->ObjCWarnForNoInitDelegation = false;
}
- FSI->ObjCWarnForNoDesignatedInitChain = false;
- }
- if (FSI->ObjCWarnForNoInitDelegation) {
- // Don't issue this warning for unavaialable inits.
- if (!MD->isUnavailable())
- Diag(MD->getLocation(),
- diag::warn_objc_secondary_init_missing_init_call);
- FSI->ObjCWarnForNoInitDelegation = false;
- }
- diagnoseImplicitlyRetainedSelf(*this);
- } else {
- // Parsing the function declaration failed in some way. Pop the fake scope
- // we pushed on.
- PopFunctionScopeInfo(ActivePolicy, dcl);
- return nullptr;
- }
+ diagnoseImplicitlyRetainedSelf(*this);
+ } else {
+ // Parsing the function declaration failed in some way. Pop the fake scope
+ // we pushed on.
+ PopFunctionScopeInfo(ActivePolicy, dcl);
+ return nullptr;
+ }
- if (Body && FSI->HasPotentialAvailabilityViolations)
- DiagnoseUnguardedAvailabilityViolations(dcl);
+ if (Body && FSI->HasPotentialAvailabilityViolations)
+ DiagnoseUnguardedAvailabilityViolations(dcl);
- assert(!FSI->ObjCShouldCallSuper &&
- "This should only be set for ObjC methods, which should have been "
- "handled in the block above.");
+ assert(!FSI->ObjCShouldCallSuper &&
+ "This should only be set for ObjC methods, which should have been "
+ "handled in the block above.");
- // Verify and clean out per-function state.
- if (Body && (!FD || !FD->isDefaulted())) {
- // C++ constructors that have function-try-blocks can't have return
- // statements in the handlers of that block. (C++ [except.handle]p14)
- // Verify this.
- if (FD && isa<CXXConstructorDecl>(FD) && isa<CXXTryStmt>(Body))
- DiagnoseReturnInConstructorExceptionHandler(cast<CXXTryStmt>(Body));
+ // Verify and clean out per-function state.
+ if (Body && (!FD || !FD->isDefaulted())) {
+ // C++ constructors that have function-try-blocks can't have return
+ // statements in the handlers of that block. (C++ [except.handle]p14)
+ // Verify this.
+ if (FD && isa<CXXConstructorDecl>(FD) && isa<CXXTryStmt>(Body))
+ DiagnoseReturnInConstructorExceptionHandler(cast<CXXTryStmt>(Body));
- // Verify that gotos and switch cases don't jump into scopes illegally.
- if (FSI->NeedsScopeChecking() &&
- !PP.isCodeCompletionEnabled())
- DiagnoseInvalidJumps(Body);
+ // Verify that gotos and switch cases don't jump into scopes illegally.
+ if (FSI->NeedsScopeChecking() && !PP.isCodeCompletionEnabled())
+ DiagnoseInvalidJumps(Body);
- if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(dcl)) {
- if (!Destructor->getParent()->isDependentType())
- CheckDestructor(Destructor);
+ if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(dcl)) {
+ if (!Destructor->getParent()->isDependentType())
+ CheckDestructor(Destructor);
- MarkBaseAndMemberDestructorsReferenced(Destructor->getLocation(),
- Destructor->getParent());
- }
+ MarkBaseAndMemberDestructorsReferenced(Destructor->getLocation(),
+ Destructor->getParent());
+ }
- // If any errors have occurred, clear out any temporaries that may have
- // been leftover. This ensures that these temporaries won't be picked up for
- // deletion in some later function.
- if (hasUncompilableErrorOccurred() ||
- getDiagnostics().getSuppressAllDiagnostics()) {
- DiscardCleanupsInEvaluationContext();
- }
- if (!hasUncompilableErrorOccurred() &&
- !isa<FunctionTemplateDecl>(dcl)) {
- // Since the body is valid, issue any analysis-based warnings that are
- // enabled.
- ActivePolicy = &WP;
- }
+ // If any errors have occurred, clear out any temporaries that may have
+ // been leftover. This ensures that these temporaries won't be picked up
+ // for deletion in some later function.
+ if (hasUncompilableErrorOccurred() ||
+ hasAnyUnrecoverableErrorsInThisFunction() ||
+ getDiagnostics().getSuppressAllDiagnostics()) {
+ DiscardCleanupsInEvaluationContext();
+ }
+ if (!hasUncompilableErrorOccurred() && !isa<FunctionTemplateDecl>(dcl)) {
+ // Since the body is valid, issue any analysis-based warnings that are
+ // enabled.
+ ActivePolicy = &WP;
+ }
- if (!IsInstantiation && FD && FD->isConstexpr() && !FD->isInvalidDecl() &&
- !CheckConstexprFunctionDefinition(FD, CheckConstexprKind::Diagnose))
- FD->setInvalidDecl();
+ if (!IsInstantiation && FD &&
+ (FD->isConstexpr() || FD->hasAttr<MSConstexprAttr>()) &&
+ !FD->isInvalidDecl() &&
+ !CheckConstexprFunctionDefinition(FD, CheckConstexprKind::Diagnose))
+ FD->setInvalidDecl();
- if (FD && FD->hasAttr<NakedAttr>()) {
- for (const Stmt *S : Body->children()) {
- // Allow local register variables without initializer as they don't
- // require prologue.
- bool RegisterVariables = false;
- if (auto *DS = dyn_cast<DeclStmt>(S)) {
- for (const auto *Decl : DS->decls()) {
- if (const auto *Var = dyn_cast<VarDecl>(Decl)) {
- RegisterVariables =
- Var->hasAttr<AsmLabelAttr>() && !Var->hasInit();
- if (!RegisterVariables)
- break;
+ if (FD && FD->hasAttr<NakedAttr>()) {
+ for (const Stmt *S : Body->children()) {
+ // Allow local register variables without initializer as they don't
+ // require prologue.
+ bool RegisterVariables = false;
+ if (auto *DS = dyn_cast<DeclStmt>(S)) {
+ for (const auto *Decl : DS->decls()) {
+ if (const auto *Var = dyn_cast<VarDecl>(Decl)) {
+ RegisterVariables =
+ Var->hasAttr<AsmLabelAttr>() && !Var->hasInit();
+ if (!RegisterVariables)
+ break;
+ }
}
}
- }
- if (RegisterVariables)
- continue;
- if (!isa<AsmStmt>(S) && !isa<NullStmt>(S)) {
- Diag(S->getBeginLoc(), diag::err_non_asm_stmt_in_naked_function);
- Diag(FD->getAttr<NakedAttr>()->getLocation(), diag::note_attribute);
- FD->setInvalidDecl();
- break;
+ if (RegisterVariables)
+ continue;
+ if (!isa<AsmStmt>(S) && !isa<NullStmt>(S)) {
+ Diag(S->getBeginLoc(), diag::err_non_asm_stmt_in_naked_function);
+ Diag(FD->getAttr<NakedAttr>()->getLocation(), diag::note_attribute);
+ FD->setInvalidDecl();
+ break;
+ }
}
}
- }
- assert(ExprCleanupObjects.size() ==
- ExprEvalContexts.back().NumCleanupObjects &&
- "Leftover temporaries in function");
- assert(!Cleanup.exprNeedsCleanups() && "Unaccounted cleanups in function");
- assert(MaybeODRUseExprs.empty() &&
- "Leftover expressions for odr-use checking");
- }
+ assert(ExprCleanupObjects.size() ==
+ ExprEvalContexts.back().NumCleanupObjects &&
+ "Leftover temporaries in function");
+ assert(!Cleanup.exprNeedsCleanups() &&
+ "Unaccounted cleanups in function");
+ assert(MaybeODRUseExprs.empty() &&
+ "Leftover expressions for odr-use checking");
+ }
+ } // Pops the ExitFunctionBodyRAII scope, which needs to happen before we pop
+ // the declaration context below. Otherwise, we're unable to transform
+ // 'this' expressions when transforming immediate context functions.
if (!IsInstantiation)
PopDeclContext();
@@ -14825,13 +16415,18 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
DiscardCleanupsInEvaluationContext();
}
- if (FD && (LangOpts.OpenMP || LangOpts.CUDA || LangOpts.SYCLIsDevice)) {
+ if (FD && ((LangOpts.OpenMP && (LangOpts.OpenMPIsTargetDevice ||
+ !LangOpts.OMPTargetTriples.empty())) ||
+ LangOpts.CUDA || LangOpts.SYCLIsDevice)) {
auto ES = getEmissionStatus(FD);
if (ES == Sema::FunctionEmissionStatus::Emitted ||
ES == Sema::FunctionEmissionStatus::Unknown)
DeclsToCheckForDeferredDiags.insert(FD);
}
+ if (FD && !FD->isDeleted())
+ checkTypeSupport(FD->getType(), FD->getLocation(), FD);
+
return dcl;
}
@@ -14853,6 +16448,10 @@ void Sema::ActOnFinishDelayedAttribute(Scope *S, Decl *D,
/// call, forming a call to an implicitly defined function (per C99 6.5.1p2).
NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
IdentifierInfo &II, Scope *S) {
+ // It is not valid to implicitly define a function in C23.
+ assert(LangOpts.implicitFunctionsAllowed() &&
+ "Implicit function declarations aren't allowed in this language mode");
+
// Find the scope in which the identifier is injected and the corresponding
// DeclContext.
// FIXME: C89 does not say what happens if there is no enclosing block scope.
@@ -14862,8 +16461,14 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
while (!BlockScope->isCompoundStmtScope() && BlockScope->getParent())
BlockScope = BlockScope->getParent();
+ // Loop until we find a DeclContext that is either a function/method or the
+ // translation unit, which are the only two valid places to implicitly define
+ // a function. This avoids accidentally defining the function within a tag
+ // declaration, for example.
Scope *ContextScope = BlockScope;
- while (!ContextScope->getEntity())
+ while (!ContextScope->getEntity() ||
+ (!ContextScope->getEntity()->isFunctionOrMethod() &&
+ !ContextScope->getEntity()->isTranslationUnit()))
ContextScope = ContextScope->getParent();
ContextRAII SavedContext(*this, ContextScope->getEntity());
@@ -14891,18 +16496,40 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
}
}
- // Extension in C99. Legal in C90, but warn about it.
+ // Extension in C99 (defaults to error). Legal in C89, but warn about it.
unsigned diag_id;
- if (II.getName().startswith("__builtin_"))
+ if (II.getName().starts_with("__builtin_"))
diag_id = diag::warn_builtin_unknown;
// OpenCL v2.0 s6.9.u - Implicit function declaration is not supported.
- else if (getLangOpts().OpenCL)
- diag_id = diag::err_opencl_implicit_function_decl;
else if (getLangOpts().C99)
- diag_id = diag::ext_implicit_function_decl;
+ diag_id = diag::ext_implicit_function_decl_c99;
else
diag_id = diag::warn_implicit_function_decl;
+
+ TypoCorrection Corrected;
+ // Because typo correction is expensive, only do it if the implicit
+ // function declaration is going to be treated as an error.
+ //
+ // Perform the correction before issuing the main diagnostic, as some
+ // consumers use typo-correction callbacks to enhance the main diagnostic.
+ if (S && !ExternCPrev &&
+ (Diags.getDiagnosticLevel(diag_id, Loc) >= DiagnosticsEngine::Error)) {
+ DeclFilterCCC<FunctionDecl> CCC{};
+ Corrected = CorrectTypo(DeclarationNameInfo(&II, Loc), LookupOrdinaryName,
+ S, nullptr, CCC, CTK_NonError);
+ }
+
Diag(Loc, diag_id) << &II;
+ if (Corrected) {
+ // If the correction is going to suggest an implicitly defined function,
+ // skip the correction as not being a particularly good idea.
+ bool Diagnose = true;
+ if (const auto *D = Corrected.getCorrectionDecl())
+ Diagnose = !D->isImplicit();
+ if (Diagnose)
+ diagnoseTypo(Corrected, PDiag(diag::note_function_suggestion),
+ /*ErrorRecovery*/ false);
+ }
// If we found a prior declaration of this function, don't bother building
// another one. We've already pushed that one into scope, so there's nothing
@@ -14910,18 +16537,6 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
if (ExternCPrev)
return ExternCPrev;
- // Because typo correction is expensive, only do it if the implicit
- // function declaration is going to be treated as an error.
- if (Diags.getDiagnosticLevel(diag_id, Loc) >= DiagnosticsEngine::Error) {
- TypoCorrection Corrected;
- DeclFilterCCC<FunctionDecl> CCC{};
- if (S && (Corrected =
- CorrectTypo(DeclarationNameInfo(&II, Loc), LookupOrdinaryName,
- S, nullptr, CCC, CTK_NonError)))
- diagnoseTypo(Corrected, PDiag(diag::note_function_suggestion),
- /*ErrorRecovery*/false);
- }
-
// Set a Declarator for the implicit definition: int foo();
const char *Dummy;
AttributeFactory attrFactory;
@@ -14932,7 +16547,7 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
(void)Error; // Silence warning.
assert(!Error && "Error setting up implicit decl!");
SourceLocation NoLoc;
- Declarator D(DS, DeclaratorContext::Block);
+ Declarator D(DS, ParsedAttributesView::none(), DeclaratorContext::Block);
D.AddTypeInfo(DeclaratorChunk::getFunction(/*HasProto=*/false,
/*IsAmbiguous=*/false,
/*LParenLoc=*/NoLoc,
@@ -14949,8 +16564,8 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
/*NumExceptions=*/0,
/*NoexceptExpr=*/nullptr,
/*ExceptionSpecTokens=*/nullptr,
- /*DeclsInPrototype=*/None, Loc,
- Loc, D),
+ /*DeclsInPrototype=*/std::nullopt,
+ Loc, Loc, D),
std::move(DS.getAttributes()), SourceLocation());
D.SetIdentifier(&II, Loc);
@@ -14978,7 +16593,7 @@ void Sema::AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FD->getDeclName().getCXXOverloadedOperator() != OO_Array_New)
return;
- Optional<unsigned> AlignmentParam;
+ std::optional<unsigned> AlignmentParam;
bool IsNothrow = false;
if (!FD->isReplaceableGlobalAllocationFunction(&AlignmentParam, &IsNothrow))
return;
@@ -14988,7 +16603,11 @@ void Sema::AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
// indicates failure by returning a null pointer value. Any other allocation
// function never returns a null pointer value and indicates failure only by
// throwing an exception [...]
- if (!IsNothrow && !FD->hasAttr<ReturnsNonNullAttr>())
+ //
+ // However, -fcheck-new invalidates this possible assumption, so don't add
+ // NonNull when that is enabled.
+ if (!IsNothrow && !FD->hasAttr<ReturnsNonNullAttr>() &&
+ !getLangOpts().CheckNew)
FD->addAttr(ReturnsNonNullAttr::CreateImplicit(Context, FD->getLocation()));
// C++2a [basic.stc.dynamic.allocation]p2:
@@ -15017,9 +16636,9 @@ void Sema::AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
// (3.1) If the allocation function takes an argument of type
// std​::​align_­val_­t, the storage will have the alignment
// specified by the value of this argument.
- if (AlignmentParam.hasValue() && !FD->hasAttr<AllocAlignAttr>()) {
+ if (AlignmentParam && !FD->hasAttr<AllocAlignAttr>()) {
FD->addAttr(AllocAlignAttr::CreateImplicit(
- Context, ParamIdx(AlignmentParam.getValue(), FD), FD->getLocation()));
+ Context, ParamIdx(*AlignmentParam, FD), FD->getLocation()));
}
// FIXME:
@@ -15084,18 +16703,27 @@ void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
FD->addAttr(CallbackAttr::CreateImplicit(
Context, Encoding.data(), Encoding.size(), FD->getLocation()));
- // Mark const if we don't care about errno and that is the only thing
- // preventing the function from being const. This allows IRgen to use LLVM
- // intrinsics for such functions.
- if (!getLangOpts().MathErrno && !FD->hasAttr<ConstAttr>() &&
- Context.BuiltinInfo.isConstWithoutErrno(BuiltinID))
+ // Mark const if we don't care about errno and/or floating point exceptions
+ // that are the only thing preventing the function from being const. This
+ // allows IRgen to use LLVM intrinsics for such functions.
+ bool NoExceptions =
+ getLangOpts().getDefaultExceptionMode() == LangOptions::FPE_Ignore;
+ bool ConstWithoutErrnoAndExceptions =
+ Context.BuiltinInfo.isConstWithoutErrnoAndExceptions(BuiltinID);
+ bool ConstWithoutExceptions =
+ Context.BuiltinInfo.isConstWithoutExceptions(BuiltinID);
+ if (!FD->hasAttr<ConstAttr>() &&
+ (ConstWithoutErrnoAndExceptions || ConstWithoutExceptions) &&
+ (!ConstWithoutErrnoAndExceptions ||
+ (!getLangOpts().MathErrno && NoExceptions)) &&
+ (!ConstWithoutExceptions || NoExceptions))
FD->addAttr(ConstAttr::CreateImplicit(Context, FD->getLocation()));
- // We make "fma" on some platforms const because we know it does not set
+ // We make "fma" on GNU or Windows const because we know it does not set
// errno in those environments even though it could set errno based on the
// C standard.
const llvm::Triple &Trip = Context.getTargetInfo().getTriple();
- if ((Trip.isGNUEnvironment() || Trip.isAndroid() || Trip.isOSMSVCRT()) &&
+ if ((Trip.isGNUEnvironment() || Trip.isOSMSVCRT()) &&
!FD->hasAttr<ConstAttr>()) {
switch (BuiltinID) {
case Builtin::BI__builtin_fma:
@@ -15132,6 +16760,57 @@ void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
else
FD->addAttr(CUDAHostAttr::CreateImplicit(Context, FD->getLocation()));
}
+
+ // Add known guaranteed alignment for allocation functions.
+ switch (BuiltinID) {
+ case Builtin::BImemalign:
+ case Builtin::BIaligned_alloc:
+ if (!FD->hasAttr<AllocAlignAttr>())
+ FD->addAttr(AllocAlignAttr::CreateImplicit(Context, ParamIdx(1, FD),
+ FD->getLocation()));
+ break;
+ default:
+ break;
+ }
+
+ // Add allocsize attribute for allocation functions.
+ switch (BuiltinID) {
+ case Builtin::BIcalloc:
+ FD->addAttr(AllocSizeAttr::CreateImplicit(
+ Context, ParamIdx(1, FD), ParamIdx(2, FD), FD->getLocation()));
+ break;
+ case Builtin::BImemalign:
+ case Builtin::BIaligned_alloc:
+ case Builtin::BIrealloc:
+ FD->addAttr(AllocSizeAttr::CreateImplicit(Context, ParamIdx(2, FD),
+ ParamIdx(), FD->getLocation()));
+ break;
+ case Builtin::BImalloc:
+ FD->addAttr(AllocSizeAttr::CreateImplicit(Context, ParamIdx(1, FD),
+ ParamIdx(), FD->getLocation()));
+ break;
+ default:
+ break;
+ }
+
+ // Add lifetime attribute to std::move, std::fowrard et al.
+ switch (BuiltinID) {
+ case Builtin::BIaddressof:
+ case Builtin::BI__addressof:
+ case Builtin::BI__builtin_addressof:
+ case Builtin::BIas_const:
+ case Builtin::BIforward:
+ case Builtin::BIforward_like:
+ case Builtin::BImove:
+ case Builtin::BImove_if_noexcept:
+ if (ParmVarDecl *P = FD->getParamDecl(0u);
+ !P->hasAttr<LifetimeBoundAttr>())
+ P->addAttr(
+ LifetimeBoundAttr::CreateImplicit(Context, FD->getLocation()));
+ break;
+ default:
+ break;
+ }
}
AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(FD);
@@ -15149,11 +16828,10 @@ void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
IdentifierInfo *Name = FD->getIdentifier();
if (!Name)
return;
- if ((!getLangOpts().CPlusPlus &&
- FD->getDeclContext()->isTranslationUnit()) ||
+ if ((!getLangOpts().CPlusPlus && FD->getDeclContext()->isTranslationUnit()) ||
(isa<LinkageSpecDecl>(FD->getDeclContext()) &&
cast<LinkageSpecDecl>(FD->getDeclContext())->getLanguage() ==
- LinkageSpecDecl::lang_c)) {
+ LinkageSpecLanguageIDs::C)) {
// Okay: this could be a libc/libm/Objective-C function we know
// about.
} else
@@ -15248,10 +16926,8 @@ bool Sema::CheckEnumUnderlyingType(TypeSourceInfo *TI) {
if (BT->isInteger())
return false;
- if (T->isExtIntType())
- return false;
-
- return Diag(UnderlyingLoc, diag::err_enum_invalid_underlying) << T;
+ return Diag(UnderlyingLoc, diag::err_enum_invalid_underlying)
+ << T << T->isBitIntType();
}
/// Check whether this is a valid redeclaration of a previous enumeration.
@@ -15295,9 +16971,12 @@ bool Sema::CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
/// \returns diagnostic %select index.
static unsigned getRedeclDiagFromTagKind(TagTypeKind Tag) {
switch (Tag) {
- case TTK_Struct: return 0;
- case TTK_Interface: return 1;
- case TTK_Class: return 2;
+ case TagTypeKind::Struct:
+ return 0;
+ case TagTypeKind::Interface:
+ return 1;
+ case TagTypeKind::Class:
+ return 2;
default: llvm_unreachable("Invalid tag kind for redecl diagnostic!");
}
}
@@ -15308,7 +16987,8 @@ static unsigned getRedeclDiagFromTagKind(TagTypeKind Tag) {
/// \returns true iff the tag kind is compatible.
static bool isClassCompatTagKind(TagTypeKind Tag)
{
- return Tag == TTK_Struct || Tag == TTK_Class || Tag == TTK_Interface;
+ return Tag == TagTypeKind::Struct || Tag == TagTypeKind::Class ||
+ Tag == TagTypeKind::Interface;
}
Sema::NonTagKind Sema::getNonTagTypeDeclKind(const Decl *PrevDecl,
@@ -15324,13 +17004,13 @@ Sema::NonTagKind Sema::getNonTagTypeDeclKind(const Decl *PrevDecl,
else if (isa<TemplateTemplateParmDecl>(PrevDecl))
return NTK_TemplateTemplateArgument;
switch (TTK) {
- case TTK_Struct:
- case TTK_Interface:
- case TTK_Class:
+ case TagTypeKind::Struct:
+ case TagTypeKind::Interface:
+ case TagTypeKind::Class:
return getLangOpts().CPlusPlus ? NTK_NonClass : NTK_NonStruct;
- case TTK_Union:
+ case TagTypeKind::Union:
return NTK_NonUnion;
- case TTK_Enum:
+ case TagTypeKind::Enum:
return NTK_NonEnum;
}
llvm_unreachable("invalid TTK");
@@ -15533,17 +17213,16 @@ static bool isAcceptableTagRedeclContext(Sema &S, DeclContext *OldDC,
///
/// \param SkipBody If non-null, will be set to indicate if the caller should
/// skip the definition of this tag and treat it as if it were a declaration.
-Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
- SourceLocation KWLoc, CXXScopeSpec &SS,
- IdentifierInfo *Name, SourceLocation NameLoc,
- const ParsedAttributesView &Attrs, AccessSpecifier AS,
- SourceLocation ModulePrivateLoc,
- MultiTemplateParamsArg TemplateParameterLists,
- bool &OwnedDecl, bool &IsDependent,
- SourceLocation ScopedEnumKWLoc,
- bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
- bool IsTypeSpecifier, bool IsTemplateParamOrArg,
- SkipBodyInfo *SkipBody) {
+DeclResult
+Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
+ CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
+ const ParsedAttributesView &Attrs, AccessSpecifier AS,
+ SourceLocation ModulePrivateLoc,
+ MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
+ bool &IsDependent, SourceLocation ScopedEnumKWLoc,
+ bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
+ bool IsTypeSpecifier, bool IsTemplateParamOrArg,
+ OffsetOfKind OOK, SkipBodyInfo *SkipBody) {
// If this is not a definition, it must have a name.
IdentifierInfo *OrigName = Name;
assert((Name != nullptr || TUK == TUK_Definition) &&
@@ -15567,9 +17246,9 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
MatchTemplateParametersToScopeSpecifier(
KWLoc, NameLoc, SS, nullptr, TemplateParameterLists,
TUK == TUK_Friend, isMemberSpecialization, Invalid)) {
- if (Kind == TTK_Enum) {
+ if (Kind == TagTypeKind::Enum) {
Diag(KWLoc, diag::err_enum_template);
- return nullptr;
+ return true;
}
if (TemplateParams->size() > 0) {
@@ -15577,7 +17256,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// be a member of another template).
if (Invalid)
- return nullptr;
+ return true;
OwnedDecl = false;
DeclResult Result = CheckClassTemplate(
@@ -15596,7 +17275,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
if (!TemplateParameterLists.empty() && isMemberSpecialization &&
CheckTemplateDeclScope(S, TemplateParameterLists.back()))
- return nullptr;
+ return true;
}
// Figure out the underlying type if this a enum declaration. We need to do
@@ -15605,7 +17284,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
llvm::PointerUnion<const Type*, TypeSourceInfo*> EnumUnderlying;
bool IsFixed = !UnderlyingType.isUnset() || ScopedEnum;
- if (Kind == TTK_Enum) {
+ if (Kind == TagTypeKind::Enum) {
if (UnderlyingType.isInvalid() || (!UnderlyingType.get() && ScopedEnum)) {
// No underlying type explicitly specified, or we failed to parse the
// type, default to int.
@@ -15655,7 +17334,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation Loc = NameLoc.isValid() ? NameLoc : KWLoc;
TagDecl *New = nullptr;
- if (Kind == TTK_Enum) {
+ if (Kind == TagTypeKind::Enum) {
New = EnumDecl::Create(Context, SearchDC, KWLoc, Loc, Name, nullptr,
ScopedEnum, ScopedEnumUsesClassTag, IsFixed);
// If this is an undefined enum, bail.
@@ -15667,7 +17346,10 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
ED->setIntegerTypeSourceInfo(TI);
else
ED->setIntegerType(QualType(EnumUnderlying.get<const Type *>(), 0));
- ED->setPromotionType(ED->getIntegerType());
+ QualType EnumTy = ED->getIntegerType();
+ ED->setPromotionType(Context.isPromotableIntegerType(EnumTy)
+ ? Context.getPromotedIntegerType(EnumTy)
+ : EnumTy);
}
} else { // struct/union
New = RecordDecl::Create(Context, Kind, SearchDC, KWLoc, Loc, Name,
@@ -15709,26 +17391,26 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
DC = computeDeclContext(SS, false);
if (!DC) {
IsDependent = true;
- return nullptr;
+ return true;
}
} else {
DC = computeDeclContext(SS, true);
if (!DC) {
Diag(SS.getRange().getBegin(), diag::err_dependent_nested_name_spec)
<< SS.getRange();
- return nullptr;
+ return true;
}
}
if (RequireCompleteDeclContext(SS, DC))
- return nullptr;
+ return true;
SearchDC = DC;
// Look-up name inside 'foo::'.
LookupQualifiedName(Previous, DC);
if (Previous.isAmbiguous())
- return nullptr;
+ return true;
if (Previous.empty()) {
// Name lookup did not find anything. However, if the
@@ -15740,12 +17422,12 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
if (Previous.wasNotFoundInCurrentInstantiation() &&
(TUK == TUK_Reference || TUK == TUK_Friend)) {
IsDependent = true;
- return nullptr;
+ return true;
}
// A tag 'foo::bar' must already exist.
Diag(NameLoc, diag::err_not_tag_in_scope)
- << Kind << Name << DC << SS.getRange();
+ << llvm::to_underlying(Kind) << Name << DC << SS.getRange();
Name = nullptr;
Invalid = true;
goto CreateNewDecl;
@@ -15757,7 +17439,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// -- every member of class T that is itself a type
if (TUK != TUK_Reference && TUK != TUK_Friend &&
DiagnoseClassNameShadow(SearchDC, DeclarationNameInfo(Name, NameLoc)))
- return nullptr;
+ return true;
// If this is a named struct, check to see if there was a previous forward
// declaration or definition.
@@ -15821,16 +17503,27 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// Note: there used to be some attempt at recovery here.
if (Previous.isAmbiguous())
- return nullptr;
+ return true;
if (!getLangOpts().CPlusPlus && TUK != TUK_Reference) {
// FIXME: This makes sure that we ignore the contexts associated
// with C structs, unions, and enums when looking for a matching
// tag declaration or definition. See the similar lookup tweak
// in Sema::LookupName; is there a better way to deal with this?
- while (isa<RecordDecl>(SearchDC) || isa<EnumDecl>(SearchDC))
+ while (isa<RecordDecl, EnumDecl, ObjCContainerDecl>(SearchDC))
+ SearchDC = SearchDC->getParent();
+ } else if (getLangOpts().CPlusPlus) {
+ // Inside ObjCContainer want to keep it as a lexical decl context but go
+ // past it (most often to TranslationUnit) to find the semantic decl
+ // context.
+ while (isa<ObjCContainerDecl>(SearchDC))
SearchDC = SearchDC->getParent();
}
+ } else if (getLangOpts().CPlusPlus) {
+ // Don't use ObjCContainerDecl as the semantic decl context for anonymous
+ // TagDecl the same way as we skip it for named TagDecl.
+ while (isa<ObjCContainerDecl>(SearchDC))
+ SearchDC = SearchDC->getParent();
}
if (Previous.isSingleResult() &&
@@ -15903,11 +17596,14 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
S = getTagInjectionScope(S, getLangOpts());
} else {
assert(TUK == TUK_Friend);
+ CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(SearchDC);
+
// C++ [namespace.memdef]p3:
// If a friend declaration in a non-local class first declares a
// class or function, the friend class or function is a member of
// the innermost enclosing namespace.
- SearchDC = SearchDC->getEnclosingNamespaceContext();
+ SearchDC = RD->isLocalClass() ? RD->isLocalClass()
+ : SearchDC->getEnclosingNamespaceContext();
}
// In C++, we need to do a redeclaration lookup to properly
@@ -15937,8 +17633,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// It's okay to have a tag decl in the same scope as a typedef
// which hides a tag decl in the same scope. Finding this
- // insanity with a redeclaration lookup can only actually happen
- // in C++.
+ // with a redeclaration lookup can only actually happen in C++.
//
// This is also okay for elaborated-type-specifiers, which is
// technically forbidden by the current standard but which is
@@ -15992,9 +17687,9 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
if (!isAcceptableTagRedeclaration(PrevTagDecl, Kind,
TUK == TUK_Definition, KWLoc,
Name)) {
- bool SafeToContinue
- = (PrevTagDecl->getTagKind() != TTK_Enum &&
- Kind != TTK_Enum);
+ bool SafeToContinue =
+ (PrevTagDecl->getTagKind() != TagTypeKind::Enum &&
+ Kind != TagTypeKind::Enum);
if (SafeToContinue)
Diag(KWLoc, diag::err_use_with_wrong_tag)
<< Name
@@ -16014,7 +17709,8 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
}
}
- if (Kind == TTK_Enum && PrevTagDecl->getTagKind() == TTK_Enum) {
+ if (Kind == TagTypeKind::Enum &&
+ PrevTagDecl->getTagKind() == TagTypeKind::Enum) {
const EnumDecl *PrevEnum = cast<EnumDecl>(PrevTagDecl);
if (TUK == TUK_Reference || TUK == TUK_Friend)
return PrevTagDecl;
@@ -16184,8 +17880,8 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
if ((TUK == TUK_Reference || TUK == TUK_Friend) &&
!Previous.isForRedeclaration()) {
NonTagKind NTK = getNonTagTypeDeclKind(PrevDecl, Kind);
- Diag(NameLoc, diag::err_tag_reference_non_tag) << PrevDecl << NTK
- << Kind;
+ Diag(NameLoc, diag::err_tag_reference_non_tag)
+ << PrevDecl << NTK << llvm::to_underlying(Kind);
Diag(PrevDecl->getLocation(), diag::note_declared_at);
Invalid = true;
@@ -16243,7 +17939,7 @@ CreateNewDecl:
// PrevDecl.
TagDecl *New;
- if (Kind == TTK_Enum) {
+ if (Kind == TagTypeKind::Enum) {
// FIXME: Tag decls should be chained to any simultaneous vardecls, e.g.:
// enum X { A, B, C } D; D should chain to X.
New = EnumDecl::Create(Context, SearchDC, KWLoc, Loc, Name,
@@ -16279,8 +17975,11 @@ CreateNewDecl:
if (TypeSourceInfo *TI = EnumUnderlying.dyn_cast<TypeSourceInfo*>())
ED->setIntegerTypeSourceInfo(TI);
else
- ED->setIntegerType(QualType(EnumUnderlying.get<const Type*>(), 0));
- ED->setPromotionType(ED->getIntegerType());
+ ED->setIntegerType(QualType(EnumUnderlying.get<const Type *>(), 0));
+ QualType EnumTy = ED->getIntegerType();
+ ED->setPromotionType(Context.isPromotableIntegerType(EnumTy)
+ ? Context.getPromotedIntegerType(EnumTy)
+ : EnumTy);
assert(ED->isComplete() && "enum with type should be complete");
}
} else {
@@ -16300,10 +17999,14 @@ CreateNewDecl:
cast_or_null<RecordDecl>(PrevDecl));
}
+ if (OOK != OOK_Outside && TUK == TUK_Definition && !getLangOpts().CPlusPlus)
+ Diag(New->getLocation(), diag::ext_type_defined_in_offsetof)
+ << (OOK == OOK_Macro) << New->getSourceRange();
+
// C++11 [dcl.type]p3:
// A type-specifier-seq shall not define a class or enumeration [...].
- if (getLangOpts().CPlusPlus && (IsTypeSpecifier || IsTemplateParamOrArg) &&
- TUK == TUK_Definition) {
+ if (!Invalid && getLangOpts().CPlusPlus &&
+ (IsTypeSpecifier || IsTemplateParamOrArg) && TUK == TUK_Definition) {
Diag(New->getLocation(), diag::err_type_defined_in_type_specifier)
<< Context.getTagDeclType(New);
Invalid = true;
@@ -16371,7 +18074,7 @@ CreateNewDecl:
// If we're declaring or defining a tag in function prototype scope in C,
// note that this type can only be used within the function and add it to
// the list of decls to inject into the function definition scope.
- if ((Name || Kind == TTK_Enum) &&
+ if ((Name || Kind == TagTypeKind::Enum) &&
getNonFieldDeclScope(S)->isFunctionPrototypeScope()) {
if (getLangOpts().CPlusPlus) {
// C++ [dcl.fct]p6:
@@ -16405,7 +18108,7 @@ CreateNewDecl:
SetMemberAccessSpecifier(New, PrevDecl, AS);
if (PrevDecl)
- CheckRedeclarationModuleOwnership(New, PrevDecl);
+ CheckRedeclarationInModule(New, PrevDecl);
if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip))
New->startDefinition();
@@ -16459,7 +18162,7 @@ CreateNewDecl:
if (New->isBeingDefined())
if (auto RD = dyn_cast<RecordDecl>(New))
RD->completeDefinition();
- return nullptr;
+ return true;
} else if (SkipBody && SkipBody->ShouldSkip) {
return SkipBody->Previous;
} else {
@@ -16481,8 +18184,7 @@ void Sema::ActOnTagStartDefinition(Scope *S, Decl *TagD) {
AddPushedVisibilityAttribute(Tag);
}
-bool Sema::ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
- SkipBodyInfo &SkipBody) {
+bool Sema::ActOnDuplicateDefinition(Decl *Prev, SkipBodyInfo &SkipBody) {
if (!hasStructuralCompatLayout(Prev, SkipBody.New))
return false;
@@ -16491,14 +18193,10 @@ bool Sema::ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
return true;
}
-Decl *Sema::ActOnObjCContainerStartDefinition(Decl *IDecl) {
- assert(isa<ObjCContainerDecl>(IDecl) &&
- "ActOnObjCContainerStartDefinition - Not ObjCContainerDecl");
- DeclContext *OCD = cast<DeclContext>(IDecl);
- assert(OCD->getLexicalParent() == CurContext &&
+void Sema::ActOnObjCContainerStartDefinition(ObjCContainerDecl *IDecl) {
+ assert(IDecl->getLexicalParent() == CurContext &&
"The next DeclContext should be lexically contained in the current one.");
- CurContext = OCD;
- return IDecl;
+ CurContext = IDecl;
}
void Sema::ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagD,
@@ -16518,9 +18216,10 @@ void Sema::ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagD,
Record->markAbstract();
if (FinalLoc.isValid()) {
- Record->addAttr(FinalAttr::Create(
- Context, FinalLoc, AttributeCommonInfo::AS_Keyword,
- static_cast<FinalAttr::Spelling>(IsFinalSpelledSealed)));
+ Record->addAttr(FinalAttr::Create(Context, FinalLoc,
+ IsFinalSpelledSealed
+ ? FinalAttr::Keyword_sealed
+ : FinalAttr::Keyword_final));
}
// C++ [class]p2:
// [...] The class-name is also inserted into the scope of the
@@ -16555,8 +18254,21 @@ void Sema::ActOnTagFinishDefinition(Scope *S, Decl *TagD,
RD->completeDefinition();
}
- if (isa<CXXRecordDecl>(Tag)) {
+ if (auto *RD = dyn_cast<CXXRecordDecl>(Tag)) {
FieldCollector->FinishClass();
+ if (RD->hasAttr<SYCLSpecialClassAttr>()) {
+ auto *Def = RD->getDefinition();
+ assert(Def && "The record is expected to have a completed definition");
+ unsigned NumInitMethods = 0;
+ for (auto *Method : Def->methods()) {
+ if (!Method->getIdentifier())
+ continue;
+ if (Method->getName() == "__init")
+ NumInitMethods++;
+ }
+ if (NumInitMethods > 1 || !Def->hasInitMethod())
+ Diag(RD->getLocation(), diag::err_sycl_special_type_num_init_method);
+ }
}
// Exit this scope of this tag's definition.
@@ -16569,6 +18281,23 @@ void Sema::ActOnTagFinishDefinition(Scope *S, Decl *TagD,
// Notify the consumer that we've defined a tag.
if (!Tag->isInvalidDecl())
Consumer.HandleTagDeclDefinition(Tag);
+
+ // Clangs implementation of #pragma align(packed) differs in bitfield layout
+ // from XLs and instead matches the XL #pragma pack(1) behavior.
+ if (Context.getTargetInfo().getTriple().isOSAIX() &&
+ AlignPackStack.hasValue()) {
+ AlignPackInfo APInfo = AlignPackStack.CurrentValue;
+ // Only diagnose #pragma align(packed).
+ if (!APInfo.IsAlignAttr() || APInfo.getAlignMode() != AlignPackInfo::Packed)
+ return;
+ const RecordDecl *RD = dyn_cast<RecordDecl>(Tag);
+ if (!RD)
+ return;
+ // Only warn if there is at least 1 bitfield member.
+ if (llvm::any_of(RD->fields(),
+ [](const FieldDecl *FD) { return FD->isBitField(); }))
+ Diag(BraceRange.getBegin(), diag::warn_pragma_align_not_xl_compatible);
+ }
}
void Sema::ActOnObjCContainerFinishDefinition() {
@@ -16576,14 +18305,14 @@ void Sema::ActOnObjCContainerFinishDefinition() {
PopDeclContext();
}
-void Sema::ActOnObjCTemporaryExitContainerContext(DeclContext *DC) {
- assert(DC == CurContext && "Mismatch of container contexts");
- OriginalLexicalContext = DC;
+void Sema::ActOnObjCTemporaryExitContainerContext(ObjCContainerDecl *ObjCCtx) {
+ assert(ObjCCtx == CurContext && "Mismatch of container contexts");
+ OriginalLexicalContext = ObjCCtx;
ActOnObjCContainerFinishDefinition();
}
-void Sema::ActOnObjCReenterContainerContext(DeclContext *DC) {
- ActOnObjCContainerStartDefinition(cast<Decl>(DC));
+void Sema::ActOnObjCReenterContainerContext(ObjCContainerDecl *ObjCCtx) {
+ ActOnObjCContainerStartDefinition(ObjCCtx);
OriginalLexicalContext = nullptr;
}
@@ -16607,17 +18336,12 @@ void Sema::ActOnTagDefinitionError(Scope *S, Decl *TagD) {
// Note that FieldName may be null for anonymous bitfields.
ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
- IdentifierInfo *FieldName,
- QualType FieldTy, bool IsMsStruct,
- Expr *BitWidth, bool *ZeroWidth) {
+ IdentifierInfo *FieldName, QualType FieldTy,
+ bool IsMsStruct, Expr *BitWidth) {
assert(BitWidth);
if (BitWidth->containsErrors())
return ExprError();
- // Default to true; that shouldn't confuse checks for emptiness
- if (ZeroWidth)
- *ZeroWidth = true;
-
// C99 6.7.2.1p4 - verify the field type.
// C++ 9.6p3: A bit-field shall have integral or enumeration type.
if (!FieldTy->isDependentType() && !FieldTy->isIntegralOrEnumerationType()) {
@@ -16645,12 +18369,10 @@ ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
return ICE;
BitWidth = ICE.get();
- if (Value != 0 && ZeroWidth)
- *ZeroWidth = false;
-
// Zero-width bitfield is ok for anonymous field.
if (Value == 0 && FieldName)
- return Diag(FieldLoc, diag::err_bitfield_has_zero_width) << FieldName;
+ return Diag(FieldLoc, diag::err_bitfield_has_zero_width)
+ << FieldName << BitWidth->getSourceRange();
if (Value.isSigned() && Value.isNegative()) {
if (FieldName)
@@ -16704,8 +18426,8 @@ ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
/// to create a FieldDecl object for it.
Decl *Sema::ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth) {
- FieldDecl *Res = HandleField(S, cast_or_null<RecordDecl>(TagD),
- DeclStart, D, static_cast<Expr*>(BitfieldWidth),
+ FieldDecl *Res = HandleField(S, cast_if_present<RecordDecl>(TagD), DeclStart,
+ D, BitfieldWidth,
/*InitStyle=*/ICIS_NoInit, AS_public);
return Res;
}
@@ -16728,7 +18450,7 @@ FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
SourceLocation Loc = DeclStart;
if (II) Loc = D.getIdentifierLoc();
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
QualType T = TInfo->getType();
if (getLangOpts().CPlusPlus) {
CheckExtraCXXDefaultArguments(D);
@@ -16900,17 +18622,15 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
AbstractFieldType))
InvalidDecl = true;
- bool ZeroWidth = false;
if (InvalidDecl)
BitWidth = nullptr;
// If this is declared as a bit-field, check the bit-field.
if (BitWidth) {
- BitWidth = VerifyBitField(Loc, II, T, Record->isMsStruct(Context), BitWidth,
- &ZeroWidth).get();
+ BitWidth =
+ VerifyBitField(Loc, II, T, Record->isMsStruct(Context), BitWidth).get();
if (!BitWidth) {
InvalidDecl = true;
BitWidth = nullptr;
- ZeroWidth = false;
}
}
@@ -16946,7 +18666,8 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
if (InvalidDecl)
NewFD->setInvalidDecl();
- if (PrevDecl && !isa<TagDecl>(PrevDecl)) {
+ if (PrevDecl && !isa<TagDecl>(PrevDecl) &&
+ !PrevDecl->isPlaceholderVar(getLangOpts())) {
Diag(Loc, diag::err_duplicate_member) << II;
Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
@@ -17083,20 +18804,17 @@ TranslateIvarVisibility(tok::ObjCKeywordKind ivarVisibility) {
/// ActOnIvar - Each ivar field of an objective-c class is passed into this
/// in order to create an IvarDecl object for it.
-Decl *Sema::ActOnIvar(Scope *S,
- SourceLocation DeclStart,
- Declarator &D, Expr *BitfieldWidth,
- tok::ObjCKeywordKind Visibility) {
+Decl *Sema::ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D,
+ Expr *BitWidth, tok::ObjCKeywordKind Visibility) {
IdentifierInfo *II = D.getIdentifier();
- Expr *BitWidth = (Expr*)BitfieldWidth;
SourceLocation Loc = DeclStart;
if (II) Loc = D.getIdentifierLoc();
// FIXME: Unnamed fields can be handled in various different ways, for
// example, unnamed unions inject all members into the struct namespace!
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
QualType T = TInfo->getType();
if (BitWidth) {
@@ -17152,9 +18870,11 @@ Decl *Sema::ActOnIvar(Scope *S,
}
// Construct the decl.
- ObjCIvarDecl *NewID = ObjCIvarDecl::Create(Context, EnclosingContext,
- DeclStart, Loc, II, T,
- TInfo, ac, (Expr *)BitfieldWidth);
+ ObjCIvarDecl *NewID = ObjCIvarDecl::Create(
+ Context, EnclosingContext, DeclStart, Loc, II, T, TInfo, ac, BitWidth);
+
+ if (T->containsErrors())
+ NewID->setInvalidDecl();
if (II) {
NamedDecl *PrevDecl = LookupSingleName(S, II, Loc, LookupMemberName,
@@ -17232,6 +18952,215 @@ void Sema::ActOnLastBitfield(SourceLocation DeclLoc,
AllIvarDecls.push_back(Ivar);
}
+/// [class.dtor]p4:
+/// At the end of the definition of a class, overload resolution is
+/// performed among the prospective destructors declared in that class with
+/// an empty argument list to select the destructor for the class, also
+/// known as the selected destructor.
+///
+/// We do the overload resolution here, then mark the selected constructor in the AST.
+/// Later CXXRecordDecl::getDestructor() will return the selected constructor.
+static void ComputeSelectedDestructor(Sema &S, CXXRecordDecl *Record) {
+ if (!Record->hasUserDeclaredDestructor()) {
+ return;
+ }
+
+ SourceLocation Loc = Record->getLocation();
+ OverloadCandidateSet OCS(Loc, OverloadCandidateSet::CSK_Normal);
+
+ for (auto *Decl : Record->decls()) {
+ if (auto *DD = dyn_cast<CXXDestructorDecl>(Decl)) {
+ if (DD->isInvalidDecl())
+ continue;
+ S.AddOverloadCandidate(DD, DeclAccessPair::make(DD, DD->getAccess()), {},
+ OCS);
+ assert(DD->isIneligibleOrNotSelected() && "Selecting a destructor but a destructor was already selected.");
+ }
+ }
+
+ if (OCS.empty()) {
+ return;
+ }
+ OverloadCandidateSet::iterator Best;
+ unsigned Msg = 0;
+ OverloadCandidateDisplayKind DisplayKind;
+
+ switch (OCS.BestViableFunction(S, Loc, Best)) {
+ case OR_Success:
+ case OR_Deleted:
+ Record->addedSelectedDestructor(dyn_cast<CXXDestructorDecl>(Best->Function));
+ break;
+
+ case OR_Ambiguous:
+ Msg = diag::err_ambiguous_destructor;
+ DisplayKind = OCD_AmbiguousCandidates;
+ break;
+
+ case OR_No_Viable_Function:
+ Msg = diag::err_no_viable_destructor;
+ DisplayKind = OCD_AllCandidates;
+ break;
+ }
+
+ if (Msg) {
+ // OpenCL have got their own thing going with destructors. It's slightly broken,
+ // but we allow it.
+ if (!S.LangOpts.OpenCL) {
+ PartialDiagnostic Diag = S.PDiag(Msg) << Record;
+ OCS.NoteCandidates(PartialDiagnosticAt(Loc, Diag), S, DisplayKind, {});
+ Record->setInvalidDecl();
+ }
+ // It's a bit hacky: At this point we've raised an error but we want the
+ // rest of the compiler to continue somehow working. However almost
+ // everything we'll try to do with the class will depend on there being a
+ // destructor. So let's pretend the first one is selected and hope for the
+ // best.
+ Record->addedSelectedDestructor(dyn_cast<CXXDestructorDecl>(OCS.begin()->Function));
+ }
+}
+
+/// [class.mem.special]p5
+/// Two special member functions are of the same kind if:
+/// - they are both default constructors,
+/// - they are both copy or move constructors with the same first parameter
+/// type, or
+/// - they are both copy or move assignment operators with the same first
+/// parameter type and the same cv-qualifiers and ref-qualifier, if any.
+static bool AreSpecialMemberFunctionsSameKind(ASTContext &Context,
+ CXXMethodDecl *M1,
+ CXXMethodDecl *M2,
+ Sema::CXXSpecialMember CSM) {
+ // We don't want to compare templates to non-templates: See
+ // https://github.com/llvm/llvm-project/issues/59206
+ if (CSM == Sema::CXXDefaultConstructor)
+ return bool(M1->getDescribedFunctionTemplate()) ==
+ bool(M2->getDescribedFunctionTemplate());
+ // FIXME: better resolve CWG
+ // https://cplusplus.github.io/CWG/issues/2787.html
+ if (!Context.hasSameType(M1->getNonObjectParameter(0)->getType(),
+ M2->getNonObjectParameter(0)->getType()))
+ return false;
+ if (!Context.hasSameType(M1->getFunctionObjectParameterReferenceType(),
+ M2->getFunctionObjectParameterReferenceType()))
+ return false;
+
+ return true;
+}
+
+/// [class.mem.special]p6:
+/// An eligible special member function is a special member function for which:
+/// - the function is not deleted,
+/// - the associated constraints, if any, are satisfied, and
+/// - no special member function of the same kind whose associated constraints
+/// [CWG2595], if any, are satisfied is more constrained.
+static void SetEligibleMethods(Sema &S, CXXRecordDecl *Record,
+ ArrayRef<CXXMethodDecl *> Methods,
+ Sema::CXXSpecialMember CSM) {
+ SmallVector<bool, 4> SatisfactionStatus;
+
+ for (CXXMethodDecl *Method : Methods) {
+ const Expr *Constraints = Method->getTrailingRequiresClause();
+ if (!Constraints)
+ SatisfactionStatus.push_back(true);
+ else {
+ ConstraintSatisfaction Satisfaction;
+ if (S.CheckFunctionConstraints(Method, Satisfaction))
+ SatisfactionStatus.push_back(false);
+ else
+ SatisfactionStatus.push_back(Satisfaction.IsSatisfied);
+ }
+ }
+
+ for (size_t i = 0; i < Methods.size(); i++) {
+ if (!SatisfactionStatus[i])
+ continue;
+ CXXMethodDecl *Method = Methods[i];
+ CXXMethodDecl *OrigMethod = Method;
+ if (FunctionDecl *MF = OrigMethod->getInstantiatedFromMemberFunction())
+ OrigMethod = cast<CXXMethodDecl>(MF);
+
+ const Expr *Constraints = OrigMethod->getTrailingRequiresClause();
+ bool AnotherMethodIsMoreConstrained = false;
+ for (size_t j = 0; j < Methods.size(); j++) {
+ if (i == j || !SatisfactionStatus[j])
+ continue;
+ CXXMethodDecl *OtherMethod = Methods[j];
+ if (FunctionDecl *MF = OtherMethod->getInstantiatedFromMemberFunction())
+ OtherMethod = cast<CXXMethodDecl>(MF);
+
+ if (!AreSpecialMemberFunctionsSameKind(S.Context, OrigMethod, OtherMethod,
+ CSM))
+ continue;
+
+ const Expr *OtherConstraints = OtherMethod->getTrailingRequiresClause();
+ if (!OtherConstraints)
+ continue;
+ if (!Constraints) {
+ AnotherMethodIsMoreConstrained = true;
+ break;
+ }
+ if (S.IsAtLeastAsConstrained(OtherMethod, {OtherConstraints}, OrigMethod,
+ {Constraints},
+ AnotherMethodIsMoreConstrained)) {
+ // There was an error with the constraints comparison. Exit the loop
+ // and don't consider this function eligible.
+ AnotherMethodIsMoreConstrained = true;
+ }
+ if (AnotherMethodIsMoreConstrained)
+ break;
+ }
+ // FIXME: Do not consider deleted methods as eligible after implementing
+ // DR1734 and DR1496.
+ if (!AnotherMethodIsMoreConstrained) {
+ Method->setIneligibleOrNotSelected(false);
+ Record->addedEligibleSpecialMemberFunction(Method, 1 << CSM);
+ }
+ }
+}
+
+static void ComputeSpecialMemberFunctionsEligiblity(Sema &S,
+ CXXRecordDecl *Record) {
+ SmallVector<CXXMethodDecl *, 4> DefaultConstructors;
+ SmallVector<CXXMethodDecl *, 4> CopyConstructors;
+ SmallVector<CXXMethodDecl *, 4> MoveConstructors;
+ SmallVector<CXXMethodDecl *, 4> CopyAssignmentOperators;
+ SmallVector<CXXMethodDecl *, 4> MoveAssignmentOperators;
+
+ for (auto *Decl : Record->decls()) {
+ auto *MD = dyn_cast<CXXMethodDecl>(Decl);
+ if (!MD) {
+ auto *FTD = dyn_cast<FunctionTemplateDecl>(Decl);
+ if (FTD)
+ MD = dyn_cast<CXXMethodDecl>(FTD->getTemplatedDecl());
+ }
+ if (!MD)
+ continue;
+ if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
+ if (CD->isInvalidDecl())
+ continue;
+ if (CD->isDefaultConstructor())
+ DefaultConstructors.push_back(MD);
+ else if (CD->isCopyConstructor())
+ CopyConstructors.push_back(MD);
+ else if (CD->isMoveConstructor())
+ MoveConstructors.push_back(MD);
+ } else if (MD->isCopyAssignmentOperator()) {
+ CopyAssignmentOperators.push_back(MD);
+ } else if (MD->isMoveAssignmentOperator()) {
+ MoveAssignmentOperators.push_back(MD);
+ }
+ }
+
+ SetEligibleMethods(S, Record, DefaultConstructors,
+ Sema::CXXDefaultConstructor);
+ SetEligibleMethods(S, Record, CopyConstructors, Sema::CXXCopyConstructor);
+ SetEligibleMethods(S, Record, MoveConstructors, Sema::CXXMoveConstructor);
+ SetEligibleMethods(S, Record, CopyAssignmentOperators,
+ Sema::CXXCopyAssignment);
+ SetEligibleMethods(S, Record, MoveAssignmentOperators,
+ Sema::CXXMoveAssignment);
+}
+
void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac,
@@ -17319,7 +19248,8 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
unsigned DiagID = 0;
if (!Record->isUnion() && !IsLastField) {
Diag(FD->getLocation(), diag::err_flexible_array_not_at_end)
- << FD->getDeclName() << FD->getType() << Record->getTagKind();
+ << FD->getDeclName() << FD->getType()
+ << llvm::to_underlying(Record->getTagKind());
Diag((*(i + 1))->getLocation(), diag::note_next_field_declaration);
FD->setInvalidDecl();
EnclosingDecl->setInvalidDecl();
@@ -17338,8 +19268,8 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
: diag::err_flexible_array_empty_aggregate;
if (DiagID)
- Diag(FD->getLocation(), DiagID) << FD->getDeclName()
- << Record->getTagKind();
+ Diag(FD->getLocation(), DiagID)
+ << FD->getDeclName() << llvm::to_underlying(Record->getTagKind());
// While the layout of types that contain virtual bases is not specified
// by the C++ standard, both the Itanium and Microsoft C++ ABIs place
// virtual bases after the derived members. This would make a flexible
@@ -17347,10 +19277,10 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
// of the type.
if (CXXRecord && CXXRecord->getNumVBases() != 0)
Diag(FD->getLocation(), diag::err_flexible_array_virtual_base)
- << FD->getDeclName() << Record->getTagKind();
+ << FD->getDeclName() << llvm::to_underlying(Record->getTagKind());
if (!getLangOpts().C99)
Diag(FD->getLocation(), diag::ext_c99_flexible_array_member)
- << FD->getDeclName() << Record->getTagKind();
+ << FD->getDeclName() << llvm::to_underlying(Record->getTagKind());
// If the element type has a non-trivial destructor, we would not
// implicitly destroy the elements, so disallow it for now.
@@ -17473,10 +19403,12 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
if (const auto *RT = FT->getAs<RecordType>()) {
if (RT->getDecl()->getArgPassingRestrictions() ==
- RecordDecl::APK_CanNeverPassInRegs)
- Record->setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs);
+ RecordArgPassingKind::CanNeverPassInRegs)
+ Record->setArgPassingRestrictions(
+ RecordArgPassingKind::CanNeverPassInRegs);
} else if (FT.getQualifiers().getObjCLifetime() == Qualifiers::OCL_Weak)
- Record->setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs);
+ Record->setArgPassingRestrictions(
+ RecordArgPassingKind::CanNeverPassInRegs);
}
if (Record && FD->getType().isVolatileQualified())
@@ -17543,6 +19475,8 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
Completed = true;
}
}
+ ComputeSelectedDestructor(*this, CXXRecord);
+ ComputeSpecialMemberFunctionsEligiblity(*this, CXXRecord);
}
}
@@ -17552,6 +19486,47 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
// Handle attributes before checking the layout.
ProcessDeclAttributeList(S, Record, Attrs);
+ // Check to see if a FieldDecl is a pointer to a function.
+ auto IsFunctionPointerOrForwardDecl = [&](const Decl *D) {
+ const FieldDecl *FD = dyn_cast<FieldDecl>(D);
+ if (!FD) {
+ // Check whether this is a forward declaration that was inserted by
+ // Clang. This happens when a non-forward declared / defined type is
+ // used, e.g.:
+ //
+ // struct foo {
+ // struct bar *(*f)();
+ // struct bar *(*g)();
+ // };
+ //
+ // "struct bar" shows up in the decl AST as a "RecordDecl" with an
+ // incomplete definition.
+ if (const auto *TD = dyn_cast<TagDecl>(D))
+ return !TD->isCompleteDefinition();
+ return false;
+ }
+ QualType FieldType = FD->getType().getDesugaredType(Context);
+ if (isa<PointerType>(FieldType)) {
+ QualType PointeeType = cast<PointerType>(FieldType)->getPointeeType();
+ return PointeeType.getDesugaredType(Context)->isFunctionType();
+ }
+ return false;
+ };
+
+ // Maybe randomize the record's decls. We automatically randomize a record
+ // of function pointers, unless it has the "no_randomize_layout" attribute.
+ if (!getLangOpts().CPlusPlus &&
+ (Record->hasAttr<RandomizeLayoutAttr>() ||
+ (!Record->hasAttr<NoRandomizeLayoutAttr>() &&
+ llvm::all_of(Record->decls(), IsFunctionPointerOrForwardDecl))) &&
+ !Record->isUnion() && !getLangOpts().RandstructSeed.empty() &&
+ !Record->isRandomized()) {
+ SmallVector<Decl *, 32> NewDeclOrdering;
+ if (randstruct::randomizeStructureLayout(Context, Record,
+ NewDeclOrdering))
+ Record->reorderDecls(NewDeclOrdering);
+ }
+
// We may have deferred checking for a deleted destructor. Check now.
if (CXXRecord) {
auto *Dtor = CXXRecord->getDestructor();
@@ -17698,7 +19673,7 @@ static bool isRepresentableIntegerValue(ASTContext &Context,
--BitWidth;
return Value.getActiveBits() <= BitWidth;
}
- return Value.getMinSignedBits() <= BitWidth;
+ return Value.getSignificantBits() <= BitWidth;
}
// Given an integral type, return the next larger integral type
@@ -17743,7 +19718,8 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
Val = DefaultLvalueConversion(Val).get();
if (Val) {
- if (Enum->isDependentType() || Val->isTypeDependent())
+ if (Enum->isDependentType() || Val->isTypeDependent() ||
+ Val->containsErrors())
EltTy = Context.DependentTy;
else {
// FIXME: We don't allow folding in C++11 mode for an enum with a fixed
@@ -17887,6 +19863,7 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
if (!getLangOpts().CPlusPlus && !T.isNull())
Diag(IdLoc, diag::warn_enum_value_overflow);
} else if (!getLangOpts().CPlusPlus &&
+ !EltTy->isDependentType() &&
!isRepresentableIntegerValue(Context, EnumVal, EltTy)) {
// Enforce C99 6.7.2.2p2 even when we compute the next value.
Diag(IdLoc, diag::ext_enum_value_not_int)
@@ -18087,7 +20064,7 @@ static void CheckForDuplicateEnumValues(Sema &S, ArrayRef<Decl *> Elements,
return;
}
- // Constants with initalizers are handled in the next loop.
+ // Constants with initializers are handled in the next loop.
if (ECD->getInitExpr())
continue;
@@ -18149,7 +20126,7 @@ static void CheckForDuplicateEnumValues(Sema &S, ArrayRef<Decl *> Elements,
// Emit one note for each of the remaining enum constants with
// the same value.
- for (auto *ECD : llvm::make_range(Vec->begin() + 1, Vec->end()))
+ for (auto *ECD : llvm::drop_begin(*Vec))
S.Diag(ECD->getLocation(), diag::note_duplicate_element)
<< ECD << toString(ECD->getInitVal(), 10)
<< ECD->getSourceRange();
@@ -18169,7 +20146,7 @@ bool Sema::IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
const auto &EVal = E->getInitVal();
// Only single-bit enumerators introduce new flag values.
if (EVal.isPowerOf2())
- FlagBits = FlagBits.zextOrSelf(EVal.getBitWidth()) | EVal;
+ FlagBits = FlagBits.zext(EVal.getBitWidth()) | EVal;
}
}
@@ -18218,9 +20195,6 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
unsigned NumNegativeBits = 0;
unsigned NumPositiveBits = 0;
- // Keep track of whether all elements have type int.
- bool AllElementsInt = true;
-
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
EnumConstantDecl *ECD =
cast_or_null<EnumConstantDecl>(Elements[i]);
@@ -18229,18 +20203,24 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
const llvm::APSInt &InitVal = ECD->getInitVal();
// Keep track of the size of positive and negative values.
- if (InitVal.isUnsigned() || InitVal.isNonNegative())
- NumPositiveBits = std::max(NumPositiveBits,
- (unsigned)InitVal.getActiveBits());
- else
- NumNegativeBits = std::max(NumNegativeBits,
- (unsigned)InitVal.getMinSignedBits());
-
- // Keep track of whether every enum element has type int (very common).
- if (AllElementsInt)
- AllElementsInt = ECD->getType() == Context.IntTy;
+ if (InitVal.isUnsigned() || InitVal.isNonNegative()) {
+ // If the enumerator is zero that should still be counted as a positive
+ // bit since we need a bit to store the value zero.
+ unsigned ActiveBits = InitVal.getActiveBits();
+ NumPositiveBits = std::max({NumPositiveBits, ActiveBits, 1u});
+ } else {
+ NumNegativeBits =
+ std::max(NumNegativeBits, (unsigned)InitVal.getSignificantBits());
+ }
}
+ // If we have an empty set of enumerators we still need one bit.
+ // From [dcl.enum]p8
+ // If the enumerator-list is empty, the values of the enumeration are as if
+ // the enumeration had a single enumerator with value 0
+ if (!NumPositiveBits && !NumNegativeBits)
+ NumPositiveBits = 1;
+
// Figure out the type that should be used for this enum.
QualType BestType;
unsigned BestWidth;
@@ -18266,7 +20246,7 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
// target, promote that type instead of analyzing the enumerators.
if (Enum->isComplete()) {
BestType = Enum->getIntegerType();
- if (BestType->isPromotableIntegerType())
+ if (Context.isPromotableIntegerType(BestType))
BestPromotionType = Context.getPromotedIntegerType(BestType);
else
BestPromotionType = BestType;
@@ -18379,7 +20359,7 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
// Adjust the APSInt value.
InitVal = InitVal.extOrTrunc(NewWidth);
InitVal.setIsSigned(NewSign);
- ECD->setInitVal(InitVal);
+ ECD->setInitVal(Context, InitVal);
// Adjust the Expr initializer and type.
if (ECD->getInitExpr() &&
@@ -18431,6 +20411,12 @@ Decl *Sema::ActOnFileScopeAsmDecl(Expr *expr,
return New;
}
+Decl *Sema::ActOnTopLevelStmtDecl(Stmt *Statement) {
+ auto *New = TopLevelStmtDecl::Create(Context, Statement);
+ Context.getTranslationUnitDecl()->addDecl(New);
+ return New;
+}
+
void Sema::ActOnPragmaRedefineExtname(IdentifierInfo* Name,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
@@ -18439,9 +20425,9 @@ void Sema::ActOnPragmaRedefineExtname(IdentifierInfo* Name,
NamedDecl *PrevDecl = LookupSingleName(TUScope, Name, NameLoc,
LookupOrdinaryName);
AttributeCommonInfo Info(AliasName, SourceRange(AliasNameLoc),
- AttributeCommonInfo::AS_Pragma);
+ AttributeCommonInfo::Form::Pragma());
AsmLabelAttr *Attr = AsmLabelAttr::CreateImplicit(
- Context, AliasName->getName(), /*LiteralLabel=*/true, Info);
+ Context, AliasName->getName(), /*IsLiteralLabel=*/true, Info);
// If a declaration that:
// 1) declares a function or a variable
@@ -18453,7 +20439,7 @@ void Sema::ActOnPragmaRedefineExtname(IdentifierInfo* Name,
else
Diag(PrevDecl->getLocation(), diag::warn_redefine_extname_not_applied)
<< /*Variable*/(isa<FunctionDecl>(PrevDecl) ? 0 : 1) << PrevDecl;
- // Otherwise, add a label atttibute to ExtnameUndeclaredIdentifiers.
+ // Otherwise, add a label attribute to ExtnameUndeclaredIdentifiers.
} else
(void)ExtnameUndeclaredIdentifiers.insert(std::make_pair(Name, Attr));
}
@@ -18464,11 +20450,9 @@ void Sema::ActOnPragmaWeakID(IdentifierInfo* Name,
Decl *PrevDecl = LookupSingleName(TUScope, Name, NameLoc, LookupOrdinaryName);
if (PrevDecl) {
- PrevDecl->addAttr(WeakAttr::CreateImplicit(Context, PragmaLoc, AttributeCommonInfo::AS_Pragma));
+ PrevDecl->addAttr(WeakAttr::CreateImplicit(Context, PragmaLoc));
} else {
- (void)WeakUndeclaredIdentifiers.insert(
- std::pair<IdentifierInfo*,WeakInfo>
- (Name, WeakInfo((IdentifierInfo*)nullptr, NameLoc)));
+ (void)WeakUndeclaredIdentifiers[Name].insert(WeakInfo(nullptr, NameLoc));
}
}
@@ -18486,16 +20470,15 @@ void Sema::ActOnPragmaWeakAlias(IdentifierInfo* Name,
if (NamedDecl *ND = dyn_cast<NamedDecl>(PrevDecl))
DeclApplyPragmaWeak(TUScope, ND, W);
} else {
- (void)WeakUndeclaredIdentifiers.insert(
- std::pair<IdentifierInfo*,WeakInfo>(AliasName, W));
+ (void)WeakUndeclaredIdentifiers[AliasName].insert(W);
}
}
-Decl *Sema::getObjCDeclContext() const {
+ObjCContainerDecl *Sema::getObjCDeclContext() const {
return (dyn_cast_or_null<ObjCContainerDecl>(CurContext));
}
-Sema::FunctionEmissionStatus Sema::getEmissionStatus(FunctionDecl *FD,
+Sema::FunctionEmissionStatus Sema::getEmissionStatus(const FunctionDecl *FD,
bool Final) {
assert(FD && "Expected non-null FunctionDecl");
@@ -18513,27 +20496,27 @@ Sema::FunctionEmissionStatus Sema::getEmissionStatus(FunctionDecl *FD,
// We have to check the GVA linkage of the function's *definition* -- if we
// only have a declaration, we don't know whether or not the function will
// be emitted, because (say) the definition could include "inline".
- FunctionDecl *Def = FD->getDefinition();
+ const FunctionDecl *Def = FD->getDefinition();
return Def && !isDiscardableGVALinkage(
getASTContext().GetGVALinkageForFunction(Def));
};
- if (LangOpts.OpenMPIsDevice) {
+ if (LangOpts.OpenMPIsTargetDevice) {
// In OpenMP device mode we will not emit host only functions, or functions
// we don't need due to their linkage.
- Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
+ std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
OMPDeclareTargetDeclAttr::getDeviceType(FD->getCanonicalDecl());
// DevTy may be changed later by
// #pragma omp declare target to(*) device_type(*).
// Therefore DevTy having no value does not imply host. The emission status
// will be checked again at the end of compilation unit with Final = true.
- if (DevTy.hasValue())
+ if (DevTy)
if (*DevTy == OMPDeclareTargetDeclAttr::DT_Host)
return FunctionEmissionStatus::OMPDiscarded;
// If we have an explicit value for the device type, or we are in a target
// declare context, we need to emit all extern and used symbols.
- if (isInOpenMPDeclareTargetContext() || DevTy.hasValue())
+ if (isInOpenMPDeclareTargetContext() || DevTy)
if (IsEmittedForExternalSymbol())
return FunctionEmissionStatus::Emitted;
// Device mode only emits what it must, if it wasn't tagged yet and needed,
@@ -18544,9 +20527,9 @@ Sema::FunctionEmissionStatus Sema::getEmissionStatus(FunctionDecl *FD,
// In OpenMP host compilation prior to 5.0 everything was an emitted host
// function. In 5.0, no_host was introduced which might cause a function to
// be ommitted.
- Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
+ std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
OMPDeclareTargetDeclAttr::getDeviceType(FD->getCanonicalDecl());
- if (DevTy.hasValue())
+ if (DevTy)
if (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
return FunctionEmissionStatus::OMPDiscarded;
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
index bb4ce8d4962e..6f462de4be78 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
@@ -23,7 +23,10 @@
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/Cuda.h"
#include "clang/Basic/DarwinSDKInfo.h"
+#include "clang/Basic/HLSLRuntime.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetBuiltins.h"
@@ -37,7 +40,6 @@
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
@@ -45,6 +47,7 @@
#include "llvm/Support/Error.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
+#include <optional>
using namespace clang;
using namespace sema;
@@ -186,7 +189,7 @@ static inline bool isCFStringType(QualType T, ASTContext &Ctx) {
return false;
const RecordDecl *RD = RT->getDecl();
- if (RD->getTagKind() != TTK_Struct)
+ if (RD->getTagKind() != TagTypeKind::Struct)
return false;
return RD->getIdentifier() == &Ctx.Idents.get("__CFString");
@@ -200,7 +203,7 @@ static unsigned getNumAttributeArgs(const ParsedAttr &AL) {
/// A helper function to provide Attribute Location for the Attr types
/// AND the ParsedAttr.
template <typename AttrInfo>
-static std::enable_if_t<std::is_base_of<Attr, AttrInfo>::value, SourceLocation>
+static std::enable_if_t<std::is_base_of_v<Attr, AttrInfo>, SourceLocation>
getAttrLoc(const AttrInfo &AL) {
return AL.getLocation();
}
@@ -215,8 +218,8 @@ template <typename AttrInfo>
static bool checkUInt32Argument(Sema &S, const AttrInfo &AI, const Expr *Expr,
uint32_t &Val, unsigned Idx = UINT_MAX,
bool StrictlyUnsigned = false) {
- Optional<llvm::APSInt> I = llvm::APSInt(32);
- if (Expr->isTypeDependent() || Expr->isValueDependent() ||
+ std::optional<llvm::APSInt> I = llvm::APSInt(32);
+ if (Expr->isTypeDependent() ||
!(I = Expr->getIntegerConstantExpr(S.Context))) {
if (Idx != UINT_MAX)
S.Diag(getAttrLoc(AI), diag::err_attribute_argument_n_type)
@@ -271,7 +274,9 @@ static bool checkPositiveIntArgument(Sema &S, const AttrInfo &AI, const Expr *Ex
template <typename AttrTy>
static bool checkAttrMutualExclusion(Sema &S, Decl *D, const ParsedAttr &AL) {
if (const auto *A = D->getAttr<AttrTy>()) {
- S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible) << AL << A;
+ S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible)
+ << AL << A
+ << (AL.isRegularKeywordAttribute() || A->isRegularKeywordAttribute());
S.Diag(A->getLocation(), diag::note_conflicting_attribute);
return true;
}
@@ -281,8 +286,9 @@ static bool checkAttrMutualExclusion(Sema &S, Decl *D, const ParsedAttr &AL) {
template <typename AttrTy>
static bool checkAttrMutualExclusion(Sema &S, Decl *D, const Attr &AL) {
if (const auto *A = D->getAttr<AttrTy>()) {
- S.Diag(AL.getLocation(), diag::err_attributes_are_not_compatible) << &AL
- << A;
+ S.Diag(AL.getLocation(), diag::err_attributes_are_not_compatible)
+ << &AL << A
+ << (AL.isRegularKeywordAttribute() || A->isRegularKeywordAttribute());
S.Diag(A->getLocation(), diag::note_conflicting_attribute);
return true;
}
@@ -307,8 +313,8 @@ static bool checkFunctionOrMethodParameterIndex(
unsigned NumParams =
(HP ? getFunctionOrMethodNumParams(D) : 0) + HasImplicitThisParam;
- Optional<llvm::APSInt> IdxInt;
- if (IdxExpr->isTypeDependent() || IdxExpr->isValueDependent() ||
+ std::optional<llvm::APSInt> IdxInt;
+ if (IdxExpr->isTypeDependent() ||
!(IdxInt = IdxExpr->getIntegerConstantExpr(S.Context))) {
S.Diag(getAttrLoc(AI), diag::err_attribute_argument_n_type)
<< &AI << AttrArgNum << AANT_ArgumentIntegerConstant
@@ -334,6 +340,26 @@ static bool checkFunctionOrMethodParameterIndex(
return true;
}
+/// Check if the argument \p E is a ASCII string literal. If not emit an error
+/// and return false, otherwise set \p Str to the value of the string literal
+/// and return true.
+bool Sema::checkStringLiteralArgumentAttr(const AttributeCommonInfo &CI,
+ const Expr *E, StringRef &Str,
+ SourceLocation *ArgLocation) {
+ const auto *Literal = dyn_cast<StringLiteral>(E->IgnoreParenCasts());
+ if (ArgLocation)
+ *ArgLocation = E->getBeginLoc();
+
+ if (!Literal || (!Literal->isUnevaluated() && !Literal->isOrdinary())) {
+ Diag(E->getBeginLoc(), diag::err_attribute_argument_type)
+ << CI << AANT_ArgumentString;
+ return false;
+ }
+
+ Str = Literal->getString();
+ return true;
+}
+
/// Check if the argument \p ArgNum of \p Attr is a ASCII string literal.
/// If not emit an error and return false. If the argument is an identifier it
/// will emit an error with a fixit hint and treat it as if it was a string
@@ -360,14 +386,13 @@ bool Sema::checkStringLiteralArgumentAttr(const ParsedAttr &AL, unsigned ArgNum,
if (ArgLocation)
*ArgLocation = ArgExpr->getBeginLoc();
- if (!Literal || !Literal->isAscii()) {
+ if (!Literal || (!Literal->isUnevaluated() && !Literal->isOrdinary())) {
Diag(ArgExpr->getBeginLoc(), diag::err_attribute_argument_type)
<< AL << AANT_ArgumentString;
return false;
}
-
Str = Literal->getString();
- return true;
+ return checkStringLiteralArgumentAttr(AL, ArgExpr, Str, ArgLocation);
}
/// Applies the given attribute to the Decl without performing any
@@ -436,7 +461,7 @@ static bool threadSafetyCheckIsSmartPointer(Sema &S, const RecordType* RT) {
if (!CXXRecord)
return false;
- for (auto BaseSpecifier : CXXRecord->bases()) {
+ for (const auto &BaseSpecifier : CXXRecord->bases()) {
if (!foundStarOperator)
foundStarOperator = IsOverloadedOperatorPresent(
BaseSpecifier.getType()->getAsRecordDecl(), OO_Star);
@@ -610,7 +635,7 @@ static void checkAttrArgsAreCapabilityObjs(Sema &S, Decl *D,
if (const auto *StrLit = dyn_cast<StringLiteral>(ArgExp)) {
if (StrLit->getLength() == 0 ||
- (StrLit->isAscii() && StrLit->getString() == StringRef("*"))) {
+ (StrLit->isOrdinary() && StrLit->getString() == StringRef("*"))) {
// Pass empty strings to the analyzer without warnings.
// Treat "*" as the universal lock.
Args.push_back(ArgExp);
@@ -947,6 +972,14 @@ static void handleEnableIfAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(::new (S.Context) EnableIfAttr(S.Context, AL, Cond, Msg));
}
+static void handleErrorAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ StringRef NewUserDiagnostic;
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, NewUserDiagnostic))
+ return;
+ if (ErrorAttr *EA = S.mergeErrorAttr(D, AL, NewUserDiagnostic))
+ D->addAttr(EA);
+}
+
namespace {
/// Determines if a given Expr references any of the given function's
/// ParmVarDecls, or the function's implicit `this` parameter (if applicable).
@@ -993,6 +1026,84 @@ public:
};
}
+static void handleDiagnoseAsBuiltinAttr(Sema &S, Decl *D,
+ const ParsedAttr &AL) {
+ const auto *DeclFD = cast<FunctionDecl>(D);
+
+ if (const auto *MethodDecl = dyn_cast<CXXMethodDecl>(DeclFD))
+ if (!MethodDecl->isStatic()) {
+ S.Diag(AL.getLoc(), diag::err_attribute_no_member_function) << AL;
+ return;
+ }
+
+ auto DiagnoseType = [&](unsigned Index, AttributeArgumentNType T) {
+ SourceLocation Loc = [&]() {
+ auto Union = AL.getArg(Index - 1);
+ if (Union.is<Expr *>())
+ return Union.get<Expr *>()->getBeginLoc();
+ return Union.get<IdentifierLoc *>()->Loc;
+ }();
+
+ S.Diag(Loc, diag::err_attribute_argument_n_type) << AL << Index << T;
+ };
+
+ FunctionDecl *AttrFD = [&]() -> FunctionDecl * {
+ if (!AL.isArgExpr(0))
+ return nullptr;
+ auto *F = dyn_cast_if_present<DeclRefExpr>(AL.getArgAsExpr(0));
+ if (!F)
+ return nullptr;
+ return dyn_cast_if_present<FunctionDecl>(F->getFoundDecl());
+ }();
+
+ if (!AttrFD || !AttrFD->getBuiltinID(true)) {
+ DiagnoseType(1, AANT_ArgumentBuiltinFunction);
+ return;
+ }
+
+ if (AttrFD->getNumParams() != AL.getNumArgs() - 1) {
+ S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments_for)
+ << AL << AttrFD << AttrFD->getNumParams();
+ return;
+ }
+
+ SmallVector<unsigned, 8> Indices;
+
+ for (unsigned I = 1; I < AL.getNumArgs(); ++I) {
+ if (!AL.isArgExpr(I)) {
+ DiagnoseType(I + 1, AANT_ArgumentIntegerConstant);
+ return;
+ }
+
+ const Expr *IndexExpr = AL.getArgAsExpr(I);
+ uint32_t Index;
+
+ if (!checkUInt32Argument(S, AL, IndexExpr, Index, I + 1, false))
+ return;
+
+ if (Index > DeclFD->getNumParams()) {
+ S.Diag(AL.getLoc(), diag::err_attribute_bounds_for_function)
+ << AL << Index << DeclFD << DeclFD->getNumParams();
+ return;
+ }
+
+ QualType T1 = AttrFD->getParamDecl(I - 1)->getType();
+ QualType T2 = DeclFD->getParamDecl(Index - 1)->getType();
+
+ if (T1.getCanonicalType().getUnqualifiedType() !=
+ T2.getCanonicalType().getUnqualifiedType()) {
+ S.Diag(IndexExpr->getBeginLoc(), diag::err_attribute_parameter_types)
+ << AL << Index << DeclFD << T2 << I << AttrFD << T1;
+ return;
+ }
+
+ Indices.push_back(Index - 1);
+ }
+
+ D->addAttr(::new (S.Context) DiagnoseAsBuiltinAttr(
+ S.Context, AL, AttrFD, Indices.data(), Indices.size()));
+}
+
static void handleDiagnoseIfAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
S.Diag(AL.getLoc(), diag::ext_clang_diagnose_if);
@@ -1123,7 +1234,7 @@ static void handleConsumableAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static bool checkForConsumableClass(Sema &S, const CXXMethodDecl *MD,
const ParsedAttr &AL) {
- QualType ThisType = MD->getThisType()->getPointeeType();
+ QualType ThisType = MD->getFunctionObjectParameterType();
if (const CXXRecordDecl *RD = ThisType->getAsCXXRecordDecl()) {
if (!RD->hasAttr<ConsumableAttr>()) {
@@ -1225,23 +1336,23 @@ static void handleReturnTypestateAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// FIXME: This check is currently being done in the analysis. It can be
// enabled here only after the parser propagates attributes at
// template specialization definition, not declaration.
- //QualType ReturnType;
+ // QualType ReturnType;
//
- //if (const ParmVarDecl *Param = dyn_cast<ParmVarDecl>(D)) {
+ // if (const ParmVarDecl *Param = dyn_cast<ParmVarDecl>(D)) {
// ReturnType = Param->getType();
//
//} else if (const CXXConstructorDecl *Constructor =
// dyn_cast<CXXConstructorDecl>(D)) {
- // ReturnType = Constructor->getThisType()->getPointeeType();
+ // ReturnType = Constructor->getFunctionObjectParameterType();
//
//} else {
//
// ReturnType = cast<FunctionDecl>(D)->getCallResultType();
//}
//
- //const CXXRecordDecl *RD = ReturnType->getAsCXXRecordDecl();
+ // const CXXRecordDecl *RD = ReturnType->getAsCXXRecordDecl();
//
- //if (!RD || !RD->hasAttr<ConsumableAttr>()) {
+ // if (!RD || !RD->hasAttr<ConsumableAttr>()) {
// S.Diag(Attr.getLoc(), diag::warn_return_state_for_unconsumable_type) <<
// ReturnType.getAsString();
// return;
@@ -1308,9 +1419,9 @@ static void handlePackedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
FD->isBitField() &&
S.Context.getTypeAlign(FD->getType()) <= 8);
- if (S.getASTContext().getTargetInfo().getTriple().isPS4()) {
+ if (S.getASTContext().getTargetInfo().getTriple().isPS()) {
if (BitfieldByteAligned)
- // The PS4 target needs to maintain ABI backwards compatibility.
+ // The PS4/PS5 targets need to maintain ABI backwards compatibility.
S.Diag(AL.getLoc(), diag::warn_attribute_ignored_for_field_of_type)
<< AL << FD->getType();
else
@@ -1341,7 +1452,7 @@ static void handlePreferredName(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!T.hasQualifiers() && T->isTypedefNameType()) {
// Find the template name, if this type names a template specialization.
const TemplateDecl *Template = nullptr;
- if (const auto *CTSD = dyn_cast_or_null<ClassTemplateSpecializationDecl>(
+ if (const auto *CTSD = dyn_cast_if_present<ClassTemplateSpecializationDecl>(
T->getAsCXXRecordDecl())) {
Template = CTSD->getSpecializedTemplate();
} else if (const auto *TST = T->getAs<TemplateSpecializationType>()) {
@@ -1593,7 +1704,7 @@ void Sema::AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
}
if (!E->isValueDependent()) {
- Optional<llvm::APSInt> I = llvm::APSInt(64);
+ std::optional<llvm::APSInt> I = llvm::APSInt(64);
if (!(I = E->getIntegerConstantExpr(Context))) {
if (OE)
Diag(AttrLoc, diag::err_attribute_argument_n_type)
@@ -1698,8 +1809,8 @@ static void handleAssumumptionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
/// Normalize the attribute, __foo__ becomes foo.
/// Returns true if normalization was applied.
static bool normalizeName(StringRef &AttrName) {
- if (AttrName.size() > 4 && AttrName.startswith("__") &&
- AttrName.endswith("__")) {
+ if (AttrName.size() > 4 && AttrName.starts_with("__") &&
+ AttrName.ends_with("__")) {
AttrName = AttrName.drop_front(2).drop_back(2);
return true;
}
@@ -1780,15 +1891,17 @@ static void handleOwnershipAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
for (const auto *I : D->specific_attrs<OwnershipAttr>()) {
// Cannot have two ownership attributes of different kinds for the same
// index.
- if (I->getOwnKind() != K && I->args_end() !=
- std::find(I->args_begin(), I->args_end(), Idx)) {
- S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible) << AL << I;
- return;
+ if (I->getOwnKind() != K && llvm::is_contained(I->args(), Idx)) {
+ S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible)
+ << AL << I
+ << (AL.isRegularKeywordAttribute() ||
+ I->isRegularKeywordAttribute());
+ return;
} else if (K == OwnershipAttr::Returns &&
I->getOwnKind() == OwnershipAttr::Returns) {
// A returns attribute conflicts with any other returns attribute using
// a different index.
- if (std::find(I->args_begin(), I->args_end(), Idx) == I->args_end()) {
+ if (!llvm::is_contained(I->args(), Idx)) {
S.Diag(I->getLocation(), diag::err_ownership_returns_index_mismatch)
<< I->args_begin()->getSourceIndex();
if (I->args_size())
@@ -1890,8 +2003,12 @@ static void handleAliasAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
S.Diag(AL.getLoc(), diag::err_alias_not_supported_on_darwin);
return;
}
+
if (S.Context.getTargetInfo().getTriple().isNVPTX()) {
- S.Diag(AL.getLoc(), diag::err_alias_not_supported_on_nvptx);
+ CudaVersion Version =
+ ToCudaVersion(S.Context.getTargetInfo().getSDKVersion());
+ if (Version != CudaVersion::UNKNOWN && Version < CudaVersion::CUDA_100)
+ S.Diag(AL.getLoc(), diag::err_alias_not_supported_on_nvptx);
}
// Aliases should be on declarations, not definitions.
@@ -1937,7 +2054,7 @@ static void handleTLSModelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
if (S.Context.getTargetInfo().getTriple().isOSAIX() &&
- Model != "global-dynamic") {
+ Model == "local-dynamic") {
S.Diag(LiteralLoc, diag::err_aix_attr_unsupported_tls_model) << Model;
return;
}
@@ -1957,6 +2074,28 @@ static void handleRestrictAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
static void handleCPUSpecificAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ // Ensure we don't combine these with themselves, since that causes some
+ // confusing behavior.
+ if (AL.getParsedKind() == ParsedAttr::AT_CPUDispatch) {
+ if (checkAttrMutualExclusion<CPUSpecificAttr>(S, D, AL))
+ return;
+
+ if (const auto *Other = D->getAttr<CPUDispatchAttr>()) {
+ S.Diag(AL.getLoc(), diag::err_disallowed_duplicate_attribute) << AL;
+ S.Diag(Other->getLocation(), diag::note_conflicting_attribute);
+ return;
+ }
+ } else if (AL.getParsedKind() == ParsedAttr::AT_CPUSpecific) {
+ if (checkAttrMutualExclusion<CPUDispatchAttr>(S, D, AL))
+ return;
+
+ if (const auto *Other = D->getAttr<CPUSpecificAttr>()) {
+ S.Diag(AL.getLoc(), diag::err_disallowed_duplicate_attribute) << AL;
+ S.Diag(Other->getLocation(), diag::note_conflicting_attribute);
+ return;
+ }
+ }
+
FunctionDecl *FD = cast<FunctionDecl>(D);
if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) {
@@ -2041,6 +2180,14 @@ static void handleNakedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
<< AL << Triple.getArchName();
return;
}
+
+ // This form is not allowed to be written on a member function (static or
+ // nonstatic) when in Microsoft compatibility mode.
+ if (S.getLangOpts().MSVCCompat && isa<CXXMethodDecl>(D)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_wrong_decl_type_str)
+ << AL << AL.isRegularKeywordAttribute() << "non-member functions";
+ return;
+ }
}
D->addAttr(::new (S.Context) NakedAttr(S.Context, AL));
@@ -2051,13 +2198,29 @@ static void handleNoReturnAttr(Sema &S, Decl *D, const ParsedAttr &Attrs) {
if (!isa<ObjCMethodDecl>(D)) {
S.Diag(Attrs.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attrs << ExpectedFunctionOrMethod;
+ << Attrs << Attrs.isRegularKeywordAttribute()
+ << ExpectedFunctionOrMethod;
return;
}
D->addAttr(::new (S.Context) NoReturnAttr(S.Context, Attrs));
}
+static void handleStandardNoReturnAttr(Sema &S, Decl *D, const ParsedAttr &A) {
+ // The [[_Noreturn]] spelling is deprecated in C23, so if that was used,
+ // issue an appropriate diagnostic. However, don't issue a diagnostic if the
+ // attribute name comes from a macro expansion. We don't want to punish users
+ // who write [[noreturn]] after including <stdnoreturn.h> (where 'noreturn'
+ // is defined as a macro which expands to '_Noreturn').
+ if (!S.getLangOpts().CPlusPlus &&
+ A.getSemanticSpelling() == CXX11NoReturnAttr::C23_Noreturn &&
+ !(A.getLoc().isMacroID() &&
+ S.getSourceManager().isInSystemMacro(A.getLoc())))
+ S.Diag(A.getLoc(), diag::warn_deprecated_noreturn_spelling) << A.getRange();
+
+ D->addAttr(::new (S.Context) CXX11NoReturnAttr(S.Context, A));
+}
+
static void handleNoCfCheckAttr(Sema &S, Decl *D, const ParsedAttr &Attrs) {
if (!S.getLangOpts().CFProtectionBranch)
S.Diag(Attrs.getLoc(), diag::warn_nocf_check_attribute_ignored);
@@ -2077,7 +2240,9 @@ bool Sema::CheckAttrNoArgs(const ParsedAttr &Attrs) {
bool Sema::CheckAttrTarget(const ParsedAttr &AL) {
// Check whether the attribute is valid on the current target.
if (!AL.existsInTarget(Context.getTargetInfo())) {
- Diag(AL.getLoc(), diag::warn_unknown_attribute_ignored)
+ Diag(AL.getLoc(), AL.isRegularKeywordAttribute()
+ ? diag::err_keyword_not_supported_on_target
+ : diag::warn_unknown_attribute_ignored)
<< AL << AL.getRange();
AL.setInvalid();
return true;
@@ -2097,7 +2262,8 @@ static void handleAnalyzerNoReturnAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
S.Diag(AL.getLoc(), AL.isStandardAttributeSyntax()
? diag::err_attribute_wrong_decl_type
: diag::warn_attribute_wrong_decl_type)
- << AL << ExpectedFunctionMethodOrBlock;
+ << AL << AL.isRegularKeywordAttribute()
+ << ExpectedFunctionMethodOrBlock;
return;
}
}
@@ -2188,6 +2354,10 @@ static void handleUnusedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleConstructorAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
uint32_t priority = ConstructorAttr::DefaultPriority;
+ if (S.getLangOpts().HLSL && AL.getNumArgs()) {
+ S.Diag(AL.getLoc(), diag::err_hlsl_init_priority_unsupported);
+ return;
+ }
if (AL.getNumArgs() &&
!checkUInt32Argument(S, AL, AL.getArgAsExpr(0), priority))
return;
@@ -2473,10 +2643,11 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
bool IsUnavailable = AL.getUnavailableLoc().isValid();
bool IsStrict = AL.getStrictLoc().isValid();
StringRef Str;
- if (const auto *SE = dyn_cast_or_null<StringLiteral>(AL.getMessageExpr()))
+ if (const auto *SE = dyn_cast_if_present<StringLiteral>(AL.getMessageExpr()))
Str = SE->getString();
StringRef Replacement;
- if (const auto *SE = dyn_cast_or_null<StringLiteral>(AL.getReplacementExpr()))
+ if (const auto *SE =
+ dyn_cast_if_present<StringLiteral>(AL.getReplacementExpr()))
Replacement = SE->getString();
if (II->isStr("swift")) {
@@ -2488,6 +2659,15 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
}
+ if (II->isStr("fuchsia")) {
+ std::optional<unsigned> Min, Sub;
+ if ((Min = Introduced.Version.getMinor()) ||
+ (Sub = Introduced.Version.getSubminor())) {
+ S.Diag(AL.getLoc(), diag::warn_availability_fuchsia_unavailable_minor);
+ return;
+ }
+ }
+
int PriorityModifier = AL.isPragmaClangAttribute()
? Sema::AP_PragmaClangAttribute
: Sema::AP_Explicit;
@@ -2508,37 +2688,53 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
NewII = &S.Context.Idents.get("watchos_app_extension");
if (NewII) {
- auto adjustWatchOSVersion = [](VersionTuple Version) -> VersionTuple {
- if (Version.empty())
- return Version;
- auto Major = Version.getMajor();
- auto NewMajor = Major >= 9 ? Major - 7 : 0;
- if (NewMajor >= 2) {
- if (Version.getMinor().hasValue()) {
- if (Version.getSubminor().hasValue())
- return VersionTuple(NewMajor, Version.getMinor().getValue(),
- Version.getSubminor().getValue());
- else
- return VersionTuple(NewMajor, Version.getMinor().getValue());
- }
- return VersionTuple(NewMajor);
+ const auto *SDKInfo = S.getDarwinSDKInfoForAvailabilityChecking();
+ const auto *IOSToWatchOSMapping =
+ SDKInfo ? SDKInfo->getVersionMapping(
+ DarwinSDKInfo::OSEnvPair::iOStoWatchOSPair())
+ : nullptr;
+
+ auto adjustWatchOSVersion =
+ [IOSToWatchOSMapping](VersionTuple Version) -> VersionTuple {
+ if (Version.empty())
+ return Version;
+ auto MinimumWatchOSVersion = VersionTuple(2, 0);
+
+ if (IOSToWatchOSMapping) {
+ if (auto MappedVersion = IOSToWatchOSMapping->map(
+ Version, MinimumWatchOSVersion, std::nullopt)) {
+ return *MappedVersion;
}
+ }
- return VersionTuple(2, 0);
- };
+ auto Major = Version.getMajor();
+ auto NewMajor = Major >= 9 ? Major - 7 : 0;
+ if (NewMajor >= 2) {
+ if (Version.getMinor()) {
+ if (Version.getSubminor())
+ return VersionTuple(NewMajor, *Version.getMinor(),
+ *Version.getSubminor());
+ else
+ return VersionTuple(NewMajor, *Version.getMinor());
+ }
+ return VersionTuple(NewMajor);
+ }
- auto NewIntroduced = adjustWatchOSVersion(Introduced.Version);
- auto NewDeprecated = adjustWatchOSVersion(Deprecated.Version);
- auto NewObsoleted = adjustWatchOSVersion(Obsoleted.Version);
-
- AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(
- ND, AL, NewII, true /*Implicit*/, NewIntroduced, NewDeprecated,
- NewObsoleted, IsUnavailable, Str, IsStrict, Replacement,
- Sema::AMK_None,
- PriorityModifier + Sema::AP_InferredFromOtherPlatform);
- if (NewAttr)
- D->addAttr(NewAttr);
- }
+ return MinimumWatchOSVersion;
+ };
+
+ auto NewIntroduced = adjustWatchOSVersion(Introduced.Version);
+ auto NewDeprecated = adjustWatchOSVersion(Deprecated.Version);
+ auto NewObsoleted = adjustWatchOSVersion(Obsoleted.Version);
+
+ AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(
+ ND, AL, NewII, true /*Implicit*/, NewIntroduced, NewDeprecated,
+ NewObsoleted, IsUnavailable, Str, IsStrict, Replacement,
+ Sema::AMK_None,
+ PriorityModifier + Sema::AP_InferredFromOtherPlatform);
+ if (NewAttr)
+ D->addAttr(NewAttr);
+ }
} else if (S.Context.getTargetInfo().getTriple().isTvOS()) {
// Transcribe "ios" to "tvos" (and add a new attribute) if the versioning
// matches before the start of the tvOS platform.
@@ -2549,14 +2745,38 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
NewII = &S.Context.Idents.get("tvos_app_extension");
if (NewII) {
+ const auto *SDKInfo = S.getDarwinSDKInfoForAvailabilityChecking();
+ const auto *IOSToTvOSMapping =
+ SDKInfo ? SDKInfo->getVersionMapping(
+ DarwinSDKInfo::OSEnvPair::iOStoTvOSPair())
+ : nullptr;
+
+ auto AdjustTvOSVersion =
+ [IOSToTvOSMapping](VersionTuple Version) -> VersionTuple {
+ if (Version.empty())
+ return Version;
+
+ if (IOSToTvOSMapping) {
+ if (auto MappedVersion = IOSToTvOSMapping->map(
+ Version, VersionTuple(0, 0), std::nullopt)) {
+ return *MappedVersion;
+ }
+ }
+ return Version;
+ };
+
+ auto NewIntroduced = AdjustTvOSVersion(Introduced.Version);
+ auto NewDeprecated = AdjustTvOSVersion(Deprecated.Version);
+ auto NewObsoleted = AdjustTvOSVersion(Obsoleted.Version);
+
AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(
- ND, AL, NewII, true /*Implicit*/, Introduced.Version,
- Deprecated.Version, Obsoleted.Version, IsUnavailable, Str, IsStrict,
- Replacement, Sema::AMK_None,
+ ND, AL, NewII, true /*Implicit*/, NewIntroduced, NewDeprecated,
+ NewObsoleted, IsUnavailable, Str, IsStrict, Replacement,
+ Sema::AMK_None,
PriorityModifier + Sema::AP_InferredFromOtherPlatform);
if (NewAttr)
D->addAttr(NewAttr);
- }
+ }
} else if (S.Context.getTargetInfo().getTriple().getOS() ==
llvm::Triple::IOS &&
S.Context.getTargetInfo().getTriple().isMacCatalystEnvironment()) {
@@ -2581,7 +2801,7 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return V;
};
AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(
- ND, AL.getRange(), NewII, true /*Implicit*/,
+ ND, AL, NewII, true /*Implicit*/,
MinMacCatalystVersion(Introduced.Version),
MinMacCatalystVersion(Deprecated.Version),
MinMacCatalystVersion(Obsoleted.Version), IsUnavailable, Str,
@@ -2601,28 +2821,29 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// attributes that are inferred from 'ios'.
NewII = &S.Context.Idents.get("maccatalyst");
auto RemapMacOSVersion =
- [&](const VersionTuple &V) -> Optional<VersionTuple> {
+ [&](const VersionTuple &V) -> std::optional<VersionTuple> {
if (V.empty())
- return None;
+ return std::nullopt;
// API_TO_BE_DEPRECATED is 100000.
if (V.getMajor() == 100000)
return VersionTuple(100000);
// The minimum iosmac version is 13.1
- return MacOStoMacCatalystMapping->map(V, VersionTuple(13, 1), None);
+ return MacOStoMacCatalystMapping->map(V, VersionTuple(13, 1),
+ std::nullopt);
};
- Optional<VersionTuple> NewIntroduced =
- RemapMacOSVersion(Introduced.Version),
- NewDeprecated =
- RemapMacOSVersion(Deprecated.Version),
- NewObsoleted =
- RemapMacOSVersion(Obsoleted.Version);
+ std::optional<VersionTuple> NewIntroduced =
+ RemapMacOSVersion(Introduced.Version),
+ NewDeprecated =
+ RemapMacOSVersion(Deprecated.Version),
+ NewObsoleted =
+ RemapMacOSVersion(Obsoleted.Version);
if (NewIntroduced || NewDeprecated || NewObsoleted) {
auto VersionOrEmptyVersion =
- [](const Optional<VersionTuple> &V) -> VersionTuple {
+ [](const std::optional<VersionTuple> &V) -> VersionTuple {
return V ? *V : VersionTuple();
};
AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(
- ND, AL.getRange(), NewII, true /*Implicit*/,
+ ND, AL, NewII, true /*Implicit*/,
VersionOrEmptyVersion(NewIntroduced),
VersionOrEmptyVersion(NewDeprecated),
VersionOrEmptyVersion(NewObsoleted), /*IsUnavailable=*/false, Str,
@@ -2639,19 +2860,22 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleExternalSourceSymbolAttr(Sema &S, Decl *D,
const ParsedAttr &AL) {
- if (!AL.checkAtLeastNumArgs(S, 1) || !AL.checkAtMostNumArgs(S, 3))
+ if (!AL.checkAtLeastNumArgs(S, 1) || !AL.checkAtMostNumArgs(S, 4))
return;
StringRef Language;
- if (const auto *SE = dyn_cast_or_null<StringLiteral>(AL.getArgAsExpr(0)))
+ if (const auto *SE = dyn_cast_if_present<StringLiteral>(AL.getArgAsExpr(0)))
Language = SE->getString();
StringRef DefinedIn;
- if (const auto *SE = dyn_cast_or_null<StringLiteral>(AL.getArgAsExpr(1)))
+ if (const auto *SE = dyn_cast_if_present<StringLiteral>(AL.getArgAsExpr(1)))
DefinedIn = SE->getString();
bool IsGeneratedDeclaration = AL.getArgAsIdent(2) != nullptr;
+ StringRef USR;
+ if (const auto *SE = dyn_cast_if_present<StringLiteral>(AL.getArgAsExpr(3)))
+ USR = SE->getString();
D->addAttr(::new (S.Context) ExternalSourceSymbolAttr(
- S.Context, AL, Language, DefinedIn, IsGeneratedDeclaration));
+ S.Context, AL, Language, DefinedIn, IsGeneratedDeclaration, USR));
}
template <class T>
@@ -2690,12 +2914,10 @@ static void handleVisibilityAttr(Sema &S, Decl *D, const ParsedAttr &AL,
}
// 'type_visibility' can only go on a type or namespace.
- if (isTypeVisibility &&
- !(isa<TagDecl>(D) ||
- isa<ObjCInterfaceDecl>(D) ||
- isa<NamespaceDecl>(D))) {
+ if (isTypeVisibility && !(isa<TagDecl>(D) || isa<ObjCInterfaceDecl>(D) ||
+ isa<NamespaceDecl>(D))) {
S.Diag(AL.getRange().getBegin(), diag::err_attribute_wrong_decl_type)
- << AL << ExpectedTypeOrNamespace;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedTypeOrNamespace;
return;
}
@@ -2842,9 +3064,8 @@ static void handleSentinelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
unsigned sentinel = (unsigned)SentinelAttr::DefaultSentinel;
if (AL.getNumArgs() > 0) {
Expr *E = AL.getArgAsExpr(0);
- Optional<llvm::APSInt> Idx = llvm::APSInt(32);
- if (E->isTypeDependent() || E->isValueDependent() ||
- !(Idx = E->getIntegerConstantExpr(S.Context))) {
+ std::optional<llvm::APSInt> Idx = llvm::APSInt(32);
+ if (E->isTypeDependent() || !(Idx = E->getIntegerConstantExpr(S.Context))) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
<< AL << 1 << AANT_ArgumentIntegerConstant << E->getSourceRange();
return;
@@ -2862,9 +3083,8 @@ static void handleSentinelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
unsigned nullPos = (unsigned)SentinelAttr::DefaultNullPos;
if (AL.getNumArgs() > 1) {
Expr *E = AL.getArgAsExpr(1);
- Optional<llvm::APSInt> Idx = llvm::APSInt(32);
- if (E->isTypeDependent() || E->isValueDependent() ||
- !(Idx = E->getIntegerConstantExpr(S.Context))) {
+ std::optional<llvm::APSInt> Idx = llvm::APSInt(32);
+ if (E->isTypeDependent() || !(Idx = E->getIntegerConstantExpr(S.Context))) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
<< AL << 2 << AANT_ArgumentIntegerConstant << E->getSourceRange();
return;
@@ -2916,12 +3136,14 @@ static void handleSentinelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
} else {
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL << ExpectedFunctionMethodOrBlock;
+ << AL << AL.isRegularKeywordAttribute()
+ << ExpectedFunctionMethodOrBlock;
return;
}
} else {
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL << ExpectedFunctionMethodOrBlock;
+ << AL << AL.isRegularKeywordAttribute()
+ << ExpectedFunctionMethodOrBlock;
return;
}
D->addAttr(::new (S.Context) SentinelAttr(S.Context, AL, sentinel, nullPos));
@@ -2946,28 +3168,37 @@ static void handleWarnUnusedResult(Sema &S, Decl *D, const ParsedAttr &AL) {
// as a function pointer.
if (isa<VarDecl>(D))
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type_str)
- << AL << "functions, classes, or enumerations";
+ << AL << AL.isRegularKeywordAttribute()
+ << "functions, classes, or enumerations";
// If this is spelled as the standard C++17 attribute, but not in C++17,
// warn about using it as an extension. If there are attribute arguments,
- // then claim it's a C++2a extension instead.
+ // then claim it's a C++20 extension instead.
// FIXME: If WG14 does not seem likely to adopt the same feature, add an
- // extension warning for C2x mode.
+ // extension warning for C23 mode.
const LangOptions &LO = S.getLangOpts();
if (AL.getNumArgs() == 1) {
if (LO.CPlusPlus && !LO.CPlusPlus20)
S.Diag(AL.getLoc(), diag::ext_cxx20_attr) << AL;
- // Since this this is spelled [[nodiscard]], get the optional string
- // literal. If in C++ mode, but not in C++2a mode, diagnose as an
+ // Since this is spelled [[nodiscard]], get the optional string
+ // literal. If in C++ mode, but not in C++20 mode, diagnose as an
// extension.
- // FIXME: C2x should support this feature as well, even as an extension.
+ // FIXME: C23 should support this feature as well, even as an extension.
if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, nullptr))
return;
} else if (LO.CPlusPlus && !LO.CPlusPlus17)
S.Diag(AL.getLoc(), diag::ext_cxx17_attr) << AL;
}
+ if ((!AL.isGNUAttribute() &&
+ !(AL.isStandardAttributeSyntax() && AL.isClangScope())) &&
+ isa<TypedefNameDecl>(D)) {
+ S.Diag(AL.getLoc(), diag::warn_unused_result_typedef_unsupported_spelling)
+ << AL.isGNUScope();
+ return;
+ }
+
D->addAttr(::new (S.Context) WarnUnusedResultAttr(S.Context, AL, Str));
}
@@ -2984,7 +3215,7 @@ static void handleWeakImportAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Nothing to warn about here.
} else
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL << ExpectedVariableOrFunction;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedVariableOrFunction;
return;
}
@@ -3138,6 +3369,22 @@ static void handleSectionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
}
+static void handleCodeModelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ StringRef Str;
+ SourceLocation LiteralLoc;
+ // Check that it is a string.
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &LiteralLoc))
+ return;
+
+ llvm::CodeModel::Model CM;
+ if (!CodeModelAttr::ConvertStrToModel(Str, CM)) {
+ S.Diag(LiteralLoc, diag::err_attr_codemodel_arg) << Str;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) CodeModelAttr(S.Context, AL, CM));
+}
+
// This is used for `__declspec(code_seg("segname"))` on a decl.
// `#pragma code_seg("segname")` uses checkSectionName() instead.
static bool checkCodeSegName(Sema &S, SourceLocation LiteralLoc,
@@ -3195,59 +3442,101 @@ static void handleCodeSegAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// handled later in the process, once we know how many exist.
bool Sema::checkTargetAttr(SourceLocation LiteralLoc, StringRef AttrStr) {
enum FirstParam { Unsupported, Duplicate, Unknown };
- enum SecondParam { None, Architecture, Tune };
- if (AttrStr.find("fpmath=") != StringRef::npos)
+ enum SecondParam { None, CPU, Tune };
+ enum ThirdParam { Target, TargetClones };
+ if (AttrStr.contains("fpmath="))
return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
- << Unsupported << None << "fpmath=";
+ << Unsupported << None << "fpmath=" << Target;
// Diagnose use of tune if target doesn't support it.
if (!Context.getTargetInfo().supportsTargetAttributeTune() &&
- AttrStr.find("tune=") != StringRef::npos)
+ AttrStr.contains("tune="))
return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
- << Unsupported << None << "tune=";
+ << Unsupported << None << "tune=" << Target;
- ParsedTargetAttr ParsedAttrs = TargetAttr::parse(AttrStr);
+ ParsedTargetAttr ParsedAttrs =
+ Context.getTargetInfo().parseTargetAttr(AttrStr);
- if (!ParsedAttrs.Architecture.empty() &&
- !Context.getTargetInfo().isValidCPUName(ParsedAttrs.Architecture))
+ if (!ParsedAttrs.CPU.empty() &&
+ !Context.getTargetInfo().isValidCPUName(ParsedAttrs.CPU))
return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
- << Unknown << Architecture << ParsedAttrs.Architecture;
+ << Unknown << CPU << ParsedAttrs.CPU << Target;
if (!ParsedAttrs.Tune.empty() &&
!Context.getTargetInfo().isValidCPUName(ParsedAttrs.Tune))
return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
- << Unknown << Tune << ParsedAttrs.Tune;
+ << Unknown << Tune << ParsedAttrs.Tune << Target;
- if (ParsedAttrs.DuplicateArchitecture)
- return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
- << Duplicate << None << "arch=";
- if (ParsedAttrs.DuplicateTune)
+ if (Context.getTargetInfo().getTriple().isRISCV() &&
+ ParsedAttrs.Duplicate != "")
+ return Diag(LiteralLoc, diag::err_duplicate_target_attribute)
+ << Duplicate << None << ParsedAttrs.Duplicate << Target;
+
+ if (ParsedAttrs.Duplicate != "")
return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
- << Duplicate << None << "tune=";
+ << Duplicate << None << ParsedAttrs.Duplicate << Target;
for (const auto &Feature : ParsedAttrs.Features) {
auto CurFeature = StringRef(Feature).drop_front(); // remove + or -.
if (!Context.getTargetInfo().isValidFeatureName(CurFeature))
return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
- << Unsupported << None << CurFeature;
+ << Unsupported << None << CurFeature << Target;
}
TargetInfo::BranchProtectionInfo BPI;
- StringRef Error;
- if (!ParsedAttrs.BranchProtection.empty() &&
- !Context.getTargetInfo().validateBranchProtection(
- ParsedAttrs.BranchProtection, BPI, Error)) {
- if (Error.empty())
+ StringRef DiagMsg;
+ if (ParsedAttrs.BranchProtection.empty())
+ return false;
+ if (!Context.getTargetInfo().validateBranchProtection(
+ ParsedAttrs.BranchProtection, ParsedAttrs.CPU, BPI, DiagMsg)) {
+ if (DiagMsg.empty())
return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
- << Unsupported << None << "branch-protection";
- else
- return Diag(LiteralLoc, diag::err_invalid_branch_protection_spec)
- << Error;
+ << Unsupported << None << "branch-protection" << Target;
+ return Diag(LiteralLoc, diag::err_invalid_branch_protection_spec)
+ << DiagMsg;
}
+ if (!DiagMsg.empty())
+ Diag(LiteralLoc, diag::warn_unsupported_branch_protection_spec) << DiagMsg;
return false;
}
+// Check Target Version attrs
+bool Sema::checkTargetVersionAttr(SourceLocation LiteralLoc, StringRef &AttrStr,
+ bool &isDefault) {
+ enum FirstParam { Unsupported };
+ enum SecondParam { None };
+ enum ThirdParam { Target, TargetClones, TargetVersion };
+ if (AttrStr.trim() == "default")
+ isDefault = true;
+ llvm::SmallVector<StringRef, 8> Features;
+ AttrStr.split(Features, "+");
+ for (auto &CurFeature : Features) {
+ CurFeature = CurFeature.trim();
+ if (CurFeature == "default")
+ continue;
+ if (!Context.getTargetInfo().validateCpuSupports(CurFeature))
+ return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
+ << Unsupported << None << CurFeature << TargetVersion;
+ }
+ return false;
+}
+
+static void handleTargetVersionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ StringRef Str;
+ SourceLocation LiteralLoc;
+ bool isDefault = false;
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &LiteralLoc) ||
+ S.checkTargetVersionAttr(LiteralLoc, Str, isDefault))
+ return;
+ // Do not create default only target_version attribute
+ if (!isDefault) {
+ TargetVersionAttr *NewAttr =
+ ::new (S.Context) TargetVersionAttr(S.Context, AL, Str);
+ D->addAttr(NewAttr);
+ }
+}
+
static void handleTargetAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
StringRef Str;
SourceLocation LiteralLoc;
@@ -3259,6 +3548,170 @@ static void handleTargetAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(NewAttr);
}
+bool Sema::checkTargetClonesAttrString(
+ SourceLocation LiteralLoc, StringRef Str, const StringLiteral *Literal,
+ bool &HasDefault, bool &HasCommas, bool &HasNotDefault,
+ SmallVectorImpl<SmallString<64>> &StringsBuffer) {
+ enum FirstParam { Unsupported, Duplicate, Unknown };
+ enum SecondParam { None, CPU, Tune };
+ enum ThirdParam { Target, TargetClones };
+ HasCommas = HasCommas || Str.contains(',');
+ const TargetInfo &TInfo = Context.getTargetInfo();
+ // Warn on empty at the beginning of a string.
+ if (Str.size() == 0)
+ return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
+ << Unsupported << None << "" << TargetClones;
+
+ std::pair<StringRef, StringRef> Parts = {{}, Str};
+ while (!Parts.second.empty()) {
+ Parts = Parts.second.split(',');
+ StringRef Cur = Parts.first.trim();
+ SourceLocation CurLoc =
+ Literal->getLocationOfByte(Cur.data() - Literal->getString().data(),
+ getSourceManager(), getLangOpts(), TInfo);
+
+ bool DefaultIsDupe = false;
+ bool HasCodeGenImpact = false;
+ if (Cur.empty())
+ return Diag(CurLoc, diag::warn_unsupported_target_attribute)
+ << Unsupported << None << "" << TargetClones;
+
+ if (TInfo.getTriple().isAArch64()) {
+ // AArch64 target clones specific
+ if (Cur == "default") {
+ DefaultIsDupe = HasDefault;
+ HasDefault = true;
+ if (llvm::is_contained(StringsBuffer, Cur) || DefaultIsDupe)
+ Diag(CurLoc, diag::warn_target_clone_duplicate_options);
+ else
+ StringsBuffer.push_back(Cur);
+ } else {
+ std::pair<StringRef, StringRef> CurParts = {{}, Cur};
+ llvm::SmallVector<StringRef, 8> CurFeatures;
+ while (!CurParts.second.empty()) {
+ CurParts = CurParts.second.split('+');
+ StringRef CurFeature = CurParts.first.trim();
+ if (!TInfo.validateCpuSupports(CurFeature)) {
+ Diag(CurLoc, diag::warn_unsupported_target_attribute)
+ << Unsupported << None << CurFeature << TargetClones;
+ continue;
+ }
+ if (TInfo.doesFeatureAffectCodeGen(CurFeature))
+ HasCodeGenImpact = true;
+ CurFeatures.push_back(CurFeature);
+ }
+ // Canonize TargetClones Attributes
+ llvm::sort(CurFeatures);
+ SmallString<64> Res;
+ for (auto &CurFeat : CurFeatures) {
+ if (!Res.equals(""))
+ Res.append("+");
+ Res.append(CurFeat);
+ }
+ if (llvm::is_contained(StringsBuffer, Res) || DefaultIsDupe)
+ Diag(CurLoc, diag::warn_target_clone_duplicate_options);
+ else if (!HasCodeGenImpact)
+ // Ignore features in target_clone attribute that don't impact
+ // code generation
+ Diag(CurLoc, diag::warn_target_clone_no_impact_options);
+ else if (!Res.empty()) {
+ StringsBuffer.push_back(Res);
+ HasNotDefault = true;
+ }
+ }
+ } else {
+ // Other targets ( currently X86 )
+ if (Cur.starts_with("arch=")) {
+ if (!Context.getTargetInfo().isValidCPUName(
+ Cur.drop_front(sizeof("arch=") - 1)))
+ return Diag(CurLoc, diag::warn_unsupported_target_attribute)
+ << Unsupported << CPU << Cur.drop_front(sizeof("arch=") - 1)
+ << TargetClones;
+ } else if (Cur == "default") {
+ DefaultIsDupe = HasDefault;
+ HasDefault = true;
+ } else if (!Context.getTargetInfo().isValidFeatureName(Cur))
+ return Diag(CurLoc, diag::warn_unsupported_target_attribute)
+ << Unsupported << None << Cur << TargetClones;
+ if (llvm::is_contained(StringsBuffer, Cur) || DefaultIsDupe)
+ Diag(CurLoc, diag::warn_target_clone_duplicate_options);
+ // Note: Add even if there are duplicates, since it changes name mangling.
+ StringsBuffer.push_back(Cur);
+ }
+ }
+ if (Str.rtrim().ends_with(","))
+ return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
+ << Unsupported << None << "" << TargetClones;
+ return false;
+}
+
+static void handleTargetClonesAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (S.Context.getTargetInfo().getTriple().isAArch64() &&
+ !S.Context.getTargetInfo().hasFeature("fmv"))
+ return;
+
+ // Ensure we don't combine these with themselves, since that causes some
+ // confusing behavior.
+ if (const auto *Other = D->getAttr<TargetClonesAttr>()) {
+ S.Diag(AL.getLoc(), diag::err_disallowed_duplicate_attribute) << AL;
+ S.Diag(Other->getLocation(), diag::note_conflicting_attribute);
+ return;
+ }
+ if (checkAttrMutualExclusion<TargetClonesAttr>(S, D, AL))
+ return;
+
+ SmallVector<StringRef, 2> Strings;
+ SmallVector<SmallString<64>, 2> StringsBuffer;
+ bool HasCommas = false, HasDefault = false, HasNotDefault = false;
+
+ for (unsigned I = 0, E = AL.getNumArgs(); I != E; ++I) {
+ StringRef CurStr;
+ SourceLocation LiteralLoc;
+ if (!S.checkStringLiteralArgumentAttr(AL, I, CurStr, &LiteralLoc) ||
+ S.checkTargetClonesAttrString(
+ LiteralLoc, CurStr,
+ cast<StringLiteral>(AL.getArgAsExpr(I)->IgnoreParenCasts()),
+ HasDefault, HasCommas, HasNotDefault, StringsBuffer))
+ return;
+ }
+ for (auto &SmallStr : StringsBuffer)
+ Strings.push_back(SmallStr.str());
+
+ if (HasCommas && AL.getNumArgs() > 1)
+ S.Diag(AL.getLoc(), diag::warn_target_clone_mixed_values);
+
+ if (S.Context.getTargetInfo().getTriple().isAArch64() && !HasDefault) {
+ // Add default attribute if there is no one
+ HasDefault = true;
+ Strings.push_back("default");
+ }
+
+ if (!HasDefault) {
+ S.Diag(AL.getLoc(), diag::err_target_clone_must_have_default);
+ return;
+ }
+
+ // FIXME: We could probably figure out how to get this to work for lambdas
+ // someday.
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) {
+ if (MD->getParent()->isLambda()) {
+ S.Diag(D->getLocation(), diag::err_multiversion_doesnt_support)
+ << static_cast<unsigned>(MultiVersionKind::TargetClones)
+ << /*Lambda*/ 9;
+ return;
+ }
+ }
+
+ // No multiversion if we have default version only.
+ if (S.Context.getTargetInfo().getTriple().isAArch64() && !HasNotDefault)
+ return;
+
+ cast<FunctionDecl>(D)->setIsMultiVersion();
+ TargetClonesAttr *NewAttr = ::new (S.Context)
+ TargetClonesAttr(S.Context, AL, Strings.data(), Strings.size());
+ D->addAttr(NewAttr);
+}
+
static void handleMinVectorWidthAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
Expr *E = AL.getArgAsExpr(0);
uint32_t VecWidth;
@@ -3354,7 +3807,7 @@ static void handleEnumExtensibilityAttr(Sema &S, Decl *D,
/// Handle __attribute__((format_arg((idx)))) attribute based on
/// http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
static void handleFormatArgAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- Expr *IdxExpr = AL.getArgAsExpr(0);
+ const Expr *IdxExpr = AL.getArgAsExpr(0);
ParamIdx Idx;
if (!checkFunctionOrMethodParameterIndex(S, D, AL, 1, IdxExpr, Idx))
return;
@@ -3368,11 +3821,17 @@ static void handleFormatArgAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
(!Ty->isPointerType() ||
!Ty->castAs<PointerType>()->getPointeeType()->isCharType())) {
S.Diag(AL.getLoc(), diag::err_format_attribute_not)
- << "a string type" << IdxExpr->getSourceRange()
- << getFunctionOrMethodParamRange(D, 0);
+ << IdxExpr->getSourceRange() << getFunctionOrMethodParamRange(D, 0);
return;
}
Ty = getFunctionOrMethodResultType(D);
+ // replace instancetype with the class type
+ auto Instancetype = S.Context.getObjCInstanceTypeDecl()->getTypeForDecl();
+ if (Ty->getAs<TypedefType>() == Instancetype)
+ if (auto *OMD = dyn_cast<ObjCMethodDecl>(D))
+ if (auto *Interface = OMD->getClassInterface())
+ Ty = S.Context.getObjCObjectPointerType(
+ QualType(Interface->getTypeForDecl(), 0));
if (!isNSStringType(Ty, S.Context, /*AllowNSAttributedString=*/true) &&
!isCFStringType(Ty, S.Context) &&
(!Ty->isPointerType() ||
@@ -3424,6 +3883,11 @@ static void handleInitPriorityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
}
+ if (S.getLangOpts().HLSL) {
+ S.Diag(AL.getLoc(), diag::err_hlsl_init_priority_unsupported);
+ return;
+ }
+
if (S.getCurFunctionOrMethodDecl()) {
S.Diag(AL.getLoc(), diag::err_init_priority_object_attr);
AL.setInvalid();
@@ -3458,6 +3922,31 @@ static void handleInitPriorityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(::new (S.Context) InitPriorityAttr(S.Context, AL, prioritynum));
}
+ErrorAttr *Sema::mergeErrorAttr(Decl *D, const AttributeCommonInfo &CI,
+ StringRef NewUserDiagnostic) {
+ if (const auto *EA = D->getAttr<ErrorAttr>()) {
+ std::string NewAttr = CI.getNormalizedFullName();
+ assert((NewAttr == "error" || NewAttr == "warning") &&
+ "unexpected normalized full name");
+ bool Match = (EA->isError() && NewAttr == "error") ||
+ (EA->isWarning() && NewAttr == "warning");
+ if (!Match) {
+ Diag(EA->getLocation(), diag::err_attributes_are_not_compatible)
+ << CI << EA
+ << (CI.isRegularKeywordAttribute() ||
+ EA->isRegularKeywordAttribute());
+ Diag(CI.getLoc(), diag::note_conflicting_attribute);
+ return nullptr;
+ }
+ if (EA->getUserDiagnostic() != NewUserDiagnostic) {
+ Diag(CI.getLoc(), diag::warn_duplicate_attribute) << EA;
+ Diag(EA->getLoc(), diag::note_previous_attribute);
+ }
+ D->dropAttr<ErrorAttr>();
+ }
+ return ::new (Context) ErrorAttr(Context, CI, NewUserDiagnostic);
+}
+
FormatAttr *Sema::mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg) {
@@ -3539,27 +4028,12 @@ static void handleFormatAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// make sure the format string is really a string
QualType Ty = getFunctionOrMethodParamType(D, ArgIdx);
- if (Kind == CFStringFormat) {
- if (!isCFStringType(Ty, S.Context)) {
- S.Diag(AL.getLoc(), diag::err_format_attribute_not)
- << "a CFString" << IdxExpr->getSourceRange()
- << getFunctionOrMethodParamRange(D, ArgIdx);
- return;
- }
- } else if (Kind == NSStringFormat) {
- // FIXME: do we need to check if the type is NSString*? What are the
- // semantics?
- if (!isNSStringType(Ty, S.Context, /*AllowNSAttributedString=*/true)) {
- S.Diag(AL.getLoc(), diag::err_format_attribute_not)
- << "an NSString" << IdxExpr->getSourceRange()
- << getFunctionOrMethodParamRange(D, ArgIdx);
- return;
- }
- } else if (!Ty->isPointerType() ||
- !Ty->castAs<PointerType>()->getPointeeType()->isCharType()) {
+ if (!isNSStringType(Ty, S.Context, true) &&
+ !isCFStringType(Ty, S.Context) &&
+ (!Ty->isPointerType() ||
+ !Ty->castAs<PointerType>()->getPointeeType()->isCharType())) {
S.Diag(AL.getLoc(), diag::err_format_attribute_not)
- << "a string type" << IdxExpr->getSourceRange()
- << getFunctionOrMethodParamRange(D, ArgIdx);
+ << IdxExpr->getSourceRange() << getFunctionOrMethodParamRange(D, ArgIdx);
return;
}
@@ -3569,29 +4043,38 @@ static void handleFormatAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!checkUInt32Argument(S, AL, FirstArgExpr, FirstArg, 3))
return;
- // check if the function is variadic if the 3rd argument non-zero
+ // FirstArg == 0 is is always valid.
if (FirstArg != 0) {
- if (isFunctionOrMethodVariadic(D)) {
- ++NumArgs; // +1 for ...
- } else {
- S.Diag(D->getLocation(), diag::err_format_attribute_requires_variadic);
- return;
- }
- }
-
- // strftime requires FirstArg to be 0 because it doesn't read from any
- // variable the input is just the current time + the format string.
- if (Kind == StrftimeFormat) {
- if (FirstArg != 0) {
+ if (Kind == StrftimeFormat) {
+ // If the kind is strftime, FirstArg must be 0 because strftime does not
+ // use any variadic arguments.
S.Diag(AL.getLoc(), diag::err_format_strftime_third_parameter)
- << FirstArgExpr->getSourceRange();
+ << FirstArgExpr->getSourceRange()
+ << FixItHint::CreateReplacement(FirstArgExpr->getSourceRange(), "0");
return;
+ } else if (isFunctionOrMethodVariadic(D)) {
+ // Else, if the function is variadic, then FirstArg must be 0 or the
+ // "position" of the ... parameter. It's unusual to use 0 with variadic
+ // functions, so the fixit proposes the latter.
+ if (FirstArg != NumArgs + 1) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << AL << 3 << FirstArgExpr->getSourceRange()
+ << FixItHint::CreateReplacement(FirstArgExpr->getSourceRange(),
+ std::to_string(NumArgs + 1));
+ return;
+ }
+ } else {
+ // Inescapable GCC compatibility diagnostic.
+ S.Diag(D->getLocation(), diag::warn_gcc_requires_variadic_function) << AL;
+ if (FirstArg <= Idx) {
+ // Else, the function is not variadic, and FirstArg must be 0 or any
+ // parameter after the format parameter. We don't offer a fixit because
+ // there are too many possible good values.
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << AL << 3 << FirstArgExpr->getSourceRange();
+ return;
+ }
}
- // if 0 it disables parameter checking (to use with e.g. va_list)
- } else if (FirstArg != 0 && FirstArg != NumArgs) {
- S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
- << AL << 3 << FirstArgExpr->getSourceRange();
- return;
}
FormatAttr *NewAttr = S.mergeFormatAttr(D, AL, II, Idx, FirstArg);
@@ -3768,8 +4251,8 @@ static void handleTransparentUnionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
RD = dyn_cast<RecordDecl>(D);
if (!RD || !RD->isUnion()) {
- S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type) << AL
- << ExpectedUnion;
+ S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << AL << AL.isRegularKeywordAttribute() << ExpectedUnion;
return;
}
@@ -3832,48 +4315,10 @@ static void handleTransparentUnionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
void Sema::AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Str, MutableArrayRef<Expr *> Args) {
auto *Attr = AnnotateAttr::Create(Context, Str, Args.data(), Args.size(), CI);
- llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
- for (unsigned Idx = 0; Idx < Attr->args_size(); Idx++) {
- Expr *&E = Attr->args_begin()[Idx];
- assert(E && "error are handled before");
- if (E->isValueDependent() || E->isTypeDependent())
- continue;
-
- if (E->getType()->isArrayType())
- E = ImpCastExprToType(E, Context.getPointerType(E->getType()),
- clang::CK_ArrayToPointerDecay)
- .get();
- if (E->getType()->isFunctionType())
- E = ImplicitCastExpr::Create(Context,
- Context.getPointerType(E->getType()),
- clang::CK_FunctionToPointerDecay, E, nullptr,
- VK_PRValue, FPOptionsOverride());
- if (E->isLValue())
- E = ImplicitCastExpr::Create(Context, E->getType().getNonReferenceType(),
- clang::CK_LValueToRValue, E, nullptr,
- VK_PRValue, FPOptionsOverride());
-
- Expr::EvalResult Eval;
- Notes.clear();
- Eval.Diag = &Notes;
-
- bool Result =
- E->EvaluateAsConstantExpr(Eval, Context);
-
- /// Result means the expression can be folded to a constant.
- /// Note.empty() means the expression is a valid constant expression in the
- /// current language mode.
- if (!Result || !Notes.empty()) {
- Diag(E->getBeginLoc(), diag::err_attribute_argument_n_type)
- << CI << (Idx + 1) << AANT_ArgumentConstantExpr;
- for (auto &Note : Notes)
- Diag(Note.first, Note.second);
- return;
- }
- assert(Eval.Val.hasValue());
- E = ConstantExpr::Create(Context, E, Eval.Val);
+ if (ConstantFoldAttrArgs(
+ CI, MutableArrayRef<Expr *>(Attr->args_begin(), Attr->args_end()))) {
+ D->addAttr(Attr);
}
- D->addAttr(Attr);
}
static void handleAnnotateAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
@@ -3938,6 +4383,27 @@ void Sema::AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E) {
}
static void handleAlignedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (AL.hasParsedType()) {
+ const ParsedType &TypeArg = AL.getTypeArg();
+ TypeSourceInfo *TInfo;
+ (void)S.GetTypeFromParser(
+ ParsedType::getFromOpaquePtr(TypeArg.getAsOpaquePtr()), &TInfo);
+ if (AL.isPackExpansion() &&
+ !TInfo->getType()->containsUnexpandedParameterPack()) {
+ S.Diag(AL.getEllipsisLoc(),
+ diag::err_pack_expansion_without_parameter_packs);
+ return;
+ }
+
+ if (!AL.isPackExpansion() &&
+ S.DiagnoseUnexpandedParameterPack(TInfo->getTypeLoc().getBeginLoc(),
+ TInfo, Sema::UPPC_Expression))
+ return;
+
+ S.AddAlignedAttr(D, AL, TInfo, AL.isPackExpansion());
+ return;
+ }
+
// check the attribute arguments.
if (AL.getNumArgs() > 1) {
S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments) << AL << 1;
@@ -3962,47 +4428,61 @@ static void handleAlignedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
S.AddAlignedAttr(D, AL, E, AL.isPackExpansion());
}
+/// Perform checking of type validity
+///
+/// C++11 [dcl.align]p1:
+/// An alignment-specifier may be applied to a variable or to a class
+/// data member, but it shall not be applied to a bit-field, a function
+/// parameter, the formal parameter of a catch clause, or a variable
+/// declared with the register storage class specifier. An
+/// alignment-specifier may also be applied to the declaration of a class
+/// or enumeration type.
+/// CWG 2354:
+/// CWG agreed to remove permission for alignas to be applied to
+/// enumerations.
+/// C11 6.7.5/2:
+/// An alignment attribute shall not be specified in a declaration of
+/// a typedef, or a bit-field, or a function, or a parameter, or an
+/// object declared with the register storage-class specifier.
+static bool validateAlignasAppliedType(Sema &S, Decl *D,
+ const AlignedAttr &Attr,
+ SourceLocation AttrLoc) {
+ int DiagKind = -1;
+ if (isa<ParmVarDecl>(D)) {
+ DiagKind = 0;
+ } else if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ if (VD->getStorageClass() == SC_Register)
+ DiagKind = 1;
+ if (VD->isExceptionVariable())
+ DiagKind = 2;
+ } else if (const auto *FD = dyn_cast<FieldDecl>(D)) {
+ if (FD->isBitField())
+ DiagKind = 3;
+ } else if (const auto *ED = dyn_cast<EnumDecl>(D)) {
+ if (ED->getLangOpts().CPlusPlus)
+ DiagKind = 4;
+ } else if (!isa<TagDecl>(D)) {
+ return S.Diag(AttrLoc, diag::err_attribute_wrong_decl_type)
+ << &Attr << Attr.isRegularKeywordAttribute()
+ << (Attr.isC11() ? ExpectedVariableOrField
+ : ExpectedVariableFieldOrTag);
+ }
+ if (DiagKind != -1) {
+ return S.Diag(AttrLoc, diag::err_alignas_attribute_wrong_decl_type)
+ << &Attr << DiagKind;
+ }
+ return false;
+}
+
void Sema::AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion) {
AlignedAttr TmpAttr(Context, CI, true, E);
SourceLocation AttrLoc = CI.getLoc();
// C++11 alignas(...) and C11 _Alignas(...) have additional requirements.
- if (TmpAttr.isAlignas()) {
- // C++11 [dcl.align]p1:
- // An alignment-specifier may be applied to a variable or to a class
- // data member, but it shall not be applied to a bit-field, a function
- // parameter, the formal parameter of a catch clause, or a variable
- // declared with the register storage class specifier. An
- // alignment-specifier may also be applied to the declaration of a class
- // or enumeration type.
- // C11 6.7.5/2:
- // An alignment attribute shall not be specified in a declaration of
- // a typedef, or a bit-field, or a function, or a parameter, or an
- // object declared with the register storage-class specifier.
- int DiagKind = -1;
- if (isa<ParmVarDecl>(D)) {
- DiagKind = 0;
- } else if (const auto *VD = dyn_cast<VarDecl>(D)) {
- if (VD->getStorageClass() == SC_Register)
- DiagKind = 1;
- if (VD->isExceptionVariable())
- DiagKind = 2;
- } else if (const auto *FD = dyn_cast<FieldDecl>(D)) {
- if (FD->isBitField())
- DiagKind = 3;
- } else if (!isa<TagDecl>(D)) {
- Diag(AttrLoc, diag::err_attribute_wrong_decl_type) << &TmpAttr
- << (TmpAttr.isC11() ? ExpectedVariableOrField
- : ExpectedVariableFieldOrTag);
- return;
- }
- if (DiagKind != -1) {
- Diag(AttrLoc, diag::err_alignas_attribute_wrong_decl_type)
- << &TmpAttr << DiagKind;
- return;
- }
- }
+ if (TmpAttr.isAlignas() &&
+ validateAlignasAppliedType(*this, D, TmpAttr, AttrLoc))
+ return;
if (E->isValueDependent()) {
// We can't support a dependent alignment on a non-dependent type,
@@ -4030,14 +4510,16 @@ void Sema::AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
if (ICE.isInvalid())
return;
- uint64_t AlignVal = Alignment.getZExtValue();
- // 16 byte ByVal alignment not due to a vector member is not honoured by XL
- // on AIX. Emit a warning here that users are generating binary incompatible
- // code to be safe.
- if (AlignVal >= 16 && isa<FieldDecl>(D) &&
- Context.getTargetInfo().getTriple().isOSAIX())
- Diag(AttrLoc, diag::warn_not_xl_compatible) << E->getSourceRange();
+ uint64_t MaximumAlignment = Sema::MaximumAlignment;
+ if (Context.getTargetInfo().getTriple().isOSBinFormatCOFF())
+ MaximumAlignment = std::min(MaximumAlignment, uint64_t(8192));
+ if (Alignment > MaximumAlignment) {
+ Diag(AttrLoc, diag::err_attribute_aligned_too_great)
+ << MaximumAlignment << E->getSourceRange();
+ return;
+ }
+ uint64_t AlignVal = Alignment.getZExtValue();
// C++11 [dcl.align]p2:
// -- if the constant expression evaluates to zero, the alignment
// specifier shall have no effect
@@ -4051,21 +4533,12 @@ void Sema::AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
}
}
- unsigned MaximumAlignment = Sema::MaximumAlignment;
- if (Context.getTargetInfo().getTriple().isOSBinFormatCOFF())
- MaximumAlignment = std::min(MaximumAlignment, 8192u);
- if (AlignVal > MaximumAlignment) {
- Diag(AttrLoc, diag::err_attribute_aligned_too_great)
- << MaximumAlignment << E->getSourceRange();
- return;
- }
-
- if (Context.getTargetInfo().isTLSSupported()) {
+ const auto *VD = dyn_cast<VarDecl>(D);
+ if (VD) {
unsigned MaxTLSAlign =
Context.toCharUnitsFromBits(Context.getTargetInfo().getMaxTLSAlign())
.getQuantity();
- const auto *VD = dyn_cast<VarDecl>(D);
- if (MaxTLSAlign && AlignVal > MaxTLSAlign && VD &&
+ if (MaxTLSAlign && AlignVal > MaxTLSAlign &&
VD->getTLSKind() != VarDecl::TLS_None) {
Diag(VD->getLocation(), diag::err_tls_var_aligned_over_maximum)
<< (unsigned)AlignVal << VD << MaxTLSAlign;
@@ -4073,17 +4546,69 @@ void Sema::AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
}
}
+ // On AIX, an aligned attribute can not decrease the alignment when applied
+ // to a variable declaration with vector type.
+ if (VD && Context.getTargetInfo().getTriple().isOSAIX()) {
+ const Type *Ty = VD->getType().getTypePtr();
+ if (Ty->isVectorType() && AlignVal < 16) {
+ Diag(VD->getLocation(), diag::warn_aligned_attr_underaligned)
+ << VD->getType() << 16;
+ return;
+ }
+ }
+
AlignedAttr *AA = ::new (Context) AlignedAttr(Context, CI, true, ICE.get());
AA->setPackExpansion(IsPackExpansion);
+ AA->setCachedAlignmentValue(
+ static_cast<unsigned>(AlignVal * Context.getCharWidth()));
D->addAttr(AA);
}
void Sema::AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI,
TypeSourceInfo *TS, bool IsPackExpansion) {
- // FIXME: Cache the number on the AL object if non-dependent?
- // FIXME: Perform checking of type validity
+ AlignedAttr TmpAttr(Context, CI, false, TS);
+ SourceLocation AttrLoc = CI.getLoc();
+
+ // C++11 alignas(...) and C11 _Alignas(...) have additional requirements.
+ if (TmpAttr.isAlignas() &&
+ validateAlignasAppliedType(*this, D, TmpAttr, AttrLoc))
+ return;
+
+ if (TS->getType()->isDependentType()) {
+ // We can't support a dependent alignment on a non-dependent type,
+ // because we have no way to model that a type is "type-dependent"
+ // but not dependent in any other way.
+ if (const auto *TND = dyn_cast<TypedefNameDecl>(D)) {
+ if (!TND->getUnderlyingType()->isDependentType()) {
+ Diag(AttrLoc, diag::err_alignment_dependent_typedef_name)
+ << TS->getTypeLoc().getSourceRange();
+ return;
+ }
+ }
+
+ AlignedAttr *AA = ::new (Context) AlignedAttr(Context, CI, false, TS);
+ AA->setPackExpansion(IsPackExpansion);
+ D->addAttr(AA);
+ return;
+ }
+
+ const auto *VD = dyn_cast<VarDecl>(D);
+ unsigned AlignVal = TmpAttr.getAlignment(Context);
+ // On AIX, an aligned attribute can not decrease the alignment when applied
+ // to a variable declaration with vector type.
+ if (VD && Context.getTargetInfo().getTriple().isOSAIX()) {
+ const Type *Ty = VD->getType().getTypePtr();
+ if (Ty->isVectorType() &&
+ Context.toCharUnitsFromBits(AlignVal).getQuantity() < 16) {
+ Diag(VD->getLocation(), diag::warn_aligned_attr_underaligned)
+ << VD->getType() << 16;
+ return;
+ }
+ }
+
AlignedAttr *AA = ::new (Context) AlignedAttr(Context, CI, false, TS);
AA->setPackExpansion(IsPackExpansion);
+ AA->setCachedAlignmentValue(AlignVal);
D->addAttr(AA);
}
@@ -4161,9 +4686,10 @@ bool Sema::checkMSInheritanceAttrOnDefinition(
/// attribute.
static void parseModeAttrArg(Sema &S, StringRef Str, unsigned &DestWidth,
bool &IntegerMode, bool &ComplexMode,
- bool &ExplicitIEEE) {
+ FloatModeKind &ExplicitType) {
IntegerMode = true;
ComplexMode = false;
+ ExplicitType = FloatModeKind::NoFloat;
switch (Str.size()) {
case 2:
switch (Str[0]) {
@@ -4183,13 +4709,17 @@ static void parseModeAttrArg(Sema &S, StringRef Str, unsigned &DestWidth,
DestWidth = 96;
break;
case 'K': // KFmode - IEEE quad precision (__float128)
- ExplicitIEEE = true;
+ ExplicitType = FloatModeKind::Float128;
DestWidth = Str[1] == 'I' ? 0 : 128;
break;
case 'T':
- ExplicitIEEE = false;
+ ExplicitType = FloatModeKind::LongDouble;
DestWidth = 128;
break;
+ case 'I':
+ ExplicitType = FloatModeKind::Ibm128;
+ DestWidth = Str[1] == 'I' ? 0 : 128;
+ break;
}
if (Str[1] == 'F') {
IntegerMode = false;
@@ -4210,7 +4740,7 @@ static void parseModeAttrArg(Sema &S, StringRef Str, unsigned &DestWidth,
break;
case 7:
if (Str == "pointer")
- DestWidth = S.Context.getTargetInfo().getPointerWidth(0);
+ DestWidth = S.Context.getTargetInfo().getPointerWidth(LangAS::Default);
break;
case 11:
if (Str == "unwind_word")
@@ -4248,7 +4778,7 @@ void Sema::AddModeAttr(Decl *D, const AttributeCommonInfo &CI,
unsigned DestWidth = 0;
bool IntegerMode = true;
bool ComplexMode = false;
- bool ExplicitIEEE = false;
+ FloatModeKind ExplicitType = FloatModeKind::NoFloat;
llvm::APInt VectorSize(64, 0);
if (Str.size() >= 4 && Str[0] == 'V') {
// Minimal length of vector mode is 4: 'V' + NUMBER(>=1) + TYPE(>=2).
@@ -4261,7 +4791,7 @@ void Sema::AddModeAttr(Decl *D, const AttributeCommonInfo &CI,
!Str.substr(1, VectorStringLength).getAsInteger(10, VectorSize) &&
VectorSize.isPowerOf2()) {
parseModeAttrArg(*this, Str.substr(VectorStringLength + 1), DestWidth,
- IntegerMode, ComplexMode, ExplicitIEEE);
+ IntegerMode, ComplexMode, ExplicitType);
// Avoid duplicate warning from template instantiation.
if (!InInstantiation)
Diag(AttrLoc, diag::warn_vector_mode_deprecated);
@@ -4272,7 +4802,7 @@ void Sema::AddModeAttr(Decl *D, const AttributeCommonInfo &CI,
if (!VectorSize)
parseModeAttrArg(*this, Str, DestWidth, IntegerMode, ComplexMode,
- ExplicitIEEE);
+ ExplicitType);
// FIXME: Sync this with InitializePredefinedMacros; we need to match int8_t
// and friends, at least with glibc.
@@ -4315,7 +4845,7 @@ void Sema::AddModeAttr(Decl *D, const AttributeCommonInfo &CI,
return;
}
bool IntegralOrAnyEnumType = (OldElemTy->isIntegralOrEnumerationType() &&
- !OldElemTy->isExtIntType()) ||
+ !OldElemTy->isBitIntType()) ||
OldElemTy->getAs<EnumType>();
if (!OldElemTy->getAs<BuiltinType>() && !OldElemTy->isComplexType() &&
@@ -4338,7 +4868,7 @@ void Sema::AddModeAttr(Decl *D, const AttributeCommonInfo &CI,
NewElemTy = Context.getIntTypeForBitwidth(DestWidth,
OldElemTy->isSignedIntegerType());
else
- NewElemTy = Context.getRealTypeForBitwidth(DestWidth, ExplicitIEEE);
+ NewElemTy = Context.getRealTypeForBitwidth(DestWidth, ExplicitType);
if (NewElemTy.isNull()) {
Diag(AttrLoc, diag::err_machine_mode) << 1 /*Unsupported*/ << Name;
@@ -4352,7 +4882,7 @@ void Sema::AddModeAttr(Decl *D, const AttributeCommonInfo &CI,
QualType NewTy = NewElemTy;
if (VectorSize.getBoolValue()) {
NewTy = Context.getVectorType(NewTy, VectorSize.getZExtValue(),
- VectorType::GenericVector);
+ VectorKind::Generic);
} else if (const auto *OldVT = OldTy->getAs<VectorType>()) {
// Complex machine mode does not support base vector types.
if (ComplexMode) {
@@ -4408,8 +4938,9 @@ InternalLinkageAttr *Sema::mergeInternalLinkageAttr(Decl *D,
// ImplicitParm or VarTemplateSpecialization).
if (VD->getKind() != Decl::Var) {
Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL << (getLangOpts().CPlusPlus ? ExpectedFunctionVariableOrClass
- : ExpectedVariableOrFunction);
+ << AL << AL.isRegularKeywordAttribute()
+ << (getLangOpts().CPlusPlus ? ExpectedFunctionVariableOrClass
+ : ExpectedVariableOrFunction);
return nullptr;
}
// Attribute does not apply to non-static local variables.
@@ -4428,8 +4959,9 @@ Sema::mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL) {
// ImplicitParm or VarTemplateSpecialization).
if (VD->getKind() != Decl::Var) {
Diag(AL.getLocation(), diag::warn_attribute_wrong_decl_type)
- << &AL << (getLangOpts().CPlusPlus ? ExpectedFunctionVariableOrClass
- : ExpectedVariableOrFunction);
+ << &AL << AL.isRegularKeywordAttribute()
+ << (getLangOpts().CPlusPlus ? ExpectedFunctionVariableOrClass
+ : ExpectedVariableOrFunction);
return nullptr;
}
// Attribute does not apply to non-static local variables.
@@ -4460,7 +4992,9 @@ SwiftNameAttr *Sema::mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA,
if (const auto *PrevSNA = D->getAttr<SwiftNameAttr>()) {
if (PrevSNA->getName() != Name && !PrevSNA->isImplicit()) {
Diag(PrevSNA->getLocation(), diag::err_attributes_are_not_compatible)
- << PrevSNA << &SNA;
+ << PrevSNA << &SNA
+ << (PrevSNA->isRegularKeywordAttribute() ||
+ SNA.isRegularKeywordAttribute());
Diag(SNA.getLoc(), diag::note_conflicting_attribute);
}
@@ -4560,7 +5094,10 @@ static void handleGlobalAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (FD->isInlineSpecified() && !S.getLangOpts().CUDAIsDevice)
S.Diag(FD->getBeginLoc(), diag::warn_kern_is_inline) << FD;
- D->addAttr(::new (S.Context) CUDAGlobalAttr(S.Context, AL));
+ if (AL.getKind() == ParsedAttr::AT_NVPTXKernel)
+ D->addAttr(::new (S.Context) NVPTXKernelAttr(S.Context, AL));
+ else
+ D->addAttr(::new (S.Context) CUDAGlobalAttr(S.Context, AL));
// In host compilation the kernel is emitted as a stub function, which is
// a helper function for launching the kernel. The instructions in the helper
// function has nothing to do with the source code of the kernel. Do not emit
@@ -4617,12 +5154,13 @@ static void handleCallConvAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Diagnostic is emitted elsewhere: here we store the (valid) AL
// in the Decl node for syntactic reasoning, e.g., pretty-printing.
CallingConv CC;
- if (S.CheckCallingConvAttr(AL, CC, /*FD*/nullptr))
+ if (S.CheckCallingConvAttr(AL, CC, /*FD*/ nullptr,
+ S.IdentifyCUDATarget(dyn_cast<FunctionDecl>(D))))
return;
if (!isa<ObjCMethodDecl>(D)) {
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL << ExpectedFunctionOrMethod;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunctionOrMethod;
return;
}
@@ -4679,6 +5217,12 @@ static void handleCallConvAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
case ParsedAttr::AT_AArch64VectorPcs:
D->addAttr(::new (S.Context) AArch64VectorPcsAttr(S.Context, AL));
return;
+ case ParsedAttr::AT_AArch64SVEPcs:
+ D->addAttr(::new (S.Context) AArch64SVEPcsAttr(S.Context, AL));
+ return;
+ case ParsedAttr::AT_AMDGPUKernelCall:
+ D->addAttr(::new (S.Context) AMDGPUKernelCallAttr(S.Context, AL));
+ return;
case ParsedAttr::AT_IntelOclBicc:
D->addAttr(::new (S.Context) IntelOclBiccAttr(S.Context, AL));
return;
@@ -4688,14 +5232,25 @@ static void handleCallConvAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
case ParsedAttr::AT_PreserveAll:
D->addAttr(::new (S.Context) PreserveAllAttr(S.Context, AL));
return;
+ case ParsedAttr::AT_M68kRTD:
+ D->addAttr(::new (S.Context) M68kRTDAttr(S.Context, AL));
+ return;
default:
llvm_unreachable("unexpected attribute kind");
}
}
static void handleSuppressAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!AL.checkAtLeastNumArgs(S, 1))
+ if (AL.getAttributeSpellingListIndex() == SuppressAttr::CXX11_gsl_suppress) {
+ // Suppression attribute with GSL spelling requires at least 1 argument.
+ if (!AL.checkAtLeastNumArgs(S, 1))
+ return;
+ } else if (!isa<VarDecl>(D)) {
+ // Analyzer suppression applies only to variables and statements.
+ S.Diag(AL.getLoc(), diag::err_attribute_wrong_decl_type_str)
+ << AL << 0 << "variables and statements";
return;
+ }
std::vector<StringRef> DiagnosticIdentifiers;
for (unsigned I = 0, E = AL.getNumArgs(); I != E; ++I) {
@@ -4704,8 +5259,6 @@ static void handleSuppressAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!S.checkStringLiteralArgumentAttr(AL, I, RuleName, nullptr))
return;
- // FIXME: Warn if the rule name is unknown. This is tricky because only
- // clang-tidy knows about available rules.
DiagnosticIdentifiers.push_back(RuleName);
}
D->addAttr(::new (S.Context)
@@ -4733,7 +5286,7 @@ static void handleLifetimeCategoryAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
// To check if earlier decl attributes do not conflict the newly parsed ones
- // we always add (and check) the attribute to the cannonical decl. We need
+ // we always add (and check) the attribute to the canonical decl. We need
// to repeat the check for attribute mutual exclusion because we're attaching
// all of the attributes to the canonical declaration rather than the current
// declaration.
@@ -4747,7 +5300,9 @@ static void handleLifetimeCategoryAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
: nullptr;
if (ExistingDerefType != ParmType.getTypePtrOrNull()) {
S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible)
- << AL << OAttr;
+ << AL << OAttr
+ << (AL.isRegularKeywordAttribute() ||
+ OAttr->isRegularKeywordAttribute());
S.Diag(OAttr->getLocation(), diag::note_conflicting_attribute);
}
return;
@@ -4764,7 +5319,9 @@ static void handleLifetimeCategoryAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
: nullptr;
if (ExistingDerefType != ParmType.getTypePtrOrNull()) {
S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible)
- << AL << PAttr;
+ << AL << PAttr
+ << (AL.isRegularKeywordAttribute() ||
+ PAttr->isRegularKeywordAttribute());
S.Diag(PAttr->getLocation(), diag::note_conflicting_attribute);
}
return;
@@ -4776,8 +5333,24 @@ static void handleLifetimeCategoryAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
}
+static void handleRandomizeLayoutAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (checkAttrMutualExclusion<NoRandomizeLayoutAttr>(S, D, AL))
+ return;
+ if (!D->hasAttr<RandomizeLayoutAttr>())
+ D->addAttr(::new (S.Context) RandomizeLayoutAttr(S.Context, AL));
+}
+
+static void handleNoRandomizeLayoutAttr(Sema &S, Decl *D,
+ const ParsedAttr &AL) {
+ if (checkAttrMutualExclusion<RandomizeLayoutAttr>(S, D, AL))
+ return;
+ if (!D->hasAttr<NoRandomizeLayoutAttr>())
+ D->addAttr(::new (S.Context) NoRandomizeLayoutAttr(S.Context, AL));
+}
+
bool Sema::CheckCallingConvAttr(const ParsedAttr &Attrs, CallingConv &CC,
- const FunctionDecl *FD) {
+ const FunctionDecl *FD,
+ CUDAFunctionTarget CFT) {
if (Attrs.isInvalid())
return true;
@@ -4821,6 +5394,12 @@ bool Sema::CheckCallingConvAttr(const ParsedAttr &Attrs, CallingConv &CC,
case ParsedAttr::AT_AArch64VectorPcs:
CC = CC_AArch64VectorCall;
break;
+ case ParsedAttr::AT_AArch64SVEPcs:
+ CC = CC_AArch64SVEPCS;
+ break;
+ case ParsedAttr::AT_AMDGPUKernelCall:
+ CC = CC_AMDGPUKernelCall;
+ break;
case ParsedAttr::AT_RegCall:
CC = CC_X86RegCall;
break;
@@ -4859,6 +5438,9 @@ bool Sema::CheckCallingConvAttr(const ParsedAttr &Attrs, CallingConv &CC,
case ParsedAttr::AT_PreserveAll:
CC = CC_PreserveAll;
break;
+ case ParsedAttr::AT_M68kRTD:
+ CC = CC_M68kRTD;
+ break;
default: llvm_unreachable("unexpected attribute kind");
}
@@ -4870,7 +5452,8 @@ bool Sema::CheckCallingConvAttr(const ParsedAttr &Attrs, CallingConv &CC,
// on their host/device attributes.
if (LangOpts.CUDA) {
auto *Aux = Context.getAuxTargetInfo();
- auto CudaTarget = IdentifyCUDATarget(FD);
+ assert(FD || CFT != CFT_InvalidTarget);
+ auto CudaTarget = FD ? IdentifyCUDATarget(FD) : CFT;
bool CheckHost = false, CheckDevice = false;
switch (CudaTarget) {
case CFT_HostDevice:
@@ -4975,7 +5558,9 @@ void Sema::AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
if (auto existingAttr = D->getAttr<ParameterABIAttr>()) {
if (existingAttr->getABI() != abi) {
Diag(CI.getLoc(), diag::err_attributes_are_not_compatible)
- << getParameterABISpelling(abi) << existingAttr;
+ << getParameterABISpelling(abi) << existingAttr
+ << (CI.isRegularKeywordAttribute() ||
+ existingAttr->isRegularKeywordAttribute());
Diag(existingAttr->getLocation(), diag::note_conflicting_attribute);
return;
}
@@ -5056,6 +5641,14 @@ bool Sema::CheckRegparmAttr(const ParsedAttr &AL, unsigned &numParams) {
return false;
}
+// Helper to get CudaArch.
+static CudaArch getCudaArch(const TargetInfo &TI) {
+ if (!TI.getTriple().isNVPTX())
+ llvm_unreachable("getCudaArch is only valid for NVPTX triple");
+ auto &TO = TI.getTargetOpts();
+ return StringToCudaArch(TO.CPU);
+}
+
// Checks whether an argument of launch_bounds attribute is
// acceptable, performs implicit conversion to Rvalue, and returns
// non-nullptr Expr result on success. Otherwise, it returns nullptr
@@ -5071,7 +5664,7 @@ static Expr *makeLaunchBoundsArgExpr(Sema &S, Expr *E,
if (E->isValueDependent())
return E;
- Optional<llvm::APSInt> I = llvm::APSInt(64);
+ std::optional<llvm::APSInt> I = llvm::APSInt(64);
if (!(I = E->getIntegerConstantExpr(S.Context))) {
S.Diag(E->getExprLoc(), diag::err_attribute_argument_n_type)
<< &AL << Idx << AANT_ArgumentIntegerConstant << E->getSourceRange();
@@ -5097,29 +5690,53 @@ static Expr *makeLaunchBoundsArgExpr(Sema &S, Expr *E,
return ValArg.getAs<Expr>();
}
-void Sema::AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
- Expr *MaxThreads, Expr *MinBlocks) {
- CUDALaunchBoundsAttr TmpAttr(Context, CI, MaxThreads, MinBlocks);
+CUDALaunchBoundsAttr *
+Sema::CreateLaunchBoundsAttr(const AttributeCommonInfo &CI, Expr *MaxThreads,
+ Expr *MinBlocks, Expr *MaxBlocks) {
+ CUDALaunchBoundsAttr TmpAttr(Context, CI, MaxThreads, MinBlocks, MaxBlocks);
MaxThreads = makeLaunchBoundsArgExpr(*this, MaxThreads, TmpAttr, 0);
- if (MaxThreads == nullptr)
- return;
+ if (!MaxThreads)
+ return nullptr;
if (MinBlocks) {
MinBlocks = makeLaunchBoundsArgExpr(*this, MinBlocks, TmpAttr, 1);
- if (MinBlocks == nullptr)
- return;
+ if (!MinBlocks)
+ return nullptr;
}
- D->addAttr(::new (Context)
- CUDALaunchBoundsAttr(Context, CI, MaxThreads, MinBlocks));
+ if (MaxBlocks) {
+ // '.maxclusterrank' ptx directive requires .target sm_90 or higher.
+ auto SM = getCudaArch(Context.getTargetInfo());
+ if (SM == CudaArch::UNKNOWN || SM < CudaArch::SM_90) {
+ Diag(MaxBlocks->getBeginLoc(), diag::warn_cuda_maxclusterrank_sm_90)
+ << CudaArchToString(SM) << CI << MaxBlocks->getSourceRange();
+ // Ignore it by setting MaxBlocks to null;
+ MaxBlocks = nullptr;
+ } else {
+ MaxBlocks = makeLaunchBoundsArgExpr(*this, MaxBlocks, TmpAttr, 2);
+ if (!MaxBlocks)
+ return nullptr;
+ }
+ }
+
+ return ::new (Context)
+ CUDALaunchBoundsAttr(Context, CI, MaxThreads, MinBlocks, MaxBlocks);
+}
+
+void Sema::AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
+ Expr *MaxThreads, Expr *MinBlocks,
+ Expr *MaxBlocks) {
+ if (auto *Attr = CreateLaunchBoundsAttr(CI, MaxThreads, MinBlocks, MaxBlocks))
+ D->addAttr(Attr);
}
static void handleLaunchBoundsAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!AL.checkAtLeastNumArgs(S, 1) || !AL.checkAtMostNumArgs(S, 2))
+ if (!AL.checkAtLeastNumArgs(S, 1) || !AL.checkAtMostNumArgs(S, 3))
return;
S.AddLaunchBoundsAttr(D, AL, AL.getArgAsExpr(0),
- AL.getNumArgs() > 1 ? AL.getArgAsExpr(1) : nullptr);
+ AL.getNumArgs() > 1 ? AL.getArgAsExpr(1) : nullptr,
+ AL.getNumArgs() > 2 ? AL.getArgAsExpr(2) : nullptr);
}
static void handleArgumentWithTypeTagAttr(Sema &S, Decl *D,
@@ -5167,7 +5784,7 @@ static void handleTypeTagForDatatypeAttr(Sema &S, Decl *D,
if (!isa<VarDecl>(D)) {
S.Diag(AL.getLoc(), diag::err_attribute_wrong_decl_type)
- << AL << ExpectedVariable;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedVariable;
return;
}
@@ -5224,11 +5841,11 @@ struct IntrinToName {
static bool ArmBuiltinAliasValid(unsigned BuiltinID, StringRef AliasName,
ArrayRef<IntrinToName> Map,
const char *IntrinNames) {
- if (AliasName.startswith("__arm_"))
- AliasName = AliasName.substr(6);
- const IntrinToName *It = std::lower_bound(
- Map.begin(), Map.end(), BuiltinID,
- [](const IntrinToName &L, unsigned Id) { return L.Id < Id; });
+ AliasName.consume_front("__arm_");
+ const IntrinToName *It =
+ llvm::lower_bound(Map, BuiltinID, [](const IntrinToName &L, unsigned Id) {
+ return L.Id < Id;
+ });
if (It == Map.end() || It->Id != BuiltinID)
return false;
StringRef FullName(&IntrinNames[It->FullName]);
@@ -5261,6 +5878,14 @@ static bool ArmSveAliasValid(ASTContext &Context, unsigned BuiltinID,
BuiltinID <= AArch64::LastSVEBuiltin;
}
+static bool ArmSmeAliasValid(ASTContext &Context, unsigned BuiltinID,
+ StringRef AliasName) {
+ if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID))
+ BuiltinID = Context.BuiltinInfo.getAuxBuiltinID(BuiltinID);
+ return BuiltinID >= AArch64::FirstSMEBuiltin &&
+ BuiltinID <= AArch64::LastSMEBuiltin;
+}
+
static void handleArmBuiltinAliasAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!AL.isArgIdent(0)) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
@@ -5273,7 +5898,8 @@ static void handleArmBuiltinAliasAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
StringRef AliasName = cast<FunctionDecl>(D)->getIdentifier()->getName();
bool IsAArch64 = S.Context.getTargetInfo().getTriple().isAArch64();
- if ((IsAArch64 && !ArmSveAliasValid(S.Context, BuiltinID, AliasName)) ||
+ if ((IsAArch64 && !ArmSveAliasValid(S.Context, BuiltinID, AliasName) &&
+ !ArmSmeAliasValid(S.Context, BuiltinID, AliasName)) ||
(!IsAArch64 && !ArmMveAliasValid(BuiltinID, AliasName) &&
!ArmCdeAliasValid(BuiltinID, AliasName))) {
S.Diag(AL.getLoc(), diag::err_attribute_arm_builtin_alias);
@@ -5284,8 +5910,8 @@ static void handleArmBuiltinAliasAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
static bool RISCVAliasValid(unsigned BuiltinID, StringRef AliasName) {
- return BuiltinID >= Builtin::FirstTSBuiltin &&
- BuiltinID < RISCV::LastTSBuiltin;
+ return BuiltinID >= RISCV::FirstRVVBuiltin &&
+ BuiltinID <= RISCV::LastRVVBuiltin;
}
static void handleBuiltinAliasAttr(Sema &S, Decl *D,
@@ -5303,11 +5929,12 @@ static void handleBuiltinAliasAttr(Sema &S, Decl *D,
bool IsAArch64 = S.Context.getTargetInfo().getTriple().isAArch64();
bool IsARM = S.Context.getTargetInfo().getTriple().isARM();
bool IsRISCV = S.Context.getTargetInfo().getTriple().isRISCV();
+ bool IsHLSL = S.Context.getLangOpts().HLSL;
if ((IsAArch64 && !ArmSveAliasValid(S.Context, BuiltinID, AliasName)) ||
(IsARM && !ArmMveAliasValid(BuiltinID, AliasName) &&
!ArmCdeAliasValid(BuiltinID, AliasName)) ||
(IsRISCV && !RISCVAliasValid(BuiltinID, AliasName)) ||
- (!IsAArch64 && !IsARM && !IsRISCV)) {
+ (!IsAArch64 && !IsARM && !IsRISCV && !IsHLSL)) {
S.Diag(AL.getLoc(), diag::err_attribute_builtin_alias) << AL;
return;
}
@@ -5315,6 +5942,21 @@ static void handleBuiltinAliasAttr(Sema &S, Decl *D,
D->addAttr(::new (S.Context) BuiltinAliasAttr(S.Context, AL, Ident));
}
+static void handlePreferredTypeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!AL.hasParsedType()) {
+ S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments) << AL << 1;
+ return;
+ }
+
+ TypeSourceInfo *ParmTSI = nullptr;
+ QualType QT = S.GetTypeFromParser(AL.getTypeArg(), &ParmTSI);
+ assert(ParmTSI && "no type source info for attribute argument");
+ S.RequireCompleteType(ParmTSI->getTypeLoc().getBeginLoc(), QT,
+ diag::err_incomplete_type);
+
+ D->addAttr(::new (S.Context) PreferredTypeAttr(S.Context, AL, ParmTSI));
+}
+
//===----------------------------------------------------------------------===//
// Checker-specific attribute handlers.
//===----------------------------------------------------------------------===//
@@ -5462,7 +6104,8 @@ static void handleXReturnsXRetainedAttr(Sema &S, Decl *D,
break;
}
S.Diag(D->getBeginLoc(), diag::warn_attribute_wrong_decl_type)
- << AL.getRange() << AL << ExpectedDeclKind;
+ << AL.getRange() << AL << AL.isRegularKeywordAttribute()
+ << ExpectedDeclKind;
return;
}
@@ -5578,7 +6221,7 @@ static void handleObjCRequiresSuperAttr(Sema &S, Decl *D,
const auto *Method = cast<ObjCMethodDecl>(D);
const DeclContext *DC = Method->getDeclContext();
- if (const auto *PDecl = dyn_cast_or_null<ObjCProtocolDecl>(DC)) {
+ if (const auto *PDecl = dyn_cast_if_present<ObjCProtocolDecl>(DC)) {
S.Diag(D->getBeginLoc(), diag::warn_objc_requires_super_protocol) << Attrs
<< 0;
S.Diag(PDecl->getLocation(), diag::note_protocol_decl);
@@ -5593,29 +6236,35 @@ static void handleObjCRequiresSuperAttr(Sema &S, Decl *D,
D->addAttr(::new (S.Context) ObjCRequiresSuperAttr(S.Context, Attrs));
}
-static void handleNSErrorDomain(Sema &S, Decl *D, const ParsedAttr &AL) {
- auto *E = AL.getArgAsExpr(0);
- auto Loc = E ? E->getBeginLoc() : AL.getLoc();
-
- auto *DRE = dyn_cast<DeclRefExpr>(AL.getArgAsExpr(0));
- if (!DRE) {
- S.Diag(Loc, diag::err_nserrordomain_invalid_decl) << 0;
+static void handleNSErrorDomain(Sema &S, Decl *D, const ParsedAttr &Attr) {
+ if (!isa<TagDecl>(D)) {
+ S.Diag(D->getBeginLoc(), diag::err_nserrordomain_invalid_decl) << 0;
return;
}
- auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
- if (!VD) {
- S.Diag(Loc, diag::err_nserrordomain_invalid_decl) << 1 << DRE->getDecl();
+ IdentifierLoc *IdentLoc =
+ Attr.isArgIdent(0) ? Attr.getArgAsIdent(0) : nullptr;
+ if (!IdentLoc || !IdentLoc->Ident) {
+ // Try to locate the argument directly.
+ SourceLocation Loc = Attr.getLoc();
+ if (Attr.isArgExpr(0) && Attr.getArgAsExpr(0))
+ Loc = Attr.getArgAsExpr(0)->getBeginLoc();
+
+ S.Diag(Loc, diag::err_nserrordomain_invalid_decl) << 0;
return;
}
- if (!isNSStringType(VD->getType(), S.Context) &&
- !isCFStringType(VD->getType(), S.Context)) {
- S.Diag(Loc, diag::err_nserrordomain_wrong_type) << VD;
+ // Verify that the identifier is a valid decl in the C decl namespace.
+ LookupResult Result(S, DeclarationName(IdentLoc->Ident), SourceLocation(),
+ Sema::LookupNameKind::LookupOrdinaryName);
+ if (!S.LookupName(Result, S.TUScope) || !Result.getAsSingle<VarDecl>()) {
+ S.Diag(IdentLoc->Loc, diag::err_nserrordomain_invalid_decl)
+ << 1 << IdentLoc->Ident;
return;
}
- D->addAttr(::new (S.Context) NSErrorDomainAttr(S.Context, AL, VD));
+ D->addAttr(::new (S.Context)
+ NSErrorDomainAttr(S.Context, Attr, IdentLoc->Ident));
}
static void handleObjCBridgeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
@@ -5734,10 +6383,12 @@ static void handleObjCBoxable(Sema &S, Decl *D, const ParsedAttr &AL) {
}
static void handleObjCOwnershipAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (hasDeclarator(D)) return;
+ if (hasDeclarator(D))
+ return;
S.Diag(D->getBeginLoc(), diag::err_attribute_wrong_decl_type)
- << AL.getRange() << AL << ExpectedVariable;
+ << AL.getRange() << AL << AL.isRegularKeywordAttribute()
+ << ExpectedVariable;
}
static void handleObjCPreciseLifetimeAttr(Sema &S, Decl *D,
@@ -6039,10 +6690,10 @@ validateSwiftFunctionName(Sema &S, const ParsedAttr &AL, SourceLocation Loc,
// Check whether this will be mapped to a getter or setter of a property.
bool IsGetter = false, IsSetter = false;
- if (Name.startswith("getter:")) {
+ if (Name.starts_with("getter:")) {
IsGetter = true;
Name = Name.substr(7);
- } else if (Name.startswith("setter:")) {
+ } else if (Name.starts_with("setter:")) {
IsSetter = true;
Name = Name.substr(7);
}
@@ -6063,7 +6714,7 @@ validateSwiftFunctionName(Sema &S, const ParsedAttr &AL, SourceLocation Loc,
if (BaseName.empty()) {
BaseName = ContextName;
ContextName = StringRef();
- } else if (ContextName.empty() || !isValidIdentifier(ContextName)) {
+ } else if (ContextName.empty() || !isValidAsciiIdentifier(ContextName)) {
S.Diag(Loc, diag::warn_attr_swift_name_invalid_identifier)
<< AL << /*context*/ 1;
return false;
@@ -6071,7 +6722,7 @@ validateSwiftFunctionName(Sema &S, const ParsedAttr &AL, SourceLocation Loc,
IsMember = true;
}
- if (!isValidIdentifier(BaseName) || BaseName == "_") {
+ if (!isValidAsciiIdentifier(BaseName) || BaseName == "_") {
S.Diag(Loc, diag::warn_attr_swift_name_invalid_identifier)
<< AL << /*basename*/ 0;
return false;
@@ -6115,13 +6766,13 @@ validateSwiftFunctionName(Sema &S, const ParsedAttr &AL, SourceLocation Loc,
}
StringRef CurrentParam;
- llvm::Optional<unsigned> SelfLocation;
+ std::optional<unsigned> SelfLocation;
unsigned NewValueCount = 0;
- llvm::Optional<unsigned> NewValueLocation;
+ std::optional<unsigned> NewValueLocation;
do {
std::tie(CurrentParam, Parameters) = Parameters.split(':');
- if (!isValidIdentifier(CurrentParam)) {
+ if (!isValidAsciiIdentifier(CurrentParam)) {
S.Diag(Loc, diag::warn_attr_swift_name_invalid_identifier)
<< AL << /*parameter*/2;
return false;
@@ -6230,7 +6881,8 @@ bool Sema::DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
Params = F->parameters();
if (!F->hasWrittenPrototype()) {
- Diag(Loc, diag::warn_attribute_wrong_decl_type) << AL
+ Diag(Loc, diag::warn_attribute_wrong_decl_type)
+ << AL << AL.isRegularKeywordAttribute()
<< ExpectedFunctionWithProtoType;
return false;
}
@@ -6262,13 +6914,12 @@ bool Sema::DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
// might be because we've transformed some of them. Check for potential
// "out" parameters and err on the side of not warning.
unsigned MaybeOutParamCount =
- std::count_if(Params.begin(), Params.end(),
- [](const ParmVarDecl *Param) -> bool {
- QualType ParamTy = Param->getType();
- if (ParamTy->isReferenceType() || ParamTy->isPointerType())
- return !ParamTy->getPointeeType().isConstQualified();
- return false;
- });
+ llvm::count_if(Params, [](const ParmVarDecl *Param) -> bool {
+ QualType ParamTy = Param->getType();
+ if (ParamTy->isReferenceType() || ParamTy->isPointerType())
+ return !ParamTy->getPointeeType().isConstQualified();
+ return false;
+ });
ParamCountValid = SwiftParamCount + MaybeOutParamCount >= ParamCount;
}
@@ -6290,13 +6941,13 @@ bool Sema::DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
if (BaseName.empty()) {
BaseName = ContextName;
ContextName = StringRef();
- } else if (!isValidIdentifier(ContextName)) {
+ } else if (!isValidAsciiIdentifier(ContextName)) {
Diag(Loc, diag::warn_attr_swift_name_invalid_identifier) << AL
<< /*context*/1;
return false;
}
- if (!isValidIdentifier(BaseName)) {
+ if (!isValidAsciiIdentifier(BaseName)) {
Diag(Loc, diag::warn_attr_swift_name_invalid_identifier) << AL
<< /*basename*/0;
return false;
@@ -6352,7 +7003,7 @@ static void handleSwiftNewType(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!isa<TypedefNameDecl>(D)) {
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type_str)
- << AL << "typedefs";
+ << AL << AL.isRegularKeywordAttribute() << "typedefs";
return;
}
@@ -6493,6 +7144,230 @@ static void handleUuidAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(UA);
}
+static void handleHLSLNumThreadsAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ llvm::VersionTuple SMVersion =
+ S.Context.getTargetInfo().getTriple().getOSVersion();
+ uint32_t ZMax = 1024;
+ uint32_t ThreadMax = 1024;
+ if (SMVersion.getMajor() <= 4) {
+ ZMax = 1;
+ ThreadMax = 768;
+ } else if (SMVersion.getMajor() == 5) {
+ ZMax = 64;
+ ThreadMax = 1024;
+ }
+
+ uint32_t X;
+ if (!checkUInt32Argument(S, AL, AL.getArgAsExpr(0), X))
+ return;
+ if (X > 1024) {
+ S.Diag(AL.getArgAsExpr(0)->getExprLoc(),
+ diag::err_hlsl_numthreads_argument_oor) << 0 << 1024;
+ return;
+ }
+ uint32_t Y;
+ if (!checkUInt32Argument(S, AL, AL.getArgAsExpr(1), Y))
+ return;
+ if (Y > 1024) {
+ S.Diag(AL.getArgAsExpr(1)->getExprLoc(),
+ diag::err_hlsl_numthreads_argument_oor) << 1 << 1024;
+ return;
+ }
+ uint32_t Z;
+ if (!checkUInt32Argument(S, AL, AL.getArgAsExpr(2), Z))
+ return;
+ if (Z > ZMax) {
+ S.Diag(AL.getArgAsExpr(2)->getExprLoc(),
+ diag::err_hlsl_numthreads_argument_oor) << 2 << ZMax;
+ return;
+ }
+
+ if (X * Y * Z > ThreadMax) {
+ S.Diag(AL.getLoc(), diag::err_hlsl_numthreads_invalid) << ThreadMax;
+ return;
+ }
+
+ HLSLNumThreadsAttr *NewAttr = S.mergeHLSLNumThreadsAttr(D, AL, X, Y, Z);
+ if (NewAttr)
+ D->addAttr(NewAttr);
+}
+
+HLSLNumThreadsAttr *Sema::mergeHLSLNumThreadsAttr(Decl *D,
+ const AttributeCommonInfo &AL,
+ int X, int Y, int Z) {
+ if (HLSLNumThreadsAttr *NT = D->getAttr<HLSLNumThreadsAttr>()) {
+ if (NT->getX() != X || NT->getY() != Y || NT->getZ() != Z) {
+ Diag(NT->getLocation(), diag::err_hlsl_attribute_param_mismatch) << AL;
+ Diag(AL.getLoc(), diag::note_conflicting_attribute);
+ }
+ return nullptr;
+ }
+ return ::new (Context) HLSLNumThreadsAttr(Context, AL, X, Y, Z);
+}
+
+static bool isLegalTypeForHLSLSV_DispatchThreadID(QualType T) {
+ if (!T->hasUnsignedIntegerRepresentation())
+ return false;
+ if (const auto *VT = T->getAs<VectorType>())
+ return VT->getNumElements() <= 3;
+ return true;
+}
+
+static void handleHLSLSV_DispatchThreadIDAttr(Sema &S, Decl *D,
+ const ParsedAttr &AL) {
+ // FIXME: support semantic on field.
+ // See https://github.com/llvm/llvm-project/issues/57889.
+ if (isa<FieldDecl>(D)) {
+ S.Diag(AL.getLoc(), diag::err_hlsl_attr_invalid_ast_node)
+ << AL << "parameter";
+ return;
+ }
+
+ auto *VD = cast<ValueDecl>(D);
+ if (!isLegalTypeForHLSLSV_DispatchThreadID(VD->getType())) {
+ S.Diag(AL.getLoc(), diag::err_hlsl_attr_invalid_type)
+ << AL << "uint/uint2/uint3";
+ return;
+ }
+
+ D->addAttr(::new (S.Context) HLSLSV_DispatchThreadIDAttr(S.Context, AL));
+}
+
+static void handleHLSLShaderAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ StringRef Str;
+ SourceLocation ArgLoc;
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
+ return;
+
+ HLSLShaderAttr::ShaderType ShaderType;
+ if (!HLSLShaderAttr::ConvertStrToShaderType(Str, ShaderType)) {
+ S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
+ << AL << Str << ArgLoc;
+ return;
+ }
+
+ // FIXME: check function match the shader stage.
+
+ HLSLShaderAttr *NewAttr = S.mergeHLSLShaderAttr(D, AL, ShaderType);
+ if (NewAttr)
+ D->addAttr(NewAttr);
+}
+
+HLSLShaderAttr *
+Sema::mergeHLSLShaderAttr(Decl *D, const AttributeCommonInfo &AL,
+ HLSLShaderAttr::ShaderType ShaderType) {
+ if (HLSLShaderAttr *NT = D->getAttr<HLSLShaderAttr>()) {
+ if (NT->getType() != ShaderType) {
+ Diag(NT->getLocation(), diag::err_hlsl_attribute_param_mismatch) << AL;
+ Diag(AL.getLoc(), diag::note_conflicting_attribute);
+ }
+ return nullptr;
+ }
+ return HLSLShaderAttr::Create(Context, ShaderType, AL);
+}
+
+static void handleHLSLResourceBindingAttr(Sema &S, Decl *D,
+ const ParsedAttr &AL) {
+ StringRef Space = "space0";
+ StringRef Slot = "";
+
+ if (!AL.isArgIdent(0)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
+ << AL << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ IdentifierLoc *Loc = AL.getArgAsIdent(0);
+ StringRef Str = Loc->Ident->getName();
+ SourceLocation ArgLoc = Loc->Loc;
+
+ SourceLocation SpaceArgLoc;
+ if (AL.getNumArgs() == 2) {
+ Slot = Str;
+ if (!AL.isArgIdent(1)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
+ << AL << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ IdentifierLoc *Loc = AL.getArgAsIdent(1);
+ Space = Loc->Ident->getName();
+ SpaceArgLoc = Loc->Loc;
+ } else {
+ Slot = Str;
+ }
+
+ // Validate.
+ if (!Slot.empty()) {
+ switch (Slot[0]) {
+ case 'u':
+ case 'b':
+ case 's':
+ case 't':
+ break;
+ default:
+ S.Diag(ArgLoc, diag::err_hlsl_unsupported_register_type)
+ << Slot.substr(0, 1);
+ return;
+ }
+
+ StringRef SlotNum = Slot.substr(1);
+ unsigned Num = 0;
+ if (SlotNum.getAsInteger(10, Num)) {
+ S.Diag(ArgLoc, diag::err_hlsl_unsupported_register_number);
+ return;
+ }
+ }
+
+ if (!Space.starts_with("space")) {
+ S.Diag(SpaceArgLoc, diag::err_hlsl_expected_space) << Space;
+ return;
+ }
+ StringRef SpaceNum = Space.substr(5);
+ unsigned Num = 0;
+ if (SpaceNum.getAsInteger(10, Num)) {
+ S.Diag(SpaceArgLoc, diag::err_hlsl_expected_space) << Space;
+ return;
+ }
+
+ // FIXME: check reg type match decl. Issue
+ // https://github.com/llvm/llvm-project/issues/57886.
+ HLSLResourceBindingAttr *NewAttr =
+ HLSLResourceBindingAttr::Create(S.getASTContext(), Slot, Space, AL);
+ if (NewAttr)
+ D->addAttr(NewAttr);
+}
+
+static void handleHLSLParamModifierAttr(Sema &S, Decl *D,
+ const ParsedAttr &AL) {
+ HLSLParamModifierAttr *NewAttr = S.mergeHLSLParamModifierAttr(
+ D, AL,
+ static_cast<HLSLParamModifierAttr::Spelling>(AL.getSemanticSpelling()));
+ if (NewAttr)
+ D->addAttr(NewAttr);
+}
+
+HLSLParamModifierAttr *
+Sema::mergeHLSLParamModifierAttr(Decl *D, const AttributeCommonInfo &AL,
+ HLSLParamModifierAttr::Spelling Spelling) {
+ // We can only merge an `in` attribute with an `out` attribute. All other
+ // combinations of duplicated attributes are ill-formed.
+ if (HLSLParamModifierAttr *PA = D->getAttr<HLSLParamModifierAttr>()) {
+ if ((PA->isIn() && Spelling == HLSLParamModifierAttr::Keyword_out) ||
+ (PA->isOut() && Spelling == HLSLParamModifierAttr::Keyword_in)) {
+ D->dropAttr<HLSLParamModifierAttr>();
+ SourceRange AdjustedRange = {PA->getLocation(), AL.getRange().getEnd()};
+ return HLSLParamModifierAttr::Create(
+ Context, /*MergedSpelling=*/true, AdjustedRange,
+ HLSLParamModifierAttr::Keyword_inout);
+ }
+ Diag(AL.getLoc(), diag::err_hlsl_duplicate_parameter_modifier) << AL;
+ Diag(PA->getLocation(), diag::note_conflicting_attribute);
+ return nullptr;
+ }
+ return HLSLParamModifierAttr::Create(Context, AL);
+}
+
static void handleMSInheritanceAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!S.LangOpts.CPlusPlus) {
S.Diag(AL.getLoc(), diag::err_attribute_not_supported_in_lang)
@@ -6524,6 +7399,28 @@ static void handleDeclspecThreadAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(::new (S.Context) ThreadAttr(S.Context, AL));
}
+static void handleMSConstexprAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!S.getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2022_3)) {
+ S.Diag(AL.getLoc(), diag::warn_unknown_attribute_ignored)
+ << AL << AL.getRange();
+ return;
+ }
+ auto *FD = cast<FunctionDecl>(D);
+ if (FD->isConstexprSpecified() || FD->isConsteval()) {
+ S.Diag(AL.getLoc(), diag::err_ms_constexpr_cannot_be_applied)
+ << FD->isConsteval() << FD;
+ return;
+ }
+ if (auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ if (!S.getLangOpts().CPlusPlus20 && MD->isVirtual()) {
+ S.Diag(AL.getLoc(), diag::err_ms_constexpr_cannot_be_applied)
+ << /*virtual*/ 2 << MD;
+ return;
+ }
+ }
+ D->addAttr(::new (S.Context) MSConstexprAttr(S.Context, AL));
+}
+
static void handleAbiTagAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
SmallVector<StringRef, 4> Tags;
for (unsigned I = 0, E = AL.getNumArgs(); I != E; ++I) {
@@ -6585,7 +7482,7 @@ static void handleMSP430InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// a function with no parameters and void return type.
if (!isFunctionOrMethod(D)) {
S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << "'interrupt'" << ExpectedFunctionOrMethod;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunctionOrMethod;
return;
}
@@ -6612,7 +7509,7 @@ static void handleMSP430InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
Expr *NumParamsExpr = static_cast<Expr *>(AL.getArgAsExpr(0));
- Optional<llvm::APSInt> NumParams = llvm::APSInt(32);
+ std::optional<llvm::APSInt> NumParams = llvm::APSInt(32);
if (!(NumParams = NumParamsExpr->getIntegerConstantExpr(S.Context))) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
<< AL << AANT_ArgumentIntegerConstant
@@ -6658,7 +7555,7 @@ static void handleMipsInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!isFunctionOrMethod(D)) {
S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << "'interrupt'" << ExpectedFunctionOrMethod;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunctionOrMethod;
return;
}
@@ -6733,7 +7630,8 @@ static void handleAnyX86InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
CXXMethodDecl::isStaticOverloadedOperator(
cast<NamedDecl>(D)->getDeclName().getCXXOverloadedOperator())) {
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL << ExpectedFunctionWithProtoType;
+ << AL << AL.isRegularKeywordAttribute()
+ << ExpectedFunctionWithProtoType;
return;
}
// Interrupt handler must have void return type.
@@ -6789,7 +7687,7 @@ static void handleAnyX86InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleAVRInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!isFunctionOrMethod(D)) {
S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << "'interrupt'" << ExpectedFunction;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
return;
}
@@ -6802,7 +7700,7 @@ static void handleAVRInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleAVRSignalAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!isFunctionOrMethod(D)) {
S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << "'signal'" << ExpectedFunction;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
return;
}
@@ -6814,7 +7712,7 @@ static void handleAVRSignalAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleBPFPreserveAIRecord(Sema &S, RecordDecl *RD) {
// Add preserve_access_index attribute to all fields and inner records.
- for (auto D : RD->decls()) {
+ for (auto *D : RD->decls()) {
if (D->hasAttr<BPFPreserveAccessIndexAttr>())
continue;
@@ -6831,10 +7729,35 @@ static void handleBPFPreserveAccessIndexAttr(Sema &S, Decl *D,
Rec->addAttr(::new (S.Context) BPFPreserveAccessIndexAttr(S.Context, AL));
}
-static void handleWebAssemblyExportNameAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+static bool hasBTFDeclTagAttr(Decl *D, StringRef Tag) {
+ for (const auto *I : D->specific_attrs<BTFDeclTagAttr>()) {
+ if (I->getBTFDeclTag() == Tag)
+ return true;
+ }
+ return false;
+}
+
+static void handleBTFDeclTagAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ StringRef Str;
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, Str))
+ return;
+ if (hasBTFDeclTagAttr(D, Str))
+ return;
+
+ D->addAttr(::new (S.Context) BTFDeclTagAttr(S.Context, AL, Str));
+}
+
+BTFDeclTagAttr *Sema::mergeBTFDeclTagAttr(Decl *D, const BTFDeclTagAttr &AL) {
+ if (hasBTFDeclTagAttr(D, AL.getBTFDeclTag()))
+ return nullptr;
+ return ::new (Context) BTFDeclTagAttr(Context, AL, AL.getBTFDeclTag());
+}
+
+static void handleWebAssemblyExportNameAttr(Sema &S, Decl *D,
+ const ParsedAttr &AL) {
if (!isFunctionOrMethod(D)) {
S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << "'export_name'" << ExpectedFunction;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
return;
}
@@ -6958,7 +7881,7 @@ static void handleRISCVInterruptAttr(Sema &S, Decl *D,
if (D->getFunctionType() == nullptr) {
S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
- << "'interrupt'" << ExpectedFunction;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
return;
}
@@ -7044,16 +7967,22 @@ checkAMDGPUFlatWorkGroupSizeArguments(Sema &S, Expr *MinExpr, Expr *MaxExpr,
return false;
}
-void Sema::addAMDGPUFlatWorkGroupSizeAttr(Decl *D,
- const AttributeCommonInfo &CI,
- Expr *MinExpr, Expr *MaxExpr) {
+AMDGPUFlatWorkGroupSizeAttr *
+Sema::CreateAMDGPUFlatWorkGroupSizeAttr(const AttributeCommonInfo &CI,
+ Expr *MinExpr, Expr *MaxExpr) {
AMDGPUFlatWorkGroupSizeAttr TmpAttr(Context, CI, MinExpr, MaxExpr);
if (checkAMDGPUFlatWorkGroupSizeArguments(*this, MinExpr, MaxExpr, TmpAttr))
- return;
+ return nullptr;
+ return ::new (Context)
+ AMDGPUFlatWorkGroupSizeAttr(Context, CI, MinExpr, MaxExpr);
+}
- D->addAttr(::new (Context)
- AMDGPUFlatWorkGroupSizeAttr(Context, CI, MinExpr, MaxExpr));
+void Sema::addAMDGPUFlatWorkGroupSizeAttr(Decl *D,
+ const AttributeCommonInfo &CI,
+ Expr *MinExpr, Expr *MaxExpr) {
+ if (auto *Attr = CreateAMDGPUFlatWorkGroupSizeAttr(CI, MinExpr, MaxExpr))
+ D->addAttr(Attr);
}
static void handleAMDGPUFlatWorkGroupSizeAttr(Sema &S, Decl *D,
@@ -7098,15 +8027,21 @@ static bool checkAMDGPUWavesPerEUArguments(Sema &S, Expr *MinExpr,
return false;
}
-void Sema::addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
- Expr *MinExpr, Expr *MaxExpr) {
+AMDGPUWavesPerEUAttr *
+Sema::CreateAMDGPUWavesPerEUAttr(const AttributeCommonInfo &CI, Expr *MinExpr,
+ Expr *MaxExpr) {
AMDGPUWavesPerEUAttr TmpAttr(Context, CI, MinExpr, MaxExpr);
if (checkAMDGPUWavesPerEUArguments(*this, MinExpr, MaxExpr, TmpAttr))
- return;
+ return nullptr;
- D->addAttr(::new (Context)
- AMDGPUWavesPerEUAttr(Context, CI, MinExpr, MaxExpr));
+ return ::new (Context) AMDGPUWavesPerEUAttr(Context, CI, MinExpr, MaxExpr);
+}
+
+void Sema::addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
+ Expr *MinExpr, Expr *MaxExpr) {
+ if (auto *Attr = CreateAMDGPUWavesPerEUAttr(CI, MinExpr, MaxExpr))
+ D->addAttr(Attr);
}
static void handleAMDGPUWavesPerEUAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
@@ -7153,7 +8088,7 @@ static void handleX86ForceAlignArgPointerAttr(Sema &S, Decl *D,
// Attribute can only be applied to function types.
if (!isa<FunctionDecl>(D)) {
S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
- << AL << ExpectedFunction;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
return;
}
@@ -7389,6 +8324,11 @@ static bool isGlobalVar(const Decl *D) {
return false;
}
+static bool isSanitizerAttributeAllowedOnGlobals(StringRef Sanitizer) {
+ return Sanitizer == "address" || Sanitizer == "hwaddress" ||
+ Sanitizer == "memtag";
+}
+
static void handleNoSanitizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!AL.checkAtLeastNumArgs(S, 1))
return;
@@ -7406,9 +8346,9 @@ static void handleNoSanitizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
SanitizerMask() &&
SanitizerName != "coverage")
S.Diag(LiteralLoc, diag::warn_unknown_sanitizer_ignored) << SanitizerName;
- else if (isGlobalVar(D) && SanitizerName != "address")
- S.Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
- << AL << ExpectedFunctionOrMethod;
+ else if (isGlobalVar(D) && !isSanitizerAttributeAllowedOnGlobals(SanitizerName))
+ S.Diag(D->getLocation(), diag::warn_attribute_type_not_supported_global)
+ << AL << SanitizerName;
Sanitizers.push_back(SanitizerName);
}
@@ -7427,14 +8367,14 @@ static void handleNoSanitizeSpecificAttr(Sema &S, Decl *D,
.Case("no_sanitize_memory", "memory");
if (isGlobalVar(D) && SanitizerName != "address")
S.Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
- << AL << ExpectedFunction;
+ << AL << AL.isRegularKeywordAttribute() << ExpectedFunction;
// FIXME: Rather than create a NoSanitizeSpecificAttr, this creates a
// NoSanitizeAttr object; but we need to calculate the correct spelling list
// index rather than incorrectly assume the index for NoSanitizeSpecificAttr
// has the same spellings as the index for NoSanitizeAttr. We don't have a
// general way to "translate" between the two, so this hack attempts to work
- // around the issue with hard-coded indicies. This is critical for calling
+ // around the issue with hard-coded indices. This is critical for calling
// getSpelling() or prettyPrint() on the resulting semantic attribute object
// without failing assertions.
unsigned TranslatedSpellingIndex = 0;
@@ -7453,12 +8393,12 @@ static void handleInternalLinkageAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
static void handleOpenCLNoSVMAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (S.LangOpts.OpenCLVersion != 200)
+ if (S.LangOpts.getOpenCLCompatibleVersion() < 200)
S.Diag(AL.getLoc(), diag::err_attribute_requires_opencl_version)
- << AL << "2.0" << 0;
+ << AL << "2.0" << 1;
else
- S.Diag(AL.getLoc(), diag::warn_opencl_attr_deprecated_ignored) << AL
- << "2.0";
+ S.Diag(AL.getLoc(), diag::warn_opencl_attr_deprecated_ignored)
+ << AL << S.LangOpts.getOpenCLVersionString();
}
static void handleOpenCLAccessAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
@@ -7486,18 +8426,17 @@ static void handleOpenCLAccessAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// OpenCL v3.0 s6.8 - For OpenCL C 2.0, or with the
// __opencl_c_read_write_images feature, image objects specified as arguments
// to a kernel can additionally be declared to be read-write.
- // C++ for OpenCL inherits rule from OpenCL C v2.0.
+ // C++ for OpenCL 1.0 inherits rule from OpenCL C v2.0.
+ // C++ for OpenCL 2021 inherits rule from OpenCL C v3.0.
if (const auto *PDecl = dyn_cast<ParmVarDecl>(D)) {
const Type *DeclTy = PDecl->getType().getCanonicalType().getTypePtr();
- if (AL.getAttrName()->getName().find("read_write") != StringRef::npos) {
- bool ReadWriteImagesUnsupportedForOCLC =
- (S.getLangOpts().OpenCLVersion < 200) ||
- (S.getLangOpts().OpenCLVersion == 300 &&
+ if (AL.getAttrName()->getName().contains("read_write")) {
+ bool ReadWriteImagesUnsupported =
+ (S.getLangOpts().getOpenCLCompatibleVersion() < 200) ||
+ (S.getLangOpts().getOpenCLCompatibleVersion() == 300 &&
!S.getOpenCLOptions().isSupported("__opencl_c_read_write_images",
S.getLangOpts()));
- if ((!S.getLangOpts().OpenCLCPlusPlus &&
- ReadWriteImagesUnsupportedForOCLC) ||
- DeclTy->isPipeType()) {
+ if (ReadWriteImagesUnsupported || DeclTy->isPipeType()) {
S.Diag(AL.getLoc(), diag::err_opencl_invalid_read_write)
<< AL << PDecl->getType() << DeclTy->isImageType();
D->setInvalidDecl(true);
@@ -7509,6 +8448,193 @@ static void handleOpenCLAccessAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(::new (S.Context) OpenCLAccessAttr(S.Context, AL));
}
+static void handleZeroCallUsedRegsAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ // Check that the argument is a string literal.
+ StringRef KindStr;
+ SourceLocation LiteralLoc;
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, KindStr, &LiteralLoc))
+ return;
+
+ ZeroCallUsedRegsAttr::ZeroCallUsedRegsKind Kind;
+ if (!ZeroCallUsedRegsAttr::ConvertStrToZeroCallUsedRegsKind(KindStr, Kind)) {
+ S.Diag(LiteralLoc, diag::warn_attribute_type_not_supported)
+ << AL << KindStr;
+ return;
+ }
+
+ D->dropAttr<ZeroCallUsedRegsAttr>();
+ D->addAttr(ZeroCallUsedRegsAttr::Create(S.Context, Kind, AL));
+}
+
+static void handleCountedByAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!AL.isArgIdent(0)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
+ << AL << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ IdentifierLoc *IL = AL.getArgAsIdent(0);
+ CountedByAttr *CBA =
+ ::new (S.Context) CountedByAttr(S.Context, AL, IL->Ident);
+ CBA->setCountedByFieldLoc(IL->Loc);
+ D->addAttr(CBA);
+}
+
+static const FieldDecl *
+FindFieldInTopLevelOrAnonymousStruct(const RecordDecl *RD,
+ const IdentifierInfo *FieldName) {
+ for (const Decl *D : RD->decls()) {
+ if (const auto *FD = dyn_cast<FieldDecl>(D))
+ if (FD->getName() == FieldName->getName())
+ return FD;
+
+ if (const auto *R = dyn_cast<RecordDecl>(D))
+ if (const FieldDecl *FD =
+ FindFieldInTopLevelOrAnonymousStruct(R, FieldName))
+ return FD;
+ }
+
+ return nullptr;
+}
+
+bool Sema::CheckCountedByAttr(Scope *S, const FieldDecl *FD) {
+ LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
+ LangOptions::StrictFlexArraysLevelKind::IncompleteOnly;
+ if (!Decl::isFlexibleArrayMemberLike(Context, FD, FD->getType(),
+ StrictFlexArraysLevel, true)) {
+ // The "counted_by" attribute must be on a flexible array member.
+ SourceRange SR = FD->getLocation();
+ Diag(SR.getBegin(), diag::err_counted_by_attr_not_on_flexible_array_member)
+ << SR;
+ return true;
+ }
+
+ const auto *CBA = FD->getAttr<CountedByAttr>();
+ const IdentifierInfo *FieldName = CBA->getCountedByField();
+
+ auto GetNonAnonStructOrUnion = [](const RecordDecl *RD) {
+ while (RD && !RD->getDeclName())
+ if (const auto *R = dyn_cast<RecordDecl>(RD->getDeclContext()))
+ RD = R;
+ else
+ break;
+
+ return RD;
+ };
+
+ const RecordDecl *EnclosingRD = GetNonAnonStructOrUnion(FD->getParent());
+ const FieldDecl *CountFD =
+ FindFieldInTopLevelOrAnonymousStruct(EnclosingRD, FieldName);
+
+ if (!CountFD) {
+ DeclarationNameInfo NameInfo(FieldName,
+ CBA->getCountedByFieldLoc().getBegin());
+ LookupResult MemResult(*this, NameInfo, Sema::LookupMemberName);
+ LookupName(MemResult, S);
+
+ if (!MemResult.empty()) {
+ SourceRange SR = CBA->getCountedByFieldLoc();
+ Diag(SR.getBegin(), diag::err_flexible_array_count_not_in_same_struct)
+ << CBA->getCountedByField() << SR;
+
+ if (auto *ND = MemResult.getAsSingle<NamedDecl>()) {
+ SR = ND->getLocation();
+ Diag(SR.getBegin(), diag::note_flexible_array_counted_by_attr_field)
+ << ND << SR;
+ }
+
+ return true;
+ } else {
+ // The "counted_by" field needs to exist in the struct.
+ LookupResult OrdResult(*this, NameInfo, Sema::LookupOrdinaryName);
+ LookupName(OrdResult, S);
+
+ if (!OrdResult.empty()) {
+ SourceRange SR = FD->getLocation();
+ Diag(SR.getBegin(), diag::err_counted_by_must_be_in_structure)
+ << FieldName << SR;
+
+ if (auto *ND = OrdResult.getAsSingle<NamedDecl>()) {
+ SR = ND->getLocation();
+ Diag(SR.getBegin(), diag::note_flexible_array_counted_by_attr_field)
+ << ND << SR;
+ }
+
+ return true;
+ }
+ }
+
+ CXXScopeSpec SS;
+ DeclFilterCCC<FieldDecl> Filter(FieldName);
+ return DiagnoseEmptyLookup(S, SS, MemResult, Filter, nullptr, std::nullopt,
+ const_cast<DeclContext *>(FD->getDeclContext()));
+ }
+
+ if (CountFD->hasAttr<CountedByAttr>()) {
+ // The "counted_by" field can't point to the flexible array member.
+ SourceRange SR = CBA->getCountedByFieldLoc();
+ Diag(SR.getBegin(), diag::err_counted_by_attr_refers_to_flexible_array)
+ << CBA->getCountedByField() << SR;
+ return true;
+ }
+
+ if (!CountFD->getType()->isIntegerType() ||
+ CountFD->getType()->isBooleanType()) {
+ // The "counted_by" field must have an integer type.
+ SourceRange SR = CBA->getCountedByFieldLoc();
+ Diag(SR.getBegin(),
+ diag::err_flexible_array_counted_by_attr_field_not_integer)
+ << CBA->getCountedByField() << SR;
+
+ SR = CountFD->getLocation();
+ Diag(SR.getBegin(), diag::note_flexible_array_counted_by_attr_field)
+ << CountFD << SR;
+ return true;
+ }
+
+ return false;
+}
+
+static void handleFunctionReturnThunksAttr(Sema &S, Decl *D,
+ const ParsedAttr &AL) {
+ StringRef KindStr;
+ SourceLocation LiteralLoc;
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, KindStr, &LiteralLoc))
+ return;
+
+ FunctionReturnThunksAttr::Kind Kind;
+ if (!FunctionReturnThunksAttr::ConvertStrToKind(KindStr, Kind)) {
+ S.Diag(LiteralLoc, diag::warn_attribute_type_not_supported)
+ << AL << KindStr;
+ return;
+ }
+ // FIXME: it would be good to better handle attribute merging rather than
+ // silently replacing the existing attribute, so long as it does not break
+ // the expected codegen tests.
+ D->dropAttr<FunctionReturnThunksAttr>();
+ D->addAttr(FunctionReturnThunksAttr::Create(S.Context, Kind, AL));
+}
+
+static void handleAvailableOnlyInDefaultEvalMethod(Sema &S, Decl *D,
+ const ParsedAttr &AL) {
+ assert(isa<TypedefNameDecl>(D) && "This attribute only applies to a typedef");
+ handleSimpleAttribute<AvailableOnlyInDefaultEvalMethodAttr>(S, D, AL);
+}
+
+static void handleNoMergeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ auto *VDecl = dyn_cast<VarDecl>(D);
+ if (VDecl && !VDecl->isFunctionPointerType()) {
+ S.Diag(AL.getLoc(), diag::warn_attribute_ignored_non_function_pointer)
+ << AL << VDecl;
+ return;
+ }
+ D->addAttr(NoMergeAttr::Create(S.Context, AL));
+}
+
+static void handleNoUniqueAddressAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ D->addAttr(NoUniqueAddressAttr::Create(S.Context, AL));
+}
+
static void handleSYCLKernelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// The 'sycl_kernel' attribute applies only to function templates.
const auto *FD = cast<FunctionDecl>(D);
@@ -7703,6 +8829,11 @@ static void handleHandleAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(Attr::Create(S.Context, Argument, AL));
}
+template<typename Attr>
+static void handleUnsafeBufferUsage(Sema &S, Decl *D, const ParsedAttr &AL) {
+ D->addAttr(Attr::Create(S.Context, AL));
+}
+
static void handleCFGuardAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// The guard attribute takes a single identifier argument.
@@ -7795,18 +8926,125 @@ EnforceTCBLeafAttr *Sema::mergeEnforceTCBLeafAttr(
// Top Level Sema Entry Points
//===----------------------------------------------------------------------===//
+// Returns true if the attribute must delay setting its arguments until after
+// template instantiation, and false otherwise.
+static bool MustDelayAttributeArguments(const ParsedAttr &AL) {
+ // Only attributes that accept expression parameter packs can delay arguments.
+ if (!AL.acceptsExprPack())
+ return false;
+
+ bool AttrHasVariadicArg = AL.hasVariadicArg();
+ unsigned AttrNumArgs = AL.getNumArgMembers();
+ for (size_t I = 0; I < std::min(AL.getNumArgs(), AttrNumArgs); ++I) {
+ bool IsLastAttrArg = I == (AttrNumArgs - 1);
+ // If the argument is the last argument and it is variadic it can contain
+ // any expression.
+ if (IsLastAttrArg && AttrHasVariadicArg)
+ return false;
+ Expr *E = AL.getArgAsExpr(I);
+ bool ArgMemberCanHoldExpr = AL.isParamExpr(I);
+ // If the expression is a pack expansion then arguments must be delayed
+ // unless the argument is an expression and it is the last argument of the
+ // attribute.
+ if (isa<PackExpansionExpr>(E))
+ return !(IsLastAttrArg && ArgMemberCanHoldExpr);
+ // Last case is if the expression is value dependent then it must delay
+ // arguments unless the corresponding argument is able to hold the
+ // expression.
+ if (E->isValueDependent() && !ArgMemberCanHoldExpr)
+ return true;
+ }
+ return false;
+}
+
+static bool checkArmNewAttrMutualExclusion(
+ Sema &S, const ParsedAttr &AL, const FunctionProtoType *FPT,
+ FunctionType::ArmStateValue CurrentState, StringRef StateName) {
+ auto CheckForIncompatibleAttr =
+ [&](FunctionType::ArmStateValue IncompatibleState,
+ StringRef IncompatibleStateName) {
+ if (CurrentState == IncompatibleState) {
+ S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible)
+ << (std::string("'__arm_new(\"") + StateName.str() + "\")'")
+ << (std::string("'") + IncompatibleStateName.str() + "(\"" +
+ StateName.str() + "\")'")
+ << true;
+ AL.setInvalid();
+ }
+ };
+
+ CheckForIncompatibleAttr(FunctionType::ARM_In, "__arm_in");
+ CheckForIncompatibleAttr(FunctionType::ARM_Out, "__arm_out");
+ CheckForIncompatibleAttr(FunctionType::ARM_InOut, "__arm_inout");
+ CheckForIncompatibleAttr(FunctionType::ARM_Preserves, "__arm_preserves");
+ return AL.isInvalid();
+}
+
+static void handleArmNewAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!AL.getNumArgs()) {
+ S.Diag(AL.getLoc(), diag::err_missing_arm_state) << AL;
+ AL.setInvalid();
+ return;
+ }
+
+ std::vector<StringRef> NewState;
+ if (const auto *ExistingAttr = D->getAttr<ArmNewAttr>()) {
+ for (StringRef S : ExistingAttr->newArgs())
+ NewState.push_back(S);
+ }
+
+ bool HasZA = false;
+ bool HasZT0 = false;
+ for (unsigned I = 0, E = AL.getNumArgs(); I != E; ++I) {
+ StringRef StateName;
+ SourceLocation LiteralLoc;
+ if (!S.checkStringLiteralArgumentAttr(AL, I, StateName, &LiteralLoc))
+ return;
+
+ if (StateName == "za")
+ HasZA = true;
+ else if (StateName == "zt0")
+ HasZT0 = true;
+ else {
+ S.Diag(LiteralLoc, diag::err_unknown_arm_state) << StateName;
+ AL.setInvalid();
+ return;
+ }
+
+ if (!llvm::is_contained(NewState, StateName)) // Avoid adding duplicates.
+ NewState.push_back(StateName);
+ }
+
+ if (auto *FPT = dyn_cast<FunctionProtoType>(D->getFunctionType())) {
+ FunctionType::ArmStateValue ZAState =
+ FunctionType::getArmZAState(FPT->getAArch64SMEAttributes());
+ if (HasZA && ZAState != FunctionType::ARM_None &&
+ checkArmNewAttrMutualExclusion(S, AL, FPT, ZAState, "za"))
+ return;
+ FunctionType::ArmStateValue ZT0State =
+ FunctionType::getArmZT0State(FPT->getAArch64SMEAttributes());
+ if (HasZT0 && ZT0State != FunctionType::ARM_None &&
+ checkArmNewAttrMutualExclusion(S, AL, FPT, ZT0State, "zt0"))
+ return;
+ }
+
+ D->dropAttr<ArmNewAttr>();
+ D->addAttr(::new (S.Context)
+ ArmNewAttr(S.Context, AL, NewState.data(), NewState.size()));
+}
+
/// ProcessDeclAttribute - Apply the specific attribute to the specified decl if
/// the attribute applies to decls. If the attribute is a type attribute, just
/// silently ignore it if a GNU attribute.
-static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
- const ParsedAttr &AL,
- bool IncludeCXX11Attributes) {
+static void
+ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
+ const Sema::ProcessDeclAttributeOptions &Options) {
if (AL.isInvalid() || AL.getKind() == ParsedAttr::IgnoredAttribute)
return;
// Ignore C++11 attributes on declarator chunks: they appertain to the type
// instead.
- if (AL.isCXX11Attribute() && !IncludeCXX11Attributes)
+ if (AL.isCXX11Attribute() && !Options.IncludeCXX11Attributes)
return;
// Unknown attributes are automatically warned on. Target-specific attributes
@@ -7815,30 +9053,103 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
if (AL.getKind() == ParsedAttr::UnknownAttribute ||
!AL.existsInTarget(S.Context.getTargetInfo())) {
S.Diag(AL.getLoc(),
- AL.isDeclspecAttribute()
+ AL.isRegularKeywordAttribute()
+ ? (unsigned)diag::err_keyword_not_supported_on_target
+ : AL.isDeclspecAttribute()
? (unsigned)diag::warn_unhandled_ms_attribute_ignored
: (unsigned)diag::warn_unknown_attribute_ignored)
<< AL << AL.getRange();
return;
}
- if (S.checkCommonAttributeFeatures(D, AL))
+ // Check if argument population must delayed to after template instantiation.
+ bool MustDelayArgs = MustDelayAttributeArguments(AL);
+
+ // Argument number check must be skipped if arguments are delayed.
+ if (S.checkCommonAttributeFeatures(D, AL, MustDelayArgs))
+ return;
+
+ if (MustDelayArgs) {
+ AL.handleAttrWithDelayedArgs(S, D);
return;
+ }
switch (AL.getKind()) {
default:
if (AL.getInfo().handleDeclAttribute(S, D, AL) != ParsedAttrInfo::NotHandled)
break;
if (!AL.isStmtAttr()) {
- // Type attributes are handled elsewhere; silently move on.
assert(AL.isTypeAttr() && "Non-type attribute not handled");
- break;
+ }
+ if (AL.isTypeAttr()) {
+ if (Options.IgnoreTypeAttributes)
+ break;
+ if (!AL.isStandardAttributeSyntax() && !AL.isRegularKeywordAttribute()) {
+ // Non-[[]] type attributes are handled in processTypeAttrs(); silently
+ // move on.
+ break;
+ }
+
+ // According to the C and C++ standards, we should never see a
+ // [[]] type attribute on a declaration. However, we have in the past
+ // allowed some type attributes to "slide" to the `DeclSpec`, so we need
+ // to continue to support this legacy behavior. We only do this, however,
+ // if
+ // - we actually have a `DeclSpec`, i.e. if we're looking at a
+ // `DeclaratorDecl`, or
+ // - we are looking at an alias-declaration, where historically we have
+ // allowed type attributes after the identifier to slide to the type.
+ if (AL.slidesFromDeclToDeclSpecLegacyBehavior() &&
+ isa<DeclaratorDecl, TypeAliasDecl>(D)) {
+ // Suggest moving the attribute to the type instead, but only for our
+ // own vendor attributes; moving other vendors' attributes might hurt
+ // portability.
+ if (AL.isClangScope()) {
+ S.Diag(AL.getLoc(), diag::warn_type_attribute_deprecated_on_decl)
+ << AL << D->getLocation();
+ }
+
+ // Allow this type attribute to be handled in processTypeAttrs();
+ // silently move on.
+ break;
+ }
+
+ if (AL.getKind() == ParsedAttr::AT_Regparm) {
+ // `regparm` is a special case: It's a type attribute but we still want
+ // to treat it as if it had been written on the declaration because that
+ // way we'll be able to handle it directly in `processTypeAttr()`.
+ // If we treated `regparm` it as if it had been written on the
+ // `DeclSpec`, the logic in `distributeFunctionTypeAttrFromDeclSepc()`
+ // would try to move it to the declarator, but that doesn't work: We
+ // can't remove the attribute from the list of declaration attributes
+ // because it might be needed by other declarators in the same
+ // declaration.
+ break;
+ }
+
+ if (AL.getKind() == ParsedAttr::AT_VectorSize) {
+ // `vector_size` is a special case: It's a type attribute semantically,
+ // but GCC expects the [[]] syntax to be written on the declaration (and
+ // warns that the attribute has no effect if it is placed on the
+ // decl-specifier-seq).
+ // Silently move on and allow the attribute to be handled in
+ // processTypeAttr().
+ break;
+ }
+
+ if (AL.getKind() == ParsedAttr::AT_NoDeref) {
+ // FIXME: `noderef` currently doesn't work correctly in [[]] syntax.
+ // See https://github.com/llvm/llvm-project/issues/55790 for details.
+ // We allow processTypeAttrs() to emit a warning and silently move on.
+ break;
+ }
}
// N.B., ClangAttrEmitter.cpp emits a diagnostic helper that ensures a
// statement attribute is not written on a declaration, but this code is
- // needed for attributes in Attr.td that do not list any subjects.
- S.Diag(AL.getLoc(), diag::err_stmt_attribute_invalid_on_decl)
- << AL << D->getLocation();
+ // needed for type attributes as well as statement attributes in Attr.td
+ // that do not list any subjects.
+ S.Diag(AL.getLoc(), diag::err_attribute_invalid_on_decl)
+ << AL << AL.isRegularKeywordAttribute() << D->getLocation();
break;
case ParsedAttr::AT_Interrupt:
handleInterruptAttr(S, D, AL);
@@ -7846,6 +9157,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_X86ForceAlignArgPointer:
handleX86ForceAlignArgPointerAttr(S, D, AL);
break;
+ case ParsedAttr::AT_ReadOnlyPlacement:
+ handleSimpleAttribute<ReadOnlyPlacementAttr>(S, D, AL);
+ break;
case ParsedAttr::AT_DLLExport:
case ParsedAttr::AT_DLLImport:
handleDLLAttr(S, D, AL);
@@ -7868,6 +9182,12 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_BPFPreserveAccessIndex:
handleBPFPreserveAccessIndexAttr(S, D, AL);
break;
+ case ParsedAttr::AT_BPFPreserveStaticOffset:
+ handleSimpleAttribute<BPFPreserveStaticOffsetAttr>(S, D, AL);
+ break;
+ case ParsedAttr::AT_BTFDeclTag:
+ handleBTFDeclTagAttr(S, D, AL);
+ break;
case ParsedAttr::AT_WebAssemblyExportName:
handleWebAssemblyExportNameAttr(S, D, AL);
break;
@@ -7941,9 +9261,15 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_EnableIf:
handleEnableIfAttr(S, D, AL);
break;
+ case ParsedAttr::AT_Error:
+ handleErrorAttr(S, D, AL);
+ break;
case ParsedAttr::AT_DiagnoseIf:
handleDiagnoseIfAttr(S, D, AL);
break;
+ case ParsedAttr::AT_DiagnoseAsBuiltin:
+ handleDiagnoseAsBuiltinAttr(S, D, AL);
+ break;
case ParsedAttr::AT_NoBuiltin:
handleNoBuiltinAttr(S, D, AL);
break;
@@ -7965,6 +9291,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_SYCLKernel:
handleSYCLKernelAttr(S, D, AL);
break;
+ case ParsedAttr::AT_SYCLSpecialClass:
+ handleSimpleAttribute<SYCLSpecialClassAttr>(S, D, AL);
+ break;
case ParsedAttr::AT_Format:
handleFormatAttr(S, D, AL);
break;
@@ -7977,6 +9306,7 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_CalledOnce:
handleCalledOnceAttr(S, D, AL);
break;
+ case ParsedAttr::AT_NVPTXKernel:
case ParsedAttr::AT_CUDAGlobal:
handleGlobalAttr(S, D, AL);
break;
@@ -8010,6 +9340,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_NoEscape:
handleNoEscapeAttr(S, D, AL);
break;
+ case ParsedAttr::AT_MaybeUndef:
+ handleSimpleAttribute<MaybeUndefAttr>(S, D, AL);
+ break;
case ParsedAttr::AT_AssumeAligned:
handleAssumeAlignedAttr(S, D, AL);
break;
@@ -8025,6 +9358,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_NoReturn:
handleNoReturnAttr(S, D, AL);
break;
+ case ParsedAttr::AT_CXX11NoReturn:
+ handleStandardNoReturnAttr(S, D, AL);
+ break;
case ParsedAttr::AT_AnyX86NoCfCheck:
handleNoCfCheckAttr(S, D, AL);
break;
@@ -8122,12 +9458,27 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Section:
handleSectionAttr(S, D, AL);
break;
+ case ParsedAttr::AT_CodeModel:
+ handleCodeModelAttr(S, D, AL);
+ break;
+ case ParsedAttr::AT_RandomizeLayout:
+ handleRandomizeLayoutAttr(S, D, AL);
+ break;
+ case ParsedAttr::AT_NoRandomizeLayout:
+ handleNoRandomizeLayoutAttr(S, D, AL);
+ break;
case ParsedAttr::AT_CodeSeg:
handleCodeSegAttr(S, D, AL);
break;
case ParsedAttr::AT_Target:
handleTargetAttr(S, D, AL);
break;
+ case ParsedAttr::AT_TargetVersion:
+ handleTargetVersionAttr(S, D, AL);
+ break;
+ case ParsedAttr::AT_TargetClones:
+ handleTargetClonesAttr(S, D, AL);
+ break;
case ParsedAttr::AT_MinVectorWidth:
handleMinVectorWidthAttr(S, D, AL);
break;
@@ -8208,6 +9559,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_PreserveMost:
case ParsedAttr::AT_PreserveAll:
case ParsedAttr::AT_AArch64VectorPcs:
+ case ParsedAttr::AT_AArch64SVEPcs:
+ case ParsedAttr::AT_AMDGPUKernelCall:
+ case ParsedAttr::AT_M68kRTD:
handleCallConvAttr(S, D, AL);
break;
case ParsedAttr::AT_Suppress:
@@ -8238,6 +9592,26 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_InternalLinkage:
handleInternalLinkageAttr(S, D, AL);
break;
+ case ParsedAttr::AT_ZeroCallUsedRegs:
+ handleZeroCallUsedRegsAttr(S, D, AL);
+ break;
+ case ParsedAttr::AT_FunctionReturnThunks:
+ handleFunctionReturnThunksAttr(S, D, AL);
+ break;
+ case ParsedAttr::AT_NoMerge:
+ handleNoMergeAttr(S, D, AL);
+ break;
+ case ParsedAttr::AT_NoUniqueAddress:
+ handleNoUniqueAddressAttr(S, D, AL);
+ break;
+
+ case ParsedAttr::AT_AvailableOnlyInDefaultEvalMethod:
+ handleAvailableOnlyInDefaultEvalMethod(S, D, AL);
+ break;
+
+ case ParsedAttr::AT_CountedBy:
+ handleCountedByAttr(S, D, AL);
+ break;
// Microsoft attributes:
case ParsedAttr::AT_LayoutVersion:
@@ -8252,6 +9626,29 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Thread:
handleDeclspecThreadAttr(S, D, AL);
break;
+ case ParsedAttr::AT_MSConstexpr:
+ handleMSConstexprAttr(S, D, AL);
+ break;
+
+ // HLSL attributes:
+ case ParsedAttr::AT_HLSLNumThreads:
+ handleHLSLNumThreadsAttr(S, D, AL);
+ break;
+ case ParsedAttr::AT_HLSLSV_GroupIndex:
+ handleSimpleAttribute<HLSLSV_GroupIndexAttr>(S, D, AL);
+ break;
+ case ParsedAttr::AT_HLSLSV_DispatchThreadID:
+ handleHLSLSV_DispatchThreadIDAttr(S, D, AL);
+ break;
+ case ParsedAttr::AT_HLSLShader:
+ handleHLSLShaderAttr(S, D, AL);
+ break;
+ case ParsedAttr::AT_HLSLResourceBinding:
+ handleHLSLResourceBindingAttr(S, D, AL);
+ break;
+ case ParsedAttr::AT_HLSLParamModifier:
+ handleHLSLParamModifierAttr(S, D, AL);
+ break;
case ParsedAttr::AT_AbiTag:
handleAbiTagAttr(S, D, AL);
@@ -8411,6 +9808,14 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleArmBuiltinAliasAttr(S, D, AL);
break;
+ case ParsedAttr::AT_ArmLocallyStreaming:
+ handleSimpleAttribute<ArmLocallyStreamingAttr>(S, D, AL);
+ break;
+
+ case ParsedAttr::AT_ArmNew:
+ handleArmNewAttr(S, D, AL);
+ break;
+
case ParsedAttr::AT_AcquireHandle:
handleAcquireHandleAttr(S, D, AL);
break;
@@ -8419,6 +9824,10 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleHandleAttr<ReleaseHandleAttr>(S, D, AL);
break;
+ case ParsedAttr::AT_UnsafeBufferUsage:
+ handleUnsafeBufferUsage<UnsafeBufferUsageAttr>(S, D, AL);
+ break;
+
case ParsedAttr::AT_UseHandle:
handleHandleAttr<UseHandleAttr>(S, D, AL);
break;
@@ -8435,6 +9844,10 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleBuiltinAliasAttr(S, D, AL);
break;
+ case ParsedAttr::AT_PreferredType:
+ handlePreferredTypeAttr(S, D, AL);
+ break;
+
case ParsedAttr::AT_UsingIfExists:
handleSimpleAttribute<UsingIfExistsAttr>(S, D, AL);
break;
@@ -8443,14 +9856,14 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
/// ProcessDeclAttributeList - Apply all the decl attributes in the specified
/// attribute list to the specified decl, ignoring any type attributes.
-void Sema::ProcessDeclAttributeList(Scope *S, Decl *D,
- const ParsedAttributesView &AttrList,
- bool IncludeCXX11Attributes) {
+void Sema::ProcessDeclAttributeList(
+ Scope *S, Decl *D, const ParsedAttributesView &AttrList,
+ const ProcessDeclAttributeOptions &Options) {
if (AttrList.empty())
return;
for (const ParsedAttr &AL : AttrList)
- ProcessDeclAttribute(*this, S, D, AL, IncludeCXX11Attributes);
+ ProcessDeclAttribute(*this, S, D, AL, Options);
// FIXME: We should be able to handle these cases in TableGen.
// GCC accepts
@@ -8486,19 +9899,19 @@ void Sema::ProcessDeclAttributeList(Scope *S, Decl *D,
} else if (!D->hasAttr<CUDAGlobalAttr>()) {
if (const auto *A = D->getAttr<AMDGPUFlatWorkGroupSizeAttr>()) {
Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
- << A << ExpectedKernelFunction;
+ << A << A->isRegularKeywordAttribute() << ExpectedKernelFunction;
D->setInvalidDecl();
} else if (const auto *A = D->getAttr<AMDGPUWavesPerEUAttr>()) {
Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
- << A << ExpectedKernelFunction;
+ << A << A->isRegularKeywordAttribute() << ExpectedKernelFunction;
D->setInvalidDecl();
} else if (const auto *A = D->getAttr<AMDGPUNumSGPRAttr>()) {
Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
- << A << ExpectedKernelFunction;
+ << A << A->isRegularKeywordAttribute() << ExpectedKernelFunction;
D->setInvalidDecl();
} else if (const auto *A = D->getAttr<AMDGPUNumVGPRAttr>()) {
Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
- << A << ExpectedKernelFunction;
+ << A << A->isRegularKeywordAttribute() << ExpectedKernelFunction;
D->setInvalidDecl();
}
}
@@ -8538,7 +9951,8 @@ bool Sema::ProcessAccessDeclAttributeList(
AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList) {
for (const ParsedAttr &AL : AttrList) {
if (AL.getKind() == ParsedAttr::AT_Annotate) {
- ProcessDeclAttribute(*this, nullptr, ASDecl, AL, AL.isCXX11Attribute());
+ ProcessDeclAttribute(*this, nullptr, ASDecl, AL,
+ ProcessDeclAttributeOptions());
} else {
Diag(AL.getLoc(), diag::err_only_annotate_after_access_spec);
return true;
@@ -8571,6 +9985,7 @@ static void checkUnusedDeclAttributes(Sema &S, const ParsedAttributesView &A) {
/// used to build a declaration, complain about any decl attributes
/// which might be lying around on it.
void Sema::checkUnusedDeclAttributes(Declarator &D) {
+ ::checkUnusedDeclAttributes(*this, D.getDeclarationAttributes());
::checkUnusedDeclAttributes(*this, D.getDeclSpec().getAttributes());
::checkUnusedDeclAttributes(*this, D.getAttributes());
for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i)
@@ -8579,8 +9994,8 @@ void Sema::checkUnusedDeclAttributes(Declarator &D) {
/// DeclClonePragmaWeak - clone existing decl (maybe definition),
/// \#pragma weak needs a non-definition decl and source may not have one.
-NamedDecl * Sema::DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
- SourceLocation Loc) {
+NamedDecl *Sema::DeclClonePragmaWeak(NamedDecl *ND, const IdentifierInfo *II,
+ SourceLocation Loc) {
assert(isa<FunctionDecl>(ND) || isa<VarDecl>(ND));
NamedDecl *NewD = nullptr;
if (auto *FD = dyn_cast<FunctionDecl>(ND)) {
@@ -8592,8 +10007,9 @@ NamedDecl * Sema::DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
NewFD = FunctionDecl::Create(
FD->getASTContext(), FD->getDeclContext(), Loc, Loc,
DeclarationName(II), FD->getType(), FD->getTypeSourceInfo(), SC_None,
- false /*isInlineSpecified*/, FD->hasPrototype(),
- ConstexprSpecKind::Unspecified, FD->getTrailingRequiresClause());
+ getCurFPFeatures().isFPConstrained(), false /*isInlineSpecified*/,
+ FD->hasPrototype(), ConstexprSpecKind::Unspecified,
+ FD->getTrailingRequiresClause());
NewD = NewFD;
if (FD->getQualifier())
@@ -8624,16 +10040,13 @@ NamedDecl * Sema::DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
/// DeclApplyPragmaWeak - A declaration (maybe definition) needs \#pragma weak
/// applied to it, possibly with an alias.
-void Sema::DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W) {
- if (W.getUsed()) return; // only do this once
- W.setUsed(true);
+void Sema::DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, const WeakInfo &W) {
if (W.getAlias()) { // clone decl, impersonate __attribute(weak,alias(...))
IdentifierInfo *NDId = ND->getIdentifier();
NamedDecl *NewD = DeclClonePragmaWeak(ND, W.getAlias(), W.getLocation());
NewD->addAttr(
AliasAttr::CreateImplicit(Context, NDId->getName(), W.getLocation()));
- NewD->addAttr(WeakAttr::CreateImplicit(Context, W.getLocation(),
- AttributeCommonInfo::AS_Pragma));
+ NewD->addAttr(WeakAttr::CreateImplicit(Context, W.getLocation()));
WeakTopLevelDecl.push_back(NewD);
// FIXME: "hideous" code from Sema::LazilyCreateBuiltin
// to insert Decl at TU scope, sorry.
@@ -8644,8 +10057,7 @@ void Sema::DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W) {
PushOnScopeChains(NewD, S);
CurContext = SavedContext;
} else { // just add weak to existing
- ND->addAttr(WeakAttr::CreateImplicit(Context, W.getLocation(),
- AttributeCommonInfo::AS_Pragma));
+ ND->addAttr(WeakAttr::CreateImplicit(Context, W.getLocation()));
}
}
@@ -8653,23 +10065,25 @@ void Sema::ProcessPragmaWeak(Scope *S, Decl *D) {
// It's valid to "forward-declare" #pragma weak, in which case we
// have to do this.
LoadExternalWeakUndeclaredIdentifiers();
- if (!WeakUndeclaredIdentifiers.empty()) {
- NamedDecl *ND = nullptr;
- if (auto *VD = dyn_cast<VarDecl>(D))
- if (VD->isExternC())
- ND = VD;
- if (auto *FD = dyn_cast<FunctionDecl>(D))
- if (FD->isExternC())
- ND = FD;
- if (ND) {
- if (IdentifierInfo *Id = ND->getIdentifier()) {
- auto I = WeakUndeclaredIdentifiers.find(Id);
- if (I != WeakUndeclaredIdentifiers.end()) {
- WeakInfo W = I->second;
- DeclApplyPragmaWeak(S, ND, W);
- WeakUndeclaredIdentifiers[Id] = W;
- }
- }
+ if (WeakUndeclaredIdentifiers.empty())
+ return;
+ NamedDecl *ND = nullptr;
+ if (auto *VD = dyn_cast<VarDecl>(D))
+ if (VD->isExternC())
+ ND = VD;
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->isExternC())
+ ND = FD;
+ if (!ND)
+ return;
+ if (IdentifierInfo *Id = ND->getIdentifier()) {
+ auto I = WeakUndeclaredIdentifiers.find(Id);
+ if (I != WeakUndeclaredIdentifiers.end()) {
+ auto &WeakInfos = I->second;
+ for (const auto &W : WeakInfos)
+ DeclApplyPragmaWeak(S, ND, W);
+ std::remove_reference_t<decltype(WeakInfos)> EmptyWeakInfos;
+ WeakInfos.swap(EmptyWeakInfos);
}
}
}
@@ -8678,17 +10092,43 @@ void Sema::ProcessPragmaWeak(Scope *S, Decl *D) {
/// it, apply them to D. This is a bit tricky because PD can have attributes
/// specified in many different places, and we need to find and apply them all.
void Sema::ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD) {
+ // Ordering of attributes can be important, so we take care to process
+ // attributes in the order in which they appeared in the source code.
+
+ // First, process attributes that appeared on the declaration itself (but
+ // only if they don't have the legacy behavior of "sliding" to the DeclSepc).
+ ParsedAttributesView NonSlidingAttrs;
+ for (ParsedAttr &AL : PD.getDeclarationAttributes()) {
+ if (AL.slidesFromDeclToDeclSpecLegacyBehavior()) {
+ // Skip processing the attribute, but do check if it appertains to the
+ // declaration. This is needed for the `MatrixType` attribute, which,
+ // despite being a type attribute, defines a `SubjectList` that only
+ // allows it to be used on typedef declarations.
+ AL.diagnoseAppertainsTo(*this, D);
+ } else {
+ NonSlidingAttrs.addAtEnd(&AL);
+ }
+ }
+ ProcessDeclAttributeList(S, D, NonSlidingAttrs);
+
// Apply decl attributes from the DeclSpec if present.
- if (!PD.getDeclSpec().getAttributes().empty())
- ProcessDeclAttributeList(S, D, PD.getDeclSpec().getAttributes());
+ if (!PD.getDeclSpec().getAttributes().empty()) {
+ ProcessDeclAttributeList(S, D, PD.getDeclSpec().getAttributes(),
+ ProcessDeclAttributeOptions()
+ .WithIncludeCXX11Attributes(false)
+ .WithIgnoreTypeAttributes(true));
+ }
// Walk the declarator structure, applying decl attributes that were in a type
// position to the decl itself. This handles cases like:
// int *__attr__(x)** D;
// when X is a decl attribute.
- for (unsigned i = 0, e = PD.getNumTypeObjects(); i != e; ++i)
+ for (unsigned i = 0, e = PD.getNumTypeObjects(); i != e; ++i) {
ProcessDeclAttributeList(S, D, PD.getTypeObject(i).getAttrs(),
- /*IncludeCXX11Attributes=*/false);
+ ProcessDeclAttributeOptions()
+ .WithIncludeCXX11Attributes(false)
+ .WithIgnoreTypeAttributes(true));
+ }
// Finally, apply any attributes on the decl itself.
ProcessDeclAttributeList(S, D, PD.getAttributes());
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
index ac01beb1bf93..df5bd55e7c28 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
@@ -17,7 +17,10 @@
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/ComparisonCategories.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/RecursiveASTVisitor.h"
@@ -26,23 +29,30 @@
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/AttributeCommonInfo.h"
#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/LiteralSupport.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/CXXFieldCollector.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Ownership.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/SaveAndRestore.h"
#include <map>
+#include <optional>
#include <set>
using namespace clang;
@@ -77,7 +87,8 @@ public:
bool CheckDefaultArgumentVisitor::VisitExpr(const Expr *Node) {
bool IsInvalid = false;
for (const Stmt *SubStmt : Node->children())
- IsInvalid |= Visit(SubStmt);
+ if (SubStmt)
+ IsInvalid |= Visit(SubStmt);
return IsInvalid;
}
@@ -85,7 +96,11 @@ bool CheckDefaultArgumentVisitor::VisitExpr(const Expr *Node) {
/// determine whether this declaration can be used in the default
/// argument expression.
bool CheckDefaultArgumentVisitor::VisitDeclRefExpr(const DeclRefExpr *DRE) {
- const NamedDecl *Decl = DRE->getDecl();
+ const ValueDecl *Decl = dyn_cast<ValueDecl>(DRE->getDecl());
+
+ if (!isa<VarDecl, BindingDecl>(Decl))
+ return false;
+
if (const auto *Param = dyn_cast<ParmVarDecl>(Decl)) {
// C++ [dcl.fct.default]p9:
// [...] parameters of a function shall not be used in default
@@ -99,7 +114,7 @@ bool CheckDefaultArgumentVisitor::VisitDeclRefExpr(const DeclRefExpr *DRE) {
return S.Diag(DRE->getBeginLoc(),
diag::err_param_default_argument_references_param)
<< Param->getDeclName() << DefaultArg->getSourceRange();
- } else if (const auto *VDecl = dyn_cast<VarDecl>(Decl)) {
+ } else if (auto *VD = Decl->getPotentiallyDecomposedVarDecl()) {
// C++ [dcl.fct.default]p7:
// Local variables shall not be used in default argument
// expressions.
@@ -109,14 +124,14 @@ bool CheckDefaultArgumentVisitor::VisitDeclRefExpr(const DeclRefExpr *DRE) {
// expression in a default argument.
//
// C++20 [dcl.fct.default]p7 (DR as part of P0588R1, see also CWG 2346):
- // Note: A local variable cannot be odr-used (6.3) in a default argument.
+ // Note: A local variable cannot be odr-used (6.3) in a default
+ // argument.
//
- if (VDecl->isLocalVarDecl() && !DRE->isNonOdrUse())
+ if (VD->isLocalVarDecl() && !DRE->isNonOdrUse())
return S.Diag(DRE->getBeginLoc(),
diag::err_param_default_argument_references_local)
- << VDecl->getDeclName() << DefaultArg->getSourceRange();
+ << Decl << DefaultArg->getSourceRange();
}
-
return false;
}
@@ -146,13 +161,20 @@ bool CheckDefaultArgumentVisitor::VisitPseudoObjectExpr(
}
bool CheckDefaultArgumentVisitor::VisitLambdaExpr(const LambdaExpr *Lambda) {
- // C++11 [expr.lambda.prim]p13:
- // A lambda-expression appearing in a default argument shall not
- // implicitly or explicitly capture any entity.
- if (Lambda->capture_begin() == Lambda->capture_end())
- return false;
-
- return S.Diag(Lambda->getBeginLoc(), diag::err_lambda_capture_default_arg);
+ // [expr.prim.lambda.capture]p9
+ // a lambda-expression appearing in a default argument cannot implicitly or
+ // explicitly capture any local entity. Such a lambda-expression can still
+ // have an init-capture if any full-expression in its initializer satisfies
+ // the constraints of an expression appearing in a default argument.
+ bool Invalid = false;
+ for (const LambdaCapture &LC : Lambda->captures()) {
+ if (!Lambda->isInitCapture(&LC))
+ return S.Diag(LC.getLocation(), diag::err_lambda_capture_default_arg);
+ // Init captures are always VarDecl.
+ auto *D = cast<VarDecl>(LC.getCapturedVar());
+ Invalid |= Visit(D->getInit());
+ }
+ return Invalid;
}
} // namespace
@@ -312,23 +334,16 @@ Sema::ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc,
ParmVarDecl *Param = cast<ParmVarDecl>(param);
UnparsedDefaultArgLocs.erase(Param);
- auto Fail = [&] {
- Param->setInvalidDecl();
- Param->setDefaultArg(new (Context) OpaqueValueExpr(
- EqualLoc, Param->getType().getNonReferenceType(), VK_PRValue));
- };
-
// Default arguments are only permitted in C++
if (!getLangOpts().CPlusPlus) {
Diag(EqualLoc, diag::err_param_default_argument)
<< DefaultArg->getSourceRange();
- return Fail();
+ return ActOnParamDefaultArgumentError(param, EqualLoc, DefaultArg);
}
// Check for unexpanded parameter packs.
- if (DiagnoseUnexpandedParameterPack(DefaultArg, UPPC_DefaultArgument)) {
- return Fail();
- }
+ if (DiagnoseUnexpandedParameterPack(DefaultArg, UPPC_DefaultArgument))
+ return ActOnParamDefaultArgumentError(param, EqualLoc, DefaultArg);
// C++11 [dcl.fct.default]p3
// A default argument expression [...] shall not be specified for a
@@ -343,14 +358,14 @@ Sema::ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc,
ExprResult Result = ConvertParamDefaultArgument(Param, DefaultArg, EqualLoc);
if (Result.isInvalid())
- return Fail();
+ return ActOnParamDefaultArgumentError(param, EqualLoc, DefaultArg);
DefaultArg = Result.getAs<Expr>();
// Check that the default argument is well-formed
CheckDefaultArgumentVisitor DefaultArgChecker(*this, DefaultArg);
if (DefaultArgChecker.Visit(DefaultArg))
- return Fail();
+ return ActOnParamDefaultArgumentError(param, EqualLoc, DefaultArg);
SetParamDefaultArgument(Param, DefaultArg, EqualLoc);
}
@@ -372,16 +387,23 @@ void Sema::ActOnParamUnparsedDefaultArgument(Decl *param,
/// ActOnParamDefaultArgumentError - Parsing or semantic analysis of
/// the default argument for the parameter param failed.
-void Sema::ActOnParamDefaultArgumentError(Decl *param,
- SourceLocation EqualLoc) {
+void Sema::ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc,
+ Expr *DefaultArg) {
if (!param)
return;
ParmVarDecl *Param = cast<ParmVarDecl>(param);
Param->setInvalidDecl();
UnparsedDefaultArgLocs.erase(Param);
- Param->setDefaultArg(new (Context) OpaqueValueExpr(
- EqualLoc, Param->getType().getNonReferenceType(), VK_PRValue));
+ ExprResult RE;
+ if (DefaultArg) {
+ RE = CreateRecoveryExpr(EqualLoc, DefaultArg->getEndLoc(), {DefaultArg},
+ Param->getType().getNonReferenceType());
+ } else {
+ RE = CreateRecoveryExpr(EqualLoc, EqualLoc, {},
+ Param->getType().getNonReferenceType());
+ }
+ Param->setDefaultArg(RE.get());
}
/// CheckExtraCXXDefaultArguments - Check for any extra default
@@ -435,7 +457,7 @@ void Sema::CheckExtraCXXDefaultArguments(Declarator &D) {
}
static bool functionDeclHasDefaultArgument(const FunctionDecl *FD) {
- return std::any_of(FD->param_begin(), FD->param_end(), [](ParmVarDecl *P) {
+ return llvm::any_of(FD->parameters(), [](ParmVarDecl *P) {
return P->hasDefaultArg() && !P->hasInheritedDefaultArg();
});
}
@@ -706,6 +728,12 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old,
return Invalid;
}
+void Sema::DiagPlaceholderVariableDefinition(SourceLocation Loc) {
+ Diag(Loc, getLangOpts().CPlusPlus26
+ ? diag::warn_cxx23_placeholder_var_definition
+ : diag::ext_placeholder_var_definition);
+}
+
NamedDecl *
Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists) {
@@ -743,11 +771,16 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
// C++17 [dcl.dcl]/8:
// The decl-specifier-seq shall contain only the type-specifier auto
// and cv-qualifiers.
- // C++2a [dcl.dcl]/8:
+ // C++20 [dcl.dcl]/8:
// If decl-specifier-seq contains any decl-specifier other than static,
// thread_local, auto, or cv-qualifiers, the program is ill-formed.
+ // C++23 [dcl.pre]/6:
+ // Each decl-specifier in the decl-specifier-seq shall be static,
+ // thread_local, auto (9.2.9.6 [dcl.spec.auto]), or a cv-qualifier.
auto &DS = D.getDeclSpec();
{
+ // Note: While constrained-auto needs to be checked, we do so separately so
+ // we can emit a better diagnostic.
SmallVector<StringRef, 8> BadSpecifiers;
SmallVector<SourceLocation, 8> BadSpecifierLocs;
SmallVector<StringRef, 8> CPlusPlus20Specifiers;
@@ -774,6 +807,7 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
BadSpecifiers.push_back("inline");
BadSpecifierLocs.push_back(DS.getInlineSpecLoc());
}
+
if (!BadSpecifiers.empty()) {
auto &&Err = Diag(BadSpecifierLocs.front(), diag::err_decomp_decl_spec);
Err << (int)BadSpecifiers.size()
@@ -805,7 +839,7 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
Diag(DS.getVolatileSpecLoc(),
diag::warn_deprecated_volatile_structured_binding);
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
QualType R = TInfo->getType();
if (DiagnoseUnexpandedParameterPack(D.getIdentifierLoc(), TInfo,
@@ -834,6 +868,20 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
D.setInvalidType();
}
+ // Constrained auto is prohibited by [decl.pre]p6, so check that here.
+ if (DS.isConstrainedAuto()) {
+ TemplateIdAnnotation *TemplRep = DS.getRepAsTemplateId();
+ assert(TemplRep->Kind == TNK_Concept_template &&
+ "No other template kind should be possible for a constrained auto");
+
+ SourceRange TemplRange{TemplRep->TemplateNameLoc,
+ TemplRep->RAngleLoc.isValid()
+ ? TemplRep->RAngleLoc
+ : TemplRep->TemplateNameLoc};
+ Diag(TemplRep->TemplateNameLoc, diag::err_decomp_decl_constraint)
+ << TemplRange << FixItHint::CreateRemoval(TemplRange);
+ }
+
// Build the BindingDecls.
SmallVector<BindingDecl*, 8> Bindings;
@@ -841,6 +889,9 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
for (auto &B : D.getDecompositionDeclarator().bindings()) {
// Check for name conflicts.
DeclarationNameInfo NameInfo(B.Name, B.NameLoc);
+ IdentifierInfo *VarName = B.Name;
+ assert(VarName && "Cannot have an unnamed binding declaration");
+
LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
ForVisibleRedeclaration);
LookupName(Previous, S,
@@ -854,7 +905,7 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
Previous.clear();
}
- auto *BD = BindingDecl::Create(Context, DC, B.NameLoc, B.Name);
+ auto *BD = BindingDecl::Create(Context, DC, B.NameLoc, VarName);
// Find the shadowed declaration before filtering for scope.
NamedDecl *ShadowedDecl = D.getCXXScopeSpec().isEmpty()
@@ -866,10 +917,24 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
FilterLookupForScope(Previous, DC, S, ConsiderLinkage,
/*AllowInlineNamespace*/false);
+ bool IsPlaceholder = DS.getStorageClassSpec() != DeclSpec::SCS_static &&
+ DC->isFunctionOrMethod() && VarName->isPlaceholder();
if (!Previous.empty()) {
- auto *Old = Previous.getRepresentativeDecl();
- Diag(B.NameLoc, diag::err_redefinition) << B.Name;
- Diag(Old->getLocation(), diag::note_previous_definition);
+ if (IsPlaceholder) {
+ bool sameDC = (Previous.end() - 1)
+ ->getDeclContext()
+ ->getRedeclContext()
+ ->Equals(DC->getRedeclContext());
+ if (sameDC &&
+ isDeclInScope(*(Previous.end() - 1), CurContext, S, false)) {
+ Previous.clear();
+ DiagPlaceholderVariableDefinition(B.NameLoc);
+ }
+ } else {
+ auto *Old = Previous.getRepresentativeDecl();
+ Diag(B.NameLoc, diag::err_redefinition) << B.Name;
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ }
} else if (ShadowedDecl && !D.isRedeclaration()) {
CheckShadow(BD, ShadowedDecl, Previous);
}
@@ -983,9 +1048,9 @@ static std::string printTemplateArgs(const PrintingPolicy &PrintingPolicy,
for (auto &Arg : Args.arguments()) {
if (!First)
OS << ", ";
- Arg.getArgument().print(
- PrintingPolicy, OS,
- TemplateParameterList::shouldIncludeTypeForArgument(Params, I));
+ Arg.getArgument().print(PrintingPolicy, OS,
+ TemplateParameterList::shouldIncludeTypeForArgument(
+ PrintingPolicy, Params, I));
First = false;
I++;
}
@@ -1228,14 +1293,15 @@ static bool checkTupleLikeDecomposition(Sema &S,
if (E.isInvalid())
return true;
- E = S.BuildCallExpr(nullptr, E.get(), Loc, None, Loc);
+ E = S.BuildCallExpr(nullptr, E.get(), Loc, std::nullopt, Loc);
} else {
// Otherwise, the initializer is get<i-1>(e), where get is looked up
// in the associated namespaces.
Expr *Get = UnresolvedLookupExpr::Create(
S.Context, nullptr, NestedNameSpecifierLoc(), SourceLocation(),
- DeclarationNameInfo(GetDN, Loc), /*RequiresADL*/true, &Args,
- UnresolvedSetIterator(), UnresolvedSetIterator());
+ DeclarationNameInfo(GetDN, Loc), /*RequiresADL*/ true, &Args,
+ UnresolvedSetIterator(), UnresolvedSetIterator(),
+ /*KnownDependent=*/false);
Expr *Arg = E.get();
E = S.BuildCallExpr(nullptr, Get, Loc, Arg, Loc);
@@ -1383,9 +1449,8 @@ static bool checkMemberDecomposition(Sema &S, ArrayRef<BindingDecl*> Bindings,
DecompType.getQualifiers());
auto DiagnoseBadNumberOfBindings = [&]() -> bool {
- unsigned NumFields =
- std::count_if(RD->field_begin(), RD->field_end(),
- [](FieldDecl *FD) { return !FD->isUnnamedBitfield(); });
+ unsigned NumFields = llvm::count_if(
+ RD->fields(), [](FieldDecl *FD) { return !FD->isUnnamedBitfield(); });
assert(Bindings.size() != NumFields);
S.Diag(Src->getLocation(), diag::err_decomp_decl_wrong_number_bindings)
<< DecompType << (unsigned)Bindings.size() << NumFields << NumFields
@@ -1687,6 +1752,7 @@ static bool CheckConstexprParameterTypes(Sema &SemaRef,
e = FT->param_type_end();
i != e; ++i, ++ArgIndex) {
const ParmVarDecl *PD = FD->getParamDecl(ArgIndex);
+ assert(PD && "null in a parameter list");
SourceLocation ParamLoc = PD->getLocation();
if (CheckLiteralType(SemaRef, Kind, ParamLoc, *i,
diag::err_constexpr_non_literal_param, ArgIndex + 1,
@@ -1715,9 +1781,12 @@ static bool CheckConstexprReturnType(Sema &SemaRef, const FunctionDecl *FD,
/// \returns diagnostic %select index.
static unsigned getRecordDiagFromTagKind(TagTypeKind Tag) {
switch (Tag) {
- case TTK_Struct: return 0;
- case TTK_Interface: return 1;
- case TTK_Class: return 2;
+ case TagTypeKind::Struct:
+ return 0;
+ case TagTypeKind::Interface:
+ return 1;
+ case TagTypeKind::Class:
+ return 2;
default: llvm_unreachable("Invalid tag kind for record diagnostic!");
}
}
@@ -1892,16 +1961,26 @@ static bool CheckConstexprDeclStmt(Sema &SemaRef, const FunctionDecl *Dcl,
if (VD->isStaticLocal()) {
if (Kind == Sema::CheckConstexprKind::Diagnose) {
SemaRef.Diag(VD->getLocation(),
- diag::err_constexpr_local_var_static)
- << isa<CXXConstructorDecl>(Dcl)
- << (VD->getTLSKind() == VarDecl::TLS_Dynamic);
+ SemaRef.getLangOpts().CPlusPlus23
+ ? diag::warn_cxx20_compat_constexpr_var
+ : diag::ext_constexpr_static_var)
+ << isa<CXXConstructorDecl>(Dcl)
+ << (VD->getTLSKind() == VarDecl::TLS_Dynamic);
+ } else if (!SemaRef.getLangOpts().CPlusPlus23) {
+ return false;
}
- return false;
}
- if (CheckLiteralType(SemaRef, Kind, VD->getLocation(), VD->getType(),
- diag::err_constexpr_local_var_non_literal_type,
- isa<CXXConstructorDecl>(Dcl)))
+ if (SemaRef.LangOpts.CPlusPlus23) {
+ CheckLiteralType(SemaRef, Kind, VD->getLocation(), VD->getType(),
+ diag::warn_cxx20_compat_constexpr_var,
+ isa<CXXConstructorDecl>(Dcl),
+ /*variable of non-literal type*/ 2);
+ } else if (CheckLiteralType(
+ SemaRef, Kind, VD->getLocation(), VD->getType(),
+ diag::err_constexpr_local_var_non_literal_type,
+ isa<CXXConstructorDecl>(Dcl))) {
return false;
+ }
if (!VD->getType()->isDependentType() &&
!VD->hasInit() && !VD->isCXXForRangeDecl()) {
if (Kind == Sema::CheckConstexprKind::Diagnose) {
@@ -2021,6 +2100,7 @@ static bool
CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
SmallVectorImpl<SourceLocation> &ReturnStmts,
SourceLocation &Cxx1yLoc, SourceLocation &Cxx2aLoc,
+ SourceLocation &Cxx2bLoc,
Sema::CheckConstexprKind Kind) {
// - its function-body shall be [...] a compound-statement that contains only
switch (S->getStmtClass()) {
@@ -2050,6 +2130,13 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
ReturnStmts.push_back(S->getBeginLoc());
return true;
+ case Stmt::AttributedStmtClass:
+ // Attributes on a statement don't affect its formal kind and hence don't
+ // affect its validity in a constexpr function.
+ return CheckConstexprFunctionStmt(
+ SemaRef, Dcl, cast<AttributedStmt>(S)->getSubStmt(), ReturnStmts,
+ Cxx1yLoc, Cxx2aLoc, Cxx2bLoc, Kind);
+
case Stmt::CompoundStmtClass: {
// C++1y allows compound-statements.
if (!Cxx1yLoc.isValid())
@@ -2058,17 +2145,12 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
CompoundStmt *CompStmt = cast<CompoundStmt>(S);
for (auto *BodyIt : CompStmt->body()) {
if (!CheckConstexprFunctionStmt(SemaRef, Dcl, BodyIt, ReturnStmts,
- Cxx1yLoc, Cxx2aLoc, Kind))
+ Cxx1yLoc, Cxx2aLoc, Cxx2bLoc, Kind))
return false;
}
return true;
}
- case Stmt::AttributedStmtClass:
- if (!Cxx1yLoc.isValid())
- Cxx1yLoc = S->getBeginLoc();
- return true;
-
case Stmt::IfStmtClass: {
// C++1y allows if-statements.
if (!Cxx1yLoc.isValid())
@@ -2076,11 +2158,11 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
IfStmt *If = cast<IfStmt>(S);
if (!CheckConstexprFunctionStmt(SemaRef, Dcl, If->getThen(), ReturnStmts,
- Cxx1yLoc, Cxx2aLoc, Kind))
+ Cxx1yLoc, Cxx2aLoc, Cxx2bLoc, Kind))
return false;
if (If->getElse() &&
!CheckConstexprFunctionStmt(SemaRef, Dcl, If->getElse(), ReturnStmts,
- Cxx1yLoc, Cxx2aLoc, Kind))
+ Cxx1yLoc, Cxx2aLoc, Cxx2bLoc, Kind))
return false;
return true;
}
@@ -2096,11 +2178,12 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
break;
if (!Cxx1yLoc.isValid())
Cxx1yLoc = S->getBeginLoc();
- for (Stmt *SubStmt : S->children())
+ for (Stmt *SubStmt : S->children()) {
if (SubStmt &&
!CheckConstexprFunctionStmt(SemaRef, Dcl, SubStmt, ReturnStmts,
- Cxx1yLoc, Cxx2aLoc, Kind))
+ Cxx1yLoc, Cxx2aLoc, Cxx2bLoc, Kind))
return false;
+ }
return true;
case Stmt::SwitchStmtClass:
@@ -2111,11 +2194,24 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
// mutation, we can reasonably allow them in C++11 as an extension.
if (!Cxx1yLoc.isValid())
Cxx1yLoc = S->getBeginLoc();
- for (Stmt *SubStmt : S->children())
+ for (Stmt *SubStmt : S->children()) {
+ if (SubStmt &&
+ !CheckConstexprFunctionStmt(SemaRef, Dcl, SubStmt, ReturnStmts,
+ Cxx1yLoc, Cxx2aLoc, Cxx2bLoc, Kind))
+ return false;
+ }
+ return true;
+
+ case Stmt::LabelStmtClass:
+ case Stmt::GotoStmtClass:
+ if (Cxx2bLoc.isInvalid())
+ Cxx2bLoc = S->getBeginLoc();
+ for (Stmt *SubStmt : S->children()) {
if (SubStmt &&
!CheckConstexprFunctionStmt(SemaRef, Dcl, SubStmt, ReturnStmts,
- Cxx1yLoc, Cxx2aLoc, Kind))
+ Cxx1yLoc, Cxx2aLoc, Cxx2bLoc, Kind))
return false;
+ }
return true;
case Stmt::GCCAsmStmtClass:
@@ -2127,7 +2223,7 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
for (Stmt *SubStmt : S->children()) {
if (SubStmt &&
!CheckConstexprFunctionStmt(SemaRef, Dcl, SubStmt, ReturnStmts,
- Cxx1yLoc, Cxx2aLoc, Kind))
+ Cxx1yLoc, Cxx2aLoc, Cxx2bLoc, Kind))
return false;
}
return true;
@@ -2135,9 +2231,9 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
case Stmt::CXXCatchStmtClass:
// Do not bother checking the language mode (already covered by the
// try block check).
- if (!CheckConstexprFunctionStmt(SemaRef, Dcl,
- cast<CXXCatchStmt>(S)->getHandlerBlock(),
- ReturnStmts, Cxx1yLoc, Cxx2aLoc, Kind))
+ if (!CheckConstexprFunctionStmt(
+ SemaRef, Dcl, cast<CXXCatchStmt>(S)->getHandlerBlock(), ReturnStmts,
+ Cxx1yLoc, Cxx2aLoc, Cxx2bLoc, Kind))
return false;
return true;
@@ -2202,20 +2298,27 @@ static bool CheckConstexprFunctionBody(Sema &SemaRef, const FunctionDecl *Dcl,
//
// Note that walking the children here is enough to properly check for
// CompoundStmt and CXXTryStmt body.
- SourceLocation Cxx1yLoc, Cxx2aLoc;
+ SourceLocation Cxx1yLoc, Cxx2aLoc, Cxx2bLoc;
for (Stmt *SubStmt : Body->children()) {
if (SubStmt &&
!CheckConstexprFunctionStmt(SemaRef, Dcl, SubStmt, ReturnStmts,
- Cxx1yLoc, Cxx2aLoc, Kind))
+ Cxx1yLoc, Cxx2aLoc, Cxx2bLoc, Kind))
return false;
}
if (Kind == Sema::CheckConstexprKind::CheckValid) {
// If this is only valid as an extension, report that we don't satisfy the
// constraints of the current language.
- if ((Cxx2aLoc.isValid() && !SemaRef.getLangOpts().CPlusPlus20) ||
+ if ((Cxx2bLoc.isValid() && !SemaRef.getLangOpts().CPlusPlus23) ||
+ (Cxx2aLoc.isValid() && !SemaRef.getLangOpts().CPlusPlus20) ||
(Cxx1yLoc.isValid() && !SemaRef.getLangOpts().CPlusPlus17))
return false;
+ } else if (Cxx2bLoc.isValid()) {
+ SemaRef.Diag(Cxx2bLoc,
+ SemaRef.getLangOpts().CPlusPlus23
+ ? diag::warn_cxx20_compat_constexpr_body_invalid_stmt
+ : diag::ext_constexpr_body_invalid_stmt_cxx23)
+ << isa<CXXConstructorDecl>(Dcl);
} else if (Cxx2aLoc.isValid()) {
SemaRef.Diag(Cxx2aLoc,
SemaRef.getLangOpts().CPlusPlus20
@@ -2357,7 +2460,8 @@ static bool CheckConstexprFunctionBody(Sema &SemaRef, const FunctionDecl *Dcl,
!Expr::isPotentialConstantExpr(Dcl, Diags)) {
SemaRef.Diag(Dcl->getLocation(),
diag::ext_constexpr_function_never_constant_expr)
- << isa<CXXConstructorDecl>(Dcl) << Dcl->isConsteval();
+ << isa<CXXConstructorDecl>(Dcl) << Dcl->isConsteval()
+ << Dcl->getNameInfo().getSourceRange();
for (size_t I = 0, N = Diags.size(); I != N; ++I)
SemaRef.Diag(Diags[I].first, Diags[I].second);
// Don't return false here: we allow this for compatibility in
@@ -2367,6 +2471,117 @@ static bool CheckConstexprFunctionBody(Sema &SemaRef, const FunctionDecl *Dcl,
return true;
}
+bool Sema::CheckImmediateEscalatingFunctionDefinition(
+ FunctionDecl *FD, const sema::FunctionScopeInfo *FSI) {
+ if (!getLangOpts().CPlusPlus20 || !FD->isImmediateEscalating())
+ return true;
+ FD->setBodyContainsImmediateEscalatingExpressions(
+ FSI->FoundImmediateEscalatingExpression);
+ if (FSI->FoundImmediateEscalatingExpression) {
+ auto it = UndefinedButUsed.find(FD->getCanonicalDecl());
+ if (it != UndefinedButUsed.end()) {
+ Diag(it->second, diag::err_immediate_function_used_before_definition)
+ << it->first;
+ Diag(FD->getLocation(), diag::note_defined_here) << FD;
+ if (FD->isImmediateFunction() && !FD->isConsteval())
+ DiagnoseImmediateEscalatingReason(FD);
+ return false;
+ }
+ }
+ return true;
+}
+
+void Sema::DiagnoseImmediateEscalatingReason(FunctionDecl *FD) {
+ assert(FD->isImmediateEscalating() && !FD->isConsteval() &&
+ "expected an immediate function");
+ assert(FD->hasBody() && "expected the function to have a body");
+ struct ImmediateEscalatingExpressionsVisitor
+ : public RecursiveASTVisitor<ImmediateEscalatingExpressionsVisitor> {
+
+ using Base = RecursiveASTVisitor<ImmediateEscalatingExpressionsVisitor>;
+ Sema &SemaRef;
+
+ const FunctionDecl *ImmediateFn;
+ bool ImmediateFnIsConstructor;
+ CXXConstructorDecl *CurrentConstructor = nullptr;
+ CXXCtorInitializer *CurrentInit = nullptr;
+
+ ImmediateEscalatingExpressionsVisitor(Sema &SemaRef, FunctionDecl *FD)
+ : SemaRef(SemaRef), ImmediateFn(FD),
+ ImmediateFnIsConstructor(isa<CXXConstructorDecl>(FD)) {}
+
+ bool shouldVisitImplicitCode() const { return true; }
+ bool shouldVisitLambdaBody() const { return false; }
+
+ void Diag(const Expr *E, const FunctionDecl *Fn, bool IsCall) {
+ SourceLocation Loc = E->getBeginLoc();
+ SourceRange Range = E->getSourceRange();
+ if (CurrentConstructor && CurrentInit) {
+ Loc = CurrentConstructor->getLocation();
+ Range = CurrentInit->isWritten() ? CurrentInit->getSourceRange()
+ : SourceRange();
+ }
+
+ FieldDecl* InitializedField = CurrentInit ? CurrentInit->getAnyMember() : nullptr;
+
+ SemaRef.Diag(Loc, diag::note_immediate_function_reason)
+ << ImmediateFn << Fn << Fn->isConsteval() << IsCall
+ << isa<CXXConstructorDecl>(Fn) << ImmediateFnIsConstructor
+ << (InitializedField != nullptr)
+ << (CurrentInit && !CurrentInit->isWritten())
+ << InitializedField << Range;
+ }
+ bool TraverseCallExpr(CallExpr *E) {
+ if (const auto *DR =
+ dyn_cast<DeclRefExpr>(E->getCallee()->IgnoreImplicit());
+ DR && DR->isImmediateEscalating()) {
+ Diag(E, E->getDirectCallee(), /*IsCall=*/true);
+ return false;
+ }
+
+ for (Expr *A : E->arguments())
+ if (!getDerived().TraverseStmt(A))
+ return false;
+
+ return true;
+ }
+
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ if (const auto *ReferencedFn = dyn_cast<FunctionDecl>(E->getDecl());
+ ReferencedFn && E->isImmediateEscalating()) {
+ Diag(E, ReferencedFn, /*IsCall=*/false);
+ return false;
+ }
+
+ return true;
+ }
+
+ bool VisitCXXConstructExpr(CXXConstructExpr *E) {
+ CXXConstructorDecl *D = E->getConstructor();
+ if (E->isImmediateEscalating()) {
+ Diag(E, D, /*IsCall=*/true);
+ return false;
+ }
+ return true;
+ }
+
+ bool TraverseConstructorInitializer(CXXCtorInitializer *Init) {
+ llvm::SaveAndRestore RAII(CurrentInit, Init);
+ return Base::TraverseConstructorInitializer(Init);
+ }
+
+ bool TraverseCXXConstructorDecl(CXXConstructorDecl *Ctr) {
+ llvm::SaveAndRestore RAII(CurrentConstructor, Ctr);
+ return Base::TraverseCXXConstructorDecl(Ctr);
+ }
+
+ bool TraverseType(QualType T) { return true; }
+ bool VisitBlockExpr(BlockExpr *T) { return true; }
+
+ } Visitor(*this, FD);
+ Visitor.TraverseDecl(FD);
+}
+
/// Get the class that is directly named by the current context. This is the
/// class for which an unqualified-id in this scope could name a constructor
/// or destructor.
@@ -2467,6 +2682,11 @@ Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc) {
+ // In HLSL, unspecified class access is public rather than private.
+ if (getLangOpts().HLSL && Class->getTagKind() == TagTypeKind::Class &&
+ Access == AS_none)
+ Access = AS_public;
+
QualType BaseType = TInfo->getType();
if (BaseType->containsErrors()) {
// Already emitted a diagnostic when parsing the error type.
@@ -2516,9 +2736,9 @@ Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
// emitted.
if (!Class->getTypeForDecl()->isDependentType())
Class->setInvalidDecl();
- return new (Context) CXXBaseSpecifier(SpecifierRange, Virtual,
- Class->getTagKind() == TTK_Class,
- Access, TInfo, EllipsisLoc);
+ return new (Context) CXXBaseSpecifier(
+ SpecifierRange, Virtual, Class->getTagKind() == TagTypeKind::Class,
+ Access, TInfo, EllipsisLoc);
}
// Base specifiers must be record types.
@@ -2535,7 +2755,8 @@ Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
}
// For the MS ABI, propagate DLL attributes to base class templates.
- if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft() ||
+ Context.getTargetInfo().getTriple().isPS()) {
if (Attr *ClassAttr = getDLLAttr(Class)) {
if (auto *BaseTemplate = dyn_cast_or_null<ClassTemplateSpecializationDecl>(
BaseType->getAsCXXRecordDecl())) {
@@ -2603,9 +2824,9 @@ Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
Class->setInvalidDecl();
// Create the base specifier.
- return new (Context) CXXBaseSpecifier(SpecifierRange, Virtual,
- Class->getTagKind() == TTK_Class,
- Access, TInfo, EllipsisLoc);
+ return new (Context) CXXBaseSpecifier(
+ SpecifierRange, Virtual, Class->getTagKind() == TagTypeKind::Class,
+ Access, TInfo, EllipsisLoc);
}
/// ActOnBaseSpecifier - Parsed a base specifier. A base specifier is
@@ -2613,12 +2834,11 @@ Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
/// example:
/// class foo : public bar, virtual private baz {
/// 'public bar' and 'virtual private baz' are each base-specifiers.
-BaseResult
-Sema::ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
- ParsedAttributes &Attributes,
- bool Virtual, AccessSpecifier Access,
- ParsedType basetype, SourceLocation BaseLoc,
- SourceLocation EllipsisLoc) {
+BaseResult Sema::ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
+ const ParsedAttributesView &Attributes,
+ bool Virtual, AccessSpecifier Access,
+ ParsedType basetype, SourceLocation BaseLoc,
+ SourceLocation EllipsisLoc) {
if (!classdecl)
return true;
@@ -2635,10 +2855,12 @@ Sema::ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
for (const ParsedAttr &AL : Attributes) {
if (AL.isInvalid() || AL.getKind() == ParsedAttr::IgnoredAttribute)
continue;
- Diag(AL.getLoc(), AL.getKind() == ParsedAttr::UnknownAttribute
- ? (unsigned)diag::warn_unknown_attribute_ignored
- : (unsigned)diag::err_base_specifier_attribute)
- << AL << AL.getRange();
+ if (AL.getKind() == ParsedAttr::UnknownAttribute)
+ Diag(AL.getLoc(), diag::warn_unknown_attribute_ignored)
+ << AL << AL.getRange();
+ else
+ Diag(AL.getLoc(), diag::err_base_specifier_attribute)
+ << AL << AL.isRegularKeywordAttribute() << AL.getRange();
}
TypeSourceInfo *TInfo = nullptr;
@@ -2727,6 +2949,8 @@ bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class,
KnownBase = Bases[idx];
Bases[NumGoodBases++] = Bases[idx];
+ if (NewBaseType->isDependentType())
+ continue;
// Note this base's direct & indirect bases, if there could be ambiguity.
if (Bases.size() > 1)
NoteIndirectBases(Context, IndirectBaseTypes, NewBaseType);
@@ -3044,7 +3268,7 @@ void Sema::CheckOverrideControl(NamedDecl *D) {
return;
if (MD && !MD->isVirtual()) {
- // If we have a non-virtual method, check if if hides a virtual method.
+ // If we have a non-virtual method, check if it hides a virtual method.
// (In that case, it's most likely the method has the wrong type.)
SmallVector<CXXMethodDecl *, 8> OverloadedMethods;
FindHiddenVirtualMethods(MD, OverloadedMethods);
@@ -3156,16 +3380,6 @@ static bool InitializationHasSideEffects(const FieldDecl &FD) {
return false;
}
-static const ParsedAttr *getMSPropertyAttr(const ParsedAttributesView &list) {
- ParsedAttributesView::const_iterator Itr =
- llvm::find_if(list, [](const ParsedAttr &AL) {
- return AL.isDeclspecPropertyAttribute();
- });
- if (Itr != list.end())
- return &*Itr;
- return nullptr;
-}
-
// Check if there is a field shadowing.
void Sema::CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
@@ -3243,7 +3457,7 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
bool isFunc = D.isDeclarationOfFunction();
const ParsedAttr *MSPropertyAttr =
- getMSPropertyAttr(D.getDeclSpec().getAttributes());
+ D.getDeclSpec().getAttributes().getMSPropertyAttr();
if (cast<CXXRecordDecl>(CurContext)->isInterface()) {
// The Microsoft extension __interface only permits public member functions
@@ -3391,6 +3605,15 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
return nullptr;
}
+ if (D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId) {
+ Diag(D.getIdentifierLoc(), diag::err_member_with_template_arguments)
+ << II
+ << SourceRange(D.getName().TemplateId->LAngleLoc,
+ D.getName().TemplateId->RAngleLoc)
+ << D.getName().TemplateId->LAngleLoc;
+ D.SetIdentifier(II, Loc);
+ }
+
if (SS.isSet() && !SS.isInvalid()) {
// The user provided a superfluous scope specifier inside a class
// definition:
@@ -3492,12 +3715,12 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
}
if (VS.isOverrideSpecified())
- Member->addAttr(OverrideAttr::Create(Context, VS.getOverrideLoc(),
- AttributeCommonInfo::AS_Keyword));
+ Member->addAttr(OverrideAttr::Create(Context, VS.getOverrideLoc()));
if (VS.isFinalSpecified())
- Member->addAttr(FinalAttr::Create(
- Context, VS.getFinalLoc(), AttributeCommonInfo::AS_Keyword,
- static_cast<FinalAttr::Spelling>(VS.isFinalSpelledSealed())));
+ Member->addAttr(FinalAttr::Create(Context, VS.getFinalLoc(),
+ VS.isFinalSpelledSealed()
+ ? FinalAttr::Keyword_sealed
+ : FinalAttr::Keyword_final));
if (VS.getLastLocation().isValid()) {
// Update the end location of a method that has a virt-specifiers.
@@ -3516,10 +3739,20 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
if (!Diags.isIgnored(diag::warn_unused_private_field, FD->getLocation())) {
// Remember all explicit private FieldDecls that have a name, no side
// effects and are not part of a dependent type declaration.
+
+ auto DeclHasUnusedAttr = [](const QualType &T) {
+ if (const TagDecl *TD = T->getAsTagDecl())
+ return TD->hasAttr<UnusedAttr>();
+ if (const TypedefType *TDT = T->getAs<TypedefType>())
+ return TDT->getDecl()->hasAttr<UnusedAttr>();
+ return false;
+ };
+
if (!FD->isImplicit() && FD->getDeclName() &&
FD->getAccess() == AS_private &&
!FD->hasAttr<UnusedAttr>() &&
!FD->getParent()->isDependentContext() &&
+ !DeclHasUnusedAttr(FD->getType()) &&
!InitializationHasSideEffects(*FD))
UnusedPrivateFields.insert(FD);
}
@@ -3581,9 +3814,8 @@ namespace {
llvm::SmallVector<unsigned, 4> UsedFieldIndex;
// Discard the first field since it is the field decl that is being
// initialized.
- for (auto I = Fields.rbegin() + 1, E = Fields.rend(); I != E; ++I) {
- UsedFieldIndex.push_back((*I)->getFieldIndex());
- }
+ for (const FieldDecl *FD : llvm::drop_begin(llvm::reverse(Fields)))
+ UsedFieldIndex.push_back(FD->getFieldIndex());
for (auto UsedIter = UsedFieldIndex.begin(),
UsedEnd = UsedFieldIndex.end(),
@@ -3728,7 +3960,7 @@ namespace {
void CheckInitListExpr(InitListExpr *ILE) {
InitFieldIndex.push_back(0);
- for (auto Child : ILE->children()) {
+ for (auto *Child : ILE->children()) {
if (InitListExpr *SubList = dyn_cast<InitListExpr>(Child)) {
CheckInitListExpr(SubList);
} else {
@@ -3799,7 +4031,7 @@ namespace {
Expr *Callee = E->getCallee();
if (isa<MemberExpr>(Callee)) {
HandleValue(Callee, false /*AddressOf*/);
- for (auto Arg : E->arguments())
+ for (auto *Arg : E->arguments())
Visit(Arg);
return;
}
@@ -3824,7 +4056,7 @@ namespace {
return Inherited::VisitCXXOperatorCallExpr(E);
Visit(Callee);
- for (auto Arg : E->arguments())
+ for (auto *Arg : E->arguments())
HandleValue(Arg->IgnoreParenImpCasts(), false /*AddressOf*/);
}
@@ -3897,7 +4129,7 @@ namespace {
}
llvm::SmallPtrSet<QualType, 4> UninitializedBaseClasses;
- for (auto I : RD->bases())
+ for (const auto &I : RD->bases())
UninitializedBaseClasses.insert(I.getType().getCanonicalType());
if (UninitializedFields.empty() && UninitializedBaseClasses.empty())
@@ -3975,6 +4207,21 @@ ExprResult Sema::ActOnRequiresClause(ExprResult ConstraintExpr) {
return ConstraintExpr;
}
+ExprResult Sema::ConvertMemberDefaultInitExpression(FieldDecl *FD,
+ Expr *InitExpr,
+ SourceLocation InitLoc) {
+ InitializedEntity Entity =
+ InitializedEntity::InitializeMemberFromDefaultMemberInitializer(FD);
+ InitializationKind Kind =
+ FD->getInClassInitStyle() == ICIS_ListInit
+ ? InitializationKind::CreateDirectList(InitExpr->getBeginLoc(),
+ InitExpr->getBeginLoc(),
+ InitExpr->getEndLoc())
+ : InitializationKind::CreateCopy(InitExpr->getBeginLoc(), InitLoc);
+ InitializationSequence Seq(*this, Entity, Kind, InitExpr);
+ return Seq.Perform(*this, Entity, Kind, InitExpr);
+}
+
/// This is invoked after parsing an in-class initializer for a
/// non-static C++ class member, and after instantiating an in-class initializer
/// in a class template. Such actions are deferred until the class is complete.
@@ -4001,36 +4248,23 @@ void Sema::ActOnFinishCXXInClassMemberInitializer(Decl *D,
return;
}
- ExprResult Init = InitExpr;
- if (!FD->getType()->isDependentType() && !InitExpr->isTypeDependent()) {
- InitializedEntity Entity =
- InitializedEntity::InitializeMemberFromDefaultMemberInitializer(FD);
- InitializationKind Kind =
- FD->getInClassInitStyle() == ICIS_ListInit
- ? InitializationKind::CreateDirectList(InitExpr->getBeginLoc(),
- InitExpr->getBeginLoc(),
- InitExpr->getEndLoc())
- : InitializationKind::CreateCopy(InitExpr->getBeginLoc(), InitLoc);
- InitializationSequence Seq(*this, Entity, Kind, InitExpr);
- Init = Seq.Perform(*this, Entity, Kind, InitExpr);
+ ExprResult Init = CorrectDelayedTyposInExpr(InitExpr, /*InitDecl=*/nullptr,
+ /*RecoverUncorrectedTypos=*/true);
+ assert(Init.isUsable() && "Init should at least have a RecoveryExpr");
+ if (!FD->getType()->isDependentType() && !Init.get()->isTypeDependent()) {
+ Init = ConvertMemberDefaultInitExpression(FD, Init.get(), InitLoc);
+ // C++11 [class.base.init]p7:
+ // The initialization of each base and member constitutes a
+ // full-expression.
+ if (!Init.isInvalid())
+ Init = ActOnFinishFullExpr(Init.get(), /*DiscarededValue=*/false);
if (Init.isInvalid()) {
FD->setInvalidDecl();
return;
}
}
- // C++11 [class.base.init]p7:
- // The initialization of each base and member constitutes a
- // full-expression.
- Init = ActOnFinishFullExpr(Init.get(), InitLoc, /*DiscardedValue*/ false);
- if (Init.isInvalid()) {
- FD->setInvalidDecl();
- return;
- }
-
- InitExpr = Init.get();
-
- FD->setInClassInitializer(InitExpr);
+ FD->setInClassInitializer(Init.get());
}
/// Find the direct and/or virtual base specifiers that
@@ -4114,7 +4348,7 @@ Sema::ActOnMemInitializer(Decl *ConstructorD,
namespace {
// Callback to only accept typo corrections that can be a valid C++ member
-// intializer: either a non-static field member or a base class.
+// initializer: either a non-static field member or a base class.
class MemInitializerValidatorCCC final : public CorrectionCandidateCallback {
public:
explicit MemInitializerValidatorCCC(CXXRecordDecl *ClassDecl)
@@ -4139,16 +4373,57 @@ private:
}
+bool Sema::DiagRedefinedPlaceholderFieldDecl(SourceLocation Loc,
+ RecordDecl *ClassDecl,
+ const IdentifierInfo *Name) {
+ DeclContextLookupResult Result = ClassDecl->lookup(Name);
+ DeclContextLookupResult::iterator Found =
+ llvm::find_if(Result, [this](const NamedDecl *Elem) {
+ return isa<FieldDecl, IndirectFieldDecl>(Elem) &&
+ Elem->isPlaceholderVar(getLangOpts());
+ });
+ // We did not find a placeholder variable
+ if (Found == Result.end())
+ return false;
+ Diag(Loc, diag::err_using_placeholder_variable) << Name;
+ for (DeclContextLookupResult::iterator It = Found; It != Result.end(); It++) {
+ const NamedDecl *ND = *It;
+ if (ND->getDeclContext() != ND->getDeclContext())
+ break;
+ if (isa<FieldDecl, IndirectFieldDecl>(ND) &&
+ ND->isPlaceholderVar(getLangOpts()))
+ Diag(ND->getLocation(), diag::note_reference_placeholder) << ND;
+ }
+ return true;
+}
+
+ValueDecl *
+Sema::tryLookupUnambiguousFieldDecl(RecordDecl *ClassDecl,
+ const IdentifierInfo *MemberOrBase) {
+ ValueDecl *ND = nullptr;
+ for (auto *D : ClassDecl->lookup(MemberOrBase)) {
+ if (isa<FieldDecl, IndirectFieldDecl>(D)) {
+ bool IsPlaceholder = D->isPlaceholderVar(getLangOpts());
+ if (ND) {
+ if (IsPlaceholder && D->getDeclContext() == ND->getDeclContext())
+ return nullptr;
+ break;
+ }
+ if (!IsPlaceholder)
+ return cast<ValueDecl>(D);
+ ND = cast<ValueDecl>(D);
+ }
+ }
+ return ND;
+}
+
ValueDecl *Sema::tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase) {
if (SS.getScopeRep() || TemplateTypeTy)
return nullptr;
- for (auto *D : ClassDecl->lookup(MemberOrBase))
- if (isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D))
- return cast<ValueDecl>(D);
- return nullptr;
+ return tryLookupUnambiguousFieldDecl(ClassDecl, MemberOrBase);
}
/// Handle a C++ member initializer.
@@ -4162,7 +4437,8 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc) {
- ExprResult Res = CorrectDelayedTyposInExpr(Init);
+ ExprResult Res = CorrectDelayedTyposInExpr(Init, /*InitDecl=*/nullptr,
+ /*RecoverUncorrectedTypos=*/true);
if (!Res.isUsable())
return true;
Init = Res.get();
@@ -4214,7 +4490,7 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
if (BaseType.isNull())
return true;
} else if (DS.getTypeSpecType() == TST_decltype) {
- BaseType = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc());
+ BaseType = BuildDecltypeType(DS.getRepAsExpr());
} else if (DS.getTypeSpecType() == TST_decltype_auto) {
Diag(DS.getTypeSpecTypeLoc(), diag::err_decltype_auto_invalid);
return true;
@@ -4238,9 +4514,9 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
if (!NotUnknownSpecialization) {
// When the scope specifier can refer to a member of an unknown
// specialization, we take it as a type name.
- BaseType = CheckTypenameType(ETK_None, SourceLocation(),
- SS.getWithLocInContext(Context),
- *MemberOrBase, IdLoc);
+ BaseType = CheckTypenameType(
+ ElaboratedTypeKeyword::None, SourceLocation(),
+ SS.getWithLocInContext(Context), *MemberOrBase, IdLoc);
if (BaseType.isNull())
return true;
@@ -4258,6 +4534,25 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
}
}
+ if (getLangOpts().MSVCCompat && !getLangOpts().CPlusPlus20) {
+ if (auto UnqualifiedBase = R.getAsSingle<ClassTemplateDecl>()) {
+ auto *TempSpec = cast<TemplateSpecializationType>(
+ UnqualifiedBase->getInjectedClassNameSpecialization());
+ TemplateName TN = TempSpec->getTemplateName();
+ for (auto const &Base : ClassDecl->bases()) {
+ auto BaseTemplate =
+ Base.getType()->getAs<TemplateSpecializationType>();
+ if (BaseTemplate && Context.hasSameTemplateName(
+ BaseTemplate->getTemplateName(), TN)) {
+ Diag(IdLoc, diag::ext_unqualified_base_class)
+ << SourceRange(IdLoc, Init->getSourceRange().getEnd());
+ BaseType = Base.getType();
+ break;
+ }
+ }
+ }
+ }
+
// If no results were found, try to correct typos.
TypoCorrection Corr;
MemInitializerValidatorCCC CCC(ClassDecl);
@@ -4304,17 +4599,14 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
}
if (BaseType.isNull()) {
- BaseType = Context.getTypeDeclType(TyD);
+ BaseType = getElaboratedType(ElaboratedTypeKeyword::None, SS,
+ Context.getTypeDeclType(TyD));
MarkAnyDeclReferenced(TyD->getLocation(), TyD, /*OdrUse=*/false);
- if (SS.isSet()) {
- BaseType = Context.getElaboratedType(ETK_None, SS.getScopeRep(),
- BaseType);
- TInfo = Context.CreateTypeSourceInfo(BaseType);
- ElaboratedTypeLoc TL = TInfo->getTypeLoc().castAs<ElaboratedTypeLoc>();
- TL.getNamedTypeLoc().castAs<TypeSpecTypeLoc>().setNameLoc(IdLoc);
- TL.setElaboratedKeywordLoc(SourceLocation());
- TL.setQualifierLoc(SS.getWithLocInContext(Context));
- }
+ TInfo = Context.CreateTypeSourceInfo(BaseType);
+ ElaboratedTypeLoc TL = TInfo->getTypeLoc().castAs<ElaboratedTypeLoc>();
+ TL.getNamedTypeLoc().castAs<TypeSpecTypeLoc>().setNameLoc(IdLoc);
+ TL.setElaboratedKeywordLoc(SourceLocation());
+ TL.setQualifierLoc(SS.getWithLocInContext(Context));
}
}
@@ -4375,18 +4667,25 @@ Sema::BuildMemberInitializer(ValueDecl *Member, Expr *Init,
InitializationSequence InitSeq(*this, MemberEntity, Kind, Args);
ExprResult MemberInit = InitSeq.Perform(*this, MemberEntity, Kind, Args,
nullptr);
- if (MemberInit.isInvalid())
- return true;
-
- // C++11 [class.base.init]p7:
- // The initialization of each base and member constitutes a
- // full-expression.
- MemberInit = ActOnFinishFullExpr(MemberInit.get(), InitRange.getBegin(),
- /*DiscardedValue*/ false);
- if (MemberInit.isInvalid())
- return true;
-
- Init = MemberInit.get();
+ if (!MemberInit.isInvalid()) {
+ // C++11 [class.base.init]p7:
+ // The initialization of each base and member constitutes a
+ // full-expression.
+ MemberInit = ActOnFinishFullExpr(MemberInit.get(), InitRange.getBegin(),
+ /*DiscardedValue*/ false);
+ }
+
+ if (MemberInit.isInvalid()) {
+ // Args were sensible expressions but we couldn't initialize the member
+ // from them. Preserve them in a RecoveryExpr instead.
+ Init = CreateRecoveryExpr(InitRange.getBegin(), InitRange.getEnd(), Args,
+ Member->getType())
+ .get();
+ if (!Init)
+ return true;
+ } else {
+ Init = MemberInit.get();
+ }
}
if (DirectMember) {
@@ -4403,10 +4702,10 @@ Sema::BuildMemberInitializer(ValueDecl *Member, Expr *Init,
MemInitResult
Sema::BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init,
CXXRecordDecl *ClassDecl) {
- SourceLocation NameLoc = TInfo->getTypeLoc().getLocalSourceRange().getBegin();
+ SourceLocation NameLoc = TInfo->getTypeLoc().getSourceRange().getBegin();
if (!LangOpts.CPlusPlus11)
return Diag(NameLoc, diag::err_delegating_ctor)
- << TInfo->getTypeLoc().getLocalSourceRange();
+ << TInfo->getTypeLoc().getSourceRange();
Diag(NameLoc, diag::warn_cxx98_compat_delegating_ctor);
bool InitList = true;
@@ -4428,29 +4727,35 @@ Sema::BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init,
InitializationSequence InitSeq(*this, DelegationEntity, Kind, Args);
ExprResult DelegationInit = InitSeq.Perform(*this, DelegationEntity, Kind,
Args, nullptr);
- if (DelegationInit.isInvalid())
- return true;
-
- assert(cast<CXXConstructExpr>(DelegationInit.get())->getConstructor() &&
- "Delegating constructor with no target?");
+ if (!DelegationInit.isInvalid()) {
+ assert((DelegationInit.get()->containsErrors() ||
+ cast<CXXConstructExpr>(DelegationInit.get())->getConstructor()) &&
+ "Delegating constructor with no target?");
- // C++11 [class.base.init]p7:
- // The initialization of each base and member constitutes a
- // full-expression.
- DelegationInit = ActOnFinishFullExpr(
- DelegationInit.get(), InitRange.getBegin(), /*DiscardedValue*/ false);
- if (DelegationInit.isInvalid())
- return true;
+ // C++11 [class.base.init]p7:
+ // The initialization of each base and member constitutes a
+ // full-expression.
+ DelegationInit = ActOnFinishFullExpr(
+ DelegationInit.get(), InitRange.getBegin(), /*DiscardedValue*/ false);
+ }
- // If we are in a dependent context, template instantiation will
- // perform this type-checking again. Just save the arguments that we
- // received in a ParenListExpr.
- // FIXME: This isn't quite ideal, since our ASTs don't capture all
- // of the information that we have about the base
- // initializer. However, deconstructing the ASTs is a dicey process,
- // and this approach is far more likely to get the corner cases right.
- if (CurContext->isDependentContext())
- DelegationInit = Init;
+ if (DelegationInit.isInvalid()) {
+ DelegationInit =
+ CreateRecoveryExpr(InitRange.getBegin(), InitRange.getEnd(), Args,
+ QualType(ClassDecl->getTypeForDecl(), 0));
+ if (DelegationInit.isInvalid())
+ return true;
+ } else {
+ // If we are in a dependent context, template instantiation will
+ // perform this type-checking again. Just save the arguments that we
+ // received in a ParenListExpr.
+ // FIXME: This isn't quite ideal, since our ASTs don't capture all
+ // of the information that we have about the base
+ // initializer. However, deconstructing the ASTs is a dicey process,
+ // and this approach is far more likely to get the corner cases right.
+ if (CurContext->isDependentContext())
+ DelegationInit = Init;
+ }
return new (Context) CXXCtorInitializer(Context, TInfo, InitRange.getBegin(),
DelegationInit.getAs<Expr>(),
@@ -4461,12 +4766,11 @@ MemInitResult
Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
Expr *Init, CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc) {
- SourceLocation BaseLoc
- = BaseTInfo->getTypeLoc().getLocalSourceRange().getBegin();
+ SourceLocation BaseLoc = BaseTInfo->getTypeLoc().getBeginLoc();
if (!BaseType->isDependentType() && !BaseType->isRecordType())
return Diag(BaseLoc, diag::err_base_init_does_not_name_class)
- << BaseType << BaseTInfo->getTypeLoc().getLocalSourceRange();
+ << BaseType << BaseTInfo->getTypeLoc().getSourceRange();
// C++ [class.base.init]p2:
// [...] Unless the mem-initializer-id names a nonstatic data
@@ -4474,7 +4778,12 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
// of that class, the mem-initializer is ill-formed. A
// mem-initializer-list can initialize a base class using any
// name that denotes that base class type.
- bool Dependent = BaseType->isDependentType() || Init->isTypeDependent();
+
+ // We can store the initializers in "as-written" form and delay analysis until
+ // instantiation if the constructor is dependent. But not for dependent
+ // (broken) code in a non-template! SetCtorInitializers does not expect this.
+ bool Dependent = CurContext->isDependentContext() &&
+ (BaseType->isDependentType() || Init->isTypeDependent());
SourceRange InitRange = Init->getSourceRange();
if (EllipsisLoc.isValid()) {
@@ -4519,8 +4828,8 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
Dependent = true;
else
return Diag(BaseLoc, diag::err_not_direct_base_or_virtual)
- << BaseType << Context.getTypeDeclType(ClassDecl)
- << BaseTInfo->getTypeLoc().getLocalSourceRange();
+ << BaseType << Context.getTypeDeclType(ClassDecl)
+ << BaseTInfo->getTypeLoc().getSourceRange();
}
}
@@ -4561,26 +4870,30 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
InitRange.getEnd());
InitializationSequence InitSeq(*this, BaseEntity, Kind, Args);
ExprResult BaseInit = InitSeq.Perform(*this, BaseEntity, Kind, Args, nullptr);
- if (BaseInit.isInvalid())
- return true;
-
- // C++11 [class.base.init]p7:
- // The initialization of each base and member constitutes a
- // full-expression.
- BaseInit = ActOnFinishFullExpr(BaseInit.get(), InitRange.getBegin(),
- /*DiscardedValue*/ false);
- if (BaseInit.isInvalid())
- return true;
+ if (!BaseInit.isInvalid()) {
+ // C++11 [class.base.init]p7:
+ // The initialization of each base and member constitutes a
+ // full-expression.
+ BaseInit = ActOnFinishFullExpr(BaseInit.get(), InitRange.getBegin(),
+ /*DiscardedValue*/ false);
+ }
- // If we are in a dependent context, template instantiation will
- // perform this type-checking again. Just save the arguments that we
- // received in a ParenListExpr.
- // FIXME: This isn't quite ideal, since our ASTs don't capture all
- // of the information that we have about the base
- // initializer. However, deconstructing the ASTs is a dicey process,
- // and this approach is far more likely to get the corner cases right.
- if (CurContext->isDependentContext())
- BaseInit = Init;
+ if (BaseInit.isInvalid()) {
+ BaseInit = CreateRecoveryExpr(InitRange.getBegin(), InitRange.getEnd(),
+ Args, BaseType);
+ if (BaseInit.isInvalid())
+ return true;
+ } else {
+ // If we are in a dependent context, template instantiation will
+ // perform this type-checking again. Just save the arguments that we
+ // received in a ParenListExpr.
+ // FIXME: This isn't quite ideal, since our ASTs don't capture all
+ // of the information that we have about the base
+ // initializer. However, deconstructing the ASTs is a dicey process,
+ // and this approach is far more likely to get the corner cases right.
+ if (CurContext->isDependentContext())
+ BaseInit = Init;
+ }
return new (Context) CXXCtorInitializer(Context, BaseTInfo,
BaseSpec->isVirtual(),
@@ -4590,10 +4903,10 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
}
// Create a static_cast\<T&&>(expr).
-static Expr *CastForMoving(Sema &SemaRef, Expr *E, QualType T = QualType()) {
- if (T.isNull()) T = E->getType();
- QualType TargetType = SemaRef.BuildReferenceType(
- T, /*SpelledAsLValue*/false, SourceLocation(), DeclarationName());
+static Expr *CastForMoving(Sema &SemaRef, Expr *E) {
+ QualType TargetType =
+ SemaRef.BuildReferenceType(E->getType(), /*SpelledAsLValue*/ false,
+ SourceLocation(), DeclarationName());
SourceLocation ExprLoc = E->getBeginLoc();
TypeSourceInfo *TargetLoc = SemaRef.Context.getTrivialTypeSourceInfo(
TargetType, ExprLoc);
@@ -4629,8 +4942,8 @@ BuildImplicitBaseInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
case IIK_Default: {
InitializationKind InitKind
= InitializationKind::CreateDefault(Constructor->getLocation());
- InitializationSequence InitSeq(SemaRef, InitEntity, InitKind, None);
- BaseInit = InitSeq.Perform(SemaRef, InitEntity, InitKind, None);
+ InitializationSequence InitSeq(SemaRef, InitEntity, InitKind, std::nullopt);
+ BaseInit = InitSeq.Perform(SemaRef, InitEntity, InitKind, std::nullopt);
break;
}
@@ -4794,9 +5107,9 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
InitializationKind InitKind =
InitializationKind::CreateDefault(Loc);
- InitializationSequence InitSeq(SemaRef, InitEntity, InitKind, None);
+ InitializationSequence InitSeq(SemaRef, InitEntity, InitKind, std::nullopt);
ExprResult MemberInit =
- InitSeq.Perform(SemaRef, InitEntity, InitKind, None);
+ InitSeq.Perform(SemaRef, InitEntity, InitKind, std::nullopt);
MemberInit = SemaRef.MaybeCreateExprWithCleanups(MemberInit);
if (MemberInit.isInvalid())
@@ -5350,8 +5663,7 @@ static void DiagnoseBaseOrMemInitializerOrder(
return;
// Sort based on the ideal order, first in the pair.
- llvm::sort(CorrelatedInitOrder,
- [](auto &LHS, auto &RHS) { return LHS.first < RHS.first; });
+ llvm::sort(CorrelatedInitOrder, llvm::less_first());
// Introduce a new scope as SemaDiagnosticBuilder needs to be destroyed to
// emit the diagnostic before we can try adding notes.
@@ -5565,7 +5877,9 @@ Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location,
continue;
CXXDestructorDecl *Dtor = LookupDestructor(FieldClassDecl);
- assert(Dtor && "No dtor found for FieldClassDecl!");
+ // Dtor might still be missing, e.g because it's invalid.
+ if (!Dtor)
+ continue;
CheckDestructorAccess(Field->getLocation(), Dtor,
PDiag(diag::err_access_dtor_field)
<< Field->getDeclName()
@@ -5611,7 +5925,9 @@ Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location,
continue;
CXXDestructorDecl *Dtor = LookupDestructor(BaseClassDecl);
- assert(Dtor && "No dtor found for BaseClassDecl!");
+ // Dtor might still be missing, e.g because it's invalid.
+ if (!Dtor)
+ continue;
// FIXME: caret should be on the start of the class name
CheckDestructorAccess(Base.getBeginLoc(), Dtor,
@@ -5648,7 +5964,9 @@ void Sema::MarkVirtualBaseDestructorsReferenced(
continue;
CXXDestructorDecl *Dtor = LookupDestructor(BaseClassDecl);
- assert(Dtor && "No dtor found for BaseClassDecl!");
+ // Dtor might still be missing, e.g because it's invalid.
+ if (!Dtor)
+ continue;
if (CheckDestructorAccess(
ClassDecl->getLocation(), Dtor,
PDiag(diag::err_access_dtor_vbase)
@@ -5744,7 +6062,7 @@ void Sema::DiagnoseAbstractType(const CXXRecordDecl *RD) {
if (SO->second.size() != 1)
continue;
- if (!SO->second.front().Method->isPure())
+ if (!SO->second.front().Method->isPureVirtual())
continue;
if (!SeenPureMethods.insert(SO->second.front().Method).second)
@@ -5859,6 +6177,7 @@ struct CheckAbstractUsage {
if (CT != Info.AbstractType) return;
// It matched; do some magic.
+ // FIXME: These should be at most warnings. See P0929R2, CWG1640, CWG1646.
if (Sel == Sema::AbstractArrayType) {
Info.S.Diag(Ctx->getLocation(), diag::err_array_of_abstract_type)
<< T << TL.getSourceRange();
@@ -5877,19 +6196,31 @@ void AbstractUsageInfo::CheckType(const NamedDecl *D, TypeLoc TL,
}
-/// Check for invalid uses of an abstract type in a method declaration.
+/// Check for invalid uses of an abstract type in a function declaration.
static void CheckAbstractClassUsage(AbstractUsageInfo &Info,
- CXXMethodDecl *MD) {
- // No need to do the check on definitions, which require that
- // the return/param types be complete.
- if (MD->doesThisDeclarationHaveABody())
+ FunctionDecl *FD) {
+ // Only definitions are required to refer to complete and
+ // non-abstract types.
+ if (!FD->doesThisDeclarationHaveABody())
return;
// For safety's sake, just ignore it if we don't have type source
// information. This should never happen for non-implicit methods,
// but...
- if (TypeSourceInfo *TSI = MD->getTypeSourceInfo())
- Info.CheckType(MD, TSI->getTypeLoc(), Sema::AbstractNone);
+ if (TypeSourceInfo *TSI = FD->getTypeSourceInfo())
+ Info.CheckType(FD, TSI->getTypeLoc(), Sema::AbstractNone);
+}
+
+/// Check for invalid uses of an abstract type in a variable0 declaration.
+static void CheckAbstractClassUsage(AbstractUsageInfo &Info,
+ VarDecl *VD) {
+ // No need to do the check on definitions, which require that
+ // the type is complete.
+ if (VD->isThisDeclarationADefinition())
+ return;
+
+ Info.CheckType(VD, VD->getTypeSourceInfo()->getTypeLoc(),
+ Sema::AbstractVariableType);
}
/// Check for invalid uses of an abstract type within a class definition.
@@ -5898,29 +6229,32 @@ static void CheckAbstractClassUsage(AbstractUsageInfo &Info,
for (auto *D : RD->decls()) {
if (D->isImplicit()) continue;
- // Methods and method templates.
- if (isa<CXXMethodDecl>(D)) {
- CheckAbstractClassUsage(Info, cast<CXXMethodDecl>(D));
- } else if (isa<FunctionTemplateDecl>(D)) {
- FunctionDecl *FD = cast<FunctionTemplateDecl>(D)->getTemplatedDecl();
- CheckAbstractClassUsage(Info, cast<CXXMethodDecl>(FD));
+ // Step through friends to the befriended declaration.
+ if (auto *FD = dyn_cast<FriendDecl>(D)) {
+ D = FD->getFriendDecl();
+ if (!D) continue;
+ }
+
+ // Functions and function templates.
+ if (auto *FD = dyn_cast<FunctionDecl>(D)) {
+ CheckAbstractClassUsage(Info, FD);
+ } else if (auto *FTD = dyn_cast<FunctionTemplateDecl>(D)) {
+ CheckAbstractClassUsage(Info, FTD->getTemplatedDecl());
// Fields and static variables.
- } else if (isa<FieldDecl>(D)) {
- FieldDecl *FD = cast<FieldDecl>(D);
+ } else if (auto *FD = dyn_cast<FieldDecl>(D)) {
if (TypeSourceInfo *TSI = FD->getTypeSourceInfo())
Info.CheckType(FD, TSI->getTypeLoc(), Sema::AbstractFieldType);
- } else if (isa<VarDecl>(D)) {
- VarDecl *VD = cast<VarDecl>(D);
- if (TypeSourceInfo *TSI = VD->getTypeSourceInfo())
- Info.CheckType(VD, TSI->getTypeLoc(), Sema::AbstractVariableType);
+ } else if (auto *VD = dyn_cast<VarDecl>(D)) {
+ CheckAbstractClassUsage(Info, VD);
+ } else if (auto *VTD = dyn_cast<VarTemplateDecl>(D)) {
+ CheckAbstractClassUsage(Info, VTD->getTemplatedDecl());
// Nested classes and class templates.
- } else if (isa<CXXRecordDecl>(D)) {
- CheckAbstractClassUsage(Info, cast<CXXRecordDecl>(D));
- } else if (isa<ClassTemplateDecl>(D)) {
- CheckAbstractClassUsage(Info,
- cast<ClassTemplateDecl>(D)->getTemplatedDecl());
+ } else if (auto *RD = dyn_cast<CXXRecordDecl>(D)) {
+ CheckAbstractClassUsage(Info, RD);
+ } else if (auto *CTD = dyn_cast<ClassTemplateDecl>(D)) {
+ CheckAbstractClassUsage(Info, CTD->getTemplatedDecl());
}
}
}
@@ -5960,11 +6294,14 @@ static void ReferenceDllExportedMembers(Sema &S, CXXRecordDecl *Class) {
S.MarkVTableUsed(Class->getLocation(), Class, true);
for (Decl *Member : Class->decls()) {
+ // Skip members that were not marked exported.
+ if (!Member->hasAttr<DLLExportAttr>())
+ continue;
+
// Defined static variables that are members of an exported base
// class must be marked export too.
auto *VD = dyn_cast<VarDecl>(Member);
- if (VD && Member->getAttr<DLLExportAttr>() &&
- VD->getStorageClass() == SC_Static &&
+ if (VD && VD->getStorageClass() == SC_Static &&
TSK == TSK_ImplicitInstantiation)
S.MarkVariableReferenced(VD->getLocation(), VD);
@@ -5972,40 +6309,47 @@ static void ReferenceDllExportedMembers(Sema &S, CXXRecordDecl *Class) {
if (!MD)
continue;
- if (Member->getAttr<DLLExportAttr>()) {
- if (MD->isUserProvided()) {
- // Instantiate non-default class member functions ...
+ if (MD->isUserProvided()) {
+ // Instantiate non-default class member functions ...
- // .. except for certain kinds of template specializations.
- if (TSK == TSK_ImplicitInstantiation && !ClassAttr->isInherited())
- continue;
+ // .. except for certain kinds of template specializations.
+ if (TSK == TSK_ImplicitInstantiation && !ClassAttr->isInherited())
+ continue;
+
+ // If this is an MS ABI dllexport default constructor, instantiate any
+ // default arguments.
+ if (S.Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ auto *CD = dyn_cast<CXXConstructorDecl>(MD);
+ if (CD && CD->isDefaultConstructor() && TSK == TSK_Undeclared) {
+ S.InstantiateDefaultCtorDefaultArgs(CD);
+ }
+ }
- S.MarkFunctionReferenced(Class->getLocation(), MD);
+ S.MarkFunctionReferenced(Class->getLocation(), MD);
- // The function will be passed to the consumer when its definition is
- // encountered.
- } else if (MD->isExplicitlyDefaulted()) {
- // Synthesize and instantiate explicitly defaulted methods.
- S.MarkFunctionReferenced(Class->getLocation(), MD);
+ // The function will be passed to the consumer when its definition is
+ // encountered.
+ } else if (MD->isExplicitlyDefaulted()) {
+ // Synthesize and instantiate explicitly defaulted methods.
+ S.MarkFunctionReferenced(Class->getLocation(), MD);
- if (TSK != TSK_ExplicitInstantiationDefinition) {
- // Except for explicit instantiation defs, we will not see the
- // definition again later, so pass it to the consumer now.
- S.Consumer.HandleTopLevelDecl(DeclGroupRef(MD));
- }
- } else if (!MD->isTrivial() ||
- MD->isCopyAssignmentOperator() ||
- MD->isMoveAssignmentOperator()) {
- // Synthesize and instantiate non-trivial implicit methods, and the copy
- // and move assignment operators. The latter are exported even if they
- // are trivial, because the address of an operator can be taken and
- // should compare equal across libraries.
- S.MarkFunctionReferenced(Class->getLocation(), MD);
-
- // There is no later point when we will see the definition of this
- // function, so pass it to the consumer now.
+ if (TSK != TSK_ExplicitInstantiationDefinition) {
+ // Except for explicit instantiation defs, we will not see the
+ // definition again later, so pass it to the consumer now.
S.Consumer.HandleTopLevelDecl(DeclGroupRef(MD));
}
+ } else if (!MD->isTrivial() ||
+ MD->isCopyAssignmentOperator() ||
+ MD->isMoveAssignmentOperator()) {
+ // Synthesize and instantiate non-trivial implicit methods, and the copy
+ // and move assignment operators. The latter are exported even if they
+ // are trivial, because the address of an operator can be taken and
+ // should compare equal across libraries.
+ S.MarkFunctionReferenced(Class->getLocation(), MD);
+
+ // There is no later point when we will see the definition of this
+ // function, so pass it to the consumer now.
+ S.Consumer.HandleTopLevelDecl(DeclGroupRef(MD));
}
}
}
@@ -6194,6 +6538,17 @@ void Sema::checkClassLevelDLLAttribute(CXXRecordDecl *Class) {
if (!ClassAttr)
return;
+ // MSVC allows imported or exported template classes that have UniqueExternal
+ // linkage. This occurs when the template class has been instantiated with
+ // a template parameter which itself has internal linkage.
+ // We drop the attribute to avoid exporting or importing any members.
+ if ((Context.getTargetInfo().getCXXABI().isMicrosoft() ||
+ Context.getTargetInfo().getTriple().isPS()) &&
+ (!Class->isExternallyVisible() && Class->hasExternalFormalLinkage())) {
+ Class->dropAttrs<DLLExportAttr, DLLImportAttr>();
+ return;
+ }
+
if (!Class->isExternallyVisible()) {
Diag(Class->getLocation(), diag::err_attribute_dll_not_extern)
<< Class << ClassAttr;
@@ -6507,7 +6862,7 @@ static bool canPassInRegisters(Sema &S, CXXRecordDecl *D,
bool CopyCtorIsTrivial = false, CopyCtorIsTrivialForCall = false;
bool DtorIsTrivialForCall = false;
- // If a class has at least one non-deleted, trivial copy constructor, it
+ // If a class has at least one eligible, trivial copy constructor, it
// is passed according to the C ABI. Otherwise, it is passed indirectly.
//
// Note: This permits classes with non-trivial copy or move ctors to be
@@ -6522,7 +6877,8 @@ static bool canPassInRegisters(Sema &S, CXXRecordDecl *D,
}
} else {
for (const CXXConstructorDecl *CD : D->ctors()) {
- if (CD->isCopyConstructor() && !CD->isDeleted()) {
+ if (CD->isCopyConstructor() && !CD->isDeleted() &&
+ !CD->isIneligibleOrNotSelected()) {
if (CD->isTrivial())
CopyCtorIsTrivial = true;
if (CD->isTrivialForCall())
@@ -6588,7 +6944,7 @@ static bool canPassInRegisters(Sema &S, CXXRecordDecl *D,
return false;
for (const CXXMethodDecl *MD : D->methods()) {
- if (MD->isDeleted())
+ if (MD->isDeleted() || MD->isIneligibleOrNotSelected())
continue;
auto *CD = dyn_cast<CXXConstructorDecl>(MD);
@@ -6656,7 +7012,7 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
(F->getType().isConstQualified() && F->getType()->isScalarType())) {
if (!Complained) {
Diag(Record->getLocation(), diag::warn_no_constructor_for_refconst)
- << Record->getTagKind() << Record;
+ << llvm::to_underlying(Record->getTagKind()) << Record;
Complained = true;
}
@@ -6842,7 +7198,8 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
// Define defaulted constexpr virtual functions that override a base class
// function right away.
// FIXME: We can defer doing this until the vtable is marked as used.
- if (M->isDefaulted() && M->isConstexpr() && M->size_overridden_methods())
+ if (CSM != CXXInvalid && !M->isDeleted() && M->isDefaulted() &&
+ M->isConstexpr() && M->size_overridden_methods())
DefineDefaultedFunction(*this, M, M->getLocation());
if (!Incomplete)
@@ -6925,11 +7282,12 @@ void Sema::CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record) {
bool CanPass = canPassInRegisters(*this, Record, CCK);
// Do not change ArgPassingRestrictions if it has already been set to
- // APK_CanNeverPassInRegs.
- if (Record->getArgPassingRestrictions() != RecordDecl::APK_CanNeverPassInRegs)
- Record->setArgPassingRestrictions(CanPass
- ? RecordDecl::APK_CanPassInRegs
- : RecordDecl::APK_CannotPassInRegs);
+ // ArgPassingKind::CanNeverPassInRegs.
+ if (Record->getArgPassingRestrictions() !=
+ RecordArgPassingKind::CanNeverPassInRegs)
+ Record->setArgPassingRestrictions(
+ CanPass ? RecordArgPassingKind::CanPassInRegs
+ : RecordArgPassingKind::CannotPassInRegs);
// If canPassInRegisters returns true despite the record having a non-trivial
// destructor, the record is destructed in the callee. This happens only when
@@ -7073,6 +7431,11 @@ specialMemberIsConstexpr(Sema &S, CXXRecordDecl *ClassDecl,
bool ConstRHS,
CXXConstructorDecl *InheritedCtor = nullptr,
Sema::InheritedConstructorInfo *Inherited = nullptr) {
+ // Suppress duplicate constraint checking here, in case a constraint check
+ // caused us to decide to do this. Any truely recursive checks will get
+ // caught during these checks anyway.
+ Sema::SatisfactionStackResetRAII SSRAII{S};
+
// If we're inheriting a constructor, see if we need to call it for this base
// class.
if (InheritedCtor) {
@@ -7169,8 +7532,8 @@ static bool defaultedSpecialMemberIsConstexpr(
// class is a constexpr function, and
for (const auto &B : ClassDecl->bases()) {
const RecordType *BaseType = B.getType()->getAs<RecordType>();
- if (!BaseType) continue;
-
+ if (!BaseType)
+ continue;
CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
if (!specialMemberIsConstexpr(S, BaseClassDecl, CSM, 0, ConstArg,
InheritedCtor, Inherited))
@@ -7297,13 +7660,15 @@ void Sema::CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *FD) {
if (DefKind.isSpecialMember()
? CheckExplicitlyDefaultedSpecialMember(cast<CXXMethodDecl>(FD),
- DefKind.asSpecialMember())
+ DefKind.asSpecialMember(),
+ FD->getDefaultLoc())
: CheckExplicitlyDefaultedComparison(S, FD, DefKind.asComparison()))
FD->setInvalidDecl();
}
bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
- CXXSpecialMember CSM) {
+ CXXSpecialMember CSM,
+ SourceLocation DefaultLoc) {
CXXRecordDecl *RD = MD->getParent();
assert(MD->isExplicitlyDefaulted() && CSM != CXXInvalid &&
@@ -7334,7 +7699,7 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
unsigned ExpectedParams = 1;
if (CSM == CXXDefaultConstructor || CSM == CXXDestructor)
ExpectedParams = 0;
- if (MD->getNumParams() != ExpectedParams) {
+ if (MD->getNumExplicitParams() != ExpectedParams) {
// This checks for default arguments: a copy or move constructor with a
// default argument is classified as a default constructor, and assignment
// operations and destructors can't have default arguments.
@@ -7351,7 +7716,7 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
}
}
- const FunctionProtoType *Type = MD->getType()->getAs<FunctionProtoType>();
+ const FunctionProtoType *Type = MD->getType()->castAs<FunctionProtoType>();
bool CanHaveConstParam = false;
if (CSM == CXXCopyConstructor)
@@ -7363,9 +7728,13 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
if (CSM == CXXCopyAssignment || CSM == CXXMoveAssignment) {
// Check for return type matching.
ReturnType = Type->getReturnType();
+ QualType ThisType = MD->getFunctionObjectParameterType();
QualType DeclType = Context.getTypeDeclType(RD);
- DeclType = Context.getAddrSpaceQualType(DeclType, MD->getMethodQualifiers().getAddressSpace());
+ DeclType = Context.getElaboratedType(ElaboratedTypeKeyword::None, nullptr,
+ DeclType, nullptr);
+ DeclType = Context.getAddrSpaceQualType(
+ DeclType, ThisType.getQualifiers().getAddressSpace());
QualType ExpectedReturnType = Context.getLValueReferenceType(DeclType);
if (!Context.hasSameType(ReturnType, ExpectedReturnType)) {
@@ -7375,7 +7744,7 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
}
// A defaulted special member cannot have cv-qualifiers.
- if (Type->getMethodQuals().hasConst() || Type->getMethodQuals().hasVolatile()) {
+ if (ThisType.isConstQualified() || ThisType.isVolatileQualified()) {
if (DeleteOnTypeMismatch)
ShouldDeleteForTypeMismatch = true;
else {
@@ -7384,10 +7753,31 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
HadError = true;
}
}
+ // [C++23][dcl.fct.def.default]/p2.2
+ // if F2 has an implicit object parameter of type “reference to C”,
+ // F1 may be an explicit object member function whose explicit object
+ // parameter is of (possibly different) type “reference to C”,
+ // in which case the type of F1 would differ from the type of F2
+ // in that the type of F1 has an additional parameter;
+ if (!Context.hasSameType(
+ ThisType.getNonReferenceType().getUnqualifiedType(),
+ Context.getRecordType(RD))) {
+ if (DeleteOnTypeMismatch)
+ ShouldDeleteForTypeMismatch = true;
+ else {
+ Diag(MD->getLocation(),
+ diag::err_defaulted_special_member_explicit_object_mismatch)
+ << (CSM == CXXMoveAssignment) << RD << MD->getSourceRange();
+ HadError = true;
+ }
+ }
}
// Check for parameter type matching.
- QualType ArgType = ExpectedParams ? Type->getParamType(0) : QualType();
+ QualType ArgType =
+ ExpectedParams
+ ? Type->getParamType(MD->isExplicitObjectMemberFunction() ? 1 : 0)
+ : QualType();
bool HasConstParam = false;
if (ExpectedParams && ArgType->isReferenceType()) {
// Argument must be reference to possibly-const T.
@@ -7439,15 +7829,33 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
// FIXME: This should not apply if the member is deleted.
bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, RD, CSM,
HasConstParam);
+
+ // C++14 [dcl.constexpr]p6 (CWG DR647/CWG DR1358):
+ // If the instantiated template specialization of a constexpr function
+ // template or member function of a class template would fail to satisfy
+ // the requirements for a constexpr function or constexpr constructor, that
+ // specialization is still a constexpr function or constexpr constructor,
+ // even though a call to such a function cannot appear in a constant
+ // expression.
+ if (MD->isTemplateInstantiation() && MD->isConstexpr())
+ Constexpr = true;
+
if ((getLangOpts().CPlusPlus20 ||
(getLangOpts().CPlusPlus14 ? !isa<CXXDestructorDecl>(MD)
: isa<CXXConstructorDecl>(MD))) &&
MD->isConstexpr() && !Constexpr &&
MD->getTemplatedKind() == FunctionDecl::TK_NonTemplate) {
- Diag(MD->getBeginLoc(), MD->isConsteval()
- ? diag::err_incorrect_defaulted_consteval
- : diag::err_incorrect_defaulted_constexpr)
- << CSM;
+ if (!MD->isConsteval() && RD->getNumVBases()) {
+ Diag(MD->getBeginLoc(), diag::err_incorrect_defaulted_constexpr_with_vb)
+ << CSM;
+ for (const auto &I : RD->vbases())
+ Diag(I.getBeginLoc(), diag::note_constexpr_virtual_base_here);
+ } else {
+ Diag(MD->getBeginLoc(), MD->isConsteval()
+ ? diag::err_incorrect_defaulted_consteval
+ : diag::err_incorrect_defaulted_constexpr)
+ << CSM;
+ }
// FIXME: Explain why the special member can't be constexpr.
HadError = true;
}
@@ -7470,10 +7878,8 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
FunctionProtoType::ExtProtoInfo EPI = Type->getExtProtoInfo();
EPI.ExceptionSpec.Type = EST_Unevaluated;
EPI.ExceptionSpec.SourceDecl = MD;
- MD->setType(Context.getFunctionType(ReturnType,
- llvm::makeArrayRef(&ArgType,
- ExpectedParams),
- EPI));
+ MD->setType(
+ Context.getFunctionType(ReturnType, Type->getParamTypes(), EPI));
}
}
@@ -7484,8 +7890,11 @@ bool Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
Diag(MD->getLocation(), diag::warn_defaulted_method_deleted) << CSM;
if (ShouldDeleteForTypeMismatch) {
Diag(MD->getLocation(), diag::note_deleted_type_mismatch) << CSM;
- } else {
- ShouldDeleteSpecialMember(MD, CSM, nullptr, /*Diagnose*/true);
+ } else if (ShouldDeleteSpecialMember(MD, CSM, nullptr,
+ /*Diagnose*/ true) &&
+ DefaultLoc.isValid()) {
+ Diag(DefaultLoc, diag::note_replace_equals_default_to_delete)
+ << FixItHint::CreateReplacement(DefaultLoc, "delete");
}
}
if (ShouldDeleteForTypeMismatch && !HadError) {
@@ -7580,6 +7989,10 @@ protected:
// followed by the non-static data members of C
for (FieldDecl *Field : Record->fields()) {
+ // C++23 [class.bit]p2:
+ // Unnamed bit-fields are not members ...
+ if (Field->isUnnamedBitfield())
+ continue;
// Recursively expand anonymous structs.
if (Field->isAnonymousStructOrUnion()) {
if (visitSubobjects(Results, Field->getType()->getAsCXXRecordDecl(),
@@ -7743,7 +8156,8 @@ private:
OverloadCandidateSet CandidateSet(
FD->getLocation(), OverloadCandidateSet::CSK_Operator,
OverloadCandidateSet::OperatorRewriteInfo(
- OO, /*AllowRewrittenCandidates=*/!SpaceshipCandidates));
+ OO, FD->getLocation(),
+ /*AllowRewrittenCandidates=*/!SpaceshipCandidates));
/// C++2a [class.compare.default]p1 [P2002R0]:
/// [...] the defaulted function itself is never a candidate for overload
@@ -7771,9 +8185,21 @@ private:
DCK == DefaultedComparisonKind::Relational) &&
!Best->RewriteKind) {
if (Diagnose == ExplainDeleted) {
- S.Diag(Best->Function->getLocation(),
- diag::note_defaulted_comparison_not_rewritten_callee)
- << FD;
+ if (Best->Function) {
+ S.Diag(Best->Function->getLocation(),
+ diag::note_defaulted_comparison_not_rewritten_callee)
+ << FD;
+ } else {
+ assert(Best->Conversions.size() == 2 &&
+ Best->Conversions[0].isUserDefined() &&
+ "non-user-defined conversion from class to built-in "
+ "comparison");
+ S.Diag(Best->Conversions[0]
+ .UserDefined.FoundConversionFunction.getDecl()
+ ->getLocation(),
+ diag::note_defaulted_comparison_not_rewritten_conversion)
+ << FD;
+ }
}
return Result::deleted();
}
@@ -7870,7 +8296,7 @@ private:
"invalid builtin comparison");
if (NeedsDeducing) {
- Optional<ComparisonCategoryType> Cat =
+ std::optional<ComparisonCategoryType> Cat =
getComparisonCategoryForBuiltinCmp(T);
assert(Cat && "no category for builtin comparison?");
R.Category = *Cat;
@@ -7929,7 +8355,8 @@ private:
if (Diagnose == ExplainDeleted) {
S.Diag(Subobj.Loc, diag::note_defaulted_comparison_no_viable_function)
- << FD << Subobj.Kind << Subobj.Decl;
+ << FD << (OO == OO_EqualEqual || OO == OO_ExclaimEqual)
+ << Subobj.Kind << Subobj.Decl;
// For a three-way comparison, list both the candidates for the
// original operator and the candidates for the synthesized operator.
@@ -8103,7 +8530,8 @@ private:
ExprPair getCompleteObject() {
unsigned Param = 0;
ExprResult LHS;
- if (isa<CXXMethodDecl>(FD)) {
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FD);
+ MD && MD->isImplicitObjectMemberFunction()) {
// LHS is '*this'.
LHS = S.ActOnCXXThis(Loc);
if (!LHS.isInvalid())
@@ -8157,7 +8585,7 @@ private:
if (ReturnFalse.isInvalid())
return StmtError();
- return S.ActOnIfStmt(Loc, false, Loc, nullptr,
+ return S.ActOnIfStmt(Loc, IfStatementKind::Ordinary, Loc, nullptr,
S.ActOnCondition(nullptr, Loc, NotCond.get(),
Sema::ConditionKind::Boolean),
Loc, ReturnFalse.get(), SourceLocation(), nullptr);
@@ -8312,8 +8740,8 @@ private:
return StmtError();
// if (...)
- return S.ActOnIfStmt(Loc, /*IsConstexpr=*/false, Loc, InitStmt, Cond, Loc,
- ReturnStmt.get(),
+ return S.ActOnIfStmt(Loc, IfStatementKind::Ordinary, Loc, InitStmt, Cond,
+ Loc, ReturnStmt.get(),
/*ElseLoc=*/SourceLocation(), /*Else=*/nullptr);
}
@@ -8371,9 +8799,6 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
DefaultedComparisonKind DCK) {
assert(DCK != DefaultedComparisonKind::None && "not a defaulted comparison");
- CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(FD->getLexicalDeclContext());
- assert(RD && "defaulted comparison is not defaulted in a class");
-
// Perform any unqualified lookups we're going to need to default this
// function.
if (S) {
@@ -8387,56 +8812,48 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
// C++2a [class.compare.default]p1:
// A defaulted comparison operator function for some class C shall be a
// non-template function declared in the member-specification of C that is
- // -- a non-static const member of C having one parameter of type
- // const C&, or
+ // -- a non-static const non-volatile member of C having one parameter of
+ // type const C& and either no ref-qualifier or the ref-qualifier &, or
// -- a friend of C having two parameters of type const C& or two
// parameters of type C.
- QualType ExpectedParmType1 = Context.getRecordType(RD);
- QualType ExpectedParmType2 =
- Context.getLValueReferenceType(ExpectedParmType1.withConst());
- if (isa<CXXMethodDecl>(FD))
- ExpectedParmType1 = ExpectedParmType2;
- for (const ParmVarDecl *Param : FD->parameters()) {
- if (!Param->getType()->isDependentType() &&
- !Context.hasSameType(Param->getType(), ExpectedParmType1) &&
- !Context.hasSameType(Param->getType(), ExpectedParmType2)) {
- // Don't diagnose an implicit 'operator=='; we will have diagnosed the
- // corresponding defaulted 'operator<=>' already.
- if (!FD->isImplicit()) {
- Diag(FD->getLocation(), diag::err_defaulted_comparison_param)
- << (int)DCK << Param->getType() << ExpectedParmType1
- << !isa<CXXMethodDecl>(FD)
- << ExpectedParmType2 << Param->getSourceRange();
- }
- return true;
- }
- }
- if (FD->getNumParams() == 2 &&
- !Context.hasSameType(FD->getParamDecl(0)->getType(),
- FD->getParamDecl(1)->getType())) {
- if (!FD->isImplicit()) {
- Diag(FD->getLocation(), diag::err_defaulted_comparison_param_mismatch)
- << (int)DCK
- << FD->getParamDecl(0)->getType()
- << FD->getParamDecl(0)->getSourceRange()
- << FD->getParamDecl(1)->getType()
- << FD->getParamDecl(1)->getSourceRange();
- }
- return true;
- }
- // ... non-static const member ...
- if (auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(FD->getLexicalDeclContext());
+ bool IsMethod = isa<CXXMethodDecl>(FD);
+ if (IsMethod) {
+ auto *MD = cast<CXXMethodDecl>(FD);
assert(!MD->isStatic() && "comparison function cannot be a static member");
- if (!MD->isConst()) {
- SourceLocation InsertLoc;
- if (FunctionTypeLoc Loc = MD->getFunctionTypeLoc())
- InsertLoc = getLocForEndOfToken(Loc.getRParenLoc());
+
+ if (MD->getRefQualifier() == RQ_RValue) {
+ Diag(MD->getLocation(), diag::err_ref_qualifier_comparison_operator);
+
+ // Remove the ref qualifier to recover.
+ const auto *FPT = MD->getType()->castAs<FunctionProtoType>();
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ EPI.RefQualifier = RQ_None;
+ MD->setType(Context.getFunctionType(FPT->getReturnType(),
+ FPT->getParamTypes(), EPI));
+ }
+
+ // If we're out-of-class, this is the class we're comparing.
+ if (!RD)
+ RD = MD->getParent();
+ QualType T = MD->getFunctionObjectParameterType();
+ if (!T.isConstQualified()) {
+ SourceLocation Loc, InsertLoc;
+ if (MD->isExplicitObjectMemberFunction()) {
+ Loc = MD->getParamDecl(0)->getBeginLoc();
+ InsertLoc = getLocForEndOfToken(
+ MD->getParamDecl(0)->getExplicitObjectParamThisLoc());
+ } else {
+ Loc = MD->getLocation();
+ if (FunctionTypeLoc Loc = MD->getFunctionTypeLoc())
+ InsertLoc = Loc.getRParenLoc();
+ }
// Don't diagnose an implicit 'operator=='; we will have diagnosed the
// corresponding defaulted 'operator<=>' already.
if (!MD->isImplicit()) {
- Diag(MD->getLocation(), diag::err_defaulted_comparison_non_const)
- << (int)DCK << FixItHint::CreateInsertion(InsertLoc, " const");
+ Diag(Loc, diag::err_defaulted_comparison_non_const)
+ << (int)DCK << FixItHint::CreateInsertion(InsertLoc, " const");
}
// Add the 'const' to the type to recover.
@@ -8446,9 +8863,112 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
MD->setType(Context.getFunctionType(FPT->getReturnType(),
FPT->getParamTypes(), EPI));
}
- } else {
- // A non-member function declared in a class must be a friend.
+
+ if (MD->isVolatile()) {
+ Diag(MD->getLocation(), diag::err_volatile_comparison_operator);
+
+ // Remove the 'volatile' from the type to recover.
+ const auto *FPT = MD->getType()->castAs<FunctionProtoType>();
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ EPI.TypeQuals.removeVolatile();
+ MD->setType(Context.getFunctionType(FPT->getReturnType(),
+ FPT->getParamTypes(), EPI));
+ }
+ }
+
+ if ((FD->getNumParams() -
+ (unsigned)FD->hasCXXExplicitFunctionObjectParameter()) !=
+ (IsMethod ? 1 : 2)) {
+ // Let's not worry about using a variadic template pack here -- who would do
+ // such a thing?
+ Diag(FD->getLocation(), diag::err_defaulted_comparison_num_args)
+ << int(IsMethod) << int(DCK);
+ return true;
+ }
+
+ const ParmVarDecl *KnownParm = nullptr;
+ for (const ParmVarDecl *Param : FD->parameters()) {
+ if (Param->isExplicitObjectParameter())
+ continue;
+ QualType ParmTy = Param->getType();
+
+ if (!KnownParm) {
+ auto CTy = ParmTy;
+ // Is it `T const &`?
+ bool Ok = !IsMethod;
+ QualType ExpectedTy;
+ if (RD)
+ ExpectedTy = Context.getRecordType(RD);
+ if (auto *Ref = CTy->getAs<ReferenceType>()) {
+ CTy = Ref->getPointeeType();
+ if (RD)
+ ExpectedTy.addConst();
+ Ok = true;
+ }
+
+ // Is T a class?
+ if (!Ok) {
+ } else if (RD) {
+ if (!RD->isDependentType() && !Context.hasSameType(CTy, ExpectedTy))
+ Ok = false;
+ } else if (auto *CRD = CTy->getAsRecordDecl()) {
+ RD = cast<CXXRecordDecl>(CRD);
+ } else {
+ Ok = false;
+ }
+
+ if (Ok) {
+ KnownParm = Param;
+ } else {
+ // Don't diagnose an implicit 'operator=='; we will have diagnosed the
+ // corresponding defaulted 'operator<=>' already.
+ if (!FD->isImplicit()) {
+ if (RD) {
+ QualType PlainTy = Context.getRecordType(RD);
+ QualType RefTy =
+ Context.getLValueReferenceType(PlainTy.withConst());
+ Diag(FD->getLocation(), diag::err_defaulted_comparison_param)
+ << int(DCK) << ParmTy << RefTy << int(!IsMethod) << PlainTy
+ << Param->getSourceRange();
+ } else {
+ assert(!IsMethod && "should know expected type for method");
+ Diag(FD->getLocation(),
+ diag::err_defaulted_comparison_param_unknown)
+ << int(DCK) << ParmTy << Param->getSourceRange();
+ }
+ }
+ return true;
+ }
+ } else if (!Context.hasSameType(KnownParm->getType(), ParmTy)) {
+ Diag(FD->getLocation(), diag::err_defaulted_comparison_param_mismatch)
+ << int(DCK) << KnownParm->getType() << KnownParm->getSourceRange()
+ << ParmTy << Param->getSourceRange();
+ return true;
+ }
+ }
+
+ assert(RD && "must have determined class");
+ if (IsMethod) {
+ } else if (isa<CXXRecordDecl>(FD->getLexicalDeclContext())) {
+ // In-class, must be a friend decl.
assert(FD->getFriendObjectKind() && "expected a friend declaration");
+ } else {
+ // Out of class, require the defaulted comparison to be a friend (of a
+ // complete type).
+ if (RequireCompleteType(FD->getLocation(), Context.getRecordType(RD),
+ diag::err_defaulted_comparison_not_friend, int(DCK),
+ int(1)))
+ return true;
+
+ if (llvm::none_of(RD->friends(), [&](const FriendDecl *F) {
+ return FD->getCanonicalDecl() ==
+ F->getFriendDecl()->getCanonicalDecl();
+ })) {
+ Diag(FD->getLocation(), diag::err_defaulted_comparison_not_friend)
+ << int(DCK) << int(0) << RD;
+ Diag(RD->getCanonicalDecl()->getLocation(), diag::note_declared_at);
+ return true;
+ }
}
// C++2a [class.eq]p1, [class.rel]p1:
@@ -8465,10 +8985,11 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
// C++2a [class.spaceship]p2 [P2002R0]:
// Let R be the declared return type [...]. If R is auto, [...]. Otherwise,
// R shall not contain a placeholder type.
- if (DCK == DefaultedComparisonKind::ThreeWay &&
- FD->getDeclaredReturnType()->getContainedDeducedType() &&
- !Context.hasSameType(FD->getDeclaredReturnType(),
- Context.getAutoDeductType())) {
+ if (QualType RT = FD->getDeclaredReturnType();
+ DCK == DefaultedComparisonKind::ThreeWay &&
+ RT->getContainedDeducedType() &&
+ (!Context.hasSameType(RT, Context.getAutoDeductType()) ||
+ RT->getContainedAutoType()->isConstrained())) {
Diag(FD->getLocation(),
diag::err_defaulted_comparison_deduced_return_type_not_auto)
<< (int)DCK << FD->getDeclaredReturnType() << Context.AutoDeductTy
@@ -8487,10 +9008,8 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
bool First = FD == FD->getCanonicalDecl();
- // If we want to delete the function, then do so; there's nothing else to
- // check in that case.
- if (Info.Deleted) {
- if (!First) {
+ if (!First) {
+ if (Info.Deleted) {
// C++11 [dcl.fct.def.default]p4:
// [For a] user-provided explicitly-defaulted function [...] if such a
// function is implicitly defined as deleted, the program is ill-formed.
@@ -8504,7 +9023,21 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
.visit();
return true;
}
+ if (isa<CXXRecordDecl>(FD->getLexicalDeclContext())) {
+ // C++20 [class.compare.default]p1:
+ // [...] A definition of a comparison operator as defaulted that appears
+ // in a class shall be the first declaration of that function.
+ Diag(FD->getLocation(), diag::err_non_first_default_compare_in_class)
+ << (int)DCK;
+ Diag(FD->getCanonicalDecl()->getLocation(),
+ diag::note_previous_declaration);
+ return true;
+ }
+ }
+ // If we want to delete the function, then do so; there's nothing else to
+ // check in that case.
+ if (Info.Deleted) {
SetDeclDeleted(FD, FD->getLocation());
if (!inTemplateInstantiation() && !FD->isImplicit()) {
Diag(FD->getLocation(), diag::warn_defaulted_comparison_deleted)
@@ -8512,6 +9045,9 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
DefaultedComparisonAnalyzer(*this, RD, FD, DCK,
DefaultedComparisonAnalyzer::ExplainDeleted)
.visit();
+ if (FD->getDefaultLoc().isValid())
+ Diag(FD->getDefaultLoc(), diag::note_replace_equals_default_to_delete)
+ << FixItHint::CreateReplacement(FD->getDefaultLoc(), "delete");
}
return false;
}
@@ -8541,12 +9077,25 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
// the requirements for a constexpr function [...]
// The only relevant requirements are that the parameter and return types are
// literal types. The remaining conditions are checked by the analyzer.
+ //
+ // We support P2448R2 in language modes earlier than C++23 as an extension.
+ // The concept of constexpr-compatible was removed.
+ // C++23 [dcl.fct.def.default]p3 [P2448R2]
+ // A function explicitly defaulted on its first declaration is implicitly
+ // inline, and is implicitly constexpr if it is constexpr-suitable.
+ // C++23 [dcl.constexpr]p3
+ // A function is constexpr-suitable if
+ // - it is not a coroutine, and
+ // - if the function is a constructor or destructor, its class does not
+ // have any virtual base classes.
if (FD->isConstexpr()) {
if (CheckConstexprReturnType(*this, FD, CheckConstexprKind::Diagnose) &&
CheckConstexprParameterTypes(*this, FD, CheckConstexprKind::Diagnose) &&
!Info.Constexpr) {
Diag(FD->getBeginLoc(),
- diag::err_incorrect_defaulted_comparison_constexpr)
+ getLangOpts().CPlusPlus23
+ ? diag::warn_cxx23_compat_defaulted_comparison_constexpr_mismatch
+ : diag::ext_defaulted_comparison_constexpr_mismatch)
<< FD->isImplicit() << (int)DCK << FD->isConsteval();
DefaultedComparisonAnalyzer(*this, RD, FD, DCK,
DefaultedComparisonAnalyzer::ExplainConstexpr)
@@ -8606,7 +9155,10 @@ void Sema::DefineDefaultedComparison(SourceLocation UseLoc, FunctionDecl *FD,
{
// Build and set up the function body.
- CXXRecordDecl *RD = cast<CXXRecordDecl>(FD->getLexicalParent());
+ // The first parameter has type maybe-ref-to maybe-const T, use that to get
+ // the type of the class being compared.
+ auto PT = FD->getParamDecl(0)->getType();
+ CXXRecordDecl *RD = PT.getNonReferenceType()->getAsCXXRecordDecl();
SourceLocation BodyLoc =
FD->getEndLoc().isValid() ? FD->getEndLoc() : FD->getLocation();
StmtResult Body =
@@ -8721,9 +9273,9 @@ struct SpecialMemberVisitor {
llvm_unreachable("invalid special member kind");
}
- if (MD->getNumParams()) {
+ if (MD->getNumExplicitParams()) {
if (const ReferenceType *RT =
- MD->getParamDecl(0)->getType()->getAs<ReferenceType>())
+ MD->getNonObjectParameter(0)->getType()->getAs<ReferenceType>())
ConstArg = RT->getPointeeType().isConstQualified();
}
}
@@ -8891,7 +9443,18 @@ bool SpecialMemberDeletionInfo::shouldDeleteForSubobjectCall(
// must be accessible and non-deleted, but need not be trivial. Such a
// destructor is never actually called, but is semantically checked as
// if it were.
- DiagKind = 4;
+ if (CSM == Sema::CXXDefaultConstructor) {
+ // [class.default.ctor]p2:
+ // A defaulted default constructor for class X is defined as deleted if
+ // - X is a union that has a variant member with a non-trivial default
+ // constructor and no variant member of X has a default member
+ // initializer
+ const auto *RD = cast<CXXRecordDecl>(Field->getParent());
+ if (!RD->hasInClassInitializer())
+ DiagKind = 4;
+ } else {
+ DiagKind = 4;
+ }
}
if (DiagKind == -1)
@@ -9032,13 +9595,12 @@ bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) {
<< !!ICI << MD->getParent() << FD << FieldType << /*Reference*/0;
return true;
}
- // C++11 [class.ctor]p5: any non-variant non-static data member of
- // const-qualified type (or array thereof) with no
- // brace-or-equal-initializer does not have a user-provided default
- // constructor.
+ // C++11 [class.ctor]p5 (modified by DR2394): any non-variant non-static
+ // data member of const-qualified type (or array thereof) with no
+ // brace-or-equal-initializer is not const-default-constructible.
if (!inUnion() && FieldType.isConstQualified() &&
!FD->hasInClassInitializer() &&
- (!FieldRecord || !FieldRecord->hasUserProvidedDefaultConstructor())) {
+ (!FieldRecord || !FieldRecord->allowConstDefaultInit())) {
if (Diagnose)
S.Diag(FD->getLocation(), diag::note_deleted_default_ctor_uninit_field)
<< !!ICI << MD->getParent() << FD << FD->getType() << /*Const*/1;
@@ -9108,7 +9670,8 @@ bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) {
}
// Don't check the implicit member of the anonymous union type.
- // This is technically non-conformant, but sanity demands it.
+ // This is technically non-conformant but supported, and we have a
+ // diagnostic for this elsewhere.
return false;
}
@@ -9592,11 +10155,22 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
case CXXCopyConstructor:
case CXXCopyAssignment: {
- // Trivial copy operations always have const, non-volatile parameter types.
- ConstArg = true;
- const ParmVarDecl *Param0 = MD->getParamDecl(0);
+ const ParmVarDecl *Param0 = MD->getNonObjectParameter(0);
const ReferenceType *RT = Param0->getType()->getAs<ReferenceType>();
- if (!RT || RT->getPointeeType().getCVRQualifiers() != Qualifiers::Const) {
+
+ // When ClangABICompat14 is true, CXX copy constructors will only be trivial
+ // if they are not user-provided and their parameter-type-list is equivalent
+ // to the parameter-type-list of an implicit declaration. This maintains the
+ // behavior before dr2171 was implemented.
+ //
+ // Otherwise, if ClangABICompat14 is false, All copy constructors can be
+ // trivial, if they are not user-provided, regardless of the qualifiers on
+ // the reference type.
+ const bool ClangABICompat14 = Context.getLangOpts().getClangABICompat() <=
+ LangOptions::ClangABI::Ver14;
+ if (!RT ||
+ ((RT->getPointeeType().getCVRQualifiers() != Qualifiers::Const) &&
+ ClangABICompat14)) {
if (Diagnose)
Diag(Param0->getLocation(), diag::note_nontrivial_param_type)
<< Param0->getSourceRange() << Param0->getType()
@@ -9604,13 +10178,15 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
Context.getRecordType(RD).withConst());
return false;
}
+
+ ConstArg = RT->getPointeeType().isConstQualified();
break;
}
case CXXMoveConstructor:
case CXXMoveAssignment: {
// Trivial move operations always have non-cv-qualified parameters.
- const ParmVarDecl *Param0 = MD->getParamDecl(0);
+ const ParmVarDecl *Param0 = MD->getNonObjectParameter(0);
const RValueReferenceType *RT =
Param0->getType()->getAs<RValueReferenceType>();
if (!RT || RT->getPointeeType().getCVRQualifiers()) {
@@ -9778,7 +10354,7 @@ public:
};
} // end anonymous namespace
-/// Add the most overriden methods from MD to Methods
+/// Add the most overridden methods from MD to Methods
static void AddMostOverridenMethods(const CXXMethodDecl *MD,
llvm::SmallPtrSetImpl<const CXXMethodDecl *>& Methods) {
if (MD->size_overridden_methods() == 0)
@@ -9937,10 +10513,12 @@ void Sema::ActOnFinishCXXMemberSpecification(
Diag(AL.getLoc(), diag::warn_attribute_after_definition_ignored) << AL;
}
- ActOnFields(S, RLoc, TagDecl, llvm::makeArrayRef(
- // strict aliasing violation!
- reinterpret_cast<Decl**>(FieldCollector->getCurFields()),
- FieldCollector->getCurNumFields()), LBrac, RBrac, AttrList);
+ ActOnFields(S, RLoc, TagDecl,
+ llvm::ArrayRef(
+ // strict aliasing violation!
+ reinterpret_cast<Decl **>(FieldCollector->getCurFields()),
+ FieldCollector->getCurNumFields()),
+ LBrac, RBrac, AttrList);
CheckCompletedCXXClass(S, cast<CXXRecordDecl>(TagDecl));
}
@@ -10506,7 +11084,7 @@ QualType Sema::CheckDestructorDeclarator(Declarator &D, QualType R,
EPI.Variadic = false;
EPI.TypeQuals = Qualifiers();
EPI.RefQualifier = RQ_None;
- return Context.getFunctionType(Context.VoidTy, None, EPI);
+ return Context.getFunctionType(Context.VoidTy, std::nullopt, EPI);
}
static void extendLeft(SourceRange &R, SourceRange Before) {
@@ -10574,15 +11152,25 @@ void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
<< SourceRange(D.getIdentifierLoc()) << 0;
D.setInvalidType();
}
-
const auto *Proto = R->castAs<FunctionProtoType>();
-
// Make sure we don't have any parameters.
- if (Proto->getNumParams() > 0) {
- Diag(D.getIdentifierLoc(), diag::err_conv_function_with_params);
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
+ unsigned NumParam = Proto->getNumParams();
+
+ // [C++2b]
+ // A conversion function shall have no non-object parameters.
+ if (NumParam == 1) {
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
+ if (const auto *First =
+ dyn_cast_if_present<ParmVarDecl>(FTI.Params[0].Param);
+ First && First->isExplicitObjectParameter())
+ NumParam--;
+ }
+ if (NumParam != 0) {
+ Diag(D.getIdentifierLoc(), diag::err_conv_function_with_params);
// Delete the parameters.
- D.getFunctionTypeInfo().freeParams();
+ FTI.freeParams();
D.setInvalidType();
} else if (Proto->isVariadic()) {
Diag(D.getIdentifierLoc(), diag::err_conv_function_variadic);
@@ -10609,7 +11197,7 @@ void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
PastFunctionChunk = true;
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case DeclaratorChunk::Array:
NeedsTypedef = true;
extendRight(After, Chunk.getSourceRange());
@@ -10682,7 +11270,8 @@ void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
// of the errors above fired) and with the conversion type as the
// return type.
if (D.isInvalidType())
- R = Context.getFunctionType(ConvType, None, Proto->getExtProtoInfo());
+ R = Context.getFunctionType(ConvType, std::nullopt,
+ Proto->getExtProtoInfo());
// C++0x explicit conversion operators.
if (DS.hasExplicitSpecifier() && !getLangOpts().CPlusPlus20)
@@ -10732,13 +11321,110 @@ Decl *Sema::ActOnConversionDeclarator(CXXConversionDecl *Conversion) {
<< ClassType << ConvType;
}
- if (FunctionTemplateDecl *ConversionTemplate
- = Conversion->getDescribedFunctionTemplate())
+ if (FunctionTemplateDecl *ConversionTemplate =
+ Conversion->getDescribedFunctionTemplate()) {
+ if (const auto *ConvTypePtr = ConvType->getAs<PointerType>()) {
+ ConvType = ConvTypePtr->getPointeeType();
+ }
+ if (ConvType->isUndeducedAutoType()) {
+ Diag(Conversion->getTypeSpecStartLoc(), diag::err_auto_not_allowed)
+ << getReturnTypeLoc(Conversion).getSourceRange()
+ << llvm::to_underlying(ConvType->getAs<AutoType>()->getKeyword())
+ << /* in declaration of conversion function template= */ 24;
+ }
+
return ConversionTemplate;
+ }
return Conversion;
}
+void Sema::CheckExplicitObjectMemberFunction(DeclContext *DC, Declarator &D,
+ DeclarationName Name, QualType R) {
+ CheckExplicitObjectMemberFunction(D, Name, R, false, DC);
+}
+
+void Sema::CheckExplicitObjectLambda(Declarator &D) {
+ CheckExplicitObjectMemberFunction(D, {}, {}, true);
+}
+
+void Sema::CheckExplicitObjectMemberFunction(Declarator &D,
+ DeclarationName Name, QualType R,
+ bool IsLambda, DeclContext *DC) {
+ if (!D.isFunctionDeclarator())
+ return;
+
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
+ if (FTI.NumParams == 0)
+ return;
+ ParmVarDecl *ExplicitObjectParam = nullptr;
+ for (unsigned Idx = 0; Idx < FTI.NumParams; Idx++) {
+ const auto &ParamInfo = FTI.Params[Idx];
+ if (!ParamInfo.Param)
+ continue;
+ ParmVarDecl *Param = cast<ParmVarDecl>(ParamInfo.Param);
+ if (!Param->isExplicitObjectParameter())
+ continue;
+ if (Idx == 0) {
+ ExplicitObjectParam = Param;
+ continue;
+ } else {
+ Diag(Param->getLocation(),
+ diag::err_explicit_object_parameter_must_be_first)
+ << IsLambda << Param->getSourceRange();
+ }
+ }
+ if (!ExplicitObjectParam)
+ return;
+
+ if (ExplicitObjectParam->hasDefaultArg()) {
+ Diag(ExplicitObjectParam->getLocation(),
+ diag::err_explicit_object_default_arg)
+ << ExplicitObjectParam->getSourceRange();
+ }
+
+ if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static) {
+ Diag(ExplicitObjectParam->getBeginLoc(),
+ diag::err_explicit_object_parameter_nonmember)
+ << D.getSourceRange() << /*static=*/0 << IsLambda;
+ D.setInvalidType();
+ }
+
+ if (D.getDeclSpec().isVirtualSpecified()) {
+ Diag(ExplicitObjectParam->getBeginLoc(),
+ diag::err_explicit_object_parameter_nonmember)
+ << D.getSourceRange() << /*virtual=*/1 << IsLambda;
+ D.setInvalidType();
+ }
+
+ if (IsLambda && FTI.hasMutableQualifier()) {
+ Diag(ExplicitObjectParam->getBeginLoc(),
+ diag::err_explicit_object_parameter_mutable)
+ << D.getSourceRange();
+ }
+
+ if (IsLambda)
+ return;
+
+ if (!DC || !DC->isRecord()) {
+ Diag(ExplicitObjectParam->getLocation(),
+ diag::err_explicit_object_parameter_nonmember)
+ << D.getSourceRange() << /*non-member=*/2 << IsLambda;
+ D.setInvalidType();
+ return;
+ }
+
+ // CWG2674: constructors and destructors cannot have explicit parameters.
+ if (Name.getNameKind() == DeclarationName::CXXConstructorName ||
+ Name.getNameKind() == DeclarationName::CXXDestructorName) {
+ Diag(ExplicitObjectParam->getBeginLoc(),
+ diag::err_explicit_object_parameter_constructor)
+ << (Name.getNameKind() == DeclarationName::CXXDestructorName)
+ << D.getSourceRange();
+ D.setInvalidType();
+ }
+}
+
namespace {
/// Utility class to accumulate and print a diagnostic listing the invalid
/// specifier(s) on a declaration.
@@ -10772,8 +11458,8 @@ struct BadSpecifierDiagnoser {
/// Check the validity of a declarator that we parsed for a deduction-guide.
/// These aren't actually declarators in the grammar, so we need to check that
/// the user didn't specify any pieces that are not part of the deduction-guide
-/// grammar.
-void Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
+/// grammar. Return true on invalid deduction-guide.
+bool Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC) {
TemplateName GuidedTemplate = D.getName().TemplateName.get().get();
TemplateDecl *GuidedTemplateDecl = GuidedTemplate.getAsTemplateDecl();
@@ -10786,7 +11472,7 @@ void Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
GuidedTemplateDecl->getDeclContext()->getRedeclContext())) {
Diag(D.getIdentifierLoc(), diag::err_deduction_guide_wrong_scope)
<< GuidedTemplateDecl;
- Diag(GuidedTemplateDecl->getLocation(), diag::note_template_decl_here);
+ NoteTemplateLocation(*GuidedTemplateDecl);
}
auto &DS = D.getMutableDeclSpec();
@@ -10823,7 +11509,7 @@ void Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
}
if (D.isInvalidType())
- return;
+ return true;
// Check the declarator is simple enough.
bool FoundFunction = false;
@@ -10836,14 +11522,13 @@ void Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
<< D.getSourceRange();
break;
}
- if (!Chunk.Fun.hasTrailingReturnType()) {
- Diag(D.getName().getBeginLoc(),
- diag::err_deduction_guide_no_trailing_return_type);
- break;
- }
+ if (!Chunk.Fun.hasTrailingReturnType())
+ return Diag(D.getName().getBeginLoc(),
+ diag::err_deduction_guide_no_trailing_return_type);
// Check that the return type is written as a specialization of
// the template specified as the deduction-guide's name.
+ // The template name may not be qualified. [temp.deduct.guide]
ParsedType TrailingReturnType = Chunk.Fun.getTrailingReturnType();
TypeSourceInfo *TSI = nullptr;
QualType RetTy = GetTypeFromParser(TrailingReturnType, &TSI);
@@ -10851,11 +11536,17 @@ void Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
bool AcceptableReturnType = false;
bool MightInstantiateToSpecialization = false;
if (auto RetTST =
- TSI->getTypeLoc().getAs<TemplateSpecializationTypeLoc>()) {
+ TSI->getTypeLoc().getAsAdjusted<TemplateSpecializationTypeLoc>()) {
TemplateName SpecifiedName = RetTST.getTypePtr()->getTemplateName();
bool TemplateMatches =
Context.hasSameTemplateName(SpecifiedName, GuidedTemplate);
- if (SpecifiedName.getKind() == TemplateName::Template && TemplateMatches)
+ auto TKind = SpecifiedName.getKind();
+ // A Using TemplateName can't actually be valid (either it's qualified, or
+ // we're in the wrong scope). But we have diagnosed these problems
+ // already.
+ bool SimplyWritten = TKind == TemplateName::Template ||
+ TKind == TemplateName::UsingTemplate;
+ if (SimplyWritten && TemplateMatches)
AcceptableReturnType = true;
else {
// This could still instantiate to the right type, unless we know it
@@ -10868,13 +11559,12 @@ void Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
MightInstantiateToSpecialization = true;
}
- if (!AcceptableReturnType) {
- Diag(TSI->getTypeLoc().getBeginLoc(),
- diag::err_deduction_guide_bad_trailing_return_type)
- << GuidedTemplate << TSI->getType()
- << MightInstantiateToSpecialization
- << TSI->getTypeLoc().getSourceRange();
- }
+ if (!AcceptableReturnType)
+ return Diag(TSI->getTypeLoc().getBeginLoc(),
+ diag::err_deduction_guide_bad_trailing_return_type)
+ << GuidedTemplate << TSI->getType()
+ << MightInstantiateToSpecialization
+ << TSI->getTypeLoc().getSourceRange();
// Keep going to check that we don't have any inner declarator pieces (we
// could still have a function returning a pointer to a function).
@@ -10882,7 +11572,9 @@ void Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
}
if (D.isFunctionDefinition())
+ // we can still create a valid deduction guide here.
Diag(D.getIdentifierLoc(), diag::err_deduction_guide_defines_function);
+ return false;
}
//===----------------------------------------------------------------------===//
@@ -10897,6 +11589,11 @@ static void DiagnoseNamespaceInlineMismatch(Sema &S, SourceLocation KeywordLoc,
NamespaceDecl *PrevNS) {
assert(*IsInline != PrevNS->isInline());
+ // 'inline' must appear on the original definition, but not necessarily
+ // on all extension definitions, so the note should point to the first
+ // definition to avoid confusion.
+ PrevNS = PrevNS->getFirstDecl();
+
if (PrevNS->isInline())
// The user probably just forgot the 'inline', so suggest that it
// be added back.
@@ -10911,10 +11608,13 @@ static void DiagnoseNamespaceInlineMismatch(Sema &S, SourceLocation KeywordLoc,
/// ActOnStartNamespaceDef - This is called at the start of a namespace
/// definition.
-Decl *Sema::ActOnStartNamespaceDef(
- Scope *NamespcScope, SourceLocation InlineLoc, SourceLocation NamespaceLoc,
- SourceLocation IdentLoc, IdentifierInfo *II, SourceLocation LBrace,
- const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UD) {
+Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
+ SourceLocation InlineLoc,
+ SourceLocation NamespaceLoc,
+ SourceLocation IdentLoc, IdentifierInfo *II,
+ SourceLocation LBrace,
+ const ParsedAttributesView &AttrList,
+ UsingDirectiveDecl *&UD, bool IsNested) {
SourceLocation StartLoc = InlineLoc.isValid() ? InlineLoc : NamespaceLoc;
// For anonymous namespace, take the location of the left brace.
SourceLocation Loc = II ? IdentLoc : LBrace;
@@ -10926,6 +11626,20 @@ Decl *Sema::ActOnStartNamespaceDef(
NamespaceDecl *PrevNS = nullptr;
if (II) {
+ // C++ [namespace.std]p7:
+ // A translation unit shall not declare namespace std to be an inline
+ // namespace (9.8.2).
+ //
+ // Precondition: the std namespace is in the file scope and is declared to
+ // be inline
+ auto DiagnoseInlineStdNS = [&]() {
+ assert(IsInline && II->isStr("std") &&
+ CurContext->getRedeclContext()->isTranslationUnit() &&
+ "Precondition of DiagnoseInlineStdNS not met");
+ Diag(InlineLoc, diag::err_inline_namespace_std)
+ << SourceRange(InlineLoc, InlineLoc.getLocWithOffset(6));
+ IsInline = false;
+ };
// C++ [namespace.def]p2:
// The identifier in an original-namespace-definition shall not
// have been previously defined in the declarative region in
@@ -10946,7 +11660,10 @@ Decl *Sema::ActOnStartNamespaceDef(
if (PrevNS) {
// This is an extended namespace definition.
- if (IsInline != PrevNS->isInline())
+ if (IsInline && II->isStr("std") &&
+ CurContext->getRedeclContext()->isTranslationUnit())
+ DiagnoseInlineStdNS();
+ else if (IsInline != PrevNS->isInline())
DiagnoseNamespaceInlineMismatch(*this, NamespaceLoc, Loc, II,
&IsInline, PrevNS);
} else if (PrevDecl) {
@@ -10958,6 +11675,8 @@ Decl *Sema::ActOnStartNamespaceDef(
// Continue on to push Namespc as current DeclContext and return it.
} else if (II->isStr("std") &&
CurContext->getRedeclContext()->isTranslationUnit()) {
+ if (IsInline)
+ DiagnoseInlineStdNS();
// This is the first "real" definition of the namespace "std", so update
// our cache of the "std" namespace to point at this definition.
PrevNS = getStdNamespace();
@@ -10984,8 +11703,8 @@ Decl *Sema::ActOnStartNamespaceDef(
&IsInline, PrevNS);
}
- NamespaceDecl *Namespc = NamespaceDecl::Create(Context, CurContext, IsInline,
- StartLoc, Loc, II, PrevNS);
+ NamespaceDecl *Namespc = NamespaceDecl::Create(
+ Context, CurContext, IsInline, StartLoc, Loc, II, PrevNS, IsNested);
if (IsInvalid)
Namespc->setInvalidDecl();
@@ -11089,21 +11808,6 @@ NamespaceDecl *Sema::getStdNamespace() const {
return cast_or_null<NamespaceDecl>(
StdNamespace.get(Context.getExternalSource()));
}
-
-NamespaceDecl *Sema::lookupStdExperimentalNamespace() {
- if (!StdExperimentalNamespaceCache) {
- if (auto Std = getStdNamespace()) {
- LookupResult Result(*this, &PP.getIdentifierTable().get("experimental"),
- SourceLocation(), LookupNamespaceName);
- if (!LookupQualifiedName(Result, Std) ||
- !(StdExperimentalNamespaceCache =
- Result.getAsSingle<NamespaceDecl>()))
- Result.suppressDiagnostics();
- }
- }
- return StdExperimentalNamespaceCache;
-}
-
namespace {
enum UnsupportedSTLSelect {
@@ -11148,7 +11852,8 @@ QualType Sema::CheckComparisonCategoryType(ComparisonCategoryType Kind,
auto TyForDiags = [&](ComparisonCategoryInfo *Info) {
auto *NNS =
NestedNameSpecifier::Create(Context, nullptr, getStdNamespace());
- return Context.getElaboratedType(ETK_None, NNS, Info->getType());
+ return Context.getElaboratedType(ElaboratedTypeKeyword::None, NNS,
+ Info->getType());
};
// Check if we've already successfully checked the comparison category type
@@ -11246,13 +11951,16 @@ QualType Sema::CheckComparisonCategoryType(ComparisonCategoryType Kind,
NamespaceDecl *Sema::getOrCreateStdNamespace() {
if (!StdNamespace) {
// The "std" namespace has not yet been defined, so build one implicitly.
- StdNamespace = NamespaceDecl::Create(Context,
- Context.getTranslationUnitDecl(),
- /*Inline=*/false,
- SourceLocation(), SourceLocation(),
- &PP.getIdentifierTable().get("std"),
- /*PrevDecl=*/nullptr);
+ StdNamespace = NamespaceDecl::Create(
+ Context, Context.getTranslationUnitDecl(),
+ /*Inline=*/false, SourceLocation(), SourceLocation(),
+ &PP.getIdentifierTable().get("std"),
+ /*PrevDecl=*/nullptr, /*Nested=*/false);
getStdNamespace()->setImplicit(true);
+ // We want the created NamespaceDecl to be available for redeclaration
+ // lookups, but not for regular name lookups.
+ Context.getTranslationUnitDecl()->addDecl(getStdNamespace());
+ getStdNamespace()->clearIdentifierNamespace();
}
return getStdNamespace();
@@ -11284,7 +11992,7 @@ bool Sema::isStdInitializerList(QualType Ty, QualType *Element) {
Ty->getAs<TemplateSpecializationType>()) {
Template = dyn_cast_or_null<ClassTemplateDecl>(
TST->getTemplateName().getAsTemplateDecl());
- Arguments = TST->getArgs();
+ Arguments = TST->template_arguments().begin();
}
if (!Template)
return false;
@@ -11363,7 +12071,9 @@ QualType Sema::BuildStdInitializerList(QualType Element, SourceLocation Loc) {
Args.addArgument(TemplateArgumentLoc(TemplateArgument(Element),
Context.getTrivialTypeSourceInfo(Element,
Loc)));
- return Context.getCanonicalType(
+ return Context.getElaboratedType(
+ ElaboratedTypeKeyword::None,
+ NestedNameSpecifier::Create(Context, nullptr, getStdNamespace()),
CheckTemplateIdType(TemplateName(StdInitializerList), Loc, Args));
}
@@ -11414,6 +12124,24 @@ public:
}
+static void DiagnoseInvisibleNamespace(const TypoCorrection &Corrected,
+ Sema &S) {
+ auto *ND = cast<NamespaceDecl>(Corrected.getFoundDecl());
+ Module *M = ND->getOwningModule();
+ assert(M && "hidden namespace definition not in a module?");
+
+ if (M->isExplicitGlobalModule())
+ S.Diag(Corrected.getCorrectionRange().getBegin(),
+ diag::err_module_unimported_use_header)
+ << (int)Sema::MissingImportKind::Declaration << Corrected.getFoundDecl()
+ << /*Header Name*/ false;
+ else
+ S.Diag(Corrected.getCorrectionRange().getBegin(),
+ diag::err_module_unimported_use)
+ << (int)Sema::MissingImportKind::Declaration << Corrected.getFoundDecl()
+ << M->getTopLevelModuleName();
+}
+
static bool TryNamespaceTypoCorrection(Sema &S, LookupResult &R, Scope *Sc,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
@@ -11423,7 +12151,16 @@ static bool TryNamespaceTypoCorrection(Sema &S, LookupResult &R, Scope *Sc,
if (TypoCorrection Corrected =
S.CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), Sc, &SS, CCC,
Sema::CTK_ErrorRecovery)) {
- if (DeclContext *DC = S.computeDeclContext(SS, false)) {
+ // Generally we find it is confusing more than helpful to diagnose the
+ // invisible namespace.
+ // See https://github.com/llvm/llvm-project/issues/73893.
+ //
+ // However, we should diagnose when the users are trying to using an
+ // invisible namespace. So we handle the case specially here.
+ if (isa_and_nonnull<NamespaceDecl>(Corrected.getFoundDecl()) &&
+ Corrected.requiresImport()) {
+ DiagnoseInvisibleNamespace(Corrected, S);
+ } else if (DeclContext *DC = S.computeDeclContext(SS, false)) {
std::string CorrectedStr(Corrected.getAsString(S.getLangOpts()));
bool DroppedSpecifier = Corrected.WillReplaceSpecifier() &&
Ident->getName().equals(CorrectedStr);
@@ -11625,30 +12362,40 @@ Decl *Sema::ActOnUsingDeclaration(Scope *S, AccessSpecifier AS,
Decl *Sema::ActOnUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation EnumLoc,
- const DeclSpec &DS) {
- switch (DS.getTypeSpecType()) {
- case DeclSpec::TST_error:
- // This will already have been diagnosed
+ SourceLocation IdentLoc,
+ IdentifierInfo &II, CXXScopeSpec *SS) {
+ assert(!SS->isInvalid() && "ScopeSpec is invalid");
+ TypeSourceInfo *TSI = nullptr;
+ QualType EnumTy = GetTypeFromParser(
+ getTypeName(II, IdentLoc, S, SS, /*isClassName=*/false,
+ /*HasTrailingDot=*/false,
+ /*ObjectType=*/nullptr, /*IsCtorOrDtorName=*/false,
+ /*WantNontrivialTypeSourceInfo=*/true),
+ &TSI);
+ if (EnumTy.isNull()) {
+ Diag(IdentLoc, SS && isDependentScopeSpecifier(*SS)
+ ? diag::err_using_enum_is_dependent
+ : diag::err_unknown_typename)
+ << II.getName()
+ << SourceRange(SS ? SS->getBeginLoc() : IdentLoc, IdentLoc);
return nullptr;
+ }
- case DeclSpec::TST_enum:
- break;
-
- case DeclSpec::TST_typename:
- Diag(DS.getTypeSpecTypeLoc(), diag::err_using_enum_is_dependent);
+ auto *Enum = dyn_cast_if_present<EnumDecl>(EnumTy->getAsTagDecl());
+ if (!Enum) {
+ Diag(IdentLoc, diag::err_using_enum_not_enum) << EnumTy;
return nullptr;
-
- default:
- llvm_unreachable("unexpected DeclSpec type");
}
- // As with enum-decls, we ignore attributes for now.
- auto *Enum = cast<EnumDecl>(DS.getRepAsDecl());
if (auto *Def = Enum->getDefinition())
Enum = Def;
- auto *UD = BuildUsingEnumDeclaration(S, AS, UsingLoc, EnumLoc,
- DS.getTypeSpecTypeNameLoc(), Enum);
+ if (TSI == nullptr)
+ TSI = Context.getTrivialTypeSourceInfo(EnumTy, IdentLoc);
+
+ auto *UD =
+ BuildUsingEnumDeclaration(S, AS, UsingLoc, EnumLoc, IdentLoc, TSI, Enum);
+
if (UD)
PushOnScopeChains(UD, S, /*AddToContext*/ false);
@@ -12192,7 +12939,7 @@ NamedDecl *Sema::BuildUsingDeclaration(
// Unlike most lookups, we don't always want to hide tag
// declarations: tag names are visible through the using declaration
// even if hidden by ordinary names, *except* in a dependent context
- // where it's important for the sanity of two-phase lookup.
+ // where they may be used by two-phase lookup.
if (!IsInstantiation)
R.setHideTags(false);
@@ -12340,6 +13087,7 @@ NamedDecl *Sema::BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation EnumLoc,
SourceLocation NameLoc,
+ TypeSourceInfo *EnumType,
EnumDecl *ED) {
bool Invalid = false;
@@ -12366,7 +13114,7 @@ NamedDecl *Sema::BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
Invalid = true;
UsingEnumDecl *UD = UsingEnumDecl::Create(Context, CurContext, UsingLoc,
- EnumLoc, NameLoc, ED);
+ EnumLoc, NameLoc, EnumType);
UD->setAccess(AS);
CurContext->addDecl(UD);
@@ -12708,7 +13456,7 @@ bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
// Salient point: SS doesn't have to name a base class as long as
// lookup only finds members from base classes. Therefore we can
- // diagnose here only if we can prove that that can't happen,
+ // diagnose here only if we can prove that can't happen,
// i.e. if the class hierarchies provably don't intersect.
// TODO: it would be nice if "definitely valid" results were cached
@@ -12788,7 +13536,7 @@ Decl *Sema::ActOnAliasDeclaration(Scope *S, AccessSpecifier AS,
Previous.clear();
}
- assert(Name.Kind == UnqualifiedIdKind::IK_Identifier &&
+ assert(Name.getKind() == UnqualifiedIdKind::IK_Identifier &&
"name in alias declaration must be an identifier");
TypeAliasDecl *NewTD = TypeAliasDecl::Create(Context, CurContext, UsingLoc,
Name.StartLocation,
@@ -12884,7 +13632,7 @@ Decl *Sema::ActOnAliasDeclaration(Scope *S, AccessSpecifier AS,
NewDecl->setInvalidDecl();
else if (OldDecl) {
NewDecl->setPreviousDecl(OldDecl);
- CheckRedeclarationModuleOwnership(NewDecl, OldDecl);
+ CheckRedeclarationInModule(NewDecl, OldDecl);
}
NewND = NewDecl;
@@ -13178,7 +13926,8 @@ void Sema::CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD) {
R.resolveKind();
R.suppressDiagnostics();
- CheckFunctionDeclaration(S, FD, R, /*IsMemberSpecialization*/false);
+ CheckFunctionDeclaration(S, FD, R, /*IsMemberSpecialization*/ false,
+ FD->isThisDeclarationADefinition());
}
void Sema::setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
@@ -13235,20 +13984,20 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
CXXConstructorDecl *DefaultCon = CXXConstructorDecl::Create(
Context, ClassDecl, ClassLoc, NameInfo, /*Type*/ QualType(),
/*TInfo=*/nullptr, ExplicitSpecifier(),
+ getCurFPFeatures().isFPConstrained(),
/*isInline=*/true, /*isImplicitlyDeclared=*/true,
Constexpr ? ConstexprSpecKind::Constexpr
: ConstexprSpecKind::Unspecified);
DefaultCon->setAccess(AS_public);
DefaultCon->setDefaulted();
- if (getLangOpts().CUDA) {
+ setupImplicitSpecialMemberType(DefaultCon, Context.VoidTy, std::nullopt);
+
+ if (getLangOpts().CUDA)
inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXDefaultConstructor,
DefaultCon,
/* ConstRHS */ false,
/* Diagnose */ false);
- }
-
- setupImplicitSpecialMemberType(DefaultCon, Context.VoidTy, None);
// We don't need to use SpecialMemberIsTrivial here; triviality for default
// constructors is easy to compute.
@@ -13356,7 +14105,8 @@ Sema::findInheritingConstructor(SourceLocation Loc,
CXXConstructorDecl *DerivedCtor = CXXConstructorDecl::Create(
Context, Derived, UsingLoc, NameInfo, TInfo->getType(), TInfo,
- BaseCtor->getExplicitSpecifier(), /*isInline=*/true,
+ BaseCtor->getExplicitSpecifier(), getCurFPFeatures().isFPConstrained(),
+ /*isInline=*/true,
/*isImplicitlyDeclared=*/true,
Constexpr ? BaseCtor->getConstexprKind() : ConstexprSpecKind::Unspecified,
InheritedConstructor(Shadow, BaseCtor),
@@ -13511,23 +14261,23 @@ CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) {
DeclarationName Name
= Context.DeclarationNames.getCXXDestructorName(ClassType);
DeclarationNameInfo NameInfo(Name, ClassLoc);
- CXXDestructorDecl *Destructor =
- CXXDestructorDecl::Create(Context, ClassDecl, ClassLoc, NameInfo,
- QualType(), nullptr, /*isInline=*/true,
- /*isImplicitlyDeclared=*/true,
- Constexpr ? ConstexprSpecKind::Constexpr
- : ConstexprSpecKind::Unspecified);
+ CXXDestructorDecl *Destructor = CXXDestructorDecl::Create(
+ Context, ClassDecl, ClassLoc, NameInfo, QualType(), nullptr,
+ getCurFPFeatures().isFPConstrained(),
+ /*isInline=*/true,
+ /*isImplicitlyDeclared=*/true,
+ Constexpr ? ConstexprSpecKind::Constexpr
+ : ConstexprSpecKind::Unspecified);
Destructor->setAccess(AS_public);
Destructor->setDefaulted();
- if (getLangOpts().CUDA) {
+ setupImplicitSpecialMemberType(Destructor, Context.VoidTy, std::nullopt);
+
+ if (getLangOpts().CUDA)
inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXDestructor,
Destructor,
/* ConstRHS */ false,
/* Diagnose */ false);
- }
-
- setupImplicitSpecialMemberType(Destructor, Context.VoidTy, None);
// We don't need to use SpecialMemberIsTrivial here; triviality for
// destructors is easy to compute.
@@ -13682,7 +14432,8 @@ void Sema::AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor) {
FunctionProtoType::ExtProtoInfo EPI = DtorType->getExtProtoInfo();
EPI.ExceptionSpec.Type = EST_Unevaluated;
EPI.ExceptionSpec.SourceDecl = Destructor;
- Destructor->setType(Context.getFunctionType(Context.VoidTy, None, EPI));
+ Destructor->setType(
+ Context.getFunctionType(Context.VoidTy, std::nullopt, EPI));
// FIXME: If the destructor has a body that could throw, and the newly created
// spec doesn't allow exceptions, we should emit a warning, because this
@@ -14130,6 +14881,8 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
return nullptr;
QualType ArgType = Context.getTypeDeclType(ClassDecl);
+ ArgType = Context.getElaboratedType(ElaboratedTypeKeyword::None, nullptr,
+ ArgType, nullptr);
LangAS AS = getDefaultCXXMethodAddrSpace();
if (AS != LangAS::Default)
ArgType = Context.getAddrSpaceQualType(ArgType, AS);
@@ -14152,6 +14905,7 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
CXXMethodDecl *CopyAssignment = CXXMethodDecl::Create(
Context, ClassDecl, ClassLoc, NameInfo, QualType(),
/*TInfo=*/nullptr, /*StorageClass=*/SC_None,
+ getCurFPFeatures().isFPConstrained(),
/*isInline=*/true,
Constexpr ? ConstexprSpecKind::Constexpr : ConstexprSpecKind::Unspecified,
SourceLocation());
@@ -14159,14 +14913,13 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
CopyAssignment->setDefaulted();
CopyAssignment->setImplicit();
- if (getLangOpts().CUDA) {
+ setupImplicitSpecialMemberType(CopyAssignment, RetType, ArgType);
+
+ if (getLangOpts().CUDA)
inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXCopyAssignment,
CopyAssignment,
/* ConstRHS */ Const,
/* Diagnose */ false);
- }
-
- setupImplicitSpecialMemberType(CopyAssignment, RetType, ArgType);
// Add the parameter to the operator.
ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyAssignment,
@@ -14208,13 +14961,10 @@ static void diagnoseDeprecatedCopyOperation(Sema &S, CXXMethodDecl *CopyOp) {
CXXRecordDecl *RD = CopyOp->getParent();
CXXMethodDecl *UserDeclaredOperation = nullptr;
- // In Microsoft mode, assignment operations don't affect constructors and
- // vice versa.
if (RD->hasUserDeclaredDestructor()) {
UserDeclaredOperation = RD->getDestructor();
} else if (!isa<CXXConstructorDecl>(CopyOp) &&
- RD->hasUserDeclaredCopyConstructor() &&
- !S.getLangOpts().MSVCCompat) {
+ RD->hasUserDeclaredCopyConstructor()) {
// Find any user-declared copy constructor.
for (auto *I : RD->ctors()) {
if (I->isCopyConstructor()) {
@@ -14224,8 +14974,7 @@ static void diagnoseDeprecatedCopyOperation(Sema &S, CXXMethodDecl *CopyOp) {
}
assert(UserDeclaredOperation);
} else if (isa<CXXConstructorDecl>(CopyOp) &&
- RD->hasUserDeclaredCopyAssignment() &&
- !S.getLangOpts().MSVCCompat) {
+ RD->hasUserDeclaredCopyAssignment()) {
// Find any user-declared move assignment operator.
for (auto *I : RD->methods()) {
if (I->isCopyAssignmentOperator()) {
@@ -14299,12 +15048,11 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
SmallVector<Stmt*, 8> Statements;
// The parameter for the "other" object, which we are copying from.
- ParmVarDecl *Other = CopyAssignOperator->getParamDecl(0);
+ ParmVarDecl *Other = CopyAssignOperator->getNonObjectParameter(0);
Qualifiers OtherQuals = Other->getType().getQualifiers();
QualType OtherRefType = Other->getType();
- if (const LValueReferenceType *OtherRef
- = OtherRefType->getAs<LValueReferenceType>()) {
- OtherRefType = OtherRef->getPointeeType();
+ if (OtherRefType->isLValueReferenceType()) {
+ OtherRefType = OtherRefType->getPointeeType();
OtherQuals = OtherRefType.getQualifiers();
}
@@ -14316,8 +15064,26 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
// Builds a DeclRefExpr for the "other" object.
RefBuilder OtherRef(Other, OtherRefType);
- // Builds the "this" pointer.
- ThisBuilder This;
+ // Builds the function object parameter.
+ std::optional<ThisBuilder> This;
+ std::optional<DerefBuilder> DerefThis;
+ std::optional<RefBuilder> ExplicitObject;
+ bool IsArrow = false;
+ QualType ObjectType;
+ if (CopyAssignOperator->isExplicitObjectMemberFunction()) {
+ ObjectType = CopyAssignOperator->getParamDecl(0)->getType();
+ if (ObjectType->isReferenceType())
+ ObjectType = ObjectType->getPointeeType();
+ ExplicitObject.emplace(CopyAssignOperator->getParamDecl(0), ObjectType);
+ } else {
+ ObjectType = getCurrentThisType();
+ This.emplace();
+ DerefThis.emplace(*This);
+ IsArrow = !LangOpts.HLSL;
+ }
+ ExprBuilder &ObjectParameter =
+ ExplicitObject ? static_cast<ExprBuilder &>(*ExplicitObject)
+ : static_cast<ExprBuilder &>(*This);
// Assign base classes.
bool Invalid = false;
@@ -14339,11 +15105,11 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
VK_LValue, BasePath);
// Dereference "this".
- DerefBuilder DerefThis(This);
- CastBuilder To(DerefThis,
- Context.getQualifiedType(
- BaseType, CopyAssignOperator->getMethodQualifiers()),
- VK_LValue, BasePath);
+ CastBuilder To(
+ ExplicitObject ? static_cast<ExprBuilder &>(*ExplicitObject)
+ : static_cast<ExprBuilder &>(*DerefThis),
+ Context.getQualifiedType(BaseType, ObjectType.getQualifiers()),
+ VK_LValue, BasePath);
// Build the copy.
StmtResult Copy = buildSingleCopyAssign(*this, Loc, BaseType,
@@ -14409,9 +15175,7 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
MemberLookup.resolveKind();
MemberBuilder From(OtherRef, OtherRefType, /*IsArrow=*/false, MemberLookup);
-
- MemberBuilder To(This, getCurrentThisType(), /*IsArrow=*/true, MemberLookup);
-
+ MemberBuilder To(ObjectParameter, ObjectType, IsArrow, MemberLookup);
// Build the copy of this field.
StmtResult Copy = buildSingleCopyAssign(*this, Loc, FieldType,
To, From,
@@ -14428,9 +15192,12 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
if (!Invalid) {
// Add a "return *this;"
- ExprResult ThisObj = CreateBuiltinUnaryOp(Loc, UO_Deref, This.build(*this, Loc));
-
- StmtResult Return = BuildReturnStmt(Loc, ThisObj.get());
+ Expr *ThisExpr =
+ (ExplicitObject ? static_cast<ExprBuilder &>(*ExplicitObject)
+ : LangOpts.HLSL ? static_cast<ExprBuilder &>(*This)
+ : static_cast<ExprBuilder &>(*DerefThis))
+ .build(*this, Loc);
+ StmtResult Return = BuildReturnStmt(Loc, ThisExpr);
if (Return.isInvalid())
Invalid = true;
else
@@ -14468,6 +15235,8 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
// constructor rules.
QualType ArgType = Context.getTypeDeclType(ClassDecl);
+ ArgType = Context.getElaboratedType(ElaboratedTypeKeyword::None, nullptr,
+ ArgType, nullptr);
LangAS AS = getDefaultCXXMethodAddrSpace();
if (AS != LangAS::Default)
ArgType = Context.getAddrSpaceQualType(ArgType, AS);
@@ -14486,6 +15255,7 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
CXXMethodDecl *MoveAssignment = CXXMethodDecl::Create(
Context, ClassDecl, ClassLoc, NameInfo, QualType(),
/*TInfo=*/nullptr, /*StorageClass=*/SC_None,
+ getCurFPFeatures().isFPConstrained(),
/*isInline=*/true,
Constexpr ? ConstexprSpecKind::Constexpr : ConstexprSpecKind::Unspecified,
SourceLocation());
@@ -14493,14 +15263,13 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
MoveAssignment->setDefaulted();
MoveAssignment->setImplicit();
- if (getLangOpts().CUDA) {
+ setupImplicitSpecialMemberType(MoveAssignment, RetType, ArgType);
+
+ if (getLangOpts().CUDA)
inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXMoveAssignment,
MoveAssignment,
/* ConstRHS */ false,
/* Diagnose */ false);
- }
-
- setupImplicitSpecialMemberType(MoveAssignment, RetType, ArgType);
// Add the parameter to the operator.
ParmVarDecl *FromParam = ParmVarDecl::Create(Context, MoveAssignment,
@@ -14610,8 +15379,7 @@ static void checkMoveAssignmentForRepeatedMove(Sema &S, CXXRecordDecl *Class,
continue;
// We're going to move the base classes of Base. Add them to the list.
- for (auto &BI : Base->bases())
- Worklist.push_back(&BI);
+ llvm::append_range(Worklist, llvm::make_pointer_range(Base->bases()));
}
}
}
@@ -14660,7 +15428,7 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
SmallVector<Stmt*, 8> Statements;
// The parameter for the "other" object, which we are move from.
- ParmVarDecl *Other = MoveAssignOperator->getParamDecl(0);
+ ParmVarDecl *Other = MoveAssignOperator->getNonObjectParameter(0);
QualType OtherRefType =
Other->getType()->castAs<RValueReferenceType>()->getPointeeType();
@@ -14674,8 +15442,23 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
// Cast to rvalue.
MoveCastBuilder MoveOther(OtherRef);
- // Builds the "this" pointer.
- ThisBuilder This;
+ // Builds the function object parameter.
+ std::optional<ThisBuilder> This;
+ std::optional<DerefBuilder> DerefThis;
+ std::optional<RefBuilder> ExplicitObject;
+ QualType ObjectType;
+ if (MoveAssignOperator->isExplicitObjectMemberFunction()) {
+ ObjectType = MoveAssignOperator->getParamDecl(0)->getType();
+ if (ObjectType->isReferenceType())
+ ObjectType = ObjectType->getPointeeType();
+ ExplicitObject.emplace(MoveAssignOperator->getParamDecl(0), ObjectType);
+ } else {
+ ObjectType = getCurrentThisType();
+ This.emplace();
+ DerefThis.emplace(*This);
+ }
+ ExprBuilder &ObjectParameter =
+ ExplicitObject ? *ExplicitObject : static_cast<ExprBuilder &>(*This);
// Assign base classes.
bool Invalid = false;
@@ -14703,14 +15486,13 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
// appropriately-qualified base type.
CastBuilder From(OtherRef, BaseType, VK_XValue, BasePath);
- // Dereference "this".
- DerefBuilder DerefThis(This);
-
// Implicitly cast "this" to the appropriately-qualified base type.
- CastBuilder To(DerefThis,
- Context.getQualifiedType(
- BaseType, MoveAssignOperator->getMethodQualifiers()),
- VK_LValue, BasePath);
+ // Dereference "this".
+ CastBuilder To(
+ ExplicitObject ? static_cast<ExprBuilder &>(*ExplicitObject)
+ : static_cast<ExprBuilder &>(*DerefThis),
+ Context.getQualifiedType(BaseType, ObjectType.getQualifiers()),
+ VK_LValue, BasePath);
// Build the move.
StmtResult Move = buildSingleCopyAssign(*this, Loc, BaseType,
@@ -14775,8 +15557,8 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
MemberLookup.resolveKind();
MemberBuilder From(MoveOther, OtherRefType,
/*IsArrow=*/false, MemberLookup);
- MemberBuilder To(This, getCurrentThisType(),
- /*IsArrow=*/true, MemberLookup);
+ MemberBuilder To(ObjectParameter, ObjectType, /*IsArrow=*/!ExplicitObject,
+ MemberLookup);
assert(!From.build(*this, Loc)->isLValue() && // could be xvalue or prvalue
"Member reference with rvalue base must be rvalue except for reference "
@@ -14798,10 +15580,12 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
if (!Invalid) {
// Add a "return *this;"
- ExprResult ThisObj =
- CreateBuiltinUnaryOp(Loc, UO_Deref, This.build(*this, Loc));
+ Expr *ThisExpr =
+ (ExplicitObject ? static_cast<ExprBuilder &>(*ExplicitObject)
+ : static_cast<ExprBuilder &>(*DerefThis))
+ .build(*this, Loc);
- StmtResult Return = BuildReturnStmt(Loc, ThisObj.get());
+ StmtResult Return = BuildReturnStmt(Loc, ThisExpr);
if (Return.isInvalid())
Invalid = true;
else
@@ -14841,6 +15625,8 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
QualType ClassType = Context.getTypeDeclType(ClassDecl);
QualType ArgType = ClassType;
+ ArgType = Context.getElaboratedType(ElaboratedTypeKeyword::None, nullptr,
+ ArgType, nullptr);
bool Const = ClassDecl->implicitCopyConstructorHasConstParam();
if (Const)
ArgType = ArgType.withConst();
@@ -14865,7 +15651,7 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
// member of its class.
CXXConstructorDecl *CopyConstructor = CXXConstructorDecl::Create(
Context, ClassDecl, ClassLoc, NameInfo, QualType(), /*TInfo=*/nullptr,
- ExplicitSpecifier(),
+ ExplicitSpecifier(), getCurFPFeatures().isFPConstrained(),
/*isInline=*/true,
/*isImplicitlyDeclared=*/true,
Constexpr ? ConstexprSpecKind::Constexpr
@@ -14873,14 +15659,13 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
CopyConstructor->setAccess(AS_public);
CopyConstructor->setDefaulted();
- if (getLangOpts().CUDA) {
+ setupImplicitSpecialMemberType(CopyConstructor, Context.VoidTy, ArgType);
+
+ if (getLangOpts().CUDA)
inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXCopyConstructor,
CopyConstructor,
/* ConstRHS */ Const,
/* Diagnose */ false);
- }
-
- setupImplicitSpecialMemberType(CopyConstructor, Context.VoidTy, ArgType);
// During template instantiation of special member functions we need a
// reliable TypeSourceInfo for the parameter types in order to allow functions
@@ -14965,7 +15750,8 @@ void Sema::DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
: CopyConstructor->getLocation();
Sema::CompoundScopeRAII CompoundScope(*this);
CopyConstructor->setBody(
- ActOnCompoundStmt(Loc, Loc, None, /*isStmtExpr=*/false).getAs<Stmt>());
+ ActOnCompoundStmt(Loc, Loc, std::nullopt, /*isStmtExpr=*/false)
+ .getAs<Stmt>());
CopyConstructor->markUsed(Context);
}
@@ -14985,6 +15771,8 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
QualType ClassType = Context.getTypeDeclType(ClassDecl);
QualType ArgType = ClassType;
+ ArgType = Context.getElaboratedType(ElaboratedTypeKeyword::None, nullptr,
+ ArgType, nullptr);
LangAS AS = getDefaultCXXMethodAddrSpace();
if (AS != LangAS::Default)
ArgType = Context.getAddrSpaceQualType(ClassType, AS);
@@ -15005,7 +15793,7 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
// member of its class.
CXXConstructorDecl *MoveConstructor = CXXConstructorDecl::Create(
Context, ClassDecl, ClassLoc, NameInfo, QualType(), /*TInfo=*/nullptr,
- ExplicitSpecifier(),
+ ExplicitSpecifier(), getCurFPFeatures().isFPConstrained(),
/*isInline=*/true,
/*isImplicitlyDeclared=*/true,
Constexpr ? ConstexprSpecKind::Constexpr
@@ -15013,14 +15801,13 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
MoveConstructor->setAccess(AS_public);
MoveConstructor->setDefaulted();
- if (getLangOpts().CUDA) {
+ setupImplicitSpecialMemberType(MoveConstructor, Context.VoidTy, ArgType);
+
+ if (getLangOpts().CUDA)
inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXMoveConstructor,
MoveConstructor,
/* ConstRHS */ false,
/* Diagnose */ false);
- }
-
- setupImplicitSpecialMemberType(MoveConstructor, Context.VoidTy, ArgType);
// Add the parameter to the constructor.
ParmVarDecl *FromParam = ParmVarDecl::Create(Context, MoveConstructor,
@@ -15091,8 +15878,9 @@ void Sema::DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
? MoveConstructor->getEndLoc()
: MoveConstructor->getLocation();
Sema::CompoundScopeRAII CompoundScope(*this);
- MoveConstructor->setBody(ActOnCompoundStmt(
- Loc, Loc, None, /*isStmtExpr=*/ false).getAs<Stmt>());
+ MoveConstructor->setBody(
+ ActOnCompoundStmt(Loc, Loc, std::nullopt, /*isStmtExpr=*/false)
+ .getAs<Stmt>());
MoveConstructor->markUsed(Context);
}
@@ -15117,7 +15905,10 @@ void Sema::DefineImplicitLambdaToFunctionPointerConversion(
CXXRecordDecl *Lambda = Conv->getParent();
FunctionDecl *CallOp = Lambda->getLambdaCallOperator();
- FunctionDecl *Invoker = Lambda->getLambdaStaticInvoker(CC);
+ FunctionDecl *Invoker =
+ CallOp->hasCXXExplicitFunctionObjectParameter() || CallOp->isStatic()
+ ? CallOp
+ : Lambda->getLambdaStaticInvoker(CC);
if (auto *TemplateArgs = Conv->getTemplateSpecializationArgs()) {
CallOp = InstantiateFunctionDeclaration(
@@ -15125,10 +15916,13 @@ void Sema::DefineImplicitLambdaToFunctionPointerConversion(
if (!CallOp)
return;
- Invoker = InstantiateFunctionDeclaration(
- Invoker->getDescribedFunctionTemplate(), TemplateArgs, CurrentLocation);
- if (!Invoker)
- return;
+ if (CallOp != Invoker) {
+ Invoker = InstantiateFunctionDeclaration(
+ Invoker->getDescribedFunctionTemplate(), TemplateArgs,
+ CurrentLocation);
+ if (!Invoker)
+ return;
+ }
}
if (CallOp->isInvalidDecl())
@@ -15141,36 +15935,35 @@ void Sema::DefineImplicitLambdaToFunctionPointerConversion(
// to the PendingInstantiations.
MarkFunctionReferenced(CurrentLocation, CallOp);
- // Fill in the __invoke function with a dummy implementation. IR generation
- // will fill in the actual details. Update its type in case it contained
- // an 'auto'.
- Invoker->markUsed(Context);
- Invoker->setReferenced();
- Invoker->setType(Conv->getReturnType()->getPointeeType());
- Invoker->setBody(new (Context) CompoundStmt(Conv->getLocation()));
+ if (Invoker != CallOp) {
+ // Fill in the __invoke function with a dummy implementation. IR generation
+ // will fill in the actual details. Update its type in case it contained
+ // an 'auto'.
+ Invoker->markUsed(Context);
+ Invoker->setReferenced();
+ Invoker->setType(Conv->getReturnType()->getPointeeType());
+ Invoker->setBody(new (Context) CompoundStmt(Conv->getLocation()));
+ }
// Construct the body of the conversion function { return __invoke; }.
- Expr *FunctionRef = BuildDeclRefExpr(Invoker, Invoker->getType(),
- VK_LValue, Conv->getLocation());
+ Expr *FunctionRef = BuildDeclRefExpr(Invoker, Invoker->getType(), VK_LValue,
+ Conv->getLocation());
assert(FunctionRef && "Can't refer to __invoke function?");
Stmt *Return = BuildReturnStmt(Conv->getLocation(), FunctionRef).get();
- Conv->setBody(CompoundStmt::Create(Context, Return, Conv->getLocation(),
- Conv->getLocation()));
+ Conv->setBody(CompoundStmt::Create(Context, Return, FPOptionsOverride(),
+ Conv->getLocation(), Conv->getLocation()));
Conv->markUsed(Context);
Conv->setReferenced();
if (ASTMutationListener *L = getASTMutationListener()) {
L->CompletedImplicitDefinition(Conv);
- L->CompletedImplicitDefinition(Invoker);
+ if (Invoker != CallOp)
+ L->CompletedImplicitDefinition(Invoker);
}
}
-
-
void Sema::DefineImplicitLambdaToBlockPointerConversion(
- SourceLocation CurrentLocation,
- CXXConversionDecl *Conv)
-{
+ SourceLocation CurrentLocation, CXXConversionDecl *Conv) {
assert(!Conv->getParent()->isGenericLambda());
SynthesizedFunctionScope Scope(*this, Conv);
@@ -15209,8 +16002,8 @@ void Sema::DefineImplicitLambdaToBlockPointerConversion(
// Set the body of the conversion function.
Stmt *ReturnS = Return.get();
- Conv->setBody(CompoundStmt::Create(Context, ReturnS, Conv->getLocation(),
- Conv->getLocation()));
+ Conv->setBody(CompoundStmt::Create(Context, ReturnS, FPOptionsOverride(),
+ Conv->getLocation(), Conv->getLocation()));
Conv->markUsed(Context);
// We're done; notify the mutation listener, if any.
@@ -15230,7 +16023,7 @@ static bool hasOneRealArgument(MultiExprArg Args) {
if (!Args[1]->isDefaultArgument())
return false;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 1:
return !Args[0]->isDefaultArgument();
}
@@ -15238,17 +16031,12 @@ static bool hasOneRealArgument(MultiExprArg Args) {
return false;
}
-ExprResult
-Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
- NamedDecl *FoundDecl,
- CXXConstructorDecl *Constructor,
- MultiExprArg ExprArgs,
- bool HadMultipleCandidates,
- bool IsListInitialization,
- bool IsStdInitListInitialization,
- bool RequiresZeroInit,
- unsigned ConstructKind,
- SourceRange ParenRange) {
+ExprResult Sema::BuildCXXConstructExpr(
+ SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl,
+ CXXConstructorDecl *Constructor, MultiExprArg ExprArgs,
+ bool HadMultipleCandidates, bool IsListInitialization,
+ bool IsStdInitListInitialization, bool RequiresZeroInit,
+ CXXConstructionKind ConstructKind, SourceRange ParenRange) {
bool Elidable = false;
// C++0x [class.copy]p34:
@@ -15261,7 +16049,7 @@ Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
// with the same cv-unqualified type, the copy/move operation
// can be omitted by constructing the temporary object
// directly into the target of the omitted copy/move
- if (ConstructKind == CXXConstructExpr::CK_Complete && Constructor &&
+ if (ConstructKind == CXXConstructionKind::Complete && Constructor &&
// FIXME: Converting constructors should also be accepted.
// But to fix this, the logic that digs down into a CXXConstructExpr
// to find the source object needs to handle it.
@@ -15285,21 +16073,18 @@ Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
ConstructKind, ParenRange);
}
-ExprResult
-Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
- NamedDecl *FoundDecl,
- CXXConstructorDecl *Constructor,
- bool Elidable,
- MultiExprArg ExprArgs,
- bool HadMultipleCandidates,
- bool IsListInitialization,
- bool IsStdInitListInitialization,
- bool RequiresZeroInit,
- unsigned ConstructKind,
- SourceRange ParenRange) {
+ExprResult Sema::BuildCXXConstructExpr(
+ SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl,
+ CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg ExprArgs,
+ bool HadMultipleCandidates, bool IsListInitialization,
+ bool IsStdInitListInitialization, bool RequiresZeroInit,
+ CXXConstructionKind ConstructKind, SourceRange ParenRange) {
if (auto *Shadow = dyn_cast<ConstructorUsingShadowDecl>(FoundDecl)) {
Constructor = findInheritingConstructor(ConstructLoc, Constructor, Shadow);
- if (DiagnoseUseOfDecl(Constructor, ConstructLoc))
+ // The only way to get here is if we did overlaod resolution to find the
+ // shadow decl, so we don't need to worry about re-checking the trailing
+ // requires clause.
+ if (DiagnoseUseOfOverloadedDecl(Constructor, ConstructLoc))
return ExprError();
}
@@ -15311,17 +16096,12 @@ Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
-ExprResult
-Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
- CXXConstructorDecl *Constructor,
- bool Elidable,
- MultiExprArg ExprArgs,
- bool HadMultipleCandidates,
- bool IsListInitialization,
- bool IsStdInitListInitialization,
- bool RequiresZeroInit,
- unsigned ConstructKind,
- SourceRange ParenRange) {
+ExprResult Sema::BuildCXXConstructExpr(
+ SourceLocation ConstructLoc, QualType DeclInitType,
+ CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg ExprArgs,
+ bool HadMultipleCandidates, bool IsListInitialization,
+ bool IsStdInitListInitialization, bool RequiresZeroInit,
+ CXXConstructionKind ConstructKind, SourceRange ParenRange) {
assert(declaresSameEntity(
Constructor->getParent(),
DeclInitType->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) &&
@@ -15329,88 +16109,20 @@ Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
MarkFunctionReferenced(ConstructLoc, Constructor);
if (getLangOpts().CUDA && !CheckCUDACall(ConstructLoc, Constructor))
return ExprError();
- if (getLangOpts().SYCLIsDevice &&
- !checkSYCLDeviceFunction(ConstructLoc, Constructor))
- return ExprError();
return CheckForImmediateInvocation(
CXXConstructExpr::Create(
Context, DeclInitType, ConstructLoc, Constructor, Elidable, ExprArgs,
HadMultipleCandidates, IsListInitialization,
IsStdInitListInitialization, RequiresZeroInit,
- static_cast<CXXConstructExpr::ConstructionKind>(ConstructKind),
- ParenRange),
+ static_cast<CXXConstructionKind>(ConstructKind), ParenRange),
Constructor);
}
-ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
- assert(Field->hasInClassInitializer());
-
- // If we already have the in-class initializer nothing needs to be done.
- if (Field->getInClassInitializer())
- return CXXDefaultInitExpr::Create(Context, Loc, Field, CurContext);
-
- // If we might have already tried and failed to instantiate, don't try again.
- if (Field->isInvalidDecl())
- return ExprError();
-
- // Maybe we haven't instantiated the in-class initializer. Go check the
- // pattern FieldDecl to see if it has one.
- CXXRecordDecl *ParentRD = cast<CXXRecordDecl>(Field->getParent());
-
- if (isTemplateInstantiation(ParentRD->getTemplateSpecializationKind())) {
- CXXRecordDecl *ClassPattern = ParentRD->getTemplateInstantiationPattern();
- DeclContext::lookup_result Lookup =
- ClassPattern->lookup(Field->getDeclName());
-
- FieldDecl *Pattern = nullptr;
- for (auto L : Lookup) {
- if (isa<FieldDecl>(L)) {
- Pattern = cast<FieldDecl>(L);
- break;
- }
- }
- assert(Pattern && "We must have set the Pattern!");
-
- if (!Pattern->hasInClassInitializer() ||
- InstantiateInClassInitializer(Loc, Field, Pattern,
- getTemplateInstantiationArgs(Field))) {
- // Don't diagnose this again.
- Field->setInvalidDecl();
- return ExprError();
- }
- return CXXDefaultInitExpr::Create(Context, Loc, Field, CurContext);
- }
-
- // DR1351:
- // If the brace-or-equal-initializer of a non-static data member
- // invokes a defaulted default constructor of its class or of an
- // enclosing class in a potentially evaluated subexpression, the
- // program is ill-formed.
- //
- // This resolution is unworkable: the exception specification of the
- // default constructor can be needed in an unevaluated context, in
- // particular, in the operand of a noexcept-expression, and we can be
- // unable to compute an exception specification for an enclosed class.
- //
- // Any attempt to resolve the exception specification of a defaulted default
- // constructor before the initializer is lexically complete will ultimately
- // come here at which point we can diagnose it.
- RecordDecl *OutermostClass = ParentRD->getOuterLexicalRecordContext();
- Diag(Loc, diag::err_default_member_initializer_not_yet_parsed)
- << OutermostClass << Field;
- Diag(Field->getEndLoc(),
- diag::note_default_member_initializer_not_yet_parsed);
- // Recover by marking the field invalid, unless we're in a SFINAE context.
- if (!isSFINAEContext())
- Field->setInvalidDecl();
- return ExprError();
-}
-
void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
if (VD->isInvalidDecl()) return;
// If initializing the variable failed, don't also diagnose problems with
- // the desctructor, they're likely related.
+ // the destructor, they're likely related.
if (VD->getInit() && VD->getInit()->containsErrors())
return;
@@ -15423,7 +16135,11 @@ void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
return;
CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
-
+ // The result of `LookupDestructor` might be nullptr if the destructor is
+ // invalid, in which case it is marked as `IneligibleOrNotSelected` and
+ // will not be selected by `CXXRecordDecl::getDestructor()`.
+ if (!Destructor)
+ return;
// If this is an array, we'll require the destructor during initialization, so
// we can skip over this. We still want to emit exit-time destructor warnings
// though.
@@ -15453,7 +16169,8 @@ void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
}
}
- if (!VD->hasGlobalStorage()) return;
+ if (!VD->hasGlobalStorage() || !VD->needsDestruction(Context))
+ return;
// Emit warning for non-trivial dtor in global scope (a real global,
// class-static, function-static).
@@ -15491,19 +16208,16 @@ bool Sema::CompleteConstructorCall(CXXConstructorDecl *Constructor,
VariadicCallType CallType =
Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;
SmallVector<Expr *, 8> AllArgs;
- bool Invalid = GatherArgumentsForCall(Loc, Constructor,
- Proto, 0,
- llvm::makeArrayRef(Args, NumArgs),
- AllArgs,
- CallType, AllowExplicit,
- IsListInitialization);
+ bool Invalid = GatherArgumentsForCall(
+ Loc, Constructor, Proto, 0, llvm::ArrayRef(Args, NumArgs), AllArgs,
+ CallType, AllowExplicit, IsListInitialization);
ConvertedArgs.append(AllArgs.begin(), AllArgs.end());
DiagnoseSentinelCalls(Constructor, Loc, AllArgs);
CheckConstructorCall(Constructor, DeclInitType,
- llvm::makeArrayRef(AllArgs.data(), AllArgs.size()),
- Proto, Loc);
+ llvm::ArrayRef(AllArgs.data(), AllArgs.size()), Proto,
+ Loc);
return Invalid;
}
@@ -15702,18 +16416,28 @@ bool Sema::CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl) {
if (Op == OO_New || Op == OO_Array_New)
return CheckOperatorNewDeclaration(*this, FnDecl);
- // C++ [over.oper]p6:
- // An operator function shall either be a non-static member
- // function or be a non-member function and have at least one
- // parameter whose type is a class, a reference to a class, an
- // enumeration, or a reference to an enumeration.
+ // C++ [over.oper]p7:
+ // An operator function shall either be a member function or
+ // be a non-member function and have at least one parameter
+ // whose type is a class, a reference to a class, an enumeration,
+ // or a reference to an enumeration.
+ // Note: Before C++23, a member function could not be static. The only member
+ // function allowed to be static is the call operator function.
if (CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(FnDecl)) {
- if (MethodDecl->isStatic())
- return Diag(FnDecl->getLocation(),
- diag::err_operator_overload_static) << FnDecl->getDeclName();
+ if (MethodDecl->isStatic()) {
+ if (Op == OO_Call || Op == OO_Subscript)
+ Diag(FnDecl->getLocation(),
+ (LangOpts.CPlusPlus23
+ ? diag::warn_cxx20_compat_operator_overload_static
+ : diag::ext_operator_overload_static))
+ << FnDecl;
+ else
+ return Diag(FnDecl->getLocation(), diag::err_operator_overload_static)
+ << FnDecl;
+ }
} else {
bool ClassOrEnumParam = false;
- for (auto Param : FnDecl->parameters()) {
+ for (auto *Param : FnDecl->parameters()) {
QualType ParamType = Param->getType().getNonReferenceType();
if (ParamType->isDependentType() || ParamType->isRecordType() ||
ParamType->isEnumeralType()) {
@@ -15732,14 +16456,29 @@ bool Sema::CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl) {
// An operator function cannot have default arguments (8.3.6),
// except where explicitly stated below.
//
- // Only the function-call operator allows default arguments
- // (C++ [over.call]p1).
+ // Only the function-call operator (C++ [over.call]p1) and the subscript
+ // operator (CWG2507) allow default arguments.
if (Op != OO_Call) {
- for (auto Param : FnDecl->parameters()) {
- if (Param->hasDefaultArg())
- return Diag(Param->getLocation(),
+ ParmVarDecl *FirstDefaultedParam = nullptr;
+ for (auto *Param : FnDecl->parameters()) {
+ if (Param->hasDefaultArg()) {
+ FirstDefaultedParam = Param;
+ break;
+ }
+ }
+ if (FirstDefaultedParam) {
+ if (Op == OO_Subscript) {
+ Diag(FnDecl->getLocation(), LangOpts.CPlusPlus23
+ ? diag::ext_subscript_overload
+ : diag::error_subscript_overload)
+ << FnDecl->getDeclName() << 1
+ << FirstDefaultedParam->getDefaultArgRange();
+ } else {
+ return Diag(FirstDefaultedParam->getLocation(),
diag::err_operator_overload_default_arg)
- << FnDecl->getDeclName() << Param->getDefaultArgRange();
+ << FnDecl->getDeclName()
+ << FirstDefaultedParam->getDefaultArgRange();
+ }
}
}
@@ -15758,12 +16497,15 @@ bool Sema::CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl) {
// [...] Operator functions cannot have more or fewer parameters
// than the number required for the corresponding operator, as
// described in the rest of this subclause.
- unsigned NumParams = FnDecl->getNumParams()
- + (isa<CXXMethodDecl>(FnDecl)? 1 : 0);
- if (Op != OO_Call &&
+ unsigned NumParams = FnDecl->getNumParams() +
+ (isa<CXXMethodDecl>(FnDecl) &&
+ !FnDecl->hasCXXExplicitFunctionObjectParameter()
+ ? 1
+ : 0);
+ if (Op != OO_Call && Op != OO_Subscript &&
((NumParams == 1 && !CanBeUnaryOperator) ||
- (NumParams == 2 && !CanBeBinaryOperator) ||
- (NumParams < 1) || (NumParams > 2))) {
+ (NumParams == 2 && !CanBeBinaryOperator) || (NumParams < 1) ||
+ (NumParams > 2))) {
// We have the wrong number of parameters.
unsigned ErrorKind;
if (CanBeUnaryOperator && CanBeBinaryOperator) {
@@ -15775,19 +16517,26 @@ bool Sema::CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl) {
"All non-call overloaded operators are unary or binary!");
ErrorKind = 1; // 1 -> binary
}
-
return Diag(FnDecl->getLocation(), diag::err_operator_overload_must_be)
<< FnDecl->getDeclName() << NumParams << ErrorKind;
}
- // Overloaded operators other than operator() cannot be variadic.
+ if (Op == OO_Subscript && NumParams != 2) {
+ Diag(FnDecl->getLocation(), LangOpts.CPlusPlus23
+ ? diag::ext_subscript_overload
+ : diag::error_subscript_overload)
+ << FnDecl->getDeclName() << (NumParams == 1 ? 0 : 2);
+ }
+
+ // Overloaded operators other than operator() and operator[] cannot be
+ // variadic.
if (Op != OO_Call &&
FnDecl->getType()->castAs<FunctionProtoType>()->isVariadic()) {
return Diag(FnDecl->getLocation(), diag::err_operator_overload_variadic)
- << FnDecl->getDeclName();
+ << FnDecl->getDeclName();
}
- // Some operators must be non-static member functions.
+ // Some operators must be member functions.
if (MustBeMemberOperator && !isa<CXXMethodDecl>(FnDecl)) {
return Diag(FnDecl->getLocation(),
diag::err_operator_overload_must_be_member)
@@ -15840,7 +16589,7 @@ checkLiteralOperatorTemplateParameterList(Sema &SemaRef,
//
// As a DR resolution, we also allow placeholders for deduced class
// template specializations.
- if (SemaRef.getLangOpts().CPlusPlus20 &&
+ if (SemaRef.getLangOpts().CPlusPlus20 && PmDecl &&
!PmDecl->isTemplateParameterPack() &&
(PmDecl->getType()->isRecordType() ||
PmDecl->getType()->getAs<DeducedTemplateSpecializationType>()))
@@ -16020,7 +16769,7 @@ bool Sema::CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl) {
// A parameter-declaration-clause containing a default argument is not
// equivalent to any of the permitted forms.
- for (auto Param : FnDecl->parameters()) {
+ for (auto *Param : FnDecl->parameters()) {
if (Param->hasDefaultArg()) {
Diag(Param->getDefaultArgRange().getBegin(),
diag::err_literal_operator_default_argument)
@@ -16029,15 +16778,18 @@ bool Sema::CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl) {
}
}
- StringRef LiteralName
- = FnDecl->getDeclName().getCXXLiteralIdentifier()->getName();
- if (LiteralName[0] != '_' &&
+ const IdentifierInfo *II = FnDecl->getDeclName().getCXXLiteralIdentifier();
+ ReservedLiteralSuffixIdStatus Status = II->isReservedLiteralSuffixId();
+ if (Status != ReservedLiteralSuffixIdStatus::NotReserved &&
!getSourceManager().isInSystemHeader(FnDecl->getLocation())) {
- // C++11 [usrlit.suffix]p1:
- // Literal suffix identifiers that do not start with an underscore
- // are reserved for future standardization.
+ // C++23 [usrlit.suffix]p1:
+ // Literal suffix identifiers that do not start with an underscore are
+ // reserved for future standardization. Literal suffix identifiers that
+ // contain a double underscore __ are reserved for use by C++
+ // implementations.
Diag(FnDecl->getLocation(), diag::warn_user_literal_reserved)
- << StringLiteralParser::isValidUDSuffix(getLangOpts(), LiteralName);
+ << static_cast<int>(Status)
+ << StringLiteralParser::isValidUDSuffix(getLangOpts(), II->getName());
}
return false;
@@ -16053,18 +16805,14 @@ Decl *Sema::ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc) {
StringLiteral *Lit = cast<StringLiteral>(LangStr);
- if (!Lit->isAscii()) {
- Diag(LangStr->getExprLoc(), diag::err_language_linkage_spec_not_ascii)
- << LangStr->getSourceRange();
- return nullptr;
- }
+ assert(Lit->isUnevaluated() && "Unexpected string literal kind");
StringRef Lang = Lit->getString();
- LinkageSpecDecl::LanguageIDs Language;
+ LinkageSpecLanguageIDs Language;
if (Lang == "C")
- Language = LinkageSpecDecl::lang_c;
+ Language = LinkageSpecLanguageIDs::C;
else if (Lang == "C++")
- Language = LinkageSpecDecl::lang_cxx;
+ Language = LinkageSpecLanguageIDs::CXX;
else {
Diag(LangStr->getExprLoc(), diag::err_language_linkage_spec_unknown)
<< LangStr->getSourceRange();
@@ -16076,6 +16824,21 @@ Decl *Sema::ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc,
LinkageSpecDecl *D = LinkageSpecDecl::Create(Context, CurContext, ExternLoc,
LangStr->getExprLoc(), Language,
LBraceLoc.isValid());
+
+ /// C++ [module.unit]p7.2.3
+ /// - Otherwise, if the declaration
+ /// - ...
+ /// - ...
+ /// - appears within a linkage-specification,
+ /// it is attached to the global module.
+ ///
+ /// If the declaration is already in global module fragment, we don't
+ /// need to attach it again.
+ if (getLangOpts().CPlusPlusModules && isCurrentModulePurview()) {
+ Module *GlobalModule = PushImplicitGlobalModuleFragment(ExternLoc);
+ D->setLocalOwningModule(GlobalModule);
+ }
+
CurContext->addDecl(D);
PushDeclContext(S, D);
return D;
@@ -16092,6 +16855,15 @@ Decl *Sema::ActOnFinishLinkageSpecification(Scope *S,
LinkageSpecDecl* LSDecl = cast<LinkageSpecDecl>(LinkageSpec);
LSDecl->setRBraceLoc(RBraceLoc);
}
+
+ // If the current module doesn't has Parent, it implies that the
+ // LinkageSpec isn't in the module created by itself. So we don't
+ // need to pop it.
+ if (getLangOpts().CPlusPlusModules && getCurrentModule() &&
+ getCurrentModule()->isImplicitGlobalModule() &&
+ getCurrentModule()->Parent)
+ PopImplicitGlobalModuleFragment();
+
PopDeclContext();
return LinkageSpec;
}
@@ -16156,6 +16928,11 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S,
!BaseType->isDependentType() && RequireCompleteType(Loc, BaseType, DK))
Invalid = true;
+ if (!Invalid && BaseType.isWebAssemblyReferenceType()) {
+ Diag(Loc, diag::err_wasm_reftype_tc) << 1;
+ Invalid = true;
+ }
+
if (!Invalid && Mode != 1 && BaseType->isSizelessType()) {
Diag(Loc, diag::err_catch_sizeless) << (Mode == 2 ? 1 : 0) << BaseType;
Invalid = true;
@@ -16244,7 +17021,7 @@ VarDecl *Sema::BuildExceptionDeclaration(Scope *S,
/// ActOnExceptionDeclarator - Parsed the exception-declarator in a C++ catch
/// handler.
Decl *Sema::ActOnExceptionDeclarator(Scope *S, Declarator &D) {
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
bool Invalid = D.isInvalidType();
// Check for unexpanded parameter packs.
@@ -16298,23 +17075,358 @@ Decl *Sema::ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc) {
- StringLiteral *AssertMessage =
- AssertMessageExpr ? cast<StringLiteral>(AssertMessageExpr) : nullptr;
-
if (DiagnoseUnexpandedParameterPack(AssertExpr, UPPC_StaticAssertExpression))
return nullptr;
return BuildStaticAssertDeclaration(StaticAssertLoc, AssertExpr,
- AssertMessage, RParenLoc, false);
+ AssertMessageExpr, RParenLoc, false);
+}
+
+static void WriteCharTypePrefix(BuiltinType::Kind BTK, llvm::raw_ostream &OS) {
+ switch (BTK) {
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ break;
+ case BuiltinType::Char8:
+ OS << "u8";
+ break;
+ case BuiltinType::Char16:
+ OS << 'u';
+ break;
+ case BuiltinType::Char32:
+ OS << 'U';
+ break;
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ OS << 'L';
+ break;
+ default:
+ llvm_unreachable("Non-character type");
+ }
+}
+
+/// Convert character's value, interpreted as a code unit, to a string.
+/// The value needs to be zero-extended to 32-bits.
+/// FIXME: This assumes Unicode literal encodings
+static void WriteCharValueForDiagnostic(uint32_t Value, const BuiltinType *BTy,
+ unsigned TyWidth,
+ SmallVectorImpl<char> &Str) {
+ char Arr[UNI_MAX_UTF8_BYTES_PER_CODE_POINT];
+ char *Ptr = Arr;
+ BuiltinType::Kind K = BTy->getKind();
+ llvm::raw_svector_ostream OS(Str);
+
+ // This should catch Char_S, Char_U, Char8, and use of escaped characters in
+ // other types.
+ if (K == BuiltinType::Char_S || K == BuiltinType::Char_U ||
+ K == BuiltinType::Char8 || Value <= 0x7F) {
+ StringRef Escaped = escapeCStyle<EscapeChar::Single>(Value);
+ if (!Escaped.empty())
+ EscapeStringForDiagnostic(Escaped, Str);
+ else
+ OS << static_cast<char>(Value);
+ return;
+ }
+
+ switch (K) {
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U: {
+ if (llvm::ConvertCodePointToUTF8(Value, Ptr))
+ EscapeStringForDiagnostic(StringRef(Arr, Ptr - Arr), Str);
+ else
+ OS << "\\x"
+ << llvm::format_hex_no_prefix(Value, TyWidth / 4, /*Upper=*/true);
+ break;
+ }
+ default:
+ llvm_unreachable("Non-character type is passed");
+ }
+}
+
+/// Convert \V to a string we can present to the user in a diagnostic
+/// \T is the type of the expression that has been evaluated into \V
+static bool ConvertAPValueToString(const APValue &V, QualType T,
+ SmallVectorImpl<char> &Str,
+ ASTContext &Context) {
+ if (!V.hasValue())
+ return false;
+
+ switch (V.getKind()) {
+ case APValue::ValueKind::Int:
+ if (T->isBooleanType()) {
+ // Bools are reduced to ints during evaluation, but for
+ // diagnostic purposes we want to print them as
+ // true or false.
+ int64_t BoolValue = V.getInt().getExtValue();
+ assert((BoolValue == 0 || BoolValue == 1) &&
+ "Bool type, but value is not 0 or 1");
+ llvm::raw_svector_ostream OS(Str);
+ OS << (BoolValue ? "true" : "false");
+ } else {
+ llvm::raw_svector_ostream OS(Str);
+ // Same is true for chars.
+ // We want to print the character representation for textual types
+ const auto *BTy = T->getAs<BuiltinType>();
+ if (BTy) {
+ switch (BTy->getKind()) {
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::Char8:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U: {
+ unsigned TyWidth = Context.getIntWidth(T);
+ assert(8 <= TyWidth && TyWidth <= 32 && "Unexpected integer width");
+ uint32_t CodeUnit = static_cast<uint32_t>(V.getInt().getZExtValue());
+ WriteCharTypePrefix(BTy->getKind(), OS);
+ OS << '\'';
+ WriteCharValueForDiagnostic(CodeUnit, BTy, TyWidth, Str);
+ OS << "' (0x"
+ << llvm::format_hex_no_prefix(CodeUnit, /*Width=*/2,
+ /*Upper=*/true)
+ << ", " << V.getInt() << ')';
+ return true;
+ }
+ default:
+ break;
+ }
+ }
+ V.getInt().toString(Str);
+ }
+
+ break;
+
+ case APValue::ValueKind::Float:
+ V.getFloat().toString(Str);
+ break;
+
+ case APValue::ValueKind::LValue:
+ if (V.isNullPointer()) {
+ llvm::raw_svector_ostream OS(Str);
+ OS << "nullptr";
+ } else
+ return false;
+ break;
+
+ case APValue::ValueKind::ComplexFloat: {
+ llvm::raw_svector_ostream OS(Str);
+ OS << '(';
+ V.getComplexFloatReal().toString(Str);
+ OS << " + ";
+ V.getComplexFloatImag().toString(Str);
+ OS << "i)";
+ } break;
+
+ case APValue::ValueKind::ComplexInt: {
+ llvm::raw_svector_ostream OS(Str);
+ OS << '(';
+ V.getComplexIntReal().toString(Str);
+ OS << " + ";
+ V.getComplexIntImag().toString(Str);
+ OS << "i)";
+ } break;
+
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+/// Some Expression types are not useful to print notes about,
+/// e.g. literals and values that have already been expanded
+/// before such as int-valued template parameters.
+static bool UsefulToPrintExpr(const Expr *E) {
+ E = E->IgnoreParenImpCasts();
+ // Literals are pretty easy for humans to understand.
+ if (isa<IntegerLiteral, FloatingLiteral, CharacterLiteral, CXXBoolLiteralExpr,
+ CXXNullPtrLiteralExpr, FixedPointLiteral, ImaginaryLiteral>(E))
+ return false;
+
+ // These have been substituted from template parameters
+ // and appear as literals in the static assert error.
+ if (isa<SubstNonTypeTemplateParmExpr>(E))
+ return false;
+
+ // -5 is also simple to understand.
+ if (const auto *UnaryOp = dyn_cast<UnaryOperator>(E))
+ return UsefulToPrintExpr(UnaryOp->getSubExpr());
+
+ // Only print nested arithmetic operators.
+ if (const auto *BO = dyn_cast<BinaryOperator>(E))
+ return (BO->isShiftOp() || BO->isAdditiveOp() || BO->isMultiplicativeOp() ||
+ BO->isBitwiseOp());
+
+ return true;
+}
+
+/// Try to print more useful information about a failed static_assert
+/// with expression \E
+void Sema::DiagnoseStaticAssertDetails(const Expr *E) {
+ if (const auto *Op = dyn_cast<BinaryOperator>(E);
+ Op && Op->getOpcode() != BO_LOr) {
+ const Expr *LHS = Op->getLHS()->IgnoreParenImpCasts();
+ const Expr *RHS = Op->getRHS()->IgnoreParenImpCasts();
+
+ // Ignore comparisons of boolean expressions with a boolean literal.
+ if ((isa<CXXBoolLiteralExpr>(LHS) && RHS->getType()->isBooleanType()) ||
+ (isa<CXXBoolLiteralExpr>(RHS) && LHS->getType()->isBooleanType()))
+ return;
+
+ // Don't print obvious expressions.
+ if (!UsefulToPrintExpr(LHS) && !UsefulToPrintExpr(RHS))
+ return;
+
+ struct {
+ const clang::Expr *Cond;
+ Expr::EvalResult Result;
+ SmallString<12> ValueString;
+ bool Print;
+ } DiagSide[2] = {{LHS, Expr::EvalResult(), {}, false},
+ {RHS, Expr::EvalResult(), {}, false}};
+ for (unsigned I = 0; I < 2; I++) {
+ const Expr *Side = DiagSide[I].Cond;
+
+ Side->EvaluateAsRValue(DiagSide[I].Result, Context, true);
+
+ DiagSide[I].Print =
+ ConvertAPValueToString(DiagSide[I].Result.Val, Side->getType(),
+ DiagSide[I].ValueString, Context);
+ }
+ if (DiagSide[0].Print && DiagSide[1].Print) {
+ Diag(Op->getExprLoc(), diag::note_expr_evaluates_to)
+ << DiagSide[0].ValueString << Op->getOpcodeStr()
+ << DiagSide[1].ValueString << Op->getSourceRange();
+ }
+ }
+}
+
+bool Sema::EvaluateStaticAssertMessageAsString(Expr *Message,
+ std::string &Result,
+ ASTContext &Ctx,
+ bool ErrorOnInvalidMessage) {
+ assert(Message);
+ assert(!Message->isTypeDependent() && !Message->isValueDependent() &&
+ "can't evaluate a dependant static assert message");
+
+ if (const auto *SL = dyn_cast<StringLiteral>(Message)) {
+ assert(SL->isUnevaluated() && "expected an unevaluated string");
+ Result.assign(SL->getString().begin(), SL->getString().end());
+ return true;
+ }
+
+ SourceLocation Loc = Message->getBeginLoc();
+ QualType T = Message->getType().getNonReferenceType();
+ auto *RD = T->getAsCXXRecordDecl();
+ if (!RD) {
+ Diag(Loc, diag::err_static_assert_invalid_message);
+ return false;
+ }
+
+ auto FindMember = [&](StringRef Member, bool &Empty,
+ bool Diag = false) -> std::optional<LookupResult> {
+ DeclarationName DN = PP.getIdentifierInfo(Member);
+ LookupResult MemberLookup(*this, DN, Loc, Sema::LookupMemberName);
+ LookupQualifiedName(MemberLookup, RD);
+ Empty = MemberLookup.empty();
+ OverloadCandidateSet Candidates(MemberLookup.getNameLoc(),
+ OverloadCandidateSet::CSK_Normal);
+ if (MemberLookup.empty())
+ return std::nullopt;
+ return std::move(MemberLookup);
+ };
+
+ bool SizeNotFound, DataNotFound;
+ std::optional<LookupResult> SizeMember = FindMember("size", SizeNotFound);
+ std::optional<LookupResult> DataMember = FindMember("data", DataNotFound);
+ if (SizeNotFound || DataNotFound) {
+ Diag(Loc, diag::err_static_assert_missing_member_function)
+ << ((SizeNotFound && DataNotFound) ? 2
+ : SizeNotFound ? 0
+ : 1);
+ return false;
+ }
+
+ if (!SizeMember || !DataMember) {
+ if (!SizeMember)
+ FindMember("size", SizeNotFound, /*Diag=*/true);
+ if (!DataMember)
+ FindMember("data", DataNotFound, /*Diag=*/true);
+ return false;
+ }
+
+ auto BuildExpr = [&](LookupResult &LR) {
+ ExprResult Res = BuildMemberReferenceExpr(
+ Message, Message->getType(), Message->getBeginLoc(), false,
+ CXXScopeSpec(), SourceLocation(), nullptr, LR, nullptr, nullptr);
+ if (Res.isInvalid())
+ return ExprError();
+ Res = BuildCallExpr(nullptr, Res.get(), Loc, std::nullopt, Loc, nullptr,
+ false, true);
+ if (Res.isInvalid())
+ return ExprError();
+ if (Res.get()->isTypeDependent() || Res.get()->isValueDependent())
+ return ExprError();
+ return TemporaryMaterializationConversion(Res.get());
+ };
+
+ ExprResult SizeE = BuildExpr(*SizeMember);
+ ExprResult DataE = BuildExpr(*DataMember);
+
+ QualType SizeT = Context.getSizeType();
+ QualType ConstCharPtr =
+ Context.getPointerType(Context.getConstType(Context.CharTy));
+
+ ExprResult EvaluatedSize =
+ SizeE.isInvalid() ? ExprError()
+ : BuildConvertedConstantExpression(
+ SizeE.get(), SizeT, CCEK_StaticAssertMessageSize);
+ if (EvaluatedSize.isInvalid()) {
+ Diag(Loc, diag::err_static_assert_invalid_mem_fn_ret_ty) << /*size*/ 0;
+ return false;
+ }
+
+ ExprResult EvaluatedData =
+ DataE.isInvalid()
+ ? ExprError()
+ : BuildConvertedConstantExpression(DataE.get(), ConstCharPtr,
+ CCEK_StaticAssertMessageData);
+ if (EvaluatedData.isInvalid()) {
+ Diag(Loc, diag::err_static_assert_invalid_mem_fn_ret_ty) << /*data*/ 1;
+ return false;
+ }
+
+ if (!ErrorOnInvalidMessage &&
+ Diags.isIgnored(diag::warn_static_assert_message_constexpr, Loc))
+ return true;
+
+ Expr::EvalResult Status;
+ SmallVector<PartialDiagnosticAt, 8> Notes;
+ Status.Diag = &Notes;
+ if (!Message->EvaluateCharRangeAsString(Result, EvaluatedSize.get(),
+ EvaluatedData.get(), Ctx, Status) ||
+ !Notes.empty()) {
+ Diag(Message->getBeginLoc(),
+ ErrorOnInvalidMessage ? diag::err_static_assert_message_constexpr
+ : diag::warn_static_assert_message_constexpr);
+ for (const auto &Note : Notes)
+ Diag(Note.first, Note.second);
+ return !ErrorOnInvalidMessage;
+ }
+ return true;
}
Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
- Expr *AssertExpr,
- StringLiteral *AssertMessage,
+ Expr *AssertExpr, Expr *AssertMessage,
SourceLocation RParenLoc,
bool Failed) {
assert(AssertExpr != nullptr && "Expected non-null condition");
if (!AssertExpr->isTypeDependent() && !AssertExpr->isValueDependent() &&
+ (!AssertMessage || (!AssertMessage->isTypeDependent() &&
+ !AssertMessage->isValueDependent())) &&
!Failed) {
// In a static_assert-declaration, the constant-expression shall be a
// constant expression that can be contextually converted to bool.
@@ -16332,18 +17444,48 @@ Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
AssertExpr = FullAssertExpr.get();
llvm::APSInt Cond;
+ Expr *BaseExpr = AssertExpr;
+ AllowFoldKind FoldKind = NoFold;
+
+ if (!getLangOpts().CPlusPlus) {
+ // In C mode, allow folding as an extension for better compatibility with
+ // C++ in terms of expressions like static_assert("test") or
+ // static_assert(nullptr).
+ FoldKind = AllowFold;
+ }
+
if (!Failed && VerifyIntegerConstantExpression(
- AssertExpr, &Cond,
- diag::err_static_assert_expression_is_not_constant)
- .isInvalid())
+ BaseExpr, &Cond,
+ diag::err_static_assert_expression_is_not_constant,
+ FoldKind).isInvalid())
Failed = true;
- if (!Failed && !Cond) {
+ // If the static_assert passes, only verify that
+ // the message is grammatically valid without evaluating it.
+ if (!Failed && AssertMessage && Cond.getBoolValue()) {
+ std::string Str;
+ EvaluateStaticAssertMessageAsString(AssertMessage, Str, Context,
+ /*ErrorOnInvalidMessage=*/false);
+ }
+
+ // CWG2518
+ // [dcl.pre]/p10 If [...] the expression is evaluated in the context of a
+ // template definition, the declaration has no effect.
+ bool InTemplateDefinition =
+ getLangOpts().CPlusPlus && CurContext->isDependentContext();
+
+ if (!Failed && !Cond && !InTemplateDefinition) {
SmallString<256> MsgBuffer;
llvm::raw_svector_ostream Msg(MsgBuffer);
- if (AssertMessage)
- AssertMessage->printPretty(Msg, nullptr, getPrintingPolicy());
-
+ bool HasMessage = AssertMessage;
+ if (AssertMessage) {
+ std::string Str;
+ HasMessage =
+ EvaluateStaticAssertMessageAsString(
+ AssertMessage, Str, Context, /*ErrorOnInvalidMessage=*/true) ||
+ !Str.empty();
+ Msg << Str;
+ }
Expr *InnerCond = nullptr;
std::string InnerCondDescription;
std::tie(InnerCond, InnerCondDescription) =
@@ -16351,19 +17493,22 @@ Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
if (InnerCond && isa<ConceptSpecializationExpr>(InnerCond)) {
// Drill down into concept specialization expressions to see why they
// weren't satisfied.
- Diag(StaticAssertLoc, diag::err_static_assert_failed)
- << !AssertMessage << Msg.str() << AssertExpr->getSourceRange();
+ Diag(AssertExpr->getBeginLoc(), diag::err_static_assert_failed)
+ << !HasMessage << Msg.str() << AssertExpr->getSourceRange();
ConstraintSatisfaction Satisfaction;
if (!CheckConstraintSatisfaction(InnerCond, Satisfaction))
DiagnoseUnsatisfiedConstraint(Satisfaction);
} else if (InnerCond && !isa<CXXBoolLiteralExpr>(InnerCond)
&& !isa<IntegerLiteral>(InnerCond)) {
- Diag(StaticAssertLoc, diag::err_static_assert_requirement_failed)
- << InnerCondDescription << !AssertMessage
- << Msg.str() << InnerCond->getSourceRange();
+ Diag(InnerCond->getBeginLoc(),
+ diag::err_static_assert_requirement_failed)
+ << InnerCondDescription << !HasMessage << Msg.str()
+ << InnerCond->getSourceRange();
+ DiagnoseStaticAssertDetails(InnerCond);
} else {
- Diag(StaticAssertLoc, diag::err_static_assert_failed)
- << !AssertMessage << Msg.str() << AssertExpr->getSourceRange();
+ Diag(AssertExpr->getBeginLoc(), diag::err_static_assert_failed)
+ << !HasMessage << Msg.str() << AssertExpr->getSourceRange();
+ PrintContextStack();
}
Failed = true;
}
@@ -16394,7 +17539,7 @@ FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation LocStart,
assert(TSInfo && "NULL TypeSourceInfo for friend type declaration");
QualType T = TSInfo->getType();
- SourceRange TypeRange = TSInfo->getTypeLoc().getLocalSourceRange();
+ SourceRange TypeRange = TSInfo->getTypeLoc().getSourceRange();
// C++03 [class.friend]p2:
// An elaborated-type-specifier shall be used in a friend declaration
@@ -16460,12 +17605,10 @@ FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation LocStart,
/// Handle a friend tag declaration where the scope specifier was
/// templated.
-Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
- unsigned TagSpec, SourceLocation TagLoc,
- CXXScopeSpec &SS, IdentifierInfo *Name,
- SourceLocation NameLoc,
- const ParsedAttributesView &Attr,
- MultiTemplateParamsArg TempParamLists) {
+DeclResult Sema::ActOnTemplatedFriendTag(
+ Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc,
+ CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
+ const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists) {
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
bool IsMemberSpecialization = false;
@@ -16478,7 +17621,7 @@ Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
if (TemplateParams->size() > 0) {
// This is a declaration of a class template.
if (Invalid)
- return nullptr;
+ return true;
return CheckClassTemplate(S, TagSpec, TUK_Friend, TagLoc, SS, Name,
NameLoc, Attr, TemplateParams, AS_public,
@@ -16493,7 +17636,7 @@ Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
}
}
- if (Invalid) return nullptr;
+ if (Invalid) return true;
bool isAllExplicitSpecializations = true;
for (unsigned I = TempParamLists.size(); I-- > 0; ) {
@@ -16512,15 +17655,15 @@ Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
if (SS.isEmpty()) {
bool Owned = false;
bool IsDependent = false;
- return ActOnTag(S, TagSpec, TUK_Friend, TagLoc, SS, Name, NameLoc,
- Attr, AS_public,
+ return ActOnTag(S, TagSpec, TUK_Friend, TagLoc, SS, Name, NameLoc, Attr,
+ AS_public,
/*ModulePrivateLoc=*/SourceLocation(),
MultiTemplateParamsArg(), Owned, IsDependent,
/*ScopedEnumKWLoc=*/SourceLocation(),
/*ScopedEnumUsesClassTag=*/false,
/*UnderlyingType=*/TypeResult(),
/*IsTypeSpecifier=*/false,
- /*IsTemplateParamOrArg=*/false);
+ /*IsTemplateParamOrArg=*/false, /*OOK=*/OOK_Outside);
}
NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
@@ -16529,7 +17672,7 @@ Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
QualType T = CheckTypenameType(Keyword, TagLoc, QualifierLoc,
*Name, NameLoc);
if (T.isNull())
- return nullptr;
+ return true;
TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T);
if (isa<DependentNameType>(T)) {
@@ -16627,8 +17770,9 @@ Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
// Try to convert the decl specifier to a type. This works for
// friend templates because ActOnTag never produces a ClassTemplateDecl
// for a TUK_Friend.
- Declarator TheDeclarator(DS, DeclaratorContext::Member);
- TypeSourceInfo *TSI = GetTypeForDeclarator(TheDeclarator, S);
+ Declarator TheDeclarator(DS, ParsedAttributesView::none(),
+ DeclaratorContext::Member);
+ TypeSourceInfo *TSI = GetTypeForDeclarator(TheDeclarator);
QualType T = TSI->getType();
if (TheDeclarator.isInvalidType())
return nullptr;
@@ -16693,7 +17837,7 @@ NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
assert(DS.getStorageClassSpec() == DeclSpec::SCS_unspecified);
SourceLocation Loc = D.getIdentifierLoc();
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
// C++ [class.friend]p1
// A friend of a class is a function or class....
@@ -16745,6 +17889,8 @@ NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
ForExternalRedeclaration);
+ bool isTemplateId = D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId;
+
// There are five cases here.
// - There's no scope specifier and we're in a local class. Only look
// for functions declared in the immediately-enclosing block scope.
@@ -16782,14 +17928,6 @@ NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
}
adjustContextForLocalExternDecl(DC);
- // C++ [class.friend]p6:
- // A function can be defined in a friend declaration of a class if and
- // only if the class is a non-local class (9.8), the function name is
- // unqualified, and the function has namespace scope.
- if (D.isFunctionDefinition()) {
- Diag(NameInfo.getBeginLoc(), diag::err_friend_def_in_local_class);
- }
-
// - There's no scope specifier, in which case we just go to the
// appropriate scope and look for a function or function template
// there as appropriate.
@@ -16800,8 +17938,6 @@ NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
// elaborated-type-specifier, the lookup to determine whether
// the entity has been previously declared shall not consider
// any scopes outside the innermost enclosing namespace.
- bool isTemplateId =
- D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId;
// Find the appropriate context according to the above.
DC = CurContext;
@@ -16816,10 +17952,7 @@ NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
while (DC->isRecord())
DC = DC->getParent();
- DeclContext *LookupDC = DC;
- while (LookupDC->isTransparentContext())
- LookupDC = LookupDC->getParent();
-
+ DeclContext *LookupDC = DC->getNonTransparentContext();
while (true) {
LookupQualifiedName(Previous, LookupDC);
@@ -16857,39 +17990,12 @@ NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
diag::warn_cxx98_compat_friend_is_member :
diag::err_friend_is_member);
- if (D.isFunctionDefinition()) {
- // C++ [class.friend]p6:
- // A function can be defined in a friend declaration of a class if and
- // only if the class is a non-local class (9.8), the function name is
- // unqualified, and the function has namespace scope.
- //
- // FIXME: We should only do this if the scope specifier names the
- // innermost enclosing namespace; otherwise the fixit changes the
- // meaning of the code.
- SemaDiagnosticBuilder DB
- = Diag(SS.getRange().getBegin(), diag::err_qualified_friend_def);
-
- DB << SS.getScopeRep();
- if (DC->isFileContext())
- DB << FixItHint::CreateRemoval(SS.getRange());
- SS.clear();
- }
-
// - There's a scope specifier that does not match any template
// parameter lists, in which case we use some arbitrary context,
// create a method or method template, and wait for instantiation.
// - There's a scope specifier that does match some template
// parameter lists, which we don't handle right now.
} else {
- if (D.isFunctionDefinition()) {
- // C++ [class.friend]p6:
- // A function can be defined in a friend declaration of a class if and
- // only if the class is a non-local class (9.8), the function name is
- // unqualified, and the function has namespace scope.
- Diag(SS.getRange().getBegin(), diag::err_qualified_friend_def)
- << SS.getScopeRep();
- }
-
DC = CurContext;
assert(isa<CXXRecordDecl>(DC) && "friend declaration not in class?");
}
@@ -16974,6 +18080,38 @@ NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
else
FD = cast<FunctionDecl>(ND);
+ // C++ [class.friend]p6:
+ // A function may be defined in a friend declaration of a class if and
+ // only if the class is a non-local class, and the function name is
+ // unqualified.
+ if (D.isFunctionDefinition()) {
+ // Qualified friend function definition.
+ if (SS.isNotEmpty()) {
+ // FIXME: We should only do this if the scope specifier names the
+ // innermost enclosing namespace; otherwise the fixit changes the
+ // meaning of the code.
+ SemaDiagnosticBuilder DB =
+ Diag(SS.getRange().getBegin(), diag::err_qualified_friend_def);
+
+ DB << SS.getScopeRep();
+ if (DC->isFileContext())
+ DB << FixItHint::CreateRemoval(SS.getRange());
+
+ // Friend function defined in a local class.
+ } else if (FunctionContainingLocalClass) {
+ Diag(NameInfo.getBeginLoc(), diag::err_friend_def_in_local_class);
+
+ // Per [basic.pre]p4, a template-id is not a name. Therefore, if we have
+ // a template-id, the function name is not unqualified because these is
+ // no name. While the wording requires some reading in-between the
+ // lines, GCC, MSVC, and EDG all consider a friend function
+ // specialization definitions // to be de facto explicit specialization
+ // and diagnose them as such.
+ } else if (isTemplateId) {
+ Diag(NameInfo.getBeginLoc(), diag::err_friend_specialization_def);
+ }
+ }
+
// C++11 [dcl.fct.default]p4: If a friend declaration specifies a
// default argument expression, that declaration shall be a definition
// and shall be the only declaration of the function or function
@@ -17088,13 +18226,6 @@ void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
return;
}
- if (DefKind.isComparison() &&
- !isa<CXXRecordDecl>(FD->getLexicalDeclContext())) {
- Diag(FD->getLocation(), diag::err_defaulted_comparison_out_of_class)
- << (int)DefKind.asComparison();
- return;
- }
-
// Issue compatibility warning. We already warned if the operator is
// 'operator<=>' when parsing the '<=>' token.
if (DefKind.isComparison() &&
@@ -17106,6 +18237,7 @@ void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
FD->setDefaulted();
FD->setExplicitlyDefaulted();
+ FD->setDefaultLoc(DefaultLoc);
// Defer checking functions that are defaulted in a dependent context.
if (FD->isDependentContext())
@@ -17116,31 +18248,41 @@ void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
// that we've marked it as defaulted.
FD->setWillHaveBody(false);
- // If this definition appears within the record, do the checking when
- // the record is complete. This is always the case for a defaulted
- // comparison.
- if (DefKind.isComparison())
- return;
- auto *MD = cast<CXXMethodDecl>(FD);
-
- const FunctionDecl *Primary = FD;
- if (const FunctionDecl *Pattern = FD->getTemplateInstantiationPattern())
- // Ask the template instantiation pattern that actually had the
- // '= default' on it.
- Primary = Pattern;
+ if (DefKind.isComparison()) {
+ // If this comparison's defaulting occurs within the definition of its
+ // lexical class context, we have to do the checking when complete.
+ if (auto const *RD = dyn_cast<CXXRecordDecl>(FD->getLexicalDeclContext()))
+ if (!RD->isCompleteDefinition())
+ return;
+ }
- // If the method was defaulted on its first declaration, we will have
+ // If this member fn was defaulted on its first declaration, we will have
// already performed the checking in CheckCompletedCXXClass. Such a
// declaration doesn't trigger an implicit definition.
- if (Primary->getCanonicalDecl()->isDefaulted())
- return;
+ if (isa<CXXMethodDecl>(FD)) {
+ const FunctionDecl *Primary = FD;
+ if (const FunctionDecl *Pattern = FD->getTemplateInstantiationPattern())
+ // Ask the template instantiation pattern that actually had the
+ // '= default' on it.
+ Primary = Pattern;
+ if (Primary->getCanonicalDecl()->isDefaulted())
+ return;
+ }
- // FIXME: Once we support defining comparisons out of class, check for a
- // defaulted comparison here.
- if (CheckExplicitlyDefaultedSpecialMember(MD, DefKind.asSpecialMember()))
- MD->setInvalidDecl();
- else
- DefineDefaultedFunction(*this, MD, DefaultLoc);
+ if (DefKind.isComparison()) {
+ if (CheckExplicitlyDefaultedComparison(nullptr, FD, DefKind.asComparison()))
+ FD->setInvalidDecl();
+ else
+ DefineDefaultedComparison(DefaultLoc, FD, DefKind.asComparison());
+ } else {
+ auto *MD = cast<CXXMethodDecl>(FD);
+
+ if (CheckExplicitlyDefaultedSpecialMember(MD, DefKind.asSpecialMember(),
+ DefaultLoc))
+ MD->setInvalidDecl();
+ else
+ DefineDefaultedFunction(*this, MD, DefaultLoc);
+ }
}
static void SearchForReturnInStmt(Sema &Self, Stmt *S) {
@@ -17162,6 +18304,21 @@ void Sema::DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock) {
}
}
+void Sema::SetFunctionBodyKind(Decl *D, SourceLocation Loc,
+ FnBodyKind BodyKind) {
+ switch (BodyKind) {
+ case FnBodyKind::Delete:
+ SetDeclDeleted(D, Loc);
+ break;
+ case FnBodyKind::Default:
+ SetDeclDefaulted(D, Loc);
+ break;
+ case FnBodyKind::Other:
+ llvm_unreachable(
+ "Parsed function body should be '= delete;' or '= default;'");
+ }
+}
+
bool Sema::CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old) {
const auto *NewFT = New->getType()->castAs<FunctionProtoType>();
@@ -17180,6 +18337,14 @@ bool Sema::CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
}
}
+ // SME attributes must match when overriding a function declaration.
+ if (IsInvalidSMECallConversion(Old->getType(), New->getType())) {
+ Diag(New->getLocation(), diag::err_conflicting_overriding_attributes)
+ << New << New->getType() << Old->getType();
+ Diag(Old->getLocation(), diag::note_overridden_virtual_function);
+ return true;
+ }
+
// Virtual overrides must have the same code_seg.
const auto *OldCSA = Old->getAttr<CodeSegAttr>();
const auto *NewCSA = New->getAttr<CodeSegAttr>();
@@ -17210,6 +18375,20 @@ bool Sema::CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
return true;
}
+bool Sema::CheckExplicitObjectOverride(CXXMethodDecl *New,
+ const CXXMethodDecl *Old) {
+ // CWG2553
+ // A virtual function shall not be an explicit object member function.
+ if (!New->isExplicitObjectMemberFunction())
+ return true;
+ Diag(New->getParamDecl(0)->getBeginLoc(),
+ diag::err_explicit_object_parameter_nonmember)
+ << New->getSourceRange() << /*virtual*/ 1 << /*IsLambda*/ false;
+ Diag(Old->getLocation(), diag::note_overridden_virtual_function);
+ New->setInvalidDecl();
+ return false;
+}
+
bool Sema::CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old) {
QualType NewTy = New->getType()->castAs<FunctionType>()->getReturnType();
@@ -17327,7 +18506,7 @@ bool Sema::CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange) {
Method->setRangeEnd(EndLoc);
if (Method->isVirtual() || Method->getParent()->isDependentContext()) {
- Method->setPure();
+ Method->setIsPureVirtual();
return false;
}
@@ -17452,7 +18631,7 @@ void Sema::MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
return;
// Do not mark as used if compiling for the device outside of the target
// region.
- if (TUKind != TU_Prefix && LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
+ if (TUKind != TU_Prefix && LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice &&
!isInOpenMPDeclareTargetContext() &&
!isInOpenMPTargetExecutionDirective()) {
if (!DefinitionRequired)
@@ -17500,7 +18679,7 @@ void Sema::MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
// immediately. For all other classes, we mark their virtual members
// at the end of the translation unit.
if (Class->isLocalClass())
- MarkVirtualMembersReferenced(Loc, Class);
+ MarkVirtualMembersReferenced(Loc, Class->getDefinition());
else
VTableUses.push_back(std::make_pair(Class, Loc));
}
@@ -17546,7 +18725,7 @@ bool Sema::DefineUsedVTables() {
// definition.
bool IsExplicitInstantiationDeclaration =
ClassTSK == TSK_ExplicitInstantiationDeclaration;
- for (auto R : Class->redecls()) {
+ for (auto *R : Class->redecls()) {
TemplateSpecializationKind TSK
= cast<CXXRecordDecl>(R)->getTemplateSpecializationKind();
if (TSK == TSK_ExplicitInstantiationDeclaration)
@@ -17582,16 +18761,12 @@ bool Sema::DefineUsedVTables() {
// no key function or the key function is inlined. Don't warn in C++ ABIs
// that lack key functions, since the user won't be able to make one.
if (Context.getTargetInfo().getCXXABI().hasKeyFunctions() &&
- Class->isExternallyVisible() && ClassTSK != TSK_ImplicitInstantiation) {
+ Class->isExternallyVisible() && ClassTSK != TSK_ImplicitInstantiation &&
+ ClassTSK != TSK_ExplicitInstantiationDefinition) {
const FunctionDecl *KeyFunctionDef = nullptr;
if (!KeyFunction || (KeyFunction->hasBody(KeyFunctionDef) &&
- KeyFunctionDef->isInlined())) {
- Diag(Class->getLocation(),
- ClassTSK == TSK_ExplicitInstantiationDefinition
- ? diag::warn_weak_template_vtable
- : diag::warn_weak_vtable)
- << Class;
- }
+ KeyFunctionDef->isInlined()))
+ Diag(Class->getLocation(), diag::warn_weak_vtable) << Class;
}
}
VTableUses.clear();
@@ -17602,7 +18777,7 @@ bool Sema::DefineUsedVTables() {
void Sema::MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD) {
for (const auto *I : RD->methods())
- if (I->isVirtual() && !I->isPure())
+ if (I->isVirtual() && !I->isPureVirtual())
ResolveExceptionSpec(Loc, I->getType()->castAs<FunctionProtoType>());
}
@@ -17623,7 +18798,8 @@ void Sema::MarkVirtualMembersReferenced(SourceLocation Loc,
// C++ [basic.def.odr]p2:
// [...] A virtual member function is used if it is not pure. [...]
- if (!Overrider->isPure() && (!ConstexprOnly || Overrider->isConstexpr()))
+ if (!Overrider->isPureVirtual() &&
+ (!ConstexprOnly || Overrider->isConstexpr()))
MarkFunctionReferenced(Loc, Overrider);
}
}
@@ -17662,9 +18838,9 @@ void Sema::SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation) {
InitializationKind InitKind =
InitializationKind::CreateDefault(ObjCImplementation->getLocation());
- InitializationSequence InitSeq(*this, InitEntity, InitKind, None);
+ InitializationSequence InitSeq(*this, InitEntity, InitKind, std::nullopt);
ExprResult MemberInit =
- InitSeq.Perform(*this, InitEntity, InitKind, None);
+ InitSeq.Perform(*this, InitEntity, InitKind, std::nullopt);
MemberInit = MaybeCreateExprWithCleanups(MemberInit);
// Note, MemberInit could actually come back empty if no initialization
// is required (e.g., because it would call a trivial default constructor)
@@ -17765,8 +18941,8 @@ void Sema::CheckDelegatingCtorCycles() {
llvm::SmallPtrSet<CXXConstructorDecl*, 4> Valid, Invalid, Current;
for (DelegatingCtorDeclsType::iterator
- I = DelegatingCtorDecls.begin(ExternalSource),
- E = DelegatingCtorDecls.end();
+ I = DelegatingCtorDecls.begin(ExternalSource.get()),
+ E = DelegatingCtorDecls.end();
I != E; ++I)
DelegatingCycleHelper(*I, Valid, Invalid, Current, *this);
@@ -17856,7 +19032,7 @@ bool Sema::checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method) {
case EST_NoexceptTrue:
if (!Finder.TraverseStmt(Proto->getNoexceptExpr()))
return true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case EST_Dynamic:
for (const auto &E : Proto->exceptions()) {
@@ -17882,27 +19058,27 @@ bool Sema::checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method) {
else if (const auto *G = dyn_cast<PtGuardedByAttr>(A))
Arg = G->getArg();
else if (const auto *AA = dyn_cast<AcquiredAfterAttr>(A))
- Args = llvm::makeArrayRef(AA->args_begin(), AA->args_size());
+ Args = llvm::ArrayRef(AA->args_begin(), AA->args_size());
else if (const auto *AB = dyn_cast<AcquiredBeforeAttr>(A))
- Args = llvm::makeArrayRef(AB->args_begin(), AB->args_size());
+ Args = llvm::ArrayRef(AB->args_begin(), AB->args_size());
else if (const auto *ETLF = dyn_cast<ExclusiveTrylockFunctionAttr>(A)) {
Arg = ETLF->getSuccessValue();
- Args = llvm::makeArrayRef(ETLF->args_begin(), ETLF->args_size());
+ Args = llvm::ArrayRef(ETLF->args_begin(), ETLF->args_size());
} else if (const auto *STLF = dyn_cast<SharedTrylockFunctionAttr>(A)) {
Arg = STLF->getSuccessValue();
- Args = llvm::makeArrayRef(STLF->args_begin(), STLF->args_size());
+ Args = llvm::ArrayRef(STLF->args_begin(), STLF->args_size());
} else if (const auto *LR = dyn_cast<LockReturnedAttr>(A))
Arg = LR->getArg();
else if (const auto *LE = dyn_cast<LocksExcludedAttr>(A))
- Args = llvm::makeArrayRef(LE->args_begin(), LE->args_size());
+ Args = llvm::ArrayRef(LE->args_begin(), LE->args_size());
else if (const auto *RC = dyn_cast<RequiresCapabilityAttr>(A))
- Args = llvm::makeArrayRef(RC->args_begin(), RC->args_size());
+ Args = llvm::ArrayRef(RC->args_begin(), RC->args_size());
else if (const auto *AC = dyn_cast<AcquireCapabilityAttr>(A))
- Args = llvm::makeArrayRef(AC->args_begin(), AC->args_size());
+ Args = llvm::ArrayRef(AC->args_begin(), AC->args_size());
else if (const auto *AC = dyn_cast<TryAcquireCapabilityAttr>(A))
- Args = llvm::makeArrayRef(AC->args_begin(), AC->args_size());
+ Args = llvm::ArrayRef(AC->args_begin(), AC->args_size());
else if (const auto *RC = dyn_cast<ReleaseCapabilityAttr>(A))
- Args = llvm::makeArrayRef(RC->args_begin(), RC->args_size());
+ Args = llvm::ArrayRef(RC->args_begin(), RC->args_size());
if (Arg && !Finder.TraverseStmt(Arg))
return true;
@@ -18017,7 +19193,7 @@ MSPropertyDecl *Sema::HandleMSProperty(Scope *S, RecordDecl *Record,
}
SourceLocation Loc = D.getIdentifierLoc();
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
QualType T = TInfo->getType();
if (getLangOpts().CPlusPlus) {
CheckExtraCXXDefaultArguments(D);
@@ -18112,8 +19288,7 @@ void Sema::ActOnStartFunctionDeclarationDeclarator(
}
if (ExplicitParams) {
Info.AutoTemplateParameterDepth = ExplicitParams->getDepth();
- for (NamedDecl *Param : *ExplicitParams)
- Info.TemplateParams.push_back(Param);
+ llvm::append_range(Info.TemplateParams, *ExplicitParams);
Info.NumExplicitTemplateParams = ExplicitParams->size();
} else {
Info.AutoTemplateParameterDepth = TemplateParameterDepth;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp
index e0f8c6e92d5a..bb0d0cd2030b 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp
@@ -296,7 +296,7 @@ static void DiagnoseObjCImplementedDeprecations(Sema &S, const NamedDecl *ND,
RealizedPlatform = S.Context.getTargetInfo().getPlatformName();
// Warn about implementing unavailable methods, unless the unavailable
// is for an app extension.
- if (RealizedPlatform.endswith("_app_extension"))
+ if (RealizedPlatform.ends_with("_app_extension"))
return;
S.Diag(ImplLoc, diag::warn_unavailable_def);
S.Diag(ND->getLocation(), diag::note_method_declared_at)
@@ -611,7 +611,7 @@ ActOnSuperClassOfClassInterface(Scope *S,
}
}
- if (!dyn_cast_or_null<TypedefNameDecl>(PrevDecl)) {
+ if (!isa_and_nonnull<TypedefNameDecl>(PrevDecl)) {
if (!SuperClassDecl)
Diag(SuperLoc, diag::err_undef_superclass)
<< SuperName << ClassName << SourceRange(AtInterfaceLoc, ClassLoc);
@@ -782,7 +782,7 @@ ObjCTypeParamList *Sema::actOnObjCTypeParamList(Scope *S,
// scope until later (after the instance variable block), but we want the
// diagnostics to occur right after we parse the type parameter list.
llvm::SmallDenseMap<IdentifierInfo *, ObjCTypeParamDecl *> knownParams;
- for (auto typeParam : typeParams) {
+ for (auto *typeParam : typeParams) {
auto known = knownParams.find(typeParam->getIdentifier());
if (known != knownParams.end()) {
Diag(typeParam->getLocation(), diag::err_objc_type_param_redecl)
@@ -803,7 +803,7 @@ ObjCTypeParamList *Sema::actOnObjCTypeParamList(Scope *S,
}
void Sema::popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList) {
- for (auto typeParam : *typeParamList) {
+ for (auto *typeParam : *typeParamList) {
if (!typeParam->isInvalidDecl()) {
S->RemoveDecl(typeParam);
IdResolver.RemoveDecl(typeParam);
@@ -971,14 +971,14 @@ static bool checkTypeParamListConsistency(Sema &S,
return false;
}
-Decl *Sema::ActOnStartClassInterface(
+ObjCInterfaceDecl *Sema::ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
- const ParsedAttributesView &AttrList) {
+ const ParsedAttributesView &AttrList, SkipBodyInfo *SkipBody) {
assert(ClassName && "Missing class identifier");
// Check for another declaration kind with the same name.
@@ -1029,7 +1029,7 @@ Decl *Sema::ActOnStartClassInterface(
// Clone the type parameter list.
SmallVector<ObjCTypeParamDecl *, 4> clonedTypeParams;
- for (auto typeParam : *prevTypeParamList) {
+ for (auto *typeParam : *prevTypeParamList) {
clonedTypeParams.push_back(
ObjCTypeParamDecl::Create(
Context,
@@ -1057,10 +1057,16 @@ Decl *Sema::ActOnStartClassInterface(
if (PrevIDecl) {
// Class already seen. Was it a definition?
if (ObjCInterfaceDecl *Def = PrevIDecl->getDefinition()) {
- Diag(AtInterfaceLoc, diag::err_duplicate_class_def)
- << PrevIDecl->getDeclName();
- Diag(Def->getLocation(), diag::note_previous_definition);
- IDecl->setInvalidDecl();
+ if (SkipBody && !hasVisibleDefinition(Def)) {
+ SkipBody->CheckSameAsPrevious = true;
+ SkipBody->New = IDecl;
+ SkipBody->Previous = Def;
+ } else {
+ Diag(AtInterfaceLoc, diag::err_duplicate_class_def)
+ << PrevIDecl->getDeclName();
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ IDecl->setInvalidDecl();
+ }
}
}
@@ -1075,7 +1081,9 @@ Decl *Sema::ActOnStartClassInterface(
// Start the definition of this class. If we're in a redefinition case, there
// may already be a definition, so we'll end up adding to it.
- if (!IDecl->hasDefinition())
+ if (SkipBody && SkipBody->CheckSameAsPrevious)
+ IDecl->startDuplicateDefinitionForComparison();
+ else if (!IDecl->hasDefinition())
IDecl->startDefinition();
if (SuperName) {
@@ -1100,7 +1108,8 @@ Decl *Sema::ActOnStartClassInterface(
}
CheckObjCDeclScope(IDecl);
- return ActOnObjCContainerStartDefinition(IDecl);
+ ActOnObjCContainerStartDefinition(IDecl);
+ return IDecl;
}
/// ActOnTypedefedProtocols - this action finds protocol list as part of the
@@ -1208,11 +1217,11 @@ bool Sema::CheckForwardProtocolDeclarationForCircularDependency(
return res;
}
-Decl *Sema::ActOnStartProtocolInterface(
+ObjCProtocolDecl *Sema::ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
- const ParsedAttributesView &AttrList) {
+ const ParsedAttributesView &AttrList, SkipBodyInfo *SkipBody) {
bool err = false;
// FIXME: Deal with AttrList.
assert(ProtocolName && "Missing protocol identifier");
@@ -1220,23 +1229,29 @@ Decl *Sema::ActOnStartProtocolInterface(
forRedeclarationInCurContext());
ObjCProtocolDecl *PDecl = nullptr;
if (ObjCProtocolDecl *Def = PrevDecl? PrevDecl->getDefinition() : nullptr) {
- // If we already have a definition, complain.
- Diag(ProtocolLoc, diag::warn_duplicate_protocol_def) << ProtocolName;
- Diag(Def->getLocation(), diag::note_previous_definition);
-
// Create a new protocol that is completely distinct from previous
// declarations, and do not make this protocol available for name lookup.
// That way, we'll end up completely ignoring the duplicate.
// FIXME: Can we turn this into an error?
PDecl = ObjCProtocolDecl::Create(Context, CurContext, ProtocolName,
ProtocolLoc, AtProtoInterfaceLoc,
- /*PrevDecl=*/nullptr);
+ /*PrevDecl=*/Def);
+
+ if (SkipBody && !hasVisibleDefinition(Def)) {
+ SkipBody->CheckSameAsPrevious = true;
+ SkipBody->New = PDecl;
+ SkipBody->Previous = Def;
+ } else {
+ // If we already have a definition, complain.
+ Diag(ProtocolLoc, diag::warn_duplicate_protocol_def) << ProtocolName;
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ }
// If we are using modules, add the decl to the context in order to
// serialize something meaningful.
if (getLangOpts().Modules)
PushOnScopeChains(PDecl, TUScope);
- PDecl->startDefinition();
+ PDecl->startDuplicateDefinitionForComparison();
} else {
if (PrevDecl) {
// Check for circular dependencies among protocol declarations. This can
@@ -1272,7 +1287,8 @@ Decl *Sema::ActOnStartProtocolInterface(
}
CheckObjCDeclScope(PDecl);
- return ActOnObjCContainerStartDefinition(PDecl);
+ ActOnObjCContainerStartDefinition(PDecl);
+ return PDecl;
}
static bool NestedProtocolHasNoDefinition(ObjCProtocolDecl *PDecl,
@@ -1500,7 +1516,7 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
llvm::SmallPtrSet<ObjCProtocolDecl*, 8> knownProtocols;
Context.CollectInheritedProtocols(baseClass, knownProtocols);
bool allProtocolsDeclared = true;
- for (auto proto : protocols) {
+ for (auto *proto : protocols) {
if (knownProtocols.count(static_cast<ObjCProtocolDecl *>(proto)) == 0) {
allProtocolsDeclared = false;
break;
@@ -1586,7 +1602,7 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
DS.SetRangeEnd(loc);
// Form the declarator.
- Declarator D(DS, DeclaratorContext::TypeName);
+ Declarator D(DS, ParsedAttributesView::none(), DeclaratorContext::TypeName);
// If we have a typedef of an Objective-C class type that is missing a '*',
// add the '*'.
@@ -1607,7 +1623,7 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
}
// Convert this to a type.
- return ActOnTypeName(S, D);
+ return ActOnTypeName(D);
};
// Local function that updates the declaration specifiers with
@@ -1799,7 +1815,7 @@ Sema::ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
return BuildDeclaratorGroup(DeclsInGroup);
}
-Decl *Sema::ActOnStartCategoryInterface(
+ObjCCategoryDecl *Sema::ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
@@ -1826,7 +1842,8 @@ Decl *Sema::ActOnStartCategoryInterface(
if (!IDecl)
Diag(ClassLoc, diag::err_undef_interface) << ClassName;
- return ActOnObjCContainerStartDefinition(CDecl);
+ ActOnObjCContainerStartDefinition(CDecl);
+ return CDecl;
}
if (!CategoryName && IDecl->getImplementation()) {
@@ -1889,17 +1906,17 @@ Decl *Sema::ActOnStartCategoryInterface(
}
CheckObjCDeclScope(CDecl);
- return ActOnObjCContainerStartDefinition(CDecl);
+ ActOnObjCContainerStartDefinition(CDecl);
+ return CDecl;
}
/// ActOnStartCategoryImplementation - Perform semantic checks on the
/// category implementation declaration and build an ObjCCategoryImplDecl
/// object.
-Decl *Sema::ActOnStartCategoryImplementation(
- SourceLocation AtCatImplLoc,
- IdentifierInfo *ClassName, SourceLocation ClassLoc,
- IdentifierInfo *CatName, SourceLocation CatLoc,
- const ParsedAttributesView &Attrs) {
+ObjCCategoryImplDecl *Sema::ActOnStartCategoryImplementation(
+ SourceLocation AtCatImplLoc, IdentifierInfo *ClassName,
+ SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc,
+ const ParsedAttributesView &Attrs) {
ObjCInterfaceDecl *IDecl = getObjCInterfaceDecl(ClassName, ClassLoc, true);
ObjCCategoryDecl *CatIDecl = nullptr;
if (IDecl && IDecl->hasDefinition()) {
@@ -1958,15 +1975,14 @@ Decl *Sema::ActOnStartCategoryImplementation(
}
CheckObjCDeclScope(CDecl);
- return ActOnObjCContainerStartDefinition(CDecl);
+ ActOnObjCContainerStartDefinition(CDecl);
+ return CDecl;
}
-Decl *Sema::ActOnStartClassImplementation(
- SourceLocation AtClassImplLoc,
- IdentifierInfo *ClassName, SourceLocation ClassLoc,
- IdentifierInfo *SuperClassname,
- SourceLocation SuperClassLoc,
- const ParsedAttributesView &Attrs) {
+ObjCImplementationDecl *Sema::ActOnStartClassImplementation(
+ SourceLocation AtClassImplLoc, IdentifierInfo *ClassName,
+ SourceLocation ClassLoc, IdentifierInfo *SuperClassname,
+ SourceLocation SuperClassLoc, const ParsedAttributesView &Attrs) {
ObjCInterfaceDecl *IDecl = nullptr;
// Check for another declaration kind with the same name.
NamedDecl *PrevDecl
@@ -2063,8 +2079,10 @@ Decl *Sema::ActOnStartClassImplementation(
ProcessDeclAttributeList(TUScope, IMPDecl, Attrs);
AddPragmaAttributes(TUScope, IMPDecl);
- if (CheckObjCDeclScope(IMPDecl))
- return ActOnObjCContainerStartDefinition(IMPDecl);
+ if (CheckObjCDeclScope(IMPDecl)) {
+ ActOnObjCContainerStartDefinition(IMPDecl);
+ return IMPDecl;
+ }
// Check that there is no duplicate implementation of this class.
if (IDecl->getImplementation()) {
@@ -2090,7 +2108,8 @@ Decl *Sema::ActOnStartClassImplementation(
<< IDecl->getSuperClass()->getDeclName();
}
- return ActOnObjCContainerStartDefinition(IMPDecl);
+ ActOnObjCContainerStartDefinition(IMPDecl);
+ return IMPDecl;
}
Sema::DeclGroupPtrTy
@@ -2212,9 +2231,8 @@ void Sema::CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
Diag(IVI->getLocation(), diag::err_inconsistent_ivar_count);
}
-static void WarnUndefinedMethod(Sema &S, SourceLocation ImpLoc,
- ObjCMethodDecl *method,
- bool &IncompleteImpl,
+static void WarnUndefinedMethod(Sema &S, ObjCImplDecl *Impl,
+ ObjCMethodDecl *method, bool &IncompleteImpl,
unsigned DiagID,
NamedDecl *NeededFor = nullptr) {
// No point warning no definition of method which is 'unavailable'.
@@ -2227,10 +2245,19 @@ static void WarnUndefinedMethod(Sema &S, SourceLocation ImpLoc,
// separate warnings. We will give that approach a try, as that
// matches what we do with protocols.
{
- const Sema::SemaDiagnosticBuilder &B = S.Diag(ImpLoc, DiagID);
+ const Sema::SemaDiagnosticBuilder &B = S.Diag(Impl->getLocation(), DiagID);
B << method;
if (NeededFor)
B << NeededFor;
+
+ // Add an empty definition at the end of the @implementation.
+ std::string FixItStr;
+ llvm::raw_string_ostream Out(FixItStr);
+ method->print(Out, Impl->getASTContext().getPrintingPolicy());
+ Out << " {\n}\n\n";
+
+ SourceLocation Loc = Impl->getAtEndRange().getBegin();
+ B << FixItHint::CreateInsertion(Loc, FixItStr);
}
// Issue a note to the original declaration.
@@ -2342,21 +2369,17 @@ static bool CheckMethodOverrideReturn(Sema &S,
!S.Context.hasSameNullabilityTypeQualifier(MethodImpl->getReturnType(),
MethodDecl->getReturnType(),
false)) {
- auto nullabilityMethodImpl =
- *MethodImpl->getReturnType()->getNullability(S.Context);
- auto nullabilityMethodDecl =
- *MethodDecl->getReturnType()->getNullability(S.Context);
- S.Diag(MethodImpl->getLocation(),
- diag::warn_conflicting_nullability_attr_overriding_ret_types)
- << DiagNullabilityKind(
- nullabilityMethodImpl,
- ((MethodImpl->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability)
- != 0))
- << DiagNullabilityKind(
- nullabilityMethodDecl,
- ((MethodDecl->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability)
- != 0));
- S.Diag(MethodDecl->getLocation(), diag::note_previous_declaration);
+ auto nullabilityMethodImpl = *MethodImpl->getReturnType()->getNullability();
+ auto nullabilityMethodDecl = *MethodDecl->getReturnType()->getNullability();
+ S.Diag(MethodImpl->getLocation(),
+ diag::warn_conflicting_nullability_attr_overriding_ret_types)
+ << DiagNullabilityKind(nullabilityMethodImpl,
+ ((MethodImpl->getObjCDeclQualifier() &
+ Decl::OBJC_TQ_CSNullability) != 0))
+ << DiagNullabilityKind(nullabilityMethodDecl,
+ ((MethodDecl->getObjCDeclQualifier() &
+ Decl::OBJC_TQ_CSNullability) != 0));
+ S.Diag(MethodDecl->getLocation(), diag::note_previous_declaration);
}
if (S.Context.hasSameUnqualifiedType(MethodImpl->getReturnType(),
@@ -2434,14 +2457,12 @@ static bool CheckMethodOverrideParam(Sema &S,
!S.Context.hasSameNullabilityTypeQualifier(ImplTy, IfaceTy, true)) {
S.Diag(ImplVar->getLocation(),
diag::warn_conflicting_nullability_attr_overriding_param_types)
- << DiagNullabilityKind(
- *ImplTy->getNullability(S.Context),
- ((ImplVar->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability)
- != 0))
- << DiagNullabilityKind(
- *IfaceTy->getNullability(S.Context),
- ((IfaceVar->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability)
- != 0));
+ << DiagNullabilityKind(*ImplTy->getNullability(),
+ ((ImplVar->getObjCDeclQualifier() &
+ Decl::OBJC_TQ_CSNullability) != 0))
+ << DiagNullabilityKind(*IfaceTy->getNullability(),
+ ((IfaceVar->getObjCDeclQualifier() &
+ Decl::OBJC_TQ_CSNullability) != 0));
S.Diag(IfaceVar->getLocation(), diag::note_previous_declaration);
}
if (S.Context.hasSameUnqualifiedType(ImplTy, IfaceTy))
@@ -2611,10 +2632,11 @@ void Sema::WarnExactTypedMethods(ObjCMethodDecl *ImpMethodDecl,
// don't issue warning when protocol method is optional because primary
// class is not required to implement it and it is safe for protocol
// to implement it.
- if (MethodDecl->getImplementationControl() == ObjCMethodDecl::Optional)
+ if (MethodDecl->getImplementationControl() ==
+ ObjCImplementationControl::Optional)
return;
// don't issue warning when primary class's method is
- // depecated/unavailable.
+ // deprecated/unavailable.
if (MethodDecl->hasAttr<UnavailableAttr>() ||
MethodDecl->hasAttr<DeprecatedAttr>())
return;
@@ -2679,14 +2701,10 @@ static void findProtocolsWithExplicitImpls(const ObjCInterfaceDecl *Super,
/// CheckProtocolMethodDefs - This routine checks unimplemented methods
/// Declared in protocol, and those referenced by it.
-static void CheckProtocolMethodDefs(Sema &S,
- SourceLocation ImpLoc,
- ObjCProtocolDecl *PDecl,
- bool& IncompleteImpl,
- const Sema::SelectorSet &InsMap,
- const Sema::SelectorSet &ClsMap,
- ObjCContainerDecl *CDecl,
- LazyProtocolNameSet &ProtocolsExplictImpl) {
+static void CheckProtocolMethodDefs(
+ Sema &S, ObjCImplDecl *Impl, ObjCProtocolDecl *PDecl, bool &IncompleteImpl,
+ const Sema::SelectorSet &InsMap, const Sema::SelectorSet &ClsMap,
+ ObjCContainerDecl *CDecl, LazyProtocolNameSet &ProtocolsExplictImpl) {
ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl);
ObjCInterfaceDecl *IDecl = C ? C->getClassInterface()
: dyn_cast<ObjCInterfaceDecl>(CDecl);
@@ -2711,8 +2729,7 @@ static void CheckProtocolMethodDefs(Sema &S,
ProtocolsExplictImpl.reset(new ProtocolNameSet);
findProtocolsWithExplicitImpls(Super, *ProtocolsExplictImpl);
}
- if (ProtocolsExplictImpl->find(PDecl->getIdentifier()) !=
- ProtocolsExplictImpl->end())
+ if (ProtocolsExplictImpl->contains(PDecl->getIdentifier()))
return;
// If no super class conforms to the protocol, we should not search
@@ -2749,46 +2766,43 @@ static void CheckProtocolMethodDefs(Sema &S,
// check unimplemented instance methods.
if (!NSIDecl)
for (auto *method : PDecl->instance_methods()) {
- if (method->getImplementationControl() != ObjCMethodDecl::Optional &&
+ if (method->getImplementationControl() !=
+ ObjCImplementationControl::Optional &&
!method->isPropertyAccessor() &&
!InsMap.count(method->getSelector()) &&
- (!Super || !Super->lookupMethod(method->getSelector(),
- true /* instance */,
- false /* shallowCategory */,
- true /* followsSuper */,
- nullptr /* category */))) {
- // If a method is not implemented in the category implementation but
- // has been declared in its primary class, superclass,
- // or in one of their protocols, no need to issue the warning.
- // This is because method will be implemented in the primary class
- // or one of its super class implementation.
-
- // Ugly, but necessary. Method declared in protocol might have
- // have been synthesized due to a property declared in the class which
- // uses the protocol.
- if (ObjCMethodDecl *MethodInClass =
- IDecl->lookupMethod(method->getSelector(),
- true /* instance */,
- true /* shallowCategoryLookup */,
- false /* followSuper */))
- if (C || MethodInClass->isPropertyAccessor())
- continue;
- unsigned DIAG = diag::warn_unimplemented_protocol_method;
- if (!S.Diags.isIgnored(DIAG, ImpLoc)) {
- WarnUndefinedMethod(S, ImpLoc, method, IncompleteImpl, DIAG,
- PDecl);
- }
- }
+ (!Super || !Super->lookupMethod(
+ method->getSelector(), true /* instance */,
+ false /* shallowCategory */, true /* followsSuper */,
+ nullptr /* category */))) {
+ // If a method is not implemented in the category implementation but
+ // has been declared in its primary class, superclass,
+ // or in one of their protocols, no need to issue the warning.
+ // This is because method will be implemented in the primary class
+ // or one of its super class implementation.
+
+ // Ugly, but necessary. Method declared in protocol might have
+ // have been synthesized due to a property declared in the class which
+ // uses the protocol.
+ if (ObjCMethodDecl *MethodInClass = IDecl->lookupMethod(
+ method->getSelector(), true /* instance */,
+ true /* shallowCategoryLookup */, false /* followSuper */))
+ if (C || MethodInClass->isPropertyAccessor())
+ continue;
+ unsigned DIAG = diag::warn_unimplemented_protocol_method;
+ if (!S.Diags.isIgnored(DIAG, Impl->getLocation())) {
+ WarnUndefinedMethod(S, Impl, method, IncompleteImpl, DIAG, PDecl);
+ }
+ }
}
// check unimplemented class methods
for (auto *method : PDecl->class_methods()) {
- if (method->getImplementationControl() != ObjCMethodDecl::Optional &&
+ if (method->getImplementationControl() !=
+ ObjCImplementationControl::Optional &&
!ClsMap.count(method->getSelector()) &&
- (!Super || !Super->lookupMethod(method->getSelector(),
- false /* class method */,
- false /* shallowCategoryLookup */,
- true /* followSuper */,
- nullptr /* category */))) {
+ (!Super || !Super->lookupMethod(
+ method->getSelector(), false /* class method */,
+ false /* shallowCategoryLookup */,
+ true /* followSuper */, nullptr /* category */))) {
// See above comment for instance method lookups.
if (C && IDecl->lookupMethod(method->getSelector(),
false /* class */,
@@ -2797,15 +2811,15 @@ static void CheckProtocolMethodDefs(Sema &S,
continue;
unsigned DIAG = diag::warn_unimplemented_protocol_method;
- if (!S.Diags.isIgnored(DIAG, ImpLoc)) {
- WarnUndefinedMethod(S, ImpLoc, method, IncompleteImpl, DIAG, PDecl);
+ if (!S.Diags.isIgnored(DIAG, Impl->getLocation())) {
+ WarnUndefinedMethod(S, Impl, method, IncompleteImpl, DIAG, PDecl);
}
}
}
// Check on this protocols's referenced protocols, recursively.
for (auto *PI : PDecl->protocols())
- CheckProtocolMethodDefs(S, ImpLoc, PI, IncompleteImpl, InsMap, ClsMap,
- CDecl, ProtocolsExplictImpl);
+ CheckProtocolMethodDefs(S, Impl, PI, IncompleteImpl, InsMap, ClsMap, CDecl,
+ ProtocolsExplictImpl);
}
/// MatchAllMethodDeclarations - Check methods declared in interface
@@ -2828,7 +2842,7 @@ void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap,
if (!I->isPropertyAccessor() &&
!InsMap.count(I->getSelector())) {
if (ImmediateClass)
- WarnUndefinedMethod(*this, IMPDecl->getLocation(), I, IncompleteImpl,
+ WarnUndefinedMethod(*this, IMPDecl, I, IncompleteImpl,
diag::warn_undef_method_impl);
continue;
} else {
@@ -2858,7 +2872,7 @@ void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap,
if (!I->isPropertyAccessor() &&
!ClsMap.count(I->getSelector())) {
if (ImmediateClass)
- WarnUndefinedMethod(*this, IMPDecl->getLocation(), I, IncompleteImpl,
+ WarnUndefinedMethod(*this, IMPDecl, I, IncompleteImpl,
diag::warn_undef_method_impl);
} else {
ObjCMethodDecl *ImpMethodDecl =
@@ -3025,16 +3039,15 @@ void Sema::ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
if (ObjCInterfaceDecl *I = dyn_cast<ObjCInterfaceDecl> (CDecl)) {
for (auto *PI : I->all_referenced_protocols())
- CheckProtocolMethodDefs(*this, IMPDecl->getLocation(), PI, IncompleteImpl,
- InsMap, ClsMap, I, ExplicitImplProtocols);
+ CheckProtocolMethodDefs(*this, IMPDecl, PI, IncompleteImpl, InsMap,
+ ClsMap, I, ExplicitImplProtocols);
} else if (ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl)) {
// For extended class, unimplemented methods in its protocols will
// be reported in the primary class.
if (!C->IsClassExtension()) {
for (auto *P : C->protocols())
- CheckProtocolMethodDefs(*this, IMPDecl->getLocation(), P,
- IncompleteImpl, InsMap, ClsMap, CDecl,
- ExplicitImplProtocols);
+ CheckProtocolMethodDefs(*this, IMPDecl, P, IncompleteImpl, InsMap,
+ ClsMap, CDecl, ExplicitImplProtocols);
DiagnoseUnimplementedProperties(S, IMPDecl, CDecl,
/*SynthesizeProperties=*/false);
}
@@ -3427,8 +3440,10 @@ void Sema::AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl,
GlobalMethodPool::iterator Pos = MethodPool.find(Method->getSelector());
if (Pos == MethodPool.end())
- Pos = MethodPool.insert(std::make_pair(Method->getSelector(),
- GlobalMethods())).first;
+ Pos = MethodPool
+ .insert(std::make_pair(Method->getSelector(),
+ GlobalMethodPool::Lists()))
+ .first;
Method->setDefined(impl);
@@ -3636,7 +3651,7 @@ ObjCMethodDecl *Sema::LookupImplementedMethodInGlobalPool(Selector Sel) {
if (Pos == MethodPool.end())
return nullptr;
- GlobalMethods &Methods = Pos->second;
+ GlobalMethodPool::Lists &Methods = Pos->second;
for (const ObjCMethodList *Method = &Methods.first; Method;
Method = Method->getNext())
if (Method->getMethod() &&
@@ -3746,7 +3761,7 @@ Sema::SelectorsForTypoCorrection(Selector Sel,
/// DiagnoseDuplicateIvars -
/// Check for duplicate ivars in the entire class at the start of
-/// \@implementation. This becomes necesssary because class extension can
+/// \@implementation. This becomes necessary because class extension can
/// add ivars to a class in random order which will not be known until
/// class's \@implementation is seen.
void Sema::DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID,
@@ -3847,7 +3862,7 @@ static void DiagnoseVariableSizedIvars(Sema &S, ObjCContainerDecl *OCD) {
// Check if variable sized ivar is in interface and visible to subclasses.
if (!isa<ObjCInterfaceDecl>(OCD)) {
- for (auto ivar : Ivars) {
+ for (auto *ivar : Ivars) {
if (!ivar->isInvalidDecl() && IsVariableSizedType(ivar->getType())) {
S.Diag(ivar->getLocation(), diag::warn_variable_sized_ivar_visibility)
<< ivar->getDeclName() << ivar->getType();
@@ -3869,7 +3884,7 @@ static void DiagnoseVariableSizedIvars(Sema &S, ObjCContainerDecl *OCD) {
if (IvarTy->isIncompleteArrayType()) {
S.Diag(ivar->getLocation(), diag::err_flexible_array_not_at_end)
<< ivar->getDeclName() << IvarTy
- << TTK_Class; // Use "class" for Obj-C.
+ << llvm::to_underlying(TagTypeKind::Class); // Use "class" for Obj-C.
IsInvalidIvar = true;
} else if (const RecordType *RecordTy = IvarTy->getAs<RecordType>()) {
if (RecordTy->getDecl()->hasFlexibleArrayMember()) {
@@ -3982,7 +3997,7 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods,
// they are overridden by an explicit method that is encountered
// later.
if (auto *OID = dyn_cast<ObjCImplementationDecl>(CurContext)) {
- for (auto PropImpl : OID->property_impls()) {
+ for (auto *PropImpl : OID->property_impls()) {
if (auto *Getter = PropImpl->getGetterMethodDecl())
if (Getter->isSynthesizedAccessorStub())
OID->addDecl(Getter);
@@ -4424,6 +4439,11 @@ void Sema::CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ResultTypeCompatibilityKind RTC) {
if (!ObjCMethod)
return;
+ auto IsMethodInCurrentClass = [CurrentClass](const ObjCMethodDecl *M) {
+ // Checking canonical decl works across modules.
+ return M->getClassInterface()->getCanonicalDecl() ==
+ CurrentClass->getCanonicalDecl();
+ };
// Search for overridden methods and merge information down from them.
OverrideSearch overrides(*this, ObjCMethod);
// Keep track if the method overrides any method in the class's base classes,
@@ -4435,8 +4455,7 @@ void Sema::CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
for (ObjCMethodDecl *overridden : overrides) {
if (!hasOverriddenMethodsInBaseOrProtocol) {
if (isa<ObjCProtocolDecl>(overridden->getDeclContext()) ||
- CurrentClass != overridden->getClassInterface() ||
- overridden->isOverriding()) {
+ !IsMethodInCurrentClass(overridden) || overridden->isOverriding()) {
CheckObjCMethodDirectOverrides(ObjCMethod, overridden);
hasOverriddenMethodsInBaseOrProtocol = true;
} else if (isa<ObjCImplDecl>(ObjCMethod->getDeclContext())) {
@@ -4461,7 +4480,7 @@ void Sema::CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
OverrideSearch overrides(*this, overridden);
for (ObjCMethodDecl *SuperOverridden : overrides) {
if (isa<ObjCProtocolDecl>(SuperOverridden->getDeclContext()) ||
- CurrentClass != SuperOverridden->getClassInterface()) {
+ !IsMethodInCurrentClass(SuperOverridden)) {
CheckObjCMethodDirectOverrides(ObjCMethod, SuperOverridden);
hasOverriddenMethodsInBaseOrProtocol = true;
overridden->setOverriding(true);
@@ -4525,11 +4544,11 @@ static QualType mergeTypeNullabilityForRedecl(Sema &S, SourceLocation loc,
QualType prevType,
bool prevUsesCSKeyword) {
// Determine the nullability of both types.
- auto nullability = type->getNullability(S.Context);
- auto prevNullability = prevType->getNullability(S.Context);
+ auto nullability = type->getNullability();
+ auto prevNullability = prevType->getNullability();
// Easy case: both have nullability.
- if (nullability.hasValue() == prevNullability.hasValue()) {
+ if (nullability.has_value() == prevNullability.has_value()) {
// Neither has nullability; continue.
if (!nullability)
return type;
@@ -4739,8 +4758,9 @@ Decl *Sema::ActOnMethodDeclaration(
MethodType == tok::minus, isVariadic,
/*isPropertyAccessor=*/false, /*isSynthesizedAccessorStub=*/false,
/*isImplicitlyDeclared=*/false, /*isDefined=*/false,
- MethodDeclKind == tok::objc_optional ? ObjCMethodDecl::Optional
- : ObjCMethodDecl::Required,
+ MethodDeclKind == tok::objc_optional
+ ? ObjCImplementationControl::Optional
+ : ObjCImplementationControl::Required,
HasRelatedResultType);
SmallVector<ParmVarDecl*, 16> Params;
@@ -4832,7 +4852,7 @@ Decl *Sema::ActOnMethodDeclaration(
// If this method overrides a previous @synthesize declaration,
// register it with the property. Linear search through all
// properties here, because the autosynthesized stub hasn't been
- // made visible yet, so it can be overriden by a later
+ // made visible yet, so it can be overridden by a later
// user-specified implementation.
for (ObjCPropertyImplDecl *PropertyImpl : ImpDecl->property_impls()) {
if (auto *Setter = PropertyImpl->getSetterMethodDecl())
@@ -5191,7 +5211,7 @@ Decl *Sema::ActOnObjCExceptionDecl(Scope *S, Declarator &D) {
if (getLangOpts().CPlusPlus)
CheckExtraCXXDefaultArguments(D);
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
QualType ExceptionType = TInfo->getType();
VarDecl *New = BuildObjCExceptionDecl(TInfo, ExceptionType,
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp
index 8816c9c1fea0..75730ea888af 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp
@@ -21,6 +21,7 @@
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
+#include <optional>
namespace clang {
@@ -78,14 +79,21 @@ bool Sema::isLibstdcxxEagerExceptionSpecHack(const Declarator &D) {
.Default(false);
}
-ExprResult Sema::ActOnNoexceptSpec(SourceLocation NoexceptLoc,
- Expr *NoexceptExpr,
+ExprResult Sema::ActOnNoexceptSpec(Expr *NoexceptExpr,
ExceptionSpecificationType &EST) {
- // FIXME: This is bogus, a noexcept expression is not a condition.
- ExprResult Converted = CheckBooleanCondition(NoexceptLoc, NoexceptExpr);
+
+ if (NoexceptExpr->isTypeDependent() ||
+ NoexceptExpr->containsUnexpandedParameterPack()) {
+ EST = EST_DependentNoexcept;
+ return NoexceptExpr;
+ }
+
+ llvm::APSInt Result;
+ ExprResult Converted = CheckConvertedConstantExpression(
+ NoexceptExpr, Context.BoolTy, Result, CCEK_Noexcept);
+
if (Converted.isInvalid()) {
EST = EST_NoexceptFalse;
-
// Fill in an expression of 'false' as a fixup.
auto *BoolExpr = new (Context)
CXXBoolLiteralExpr(false, Context.BoolTy, NoexceptExpr->getBeginLoc());
@@ -99,9 +107,6 @@ ExprResult Sema::ActOnNoexceptSpec(SourceLocation NoexceptLoc,
return Converted;
}
- llvm::APSInt Result;
- Converted = VerifyIntegerConstantExpression(
- Converted.get(), &Result, diag::err_noexcept_needs_constant_expression);
if (!Converted.isInvalid())
EST = !Result ? EST_NoexceptFalse : EST_NoexceptTrue;
return Converted;
@@ -167,6 +172,12 @@ bool Sema::CheckSpecifiedExceptionType(QualType &T, SourceRange Range) {
RequireCompleteType(Range.getBegin(), PointeeT, DiagID, Kind, Range))
return ReturnValueOnError;
+ // WebAssembly reference types can't be used in exception specifications.
+ if (PointeeT.isWebAssemblyReferenceType()) {
+ Diag(Range.getBegin(), diag::err_wasm_reftype_exception_spec);
+ return true;
+ }
+
// The MSVC compatibility mode doesn't extend to sizeless types,
// so diagnose them separately.
if (PointeeT->isSizelessType() && Kind != 1) {
@@ -338,8 +349,7 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
if (!MissingExceptionSpecification)
return ReturnValueOnError;
- const FunctionProtoType *NewProto =
- New->getType()->castAs<FunctionProtoType>();
+ const auto *NewProto = New->getType()->castAs<FunctionProtoType>();
// The new function declaration is only missing an empty exception
// specification "throw()". If the throw() specification came from a
@@ -349,7 +359,7 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
// specifications.
//
// Likewise if the old function is a builtin.
- if (MissingEmptyExceptionSpecification && NewProto &&
+ if (MissingEmptyExceptionSpecification &&
(Old->getLocation().isInvalid() ||
Context.getSourceManager().isInSystemHeader(Old->getLocation()) ||
Old->getBuiltinID()) &&
@@ -360,8 +370,7 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
return false;
}
- const FunctionProtoType *OldProto =
- Old->getType()->castAs<FunctionProtoType>();
+ const auto *OldProto = Old->getType()->castAs<FunctionProtoType>();
FunctionProtoType::ExceptionSpecInfo ESI = OldProto->getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
@@ -387,9 +396,8 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
NewProto->getExtProtoInfo().withExceptionSpec(ESI)));
}
- if (getLangOpts().MSVCCompat && ESI.Type != EST_DependentNoexcept) {
- // Allow missing exception specifications in redeclarations as an extension.
- DiagID = diag::ext_ms_missing_exception_specification;
+ if (getLangOpts().MSVCCompat && isDynamicExceptionSpec(ESI.Type)) {
+ DiagID = diag::ext_missing_exception_specification;
ReturnValueOnError = false;
} else if (New->isReplaceableGlobalAllocationFunction() &&
ESI.Type != EST_DependentNoexcept) {
@@ -398,6 +406,10 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
DiagID = diag::ext_missing_exception_specification;
ReturnValueOnError = false;
} else if (ESI.Type == EST_NoThrow) {
+ // Don't emit any warning for missing 'nothrow' in MSVC.
+ if (getLangOpts().MSVCCompat) {
+ return false;
+ }
// Allow missing attribute 'nothrow' in redeclarations, since this is a very
// common omission.
DiagID = diag::ext_missing_exception_specification;
@@ -757,14 +769,12 @@ bool Sema::handlerCanCatch(QualType HandlerType, QualType ExceptionType) {
/// CheckExceptionSpecSubset - Check whether the second function type's
/// exception specification is a subset (or equivalent) of the first function
/// type. This is used by override and pointer assignment checks.
-bool Sema::CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
- const PartialDiagnostic &NestedDiagID,
- const PartialDiagnostic &NoteID,
- const PartialDiagnostic &NoThrowDiagID,
- const FunctionProtoType *Superset,
- SourceLocation SuperLoc,
- const FunctionProtoType *Subset,
- SourceLocation SubLoc) {
+bool Sema::CheckExceptionSpecSubset(
+ const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID,
+ const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID,
+ const FunctionProtoType *Superset, bool SkipSupersetFirstParameter,
+ SourceLocation SuperLoc, const FunctionProtoType *Subset,
+ bool SkipSubsetFirstParameter, SourceLocation SubLoc) {
// Just auto-succeed under -fno-exceptions.
if (!getLangOpts().CXXExceptions)
@@ -804,8 +814,9 @@ bool Sema::CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
// done.
if ((SuperCanThrow == CT_Can && SuperEST != EST_Dynamic) ||
SubCanThrow == CT_Cannot)
- return CheckParamExceptionSpec(NestedDiagID, NoteID, Superset, SuperLoc,
- Subset, SubLoc);
+ return CheckParamExceptionSpec(NestedDiagID, NoteID, Superset,
+ SkipSupersetFirstParameter, SuperLoc, Subset,
+ SkipSubsetFirstParameter, SubLoc);
// Allow __declspec(nothrow) to be missing on redeclaration as an extension in
// some cases.
@@ -857,8 +868,9 @@ bool Sema::CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
}
}
// We've run half the gauntlet.
- return CheckParamExceptionSpec(NestedDiagID, NoteID, Superset, SuperLoc,
- Subset, SubLoc);
+ return CheckParamExceptionSpec(NestedDiagID, NoteID, Superset,
+ SkipSupersetFirstParameter, SuperLoc, Subset,
+ SkipSupersetFirstParameter, SubLoc);
}
static bool
@@ -882,12 +894,11 @@ CheckSpecForTypesEquivalent(Sema &S, const PartialDiagnostic &DiagID,
/// assignment and override compatibility check. We do not check the parameters
/// of parameter function pointers recursively, as no sane programmer would
/// even be able to write such a function type.
-bool Sema::CheckParamExceptionSpec(const PartialDiagnostic &DiagID,
- const PartialDiagnostic &NoteID,
- const FunctionProtoType *Target,
- SourceLocation TargetLoc,
- const FunctionProtoType *Source,
- SourceLocation SourceLoc) {
+bool Sema::CheckParamExceptionSpec(
+ const PartialDiagnostic &DiagID, const PartialDiagnostic &NoteID,
+ const FunctionProtoType *Target, bool SkipTargetFirstParameter,
+ SourceLocation TargetLoc, const FunctionProtoType *Source,
+ bool SkipSourceFirstParameter, SourceLocation SourceLoc) {
auto RetDiag = DiagID;
RetDiag << 0;
if (CheckSpecForTypesEquivalent(
@@ -898,14 +909,16 @@ bool Sema::CheckParamExceptionSpec(const PartialDiagnostic &DiagID,
// We shouldn't even be testing this unless the arguments are otherwise
// compatible.
- assert(Target->getNumParams() == Source->getNumParams() &&
+ assert((Target->getNumParams() - (unsigned)SkipTargetFirstParameter) ==
+ (Source->getNumParams() - (unsigned)SkipSourceFirstParameter) &&
"Functions have different argument counts.");
for (unsigned i = 0, E = Target->getNumParams(); i != E; ++i) {
auto ParamDiag = DiagID;
ParamDiag << 1;
if (CheckSpecForTypesEquivalent(
*this, ParamDiag, PDiag(),
- Target->getParamType(i), TargetLoc, Source->getParamType(i),
+ Target->getParamType(i + (SkipTargetFirstParameter ? 1 : 0)),
+ TargetLoc, Source->getParamType(SkipSourceFirstParameter ? 1 : 0),
SourceLoc))
return true;
}
@@ -946,9 +959,10 @@ bool Sema::CheckExceptionSpecCompatibility(Expr *From, QualType ToType) {
// void (*q)(void (*) throw(int)) = p;
// }
// ... because it might be instantiated with T=int.
- return CheckExceptionSpecSubset(
- PDiag(DiagID), PDiag(NestedDiagID), PDiag(), PDiag(), ToFunc,
- From->getSourceRange().getBegin(), FromFunc, SourceLocation()) &&
+ return CheckExceptionSpecSubset(PDiag(DiagID), PDiag(NestedDiagID), PDiag(),
+ PDiag(), ToFunc, 0,
+ From->getSourceRange().getBegin(), FromFunc,
+ 0, SourceLocation()) &&
!getLangOpts().CPlusPlus17;
}
@@ -977,14 +991,14 @@ bool Sema::CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
unsigned DiagID = diag::err_override_exception_spec;
if (getLangOpts().MSVCCompat)
DiagID = diag::ext_override_exception_spec;
- return CheckExceptionSpecSubset(PDiag(DiagID),
- PDiag(diag::err_deep_exception_specs_differ),
- PDiag(diag::note_overridden_virtual_function),
- PDiag(diag::ext_override_exception_spec),
- Old->getType()->castAs<FunctionProtoType>(),
- Old->getLocation(),
- New->getType()->castAs<FunctionProtoType>(),
- New->getLocation());
+ return CheckExceptionSpecSubset(
+ PDiag(DiagID), PDiag(diag::err_deep_exception_specs_differ),
+ PDiag(diag::note_overridden_virtual_function),
+ PDiag(diag::ext_override_exception_spec),
+ Old->getType()->castAs<FunctionProtoType>(),
+ Old->hasCXXExplicitFunctionObjectParameter(), Old->getLocation(),
+ New->getType()->castAs<FunctionProtoType>(),
+ New->hasCXXExplicitFunctionObjectParameter(), New->getLocation());
}
static CanThrowResult canSubStmtsThrow(Sema &Self, const Stmt *S) {
@@ -1284,6 +1298,7 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
case Expr::StmtExprClass:
case Expr::ConvertVectorExprClass:
case Expr::VAArgExprClass:
+ case Expr::CXXParenListInitExprClass:
return canSubStmtsThrow(*this, S);
case Expr::CompoundLiteralExprClass:
@@ -1448,15 +1463,20 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
case Stmt::OMPForSimdDirectiveClass:
case Stmt::OMPMasterDirectiveClass:
case Stmt::OMPMasterTaskLoopDirectiveClass:
+ case Stmt::OMPMaskedTaskLoopDirectiveClass:
case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
+ case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
case Stmt::OMPOrderedDirectiveClass:
case Stmt::OMPCanonicalLoopClass:
case Stmt::OMPParallelDirectiveClass:
case Stmt::OMPParallelForDirectiveClass:
case Stmt::OMPParallelForSimdDirectiveClass:
case Stmt::OMPParallelMasterDirectiveClass:
+ case Stmt::OMPParallelMaskedDirectiveClass:
case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
+ case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
+ case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
case Stmt::OMPParallelSectionsDirectiveClass:
case Stmt::OMPSectionDirectiveClass:
case Stmt::OMPSectionsDirectiveClass:
@@ -1478,12 +1498,14 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
case Stmt::OMPTargetUpdateDirectiveClass:
+ case Stmt::OMPScopeDirectiveClass:
case Stmt::OMPTaskDirectiveClass:
case Stmt::OMPTaskgroupDirectiveClass:
case Stmt::OMPTaskLoopDirectiveClass:
case Stmt::OMPTaskLoopSimdDirectiveClass:
case Stmt::OMPTaskwaitDirectiveClass:
case Stmt::OMPTaskyieldDirectiveClass:
+ case Stmt::OMPErrorDirectiveClass:
case Stmt::OMPTeamsDirectiveClass:
case Stmt::OMPTeamsDistributeDirectiveClass:
case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
@@ -1492,6 +1514,12 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
case Stmt::OMPInteropDirectiveClass:
case Stmt::OMPDispatchDirectiveClass:
case Stmt::OMPMaskedDirectiveClass:
+ case Stmt::OMPMetaDirectiveClass:
+ case Stmt::OMPGenericLoopDirectiveClass:
+ case Stmt::OMPTeamsGenericLoopDirectiveClass:
+ case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
+ case Stmt::OMPParallelGenericLoopDirectiveClass:
+ case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
case Stmt::ReturnStmtClass:
case Stmt::SEHExceptStmtClass:
case Stmt::SEHFinallyStmtClass:
@@ -1529,7 +1557,7 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
// For 'if constexpr', consider only the non-discarded case.
// FIXME: We should add a DiscardedStmt marker to the AST.
- if (Optional<const Stmt *> Case = IS->getNondiscardedCase(Context))
+ if (std::optional<const Stmt *> Case = IS->getNondiscardedCase(Context))
return *Case ? mergeCanThrow(CT, canThrow(*Case)) : CT;
CanThrowResult Then = canThrow(IS->getThen());
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
index f04eb9199024..4cce0abc2315 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
@@ -25,18 +25,24 @@
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/OperationKinds.h"
+#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Builtins.h"
+#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/LiteralSupport.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/Designator.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Overload.h"
@@ -48,12 +54,14 @@
#include "clang/Sema/Template.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/SaveAndRestore.h"
+#include "llvm/Support/TypeSize.h"
+#include <optional>
using namespace clang;
using namespace sema;
-using llvm::RoundingMode;
/// Determine whether the use of this declaration is valid, without
/// emitting diagnostics.
@@ -97,7 +105,7 @@ static void DiagnoseUnusedOfDecl(Sema &S, NamedDecl *D, SourceLocation Loc) {
// [[maybe_unused]] should not diagnose uses, but __attribute__((unused))
// should diagnose them.
if (A->getSemanticSpelling() != UnusedAttr::CXX11_maybe_unused &&
- A->getSemanticSpelling() != UnusedAttr::C2x_maybe_unused) {
+ A->getSemanticSpelling() != UnusedAttr::C23_maybe_unused) {
const Decl *DC = cast_or_null<Decl>(S.getCurObjCLexicalContext());
if (DC && !DC->hasAttr<UnusedAttr>())
S.Diag(Loc, diag::warn_used_but_marked_unused) << D;
@@ -131,7 +139,7 @@ void Sema::NoteDeletedFunction(FunctionDecl *Decl) {
/// Determine whether a FunctionDecl was ever declared with an
/// explicit storage class.
static bool hasAnyExplicitStorageClass(const FunctionDecl *D) {
- for (auto I : D->redecls()) {
+ for (auto *I : D->redecls()) {
if (I->getStorageClass() != SC_None)
return true;
}
@@ -165,7 +173,7 @@ static void diagnoseUseOfInternalDeclInInlineFunction(Sema &S,
return;
// Check if the decl has internal linkage.
- if (D->getFormalLinkage() != InternalLinkage)
+ if (D->getFormalLinkage() != Linkage::Internal)
return;
// Downgrade from ExtWarn to Extension if
@@ -217,7 +225,8 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks,
- ObjCInterfaceDecl *ClassReceiver) {
+ ObjCInterfaceDecl *ClassReceiver,
+ bool SkipTrailingRequiresClause) {
SourceLocation Loc = Locs.front();
if (getLangOpts().CPlusPlus && isa<FunctionDecl>(D)) {
// If there were any diagnostics suppressed by template argument deduction,
@@ -276,9 +285,10 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
// See if this is a function with constraints that need to be satisfied.
// Check this before deducing the return type, as it might instantiate the
// definition.
- if (FD->getTrailingRequiresClause()) {
+ if (!SkipTrailingRequiresClause && FD->getTrailingRequiresClause()) {
ConstraintSatisfaction Satisfaction;
- if (CheckFunctionConstraints(FD, Satisfaction, Loc))
+ if (CheckFunctionConstraints(FD, Satisfaction, Loc,
+ /*ForOverloadResolution*/ true))
// A diagnostic will have already been generated (non-constant
// constraint expression, for example)
return true;
@@ -300,8 +310,6 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
if (getLangOpts().CUDA && !CheckCUDACall(Loc, FD))
return true;
- if (getLangOpts().SYCLIsDevice && !checkSYCLDeviceFunction(Loc, FD))
- return true;
}
if (auto *MD = dyn_cast<CXXMethodDecl>(D)) {
@@ -344,7 +352,8 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
// [OpenMP 5.0], 2.19.7.3. declare mapper Directive, Restrictions
// List-items in map clauses on this construct may only refer to the declared
// variable var and entities that could be referenced by a procedure defined
- // at the same location
+ // at the same location.
+ // [OpenMP 5.2] Also allow iterator declared variables.
if (LangOpts.OpenMP && isa<VarDecl>(D) &&
!isOpenMPDeclareMapperVarDeclAllowed(cast<VarDecl>(D))) {
Diag(Loc, diag::err_omp_declare_mapper_wrong_var)
@@ -366,10 +375,21 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
diagnoseUseOfInternalDeclInInlineFunction(*this, D, Loc);
- if (LangOpts.SYCLIsDevice || (LangOpts.OpenMP && LangOpts.OpenMPIsDevice)) {
- if (auto *VD = dyn_cast<ValueDecl>(D))
- checkDeviceDecl(VD, Loc);
+ if (D->hasAttr<AvailableOnlyInDefaultEvalMethodAttr>()) {
+ if (getLangOpts().getFPEvalMethod() !=
+ LangOptions::FPEvalMethodKind::FEM_UnsetOnCommandLine &&
+ PP.getLastFPEvalPragmaLocation().isValid() &&
+ PP.getCurrentFPEvalMethod() != getLangOpts().getFPEvalMethod())
+ Diag(D->getLocation(),
+ diag::err_type_available_only_in_default_eval_method)
+ << D->getName();
+ }
+
+ if (auto *VD = dyn_cast<ValueDecl>(D))
+ checkTypeSupport(VD->getType(), Loc, VD);
+ if (LangOpts.SYCLIsDevice ||
+ (LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice)) {
if (!Context.getTargetInfo().isTLSSupported())
if (const auto *VD = dyn_cast<VarDecl>(D))
if (VD->getTLSKind() != VarDecl::TLS_None)
@@ -394,80 +414,83 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
/// message-send is to a declaration with the sentinel attribute, and
/// if so, it checks that the requirements of the sentinel are
/// satisfied.
-void Sema::DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
+void Sema::DiagnoseSentinelCalls(const NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args) {
- const SentinelAttr *attr = D->getAttr<SentinelAttr>();
- if (!attr)
+ const SentinelAttr *Attr = D->getAttr<SentinelAttr>();
+ if (!Attr)
return;
// The number of formal parameters of the declaration.
- unsigned numFormalParams;
+ unsigned NumFormalParams;
// The kind of declaration. This is also an index into a %select in
// the diagnostic.
- enum CalleeType { CT_Function, CT_Method, CT_Block } calleeType;
-
- if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
- numFormalParams = MD->param_size();
- calleeType = CT_Method;
- } else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- numFormalParams = FD->param_size();
- calleeType = CT_Function;
- } else if (isa<VarDecl>(D)) {
- QualType type = cast<ValueDecl>(D)->getType();
- const FunctionType *fn = nullptr;
- if (const PointerType *ptr = type->getAs<PointerType>()) {
- fn = ptr->getPointeeType()->getAs<FunctionType>();
- if (!fn) return;
- calleeType = CT_Function;
- } else if (const BlockPointerType *ptr = type->getAs<BlockPointerType>()) {
- fn = ptr->getPointeeType()->castAs<FunctionType>();
- calleeType = CT_Block;
+ enum { CK_Function, CK_Method, CK_Block } CalleeKind;
+
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ NumFormalParams = MD->param_size();
+ CalleeKind = CK_Method;
+ } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ NumFormalParams = FD->param_size();
+ CalleeKind = CK_Function;
+ } else if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ QualType Ty = VD->getType();
+ const FunctionType *Fn = nullptr;
+ if (const auto *PtrTy = Ty->getAs<PointerType>()) {
+ Fn = PtrTy->getPointeeType()->getAs<FunctionType>();
+ if (!Fn)
+ return;
+ CalleeKind = CK_Function;
+ } else if (const auto *PtrTy = Ty->getAs<BlockPointerType>()) {
+ Fn = PtrTy->getPointeeType()->castAs<FunctionType>();
+ CalleeKind = CK_Block;
} else {
return;
}
- if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fn)) {
- numFormalParams = proto->getNumParams();
- } else {
- numFormalParams = 0;
- }
+ if (const auto *proto = dyn_cast<FunctionProtoType>(Fn))
+ NumFormalParams = proto->getNumParams();
+ else
+ NumFormalParams = 0;
} else {
return;
}
- // "nullPos" is the number of formal parameters at the end which
+ // "NullPos" is the number of formal parameters at the end which
// effectively count as part of the variadic arguments. This is
// useful if you would prefer to not have *any* formal parameters,
// but the language forces you to have at least one.
- unsigned nullPos = attr->getNullPos();
- assert((nullPos == 0 || nullPos == 1) && "invalid null position on sentinel");
- numFormalParams = (nullPos > numFormalParams ? 0 : numFormalParams - nullPos);
+ unsigned NullPos = Attr->getNullPos();
+ assert((NullPos == 0 || NullPos == 1) && "invalid null position on sentinel");
+ NumFormalParams = (NullPos > NumFormalParams ? 0 : NumFormalParams - NullPos);
// The number of arguments which should follow the sentinel.
- unsigned numArgsAfterSentinel = attr->getSentinel();
+ unsigned NumArgsAfterSentinel = Attr->getSentinel();
// If there aren't enough arguments for all the formal parameters,
// the sentinel, and the args after the sentinel, complain.
- if (Args.size() < numFormalParams + numArgsAfterSentinel + 1) {
+ if (Args.size() < NumFormalParams + NumArgsAfterSentinel + 1) {
Diag(Loc, diag::warn_not_enough_argument) << D->getDeclName();
- Diag(D->getLocation(), diag::note_sentinel_here) << int(calleeType);
+ Diag(D->getLocation(), diag::note_sentinel_here) << int(CalleeKind);
return;
}
// Otherwise, find the sentinel expression.
- Expr *sentinelExpr = Args[Args.size() - numArgsAfterSentinel - 1];
- if (!sentinelExpr) return;
- if (sentinelExpr->isValueDependent()) return;
- if (Context.isSentinelNullExpr(sentinelExpr)) return;
+ const Expr *SentinelExpr = Args[Args.size() - NumArgsAfterSentinel - 1];
+ if (!SentinelExpr)
+ return;
+ if (SentinelExpr->isValueDependent())
+ return;
+ if (Context.isSentinelNullExpr(SentinelExpr))
+ return;
// Pick a reasonable string to insert. Optimistically use 'nil', 'nullptr',
// or 'NULL' if those are actually defined in the context. Only use
// 'nil' for ObjC methods, where it's much more likely that the
// variadic arguments form a list of object pointers.
- SourceLocation MissingNilLoc = getLocForEndOfToken(sentinelExpr->getEndLoc());
+ SourceLocation MissingNilLoc = getLocForEndOfToken(SentinelExpr->getEndLoc());
std::string NullValue;
- if (calleeType == CT_Method && PP.isMacroDefined("nil"))
+ if (CalleeKind == CK_Method && PP.isMacroDefined("nil"))
NullValue = "nil";
else if (getLangOpts().CPlusPlus11)
NullValue = "nullptr";
@@ -477,12 +500,13 @@ void Sema::DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
NullValue = "(void*) 0";
if (MissingNilLoc.isInvalid())
- Diag(Loc, diag::warn_missing_sentinel) << int(calleeType);
+ Diag(Loc, diag::warn_missing_sentinel) << int(CalleeKind);
else
Diag(MissingNilLoc, diag::warn_missing_sentinel)
- << int(calleeType)
- << FixItHint::CreateInsertion(MissingNilLoc, ", " + NullValue);
- Diag(D->getLocation(), diag::note_sentinel_here) << int(calleeType);
+ << int(CalleeKind)
+ << FixItHint::CreateInsertion(MissingNilLoc, ", " + NullValue);
+ Diag(D->getLocation(), diag::note_sentinel_here)
+ << int(CalleeKind) << Attr->getRange();
}
SourceRange Sema::getExprRange(Expr *E) const {
@@ -496,7 +520,7 @@ SourceRange Sema::getExprRange(Expr *E) const {
/// DefaultFunctionArrayConversion (C99 6.3.2.1p3, C99 6.3.2.1p4).
ExprResult Sema::DefaultFunctionArrayConversion(Expr *E, bool Diagnose) {
// Handle any placeholder expressions which made it here.
- if (E->getType()->isPlaceholderType()) {
+ if (E->hasPlaceholderType()) {
ExprResult result = CheckPlaceholderExpr(E);
if (result.isInvalid()) return ExprError();
E = result.get();
@@ -620,7 +644,7 @@ static void DiagnoseDirectIsaAccess(Sema &S, const ObjCIvarRefExpr *OIRE,
ExprResult Sema::DefaultLvalueConversion(Expr *E) {
// Handle any placeholder expressions which made it here.
- if (E->getType()->isPlaceholderType()) {
+ if (E->hasPlaceholderType()) {
ExprResult result = CheckPlaceholderExpr(E);
if (result.isInvalid()) return ExprError();
E = result.get();
@@ -772,6 +796,40 @@ ExprResult Sema::UsualUnaryConversions(Expr *E) {
QualType Ty = E->getType();
assert(!Ty.isNull() && "UsualUnaryConversions - missing type");
+ LangOptions::FPEvalMethodKind EvalMethod = CurFPFeatures.getFPEvalMethod();
+ if (EvalMethod != LangOptions::FEM_Source && Ty->isFloatingType() &&
+ (getLangOpts().getFPEvalMethod() !=
+ LangOptions::FPEvalMethodKind::FEM_UnsetOnCommandLine ||
+ PP.getLastFPEvalPragmaLocation().isValid())) {
+ switch (EvalMethod) {
+ default:
+ llvm_unreachable("Unrecognized float evaluation method");
+ break;
+ case LangOptions::FEM_UnsetOnCommandLine:
+ llvm_unreachable("Float evaluation method should be set by now");
+ break;
+ case LangOptions::FEM_Double:
+ if (Context.getFloatingTypeOrder(Context.DoubleTy, Ty) > 0)
+ // Widen the expression to double.
+ return Ty->isComplexType()
+ ? ImpCastExprToType(E,
+ Context.getComplexType(Context.DoubleTy),
+ CK_FloatingComplexCast)
+ : ImpCastExprToType(E, Context.DoubleTy, CK_FloatingCast);
+ break;
+ case LangOptions::FEM_Extended:
+ if (Context.getFloatingTypeOrder(Context.LongDoubleTy, Ty) > 0)
+ // Widen the expression to long double.
+ return Ty->isComplexType()
+ ? ImpCastExprToType(
+ E, Context.getComplexType(Context.LongDoubleTy),
+ CK_FloatingComplexCast)
+ : ImpCastExprToType(E, Context.LongDoubleTy,
+ CK_FloatingCast);
+ break;
+ }
+ }
+
// Half FP have to be promoted to float unless it is natively supported
if (Ty->isHalfType() && !getLangOpts().NativeHalfType)
return ImpCastExprToType(Res.get(), Context.FloatTy, CK_FloatingCast);
@@ -798,7 +856,7 @@ ExprResult Sema::UsualUnaryConversions(Expr *E) {
E = ImpCastExprToType(E, PTy, CK_IntegralCast).get();
return E;
}
- if (Ty->isPromotableIntegerType()) {
+ if (Context.isPromotableIntegerType(Ty)) {
QualType PT = Context.getPromotedIntegerType(Ty);
E = ImpCastExprToType(E, PT, CK_IntegralCast).get();
return E;
@@ -897,6 +955,11 @@ Sema::VarArgKind Sema::isValidVarArgType(const QualType &Ty) {
if (Ty.isDestructedType() == QualType::DK_nontrivial_c_struct)
return VAK_Invalid;
+ if (Context.getTargetInfo().getTriple().isWasm() &&
+ Ty.isWebAssemblyReferenceType()) {
+ return VAK_Invalid;
+ }
+
if (Ty.isCXX98PODType(Context))
return VAK_Valid;
@@ -937,7 +1000,7 @@ void Sema::checkVariadicArgument(const Expr *E, VariadicCallType CT) {
DiagRuntimeBehavior(
E->getBeginLoc(), nullptr,
PDiag(diag::warn_cxx98_compat_pass_non_pod_arg_to_vararg) << Ty << CT);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case VAK_Valid:
if (Ty->isRecordType()) {
// This is unlikely to be what the user intended. If the class has a
@@ -1017,7 +1080,7 @@ ExprResult Sema::DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
return ExprError();
ExprResult Call = BuildCallExpr(TUScope, TrapFn.get(), E->getBeginLoc(),
- None, E->getEndLoc());
+ std::nullopt, E->getEndLoc());
if (Call.isInvalid())
return ExprError();
@@ -1049,7 +1112,7 @@ static bool handleIntegerToComplexFloatConversion(Sema &S, ExprResult &IntExpr,
if (IntTy->isComplexType() || IntTy->isRealFloatingType()) return true;
if (SkipCast) return false;
if (IntTy->isIntegerType()) {
- QualType fpTy = cast<ComplexType>(ComplexTy)->getElementType();
+ QualType fpTy = ComplexTy->castAs<ComplexType>()->getElementType();
IntExpr = S.ImpCastExprToType(IntExpr.get(), fpTy, CK_IntegralToFloating);
IntExpr = S.ImpCastExprToType(IntExpr.get(), ComplexTy,
CK_FloatingRealToComplex);
@@ -1061,60 +1124,59 @@ static bool handleIntegerToComplexFloatConversion(Sema &S, ExprResult &IntExpr,
return false;
}
+// This handles complex/complex, complex/float, or float/complex.
+// When both operands are complex, the shorter operand is converted to the
+// type of the longer, and that is the type of the result. This corresponds
+// to what is done when combining two real floating-point operands.
+// The fun begins when size promotion occur across type domains.
+// From H&S 6.3.4: When one operand is complex and the other is a real
+// floating-point type, the less precise type is converted, within it's
+// real or complex domain, to the precision of the other type. For example,
+// when combining a "long double" with a "double _Complex", the
+// "double _Complex" is promoted to "long double _Complex".
+static QualType handleComplexFloatConversion(Sema &S, ExprResult &Shorter,
+ QualType ShorterType,
+ QualType LongerType,
+ bool PromotePrecision) {
+ bool LongerIsComplex = isa<ComplexType>(LongerType.getCanonicalType());
+ QualType Result =
+ LongerIsComplex ? LongerType : S.Context.getComplexType(LongerType);
+
+ if (PromotePrecision) {
+ if (isa<ComplexType>(ShorterType.getCanonicalType())) {
+ Shorter =
+ S.ImpCastExprToType(Shorter.get(), Result, CK_FloatingComplexCast);
+ } else {
+ if (LongerIsComplex)
+ LongerType = LongerType->castAs<ComplexType>()->getElementType();
+ Shorter = S.ImpCastExprToType(Shorter.get(), LongerType, CK_FloatingCast);
+ }
+ }
+ return Result;
+}
+
/// Handle arithmetic conversion with complex types. Helper function of
/// UsualArithmeticConversions()
-static QualType handleComplexFloatConversion(Sema &S, ExprResult &LHS,
- ExprResult &RHS, QualType LHSType,
- QualType RHSType,
- bool IsCompAssign) {
+static QualType handleComplexConversion(Sema &S, ExprResult &LHS,
+ ExprResult &RHS, QualType LHSType,
+ QualType RHSType, bool IsCompAssign) {
// if we have an integer operand, the result is the complex type.
if (!handleIntegerToComplexFloatConversion(S, RHS, LHS, RHSType, LHSType,
- /*skipCast*/false))
+ /*SkipCast=*/false))
return LHSType;
if (!handleIntegerToComplexFloatConversion(S, LHS, RHS, LHSType, RHSType,
- /*skipCast*/IsCompAssign))
+ /*SkipCast=*/IsCompAssign))
return RHSType;
- // This handles complex/complex, complex/float, or float/complex.
- // When both operands are complex, the shorter operand is converted to the
- // type of the longer, and that is the type of the result. This corresponds
- // to what is done when combining two real floating-point operands.
- // The fun begins when size promotion occur across type domains.
- // From H&S 6.3.4: When one operand is complex and the other is a real
- // floating-point type, the less precise type is converted, within it's
- // real or complex domain, to the precision of the other type. For example,
- // when combining a "long double" with a "double _Complex", the
- // "double _Complex" is promoted to "long double _Complex".
-
// Compute the rank of the two types, regardless of whether they are complex.
int Order = S.Context.getFloatingTypeOrder(LHSType, RHSType);
-
- auto *LHSComplexType = dyn_cast<ComplexType>(LHSType);
- auto *RHSComplexType = dyn_cast<ComplexType>(RHSType);
- QualType LHSElementType =
- LHSComplexType ? LHSComplexType->getElementType() : LHSType;
- QualType RHSElementType =
- RHSComplexType ? RHSComplexType->getElementType() : RHSType;
-
- QualType ResultType = S.Context.getComplexType(LHSElementType);
- if (Order < 0) {
+ if (Order < 0)
// Promote the precision of the LHS if not an assignment.
- ResultType = S.Context.getComplexType(RHSElementType);
- if (!IsCompAssign) {
- if (LHSComplexType)
- LHS =
- S.ImpCastExprToType(LHS.get(), ResultType, CK_FloatingComplexCast);
- else
- LHS = S.ImpCastExprToType(LHS.get(), RHSElementType, CK_FloatingCast);
- }
- } else if (Order > 0) {
- // Promote the precision of the RHS.
- if (RHSComplexType)
- RHS = S.ImpCastExprToType(RHS.get(), ResultType, CK_FloatingComplexCast);
- else
- RHS = S.ImpCastExprToType(RHS.get(), LHSElementType, CK_FloatingCast);
- }
- return ResultType;
+ return handleComplexFloatConversion(S, LHS, LHSType, RHSType,
+ /*PromotePrecision=*/!IsCompAssign);
+ // Promote the precision of the RHS unless it is already the same as the LHS.
+ return handleComplexFloatConversion(S, RHS, RHSType, LHSType,
+ /*PromotePrecision=*/Order > 0);
}
/// Handle arithmetic conversion from integer to float. Helper function
@@ -1197,45 +1259,32 @@ static QualType handleFloatConversion(Sema &S, ExprResult &LHS,
/*ConvertInt=*/!IsCompAssign);
}
-/// Diagnose attempts to convert between __float128 and long double if
-/// there is no support for such conversion. Helper function of
-/// UsualArithmeticConversions().
+/// Diagnose attempts to convert between __float128, __ibm128 and
+/// long double if there is no support for such conversion.
+/// Helper function of UsualArithmeticConversions().
static bool unsupportedTypeConversion(const Sema &S, QualType LHSType,
QualType RHSType) {
- /* No issue converting if at least one of the types is not a floating point
- type or the two types have the same rank.
- */
- if (!LHSType->isFloatingType() || !RHSType->isFloatingType() ||
- S.Context.getFloatingTypeOrder(LHSType, RHSType) == 0)
+ // No issue if either is not a floating point type.
+ if (!LHSType->isFloatingType() || !RHSType->isFloatingType())
return false;
- assert(LHSType->isFloatingType() && RHSType->isFloatingType() &&
- "The remaining types must be floating point types.");
-
+ // No issue if both have the same 128-bit float semantics.
auto *LHSComplex = LHSType->getAs<ComplexType>();
auto *RHSComplex = RHSType->getAs<ComplexType>();
- QualType LHSElemType = LHSComplex ?
- LHSComplex->getElementType() : LHSType;
- QualType RHSElemType = RHSComplex ?
- RHSComplex->getElementType() : RHSType;
+ QualType LHSElem = LHSComplex ? LHSComplex->getElementType() : LHSType;
+ QualType RHSElem = RHSComplex ? RHSComplex->getElementType() : RHSType;
- // No issue if the two types have the same representation
- if (&S.Context.getFloatTypeSemantics(LHSElemType) ==
- &S.Context.getFloatTypeSemantics(RHSElemType))
- return false;
+ const llvm::fltSemantics &LHSSem = S.Context.getFloatTypeSemantics(LHSElem);
+ const llvm::fltSemantics &RHSSem = S.Context.getFloatTypeSemantics(RHSElem);
- bool Float128AndLongDouble = (LHSElemType == S.Context.Float128Ty &&
- RHSElemType == S.Context.LongDoubleTy);
- Float128AndLongDouble |= (LHSElemType == S.Context.LongDoubleTy &&
- RHSElemType == S.Context.Float128Ty);
+ if ((&LHSSem != &llvm::APFloat::PPCDoubleDouble() ||
+ &RHSSem != &llvm::APFloat::IEEEquad()) &&
+ (&LHSSem != &llvm::APFloat::IEEEquad() ||
+ &RHSSem != &llvm::APFloat::PPCDoubleDouble()))
+ return false;
- // We've handled the situation where __float128 and long double have the same
- // representation. We allow all conversions for all possible long double types
- // except PPC's double double.
- return Float128AndLongDouble &&
- (&S.Context.getFloatTypeSemantics(S.Context.LongDoubleTy) ==
- &llvm::APFloat::PPCDoubleDouble());
+ return true;
}
typedef ExprResult PerformCastFn(Sema &S, Expr *operand, QualType toType);
@@ -1454,16 +1503,22 @@ static void checkEnumArithmeticConversions(Sema &S, Expr *LHS, Expr *RHS,
bool IsCompAssign = ACK == Sema::ACK_CompAssign;
if ((!IsCompAssign && LEnum && R->isFloatingType()) ||
(REnum && L->isFloatingType())) {
- S.Diag(Loc, S.getLangOpts().CPlusPlus20
+ S.Diag(Loc, S.getLangOpts().CPlusPlus26
+ ? diag::err_arith_conv_enum_float_cxx26
+ : S.getLangOpts().CPlusPlus20
? diag::warn_arith_conv_enum_float_cxx20
: diag::warn_arith_conv_enum_float)
- << LHS->getSourceRange() << RHS->getSourceRange()
- << (int)ACK << LEnum << L << R;
+ << LHS->getSourceRange() << RHS->getSourceRange() << (int)ACK << LEnum
+ << L << R;
} else if (!IsCompAssign && LEnum && REnum &&
!S.Context.hasSameUnqualifiedType(L, R)) {
unsigned DiagID;
- if (!L->castAs<EnumType>()->getDecl()->hasNameForLinkage() ||
- !R->castAs<EnumType>()->getDecl()->hasNameForLinkage()) {
+ // In C++ 26, usual arithmetic conversions between 2 different enum types
+ // are ill-formed.
+ if (S.getLangOpts().CPlusPlus26)
+ DiagID = diag::err_conv_mixed_enum_types_cxx26;
+ else if (!L->castAs<EnumType>()->getDecl()->hasNameForLinkage() ||
+ !R->castAs<EnumType>()->getDecl()->hasNameForLinkage()) {
// If either enumeration type is unnamed, it's less likely that the
// user cares about this, but this situation is still deprecated in
// C++2a. Use a different warning group.
@@ -1513,18 +1568,16 @@ QualType Sema::UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
// For conversion purposes, we ignore any qualifiers.
// For example, "const float" and "float" are equivalent.
- QualType LHSType =
- Context.getCanonicalType(LHS.get()->getType()).getUnqualifiedType();
- QualType RHSType =
- Context.getCanonicalType(RHS.get()->getType()).getUnqualifiedType();
+ QualType LHSType = LHS.get()->getType().getUnqualifiedType();
+ QualType RHSType = RHS.get()->getType().getUnqualifiedType();
// For conversion purposes, we ignore any atomic qualifier on the LHS.
if (const AtomicType *AtomicLHS = LHSType->getAs<AtomicType>())
LHSType = AtomicLHS->getValueType();
// If both types are identical, no conversion is needed.
- if (LHSType == RHSType)
- return LHSType;
+ if (Context.hasSameType(LHSType, RHSType))
+ return Context.getCommonSugaredType(LHSType, RHSType);
// If either side is a non-arithmetic type (e.g. a pointer), we are done.
// The caller can deal with this (e.g. pointer + int).
@@ -1533,7 +1586,7 @@ QualType Sema::UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
// Apply unary and bitfield promotions to the LHS's type.
QualType LHSUnpromotedType = LHSType;
- if (LHSType->isPromotableIntegerType())
+ if (Context.isPromotableIntegerType(LHSType))
LHSType = Context.getPromotedIntegerType(LHSType);
QualType LHSBitfieldPromoteTy = Context.isPromotableBitField(LHS.get());
if (!LHSBitfieldPromoteTy.isNull())
@@ -1542,20 +1595,20 @@ QualType Sema::UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
LHS = ImpCastExprToType(LHS.get(), LHSType, CK_IntegralCast);
// If both types are identical, no conversion is needed.
- if (LHSType == RHSType)
- return LHSType;
+ if (Context.hasSameType(LHSType, RHSType))
+ return Context.getCommonSugaredType(LHSType, RHSType);
// At this point, we have two different arithmetic types.
- // Diagnose attempts to convert between __float128 and long double where
- // such conversions currently can't be handled.
+ // Diagnose attempts to convert between __ibm128, __float128 and long double
+ // where such conversions currently can't be handled.
if (unsupportedTypeConversion(*this, LHSType, RHSType))
return QualType();
// Handle complex types first (C99 6.3.1.8p1).
if (LHSType->isComplexType() || RHSType->isComplexType())
- return handleComplexFloatConversion(*this, LHS, RHS, LHSType, RHSType,
- ACK == ACK_CompAssign);
+ return handleComplexConversion(*this, LHS, RHS, LHSType, RHSType,
+ ACK == ACK_CompAssign);
// Now handle "real" floating types (i.e. float, double, long double).
if (LHSType->isRealFloatingType() || RHSType->isRealFloatingType())
@@ -1580,13 +1633,10 @@ QualType Sema::UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
//===----------------------------------------------------------------------===//
-ExprResult
-Sema::ActOnGenericSelectionExpr(SourceLocation KeyLoc,
- SourceLocation DefaultLoc,
- SourceLocation RParenLoc,
- Expr *ControllingExpr,
- ArrayRef<ParsedType> ArgTypes,
- ArrayRef<Expr *> ArgExprs) {
+ExprResult Sema::ActOnGenericSelectionExpr(
+ SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc,
+ bool PredicateIsExpr, void *ControllingExprOrType,
+ ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs) {
unsigned NumAssocs = ArgTypes.size();
assert(NumAssocs == ArgExprs.size());
@@ -1598,47 +1648,68 @@ Sema::ActOnGenericSelectionExpr(SourceLocation KeyLoc,
Types[i] = nullptr;
}
- ExprResult ER = CreateGenericSelectionExpr(KeyLoc, DefaultLoc, RParenLoc,
- ControllingExpr,
- llvm::makeArrayRef(Types, NumAssocs),
- ArgExprs);
+ // If we have a controlling type, we need to convert it from a parsed type
+ // into a semantic type and then pass that along.
+ if (!PredicateIsExpr) {
+ TypeSourceInfo *ControllingType;
+ (void)GetTypeFromParser(ParsedType::getFromOpaquePtr(ControllingExprOrType),
+ &ControllingType);
+ assert(ControllingType && "couldn't get the type out of the parser");
+ ControllingExprOrType = ControllingType;
+ }
+
+ ExprResult ER = CreateGenericSelectionExpr(
+ KeyLoc, DefaultLoc, RParenLoc, PredicateIsExpr, ControllingExprOrType,
+ llvm::ArrayRef(Types, NumAssocs), ArgExprs);
delete [] Types;
return ER;
}
-ExprResult
-Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
- SourceLocation DefaultLoc,
- SourceLocation RParenLoc,
- Expr *ControllingExpr,
- ArrayRef<TypeSourceInfo *> Types,
- ArrayRef<Expr *> Exprs) {
+ExprResult Sema::CreateGenericSelectionExpr(
+ SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc,
+ bool PredicateIsExpr, void *ControllingExprOrType,
+ ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs) {
unsigned NumAssocs = Types.size();
assert(NumAssocs == Exprs.size());
-
- // Decay and strip qualifiers for the controlling expression type, and handle
- // placeholder type replacement. See committee discussion from WG14 DR423.
- {
+ assert(ControllingExprOrType &&
+ "Must have either a controlling expression or a controlling type");
+
+ Expr *ControllingExpr = nullptr;
+ TypeSourceInfo *ControllingType = nullptr;
+ if (PredicateIsExpr) {
+ // Decay and strip qualifiers for the controlling expression type, and
+ // handle placeholder type replacement. See committee discussion from WG14
+ // DR423.
EnterExpressionEvaluationContext Unevaluated(
*this, Sema::ExpressionEvaluationContext::Unevaluated);
- ExprResult R = DefaultFunctionArrayLvalueConversion(ControllingExpr);
+ ExprResult R = DefaultFunctionArrayLvalueConversion(
+ reinterpret_cast<Expr *>(ControllingExprOrType));
if (R.isInvalid())
return ExprError();
ControllingExpr = R.get();
+ } else {
+ // The extension form uses the type directly rather than converting it.
+ ControllingType = reinterpret_cast<TypeSourceInfo *>(ControllingExprOrType);
+ if (!ControllingType)
+ return ExprError();
}
+ bool TypeErrorFound = false,
+ IsResultDependent = ControllingExpr
+ ? ControllingExpr->isTypeDependent()
+ : ControllingType->getType()->isDependentType(),
+ ContainsUnexpandedParameterPack =
+ ControllingExpr
+ ? ControllingExpr->containsUnexpandedParameterPack()
+ : ControllingType->getType()->containsUnexpandedParameterPack();
+
// The controlling expression is an unevaluated operand, so side effects are
// likely unintended.
- if (!inTemplateInstantiation() &&
+ if (!inTemplateInstantiation() && !IsResultDependent && ControllingExpr &&
ControllingExpr->HasSideEffects(Context, false))
Diag(ControllingExpr->getExprLoc(),
diag::warn_side_effects_unevaluated_context);
- bool TypeErrorFound = false,
- IsResultDependent = ControllingExpr->isTypeDependent(),
- ContainsUnexpandedParameterPack
- = ControllingExpr->containsUnexpandedParameterPack();
-
for (unsigned i = 0; i < NumAssocs; ++i) {
if (Exprs[i]->containsUnexpandedParameterPack())
ContainsUnexpandedParameterPack = true;
@@ -1650,15 +1721,55 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
if (Types[i]->getType()->isDependentType()) {
IsResultDependent = true;
} else {
+ // We relax the restriction on use of incomplete types and non-object
+ // types with the type-based extension of _Generic. Allowing incomplete
+ // objects means those can be used as "tags" for a type-safe way to map
+ // to a value. Similarly, matching on function types rather than
+ // function pointer types can be useful. However, the restriction on VM
+ // types makes sense to retain as there are open questions about how
+ // the selection can be made at compile time.
+ //
// C11 6.5.1.1p2 "The type name in a generic association shall specify a
// complete object type other than a variably modified type."
unsigned D = 0;
- if (Types[i]->getType()->isIncompleteType())
+ if (ControllingExpr && Types[i]->getType()->isIncompleteType())
D = diag::err_assoc_type_incomplete;
- else if (!Types[i]->getType()->isObjectType())
+ else if (ControllingExpr && !Types[i]->getType()->isObjectType())
D = diag::err_assoc_type_nonobject;
else if (Types[i]->getType()->isVariablyModifiedType())
D = diag::err_assoc_type_variably_modified;
+ else if (ControllingExpr) {
+ // Because the controlling expression undergoes lvalue conversion,
+ // array conversion, and function conversion, an association which is
+ // of array type, function type, or is qualified can never be
+ // reached. We will warn about this so users are less surprised by
+ // the unreachable association. However, we don't have to handle
+ // function types; that's not an object type, so it's handled above.
+ //
+ // The logic is somewhat different for C++ because C++ has different
+ // lvalue to rvalue conversion rules than C. [conv.lvalue]p1 says,
+ // If T is a non-class type, the type of the prvalue is the cv-
+ // unqualified version of T. Otherwise, the type of the prvalue is T.
+ // The result of these rules is that all qualified types in an
+ // association in C are unreachable, and in C++, only qualified non-
+ // class types are unreachable.
+ //
+ // NB: this does not apply when the first operand is a type rather
+ // than an expression, because the type form does not undergo
+ // conversion.
+ unsigned Reason = 0;
+ QualType QT = Types[i]->getType();
+ if (QT->isArrayType())
+ Reason = 1;
+ else if (QT.hasQualifiers() &&
+ (!LangOpts.CPlusPlus || !QT->isRecordType()))
+ Reason = 2;
+
+ if (Reason)
+ Diag(Types[i]->getTypeLoc().getBeginLoc(),
+ diag::warn_unreachable_association)
+ << QT << (Reason - 1);
+ }
if (D != 0) {
Diag(Types[i]->getTypeLoc().getBeginLoc(), D)
@@ -1692,31 +1803,60 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
// If we determined that the generic selection is result-dependent, don't
// try to compute the result expression.
- if (IsResultDependent)
- return GenericSelectionExpr::Create(Context, KeyLoc, ControllingExpr, Types,
+ if (IsResultDependent) {
+ if (ControllingExpr)
+ return GenericSelectionExpr::Create(Context, KeyLoc, ControllingExpr,
+ Types, Exprs, DefaultLoc, RParenLoc,
+ ContainsUnexpandedParameterPack);
+ return GenericSelectionExpr::Create(Context, KeyLoc, ControllingType, Types,
Exprs, DefaultLoc, RParenLoc,
ContainsUnexpandedParameterPack);
+ }
SmallVector<unsigned, 1> CompatIndices;
unsigned DefaultIndex = -1U;
+ // Look at the canonical type of the controlling expression in case it was a
+ // deduced type like __auto_type. However, when issuing diagnostics, use the
+ // type the user wrote in source rather than the canonical one.
for (unsigned i = 0; i < NumAssocs; ++i) {
if (!Types[i])
DefaultIndex = i;
- else if (Context.typesAreCompatible(ControllingExpr->getType(),
- Types[i]->getType()))
+ else if (ControllingExpr &&
+ Context.typesAreCompatible(
+ ControllingExpr->getType().getCanonicalType(),
+ Types[i]->getType()))
+ CompatIndices.push_back(i);
+ else if (ControllingType &&
+ Context.typesAreCompatible(
+ ControllingType->getType().getCanonicalType(),
+ Types[i]->getType()))
CompatIndices.push_back(i);
}
+ auto GetControllingRangeAndType = [](Expr *ControllingExpr,
+ TypeSourceInfo *ControllingType) {
+ // We strip parens here because the controlling expression is typically
+ // parenthesized in macro definitions.
+ if (ControllingExpr)
+ ControllingExpr = ControllingExpr->IgnoreParens();
+
+ SourceRange SR = ControllingExpr
+ ? ControllingExpr->getSourceRange()
+ : ControllingType->getTypeLoc().getSourceRange();
+ QualType QT = ControllingExpr ? ControllingExpr->getType()
+ : ControllingType->getType();
+
+ return std::make_pair(SR, QT);
+ };
+
// C11 6.5.1.1p2 "The controlling expression of a generic selection shall have
// type compatible with at most one of the types named in its generic
// association list."
if (CompatIndices.size() > 1) {
- // We strip parens here because the controlling expression is typically
- // parenthesized in macro definitions.
- ControllingExpr = ControllingExpr->IgnoreParens();
- Diag(ControllingExpr->getBeginLoc(), diag::err_generic_sel_multi_match)
- << ControllingExpr->getSourceRange() << ControllingExpr->getType()
- << (unsigned)CompatIndices.size();
+ auto P = GetControllingRangeAndType(ControllingExpr, ControllingType);
+ SourceRange SR = P.first;
+ Diag(SR.getBegin(), diag::err_generic_sel_multi_match)
+ << SR << P.second << (unsigned)CompatIndices.size();
for (unsigned I : CompatIndices) {
Diag(Types[I]->getTypeLoc().getBeginLoc(),
diag::note_compat_assoc)
@@ -1730,11 +1870,9 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
// its controlling expression shall have type compatible with exactly one of
// the types named in its generic association list."
if (DefaultIndex == -1U && CompatIndices.size() == 0) {
- // We strip parens here because the controlling expression is typically
- // parenthesized in macro definitions.
- ControllingExpr = ControllingExpr->IgnoreParens();
- Diag(ControllingExpr->getBeginLoc(), diag::err_generic_sel_no_match)
- << ControllingExpr->getSourceRange() << ControllingExpr->getType();
+ auto P = GetControllingRangeAndType(ControllingExpr, ControllingType);
+ SourceRange SR = P.first;
+ Diag(SR.getBegin(), diag::err_generic_sel_no_match) << SR << P.second;
return ExprError();
}
@@ -1746,11 +1884,46 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
unsigned ResultIndex =
CompatIndices.size() ? CompatIndices[0] : DefaultIndex;
+ if (ControllingExpr) {
+ return GenericSelectionExpr::Create(
+ Context, KeyLoc, ControllingExpr, Types, Exprs, DefaultLoc, RParenLoc,
+ ContainsUnexpandedParameterPack, ResultIndex);
+ }
return GenericSelectionExpr::Create(
- Context, KeyLoc, ControllingExpr, Types, Exprs, DefaultLoc, RParenLoc,
+ Context, KeyLoc, ControllingType, Types, Exprs, DefaultLoc, RParenLoc,
ContainsUnexpandedParameterPack, ResultIndex);
}
+static PredefinedIdentKind getPredefinedExprKind(tok::TokenKind Kind) {
+ switch (Kind) {
+ default:
+ llvm_unreachable("unexpected TokenKind");
+ case tok::kw___func__:
+ return PredefinedIdentKind::Func; // [C99 6.4.2.2]
+ case tok::kw___FUNCTION__:
+ return PredefinedIdentKind::Function;
+ case tok::kw___FUNCDNAME__:
+ return PredefinedIdentKind::FuncDName; // [MS]
+ case tok::kw___FUNCSIG__:
+ return PredefinedIdentKind::FuncSig; // [MS]
+ case tok::kw_L__FUNCTION__:
+ return PredefinedIdentKind::LFunction; // [MS]
+ case tok::kw_L__FUNCSIG__:
+ return PredefinedIdentKind::LFuncSig; // [MS]
+ case tok::kw___PRETTY_FUNCTION__:
+ return PredefinedIdentKind::PrettyFunction; // [GNU]
+ }
+}
+
+/// getPredefinedExprDecl - Returns Decl of a given DeclContext that can be used
+/// to determine the value of a PredefinedExpr. This can be either a
+/// block, lambda, captured statement, function, otherwise a nullptr.
+static Decl *getPredefinedExprDecl(DeclContext *DC) {
+ while (DC && !isa<BlockDecl, CapturedDecl, FunctionDecl, ObjCMethodDecl>(DC))
+ DC = DC->getParent();
+ return cast_or_null<Decl>(DC);
+}
+
/// getUDSuffixLoc - Create a SourceLocation for a ud-suffix, given the
/// location of the token and the offset of the ud-suffix within it.
static SourceLocation getUDSuffixLoc(Sema &S, SourceLocation TokLoc,
@@ -1781,7 +1954,7 @@ static ExprResult BuildCookedLiteralOperatorCall(Sema &S, Scope *Scope,
OpNameInfo.setCXXLiteralOperatorNameLoc(UDSuffixLoc);
LookupResult R(S, OpName, UDSuffixLoc, Sema::LookupOrdinaryName);
- if (S.LookupLiteralOperator(Scope, R, llvm::makeArrayRef(ArgTy, Args.size()),
+ if (S.LookupLiteralOperator(Scope, R, llvm::ArrayRef(ArgTy, Args.size()),
/*AllowRaw*/ false, /*AllowTemplate*/ false,
/*AllowStringTemplatePack*/ false,
/*DiagnoseMissing*/ true) == Sema::LOLR_Error)
@@ -1790,6 +1963,84 @@ static ExprResult BuildCookedLiteralOperatorCall(Sema &S, Scope *Scope,
return S.BuildLiteralOperatorCall(R, OpNameInfo, Args, LitEndLoc);
}
+ExprResult Sema::ActOnUnevaluatedStringLiteral(ArrayRef<Token> StringToks) {
+ // StringToks needs backing storage as it doesn't hold array elements itself
+ std::vector<Token> ExpandedToks;
+ if (getLangOpts().MicrosoftExt)
+ StringToks = ExpandedToks = ExpandFunctionLocalPredefinedMacros(StringToks);
+
+ StringLiteralParser Literal(StringToks, PP,
+ StringLiteralEvalMethod::Unevaluated);
+ if (Literal.hadError)
+ return ExprError();
+
+ SmallVector<SourceLocation, 4> StringTokLocs;
+ for (const Token &Tok : StringToks)
+ StringTokLocs.push_back(Tok.getLocation());
+
+ StringLiteral *Lit = StringLiteral::Create(
+ Context, Literal.GetString(), StringLiteralKind::Unevaluated, false, {},
+ &StringTokLocs[0], StringTokLocs.size());
+
+ if (!Literal.getUDSuffix().empty()) {
+ SourceLocation UDSuffixLoc =
+ getUDSuffixLoc(*this, StringTokLocs[Literal.getUDSuffixToken()],
+ Literal.getUDSuffixOffset());
+ return ExprError(Diag(UDSuffixLoc, diag::err_invalid_string_udl));
+ }
+
+ return Lit;
+}
+
+std::vector<Token>
+Sema::ExpandFunctionLocalPredefinedMacros(ArrayRef<Token> Toks) {
+ // MSVC treats some predefined identifiers (e.g. __FUNCTION__) as function
+ // local macros that expand to string literals that may be concatenated.
+ // These macros are expanded here (in Sema), because StringLiteralParser
+ // (in Lex) doesn't know the enclosing function (because it hasn't been
+ // parsed yet).
+ assert(getLangOpts().MicrosoftExt);
+
+ // Note: Although function local macros are defined only inside functions,
+ // we ensure a valid `CurrentDecl` even outside of a function. This allows
+ // expansion of macros into empty string literals without additional checks.
+ Decl *CurrentDecl = getPredefinedExprDecl(CurContext);
+ if (!CurrentDecl)
+ CurrentDecl = Context.getTranslationUnitDecl();
+
+ std::vector<Token> ExpandedToks;
+ ExpandedToks.reserve(Toks.size());
+ for (const Token &Tok : Toks) {
+ if (!isFunctionLocalStringLiteralMacro(Tok.getKind(), getLangOpts())) {
+ assert(tok::isStringLiteral(Tok.getKind()));
+ ExpandedToks.emplace_back(Tok);
+ continue;
+ }
+ if (isa<TranslationUnitDecl>(CurrentDecl))
+ Diag(Tok.getLocation(), diag::ext_predef_outside_function);
+ // Stringify predefined expression
+ Diag(Tok.getLocation(), diag::ext_string_literal_from_predefined)
+ << Tok.getKind();
+ SmallString<64> Str;
+ llvm::raw_svector_ostream OS(Str);
+ Token &Exp = ExpandedToks.emplace_back();
+ Exp.startToken();
+ if (Tok.getKind() == tok::kw_L__FUNCTION__ ||
+ Tok.getKind() == tok::kw_L__FUNCSIG__) {
+ OS << 'L';
+ Exp.setKind(tok::wide_string_literal);
+ } else {
+ Exp.setKind(tok::string_literal);
+ }
+ OS << '"'
+ << Lexer::Stringify(PredefinedExpr::ComputeName(
+ getPredefinedExprKind(Tok.getKind()), CurrentDecl))
+ << '"';
+ PP.CreateString(OS.str(), Exp, Tok.getLocation(), Tok.getEndLoc());
+ }
+ return ExpandedToks;
+}
+
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz"). The result string has to handle string
/// concatenation ([C99 5.1.1.2, translation phase #6]), so it may come from
@@ -1800,6 +2051,11 @@ ExprResult
Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) {
assert(!StringToks.empty() && "Must have at least one string!");
+ // StringToks needs backing storage as it doesn't hold array elements itself
+ std::vector<Token> ExpandedToks;
+ if (getLangOpts().MicrosoftExt)
+ StringToks = ExpandedToks = ExpandFunctionLocalPredefinedMacros(StringToks);
+
StringLiteralParser Literal(StringToks, PP);
if (Literal.hadError)
return ExprError();
@@ -1809,20 +2065,20 @@ Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) {
StringTokLocs.push_back(Tok.getLocation());
QualType CharTy = Context.CharTy;
- StringLiteral::StringKind Kind = StringLiteral::Ascii;
+ StringLiteralKind Kind = StringLiteralKind::Ordinary;
if (Literal.isWide()) {
CharTy = Context.getWideCharType();
- Kind = StringLiteral::Wide;
+ Kind = StringLiteralKind::Wide;
} else if (Literal.isUTF8()) {
if (getLangOpts().Char8)
CharTy = Context.Char8Ty;
- Kind = StringLiteral::UTF8;
+ Kind = StringLiteralKind::UTF8;
} else if (Literal.isUTF16()) {
CharTy = Context.Char16Ty;
- Kind = StringLiteral::UTF16;
+ Kind = StringLiteralKind::UTF16;
} else if (Literal.isUTF32()) {
CharTy = Context.Char32Ty;
- Kind = StringLiteral::UTF32;
+ Kind = StringLiteralKind::UTF32;
} else if (Literal.isPascal()) {
CharTy = Context.UnsignedCharTy;
}
@@ -1830,7 +2086,7 @@ Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) {
// Warn on initializing an array of char from a u8 string literal; this
// becomes ill-formed in C++2a.
if (getLangOpts().CPlusPlus && !getLangOpts().CPlusPlus20 &&
- !getLangOpts().Char8 && Kind == StringLiteral::UTF8) {
+ !getLangOpts().Char8 && Kind == StringLiteralKind::UTF8) {
Diag(StringTokLocs.front(), diag::warn_cxx20_compat_utf8_string);
// Create removals for all 'u8' prefixes in the string literal(s). This
@@ -1906,8 +2162,8 @@ Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) {
TemplateArgument Arg(Lit);
TemplateArgumentLocInfo ArgInfo(Lit);
ExplicitArgs.addArgument(TemplateArgumentLoc(Arg, ArgInfo));
- return BuildLiteralOperatorCall(R, OpNameInfo, None, StringTokLocs.back(),
- &ExplicitArgs);
+ return BuildLiteralOperatorCall(R, OpNameInfo, std::nullopt,
+ StringTokLocs.back(), &ExplicitArgs);
}
case LOLR_StringTemplatePack: {
@@ -1927,8 +2183,8 @@ Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) {
TemplateArgumentLocInfo ArgInfo;
ExplicitArgs.addArgument(TemplateArgumentLoc(Arg, ArgInfo));
}
- return BuildLiteralOperatorCall(R, OpNameInfo, None, StringTokLocs.back(),
- &ExplicitArgs);
+ return BuildLiteralOperatorCall(R, OpNameInfo, std::nullopt,
+ StringTokLocs.back(), &ExplicitArgs);
}
case LOLR_Raw:
case LOLR_ErrorNoDiagnostic:
@@ -2024,9 +2280,8 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
NestedNameSpecifierLoc NNS, NamedDecl *FoundD,
SourceLocation TemplateKWLoc,
const TemplateArgumentListInfo *TemplateArgs) {
- bool RefersToCapturedVariable =
- isa<VarDecl>(D) &&
- NeedToCaptureVariable(cast<VarDecl>(D), NameInfo.getLoc());
+ bool RefersToCapturedVariable = isa<VarDecl, BindingDecl>(D) &&
+ NeedToCaptureVariable(D, NameInfo.getLoc());
DeclRefExpr *E = DeclRefExpr::Create(
Context, NNS, TemplateKWLoc, D, RefersToCapturedVariable, NameInfo, Ty,
@@ -2045,9 +2300,9 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
// b) if the function is a defaulted comparison, we can use the body we
// build when defining it as input to the exception specification
// computation rather than computing a new body.
- if (auto *FPT = Ty->getAs<FunctionProtoType>()) {
+ if (const auto *FPT = Ty->getAs<FunctionProtoType>()) {
if (isUnresolvedExceptionSpec(FPT->getExceptionSpecType())) {
- if (auto *NewFPT = ResolveExceptionSpec(NameInfo.getLoc(), FPT))
+ if (const auto *NewFPT = ResolveExceptionSpec(NameInfo.getLoc(), FPT))
E->setType(Context.getQualifiedType(NewFPT, Ty.getQualifiers()));
}
}
@@ -2057,8 +2312,8 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, E->getBeginLoc()))
getCurFunction()->recordUseOfWeak(E);
- FieldDecl *FD = dyn_cast<FieldDecl>(D);
- if (IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(D))
+ const auto *FD = dyn_cast<FieldDecl>(D);
+ if (const auto *IFD = dyn_cast<IndirectFieldDecl>(D))
FD = IFD->getAnonField();
if (FD) {
UnusedPrivateFields.remove(FD);
@@ -2069,8 +2324,8 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
// C++ [expr.prim]/8: The expression [...] is a bit-field if the identifier
// designates a bit-field.
- if (auto *BD = dyn_cast<BindingDecl>(D))
- if (auto *BE = BD->getBinding())
+ if (const auto *BD = dyn_cast<BindingDecl>(D))
+ if (const auto *BE = BD->getBinding())
E->setObjectKind(BE->getObjectKind());
return E;
@@ -2149,7 +2404,7 @@ static void emitEmptyLookupTypoDiagnostic(
///
/// Return \c true if the error is unrecoverable, or \c false if the caller
/// should attempt to recover using these lookup results.
-bool Sema::DiagnoseDependentMemberLookup(LookupResult &R) {
+bool Sema::DiagnoseDependentMemberLookup(const LookupResult &R) {
// During a default argument instantiation the CurContext points
// to a CXXMethodDecl; but we can't apply a this-> fixit inside a
// function parameter list, hence add an explicit check.
@@ -2157,7 +2412,7 @@ bool Sema::DiagnoseDependentMemberLookup(LookupResult &R) {
!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.back().Kind ==
CodeSynthesisContext::DefaultFunctionArgumentInstantiation;
- CXXMethodDecl *CurMethod = dyn_cast<CXXMethodDecl>(CurContext);
+ const auto *CurMethod = dyn_cast<CXXMethodDecl>(CurContext);
bool isInstance = CurMethod && CurMethod->isInstance() &&
R.getNamingClass() == CurMethod->getParent() &&
!isDefaultArgument;
@@ -2189,7 +2444,7 @@ bool Sema::DiagnoseDependentMemberLookup(LookupResult &R) {
Diag(R.getNameLoc(), DiagID) << R.getLookupName();
}
- for (NamedDecl *D : R)
+ for (const NamedDecl *D : R)
Diag(D->getLocation(), NoteID);
// Return true if we are inside a default argument instantiation
@@ -2200,7 +2455,7 @@ bool Sema::DiagnoseDependentMemberLookup(LookupResult &R) {
// FIXME: Is this special case necessary? We could allow the caller to
// diagnose this.
if (isDefaultArgument && ((*R.begin())->isCXXInstanceMember())) {
- Diag(R.getNameLoc(), diag::err_member_call_without_object);
+ Diag(R.getNameLoc(), diag::err_member_call_without_object) << 0;
return true;
}
@@ -2214,7 +2469,8 @@ bool Sema::DiagnoseDependentMemberLookup(LookupResult &R) {
bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs,
- ArrayRef<Expr *> Args, TypoExpr **Out) {
+ ArrayRef<Expr *> Args, DeclContext *LookupCtx,
+ TypoExpr **Out) {
DeclarationName Name = R.getLookupName();
unsigned diagnostic = diag::err_undeclared_var_use;
@@ -2230,7 +2486,8 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
// unqualified lookup. This is useful when (for example) the
// original lookup would not have found something because it was a
// dependent name.
- DeclContext *DC = SS.isEmpty() ? CurContext : nullptr;
+ DeclContext *DC =
+ LookupCtx ? LookupCtx : (SS.isEmpty() ? CurContext : nullptr);
while (DC) {
if (isa<CXXRecordDecl>(DC)) {
LookupQualifiedName(R, DC);
@@ -2273,12 +2530,12 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
emitEmptyLookupTypoDiagnostic(TC, *this, SS, Name, TypoLoc, Args,
diagnostic, diagnostic_suggest);
},
- nullptr, CTK_ErrorRecovery);
+ nullptr, CTK_ErrorRecovery, LookupCtx);
if (*Out)
return true;
- } else if (S &&
- (Corrected = CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(),
- S, &SS, CCC, CTK_ErrorRecovery))) {
+ } else if (S && (Corrected =
+ CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), S,
+ &SS, CCC, CTK_ErrorRecovery, LookupCtx))) {
std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
bool DroppedSpecifier =
Corrected.WillReplaceSpecifier() && Name.getAsString() == CorrectedStr;
@@ -2527,9 +2784,10 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
if (R.isAmbiguous())
return ExprError();
- // This could be an implicitly declared function reference (legal in C90,
- // extension in C99, forbidden in C++).
- if (R.empty() && HasTrailingLParen && II && !getLangOpts().CPlusPlus) {
+ // This could be an implicitly declared function reference if the language
+ // mode allows it as a feature.
+ if (R.empty() && HasTrailingLParen && II &&
+ getLangOpts().implicitFunctionsAllowed()) {
NamedDecl *D = ImplicitlyDefineFunction(NameLoc, *II, S);
if (D) R.addDecl(D);
}
@@ -2567,7 +2825,7 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
// a template name, but we happen to have always already looked up the name
// before we get here if it must be a template name.
if (DiagnoseEmptyLookup(S, SS, R, CCC ? *CCC : DefaultValidator, nullptr,
- None, &TE)) {
+ std::nullopt, nullptr, &TE)) {
if (TE && KeywordReplacement) {
auto &State = getTypoExprState(TE);
auto BestTC = State.Consumer->getNextCorrection();
@@ -2679,6 +2937,10 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
ExprResult Sema::BuildQualifiedDeclarationNameExpr(
CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI) {
+ if (NameInfo.getName().isDependentName())
+ return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(),
+ NameInfo, /*TemplateArgs=*/nullptr);
+
DeclContext *DC = computeDeclContext(SS, false);
if (!DC)
return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(),
@@ -2735,7 +2997,7 @@ ExprResult Sema::BuildQualifiedDeclarationNameExpr(
TypeLocBuilder TLB;
TLB.pushTypeSpec(Ty).setNameLoc(NameInfo.getLoc());
- QualType ET = getElaboratedType(ETK_None, SS, Ty);
+ QualType ET = getElaboratedType(ElaboratedTypeKeyword::None, SS, Ty);
ElaboratedTypeLoc QTL = TLB.push<ElaboratedTypeLoc>(ET);
QTL.setElaboratedKeywordLoc(SourceLocation());
QTL.setQualifierLoc(SS.getWithLocInContext(Context));
@@ -2888,7 +3150,7 @@ ExprResult Sema::BuildIvarRefExpr(Scope *S, SourceLocation Loc,
!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc))
getCurFunction()->recordUseOfWeak(Result);
}
- if (getLangOpts().ObjCAutoRefCount)
+ if (getLangOpts().ObjCAutoRefCount && !isUnevaluatedContext())
if (const BlockDecl *BD = CurContext->getInnermostBlockDecl())
ImplicitlyRetainedSelfLocs.push_back({Loc, BD});
@@ -2942,7 +3204,7 @@ Sema::PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member) {
- CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Member->getDeclContext());
+ const auto *RD = dyn_cast<CXXRecordDecl>(Member->getDeclContext());
if (!RD)
return From;
@@ -2967,12 +3229,12 @@ Sema::PerformObjectMemberConversion(Expr *From,
DestType = DestRecordType;
FromRecordType = FromType;
}
- } else if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Member)) {
- if (Method->isStatic())
+ } else if (const auto *Method = dyn_cast<CXXMethodDecl>(Member)) {
+ if (!Method->isImplicitObjectMemberFunction())
return From;
- DestType = Method->getThisType();
- DestRecordType = DestType->getPointeeType();
+ DestType = Method->getThisType().getNonReferenceType();
+ DestRecordType = Method->getFunctionObjectParameterType();
if (FromType->getAs<PointerType>()) {
FromRecordType = FromType->getPointeeType();
@@ -3034,7 +3296,7 @@ Sema::PerformObjectMemberConversion(Expr *From,
QualType QType = QualType(Qualifier->getAsType(), 0);
assert(QType->isRecordType() && "lookup done with non-record type");
- QualType QRecordType = QualType(QType->getAs<RecordType>(), 0);
+ QualType QRecordType = QualType(QType->castAs<RecordType>(), 0);
// In C++98, the qualifier type doesn't actually have to be a base
// type of the object type, in which case we just ignore it.
@@ -3087,7 +3349,7 @@ bool Sema::UseArgumentDependentLookup(const CXXScopeSpec &SS,
// Turn off ADL when we find certain kinds of declarations during
// normal lookup:
- for (NamedDecl *D : R) {
+ for (const NamedDecl *D : R) {
// C++0x [basic.lookup.argdep]p3:
// -- a declaration of a class member
// Since using decls preserve this property, we check this on the
@@ -3110,9 +3372,7 @@ bool Sema::UseArgumentDependentLookup(const CXXScopeSpec &SS,
// -- a declaration that is neither a function or a function
// template
// And also for builtin functions.
- if (isa<FunctionDecl>(D)) {
- FunctionDecl *FDecl = cast<FunctionDecl>(D);
-
+ if (const auto *FDecl = dyn_cast<FunctionDecl>(D)) {
// But also builtin functions.
if (FDecl->getBuiltinID() && FDecl->isImplicit())
return false;
@@ -3128,8 +3388,9 @@ bool Sema::UseArgumentDependentLookup(const CXXScopeSpec &SS,
/// as an expression. This is only actually called for lookups that
/// were not overloaded, and it doesn't promise that the declaration
/// will in fact be used.
-static bool CheckDeclInExpr(Sema &S, SourceLocation Loc, NamedDecl *D) {
- if (D->isInvalidDecl())
+static bool CheckDeclInExpr(Sema &S, SourceLocation Loc, NamedDecl *D,
+ bool AcceptInvalid) {
+ if (D->isInvalidDecl() && !AcceptInvalid)
return true;
if (isa<TypedefNameDecl>(D)) {
@@ -3175,7 +3436,8 @@ ExprResult Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
// result, because in the overloaded case the results can only be
// functions and function templates.
if (R.isSingleResult() && !ShouldLookupResultBeMultiVersionOverload(R) &&
- CheckDeclInExpr(*this, R.getNameLoc(), R.getFoundDecl()))
+ CheckDeclInExpr(*this, R.getNameLoc(), R.getFoundDecl(),
+ AcceptInvalidDecl))
return ExprError();
// Otherwise, just build an unresolved lookup expression. Suppress
@@ -3193,9 +3455,9 @@ ExprResult Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
return ULE;
}
-static void
-diagnoseUncapturableValueReference(Sema &S, SourceLocation loc,
- ValueDecl *var, DeclContext *DC);
+static void diagnoseUncapturableValueReferenceOrBinding(Sema &S,
+ SourceLocation loc,
+ ValueDecl *var);
/// Complete semantic analysis for a reference to the given declaration.
ExprResult Sema::BuildDeclarationNameExpr(
@@ -3207,8 +3469,12 @@ ExprResult Sema::BuildDeclarationNameExpr(
"Cannot refer unambiguously to a function template");
SourceLocation Loc = NameInfo.getLoc();
- if (CheckDeclInExpr(*this, Loc, D))
- return ExprError();
+ if (CheckDeclInExpr(*this, Loc, D, AcceptInvalidDecl)) {
+ // Recovery from invalid cases (e.g. D is an invalid Decl).
+ // We use the dependent type for the RecoveryExpr to prevent bogus follow-up
+ // diagnostics, as invalid decls use int as a fallback type.
+ return CreateRecoveryExpr(NameInfo.getBeginLoc(), NameInfo.getEndLoc(), {});
+ }
if (TemplateDecl *Template = dyn_cast<TemplateDecl>(D)) {
// Specifically diagnose references to class templates that are missing
@@ -3219,8 +3485,7 @@ ExprResult Sema::BuildDeclarationNameExpr(
// Make sure that we're referring to a value.
if (!isa<ValueDecl, UnresolvedUsingIfExistsDecl>(D)) {
- Diag(Loc, diag::err_ref_non_value)
- << D << SS.getRange();
+ Diag(Loc, diag::err_ref_non_value) << D << SS.getRange();
Diag(D->getLocation(), diag::note_declared_at);
return ExprError();
}
@@ -3241,215 +3506,211 @@ ExprResult Sema::BuildDeclarationNameExpr(
// Handle members of anonymous structs and unions. If we got here,
// and the reference is to a class member indirect field, then this
// must be the subject of a pointer-to-member expression.
- if (IndirectFieldDecl *indirectField = dyn_cast<IndirectFieldDecl>(VD))
- if (!indirectField->isCXXClassMember())
- return BuildAnonymousStructUnionMemberReference(SS, NameInfo.getLoc(),
- indirectField);
+ if (auto *IndirectField = dyn_cast<IndirectFieldDecl>(VD);
+ IndirectField && !IndirectField->isCXXClassMember())
+ return BuildAnonymousStructUnionMemberReference(SS, NameInfo.getLoc(),
+ IndirectField);
- {
- QualType type = VD->getType();
- if (type.isNull())
- return ExprError();
- ExprValueKind valueKind = VK_PRValue;
+ QualType type = VD->getType();
+ if (type.isNull())
+ return ExprError();
+ ExprValueKind valueKind = VK_PRValue;
- // In 'T ...V;', the type of the declaration 'V' is 'T...', but the type of
- // a reference to 'V' is simply (unexpanded) 'T'. The type, like the value,
- // is expanded by some outer '...' in the context of the use.
- type = type.getNonPackExpansionType();
+ // In 'T ...V;', the type of the declaration 'V' is 'T...', but the type of
+ // a reference to 'V' is simply (unexpanded) 'T'. The type, like the value,
+ // is expanded by some outer '...' in the context of the use.
+ type = type.getNonPackExpansionType();
- switch (D->getKind()) {
+ switch (D->getKind()) {
// Ignore all the non-ValueDecl kinds.
#define ABSTRACT_DECL(kind)
#define VALUE(type, base)
-#define DECL(type, base) \
- case Decl::type:
+#define DECL(type, base) case Decl::type:
#include "clang/AST/DeclNodes.inc"
- llvm_unreachable("invalid value decl kind");
-
- // These shouldn't make it here.
- case Decl::ObjCAtDefsField:
- llvm_unreachable("forming non-member reference to ivar?");
-
- // Enum constants are always r-values and never references.
- // Unresolved using declarations are dependent.
- case Decl::EnumConstant:
- case Decl::UnresolvedUsingValue:
- case Decl::OMPDeclareReduction:
- case Decl::OMPDeclareMapper:
- valueKind = VK_PRValue;
+ llvm_unreachable("invalid value decl kind");
+
+ // These shouldn't make it here.
+ case Decl::ObjCAtDefsField:
+ llvm_unreachable("forming non-member reference to ivar?");
+
+ // Enum constants are always r-values and never references.
+ // Unresolved using declarations are dependent.
+ case Decl::EnumConstant:
+ case Decl::UnresolvedUsingValue:
+ case Decl::OMPDeclareReduction:
+ case Decl::OMPDeclareMapper:
+ valueKind = VK_PRValue;
+ break;
+
+ // Fields and indirect fields that got here must be for
+ // pointer-to-member expressions; we just call them l-values for
+ // internal consistency, because this subexpression doesn't really
+ // exist in the high-level semantics.
+ case Decl::Field:
+ case Decl::IndirectField:
+ case Decl::ObjCIvar:
+ assert(getLangOpts().CPlusPlus && "building reference to field in C?");
+
+ // These can't have reference type in well-formed programs, but
+ // for internal consistency we do this anyway.
+ type = type.getNonReferenceType();
+ valueKind = VK_LValue;
+ break;
+
+ // Non-type template parameters are either l-values or r-values
+ // depending on the type.
+ case Decl::NonTypeTemplateParm: {
+ if (const ReferenceType *reftype = type->getAs<ReferenceType>()) {
+ type = reftype->getPointeeType();
+ valueKind = VK_LValue; // even if the parameter is an r-value reference
break;
+ }
- // Fields and indirect fields that got here must be for
- // pointer-to-member expressions; we just call them l-values for
- // internal consistency, because this subexpression doesn't really
- // exist in the high-level semantics.
- case Decl::Field:
- case Decl::IndirectField:
- case Decl::ObjCIvar:
- assert(getLangOpts().CPlusPlus &&
- "building reference to field in C?");
-
- // These can't have reference type in well-formed programs, but
- // for internal consistency we do this anyway.
- type = type.getNonReferenceType();
+ // [expr.prim.id.unqual]p2:
+ // If the entity is a template parameter object for a template
+ // parameter of type T, the type of the expression is const T.
+ // [...] The expression is an lvalue if the entity is a [...] template
+ // parameter object.
+ if (type->isRecordType()) {
+ type = type.getUnqualifiedType().withConst();
valueKind = VK_LValue;
break;
+ }
- // Non-type template parameters are either l-values or r-values
- // depending on the type.
- case Decl::NonTypeTemplateParm: {
- if (const ReferenceType *reftype = type->getAs<ReferenceType>()) {
- type = reftype->getPointeeType();
- valueKind = VK_LValue; // even if the parameter is an r-value reference
- break;
- }
-
- // [expr.prim.id.unqual]p2:
- // If the entity is a template parameter object for a template
- // parameter of type T, the type of the expression is const T.
- // [...] The expression is an lvalue if the entity is a [...] template
- // parameter object.
- if (type->isRecordType()) {
- type = type.getUnqualifiedType().withConst();
- valueKind = VK_LValue;
- break;
- }
+ // For non-references, we need to strip qualifiers just in case
+ // the template parameter was declared as 'const int' or whatever.
+ valueKind = VK_PRValue;
+ type = type.getUnqualifiedType();
+ break;
+ }
- // For non-references, we need to strip qualifiers just in case
- // the template parameter was declared as 'const int' or whatever.
+ case Decl::Var:
+ case Decl::VarTemplateSpecialization:
+ case Decl::VarTemplatePartialSpecialization:
+ case Decl::Decomposition:
+ case Decl::OMPCapturedExpr:
+ // In C, "extern void blah;" is valid and is an r-value.
+ if (!getLangOpts().CPlusPlus && !type.hasQualifiers() &&
+ type->isVoidType()) {
valueKind = VK_PRValue;
- type = type.getUnqualifiedType();
break;
}
+ [[fallthrough]];
- case Decl::Var:
- case Decl::VarTemplateSpecialization:
- case Decl::VarTemplatePartialSpecialization:
- case Decl::Decomposition:
- case Decl::OMPCapturedExpr:
- // In C, "extern void blah;" is valid and is an r-value.
- if (!getLangOpts().CPlusPlus &&
- !type.hasQualifiers() &&
- type->isVoidType()) {
- valueKind = VK_PRValue;
- break;
- }
- LLVM_FALLTHROUGH;
+ case Decl::ImplicitParam:
+ case Decl::ParmVar: {
+ // These are always l-values.
+ valueKind = VK_LValue;
+ type = type.getNonReferenceType();
- case Decl::ImplicitParam:
- case Decl::ParmVar: {
- // These are always l-values.
- valueKind = VK_LValue;
- type = type.getNonReferenceType();
-
- // FIXME: Does the addition of const really only apply in
- // potentially-evaluated contexts? Since the variable isn't actually
- // captured in an unevaluated context, it seems that the answer is no.
- if (!isUnevaluatedContext()) {
- QualType CapturedType = getCapturedDeclRefType(cast<VarDecl>(VD), Loc);
- if (!CapturedType.isNull())
- type = CapturedType;
- }
-
- break;
+ // FIXME: Does the addition of const really only apply in
+ // potentially-evaluated contexts? Since the variable isn't actually
+ // captured in an unevaluated context, it seems that the answer is no.
+ if (!isUnevaluatedContext()) {
+ QualType CapturedType = getCapturedDeclRefType(cast<VarDecl>(VD), Loc);
+ if (!CapturedType.isNull())
+ type = CapturedType;
}
- case Decl::Binding: {
- // These are always lvalues.
- valueKind = VK_LValue;
- type = type.getNonReferenceType();
- // FIXME: Support lambda-capture of BindingDecls, once CWG actually
- // decides how that's supposed to work.
- auto *BD = cast<BindingDecl>(VD);
- if (BD->getDeclContext() != CurContext) {
- auto *DD = dyn_cast_or_null<VarDecl>(BD->getDecomposedDecl());
- if (DD && DD->hasLocalStorage())
- diagnoseUncapturableValueReference(*this, Loc, BD, CurContext);
- }
- break;
- }
-
- case Decl::Function: {
- if (unsigned BID = cast<FunctionDecl>(VD)->getBuiltinID()) {
- if (!Context.BuiltinInfo.isPredefinedLibFunction(BID)) {
- type = Context.BuiltinFnTy;
- valueKind = VK_PRValue;
- break;
- }
- }
+ break;
+ }
- const FunctionType *fty = type->castAs<FunctionType>();
+ case Decl::Binding:
+ // These are always lvalues.
+ valueKind = VK_LValue;
+ type = type.getNonReferenceType();
+ break;
- // If we're referring to a function with an __unknown_anytype
- // result type, make the entire expression __unknown_anytype.
- if (fty->getReturnType() == Context.UnknownAnyTy) {
- type = Context.UnknownAnyTy;
+ case Decl::Function: {
+ if (unsigned BID = cast<FunctionDecl>(VD)->getBuiltinID()) {
+ if (!Context.BuiltinInfo.isDirectlyAddressable(BID)) {
+ type = Context.BuiltinFnTy;
valueKind = VK_PRValue;
break;
}
+ }
- // Functions are l-values in C++.
- if (getLangOpts().CPlusPlus) {
- valueKind = VK_LValue;
- break;
- }
+ const FunctionType *fty = type->castAs<FunctionType>();
- // C99 DR 316 says that, if a function type comes from a
- // function definition (without a prototype), that type is only
- // used for checking compatibility. Therefore, when referencing
- // the function, we pretend that we don't have the full function
- // type.
- if (!cast<FunctionDecl>(VD)->hasPrototype() &&
- isa<FunctionProtoType>(fty))
- type = Context.getFunctionNoProtoType(fty->getReturnType(),
- fty->getExtInfo());
-
- // Functions are r-values in C.
+ // If we're referring to a function with an __unknown_anytype
+ // result type, make the entire expression __unknown_anytype.
+ if (fty->getReturnType() == Context.UnknownAnyTy) {
+ type = Context.UnknownAnyTy;
valueKind = VK_PRValue;
break;
}
- case Decl::CXXDeductionGuide:
- llvm_unreachable("building reference to deduction guide");
-
- case Decl::MSProperty:
- case Decl::MSGuid:
- case Decl::TemplateParamObject:
- // FIXME: Should MSGuidDecl and template parameter objects be subject to
- // capture in OpenMP, or duplicated between host and device?
+ // Functions are l-values in C++.
+ if (getLangOpts().CPlusPlus) {
valueKind = VK_LValue;
break;
+ }
- case Decl::CXXMethod:
- // If we're referring to a method with an __unknown_anytype
- // result type, make the entire expression __unknown_anytype.
- // This should only be possible with a type written directly.
- if (const FunctionProtoType *proto
- = dyn_cast<FunctionProtoType>(VD->getType()))
- if (proto->getReturnType() == Context.UnknownAnyTy) {
- type = Context.UnknownAnyTy;
- valueKind = VK_PRValue;
- break;
- }
+ // C99 DR 316 says that, if a function type comes from a
+ // function definition (without a prototype), that type is only
+ // used for checking compatibility. Therefore, when referencing
+ // the function, we pretend that we don't have the full function
+ // type.
+ if (!cast<FunctionDecl>(VD)->hasPrototype() && isa<FunctionProtoType>(fty))
+ type = Context.getFunctionNoProtoType(fty->getReturnType(),
+ fty->getExtInfo());
+
+ // Functions are r-values in C.
+ valueKind = VK_PRValue;
+ break;
+ }
+
+ case Decl::CXXDeductionGuide:
+ llvm_unreachable("building reference to deduction guide");
- // C++ methods are l-values if static, r-values if non-static.
- if (cast<CXXMethodDecl>(VD)->isStatic()) {
- valueKind = VK_LValue;
+ case Decl::MSProperty:
+ case Decl::MSGuid:
+ case Decl::TemplateParamObject:
+ // FIXME: Should MSGuidDecl and template parameter objects be subject to
+ // capture in OpenMP, or duplicated between host and device?
+ valueKind = VK_LValue;
+ break;
+
+ case Decl::UnnamedGlobalConstant:
+ valueKind = VK_LValue;
+ break;
+
+ case Decl::CXXMethod:
+ // If we're referring to a method with an __unknown_anytype
+ // result type, make the entire expression __unknown_anytype.
+ // This should only be possible with a type written directly.
+ if (const FunctionProtoType *proto =
+ dyn_cast<FunctionProtoType>(VD->getType()))
+ if (proto->getReturnType() == Context.UnknownAnyTy) {
+ type = Context.UnknownAnyTy;
+ valueKind = VK_PRValue;
break;
}
- LLVM_FALLTHROUGH;
- case Decl::CXXConversion:
- case Decl::CXXDestructor:
- case Decl::CXXConstructor:
- valueKind = VK_PRValue;
+ // C++ methods are l-values if static, r-values if non-static.
+ if (cast<CXXMethodDecl>(VD)->isStatic()) {
+ valueKind = VK_LValue;
break;
}
+ [[fallthrough]];
- return BuildDeclRefExpr(VD, type, valueKind, NameInfo, &SS, FoundD,
- /*FIXME: TemplateKWLoc*/ SourceLocation(),
- TemplateArgs);
+ case Decl::CXXConversion:
+ case Decl::CXXDestructor:
+ case Decl::CXXConstructor:
+ valueKind = VK_PRValue;
+ break;
}
+
+ auto *E =
+ BuildDeclRefExpr(VD, type, valueKind, NameInfo, &SS, FoundD,
+ /*FIXME: TemplateKWLoc*/ SourceLocation(), TemplateArgs);
+ // Clang AST consumers assume a DeclRefExpr refers to a valid decl. We
+ // wrap a DeclRefExpr referring to an invalid decl with a dependent-type
+ // RecoveryExpr to avoid follow-up semantic analysis (thus prevent bogus
+ // diagnostics).
+ if (VD->isInvalidDecl() && E)
+ return CreateRecoveryExpr(E->getBeginLoc(), E->getEndLoc(), {E});
+ return E;
}
static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source,
@@ -3465,18 +3726,8 @@ static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source,
}
ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc,
- PredefinedExpr::IdentKind IK) {
- // Pick the current block, lambda, captured statement or function.
- Decl *currentDecl = nullptr;
- if (const BlockScopeInfo *BSI = getCurBlock())
- currentDecl = BSI->TheDecl;
- else if (const LambdaScopeInfo *LSI = getCurLambda())
- currentDecl = LSI->CallOperator;
- else if (const CapturedRegionScopeInfo *CSI = getCurCapturedRegion())
- currentDecl = CSI->TheCapturedDecl;
- else
- currentDecl = getCurFunctionOrMethodDecl();
-
+ PredefinedIdentKind IK) {
+ Decl *currentDecl = getPredefinedExprDecl(CurContext);
if (!currentDecl) {
Diag(Loc, diag::ext_predef_outside_function);
currentDecl = Context.getTranslationUnitDecl();
@@ -3493,28 +3744,30 @@ ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc,
unsigned Length = Str.length();
llvm::APInt LengthI(32, Length + 1);
- if (IK == PredefinedExpr::LFunction || IK == PredefinedExpr::LFuncSig) {
+ if (IK == PredefinedIdentKind::LFunction ||
+ IK == PredefinedIdentKind::LFuncSig) {
ResTy =
Context.adjustStringLiteralBaseType(Context.WideCharTy.withConst());
SmallString<32> RawChars;
ConvertUTF8ToWideString(Context.getTypeSizeInChars(ResTy).getQuantity(),
Str, RawChars);
ResTy = Context.getConstantArrayType(ResTy, LengthI, nullptr,
- ArrayType::Normal,
+ ArraySizeModifier::Normal,
/*IndexTypeQuals*/ 0);
- SL = StringLiteral::Create(Context, RawChars, StringLiteral::Wide,
+ SL = StringLiteral::Create(Context, RawChars, StringLiteralKind::Wide,
/*Pascal*/ false, ResTy, Loc);
} else {
ResTy = Context.adjustStringLiteralBaseType(Context.CharTy.withConst());
ResTy = Context.getConstantArrayType(ResTy, LengthI, nullptr,
- ArrayType::Normal,
+ ArraySizeModifier::Normal,
/*IndexTypeQuals*/ 0);
- SL = StringLiteral::Create(Context, Str, StringLiteral::Ascii,
+ SL = StringLiteral::Create(Context, Str, StringLiteralKind::Ordinary,
/*Pascal*/ false, ResTy, Loc);
}
}
- return PredefinedExpr::Create(Context, Loc, ResTy, IK, SL);
+ return PredefinedExpr::Create(Context, Loc, ResTy, IK, LangOpts.MicrosoftExt,
+ SL);
}
ExprResult Sema::BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc,
@@ -3540,20 +3793,7 @@ ExprResult Sema::ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc,
}
ExprResult Sema::ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind) {
- PredefinedExpr::IdentKind IK;
-
- switch (Kind) {
- default: llvm_unreachable("Unknown simple primary expr!");
- case tok::kw___func__: IK = PredefinedExpr::Func; break; // [C99 6.4.2.2]
- case tok::kw___FUNCTION__: IK = PredefinedExpr::Function; break;
- case tok::kw___FUNCDNAME__: IK = PredefinedExpr::FuncDName; break; // [MS]
- case tok::kw___FUNCSIG__: IK = PredefinedExpr::FuncSig; break; // [MS]
- case tok::kw_L__FUNCTION__: IK = PredefinedExpr::LFunction; break; // [MS]
- case tok::kw_L__FUNCSIG__: IK = PredefinedExpr::LFuncSig; break; // [MS]
- case tok::kw___PRETTY_FUNCTION__: IK = PredefinedExpr::PrettyFunction; break;
- }
-
- return BuildPredefinedExpr(Loc, IK);
+ return BuildPredefinedExpr(Loc, getPredefinedExprKind(Kind));
}
ExprResult Sema::ActOnCharacterConstant(const Token &Tok, Scope *UDLScope) {
@@ -3571,6 +3811,8 @@ ExprResult Sema::ActOnCharacterConstant(const Token &Tok, Scope *UDLScope) {
QualType Ty;
if (Literal.isWide())
Ty = Context.WideCharTy; // L'x' -> wchar_t in C and C++.
+ else if (Literal.isUTF8() && getLangOpts().C23)
+ Ty = Context.UnsignedCharTy; // u8'x' -> unsigned char in C23
else if (Literal.isUTF8() && getLangOpts().Char8)
Ty = Context.Char8Ty; // u8'x' -> char8_t when it exists.
else if (Literal.isUTF16())
@@ -3580,17 +3822,18 @@ ExprResult Sema::ActOnCharacterConstant(const Token &Tok, Scope *UDLScope) {
else if (!getLangOpts().CPlusPlus || Literal.isMultiChar())
Ty = Context.IntTy; // 'x' -> int in C, 'wxyz' -> int in C++.
else
- Ty = Context.CharTy; // 'x' -> char in C++
+ Ty = Context.CharTy; // 'x' -> char in C++;
+ // u8'x' -> char in C11-C17 and in C++ without char8_t.
- CharacterLiteral::CharacterKind Kind = CharacterLiteral::Ascii;
+ CharacterLiteralKind Kind = CharacterLiteralKind::Ascii;
if (Literal.isWide())
- Kind = CharacterLiteral::Wide;
+ Kind = CharacterLiteralKind::Wide;
else if (Literal.isUTF16())
- Kind = CharacterLiteral::UTF16;
+ Kind = CharacterLiteralKind::UTF16;
else if (Literal.isUTF32())
- Kind = CharacterLiteral::UTF32;
+ Kind = CharacterLiteralKind::UTF32;
else if (Literal.isUTF8())
- Kind = CharacterLiteral::UTF8;
+ Kind = CharacterLiteralKind::UTF8;
Expr *Lit = new (Context) CharacterLiteral(Literal.getValue(), Kind, Ty,
Tok.getLocation());
@@ -3708,7 +3951,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
if (Literal.hasUDSuffix()) {
// We're building a user-defined literal.
- IdentifierInfo *UDSuffix = &Context.Idents.get(Literal.getUDSuffix());
+ const IdentifierInfo *UDSuffix = &Context.Idents.get(Literal.getUDSuffix());
SourceLocation UDSuffixLoc =
getUDSuffixLoc(*this, Tok.getLocation(), Literal.getUDSuffixOffset());
@@ -3771,10 +4014,11 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
unsigned Length = Literal.getUDSuffixOffset();
QualType StrTy = Context.getConstantArrayType(
Context.adjustStringLiteralBaseType(Context.CharTy.withConst()),
- llvm::APInt(32, Length + 1), nullptr, ArrayType::Normal, 0);
- Expr *Lit = StringLiteral::Create(
- Context, StringRef(TokSpelling.data(), Length), StringLiteral::Ascii,
- /*Pascal*/false, StrTy, &TokLoc, 1);
+ llvm::APInt(32, Length + 1), nullptr, ArraySizeModifier::Normal, 0);
+ Expr *Lit =
+ StringLiteral::Create(Context, StringRef(TokSpelling.data(), Length),
+ StringLiteralKind::Ordinary,
+ /*Pascal*/ false, StrTy, &TokLoc, 1);
return BuildLiteralOperatorCall(R, OpNameInfo, Lit, TokLoc);
}
@@ -3793,7 +4037,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
TemplateArgumentLocInfo ArgInfo;
ExplicitArgs.addArgument(TemplateArgumentLoc(Arg, ArgInfo));
}
- return BuildLiteralOperatorCall(R, OpNameInfo, None, TokLoc,
+ return BuildLiteralOperatorCall(R, OpNameInfo, std::nullopt, TokLoc,
&ExplicitArgs);
}
case LOLR_StringTemplatePack:
@@ -3832,7 +4076,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
llvm::APInt Val(bit_width, 0, isSigned);
bool Overflowed = Literal.GetFixedPointValue(Val, scale);
- bool ValIsZero = Val.isNullValue() && !Overflowed;
+ bool ValIsZero = Val.isZero() && !Overflowed;
auto MaxVal = Context.getFixedPointMax(Ty).getValue();
if (Literal.isFract && Val == MaxVal + 1 && !ValIsZero)
@@ -3877,7 +4121,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
"cl_khr_fp64", getLangOpts())) {
// Impose single-precision float type when cl_khr_fp64 is not enabled.
Diag(Tok.getLocation(), diag::warn_double_const_requires_fp64)
- << (getLangOpts().OpenCLVersion >= 300);
+ << (getLangOpts().getOpenCLCompatibleVersion() >= 300);
Res = ImpCastExprToType(Res, Context.FloatTy, CK_FloatingCast).get();
}
}
@@ -3886,27 +4130,35 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
} else {
QualType Ty;
- // 'long long' is a C99 or C++11 feature.
- if (!getLangOpts().C99 && Literal.isLongLong) {
- if (getLangOpts().CPlusPlus)
- Diag(Tok.getLocation(),
- getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_longlong : diag::ext_cxx11_longlong);
- else
- Diag(Tok.getLocation(), diag::ext_c99_longlong);
- }
-
- // 'z/uz' literals are a C++2b feature.
+ // 'z/uz' literals are a C++23 feature.
if (Literal.isSizeT)
Diag(Tok.getLocation(), getLangOpts().CPlusPlus
- ? getLangOpts().CPlusPlus2b
+ ? getLangOpts().CPlusPlus23
? diag::warn_cxx20_compat_size_t_suffix
- : diag::ext_cxx2b_size_t_suffix
- : diag::err_cxx2b_size_t_suffix);
-
- // Get the value in the widest-possible width.
- unsigned MaxWidth = Context.getTargetInfo().getIntMaxTWidth();
- llvm::APInt ResultVal(MaxWidth, 0);
+ : diag::ext_cxx23_size_t_suffix
+ : diag::err_cxx23_size_t_suffix);
+
+ // 'wb/uwb' literals are a C23 feature. We support _BitInt as a type in C++,
+ // but we do not currently support the suffix in C++ mode because it's not
+ // entirely clear whether WG21 will prefer this suffix to return a library
+ // type such as std::bit_int instead of returning a _BitInt.
+ if (Literal.isBitInt && !getLangOpts().CPlusPlus)
+ PP.Diag(Tok.getLocation(), getLangOpts().C23
+ ? diag::warn_c23_compat_bitint_suffix
+ : diag::ext_c23_bitint_suffix);
+
+ // Get the value in the widest-possible width. What is "widest" depends on
+ // whether the literal is a bit-precise integer or not. For a bit-precise
+ // integer type, try to scan the source to determine how many bits are
+ // needed to represent the value. This may seem a bit expensive, but trying
+ // to get the integer value from an overly-wide APInt is *extremely*
+ // expensive, so the naive approach of assuming
+ // llvm::IntegerType::MAX_INT_BITS is a big performance hit.
+ unsigned BitsNeeded =
+ Literal.isBitInt ? llvm::APInt::getSufficientBitsNeeded(
+ Literal.getLiteralDigits(), Literal.getRadix())
+ : Context.getTargetInfo().getIntMaxTWidth();
+ llvm::APInt ResultVal(BitsNeeded, 0);
if (Literal.GetIntegerValue(ResultVal)) {
// If this value didn't fit into uintmax_t, error and force to ull.
@@ -3938,7 +4190,33 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
}
}
- // Check C++2b size_t literals.
+ // Bit-precise integer literals are automagically-sized based on the
+ // width required by the literal.
+ if (Literal.isBitInt) {
+ // The signed version has one more bit for the sign value. There are no
+ // zero-width bit-precise integers, even if the literal value is 0.
+ Width = std::max(ResultVal.getActiveBits(), 1u) +
+ (Literal.isUnsigned ? 0u : 1u);
+
+ // Diagnose if the width of the constant is larger than BITINT_MAXWIDTH,
+ // and reset the type to the largest supported width.
+ unsigned int MaxBitIntWidth =
+ Context.getTargetInfo().getMaxBitIntWidth();
+ if (Width > MaxBitIntWidth) {
+ Diag(Tok.getLocation(), diag::err_integer_literal_too_large)
+ << Literal.isUnsigned;
+ Width = MaxBitIntWidth;
+ }
+
+ // Reset the result value to the smaller APInt and select the correct
+ // type to be used. Note, we zext even for signed values because the
+ // literal itself is always an unsigned value (a preceeding - is a
+ // unary operator, not part of the literal).
+ ResultVal = ResultVal.zextOrTrunc(Width);
+ Ty = Context.getBitIntType(Literal.isUnsigned, Width);
+ }
+
+ // Check C++23 size_t literals.
if (Literal.isSizeT) {
assert(!Literal.MicrosoftInteger &&
"size_t literals can't be Microsoft literals");
@@ -4018,6 +4296,15 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
else if (AllowUnsigned)
Ty = Context.UnsignedLongLongTy;
Width = LongLongSize;
+
+ // 'long long' is a C99 or C++11 feature, whether the literal
+ // explicitly specified 'long long' or we needed the extra width.
+ if (getLangOpts().CPlusPlus)
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_longlong
+ : diag::ext_cxx11_longlong);
+ else if (!getLangOpts().C99)
+ Diag(Tok.getLocation(), diag::ext_c99_longlong);
}
}
@@ -4078,6 +4365,18 @@ static bool CheckVecStepTraitOperandType(Sema &S, QualType T,
return false;
}
+static bool CheckVectorElementsTraitOperandType(Sema &S, QualType T,
+ SourceLocation Loc,
+ SourceRange ArgRange) {
+ // builtin_vectorelements supports both fixed-sized and scalable vectors.
+ if (!T->isVectorType() && !T->isSizelessVectorType())
+ return S.Diag(Loc, diag::err_builtin_non_vector_type)
+ << ""
+ << "__builtin_vectorelements" << T << ArgRange;
+
+ return false;
+}
+
static bool CheckExtensionTraitOperandType(Sema &S, QualType T,
SourceLocation Loc,
SourceRange ArgRange,
@@ -4127,13 +4426,13 @@ static bool CheckObjCTraitOperandConstraints(Sema &S, QualType T,
/// Check whether E is a pointer from a decayed array type (the decayed
/// pointer type is equal to T) and emit a warning if it is.
static void warnOnSizeofOnArrayDecay(Sema &S, SourceLocation Loc, QualType T,
- Expr *E) {
+ const Expr *E) {
// Don't warn if the operation changed the type.
if (T != E->getType())
return;
// Now look for array decays.
- ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E);
+ const auto *ICE = dyn_cast<ImplicitCastExpr>(E);
if (!ICE || ICE->getCastKind() != CK_ArrayToPointerDecay)
return;
@@ -4155,8 +4454,9 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
assert(!ExprTy->isReferenceType());
bool IsUnevaluatedOperand =
- (ExprKind == UETT_SizeOf || ExprKind == UETT_AlignOf ||
- ExprKind == UETT_PreferredAlignOf || ExprKind == UETT_VecStep);
+ (ExprKind == UETT_SizeOf || ExprKind == UETT_DataSizeOf ||
+ ExprKind == UETT_AlignOf || ExprKind == UETT_PreferredAlignOf ||
+ ExprKind == UETT_VecStep);
if (IsUnevaluatedOperand) {
ExprResult Result = CheckUnevaluatedOperand(E);
if (Result.isInvalid())
@@ -4171,6 +4471,7 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
// FIXME: Should we consider instantiation-dependent operands to 'alignof'?
if (IsUnevaluatedOperand && !inTemplateInstantiation() &&
!E->isInstantiationDependent() &&
+ !E->getType()->isVariableArrayType() &&
E->HasSideEffects(Context, false))
Diag(E->getExprLoc(), diag::warn_side_effects_unevaluated_context);
@@ -4178,11 +4479,24 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
return CheckVecStepTraitOperandType(*this, ExprTy, E->getExprLoc(),
E->getSourceRange());
+ if (ExprKind == UETT_VectorElements)
+ return CheckVectorElementsTraitOperandType(*this, ExprTy, E->getExprLoc(),
+ E->getSourceRange());
+
// Explicitly list some types as extensions.
if (!CheckExtensionTraitOperandType(*this, ExprTy, E->getExprLoc(),
E->getSourceRange(), ExprKind))
return false;
+ // WebAssembly tables are always illegal operands to unary expressions and
+ // type traits.
+ if (Context.getTargetInfo().getTriple().isWasm() &&
+ E->getType()->isWebAssemblyTableType()) {
+ Diag(E->getExprLoc(), diag::err_wasm_table_invalid_uett_operand)
+ << getTraitSpelling(ExprKind);
+ return true;
+ }
+
// 'alignof' applied to an expression only requires the base element type of
// the expression to be complete. 'sizeof' requires the expression's type to
// be complete (and will attempt to complete it if it's an array of unknown
@@ -4215,8 +4529,8 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
return true;
if (ExprKind == UETT_SizeOf) {
- if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParens())) {
- if (ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(DeclRef->getFoundDecl())) {
+ if (const auto *DeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParens())) {
+ if (const auto *PVD = dyn_cast<ParmVarDecl>(DeclRef->getFoundDecl())) {
QualType OType = PVD->getOriginalType();
QualType Type = PVD->getType();
if (Type->isPointerType() && OType->isArrayType()) {
@@ -4230,7 +4544,7 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
// Warn on "sizeof(array op x)" and "sizeof(x op array)", where the array
// decays into a pointer and returns an unintended result. This is most
// likely a typo for "sizeof(array) op x".
- if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E->IgnoreParens())) {
+ if (const auto *BO = dyn_cast<BinaryOperator>(E->IgnoreParens())) {
warnOnSizeofOnArrayDecay(*this, BO->getOperatorLoc(), BO->getType(),
BO->getLHS());
warnOnSizeofOnArrayDecay(*this, BO->getOperatorLoc(), BO->getType(),
@@ -4241,70 +4555,6 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
return false;
}
-/// Check the constraints on operands to unary expression and type
-/// traits.
-///
-/// This will complete any types necessary, and validate the various constraints
-/// on those operands.
-///
-/// The UsualUnaryConversions() function is *not* called by this routine.
-/// C99 6.3.2.1p[2-4] all state:
-/// Except when it is the operand of the sizeof operator ...
-///
-/// C++ [expr.sizeof]p4
-/// The lvalue-to-rvalue, array-to-pointer, and function-to-pointer
-/// standard conversions are not applied to the operand of sizeof.
-///
-/// This policy is followed for all of the unary trait expressions.
-bool Sema::CheckUnaryExprOrTypeTraitOperand(QualType ExprType,
- SourceLocation OpLoc,
- SourceRange ExprRange,
- UnaryExprOrTypeTrait ExprKind) {
- if (ExprType->isDependentType())
- return false;
-
- // C++ [expr.sizeof]p2:
- // When applied to a reference or a reference type, the result
- // is the size of the referenced type.
- // C++11 [expr.alignof]p3:
- // When alignof is applied to a reference type, the result
- // shall be the alignment of the referenced type.
- if (const ReferenceType *Ref = ExprType->getAs<ReferenceType>())
- ExprType = Ref->getPointeeType();
-
- // C11 6.5.3.4/3, C++11 [expr.alignof]p3:
- // When alignof or _Alignof is applied to an array type, the result
- // is the alignment of the element type.
- if (ExprKind == UETT_AlignOf || ExprKind == UETT_PreferredAlignOf ||
- ExprKind == UETT_OpenMPRequiredSimdAlign)
- ExprType = Context.getBaseElementType(ExprType);
-
- if (ExprKind == UETT_VecStep)
- return CheckVecStepTraitOperandType(*this, ExprType, OpLoc, ExprRange);
-
- // Explicitly list some types as extensions.
- if (!CheckExtensionTraitOperandType(*this, ExprType, OpLoc, ExprRange,
- ExprKind))
- return false;
-
- if (RequireCompleteSizedType(
- OpLoc, ExprType, diag::err_sizeof_alignof_incomplete_or_sizeless_type,
- getTraitSpelling(ExprKind), ExprRange))
- return true;
-
- if (ExprType->isFunctionType()) {
- Diag(OpLoc, diag::err_sizeof_alignof_function_type)
- << getTraitSpelling(ExprKind) << ExprRange;
- return true;
- }
-
- if (CheckObjCTraitOperandConstraints(*this, ExprType, OpLoc, ExprRange,
- ExprKind))
- return true;
-
- return false;
-}
-
static bool CheckAlignOfExpr(Sema &S, Expr *E, UnaryExprOrTypeTrait ExprKind) {
// Cannot know anything else if the expression is dependent.
if (E->isTypeDependent())
@@ -4396,15 +4646,17 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
case Type::ConstantMatrix:
case Type::Record:
case Type::Enum:
- case Type::Elaborated:
case Type::TemplateSpecialization:
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
case Type::ObjCTypeParam:
case Type::Pipe:
- case Type::ExtInt:
+ case Type::BitInt:
llvm_unreachable("type class is never variably-modified!");
+ case Type::Elaborated:
+ T = cast<ElaboratedType>(Ty)->getNamedType();
+ break;
case Type::Adjusted:
T = cast<AdjustedType>(Ty)->getOriginalType();
break;
@@ -4451,6 +4703,7 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
case Type::TypeOf:
case Type::UnaryTransform:
case Type::Attributed:
+ case Type::BTFTagAttributed:
case Type::SubstTemplateTypeParm:
case Type::MacroQualified:
// Keep walking after single level desugaring.
@@ -4462,6 +4715,9 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
case Type::Decltype:
T = cast<DecltypeType>(Ty)->desugar();
break;
+ case Type::Using:
+ T = cast<UsingType>(Ty)->desugar();
+ break;
case Type::Auto:
case Type::DeducedTemplateSpecialization:
T = cast<DeducedType>(Ty)->getDeducedType();
@@ -4476,23 +4732,82 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
} while (!T.isNull() && T->isVariablyModifiedType());
}
-/// Build a sizeof or alignof expression given a type operand.
-ExprResult
-Sema::CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
- SourceLocation OpLoc,
- UnaryExprOrTypeTrait ExprKind,
- SourceRange R) {
- if (!TInfo)
- return ExprError();
+/// Check the constraints on operands to unary expression and type
+/// traits.
+///
+/// This will complete any types necessary, and validate the various constraints
+/// on those operands.
+///
+/// The UsualUnaryConversions() function is *not* called by this routine.
+/// C99 6.3.2.1p[2-4] all state:
+/// Except when it is the operand of the sizeof operator ...
+///
+/// C++ [expr.sizeof]p4
+/// The lvalue-to-rvalue, array-to-pointer, and function-to-pointer
+/// standard conversions are not applied to the operand of sizeof.
+///
+/// This policy is followed for all of the unary trait expressions.
+bool Sema::CheckUnaryExprOrTypeTraitOperand(QualType ExprType,
+ SourceLocation OpLoc,
+ SourceRange ExprRange,
+ UnaryExprOrTypeTrait ExprKind,
+ StringRef KWName) {
+ if (ExprType->isDependentType())
+ return false;
- QualType T = TInfo->getType();
+ // C++ [expr.sizeof]p2:
+ // When applied to a reference or a reference type, the result
+ // is the size of the referenced type.
+ // C++11 [expr.alignof]p3:
+ // When alignof is applied to a reference type, the result
+ // shall be the alignment of the referenced type.
+ if (const ReferenceType *Ref = ExprType->getAs<ReferenceType>())
+ ExprType = Ref->getPointeeType();
- if (!T->isDependentType() &&
- CheckUnaryExprOrTypeTraitOperand(T, OpLoc, R, ExprKind))
- return ExprError();
+ // C11 6.5.3.4/3, C++11 [expr.alignof]p3:
+ // When alignof or _Alignof is applied to an array type, the result
+ // is the alignment of the element type.
+ if (ExprKind == UETT_AlignOf || ExprKind == UETT_PreferredAlignOf ||
+ ExprKind == UETT_OpenMPRequiredSimdAlign)
+ ExprType = Context.getBaseElementType(ExprType);
+
+ if (ExprKind == UETT_VecStep)
+ return CheckVecStepTraitOperandType(*this, ExprType, OpLoc, ExprRange);
+
+ if (ExprKind == UETT_VectorElements)
+ return CheckVectorElementsTraitOperandType(*this, ExprType, OpLoc,
+ ExprRange);
+
+ // Explicitly list some types as extensions.
+ if (!CheckExtensionTraitOperandType(*this, ExprType, OpLoc, ExprRange,
+ ExprKind))
+ return false;
+
+ if (RequireCompleteSizedType(
+ OpLoc, ExprType, diag::err_sizeof_alignof_incomplete_or_sizeless_type,
+ KWName, ExprRange))
+ return true;
+
+ if (ExprType->isFunctionType()) {
+ Diag(OpLoc, diag::err_sizeof_alignof_function_type) << KWName << ExprRange;
+ return true;
+ }
+
+ // WebAssembly tables are always illegal operands to unary expressions and
+ // type traits.
+ if (Context.getTargetInfo().getTriple().isWasm() &&
+ ExprType->isWebAssemblyTableType()) {
+ Diag(OpLoc, diag::err_wasm_table_invalid_uett_operand)
+ << getTraitSpelling(ExprKind);
+ return true;
+ }
+
+ if (CheckObjCTraitOperandConstraints(*this, ExprType, OpLoc, ExprRange,
+ ExprKind))
+ return true;
- if (T->isVariablyModifiedType() && FunctionScopes.size() > 1) {
- if (auto *TT = T->getAs<TypedefType>()) {
+ if (ExprType->isVariablyModifiedType() && FunctionScopes.size() > 1) {
+ if (auto *TT = ExprType->getAs<TypedefType>()) {
for (auto I = FunctionScopes.rbegin(),
E = std::prev(FunctionScopes.rend());
I != E; ++I) {
@@ -4509,12 +4824,36 @@ Sema::CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
if (DC) {
if (DC->containsDecl(TT->getDecl()))
break;
- captureVariablyModifiedType(Context, T, CSI);
+ captureVariablyModifiedType(Context, ExprType, CSI);
}
}
}
}
+ return false;
+}
+
+/// Build a sizeof or alignof expression given a type operand.
+ExprResult Sema::CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
+ SourceLocation OpLoc,
+ UnaryExprOrTypeTrait ExprKind,
+ SourceRange R) {
+ if (!TInfo)
+ return ExprError();
+
+ QualType T = TInfo->getType();
+
+ if (!T->isDependentType() &&
+ CheckUnaryExprOrTypeTraitOperand(T, OpLoc, R, ExprKind,
+ getTraitSpelling(ExprKind)))
+ return ExprError();
+
+ // Adds overload of TransformToPotentiallyEvaluated for TypeSourceInfo to
+ // properly deal with VLAs in nested calls of sizeof and typeof.
+ if (isUnevaluatedContext() && ExprKind == UETT_SizeOf &&
+ TInfo->getType()->isVariablyModifiedType())
+ TInfo = TransformToPotentiallyEvaluated(TInfo);
+
// C99 6.5.3.4p4: the type (an unsigned integer type) is size_t.
return new (Context) UnaryExprOrTypeTraitExpr(
ExprKind, TInfo, Context.getSizeType(), OpLoc, R.getEnd());
@@ -4545,6 +4884,8 @@ Sema::CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
} else if (E->refersToBitField()) { // C99 6.5.3.4p1.
Diag(E->getExprLoc(), diag::err_sizeof_alignof_typeof_bitfield) << 0;
isInvalid = true;
+ } else if (ExprKind == UETT_VectorElements) {
+ isInvalid = CheckUnaryExprOrTypeTraitOperand(E, UETT_VectorElements);
} else {
isInvalid = CheckUnaryExprOrTypeTraitOperand(E, UETT_SizeOf);
}
@@ -4584,6 +4925,29 @@ Sema::ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
return Result;
}
+bool Sema::CheckAlignasTypeArgument(StringRef KWName, TypeSourceInfo *TInfo,
+ SourceLocation OpLoc, SourceRange R) {
+ if (!TInfo)
+ return true;
+ return CheckUnaryExprOrTypeTraitOperand(TInfo->getType(), OpLoc, R,
+ UETT_AlignOf, KWName);
+}
+
+/// ActOnAlignasTypeArgument - Handle @c alignas(type-id) and @c
+/// _Alignas(type-name) .
+/// [dcl.align] An alignment-specifier of the form
+/// alignas(type-id) has the same effect as alignas(alignof(type-id)).
+///
+/// [N1570 6.7.5] _Alignas(type-name) is equivalent to
+/// _Alignas(_Alignof(type-name)).
+bool Sema::ActOnAlignasTypeArgument(StringRef KWName, ParsedType Ty,
+ SourceLocation OpLoc, SourceRange R) {
+ TypeSourceInfo *TInfo;
+ (void)GetTypeFromParser(ParsedType::getFromOpaquePtr(Ty.getAsOpaquePtr()),
+ &TInfo);
+ return CheckAlignasTypeArgument(KWName, TInfo, OpLoc, R);
+}
+
static QualType CheckRealImagOperand(Sema &S, ExprResult &V, SourceLocation Loc,
bool IsReal) {
if (V.get()->isTypeDependent())
@@ -4662,19 +5026,54 @@ static bool isMSPropertySubscriptExpr(Sema &S, Expr *Base) {
return isa<MSPropertySubscriptExpr>(BaseNoParens);
}
-ExprResult
-Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
- Expr *idx, SourceLocation rbLoc) {
+// Returns the type used for LHS[RHS], given one of LHS, RHS is type-dependent.
+// Typically this is DependentTy, but can sometimes be more precise.
+//
+// There are cases when we could determine a non-dependent type:
+// - LHS and RHS may have non-dependent types despite being type-dependent
+// (e.g. unbounded array static members of the current instantiation)
+// - one may be a dependent-sized array with known element type
+// - one may be a dependent-typed valid index (enum in current instantiation)
+//
+// We *always* return a dependent type, in such cases it is DependentTy.
+// This avoids creating type-dependent expressions with non-dependent types.
+// FIXME: is this important to avoid? See https://reviews.llvm.org/D107275
+static QualType getDependentArraySubscriptType(Expr *LHS, Expr *RHS,
+ const ASTContext &Ctx) {
+ assert(LHS->isTypeDependent() || RHS->isTypeDependent());
+ QualType LTy = LHS->getType(), RTy = RHS->getType();
+ QualType Result = Ctx.DependentTy;
+ if (RTy->isIntegralOrUnscopedEnumerationType()) {
+ if (const PointerType *PT = LTy->getAs<PointerType>())
+ Result = PT->getPointeeType();
+ else if (const ArrayType *AT = LTy->getAsArrayTypeUnsafe())
+ Result = AT->getElementType();
+ } else if (LTy->isIntegralOrUnscopedEnumerationType()) {
+ if (const PointerType *PT = RTy->getAs<PointerType>())
+ Result = PT->getPointeeType();
+ else if (const ArrayType *AT = RTy->getAsArrayTypeUnsafe())
+ Result = AT->getElementType();
+ }
+ // Ensure we return a dependent type.
+ return Result->isDependentType() ? Result : Ctx.DependentTy;
+}
+
+ExprResult Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base,
+ SourceLocation lbLoc,
+ MultiExprArg ArgExprs,
+ SourceLocation rbLoc) {
+
if (base && !base->getType().isNull() &&
- base->getType()->isSpecificPlaceholderType(BuiltinType::OMPArraySection))
- return ActOnOMPArraySectionExpr(base, lbLoc, idx, SourceLocation(),
+ base->hasPlaceholderType(BuiltinType::OMPArraySection))
+ return ActOnOMPArraySectionExpr(base, lbLoc, ArgExprs.front(), SourceLocation(),
SourceLocation(), /*Length*/ nullptr,
/*Stride=*/nullptr, rbLoc);
// Since this might be a postfix expression, get rid of ParenListExprs.
if (isa<ParenListExpr>(base)) {
ExprResult result = MaybeConvertParenListExprToParenExpr(S, base);
- if (result.isInvalid()) return ExprError();
+ if (result.isInvalid())
+ return ExprError();
base = result.get();
}
@@ -4692,8 +5091,8 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
};
// The matrix subscript operator ([][])is considered a single operator.
// Separating the index expressions by parenthesis is not allowed.
- if (base->getType()->isSpecificPlaceholderType(
- BuiltinType::IncompleteMatrixIdx) &&
+ if (base && !base->getType().isNull() &&
+ base->hasPlaceholderType(BuiltinType::IncompleteMatrixIdx) &&
!isa<MatrixSubscriptExpr>(base)) {
Diag(base->getExprLoc(), diag::err_matrix_separate_incomplete_index)
<< SourceRange(base->getBeginLoc(), rbLoc);
@@ -4703,13 +5102,20 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
// MatrixSubscriptExpr.
auto *matSubscriptE = dyn_cast<MatrixSubscriptExpr>(base);
if (matSubscriptE) {
- if (CheckAndReportCommaError(idx))
+ assert(ArgExprs.size() == 1);
+ if (CheckAndReportCommaError(ArgExprs.front()))
return ExprError();
assert(matSubscriptE->isIncomplete() &&
"base has to be an incomplete matrix subscript");
- return CreateBuiltinMatrixSubscriptExpr(
- matSubscriptE->getBase(), matSubscriptE->getRowIdx(), idx, rbLoc);
+ return CreateBuiltinMatrixSubscriptExpr(matSubscriptE->getBase(),
+ matSubscriptE->getRowIdx(),
+ ArgExprs.front(), rbLoc);
+ }
+ if (base->getType()->isWebAssemblyTableType()) {
+ Diag(base->getExprLoc(), diag::err_wasm_table_art)
+ << SourceRange(base->getBeginLoc(), rbLoc) << 3;
+ return ExprError();
}
// Handle any non-overload placeholder types in the base and index
@@ -4730,32 +5136,44 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
// If the base is a matrix type, try to create a new MatrixSubscriptExpr.
if (base->getType()->isMatrixType()) {
- if (CheckAndReportCommaError(idx))
+ assert(ArgExprs.size() == 1);
+ if (CheckAndReportCommaError(ArgExprs.front()))
return ExprError();
- return CreateBuiltinMatrixSubscriptExpr(base, idx, nullptr, rbLoc);
+ return CreateBuiltinMatrixSubscriptExpr(base, ArgExprs.front(), nullptr,
+ rbLoc);
}
- // A comma-expression as the index is deprecated in C++2a onwards.
- if (getLangOpts().CPlusPlus20 &&
- ((isa<BinaryOperator>(idx) && cast<BinaryOperator>(idx)->isCommaOp()) ||
- (isa<CXXOperatorCallExpr>(idx) &&
- cast<CXXOperatorCallExpr>(idx)->getOperator() == OO_Comma))) {
- Diag(idx->getExprLoc(), diag::warn_deprecated_comma_subscript)
- << SourceRange(base->getBeginLoc(), rbLoc);
+ if (ArgExprs.size() == 1 && getLangOpts().CPlusPlus20) {
+ Expr *idx = ArgExprs[0];
+ if ((isa<BinaryOperator>(idx) && cast<BinaryOperator>(idx)->isCommaOp()) ||
+ (isa<CXXOperatorCallExpr>(idx) &&
+ cast<CXXOperatorCallExpr>(idx)->getOperator() == OO_Comma)) {
+ Diag(idx->getExprLoc(), diag::warn_deprecated_comma_subscript)
+ << SourceRange(base->getBeginLoc(), rbLoc);
+ }
}
- if (idx->getType()->isNonOverloadPlaceholderType()) {
- ExprResult result = CheckPlaceholderExpr(idx);
- if (result.isInvalid()) return ExprError();
- idx = result.get();
+ if (ArgExprs.size() == 1 &&
+ ArgExprs[0]->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(ArgExprs[0]);
+ if (result.isInvalid())
+ return ExprError();
+ ArgExprs[0] = result.get();
+ } else {
+ if (CheckArgsForPlaceholders(ArgExprs))
+ return ExprError();
}
// Build an unanalyzed expression if either operand is type-dependent.
- if (getLangOpts().CPlusPlus &&
- (base->isTypeDependent() || idx->isTypeDependent())) {
- return new (Context) ArraySubscriptExpr(base, idx, Context.DependentTy,
- VK_LValue, OK_Ordinary, rbLoc);
+ if (getLangOpts().CPlusPlus && ArgExprs.size() == 1 &&
+ (base->isTypeDependent() ||
+ Expr::hasAnyTypeDependentArguments(ArgExprs)) &&
+ !isa<PackExpansionExpr>(ArgExprs[0])) {
+ return new (Context) ArraySubscriptExpr(
+ base, ArgExprs.front(),
+ getDependentArraySubscriptType(base, ArgExprs.front(), getASTContext()),
+ VK_LValue, OK_Ordinary, rbLoc);
}
// MSDN, property (C++)
@@ -4767,10 +5185,12 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
// indices. In this case, i=p->x[a][b] will be turned into i=p->GetX(a, b),
// and p->x[a][b] = i will be turned into p->PutX(a, b, i);
if (IsMSPropertySubscript) {
+ assert(ArgExprs.size() == 1);
// Build MS property subscript expression if base is MS property reference
// or MS property subscript.
- return new (Context) MSPropertySubscriptExpr(
- base, idx, Context.PseudoObjectTy, VK_LValue, OK_Ordinary, rbLoc);
+ return new (Context)
+ MSPropertySubscriptExpr(base, ArgExprs.front(), Context.PseudoObjectTy,
+ VK_LValue, OK_Ordinary, rbLoc);
}
// Use C++ overloaded-operator rules if either operand has record
@@ -4781,14 +5201,15 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
//
// ObjC pointers have their own subscripting logic that is not tied
// to overload resolution and so should not take this path.
- if (getLangOpts().CPlusPlus &&
- (base->getType()->isRecordType() ||
- (!base->getType()->isObjCObjectPointerType() &&
- idx->getType()->isRecordType()))) {
- return CreateOverloadedArraySubscriptExpr(lbLoc, rbLoc, base, idx);
+ if (getLangOpts().CPlusPlus && !base->getType()->isObjCObjectPointerType() &&
+ ((base->getType()->isRecordType() ||
+ (ArgExprs.size() != 1 || isa<PackExpansionExpr>(ArgExprs[0]) ||
+ ArgExprs[0]->getType()->isRecordType())))) {
+ return CreateOverloadedArraySubscriptExpr(lbLoc, rbLoc, base, ArgExprs);
}
- ExprResult Res = CreateBuiltinArraySubscriptExpr(base, lbLoc, idx, rbLoc);
+ ExprResult Res =
+ CreateBuiltinArraySubscriptExpr(base, lbLoc, ArgExprs.front(), rbLoc);
if (!Res.isInvalid() && isa<ArraySubscriptExpr>(Res.get()))
CheckSubscriptAccessOfNoDeref(cast<ArraySubscriptExpr>(Res.get()));
@@ -4844,7 +5265,7 @@ ExprResult Sema::CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
return nullptr;
}
- if (Optional<llvm::APSInt> Idx =
+ if (std::optional<llvm::APSInt> Idx =
IndexExpr->getIntegerConstantExpr(Context)) {
if ((*Idx < 0 || *Idx >= Dim)) {
Diag(IndexExpr->getBeginLoc(), diag::err_matrix_index_outside_range)
@@ -4924,9 +5345,8 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
SourceLocation ColonLocSecond,
Expr *Length, Expr *Stride,
SourceLocation RBLoc) {
- if (Base->getType()->isPlaceholderType() &&
- !Base->getType()->isSpecificPlaceholderType(
- BuiltinType::OMPArraySection)) {
+ if (Base->hasPlaceholderType() &&
+ !Base->hasPlaceholderType(BuiltinType::OMPArraySection)) {
ExprResult Result = CheckPlaceholderExpr(Base);
if (Result.isInvalid())
return ExprError();
@@ -5094,8 +5514,7 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
}
}
- if (!Base->getType()->isSpecificPlaceholderType(
- BuiltinType::OMPArraySection)) {
+ if (!Base->hasPlaceholderType(BuiltinType::OMPArraySection)) {
ExprResult Result = DefaultFunctionArrayLvalueConversion(Base);
if (Result.isInvalid())
return ExprError();
@@ -5110,7 +5529,7 @@ ExprResult Sema::ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets) {
- if (Base->getType()->isPlaceholderType()) {
+ if (Base->hasPlaceholderType()) {
ExprResult Result = CheckPlaceholderExpr(Base);
if (Result.isInvalid())
return ExprError();
@@ -5135,7 +5554,7 @@ ExprResult Sema::ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SmallVector<Expr *, 4> NewDims;
bool ErrorFound = false;
for (Expr *Dim : Dims) {
- if (Dim->getType()->isPlaceholderType()) {
+ if (Dim->hasPlaceholderType()) {
ExprResult Result = CheckPlaceholderExpr(Dim);
if (Result.isInvalid()) {
ErrorFound = true;
@@ -5250,6 +5669,10 @@ ExprResult Sema::ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
} else {
CurContext->addDecl(VD);
}
+
+ /// Act on the iterator variable declaration.
+ ActOnOpenMPIteratorVarDecl(VD);
+
Expr *Begin = D.Range.Begin;
if (!IsDeclTyDependent && Begin && !Begin->isTypeDependent()) {
ExprResult BeginRes =
@@ -5269,11 +5692,12 @@ ExprResult Sema::ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
IsCorrect = false;
continue;
}
- Optional<llvm::APSInt> Result = Step->getIntegerConstantExpr(Context);
+ std::optional<llvm::APSInt> Result =
+ Step->getIntegerConstantExpr(Context);
// OpenMP 5.0, 2.1.6 Iterators, Restrictions
// If the step expression of a range-specification equals zero, the
// behavior is unspecified.
- if (Result && Result->isNullValue()) {
+ if (Result && Result->isZero()) {
Diag(Step->getExprLoc(), diag::err_omp_iterator_step_constant_zero)
<< Step << Step->getSourceRange();
IsCorrect = false;
@@ -5509,7 +5933,8 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
if (LHSTy->isDependentType() || RHSTy->isDependentType()) {
BaseExpr = LHSExp;
IndexExpr = RHSExp;
- ResultType = Context.DependentTy;
+ ResultType =
+ getDependentArraySubscriptType(LHSExp, RHSExp, getASTContext());
} else if (const PointerType *PTy = LHSTy->getAs<PointerType>()) {
BaseExpr = LHSExp;
IndexExpr = RHSExp;
@@ -5563,6 +5988,33 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Qualifiers Combined = BaseQuals + MemberQuals;
if (Combined != MemberQuals)
ResultType = Context.getQualifiedType(ResultType, Combined);
+ } else if (LHSTy->isBuiltinType() &&
+ LHSTy->getAs<BuiltinType>()->isSveVLSBuiltinType()) {
+ const BuiltinType *BTy = LHSTy->getAs<BuiltinType>();
+ if (BTy->isSVEBool())
+ return ExprError(Diag(LLoc, diag::err_subscript_svbool_t)
+ << LHSExp->getSourceRange() << RHSExp->getSourceRange());
+
+ BaseExpr = LHSExp;
+ IndexExpr = RHSExp;
+ if (getLangOpts().CPlusPlus11 && LHSExp->isPRValue()) {
+ ExprResult Materialized = TemporaryMaterializationConversion(LHSExp);
+ if (Materialized.isInvalid())
+ return ExprError();
+ LHSExp = Materialized.get();
+ }
+ VK = LHSExp->getValueKind();
+ if (VK != VK_PRValue)
+ OK = OK_VectorComponent;
+
+ ResultType = BTy->getSveEltType(Context);
+
+ QualType BaseType = BaseExpr->getType();
+ Qualifiers BaseQuals = BaseType.getQualifiers();
+ Qualifiers MemberQuals = ResultType.getQualifiers();
+ Qualifiers Combined = BaseQuals + MemberQuals;
+ if (Combined != MemberQuals)
+ ResultType = Context.getQualifiedType(ResultType, Combined);
} else if (LHSTy->isArrayType()) {
// If we see an array that wasn't promoted by
// DefaultFunctionArrayLvalueConversion, it must be an array that
@@ -5599,9 +6051,14 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
<< IndexExpr->getSourceRange());
if ((IndexExpr->getType()->isSpecificBuiltinType(BuiltinType::Char_S) ||
- IndexExpr->getType()->isSpecificBuiltinType(BuiltinType::Char_U))
- && !IndexExpr->isTypeDependent())
- Diag(LLoc, diag::warn_subscript_is_char) << IndexExpr->getSourceRange();
+ IndexExpr->getType()->isSpecificBuiltinType(BuiltinType::Char_U)) &&
+ !IndexExpr->isTypeDependent()) {
+ std::optional<llvm::APSInt> IntegerContantExpr =
+ IndexExpr->getIntegerConstantExpr(getASTContext());
+ if (!IntegerContantExpr.has_value() ||
+ IntegerContantExpr.value().isNegative())
+ Diag(LLoc, diag::warn_subscript_is_char) << IndexExpr->getSourceRange();
+ }
// C99 6.5.2.1p1: "shall have type "pointer to *object* type". Similarly,
// C++ [expr.sub]p1: The type "T" shall be a completely-defined object
@@ -5623,6 +6080,7 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
if (!ResultType.hasQualifiers())
VK = VK_PRValue;
} else if (!ResultType->isDependentType() &&
+ !ResultType.isWebAssemblyReferenceType() &&
RequireCompleteSizedType(
LLoc, ResultType,
diag::err_subscript_incomplete_or_sizeless_type, BaseExpr))
@@ -5663,8 +6121,10 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
}
bool Sema::CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
- ParmVarDecl *Param) {
+ ParmVarDecl *Param, Expr *RewrittenInit,
+ bool SkipImmediateInvocations) {
if (Param->hasUnparsedDefaultArg()) {
+ assert(!RewrittenInit && "Should not have a rewritten init expression yet");
// If we've already cleared out the location for the default argument,
// that means we're parsing it right now.
if (!UnparsedDefaultArgLocs.count(Param)) {
@@ -5681,11 +6141,14 @@ bool Sema::CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
return true;
}
- if (Param->hasUninstantiatedDefaultArg() &&
- InstantiateDefaultArgument(CallLoc, FD, Param))
- return true;
+ if (Param->hasUninstantiatedDefaultArg()) {
+ assert(!RewrittenInit && "Should not have a rewitten init expression yet");
+ if (InstantiateDefaultArgument(CallLoc, FD, Param))
+ return true;
+ }
- assert(Param->hasInit() && "default argument but no initializer?");
+ Expr *Init = RewrittenInit ? RewrittenInit : Param->getInit();
+ assert(Init && "default argument but no initializer?");
// If the default expression creates temporaries, we need to
// push them to the current stack of expression temporaries so they'll
@@ -5694,41 +6157,293 @@ bool Sema::CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
// bound temporaries; see the comment in PR5810.
// We don't need to do that with block decls, though, because
// blocks in default argument expression can never capture anything.
- if (auto Init = dyn_cast<ExprWithCleanups>(Param->getInit())) {
+ if (auto *InitWithCleanup = dyn_cast<ExprWithCleanups>(Init)) {
// Set the "needs cleanups" bit regardless of whether there are
// any explicit objects.
- Cleanup.setExprNeedsCleanups(Init->cleanupsHaveSideEffects());
-
+ Cleanup.setExprNeedsCleanups(InitWithCleanup->cleanupsHaveSideEffects());
// Append all the objects to the cleanup list. Right now, this
// should always be a no-op, because blocks in default argument
// expressions should never be able to capture anything.
- assert(!Init->getNumObjects() &&
+ assert(!InitWithCleanup->getNumObjects() &&
"default argument expression has capturing blocks?");
}
-
- // We already type-checked the argument, so we know it works.
- // Just mark all of the declarations in this potentially-evaluated expression
- // as being "referenced".
+ // C++ [expr.const]p15.1:
+ // An expression or conversion is in an immediate function context if it is
+ // potentially evaluated and [...] its innermost enclosing non-block scope
+ // is a function parameter scope of an immediate function.
EnterExpressionEvaluationContext EvalContext(
- *this, ExpressionEvaluationContext::PotentiallyEvaluated, Param);
- MarkDeclarationsReferencedInExpr(Param->getDefaultArg(),
- /*SkipLocalVariables=*/true);
+ *this,
+ FD->isImmediateFunction()
+ ? ExpressionEvaluationContext::ImmediateFunctionContext
+ : ExpressionEvaluationContext::PotentiallyEvaluated,
+ Param);
+ ExprEvalContexts.back().IsCurrentlyCheckingDefaultArgumentOrInitializer =
+ SkipImmediateInvocations;
+ runWithSufficientStackSpace(CallLoc, [&] {
+ MarkDeclarationsReferencedInExpr(Init, /*SkipLocalVariables=*/true);
+ });
return false;
}
+struct ImmediateCallVisitor : public RecursiveASTVisitor<ImmediateCallVisitor> {
+ const ASTContext &Context;
+ ImmediateCallVisitor(const ASTContext &Ctx) : Context(Ctx) {}
+
+ bool HasImmediateCalls = false;
+ bool shouldVisitImplicitCode() const { return true; }
+
+ bool VisitCallExpr(CallExpr *E) {
+ if (const FunctionDecl *FD = E->getDirectCallee())
+ HasImmediateCalls |= FD->isImmediateFunction();
+ return RecursiveASTVisitor<ImmediateCallVisitor>::VisitStmt(E);
+ }
+
+ // SourceLocExpr are not immediate invocations
+ // but CXXDefaultInitExpr/CXXDefaultArgExpr containing a SourceLocExpr
+ // need to be rebuilt so that they refer to the correct SourceLocation and
+ // DeclContext.
+ bool VisitSourceLocExpr(SourceLocExpr *E) {
+ HasImmediateCalls = true;
+ return RecursiveASTVisitor<ImmediateCallVisitor>::VisitStmt(E);
+ }
+
+ // A nested lambda might have parameters with immediate invocations
+ // in their default arguments.
+ // The compound statement is not visited (as it does not constitute a
+ // subexpression).
+ // FIXME: We should consider visiting and transforming captures
+ // with init expressions.
+ bool VisitLambdaExpr(LambdaExpr *E) {
+ return VisitCXXMethodDecl(E->getCallOperator());
+ }
+
+ // Blocks don't support default parameters, and, as for lambdas,
+ // we don't consider their body a subexpression.
+ bool VisitBlockDecl(BlockDecl *B) { return false; }
+
+ bool VisitCompoundStmt(CompoundStmt *B) { return false; }
+
+ bool VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
+ return TraverseStmt(E->getExpr());
+ }
+
+ bool VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E) {
+ return TraverseStmt(E->getExpr());
+ }
+};
+
+struct EnsureImmediateInvocationInDefaultArgs
+ : TreeTransform<EnsureImmediateInvocationInDefaultArgs> {
+ EnsureImmediateInvocationInDefaultArgs(Sema &SemaRef)
+ : TreeTransform(SemaRef) {}
+
+ // Lambda can only have immediate invocations in the default
+ // args of their parameters, which is transformed upon calling the closure.
+ // The body is not a subexpression, so we have nothing to do.
+ // FIXME: Immediate calls in capture initializers should be transformed.
+ ExprResult TransformLambdaExpr(LambdaExpr *E) { return E; }
+ ExprResult TransformBlockExpr(BlockExpr *E) { return E; }
+
+ // Make sure we don't rebuild the this pointer as it would
+ // cause it to incorrectly point it to the outermost class
+ // in the case of nested struct initialization.
+ ExprResult TransformCXXThisExpr(CXXThisExpr *E) { return E; }
+};
+
ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
- FunctionDecl *FD, ParmVarDecl *Param) {
+ FunctionDecl *FD, ParmVarDecl *Param,
+ Expr *Init) {
assert(Param->hasDefaultArg() && "can't build nonexistent default arg");
- if (CheckCXXDefaultArgExpr(CallLoc, FD, Param))
+
+ bool NestedDefaultChecking = isCheckingDefaultArgumentOrInitializer();
+
+ std::optional<ExpressionEvaluationContextRecord::InitializationContext>
+ InitializationContext =
+ OutermostDeclarationWithDelayedImmediateInvocations();
+ if (!InitializationContext.has_value())
+ InitializationContext.emplace(CallLoc, Param, CurContext);
+
+ if (!Init && !Param->hasUnparsedDefaultArg()) {
+ // Mark that we are replacing a default argument first.
+ // If we are instantiating a template we won't have to
+ // retransform immediate calls.
+ // C++ [expr.const]p15.1:
+ // An expression or conversion is in an immediate function context if it
+ // is potentially evaluated and [...] its innermost enclosing non-block
+ // scope is a function parameter scope of an immediate function.
+ EnterExpressionEvaluationContext EvalContext(
+ *this,
+ FD->isImmediateFunction()
+ ? ExpressionEvaluationContext::ImmediateFunctionContext
+ : ExpressionEvaluationContext::PotentiallyEvaluated,
+ Param);
+
+ if (Param->hasUninstantiatedDefaultArg()) {
+ if (InstantiateDefaultArgument(CallLoc, FD, Param))
+ return ExprError();
+ }
+ // CWG2631
+ // An immediate invocation that is not evaluated where it appears is
+ // evaluated and checked for whether it is a constant expression at the
+ // point where the enclosing initializer is used in a function call.
+ ImmediateCallVisitor V(getASTContext());
+ if (!NestedDefaultChecking)
+ V.TraverseDecl(Param);
+ if (V.HasImmediateCalls) {
+ ExprEvalContexts.back().DelayedDefaultInitializationContext = {
+ CallLoc, Param, CurContext};
+ EnsureImmediateInvocationInDefaultArgs Immediate(*this);
+ ExprResult Res;
+ runWithSufficientStackSpace(CallLoc, [&] {
+ Res = Immediate.TransformInitializer(Param->getInit(),
+ /*NotCopy=*/false);
+ });
+ if (Res.isInvalid())
+ return ExprError();
+ Res = ConvertParamDefaultArgument(Param, Res.get(),
+ Res.get()->getBeginLoc());
+ if (Res.isInvalid())
+ return ExprError();
+ Init = Res.get();
+ }
+ }
+
+ if (CheckCXXDefaultArgExpr(
+ CallLoc, FD, Param, Init,
+ /*SkipImmediateInvocations=*/NestedDefaultChecking))
return ExprError();
- return CXXDefaultArgExpr::Create(Context, CallLoc, Param, CurContext);
+
+ return CXXDefaultArgExpr::Create(Context, InitializationContext->Loc, Param,
+ Init, InitializationContext->Context);
+}
+
+ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
+ assert(Field->hasInClassInitializer());
+
+ // If we might have already tried and failed to instantiate, don't try again.
+ if (Field->isInvalidDecl())
+ return ExprError();
+
+ CXXThisScopeRAII This(*this, Field->getParent(), Qualifiers());
+
+ auto *ParentRD = cast<CXXRecordDecl>(Field->getParent());
+
+ std::optional<ExpressionEvaluationContextRecord::InitializationContext>
+ InitializationContext =
+ OutermostDeclarationWithDelayedImmediateInvocations();
+ if (!InitializationContext.has_value())
+ InitializationContext.emplace(Loc, Field, CurContext);
+
+ Expr *Init = nullptr;
+
+ bool NestedDefaultChecking = isCheckingDefaultArgumentOrInitializer();
+
+ EnterExpressionEvaluationContext EvalContext(
+ *this, ExpressionEvaluationContext::PotentiallyEvaluated, Field);
+
+ if (!Field->getInClassInitializer()) {
+ // Maybe we haven't instantiated the in-class initializer. Go check the
+ // pattern FieldDecl to see if it has one.
+ if (isTemplateInstantiation(ParentRD->getTemplateSpecializationKind())) {
+ CXXRecordDecl *ClassPattern = ParentRD->getTemplateInstantiationPattern();
+ DeclContext::lookup_result Lookup =
+ ClassPattern->lookup(Field->getDeclName());
+
+ FieldDecl *Pattern = nullptr;
+ for (auto *L : Lookup) {
+ if ((Pattern = dyn_cast<FieldDecl>(L)))
+ break;
+ }
+ assert(Pattern && "We must have set the Pattern!");
+ if (!Pattern->hasInClassInitializer() ||
+ InstantiateInClassInitializer(Loc, Field, Pattern,
+ getTemplateInstantiationArgs(Field))) {
+ Field->setInvalidDecl();
+ return ExprError();
+ }
+ }
+ }
+
+ // CWG2631
+ // An immediate invocation that is not evaluated where it appears is
+ // evaluated and checked for whether it is a constant expression at the
+ // point where the enclosing initializer is used in a [...] a constructor
+ // definition, or an aggregate initialization.
+ ImmediateCallVisitor V(getASTContext());
+ if (!NestedDefaultChecking)
+ V.TraverseDecl(Field);
+ if (V.HasImmediateCalls) {
+ ExprEvalContexts.back().DelayedDefaultInitializationContext = {Loc, Field,
+ CurContext};
+ ExprEvalContexts.back().IsCurrentlyCheckingDefaultArgumentOrInitializer =
+ NestedDefaultChecking;
+
+ EnsureImmediateInvocationInDefaultArgs Immediate(*this);
+ ExprResult Res;
+ runWithSufficientStackSpace(Loc, [&] {
+ Res = Immediate.TransformInitializer(Field->getInClassInitializer(),
+ /*CXXDirectInit=*/false);
+ });
+ if (!Res.isInvalid())
+ Res = ConvertMemberDefaultInitExpression(Field, Res.get(), Loc);
+ if (Res.isInvalid()) {
+ Field->setInvalidDecl();
+ return ExprError();
+ }
+ Init = Res.get();
+ }
+
+ if (Field->getInClassInitializer()) {
+ Expr *E = Init ? Init : Field->getInClassInitializer();
+ if (!NestedDefaultChecking)
+ runWithSufficientStackSpace(Loc, [&] {
+ MarkDeclarationsReferencedInExpr(E, /*SkipLocalVariables=*/false);
+ });
+ // C++11 [class.base.init]p7:
+ // The initialization of each base and member constitutes a
+ // full-expression.
+ ExprResult Res = ActOnFinishFullExpr(E, /*DiscardedValue=*/false);
+ if (Res.isInvalid()) {
+ Field->setInvalidDecl();
+ return ExprError();
+ }
+ Init = Res.get();
+
+ return CXXDefaultInitExpr::Create(Context, InitializationContext->Loc,
+ Field, InitializationContext->Context,
+ Init);
+ }
+
+ // DR1351:
+ // If the brace-or-equal-initializer of a non-static data member
+ // invokes a defaulted default constructor of its class or of an
+ // enclosing class in a potentially evaluated subexpression, the
+ // program is ill-formed.
+ //
+ // This resolution is unworkable: the exception specification of the
+ // default constructor can be needed in an unevaluated context, in
+ // particular, in the operand of a noexcept-expression, and we can be
+ // unable to compute an exception specification for an enclosed class.
+ //
+ // Any attempt to resolve the exception specification of a defaulted default
+ // constructor before the initializer is lexically complete will ultimately
+ // come here at which point we can diagnose it.
+ RecordDecl *OutermostClass = ParentRD->getOuterLexicalRecordContext();
+ Diag(Loc, diag::err_default_member_initializer_not_yet_parsed)
+ << OutermostClass << Field;
+ Diag(Field->getEndLoc(),
+ diag::note_default_member_initializer_not_yet_parsed);
+ // Recover by marking the field invalid, unless we're in a SFINAE context.
+ if (!isSFINAEContext())
+ Field->setInvalidDecl();
+ return ExprError();
}
Sema::VariadicCallType
Sema::getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto,
Expr *Fn) {
if (Proto && Proto->isVariadic()) {
- if (dyn_cast_or_null<CXXConstructorDecl>(FDecl))
+ if (isa_and_nonnull<CXXConstructorDecl>(FDecl))
return VariadicConstructor;
else if (Fn && Fn->getType()->isBlockPointerType())
return VariadicBlock;
@@ -5828,6 +6543,9 @@ Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
// C99 6.5.2.2p7 - the arguments are implicitly converted, as if by
// assignment, to the types of the corresponding parameter, ...
+ bool HasExplicitObjectParameter =
+ FDecl && FDecl->hasCXXExplicitFunctionObjectParameter();
+ unsigned ExplicitObjectParameterOffset = HasExplicitObjectParameter ? 1 : 0;
unsigned NumParams = Proto->getNumParams();
bool Invalid = false;
unsigned MinArgs = FDecl ? FDecl->getMinRequiredArguments() : NumParams;
@@ -5846,25 +6564,34 @@ Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
MinArgs == NumParams && !Proto->isVariadic()
? diag::err_typecheck_call_too_few_args_suggest
: diag::err_typecheck_call_too_few_args_at_least_suggest;
- diagnoseTypo(TC, PDiag(diag_id) << FnKind << MinArgs
- << static_cast<unsigned>(Args.size())
- << TC.getCorrectionRange());
- } else if (MinArgs == 1 && FDecl && FDecl->getParamDecl(0)->getDeclName())
+ diagnoseTypo(
+ TC, PDiag(diag_id)
+ << FnKind << MinArgs - ExplicitObjectParameterOffset
+ << static_cast<unsigned>(Args.size()) -
+ ExplicitObjectParameterOffset
+ << HasExplicitObjectParameter << TC.getCorrectionRange());
+ } else if (MinArgs - ExplicitObjectParameterOffset == 1 && FDecl &&
+ FDecl->getParamDecl(ExplicitObjectParameterOffset)
+ ->getDeclName())
Diag(RParenLoc,
MinArgs == NumParams && !Proto->isVariadic()
? diag::err_typecheck_call_too_few_args_one
: diag::err_typecheck_call_too_few_args_at_least_one)
- << FnKind << FDecl->getParamDecl(0) << Fn->getSourceRange();
+ << FnKind << FDecl->getParamDecl(ExplicitObjectParameterOffset)
+ << HasExplicitObjectParameter << Fn->getSourceRange();
else
Diag(RParenLoc, MinArgs == NumParams && !Proto->isVariadic()
? diag::err_typecheck_call_too_few_args
: diag::err_typecheck_call_too_few_args_at_least)
- << FnKind << MinArgs << static_cast<unsigned>(Args.size())
- << Fn->getSourceRange();
+ << FnKind << MinArgs - ExplicitObjectParameterOffset
+ << static_cast<unsigned>(Args.size()) -
+ ExplicitObjectParameterOffset
+ << HasExplicitObjectParameter << Fn->getSourceRange();
// Emit the location of the prototype.
if (!TC && FDecl && !FDecl->getBuiltinID() && !IsExecConfig)
- Diag(FDecl->getLocation(), diag::note_callee_decl) << FDecl;
+ Diag(FDecl->getLocation(), diag::note_callee_decl)
+ << FDecl << FDecl->getParametersSourceRange();
return true;
}
@@ -5884,17 +6611,23 @@ Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
MinArgs == NumParams && !Proto->isVariadic()
? diag::err_typecheck_call_too_many_args_suggest
: diag::err_typecheck_call_too_many_args_at_most_suggest;
- diagnoseTypo(TC, PDiag(diag_id) << FnKind << NumParams
- << static_cast<unsigned>(Args.size())
- << TC.getCorrectionRange());
- } else if (NumParams == 1 && FDecl &&
- FDecl->getParamDecl(0)->getDeclName())
+ diagnoseTypo(
+ TC, PDiag(diag_id)
+ << FnKind << NumParams - ExplicitObjectParameterOffset
+ << static_cast<unsigned>(Args.size()) -
+ ExplicitObjectParameterOffset
+ << HasExplicitObjectParameter << TC.getCorrectionRange());
+ } else if (NumParams - ExplicitObjectParameterOffset == 1 && FDecl &&
+ FDecl->getParamDecl(ExplicitObjectParameterOffset)
+ ->getDeclName())
Diag(Args[NumParams]->getBeginLoc(),
MinArgs == NumParams
? diag::err_typecheck_call_too_many_args_one
: diag::err_typecheck_call_too_many_args_at_most_one)
- << FnKind << FDecl->getParamDecl(0)
- << static_cast<unsigned>(Args.size()) << Fn->getSourceRange()
+ << FnKind << FDecl->getParamDecl(ExplicitObjectParameterOffset)
+ << static_cast<unsigned>(Args.size()) -
+ ExplicitObjectParameterOffset
+ << HasExplicitObjectParameter << Fn->getSourceRange()
<< SourceRange(Args[NumParams]->getBeginLoc(),
Args.back()->getEndLoc());
else
@@ -5902,14 +6635,17 @@ Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
MinArgs == NumParams
? diag::err_typecheck_call_too_many_args
: diag::err_typecheck_call_too_many_args_at_most)
- << FnKind << NumParams << static_cast<unsigned>(Args.size())
- << Fn->getSourceRange()
+ << FnKind << NumParams - ExplicitObjectParameterOffset
+ << static_cast<unsigned>(Args.size()) -
+ ExplicitObjectParameterOffset
+ << HasExplicitObjectParameter << Fn->getSourceRange()
<< SourceRange(Args[NumParams]->getBeginLoc(),
Args.back()->getEndLoc());
// Emit the location of the prototype.
if (!TC && FDecl && !FDecl->getBuiltinID() && !IsExecConfig)
- Diag(FDecl->getLocation(), diag::note_callee_decl) << FDecl;
+ Diag(FDecl->getLocation(), diag::note_callee_decl)
+ << FDecl << FDecl->getParametersSourceRange();
// This deletes the extra arguments.
Call->shrinkNumArgs(NumParams);
@@ -6063,7 +6799,7 @@ Sema::CheckStaticArrayArgument(SourceLocation CallLoc,
QualType OrigTy = Param->getOriginalType();
const ArrayType *AT = Context.getAsArrayType(OrigTy);
- if (!AT || AT->getSizeModifier() != ArrayType::Static)
+ if (!AT || AT->getSizeModifier() != ArraySizeModifier::Static)
return;
if (ArgExpr->isNullPointerConstant(Context,
@@ -6094,9 +6830,10 @@ Sema::CheckStaticArrayArgument(SourceLocation CallLoc,
return;
}
- Optional<CharUnits> ArgSize =
+ std::optional<CharUnits> ArgSize =
getASTContext().getTypeSizeInCharsIfKnown(ArgCAT);
- Optional<CharUnits> ParmSize = getASTContext().getTypeSizeInCharsIfKnown(CAT);
+ std::optional<CharUnits> ParmSize =
+ getASTContext().getTypeSizeInCharsIfKnown(CAT);
if (ArgSize && ParmSize && *ArgSize < *ParmSize) {
Diag(CallLoc, diag::warn_static_array_too_small)
<< ArgExpr->getSourceRange() << (unsigned)ArgSize->getQuantity()
@@ -6134,6 +6871,8 @@ static bool isPlaceholderToRemoveAsArg(QualType type) {
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define PLACEHOLDER_TYPE(ID, SINGLETON_ID)
#define BUILTIN_TYPE(ID, SINGLETON_ID) case BuiltinType::ID:
#include "clang/AST/BuiltinTypes.def"
@@ -6171,15 +6910,13 @@ static bool isPlaceholderToRemoveAsArg(QualType type) {
llvm_unreachable("bad builtin type kind");
}
-/// Check an argument list for placeholders that we won't try to
-/// handle later.
-static bool checkArgsForPlaceholders(Sema &S, MultiExprArg args) {
+bool Sema::CheckArgsForPlaceholders(MultiExprArg args) {
// Apply this processing to all the arguments at once instead of
// dying at the first failure.
bool hasInvalid = false;
for (size_t i = 0, e = args.size(); i != e; i++) {
if (isPlaceholderToRemoveAsArg(args[i]->getType())) {
- ExprResult result = S.CheckPlaceholderExpr(args[i]);
+ ExprResult result = CheckPlaceholderExpr(args[i]);
if (result.isInvalid()) hasInvalid = true;
else args[i] = result.get();
}
@@ -6222,10 +6959,10 @@ static FunctionDecl *rewriteBuiltinFunctionDecl(Sema *Sema, ASTContext &Context,
return nullptr;
Expr *Arg = ArgRes.get();
QualType ArgType = Arg->getType();
- if (!ParamType->isPointerType() ||
- ParamType.hasAddressSpace() ||
+ if (!ParamType->isPointerType() || ParamType.hasAddressSpace() ||
!ArgType->isPointerType() ||
- !ArgType->getPointeeType().hasAddressSpace()) {
+ !ArgType->getPointeeType().hasAddressSpace() ||
+ isPtrSizeAddressSpace(ArgType->getPointeeType().getAddressSpace())) {
OverloadParams.push_back(ParamType);
continue;
}
@@ -6249,14 +6986,12 @@ static FunctionDecl *rewriteBuiltinFunctionDecl(Sema *Sema, ASTContext &Context,
QualType OverloadTy = Context.getFunctionType(FT->getReturnType(),
OverloadParams, EPI);
DeclContext *Parent = FDecl->getParent();
- FunctionDecl *OverloadDecl = FunctionDecl::Create(Context, Parent,
- FDecl->getLocation(),
- FDecl->getLocation(),
- FDecl->getIdentifier(),
- OverloadTy,
- /*TInfo=*/nullptr,
- SC_Extern, false,
- /*hasPrototype=*/true);
+ FunctionDecl *OverloadDecl = FunctionDecl::Create(
+ Context, Parent, FDecl->getLocation(), FDecl->getLocation(),
+ FDecl->getIdentifier(), OverloadTy,
+ /*TInfo=*/nullptr, SC_Extern, Sema->getCurFPFeatures().isFPConstrained(),
+ false,
+ /*hasPrototype=*/true);
SmallVector<ParmVarDecl*, 16> Params;
FT = cast<FunctionProtoType>(OverloadTy);
for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
@@ -6381,6 +7116,38 @@ tryImplicitlyCaptureThisIfImplicitMemberFunctionAccessWithDependentArgs(
}
}
+// Once a call is fully resolved, warn for unqualified calls to specific
+// C++ standard functions, like move and forward.
+static void DiagnosedUnqualifiedCallsToStdFunctions(Sema &S,
+ const CallExpr *Call) {
+ // We are only checking unary move and forward so exit early here.
+ if (Call->getNumArgs() != 1)
+ return;
+
+ const Expr *E = Call->getCallee()->IgnoreParenImpCasts();
+ if (!E || isa<UnresolvedLookupExpr>(E))
+ return;
+ const DeclRefExpr *DRE = dyn_cast_if_present<DeclRefExpr>(E);
+ if (!DRE || !DRE->getLocation().isValid())
+ return;
+
+ if (DRE->getQualifier())
+ return;
+
+ const FunctionDecl *FD = Call->getDirectCallee();
+ if (!FD)
+ return;
+
+ // Only warn for some functions deemed more frequent or problematic.
+ unsigned BuiltinID = FD->getBuiltinID();
+ if (BuiltinID != Builtin::BImove && BuiltinID != Builtin::BIforward)
+ return;
+
+ S.Diag(DRE->getLocation(), diag::warn_unqualified_call_to_std_cast_function)
+ << FD->getQualifiedNameAsString()
+ << FixItHint::CreateInsertion(DRE->getLocation(), "std::");
+}
+
ExprResult Sema::ActOnCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig) {
@@ -6392,20 +7159,22 @@ ExprResult Sema::ActOnCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
// Diagnose uses of the C++20 "ADL-only template-id call" feature in earlier
// language modes.
- if (auto *ULE = dyn_cast<UnresolvedLookupExpr>(Fn)) {
- if (ULE->hasExplicitTemplateArgs() &&
- ULE->decls_begin() == ULE->decls_end()) {
- Diag(Fn->getExprLoc(), getLangOpts().CPlusPlus20
- ? diag::warn_cxx17_compat_adl_only_template_id
- : diag::ext_adl_only_template_id)
- << ULE->getName();
- }
+ if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(Fn);
+ ULE && ULE->hasExplicitTemplateArgs() &&
+ ULE->decls_begin() == ULE->decls_end()) {
+ Diag(Fn->getExprLoc(), getLangOpts().CPlusPlus20
+ ? diag::warn_cxx17_compat_adl_only_template_id
+ : diag::ext_adl_only_template_id)
+ << ULE->getName();
}
if (LangOpts.OpenMP)
Call = ActOnOpenMPCall(Call, Scope, LParenLoc, ArgExprs, RParenLoc,
ExecConfig);
-
+ if (LangOpts.CPlusPlus) {
+ if (const auto *CE = dyn_cast<CallExpr>(Call.get()))
+ DiagnosedUnqualifiedCallsToStdFunctions(*this, CE);
+ }
return Call;
}
@@ -6421,7 +7190,7 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
if (Result.isInvalid()) return ExprError();
Fn = Result.get();
- if (checkArgsForPlaceholders(*this, ArgExprs))
+ if (CheckArgsForPlaceholders(ArgExprs))
return ExprError();
if (getLangOpts().CPlusPlus) {
@@ -6476,7 +7245,8 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
if (Fn->getType() == Context.BoundMemberTy) {
return BuildCallToMemberFunction(Scope, Fn, LParenLoc, ArgExprs,
- RParenLoc, AllowRecovery);
+ RParenLoc, ExecConfig, IsExecConfig,
+ AllowRecovery);
}
}
@@ -6495,7 +7265,8 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
Scope, Fn, ULE, LParenLoc, ArgExprs, RParenLoc, ExecConfig,
/*AllowTypoCorrection=*/true, find.IsAddressOfOperand);
return BuildCallToMemberFunction(Scope, Fn, LParenLoc, ArgExprs,
- RParenLoc, AllowRecovery);
+ RParenLoc, ExecConfig, IsExecConfig,
+ AllowRecovery);
}
}
@@ -6534,8 +7305,8 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
nullptr, DRE->isNonOdrUse());
}
}
- } else if (isa<MemberExpr>(NakedFn))
- NDecl = cast<MemberExpr>(NakedFn)->getMemberDecl();
+ } else if (auto *ME = dyn_cast<MemberExpr>(NakedFn))
+ NDecl = ME->getMemberDecl();
if (FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(NDecl)) {
if (CallingNDeclIndirectly && !checkAddressOfFunctionIsAvailable(
@@ -6543,6 +7314,57 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
return ExprError();
checkDirectCallValidity(*this, Fn, FD, ArgExprs);
+
+ // If this expression is a call to a builtin function in HIP device
+ // compilation, allow a pointer-type argument to default address space to be
+ // passed as a pointer-type parameter to a non-default address space.
+ // If Arg is declared in the default address space and Param is declared
+ // in a non-default address space, perform an implicit address space cast to
+ // the parameter type.
+ if (getLangOpts().HIP && getLangOpts().CUDAIsDevice && FD &&
+ FD->getBuiltinID()) {
+ for (unsigned Idx = 0; Idx < FD->param_size(); ++Idx) {
+ ParmVarDecl *Param = FD->getParamDecl(Idx);
+ if (!ArgExprs[Idx] || !Param || !Param->getType()->isPointerType() ||
+ !ArgExprs[Idx]->getType()->isPointerType())
+ continue;
+
+ auto ParamAS = Param->getType()->getPointeeType().getAddressSpace();
+ auto ArgTy = ArgExprs[Idx]->getType();
+ auto ArgPtTy = ArgTy->getPointeeType();
+ auto ArgAS = ArgPtTy.getAddressSpace();
+
+ // Add address space cast if target address spaces are different
+ bool NeedImplicitASC =
+ ParamAS != LangAS::Default && // Pointer params in generic AS don't need special handling.
+ ( ArgAS == LangAS::Default || // We do allow implicit conversion from generic AS
+ // or from specific AS which has target AS matching that of Param.
+ getASTContext().getTargetAddressSpace(ArgAS) == getASTContext().getTargetAddressSpace(ParamAS));
+ if (!NeedImplicitASC)
+ continue;
+
+ // First, ensure that the Arg is an RValue.
+ if (ArgExprs[Idx]->isGLValue()) {
+ ArgExprs[Idx] = ImplicitCastExpr::Create(
+ Context, ArgExprs[Idx]->getType(), CK_NoOp, ArgExprs[Idx],
+ nullptr, VK_PRValue, FPOptionsOverride());
+ }
+
+ // Construct a new arg type with address space of Param
+ Qualifiers ArgPtQuals = ArgPtTy.getQualifiers();
+ ArgPtQuals.setAddressSpace(ParamAS);
+ auto NewArgPtTy =
+ Context.getQualifiedType(ArgPtTy.getUnqualifiedType(), ArgPtQuals);
+ auto NewArgTy =
+ Context.getQualifiedType(Context.getPointerType(NewArgPtTy),
+ ArgTy.getQualifiers());
+
+ // Finally perform an implicit address space cast
+ ArgExprs[Idx] = ImpCastExprToType(ArgExprs[Idx], NewArgTy,
+ CK_AddressSpaceConversion)
+ .get();
+ }
+ }
}
if (Context.isDependenceAllowed() &&
@@ -6552,13 +7374,8 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
llvm::any_of(ArgExprs,
[](clang::Expr *E) { return E->containsErrors(); })) &&
"should only occur in error-recovery path.");
- QualType ReturnType =
- llvm::isa_and_nonnull<FunctionDecl>(NDecl)
- ? cast<FunctionDecl>(NDecl)->getCallResultType()
- : Context.DependentTy;
- return CallExpr::Create(Context, Fn, ArgExprs, ReturnType,
- Expr::getValueKindForType(ReturnType), RParenLoc,
- CurFPFeatureOverrides());
+ return CallExpr::Create(Context, Fn, ArgExprs, Context.DependentTy,
+ VK_PRValue, RParenLoc, CurFPFeatureOverrides());
}
return BuildResolvedCallExpr(Fn, NDecl, LParenLoc, ArgExprs, RParenLoc,
ExecConfig, IsExecConfig);
@@ -6662,11 +7479,18 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
Diag(FDecl->getLocation(), diag::note_callee_decl) << FDecl;
}
}
- if (Caller->hasAttr<AnyX86InterruptAttr>() &&
- ((!FDecl || !FDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>()))) {
- Diag(Fn->getExprLoc(), diag::warn_anyx86_interrupt_regsave);
- if (FDecl)
- Diag(FDecl->getLocation(), diag::note_callee_decl) << FDecl;
+ if (Caller->hasAttr<AnyX86InterruptAttr>() ||
+ Caller->hasAttr<AnyX86NoCallerSavedRegistersAttr>()) {
+ const TargetInfo &TI = Context.getTargetInfo();
+ bool HasNonGPRRegisters =
+ TI.hasFeature("sse") || TI.hasFeature("x87") || TI.hasFeature("mmx");
+ if (HasNonGPRRegisters &&
+ (!FDecl || !FDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())) {
+ Diag(Fn->getExprLoc(), diag::warn_anyx86_excessive_regsave)
+ << (Caller->hasAttr<AnyX86InterruptAttr>() ? 0 : 1);
+ if (FDecl)
+ Diag(FDecl->getLocation(), diag::note_callee_decl) << FDecl;
+ }
}
}
@@ -6756,7 +7580,7 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
TheCall = dyn_cast<CallExpr>(Result.get());
bool CorrectedTypos = TheCall != TheOldCall;
if (!TheCall) return Result;
- Args = llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs());
+ Args = llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs());
// A new call expression node was created if some typos were corrected.
// However it may not have been constructed with enough storage. In this
@@ -6810,6 +7634,16 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
TheCall->setType(FuncT->getCallResultType(Context));
TheCall->setValueKind(Expr::getValueKindForType(FuncT->getReturnType()));
+ // WebAssembly tables can't be used as arguments.
+ if (Context.getTargetInfo().getTriple().isWasm()) {
+ for (const Expr *Arg : Args) {
+ if (Arg && Arg->getType()->isWebAssemblyTableType()) {
+ return ExprError(Diag(Arg->getExprLoc(),
+ diag::err_wasm_table_as_function_parameter));
+ }
+ }
+ }
+
if (Proto) {
if (ConvertArgumentsForCall(TheCall, Fn, FDecl, Proto, Args, RParenLoc,
IsExecConfig))
@@ -6834,6 +7668,23 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
Proto = FDecl->getType()->getAs<FunctionProtoType>();
}
+ // If we still haven't found a prototype to use but there are arguments to
+ // the call, diagnose this as calling a function without a prototype.
+ // However, if we found a function declaration, check to see if
+ // -Wdeprecated-non-prototype was disabled where the function was declared.
+ // If so, we will silence the diagnostic here on the assumption that this
+ // interface is intentional and the user knows what they're doing. We will
+ // also silence the diagnostic if there is a function declaration but it
+ // was implicitly defined (the user already gets diagnostics about the
+ // creation of the implicit function declaration, so the additional warning
+ // is not helpful).
+ if (!Proto && !Args.empty() &&
+ (!FDecl || (!FDecl->isImplicit() &&
+ !Diags.isIgnored(diag::warn_strict_uses_without_prototype,
+ FDecl->getLocation()))))
+ Diag(LParenLoc, diag::warn_strict_uses_without_prototype)
+ << (FDecl != nullptr) << FDecl;
+
// Promote the arguments (C99 6.5.2.2p6).
for (unsigned i = 0, e = Args.size(); i != e; i++) {
Expr *Arg = Args[i];
@@ -6867,9 +7718,9 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
}
if (CXXMethodDecl *Method = dyn_cast_or_null<CXXMethodDecl>(FDecl))
- if (!Method->isStatic())
+ if (Method->isImplicitObjectMemberFunction())
return ExprError(Diag(LParenLoc, diag::err_member_call_without_object)
- << Fn->getSourceRange());
+ << Fn->getSourceRange() << 0);
// Check for sentinels
if (NDecl)
@@ -6933,10 +7784,23 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
SourceRange(LParenLoc, LiteralExpr->getSourceRange().getEnd())))
return ExprError();
if (literalType->isVariableArrayType()) {
- if (!tryToFixVariablyModifiedVarType(TInfo, literalType, LParenLoc,
- diag::err_variable_object_no_init)) {
+ // C23 6.7.10p4: An entity of variable length array type shall not be
+ // initialized except by an empty initializer.
+ //
+ // The C extension warnings are issued from ParseBraceInitializer() and
+ // do not need to be issued here. However, we continue to issue an error
+ // in the case there are initializers or we are compiling C++. We allow
+ // use of VLAs in C++, but it's not clear we want to allow {} to zero
+ // init a VLA in C++ in all cases (such as with non-trivial constructors).
+ // FIXME: should we allow this construct in C++ when it makes sense to do
+ // so?
+ std::optional<unsigned> NumInits;
+ if (const auto *ILE = dyn_cast<InitListExpr>(LiteralExpr))
+ NumInits = ILE->getNumInits();
+ if ((LangOpts.CPlusPlus || NumInits.value_or(0)) &&
+ !tryToFixVariablyModifiedVarType(TInfo, literalType, LParenLoc,
+ diag::err_variable_object_no_init))
return ExprError();
- }
}
} else if (!literalType->isDependentType() &&
RequireCompleteType(LParenLoc, literalType,
@@ -7394,12 +8258,32 @@ bool Sema::isValidSveBitcast(QualType srcTy, QualType destTy) {
assert(srcTy->isVectorType() || destTy->isVectorType());
auto ValidScalableConversion = [](QualType FirstType, QualType SecondType) {
- if (!FirstType->isSizelessBuiltinType())
+ if (!FirstType->isSVESizelessBuiltinType())
return false;
const auto *VecTy = SecondType->getAs<VectorType>();
- return VecTy &&
- VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector;
+ return VecTy && VecTy->getVectorKind() == VectorKind::SveFixedLengthData;
+ };
+
+ return ValidScalableConversion(srcTy, destTy) ||
+ ValidScalableConversion(destTy, srcTy);
+}
+
+/// Are the two types RVV-bitcast-compatible types? I.e. is bitcasting from the
+/// first RVV type (e.g. an RVV scalable type) to the second type (e.g. an RVV
+/// VLS type) allowed?
+///
+/// This will also return false if the two given types do not make sense from
+/// the perspective of RVV bitcasts.
+bool Sema::isValidRVVBitcast(QualType srcTy, QualType destTy) {
+ assert(srcTy->isVectorType() || destTy->isVectorType());
+
+ auto ValidScalableConversion = [](QualType FirstType, QualType SecondType) {
+ if (!FirstType->isRVVSizelessBuiltinType())
+ return false;
+
+ const auto *VecTy = SecondType->getAs<VectorType>();
+ return VecTy && VecTy->getVectorKind() == VectorKind::RVVFixedLengthData;
};
return ValidScalableConversion(srcTy, destTy) ||
@@ -7438,6 +8322,30 @@ bool Sema::areVectorTypesSameSize(QualType SrcTy, QualType DestTy) {
return (SrcLen * SrcEltSize == DestLen * DestEltSize);
}
+// This returns true if at least one of the types is an altivec vector.
+bool Sema::anyAltivecTypes(QualType SrcTy, QualType DestTy) {
+ assert((DestTy->isVectorType() || SrcTy->isVectorType()) &&
+ "expected at least one type to be a vector here");
+
+ bool IsSrcTyAltivec =
+ SrcTy->isVectorType() && ((SrcTy->castAs<VectorType>()->getVectorKind() ==
+ VectorKind::AltiVecVector) ||
+ (SrcTy->castAs<VectorType>()->getVectorKind() ==
+ VectorKind::AltiVecBool) ||
+ (SrcTy->castAs<VectorType>()->getVectorKind() ==
+ VectorKind::AltiVecPixel));
+
+ bool IsDestTyAltivec = DestTy->isVectorType() &&
+ ((DestTy->castAs<VectorType>()->getVectorKind() ==
+ VectorKind::AltiVecVector) ||
+ (DestTy->castAs<VectorType>()->getVectorKind() ==
+ VectorKind::AltiVecBool) ||
+ (DestTy->castAs<VectorType>()->getVectorKind() ==
+ VectorKind::AltiVecPixel));
+
+ return (IsSrcTyAltivec || IsDestTyAltivec);
+}
+
/// Are the two types lax-compatible vector types? That is, given
/// that one of them is a vector, do they have equal storage sizes,
/// where the storage size is the number of elements times the element
@@ -7659,8 +8567,7 @@ Sema::ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
CastExpr = Result.get();
}
- if (getLangOpts().CPlusPlus && !castType->isVoidType() &&
- !getSourceManager().isInSystemMacro(LParenLoc))
+ if (getLangOpts().CPlusPlus && !castType->isVoidType())
Diag(LParenLoc, diag::warn_old_style_cast) << CastExpr->getSourceRange();
CheckTollFreeBridgeCast(castType, CastExpr);
@@ -7706,6 +8613,9 @@ ExprResult Sema::BuildVectorLiteral(SourceLocation LParenLoc,
// initializers must be one or must match the size of the vector.
// If a single value is specified in the initializer then it will be
// replicated to all the components of the vector
+ if (CheckAltivecInitFromScalar(E->getSourceRange(), Ty,
+ VTy->getElementType()))
+ return ExprError();
if (ShouldSplatAltivecScalarInCast(VTy)) {
// The number of initializers must be one or must match the size of the
// vector. If a single value is specified in the initializer then it will
@@ -7730,16 +8640,15 @@ ExprResult Sema::BuildVectorLiteral(SourceLocation LParenLoc,
else {
// For OpenCL, when the number of initializers is a single value,
// it will be replicated to all components of the vector.
- if (getLangOpts().OpenCL &&
- VTy->getVectorKind() == VectorType::GenericVector &&
+ if (getLangOpts().OpenCL && VTy->getVectorKind() == VectorKind::Generic &&
numExprs == 1) {
- QualType ElemTy = VTy->getElementType();
- ExprResult Literal = DefaultLvalueConversion(exprs[0]);
- if (Literal.isInvalid())
- return ExprError();
- Literal = ImpCastExprToType(Literal.get(), ElemTy,
- PrepareScalarCast(Literal, ElemTy));
- return BuildCStyleCastExpr(LParenLoc, TInfo, RParenLoc, Literal.get());
+ QualType ElemTy = VTy->getElementType();
+ ExprResult Literal = DefaultLvalueConversion(exprs[0]);
+ if (Literal.isInvalid())
+ return ExprError();
+ Literal = ImpCastExprToType(Literal.get(), ElemTy,
+ PrepareScalarCast(Literal, ElemTy));
+ return BuildCStyleCastExpr(LParenLoc, TInfo, RParenLoc, Literal.get());
}
initExprs.append(exprs, exprs + numExprs);
@@ -7780,10 +8689,10 @@ ExprResult Sema::ActOnParenListExpr(SourceLocation L,
/// Emit a specialized diagnostic when one expression is a null pointer
/// constant and the other is not a pointer. Returns true if a diagnostic is
/// emitted.
-bool Sema::DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
+bool Sema::DiagnoseConditionalForNull(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation QuestionLoc) {
- Expr *NullExpr = LHSExpr;
- Expr *NonPointerExpr = RHSExpr;
+ const Expr *NullExpr = LHSExpr;
+ const Expr *NonPointerExpr = RHSExpr;
Expr::NullPointerConstantKind NullKind =
NullExpr->isNullPointerConstant(Context,
Expr::NPC_ValueDependentIsNotNull);
@@ -7819,7 +8728,8 @@ bool Sema::DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
}
/// Return false if the condition expression is valid, true otherwise.
-static bool checkCondition(Sema &S, Expr *Cond, SourceLocation QuestionLoc) {
+static bool checkCondition(Sema &S, const Expr *Cond,
+ SourceLocation QuestionLoc) {
QualType CondTy = Cond->getType();
// OpenCL v1.1 s6.3.i says the condition cannot be a floating point type.
@@ -7837,23 +8747,6 @@ static bool checkCondition(Sema &S, Expr *Cond, SourceLocation QuestionLoc) {
return true;
}
-/// Handle when one or both operands are void type.
-static QualType checkConditionalVoidType(Sema &S, ExprResult &LHS,
- ExprResult &RHS) {
- Expr *LHSExpr = LHS.get();
- Expr *RHSExpr = RHS.get();
-
- if (!LHSExpr->getType()->isVoidType())
- S.Diag(RHSExpr->getBeginLoc(), diag::ext_typecheck_cond_one_void)
- << RHSExpr->getSourceRange();
- if (!RHSExpr->getType()->isVoidType())
- S.Diag(LHSExpr->getBeginLoc(), diag::ext_typecheck_cond_one_void)
- << LHSExpr->getSourceRange();
- LHS = S.ImpCastExprToType(LHS.get(), S.Context.VoidTy, CK_ToVoid);
- RHS = S.ImpCastExprToType(RHS.get(), S.Context.VoidTy, CK_ToVoid);
- return S.Context.VoidTy;
-}
-
/// Return false if the NullExpr can be promoted to PointerTy,
/// true otherwise.
static bool checkConditionalNullPointer(Sema &S, ExprResult &NullExpr,
@@ -7877,7 +8770,7 @@ static QualType checkConditionalPointerCompatibility(Sema &S, ExprResult &LHS,
if (S.Context.hasSameType(LHSTy, RHSTy)) {
// Two identical pointers types are always compatible.
- return LHSTy;
+ return S.Context.getCommonSugaredType(LHSTy, RHSTy);
}
QualType lhptee, rhptee;
@@ -7947,7 +8840,9 @@ static QualType checkConditionalPointerCompatibility(Sema &S, ExprResult &LHS,
lhptee = S.Context.getQualifiedType(lhptee.getUnqualifiedType(), lhQual);
rhptee = S.Context.getQualifiedType(rhptee.getUnqualifiedType(), rhQual);
- QualType CompositeTy = S.Context.mergeTypes(lhptee, rhptee);
+ QualType CompositeTy = S.Context.mergeTypes(
+ lhptee, rhptee, /*OfBlockPointer=*/false, /*Unqualified=*/false,
+ /*BlockReturnType=*/false, /*IsConditionalOperator=*/true);
if (CompositeTy.isNull()) {
// In this situation, we assume void* type. No especially good
@@ -8245,11 +9140,17 @@ OpenCLCheckVectorConditional(Sema &S, ExprResult &Cond,
// result as specified in OpenCL v1.1 s6.3.i.
if (LHS.get()->getType()->isVectorType() ||
RHS.get()->getType()->isVectorType()) {
- QualType VecResTy = S.CheckVectorOperands(LHS, RHS, QuestionLoc,
- /*isCompAssign*/false,
- /*AllowBothBool*/true,
- /*AllowBoolConversions*/false);
- if (VecResTy.isNull()) return QualType();
+ bool IsBoolVecLang =
+ !S.getLangOpts().OpenCL && !S.getLangOpts().OpenCLCPlusPlus;
+ QualType VecResTy =
+ S.CheckVectorOperands(LHS, RHS, QuestionLoc,
+ /*isCompAssign*/ false,
+ /*AllowBothBool*/ true,
+ /*AllowBoolConversions*/ false,
+ /*AllowBooleanOperation*/ IsBoolVecLang,
+ /*ReportInvalid*/ true);
+ if (VecResTy.isNull())
+ return QualType();
// The result type must match the condition type as specified in
// OpenCL v1.1 s6.11.6.
if (checkVectorResult(S, CondTy, VecResTy, QuestionLoc))
@@ -8319,23 +9220,31 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
if (checkCondition(*this, Cond.get(), QuestionLoc))
return QualType();
- // Now check the two expressions.
+ // Handle vectors.
if (LHS.get()->getType()->isVectorType() ||
RHS.get()->getType()->isVectorType())
- return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false,
- /*AllowBothBool*/true,
- /*AllowBoolConversions*/false);
+ return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/ false,
+ /*AllowBothBool*/ true,
+ /*AllowBoolConversions*/ false,
+ /*AllowBooleanOperation*/ false,
+ /*ReportInvalid*/ true);
QualType ResTy =
UsualArithmeticConversions(LHS, RHS, QuestionLoc, ACK_Conditional);
if (LHS.isInvalid() || RHS.isInvalid())
return QualType();
+ // WebAssembly tables are not allowed as conditional LHS or RHS.
QualType LHSTy = LHS.get()->getType();
QualType RHSTy = RHS.get()->getType();
+ if (LHSTy->isWebAssemblyTableType() || RHSTy->isWebAssemblyTableType()) {
+ Diag(QuestionLoc, diag::err_wasm_table_conditional_expression)
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ return QualType();
+ }
- // Diagnose attempts to convert between __float128 and long double where
- // such conversions currently can't be handled.
+ // Diagnose attempts to convert between __ibm128, __float128 and long double
+ // where such conversions currently can't be handled.
if (unsupportedTypeConversion(*this, LHSTy, RHSTy)) {
Diag(QuestionLoc,
diag::err_typecheck_cond_incompatible_operands) << LHSTy << RHSTy
@@ -8346,16 +9255,17 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// OpenCL v2.0 s6.12.5 - Blocks cannot be used as expressions of the ternary
// selection operator (?:).
if (getLangOpts().OpenCL &&
- (checkBlockType(*this, LHS.get()) | checkBlockType(*this, RHS.get()))) {
+ ((int)checkBlockType(*this, LHS.get()) | (int)checkBlockType(*this, RHS.get()))) {
return QualType();
}
// If both operands have arithmetic type, do the usual arithmetic conversions
// to find a common type: C99 6.5.15p3,5.
if (LHSTy->isArithmeticType() && RHSTy->isArithmeticType()) {
- // Disallow invalid arithmetic conversions, such as those between ExtInts of
- // different sizes, or between ExtInts and other types.
- if (ResTy.isNull() && (LHSTy->isExtIntType() || RHSTy->isExtIntType())) {
+ // Disallow invalid arithmetic conversions, such as those between bit-
+ // precise integers types of different sizes, or between a bit-precise
+ // integer and another type.
+ if (ResTy.isNull() && (LHSTy->isBitIntType() || RHSTy->isBitIntType())) {
Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands)
<< LHSTy << RHSTy << LHS.get()->getSourceRange()
<< RHS.get()->getSourceRange();
@@ -8368,11 +9278,6 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
return ResTy;
}
- // And if they're both bfloat (which isn't arithmetic), that's fine too.
- if (LHSTy->isBFloat16Type() && RHSTy->isBFloat16Type()) {
- return LHSTy;
- }
-
// If both operands are the same structure or union type, the result is that
// type.
if (const RecordType *LHSRT = LHSTy->getAs<RecordType>()) { // C99 6.5.15p3
@@ -8380,16 +9285,37 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
if (LHSRT->getDecl() == RHSRT->getDecl())
// "If both the operands have structure or union type, the result has
// that type." This implies that CV qualifiers are dropped.
- return LHSTy.getUnqualifiedType();
+ return Context.getCommonSugaredType(LHSTy.getUnqualifiedType(),
+ RHSTy.getUnqualifiedType());
// FIXME: Type of conditional expression must be complete in C mode.
}
// C99 6.5.15p5: "If both operands have void type, the result has void type."
// The following || allows only one side to be void (a GCC-ism).
if (LHSTy->isVoidType() || RHSTy->isVoidType()) {
- return checkConditionalVoidType(*this, LHS, RHS);
+ QualType ResTy;
+ if (LHSTy->isVoidType() && RHSTy->isVoidType()) {
+ ResTy = Context.getCommonSugaredType(LHSTy, RHSTy);
+ } else if (RHSTy->isVoidType()) {
+ ResTy = RHSTy;
+ Diag(RHS.get()->getBeginLoc(), diag::ext_typecheck_cond_one_void)
+ << RHS.get()->getSourceRange();
+ } else {
+ ResTy = LHSTy;
+ Diag(LHS.get()->getBeginLoc(), diag::ext_typecheck_cond_one_void)
+ << LHS.get()->getSourceRange();
+ }
+ LHS = ImpCastExprToType(LHS.get(), ResTy, CK_ToVoid);
+ RHS = ImpCastExprToType(RHS.get(), ResTy, CK_ToVoid);
+ return ResTy;
}
+ // C23 6.5.15p7:
+ // ... if both the second and third operands have nullptr_t type, the
+ // result also has that type.
+ if (LHSTy->isNullPtrType() && Context.hasSameType(LHSTy, RHSTy))
+ return ResTy;
+
// C99 6.5.15p6 - "if one operand is a null pointer constant, the result has
// the type of the other operand."
if (!checkConditionalNullPointer(*this, RHS, LHSTy)) return LHSTy;
@@ -8423,17 +9349,17 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
/*IsIntFirstExpr=*/false))
return LHSTy;
- // Allow ?: operations in which both operands have the same
- // built-in sizeless type.
- if (LHSTy->isSizelessBuiltinType() && Context.hasSameType(LHSTy, RHSTy))
- return LHSTy;
-
// Emit a better diagnostic if one of the expressions is a null pointer
// constant and the other is not a pointer type. In this case, the user most
// likely forgot to take the address of the other expression.
if (DiagnoseConditionalForNull(LHS.get(), RHS.get(), QuestionLoc))
return QualType();
+ // Finally, if the LHS and RHS types are canonically the same type, we can
+ // use the common sugared type.
+ if (Context.hasSameType(LHSTy, RHSTy))
+ return Context.getCommonSugaredType(LHSTy, RHSTy);
+
// Otherwise, the operands are not compatible.
Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands)
<< LHSTy << RHSTy << LHS.get()->getSourceRange()
@@ -8615,28 +9541,27 @@ static bool IsArithmeticOp(BinaryOperatorKind Opc) {
/// expression, either using a built-in or overloaded operator,
/// and sets *OpCode to the opcode and *RHSExprs to the right-hand side
/// expression.
-static bool IsArithmeticBinaryExpr(Expr *E, BinaryOperatorKind *Opcode,
- Expr **RHSExprs) {
+static bool IsArithmeticBinaryExpr(const Expr *E, BinaryOperatorKind *Opcode,
+ const Expr **RHSExprs) {
// Don't strip parenthesis: we should not warn if E is in parenthesis.
E = E->IgnoreImpCasts();
E = E->IgnoreConversionOperatorSingleStep();
E = E->IgnoreImpCasts();
- if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E)) {
E = MTE->getSubExpr();
E = E->IgnoreImpCasts();
}
// Built-in binary operator.
- if (BinaryOperator *OP = dyn_cast<BinaryOperator>(E)) {
- if (IsArithmeticOp(OP->getOpcode())) {
- *Opcode = OP->getOpcode();
- *RHSExprs = OP->getRHS();
- return true;
- }
+ if (const auto *OP = dyn_cast<BinaryOperator>(E);
+ OP && IsArithmeticOp(OP->getOpcode())) {
+ *Opcode = OP->getOpcode();
+ *RHSExprs = OP->getRHS();
+ return true;
}
// Overloaded operator.
- if (CXXOperatorCallExpr *Call = dyn_cast<CXXOperatorCallExpr>(E)) {
+ if (const auto *Call = dyn_cast<CXXOperatorCallExpr>(E)) {
if (Call->getNumArgs() != 2)
return false;
@@ -8661,14 +9586,14 @@ static bool IsArithmeticBinaryExpr(Expr *E, BinaryOperatorKind *Opcode,
/// ExprLooksBoolean - Returns true if E looks boolean, i.e. it has boolean type
/// or is a logical expression such as (x==y) which has int type, but is
/// commonly interpreted as boolean.
-static bool ExprLooksBoolean(Expr *E) {
+static bool ExprLooksBoolean(const Expr *E) {
E = E->IgnoreParenImpCasts();
if (E->getType()->isBooleanType())
return true;
- if (BinaryOperator *OP = dyn_cast<BinaryOperator>(E))
+ if (const auto *OP = dyn_cast<BinaryOperator>(E))
return OP->isComparisonOp() || OP->isLogicalOp();
- if (UnaryOperator *OP = dyn_cast<UnaryOperator>(E))
+ if (const auto *OP = dyn_cast<UnaryOperator>(E))
return OP->getOpcode() == UO_LNot;
if (E->getType()->isPointerType())
return true;
@@ -8682,13 +9607,11 @@ static bool ExprLooksBoolean(Expr *E) {
/// and binary operator are mixed in a way that suggests the programmer assumed
/// the conditional operator has higher precedence, for example:
/// "int x = a + someBinaryCondition ? 1 : 2".
-static void DiagnoseConditionalPrecedence(Sema &Self,
- SourceLocation OpLoc,
- Expr *Condition,
- Expr *LHSExpr,
- Expr *RHSExpr) {
+static void DiagnoseConditionalPrecedence(Sema &Self, SourceLocation OpLoc,
+ Expr *Condition, const Expr *LHSExpr,
+ const Expr *RHSExpr) {
BinaryOperatorKind CondOpcode;
- Expr *CondRHS;
+ const Expr *CondRHS;
if (!IsArithmeticBinaryExpr(Condition, &CondOpcode, &CondRHS))
return;
@@ -8724,8 +9647,8 @@ static QualType computeConditionalNullability(QualType ResTy, bool IsBin,
if (!ResTy->isAnyPointerType())
return ResTy;
- auto GetNullability = [&Ctx](QualType Ty) {
- Optional<NullabilityKind> Kind = Ty->getNullability(Ctx);
+ auto GetNullability = [](QualType Ty) {
+ std::optional<NullabilityKind> Kind = Ty->getNullability();
if (Kind) {
// For our purposes, treat _Nullable_result as _Nullable.
if (*Kind == NullabilityKind::NullableResult)
@@ -8762,7 +9685,7 @@ static QualType computeConditionalNullability(QualType ResTy, bool IsBin,
return ResTy;
// Strip all nullability from ResTy.
- while (ResTy->getNullability(Ctx))
+ while (ResTy->getNullability())
ResTy = ResTy.getSingleStepDesugaredType(Ctx);
// Create a new AttributedType with the new nullability kind.
@@ -8875,6 +9798,21 @@ ExprResult Sema::ActOnConditionalOp(SourceLocation QuestionLoc,
ColonLoc, result, VK, OK);
}
+// Check that the SME attributes for PSTATE.ZA and PSTATE.SM are compatible.
+bool Sema::IsInvalidSMECallConversion(QualType FromType, QualType ToType) {
+ unsigned FromAttributes = 0, ToAttributes = 0;
+ if (const auto *FromFn =
+ dyn_cast<FunctionProtoType>(Context.getCanonicalType(FromType)))
+ FromAttributes =
+ FromFn->getAArch64SMEAttributes() & FunctionType::SME_AttributeMask;
+ if (const auto *ToFn =
+ dyn_cast<FunctionProtoType>(Context.getCanonicalType(ToType)))
+ ToAttributes =
+ ToFn->getAArch64SMEAttributes() & FunctionType::SME_AttributeMask;
+
+ return FromAttributes != ToAttributes;
+}
+
// Check if we have a conversion between incompatible cmse function pointer
// types, that is, a conversion between a function pointer with the
// cmse_nonsecure_call attribute and one without.
@@ -8899,7 +9837,8 @@ static bool IsInvalidCmseNSCallConversion(Sema &S, QualType FromType,
// This circumvents the usual type rules specified in 6.2.7p1 & 6.7.5.[1-3].
// FIXME: add a couple examples in this comment.
static Sema::AssignConvertType
-checkPointerTypesForAssignment(Sema &S, QualType LHSType, QualType RHSType) {
+checkPointerTypesForAssignment(Sema &S, QualType LHSType, QualType RHSType,
+ SourceLocation Loc) {
assert(LHSType.isCanonical() && "LHS not canonicalized!");
assert(RHSType.isCanonical() && "RHS not canonicalized!");
@@ -8968,6 +9907,13 @@ checkPointerTypesForAssignment(Sema &S, QualType LHSType, QualType RHSType) {
return Sema::FunctionVoidPointer;
}
+ if (!S.Diags.isIgnored(
+ diag::warn_typecheck_convert_incompatible_function_pointer_strict,
+ Loc) &&
+ RHSType->isFunctionPointerType() && LHSType->isFunctionPointerType() &&
+ !S.IsFunctionConversion(RHSType, LHSType, RHSType))
+ return Sema::IncompatibleFunctionPointerStrict;
+
// C99 6.5.16.1p1 (constraint 3): both operands are pointers to qualified or
// unqualified versions of compatible types, ...
QualType ltrans = QualType(lhptee, 0), rtrans = QualType(rhptee, 0);
@@ -9033,6 +9979,8 @@ checkPointerTypesForAssignment(Sema &S, QualType LHSType, QualType RHSType) {
return Sema::IncompatibleFunctionPointer;
if (IsInvalidCmseNSCallConversion(S, ltrans, rtrans))
return Sema::IncompatibleFunctionPointer;
+ if (S.IsInvalidSMECallConversion(rtrans, ltrans))
+ return Sema::IncompatibleFunctionPointer;
return ConvTy;
}
@@ -9181,6 +10129,15 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
return Compatible;
}
+ // If the LHS has an __auto_type, there are no additional type constraints
+ // to be worried about.
+ if (const auto *AT = dyn_cast<AutoType>(LHSType)) {
+ if (AT->isGNUAutoType()) {
+ Kind = CK_NoOp;
+ return Compatible;
+ }
+ }
+
// If we have an atomic type, try a non-atomic assignment, then just add an
// atomic qualification step.
if (const AtomicType *AtomicTy = dyn_cast<AtomicType>(LHSType)) {
@@ -9237,6 +10194,14 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
// vectors, the total size only needs to be the same. This is a bitcast;
// no bits are changed but the result type is different.
if (isLaxVectorConversion(RHSType, LHSType)) {
+ // The default for lax vector conversions with Altivec vectors will
+ // change, so if we are converting between vector types where
+ // at least one is an Altivec vector, emit a warning.
+ if (Context.getTargetInfo().getTriple().isPPC() &&
+ anyAltivecTypes(RHSType, LHSType) &&
+ !Context.areCompatibleVectorTypes(RHSType, LHSType))
+ Diag(RHS.get()->getExprLoc(), diag::warn_deprecated_lax_vec_conv_all)
+ << RHSType << LHSType;
Kind = CK_BitCast;
return IncompatibleVectors;
}
@@ -9250,6 +10215,12 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
const VectorType *VecType = RHSType->getAs<VectorType>();
if (VecType && VecType->getNumElements() == 1 &&
isLaxVectorConversion(RHSType, LHSType)) {
+ if (Context.getTargetInfo().getTriple().isPPC() &&
+ (VecType->getVectorKind() == VectorKind::AltiVecVector ||
+ VecType->getVectorKind() == VectorKind::AltiVecBool ||
+ VecType->getVectorKind() == VectorKind::AltiVecPixel))
+ Diag(RHS.get()->getExprLoc(), diag::warn_deprecated_lax_vec_conv_all)
+ << RHSType << LHSType;
ExprResult *VecExpr = &RHS;
*VecExpr = ImpCastExprToType(VecExpr->get(), LHSType, CK_BitCast);
Kind = CK_BitCast;
@@ -9258,19 +10229,29 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
}
// Allow assignments between fixed-length and sizeless SVE vectors.
- if ((LHSType->isSizelessBuiltinType() && RHSType->isVectorType()) ||
- (LHSType->isVectorType() && RHSType->isSizelessBuiltinType()))
+ if ((LHSType->isSVESizelessBuiltinType() && RHSType->isVectorType()) ||
+ (LHSType->isVectorType() && RHSType->isSVESizelessBuiltinType()))
if (Context.areCompatibleSveTypes(LHSType, RHSType) ||
Context.areLaxCompatibleSveTypes(LHSType, RHSType)) {
Kind = CK_BitCast;
return Compatible;
}
+ // Allow assignments between fixed-length and sizeless RVV vectors.
+ if ((LHSType->isRVVSizelessBuiltinType() && RHSType->isVectorType()) ||
+ (LHSType->isVectorType() && RHSType->isRVVSizelessBuiltinType())) {
+ if (Context.areCompatibleRVVTypes(LHSType, RHSType) ||
+ Context.areLaxCompatibleRVVTypes(LHSType, RHSType)) {
+ Kind = CK_BitCast;
+ return Compatible;
+ }
+ }
+
return Incompatible;
}
- // Diagnose attempts to convert between __float128 and long double where
- // such conversions currently can't be handled.
+ // Diagnose attempts to convert between __ibm128, __float128 and long double
+ // where such conversions currently can't be handled.
if (unsupportedTypeConversion(*this, LHSType, RHSType))
return Incompatible;
@@ -9300,7 +10281,8 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
Kind = CK_NoOp;
else
Kind = CK_BitCast;
- return checkPointerTypesForAssignment(*this, LHSType, RHSType);
+ return checkPointerTypesForAssignment(*this, LHSType, RHSType,
+ RHS.get()->getBeginLoc());
}
// int -> T*
@@ -9434,6 +10416,15 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
return Incompatible;
}
+ // Conversion to nullptr_t (C23 only)
+ if (getLangOpts().C23 && LHSType->isNullPtrType() &&
+ RHS.get()->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNull)) {
+ // null -> nullptr_t
+ Kind = CK_NullToPointer;
+ return Compatible;
+ }
+
// Conversions from pointers that are not covered by the above.
if (isa<PointerType>(RHSType)) {
// T* -> _Bool
@@ -9628,12 +10619,36 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
return Incompatible;
}
+ // This check seems unnatural, however it is necessary to ensure the proper
+ // conversion of functions/arrays. If the conversion were done for all
+ // DeclExpr's (created by ActOnIdExpression), it would mess up the unary
+ // expressions that suppress this implicit conversion (&, sizeof). This needs
+ // to happen before we check for null pointer conversions because C does not
+ // undergo the same implicit conversions as C++ does above (by the calls to
+ // TryImplicitConversion() and PerformImplicitConversion()) which insert the
+ // lvalue to rvalue cast before checking for null pointer constraints. This
+ // addresses code like: nullptr_t val; int *ptr; ptr = val;
+ //
+ // Suppress this for references: C++ 8.5.3p5.
+ if (!LHSType->isReferenceType()) {
+ // FIXME: We potentially allocate here even if ConvertRHS is false.
+ RHS = DefaultFunctionArrayLvalueConversion(RHS.get(), Diagnose);
+ if (RHS.isInvalid())
+ return Incompatible;
+ }
+
+ // The constraints are expressed in terms of the atomic, qualified, or
+ // unqualified type of the LHS.
+ QualType LHSTypeAfterConversion = LHSType.getAtomicUnqualifiedType();
+
// C99 6.5.16.1p1: the left operand is a pointer and the right is
- // a null pointer constant.
- if ((LHSType->isPointerType() || LHSType->isObjCObjectPointerType() ||
- LHSType->isBlockPointerType()) &&
- RHS.get()->isNullPointerConstant(Context,
- Expr::NPC_ValueDependentIsNull)) {
+ // a null pointer constant <C23>or its type is nullptr_t;</C23>.
+ if ((LHSTypeAfterConversion->isPointerType() ||
+ LHSTypeAfterConversion->isObjCObjectPointerType() ||
+ LHSTypeAfterConversion->isBlockPointerType()) &&
+ ((getLangOpts().C23 && RHS.get()->getType()->isNullPtrType()) ||
+ RHS.get()->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNull))) {
if (Diagnose || ConvertRHS) {
CastKind Kind;
CXXCastPath Path;
@@ -9644,6 +10659,26 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
}
return Compatible;
}
+ // C23 6.5.16.1p1: the left operand has type atomic, qualified, or
+ // unqualified bool, and the right operand is a pointer or its type is
+ // nullptr_t.
+ if (getLangOpts().C23 && LHSType->isBooleanType() &&
+ RHS.get()->getType()->isNullPtrType()) {
+ // NB: T* -> _Bool is handled in CheckAssignmentConstraints, this only
+ // only handles nullptr -> _Bool due to needing an extra conversion
+ // step.
+ // We model this by converting from nullptr -> void * and then let the
+ // conversion from void * -> _Bool happen naturally.
+ if (Diagnose || ConvertRHS) {
+ CastKind Kind;
+ CXXCastPath Path;
+ CheckPointerConversion(RHS.get(), Context.VoidPtrTy, Kind, Path,
+ /*IgnoreBaseAccess=*/false, Diagnose);
+ if (ConvertRHS)
+ RHS = ImpCastExprToType(RHS.get(), Context.VoidPtrTy, Kind, VK_PRValue,
+ &Path);
+ }
+ }
// OpenCL queue_t type assignment.
if (LHSType->isQueueT() && RHS.get()->isNullPointerConstant(
@@ -9652,18 +10687,6 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
return Compatible;
}
- // This check seems unnatural, however it is necessary to ensure the proper
- // conversion of functions/arrays. If the conversion were done for all
- // DeclExpr's (created by ActOnIdExpression), it would mess up the unary
- // expressions that suppress this implicit conversion (&, sizeof).
- //
- // Suppress this for references: C++ 8.5.3p5.
- if (!LHSType->isReferenceType()) {
- // FIXME: We potentially allocate here even if ConvertRHS is false.
- RHS = DefaultFunctionArrayLvalueConversion(RHS.get(), Diagnose);
- if (RHS.isInvalid())
- return Incompatible;
- }
CastKind Kind;
Sema::AssignConvertType result =
CheckAssignmentConstraints(LHSType, RHS, Kind, ConvertRHS);
@@ -9842,9 +10865,11 @@ static bool tryVectorConvertAndSplat(Sema &S, ExprResult *scalar,
static ExprResult convertVector(Expr *E, QualType ElementType, Sema &S) {
const auto *VecTy = E->getType()->getAs<VectorType>();
assert(VecTy && "Expression E must be a vector");
- QualType NewVecTy = S.Context.getVectorType(ElementType,
- VecTy->getNumElements(),
- VecTy->getVectorKind());
+ QualType NewVecTy =
+ VecTy->isExtVectorType()
+ ? S.Context.getExtVectorType(ElementType, VecTy->getNumElements())
+ : S.Context.getVectorType(ElementType, VecTy->getNumElements(),
+ VecTy->getVectorKind());
// Look through the implicit cast. Return the subexpression if its type is
// NewVecTy.
@@ -9876,7 +10901,7 @@ static bool canConvertIntToOtherIntTy(Sema &S, ExprResult *Int,
// bits that the vector element type, reject it.
llvm::APSInt Result = EVResult.Val.getInt();
unsigned NumBits = IntSigned
- ? (Result.isNegative() ? Result.getMinSignedBits()
+ ? (Result.isNegative() ? Result.getSignificantBits()
: Result.getActiveBits())
: Result.getActiveBits();
if (Order < 0 && S.Context.getIntWidth(OtherIntTy) < NumBits)
@@ -9944,12 +10969,18 @@ static bool tryGCCVectorConvertAndSplat(Sema &S, ExprResult *Scalar,
ExprResult *Vector) {
QualType ScalarTy = Scalar->get()->getType().getUnqualifiedType();
QualType VectorTy = Vector->get()->getType().getUnqualifiedType();
- const VectorType *VT = VectorTy->getAs<VectorType>();
-
- assert(!isa<ExtVectorType>(VT) &&
- "ExtVectorTypes should not be handled here!");
-
- QualType VectorEltTy = VT->getElementType();
+ QualType VectorEltTy;
+
+ if (const auto *VT = VectorTy->getAs<VectorType>()) {
+ assert(!isa<ExtVectorType>(VT) &&
+ "ExtVectorTypes should not be handled here!");
+ VectorEltTy = VT->getElementType();
+ } else if (VectorTy->isSveVLSBuiltinType()) {
+ VectorEltTy =
+ VectorTy->castAs<BuiltinType>()->getSveEltType(S.getASTContext());
+ } else {
+ llvm_unreachable("Only Fixed-Length and SVE Vector types are handled here");
+ }
// Reject cases where the vector element type or the scalar element type are
// not integral or floating point types.
@@ -10020,18 +11051,18 @@ static bool tryGCCVectorConvertAndSplat(Sema &S, ExprResult *Scalar,
return true;
// Adjust scalar if desired.
- if (Scalar) {
- if (ScalarCast != CK_NoOp)
- *Scalar = S.ImpCastExprToType(Scalar->get(), VectorEltTy, ScalarCast);
- *Scalar = S.ImpCastExprToType(Scalar->get(), VectorTy, CK_VectorSplat);
- }
+ if (ScalarCast != CK_NoOp)
+ *Scalar = S.ImpCastExprToType(Scalar->get(), VectorEltTy, ScalarCast);
+ *Scalar = S.ImpCastExprToType(Scalar->get(), VectorTy, CK_VectorSplat);
return false;
}
QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool,
- bool AllowBoolConversions) {
+ bool AllowBoolConversions,
+ bool AllowBoolOperation,
+ bool ReportInvalid) {
if (!IsCompAssign) {
LHS = DefaultFunctionArrayLvalueConversion(LHS.get());
if (LHS.isInvalid())
@@ -10050,20 +11081,21 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
const VectorType *RHSVecType = RHSType->getAs<VectorType>();
assert(LHSVecType || RHSVecType);
- if ((LHSVecType && LHSVecType->getElementType()->isBFloat16Type()) ||
- (RHSVecType && RHSVecType->getElementType()->isBFloat16Type()))
- return InvalidOperands(Loc, LHS, RHS);
-
// AltiVec-style "vector bool op vector bool" combinations are allowed
// for some operators but not others.
- if (!AllowBothBool &&
- LHSVecType && LHSVecType->getVectorKind() == VectorType::AltiVecBool &&
- RHSVecType && RHSVecType->getVectorKind() == VectorType::AltiVecBool)
- return InvalidOperands(Loc, LHS, RHS);
+ if (!AllowBothBool && LHSVecType &&
+ LHSVecType->getVectorKind() == VectorKind::AltiVecBool && RHSVecType &&
+ RHSVecType->getVectorKind() == VectorKind::AltiVecBool)
+ return ReportInvalid ? InvalidOperands(Loc, LHS, RHS) : QualType();
+
+ // This operation may not be performed on boolean vectors.
+ if (!AllowBoolOperation &&
+ (LHSType->isExtVectorBoolType() || RHSType->isExtVectorBoolType()))
+ return ReportInvalid ? InvalidOperands(Loc, LHS, RHS) : QualType();
// If the vector types are identical, return.
if (Context.hasSameType(LHSType, RHSType))
- return LHSType;
+ return Context.getCommonSugaredType(LHSType, RHSType);
// If we have compatible AltiVec and GCC vector types, use the AltiVec type.
if (LHSVecType && RHSVecType &&
@@ -10085,56 +11117,89 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
LHSVecType->getNumElements() == RHSVecType->getNumElements() &&
(Context.getTypeSize(LHSVecType->getElementType()) ==
Context.getTypeSize(RHSVecType->getElementType()))) {
- if (LHSVecType->getVectorKind() == VectorType::AltiVecVector &&
+ if (LHSVecType->getVectorKind() == VectorKind::AltiVecVector &&
LHSVecType->getElementType()->isIntegerType() &&
- RHSVecType->getVectorKind() == VectorType::AltiVecBool) {
+ RHSVecType->getVectorKind() == VectorKind::AltiVecBool) {
RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast);
return LHSType;
}
if (!IsCompAssign &&
- LHSVecType->getVectorKind() == VectorType::AltiVecBool &&
- RHSVecType->getVectorKind() == VectorType::AltiVecVector &&
+ LHSVecType->getVectorKind() == VectorKind::AltiVecBool &&
+ RHSVecType->getVectorKind() == VectorKind::AltiVecVector &&
RHSVecType->getElementType()->isIntegerType()) {
LHS = ImpCastExprToType(LHS.get(), RHSType, CK_BitCast);
return RHSType;
}
}
- // Expressions containing fixed-length and sizeless SVE vectors are invalid
- // since the ambiguity can affect the ABI.
- auto IsSveConversion = [](QualType FirstType, QualType SecondType) {
+ // Expressions containing fixed-length and sizeless SVE/RVV vectors are
+ // invalid since the ambiguity can affect the ABI.
+ auto IsSveRVVConversion = [](QualType FirstType, QualType SecondType,
+ unsigned &SVEorRVV) {
const VectorType *VecType = SecondType->getAs<VectorType>();
- return FirstType->isSizelessBuiltinType() && VecType &&
- (VecType->getVectorKind() == VectorType::SveFixedLengthDataVector ||
- VecType->getVectorKind() ==
- VectorType::SveFixedLengthPredicateVector);
+ SVEorRVV = 0;
+ if (FirstType->isSizelessBuiltinType() && VecType) {
+ if (VecType->getVectorKind() == VectorKind::SveFixedLengthData ||
+ VecType->getVectorKind() == VectorKind::SveFixedLengthPredicate)
+ return true;
+ if (VecType->getVectorKind() == VectorKind::RVVFixedLengthData ||
+ VecType->getVectorKind() == VectorKind::RVVFixedLengthMask) {
+ SVEorRVV = 1;
+ return true;
+ }
+ }
+
+ return false;
};
- if (IsSveConversion(LHSType, RHSType) || IsSveConversion(RHSType, LHSType)) {
- Diag(Loc, diag::err_typecheck_sve_ambiguous) << LHSType << RHSType;
+ unsigned SVEorRVV;
+ if (IsSveRVVConversion(LHSType, RHSType, SVEorRVV) ||
+ IsSveRVVConversion(RHSType, LHSType, SVEorRVV)) {
+ Diag(Loc, diag::err_typecheck_sve_rvv_ambiguous)
+ << SVEorRVV << LHSType << RHSType;
return QualType();
}
- // Expressions containing GNU and SVE (fixed or sizeless) vectors are invalid
- // since the ambiguity can affect the ABI.
- auto IsSveGnuConversion = [](QualType FirstType, QualType SecondType) {
+ // Expressions containing GNU and SVE or RVV (fixed or sizeless) vectors are
+ // invalid since the ambiguity can affect the ABI.
+ auto IsSveRVVGnuConversion = [](QualType FirstType, QualType SecondType,
+ unsigned &SVEorRVV) {
const VectorType *FirstVecType = FirstType->getAs<VectorType>();
const VectorType *SecondVecType = SecondType->getAs<VectorType>();
- if (FirstVecType && SecondVecType)
- return FirstVecType->getVectorKind() == VectorType::GenericVector &&
- (SecondVecType->getVectorKind() ==
- VectorType::SveFixedLengthDataVector ||
- SecondVecType->getVectorKind() ==
- VectorType::SveFixedLengthPredicateVector);
+ SVEorRVV = 0;
+ if (FirstVecType && SecondVecType) {
+ if (FirstVecType->getVectorKind() == VectorKind::Generic) {
+ if (SecondVecType->getVectorKind() == VectorKind::SveFixedLengthData ||
+ SecondVecType->getVectorKind() ==
+ VectorKind::SveFixedLengthPredicate)
+ return true;
+ if (SecondVecType->getVectorKind() == VectorKind::RVVFixedLengthData ||
+ SecondVecType->getVectorKind() == VectorKind::RVVFixedLengthMask) {
+ SVEorRVV = 1;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ if (SecondVecType &&
+ SecondVecType->getVectorKind() == VectorKind::Generic) {
+ if (FirstType->isSVESizelessBuiltinType())
+ return true;
+ if (FirstType->isRVVSizelessBuiltinType()) {
+ SVEorRVV = 1;
+ return true;
+ }
+ }
- return FirstType->isSizelessBuiltinType() && SecondVecType &&
- SecondVecType->getVectorKind() == VectorType::GenericVector;
+ return false;
};
- if (IsSveGnuConversion(LHSType, RHSType) ||
- IsSveGnuConversion(RHSType, LHSType)) {
- Diag(Loc, diag::err_typecheck_sve_gnu_ambiguous) << LHSType << RHSType;
+ if (IsSveRVVGnuConversion(LHSType, RHSType, SVEorRVV) ||
+ IsSveRVVGnuConversion(RHSType, LHSType, SVEorRVV)) {
+ Diag(Loc, diag::err_typecheck_sve_rvv_gnu_ambiguous)
+ << SVEorRVV << LHSType << RHSType;
return QualType();
}
@@ -10173,6 +11238,10 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
QualType OtherType = LHSVecType ? RHSType : LHSType;
ExprResult *OtherExpr = LHSVecType ? &RHS : &LHS;
if (isLaxVectorConversion(OtherType, VecType)) {
+ if (Context.getTargetInfo().getTriple().isPPC() &&
+ anyAltivecTypes(RHSType, LHSType) &&
+ !Context.areCompatibleVectorTypes(RHSType, LHSType))
+ Diag(Loc, diag::warn_deprecated_lax_vec_conv_all) << RHSType << LHSType;
// If we're allowing lax vector conversions, only the total (data) size
// needs to be the same. For non compound assignment, if one of the types is
// scalar, the result is always the vector type.
@@ -10238,6 +11307,81 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
return QualType();
}
+QualType Sema::CheckSizelessVectorOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc,
+ bool IsCompAssign,
+ ArithConvKind OperationKind) {
+ if (!IsCompAssign) {
+ LHS = DefaultFunctionArrayLvalueConversion(LHS.get());
+ if (LHS.isInvalid())
+ return QualType();
+ }
+ RHS = DefaultFunctionArrayLvalueConversion(RHS.get());
+ if (RHS.isInvalid())
+ return QualType();
+
+ QualType LHSType = LHS.get()->getType().getUnqualifiedType();
+ QualType RHSType = RHS.get()->getType().getUnqualifiedType();
+
+ const BuiltinType *LHSBuiltinTy = LHSType->getAs<BuiltinType>();
+ const BuiltinType *RHSBuiltinTy = RHSType->getAs<BuiltinType>();
+
+ unsigned DiagID = diag::err_typecheck_invalid_operands;
+ if ((OperationKind == ACK_Arithmetic) &&
+ ((LHSBuiltinTy && LHSBuiltinTy->isSVEBool()) ||
+ (RHSBuiltinTy && RHSBuiltinTy->isSVEBool()))) {
+ Diag(Loc, DiagID) << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ if (Context.hasSameType(LHSType, RHSType))
+ return LHSType;
+
+ if (LHSType->isSveVLSBuiltinType() && !RHSType->isSveVLSBuiltinType()) {
+ if (!tryGCCVectorConvertAndSplat(*this, &RHS, &LHS))
+ return LHSType;
+ }
+ if (RHSType->isSveVLSBuiltinType() && !LHSType->isSveVLSBuiltinType()) {
+ if (LHS.get()->isLValue() ||
+ !tryGCCVectorConvertAndSplat(*this, &LHS, &RHS))
+ return RHSType;
+ }
+
+ if ((!LHSType->isSveVLSBuiltinType() && !LHSType->isRealType()) ||
+ (!RHSType->isSveVLSBuiltinType() && !RHSType->isRealType())) {
+ Diag(Loc, diag::err_typecheck_vector_not_convertable_non_scalar)
+ << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ if (LHSType->isSveVLSBuiltinType() && RHSType->isSveVLSBuiltinType() &&
+ Context.getBuiltinVectorTypeInfo(LHSBuiltinTy).EC !=
+ Context.getBuiltinVectorTypeInfo(RHSBuiltinTy).EC) {
+ Diag(Loc, diag::err_typecheck_vector_lengths_not_equal)
+ << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ if (LHSType->isSveVLSBuiltinType() || RHSType->isSveVLSBuiltinType()) {
+ QualType Scalar = LHSType->isSveVLSBuiltinType() ? RHSType : LHSType;
+ QualType Vector = LHSType->isSveVLSBuiltinType() ? LHSType : RHSType;
+ bool ScalarOrVector =
+ LHSType->isSveVLSBuiltinType() && RHSType->isSveVLSBuiltinType();
+
+ Diag(Loc, diag::err_typecheck_vector_not_convertable_implict_truncation)
+ << ScalarOrVector << Scalar << Vector;
+
+ return QualType();
+ }
+
+ Diag(Loc, DiagID) << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ return QualType();
+}
+
// checkArithmeticNull - Detect when a NULL constant is used improperly in an
// expression. These are mainly cases where the null pointer is used as an
// integer instead of a pointer.
@@ -10347,8 +11491,13 @@ QualType Sema::CheckMultiplyDivideOperands(ExprResult &LHS, ExprResult &RHS,
QualType RHSTy = RHS.get()->getType();
if (LHSTy->isVectorType() || RHSTy->isVectorType())
return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign,
- /*AllowBothBool*/getLangOpts().AltiVec,
- /*AllowBoolConversions*/false);
+ /*AllowBothBool*/ getLangOpts().AltiVec,
+ /*AllowBoolConversions*/ false,
+ /*AllowBooleanOperation*/ false,
+ /*ReportInvalid*/ true);
+ if (LHSTy->isSveVLSBuiltinType() || RHSTy->isSveVLSBuiltinType())
+ return CheckSizelessVectorOperands(LHS, RHS, Loc, IsCompAssign,
+ ACK_Arithmetic);
if (!IsDiv &&
(LHSTy->isConstantMatrixType() || RHSTy->isConstantMatrixType()))
return CheckMatrixMultiplyOperands(LHS, RHS, Loc, IsCompAssign);
@@ -10381,8 +11530,20 @@ QualType Sema::CheckRemainderOperands(
if (LHS.get()->getType()->hasIntegerRepresentation() &&
RHS.get()->getType()->hasIntegerRepresentation())
return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign,
- /*AllowBothBool*/getLangOpts().AltiVec,
- /*AllowBoolConversions*/false);
+ /*AllowBothBool*/ getLangOpts().AltiVec,
+ /*AllowBoolConversions*/ false,
+ /*AllowBooleanOperation*/ false,
+ /*ReportInvalid*/ true);
+ return InvalidOperands(Loc, LHS, RHS);
+ }
+
+ if (LHS.get()->getType()->isSveVLSBuiltinType() ||
+ RHS.get()->getType()->isSveVLSBuiltinType()) {
+ if (LHS.get()->getType()->hasIntegerRepresentation() &&
+ RHS.get()->getType()->hasIntegerRepresentation())
+ return CheckSizelessVectorOperands(LHS, RHS, Loc, IsCompAssign,
+ ACK_Arithmetic);
+
return InvalidOperands(Loc, LHS, RHS);
}
@@ -10443,8 +11604,10 @@ static void diagnoseSubtractionOnNullPointer(Sema &S, SourceLocation Loc,
if (S.Diags.getSuppressSystemWarnings() && S.SourceMgr.isInSystemMacro(Loc))
return;
- S.Diag(Loc, diag::warn_pointer_sub_null_ptr)
- << S.getLangOpts().CPlusPlus << Pointer->getSourceRange();
+ S.DiagRuntimeBehavior(Loc, Pointer,
+ S.PDiag(diag::warn_pointer_sub_null_ptr)
+ << S.getLangOpts().CPlusPlus
+ << Pointer->getSourceRange());
}
/// Diagnose invalid arithmetic on two function pointers.
@@ -10683,14 +11846,25 @@ QualType Sema::CheckAdditionOperands(ExprResult &LHS, ExprResult &RHS,
if (LHS.get()->getType()->isVectorType() ||
RHS.get()->getType()->isVectorType()) {
- QualType compType = CheckVectorOperands(
- LHS, RHS, Loc, CompLHSTy,
- /*AllowBothBool*/getLangOpts().AltiVec,
- /*AllowBoolConversions*/getLangOpts().ZVector);
+ QualType compType =
+ CheckVectorOperands(LHS, RHS, Loc, CompLHSTy,
+ /*AllowBothBool*/ getLangOpts().AltiVec,
+ /*AllowBoolConversions*/ getLangOpts().ZVector,
+ /*AllowBooleanOperation*/ false,
+ /*ReportInvalid*/ true);
if (CompLHSTy) *CompLHSTy = compType;
return compType;
}
+ if (LHS.get()->getType()->isSveVLSBuiltinType() ||
+ RHS.get()->getType()->isSveVLSBuiltinType()) {
+ QualType compType =
+ CheckSizelessVectorOperands(LHS, RHS, Loc, CompLHSTy, ACK_Arithmetic);
+ if (CompLHSTy)
+ *CompLHSTy = compType;
+ return compType;
+ }
+
if (LHS.get()->getType()->isConstantMatrixType() ||
RHS.get()->getType()->isConstantMatrixType()) {
QualType compType =
@@ -10770,7 +11944,7 @@ QualType Sema::CheckAdditionOperands(ExprResult &LHS, ExprResult &RHS,
QualType LHSTy = Context.isPromotableBitField(LHS.get());
if (LHSTy.isNull()) {
LHSTy = LHS.get()->getType();
- if (LHSTy->isPromotableIntegerType())
+ if (Context.isPromotableIntegerType(LHSTy))
LHSTy = Context.getPromotedIntegerType(LHSTy);
}
*CompLHSTy = LHSTy;
@@ -10787,14 +11961,25 @@ QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS,
if (LHS.get()->getType()->isVectorType() ||
RHS.get()->getType()->isVectorType()) {
- QualType compType = CheckVectorOperands(
- LHS, RHS, Loc, CompLHSTy,
- /*AllowBothBool*/getLangOpts().AltiVec,
- /*AllowBoolConversions*/getLangOpts().ZVector);
+ QualType compType =
+ CheckVectorOperands(LHS, RHS, Loc, CompLHSTy,
+ /*AllowBothBool*/ getLangOpts().AltiVec,
+ /*AllowBoolConversions*/ getLangOpts().ZVector,
+ /*AllowBooleanOperation*/ false,
+ /*ReportInvalid*/ true);
if (CompLHSTy) *CompLHSTy = compType;
return compType;
}
+ if (LHS.get()->getType()->isSveVLSBuiltinType() ||
+ RHS.get()->getType()->isSveVLSBuiltinType()) {
+ QualType compType =
+ CheckSizelessVectorOperands(LHS, RHS, Loc, CompLHSTy, ACK_Arithmetic);
+ if (CompLHSTy)
+ *CompLHSTy = compType;
+ return compType;
+ }
+
if (LHS.get()->getType()->isConstantMatrixType() ||
RHS.get()->getType()->isConstantMatrixType()) {
QualType compType =
@@ -10939,14 +12124,13 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
QualType LHSExprType = LHS.get()->getType();
uint64_t LeftSize = S.Context.getTypeSize(LHSExprType);
- if (LHSExprType->isExtIntType())
+ if (LHSExprType->isBitIntType())
LeftSize = S.Context.getIntWidth(LHSExprType);
else if (LHSExprType->isFixedPointType()) {
auto FXSema = S.Context.getFixedPointSemantics(LHSExprType);
LeftSize = FXSema.getWidth() - (unsigned)FXSema.hasUnsignedPadding();
}
- llvm::APInt LeftBits(Right.getBitWidth(), LeftSize);
- if (Right.uge(LeftBits)) {
+ if (Right.uge(LeftSize)) {
S.DiagRuntimeBehavior(Loc, RHS.get(),
S.PDiag(diag::warn_shift_gt_typewidth)
<< RHS.get()->getSourceRange());
@@ -10970,10 +12154,16 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
return;
llvm::APSInt Left = LHSResult.Val.getInt();
- // If LHS does not have a signed type and non-negative value
- // then, the behavior is undefined before C++2a. Warn about it.
- if (Left.isNegative() && !S.getLangOpts().isSignedOverflowDefined() &&
- !S.getLangOpts().CPlusPlus20) {
+ // Don't warn if signed overflow is defined, then all the rest of the
+ // diagnostics will not be triggered because the behavior is defined.
+ // Also don't warn in C++20 mode (and newer), as signed left shifts
+ // always wrap and never overflow.
+ if (S.getLangOpts().isSignedOverflowDefined() || S.getLangOpts().CPlusPlus20)
+ return;
+
+ // If LHS does not have a non-negative value then, the
+ // behavior is undefined before C++2a. Warn about it.
+ if (Left.isNegative()) {
S.DiagRuntimeBehavior(Loc, LHS.get(),
S.PDiag(diag::warn_shift_lhs_negative)
<< LHS.get()->getSourceRange());
@@ -10981,8 +12171,8 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
}
llvm::APInt ResultBits =
- static_cast<llvm::APInt&>(Right) + Left.getMinSignedBits();
- if (LeftBits.uge(ResultBits))
+ static_cast<llvm::APInt &>(Right) + Left.getSignificantBits();
+ if (ResultBits.ule(LeftSize))
return;
llvm::APSInt Result = Left.extend(ResultBits.getLimitedValue());
Result = Result.shl(Right);
@@ -10996,7 +12186,7 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
// bugs -- if the result is cast back to an unsigned type, it will have the
// expected value. Thus we place this behind a different warning that can be
// turned off separately if needed.
- if (LeftBits == ResultBits - 1) {
+ if (ResultBits - 1 == LeftSize) {
S.Diag(Loc, diag::warn_shift_result_sets_sign_bit)
<< HexResult << LHSType
<< LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
@@ -11004,9 +12194,9 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
}
S.Diag(Loc, diag::warn_shift_result_gt_typewidth)
- << HexResult.str() << Result.getMinSignedBits() << LHSType
- << Left.getBitWidth() << LHS.get()->getSourceRange()
- << RHS.get()->getSourceRange();
+ << HexResult.str() << Result.getSignificantBits() << LHSType
+ << Left.getBitWidth() << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
}
/// Return the resulting type when a vector is shifted
@@ -11041,6 +12231,15 @@ static QualType checkVectorShift(Sema &S, ExprResult &LHS, ExprResult &RHS,
const VectorType *RHSVecTy = RHSType->getAs<VectorType>();
QualType RHSEleType = RHSVecTy ? RHSVecTy->getElementType() : RHSType;
+ // Do not allow shifts for boolean vectors.
+ if ((LHSVecTy && LHSVecTy->isExtVectorBoolType()) ||
+ (RHSVecTy && RHSVecTy->isExtVectorBoolType())) {
+ S.Diag(Loc, diag::err_typecheck_invalid_operands)
+ << LHS.get()->getType() << RHS.get()->getType()
+ << LHS.get()->getSourceRange();
+ return QualType();
+ }
+
// The operands need to be integers.
if (!LHSEleType->isIntegerType()) {
S.Diag(Loc, diag::err_typecheck_expect_int)
@@ -11096,6 +12295,97 @@ static QualType checkVectorShift(Sema &S, ExprResult &LHS, ExprResult &RHS,
return LHSType;
}
+static QualType checkSizelessVectorShift(Sema &S, ExprResult &LHS,
+ ExprResult &RHS, SourceLocation Loc,
+ bool IsCompAssign) {
+ if (!IsCompAssign) {
+ LHS = S.UsualUnaryConversions(LHS.get());
+ if (LHS.isInvalid())
+ return QualType();
+ }
+
+ RHS = S.UsualUnaryConversions(RHS.get());
+ if (RHS.isInvalid())
+ return QualType();
+
+ QualType LHSType = LHS.get()->getType();
+ const BuiltinType *LHSBuiltinTy = LHSType->castAs<BuiltinType>();
+ QualType LHSEleType = LHSType->isSveVLSBuiltinType()
+ ? LHSBuiltinTy->getSveEltType(S.getASTContext())
+ : LHSType;
+
+ // Note that RHS might not be a vector
+ QualType RHSType = RHS.get()->getType();
+ const BuiltinType *RHSBuiltinTy = RHSType->castAs<BuiltinType>();
+ QualType RHSEleType = RHSType->isSveVLSBuiltinType()
+ ? RHSBuiltinTy->getSveEltType(S.getASTContext())
+ : RHSType;
+
+ if ((LHSBuiltinTy && LHSBuiltinTy->isSVEBool()) ||
+ (RHSBuiltinTy && RHSBuiltinTy->isSVEBool())) {
+ S.Diag(Loc, diag::err_typecheck_invalid_operands)
+ << LHSType << RHSType << LHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ if (!LHSEleType->isIntegerType()) {
+ S.Diag(Loc, diag::err_typecheck_expect_int)
+ << LHS.get()->getType() << LHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ if (!RHSEleType->isIntegerType()) {
+ S.Diag(Loc, diag::err_typecheck_expect_int)
+ << RHS.get()->getType() << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ if (LHSType->isSveVLSBuiltinType() && RHSType->isSveVLSBuiltinType() &&
+ (S.Context.getBuiltinVectorTypeInfo(LHSBuiltinTy).EC !=
+ S.Context.getBuiltinVectorTypeInfo(RHSBuiltinTy).EC)) {
+ S.Diag(Loc, diag::err_typecheck_invalid_operands)
+ << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ if (!LHSType->isSveVLSBuiltinType()) {
+ assert(RHSType->isSveVLSBuiltinType());
+ if (IsCompAssign)
+ return RHSType;
+ if (LHSEleType != RHSEleType) {
+ LHS = S.ImpCastExprToType(LHS.get(), RHSEleType, clang::CK_IntegralCast);
+ LHSEleType = RHSEleType;
+ }
+ const llvm::ElementCount VecSize =
+ S.Context.getBuiltinVectorTypeInfo(RHSBuiltinTy).EC;
+ QualType VecTy =
+ S.Context.getScalableVectorType(LHSEleType, VecSize.getKnownMinValue());
+ LHS = S.ImpCastExprToType(LHS.get(), VecTy, clang::CK_VectorSplat);
+ LHSType = VecTy;
+ } else if (RHSBuiltinTy && RHSBuiltinTy->isSveVLSBuiltinType()) {
+ if (S.Context.getTypeSize(RHSBuiltinTy) !=
+ S.Context.getTypeSize(LHSBuiltinTy)) {
+ S.Diag(Loc, diag::err_typecheck_vector_lengths_not_equal)
+ << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ return QualType();
+ }
+ } else {
+ const llvm::ElementCount VecSize =
+ S.Context.getBuiltinVectorTypeInfo(LHSBuiltinTy).EC;
+ if (LHSEleType != RHSEleType) {
+ RHS = S.ImpCastExprToType(RHS.get(), LHSEleType, clang::CK_IntegralCast);
+ RHSEleType = LHSEleType;
+ }
+ QualType VecTy =
+ S.Context.getScalableVectorType(RHSEleType, VecSize.getKnownMinValue());
+ RHS = S.ImpCastExprToType(RHS.get(), VecTy, CK_VectorSplat);
+ }
+
+ return LHSType;
+}
+
// C99 6.5.7
QualType Sema::CheckShiftOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, BinaryOperatorKind Opc,
@@ -11110,15 +12400,19 @@ QualType Sema::CheckShiftOperands(ExprResult &LHS, ExprResult &RHS,
// like general shifts, except that neither the LHS nor the RHS is
// allowed to be a "vector bool".
if (auto LHSVecType = LHS.get()->getType()->getAs<VectorType>())
- if (LHSVecType->getVectorKind() == VectorType::AltiVecBool)
+ if (LHSVecType->getVectorKind() == VectorKind::AltiVecBool)
return InvalidOperands(Loc, LHS, RHS);
if (auto RHSVecType = RHS.get()->getType()->getAs<VectorType>())
- if (RHSVecType->getVectorKind() == VectorType::AltiVecBool)
+ if (RHSVecType->getVectorKind() == VectorKind::AltiVecBool)
return InvalidOperands(Loc, LHS, RHS);
}
return checkVectorShift(*this, LHS, RHS, Loc, IsCompAssign);
}
+ if (LHS.get()->getType()->isSveVLSBuiltinType() ||
+ RHS.get()->getType()->isSveVLSBuiltinType())
+ return checkSizelessVectorShift(*this, LHS, RHS, Loc, IsCompAssign);
+
// Shifts don't perform usual arithmetic conversions, they just do integer
// promotions on each operand. C99 6.5.7p3
@@ -11150,7 +12444,6 @@ QualType Sema::CheckShiftOperands(ExprResult &LHS, ExprResult &RHS,
isScopedEnumerationType(RHSType)) {
return InvalidOperands(Loc, LHS, RHS);
}
- // Sanity-check shift operands
DiagnoseBadShiftValues(*this, LHS, RHS, Loc, Opc, LHSType);
// "The type of the result is that of the promoted left operand."
@@ -11428,6 +12721,11 @@ static void diagnoseTautologicalComparison(Sema &S, SourceLocation Loc,
S.inTemplateInstantiation())
return;
+ // WebAssembly Tables cannot be compared, therefore shouldn't emit
+ // Tautological diagnostics.
+ if (LHSType->isWebAssemblyTableType() || RHSType->isWebAssemblyTableType())
+ return;
+
// Comparisons between two array types are ill-formed for operator<=>, so
// we shouldn't emit any additional warnings about it.
if (Opc == BO_Cmp && LHSType->isArrayType() && RHSType->isArrayType())
@@ -11669,7 +12967,7 @@ static QualType checkArithmeticOrEnumeralThreeWayCompare(Sema &S,
// We can't use `CK_IntegralCast` when the underlying type is 'bool', so we
// promote the boolean type, and all other promotable integer types, to
// avoid this.
- if (IntType->isPromotableIntegerType())
+ if (S.Context.isPromotableIntegerType(IntType))
IntType = S.Context.getPromotedIntegerType(IntType);
LHS = S.ImpCastExprToType(LHS.get(), IntType, CK_IntegralCast);
@@ -11686,7 +12984,7 @@ static QualType checkArithmeticOrEnumeralThreeWayCompare(Sema &S,
if (Type.isNull())
return S.InvalidOperands(Loc, LHS, RHS);
- Optional<ComparisonCategoryType> CCT =
+ std::optional<ComparisonCategoryType> CCT =
getComparisonCategoryForBuiltinCmp(Type);
if (!CCT)
return S.InvalidOperands(Loc, LHS, RHS);
@@ -11724,8 +13022,8 @@ static QualType checkArithmeticOrEnumeralCompare(Sema &S, ExprResult &LHS,
return S.InvalidOperands(Loc, LHS, RHS);
// Check for comparisons of floating point operands using != and ==.
- if (Type->hasFloatingRepresentation() && BinaryOperator::isEqualityOp(Opc))
- S.CheckFloatComparison(Loc, LHS.get(), RHS.get());
+ if (Type->hasFloatingRepresentation())
+ S.CheckFloatComparison(Loc, LHS.get(), RHS.get(), Opc);
// The result of comparisons is 'bool' in C++, 'int' in C.
return S.Context.getLogicalOperationType();
@@ -11801,6 +13099,10 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
RHS.get()->getType()->isVectorType())
return CheckVectorCompareOperands(LHS, RHS, Loc, Opc);
+ if (LHS.get()->getType()->isSveVLSBuiltinType() ||
+ RHS.get()->getType()->isSveVLSBuiltinType())
+ return CheckSizelessVectorCompareOperands(LHS, RHS, Loc, Opc);
+
diagnoseLogicalNotOnLHSofCheck(*this, LHS, RHS, Loc, Opc);
diagnoseTautologicalComparison(*this, Loc, LHS.get(), RHS.get(), Opc);
@@ -11810,6 +13112,12 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
(RHSType->isArithmeticType() || RHSType->isEnumeralType()))
return checkArithmeticOrEnumeralCompare(*this, LHS, RHS, Loc, Opc);
+ if ((LHSType->isPointerType() &&
+ LHSType->getPointeeType().isWebAssemblyReferenceType()) ||
+ (RHSType->isPointerType() &&
+ RHSType->getPointeeType().isWebAssemblyReferenceType()))
+ return InvalidOperands(Loc, LHS, RHS);
+
const Expr::NullPointerConstantKind LHSNullKind =
LHS.get()->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull);
const Expr::NullPointerConstantKind RHSNullKind =
@@ -11826,7 +13134,7 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
QualType CompositeTy = LHS.get()->getType();
assert(!CompositeTy->isReferenceType());
- Optional<ComparisonCategoryType> CCT =
+ std::optional<ComparisonCategoryType> CCT =
getComparisonCategoryForBuiltinCmp(CompositeTy);
if (!CCT)
return InvalidOperands(Loc, LHS, RHS);
@@ -11971,34 +13279,55 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
return computeResultTy();
}
- if (getLangOpts().CPlusPlus) {
- // C++ [expr.eq]p4:
- // Two operands of type std::nullptr_t or one operand of type
- // std::nullptr_t and the other a null pointer constant compare equal.
- if (!IsOrdered && LHSIsNull && RHSIsNull) {
- if (LHSType->isNullPtrType()) {
- RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer);
- return computeResultTy();
- }
- if (RHSType->isNullPtrType()) {
- LHS = ImpCastExprToType(LHS.get(), RHSType, CK_NullToPointer);
- return computeResultTy();
- }
- }
- // Comparison of Objective-C pointers and block pointers against nullptr_t.
- // These aren't covered by the composite pointer type rules.
- if (!IsOrdered && RHSType->isNullPtrType() &&
- (LHSType->isObjCObjectPointerType() || LHSType->isBlockPointerType())) {
+ // C++ [expr.eq]p4:
+ // Two operands of type std::nullptr_t or one operand of type
+ // std::nullptr_t and the other a null pointer constant compare
+ // equal.
+ // C23 6.5.9p5:
+ // If both operands have type nullptr_t or one operand has type nullptr_t
+ // and the other is a null pointer constant, they compare equal if the
+ // former is a null pointer.
+ if (!IsOrdered && LHSIsNull && RHSIsNull) {
+ if (LHSType->isNullPtrType()) {
RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer);
return computeResultTy();
}
- if (!IsOrdered && LHSType->isNullPtrType() &&
- (RHSType->isObjCObjectPointerType() || RHSType->isBlockPointerType())) {
+ if (RHSType->isNullPtrType()) {
+ LHS = ImpCastExprToType(LHS.get(), RHSType, CK_NullToPointer);
+ return computeResultTy();
+ }
+ }
+
+ if (!getLangOpts().CPlusPlus && !IsOrdered && (LHSIsNull || RHSIsNull)) {
+ // C23 6.5.9p6:
+ // Otherwise, at least one operand is a pointer. If one is a pointer and
+ // the other is a null pointer constant or has type nullptr_t, they
+ // compare equal
+ if (LHSIsNull && RHSType->isPointerType()) {
LHS = ImpCastExprToType(LHS.get(), RHSType, CK_NullToPointer);
return computeResultTy();
}
+ if (RHSIsNull && LHSType->isPointerType()) {
+ RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer);
+ return computeResultTy();
+ }
+ }
+ // Comparison of Objective-C pointers and block pointers against nullptr_t.
+ // These aren't covered by the composite pointer type rules.
+ if (!IsOrdered && RHSType->isNullPtrType() &&
+ (LHSType->isObjCObjectPointerType() || LHSType->isBlockPointerType())) {
+ RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer);
+ return computeResultTy();
+ }
+ if (!IsOrdered && LHSType->isNullPtrType() &&
+ (RHSType->isObjCObjectPointerType() || RHSType->isBlockPointerType())) {
+ LHS = ImpCastExprToType(LHS.get(), RHSType, CK_NullToPointer);
+ return computeResultTy();
+ }
+
+ if (getLangOpts().CPlusPlus) {
if (IsRelational &&
((LHSType->isNullPtrType() && RHSType->isPointerType()) ||
(RHSType->isNullPtrType() && LHSType->isPointerType()))) {
@@ -12191,7 +13520,7 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
return computeResultTy();
}
- if (getLangOpts().OpenCLVersion >= 200 || getLangOpts().OpenCLCPlusPlus) {
+ if (getLangOpts().getOpenCLCompatibleVersion() >= 200) {
if (LHSType->isClkEventT() && RHSType->isClkEventT()) {
return computeResultTy();
}
@@ -12224,35 +13553,54 @@ QualType Sema::GetSignedVectorType(QualType V) {
unsigned TypeSize = Context.getTypeSize(VTy->getElementType());
if (isa<ExtVectorType>(VTy)) {
+ if (VTy->isExtVectorBoolType())
+ return Context.getExtVectorType(Context.BoolTy, VTy->getNumElements());
if (TypeSize == Context.getTypeSize(Context.CharTy))
return Context.getExtVectorType(Context.CharTy, VTy->getNumElements());
- else if (TypeSize == Context.getTypeSize(Context.ShortTy))
+ if (TypeSize == Context.getTypeSize(Context.ShortTy))
return Context.getExtVectorType(Context.ShortTy, VTy->getNumElements());
- else if (TypeSize == Context.getTypeSize(Context.IntTy))
+ if (TypeSize == Context.getTypeSize(Context.IntTy))
return Context.getExtVectorType(Context.IntTy, VTy->getNumElements());
- else if (TypeSize == Context.getTypeSize(Context.LongTy))
+ if (TypeSize == Context.getTypeSize(Context.Int128Ty))
+ return Context.getExtVectorType(Context.Int128Ty, VTy->getNumElements());
+ if (TypeSize == Context.getTypeSize(Context.LongTy))
return Context.getExtVectorType(Context.LongTy, VTy->getNumElements());
assert(TypeSize == Context.getTypeSize(Context.LongLongTy) &&
"Unhandled vector element size in vector compare");
return Context.getExtVectorType(Context.LongLongTy, VTy->getNumElements());
}
+ if (TypeSize == Context.getTypeSize(Context.Int128Ty))
+ return Context.getVectorType(Context.Int128Ty, VTy->getNumElements(),
+ VectorKind::Generic);
if (TypeSize == Context.getTypeSize(Context.LongLongTy))
return Context.getVectorType(Context.LongLongTy, VTy->getNumElements(),
- VectorType::GenericVector);
- else if (TypeSize == Context.getTypeSize(Context.LongTy))
+ VectorKind::Generic);
+ if (TypeSize == Context.getTypeSize(Context.LongTy))
return Context.getVectorType(Context.LongTy, VTy->getNumElements(),
- VectorType::GenericVector);
- else if (TypeSize == Context.getTypeSize(Context.IntTy))
+ VectorKind::Generic);
+ if (TypeSize == Context.getTypeSize(Context.IntTy))
return Context.getVectorType(Context.IntTy, VTy->getNumElements(),
- VectorType::GenericVector);
- else if (TypeSize == Context.getTypeSize(Context.ShortTy))
+ VectorKind::Generic);
+ if (TypeSize == Context.getTypeSize(Context.ShortTy))
return Context.getVectorType(Context.ShortTy, VTy->getNumElements(),
- VectorType::GenericVector);
+ VectorKind::Generic);
assert(TypeSize == Context.getTypeSize(Context.CharTy) &&
"Unhandled vector element size in vector compare");
return Context.getVectorType(Context.CharTy, VTy->getNumElements(),
- VectorType::GenericVector);
+ VectorKind::Generic);
+}
+
+QualType Sema::GetSignedSizelessVectorType(QualType V) {
+ const BuiltinType *VTy = V->castAs<BuiltinType>();
+ assert(VTy->isSizelessBuiltinType() && "expected sizeless type");
+
+ const QualType ETy = V->getSveEltType(Context);
+ const auto TypeSize = Context.getTypeSize(ETy);
+
+ const QualType IntTy = Context.getIntTypeForBitwidth(TypeSize, true);
+ const llvm::ElementCount VecSize = Context.getBuiltinVectorTypeInfo(VTy).EC;
+ return Context.getScalableVectorType(IntTy, VecSize.getKnownMinValue());
}
/// CheckVectorCompareOperands - vector comparisons are a clang extension that
@@ -12269,9 +13617,12 @@ QualType Sema::CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
// Check to make sure we're operating on vectors of the same type and width,
// Allowing one side to be a scalar of element type.
- QualType vType = CheckVectorOperands(LHS, RHS, Loc, /*isCompAssign*/false,
- /*AllowBothBool*/true,
- /*AllowBoolConversions*/getLangOpts().ZVector);
+ QualType vType =
+ CheckVectorOperands(LHS, RHS, Loc, /*isCompAssign*/ false,
+ /*AllowBothBool*/ true,
+ /*AllowBoolConversions*/ getLangOpts().ZVector,
+ /*AllowBooleanOperation*/ true,
+ /*ReportInvalid*/ true);
if (vType.isNull())
return vType;
@@ -12288,7 +13639,7 @@ QualType Sema::CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
// If AltiVec, the comparison results in a numeric type, i.e.
// bool for C++, int for C
if (vType->castAs<VectorType>()->getVectorKind() ==
- VectorType::AltiVecVector)
+ VectorKind::AltiVecVector)
return Context.getLogicalOperationType();
else
Diag(Loc, diag::warn_deprecated_altivec_src_compat);
@@ -12308,16 +13659,56 @@ QualType Sema::CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
diagnoseTautologicalComparison(*this, Loc, LHS.get(), RHS.get(), Opc);
// Check for comparisons of floating point operands using != and ==.
- if (BinaryOperator::isEqualityOp(Opc) &&
- LHSType->hasFloatingRepresentation()) {
+ if (LHSType->hasFloatingRepresentation()) {
assert(RHS.get()->getType()->hasFloatingRepresentation());
- CheckFloatComparison(Loc, LHS.get(), RHS.get());
+ CheckFloatComparison(Loc, LHS.get(), RHS.get(), Opc);
}
// Return a signed type for the vector.
return GetSignedVectorType(vType);
}
+QualType Sema::CheckSizelessVectorCompareOperands(ExprResult &LHS,
+ ExprResult &RHS,
+ SourceLocation Loc,
+ BinaryOperatorKind Opc) {
+ if (Opc == BO_Cmp) {
+ Diag(Loc, diag::err_three_way_vector_comparison);
+ return QualType();
+ }
+
+ // Check to make sure we're operating on vectors of the same type and width,
+ // Allowing one side to be a scalar of element type.
+ QualType vType = CheckSizelessVectorOperands(
+ LHS, RHS, Loc, /*isCompAssign*/ false, ACK_Comparison);
+
+ if (vType.isNull())
+ return vType;
+
+ QualType LHSType = LHS.get()->getType();
+
+ // For non-floating point types, check for self-comparisons of the form
+ // x == x, x != x, x < x, etc. These always evaluate to a constant, and
+ // often indicate logic errors in the program.
+ diagnoseTautologicalComparison(*this, Loc, LHS.get(), RHS.get(), Opc);
+
+ // Check for comparisons of floating point operands using != and ==.
+ if (LHSType->hasFloatingRepresentation()) {
+ assert(RHS.get()->getType()->hasFloatingRepresentation());
+ CheckFloatComparison(Loc, LHS.get(), RHS.get(), Opc);
+ }
+
+ const BuiltinType *LHSBuiltinTy = LHSType->getAs<BuiltinType>();
+ const BuiltinType *RHSBuiltinTy = RHS.get()->getType()->getAs<BuiltinType>();
+
+ if (LHSBuiltinTy && RHSBuiltinTy && LHSBuiltinTy->isSVEBool() &&
+ RHSBuiltinTy->isSVEBool())
+ return LHSType;
+
+ // Return a signed type for the vector.
+ return GetSignedSizelessVectorType(vType);
+}
+
static void diagnoseXorMisusedAsPow(Sema &S, const ExprResult &XorLHS,
const ExprResult &XorRHS,
const SourceLocation Loc) {
@@ -12392,14 +13783,13 @@ static void diagnoseXorMisusedAsPow(Sema &S, const ExprResult &XorLHS,
StringRef RHSStrRef = RHSStr;
// Do not diagnose literals with digit separators, binary, hexadecimal, octal
// literals.
- if (LHSStrRef.startswith("0b") || LHSStrRef.startswith("0B") ||
- RHSStrRef.startswith("0b") || RHSStrRef.startswith("0B") ||
- LHSStrRef.startswith("0x") || LHSStrRef.startswith("0X") ||
- RHSStrRef.startswith("0x") || RHSStrRef.startswith("0X") ||
- (LHSStrRef.size() > 1 && LHSStrRef.startswith("0")) ||
- (RHSStrRef.size() > 1 && RHSStrRef.startswith("0")) ||
- LHSStrRef.find('\'') != StringRef::npos ||
- RHSStrRef.find('\'') != StringRef::npos)
+ if (LHSStrRef.starts_with("0b") || LHSStrRef.starts_with("0B") ||
+ RHSStrRef.starts_with("0b") || RHSStrRef.starts_with("0B") ||
+ LHSStrRef.starts_with("0x") || LHSStrRef.starts_with("0X") ||
+ RHSStrRef.starts_with("0x") || RHSStrRef.starts_with("0X") ||
+ (LHSStrRef.size() > 1 && LHSStrRef.starts_with("0")) ||
+ (RHSStrRef.size() > 1 && RHSStrRef.starts_with("0")) ||
+ LHSStrRef.contains('\'') || RHSStrRef.contains('\''))
return;
bool SuggestXor =
@@ -12446,12 +13836,15 @@ QualType Sema::CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
// Ensure that either both operands are of the same vector type, or
// one operand is of a vector type and the other is of its element type.
QualType vType = CheckVectorOperands(LHS, RHS, Loc, false,
- /*AllowBothBool*/true,
- /*AllowBoolConversions*/false);
+ /*AllowBothBool*/ true,
+ /*AllowBoolConversions*/ false,
+ /*AllowBooleanOperation*/ false,
+ /*ReportInvalid*/ false);
if (vType.isNull())
return InvalidOperands(Loc, LHS, RHS);
- if (getLangOpts().OpenCL && getLangOpts().OpenCLVersion < 120 &&
- !getLangOpts().OpenCLCPlusPlus && vType->hasFloatingRepresentation())
+ if (getLangOpts().OpenCL &&
+ getLangOpts().getOpenCLCompatibleVersion() < 120 &&
+ vType->hasFloatingRepresentation())
return InvalidOperands(Loc, LHS, RHS);
// FIXME: The check for C++ here is for GCC compatibility. GCC rejects the
// usage of the logical operators && and || with vectors in C. This
@@ -12485,7 +13878,7 @@ QualType Sema::CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
assert((LHSMatType || RHSMatType) && "At least one operand must be a matrix");
if (Context.hasSameType(LHSType, RHSType))
- return LHSType;
+ return Context.getCommonSugaredType(LHSType, RHSType);
// Type conversion may change LHS/RHS. Keep copies to the original results, in
// case we have to return InvalidOperands.
@@ -12529,17 +13922,37 @@ QualType Sema::CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
if (LHSMatType->getNumColumns() != RHSMatType->getNumRows())
return InvalidOperands(Loc, LHS, RHS);
- if (!Context.hasSameType(LHSMatType->getElementType(),
- RHSMatType->getElementType()))
+ if (Context.hasSameType(LHSMatType, RHSMatType))
+ return Context.getCommonSugaredType(
+ LHS.get()->getType().getUnqualifiedType(),
+ RHS.get()->getType().getUnqualifiedType());
+
+ QualType LHSELTy = LHSMatType->getElementType(),
+ RHSELTy = RHSMatType->getElementType();
+ if (!Context.hasSameType(LHSELTy, RHSELTy))
return InvalidOperands(Loc, LHS, RHS);
- return Context.getConstantMatrixType(LHSMatType->getElementType(),
- LHSMatType->getNumRows(),
- RHSMatType->getNumColumns());
+ return Context.getConstantMatrixType(
+ Context.getCommonSugaredType(LHSELTy, RHSELTy),
+ LHSMatType->getNumRows(), RHSMatType->getNumColumns());
}
return CheckMatrixElementwiseOperands(LHS, RHS, Loc, IsCompAssign);
}
+static bool isLegalBoolVectorBinaryOp(BinaryOperatorKind Opc) {
+ switch (Opc) {
+ default:
+ return false;
+ case BO_And:
+ case BO_AndAssign:
+ case BO_Or:
+ case BO_OrAssign:
+ case BO_Xor:
+ case BO_XorAssign:
+ return true;
+ }
+}
+
inline QualType Sema::CheckBitwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc) {
@@ -12548,13 +13961,35 @@ inline QualType Sema::CheckBitwiseOperands(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign =
Opc == BO_AndAssign || Opc == BO_OrAssign || Opc == BO_XorAssign;
+ bool LegalBoolVecOperator = isLegalBoolVectorBinaryOp(Opc);
+
if (LHS.get()->getType()->isVectorType() ||
RHS.get()->getType()->isVectorType()) {
if (LHS.get()->getType()->hasIntegerRepresentation() &&
RHS.get()->getType()->hasIntegerRepresentation())
return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign,
- /*AllowBothBool*/true,
- /*AllowBoolConversions*/getLangOpts().ZVector);
+ /*AllowBothBool*/ true,
+ /*AllowBoolConversions*/ getLangOpts().ZVector,
+ /*AllowBooleanOperation*/ LegalBoolVecOperator,
+ /*ReportInvalid*/ true);
+ return InvalidOperands(Loc, LHS, RHS);
+ }
+
+ if (LHS.get()->getType()->isSveVLSBuiltinType() ||
+ RHS.get()->getType()->isSveVLSBuiltinType()) {
+ if (LHS.get()->getType()->hasIntegerRepresentation() &&
+ RHS.get()->getType()->hasIntegerRepresentation())
+ return CheckSizelessVectorOperands(LHS, RHS, Loc, IsCompAssign,
+ ACK_BitwiseOp);
+ return InvalidOperands(Loc, LHS, RHS);
+ }
+
+ if (LHS.get()->getType()->isSveVLSBuiltinType() ||
+ RHS.get()->getType()->isSveVLSBuiltinType()) {
+ if (LHS.get()->getType()->hasIntegerRepresentation() &&
+ RHS.get()->getType()->hasIntegerRepresentation())
+ return CheckSizelessVectorOperands(LHS, RHS, Loc, IsCompAssign,
+ ACK_BitwiseOp);
return InvalidOperands(Loc, LHS, RHS);
}
@@ -12586,7 +14021,8 @@ inline QualType Sema::CheckLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc) {
// Check vector operands differently.
- if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType())
+ if (LHS.get()->getType()->isVectorType() ||
+ RHS.get()->getType()->isVectorType())
return CheckVectorLogicalOperands(LHS, RHS, Loc);
bool EnumConstantInBoolContext = false;
@@ -12601,6 +14037,16 @@ inline QualType Sema::CheckLogicalOperands(ExprResult &LHS, ExprResult &RHS,
if (EnumConstantInBoolContext)
Diag(Loc, diag::warn_enum_constant_in_bool_context);
+ // WebAssembly tables can't be used with logical operators.
+ QualType LHSTy = LHS.get()->getType();
+ QualType RHSTy = RHS.get()->getType();
+ const auto *LHSATy = dyn_cast<ArrayType>(LHSTy);
+ const auto *RHSATy = dyn_cast<ArrayType>(RHSTy);
+ if ((LHSATy && LHSATy->getElementType().isWebAssemblyReferenceType()) ||
+ (RHSATy && RHSATy->getElementType().isWebAssemblyReferenceType())) {
+ return InvalidOperands(Loc, LHS, RHS);
+ }
+
// Diagnose cases where the user write a logical and/or but probably meant a
// bitwise one. We do this when the LHS is a non-bool integer and the RHS
// is a constant.
@@ -12616,18 +14062,17 @@ inline QualType Sema::CheckLogicalOperands(ExprResult &LHS, ExprResult &RHS,
Expr::EvalResult EVResult;
if (RHS.get()->EvaluateAsInt(EVResult, Context)) {
llvm::APSInt Result = EVResult.Val.getInt();
- if ((getLangOpts().Bool && !RHS.get()->getType()->isBooleanType() &&
+ if ((getLangOpts().CPlusPlus && !RHS.get()->getType()->isBooleanType() &&
!RHS.get()->getExprLoc().isMacroID()) ||
(Result != 0 && Result != 1)) {
Diag(Loc, diag::warn_logical_instead_of_bitwise)
- << RHS.get()->getSourceRange()
- << (Opc == BO_LAnd ? "&&" : "||");
+ << RHS.get()->getSourceRange() << (Opc == BO_LAnd ? "&&" : "||");
// Suggest replacing the logical operator with the bitwise version
Diag(Loc, diag::note_logical_instead_of_bitwise_change_operator)
<< (Opc == BO_LAnd ? "&" : "|")
- << FixItHint::CreateReplacement(SourceRange(
- Loc, getLocForEndOfToken(Loc)),
- Opc == BO_LAnd ? "&" : "|");
+ << FixItHint::CreateReplacement(
+ SourceRange(Loc, getLocForEndOfToken(Loc)),
+ Opc == BO_LAnd ? "&" : "|");
if (Opc == BO_LAnd)
// Suggest replacing "Foo() && kNonZero" with "Foo()"
Diag(Loc, diag::note_logical_instead_of_bitwise_remove_constant)
@@ -12913,7 +14358,7 @@ static void DiagnoseRecursiveConstFields(Sema &S, const ValueDecl *VD,
// Then we append it to the list to check next in order.
FieldTy = FieldTy.getCanonicalType();
if (const auto *FieldRecTy = FieldTy->getAs<RecordType>()) {
- if (llvm::find(RecordTypeList, FieldRecTy) == RecordTypeList.end())
+ if (!llvm::is_contained(RecordTypeList, FieldRecTy))
RecordTypeList.push_back(FieldRecTy);
}
}
@@ -13121,7 +14566,8 @@ static void CheckIdentityFieldAssignment(Expr *LHSExpr, Expr *RHSExpr,
// C99 6.5.16.1
QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
SourceLocation Loc,
- QualType CompoundType) {
+ QualType CompoundType,
+ BinaryOperatorKind Opc) {
assert(!LHSExpr->hasPlaceholderType(BuiltinType::PseudoObject));
// Verify that LHS is a modifiable lvalue, and emit error if not.
@@ -13142,6 +14588,12 @@ QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
return QualType();
}
+ // WebAssembly tables can't be used on RHS of an assignment expression.
+ if (RHSType->isWebAssemblyTableType()) {
+ Diag(Loc, diag::err_wasm_table_art) << 0;
+ return QualType();
+ }
+
AssignConvertType ConvTy;
if (CompoundType.isNull()) {
Expr *RHSCheck = RHS.get();
@@ -13233,27 +14685,24 @@ QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
// type is deprecated unless the assignment is either a discarded-value
// expression or an unevaluated operand
ExprEvalContexts.back().VolatileAssignmentLHSs.push_back(LHSExpr);
- } else {
- // C++2a [expr.ass]p6:
- // [Compound-assignment] expressions are deprecated if E1 has
- // volatile-qualified type
- Diag(Loc, diag::warn_deprecated_compound_assign_volatile) << LHSType;
}
}
- // C99 6.5.16p3: The type of an assignment expression is the type of the
- // left operand unless the left operand has qualified type, in which case
- // it is the unqualified version of the type of the left operand.
- // C99 6.5.16.1p2: In simple assignment, the value of the right operand
- // is converted to the type of the assignment expression (above).
+ // C11 6.5.16p3: The type of an assignment expression is the type of the
+ // left operand would have after lvalue conversion.
+ // C11 6.3.2.1p2: ...this is called lvalue conversion. If the lvalue has
+ // qualified type, the value has the unqualified version of the type of the
+ // lvalue; additionally, if the lvalue has atomic type, the value has the
+ // non-atomic version of the type of the lvalue.
// C++ 5.17p1: the type of the assignment expression is that of its left
// operand.
- return (getLangOpts().CPlusPlus
- ? LHSType : LHSType.getUnqualifiedType());
+ return getLangOpts().CPlusPlus ? LHSType : LHSType.getAtomicUnqualifiedType();
}
-// Only ignore explicit casts to void.
-static bool IgnoreCommaOperand(const Expr *E) {
+// Scenarios to ignore if expression E is:
+// 1. an explicit cast expression into void
+// 2. a function call expression that returns void
+static bool IgnoreCommaOperand(const Expr *E, const ASTContext &Context) {
E = E->IgnoreParens();
if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
@@ -13268,6 +14717,8 @@ static bool IgnoreCommaOperand(const Expr *E) {
}
}
+ if (const auto *CE = dyn_cast<CallExpr>(E))
+ return CE->getCallReturnType(Context)->isVoidType();
return false;
}
@@ -13309,7 +14760,7 @@ void Sema::DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc) {
}
// Only allow some expressions on LHS to not warn.
- if (IgnoreCommaOperand(LHS))
+ if (IgnoreCommaOperand(LHS, Context))
return;
Diag(Loc, diag::warn_comma_operator);
@@ -13340,7 +14791,7 @@ static QualType CheckCommaOperands(Sema &S, ExprResult &LHS, ExprResult &RHS,
if (LHS.isInvalid())
return QualType();
- S.DiagnoseUnusedExprResult(LHS.get());
+ S.DiagnoseUnusedExprResult(LHS.get(), diag::warn_unused_comma_left_operand);
if (!S.getLangOpts().CPlusPlus) {
RHS = S.DefaultFunctionArrayLvalueConversion(RHS.get());
@@ -13415,10 +14866,10 @@ static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
// OK! ( C/C++ Language Extensions for CBEA(Version 2.6) 10.3 )
} else if (S.getLangOpts().ZVector && ResType->isVectorType() &&
(ResType->castAs<VectorType>()->getVectorKind() !=
- VectorType::AltiVecBool)) {
+ VectorKind::AltiVecBool)) {
// The z vector extensions allow ++ and -- for non-bool vectors.
- } else if(S.getLangOpts().OpenCL && ResType->isVectorType() &&
- ResType->castAs<VectorType>()->getElementType()->isIntegerType()) {
+ } else if (S.getLangOpts().OpenCL && ResType->isVectorType() &&
+ ResType->castAs<VectorType>()->getElementType()->isIntegerType()) {
// OpenCL V1.2 6.3 says dec/inc ops operate on integer vector types.
} else {
S.Diag(OpLoc, diag::err_typecheck_illegal_increment_decrement)
@@ -13529,6 +14980,34 @@ static void diagnoseAddressOfInvalidType(Sema &S, SourceLocation Loc,
S.Diag(Loc, diag::err_typecheck_address_of) << Type << E->getSourceRange();
}
+bool Sema::CheckUseOfCXXMethodAsAddressOfOperand(SourceLocation OpLoc,
+ const Expr *Op,
+ const CXXMethodDecl *MD) {
+ const auto *DRE = cast<DeclRefExpr>(Op->IgnoreParens());
+
+ if (Op != DRE)
+ return Diag(OpLoc, diag::err_parens_pointer_member_function)
+ << Op->getSourceRange();
+
+ // Taking the address of a dtor is illegal per C++ [class.dtor]p2.
+ if (isa<CXXDestructorDecl>(MD))
+ return Diag(OpLoc, diag::err_typecheck_addrof_dtor)
+ << DRE->getSourceRange();
+
+ if (DRE->getQualifier())
+ return false;
+
+ if (MD->getParent()->getName().empty())
+ return Diag(OpLoc, diag::err_unqualified_pointer_member_function)
+ << DRE->getSourceRange();
+
+ SmallString<32> Str;
+ StringRef Qual = (MD->getParent()->getName() + "::").toStringRef(Str);
+ return Diag(OpLoc, diag::err_unqualified_pointer_member_function)
+ << DRE->getSourceRange()
+ << FixItHint::CreateInsertion(DRE->getSourceRange().getBegin(), Qual);
+}
+
/// CheckAddressOfOperand - The operand of & must be either a function
/// designator or an lvalue designating an object. If it is an lvalue, the
/// object cannot be declared with storage class register or be a bit field.
@@ -13574,7 +15053,7 @@ QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
if (OrigOp.get()->isTypeDependent())
return Context.DependentTy;
- assert(!OrigOp.get()->getType()->isPlaceholderType());
+ assert(!OrigOp.get()->hasPlaceholderType());
// Make sure to ignore parentheses in subsequent checks
Expr *op = OrigOp.get()->IgnoreParens();
@@ -13638,28 +15117,7 @@ QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
DeclRefExpr *DRE = cast<DeclRefExpr>(op);
CXXMethodDecl *MD = cast<CXXMethodDecl>(DRE->getDecl());
- // The id-expression was parenthesized.
- if (OrigOp.get() != DRE) {
- Diag(OpLoc, diag::err_parens_pointer_member_function)
- << OrigOp.get()->getSourceRange();
-
- // The method was named without a qualifier.
- } else if (!DRE->getQualifier()) {
- if (MD->getParent()->getName().empty())
- Diag(OpLoc, diag::err_unqualified_pointer_member_function)
- << op->getSourceRange();
- else {
- SmallString<32> Str;
- StringRef Qual = (MD->getParent()->getName() + "::").toStringRef(Str);
- Diag(OpLoc, diag::err_unqualified_pointer_member_function)
- << op->getSourceRange()
- << FixItHint::CreateInsertion(op->getSourceRange().getBegin(), Qual);
- }
- }
-
- // Taking the address of a dtor is illegal per C++ [class.dtor]p2.
- if (isa<CXXDestructorDecl>(MD))
- Diag(OpLoc, diag::err_typecheck_addrof_dtor) << op->getSourceRange();
+ CheckUseOfCXXMethodAsAddressOfOperand(OpLoc, OrigOp.get(), MD);
QualType MPTy = Context.getMemberPointerType(
op->getType(), Context.getTypeDeclType(MD->getParent()).getTypePtr());
@@ -13679,7 +15137,11 @@ QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
<< op->getType() << op->getSourceRange();
return QualType();
}
+ } else if (const auto *DRE = dyn_cast<DeclRefExpr>(op)) {
+ if (const auto *MD = dyn_cast_or_null<CXXMethodDecl>(DRE->getDecl()))
+ CheckUseOfCXXMethodAsAddressOfOperand(OpLoc, OrigOp.get(), MD);
}
+
} else if (op->getObjectKind() == OK_BitField) { // C99 6.5.3.2p1
// The operand cannot be a bit-field
AddressOfError = AO_Bit_Field;
@@ -13729,8 +15191,8 @@ QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
return MPTy;
}
}
- } else if (!isa<FunctionDecl>(dcl) && !isa<NonTypeTemplateParmDecl>(dcl) &&
- !isa<BindingDecl>(dcl) && !isa<MSGuidDecl>(dcl))
+ } else if (!isa<FunctionDecl, NonTypeTemplateParmDecl, BindingDecl,
+ MSGuidDecl, UnnamedGlobalConstantDecl>(dcl))
llvm_unreachable("Unknown/unexpected decl type");
}
@@ -13750,6 +15212,21 @@ QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
if (op->getType()->isObjCObjectType())
return Context.getObjCObjectPointerType(op->getType());
+ // Cannot take the address of WebAssembly references or tables.
+ if (Context.getTargetInfo().getTriple().isWasm()) {
+ QualType OpTy = op->getType();
+ if (OpTy.isWebAssemblyReferenceType()) {
+ Diag(OpLoc, diag::err_wasm_ca_reference)
+ << 1 << OrigOp.get()->getSourceRange();
+ return QualType();
+ }
+ if (OpTy->isWebAssemblyTableType()) {
+ Diag(OpLoc, diag::err_wasm_table_pr)
+ << 1 << OrigOp.get()->getSourceRange();
+ return QualType();
+ }
+ }
+
CheckAddressOfPackedMember(op);
return Context.getPointerType(op->getType());
@@ -13769,13 +15246,13 @@ static void RecordModifiableNonNullParam(Sema &S, const Expr *Exp) {
if (!FD->hasAttr<NonNullAttr>() && !Param->hasAttr<NonNullAttr>())
return;
if (FunctionScopeInfo *FD = S.getCurFunction())
- if (!FD->ModifiedNonNullParams.count(Param))
- FD->ModifiedNonNullParams.insert(Param);
+ FD->ModifiedNonNullParams.insert(Param);
}
/// CheckIndirectionOperand - Type check unary indirection (prefix '*').
static QualType CheckIndirectionOperand(Sema &S, Expr *Op, ExprValueKind &VK,
- SourceLocation OpLoc) {
+ SourceLocation OpLoc,
+ bool IsAfterAmp = false) {
if (Op->isTypeDependent())
return S.Context.DependentTy;
@@ -13812,18 +15289,18 @@ static QualType CheckIndirectionOperand(Sema &S, Expr *Op, ExprValueKind &VK,
return QualType();
}
- // Note that per both C89 and C99, indirection is always legal, even if Result
- // is an incomplete type or void. It would be possible to warn about
- // dereferencing a void pointer, but it's completely well-defined, and such a
- // warning is unlikely to catch any mistakes. In C++, indirection is not valid
- // for pointers to 'void' but is fine for any other pointer type:
- //
- // C++ [expr.unary.op]p1:
- // [...] the expression to which [the unary * operator] is applied shall
- // be a pointer to an object type, or a pointer to a function type
- if (S.getLangOpts().CPlusPlus && Result->isVoidType())
- S.Diag(OpLoc, diag::ext_typecheck_indirection_through_void_pointer)
- << OpTy << Op->getSourceRange();
+ if (Result->isVoidType()) {
+ // C++ [expr.unary.op]p1:
+ // [...] the expression to which [the unary * operator] is applied shall
+ // be a pointer to an object type, or a pointer to a function type
+ LangOptions LO = S.getLangOpts();
+ if (LO.CPlusPlus)
+ S.Diag(OpLoc, diag::err_typecheck_indirection_through_void_pointer_cpp)
+ << OpTy << Op->getSourceRange();
+ else if (!(LO.C99 && IsAfterAmp) && !S.isUnevaluatedContext())
+ S.Diag(OpLoc, diag::ext_typecheck_indirection_through_void_pointer)
+ << OpTy << Op->getSourceRange();
+ }
// Dereferences are usually l-values...
VK = VK_LValue;
@@ -13896,6 +15373,40 @@ static inline UnaryOperatorKind ConvertTokenKindToUnaryOpcode(
return Opc;
}
+const FieldDecl *
+Sema::getSelfAssignmentClassMemberCandidate(const ValueDecl *SelfAssigned) {
+ // Explore the case for adding 'this->' to the LHS of a self assignment, very
+ // common for setters.
+ // struct A {
+ // int X;
+ // -void setX(int X) { X = X; }
+ // +void setX(int X) { this->X = X; }
+ // };
+
+ // Only consider parameters for self assignment fixes.
+ if (!isa<ParmVarDecl>(SelfAssigned))
+ return nullptr;
+ const auto *Method =
+ dyn_cast_or_null<CXXMethodDecl>(getCurFunctionDecl(true));
+ if (!Method)
+ return nullptr;
+
+ const CXXRecordDecl *Parent = Method->getParent();
+ // In theory this is fixable if the lambda explicitly captures this, but
+ // that's added complexity that's rarely going to be used.
+ if (Parent->isLambda())
+ return nullptr;
+
+ // FIXME: Use an actual Lookup operation instead of just traversing fields
+ // in order to get base class fields.
+ auto Field =
+ llvm::find_if(Parent->fields(),
+ [Name(SelfAssigned->getDeclName())](const FieldDecl *F) {
+ return F->getDeclName() == Name;
+ });
+ return (Field != Parent->field_end()) ? *Field : nullptr;
+}
+
/// DiagnoseSelfAssignment - Emits a warning if a value is assigned to itself.
/// This warning suppressed in the event of macro expansions.
static void DiagnoseSelfAssignment(Sema &S, Expr *LHSExpr, Expr *RHSExpr,
@@ -13926,10 +15437,16 @@ static void DiagnoseSelfAssignment(Sema &S, Expr *LHSExpr, Expr *RHSExpr,
if (RefTy->getPointeeType().isVolatileQualified())
return;
- S.Diag(OpLoc, IsBuiltin ? diag::warn_self_assignment_builtin
- : diag::warn_self_assignment_overloaded)
- << LHSDeclRef->getType() << LHSExpr->getSourceRange()
- << RHSExpr->getSourceRange();
+ auto Diag = S.Diag(OpLoc, IsBuiltin ? diag::warn_self_assignment_builtin
+ : diag::warn_self_assignment_overloaded)
+ << LHSDeclRef->getType() << LHSExpr->getSourceRange()
+ << RHSExpr->getSourceRange();
+ if (const FieldDecl *SelfAssignField =
+ S.getSelfAssignmentClassMemberCandidate(RHSDecl))
+ Diag << 1 << SelfAssignField
+ << FixItHint::CreateInsertion(LHSDeclRef->getBeginLoc(), "this->");
+ else
+ Diag << 0;
}
/// Check if a bitwise-& is performed on an Objective-C pointer. This
@@ -13966,7 +15483,7 @@ static void checkObjCPointerIntrospection(Sema &S, ExprResult &L, ExprResult &R,
if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(Ex)) {
Selector S = ME->getSelector();
StringRef SelArg0 = S.getNameForSlot(0);
- if (SelArg0.startswith("performSelector"))
+ if (SelArg0.starts_with("performSelector"))
Diag = diag::warn_objc_pointer_masking_performSelector;
}
@@ -14060,7 +15577,7 @@ static bool needsConversionOfHalfVec(bool OpRequiresConversion, ASTContext &Ctx,
// the vectors shouldn't be treated as storage-only types. See the
// discussion here: https://reviews.llvm.org/rG825235c140e7
if (const VectorType *VT = Ty->getAs<VectorType>()) {
- if (VT->getVectorKind() == VectorType::NeonVector)
+ if (VT->getVectorKind() == VectorKind::Neon)
return false;
return VT->getElementType().getCanonicalType() == Ctx.HalfTy;
}
@@ -14132,9 +15649,12 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
}
}
+ checkTypeSupport(LHSExpr->getType(), OpLoc, /*ValueDecl*/ nullptr);
+ checkTypeSupport(RHSExpr->getType(), OpLoc, /*ValueDecl*/ nullptr);
+
switch (Opc) {
case BO_Assign:
- ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, QualType());
+ ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, QualType(), Opc);
if (getLangOpts().CPlusPlus &&
LHS.get()->getObjectKind() != OK_ObjCProperty) {
VK = LHS.get()->getValueKind();
@@ -14214,7 +15734,7 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
break;
case BO_And:
checkObjCPointerIntrospection(*this, LHS, RHS, OpLoc);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case BO_Xor:
case BO_Or:
ResultTy = CheckBitwiseOperands(LHS, RHS, OpLoc, Opc);
@@ -14231,42 +15751,48 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
Opc == BO_DivAssign);
CompLHSTy = CompResultTy;
if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
- ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ ResultTy =
+ CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy, Opc);
break;
case BO_RemAssign:
CompResultTy = CheckRemainderOperands(LHS, RHS, OpLoc, true);
CompLHSTy = CompResultTy;
if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
- ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ ResultTy =
+ CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy, Opc);
break;
case BO_AddAssign:
ConvertHalfVec = true;
CompResultTy = CheckAdditionOperands(LHS, RHS, OpLoc, Opc, &CompLHSTy);
if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
- ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ ResultTy =
+ CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy, Opc);
break;
case BO_SubAssign:
ConvertHalfVec = true;
CompResultTy = CheckSubtractionOperands(LHS, RHS, OpLoc, &CompLHSTy);
if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
- ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ ResultTy =
+ CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy, Opc);
break;
case BO_ShlAssign:
case BO_ShrAssign:
CompResultTy = CheckShiftOperands(LHS, RHS, OpLoc, Opc, true);
CompLHSTy = CompResultTy;
if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
- ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ ResultTy =
+ CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy, Opc);
break;
case BO_AndAssign:
case BO_OrAssign: // fallthrough
DiagnoseSelfAssignment(*this, LHS.get(), RHS.get(), OpLoc, true);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case BO_XorAssign:
CompResultTy = CheckBitwiseOperands(LHS, RHS, OpLoc, Opc);
CompLHSTy = CompResultTy;
if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
- ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ ResultTy =
+ CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy, Opc);
break;
case BO_Comma:
ResultTy = CheckCommaOperands(*this, LHS, RHS, OpLoc);
@@ -14403,38 +15929,21 @@ EmitDiagnosticForLogicalAndInLogicalOr(Sema &Self, SourceLocation OpLoc,
Bop->getSourceRange());
}
-/// Returns true if the given expression can be evaluated as a constant
-/// 'true'.
-static bool EvaluatesAsTrue(Sema &S, Expr *E) {
- bool Res;
- return !E->isValueDependent() &&
- E->EvaluateAsBooleanCondition(Res, S.getASTContext()) && Res;
-}
-
-/// Returns true if the given expression can be evaluated as a constant
-/// 'false'.
-static bool EvaluatesAsFalse(Sema &S, Expr *E) {
- bool Res;
- return !E->isValueDependent() &&
- E->EvaluateAsBooleanCondition(Res, S.getASTContext()) && !Res;
-}
-
/// Look for '&&' in the left hand of a '||' expr.
static void DiagnoseLogicalAndInLogicalOrLHS(Sema &S, SourceLocation OpLoc,
Expr *LHSExpr, Expr *RHSExpr) {
if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(LHSExpr)) {
if (Bop->getOpcode() == BO_LAnd) {
- // If it's "a && b || 0" don't warn since the precedence doesn't matter.
- if (EvaluatesAsFalse(S, RHSExpr))
- return;
- // If it's "1 && a || b" don't warn since the precedence doesn't matter.
- if (!EvaluatesAsTrue(S, Bop->getLHS()))
+ // If it's "string_literal && a || b" don't warn since the precedence
+ // doesn't matter.
+ if (!isa<StringLiteral>(Bop->getLHS()->IgnoreParenImpCasts()))
return EmitDiagnosticForLogicalAndInLogicalOr(S, OpLoc, Bop);
} else if (Bop->getOpcode() == BO_LOr) {
if (BinaryOperator *RBop = dyn_cast<BinaryOperator>(Bop->getRHS())) {
- // If it's "a || b && 1 || c" we didn't warn earlier for
- // "a || b && 1", but warn now.
- if (RBop->getOpcode() == BO_LAnd && EvaluatesAsTrue(S, RBop->getRHS()))
+ // If it's "a || b && string_literal || c" we didn't warn earlier for
+ // "a || b && string_literal", but warn now.
+ if (RBop->getOpcode() == BO_LAnd &&
+ isa<StringLiteral>(RBop->getRHS()->IgnoreParenImpCasts()))
return EmitDiagnosticForLogicalAndInLogicalOr(S, OpLoc, RBop);
}
}
@@ -14446,11 +15955,9 @@ static void DiagnoseLogicalAndInLogicalOrRHS(Sema &S, SourceLocation OpLoc,
Expr *LHSExpr, Expr *RHSExpr) {
if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(RHSExpr)) {
if (Bop->getOpcode() == BO_LAnd) {
- // If it's "0 || a && b" don't warn since the precedence doesn't matter.
- if (EvaluatesAsFalse(S, LHSExpr))
- return;
- // If it's "a || b && 1" don't warn since the precedence doesn't matter.
- if (!EvaluatesAsTrue(S, Bop->getRHS()))
+ // If it's "a || b && string_literal" don't warn since the precedence
+ // doesn't matter.
+ if (!isa<StringLiteral>(Bop->getRHS()->IgnoreParenImpCasts()))
return EmitDiagnosticForLogicalAndInLogicalOr(S, OpLoc, Bop);
}
}
@@ -14583,13 +16090,22 @@ static ExprResult BuildOverloadedBinOp(Sema &S, Scope *Sc, SourceLocation OpLoc,
Expr *LHS, Expr *RHS) {
switch (Opc) {
case BO_Assign:
+ // In the non-overloaded case, we warn about self-assignment (x = x) for
+ // both simple assignment and certain compound assignments where algebra
+ // tells us the operation yields a constant result. When the operator is
+ // overloaded, we can't do the latter because we don't want to assume that
+ // those algebraic identities still apply; for example, a path-building
+ // library might use operator/= to append paths. But it's still reasonable
+ // to assume that simple assignment is just moving/copying values around
+ // and so self-assignment is likely a bug.
+ DiagnoseSelfAssignment(S, LHS, RHS, OpLoc, false);
+ [[fallthrough]];
case BO_DivAssign:
case BO_RemAssign:
case BO_SubAssign:
case BO_AndAssign:
case BO_OrAssign:
case BO_XorAssign:
- DiagnoseSelfAssignment(S, LHS, RHS, OpLoc, false);
CheckIdentityFieldAssignment(LHS, RHS, OpLoc, S);
break;
default:
@@ -14655,7 +16171,7 @@ ExprResult Sema::BuildBinOp(Scope *S, SourceLocation OpLoc,
pty->getKind() == BuiltinType::Overload)) {
auto *OE = dyn_cast<OverloadExpr>(LHSExpr);
if (OE && !OE->hasTemplateKeyword() && !OE->hasExplicitTemplateArgs() &&
- std::any_of(OE->decls_begin(), OE->decls_end(), [](NamedDecl *ND) {
+ llvm::any_of(OE->decls(), [](NamedDecl *ND) {
return isa<FunctionTemplateDecl>(ND);
})) {
Diag(OE->getQualifier() ? OE->getQualifierLoc().getBeginLoc()
@@ -14756,15 +16272,15 @@ static bool isOverflowingIntegerType(ASTContext &Ctx, QualType T) {
if (T.isNull() || T->isDependentType())
return false;
- if (!T->isPromotableIntegerType())
+ if (!Ctx.isPromotableIntegerType(T))
return true;
return Ctx.getIntWidth(T) >= Ctx.getIntWidth(Ctx.IntTy);
}
ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
- UnaryOperatorKind Opc,
- Expr *InputExpr) {
+ UnaryOperatorKind Opc, Expr *InputExpr,
+ bool IsAfterAmp) {
ExprResult Input = InputExpr;
ExprValueKind VK = VK_PRValue;
ExprObjectKind OK = OK_Ordinary;
@@ -14786,6 +16302,13 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
}
}
+ if (getLangOpts().HLSL && OpLoc.isValid()) {
+ if (Opc == UO_AddrOf)
+ return ExprError(Diag(OpLoc, diag::err_hlsl_operator_unsupported) << 0);
+ if (Opc == UO_Deref)
+ return ExprError(Diag(OpLoc, diag::err_hlsl_operator_unsupported) << 1);
+ }
+
switch (Opc) {
case UO_PreInc:
case UO_PreDec:
@@ -14807,7 +16330,8 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
case UO_Deref: {
Input = DefaultFunctionArrayLvalueConversion(Input.get());
if (Input.isInvalid()) return ExprError();
- resultType = CheckIndirectionOperand(*this, Input.get(), VK, OpLoc);
+ resultType =
+ CheckIndirectionOperand(*this, Input.get(), VK, OpLoc, IsAfterAmp);
break;
}
case UO_Plus:
@@ -14834,7 +16358,9 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
// The z vector extensions don't allow + or - with bool vectors.
(!Context.getLangOpts().ZVector ||
resultType->castAs<VectorType>()->getVectorKind() !=
- VectorType::AltiVecBool))
+ VectorKind::AltiVecBool))
+ break;
+ else if (resultType->isSveVLSBuiltinType()) // SVE vectors allow + and -
break;
else if (getLangOpts().CPlusPlus && // C++ [expr.unary.op]p6
Opc == UO_Plus &&
@@ -14883,6 +16409,13 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
resultType = Context.FloatTy;
}
+ // WebAsembly tables can't be used in unary expressions.
+ if (resultType->isPointerType() &&
+ resultType->getPointeeType().isWebAssemblyReferenceType()) {
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << resultType << Input.get()->getSourceRange());
+ }
+
if (resultType->isDependentType())
break;
if (resultType->isScalarType() && !isScopedEnumerationType(resultType)) {
@@ -14902,8 +16435,7 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
}
} else if (resultType->isExtVectorType()) {
if (Context.getLangOpts().OpenCL &&
- Context.getLangOpts().OpenCLVersion < 120 &&
- !Context.getLangOpts().OpenCLCPlusPlus) {
+ Context.getLangOpts().getOpenCLCompatibleVersion() < 120) {
// OpenCL v1.1 6.3.h: The logical operator not (!) does not
// operate on vector float types.
QualType T = resultType->castAs<ExtVectorType>()->getElementType();
@@ -14916,7 +16448,7 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
break;
} else if (Context.getLangOpts().CPlusPlus && resultType->isVectorType()) {
const VectorType *VTy = resultType->castAs<VectorType>();
- if (VTy->getVectorKind() != VectorType::GenericVector)
+ if (VTy->getVectorKind() != VectorKind::Generic)
return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
<< resultType << Input.get()->getSourceRange());
@@ -15000,7 +16532,7 @@ bool Sema::isQualifiedMemberAccess(Expr *E) {
if (isa<FieldDecl>(VD) || isa<IndirectFieldDecl>(VD))
return true;
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(VD))
- return Method->isInstance();
+ return Method->isImplicitObjectMemberFunction();
return false;
}
@@ -15011,7 +16543,7 @@ bool Sema::isQualifiedMemberAccess(Expr *E) {
for (NamedDecl *D : ULE->decls()) {
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
- if (Method->isInstance())
+ if (Method->isImplicitObjectMemberFunction())
return true;
} else {
// Overload set does not contain methods.
@@ -15026,7 +16558,8 @@ bool Sema::isQualifiedMemberAccess(Expr *E) {
}
ExprResult Sema::BuildUnaryOp(Scope *S, SourceLocation OpLoc,
- UnaryOperatorKind Opc, Expr *Input) {
+ UnaryOperatorKind Opc, Expr *Input,
+ bool IsAfterAmp) {
// First things first: handle placeholders so that the
// overloaded-operator check considers the right type.
if (const BuiltinType *pty = Input->getType()->getAsPlaceholderType()) {
@@ -15065,13 +16598,14 @@ ExprResult Sema::BuildUnaryOp(Scope *S, SourceLocation OpLoc,
return CreateOverloadedUnaryOp(OpLoc, Opc, Functions, Input);
}
- return CreateBuiltinUnaryOp(OpLoc, Opc, Input);
+ return CreateBuiltinUnaryOp(OpLoc, Opc, Input, IsAfterAmp);
}
// Unary Operators. 'Tok' is the token for the operator.
-ExprResult Sema::ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
- tok::TokenKind Op, Expr *Input) {
- return BuildUnaryOp(S, OpLoc, ConvertTokenKindToUnaryOpcode(Op), Input);
+ExprResult Sema::ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op,
+ Expr *Input, bool IsAfterAmp) {
+ return BuildUnaryOp(S, OpLoc, ConvertTokenKindToUnaryOpcode(Op), Input,
+ IsAfterAmp);
}
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
@@ -15079,12 +16613,19 @@ ExprResult Sema::ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl) {
TheDecl->markUsed(Context);
// Create the AST node. The address of a label always has type 'void*'.
- return new (Context) AddrLabelExpr(OpLoc, LabLoc, TheDecl,
- Context.getPointerType(Context.VoidTy));
+ auto *Res = new (Context) AddrLabelExpr(
+ OpLoc, LabLoc, TheDecl, Context.getPointerType(Context.VoidTy));
+
+ if (getCurFunction())
+ getCurFunction()->AddrLabels.push_back(Res);
+
+ return Res;
}
void Sema::ActOnStartStmtExpr() {
PushExpressionEvaluationContext(ExprEvalContexts.back().Context);
+ // Make sure we diagnose jumping into a statement expression.
+ setFunctionHasBranchProtectedScope();
}
void Sema::ActOnStmtExprError() {
@@ -15261,12 +16802,11 @@ ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
LangOpts.CPlusPlus11? diag::ext_offsetof_non_standardlayout_type
: diag::ext_offsetof_non_pod_type;
- if (!IsSafe && !DidWarnAboutNonPOD &&
- DiagRuntimeBehavior(BuiltinLoc, nullptr,
- PDiag(DiagID)
- << SourceRange(Components[0].LocStart, OC.LocEnd)
- << CurrentType))
+ if (!IsSafe && !DidWarnAboutNonPOD && !isUnevaluatedContext()) {
+ Diag(BuiltinLoc, DiagID)
+ << SourceRange(Components[0].LocStart, OC.LocEnd) << CurrentType;
DidWarnAboutNonPOD = true;
+ }
}
// Look for the field.
@@ -15279,10 +16819,15 @@ ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
MemberDecl = IndirectMemberDecl->getAnonField();
}
- if (!MemberDecl)
- return ExprError(Diag(BuiltinLoc, diag::err_no_member)
- << OC.U.IdentInfo << RD << SourceRange(OC.LocStart,
- OC.LocEnd));
+ if (!MemberDecl) {
+ // Lookup could be ambiguous when looking up a placeholder variable
+ // __builtin_offsetof(S, _).
+ // In that case we would already have emitted a diagnostic
+ if (!R.isAmbiguous())
+ Diag(BuiltinLoc, diag::err_no_member)
+ << OC.U.IdentInfo << RD << SourceRange(OC.LocStart, OC.LocEnd);
+ return ExprError();
+ }
// C99 7.17p3:
// (If the specified member is a bit-field, the behavior is undefined.)
@@ -15427,7 +16972,7 @@ void Sema::ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
assert(ParamInfo.getContext() == DeclaratorContext::BlockLiteral);
BlockScopeInfo *CurBlock = getCurBlock();
- TypeSourceInfo *Sig = GetTypeForDeclarator(ParamInfo, CurScope);
+ TypeSourceInfo *Sig = GetTypeForDeclarator(ParamInfo);
QualType T = Sig->getType();
// FIXME: We should allow unexpanded parameter packs here, but that would,
@@ -15437,7 +16982,7 @@ void Sema::ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
FunctionProtoType::ExtProtoInfo EPI;
EPI.HasTrailingReturn = false;
EPI.TypeQuals.addConst();
- T = Context.getFunctionType(Context.DependentTy, None, EPI);
+ T = Context.getFunctionType(Context.DependentTy, std::nullopt, EPI);
Sig = Context.getTrivialTypeSourceInfo(T);
}
@@ -15497,8 +17042,8 @@ void Sema::ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
if (Param->getIdentifier() == nullptr && !Param->isImplicit() &&
!Param->isInvalidDecl() && !getLangOpts().CPlusPlus) {
// Diagnose this as an extension in C17 and earlier.
- if (!getLangOpts().C2x)
- Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x);
+ if (!getLangOpts().C23)
+ Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c23);
}
Params.push_back(Param);
}
@@ -15524,7 +17069,7 @@ void Sema::ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
ProcessDeclAttributes(CurScope, CurBlock->TheDecl, ParamInfo);
// Put the parameter variables in scope.
- for (auto AI : CurBlock->TheDecl->parameters()) {
+ for (auto *AI : CurBlock->TheDecl->parameters()) {
AI->setOwningFunction(CurBlock->TheDecl);
// If this has an identifier, add it to the scope stack.
@@ -15533,6 +17078,9 @@ void Sema::ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
PushOnScopeChains(AI, CurBlock->TheScope);
}
+
+ if (AI->isInvalidDecl())
+ CurBlock->TheDecl->setInvalidDecl();
}
}
@@ -15587,10 +17135,10 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
if (isa<FunctionNoProtoType>(FTy)) {
FunctionProtoType::ExtProtoInfo EPI;
EPI.ExtInfo = Ext;
- BlockTy = Context.getFunctionType(RetTy, None, EPI);
+ BlockTy = Context.getFunctionType(RetTy, std::nullopt, EPI);
- // Otherwise, if we don't need to change anything about the function type,
- // preserve its sugar structure.
+ // Otherwise, if we don't need to change anything about the function type,
+ // preserve its sugar structure.
} else if (FTy->getReturnType() == RetTy &&
(!NoReturn || FTy->getNoReturnAttr())) {
BlockTy = BSI->FunctionType;
@@ -15608,7 +17156,7 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
} else {
FunctionProtoType::ExtProtoInfo EPI;
EPI.ExtInfo = FunctionType::ExtInfo().withNoReturn(NoReturn);
- BlockTy = Context.getFunctionType(RetTy, None, EPI);
+ BlockTy = Context.getFunctionType(RetTy, std::nullopt, EPI);
}
DiagnoseUnusedParameters(BD->parameters());
@@ -15643,8 +17191,9 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
for (Capture &Cap : BSI->Captures) {
if (Cap.isInvalid() || Cap.isThisCapture())
continue;
-
- VarDecl *Var = Cap.getVariable();
+ // Cap.getVariable() is always a VarDecl because
+ // blocks cannot capture structured bindings or other ValueDecl kinds.
+ auto *Var = cast<VarDecl>(Cap.getVariable());
Expr *CopyExpr = nullptr;
if (getLangOpts().CPlusPlus && Cap.isCopyCapture()) {
if (const RecordType *Record =
@@ -15732,6 +17281,9 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
if (getCurFunction())
getCurFunction()->addBlock(BD);
+ if (BD->isInvalidDecl())
+ return CreateRecoveryExpr(Result->getBeginLoc(), Result->getEndLoc(),
+ {Result}, Result->getType());
return Result;
}
@@ -15758,7 +17310,7 @@ ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc,
}
// NVPTX does not support va_arg expression.
- if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
+ if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
Context.getTargetInfo().getTriple().isNVPTX())
targetDiag(E->getBeginLoc(), diag::err_va_arg_in_device);
@@ -15837,20 +17389,23 @@ ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc,
// Check for va_arg where arguments of the given type will be promoted
// (i.e. this va_arg is guaranteed to have undefined behavior).
QualType PromoteType;
- if (TInfo->getType()->isPromotableIntegerType()) {
+ if (Context.isPromotableIntegerType(TInfo->getType())) {
PromoteType = Context.getPromotedIntegerType(TInfo->getType());
// [cstdarg.syn]p1 defers the C++ behavior to what the C standard says,
- // and C2x 7.16.1.1p2 says, in part:
+ // and C23 7.16.1.1p2 says, in part:
// If type is not compatible with the type of the actual next argument
// (as promoted according to the default argument promotions), the
// behavior is undefined, except for the following cases:
// - both types are pointers to qualified or unqualified versions of
// compatible types;
- // - one type is a signed integer type, the other type is the
- // corresponding unsigned integer type, and the value is
- // representable in both types;
+ // - one type is compatible with a signed integer type, the other
+ // type is compatible with the corresponding unsigned integer type,
+ // and the value is representable in both types;
// - one type is pointer to qualified or unqualified void and the
- // other is a pointer to a qualified or unqualified character type.
+ // other is a pointer to a qualified or unqualified character type;
+ // - or, the type of the next argument is nullptr_t and type is a
+ // pointer type that has the same representation and alignment
+ // requirements as a pointer to a character type.
// Given that type compatibility is the primary requirement (ignoring
// qualifications), you would think we could call typesAreCompatible()
// directly to test this. However, in C++, that checks for *same type*,
@@ -15868,7 +17423,7 @@ ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc,
// promoted type and the underlying type are the same except for
// signedness. Ask the AST for the correctly corresponding type and see
// if that's compatible.
- if (!PromoteType.isNull() &&
+ if (!PromoteType.isNull() && !UnderlyingType->isBooleanType() &&
PromoteType->isUnsignedIntegerType() !=
UnderlyingType->isUnsignedIntegerType()) {
UnderlyingType =
@@ -15898,7 +17453,7 @@ ExprResult Sema::ActOnGNUNullExpr(SourceLocation TokenLoc) {
// The type of __null will be int or long, depending on the size of
// pointers on the target.
QualType Ty;
- unsigned pw = Context.getTargetInfo().getPointerWidth(0);
+ unsigned pw = Context.getTargetInfo().getPointerWidth(LangAS::Default);
if (pw == Context.getTargetInfo().getIntWidth())
Ty = Context.IntTy;
else if (pw == Context.getTargetInfo().getLongWidth())
@@ -15912,18 +17467,112 @@ ExprResult Sema::ActOnGNUNullExpr(SourceLocation TokenLoc) {
return new (Context) GNUNullExpr(Ty, TokenLoc);
}
-ExprResult Sema::ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
+static CXXRecordDecl *LookupStdSourceLocationImpl(Sema &S, SourceLocation Loc) {
+ CXXRecordDecl *ImplDecl = nullptr;
+
+ // Fetch the std::source_location::__impl decl.
+ if (NamespaceDecl *Std = S.getStdNamespace()) {
+ LookupResult ResultSL(S, &S.PP.getIdentifierTable().get("source_location"),
+ Loc, Sema::LookupOrdinaryName);
+ if (S.LookupQualifiedName(ResultSL, Std)) {
+ if (auto *SLDecl = ResultSL.getAsSingle<RecordDecl>()) {
+ LookupResult ResultImpl(S, &S.PP.getIdentifierTable().get("__impl"),
+ Loc, Sema::LookupOrdinaryName);
+ if ((SLDecl->isCompleteDefinition() || SLDecl->isBeingDefined()) &&
+ S.LookupQualifiedName(ResultImpl, SLDecl)) {
+ ImplDecl = ResultImpl.getAsSingle<CXXRecordDecl>();
+ }
+ }
+ }
+ }
+
+ if (!ImplDecl || !ImplDecl->isCompleteDefinition()) {
+ S.Diag(Loc, diag::err_std_source_location_impl_not_found);
+ return nullptr;
+ }
+
+ // Verify that __impl is a trivial struct type, with no base classes, and with
+ // only the four expected fields.
+ if (ImplDecl->isUnion() || !ImplDecl->isStandardLayout() ||
+ ImplDecl->getNumBases() != 0) {
+ S.Diag(Loc, diag::err_std_source_location_impl_malformed);
+ return nullptr;
+ }
+
+ unsigned Count = 0;
+ for (FieldDecl *F : ImplDecl->fields()) {
+ StringRef Name = F->getName();
+
+ if (Name == "_M_file_name") {
+ if (F->getType() !=
+ S.Context.getPointerType(S.Context.CharTy.withConst()))
+ break;
+ Count++;
+ } else if (Name == "_M_function_name") {
+ if (F->getType() !=
+ S.Context.getPointerType(S.Context.CharTy.withConst()))
+ break;
+ Count++;
+ } else if (Name == "_M_line") {
+ if (!F->getType()->isIntegerType())
+ break;
+ Count++;
+ } else if (Name == "_M_column") {
+ if (!F->getType()->isIntegerType())
+ break;
+ Count++;
+ } else {
+ Count = 100; // invalid
+ break;
+ }
+ }
+ if (Count != 4) {
+ S.Diag(Loc, diag::err_std_source_location_impl_malformed);
+ return nullptr;
+ }
+
+ return ImplDecl;
+}
+
+ExprResult Sema::ActOnSourceLocExpr(SourceLocIdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc) {
- return BuildSourceLocExpr(Kind, BuiltinLoc, RPLoc, CurContext);
+ QualType ResultTy;
+ switch (Kind) {
+ case SourceLocIdentKind::File:
+ case SourceLocIdentKind::FileName:
+ case SourceLocIdentKind::Function:
+ case SourceLocIdentKind::FuncSig: {
+ QualType ArrTy = Context.getStringLiteralArrayType(Context.CharTy, 0);
+ ResultTy =
+ Context.getPointerType(ArrTy->getAsArrayTypeUnsafe()->getElementType());
+ break;
+ }
+ case SourceLocIdentKind::Line:
+ case SourceLocIdentKind::Column:
+ ResultTy = Context.UnsignedIntTy;
+ break;
+ case SourceLocIdentKind::SourceLocStruct:
+ if (!StdSourceLocationImplDecl) {
+ StdSourceLocationImplDecl =
+ LookupStdSourceLocationImpl(*this, BuiltinLoc);
+ if (!StdSourceLocationImplDecl)
+ return ExprError();
+ }
+ ResultTy = Context.getPointerType(
+ Context.getRecordType(StdSourceLocationImplDecl).withConst());
+ break;
+ }
+
+ return BuildSourceLocExpr(Kind, ResultTy, BuiltinLoc, RPLoc, CurContext);
}
-ExprResult Sema::BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
+ExprResult Sema::BuildSourceLocExpr(SourceLocIdentKind Kind, QualType ResultTy,
SourceLocation BuiltinLoc,
SourceLocation RPLoc,
DeclContext *ParentContext) {
return new (Context)
- SourceLocExpr(Context, Kind, BuiltinLoc, RPLoc, ParentContext);
+ SourceLocExpr(Context, Kind, ResultTy, BuiltinLoc, RPLoc, ParentContext);
}
bool Sema::CheckConversionToObjCLiteral(QualType DstType, Expr *&Exp,
@@ -15948,7 +17597,7 @@ bool Sema::CheckConversionToObjCLiteral(QualType DstType, Expr *&Exp,
if (!PT->isObjCIdType() &&
!(ID && ID->getIdentifier()->isStr("NSString")))
return false;
- if (!SL->isAscii())
+ if (!SL->isOrdinary())
return false;
if (Diagnose) {
@@ -16043,6 +17692,12 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this);
MayHaveConvFixit = true;
break;
+ case IncompatibleFunctionPointerStrict:
+ DiagKind =
+ diag::warn_typecheck_convert_incompatible_function_pointer_strict;
+ ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this);
+ MayHaveConvFixit = true;
+ break;
case IncompatibleFunctionPointer:
if (getLangOpts().CPlusPlus) {
DiagKind = diag::err_typecheck_convert_incompatible_function_pointer;
@@ -16229,10 +17884,12 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
}
PartialDiagnostic FDiag = PDiag(DiagKind);
+ AssignmentAction ActionForDiag = Action;
if (Action == AA_Passing_CFAudited)
- FDiag << FirstType << SecondType << AA_Passing << SrcExpr->getSourceRange();
- else
- FDiag << FirstType << SecondType << Action << SrcExpr->getSourceRange();
+ ActionForDiag = AA_Passing;
+
+ FDiag << FirstType << SecondType << ActionForDiag
+ << SrcExpr->getSourceRange();
if (DiagKind == diag::ext_typecheck_convert_incompatible_pointer_sign ||
DiagKind == diag::err_typecheck_convert_incompatible_pointer_sign) {
@@ -16524,12 +18181,41 @@ ExprResult Sema::TransformToPotentiallyEvaluated(Expr *E) {
return TransformToPE(*this).TransformExpr(E);
}
+TypeSourceInfo *Sema::TransformToPotentiallyEvaluated(TypeSourceInfo *TInfo) {
+ assert(isUnevaluatedContext() &&
+ "Should only transform unevaluated expressions");
+ ExprEvalContexts.back().Context =
+ ExprEvalContexts[ExprEvalContexts.size() - 2].Context;
+ if (isUnevaluatedContext())
+ return TInfo;
+ return TransformToPE(*this).TransformType(TInfo);
+}
+
void
Sema::PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl,
ExpressionEvaluationContextRecord::ExpressionKind ExprContext) {
ExprEvalContexts.emplace_back(NewContext, ExprCleanupObjects.size(), Cleanup,
LambdaContextDecl, ExprContext);
+
+ // Discarded statements and immediate contexts nested in other
+ // discarded statements or immediate context are themselves
+ // a discarded statement or an immediate context, respectively.
+ ExprEvalContexts.back().InDiscardedStatement =
+ ExprEvalContexts[ExprEvalContexts.size() - 2]
+ .isDiscardedStatementContext();
+
+ // C++23 [expr.const]/p15
+ // An expression or conversion is in an immediate function context if [...]
+ // it is a subexpression of a manifestly constant-evaluated expression or
+ // conversion.
+ const auto &Prev = ExprEvalContexts[ExprEvalContexts.size() - 2];
+ ExprEvalContexts.back().InImmediateFunctionContext =
+ Prev.isImmediateFunctionContext() || Prev.isConstantEvaluated();
+
+ ExprEvalContexts.back().InImmediateEscalatingFunctionContext =
+ Prev.InImmediateEscalatingFunctionContext;
+
Cleanup.reset();
if (!MaybeODRUseExprs.empty())
std::swap(MaybeODRUseExprs, ExprEvalContexts.back().SavedMaybeODRUseExprs);
@@ -16602,15 +18288,37 @@ void Sema::CheckUnusedVolatileAssignment(Expr *E) {
if (auto *BO = dyn_cast<BinaryOperator>(E->IgnoreParenImpCasts())) {
if (BO->getOpcode() == BO_Assign) {
auto &LHSs = ExprEvalContexts.back().VolatileAssignmentLHSs;
- LHSs.erase(std::remove(LHSs.begin(), LHSs.end(), BO->getLHS()),
- LHSs.end());
+ llvm::erase(LHSs, BO->getLHS());
}
}
}
+void Sema::MarkExpressionAsImmediateEscalating(Expr *E) {
+ assert(getLangOpts().CPlusPlus20 &&
+ ExprEvalContexts.back().InImmediateEscalatingFunctionContext &&
+ "Cannot mark an immediate escalating expression outside of an "
+ "immediate escalating context");
+ if (auto *Call = dyn_cast<CallExpr>(E->IgnoreImplicit());
+ Call && Call->getCallee()) {
+ if (auto *DeclRef =
+ dyn_cast<DeclRefExpr>(Call->getCallee()->IgnoreImplicit()))
+ DeclRef->setIsImmediateEscalating(true);
+ } else if (auto *Ctr = dyn_cast<CXXConstructExpr>(E->IgnoreImplicit())) {
+ Ctr->setIsImmediateEscalating(true);
+ } else if (auto *DeclRef = dyn_cast<DeclRefExpr>(E->IgnoreImplicit())) {
+ DeclRef->setIsImmediateEscalating(true);
+ } else {
+ assert(false && "expected an immediately escalating expression");
+ }
+ if (FunctionScopeInfo *FI = getCurFunction())
+ FI->FoundImmediateEscalatingExpression = true;
+}
+
ExprResult Sema::CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl) {
- if (!E.isUsable() || !Decl || !Decl->isConsteval() || isConstantEvaluated() ||
- RebuildingImmediateInvocation)
+ if (isUnevaluatedContext() || !E.isUsable() || !Decl ||
+ !Decl->isImmediateFunction() || isAlwaysConstantEvaluatedContext() ||
+ isCheckingDefaultArgumentOrInitializer() ||
+ RebuildingImmediateInvocation || isImmediateFunctionContext())
return E;
/// Opportunistically remove the callee from ReferencesToConsteval if we can.
@@ -16622,14 +18330,63 @@ ExprResult Sema::CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl) {
dyn_cast<DeclRefExpr>(Call->getCallee()->IgnoreImplicit()))
ExprEvalContexts.back().ReferenceToConsteval.erase(DeclRef);
- E = MaybeCreateExprWithCleanups(E);
+ // C++23 [expr.const]/p16
+ // An expression or conversion is immediate-escalating if it is not initially
+ // in an immediate function context and it is [...] an immediate invocation
+ // that is not a constant expression and is not a subexpression of an
+ // immediate invocation.
+ APValue Cached;
+ auto CheckConstantExpressionAndKeepResult = [&]() {
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ Expr::EvalResult Eval;
+ Eval.Diag = &Notes;
+ bool Res = E.get()->EvaluateAsConstantExpr(
+ Eval, getASTContext(), ConstantExprKind::ImmediateInvocation);
+ if (Res && Notes.empty()) {
+ Cached = std::move(Eval.Val);
+ return true;
+ }
+ return false;
+ };
+
+ if (!E.get()->isValueDependent() &&
+ ExprEvalContexts.back().InImmediateEscalatingFunctionContext &&
+ !CheckConstantExpressionAndKeepResult()) {
+ MarkExpressionAsImmediateEscalating(E.get());
+ return E;
+ }
+
+ if (Cleanup.exprNeedsCleanups()) {
+ // Since an immediate invocation is a full expression itself - it requires
+ // an additional ExprWithCleanups node, but it can participate to a bigger
+ // full expression which actually requires cleanups to be run after so
+ // create ExprWithCleanups without using MaybeCreateExprWithCleanups as it
+ // may discard cleanups for outer expression too early.
+
+ // Note that ExprWithCleanups created here must always have empty cleanup
+ // objects:
+ // - compound literals do not create cleanup objects in C++ and immediate
+ // invocations are C++-only.
+ // - blocks are not allowed inside constant expressions and compiler will
+ // issue an error if they appear there.
+ //
+ // Hence, in correct code any cleanup objects created inside current
+ // evaluation context must be outside the immediate invocation.
+ E = ExprWithCleanups::Create(getASTContext(), E.get(),
+ Cleanup.cleanupsHaveSideEffects(), {});
+ }
ConstantExpr *Res = ConstantExpr::Create(
getASTContext(), E.get(),
ConstantExpr::getStorageKind(Decl->getReturnType().getTypePtr(),
getASTContext()),
/*IsImmediateInvocation*/ true);
- ExprEvalContexts.back().ImmediateInvocationCandidates.emplace_back(Res, 0);
+ if (Cached.hasValue())
+ Res->MoveIntoResult(Cached, getASTContext());
+ /// Value-dependent constant expressions should not be immediately
+ /// evaluated until they are instantiated.
+ if (!Res->isValueDependent())
+ ExprEvalContexts.back().ImmediateInvocationCandidates.emplace_back(Res, 0);
return Res;
}
@@ -16642,18 +18399,32 @@ static void EvaluateAndDiagnoseImmediateInvocation(
bool Result = CE->EvaluateAsConstantExpr(
Eval, SemaRef.getASTContext(), ConstantExprKind::ImmediateInvocation);
if (!Result || !Notes.empty()) {
+ SemaRef.FailedImmediateInvocations.insert(CE);
Expr *InnerExpr = CE->getSubExpr()->IgnoreImplicit();
if (auto *FunctionalCast = dyn_cast<CXXFunctionalCastExpr>(InnerExpr))
- InnerExpr = FunctionalCast->getSubExpr();
+ InnerExpr = FunctionalCast->getSubExpr()->IgnoreImplicit();
FunctionDecl *FD = nullptr;
if (auto *Call = dyn_cast<CallExpr>(InnerExpr))
FD = cast<FunctionDecl>(Call->getCalleeDecl());
else if (auto *Call = dyn_cast<CXXConstructExpr>(InnerExpr))
FD = Call->getConstructor();
- else
- llvm_unreachable("unhandled decl kind");
- assert(FD->isConsteval());
- SemaRef.Diag(CE->getBeginLoc(), diag::err_invalid_consteval_call) << FD;
+ else if (auto *Cast = dyn_cast<CastExpr>(InnerExpr))
+ FD = dyn_cast_or_null<FunctionDecl>(Cast->getConversionFunction());
+
+ assert(FD && FD->isImmediateFunction() &&
+ "could not find an immediate function in this expression");
+ if (FD->isInvalidDecl())
+ return;
+ SemaRef.Diag(CE->getBeginLoc(), diag::err_invalid_consteval_call)
+ << FD << FD->isConsteval();
+ if (auto Context =
+ SemaRef.InnermostDeclarationWithDelayedImmediateInvocations()) {
+ SemaRef.Diag(Context->Loc, diag::note_invalid_consteval_initializer)
+ << Context->Decl;
+ SemaRef.Diag(Context->Decl->getBeginLoc(), diag::note_declared_at);
+ }
+ if (!FD->isConsteval())
+ SemaRef.DiagnoseImmediateEscalatingReason(FD);
for (auto &Note : Notes)
SemaRef.Diag(Note.first, Note.second);
return;
@@ -16680,10 +18451,16 @@ static void RemoveNestedImmediateInvocation(
[E](Sema::ImmediateInvocationCandidate Elem) {
return Elem.getPointer() == E;
});
- assert(It != IISet.rend() &&
- "ConstantExpr marked IsImmediateInvocation should "
- "be present");
- It->setInt(1); // Mark as deleted
+ // It is possible that some subexpression of the current immediate
+ // invocation was handled from another expression evaluation context. Do
+ // not handle the current immediate invocation if some of its
+ // subexpressions failed before.
+ if (It == IISet.rend()) {
+ if (SemaRef.FailedImmediateInvocations.contains(E))
+ CurrentII->setInt(1);
+ } else {
+ It->setInt(1); // Mark as deleted
+ }
}
ExprResult TransformConstantExpr(ConstantExpr *E) {
if (!E->isImmediateInvocation())
@@ -16697,7 +18474,10 @@ static void RemoveNestedImmediateInvocation(
DRSet.erase(cast<DeclRefExpr>(E->getCallee()->IgnoreImplicit()));
return Base::TransformCXXOperatorCallExpr(E);
}
- /// Base::TransformInitializer skip ConstantExpr so we need to visit them
+ /// Base::TransformUserDefinedLiteral doesn't preserve the
+ /// UserDefinedLiteral node.
+ ExprResult TransformUserDefinedLiteral(UserDefinedLiteral *E) { return E; }
+ /// Base::TransformInitializer skips ConstantExpr so we need to visit them
/// here.
ExprResult TransformInitializer(Expr *Init, bool NotCopyInit) {
if (!Init)
@@ -16713,6 +18493,11 @@ static void RemoveNestedImmediateInvocation(
DRSet.erase(E);
return E;
}
+ ExprResult TransformLambdaExpr(LambdaExpr *E) {
+ // Do not rebuild lambdas to avoid creating a new type.
+ // Lambdas have already been processed inside their eval context.
+ return E;
+ }
bool AlwaysRebuild() { return false; }
bool ReplacingOriginal() { return true; }
bool AllowSkippingCXXConstructExpr() {
@@ -16734,9 +18519,13 @@ static void RemoveNestedImmediateInvocation(
Transformer.AllowSkippingFirstCXXConstructExpr = false;
ExprResult Res = Transformer.TransformExpr(It->getPointer()->getSubExpr());
- assert(Res.isUsable());
- Res = SemaRef.MaybeCreateExprWithCleanups(Res);
- It->getPointer()->setSubExpr(Res.get());
+ // The result may not be usable in case of previous compilation errors.
+ // In this case evaluation of the expression may result in crash so just
+ // don't do anything further with the result.
+ if (Res.isUsable()) {
+ Res = SemaRef.MaybeCreateExprWithCleanups(Res);
+ It->getPointer()->setSubExpr(Res.get());
+ }
}
static void
@@ -16747,14 +18536,17 @@ HandleImmediateInvocations(Sema &SemaRef,
SemaRef.RebuildingImmediateInvocation)
return;
- /// When we have more then 1 ImmediateInvocationCandidates we need to check
- /// for nested ImmediateInvocationCandidates. when we have only 1 we only
- /// need to remove ReferenceToConsteval in the immediate invocation.
- if (Rec.ImmediateInvocationCandidates.size() > 1) {
+ /// When we have more than 1 ImmediateInvocationCandidates or previously
+ /// failed immediate invocations, we need to check for nested
+ /// ImmediateInvocationCandidates in order to avoid duplicate diagnostics.
+ /// Otherwise we only need to remove ReferenceToConsteval in the immediate
+ /// invocation.
+ if (Rec.ImmediateInvocationCandidates.size() > 1 ||
+ !SemaRef.FailedImmediateInvocations.empty()) {
/// Prevent sema calls during the tree transform from adding pointers that
/// are already in the sets.
- llvm::SaveAndRestore<bool> DisableIITracking(
+ llvm::SaveAndRestore DisableIITracking(
SemaRef.RebuildingImmediateInvocation, true);
/// Prevent diagnostic during tree transfrom as they are duplicates
@@ -16780,11 +18572,49 @@ HandleImmediateInvocations(Sema &SemaRef,
for (auto CE : Rec.ImmediateInvocationCandidates)
if (!CE.getInt())
EvaluateAndDiagnoseImmediateInvocation(SemaRef, CE);
- for (auto DR : Rec.ReferenceToConsteval) {
+ for (auto *DR : Rec.ReferenceToConsteval) {
+ // If the expression is immediate escalating, it is not an error;
+ // The outer context itself becomes immediate and further errors,
+ // if any, will be handled by DiagnoseImmediateEscalatingReason.
+ if (DR->isImmediateEscalating())
+ continue;
auto *FD = cast<FunctionDecl>(DR->getDecl());
- SemaRef.Diag(DR->getBeginLoc(), diag::err_invalid_consteval_take_address)
- << FD;
- SemaRef.Diag(FD->getLocation(), diag::note_declared_at);
+ const NamedDecl *ND = FD;
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(ND);
+ MD && (MD->isLambdaStaticInvoker() || isLambdaCallOperator(MD)))
+ ND = MD->getParent();
+
+ // C++23 [expr.const]/p16
+ // An expression or conversion is immediate-escalating if it is not
+ // initially in an immediate function context and it is [...] a
+ // potentially-evaluated id-expression that denotes an immediate function
+ // that is not a subexpression of an immediate invocation.
+ bool ImmediateEscalating = false;
+ bool IsPotentiallyEvaluated =
+ Rec.Context ==
+ Sema::ExpressionEvaluationContext::PotentiallyEvaluated ||
+ Rec.Context ==
+ Sema::ExpressionEvaluationContext::PotentiallyEvaluatedIfUsed;
+ if (SemaRef.inTemplateInstantiation() && IsPotentiallyEvaluated)
+ ImmediateEscalating = Rec.InImmediateEscalatingFunctionContext;
+
+ if (!Rec.InImmediateEscalatingFunctionContext ||
+ (SemaRef.inTemplateInstantiation() && !ImmediateEscalating)) {
+ SemaRef.Diag(DR->getBeginLoc(), diag::err_invalid_consteval_take_address)
+ << ND << isa<CXXRecordDecl>(ND) << FD->isConsteval();
+ SemaRef.Diag(ND->getLocation(), diag::note_declared_at);
+ if (auto Context =
+ SemaRef.InnermostDeclarationWithDelayedImmediateInvocations()) {
+ SemaRef.Diag(Context->Loc, diag::note_invalid_consteval_initializer)
+ << Context->Decl;
+ SemaRef.Diag(Context->Decl->getBeginLoc(), diag::note_declared_at);
+ }
+ if (FD->isImmediateEscalating() && !FD->isConsteval())
+ SemaRef.DiagnoseImmediateEscalatingReason(FD);
+
+ } else {
+ SemaRef.MarkExpressionAsImmediateEscalating(DR);
+ }
}
}
@@ -16881,6 +18711,8 @@ static bool isPotentiallyConstantEvaluatedContext(Sema &SemaRef) {
// An expression or conversion is potentially constant evaluated if it is
switch (SemaRef.ExprEvalContexts.back().Context) {
case Sema::ExpressionEvaluationContext::ConstantEvaluated:
+ case Sema::ExpressionEvaluationContext::ImmediateFunctionContext:
+
// -- a manifestly constant-evaluated expression,
case Sema::ExpressionEvaluationContext::PotentiallyEvaluated:
case Sema::ExpressionEvaluationContext::PotentiallyEvaluatedIfUsed:
@@ -17003,6 +18835,7 @@ static OdrUseContext isOdrUseContext(Sema &SemaRef) {
return OdrUseContext::None;
case Sema::ExpressionEvaluationContext::ConstantEvaluated:
+ case Sema::ExpressionEvaluationContext::ImmediateFunctionContext:
case Sema::ExpressionEvaluationContext::PotentiallyEvaluated:
Result = OdrUseContext::Used;
break;
@@ -17109,14 +18942,11 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
if (NeedDefinition &&
(Func->getTemplateSpecializationKind() != TSK_Undeclared ||
Func->getMemberSpecializationInfo()))
- checkSpecializationVisibility(Loc, Func);
+ checkSpecializationReachability(Loc, Func);
if (getLangOpts().CUDA)
CheckCUDACall(Loc, Func);
- if (getLangOpts().SYCLIsDevice)
- checkSYCLDeviceFunction(Loc, Func);
-
// If we need a definition, try to create one.
if (NeedDefinition && !Func->getBody()) {
runWithSufficientStackSpace(Loc, [&] {
@@ -17218,7 +19048,7 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
}
} else {
// Walk redefinitions, as some of them may be instantiable.
- for (auto i : Func->redecls()) {
+ for (auto *i : Func->redecls()) {
if (!i->isUsed(false) && i->isImplicitlyInstantiable())
MarkFunctionReferenced(Loc, i, MightBeOdrUse);
}
@@ -17226,6 +19056,24 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
});
}
+ // If a constructor was defined in the context of a default parameter
+ // or of another default member initializer (ie a PotentiallyEvaluatedIfUsed
+ // context), its initializers may not be referenced yet.
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Func)) {
+ EnterExpressionEvaluationContext EvalContext(
+ *this,
+ Constructor->isImmediateFunction()
+ ? ExpressionEvaluationContext::ImmediateFunctionContext
+ : ExpressionEvaluationContext::PotentiallyEvaluated,
+ Constructor);
+ for (CXXCtorInitializer *Init : Constructor->inits()) {
+ if (Init->isInClassMemberInitializer())
+ runWithSufficientStackSpace(Init->getSourceLocation(), [&]() {
+ MarkDeclarationsReferencedInExpr(Init->getInit());
+ });
+ }
+ }
+
// C++14 [except.spec]p17:
// An exception-specification is considered to be needed when:
// - the function is odr-used or, if it appears in an unevaluated operand,
@@ -17239,6 +19087,13 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
if (FPT && isUnresolvedExceptionSpec(FPT->getExceptionSpecType()))
ResolveExceptionSpec(Loc, FPT);
+ // A callee could be called by a host function then by a device function.
+ // If we only try recording once, we will miss recording the use on device
+ // side. Therefore keep trying until it is recorded.
+ if (LangOpts.OffloadImplicitHostDeviceTemplates && LangOpts.CUDAIsDevice &&
+ !getASTContext().CUDAImplicitHostDeviceFunUsedByDevice.count(Func))
+ CUDARecordImplicitHostDeviceFuncUsedByDevice(Func);
+
// If this is the first "real" use, act on that.
if (OdrUse == OdrUseContext::Used && !Func->isUsed(/*CheckUsedAttr=*/false)) {
// Keep track of used but undefined functions.
@@ -17285,10 +19140,13 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
/// - else capture it in the DeclContext that maps to the
/// *FunctionScopeIndexToStopAt on the FunctionScopeInfo stack.
static void
-MarkVarDeclODRUsed(VarDecl *Var, SourceLocation Loc, Sema &SemaRef,
+MarkVarDeclODRUsed(ValueDecl *V, SourceLocation Loc, Sema &SemaRef,
const unsigned *const FunctionScopeIndexToStopAt = nullptr) {
// Keep track of used but undefined variables.
// FIXME: We shouldn't suppress this warning for static data members.
+ VarDecl *Var = V->getPotentiallyDecomposedVarDecl();
+ assert(Var && "expected a capturable variable");
+
if (Var->hasDefinition(SemaRef.Context) == VarDecl::DeclarationOnly &&
(!Var->isExternallyVisible() || Var->isInline() ||
SemaRef.isExternalWithNoLinkageType(Var)) &&
@@ -17299,14 +19157,13 @@ MarkVarDeclODRUsed(VarDecl *Var, SourceLocation Loc, Sema &SemaRef,
}
QualType CaptureType, DeclRefType;
if (SemaRef.LangOpts.OpenMP)
- SemaRef.tryCaptureOpenMPLambdas(Var);
- SemaRef.tryCaptureVariable(Var, Loc, Sema::TryCapture_Implicit,
- /*EllipsisLoc*/ SourceLocation(),
- /*BuildAndDiagnose*/ true,
- CaptureType, DeclRefType,
- FunctionScopeIndexToStopAt);
-
- if (SemaRef.LangOpts.CUDA && Var && Var->hasGlobalStorage()) {
+ SemaRef.tryCaptureOpenMPLambdas(V);
+ SemaRef.tryCaptureVariable(V, Loc, Sema::TryCapture_Implicit,
+ /*EllipsisLoc*/ SourceLocation(),
+ /*BuildAndDiagnose*/ true, CaptureType,
+ DeclRefType, FunctionScopeIndexToStopAt);
+
+ if (SemaRef.LangOpts.CUDA && Var->hasGlobalStorage()) {
auto *FD = dyn_cast_or_null<FunctionDecl>(SemaRef.CurContext);
auto VarTarget = SemaRef.IdentifyCUDATarget(Var);
auto UserTarget = SemaRef.IdentifyCUDATarget(FD);
@@ -17316,7 +19173,7 @@ MarkVarDeclODRUsed(VarDecl *Var, SourceLocation Loc, Sema &SemaRef,
// Diagnose ODR-use of host global variables in device functions.
// Reference of device global variables in host functions is allowed
// through shadow variables therefore it is not diagnosed.
- if (SemaRef.LangOpts.CUDAIsDevice) {
+ if (SemaRef.LangOpts.CUDAIsDevice && !SemaRef.LangOpts.HIPStdPar) {
SemaRef.targetDiag(Loc, diag::err_ref_bad_target)
<< /*host*/ 2 << /*variable*/ 1 << Var << UserTarget;
SemaRef.targetDiag(Var->getLocation(),
@@ -17325,9 +19182,9 @@ MarkVarDeclODRUsed(VarDecl *Var, SourceLocation Loc, Sema &SemaRef,
: diag::note_cuda_host_var);
}
} else if (VarTarget == Sema::CVT_Device &&
+ !Var->hasAttr<CUDASharedAttr>() &&
(UserTarget == Sema::CFT_Host ||
- UserTarget == Sema::CFT_HostDevice) &&
- !Var->hasExternalStorage()) {
+ UserTarget == Sema::CFT_HostDevice)) {
// Record a CUDA/HIP device side variable if it is ODR-used
// by host code. This is done conservatively, when the variable is
// referenced in any of the following contexts:
@@ -17338,22 +19195,24 @@ MarkVarDeclODRUsed(VarDecl *Var, SourceLocation Loc, Sema &SemaRef,
// be visible in the device compilation for the compiler to be able to
// emit template variables instantiated by host code only and to
// externalize the static device side variable ODR-used by host code.
- SemaRef.getASTContext().CUDADeviceVarODRUsedByHost.insert(Var);
+ if (!Var->hasExternalStorage())
+ SemaRef.getASTContext().CUDADeviceVarODRUsedByHost.insert(Var);
+ else if (SemaRef.LangOpts.GPURelocatableDeviceCode)
+ SemaRef.getASTContext().CUDAExternalDeviceDeclODRUsedByHost.insert(Var);
}
}
- Var->markUsed(SemaRef.Context);
+ V->markUsed(SemaRef.Context);
}
-void Sema::MarkCaptureUsedInEnclosingContext(VarDecl *Capture,
+void Sema::MarkCaptureUsedInEnclosingContext(ValueDecl *Capture,
SourceLocation Loc,
unsigned CapturingScopeIndex) {
MarkVarDeclODRUsed(Capture, Loc, *this, &CapturingScopeIndex);
}
-static void
-diagnoseUncapturableValueReference(Sema &S, SourceLocation loc,
- ValueDecl *var, DeclContext *DC) {
+void diagnoseUncapturableValueReferenceOrBinding(Sema &S, SourceLocation loc,
+ ValueDecl *var) {
DeclContext *VarDC = var->getDeclContext();
// If the parameter still belongs to the translation unit, then
@@ -17393,12 +19252,12 @@ diagnoseUncapturableValueReference(Sema &S, SourceLocation loc,
// capture.
}
-
-static bool isVariableAlreadyCapturedInScopeInfo(CapturingScopeInfo *CSI, VarDecl *Var,
- bool &SubCapturesAreNested,
- QualType &CaptureType,
- QualType &DeclRefType) {
- // Check whether we've already captured it.
+static bool isVariableAlreadyCapturedInScopeInfo(CapturingScopeInfo *CSI,
+ ValueDecl *Var,
+ bool &SubCapturesAreNested,
+ QualType &CaptureType,
+ QualType &DeclRefType) {
+ // Check whether we've already captured it.
if (CSI->CaptureMap.count(Var)) {
// If we found a capture, any subcaptures are nested.
SubCapturesAreNested = true;
@@ -17414,7 +19273,8 @@ static bool isVariableAlreadyCapturedInScopeInfo(CapturingScopeInfo *CSI, VarDec
// private instances of the captured declarations.
const Capture &Cap = CSI->getCapture(Var);
if (Cap.isCopyCapture() &&
- !(isa<LambdaScopeInfo>(CSI) && cast<LambdaScopeInfo>(CSI)->Mutable) &&
+ !(isa<LambdaScopeInfo>(CSI) &&
+ !cast<LambdaScopeInfo>(CSI)->lambdaCaptureShouldBeConst()) &&
!(isa<CapturedRegionScopeInfo>(CSI) &&
cast<CapturedRegionScopeInfo>(CSI)->CapRegionKind == CR_OpenMP))
DeclRefType.addConst();
@@ -17425,14 +19285,18 @@ static bool isVariableAlreadyCapturedInScopeInfo(CapturingScopeInfo *CSI, VarDec
// Only block literals, captured statements, and lambda expressions can
// capture; other scopes don't work.
-static DeclContext *getParentOfCapturingContextOrNull(DeclContext *DC, VarDecl *Var,
- SourceLocation Loc,
- const bool Diagnose, Sema &S) {
+static DeclContext *getParentOfCapturingContextOrNull(DeclContext *DC,
+ ValueDecl *Var,
+ SourceLocation Loc,
+ const bool Diagnose,
+ Sema &S) {
if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC) || isLambdaCallOperator(DC))
return getLambdaAwareParentOfDeclContext(DC);
- else if (Var->hasLocalStorage()) {
- if (Diagnose)
- diagnoseUncapturableValueReference(S, Loc, Var, DC);
+
+ VarDecl *Underlying = Var->getPotentiallyDecomposedVarDecl();
+ if (Underlying) {
+ if (Underlying->hasLocalStorage() && Diagnose)
+ diagnoseUncapturableValueReferenceOrBinding(S, Loc, Var);
}
return nullptr;
}
@@ -17440,9 +19304,12 @@ static DeclContext *getParentOfCapturingContextOrNull(DeclContext *DC, VarDecl *
// Certain capturing entities (lambdas, blocks etc.) are not allowed to capture
// certain types of variables (unnamed, variably modified types etc.)
// so check for eligibility.
-static bool isVariableCapturable(CapturingScopeInfo *CSI, VarDecl *Var,
- SourceLocation Loc,
- const bool Diagnose, Sema &S) {
+static bool isVariableCapturable(CapturingScopeInfo *CSI, ValueDecl *Var,
+ SourceLocation Loc, const bool Diagnose,
+ Sema &S) {
+
+ assert((isa<VarDecl, BindingDecl>(Var)) &&
+ "Only variables and structured bindings can be captured");
bool IsBlock = isa<BlockScopeInfo>(CSI);
bool IsLambda = isa<LambdaScopeInfo>(CSI);
@@ -17499,17 +19366,28 @@ static bool isVariableCapturable(CapturingScopeInfo *CSI, VarDecl *Var,
return false;
}
+ if (isa<BindingDecl>(Var)) {
+ if (!IsLambda || !S.getLangOpts().CPlusPlus) {
+ if (Diagnose)
+ diagnoseUncapturableValueReferenceOrBinding(S, Loc, Var);
+ return false;
+ } else if (Diagnose && S.getLangOpts().CPlusPlus) {
+ S.Diag(Loc, S.LangOpts.CPlusPlus20
+ ? diag::warn_cxx17_compat_capture_binding
+ : diag::ext_capture_binding)
+ << Var;
+ S.Diag(Var->getLocation(), diag::note_entity_declared_at) << Var;
+ }
+ }
+
return true;
}
// Returns true if the capture by block was successful.
-static bool captureInBlock(BlockScopeInfo *BSI, VarDecl *Var,
- SourceLocation Loc,
- const bool BuildAndDiagnose,
- QualType &CaptureType,
- QualType &DeclRefType,
- const bool Nested,
- Sema &S, bool Invalid) {
+static bool captureInBlock(BlockScopeInfo *BSI, ValueDecl *Var,
+ SourceLocation Loc, const bool BuildAndDiagnose,
+ QualType &CaptureType, QualType &DeclRefType,
+ const bool Nested, Sema &S, bool Invalid) {
bool ByRef = false;
// Blocks are not allowed to capture arrays, excepting OpenCL.
@@ -17573,10 +19451,9 @@ static bool captureInBlock(BlockScopeInfo *BSI, VarDecl *Var,
return !Invalid;
}
-
/// Capture the given variable in the captured region.
static bool captureInCapturedRegion(
- CapturedRegionScopeInfo *RSI, VarDecl *Var, SourceLocation Loc,
+ CapturedRegionScopeInfo *RSI, ValueDecl *Var, SourceLocation Loc,
const bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType,
const bool RefersToCapturedVariable, Sema::TryCaptureKind Kind,
bool IsTopScope, Sema &S, bool Invalid) {
@@ -17615,16 +19492,12 @@ static bool captureInCapturedRegion(
}
/// Capture the given variable in the lambda.
-static bool captureInLambda(LambdaScopeInfo *LSI,
- VarDecl *Var,
- SourceLocation Loc,
- const bool BuildAndDiagnose,
- QualType &CaptureType,
- QualType &DeclRefType,
+static bool captureInLambda(LambdaScopeInfo *LSI, ValueDecl *Var,
+ SourceLocation Loc, const bool BuildAndDiagnose,
+ QualType &CaptureType, QualType &DeclRefType,
const bool RefersToCapturedVariable,
const Sema::TryCaptureKind Kind,
- SourceLocation EllipsisLoc,
- const bool IsTopScope,
+ SourceLocation EllipsisLoc, const bool IsTopScope,
Sema &S, bool Invalid) {
// Determine whether we are capturing by reference or by value.
bool ByRef = false;
@@ -17634,6 +19507,22 @@ static bool captureInLambda(LambdaScopeInfo *LSI,
ByRef = (LSI->ImpCaptureStyle == LambdaScopeInfo::ImpCap_LambdaByref);
}
+ BindingDecl *BD = dyn_cast<BindingDecl>(Var);
+ // FIXME: We should support capturing structured bindings in OpenMP.
+ if (!Invalid && BD && S.LangOpts.OpenMP) {
+ if (BuildAndDiagnose) {
+ S.Diag(Loc, diag::err_capture_binding_openmp) << Var;
+ S.Diag(Var->getLocation(), diag::note_entity_declared_at) << Var;
+ }
+ Invalid = true;
+ }
+
+ if (BuildAndDiagnose && S.Context.getTargetInfo().getTriple().isWasm() &&
+ CaptureType.getNonReferenceType().isWebAssemblyReferenceType()) {
+ S.Diag(Loc, diag::err_wasm_ca_reference) << 0;
+ Invalid = true;
+ }
+
// Compute the type of the field that will capture this variable.
if (ByRef) {
// C++11 [expr.prim.lambda]p15:
@@ -17702,7 +19591,8 @@ static bool captureInLambda(LambdaScopeInfo *LSI,
// declared const (9.3.1) if and only if the lambda-expression's
// parameter-declaration-clause is not followed by mutable.
DeclRefType = CaptureType.getNonReferenceType();
- if (!LSI->Mutable && !CaptureType->isReferenceType())
+ bool Const = LSI->lambdaCaptureShouldBeConst();
+ if (Const && !CaptureType->isReferenceType())
DeclRefType.addConst();
}
@@ -17714,7 +19604,8 @@ static bool captureInLambda(LambdaScopeInfo *LSI,
return !Invalid;
}
-static bool canCaptureVariableByCopy(VarDecl *Var, const ASTContext &Context) {
+static bool canCaptureVariableByCopy(ValueDecl *Var,
+ const ASTContext &Context) {
// Offer a Copy fix even if the type is dependent.
if (Var->getType()->isDependentType())
return true;
@@ -17740,7 +19631,7 @@ static bool canCaptureVariableByCopy(VarDecl *Var, const ASTContext &Context) {
/// standard, for example we can't emit a default copy capture fix-it if we
/// already explicitly copy capture capture another variable.
static void buildLambdaCaptureFixit(Sema &Sema, LambdaScopeInfo *LSI,
- VarDecl *Var) {
+ ValueDecl *Var) {
assert(LSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_None);
// Don't offer Capture by copy of default capture by copy fixes if Var is
// known not to be copy constructible.
@@ -17816,16 +19707,30 @@ static void buildLambdaCaptureFixit(Sema &Sema, LambdaScopeInfo *LSI,
}
bool Sema::tryCaptureVariable(
- VarDecl *Var, SourceLocation ExprLoc, TryCaptureKind Kind,
+ ValueDecl *Var, SourceLocation ExprLoc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType,
QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt) {
// An init-capture is notionally from the context surrounding its
// declaration, but its parent DC is the lambda class.
DeclContext *VarDC = Var->getDeclContext();
- if (Var->isInitCapture())
- VarDC = VarDC->getParent();
-
DeclContext *DC = CurContext;
+
+ // tryCaptureVariable is called every time a DeclRef is formed,
+ // it can therefore have non-negigible impact on performances.
+ // For local variables and when there is no capturing scope,
+ // we can bailout early.
+ if (CapturingFunctionScopes == 0 && (!BuildAndDiagnose || VarDC == DC))
+ return true;
+
+ const auto *VD = dyn_cast<VarDecl>(Var);
+ if (VD) {
+ if (VD->isInitCapture())
+ VarDC = VarDC->getParent();
+ } else {
+ VD = Var->getPotentiallyDecomposedVarDecl();
+ }
+ assert(VD && "Cannot capture a null variable");
+
const unsigned MaxFunctionScopesIndex = FunctionScopeIndexToStopAt
? *FunctionScopeIndexToStopAt : FunctionScopes.size() - 1;
// We need to sync up the Declaration Context with the
@@ -17838,19 +19743,16 @@ bool Sema::tryCaptureVariable(
}
}
-
- // If the variable is declared in the current context, there is no need to
- // capture it.
- if (VarDC == DC) return true;
-
// Capture global variables if it is required to use private copy of this
// variable.
- bool IsGlobal = !Var->hasLocalStorage();
+ bool IsGlobal = !VD->hasLocalStorage();
if (IsGlobal &&
!(LangOpts.OpenMP && isOpenMPCapturedDecl(Var, /*CheckScopeInfo=*/true,
MaxFunctionScopesIndex)))
return true;
- Var = Var->getCanonicalDecl();
+
+ if (isa<VarDecl>(Var))
+ Var = cast<VarDecl>(Var->getCanonicalDecl());
// Walk up the stack to determine whether we can capture the variable,
// performing the "simple" checks that don't depend on type. We stop when
@@ -17866,12 +19768,34 @@ bool Sema::tryCaptureVariable(
bool Explicit = (Kind != TryCapture_Implicit);
unsigned FunctionScopesIndex = MaxFunctionScopesIndex;
do {
+
+ LambdaScopeInfo *LSI = nullptr;
+ if (!FunctionScopes.empty())
+ LSI = dyn_cast_or_null<LambdaScopeInfo>(
+ FunctionScopes[FunctionScopesIndex]);
+
+ bool IsInScopeDeclarationContext =
+ !LSI || LSI->AfterParameterList || CurContext == LSI->CallOperator;
+
+ if (LSI && !LSI->AfterParameterList) {
+ // This allows capturing parameters from a default value which does not
+ // seems correct
+ if (isa<ParmVarDecl>(Var) && !Var->getDeclContext()->isFunctionOrMethod())
+ return true;
+ }
+ // If the variable is declared in the current context, there is no need to
+ // capture it.
+ if (IsInScopeDeclarationContext &&
+ FunctionScopesIndex == MaxFunctionScopesIndex && VarDC == DC)
+ return true;
+
// Only block literals, captured statements, and lambda expressions can
// capture; other scopes don't work.
- DeclContext *ParentDC = getParentOfCapturingContextOrNull(DC, Var,
- ExprLoc,
- BuildAndDiagnose,
- *this);
+ DeclContext *ParentDC =
+ !IsInScopeDeclarationContext
+ ? DC->getParent()
+ : getParentOfCapturingContextOrNull(DC, Var, ExprLoc,
+ BuildAndDiagnose, *this);
// We need to check for the parent *first* because, if we *have*
// private-captured a global variable, we need to recursively capture it in
// intermediate blocks, lambdas, etc.
@@ -17886,13 +19810,20 @@ bool Sema::tryCaptureVariable(
FunctionScopeInfo *FSI = FunctionScopes[FunctionScopesIndex];
CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(FSI);
-
// Check whether we've already captured it.
if (isVariableAlreadyCapturedInScopeInfo(CSI, Var, Nested, CaptureType,
DeclRefType)) {
CSI->getCapture(Var).markUsed(BuildAndDiagnose);
break;
}
+
+ // When evaluating some attributes (like enable_if) we might refer to a
+ // function parameter appertaining to the same declaration as that
+ // attribute.
+ if (const auto *Parm = dyn_cast<ParmVarDecl>(Var);
+ Parm && Parm->getDeclContext() == DC)
+ return true;
+
// If we are instantiating a generic lambda call operator body,
// we do not want to capture new variables. What was captured
// during either a lambdas transformation or initial parsing
@@ -17906,7 +19837,7 @@ bool Sema::tryCaptureVariable(
Diag(LSI->Lambda->getBeginLoc(), diag::note_lambda_decl);
buildLambdaCaptureFixit(*this, LSI, Var);
} else
- diagnoseUncapturableValueReference(*this, ExprLoc, Var, DC);
+ diagnoseUncapturableValueReferenceOrBinding(*this, ExprLoc, Var);
}
return true;
}
@@ -18002,10 +19933,10 @@ bool Sema::tryCaptureVariable(
}
return true;
}
-
- FunctionScopesIndex--;
- DC = ParentDC;
Explicit = false;
+ FunctionScopesIndex--;
+ if (IsInScopeDeclarationContext)
+ DC = ParentDC;
} while (!VarDC->Equals(DC));
// Walk back down the scope stack, (e.g. from outer lambda to inner lambda)
@@ -18054,7 +19985,7 @@ bool Sema::tryCaptureVariable(
return Invalid;
}
-bool Sema::tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
+bool Sema::tryCaptureVariable(ValueDecl *Var, SourceLocation Loc,
TryCaptureKind Kind, SourceLocation EllipsisLoc) {
QualType CaptureType;
QualType DeclRefType;
@@ -18063,7 +19994,7 @@ bool Sema::tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
DeclRefType, nullptr);
}
-bool Sema::NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc) {
+bool Sema::NeedToCaptureVariable(ValueDecl *Var, SourceLocation Loc) {
QualType CaptureType;
QualType DeclRefType;
return !tryCaptureVariable(Var, Loc, TryCapture_Implicit, SourceLocation(),
@@ -18071,7 +20002,7 @@ bool Sema::NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc) {
DeclRefType, nullptr);
}
-QualType Sema::getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc) {
+QualType Sema::getCapturedDeclRefType(ValueDecl *Var, SourceLocation Loc) {
QualType CaptureType;
QualType DeclRefType;
@@ -18270,7 +20201,6 @@ static ExprResult rebuildPotentialResultsAsNonOdrUsed(Sema &S, Expr *E,
ME->getQualifierLoc(), ME->getTemplateKeywordLoc(), ME->getMemberDecl(),
ME->getFoundDecl(), ME->getMemberNameInfo(), CopiedTemplateArgs(ME),
ME->getType(), ME->getValueKind(), ME->getObjectKind(), NOUR);
- return ExprEmpty();
}
case Expr::BinaryOperatorClass: {
@@ -18358,9 +20288,15 @@ static ExprResult rebuildPotentialResultsAsNonOdrUsed(Sema &S, Expr *E,
}
}
+ void *ExOrTy = nullptr;
+ bool IsExpr = GSE->isExprPredicate();
+ if (IsExpr)
+ ExOrTy = GSE->getControllingExpr();
+ else
+ ExOrTy = GSE->getControllingType();
return AnyChanged ? S.CreateGenericSelectionExpr(
GSE->getGenericLoc(), GSE->getDefaultLoc(),
- GSE->getRParenLoc(), GSE->getControllingExpr(),
+ GSE->getRParenLoc(), IsExpr, ExOrTy,
GSE->getAssocTypeSourceInfos(), AssocExprs)
: ExprEmpty();
}
@@ -18494,6 +20430,38 @@ void Sema::CleanupVarDeclMarking() {
"MarkVarDeclODRUsed failed to cleanup MaybeODRUseExprs?");
}
+static void DoMarkPotentialCapture(Sema &SemaRef, SourceLocation Loc,
+ ValueDecl *Var, Expr *E) {
+ VarDecl *VD = Var->getPotentiallyDecomposedVarDecl();
+ if (!VD)
+ return;
+
+ const bool RefersToEnclosingScope =
+ (SemaRef.CurContext != VD->getDeclContext() &&
+ VD->getDeclContext()->isFunctionOrMethod() && VD->hasLocalStorage());
+ if (RefersToEnclosingScope) {
+ LambdaScopeInfo *const LSI =
+ SemaRef.getCurLambda(/*IgnoreNonLambdaCapturingScope=*/true);
+ if (LSI && (!LSI->CallOperator ||
+ !LSI->CallOperator->Encloses(Var->getDeclContext()))) {
+ // If a variable could potentially be odr-used, defer marking it so
+ // until we finish analyzing the full expression for any
+ // lvalue-to-rvalue
+ // or discarded value conversions that would obviate odr-use.
+ // Add it to the list of potential captures that will be analyzed
+ // later (ActOnFinishFullExpr) for eventual capture and odr-use marking
+ // unless the variable is a reference that was initialized by a constant
+ // expression (this will never need to be captured or odr-used).
+ //
+ // FIXME: We can simplify this a lot after implementing P0588R1.
+ assert(E && "Capture variable should be used in an expression.");
+ if (!Var->getType()->isReferenceType() ||
+ !VD->isUsableInConstantExpressions(SemaRef.Context))
+ LSI->addPotentialCapture(E->IgnoreParens());
+ }
+ }
+}
+
static void DoMarkVarDeclReferenced(
Sema &SemaRef, SourceLocation Loc, VarDecl *Var, Expr *E,
llvm::DenseMap<const VarDecl *, int> &RefsMinusAssignments) {
@@ -18576,14 +20544,31 @@ static void DoMarkVarDeclReferenced(
DRE->setDecl(DRE->getDecl());
else if (auto *ME = dyn_cast_or_null<MemberExpr>(E))
ME->setMemberDecl(ME->getMemberDecl());
- } else if (FirstInstantiation ||
- isa<VarTemplateSpecializationDecl>(Var)) {
+ } else if (FirstInstantiation) {
+ SemaRef.PendingInstantiations
+ .push_back(std::make_pair(Var, PointOfInstantiation));
+ } else {
+ bool Inserted = false;
+ for (auto &I : SemaRef.SavedPendingInstantiations) {
+ auto Iter = llvm::find_if(
+ I, [Var](const Sema::PendingImplicitInstantiation &P) {
+ return P.first == Var;
+ });
+ if (Iter != I.end()) {
+ SemaRef.PendingInstantiations.push_back(*Iter);
+ I.erase(Iter);
+ Inserted = true;
+ break;
+ }
+ }
+
// FIXME: For a specialization of a variable template, we don't
// distinguish between "declaration and type implicitly instantiated"
// and "implicit instantiation of definition requested", so we have
// no direct way to avoid enqueueing the pending instantiation
// multiple times.
- SemaRef.PendingInstantiations
+ if (isa<VarTemplateSpecializationDecl>(Var) && !Inserted)
+ SemaRef.PendingInstantiations
.push_back(std::make_pair(Var, PointOfInstantiation));
}
}
@@ -18617,7 +20602,10 @@ static void DoMarkVarDeclReferenced(
switch (OdrUse) {
case OdrUseContext::None:
- assert((!E || isa<FunctionParmPackExpr>(E)) &&
+ // In some cases, a variable may not have been marked unevaluated, if it
+ // appears in a defaukt initializer.
+ assert((!E || isa<FunctionParmPackExpr>(E) ||
+ SemaRef.isUnevaluatedContext()) &&
"missing non-odr-use marking for unevaluated decl ref");
break;
@@ -18640,34 +20628,31 @@ static void DoMarkVarDeclReferenced(
// odr-used, but we may still need to track them for lambda capture.
// FIXME: Do we also need to do this inside dependent typeid expressions
// (which are modeled as unevaluated at this point)?
- const bool RefersToEnclosingScope =
- (SemaRef.CurContext != Var->getDeclContext() &&
- Var->getDeclContext()->isFunctionOrMethod() && Var->hasLocalStorage());
- if (RefersToEnclosingScope) {
- LambdaScopeInfo *const LSI =
- SemaRef.getCurLambda(/*IgnoreNonLambdaCapturingScope=*/true);
- if (LSI && (!LSI->CallOperator ||
- !LSI->CallOperator->Encloses(Var->getDeclContext()))) {
- // If a variable could potentially be odr-used, defer marking it so
- // until we finish analyzing the full expression for any
- // lvalue-to-rvalue
- // or discarded value conversions that would obviate odr-use.
- // Add it to the list of potential captures that will be analyzed
- // later (ActOnFinishFullExpr) for eventual capture and odr-use marking
- // unless the variable is a reference that was initialized by a constant
- // expression (this will never need to be captured or odr-used).
- //
- // FIXME: We can simplify this a lot after implementing P0588R1.
- assert(E && "Capture variable should be used in an expression.");
- if (!Var->getType()->isReferenceType() ||
- !Var->isUsableInConstantExpressions(SemaRef.Context))
- LSI->addPotentialCapture(E->IgnoreParens());
- }
- }
+ DoMarkPotentialCapture(SemaRef, Loc, Var, E);
break;
}
}
+static void DoMarkBindingDeclReferenced(Sema &SemaRef, SourceLocation Loc,
+ BindingDecl *BD, Expr *E) {
+ BD->setReferenced();
+
+ if (BD->isInvalidDecl())
+ return;
+
+ OdrUseContext OdrUse = isOdrUseContext(SemaRef);
+ if (OdrUse == OdrUseContext::Used) {
+ QualType CaptureType, DeclRefType;
+ SemaRef.tryCaptureVariable(BD, Loc, Sema::TryCapture_Implicit,
+ /*EllipsisLoc*/ SourceLocation(),
+ /*BuildAndDiagnose*/ true, CaptureType,
+ DeclRefType,
+ /*FunctionScopeIndexToStopAt*/ nullptr);
+ } else if (OdrUse == OdrUseContext::Dependent) {
+ DoMarkPotentialCapture(SemaRef, Loc, BD, E);
+ }
+}
+
/// Mark a variable referenced, and check whether it is odr-used
/// (C++ [basic.def.odr]p2, C99 6.9p3). Note that this should not be
/// used directly for normal expressions referring to VarDecl.
@@ -18675,6 +20660,34 @@ void Sema::MarkVariableReferenced(SourceLocation Loc, VarDecl *Var) {
DoMarkVarDeclReferenced(*this, Loc, Var, nullptr, RefsMinusAssignments);
}
+// C++ [temp.dep.expr]p3:
+// An id-expression is type-dependent if it contains:
+// - an identifier associated by name lookup with an entity captured by copy
+// in a lambda-expression that has an explicit object parameter whose type
+// is dependent ([dcl.fct]),
+static void FixDependencyOfIdExpressionsInLambdaWithDependentObjectParameter(
+ Sema &SemaRef, ValueDecl *D, Expr *E) {
+ auto *ID = dyn_cast<DeclRefExpr>(E);
+ if (!ID || ID->isTypeDependent())
+ return;
+
+ auto IsDependent = [&]() {
+ const LambdaScopeInfo *LSI = SemaRef.getCurLambda();
+ if (!LSI)
+ return false;
+ if (!LSI->ExplicitObjectParameter ||
+ !LSI->ExplicitObjectParameter->getType()->isDependentType())
+ return false;
+ if (!LSI->CaptureMap.count(D))
+ return false;
+ const Capture &Cap = LSI->getCapture(D);
+ return !Cap.isCopyCapture();
+ }();
+
+ ID->setCapturedByCopyInLambdaWithExplicitObjectParameter(
+ IsDependent, SemaRef.getASTContext());
+}
+
static void
MarkExprReferenced(Sema &SemaRef, SourceLocation Loc, Decl *D, Expr *E,
bool MightBeOdrUse,
@@ -18684,9 +20697,19 @@ MarkExprReferenced(Sema &SemaRef, SourceLocation Loc, Decl *D, Expr *E,
if (VarDecl *Var = dyn_cast<VarDecl>(D)) {
DoMarkVarDeclReferenced(SemaRef, Loc, Var, E, RefsMinusAssignments);
+ if (SemaRef.getLangOpts().CPlusPlus)
+ FixDependencyOfIdExpressionsInLambdaWithDependentObjectParameter(SemaRef,
+ Var, E);
return;
}
+ if (BindingDecl *Decl = dyn_cast<BindingDecl>(D)) {
+ DoMarkBindingDeclReferenced(SemaRef, Loc, Decl, E);
+ if (SemaRef.getLangOpts().CPlusPlus)
+ FixDependencyOfIdExpressionsInLambdaWithDependentObjectParameter(SemaRef,
+ Decl, E);
+ return;
+ }
SemaRef.MarkAnyDeclReferenced(Loc, D, MightBeOdrUse);
// If this is a call to a method via a cast, also mark the method in the
@@ -18725,10 +20748,14 @@ void Sema::MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base) {
!Method->getDevirtualizedMethod(Base, getLangOpts().AppleKext))
OdrUse = false;
- if (auto *FD = dyn_cast<FunctionDecl>(E->getDecl()))
- if (!isConstantEvaluated() && FD->isConsteval() &&
- !RebuildingImmediateInvocation)
+ if (auto *FD = dyn_cast<FunctionDecl>(E->getDecl())) {
+ if (!isUnevaluatedContext() && !isConstantEvaluatedContext() &&
+ !isImmediateFunctionContext() &&
+ !isCheckingDefaultArgumentOrInitializer() &&
+ FD->isImmediateFunction() && !RebuildingImmediateInvocation &&
+ !FD->isDependentContext())
ExprEvalContexts.back().ReferenceToConsteval.insert(E);
+ }
MarkExprReferenced(*this, E->getLocation(), E->getDecl(), E, OdrUse,
RefsMinusAssignments);
}
@@ -18744,7 +20771,7 @@ void Sema::MarkMemberReferenced(MemberExpr *E) {
bool MightBeOdrUse = true;
if (E->performsVirtualDispatch(getLangOpts())) {
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(E->getMemberDecl()))
- if (Method->isPure())
+ if (Method->isPureVirtual())
MightBeOdrUse = false;
}
SourceLocation Loc =
@@ -18829,14 +20856,27 @@ class EvaluatedExprMarker : public UsedDeclVisitor<EvaluatedExprMarker> {
public:
typedef UsedDeclVisitor<EvaluatedExprMarker> Inherited;
bool SkipLocalVariables;
+ ArrayRef<const Expr *> StopAt;
- EvaluatedExprMarker(Sema &S, bool SkipLocalVariables)
- : Inherited(S), SkipLocalVariables(SkipLocalVariables) {}
+ EvaluatedExprMarker(Sema &S, bool SkipLocalVariables,
+ ArrayRef<const Expr *> StopAt)
+ : Inherited(S), SkipLocalVariables(SkipLocalVariables), StopAt(StopAt) {}
void visitUsedDecl(SourceLocation Loc, Decl *D) {
S.MarkFunctionReferenced(Loc, cast<FunctionDecl>(D));
}
+ void Visit(Expr *E) {
+ if (llvm::is_contained(StopAt, E))
+ return;
+ Inherited::Visit(E);
+ }
+
+ void VisitConstantExpr(ConstantExpr *E) {
+ // Don't mark declarations within a ConstantExpression, as this expression
+ // will be evaluated and folded to a value.
+ }
+
void VisitDeclRefExpr(DeclRefExpr *E) {
// If we were asked not to visit local variables, don't.
if (SkipLocalVariables) {
@@ -18863,9 +20903,43 @@ public:
///
/// \param SkipLocalVariables If true, don't mark local variables as
/// 'referenced'.
+/// \param StopAt Subexpressions that we shouldn't recurse into.
void Sema::MarkDeclarationsReferencedInExpr(Expr *E,
- bool SkipLocalVariables) {
- EvaluatedExprMarker(*this, SkipLocalVariables).Visit(E);
+ bool SkipLocalVariables,
+ ArrayRef<const Expr*> StopAt) {
+ EvaluatedExprMarker(*this, SkipLocalVariables, StopAt).Visit(E);
+}
+
+/// Emit a diagnostic when statements are reachable.
+/// FIXME: check for reachability even in expressions for which we don't build a
+/// CFG (eg, in the initializer of a global or in a constant expression).
+/// For example,
+/// namespace { auto *p = new double[3][false ? (1, 2) : 3]; }
+bool Sema::DiagIfReachable(SourceLocation Loc, ArrayRef<const Stmt *> Stmts,
+ const PartialDiagnostic &PD) {
+ if (!Stmts.empty() && getCurFunctionOrMethodDecl()) {
+ if (!FunctionScopes.empty())
+ FunctionScopes.back()->PossiblyUnreachableDiags.push_back(
+ sema::PossiblyUnreachableDiag(PD, Loc, Stmts));
+ return true;
+ }
+
+ // The initializer of a constexpr variable or of the first declaration of a
+ // static data member is not syntactically a constant evaluated constant,
+ // but nonetheless is always required to be a constant expression, so we
+ // can skip diagnosing.
+ // FIXME: Using the mangling context here is a hack.
+ if (auto *VD = dyn_cast_or_null<VarDecl>(
+ ExprEvalContexts.back().ManglingContextDecl)) {
+ if (VD->isConstexpr() ||
+ (VD->isStaticDataMember() && VD->isFirstDecl() && !VD->isInline()))
+ return false;
+ // FIXME: For any other kind of variable, we should build a CFG for its
+ // initializer and check whether the context in question is reachable.
+ }
+
+ Diag(Loc, PD);
+ return true;
}
/// Emit a diagnostic that describes an effect on the run-time behavior
@@ -18886,6 +20960,10 @@ void Sema::MarkDeclarationsReferencedInExpr(Expr *E,
/// during overload resolution or within sizeof/alignof/typeof/typeid.
bool Sema::DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD) {
+
+ if (ExprEvalContexts.back().isDiscardedStatementContext())
+ return false;
+
switch (ExprEvalContexts.back().Context) {
case ExpressionEvaluationContext::Unevaluated:
case ExpressionEvaluationContext::UnevaluatedList:
@@ -18895,33 +20973,13 @@ bool Sema::DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
break;
case ExpressionEvaluationContext::ConstantEvaluated:
+ case ExpressionEvaluationContext::ImmediateFunctionContext:
// Relevant diagnostics should be produced by constant evaluation.
break;
case ExpressionEvaluationContext::PotentiallyEvaluated:
case ExpressionEvaluationContext::PotentiallyEvaluatedIfUsed:
- if (!Stmts.empty() && getCurFunctionOrMethodDecl()) {
- FunctionScopes.back()->PossiblyUnreachableDiags.
- push_back(sema::PossiblyUnreachableDiag(PD, Loc, Stmts));
- return true;
- }
-
- // The initializer of a constexpr variable or of the first declaration of a
- // static data member is not syntactically a constant evaluated constant,
- // but nonetheless is always required to be a constant expression, so we
- // can skip diagnosing.
- // FIXME: Using the mangling context here is a hack.
- if (auto *VD = dyn_cast_or_null<VarDecl>(
- ExprEvalContexts.back().ManglingContextDecl)) {
- if (VD->isConstexpr() ||
- (VD->isStaticDataMember() && VD->isFirstDecl() && !VD->isInline()))
- break;
- // FIXME: For any other kind of variable, we should build a CFG for its
- // initializer and check whether the context in question is reachable.
- }
-
- Diag(Loc, PD);
- return true;
+ return DiagIfReachable(Loc, Stmts, PD);
}
return false;
@@ -18930,7 +20988,7 @@ bool Sema::DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
bool Sema::DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD) {
return DiagRuntimeBehavior(
- Loc, Statement ? llvm::makeArrayRef(Statement) : llvm::None, PD);
+ Loc, Statement ? llvm::ArrayRef(Statement) : std::nullopt, PD);
}
bool Sema::CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
@@ -19093,10 +21151,12 @@ ExprResult Sema::CheckBooleanCondition(SourceLocation Loc, Expr *E,
}
Sema::ConditionResult Sema::ActOnCondition(Scope *S, SourceLocation Loc,
- Expr *SubExpr, ConditionKind CK) {
- // Empty conditions are valid in for-statements.
+ Expr *SubExpr, ConditionKind CK,
+ bool MissingOK) {
+ // MissingOK indicates whether having no condition expression is valid
+ // (for loop) or invalid (e.g. while loop).
if (!SubExpr)
- return ConditionResult();
+ return MissingOK ? ConditionResult() : ConditionError();
ExprResult Cond;
switch (CK) {
@@ -19114,7 +21174,7 @@ Sema::ConditionResult Sema::ActOnCondition(Scope *S, SourceLocation Loc,
}
if (Cond.isInvalid()) {
Cond = CreateRecoveryExpr(SubExpr->getBeginLoc(), SubExpr->getEndLoc(),
- {SubExpr});
+ {SubExpr}, PreferredConditionType(CK));
if (!Cond.get())
return ConditionError();
}
@@ -19371,14 +21431,7 @@ ExprResult RebuildUnknownAnyExpr::VisitCallExpr(CallExpr *E) {
if (ParamTypes.empty() && Proto->isVariadic()) { // the special case
ArgTypes.reserve(E->getNumArgs());
for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
- Expr *Arg = E->getArg(i);
- QualType ArgType = Arg->getType();
- if (E->isLValue()) {
- ArgType = S.Context.getLValueReferenceType(ArgType);
- } else if (E->isXValue()) {
- ArgType = S.Context.getRValueReferenceType(ArgType);
- }
- ArgTypes.push_back(ArgType);
+ ArgTypes.push_back(S.Context.getReferenceQualifiedType(E->getArg(i)));
}
ParamTypes = ArgTypes;
}
@@ -19505,7 +21558,8 @@ ExprResult RebuildUnknownAnyExpr::resolveDecl(Expr *E, ValueDecl *VD) {
FunctionDecl *NewFD = FunctionDecl::Create(
S.Context, FD->getDeclContext(), Loc, Loc,
FD->getNameInfo().getName(), DestType, FD->getTypeSourceInfo(),
- SC_None, false /*isInlineSpecified*/, FD->hasPrototype(),
+ SC_None, S.getCurFPFeatures().isFPConstrained(),
+ false /*isInlineSpecified*/, FD->hasPrototype(),
/*ConstexprKind*/ ConstexprSpecKind::Unspecified);
if (FD->getQualifier())
@@ -19726,7 +21780,8 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
auto *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
if (DRE) {
auto *FD = cast<FunctionDecl>(DRE->getDecl());
- if (FD->getBuiltinID() == Builtin::BI__noop) {
+ unsigned BuiltinID = FD->getBuiltinID();
+ if (BuiltinID == Builtin::BI__noop) {
E = ImpCastExprToType(E, Context.getPointerType(FD->getType()),
CK_BuiltinFnToFnPtr)
.get();
@@ -19734,6 +21789,36 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
VK_PRValue, SourceLocation(),
FPOptionsOverride());
}
+
+ if (Context.BuiltinInfo.isInStdNamespace(BuiltinID)) {
+ // Any use of these other than a direct call is ill-formed as of C++20,
+ // because they are not addressable functions. In earlier language
+ // modes, warn and force an instantiation of the real body.
+ Diag(E->getBeginLoc(),
+ getLangOpts().CPlusPlus20
+ ? diag::err_use_of_unaddressable_function
+ : diag::warn_cxx20_compat_use_of_unaddressable_function);
+ if (FD->isImplicitlyInstantiable()) {
+ // Require a definition here because a normal attempt at
+ // instantiation for a builtin will be ignored, and we won't try
+ // again later. We assume that the definition of the template
+ // precedes this use.
+ InstantiateFunctionDefinition(E->getBeginLoc(), FD,
+ /*Recursive=*/false,
+ /*DefinitionRequired=*/true,
+ /*AtEndOfTU=*/false);
+ }
+ // Produce a properly-typed reference to the function.
+ CXXScopeSpec SS;
+ SS.Adopt(DRE->getQualifierLoc());
+ TemplateArgumentListInfo TemplateArgs;
+ DRE->copyTemplateArgumentsInto(TemplateArgs);
+ return BuildDeclRefExpr(
+ FD, FD->getType(), VK_LValue, DRE->getNameInfo(),
+ DRE->hasQualifier() ? &SS : nullptr, DRE->getFoundDecl(),
+ DRE->getTemplateKeywordLoc(),
+ DRE->hasExplicitTemplateArgs() ? &TemplateArgs : nullptr);
+ }
}
Diag(E->getBeginLoc(), diag::err_builtin_fn_use);
@@ -19774,6 +21859,8 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
#define BUILTIN_TYPE(Id, SingletonId) case BuiltinType::Id:
#define PLACEHOLDER_TYPE(Id, SingletonId)
#include "clang/AST/BuiltinTypes.def"
@@ -19815,7 +21902,8 @@ Sema::ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) {
ExprResult Sema::ActOnObjCAvailabilityCheckExpr(
llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc,
SourceLocation RParen) {
- auto FindSpecVersion = [&](StringRef Platform) -> Optional<VersionTuple> {
+ auto FindSpecVersion =
+ [&](StringRef Platform) -> std::optional<VersionTuple> {
auto Spec = llvm::find_if(AvailSpecs, [&](const AvailabilitySpec &Spec) {
return Spec.getPlatform() == Platform;
});
@@ -19827,7 +21915,7 @@ ExprResult Sema::ActOnObjCAvailabilityCheckExpr(
});
}
if (Spec == AvailSpecs.end())
- return None;
+ return std::nullopt;
return Spec->getVersion();
};
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
index 7961e7941813..953bfe484a52 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
@@ -11,8 +11,6 @@
///
//===----------------------------------------------------------------------===//
-#include "clang/Sema/Template.h"
-#include "clang/Sema/SemaInternal.h"
#include "TreeTransform.h"
#include "TypeLocBuilder.h"
#include "clang/AST/ASTContext.h"
@@ -21,24 +19,35 @@
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/AlignedAllocation.h"
+#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TokenKinds.h"
+#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/SemaLambda.h"
+#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TypeSize.h"
+#include <optional>
using namespace clang;
using namespace sema;
@@ -63,8 +72,8 @@ ParsedType Sema::getInheritingConstructorName(CXXScopeSpec &SS,
// Strip off the last layer of the nested-name-specifier and build a
// typename type for it.
assert(NNS->getAsIdentifier() == &Name && "not a constructor name");
- Type = Context.getDependentNameType(ETK_None, NNS->getPrefix(),
- NNS->getAsIdentifier());
+ Type = Context.getDependentNameType(
+ ElaboratedTypeKeyword::None, NNS->getPrefix(), NNS->getAsIdentifier());
break;
case NestedNameSpecifier::Global:
@@ -92,7 +101,8 @@ ParsedType Sema::getConstructorName(IdentifierInfo &II,
// friend declaration or an inherited constructor declaration), form an
// unresolved "typename" type.
if (CurClass->isDependentContext() && !EnteringContext && SS.getScopeRep()) {
- QualType T = Context.getDependentNameType(ETK_None, SS.getScopeRep(), &II);
+ QualType T = Context.getDependentNameType(ElaboratedTypeKeyword::None,
+ SS.getScopeRep(), &II);
return ParsedType::make(T);
}
@@ -130,9 +140,7 @@ ParsedType Sema::getConstructorName(IdentifierInfo &II,
return ParsedType::make(T);
}
-ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
- IdentifierInfo &II,
- SourceLocation NameLoc,
+ParsedType Sema::getDestructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectTypePtr,
bool EnteringContext) {
@@ -238,8 +246,9 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
if (IsAcceptableResult(Type)) {
QualType T = Context.getTypeDeclType(Type);
MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false);
- return CreateParsedType(T,
- Context.getTrivialTypeSourceInfo(T, NameLoc));
+ return CreateParsedType(
+ Context.getElaboratedType(ElaboratedTypeKeyword::None, nullptr, T),
+ Context.getTrivialTypeSourceInfo(T, NameLoc));
}
}
@@ -355,9 +364,9 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
// We didn't find our type, but that's OK: it's dependent anyway.
// FIXME: What if we have no nested-name-specifier?
- QualType T = CheckTypenameType(ETK_None, SourceLocation(),
- SS.getWithLocInContext(Context),
- II, NameLoc);
+ QualType T =
+ CheckTypenameType(ElaboratedTypeKeyword::None, SourceLocation(),
+ SS.getWithLocInContext(Context), II, NameLoc);
return ParsedType::make(T);
}
@@ -385,7 +394,7 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
//
// also looks for type-name in the scope. Unfortunately, we can't
// reasonably apply this fallback for dependent nested-name-specifiers.
- if (SS.getScopeRep()->getPrefix()) {
+ if (SS.isValid() && SS.getScopeRep()->getPrefix()) {
if (ParsedType T = LookupInScope()) {
Diag(SS.getEndLoc(), diag::ext_qualified_dtor_named_in_lexical_scope)
<< FixItHint::CreateRemoval(SS.getRange());
@@ -468,7 +477,7 @@ ParsedType Sema::getDestructorTypeForDecltype(const DeclSpec &DS,
assert(DS.getTypeSpecType() == DeclSpec::TST_decltype &&
"unexpected type in getDestructorType");
- QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc());
+ QualType T = BuildDecltypeType(DS.getRepAsExpr());
// If we know the type of the object, check that the correct destructor
// type was named now; we can give better diagnostics this way.
@@ -494,13 +503,16 @@ bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS,
IdentifierInfo *II = Name.Identifier;
ReservedIdentifierStatus Status = II->isReserved(PP.getLangOpts());
SourceLocation Loc = Name.getEndLoc();
- if (Status != ReservedIdentifierStatus::NotReserved &&
- !PP.getSourceManager().isInSystemHeader(Loc)) {
- Diag(Loc, diag::warn_reserved_extern_symbol)
- << II << static_cast<int>(Status)
- << FixItHint::CreateReplacement(
- Name.getSourceRange(),
- (StringRef("operator\"\"") + II->getName()).str());
+ if (!PP.getSourceManager().isInSystemHeader(Loc)) {
+ if (auto Hint = FixItHint::CreateReplacement(
+ Name.getSourceRange(),
+ (StringRef("operator\"\"") + II->getName()).str());
+ isReservedInAllContexts(Status)) {
+ Diag(Loc, diag::warn_reserved_extern_symbol)
+ << II << static_cast<int>(Status) << Hint;
+ } else {
+ Diag(Loc, diag::warn_deprecated_literal_operator_id) << II << Hint;
+ }
}
}
@@ -564,7 +576,7 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
SourceLocation RParenLoc) {
bool WasEvaluated = false;
if (E && !E->isTypeDependent()) {
- if (E->getType()->isPlaceholderType()) {
+ if (E->hasPlaceholderType()) {
ExprResult result = CheckPlaceholderExpr(E);
if (result.isInvalid()) return ExprError();
E = result.get();
@@ -831,21 +843,21 @@ Sema::ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *Ex) {
// operation from the operand to the exception object (15.1) can be
// omitted by constructing the automatic object directly into the
// exception object
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Ex->IgnoreParens()))
- if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) {
- if (Var->hasLocalStorage() && !Var->getType().isVolatileQualified()) {
- for( ; S; S = S->getParent()) {
- if (S->isDeclScope(Var)) {
- IsThrownVarInScope = true;
- break;
- }
-
- if (S->getFlags() &
- (Scope::FnScope | Scope::ClassScope | Scope::BlockScope |
- Scope::FunctionPrototypeScope | Scope::ObjCMethodScope |
- Scope::TryScope))
- break;
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(Ex->IgnoreParens()))
+ if (const auto *Var = dyn_cast<VarDecl>(DRE->getDecl());
+ Var && Var->hasLocalStorage() &&
+ !Var->getType().isVolatileQualified()) {
+ for (; S; S = S->getParent()) {
+ if (S->isDeclScope(Var)) {
+ IsThrownVarInScope = true;
+ break;
}
+
+ // FIXME: Many of the scope checks here seem incorrect.
+ if (S->getFlags() &
+ (Scope::FnScope | Scope::ClassScope | Scope::BlockScope |
+ Scope::ObjCMethodScope | Scope::TryScope))
+ break;
}
}
}
@@ -855,13 +867,21 @@ Sema::ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *Ex) {
ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope) {
- // Don't report an error if 'throw' is used in system headers.
- if (!getLangOpts().CXXExceptions &&
+ const llvm::Triple &T = Context.getTargetInfo().getTriple();
+ const bool IsOpenMPGPUTarget =
+ getLangOpts().OpenMPIsTargetDevice && (T.isNVPTX() || T.isAMDGCN());
+ // Don't report an error if 'throw' is used in system headers or in an OpenMP
+ // target region compiled for a GPU architecture.
+ if (!IsOpenMPGPUTarget && !getLangOpts().CXXExceptions &&
!getSourceManager().isInSystemHeader(OpLoc) && !getLangOpts().CUDA) {
// Delay error emission for the OpenMP device code.
targetDiag(OpLoc, diag::err_exceptions_disabled) << "throw";
}
+ // In OpenMP target regions, we replace 'throw' with a trap on GPU targets.
+ if (IsOpenMPGPUTarget)
+ targetDiag(OpLoc, diag::warn_throw_not_valid_on_target) << T.str();
+
// Exceptions aren't allowed in CUDA device code.
if (getLangOpts().CUDA)
CUDADiagIfDeviceCode(OpLoc, diag::err_cuda_device_exceptions)
@@ -969,6 +989,19 @@ bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc,
Ty = Ptr->getPointeeType();
isPointer = true;
}
+
+ // Cannot throw WebAssembly reference type.
+ if (Ty.isWebAssemblyReferenceType()) {
+ Diag(ThrowLoc, diag::err_wasm_reftype_tc) << 0 << E->getSourceRange();
+ return true;
+ }
+
+ // Cannot throw WebAssembly table.
+ if (isPointer && Ty.isWebAssemblyReferenceType()) {
+ Diag(ThrowLoc, diag::err_wasm_table_art) << 2 << E->getSourceRange();
+ return true;
+ }
+
if (!isPointer || !Ty->isVoidType()) {
if (RequireCompleteType(ThrowLoc, Ty,
isPointer ? diag::err_throw_incomplete_ptr
@@ -1068,6 +1101,16 @@ bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc,
<< (unsigned)ExnObjAlign.getQuantity();
}
}
+ if (!isPointer && getLangOpts().AssumeNothrowExceptionDtor) {
+ if (CXXDestructorDecl *Dtor = RD->getDestructor()) {
+ auto Ty = Dtor->getType();
+ if (auto *FT = Ty.getTypePtr()->getAs<FunctionProtoType>()) {
+ if (!isUnresolvedExceptionSpec(FT->getExceptionSpecType()) &&
+ !FT->isNothrow())
+ Diag(ThrowLoc, diag::err_throw_object_throwing_dtor) << RD;
+ }
+ }
+ }
return false;
}
@@ -1129,18 +1172,16 @@ static QualType adjustCVQualifiersForCXXThisWithinLambda(
auto C = CurLSI->getCXXThisCapture();
if (C.isCopyCapture()) {
- ClassType.removeLocalCVRQualifiers(Qualifiers::CVRMask);
- if (CurLSI->CallOperator->isConst())
+ if (CurLSI->lambdaCaptureShouldBeConst())
ClassType.addConst();
return ASTCtx.getPointerType(ClassType);
}
}
- // 2) We've run out of ScopeInfos but check if CurDC is a lambda (which can
- // happen during instantiation of its nested generic lambda call operator)
- if (isLambdaCallOperator(CurDC)) {
- assert(CurLSI && "While computing 'this' capture-type for a generic "
- "lambda, we must have a corresponding LambdaScopeInfo");
+ // 2) We've run out of ScopeInfos but check 1. if CurDC is a lambda (which
+ // can happen during instantiation of its nested generic lambda call
+ // operator); 2. if we're in a lambda scope (lambda body).
+ if (CurLSI && isLambdaCallOperator(CurDC)) {
assert(isGenericLambdaCallOperatorSpecialization(CurLSI->CallOperator) &&
"While computing 'this' capture-type for a generic lambda, when we "
"run out of enclosing LSI's, yet the enclosing DC is a "
@@ -1170,7 +1211,6 @@ static QualType adjustCVQualifiersForCXXThisWithinLambda(
while (Closure &&
IsThisCaptured(Closure, IsByCopyCapture, IsConstCapture)) {
if (IsByCopyCapture) {
- ClassType.removeLocalCVRQualifiers(Qualifiers::CVRMask);
if (IsConstCapture)
ClassType.addConst();
return ASTCtx.getPointerType(ClassType);
@@ -1188,11 +1228,11 @@ QualType Sema::getCurrentThisType() {
QualType ThisTy = CXXThisTypeOverride;
if (CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(DC)) {
- if (method && method->isInstance())
- ThisTy = method->getThisType();
+ if (method && method->isImplicitObjectMemberFunction())
+ ThisTy = method->getThisType().getNonReferenceType();
}
- if (ThisTy.isNull() && isLambdaCallOperator(CurContext) &&
+ if (ThisTy.isNull() && isLambdaCallWithImplicitObjectParameter(CurContext) &&
inTemplateInstantiation() && isa<CXXRecordDecl>(DC)) {
// This is a lambda call operator that is being instantiated as a default
@@ -1231,7 +1271,8 @@ Sema::CXXThisScopeRAII::CXXThisScopeRAII(Sema &S,
QualType T = S.Context.getRecordType(Record);
T = S.getASTContext().getQualifiedType(T, CXXThisTypeQuals);
- S.CXXThisTypeOverride = S.Context.getPointerType(T);
+ S.CXXThisTypeOverride =
+ S.Context.getLangOpts().HLSL ? T : S.Context.getPointerType(T);
this->Enabled = true;
}
@@ -1304,6 +1345,7 @@ bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
if (LSI && isGenericLambdaCallOperatorSpecialization(LSI->CallOperator)) {
// This context can't implicitly capture 'this'; fail out.
if (BuildAndDiagnose) {
+ LSI->CallOperator->setInvalidDecl();
Diag(Loc, diag::err_this_capture)
<< (Explicit && idx == MaxFunctionScopesIndex);
if (!Explicit)
@@ -1325,10 +1367,11 @@ bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
continue;
}
// This context can't implicitly capture 'this'; fail out.
- if (BuildAndDiagnose)
+ if (BuildAndDiagnose) {
+ LSI->CallOperator->setInvalidDecl();
Diag(Loc, diag::err_this_capture)
<< (Explicit && idx == MaxFunctionScopesIndex);
-
+ }
if (!Explicit)
buildLambdaThisCaptureFixit(*this, LSI);
return true;
@@ -1347,7 +1390,7 @@ bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
// implicitly capturing the *enclosing object* by reference (see loop
// above)).
assert((!ByCopy ||
- dyn_cast<LambdaScopeInfo>(FunctionScopes[MaxFunctionScopesIndex])) &&
+ isa<LambdaScopeInfo>(FunctionScopes[MaxFunctionScopesIndex])) &&
"Only a lambda can capture the enclosing object (referred to by "
"*this) by copy");
QualType ThisTy = getCurrentThisType();
@@ -1357,15 +1400,7 @@ bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
// The type of the corresponding data member (not a 'this' pointer if 'by
// copy').
- QualType CaptureType = ThisTy;
- if (ByCopy) {
- // If we are capturing the object referred to by '*this' by copy, ignore
- // any cv qualifiers inherited from the type of the member function for
- // the type of the closure-type's corresponding data member and any use
- // of 'this'.
- CaptureType = ThisTy->getPointeeType();
- CaptureType.removeLocalCVRQualifiers(Qualifiers::CVRMask);
- }
+ QualType CaptureType = ByCopy ? ThisTy->getPointeeType() : ThisTy;
bool isNested = NumCapturingClosures > 1;
CSI->addThisCapture(isNested, Loc, CaptureType, ByCopy);
@@ -1377,16 +1412,28 @@ ExprResult Sema::ActOnCXXThis(SourceLocation Loc) {
/// C++ 9.3.2: In the body of a non-static member function, the keyword this
/// is a non-lvalue expression whose value is the address of the object for
/// which the function is called.
-
QualType ThisTy = getCurrentThisType();
- if (ThisTy.isNull())
- return Diag(Loc, diag::err_invalid_this_use);
+
+ if (ThisTy.isNull()) {
+ DeclContext *DC = getFunctionLevelDeclContext();
+
+ if (const auto *Method = dyn_cast<CXXMethodDecl>(DC);
+ Method && Method->isExplicitObjectMemberFunction()) {
+ return Diag(Loc, diag::err_invalid_this_use) << 1;
+ }
+
+ if (isLambdaCallWithExplicitObjectParameter(CurContext))
+ return Diag(Loc, diag::err_invalid_this_use) << 1;
+
+ return Diag(Loc, diag::err_invalid_this_use) << 0;
+ }
+
return BuildCXXThisExpr(Loc, ThisTy, /*IsImplicit=*/false);
}
Expr *Sema::BuildCXXThisExpr(SourceLocation Loc, QualType Type,
bool IsImplicit) {
- auto *This = new (Context) CXXThisExpr(Loc, Type, IsImplicit);
+ auto *This = CXXThisExpr::Create(Context, Loc, Type, IsImplicit);
MarkThisReferenced(This);
return This;
}
@@ -1448,12 +1495,12 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
QualType Ty = TInfo->getType();
SourceLocation TyBeginLoc = TInfo->getTypeLoc().getBeginLoc();
- assert((!ListInitialization ||
- (Exprs.size() == 1 && isa<InitListExpr>(Exprs[0]))) &&
- "List initialization must have initializer list as expression.");
+ assert((!ListInitialization || Exprs.size() == 1) &&
+ "List initialization must have exactly one expression.");
SourceRange FullRange = SourceRange(TyBeginLoc, RParenOrBraceLoc);
- InitializedEntity Entity = InitializedEntity::InitializeTemporary(TInfo);
+ InitializedEntity Entity =
+ InitializedEntity::InitializeTemporary(Context, TInfo);
InitializationKind Kind =
Exprs.size()
? ListInitialization
@@ -1464,29 +1511,67 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
: InitializationKind::CreateValue(TyBeginLoc, LParenOrBraceLoc,
RParenOrBraceLoc);
- // C++1z [expr.type.conv]p1:
+ // C++17 [expr.type.conv]p1:
// If the type is a placeholder for a deduced class type, [...perform class
// template argument deduction...]
+ // C++23:
+ // Otherwise, if the type contains a placeholder type, it is replaced by the
+ // type determined by placeholder type deduction.
DeducedType *Deduced = Ty->getContainedDeducedType();
- if (Deduced && isa<DeducedTemplateSpecializationType>(Deduced)) {
+ if (Deduced && !Deduced->isDeduced() &&
+ isa<DeducedTemplateSpecializationType>(Deduced)) {
Ty = DeduceTemplateSpecializationFromInitializer(TInfo, Entity,
Kind, Exprs);
if (Ty.isNull())
return ExprError();
Entity = InitializedEntity::InitializeTemporary(TInfo, Ty);
- }
+ } else if (Deduced && !Deduced->isDeduced()) {
+ MultiExprArg Inits = Exprs;
+ if (ListInitialization) {
+ auto *ILE = cast<InitListExpr>(Exprs[0]);
+ Inits = MultiExprArg(ILE->getInits(), ILE->getNumInits());
+ }
- if (Ty->isDependentType() || CallExpr::hasAnyTypeDependentArguments(Exprs)) {
- // FIXME: CXXUnresolvedConstructExpr does not model list-initialization
- // directly. We work around this by dropping the locations of the braces.
- SourceRange Locs = ListInitialization
- ? SourceRange()
- : SourceRange(LParenOrBraceLoc, RParenOrBraceLoc);
- return CXXUnresolvedConstructExpr::Create(Context, Ty.getNonReferenceType(),
- TInfo, Locs.getBegin(), Exprs,
- Locs.getEnd());
+ if (Inits.empty())
+ return ExprError(Diag(TyBeginLoc, diag::err_auto_expr_init_no_expression)
+ << Ty << FullRange);
+ if (Inits.size() > 1) {
+ Expr *FirstBad = Inits[1];
+ return ExprError(Diag(FirstBad->getBeginLoc(),
+ diag::err_auto_expr_init_multiple_expressions)
+ << Ty << FullRange);
+ }
+ if (getLangOpts().CPlusPlus23) {
+ if (Ty->getAs<AutoType>())
+ Diag(TyBeginLoc, diag::warn_cxx20_compat_auto_expr) << FullRange;
+ }
+ Expr *Deduce = Inits[0];
+ if (isa<InitListExpr>(Deduce))
+ return ExprError(
+ Diag(Deduce->getBeginLoc(), diag::err_auto_expr_init_paren_braces)
+ << ListInitialization << Ty << FullRange);
+ QualType DeducedType;
+ TemplateDeductionInfo Info(Deduce->getExprLoc());
+ TemplateDeductionResult Result =
+ DeduceAutoType(TInfo->getTypeLoc(), Deduce, DeducedType, Info);
+ if (Result != TDK_Success && Result != TDK_AlreadyDiagnosed)
+ return ExprError(Diag(TyBeginLoc, diag::err_auto_expr_deduction_failure)
+ << Ty << Deduce->getType() << FullRange
+ << Deduce->getSourceRange());
+ if (DeducedType.isNull()) {
+ assert(Result == TDK_AlreadyDiagnosed);
+ return ExprError();
+ }
+
+ Ty = DeducedType;
+ Entity = InitializedEntity::InitializeTemporary(TInfo, Ty);
}
+ if (Ty->isDependentType() || CallExpr::hasAnyTypeDependentArguments(Exprs))
+ return CXXUnresolvedConstructExpr::Create(
+ Context, Ty.getNonReferenceType(), TInfo, LParenOrBraceLoc, Exprs,
+ RParenOrBraceLoc, ListInitialization);
+
// C++ [expr.type.conv]p1:
// If the expression list is a parenthesized single expression, the type
// conversion expression is equivalent (in definedness, and if defined in
@@ -1507,8 +1592,10 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
ElemTy = Context.getBaseElementType(Ty);
}
- // There doesn't seem to be an explicit rule against this but sanity demands
- // we only construct objects with object types.
+ // Only construct objects with object types.
+ // The standard doesn't explicitly forbid function types here, but that's an
+ // obvious oversight, as there's no way to dynamically construct a function
+ // in general.
if (Ty->isFunctionType())
return ExprError(Diag(TyBeginLoc, diag::err_init_for_function_type)
<< Ty << FullRange);
@@ -1532,6 +1619,9 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
Expr *Inner = Result.get();
if (CXXBindTemporaryExpr *BTE = dyn_cast_or_null<CXXBindTemporaryExpr>(Inner))
Inner = BTE->getSubExpr();
+ if (auto *CE = dyn_cast<ConstantExpr>(Inner);
+ CE && CE->isImmediateInvocation())
+ Inner = CE->getSubExpr();
if (!isa<CXXTemporaryObjectExpr>(Inner) &&
!isa<CXXScalarValueInitExpr>(Inner)) {
// If we created a CXXTemporaryObjectExpr, that node also represents the
@@ -1557,7 +1647,7 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
bool Sema::isUsualDeallocationFunction(const CXXMethodDecl *Method) {
// [CUDA] Ignore this function, if we can't call it.
- const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext);
+ const FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
if (getLangOpts().CUDA) {
auto CallPreference = IdentifyCUDAPreference(Caller, Method);
// If it's not callable at all, it's not the right function.
@@ -1651,8 +1741,8 @@ namespace {
// In CUDA, determine how much we'd like / dislike to call this.
if (S.getLangOpts().CUDA)
- if (auto *Caller = dyn_cast<FunctionDecl>(S.CurContext))
- CUDAPref = S.IdentifyCUDAPreference(Caller, FD);
+ CUDAPref = S.IdentifyCUDAPreference(
+ S.getCurFunctionDecl(/*AllowLambda=*/true), FD);
}
explicit operator bool() const { return FD; }
@@ -1789,7 +1879,7 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen, MultiExprArg PlacementArgs,
SourceLocation PlacementRParen, SourceRange TypeIdParens,
Declarator &D, Expr *Initializer) {
- Optional<Expr *> ArraySize;
+ std::optional<Expr *> ArraySize;
// If the specified type is an array, unwrap it and save the expression.
if (D.getNumTypeObjects() > 0 &&
D.getTypeObject(0).Kind == DeclaratorChunk::Array) {
@@ -1841,7 +1931,7 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
}
}
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D, /*Scope=*/nullptr);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
QualType AllocType = TInfo->getType();
if (D.isInvalidType())
return ExprError();
@@ -1856,18 +1946,18 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
Initializer);
}
-static bool isLegalArrayNewInitializer(CXXNewExpr::InitializationStyle Style,
- Expr *Init) {
+static bool isLegalArrayNewInitializer(CXXNewInitializationStyle Style,
+ Expr *Init, bool IsCPlusPlus20) {
if (!Init)
return true;
if (ParenListExpr *PLE = dyn_cast<ParenListExpr>(Init))
- return PLE->getNumExprs() == 0;
+ return IsCPlusPlus20 || PLE->getNumExprs() == 0;
if (isa<ImplicitValueInitExpr>(Init))
return true;
else if (CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init))
return !CCE->isListInitialization() &&
CCE->getConstructor()->isDefaultConstructor();
- else if (Style == CXXNewExpr::ListInit) {
+ else if (Style == CXXNewInitializationStyle::Braces) {
assert(isa<InitListExpr>(Init) &&
"Shouldn't create list CXXConstructExprs for arrays.");
return true;
@@ -1881,9 +1971,9 @@ Sema::isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const {
return false;
if (FD.isDefined())
return false;
- Optional<unsigned> AlignmentParam;
+ std::optional<unsigned> AlignmentParam;
if (FD.isReplaceableGlobalAllocationFunction(&AlignmentParam) &&
- AlignmentParam.hasValue())
+ AlignmentParam)
return true;
return false;
}
@@ -1907,91 +1997,90 @@ void Sema::diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
}
}
-ExprResult
-Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
- SourceLocation PlacementLParen,
- MultiExprArg PlacementArgs,
- SourceLocation PlacementRParen,
- SourceRange TypeIdParens,
- QualType AllocType,
- TypeSourceInfo *AllocTypeInfo,
- Optional<Expr *> ArraySize,
- SourceRange DirectInitRange,
- Expr *Initializer) {
+ExprResult Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
+ SourceLocation PlacementLParen,
+ MultiExprArg PlacementArgs,
+ SourceLocation PlacementRParen,
+ SourceRange TypeIdParens, QualType AllocType,
+ TypeSourceInfo *AllocTypeInfo,
+ std::optional<Expr *> ArraySize,
+ SourceRange DirectInitRange, Expr *Initializer) {
SourceRange TypeRange = AllocTypeInfo->getTypeLoc().getSourceRange();
SourceLocation StartLoc = Range.getBegin();
- CXXNewExpr::InitializationStyle initStyle;
+ CXXNewInitializationStyle InitStyle;
if (DirectInitRange.isValid()) {
assert(Initializer && "Have parens but no initializer.");
- initStyle = CXXNewExpr::CallInit;
+ InitStyle = CXXNewInitializationStyle::Parens;
} else if (Initializer && isa<InitListExpr>(Initializer))
- initStyle = CXXNewExpr::ListInit;
+ InitStyle = CXXNewInitializationStyle::Braces;
else {
assert((!Initializer || isa<ImplicitValueInitExpr>(Initializer) ||
isa<CXXConstructExpr>(Initializer)) &&
"Initializer expression that cannot have been implicitly created.");
- initStyle = CXXNewExpr::NoInit;
+ InitStyle = CXXNewInitializationStyle::None;
}
- Expr **Inits = &Initializer;
- unsigned NumInits = Initializer ? 1 : 0;
+ MultiExprArg Exprs(&Initializer, Initializer ? 1 : 0);
if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Initializer)) {
- assert(initStyle == CXXNewExpr::CallInit && "paren init for non-call init");
- Inits = List->getExprs();
- NumInits = List->getNumExprs();
+ assert(InitStyle == CXXNewInitializationStyle::Parens &&
+ "paren init for non-call init");
+ Exprs = MultiExprArg(List->getExprs(), List->getNumExprs());
}
// C++11 [expr.new]p15:
// A new-expression that creates an object of type T initializes that
// object as follows:
- InitializationKind Kind
- // - If the new-initializer is omitted, the object is default-
- // initialized (8.5); if no initialization is performed,
- // the object has indeterminate value
- = initStyle == CXXNewExpr::NoInit
- ? InitializationKind::CreateDefault(TypeRange.getBegin())
- // - Otherwise, the new-initializer is interpreted according to
- // the
- // initialization rules of 8.5 for direct-initialization.
- : initStyle == CXXNewExpr::ListInit
- ? InitializationKind::CreateDirectList(
- TypeRange.getBegin(), Initializer->getBeginLoc(),
- Initializer->getEndLoc())
- : InitializationKind::CreateDirect(TypeRange.getBegin(),
- DirectInitRange.getBegin(),
- DirectInitRange.getEnd());
+ InitializationKind Kind = [&] {
+ switch (InitStyle) {
+ // - If the new-initializer is omitted, the object is default-
+ // initialized (8.5); if no initialization is performed,
+ // the object has indeterminate value
+ case CXXNewInitializationStyle::None:
+ return InitializationKind::CreateDefault(TypeRange.getBegin());
+ // - Otherwise, the new-initializer is interpreted according to the
+ // initialization rules of 8.5 for direct-initialization.
+ case CXXNewInitializationStyle::Parens:
+ return InitializationKind::CreateDirect(TypeRange.getBegin(),
+ DirectInitRange.getBegin(),
+ DirectInitRange.getEnd());
+ case CXXNewInitializationStyle::Braces:
+ return InitializationKind::CreateDirectList(TypeRange.getBegin(),
+ Initializer->getBeginLoc(),
+ Initializer->getEndLoc());
+ }
+ llvm_unreachable("Unknown initialization kind");
+ }();
// C++11 [dcl.spec.auto]p6. Deduce the type which 'auto' stands in for.
auto *Deduced = AllocType->getContainedDeducedType();
- if (Deduced && isa<DeducedTemplateSpecializationType>(Deduced)) {
+ if (Deduced && !Deduced->isDeduced() &&
+ isa<DeducedTemplateSpecializationType>(Deduced)) {
if (ArraySize)
return ExprError(
- Diag(ArraySize ? (*ArraySize)->getExprLoc() : TypeRange.getBegin(),
+ Diag(*ArraySize ? (*ArraySize)->getExprLoc() : TypeRange.getBegin(),
diag::err_deduced_class_template_compound_type)
<< /*array*/ 2
- << (ArraySize ? (*ArraySize)->getSourceRange() : TypeRange));
+ << (*ArraySize ? (*ArraySize)->getSourceRange() : TypeRange));
InitializedEntity Entity
= InitializedEntity::InitializeNew(StartLoc, AllocType);
AllocType = DeduceTemplateSpecializationFromInitializer(
- AllocTypeInfo, Entity, Kind, MultiExprArg(Inits, NumInits));
+ AllocTypeInfo, Entity, Kind, Exprs);
if (AllocType.isNull())
return ExprError();
- } else if (Deduced) {
- bool Braced = (initStyle == CXXNewExpr::ListInit);
- if (NumInits == 1) {
- if (auto p = dyn_cast_or_null<InitListExpr>(Inits[0])) {
- Inits = p->getInits();
- NumInits = p->getNumInits();
- Braced = true;
- }
+ } else if (Deduced && !Deduced->isDeduced()) {
+ MultiExprArg Inits = Exprs;
+ bool Braced = (InitStyle == CXXNewInitializationStyle::Braces);
+ if (Braced) {
+ auto *ILE = cast<InitListExpr>(Exprs[0]);
+ Inits = MultiExprArg(ILE->getInits(), ILE->getNumInits());
}
- if (initStyle == CXXNewExpr::NoInit || NumInits == 0)
+ if (InitStyle == CXXNewInitializationStyle::None || Inits.empty())
return ExprError(Diag(StartLoc, diag::err_auto_new_requires_ctor_arg)
<< AllocType << TypeRange);
- if (NumInits > 1) {
+ if (Inits.size() > 1) {
Expr *FirstBad = Inits[1];
return ExprError(Diag(FirstBad->getBeginLoc(),
diag::err_auto_new_ctor_multiple_expressions)
@@ -2001,13 +2090,22 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
Diag(Initializer->getBeginLoc(), diag::ext_auto_new_list_init)
<< AllocType << TypeRange;
Expr *Deduce = Inits[0];
+ if (isa<InitListExpr>(Deduce))
+ return ExprError(
+ Diag(Deduce->getBeginLoc(), diag::err_auto_expr_init_paren_braces)
+ << Braced << AllocType << TypeRange);
QualType DeducedType;
- if (DeduceAutoType(AllocTypeInfo, Deduce, DeducedType) == DAR_Failed)
+ TemplateDeductionInfo Info(Deduce->getExprLoc());
+ TemplateDeductionResult Result =
+ DeduceAutoType(AllocTypeInfo->getTypeLoc(), Deduce, DeducedType, Info);
+ if (Result != TDK_Success && Result != TDK_AlreadyDiagnosed)
return ExprError(Diag(StartLoc, diag::err_auto_new_deduction_failure)
- << AllocType << Deduce->getType()
- << TypeRange << Deduce->getSourceRange());
- if (DeducedType.isNull())
+ << AllocType << Deduce->getType() << TypeRange
+ << Deduce->getSourceRange());
+ if (DeducedType.isNull()) {
+ assert(Result == TDK_AlreadyDiagnosed);
return ExprError();
+ }
AllocType = DeducedType;
}
@@ -2026,6 +2124,9 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
if (CheckAllocatedType(AllocType, TypeRange.getBegin(), TypeRange))
return ExprError();
+ if (ArraySize && !checkArrayElementAlignment(AllocType, TypeRange.getBegin()))
+ return ExprError();
+
// In ARC, infer 'retaining' for the allocated
if (getLangOpts().ObjCAutoRefCount &&
AllocType.getObjCLifetime() == Qualifiers::OCL_None &&
@@ -2049,7 +2150,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// conversion function to integral or unscoped enumeration type exists.
// C++1y [expr.new]p6: The expression [...] is implicitly converted to
// std::size_t.
- llvm::Optional<uint64_t> KnownArraySize;
+ std::optional<uint64_t> KnownArraySize;
if (ArraySize && *ArraySize && !(*ArraySize)->isTypeDependent()) {
ExprResult ConvertedSize;
if (getLangOpts().CPlusPlus14) {
@@ -2137,39 +2238,38 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// Let's see if this is a constant < 0. If so, we reject it out of hand,
// per CWG1464. Otherwise, if it's not a constant, we must have an
// unparenthesized array type.
- if (!(*ArraySize)->isValueDependent()) {
- // We've already performed any required implicit conversion to integer or
- // unscoped enumeration type.
- // FIXME: Per CWG1464, we are required to check the value prior to
- // converting to size_t. This will never find a negative array size in
- // C++14 onwards, because Value is always unsigned here!
- if (Optional<llvm::APSInt> Value =
- (*ArraySize)->getIntegerConstantExpr(Context)) {
- if (Value->isSigned() && Value->isNegative()) {
- return ExprError(Diag((*ArraySize)->getBeginLoc(),
- diag::err_typecheck_negative_array_size)
- << (*ArraySize)->getSourceRange());
- }
-
- if (!AllocType->isDependentType()) {
- unsigned ActiveSizeBits = ConstantArrayType::getNumAddressingBits(
- Context, AllocType, *Value);
- if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context))
- return ExprError(
- Diag((*ArraySize)->getBeginLoc(), diag::err_array_too_large)
- << toString(*Value, 10) << (*ArraySize)->getSourceRange());
- }
- KnownArraySize = Value->getZExtValue();
- } else if (TypeIdParens.isValid()) {
- // Can't have dynamic array size when the type-id is in parentheses.
- Diag((*ArraySize)->getBeginLoc(), diag::ext_new_paren_array_nonconst)
- << (*ArraySize)->getSourceRange()
- << FixItHint::CreateRemoval(TypeIdParens.getBegin())
- << FixItHint::CreateRemoval(TypeIdParens.getEnd());
+ // We've already performed any required implicit conversion to integer or
+ // unscoped enumeration type.
+ // FIXME: Per CWG1464, we are required to check the value prior to
+ // converting to size_t. This will never find a negative array size in
+ // C++14 onwards, because Value is always unsigned here!
+ if (std::optional<llvm::APSInt> Value =
+ (*ArraySize)->getIntegerConstantExpr(Context)) {
+ if (Value->isSigned() && Value->isNegative()) {
+ return ExprError(Diag((*ArraySize)->getBeginLoc(),
+ diag::err_typecheck_negative_array_size)
+ << (*ArraySize)->getSourceRange());
+ }
- TypeIdParens = SourceRange();
+ if (!AllocType->isDependentType()) {
+ unsigned ActiveSizeBits =
+ ConstantArrayType::getNumAddressingBits(Context, AllocType, *Value);
+ if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context))
+ return ExprError(
+ Diag((*ArraySize)->getBeginLoc(), diag::err_array_too_large)
+ << toString(*Value, 10) << (*ArraySize)->getSourceRange());
}
+
+ KnownArraySize = Value->getZExtValue();
+ } else if (TypeIdParens.isValid()) {
+ // Can't have dynamic array size when the type-id is in parentheses.
+ Diag((*ArraySize)->getBeginLoc(), diag::ext_new_paren_array_nonconst)
+ << (*ArraySize)->getSourceRange()
+ << FixItHint::CreateRemoval(TypeIdParens.getBegin())
+ << FixItHint::CreateRemoval(TypeIdParens.getEnd());
+
+ TypeIdParens = SourceRange();
}
// Note that we do *not* convert the argument in any way. It can
@@ -2184,12 +2284,15 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
bool PassAlignment = getLangOpts().AlignedAllocation &&
Alignment > NewAlignment;
+ if (CheckArgsForPlaceholders(PlacementArgs))
+ return ExprError();
+
AllocationFunctionScope Scope = UseGlobal ? AFS_Global : AFS_Both;
if (!AllocType->isDependentType() &&
!Expr::hasAnyTypeDependentArguments(PlacementArgs) &&
FindAllocationFunctions(
StartLoc, SourceRange(PlacementLParen, PlacementRParen), Scope, Scope,
- AllocType, ArraySize.hasValue(), PassAlignment, PlacementArgs,
+ AllocType, ArraySize.has_value(), PassAlignment, PlacementArgs,
OperatorNew, OperatorDelete))
return ExprError();
@@ -2231,11 +2334,11 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
SizeTyWidth, Context.getTypeSizeInChars(AllocType).getQuantity());
// How many bytes do we want to allocate here?
- llvm::Optional<llvm::APInt> AllocationSize;
- if (!ArraySize.hasValue() && !AllocType->isDependentType()) {
+ std::optional<llvm::APInt> AllocationSize;
+ if (!ArraySize && !AllocType->isDependentType()) {
// For non-array operator new, we only want to allocate one element.
AllocationSize = SingleEltSize;
- } else if (KnownArraySize.hasValue() && !AllocType->isDependentType()) {
+ } else if (KnownArraySize && !AllocType->isDependentType()) {
// For array operator new, only deal with static array size case.
bool Overflow;
AllocationSize = llvm::APInt(SizeTyWidth, *KnownArraySize)
@@ -2247,8 +2350,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
}
IntegerLiteral AllocationSizeLiteral(
- Context,
- AllocationSize.getValueOr(llvm::APInt::getNullValue(SizeTyWidth)),
+ Context, AllocationSize.value_or(llvm::APInt::getZero(SizeTyWidth)),
SizeTy, SourceLocation());
// Otherwise, if we failed to constant-fold the allocation size, we'll
// just give up and pass-in something opaque, that isn't a null pointer.
@@ -2273,7 +2375,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// Adjust placement args by prepending conjured size and alignment exprs.
llvm::SmallVector<Expr *, 8> CallArgs;
CallArgs.reserve(NumImplicitArgs + PlacementArgs.size());
- CallArgs.emplace_back(AllocationSize.hasValue()
+ CallArgs.emplace_back(AllocationSize
? static_cast<Expr *>(&AllocationSizeLiteral)
: &OpaqueAllocationSize);
if (PassAlignment)
@@ -2302,9 +2404,10 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// Array 'new' can't have any initializers except empty parentheses.
// Initializer lists are also allowed, in C++11. Rely on the parser for the
// dialect distinction.
- if (ArraySize && !isLegalArrayNewInitializer(initStyle, Initializer)) {
- SourceRange InitRange(Inits[0]->getBeginLoc(),
- Inits[NumInits - 1]->getEndLoc());
+ if (ArraySize && !isLegalArrayNewInitializer(InitStyle, Initializer,
+ getLangOpts().CPlusPlus20)) {
+ SourceRange InitRange(Exprs.front()->getBeginLoc(),
+ Exprs.back()->getEndLoc());
Diag(StartLoc, diag::err_new_array_init_args) << InitRange;
return ExprError();
}
@@ -2312,8 +2415,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// If we can perform the initialization, and we've not already done so,
// do it now.
if (!AllocType->isDependentType() &&
- !Expr::hasAnyTypeDependentArguments(
- llvm::makeArrayRef(Inits, NumInits))) {
+ !Expr::hasAnyTypeDependentArguments(Exprs)) {
// The type we initialize is the complete type, including the array bound.
QualType InitType;
if (KnownArraySize)
@@ -2321,19 +2423,17 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
AllocType,
llvm::APInt(Context.getTypeSize(Context.getSizeType()),
*KnownArraySize),
- *ArraySize, ArrayType::Normal, 0);
+ *ArraySize, ArraySizeModifier::Normal, 0);
else if (ArraySize)
- InitType =
- Context.getIncompleteArrayType(AllocType, ArrayType::Normal, 0);
+ InitType = Context.getIncompleteArrayType(AllocType,
+ ArraySizeModifier::Normal, 0);
else
InitType = AllocType;
InitializedEntity Entity
= InitializedEntity::InitializeNew(StartLoc, InitType);
- InitializationSequence InitSeq(*this, Entity, Kind,
- MultiExprArg(Inits, NumInits));
- ExprResult FullInit = InitSeq.Perform(*this, Entity, Kind,
- MultiExprArg(Inits, NumInits));
+ InitializationSequence InitSeq(*this, Entity, Kind, Exprs);
+ ExprResult FullInit = InitSeq.Perform(*this, Entity, Kind, Exprs);
if (FullInit.isInvalid())
return ExprError();
@@ -2377,7 +2477,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
return CXXNewExpr::Create(Context, UseGlobal, OperatorNew, OperatorDelete,
PassAlignment, UsualArrayDeleteWantsSize,
- PlacementArgs, TypeIdParens, ArraySize, initStyle,
+ PlacementArgs, TypeIdParens, ArraySize, InitStyle,
Initializer, ResultType, AllocTypeInfo, Range,
DirectInitRange);
}
@@ -2593,9 +2693,9 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
// FIXME: Should the Sema create the expression and embed it in the syntax
// tree? Or should the consumer just recalculate the value?
// FIXME: Using a dummy value will interact poorly with attribute enable_if.
- IntegerLiteral Size(Context, llvm::APInt::getNullValue(
- Context.getTargetInfo().getPointerWidth(0)),
- Context.getSizeType(),
+ QualType SizeTy = Context.getSizeType();
+ unsigned SizeTyWidth = Context.getTypeSize(SizeTy);
+ IntegerLiteral Size(Context, llvm::APInt::getZero(SizeTyWidth), SizeTy,
SourceLocation());
AllocArgs.push_back(&Size);
@@ -2796,7 +2896,8 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
}
if (getLangOpts().CUDA)
- EraseUnwantedCUDAMatches(dyn_cast<FunctionDecl>(CurContext), Matches);
+ EraseUnwantedCUDAMatches(getCurFunctionDecl(/*AllowLambda=*/true),
+ Matches);
} else {
// C++1y [expr.new]p22:
// For a non-placement allocation function, the normal deallocation
@@ -2912,6 +3013,14 @@ void Sema::DeclareGlobalNewDelete() {
if (getLangOpts().OpenCLCPlusPlus)
return;
+ // C++ [basic.stc.dynamic.general]p2:
+ // The library provides default definitions for the global allocation
+ // and deallocation functions. Some global allocation and deallocation
+ // functions are replaceable ([new.delete]); these are attached to the
+ // global module ([module.unit]).
+ if (getLangOpts().CPlusPlusModules && getCurrentModule())
+ PushGlobalModuleFragment(SourceLocation());
+
// C++ [basic.std.dynamic]p2:
// [...] The following allocation and deallocation functions (18.4) are
// implicitly declared in global scope in each translation unit of a
@@ -2945,12 +3054,19 @@ void Sema::DeclareGlobalNewDelete() {
if (!StdBadAlloc && !getLangOpts().CPlusPlus11) {
// The "std::bad_alloc" class has not yet been declared, so build it
// implicitly.
- StdBadAlloc = CXXRecordDecl::Create(Context, TTK_Class,
- getOrCreateStdNamespace(),
- SourceLocation(), SourceLocation(),
- &PP.getIdentifierTable().get("bad_alloc"),
- nullptr);
+ StdBadAlloc = CXXRecordDecl::Create(
+ Context, TagTypeKind::Class, getOrCreateStdNamespace(),
+ SourceLocation(), SourceLocation(),
+ &PP.getIdentifierTable().get("bad_alloc"), nullptr);
getStdBadAlloc()->setImplicit(true);
+
+ // The implicitly declared "std::bad_alloc" should live in global module
+ // fragment.
+ if (TheGlobalModuleFragment) {
+ getStdBadAlloc()->setModuleOwnershipKind(
+ Decl::ModuleOwnershipKind::ReachableWhenImported);
+ getStdBadAlloc()->setLocalOwningModule(TheGlobalModuleFragment);
+ }
}
if (!StdAlignValT && getLangOpts().AlignedAllocation) {
// The "std::align_val_t" enum class has not yet been declared, so build it
@@ -2958,9 +3074,19 @@ void Sema::DeclareGlobalNewDelete() {
auto *AlignValT = EnumDecl::Create(
Context, getOrCreateStdNamespace(), SourceLocation(), SourceLocation(),
&PP.getIdentifierTable().get("align_val_t"), nullptr, true, true, true);
+
+ // The implicitly declared "std::align_val_t" should live in global module
+ // fragment.
+ if (TheGlobalModuleFragment) {
+ AlignValT->setModuleOwnershipKind(
+ Decl::ModuleOwnershipKind::ReachableWhenImported);
+ AlignValT->setLocalOwningModule(TheGlobalModuleFragment);
+ }
+
AlignValT->setIntegerType(Context.getSizeType());
AlignValT->setPromotionType(Context.getSizeType());
AlignValT->setImplicit(true);
+
StdAlignValT = AlignValT;
}
@@ -3002,6 +3128,9 @@ void Sema::DeclareGlobalNewDelete() {
DeclareGlobalAllocationFunctions(OO_Array_New, VoidPtr, SizeT);
DeclareGlobalAllocationFunctions(OO_Delete, Context.VoidTy, VoidPtr);
DeclareGlobalAllocationFunctions(OO_Array_Delete, Context.VoidTy, VoidPtr);
+
+ if (getLangOpts().CPlusPlusModules && getCurrentModule())
+ PopGlobalModuleFragment();
}
/// DeclareGlobalAllocationFunction - Declares a single implicit global
@@ -3023,7 +3152,7 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
for (auto *P : Func->parameters())
FuncParams.push_back(
Context.getCanonicalType(P->getType().getUnqualifiedType()));
- if (llvm::makeArrayRef(FuncParams) == Params) {
+ if (llvm::ArrayRef(FuncParams) == Params) {
// Make the function visible to name lookup, even if we found it in
// an unimported module. It either is an implicitly-declared global
// allocation function, or is suppressing that function.
@@ -3046,7 +3175,10 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
BadAllocType = Context.getTypeDeclType(getStdBadAlloc());
assert(StdBadAlloc && "Must have std::bad_alloc declared");
EPI.ExceptionSpec.Type = EST_Dynamic;
- EPI.ExceptionSpec.Exceptions = llvm::makeArrayRef(BadAllocType);
+ EPI.ExceptionSpec.Exceptions = llvm::ArrayRef(BadAllocType);
+ }
+ if (getLangOpts().NewInfallible) {
+ EPI.ExceptionSpec.Type = EST_DynamicNone;
}
} else {
EPI.ExceptionSpec =
@@ -3056,16 +3188,41 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
auto CreateAllocationFunctionDecl = [&](Attr *ExtraAttr) {
QualType FnType = Context.getFunctionType(Return, Params, EPI);
FunctionDecl *Alloc = FunctionDecl::Create(
- Context, GlobalCtx, SourceLocation(), SourceLocation(), Name,
- FnType, /*TInfo=*/nullptr, SC_None, false, true);
+ Context, GlobalCtx, SourceLocation(), SourceLocation(), Name, FnType,
+ /*TInfo=*/nullptr, SC_None, getCurFPFeatures().isFPConstrained(), false,
+ true);
Alloc->setImplicit();
// Global allocation functions should always be visible.
Alloc->setVisibleDespiteOwningModule();
- Alloc->addAttr(VisibilityAttr::CreateImplicit(
- Context, LangOpts.GlobalAllocationFunctionVisibilityHidden
- ? VisibilityAttr::Hidden
- : VisibilityAttr::Default));
+ if (HasBadAllocExceptionSpec && getLangOpts().NewInfallible &&
+ !getLangOpts().CheckNew)
+ Alloc->addAttr(
+ ReturnsNonNullAttr::CreateImplicit(Context, Alloc->getLocation()));
+
+ // C++ [basic.stc.dynamic.general]p2:
+ // The library provides default definitions for the global allocation
+ // and deallocation functions. Some global allocation and deallocation
+ // functions are replaceable ([new.delete]); these are attached to the
+ // global module ([module.unit]).
+ //
+ // In the language wording, these functions are attched to the global
+ // module all the time. But in the implementation, the global module
+ // is only meaningful when we're in a module unit. So here we attach
+ // these allocation functions to global module conditionally.
+ if (TheGlobalModuleFragment) {
+ Alloc->setModuleOwnershipKind(
+ Decl::ModuleOwnershipKind::ReachableWhenImported);
+ Alloc->setLocalOwningModule(TheGlobalModuleFragment);
+ }
+
+ if (LangOpts.hasGlobalAllocationFunctionVisibility())
+ Alloc->addAttr(VisibilityAttr::CreateImplicit(
+ Context, LangOpts.hasHiddenGlobalAllocationFunctionVisibility()
+ ? VisibilityAttr::Hidden
+ : LangOpts.hasProtectedGlobalAllocationFunctionVisibility()
+ ? VisibilityAttr::Protected
+ : VisibilityAttr::Default));
llvm::SmallVector<ParmVarDecl *, 3> ParamDecls;
for (QualType T : Params) {
@@ -3130,7 +3287,8 @@ FunctionDecl *Sema::FindDeallocationFunctionForDestructor(SourceLocation Loc,
bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name,
- FunctionDecl *&Operator, bool Diagnose) {
+ FunctionDecl *&Operator, bool Diagnose,
+ bool WantSize, bool WantAligned) {
LookupResult Found(*this, Name, StartLoc, LookupOrdinaryName);
// Try to find operator delete/operator delete[] in class scope.
LookupQualifiedName(Found, RD);
@@ -3140,13 +3298,14 @@ bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
Found.suppressDiagnostics();
- bool Overaligned = hasNewExtendedAlignment(*this, Context.getRecordType(RD));
+ bool Overaligned =
+ WantAligned || hasNewExtendedAlignment(*this, Context.getRecordType(RD));
// C++17 [expr.delete]p10:
// If the deallocation functions have class scope, the one without a
// parameter of type std::size_t is selected.
llvm::SmallVector<UsualDeallocFnInfo, 4> Matches;
- resolveDeallocationOverload(*this, Found, /*WantSize*/ false,
+ resolveDeallocationOverload(*this, Found, /*WantSize*/ WantSize,
/*WantAlign*/ Overaligned, &Matches);
// If we could find an overload, use it.
@@ -3699,7 +3858,7 @@ static bool resolveBuiltinNewDeleteOverload(Sema &S, CallExpr *TheCall,
// We do our own custom access checks below.
R.suppressDiagnostics();
- SmallVector<Expr *, 8> Args(TheCall->arg_begin(), TheCall->arg_end());
+ SmallVector<Expr *, 8> Args(TheCall->arguments());
OverloadCandidateSet Candidates(R.getNameLoc(),
OverloadCandidateSet::CSK_Normal);
for (LookupResult::iterator FnOvl = R.begin(), FnOvlEnd = R.end();
@@ -3838,7 +3997,7 @@ void Sema::CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
if (getSourceManager().isInSystemHeader(PointeeRD->getLocation()))
return;
- QualType ClassType = dtor->getThisType()->getPointeeType();
+ QualType ClassType = dtor->getFunctionObjectParameterType();
if (PointeeRD->isAbstract()) {
// If the class is abstract, we warn by default, because we're
// sure the code has undefined behavior.
@@ -3918,7 +4077,7 @@ ExprResult Sema::CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr) {
// The value of a condition that is an expression is the value of the
// expression, implicitly converted to bool.
//
- // C++2b 8.5.2p2
+ // C++23 8.5.2p2
// If the if statement is of the form if constexpr, the value of the condition
// is contextually converted to bool and the converted expression shall be
// a constant expression.
@@ -3958,17 +4117,20 @@ Sema::IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType) {
// explicit appropriate pointer target type (C++ 4.2p2).
if (!ToPtrType->getPointeeType().hasQualifiers()) {
switch (StrLit->getKind()) {
- case StringLiteral::UTF8:
- case StringLiteral::UTF16:
- case StringLiteral::UTF32:
- // We don't allow UTF literals to be implicitly converted
- break;
- case StringLiteral::Ascii:
- return (ToPointeeType->getKind() == BuiltinType::Char_U ||
- ToPointeeType->getKind() == BuiltinType::Char_S);
- case StringLiteral::Wide:
- return Context.typesAreCompatible(Context.getWideCharType(),
- QualType(ToPointeeType, 0));
+ case StringLiteralKind::UTF8:
+ case StringLiteralKind::UTF16:
+ case StringLiteralKind::UTF32:
+ // We don't allow UTF literals to be implicitly converted
+ break;
+ case StringLiteralKind::Ordinary:
+ return (ToPointeeType->getKind() == BuiltinType::Char_U ||
+ ToPointeeType->getKind() == BuiltinType::Char_S);
+ case StringLiteralKind::Wide:
+ return Context.typesAreCompatible(Context.getWideCharType(),
+ QualType(ToPointeeType, 0));
+ case StringLiteralKind::Unevaluated:
+ assert(false && "Unevaluated string literal in expression");
+ break;
}
}
}
@@ -4007,7 +4169,7 @@ static ExprResult BuildCXXCastArgument(Sema &S,
CastLoc, Ty, FoundDecl, cast<CXXConstructorDecl>(Method),
ConstructorArgs, HadMultipleCandidates,
/*ListInit*/ false, /*StdInitListInit*/ false, /*ZeroInit*/ false,
- CXXConstructExpr::CK_Complete, SourceRange());
+ CXXConstructionKind::Complete, SourceRange());
if (Result.isInvalid())
return ExprError();
@@ -4124,7 +4286,8 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
return ExprError();
case ImplicitConversionSequence::EllipsisConversion:
- llvm_unreachable("Cannot perform an ellipsis conversion");
+ case ImplicitConversionSequence::StaticObjectArgumentConversion:
+ llvm_unreachable("bad conversion");
case ImplicitConversionSequence::BadConversion:
Sema::AssignConvertType ConvTy =
@@ -4169,17 +4332,17 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
return ExprError();
return BuildCXXConstructExpr(
/*FIXME:ConstructLoc*/ SourceLocation(), ToType,
- SCS.FoundCopyConstructor, SCS.CopyConstructor,
- ConstructorArgs, /*HadMultipleCandidates*/ false,
+ SCS.FoundCopyConstructor, SCS.CopyConstructor, ConstructorArgs,
+ /*HadMultipleCandidates*/ false,
/*ListInit*/ false, /*StdInitListInit*/ false, /*ZeroInit*/ false,
- CXXConstructExpr::CK_Complete, SourceRange());
+ CXXConstructionKind::Complete, SourceRange());
}
return BuildCXXConstructExpr(
/*FIXME:ConstructLoc*/ SourceLocation(), ToType,
- SCS.FoundCopyConstructor, SCS.CopyConstructor,
- From, /*HadMultipleCandidates*/ false,
+ SCS.FoundCopyConstructor, SCS.CopyConstructor, From,
+ /*HadMultipleCandidates*/ false,
/*ListInit*/ false, /*StdInitListInit*/ false, /*ZeroInit*/ false,
- CXXConstructExpr::CK_Complete, SourceRange());
+ CXXConstructionKind::Complete, SourceRange());
}
// Resolve overloaded function references.
@@ -4193,7 +4356,17 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
if (DiagnoseUseOfDecl(Fn, From->getBeginLoc()))
return ExprError();
- From = FixOverloadedFunctionReference(From, Found, Fn);
+ ExprResult Res = FixOverloadedFunctionReference(From, Found, Fn);
+ if (Res.isInvalid())
+ return ExprError();
+
+ // We might get back another placeholder expression if we resolved to a
+ // builtin.
+ Res = CheckPlaceholderExpr(Res.get());
+ if (Res.isInvalid())
+ return ExprError();
+
+ From = Res.get();
FromType = From->getType();
}
@@ -4330,6 +4503,36 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
.get();
break;
+ case ICK_Fixed_Point_Conversion:
+ assert((FromType->isFixedPointType() || ToType->isFixedPointType()) &&
+ "Attempting implicit fixed point conversion without a fixed "
+ "point operand");
+ if (FromType->isFloatingType())
+ From = ImpCastExprToType(From, ToType, CK_FloatingToFixedPoint,
+ VK_PRValue,
+ /*BasePath=*/nullptr, CCK).get();
+ else if (ToType->isFloatingType())
+ From = ImpCastExprToType(From, ToType, CK_FixedPointToFloating,
+ VK_PRValue,
+ /*BasePath=*/nullptr, CCK).get();
+ else if (FromType->isIntegralType(Context))
+ From = ImpCastExprToType(From, ToType, CK_IntegralToFixedPoint,
+ VK_PRValue,
+ /*BasePath=*/nullptr, CCK).get();
+ else if (ToType->isIntegralType(Context))
+ From = ImpCastExprToType(From, ToType, CK_FixedPointToIntegral,
+ VK_PRValue,
+ /*BasePath=*/nullptr, CCK).get();
+ else if (ToType->isBooleanType())
+ From = ImpCastExprToType(From, ToType, CK_FixedPointToBoolean,
+ VK_PRValue,
+ /*BasePath=*/nullptr, CCK).get();
+ else
+ From = ImpCastExprToType(From, ToType, CK_FixedPointCast,
+ VK_PRValue,
+ /*BasePath=*/nullptr, CCK).get();
+ break;
+
case ICK_Compatible_Conversion:
From = ImpCastExprToType(From, ToType, CK_NoOp, From->getValueKind(),
/*BasePath=*/nullptr, CCK).get();
@@ -4453,6 +4656,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
break;
case ICK_SVE_Vector_Conversion:
+ case ICK_RVV_Vector_Conversion:
From = ImpCastExprToType(From, ToType, CK_BitCast, VK_PRValue,
/*BasePath=*/nullptr, CCK)
.get();
@@ -4598,6 +4802,13 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
From->getType()->getPointeeType().getAddressSpace())
CK = CK_AddressSpaceConversion;
+ if (!isCast(CCK) &&
+ !ToType->getPointeeType().getQualifiers().hasUnaligned() &&
+ From->getType()->getPointeeType().getQualifiers().hasUnaligned()) {
+ Diag(From->getBeginLoc(), diag::warn_imp_cast_drops_unaligned)
+ << InitialFromType << ToType;
+ }
+
From = ImpCastExprToType(From, ToType.getNonLValueExprType(Context), CK, VK,
/*BasePath=*/nullptr, CCK)
.get();
@@ -4679,12 +4890,16 @@ static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S, TypeTrait UTT,
case UTT_IsIntegral:
case UTT_IsFloatingPoint:
case UTT_IsArray:
+ case UTT_IsBoundedArray:
case UTT_IsPointer:
+ case UTT_IsNullPointer:
+ case UTT_IsReferenceable:
case UTT_IsLvalueReference:
case UTT_IsRvalueReference:
case UTT_IsMemberFunctionPointer:
case UTT_IsMemberObjectPointer:
case UTT_IsEnum:
+ case UTT_IsScopedEnum:
case UTT_IsUnion:
case UTT_IsClass:
case UTT_IsFunction:
@@ -4705,6 +4920,7 @@ static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S, TypeTrait UTT,
case UTT_IsConst:
case UTT_IsVolatile:
case UTT_IsSigned:
+ case UTT_IsUnboundedArray:
case UTT_IsUnsigned:
// This type trait always returns false, checking the type is moot.
@@ -4731,14 +4947,26 @@ static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S, TypeTrait UTT,
Loc, ArgTy, diag::err_incomplete_type_used_in_type_trait_expr);
return true;
+ // LWG3823: T shall be an array type, a complete type, or cv void.
+ case UTT_IsAggregate:
+ if (ArgTy->isArrayType() || ArgTy->isVoidType())
+ return true;
+
+ return !S.RequireCompleteType(
+ Loc, ArgTy, diag::err_incomplete_type_used_in_type_trait_expr);
+
// C++1z [meta.unary.prop]:
// remove_all_extents_t<T> shall be a complete type or cv void.
- case UTT_IsAggregate:
case UTT_IsTrivial:
case UTT_IsTriviallyCopyable:
case UTT_IsStandardLayout:
case UTT_IsPOD:
case UTT_IsLiteral:
+ // By analogy, is_trivially_relocatable and is_trivially_equality_comparable
+ // impose the same constraints.
+ case UTT_IsTriviallyRelocatable:
+ case UTT_IsTriviallyEqualityComparable:
+ case UTT_CanPassInRegs:
// Per the GCC type traits documentation, T shall be a complete type, cv void,
// or an array of unknown bound. But GCC actually imposes the same constraints
// as above.
@@ -4754,7 +4982,7 @@ static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S, TypeTrait UTT,
case UTT_HasTrivialDestructor:
case UTT_HasVirtualDestructor:
ArgTy = QualType(ArgTy->getBaseElementTypeUnsafe(), 0);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
// C++1z [meta.unary.prop]:
// T shall be a complete type, cv void, or an array of unknown bound.
@@ -4822,8 +5050,26 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
return T->isFloatingType();
case UTT_IsArray:
return T->isArrayType();
+ case UTT_IsBoundedArray:
+ if (!T->isVariableArrayType()) {
+ return T->isArrayType() && !T->isIncompleteArrayType();
+ }
+
+ Self.Diag(KeyLoc, diag::err_vla_unsupported)
+ << 1 << tok::kw___is_bounded_array;
+ return false;
+ case UTT_IsUnboundedArray:
+ if (!T->isVariableArrayType()) {
+ return T->isIncompleteArrayType();
+ }
+
+ Self.Diag(KeyLoc, diag::err_vla_unsupported)
+ << 1 << tok::kw___is_unbounded_array;
+ return false;
case UTT_IsPointer:
return T->isAnyPointerType();
+ case UTT_IsNullPointer:
+ return T->isNullPtrType();
case UTT_IsLvalueReference:
return T->isLValueReferenceType();
case UTT_IsRvalueReference:
@@ -4834,6 +5080,8 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
return T->isMemberDataPointerType();
case UTT_IsEnum:
return T->isEnumeralType();
+ case UTT_IsScopedEnum:
+ return T->isScopedEnumeralType();
case UTT_IsUnion:
return T->isUnionType();
case UTT_IsClass:
@@ -5203,26 +5451,43 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
return !T->isIncompleteType();
case UTT_HasUniqueObjectRepresentations:
return C.hasUniqueObjectRepresentations(T);
+ case UTT_IsTriviallyRelocatable:
+ return T.isTriviallyRelocatableType(C);
+ case UTT_IsReferenceable:
+ return T.isReferenceable();
+ case UTT_CanPassInRegs:
+ if (CXXRecordDecl *RD = T->getAsCXXRecordDecl(); RD && !T.hasQualifiers())
+ return RD->canPassInRegisters();
+ Self.Diag(KeyLoc, diag::err_builtin_pass_in_regs_non_class) << T;
+ return false;
+ case UTT_IsTriviallyEqualityComparable:
+ return T.isTriviallyEqualityComparableType(C);
}
}
static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT,
QualType RhsT, SourceLocation KeyLoc);
-static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
- ArrayRef<TypeSourceInfo *> Args,
- SourceLocation RParenLoc) {
+static bool EvaluateBooleanTypeTrait(Sema &S, TypeTrait Kind,
+ SourceLocation KWLoc,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc,
+ bool IsDependent) {
+ if (IsDependent)
+ return false;
+
if (Kind <= UTT_Last)
return EvaluateUnaryTypeTrait(S, Kind, KWLoc, Args[0]->getType());
- // Evaluate BTT_ReferenceBindsToTemporary alongside the IsConstructible
- // traits to avoid duplication.
- if (Kind <= BTT_Last && Kind != BTT_ReferenceBindsToTemporary)
+ // Evaluate ReferenceBindsToTemporary and ReferenceConstructsFromTemporary
+ // alongside the IsConstructible traits to avoid duplication.
+ if (Kind <= BTT_Last && Kind != BTT_ReferenceBindsToTemporary && Kind != BTT_ReferenceConstructsFromTemporary)
return EvaluateBinaryTypeTrait(S, Kind, Args[0]->getType(),
Args[1]->getType(), RParenLoc);
switch (Kind) {
case clang::BTT_ReferenceBindsToTemporary:
+ case clang::BTT_ReferenceConstructsFromTemporary:
case clang::TT_IsConstructible:
case clang::TT_IsNothrowConstructible:
case clang::TT_IsTriviallyConstructible: {
@@ -5284,7 +5549,8 @@ static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
S, Sema::ExpressionEvaluationContext::Unevaluated);
Sema::SFINAETrap SFINAE(S, /*AccessCheckingSFINAE=*/true);
Sema::ContextRAII TUContext(S, S.Context.getTranslationUnitDecl());
- InitializedEntity To(InitializedEntity::InitializeTemporary(Args[0]));
+ InitializedEntity To(
+ InitializedEntity::InitializeTemporary(S.Context, Args[0]));
InitializationKind InitKind(InitializationKind::CreateDirect(KWLoc, KWLoc,
RParenLoc));
InitializationSequence Init(S, To, InitKind, ArgExprs);
@@ -5298,11 +5564,23 @@ static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
if (Kind == clang::TT_IsConstructible)
return true;
- if (Kind == clang::BTT_ReferenceBindsToTemporary) {
+ if (Kind == clang::BTT_ReferenceBindsToTemporary || Kind == clang::BTT_ReferenceConstructsFromTemporary) {
if (!T->isReferenceType())
return false;
- return !Init.isDirectReferenceBinding();
+ if (!Init.isDirectReferenceBinding())
+ return true;
+
+ if (Kind == clang::BTT_ReferenceBindsToTemporary)
+ return false;
+
+ QualType U = Args[1]->getType();
+ if (U->isReferenceType())
+ return false;
+
+ QualType TPtr = S.Context.getPointerType(S.BuiltinRemoveReference(T, UnaryTransformType::RemoveCVRef, {}));
+ QualType UPtr = S.Context.getPointerType(S.BuiltinRemoveReference(U, UnaryTransformType::RemoveCVRef, {}));
+ return EvaluateBinaryTypeTrait(S, TypeTrait::BTT_IsConvertibleTo, UPtr, TPtr, RParenLoc);
}
if (Kind == clang::TT_IsNothrowConstructible)
@@ -5328,15 +5606,76 @@ static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
return false;
}
+namespace {
+void DiagnoseBuiltinDeprecation(Sema& S, TypeTrait Kind,
+ SourceLocation KWLoc) {
+ TypeTrait Replacement;
+ switch (Kind) {
+ case UTT_HasNothrowAssign:
+ case UTT_HasNothrowMoveAssign:
+ Replacement = BTT_IsNothrowAssignable;
+ break;
+ case UTT_HasNothrowCopy:
+ case UTT_HasNothrowConstructor:
+ Replacement = TT_IsNothrowConstructible;
+ break;
+ case UTT_HasTrivialAssign:
+ case UTT_HasTrivialMoveAssign:
+ Replacement = BTT_IsTriviallyAssignable;
+ break;
+ case UTT_HasTrivialCopy:
+ Replacement = UTT_IsTriviallyCopyable;
+ break;
+ case UTT_HasTrivialDefaultConstructor:
+ case UTT_HasTrivialMoveConstructor:
+ Replacement = TT_IsTriviallyConstructible;
+ break;
+ case UTT_HasTrivialDestructor:
+ Replacement = UTT_IsTriviallyDestructible;
+ break;
+ default:
+ return;
+ }
+ S.Diag(KWLoc, diag::warn_deprecated_builtin)
+ << getTraitSpelling(Kind) << getTraitSpelling(Replacement);
+}
+}
+
+bool Sema::CheckTypeTraitArity(unsigned Arity, SourceLocation Loc, size_t N) {
+ if (Arity && N != Arity) {
+ Diag(Loc, diag::err_type_trait_arity)
+ << Arity << 0 << (Arity > 1) << (int)N << SourceRange(Loc);
+ return false;
+ }
+
+ if (!Arity && N == 0) {
+ Diag(Loc, diag::err_type_trait_arity)
+ << 1 << 1 << 1 << (int)N << SourceRange(Loc);
+ return false;
+ }
+ return true;
+}
+
+enum class TypeTraitReturnType {
+ Bool,
+};
+
+static TypeTraitReturnType GetReturnType(TypeTrait Kind) {
+ return TypeTraitReturnType::Bool;
+}
+
ExprResult Sema::BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc) {
- QualType ResultType = Context.getLogicalOperationType();
+ if (!CheckTypeTraitArity(getTypeTraitArity(Kind), KWLoc, Args.size()))
+ return ExprError();
if (Kind <= UTT_Last && !CheckUnaryTypeTraitTypeCompleteness(
*this, Kind, KWLoc, Args[0]->getType()))
return ExprError();
+ DiagnoseBuiltinDeprecation(*this, Kind, KWLoc);
+
bool Dependent = false;
for (unsigned I = 0, N = Args.size(); I != N; ++I) {
if (Args[I]->getType()->isDependentType()) {
@@ -5345,12 +5684,15 @@ ExprResult Sema::BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
}
}
- bool Result = false;
- if (!Dependent)
- Result = evaluateTypeTrait(*this, Kind, KWLoc, Args, RParenLoc);
-
- return TypeTraitExpr::Create(Context, ResultType, KWLoc, Kind, Args,
- RParenLoc, Result);
+ switch (GetReturnType(Kind)) {
+ case TypeTraitReturnType::Bool: {
+ bool Result = EvaluateBooleanTypeTrait(*this, Kind, KWLoc, Args, RParenLoc,
+ Dependent);
+ return TypeTraitExpr::Create(Context, Context.getLogicalOperationType(),
+ KWLoc, Kind, Args, RParenLoc, Result);
+ }
+ }
+ llvm_unreachable("unhandled type trait return type");
}
ExprResult Sema::ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
@@ -5696,7 +6038,7 @@ ExprResult Sema::BuildExpressionTrait(ExpressionTrait ET,
SourceLocation RParen) {
if (Queried->isTypeDependent()) {
// Delay type-checking for type-dependent expressions.
- } else if (Queried->getType()->isPlaceholderType()) {
+ } else if (Queried->hasPlaceholderType()) {
ExprResult PE = CheckPlaceholderExpr(Queried);
if (PE.isInvalid()) return ExprError();
return BuildExpressionTrait(ET, KWLoc, PE.get(), RParen);
@@ -5712,8 +6054,7 @@ QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK,
SourceLocation Loc,
bool isIndirect) {
- assert(!LHS.get()->getType()->isPlaceholderType() &&
- !RHS.get()->getType()->isPlaceholderType() &&
+ assert(!LHS.get()->hasPlaceholderType() && !RHS.get()->hasPlaceholderType() &&
"placeholders should have been weeded out by now");
// The LHS undergoes lvalue conversions if this is ->*, and undergoes the
@@ -5890,10 +6231,8 @@ static bool TryClassUnification(Sema &Self, Expr *From, Expr *To,
// -- If E2 is an xvalue: E1 can be converted to match E2 if E1 can be
// implicitly converted to the type "rvalue reference to R2", subject to
// the constraint that the reference must bind directly.
- if (To->isLValue() || To->isXValue()) {
- QualType T = To->isLValue() ? Self.Context.getLValueReferenceType(ToType)
- : Self.Context.getRValueReferenceType(ToType);
-
+ if (To->isGLValue()) {
+ QualType T = Self.Context.getReferenceQualifiedType(To);
InitializedEntity Entity = InitializedEntity::InitializeTemporary(T);
InitializationSequence InitSeq(Self, Entity, Kind, From);
@@ -6043,8 +6382,17 @@ static bool isValidVectorForConditionalCondition(ASTContext &Ctx,
return false;
const QualType EltTy =
cast<VectorType>(CondTy.getCanonicalType())->getElementType();
- assert(!EltTy->isBooleanType() && !EltTy->isEnumeralType() &&
- "Vectors cant be boolean or enum types");
+ assert(!EltTy->isEnumeralType() && "Vectors cant be enum types");
+ return EltTy->isIntegralType(Ctx);
+}
+
+static bool isValidSizelessVectorForConditionalCondition(ASTContext &Ctx,
+ QualType CondTy) {
+ if (!CondTy->isSveVLSBuiltinType())
+ return false;
+ const QualType EltTy =
+ cast<BuiltinType>(CondTy.getCanonicalType())->getSveEltType(Ctx);
+ assert(!EltTy->isEnumeralType() && "Vectors cant be enum types");
return EltTy->isIntegralType(Ctx);
}
@@ -6079,24 +6427,24 @@ QualType Sema::CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
<< LHSType << RHSType;
return {};
}
- ResultType = LHSType;
+ ResultType = Context.getCommonSugaredType(LHSType, RHSType);
} else if (LHSVT || RHSVT) {
ResultType = CheckVectorOperands(
LHS, RHS, QuestionLoc, /*isCompAssign*/ false, /*AllowBothBool*/ true,
- /*AllowBoolConversions*/ false);
+ /*AllowBoolConversions*/ false,
+ /*AllowBoolOperation*/ true,
+ /*ReportInvalid*/ true);
if (ResultType.isNull())
return {};
} else {
// Both are scalar.
- QualType ResultElementTy;
- LHSType = LHSType.getCanonicalType().getUnqualifiedType();
- RHSType = RHSType.getCanonicalType().getUnqualifiedType();
-
- if (Context.hasSameType(LHSType, RHSType))
- ResultElementTy = LHSType;
- else
- ResultElementTy =
- UsualArithmeticConversions(LHS, RHS, QuestionLoc, ACK_Conditional);
+ LHSType = LHSType.getUnqualifiedType();
+ RHSType = RHSType.getUnqualifiedType();
+ QualType ResultElementTy =
+ Context.hasSameType(LHSType, RHSType)
+ ? Context.getCommonSugaredType(LHSType, RHSType)
+ : UsualArithmeticConversions(LHS, RHS, QuestionLoc,
+ ACK_Conditional);
if (ResultElementTy->isEnumeralType()) {
Diag(QuestionLoc, diag::err_conditional_vector_operand_type)
@@ -6108,7 +6456,7 @@ QualType Sema::CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
Context.getExtVectorType(ResultElementTy, CondVT->getNumElements());
else
ResultType = Context.getVectorType(
- ResultElementTy, CondVT->getNumElements(), VectorType::GenericVector);
+ ResultElementTy, CondVT->getNumElements(), VectorKind::Generic);
LHS = ImpCastExprToType(LHS.get(), ResultType, CK_VectorSplat);
RHS = ImpCastExprToType(RHS.get(), ResultType, CK_VectorSplat);
@@ -6137,6 +6485,89 @@ QualType Sema::CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
return ResultType;
}
+QualType Sema::CheckSizelessVectorConditionalTypes(ExprResult &Cond,
+ ExprResult &LHS,
+ ExprResult &RHS,
+ SourceLocation QuestionLoc) {
+ LHS = DefaultFunctionArrayLvalueConversion(LHS.get());
+ RHS = DefaultFunctionArrayLvalueConversion(RHS.get());
+
+ QualType CondType = Cond.get()->getType();
+ const auto *CondBT = CondType->castAs<BuiltinType>();
+ QualType CondElementTy = CondBT->getSveEltType(Context);
+ llvm::ElementCount CondElementCount =
+ Context.getBuiltinVectorTypeInfo(CondBT).EC;
+
+ QualType LHSType = LHS.get()->getType();
+ const auto *LHSBT =
+ LHSType->isSveVLSBuiltinType() ? LHSType->getAs<BuiltinType>() : nullptr;
+ QualType RHSType = RHS.get()->getType();
+ const auto *RHSBT =
+ RHSType->isSveVLSBuiltinType() ? RHSType->getAs<BuiltinType>() : nullptr;
+
+ QualType ResultType;
+
+ if (LHSBT && RHSBT) {
+ // If both are sizeless vector types, they must be the same type.
+ if (!Context.hasSameType(LHSType, RHSType)) {
+ Diag(QuestionLoc, diag::err_conditional_vector_mismatched)
+ << LHSType << RHSType;
+ return QualType();
+ }
+ ResultType = LHSType;
+ } else if (LHSBT || RHSBT) {
+ ResultType = CheckSizelessVectorOperands(
+ LHS, RHS, QuestionLoc, /*IsCompAssign*/ false, ACK_Conditional);
+ if (ResultType.isNull())
+ return QualType();
+ } else {
+ // Both are scalar so splat
+ QualType ResultElementTy;
+ LHSType = LHSType.getCanonicalType().getUnqualifiedType();
+ RHSType = RHSType.getCanonicalType().getUnqualifiedType();
+
+ if (Context.hasSameType(LHSType, RHSType))
+ ResultElementTy = LHSType;
+ else
+ ResultElementTy =
+ UsualArithmeticConversions(LHS, RHS, QuestionLoc, ACK_Conditional);
+
+ if (ResultElementTy->isEnumeralType()) {
+ Diag(QuestionLoc, diag::err_conditional_vector_operand_type)
+ << ResultElementTy;
+ return QualType();
+ }
+
+ ResultType = Context.getScalableVectorType(
+ ResultElementTy, CondElementCount.getKnownMinValue());
+
+ LHS = ImpCastExprToType(LHS.get(), ResultType, CK_VectorSplat);
+ RHS = ImpCastExprToType(RHS.get(), ResultType, CK_VectorSplat);
+ }
+
+ assert(!ResultType.isNull() && ResultType->isSveVLSBuiltinType() &&
+ "Result should have been a vector type");
+ auto *ResultBuiltinTy = ResultType->castAs<BuiltinType>();
+ QualType ResultElementTy = ResultBuiltinTy->getSveEltType(Context);
+ llvm::ElementCount ResultElementCount =
+ Context.getBuiltinVectorTypeInfo(ResultBuiltinTy).EC;
+
+ if (ResultElementCount != CondElementCount) {
+ Diag(QuestionLoc, diag::err_conditional_vector_size)
+ << CondType << ResultType;
+ return QualType();
+ }
+
+ if (Context.getTypeSize(ResultElementTy) !=
+ Context.getTypeSize(CondElementTy)) {
+ Diag(QuestionLoc, diag::err_conditional_vector_element_size)
+ << CondType << ResultType;
+ return QualType();
+ }
+
+ return ResultType;
+}
+
/// Check the operands of ?: under C++ semantics.
///
/// See C++ [expr.cond]. Note that LHS is never null, even for the GNU x ?: y
@@ -6170,10 +6601,14 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
bool IsVectorConditional =
isValidVectorForConditionalCondition(Context, Cond.get()->getType());
+ bool IsSizelessVectorConditional =
+ isValidSizelessVectorForConditionalCondition(Context,
+ Cond.get()->getType());
+
// C++11 [expr.cond]p1
// The first expression is contextually converted to bool.
if (!Cond.get()->isTypeDependent()) {
- ExprResult CondRes = IsVectorConditional
+ ExprResult CondRes = IsVectorConditional || IsSizelessVectorConditional
? DefaultFunctionArrayLvalueConversion(Cond.get())
: CheckCXXBooleanCondition(Cond.get());
if (CondRes.isInvalid())
@@ -6229,7 +6664,7 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// -- Both the second and third operands have type void; the result is of
// type void and is a prvalue.
if (LVoid && RVoid)
- return Context.VoidTy;
+ return Context.getCommonSugaredType(LTy, RTy);
// Neither holds, error.
Diag(QuestionLoc, diag::err_conditional_void_nonvoid)
@@ -6242,6 +6677,16 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
if (IsVectorConditional)
return CheckVectorConditionalTypes(Cond, LHS, RHS, QuestionLoc);
+ if (IsSizelessVectorConditional)
+ return CheckSizelessVectorConditionalTypes(Cond, LHS, RHS, QuestionLoc);
+
+ // WebAssembly tables are not allowed as conditional LHS or RHS.
+ if (LTy->isWebAssemblyTableType() || RTy->isWebAssemblyTableType()) {
+ Diag(QuestionLoc, diag::err_wasm_table_conditional_expression)
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
// C++11 [expr.cond]p3
// Otherwise, if the second and third operand have different types, and
// either has (cv) class type [...] an attempt is made to convert each of
@@ -6332,21 +6777,7 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
if (LHS.get()->getObjectKind() == OK_BitField ||
RHS.get()->getObjectKind() == OK_BitField)
OK = OK_BitField;
-
- // If we have function pointer types, unify them anyway to unify their
- // exception specifications, if any.
- if (LTy->isFunctionPointerType() || LTy->isMemberFunctionPointerType()) {
- Qualifiers Qs = LTy.getQualifiers();
- LTy = FindCompositePointerType(QuestionLoc, LHS, RHS,
- /*ConvertArgs*/false);
- LTy = Context.getQualifiedType(LTy, Qs);
-
- assert(!LTy.isNull() && "failed to find composite pointer type for "
- "canonically equivalent function ptr types");
- assert(Context.hasSameType(LTy, RTy) && "bad composite pointer type");
- }
-
- return LTy;
+ return Context.getCommonSugaredType(LTy, RTy);
}
// C++11 [expr.cond]p5
@@ -6376,43 +6807,32 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// is a prvalue temporary of the result type, which is
// copy-initialized from either the second operand or the third
// operand depending on the value of the first operand.
- if (Context.getCanonicalType(LTy) == Context.getCanonicalType(RTy)) {
+ if (Context.hasSameType(LTy, RTy)) {
if (LTy->isRecordType()) {
// The operands have class type. Make a temporary copy.
- InitializedEntity Entity = InitializedEntity::InitializeTemporary(LTy);
-
- ExprResult LHSCopy = PerformCopyInitialization(Entity,
- SourceLocation(),
- LHS);
+ ExprResult LHSCopy = PerformCopyInitialization(
+ InitializedEntity::InitializeTemporary(LTy), SourceLocation(), LHS);
if (LHSCopy.isInvalid())
return QualType();
- ExprResult RHSCopy = PerformCopyInitialization(Entity,
- SourceLocation(),
- RHS);
+ ExprResult RHSCopy = PerformCopyInitialization(
+ InitializedEntity::InitializeTemporary(RTy), SourceLocation(), RHS);
if (RHSCopy.isInvalid())
return QualType();
LHS = LHSCopy;
RHS = RHSCopy;
}
-
- // If we have function pointer types, unify them anyway to unify their
- // exception specifications, if any.
- if (LTy->isFunctionPointerType() || LTy->isMemberFunctionPointerType()) {
- LTy = FindCompositePointerType(QuestionLoc, LHS, RHS);
- assert(!LTy.isNull() && "failed to find composite pointer type for "
- "canonically equivalent function ptr types");
- }
-
- return LTy;
+ return Context.getCommonSugaredType(LTy, RTy);
}
// Extension: conditional operator involving vector types.
if (LTy->isVectorType() || RTy->isVectorType())
- return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false,
- /*AllowBothBool*/true,
- /*AllowBoolConversions*/false);
+ return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/ false,
+ /*AllowBothBool*/ true,
+ /*AllowBoolConversions*/ false,
+ /*AllowBoolOperation*/ false,
+ /*ReportInvalid*/ true);
// -- The second and third operands have arithmetic or enumeration type;
// the usual arithmetic conversions are performed to bring them to a
@@ -6468,79 +6888,6 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
return QualType();
}
-static FunctionProtoType::ExceptionSpecInfo
-mergeExceptionSpecs(Sema &S, FunctionProtoType::ExceptionSpecInfo ESI1,
- FunctionProtoType::ExceptionSpecInfo ESI2,
- SmallVectorImpl<QualType> &ExceptionTypeStorage) {
- ExceptionSpecificationType EST1 = ESI1.Type;
- ExceptionSpecificationType EST2 = ESI2.Type;
-
- // If either of them can throw anything, that is the result.
- if (EST1 == EST_None) return ESI1;
- if (EST2 == EST_None) return ESI2;
- if (EST1 == EST_MSAny) return ESI1;
- if (EST2 == EST_MSAny) return ESI2;
- if (EST1 == EST_NoexceptFalse) return ESI1;
- if (EST2 == EST_NoexceptFalse) return ESI2;
-
- // If either of them is non-throwing, the result is the other.
- if (EST1 == EST_NoThrow) return ESI2;
- if (EST2 == EST_NoThrow) return ESI1;
- if (EST1 == EST_DynamicNone) return ESI2;
- if (EST2 == EST_DynamicNone) return ESI1;
- if (EST1 == EST_BasicNoexcept) return ESI2;
- if (EST2 == EST_BasicNoexcept) return ESI1;
- if (EST1 == EST_NoexceptTrue) return ESI2;
- if (EST2 == EST_NoexceptTrue) return ESI1;
-
- // If we're left with value-dependent computed noexcept expressions, we're
- // stuck. Before C++17, we can just drop the exception specification entirely,
- // since it's not actually part of the canonical type. And this should never
- // happen in C++17, because it would mean we were computing the composite
- // pointer type of dependent types, which should never happen.
- if (EST1 == EST_DependentNoexcept || EST2 == EST_DependentNoexcept) {
- assert(!S.getLangOpts().CPlusPlus17 &&
- "computing composite pointer type of dependent types");
- return FunctionProtoType::ExceptionSpecInfo();
- }
-
- // Switch over the possibilities so that people adding new values know to
- // update this function.
- switch (EST1) {
- case EST_None:
- case EST_DynamicNone:
- case EST_MSAny:
- case EST_BasicNoexcept:
- case EST_DependentNoexcept:
- case EST_NoexceptFalse:
- case EST_NoexceptTrue:
- case EST_NoThrow:
- llvm_unreachable("handled above");
-
- case EST_Dynamic: {
- // This is the fun case: both exception specifications are dynamic. Form
- // the union of the two lists.
- assert(EST2 == EST_Dynamic && "other cases should already be handled");
- llvm::SmallPtrSet<QualType, 8> Found;
- for (auto &Exceptions : {ESI1.Exceptions, ESI2.Exceptions})
- for (QualType E : Exceptions)
- if (Found.insert(S.Context.getCanonicalType(E)).second)
- ExceptionTypeStorage.push_back(E);
-
- FunctionProtoType::ExceptionSpecInfo Result(EST_Dynamic);
- Result.Exceptions = ExceptionTypeStorage;
- return Result;
- }
-
- case EST_Unevaluated:
- case EST_Uninstantiated:
- case EST_Unparsed:
- llvm_unreachable("shouldn't see unresolved exception specifications here");
- }
-
- llvm_unreachable("invalid ExceptionSpecificationType");
-}
-
/// Find a merged pointer type and convert the two expressions to it.
///
/// This finds the composite pointer type for \p E1 and \p E2 according to
@@ -6608,7 +6955,7 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
const Type *ClassOrBound;
Step(Kind K, const Type *ClassOrBound = nullptr)
- : K(K), Quals(), ClassOrBound(ClassOrBound) {}
+ : K(K), ClassOrBound(ClassOrBound) {}
QualType rebuild(ASTContext &Ctx, QualType T) const {
T = Ctx.getQualifiedType(T, Quals);
switch (K) {
@@ -6621,9 +6968,9 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
case Array:
if (auto *CAT = cast_or_null<ConstantArrayType>(ClassOrBound))
return Ctx.getConstantArrayType(T, CAT->getSize(), nullptr,
- ArrayType::Normal, 0);
+ ArraySizeModifier::Normal, 0);
else
- return Ctx.getIncompleteArrayType(T, ArrayType::Normal, 0);
+ return Ctx.getIncompleteArrayType(T, ArraySizeModifier::Normal, 0);
}
llvm_unreachable("unknown step kind");
}
@@ -6669,8 +7016,15 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
} else if (Steps.size() == 1) {
bool MaybeQ1 = Q1.isAddressSpaceSupersetOf(Q2);
bool MaybeQ2 = Q2.isAddressSpaceSupersetOf(Q1);
- if (MaybeQ1 == MaybeQ2)
- return QualType(); // No unique best address space.
+ if (MaybeQ1 == MaybeQ2) {
+ // Exception for ptr size address spaces. Should be able to choose
+ // either address space during comparison.
+ if (isPtrSizeAddressSpace(Q1.getAddressSpace()) ||
+ isPtrSizeAddressSpace(Q2.getAddressSpace()))
+ MaybeQ1 = true;
+ else
+ return QualType(); // No unique best address space.
+ }
Quals.setAddressSpace(MaybeQ1 ? Q1.getAddressSpace()
: Q2.getAddressSpace());
} else {
@@ -6699,6 +7053,36 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
}
// FIXME: Can we unify the following with UnwrapSimilarTypes?
+
+ const ArrayType *Arr1, *Arr2;
+ if ((Arr1 = Context.getAsArrayType(Composite1)) &&
+ (Arr2 = Context.getAsArrayType(Composite2))) {
+ auto *CAT1 = dyn_cast<ConstantArrayType>(Arr1);
+ auto *CAT2 = dyn_cast<ConstantArrayType>(Arr2);
+ if (CAT1 && CAT2 && CAT1->getSize() == CAT2->getSize()) {
+ Composite1 = Arr1->getElementType();
+ Composite2 = Arr2->getElementType();
+ Steps.emplace_back(Step::Array, CAT1);
+ continue;
+ }
+ bool IAT1 = isa<IncompleteArrayType>(Arr1);
+ bool IAT2 = isa<IncompleteArrayType>(Arr2);
+ if ((IAT1 && IAT2) ||
+ (getLangOpts().CPlusPlus20 && (IAT1 != IAT2) &&
+ ((bool)CAT1 != (bool)CAT2) &&
+ (Steps.empty() || Steps.back().K != Step::Array))) {
+ // In C++20 onwards, we can unify an array of N T with an array of
+ // a different or unknown bound. But we can't form an array whose
+ // element type is an array of unknown bound by doing so.
+ Composite1 = Arr1->getElementType();
+ Composite2 = Arr2->getElementType();
+ Steps.emplace_back(Step::Array);
+ if (CAT1 || CAT2)
+ NeedConstBefore = Steps.size();
+ continue;
+ }
+ }
+
const PointerType *Ptr1, *Ptr2;
if ((Ptr1 = Composite1->getAs<PointerType>()) &&
(Ptr2 = Composite2->getAs<PointerType>())) {
@@ -6759,8 +7143,6 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
continue;
}
- // FIXME: arrays
-
// FIXME: block pointer types?
// Cannot unwrap any more types.
@@ -6809,9 +7191,9 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
// The result is nothrow if both operands are.
SmallVector<QualType, 8> ExceptionTypeStorage;
- EPI1.ExceptionSpec = EPI2.ExceptionSpec =
- mergeExceptionSpecs(*this, EPI1.ExceptionSpec, EPI2.ExceptionSpec,
- ExceptionTypeStorage);
+ EPI1.ExceptionSpec = EPI2.ExceptionSpec = Context.mergeExceptionSpecs(
+ EPI1.ExceptionSpec, EPI2.ExceptionSpec, ExceptionTypeStorage,
+ getLangOpts().CPlusPlus17);
Composite1 = Context.getFunctionType(FPT1->getReturnType(),
FPT1->getParamTypes(), EPI1);
@@ -6855,7 +7237,7 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
Steps[I].Quals.addConst();
// Rebuild the composite type.
- QualType Composite = Composite1;
+ QualType Composite = Context.getCommonSugaredType(Composite1, Composite2);
for (auto &S : llvm::reverse(Steps))
Composite = S.rebuild(Context, Composite);
@@ -7072,8 +7454,8 @@ Expr *Sema::MaybeCreateExprWithCleanups(Expr *SubExpr) {
if (!Cleanup.exprNeedsCleanups())
return SubExpr;
- auto Cleanups = llvm::makeArrayRef(ExprCleanupObjects.begin() + FirstCleanup,
- ExprCleanupObjects.size() - FirstCleanup);
+ auto Cleanups = llvm::ArrayRef(ExprCleanupObjects.begin() + FirstCleanup,
+ ExprCleanupObjects.size() - FirstCleanup);
auto *E = ExprWithCleanups::Create(
Context, SubExpr, Cleanup.cleanupsHaveSideEffects(), Cleanups);
@@ -7094,8 +7476,9 @@ Stmt *Sema::MaybeCreateStmtWithCleanups(Stmt *SubStmt) {
// a StmtExpr; currently this is only used for asm statements.
// This is hacky, either create a new CXXStmtWithTemporaries statement or
// a new AsmStmtWithTemporaries.
- CompoundStmt *CompStmt = CompoundStmt::Create(
- Context, SubStmt, SourceLocation(), SourceLocation());
+ CompoundStmt *CompStmt =
+ CompoundStmt::Create(Context, SubStmt, FPOptionsOverride(),
+ SourceLocation(), SourceLocation());
Expr *E = new (Context)
StmtExpr(CompStmt, Context.VoidTy, SourceLocation(), SourceLocation(),
/*FIXME TemplateDepth=*/0);
@@ -7144,7 +7527,7 @@ ExprResult Sema::ActOnDecltypeExpression(Expr *E) {
return BinaryOperator::Create(Context, BO->getLHS(), RHS.get(), BO_Comma,
BO->getType(), BO->getValueKind(),
BO->getObjectKind(), BO->getOperatorLoc(),
- BO->getFPFeatures(getLangOpts()));
+ BO->getFPFeatures());
}
}
@@ -7369,8 +7752,10 @@ ExprResult Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base,
// the member function body.
if (!BaseType->isDependentType() &&
!isThisOutsideMemberFunctionBody(BaseType) &&
- RequireCompleteType(OpLoc, BaseType, diag::err_incomplete_member_access))
- return ExprError();
+ RequireCompleteType(OpLoc, BaseType,
+ diag::err_incomplete_member_access)) {
+ return CreateRecoveryExpr(Base->getBeginLoc(), Base->getEndLoc(), {Base});
+ }
// C++ [basic.lookup.classref]p2:
// If the id-expression in a class member access (5.2.5) is an
@@ -7474,8 +7859,8 @@ ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
// designated by the pseudo-destructor-name shall be the same type.
if (DestructedTypeInfo) {
QualType DestructedType = DestructedTypeInfo->getType();
- SourceLocation DestructedTypeStart
- = DestructedTypeInfo->getTypeLoc().getLocalSourceRange().getBegin();
+ SourceLocation DestructedTypeStart =
+ DestructedTypeInfo->getTypeLoc().getBeginLoc();
if (!DestructedType->isDependentType() && !ObjectType->isDependentType()) {
if (!Context.hasSameUnqualifiedType(DestructedType, ObjectType)) {
// Detect dot pseudo destructor calls on pointer objects, e.g.:
@@ -7500,7 +7885,7 @@ ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
} else {
Diag(DestructedTypeStart, diag::err_pseudo_dtor_type_mismatch)
<< ObjectType << DestructedType << Base->getSourceRange()
- << DestructedTypeInfo->getTypeLoc().getLocalSourceRange();
+ << DestructedTypeInfo->getTypeLoc().getSourceRange();
// Recover by setting the destructed type to the object type.
DestructedType = ObjectType;
@@ -7516,8 +7901,8 @@ ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
// type.
} else {
Diag(DestructedTypeStart, diag::err_arc_pseudo_dtor_inconstant_quals)
- << ObjectType << DestructedType << Base->getSourceRange()
- << DestructedTypeInfo->getTypeLoc().getLocalSourceRange();
+ << ObjectType << DestructedType << Base->getSourceRange()
+ << DestructedTypeInfo->getTypeLoc().getSourceRange();
}
// Recover by setting the destructed type to the object type.
@@ -7541,10 +7926,10 @@ ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
if (!ScopeType->isDependentType() && !ObjectType->isDependentType() &&
!Context.hasSameUnqualifiedType(ScopeType, ObjectType)) {
- Diag(ScopeTypeInfo->getTypeLoc().getLocalSourceRange().getBegin(),
+ Diag(ScopeTypeInfo->getTypeLoc().getSourceRange().getBegin(),
diag::err_pseudo_dtor_type_mismatch)
- << ObjectType << ScopeType << Base->getSourceRange()
- << ScopeTypeInfo->getTypeLoc().getLocalSourceRange();
+ << ObjectType << ScopeType << Base->getSourceRange()
+ << ScopeTypeInfo->getTypeLoc().getSourceRange();
ScopeType = QualType();
ScopeTypeInfo = nullptr;
@@ -7722,12 +8107,12 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
return true;
}
- QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc(),
- false);
+ QualType T = BuildDecltypeType(DS.getRepAsExpr(), /*AsUnevaluated=*/false);
TypeLocBuilder TLB;
DecltypeTypeLoc DecltypeTL = TLB.push<DecltypeTypeLoc>(T);
- DecltypeTL.setNameLoc(DS.getTypeSpecTypeLoc());
+ DecltypeTL.setDecltypeLoc(DS.getTypeSpecTypeLoc());
+ DecltypeTL.setRParenLoc(DS.getTypeofParensRange().getEnd());
TypeSourceInfo *DestructedTypeInfo = TLB.getTypeSourceInfo(Context, T);
PseudoDestructorTypeStorage Destructed(DestructedTypeInfo);
@@ -7736,68 +8121,6 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
Destructed);
}
-ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
- CXXConversionDecl *Method,
- bool HadMultipleCandidates) {
- // Convert the expression to match the conversion function's implicit object
- // parameter.
- ExprResult Exp = PerformObjectArgumentInitialization(E, /*Qualifier=*/nullptr,
- FoundDecl, Method);
- if (Exp.isInvalid())
- return true;
-
- if (Method->getParent()->isLambda() &&
- Method->getConversionType()->isBlockPointerType()) {
- // This is a lambda conversion to block pointer; check if the argument
- // was a LambdaExpr.
- Expr *SubE = E;
- CastExpr *CE = dyn_cast<CastExpr>(SubE);
- if (CE && CE->getCastKind() == CK_NoOp)
- SubE = CE->getSubExpr();
- SubE = SubE->IgnoreParens();
- if (CXXBindTemporaryExpr *BE = dyn_cast<CXXBindTemporaryExpr>(SubE))
- SubE = BE->getSubExpr();
- if (isa<LambdaExpr>(SubE)) {
- // For the conversion to block pointer on a lambda expression, we
- // construct a special BlockLiteral instead; this doesn't really make
- // a difference in ARC, but outside of ARC the resulting block literal
- // follows the normal lifetime rules for block literals instead of being
- // autoreleased.
- PushExpressionEvaluationContext(
- ExpressionEvaluationContext::PotentiallyEvaluated);
- ExprResult BlockExp = BuildBlockForLambdaConversion(
- Exp.get()->getExprLoc(), Exp.get()->getExprLoc(), Method, Exp.get());
- PopExpressionEvaluationContext();
-
- // FIXME: This note should be produced by a CodeSynthesisContext.
- if (BlockExp.isInvalid())
- Diag(Exp.get()->getExprLoc(), diag::note_lambda_to_block_conv);
- return BlockExp;
- }
- }
-
- MemberExpr *ME =
- BuildMemberExpr(Exp.get(), /*IsArrow=*/false, SourceLocation(),
- NestedNameSpecifierLoc(), SourceLocation(), Method,
- DeclAccessPair::make(FoundDecl, FoundDecl->getAccess()),
- HadMultipleCandidates, DeclarationNameInfo(),
- Context.BoundMemberTy, VK_PRValue, OK_Ordinary);
-
- QualType ResultType = Method->getReturnType();
- ExprValueKind VK = Expr::getValueKindForType(ResultType);
- ResultType = ResultType.getNonLValueExprType(Context);
-
- CXXMemberCallExpr *CE = CXXMemberCallExpr::Create(
- Context, ME, /*Args=*/{}, ResultType, VK, Exp.get()->getEndLoc(),
- CurFPFeatureOverrides());
-
- if (CheckFunctionCall(Method, CE,
- Method->getType()->castAs<FunctionProtoType>()))
- return ExprError();
-
- return CheckForImmediateInvocation(CE, CE->getMethodDecl());
-}
-
ExprResult Sema::BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen) {
// If the operand is an unresolved lookup expression, the expression is ill-
@@ -7833,6 +8156,8 @@ ExprResult Sema::ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation,
static void MaybeDecrementCount(
Expr *E, llvm::DenseMap<const VarDecl *, int> &RefsMinusAssignments) {
DeclRefExpr *LHS = nullptr;
+ bool IsCompoundAssign = false;
+ bool isIncrementDecrementUnaryOp = false;
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
if (BO->getLHS()->getType()->isDependentType() ||
BO->getRHS()->getType()->isDependentType()) {
@@ -7840,17 +8165,30 @@ static void MaybeDecrementCount(
return;
} else if (!BO->isAssignmentOp())
return;
+ else
+ IsCompoundAssign = BO->isCompoundAssignmentOp();
LHS = dyn_cast<DeclRefExpr>(BO->getLHS());
} else if (CXXOperatorCallExpr *COCE = dyn_cast<CXXOperatorCallExpr>(E)) {
if (COCE->getOperator() != OO_Equal)
return;
LHS = dyn_cast<DeclRefExpr>(COCE->getArg(0));
+ } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ if (!UO->isIncrementDecrementOp())
+ return;
+ isIncrementDecrementUnaryOp = true;
+ LHS = dyn_cast<DeclRefExpr>(UO->getSubExpr());
}
if (!LHS)
return;
VarDecl *VD = dyn_cast<VarDecl>(LHS->getDecl());
if (!VD)
return;
+ // Don't decrement RefsMinusAssignments if volatile variable with compound
+ // assignment (+=, ...) or increment/decrement unary operator to avoid
+ // potential unused-but-set-variable warning.
+ if ((IsCompoundAssign || isIncrementDecrementUnaryOp) &&
+ VD->getType().isVolatileQualified())
+ return;
auto iter = RefsMinusAssignments.find(VD);
if (iter == RefsMinusAssignments.end())
return;
@@ -7957,12 +8295,12 @@ static inline bool VariableCanNeverBeAConstantExpression(VarDecl *Var,
const VarDecl *DefVD = nullptr;
// If there is no initializer - this can not be a constant expression.
- if (!Var->getAnyInitializer(DefVD)) return true;
+ const Expr *Init = Var->getAnyInitializer(DefVD);
+ if (!Init)
+ return true;
assert(DefVD);
- if (DefVD->isWeak()) return false;
- EvaluatedStmt *Eval = DefVD->ensureEvaluatedStmt();
-
- Expr *Init = cast<Expr>(Eval->Value);
+ if (DefVD->isWeak())
+ return false;
if (Var->getType()->isDependentType() || Init->isValueDependent()) {
// FIXME: Teach the constant evaluator to deal with the non-dependent parts
@@ -8000,7 +8338,7 @@ static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
// All the potentially captureable variables in the current nested
// lambda (within a generic outer lambda), must be captured by an
// outer lambda that is enclosed within a non-dependent context.
- CurrentLSI->visitPotentialCaptures([&] (VarDecl *Var, Expr *VarExpr) {
+ CurrentLSI->visitPotentialCaptures([&](ValueDecl *Var, Expr *VarExpr) {
// If the variable is clearly identified as non-odr-used and the full
// expression is not instantiation dependent, only then do we not
// need to check enclosing lambda's for speculative captures.
@@ -8016,15 +8354,18 @@ static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
!IsFullExprInstantiationDependent)
return;
+ VarDecl *UnderlyingVar = Var->getPotentiallyDecomposedVarDecl();
+ if (!UnderlyingVar)
+ return;
+
// If we have a capture-capable lambda for the variable, go ahead and
// capture the variable in that lambda (and all its enclosing lambdas).
- if (const Optional<unsigned> Index =
+ if (const std::optional<unsigned> Index =
getStackIndexOfNearestEnclosingCaptureCapableLambda(
S.FunctionScopes, Var, S))
- S.MarkCaptureUsedInEnclosingContext(Var, VarExpr->getExprLoc(),
- Index.getValue());
+ S.MarkCaptureUsedInEnclosingContext(Var, VarExpr->getExprLoc(), *Index);
const bool IsVarNeverAConstantExpression =
- VariableCanNeverBeAConstantExpression(Var, S.Context);
+ VariableCanNeverBeAConstantExpression(UnderlyingVar, S.Context);
if (!IsFullExprInstantiationDependent || IsVarNeverAConstantExpression) {
// This full expression is not instantiation dependent or the variable
// can not be used in a constant expression - which means
@@ -8052,10 +8393,10 @@ static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
if (CurrentLSI->hasPotentialThisCapture()) {
// If we have a capture-capable lambda for 'this', go ahead and capture
// 'this' in that lambda (and all its enclosing lambdas).
- if (const Optional<unsigned> Index =
+ if (const std::optional<unsigned> Index =
getStackIndexOfNearestEnclosingCaptureCapableLambda(
S.FunctionScopes, /*0 is 'this'*/ nullptr, S)) {
- const unsigned FunctionScopeIndexOfCapturableLambda = Index.getValue();
+ const unsigned FunctionScopeIndexOfCapturableLambda = *Index;
S.CheckCXXThisCapture(CurrentLSI->PotentialThisCaptureLocation,
/*Explicit*/ false, /*BuildAndDiagnose*/ true,
&FunctionScopeIndexOfCapturableLambda);
@@ -8195,7 +8536,7 @@ class TransformTypos : public TreeTransform<TransformTypos> {
///
/// Returns true if there are any untried correction combinations.
bool CheckAndAdvanceTypoExprCorrectionStreams() {
- for (auto TE : TypoExprs) {
+ for (auto *TE : TypoExprs) {
auto &State = SemaRef.getTypoExprState(TE);
TransformCache.erase(TE);
if (!State.Consumer->hasMadeAnyCorrectionProgress())
@@ -8217,7 +8558,7 @@ class TransformTypos : public TreeTransform<TransformTypos> {
return DRE->getFoundDecl();
if (auto *ME = dyn_cast<MemberExpr>(E))
return ME->getFoundDecl();
- // FIXME: Add any other expr types that could be be seen by the delayed typo
+ // FIXME: Add any other expr types that could be seen by the delayed typo
// correction TreeTransform for which the corresponding TypoCorrection could
// contain multiple decls.
return nullptr;
@@ -8262,7 +8603,7 @@ class TransformTypos : public TreeTransform<TransformTypos> {
// TypoExprs were created recursively and thus won't be in our
// Sema's TypoExprs - they were created in our `RecursiveTransformLoop`.
auto &SemaTypoExprs = SemaRef.TypoExprs;
- for (auto TE : TypoExprs) {
+ for (auto *TE : TypoExprs) {
TransformCache.erase(TE);
SemaRef.clearDelayedTypo(TE);
@@ -8491,14 +8832,14 @@ Sema::CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl,
}
ExprResult Sema::ActOnFinishFullExpr(Expr *FE, SourceLocation CC,
- bool DiscardedValue,
- bool IsConstexpr) {
+ bool DiscardedValue, bool IsConstexpr,
+ bool IsTemplateArgument) {
ExprResult FullExpr = FE;
if (!FullExpr.get())
return ExprError();
- if (DiagnoseUnexpandedParameterPack(FullExpr.get()))
+ if (!IsTemplateArgument && DiagnoseUnexpandedParameterPack(FullExpr.get()))
return ExprError();
if (DiscardedValue) {
@@ -8518,7 +8859,7 @@ ExprResult Sema::ActOnFinishFullExpr(Expr *FE, SourceLocation CC,
if (FullExpr.isInvalid())
return ExprError();
- DiagnoseUnusedExprResult(FullExpr.get());
+ DiagnoseUnusedExprResult(FullExpr.get(), diag::warn_unused_expr);
}
FullExpr = CorrectDelayedTyposInExpr(FullExpr.get(), /*InitDecl=*/nullptr,
@@ -8655,9 +8996,10 @@ Sema::ActOnTypeRequirement(SourceLocation TypenameKWLoc, CXXScopeSpec &SS,
"Exactly one of TypeName and TemplateId must be specified.");
TypeSourceInfo *TSI = nullptr;
if (TypeName) {
- QualType T = CheckTypenameType(ETK_Typename, TypenameKWLoc,
- SS.getWithLocInContext(Context), *TypeName,
- NameLoc, &TSI, /*DeducedTypeContext=*/false);
+ QualType T =
+ CheckTypenameType(ElaboratedTypeKeyword::Typename, TypenameKWLoc,
+ SS.getWithLocInContext(Context), *TypeName, NameLoc,
+ &TSI, /*DeducedTSTContext=*/false);
if (T.isNull())
return nullptr;
} else {
@@ -8708,7 +9050,7 @@ Sema::ActOnCompoundRequirement(
/*HasTypeConstraint=*/true);
if (BuildTypeConstraint(SS, TypeConstraint, TParam,
- /*EllpsisLoc=*/SourceLocation(),
+ /*EllipsisLoc=*/SourceLocation(),
/*AllowUnexpandedPack=*/true))
// Just produce a requirement with no type requirements.
return BuildExprRequirement(E, /*IsSimple=*/false, NoexceptLoc, {});
@@ -8729,7 +9071,8 @@ Sema::BuildExprRequirement(
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement) {
auto Status = concepts::ExprRequirement::SS_Satisfied;
ConceptSpecializationExpr *SubstitutedConstraintExpr = nullptr;
- if (E->isInstantiationDependent() || ReturnTypeRequirement.isDependent())
+ if (E->isInstantiationDependent() || E->getType()->isPlaceholderType() ||
+ ReturnTypeRequirement.isDependent())
Status = concepts::ExprRequirement::SS_Dependent;
else if (NoexceptLoc.isValid() && canThrow(E) == CanThrowResult::CT_Can)
Status = concepts::ExprRequirement::SS_NoexceptNotMet;
@@ -8742,21 +9085,30 @@ Sema::BuildExprRequirement(
TemplateParameterList *TPL =
ReturnTypeRequirement.getTypeConstraintTemplateParameterList();
QualType MatchedType =
- getDecltypeForParenthesizedExpr(E).getCanonicalType();
+ Context.getReferenceQualifiedType(E).getCanonicalType();
llvm::SmallVector<TemplateArgument, 1> Args;
Args.push_back(TemplateArgument(MatchedType));
+
+ auto *Param = cast<TemplateTypeParmDecl>(TPL->getParam(0));
+
TemplateArgumentList TAL(TemplateArgumentList::OnStack, Args);
- MultiLevelTemplateArgumentList MLTAL(TAL);
- for (unsigned I = 0; I < TPL->getDepth(); ++I)
- MLTAL.addOuterRetainedLevel();
- Expr *IDC =
- cast<TemplateTypeParmDecl>(TPL->getParam(0))->getTypeConstraint()
- ->getImmediatelyDeclaredConstraint();
+ MultiLevelTemplateArgumentList MLTAL(Param, TAL.asArray(),
+ /*Final=*/false);
+ MLTAL.addOuterRetainedLevels(TPL->getDepth());
+ const TypeConstraint *TC = Param->getTypeConstraint();
+ assert(TC && "Type Constraint cannot be null here");
+ auto *IDC = TC->getImmediatelyDeclaredConstraint();
+ assert(IDC && "ImmediatelyDeclaredConstraint can't be null here.");
ExprResult Constraint = SubstExpr(IDC, MLTAL);
- assert(!Constraint.isInvalid() &&
- "Substitution cannot fail as it is simply putting a type template "
- "argument into a concept specialization expression's parameter.");
-
+ if (Constraint.isInvalid()) {
+ return new (Context) concepts::ExprRequirement(
+ concepts::createSubstDiagAt(*this, IDC->getExprLoc(),
+ [&](llvm::raw_ostream &OS) {
+ IDC->printPretty(OS, /*Helper=*/nullptr,
+ getPrintingPolicy());
+ }),
+ IsSimple, NoexceptLoc, ReturnTypeRequirement);
+ }
SubstitutedConstraintExpr =
cast<ConceptSpecializationExpr>(Constraint.get());
if (!SubstitutedConstraintExpr->isSatisfied())
@@ -8804,9 +9156,11 @@ Sema::BuildNestedRequirement(Expr *Constraint) {
}
concepts::NestedRequirement *
-Sema::BuildNestedRequirement(
- concepts::Requirement::SubstitutionDiagnostic *SubstDiag) {
- return new (Context) concepts::NestedRequirement(SubstDiag);
+Sema::BuildNestedRequirement(StringRef InvalidConstraintEntity,
+ const ASTConstraintSatisfaction &Satisfaction) {
+ return new (Context) concepts::NestedRequirement(
+ InvalidConstraintEntity,
+ ASTConstraintSatisfaction::Rebuild(Context, Satisfaction));
}
RequiresExprBodyDecl *
@@ -8845,14 +9199,14 @@ void Sema::ActOnFinishRequiresExpr() {
assert(CurContext && "Popped translation unit!");
}
-ExprResult
-Sema::ActOnRequiresExpr(SourceLocation RequiresKWLoc,
- RequiresExprBodyDecl *Body,
- ArrayRef<ParmVarDecl *> LocalParameters,
- ArrayRef<concepts::Requirement *> Requirements,
- SourceLocation ClosingBraceLoc) {
- auto *RE = RequiresExpr::Create(Context, RequiresKWLoc, Body, LocalParameters,
- Requirements, ClosingBraceLoc);
+ExprResult Sema::ActOnRequiresExpr(
+ SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body,
+ SourceLocation LParenLoc, ArrayRef<ParmVarDecl *> LocalParameters,
+ SourceLocation RParenLoc, ArrayRef<concepts::Requirement *> Requirements,
+ SourceLocation ClosingBraceLoc) {
+ auto *RE = RequiresExpr::Create(Context, RequiresKWLoc, Body, LParenLoc,
+ LocalParameters, RParenLoc, Requirements,
+ ClosingBraceLoc);
if (DiagnoseUnexpandedParameterPackInRequiresExpr(RE))
return ExprError();
return RE;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp
index af2aa49c0103..32998ae60eaf 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp
@@ -46,7 +46,7 @@ enum IMAKind {
/// The reference may be to an instance member, but it might be invalid if
/// so, because the context is not an instance method.
- IMA_Mixed_StaticContext,
+ IMA_Mixed_StaticOrExplicitContext,
/// The reference may be to an instance member, but it is invalid if
/// so, because the context is from an unrelated class.
@@ -63,7 +63,7 @@ enum IMAKind {
/// The reference may be to an unresolved using declaration and the
/// context is not an instance method.
- IMA_Unresolved_StaticContext,
+ IMA_Unresolved_StaticOrExplicitContext,
// The reference refers to a field which is not a member of the containing
// class, which is allowed because we're in C++11 mode and the context is
@@ -72,7 +72,7 @@ enum IMAKind {
/// All possible referrents are instance members and the current
/// context is not an instance method.
- IMA_Error_StaticContext,
+ IMA_Error_StaticOrExplicitContext,
/// All possible referrents are instance members of an unrelated
/// class.
@@ -91,11 +91,14 @@ static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef,
DeclContext *DC = SemaRef.getFunctionLevelDeclContext();
- bool isStaticContext = SemaRef.CXXThisTypeOverride.isNull() &&
- (!isa<CXXMethodDecl>(DC) || cast<CXXMethodDecl>(DC)->isStatic());
+ bool isStaticOrExplicitContext =
+ SemaRef.CXXThisTypeOverride.isNull() &&
+ (!isa<CXXMethodDecl>(DC) || cast<CXXMethodDecl>(DC)->isStatic() ||
+ cast<CXXMethodDecl>(DC)->isExplicitObjectMemberFunction());
if (R.isUnresolvableResult())
- return isStaticContext ? IMA_Unresolved_StaticContext : IMA_Unresolved;
+ return isStaticOrExplicitContext ? IMA_Unresolved_StaticOrExplicitContext
+ : IMA_Unresolved;
// Collect all the declaring classes of instance members we find.
bool hasNonInstance = false;
@@ -144,6 +147,7 @@ static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef,
case Sema::ExpressionEvaluationContext::DiscardedStatement:
case Sema::ExpressionEvaluationContext::ConstantEvaluated:
+ case Sema::ExpressionEvaluationContext::ImmediateFunctionContext:
case Sema::ExpressionEvaluationContext::PotentiallyEvaluated:
case Sema::ExpressionEvaluationContext::PotentiallyEvaluatedIfUsed:
break;
@@ -151,19 +155,22 @@ static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef,
// If the current context is not an instance method, it can't be
// an implicit member reference.
- if (isStaticContext) {
+ if (isStaticOrExplicitContext) {
if (hasNonInstance)
- return IMA_Mixed_StaticContext;
+ return IMA_Mixed_StaticOrExplicitContext;
return AbstractInstanceResult ? AbstractInstanceResult
- : IMA_Error_StaticContext;
+ : IMA_Error_StaticOrExplicitContext;
}
CXXRecordDecl *contextClass;
- if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(DC))
+ if (auto *MD = dyn_cast<CXXMethodDecl>(DC))
contextClass = MD->getParent()->getCanonicalDecl();
+ else if (auto *RD = dyn_cast<CXXRecordDecl>(DC))
+ contextClass = RD;
else
- contextClass = cast<CXXRecordDecl>(DC);
+ return AbstractInstanceResult ? AbstractInstanceResult
+ : IMA_Error_StaticOrExplicitContext;
// [class.mfct.non-static]p3:
// ...is used in the body of a non-static member function of class X,
@@ -210,14 +217,31 @@ static void diagnoseInstanceReference(Sema &SemaRef,
CXXRecordDecl *RepClass = dyn_cast<CXXRecordDecl>(Rep->getDeclContext());
bool InStaticMethod = Method && Method->isStatic();
+ bool InExplicitObjectMethod =
+ Method && Method->isExplicitObjectMemberFunction();
bool IsField = isa<FieldDecl>(Rep) || isa<IndirectFieldDecl>(Rep);
+ std::string Replacement;
+ if (InExplicitObjectMethod) {
+ DeclarationName N = Method->getParamDecl(0)->getDeclName();
+ if (!N.isEmpty()) {
+ Replacement.append(N.getAsString());
+ Replacement.append(".");
+ }
+ }
if (IsField && InStaticMethod)
// "invalid use of member 'x' in static member function"
- SemaRef.Diag(Loc, diag::err_invalid_member_use_in_static_method)
- << Range << nameInfo.getName();
- else if (ContextClass && RepClass && SS.isEmpty() && !InStaticMethod &&
- !RepClass->Equals(ContextClass) && RepClass->Encloses(ContextClass))
+ SemaRef.Diag(Loc, diag::err_invalid_member_use_in_method)
+ << Range << nameInfo.getName() << /*static*/ 0;
+ else if (IsField && InExplicitObjectMethod) {
+ auto Diag = SemaRef.Diag(Loc, diag::err_invalid_member_use_in_method)
+ << Range << nameInfo.getName() << /*explicit*/ 1;
+ if (!Replacement.empty())
+ Diag << FixItHint::CreateInsertion(Loc, Replacement);
+ } else if (ContextClass && RepClass && SS.isEmpty() &&
+ !InExplicitObjectMethod && !InStaticMethod &&
+ !RepClass->Equals(ContextClass) &&
+ RepClass->Encloses(ContextClass))
// Unqualified lookup in a non-static member function found a member of an
// enclosing class.
SemaRef.Diag(Loc, diag::err_nested_non_static_member_use)
@@ -225,9 +249,18 @@ static void diagnoseInstanceReference(Sema &SemaRef,
else if (IsField)
SemaRef.Diag(Loc, diag::err_invalid_non_static_member_use)
<< nameInfo.getName() << Range;
- else
+ else if (!InExplicitObjectMethod)
SemaRef.Diag(Loc, diag::err_member_call_without_object)
- << Range;
+ << Range << /*static*/ 0;
+ else {
+ if (const auto *Tpl = dyn_cast<FunctionTemplateDecl>(Rep))
+ Rep = Tpl->getTemplatedDecl();
+ const auto *Callee = cast<CXXMethodDecl>(Rep);
+ auto Diag = SemaRef.Diag(Loc, diag::err_member_call_without_object)
+ << Range << Callee->isExplicitObjectMemberFunction();
+ if (!Replacement.empty())
+ Diag << FixItHint::CreateInsertion(Loc, Replacement);
+ }
}
/// Builds an expression which might be an implicit member expression.
@@ -248,16 +281,16 @@ ExprResult Sema::BuildPossibleImplicitMemberExpr(
case IMA_Field_Uneval_Context:
Diag(R.getNameLoc(), diag::warn_cxx98_compat_non_static_member_use)
<< R.getLookupNameInfo().getName();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case IMA_Static:
case IMA_Abstract:
- case IMA_Mixed_StaticContext:
- case IMA_Unresolved_StaticContext:
+ case IMA_Mixed_StaticOrExplicitContext:
+ case IMA_Unresolved_StaticOrExplicitContext:
if (TemplateArgs || TemplateKWLoc.isValid())
return BuildTemplateIdExpr(SS, TemplateKWLoc, R, false, TemplateArgs);
return AsULE ? AsULE : BuildDeclarationNameExpr(SS, R, false);
- case IMA_Error_StaticContext:
+ case IMA_Error_StaticOrExplicitContext:
case IMA_Error_Unrelated:
diagnoseInstanceReference(*this, SS, R.getRepresentativeDecl(),
R.getLookupNameInfo());
@@ -340,7 +373,8 @@ CheckExtVectorComponent(Sema &S, QualType baseType, ExprValueKind &VK,
// Emit a warning if an rgba selector is used earlier than OpenCL C 3.0.
if (HasRGBA || (*compStr && IsRGBA(*compStr))) {
- if (S.getLangOpts().OpenCL && S.getLangOpts().OpenCLVersion < 300) {
+ if (S.getLangOpts().OpenCL &&
+ S.getLangOpts().getOpenCLCompatibleVersion() < 300) {
const char *DiagBegin = HasRGBA ? CompName->getNameStart() : compStr;
S.Diag(OpLoc, diag::ext_opencl_ext_vector_type_rgba_selector)
<< StringRef(DiagBegin, 1) << SourceRange(CompLoc);
@@ -502,9 +536,12 @@ Sema::ActOnDependentMemberExpr(Expr *BaseExpr, QualType BaseType,
}
}
- assert(BaseType->isDependentType() ||
- NameInfo.getName().isDependentName() ||
- isDependentScopeSpecifier(SS));
+ assert(BaseType->isDependentType() || NameInfo.getName().isDependentName() ||
+ isDependentScopeSpecifier(SS) ||
+ (TemplateArgs && llvm::any_of(TemplateArgs->arguments(),
+ [](const TemplateArgumentLoc &Arg) {
+ return Arg.getArgument().isDependent();
+ })));
// Get the type being accessed in BaseType. If this is an arrow, the BaseExpr
// must have pointer type, and the accessed type is the pointee.
@@ -564,10 +601,7 @@ bool Sema::CheckQualifiedMemberReference(Expr *BaseExpr,
return false;
// Note that we use the DC of the decl, not the underlying decl.
- DeclContext *DC = (*I)->getDeclContext();
- while (DC->isTransparentContext())
- DC = DC->getParent();
-
+ DeclContext *DC = (*I)->getDeclContext()->getNonTransparentContext();
if (!DC->isRecord())
continue;
@@ -612,11 +646,10 @@ public:
if (Record->containsDecl(ND))
return true;
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Record)) {
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(Record)) {
// Accept candidates that occur in any of the current class' base classes.
for (const auto &BS : RD->bases()) {
- if (const RecordType *BSTy =
- dyn_cast_or_null<RecordType>(BS.getType().getTypePtrOrNull())) {
+ if (const auto *BSTy = BS.getType()->getAs<RecordType>()) {
if (BSTy->getDecl()->containsDecl(ND))
return true;
}
@@ -749,7 +782,8 @@ Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs) {
if (BaseType->isDependentType() ||
- (SS.isSet() && isDependentScopeSpecifier(SS)))
+ (SS.isSet() && isDependentScopeSpecifier(SS)) ||
+ NameInfo.getName().isDependentName())
return ActOnDependentMemberExpr(Base, BaseType,
IsArrow, OpLoc,
SS, TemplateKWLoc, FirstQualifierInScope,
@@ -763,7 +797,7 @@ Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
QualType RecordTy = BaseType;
if (IsArrow) RecordTy = RecordTy->castAs<PointerType>()->getPointeeType();
if (LookupMemberExprInRecord(
- *this, R, nullptr, RecordTy->getAs<RecordType>(), OpLoc, IsArrow,
+ *this, R, nullptr, RecordTy->castAs<RecordType>(), OpLoc, IsArrow,
SS, TemplateArgs != nullptr, TemplateKWLoc, TE))
return ExprError();
if (TE)
@@ -939,7 +973,7 @@ static bool IsInFnTryBlockHandler(const Scope *S) {
// function scope. If a FnTryCatchScope is found, check whether the TryScope
// flag is set. If it is not, it's a function-try-block handler.
for (; S != S->getFnParent(); S = S->getParent()) {
- if (S->getFlags() & Scope::FnTryCatchScope)
+ if (S->isFnTryCatchScope())
return (S->getFlags() & Scope::TryScope) != Scope::TryScope;
}
return false;
@@ -1160,10 +1194,10 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
if (!Var->getTemplateSpecializationKind())
Var->setTemplateSpecializationKind(TSK_ImplicitInstantiation, MemberLoc);
- return BuildMemberExpr(
- BaseExpr, IsArrow, OpLoc, &SS, TemplateKWLoc, Var, FoundDecl,
- /*HadMultipleCandidates=*/false, MemberNameInfo,
- Var->getType().getNonReferenceType(), VK_LValue, OK_Ordinary);
+ return BuildMemberExpr(BaseExpr, IsArrow, OpLoc, &SS, TemplateKWLoc, Var,
+ FoundDecl, /*HadMultipleCandidates=*/false,
+ MemberNameInfo, Var->getType().getNonReferenceType(),
+ VK_LValue, OK_Ordinary, TemplateArgs);
}
// We found something that we didn't expect. Complain.
@@ -1291,6 +1325,21 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
}
}
+ // If the base type is an atomic type, this access is undefined behavior per
+ // C11 6.5.2.3p5. Instead of giving a typecheck error, we'll warn the user
+ // about the UB and recover by converting the atomic lvalue into a non-atomic
+ // lvalue. Because this is inherently unsafe as an atomic operation, the
+ // warning defaults to an error.
+ if (const auto *ATy = BaseType->getAs<AtomicType>()) {
+ S.DiagRuntimeBehavior(OpLoc, nullptr,
+ S.PDiag(diag::warn_atomic_member_access));
+ BaseType = ATy->getValueType().getUnqualifiedType();
+ BaseExpr = ImplicitCastExpr::Create(
+ S.Context, IsArrow ? S.Context.getPointerType(BaseType) : BaseType,
+ CK_AtomicToNonAtomic, BaseExpr.get(), nullptr,
+ BaseExpr.get()->getValueKind(), FPOptionsOverride());
+ }
+
// Handle field access to simple records.
if (const RecordType *RTy = BaseType->getAs<RecordType>()) {
TypoExpr *TE = nullptr;
@@ -1591,20 +1640,21 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
false);
}
+ if (BaseType->isExtVectorBoolType()) {
+ // We disallow element access for ext_vector_type bool. There is no way to
+ // materialize a reference to a vector element as a pointer (each element is
+ // one bit in the vector).
+ S.Diag(R.getNameLoc(), diag::err_ext_vector_component_name_illegal)
+ << MemberName
+ << (BaseExpr.get() ? BaseExpr.get()->getSourceRange() : SourceRange());
+ return ExprError();
+ }
+
// Handle 'field access' to vectors, such as 'V.xx'.
if (BaseType->isExtVectorType()) {
// FIXME: this expr should store IsArrow.
IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
- ExprValueKind VK;
- if (IsArrow)
- VK = VK_LValue;
- else {
- if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(BaseExpr.get()))
- VK = POE->getSyntacticForm()->getValueKind();
- else
- VK = BaseExpr.get()->getValueKind();
- }
-
+ ExprValueKind VK = (IsArrow ? VK_LValue : BaseExpr.get()->getValueKind());
QualType ret = CheckExtVectorComponent(S, BaseType, VK, OpLoc,
Member, MemberLoc);
if (ret.isNull())
@@ -1644,6 +1694,9 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
<< BaseType << int(IsArrow) << BaseExpr.get()->getSourceRange()
<< FixItHint::CreateReplacement(OpLoc, "->");
+ if (S.isSFINAEContext())
+ return ExprError();
+
// Recurse as an -> access.
IsArrow = true;
return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS,
@@ -1664,6 +1717,16 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
ObjCImpDecl, HasTemplateArgs, TemplateKWLoc);
}
+ // HLSL supports implicit conversion of scalar types to single element vector
+ // rvalues in member expressions.
+ if (S.getLangOpts().HLSL && BaseType->isScalarType()) {
+ QualType VectorTy = S.Context.getExtVectorType(BaseType, 1);
+ BaseExpr = S.ImpCastExprToType(BaseExpr.get(), VectorTy, CK_VectorSplat,
+ BaseExpr.get()->getValueKind());
+ return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS, ObjCImpDecl,
+ HasTemplateArgs, TemplateKWLoc);
+ }
+
S.Diag(OpLoc, diag::err_typecheck_member_reference_struct_union)
<< BaseType << BaseExpr.get()->getSourceRange() << MemberLoc;
@@ -1707,6 +1770,9 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
DeclarationName Name = NameInfo.getName();
bool IsArrow = (OpKind == tok::arrow);
+ if (getLangOpts().HLSL && IsArrow)
+ return ExprError(Diag(OpLoc, diag::err_hlsl_operator_unsupported) << 2);
+
NamedDecl *FirstQualifierInScope
= (!SS.isSet() ? nullptr : FindFirstQualifierInScope(S, SS.getScopeRep()));
@@ -1873,10 +1939,9 @@ Sema::BuildImplicitMemberExpr(const CXXScopeSpec &SS,
baseExpr = BuildCXXThisExpr(loc, ThisTy, /*IsImplicit=*/true);
}
- return BuildMemberReferenceExpr(baseExpr, ThisTy,
- /*OpLoc*/ SourceLocation(),
- /*IsArrow*/ true,
- SS, TemplateKWLoc,
- /*FirstQualifierInScope*/ nullptr,
- R, TemplateArgs, S);
+ return BuildMemberReferenceExpr(
+ baseExpr, ThisTy,
+ /*OpLoc=*/SourceLocation(),
+ /*IsArrow=*/!getLangOpts().HLSL, SS, TemplateKWLoc,
+ /*FirstQualifierInScope=*/nullptr, R, TemplateArgs, S);
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp
index 8a9c933fc93f..a8853f634c9c 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp
@@ -27,10 +27,11 @@
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/ConvertUTF.h"
+#include <optional>
using namespace clang;
using namespace sema;
-using llvm::makeArrayRef;
+using llvm::ArrayRef;
ExprResult Sema::ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings) {
@@ -50,7 +51,7 @@ ExprResult Sema::ParseObjCStringLiteral(SourceLocation *AtLocs,
S = cast<StringLiteral>(E);
// ObjC strings can't be wide or UTF.
- if (!S->isAscii()) {
+ if (!S->isOrdinary()) {
Diag(S->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
<< S->getSourceRange();
return true;
@@ -70,7 +71,7 @@ ExprResult Sema::ParseObjCStringLiteral(SourceLocation *AtLocs,
QualType StrTy = Context.getConstantArrayType(
CAT->getElementType(), llvm::APInt(32, StrBuf.size() + 1), nullptr,
CAT->getSizeModifier(), CAT->getIndexTypeCVRQualifiers());
- S = StringLiteral::Create(Context, StrBuf, StringLiteral::Ascii,
+ S = StringLiteral::Create(Context, StrBuf, StringLiteralKind::Ordinary,
/*Pascal=*/false, StrTy, &StrLocs[0],
StrLocs.size());
}
@@ -243,7 +244,7 @@ static ObjCMethodDecl *getNSNumberFactoryMethod(Sema &S, SourceLocation Loc,
QualType NumberType,
bool isLiteral = false,
SourceRange R = SourceRange()) {
- Optional<NSAPI::NSNumberLiteralMethodKind> Kind =
+ std::optional<NSAPI::NSNumberLiteralMethodKind> Kind =
S.NSAPIObj->getNSNumberFactoryMethodKind(NumberType);
if (!Kind) {
@@ -284,21 +285,21 @@ static ObjCMethodDecl *getNSNumberFactoryMethod(Sema &S, SourceLocation Loc,
if (!Method && S.getLangOpts().DebuggerObjCLiteral) {
// create a stub definition this NSNumber factory method.
TypeSourceInfo *ReturnTInfo = nullptr;
- Method =
- ObjCMethodDecl::Create(CX, SourceLocation(), SourceLocation(), Sel,
- S.NSNumberPointer, ReturnTInfo, S.NSNumberDecl,
- /*isInstance=*/false, /*isVariadic=*/false,
- /*isPropertyAccessor=*/false,
- /*isSynthesizedAccessorStub=*/false,
- /*isImplicitlyDeclared=*/true,
- /*isDefined=*/false, ObjCMethodDecl::Required,
- /*HasRelatedResultType=*/false);
+ Method = ObjCMethodDecl::Create(
+ CX, SourceLocation(), SourceLocation(), Sel, S.NSNumberPointer,
+ ReturnTInfo, S.NSNumberDecl,
+ /*isInstance=*/false, /*isVariadic=*/false,
+ /*isPropertyAccessor=*/false,
+ /*isSynthesizedAccessorStub=*/false,
+ /*isImplicitlyDeclared=*/true,
+ /*isDefined=*/false, ObjCImplementationControl::Required,
+ /*HasRelatedResultType=*/false);
ParmVarDecl *value = ParmVarDecl::Create(S.Context, Method,
SourceLocation(), SourceLocation(),
&CX.Idents.get("value"),
NumberType, /*TInfo=*/nullptr,
SC_None, nullptr);
- Method->setMethodParams(S.Context, value, None);
+ Method->setMethodParams(S.Context, value, std::nullopt);
}
if (!validateBoxingMethod(S, Loc, S.NSNumberDecl, Sel, Method))
@@ -320,20 +321,20 @@ ExprResult Sema::BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number) {
// In C, character literals have type 'int'. That's not the type we want
// to use to determine the Objective-c literal kind.
switch (Char->getKind()) {
- case CharacterLiteral::Ascii:
- case CharacterLiteral::UTF8:
+ case CharacterLiteralKind::Ascii:
+ case CharacterLiteralKind::UTF8:
NumberType = Context.CharTy;
break;
- case CharacterLiteral::Wide:
+ case CharacterLiteralKind::Wide:
NumberType = Context.getWideCharType();
break;
- case CharacterLiteral::UTF16:
+ case CharacterLiteralKind::UTF16:
NumberType = Context.Char16Ty;
break;
- case CharacterLiteral::UTF32:
+ case CharacterLiteralKind::UTF32:
NumberType = Context.Char32Ty;
break;
}
@@ -448,7 +449,7 @@ static ExprResult CheckObjCCollectionLiteralElement(Sema &S, Expr *Element,
}
// If this is potentially an Objective-C string literal, add the '@'.
else if (StringLiteral *String = dyn_cast<StringLiteral>(OrigElement)) {
- if (String->isAscii()) {
+ if (String->isOrdinary()) {
S.Diag(OrigElement->getBeginLoc(), diag::err_box_literal_collection)
<< 0 << OrigElement->getSourceRange()
<< FixItHint::CreateInsertion(OrigElement->getBeginLoc(), "@");
@@ -533,7 +534,7 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
if (CE->getCastKind() == CK_ArrayToPointerDecay)
if (auto *SL =
dyn_cast<StringLiteral>(CE->getSubExpr()->IgnoreParens())) {
- assert((SL->isAscii() || SL->isUTF8()) &&
+ assert((SL->isOrdinary() || SL->isUTF8()) &&
"unexpected character encoding");
StringRef Str = SL->getString();
const llvm::UTF8 *StrBegin = Str.bytes_begin();
@@ -567,7 +568,7 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
/*isPropertyAccessor=*/false,
/*isSynthesizedAccessorStub=*/false,
/*isImplicitlyDeclared=*/true,
- /*isDefined=*/false, ObjCMethodDecl::Required,
+ /*isDefined=*/false, ObjCImplementationControl::Required,
/*HasRelatedResultType=*/false);
QualType ConstCharType = Context.CharTy.withConst();
ParmVarDecl *value =
@@ -577,7 +578,7 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
Context.getPointerType(ConstCharType),
/*TInfo=*/nullptr,
SC_None, nullptr);
- M->setMethodParams(Context, value, None);
+ M->setMethodParams(Context, value, std::nullopt);
BoxingMethod = M;
}
@@ -591,8 +592,8 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
BoxingMethod = StringWithUTF8StringMethod;
BoxedType = NSStringPointer;
// Transfer the nullability from method's return type.
- Optional<NullabilityKind> Nullability =
- BoxingMethod->getReturnType()->getNullability(Context);
+ std::optional<NullabilityKind> Nullability =
+ BoxingMethod->getReturnType()->getNullability();
if (Nullability)
BoxedType = Context.getAttributedType(
AttributedType::getNullabilityAttrKind(*Nullability), BoxedType,
@@ -610,20 +611,20 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
// In C, character literals have type 'int'. That's not the type we want
// to use to determine the Objective-c literal kind.
switch (Char->getKind()) {
- case CharacterLiteral::Ascii:
- case CharacterLiteral::UTF8:
+ case CharacterLiteralKind::Ascii:
+ case CharacterLiteralKind::UTF8:
ValueType = Context.CharTy;
break;
- case CharacterLiteral::Wide:
+ case CharacterLiteralKind::Wide:
ValueType = Context.getWideCharType();
break;
- case CharacterLiteral::UTF16:
+ case CharacterLiteralKind::UTF16:
ValueType = Context.Char16Ty;
break;
- case CharacterLiteral::UTF32:
+ case CharacterLiteralKind::UTF32:
ValueType = Context.Char32Ty;
break;
}
@@ -681,7 +682,7 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
/*isPropertyAccessor=*/false,
/*isSynthesizedAccessorStub=*/false,
/*isImplicitlyDeclared=*/true,
- /*isDefined=*/false, ObjCMethodDecl::Required,
+ /*isDefined=*/false, ObjCImplementationControl::Required,
/*HasRelatedResultType=*/false);
SmallVector<ParmVarDecl *, 2> Params;
@@ -705,7 +706,7 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
SC_None, nullptr);
Params.push_back(type);
- M->setMethodParams(Context, Params, None);
+ M->setMethodParams(Context, Params, std::nullopt);
BoxingMethod = M;
}
@@ -815,7 +816,7 @@ ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) {
false /*isVariadic*/,
/*isPropertyAccessor=*/false, /*isSynthesizedAccessorStub=*/false,
/*isImplicitlyDeclared=*/true, /*isDefined=*/false,
- ObjCMethodDecl::Required, false);
+ ObjCImplementationControl::Required, false);
SmallVector<ParmVarDecl *, 2> Params;
ParmVarDecl *objects = ParmVarDecl::Create(Context, Method,
SourceLocation(),
@@ -833,7 +834,7 @@ ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) {
/*TInfo=*/nullptr, SC_None,
nullptr);
Params.push_back(cnt);
- Method->setMethodParams(Context, Params, None);
+ Method->setMethodParams(Context, Params, std::nullopt);
}
if (!validateBoxingMethod(*this, Loc, NSArrayDecl, Sel, Method))
@@ -977,7 +978,7 @@ ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
/*isPropertyAccessor=*/false,
/*isSynthesizedAccessorStub=*/false,
/*isImplicitlyDeclared=*/true, /*isDefined=*/false,
- ObjCMethodDecl::Required, false);
+ ObjCImplementationControl::Required, false);
SmallVector<ParmVarDecl *, 3> Params;
ParmVarDecl *objects = ParmVarDecl::Create(Context, Method,
SourceLocation(),
@@ -1003,7 +1004,7 @@ ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
/*TInfo=*/nullptr, SC_None,
nullptr);
Params.push_back(cnt);
- Method->setMethodParams(Context, Params, None);
+ Method->setMethodParams(Context, Params, std::nullopt);
}
if (!validateBoxingMethod(*this, SR.getBegin(), NSDictionaryDecl, Sel,
@@ -1037,12 +1038,9 @@ ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
if (ObjCProtocolDecl *NSCopyingPDecl =
LookupProtocol(&Context.Idents.get("NSCopying"), SR.getBegin())) {
ObjCProtocolDecl *PQ[] = {NSCopyingPDecl};
- QIDNSCopying =
- Context.getObjCObjectType(Context.ObjCBuiltinIdTy, { },
- llvm::makeArrayRef(
- (ObjCProtocolDecl**) PQ,
- 1),
- false);
+ QIDNSCopying = Context.getObjCObjectType(
+ Context.ObjCBuiltinIdTy, {},
+ llvm::ArrayRef((ObjCProtocolDecl **)PQ, 1), false);
QIDNSCopying = Context.getObjCObjectPointerType(QIDNSCopying);
}
}
@@ -1280,11 +1278,11 @@ static ObjCMethodDecl *findMethodInCurrentClass(Sema &S, Selector Sel) {
// whether Sel is potentially direct in this context.
if (ObjCMethodDecl *MD = IFace->lookupMethod(Sel, /*isInstance=*/true))
return MD;
- if (ObjCMethodDecl *MD = IFace->lookupPrivateMethod(Sel, /*isInstance=*/true))
+ if (ObjCMethodDecl *MD = IFace->lookupPrivateMethod(Sel, /*Instance=*/true))
return MD;
if (ObjCMethodDecl *MD = IFace->lookupMethod(Sel, /*isInstance=*/false))
return MD;
- if (ObjCMethodDecl *MD = IFace->lookupPrivateMethod(Sel, /*isInstance=*/false))
+ if (ObjCMethodDecl *MD = IFace->lookupPrivateMethod(Sel, /*Instance=*/false))
return MD;
return nullptr;
@@ -1349,7 +1347,8 @@ ExprResult Sema::ParseObjCSelectorExpression(Selector Sel,
}
if (Method &&
- Method->getImplementationControl() != ObjCMethodDecl::Optional &&
+ Method->getImplementationControl() !=
+ ObjCImplementationControl::Optional &&
!getSourceManager().isInSystemHeader(Method->getLocation()))
ReferencedSelectors.insert(std::make_pair(Sel, AtLoc));
@@ -1466,8 +1465,8 @@ static QualType getBaseMessageSendResultType(Sema &S,
// result type to the returned result.
auto transferNullability = [&](QualType type) -> QualType {
// If the method's result type has nullability, extract it.
- if (auto nullability = Method->getSendResultType(ReceiverType)
- ->getNullability(Context)){
+ if (auto nullability =
+ Method->getSendResultType(ReceiverType)->getNullability()) {
// Strip off any outer nullability sugar from the provided type.
(void)AttributedType::stripOuterNullability(type);
@@ -1546,7 +1545,7 @@ QualType Sema::getMessageSendResultType(const Expr *Receiver,
assert(MD->isClassMethod() && "expected a class method");
QualType NewResultType = Context.getObjCObjectPointerType(
Context.getObjCInterfaceType(MD->getClassInterface()));
- if (auto Nullability = resultType->getNullability(Context))
+ if (auto Nullability = resultType->getNullability())
NewResultType = Context.getAttributedType(
AttributedType::getNullabilityAttrKind(*Nullability),
NewResultType, NewResultType);
@@ -1563,16 +1562,16 @@ QualType Sema::getMessageSendResultType(const Expr *Receiver,
// Map the nullability of the result into a table index.
unsigned receiverNullabilityIdx = 0;
- if (Optional<NullabilityKind> nullability =
- ReceiverType->getNullability(Context)) {
+ if (std::optional<NullabilityKind> nullability =
+ ReceiverType->getNullability()) {
if (*nullability == NullabilityKind::NullableResult)
nullability = NullabilityKind::Nullable;
receiverNullabilityIdx = 1 + static_cast<unsigned>(*nullability);
}
unsigned resultNullabilityIdx = 0;
- if (Optional<NullabilityKind> nullability =
- resultType->getNullability(Context)) {
+ if (std::optional<NullabilityKind> nullability =
+ resultType->getNullability()) {
if (*nullability == NullabilityKind::NullableResult)
nullability = NullabilityKind::Nullable;
resultNullabilityIdx = 1 + static_cast<unsigned>(*nullability);
@@ -1605,7 +1604,7 @@ QualType Sema::getMessageSendResultType(const Expr *Receiver,
} else {
resultType = resultType.getDesugaredType(Context);
}
- } while (resultType->getNullability(Context));
+ } while (resultType->getNullability());
// Add nullability back if needed.
if (newResultNullabilityIdx > 0) {
@@ -1802,14 +1801,15 @@ bool Sema::CheckMessageArgumentTypes(
// FIXME. This need be cleaned up.
if (Args.size() < NumNamedArgs) {
Diag(SelLoc, diag::err_typecheck_call_too_few_args)
- << 2 << NumNamedArgs << static_cast<unsigned>(Args.size());
+ << 2 << NumNamedArgs << static_cast<unsigned>(Args.size())
+ << /*is non object*/ 0;
return false;
}
// Compute the set of type arguments to be substituted into each parameter
// type.
- Optional<ArrayRef<QualType>> typeArgs
- = ReceiverType->getObjCSubstitutions(Method->getDeclContext());
+ std::optional<ArrayRef<QualType>> typeArgs =
+ ReceiverType->getObjCSubstitutions(Method->getDeclContext());
bool IsError = false;
for (unsigned i = 0; i < NumNamedArgs; i++) {
// We can't do any type-checking on a type-dependent argument.
@@ -1900,7 +1900,7 @@ bool Sema::CheckMessageArgumentTypes(
Diag(Args[NumNamedArgs]->getBeginLoc(),
diag::err_typecheck_call_too_many_args)
<< 2 /*method*/ << NumNamedArgs << static_cast<unsigned>(Args.size())
- << Method->getSourceRange()
+ << Method->getSourceRange() << /*is non object*/ 0
<< SourceRange(Args[NumNamedArgs]->getBeginLoc(),
Args.back()->getEndLoc());
}
@@ -1909,8 +1909,8 @@ bool Sema::CheckMessageArgumentTypes(
DiagnoseSentinelCalls(Method, SelLoc, Args);
// Do additional checkings on method.
- IsError |= CheckObjCMethodCall(
- Method, SelLoc, makeArrayRef(Args.data(), Args.size()));
+ IsError |=
+ CheckObjCMethodCall(Method, SelLoc, ArrayRef(Args.data(), Args.size()));
return IsError;
}
@@ -2440,6 +2440,9 @@ ExprResult Sema::BuildClassMessageImplicit(QualType ReceiverType,
if (!ReceiverType.isNull())
receiverTypeInfo = Context.getTrivialTypeSourceInfo(ReceiverType);
+ assert(((isSuperReceiver && Loc.isValid()) || receiverTypeInfo) &&
+ "Either the super receiver location needs to be valid or the receiver "
+ "needs valid type source information");
return BuildClassMessage(receiverTypeInfo, ReceiverType,
/*SuperLoc=*/isSuperReceiver ? Loc : SourceLocation(),
Sel, Method, Loc, Loc, Loc, Args,
@@ -2633,10 +2636,10 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
unsigned NumArgs = ArgsIn.size();
Expr **Args = ArgsIn.data();
assert(SuperLoc.isInvalid() && "Message to super with dependent type");
- return ObjCMessageExpr::Create(
- Context, ReceiverType, VK_PRValue, LBracLoc, ReceiverTypeInfo, Sel,
- SelectorLocs, /*Method=*/nullptr, makeArrayRef(Args, NumArgs), RBracLoc,
- isImplicit);
+ return ObjCMessageExpr::Create(Context, ReceiverType, VK_PRValue, LBracLoc,
+ ReceiverTypeInfo, Sel, SelectorLocs,
+ /*Method=*/nullptr, ArrayRef(Args, NumArgs),
+ RBracLoc, isImplicit);
}
// Find the class to which we are sending this message.
@@ -2735,21 +2738,19 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
// Construct the appropriate ObjCMessageExpr.
ObjCMessageExpr *Result;
if (SuperLoc.isValid())
- Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
- SuperLoc, /*IsInstanceSuper=*/false,
- ReceiverType, Sel, SelectorLocs,
- Method, makeArrayRef(Args, NumArgs),
- RBracLoc, isImplicit);
+ Result = ObjCMessageExpr::Create(
+ Context, ReturnType, VK, LBracLoc, SuperLoc, /*IsInstanceSuper=*/false,
+ ReceiverType, Sel, SelectorLocs, Method, ArrayRef(Args, NumArgs),
+ RBracLoc, isImplicit);
else {
- Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
- ReceiverTypeInfo, Sel, SelectorLocs,
- Method, makeArrayRef(Args, NumArgs),
- RBracLoc, isImplicit);
+ Result = ObjCMessageExpr::Create(
+ Context, ReturnType, VK, LBracLoc, ReceiverTypeInfo, Sel, SelectorLocs,
+ Method, ArrayRef(Args, NumArgs), RBracLoc, isImplicit);
if (!isImplicit)
checkCocoaAPI(*this, Result);
}
if (Method)
- checkFoundationAPI(*this, SelLoc, Method, makeArrayRef(Args, NumArgs),
+ checkFoundationAPI(*this, SelLoc, Method, ArrayRef(Args, NumArgs),
ReceiverType, /*IsClassObjectCall=*/true);
return MaybeBindToTemporary(Result);
}
@@ -2888,8 +2889,8 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
assert(SuperLoc.isInvalid() && "Message to super with dependent type");
return ObjCMessageExpr::Create(
Context, Context.DependentTy, VK_PRValue, LBracLoc, Receiver, Sel,
- SelectorLocs, /*Method=*/nullptr, makeArrayRef(Args, NumArgs),
- RBracLoc, isImplicit);
+ SelectorLocs, /*Method=*/nullptr, ArrayRef(Args, NumArgs), RBracLoc,
+ isImplicit);
}
// If necessary, apply function/array conversion to the receiver.
@@ -3326,16 +3327,14 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
// Construct the appropriate ObjCMessageExpr instance.
ObjCMessageExpr *Result;
if (SuperLoc.isValid())
- Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
- SuperLoc, /*IsInstanceSuper=*/true,
- ReceiverType, Sel, SelectorLocs, Method,
- makeArrayRef(Args, NumArgs), RBracLoc,
- isImplicit);
+ Result = ObjCMessageExpr::Create(
+ Context, ReturnType, VK, LBracLoc, SuperLoc, /*IsInstanceSuper=*/true,
+ ReceiverType, Sel, SelectorLocs, Method, ArrayRef(Args, NumArgs),
+ RBracLoc, isImplicit);
else {
- Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
- Receiver, Sel, SelectorLocs, Method,
- makeArrayRef(Args, NumArgs), RBracLoc,
- isImplicit);
+ Result = ObjCMessageExpr::Create(
+ Context, ReturnType, VK, LBracLoc, Receiver, Sel, SelectorLocs, Method,
+ ArrayRef(Args, NumArgs), RBracLoc, isImplicit);
if (!isImplicit)
checkCocoaAPI(*this, Result);
}
@@ -3356,7 +3355,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
}
}
}
- checkFoundationAPI(*this, SelLoc, Method, makeArrayRef(Args, NumArgs),
+ checkFoundationAPI(*this, SelLoc, Method, ArrayRef(Args, NumArgs),
ReceiverType, IsClassObjectCall);
}
@@ -3772,7 +3771,7 @@ static void addFixitForObjCARCConversion(
SourceManager &SM = S.getSourceManager();
char PrevChar = *SM.getCharacterData(range.getBegin().getLocWithOffset(-1));
- if (Lexer::isIdentifierBodyChar(PrevChar, S.getLangOpts()))
+ if (Lexer::isAsciiIdentifierContinueChar(PrevChar, S.getLangOpts()))
BridgeCall += ' ';
BridgeCall += CFBridgeName;
@@ -3790,7 +3789,7 @@ static void addFixitForObjCARCConversion(
SourceManager &SM = S.getSourceManager();
char PrevChar = *SM.getCharacterData(range.getBegin().getLocWithOffset(-1));
- if (Lexer::isIdentifierBodyChar(PrevChar, S.getLangOpts()))
+ if (Lexer::isAsciiIdentifierContinueChar(PrevChar, S.getLangOpts()))
BridgeCall += ' ';
BridgeCall += CFBridgeName;
@@ -3860,7 +3859,7 @@ static inline T *getObjCBridgeAttr(const TypedefType *TD) {
static ObjCBridgeRelatedAttr *ObjCBridgeRelatedAttrFromType(QualType T,
TypedefNameDecl *&TDNDecl) {
- while (const TypedefType *TD = dyn_cast<TypedefType>(T.getTypePtr())) {
+ while (const auto *TD = T->getAs<TypedefType>()) {
TDNDecl = TD->getDecl();
if (ObjCBridgeRelatedAttr *ObjCBAttr =
getObjCBridgeAttr<ObjCBridgeRelatedAttr>(TD))
@@ -4007,7 +4006,7 @@ static bool CheckObjCBridgeNSCast(Sema &S, QualType castType, Expr *castExpr,
bool &HadTheAttribute, bool warn) {
QualType T = castExpr->getType();
HadTheAttribute = false;
- while (const TypedefType *TD = dyn_cast<TypedefType>(T.getTypePtr())) {
+ while (const auto *TD = T->getAs<TypedefType>()) {
TypedefNameDecl *TDNDecl = TD->getDecl();
if (TB *ObjCBAttr = getObjCBridgeAttr<TB>(TD)) {
if (IdentifierInfo *Parm = ObjCBAttr->getBridgedType()) {
@@ -4015,12 +4014,11 @@ static bool CheckObjCBridgeNSCast(Sema &S, QualType castType, Expr *castExpr,
if (Parm->isStr("id"))
return true;
- NamedDecl *Target = nullptr;
// Check for an existing type with this name.
LookupResult R(S, DeclarationName(Parm), SourceLocation(),
Sema::LookupOrdinaryName);
if (S.LookupName(R, S.TUScope)) {
- Target = R.getFoundDecl();
+ NamedDecl *Target = R.getFoundDecl();
if (Target && isa<ObjCInterfaceDecl>(Target)) {
ObjCInterfaceDecl *ExprClass = cast<ObjCInterfaceDecl>(Target);
if (const ObjCObjectPointerType *InterfacePointerType =
@@ -4056,8 +4054,6 @@ static bool CheckObjCBridgeNSCast(Sema &S, QualType castType, Expr *castExpr,
diag::err_objc_cf_bridged_not_interface)
<< castExpr->getType() << Parm;
S.Diag(TDNDecl->getBeginLoc(), diag::note_declared_at);
- if (Target)
- S.Diag(Target->getBeginLoc(), diag::note_declared_at);
}
return true;
}
@@ -4073,7 +4069,7 @@ static bool CheckObjCBridgeCFCast(Sema &S, QualType castType, Expr *castExpr,
bool &HadTheAttribute, bool warn) {
QualType T = castType;
HadTheAttribute = false;
- while (const TypedefType *TD = dyn_cast<TypedefType>(T.getTypePtr())) {
+ while (const auto *TD = T->getAs<TypedefType>()) {
TypedefNameDecl *TDNDecl = TD->getDecl();
if (TB *ObjCBAttr = getObjCBridgeAttr<TB>(TD)) {
if (IdentifierInfo *Parm = ObjCBAttr->getBridgedType()) {
@@ -4380,11 +4376,9 @@ Sema::CheckObjCBridgeRelatedConversions(SourceLocation Loc,
Diag(RelatedClass->getBeginLoc(), diag::note_declared_at);
Diag(TDNDecl->getBeginLoc(), diag::note_declared_at);
- ExprResult msg =
- BuildInstanceMessageImplicit(SrcExpr, SrcType,
- InstanceMethod->getLocation(),
- InstanceMethod->getSelector(),
- InstanceMethod, None);
+ ExprResult msg = BuildInstanceMessageImplicit(
+ SrcExpr, SrcType, InstanceMethod->getLocation(),
+ InstanceMethod->getSelector(), InstanceMethod, std::nullopt);
SrcExpr = msg.get();
}
return true;
@@ -4453,9 +4447,14 @@ Sema::CheckObjCConversion(SourceRange castRange, QualType castType,
// Allow casts between pointers to lifetime types (e.g., __strong id*)
// and pointers to void (e.g., cv void *). Casting from void* to lifetime*
// must be explicit.
- if (exprACTC == ACTC_indirectRetainable && castACTC == ACTC_voidPtr)
+ // Allow conversions between pointers to lifetime types and coreFoundation
+ // pointers too, but only when the conversions are explicit.
+ if (exprACTC == ACTC_indirectRetainable &&
+ (castACTC == ACTC_voidPtr ||
+ (castACTC == ACTC_coreFoundation && isCast(CCK))))
return ACR_okay;
- if (castACTC == ACTC_indirectRetainable && exprACTC == ACTC_voidPtr &&
+ if (castACTC == ACTC_indirectRetainable &&
+ (exprACTC == ACTC_voidPtr || exprACTC == ACTC_coreFoundation) &&
isCast(CCK))
return ACR_okay;
@@ -4557,6 +4556,7 @@ Expr *Sema::stripARCUnbridgedCast(Expr *e) {
CurFPFeatureOverrides());
} else if (GenericSelectionExpr *gse = dyn_cast<GenericSelectionExpr>(e)) {
assert(!gse->isResultDependent());
+ assert(!gse->isTypePredicate());
unsigned n = gse->getNumAssocs();
SmallVector<Expr *, 4> subExprs;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaFixItUtils.cpp b/contrib/llvm-project/clang/lib/Sema/SemaFixItUtils.cpp
index 2910a56f866b..2c85a5319430 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaFixItUtils.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaFixItUtils.cpp
@@ -124,7 +124,7 @@ bool ConversionFixItGenerator::tryToFixConversion(const Expr *FullExpr,
// Check if the pointer to the argument needs to be passed:
// (type -> type *) or (type & -> type *).
- if (isa<PointerType>(ToQTy)) {
+ if (const auto *ToPtrTy = dyn_cast<PointerType>(ToQTy)) {
bool CanConvert = false;
OverloadFixItKind FixKind = OFIK_TakeAddress;
@@ -132,6 +132,10 @@ bool ConversionFixItGenerator::tryToFixConversion(const Expr *FullExpr,
if (!Expr->isLValue() || Expr->getObjectKind() != OK_Ordinary)
return false;
+ // Do no take address of const pointer to get void*
+ if (isa<PointerType>(FromQTy) && ToPtrTy->isVoidPointerType())
+ return false;
+
CanConvert = CompareTypes(S.Context.getPointerType(FromQTy), ToQTy, S,
Begin, VK_PRValue);
if (CanConvert) {
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaHLSL.cpp b/contrib/llvm-project/clang/lib/Sema/SemaHLSL.cpp
new file mode 100644
index 000000000000..cf82cc9bccdf
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaHLSL.cpp
@@ -0,0 +1,34 @@
+//===- SemaHLSL.cpp - Semantic Analysis for HLSL constructs ---------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This implements Semantic Analysis for HLSL constructs.
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/Sema.h"
+
+using namespace clang;
+
+Decl *Sema::ActOnStartHLSLBuffer(Scope *BufferScope, bool CBuffer,
+ SourceLocation KwLoc, IdentifierInfo *Ident,
+ SourceLocation IdentLoc,
+ SourceLocation LBrace) {
+ // For anonymous namespace, take the location of the left brace.
+ DeclContext *LexicalParent = getCurLexicalContext();
+ HLSLBufferDecl *Result = HLSLBufferDecl::Create(
+ Context, LexicalParent, CBuffer, KwLoc, Ident, IdentLoc, LBrace);
+
+ PushOnScopeChains(Result, BufferScope);
+ PushDeclContext(BufferScope, Result);
+
+ return Result;
+}
+
+void Sema::ActOnFinishHLSLBuffer(Decl *Dcl, SourceLocation RBrace) {
+ auto *BufDecl = cast<HLSLBufferDecl>(Dcl);
+ BufDecl->setRBraceLoc(RBrace);
+ PopDeclContext();
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaInit.cpp b/contrib/llvm-project/clang/lib/Sema/SemaInit.cpp
index 78574e34d906..457fa377355a 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaInit.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaInit.cpp
@@ -12,20 +12,28 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
+#include "clang/AST/IgnoreExpr.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/Designator.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Ownership.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -81,17 +89,27 @@ static StringInitFailureKind IsStringInit(Expr *Init, const ArrayType *AT,
const QualType ElemTy =
Context.getCanonicalType(AT->getElementType()).getUnqualifiedType();
+ auto IsCharOrUnsignedChar = [](const QualType &T) {
+ const BuiltinType *BT = dyn_cast<BuiltinType>(T.getTypePtr());
+ return BT && BT->isCharType() && BT->getKind() != BuiltinType::SChar;
+ };
+
switch (SL->getKind()) {
- case StringLiteral::UTF8:
+ case StringLiteralKind::UTF8:
// char8_t array can be initialized with a UTF-8 string.
- if (ElemTy->isChar8Type())
+ // - C++20 [dcl.init.string] (DR)
+ // Additionally, an array of char or unsigned char may be initialized
+ // by a UTF-8 string literal.
+ if (ElemTy->isChar8Type() ||
+ (Context.getLangOpts().Char8 &&
+ IsCharOrUnsignedChar(ElemTy.getCanonicalType())))
return SIF_None;
- LLVM_FALLTHROUGH;
- case StringLiteral::Ascii:
+ [[fallthrough]];
+ case StringLiteralKind::Ordinary:
// char array can be initialized with a narrow string.
// Only allow char x[] = "foo"; not char x[] = L"foo";
if (ElemTy->isCharType())
- return (SL->getKind() == StringLiteral::UTF8 &&
+ return (SL->getKind() == StringLiteralKind::UTF8 &&
Context.getLangOpts().Char8)
? SIF_UTF8StringIntoPlainChar
: SIF_None;
@@ -105,7 +123,7 @@ static StringInitFailureKind IsStringInit(Expr *Init, const ArrayType *AT,
// version of wchar_t, char16_t, or char32_t may be initialized by a wide
// string literal with the corresponding encoding prefix (L, u, or U,
// respectively), optionally enclosed in braces.
- case StringLiteral::UTF16:
+ case StringLiteralKind::UTF16:
if (Context.typesAreCompatible(Context.Char16Ty, ElemTy))
return SIF_None;
if (ElemTy->isCharType() || ElemTy->isChar8Type())
@@ -113,7 +131,7 @@ static StringInitFailureKind IsStringInit(Expr *Init, const ArrayType *AT,
if (IsWideCharCompatible(ElemTy, Context))
return SIF_IncompatWideStringIntoWideChar;
return SIF_Other;
- case StringLiteral::UTF32:
+ case StringLiteralKind::UTF32:
if (Context.typesAreCompatible(Context.Char32Ty, ElemTy))
return SIF_None;
if (ElemTy->isCharType() || ElemTy->isChar8Type())
@@ -121,7 +139,7 @@ static StringInitFailureKind IsStringInit(Expr *Init, const ArrayType *AT,
if (IsWideCharCompatible(ElemTy, Context))
return SIF_IncompatWideStringIntoWideChar;
return SIF_Other;
- case StringLiteral::Wide:
+ case StringLiteralKind::Wide:
if (Context.typesAreCompatible(Context.getWideCharType(), ElemTy))
return SIF_None;
if (ElemTy->isCharType() || ElemTy->isChar8Type())
@@ -129,6 +147,9 @@ static StringInitFailureKind IsStringInit(Expr *Init, const ArrayType *AT,
if (IsWideCharCompatible(ElemTy, Context))
return SIF_IncompatWideStringIntoWideChar;
return SIF_Other;
+ case StringLiteralKind::Unevaluated:
+ assert(false && "Unevaluated string literal in initialization");
+ break;
}
llvm_unreachable("missed a StringLiteral kind?");
@@ -152,20 +173,9 @@ static void updateStringLiteralType(Expr *E, QualType Ty) {
while (true) {
E->setType(Ty);
E->setValueKind(VK_PRValue);
- if (isa<StringLiteral>(E) || isa<ObjCEncodeExpr>(E)) {
- break;
- } else if (ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
- E = PE->getSubExpr();
- } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
- assert(UO->getOpcode() == UO_Extension);
- E = UO->getSubExpr();
- } else if (GenericSelectionExpr *GSE = dyn_cast<GenericSelectionExpr>(E)) {
- E = GSE->getResultExpr();
- } else if (ChooseExpr *CE = dyn_cast<ChooseExpr>(E)) {
- E = CE->getChosenSubExpr();
- } else {
- llvm_unreachable("unexpected expr in string literal init");
- }
+ if (isa<StringLiteral>(E) || isa<ObjCEncodeExpr>(E))
+ break;
+ E = IgnoreParensSingleStep(E);
}
}
@@ -174,20 +184,9 @@ static void updateStringLiteralType(Expr *E, QualType Ty) {
static void updateGNUCompoundLiteralRValue(Expr *E) {
while (true) {
E->setValueKind(VK_PRValue);
- if (isa<CompoundLiteralExpr>(E)) {
- break;
- } else if (ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
- E = PE->getSubExpr();
- } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
- assert(UO->getOpcode() == UO_Extension);
- E = UO->getSubExpr();
- } else if (GenericSelectionExpr *GSE = dyn_cast<GenericSelectionExpr>(E)) {
- E = GSE->getResultExpr();
- } else if (ChooseExpr *CE = dyn_cast<ChooseExpr>(E)) {
- E = CE->getChosenSubExpr();
- } else {
- llvm_unreachable("unexpected expr in array compound literal init");
- }
+ if (isa<CompoundLiteralExpr>(E))
+ break;
+ E = IgnoreParensSingleStep(E);
}
}
@@ -203,9 +202,8 @@ static void CheckStringInit(Expr *Str, QualType &DeclT, const ArrayType *AT,
// being initialized to a string literal.
llvm::APInt ConstVal(32, StrLength);
// Return a new array type (C99 6.7.8p22).
- DeclT = S.Context.getConstantArrayType(IAT->getElementType(),
- ConstVal, nullptr,
- ArrayType::Normal, 0);
+ DeclT = S.Context.getConstantArrayType(
+ IAT->getElementType(), ConstVal, nullptr, ArraySizeModifier::Normal, 0);
updateStringLiteralType(Str, DeclT);
return;
}
@@ -229,6 +227,7 @@ static void CheckStringInit(Expr *Str, QualType &DeclT, const ArrayType *AT,
if (StrLength > CAT->getSize().getZExtValue())
S.Diag(Str->getBeginLoc(),
diag::err_initializer_string_for_char_array_too_long)
+ << CAT->getSize().getZExtValue() << StrLength
<< Str->getSourceRange();
} else {
// C99 6.7.8p14.
@@ -290,6 +289,7 @@ class InitListChecker {
bool InOverloadResolution;
InitListExpr *FullyStructuredList = nullptr;
NoInitExpr *DummyExpr = nullptr;
+ SmallVectorImpl<QualType> *AggrDeductionCandidateParamTypes = nullptr;
NoInitExpr *getDummyInit() {
if (!DummyExpr)
@@ -339,7 +339,7 @@ class InitListChecker {
unsigned &StructuredIndex);
void CheckStructUnionTypes(const InitializedEntity &Entity,
InitListExpr *IList, QualType DeclType,
- CXXRecordDecl::base_class_range Bases,
+ CXXRecordDecl::base_class_const_range Bases,
RecordDecl::field_iterator Field,
bool SubobjectIsDesignatorContext, unsigned &Index,
InitListExpr *StructuredList,
@@ -376,18 +376,22 @@ class InitListChecker {
unsigned ExpectedNumInits);
int numArrayElements(QualType DeclType);
int numStructUnionElements(QualType DeclType);
+ static RecordDecl *getRecordDecl(QualType DeclType);
ExprResult PerformEmptyInit(SourceLocation Loc,
const InitializedEntity &Entity);
/// Diagnose that OldInit (or part thereof) has been overridden by NewInit.
void diagnoseInitOverride(Expr *OldInit, SourceRange NewInitRange,
+ bool UnionOverride = false,
bool FullyOverwritten = true) {
// Overriding an initializer via a designator is valid with C99 designated
// initializers, but ill-formed with C++20 designated initializers.
- unsigned DiagID = SemaRef.getLangOpts().CPlusPlus
- ? diag::ext_initializer_overrides
- : diag::warn_initializer_overrides;
+ unsigned DiagID =
+ SemaRef.getLangOpts().CPlusPlus
+ ? (UnionOverride ? diag::ext_initializer_union_overrides
+ : diag::ext_initializer_overrides)
+ : diag::warn_initializer_overrides;
if (InOverloadResolution && SemaRef.getLangOpts().CPlusPlus) {
// In overload resolution, we have to strictly enforce the rules, and so
@@ -475,9 +479,19 @@ class InitListChecker {
SourceLocation Loc);
public:
+ InitListChecker(
+ Sema &S, const InitializedEntity &Entity, InitListExpr *IL, QualType &T,
+ bool VerifyOnly, bool TreatUnavailableAsInvalid,
+ bool InOverloadResolution = false,
+ SmallVectorImpl<QualType> *AggrDeductionCandidateParamTypes = nullptr);
InitListChecker(Sema &S, const InitializedEntity &Entity, InitListExpr *IL,
- QualType &T, bool VerifyOnly, bool TreatUnavailableAsInvalid,
- bool InOverloadResolution = false);
+ QualType &T,
+ SmallVectorImpl<QualType> &AggrDeductionCandidateParamTypes)
+ : InitListChecker(S, Entity, IL, T, /*VerifyOnly=*/true,
+ /*TreatUnavailableAsInvalid=*/false,
+ /*InOverloadResolution=*/false,
+ &AggrDeductionCandidateParamTypes){};
+
bool HadError() { return hadError; }
// Retrieves the fully-structured initializer list used for
@@ -493,7 +507,7 @@ ExprResult InitListChecker::PerformEmptyInit(SourceLocation Loc,
true);
MultiExprArg SubInit;
Expr *InitExpr;
- InitListExpr DummyInitList(SemaRef.Context, Loc, None, Loc);
+ InitListExpr DummyInitList(SemaRef.Context, Loc, std::nullopt, Loc);
// C++ [dcl.init.aggr]p7:
// If there are fewer initializer-clauses in the list than there are
@@ -512,8 +526,10 @@ ExprResult InitListChecker::PerformEmptyInit(SourceLocation Loc,
//
// Only do this if we're initializing a class type, to avoid filling in
// the initializer list where possible.
- InitExpr = VerifyOnly ? &DummyInitList : new (SemaRef.Context)
- InitListExpr(SemaRef.Context, Loc, None, Loc);
+ InitExpr = VerifyOnly
+ ? &DummyInitList
+ : new (SemaRef.Context)
+ InitListExpr(SemaRef.Context, Loc, std::nullopt, Loc);
InitExpr->setType(SemaRef.Context.VoidTy);
SubInit = InitExpr;
Kind = InitializationKind::CreateCopy(Loc, Loc);
@@ -695,10 +711,10 @@ void InitListChecker::FillInEmptyInitForField(unsigned Init, FieldDecl *Field,
// member of reference type uninitialized, the program is
// ill-formed.
SemaRef.Diag(Loc, diag::err_init_reference_member_uninitialized)
- << Field->getType()
- << ILE->getSyntacticForm()->getSourceRange();
- SemaRef.Diag(Field->getLocation(),
- diag::note_uninit_reference_member);
+ << Field->getType()
+ << (ILE->isSyntacticForm() ? ILE : ILE->getSyntacticForm())
+ ->getSourceRange();
+ SemaRef.Diag(Field->getLocation(), diag::note_uninit_reference_member);
}
hadError = true;
return;
@@ -792,7 +808,7 @@ InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity,
// order to leave them uninitialized, the ILE is expanded and the extra
// fields are then filled with NoInitExpr.
unsigned NumElems = numStructUnionElements(ILE->getType());
- if (RDecl->hasFlexibleArrayMember())
+ if (!RDecl->isUnion() && RDecl->hasFlexibleArrayMember())
++NumElems;
if (!VerifyOnly && ILE->getNumInits() < NumElems)
ILE->resizeInits(SemaRef.Context, NumElems);
@@ -935,18 +951,19 @@ InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity,
static bool hasAnyDesignatedInits(const InitListExpr *IL) {
for (const Stmt *Init : *IL)
- if (Init && isa<DesignatedInitExpr>(Init))
+ if (isa_and_nonnull<DesignatedInitExpr>(Init))
return true;
return false;
}
-InitListChecker::InitListChecker(Sema &S, const InitializedEntity &Entity,
- InitListExpr *IL, QualType &T, bool VerifyOnly,
- bool TreatUnavailableAsInvalid,
- bool InOverloadResolution)
+InitListChecker::InitListChecker(
+ Sema &S, const InitializedEntity &Entity, InitListExpr *IL, QualType &T,
+ bool VerifyOnly, bool TreatUnavailableAsInvalid, bool InOverloadResolution,
+ SmallVectorImpl<QualType> *AggrDeductionCandidateParamTypes)
: SemaRef(S), VerifyOnly(VerifyOnly),
TreatUnavailableAsInvalid(TreatUnavailableAsInvalid),
- InOverloadResolution(InOverloadResolution) {
+ InOverloadResolution(InOverloadResolution),
+ AggrDeductionCandidateParamTypes(AggrDeductionCandidateParamTypes) {
if (!VerifyOnly || hasAnyDesignatedInits(IL)) {
FullyStructuredList =
createInitListExpr(T, IL->getSourceRange(), IL->getNumInits());
@@ -960,7 +977,7 @@ InitListChecker::InitListChecker(Sema &S, const InitializedEntity &Entity,
CheckExplicitInitList(Entity, IL, T, FullyStructuredList,
/*TopLevelObject=*/true);
- if (!hadError && FullyStructuredList) {
+ if (!hadError && !AggrDeductionCandidateParamTypes && FullyStructuredList) {
bool RequiresSecondPass = false;
FillInEmptyInitializations(Entity, FullyStructuredList, RequiresSecondPass,
/*OuterILE=*/nullptr, /*OuterIndex=*/0);
@@ -996,6 +1013,14 @@ int InitListChecker::numStructUnionElements(QualType DeclType) {
return InitializableMembers - structDecl->hasFlexibleArrayMember();
}
+RecordDecl *InitListChecker::getRecordDecl(QualType DeclType) {
+ if (const auto *RT = DeclType->getAs<RecordType>())
+ return RT->getDecl();
+ if (const auto *Inject = DeclType->getAs<InjectedClassNameType>())
+ return Inject->getDecl();
+ return nullptr;
+}
+
/// Determine whether Entity is an entity for which it is idiomatic to elide
/// the braces in aggregate initialization.
static bool isIdiomaticBraceElisionEntity(const InitializedEntity &Entity) {
@@ -1139,6 +1164,7 @@ static void warnBracedScalarInit(Sema &S, const InitializedEntity &Entity,
case InitializedEntity::EK_Parameter_CF_Audited:
case InitializedEntity::EK_TemplateParameter:
case InitializedEntity::EK_Result:
+ case InitializedEntity::EK_ParenAggInitMember:
// Extra braces here are suspicious.
DiagID = diag::warn_braces_around_init;
break;
@@ -1289,15 +1315,18 @@ void InitListChecker::CheckListElementTypes(const InitializedEntity &Entity,
} else if (DeclType->isVectorType()) {
CheckVectorType(Entity, IList, DeclType, Index,
StructuredList, StructuredIndex);
- } else if (DeclType->isRecordType()) {
- assert(DeclType->isAggregateType() &&
- "non-aggregate records should be handed in CheckSubElementType");
- RecordDecl *RD = DeclType->castAs<RecordType>()->getDecl();
+ } else if (const RecordDecl *RD = getRecordDecl(DeclType)) {
auto Bases =
- CXXRecordDecl::base_class_range(CXXRecordDecl::base_class_iterator(),
- CXXRecordDecl::base_class_iterator());
- if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
- Bases = CXXRD->bases();
+ CXXRecordDecl::base_class_const_range(CXXRecordDecl::base_class_const_iterator(),
+ CXXRecordDecl::base_class_const_iterator());
+ if (DeclType->isRecordType()) {
+ assert(DeclType->isAggregateType() &&
+ "non-aggregate records should be handed in CheckSubElementType");
+ if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ Bases = CXXRD->bases();
+ } else {
+ Bases = cast<CXXRecordDecl>(RD)->bases();
+ }
CheckStructUnionTypes(Entity, IList, DeclType, Bases, RD->field_begin(),
SubobjectIsDesignatorContext, Index, StructuredList,
StructuredIndex, TopLevelObject);
@@ -1327,6 +1356,13 @@ void InitListChecker::CheckListElementTypes(const InitializedEntity &Entity,
// Checks for scalar type are sufficient for these types too.
CheckScalarType(Entity, IList, DeclType, Index, StructuredList,
StructuredIndex);
+ } else if (DeclType->isDependentType()) {
+ // C++ [over.match.class.deduct]p1.5:
+ // brace elision is not considered for any aggregate element that has a
+ // dependent non-array type or an array type with a value-dependent bound
+ ++Index;
+ assert(AggrDeductionCandidateParamTypes);
+ AggrDeductionCandidateParamTypes->push_back(DeclType);
} else {
if (!VerifyOnly)
SemaRef.Diag(IList->getBeginLoc(), diag::err_illegal_initializer_type)
@@ -1384,31 +1420,46 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
? InitializedEntity::InitializeTemporary(ElemType)
: Entity;
- InitializationSequence Seq(SemaRef, TmpEntity, Kind, expr,
- /*TopLevelOfInitList*/ true);
+ if (TmpEntity.getType()->isDependentType()) {
+ // C++ [over.match.class.deduct]p1.5:
+ // brace elision is not considered for any aggregate element that has a
+ // dependent non-array type or an array type with a value-dependent
+ // bound
+ assert(AggrDeductionCandidateParamTypes);
+ if (!isa_and_nonnull<ConstantArrayType>(
+ SemaRef.Context.getAsArrayType(ElemType))) {
+ ++Index;
+ AggrDeductionCandidateParamTypes->push_back(ElemType);
+ return;
+ }
+ } else {
+ InitializationSequence Seq(SemaRef, TmpEntity, Kind, expr,
+ /*TopLevelOfInitList*/ true);
+ // C++14 [dcl.init.aggr]p13:
+ // If the assignment-expression can initialize a member, the member is
+ // initialized. Otherwise [...] brace elision is assumed
+ //
+ // Brace elision is never performed if the element is not an
+ // assignment-expression.
+ if (Seq || isa<InitListExpr>(expr)) {
+ if (!VerifyOnly) {
+ ExprResult Result = Seq.Perform(SemaRef, TmpEntity, Kind, expr);
+ if (Result.isInvalid())
+ hadError = true;
- // C++14 [dcl.init.aggr]p13:
- // If the assignment-expression can initialize a member, the member is
- // initialized. Otherwise [...] brace elision is assumed
- //
- // Brace elision is never performed if the element is not an
- // assignment-expression.
- if (Seq || isa<InitListExpr>(expr)) {
- if (!VerifyOnly) {
- ExprResult Result = Seq.Perform(SemaRef, TmpEntity, Kind, expr);
- if (Result.isInvalid())
+ UpdateStructuredListElement(StructuredList, StructuredIndex,
+ Result.getAs<Expr>());
+ } else if (!Seq) {
hadError = true;
-
- UpdateStructuredListElement(StructuredList, StructuredIndex,
- Result.getAs<Expr>());
- } else if (!Seq) {
- hadError = true;
- } else if (StructuredList) {
- UpdateStructuredListElement(StructuredList, StructuredIndex,
- getDummyInit());
+ } else if (StructuredList) {
+ UpdateStructuredListElement(StructuredList, StructuredIndex,
+ getDummyInit());
+ }
+ ++Index;
+ if (AggrDeductionCandidateParamTypes)
+ AggrDeductionCandidateParamTypes->push_back(ElemType);
+ return;
}
- ++Index;
- return;
}
// Fall through for subaggregate initialization
@@ -1518,12 +1569,12 @@ void InitListChecker::CheckComplexType(const InitializedEntity &Entity,
// As an extension, clang supports complex initializers, which initialize
// a complex number component-wise. When an explicit initializer list for
- // a complex number contains two two initializers, this extension kicks in:
- // it exepcts the initializer list to contain two elements convertible to
+ // a complex number contains two initializers, this extension kicks in:
+ // it expects the initializer list to contain two elements convertible to
// the element type of the complex type. The first element initializes
// the real part, and the second element intitializes the imaginary part.
- if (IList->getNumInits() != 2)
+ if (IList->getNumInits() < 2)
return CheckScalarType(Entity, IList, DeclType, Index, StructuredList,
StructuredIndex);
@@ -1552,20 +1603,23 @@ void InitListChecker::CheckScalarType(const InitializedEntity &Entity,
unsigned &StructuredIndex) {
if (Index >= IList->getNumInits()) {
if (!VerifyOnly) {
- if (DeclType->isSizelessBuiltinType())
- SemaRef.Diag(IList->getBeginLoc(),
- SemaRef.getLangOpts().CPlusPlus11
- ? diag::warn_cxx98_compat_empty_sizeless_initializer
- : diag::err_empty_sizeless_initializer)
- << DeclType << IList->getSourceRange();
- else
- SemaRef.Diag(IList->getBeginLoc(),
- SemaRef.getLangOpts().CPlusPlus11
- ? diag::warn_cxx98_compat_empty_scalar_initializer
- : diag::err_empty_scalar_initializer)
- << IList->getSourceRange();
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ if (DeclType->isSizelessBuiltinType())
+ SemaRef.Diag(IList->getBeginLoc(),
+ SemaRef.getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_empty_sizeless_initializer
+ : diag::err_empty_sizeless_initializer)
+ << DeclType << IList->getSourceRange();
+ else
+ SemaRef.Diag(IList->getBeginLoc(),
+ SemaRef.getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_empty_scalar_initializer
+ : diag::err_empty_scalar_initializer)
+ << IList->getSourceRange();
+ }
}
- hadError = !SemaRef.getLangOpts().CPlusPlus11;
+ hadError =
+ SemaRef.getLangOpts().CPlusPlus && !SemaRef.getLangOpts().CPlusPlus11;
++Index;
++StructuredIndex;
return;
@@ -1621,6 +1675,8 @@ void InitListChecker::CheckScalarType(const InitializedEntity &Entity,
}
UpdateStructuredListElement(StructuredList, StructuredIndex, ResultExpr);
++Index;
+ if (AggrDeductionCandidateParamTypes)
+ AggrDeductionCandidateParamTypes->push_back(DeclType);
}
void InitListChecker::CheckReferenceType(const InitializedEntity &Entity,
@@ -1676,6 +1732,8 @@ void InitListChecker::CheckReferenceType(const InitializedEntity &Entity,
UpdateStructuredListElement(StructuredList, StructuredIndex, expr);
++Index;
+ if (AggrDeductionCandidateParamTypes)
+ AggrDeductionCandidateParamTypes->push_back(DeclType);
}
void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
@@ -1696,7 +1754,7 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
return;
}
- if (!SemaRef.getLangOpts().OpenCL) {
+ if (!SemaRef.getLangOpts().OpenCL && !SemaRef.getLangOpts().HLSL ) {
// If the initializing element is a vector, try to copy-initialize
// instead of breaking it apart (which is doomed to failure anyway).
Expr *Init = IList->getInit(Index);
@@ -1727,6 +1785,8 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
}
UpdateStructuredListElement(StructuredList, StructuredIndex, ResultExpr);
++Index;
+ if (AggrDeductionCandidateParamTypes)
+ AggrDeductionCandidateParamTypes->push_back(elementType);
return;
}
@@ -1750,8 +1810,8 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
bool isBigEndian = SemaRef.Context.getTargetInfo().isBigEndian();
const VectorType *T = Entity.getType()->castAs<VectorType>();
- if (isBigEndian && (T->getVectorKind() == VectorType::NeonVector ||
- T->getVectorKind() == VectorType::NeonPolyVector)) {
+ if (isBigEndian && (T->getVectorKind() == VectorKind::Neon ||
+ T->getVectorKind() == VectorKind::NeonPoly)) {
// The ability to use vector initializer lists is a GNU vector extension
// and is unrelated to the NEON intrinsics in arm_neon.h. On little
// endian machines it works fine, however on big endian machines it
@@ -1790,7 +1850,7 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
InitializedEntity ElementEntity =
InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity);
- // OpenCL initializers allows vectors to be constructed from vectors.
+ // OpenCL and HLSL initializers allow vectors to be constructed from vectors.
for (unsigned i = 0; i < maxElements; ++i) {
// Don't attempt to go past the end of the init list
if (Index >= IList->getNumInits())
@@ -1819,7 +1879,7 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
}
}
- // OpenCL requires all elements to be initialized.
+ // OpenCL and HLSL require all elements to be initialized.
if (numEltsInit != maxElements) {
if (!VerifyOnly)
SemaRef.Diag(IList->getBeginLoc(),
@@ -1888,6 +1948,8 @@ void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
StructuredList->resizeInits(SemaRef.Context, StructuredIndex);
}
++Index;
+ if (AggrDeductionCandidateParamTypes)
+ AggrDeductionCandidateParamTypes->push_back(DeclType);
return;
}
}
@@ -1895,11 +1957,24 @@ void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
// Check for VLAs; in standard C it would be possible to check this
// earlier, but I don't know where clang accepts VLAs (gcc accepts
// them in all sorts of strange places).
- if (!VerifyOnly)
- SemaRef.Diag(VAT->getSizeExpr()->getBeginLoc(),
- diag::err_variable_object_no_init)
- << VAT->getSizeExpr()->getSourceRange();
- hadError = true;
+ bool HasErr = IList->getNumInits() != 0 || SemaRef.getLangOpts().CPlusPlus;
+ if (!VerifyOnly) {
+ // C23 6.7.10p4: An entity of variable length array type shall not be
+ // initialized except by an empty initializer.
+ //
+ // The C extension warnings are issued from ParseBraceInitializer() and
+ // do not need to be issued here. However, we continue to issue an error
+ // in the case there are initializers or we are compiling C++. We allow
+ // use of VLAs in C++, but it's not clear we want to allow {} to zero
+ // init a VLA in C++ in all cases (such as with non-trivial constructors).
+ // FIXME: should we allow this construct in C++ when it makes sense to do
+ // so?
+ if (HasErr)
+ SemaRef.Diag(VAT->getSizeExpr()->getBeginLoc(),
+ diag::err_variable_object_no_init)
+ << VAT->getSizeExpr()->getSourceRange();
+ }
+ hadError = HasErr;
++Index;
++StructuredIndex;
return;
@@ -1979,7 +2054,7 @@ void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
}
DeclType = SemaRef.Context.getConstantArrayType(
- elementType, maxElements, nullptr, ArrayType::Normal, 0);
+ elementType, maxElements, nullptr, ArraySizeModifier::Normal, 0);
}
if (!hadError) {
// If there are any members of the array that get value-initialized, check
@@ -2004,10 +2079,6 @@ bool InitListChecker::CheckFlexibleArrayInit(const InitializedEntity &Entity,
cast<InitListExpr>(InitExpr)->getNumInits() == 0) {
// Empty flexible array init always allowed as an extension
FlexArrayDiag = diag::ext_flexible_array_init;
- } else if (SemaRef.getLangOpts().CPlusPlus) {
- // Disallow flexible array init in C++; it is not required for gcc
- // compatibility, and it needs work to IRGen correctly in general.
- FlexArrayDiag = diag::err_flexible_array_init;
} else if (!TopLevelObject) {
// Disallow flexible array init on non-top-level object
FlexArrayDiag = diag::err_flexible_array_init;
@@ -2034,24 +2105,22 @@ bool InitListChecker::CheckFlexibleArrayInit(const InitializedEntity &Entity,
void InitListChecker::CheckStructUnionTypes(
const InitializedEntity &Entity, InitListExpr *IList, QualType DeclType,
- CXXRecordDecl::base_class_range Bases, RecordDecl::field_iterator Field,
+ CXXRecordDecl::base_class_const_range Bases, RecordDecl::field_iterator Field,
bool SubobjectIsDesignatorContext, unsigned &Index,
InitListExpr *StructuredList, unsigned &StructuredIndex,
bool TopLevelObject) {
- RecordDecl *structDecl = DeclType->castAs<RecordType>()->getDecl();
+ const RecordDecl *RD = getRecordDecl(DeclType);
// If the record is invalid, some of it's members are invalid. To avoid
- // confusion, we forgo checking the intializer for the entire record.
- if (structDecl->isInvalidDecl()) {
+ // confusion, we forgo checking the initializer for the entire record.
+ if (RD->isInvalidDecl()) {
// Assume it was supposed to consume a single initializer.
++Index;
hadError = true;
return;
}
- if (DeclType->isUnionType() && IList->getNumInits() == 0) {
- RecordDecl *RD = DeclType->castAs<RecordType>()->getDecl();
-
+ if (RD->isUnion() && IList->getNumInits() == 0) {
if (!VerifyOnly)
for (FieldDecl *FD : RD->fields()) {
QualType ET = SemaRef.Context.getBaseElementType(FD->getType());
@@ -2095,7 +2164,8 @@ void InitListChecker::CheckStructUnionTypes(
bool InitializedSomething = false;
// If we have any base classes, they are initialized prior to the fields.
- for (auto &Base : Bases) {
+ for (auto I = Bases.begin(), E = Bases.end(); I != E; ++I) {
+ auto &Base = *I;
Expr *Init = Index < IList->getNumInits() ? IList->getInit(Index) : nullptr;
// Designated inits always initialize fields, so if we see one, all
@@ -2103,6 +2173,34 @@ void InitListChecker::CheckStructUnionTypes(
if (Init && isa<DesignatedInitExpr>(Init))
Init = nullptr;
+ // C++ [over.match.class.deduct]p1.6:
+ // each non-trailing aggregate element that is a pack expansion is assumed
+ // to correspond to no elements of the initializer list, and (1.7) a
+ // trailing aggregate element that is a pack expansion is assumed to
+ // correspond to all remaining elements of the initializer list (if any).
+
+ // C++ [over.match.class.deduct]p1.9:
+ // ... except that additional parameter packs of the form P_j... are
+ // inserted into the parameter list in their original aggregate element
+ // position corresponding to each non-trailing aggregate element of
+ // type P_j that was skipped because it was a parameter pack, and the
+ // trailing sequence of parameters corresponding to a trailing
+ // aggregate element that is a pack expansion (if any) is replaced
+ // by a single parameter of the form T_n....
+ if (AggrDeductionCandidateParamTypes && Base.isPackExpansion()) {
+ AggrDeductionCandidateParamTypes->push_back(
+ SemaRef.Context.getPackExpansionType(Base.getType(), std::nullopt));
+
+ // Trailing pack expansion
+ if (I + 1 == E && RD->field_empty()) {
+ if (Index < IList->getNumInits())
+ Index = IList->getNumInits();
+ return;
+ }
+
+ continue;
+ }
+
SourceLocation InitLoc = Init ? Init->getBeginLoc() : IList->getEndLoc();
InitializedEntity BaseEntity = InitializedEntity::InitializeBase(
SemaRef.Context, &Base, false, &Entity);
@@ -2125,12 +2223,16 @@ void InitListChecker::CheckStructUnionTypes(
// anything except look at designated initializers; That's okay,
// because an error should get printed out elsewhere. It might be
// worthwhile to skip over the rest of the initializer, though.
- RecordDecl *RD = DeclType->castAs<RecordType>()->getDecl();
RecordDecl::field_iterator FieldEnd = RD->field_end();
+ size_t NumRecordDecls = llvm::count_if(RD->decls(), [&](const Decl *D) {
+ return isa<FieldDecl>(D) || isa<RecordDecl>(D);
+ });
bool CheckForMissingFields =
!IList->isIdiomaticZeroInitializer(SemaRef.getLangOpts());
bool HasDesignatedInit = false;
+ llvm::SmallPtrSet<FieldDecl *, 4> InitializedFields;
+
while (Index < IList->getNumInits()) {
Expr *Init = IList->getInit(Index);
SourceLocation InitLoc = Init->getBeginLoc();
@@ -2146,20 +2248,23 @@ void InitListChecker::CheckStructUnionTypes(
// Handle this designated initializer. Field will be updated to
// the next field that we'll be initializing.
- if (CheckDesignatedInitializer(Entity, IList, DIE, 0,
- DeclType, &Field, nullptr, Index,
- StructuredList, StructuredIndex,
- true, TopLevelObject))
+ bool DesignatedInitFailed = CheckDesignatedInitializer(
+ Entity, IList, DIE, 0, DeclType, &Field, nullptr, Index,
+ StructuredList, StructuredIndex, true, TopLevelObject);
+ if (DesignatedInitFailed)
hadError = true;
- else if (!VerifyOnly) {
- // Find the field named by the designated initializer.
- RecordDecl::field_iterator F = RD->field_begin();
- while (std::next(F) != Field)
- ++F;
- QualType ET = SemaRef.Context.getBaseElementType(F->getType());
- if (checkDestructorReference(ET, InitLoc, SemaRef)) {
- hadError = true;
- return;
+
+ // Find the field named by the designated initializer.
+ DesignatedInitExpr::Designator *D = DIE->getDesignator(0);
+ if (!VerifyOnly && D->isFieldDesignator()) {
+ FieldDecl *F = D->getFieldDecl();
+ InitializedFields.insert(F);
+ if (!DesignatedInitFailed) {
+ QualType ET = SemaRef.Context.getBaseElementType(F->getType());
+ if (checkDestructorReference(ET, InitLoc, SemaRef)) {
+ hadError = true;
+ return;
+ }
}
}
@@ -2167,17 +2272,47 @@ void InitListChecker::CheckStructUnionTypes(
// Disable check for missing fields when designators are used.
// This matches gcc behaviour.
- CheckForMissingFields = false;
+ if (!SemaRef.getLangOpts().CPlusPlus)
+ CheckForMissingFields = false;
continue;
}
+ // Check if this is an initializer of forms:
+ //
+ // struct foo f = {};
+ // struct foo g = {0};
+ //
+ // These are okay for randomized structures. [C99 6.7.8p19]
+ //
+ // Also, if there is only one element in the structure, we allow something
+ // like this, because it's really not randomized in the tranditional sense.
+ //
+ // struct foo h = {bar};
+ auto IsZeroInitializer = [&](const Expr *I) {
+ if (IList->getNumInits() == 1) {
+ if (NumRecordDecls == 1)
+ return true;
+ if (const auto *IL = dyn_cast<IntegerLiteral>(I))
+ return IL->getValue().isZero();
+ }
+ return false;
+ };
+
+ // Don't allow non-designated initializers on randomized structures.
+ if (RD->isRandomized() && !IsZeroInitializer(Init)) {
+ if (!VerifyOnly)
+ SemaRef.Diag(InitLoc, diag::err_non_designated_init_used);
+ hadError = true;
+ break;
+ }
+
if (Field == FieldEnd) {
// We've run out of fields. We're done.
break;
}
// We've already initialized a member of a union. We're done.
- if (InitializedSomething && DeclType->isUnionType())
+ if (InitializedSomething && RD->isUnion())
break;
// If we've hit the flexible array member at the end, we're done.
@@ -2217,8 +2352,9 @@ void InitListChecker::CheckStructUnionTypes(
CheckSubElementType(MemberEntity, IList, Field->getType(), Index,
StructuredList, StructuredIndex);
InitializedSomething = true;
+ InitializedFields.insert(*Field);
- if (DeclType->isUnionType() && StructuredList) {
+ if (RD->isUnion() && StructuredList) {
// Initialize the first field within the union.
StructuredList->setInitializedFieldInUnion(*Field);
}
@@ -2228,15 +2364,21 @@ void InitListChecker::CheckStructUnionTypes(
// Emit warnings for missing struct field initializers.
if (!VerifyOnly && InitializedSomething && CheckForMissingFields &&
- Field != FieldEnd && !Field->getType()->isIncompleteArrayType() &&
- !DeclType->isUnionType()) {
+ !RD->isUnion()) {
// It is possible we have one or more unnamed bitfields remaining.
// Find first (if any) named field and emit warning.
- for (RecordDecl::field_iterator it = Field, end = RD->field_end();
+ for (RecordDecl::field_iterator it = HasDesignatedInit ? RD->field_begin()
+ : Field,
+ end = RD->field_end();
it != end; ++it) {
- if (!it->isUnnamedBitfield() && !it->hasInClassInitializer()) {
+ if (HasDesignatedInit && InitializedFields.count(*it))
+ continue;
+
+ if (!it->isUnnamedBitfield() && !it->hasInClassInitializer() &&
+ !it->getType()->isIncompleteArrayType()) {
SemaRef.Diag(IList->getSourceRange().getEnd(),
- diag::warn_missing_field_initializers) << *it;
+ diag::warn_missing_field_initializers)
+ << *it;
break;
}
}
@@ -2244,7 +2386,7 @@ void InitListChecker::CheckStructUnionTypes(
// Check that any remaining fields can be value-initialized if we're not
// building a structured list. (If we are, we'll check this later.)
- if (!StructuredList && Field != FieldEnd && !DeclType->isUnionType() &&
+ if (!StructuredList && Field != FieldEnd && !RD->isUnion() &&
!Field->getType()->isIncompleteArrayType()) {
for (; Field != FieldEnd && !hadError; ++Field) {
if (!Field->isUnnamedBitfield() && !Field->hasInClassInitializer())
@@ -2283,7 +2425,8 @@ void InitListChecker::CheckStructUnionTypes(
InitializedEntity MemberEntity =
InitializedEntity::InitializeMember(*Field, &Entity);
- if (isa<InitListExpr>(IList->getInit(Index)))
+ if (isa<InitListExpr>(IList->getInit(Index)) ||
+ AggrDeductionCandidateParamTypes)
CheckSubElementType(MemberEntity, IList, Field->getType(), Index,
StructuredList, StructuredIndex);
else
@@ -2306,14 +2449,14 @@ static void ExpandAnonymousFieldDesignator(Sema &SemaRef,
for (IndirectFieldDecl::chain_iterator PI = IndirectField->chain_begin(),
PE = IndirectField->chain_end(); PI != PE; ++PI) {
if (PI + 1 == PE)
- Replacements.push_back(Designator((IdentifierInfo *)nullptr,
- DIE->getDesignator(DesigIdx)->getDotLoc(),
- DIE->getDesignator(DesigIdx)->getFieldLoc()));
+ Replacements.push_back(Designator::CreateFieldDesignator(
+ (IdentifierInfo *)nullptr, DIE->getDesignator(DesigIdx)->getDotLoc(),
+ DIE->getDesignator(DesigIdx)->getFieldLoc()));
else
- Replacements.push_back(Designator((IdentifierInfo *)nullptr,
- SourceLocation(), SourceLocation()));
+ Replacements.push_back(Designator::CreateFieldDesignator(
+ (IdentifierInfo *)nullptr, SourceLocation(), SourceLocation()));
assert(isa<FieldDecl>(*PI));
- Replacements.back().setField(cast<FieldDecl>(*PI));
+ Replacements.back().setFieldDecl(cast<FieldDecl>(*PI));
}
// Expand the current designator into the set of replacement
@@ -2341,7 +2484,7 @@ namespace {
// the given struct or union.
class FieldInitializerValidatorCCC final : public CorrectionCandidateCallback {
public:
- explicit FieldInitializerValidatorCCC(RecordDecl *RD)
+ explicit FieldInitializerValidatorCCC(const RecordDecl *RD)
: Record(RD) {}
bool ValidateCandidate(const TypoCorrection &candidate) override {
@@ -2354,7 +2497,7 @@ class FieldInitializerValidatorCCC final : public CorrectionCandidateCallback {
}
private:
- RecordDecl *Record;
+ const RecordDecl *Record;
};
} // end anonymous namespace
@@ -2430,6 +2573,8 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
Result.get());
}
++Index;
+ if (AggrDeductionCandidateParamTypes)
+ AggrDeductionCandidateParamTypes->push_back(CurrentObjectType);
return !Seq;
}
@@ -2487,6 +2632,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// subobject [0].b.
diagnoseInitOverride(ExistingInit,
SourceRange(D->getBeginLoc(), DIE->getEndLoc()),
+ /*UnionOverride=*/false,
/*FullyOverwritten=*/false);
if (!VerifyOnly) {
@@ -2522,8 +2668,8 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// then the current object (defined below) shall have
// structure or union type and the identifier shall be the
// name of a member of that type.
- const RecordType *RT = CurrentObjectType->getAs<RecordType>();
- if (!RT) {
+ RecordDecl *RD = getRecordDecl(CurrentObjectType);
+ if (!RD) {
SourceLocation Loc = D->getDotLoc();
if (Loc.isInvalid())
Loc = D->getFieldLoc();
@@ -2534,24 +2680,19 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
return true;
}
- FieldDecl *KnownField = D->getField();
+ FieldDecl *KnownField = D->getFieldDecl();
if (!KnownField) {
- IdentifierInfo *FieldName = D->getFieldName();
- DeclContext::lookup_result Lookup = RT->getDecl()->lookup(FieldName);
- for (NamedDecl *ND : Lookup) {
- if (auto *FD = dyn_cast<FieldDecl>(ND)) {
- KnownField = FD;
- break;
- }
- if (auto *IFD = dyn_cast<IndirectFieldDecl>(ND)) {
- // In verify mode, don't modify the original.
- if (VerifyOnly)
- DIE = CloneDesignatedInitExpr(SemaRef, DIE);
- ExpandAnonymousFieldDesignator(SemaRef, DIE, DesigIdx, IFD);
- D = DIE->getDesignator(DesigIdx);
- KnownField = cast<FieldDecl>(*IFD->chain_begin());
- break;
- }
+ const IdentifierInfo *FieldName = D->getFieldName();
+ ValueDecl *VD = SemaRef.tryLookupUnambiguousFieldDecl(RD, FieldName);
+ if (auto *FD = dyn_cast_if_present<FieldDecl>(VD)) {
+ KnownField = FD;
+ } else if (auto *IFD = dyn_cast_if_present<IndirectFieldDecl>(VD)) {
+ // In verify mode, don't modify the original.
+ if (VerifyOnly)
+ DIE = CloneDesignatedInitExpr(SemaRef, DIE);
+ ExpandAnonymousFieldDesignator(SemaRef, DIE, DesigIdx, IFD);
+ D = DIE->getDesignator(DesigIdx);
+ KnownField = cast<FieldDecl>(*IFD->chain_begin());
}
if (!KnownField) {
if (VerifyOnly) {
@@ -2559,10 +2700,17 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
return true; // No typo correction when just trying this out.
}
+ // We found a placeholder variable
+ if (SemaRef.DiagRedefinedPlaceholderFieldDecl(DIE->getBeginLoc(), RD,
+ FieldName)) {
+ ++Index;
+ return true;
+ }
// Name lookup found something, but it wasn't a field.
- if (!Lookup.empty()) {
+ if (DeclContextLookupResult Lookup = RD->lookup(FieldName);
+ !Lookup.empty()) {
SemaRef.Diag(D->getFieldLoc(), diag::err_field_designator_nonfield)
- << FieldName;
+ << FieldName;
SemaRef.Diag(Lookup.front()->getLocation(),
diag::note_field_designator_found);
++Index;
@@ -2571,11 +2719,11 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// Name lookup didn't find anything.
// Determine whether this was a typo for another field name.
- FieldInitializerValidatorCCC CCC(RT->getDecl());
+ FieldInitializerValidatorCCC CCC(RD);
if (TypoCorrection Corrected = SemaRef.CorrectTypo(
DeclarationNameInfo(FieldName, D->getFieldLoc()),
Sema::LookupMemberName, /*Scope=*/nullptr, /*SS=*/nullptr, CCC,
- Sema::CTK_ErrorRecovery, RT->getDecl())) {
+ Sema::CTK_ErrorRecovery, RD)) {
SemaRef.diagnoseTypo(
Corrected,
SemaRef.PDiag(diag::err_field_designator_unknown_suggest)
@@ -2584,8 +2732,15 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
hadError = true;
} else {
// Typo correction didn't find anything.
- SemaRef.Diag(D->getFieldLoc(), diag::err_field_designator_unknown)
- << FieldName << CurrentObjectType;
+ SourceLocation Loc = D->getFieldLoc();
+
+ // The loc can be invalid with a "null" designator (i.e. an anonymous
+ // union/struct). Do our best to approximate the location.
+ if (Loc.isInvalid())
+ Loc = IList->getBeginLoc();
+
+ SemaRef.Diag(Loc, diag::err_field_designator_unknown)
+ << FieldName << CurrentObjectType << DIE->getSourceRange();
++Index;
return true;
}
@@ -2593,12 +2748,12 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
}
unsigned NumBases = 0;
- if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
NumBases = CXXRD->getNumBases();
unsigned FieldIndex = NumBases;
- for (auto *FI : RT->getDecl()->fields()) {
+ for (auto *FI : RD->fields()) {
if (FI->isUnnamedBitfield())
continue;
if (declaresSameEntity(KnownField, FI)) {
@@ -2613,7 +2768,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// All of the fields of a union are located at the same place in
// the initializer list.
- if (RT->getDecl()->isUnion()) {
+ if (RD->isUnion()) {
FieldIndex = 0;
if (StructuredList) {
FieldDecl *CurrentField = StructuredList->getInitializedFieldInUnion();
@@ -2625,7 +2780,10 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
if (ExistingInit) {
// We're about to throw away an initializer, emit warning.
diagnoseInitOverride(
- ExistingInit, SourceRange(D->getBeginLoc(), DIE->getEndLoc()));
+ ExistingInit, SourceRange(D->getBeginLoc(), DIE->getEndLoc()),
+ /*UnionOverride=*/true,
+ /*FullyOverwritten=*/SemaRef.getLangOpts().CPlusPlus ? false
+ : true);
}
// remove existing initializer
@@ -2665,15 +2823,14 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// cases where a designator takes us backwards too.
if (IsFirstDesignator && !VerifyOnly && SemaRef.getLangOpts().CPlusPlus &&
NextField &&
- (*NextField == RT->getDecl()->field_end() ||
+ (*NextField == RD->field_end() ||
(*NextField)->getFieldIndex() > Field->getFieldIndex() + 1)) {
// Find the field that we just initialized.
FieldDecl *PrevField = nullptr;
- for (auto FI = RT->getDecl()->field_begin();
- FI != RT->getDecl()->field_end(); ++FI) {
+ for (auto FI = RD->field_begin(); FI != RD->field_end(); ++FI) {
if (FI->isUnnamedBitfield())
continue;
- if (*NextField != RT->getDecl()->field_end() &&
+ if (*NextField != RD->field_end() &&
declaresSameEntity(*FI, **NextField))
break;
PrevField = *FI;
@@ -2681,10 +2838,11 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
if (PrevField &&
PrevField->getFieldIndex() > KnownField->getFieldIndex()) {
- SemaRef.Diag(DIE->getBeginLoc(), diag::ext_designated_init_reordered)
+ SemaRef.Diag(DIE->getInit()->getBeginLoc(),
+ diag::ext_designated_init_reordered)
<< KnownField << PrevField << DIE->getSourceRange();
- unsigned OldIndex = NumBases + PrevField->getFieldIndex();
+ unsigned OldIndex = StructuredIndex - 1;
if (StructuredList && OldIndex <= StructuredList->getNumInits()) {
if (Expr *PrevInit = StructuredList->getInit(OldIndex)) {
SemaRef.Diag(PrevInit->getBeginLoc(),
@@ -2698,7 +2856,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// Update the designator with the field declaration.
if (!VerifyOnly)
- D->setField(*Field);
+ D->setFieldDecl(*Field);
// Make sure that our non-designated initializer list has space
// for a subobject corresponding to this field.
@@ -2788,8 +2946,12 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// If this the first designator, our caller will continue checking
// the rest of this struct/class/union subobject.
if (IsFirstDesignator) {
+ if (Field != RD->field_end() && Field->isUnnamedBitfield())
+ ++Field;
+
if (NextField)
*NextField = Field;
+
StructuredIndex = FieldIndex;
return false;
}
@@ -2798,7 +2960,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
return false;
// We've already initialized something in the union; we're done.
- if (RT->getDecl()->isUnion())
+ if (RD->isUnion())
return hadError;
// Check the remaining fields within this class/struct/union subobject.
@@ -2899,14 +3061,14 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
// We're modifying a string literal init; we have to decompose the string
// so we can modify the individual characters.
ASTContext &Context = SemaRef.Context;
- Expr *SubExpr = StructuredList->getInit(0)->IgnoreParens();
+ Expr *SubExpr = StructuredList->getInit(0)->IgnoreParenImpCasts();
// Compute the character type
QualType CharTy = AT->getElementType();
// Compute the type of the integer literals.
QualType PromotedCharTy = CharTy;
- if (CharTy->isPromotableIntegerType())
+ if (Context.isPromotableIntegerType(CharTy))
PromotedCharTy = Context.getPromotedIntegerType(CharTy);
unsigned PromotedCharTyWidth = Context.getTypeSize(PromotedCharTy);
@@ -3082,10 +3244,8 @@ InitListExpr *
InitListChecker::createInitListExpr(QualType CurrentObjectType,
SourceRange InitRange,
unsigned ExpectedNumInits) {
- InitListExpr *Result
- = new (SemaRef.Context) InitListExpr(SemaRef.Context,
- InitRange.getBegin(), None,
- InitRange.getEnd());
+ InitListExpr *Result = new (SemaRef.Context) InitListExpr(
+ SemaRef.Context, InitRange.getBegin(), std::nullopt, InitRange.getEnd());
QualType ResultType = CurrentObjectType;
if (!ResultType->isArrayType())
@@ -3108,6 +3268,8 @@ InitListChecker::createInitListExpr(QualType CurrentObjectType,
NumElements = VType->getNumElements();
} else if (CurrentObjectType->isRecordType()) {
NumElements = numStructUnionElements(CurrentObjectType);
+ } else if (CurrentObjectType->isDependentType()) {
+ NumElements = 1;
}
Result->reserveInits(SemaRef.Context, NumElements);
@@ -3186,13 +3348,11 @@ ExprResult Sema::ActOnDesignatedInitializer(Designation &Desig,
// Build designators and check array designator expressions.
for (unsigned Idx = 0; Idx < Desig.getNumDesignators(); ++Idx) {
const Designator &D = Desig.getDesignator(Idx);
- switch (D.getKind()) {
- case Designator::FieldDesignator:
- Designators.push_back(ASTDesignator(D.getField(), D.getDotLoc(),
- D.getFieldLoc()));
- break;
- case Designator::ArrayDesignator: {
+ if (D.isFieldDesignator()) {
+ Designators.push_back(ASTDesignator::CreateFieldDesignator(
+ D.getFieldDecl(), D.getDotLoc(), D.getFieldLoc()));
+ } else if (D.isArrayDesignator()) {
Expr *Index = static_cast<Expr *>(D.getArrayIndex());
llvm::APSInt IndexValue;
if (!Index->isTypeDependent() && !Index->isValueDependent())
@@ -3200,15 +3360,11 @@ ExprResult Sema::ActOnDesignatedInitializer(Designation &Desig,
if (!Index)
Invalid = true;
else {
- Designators.push_back(ASTDesignator(InitExpressions.size(),
- D.getLBracketLoc(),
- D.getRBracketLoc()));
+ Designators.push_back(ASTDesignator::CreateArrayDesignator(
+ InitExpressions.size(), D.getLBracketLoc(), D.getRBracketLoc()));
InitExpressions.push_back(Index);
}
- break;
- }
-
- case Designator::ArrayRangeDesignator: {
+ } else if (D.isArrayRangeDesignator()) {
Expr *StartIndex = static_cast<Expr *>(D.getArrayRangeStart());
Expr *EndIndex = static_cast<Expr *>(D.getArrayRangeEnd());
llvm::APSInt StartValue;
@@ -3240,25 +3396,19 @@ ExprResult Sema::ActOnDesignatedInitializer(Designation &Desig,
<< StartIndex->getSourceRange() << EndIndex->getSourceRange();
Invalid = true;
} else {
- Designators.push_back(ASTDesignator(InitExpressions.size(),
- D.getLBracketLoc(),
- D.getEllipsisLoc(),
- D.getRBracketLoc()));
+ Designators.push_back(ASTDesignator::CreateArrayRangeDesignator(
+ InitExpressions.size(), D.getLBracketLoc(), D.getEllipsisLoc(),
+ D.getRBracketLoc()));
InitExpressions.push_back(StartIndex);
InitExpressions.push_back(EndIndex);
}
}
- break;
- }
}
}
if (Invalid || Init.isInvalid())
return ExprError();
- // Clear out the expressions within the designation.
- Desig.ClearExprs(*this);
-
return DesignatedInitExpr::Create(Context, Designators, InitExpressions,
EqualOrColonLoc, GNUSyntax,
Init.getAs<Expr>());
@@ -3309,6 +3459,7 @@ DeclarationName InitializedEntity::getName() const {
case EK_Variable:
case EK_Member:
+ case EK_ParenAggInitMember:
case EK_Binding:
case EK_TemplateParameter:
return Variable.VariableOrMember->getDeclName();
@@ -3340,6 +3491,7 @@ ValueDecl *InitializedEntity::getDecl() const {
switch (getKind()) {
case EK_Variable:
case EK_Member:
+ case EK_ParenAggInitMember:
case EK_Binding:
case EK_TemplateParameter:
return Variable.VariableOrMember;
@@ -3381,6 +3533,7 @@ bool InitializedEntity::allowsNRVO() const {
case EK_Parameter_CF_Audited:
case EK_TemplateParameter:
case EK_Member:
+ case EK_ParenAggInitMember:
case EK_Binding:
case EK_New:
case EK_Temporary:
@@ -3415,7 +3568,10 @@ unsigned InitializedEntity::dumpImpl(raw_ostream &OS) const {
case EK_Result: OS << "Result"; break;
case EK_StmtExprResult: OS << "StmtExprResult"; break;
case EK_Exception: OS << "Exception"; break;
- case EK_Member: OS << "Member"; break;
+ case EK_Member:
+ case EK_ParenAggInitMember:
+ OS << "Member";
+ break;
case EK_Binding: OS << "Binding"; break;
case EK_New: OS << "New"; break;
case EK_Temporary: OS << "Temporary"; break;
@@ -3441,7 +3597,7 @@ unsigned InitializedEntity::dumpImpl(raw_ostream &OS) const {
D->printQualifiedName(OS);
}
- OS << " '" << getType().getAsString() << "'\n";
+ OS << " '" << getType() << "'\n";
return Depth + 1;
}
@@ -3491,6 +3647,7 @@ void InitializationSequence::Step::Destroy() {
case SK_StdInitializerListConstructorCall:
case SK_OCLSamplerInit:
case SK_OCLZeroOpaqueType:
+ case SK_ParenthesizedListInit:
break;
case SK_ConversionSequence:
@@ -3501,10 +3658,10 @@ void InitializationSequence::Step::Destroy() {
bool InitializationSequence::isDirectReferenceBinding() const {
// There can be some lvalue adjustments after the SK_BindReference step.
- for (auto I = Steps.rbegin(); I != Steps.rend(); ++I) {
- if (I->Kind == SK_BindReference)
+ for (const Step &S : llvm::reverse(Steps)) {
+ if (S.Kind == SK_BindReference)
return true;
- if (I->Kind == SK_BindReferenceToTemporary)
+ if (S.Kind == SK_BindReferenceToTemporary)
return false;
}
return false;
@@ -3550,6 +3707,8 @@ bool InitializationSequence::isAmbiguous() const {
case FK_PlaceholderType:
case FK_ExplicitConstructor:
case FK_AddressOfUnaddressableFunction:
+ case FK_ParenthesizedListInitFailed:
+ case FK_DesignatedInitForNonAggregate:
return false;
case FK_ReferenceInitOverloadFailed:
@@ -3785,6 +3944,13 @@ void InitializationSequence::AddOCLZeroOpaqueTypeStep(QualType T) {
Steps.push_back(S);
}
+void InitializationSequence::AddParenthesizedListInitStep(QualType T) {
+ Step S;
+ S.Kind = SK_ParenthesizedListInit;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
void InitializationSequence::RewrapReferenceInitList(QualType T,
InitListExpr *Syntactic) {
assert(Syntactic->getNumInits() == 1 &&
@@ -3892,7 +4058,7 @@ static bool TryInitializerListConstruction(Sema &S,
E.withConst(),
llvm::APInt(S.Context.getTypeSize(S.Context.getSizeType()),
List->getNumInits()),
- nullptr, clang::ArrayType::Normal, 0);
+ nullptr, clang::ArraySizeModifier::Normal, 0);
InitializedEntity HiddenArray =
InitializedEntity::InitializeTemporary(ArrayType);
InitializationKind Kind = InitializationKind::CreateDirectList(
@@ -3921,16 +4087,13 @@ static bool hasCopyOrMoveCtorParam(ASTContext &Ctx,
return Ctx.hasSameUnqualifiedType(ParmT, ClassT);
}
-static OverloadingResult
-ResolveConstructorOverload(Sema &S, SourceLocation DeclLoc,
- MultiExprArg Args,
- OverloadCandidateSet &CandidateSet,
- QualType DestType,
- DeclContext::lookup_result Ctors,
- OverloadCandidateSet::iterator &Best,
- bool CopyInitializing, bool AllowExplicit,
- bool OnlyListConstructors, bool IsListInit,
- bool SecondStepOfCopyInit = false) {
+static OverloadingResult ResolveConstructorOverload(
+ Sema &S, SourceLocation DeclLoc, MultiExprArg Args,
+ OverloadCandidateSet &CandidateSet, QualType DestType,
+ DeclContext::lookup_result Ctors, OverloadCandidateSet::iterator &Best,
+ bool CopyInitializing, bool AllowExplicit, bool OnlyListConstructors,
+ bool IsListInit, bool RequireActualConstructor,
+ bool SecondStepOfCopyInit = false) {
CandidateSet.clear(OverloadCandidateSet::CSK_InitByConstructor);
CandidateSet.setDestAS(DestType.getQualifiers().getAddressSpace());
@@ -3993,7 +4156,7 @@ ResolveConstructorOverload(Sema &S, SourceLocation DeclLoc,
// Note: SecondStepOfCopyInit is only ever true in this case when
// evaluating whether to produce a C++98 compatibility warning.
if (S.getLangOpts().CPlusPlus17 && Args.size() == 1 &&
- !SecondStepOfCopyInit) {
+ !RequireActualConstructor && !SecondStepOfCopyInit) {
Expr *Initializer = Args[0];
auto *SourceRD = Initializer->getType()->getAsCXXRecordDecl();
if (SourceRD && S.isCompleteType(DeclLoc, Initializer->getType())) {
@@ -4061,6 +4224,12 @@ static void TryConstructorInitialization(Sema &S,
return;
}
+ bool RequireActualConstructor =
+ !(Entity.getKind() != InitializedEntity::EK_Base &&
+ Entity.getKind() != InitializedEntity::EK_Delegating &&
+ Entity.getKind() !=
+ InitializedEntity::EK_LambdaToBlockConversionBlockElement);
+
// C++17 [dcl.init]p17:
// - If the initializer expression is a prvalue and the cv-unqualified
// version of the source type is the same class as the class of the
@@ -4070,11 +4239,7 @@ static void TryConstructorInitialization(Sema &S,
// class or delegating to another constructor from a mem-initializer.
// ObjC++: Lambda captured by the block in the lambda to block conversion
// should avoid copy elision.
- if (S.getLangOpts().CPlusPlus17 &&
- Entity.getKind() != InitializedEntity::EK_Base &&
- Entity.getKind() != InitializedEntity::EK_Delegating &&
- Entity.getKind() !=
- InitializedEntity::EK_LambdaToBlockConversionBlockElement &&
+ if (S.getLangOpts().CPlusPlus17 && !RequireActualConstructor &&
UnwrappedArgs.size() == 1 && UnwrappedArgs[0]->isPRValue() &&
S.Context.hasSameUnqualifiedType(UnwrappedArgs[0]->getType(), DestType)) {
// Convert qualifications if necessary.
@@ -4122,11 +4287,10 @@ static void TryConstructorInitialization(Sema &S,
// If the initializer list has no elements and T has a default constructor,
// the first phase is omitted.
if (!(UnwrappedArgs.empty() && S.LookupDefaultConstructor(DestRecordDecl)))
- Result = ResolveConstructorOverload(S, Kind.getLocation(), Args,
- CandidateSet, DestType, Ctors, Best,
- CopyInitialization, AllowExplicit,
- /*OnlyListConstructors=*/true,
- IsListInit);
+ Result = ResolveConstructorOverload(
+ S, Kind.getLocation(), Args, CandidateSet, DestType, Ctors, Best,
+ CopyInitialization, AllowExplicit,
+ /*OnlyListConstructors=*/true, IsListInit, RequireActualConstructor);
}
// C++11 [over.match.list]p1:
@@ -4136,11 +4300,10 @@ static void TryConstructorInitialization(Sema &S,
// elements of the initializer list.
if (Result == OR_No_Viable_Function) {
AsInitializerList = false;
- Result = ResolveConstructorOverload(S, Kind.getLocation(), UnwrappedArgs,
- CandidateSet, DestType, Ctors, Best,
- CopyInitialization, AllowExplicit,
- /*OnlyListConstructors=*/false,
- IsListInit);
+ Result = ResolveConstructorOverload(
+ S, Kind.getLocation(), UnwrappedArgs, CandidateSet, DestType, Ctors,
+ Best, CopyInitialization, AllowExplicit,
+ /*OnlyListConstructors=*/false, IsListInit, RequireActualConstructor);
}
if (Result) {
Sequence.SetOverloadFailure(
@@ -4253,7 +4416,8 @@ static void TryReferenceInitializationCore(Sema &S,
Qualifiers T1Quals,
QualType cv2T2, QualType T2,
Qualifiers T2Quals,
- InitializationSequence &Sequence);
+ InitializationSequence &Sequence,
+ bool TopLevelOfInitList);
static void TryValueInitialization(Sema &S,
const InitializedEntity &Entity,
@@ -4307,7 +4471,8 @@ static void TryReferenceListInitialization(Sema &S,
if (RefRelationship >= Sema::Ref_Related) {
// Try to bind the reference here.
TryReferenceInitializationCore(S, Entity, Kind, Initializer, cv1T1, T1,
- T1Quals, cv2T2, T2, T2Quals, Sequence);
+ T1Quals, cv2T2, T2, T2Quals, Sequence,
+ /*TopLevelOfInitList=*/true);
if (Sequence)
Sequence.RewrapReferenceInitList(cv1T1, InitList);
return;
@@ -4341,6 +4506,17 @@ static void TryReferenceListInitialization(Sema &S,
if (Sequence) {
if (DestType->isRValueReferenceType() ||
(T1Quals.hasConst() && !T1Quals.hasVolatile())) {
+ if (S.getLangOpts().CPlusPlus20 &&
+ isa<IncompleteArrayType>(T1->getUnqualifiedDesugaredType()) &&
+ DestType->isRValueReferenceType()) {
+ // C++20 [dcl.init.list]p3.10:
+ // List-initialization of an object or reference of type T is defined as
+ // follows:
+ // ..., unless T is “reference to array of unknown bound of U”, in which
+ // case the type of the prvalue is the type of x in the declaration U
+ // x[] H, where H is the initializer list.
+ Sequence.AddQualificationConversionStep(cv1T1, clang::VK_PRValue);
+ }
Sequence.AddReferenceBindingStep(cv1T1IgnoreAS,
/*BindingTemporary=*/true);
if (T1Quals.hasAddressSpace())
@@ -4380,6 +4556,22 @@ static void TryListInitialization(Sema &S,
return;
}
+ // C++20 [dcl.init.list]p3:
+ // - If the braced-init-list contains a designated-initializer-list, T shall
+ // be an aggregate class. [...] Aggregate initialization is performed.
+ //
+ // We allow arrays here too in order to support array designators.
+ //
+ // FIXME: This check should precede the handling of reference initialization.
+ // We follow other compilers in allowing things like 'Aggr &&a = {.x = 1};'
+ // as a tentative DR resolution.
+ bool IsDesignatedInit = InitList->hasDesignatedInit();
+ if (!DestType->isAggregateType() && IsDesignatedInit) {
+ Sequence.SetFailed(
+ InitializationSequence::FK_DesignatedInitForNonAggregate);
+ return;
+ }
+
// C++11 [dcl.init.list]p3, per DR1467:
// - If T is a class type and the initializer list has a single element of
// type cv U, where U is T or a class derived from T, the object is
@@ -4391,7 +4583,8 @@ static void TryListInitialization(Sema &S,
// (8.5.2 [dcl.init.string]), initialization is performed as described
// in that section.
// - Otherwise, if T is an aggregate, [...] (continue below).
- if (S.getLangOpts().CPlusPlus11 && InitList->getNumInits() == 1) {
+ if (S.getLangOpts().CPlusPlus11 && InitList->getNumInits() == 1 &&
+ !IsDesignatedInit) {
if (DestType->isRecordType()) {
QualType InitType = InitList->getInit(0)->getType();
if (S.Context.hasSameUnqualifiedType(InitType, DestType) ||
@@ -4433,7 +4626,7 @@ static void TryListInitialization(Sema &S,
// - If T is an aggregate, aggregate initialization is performed.
if ((DestType->isRecordType() && !DestType->isAggregateType()) ||
(S.getLangOpts().CPlusPlus11 &&
- S.isStdInitializerList(DestType, nullptr))) {
+ S.isStdInitializerList(DestType, nullptr) && !IsDesignatedInit)) {
if (S.getLangOpts().CPlusPlus11) {
// - Otherwise, if the initializer list has no elements and T is a
// class type with a default constructor, the object is
@@ -4475,13 +4668,13 @@ static void TryListInitialization(Sema &S,
Kind.getKind() == InitializationKind::IK_DirectList &&
ET && ET->getDecl()->isFixed() &&
!S.Context.hasSameUnqualifiedType(E->getType(), DestType) &&
- (E->getType()->isIntegralOrEnumerationType() ||
+ (E->getType()->isIntegralOrUnscopedEnumerationType() ||
E->getType()->isFloatingType())) {
// There are two ways that T(v) can work when T is an enumeration type.
// If there is either an implicit conversion sequence from v to T or
// a conversion function that can convert from v to T, then we use that.
- // Otherwise, if v is of integral, enumeration, or floating-point type,
- // it is converted to the enumeration type via its underlying type.
+ // Otherwise, if v is of integral, unscoped enumeration, or floating-point
+ // type, it is converted to the enumeration type via its underlying type.
// There is no overlap possible between these two cases (except when the
// source value is already of the destination type), and the first
// case is handled by the general case for single-element lists below.
@@ -4738,11 +4931,11 @@ static void CheckCXX98CompatAccessibleCopy(Sema &S,
Expr *CurInitExpr);
/// Attempt reference initialization (C++0x [dcl.init.ref])
-static void TryReferenceInitialization(Sema &S,
- const InitializedEntity &Entity,
+static void TryReferenceInitialization(Sema &S, const InitializedEntity &Entity,
const InitializationKind &Kind,
Expr *Initializer,
- InitializationSequence &Sequence) {
+ InitializationSequence &Sequence,
+ bool TopLevelOfInitList) {
QualType DestType = Entity.getType();
QualType cv1T1 = DestType->castAs<ReferenceType>()->getPointeeType();
Qualifiers T1Quals;
@@ -4760,7 +4953,8 @@ static void TryReferenceInitialization(Sema &S,
// Delegate everything else to a subfunction.
TryReferenceInitializationCore(S, Entity, Kind, Initializer, cv1T1, T1,
- T1Quals, cv2T2, T2, T2Quals, Sequence);
+ T1Quals, cv2T2, T2, T2Quals, Sequence,
+ TopLevelOfInitList);
}
/// Determine whether an expression is a non-referenceable glvalue (one to
@@ -4783,7 +4977,8 @@ static void TryReferenceInitializationCore(Sema &S,
Qualifiers T1Quals,
QualType cv2T2, QualType T2,
Qualifiers T2Quals,
- InitializationSequence &Sequence) {
+ InitializationSequence &Sequence,
+ bool TopLevelOfInitList) {
QualType DestType = Entity.getType();
SourceLocation DeclLoc = Initializer->getBeginLoc();
@@ -5057,7 +5252,8 @@ static void TryReferenceInitializationCore(Sema &S,
Sequence.SetFailed(InitializationSequence::FK_ReferenceInitFailed);
return;
} else {
- Sequence.AddConversionSequenceStep(ICS, TempEntity.getType());
+ Sequence.AddConversionSequenceStep(ICS, TempEntity.getType(),
+ TopLevelOfInitList);
}
// [...] If T1 is reference-related to T2, cv1 must be the
@@ -5199,7 +5395,7 @@ static void TryDefaultInitialization(Sema &S,
// constructor for T is called (and the initialization is ill-formed if
// T has no accessible default constructor);
if (DestType->isRecordType() && S.getLangOpts().CPlusPlus) {
- TryConstructorInitialization(S, Entity, Kind, None, DestType,
+ TryConstructorInitialization(S, Entity, Kind, std::nullopt, DestType,
Entity.getType(), Sequence);
return;
}
@@ -5222,6 +5418,255 @@ static void TryDefaultInitialization(Sema &S,
}
}
+static void TryOrBuildParenListInitialization(
+ Sema &S, const InitializedEntity &Entity, const InitializationKind &Kind,
+ ArrayRef<Expr *> Args, InitializationSequence &Sequence, bool VerifyOnly,
+ ExprResult *Result = nullptr) {
+ unsigned EntityIndexToProcess = 0;
+ SmallVector<Expr *, 4> InitExprs;
+ QualType ResultType;
+ Expr *ArrayFiller = nullptr;
+ FieldDecl *InitializedFieldInUnion = nullptr;
+
+ auto HandleInitializedEntity = [&](const InitializedEntity &SubEntity,
+ const InitializationKind &SubKind,
+ Expr *Arg, Expr **InitExpr = nullptr) {
+ InitializationSequence IS = InitializationSequence(
+ S, SubEntity, SubKind, Arg ? MultiExprArg(Arg) : std::nullopt);
+
+ if (IS.Failed()) {
+ if (!VerifyOnly) {
+ IS.Diagnose(S, SubEntity, SubKind, Arg ? ArrayRef(Arg) : std::nullopt);
+ } else {
+ Sequence.SetFailed(
+ InitializationSequence::FK_ParenthesizedListInitFailed);
+ }
+
+ return false;
+ }
+ if (!VerifyOnly) {
+ ExprResult ER;
+ ER = IS.Perform(S, SubEntity, SubKind,
+ Arg ? MultiExprArg(Arg) : std::nullopt);
+ if (InitExpr)
+ *InitExpr = ER.get();
+ else
+ InitExprs.push_back(ER.get());
+ }
+ return true;
+ };
+
+ if (const ArrayType *AT =
+ S.getASTContext().getAsArrayType(Entity.getType())) {
+ SmallVector<InitializedEntity, 4> ElementEntities;
+ uint64_t ArrayLength;
+ // C++ [dcl.init]p16.5
+ // if the destination type is an array, the object is initialized as
+ // follows. Let x1, . . . , xk be the elements of the expression-list. If
+ // the destination type is an array of unknown bound, it is defined as
+ // having k elements.
+ if (const ConstantArrayType *CAT =
+ S.getASTContext().getAsConstantArrayType(Entity.getType())) {
+ ArrayLength = CAT->getSize().getZExtValue();
+ ResultType = Entity.getType();
+ } else if (const VariableArrayType *VAT =
+ S.getASTContext().getAsVariableArrayType(Entity.getType())) {
+ // Braced-initialization of variable array types is not allowed, even if
+ // the size is greater than or equal to the number of args, so we don't
+ // allow them to be initialized via parenthesized aggregate initialization
+ // either.
+ const Expr *SE = VAT->getSizeExpr();
+ S.Diag(SE->getBeginLoc(), diag::err_variable_object_no_init)
+ << SE->getSourceRange();
+ return;
+ } else {
+ assert(isa<IncompleteArrayType>(Entity.getType()));
+ ArrayLength = Args.size();
+ }
+ EntityIndexToProcess = ArrayLength;
+
+ // ...the ith array element is copy-initialized with xi for each
+ // 1 <= i <= k
+ for (Expr *E : Args) {
+ InitializedEntity SubEntity = InitializedEntity::InitializeElement(
+ S.getASTContext(), EntityIndexToProcess, Entity);
+ InitializationKind SubKind = InitializationKind::CreateForInit(
+ E->getExprLoc(), /*isDirectInit=*/false, E);
+ if (!HandleInitializedEntity(SubEntity, SubKind, E))
+ return;
+ }
+ // ...and value-initialized for each k < i <= n;
+ if (ArrayLength > Args.size() || Entity.isVariableLengthArrayNew()) {
+ InitializedEntity SubEntity = InitializedEntity::InitializeElement(
+ S.getASTContext(), Args.size(), Entity);
+ InitializationKind SubKind = InitializationKind::CreateValue(
+ Kind.getLocation(), Kind.getLocation(), Kind.getLocation(), true);
+ if (!HandleInitializedEntity(SubEntity, SubKind, nullptr, &ArrayFiller))
+ return;
+ }
+
+ if (ResultType.isNull()) {
+ ResultType = S.Context.getConstantArrayType(
+ AT->getElementType(), llvm::APInt(/*numBits=*/32, ArrayLength),
+ /*SizeExpr=*/nullptr, ArraySizeModifier::Normal, 0);
+ }
+ } else if (auto *RT = Entity.getType()->getAs<RecordType>()) {
+ bool IsUnion = RT->isUnionType();
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->isInvalidDecl()) {
+ // Exit early to avoid confusion when processing members.
+ // We do the same for braced list initialization in
+ // `CheckStructUnionTypes`.
+ Sequence.SetFailed(
+ clang::InitializationSequence::FK_ParenthesizedListInitFailed);
+ return;
+ }
+
+ if (!IsUnion) {
+ for (const CXXBaseSpecifier &Base : RD->bases()) {
+ InitializedEntity SubEntity = InitializedEntity::InitializeBase(
+ S.getASTContext(), &Base, false, &Entity);
+ if (EntityIndexToProcess < Args.size()) {
+ // C++ [dcl.init]p16.6.2.2.
+ // ...the object is initialized is follows. Let e1, ..., en be the
+ // elements of the aggregate([dcl.init.aggr]). Let x1, ..., xk be
+ // the elements of the expression-list...The element ei is
+ // copy-initialized with xi for 1 <= i <= k.
+ Expr *E = Args[EntityIndexToProcess];
+ InitializationKind SubKind = InitializationKind::CreateForInit(
+ E->getExprLoc(), /*isDirectInit=*/false, E);
+ if (!HandleInitializedEntity(SubEntity, SubKind, E))
+ return;
+ } else {
+ // We've processed all of the args, but there are still base classes
+ // that have to be initialized.
+ // C++ [dcl.init]p17.6.2.2
+ // The remaining elements...otherwise are value initialzed
+ InitializationKind SubKind = InitializationKind::CreateValue(
+ Kind.getLocation(), Kind.getLocation(), Kind.getLocation(),
+ /*IsImplicit=*/true);
+ if (!HandleInitializedEntity(SubEntity, SubKind, nullptr))
+ return;
+ }
+ EntityIndexToProcess++;
+ }
+ }
+
+ for (FieldDecl *FD : RD->fields()) {
+ // Unnamed bitfields should not be initialized at all, either with an arg
+ // or by default.
+ if (FD->isUnnamedBitfield())
+ continue;
+
+ InitializedEntity SubEntity =
+ InitializedEntity::InitializeMemberFromParenAggInit(FD);
+
+ if (EntityIndexToProcess < Args.size()) {
+ // ...The element ei is copy-initialized with xi for 1 <= i <= k.
+ Expr *E = Args[EntityIndexToProcess];
+
+ // Incomplete array types indicate flexible array members. Do not allow
+ // paren list initializations of structs with these members, as GCC
+ // doesn't either.
+ if (FD->getType()->isIncompleteArrayType()) {
+ if (!VerifyOnly) {
+ S.Diag(E->getBeginLoc(), diag::err_flexible_array_init)
+ << SourceRange(E->getBeginLoc(), E->getEndLoc());
+ S.Diag(FD->getLocation(), diag::note_flexible_array_member) << FD;
+ }
+ Sequence.SetFailed(
+ InitializationSequence::FK_ParenthesizedListInitFailed);
+ return;
+ }
+
+ InitializationKind SubKind = InitializationKind::CreateForInit(
+ E->getExprLoc(), /*isDirectInit=*/false, E);
+ if (!HandleInitializedEntity(SubEntity, SubKind, E))
+ return;
+
+ // Unions should have only one initializer expression, so we bail out
+ // after processing the first field. If there are more initializers then
+ // it will be caught when we later check whether EntityIndexToProcess is
+ // less than Args.size();
+ if (IsUnion) {
+ InitializedFieldInUnion = FD;
+ EntityIndexToProcess = 1;
+ break;
+ }
+ } else {
+ // We've processed all of the args, but there are still members that
+ // have to be initialized.
+ if (FD->hasInClassInitializer()) {
+ if (!VerifyOnly) {
+ // C++ [dcl.init]p16.6.2.2
+ // The remaining elements are initialized with their default
+ // member initializers, if any
+ ExprResult DIE = S.BuildCXXDefaultInitExpr(
+ Kind.getParenOrBraceRange().getEnd(), FD);
+ if (DIE.isInvalid())
+ return;
+ S.checkInitializerLifetime(SubEntity, DIE.get());
+ InitExprs.push_back(DIE.get());
+ }
+ } else {
+ // C++ [dcl.init]p17.6.2.2
+ // The remaining elements...otherwise are value initialzed
+ if (FD->getType()->isReferenceType()) {
+ Sequence.SetFailed(
+ InitializationSequence::FK_ParenthesizedListInitFailed);
+ if (!VerifyOnly) {
+ SourceRange SR = Kind.getParenOrBraceRange();
+ S.Diag(SR.getEnd(), diag::err_init_reference_member_uninitialized)
+ << FD->getType() << SR;
+ S.Diag(FD->getLocation(), diag::note_uninit_reference_member);
+ }
+ return;
+ }
+ InitializationKind SubKind = InitializationKind::CreateValue(
+ Kind.getLocation(), Kind.getLocation(), Kind.getLocation(), true);
+ if (!HandleInitializedEntity(SubEntity, SubKind, nullptr))
+ return;
+ }
+ }
+ EntityIndexToProcess++;
+ }
+ ResultType = Entity.getType();
+ }
+
+ // Not all of the args have been processed, so there must've been more args
+ // than were required to initialize the element.
+ if (EntityIndexToProcess < Args.size()) {
+ Sequence.SetFailed(InitializationSequence::FK_ParenthesizedListInitFailed);
+ if (!VerifyOnly) {
+ QualType T = Entity.getType();
+ int InitKind = T->isArrayType() ? 0 : T->isUnionType() ? 3 : 4;
+ SourceRange ExcessInitSR(Args[EntityIndexToProcess]->getBeginLoc(),
+ Args.back()->getEndLoc());
+ S.Diag(Kind.getLocation(), diag::err_excess_initializers)
+ << InitKind << ExcessInitSR;
+ }
+ return;
+ }
+
+ if (VerifyOnly) {
+ Sequence.setSequenceKind(InitializationSequence::NormalSequence);
+ Sequence.AddParenthesizedListInitStep(Entity.getType());
+ } else if (Result) {
+ SourceRange SR = Kind.getParenOrBraceRange();
+ auto *CPLIE = CXXParenListInitExpr::Create(
+ S.getASTContext(), InitExprs, ResultType, Args.size(),
+ Kind.getLocation(), SR.getBegin(), SR.getEnd());
+ if (ArrayFiller)
+ CPLIE->setArrayFiller(ArrayFiller);
+ if (InitializedFieldInUnion)
+ CPLIE->setInitializedFieldInUnion(InitializedFieldInUnion);
+ *Result = CPLIE;
+ S.Diag(Kind.getLocation(),
+ diag::warn_cxx17_compat_aggregate_init_paren_list)
+ << Kind.getLocation() << SR << ResultType;
+ }
+}
+
/// Attempt a user-defined conversion between two types (C++ [dcl.init]),
/// which enumerates all conversion functions and performs overload resolution
/// to select the best.
@@ -5772,7 +6217,8 @@ void InitializationSequence::InitializeFrom(Sema &S,
else if (isa<InitListExpr>(Args[0]))
SetFailed(FK_ParenthesizedListInitForReference);
else
- TryReferenceInitialization(S, Entity, Kind, Args[0], *this);
+ TryReferenceInitialization(S, Entity, Kind, Args[0], *this,
+ TopLevelOfInitList);
return;
}
@@ -5877,7 +6323,11 @@ void InitializationSequence::InitializeFrom(Sema &S,
TryListInitialization(S, Entity, Kind, cast<InitListExpr>(Initializer),
*this, TreatUnavailableAsInvalid);
AddParenthesizedArrayInitStep(DestType);
- } else if (DestAT->getElementType()->isCharType())
+ } else if (S.getLangOpts().CPlusPlus20 && !TopLevelOfInitList &&
+ Kind.getKind() == InitializationKind::IK_Direct)
+ TryOrBuildParenListInitialization(S, Entity, Kind, Args, *this,
+ /*VerifyOnly=*/true);
+ else if (DestAT->getElementType()->isCharType())
SetFailed(FK_ArrayNeedsInitListOrStringLiteral);
else if (IsWideCharCompatible(DestAT->getElementType(), Context))
SetFailed(FK_ArrayNeedsInitListOrWideStringLiteral);
@@ -5898,6 +6348,7 @@ void InitializationSequence::InitializeFrom(Sema &S,
// We're at the end of the line for C: it's either a write-back conversion
// or it's a C assignment. There's no need to check anything else.
if (!S.getLangOpts().CPlusPlus) {
+ assert(Initializer && "Initializer must be non-null");
// If allowed, check whether this is an Objective-C writeback conversion.
if (allowObjCWritebackConversion &&
tryObjCWritebackConversion(S, *this, Entity, Initializer)) {
@@ -5924,23 +6375,91 @@ void InitializationSequence::InitializeFrom(Sema &S,
if (Kind.getKind() == InitializationKind::IK_Direct ||
(Kind.getKind() == InitializationKind::IK_Copy &&
(Context.hasSameUnqualifiedType(SourceType, DestType) ||
- S.IsDerivedFrom(Initializer->getBeginLoc(), SourceType, DestType))))
- TryConstructorInitialization(S, Entity, Kind, Args,
- DestType, DestType, *this);
- // - Otherwise (i.e., for the remaining copy-initialization cases),
- // user-defined conversion sequences that can convert from the source
- // type to the destination type or (when a conversion function is
- // used) to a derived class thereof are enumerated as described in
- // 13.3.1.4, and the best one is chosen through overload resolution
- // (13.3).
- else
+ (Initializer && S.IsDerivedFrom(Initializer->getBeginLoc(),
+ SourceType, DestType))))) {
+ TryConstructorInitialization(S, Entity, Kind, Args, DestType, DestType,
+ *this);
+
+ // We fall back to the "no matching constructor" path if the
+ // failed candidate set has functions other than the three default
+ // constructors. For example, conversion function.
+ if (const auto *RD =
+ dyn_cast<CXXRecordDecl>(DestType->getAs<RecordType>()->getDecl());
+ // In general, we should call isCompleteType for RD to check its
+ // completeness, we don't call it here as it was already called in the
+ // above TryConstructorInitialization.
+ S.getLangOpts().CPlusPlus20 && RD && RD->hasDefinition() &&
+ RD->isAggregate() && Failed() &&
+ getFailureKind() == FK_ConstructorOverloadFailed) {
+ // Do not attempt paren list initialization if overload resolution
+ // resolves to a deleted function .
+ //
+ // We may reach this condition if we have a union wrapping a class with
+ // a non-trivial copy or move constructor and we call one of those two
+ // constructors. The union is an aggregate, but the matched constructor
+ // is implicitly deleted, so we need to prevent aggregate initialization
+ // (otherwise, it'll attempt aggregate initialization by initializing
+ // the first element with a reference to the union).
+ OverloadCandidateSet::iterator Best;
+ OverloadingResult OR = getFailedCandidateSet().BestViableFunction(
+ S, Kind.getLocation(), Best);
+ if (OR != OverloadingResult::OR_Deleted) {
+ // C++20 [dcl.init] 17.6.2.2:
+ // - Otherwise, if no constructor is viable, the destination type is
+ // an
+ // aggregate class, and the initializer is a parenthesized
+ // expression-list.
+ TryOrBuildParenListInitialization(S, Entity, Kind, Args, *this,
+ /*VerifyOnly=*/true);
+ }
+ }
+ } else {
+ // - Otherwise (i.e., for the remaining copy-initialization cases),
+ // user-defined conversion sequences that can convert from the
+ // source type to the destination type or (when a conversion
+ // function is used) to a derived class thereof are enumerated as
+ // described in 13.3.1.4, and the best one is chosen through
+ // overload resolution (13.3).
+ assert(Initializer && "Initializer must be non-null");
TryUserDefinedConversion(S, DestType, Kind, Initializer, *this,
TopLevelOfInitList);
+ }
return;
}
assert(Args.size() >= 1 && "Zero-argument case handled above");
+ // For HLSL ext vector types we allow list initialization behavior for C++
+ // constructor syntax. This is accomplished by converting initialization
+ // arguments an InitListExpr late.
+ if (S.getLangOpts().HLSL && DestType->isExtVectorType() &&
+ (SourceType.isNull() ||
+ !Context.hasSameUnqualifiedType(SourceType, DestType))) {
+
+ llvm::SmallVector<Expr *> InitArgs;
+ for (auto *Arg : Args) {
+ if (Arg->getType()->isExtVectorType()) {
+ const auto *VTy = Arg->getType()->castAs<ExtVectorType>();
+ unsigned Elm = VTy->getNumElements();
+ for (unsigned Idx = 0; Idx < Elm; ++Idx) {
+ InitArgs.emplace_back(new (Context) ArraySubscriptExpr(
+ Arg,
+ IntegerLiteral::Create(
+ Context, llvm::APInt(Context.getIntWidth(Context.IntTy), Idx),
+ Context.IntTy, SourceLocation()),
+ VTy->getElementType(), Arg->getValueKind(), Arg->getObjectKind(),
+ SourceLocation()));
+ }
+ } else
+ InitArgs.emplace_back(Arg);
+ }
+ InitListExpr *ILE = new (Context) InitListExpr(
+ S.getASTContext(), SourceLocation(), InitArgs, SourceLocation());
+ Args[0] = ILE;
+ AddListInitializationStep(DestType);
+ return;
+ }
+
// The remaining cases all need a source type.
if (Args.size() > 1) {
SetFailed(FK_TooManyInitsForScalar);
@@ -5953,6 +6472,7 @@ void InitializationSequence::InitializeFrom(Sema &S,
// - Otherwise, if the source type is a (possibly cv-qualified) class
// type, conversion functions are considered.
if (!SourceType.isNull() && SourceType->isRecordType()) {
+ assert(Initializer && "Initializer must be non-null");
// For a conversion to _Atomic(T) from either T or a class type derived
// from T, initialize the T object then convert to _Atomic type.
bool NeedAtomicConversion = false;
@@ -6089,6 +6609,7 @@ getAssignmentAction(const InitializedEntity &Entity, bool Diagnose = false) {
return Sema::AA_Converting;
case InitializedEntity::EK_Member:
+ case InitializedEntity::EK_ParenAggInitMember:
case InitializedEntity::EK_Binding:
case InitializedEntity::EK_ArrayElement:
case InitializedEntity::EK_VectorElement:
@@ -6109,6 +6630,7 @@ static bool shouldBindAsTemporary(const InitializedEntity &Entity) {
switch (Entity.getKind()) {
case InitializedEntity::EK_ArrayElement:
case InitializedEntity::EK_Member:
+ case InitializedEntity::EK_ParenAggInitMember:
case InitializedEntity::EK_Result:
case InitializedEntity::EK_StmtExprResult:
case InitializedEntity::EK_New:
@@ -6153,6 +6675,7 @@ static bool shouldDestroyEntity(const InitializedEntity &Entity) {
return false;
case InitializedEntity::EK_Member:
+ case InitializedEntity::EK_ParenAggInitMember:
case InitializedEntity::EK_Binding:
case InitializedEntity::EK_Variable:
case InitializedEntity::EK_Parameter:
@@ -6189,6 +6712,7 @@ static SourceLocation getInitializationLoc(const InitializedEntity &Entity,
case InitializedEntity::EK_ArrayElement:
case InitializedEntity::EK_Member:
+ case InitializedEntity::EK_ParenAggInitMember:
case InitializedEntity::EK_Parameter:
case InitializedEntity::EK_Parameter_CF_Audited:
case InitializedEntity::EK_TemplateParameter:
@@ -6259,6 +6783,7 @@ static ExprResult CopyObject(Sema &S,
S, Loc, CurInitExpr, CandidateSet, T, Ctors, Best,
/*CopyInitializing=*/false, /*AllowExplicit=*/true,
/*OnlyListConstructors=*/false, /*IsListInit=*/false,
+ /*RequireActualConstructor=*/false,
/*SecondStepOfCopyInit=*/true)) {
case OR_Success:
break;
@@ -6361,15 +6886,12 @@ static ExprResult CopyObject(Sema &S,
CurInitExpr->getType());
// Actually perform the constructor call.
- CurInit = S.BuildCXXConstructExpr(Loc, T, Best->FoundDecl, Constructor,
- Elidable,
- ConstructorArgs,
- HadMultipleCandidates,
- /*ListInit*/ false,
- /*StdInitListInit*/ false,
- /*ZeroInit*/ false,
- CXXConstructExpr::CK_Complete,
- SourceRange());
+ CurInit = S.BuildCXXConstructExpr(
+ Loc, T, Best->FoundDecl, Constructor, Elidable, ConstructorArgs,
+ HadMultipleCandidates,
+ /*ListInit*/ false,
+ /*StdInitListInit*/ false,
+ /*ZeroInit*/ false, CXXConstructionKind::Complete, SourceRange());
// If we're supposed to bind temporaries, do so.
if (!CurInit.isInvalid() && shouldBindAsTemporary(Entity))
@@ -6404,6 +6926,7 @@ static void CheckCXX98CompatAccessibleCopy(Sema &S,
S, Loc, CurInitExpr, CandidateSet, CurInitExpr->getType(), Ctors, Best,
/*CopyInitializing=*/false, /*AllowExplicit=*/true,
/*OnlyListConstructors=*/false, /*IsListInit=*/false,
+ /*RequireActualConstructor=*/false,
/*SecondStepOfCopyInit=*/true);
PartialDiagnostic Diag = S.PDiag(diag::warn_cxx98_compat_temp_copy)
@@ -6536,7 +7059,7 @@ PerformConstructorInitialization(Sema &S,
if (isExplicitTemporary(Entity, Kind, NumArgs)) {
// An explicitly-constructed temporary, e.g., X(1, 2).
- if (S.DiagnoseUseOfDecl(Constructor, Loc))
+ if (S.DiagnoseUseOfDecl(Step.Function.FoundDecl, Loc))
return ExprError();
TypeSourceInfo *TSInfo = Entity.getTypeSourceInfo();
@@ -6551,8 +7074,6 @@ PerformConstructorInitialization(Sema &S,
if (auto *Shadow = dyn_cast<ConstructorUsingShadowDecl>(
Step.Function.FoundDecl.getDecl())) {
CalleeDecl = S.findInheritingConstructor(Loc, Constructor, Shadow);
- if (S.DiagnoseUseOfDecl(CalleeDecl, Loc))
- return ExprError();
}
S.MarkFunctionReferenced(Loc, CalleeDecl);
@@ -6565,15 +7086,14 @@ PerformConstructorInitialization(Sema &S,
ConstructorInitRequiresZeroInit),
CalleeDecl);
} else {
- CXXConstructExpr::ConstructionKind ConstructKind =
- CXXConstructExpr::CK_Complete;
+ CXXConstructionKind ConstructKind = CXXConstructionKind::Complete;
if (Entity.getKind() == InitializedEntity::EK_Base) {
- ConstructKind = Entity.getBaseSpecifier()->isVirtual() ?
- CXXConstructExpr::CK_VirtualBase :
- CXXConstructExpr::CK_NonVirtualBase;
+ ConstructKind = Entity.getBaseSpecifier()->isVirtual()
+ ? CXXConstructionKind::VirtualBase
+ : CXXConstructionKind::NonVirtualBase;
} else if (Entity.getKind() == InitializedEntity::EK_Delegating) {
- ConstructKind = CXXConstructExpr::CK_Delegating;
+ ConstructKind = CXXConstructionKind::Delegating;
}
// Only get the parenthesis or brace range if it is a list initialization or
@@ -6757,7 +7277,15 @@ static LifetimeResult getEntityLifetime(
case InitializedEntity::EK_Exception:
// FIXME: Can we diagnose lifetime problems with exceptions?
return {nullptr, LK_FullExpression};
+
+ case InitializedEntity::EK_ParenAggInitMember:
+ // -- A temporary object bound to a reference element of an aggregate of
+ // class type initialized from a parenthesized expression-list
+ // [dcl.init, 9.3] persists until the completion of the full-expression
+ // containing the expression-list.
+ return {nullptr, LK_FullExpression};
}
+
llvm_unreachable("unknown entity kind");
}
@@ -6872,8 +7400,9 @@ static bool shouldTrackImplicitObjectArg(const CXXMethodDecl *Callee) {
return true;
if (!isInStlNamespace(Callee->getParent()))
return false;
- if (!isRecordWithAttr<PointerAttr>(Callee->getThisObjectType()) &&
- !isRecordWithAttr<OwnerAttr>(Callee->getThisObjectType()))
+ if (!isRecordWithAttr<PointerAttr>(
+ Callee->getFunctionObjectParameterType()) &&
+ !isRecordWithAttr<OwnerAttr>(Callee->getFunctionObjectParameterType()))
return false;
if (Callee->getReturnType()->isPointerType() ||
isRecordWithAttr<PointerAttr>(Callee->getReturnType())) {
@@ -6932,10 +7461,10 @@ static void handleGslAnnotatedTypes(IndirectLocalPath &Path, Expr *Call,
return;
// Once we initialized a value with a reference, it can no longer dangle.
if (!Value) {
- for (auto It = Path.rbegin(), End = Path.rend(); It != End; ++It) {
- if (It->Kind == IndirectLocalPathEntry::GslReferenceInit)
+ for (const IndirectLocalPathEntry &PE : llvm::reverse(Path)) {
+ if (PE.Kind == IndirectLocalPathEntry::GslReferenceInit)
continue;
- if (It->Kind == IndirectLocalPathEntry::GslPointerInit)
+ if (PE.Kind == IndirectLocalPathEntry::GslPointerInit)
return;
break;
}
@@ -7008,7 +7537,7 @@ static bool implicitObjectParamIsLifetimeBound(const FunctionDecl *FD) {
QualType LHST;
auto *MD = dyn_cast<CXXMethodDecl>(FD);
if (MD && MD->isCXXInstanceMember())
- LHST = Ctx.getLValueReferenceType(MD->getThisObjectType());
+ LHST = Ctx.getLValueReferenceType(MD->getFunctionObjectParameterType());
else
LHST = MD->getParamDecl(0)->getType();
if (Ctx.hasSameType(RetT, LHST))
@@ -7026,11 +7555,11 @@ static void visitLifetimeBoundArguments(IndirectLocalPath &Path, Expr *Call,
if (auto *CE = dyn_cast<CallExpr>(Call)) {
Callee = CE->getDirectCallee();
- Args = llvm::makeArrayRef(CE->getArgs(), CE->getNumArgs());
+ Args = llvm::ArrayRef(CE->getArgs(), CE->getNumArgs());
} else {
auto *CCE = cast<CXXConstructExpr>(Call);
Callee = CCE->getConstructor();
- Args = llvm::makeArrayRef(CCE->getArgs(), CCE->getNumArgs());
+ Args = llvm::ArrayRef(CCE->getArgs(), CCE->getNumArgs());
}
if (!Callee)
return;
@@ -7055,13 +7584,31 @@ static void visitLifetimeBoundArguments(IndirectLocalPath &Path, Expr *Call,
Path.pop_back();
};
- if (ObjectArg && implicitObjectParamIsLifetimeBound(Callee))
- VisitLifetimeBoundArg(Callee, ObjectArg);
+ bool CheckCoroCall = false;
+ if (const auto *RD = Callee->getReturnType()->getAsRecordDecl()) {
+ CheckCoroCall = RD->hasAttr<CoroLifetimeBoundAttr>() &&
+ RD->hasAttr<CoroReturnTypeAttr>() &&
+ !Callee->hasAttr<CoroDisableLifetimeBoundAttr>();
+ }
+
+ if (ObjectArg) {
+ bool CheckCoroObjArg = CheckCoroCall;
+ // Coroutine lambda objects with empty capture list are not lifetimebound.
+ if (auto *LE = dyn_cast<LambdaExpr>(ObjectArg->IgnoreImplicit());
+ LE && LE->captures().empty())
+ CheckCoroObjArg = false;
+ // Allow `get_return_object()` as the object param (__promise) is not
+ // lifetimebound.
+ if (Sema::CanBeGetReturnObject(Callee))
+ CheckCoroObjArg = false;
+ if (implicitObjectParamIsLifetimeBound(Callee) || CheckCoroObjArg)
+ VisitLifetimeBoundArg(Callee, ObjectArg);
+ }
for (unsigned I = 0,
N = std::min<unsigned>(Callee->getNumParams(), Args.size());
I != N; ++I) {
- if (Callee->getParamDecl(I)->hasAttr<LifetimeBoundAttr>())
+ if (CheckCoroCall || Callee->getParamDecl(I)->hasAttr<LifetimeBoundAttr>())
VisitLifetimeBoundArg(Callee->getParamDecl(I), Args[I]);
}
}
@@ -7513,7 +8060,7 @@ static SourceRange nextPathEntryRange(const IndirectLocalPath &Path, unsigned I,
case IndirectLocalPathEntry::VarInit:
if (cast<VarDecl>(Path[I].D)->isImplicit())
return SourceRange();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case IndirectLocalPathEntry::DefaultInit:
return Path[I].E->getSourceRange();
@@ -7527,15 +8074,15 @@ static SourceRange nextPathEntryRange(const IndirectLocalPath &Path, unsigned I,
}
static bool pathOnlyInitializesGslPointer(IndirectLocalPath &Path) {
- for (auto It = Path.rbegin(), End = Path.rend(); It != End; ++It) {
- if (It->Kind == IndirectLocalPathEntry::VarInit)
+ for (const auto &It : llvm::reverse(Path)) {
+ if (It.Kind == IndirectLocalPathEntry::VarInit)
continue;
- if (It->Kind == IndirectLocalPathEntry::AddressOf)
+ if (It.Kind == IndirectLocalPathEntry::AddressOf)
continue;
- if (It->Kind == IndirectLocalPathEntry::LifetimeBoundCall)
+ if (It.Kind == IndirectLocalPathEntry::LifetimeBoundCall)
continue;
- return It->Kind == IndirectLocalPathEntry::GslPointerInit ||
- It->Kind == IndirectLocalPathEntry::GslReferenceInit;
+ return It.Kind == IndirectLocalPathEntry::GslPointerInit ||
+ It.Kind == IndirectLocalPathEntry::GslReferenceInit;
}
return false;
}
@@ -7768,7 +8315,7 @@ void Sema::checkInitializerLifetime(const InitializedEntity &Entity,
case IndirectLocalPathEntry::DefaultInit: {
auto *FD = cast<FieldDecl>(Elem.D);
- Diag(FD->getLocation(), diag::note_init_with_default_member_initalizer)
+ Diag(FD->getLocation(), diag::note_init_with_default_member_initializer)
<< FD << nextPathEntryRange(Path, I + 1, L);
break;
}
@@ -7787,7 +8334,7 @@ void Sema::checkInitializerLifetime(const InitializedEntity &Entity,
break;
// FIXME: We can't easily tell apart an init-capture from a nested
// capture of an init-capture.
- const VarDecl *VD = Elem.Capture->getCapturedVar();
+ const ValueDecl *VD = Elem.Capture->getCapturedVar();
Diag(Elem.Capture->getLocation(), diag::note_lambda_capture_initializer)
<< VD << VD->isInitCapture() << Elem.Capture->isExplicit()
<< (Elem.Capture->getCaptureKind() == LCK_ByRef) << VD
@@ -7994,19 +8541,29 @@ ExprResult InitializationSequence::Perform(Sema &S,
return ExprError();
}
if (!ZeroInitializationFixit.empty()) {
- unsigned DiagID = diag::err_default_init_const;
- if (Decl *D = Entity.getDecl())
- if (S.getLangOpts().MSVCCompat && D->hasAttr<SelectAnyAttr>())
- DiagID = diag::ext_default_init_const;
+ const Decl *D = Entity.getDecl();
+ const auto *VD = dyn_cast_or_null<VarDecl>(D);
+ QualType DestType = Entity.getType();
// The initialization would have succeeded with this fixit. Since the fixit
// is on the error, we need to build a valid AST in this case, so this isn't
// handled in the Failed() branch above.
- QualType DestType = Entity.getType();
- S.Diag(Kind.getLocation(), DiagID)
- << DestType << (bool)DestType->getAs<RecordType>()
- << FixItHint::CreateInsertion(ZeroInitializationFixitLoc,
- ZeroInitializationFixit);
+ if (!DestType->isRecordType() && VD && VD->isConstexpr()) {
+ // Use a more useful diagnostic for constexpr variables.
+ S.Diag(Kind.getLocation(), diag::err_constexpr_var_requires_const_init)
+ << VD
+ << FixItHint::CreateInsertion(ZeroInitializationFixitLoc,
+ ZeroInitializationFixit);
+ } else {
+ unsigned DiagID = diag::err_default_init_const;
+ if (S.getLangOpts().MSVCCompat && D && D->hasAttr<SelectAnyAttr>())
+ DiagID = diag::ext_default_init_const;
+
+ S.Diag(Kind.getLocation(), DiagID)
+ << DestType << (bool)DestType->getAs<RecordType>()
+ << FixItHint::CreateInsertion(ZeroInitializationFixitLoc,
+ ZeroInitializationFixit);
+ }
}
if (getKind() == DependentSequence) {
@@ -8076,6 +8633,15 @@ ExprResult InitializationSequence::Perform(Sema &S,
<< Init->getSourceRange();
}
+ if (S.getLangOpts().MicrosoftExt && Args.size() == 1 &&
+ isa<PredefinedExpr>(Args[0]) && Entity.getType()->isArrayType()) {
+ // Produce a Microsoft compatibility warning when initializing from a
+ // predefined expression since MSVC treats predefined expressions as string
+ // literals.
+ Expr *Init = Args[0];
+ S.Diag(Init->getBeginLoc(), diag::ext_init_from_predefined) << Init;
+ }
+
// OpenCL v2.0 s6.13.11.1. atomic variables can be initialized in global scope
QualType ETy = Entity.getType();
bool HasGlobalAS = ETy.hasAddressSpace() &&
@@ -8101,6 +8667,12 @@ ExprResult InitializationSequence::Perform(Sema &S,
ExprResult CurInit((Expr *)nullptr);
SmallVector<Expr*, 4> ArrayLoopCommonExprs;
+ // HLSL allows vector initialization to function like list initialization, but
+ // use the syntax of a C++-like constructor.
+ bool IsHLSLVectorInit = S.getLangOpts().HLSL && DestType->isExtVectorType() &&
+ isa<InitListExpr>(Args[0]);
+ (void)IsHLSLVectorInit;
+
// For initialization steps that start with a single initializer,
// grab the only argument out the Args and place it into the "current"
// initializer.
@@ -8138,7 +8710,7 @@ ExprResult InitializationSequence::Perform(Sema &S,
case SK_StdInitializerList:
case SK_OCLSamplerInit:
case SK_OCLZeroOpaqueType: {
- assert(Args.size() == 1);
+ assert(Args.size() == 1 || IsHLSLVectorInit);
CurInit = Args[0];
if (!CurInit.get()) return ExprError();
break;
@@ -8148,6 +8720,7 @@ ExprResult InitializationSequence::Perform(Sema &S,
case SK_ConstructorInitializationFromList:
case SK_StdInitializerListConstructorCall:
case SK_ZeroInitialization:
+ case SK_ParenthesizedListInit:
break;
}
@@ -8189,6 +8762,10 @@ ExprResult InitializationSequence::Perform(Sema &S,
CurInit = S.FixOverloadedFunctionReference(CurInit,
Step->Function.FoundDecl,
Step->Function.Function);
+ // We might get back another placeholder expression if we resolved to a
+ // builtin.
+ if (!CurInit.isInvalid())
+ CurInit = S.CheckPlaceholderExpr(CurInit.get());
break;
case SK_CastDerivedToBasePRValue:
@@ -8252,7 +8829,7 @@ ExprResult InitializationSequence::Perform(Sema &S,
// When this is an incomplete array type (such as when this is
// initializing an array of unknown bounds from an init list), use THAT
- // type instead so that we propogate the array bounds.
+ // type instead so that we propagate the array bounds.
if (MTETy->isIncompleteArrayType() &&
!CurInit.get()->getType()->isIncompleteArrayType() &&
S.Context.hasSameType(
@@ -8315,15 +8892,12 @@ ExprResult InitializationSequence::Perform(Sema &S,
return ExprError();
// Build an expression that constructs a temporary.
- CurInit = S.BuildCXXConstructExpr(Loc, Step->Type,
- FoundFn, Constructor,
- ConstructorArgs,
- HadMultipleCandidates,
- /*ListInit*/ false,
- /*StdInitListInit*/ false,
- /*ZeroInit*/ false,
- CXXConstructExpr::CK_Complete,
- SourceRange());
+ CurInit = S.BuildCXXConstructExpr(
+ Loc, Step->Type, FoundFn, Constructor, ConstructorArgs,
+ HadMultipleCandidates,
+ /*ListInit*/ false,
+ /*StdInitListInit*/ false,
+ /*ZeroInit*/ false, CXXConstructionKind::Complete, SourceRange());
if (CurInit.isInvalid())
return ExprError();
@@ -8670,7 +9244,7 @@ ExprResult InitializationSequence::Perform(Sema &S,
<< Step->Type << CurInit.get()->getType()
<< CurInit.get()->getSourceRange();
updateGNUCompoundLiteralRValue(CurInit.get());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case SK_ArrayInit:
// If the destination type is an incomplete array type, update the
// type accordingly.
@@ -8680,10 +9254,8 @@ ExprResult InitializationSequence::Perform(Sema &S,
if (const ConstantArrayType *ConstantSource
= S.Context.getAsConstantArrayType(CurInit.get()->getType())) {
*ResultType = S.Context.getConstantArrayType(
- IncompleteDest->getElementType(),
- ConstantSource->getSize(),
- ConstantSource->getSizeExpr(),
- ArrayType::Normal, 0);
+ IncompleteDest->getElementType(), ConstantSource->getSize(),
+ ConstantSource->getSizeExpr(), ArraySizeModifier::Normal, 0);
}
}
}
@@ -8833,28 +9405,40 @@ ExprResult InitializationSequence::Perform(Sema &S,
CurInit.get()->getValueKind());
break;
}
+ case SK_ParenthesizedListInit: {
+ CurInit = nullptr;
+ TryOrBuildParenListInitialization(S, Entity, Kind, Args, *this,
+ /*VerifyOnly=*/false, &CurInit);
+ if (CurInit.get() && ResultType)
+ *ResultType = CurInit.get()->getType();
+ if (shouldBindAsTemporary(Entity))
+ CurInit = S.MaybeBindToTemporary(CurInit.get());
+ break;
+ }
}
}
+ Expr *Init = CurInit.get();
+ if (!Init)
+ return ExprError();
+
// Check whether the initializer has a shorter lifetime than the initialized
// entity, and if not, either lifetime-extend or warn as appropriate.
- if (auto *Init = CurInit.get())
- S.checkInitializerLifetime(Entity, Init);
+ S.checkInitializerLifetime(Entity, Init);
// Diagnose non-fatal problems with the completed initialization.
- if (Entity.getKind() == InitializedEntity::EK_Member &&
+ if (InitializedEntity::EntityKind EK = Entity.getKind();
+ (EK == InitializedEntity::EK_Member ||
+ EK == InitializedEntity::EK_ParenAggInitMember) &&
cast<FieldDecl>(Entity.getDecl())->isBitField())
S.CheckBitFieldInitialization(Kind.getLocation(),
- cast<FieldDecl>(Entity.getDecl()),
- CurInit.get());
+ cast<FieldDecl>(Entity.getDecl()), Init);
// Check for std::move on construction.
- if (const Expr *E = CurInit.get()) {
- CheckMoveOnConstruction(S, E,
- Entity.getKind() == InitializedEntity::EK_Result);
- }
+ CheckMoveOnConstruction(S, Init,
+ Entity.getKind() == InitializedEntity::EK_Result);
- return CurInit;
+ return Init;
}
/// Somewhere within T there is an uninitialized reference subobject.
@@ -8914,12 +9498,16 @@ static void emitBadConversionNotes(Sema &S, const InitializedEntity &entity,
S.EmitRelatedResultTypeNoteForReturn(destType);
}
QualType fromType = op->getType();
- auto *fromDecl = fromType.getTypePtr()->getPointeeCXXRecordDecl();
- auto *destDecl = destType.getTypePtr()->getPointeeCXXRecordDecl();
+ QualType fromPointeeType = fromType.getCanonicalType()->getPointeeType();
+ QualType destPointeeType = destType.getCanonicalType()->getPointeeType();
+ auto *fromDecl = fromType->getPointeeCXXRecordDecl();
+ auto *destDecl = destType->getPointeeCXXRecordDecl();
if (fromDecl && destDecl && fromDecl->getDeclKind() == Decl::CXXRecord &&
destDecl->getDeclKind() == Decl::CXXRecord &&
!fromDecl->isInvalidDecl() && !destDecl->isInvalidDecl() &&
- !fromDecl->hasDefinition())
+ !fromDecl->hasDefinition() &&
+ destPointeeType.getQualifiers().compatiblyIncludes(
+ fromPointeeType.getQualifiers()))
S.Diag(fromDecl->getLocation(), diag::note_forward_class_conversion)
<< S.getASTContext().getTagDeclType(fromDecl)
<< S.getASTContext().getTagDeclType(destDecl);
@@ -8935,7 +9523,7 @@ static void diagnoseListInit(Sema &S, const InitializedEntity &Entity,
E.withConst(),
llvm::APInt(S.Context.getTypeSize(S.Context.getSizeType()),
InitList->getNumInits()),
- nullptr, clang::ArrayType::Normal, 0);
+ nullptr, clang::ArraySizeModifier::Normal, 0);
InitializedEntity HiddenArray =
InitializedEntity::InitializeTemporary(ArrayType);
return diagnoseListInit(S, HiddenArray, InitList);
@@ -9031,9 +9619,8 @@ bool InitializationSequence::Diagnose(Sema &S,
<< FixItHint::CreateInsertion(Args.front()->getBeginLoc(), "u8");
break;
case FK_UTF8StringIntoPlainChar:
- S.Diag(Kind.getLocation(),
- diag::err_array_init_utf8_string_into_char)
- << S.getLangOpts().CPlusPlus20;
+ S.Diag(Kind.getLocation(), diag::err_array_init_utf8_string_into_char)
+ << DestType->isSignedIntegerType() << S.getLangOpts().CPlusPlus20;
break;
case FK_ArrayTypeMismatch:
case FK_NonConstantArrayInit:
@@ -9128,7 +9715,7 @@ bool InitializationSequence::Diagnose(Sema &S,
<< Args[0]->getSourceRange();
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case FK_NonConstLValueReferenceBindingToUnrelated:
S.Diag(Kind.getLocation(),
@@ -9293,7 +9880,8 @@ bool InitializationSequence::Diagnose(Sema &S,
case OR_No_Viable_Function:
if (Kind.getKind() == InitializationKind::IK_Default &&
(Entity.getKind() == InitializedEntity::EK_Base ||
- Entity.getKind() == InitializedEntity::EK_Member) &&
+ Entity.getKind() == InitializedEntity::EK_Member ||
+ Entity.getKind() == InitializedEntity::EK_ParenAggInitMember) &&
isa<CXXConstructorDecl>(S.CurContext)) {
// This is implicit default initialization of a member or
// base within a constructor. If no viable function was
@@ -9391,6 +9979,10 @@ bool InitializationSequence::Diagnose(Sema &S,
<< Entity.getName();
S.Diag(Entity.getDecl()->getLocation(), diag::note_previous_decl)
<< Entity.getName();
+ } else if (const auto *VD = dyn_cast_if_present<VarDecl>(Entity.getDecl());
+ VD && VD->isConstexpr()) {
+ S.Diag(Kind.getLocation(), diag::err_constexpr_var_requires_const_init)
+ << VD;
} else {
S.Diag(Kind.getLocation(), diag::err_default_init_const)
<< DestType << (bool)DestType->getAs<RecordType>();
@@ -9427,6 +10019,17 @@ bool InitializationSequence::Diagnose(Sema &S,
diag::note_explicit_ctor_deduction_guide_here) << false;
break;
}
+
+ case FK_ParenthesizedListInitFailed:
+ TryOrBuildParenListInitialization(S, Entity, Kind, Args, *this,
+ /*VerifyOnly=*/false);
+ break;
+
+ case FK_DesignatedInitForNonAggregate:
+ InitListExpr *InitList = cast<InitListExpr>(Args[0]);
+ S.Diag(Kind.getLocation(), diag::err_designated_init_for_non_aggregate)
+ << Entity.getType() << InitList->getSourceRange();
+ break;
}
PrintInitLocationNote(S, Entity);
@@ -9593,6 +10196,14 @@ void InitializationSequence::dump(raw_ostream &OS) const {
case FK_ExplicitConstructor:
OS << "list copy initialization chose explicit constructor";
break;
+
+ case FK_ParenthesizedListInitFailed:
+ OS << "parenthesized list initialization failed";
+ break;
+
+ case FK_DesignatedInitForNonAggregate:
+ OS << "designated initializer for non-aggregate type";
+ break;
}
OS << '\n';
return;
@@ -9764,9 +10375,12 @@ void InitializationSequence::dump(raw_ostream &OS) const {
case SK_OCLZeroOpaqueType:
OS << "OpenCL opaque type from zero";
break;
+ case SK_ParenthesizedListInit:
+ OS << "initialization from a parenthesized list of values";
+ break;
}
- OS << " [" << S->Type.getAsString() << ']';
+ OS << " [" << S->Type << ']';
}
OS << '\n';
@@ -9776,11 +10390,6 @@ void InitializationSequence::dump() const {
dump(llvm::errs());
}
-static bool NarrowingErrs(const LangOptions &L) {
- return L.CPlusPlus11 &&
- (!L.MicrosoftExt || L.isCompatibleWithMSVC(LangOptions::MSVC2015));
-}
-
static void DiagnoseNarrowingInInitList(Sema &S,
const ImplicitConversionSequence &ICS,
QualType PreNarrowingType,
@@ -9795,11 +10404,25 @@ static void DiagnoseNarrowingInInitList(Sema &S,
SCS = &ICS.UserDefined.After;
break;
case ImplicitConversionSequence::AmbiguousConversion:
+ case ImplicitConversionSequence::StaticObjectArgumentConversion:
case ImplicitConversionSequence::EllipsisConversion:
case ImplicitConversionSequence::BadConversion:
return;
}
+ auto MakeDiag = [&](bool IsConstRef, unsigned DefaultDiagID,
+ unsigned ConstRefDiagID, unsigned WarnDiagID) {
+ unsigned DiagID;
+ auto &L = S.getLangOpts();
+ if (L.CPlusPlus11 &&
+ (!L.MicrosoftExt || L.isCompatibleWithMSVC(LangOptions::MSVC2015)))
+ DiagID = IsConstRef ? ConstRefDiagID : DefaultDiagID;
+ else
+ DiagID = WarnDiagID;
+ return S.Diag(PostInit->getBeginLoc(), DiagID)
+ << PostInit->getSourceRange();
+ };
+
// C++11 [dcl.init.list]p7: Check whether this is a narrowing conversion.
APValue ConstantValue;
QualType ConstantType;
@@ -9810,40 +10433,41 @@ static void DiagnoseNarrowingInInitList(Sema &S,
// No narrowing occurred.
return;
- case NK_Type_Narrowing:
+ case NK_Type_Narrowing: {
// This was a floating-to-integer conversion, which is always considered a
// narrowing conversion even if the value is a constant and can be
// represented exactly as an integer.
- S.Diag(PostInit->getBeginLoc(), NarrowingErrs(S.getLangOpts())
- ? diag::ext_init_list_type_narrowing
- : diag::warn_init_list_type_narrowing)
- << PostInit->getSourceRange()
+ QualType T = EntityType.getNonReferenceType();
+ MakeDiag(T != EntityType, diag::ext_init_list_type_narrowing,
+ diag::ext_init_list_type_narrowing_const_reference,
+ diag::warn_init_list_type_narrowing)
<< PreNarrowingType.getLocalUnqualifiedType()
- << EntityType.getLocalUnqualifiedType();
+ << T.getLocalUnqualifiedType();
break;
+ }
- case NK_Constant_Narrowing:
+ case NK_Constant_Narrowing: {
// A constant value was narrowed.
- S.Diag(PostInit->getBeginLoc(),
- NarrowingErrs(S.getLangOpts())
- ? diag::ext_init_list_constant_narrowing
- : diag::warn_init_list_constant_narrowing)
- << PostInit->getSourceRange()
+ MakeDiag(EntityType.getNonReferenceType() != EntityType,
+ diag::ext_init_list_constant_narrowing,
+ diag::ext_init_list_constant_narrowing_const_reference,
+ diag::warn_init_list_constant_narrowing)
<< ConstantValue.getAsString(S.getASTContext(), ConstantType)
- << EntityType.getLocalUnqualifiedType();
+ << EntityType.getNonReferenceType().getLocalUnqualifiedType();
break;
+ }
- case NK_Variable_Narrowing:
+ case NK_Variable_Narrowing: {
// A variable's value may have been narrowed.
- S.Diag(PostInit->getBeginLoc(),
- NarrowingErrs(S.getLangOpts())
- ? diag::ext_init_list_variable_narrowing
- : diag::warn_init_list_variable_narrowing)
- << PostInit->getSourceRange()
+ MakeDiag(EntityType.getNonReferenceType() != EntityType,
+ diag::ext_init_list_variable_narrowing,
+ diag::ext_init_list_variable_narrowing_const_reference,
+ diag::warn_init_list_variable_narrowing)
<< PreNarrowingType.getLocalUnqualifiedType()
- << EntityType.getLocalUnqualifiedType();
+ << EntityType.getNonReferenceType().getLocalUnqualifiedType();
break;
}
+ }
SmallString<128> StaticCast;
llvm::raw_svector_ostream OS(StaticCast);
@@ -9911,8 +10535,7 @@ Sema::PerformCopyInitialization(const InitializedEntity &Entity,
const bool ShouldTrackCopy =
Entity.isParameterKind() && Seq.isConstructorInitialization();
if (ShouldTrackCopy) {
- if (llvm::find(CurrentParameterCopyTypes, Entity.getType()) !=
- CurrentParameterCopyTypes.end()) {
+ if (llvm::is_contained(CurrentParameterCopyTypes, Entity.getType())) {
Seq.SetOverloadFailure(
InitializationSequence::FK_ConstructorOverloadFailed,
OR_No_Viable_Function);
@@ -9969,7 +10592,7 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
auto TemplateName = DeducedTST->getTemplateName();
if (TemplateName.isDependent())
- return SubstAutoType(TSInfo->getType(), Context.DependentTy);
+ return SubstAutoTypeDependent(TSInfo->getType());
// We can only perform deduction for class templates.
auto *Template =
@@ -9979,7 +10602,7 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
diag::err_deduced_non_class_template_specialization_type)
<< (int)getTemplateNameKindForDiagnostics(TemplateName) << TemplateName;
if (auto *TD = TemplateName.getAsTemplateDecl())
- Diag(TD->getLocation(), diag::note_template_decl_here);
+ NoteTemplateLocation(*TD);
return QualType();
}
@@ -9988,7 +10611,7 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
Diag(TSInfo->getTypeLoc().getBeginLoc(),
diag::warn_cxx14_compat_class_template_argument_deduction)
<< TSInfo->getTypeLoc().getSourceRange() << 0;
- return SubstAutoType(TSInfo->getType(), Context.DependentTy);
+ return SubstAutoTypeDependent(TSInfo->getType());
}
// FIXME: Perform "exact type" matching first, per CWG discussion?
@@ -10032,13 +10655,144 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
OverloadCandidateSet::CSK_Normal);
OverloadCandidateSet::iterator Best;
- bool HasAnyDeductionGuide = false;
bool AllowExplicit = !Kind.isCopyInit() || ListInit;
- auto tryToResolveOverload =
+ // Return true if the candidate is added successfully, false otherwise.
+ auto addDeductionCandidate = [&](FunctionTemplateDecl *TD,
+ CXXDeductionGuideDecl *GD,
+ DeclAccessPair FoundDecl,
+ bool OnlyListConstructors,
+ bool AllowAggregateDeductionCandidate) {
+ // C++ [over.match.ctor]p1: (non-list copy-initialization from non-class)
+ // For copy-initialization, the candidate functions are all the
+ // converting constructors (12.3.1) of that class.
+ // C++ [over.match.copy]p1: (non-list copy-initialization from class)
+ // The converting constructors of T are candidate functions.
+ if (!AllowExplicit) {
+ // Overload resolution checks whether the deduction guide is declared
+ // explicit for us.
+
+ // When looking for a converting constructor, deduction guides that
+ // could never be called with one argument are not interesting to
+ // check or note.
+ if (GD->getMinRequiredArguments() > 1 ||
+ (GD->getNumParams() == 0 && !GD->isVariadic()))
+ return;
+ }
+
+ // C++ [over.match.list]p1.1: (first phase list initialization)
+ // Initially, the candidate functions are the initializer-list
+ // constructors of the class T
+ if (OnlyListConstructors && !isInitListConstructor(GD))
+ return;
+
+ if (!AllowAggregateDeductionCandidate &&
+ GD->getDeductionCandidateKind() == DeductionCandidate::Aggregate)
+ return;
+
+ // C++ [over.match.list]p1.2: (second phase list initialization)
+ // the candidate functions are all the constructors of the class T
+ // C++ [over.match.ctor]p1: (all other cases)
+ // the candidate functions are all the constructors of the class of
+ // the object being initialized
+
+ // C++ [over.best.ics]p4:
+ // When [...] the constructor [...] is a candidate by
+ // - [over.match.copy] (in all cases)
+ // FIXME: The "second phase of [over.match.list] case can also
+ // theoretically happen here, but it's not clear whether we can
+ // ever have a parameter of the right type.
+ bool SuppressUserConversions = Kind.isCopyInit();
+
+ if (TD) {
+ SmallVector<Expr *, 8> TmpInits;
+ for (Expr *E : Inits)
+ if (auto *DI = dyn_cast<DesignatedInitExpr>(E))
+ TmpInits.push_back(DI->getInit());
+ else
+ TmpInits.push_back(E);
+ AddTemplateOverloadCandidate(
+ TD, FoundDecl, /*ExplicitArgs=*/nullptr, TmpInits, Candidates,
+ SuppressUserConversions,
+ /*PartialOverloading=*/false, AllowExplicit, ADLCallKind::NotADL,
+ /*PO=*/{}, AllowAggregateDeductionCandidate);
+ } else {
+ AddOverloadCandidate(GD, FoundDecl, Inits, Candidates,
+ SuppressUserConversions,
+ /*PartialOverloading=*/false, AllowExplicit);
+ }
+ };
+
+ bool FoundDeductionGuide = false;
+
+ auto TryToResolveOverload =
[&](bool OnlyListConstructors) -> OverloadingResult {
Candidates.clear(OverloadCandidateSet::CSK_Normal);
- HasAnyDeductionGuide = false;
+ bool HasAnyDeductionGuide = false;
+
+ auto SynthesizeAggrGuide = [&](InitListExpr *ListInit) {
+ auto *Pattern = Template;
+ while (Pattern->getInstantiatedFromMemberTemplate()) {
+ if (Pattern->isMemberSpecialization())
+ break;
+ Pattern = Pattern->getInstantiatedFromMemberTemplate();
+ }
+
+ auto *RD = cast<CXXRecordDecl>(Pattern->getTemplatedDecl());
+ if (!(RD->getDefinition() && RD->isAggregate()))
+ return;
+ QualType Ty = Context.getRecordType(RD);
+ SmallVector<QualType, 8> ElementTypes;
+
+ InitListChecker CheckInitList(*this, Entity, ListInit, Ty, ElementTypes);
+ if (!CheckInitList.HadError()) {
+ // C++ [over.match.class.deduct]p1.8:
+ // if e_i is of array type and x_i is a braced-init-list, T_i is an
+ // rvalue reference to the declared type of e_i and
+ // C++ [over.match.class.deduct]p1.9:
+ // if e_i is of array type and x_i is a bstring-literal, T_i is an
+ // lvalue reference to the const-qualified declared type of e_i and
+ // C++ [over.match.class.deduct]p1.10:
+ // otherwise, T_i is the declared type of e_i
+ for (int I = 0, E = ListInit->getNumInits();
+ I < E && !isa<PackExpansionType>(ElementTypes[I]); ++I)
+ if (ElementTypes[I]->isArrayType()) {
+ if (isa<InitListExpr>(ListInit->getInit(I)))
+ ElementTypes[I] = Context.getRValueReferenceType(ElementTypes[I]);
+ else if (isa<StringLiteral>(
+ ListInit->getInit(I)->IgnoreParenImpCasts()))
+ ElementTypes[I] =
+ Context.getLValueReferenceType(ElementTypes[I].withConst());
+ }
+
+ llvm::FoldingSetNodeID ID;
+ ID.AddPointer(Template);
+ for (auto &T : ElementTypes)
+ T.getCanonicalType().Profile(ID);
+ unsigned Hash = ID.ComputeHash();
+ if (AggregateDeductionCandidates.count(Hash) == 0) {
+ if (FunctionTemplateDecl *TD =
+ DeclareImplicitDeductionGuideFromInitList(
+ Template, ElementTypes,
+ TSInfo->getTypeLoc().getEndLoc())) {
+ auto *GD = cast<CXXDeductionGuideDecl>(TD->getTemplatedDecl());
+ GD->setDeductionCandidateKind(DeductionCandidate::Aggregate);
+ AggregateDeductionCandidates[Hash] = GD;
+ addDeductionCandidate(TD, GD, DeclAccessPair::make(TD, AS_public),
+ OnlyListConstructors,
+ /*AllowAggregateDeductionCandidate=*/true);
+ }
+ } else {
+ CXXDeductionGuideDecl *GD = AggregateDeductionCandidates[Hash];
+ FunctionTemplateDecl *TD = GD->getDescribedFunctionTemplate();
+ assert(TD && "aggregate deduction candidate is function template");
+ addDeductionCandidate(TD, GD, DeclAccessPair::make(TD, AS_public),
+ OnlyListConstructors,
+ /*AllowAggregateDeductionCandidate=*/true);
+ }
+ HasAnyDeductionGuide = true;
+ }
+ };
for (auto I = Guides.begin(), E = Guides.end(); I != E; ++I) {
NamedDecl *D = (*I)->getUnderlyingDecl();
@@ -10046,7 +10800,7 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
continue;
auto *TD = dyn_cast<FunctionTemplateDecl>(D);
- auto *GD = dyn_cast_or_null<CXXDeductionGuideDecl>(
+ auto *GD = dyn_cast_if_present<CXXDeductionGuideDecl>(
TD ? TD->getTemplatedDecl() : dyn_cast<FunctionDecl>(D));
if (!GD)
continue;
@@ -10054,53 +10808,33 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
if (!GD->isImplicit())
HasAnyDeductionGuide = true;
- // C++ [over.match.ctor]p1: (non-list copy-initialization from non-class)
- // For copy-initialization, the candidate functions are all the
- // converting constructors (12.3.1) of that class.
- // C++ [over.match.copy]p1: (non-list copy-initialization from class)
- // The converting constructors of T are candidate functions.
- if (!AllowExplicit) {
- // Overload resolution checks whether the deduction guide is declared
- // explicit for us.
-
- // When looking for a converting constructor, deduction guides that
- // could never be called with one argument are not interesting to
- // check or note.
- if (GD->getMinRequiredArguments() > 1 ||
- (GD->getNumParams() == 0 && !GD->isVariadic()))
- continue;
+ addDeductionCandidate(TD, GD, I.getPair(), OnlyListConstructors,
+ /*AllowAggregateDeductionCandidate=*/false);
+ }
+
+ // C++ [over.match.class.deduct]p1.4:
+ // if C is defined and its definition satisfies the conditions for an
+ // aggregate class ([dcl.init.aggr]) with the assumption that any
+ // dependent base class has no virtual functions and no virtual base
+ // classes, and the initializer is a non-empty braced-init-list or
+ // parenthesized expression-list, and there are no deduction-guides for
+ // C, the set contains an additional function template, called the
+ // aggregate deduction candidate, defined as follows.
+ if (getLangOpts().CPlusPlus20 && !HasAnyDeductionGuide) {
+ if (ListInit && ListInit->getNumInits()) {
+ SynthesizeAggrGuide(ListInit);
+ } else if (Inits.size()) { // parenthesized expression-list
+ // Inits are expressions inside the parentheses. We don't have
+ // the parentheses source locations, use the begin/end of Inits as the
+ // best heuristic.
+ InitListExpr TempListInit(getASTContext(), Inits.front()->getBeginLoc(),
+ Inits, Inits.back()->getEndLoc());
+ SynthesizeAggrGuide(&TempListInit);
}
+ }
- // C++ [over.match.list]p1.1: (first phase list initialization)
- // Initially, the candidate functions are the initializer-list
- // constructors of the class T
- if (OnlyListConstructors && !isInitListConstructor(GD))
- continue;
+ FoundDeductionGuide = FoundDeductionGuide || HasAnyDeductionGuide;
- // C++ [over.match.list]p1.2: (second phase list initialization)
- // the candidate functions are all the constructors of the class T
- // C++ [over.match.ctor]p1: (all other cases)
- // the candidate functions are all the constructors of the class of
- // the object being initialized
-
- // C++ [over.best.ics]p4:
- // When [...] the constructor [...] is a candidate by
- // - [over.match.copy] (in all cases)
- // FIXME: The "second phase of [over.match.list] case can also
- // theoretically happen here, but it's not clear whether we can
- // ever have a parameter of the right type.
- bool SuppressUserConversions = Kind.isCopyInit();
-
- if (TD)
- AddTemplateOverloadCandidate(TD, I.getPair(), /*ExplicitArgs*/ nullptr,
- Inits, Candidates, SuppressUserConversions,
- /*PartialOverloading*/ false,
- AllowExplicit);
- else
- AddOverloadCandidate(GD, I.getPair(), Inits, Candidates,
- SuppressUserConversions,
- /*PartialOverloading*/ false, AllowExplicit);
- }
return Candidates.BestViableFunction(*this, Kind.getLocation(), Best);
};
@@ -10136,7 +10870,7 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
}
if (TryListConstructors)
- Result = tryToResolveOverload(/*OnlyListConstructor*/true);
+ Result = TryToResolveOverload(/*OnlyListConstructor*/true);
// Then unwrap the initializer list and try again considering all
// constructors.
Inits = MultiExprArg(ListInit->getInits(), ListInit->getNumInits());
@@ -10145,7 +10879,7 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
// If list-initialization fails, or if we're doing any other kind of
// initialization, we (eventually) consider constructors.
if (Result == OR_No_Viable_Function)
- Result = tryToResolveOverload(/*OnlyListConstructor*/false);
+ Result = TryToResolveOverload(/*OnlyListConstructor*/false);
switch (Result) {
case OR_Ambiguous:
@@ -10199,7 +10933,7 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
// Make sure we didn't select an unusable deduction guide, and mark it
// as referenced.
- DiagnoseUseOfDecl(Best->Function, Kind.getLocation());
+ DiagnoseUseOfDecl(Best->FoundDecl, Kind.getLocation());
MarkFunctionReferenced(Kind.getLocation(), Best->Function);
break;
}
@@ -10215,7 +10949,7 @@ QualType Sema::DeduceTemplateSpecializationFromInitializer(
// Warn if CTAD was used on a type that does not have any user-defined
// deduction guides.
- if (!HasAnyDeductionGuide) {
+ if (!FoundDeductionGuide) {
Diag(TSInfo->getTypeLoc().getBeginLoc(),
diag::warn_ctad_maybe_unsupported)
<< TemplateName;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp b/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp
index 1fcc03d997c1..5b95bae567b7 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp
@@ -20,7 +20,9 @@
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/SemaLambda.h"
+#include "clang/Sema/Template.h"
#include "llvm/ADT/STLExtras.h"
+#include <optional>
using namespace clang;
using namespace sema;
@@ -54,17 +56,17 @@ using namespace sema;
/// is at the top of the stack and has the highest index.
/// \param VarToCapture - the variable to capture. If NULL, capture 'this'.
///
-/// \returns An Optional<unsigned> Index that if evaluates to 'true' contains
-/// the index (into Sema's FunctionScopeInfo stack) of the innermost lambda
-/// which is capture-ready. If the return value evaluates to 'false' then
-/// no lambda is capture-ready for \p VarToCapture.
+/// \returns An std::optional<unsigned> Index that if evaluates to 'true'
+/// contains the index (into Sema's FunctionScopeInfo stack) of the innermost
+/// lambda which is capture-ready. If the return value evaluates to 'false'
+/// then no lambda is capture-ready for \p VarToCapture.
-static inline Optional<unsigned>
+static inline std::optional<unsigned>
getStackIndexOfNearestEnclosingCaptureReadyLambda(
ArrayRef<const clang::sema::FunctionScopeInfo *> FunctionScopes,
- VarDecl *VarToCapture) {
+ ValueDecl *VarToCapture) {
// Label failure to capture.
- const Optional<unsigned> NoLambdaIsCaptureReady;
+ const std::optional<unsigned> NoLambdaIsCaptureReady;
// Ignore all inner captured regions.
unsigned CurScopeIndex = FunctionScopes.size() - 1;
@@ -165,24 +167,25 @@ getStackIndexOfNearestEnclosingCaptureReadyLambda(
/// \param VarToCapture - the variable to capture. If NULL, capture 'this'.
///
///
-/// \returns An Optional<unsigned> Index that if evaluates to 'true' contains
-/// the index (into Sema's FunctionScopeInfo stack) of the innermost lambda
-/// which is capture-capable. If the return value evaluates to 'false' then
-/// no lambda is capture-capable for \p VarToCapture.
+/// \returns An std::optional<unsigned> Index that if evaluates to 'true'
+/// contains the index (into Sema's FunctionScopeInfo stack) of the innermost
+/// lambda which is capture-capable. If the return value evaluates to 'false'
+/// then no lambda is capture-capable for \p VarToCapture.
-Optional<unsigned> clang::getStackIndexOfNearestEnclosingCaptureCapableLambda(
+std::optional<unsigned>
+clang::getStackIndexOfNearestEnclosingCaptureCapableLambda(
ArrayRef<const sema::FunctionScopeInfo *> FunctionScopes,
- VarDecl *VarToCapture, Sema &S) {
+ ValueDecl *VarToCapture, Sema &S) {
- const Optional<unsigned> NoLambdaIsCaptureCapable;
+ const std::optional<unsigned> NoLambdaIsCaptureCapable;
- const Optional<unsigned> OptionalStackIndex =
+ const std::optional<unsigned> OptionalStackIndex =
getStackIndexOfNearestEnclosingCaptureReadyLambda(FunctionScopes,
VarToCapture);
if (!OptionalStackIndex)
return NoLambdaIsCaptureCapable;
- const unsigned IndexOfCaptureReadyLambda = OptionalStackIndex.getValue();
+ const unsigned IndexOfCaptureReadyLambda = *OptionalStackIndex;
assert(((IndexOfCaptureReadyLambda != (FunctionScopes.size() - 1)) ||
S.getCurGenericLambda()) &&
"The capture ready lambda for a potential capture can only be the "
@@ -238,21 +241,20 @@ getGenericLambdaTemplateParameterList(LambdaScopeInfo *LSI, Sema &SemaRef) {
return LSI->GLTemplateParameterList;
}
-CXXRecordDecl *Sema::createLambdaClosureType(SourceRange IntroducerRange,
- TypeSourceInfo *Info,
- bool KnownDependent,
- LambdaCaptureDefault CaptureDefault) {
+CXXRecordDecl *
+Sema::createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info,
+ unsigned LambdaDependencyKind,
+ LambdaCaptureDefault CaptureDefault) {
DeclContext *DC = CurContext;
while (!(DC->isFunctionOrMethod() || DC->isRecord() || DC->isFileContext()))
DC = DC->getParent();
- bool IsGenericLambda = getGenericLambdaTemplateParameterList(getCurLambda(),
- *this);
+
+ bool IsGenericLambda =
+ Info && getGenericLambdaTemplateParameterList(getCurLambda(), *this);
// Start constructing the lambda class.
- CXXRecordDecl *Class = CXXRecordDecl::CreateLambda(Context, DC, Info,
- IntroducerRange.getBegin(),
- KnownDependent,
- IsGenericLambda,
- CaptureDefault);
+ CXXRecordDecl *Class = CXXRecordDecl::CreateLambda(
+ Context, DC, Info, IntroducerRange.getBegin(), LambdaDependencyKind,
+ IsGenericLambda, CaptureDefault);
DC->addDecl(Class);
return Class;
@@ -282,11 +284,14 @@ Sema::getCurrentMangleNumberContext(const DeclContext *DC) {
Normal,
DefaultArgument,
DataMember,
- StaticDataMember,
InlineVariable,
- VariableTemplate
+ TemplatedVariable,
+ Concept
} Kind = Normal;
+ bool IsInNonspecializedTemplate =
+ inTemplateInstantiation() || CurContext->isDependentContext();
+
// Default arguments of member function parameters that appear in a class
// definition, as well as the initializers of data members, receive special
// treatment. Identify them.
@@ -297,30 +302,29 @@ Sema::getCurrentMangleNumberContext(const DeclContext *DC) {
if (LexicalDC->isRecord())
Kind = DefaultArgument;
} else if (VarDecl *Var = dyn_cast<VarDecl>(ManglingContextDecl)) {
- if (Var->getDeclContext()->isRecord())
- Kind = StaticDataMember;
- else if (Var->getMostRecentDecl()->isInline())
+ if (Var->getMostRecentDecl()->isInline())
Kind = InlineVariable;
+ else if (Var->getDeclContext()->isRecord() && IsInNonspecializedTemplate)
+ Kind = TemplatedVariable;
else if (Var->getDescribedVarTemplate())
- Kind = VariableTemplate;
+ Kind = TemplatedVariable;
else if (auto *VTS = dyn_cast<VarTemplateSpecializationDecl>(Var)) {
if (!VTS->isExplicitSpecialization())
- Kind = VariableTemplate;
+ Kind = TemplatedVariable;
}
} else if (isa<FieldDecl>(ManglingContextDecl)) {
Kind = DataMember;
+ } else if (isa<ImplicitConceptSpecializationDecl>(ManglingContextDecl)) {
+ Kind = Concept;
}
}
// Itanium ABI [5.1.7]:
// In the following contexts [...] the one-definition rule requires closure
// types in different translation units to "correspond":
- bool IsInNonspecializedTemplate =
- inTemplateInstantiation() || CurContext->isDependentContext();
switch (Kind) {
case Normal: {
- // -- the bodies of non-exported nonspecialized template functions
- // -- the bodies of inline functions
+ // -- the bodies of inline or templated functions
if ((IsInNonspecializedTemplate &&
!(ManglingContextDecl && isa<ParmVarDecl>(ManglingContextDecl))) ||
isInInlineFunction(CurContext)) {
@@ -332,21 +336,18 @@ Sema::getCurrentMangleNumberContext(const DeclContext *DC) {
return std::make_tuple(nullptr, nullptr);
}
- case StaticDataMember:
- // -- the initializers of nonspecialized static members of template classes
- if (!IsInNonspecializedTemplate)
- return std::make_tuple(nullptr, ManglingContextDecl);
- // Fall through to get the current context.
- LLVM_FALLTHROUGH;
-
+ case Concept:
+ // Concept definitions aren't code generated and thus aren't mangled,
+ // however the ManglingContextDecl is important for the purposes of
+ // re-forming the template argument list of the lambda for constraint
+ // evaluation.
case DataMember:
- // -- the in-class initializers of class members
+ // -- default member initializers
case DefaultArgument:
// -- default arguments appearing in class definitions
case InlineVariable:
- // -- the initializers of inline variables
- case VariableTemplate:
- // -- the initializers of templated variables
+ case TemplatedVariable:
+ // -- the initializers of inline or templated variables
return std::make_tuple(
&Context.getManglingNumberContext(ASTContext::NeedExtraManglingDecl,
ManglingContextDecl),
@@ -356,16 +357,13 @@ Sema::getCurrentMangleNumberContext(const DeclContext *DC) {
llvm_unreachable("unexpected context");
}
-CXXMethodDecl *Sema::startLambdaDefinition(CXXRecordDecl *Class,
- SourceRange IntroducerRange,
- TypeSourceInfo *MethodTypeInfo,
- SourceLocation EndLoc,
- ArrayRef<ParmVarDecl *> Params,
- ConstexprSpecKind ConstexprKind,
- Expr *TrailingRequiresClause) {
+static QualType
+buildTypeForLambdaCallOperator(Sema &S, clang::CXXRecordDecl *Class,
+ TemplateParameterList *TemplateParams,
+ TypeSourceInfo *MethodTypeInfo) {
+ assert(MethodTypeInfo && "expected a non null type");
+
QualType MethodType = MethodTypeInfo->getType();
- TemplateParameterList *TemplateParams =
- getGenericLambdaTemplateParameterList(getCurLambda(), *this);
// If a lambda appears in a dependent context or is a generic lambda (has
// template parameters) and has an 'auto' return type, deduce it to a
// dependent type.
@@ -373,75 +371,55 @@ CXXMethodDecl *Sema::startLambdaDefinition(CXXRecordDecl *Class,
const FunctionProtoType *FPT = MethodType->castAs<FunctionProtoType>();
QualType Result = FPT->getReturnType();
if (Result->isUndeducedType()) {
- Result = SubstAutoType(Result, Context.DependentTy);
- MethodType = Context.getFunctionType(Result, FPT->getParamTypes(),
- FPT->getExtProtoInfo());
+ Result = S.SubstAutoTypeDependent(Result);
+ MethodType = S.Context.getFunctionType(Result, FPT->getParamTypes(),
+ FPT->getExtProtoInfo());
}
}
+ return MethodType;
+}
- // C++11 [expr.prim.lambda]p5:
- // The closure type for a lambda-expression has a public inline function
- // call operator (13.5.4) whose parameters and return type are described by
- // the lambda-expression's parameter-declaration-clause and
- // trailing-return-type respectively.
- DeclarationName MethodName
- = Context.DeclarationNames.getCXXOperatorName(OO_Call);
- DeclarationNameLoc MethodNameLoc =
- DeclarationNameLoc::makeCXXOperatorNameLoc(IntroducerRange);
- CXXMethodDecl *Method = CXXMethodDecl::Create(
- Context, Class, EndLoc,
- DeclarationNameInfo(MethodName, IntroducerRange.getBegin(),
- MethodNameLoc),
- MethodType, MethodTypeInfo, SC_None,
- /*isInline=*/true, ConstexprKind, EndLoc, TrailingRequiresClause);
- Method->setAccess(AS_public);
- if (!TemplateParams)
- Class->addDecl(Method);
-
- // Temporarily set the lexical declaration context to the current
- // context, so that the Scope stack matches the lexical nesting.
- Method->setLexicalDeclContext(CurContext);
- // Create a function template if we have a template parameter list
- FunctionTemplateDecl *const TemplateMethod = TemplateParams ?
- FunctionTemplateDecl::Create(Context, Class,
- Method->getLocation(), MethodName,
- TemplateParams,
- Method) : nullptr;
- if (TemplateMethod) {
- TemplateMethod->setAccess(AS_public);
- Method->setDescribedFunctionTemplate(TemplateMethod);
- Class->addDecl(TemplateMethod);
- TemplateMethod->setLexicalDeclContext(CurContext);
- }
-
- // Add parameters.
- if (!Params.empty()) {
- Method->setParams(Params);
- CheckParmsForFunctionDef(Params,
- /*CheckParameterNames=*/false);
-
- for (auto P : Method->parameters())
- P->setOwningFunction(Method);
- }
-
- return Method;
+// [C++2b] [expr.prim.lambda.closure] p4
+// Given a lambda with a lambda-capture, the type of the explicit object
+// parameter, if any, of the lambda's function call operator (possibly
+// instantiated from a function call operator template) shall be either:
+// - the closure type,
+// - class type derived from the closure type, or
+// - a reference to a possibly cv-qualified such type.
+void Sema::DiagnoseInvalidExplicitObjectParameterInLambda(
+ CXXMethodDecl *Method) {
+ if (!isLambdaCallWithExplicitObjectParameter(Method))
+ return;
+ CXXRecordDecl *RD = Method->getParent();
+ if (Method->getType()->isDependentType())
+ return;
+ if (RD->isCapturelessLambda())
+ return;
+ QualType ExplicitObjectParameterType = Method->getParamDecl(0)
+ ->getType()
+ .getNonReferenceType()
+ .getUnqualifiedType()
+ .getDesugaredType(getASTContext());
+ QualType LambdaType = getASTContext().getRecordType(RD);
+ if (LambdaType == ExplicitObjectParameterType)
+ return;
+ if (IsDerivedFrom(RD->getLocation(), ExplicitObjectParameterType, LambdaType))
+ return;
+ Diag(Method->getParamDecl(0)->getLocation(),
+ diag::err_invalid_explicit_object_type_in_lambda)
+ << ExplicitObjectParameterType;
}
void Sema::handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
- Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling) {
- if (Mangling) {
- bool HasKnownInternalLinkage;
- unsigned ManglingNumber, DeviceManglingNumber;
- Decl *ManglingContextDecl;
- std::tie(HasKnownInternalLinkage, ManglingNumber, DeviceManglingNumber,
- ManglingContextDecl) = Mangling.getValue();
- Class->setLambdaMangling(ManglingNumber, ManglingContextDecl,
- HasKnownInternalLinkage);
- Class->setDeviceLambdaManglingNumber(DeviceManglingNumber);
+ std::optional<CXXRecordDecl::LambdaNumbering> NumberingOverride) {
+ if (NumberingOverride) {
+ Class->setLambdaNumbering(*NumberingOverride);
return;
}
+ ContextRAII ManglingContext(*this, Class->getDeclContext());
+
auto getMangleNumberingContext =
[this](CXXRecordDecl *Class,
Decl *ManglingContextDecl) -> MangleNumberingContext * {
@@ -456,11 +434,10 @@ void Sema::handleLambdaNumbering(
return &Context.getManglingNumberContext(DC);
};
+ CXXRecordDecl::LambdaNumbering Numbering;
MangleNumberingContext *MCtx;
- Decl *ManglingContextDecl;
- std::tie(MCtx, ManglingContextDecl) =
+ std::tie(MCtx, Numbering.ContextDecl) =
getCurrentMangleNumberContext(Class->getDeclContext());
- bool HasKnownInternalLinkage = false;
if (!MCtx && (getLangOpts().CUDA || getLangOpts().SYCLIsDevice ||
getLangOpts().SYCLIsHost)) {
// Force lambda numbering in CUDA/HIP as we need to name lambdas following
@@ -470,26 +447,41 @@ void Sema::handleLambdaNumbering(
// Also force for SYCL, since we need this for the
// __builtin_sycl_unique_stable_name implementation, which depends on lambda
// mangling.
- MCtx = getMangleNumberingContext(Class, ManglingContextDecl);
+ MCtx = getMangleNumberingContext(Class, Numbering.ContextDecl);
assert(MCtx && "Retrieving mangle numbering context failed!");
- HasKnownInternalLinkage = true;
+ Numbering.HasKnownInternalLinkage = true;
}
if (MCtx) {
- unsigned ManglingNumber = MCtx->getManglingNumber(Method);
- Class->setLambdaMangling(ManglingNumber, ManglingContextDecl,
- HasKnownInternalLinkage);
- Class->setDeviceLambdaManglingNumber(MCtx->getDeviceManglingNumber(Method));
+ Numbering.IndexInContext = MCtx->getNextLambdaIndex();
+ Numbering.ManglingNumber = MCtx->getManglingNumber(Method);
+ Numbering.DeviceManglingNumber = MCtx->getDeviceManglingNumber(Method);
+ Class->setLambdaNumbering(Numbering);
+
+ if (auto *Source =
+ dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
+ Source->AssignedLambdaNumbering(Class);
}
}
-void Sema::buildLambdaScope(LambdaScopeInfo *LSI,
- CXXMethodDecl *CallOperator,
- SourceRange IntroducerRange,
- LambdaCaptureDefault CaptureDefault,
- SourceLocation CaptureDefaultLoc,
- bool ExplicitParams,
- bool ExplicitResultType,
- bool Mutable) {
+static void buildLambdaScopeReturnType(Sema &S, LambdaScopeInfo *LSI,
+ CXXMethodDecl *CallOperator,
+ bool ExplicitResultType) {
+ if (ExplicitResultType) {
+ LSI->HasImplicitReturnType = false;
+ LSI->ReturnType = CallOperator->getReturnType();
+ if (!LSI->ReturnType->isDependentType() && !LSI->ReturnType->isVoidType())
+ S.RequireCompleteType(CallOperator->getBeginLoc(), LSI->ReturnType,
+ diag::err_lambda_incomplete_result);
+ } else {
+ LSI->HasImplicitReturnType = true;
+ }
+}
+
+void Sema::buildLambdaScope(LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator,
+ SourceRange IntroducerRange,
+ LambdaCaptureDefault CaptureDefault,
+ SourceLocation CaptureDefaultLoc,
+ bool ExplicitParams, bool Mutable) {
LSI->CallOperator = CallOperator;
CXXRecordDecl *LambdaClass = CallOperator->getParent();
LSI->Lambda = LambdaClass;
@@ -501,30 +493,16 @@ void Sema::buildLambdaScope(LambdaScopeInfo *LSI,
LSI->IntroducerRange = IntroducerRange;
LSI->ExplicitParams = ExplicitParams;
LSI->Mutable = Mutable;
-
- if (ExplicitResultType) {
- LSI->ReturnType = CallOperator->getReturnType();
-
- if (!LSI->ReturnType->isDependentType() &&
- !LSI->ReturnType->isVoidType()) {
- if (RequireCompleteType(CallOperator->getBeginLoc(), LSI->ReturnType,
- diag::err_lambda_incomplete_result)) {
- // Do nothing.
- }
- }
- } else {
- LSI->HasImplicitReturnType = true;
- }
}
void Sema::finishLambdaExplicitCaptures(LambdaScopeInfo *LSI) {
LSI->finishedExplicitCaptures();
}
-void Sema::ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
- ArrayRef<NamedDecl *> TParams,
- SourceLocation RAngleLoc,
- ExprResult RequiresClause) {
+void Sema::ActOnLambdaExplicitTemplateParameterList(
+ LambdaIntroducer &Intro, SourceLocation LAngleLoc,
+ ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc,
+ ExprResult RequiresClause) {
LambdaScopeInfo *LSI = getCurLambda();
assert(LSI && "Expected a lambda scope");
assert(LSI->NumExplicitTemplateParams == 0 &&
@@ -540,35 +518,6 @@ void Sema::ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
LSI->RequiresClause = RequiresClause;
}
-void Sema::addLambdaParameters(
- ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
- CXXMethodDecl *CallOperator, Scope *CurScope) {
- // Introduce our parameters into the function scope
- for (unsigned p = 0, NumParams = CallOperator->getNumParams();
- p < NumParams; ++p) {
- ParmVarDecl *Param = CallOperator->getParamDecl(p);
-
- // If this has an identifier, add it to the scope stack.
- if (CurScope && Param->getIdentifier()) {
- bool Error = false;
- // Resolution of CWG 2211 in C++17 renders shadowing ill-formed, but we
- // retroactively apply it.
- for (const auto &Capture : Captures) {
- if (Capture.Id == Param->getIdentifier()) {
- Error = true;
- Diag(Param->getLocation(), diag::err_parameter_shadow_capture);
- Diag(Capture.Loc, diag::note_var_explicitly_captured_here)
- << Capture.Id << true;
- }
- }
- if (!Error)
- CheckShadow(CurScope, Param);
-
- PushOnScopeChains(Param, CurScope);
- }
- }
-}
-
/// If this expression is an enumerator-like expression of some type
/// T, return the type T; otherwise, return null.
///
@@ -771,8 +720,8 @@ void Sema::deduceClosureReturnType(CapturingScopeInfo &CSI) {
if (Context.getCanonicalFunctionResultType(ReturnType) ==
Context.getCanonicalFunctionResultType(CSI.ReturnType)) {
// Use the return type with the strictest possible nullability annotation.
- auto RetTyNullability = ReturnType->getNullability(Ctx);
- auto BlockNullability = CSI.ReturnType->getNullability(Ctx);
+ auto RetTyNullability = ReturnType->getNullability();
+ auto BlockNullability = CSI.ReturnType->getNullability();
if (BlockNullability &&
(!RetTyNullability ||
hasWeakerNullability(*RetTyNullability, *BlockNullability)))
@@ -791,8 +740,8 @@ void Sema::deduceClosureReturnType(CapturingScopeInfo &CSI) {
QualType Sema::buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
- Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool IsDirectInit,
- Expr *&Init) {
+ std::optional<unsigned> NumExpansions, IdentifierInfo *Id,
+ bool IsDirectInit, Expr *&Init) {
// Create an 'auto' or 'auto&' TypeSourceInfo that we can use to
// deduce against.
QualType DeductType = Context.getAutoDeductType();
@@ -855,11 +804,9 @@ QualType Sema::buildLambdaInitCaptureInitialization(
return DeducedType;
}
-VarDecl *Sema::createLambdaInitCaptureVarDecl(SourceLocation Loc,
- QualType InitCaptureType,
- SourceLocation EllipsisLoc,
- IdentifierInfo *Id,
- unsigned InitStyle, Expr *Init) {
+VarDecl *Sema::createLambdaInitCaptureVarDecl(
+ SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc,
+ IdentifierInfo *Id, unsigned InitStyle, Expr *Init, DeclContext *DeclCtx) {
// FIXME: Retain the TypeSourceInfo from buildLambdaInitCaptureInitialization
// rather than reconstructing it here.
TypeSourceInfo *TSI = Context.getTrivialTypeSourceInfo(InitCaptureType, Loc);
@@ -870,8 +817,8 @@ VarDecl *Sema::createLambdaInitCaptureVarDecl(SourceLocation Loc,
// used as a variable, and only exists as a way to name and refer to the
// init-capture.
// FIXME: Pass in separate source locations for '&' and identifier.
- VarDecl *NewVD = VarDecl::Create(Context, CurContext, Loc,
- Loc, Id, InitCaptureType, TSI, SC_Auto);
+ VarDecl *NewVD = VarDecl::Create(Context, DeclCtx, Loc, Loc, Id,
+ InitCaptureType, TSI, SC_Auto);
NewVD->setInitCapture(true);
NewVD->setReferenced(true);
// FIXME: Pass in a VarDecl::InitializationStyle.
@@ -883,168 +830,247 @@ VarDecl *Sema::createLambdaInitCaptureVarDecl(SourceLocation Loc,
return NewVD;
}
-void Sema::addInitCapture(LambdaScopeInfo *LSI, VarDecl *Var) {
+void Sema::addInitCapture(LambdaScopeInfo *LSI, VarDecl *Var, bool ByRef) {
assert(Var->isInitCapture() && "init capture flag should be set");
- LSI->addCapture(Var, /*isBlock*/false, Var->getType()->isReferenceType(),
- /*isNested*/false, Var->getLocation(), SourceLocation(),
- Var->getType(), /*Invalid*/false);
+ LSI->addCapture(Var, /*isBlock=*/false, ByRef,
+ /*isNested=*/false, Var->getLocation(), SourceLocation(),
+ Var->getType(), /*Invalid=*/false);
}
-void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
- Declarator &ParamInfo,
- Scope *CurScope) {
- LambdaScopeInfo *const LSI = getCurLambda();
- assert(LSI && "LambdaScopeInfo should be on stack!");
+// Unlike getCurLambda, getCurrentLambdaScopeUnsafe doesn't
+// check that the current lambda is in a consistent or fully constructed state.
+static LambdaScopeInfo *getCurrentLambdaScopeUnsafe(Sema &S) {
+ assert(!S.FunctionScopes.empty());
+ return cast<LambdaScopeInfo>(S.FunctionScopes[S.FunctionScopes.size() - 1]);
+}
- // Determine if we're within a context where we know that the lambda will
- // be dependent, because there are template parameters in scope.
- bool KnownDependent;
- if (LSI->NumExplicitTemplateParams > 0) {
- auto *TemplateParamScope = CurScope->getTemplateParamParent();
- assert(TemplateParamScope &&
- "Lambda with explicit template param list should establish a "
- "template param scope");
- assert(TemplateParamScope->getParent());
- KnownDependent = TemplateParamScope->getParent()
- ->getTemplateParamParent() != nullptr;
- } else {
- KnownDependent = CurScope->getTemplateParamParent() != nullptr;
- }
+static TypeSourceInfo *
+getDummyLambdaType(Sema &S, SourceLocation Loc = SourceLocation()) {
+ // C++11 [expr.prim.lambda]p4:
+ // If a lambda-expression does not include a lambda-declarator, it is as
+ // if the lambda-declarator were ().
+ FunctionProtoType::ExtProtoInfo EPI(S.Context.getDefaultCallingConvention(
+ /*IsVariadic=*/false, /*IsCXXMethod=*/true));
+ EPI.HasTrailingReturn = true;
+ EPI.TypeQuals.addConst();
+ LangAS AS = S.getDefaultCXXMethodAddrSpace();
+ if (AS != LangAS::Default)
+ EPI.TypeQuals.addAddressSpace(AS);
+
+ // C++1y [expr.prim.lambda]:
+ // The lambda return type is 'auto', which is replaced by the
+ // trailing-return type if provided and/or deduced from 'return'
+ // statements
+ // We don't do this before C++1y, because we don't support deduced return
+ // types there.
+ QualType DefaultTypeForNoTrailingReturn = S.getLangOpts().CPlusPlus14
+ ? S.Context.getAutoDeductType()
+ : S.Context.DependentTy;
+ QualType MethodTy = S.Context.getFunctionType(DefaultTypeForNoTrailingReturn,
+ std::nullopt, EPI);
+ return S.Context.getTrivialTypeSourceInfo(MethodTy, Loc);
+}
+
+static TypeSourceInfo *getLambdaType(Sema &S, LambdaIntroducer &Intro,
+ Declarator &ParamInfo, Scope *CurScope,
+ SourceLocation Loc,
+ bool &ExplicitResultType) {
+
+ ExplicitResultType = false;
+
+ assert(
+ (ParamInfo.getDeclSpec().getStorageClassSpec() ==
+ DeclSpec::SCS_unspecified ||
+ ParamInfo.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static) &&
+ "Unexpected storage specifier");
+ bool IsLambdaStatic =
+ ParamInfo.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static;
- // Determine the signature of the call operator.
TypeSourceInfo *MethodTyInfo;
- bool ExplicitParams = true;
- bool ExplicitResultType = true;
- bool ContainsUnexpandedParameterPack = false;
- SourceLocation EndLoc;
- SmallVector<ParmVarDecl *, 8> Params;
+
if (ParamInfo.getNumTypeObjects() == 0) {
- // C++11 [expr.prim.lambda]p4:
- // If a lambda-expression does not include a lambda-declarator, it is as
- // if the lambda-declarator were ().
- FunctionProtoType::ExtProtoInfo EPI(Context.getDefaultCallingConvention(
- /*IsVariadic=*/false, /*IsCXXMethod=*/true));
- EPI.HasTrailingReturn = true;
- EPI.TypeQuals.addConst();
- LangAS AS = getDefaultCXXMethodAddrSpace();
- if (AS != LangAS::Default)
- EPI.TypeQuals.addAddressSpace(AS);
-
- // C++1y [expr.prim.lambda]:
- // The lambda return type is 'auto', which is replaced by the
- // trailing-return type if provided and/or deduced from 'return'
- // statements
- // We don't do this before C++1y, because we don't support deduced return
- // types there.
- QualType DefaultTypeForNoTrailingReturn =
- getLangOpts().CPlusPlus14 ? Context.getAutoDeductType()
- : Context.DependentTy;
- QualType MethodTy =
- Context.getFunctionType(DefaultTypeForNoTrailingReturn, None, EPI);
- MethodTyInfo = Context.getTrivialTypeSourceInfo(MethodTy);
- ExplicitParams = false;
- ExplicitResultType = false;
- EndLoc = Intro.Range.getEnd();
+ MethodTyInfo = getDummyLambdaType(S, Loc);
} else {
- assert(ParamInfo.isFunctionDeclarator() &&
- "lambda-declarator is a function");
- DeclaratorChunk::FunctionTypeInfo &FTI = ParamInfo.getFunctionTypeInfo();
+ // Check explicit parameters
+ S.CheckExplicitObjectLambda(ParamInfo);
- // C++11 [expr.prim.lambda]p5:
- // This function call operator is declared const (9.3.1) if and only if
- // the lambda-expression's parameter-declaration-clause is not followed
- // by mutable. It is neither virtual nor declared volatile. [...]
- if (!FTI.hasMutableQualifier()) {
- FTI.getOrCreateMethodQualifiers().SetTypeQual(DeclSpec::TQ_const,
- SourceLocation());
- }
+ DeclaratorChunk::FunctionTypeInfo &FTI = ParamInfo.getFunctionTypeInfo();
- MethodTyInfo = GetTypeForDeclarator(ParamInfo, CurScope);
- assert(MethodTyInfo && "no type from lambda-declarator");
- EndLoc = ParamInfo.getSourceRange().getEnd();
+ bool HasExplicitObjectParameter =
+ ParamInfo.isExplicitObjectMemberFunction();
ExplicitResultType = FTI.hasTrailingReturnType();
-
- if (FTIHasNonVoidParameters(FTI)) {
- Params.reserve(FTI.NumParams);
- for (unsigned i = 0, e = FTI.NumParams; i != e; ++i)
- Params.push_back(cast<ParmVarDecl>(FTI.Params[i].Param));
+ if (!FTI.hasMutableQualifier() && !IsLambdaStatic &&
+ !HasExplicitObjectParameter)
+ FTI.getOrCreateMethodQualifiers().SetTypeQual(DeclSpec::TQ_const, Loc);
+
+ if (ExplicitResultType && S.getLangOpts().HLSL) {
+ QualType RetTy = FTI.getTrailingReturnType().get();
+ if (!RetTy.isNull()) {
+ // HLSL does not support specifying an address space on a lambda return
+ // type.
+ LangAS AddressSpace = RetTy.getAddressSpace();
+ if (AddressSpace != LangAS::Default)
+ S.Diag(FTI.getTrailingReturnTypeLoc(),
+ diag::err_return_value_with_address_space);
+ }
}
+ MethodTyInfo = S.GetTypeForDeclarator(ParamInfo);
+ assert(MethodTyInfo && "no type from lambda-declarator");
+
// Check for unexpanded parameter packs in the method type.
if (MethodTyInfo->getType()->containsUnexpandedParameterPack())
- DiagnoseUnexpandedParameterPack(Intro.Range.getBegin(), MethodTyInfo,
- UPPC_DeclarationType);
+ S.DiagnoseUnexpandedParameterPack(Intro.Range.getBegin(), MethodTyInfo,
+ S.UPPC_DeclarationType);
}
+ return MethodTyInfo;
+}
- CXXRecordDecl *Class = createLambdaClosureType(Intro.Range, MethodTyInfo,
- KnownDependent, Intro.Default);
- CXXMethodDecl *Method =
- startLambdaDefinition(Class, Intro.Range, MethodTyInfo, EndLoc, Params,
- ParamInfo.getDeclSpec().getConstexprSpecifier(),
- ParamInfo.getTrailingRequiresClause());
- if (ExplicitParams)
- CheckCXXDefaultArguments(Method);
+CXXMethodDecl *Sema::CreateLambdaCallOperator(SourceRange IntroducerRange,
+ CXXRecordDecl *Class) {
+
+ // C++20 [expr.prim.lambda.closure]p3:
+ // The closure type for a lambda-expression has a public inline function
+ // call operator (for a non-generic lambda) or function call operator
+ // template (for a generic lambda) whose parameters and return type are
+ // described by the lambda-expression's parameter-declaration-clause
+ // and trailing-return-type respectively.
+ DeclarationName MethodName =
+ Context.DeclarationNames.getCXXOperatorName(OO_Call);
+ DeclarationNameLoc MethodNameLoc =
+ DeclarationNameLoc::makeCXXOperatorNameLoc(IntroducerRange.getBegin());
+ CXXMethodDecl *Method = CXXMethodDecl::Create(
+ Context, Class, SourceLocation(),
+ DeclarationNameInfo(MethodName, IntroducerRange.getBegin(),
+ MethodNameLoc),
+ QualType(), /*Tinfo=*/nullptr, SC_None,
+ getCurFPFeatures().isFPConstrained(),
+ /*isInline=*/true, ConstexprSpecKind::Unspecified, SourceLocation(),
+ /*TrailingRequiresClause=*/nullptr);
+ Method->setAccess(AS_public);
+ return Method;
+}
- // This represents the function body for the lambda function, check if we
- // have to apply optnone due to a pragma.
- AddRangeBasedOptnone(Method);
+void Sema::AddTemplateParametersToLambdaCallOperator(
+ CXXMethodDecl *CallOperator, CXXRecordDecl *Class,
+ TemplateParameterList *TemplateParams) {
+ assert(TemplateParams && "no template parameters");
+ FunctionTemplateDecl *TemplateMethod = FunctionTemplateDecl::Create(
+ Context, Class, CallOperator->getLocation(), CallOperator->getDeclName(),
+ TemplateParams, CallOperator);
+ TemplateMethod->setAccess(AS_public);
+ CallOperator->setDescribedFunctionTemplate(TemplateMethod);
+}
- // code_seg attribute on lambda apply to the method.
- if (Attr *A = getImplicitCodeSegOrSectionAttrForFunction(Method, /*IsDefinition=*/true))
- Method->addAttr(A);
+void Sema::CompleteLambdaCallOperator(
+ CXXMethodDecl *Method, SourceLocation LambdaLoc,
+ SourceLocation CallOperatorLoc, Expr *TrailingRequiresClause,
+ TypeSourceInfo *MethodTyInfo, ConstexprSpecKind ConstexprKind,
+ StorageClass SC, ArrayRef<ParmVarDecl *> Params,
+ bool HasExplicitResultType) {
- // Attributes on the lambda apply to the method.
- ProcessDeclAttributes(CurScope, Method, ParamInfo);
+ LambdaScopeInfo *LSI = getCurrentLambdaScopeUnsafe(*this);
- // CUDA lambdas get implicit host and device attributes.
- if (getLangOpts().CUDA)
- CUDASetLambdaAttrs(Method);
+ if (TrailingRequiresClause)
+ Method->setTrailingRequiresClause(TrailingRequiresClause);
- // OpenMP lambdas might get assumumption attributes.
- if (LangOpts.OpenMP)
- ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Method);
+ TemplateParameterList *TemplateParams =
+ getGenericLambdaTemplateParameterList(LSI, *this);
+
+ DeclContext *DC = Method->getLexicalDeclContext();
+ Method->setLexicalDeclContext(LSI->Lambda);
+ if (TemplateParams) {
+ FunctionTemplateDecl *TemplateMethod =
+ Method->getDescribedFunctionTemplate();
+ assert(TemplateMethod &&
+ "AddTemplateParametersToLambdaCallOperator should have been called");
+
+ LSI->Lambda->addDecl(TemplateMethod);
+ TemplateMethod->setLexicalDeclContext(DC);
+ } else {
+ LSI->Lambda->addDecl(Method);
+ }
+ LSI->Lambda->setLambdaIsGeneric(TemplateParams);
+ LSI->Lambda->setLambdaTypeInfo(MethodTyInfo);
+
+ Method->setLexicalDeclContext(DC);
+ Method->setLocation(LambdaLoc);
+ Method->setInnerLocStart(CallOperatorLoc);
+ Method->setTypeSourceInfo(MethodTyInfo);
+ Method->setType(buildTypeForLambdaCallOperator(*this, LSI->Lambda,
+ TemplateParams, MethodTyInfo));
+ Method->setConstexprKind(ConstexprKind);
+ Method->setStorageClass(SC);
+ if (!Params.empty()) {
+ CheckParmsForFunctionDef(Params, /*CheckParameterNames=*/false);
+ Method->setParams(Params);
+ for (auto P : Method->parameters()) {
+ assert(P && "null in a parameter list");
+ P->setOwningFunction(Method);
+ }
+ }
- // Number the lambda for linkage purposes if necessary.
- handleLambdaNumbering(Class, Method);
+ buildLambdaScopeReturnType(*this, LSI, Method, HasExplicitResultType);
+}
- // Introduce the function call operator as the current declaration context.
- PushDeclContext(CurScope, Method);
+void Sema::ActOnLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro,
+ Scope *CurrentScope) {
- // Build the lambda scope.
- buildLambdaScope(LSI, Method, Intro.Range, Intro.Default, Intro.DefaultLoc,
- ExplicitParams, ExplicitResultType, !Method->isConst());
+ LambdaScopeInfo *LSI = getCurLambda();
+ assert(LSI && "LambdaScopeInfo should be on stack!");
- // C++11 [expr.prim.lambda]p9:
- // A lambda-expression whose smallest enclosing scope is a block scope is a
- // local lambda expression; any other lambda expression shall not have a
- // capture-default or simple-capture in its lambda-introducer.
- //
- // For simple-captures, this is covered by the check below that any named
- // entity is a variable that can be captured.
- //
- // For DR1632, we also allow a capture-default in any context where we can
- // odr-use 'this' (in particular, in a default initializer for a non-static
- // data member).
- if (Intro.Default != LCD_None && !Class->getParent()->isFunctionOrMethod() &&
- (getCurrentThisType().isNull() ||
- CheckCXXThisCapture(SourceLocation(), /*Explicit*/true,
- /*BuildAndDiagnose*/false)))
- Diag(Intro.DefaultLoc, diag::err_capture_default_non_local);
+ if (Intro.Default == LCD_ByCopy)
+ LSI->ImpCaptureStyle = LambdaScopeInfo::ImpCap_LambdaByval;
+ else if (Intro.Default == LCD_ByRef)
+ LSI->ImpCaptureStyle = LambdaScopeInfo::ImpCap_LambdaByref;
+ LSI->CaptureDefaultLoc = Intro.DefaultLoc;
+ LSI->IntroducerRange = Intro.Range;
+ LSI->AfterParameterList = false;
+
+ assert(LSI->NumExplicitTemplateParams == 0);
+
+ // Determine if we're within a context where we know that the lambda will
+ // be dependent, because there are template parameters in scope.
+ CXXRecordDecl::LambdaDependencyKind LambdaDependencyKind =
+ CXXRecordDecl::LDK_Unknown;
+ if (LSI->NumExplicitTemplateParams > 0) {
+ Scope *TemplateParamScope = CurScope->getTemplateParamParent();
+ assert(TemplateParamScope &&
+ "Lambda with explicit template param list should establish a "
+ "template param scope");
+ assert(TemplateParamScope->getParent());
+ if (TemplateParamScope->getParent()->getTemplateParamParent() != nullptr)
+ LambdaDependencyKind = CXXRecordDecl::LDK_AlwaysDependent;
+ } else if (CurScope->getTemplateParamParent() != nullptr) {
+ LambdaDependencyKind = CXXRecordDecl::LDK_AlwaysDependent;
+ }
+
+ CXXRecordDecl *Class = createLambdaClosureType(
+ Intro.Range, /*Info=*/nullptr, LambdaDependencyKind, Intro.Default);
+ LSI->Lambda = Class;
+
+ CXXMethodDecl *Method = CreateLambdaCallOperator(Intro.Range, Class);
+ LSI->CallOperator = Method;
+ Method->setLexicalDeclContext(CurContext);
+
+ PushDeclContext(CurScope, Method);
+
+ bool ContainsUnexpandedParameterPack = false;
// Distinct capture names, for diagnostics.
- llvm::SmallSet<IdentifierInfo*, 8> CaptureNames;
+ llvm::DenseMap<IdentifierInfo *, ValueDecl *> CaptureNames;
// Handle explicit captures.
- SourceLocation PrevCaptureLoc
- = Intro.Default == LCD_None? Intro.Range.getBegin() : Intro.DefaultLoc;
+ SourceLocation PrevCaptureLoc =
+ Intro.Default == LCD_None ? Intro.Range.getBegin() : Intro.DefaultLoc;
for (auto C = Intro.Captures.begin(), E = Intro.Captures.end(); C != E;
PrevCaptureLoc = C->Loc, ++C) {
if (C->Kind == LCK_This || C->Kind == LCK_StarThis) {
if (C->Kind == LCK_StarThis)
Diag(C->Loc, !getLangOpts().CPlusPlus17
- ? diag::ext_star_this_lambda_capture_cxx17
- : diag::warn_cxx14_compat_star_this_lambda_capture);
+ ? diag::ext_star_this_lambda_capture_cxx17
+ : diag::warn_cxx14_compat_star_this_lambda_capture);
// C++11 [expr.prim.lambda]p8:
// An identifier or this shall not appear more than once in a
@@ -1057,7 +1083,7 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
continue;
}
- // C++2a [expr.prim.lambda]p8:
+ // C++20 [expr.prim.lambda]p8:
// If a lambda-capture includes a capture-default that is =,
// each simple-capture of that lambda-capture shall be of the form
// "&identifier", "this", or "* this". [ Note: The form [&,this] is
@@ -1089,7 +1115,7 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
if (C->Init.isInvalid())
continue;
- VarDecl *Var = nullptr;
+ ValueDecl *Var = nullptr;
if (C->Init.isUsable()) {
Diag(C->Loc, getLangOpts().CPlusPlus14
? diag::warn_cxx11_compat_init_capture
@@ -1123,13 +1149,11 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
}
Var = createLambdaInitCaptureVarDecl(C->Loc, C->InitCaptureType.get(),
C->EllipsisLoc, C->Id, InitStyle,
- C->Init.get());
- // C++1y [expr.prim.lambda]p11:
- // An init-capture behaves as if it declares and explicitly
- // captures a variable [...] whose declarative region is the
- // lambda-expression's compound-statement
- if (Var)
- PushOnScopeChains(Var, CurScope, false);
+ C->Init.get(), Method);
+ assert(Var && "createLambdaInitCaptureVarDecl returned a null VarDecl?");
+ if (auto *V = dyn_cast<VarDecl>(Var))
+ CheckShadow(CurrentScope, V);
+ PushOnScopeChains(Var, CurrentScope, false);
} else {
assert(C->InitKind == LambdaCaptureInitKind::NoInit &&
"init capture has valid but null init?");
@@ -1167,41 +1191,52 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
continue;
}
- Var = R.getAsSingle<VarDecl>();
+ if (auto *BD = R.getAsSingle<BindingDecl>())
+ Var = BD;
+ else
+ Var = R.getAsSingle<VarDecl>();
if (Var && DiagnoseUseOfDecl(Var, C->Loc))
continue;
}
+ // C++11 [expr.prim.lambda]p10:
+ // [...] each such lookup shall find a variable with automatic storage
+ // duration declared in the reaching scope of the local lambda expression.
+ // Note that the 'reaching scope' check happens in tryCaptureVariable().
+ if (!Var) {
+ Diag(C->Loc, diag::err_capture_does_not_name_variable) << C->Id;
+ continue;
+ }
+
// C++11 [expr.prim.lambda]p8:
// An identifier or this shall not appear more than once in a
// lambda-capture.
- if (!CaptureNames.insert(C->Id).second) {
- if (Var && LSI->isCaptured(Var)) {
+ if (auto [It, Inserted] = CaptureNames.insert(std::pair{C->Id, Var});
+ !Inserted) {
+ if (C->InitKind == LambdaCaptureInitKind::NoInit &&
+ !Var->isInitCapture()) {
Diag(C->Loc, diag::err_capture_more_than_once)
- << C->Id << SourceRange(LSI->getCapture(Var).getLocation())
+ << C->Id << It->second->getBeginLoc()
<< FixItHint::CreateRemoval(
SourceRange(getLocForEndOfToken(PrevCaptureLoc), C->Loc));
- } else
+ Var->setInvalidDecl();
+ } else if (Var && Var->isPlaceholderVar(getLangOpts())) {
+ DiagPlaceholderVariableDefinition(C->Loc);
+ } else {
// Previous capture captured something different (one or both was
- // an init-cpature): no fixit.
+ // an init-capture): no fixit.
Diag(C->Loc, diag::err_capture_more_than_once) << C->Id;
- continue;
- }
-
- // C++11 [expr.prim.lambda]p10:
- // [...] each such lookup shall find a variable with automatic storage
- // duration declared in the reaching scope of the local lambda expression.
- // Note that the 'reaching scope' check happens in tryCaptureVariable().
- if (!Var) {
- Diag(C->Loc, diag::err_capture_does_not_name_variable) << C->Id;
- continue;
+ continue;
+ }
}
// Ignore invalid decls; they'll just confuse the code later.
if (Var->isInvalidDecl())
continue;
- if (!Var->hasLocalStorage()) {
+ VarDecl *Underlying = Var->getPotentiallyDecomposedVarDecl();
+
+ if (!Underlying->hasLocalStorage()) {
Diag(C->Loc, diag::err_capture_non_automatic_variable) << C->Id;
Diag(Var->getLocation(), diag::note_previous_decl) << C->Id;
continue;
@@ -1225,28 +1260,237 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
}
if (C->Init.isUsable()) {
- addInitCapture(LSI, Var);
+ addInitCapture(LSI, cast<VarDecl>(Var), C->Kind == LCK_ByRef);
+ PushOnScopeChains(Var, CurScope, false);
} else {
- TryCaptureKind Kind = C->Kind == LCK_ByRef ? TryCapture_ExplicitByRef :
- TryCapture_ExplicitByVal;
+ TryCaptureKind Kind = C->Kind == LCK_ByRef ? TryCapture_ExplicitByRef
+ : TryCapture_ExplicitByVal;
tryCaptureVariable(Var, C->Loc, Kind, EllipsisLoc);
}
if (!LSI->Captures.empty())
LSI->ExplicitCaptureRanges[LSI->Captures.size() - 1] = C->ExplicitRange;
}
finishLambdaExplicitCaptures(LSI);
-
LSI->ContainsUnexpandedParameterPack |= ContainsUnexpandedParameterPack;
+ PopDeclContext();
+}
+
+void Sema::ActOnLambdaClosureQualifiers(LambdaIntroducer &Intro,
+ SourceLocation MutableLoc) {
+
+ LambdaScopeInfo *LSI = getCurrentLambdaScopeUnsafe(*this);
+ LSI->Mutable = MutableLoc.isValid();
+ ContextRAII Context(*this, LSI->CallOperator, /*NewThisContext*/ false);
+
+ // C++11 [expr.prim.lambda]p9:
+ // A lambda-expression whose smallest enclosing scope is a block scope is a
+ // local lambda expression; any other lambda expression shall not have a
+ // capture-default or simple-capture in its lambda-introducer.
+ //
+ // For simple-captures, this is covered by the check below that any named
+ // entity is a variable that can be captured.
+ //
+ // For DR1632, we also allow a capture-default in any context where we can
+ // odr-use 'this' (in particular, in a default initializer for a non-static
+ // data member).
+ if (Intro.Default != LCD_None &&
+ !LSI->Lambda->getParent()->isFunctionOrMethod() &&
+ (getCurrentThisType().isNull() ||
+ CheckCXXThisCapture(SourceLocation(), /*Explicit=*/true,
+ /*BuildAndDiagnose=*/false)))
+ Diag(Intro.DefaultLoc, diag::err_capture_default_non_local);
+}
+
+void Sema::ActOnLambdaClosureParameters(
+ Scope *LambdaScope, MutableArrayRef<DeclaratorChunk::ParamInfo> Params) {
+ LambdaScopeInfo *LSI = getCurrentLambdaScopeUnsafe(*this);
+ PushDeclContext(LambdaScope, LSI->CallOperator);
+
+ for (const DeclaratorChunk::ParamInfo &P : Params) {
+ auto *Param = cast<ParmVarDecl>(P.Param);
+ Param->setOwningFunction(LSI->CallOperator);
+ if (Param->getIdentifier())
+ PushOnScopeChains(Param, LambdaScope, false);
+ }
+
+ // After the parameter list, we may parse a noexcept/requires/trailing return
+ // type which need to know whether the call operator constiture a dependent
+ // context, so we need to setup the FunctionTemplateDecl of generic lambdas
+ // now.
+ TemplateParameterList *TemplateParams =
+ getGenericLambdaTemplateParameterList(LSI, *this);
+ if (TemplateParams) {
+ AddTemplateParametersToLambdaCallOperator(LSI->CallOperator, LSI->Lambda,
+ TemplateParams);
+ LSI->Lambda->setLambdaIsGeneric(true);
+ }
+ LSI->AfterParameterList = true;
+}
+
+void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
+ Declarator &ParamInfo,
+ const DeclSpec &DS) {
+
+ LambdaScopeInfo *LSI = getCurrentLambdaScopeUnsafe(*this);
+ LSI->CallOperator->setConstexprKind(DS.getConstexprSpecifier());
+
+ SmallVector<ParmVarDecl *, 8> Params;
+ bool ExplicitResultType;
+
+ SourceLocation TypeLoc, CallOperatorLoc;
+ if (ParamInfo.getNumTypeObjects() == 0) {
+ CallOperatorLoc = TypeLoc = Intro.Range.getEnd();
+ } else {
+ unsigned Index;
+ ParamInfo.isFunctionDeclarator(Index);
+ const auto &Object = ParamInfo.getTypeObject(Index);
+ TypeLoc =
+ Object.Loc.isValid() ? Object.Loc : ParamInfo.getSourceRange().getEnd();
+ CallOperatorLoc = ParamInfo.getSourceRange().getEnd();
+ }
+
+ CXXRecordDecl *Class = LSI->Lambda;
+ CXXMethodDecl *Method = LSI->CallOperator;
+
+ TypeSourceInfo *MethodTyInfo = getLambdaType(
+ *this, Intro, ParamInfo, getCurScope(), TypeLoc, ExplicitResultType);
+
+ LSI->ExplicitParams = ParamInfo.getNumTypeObjects() != 0;
+
+ if (ParamInfo.isFunctionDeclarator() != 0 &&
+ !FTIHasSingleVoidParameter(ParamInfo.getFunctionTypeInfo())) {
+ const auto &FTI = ParamInfo.getFunctionTypeInfo();
+ Params.reserve(Params.size());
+ for (unsigned I = 0; I < FTI.NumParams; ++I) {
+ auto *Param = cast<ParmVarDecl>(FTI.Params[I].Param);
+ Param->setScopeInfo(0, Params.size());
+ Params.push_back(Param);
+ }
+ }
+
+ bool IsLambdaStatic =
+ ParamInfo.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static;
+
+ CompleteLambdaCallOperator(
+ Method, Intro.Range.getBegin(), CallOperatorLoc,
+ ParamInfo.getTrailingRequiresClause(), MethodTyInfo,
+ ParamInfo.getDeclSpec().getConstexprSpecifier(),
+ IsLambdaStatic ? SC_Static : SC_None, Params, ExplicitResultType);
+
+ CheckCXXDefaultArguments(Method);
- // Add lambda parameters into scope.
- addLambdaParameters(Intro.Captures, Method, CurScope);
+ // This represents the function body for the lambda function, check if we
+ // have to apply optnone due to a pragma.
+ AddRangeBasedOptnone(Method);
+
+ // code_seg attribute on lambda apply to the method.
+ if (Attr *A = getImplicitCodeSegOrSectionAttrForFunction(
+ Method, /*IsDefinition=*/true))
+ Method->addAttr(A);
+
+ // Attributes on the lambda apply to the method.
+ ProcessDeclAttributes(CurScope, Method, ParamInfo);
+
+ // CUDA lambdas get implicit host and device attributes.
+ if (getLangOpts().CUDA)
+ CUDASetLambdaAttrs(Method);
+
+ // OpenMP lambdas might get assumumption attributes.
+ if (LangOpts.OpenMP)
+ ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Method);
+
+ handleLambdaNumbering(Class, Method);
+
+ for (auto &&C : LSI->Captures) {
+ if (!C.isVariableCapture())
+ continue;
+ ValueDecl *Var = C.getVariable();
+ if (Var && Var->isInitCapture()) {
+ PushOnScopeChains(Var, CurScope, false);
+ }
+ }
+
+ auto CheckRedefinition = [&](ParmVarDecl *Param) {
+ for (const auto &Capture : Intro.Captures) {
+ if (Capture.Id == Param->getIdentifier()) {
+ Diag(Param->getLocation(), diag::err_parameter_shadow_capture);
+ Diag(Capture.Loc, diag::note_var_explicitly_captured_here)
+ << Capture.Id << true;
+ return false;
+ }
+ }
+ return true;
+ };
+
+ for (ParmVarDecl *P : Params) {
+ if (!P->getIdentifier())
+ continue;
+ if (CheckRedefinition(P))
+ CheckShadow(CurScope, P);
+ PushOnScopeChains(P, CurScope);
+ }
+
+ // C++23 [expr.prim.lambda.capture]p5:
+ // If an identifier in a capture appears as the declarator-id of a parameter
+ // of the lambda-declarator's parameter-declaration-clause or as the name of a
+ // template parameter of the lambda-expression's template-parameter-list, the
+ // program is ill-formed.
+ TemplateParameterList *TemplateParams =
+ getGenericLambdaTemplateParameterList(LSI, *this);
+ if (TemplateParams) {
+ for (const auto *TP : TemplateParams->asArray()) {
+ if (!TP->getIdentifier())
+ continue;
+ for (const auto &Capture : Intro.Captures) {
+ if (Capture.Id == TP->getIdentifier()) {
+ Diag(Capture.Loc, diag::err_template_param_shadow) << Capture.Id;
+ NoteTemplateParameterLocation(*TP);
+ }
+ }
+ }
+ }
+
+ // C++20: dcl.decl.general p4:
+ // The optional requires-clause ([temp.pre]) in an init-declarator or
+ // member-declarator shall be present only if the declarator declares a
+ // templated function ([dcl.fct]).
+ if (Expr *TRC = Method->getTrailingRequiresClause()) {
+ // [temp.pre]/8:
+ // An entity is templated if it is
+ // - a template,
+ // - an entity defined ([basic.def]) or created ([class.temporary]) in a
+ // templated entity,
+ // - a member of a templated entity,
+ // - an enumerator for an enumeration that is a templated entity, or
+ // - the closure type of a lambda-expression ([expr.prim.lambda.closure])
+ // appearing in the declaration of a templated entity. [Note 6: A local
+ // class, a local or block variable, or a friend function defined in a
+ // templated entity is a templated entity. — end note]
+ //
+ // A templated function is a function template or a function that is
+ // templated. A templated class is a class template or a class that is
+ // templated. A templated variable is a variable template or a variable
+ // that is templated.
+
+ // Note: we only have to check if this is defined in a template entity, OR
+ // if we are a template, since the rest don't apply. The requires clause
+ // applies to the call operator, which we already know is a member function,
+ // AND defined.
+ if (!Method->getDescribedFunctionTemplate() && !Method->isTemplated()) {
+ Diag(TRC->getBeginLoc(), diag::err_constrained_non_templated_function);
+ }
+ }
// Enter a new evaluation context to insulate the lambda from any
// cleanups from the enclosing full-expression.
PushExpressionEvaluationContext(
LSI->CallOperator->isConsteval()
- ? ExpressionEvaluationContext::ConstantEvaluated
+ ? ExpressionEvaluationContext::ImmediateFunctionContext
: ExpressionEvaluationContext::PotentiallyEvaluated);
+ ExprEvalContexts.back().InImmediateFunctionContext =
+ LSI->CallOperator->isConsteval();
+ ExprEvalContexts.back().InImmediateEscalatingFunctionContext =
+ getLangOpts().CPlusPlus20 && LSI->CallOperator->isImmediateEscalating();
}
void Sema::ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
@@ -1375,7 +1619,7 @@ static void addFunctionPointerConversion(Sema &S, SourceRange IntroducerRange,
ConvExtInfo.TypeQuals.addConst();
ConvExtInfo.ExceptionSpec.Type = EST_BasicNoexcept;
QualType ConvTy =
- S.Context.getFunctionType(PtrToFunctionTy, None, ConvExtInfo);
+ S.Context.getFunctionType(PtrToFunctionTy, std::nullopt, ConvExtInfo);
SourceLocation Loc = IntroducerRange.getBegin();
DeclarationName ConversionName
@@ -1447,6 +1691,7 @@ static void addFunctionPointerConversion(Sema &S, SourceRange IntroducerRange,
CXXConversionDecl *Conversion = CXXConversionDecl::Create(
S.Context, Class, Loc,
DeclarationNameInfo(ConversionName, Loc, ConvNameLoc), ConvTy, ConvTSI,
+ S.getCurFPFeatures().isFPConstrained(),
/*isInline=*/true, ExplicitSpecifier(),
S.getLangOpts().CPlusPlus17 ? ConstexprSpecKind::Constexpr
: ConstexprSpecKind::Unspecified,
@@ -1454,6 +1699,11 @@ static void addFunctionPointerConversion(Sema &S, SourceRange IntroducerRange,
Conversion->setAccess(AS_public);
Conversion->setImplicit(true);
+ // A non-generic lambda may still be a templated entity. We need to preserve
+ // constraints when converting the lambda to a function pointer. See GH63181.
+ if (Expr *Requires = CallOperator->getTrailingRequiresClause())
+ Conversion->setTrailingRequiresClause(Requires);
+
if (Class->isGenericLambda()) {
// Create a template version of the conversion operator, using the template
// parameter list of the function call operator.
@@ -1470,44 +1720,50 @@ static void addFunctionPointerConversion(Sema &S, SourceRange IntroducerRange,
Class->addDecl(ConversionTemplate);
} else
Class->addDecl(Conversion);
- // Add a non-static member function that will be the result of
- // the conversion with a certain unique ID.
- DeclarationName InvokerName = &S.Context.Idents.get(
- getLambdaStaticInvokerName());
- // FIXME: Instead of passing in the CallOperator->getTypeSourceInfo()
- // we should get a prebuilt TrivialTypeSourceInfo from Context
- // using FunctionTy & Loc and get its TypeLoc as a FunctionProtoTypeLoc
- // then rewire the parameters accordingly, by hoisting up the InvokeParams
- // loop below and then use its Params to set Invoke->setParams(...) below.
- // This would avoid the 'const' qualifier of the calloperator from
- // contaminating the type of the invoker, which is currently adjusted
- // in SemaTemplateDeduction.cpp:DeduceTemplateArguments. Fixing the
- // trailing return type of the invoker would require a visitor to rebuild
- // the trailing return type and adjusting all back DeclRefExpr's to refer
- // to the new static invoker parameters - not the call operator's.
- CXXMethodDecl *Invoke = CXXMethodDecl::Create(
- S.Context, Class, Loc, DeclarationNameInfo(InvokerName, Loc),
- InvokerFunctionTy, CallOperator->getTypeSourceInfo(), SC_Static,
- /*isInline=*/true, ConstexprSpecKind::Unspecified,
- CallOperator->getBody()->getEndLoc());
- for (unsigned I = 0, N = CallOperator->getNumParams(); I != N; ++I)
- InvokerParams[I]->setOwningFunction(Invoke);
- Invoke->setParams(InvokerParams);
- Invoke->setAccess(AS_private);
- Invoke->setImplicit(true);
- if (Class->isGenericLambda()) {
- FunctionTemplateDecl *TemplateCallOperator =
- CallOperator->getDescribedFunctionTemplate();
- FunctionTemplateDecl *StaticInvokerTemplate = FunctionTemplateDecl::Create(
- S.Context, Class, Loc, InvokerName,
- TemplateCallOperator->getTemplateParameters(),
- Invoke);
- StaticInvokerTemplate->setAccess(AS_private);
- StaticInvokerTemplate->setImplicit(true);
- Invoke->setDescribedFunctionTemplate(StaticInvokerTemplate);
- Class->addDecl(StaticInvokerTemplate);
- } else
- Class->addDecl(Invoke);
+
+ // If the lambda is not static, we need to add a static member
+ // function that will be the result of the conversion with a
+ // certain unique ID.
+ // When it is static we just return the static call operator instead.
+ if (CallOperator->isImplicitObjectMemberFunction()) {
+ DeclarationName InvokerName =
+ &S.Context.Idents.get(getLambdaStaticInvokerName());
+ // FIXME: Instead of passing in the CallOperator->getTypeSourceInfo()
+ // we should get a prebuilt TrivialTypeSourceInfo from Context
+ // using FunctionTy & Loc and get its TypeLoc as a FunctionProtoTypeLoc
+ // then rewire the parameters accordingly, by hoisting up the InvokeParams
+ // loop below and then use its Params to set Invoke->setParams(...) below.
+ // This would avoid the 'const' qualifier of the calloperator from
+ // contaminating the type of the invoker, which is currently adjusted
+ // in SemaTemplateDeduction.cpp:DeduceTemplateArguments. Fixing the
+ // trailing return type of the invoker would require a visitor to rebuild
+ // the trailing return type and adjusting all back DeclRefExpr's to refer
+ // to the new static invoker parameters - not the call operator's.
+ CXXMethodDecl *Invoke = CXXMethodDecl::Create(
+ S.Context, Class, Loc, DeclarationNameInfo(InvokerName, Loc),
+ InvokerFunctionTy, CallOperator->getTypeSourceInfo(), SC_Static,
+ S.getCurFPFeatures().isFPConstrained(),
+ /*isInline=*/true, CallOperator->getConstexprKind(),
+ CallOperator->getBody()->getEndLoc());
+ for (unsigned I = 0, N = CallOperator->getNumParams(); I != N; ++I)
+ InvokerParams[I]->setOwningFunction(Invoke);
+ Invoke->setParams(InvokerParams);
+ Invoke->setAccess(AS_private);
+ Invoke->setImplicit(true);
+ if (Class->isGenericLambda()) {
+ FunctionTemplateDecl *TemplateCallOperator =
+ CallOperator->getDescribedFunctionTemplate();
+ FunctionTemplateDecl *StaticInvokerTemplate =
+ FunctionTemplateDecl::Create(
+ S.Context, Class, Loc, InvokerName,
+ TemplateCallOperator->getTemplateParameters(), Invoke);
+ StaticInvokerTemplate->setAccess(AS_private);
+ StaticInvokerTemplate->setImplicit(true);
+ Invoke->setDescribedFunctionTemplate(StaticInvokerTemplate);
+ Class->addDecl(StaticInvokerTemplate);
+ } else
+ Class->addDecl(Invoke);
+ }
}
/// Add a lambda's conversion to function pointers, as described in
@@ -1545,7 +1801,8 @@ static void addBlockPointerConversion(Sema &S,
/*IsVariadic=*/false, /*IsCXXMethod=*/true));
ConversionEPI.TypeQuals = Qualifiers();
ConversionEPI.TypeQuals.addConst();
- QualType ConvTy = S.Context.getFunctionType(BlockPtrTy, None, ConversionEPI);
+ QualType ConvTy =
+ S.Context.getFunctionType(BlockPtrTy, std::nullopt, ConversionEPI);
SourceLocation Loc = IntroducerRange.getBegin();
DeclarationName Name
@@ -1556,6 +1813,7 @@ static void addBlockPointerConversion(Sema &S,
CXXConversionDecl *Conversion = CXXConversionDecl::Create(
S.Context, Class, Loc, DeclarationNameInfo(Name, Loc, NameLoc), ConvTy,
S.Context.getTrivialTypeSourceInfo(ConvTy, Loc),
+ S.getCurFPFeatures().isFPConstrained(),
/*isInline=*/true, ExplicitSpecifier(), ConstexprSpecKind::Unspecified,
CallOperator->getBody()->getEndLoc());
Conversion->setAccess(AS_public);
@@ -1572,7 +1830,7 @@ ExprResult Sema::BuildCaptureInit(const Capture &Cap,
// An init-capture is initialized directly from its stored initializer.
if (Cap.isInitCapture())
- return Cap.getVariable()->getInit();
+ return cast<VarDecl>(Cap.getVariable())->getInit();
// For anything else, build an initialization expression. For an implicit
// capture, the capture notionally happens at the capture-default, so use
@@ -1603,7 +1861,7 @@ ExprResult Sema::BuildCaptureInit(const Capture &Cap,
Init = This;
} else {
assert(Cap.isVariableCapture() && "unknown kind of capture");
- VarDecl *Var = Cap.getVariable();
+ ValueDecl *Var = Cap.getVariable();
Name = Var->getIdentifier();
Init = BuildDeclarationNameExpr(
CXXScopeSpec(), DeclarationNameInfo(Var->getDeclName(), Loc), Var);
@@ -1627,8 +1885,7 @@ ExprResult Sema::BuildCaptureInit(const Capture &Cap,
return InitSeq.Perform(*this, Entity, InitKind, InitExpr);
}
-ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
- Scope *CurScope) {
+ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body) {
LambdaScopeInfo LSI = *cast<LambdaScopeInfo>(FunctionScopes.back());
ActOnFinishFunctionBody(LSI.CallOperator, Body);
return BuildLambdaExpr(StartLoc, Body->getEndLoc(), &LSI);
@@ -1652,7 +1909,7 @@ mapImplicitCaptureStyle(CapturingScopeInfo::ImplicitCaptureStyle ICS) {
bool Sema::CaptureHasSideEffects(const Capture &From) {
if (From.isInitCapture()) {
- Expr *Init = From.getVariable()->getInit();
+ Expr *Init = cast<VarDecl>(From.getVariable())->getInit();
if (Init && Init->HasSideEffects(Context))
return true;
}
@@ -1683,6 +1940,12 @@ bool Sema::DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
if (From.isVLATypeCapture())
return false;
+ // FIXME: maybe we should warn on these if we can find a sensible diagnostic
+ // message
+ if (From.isInitCapture() &&
+ From.getVariable()->isPlaceholderVar(getLangOpts()))
+ return false;
+
auto diag = Diag(From.getLocation(), diag::warn_unused_lambda_capture);
if (From.isThisCapture())
diag << "'this'";
@@ -1702,9 +1965,9 @@ FieldDecl *Sema::BuildCaptureField(RecordDecl *RD,
TypeSourceInfo *TSI = nullptr;
if (Capture.isVariableCapture()) {
- auto *Var = Capture.getVariable();
- if (Var->isInitCapture())
- TSI = Capture.getVariable()->getTypeSourceInfo();
+ const auto *Var = dyn_cast_or_null<VarDecl>(Capture.getVariable());
+ if (Var && Var->isInitCapture())
+ TSI = Var->getTypeSourceInfo();
}
// FIXME: Should we really be doing this? A null TypeSourceInfo seems more
@@ -1852,7 +2115,7 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
return LambdaCapture(From.getLocation(), IsImplicit, LCK_VLAType);
} else {
assert(From.isVariableCapture() && "unknown kind of capture");
- VarDecl *Var = From.getVariable();
+ ValueDecl *Var = From.getVariable();
LambdaCaptureKind Kind =
From.isCopyCapture() ? LCK_ByCopy : LCK_ByRef;
return LambdaCapture(From.getLocation(), IsImplicit, Kind, Var,
@@ -1945,6 +2208,7 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
// ratified, it lays out the exact set of conditions where we shouldn't
// allow a lambda-expression.
case ExpressionEvaluationContext::ConstantEvaluated:
+ case ExpressionEvaluationContext::ImmediateFunctionContext:
// We don't actually diagnose this case immediately, because we
// could be within a context where we might find out later that
// the expression is potentially evaluated (e.g., for typeid).
@@ -2029,3 +2293,56 @@ ExprResult Sema::BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
return BuildBlock;
}
+
+static FunctionDecl *getPatternFunctionDecl(FunctionDecl *FD) {
+ if (FD->getTemplatedKind() == FunctionDecl::TK_MemberSpecialization) {
+ while (FD->getInstantiatedFromMemberFunction())
+ FD = FD->getInstantiatedFromMemberFunction();
+ return FD;
+ }
+
+ if (FD->getTemplatedKind() == FunctionDecl::TK_DependentNonTemplate)
+ return FD->getInstantiatedFromDecl();
+
+ FunctionTemplateDecl *FTD = FD->getPrimaryTemplate();
+ if (!FTD)
+ return nullptr;
+
+ while (FTD->getInstantiatedFromMemberTemplate())
+ FTD = FTD->getInstantiatedFromMemberTemplate();
+
+ return FTD->getTemplatedDecl();
+}
+
+Sema::LambdaScopeForCallOperatorInstantiationRAII::
+ LambdaScopeForCallOperatorInstantiationRAII(
+ Sema &SemaRef, FunctionDecl *FD, MultiLevelTemplateArgumentList MLTAL,
+ LocalInstantiationScope &Scope, bool ShouldAddDeclsFromParentScope)
+ : FunctionScopeRAII(SemaRef) {
+ if (!isLambdaCallOperator(FD)) {
+ FunctionScopeRAII::disable();
+ return;
+ }
+
+ SemaRef.RebuildLambdaScopeInfo(cast<CXXMethodDecl>(FD));
+
+ FunctionDecl *Pattern = getPatternFunctionDecl(FD);
+ if (Pattern) {
+ SemaRef.addInstantiatedCapturesToScope(FD, Pattern, Scope, MLTAL);
+
+ FunctionDecl *ParentFD = FD;
+ while (ShouldAddDeclsFromParentScope) {
+
+ ParentFD =
+ dyn_cast<FunctionDecl>(getLambdaAwareParentOfDeclContext(ParentFD));
+ Pattern =
+ dyn_cast<FunctionDecl>(getLambdaAwareParentOfDeclContext(Pattern));
+
+ if (!FD || !Pattern)
+ break;
+
+ SemaRef.addInstantiatedParametersToScope(ParentFD, Pattern, Scope, MLTAL);
+ SemaRef.addInstantiatedLocalVarsToScope(ParentFD, Pattern, Scope);
+ }
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp b/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
index 5e8c4de61e5d..02b1a045df44 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
@@ -29,6 +29,7 @@
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Overload.h"
+#include "clang/Sema/RISCVIntrinsicManager.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
@@ -39,10 +40,12 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/ADT/edit_distance.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <iterator>
#include <list>
+#include <optional>
#include <set>
#include <utility>
#include <vector>
@@ -155,7 +158,7 @@ namespace {
void addUsingDirectives(DeclContext *DC, DeclContext *EffectiveDC) {
SmallVector<DeclContext*, 4> queue;
while (true) {
- for (auto UD : DC->using_directives()) {
+ for (auto *UD : DC->using_directives()) {
DeclContext *NS = UD->getNominatedNamespace();
if (SemaRef.isVisible(UD) && visited.insert(NS).second) {
addUsingDirective(UD, EffectiveDC);
@@ -196,7 +199,7 @@ namespace {
const_iterator end() const { return list.end(); }
llvm::iterator_range<const_iterator>
- getNamespacesFor(DeclContext *DC) const {
+ getNamespacesFor(const DeclContext *DC) const {
return llvm::make_range(std::equal_range(begin(), end(),
DC->getPrimaryContext(),
UnqualUsingEntry::Comparator()));
@@ -324,14 +327,14 @@ void LookupResult::configure() {
}
}
-bool LookupResult::sanity() const {
+bool LookupResult::checkDebugAssumptions() const {
// This function is never called by NDEBUG builds.
assert(ResultKind != NotFound || Decls.size() == 0);
assert(ResultKind != Found || Decls.size() == 1);
assert(ResultKind != FoundOverloaded || Decls.size() > 1 ||
(Decls.size() == 1 &&
isa<FunctionTemplateDecl>((*begin())->getUnderlyingDecl())));
- assert(ResultKind != FoundUnresolvedValue || sanityCheckUnresolved());
+ assert(ResultKind != FoundUnresolvedValue || checkUnresolved());
assert(ResultKind != Ambiguous || Decls.size() > 1 ||
(Decls.size() == 1 && (Ambiguity == AmbiguousBaseSubobjects ||
Ambiguity == AmbiguousBaseSubobjectTypes)));
@@ -348,12 +351,12 @@ void LookupResult::deletePaths(CXXBasePaths *Paths) {
/// Get a representative context for a declaration such that two declarations
/// will have the same context if they were found within the same scope.
-static DeclContext *getContextForScopeMatching(Decl *D) {
+static const DeclContext *getContextForScopeMatching(const Decl *D) {
// For function-local declarations, use that function as the context. This
// doesn't account for scopes within the function; the caller must deal with
// those.
- DeclContext *DC = D->getLexicalDeclContext();
- if (DC->isFunctionOrMethod())
+ if (const DeclContext *DC = D->getLexicalDeclContext();
+ DC->isFunctionOrMethod())
return DC;
// Otherwise, look at the semantic context of the declaration. The
@@ -364,15 +367,16 @@ static DeclContext *getContextForScopeMatching(Decl *D) {
/// Determine whether \p D is a better lookup result than \p Existing,
/// given that they declare the same entity.
static bool isPreferredLookupResult(Sema &S, Sema::LookupNameKind Kind,
- NamedDecl *D, NamedDecl *Existing) {
+ const NamedDecl *D,
+ const NamedDecl *Existing) {
// When looking up redeclarations of a using declaration, prefer a using
// shadow declaration over any other declaration of the same entity.
if (Kind == Sema::LookupUsingDeclName && isa<UsingShadowDecl>(D) &&
!isa<UsingShadowDecl>(Existing))
return true;
- auto *DUnderlying = D->getUnderlyingDecl();
- auto *EUnderlying = Existing->getUnderlyingDecl();
+ const auto *DUnderlying = D->getUnderlyingDecl();
+ const auto *EUnderlying = Existing->getUnderlyingDecl();
// If they have different underlying declarations, prefer a typedef over the
// original type (this happens when two type declarations denote the same
@@ -394,8 +398,8 @@ static bool isPreferredLookupResult(Sema &S, Sema::LookupNameKind Kind,
// FIXME: In the presence of ambiguous default arguments, we should keep both,
// so we can diagnose the ambiguity if the default argument is needed.
// See C++ [over.match.best]p3.
- if (auto *DFD = dyn_cast<FunctionDecl>(DUnderlying)) {
- auto *EFD = cast<FunctionDecl>(EUnderlying);
+ if (const auto *DFD = dyn_cast<FunctionDecl>(DUnderlying)) {
+ const auto *EFD = cast<FunctionDecl>(EUnderlying);
unsigned DMin = DFD->getMinRequiredArguments();
unsigned EMin = EFD->getMinRequiredArguments();
// If D has more default arguments, it is preferred.
@@ -406,8 +410,8 @@ static bool isPreferredLookupResult(Sema &S, Sema::LookupNameKind Kind,
}
// Pick the template with more default template arguments.
- if (auto *DTD = dyn_cast<TemplateDecl>(DUnderlying)) {
- auto *ETD = cast<TemplateDecl>(EUnderlying);
+ if (const auto *DTD = dyn_cast<TemplateDecl>(DUnderlying)) {
+ const auto *ETD = cast<TemplateDecl>(EUnderlying);
unsigned DMin = DTD->getTemplateParameters()->getMinRequiredArguments();
unsigned EMin = ETD->getTemplateParameters()->getMinRequiredArguments();
// If D has more default arguments, it is preferred. Note that default
@@ -430,8 +434,8 @@ static bool isPreferredLookupResult(Sema &S, Sema::LookupNameKind Kind,
// VarDecl can have incomplete array types, prefer the one with more complete
// array type.
- if (VarDecl *DVD = dyn_cast<VarDecl>(DUnderlying)) {
- VarDecl *EVD = cast<VarDecl>(EUnderlying);
+ if (const auto *DVD = dyn_cast<VarDecl>(DUnderlying)) {
+ const auto *EVD = cast<VarDecl>(EUnderlying);
if (EVD->getType()->isIncompleteType() &&
!DVD->getType()->isIncompleteType()) {
// Prefer the decl with a more complete type if visible.
@@ -448,7 +452,7 @@ static bool isPreferredLookupResult(Sema &S, Sema::LookupNameKind Kind,
}
// Pick the newer declaration; it might have a more precise type.
- for (Decl *Prev = DUnderlying->getPreviousDecl(); Prev;
+ for (const Decl *Prev = DUnderlying->getPreviousDecl(); Prev;
Prev = Prev->getPreviousDecl())
if (Prev == EUnderlying)
return true;
@@ -456,7 +460,7 @@ static bool isPreferredLookupResult(Sema &S, Sema::LookupNameKind Kind,
}
/// Determine whether \p D can hide a tag declaration.
-static bool canHideTag(NamedDecl *D) {
+static bool canHideTag(const NamedDecl *D) {
// C++ [basic.scope.declarative]p4:
// Given a set of declarations in a single declarative region [...]
// exactly one declaration shall declare a class name or enumeration name
@@ -489,7 +493,7 @@ void LookupResult::resolveKind() {
// If there's a single decl, we need to examine it to decide what
// kind of lookup this is.
if (N == 1) {
- NamedDecl *D = (*Decls.begin())->getUnderlyingDecl();
+ const NamedDecl *D = (*Decls.begin())->getUnderlyingDecl();
if (isa<FunctionTemplateDecl>(D))
ResultKind = FoundOverloaded;
else if (isa<UnresolvedUsingValueDecl>(D))
@@ -500,36 +504,59 @@ void LookupResult::resolveKind() {
// Don't do any extra resolution if we've already resolved as ambiguous.
if (ResultKind == Ambiguous) return;
- llvm::SmallDenseMap<NamedDecl*, unsigned, 16> Unique;
+ llvm::SmallDenseMap<const NamedDecl *, unsigned, 16> Unique;
llvm::SmallDenseMap<QualType, unsigned, 16> UniqueTypes;
bool Ambiguous = false;
+ bool ReferenceToPlaceHolderVariable = false;
bool HasTag = false, HasFunction = false;
bool HasFunctionTemplate = false, HasUnresolved = false;
- NamedDecl *HasNonFunction = nullptr;
-
- llvm::SmallVector<NamedDecl*, 4> EquivalentNonFunctions;
+ const NamedDecl *HasNonFunction = nullptr;
- unsigned UniqueTagIndex = 0;
+ llvm::SmallVector<const NamedDecl *, 4> EquivalentNonFunctions;
+ llvm::BitVector RemovedDecls(N);
- unsigned I = 0;
- while (I < N) {
- NamedDecl *D = Decls[I]->getUnderlyingDecl();
+ for (unsigned I = 0; I < N; I++) {
+ const NamedDecl *D = Decls[I]->getUnderlyingDecl();
D = cast<NamedDecl>(D->getCanonicalDecl());
// Ignore an invalid declaration unless it's the only one left.
- if (D->isInvalidDecl() && !(I == 0 && N == 1)) {
- Decls[I] = Decls[--N];
+ // Also ignore HLSLBufferDecl which not have name conflict with other Decls.
+ if ((D->isInvalidDecl() || isa<HLSLBufferDecl>(D)) &&
+ N - RemovedDecls.count() > 1) {
+ RemovedDecls.set(I);
continue;
}
- llvm::Optional<unsigned> ExistingI;
+ // C++ [basic.scope.hiding]p2:
+ // A class name or enumeration name can be hidden by the name of
+ // an object, function, or enumerator declared in the same
+ // scope. If a class or enumeration name and an object, function,
+ // or enumerator are declared in the same scope (in any order)
+ // with the same name, the class or enumeration name is hidden
+ // wherever the object, function, or enumerator name is visible.
+ if (HideTags && isa<TagDecl>(D)) {
+ bool Hidden = false;
+ for (auto *OtherDecl : Decls) {
+ if (canHideTag(OtherDecl) && !OtherDecl->isInvalidDecl() &&
+ getContextForScopeMatching(OtherDecl)->Equals(
+ getContextForScopeMatching(Decls[I]))) {
+ RemovedDecls.set(I);
+ Hidden = true;
+ break;
+ }
+ }
+ if (Hidden)
+ continue;
+ }
+
+ std::optional<unsigned> ExistingI;
// Redeclarations of types via typedef can occur both within a scope
// and, through using declarations and directives, across scopes. There is
// no ambiguity if they all refer to the same type, so unique based on the
// canonical type.
- if (TypeDecl *TD = dyn_cast<TypeDecl>(D)) {
+ if (const auto *TD = dyn_cast<TypeDecl>(D)) {
QualType T = getSema().Context.getTypeDeclType(TD);
auto UniqueResult = UniqueTypes.insert(
std::make_pair(getSema().Context.getCanonicalType(T), I));
@@ -541,7 +568,7 @@ void LookupResult::resolveKind() {
// For non-type declarations, check for a prior lookup result naming this
// canonical declaration.
- if (!ExistingI) {
+ if (!D->isPlaceholderVar(getSema().getLangOpts()) && !ExistingI) {
auto UniqueResult = Unique.insert(std::make_pair(D, I));
if (!UniqueResult.second) {
// We've seen this entity before.
@@ -555,7 +582,7 @@ void LookupResult::resolveKind() {
if (isPreferredLookupResult(getSema(), getLookupKind(), Decls[I],
Decls[*ExistingI]))
Decls[*ExistingI] = Decls[I];
- Decls[I] = Decls[--N];
+ RemovedDecls.set(I);
continue;
}
@@ -566,7 +593,6 @@ void LookupResult::resolveKind() {
} else if (isa<TagDecl>(D)) {
if (HasTag)
Ambiguous = true;
- UniqueTagIndex = I;
HasTag = true;
} else if (isa<FunctionTemplateDecl>(D)) {
HasFunction = true;
@@ -582,36 +608,18 @@ void LookupResult::resolveKind() {
if (getSema().isEquivalentInternalLinkageDeclaration(HasNonFunction,
D)) {
EquivalentNonFunctions.push_back(D);
- Decls[I] = Decls[--N];
+ RemovedDecls.set(I);
continue;
}
-
+ if (D->isPlaceholderVar(getSema().getLangOpts()) &&
+ getContextForScopeMatching(D) ==
+ getContextForScopeMatching(Decls[I])) {
+ ReferenceToPlaceHolderVariable = true;
+ }
Ambiguous = true;
}
HasNonFunction = D;
}
- I++;
- }
-
- // C++ [basic.scope.hiding]p2:
- // A class name or enumeration name can be hidden by the name of
- // an object, function, or enumerator declared in the same
- // scope. If a class or enumeration name and an object, function,
- // or enumerator are declared in the same scope (in any order)
- // with the same name, the class or enumeration name is hidden
- // wherever the object, function, or enumerator name is visible.
- // But it's still an error if there are distinct tag types found,
- // even if they're not visible. (ref?)
- if (N > 1 && HideTags && HasTag && !Ambiguous &&
- (HasFunction || HasNonFunction || HasUnresolved)) {
- NamedDecl *OtherDecl = Decls[UniqueTagIndex ? 0 : N - 1];
- if (isa<TagDecl>(Decls[UniqueTagIndex]->getUnderlyingDecl()) &&
- getContextForScopeMatching(Decls[UniqueTagIndex])->Equals(
- getContextForScopeMatching(OtherDecl)) &&
- canHideTag(OtherDecl))
- Decls[UniqueTagIndex] = Decls[--N];
- else
- Ambiguous = true;
}
// FIXME: This diagnostic should really be delayed until we're done with
@@ -620,12 +628,20 @@ void LookupResult::resolveKind() {
getSema().diagnoseEquivalentInternalLinkageDeclarations(
getNameLoc(), HasNonFunction, EquivalentNonFunctions);
- Decls.set_size(N);
+ // Remove decls by replacing them with decls from the end (which
+ // means that we need to iterate from the end) and then truncating
+ // to the new size.
+ for (int I = RemovedDecls.find_last(); I >= 0; I = RemovedDecls.find_prev(I))
+ Decls[I] = Decls[--N];
+ Decls.truncate(N);
- if (HasNonFunction && (HasFunction || HasUnresolved))
+ if ((HasNonFunction && (HasFunction || HasUnresolved)) ||
+ (HideTags && HasTag && (HasFunction || HasNonFunction || HasUnresolved)))
Ambiguous = true;
- if (Ambiguous)
+ if (Ambiguous && ReferenceToPlaceHolderVariable)
+ setAmbiguous(LookupResult::AmbiguousReferenceToPlaceholderVariable);
+ else if (Ambiguous)
setAmbiguous(LookupResult::AmbiguousReference);
else if (HasUnresolved)
ResultKind = LookupResult::FoundUnresolvedValue;
@@ -859,7 +875,8 @@ static void InsertOCLBuiltinDeclarationsFromTable(Sema &S, LookupResult &LR,
for (const auto &FTy : FunctionList) {
NewOpenCLBuiltin = FunctionDecl::Create(
Context, Parent, Loc, Loc, II, FTy, /*TInfo=*/nullptr, SC_Extern,
- false, FTy->isFunctionProtoType());
+ S.getCurFPFeatures().isFPConstrained(), false,
+ FTy->isFunctionProtoType());
NewOpenCLBuiltin->setImplicit();
// Create Decl objects for each parameter, adding them to the
@@ -927,6 +944,16 @@ bool Sema::LookupBuiltin(LookupResult &R) {
}
}
+ if (DeclareRISCVVBuiltins || DeclareRISCVSiFiveVectorBuiltins) {
+ if (!RVIntrinsicManager)
+ RVIntrinsicManager = CreateRISCVIntrinsicManager(*this);
+
+ RVIntrinsicManager->InitIntrinsicList();
+
+ if (RVIntrinsicManager->CreateIntrinsicIfFound(R, II, PP))
+ return true;
+ }
+
// If this is a builtin on this (or all) targets, create the decl.
if (unsigned BuiltinID = II->getBuiltinID()) {
// In C++ and OpenCL (spec v1.2 s6.9.f), we don't have any predefined
@@ -1167,9 +1194,8 @@ static bool LookupDirect(Sema &S, LookupResult &R, const DeclContext *DC) {
FunctionProtoType::ExtProtoInfo EPI = ConvProto->getExtProtoInfo();
EPI.ExtInfo = EPI.ExtInfo.withCallingConv(CC_C);
EPI.ExceptionSpec = EST_None;
- QualType ExpectedType
- = R.getSema().Context.getFunctionType(R.getLookupName().getCXXNameType(),
- None, EPI);
+ QualType ExpectedType = R.getSema().Context.getFunctionType(
+ R.getLookupName().getCXXNameType(), std::nullopt, EPI);
// Perform template argument deduction against the type that we would
// expect the function to have.
@@ -1185,9 +1211,9 @@ static bool LookupDirect(Sema &S, LookupResult &R, const DeclContext *DC) {
}
// Performs C++ unqualified lookup into the given file context.
-static bool
-CppNamespaceLookup(Sema &S, LookupResult &R, ASTContext &Context,
- DeclContext *NS, UnqualUsingDirectiveSet &UDirs) {
+static bool CppNamespaceLookup(Sema &S, LookupResult &R, ASTContext &Context,
+ const DeclContext *NS,
+ UnqualUsingDirectiveSet &UDirs) {
assert(NS && NS->isFileContext() && "CppNamespaceLookup() requires namespace!");
@@ -1321,8 +1347,7 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
if (!SearchNamespaceScope) {
R.resolveKind();
if (S->isClassScope())
- if (CXXRecordDecl *Record =
- dyn_cast_or_null<CXXRecordDecl>(S->getEntity()))
+ if (auto *Record = dyn_cast_if_present<CXXRecordDecl>(S->getEntity()))
R.setNamingClass(Record);
return true;
}
@@ -1555,39 +1580,67 @@ llvm::DenseSet<Module*> &Sema::getLookupModules() {
return LookupModulesCache;
}
-/// Determine whether the module M is part of the current module from the
-/// perspective of a module-private visibility check.
-static bool isInCurrentModule(const Module *M, const LangOptions &LangOpts) {
- // If M is the global module fragment of a module that we've not yet finished
- // parsing, then it must be part of the current module.
- return M->getTopLevelModuleName() == LangOpts.CurrentModule ||
- (M->Kind == Module::GlobalModuleFragment && !M->Parent);
+/// Determine if we could use all the declarations in the module.
+bool Sema::isUsableModule(const Module *M) {
+ assert(M && "We shouldn't check nullness for module here");
+ // Return quickly if we cached the result.
+ if (UsableModuleUnitsCache.count(M))
+ return true;
+
+ // If M is the global module fragment of the current translation unit. So it
+ // should be usable.
+ // [module.global.frag]p1:
+ // The global module fragment can be used to provide declarations that are
+ // attached to the global module and usable within the module unit.
+ if (M == TheGlobalModuleFragment || M == TheImplicitGlobalModuleFragment ||
+ // If M is the module we're parsing, it should be usable. This covers the
+ // private module fragment. The private module fragment is usable only if
+ // it is within the current module unit. And it must be the current
+ // parsing module unit if it is within the current module unit according
+ // to the grammar of the private module fragment. NOTE: This is covered by
+ // the following condition. The intention of the check is to avoid string
+ // comparison as much as possible.
+ M == getCurrentModule() ||
+ // The module unit which is in the same module with the current module
+ // unit is usable.
+ //
+ // FIXME: Here we judge if they are in the same module by comparing the
+ // string. Is there any better solution?
+ M->getPrimaryModuleInterfaceName() ==
+ llvm::StringRef(getLangOpts().CurrentModule).split(':').first) {
+ UsableModuleUnitsCache.insert(M);
+ return true;
+ }
+
+ return false;
}
-bool Sema::hasVisibleMergedDefinition(NamedDecl *Def) {
+bool Sema::hasVisibleMergedDefinition(const NamedDecl *Def) {
for (const Module *Merged : Context.getModulesWithMergedDefinition(Def))
if (isModuleVisible(Merged))
return true;
return false;
}
-bool Sema::hasMergedDefinitionInCurrentModule(NamedDecl *Def) {
+bool Sema::hasMergedDefinitionInCurrentModule(const NamedDecl *Def) {
for (const Module *Merged : Context.getModulesWithMergedDefinition(Def))
- if (isInCurrentModule(Merged, getLangOpts()))
+ if (isUsableModule(Merged))
return true;
return false;
}
-template<typename ParmDecl>
+template <typename ParmDecl>
static bool
-hasVisibleDefaultArgument(Sema &S, const ParmDecl *D,
- llvm::SmallVectorImpl<Module *> *Modules) {
+hasAcceptableDefaultArgument(Sema &S, const ParmDecl *D,
+ llvm::SmallVectorImpl<Module *> *Modules,
+ Sema::AcceptableKind Kind) {
if (!D->hasDefaultArgument())
return false;
- while (D) {
+ llvm::SmallPtrSet<const ParmDecl *, 4> Visited;
+ while (D && Visited.insert(D).second) {
auto &DefaultArg = D->getDefaultArgStorage();
- if (!DefaultArg.isInherited() && S.isVisible(D))
+ if (!DefaultArg.isInherited() && S.isAcceptable(D, Kind))
return true;
if (!DefaultArg.isInherited() && Modules) {
@@ -1595,26 +1648,43 @@ hasVisibleDefaultArgument(Sema &S, const ParmDecl *D,
Modules->push_back(S.getOwningModule(NonConstD));
}
- // If there was a previous default argument, maybe its parameter is visible.
+ // If there was a previous default argument, maybe its parameter is
+ // acceptable.
D = DefaultArg.getInheritedFrom();
}
return false;
}
-bool Sema::hasVisibleDefaultArgument(const NamedDecl *D,
- llvm::SmallVectorImpl<Module *> *Modules) {
+bool Sema::hasAcceptableDefaultArgument(
+ const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules,
+ Sema::AcceptableKind Kind) {
if (auto *P = dyn_cast<TemplateTypeParmDecl>(D))
- return ::hasVisibleDefaultArgument(*this, P, Modules);
+ return ::hasAcceptableDefaultArgument(*this, P, Modules, Kind);
+
if (auto *P = dyn_cast<NonTypeTemplateParmDecl>(D))
- return ::hasVisibleDefaultArgument(*this, P, Modules);
- return ::hasVisibleDefaultArgument(*this, cast<TemplateTemplateParmDecl>(D),
- Modules);
+ return ::hasAcceptableDefaultArgument(*this, P, Modules, Kind);
+
+ return ::hasAcceptableDefaultArgument(
+ *this, cast<TemplateTemplateParmDecl>(D), Modules, Kind);
+}
+
+bool Sema::hasVisibleDefaultArgument(const NamedDecl *D,
+ llvm::SmallVectorImpl<Module *> *Modules) {
+ return hasAcceptableDefaultArgument(D, Modules,
+ Sema::AcceptableKind::Visible);
}
-template<typename Filter>
-static bool hasVisibleDeclarationImpl(Sema &S, const NamedDecl *D,
- llvm::SmallVectorImpl<Module *> *Modules,
- Filter F) {
+bool Sema::hasReachableDefaultArgument(
+ const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules) {
+ return hasAcceptableDefaultArgument(D, Modules,
+ Sema::AcceptableKind::Reachable);
+}
+
+template <typename Filter>
+static bool
+hasAcceptableDeclarationImpl(Sema &S, const NamedDecl *D,
+ llvm::SmallVectorImpl<Module *> *Modules, Filter F,
+ Sema::AcceptableKind Kind) {
bool HasFilteredRedecls = false;
for (auto *Redecl : D->redecls()) {
@@ -1622,7 +1692,7 @@ static bool hasVisibleDeclarationImpl(Sema &S, const NamedDecl *D,
if (!F(R))
continue;
- if (S.isVisible(R))
+ if (S.isAcceptable(R, Kind))
return true;
HasFilteredRedecls = true;
@@ -1638,74 +1708,115 @@ static bool hasVisibleDeclarationImpl(Sema &S, const NamedDecl *D,
return true;
}
+static bool
+hasAcceptableExplicitSpecialization(Sema &S, const NamedDecl *D,
+ llvm::SmallVectorImpl<Module *> *Modules,
+ Sema::AcceptableKind Kind) {
+ return hasAcceptableDeclarationImpl(
+ S, D, Modules,
+ [](const NamedDecl *D) {
+ if (auto *RD = dyn_cast<CXXRecordDecl>(D))
+ return RD->getTemplateSpecializationKind() ==
+ TSK_ExplicitSpecialization;
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ return FD->getTemplateSpecializationKind() ==
+ TSK_ExplicitSpecialization;
+ if (auto *VD = dyn_cast<VarDecl>(D))
+ return VD->getTemplateSpecializationKind() ==
+ TSK_ExplicitSpecialization;
+ llvm_unreachable("unknown explicit specialization kind");
+ },
+ Kind);
+}
+
bool Sema::hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules) {
- return hasVisibleDeclarationImpl(*this, D, Modules, [](const NamedDecl *D) {
- if (auto *RD = dyn_cast<CXXRecordDecl>(D))
- return RD->getTemplateSpecializationKind() == TSK_ExplicitSpecialization;
- if (auto *FD = dyn_cast<FunctionDecl>(D))
- return FD->getTemplateSpecializationKind() == TSK_ExplicitSpecialization;
- if (auto *VD = dyn_cast<VarDecl>(D))
- return VD->getTemplateSpecializationKind() == TSK_ExplicitSpecialization;
- llvm_unreachable("unknown explicit specialization kind");
- });
+ return ::hasAcceptableExplicitSpecialization(*this, D, Modules,
+ Sema::AcceptableKind::Visible);
}
-bool Sema::hasVisibleMemberSpecialization(
+bool Sema::hasReachableExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules) {
+ return ::hasAcceptableExplicitSpecialization(*this, D, Modules,
+ Sema::AcceptableKind::Reachable);
+}
+
+static bool
+hasAcceptableMemberSpecialization(Sema &S, const NamedDecl *D,
+ llvm::SmallVectorImpl<Module *> *Modules,
+ Sema::AcceptableKind Kind) {
assert(isa<CXXRecordDecl>(D->getDeclContext()) &&
"not a member specialization");
- return hasVisibleDeclarationImpl(*this, D, Modules, [](const NamedDecl *D) {
- // If the specialization is declared at namespace scope, then it's a member
- // specialization declaration. If it's lexically inside the class
- // definition then it was instantiated.
- //
- // FIXME: This is a hack. There should be a better way to determine this.
- // FIXME: What about MS-style explicit specializations declared within a
- // class definition?
- return D->getLexicalDeclContext()->isFileContext();
- });
+ return hasAcceptableDeclarationImpl(
+ S, D, Modules,
+ [](const NamedDecl *D) {
+ // If the specialization is declared at namespace scope, then it's a
+ // member specialization declaration. If it's lexically inside the class
+ // definition then it was instantiated.
+ //
+ // FIXME: This is a hack. There should be a better way to determine
+ // this.
+ // FIXME: What about MS-style explicit specializations declared within a
+ // class definition?
+ return D->getLexicalDeclContext()->isFileContext();
+ },
+ Kind);
+}
+
+bool Sema::hasVisibleMemberSpecialization(
+ const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules) {
+ return hasAcceptableMemberSpecialization(*this, D, Modules,
+ Sema::AcceptableKind::Visible);
}
-/// Determine whether a declaration is visible to name lookup.
+bool Sema::hasReachableMemberSpecialization(
+ const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules) {
+ return hasAcceptableMemberSpecialization(*this, D, Modules,
+ Sema::AcceptableKind::Reachable);
+}
+
+/// Determine whether a declaration is acceptable to name lookup.
///
-/// This routine determines whether the declaration D is visible in the current
-/// lookup context, taking into account the current template instantiation
-/// stack. During template instantiation, a declaration is visible if it is
-/// visible from a module containing any entity on the template instantiation
-/// path (by instantiating a template, you allow it to see the declarations that
-/// your module can see, including those later on in your module).
-bool LookupResult::isVisibleSlow(Sema &SemaRef, NamedDecl *D) {
+/// This routine determines whether the declaration D is acceptable in the
+/// current lookup context, taking into account the current template
+/// instantiation stack. During template instantiation, a declaration is
+/// acceptable if it is acceptable from a module containing any entity on the
+/// template instantiation path (by instantiating a template, you allow it to
+/// see the declarations that your module can see, including those later on in
+/// your module).
+bool LookupResult::isAcceptableSlow(Sema &SemaRef, NamedDecl *D,
+ Sema::AcceptableKind Kind) {
assert(!D->isUnconditionallyVisible() &&
"should not call this: not in slow case");
Module *DeclModule = SemaRef.getOwningModule(D);
assert(DeclModule && "hidden decl has no owning module");
- // If the owning module is visible, the decl is visible.
- if (SemaRef.isModuleVisible(DeclModule, D->isModulePrivate()))
+ // If the owning module is visible, the decl is acceptable.
+ if (SemaRef.isModuleVisible(DeclModule,
+ D->isInvisibleOutsideTheOwningModule()))
return true;
// Determine whether a decl context is a file context for the purpose of
- // visibility. This looks through some (export and linkage spec) transparent
- // contexts, but not others (enums).
+ // visibility/reachability. This looks through some (export and linkage spec)
+ // transparent contexts, but not others (enums).
auto IsEffectivelyFileContext = [](const DeclContext *DC) {
return DC->isFileContext() || isa<LinkageSpecDecl>(DC) ||
isa<ExportDecl>(DC);
};
// If this declaration is not at namespace scope
- // then it is visible if its lexical parent has a visible definition.
+ // then it is acceptable if its lexical parent has a acceptable definition.
DeclContext *DC = D->getLexicalDeclContext();
if (DC && !IsEffectivelyFileContext(DC)) {
// For a parameter, check whether our current template declaration's
- // lexical context is visible, not whether there's some other visible
+ // lexical context is acceptable, not whether there's some other acceptable
// definition of it, because parameters aren't "within" the definition.
//
- // In C++ we need to check for a visible definition due to ODR merging,
+ // In C++ we need to check for a acceptable definition due to ODR merging,
// and in C we must not because each declaration of a function gets its own
// set of declarations for tags in prototype scope.
- bool VisibleWithinParent;
+ bool AcceptableWithinParent;
if (D->isTemplateParameter()) {
bool SearchDefinitions = true;
if (const auto *DCD = dyn_cast<Decl>(DC)) {
@@ -1716,51 +1827,59 @@ bool LookupResult::isVisibleSlow(Sema &SemaRef, NamedDecl *D) {
}
}
if (SearchDefinitions)
- VisibleWithinParent = SemaRef.hasVisibleDefinition(cast<NamedDecl>(DC));
+ AcceptableWithinParent =
+ SemaRef.hasAcceptableDefinition(cast<NamedDecl>(DC), Kind);
else
- VisibleWithinParent = isVisible(SemaRef, cast<NamedDecl>(DC));
+ AcceptableWithinParent =
+ isAcceptable(SemaRef, cast<NamedDecl>(DC), Kind);
} else if (isa<ParmVarDecl>(D) ||
(isa<FunctionDecl>(DC) && !SemaRef.getLangOpts().CPlusPlus))
- VisibleWithinParent = isVisible(SemaRef, cast<NamedDecl>(DC));
+ AcceptableWithinParent = isAcceptable(SemaRef, cast<NamedDecl>(DC), Kind);
else if (D->isModulePrivate()) {
- // A module-private declaration is only visible if an enclosing lexical
+ // A module-private declaration is only acceptable if an enclosing lexical
// parent was merged with another definition in the current module.
- VisibleWithinParent = false;
+ AcceptableWithinParent = false;
do {
if (SemaRef.hasMergedDefinitionInCurrentModule(cast<NamedDecl>(DC))) {
- VisibleWithinParent = true;
+ AcceptableWithinParent = true;
break;
}
DC = DC->getLexicalParent();
} while (!IsEffectivelyFileContext(DC));
} else {
- VisibleWithinParent = SemaRef.hasVisibleDefinition(cast<NamedDecl>(DC));
+ AcceptableWithinParent =
+ SemaRef.hasAcceptableDefinition(cast<NamedDecl>(DC), Kind);
}
- if (VisibleWithinParent && SemaRef.CodeSynthesisContexts.empty() &&
+ if (AcceptableWithinParent && SemaRef.CodeSynthesisContexts.empty() &&
+ Kind == Sema::AcceptableKind::Visible &&
// FIXME: Do something better in this case.
!SemaRef.getLangOpts().ModulesLocalVisibility) {
// Cache the fact that this declaration is implicitly visible because
// its parent has a visible definition.
D->setVisibleDespiteOwningModule();
}
- return VisibleWithinParent;
+ return AcceptableWithinParent;
}
- return false;
+ if (Kind == Sema::AcceptableKind::Visible)
+ return false;
+
+ assert(Kind == Sema::AcceptableKind::Reachable &&
+ "Additional Sema::AcceptableKind?");
+ return isReachableSlow(SemaRef, D);
}
bool Sema::isModuleVisible(const Module *M, bool ModulePrivate) {
// The module might be ordinarily visible. For a module-private query, that
- // means it is part of the current module. For any other query, that means it
- // is in our visible module set.
- if (ModulePrivate) {
- if (isInCurrentModule(M, getLangOpts()))
- return true;
- } else {
- if (VisibleModules.isVisible(M))
- return true;
- }
+ // means it is part of the current module.
+ if (ModulePrivate && isUsableModule(M))
+ return true;
+
+ // For a query which is not module-private, that means it is in our visible
+ // module set.
+ if (!ModulePrivate && VisibleModules.isVisible(M))
+ return true;
// Otherwise, it might be visible by virtue of the query being within a
// template instantiation or similar that is permitted to look inside M.
@@ -1774,6 +1893,12 @@ bool Sema::isModuleVisible(const Module *M, bool ModulePrivate) {
if (LookupModules.count(M))
return true;
+ // The global module fragments are visible to its corresponding module unit.
+ // So the global module fragment should be visible if the its corresponding
+ // module unit is visible.
+ if (M->isGlobalModule() && LookupModules.count(M->getTopLevelModule()))
+ return true;
+
// For a module-private query, that's everywhere we get to look.
if (ModulePrivate)
return false;
@@ -1784,8 +1909,66 @@ bool Sema::isModuleVisible(const Module *M, bool ModulePrivate) {
});
}
-bool Sema::isVisibleSlow(const NamedDecl *D) {
- return LookupResult::isVisible(*this, const_cast<NamedDecl*>(D));
+// FIXME: Return false directly if we don't have an interface dependency on the
+// translation unit containing D.
+bool LookupResult::isReachableSlow(Sema &SemaRef, NamedDecl *D) {
+ assert(!isVisible(SemaRef, D) && "Shouldn't call the slow case.\n");
+
+ Module *DeclModule = SemaRef.getOwningModule(D);
+ assert(DeclModule && "hidden decl has no owning module");
+
+ // Entities in header like modules are reachable only if they're visible.
+ if (DeclModule->isHeaderLikeModule())
+ return false;
+
+ if (!D->isInAnotherModuleUnit())
+ return true;
+
+ // [module.reach]/p3:
+ // A declaration D is reachable from a point P if:
+ // ...
+ // - D is not discarded ([module.global.frag]), appears in a translation unit
+ // that is reachable from P, and does not appear within a private module
+ // fragment.
+ //
+ // A declaration that's discarded in the GMF should be module-private.
+ if (D->isModulePrivate())
+ return false;
+
+ // [module.reach]/p1
+ // A translation unit U is necessarily reachable from a point P if U is a
+ // module interface unit on which the translation unit containing P has an
+ // interface dependency, or the translation unit containing P imports U, in
+ // either case prior to P ([module.import]).
+ //
+ // [module.import]/p10
+ // A translation unit has an interface dependency on a translation unit U if
+ // it contains a declaration (possibly a module-declaration) that imports U
+ // or if it has an interface dependency on a translation unit that has an
+ // interface dependency on U.
+ //
+ // So we could conclude the module unit U is necessarily reachable if:
+ // (1) The module unit U is module interface unit.
+ // (2) The current unit has an interface dependency on the module unit U.
+ //
+ // Here we only check for the first condition. Since we couldn't see
+ // DeclModule if it isn't (transitively) imported.
+ if (DeclModule->getTopLevelModule()->isModuleInterfaceUnit())
+ return true;
+
+ // [module.reach]/p2
+ // Additional translation units on
+ // which the point within the program has an interface dependency may be
+ // considered reachable, but it is unspecified which are and under what
+ // circumstances.
+ //
+ // The decision here is to treat all additional tranditional units as
+ // unreachable.
+ return false;
+}
+
+bool Sema::isAcceptableSlow(const NamedDecl *D, Sema::AcceptableKind Kind) {
+ return LookupResult::isAcceptable(*this, const_cast<NamedDecl *>(D), Kind);
}
bool Sema::shouldLinkPossiblyHiddenDecl(LookupResult &R, const NamedDecl *New) {
@@ -1834,9 +2017,9 @@ bool Sema::shouldLinkPossiblyHiddenDecl(LookupResult &R, const NamedDecl *New) {
/// and visible. If no declaration of D is visible, returns null.
static NamedDecl *findAcceptableDecl(Sema &SemaRef, NamedDecl *D,
unsigned IDNS) {
- assert(!LookupResult::isVisible(SemaRef, D) && "not in slow case");
+ assert(!LookupResult::isAvailableForLookup(SemaRef, D) && "not in slow case");
- for (auto RD : D->redecls()) {
+ for (auto *RD : D->redecls()) {
// Don't bother with extra checks if we already know this one isn't visible.
if (RD == D)
continue;
@@ -1846,7 +2029,7 @@ static NamedDecl *findAcceptableDecl(Sema &SemaRef, NamedDecl *D,
// visible in the same scope as D. This needs to be done much more
// carefully.
if (ND->isInIdentifierNamespace(IDNS) &&
- LookupResult::isVisible(SemaRef, ND))
+ LookupResult::isAvailableForLookup(SemaRef, ND))
return ND;
}
@@ -1856,8 +2039,17 @@ static NamedDecl *findAcceptableDecl(Sema &SemaRef, NamedDecl *D,
bool Sema::hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules) {
assert(!isVisible(D) && "not in slow case");
- return hasVisibleDeclarationImpl(*this, D, Modules,
- [](const NamedDecl *) { return true; });
+ return hasAcceptableDeclarationImpl(
+ *this, D, Modules, [](const NamedDecl *) { return true; },
+ Sema::AcceptableKind::Visible);
+}
+
+bool Sema::hasReachableDeclarationSlow(
+ const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules) {
+ assert(!isReachable(D) && "not in slow case");
+ return hasAcceptableDeclarationImpl(
+ *this, D, Modules, [](const NamedDecl *) { return true; },
+ Sema::AcceptableKind::Reachable);
}
NamedDecl *LookupResult::getAcceptableDeclSlow(NamedDecl *D) const {
@@ -1882,6 +2074,83 @@ NamedDecl *LookupResult::getAcceptableDeclSlow(NamedDecl *D) const {
return findAcceptableDecl(getSema(), D, IDNS);
}
+bool LookupResult::isVisible(Sema &SemaRef, NamedDecl *D) {
+ // If this declaration is already visible, return it directly.
+ if (D->isUnconditionallyVisible())
+ return true;
+
+ // During template instantiation, we can refer to hidden declarations, if
+ // they were visible in any module along the path of instantiation.
+ return isAcceptableSlow(SemaRef, D, Sema::AcceptableKind::Visible);
+}
+
+bool LookupResult::isReachable(Sema &SemaRef, NamedDecl *D) {
+ if (D->isUnconditionallyVisible())
+ return true;
+
+ return isAcceptableSlow(SemaRef, D, Sema::AcceptableKind::Reachable);
+}
+
+bool LookupResult::isAvailableForLookup(Sema &SemaRef, NamedDecl *ND) {
+ // We should check the visibility at the callsite already.
+ if (isVisible(SemaRef, ND))
+ return true;
+
+ // Deduction guide lives in namespace scope generally, but it is just a
+ // hint to the compilers. What we actually lookup for is the generated member
+ // of the corresponding template. So it is sufficient to check the
+ // reachability of the template decl.
+ if (auto *DeductionGuide = ND->getDeclName().getCXXDeductionGuideTemplate())
+ return SemaRef.hasReachableDefinition(DeductionGuide);
+
+ // FIXME: The lookup for allocation function is a standalone process.
+ // (We can find the logics in Sema::FindAllocationFunctions)
+ //
+ // Such structure makes it a problem when we instantiate a template
+ // declaration using placement allocation function if the placement
+ // allocation function is invisible.
+ // (See https://github.com/llvm/llvm-project/issues/59601)
+ //
+ // Here we workaround it by making the placement allocation functions
+ // always acceptable. The downside is that we can't diagnose the direct
+ // use of the invisible placement allocation functions. (Although such uses
+ // should be rare).
+ if (auto *FD = dyn_cast<FunctionDecl>(ND);
+ FD && FD->isReservedGlobalPlacementOperator())
+ return true;
+
+ auto *DC = ND->getDeclContext();
+ // If ND is not visible and it is at namespace scope, it shouldn't be found
+ // by name lookup.
+ if (DC->isFileContext())
+ return false;
+
+ // [module.interface]p7
+ // Class and enumeration member names can be found by name lookup in any
+ // context in which a definition of the type is reachable.
+ //
+ // FIXME: The current implementation didn't consider about scope. For example,
+ // ```
+ // // m.cppm
+ // export module m;
+ // enum E1 { e1 };
+ // // Use.cpp
+ // import m;
+ // void test() {
+ // auto a = E1::e1; // Error as expected.
+ // auto b = e1; // Should be error. namespace-scope name e1 is not visible
+ // }
+ // ```
+ // For the above example, the current implementation would emit error for `a`
+ // correctly. However, the implementation wouldn't diagnose about `b` now.
+ // Since we only check the reachability for the parent only.
+ // See clang/test/CXX/module/module.interface/p7.cpp for example.
+ if (auto *TD = dyn_cast<TagDecl>(DC))
+ return SemaRef.hasReachableDefinition(TD);
+
+ return false;
+}
+
/// Perform unqualified name lookup starting from a given
/// scope.
///
@@ -1910,13 +2179,14 @@ NamedDecl *LookupResult::getAcceptableDeclSlow(NamedDecl *D) const {
/// used to diagnose ambiguities.
///
/// @returns \c true if lookup succeeded and false otherwise.
-bool Sema::LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation) {
+bool Sema::LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation,
+ bool ForceNoCPlusPlus) {
DeclarationName Name = R.getLookupName();
if (!Name) return false;
LookupNameKind NameKind = R.getLookupKind();
- if (!getLangOpts().CPlusPlus) {
+ if (!getLangOpts().CPlusPlus || ForceNoCPlusPlus) {
// Unqualified name lookup in C/Objective-C is purely lexical, so
// search in the declarations attached to the name.
if (NameKind == Sema::LookupRedeclarationWithLinkage) {
@@ -2109,7 +2379,7 @@ static bool LookupQualifiedNameInUsingDirectives(Sema &S, LookupResult &R,
continue;
}
- for (auto I : ND->using_directives()) {
+ for (auto *I : ND->using_directives()) {
NamespaceDecl *Nom = I->getNominatedNamespace();
if (S.isVisible(I) && Visited.insert(Nom).second)
Queue.push_back(Nom);
@@ -2166,8 +2436,9 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool oldVal;
DeclContext *Context;
// Set flag in DeclContext informing debugger that we're looking for qualified name
- QualifiedLookupInScope(DeclContext *ctx) : Context(ctx) {
- oldVal = ctx->setUseQualifiedLookup();
+ QualifiedLookupInScope(DeclContext *ctx)
+ : oldVal(ctx->shouldUseQualifiedLookup()), Context(ctx) {
+ ctx->setUseQualifiedLookup();
}
~QualifiedLookupInScope() {
Context->setUseQualifiedLookup(oldVal);
@@ -2597,6 +2868,18 @@ void Sema::DiagnoseAmbiguousLookup(LookupResult &Result) {
break;
}
+ case LookupResult::AmbiguousReferenceToPlaceholderVariable: {
+ Diag(NameLoc, diag::err_using_placeholder_variable) << Name << LookupRange;
+ DeclContext *DC = nullptr;
+ for (auto *D : Result) {
+ Diag(D->getLocation(), diag::note_reference_placeholder) << D;
+ if (DC != nullptr && DC != D->getDeclContext())
+ break;
+ DC = D->getDeclContext();
+ }
+ break;
+ }
+
case LookupResult::AmbiguousReference: {
Diag(NameLoc, diag::err_ambiguous_reference) << Name << LookupRange;
@@ -2699,6 +2982,7 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
case TemplateArgument::Integral:
case TemplateArgument::Expression:
case TemplateArgument::NullPtr:
+ case TemplateArgument::StructuralValue:
// [Note: non-type template arguments do not contribute to the set of
// associated namespaces. ]
break;
@@ -2889,7 +3173,7 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
for (const auto &Arg : Proto->param_types())
Queue.push_back(Arg.getTypePtr());
// fallthrough
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Type::FunctionNoProto: {
const FunctionType *FnType = cast<FunctionType>(T);
@@ -2934,7 +3218,7 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
case Type::ExtVector:
case Type::ConstantMatrix:
case Type::Complex:
- case Type::ExtInt:
+ case Type::BitInt:
break;
// Non-deduced auto types only get here for error cases.
@@ -3219,27 +3503,27 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD,
if (CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(Cand->getUnderlyingDecl())) {
if (SM == CXXCopyAssignment || SM == CXXMoveAssignment)
AddMethodCandidate(M, Cand, RD, ThisTy, Classification,
- llvm::makeArrayRef(&Arg, NumArgs), OCS, true);
+ llvm::ArrayRef(&Arg, NumArgs), OCS, true);
else if (CtorInfo)
AddOverloadCandidate(CtorInfo.Constructor, CtorInfo.FoundDecl,
- llvm::makeArrayRef(&Arg, NumArgs), OCS,
+ llvm::ArrayRef(&Arg, NumArgs), OCS,
/*SuppressUserConversions*/ true);
else
- AddOverloadCandidate(M, Cand, llvm::makeArrayRef(&Arg, NumArgs), OCS,
+ AddOverloadCandidate(M, Cand, llvm::ArrayRef(&Arg, NumArgs), OCS,
/*SuppressUserConversions*/ true);
} else if (FunctionTemplateDecl *Tmpl =
dyn_cast<FunctionTemplateDecl>(Cand->getUnderlyingDecl())) {
if (SM == CXXCopyAssignment || SM == CXXMoveAssignment)
- AddMethodTemplateCandidate(
- Tmpl, Cand, RD, nullptr, ThisTy, Classification,
- llvm::makeArrayRef(&Arg, NumArgs), OCS, true);
+ AddMethodTemplateCandidate(Tmpl, Cand, RD, nullptr, ThisTy,
+ Classification,
+ llvm::ArrayRef(&Arg, NumArgs), OCS, true);
else if (CtorInfo)
- AddTemplateOverloadCandidate(
- CtorInfo.ConstructorTmpl, CtorInfo.FoundDecl, nullptr,
- llvm::makeArrayRef(&Arg, NumArgs), OCS, true);
+ AddTemplateOverloadCandidate(CtorInfo.ConstructorTmpl,
+ CtorInfo.FoundDecl, nullptr,
+ llvm::ArrayRef(&Arg, NumArgs), OCS, true);
else
- AddTemplateOverloadCandidate(
- Tmpl, Cand, nullptr, llvm::makeArrayRef(&Arg, NumArgs), OCS, true);
+ AddTemplateOverloadCandidate(Tmpl, Cand, nullptr,
+ llvm::ArrayRef(&Arg, NumArgs), OCS, true);
} else {
assert(isa<UsingDecl>(Cand.getDecl()) &&
"illegal Kind of operator = Decl");
@@ -3362,9 +3646,10 @@ CXXMethodDecl *Sema::LookupMovingAssignment(CXXRecordDecl *Class,
///
/// \returns The destructor for this class.
CXXDestructorDecl *Sema::LookupDestructor(CXXRecordDecl *Class) {
- return cast<CXXDestructorDecl>(LookupSpecialMember(Class, CXXDestructor,
- false, false, false,
- false, false).getMethod());
+ return cast_or_null<CXXDestructorDecl>(
+ LookupSpecialMember(Class, CXXDestructor, false, false, false, false,
+ false)
+ .getMethod());
}
/// LookupLiteralOperator - Determine which literal operator should be used for
@@ -3438,11 +3723,11 @@ Sema::LookupLiteralOperator(Scope *S, LookupResult &R,
// is a well-formed template argument for the template parameter.
if (StringLit) {
SFINAETrap Trap(*this);
- SmallVector<TemplateArgument, 1> Checked;
+ SmallVector<TemplateArgument, 1> SugaredChecked, CanonicalChecked;
TemplateArgumentLoc Arg(TemplateArgument(StringLit), StringLit);
- if (CheckTemplateArgument(Params->getParam(0), Arg, FD,
- R.getNameLoc(), R.getNameLoc(), 0,
- Checked) ||
+ if (CheckTemplateArgument(
+ Params->getParam(0), Arg, FD, R.getNameLoc(), R.getNameLoc(),
+ 0, SugaredChecked, CanonicalChecked, CTAK_Specified) ||
Trap.hasErrorOccurred())
IsTemplate = false;
}
@@ -3499,8 +3784,8 @@ Sema::LookupLiteralOperator(Scope *S, LookupResult &R,
// operator template, but not both.
if (FoundRaw && FoundTemplate) {
Diag(R.getNameLoc(), diag::err_ovl_ambiguous_call) << R.getLookupName();
- for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I)
- NoteOverloadCandidate(*I, (*I)->getUnderlyingDecl()->getAsFunction());
+ for (const NamedDecl *D : R)
+ NoteOverloadCandidate(D, D->getUnderlyingDecl()->getAsFunction());
return LOLR_Error;
}
@@ -3589,6 +3874,12 @@ void Sema::ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
// associated classes are visible within their respective
// namespaces even if they are not visible during an ordinary
// lookup (11.4).
+ //
+ // C++20 [basic.lookup.argdep] p4.3
+ // -- are exported, are attached to a named module M, do not appear
+ // in the translation unit containing the point of the lookup, and
+ // have the same innermost enclosing non-inline namespace scope as
+ // a declaration of an associated entity attached to M.
DeclContext::lookup_result R = NS->lookup(Name);
for (auto *D : R) {
auto *Underlying = D;
@@ -3610,9 +3901,50 @@ void Sema::ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
Visible = true;
break;
}
+
+ if (!getLangOpts().CPlusPlusModules)
+ continue;
+
+ if (D->isInExportDeclContext()) {
+ Module *FM = D->getOwningModule();
+ // C++20 [basic.lookup.argdep] p4.3 .. are exported ...
+ // exports are only valid in module purview and outside of any
+ // PMF (although a PMF should not even be present in a module
+ // with an import).
+ assert(FM && FM->isNamedModule() && !FM->isPrivateModule() &&
+ "bad export context");
+ // .. are attached to a named module M, do not appear in the
+ // translation unit containing the point of the lookup..
+ if (D->isInAnotherModuleUnit() &&
+ llvm::any_of(AssociatedClasses, [&](auto *E) {
+ // ... and have the same innermost enclosing non-inline
+ // namespace scope as a declaration of an associated entity
+ // attached to M
+ if (E->getOwningModule() != FM)
+ return false;
+ // TODO: maybe this could be cached when generating the
+ // associated namespaces / entities.
+ DeclContext *Ctx = E->getDeclContext();
+ while (!Ctx->isFileContext() || Ctx->isInlineNamespace())
+ Ctx = Ctx->getParent();
+ return Ctx == NS;
+ })) {
+ Visible = true;
+ break;
+ }
+ }
} else if (D->getFriendObjectKind()) {
auto *RD = cast<CXXRecordDecl>(D->getLexicalDeclContext());
- if (AssociatedClasses.count(RD) && isVisible(D)) {
+ // [basic.lookup.argdep]p4:
+ // Argument-dependent lookup finds all declarations of functions and
+ // function templates that
+ // - ...
+ // - are declared as a friend ([class.friend]) of any class with a
+ // reachable definition in the set of associated entities,
+ //
+ // FIXME: If there's a merged definition of D that is reachable, then
+ // the friend declaration should be considered.
+ if (AssociatedClasses.count(RD) && isReachable(D)) {
Visible = true;
break;
}
@@ -3842,28 +4174,27 @@ private:
// Enumerate all of the results in this context.
for (DeclContextLookupResult R :
Load ? Ctx->lookups()
- : Ctx->noload_lookups(/*PreserveInternalState=*/false)) {
- for (auto *D : R) {
- if (auto *ND = Result.getAcceptableDecl(D)) {
- // Rather than visit immediatelly, we put ND into a vector and visit
- // all decls, in order, outside of this loop. The reason is that
- // Consumer.FoundDecl() may invalidate the iterators used in the two
- // loops above.
- DeclsToVisit.push_back(ND);
- }
+ : Ctx->noload_lookups(/*PreserveInternalState=*/false))
+ for (auto *D : R)
+ // Rather than visit immediately, we put ND into a vector and visit
+ // all decls, in order, outside of this loop. The reason is that
+ // Consumer.FoundDecl() and LookupResult::getAcceptableDecl(D)
+ // may invalidate the iterators used in the two
+ // loops above.
+ DeclsToVisit.push_back(D);
+
+ for (auto *D : DeclsToVisit)
+ if (auto *ND = Result.getAcceptableDecl(D)) {
+ Consumer.FoundDecl(ND, Visited.checkHidden(ND), Ctx, InBaseClass);
+ Visited.add(ND);
}
- }
- for (auto *ND : DeclsToVisit) {
- Consumer.FoundDecl(ND, Visited.checkHidden(ND), Ctx, InBaseClass);
- Visited.add(ND);
- }
DeclsToVisit.clear();
// Traverse using directives for qualified name lookup.
if (QualifiedNameLookup) {
ShadowContextRAII Shadow(Visited);
- for (auto I : Ctx->using_directives()) {
+ for (auto *I : Ctx->using_directives()) {
if (!Result.getSema().isVisible(I))
continue;
lookupInDeclContext(I->getNominatedNamespace(), Result,
@@ -4306,18 +4637,35 @@ void TypoCorrectionConsumer::addCorrection(TypoCorrection Correction) {
if (!CList.empty() && !CList.back().isResolved())
CList.pop_back();
if (NamedDecl *NewND = Correction.getCorrectionDecl()) {
- std::string CorrectionStr = Correction.getAsString(SemaRef.getLangOpts());
- for (TypoResultList::iterator RI = CList.begin(), RIEnd = CList.end();
- RI != RIEnd; ++RI) {
- // If the Correction refers to a decl already in the result list,
- // replace the existing result if the string representation of Correction
- // comes before the current result alphabetically, then stop as there is
- // nothing more to be done to add Correction to the candidate set.
- if (RI->getCorrectionDecl() == NewND) {
- if (CorrectionStr < RI->getAsString(SemaRef.getLangOpts()))
- *RI = Correction;
- return;
- }
+ auto RI = llvm::find_if(CList, [NewND](const TypoCorrection &TypoCorr) {
+ return TypoCorr.getCorrectionDecl() == NewND;
+ });
+ if (RI != CList.end()) {
+ // The Correction refers to a decl already in the list. No insertion is
+ // necessary and all further cases will return.
+
+ auto IsDeprecated = [](Decl *D) {
+ while (D) {
+ if (D->isDeprecated())
+ return true;
+ D = llvm::dyn_cast_or_null<NamespaceDecl>(D->getDeclContext());
+ }
+ return false;
+ };
+
+ // Prefer non deprecated Corrections over deprecated and only then
+ // sort using an alphabetical order.
+ std::pair<bool, std::string> NewKey = {
+ IsDeprecated(Correction.getFoundDecl()),
+ Correction.getAsString(SemaRef.getLangOpts())};
+
+ std::pair<bool, std::string> PrevKey = {
+ IsDeprecated(RI->getFoundDecl()),
+ RI->getAsString(SemaRef.getLangOpts())};
+
+ if (NewKey < PrevKey)
+ *RI = Correction;
+ return;
}
}
if (CList.empty() || Correction.isResolved())
@@ -4595,9 +4943,7 @@ void TypoCorrectionConsumer::NamespaceSpecifierSet::addNameSpecifier(
dyn_cast_or_null<NamedDecl>(NamespaceDeclChain.back())) {
IdentifierInfo *Name = ND->getIdentifier();
bool SameNameSpecifier = false;
- if (std::find(CurNameSpecifierIdentifiers.begin(),
- CurNameSpecifierIdentifiers.end(),
- Name) != CurNameSpecifierIdentifiers.end()) {
+ if (llvm::is_contained(CurNameSpecifierIdentifiers, Name)) {
std::string NewNameSpecifier;
llvm::raw_string_ostream SpecifierOStream(NewNameSpecifier);
SmallVector<const IdentifierInfo *, 4> NewNameSpecifierIdentifiers;
@@ -4606,8 +4952,7 @@ void TypoCorrectionConsumer::NamespaceSpecifierSet::addNameSpecifier(
SpecifierOStream.flush();
SameNameSpecifier = NewNameSpecifier == CurNameSpecifier;
}
- if (SameNameSpecifier || llvm::find(CurContextIdentifiers, Name) !=
- CurContextIdentifiers.end()) {
+ if (SameNameSpecifier || llvm::is_contained(CurContextIdentifiers, Name)) {
// Rebuild the NestedNameSpecifier as a globally-qualified specifier.
NNS = NestedNameSpecifier::GlobalSpecifier(Context);
NumSpecifiers =
@@ -4622,9 +4967,9 @@ void TypoCorrectionConsumer::NamespaceSpecifierSet::addNameSpecifier(
if (NNS && !CurNameSpecifierIdentifiers.empty()) {
SmallVector<const IdentifierInfo*, 4> NewNameSpecifierIdentifiers;
getNestedNameSpecifierIdentifiers(NNS, NewNameSpecifierIdentifiers);
- NumSpecifiers = llvm::ComputeEditDistance(
- llvm::makeArrayRef(CurNameSpecifierIdentifiers),
- llvm::makeArrayRef(NewNameSpecifierIdentifiers));
+ NumSpecifiers =
+ llvm::ComputeEditDistance(llvm::ArrayRef(CurNameSpecifierIdentifiers),
+ llvm::ArrayRef(NewNameSpecifierIdentifiers));
}
SpecifierInfo SI = {Ctx, NNS, NumSpecifiers};
@@ -4711,9 +5056,8 @@ static void AddKeywordsToConsumer(Sema &SemaRef,
"extern", "inline", "static", "typedef"
};
- const unsigned NumCTypeSpecs = llvm::array_lengthof(CTypeSpecs);
- for (unsigned I = 0; I != NumCTypeSpecs; ++I)
- Consumer.addKeywordResult(CTypeSpecs[I]);
+ for (const auto *CTS : CTypeSpecs)
+ Consumer.addKeywordResult(CTS);
if (SemaRef.getLangOpts().C99)
Consumer.addKeywordResult("restrict");
@@ -4765,9 +5109,8 @@ static void AddKeywordsToConsumer(Sema &SemaRef,
static const char *const CXXExprs[] = {
"delete", "new", "operator", "throw", "typeid"
};
- const unsigned NumCXXExprs = llvm::array_lengthof(CXXExprs);
- for (unsigned I = 0; I != NumCXXExprs; ++I)
- Consumer.addKeywordResult(CXXExprs[I]);
+ for (const auto *CE : CXXExprs)
+ Consumer.addKeywordResult(CE);
if (isa<CXXMethodDecl>(SemaRef.CurContext) &&
cast<CXXMethodDecl>(SemaRef.CurContext)->isInstance())
@@ -4791,9 +5134,8 @@ static void AddKeywordsToConsumer(Sema &SemaRef,
// Statements.
static const char *const CStmts[] = {
"do", "else", "for", "goto", "if", "return", "switch", "while" };
- const unsigned NumCStmts = llvm::array_lengthof(CStmts);
- for (unsigned I = 0; I != NumCStmts; ++I)
- Consumer.addKeywordResult(CStmts[I]);
+ for (const auto *CS : CStmts)
+ Consumer.addKeywordResult(CS);
if (SemaRef.getLangOpts().CPlusPlus) {
Consumer.addKeywordResult("catch");
@@ -5291,15 +5633,15 @@ bool FunctionCallFilterCCC::ValidateCandidate(const TypoCorrection &candidate) {
// unless the method being corrected--or the current DeclContext, if the
// function being corrected is not a method--is a method in the same class
// or a descendent class of the candidate's parent class.
- if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
if (MemberFn || !MD->isStatic()) {
- CXXMethodDecl *CurMD =
+ const auto *CurMD =
MemberFn
- ? dyn_cast_or_null<CXXMethodDecl>(MemberFn->getMemberDecl())
- : dyn_cast_or_null<CXXMethodDecl>(CurContext);
- CXXRecordDecl *CurRD =
+ ? dyn_cast_if_present<CXXMethodDecl>(MemberFn->getMemberDecl())
+ : dyn_cast_if_present<CXXMethodDecl>(CurContext);
+ const CXXRecordDecl *CurRD =
CurMD ? CurMD->getParent()->getCanonicalDecl() : nullptr;
- CXXRecordDecl *RD = MD->getParent()->getCanonicalDecl();
+ const CXXRecordDecl *RD = MD->getParent()->getCanonicalDecl();
if (!CurRD || (CurRD != RD && !CurRD->isDerivedFrom(RD)))
continue;
}
@@ -5318,31 +5660,28 @@ void Sema::diagnoseTypo(const TypoCorrection &Correction,
/// Find which declaration we should import to provide the definition of
/// the given declaration.
-static NamedDecl *getDefinitionToImport(NamedDecl *D) {
- if (VarDecl *VD = dyn_cast<VarDecl>(D))
+static const NamedDecl *getDefinitionToImport(const NamedDecl *D) {
+ if (const auto *VD = dyn_cast<VarDecl>(D))
return VD->getDefinition();
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ if (const auto *FD = dyn_cast<FunctionDecl>(D))
return FD->getDefinition();
- if (TagDecl *TD = dyn_cast<TagDecl>(D))
+ if (const auto *TD = dyn_cast<TagDecl>(D))
return TD->getDefinition();
- // The first definition for this ObjCInterfaceDecl might be in the TU
- // and not associated with any module. Use the one we know to be complete
- // and have just seen in a module.
- if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D))
- return ID;
- if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D))
+ if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(D))
+ return ID->getDefinition();
+ if (const auto *PD = dyn_cast<ObjCProtocolDecl>(D))
return PD->getDefinition();
- if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D))
- if (NamedDecl *TTD = TD->getTemplatedDecl())
+ if (const auto *TD = dyn_cast<TemplateDecl>(D))
+ if (const NamedDecl *TTD = TD->getTemplatedDecl())
return getDefinitionToImport(TTD);
return nullptr;
}
-void Sema::diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
+void Sema::diagnoseMissingImport(SourceLocation Loc, const NamedDecl *Decl,
MissingImportKind MIK, bool Recover) {
// Suggest importing a module providing the definition of this entity, if
// possible.
- NamedDecl *Def = getDefinitionToImport(Decl);
+ const NamedDecl *Def = getDefinitionToImport(Decl);
if (!Def)
Def = Decl;
@@ -5360,20 +5699,25 @@ void Sema::diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
/// Get a "quoted.h" or <angled.h> include path to use in a diagnostic
/// suggesting the addition of a #include of the specified file.
-static std::string getHeaderNameForHeader(Preprocessor &PP, const FileEntry *E,
+static std::string getHeaderNameForHeader(Preprocessor &PP, FileEntryRef E,
llvm::StringRef IncludingFile) {
- bool IsSystem = false;
+ bool IsAngled = false;
auto Path = PP.getHeaderSearchInfo().suggestPathToFileForDiagnostics(
- E, IncludingFile, &IsSystem);
- return (IsSystem ? '<' : '"') + Path + (IsSystem ? '>' : '"');
+ E, IncludingFile, &IsAngled);
+ return (IsAngled ? '<' : '"') + Path + (IsAngled ? '>' : '"');
}
-void Sema::diagnoseMissingImport(SourceLocation UseLoc, NamedDecl *Decl,
+void Sema::diagnoseMissingImport(SourceLocation UseLoc, const NamedDecl *Decl,
SourceLocation DeclLoc,
ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover) {
assert(!Modules.empty());
+ // See https://github.com/llvm/llvm-project/issues/73893. It is generally
+ // confusing than helpful to show the namespace is not visible.
+ if (isa<NamespaceDecl>(Decl))
+ return;
+
auto NotePrevious = [&] {
// FIXME: Suppress the note backtrace even under
// -fdiagnostics-show-note-include-stack. We don't care how this
@@ -5385,7 +5729,7 @@ void Sema::diagnoseMissingImport(SourceLocation UseLoc, NamedDecl *Decl,
llvm::SmallVector<Module*, 8> UniqueModules;
llvm::SmallDenseSet<Module*, 8> UniqueModuleSet;
for (auto *M : Modules) {
- if (M->Kind == Module::GlobalModuleFragment)
+ if (M->isExplicitGlobalModule() || M->isPrivateModule())
continue;
if (UniqueModuleSet.insert(M).second)
UniqueModules.push_back(M);
@@ -5393,11 +5737,12 @@ void Sema::diagnoseMissingImport(SourceLocation UseLoc, NamedDecl *Decl,
// Try to find a suitable header-name to #include.
std::string HeaderName;
- if (const FileEntry *Header =
+ if (OptionalFileEntryRef Header =
PP.getHeaderToIncludeForDiagnostics(UseLoc, DeclLoc)) {
if (const FileEntry *FE =
SourceMgr.getFileEntryForID(SourceMgr.getFileID(UseLoc)))
- HeaderName = getHeaderNameForHeader(PP, Header, FE->tryGetRealPathName());
+ HeaderName =
+ getHeaderNameForHeader(PP, *Header, FE->tryGetRealPathName());
}
// If we have a #include we should suggest, or if all definition locations
@@ -5416,16 +5761,38 @@ void Sema::diagnoseMissingImport(SourceLocation UseLoc, NamedDecl *Decl,
Modules = UniqueModules;
+ auto GetModuleNameForDiagnostic = [this](const Module *M) -> std::string {
+ if (M->isModuleMapModule())
+ return M->getFullModuleName();
+
+ Module *CurrentModule = getCurrentModule();
+
+ if (M->isImplicitGlobalModule())
+ M = M->getTopLevelModule();
+
+ bool IsInTheSameModule =
+ CurrentModule && CurrentModule->getPrimaryModuleInterfaceName() ==
+ M->getPrimaryModuleInterfaceName();
+
+ // If the current module unit is in the same module with M, it is OK to show
+ // the partition name. Otherwise, it'll be sufficient to show the primary
+ // module name.
+ if (IsInTheSameModule)
+ return M->getTopLevelModuleName().str();
+ else
+ return M->getPrimaryModuleInterfaceName().str();
+ };
+
if (Modules.size() > 1) {
std::string ModuleList;
unsigned N = 0;
- for (Module *M : Modules) {
+ for (const auto *M : Modules) {
ModuleList += "\n ";
if (++N == 5 && N != Modules.size()) {
ModuleList += "[...]";
break;
}
- ModuleList += M->getFullModuleName();
+ ModuleList += GetModuleNameForDiagnostic(M);
}
Diag(UseLoc, diag::err_module_unimported_use_multiple)
@@ -5433,7 +5800,7 @@ void Sema::diagnoseMissingImport(SourceLocation UseLoc, NamedDecl *Decl,
} else {
// FIXME: Add a FixItHint that imports the corresponding module.
Diag(UseLoc, diag::err_module_unimported_use)
- << (int)MIK << Decl << Modules[0]->getFullModuleName();
+ << (int)MIK << Decl << GetModuleNameForDiagnostic(Modules[0]);
}
NotePrevious();
@@ -5522,3 +5889,7 @@ void Sema::ActOnPragmaDump(Scope *S, SourceLocation IILoc, IdentifierInfo *II) {
LookupName(R, S);
R.dump();
}
+
+void Sema::ActOnPragmaDump(Expr *E) {
+ E->dump();
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp b/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp
index af95b1a93cc4..ed7f626971f3 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp
@@ -15,6 +15,8 @@
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/SemaInternal.h"
+#include "llvm/ADT/StringExtras.h"
+#include <optional>
using namespace clang;
using namespace sema;
@@ -26,11 +28,11 @@ static void checkModuleImportContext(Sema &S, Module *M,
if (auto *LSD = dyn_cast<LinkageSpecDecl>(DC)) {
switch (LSD->getLanguage()) {
- case LinkageSpecDecl::lang_c:
+ case LinkageSpecLanguageIDs::C:
if (ExternCLoc.isInvalid())
ExternCLoc = LSD->getBeginLoc();
break;
- case LinkageSpecDecl::lang_cxx:
+ case LinkageSpecLanguageIDs::CXX:
break;
}
DC = LSD->getParent();
@@ -54,48 +56,150 @@ static void checkModuleImportContext(Sema &S, Module *M,
}
}
-Sema::DeclGroupPtrTy
-Sema::ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc) {
- if (!ModuleScopes.empty() &&
- ModuleScopes.back().Module->Kind == Module::GlobalModuleFragment) {
- // Under -std=c++2a -fmodules-ts, we can find an explicit 'module;' after
- // already implicitly entering the global module fragment. That's OK.
- assert(getLangOpts().CPlusPlusModules && getLangOpts().ModulesTS &&
- "unexpectedly encountered multiple global module fragment decls");
- ModuleScopes.back().BeginLoc = ModuleLoc;
- return nullptr;
- }
+// We represent the primary and partition names as 'Paths' which are sections
+// of the hierarchical access path for a clang module. However for C++20
+// the periods in a name are just another character, and we will need to
+// flatten them into a string.
+static std::string stringFromPath(ModuleIdPath Path) {
+ std::string Name;
+ if (Path.empty())
+ return Name;
- // We start in the global module; all those declarations are implicitly
- // module-private (though they do not have module linkage).
- auto &Map = PP.getHeaderSearchInfo().getModuleMap();
- auto *GlobalModule = Map.createGlobalModuleFragmentForModuleUnit(ModuleLoc);
- assert(GlobalModule && "module creation should not fail");
+ for (auto &Piece : Path) {
+ if (!Name.empty())
+ Name += ".";
+ Name += Piece.first->getName();
+ }
+ return Name;
+}
- // Enter the scope of the global module.
- ModuleScopes.push_back({});
- ModuleScopes.back().BeginLoc = ModuleLoc;
- ModuleScopes.back().Module = GlobalModule;
- VisibleModules.setVisible(GlobalModule, ModuleLoc);
+Sema::DeclGroupPtrTy
+Sema::ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc) {
+ // We start in the global module;
+ Module *GlobalModule =
+ PushGlobalModuleFragment(ModuleLoc);
// All declarations created from now on are owned by the global module.
auto *TU = Context.getTranslationUnitDecl();
- TU->setModuleOwnershipKind(Decl::ModuleOwnershipKind::Visible);
+ // [module.global.frag]p2
+ // A global-module-fragment specifies the contents of the global module
+ // fragment for a module unit. The global module fragment can be used to
+ // provide declarations that are attached to the global module and usable
+ // within the module unit.
+ //
+ // So the declations in the global module shouldn't be visible by default.
+ TU->setModuleOwnershipKind(Decl::ModuleOwnershipKind::ReachableWhenImported);
TU->setLocalOwningModule(GlobalModule);
// FIXME: Consider creating an explicit representation of this declaration.
return nullptr;
}
+void Sema::HandleStartOfHeaderUnit() {
+ assert(getLangOpts().CPlusPlusModules &&
+ "Header units are only valid for C++20 modules");
+ SourceLocation StartOfTU =
+ SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
+
+ StringRef HUName = getLangOpts().CurrentModule;
+ if (HUName.empty()) {
+ HUName =
+ SourceMgr.getFileEntryRefForID(SourceMgr.getMainFileID())->getName();
+ const_cast<LangOptions &>(getLangOpts()).CurrentModule = HUName.str();
+ }
+
+ // TODO: Make the C++20 header lookup independent.
+ // When the input is pre-processed source, we need a file ref to the original
+ // file for the header map.
+ auto F = SourceMgr.getFileManager().getOptionalFileRef(HUName);
+ // For the sake of error recovery (if someone has moved the original header
+ // after creating the pre-processed output) fall back to obtaining the file
+ // ref for the input file, which must be present.
+ if (!F)
+ F = SourceMgr.getFileEntryRefForID(SourceMgr.getMainFileID());
+ assert(F && "failed to find the header unit source?");
+ Module::Header H{HUName.str(), HUName.str(), *F};
+ auto &Map = PP.getHeaderSearchInfo().getModuleMap();
+ Module *Mod = Map.createHeaderUnit(StartOfTU, HUName, H);
+ assert(Mod && "module creation should not fail");
+ ModuleScopes.push_back({}); // No GMF
+ ModuleScopes.back().BeginLoc = StartOfTU;
+ ModuleScopes.back().Module = Mod;
+ VisibleModules.setVisible(Mod, StartOfTU);
+
+ // From now on, we have an owning module for all declarations we see.
+ // All of these are implicitly exported.
+ auto *TU = Context.getTranslationUnitDecl();
+ TU->setModuleOwnershipKind(Decl::ModuleOwnershipKind::Visible);
+ TU->setLocalOwningModule(Mod);
+}
+
+/// Tests whether the given identifier is reserved as a module name and
+/// diagnoses if it is. Returns true if a diagnostic is emitted and false
+/// otherwise.
+static bool DiagReservedModuleName(Sema &S, const IdentifierInfo *II,
+ SourceLocation Loc) {
+ enum {
+ Valid = -1,
+ Invalid = 0,
+ Reserved = 1,
+ } Reason = Valid;
+
+ if (II->isStr("module") || II->isStr("import"))
+ Reason = Invalid;
+ else if (II->isReserved(S.getLangOpts()) !=
+ ReservedIdentifierStatus::NotReserved)
+ Reason = Reserved;
+
+ // If the identifier is reserved (not invalid) but is in a system header,
+ // we do not diagnose (because we expect system headers to use reserved
+ // identifiers).
+ if (Reason == Reserved && S.getSourceManager().isInSystemHeader(Loc))
+ Reason = Valid;
+
+ switch (Reason) {
+ case Valid:
+ return false;
+ case Invalid:
+ return S.Diag(Loc, diag::err_invalid_module_name) << II;
+ case Reserved:
+ S.Diag(Loc, diag::warn_reserved_module_name) << II;
+ return false;
+ }
+ llvm_unreachable("fell off a fully covered switch");
+}
+
Sema::DeclGroupPtrTy
Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
- ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl) {
- assert((getLangOpts().ModulesTS || getLangOpts().CPlusPlusModules) &&
- "should only have module decl in Modules TS or C++20");
+ ModuleDeclKind MDK, ModuleIdPath Path,
+ ModuleIdPath Partition, ModuleImportState &ImportState) {
+ assert(getLangOpts().CPlusPlusModules &&
+ "should only have module decl in standard C++ modules");
+
+ bool IsFirstDecl = ImportState == ModuleImportState::FirstDecl;
+ bool SeenGMF = ImportState == ModuleImportState::GlobalFragment;
+ // If any of the steps here fail, we count that as invalidating C++20
+ // module state;
+ ImportState = ModuleImportState::NotACXX20Module;
+
+ bool IsPartition = !Partition.empty();
+ if (IsPartition)
+ switch (MDK) {
+ case ModuleDeclKind::Implementation:
+ MDK = ModuleDeclKind::PartitionImplementation;
+ break;
+ case ModuleDeclKind::Interface:
+ MDK = ModuleDeclKind::PartitionInterface;
+ break;
+ default:
+ llvm_unreachable("how did we get a partition type set?");
+ }
- // A module implementation unit requires that we are not compiling a module
- // of any kind. A module interface unit requires that we are not compiling a
- // module map.
+ // A (non-partition) module implementation unit requires that we are not
+ // compiling a module of any kind. A partition implementation emits an
+ // interface (and the AST for the implementation), which will subsequently
+ // be consumed to emit a binary.
+ // A module interface unit requires that we are not compiling a module map.
switch (getLangOpts().getCompilingModule()) {
case LangOptions::CMK_None:
// It's OK to compile a module interface as a normal translation unit.
@@ -106,7 +210,7 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
break;
// We were asked to compile a module interface unit but this is a module
- // implementation unit. That indicates the 'export' is missing.
+ // implementation unit.
Diag(ModuleLoc, diag::err_module_interface_implementation_mismatch)
<< FixItHint::CreateInsertion(ModuleLoc, "export ");
MDK = ModuleDeclKind::Interface;
@@ -116,8 +220,8 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
Diag(ModuleLoc, diag::err_module_decl_in_module_map_module);
return nullptr;
- case LangOptions::CMK_HeaderModule:
- Diag(ModuleLoc, diag::err_module_decl_in_header_module);
+ case LangOptions::CMK_HeaderUnit:
+ Diag(ModuleLoc, diag::err_module_decl_in_header_unit);
return nullptr;
}
@@ -127,23 +231,20 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
// here, in order to support macro import.
// Only one module-declaration is permitted per source file.
- if (!ModuleScopes.empty() &&
- ModuleScopes.back().Module->isModulePurview()) {
+ if (isCurrentModulePurview()) {
Diag(ModuleLoc, diag::err_module_redeclaration);
Diag(VisibleModules.getImportLoc(ModuleScopes.back().Module),
diag::note_prev_module_declaration);
return nullptr;
}
- // Find the global module fragment we're adopting into this module, if any.
- Module *GlobalModuleFragment = nullptr;
- if (!ModuleScopes.empty() &&
- ModuleScopes.back().Module->Kind == Module::GlobalModuleFragment)
- GlobalModuleFragment = ModuleScopes.back().Module;
+ assert((!getLangOpts().CPlusPlusModules ||
+ SeenGMF == (bool)this->TheGlobalModuleFragment) &&
+ "mismatched global module state");
// In C++20, the module-declaration must be the first declaration if there
// is no global module fragment.
- if (getLangOpts().CPlusPlusModules && !IsFirstDecl && !GlobalModuleFragment) {
+ if (getLangOpts().CPlusPlusModules && !IsFirstDecl && !SeenGMF) {
Diag(ModuleLoc, diag::err_module_decl_not_at_start);
SourceLocation BeginLoc =
ModuleScopes.empty()
@@ -155,39 +256,63 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
}
}
+ // C++23 [module.unit]p1: ... The identifiers module and import shall not
+ // appear as identifiers in a module-name or module-partition. All
+ // module-names either beginning with an identifier consisting of std
+ // followed by zero or more digits or containing a reserved identifier
+ // ([lex.name]) are reserved and shall not be specified in a
+ // module-declaration; no diagnostic is required.
+
+ // Test the first part of the path to see if it's std[0-9]+ but allow the
+ // name in a system header.
+ StringRef FirstComponentName = Path[0].first->getName();
+ if (!getSourceManager().isInSystemHeader(Path[0].second) &&
+ (FirstComponentName == "std" ||
+ (FirstComponentName.starts_with("std") &&
+ llvm::all_of(FirstComponentName.drop_front(3), &llvm::isDigit))))
+ Diag(Path[0].second, diag::warn_reserved_module_name) << Path[0].first;
+
+ // Then test all of the components in the path to see if any of them are
+ // using another kind of reserved or invalid identifier.
+ for (auto Part : Path) {
+ if (DiagReservedModuleName(*this, Part.first, Part.second))
+ return nullptr;
+ }
+
// Flatten the dots in a module name. Unlike Clang's hierarchical module map
// modules, the dots here are just another character that can appear in a
// module name.
- std::string ModuleName;
- for (auto &Piece : Path) {
- if (!ModuleName.empty())
- ModuleName += ".";
- ModuleName += Piece.first->getName();
+ std::string ModuleName = stringFromPath(Path);
+ if (IsPartition) {
+ ModuleName += ":";
+ ModuleName += stringFromPath(Partition);
}
-
// If a module name was explicitly specified on the command line, it must be
// correct.
if (!getLangOpts().CurrentModule.empty() &&
getLangOpts().CurrentModule != ModuleName) {
Diag(Path.front().second, diag::err_current_module_name_mismatch)
- << SourceRange(Path.front().second, Path.back().second)
+ << SourceRange(Path.front().second, IsPartition
+ ? Partition.back().second
+ : Path.back().second)
<< getLangOpts().CurrentModule;
return nullptr;
}
const_cast<LangOptions&>(getLangOpts()).CurrentModule = ModuleName;
auto &Map = PP.getHeaderSearchInfo().getModuleMap();
- Module *Mod;
-
+ Module *Mod; // The module we are creating.
+ Module *Interface = nullptr; // The interface for an implementation.
switch (MDK) {
- case ModuleDeclKind::Interface: {
+ case ModuleDeclKind::Interface:
+ case ModuleDeclKind::PartitionInterface: {
// We can't have parsed or imported a definition of this module or parsed a
// module map defining it already.
if (auto *M = Map.findModule(ModuleName)) {
Diag(Path[0].second, diag::err_module_redefinition) << ModuleName;
if (M->DefinitionLoc.isValid())
Diag(M->DefinitionLoc, diag::note_prev_module_definition);
- else if (Optional<FileEntryRef> FE = M->getASTFile())
+ else if (OptionalFileEntryRef FE = M->getASTFile())
Diag(M->DefinitionLoc, diag::note_prev_module_definition_from_ast_file)
<< FE->getName();
Mod = M;
@@ -195,28 +320,49 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
}
// Create a Module for the module that we're defining.
- Mod = Map.createModuleForInterfaceUnit(ModuleLoc, ModuleName,
- GlobalModuleFragment);
+ Mod = Map.createModuleForInterfaceUnit(ModuleLoc, ModuleName);
+ if (MDK == ModuleDeclKind::PartitionInterface)
+ Mod->Kind = Module::ModulePartitionInterface;
assert(Mod && "module creation should not fail");
break;
}
- case ModuleDeclKind::Implementation:
+ case ModuleDeclKind::Implementation: {
+ // C++20 A module-declaration that contains neither an export-
+ // keyword nor a module-partition implicitly imports the primary
+ // module interface unit of the module as if by a module-import-
+ // declaration.
std::pair<IdentifierInfo *, SourceLocation> ModuleNameLoc(
PP.getIdentifierInfo(ModuleName), Path[0].second);
- Mod = getModuleLoader().loadModule(ModuleLoc, {ModuleNameLoc},
- Module::AllVisible,
- /*IsInclusionDirective=*/false);
- if (!Mod) {
+
+ // The module loader will assume we're trying to import the module that
+ // we're building if `LangOpts.CurrentModule` equals to 'ModuleName'.
+ // Change the value for `LangOpts.CurrentModule` temporarily to make the
+ // module loader work properly.
+ const_cast<LangOptions &>(getLangOpts()).CurrentModule = "";
+ Interface = getModuleLoader().loadModule(ModuleLoc, {ModuleNameLoc},
+ Module::AllVisible,
+ /*IsInclusionDirective=*/false);
+ const_cast<LangOptions&>(getLangOpts()).CurrentModule = ModuleName;
+
+ if (!Interface) {
Diag(ModuleLoc, diag::err_module_not_defined) << ModuleName;
// Create an empty module interface unit for error recovery.
- Mod = Map.createModuleForInterfaceUnit(ModuleLoc, ModuleName,
- GlobalModuleFragment);
+ Mod = Map.createModuleForInterfaceUnit(ModuleLoc, ModuleName);
+ } else {
+ Mod = Map.createModuleForImplementationUnit(ModuleLoc, ModuleName);
}
+ } break;
+
+ case ModuleDeclKind::PartitionImplementation:
+ // Create an interface, but note that it is an implementation
+ // unit.
+ Mod = Map.createModuleForInterfaceUnit(ModuleLoc, ModuleName);
+ Mod->Kind = Module::ModulePartitionImplementation;
break;
}
- if (!GlobalModuleFragment) {
+ if (!this->TheGlobalModuleFragment) {
ModuleScopes.push_back({});
if (getLangOpts().ModulesLocalVisibility)
ModuleScopes.back().OuterVisibleModules = std::move(VisibleModules);
@@ -228,17 +374,47 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
// Switch from the global module fragment (if any) to the named module.
ModuleScopes.back().BeginLoc = StartLoc;
ModuleScopes.back().Module = Mod;
- ModuleScopes.back().ModuleInterface = MDK != ModuleDeclKind::Implementation;
VisibleModules.setVisible(Mod, ModuleLoc);
// From now on, we have an owning module for all declarations we see.
- // However, those declarations are module-private unless explicitly
+ // In C++20 modules, those declaration would be reachable when imported
+ // unless explicitily exported.
+ // Otherwise, those declarations are module-private unless explicitly
// exported.
auto *TU = Context.getTranslationUnitDecl();
- TU->setModuleOwnershipKind(Decl::ModuleOwnershipKind::ModulePrivate);
+ TU->setModuleOwnershipKind(Decl::ModuleOwnershipKind::ReachableWhenImported);
TU->setLocalOwningModule(Mod);
- // FIXME: Create a ModuleDecl.
+ // We are in the module purview, but before any other (non import)
+ // statements, so imports are allowed.
+ ImportState = ModuleImportState::ImportAllowed;
+
+ getASTContext().setCurrentNamedModule(Mod);
+
+ // We already potentially made an implicit import (in the case of a module
+ // implementation unit importing its interface). Make this module visible
+ // and return the import decl to be added to the current TU.
+ if (Interface) {
+
+ VisibleModules.setVisible(Interface, ModuleLoc);
+ VisibleModules.makeTransitiveImportsVisible(Interface, ModuleLoc);
+
+ // Make the import decl for the interface in the impl module.
+ ImportDecl *Import = ImportDecl::Create(Context, CurContext, ModuleLoc,
+ Interface, Path[0].second);
+ CurContext->addDecl(Import);
+
+ // Sequence initialization of the imported module before that of the current
+ // module, if any.
+ Context.addModuleInitializer(ModuleScopes.back().Module, Import);
+ Mod->Imports.insert(Interface); // As if we imported it.
+ // Also save this as a shortcut to checking for decls in the interface
+ ThePrimaryInterface = Interface;
+ // If we made an implicit import of the module interface, then return the
+ // imported module decl.
+ return ConvertDeclToDeclGroup(Import);
+ }
+
return nullptr;
}
@@ -248,10 +424,14 @@ Sema::ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
// C++20 [basic.link]/2:
// A private-module-fragment shall appear only in a primary module
// interface unit.
- switch (ModuleScopes.empty() ? Module::GlobalModuleFragment
+ switch (ModuleScopes.empty() ? Module::ExplicitGlobalModuleFragment
: ModuleScopes.back().Module->Kind) {
case Module::ModuleMapModule:
- case Module::GlobalModuleFragment:
+ case Module::ExplicitGlobalModuleFragment:
+ case Module::ImplicitGlobalModuleFragment:
+ case Module::ModulePartitionImplementation:
+ case Module::ModulePartitionInterface:
+ case Module::ModuleHeaderUnit:
Diag(PrivateLoc, diag::err_private_module_fragment_not_module);
return nullptr;
@@ -260,19 +440,17 @@ Sema::ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
Diag(ModuleScopes.back().BeginLoc, diag::note_previous_definition);
return nullptr;
- case Module::ModuleInterfaceUnit:
- break;
- }
-
- if (!ModuleScopes.back().ModuleInterface) {
+ case Module::ModuleImplementationUnit:
Diag(PrivateLoc, diag::err_private_module_fragment_not_module_interface);
Diag(ModuleScopes.back().BeginLoc,
diag::note_not_module_interface_add_export)
<< FixItHint::CreateInsertion(ModuleScopes.back().BeginLoc, "export ");
return nullptr;
+
+ case Module::ModuleInterfaceUnit:
+ break;
}
- // FIXME: Check this isn't a module interface partition.
// FIXME: Check that this translation unit does not import any partitions;
// such imports would violate [basic.link]/2's "shall be the only module unit"
// restriction.
@@ -290,7 +468,6 @@ Sema::ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
ModuleScopes.push_back({});
ModuleScopes.back().BeginLoc = ModuleLoc;
ModuleScopes.back().Module = PrivateModuleFragment;
- ModuleScopes.back().ModuleInterface = true;
VisibleModules.setVisible(PrivateModuleFragment, ModuleLoc);
// All declarations created from now on are scoped to the private module
@@ -306,27 +483,59 @@ Sema::ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
- SourceLocation ImportLoc,
- ModuleIdPath Path) {
- // Flatten the module path for a Modules TS module name.
+ SourceLocation ImportLoc, ModuleIdPath Path,
+ bool IsPartition) {
+ assert((!IsPartition || getLangOpts().CPlusPlusModules) &&
+ "partition seen in non-C++20 code?");
+
+ // For a C++20 module name, flatten into a single identifier with the source
+ // location of the first component.
std::pair<IdentifierInfo *, SourceLocation> ModuleNameLoc;
- if (getLangOpts().ModulesTS) {
- std::string ModuleName;
- for (auto &Piece : Path) {
- if (!ModuleName.empty())
- ModuleName += ".";
- ModuleName += Piece.first->getName();
- }
+
+ std::string ModuleName;
+ if (IsPartition) {
+ // We already checked that we are in a module purview in the parser.
+ assert(!ModuleScopes.empty() && "in a module purview, but no module?");
+ Module *NamedMod = ModuleScopes.back().Module;
+ // If we are importing into a partition, find the owning named module,
+ // otherwise, the name of the importing named module.
+ ModuleName = NamedMod->getPrimaryModuleInterfaceName().str();
+ ModuleName += ":";
+ ModuleName += stringFromPath(Path);
ModuleNameLoc = {PP.getIdentifierInfo(ModuleName), Path[0].second};
Path = ModuleIdPath(ModuleNameLoc);
+ } else if (getLangOpts().CPlusPlusModules) {
+ ModuleName = stringFromPath(Path);
+ ModuleNameLoc = {PP.getIdentifierInfo(ModuleName), Path[0].second};
+ Path = ModuleIdPath(ModuleNameLoc);
+ }
+
+ // Diagnose self-import before attempting a load.
+ // [module.import]/9
+ // A module implementation unit of a module M that is not a module partition
+ // shall not contain a module-import-declaration nominating M.
+ // (for an implementation, the module interface is imported implicitly,
+ // but that's handled in the module decl code).
+
+ if (getLangOpts().CPlusPlusModules && isCurrentModulePurview() &&
+ getCurrentModule()->Name == ModuleName) {
+ Diag(ImportLoc, diag::err_module_self_import_cxx20)
+ << ModuleName << currentModuleIsImplementation();
+ return true;
}
- Module *Mod =
- getModuleLoader().loadModule(ImportLoc, Path, Module::AllVisible,
- /*IsInclusionDirective=*/false);
+ Module *Mod = getModuleLoader().loadModule(
+ ImportLoc, Path, Module::AllVisible, /*IsInclusionDirective=*/false);
if (!Mod)
return true;
+ if (!Mod->isInterfaceOrPartition() && !ModuleName.empty() &&
+ !getLangOpts().ObjC) {
+ Diag(ImportLoc, diag::err_module_import_non_interface_nor_parition)
+ << ModuleName;
+ return true;
+ }
+
return ActOnModuleImport(StartLoc, ExportLoc, ImportLoc, Mod, Path);
}
@@ -340,8 +549,11 @@ static const ExportDecl *getEnclosingExportDecl(const Decl *D) {
DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
- SourceLocation ImportLoc,
- Module *Mod, ModuleIdPath Path) {
+ SourceLocation ImportLoc, Module *Mod,
+ ModuleIdPath Path) {
+ if (Mod->isHeaderUnit())
+ Diag(ImportLoc, diag::warn_experimental_header_unit);
+
VisibleModules.setVisible(Mod, ImportLoc);
checkModuleImportContext(*this, Mod, ImportLoc, CurContext);
@@ -349,12 +561,8 @@ DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
// FIXME: we should support importing a submodule within a different submodule
// of the same top-level module. Until we do, make it an error rather than
// silently ignoring the import.
- // Import-from-implementation is valid in the Modules TS. FIXME: Should we
- // warn on a redundant import of the current module?
- // FIXME: Import of a module from an implementation partition of the same
- // module is permitted.
- if (Mod->getTopLevelModuleName() == getLangOpts().CurrentModule &&
- (getLangOpts().isCompilingModule() || !getLangOpts().ModulesTS)) {
+ // FIXME: Should we warn on a redundant import of the current module?
+ if (Mod->isForBuilding(getLangOpts())) {
Diag(ImportLoc, getLangOpts().isCompilingModule()
? diag::err_module_self_import
: diag::err_module_import_in_implementation)
@@ -362,22 +570,26 @@ DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
}
SmallVector<SourceLocation, 2> IdentifierLocs;
- Module *ModCheck = Mod;
- for (unsigned I = 0, N = Path.size(); I != N; ++I) {
- // If we've run out of module parents, just drop the remaining identifiers.
- // We need the length to be consistent.
- if (!ModCheck)
- break;
- ModCheck = ModCheck->Parent;
- IdentifierLocs.push_back(Path[I].second);
- }
-
- // If this was a header import, pad out with dummy locations.
- // FIXME: Pass in and use the location of the header-name token in this case.
if (Path.empty()) {
- for (; ModCheck; ModCheck = ModCheck->Parent) {
+ // If this was a header import, pad out with dummy locations.
+ // FIXME: Pass in and use the location of the header-name token in this
+ // case.
+ for (Module *ModCheck = Mod; ModCheck; ModCheck = ModCheck->Parent)
IdentifierLocs.push_back(SourceLocation());
+ } else if (getLangOpts().CPlusPlusModules && !Mod->Parent) {
+ // A single identifier for the whole name.
+ IdentifierLocs.push_back(Path[0].second);
+ } else {
+ Module *ModCheck = Mod;
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ // If we've run out of module parents, just drop the remaining
+ // identifiers. We need the length to be consistent.
+ if (!ModCheck)
+ break;
+ ModCheck = ModCheck->Parent;
+
+ IdentifierLocs.push_back(Path[I].second);
}
}
@@ -390,11 +602,23 @@ DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
if (!ModuleScopes.empty())
Context.addModuleInitializer(ModuleScopes.back().Module, Import);
- // Re-export the module if needed.
- if (!ModuleScopes.empty() && ModuleScopes.back().ModuleInterface) {
+ // A module (partition) implementation unit shall not be exported.
+ if (getLangOpts().CPlusPlusModules && ExportLoc.isValid() &&
+ Mod->Kind == Module::ModuleKind::ModulePartitionImplementation) {
+ Diag(ExportLoc, diag::err_export_partition_impl)
+ << SourceRange(ExportLoc, Path.back().second);
+ } else if (!ModuleScopes.empty() && !currentModuleIsImplementation()) {
+ // Re-export the module if the imported module is exported.
+ // Note that we don't need to add re-exported module to Imports field
+ // since `Exports` implies the module is imported already.
if (ExportLoc.isValid() || getEnclosingExportDecl(Import))
getCurrentModule()->Exports.emplace_back(Mod, false);
+ else
+ getCurrentModule()->Imports.insert(Mod);
} else if (ExportLoc.isValid()) {
+ // [module.interface]p1:
+ // An export-declaration shall inhabit a namespace scope and appear in the
+ // purview of a module interface unit.
Diag(ExportLoc, diag::err_export_not_in_module_interface);
}
@@ -416,11 +640,9 @@ void Sema::BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod) {
TUKind == TU_Module &&
getSourceManager().isWrittenInMainFile(DirectiveLoc);
- bool ShouldAddImport = !IsInModuleIncludes;
-
- // If this module import was due to an inclusion directive, create an
- // implicit import declaration to capture it in the AST.
- if (ShouldAddImport) {
+ // If we are really importing a module (not just checking layering) due to an
+ // #include in the main file, synthesize an ImportDecl.
+ if (getLangOpts().Modules && !IsInModuleIncludes) {
TranslationUnitDecl *TU = getASTContext().getTranslationUnitDecl();
ImportDecl *ImportD = ImportDecl::CreateImplicit(getASTContext(), TU,
DirectiveLoc, Mod,
@@ -433,6 +655,13 @@ void Sema::BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod) {
getModuleLoader().makeModuleVisible(Mod, Module::AllVisible, DirectiveLoc);
VisibleModules.setVisible(Mod, DirectiveLoc);
+
+ if (getLangOpts().isCompilingModule()) {
+ Module *ThisModule = PP.getHeaderSearchInfo().lookupModule(
+ getLangOpts().CurrentModule, DirectiveLoc, false, false);
+ (void)ThisModule;
+ assert(ThisModule && "was expecting a module if building one");
+ }
}
void Sema::ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod) {
@@ -527,21 +756,30 @@ Decl *Sema::ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
// Set this temporarily so we know the export-declaration was braced.
D->setRBraceLoc(LBraceLoc);
+ CurContext->addDecl(D);
+ PushDeclContext(S, D);
+
// C++2a [module.interface]p1:
// An export-declaration shall appear only [...] in the purview of a module
// interface unit. An export-declaration shall not appear directly or
// indirectly within [...] a private-module-fragment.
- if (ModuleScopes.empty() || !ModuleScopes.back().Module->isModulePurview()) {
+ if (!isCurrentModulePurview()) {
Diag(ExportLoc, diag::err_export_not_in_module_interface) << 0;
- } else if (!ModuleScopes.back().ModuleInterface) {
+ D->setInvalidDecl();
+ return D;
+ } else if (currentModuleIsImplementation()) {
Diag(ExportLoc, diag::err_export_not_in_module_interface) << 1;
Diag(ModuleScopes.back().BeginLoc,
diag::note_not_module_interface_add_export)
<< FixItHint::CreateInsertion(ModuleScopes.back().BeginLoc, "export ");
+ D->setInvalidDecl();
+ return D;
} else if (ModuleScopes.back().Module->Kind ==
Module::PrivateModuleFragment) {
Diag(ExportLoc, diag::err_export_in_private_module_fragment);
Diag(ModuleScopes.back().BeginLoc, diag::note_private_module_fragment);
+ D->setInvalidDecl();
+ return D;
}
for (const DeclContext *DC = CurContext; DC; DC = DC->getLexicalParent()) {
@@ -553,7 +791,7 @@ Decl *Sema::ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
Diag(ND->getLocation(), diag::note_anonymous_namespace);
// Don't diagnose internal-linkage declarations in this region.
D->setInvalidDecl();
- break;
+ return D;
}
// A declaration is exported if it is [...] a namespace-definition
@@ -572,86 +810,40 @@ Decl *Sema::ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
Diag(ExportLoc, diag::err_export_within_export);
if (ED->hasBraces())
Diag(ED->getLocation(), diag::note_export);
+ D->setInvalidDecl();
+ return D;
}
- CurContext->addDecl(D);
- PushDeclContext(S, D);
D->setModuleOwnershipKind(Decl::ModuleOwnershipKind::VisibleWhenImported);
return D;
}
-static bool checkExportedDeclContext(Sema &S, DeclContext *DC,
- SourceLocation BlockStart);
-
-namespace {
-enum class UnnamedDeclKind {
- Empty,
- StaticAssert,
- Asm,
- UsingDirective,
- Context
-};
-}
+static bool checkExportedDecl(Sema &, Decl *, SourceLocation);
-static llvm::Optional<UnnamedDeclKind> getUnnamedDeclKind(Decl *D) {
- if (isa<EmptyDecl>(D))
- return UnnamedDeclKind::Empty;
- if (isa<StaticAssertDecl>(D))
- return UnnamedDeclKind::StaticAssert;
- if (isa<FileScopeAsmDecl>(D))
- return UnnamedDeclKind::Asm;
- if (isa<UsingDirectiveDecl>(D))
- return UnnamedDeclKind::UsingDirective;
- // Everything else either introduces one or more names or is ill-formed.
- return llvm::None;
-}
-
-unsigned getUnnamedDeclDiag(UnnamedDeclKind UDK, bool InBlock) {
- switch (UDK) {
- case UnnamedDeclKind::Empty:
- case UnnamedDeclKind::StaticAssert:
- // Allow empty-declarations and static_asserts in an export block as an
- // extension.
- return InBlock ? diag::ext_export_no_name_block : diag::err_export_no_name;
-
- case UnnamedDeclKind::UsingDirective:
- // Allow exporting using-directives as an extension.
- return diag::ext_export_using_directive;
-
- case UnnamedDeclKind::Context:
- // Allow exporting DeclContexts that transitively contain no declarations
- // as an extension.
- return diag::ext_export_no_names;
-
- case UnnamedDeclKind::Asm:
- return diag::err_export_no_name;
- }
- llvm_unreachable("unknown kind");
-}
-
-static void diagExportedUnnamedDecl(Sema &S, UnnamedDeclKind UDK, Decl *D,
- SourceLocation BlockStart) {
- S.Diag(D->getLocation(), getUnnamedDeclDiag(UDK, BlockStart.isValid()))
- << (unsigned)UDK;
- if (BlockStart.isValid())
- S.Diag(BlockStart, diag::note_export);
+/// Check that it's valid to export all the declarations in \p DC.
+static bool checkExportedDeclContext(Sema &S, DeclContext *DC,
+ SourceLocation BlockStart) {
+ bool AllUnnamed = true;
+ for (auto *D : DC->decls())
+ AllUnnamed &= checkExportedDecl(S, D, BlockStart);
+ return AllUnnamed;
}
/// Check that it's valid to export \p D.
static bool checkExportedDecl(Sema &S, Decl *D, SourceLocation BlockStart) {
- // C++2a [module.interface]p3:
- // An exported declaration shall declare at least one name
- if (auto UDK = getUnnamedDeclKind(D))
- diagExportedUnnamedDecl(S, *UDK, D, BlockStart);
- // [...] shall not declare a name with internal linkage.
+ // C++20 [module.interface]p3:
+ // [...] it shall not declare a name with internal linkage.
+ bool HasName = false;
if (auto *ND = dyn_cast<NamedDecl>(D)) {
// Don't diagnose anonymous union objects; we'll diagnose their members
// instead.
- if (ND->getDeclName() && ND->getFormalLinkage() == InternalLinkage) {
+ HasName = (bool)ND->getDeclName();
+ if (HasName && ND->getFormalLinkage() == Linkage::Internal) {
S.Diag(ND->getLocation(), diag::err_export_internal) << ND;
if (BlockStart.isValid())
S.Diag(BlockStart, diag::note_export);
+ return false;
}
}
@@ -660,29 +852,36 @@ static bool checkExportedDecl(Sema &S, Decl *D, SourceLocation BlockStart) {
// shall have been introduced with a name having external linkage
if (auto *USD = dyn_cast<UsingShadowDecl>(D)) {
NamedDecl *Target = USD->getUnderlyingDecl();
- if (Target->getFormalLinkage() == InternalLinkage) {
- S.Diag(USD->getLocation(), diag::err_export_using_internal) << Target;
+ Linkage Lk = Target->getFormalLinkage();
+ if (Lk == Linkage::Internal || Lk == Linkage::Module) {
+ S.Diag(USD->getLocation(), diag::err_export_using_internal)
+ << (Lk == Linkage::Internal ? 0 : 1) << Target;
S.Diag(Target->getLocation(), diag::note_using_decl_target);
if (BlockStart.isValid())
S.Diag(BlockStart, diag::note_export);
+ return false;
}
}
// Recurse into namespace-scope DeclContexts. (Only namespace-scope
- // declarations are exported.)
- if (auto *DC = dyn_cast<DeclContext>(D))
- if (DC->getRedeclContext()->isFileContext() && !isa<EnumDecl>(D))
- return checkExportedDeclContext(S, DC, BlockStart);
- return false;
-}
-
-/// Check that it's valid to export all the declarations in \p DC.
-static bool checkExportedDeclContext(Sema &S, DeclContext *DC,
- SourceLocation BlockStart) {
- bool AllUnnamed = true;
- for (auto *D : DC->decls())
- AllUnnamed &= checkExportedDecl(S, D, BlockStart);
- return AllUnnamed;
+ // declarations are exported).
+ if (auto *DC = dyn_cast<DeclContext>(D)) {
+ if (!isa<NamespaceDecl>(D))
+ return true;
+
+ if (auto *ND = dyn_cast<NamedDecl>(D)) {
+ if (!ND->getDeclName()) {
+ S.Diag(ND->getLocation(), diag::err_export_anon_ns_internal);
+ if (BlockStart.isValid())
+ S.Diag(BlockStart, diag::note_export);
+ return false;
+ } else if (!DC->decls().empty() &&
+ DC->getRedeclContext()->isFileContext()) {
+ return checkExportedDeclContext(S, DC, BlockStart);
+ }
+ }
+ }
+ return true;
}
/// Complete the definition of an export declaration.
@@ -697,14 +896,88 @@ Decl *Sema::ActOnFinishExportDecl(Scope *S, Decl *D, SourceLocation RBraceLoc) {
SourceLocation BlockStart =
ED->hasBraces() ? ED->getBeginLoc() : SourceLocation();
for (auto *Child : ED->decls()) {
- if (checkExportedDecl(*this, Child, BlockStart)) {
- // If a top-level child is a linkage-spec declaration, it might contain
- // no declarations (transitively), in which case it's ill-formed.
- diagExportedUnnamedDecl(*this, UnnamedDeclKind::Context, Child,
- BlockStart);
+ checkExportedDecl(*this, Child, BlockStart);
+ if (auto *FD = dyn_cast<FunctionDecl>(Child)) {
+ // [dcl.inline]/7
+ // If an inline function or variable that is attached to a named module
+ // is declared in a definition domain, it shall be defined in that
+ // domain.
+ // So, if the current declaration does not have a definition, we must
+ // check at the end of the TU (or when the PMF starts) to see that we
+ // have a definition at that point.
+ if (FD->isInlineSpecified() && !FD->isDefined())
+ PendingInlineFuncDecls.insert(FD);
}
}
}
return D;
}
+
+Module *Sema::PushGlobalModuleFragment(SourceLocation BeginLoc) {
+ // We shouldn't create new global module fragment if there is already
+ // one.
+ if (!TheGlobalModuleFragment) {
+ ModuleMap &Map = PP.getHeaderSearchInfo().getModuleMap();
+ TheGlobalModuleFragment = Map.createGlobalModuleFragmentForModuleUnit(
+ BeginLoc, getCurrentModule());
+ }
+
+ assert(TheGlobalModuleFragment && "module creation should not fail");
+
+ // Enter the scope of the global module.
+ ModuleScopes.push_back({BeginLoc, TheGlobalModuleFragment,
+ /*OuterVisibleModules=*/{}});
+ VisibleModules.setVisible(TheGlobalModuleFragment, BeginLoc);
+
+ return TheGlobalModuleFragment;
+}
+
+void Sema::PopGlobalModuleFragment() {
+ assert(!ModuleScopes.empty() &&
+ getCurrentModule()->isExplicitGlobalModule() &&
+ "left the wrong module scope, which is not global module fragment");
+ ModuleScopes.pop_back();
+}
+
+Module *Sema::PushImplicitGlobalModuleFragment(SourceLocation BeginLoc) {
+ if (!TheImplicitGlobalModuleFragment) {
+ ModuleMap &Map = PP.getHeaderSearchInfo().getModuleMap();
+ TheImplicitGlobalModuleFragment =
+ Map.createImplicitGlobalModuleFragmentForModuleUnit(BeginLoc,
+ getCurrentModule());
+ }
+ assert(TheImplicitGlobalModuleFragment && "module creation should not fail");
+
+ // Enter the scope of the global module.
+ ModuleScopes.push_back({BeginLoc, TheImplicitGlobalModuleFragment,
+ /*OuterVisibleModules=*/{}});
+ VisibleModules.setVisible(TheImplicitGlobalModuleFragment, BeginLoc);
+ return TheImplicitGlobalModuleFragment;
+}
+
+void Sema::PopImplicitGlobalModuleFragment() {
+ assert(!ModuleScopes.empty() &&
+ getCurrentModule()->isImplicitGlobalModule() &&
+ "left the wrong module scope, which is not global module fragment");
+ ModuleScopes.pop_back();
+}
+
+bool Sema::isCurrentModulePurview() const {
+ if (!getCurrentModule())
+ return false;
+
+ /// Does this Module scope describe part of the purview of a standard named
+ /// C++ module?
+ switch (getCurrentModule()->Kind) {
+ case Module::ModuleInterfaceUnit:
+ case Module::ModuleImplementationUnit:
+ case Module::ModulePartitionInterface:
+ case Module::ModulePartitionImplementation:
+ case Module::PrivateModuleFragment:
+ case Module::ImplicitGlobalModuleFragment:
+ return true;
+ default:
+ return false;
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp b/contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp
index 74c73ace3c5f..349c7fc9c91b 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaObjCProperty.cpp
@@ -112,8 +112,8 @@ CheckPropertyAgainstProtocol(Sema &S, ObjCPropertyDecl *Prop,
return;
// Look for a property with the same name.
- if (ObjCPropertyDecl *ProtoProp =
- Proto->lookup(Prop->getDeclName()).find_first<ObjCPropertyDecl>()) {
+ if (ObjCPropertyDecl *ProtoProp = Proto->getProperty(
+ Prop->getIdentifier(), Prop->isInstanceProperty())) {
S.DiagnosePropertyMismatch(Prop, ProtoProp, Proto->getIdentifier(), true);
return;
}
@@ -180,7 +180,7 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
unsigned Attributes = ODS.getPropertyAttributes();
FD.D.setObjCWeakProperty((Attributes & ObjCPropertyAttribute::kind_weak) !=
0);
- TypeSourceInfo *TSI = GetTypeForDeclarator(FD.D, S);
+ TypeSourceInfo *TSI = GetTypeForDeclarator(FD.D);
QualType T = TSI->getType();
if (!getOwnershipRule(Attributes)) {
Attributes |= deducePropertyOwnershipFromType(*this, T);
@@ -231,8 +231,8 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
bool FoundInSuper = false;
ObjCInterfaceDecl *CurrentInterfaceDecl = IFace;
while (ObjCInterfaceDecl *Super = CurrentInterfaceDecl->getSuperClass()) {
- if (ObjCPropertyDecl *SuperProp =
- Super->lookup(Res->getDeclName()).find_first<ObjCPropertyDecl>()) {
+ if (ObjCPropertyDecl *SuperProp = Super->getProperty(
+ Res->getIdentifier(), Res->isInstanceProperty())) {
DiagnosePropertyMismatch(Res, SuperProp, Super->getIdentifier(), false);
FoundInSuper = true;
break;
@@ -1028,7 +1028,7 @@ static bool hasWrittenStorageAttribute(ObjCPropertyDecl *Prop,
// Find the corresponding property in the primary class definition.
auto OrigClass = Category->getClassInterface();
- for (auto Found : OrigClass->lookup(Prop->getDeclName())) {
+ for (auto *Found : OrigClass->lookup(Prop->getDeclName())) {
if (ObjCPropertyDecl *OrigProp = dyn_cast<ObjCPropertyDecl>(Found))
return OrigProp->getPropertyAttributesAsWritten() & OwnershipMask;
}
@@ -1363,10 +1363,9 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (!Context.hasSameType(PropertyIvarType, IvarType)) {
if (isa<ObjCObjectPointerType>(PropertyIvarType)
&& isa<ObjCObjectPointerType>(IvarType))
- compat =
- Context.canAssignObjCInterfaces(
- PropertyIvarType->getAs<ObjCObjectPointerType>(),
- IvarType->getAs<ObjCObjectPointerType>());
+ compat = Context.canAssignObjCInterfaces(
+ PropertyIvarType->castAs<ObjCObjectPointerType>(),
+ IvarType->castAs<ObjCObjectPointerType>());
else {
compat = (CheckAssignmentConstraints(PropertyIvarLoc, PropertyIvarType,
IvarType)
@@ -1822,9 +1821,8 @@ CollectImmediateProperties(ObjCContainerDecl *CDecl,
static void CollectSuperClassPropertyImplementations(ObjCInterfaceDecl *CDecl,
ObjCInterfaceDecl::PropertyMap &PropMap) {
if (ObjCInterfaceDecl *SDecl = CDecl->getSuperClass()) {
- ObjCInterfaceDecl::PropertyDeclOrder PO;
while (SDecl) {
- SDecl->collectPropertiesToImplement(PropMap, PO);
+ SDecl->collectPropertiesToImplement(PropMap);
SDecl = SDecl->getSuperClass();
}
}
@@ -1889,15 +1887,14 @@ void Sema::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd) {
ObjCInterfaceDecl::PropertyMap PropMap;
- ObjCInterfaceDecl::PropertyDeclOrder PropertyOrder;
- IDecl->collectPropertiesToImplement(PropMap, PropertyOrder);
+ IDecl->collectPropertiesToImplement(PropMap);
if (PropMap.empty())
return;
ObjCInterfaceDecl::PropertyMap SuperPropMap;
CollectSuperClassPropertyImplementations(IDecl, SuperPropMap);
- for (unsigned i = 0, e = PropertyOrder.size(); i != e; i++) {
- ObjCPropertyDecl *Prop = PropertyOrder[i];
+ for (const auto &PropEntry : PropMap) {
+ ObjCPropertyDecl *Prop = PropEntry.second;
// Is there a matching property synthesize/dynamic?
if (Prop->isInvalidDecl() ||
Prop->isClassProperty() ||
@@ -2046,8 +2043,7 @@ void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
// its primary class (and its super classes) if property is
// declared in one of those containers.
if ((IDecl = C->getClassInterface())) {
- ObjCInterfaceDecl::PropertyDeclOrder PO;
- IDecl->collectPropertiesToImplement(NoNeedToImplPropMap, PO);
+ IDecl->collectPropertiesToImplement(NoNeedToImplPropMap);
}
}
if (IDecl)
@@ -2492,8 +2488,8 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
/*isPropertyAccessor=*/true, /*isSynthesizedAccessorStub=*/false,
/*isImplicitlyDeclared=*/true, /*isDefined=*/false,
(property->getPropertyImplementation() == ObjCPropertyDecl::Optional)
- ? ObjCMethodDecl::Optional
- : ObjCMethodDecl::Required);
+ ? ObjCImplementationControl::Optional
+ : ObjCImplementationControl::Required);
CD->addDecl(GetterMethod);
AddPropertyAttrs(*this, GetterMethod, property);
@@ -2511,8 +2507,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
if (const SectionAttr *SA = property->getAttr<SectionAttr>())
GetterMethod->addAttr(SectionAttr::CreateImplicit(
- Context, SA->getName(), Loc, AttributeCommonInfo::AS_GNU,
- SectionAttr::GNU_section));
+ Context, SA->getName(), Loc, SectionAttr::GNU_section));
if (getLangOpts().ObjCAutoRefCount)
CheckARCMethodDecl(GetterMethod);
@@ -2535,19 +2530,17 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
// for this class.
SourceLocation Loc = property->getLocation();
- SetterMethod =
- ObjCMethodDecl::Create(Context, Loc, Loc,
- property->getSetterName(), Context.VoidTy,
- nullptr, CD, !IsClassProperty,
- /*isVariadic=*/false,
- /*isPropertyAccessor=*/true,
- /*isSynthesizedAccessorStub=*/false,
- /*isImplicitlyDeclared=*/true,
- /*isDefined=*/false,
- (property->getPropertyImplementation() ==
- ObjCPropertyDecl::Optional) ?
- ObjCMethodDecl::Optional :
- ObjCMethodDecl::Required);
+ SetterMethod = ObjCMethodDecl::Create(
+ Context, Loc, Loc, property->getSetterName(), Context.VoidTy, nullptr,
+ CD, !IsClassProperty,
+ /*isVariadic=*/false,
+ /*isPropertyAccessor=*/true,
+ /*isSynthesizedAccessorStub=*/false,
+ /*isImplicitlyDeclared=*/true,
+ /*isDefined=*/false,
+ (property->getPropertyImplementation() == ObjCPropertyDecl::Optional)
+ ? ObjCImplementationControl::Optional
+ : ObjCImplementationControl::Required);
// Remove all qualifiers from the setter's parameter type.
QualType paramTy =
@@ -2574,7 +2567,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
/*TInfo=*/nullptr,
SC_None,
nullptr);
- SetterMethod->setMethodParams(Context, Argument, None);
+ SetterMethod->setMethodParams(Context, Argument, std::nullopt);
AddPropertyAttrs(*this, SetterMethod, property);
@@ -2584,8 +2577,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property) {
CD->addDecl(SetterMethod);
if (const SectionAttr *SA = property->getAttr<SectionAttr>())
SetterMethod->addAttr(SectionAttr::CreateImplicit(
- Context, SA->getName(), Loc, AttributeCommonInfo::AS_GNU,
- SectionAttr::GNU_section));
+ Context, SA->getName(), Loc, SectionAttr::GNU_section));
// It's possible for the user to have set a very odd custom
// setter selector that causes it to have a method family.
if (getLangOpts().ObjCAutoRefCount)
@@ -2757,7 +2749,7 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
if (Attributes & ObjCPropertyAttribute::kind_weak) {
// 'weak' and 'nonnull' are mutually exclusive.
- if (auto nullability = PropertyTy->getNullability(Context)) {
+ if (auto nullability = PropertyTy->getNullability()) {
if (*nullability == NullabilityKind::NonNull)
Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
<< "nonnull" << "weak";
@@ -2801,9 +2793,7 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
}
// FIXME: Implement warning dependent on NSCopying being
- // implemented. See also:
- // <rdar://5168496&4855821&5607453&5096644&4947311&5698469&4947014&5168496>
- // (please trim this list while you are at it).
+ // implemented.
}
if (!(Attributes & ObjCPropertyAttribute::kind_copy) &&
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp b/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp
index c0cd2bf18a77..217fcb979dee 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp
@@ -27,16 +27,21 @@
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ParsedAttr.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/PointerEmbeddedInt.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Frontend/OpenMP/OMPAssume.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
+#include <optional>
#include <set>
using namespace clang;
@@ -57,7 +62,8 @@ enum DefaultDataSharingAttributes {
DSA_unspecified = 0, /// Data sharing attribute not specified.
DSA_none = 1 << 0, /// Default data sharing attribute 'none'.
DSA_shared = 1 << 1, /// Default data sharing attribute 'shared'.
- DSA_firstprivate = 1 << 2, /// Default data sharing attribute 'firstprivate'.
+ DSA_private = 1 << 2, /// Default data sharing attribute 'private'.
+ DSA_firstprivate = 1 << 3, /// Default data sharing attribute 'firstprivate'.
};
/// Stack for tracking declarations used in OpenMP directives and
@@ -83,8 +89,7 @@ public:
};
using OperatorOffsetTy =
llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>;
- using DoacrossDependMapTy =
- llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>;
+ using DoacrossClauseMapTy = llvm::DenseMap<OMPClause *, OperatorOffsetTy>;
/// Kind of the declaration used in the uses_allocators clauses.
enum class UsesAllocatorsDeclKind {
/// Predefined allocator
@@ -156,8 +161,12 @@ private:
LoopControlVariablesMapTy LCVMap;
DefaultDataSharingAttributes DefaultAttr = DSA_unspecified;
SourceLocation DefaultAttrLoc;
- DefaultmapInfo DefaultmapMap[OMPC_DEFAULTMAP_unknown];
+ DefaultmapInfo DefaultmapMap[OMPC_DEFAULTMAP_unknown + 1];
OpenMPDirectiveKind Directive = OMPD_unknown;
+ /// GenericLoopDirective with bind clause is mapped to other directives,
+ /// like for, distribute and simd. Presently, set MappedDirective to
+ /// OMPLoop. This may also be used in a similar way for other constructs.
+ OpenMPDirectiveKind MappedDirective = OMPD_unknown;
DeclarationNameInfo DirectiveName;
Scope *CurScope = nullptr;
DeclContext *Context = nullptr;
@@ -165,15 +174,17 @@ private:
/// Set of 'depend' clauses with 'sink|source' dependence kind. Required to
/// get the data (loop counters etc.) about enclosing loop-based construct.
/// This data is required during codegen.
- DoacrossDependMapTy DoacrossDepends;
+ DoacrossClauseMapTy DoacrossDepends;
/// First argument (Expr *) contains optional argument of the
/// 'ordered' clause, the second one is true if the regions has 'ordered'
/// clause, false otherwise.
- llvm::Optional<std::pair<const Expr *, OMPOrderedClause *>> OrderedRegion;
+ std::optional<std::pair<const Expr *, OMPOrderedClause *>> OrderedRegion;
+ bool RegionHasOrderConcurrent = false;
unsigned AssociatedLoops = 1;
bool HasMutipleLoops = false;
const Decl *PossiblyLoopCounter = nullptr;
bool NowaitRegion = false;
+ bool UntiedRegion = false;
bool CancelRegion = false;
bool LoopStart = false;
bool BodyComplete = false;
@@ -192,7 +203,24 @@ private:
llvm::DenseSet<CanonicalDeclPtr<Decl>> UsedInScanDirective;
llvm::DenseMap<CanonicalDeclPtr<const Decl>, UsesAllocatorsDeclKind>
UsesAllocatorsDecls;
+ /// Data is required on creating capture fields for implicit
+ /// default first|private clause.
+ struct ImplicitDefaultFDInfoTy {
+ /// Field decl.
+ const FieldDecl *FD = nullptr;
+ /// Nesting stack level
+ size_t StackLevel = 0;
+ /// Capture variable decl.
+ VarDecl *VD = nullptr;
+ ImplicitDefaultFDInfoTy(const FieldDecl *FD, size_t StackLevel,
+ VarDecl *VD)
+ : FD(FD), StackLevel(StackLevel), VD(VD) {}
+ };
+ /// List of captured fields
+ llvm::SmallVector<ImplicitDefaultFDInfoTy, 8>
+ ImplicitDefaultFirstprivateFDs;
Expr *DeclareMapperVar = nullptr;
+ SmallVector<VarDecl *, 16> IteratorVarDecls;
SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
Scope *CurScope, SourceLocation Loc)
: Directive(DKind), DirectiveName(Name), CurScope(CurScope),
@@ -255,14 +283,14 @@ private:
return &Stack.back().first[Size - 1];
}
const SharingMapTy *getTopOfStackOrNull() const {
- return const_cast<DSAStackTy&>(*this).getTopOfStackOrNull();
+ return const_cast<DSAStackTy &>(*this).getTopOfStackOrNull();
}
SharingMapTy &getTopOfStack() {
assert(!isStackEmpty() && "no current directive");
return *getTopOfStackOrNull();
}
const SharingMapTy &getTopOfStack() const {
- return const_cast<DSAStackTy&>(*this).getTopOfStack();
+ return const_cast<DSAStackTy &>(*this).getTopOfStack();
}
SharingMapTy *getSecondOnStackOrNull() {
@@ -272,7 +300,7 @@ private:
return &Stack.back().first[Size - 2];
}
const SharingMapTy *getSecondOnStackOrNull() const {
- return const_cast<DSAStackTy&>(*this).getSecondOnStackOrNull();
+ return const_cast<DSAStackTy &>(*this).getSecondOnStackOrNull();
}
/// Get the stack element at a certain level (previously returned by
@@ -286,7 +314,7 @@ private:
return Stack.back().first[Level];
}
const SharingMapTy &getStackElemAtLevel(unsigned Level) const {
- return const_cast<DSAStackTy&>(*this).getStackElemAtLevel(Level);
+ return const_cast<DSAStackTy &>(*this).getStackElemAtLevel(Level);
}
DSAVarData getDSA(const_iterator &Iter, ValueDecl *D) const;
@@ -310,6 +338,8 @@ private:
/// Vector of previously encountered target directives
SmallVector<SourceLocation, 2> TargetLocations;
SourceLocation AtomicLocation;
+ /// Vector of declare variant construct traits.
+ SmallVector<llvm::omp::TraitProperty, 8> ConstructTraits;
public:
explicit DSAStackTy(Sema &S) : SemaRef(S) {}
@@ -352,9 +382,7 @@ public:
const SharingMapTy *Top = getTopOfStackOrNull();
return Top && Top->BodyComplete;
}
- void setBodyComplete() {
- getTopOfStack().BodyComplete = true;
- }
+ void setBodyComplete() { getTopOfStack().BodyComplete = true; }
bool isForceVarCapturing() const { return ForceCapturing; }
void setForceVarCapturing(bool V) { ForceCapturing = V; }
@@ -390,6 +418,7 @@ public:
class ParentDirectiveScope {
DSAStackTy &Self;
bool Active;
+
public:
ParentDirectiveScope(DSAStackTy &Self, bool Activate)
: Self(Self), Active(false) {
@@ -431,8 +460,7 @@ public:
}
/// Marks (or clears) declaration as possibly loop counter.
void resetPossibleLoopCounter(const Decl *D = nullptr) {
- getTopOfStack().PossiblyLoopCounter =
- D ? D->getCanonicalDecl() : D;
+ getTopOfStack().PossiblyLoopCounter = D ? D->getCanonicalDecl() : D;
}
/// Gets the possible loop counter decl.
const Decl *getPossiblyLoopCunter() const {
@@ -513,7 +541,7 @@ public:
/// Checks if the specified declaration was used in the inner scan directive.
bool isUsedInScanDirective(ValueDecl *D) const {
if (const SharingMapTy *Stack = getTopOfStackOrNull())
- return Stack->UsedInScanDirective.count(D) > 0;
+ return Stack->UsedInScanDirective.contains(D);
return false;
}
@@ -573,7 +601,9 @@ public:
/// predicate.
const DSAVarData
hasDSA(ValueDecl *D,
- const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
+ const llvm::function_ref<bool(OpenMPClauseKind, bool,
+ DefaultDataSharingAttributes)>
+ CPred,
const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
bool FromParent) const;
/// Checks if the specified variables has data-sharing attributes which
@@ -610,6 +640,24 @@ public:
const SharingMapTy *Top = getTopOfStackOrNull();
return Top ? Top->Directive : OMPD_unknown;
}
+ OpenMPDirectiveKind getMappedDirective() const {
+ const SharingMapTy *Top = getTopOfStackOrNull();
+ return Top ? Top->MappedDirective : OMPD_unknown;
+ }
+ void setCurrentDirective(OpenMPDirectiveKind NewDK) {
+ SharingMapTy *Top = getTopOfStackOrNull();
+ assert(Top &&
+ "Before calling setCurrentDirective Top of Stack not to be NULL.");
+ // Store the old into MappedDirective & assign argument NewDK to Directive.
+ Top->Directive = NewDK;
+ }
+ void setMappedDirective(OpenMPDirectiveKind NewDK) {
+ SharingMapTy *Top = getTopOfStackOrNull();
+ assert(Top &&
+ "Before calling setMappedDirective Top of Stack not to be NULL.");
+ // Store the old into MappedDirective & assign argument NewDK to Directive.
+ Top->MappedDirective = NewDK;
+ }
/// Returns directive kind at specified level.
OpenMPDirectiveKind getDirective(unsigned Level) const {
assert(!isStackEmpty() && "No directive at specified level.");
@@ -629,13 +677,10 @@ public:
}
/// Add requires decl to internal vector
- void addRequiresDecl(OMPRequiresDecl *RD) {
- RequiresDecls.push_back(RD);
- }
+ void addRequiresDecl(OMPRequiresDecl *RD) { RequiresDecls.push_back(RD); }
/// Checks if the defined 'requires' directive has specified type of clause.
- template <typename ClauseType>
- bool hasRequiresDeclWithClause() const {
+ template <typename ClauseType> bool hasRequiresDeclWithClause() const {
return llvm::any_of(RequiresDecls, [](const OMPRequiresDecl *D) {
return llvm::any_of(D->clauselists(), [](const OMPClause *C) {
return isa<ClauseType>(C);
@@ -678,9 +723,7 @@ public:
/// Returns the location of the first encountered atomic directive in the
/// module.
- SourceLocation getAtomicDirectiveLoc() const {
- return AtomicLocation;
- }
+ SourceLocation getAtomicDirectiveLoc() const { return AtomicLocation; }
// Return previously encountered target region locations.
ArrayRef<SourceLocation> getEncounteredTargetLocs() const {
@@ -697,6 +740,11 @@ public:
getTopOfStack().DefaultAttr = DSA_shared;
getTopOfStack().DefaultAttrLoc = Loc;
}
+ /// Set default data sharing attribute to private.
+ void setDefaultDSAPrivate(SourceLocation Loc) {
+ getTopOfStack().DefaultAttr = DSA_private;
+ getTopOfStack().DefaultAttrLoc = Loc;
+ }
/// Set default data sharing attribute to firstprivate.
void setDefaultDSAFirstPrivate(SourceLocation Loc) {
getTopOfStack().DefaultAttr = DSA_firstprivate;
@@ -704,8 +752,7 @@ public:
}
/// Set default data mapping attribute to Modifier:Kind
void setDefaultDMAAttr(OpenMPDefaultmapClauseModifier M,
- OpenMPDefaultmapClauseKind Kind,
- SourceLocation Loc) {
+ OpenMPDefaultmapClauseKind Kind, SourceLocation Loc) {
DefaultmapInfo &DMI = getTopOfStack().DefaultmapMap[Kind];
DMI.ImplicitBehavior = M;
DMI.SLoc = Loc;
@@ -726,17 +773,31 @@ public:
OMPC_DEFAULTMAP_MODIFIER_unknown;
}
+ ArrayRef<llvm::omp::TraitProperty> getConstructTraits() {
+ return ConstructTraits;
+ }
+ void handleConstructTrait(ArrayRef<llvm::omp::TraitProperty> Traits,
+ bool ScopeEntry) {
+ if (ScopeEntry)
+ ConstructTraits.append(Traits.begin(), Traits.end());
+ else
+ for (llvm::omp::TraitProperty Trait : llvm::reverse(Traits)) {
+ llvm::omp::TraitProperty Top = ConstructTraits.pop_back_val();
+ assert(Top == Trait && "Something left a trait on the stack!");
+ (void)Trait;
+ (void)Top;
+ }
+ }
+
DefaultDataSharingAttributes getDefaultDSA(unsigned Level) const {
return getStackSize() <= Level ? DSA_unspecified
: getStackElemAtLevel(Level).DefaultAttr;
}
DefaultDataSharingAttributes getDefaultDSA() const {
- return isStackEmpty() ? DSA_unspecified
- : getTopOfStack().DefaultAttr;
+ return isStackEmpty() ? DSA_unspecified : getTopOfStack().DefaultAttr;
}
SourceLocation getDefaultDSALocation() const {
- return isStackEmpty() ? SourceLocation()
- : getTopOfStack().DefaultAttrLoc;
+ return isStackEmpty() ? SourceLocation() : getTopOfStack().DefaultAttrLoc;
}
OpenMPDefaultmapClauseModifier
getDefaultmapModifier(OpenMPDefaultmapClauseKind Kind) const {
@@ -805,31 +866,42 @@ public:
/// false - otherwise.
bool isOrderedRegion() const {
if (const SharingMapTy *Top = getTopOfStackOrNull())
- return Top->OrderedRegion.hasValue();
+ return Top->OrderedRegion.has_value();
return false;
}
/// Returns optional parameter for the ordered region.
std::pair<const Expr *, OMPOrderedClause *> getOrderedRegionParam() const {
if (const SharingMapTy *Top = getTopOfStackOrNull())
- if (Top->OrderedRegion.hasValue())
- return Top->OrderedRegion.getValue();
+ if (Top->OrderedRegion)
+ return *Top->OrderedRegion;
return std::make_pair(nullptr, nullptr);
}
/// Returns true, if parent region is ordered (has associated
/// 'ordered' clause), false - otherwise.
bool isParentOrderedRegion() const {
if (const SharingMapTy *Parent = getSecondOnStackOrNull())
- return Parent->OrderedRegion.hasValue();
+ return Parent->OrderedRegion.has_value();
return false;
}
/// Returns optional parameter for the ordered region.
std::pair<const Expr *, OMPOrderedClause *>
getParentOrderedRegionParam() const {
if (const SharingMapTy *Parent = getSecondOnStackOrNull())
- if (Parent->OrderedRegion.hasValue())
- return Parent->OrderedRegion.getValue();
+ if (Parent->OrderedRegion)
+ return *Parent->OrderedRegion;
return std::make_pair(nullptr, nullptr);
}
+ /// Marks current region as having an 'order' clause.
+ void setRegionHasOrderConcurrent(bool HasOrderConcurrent) {
+ getTopOfStack().RegionHasOrderConcurrent = HasOrderConcurrent;
+ }
+ /// Returns true, if parent region is order (has associated
+ /// 'order' clause), false - otherwise.
+ bool isParentOrderConcurrent() const {
+ if (const SharingMapTy *Parent = getSecondOnStackOrNull())
+ return Parent->RegionHasOrderConcurrent;
+ return false;
+ }
/// Marks current region as nowait (it has a 'nowait' clause).
void setNowaitRegion(bool IsNowait = true) {
getTopOfStack().NowaitRegion = IsNowait;
@@ -841,6 +913,15 @@ public:
return Parent->NowaitRegion;
return false;
}
+ /// Marks current region as untied (it has a 'untied' clause).
+ void setUntiedRegion(bool IsUntied = true) {
+ getTopOfStack().UntiedRegion = IsUntied;
+ }
+ /// Return true if current region is untied.
+ bool isUntiedRegion() const {
+ const SharingMapTy *Top = getTopOfStackOrNull();
+ return Top ? Top->UntiedRegion : false;
+ }
/// Marks parent region as cancel region.
void setParentCancelRegion(bool Cancel = true) {
if (SharingMapTy *Parent = getSecondOnStackOrNull())
@@ -996,17 +1077,16 @@ public:
assert(!isStackEmpty());
return getStackSize() - 1;
}
- void addDoacrossDependClause(OMPDependClause *C,
- const OperatorOffsetTy &OpsOffs) {
+ void addDoacrossDependClause(OMPClause *C, const OperatorOffsetTy &OpsOffs) {
SharingMapTy *Parent = getSecondOnStackOrNull();
assert(Parent && isOpenMPWorksharingDirective(Parent->Directive));
Parent->DoacrossDepends.try_emplace(C, OpsOffs);
}
- llvm::iterator_range<DoacrossDependMapTy::const_iterator>
+ llvm::iterator_range<DoacrossClauseMapTy::const_iterator>
getDoacrossDependClauses() const {
const SharingMapTy &StackElem = getTopOfStack();
if (isOpenMPWorksharingDirective(StackElem.Directive)) {
- const DoacrossDependMapTy &Ref = StackElem.DoacrossDepends;
+ const DoacrossClauseMapTy &Ref = StackElem.DoacrossDepends;
return llvm::make_range(Ref.begin(), Ref.end());
}
return llvm::make_range(StackElem.DoacrossDepends.end(),
@@ -1022,7 +1102,7 @@ public:
// Return set of mapped classes types
bool isClassPreviouslyMapped(QualType QT) const {
const SharingMapTy &StackElem = getTopOfStack();
- return StackElem.MappedClassesQualTypes.count(QT) != 0;
+ return StackElem.MappedClassesQualTypes.contains(QT);
}
/// Adds global declare target to the parent target region.
@@ -1061,7 +1141,7 @@ public:
}
/// Checks if the decl is implicitly firstprivate in the task-based region.
bool isImplicitTaskFirstprivate(Decl *D) const {
- return getTopOfStack().ImplicitTaskFirstprivates.count(D) > 0;
+ return getTopOfStack().ImplicitTaskFirstprivates.contains(D);
}
/// Marks decl as used in uses_allocators clause as the allocator.
@@ -1070,19 +1150,20 @@ public:
}
/// Checks if specified decl is used in uses allocator clause as the
/// allocator.
- Optional<UsesAllocatorsDeclKind> isUsesAllocatorsDecl(unsigned Level,
- const Decl *D) const {
+ std::optional<UsesAllocatorsDeclKind>
+ isUsesAllocatorsDecl(unsigned Level, const Decl *D) const {
const SharingMapTy &StackElem = getTopOfStack();
auto I = StackElem.UsesAllocatorsDecls.find(D);
if (I == StackElem.UsesAllocatorsDecls.end())
- return None;
+ return std::nullopt;
return I->getSecond();
}
- Optional<UsesAllocatorsDeclKind> isUsesAllocatorsDecl(const Decl *D) const {
+ std::optional<UsesAllocatorsDeclKind>
+ isUsesAllocatorsDecl(const Decl *D) const {
const SharingMapTy &StackElem = getTopOfStack();
auto I = StackElem.UsesAllocatorsDecls.find(D);
if (I == StackElem.UsesAllocatorsDecls.end())
- return None;
+ return std::nullopt;
return I->getSecond();
}
@@ -1094,6 +1175,66 @@ public:
const SharingMapTy *Top = getTopOfStackOrNull();
return Top ? Top->DeclareMapperVar : nullptr;
}
+
+ /// Add a new iterator variable.
+ void addIteratorVarDecl(VarDecl *VD) {
+ SharingMapTy &StackElem = getTopOfStack();
+ StackElem.IteratorVarDecls.push_back(VD->getCanonicalDecl());
+ }
+ /// Check if variable declaration is an iterator VarDecl.
+ bool isIteratorVarDecl(const VarDecl *VD) const {
+ const SharingMapTy *Top = getTopOfStackOrNull();
+ if (!Top)
+ return false;
+
+ return llvm::is_contained(Top->IteratorVarDecls, VD->getCanonicalDecl());
+ }
+ /// get captured field from ImplicitDefaultFirstprivateFDs
+ VarDecl *getImplicitFDCapExprDecl(const FieldDecl *FD) const {
+ const_iterator I = begin();
+ const_iterator EndI = end();
+ size_t StackLevel = getStackSize();
+ for (; I != EndI; ++I) {
+ if (I->DefaultAttr == DSA_firstprivate || I->DefaultAttr == DSA_private)
+ break;
+ StackLevel--;
+ }
+ assert((StackLevel > 0 && I != EndI) || (StackLevel == 0 && I == EndI));
+ if (I == EndI)
+ return nullptr;
+ for (const auto &IFD : I->ImplicitDefaultFirstprivateFDs)
+ if (IFD.FD == FD && IFD.StackLevel == StackLevel)
+ return IFD.VD;
+ return nullptr;
+ }
+ /// Check if capture decl is field captured in ImplicitDefaultFirstprivateFDs
+ bool isImplicitDefaultFirstprivateFD(VarDecl *VD) const {
+ const_iterator I = begin();
+ const_iterator EndI = end();
+ for (; I != EndI; ++I)
+ if (I->DefaultAttr == DSA_firstprivate || I->DefaultAttr == DSA_private)
+ break;
+ if (I == EndI)
+ return false;
+ for (const auto &IFD : I->ImplicitDefaultFirstprivateFDs)
+ if (IFD.VD == VD)
+ return true;
+ return false;
+ }
+ /// Store capture FD info in ImplicitDefaultFirstprivateFDs
+ void addImplicitDefaultFirstprivateFD(const FieldDecl *FD, VarDecl *VD) {
+ iterator I = begin();
+ const_iterator EndI = end();
+ size_t StackLevel = getStackSize();
+ for (; I != EndI; ++I) {
+ if (I->DefaultAttr == DSA_private || I->DefaultAttr == DSA_firstprivate) {
+ I->ImplicitDefaultFirstprivateFDs.emplace_back(FD, StackLevel, VD);
+ break;
+ }
+ StackLevel--;
+ }
+ assert((StackLevel > 0 && I != EndI) || (StackLevel == 0 && I == EndI));
+ }
};
bool isImplicitTaskingRegion(OpenMPDirectiveKind DKind) {
@@ -1213,7 +1354,7 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(const_iterator &Iter,
case DSA_none:
return DVar;
case DSA_firstprivate:
- if (VD->getStorageDuration() == SD_Static &&
+ if (VD && VD->getStorageDuration() == SD_Static &&
VD->getDeclContext()->isFileContext()) {
DVar.CKind = OMPC_unknown;
} else {
@@ -1221,6 +1362,18 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(const_iterator &Iter,
}
DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
return DVar;
+ case DSA_private:
+ // each variable with static storage duration that is declared
+ // in a namespace or global scope and referenced in the construct,
+ // and that does not have a predetermined data-sharing attribute
+ if (VD && VD->getStorageDuration() == SD_Static &&
+ VD->getDeclContext()->isFileContext()) {
+ DVar.CKind = OMPC_unknown;
+ } else {
+ DVar.CKind = OMPC_private;
+ }
+ DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
+ return DVar;
case DSA_unspecified:
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, implicitly determined, p.2]
@@ -1439,8 +1592,7 @@ void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
"Additional reduction info may be specified only once for reduction "
"items.");
ReductionData.set(BOK, SR);
- Expr *&TaskgroupReductionRef =
- getTopOfStack().TaskgroupReductionRef;
+ Expr *&TaskgroupReductionRef = getTopOfStack().TaskgroupReductionRef;
if (!TaskgroupReductionRef) {
VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
SemaRef.Context.VoidPtrTy, ".task_red.");
@@ -1465,8 +1617,7 @@ void DSAStackTy::addTaskgroupReductionData(const ValueDecl *D, SourceRange SR,
"Additional reduction info may be specified only once for reduction "
"items.");
ReductionData.set(ReductionRef, SR);
- Expr *&TaskgroupReductionRef =
- getTopOfStack().TaskgroupReductionRef;
+ Expr *&TaskgroupReductionRef = getTopOfStack().TaskgroupReductionRef;
if (!TaskgroupReductionRef) {
VarDecl *VD = buildVarDecl(SemaRef, SR.getBegin(),
SemaRef.Context.VoidPtrTy, ".task_red.");
@@ -1577,10 +1728,9 @@ static bool rejectConstNotMutableType(Sema &SemaRef, const ValueDecl *D,
ASTContext &Context = SemaRef.getASTContext();
bool IsClassType;
if (isConstNotMutableType(SemaRef, Type, AcceptIfMutable, &IsClassType)) {
- unsigned Diag = ListItemNotVar
- ? diag::err_omp_const_list_item
- : IsClassType ? diag::err_omp_const_not_mutable_variable
- : diag::err_omp_const_variable;
+ unsigned Diag = ListItemNotVar ? diag::err_omp_const_list_item
+ : IsClassType ? diag::err_omp_const_not_mutable_variable
+ : diag::err_omp_const_variable;
SemaRef.Diag(ELoc, Diag) << getOpenMPClauseName(CKind);
if (!ListItemNotVar && D) {
const VarDecl *VD = dyn_cast<VarDecl>(D);
@@ -1640,8 +1790,7 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
});
if (IterTarget != end()) {
const_iterator ParentIterTarget = IterTarget + 1;
- for (const_iterator Iter = begin();
- Iter != ParentIterTarget; ++Iter) {
+ for (const_iterator Iter = begin(); Iter != ParentIterTarget; ++Iter) {
if (isOpenMPLocal(VD, Iter)) {
DVar.RefExpr =
buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
@@ -1659,9 +1808,9 @@ const DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D,
return DVar;
}
const_iterator End = end();
- if (!SemaRef.isOpenMPCapturedByRef(
- D, std::distance(ParentIterTarget, End),
- /*OpenMPCaptureLevel=*/0)) {
+ if (!SemaRef.isOpenMPCapturedByRef(D,
+ std::distance(ParentIterTarget, End),
+ /*OpenMPCaptureLevel=*/0)) {
DVar.RefExpr =
buildDeclRefExpr(SemaRef, VD, D->getType().getNonReferenceType(),
IterTarget->ConstructLoc);
@@ -1781,7 +1930,9 @@ const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
const DSAStackTy::DSAVarData
DSAStackTy::hasDSA(ValueDecl *D,
- const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
+ const llvm::function_ref<bool(OpenMPClauseKind, bool,
+ DefaultDataSharingAttributes)>
+ CPred,
const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
bool FromParent) const {
if (isStackEmpty())
@@ -1797,7 +1948,7 @@ DSAStackTy::hasDSA(ValueDecl *D,
continue;
const_iterator NewI = I;
DSAVarData DVar = getDSA(NewI, D);
- if (I == NewI && CPred(DVar.CKind, DVar.AppliedToPointee))
+ if (I == NewI && CPred(DVar.CKind, DVar.AppliedToPointee, I->DefaultAttr))
return DVar;
}
return {};
@@ -1873,16 +2024,14 @@ void Sema::InitDataSharingAttributesStack() {
#define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)
-void Sema::pushOpenMPFunctionRegion() {
- DSAStack->pushFunction();
-}
+void Sema::pushOpenMPFunctionRegion() { DSAStack->pushFunction(); }
void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
DSAStack->popFunction(OldFSI);
}
static bool isOpenMPDeviceDelayedContext(Sema &S) {
- assert(S.LangOpts.OpenMP && S.LangOpts.OpenMPIsDevice &&
+ assert(S.LangOpts.OpenMP && S.LangOpts.OpenMPIsTargetDevice &&
"Expected OpenMP device compilation.");
return !S.isInOpenMPTargetExecutionDirective();
}
@@ -1896,10 +2045,10 @@ enum class FunctionEmissionStatus {
};
} // anonymous namespace
-Sema::SemaDiagnosticBuilder Sema::diagIfOpenMPDeviceCode(SourceLocation Loc,
- unsigned DiagID,
- FunctionDecl *FD) {
- assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
+Sema::SemaDiagnosticBuilder
+Sema::diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID,
+ const FunctionDecl *FD) {
+ assert(LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice &&
"Expected OpenMP device compilation.");
SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop;
@@ -1936,8 +2085,8 @@ Sema::SemaDiagnosticBuilder Sema::diagIfOpenMPDeviceCode(SourceLocation Loc,
Sema::SemaDiagnosticBuilder Sema::diagIfOpenMPHostCode(SourceLocation Loc,
unsigned DiagID,
- FunctionDecl *FD) {
- assert(LangOpts.OpenMP && !LangOpts.OpenMPIsDevice &&
+ const FunctionDecl *FD) {
+ assert(LangOpts.OpenMP && !LangOpts.OpenMPIsTargetDevice &&
"Expected OpenMP host compilation.");
SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop;
@@ -1995,7 +2144,7 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
//
// =========================================================================
// | type | defaultmap | pvt | first | is_device_ptr | map | res. |
- // | |(tofrom:scalar)| | pvt | | | |
+ // | |(tofrom:scalar)| | pvt | |has_dv_adr| |
// =========================================================================
// | scl | | | | - | | bycopy|
// | scl | | - | x | - | - | bycopy|
@@ -2052,14 +2201,15 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
DSAStack->checkMappableExprComponentListsForDeclAtLevel(
D, Level,
- [&IsVariableUsedInMapClause, &IsVariableAssociatedWithSection, D](
- OMPClauseMappableExprCommon::MappableExprComponentListRef
+ [&IsVariableUsedInMapClause, &IsVariableAssociatedWithSection,
+ D](OMPClauseMappableExprCommon::MappableExprComponentListRef
MapExprComponents,
OpenMPClauseKind WhereFoundClauseKind) {
- // Only the map clause information influences how a variable is
- // captured. E.g. is_device_ptr does not require changing the default
- // behavior.
- if (WhereFoundClauseKind != OMPC_map)
+ // Both map and has_device_addr clauses information influences how a
+ // variable is captured. E.g. is_device_ptr does not require changing
+ // the default behavior.
+ if (WhereFoundClauseKind != OMPC_map &&
+ WhereFoundClauseKind != OMPC_has_device_addr)
return false;
auto EI = MapExprComponents.rbegin();
@@ -2073,11 +2223,14 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
++EI;
if (EI == EE)
return false;
-
- if (isa<ArraySubscriptExpr>(EI->getAssociatedExpression()) ||
- isa<OMPArraySectionExpr>(EI->getAssociatedExpression()) ||
+ auto Last = std::prev(EE);
+ const auto *UO =
+ dyn_cast<UnaryOperator>(Last->getAssociatedExpression());
+ if ((UO && UO->getOpcode() == UO_Deref) ||
+ isa<ArraySubscriptExpr>(Last->getAssociatedExpression()) ||
+ isa<OMPArraySectionExpr>(Last->getAssociatedExpression()) ||
isa<MemberExpr>(EI->getAssociatedExpression()) ||
- isa<OMPArrayShapingExpr>(EI->getAssociatedExpression())) {
+ isa<OMPArrayShapingExpr>(Last->getAssociatedExpression())) {
IsVariableAssociatedWithSection = true;
// There is nothing more we need to know about this variable.
return true;
@@ -2128,7 +2281,8 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
!cast<OMPCapturedExprDecl>(D)->getInit()->isGLValue()) &&
// If the variable is implicitly firstprivate and scalar - capture by
// copy
- !(DSAStack->getDefaultDSA() == DSA_firstprivate &&
+ !((DSAStack->getDefaultDSA() == DSA_firstprivate ||
+ DSAStack->getDefaultDSA() == DSA_private) &&
!DSAStack->hasExplicitDSA(
D, [](OpenMPClauseKind K, bool) { return K != OMPC_unknown; },
Level) &&
@@ -2139,10 +2293,10 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
// and alignment, because the runtime library only deals with uintptr types.
// If it does not fit the uintptr size, we need to pass the data by reference
// instead.
- if (!IsByRef &&
- (Ctx.getTypeSizeInChars(Ty) >
- Ctx.getTypeSizeInChars(Ctx.getUIntPtrType()) ||
- Ctx.getDeclAlign(D) > Ctx.getTypeAlignInChars(Ctx.getUIntPtrType()))) {
+ if (!IsByRef && (Ctx.getTypeSizeInChars(Ty) >
+ Ctx.getTypeSizeInChars(Ctx.getUIntPtrType()) ||
+ Ctx.getAlignOfGlobalVarInChars(Ty) >
+ Ctx.getTypeAlignInChars(Ctx.getUIntPtrType()))) {
IsByRef = true;
}
@@ -2154,6 +2308,11 @@ unsigned Sema::getOpenMPNestingLevel() const {
return DSAStack->getNestingLevel();
}
+bool Sema::isInOpenMPTaskUntiedContext() const {
+ return isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
+ DSAStack->isUntiedRegion();
+}
+
bool Sema::isInOpenMPTargetExecutionDirective() const {
return (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) &&
!DSAStack->isClauseParsingMode()) ||
@@ -2165,6 +2324,29 @@ bool Sema::isInOpenMPTargetExecutionDirective() const {
false);
}
+bool Sema::isOpenMPRebuildMemberExpr(ValueDecl *D) {
+ // Only rebuild for Field.
+ if (!dyn_cast<FieldDecl>(D))
+ return false;
+ DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
+ D,
+ [](OpenMPClauseKind C, bool AppliedToPointee,
+ DefaultDataSharingAttributes DefaultAttr) {
+ return isOpenMPPrivate(C) && !AppliedToPointee &&
+ (DefaultAttr == DSA_firstprivate || DefaultAttr == DSA_private);
+ },
+ [](OpenMPDirectiveKind) { return true; },
+ DSAStack->isClauseParsingMode());
+ if (DVarPrivate.CKind != OMPC_unknown)
+ return true;
+ return false;
+}
+
+static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
+ Expr *CaptureExpr, bool WithInit,
+ DeclContext *CurContext,
+ bool AsExpression);
+
VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
unsigned StopAt) {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
@@ -2230,7 +2412,7 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
bool OpenMPFound = false;
for (unsigned I = StopAt + 1; I > 0; --I) {
FunctionScopeInfo *FSI = FunctionScopes[I - 1];
- if(!isa<CapturingScopeInfo>(FSI))
+ if (!isa<CapturingScopeInfo>(FSI))
return nullptr;
if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(FSI))
if (RSI->CapRegionKind == CR_OpenMP) {
@@ -2263,7 +2445,7 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
// default(none) clause and not used in any clause.
DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
D,
- [](OpenMPClauseKind C, bool AppliedToPointee) {
+ [](OpenMPClauseKind C, bool AppliedToPointee, bool) {
return isOpenMPPrivate(C) && !AppliedToPointee;
},
[](OpenMPDirectiveKind) { return true; },
@@ -2271,11 +2453,52 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
// Global shared must not be captured.
if (VD && !VD->hasLocalStorage() && DVarPrivate.CKind == OMPC_unknown &&
((DSAStack->getDefaultDSA() != DSA_none &&
+ DSAStack->getDefaultDSA() != DSA_private &&
DSAStack->getDefaultDSA() != DSA_firstprivate) ||
DVarTop.CKind == OMPC_shared))
return nullptr;
+ auto *FD = dyn_cast<FieldDecl>(D);
+ if (DVarPrivate.CKind != OMPC_unknown && !VD && FD &&
+ !DVarPrivate.PrivateCopy) {
+ DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
+ D,
+ [](OpenMPClauseKind C, bool AppliedToPointee,
+ DefaultDataSharingAttributes DefaultAttr) {
+ return isOpenMPPrivate(C) && !AppliedToPointee &&
+ (DefaultAttr == DSA_firstprivate ||
+ DefaultAttr == DSA_private);
+ },
+ [](OpenMPDirectiveKind) { return true; },
+ DSAStack->isClauseParsingMode());
+ if (DVarPrivate.CKind == OMPC_unknown)
+ return nullptr;
+
+ VarDecl *VD = DSAStack->getImplicitFDCapExprDecl(FD);
+ if (VD)
+ return VD;
+ if (getCurrentThisType().isNull())
+ return nullptr;
+ Expr *ThisExpr = BuildCXXThisExpr(SourceLocation(), getCurrentThisType(),
+ /*IsImplicit=*/true);
+ const CXXScopeSpec CS = CXXScopeSpec();
+ Expr *ME = BuildMemberExpr(ThisExpr, /*IsArrow=*/true, SourceLocation(),
+ NestedNameSpecifierLoc(), SourceLocation(), FD,
+ DeclAccessPair::make(FD, FD->getAccess()),
+ /*HadMultipleCandidates=*/false,
+ DeclarationNameInfo(), FD->getType(),
+ VK_LValue, OK_Ordinary);
+ OMPCapturedExprDecl *CD = buildCaptureDecl(
+ *this, FD->getIdentifier(), ME, DVarPrivate.CKind != OMPC_private,
+ CurContext->getParent(), /*AsExpression=*/false);
+ DeclRefExpr *VDPrivateRefExpr = buildDeclRefExpr(
+ *this, CD, CD->getType().getNonReferenceType(), SourceLocation());
+ VD = cast<VarDecl>(VDPrivateRefExpr->getDecl());
+ DSAStack->addImplicitDefaultFirstprivateFD(FD, VD);
+ return VD;
+ }
if (DVarPrivate.CKind != OMPC_unknown ||
(VD && (DSAStack->getDefaultDSA() == DSA_none ||
+ DSAStack->getDefaultDSA() == DSA_private ||
DSAStack->getDefaultDSA() == DSA_firstprivate)))
return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
}
@@ -2304,9 +2527,24 @@ void Sema::startOpenMPCXXRangeFor() {
OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
- if (DSAStack->hasExplicitDirective(
- [](OpenMPDirectiveKind K) { return isOpenMPTaskingDirective(K); },
- Level)) {
+ if (DSAStack->getCurrentDirective() != OMPD_unknown &&
+ (!DSAStack->isClauseParsingMode() ||
+ DSAStack->getParentDirective() != OMPD_unknown)) {
+ DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
+ D,
+ [](OpenMPClauseKind C, bool AppliedToPointee,
+ DefaultDataSharingAttributes DefaultAttr) {
+ return isOpenMPPrivate(C) && !AppliedToPointee &&
+ DefaultAttr == DSA_private;
+ },
+ [](OpenMPDirectiveKind) { return true; },
+ DSAStack->isClauseParsingMode());
+ if (DVarPrivate.CKind == OMPC_private && isa<OMPCapturedExprDecl>(D) &&
+ DSAStack->isImplicitDefaultFirstprivateFD(cast<VarDecl>(D)) &&
+ !DSAStack->isLoopControlVariable(D).first)
+ return OMPC_private;
+ }
+ if (DSAStack->hasExplicitDirective(isOpenMPTaskingDirective, Level)) {
bool IsTriviallyCopyable =
D->getType().getNonReferenceType().isTriviallyCopyableType(Context) &&
!D->getType()
@@ -2332,9 +2570,9 @@ OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
}
}
}
- if (isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
- if (DSAStack->getAssociatedLoops() > 0 &&
- !DSAStack->isLoopStarted()) {
+ if (isOpenMPLoopDirective(DSAStack->getCurrentDirective()) &&
+ !isOpenMPLoopTransformationDirective(DSAStack->getCurrentDirective())) {
+ if (DSAStack->getAssociatedLoops() > 0 && !DSAStack->isLoopStarted()) {
DSAStack->resetPossibleLoopCounter(D);
DSAStack->loopStart();
return OMPC_private;
@@ -2358,7 +2596,7 @@ OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
// User-defined allocators are private since they must be defined in the
// context of target region.
if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level) &&
- DSAStack->isUsesAllocatorsDecl(Level, D).getValueOr(
+ DSAStack->isUsesAllocatorsDecl(Level, D).value_or(
DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait) ==
DSAStackTy::UsesAllocatorsDeclKind::UserDefinedAllocator)
return OMPC_private;
@@ -2448,7 +2686,11 @@ bool Sema::isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned NumLevels =
getOpenMPCaptureLevels(DSAStack->getDirective(Level));
if (Level == 0)
- return (NumLevels == CaptureLevel + 1) && TopDVar.CKind != OMPC_shared;
+ // non-file scope static variale with default(firstprivate)
+ // should be gloabal captured.
+ return (NumLevels == CaptureLevel + 1 &&
+ (TopDVar.CKind != OMPC_shared ||
+ DSAStack->getDefaultDSA() == DSA_firstprivate));
do {
--Level;
DSAStackTy::DSAVarData DVar = DSAStack->getImplicitDSA(D, Level);
@@ -2478,19 +2720,19 @@ void Sema::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc) {
assert(LangOpts.OpenMP && "Expected OpenMP compilation mode.");
- Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
+ std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
OMPDeclareTargetDeclAttr::getDeviceType(Caller->getMostRecentDecl());
// Ignore host functions during device analyzis.
- if (LangOpts.OpenMPIsDevice &&
+ if (LangOpts.OpenMPIsTargetDevice &&
(!DevTy || *DevTy == OMPDeclareTargetDeclAttr::DT_Host))
return;
// Ignore nohost functions during host analyzis.
- if (!LangOpts.OpenMPIsDevice && DevTy &&
+ if (!LangOpts.OpenMPIsTargetDevice && DevTy &&
*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
return;
const FunctionDecl *FD = Callee->getMostRecentDecl();
DevTy = OMPDeclareTargetDeclAttr::getDeviceType(FD);
- if (LangOpts.OpenMPIsDevice && DevTy &&
+ if (LangOpts.OpenMPIsTargetDevice && DevTy &&
*DevTy == OMPDeclareTargetDeclAttr::DT_Host) {
// Diagnose host function called during device codegen.
StringRef HostDevTy =
@@ -2501,16 +2743,34 @@ void Sema::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
<< HostDevTy;
return;
}
- if (!LangOpts.OpenMPIsDevice && DevTy &&
- *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
- // Diagnose nohost function called during host codegen.
- StringRef NoHostDevTy = getOpenMPSimpleClauseTypeName(
- OMPC_device_type, OMPC_DEVICE_TYPE_nohost);
- Diag(Loc, diag::err_omp_wrong_device_function_call) << NoHostDevTy << 1;
- Diag(*OMPDeclareTargetDeclAttr::getLocation(FD),
- diag::note_omp_marked_device_type_here)
- << NoHostDevTy;
+ if (!LangOpts.OpenMPIsTargetDevice && !LangOpts.OpenMPOffloadMandatory &&
+ DevTy && *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
+ // In OpenMP 5.2 or later, if the function has a host variant then allow
+ // that to be called instead
+ auto &&HasHostAttr = [](const FunctionDecl *Callee) {
+ for (OMPDeclareVariantAttr *A :
+ Callee->specific_attrs<OMPDeclareVariantAttr>()) {
+ auto *DeclRefVariant = cast<DeclRefExpr>(A->getVariantFuncRef());
+ auto *VariantFD = cast<FunctionDecl>(DeclRefVariant->getDecl());
+ std::optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
+ OMPDeclareTargetDeclAttr::getDeviceType(
+ VariantFD->getMostRecentDecl());
+ if (!DevTy || *DevTy == OMPDeclareTargetDeclAttr::DT_Host)
+ return true;
}
+ return false;
+ };
+ if (getLangOpts().OpenMP >= 52 &&
+ Callee->hasAttr<OMPDeclareVariantAttr>() && HasHostAttr(Callee))
+ return;
+ // Diagnose nohost function called during host codegen.
+ StringRef NoHostDevTy = getOpenMPSimpleClauseTypeName(
+ OMPC_device_type, OMPC_DEVICE_TYPE_nohost);
+ Diag(Loc, diag::err_omp_wrong_device_function_call) << NoHostDevTy << 1;
+ Diag(*OMPDeclareTargetDeclAttr::getLocation(FD),
+ diag::note_omp_marked_device_type_here)
+ << NoHostDevTy;
+ }
}
void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind,
@@ -2532,7 +2792,8 @@ void Sema::EndOpenMPClause() {
static std::pair<ValueDecl *, bool>
getPrivateItem(Sema &S, Expr *&RefExpr, SourceLocation &ELoc,
- SourceRange &ERange, bool AllowArraySection = false);
+ SourceRange &ERange, bool AllowArraySection = false,
+ StringRef DiagType = "");
/// Check consistency of the reduction clauses.
static void checkReductionClauses(Sema &S, DSAStackTy *Stack,
@@ -2761,7 +3022,6 @@ public:
std::unique_ptr<CorrectionCandidateCallback> clone() override {
return std::make_unique<VarDeclFilterCCC>(*this);
}
-
};
class VarOrFuncDeclFilterCCC final : public CorrectionCandidateCallback {
@@ -3044,13 +3304,15 @@ getAllocatorKind(Sema &S, DSAStackTy *Stack, Expr *Allocator) {
Allocator->containsUnexpandedParameterPack())
return OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
auto AllocatorKindRes = OMPAllocateDeclAttr::OMPUserDefinedMemAlloc;
+ llvm::FoldingSetNodeID AEId;
const Expr *AE = Allocator->IgnoreParenImpCasts();
+ AE->IgnoreImpCasts()->Profile(AEId, S.getASTContext(), /*Canonical=*/true);
for (int I = 0; I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
auto AllocatorKind = static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(I);
const Expr *DefAllocator = Stack->getAllocator(AllocatorKind);
- llvm::FoldingSetNodeID AEId, DAEId;
- AE->Profile(AEId, S.getASTContext(), /*Canonical=*/true);
- DefAllocator->Profile(DAEId, S.getASTContext(), /*Canonical=*/true);
+ llvm::FoldingSetNodeID DAEId;
+ DefAllocator->IgnoreImpCasts()->Profile(DAEId, S.getASTContext(),
+ /*Canonical=*/true);
if (AEId == DAEId) {
AllocatorKindRes = AllocatorKind;
break;
@@ -3112,36 +3374,50 @@ static bool checkPreviousOMPAllocateAttribute(
static void
applyOMPAllocateAttribute(Sema &S, VarDecl *VD,
OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind,
- Expr *Allocator, SourceRange SR) {
+ Expr *Allocator, Expr *Alignment, SourceRange SR) {
if (VD->hasAttr<OMPAllocateDeclAttr>())
return;
+ if (Alignment &&
+ (Alignment->isTypeDependent() || Alignment->isValueDependent() ||
+ Alignment->isInstantiationDependent() ||
+ Alignment->containsUnexpandedParameterPack()))
+ // Apply later when we have a usable value.
+ return;
if (Allocator &&
(Allocator->isTypeDependent() || Allocator->isValueDependent() ||
Allocator->isInstantiationDependent() ||
Allocator->containsUnexpandedParameterPack()))
return;
auto *A = OMPAllocateDeclAttr::CreateImplicit(S.Context, AllocatorKind,
- Allocator, SR);
+ Allocator, Alignment, SR);
VD->addAttr(A);
if (ASTMutationListener *ML = S.Context.getASTMutationListener())
ML->DeclarationMarkedOpenMPAllocate(VD, A);
}
-Sema::DeclGroupPtrTy Sema::ActOnOpenMPAllocateDirective(
- SourceLocation Loc, ArrayRef<Expr *> VarList,
- ArrayRef<OMPClause *> Clauses, DeclContext *Owner) {
- assert(Clauses.size() <= 1 && "Expected at most one clause.");
+Sema::DeclGroupPtrTy
+Sema::ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList,
+ ArrayRef<OMPClause *> Clauses,
+ DeclContext *Owner) {
+ assert(Clauses.size() <= 2 && "Expected at most two clauses.");
+ Expr *Alignment = nullptr;
Expr *Allocator = nullptr;
if (Clauses.empty()) {
// OpenMP 5.0, 2.11.3 allocate Directive, Restrictions.
// allocate directives that appear in a target region must specify an
// allocator clause unless a requires directive with the dynamic_allocators
// clause is present in the same compilation unit.
- if (LangOpts.OpenMPIsDevice &&
+ if (LangOpts.OpenMPIsTargetDevice &&
!DSAStack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>())
targetDiag(Loc, diag::err_expected_allocator_clause);
} else {
- Allocator = cast<OMPAllocatorClause>(Clauses.back())->getAllocator();
+ for (const OMPClause *C : Clauses)
+ if (const auto *AC = dyn_cast<OMPAllocatorClause>(C))
+ Allocator = AC->getAllocator();
+ else if (const auto *AC = dyn_cast<OMPAlignClause>(C))
+ Alignment = AC->getAlignment();
+ else
+ llvm_unreachable("Unexpected clause on allocate directive");
}
OMPAllocateDeclAttr::AllocatorTypeTy AllocatorKind =
getAllocatorKind(*this, DSAStack, Allocator);
@@ -3182,7 +3458,7 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPAllocateDirective(
}
Vars.push_back(RefExpr);
- applyOMPAllocateAttribute(*this, VD, AllocatorKind, Allocator,
+ applyOMPAllocateAttribute(*this, VD, AllocatorKind, Allocator, Alignment,
DE->getSourceRange());
}
if (Vars.empty())
@@ -3213,7 +3489,7 @@ Sema::ActOnOpenMPRequiresDirective(SourceLocation Loc,
void Sema::ActOnOpenMPAssumesDirective(SourceLocation Loc,
OpenMPDirectiveKind DKind,
- ArrayRef<StringRef> Assumptions,
+ ArrayRef<std::string> Assumptions,
bool SkippedClauses) {
if (!SkippedClauses && Assumptions.empty())
Diag(Loc, diag::err_omp_no_clause_for_directive)
@@ -3250,8 +3526,7 @@ void Sema::ActOnOpenMPAssumesDirective(SourceLocation Loc,
continue;
if (auto *CTD = dyn_cast<ClassTemplateDecl>(SubDC)) {
DeclContexts.push_back(CTD->getTemplatedDecl());
- for (auto *S : CTD->specializations())
- DeclContexts.push_back(S);
+ llvm::append_range(DeclContexts, CTD->specializations());
continue;
}
if (auto *DC = dyn_cast<DeclContext>(SubDC))
@@ -3414,8 +3689,9 @@ class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
bool ErrorFound = false;
bool TryCaptureCXXThisMembers = false;
CapturedStmt *CS = nullptr;
- const static unsigned DefaultmapKindNum = OMPC_DEFAULTMAP_pointer + 1;
+ const static unsigned DefaultmapKindNum = OMPC_DEFAULTMAP_unknown + 1;
llvm::SmallVector<Expr *, 4> ImplicitFirstprivate;
+ llvm::SmallVector<Expr *, 4> ImplicitPrivate;
llvm::SmallVector<Expr *, 4> ImplicitMap[DefaultmapKindNum][OMPC_MAP_delete];
llvm::SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
ImplicitMapModifier[DefaultmapKindNum];
@@ -3431,6 +3707,7 @@ class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
S->getDirectiveKind() == OMPD_section ||
S->getDirectiveKind() == OMPD_master ||
S->getDirectiveKind() == OMPD_masked ||
+ S->getDirectiveKind() == OMPD_scope ||
isOpenMPLoopTransformationDirective(S->getDirectiveKind())) {
Visit(S->getAssociatedStmt());
return;
@@ -3469,7 +3746,9 @@ public:
return;
if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
// Check the datasharing rules for the expressions in the clauses.
- if (!CS) {
+ if (!CS || (isa<OMPCapturedExprDecl>(VD) && !CS->capturesVariable(VD) &&
+ !Stack->getTopDSA(VD, /*FromParent=*/false).RefExpr &&
+ !Stack->isImplicitDefaultFirstprivateFD(VD))) {
if (auto *CED = dyn_cast<OMPCapturedExprDecl>(VD))
if (!CED->hasAttr<OMPCaptureNoInitAttr>()) {
Visit(CED->getInit());
@@ -3478,14 +3757,16 @@ public:
} else if (VD->isImplicit() || isa<OMPCapturedExprDecl>(VD))
// Do not analyze internal variables and do not enclose them into
// implicit clauses.
- return;
+ if (!Stack->isImplicitDefaultFirstprivateFD(VD))
+ return;
VD = VD->getCanonicalDecl();
// Skip internally declared variables.
if (VD->hasLocalStorage() && CS && !CS->capturesVariable(VD) &&
+ !Stack->isImplicitDefaultFirstprivateFD(VD) &&
!Stack->isImplicitTaskFirstprivate(VD))
return;
// Skip allocators in uses_allocators clauses.
- if (Stack->isUsesAllocatorsDecl(VD).hasValue())
+ if (Stack->isUsesAllocatorsDecl(VD))
return;
DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
@@ -3494,11 +3775,12 @@ public:
return;
// Skip internally declared static variables.
- llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+ std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (VD->hasGlobalStorage() && CS && !CS->capturesVariable(VD) &&
(Stack->hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>() ||
!Res || *Res != OMPDeclareTargetDeclAttr::MT_Link) &&
+ !Stack->isImplicitDefaultFirstprivateFD(VD) &&
!Stack->isImplicitTaskFirstprivate(VD))
return;
@@ -3510,18 +3792,21 @@ public:
// by being listed in a data-sharing attribute clause.
if (DVar.CKind == OMPC_unknown &&
(Stack->getDefaultDSA() == DSA_none ||
+ Stack->getDefaultDSA() == DSA_private ||
Stack->getDefaultDSA() == DSA_firstprivate) &&
isImplicitOrExplicitTaskingRegion(DKind) &&
VarsWithInheritedDSA.count(VD) == 0) {
bool InheritedDSA = Stack->getDefaultDSA() == DSA_none;
- if (!InheritedDSA && Stack->getDefaultDSA() == DSA_firstprivate) {
+ if (!InheritedDSA && (Stack->getDefaultDSA() == DSA_firstprivate ||
+ Stack->getDefaultDSA() == DSA_private)) {
DSAStackTy::DSAVarData DVar =
Stack->getImplicitDSA(VD, /*FromParent=*/false);
InheritedDSA = DVar.CKind == OMPC_unknown;
}
if (InheritedDSA)
VarsWithInheritedDSA[VD] = E;
- return;
+ if (Stack->getDefaultDSA() == DSA_none)
+ return;
}
// OpenMP 5.0 [2.19.7.2, defaultmap clause, Description]
@@ -3529,7 +3814,7 @@ public:
// construct that does not have a predetermined data-sharing attribute
// and does not appear in a to or link clause on a declare target
// directive must be listed in a data-mapping attribute clause, a
- // data-haring attribute clause (including a data-sharing attribute
+ // data-sharing attribute clause (including a data-sharing attribute
// clause on a combined construct where target. is one of the
// constituent constructs), or an is_device_ptr clause.
OpenMPDefaultmapClauseKind ClauseKind =
@@ -3560,9 +3845,8 @@ public:
bool IsModifierPresent = Stack->getDefaultmapModifier(ClauseKind) ==
OMPC_DEFAULTMAP_MODIFIER_present;
if (IsModifierPresent) {
- if (llvm::find(ImplicitMapModifier[ClauseKind],
- OMPC_MAP_MODIFIER_present) ==
- std::end(ImplicitMapModifier[ClauseKind])) {
+ if (!llvm::is_contained(ImplicitMapModifier[ClauseKind],
+ OMPC_MAP_MODIFIER_present)) {
ImplicitMapModifier[ClauseKind].push_back(
OMPC_MAP_MODIFIER_present);
}
@@ -3581,9 +3865,8 @@ public:
// Variable is used if it has been marked as an array, array
// section, array shaping or the variable iself.
return StackComponents.size() == 1 ||
- std::all_of(
- std::next(StackComponents.rbegin()),
- StackComponents.rend(),
+ llvm::all_of(
+ llvm::drop_begin(llvm::reverse(StackComponents)),
[](const OMPClauseMappableExprCommon::
MappableComponent &MC) {
return MC.getAssociatedDeclaration() ==
@@ -3640,10 +3923,16 @@ public:
// Define implicit data-sharing attributes for task.
DVar = Stack->getImplicitDSA(VD, /*FromParent=*/false);
if (((isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared) ||
- (Stack->getDefaultDSA() == DSA_firstprivate &&
- DVar.CKind == OMPC_firstprivate && !DVar.RefExpr)) &&
+ (((Stack->getDefaultDSA() == DSA_firstprivate &&
+ DVar.CKind == OMPC_firstprivate) ||
+ (Stack->getDefaultDSA() == DSA_private &&
+ DVar.CKind == OMPC_private)) &&
+ !DVar.RefExpr)) &&
!Stack->isLoopControlVariable(VD).first) {
- ImplicitFirstprivate.push_back(E);
+ if (Stack->getDefaultDSA() == DSA_private)
+ ImplicitPrivate.push_back(E);
+ else
+ ImplicitFirstprivate.push_back(E);
return;
}
@@ -3788,6 +4077,10 @@ public:
}
void VisitOMPExecutableDirective(OMPExecutableDirective *S) {
for (OMPClause *C : S->clauses()) {
+ // Skip analysis of arguments of private clauses for task|target
+ // directives.
+ if (isa_and_nonnull<OMPPrivateClause>(C))
+ continue;
// Skip analysis of arguments of implicitly defined firstprivate clause
// for task|target directives.
// Skip analysis of arguments of implicitly defined map clause for target
@@ -3805,16 +4098,27 @@ public:
VisitSubCaptures(S);
}
- void VisitOMPTileDirective(OMPTileDirective *S) {
- // #pragma omp tile does not introduce data sharing.
+ void VisitOMPLoopTransformationDirective(OMPLoopTransformationDirective *S) {
+ // Loop transformation directives do not introduce data sharing
VisitStmt(S);
}
- void VisitOMPUnrollDirective(OMPUnrollDirective *S) {
- // #pragma omp unroll does not introduce data sharing.
- VisitStmt(S);
+ void VisitCallExpr(CallExpr *S) {
+ for (Stmt *C : S->arguments()) {
+ if (C) {
+ // Check implicitly captured variables in the task-based directives to
+ // check if they must be firstprivatized.
+ Visit(C);
+ }
+ }
+ if (Expr *Callee = S->getCallee()) {
+ auto *CI = Callee->IgnoreParenImpCasts();
+ if (auto *CE = dyn_cast<MemberExpr>(CI))
+ Visit(CE->getBase());
+ else if (auto *CE = dyn_cast<DeclRefExpr>(CI))
+ Visit(CE);
+ }
}
-
void VisitStmt(Stmt *S) {
for (Stmt *C : S->children()) {
if (C) {
@@ -3848,6 +4152,7 @@ public:
ArrayRef<Expr *> getImplicitFirstprivate() const {
return ImplicitFirstprivate;
}
+ ArrayRef<Expr *> getImplicitPrivate() const { return ImplicitPrivate; }
ArrayRef<Expr *> getImplicitMap(OpenMPDefaultmapClauseKind DK,
OpenMPMapClauseKind MK) const {
return ImplicitMap[DK][MK];
@@ -3871,6 +4176,23 @@ public:
};
} // namespace
+static void handleDeclareVariantConstructTrait(DSAStackTy *Stack,
+ OpenMPDirectiveKind DKind,
+ bool ScopeEntry) {
+ SmallVector<llvm::omp::TraitProperty, 8> Traits;
+ if (isOpenMPTargetExecutionDirective(DKind))
+ Traits.emplace_back(llvm::omp::TraitProperty::construct_target_target);
+ if (isOpenMPTeamsDirective(DKind))
+ Traits.emplace_back(llvm::omp::TraitProperty::construct_teams_teams);
+ if (isOpenMPParallelDirective(DKind))
+ Traits.emplace_back(llvm::omp::TraitProperty::construct_parallel_parallel);
+ if (isOpenMPWorksharingDirective(DKind))
+ Traits.emplace_back(llvm::omp::TraitProperty::construct_for_for);
+ if (isOpenMPSimdDirective(DKind))
+ Traits.emplace_back(llvm::omp::TraitProperty::construct_simd_simd);
+ Stack->handleConstructTrait(Traits, ScopeEntry);
+}
+
void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
switch (DKind) {
case OMPD_parallel:
@@ -3878,6 +4200,8 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_parallel_for_simd:
case OMPD_parallel_sections:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
+ case OMPD_parallel_loop:
case OMPD_teams:
case OMPD_teams_distribute:
case OMPD_teams_distribute_simd: {
@@ -3897,6 +4221,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
+ case OMPD_target_parallel_loop:
case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_simd: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
@@ -3923,14 +4248,16 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
- Context, {}, AttributeCommonInfo::AS_Keyword,
- AlwaysInlineAttr::Keyword_forceinline));
- Sema::CapturedParamNameType ParamsTarget[] = {
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
+ Context, {}, AlwaysInlineAttr::Keyword_forceinline));
+ SmallVector<Sema::CapturedParamNameType, 2> ParamsTarget;
+ if (getLangOpts().OpenMPIsTargetDevice)
+ ParamsTarget.push_back(std::make_pair(StringRef("dyn_ptr"), VoidPtrTy));
+ ParamsTarget.push_back(
+ std::make_pair(StringRef(), QualType())); // __context with shared vars;
// Start a captured region for 'target' with no implicit parameters.
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- ParamsTarget, /*OpenMPCaptureLevel=*/1);
+ ParamsTarget,
+ /*OpenMPCaptureLevel=*/1);
Sema::CapturedParamNameType ParamsTeamsOrParallel[] = {
std::make_pair(".global_tid.", KmpInt32PtrTy),
std::make_pair(".bound_tid.", KmpInt32PtrTy),
@@ -3968,10 +4295,14 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
- Context, {}, AttributeCommonInfo::AS_Keyword,
- AlwaysInlineAttr::Keyword_forceinline));
+ Context, {}, AlwaysInlineAttr::Keyword_forceinline));
+ SmallVector<Sema::CapturedParamNameType, 2> ParamsTarget;
+ if (getLangOpts().OpenMPIsTargetDevice)
+ ParamsTarget.push_back(std::make_pair(StringRef("dyn_ptr"), VoidPtrTy));
+ ParamsTarget.push_back(
+ std::make_pair(StringRef(), QualType())); // __context with shared vars;
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
- std::make_pair(StringRef(), QualType()),
+ ParamsTarget,
/*OpenMPCaptureLevel=*/1);
break;
}
@@ -3983,6 +4314,9 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_tile:
case OMPD_unroll:
break;
+ case OMPD_loop:
+ // TODO: 'loop' may require additional parameters depending on the binding.
+ // Treat similar to OMPD_simd/OMPD_for for now.
case OMPD_simd:
case OMPD_for:
case OMPD_for_simd:
@@ -3992,6 +4326,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_distribute:
case OMPD_distribute_simd:
case OMPD_ordered:
+ case OMPD_scope:
case OMPD_target_data:
case OMPD_dispatch: {
Sema::CapturedParamNameType Params[] = {
@@ -4026,13 +4361,14 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
- Context, {}, AttributeCommonInfo::AS_Keyword,
- AlwaysInlineAttr::Keyword_forceinline));
+ Context, {}, AlwaysInlineAttr::Keyword_forceinline));
break;
}
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
+ case OMPD_masked_taskloop:
+ case OMPD_masked_taskloop_simd:
case OMPD_master_taskloop_simd: {
QualType KmpInt32Ty =
Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
@@ -4071,10 +4407,11 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
- Context, {}, AttributeCommonInfo::AS_Keyword,
- AlwaysInlineAttr::Keyword_forceinline));
+ Context, {}, AlwaysInlineAttr::Keyword_forceinline));
break;
}
+ case OMPD_parallel_masked_taskloop:
+ case OMPD_parallel_masked_taskloop_simd:
case OMPD_parallel_master_taskloop:
case OMPD_parallel_master_taskloop_simd: {
QualType KmpInt32Ty =
@@ -4122,8 +4459,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
- Context, {}, AttributeCommonInfo::AS_Keyword,
- AlwaysInlineAttr::Keyword_forceinline));
+ Context, {}, AlwaysInlineAttr::Keyword_forceinline));
break;
}
case OMPD_distribute_parallel_for_simd:
@@ -4142,6 +4478,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
Params);
break;
}
+ case OMPD_target_teams_loop:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
@@ -4169,11 +4506,12 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
- Context, {}, AttributeCommonInfo::AS_Keyword,
- AlwaysInlineAttr::Keyword_forceinline));
- Sema::CapturedParamNameType ParamsTarget[] = {
- std::make_pair(StringRef(), QualType()) // __context with shared vars
- };
+ Context, {}, AlwaysInlineAttr::Keyword_forceinline));
+ SmallVector<Sema::CapturedParamNameType, 2> ParamsTarget;
+ if (getLangOpts().OpenMPIsTargetDevice)
+ ParamsTarget.push_back(std::make_pair(StringRef("dyn_ptr"), VoidPtrTy));
+ ParamsTarget.push_back(
+ std::make_pair(StringRef(), QualType())); // __context with shared vars;
// Start a captured region for 'target' with no implicit parameters.
ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
ParamsTarget, /*OpenMPCaptureLevel=*/1);
@@ -4201,6 +4539,7 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
break;
}
+ case OMPD_teams_loop:
case OMPD_teams_distribute_parallel_for:
case OMPD_teams_distribute_parallel_for_simd: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
@@ -4256,13 +4595,13 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
// function directly.
getCurCapturedRegion()->TheCapturedDecl->addAttr(
AlwaysInlineAttr::CreateImplicit(
- Context, {}, AttributeCommonInfo::AS_Keyword,
- AlwaysInlineAttr::Keyword_forceinline));
+ Context, {}, AlwaysInlineAttr::Keyword_forceinline));
break;
}
case OMPD_threadprivate:
case OMPD_allocate:
case OMPD_taskyield:
+ case OMPD_error:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_cancellation_point:
@@ -4279,12 +4618,14 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_declare_variant:
case OMPD_begin_declare_variant:
case OMPD_end_declare_variant:
+ case OMPD_metadirective:
llvm_unreachable("OpenMP Directive is not allowed");
case OMPD_unknown:
default:
llvm_unreachable("Unknown OpenMP directive");
}
DSAStack->setContext(CurContext);
+ handleDeclareVariantConstructTrait(DSAStack, DKind, /* ScopeEntry */ true);
}
int Sema::getNumberOfConstructScopes(unsigned Level) const {
@@ -4299,6 +4640,7 @@ int Sema::getOpenMPCaptureLevels(OpenMPDirectiveKind DKind) {
static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
Expr *CaptureExpr, bool WithInit,
+ DeclContext *CurContext,
bool AsExpression) {
assert(CaptureExpr);
ASTContext &C = S.getASTContext();
@@ -4317,11 +4659,11 @@ static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
}
WithInit = true;
}
- auto *CED = OMPCapturedExprDecl::Create(C, S.CurContext, Id, Ty,
+ auto *CED = OMPCapturedExprDecl::Create(C, CurContext, Id, Ty,
CaptureExpr->getBeginLoc());
if (!WithInit)
CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C));
- S.CurContext->addHiddenDecl(CED);
+ CurContext->addHiddenDecl(CED);
Sema::TentativeAnalysisScope Trap(S);
S.AddInitializerToDecl(CED, Init, /*DirectInit=*/false);
return CED;
@@ -4334,17 +4676,19 @@ static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
CD = cast<OMPCapturedExprDecl>(VD);
else
CD = buildCaptureDecl(S, D->getIdentifier(), CaptureExpr, WithInit,
+ S.CurContext,
/*AsExpression=*/false);
return buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
CaptureExpr->getExprLoc());
}
-static ExprResult buildCapture(Sema &S, Expr *CaptureExpr, DeclRefExpr *&Ref) {
+static ExprResult buildCapture(Sema &S, Expr *CaptureExpr, DeclRefExpr *&Ref,
+ StringRef Name) {
CaptureExpr = S.DefaultLvalueConversion(CaptureExpr).get();
if (!Ref) {
OMPCapturedExprDecl *CD = buildCaptureDecl(
- S, &S.getASTContext().Idents.get(".capture_expr."), CaptureExpr,
- /*WithInit=*/true, /*AsExpression=*/true);
+ S, &S.getASTContext().Idents.get(Name), CaptureExpr,
+ /*WithInit=*/true, S.CurContext, /*AsExpression=*/true);
Ref = buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
CaptureExpr->getExprLoc());
}
@@ -4404,12 +4748,12 @@ void Sema::tryCaptureOpenMPLambdas(ValueDecl *V) {
DSAStack->setForceCaptureByReferenceInTargetExecutable(
/*V=*/true);
if (RD->isLambda()) {
- llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
+ llvm::DenseMap<const ValueDecl *, FieldDecl *> Captures;
FieldDecl *ThisCapture;
RD->getCaptureFields(Captures, ThisCapture);
for (const LambdaCapture &LC : RD->captures()) {
if (LC.getCaptureKind() == LCK_ByRef) {
- VarDecl *VD = LC.getCapturedVar();
+ VarDecl *VD = cast<VarDecl>(LC.getCapturedVar());
DeclContext *VDC = VD->getDeclContext();
if (!VDC->Encloses(CurContext))
continue;
@@ -4460,6 +4804,8 @@ static bool checkOrderedOrderSpecified(Sema &S,
StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
ArrayRef<OMPClause *> Clauses) {
+ handleDeclareVariantConstructTrait(DSAStack, DSAStack->getCurrentDirective(),
+ /* ScopeEntry */ false);
if (DSAStack->getCurrentDirective() == OMPD_atomic ||
DSAStack->getCurrentDirective() == OMPD_critical ||
DSAStack->getCurrentDirective() == OMPD_section ||
@@ -4484,7 +4830,8 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
// This is required for proper codegen.
for (OMPClause *Clause : Clauses) {
if (!LangOpts.OpenMPSimd &&
- isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
+ (isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) ||
+ DSAStack->getCurrentDirective() == OMPD_target) &&
Clause->getClauseKind() == OMPC_in_reduction) {
// Capture taskgroup task_reduction descriptors inside the tasking regions
// with the corresponding in_reduction items.
@@ -4650,6 +4997,7 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
OpenMPDirectiveKind CurrentRegion,
const DeclarationNameInfo &CurrentName,
OpenMPDirectiveKind CancelRegion,
+ OpenMPBindClauseKind BindKind,
SourceLocation StartLoc) {
if (Stack->getCurScope()) {
OpenMPDirectiveKind ParentRegion = Stack->getParentDirective();
@@ -4665,6 +5013,14 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
ShouldBeInTeamsRegion,
ShouldBeInLoopSimdRegion,
} Recommend = NoRecommend;
+ if (SemaRef.LangOpts.OpenMP >= 51 && Stack->isParentOrderConcurrent() &&
+ CurrentRegion != OMPD_simd && CurrentRegion != OMPD_loop &&
+ CurrentRegion != OMPD_parallel &&
+ !isOpenMPCombinedParallelADirective(CurrentRegion)) {
+ SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_order)
+ << getOpenMPDirectiveName(CurrentRegion);
+ return true;
+ }
if (isOpenMPSimdDirective(ParentRegion) &&
((SemaRef.LangOpts.OpenMP <= 45 && CurrentRegion != OMPD_ordered) ||
(SemaRef.LangOpts.OpenMP >= 50 && CurrentRegion != OMPD_ordered &&
@@ -4716,6 +5072,18 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
CurrentRegion != OMPD_cancellation_point &&
CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_scan)
return false;
+ // Checks needed for mapping "loop" construct. Please check mapLoopConstruct
+ // for a detailed explanation
+ if (SemaRef.LangOpts.OpenMP >= 50 && CurrentRegion == OMPD_loop &&
+ (BindKind == OMPC_BIND_parallel || BindKind == OMPC_BIND_teams) &&
+ (isOpenMPWorksharingDirective(ParentRegion) ||
+ ParentRegion == OMPD_loop)) {
+ int ErrorMsgNumber = (BindKind == OMPC_BIND_parallel) ? 1 : 4;
+ SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
+ << true << getOpenMPDirectiveName(ParentRegion) << ErrorMsgNumber
+ << getOpenMPDirectiveName(CurrentRegion);
+ return true;
+ }
if (CurrentRegion == OMPD_cancellation_point ||
CurrentRegion == OMPD_cancel) {
// OpenMP [2.16, Nesting of Regions]
@@ -4744,6 +5112,8 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
(SemaRef.getLangOpts().OpenMP >= 50 &&
(ParentRegion == OMPD_taskloop ||
ParentRegion == OMPD_master_taskloop ||
+ ParentRegion == OMPD_masked_taskloop ||
+ ParentRegion == OMPD_parallel_masked_taskloop ||
ParentRegion == OMPD_parallel_master_taskloop)))) ||
(CancelRegion == OMPD_sections &&
(ParentRegion == OMPD_section || ParentRegion == OMPD_sections ||
@@ -4754,6 +5124,7 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
// A masked region may not be closely nested inside a worksharing, loop,
// atomic, task, or taskloop region.
NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
+ isOpenMPGenericLoopDirective(ParentRegion) ||
isOpenMPTaskingDirective(ParentRegion);
} else if (CurrentRegion == OMPD_critical && CurrentName.getName()) {
// OpenMP [2.16, Nesting of Regions]
@@ -4781,15 +5152,20 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
diag::note_omp_previous_critical_region);
return true;
}
- } else if (CurrentRegion == OMPD_barrier) {
+ } else if (CurrentRegion == OMPD_barrier || CurrentRegion == OMPD_scope) {
+ // OpenMP 5.1 [2.22, Nesting of Regions]
+ // A scope region may not be closely nested inside a worksharing, loop,
+ // task, taskloop, critical, ordered, atomic, or masked region.
// OpenMP 5.1 [2.22, Nesting of Regions]
// A barrier region may not be closely nested inside a worksharing, loop,
// task, taskloop, critical, ordered, atomic, or masked region.
NestingProhibited =
isOpenMPWorksharingDirective(ParentRegion) ||
+ isOpenMPGenericLoopDirective(ParentRegion) ||
isOpenMPTaskingDirective(ParentRegion) ||
ParentRegion == OMPD_master || ParentRegion == OMPD_masked ||
ParentRegion == OMPD_parallel_master ||
+ ParentRegion == OMPD_parallel_masked ||
ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered;
} else if (isOpenMPWorksharingDirective(CurrentRegion) &&
!isOpenMPParallelDirective(CurrentRegion) &&
@@ -4800,9 +5176,11 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
// critical, ordered, atomic, or masked region.
NestingProhibited =
isOpenMPWorksharingDirective(ParentRegion) ||
+ isOpenMPGenericLoopDirective(ParentRegion) ||
isOpenMPTaskingDirective(ParentRegion) ||
ParentRegion == OMPD_master || ParentRegion == OMPD_masked ||
ParentRegion == OMPD_parallel_master ||
+ ParentRegion == OMPD_parallel_masked ||
ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered;
Recommend = ShouldBeInParallelRegion;
} else if (CurrentRegion == OMPD_ordered) {
@@ -4845,14 +5223,32 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
!isOpenMPTargetExecutionDirective(CurrentRegion) &&
!isOpenMPTargetDataManagementDirective(CurrentRegion) &&
(ParentRegion == OMPD_teams || ParentRegion == OMPD_target_teams)) {
- // OpenMP [2.16, Nesting of Regions]
- // distribute, parallel, parallel sections, parallel workshare, and the
- // parallel loop and parallel loop SIMD constructs are the only OpenMP
- // constructs that can be closely nested in the teams region.
+ // OpenMP [5.1, 2.22, Nesting of Regions]
+ // distribute, distribute simd, distribute parallel worksharing-loop,
+ // distribute parallel worksharing-loop SIMD, loop, parallel regions,
+ // including any parallel regions arising from combined constructs,
+ // omp_get_num_teams() regions, and omp_get_team_num() regions are the
+ // only OpenMP regions that may be strictly nested inside the teams
+ // region.
+ //
+ // As an extension, we permit atomic within teams as well.
NestingProhibited = !isOpenMPParallelDirective(CurrentRegion) &&
- !isOpenMPDistributeDirective(CurrentRegion);
+ !isOpenMPDistributeDirective(CurrentRegion) &&
+ CurrentRegion != OMPD_loop &&
+ !(SemaRef.getLangOpts().OpenMPExtensions &&
+ CurrentRegion == OMPD_atomic);
Recommend = ShouldBeInParallelRegion;
}
+ if (!NestingProhibited && CurrentRegion == OMPD_loop) {
+ // OpenMP [5.1, 2.11.7, loop Construct, Restrictions]
+ // If the bind clause is present on the loop construct and binding is
+ // teams then the corresponding loop region must be strictly nested inside
+ // a teams region.
+ NestingProhibited = BindKind == OMPC_BIND_teams &&
+ ParentRegion != OMPD_teams &&
+ ParentRegion != OMPD_target_teams;
+ Recommend = ShouldBeInTeamsRegion;
+ }
if (!NestingProhibited &&
isOpenMPNestingDistributeDirective(CurrentRegion)) {
// OpenMP 4.5 [2.17 Nesting of Regions]
@@ -4929,14 +5325,7 @@ static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
// directive.
// At most one if clause with the particular directive-name-modifier can
// appear on the directive.
- bool MatchFound = false;
- for (auto NM : AllowedNameModifiers) {
- if (CurNM == NM) {
- MatchFound = true;
- break;
- }
- }
- if (!MatchFound) {
+ if (!llvm::is_contained(AllowedNameModifiers, CurNM)) {
S.Diag(IC->getNameModifierLoc(),
diag::err_omp_wrong_if_directive_name_modifier)
<< getOpenMPDirectiveName(CurNM) << getOpenMPDirectiveName(Kind);
@@ -4985,7 +5374,8 @@ static bool checkIfClauses(Sema &S, OpenMPDirectiveKind Kind,
static std::pair<ValueDecl *, bool> getPrivateItem(Sema &S, Expr *&RefExpr,
SourceLocation &ELoc,
SourceRange &ERange,
- bool AllowArraySection) {
+ bool AllowArraySection,
+ StringRef DiagType) {
if (RefExpr->isTypeDependent() || RefExpr->isValueDependent() ||
RefExpr->containsUnexpandedParameterPack())
return std::make_pair(nullptr, true);
@@ -5028,8 +5418,14 @@ static std::pair<ValueDecl *, bool> getPrivateItem(Sema &S, Expr *&RefExpr,
!isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()) ||
!isa<FieldDecl>(ME->getMemberDecl()))) {
if (IsArrayExpr != NoArrayExpr) {
- S.Diag(ELoc, diag::err_omp_expected_base_var_name) << IsArrayExpr
- << ERange;
+ S.Diag(ELoc, diag::err_omp_expected_base_var_name)
+ << IsArrayExpr << ERange;
+ } else if (!DiagType.empty()) {
+ unsigned DiagSelect = S.getLangOpts().CPlusPlus
+ ? (S.getCurrentThisType().isNull() ? 1 : 2)
+ : 0;
+ S.Diag(ELoc, diag::err_omp_expected_var_name_member_expr_with_type)
+ << DiagSelect << DiagType << ERange;
} else {
S.Diag(ELoc,
AllowArraySection
@@ -5052,8 +5448,7 @@ class AllocatorChecker final : public ConstStmtVisitor<AllocatorChecker, bool> {
public:
bool VisitDeclRefExpr(const DeclRefExpr *E) {
return S->isUsesAllocatorsDecl(E->getDecl())
- .getValueOr(
- DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait) ==
+ .value_or(DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait) ==
DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait;
}
bool VisitStmt(const Stmt *S) {
@@ -5073,8 +5468,7 @@ static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
"Expected non-dependent context.");
auto AllocateRange =
llvm::make_filter_range(Clauses, OMPAllocateClause::classof);
- llvm::DenseMap<CanonicalDeclPtr<Decl>, CanonicalDeclPtr<VarDecl>>
- DeclToCopy;
+ llvm::DenseMap<CanonicalDeclPtr<Decl>, CanonicalDeclPtr<VarDecl>> DeclToCopy;
auto PrivateRange = llvm::make_filter_range(Clauses, [](const OMPClause *C) {
return isOpenMPPrivate(C->getClauseKind());
});
@@ -5180,8 +5574,10 @@ static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
if (checkPreviousOMPAllocateAttribute(S, Stack, E, PrivateVD,
AllocatorKind, AC->getAllocator()))
continue;
+ // Placeholder until allocate clause supports align modifier.
+ Expr *Alignment = nullptr;
applyOMPAllocateAttribute(S, PrivateVD, AllocatorKind, AC->getAllocator(),
- E->getSourceRange());
+ Alignment, E->getSourceRange());
}
}
}
@@ -5266,6 +5662,8 @@ static CapturedStmt *buildDistanceFunc(Sema &Actions, QualType LogicalTy,
IntegerLiteral *Zero = IntegerLiteral::Create(
Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), 0), LogicalTy, {});
+ IntegerLiteral *One = IntegerLiteral::Create(
+ Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), 1), LogicalTy, {});
Expr *Dist;
if (Rel == BO_NE) {
// When using a != comparison, the increment can be +1 or -1. This can be
@@ -5321,16 +5719,25 @@ static CapturedStmt *buildDistanceFunc(Sema &Actions, QualType LogicalTy,
if (Rel == BO_LE || Rel == BO_GE) {
// Add one to the range if the relational operator is inclusive.
Range =
- AssertSuccess(Actions.BuildUnaryOp(nullptr, {}, UO_PreInc, Range));
+ AssertSuccess(Actions.BuildBinOp(nullptr, {}, BO_Add, Range, One));
}
- // Divide by the absolute step amount.
+ // Divide by the absolute step amount. If the range is not a multiple of
+ // the step size, rounding-up the effective upper bound ensures that the
+ // last iteration is included.
+ // Note that the rounding-up may cause an overflow in a temporry that
+ // could be avoided, but would have occurred in a C-style for-loop as
+ // well.
Expr *Divisor = BuildVarRef(NewStep);
if (Rel == BO_GE || Rel == BO_GT)
Divisor =
AssertSuccess(Actions.BuildUnaryOp(nullptr, {}, UO_Minus, Divisor));
+ Expr *DivisorMinusOne =
+ AssertSuccess(Actions.BuildBinOp(nullptr, {}, BO_Sub, Divisor, One));
+ Expr *RangeRoundUp = AssertSuccess(
+ Actions.BuildBinOp(nullptr, {}, BO_Add, Range, DivisorMinusOne));
Dist = AssertSuccess(
- Actions.BuildBinOp(nullptr, {}, BO_Div, Range, Divisor));
+ Actions.BuildBinOp(nullptr, {}, BO_Div, RangeRoundUp, Divisor));
// If there is not at least one iteration, the range contains garbage. Fix
// to zero in this case.
@@ -5573,6 +5980,19 @@ StmtResult Sema::ActOnOpenMPCanonicalLoop(Stmt *AStmt) {
LoopVarFunc, LVRef);
}
+StmtResult Sema::ActOnOpenMPLoopnest(Stmt *AStmt) {
+ // Handle a literal loop.
+ if (isa<ForStmt>(AStmt) || isa<CXXForRangeStmt>(AStmt))
+ return ActOnOpenMPCanonicalLoop(AStmt);
+
+ // If not a literal loop, it must be the result of a loop transformation.
+ OMPExecutableDirective *LoopTransform = cast<OMPExecutableDirective>(AStmt);
+ assert(
+ isOpenMPLoopTransformationDirective(LoopTransform->getDirectiveKind()) &&
+ "Loop transformation directive expected");
+ return LoopTransform;
+}
+
static ExprResult buildUserDefinedMapperRef(Sema &SemaRef, Scope *S,
CXXScopeSpec &MapperIdScopeSpec,
const DeclarationNameInfo &MapperId,
@@ -5682,7 +6102,7 @@ processImplicitMapsWithDefaultMappers(Sema &S, DSAStackTy *Stack,
SubExprs.push_back(BaseExpr);
continue;
}
- // Check for the "default" mapper for data memebers.
+ // Check for the "default" mapper for data members.
bool FirstIter = true;
for (FieldDecl *FD : RD->fields()) {
if (!FD)
@@ -5706,7 +6126,7 @@ processImplicitMapsWithDefaultMappers(Sema &S, DSAStackTy *Stack,
CXXScopeSpec MapperIdScopeSpec;
DeclarationNameInfo MapperId;
if (OMPClause *NewClause = S.ActOnOpenMPMapClause(
- C->getMapTypeModifiers(), C->getMapTypeModifiersLoc(),
+ nullptr, C->getMapTypeModifiers(), C->getMapTypeModifiersLoc(),
MapperIdScopeSpec, MapperId, C->getMapType(),
/*IsMapTypeImplicit=*/true, SourceLocation(), SourceLocation(),
SubExprs, OMPVarListLocTy()))
@@ -5714,21 +6134,140 @@ processImplicitMapsWithDefaultMappers(Sema &S, DSAStackTy *Stack,
}
}
+bool Sema::mapLoopConstruct(llvm::SmallVector<OMPClause *> &ClausesWithoutBind,
+ ArrayRef<OMPClause *> Clauses,
+ OpenMPBindClauseKind &BindKind,
+ OpenMPDirectiveKind &Kind,
+ OpenMPDirectiveKind &PrevMappedDirective,
+ SourceLocation StartLoc, SourceLocation EndLoc,
+ const DeclarationNameInfo &DirName,
+ OpenMPDirectiveKind CancelRegion) {
+
+ bool UseClausesWithoutBind = false;
+
+ // Restricting to "#pragma omp loop bind"
+ if (getLangOpts().OpenMP >= 50 && Kind == OMPD_loop) {
+
+ const OpenMPDirectiveKind ParentDirective = DSAStack->getParentDirective();
+
+ if (BindKind == OMPC_BIND_unknown) {
+ // Setting the enclosing teams or parallel construct for the loop
+ // directive without bind clause.
+ BindKind = OMPC_BIND_thread; // Default bind(thread) if binding is unknown
+
+ if (ParentDirective == OMPD_unknown) {
+ Diag(DSAStack->getDefaultDSALocation(),
+ diag::err_omp_bind_required_on_loop);
+ } else if (ParentDirective == OMPD_parallel ||
+ ParentDirective == OMPD_target_parallel) {
+ BindKind = OMPC_BIND_parallel;
+ } else if (ParentDirective == OMPD_teams ||
+ ParentDirective == OMPD_target_teams) {
+ BindKind = OMPC_BIND_teams;
+ }
+ } else {
+ // bind clause is present in loop directive. When the loop directive is
+ // changed to a new directive the bind clause is not used. So, we should
+ // set flag indicating to only use the clauses that aren't the
+ // bind clause.
+ UseClausesWithoutBind = true;
+ }
+
+ for (OMPClause *C : Clauses) {
+ // Spec restriction : bind(teams) and reduction not permitted.
+ if (BindKind == OMPC_BIND_teams &&
+ C->getClauseKind() == llvm::omp::Clause::OMPC_reduction)
+ Diag(DSAStack->getDefaultDSALocation(),
+ diag::err_omp_loop_reduction_clause);
+
+ // A new Vector ClausesWithoutBind, which does not contain the bind
+ // clause, for passing to new directive.
+ if (C->getClauseKind() != llvm::omp::Clause::OMPC_bind)
+ ClausesWithoutBind.push_back(C);
+ }
+
+ switch (BindKind) {
+ case OMPC_BIND_parallel:
+ Kind = OMPD_for;
+ DSAStack->setCurrentDirective(OMPD_for);
+ DSAStack->setMappedDirective(OMPD_loop);
+ PrevMappedDirective = OMPD_loop;
+ break;
+ case OMPC_BIND_teams:
+ Kind = OMPD_distribute;
+ DSAStack->setCurrentDirective(OMPD_distribute);
+ DSAStack->setMappedDirective(OMPD_loop);
+ PrevMappedDirective = OMPD_loop;
+ break;
+ case OMPC_BIND_thread:
+ Kind = OMPD_simd;
+ DSAStack->setCurrentDirective(OMPD_simd);
+ DSAStack->setMappedDirective(OMPD_loop);
+ PrevMappedDirective = OMPD_loop;
+ break;
+ case OMPC_BIND_unknown:
+ break;
+ }
+ } else if (PrevMappedDirective == OMPD_loop) {
+ /// An initial pass after recognizing all the statements is done in the
+ /// Parser when the directive OMPD_loop is mapped to OMPD_for,
+ /// OMPD_distribute or OMPD_simd. A second transform pass with call from
+ /// clang::TreeTransform::TransformOMPExecutableDirective() is done
+ /// with the Directive as one of the above mapped directive without
+ /// the bind clause. Then "PrevMappedDirective" stored in the
+ /// OMPExecutableDirective is accessed and hence this else statement.
+
+ DSAStack->setMappedDirective(OMPD_loop);
+ }
+
+ return UseClausesWithoutBind;
+}
+
StmtResult Sema::ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) {
+ Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc,
+ OpenMPDirectiveKind PrevMappedDirective) {
StmtResult Res = StmtError();
+ OpenMPBindClauseKind BindKind = OMPC_BIND_unknown;
+ llvm::SmallVector<OMPClause *> ClausesWithoutBind;
+ bool UseClausesWithoutBind = false;
+
+ if (const OMPBindClause *BC =
+ OMPExecutableDirective::getSingleClause<OMPBindClause>(Clauses))
+ BindKind = BC->getBindKind();
+
+ // Variable used to note down the DirectiveKind because mapLoopConstruct may
+ // change "Kind" variable, due to mapping of "omp loop" to other directives.
+ OpenMPDirectiveKind DK = Kind;
+ if (Kind == OMPD_loop || PrevMappedDirective == OMPD_loop) {
+ UseClausesWithoutBind = mapLoopConstruct(
+ ClausesWithoutBind, Clauses, BindKind, Kind, PrevMappedDirective,
+ StartLoc, EndLoc, DirName, CancelRegion);
+ DK = OMPD_loop;
+ }
+
// First check CancelRegion which is then used in checkNestingOfRegions.
if (checkCancelRegion(*this, Kind, CancelRegion, StartLoc) ||
- checkNestingOfRegions(*this, DSAStack, Kind, DirName, CancelRegion,
- StartLoc))
+ checkNestingOfRegions(*this, DSAStack, DK, DirName, CancelRegion,
+ BindKind, StartLoc)) {
return StmtError();
+ }
+
+ // Report affected OpenMP target offloading behavior when in HIP lang-mode.
+ if (getLangOpts().HIP && (isOpenMPTargetExecutionDirective(Kind) ||
+ isOpenMPTargetDataManagementDirective(Kind)))
+ Diag(StartLoc, diag::warn_hip_omp_target_directives);
llvm::SmallVector<OMPClause *, 8> ClausesWithImplicit;
VarsWithInheritedDSAType VarsWithInheritedDSA;
bool ErrorFound = false;
- ClausesWithImplicit.append(Clauses.begin(), Clauses.end());
+ if (getLangOpts().OpenMP >= 50 && UseClausesWithoutBind) {
+ ClausesWithImplicit.append(ClausesWithoutBind.begin(),
+ ClausesWithoutBind.end());
+ } else {
+ ClausesWithImplicit.append(Clauses.begin(), Clauses.end());
+ }
if (AStmt && !CurContext->isDependentContext() && Kind != OMPD_atomic &&
Kind != OMPD_critical && Kind != OMPD_section && Kind != OMPD_master &&
Kind != OMPD_masked && !isOpenMPLoopTransformationDirective(Kind)) {
@@ -5760,7 +6299,10 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
SmallVector<Expr *, 4> ImplicitFirstprivates(
DSAChecker.getImplicitFirstprivate().begin(),
DSAChecker.getImplicitFirstprivate().end());
- const unsigned DefaultmapKindNum = OMPC_DEFAULTMAP_pointer + 1;
+ SmallVector<Expr *, 4> ImplicitPrivates(
+ DSAChecker.getImplicitPrivate().begin(),
+ DSAChecker.getImplicitPrivate().end());
+ const unsigned DefaultmapKindNum = OMPC_DEFAULTMAP_unknown + 1;
SmallVector<Expr *, 4> ImplicitMaps[DefaultmapKindNum][OMPC_MAP_delete];
SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
ImplicitMapModifiers[DefaultmapKindNum];
@@ -5812,6 +6354,42 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
ErrorFound = true;
}
}
+ if (!ImplicitPrivates.empty()) {
+ if (OMPClause *Implicit =
+ ActOnOpenMPPrivateClause(ImplicitPrivates, SourceLocation(),
+ SourceLocation(), SourceLocation())) {
+ ClausesWithImplicit.push_back(Implicit);
+ ErrorFound = cast<OMPPrivateClause>(Implicit)->varlist_size() !=
+ ImplicitPrivates.size();
+ } else {
+ ErrorFound = true;
+ }
+ }
+ // OpenMP 5.0 [2.19.7]
+ // If a list item appears in a reduction, lastprivate or linear
+ // clause on a combined target construct then it is treated as
+ // if it also appears in a map clause with a map-type of tofrom
+ if (getLangOpts().OpenMP >= 50 && Kind != OMPD_target &&
+ isOpenMPTargetExecutionDirective(Kind)) {
+ SmallVector<Expr *, 4> ImplicitExprs;
+ for (OMPClause *C : Clauses) {
+ if (auto *RC = dyn_cast<OMPReductionClause>(C))
+ for (Expr *E : RC->varlists())
+ if (!isa<DeclRefExpr>(E->IgnoreParenImpCasts()))
+ ImplicitExprs.emplace_back(E);
+ }
+ if (!ImplicitExprs.empty()) {
+ ArrayRef<Expr *> Exprs = ImplicitExprs;
+ CXXScopeSpec MapperIdScopeSpec;
+ DeclarationNameInfo MapperId;
+ if (OMPClause *Implicit = ActOnOpenMPMapClause(
+ nullptr, OMPC_MAP_MODIFIER_unknown, SourceLocation(),
+ MapperIdScopeSpec, MapperId, OMPC_MAP_tofrom,
+ /*IsMapTypeImplicit=*/true, SourceLocation(), SourceLocation(),
+ Exprs, OMPVarListLocTy(), /*NoDiagnose=*/true))
+ ClausesWithImplicit.emplace_back(Implicit);
+ }
+ }
for (unsigned I = 0, E = DefaultmapKindNum; I < E; ++I) {
int ClauseKindCnt = -1;
for (ArrayRef<Expr *> ImplicitMap : ImplicitMaps[I]) {
@@ -5822,7 +6400,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
DeclarationNameInfo MapperId;
auto Kind = static_cast<OpenMPMapClauseKind>(ClauseKindCnt);
if (OMPClause *Implicit = ActOnOpenMPMapClause(
- ImplicitMapModifiers[I], ImplicitMapModifiersLoc[I],
+ nullptr, ImplicitMapModifiers[I], ImplicitMapModifiersLoc[I],
MapperIdScopeSpec, MapperId, Kind, /*IsMapTypeImplicit=*/true,
SourceLocation(), SourceLocation(), ImplicitMap,
OMPVarListLocTy())) {
@@ -5910,9 +6488,18 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
if (LangOpts.OpenMP >= 50)
AllowedNameModifiers.push_back(OMPD_simd);
break;
+ case OMPD_scope:
+ Res =
+ ActOnOpenMPScopeDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
+ break;
case OMPD_parallel_master:
Res = ActOnOpenMPParallelMasterDirective(ClausesWithImplicit, AStmt,
- StartLoc, EndLoc);
+ StartLoc, EndLoc);
+ AllowedNameModifiers.push_back(OMPD_parallel);
+ break;
+ case OMPD_parallel_masked:
+ Res = ActOnOpenMPParallelMaskedDirective(ClausesWithImplicit, AStmt,
+ StartLoc, EndLoc);
AllowedNameModifiers.push_back(OMPD_parallel);
break;
case OMPD_parallel_sections:
@@ -5932,6 +6519,11 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
"No associated statement allowed for 'omp taskyield' directive");
Res = ActOnOpenMPTaskyieldDirective(StartLoc, EndLoc);
break;
+ case OMPD_error:
+ assert(AStmt == nullptr &&
+ "No associated statement allowed for 'omp error' directive");
+ Res = ActOnOpenMPErrorDirective(ClausesWithImplicit, StartLoc, EndLoc);
+ break;
case OMPD_barrier:
assert(ClausesWithImplicit.empty() &&
"No clauses are allowed for 'omp barrier' directive");
@@ -5940,11 +6532,9 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
Res = ActOnOpenMPBarrierDirective(StartLoc, EndLoc);
break;
case OMPD_taskwait:
- assert(ClausesWithImplicit.empty() &&
- "No clauses are allowed for 'omp taskwait' directive");
assert(AStmt == nullptr &&
"No associated statement allowed for 'omp taskwait' directive");
- Res = ActOnOpenMPTaskwaitDirective(StartLoc, EndLoc);
+ Res = ActOnOpenMPTaskwaitDirective(ClausesWithImplicit, StartLoc, EndLoc);
break;
case OMPD_taskgroup:
Res = ActOnOpenMPTaskgroupDirective(ClausesWithImplicit, AStmt, StartLoc,
@@ -6040,6 +6630,11 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
AllowedNameModifiers.push_back(OMPD_taskloop);
break;
+ case OMPD_masked_taskloop:
+ Res = ActOnOpenMPMaskedTaskLoopDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ AllowedNameModifiers.push_back(OMPD_taskloop);
+ break;
case OMPD_master_taskloop_simd:
Res = ActOnOpenMPMasterTaskLoopSimdDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
@@ -6047,12 +6642,28 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
if (LangOpts.OpenMP >= 50)
AllowedNameModifiers.push_back(OMPD_simd);
break;
+ case OMPD_masked_taskloop_simd:
+ Res = ActOnOpenMPMaskedTaskLoopSimdDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ if (LangOpts.OpenMP >= 51) {
+ AllowedNameModifiers.push_back(OMPD_taskloop);
+ AllowedNameModifiers.push_back(OMPD_simd);
+ }
+ break;
case OMPD_parallel_master_taskloop:
Res = ActOnOpenMPParallelMasterTaskLoopDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
AllowedNameModifiers.push_back(OMPD_taskloop);
AllowedNameModifiers.push_back(OMPD_parallel);
break;
+ case OMPD_parallel_masked_taskloop:
+ Res = ActOnOpenMPParallelMaskedTaskLoopDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ if (LangOpts.OpenMP >= 51) {
+ AllowedNameModifiers.push_back(OMPD_taskloop);
+ AllowedNameModifiers.push_back(OMPD_parallel);
+ }
+ break;
case OMPD_parallel_master_taskloop_simd:
Res = ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
@@ -6061,6 +6672,15 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
if (LangOpts.OpenMP >= 50)
AllowedNameModifiers.push_back(OMPD_simd);
break;
+ case OMPD_parallel_masked_taskloop_simd:
+ Res = ActOnOpenMPParallelMaskedTaskLoopSimdDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ if (LangOpts.OpenMP >= 51) {
+ AllowedNameModifiers.push_back(OMPD_taskloop);
+ AllowedNameModifiers.push_back(OMPD_parallel);
+ AllowedNameModifiers.push_back(OMPD_simd);
+ }
+ break;
case OMPD_distribute:
Res = ActOnOpenMPDistributeDirective(ClausesWithImplicit, AStmt, StartLoc,
EndLoc, VarsWithInheritedDSA);
@@ -6165,6 +6785,27 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
Res = ActOnOpenMPDispatchDirective(ClausesWithImplicit, AStmt, StartLoc,
EndLoc);
break;
+ case OMPD_loop:
+ Res = ActOnOpenMPGenericLoopDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc, VarsWithInheritedDSA);
+ break;
+ case OMPD_teams_loop:
+ Res = ActOnOpenMPTeamsGenericLoopDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ break;
+ case OMPD_target_teams_loop:
+ Res = ActOnOpenMPTargetTeamsGenericLoopDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ AllowedNameModifiers.push_back(OMPD_target);
+ break;
+ case OMPD_parallel_loop:
+ Res = ActOnOpenMPParallelGenericLoopDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ break;
+ case OMPD_target_parallel_loop:
+ Res = ActOnOpenMPTargetParallelGenericLoopDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ break;
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_threadprivate:
@@ -6187,6 +6828,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
// Check variables in the clauses if default(none) or
// default(firstprivate) was specified.
if (DSAStack->getDefaultDSA() == DSA_none ||
+ DSAStack->getDefaultDSA() == DSA_private ||
DSAStack->getDefaultDSA() == DSA_firstprivate) {
DSAAttrChecker DSAChecker(DSAStack, *this, nullptr);
for (OMPClause *C : Clauses) {
@@ -6249,6 +6891,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPC_write:
case OMPC_update:
case OMPC_capture:
+ case OMPC_compare:
case OMPC_seq_cst:
case OMPC_acq_rel:
case OMPC_acquire:
@@ -6265,6 +6908,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPC_use_device_ptr:
case OMPC_use_device_addr:
case OMPC_is_device_ptr:
+ case OMPC_has_device_addr:
case OMPC_nontemporal:
case OMPC_order:
case OMPC_destroy:
@@ -6272,6 +6916,8 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPC_exclusive:
case OMPC_uses_allocators:
case OMPC_affinity:
+ case OMPC_bind:
+ case OMPC_filter:
continue;
case OMPC_allocator:
case OMPC_flush:
@@ -6286,6 +6932,10 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPC_atomic_default_mem_order:
case OMPC_device_type:
case OMPC_match:
+ case OMPC_when:
+ case OMPC_at:
+ case OMPC_severity:
+ case OMPC_message:
default:
llvm_unreachable("Unexpected clause");
}
@@ -6302,6 +6952,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
continue;
ErrorFound = true;
if (DSAStack->getDefaultDSA() == DSA_none ||
+ DSAStack->getDefaultDSA() == DSA_private ||
DSAStack->getDefaultDSA() == DSA_firstprivate) {
Diag(P.second->getExprLoc(), diag::err_omp_no_dsa_for_variable)
<< P.first << P.second->getSourceRange();
@@ -6619,7 +7270,7 @@ void Sema::ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D) {
FD = cast<FunctionDecl>(D);
assert(FD && "Expected a function declaration!");
- // If we are intantiating templates we do *not* apply scoped assumptions but
+ // If we are instantiating templates we do *not* apply scoped assumptions but
// only global ones. We apply scoped assumption to the template definition
// though.
if (!inTemplateInstantiation()) {
@@ -6653,7 +7304,7 @@ void Sema::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
LookupOrdinaryName);
LookupParsedName(Lookup, S, &D.getCXXScopeSpec());
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
QualType FType = TInfo->getType();
bool IsConstexpr =
@@ -6664,9 +7315,11 @@ void Sema::ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
for (auto *Candidate : Lookup) {
auto *CandidateDecl = Candidate->getUnderlyingDecl();
FunctionDecl *UDecl = nullptr;
- if (IsTemplated && isa<FunctionTemplateDecl>(CandidateDecl))
- UDecl = cast<FunctionTemplateDecl>(CandidateDecl)->getTemplatedDecl();
- else if (!IsTemplated)
+ if (IsTemplated && isa<FunctionTemplateDecl>(CandidateDecl)) {
+ auto *FTD = cast<FunctionTemplateDecl>(CandidateDecl);
+ if (FTD->getTemplateParameters()->size() == TemplateParamLists.size())
+ UDecl = FTD->getTemplatedDecl();
+ } else if (!IsTemplated)
UDecl = dyn_cast<FunctionDecl>(CandidateDecl);
if (!UDecl)
continue;
@@ -6734,7 +7387,10 @@ void Sema::ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
OMPDeclareVariantScope &DVScope = OMPDeclareVariantScopes.back();
auto *OMPDeclareVariantA = OMPDeclareVariantAttr::CreateImplicit(
- Context, VariantFuncRef, DVScope.TI);
+ Context, VariantFuncRef, DVScope.TI,
+ /*NothingArgs=*/nullptr, /*NothingArgsSize=*/0,
+ /*NeedDevicePtrArgs=*/nullptr, /*NeedDevicePtrArgsSize=*/0,
+ /*AppendArgs=*/nullptr, /*AppendArgsSize=*/0);
for (FunctionDecl *BaseFD : Bases)
BaseFD->addAttr(OMPDeclareVariantA);
}
@@ -6753,6 +7409,13 @@ ExprResult Sema::ActOnOpenMPCall(ExprResult Call, Scope *Scope,
if (!CalleeFnDecl)
return Call;
+ if (LangOpts.OpenMP >= 51 && CalleeFnDecl->getIdentifier() &&
+ CalleeFnDecl->getName().starts_with_insensitive("omp_")) {
+ // checking for any calls inside an Order region
+ if (Scope && Scope->isOpenMPOrderClauseScope())
+ Diag(LParenLoc, diag::err_omp_unexpected_call_to_omp_runtime_api);
+ }
+
if (!CalleeFnDecl->hasAttr<OMPDeclareVariantAttr>())
return Call;
@@ -6765,7 +7428,7 @@ ExprResult Sema::ActOnOpenMPCall(ExprResult Call, Scope *Scope,
<< ISATrait;
};
TargetOMPContext OMPCtx(Context, std::move(DiagUnknownTrait),
- getCurFunctionDecl());
+ getCurFunctionDecl(), DSAStack->getConstructTraits());
QualType CalleeFnType = CalleeFnDecl->getType();
@@ -6845,19 +7508,20 @@ ExprResult Sema::ActOnOpenMPCall(ExprResult Call, Scope *Scope,
return PseudoObjectExpr::Create(Context, CE, {NewCall.get()}, 0);
}
-Optional<std::pair<FunctionDecl *, Expr *>>
+std::optional<std::pair<FunctionDecl *, Expr *>>
Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
Expr *VariantRef, OMPTraitInfo &TI,
+ unsigned NumAppendArgs,
SourceRange SR) {
if (!DG || DG.get().isNull())
- return None;
+ return std::nullopt;
const int VariantId = 1;
// Must be applied only to single decl.
if (!DG.get().isSingleDecl()) {
Diag(SR.getBegin(), diag::err_omp_single_decl_in_declare_simd_variant)
<< VariantId << SR;
- return None;
+ return std::nullopt;
}
Decl *ADecl = DG.get().getSingleDecl();
if (auto *FTD = dyn_cast<FunctionTemplateDecl>(ADecl))
@@ -6868,19 +7532,19 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
if (!FD) {
Diag(ADecl->getLocation(), diag::err_omp_function_expected)
<< VariantId << SR;
- return None;
+ return std::nullopt;
}
auto &&HasMultiVersionAttributes = [](const FunctionDecl *FD) {
- return FD->hasAttrs() &&
- (FD->hasAttr<CPUDispatchAttr>() || FD->hasAttr<CPUSpecificAttr>() ||
- FD->hasAttr<TargetAttr>());
+ // The 'target' attribute needs to be separately checked because it does
+ // not always signify a multiversion function declaration.
+ return FD->isMultiVersion() || FD->hasAttr<TargetAttr>();
};
- // OpenMP is not compatible with CPU-specific attributes.
+ // OpenMP is not compatible with multiversion function attributes.
if (HasMultiVersionAttributes(FD)) {
Diag(FD->getLocation(), diag::err_omp_declare_variant_incompat_attributes)
<< SR;
- return None;
+ return std::nullopt;
}
// Allow #pragma omp declare variant only if the function is not used.
@@ -6898,7 +7562,7 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
// The VariantRef must point to function.
if (!VariantRef) {
Diag(SR.getBegin(), diag::err_omp_function_expected) << VariantId;
- return None;
+ return std::nullopt;
}
auto ShouldDelayChecks = [](Expr *&E, bool) {
@@ -6933,7 +7597,40 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
return true;
};
if (TI.anyScoreOrCondition(HandleNonConstantScoresAndConditions))
- return None;
+ return std::nullopt;
+
+ QualType AdjustedFnType = FD->getType();
+ if (NumAppendArgs) {
+ const auto *PTy = AdjustedFnType->getAsAdjusted<FunctionProtoType>();
+ if (!PTy) {
+ Diag(FD->getLocation(), diag::err_omp_declare_variant_prototype_required)
+ << SR;
+ return std::nullopt;
+ }
+ // Adjust the function type to account for an extra omp_interop_t for each
+ // specified in the append_args clause.
+ const TypeDecl *TD = nullptr;
+ LookupResult Result(*this, &Context.Idents.get("omp_interop_t"),
+ SR.getBegin(), Sema::LookupOrdinaryName);
+ if (LookupName(Result, getCurScope())) {
+ NamedDecl *ND = Result.getFoundDecl();
+ TD = dyn_cast_or_null<TypeDecl>(ND);
+ }
+ if (!TD) {
+ Diag(SR.getBegin(), diag::err_omp_interop_type_not_found) << SR;
+ return std::nullopt;
+ }
+ QualType InteropType = Context.getTypeDeclType(TD);
+ if (PTy->isVariadic()) {
+ Diag(FD->getLocation(), diag::err_omp_append_args_with_varargs) << SR;
+ return std::nullopt;
+ }
+ llvm::SmallVector<QualType, 8> Params;
+ Params.append(PTy->param_type_begin(), PTy->param_type_end());
+ Params.insert(Params.end(), NumAppendArgs, InteropType);
+ AdjustedFnType = Context.getFunctionType(PTy->getReturnType(), Params,
+ PTy->getExtProtoInfo());
+ }
// Convert VariantRef expression to the type of the original function to
// resolve possible conflicts.
@@ -6944,7 +7641,7 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
if (Method && !Method->isStatic()) {
const Type *ClassType =
Context.getTypeDeclType(Method->getParent()).getTypePtr();
- FnPtrType = Context.getMemberPointerType(FD->getType(), ClassType);
+ FnPtrType = Context.getMemberPointerType(AdjustedFnType, ClassType);
ExprResult ER;
{
// Build adrr_of unary op to correctly handle type checks for member
@@ -6956,11 +7653,11 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
if (!ER.isUsable()) {
Diag(VariantRef->getExprLoc(), diag::err_omp_function_expected)
<< VariantId << VariantRef->getSourceRange();
- return None;
+ return std::nullopt;
}
VariantRef = ER.get();
} else {
- FnPtrType = Context.getPointerType(FD->getType());
+ FnPtrType = Context.getPointerType(AdjustedFnType);
}
QualType VarianPtrType = Context.getPointerType(VariantRef->getType());
if (VarianPtrType.getUnqualifiedType() != FnPtrType.getUnqualifiedType()) {
@@ -6975,13 +7672,13 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
diag::err_omp_declare_variant_incompat_types)
<< VariantRef->getType()
<< ((Method && !Method->isStatic()) ? FnPtrType : FD->getType())
- << VariantRef->getSourceRange();
- return None;
+ << (NumAppendArgs ? 1 : 0) << VariantRef->getSourceRange();
+ return std::nullopt;
}
VariantRefCast = PerformImplicitConversion(
VariantRef, FnPtrType.getUnqualifiedType(), AA_Converting);
if (!VariantRefCast.isUsable())
- return None;
+ return std::nullopt;
}
// Drop previously built artificial addr_of unary op for member functions.
if (Method && !Method->isStatic()) {
@@ -6997,7 +7694,7 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
!ER.get()->IgnoreParenImpCasts()->getType()->isFunctionType()) {
Diag(VariantRef->getExprLoc(), diag::err_omp_function_expected)
<< VariantId << VariantRef->getSourceRange();
- return None;
+ return std::nullopt;
}
// The VariantRef must point to function.
@@ -7005,24 +7702,32 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
if (!DRE) {
Diag(VariantRef->getExprLoc(), diag::err_omp_function_expected)
<< VariantId << VariantRef->getSourceRange();
- return None;
+ return std::nullopt;
}
auto *NewFD = dyn_cast_or_null<FunctionDecl>(DRE->getDecl());
if (!NewFD) {
Diag(VariantRef->getExprLoc(), diag::err_omp_function_expected)
<< VariantId << VariantRef->getSourceRange();
- return None;
+ return std::nullopt;
+ }
+
+ if (FD->getCanonicalDecl() == NewFD->getCanonicalDecl()) {
+ Diag(VariantRef->getExprLoc(),
+ diag::err_omp_declare_variant_same_base_function)
+ << VariantRef->getSourceRange();
+ return std::nullopt;
}
// Check if function types are compatible in C.
if (!LangOpts.CPlusPlus) {
QualType NewType =
- Context.mergeFunctionTypes(FD->getType(), NewFD->getType());
+ Context.mergeFunctionTypes(AdjustedFnType, NewFD->getType());
if (NewType.isNull()) {
Diag(VariantRef->getExprLoc(),
diag::err_omp_declare_variant_incompat_types)
- << NewFD->getType() << FD->getType() << VariantRef->getSourceRange();
- return None;
+ << NewFD->getType() << FD->getType() << (NumAppendArgs ? 1 : 0)
+ << VariantRef->getSourceRange();
+ return std::nullopt;
}
if (NewType->isFunctionProtoType()) {
if (FD->getType()->isFunctionNoProtoType())
@@ -7040,7 +7745,7 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
SourceRange SR =
NewFD->specific_attr_begin<OMPDeclareVariantAttr>()->getRange();
Diag(SR.getBegin(), diag::note_omp_marked_declare_variant_here) << SR;
- return None;
+ return std::nullopt;
}
enum DoesntSupport {
@@ -7056,38 +7761,38 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
if (CXXFD->isVirtual()) {
Diag(FD->getLocation(), diag::err_omp_declare_variant_doesnt_support)
<< VirtFuncs;
- return None;
+ return std::nullopt;
}
if (isa<CXXConstructorDecl>(FD)) {
Diag(FD->getLocation(), diag::err_omp_declare_variant_doesnt_support)
<< Constructors;
- return None;
+ return std::nullopt;
}
if (isa<CXXDestructorDecl>(FD)) {
Diag(FD->getLocation(), diag::err_omp_declare_variant_doesnt_support)
<< Destructors;
- return None;
+ return std::nullopt;
}
}
if (FD->isDeleted()) {
Diag(FD->getLocation(), diag::err_omp_declare_variant_doesnt_support)
<< DeletedFuncs;
- return None;
+ return std::nullopt;
}
if (FD->isDefaulted()) {
Diag(FD->getLocation(), diag::err_omp_declare_variant_doesnt_support)
<< DefaultedFuncs;
- return None;
+ return std::nullopt;
}
if (FD->isConstexpr()) {
Diag(FD->getLocation(), diag::err_omp_declare_variant_doesnt_support)
<< (NewFD->isConsteval() ? ConstevalFuncs : ConstexprFuncs);
- return None;
+ return std::nullopt;
}
// Check general compatibility.
@@ -7103,16 +7808,76 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
<< FD->getLocation()),
/*TemplatesSupported=*/true, /*ConstexprSupported=*/false,
/*CLinkageMayDiffer=*/true))
- return None;
+ return std::nullopt;
return std::make_pair(FD, cast<Expr>(DRE));
}
-void Sema::ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD,
- Expr *VariantRef,
- OMPTraitInfo &TI,
- SourceRange SR) {
- auto *NewAttr =
- OMPDeclareVariantAttr::CreateImplicit(Context, VariantRef, &TI, SR);
+void Sema::ActOnOpenMPDeclareVariantDirective(
+ FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI,
+ ArrayRef<Expr *> AdjustArgsNothing,
+ ArrayRef<Expr *> AdjustArgsNeedDevicePtr,
+ ArrayRef<OMPInteropInfo> AppendArgs, SourceLocation AdjustArgsLoc,
+ SourceLocation AppendArgsLoc, SourceRange SR) {
+
+ // OpenMP 5.1 [2.3.5, declare variant directive, Restrictions]
+ // An adjust_args clause or append_args clause can only be specified if the
+ // dispatch selector of the construct selector set appears in the match
+ // clause.
+
+ SmallVector<Expr *, 8> AllAdjustArgs;
+ llvm::append_range(AllAdjustArgs, AdjustArgsNothing);
+ llvm::append_range(AllAdjustArgs, AdjustArgsNeedDevicePtr);
+
+ if (!AllAdjustArgs.empty() || !AppendArgs.empty()) {
+ VariantMatchInfo VMI;
+ TI.getAsVariantMatchInfo(Context, VMI);
+ if (!llvm::is_contained(
+ VMI.ConstructTraits,
+ llvm::omp::TraitProperty::construct_dispatch_dispatch)) {
+ if (!AllAdjustArgs.empty())
+ Diag(AdjustArgsLoc, diag::err_omp_clause_requires_dispatch_construct)
+ << getOpenMPClauseName(OMPC_adjust_args);
+ if (!AppendArgs.empty())
+ Diag(AppendArgsLoc, diag::err_omp_clause_requires_dispatch_construct)
+ << getOpenMPClauseName(OMPC_append_args);
+ return;
+ }
+ }
+
+ // OpenMP 5.1 [2.3.5, declare variant directive, Restrictions]
+ // Each argument can only appear in a single adjust_args clause for each
+ // declare variant directive.
+ llvm::SmallPtrSet<const VarDecl *, 4> AdjustVars;
+
+ for (Expr *E : AllAdjustArgs) {
+ E = E->IgnoreParenImpCasts();
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (const auto *PVD = dyn_cast<ParmVarDecl>(DRE->getDecl())) {
+ const VarDecl *CanonPVD = PVD->getCanonicalDecl();
+ if (FD->getNumParams() > PVD->getFunctionScopeIndex() &&
+ FD->getParamDecl(PVD->getFunctionScopeIndex())
+ ->getCanonicalDecl() == CanonPVD) {
+ // It's a parameter of the function, check duplicates.
+ if (!AdjustVars.insert(CanonPVD).second) {
+ Diag(DRE->getLocation(), diag::err_omp_adjust_arg_multiple_clauses)
+ << PVD;
+ return;
+ }
+ continue;
+ }
+ }
+ }
+ // Anything that is not a function parameter is an error.
+ Diag(E->getExprLoc(), diag::err_omp_param_or_this_in_clause) << FD << 0;
+ return;
+ }
+
+ auto *NewAttr = OMPDeclareVariantAttr::CreateImplicit(
+ Context, VariantRef, &TI, const_cast<Expr **>(AdjustArgsNothing.data()),
+ AdjustArgsNothing.size(),
+ const_cast<Expr **>(AdjustArgsNeedDevicePtr.data()),
+ AdjustArgsNeedDevicePtr.size(),
+ const_cast<OMPInteropInfo *>(AppendArgs.data()), AppendArgs.size(), SR);
FD->addAttr(NewAttr);
}
@@ -7223,7 +7988,7 @@ class OpenMPIterationSpaceChecker {
/// UB > Var
/// UB >= Var
/// This will have no value when the condition is !=
- llvm::Optional<bool> TestIsLessOp;
+ std::optional<bool> TestIsLessOp;
/// This flag is true when condition is strict ( < or > ).
bool TestIsStrictOp = false;
/// This flag is true when step is subtracted on each iteration.
@@ -7232,12 +7997,13 @@ class OpenMPIterationSpaceChecker {
const ValueDecl *DepDecl = nullptr;
/// Contains number of loop (starts from 1) on which loop counter init
/// expression of this loop depends on.
- Optional<unsigned> InitDependOnLC;
+ std::optional<unsigned> InitDependOnLC;
/// Contains number of loop (starts from 1) on which loop counter condition
/// expression of this loop depends on.
- Optional<unsigned> CondDependOnLC;
+ std::optional<unsigned> CondDependOnLC;
/// Checks if the provide statement depends on the loop counter.
- Optional<unsigned> doesDependOnLoopCounter(const Stmt *S, bool IsInitializer);
+ std::optional<unsigned> doesDependOnLoopCounter(const Stmt *S,
+ bool IsInitializer);
/// Original condition required for checking of the exit condition for
/// non-rectangular loop.
Expr *Condition = nullptr;
@@ -7304,12 +8070,12 @@ public:
/// Return true if any expression is dependent.
bool dependent() const;
/// Returns true if the initializer forms non-rectangular loop.
- bool doesInitDependOnLC() const { return InitDependOnLC.hasValue(); }
+ bool doesInitDependOnLC() const { return InitDependOnLC.has_value(); }
/// Returns true if the condition forms non-rectangular loop.
- bool doesCondDependOnLC() const { return CondDependOnLC.hasValue(); }
+ bool doesCondDependOnLC() const { return CondDependOnLC.has_value(); }
/// Returns index of the loop we depend on (starting from 1), or 0 otherwise.
unsigned getLoopDependentIdx() const {
- return InitDependOnLC.getValueOr(CondDependOnLC.getValueOr(0));
+ return InitDependOnLC.value_or(CondDependOnLC.value_or(0));
}
private:
@@ -7320,7 +8086,7 @@ private:
bool setLCDeclAndLB(ValueDecl *NewLCDecl, Expr *NewDeclRefExpr, Expr *NewLB,
bool EmitDiags);
/// Helper to set upper bound.
- bool setUB(Expr *NewUB, llvm::Optional<bool> LessOp, bool StrictOp,
+ bool setUB(Expr *NewUB, std::optional<bool> LessOp, bool StrictOp,
SourceRange SR, SourceLocation SL);
/// Helper to set loop increment.
bool setStep(Expr *NewStep, bool Subtract);
@@ -7342,7 +8108,7 @@ bool OpenMPIterationSpaceChecker::setLCDeclAndLB(ValueDecl *NewLCDecl,
// State consistency checking to ensure correct usage.
assert(LCDecl == nullptr && LB == nullptr && LCRef == nullptr &&
UB == nullptr && Step == nullptr && !TestIsLessOp && !TestIsStrictOp);
- if (!NewLCDecl || !NewLB)
+ if (!NewLCDecl || !NewLB || NewLB->containsErrors())
return true;
LCDecl = getCanonicalDecl(NewLCDecl);
LCRef = NewLCRefExpr;
@@ -7358,14 +8124,13 @@ bool OpenMPIterationSpaceChecker::setLCDeclAndLB(ValueDecl *NewLCDecl,
return false;
}
-bool OpenMPIterationSpaceChecker::setUB(Expr *NewUB,
- llvm::Optional<bool> LessOp,
+bool OpenMPIterationSpaceChecker::setUB(Expr *NewUB, std::optional<bool> LessOp,
bool StrictOp, SourceRange SR,
SourceLocation SL) {
// State consistency checking to ensure correct usage.
assert(LCDecl != nullptr && LB != nullptr && UB == nullptr &&
Step == nullptr && !TestIsLessOp && !TestIsStrictOp);
- if (!NewUB)
+ if (!NewUB || NewUB->containsErrors())
return true;
UB = NewUB;
if (LessOp)
@@ -7380,7 +8145,7 @@ bool OpenMPIterationSpaceChecker::setUB(Expr *NewUB,
bool OpenMPIterationSpaceChecker::setStep(Expr *NewStep, bool Subtract) {
// State consistency checking to ensure correct usage.
assert(LCDecl != nullptr && LB != nullptr && Step == nullptr);
- if (!NewStep)
+ if (!NewStep || NewStep->containsErrors())
return true;
if (!NewStep->isValueDependent()) {
// Check that the step is integer expression.
@@ -7402,7 +8167,7 @@ bool OpenMPIterationSpaceChecker::setStep(Expr *NewStep, bool Subtract) {
// loop. If test-expr is of form b relational-op var and relational-op is
// > or >= then incr-expr must cause var to increase on each iteration of
// the loop.
- Optional<llvm::APSInt> Result =
+ std::optional<llvm::APSInt> Result =
NewStep->getIntegerConstantExpr(SemaRef.Context);
bool IsUnsigned = !NewStep->getType()->hasSignedIntegerRepresentation();
bool IsConstNeg =
@@ -7412,21 +8177,20 @@ bool OpenMPIterationSpaceChecker::setStep(Expr *NewStep, bool Subtract) {
bool IsConstZero = Result && !Result->getBoolValue();
// != with increment is treated as <; != with decrement is treated as >
- if (!TestIsLessOp.hasValue())
+ if (!TestIsLessOp)
TestIsLessOp = IsConstPos || (IsUnsigned && !Subtract);
if (UB && (IsConstZero ||
- (TestIsLessOp.getValue() ?
- (IsConstNeg || (IsUnsigned && Subtract)) :
- (IsConstPos || (IsUnsigned && !Subtract))))) {
+ (*TestIsLessOp ? (IsConstNeg || (IsUnsigned && Subtract))
+ : (IsConstPos || (IsUnsigned && !Subtract))))) {
SemaRef.Diag(NewStep->getExprLoc(),
diag::err_omp_loop_incr_not_compatible)
- << LCDecl << TestIsLessOp.getValue() << NewStep->getSourceRange();
+ << LCDecl << *TestIsLessOp << NewStep->getSourceRange();
SemaRef.Diag(ConditionLoc,
diag::note_omp_loop_cond_requres_compatible_incr)
- << TestIsLessOp.getValue() << ConditionSrcRange;
+ << *TestIsLessOp << ConditionSrcRange;
return true;
}
- if (TestIsLessOp.getValue() == Subtract) {
+ if (*TestIsLessOp == Subtract) {
NewStep =
SemaRef.CreateBuiltinUnaryOp(NewStep->getExprLoc(), UO_Minus, NewStep)
.get();
@@ -7537,7 +8301,7 @@ public:
};
} // namespace
-Optional<unsigned>
+std::optional<unsigned>
OpenMPIterationSpaceChecker::doesDependOnLoopCounter(const Stmt *S,
bool IsInitializer) {
// Check for the non-rectangular loops.
@@ -7547,7 +8311,7 @@ OpenMPIterationSpaceChecker::doesDependOnLoopCounter(const Stmt *S,
DepDecl = LoopStmtChecker.getDepDecl();
return LoopStmtChecker.getBaseLoopId();
}
- return llvm::None;
+ return std::nullopt;
}
bool OpenMPIterationSpaceChecker::checkAndSetInit(Stmt *S, bool EmitDiags) {
@@ -7673,10 +8437,10 @@ bool OpenMPIterationSpaceChecker::checkAndSetCond(Expr *S) {
Condition = S;
S = getExprAsWritten(S);
SourceLocation CondLoc = S->getBeginLoc();
- auto &&CheckAndSetCond = [this, IneqCondIsCanonical](
- BinaryOperatorKind Opcode, const Expr *LHS,
- const Expr *RHS, SourceRange SR,
- SourceLocation OpLoc) -> llvm::Optional<bool> {
+ auto &&CheckAndSetCond =
+ [this, IneqCondIsCanonical](BinaryOperatorKind Opcode, const Expr *LHS,
+ const Expr *RHS, SourceRange SR,
+ SourceLocation OpLoc) -> std::optional<bool> {
if (BinaryOperator::isRelationalOp(Opcode)) {
if (getInitLCDecl(LHS) == LCDecl)
return setUB(const_cast<Expr *>(RHS),
@@ -7688,12 +8452,12 @@ bool OpenMPIterationSpaceChecker::checkAndSetCond(Expr *S) {
(Opcode == BO_LT || Opcode == BO_GT), SR, OpLoc);
} else if (IneqCondIsCanonical && Opcode == BO_NE) {
return setUB(const_cast<Expr *>(getInitLCDecl(LHS) == LCDecl ? RHS : LHS),
- /*LessOp=*/llvm::None,
+ /*LessOp=*/std::nullopt,
/*StrictOp=*/true, SR, OpLoc);
}
- return llvm::None;
+ return std::nullopt;
};
- llvm::Optional<bool> Res;
+ std::optional<bool> Res;
if (auto *RBO = dyn_cast<CXXRewrittenBinaryOperator>(S)) {
CXXRewrittenBinaryOperator::DecomposedForm DF = RBO->getDecomposedForm();
Res = CheckAndSetCond(DF.Opcode, DF.LHS, DF.RHS, RBO->getSourceRange(),
@@ -7708,7 +8472,7 @@ bool OpenMPIterationSpaceChecker::checkAndSetCond(Expr *S) {
CE->getArg(1), CE->getSourceRange(), CE->getOperatorLoc());
}
}
- if (Res.hasValue())
+ if (Res)
return *Res;
if (dependent() || SemaRef.CurContext->isDependentContext())
return false;
@@ -7828,7 +8592,8 @@ bool OpenMPIterationSpaceChecker::checkAndSetInc(Expr *S) {
static ExprResult
tryBuildCapture(Sema &SemaRef, Expr *Capture,
- llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
+ llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
+ StringRef Name = ".capture_expr.") {
if (SemaRef.CurContext->isDependentContext() || Capture->containsErrors())
return Capture;
if (Capture->isEvaluatable(SemaRef.Context, Expr::SE_AllowSideEffects))
@@ -7837,9 +8602,9 @@ tryBuildCapture(Sema &SemaRef, Expr *Capture,
/*AllowExplicit=*/true);
auto I = Captures.find(Capture);
if (I != Captures.end())
- return buildCapture(SemaRef, Capture, I->second);
+ return buildCapture(SemaRef, Capture, I->second, Name);
DeclRefExpr *Ref = nullptr;
- ExprResult Res = buildCapture(SemaRef, Capture, Ref);
+ ExprResult Res = buildCapture(SemaRef, Capture, Ref, Name);
Captures[Capture] = Ref;
return Res;
}
@@ -7851,16 +8616,18 @@ calculateNumIters(Sema &SemaRef, Scope *S, SourceLocation DefaultLoc,
Expr *Lower, Expr *Upper, Expr *Step, QualType LCTy,
bool TestIsStrictOp, bool RoundToStep,
llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
- ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
+ ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures, ".new_step");
if (!NewStep.isUsable())
return nullptr;
llvm::APSInt LRes, SRes;
bool IsLowerConst = false, IsStepConst = false;
- if (Optional<llvm::APSInt> Res = Lower->getIntegerConstantExpr(SemaRef.Context)) {
+ if (std::optional<llvm::APSInt> Res =
+ Lower->getIntegerConstantExpr(SemaRef.Context)) {
LRes = *Res;
IsLowerConst = true;
}
- if (Optional<llvm::APSInt> Res = Step->getIntegerConstantExpr(SemaRef.Context)) {
+ if (std::optional<llvm::APSInt> Res =
+ Step->getIntegerConstantExpr(SemaRef.Context)) {
SRes = *Res;
IsStepConst = true;
}
@@ -7898,7 +8665,8 @@ calculateNumIters(Sema &SemaRef, Scope *S, SourceLocation DefaultLoc,
}
llvm::APSInt URes;
bool IsUpperConst = false;
- if (Optional<llvm::APSInt> Res = Upper->getIntegerConstantExpr(SemaRef.Context)) {
+ if (std::optional<llvm::APSInt> Res =
+ Upper->getIntegerConstantExpr(SemaRef.Context)) {
URes = *Res;
IsUpperConst = true;
}
@@ -8024,8 +8792,8 @@ Expr *OpenMPIterationSpaceChecker::buildNumIterations(
return nullptr;
Expr *LBVal = LB;
Expr *UBVal = UB;
- // LB = TestIsLessOp.getValue() ? min(LB(MinVal), LB(MaxVal)) :
- // max(LB(MinVal), LB(MaxVal))
+ // OuterVar = (LB = TestIsLessOp.getValue() ? min(LB(MinVal), LB(MaxVal)) :
+ // max(LB(MinVal), LB(MaxVal)))
if (InitDependOnLC) {
const LoopIterationSpace &IS = ResultIterSpaces[*InitDependOnLC - 1];
if (!IS.MinValue || !IS.MaxValue)
@@ -8070,8 +8838,10 @@ Expr *OpenMPIterationSpaceChecker::buildNumIterations(
if (!LBMaxVal.isUsable())
return nullptr;
- Expr *LBMin = tryBuildCapture(SemaRef, LBMinVal.get(), Captures).get();
- Expr *LBMax = tryBuildCapture(SemaRef, LBMaxVal.get(), Captures).get();
+ Expr *LBMin =
+ tryBuildCapture(SemaRef, LBMinVal.get(), Captures, ".lb_min").get();
+ Expr *LBMax =
+ tryBuildCapture(SemaRef, LBMaxVal.get(), Captures, ".lb_max").get();
if (!LBMin || !LBMax)
return nullptr;
// LB(MinVal) < LB(MaxVal)
@@ -8080,10 +8850,11 @@ Expr *OpenMPIterationSpaceChecker::buildNumIterations(
if (!MinLessMaxRes.isUsable())
return nullptr;
Expr *MinLessMax =
- tryBuildCapture(SemaRef, MinLessMaxRes.get(), Captures).get();
+ tryBuildCapture(SemaRef, MinLessMaxRes.get(), Captures, ".min_less_max")
+ .get();
if (!MinLessMax)
return nullptr;
- if (TestIsLessOp.getValue()) {
+ if (*TestIsLessOp) {
// LB(MinVal) < LB(MaxVal) ? LB(MinVal) : LB(MaxVal) - min(LB(MinVal),
// LB(MaxVal))
ExprResult MinLB = SemaRef.ActOnConditionalOp(DefaultLoc, DefaultLoc,
@@ -8100,6 +8871,12 @@ Expr *OpenMPIterationSpaceChecker::buildNumIterations(
return nullptr;
LBVal = MaxLB.get();
}
+ // OuterVar = LB
+ LBMinVal =
+ SemaRef.BuildBinOp(S, DefaultLoc, BO_Assign, IS.CounterVar, LBVal);
+ if (!LBMinVal.isUsable())
+ return nullptr;
+ LBVal = LBMinVal.get();
}
// UB = TestIsLessOp.getValue() ? max(UB(MinVal), UB(MaxVal)) :
// min(UB(MinVal), UB(MaxVal))
@@ -8147,8 +8924,10 @@ Expr *OpenMPIterationSpaceChecker::buildNumIterations(
if (!UBMaxVal.isUsable())
return nullptr;
- Expr *UBMin = tryBuildCapture(SemaRef, UBMinVal.get(), Captures).get();
- Expr *UBMax = tryBuildCapture(SemaRef, UBMaxVal.get(), Captures).get();
+ Expr *UBMin =
+ tryBuildCapture(SemaRef, UBMinVal.get(), Captures, ".ub_min").get();
+ Expr *UBMax =
+ tryBuildCapture(SemaRef, UBMaxVal.get(), Captures, ".ub_max").get();
if (!UBMin || !UBMax)
return nullptr;
// UB(MinVal) > UB(MaxVal)
@@ -8156,11 +8935,12 @@ Expr *OpenMPIterationSpaceChecker::buildNumIterations(
SemaRef.BuildBinOp(S, DefaultLoc, BO_GT, UBMin, UBMax);
if (!MinGreaterMaxRes.isUsable())
return nullptr;
- Expr *MinGreaterMax =
- tryBuildCapture(SemaRef, MinGreaterMaxRes.get(), Captures).get();
+ Expr *MinGreaterMax = tryBuildCapture(SemaRef, MinGreaterMaxRes.get(),
+ Captures, ".min_greater_max")
+ .get();
if (!MinGreaterMax)
return nullptr;
- if (TestIsLessOp.getValue()) {
+ if (*TestIsLessOp) {
// UB(MinVal) > UB(MaxVal) ? UB(MinVal) : UB(MaxVal) - max(UB(MinVal),
// UB(MaxVal))
ExprResult MaxUB = SemaRef.ActOnConditionalOp(
@@ -8178,10 +8958,10 @@ Expr *OpenMPIterationSpaceChecker::buildNumIterations(
UBVal = MinUB.get();
}
}
- Expr *UBExpr = TestIsLessOp.getValue() ? UBVal : LBVal;
- Expr *LBExpr = TestIsLessOp.getValue() ? LBVal : UBVal;
- Expr *Upper = tryBuildCapture(SemaRef, UBExpr, Captures).get();
- Expr *Lower = tryBuildCapture(SemaRef, LBExpr, Captures).get();
+ Expr *UBExpr = *TestIsLessOp ? UBVal : LBVal;
+ Expr *LBExpr = *TestIsLessOp ? LBVal : UBVal;
+ Expr *Upper = tryBuildCapture(SemaRef, UBExpr, Captures, ".upper").get();
+ Expr *Lower = tryBuildCapture(SemaRef, LBExpr, Captures, ".lower").get();
if (!Upper || !Lower)
return nullptr;
@@ -8242,12 +9022,12 @@ std::pair<Expr *, Expr *> OpenMPIterationSpaceChecker::buildMinMaxValues(
// init value.
Expr *MinExpr = nullptr;
Expr *MaxExpr = nullptr;
- Expr *LBExpr = TestIsLessOp.getValue() ? LB : UB;
- Expr *UBExpr = TestIsLessOp.getValue() ? UB : LB;
- bool LBNonRect = TestIsLessOp.getValue() ? InitDependOnLC.hasValue()
- : CondDependOnLC.hasValue();
- bool UBNonRect = TestIsLessOp.getValue() ? CondDependOnLC.hasValue()
- : InitDependOnLC.hasValue();
+ Expr *LBExpr = *TestIsLessOp ? LB : UB;
+ Expr *UBExpr = *TestIsLessOp ? UB : LB;
+ bool LBNonRect =
+ *TestIsLessOp ? InitDependOnLC.has_value() : CondDependOnLC.has_value();
+ bool UBNonRect =
+ *TestIsLessOp ? CondDependOnLC.has_value() : InitDependOnLC.has_value();
Expr *Lower =
LBNonRect ? LBExpr : tryBuildCapture(SemaRef, LBExpr, Captures).get();
Expr *Upper =
@@ -8255,7 +9035,7 @@ std::pair<Expr *, Expr *> OpenMPIterationSpaceChecker::buildMinMaxValues(
if (!Upper || !Lower)
return std::make_pair(nullptr, nullptr);
- if (TestIsLessOp.getValue())
+ if (*TestIsLessOp)
MinExpr = Lower;
else
MaxExpr = Upper;
@@ -8275,7 +9055,7 @@ std::pair<Expr *, Expr *> OpenMPIterationSpaceChecker::buildMinMaxValues(
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
- ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
+ ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures, ".new_step");
if (!NewStep.isUsable())
return std::make_pair(nullptr, nullptr);
Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Mul, Diff.get(), NewStep.get());
@@ -8299,7 +9079,7 @@ std::pair<Expr *, Expr *> OpenMPIterationSpaceChecker::buildMinMaxValues(
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
- if (TestIsLessOp.getValue()) {
+ if (*TestIsLessOp) {
// MinExpr = Lower;
// MaxExpr = Lower + (((Upper - Lower [- 1]) / Step) * Step)
Diff = SemaRef.BuildBinOp(
@@ -8332,7 +9112,7 @@ std::pair<Expr *, Expr *> OpenMPIterationSpaceChecker::buildMinMaxValues(
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
- if (TestIsLessOp.getValue())
+ if (*TestIsLessOp)
MaxExpr = Diff.get();
else
MinExpr = Diff.get();
@@ -8354,10 +9134,12 @@ Expr *OpenMPIterationSpaceChecker::buildPreCond(
// TODO: this can be improved by calculating min/max values but not sure that
// it will be very effective.
if (CondDependOnLC || InitDependOnLC)
- return SemaRef.PerformImplicitConversion(
- SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get(),
- SemaRef.Context.BoolTy, /*Action=*/Sema::AA_Casting,
- /*AllowExplicit=*/true).get();
+ return SemaRef
+ .PerformImplicitConversion(
+ SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get(),
+ SemaRef.Context.BoolTy, /*Action=*/Sema::AA_Casting,
+ /*AllowExplicit=*/true)
+ .get();
// Try to build LB <op> UB, where <op> is <, >, <=, or >=.
Sema::TentativeAnalysisScope Trap(SemaRef);
@@ -8369,9 +9151,8 @@ Expr *OpenMPIterationSpaceChecker::buildPreCond(
ExprResult CondExpr =
SemaRef.BuildBinOp(S, DefaultLoc,
- TestIsLessOp.getValue() ?
- (TestIsStrictOp ? BO_LT : BO_LE) :
- (TestIsStrictOp ? BO_GT : BO_GE),
+ *TestIsLessOp ? (TestIsStrictOp ? BO_LT : BO_LE)
+ : (TestIsStrictOp ? BO_GT : BO_GE),
NewLB.get(), NewUB.get());
if (CondExpr.isUsable()) {
if (!SemaRef.Context.hasSameUnqualifiedType(CondExpr.get()->getType(),
@@ -8447,12 +9228,10 @@ Expr *OpenMPIterationSpaceChecker::buildOrderedLoopData(
!SemaRef.getLangOpts().CPlusPlus)
return nullptr;
// Upper - Lower
- Expr *Upper = TestIsLessOp.getValue()
- ? Cnt
- : tryBuildCapture(SemaRef, LB, Captures).get();
- Expr *Lower = TestIsLessOp.getValue()
- ? tryBuildCapture(SemaRef, LB, Captures).get()
- : Cnt;
+ Expr *Upper =
+ *TestIsLessOp ? Cnt : tryBuildCapture(SemaRef, LB, Captures).get();
+ Expr *Lower =
+ *TestIsLessOp ? tryBuildCapture(SemaRef, LB, Captures).get() : Cnt;
if (!Upper || !Lower)
return nullptr;
@@ -8520,8 +9299,9 @@ void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) {
(LangOpts.OpenMP <= 45 || (DVar.CKind != OMPC_lastprivate &&
DVar.CKind != OMPC_private))) ||
((isOpenMPWorksharingDirective(DKind) || DKind == OMPD_taskloop ||
- DKind == OMPD_master_taskloop ||
+ DKind == OMPD_master_taskloop || DKind == OMPD_masked_taskloop ||
DKind == OMPD_parallel_master_taskloop ||
+ DKind == OMPD_parallel_masked_taskloop ||
isOpenMPDistributeDirective(DKind)) &&
!isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
DVar.CKind != OMPC_private && DVar.CKind != OMPC_lastprivate)) &&
@@ -8549,6 +9329,22 @@ void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) {
}
}
+namespace {
+// Utility for openmp doacross clause kind
+class OMPDoacrossKind {
+public:
+ bool isSource(const OMPDoacrossClause *C) {
+ return C->getDependenceType() == OMPC_DOACROSS_source ||
+ C->getDependenceType() == OMPC_DOACROSS_source_omp_cur_iteration;
+ }
+ bool isSink(const OMPDoacrossClause *C) {
+ return C->getDependenceType() == OMPC_DOACROSS_sink;
+ }
+ bool isSinkIter(const OMPDoacrossClause *C) {
+ return C->getDependenceType() == OMPC_DOACROSS_sink_omp_cur_iteration;
+ }
+};
+} // namespace
/// Called on a for stmt to check and extract its iteration space
/// for further processing (such as collapsing).
static bool checkOpenMPIterationSpace(
@@ -8569,9 +9365,13 @@ static bool checkOpenMPIterationSpace(
auto *CXXFor = dyn_cast_or_null<CXXForRangeStmt>(S);
// Ranged for is supported only in OpenMP 5.0.
if (!For && (SemaRef.LangOpts.OpenMP <= 45 || !CXXFor)) {
+ OpenMPDirectiveKind DK = (SemaRef.getLangOpts().OpenMP < 50 ||
+ DSA.getMappedDirective() == OMPD_unknown)
+ ? DKind
+ : DSA.getMappedDirective();
SemaRef.Diag(S->getBeginLoc(), diag::err_omp_not_for)
<< (CollapseLoopCountExpr != nullptr || OrderedLoopCountExpr != nullptr)
- << getOpenMPDirectiveName(DKind) << TotalNestedLoopCount
+ << getOpenMPDirectiveName(DK) << TotalNestedLoopCount
<< (CurrentNestedLoopCount > 0) << CurrentNestedLoopCount;
if (TotalNestedLoopCount > 1) {
if (CollapseLoopCountExpr && OrderedLoopCountExpr)
@@ -8592,6 +9392,9 @@ static bool checkOpenMPIterationSpace(
}
assert(((For && For->getBody()) || (CXXFor && CXXFor->getBody())) &&
"No loop body.");
+ // Postpone analysis in dependent contexts for ranged for loops.
+ if (CXXFor && SemaRef.CurContext->isDependentContext())
+ return false;
OpenMPIterationSpaceChecker ISC(SemaRef, SupportsNonRectangular, DSA,
For ? For->getForLoc() : CXXFor->getForLoc());
@@ -8648,6 +9451,7 @@ static bool checkOpenMPIterationSpace(
ResultIterSpaces[CurrentNestedLoopCount].NumIterations =
ISC.buildNumIterations(DSA.getCurScope(), ResultIterSpaces,
(isOpenMPWorksharingDirective(DKind) ||
+ isOpenMPGenericLoopDirective(DKind) ||
isOpenMPTaskLoopDirective(DKind) ||
isOpenMPDistributeDirective(DKind) ||
isOpenMPLoopTransformationDirective(DKind)),
@@ -8698,30 +9502,61 @@ static bool checkOpenMPIterationSpace(
}
}
for (auto &Pair : DSA.getDoacrossDependClauses()) {
- if (CurrentNestedLoopCount >= Pair.first->getNumLoops()) {
+ auto *DependC = dyn_cast<OMPDependClause>(Pair.first);
+ auto *DoacrossC = dyn_cast<OMPDoacrossClause>(Pair.first);
+ unsigned NumLoops =
+ DependC ? DependC->getNumLoops() : DoacrossC->getNumLoops();
+ if (CurrentNestedLoopCount >= NumLoops) {
// Erroneous case - clause has some problems.
continue;
}
- if (Pair.first->getDependencyKind() == OMPC_DEPEND_sink &&
+ if (DependC && DependC->getDependencyKind() == OMPC_DEPEND_sink &&
Pair.second.size() <= CurrentNestedLoopCount) {
// Erroneous case - clause has some problems.
- Pair.first->setLoopData(CurrentNestedLoopCount, nullptr);
+ DependC->setLoopData(CurrentNestedLoopCount, nullptr);
+ continue;
+ }
+ OMPDoacrossKind ODK;
+ if (DoacrossC && ODK.isSink(DoacrossC) &&
+ Pair.second.size() <= CurrentNestedLoopCount) {
+ // Erroneous case - clause has some problems.
+ DoacrossC->setLoopData(CurrentNestedLoopCount, nullptr);
continue;
}
Expr *CntValue;
- if (Pair.first->getDependencyKind() == OMPC_DEPEND_source)
+ SourceLocation DepLoc =
+ DependC ? DependC->getDependencyLoc() : DoacrossC->getDependenceLoc();
+ if ((DependC && DependC->getDependencyKind() == OMPC_DEPEND_source) ||
+ (DoacrossC && ODK.isSource(DoacrossC)))
CntValue = ISC.buildOrderedLoopData(
DSA.getCurScope(),
ResultIterSpaces[CurrentNestedLoopCount].CounterVar, Captures,
- Pair.first->getDependencyLoc());
- else
+ DepLoc);
+ else if (DoacrossC && ODK.isSinkIter(DoacrossC)) {
+ Expr *Cnt = SemaRef
+ .DefaultLvalueConversion(
+ ResultIterSpaces[CurrentNestedLoopCount].CounterVar)
+ .get();
+ if (!Cnt)
+ continue;
+ // build CounterVar - 1
+ Expr *Inc =
+ SemaRef.ActOnIntegerConstant(DoacrossC->getColonLoc(), /*Val=*/1)
+ .get();
+ CntValue = ISC.buildOrderedLoopData(
+ DSA.getCurScope(),
+ ResultIterSpaces[CurrentNestedLoopCount].CounterVar, Captures,
+ DepLoc, Inc, clang::OO_Minus);
+ } else
CntValue = ISC.buildOrderedLoopData(
DSA.getCurScope(),
ResultIterSpaces[CurrentNestedLoopCount].CounterVar, Captures,
- Pair.first->getDependencyLoc(),
- Pair.second[CurrentNestedLoopCount].first,
+ DepLoc, Pair.second[CurrentNestedLoopCount].first,
Pair.second[CurrentNestedLoopCount].second);
- Pair.first->setLoopData(CurrentNestedLoopCount, CntValue);
+ if (DependC)
+ DependC->setLoopData(CurrentNestedLoopCount, CntValue);
+ else
+ DoacrossC->setLoopData(CurrentNestedLoopCount, CntValue);
}
}
@@ -8849,7 +9684,7 @@ static ExprResult widenIterationCount(unsigned Bits, Expr *E, Sema &SemaRef) {
static bool fitsInto(unsigned Bits, bool Signed, const Expr *E, Sema &SemaRef) {
if (E == nullptr)
return false;
- if (Optional<llvm::APSInt> Result =
+ if (std::optional<llvm::APSInt> Result =
E->getIntegerConstantExpr(SemaRef.Context))
return Signed ? Result->isSignedIntN(Bits) : Result->isIntN(Bits);
return false;
@@ -8971,15 +9806,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
}
return false;
},
- [&SemaRef, &Captures](OMPLoopBasedDirective *Transform) {
- Stmt *DependentPreInits;
- if (auto *Dir = dyn_cast<OMPTileDirective>(Transform)) {
- DependentPreInits = Dir->getPreInits();
- } else if (auto *Dir = dyn_cast<OMPUnrollDirective>(Transform)) {
- DependentPreInits = Dir->getPreInits();
- } else {
- llvm_unreachable("Unexpected loop transformation");
- }
+ [&SemaRef, &Captures](OMPLoopTransformationDirective *Transform) {
+ Stmt *DependentPreInits = Transform->getPreInits();
if (!DependentPreInits)
return;
for (Decl *C : cast<DeclStmt>(DependentPreInits)->getDeclGroup()) {
@@ -9138,6 +9966,7 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
ExprResult LB, UB, IL, ST, EUB, CombLB, CombUB, PrevLB, PrevUB, CombEUB;
if (isOpenMPWorksharingDirective(DKind) || isOpenMPTaskLoopDirective(DKind) ||
isOpenMPDistributeDirective(DKind) ||
+ isOpenMPGenericLoopDirective(DKind) ||
isOpenMPLoopTransformationDirective(DKind)) {
// Lower bound variable, initialized with zero.
VarDecl *LBDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.lb");
@@ -9237,6 +10066,7 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
VarDecl *IVDecl = buildVarDecl(SemaRef, InitLoc, RealVType, ".omp.iv");
IV = buildDeclRefExpr(SemaRef, IVDecl, RealVType, InitLoc);
Expr *RHS = (isOpenMPWorksharingDirective(DKind) ||
+ isOpenMPGenericLoopDirective(DKind) ||
isOpenMPTaskLoopDirective(DKind) ||
isOpenMPDistributeDirective(DKind) ||
isOpenMPLoopTransformationDirective(DKind))
@@ -9248,6 +10078,7 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
if (isOpenMPLoopBoundSharingDirective(DKind)) {
Expr *CombRHS =
(isOpenMPWorksharingDirective(DKind) ||
+ isOpenMPGenericLoopDirective(DKind) ||
isOpenMPTaskLoopDirective(DKind) ||
isOpenMPDistributeDirective(DKind))
? CombLB.get()
@@ -9279,6 +10110,7 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
}
ExprResult Cond =
(isOpenMPWorksharingDirective(DKind) ||
+ isOpenMPGenericLoopDirective(DKind) ||
isOpenMPTaskLoopDirective(DKind) || isOpenMPDistributeDirective(DKind) ||
isOpenMPLoopTransformationDirective(DKind))
? SemaRef.BuildBinOp(CurScope, CondLoc,
@@ -9328,6 +10160,7 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// base variables for the update
ExprResult NextLB, NextUB, CombNextLB, CombNextUB;
if (isOpenMPWorksharingDirective(DKind) || isOpenMPTaskLoopDirective(DKind) ||
+ isOpenMPGenericLoopDirective(DKind) ||
isOpenMPDistributeDirective(DKind) ||
isOpenMPLoopTransformationDirective(DKind)) {
// LB + ST
@@ -9473,9 +10306,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
ExprResult Iter;
// Compute prod
- ExprResult Prod =
- SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get();
- for (unsigned int K = Cnt+1; K < NestedLoopCount; ++K)
+ ExprResult Prod = SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get();
+ for (unsigned int K = Cnt + 1; K < NestedLoopCount; ++K)
Prod = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Mul, Prod.get(),
IterSpaces[K].NumIterations);
@@ -9483,8 +10315,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// If there is at least one more inner loop to avoid
// multiplication by 1.
if (Cnt + 1 < NestedLoopCount)
- Iter = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Div,
- Acc.get(), Prod.get());
+ Iter =
+ SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Div, Acc.get(), Prod.get());
else
Iter = Acc;
if (!Iter.isUsable()) {
@@ -9497,12 +10329,11 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// Check if there is at least one more inner loop to avoid
// multiplication by 1.
if (Cnt + 1 < NestedLoopCount)
- Prod = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Mul,
- Iter.get(), Prod.get());
+ Prod = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Mul, Iter.get(),
+ Prod.get());
else
Prod = Iter;
- Acc = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Sub,
- Acc.get(), Prod.get());
+ Acc = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Sub, Acc.get(), Prod.get());
// Build update: IS.CounterVar(Private) = IS.Start + Iter * IS.Step
auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IS.CounterVar)->getDecl());
@@ -9548,10 +10379,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
Built.DependentInits[Cnt] = nullptr;
Built.FinalsConditions[Cnt] = nullptr;
if (IS.IsNonRectangularLB || IS.IsNonRectangularUB) {
- Built.DependentCounters[Cnt] =
- Built.Counters[NestedLoopCount - 1 - IS.LoopDependentIdx];
- Built.DependentInits[Cnt] =
- Built.Inits[NestedLoopCount - 1 - IS.LoopDependentIdx];
+ Built.DependentCounters[Cnt] = Built.Counters[IS.LoopDependentIdx - 1];
+ Built.DependentInits[Cnt] = Built.Inits[IS.LoopDependentIdx - 1];
Built.FinalsConditions[Cnt] = IS.FinalCondition;
}
}
@@ -9657,6 +10486,24 @@ static bool checkSimdlenSafelenSpecified(Sema &S,
return false;
}
+static bool checkGenericLoopLastprivate(Sema &S, ArrayRef<OMPClause *> Clauses,
+ OpenMPDirectiveKind K,
+ DSAStackTy *Stack);
+
+bool Sema::checkLastPrivateForMappedDirectives(ArrayRef<OMPClause *> Clauses) {
+
+ // Check for syntax of lastprivate
+ // Param of the lastprivate have different meanings in the mapped directives
+ // e.g. "omp loop" Only loop iteration vars are allowed in lastprivate clause
+ // "omp for" lastprivate vars must be shared
+ if (getLangOpts().OpenMP >= 50 &&
+ DSAStack->getMappedDirective() == OMPD_loop &&
+ checkGenericLoopLastprivate(*this, Clauses, OMPD_loop, DSAStack)) {
+ return false;
+ }
+ return true;
+}
+
StmtResult
Sema::ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
@@ -9664,6 +10511,9 @@ Sema::ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
if (!AStmt)
return StmtError();
+ if (!checkLastPrivateForMappedDirectives(Clauses))
+ return StmtError();
+
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
@@ -9692,8 +10542,10 @@ Sema::ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
return StmtError();
setFunctionHasBranchProtectedScope();
- return OMPSimdDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount,
- Clauses, AStmt, B);
+ auto *SimdDirective = OMPSimdDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ DSAStack->getMappedDirective());
+ return SimdDirective;
}
StmtResult
@@ -9703,6 +10555,9 @@ Sema::ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
if (!AStmt)
return StmtError();
+ if (!checkLastPrivateForMappedDirectives(Clauses))
+ return StmtError();
+
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
@@ -9727,10 +10582,11 @@ Sema::ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
}
}
- setFunctionHasBranchProtectedScope();
- return OMPForDirective::Create(
+ auto *ForDirective = OMPForDirective::Create(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
- DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
+ DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion(),
+ DSAStack->getMappedDirective());
+ return ForDirective;
}
StmtResult Sema::ActOnOpenMPForSimdDirective(
@@ -9789,7 +10645,7 @@ StmtResult Sema::ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
return StmtError();
// All associated statements must be '#pragma omp section' except for
// the first one.
- for (Stmt *SectionStmt : llvm::make_range(std::next(S.begin()), S.end())) {
+ for (Stmt *SectionStmt : llvm::drop_begin(S)) {
if (!SectionStmt || !isa<OMPSectionDirective>(SectionStmt)) {
if (SectionStmt)
Diag(SectionStmt->getBeginLoc(),
@@ -9882,6 +10738,262 @@ StmtResult Sema::ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
TargetCallLoc);
}
+static bool checkGenericLoopLastprivate(Sema &S, ArrayRef<OMPClause *> Clauses,
+ OpenMPDirectiveKind K,
+ DSAStackTy *Stack) {
+ bool ErrorFound = false;
+ for (OMPClause *C : Clauses) {
+ if (auto *LPC = dyn_cast<OMPLastprivateClause>(C)) {
+ for (Expr *RefExpr : LPC->varlists()) {
+ SourceLocation ELoc;
+ SourceRange ERange;
+ Expr *SimpleRefExpr = RefExpr;
+ auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange);
+ if (ValueDecl *D = Res.first) {
+ auto &&Info = Stack->isLoopControlVariable(D);
+ if (!Info.first) {
+ S.Diag(ELoc, diag::err_omp_lastprivate_loop_var_non_loop_iteration)
+ << getOpenMPDirectiveName(K);
+ ErrorFound = true;
+ }
+ }
+ }
+ }
+ }
+ return ErrorFound;
+}
+
+StmtResult Sema::ActOnOpenMPGenericLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ // OpenMP 5.1 [2.11.7, loop construct, Restrictions]
+ // A list item may not appear in a lastprivate clause unless it is the
+ // loop iteration variable of a loop that is associated with the construct.
+ if (checkGenericLoopLastprivate(*this, Clauses, OMPD_loop, DSAStack))
+ return StmtError();
+
+ auto *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ OMPLoopDirective::HelperExprs B;
+ // In presence of clause 'collapse', it will define the nested loops number.
+ unsigned NestedLoopCount = checkOpenMPLoop(
+ OMPD_loop, getCollapseNumberExpr(Clauses), getOrderedNumberExpr(Clauses),
+ AStmt, *this, *DSAStack, VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp loop exprs were not built");
+
+ setFunctionHasBranchProtectedScope();
+ return OMPGenericLoopDirective::Create(Context, StartLoc, EndLoc,
+ NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPTeamsGenericLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ // OpenMP 5.1 [2.11.7, loop construct, Restrictions]
+ // A list item may not appear in a lastprivate clause unless it is the
+ // loop iteration variable of a loop that is associated with the construct.
+ if (checkGenericLoopLastprivate(*this, Clauses, OMPD_teams_loop, DSAStack))
+ return StmtError();
+
+ auto *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_teams_loop);
+ ThisCaptureLevel > 1; --ThisCaptureLevel) {
+ CS = cast<CapturedStmt>(CS->getCapturedStmt());
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ }
+
+ OMPLoopDirective::HelperExprs B;
+ // In presence of clause 'collapse', it will define the nested loops number.
+ unsigned NestedLoopCount =
+ checkOpenMPLoop(OMPD_teams_loop, getCollapseNumberExpr(Clauses),
+ /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp loop exprs were not built");
+
+ setFunctionHasBranchProtectedScope();
+ DSAStack->setParentTeamsRegionLoc(StartLoc);
+
+ return OMPTeamsGenericLoopDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPTargetTeamsGenericLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ // OpenMP 5.1 [2.11.7, loop construct, Restrictions]
+ // A list item may not appear in a lastprivate clause unless it is the
+ // loop iteration variable of a loop that is associated with the construct.
+ if (checkGenericLoopLastprivate(*this, Clauses, OMPD_target_teams_loop,
+ DSAStack))
+ return StmtError();
+
+ auto *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_target_teams_loop);
+ ThisCaptureLevel > 1; --ThisCaptureLevel) {
+ CS = cast<CapturedStmt>(CS->getCapturedStmt());
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ }
+
+ OMPLoopDirective::HelperExprs B;
+ // In presence of clause 'collapse', it will define the nested loops number.
+ unsigned NestedLoopCount =
+ checkOpenMPLoop(OMPD_target_teams_loop, getCollapseNumberExpr(Clauses),
+ /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp loop exprs were not built");
+
+ setFunctionHasBranchProtectedScope();
+
+ return OMPTargetTeamsGenericLoopDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPParallelGenericLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ // OpenMP 5.1 [2.11.7, loop construct, Restrictions]
+ // A list item may not appear in a lastprivate clause unless it is the
+ // loop iteration variable of a loop that is associated with the construct.
+ if (checkGenericLoopLastprivate(*this, Clauses, OMPD_parallel_loop, DSAStack))
+ return StmtError();
+
+ auto *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_parallel_loop);
+ ThisCaptureLevel > 1; --ThisCaptureLevel) {
+ CS = cast<CapturedStmt>(CS->getCapturedStmt());
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ }
+
+ OMPLoopDirective::HelperExprs B;
+ // In presence of clause 'collapse', it will define the nested loops number.
+ unsigned NestedLoopCount =
+ checkOpenMPLoop(OMPD_parallel_loop, getCollapseNumberExpr(Clauses),
+ /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp loop exprs were not built");
+
+ setFunctionHasBranchProtectedScope();
+
+ return OMPParallelGenericLoopDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPTargetParallelGenericLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ // OpenMP 5.1 [2.11.7, loop construct, Restrictions]
+ // A list item may not appear in a lastprivate clause unless it is the
+ // loop iteration variable of a loop that is associated with the construct.
+ if (checkGenericLoopLastprivate(*this, Clauses, OMPD_target_parallel_loop,
+ DSAStack))
+ return StmtError();
+
+ auto *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_target_parallel_loop);
+ ThisCaptureLevel > 1; --ThisCaptureLevel) {
+ CS = cast<CapturedStmt>(CS->getCapturedStmt());
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ }
+
+ OMPLoopDirective::HelperExprs B;
+ // In presence of clause 'collapse', it will define the nested loops number.
+ unsigned NestedLoopCount =
+ checkOpenMPLoop(OMPD_target_parallel_loop, getCollapseNumberExpr(Clauses),
+ /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp loop exprs were not built");
+
+ setFunctionHasBranchProtectedScope();
+
+ return OMPTargetParallelGenericLoopDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
StmtResult Sema::ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
@@ -10104,6 +11216,29 @@ Sema::ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
}
StmtResult
+Sema::ActOnOpenMPParallelMaskedDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ auto *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ setFunctionHasBranchProtectedScope();
+
+ return OMPParallelMaskedDirective::Create(
+ Context, StartLoc, EndLoc, Clauses, AStmt,
+ DSAStack->getTaskgroupReductionRef());
+}
+
+StmtResult
Sema::ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc) {
@@ -10120,7 +11255,7 @@ Sema::ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
return StmtError();
// All associated statements must be '#pragma omp section' except for
// the first one.
- for (Stmt *SectionStmt : llvm::make_range(std::next(S.begin()), S.end())) {
+ for (Stmt *SectionStmt : llvm::drop_begin(S)) {
if (!SectionStmt || !isa<OMPSectionDirective>(SectionStmt)) {
if (SectionStmt)
Diag(SectionStmt->getBeginLoc(),
@@ -10203,9 +11338,51 @@ StmtResult Sema::ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
return OMPBarrierDirective::Create(Context, StartLoc, EndLoc);
}
-StmtResult Sema::ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
+StmtResult Sema::ActOnOpenMPErrorDirective(ArrayRef<OMPClause *> Clauses,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ bool InExContext) {
+ const OMPAtClause *AtC =
+ OMPExecutableDirective::getSingleClause<OMPAtClause>(Clauses);
+
+ if (AtC && !InExContext && AtC->getAtKind() == OMPC_AT_execution) {
+ Diag(AtC->getAtKindKwLoc(), diag::err_omp_unexpected_execution_modifier);
+ return StmtError();
+ }
+
+ const OMPSeverityClause *SeverityC =
+ OMPExecutableDirective::getSingleClause<OMPSeverityClause>(Clauses);
+ const OMPMessageClause *MessageC =
+ OMPExecutableDirective::getSingleClause<OMPMessageClause>(Clauses);
+ Expr *ME = MessageC ? MessageC->getMessageString() : nullptr;
+
+ if (!AtC || AtC->getAtKind() == OMPC_AT_compilation) {
+ if (SeverityC && SeverityC->getSeverityKind() == OMPC_SEVERITY_warning)
+ Diag(SeverityC->getSeverityKindKwLoc(), diag::warn_diagnose_if_succeeded)
+ << (ME ? cast<StringLiteral>(ME)->getString() : "WARNING");
+ else
+ Diag(StartLoc, diag::err_diagnose_if_succeeded)
+ << (ME ? cast<StringLiteral>(ME)->getString() : "ERROR");
+ if (!SeverityC || SeverityC->getSeverityKind() != OMPC_SEVERITY_warning)
+ return StmtError();
+ }
+ return OMPErrorDirective::Create(Context, StartLoc, EndLoc, Clauses);
+}
+
+StmtResult Sema::ActOnOpenMPTaskwaitDirective(ArrayRef<OMPClause *> Clauses,
+ SourceLocation StartLoc,
SourceLocation EndLoc) {
- return OMPTaskwaitDirective::Create(Context, StartLoc, EndLoc);
+ const OMPNowaitClause *NowaitC =
+ OMPExecutableDirective::getSingleClause<OMPNowaitClause>(Clauses);
+ bool HasDependC =
+ !OMPExecutableDirective::getClausesOfKind<OMPDependClause>(Clauses)
+ .empty();
+ if (NowaitC && !HasDependC) {
+ Diag(StartLoc, diag::err_omp_nowait_clause_without_depend);
+ return StmtError();
+ }
+
+ return OMPTaskwaitDirective::Create(Context, StartLoc, EndLoc, Clauses);
}
StmtResult Sema::ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
@@ -10322,33 +11499,48 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
const OMPClause *DependFound = nullptr;
const OMPClause *DependSourceClause = nullptr;
const OMPClause *DependSinkClause = nullptr;
+ const OMPClause *DoacrossFound = nullptr;
+ const OMPClause *DoacrossSourceClause = nullptr;
+ const OMPClause *DoacrossSinkClause = nullptr;
bool ErrorFound = false;
const OMPThreadsClause *TC = nullptr;
const OMPSIMDClause *SC = nullptr;
for (const OMPClause *C : Clauses) {
- if (auto *DC = dyn_cast<OMPDependClause>(C)) {
- DependFound = C;
- if (DC->getDependencyKind() == OMPC_DEPEND_source) {
- if (DependSourceClause) {
+ auto DOC = dyn_cast<OMPDoacrossClause>(C);
+ auto DC = dyn_cast<OMPDependClause>(C);
+ if (DC || DOC) {
+ DependFound = DC ? C : nullptr;
+ DoacrossFound = DOC ? C : nullptr;
+ OMPDoacrossKind ODK;
+ if ((DC && DC->getDependencyKind() == OMPC_DEPEND_source) ||
+ (DOC && (ODK.isSource(DOC)))) {
+ if ((DC && DependSourceClause) || (DOC && DoacrossSourceClause)) {
Diag(C->getBeginLoc(), diag::err_omp_more_one_clause)
<< getOpenMPDirectiveName(OMPD_ordered)
- << getOpenMPClauseName(OMPC_depend) << 2;
+ << getOpenMPClauseName(DC ? OMPC_depend : OMPC_doacross) << 2;
ErrorFound = true;
} else {
- DependSourceClause = C;
+ if (DC)
+ DependSourceClause = C;
+ else
+ DoacrossSourceClause = C;
}
- if (DependSinkClause) {
- Diag(C->getBeginLoc(), diag::err_omp_depend_sink_source_not_allowed)
- << 0;
+ if ((DC && DependSinkClause) || (DOC && DoacrossSinkClause)) {
+ Diag(C->getBeginLoc(), diag::err_omp_sink_and_source_not_allowed)
+ << (DC ? "depend" : "doacross") << 0;
ErrorFound = true;
}
- } else if (DC->getDependencyKind() == OMPC_DEPEND_sink) {
- if (DependSourceClause) {
- Diag(C->getBeginLoc(), diag::err_omp_depend_sink_source_not_allowed)
- << 1;
+ } else if ((DC && DC->getDependencyKind() == OMPC_DEPEND_sink) ||
+ (DOC && (ODK.isSink(DOC) || ODK.isSinkIter(DOC)))) {
+ if (DependSourceClause || DoacrossSourceClause) {
+ Diag(C->getBeginLoc(), diag::err_omp_sink_and_source_not_allowed)
+ << (DC ? "depend" : "doacross") << 1;
ErrorFound = true;
}
- DependSinkClause = C;
+ if (DC)
+ DependSinkClause = C;
+ else
+ DoacrossSinkClause = C;
}
} else if (C->getClauseKind() == OMPC_threads) {
TC = cast<OMPThreadsClause>(C);
@@ -10364,13 +11556,19 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Diag(StartLoc, diag::err_omp_prohibited_region_simd)
<< (LangOpts.OpenMP >= 50 ? 1 : 0);
ErrorFound = true;
- } else if (DependFound && (TC || SC)) {
- Diag(DependFound->getBeginLoc(), diag::err_omp_depend_clause_thread_simd)
+ } else if ((DependFound || DoacrossFound) && (TC || SC)) {
+ SourceLocation Loc =
+ DependFound ? DependFound->getBeginLoc() : DoacrossFound->getBeginLoc();
+ Diag(Loc, diag::err_omp_depend_clause_thread_simd)
+ << getOpenMPClauseName(DependFound ? OMPC_depend : OMPC_doacross)
<< getOpenMPClauseName(TC ? TC->getClauseKind() : SC->getClauseKind());
ErrorFound = true;
- } else if (DependFound && !DSAStack->getParentOrderedRegionParam().first) {
- Diag(DependFound->getBeginLoc(),
- diag::err_omp_ordered_directive_without_param);
+ } else if ((DependFound || DoacrossFound) &&
+ !DSAStack->getParentOrderedRegionParam().first) {
+ SourceLocation Loc =
+ DependFound ? DependFound->getBeginLoc() : DoacrossFound->getBeginLoc();
+ Diag(Loc, diag::err_omp_ordered_directive_without_param)
+ << getOpenMPClauseName(DependFound ? OMPC_depend : OMPC_doacross);
ErrorFound = true;
} else if (TC || Clauses.empty()) {
if (const Expr *Param = DSAStack->getParentOrderedRegionParam().first) {
@@ -10381,7 +11579,7 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
ErrorFound = true;
}
}
- if ((!AStmt && !DependFound) || ErrorFound)
+ if ((!AStmt && !DependFound && !DoacrossFound) || ErrorFound)
return StmtError();
// OpenMP 5.0, 2.17.9, ordered Construct, Restrictions.
@@ -10389,7 +11587,7 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
// within a worksharing-loop, simd, or worksharing-loop SIMD region, a thread
// must not execute more than one ordered region corresponding to an ordered
// construct without a depend clause.
- if (!DependFound) {
+ if (!DependFound && !DoacrossFound) {
if (DSAStack->doesParentHasOrderedDirective()) {
Diag(StartLoc, diag::err_omp_several_directives_in_region) << "ordered";
Diag(DSAStack->getParentOrderedDirectiveLoc(),
@@ -10433,6 +11631,9 @@ class OpenMPAtomicUpdateChecker {
/// RHS binary operation does not have reference to the updated LHS
/// part.
NotAnUpdateExpression,
+ /// An expression contains semantical error not related to
+ /// 'omp atomic [update]'
+ NotAValidExpression,
/// No errors is found.
NoError
};
@@ -10489,7 +11690,6 @@ private:
bool checkBinaryOperation(BinaryOperator *AtomicBinOp, unsigned DiagId = 0,
unsigned NoteId = 0);
};
-} // namespace
bool OpenMPAtomicUpdateChecker::checkBinaryOperation(
BinaryOperator *AtomicBinOp, unsigned DiagId, unsigned NoteId) {
@@ -10611,6 +11811,10 @@ bool OpenMPAtomicUpdateChecker::checkStatement(Stmt *S, unsigned DiagId,
ErrorFound = NotABinaryOrUnaryExpression;
NoteLoc = ErrorLoc = AtomicBody->getExprLoc();
NoteRange = ErrorRange = AtomicBody->getSourceRange();
+ } else if (AtomicBody->containsErrors()) {
+ ErrorFound = NotAValidExpression;
+ NoteLoc = ErrorLoc = AtomicBody->getExprLoc();
+ NoteRange = ErrorRange = AtomicBody->getSourceRange();
}
} else {
ErrorFound = NotAScalarType;
@@ -10651,6 +11855,813 @@ bool OpenMPAtomicUpdateChecker::checkStatement(Stmt *S, unsigned DiagId,
return ErrorFound != NoError;
}
+/// Get the node id of the fixed point of an expression \a S.
+llvm::FoldingSetNodeID getNodeId(ASTContext &Context, const Expr *S) {
+ llvm::FoldingSetNodeID Id;
+ S->IgnoreParenImpCasts()->Profile(Id, Context, true);
+ return Id;
+}
+
+/// Check if two expressions are same.
+bool checkIfTwoExprsAreSame(ASTContext &Context, const Expr *LHS,
+ const Expr *RHS) {
+ return getNodeId(Context, LHS) == getNodeId(Context, RHS);
+}
+
+class OpenMPAtomicCompareChecker {
+public:
+ /// All kinds of errors that can occur in `atomic compare`
+ enum ErrorTy {
+ /// Empty compound statement.
+ NoStmt = 0,
+ /// More than one statement in a compound statement.
+ MoreThanOneStmt,
+ /// Not an assignment binary operator.
+ NotAnAssignment,
+ /// Not a conditional operator.
+ NotCondOp,
+ /// Wrong false expr. According to the spec, 'x' should be at the false
+ /// expression of a conditional expression.
+ WrongFalseExpr,
+ /// The condition of a conditional expression is not a binary operator.
+ NotABinaryOp,
+ /// Invalid binary operator (not <, >, or ==).
+ InvalidBinaryOp,
+ /// Invalid comparison (not x == e, e == x, x ordop expr, or expr ordop x).
+ InvalidComparison,
+ /// X is not a lvalue.
+ XNotLValue,
+ /// Not a scalar.
+ NotScalar,
+ /// Not an integer.
+ NotInteger,
+ /// 'else' statement is not expected.
+ UnexpectedElse,
+ /// Not an equality operator.
+ NotEQ,
+ /// Invalid assignment (not v == x).
+ InvalidAssignment,
+ /// Not if statement
+ NotIfStmt,
+ /// More than two statements in a compund statement.
+ MoreThanTwoStmts,
+ /// Not a compound statement.
+ NotCompoundStmt,
+ /// No else statement.
+ NoElse,
+ /// Not 'if (r)'.
+ InvalidCondition,
+ /// No error.
+ NoError,
+ };
+
+ struct ErrorInfoTy {
+ ErrorTy Error;
+ SourceLocation ErrorLoc;
+ SourceRange ErrorRange;
+ SourceLocation NoteLoc;
+ SourceRange NoteRange;
+ };
+
+ OpenMPAtomicCompareChecker(Sema &S) : ContextRef(S.getASTContext()) {}
+
+ /// Check if statement \a S is valid for <tt>atomic compare</tt>.
+ bool checkStmt(Stmt *S, ErrorInfoTy &ErrorInfo);
+
+ Expr *getX() const { return X; }
+ Expr *getE() const { return E; }
+ Expr *getD() const { return D; }
+ Expr *getCond() const { return C; }
+ bool isXBinopExpr() const { return IsXBinopExpr; }
+
+protected:
+ /// Reference to ASTContext
+ ASTContext &ContextRef;
+ /// 'x' lvalue part of the source atomic expression.
+ Expr *X = nullptr;
+ /// 'expr' or 'e' rvalue part of the source atomic expression.
+ Expr *E = nullptr;
+ /// 'd' rvalue part of the source atomic expression.
+ Expr *D = nullptr;
+ /// 'cond' part of the source atomic expression. It is in one of the following
+ /// forms:
+ /// expr ordop x
+ /// x ordop expr
+ /// x == e
+ /// e == x
+ Expr *C = nullptr;
+ /// True if the cond expr is in the form of 'x ordop expr'.
+ bool IsXBinopExpr = true;
+
+ /// Check if it is a valid conditional update statement (cond-update-stmt).
+ bool checkCondUpdateStmt(IfStmt *S, ErrorInfoTy &ErrorInfo);
+
+ /// Check if it is a valid conditional expression statement (cond-expr-stmt).
+ bool checkCondExprStmt(Stmt *S, ErrorInfoTy &ErrorInfo);
+
+ /// Check if all captured values have right type.
+ bool checkType(ErrorInfoTy &ErrorInfo) const;
+
+ static bool CheckValue(const Expr *E, ErrorInfoTy &ErrorInfo,
+ bool ShouldBeLValue, bool ShouldBeInteger = false) {
+ if (E->isInstantiationDependent())
+ return true;
+
+ if (ShouldBeLValue && !E->isLValue()) {
+ ErrorInfo.Error = ErrorTy::XNotLValue;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = E->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = E->getSourceRange();
+ return false;
+ }
+
+ QualType QTy = E->getType();
+ if (!QTy->isScalarType()) {
+ ErrorInfo.Error = ErrorTy::NotScalar;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = E->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = E->getSourceRange();
+ return false;
+ }
+ if (ShouldBeInteger && !QTy->isIntegerType()) {
+ ErrorInfo.Error = ErrorTy::NotInteger;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = E->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = E->getSourceRange();
+ return false;
+ }
+
+ return true;
+ }
+ };
+
+bool OpenMPAtomicCompareChecker::checkCondUpdateStmt(IfStmt *S,
+ ErrorInfoTy &ErrorInfo) {
+ auto *Then = S->getThen();
+ if (auto *CS = dyn_cast<CompoundStmt>(Then)) {
+ if (CS->body_empty()) {
+ ErrorInfo.Error = ErrorTy::NoStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = CS->getSourceRange();
+ return false;
+ }
+ if (CS->size() > 1) {
+ ErrorInfo.Error = ErrorTy::MoreThanOneStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = S->getSourceRange();
+ return false;
+ }
+ Then = CS->body_front();
+ }
+
+ auto *BO = dyn_cast<BinaryOperator>(Then);
+ if (!BO) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Then->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Then->getSourceRange();
+ return false;
+ }
+ if (BO->getOpcode() != BO_Assign) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = BO->getExprLoc();
+ ErrorInfo.NoteLoc = BO->getOperatorLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = BO->getSourceRange();
+ return false;
+ }
+
+ X = BO->getLHS();
+
+ auto *Cond = dyn_cast<BinaryOperator>(S->getCond());
+ if (!Cond) {
+ ErrorInfo.Error = ErrorTy::NotABinaryOp;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = S->getCond()->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = S->getCond()->getSourceRange();
+ return false;
+ }
+
+ switch (Cond->getOpcode()) {
+ case BO_EQ: {
+ C = Cond;
+ D = BO->getRHS();
+ if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getLHS())) {
+ E = Cond->getRHS();
+ } else if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getRHS())) {
+ E = Cond->getLHS();
+ } else {
+ ErrorInfo.Error = ErrorTy::InvalidComparison;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Cond->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Cond->getSourceRange();
+ return false;
+ }
+ break;
+ }
+ case BO_LT:
+ case BO_GT: {
+ E = BO->getRHS();
+ if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getLHS()) &&
+ checkIfTwoExprsAreSame(ContextRef, E, Cond->getRHS())) {
+ C = Cond;
+ } else if (checkIfTwoExprsAreSame(ContextRef, E, Cond->getLHS()) &&
+ checkIfTwoExprsAreSame(ContextRef, X, Cond->getRHS())) {
+ C = Cond;
+ IsXBinopExpr = false;
+ } else {
+ ErrorInfo.Error = ErrorTy::InvalidComparison;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Cond->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Cond->getSourceRange();
+ return false;
+ }
+ break;
+ }
+ default:
+ ErrorInfo.Error = ErrorTy::InvalidBinaryOp;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Cond->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Cond->getSourceRange();
+ return false;
+ }
+
+ if (S->getElse()) {
+ ErrorInfo.Error = ErrorTy::UnexpectedElse;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = S->getElse()->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = S->getElse()->getSourceRange();
+ return false;
+ }
+
+ return true;
+}
+
+bool OpenMPAtomicCompareChecker::checkCondExprStmt(Stmt *S,
+ ErrorInfoTy &ErrorInfo) {
+ auto *BO = dyn_cast<BinaryOperator>(S);
+ if (!BO) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = S->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = S->getSourceRange();
+ return false;
+ }
+ if (BO->getOpcode() != BO_Assign) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = BO->getExprLoc();
+ ErrorInfo.NoteLoc = BO->getOperatorLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = BO->getSourceRange();
+ return false;
+ }
+
+ X = BO->getLHS();
+
+ auto *CO = dyn_cast<ConditionalOperator>(BO->getRHS()->IgnoreParenImpCasts());
+ if (!CO) {
+ ErrorInfo.Error = ErrorTy::NotCondOp;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = BO->getRHS()->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = BO->getRHS()->getSourceRange();
+ return false;
+ }
+
+ if (!checkIfTwoExprsAreSame(ContextRef, X, CO->getFalseExpr())) {
+ ErrorInfo.Error = ErrorTy::WrongFalseExpr;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CO->getFalseExpr()->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange =
+ CO->getFalseExpr()->getSourceRange();
+ return false;
+ }
+
+ auto *Cond = dyn_cast<BinaryOperator>(CO->getCond());
+ if (!Cond) {
+ ErrorInfo.Error = ErrorTy::NotABinaryOp;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CO->getCond()->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange =
+ CO->getCond()->getSourceRange();
+ return false;
+ }
+
+ switch (Cond->getOpcode()) {
+ case BO_EQ: {
+ C = Cond;
+ D = CO->getTrueExpr();
+ if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getLHS())) {
+ E = Cond->getRHS();
+ } else if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getRHS())) {
+ E = Cond->getLHS();
+ } else {
+ ErrorInfo.Error = ErrorTy::InvalidComparison;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Cond->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Cond->getSourceRange();
+ return false;
+ }
+ break;
+ }
+ case BO_LT:
+ case BO_GT: {
+ E = CO->getTrueExpr();
+ if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getLHS()) &&
+ checkIfTwoExprsAreSame(ContextRef, E, Cond->getRHS())) {
+ C = Cond;
+ } else if (checkIfTwoExprsAreSame(ContextRef, E, Cond->getLHS()) &&
+ checkIfTwoExprsAreSame(ContextRef, X, Cond->getRHS())) {
+ C = Cond;
+ IsXBinopExpr = false;
+ } else {
+ ErrorInfo.Error = ErrorTy::InvalidComparison;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Cond->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Cond->getSourceRange();
+ return false;
+ }
+ break;
+ }
+ default:
+ ErrorInfo.Error = ErrorTy::InvalidBinaryOp;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Cond->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Cond->getSourceRange();
+ return false;
+ }
+
+ return true;
+}
+
+bool OpenMPAtomicCompareChecker::checkType(ErrorInfoTy &ErrorInfo) const {
+ // 'x' and 'e' cannot be nullptr
+ assert(X && E && "X and E cannot be nullptr");
+
+ if (!CheckValue(X, ErrorInfo, true))
+ return false;
+
+ if (!CheckValue(E, ErrorInfo, false))
+ return false;
+
+ if (D && !CheckValue(D, ErrorInfo, false))
+ return false;
+
+ return true;
+}
+
+bool OpenMPAtomicCompareChecker::checkStmt(
+ Stmt *S, OpenMPAtomicCompareChecker::ErrorInfoTy &ErrorInfo) {
+ auto *CS = dyn_cast<CompoundStmt>(S);
+ if (CS) {
+ if (CS->body_empty()) {
+ ErrorInfo.Error = ErrorTy::NoStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = CS->getSourceRange();
+ return false;
+ }
+
+ if (CS->size() != 1) {
+ ErrorInfo.Error = ErrorTy::MoreThanOneStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = CS->getSourceRange();
+ return false;
+ }
+ S = CS->body_front();
+ }
+
+ auto Res = false;
+
+ if (auto *IS = dyn_cast<IfStmt>(S)) {
+ // Check if the statement is in one of the following forms
+ // (cond-update-stmt):
+ // if (expr ordop x) { x = expr; }
+ // if (x ordop expr) { x = expr; }
+ // if (x == e) { x = d; }
+ Res = checkCondUpdateStmt(IS, ErrorInfo);
+ } else {
+ // Check if the statement is in one of the following forms (cond-expr-stmt):
+ // x = expr ordop x ? expr : x;
+ // x = x ordop expr ? expr : x;
+ // x = x == e ? d : x;
+ Res = checkCondExprStmt(S, ErrorInfo);
+ }
+
+ if (!Res)
+ return false;
+
+ return checkType(ErrorInfo);
+}
+
+class OpenMPAtomicCompareCaptureChecker final
+ : public OpenMPAtomicCompareChecker {
+public:
+ OpenMPAtomicCompareCaptureChecker(Sema &S) : OpenMPAtomicCompareChecker(S) {}
+
+ Expr *getV() const { return V; }
+ Expr *getR() const { return R; }
+ bool isFailOnly() const { return IsFailOnly; }
+ bool isPostfixUpdate() const { return IsPostfixUpdate; }
+
+ /// Check if statement \a S is valid for <tt>atomic compare capture</tt>.
+ bool checkStmt(Stmt *S, ErrorInfoTy &ErrorInfo);
+
+private:
+ bool checkType(ErrorInfoTy &ErrorInfo);
+
+ // NOTE: Form 3, 4, 5 in the following comments mean the 3rd, 4th, and 5th
+ // form of 'conditional-update-capture-atomic' structured block on the v5.2
+ // spec p.p. 82:
+ // (1) { v = x; cond-update-stmt }
+ // (2) { cond-update-stmt v = x; }
+ // (3) if(x == e) { x = d; } else { v = x; }
+ // (4) { r = x == e; if(r) { x = d; } }
+ // (5) { r = x == e; if(r) { x = d; } else { v = x; } }
+
+ /// Check if it is valid 'if(x == e) { x = d; } else { v = x; }' (form 3)
+ bool checkForm3(IfStmt *S, ErrorInfoTy &ErrorInfo);
+
+ /// Check if it is valid '{ r = x == e; if(r) { x = d; } }',
+ /// or '{ r = x == e; if(r) { x = d; } else { v = x; } }' (form 4 and 5)
+ bool checkForm45(Stmt *S, ErrorInfoTy &ErrorInfo);
+
+ /// 'v' lvalue part of the source atomic expression.
+ Expr *V = nullptr;
+ /// 'r' lvalue part of the source atomic expression.
+ Expr *R = nullptr;
+ /// If 'v' is only updated when the comparison fails.
+ bool IsFailOnly = false;
+ /// If original value of 'x' must be stored in 'v', not an updated one.
+ bool IsPostfixUpdate = false;
+};
+
+bool OpenMPAtomicCompareCaptureChecker::checkType(ErrorInfoTy &ErrorInfo) {
+ if (!OpenMPAtomicCompareChecker::checkType(ErrorInfo))
+ return false;
+
+ if (V && !CheckValue(V, ErrorInfo, true))
+ return false;
+
+ if (R && !CheckValue(R, ErrorInfo, true, true))
+ return false;
+
+ return true;
+}
+
+bool OpenMPAtomicCompareCaptureChecker::checkForm3(IfStmt *S,
+ ErrorInfoTy &ErrorInfo) {
+ IsFailOnly = true;
+
+ auto *Then = S->getThen();
+ if (auto *CS = dyn_cast<CompoundStmt>(Then)) {
+ if (CS->body_empty()) {
+ ErrorInfo.Error = ErrorTy::NoStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = CS->getSourceRange();
+ return false;
+ }
+ if (CS->size() > 1) {
+ ErrorInfo.Error = ErrorTy::MoreThanOneStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = CS->getSourceRange();
+ return false;
+ }
+ Then = CS->body_front();
+ }
+
+ auto *BO = dyn_cast<BinaryOperator>(Then);
+ if (!BO) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Then->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Then->getSourceRange();
+ return false;
+ }
+ if (BO->getOpcode() != BO_Assign) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = BO->getExprLoc();
+ ErrorInfo.NoteLoc = BO->getOperatorLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = BO->getSourceRange();
+ return false;
+ }
+
+ X = BO->getLHS();
+ D = BO->getRHS();
+
+ auto *Cond = dyn_cast<BinaryOperator>(S->getCond());
+ if (!Cond) {
+ ErrorInfo.Error = ErrorTy::NotABinaryOp;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = S->getCond()->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = S->getCond()->getSourceRange();
+ return false;
+ }
+ if (Cond->getOpcode() != BO_EQ) {
+ ErrorInfo.Error = ErrorTy::NotEQ;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Cond->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Cond->getSourceRange();
+ return false;
+ }
+
+ if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getLHS())) {
+ E = Cond->getRHS();
+ } else if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getRHS())) {
+ E = Cond->getLHS();
+ } else {
+ ErrorInfo.Error = ErrorTy::InvalidComparison;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Cond->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Cond->getSourceRange();
+ return false;
+ }
+
+ C = Cond;
+
+ if (!S->getElse()) {
+ ErrorInfo.Error = ErrorTy::NoElse;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = S->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = S->getSourceRange();
+ return false;
+ }
+
+ auto *Else = S->getElse();
+ if (auto *CS = dyn_cast<CompoundStmt>(Else)) {
+ if (CS->body_empty()) {
+ ErrorInfo.Error = ErrorTy::NoStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = CS->getSourceRange();
+ return false;
+ }
+ if (CS->size() > 1) {
+ ErrorInfo.Error = ErrorTy::MoreThanOneStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = S->getSourceRange();
+ return false;
+ }
+ Else = CS->body_front();
+ }
+
+ auto *ElseBO = dyn_cast<BinaryOperator>(Else);
+ if (!ElseBO) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Else->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Else->getSourceRange();
+ return false;
+ }
+ if (ElseBO->getOpcode() != BO_Assign) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = ElseBO->getExprLoc();
+ ErrorInfo.NoteLoc = ElseBO->getOperatorLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = ElseBO->getSourceRange();
+ return false;
+ }
+
+ if (!checkIfTwoExprsAreSame(ContextRef, X, ElseBO->getRHS())) {
+ ErrorInfo.Error = ErrorTy::InvalidAssignment;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = ElseBO->getRHS()->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange =
+ ElseBO->getRHS()->getSourceRange();
+ return false;
+ }
+
+ V = ElseBO->getLHS();
+
+ return checkType(ErrorInfo);
+}
+
+bool OpenMPAtomicCompareCaptureChecker::checkForm45(Stmt *S,
+ ErrorInfoTy &ErrorInfo) {
+ // We don't check here as they should be already done before call this
+ // function.
+ auto *CS = cast<CompoundStmt>(S);
+ assert(CS->size() == 2 && "CompoundStmt size is not expected");
+ auto *S1 = cast<BinaryOperator>(CS->body_front());
+ auto *S2 = cast<IfStmt>(CS->body_back());
+ assert(S1->getOpcode() == BO_Assign && "unexpected binary operator");
+
+ if (!checkIfTwoExprsAreSame(ContextRef, S1->getLHS(), S2->getCond())) {
+ ErrorInfo.Error = ErrorTy::InvalidCondition;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = S2->getCond()->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = S1->getLHS()->getSourceRange();
+ return false;
+ }
+
+ R = S1->getLHS();
+
+ auto *Then = S2->getThen();
+ if (auto *ThenCS = dyn_cast<CompoundStmt>(Then)) {
+ if (ThenCS->body_empty()) {
+ ErrorInfo.Error = ErrorTy::NoStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = ThenCS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = ThenCS->getSourceRange();
+ return false;
+ }
+ if (ThenCS->size() > 1) {
+ ErrorInfo.Error = ErrorTy::MoreThanOneStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = ThenCS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = ThenCS->getSourceRange();
+ return false;
+ }
+ Then = ThenCS->body_front();
+ }
+
+ auto *ThenBO = dyn_cast<BinaryOperator>(Then);
+ if (!ThenBO) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = S2->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = S2->getSourceRange();
+ return false;
+ }
+ if (ThenBO->getOpcode() != BO_Assign) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = ThenBO->getExprLoc();
+ ErrorInfo.NoteLoc = ThenBO->getOperatorLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = ThenBO->getSourceRange();
+ return false;
+ }
+
+ X = ThenBO->getLHS();
+ D = ThenBO->getRHS();
+
+ auto *BO = cast<BinaryOperator>(S1->getRHS()->IgnoreImpCasts());
+ if (BO->getOpcode() != BO_EQ) {
+ ErrorInfo.Error = ErrorTy::NotEQ;
+ ErrorInfo.ErrorLoc = BO->getExprLoc();
+ ErrorInfo.NoteLoc = BO->getOperatorLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = BO->getSourceRange();
+ return false;
+ }
+
+ C = BO;
+
+ if (checkIfTwoExprsAreSame(ContextRef, X, BO->getLHS())) {
+ E = BO->getRHS();
+ } else if (checkIfTwoExprsAreSame(ContextRef, X, BO->getRHS())) {
+ E = BO->getLHS();
+ } else {
+ ErrorInfo.Error = ErrorTy::InvalidComparison;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = BO->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = BO->getSourceRange();
+ return false;
+ }
+
+ if (S2->getElse()) {
+ IsFailOnly = true;
+
+ auto *Else = S2->getElse();
+ if (auto *ElseCS = dyn_cast<CompoundStmt>(Else)) {
+ if (ElseCS->body_empty()) {
+ ErrorInfo.Error = ErrorTy::NoStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = ElseCS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = ElseCS->getSourceRange();
+ return false;
+ }
+ if (ElseCS->size() > 1) {
+ ErrorInfo.Error = ErrorTy::MoreThanOneStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = ElseCS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = ElseCS->getSourceRange();
+ return false;
+ }
+ Else = ElseCS->body_front();
+ }
+
+ auto *ElseBO = dyn_cast<BinaryOperator>(Else);
+ if (!ElseBO) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Else->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Else->getSourceRange();
+ return false;
+ }
+ if (ElseBO->getOpcode() != BO_Assign) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = ElseBO->getExprLoc();
+ ErrorInfo.NoteLoc = ElseBO->getOperatorLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = ElseBO->getSourceRange();
+ return false;
+ }
+ if (!checkIfTwoExprsAreSame(ContextRef, X, ElseBO->getRHS())) {
+ ErrorInfo.Error = ErrorTy::InvalidAssignment;
+ ErrorInfo.ErrorLoc = ElseBO->getRHS()->getExprLoc();
+ ErrorInfo.NoteLoc = X->getExprLoc();
+ ErrorInfo.ErrorRange = ElseBO->getRHS()->getSourceRange();
+ ErrorInfo.NoteRange = X->getSourceRange();
+ return false;
+ }
+
+ V = ElseBO->getLHS();
+ }
+
+ return checkType(ErrorInfo);
+}
+
+bool OpenMPAtomicCompareCaptureChecker::checkStmt(Stmt *S,
+ ErrorInfoTy &ErrorInfo) {
+ // if(x == e) { x = d; } else { v = x; }
+ if (auto *IS = dyn_cast<IfStmt>(S))
+ return checkForm3(IS, ErrorInfo);
+
+ auto *CS = dyn_cast<CompoundStmt>(S);
+ if (!CS) {
+ ErrorInfo.Error = ErrorTy::NotCompoundStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = S->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = S->getSourceRange();
+ return false;
+ }
+ if (CS->body_empty()) {
+ ErrorInfo.Error = ErrorTy::NoStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = CS->getSourceRange();
+ return false;
+ }
+
+ // { if(x == e) { x = d; } else { v = x; } }
+ if (CS->size() == 1) {
+ auto *IS = dyn_cast<IfStmt>(CS->body_front());
+ if (!IS) {
+ ErrorInfo.Error = ErrorTy::NotIfStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CS->body_front()->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange =
+ CS->body_front()->getSourceRange();
+ return false;
+ }
+
+ return checkForm3(IS, ErrorInfo);
+ } else if (CS->size() == 2) {
+ auto *S1 = CS->body_front();
+ auto *S2 = CS->body_back();
+
+ Stmt *UpdateStmt = nullptr;
+ Stmt *CondUpdateStmt = nullptr;
+ Stmt *CondExprStmt = nullptr;
+
+ if (auto *BO = dyn_cast<BinaryOperator>(S1)) {
+ // It could be one of the following cases:
+ // { v = x; cond-update-stmt }
+ // { v = x; cond-expr-stmt }
+ // { cond-expr-stmt; v = x; }
+ // form 45
+ if (isa<BinaryOperator>(BO->getRHS()->IgnoreImpCasts()) ||
+ isa<ConditionalOperator>(BO->getRHS()->IgnoreImpCasts())) {
+ // check if form 45
+ if (isa<IfStmt>(S2))
+ return checkForm45(CS, ErrorInfo);
+ // { cond-expr-stmt; v = x; }
+ CondExprStmt = S1;
+ UpdateStmt = S2;
+ } else {
+ IsPostfixUpdate = true;
+ UpdateStmt = S1;
+ if (isa<IfStmt>(S2)) {
+ // { v = x; cond-update-stmt }
+ CondUpdateStmt = S2;
+ } else {
+ // { v = x; cond-expr-stmt }
+ CondExprStmt = S2;
+ }
+ }
+ } else {
+ // { cond-update-stmt v = x; }
+ UpdateStmt = S2;
+ CondUpdateStmt = S1;
+ }
+
+ auto CheckCondUpdateStmt = [this, &ErrorInfo](Stmt *CUS) {
+ auto *IS = dyn_cast<IfStmt>(CUS);
+ if (!IS) {
+ ErrorInfo.Error = ErrorTy::NotIfStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CUS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = CUS->getSourceRange();
+ return false;
+ }
+
+ return checkCondUpdateStmt(IS, ErrorInfo);
+ };
+
+ // CheckUpdateStmt has to be called *after* CheckCondUpdateStmt.
+ auto CheckUpdateStmt = [this, &ErrorInfo](Stmt *US) {
+ auto *BO = dyn_cast<BinaryOperator>(US);
+ if (!BO) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = US->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = US->getSourceRange();
+ return false;
+ }
+ if (BO->getOpcode() != BO_Assign) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = BO->getExprLoc();
+ ErrorInfo.NoteLoc = BO->getOperatorLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = BO->getSourceRange();
+ return false;
+ }
+ if (!checkIfTwoExprsAreSame(ContextRef, this->X, BO->getRHS())) {
+ ErrorInfo.Error = ErrorTy::InvalidAssignment;
+ ErrorInfo.ErrorLoc = BO->getRHS()->getExprLoc();
+ ErrorInfo.NoteLoc = this->X->getExprLoc();
+ ErrorInfo.ErrorRange = BO->getRHS()->getSourceRange();
+ ErrorInfo.NoteRange = this->X->getSourceRange();
+ return false;
+ }
+
+ this->V = BO->getLHS();
+
+ return true;
+ };
+
+ if (CondUpdateStmt && !CheckCondUpdateStmt(CondUpdateStmt))
+ return false;
+ if (CondExprStmt && !checkCondExprStmt(CondExprStmt, ErrorInfo))
+ return false;
+ if (!CheckUpdateStmt(UpdateStmt))
+ return false;
+ } else {
+ ErrorInfo.Error = ErrorTy::MoreThanTwoStmts;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = CS->getSourceRange();
+ return false;
+ }
+
+ return checkType(ErrorInfo);
+}
+} // namespace
+
StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
@@ -10669,11 +12680,18 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation AtomicKindLoc;
OpenMPClauseKind MemOrderKind = OMPC_unknown;
SourceLocation MemOrderLoc;
+ bool MutexClauseEncountered = false;
+ llvm::SmallSet<OpenMPClauseKind, 2> EncounteredAtomicKinds;
for (const OMPClause *C : Clauses) {
- if (C->getClauseKind() == OMPC_read || C->getClauseKind() == OMPC_write ||
- C->getClauseKind() == OMPC_update ||
- C->getClauseKind() == OMPC_capture) {
- if (AtomicKind != OMPC_unknown) {
+ switch (C->getClauseKind()) {
+ case OMPC_read:
+ case OMPC_write:
+ case OMPC_update:
+ MutexClauseEncountered = true;
+ [[fallthrough]];
+ case OMPC_capture:
+ case OMPC_compare: {
+ if (AtomicKind != OMPC_unknown && MutexClauseEncountered) {
Diag(C->getBeginLoc(), diag::err_omp_atomic_several_clauses)
<< SourceRange(C->getBeginLoc(), C->getEndLoc());
Diag(AtomicKindLoc, diag::note_omp_previous_mem_order_clause)
@@ -10681,13 +12699,28 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
} else {
AtomicKind = C->getClauseKind();
AtomicKindLoc = C->getBeginLoc();
+ if (!EncounteredAtomicKinds.insert(C->getClauseKind()).second) {
+ Diag(C->getBeginLoc(), diag::err_omp_atomic_several_clauses)
+ << SourceRange(C->getBeginLoc(), C->getEndLoc());
+ Diag(AtomicKindLoc, diag::note_omp_previous_mem_order_clause)
+ << getOpenMPClauseName(AtomicKind);
+ }
+ }
+ break;
+ }
+ case OMPC_fail: {
+ if (!EncounteredAtomicKinds.contains(OMPC_compare)) {
+ Diag(C->getBeginLoc(), diag::err_omp_atomic_fail_no_compare)
+ << SourceRange(C->getBeginLoc(), C->getEndLoc());
+ return StmtError();
}
+ break;
}
- if (C->getClauseKind() == OMPC_seq_cst ||
- C->getClauseKind() == OMPC_acq_rel ||
- C->getClauseKind() == OMPC_acquire ||
- C->getClauseKind() == OMPC_release ||
- C->getClauseKind() == OMPC_relaxed) {
+ case OMPC_seq_cst:
+ case OMPC_acq_rel:
+ case OMPC_acquire:
+ case OMPC_release:
+ case OMPC_relaxed: {
if (MemOrderKind != OMPC_unknown) {
Diag(C->getBeginLoc(), diag::err_omp_several_mem_order_clauses)
<< getOpenMPDirectiveName(OMPD_atomic) << 0
@@ -10698,8 +12731,21 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
MemOrderKind = C->getClauseKind();
MemOrderLoc = C->getBeginLoc();
}
+ break;
+ }
+ // The following clauses are allowed, but we don't need to do anything here.
+ case OMPC_hint:
+ break;
+ default:
+ llvm_unreachable("unknown clause is encountered");
}
}
+ bool IsCompareCapture = false;
+ if (EncounteredAtomicKinds.contains(OMPC_compare) &&
+ EncounteredAtomicKinds.contains(OMPC_capture)) {
+ IsCompareCapture = true;
+ AtomicKind = OMPC_compare;
+ }
// OpenMP 5.0, 2.17.7 atomic Construct, Restrictions
// If atomic-clause is read then memory-order-clause must not be acq_rel or
// release.
@@ -10731,8 +12777,12 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Expr *V = nullptr;
Expr *E = nullptr;
Expr *UE = nullptr;
+ Expr *D = nullptr;
+ Expr *CE = nullptr;
+ Expr *R = nullptr;
bool IsXLHSInRHSPart = false;
bool IsPostfixUpdate = false;
+ bool IsFailOnly = false;
// OpenMP [2.12.6, atomic Construct]
// In the next expressions:
// * x and v (as applicable) are both l-value expressions with scalar type.
@@ -10812,8 +12862,8 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
if (ErrorFound != NoError) {
Diag(ErrorLoc, diag::err_omp_atomic_read_not_expression_statement)
<< ErrorRange;
- Diag(NoteLoc, diag::note_omp_atomic_read_write) << ErrorFound
- << NoteRange;
+ Diag(NoteLoc, diag::note_omp_atomic_read_write)
+ << ErrorFound << NoteRange;
return StmtError();
}
if (CurContext->isDependentContext())
@@ -10874,8 +12924,8 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
if (ErrorFound != NoError) {
Diag(ErrorLoc, diag::err_omp_atomic_write_not_expression_statement)
<< ErrorRange;
- Diag(NoteLoc, diag::note_omp_atomic_read_write) << ErrorFound
- << NoteRange;
+ Diag(NoteLoc, diag::note_omp_atomic_read_write)
+ << ErrorFound << NoteRange;
return StmtError();
}
if (CurContext->isDependentContext())
@@ -10891,9 +12941,10 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
// x = expr binop x;
OpenMPAtomicUpdateChecker Checker(*this);
if (Checker.checkStatement(
- Body, (AtomicKind == OMPC_update)
- ? diag::err_omp_atomic_update_not_expression_statement
- : diag::err_omp_atomic_not_expression_statement,
+ Body,
+ (AtomicKind == OMPC_update)
+ ? diag::err_omp_atomic_update_not_expression_statement
+ : diag::err_omp_atomic_not_expression_statement,
diag::note_omp_atomic_update))
return StmtError();
if (!CurContext->isDependentContext()) {
@@ -11107,22 +13158,60 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
SourceRange(Body->getBeginLoc(), Body->getBeginLoc());
ErrorFound = NotACompoundStatement;
}
- if (ErrorFound != NoError) {
- Diag(ErrorLoc, diag::err_omp_atomic_capture_not_compound_statement)
- << ErrorRange;
- Diag(NoteLoc, diag::note_omp_atomic_capture) << ErrorFound << NoteRange;
+ }
+ if (ErrorFound != NoError) {
+ Diag(ErrorLoc, diag::err_omp_atomic_capture_not_compound_statement)
+ << ErrorRange;
+ Diag(NoteLoc, diag::note_omp_atomic_capture) << ErrorFound << NoteRange;
+ return StmtError();
+ }
+ if (CurContext->isDependentContext())
+ UE = V = E = X = nullptr;
+ } else if (AtomicKind == OMPC_compare) {
+ if (IsCompareCapture) {
+ OpenMPAtomicCompareCaptureChecker::ErrorInfoTy ErrorInfo;
+ OpenMPAtomicCompareCaptureChecker Checker(*this);
+ if (!Checker.checkStmt(Body, ErrorInfo)) {
+ Diag(ErrorInfo.ErrorLoc, diag::err_omp_atomic_compare_capture)
+ << ErrorInfo.ErrorRange;
+ Diag(ErrorInfo.NoteLoc, diag::note_omp_atomic_compare)
+ << ErrorInfo.Error << ErrorInfo.NoteRange;
return StmtError();
}
- if (CurContext->isDependentContext())
- UE = V = E = X = nullptr;
+ X = Checker.getX();
+ E = Checker.getE();
+ D = Checker.getD();
+ CE = Checker.getCond();
+ V = Checker.getV();
+ R = Checker.getR();
+ // We reuse IsXLHSInRHSPart to tell if it is in the form 'x ordop expr'.
+ IsXLHSInRHSPart = Checker.isXBinopExpr();
+ IsFailOnly = Checker.isFailOnly();
+ IsPostfixUpdate = Checker.isPostfixUpdate();
+ } else {
+ OpenMPAtomicCompareChecker::ErrorInfoTy ErrorInfo;
+ OpenMPAtomicCompareChecker Checker(*this);
+ if (!Checker.checkStmt(Body, ErrorInfo)) {
+ Diag(ErrorInfo.ErrorLoc, diag::err_omp_atomic_compare)
+ << ErrorInfo.ErrorRange;
+ Diag(ErrorInfo.NoteLoc, diag::note_omp_atomic_compare)
+ << ErrorInfo.Error << ErrorInfo.NoteRange;
+ return StmtError();
+ }
+ X = Checker.getX();
+ E = Checker.getE();
+ D = Checker.getD();
+ CE = Checker.getCond();
+ // We reuse IsXLHSInRHSPart to tell if it is in the form 'x ordop expr'.
+ IsXLHSInRHSPart = Checker.isXBinopExpr();
}
}
setFunctionHasBranchProtectedScope();
- return OMPAtomicDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
- X, V, E, UE, IsXLHSInRHSPart,
- IsPostfixUpdate);
+ return OMPAtomicDirective::Create(
+ Context, StartLoc, EndLoc, Clauses, AStmt,
+ {X, V, R, E, UE, D, CE, IsXLHSInRHSPart, IsPostfixUpdate, IsFailOnly});
}
StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
@@ -11289,6 +13378,26 @@ static bool hasClauses(ArrayRef<OMPClause *> Clauses, const OpenMPClauseKind K,
return hasClauses(Clauses, K) || hasClauses(Clauses, ClauseTypes...);
}
+/// Check if the variables in the mapping clause are externally visible.
+static bool isClauseMappable(ArrayRef<OMPClause *> Clauses) {
+ for (const OMPClause *C : Clauses) {
+ if (auto *TC = dyn_cast<OMPToClause>(C))
+ return llvm::all_of(TC->all_decls(), [](ValueDecl *VD) {
+ return !VD || !VD->hasAttr<OMPDeclareTargetDeclAttr>() ||
+ (VD->isExternallyVisible() &&
+ VD->getVisibility() != HiddenVisibility);
+ });
+ else if (auto *FC = dyn_cast<OMPFromClause>(C))
+ return llvm::all_of(FC->all_decls(), [](ValueDecl *VD) {
+ return !VD || !VD->hasAttr<OMPDeclareTargetDeclAttr>() ||
+ (VD->isExternallyVisible() &&
+ VD->getVisibility() != HiddenVisibility);
+ });
+ }
+
+ return true;
+}
+
StmtResult Sema::ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
@@ -11422,6 +13531,12 @@ StmtResult Sema::ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
Diag(StartLoc, diag::err_omp_at_least_one_motion_clause_required);
return StmtError();
}
+
+ if (!isClauseMappable(Clauses)) {
+ Diag(StartLoc, diag::err_omp_cannot_update_with_internal_linkage);
+ return StmtError();
+ }
+
return OMPTargetUpdateDirective::Create(Context, StartLoc, EndLoc, Clauses,
AStmt);
}
@@ -11432,6 +13547,10 @@ StmtResult Sema::ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
if (!AStmt)
return StmtError();
+ // Report affected OpenMP target offloading behavior when in HIP lang-mode.
+ if (getLangOpts().HIP && (DSAStack->getParentDirective() == OMPD_target))
+ Diag(StartLoc, diag::warn_hip_omp_target_directives);
+
auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
@@ -11633,6 +13752,44 @@ StmtResult Sema::ActOnOpenMPMasterTaskLoopDirective(
DSAStack->isCancelRegion());
}
+StmtResult Sema::ActOnOpenMPMaskedTaskLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ OMPLoopBasedDirective::HelperExprs B;
+ // In presence of clause 'collapse' or 'ordered' with number of loops, it will
+ // define the nested loops number.
+ unsigned NestedLoopCount =
+ checkOpenMPLoop(OMPD_masked_taskloop, getCollapseNumberExpr(Clauses),
+ /*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp for loop exprs were not built");
+
+ // OpenMP, [2.9.2 taskloop Construct, Restrictions]
+ // The grainsize clause and num_tasks clause are mutually exclusive and may
+ // not appear on the same taskloop directive.
+ if (checkMutuallyExclusiveClauses(*this, Clauses,
+ {OMPC_grainsize, OMPC_num_tasks}))
+ return StmtError();
+ // OpenMP, [2.9.2 taskloop Construct, Restrictions]
+ // If a reduction clause is present on the taskloop directive, the nogroup
+ // clause must not be specified.
+ if (checkReductionClauseWithNogroup(*this, Clauses))
+ return StmtError();
+
+ setFunctionHasBranchProtectedScope();
+ return OMPMaskedTaskLoopDirective::Create(Context, StartLoc, EndLoc,
+ NestedLoopCount, Clauses, AStmt, B,
+ DSAStack->isCancelRegion());
+}
+
StmtResult Sema::ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
@@ -11683,6 +13840,56 @@ StmtResult Sema::ActOnOpenMPMasterTaskLoopSimdDirective(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
+StmtResult Sema::ActOnOpenMPMaskedTaskLoopSimdDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ OMPLoopBasedDirective::HelperExprs B;
+ // In presence of clause 'collapse' or 'ordered' with number of loops, it will
+ // define the nested loops number.
+ unsigned NestedLoopCount =
+ checkOpenMPLoop(OMPD_masked_taskloop_simd, getCollapseNumberExpr(Clauses),
+ /*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp for loop exprs were not built");
+
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (OMPClause *C : Clauses) {
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope,
+ DSAStack))
+ return StmtError();
+ }
+ }
+
+ // OpenMP, [2.9.2 taskloop Construct, Restrictions]
+ // The grainsize clause and num_tasks clause are mutually exclusive and may
+ // not appear on the same taskloop directive.
+ if (checkMutuallyExclusiveClauses(*this, Clauses,
+ {OMPC_grainsize, OMPC_num_tasks}))
+ return StmtError();
+ // OpenMP, [2.9.2 taskloop Construct, Restrictions]
+ // If a reduction clause is present on the taskloop directive, the nogroup
+ // clause must not be specified.
+ if (checkReductionClauseWithNogroup(*this, Clauses))
+ return StmtError();
+ if (checkSimdlenSafelenSpecified(*this, Clauses))
+ return StmtError();
+
+ setFunctionHasBranchProtectedScope();
+ return OMPMaskedTaskLoopSimdDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
@@ -11740,6 +13947,63 @@ StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopDirective(
DSAStack->isCancelRegion());
}
+StmtResult Sema::ActOnOpenMPParallelMaskedTaskLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ auto *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ for (int ThisCaptureLevel =
+ getOpenMPCaptureLevels(OMPD_parallel_masked_taskloop);
+ ThisCaptureLevel > 1; --ThisCaptureLevel) {
+ CS = cast<CapturedStmt>(CS->getCapturedStmt());
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ }
+
+ OMPLoopBasedDirective::HelperExprs B;
+ // In presence of clause 'collapse' or 'ordered' with number of loops, it will
+ // define the nested loops number.
+ unsigned NestedLoopCount = checkOpenMPLoop(
+ OMPD_parallel_masked_taskloop, getCollapseNumberExpr(Clauses),
+ /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp for loop exprs were not built");
+
+ // OpenMP, [2.9.2 taskloop Construct, Restrictions]
+ // The grainsize clause and num_tasks clause are mutually exclusive and may
+ // not appear on the same taskloop directive.
+ if (checkMutuallyExclusiveClauses(*this, Clauses,
+ {OMPC_grainsize, OMPC_num_tasks}))
+ return StmtError();
+ // OpenMP, [2.9.2 taskloop Construct, Restrictions]
+ // If a reduction clause is present on the taskloop directive, the nogroup
+ // clause must not be specified.
+ if (checkReductionClauseWithNogroup(*this, Clauses))
+ return StmtError();
+
+ setFunctionHasBranchProtectedScope();
+ return OMPParallelMaskedTaskLoopDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ DSAStack->isCancelRegion());
+}
+
StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
@@ -11809,12 +14073,84 @@ StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopSimdDirective(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
+StmtResult Sema::ActOnOpenMPParallelMaskedTaskLoopSimdDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ auto *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ for (int ThisCaptureLevel =
+ getOpenMPCaptureLevels(OMPD_parallel_masked_taskloop_simd);
+ ThisCaptureLevel > 1; --ThisCaptureLevel) {
+ CS = cast<CapturedStmt>(CS->getCapturedStmt());
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ }
+
+ OMPLoopBasedDirective::HelperExprs B;
+ // In presence of clause 'collapse' or 'ordered' with number of loops, it will
+ // define the nested loops number.
+ unsigned NestedLoopCount = checkOpenMPLoop(
+ OMPD_parallel_masked_taskloop_simd, getCollapseNumberExpr(Clauses),
+ /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp for loop exprs were not built");
+
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (OMPClause *C : Clauses) {
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope,
+ DSAStack))
+ return StmtError();
+ }
+ }
+
+ // OpenMP, [2.9.2 taskloop Construct, Restrictions]
+ // The grainsize clause and num_tasks clause are mutually exclusive and may
+ // not appear on the same taskloop directive.
+ if (checkMutuallyExclusiveClauses(*this, Clauses,
+ {OMPC_grainsize, OMPC_num_tasks}))
+ return StmtError();
+ // OpenMP, [2.9.2 taskloop Construct, Restrictions]
+ // If a reduction clause is present on the taskloop directive, the nogroup
+ // clause must not be specified.
+ if (checkReductionClauseWithNogroup(*this, Clauses))
+ return StmtError();
+ if (checkSimdlenSafelenSpecified(*this, Clauses))
+ return StmtError();
+
+ setFunctionHasBranchProtectedScope();
+ return OMPParallelMaskedTaskLoopSimdDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
StmtResult Sema::ActOnOpenMPDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
if (!AStmt)
return StmtError();
+ if (!checkLastPrivateForMappedDirectives(Clauses))
+ return StmtError();
+
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
@@ -11830,8 +14166,10 @@ StmtResult Sema::ActOnOpenMPDistributeDirective(
"omp for loop exprs were not built");
setFunctionHasBranchProtectedScope();
- return OMPDistributeDirective::Create(Context, StartLoc, EndLoc,
- NestedLoopCount, Clauses, AStmt, B);
+ auto *DistributeDirective = OMPDistributeDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ DSAStack->getMappedDirective());
+ return DistributeDirective;
}
StmtResult Sema::ActOnOpenMPDistributeParallelForDirective(
@@ -12020,8 +14358,8 @@ StmtResult Sema::ActOnOpenMPTargetParallelForSimdDirective(
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
OMPD_target_parallel_for_simd, getCollapseNumberExpr(Clauses),
- getOrderedNumberExpr(Clauses), CS, *this, *DSAStack,
- VarsWithImplicitDSA, B);
+ getOrderedNumberExpr(Clauses), CS, *this, *DSAStack, VarsWithImplicitDSA,
+ B);
if (NestedLoopCount == 0)
return StmtError();
@@ -12346,6 +14684,19 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
}
setFunctionHasBranchProtectedScope();
+ const OMPClause *BareClause = nullptr;
+ bool HasThreadLimitAndNumTeamsClause = hasClauses(Clauses, OMPC_num_teams) &&
+ hasClauses(Clauses, OMPC_thread_limit);
+ bool HasBareClause = llvm::any_of(Clauses, [&](const OMPClause *C) {
+ BareClause = C;
+ return C->getClauseKind() == OMPC_ompx_bare;
+ });
+
+ if (HasBareClause && !HasThreadLimitAndNumTeamsClause) {
+ Diag(BareClause->getBeginLoc(), diag::err_ompx_bare_no_grid);
+ return StmtError();
+ }
+
return OMPTargetTeamsDirective::Create(Context, StartLoc, EndLoc, Clauses,
AStmt);
}
@@ -12605,8 +14956,8 @@ bool Sema::checkTransformableLoopNest(
llvm_unreachable("Unhandled loop transformation");
if (!DependentPreInits)
return;
- for (Decl *C : cast<DeclStmt>(DependentPreInits)->getDeclGroup())
- OriginalInits.back().push_back(C);
+ llvm::append_range(OriginalInits.back(),
+ cast<DeclStmt>(DependentPreInits)->getDeclGroup());
});
assert(OriginalInits.back().empty() && "No preinit after innermost loop");
OriginalInits.pop_back();
@@ -12768,8 +15119,8 @@ StmtResult Sema::ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
SmallVector<Stmt *, 4> BodyParts;
BodyParts.append(LoopHelper.Updates.begin(), LoopHelper.Updates.end());
BodyParts.push_back(Inner);
- Inner = CompoundStmt::Create(Context, BodyParts, Inner->getBeginLoc(),
- Inner->getEndLoc());
+ Inner = CompoundStmt::Create(Context, BodyParts, FPOptionsOverride(),
+ Inner->getBeginLoc(), Inner->getEndLoc());
Inner = new (Context)
ForStmt(Context, InitStmt.get(), CondExpr.get(), nullptr,
IncrStmt.get(), Inner, LoopHelper.Init->getBeginLoc(),
@@ -12852,17 +15203,19 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
Body, OriginalInits))
return StmtError();
+ unsigned NumGeneratedLoops = PartialClause ? 1 : 0;
+
// Delay unrolling to when template is completely instantiated.
if (CurContext->isDependentContext())
return OMPUnrollDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
- nullptr, nullptr);
+ NumGeneratedLoops, nullptr, nullptr);
OMPLoopBasedDirective::HelperExprs &LoopHelper = LoopHelpers.front();
if (FullClause) {
if (!VerifyPositiveIntegerConstantInClause(
LoopHelper.NumIterations, OMPC_full, /*StrictlyPositive=*/false,
- /*SuppressExprDigs=*/true)
+ /*SuppressExprDiags=*/true)
.isUsable()) {
Diag(AStmt->getBeginLoc(), diag::err_omp_unroll_full_variable_trip_count);
Diag(FullClause->getBeginLoc(), diag::note_omp_directive_here)
@@ -12874,9 +15227,9 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
// The generated loop may only be passed to other loop-associated directive
// when a partial clause is specified. Without the requirement it is
// sufficient to generate loop unroll metadata at code-generation.
- if (!PartialClause)
+ if (NumGeneratedLoops == 0)
return OMPUnrollDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
- nullptr, nullptr);
+ NumGeneratedLoops, nullptr, nullptr);
// Otherwise, we need to provide a de-sugared/transformed AST that can be
// associated with another loop directive.
@@ -12949,8 +15302,7 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
uint64_t Factor;
SourceLocation FactorLoc;
if (Expr *FactorVal = PartialClause->getFactor()) {
- Factor =
- FactorVal->getIntegerConstantExpr(Context).getValue().getZExtValue();
+ Factor = FactorVal->getIntegerConstantExpr(Context)->getZExtValue();
FactorLoc = FactorVal->getExprLoc();
} else {
// TODO: Use a better profitability model.
@@ -13018,11 +15370,11 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
if (!EndOfTile.isUsable())
return StmtError();
ExprResult InnerCond1 = BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(),
- BO_LE, MakeInnerRef(), EndOfTile.get());
+ BO_LT, MakeInnerRef(), EndOfTile.get());
if (!InnerCond1.isUsable())
return StmtError();
ExprResult InnerCond2 =
- BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LE, MakeInnerRef(),
+ BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT, MakeInnerRef(),
MakeNumIterations());
if (!InnerCond2.isUsable())
return StmtError();
@@ -13042,8 +15394,9 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
SmallVector<Stmt *> InnerBodyStmts;
InnerBodyStmts.append(LoopHelper.Updates.begin(), LoopHelper.Updates.end());
InnerBodyStmts.push_back(Body);
- CompoundStmt *InnerBody = CompoundStmt::Create(
- Context, InnerBodyStmts, Body->getBeginLoc(), Body->getEndLoc());
+ CompoundStmt *InnerBody =
+ CompoundStmt::Create(Context, InnerBodyStmts, FPOptionsOverride(),
+ Body->getBeginLoc(), Body->getEndLoc());
ForStmt *InnerFor = new (Context)
ForStmt(Context, InnerInit.get(), InnerCond.get(), nullptr,
InnerIncr.get(), InnerBody, LoopHelper.Init->getBeginLoc(),
@@ -13097,7 +15450,8 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
LoopHelper.Init->getBeginLoc(), LoopHelper.Inc->getEndLoc());
return OMPUnrollDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
- OuterFor, buildPreInits(Context, PreInits));
+ NumGeneratedLoops, OuterFor,
+ buildPreInits(Context, PreInits));
}
OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
@@ -13136,12 +15490,6 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_priority:
Res = ActOnOpenMPPriorityClause(Expr, StartLoc, LParenLoc, EndLoc);
break;
- case OMPC_grainsize:
- Res = ActOnOpenMPGrainsizeClause(Expr, StartLoc, LParenLoc, EndLoc);
- break;
- case OMPC_num_tasks:
- Res = ActOnOpenMPNumTasksClause(Expr, StartLoc, LParenLoc, EndLoc);
- break;
case OMPC_hint:
Res = ActOnOpenMPHintClause(Expr, StartLoc, LParenLoc, EndLoc);
break;
@@ -13163,6 +15511,17 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_partial:
Res = ActOnOpenMPPartialClause(Expr, StartLoc, LParenLoc, EndLoc);
break;
+ case OMPC_message:
+ Res = ActOnOpenMPMessageClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_align:
+ Res = ActOnOpenMPAlignClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_ompx_dyn_cgroup_mem:
+ Res = ActOnOpenMPXDynCGroupMemClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_grainsize:
+ case OMPC_num_tasks:
case OMPC_device:
case OMPC_if:
case OMPC_default:
@@ -13190,6 +15549,7 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_write:
case OMPC_update:
case OMPC_capture:
+ case OMPC_compare:
case OMPC_seq_cst:
case OMPC_acq_rel:
case OMPC_acquire:
@@ -13218,11 +15578,15 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_match:
case OMPC_nontemporal:
case OMPC_order:
+ case OMPC_at:
+ case OMPC_severity:
case OMPC_destroy:
case OMPC_inclusive:
case OMPC_exclusive:
case OMPC_uses_allocators:
case OMPC_affinity:
+ case OMPC_when:
+ case OMPC_bind:
default:
llvm_unreachable("Clause is not allowed.");
}
@@ -13247,9 +15611,10 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
CaptureRegion = OMPD_parallel;
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case OMPD_target_parallel:
case OMPD_target_parallel_for:
+ case OMPD_target_parallel_loop:
// If this clause applies to the nested 'parallel' region, capture within
// the 'target' region, otherwise do not capture.
if (NameModifier == OMPD_unknown || NameModifier == OMPD_parallel)
@@ -13261,7 +15626,8 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
CaptureRegion = OMPD_parallel;
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
+ case OMPD_target_teams_loop:
case OMPD_target_teams_distribute_parallel_for:
// If this clause applies to the nested 'parallel' region, capture within
// the 'teams' region, otherwise do not capture.
@@ -13274,7 +15640,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
CaptureRegion = OMPD_parallel;
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case OMPD_teams_distribute_parallel_for:
CaptureRegion = OMPD_teams;
break;
@@ -13283,10 +15649,25 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_target_exit_data:
CaptureRegion = OMPD_task;
break;
+ case OMPD_parallel_masked_taskloop:
+ if (NameModifier == OMPD_unknown || NameModifier == OMPD_taskloop)
+ CaptureRegion = OMPD_parallel;
+ break;
case OMPD_parallel_master_taskloop:
if (NameModifier == OMPD_unknown || NameModifier == OMPD_taskloop)
CaptureRegion = OMPD_parallel;
break;
+ case OMPD_parallel_masked_taskloop_simd:
+ if ((OpenMPVersion <= 45 && NameModifier == OMPD_unknown) ||
+ NameModifier == OMPD_taskloop) {
+ CaptureRegion = OMPD_parallel;
+ break;
+ }
+ if (OpenMPVersion <= 45)
+ break;
+ if (NameModifier == OMPD_unknown || NameModifier == OMPD_simd)
+ CaptureRegion = OMPD_taskloop;
+ break;
case OMPD_parallel_master_taskloop_simd:
if ((OpenMPVersion <= 45 && NameModifier == OMPD_unknown) ||
NameModifier == OMPD_taskloop) {
@@ -13306,6 +15687,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
break;
case OMPD_taskloop_simd:
case OMPD_master_taskloop_simd:
+ case OMPD_masked_taskloop_simd:
if (OpenMPVersion <= 45)
break;
if (NameModifier == OMPD_unknown || NameModifier == OMPD_simd)
@@ -13331,8 +15713,10 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_cancel:
case OMPD_parallel:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
case OMPD_parallel_sections:
case OMPD_parallel_for:
+ case OMPD_parallel_loop:
case OMPD_target:
case OMPD_target_teams:
case OMPD_target_teams_distribute:
@@ -13340,6 +15724,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_task:
case OMPD_taskloop:
case OMPD_master_taskloop:
+ case OMPD_masked_taskloop:
case OMPD_target_data:
case OMPD_simd:
case OMPD_for_simd:
@@ -13349,6 +15734,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_threadprivate:
case OMPD_allocate:
case OMPD_taskyield:
+ case OMPD_error:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_cancellation_point:
@@ -13363,6 +15749,8 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
+ case OMPD_loop:
+ case OMPD_teams_loop:
case OMPD_teams:
case OMPD_tile:
case OMPD_unroll:
@@ -13379,6 +15767,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_atomic:
case OMPD_teams_distribute:
case OMPD_requires:
+ case OMPD_metadirective:
llvm_unreachable("Unexpected OpenMP directive with if-clause");
case OMPD_unknown:
default:
@@ -13390,6 +15779,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
+ case OMPD_target_parallel_loop:
CaptureRegion = OMPD_target;
break;
case OMPD_teams_distribute_parallel_for:
@@ -13400,13 +15790,17 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
break;
case OMPD_parallel:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
case OMPD_parallel_sections:
case OMPD_parallel_for:
case OMPD_parallel_for_simd:
+ case OMPD_parallel_loop:
case OMPD_distribute_parallel_for:
case OMPD_distribute_parallel_for_simd:
case OMPD_parallel_master_taskloop:
+ case OMPD_parallel_masked_taskloop:
case OMPD_parallel_master_taskloop_simd:
+ case OMPD_parallel_masked_taskloop_simd:
// Do not capture num_threads-clause expressions.
break;
case OMPD_target_data:
@@ -13423,10 +15817,13 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
+ case OMPD_masked_taskloop:
case OMPD_master_taskloop_simd:
+ case OMPD_masked_taskloop_simd:
case OMPD_threadprivate:
case OMPD_allocate:
case OMPD_taskyield:
+ case OMPD_error:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_cancellation_point:
@@ -13441,6 +15838,9 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
+ case OMPD_loop:
+ case OMPD_teams_loop:
+ case OMPD_target_teams_loop:
case OMPD_teams:
case OMPD_simd:
case OMPD_tile:
@@ -13461,6 +15861,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_teams_distribute:
case OMPD_teams_distribute_simd:
case OMPD_requires:
+ case OMPD_metadirective:
llvm_unreachable("Unexpected OpenMP directive with num_threads-clause");
case OMPD_unknown:
default:
@@ -13474,6 +15875,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_target_teams_distribute_simd:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd:
+ case OMPD_target_teams_loop:
CaptureRegion = OMPD_target;
break;
case OMPD_teams_distribute_parallel_for:
@@ -13481,6 +15883,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_teams:
case OMPD_teams_distribute:
case OMPD_teams_distribute_simd:
+ case OMPD_teams_loop:
// Do not capture num_teams-clause expressions.
break;
case OMPD_distribute_parallel_for:
@@ -13489,9 +15892,13 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
+ case OMPD_masked_taskloop:
case OMPD_master_taskloop_simd:
+ case OMPD_masked_taskloop_simd:
case OMPD_parallel_master_taskloop:
+ case OMPD_parallel_masked_taskloop:
case OMPD_parallel_master_taskloop_simd:
+ case OMPD_parallel_masked_taskloop_simd:
case OMPD_target_data:
case OMPD_target_enter_data:
case OMPD_target_exit_data:
@@ -13499,17 +15906,21 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_cancel:
case OMPD_parallel:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
case OMPD_parallel_sections:
case OMPD_parallel_for:
case OMPD_parallel_for_simd:
+ case OMPD_parallel_loop:
case OMPD_target:
case OMPD_target_simd:
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
+ case OMPD_target_parallel_loop:
case OMPD_threadprivate:
case OMPD_allocate:
case OMPD_taskyield:
+ case OMPD_error:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_cancellation_point:
@@ -13524,6 +15935,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
+ case OMPD_loop:
case OMPD_simd:
case OMPD_tile:
case OMPD_unroll:
@@ -13541,6 +15953,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_atomic:
case OMPD_distribute_simd:
case OMPD_requires:
+ case OMPD_metadirective:
llvm_unreachable("Unexpected OpenMP directive with num_teams-clause");
case OMPD_unknown:
default:
@@ -13549,11 +15962,18 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
break;
case OMPC_thread_limit:
switch (DKind) {
+ case OMPD_target:
case OMPD_target_teams:
case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_simd:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd:
+ case OMPD_target_teams_loop:
+ case OMPD_target_simd:
+ case OMPD_target_parallel:
+ case OMPD_target_parallel_for:
+ case OMPD_target_parallel_for_simd:
+ case OMPD_target_parallel_loop:
CaptureRegion = OMPD_target;
break;
case OMPD_teams_distribute_parallel_for:
@@ -13561,6 +15981,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_teams:
case OMPD_teams_distribute:
case OMPD_teams_distribute_simd:
+ case OMPD_teams_loop:
// Do not capture thread_limit-clause expressions.
break;
case OMPD_distribute_parallel_for:
@@ -13569,9 +15990,13 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
+ case OMPD_masked_taskloop:
case OMPD_master_taskloop_simd:
+ case OMPD_masked_taskloop_simd:
case OMPD_parallel_master_taskloop:
+ case OMPD_parallel_masked_taskloop:
case OMPD_parallel_master_taskloop_simd:
+ case OMPD_parallel_masked_taskloop_simd:
case OMPD_target_data:
case OMPD_target_enter_data:
case OMPD_target_exit_data:
@@ -13579,17 +16004,15 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_cancel:
case OMPD_parallel:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
case OMPD_parallel_sections:
case OMPD_parallel_for:
case OMPD_parallel_for_simd:
- case OMPD_target:
- case OMPD_target_simd:
- case OMPD_target_parallel:
- case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd:
+ case OMPD_parallel_loop:
case OMPD_threadprivate:
case OMPD_allocate:
case OMPD_taskyield:
+ case OMPD_error:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_cancellation_point:
@@ -13604,6 +16027,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
+ case OMPD_loop:
case OMPD_simd:
case OMPD_tile:
case OMPD_unroll:
@@ -13621,6 +16045,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_atomic:
case OMPD_distribute_simd:
case OMPD_requires:
+ case OMPD_metadirective:
llvm_unreachable("Unexpected OpenMP directive with thread_limit-clause");
case OMPD_unknown:
default:
@@ -13649,9 +16074,13 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
+ case OMPD_masked_taskloop:
case OMPD_master_taskloop_simd:
+ case OMPD_masked_taskloop_simd:
case OMPD_parallel_master_taskloop:
+ case OMPD_parallel_masked_taskloop:
case OMPD_parallel_master_taskloop_simd:
+ case OMPD_parallel_masked_taskloop_simd:
case OMPD_target_data:
case OMPD_target_enter_data:
case OMPD_target_exit_data:
@@ -13667,10 +16096,12 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_cancel:
case OMPD_parallel:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
case OMPD_parallel_sections:
case OMPD_threadprivate:
case OMPD_allocate:
case OMPD_taskyield:
+ case OMPD_error:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_cancellation_point:
@@ -13685,6 +16116,11 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
+ case OMPD_loop:
+ case OMPD_teams_loop:
+ case OMPD_target_teams_loop:
+ case OMPD_parallel_loop:
+ case OMPD_target_parallel_loop:
case OMPD_simd:
case OMPD_tile:
case OMPD_unroll:
@@ -13701,6 +16137,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_distribute_simd:
case OMPD_target_teams:
case OMPD_requires:
+ case OMPD_metadirective:
llvm_unreachable("Unexpected OpenMP directive with schedule clause");
case OMPD_unknown:
default:
@@ -13733,9 +16170,13 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
+ case OMPD_masked_taskloop:
case OMPD_master_taskloop_simd:
+ case OMPD_masked_taskloop_simd:
case OMPD_parallel_master_taskloop:
+ case OMPD_parallel_masked_taskloop:
case OMPD_parallel_master_taskloop_simd:
+ case OMPD_parallel_masked_taskloop_simd:
case OMPD_target_data:
case OMPD_target_enter_data:
case OMPD_target_exit_data:
@@ -13747,10 +16188,12 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_cancel:
case OMPD_parallel:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
case OMPD_parallel_sections:
case OMPD_threadprivate:
case OMPD_allocate:
case OMPD_taskyield:
+ case OMPD_error:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_cancellation_point:
@@ -13765,6 +16208,11 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
+ case OMPD_loop:
+ case OMPD_teams_loop:
+ case OMPD_target_teams_loop:
+ case OMPD_parallel_loop:
+ case OMPD_target_parallel_loop:
case OMPD_simd:
case OMPD_tile:
case OMPD_unroll:
@@ -13781,12 +16229,33 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_atomic:
case OMPD_target_teams:
case OMPD_requires:
+ case OMPD_metadirective:
llvm_unreachable("Unexpected OpenMP directive with dist_schedule clause");
case OMPD_unknown:
default:
llvm_unreachable("Unknown OpenMP directive");
}
break;
+ case OMPC_ompx_dyn_cgroup_mem:
+ switch (DKind) {
+ case OMPD_target:
+ case OMPD_target_simd:
+ case OMPD_target_teams:
+ case OMPD_target_parallel:
+ case OMPD_target_teams_distribute:
+ case OMPD_target_teams_distribute_simd:
+ case OMPD_target_parallel_for:
+ case OMPD_target_parallel_for_simd:
+ case OMPD_target_parallel_loop:
+ case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd:
+ case OMPD_target_teams_loop:
+ CaptureRegion = OMPD_target;
+ break;
+ default:
+ llvm_unreachable("Unknown OpenMP directive");
+ }
+ break;
case OMPC_device:
switch (DKind) {
case OMPD_target_update:
@@ -13800,8 +16269,10 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_target_teams_distribute_simd:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
+ case OMPD_target_parallel_loop:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd:
+ case OMPD_target_teams_loop:
case OMPD_dispatch:
CaptureRegion = OMPD_task;
break;
@@ -13820,18 +16291,24 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
+ case OMPD_masked_taskloop:
case OMPD_master_taskloop_simd:
+ case OMPD_masked_taskloop_simd:
case OMPD_parallel_master_taskloop:
+ case OMPD_parallel_masked_taskloop:
case OMPD_parallel_master_taskloop_simd:
+ case OMPD_parallel_masked_taskloop_simd:
case OMPD_cancel:
case OMPD_parallel:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
case OMPD_parallel_sections:
case OMPD_parallel_for:
case OMPD_parallel_for_simd:
case OMPD_threadprivate:
case OMPD_allocate:
case OMPD_taskyield:
+ case OMPD_error:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_cancellation_point:
@@ -13846,6 +16323,9 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
+ case OMPD_loop:
+ case OMPD_teams_loop:
+ case OMPD_parallel_loop:
case OMPD_simd:
case OMPD_tile:
case OMPD_unroll:
@@ -13863,6 +16343,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_atomic:
case OMPD_distribute_simd:
case OMPD_requires:
+ case OMPD_metadirective:
llvm_unreachable("Unexpected OpenMP directive with device-clause");
case OMPD_unknown:
default:
@@ -13878,8 +16359,12 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
+ case OMPD_masked_taskloop:
case OMPD_master_taskloop_simd:
+ case OMPD_masked_taskloop_simd:
break;
+ case OMPD_parallel_masked_taskloop:
+ case OMPD_parallel_masked_taskloop_simd:
case OMPD_parallel_master_taskloop:
case OMPD_parallel_master_taskloop_simd:
CaptureRegion = OMPD_parallel;
@@ -13908,12 +16393,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_cancel:
case OMPD_parallel:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
case OMPD_parallel_sections:
case OMPD_parallel_for:
case OMPD_parallel_for_simd:
case OMPD_threadprivate:
case OMPD_allocate:
case OMPD_taskyield:
+ case OMPD_error:
case OMPD_barrier:
case OMPD_taskwait:
case OMPD_cancellation_point:
@@ -13928,6 +16415,11 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_end_declare_variant:
case OMPD_declare_target:
case OMPD_end_declare_target:
+ case OMPD_loop:
+ case OMPD_teams_loop:
+ case OMPD_target_teams_loop:
+ case OMPD_parallel_loop:
+ case OMPD_target_parallel_loop:
case OMPD_simd:
case OMPD_tile:
case OMPD_unroll:
@@ -13945,6 +16437,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_atomic:
case OMPD_distribute_simd:
case OMPD_requires:
+ case OMPD_metadirective:
llvm_unreachable("Unexpected OpenMP directive with grainsize-clause");
case OMPD_unknown:
default:
@@ -13964,6 +16457,15 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPC_filter:
// Do not capture filter-clause expressions.
break;
+ case OMPC_when:
+ if (DKind == OMPD_metadirective) {
+ CaptureRegion = OMPD_metadirective;
+ } else if (DKind == OMPD_unknown) {
+ llvm_unreachable("Unknown OpenMP directive");
+ } else {
+ llvm_unreachable("Unexpected OpenMP directive with when clause");
+ }
+ break;
case OMPC_firstprivate:
case OMPC_lastprivate:
case OMPC_reduction:
@@ -13994,6 +16496,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPC_write:
case OMPC_update:
case OMPC_capture:
+ case OMPC_compare:
case OMPC_seq_cst:
case OMPC_acq_rel:
case OMPC_acquire:
@@ -14022,12 +16525,16 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPC_match:
case OMPC_nontemporal:
case OMPC_order:
+ case OMPC_at:
+ case OMPC_severity:
+ case OMPC_message:
case OMPC_destroy:
case OMPC_detach:
case OMPC_inclusive:
case OMPC_exclusive:
case OMPC_uses_allocators:
case OMPC_affinity:
+ case OMPC_bind:
default:
llvm_unreachable("Unexpected OpenMP clause.");
}
@@ -14159,7 +16666,7 @@ isNonNegativeIntegerValue(Expr *&ValExpr, Sema &SemaRef, OpenMPClauseKind CKind,
ValExpr = Value.get();
// The expression must evaluate to a non-negative integer value.
- if (Optional<llvm::APSInt> Result =
+ if (std::optional<llvm::APSInt> Result =
ValExpr->getIntegerConstantExpr(SemaRef.Context)) {
if (Result->isSigned() &&
!((!StrictlyPositive && Result->isNonNegative()) ||
@@ -14248,7 +16755,7 @@ ExprResult Sema::VerifyPositiveIntegerConstantInClause(Expr *E,
<< E->getSourceRange();
return ExprError();
}
- if (CKind == OMPC_aligned && !Result.isPowerOf2()) {
+ if ((CKind == OMPC_aligned || CKind == OMPC_align) && !Result.isPowerOf2()) {
Diag(E->getExprLoc(), diag::warn_omp_alignment_not_power_of_two)
<< E->getSourceRange();
return ExprError();
@@ -14289,10 +16796,22 @@ OMPClause *Sema::ActOnOpenMPSimdlenClause(Expr *Len, SourceLocation StartLoc,
/// Tries to find omp_allocator_handle_t type.
static bool findOMPAllocatorHandleT(Sema &S, SourceLocation Loc,
DSAStackTy *Stack) {
- QualType OMPAllocatorHandleT = Stack->getOMPAllocatorHandleT();
- if (!OMPAllocatorHandleT.isNull())
+ if (!Stack->getOMPAllocatorHandleT().isNull())
return true;
- // Build the predefined allocator expressions.
+
+ // Set the allocator handle type.
+ IdentifierInfo *II = &S.PP.getIdentifierTable().get("omp_allocator_handle_t");
+ ParsedType PT = S.getTypeName(*II, Loc, S.getCurScope());
+ if (!PT.getAsOpaquePtr() || PT.get().isNull()) {
+ S.Diag(Loc, diag::err_omp_implied_type_not_found)
+ << "omp_allocator_handle_t";
+ return false;
+ }
+ QualType AllocatorHandleEnumTy = PT.get();
+ AllocatorHandleEnumTy.addConst();
+ Stack->setOMPAllocatorHandleT(AllocatorHandleEnumTy);
+
+ // Fill the predefined allocator map.
bool ErrorFound = false;
for (int I = 0; I < OMPAllocateDeclAttr::OMPUserDefinedMemAlloc; ++I) {
auto AllocatorKind = static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(I);
@@ -14312,9 +16831,10 @@ static bool findOMPAllocatorHandleT(Sema &S, SourceLocation Loc,
ErrorFound = true;
break;
}
- if (OMPAllocatorHandleT.isNull())
- OMPAllocatorHandleT = AllocatorType;
- if (!S.getASTContext().hasSameType(OMPAllocatorHandleT, AllocatorType)) {
+ Res = S.PerformImplicitConversion(Res.get(), AllocatorHandleEnumTy,
+ Sema::AA_Initializing,
+ /* AllowExplicit */ true);
+ if (!Res.isUsable()) {
ErrorFound = true;
break;
}
@@ -14325,8 +16845,7 @@ static bool findOMPAllocatorHandleT(Sema &S, SourceLocation Loc,
<< "omp_allocator_handle_t";
return false;
}
- OMPAllocatorHandleT.addConst();
- Stack->setOMPAllocatorHandleT(OMPAllocatorHandleT);
+
return true;
}
@@ -14411,14 +16930,28 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
static_cast<OpenMPAtomicDefaultMemOrderClauseKind>(Argument),
ArgumentLoc, StartLoc, LParenLoc, EndLoc);
break;
- case OMPC_order:
- Res = ActOnOpenMPOrderClause(static_cast<OpenMPOrderClauseKind>(Argument),
- ArgumentLoc, StartLoc, LParenLoc, EndLoc);
+ case OMPC_fail:
+ Res = ActOnOpenMPFailClause(
+ static_cast<OpenMPClauseKind>(Argument),
+ ArgumentLoc, StartLoc, LParenLoc, EndLoc);
break;
case OMPC_update:
Res = ActOnOpenMPUpdateClause(static_cast<OpenMPDependClauseKind>(Argument),
ArgumentLoc, StartLoc, LParenLoc, EndLoc);
break;
+ case OMPC_bind:
+ Res = ActOnOpenMPBindClause(static_cast<OpenMPBindClauseKind>(Argument),
+ ArgumentLoc, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_at:
+ Res = ActOnOpenMPAtClause(static_cast<OpenMPAtClauseKind>(Argument),
+ ArgumentLoc, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_severity:
+ Res = ActOnOpenMPSeverityClause(
+ static_cast<OpenMPSeverityClauseKind>(Argument), ArgumentLoc, StartLoc,
+ LParenLoc, EndLoc);
+ break;
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
@@ -14450,6 +16983,7 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
case OMPC_read:
case OMPC_write:
case OMPC_capture:
+ case OMPC_compare:
case OMPC_seq_cst:
case OMPC_acq_rel:
case OMPC_acquire:
@@ -14476,6 +17010,7 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
case OMPC_use_device_ptr:
case OMPC_use_device_addr:
case OMPC_is_device_ptr:
+ case OMPC_has_device_addr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
case OMPC_reverse_offload:
@@ -14491,6 +17026,8 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
case OMPC_exclusive:
case OMPC_uses_allocators:
case OMPC_affinity:
+ case OMPC_when:
+ case OMPC_message:
default:
llvm_unreachable("Clause is not allowed.");
}
@@ -14499,13 +17036,12 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
static std::string
getListOfPossibleValues(OpenMPClauseKind K, unsigned First, unsigned Last,
- ArrayRef<unsigned> Exclude = llvm::None) {
+ ArrayRef<unsigned> Exclude = std::nullopt) {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
unsigned Skipped = Exclude.size();
- auto S = Exclude.begin(), E = Exclude.end();
for (unsigned I = First; I < Last; ++I) {
- if (std::find(S, E, I) != E) {
+ if (llvm::is_contained(Exclude, I)) {
--Skipped;
continue;
}
@@ -14541,6 +17077,9 @@ OMPClause *Sema::ActOnOpenMPDefaultClause(DefaultKind Kind,
case OMP_DEFAULT_firstprivate:
DSAStack->setDefaultDSAFirstPrivate(KindKwLoc);
break;
+ case OMP_DEFAULT_private:
+ DSAStack->setDefaultDSAPrivate(KindKwLoc);
+ break;
default:
llvm_unreachable("DSA unexpected in OpenMP default clause");
}
@@ -14592,22 +17131,87 @@ OMPClause *Sema::ActOnOpenMPAtomicDefaultMemOrderClause(
LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
- SourceLocation KindKwLoc,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
- if (Kind == OMPC_ORDER_unknown) {
+OMPClause *Sema::ActOnOpenMPAtClause(OpenMPAtClauseKind Kind,
+ SourceLocation KindKwLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ if (Kind == OMPC_AT_unknown) {
+ Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
+ << getListOfPossibleValues(OMPC_at, /*First=*/0,
+ /*Last=*/OMPC_AT_unknown)
+ << getOpenMPClauseName(OMPC_at);
+ return nullptr;
+ }
+ return new (Context)
+ OMPAtClause(Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPSeverityClause(OpenMPSeverityClauseKind Kind,
+ SourceLocation KindKwLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ if (Kind == OMPC_SEVERITY_unknown) {
+ Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
+ << getListOfPossibleValues(OMPC_severity, /*First=*/0,
+ /*Last=*/OMPC_SEVERITY_unknown)
+ << getOpenMPClauseName(OMPC_severity);
+ return nullptr;
+ }
+ return new (Context)
+ OMPSeverityClause(Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPMessageClause(Expr *ME, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ assert(ME && "NULL expr in Message clause");
+ if (!isa<StringLiteral>(ME)) {
+ Diag(ME->getBeginLoc(), diag::warn_clause_expected_string)
+ << getOpenMPClauseName(OMPC_message);
+ return nullptr;
+ }
+ return new (Context) OMPMessageClause(ME, StartLoc, LParenLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPOrderClause(
+ OpenMPOrderClauseModifier Modifier, OpenMPOrderClauseKind Kind,
+ SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
+ SourceLocation KindLoc, SourceLocation EndLoc) {
+ if (Kind != OMPC_ORDER_concurrent ||
+ (LangOpts.OpenMP < 51 && MLoc.isValid())) {
+ // Kind should be concurrent,
+ // Modifiers introduced in OpenMP 5.1
static_assert(OMPC_ORDER_unknown > 0,
"OMPC_ORDER_unknown not greater than 0");
- Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
- << getListOfPossibleValues(OMPC_order, /*First=*/0,
+
+ Diag(KindLoc, diag::err_omp_unexpected_clause_value)
+ << getListOfPossibleValues(OMPC_order,
+ /*First=*/0,
/*Last=*/OMPC_ORDER_unknown)
<< getOpenMPClauseName(OMPC_order);
return nullptr;
}
- return new (Context)
- OMPOrderClause(Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc);
+ if (LangOpts.OpenMP >= 51) {
+ if (Modifier == OMPC_ORDER_MODIFIER_unknown && MLoc.isValid()) {
+ Diag(MLoc, diag::err_omp_unexpected_clause_value)
+ << getListOfPossibleValues(OMPC_order,
+ /*First=*/OMPC_ORDER_MODIFIER_unknown + 1,
+ /*Last=*/OMPC_ORDER_MODIFIER_last)
+ << getOpenMPClauseName(OMPC_order);
+ } else {
+ DSAStack->setRegionHasOrderConcurrent(/*HasOrderConcurrent=*/true);
+ if (DSAStack->getCurScope()) {
+ // mark the current scope with 'order' flag
+ unsigned existingFlags = DSAStack->getCurScope()->getFlags();
+ DSAStack->getCurScope()->setFlags(existingFlags |
+ Scope::OpenMPOrderClauseScope);
+ }
+ }
+ }
+ return new (Context) OMPOrderClause(Kind, KindLoc, StartLoc, LParenLoc,
+ EndLoc, Modifier, MLoc);
}
OMPClause *Sema::ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
@@ -14617,8 +17221,11 @@ OMPClause *Sema::ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation EndLoc) {
if (Kind == OMPC_DEPEND_unknown || Kind == OMPC_DEPEND_source ||
Kind == OMPC_DEPEND_sink || Kind == OMPC_DEPEND_depobj) {
- unsigned Except[] = {OMPC_DEPEND_source, OMPC_DEPEND_sink,
- OMPC_DEPEND_depobj};
+ SmallVector<unsigned> Except = {
+ OMPC_DEPEND_source, OMPC_DEPEND_sink, OMPC_DEPEND_depobj,
+ OMPC_DEPEND_outallmemory, OMPC_DEPEND_inoutallmemory};
+ if (LangOpts.OpenMP < 51)
+ Except.push_back(OMPC_DEPEND_inoutset);
Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
<< getListOfPossibleValues(OMPC_depend, /*First=*/0,
/*Last=*/OMPC_DEPEND_unknown, Except)
@@ -14668,6 +17275,17 @@ OMPClause *Sema::ActOnOpenMPPartialClause(Expr *FactorExpr,
FactorExpr);
}
+OMPClause *Sema::ActOnOpenMPAlignClause(Expr *A, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ ExprResult AlignVal;
+ AlignVal = VerifyPositiveIntegerConstantInClause(A, OMPC_align);
+ if (AlignVal.isInvalid())
+ return nullptr;
+ return OMPAlignClause::Create(Context, AlignVal.get(), StartLoc, LParenLoc,
+ EndLoc);
+}
+
OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Argument, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
@@ -14705,12 +17323,33 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
StartLoc, LParenLoc, ArgumentLoc[Modifier], ArgumentLoc[DefaultmapKind],
EndLoc);
break;
+ case OMPC_order:
+ enum { OrderModifier, OrderKind };
+ Res = ActOnOpenMPOrderClause(
+ static_cast<OpenMPOrderClauseModifier>(Argument[OrderModifier]),
+ static_cast<OpenMPOrderClauseKind>(Argument[OrderKind]), StartLoc,
+ LParenLoc, ArgumentLoc[OrderModifier], ArgumentLoc[OrderKind], EndLoc);
+ break;
case OMPC_device:
assert(Argument.size() == 1 && ArgumentLoc.size() == 1);
Res = ActOnOpenMPDeviceClause(
static_cast<OpenMPDeviceClauseModifier>(Argument.back()), Expr,
StartLoc, LParenLoc, ArgumentLoc.back(), EndLoc);
break;
+ case OMPC_grainsize:
+ assert(Argument.size() == 1 && ArgumentLoc.size() == 1 &&
+ "Modifier for grainsize clause and its location are expected.");
+ Res = ActOnOpenMPGrainsizeClause(
+ static_cast<OpenMPGrainsizeClauseModifier>(Argument.back()), Expr,
+ StartLoc, LParenLoc, ArgumentLoc.back(), EndLoc);
+ break;
+ case OMPC_num_tasks:
+ assert(Argument.size() == 1 && ArgumentLoc.size() == 1 &&
+ "Modifier for num_tasks clause and its location are expected.");
+ Res = ActOnOpenMPNumTasksClause(
+ static_cast<OpenMPNumTasksClauseModifier>(Argument.back()), Expr,
+ StartLoc, LParenLoc, ArgumentLoc.back(), EndLoc);
+ break;
case OMPC_final:
case OMPC_num_threads:
case OMPC_safelen:
@@ -14743,6 +17382,7 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
case OMPC_write:
case OMPC_update:
case OMPC_capture:
+ case OMPC_compare:
case OMPC_seq_cst:
case OMPC_acq_rel:
case OMPC_acquire:
@@ -14755,9 +17395,7 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
case OMPC_num_teams:
case OMPC_thread_limit:
case OMPC_priority:
- case OMPC_grainsize:
case OMPC_nogroup:
- case OMPC_num_tasks:
case OMPC_hint:
case OMPC_unknown:
case OMPC_uniform:
@@ -14766,6 +17404,7 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
case OMPC_use_device_ptr:
case OMPC_use_device_addr:
case OMPC_is_device_ptr:
+ case OMPC_has_device_addr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
case OMPC_reverse_offload:
@@ -14774,7 +17413,9 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
case OMPC_device_type:
case OMPC_match:
case OMPC_nontemporal:
- case OMPC_order:
+ case OMPC_at:
+ case OMPC_severity:
+ case OMPC_message:
case OMPC_destroy:
case OMPC_novariants:
case OMPC_nocontext:
@@ -14783,6 +17424,8 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
case OMPC_exclusive:
case OMPC_uses_allocators:
case OMPC_affinity:
+ case OMPC_when:
+ case OMPC_bind:
default:
llvm_unreachable("Clause is not allowed.");
}
@@ -14876,7 +17519,7 @@ OMPClause *Sema::ActOnOpenMPScheduleClause(
// OpenMP [2.7.1, Restrictions]
// chunk_size must be a loop invariant integer expression with a positive
// value.
- if (Optional<llvm::APSInt> Result =
+ if (std::optional<llvm::APSInt> Result =
ValExpr->getIntegerConstantExpr(Context)) {
if (Result->isSigned() && !Result->isStrictlyPositive()) {
Diag(ChunkSizeLoc, diag::err_omp_negative_expression_in_clause)
@@ -14929,6 +17572,12 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_capture:
Res = ActOnOpenMPCaptureClause(StartLoc, EndLoc);
break;
+ case OMPC_compare:
+ Res = ActOnOpenMPCompareClause(StartLoc, EndLoc);
+ break;
+ case OMPC_fail:
+ Res = ActOnOpenMPFailClause(StartLoc, EndLoc);
+ break;
case OMPC_seq_cst:
Res = ActOnOpenMPSeqCstClause(StartLoc, EndLoc);
break;
@@ -14976,6 +17625,9 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_partial:
Res = ActOnOpenMPPartialClause(nullptr, StartLoc, /*LParenLoc=*/{}, EndLoc);
break;
+ case OMPC_ompx_bare:
+ Res = ActOnOpenMPXBareClause(StartLoc, EndLoc);
+ break;
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
@@ -15020,11 +17672,15 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_use_device_ptr:
case OMPC_use_device_addr:
case OMPC_is_device_ptr:
+ case OMPC_has_device_addr:
case OMPC_atomic_default_mem_order:
case OMPC_device_type:
case OMPC_match:
case OMPC_nontemporal:
case OMPC_order:
+ case OMPC_at:
+ case OMPC_severity:
+ case OMPC_message:
case OMPC_novariants:
case OMPC_nocontext:
case OMPC_detach:
@@ -15032,6 +17688,8 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_exclusive:
case OMPC_uses_allocators:
case OMPC_affinity:
+ case OMPC_when:
+ case OMPC_ompx_dyn_cgroup_mem:
default:
llvm_unreachable("Clause is not allowed.");
}
@@ -15046,6 +17704,7 @@ OMPClause *Sema::ActOnOpenMPNowaitClause(SourceLocation StartLoc,
OMPClause *Sema::ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc) {
+ DSAStack->setUntiedRegion();
return new (Context) OMPUntiedClause(StartLoc, EndLoc);
}
@@ -15074,6 +17733,29 @@ OMPClause *Sema::ActOnOpenMPCaptureClause(SourceLocation StartLoc,
return new (Context) OMPCaptureClause(StartLoc, EndLoc);
}
+OMPClause *Sema::ActOnOpenMPCompareClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPCompareClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPFailClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPFailClause(StartLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPFailClause(
+ OpenMPClauseKind Parameter, SourceLocation KindLoc,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+
+ if (!checkFailClauseParameter(Parameter)) {
+ Diag(KindLoc, diag::err_omp_atomic_fail_wrong_or_no_clauses);
+ return nullptr;
+ }
+ return new (Context)
+ OMPFailClause(Parameter, KindLoc, StartLoc, LParenLoc, EndLoc);
+}
+
OMPClause *Sema::ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc) {
return new (Context) OMPSeqCstClause(StartLoc, EndLoc);
@@ -15177,32 +17859,28 @@ StmtResult Sema::ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
// OpenMP 5.1 [2.15.1, interop Construct, Restrictions]
// Each interop-var may be specified for at most one action-clause of each
// interop construct.
- llvm::SmallPtrSet<const VarDecl *, 4> InteropVars;
- for (const OMPClause *C : Clauses) {
+ llvm::SmallPtrSet<const ValueDecl *, 4> InteropVars;
+ for (OMPClause *C : Clauses) {
OpenMPClauseKind ClauseKind = C->getClauseKind();
- const DeclRefExpr *DRE = nullptr;
- SourceLocation VarLoc;
+ std::pair<ValueDecl *, bool> DeclResult;
+ SourceLocation ELoc;
+ SourceRange ERange;
if (ClauseKind == OMPC_init) {
- const auto *IC = cast<OMPInitClause>(C);
- VarLoc = IC->getVarLoc();
- DRE = dyn_cast_or_null<DeclRefExpr>(IC->getInteropVar());
+ auto *E = cast<OMPInitClause>(C)->getInteropVar();
+ DeclResult = getPrivateItem(*this, E, ELoc, ERange);
} else if (ClauseKind == OMPC_use) {
- const auto *UC = cast<OMPUseClause>(C);
- VarLoc = UC->getVarLoc();
- DRE = dyn_cast_or_null<DeclRefExpr>(UC->getInteropVar());
+ auto *E = cast<OMPUseClause>(C)->getInteropVar();
+ DeclResult = getPrivateItem(*this, E, ELoc, ERange);
} else if (ClauseKind == OMPC_destroy) {
- const auto *DC = cast<OMPDestroyClause>(C);
- VarLoc = DC->getVarLoc();
- DRE = dyn_cast_or_null<DeclRefExpr>(DC->getInteropVar());
+ auto *E = cast<OMPDestroyClause>(C)->getInteropVar();
+ DeclResult = getPrivateItem(*this, E, ELoc, ERange);
}
- if (!DRE)
- continue;
-
- if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
- if (!InteropVars.insert(VD->getCanonicalDecl()).second) {
- Diag(VarLoc, diag::err_omp_interop_var_multiple_actions) << VD;
+ if (DeclResult.first) {
+ if (!InteropVars.insert(DeclResult.first).second) {
+ Diag(ELoc, diag::err_omp_interop_var_multiple_actions)
+ << DeclResult.first;
return StmtError();
}
}
@@ -15214,16 +17892,20 @@ StmtResult Sema::ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
static bool isValidInteropVariable(Sema &SemaRef, Expr *InteropVarExpr,
SourceLocation VarLoc,
OpenMPClauseKind Kind) {
- if (InteropVarExpr->isValueDependent() || InteropVarExpr->isTypeDependent() ||
- InteropVarExpr->isInstantiationDependent() ||
- InteropVarExpr->containsUnexpandedParameterPack())
+ SourceLocation ELoc;
+ SourceRange ERange;
+ Expr *RefExpr = InteropVarExpr;
+ auto Res =
+ getPrivateItem(SemaRef, RefExpr, ELoc, ERange,
+ /*AllowArraySection=*/false, /*DiagType=*/"omp_interop_t");
+
+ if (Res.second) {
+ // It will be analyzed later.
return true;
+ }
- const auto *DRE = dyn_cast<DeclRefExpr>(InteropVarExpr);
- if (!DRE || !isa<VarDecl>(DRE->getDecl())) {
- SemaRef.Diag(VarLoc, diag::err_omp_interop_variable_expected) << 0;
+ if (!Res.first)
return false;
- }
// Interop variable should be of type omp_interop_t.
bool HasError = false;
@@ -15265,8 +17947,7 @@ static bool isValidInteropVariable(Sema &SemaRef, Expr *InteropVarExpr,
}
OMPClause *
-Sema::ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs,
- bool IsTarget, bool IsTargetSync,
+Sema::ActOnOpenMPInitClause(Expr *InteropVar, OMPInteropInfo &InteropInfo,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation VarLoc, SourceLocation EndLoc) {
@@ -15275,7 +17956,7 @@ Sema::ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs,
// Check prefer_type values. These foreign-runtime-id values are either
// string literals or constant integral expressions.
- for (const Expr *E : PrefExprs) {
+ for (const Expr *E : InteropInfo.PreferTypes) {
if (E->isValueDependent() || E->isTypeDependent() ||
E->isInstantiationDependent() || E->containsUnexpandedParameterPack())
continue;
@@ -15287,9 +17968,8 @@ Sema::ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs,
return nullptr;
}
- return OMPInitClause::Create(Context, InteropVar, PrefExprs, IsTarget,
- IsTargetSync, StartLoc, LParenLoc, VarLoc,
- EndLoc);
+ return OMPInitClause::Create(Context, InteropVar, InteropInfo, StartLoc,
+ LParenLoc, VarLoc, EndLoc);
}
OMPClause *Sema::ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
@@ -15309,6 +17989,13 @@ OMPClause *Sema::ActOnOpenMPDestroyClause(Expr *InteropVar,
SourceLocation LParenLoc,
SourceLocation VarLoc,
SourceLocation EndLoc) {
+ if (!InteropVar && LangOpts.OpenMP >= 52 &&
+ DSAStack->getCurrentDirective() == OMPD_depobj) {
+ Diag(StartLoc, diag::err_omp_expected_clause_argument)
+ << getOpenMPClauseName(OMPC_destroy)
+ << getOpenMPDirectiveName(OMPD_depobj);
+ return nullptr;
+ }
if (InteropVar &&
!isValidInteropVariable(*this, InteropVar, VarLoc, OMPC_destroy))
return nullptr;
@@ -15400,20 +18087,17 @@ OMPClause *Sema::ActOnOpenMPFilterClause(Expr *ThreadID,
StartLoc, LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPVarListClause(
- OpenMPClauseKind Kind, ArrayRef<Expr *> VarList, Expr *DepModOrTailExpr,
- const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
- CXXScopeSpec &ReductionOrMapperIdScopeSpec,
- DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
- ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
- ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
- SourceLocation ExtraModifierLoc,
- ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
- ArrayRef<SourceLocation> MotionModifiersLoc) {
+OMPClause *Sema::ActOnOpenMPVarListClause(OpenMPClauseKind Kind,
+ ArrayRef<Expr *> VarList,
+ const OMPVarListLocTy &Locs,
+ OpenMPVarListDataTy &Data) {
SourceLocation StartLoc = Locs.StartLoc;
SourceLocation LParenLoc = Locs.LParenLoc;
SourceLocation EndLoc = Locs.EndLoc;
OMPClause *Res = nullptr;
+ int ExtraModifier = Data.ExtraModifier;
+ SourceLocation ExtraModifierLoc = Data.ExtraModifierLoc;
+ SourceLocation ColonLoc = Data.ColonLoc;
switch (Kind) {
case OMPC_private:
Res = ActOnOpenMPPrivateClause(VarList, StartLoc, LParenLoc, EndLoc);
@@ -15437,28 +18121,28 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
Res = ActOnOpenMPReductionClause(
VarList, static_cast<OpenMPReductionClauseModifier>(ExtraModifier),
StartLoc, LParenLoc, ExtraModifierLoc, ColonLoc, EndLoc,
- ReductionOrMapperIdScopeSpec, ReductionOrMapperId);
+ Data.ReductionOrMapperIdScopeSpec, Data.ReductionOrMapperId);
break;
case OMPC_task_reduction:
- Res = ActOnOpenMPTaskReductionClause(VarList, StartLoc, LParenLoc, ColonLoc,
- EndLoc, ReductionOrMapperIdScopeSpec,
- ReductionOrMapperId);
+ Res = ActOnOpenMPTaskReductionClause(
+ VarList, StartLoc, LParenLoc, ColonLoc, EndLoc,
+ Data.ReductionOrMapperIdScopeSpec, Data.ReductionOrMapperId);
break;
case OMPC_in_reduction:
- Res = ActOnOpenMPInReductionClause(VarList, StartLoc, LParenLoc, ColonLoc,
- EndLoc, ReductionOrMapperIdScopeSpec,
- ReductionOrMapperId);
+ Res = ActOnOpenMPInReductionClause(
+ VarList, StartLoc, LParenLoc, ColonLoc, EndLoc,
+ Data.ReductionOrMapperIdScopeSpec, Data.ReductionOrMapperId);
break;
case OMPC_linear:
assert(0 <= ExtraModifier && ExtraModifier <= OMPC_LINEAR_unknown &&
"Unexpected linear modifier.");
Res = ActOnOpenMPLinearClause(
- VarList, DepModOrTailExpr, StartLoc, LParenLoc,
+ VarList, Data.DepModOrTailExpr, StartLoc, LParenLoc,
static_cast<OpenMPLinearClauseKind>(ExtraModifier), ExtraModifierLoc,
- ColonLoc, EndLoc);
+ ColonLoc, Data.StepModifierLoc, EndLoc);
break;
case OMPC_aligned:
- Res = ActOnOpenMPAlignedClause(VarList, DepModOrTailExpr, StartLoc,
+ Res = ActOnOpenMPAlignedClause(VarList, Data.DepModOrTailExpr, StartLoc,
LParenLoc, ColonLoc, EndLoc);
break;
case OMPC_copyin:
@@ -15474,26 +18158,30 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
assert(0 <= ExtraModifier && ExtraModifier <= OMPC_DEPEND_unknown &&
"Unexpected depend modifier.");
Res = ActOnOpenMPDependClause(
- DepModOrTailExpr, static_cast<OpenMPDependClauseKind>(ExtraModifier),
- ExtraModifierLoc, ColonLoc, VarList, StartLoc, LParenLoc, EndLoc);
+ {static_cast<OpenMPDependClauseKind>(ExtraModifier), ExtraModifierLoc,
+ ColonLoc, Data.OmpAllMemoryLoc},
+ Data.DepModOrTailExpr, VarList, StartLoc, LParenLoc, EndLoc);
break;
case OMPC_map:
assert(0 <= ExtraModifier && ExtraModifier <= OMPC_MAP_unknown &&
"Unexpected map modifier.");
Res = ActOnOpenMPMapClause(
- MapTypeModifiers, MapTypeModifiersLoc, ReductionOrMapperIdScopeSpec,
- ReductionOrMapperId, static_cast<OpenMPMapClauseKind>(ExtraModifier),
- IsMapTypeImplicit, ExtraModifierLoc, ColonLoc, VarList, Locs);
+ Data.IteratorExpr, Data.MapTypeModifiers, Data.MapTypeModifiersLoc,
+ Data.ReductionOrMapperIdScopeSpec, Data.ReductionOrMapperId,
+ static_cast<OpenMPMapClauseKind>(ExtraModifier), Data.IsMapTypeImplicit,
+ ExtraModifierLoc, ColonLoc, VarList, Locs);
break;
case OMPC_to:
- Res = ActOnOpenMPToClause(MotionModifiers, MotionModifiersLoc,
- ReductionOrMapperIdScopeSpec, ReductionOrMapperId,
- ColonLoc, VarList, Locs);
+ Res =
+ ActOnOpenMPToClause(Data.MotionModifiers, Data.MotionModifiersLoc,
+ Data.ReductionOrMapperIdScopeSpec,
+ Data.ReductionOrMapperId, ColonLoc, VarList, Locs);
break;
case OMPC_from:
- Res = ActOnOpenMPFromClause(MotionModifiers, MotionModifiersLoc,
- ReductionOrMapperIdScopeSpec,
- ReductionOrMapperId, ColonLoc, VarList, Locs);
+ Res = ActOnOpenMPFromClause(Data.MotionModifiers, Data.MotionModifiersLoc,
+ Data.ReductionOrMapperIdScopeSpec,
+ Data.ReductionOrMapperId, ColonLoc, VarList,
+ Locs);
break;
case OMPC_use_device_ptr:
Res = ActOnOpenMPUseDevicePtrClause(VarList, Locs);
@@ -15504,8 +18192,11 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
case OMPC_is_device_ptr:
Res = ActOnOpenMPIsDevicePtrClause(VarList, Locs);
break;
+ case OMPC_has_device_addr:
+ Res = ActOnOpenMPHasDeviceAddrClause(VarList, Locs);
+ break;
case OMPC_allocate:
- Res = ActOnOpenMPAllocateClause(DepModOrTailExpr, VarList, StartLoc,
+ Res = ActOnOpenMPAllocateClause(Data.DepModOrTailExpr, VarList, StartLoc,
LParenLoc, ColonLoc, EndLoc);
break;
case OMPC_nontemporal:
@@ -15519,7 +18210,12 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
break;
case OMPC_affinity:
Res = ActOnOpenMPAffinityClause(StartLoc, LParenLoc, ColonLoc, EndLoc,
- DepModOrTailExpr, VarList);
+ Data.DepModOrTailExpr, VarList);
+ break;
+ case OMPC_doacross:
+ Res = ActOnOpenMPDoacrossClause(
+ static_cast<OpenMPDoacrossClauseModifier>(ExtraModifier),
+ ExtraModifierLoc, ColonLoc, VarList, StartLoc, LParenLoc, EndLoc);
break;
case OMPC_if:
case OMPC_depobj:
@@ -15542,6 +18238,7 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
case OMPC_write:
case OMPC_update:
case OMPC_capture:
+ case OMPC_compare:
case OMPC_seq_cst:
case OMPC_acq_rel:
case OMPC_acquire:
@@ -15569,11 +18266,16 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
case OMPC_device_type:
case OMPC_match:
case OMPC_order:
+ case OMPC_at:
+ case OMPC_severity:
+ case OMPC_message:
case OMPC_destroy:
case OMPC_novariants:
case OMPC_nocontext:
case OMPC_detach:
case OMPC_uses_allocators:
+ case OMPC_when:
+ case OMPC_bind:
default:
llvm_unreachable("Clause is not allowed.");
}
@@ -15605,6 +18307,8 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
SmallVector<Expr *, 8> PrivateCopies;
+ bool IsImplicitClause =
+ StartLoc.isInvalid() && LParenLoc.isInvalid() && EndLoc.isInvalid();
for (Expr *RefExpr : VarList) {
assert(RefExpr && "NULL expr in OpenMP private clause.");
SourceLocation ELoc;
@@ -15663,9 +18367,8 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
Diag(ELoc, diag::err_omp_variably_modified_type_not_supported)
<< getOpenMPClauseName(OMPC_private) << Type
<< getOpenMPDirectiveName(CurrDir);
- bool IsDecl =
- !VD ||
- VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
+ VarDecl::DeclarationOnly;
Diag(D->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< D;
@@ -15720,9 +18423,17 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
*this, VDPrivate, RefExpr->getType().getUnqualifiedType(), ELoc);
DeclRefExpr *Ref = nullptr;
- if (!VD && !CurContext->isDependentContext())
- Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/false);
- DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_private, Ref);
+ if (!VD && !CurContext->isDependentContext()) {
+ auto *FD = dyn_cast<FieldDecl>(D);
+ VarDecl *VD = FD ? DSAStack->getImplicitFDCapExprDecl(FD) : nullptr;
+ if (VD)
+ Ref = buildDeclRefExpr(*this, VD, VD->getType().getNonReferenceType(),
+ RefExpr->getExprLoc());
+ else
+ Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/false);
+ }
+ if (!IsImplicitClause)
+ DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_private, Ref);
Vars.push_back((VD || CurContext->isDependentContext())
? RefExpr->IgnoreParens()
: Ref);
@@ -15736,29 +18447,6 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
PrivateCopies);
}
-namespace {
-class DiagsUninitializedSeveretyRAII {
-private:
- DiagnosticsEngine &Diags;
- SourceLocation SavedLoc;
- bool IsIgnored = false;
-
-public:
- DiagsUninitializedSeveretyRAII(DiagnosticsEngine &Diags, SourceLocation Loc,
- bool IsIgnored)
- : Diags(Diags), SavedLoc(Loc), IsIgnored(IsIgnored) {
- if (!IsIgnored) {
- Diags.setSeverity(/*Diag*/ diag::warn_uninit_self_reference_in_init,
- /*Map*/ diag::Severity::Ignored, Loc);
- }
- }
- ~DiagsUninitializedSeveretyRAII() {
- if (!IsIgnored)
- Diags.popMappings(SavedLoc);
- }
-};
-}
-
OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
@@ -15952,9 +18640,8 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
Diag(ELoc, diag::err_omp_variably_modified_type_not_supported)
<< getOpenMPClauseName(OMPC_firstprivate) << Type
<< getOpenMPDirectiveName(DSAStack->getCurrentDirective());
- bool IsDecl =
- !VD ||
- VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
+ VarDecl::DeclarationOnly;
Diag(D->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< D;
@@ -16019,8 +18706,14 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
if (TopDVar.CKind == OMPC_lastprivate) {
Ref = TopDVar.PrivateCopy;
} else {
- Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true);
- if (!isOpenMPCapturedDecl(D))
+ auto *FD = dyn_cast<FieldDecl>(D);
+ VarDecl *VD = FD ? DSAStack->getImplicitFDCapExprDecl(FD) : nullptr;
+ if (VD)
+ Ref = buildDeclRefExpr(*this, VD, VD->getType().getNonReferenceType(),
+ RefExpr->getExprLoc());
+ else
+ Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true);
+ if (VD || !isOpenMPCapturedDecl(D))
ExprCaptures.push_back(Ref->getDecl());
}
}
@@ -16290,7 +18983,7 @@ public:
return true;
DSAStackTy::DSAVarData DVarPrivate = Stack->hasDSA(
VD,
- [](OpenMPClauseKind C, bool AppliedToPointee) {
+ [](OpenMPClauseKind C, bool AppliedToPointee, bool) {
return isOpenMPPrivate(C) && !AppliedToPointee;
},
[](OpenMPDirectiveKind) { return true; },
@@ -16349,7 +19042,7 @@ static T filterLookupForUDReductionAndMapper(
static NamedDecl *findAcceptableDecl(Sema &SemaRef, NamedDecl *D) {
assert(!LookupResult::isVisible(SemaRef, D) && "not in slow case");
- for (auto RD : D->redecls()) {
+ for (auto *RD : D->redecls()) {
// Don't bother with extra checks if we already know this one isn't visible.
if (RD == D)
continue;
@@ -16733,9 +19426,17 @@ static bool actOnOMPReductionKindClause(
// operators: +, -, *, &, |, ^, && and ||
switch (OOK) {
case OO_Plus:
- case OO_Minus:
BOK = BO_Add;
break;
+ case OO_Minus:
+ // Minus(-) operator is not supported in TR11 (OpenMP 6.0). Setting BOK to
+ // BO_Comma will automatically diagnose it for OpenMP > 52 as not allowed
+ // reduction identifier.
+ if (S.LangOpts.OpenMP > 52)
+ BOK = BO_Comma;
+ else
+ BOK = BO_Add;
+ break;
case OO_Star:
BOK = BO_Mul;
break;
@@ -16802,6 +19503,12 @@ static bool actOnOMPReductionKindClause(
}
break;
}
+
+ // OpenMP 5.2, 5.5.5 (see page 627, line 18) reduction Clause, Restrictions
+ // A reduction clause with the minus (-) operator was deprecated
+ if (OOK == OO_Minus && S.LangOpts.OpenMP == 52)
+ S.Diag(ReductionId.getLoc(), diag::warn_omp_minus_in_reduction_deprecated);
+
SourceRange ReductionIdRange;
if (ReductionIdScopeSpec.isValid())
ReductionIdRange.setBegin(ReductionIdScopeSpec.getBeginLoc());
@@ -16970,9 +19677,14 @@ static bool actOnOMPReductionKindClause(
}
if (BOK == BO_Comma && DeclareReductionRef.isUnset()) {
// Not allowed reduction identifier is found.
- S.Diag(ReductionId.getBeginLoc(),
- diag::err_omp_unknown_reduction_identifier)
- << Type << ReductionIdRange;
+ if (S.LangOpts.OpenMP > 52)
+ S.Diag(ReductionId.getBeginLoc(),
+ diag::err_omp_unknown_reduction_identifier_since_omp_6_0)
+ << Type << ReductionIdRange;
+ else
+ S.Diag(ReductionId.getBeginLoc(),
+ diag::err_omp_unknown_reduction_identifier_prior_omp_6_0)
+ << Type << ReductionIdRange;
continue;
}
@@ -17034,7 +19746,7 @@ static bool actOnOMPReductionKindClause(
if (ConstantLengthOASE && !SingleElement) {
for (llvm::APSInt &Size : ArraySizes)
PrivateTy = Context.getConstantArrayType(PrivateTy, Size, nullptr,
- ArrayType::Normal,
+ ArraySizeModifier::Normal,
/*IndexTypeQuals=*/0);
}
}
@@ -17061,7 +19773,7 @@ static bool actOnOMPReductionKindClause(
Type,
new (Context)
OpaqueValueExpr(ELoc, Context.getSizeType(), VK_PRValue),
- ArrayType::Normal, /*IndexTypeQuals=*/0, SourceRange());
+ ArraySizeModifier::Normal, /*IndexTypeQuals=*/0, SourceRange());
} else if (!ASE && !OASE &&
Context.getAsArrayType(D->getType().getNonReferenceType())) {
PrivateTy = D->getType().getNonReferenceType();
@@ -17107,14 +19819,13 @@ static bool actOnOMPReductionKindClause(
Type = ComplexTy->getElementType();
if (Type->isRealFloatingType()) {
llvm::APFloat InitValue = llvm::APFloat::getAllOnesValue(
- Context.getFloatTypeSemantics(Type),
- Context.getTypeSize(Type));
+ Context.getFloatTypeSemantics(Type));
Init = FloatingLiteral::Create(Context, InitValue, /*isexact=*/true,
Type, ELoc);
} else if (Type->isScalarType()) {
uint64_t Size = Context.getTypeSize(Type);
QualType IntTy = Context.getIntTypeForBitwidth(Size, /*Signed=*/0);
- llvm::APInt InitValue = llvm::APInt::getAllOnesValue(Size);
+ llvm::APInt InitValue = llvm::APInt::getAllOnes(Size);
Init = IntegerLiteral::Create(Context, InitValue, IntTy, ELoc);
}
if (Init && OrigType->isAnyComplexType()) {
@@ -17139,8 +19850,8 @@ static bool actOnOMPReductionKindClause(
llvm::APInt InitValue =
(BOK != BO_LT) ? IsSigned ? llvm::APInt::getSignedMinValue(Size)
: llvm::APInt::getMinValue(Size)
- : IsSigned ? llvm::APInt::getSignedMaxValue(Size)
- : llvm::APInt::getMaxValue(Size);
+ : IsSigned ? llvm::APInt::getSignedMaxValue(Size)
+ : llvm::APInt::getMaxValue(Size);
Init = IntegerLiteral::Create(Context, InitValue, IntTy, ELoc);
if (Type->isPointerType()) {
// Cast to pointer type.
@@ -17300,9 +20011,9 @@ static bool actOnOMPReductionKindClause(
// Build temp array for prefix sum.
auto *Dim = new (S.Context)
OpaqueValueExpr(ELoc, S.Context.getSizeType(), VK_PRValue);
- QualType ArrayTy =
- S.Context.getVariableArrayType(PrivateTy, Dim, ArrayType::Normal,
- /*IndexTypeQuals=*/0, {ELoc, ELoc});
+ QualType ArrayTy = S.Context.getVariableArrayType(
+ PrivateTy, Dim, ArraySizeModifier::Normal,
+ /*IndexTypeQuals=*/0, {ELoc, ELoc});
VarDecl *TempArrayVD =
buildVarDecl(S, ELoc, ArrayTy, D->getName(),
D->hasAttrs() ? &D->getAttrs() : nullptr);
@@ -17513,7 +20224,7 @@ OMPClause *Sema::ActOnOpenMPInReductionClause(
bool Sema::CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc) {
if ((!LangOpts.CPlusPlus && LinKind != OMPC_LINEAR_val) ||
- LinKind == OMPC_LINEAR_unknown) {
+ LinKind == OMPC_LINEAR_unknown || LinKind == OMPC_LINEAR_step) {
Diag(LinLoc, diag::err_omp_wrong_linear_modifier) << LangOpts.CPlusPlus;
return true;
}
@@ -17551,9 +20262,8 @@ bool Sema::CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
!Ty->isIntegralType(Context) && !Ty->isPointerType())) {
Diag(ELoc, diag::err_omp_linear_expected_int_or_ptr) << Type;
if (D) {
- bool IsDecl =
- !VD ||
- VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
+ VarDecl::DeclarationOnly;
Diag(D->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< D;
@@ -17566,12 +20276,19 @@ bool Sema::CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OMPClause *Sema::ActOnOpenMPLinearClause(
ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc,
SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind,
- SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc) {
+ SourceLocation LinLoc, SourceLocation ColonLoc,
+ SourceLocation StepModifierLoc, SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
SmallVector<Expr *, 8> Privates;
SmallVector<Expr *, 8> Inits;
SmallVector<Decl *, 4> ExprCaptures;
SmallVector<Expr *, 4> ExprPostUpdates;
+ // OpenMP 5.2 [Section 5.4.6, linear clause]
+ // step-simple-modifier is exclusive, can't be used with 'val', 'uval', or
+ // 'ref'
+ if (LinLoc.isValid() && StepModifierLoc.isInvalid() && Step &&
+ getLangOpts().OpenMP >= 52)
+ Diag(Step->getBeginLoc(), diag::err_omp_step_simple_modifier_exclusive);
if (CheckOpenMPLinearModifier(LinKind, LinLoc))
LinKind = OMPC_LINEAR_val;
for (Expr *RefExpr : VarList) {
@@ -17678,7 +20395,7 @@ OMPClause *Sema::ActOnOpenMPLinearClause(
// Warn about zero linear step (it would be probably better specified as
// making corresponding variables 'const').
- if (Optional<llvm::APSInt> Result =
+ if (std::optional<llvm::APSInt> Result =
StepExpr->getIntegerConstantExpr(Context)) {
if (!Result->isNegative() && !Result->isStrictlyPositive())
Diag(StepLoc, diag::warn_omp_linear_step_zero)
@@ -17691,8 +20408,8 @@ OMPClause *Sema::ActOnOpenMPLinearClause(
}
return OMPLinearClause::Create(Context, StartLoc, LParenLoc, LinKind, LinLoc,
- ColonLoc, EndLoc, Vars, Privates, Inits,
- StepExpr, CalcStepExpr,
+ ColonLoc, StepModifierLoc, EndLoc, Vars,
+ Privates, Inits, StepExpr, CalcStepExpr,
buildPreInits(Context, ExprCaptures),
buildPostUpdate(*this, ExprPostUpdates));
}
@@ -17765,13 +20482,12 @@ static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
Update = SemaRef.ActOnFinishFullExpr(Update.get(), DE->getBeginLoc(),
/*DiscardedValue*/ false);
- // Build final: Var = InitExpr + NumIterations * Step
+ // Build final: Var = PrivCopy;
ExprResult Final;
if (!Info.first)
- Final =
- buildCounterUpdate(SemaRef, S, RefExpr->getExprLoc(), CapturedRef,
- InitExpr, NumIterations, Step, /*Subtract=*/false,
- /*IsNonRectangularLB=*/false);
+ Final = SemaRef.BuildBinOp(
+ S, RefExpr->getExprLoc(), BO_Assign, CapturedRef,
+ SemaRef.DefaultLvalueConversion(*CurPrivate).get());
else
Final = *CurPrivate;
Final = SemaRef.ActOnFinishFullExpr(Final.get(), DE->getBeginLoc(),
@@ -17830,9 +20546,8 @@ OMPClause *Sema::ActOnOpenMPAlignedClause(
if (!Ty || (!Ty->isArrayType() && !Ty->isPointerType())) {
Diag(ELoc, diag::err_omp_aligned_expected_array_or_ptr)
<< QType << getLangOpts().CPlusPlus << ERange;
- bool IsDecl =
- !VD ||
- VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
+ VarDecl::DeclarationOnly;
Diag(D->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< D;
@@ -18033,9 +20748,8 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
Diag(ELoc, diag::err_omp_variably_modified_type_not_supported)
<< getOpenMPClauseName(OMPC_copyprivate) << Type
<< getOpenMPDirectiveName(DSAStack->getCurrentDirective());
- bool IsDecl =
- !VD ||
- VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
+ bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
+ VarDecl::DeclarationOnly;
Diag(D->getLocation(),
IsDecl ? diag::note_previous_decl : diag::note_defined_here)
<< D;
@@ -18138,62 +20852,35 @@ OMPClause *Sema::ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
return OMPDepobjClause::Create(Context, StartLoc, LParenLoc, EndLoc, Depobj);
}
-OMPClause *
-Sema::ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
- SourceLocation DepLoc, SourceLocation ColonLoc,
- ArrayRef<Expr *> VarList, SourceLocation StartLoc,
- SourceLocation LParenLoc, SourceLocation EndLoc) {
- if (DSAStack->getCurrentDirective() == OMPD_ordered &&
- DepKind != OMPC_DEPEND_source && DepKind != OMPC_DEPEND_sink) {
- Diag(DepLoc, diag::err_omp_unexpected_clause_value)
- << "'source' or 'sink'" << getOpenMPClauseName(OMPC_depend);
- return nullptr;
- }
- if ((DSAStack->getCurrentDirective() != OMPD_ordered ||
- DSAStack->getCurrentDirective() == OMPD_depobj) &&
- (DepKind == OMPC_DEPEND_unknown || DepKind == OMPC_DEPEND_source ||
- DepKind == OMPC_DEPEND_sink ||
- ((LangOpts.OpenMP < 50 ||
- DSAStack->getCurrentDirective() == OMPD_depobj) &&
- DepKind == OMPC_DEPEND_depobj))) {
- SmallVector<unsigned, 3> Except;
- Except.push_back(OMPC_DEPEND_source);
- Except.push_back(OMPC_DEPEND_sink);
- if (LangOpts.OpenMP < 50 || DSAStack->getCurrentDirective() == OMPD_depobj)
- Except.push_back(OMPC_DEPEND_depobj);
- std::string Expected = (LangOpts.OpenMP >= 50 && !DepModifier)
- ? "depend modifier(iterator) or "
- : "";
- Diag(DepLoc, diag::err_omp_unexpected_clause_value)
- << Expected + getListOfPossibleValues(OMPC_depend, /*First=*/0,
- /*Last=*/OMPC_DEPEND_unknown,
- Except)
- << getOpenMPClauseName(OMPC_depend);
- return nullptr;
- }
- if (DepModifier &&
- (DepKind == OMPC_DEPEND_source || DepKind == OMPC_DEPEND_sink)) {
- Diag(DepModifier->getExprLoc(),
- diag::err_omp_depend_sink_source_with_modifier);
- return nullptr;
- }
- if (DepModifier &&
- !DepModifier->getType()->isSpecificBuiltinType(BuiltinType::OMPIterator))
- Diag(DepModifier->getExprLoc(), diag::err_omp_depend_modifier_not_iterator);
+namespace {
+// Utility struct that gathers the related info for doacross clause.
+struct DoacrossDataInfoTy {
+ // The list of expressions.
+ SmallVector<Expr *, 8> Vars;
+ // The OperatorOffset for doacross loop.
+ DSAStackTy::OperatorOffsetTy OpsOffs;
+ // The depended loop count.
+ llvm::APSInt TotalDepCount;
+};
+} // namespace
+static DoacrossDataInfoTy
+ProcessOpenMPDoacrossClauseCommon(Sema &SemaRef, bool IsSource,
+ ArrayRef<Expr *> VarList, DSAStackTy *Stack,
+ SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
DSAStackTy::OperatorOffsetTy OpsOffs;
llvm::APSInt DepCounter(/*BitWidth=*/32);
llvm::APSInt TotalDepCount(/*BitWidth=*/32);
- if (DepKind == OMPC_DEPEND_sink || DepKind == OMPC_DEPEND_source) {
- if (const Expr *OrderedCountExpr =
- DSAStack->getParentOrderedRegionParam().first) {
- TotalDepCount = OrderedCountExpr->EvaluateKnownConstInt(Context);
- TotalDepCount.setIsUnsigned(/*Val=*/true);
- }
+
+ if (const Expr *OrderedCountExpr =
+ Stack->getParentOrderedRegionParam().first) {
+ TotalDepCount = OrderedCountExpr->EvaluateKnownConstInt(SemaRef.Context);
+ TotalDepCount.setIsUnsigned(/*Val=*/true);
}
+
for (Expr *RefExpr : VarList) {
- assert(RefExpr && "NULL expr in OpenMP shared clause.");
+ assert(RefExpr && "NULL expr in OpenMP doacross clause.");
if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
// It will be analyzed later.
Vars.push_back(RefExpr);
@@ -18202,10 +20889,10 @@ Sema::ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation ELoc = RefExpr->getExprLoc();
Expr *SimpleExpr = RefExpr->IgnoreParenCasts();
- if (DepKind == OMPC_DEPEND_sink) {
- if (DSAStack->getParentOrderedRegionParam().first &&
+ if (!IsSource) {
+ if (Stack->getParentOrderedRegionParam().first &&
DepCounter >= TotalDepCount) {
- Diag(ELoc, diag::err_omp_depend_sink_unexpected_expr);
+ SemaRef.Diag(ELoc, diag::err_omp_depend_sink_unexpected_expr);
continue;
}
++DepCounter;
@@ -18217,7 +20904,7 @@ Sema::ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
// directive, xi denotes the loop iteration variable of the i-th nested
// loop associated with the loop directive, and di is a constant
// non-negative integer.
- if (CurContext->isDependentContext()) {
+ if (SemaRef.CurContext->isDependentContext()) {
// It will be analyzed later.
Vars.push_back(RefExpr);
continue;
@@ -18248,7 +20935,7 @@ Sema::ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
}
SourceLocation ELoc;
SourceRange ERange;
- auto Res = getPrivateItem(*this, LHS, ELoc, ERange);
+ auto Res = getPrivateItem(SemaRef, LHS, ELoc, ERange);
if (Res.second) {
// It will be analyzed later.
Vars.push_back(RefExpr);
@@ -18258,139 +20945,224 @@ Sema::ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
continue;
if (OOK != OO_Plus && OOK != OO_Minus && (RHS || OOK != OO_None)) {
- Diag(OOLoc, diag::err_omp_depend_sink_expected_plus_minus);
+ SemaRef.Diag(OOLoc, diag::err_omp_depend_sink_expected_plus_minus);
continue;
}
if (RHS) {
- ExprResult RHSRes = VerifyPositiveIntegerConstantInClause(
+ ExprResult RHSRes = SemaRef.VerifyPositiveIntegerConstantInClause(
RHS, OMPC_depend, /*StrictlyPositive=*/false);
if (RHSRes.isInvalid())
continue;
}
- if (!CurContext->isDependentContext() &&
- DSAStack->getParentOrderedRegionParam().first &&
- DepCounter != DSAStack->isParentLoopControlVariable(D).first) {
+ if (!SemaRef.CurContext->isDependentContext() &&
+ Stack->getParentOrderedRegionParam().first &&
+ DepCounter != Stack->isParentLoopControlVariable(D).first) {
const ValueDecl *VD =
- DSAStack->getParentLoopControlVariable(DepCounter.getZExtValue());
+ Stack->getParentLoopControlVariable(DepCounter.getZExtValue());
if (VD)
- Diag(ELoc, diag::err_omp_depend_sink_expected_loop_iteration)
+ SemaRef.Diag(ELoc, diag::err_omp_depend_sink_expected_loop_iteration)
<< 1 << VD;
else
- Diag(ELoc, diag::err_omp_depend_sink_expected_loop_iteration) << 0;
+ SemaRef.Diag(ELoc, diag::err_omp_depend_sink_expected_loop_iteration)
+ << 0;
continue;
}
OpsOffs.emplace_back(RHS, OOK);
- } else {
- bool OMPDependTFound = LangOpts.OpenMP >= 50;
- if (OMPDependTFound)
- OMPDependTFound = findOMPDependT(*this, StartLoc, DSAStack,
- DepKind == OMPC_DEPEND_depobj);
- if (DepKind == OMPC_DEPEND_depobj) {
- // OpenMP 5.0, 2.17.11 depend Clause, Restrictions, C/C++
- // List items used in depend clauses with the depobj dependence type
- // must be expressions of the omp_depend_t type.
- if (!RefExpr->isValueDependent() && !RefExpr->isTypeDependent() &&
- !RefExpr->isInstantiationDependent() &&
- !RefExpr->containsUnexpandedParameterPack() &&
- (OMPDependTFound &&
- !Context.hasSameUnqualifiedType(DSAStack->getOMPDependT(),
- RefExpr->getType()))) {
- Diag(ELoc, diag::err_omp_expected_omp_depend_t_lvalue)
- << 0 << RefExpr->getType() << RefExpr->getSourceRange();
- continue;
- }
- if (!RefExpr->isLValue()) {
- Diag(ELoc, diag::err_omp_expected_omp_depend_t_lvalue)
- << 1 << RefExpr->getType() << RefExpr->getSourceRange();
- continue;
- }
- } else {
- // OpenMP 5.0 [2.17.11, Restrictions]
- // List items used in depend clauses cannot be zero-length array
- // sections.
- QualType ExprTy = RefExpr->getType().getNonReferenceType();
- const auto *OASE = dyn_cast<OMPArraySectionExpr>(SimpleExpr);
- if (OASE) {
- QualType BaseType =
- OMPArraySectionExpr::getBaseOriginalType(OASE->getBase());
- if (const auto *ATy = BaseType->getAsArrayTypeUnsafe())
- ExprTy = ATy->getElementType();
- else
- ExprTy = BaseType->getPointeeType();
- ExprTy = ExprTy.getNonReferenceType();
- const Expr *Length = OASE->getLength();
- Expr::EvalResult Result;
- if (Length && !Length->isValueDependent() &&
- Length->EvaluateAsInt(Result, Context) &&
- Result.Val.getInt().isNullValue()) {
- Diag(ELoc,
- diag::err_omp_depend_zero_length_array_section_not_allowed)
- << SimpleExpr->getSourceRange();
+ }
+ Vars.push_back(RefExpr->IgnoreParenImpCasts());
+ }
+ if (!SemaRef.CurContext->isDependentContext() && !IsSource &&
+ TotalDepCount > VarList.size() &&
+ Stack->getParentOrderedRegionParam().first &&
+ Stack->getParentLoopControlVariable(VarList.size() + 1)) {
+ SemaRef.Diag(EndLoc, diag::err_omp_depend_sink_expected_loop_iteration)
+ << 1 << Stack->getParentLoopControlVariable(VarList.size() + 1);
+ }
+ return {Vars, OpsOffs, TotalDepCount};
+}
+
+OMPClause *
+Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data,
+ Expr *DepModifier, ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ OpenMPDependClauseKind DepKind = Data.DepKind;
+ SourceLocation DepLoc = Data.DepLoc;
+ if (DSAStack->getCurrentDirective() == OMPD_ordered &&
+ DepKind != OMPC_DEPEND_source && DepKind != OMPC_DEPEND_sink) {
+ Diag(DepLoc, diag::err_omp_unexpected_clause_value)
+ << "'source' or 'sink'" << getOpenMPClauseName(OMPC_depend);
+ return nullptr;
+ }
+ if (DSAStack->getCurrentDirective() == OMPD_taskwait &&
+ DepKind == OMPC_DEPEND_mutexinoutset) {
+ Diag(DepLoc, diag::err_omp_taskwait_depend_mutexinoutset_not_allowed);
+ return nullptr;
+ }
+ if ((DSAStack->getCurrentDirective() != OMPD_ordered ||
+ DSAStack->getCurrentDirective() == OMPD_depobj) &&
+ (DepKind == OMPC_DEPEND_unknown || DepKind == OMPC_DEPEND_source ||
+ DepKind == OMPC_DEPEND_sink ||
+ ((LangOpts.OpenMP < 50 ||
+ DSAStack->getCurrentDirective() == OMPD_depobj) &&
+ DepKind == OMPC_DEPEND_depobj))) {
+ SmallVector<unsigned, 6> Except = {OMPC_DEPEND_source, OMPC_DEPEND_sink,
+ OMPC_DEPEND_outallmemory,
+ OMPC_DEPEND_inoutallmemory};
+ if (LangOpts.OpenMP < 50 || DSAStack->getCurrentDirective() == OMPD_depobj)
+ Except.push_back(OMPC_DEPEND_depobj);
+ if (LangOpts.OpenMP < 51)
+ Except.push_back(OMPC_DEPEND_inoutset);
+ std::string Expected = (LangOpts.OpenMP >= 50 && !DepModifier)
+ ? "depend modifier(iterator) or "
+ : "";
+ Diag(DepLoc, diag::err_omp_unexpected_clause_value)
+ << Expected + getListOfPossibleValues(OMPC_depend, /*First=*/0,
+ /*Last=*/OMPC_DEPEND_unknown,
+ Except)
+ << getOpenMPClauseName(OMPC_depend);
+ return nullptr;
+ }
+ if (DepModifier &&
+ (DepKind == OMPC_DEPEND_source || DepKind == OMPC_DEPEND_sink)) {
+ Diag(DepModifier->getExprLoc(),
+ diag::err_omp_depend_sink_source_with_modifier);
+ return nullptr;
+ }
+ if (DepModifier &&
+ !DepModifier->getType()->isSpecificBuiltinType(BuiltinType::OMPIterator))
+ Diag(DepModifier->getExprLoc(), diag::err_omp_depend_modifier_not_iterator);
+
+ SmallVector<Expr *, 8> Vars;
+ DSAStackTy::OperatorOffsetTy OpsOffs;
+ llvm::APSInt TotalDepCount(/*BitWidth=*/32);
+
+ if (DepKind == OMPC_DEPEND_sink || DepKind == OMPC_DEPEND_source) {
+ DoacrossDataInfoTy VarOffset = ProcessOpenMPDoacrossClauseCommon(
+ *this, DepKind == OMPC_DEPEND_source, VarList, DSAStack, EndLoc);
+ Vars = VarOffset.Vars;
+ OpsOffs = VarOffset.OpsOffs;
+ TotalDepCount = VarOffset.TotalDepCount;
+ } else {
+ for (Expr *RefExpr : VarList) {
+ assert(RefExpr && "NULL expr in OpenMP shared clause.");
+ if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
+ // It will be analyzed later.
+ Vars.push_back(RefExpr);
+ continue;
+ }
+
+ SourceLocation ELoc = RefExpr->getExprLoc();
+ Expr *SimpleExpr = RefExpr->IgnoreParenCasts();
+ if (DepKind != OMPC_DEPEND_sink && DepKind != OMPC_DEPEND_source) {
+ bool OMPDependTFound = LangOpts.OpenMP >= 50;
+ if (OMPDependTFound)
+ OMPDependTFound = findOMPDependT(*this, StartLoc, DSAStack,
+ DepKind == OMPC_DEPEND_depobj);
+ if (DepKind == OMPC_DEPEND_depobj) {
+ // OpenMP 5.0, 2.17.11 depend Clause, Restrictions, C/C++
+ // List items used in depend clauses with the depobj dependence type
+ // must be expressions of the omp_depend_t type.
+ if (!RefExpr->isValueDependent() && !RefExpr->isTypeDependent() &&
+ !RefExpr->isInstantiationDependent() &&
+ !RefExpr->containsUnexpandedParameterPack() &&
+ (OMPDependTFound &&
+ !Context.hasSameUnqualifiedType(DSAStack->getOMPDependT(),
+ RefExpr->getType()))) {
+ Diag(ELoc, diag::err_omp_expected_omp_depend_t_lvalue)
+ << 0 << RefExpr->getType() << RefExpr->getSourceRange();
continue;
}
- }
+ if (!RefExpr->isLValue()) {
+ Diag(ELoc, diag::err_omp_expected_omp_depend_t_lvalue)
+ << 1 << RefExpr->getType() << RefExpr->getSourceRange();
+ continue;
+ }
+ } else {
+ // OpenMP 5.0 [2.17.11, Restrictions]
+ // List items used in depend clauses cannot be zero-length array
+ // sections.
+ QualType ExprTy = RefExpr->getType().getNonReferenceType();
+ const auto *OASE = dyn_cast<OMPArraySectionExpr>(SimpleExpr);
+ if (OASE) {
+ QualType BaseType =
+ OMPArraySectionExpr::getBaseOriginalType(OASE->getBase());
+ if (BaseType.isNull())
+ return nullptr;
+ if (const auto *ATy = BaseType->getAsArrayTypeUnsafe())
+ ExprTy = ATy->getElementType();
+ else
+ ExprTy = BaseType->getPointeeType();
+ ExprTy = ExprTy.getNonReferenceType();
+ const Expr *Length = OASE->getLength();
+ Expr::EvalResult Result;
+ if (Length && !Length->isValueDependent() &&
+ Length->EvaluateAsInt(Result, Context) &&
+ Result.Val.getInt().isZero()) {
+ Diag(ELoc,
+ diag::err_omp_depend_zero_length_array_section_not_allowed)
+ << SimpleExpr->getSourceRange();
+ continue;
+ }
+ }
- // OpenMP 5.0, 2.17.11 depend Clause, Restrictions, C/C++
- // List items used in depend clauses with the in, out, inout or
- // mutexinoutset dependence types cannot be expressions of the
- // omp_depend_t type.
- if (!RefExpr->isValueDependent() && !RefExpr->isTypeDependent() &&
- !RefExpr->isInstantiationDependent() &&
- !RefExpr->containsUnexpandedParameterPack() &&
- (OMPDependTFound &&
- DSAStack->getOMPDependT().getTypePtr() == ExprTy.getTypePtr())) {
- Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
- << (LangOpts.OpenMP >= 50 ? 1 : 0) << 1
- << RefExpr->getSourceRange();
- continue;
- }
+ // OpenMP 5.0, 2.17.11 depend Clause, Restrictions, C/C++
+ // List items used in depend clauses with the in, out, inout,
+ // inoutset, or mutexinoutset dependence types cannot be
+ // expressions of the omp_depend_t type.
+ if (!RefExpr->isValueDependent() && !RefExpr->isTypeDependent() &&
+ !RefExpr->isInstantiationDependent() &&
+ !RefExpr->containsUnexpandedParameterPack() &&
+ (!RefExpr->IgnoreParenImpCasts()->isLValue() ||
+ (OMPDependTFound && DSAStack->getOMPDependT().getTypePtr() ==
+ ExprTy.getTypePtr()))) {
+ Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0) << RefExpr->getSourceRange();
+ continue;
+ }
- auto *ASE = dyn_cast<ArraySubscriptExpr>(SimpleExpr);
- if (!RefExpr->IgnoreParenImpCasts()->isLValue() ||
- (ASE && !ASE->getBase()->isTypeDependent() &&
- !ASE->getBase()
- ->getType()
- .getNonReferenceType()
- ->isPointerType() &&
- !ASE->getBase()->getType().getNonReferenceType()->isArrayType())) {
- Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
- << (LangOpts.OpenMP >= 50 ? 1 : 0)
- << (LangOpts.OpenMP >= 50 ? 1 : 0) << RefExpr->getSourceRange();
- continue;
- }
+ auto *ASE = dyn_cast<ArraySubscriptExpr>(SimpleExpr);
+ if (ASE && !ASE->getBase()->isTypeDependent() &&
+ !ASE->getBase()
+ ->getType()
+ .getNonReferenceType()
+ ->isPointerType() &&
+ !ASE->getBase()->getType().getNonReferenceType()->isArrayType()) {
+ Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0) << RefExpr->getSourceRange();
+ continue;
+ }
- ExprResult Res;
- {
- Sema::TentativeAnalysisScope Trap(*this);
- Res = CreateBuiltinUnaryOp(ELoc, UO_AddrOf,
- RefExpr->IgnoreParenImpCasts());
- }
- if (!Res.isUsable() && !isa<OMPArraySectionExpr>(SimpleExpr) &&
- !isa<OMPArrayShapingExpr>(SimpleExpr)) {
- Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
- << (LangOpts.OpenMP >= 50 ? 1 : 0)
- << (LangOpts.OpenMP >= 50 ? 1 : 0) << RefExpr->getSourceRange();
- continue;
+ ExprResult Res;
+ {
+ Sema::TentativeAnalysisScope Trap(*this);
+ Res = CreateBuiltinUnaryOp(ELoc, UO_AddrOf,
+ RefExpr->IgnoreParenImpCasts());
+ }
+ if (!Res.isUsable() && !isa<OMPArraySectionExpr>(SimpleExpr) &&
+ !isa<OMPArrayShapingExpr>(SimpleExpr)) {
+ Diag(ELoc, diag::err_omp_expected_addressable_lvalue_or_array_item)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0)
+ << (LangOpts.OpenMP >= 50 ? 1 : 0) << RefExpr->getSourceRange();
+ continue;
+ }
}
}
+ Vars.push_back(RefExpr->IgnoreParenImpCasts());
}
- Vars.push_back(RefExpr->IgnoreParenImpCasts());
}
- if (!CurContext->isDependentContext() && DepKind == OMPC_DEPEND_sink &&
- TotalDepCount > VarList.size() &&
- DSAStack->getParentOrderedRegionParam().first &&
- DSAStack->getParentLoopControlVariable(VarList.size() + 1)) {
- Diag(EndLoc, diag::err_omp_depend_sink_expected_loop_iteration)
- << 1 << DSAStack->getParentLoopControlVariable(VarList.size() + 1);
- }
if (DepKind != OMPC_DEPEND_source && DepKind != OMPC_DEPEND_sink &&
- Vars.empty())
+ DepKind != OMPC_DEPEND_outallmemory &&
+ DepKind != OMPC_DEPEND_inoutallmemory && Vars.empty())
return nullptr;
- auto *C = OMPDependClause::Create(Context, StartLoc, LParenLoc, EndLoc,
- DepModifier, DepKind, DepLoc, ColonLoc,
- Vars, TotalDepCount.getZExtValue());
+ auto *C = OMPDependClause::Create(
+ Context, StartLoc, LParenLoc, EndLoc,
+ {DepKind, DepLoc, Data.ColonLoc, Data.OmpAllMemoryLoc}, DepModifier, Vars,
+ TotalDepCount.getZExtValue());
if ((DepKind == OMPC_DEPEND_sink || DepKind == OMPC_DEPEND_source) &&
DSAStack->isParentOrderedRegion())
DSAStack->addDoacrossDependClause(C, OpsOffs);
@@ -18425,6 +21197,18 @@ OMPClause *Sema::ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
if (ErrorFound)
return nullptr;
+ // OpenMP 5.0 [2.12.5, Restrictions]
+ // In case of ancestor device-modifier, a requires directive with
+ // the reverse_offload clause must be specified.
+ if (Modifier == OMPC_DEVICE_ancestor) {
+ if (!DSAStack->hasRequiresDeclWithClause<OMPReverseOffloadClause>()) {
+ targetDiag(
+ StartLoc,
+ diag::err_omp_device_ancestor_without_requires_reverse_offload);
+ ErrorFound = true;
+ }
+ }
+
OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
OpenMPDirectiveKind CaptureRegion =
getOpenMPCaptureRegionForClause(DKind, OMPC_device, LangOpts.OpenMP);
@@ -18443,11 +21227,8 @@ OMPClause *Sema::ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
static bool checkTypeMappable(SourceLocation SL, SourceRange SR, Sema &SemaRef,
DSAStackTy *Stack, QualType QTy,
bool FullCheck = true) {
- NamedDecl *ND;
- if (QTy->isIncompleteType(&ND)) {
- SemaRef.Diag(SL, diag::err_incomplete_type) << QTy << SR;
+ if (SemaRef.RequireCompleteType(SL, QTy, diag::err_incomplete_type))
return false;
- }
if (FullCheck && !SemaRef.CurContext->isDependentContext() &&
!QTy.isTriviallyCopyableType(SemaRef.Context))
SemaRef.Diag(SL, diag::warn_omp_non_trivial_type_mapped) << QTy << SR;
@@ -18638,7 +21419,7 @@ public:
if (!isa<FieldDecl>(ME->getMemberDecl())) {
if (!NoDiagnose) {
SemaRef.Diag(ELoc, diag::err_omp_expected_access_to_data_field)
- << ME->getSourceRange();
+ << ME->getSourceRange();
return false;
}
if (RelevantExpr)
@@ -18654,7 +21435,7 @@ public:
if (FD->isBitField()) {
if (!NoDiagnose) {
SemaRef.Diag(ELoc, diag::err_omp_bit_fields_forbidden_in_clause)
- << ME->getSourceRange() << getOpenMPClauseName(CKind);
+ << ME->getSourceRange() << getOpenMPClauseName(CKind);
return false;
}
if (RelevantExpr)
@@ -18674,7 +21455,7 @@ public:
if (CurType->isUnionType()) {
if (!NoDiagnose) {
SemaRef.Diag(ELoc, diag::err_omp_union_type_not_allowed)
- << ME->getSourceRange();
+ << ME->getSourceRange();
return false;
}
return RelevantExpr || Visit(E);
@@ -18701,7 +21482,7 @@ public:
if (!E->getType()->isAnyPointerType() && !E->getType()->isArrayType()) {
if (!NoDiagnose) {
SemaRef.Diag(ELoc, diag::err_omp_expected_base_var_name)
- << 0 << AE->getSourceRange();
+ << 0 << AE->getSourceRange();
return false;
}
return RelevantExpr || Visit(E);
@@ -18710,15 +21491,14 @@ public:
// If we got an array subscript that express the whole dimension we
// can have any array expressions before. If it only expressing part of
// the dimension, we can only have unitary-size array expressions.
- if (checkArrayExpressionDoesNotReferToWholeSize(SemaRef, AE,
- E->getType()))
+ if (checkArrayExpressionDoesNotReferToWholeSize(SemaRef, AE, E->getType()))
AllowWholeSizeArraySection = false;
if (const auto *TE = dyn_cast<CXXThisExpr>(E->IgnoreParenCasts())) {
Expr::EvalResult Result;
if (!AE->getIdx()->isValueDependent() &&
AE->getIdx()->EvaluateAsInt(Result, SemaRef.getASTContext()) &&
- !Result.Val.getInt().isNullValue()) {
+ !Result.Val.getInt().isZero()) {
SemaRef.Diag(AE->getIdx()->getExprLoc(),
diag::err_omp_invalid_map_this_expr);
SemaRef.Diag(AE->getIdx()->getExprLoc(),
@@ -18735,10 +21515,13 @@ public:
}
bool VisitOMPArraySectionExpr(OMPArraySectionExpr *OASE) {
- assert(!NoDiagnose && "Array sections cannot be implicitly mapped.");
+ // After OMP 5.0 Array section in reduction clause will be implicitly
+ // mapped
+ assert(!(SemaRef.getLangOpts().OpenMP < 50 && NoDiagnose) &&
+ "Array sections cannot be implicitly mapped.");
Expr *E = OASE->getBase()->IgnoreParenImpCasts();
QualType CurType =
- OMPArraySectionExpr::getBaseOriginalType(E).getCanonicalType();
+ OMPArraySectionExpr::getBaseOriginalType(E).getCanonicalType();
// OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, C++, p.1]
// If the type of a list item is a reference to a type T then the type
@@ -18750,14 +21533,14 @@ public:
if (!IsPointer && !CurType->isArrayType()) {
SemaRef.Diag(ELoc, diag::err_omp_expected_base_var_name)
- << 0 << OASE->getSourceRange();
+ << 0 << OASE->getSourceRange();
return false;
}
bool NotWhole =
- checkArrayExpressionDoesNotReferToWholeSize(SemaRef, OASE, CurType);
+ checkArrayExpressionDoesNotReferToWholeSize(SemaRef, OASE, CurType);
bool NotUnity =
- checkArrayExpressionDoesNotReferToUnitySize(SemaRef, OASE, CurType);
+ checkArrayExpressionDoesNotReferToUnitySize(SemaRef, OASE, CurType);
if (AllowWholeSizeArraySection) {
// Any array section is currently allowed. Allowing a whole size array
@@ -18778,9 +21561,11 @@ public:
} else if (AllowUnitySizeArraySection && NotUnity) {
// A unity or whole array section is not allowed and that is not
// compatible with the properties of the current array section.
- SemaRef.Diag(
- ELoc, diag::err_array_section_does_not_specify_contiguous_storage)
- << OASE->getSourceRange();
+ if (NoDiagnose)
+ return false;
+ SemaRef.Diag(ELoc,
+ diag::err_array_section_does_not_specify_contiguous_storage)
+ << OASE->getSourceRange();
return false;
}
@@ -18792,7 +21577,7 @@ public:
Expr::EvalResult ResultL;
if (!OASE->getLength()->isValueDependent() &&
OASE->getLength()->EvaluateAsInt(ResultR, SemaRef.getASTContext()) &&
- !ResultR.Val.getInt().isOneValue()) {
+ !ResultR.Val.getInt().isOne()) {
SemaRef.Diag(OASE->getLength()->getExprLoc(),
diag::err_omp_invalid_map_this_expr);
SemaRef.Diag(OASE->getLength()->getExprLoc(),
@@ -18801,7 +21586,7 @@ public:
if (OASE->getLowerBound() && !OASE->getLowerBound()->isValueDependent() &&
OASE->getLowerBound()->EvaluateAsInt(ResultL,
SemaRef.getASTContext()) &&
- !ResultL.Val.getInt().isNullValue()) {
+ !ResultL.Val.getInt().isZero()) {
SemaRef.Diag(OASE->getLowerBound()->getExprLoc(),
diag::err_omp_invalid_map_this_expr);
SemaRef.Diag(OASE->getLowerBound()->getExprLoc(),
@@ -18843,8 +21628,8 @@ public:
}
// Pointer arithmetic is the only thing we expect to happen here so after we
- // make sure the binary operator is a pointer type, the we only thing need
- // to to is to visit the subtree that has the same type as root (so that we
+ // make sure the binary operator is a pointer type, the only thing we need
+ // to do is to visit the subtree that has the same type as root (so that we
// know the other subtree is just an offset)
Expr *LE = BO->getLHS()->IgnoreParenImpCasts();
Expr *RE = BO->getRHS()->IgnoreParenImpCasts();
@@ -18879,9 +21664,7 @@ public:
emitErrorMsg();
return false;
}
- const Expr *getFoundBase() const {
- return RelevantExpr;
- }
+ const Expr *getFoundBase() const { return RelevantExpr; }
explicit MapBaseChecker(
Sema &SemaRef, OpenMPClauseKind CKind, OpenMPDirectiveKind DKind,
OMPClauseMappableExprCommon::MappableExprComponentList &Components,
@@ -18892,9 +21675,9 @@ public:
} // namespace
/// Return the expression of the base of the mappable expression or null if it
-/// cannot be determined and do all the necessary checks to see if the expression
-/// is valid as a standalone mappable expression. In the process, record all the
-/// components of the expression.
+/// cannot be determined and do all the necessary checks to see if the
+/// expression is valid as a standalone mappable expression. In the process,
+/// record all the components of the expression.
static const Expr *checkMapClauseExpressionBase(
Sema &SemaRef, Expr *E,
OMPClauseMappableExprCommon::MappableExprComponentList &CurComponents,
@@ -19084,9 +21867,9 @@ static bool checkMapConflicts(
return true;
}
if (CI->getAssociatedExpression()->getStmtClass() !=
- SI->getAssociatedExpression()->getStmtClass() ||
- CI->getAssociatedDeclaration()->getCanonicalDecl() ==
- SI->getAssociatedDeclaration()->getCanonicalDecl()) {
+ SI->getAssociatedExpression()->getStmtClass() ||
+ CI->getAssociatedDeclaration()->getCanonicalDecl() ==
+ SI->getAssociatedDeclaration()->getCanonicalDecl()) {
assert(CI != CE && SI != SE);
SemaRef.Diag(DerivedLoc, diag::err_omp_same_pointer_dereferenced)
<< DerivedLoc;
@@ -19307,7 +22090,7 @@ struct MappableVarListInfo {
VarBaseDeclarations.reserve(VarList.size());
}
};
-}
+} // namespace
// Check the validity of the provided variable list for the provided clause kind
// \a CKind. In the check process the valid expressions, mappable expression
@@ -19321,7 +22104,8 @@ static void checkMappableExpressionList(
CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo MapperId,
ArrayRef<Expr *> UnresolvedMappers,
OpenMPMapClauseKind MapType = OMPC_MAP_unknown,
- bool IsMapTypeImplicit = false) {
+ ArrayRef<OpenMPMapModifierKind> Modifiers = std::nullopt,
+ bool IsMapTypeImplicit = false, bool NoDiagnose = false) {
// We only expect mappable expressions in 'to', 'from', and 'map' clauses.
assert((CKind == OMPC_map || CKind == OMPC_to || CKind == OMPC_from) &&
"Unexpected clause kind with mappable expressions!");
@@ -19342,6 +22126,9 @@ static void checkMappableExpressionList(
bool UpdateUMIt = false;
Expr *UnresolvedMapper = nullptr;
+ bool HasHoldModifier =
+ llvm::is_contained(Modifiers, OMPC_MAP_MODIFIER_ompx_hold);
+
// Keep track of the mappable components and base declarations in this clause.
// Each entry in the list is going to have a list of components associated. We
// record each set of the components so that we can build the clause later on.
@@ -19400,9 +22187,9 @@ static void checkMappableExpressionList(
// Obtain the array or member expression bases if required. Also, fill the
// components array with all the components identified in the process.
- const Expr *BE = checkMapClauseExpressionBase(
- SemaRef, SimpleExpr, CurComponents, CKind, DSAS->getCurrentDirective(),
- /*NoDiagnose=*/false);
+ const Expr *BE =
+ checkMapClauseExpressionBase(SemaRef, SimpleExpr, CurComponents, CKind,
+ DSAS->getCurrentDirective(), NoDiagnose);
if (!BE)
continue;
@@ -19448,6 +22235,8 @@ static void checkMappableExpressionList(
// OpenMP 4.5 [2.10.5, target update Construct]
// threadprivate variables cannot appear in a from clause.
if (VD && DSAS->isThreadPrivate(VD)) {
+ if (NoDiagnose)
+ continue;
DSAStackTy::DSAVarData DVar = DSAS->getTopDSA(VD, /*FromParent=*/false);
SemaRef.Diag(ELoc, diag::err_omp_threadprivate_in_clause)
<< getOpenMPClauseName(CKind);
@@ -19508,17 +22297,19 @@ static void checkMappableExpressionList(
// OpenMP 4.5 [2.15.5.1, map Clause, Restrictions, p.9]
// A list item must have a mappable type.
if (!checkTypeMappable(VE->getExprLoc(), VE->getSourceRange(), SemaRef,
- DSAS, Type))
+ DSAS, Type, /*FullCheck=*/true))
continue;
if (CKind == OMPC_map) {
// target enter data
// OpenMP [2.10.2, Restrictions, p. 99]
// A map-type must be specified in all map clauses and must be either
- // to or alloc.
+ // to or alloc. Starting with OpenMP 5.2 the default map type is `to` if
+ // no map type is present.
OpenMPDirectiveKind DKind = DSAS->getCurrentDirective();
if (DKind == OMPD_target_enter_data &&
- !(MapType == OMPC_MAP_to || MapType == OMPC_MAP_alloc)) {
+ !(MapType == OMPC_MAP_to || MapType == OMPC_MAP_alloc ||
+ SemaRef.getLangOpts().OpenMP >= 52)) {
SemaRef.Diag(StartLoc, diag::err_omp_invalid_map_type_for_directive)
<< (IsMapTypeImplicit ? 1 : 0)
<< getOpenMPSimpleClauseTypeName(OMPC_map, MapType)
@@ -19529,10 +22320,11 @@ static void checkMappableExpressionList(
// target exit_data
// OpenMP [2.10.3, Restrictions, p. 102]
// A map-type must be specified in all map clauses and must be either
- // from, release, or delete.
+ // from, release, or delete. Starting with OpenMP 5.2 the default map
+ // type is `from` if no map type is present.
if (DKind == OMPD_target_exit_data &&
!(MapType == OMPC_MAP_from || MapType == OMPC_MAP_release ||
- MapType == OMPC_MAP_delete)) {
+ MapType == OMPC_MAP_delete || SemaRef.getLangOpts().OpenMP >= 52)) {
SemaRef.Diag(StartLoc, diag::err_omp_invalid_map_type_for_directive)
<< (IsMapTypeImplicit ? 1 : 0)
<< getOpenMPSimpleClauseTypeName(OMPC_map, MapType)
@@ -19540,6 +22332,21 @@ static void checkMappableExpressionList(
continue;
}
+ // The 'ompx_hold' modifier is specifically intended to be used on a
+ // 'target' or 'target data' directive to prevent data from being unmapped
+ // during the associated statement. It is not permitted on a 'target
+ // enter data' or 'target exit data' directive, which have no associated
+ // statement.
+ if ((DKind == OMPD_target_enter_data || DKind == OMPD_target_exit_data) &&
+ HasHoldModifier) {
+ SemaRef.Diag(StartLoc,
+ diag::err_omp_invalid_map_type_modifier_for_directive)
+ << getOpenMPSimpleClauseTypeName(OMPC_map,
+ OMPC_MAP_MODIFIER_ompx_hold)
+ << getOpenMPDirectiveName(DKind);
+ continue;
+ }
+
// target, target data
// OpenMP 5.0 [2.12.2, Restrictions, p. 163]
// OpenMP 5.0 [2.12.5, Restrictions, p. 174]
@@ -19595,7 +22402,7 @@ static void checkMappableExpressionList(
/*WhereFoundClauseKind=*/OMPC_map);
// Save the components and declaration to create the clause. For purposes of
- // the clause creation, any component list that has has base 'this' uses
+ // the clause creation, any component list that has base 'this' uses
// null as base declaration.
MVLI.VarComponents.resize(MVLI.VarComponents.size() + 1);
MVLI.VarComponents.back().append(CurComponents.begin(),
@@ -19606,22 +22413,29 @@ static void checkMappableExpressionList(
}
OMPClause *Sema::ActOnOpenMPMapClause(
- ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
+ Expr *IteratorModifier, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
- const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers) {
+ const OMPVarListLocTy &Locs, bool NoDiagnose,
+ ArrayRef<Expr *> UnresolvedMappers) {
OpenMPMapModifierKind Modifiers[] = {
OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown,
+ OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown,
OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown};
SourceLocation ModifiersLoc[NumberOfOMPMapClauseModifiers];
+ if (IteratorModifier && !IteratorModifier->getType()->isSpecificBuiltinType(
+ BuiltinType::OMPIterator))
+ Diag(IteratorModifier->getExprLoc(),
+ diag::err_omp_map_modifier_not_iterator);
+
// Process map-type-modifiers, flag errors for duplicate modifiers.
unsigned Count = 0;
for (unsigned I = 0, E = MapTypeModifiers.size(); I < E; ++I) {
if (MapTypeModifiers[I] != OMPC_MAP_MODIFIER_unknown &&
- llvm::find(Modifiers, MapTypeModifiers[I]) != std::end(Modifiers)) {
+ llvm::is_contained(Modifiers, MapTypeModifiers[I])) {
Diag(MapTypeModifiersLoc[I], diag::err_omp_duplicate_map_type_modifier);
continue;
}
@@ -19635,15 +22449,16 @@ OMPClause *Sema::ActOnOpenMPMapClause(
MappableVarListInfo MVLI(VarList);
checkMappableExpressionList(*this, DSAStack, OMPC_map, MVLI, Locs.StartLoc,
MapperIdScopeSpec, MapperId, UnresolvedMappers,
- MapType, IsMapTypeImplicit);
+ MapType, Modifiers, IsMapTypeImplicit,
+ NoDiagnose);
// We need to produce a map clause even if we don't have variables so that
// other diagnostics related with non-existing map clauses are accurate.
- return OMPMapClause::Create(Context, Locs, MVLI.ProcessedVarList,
- MVLI.VarBaseDeclarations, MVLI.VarComponents,
- MVLI.UDMapperList, Modifiers, ModifiersLoc,
- MapperIdScopeSpec.getWithLocInContext(Context),
- MapperId, MapType, IsMapTypeImplicit, MapLoc);
+ return OMPMapClause::Create(
+ Context, Locs, MVLI.ProcessedVarList, MVLI.VarBaseDeclarations,
+ MVLI.VarComponents, MVLI.UDMapperList, IteratorModifier, Modifiers,
+ ModifiersLoc, MapperIdScopeSpec.getWithLocInContext(Context), MapperId,
+ MapType, IsMapTypeImplicit, MapLoc);
}
QualType Sema::ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
@@ -19878,12 +22693,12 @@ void Sema::ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
PopFunctionScopeInfo();
if (Initializer != nullptr) {
- DRD->setInitializer(Initializer, OMPDeclareReductionDecl::CallInit);
+ DRD->setInitializer(Initializer, OMPDeclareReductionInitKind::Call);
} else if (OmpPrivParm->hasInit()) {
DRD->setInitializer(OmpPrivParm->getInit(),
OmpPrivParm->isDirectInit()
- ? OMPDeclareReductionDecl::DirectInit
- : OMPDeclareReductionDecl::CopyInit);
+ ? OMPDeclareReductionInitKind::Direct
+ : OMPDeclareReductionInitKind::Copy);
} else {
DRD->setInvalidDecl();
}
@@ -19904,7 +22719,7 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareReductionDirectiveEnd(
}
TypeResult Sema::ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D) {
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
QualType T = TInfo->getType();
if (D.isInvalidType())
return true;
@@ -20037,6 +22852,11 @@ Sema::ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S, QualType MapperType,
return E;
}
+void Sema::ActOnOpenMPIteratorVarDecl(VarDecl *VD) {
+ if (DSAStack->getDeclareMapperVarRef())
+ DSAStack->addIteratorVarDecl(VD);
+}
+
bool Sema::isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const {
assert(LangOpts.OpenMP && "Expected OpenMP mode.");
const Expr *Ref = DSAStack->getDeclareMapperVarRef();
@@ -20045,6 +22865,8 @@ bool Sema::isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const {
return true;
if (VD->isUsableInConstantExpressions(Context))
return true;
+ if (LangOpts.OpenMP >= 52 && DSAStack->isIteratorVarDecl(VD))
+ return true;
return false;
}
return true;
@@ -20129,10 +22951,21 @@ OMPClause *Sema::ActOnOpenMPPriorityClause(Expr *Priority,
StartLoc, LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPGrainsizeClause(Expr *Grainsize,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *Sema::ActOnOpenMPGrainsizeClause(
+ OpenMPGrainsizeClauseModifier Modifier, Expr *Grainsize,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation ModifierLoc, SourceLocation EndLoc) {
+ assert((ModifierLoc.isInvalid() || LangOpts.OpenMP >= 51) &&
+ "Unexpected grainsize modifier in OpenMP < 51.");
+
+ if (ModifierLoc.isValid() && Modifier == OMPC_GRAINSIZE_unknown) {
+ std::string Values = getListOfPossibleValues(OMPC_grainsize, /*First=*/0,
+ OMPC_GRAINSIZE_unknown);
+ Diag(ModifierLoc, diag::err_omp_unexpected_clause_value)
+ << Values << getOpenMPClauseName(OMPC_grainsize);
+ return nullptr;
+ }
+
Expr *ValExpr = Grainsize;
Stmt *HelperValStmt = nullptr;
OpenMPDirectiveKind CaptureRegion = OMPD_unknown;
@@ -20140,20 +22973,33 @@ OMPClause *Sema::ActOnOpenMPGrainsizeClause(Expr *Grainsize,
// OpenMP [2.9.2, taskloop Constrcut]
// The parameter of the grainsize clause must be a positive integer
// expression.
- if (!isNonNegativeIntegerValue(
- ValExpr, *this, OMPC_grainsize,
- /*StrictlyPositive=*/true, /*BuildCapture=*/true,
- DSAStack->getCurrentDirective(), &CaptureRegion, &HelperValStmt))
+ if (!isNonNegativeIntegerValue(ValExpr, *this, OMPC_grainsize,
+ /*StrictlyPositive=*/true,
+ /*BuildCapture=*/true,
+ DSAStack->getCurrentDirective(),
+ &CaptureRegion, &HelperValStmt))
return nullptr;
- return new (Context) OMPGrainsizeClause(ValExpr, HelperValStmt, CaptureRegion,
- StartLoc, LParenLoc, EndLoc);
+ return new (Context)
+ OMPGrainsizeClause(Modifier, ValExpr, HelperValStmt, CaptureRegion,
+ StartLoc, LParenLoc, ModifierLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPNumTasksClause(Expr *NumTasks,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
+OMPClause *Sema::ActOnOpenMPNumTasksClause(
+ OpenMPNumTasksClauseModifier Modifier, Expr *NumTasks,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation ModifierLoc, SourceLocation EndLoc) {
+ assert((ModifierLoc.isInvalid() || LangOpts.OpenMP >= 51) &&
+ "Unexpected num_tasks modifier in OpenMP < 51.");
+
+ if (ModifierLoc.isValid() && Modifier == OMPC_NUMTASKS_unknown) {
+ std::string Values = getListOfPossibleValues(OMPC_num_tasks, /*First=*/0,
+ OMPC_NUMTASKS_unknown);
+ Diag(ModifierLoc, diag::err_omp_unexpected_clause_value)
+ << Values << getOpenMPClauseName(OMPC_num_tasks);
+ return nullptr;
+ }
+
Expr *ValExpr = NumTasks;
Stmt *HelperValStmt = nullptr;
OpenMPDirectiveKind CaptureRegion = OMPD_unknown;
@@ -20167,8 +23013,9 @@ OMPClause *Sema::ActOnOpenMPNumTasksClause(Expr *NumTasks,
DSAStack->getCurrentDirective(), &CaptureRegion, &HelperValStmt))
return nullptr;
- return new (Context) OMPNumTasksClause(ValExpr, HelperValStmt, CaptureRegion,
- StartLoc, LParenLoc, EndLoc);
+ return new (Context)
+ OMPNumTasksClause(Modifier, ValExpr, HelperValStmt, CaptureRegion,
+ StartLoc, LParenLoc, ModifierLoc, EndLoc);
}
OMPClause *Sema::ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
@@ -20177,7 +23024,8 @@ OMPClause *Sema::ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
// OpenMP [2.13.2, critical construct, Description]
// ... where hint-expression is an integer constant expression that evaluates
// to a valid lock hint.
- ExprResult HintExpr = VerifyPositiveIntegerConstantInClause(Hint, OMPC_hint);
+ ExprResult HintExpr =
+ VerifyPositiveIntegerConstantInClause(Hint, OMPC_hint, false);
if (HintExpr.isInvalid())
return nullptr;
return new (Context)
@@ -20277,7 +23125,7 @@ OMPClause *Sema::ActOnOpenMPDistScheduleClause(
// OpenMP [2.7.1, Restrictions]
// chunk_size must be a loop invariant integer expression with a positive
// value.
- if (Optional<llvm::APSInt> Result =
+ if (std::optional<llvm::APSInt> Result =
ValExpr->getIntegerConstantExpr(Context)) {
if (Result->isSigned() && !Result->isStrictlyPositive()) {
Diag(ChunkSizeLoc, diag::err_omp_negative_expression_in_clause)
@@ -20400,6 +23248,11 @@ bool Sema::ActOnStartOpenMPDeclareTargetContext(
Diag(DTCI.Loc, diag::err_omp_region_not_file_context);
return false;
}
+
+ // Report affected OpenMP target offloading behavior when in HIP lang-mode.
+ if (getLangOpts().HIP)
+ Diag(DTCI.Loc, diag::warn_hip_omp_target_directives);
+
DeclareTargetNesting.push_back(DTCI);
return true;
}
@@ -20414,8 +23267,15 @@ Sema::ActOnOpenMPEndDeclareTargetDirective() {
void Sema::ActOnFinishedOpenMPDeclareTargetContext(
DeclareTargetContextInfo &DTCI) {
for (auto &It : DTCI.ExplicitlyMapped)
- ActOnOpenMPDeclareTargetName(It.first, It.second.Loc, It.second.MT,
- DTCI.DT);
+ ActOnOpenMPDeclareTargetName(It.first, It.second.Loc, It.second.MT, DTCI);
+}
+
+void Sema::DiagnoseUnterminatedOpenMPDeclareTarget() {
+ if (DeclareTargetNesting.empty())
+ return;
+ DeclareTargetContextInfo &DTCI = DeclareTargetNesting.back();
+ Diag(DTCI.Loc, diag::warn_omp_unterminated_declare_target)
+ << getOpenMPDirectiveName(DTCI.Kind);
}
NamedDecl *Sema::lookupOpenMPDeclareTargetName(Scope *CurScope,
@@ -20452,9 +23312,9 @@ NamedDecl *Sema::lookupOpenMPDeclareTargetName(Scope *CurScope,
return ND;
}
-void Sema::ActOnOpenMPDeclareTargetName(
- NamedDecl *ND, SourceLocation Loc, OMPDeclareTargetDeclAttr::MapTypeTy MT,
- OMPDeclareTargetDeclAttr::DevTypeTy DT) {
+void Sema::ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
+ OMPDeclareTargetDeclAttr::MapTypeTy MT,
+ DeclareTargetContextInfo &DTCI) {
assert((isa<VarDecl>(ND) || isa<FunctionDecl>(ND) ||
isa<FunctionTemplateDecl>(ND)) &&
"Expected variable, function or function template.");
@@ -20465,35 +23325,51 @@ void Sema::ActOnOpenMPDeclareTargetName(
(ND->isUsed(/*CheckUsedAttr=*/false) || ND->isReferenced()))
Diag(Loc, diag::warn_omp_declare_target_after_first_use);
+ // Report affected OpenMP target offloading behavior when in HIP lang-mode.
+ if (getLangOpts().HIP)
+ Diag(Loc, diag::warn_hip_omp_target_directives);
+
// Explicit declare target lists have precedence.
const unsigned Level = -1;
auto *VD = cast<ValueDecl>(ND);
- llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr =
+ std::optional<OMPDeclareTargetDeclAttr *> ActiveAttr =
OMPDeclareTargetDeclAttr::getActiveAttr(VD);
- if (ActiveAttr.hasValue() && ActiveAttr.getValue()->getDevType() != DT &&
- ActiveAttr.getValue()->getLevel() == Level) {
+ if (ActiveAttr && (*ActiveAttr)->getDevType() != DTCI.DT &&
+ (*ActiveAttr)->getLevel() == Level) {
Diag(Loc, diag::err_omp_device_type_mismatch)
- << OMPDeclareTargetDeclAttr::ConvertDevTypeTyToStr(DT)
+ << OMPDeclareTargetDeclAttr::ConvertDevTypeTyToStr(DTCI.DT)
<< OMPDeclareTargetDeclAttr::ConvertDevTypeTyToStr(
- ActiveAttr.getValue()->getDevType());
+ (*ActiveAttr)->getDevType());
return;
}
- if (ActiveAttr.hasValue() && ActiveAttr.getValue()->getMapType() != MT &&
- ActiveAttr.getValue()->getLevel() == Level) {
+ if (ActiveAttr && (*ActiveAttr)->getMapType() != MT &&
+ (*ActiveAttr)->getLevel() == Level) {
Diag(Loc, diag::err_omp_declare_target_to_and_link) << ND;
return;
}
- if (ActiveAttr.hasValue() && ActiveAttr.getValue()->getLevel() == Level)
+ if (ActiveAttr && (*ActiveAttr)->getLevel() == Level)
return;
- auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(Context, MT, DT, Level,
- SourceRange(Loc, Loc));
+ Expr *IndirectE = nullptr;
+ bool IsIndirect = false;
+ if (DTCI.Indirect) {
+ IndirectE = *DTCI.Indirect;
+ if (!IndirectE)
+ IsIndirect = true;
+ }
+ auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(
+ Context, MT, DTCI.DT, IndirectE, IsIndirect, Level,
+ SourceRange(Loc, Loc));
ND->addAttr(A);
if (ASTMutationListener *ML = Context.getASTMutationListener())
ML->DeclarationMarkedOpenMPDeclareTarget(ND, A);
checkDeclIsAllowedInOpenMPTarget(nullptr, ND, Loc);
+ if (auto *VD = dyn_cast<VarDecl>(ND);
+ LangOpts.OpenMP && VD && VD->hasAttr<OMPDeclareTargetDeclAttr>() &&
+ VD->hasGlobalStorage())
+ ActOnOpenMPDeclareTargetInitializer(ND);
}
static void checkDeclInTargetContext(SourceLocation SL, SourceRange SR,
@@ -20501,13 +23377,14 @@ static void checkDeclInTargetContext(SourceLocation SL, SourceRange SR,
if (!D || !isa<VarDecl>(D))
return;
auto *VD = cast<VarDecl>(D);
- Optional<OMPDeclareTargetDeclAttr::MapTypeTy> MapTy =
+ std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> MapTy =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (SemaRef.LangOpts.OpenMP >= 50 &&
(SemaRef.getCurLambda(/*IgnoreNonLambdaCapturingScope=*/true) ||
SemaRef.getCurBlock() || SemaRef.getCurCapturedRegion()) &&
VD->hasGlobalStorage()) {
- if (!MapTy || *MapTy != OMPDeclareTargetDeclAttr::MT_To) {
+ if (!MapTy || (*MapTy != OMPDeclareTargetDeclAttr::MT_To &&
+ *MapTy != OMPDeclareTargetDeclAttr::MT_Enter)) {
// OpenMP 5.0, 2.12.7 declare target Directive, Restrictions
// If a lambda declaration and definition appears between a
// declare target directive and the matching end declare target
@@ -20520,7 +23397,7 @@ static void checkDeclInTargetContext(SourceLocation SL, SourceRange SR,
return;
}
}
- if (MapTy.hasValue())
+ if (MapTy)
return;
SemaRef.Diag(VD->getLocation(), diag::warn_omp_not_in_target_context);
SemaRef.Diag(SL, diag::note_used_here) << SR;
@@ -20556,7 +23433,7 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
if (const auto *FTD = dyn_cast<FunctionTemplateDecl>(D))
D = FTD->getTemplatedDecl();
if (auto *FD = dyn_cast<FunctionDecl>(D)) {
- llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
+ std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(FD);
if (IdLoc.isValid() && Res && *Res == OMPDeclareTargetDeclAttr::MT_Link) {
Diag(IdLoc, diag::err_omp_function_in_link_clause);
@@ -20574,14 +23451,24 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
// Checking declaration inside declare target region.
if (isa<VarDecl>(D) || isa<FunctionDecl>(D) ||
isa<FunctionTemplateDecl>(D)) {
- llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr =
+ std::optional<OMPDeclareTargetDeclAttr *> ActiveAttr =
OMPDeclareTargetDeclAttr::getActiveAttr(VD);
unsigned Level = DeclareTargetNesting.size();
- if (ActiveAttr.hasValue() && ActiveAttr.getValue()->getLevel() >= Level)
+ if (ActiveAttr && (*ActiveAttr)->getLevel() >= Level)
return;
DeclareTargetContextInfo &DTCI = DeclareTargetNesting.back();
+ Expr *IndirectE = nullptr;
+ bool IsIndirect = false;
+ if (DTCI.Indirect) {
+ IndirectE = *DTCI.Indirect;
+ if (!IndirectE)
+ IsIndirect = true;
+ }
auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(
- Context, OMPDeclareTargetDeclAttr::MT_To, DTCI.DT, Level,
+ Context,
+ getLangOpts().OpenMP >= 52 ? OMPDeclareTargetDeclAttr::MT_Enter
+ : OMPDeclareTargetDeclAttr::MT_To,
+ DTCI.DT, IndirectE, IsIndirect, Level,
SourceRange(DTCI.Loc, DTCI.Loc));
D->addAttr(A);
if (ASTMutationListener *ML = Context.getASTMutationListener())
@@ -20595,6 +23482,55 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
checkDeclInTargetContext(E->getExprLoc(), E->getSourceRange(), *this, D);
}
+/// This class visits every VarDecl that the initializer references and adds
+/// OMPDeclareTargetDeclAttr to each of them.
+class GlobalDeclRefChecker final
+ : public StmtVisitor<GlobalDeclRefChecker> {
+ SmallVector<VarDecl *> DeclVector;
+ Attr *A;
+
+public:
+ /// A StmtVisitor class function that visits all DeclRefExpr and adds
+ /// OMPDeclareTargetDeclAttr to them.
+ void VisitDeclRefExpr(DeclRefExpr *Node) {
+ if (auto *VD = dyn_cast<VarDecl>(Node->getDecl())) {
+ VD->addAttr(A);
+ DeclVector.push_back(VD);
+ }
+ }
+ /// A function that iterates across each of the Expr's children.
+ void VisitExpr(Expr *Ex) {
+ for (auto *Child : Ex->children()) {
+ Visit(Child);
+ }
+ }
+ /// A function that keeps a record of all the Decls that are variables, has
+ /// OMPDeclareTargetDeclAttr, and has global storage in the DeclVector. Pop
+ /// each Decl one at a time and use the inherited 'visit' functions to look
+ /// for DeclRefExpr.
+ void declareTargetInitializer(Decl *TD) {
+ A = TD->getAttr<OMPDeclareTargetDeclAttr>();
+ DeclVector.push_back(cast<VarDecl>(TD));
+ while (!DeclVector.empty()) {
+ VarDecl *TargetVarDecl = DeclVector.pop_back_val();
+ if (TargetVarDecl->hasAttr<OMPDeclareTargetDeclAttr>() &&
+ TargetVarDecl->hasInit() && TargetVarDecl->hasGlobalStorage()) {
+ if (Expr *Ex = TargetVarDecl->getInit())
+ Visit(Ex);
+ }
+ }
+ }
+};
+
+/// Adding OMPDeclareTargetDeclAttr to variables with static storage
+/// duration that are referenced in the initializer expression list of
+/// variables with static storage duration in declare target directive.
+void Sema::ActOnOpenMPDeclareTargetInitializer(Decl *TargetDecl) {
+ GlobalDeclRefChecker Checker;
+ if (isa<VarDecl>(TargetDecl))
+ Checker.declareTargetInitializer(TargetDecl);
+}
+
OMPClause *Sema::ActOnOpenMPToClause(
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
@@ -20609,7 +23545,7 @@ OMPClause *Sema::ActOnOpenMPToClause(
unsigned Count = 0;
for (unsigned I = 0, E = MotionModifiers.size(); I < E; ++I) {
if (MotionModifiers[I] != OMPC_MOTION_MODIFIER_unknown &&
- llvm::find(Modifiers, MotionModifiers[I]) != std::end(Modifiers)) {
+ llvm::is_contained(Modifiers, MotionModifiers[I])) {
Diag(MotionModifiersLoc[I], diag::err_omp_duplicate_motion_modifier);
continue;
}
@@ -20646,7 +23582,7 @@ OMPClause *Sema::ActOnOpenMPFromClause(
unsigned Count = 0;
for (unsigned I = 0, E = MotionModifiers.size(); I < E; ++I) {
if (MotionModifiers[I] != OMPC_MOTION_MODIFIER_unknown &&
- llvm::find(Modifiers, MotionModifiers[I]) != std::end(Modifiers)) {
+ llvm::is_contained(Modifiers, MotionModifiers[I])) {
Diag(MotionModifiersLoc[I], diag::err_omp_duplicate_motion_modifier);
continue;
}
@@ -20889,6 +23825,92 @@ OMPClause *Sema::ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
MVLI.VarComponents);
}
+OMPClause *Sema::ActOnOpenMPHasDeviceAddrClause(ArrayRef<Expr *> VarList,
+ const OMPVarListLocTy &Locs) {
+ MappableVarListInfo MVLI(VarList);
+ for (Expr *RefExpr : VarList) {
+ assert(RefExpr && "NULL expr in OpenMP has_device_addr clause.");
+ SourceLocation ELoc;
+ SourceRange ERange;
+ Expr *SimpleRefExpr = RefExpr;
+ auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange,
+ /*AllowArraySection=*/true);
+ if (Res.second) {
+ // It will be analyzed later.
+ MVLI.ProcessedVarList.push_back(RefExpr);
+ }
+ ValueDecl *D = Res.first;
+ if (!D)
+ continue;
+
+ // Check if the declaration in the clause does not show up in any data
+ // sharing attribute.
+ DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(D, /*FromParent=*/false);
+ if (isOpenMPPrivate(DVar.CKind)) {
+ Diag(ELoc, diag::err_omp_variable_in_given_clause_and_dsa)
+ << getOpenMPClauseName(DVar.CKind)
+ << getOpenMPClauseName(OMPC_has_device_addr)
+ << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
+ reportOriginalDsa(*this, DSAStack, D, DVar);
+ continue;
+ }
+
+ const Expr *ConflictExpr;
+ if (DSAStack->checkMappableExprComponentListsForDecl(
+ D, /*CurrentRegionOnly=*/true,
+ [&ConflictExpr](
+ OMPClauseMappableExprCommon::MappableExprComponentListRef R,
+ OpenMPClauseKind) -> bool {
+ ConflictExpr = R.front().getAssociatedExpression();
+ return true;
+ })) {
+ Diag(ELoc, diag::err_omp_map_shared_storage) << RefExpr->getSourceRange();
+ Diag(ConflictExpr->getExprLoc(), diag::note_used_here)
+ << ConflictExpr->getSourceRange();
+ continue;
+ }
+
+ // Store the components in the stack so that they can be used to check
+ // against other clauses later on.
+ Expr *Component = SimpleRefExpr;
+ auto *VD = dyn_cast<VarDecl>(D);
+ if (VD && (isa<OMPArraySectionExpr>(RefExpr->IgnoreParenImpCasts()) ||
+ isa<ArraySubscriptExpr>(RefExpr->IgnoreParenImpCasts())))
+ Component = DefaultFunctionArrayLvalueConversion(SimpleRefExpr).get();
+ OMPClauseMappableExprCommon::MappableComponent MC(
+ Component, D, /*IsNonContiguous=*/false);
+ DSAStack->addMappableExpressionComponents(
+ D, MC, /*WhereFoundClauseKind=*/OMPC_has_device_addr);
+
+ // Record the expression we've just processed.
+ if (!VD && !CurContext->isDependentContext()) {
+ DeclRefExpr *Ref =
+ buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true);
+ assert(Ref && "has_device_addr capture failed");
+ MVLI.ProcessedVarList.push_back(Ref);
+ } else
+ MVLI.ProcessedVarList.push_back(RefExpr->IgnoreParens());
+
+ // Create a mappable component for the list item. List items in this clause
+ // only need a component. We use a null declaration to signal fields in
+ // 'this'.
+ assert((isa<DeclRefExpr>(SimpleRefExpr) ||
+ isa<CXXThisExpr>(cast<MemberExpr>(SimpleRefExpr)->getBase())) &&
+ "Unexpected device pointer expression!");
+ MVLI.VarBaseDeclarations.push_back(
+ isa<DeclRefExpr>(SimpleRefExpr) ? D : nullptr);
+ MVLI.VarComponents.resize(MVLI.VarComponents.size() + 1);
+ MVLI.VarComponents.back().push_back(MC);
+ }
+
+ if (MVLI.ProcessedVarList.empty())
+ return nullptr;
+
+ return OMPHasDeviceAddrClause::Create(Context, Locs, MVLI.ProcessedVarList,
+ MVLI.VarBaseDeclarations,
+ MVLI.VarComponents);
+}
+
OMPClause *Sema::ActOnOpenMPAllocateClause(
Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc) {
@@ -20914,7 +23936,7 @@ OMPClause *Sema::ActOnOpenMPAllocateClause(
// target region must specify an allocator expression unless a requires
// directive with the dynamic_allocators clause is present in the same
// compilation unit.
- if (LangOpts.OpenMPIsDevice &&
+ if (LangOpts.OpenMPIsTargetDevice &&
!DSAStack->hasRequiresDeclWithClause<OMPDynamicAllocatorsClause>())
targetDiag(StartLoc, diag::err_expected_allocator_expression);
}
@@ -20991,6 +24013,17 @@ OMPClause *Sema::ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
Vars);
}
+StmtResult Sema::ActOnOpenMPScopeDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!AStmt)
+ return StmtError();
+
+ setFunctionHasBranchProtectedScope();
+
+ return OMPScopeDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
+}
+
OMPClause *Sema::ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
@@ -21016,8 +24049,7 @@ OMPClause *Sema::ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
// A list item that appears in the inclusive or exclusive clause must appear
// in a reduction clause with the inscan modifier on the enclosing
// worksharing-loop, worksharing-loop SIMD, or simd construct.
- if (DVar.CKind != OMPC_reduction ||
- DVar.Modifier != OMPC_REDUCTION_inscan)
+ if (DVar.CKind != OMPC_reduction || DVar.Modifier != OMPC_REDUCTION_inscan)
Diag(ELoc, diag::err_omp_inclusive_exclusive_not_reduction)
<< RefExpr->getSourceRange();
@@ -21126,17 +24158,26 @@ OMPClause *Sema::ActOnOpenMPUsesAllocatorClause(
AllocatorExpr = D.Allocator->IgnoreParenImpCasts();
auto *DRE = dyn_cast<DeclRefExpr>(AllocatorExpr);
bool IsPredefinedAllocator = false;
- if (DRE)
- IsPredefinedAllocator = PredefinedAllocators.count(DRE->getDecl());
- if (!DRE ||
- !(Context.hasSameUnqualifiedType(
- AllocatorExpr->getType(), DSAStack->getOMPAllocatorHandleT()) ||
- Context.typesAreCompatible(AllocatorExpr->getType(),
- DSAStack->getOMPAllocatorHandleT(),
- /*CompareUnqualified=*/true)) ||
- (!IsPredefinedAllocator &&
- (AllocatorExpr->getType().isConstant(Context) ||
- !AllocatorExpr->isLValue()))) {
+ if (DRE) {
+ OMPAllocateDeclAttr::AllocatorTypeTy AllocatorTy =
+ getAllocatorKind(*this, DSAStack, AllocatorExpr);
+ IsPredefinedAllocator =
+ AllocatorTy !=
+ OMPAllocateDeclAttr::AllocatorTypeTy::OMPUserDefinedMemAlloc;
+ }
+ QualType OMPAllocatorHandleT = DSAStack->getOMPAllocatorHandleT();
+ QualType AllocatorExprType = AllocatorExpr->getType();
+ bool IsTypeCompatible = IsPredefinedAllocator;
+ IsTypeCompatible = IsTypeCompatible ||
+ Context.hasSameUnqualifiedType(AllocatorExprType,
+ OMPAllocatorHandleT);
+ IsTypeCompatible =
+ IsTypeCompatible ||
+ Context.typesAreCompatible(AllocatorExprType, OMPAllocatorHandleT);
+ bool IsNonConstantLValue =
+ !AllocatorExprType.isConstant(Context) && AllocatorExpr->isLValue();
+ if (!DRE || !IsTypeCompatible ||
+ (!IsPredefinedAllocator && !IsNonConstantLValue)) {
Diag(D.Allocator->getExprLoc(), diag::err_omp_var_expected)
<< "omp_allocator_handle_t" << (DRE ? 1 : 0)
<< AllocatorExpr->getType() << D.Allocator->getSourceRange();
@@ -21253,3 +24294,95 @@ OMPClause *Sema::ActOnOpenMPAffinityClause(
return OMPAffinityClause::Create(Context, StartLoc, LParenLoc, ColonLoc,
EndLoc, Modifier, Vars);
}
+
+OMPClause *Sema::ActOnOpenMPBindClause(OpenMPBindClauseKind Kind,
+ SourceLocation KindLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ if (Kind == OMPC_BIND_unknown) {
+ Diag(KindLoc, diag::err_omp_unexpected_clause_value)
+ << getListOfPossibleValues(OMPC_bind, /*First=*/0,
+ /*Last=*/unsigned(OMPC_BIND_unknown))
+ << getOpenMPClauseName(OMPC_bind);
+ return nullptr;
+ }
+
+ return OMPBindClause::Create(Context, Kind, KindLoc, StartLoc, LParenLoc,
+ EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPXDynCGroupMemClause(Expr *Size,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ Expr *ValExpr = Size;
+ Stmt *HelperValStmt = nullptr;
+
+ // OpenMP [2.5, Restrictions]
+ // The ompx_dyn_cgroup_mem expression must evaluate to a positive integer
+ // value.
+ if (!isNonNegativeIntegerValue(ValExpr, *this, OMPC_ompx_dyn_cgroup_mem,
+ /*StrictlyPositive=*/false))
+ return nullptr;
+
+ OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
+ OpenMPDirectiveKind CaptureRegion = getOpenMPCaptureRegionForClause(
+ DKind, OMPC_ompx_dyn_cgroup_mem, LangOpts.OpenMP);
+ if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
+ ValExpr = MakeFullExpr(ValExpr).get();
+ llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
+ ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
+ HelperValStmt = buildPreInits(Context, Captures);
+ }
+
+ return new (Context) OMPXDynCGroupMemClause(
+ ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPDoacrossClause(
+ OpenMPDoacrossClauseModifier DepType, SourceLocation DepLoc,
+ SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc) {
+
+ if (DSAStack->getCurrentDirective() == OMPD_ordered &&
+ DepType != OMPC_DOACROSS_source && DepType != OMPC_DOACROSS_sink &&
+ DepType != OMPC_DOACROSS_sink_omp_cur_iteration &&
+ DepType != OMPC_DOACROSS_source_omp_cur_iteration &&
+ DepType != OMPC_DOACROSS_source) {
+ Diag(DepLoc, diag::err_omp_unexpected_clause_value)
+ << "'source' or 'sink'" << getOpenMPClauseName(OMPC_doacross);
+ return nullptr;
+ }
+
+ SmallVector<Expr *, 8> Vars;
+ DSAStackTy::OperatorOffsetTy OpsOffs;
+ llvm::APSInt TotalDepCount(/*BitWidth=*/32);
+ DoacrossDataInfoTy VarOffset = ProcessOpenMPDoacrossClauseCommon(
+ *this,
+ DepType == OMPC_DOACROSS_source ||
+ DepType == OMPC_DOACROSS_source_omp_cur_iteration ||
+ DepType == OMPC_DOACROSS_sink_omp_cur_iteration,
+ VarList, DSAStack, EndLoc);
+ Vars = VarOffset.Vars;
+ OpsOffs = VarOffset.OpsOffs;
+ TotalDepCount = VarOffset.TotalDepCount;
+ auto *C = OMPDoacrossClause::Create(Context, StartLoc, LParenLoc, EndLoc,
+ DepType, DepLoc, ColonLoc, Vars,
+ TotalDepCount.getZExtValue());
+ if (DSAStack->isParentOrderedRegion())
+ DSAStack->addDoacrossDependClause(C, OpsOffs);
+ return C;
+}
+
+OMPClause *Sema::ActOnOpenMPXAttributeClause(ArrayRef<const Attr *> Attrs,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPXAttributeClause(Attrs, StartLoc, LParenLoc, EndLoc);
+}
+
+OMPClause *Sema::ActOnOpenMPXBareClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return new (Context) OMPXBareClause(StartLoc, EndLoc);
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp b/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp
index 0758fbb84107..b708272ebe7d 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp
@@ -11,18 +11,23 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTLambda.h"
#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/AST/Type.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Overload.h"
@@ -30,12 +35,15 @@
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Casting.h"
#include <algorithm>
+#include <cstddef>
#include <cstdlib>
+#include <optional>
using namespace clang;
using namespace sema;
@@ -49,11 +57,10 @@ static bool functionHasPassObjectSizeParams(const FunctionDecl *FD) {
}
/// A convenience routine for creating a decayed reference to a function.
-static ExprResult
-CreateFunctionRefExpr(Sema &S, FunctionDecl *Fn, NamedDecl *FoundDecl,
- const Expr *Base, bool HadMultipleCandidates,
- SourceLocation Loc = SourceLocation(),
- const DeclarationNameLoc &LocInfo = DeclarationNameLoc()){
+static ExprResult CreateFunctionRefExpr(
+ Sema &S, FunctionDecl *Fn, NamedDecl *FoundDecl, const Expr *Base,
+ bool HadMultipleCandidates, SourceLocation Loc = SourceLocation(),
+ const DeclarationNameLoc &LocInfo = DeclarationNameLoc()) {
if (S.DiagnoseUseOfDecl(FoundDecl, Loc))
return ExprError();
// If FoundDecl is different from Fn (such as if one is a template
@@ -117,7 +124,7 @@ CompareDerivedToBaseConversions(Sema &S, SourceLocation Loc,
/// corresponding to the given implicit conversion kind.
ImplicitConversionRank clang::GetConversionRank(ImplicitConversionKind Kind) {
static const ImplicitConversionRank
- Rank[(int)ICK_Num_Conversion_Kinds] = {
+ Rank[] = {
ICR_Exact_Match,
ICR_Exact_Match,
ICR_Exact_Match,
@@ -138,6 +145,7 @@ ImplicitConversionRank clang::GetConversionRank(ImplicitConversionKind Kind) {
ICR_Conversion,
ICR_Conversion,
ICR_Conversion,
+ ICR_Conversion,
ICR_OCL_Scalar_Widening,
ICR_Complex_Real_Conversion,
ICR_Conversion,
@@ -146,16 +154,21 @@ ImplicitConversionRank clang::GetConversionRank(ImplicitConversionKind Kind) {
ICR_Exact_Match, // NOTE(gbiv): This may not be completely right --
// it was omitted by the patch that added
// ICK_Zero_Event_Conversion
+ ICR_Exact_Match, // NOTE(ctopper): This may not be completely right --
+ // it was omitted by the patch that added
+ // ICK_Zero_Queue_Conversion
ICR_C_Conversion,
- ICR_C_Conversion_Extension
+ ICR_C_Conversion_Extension,
+ ICR_Conversion,
};
+ static_assert(std::size(Rank) == (int)ICK_Num_Conversion_Kinds);
return Rank[(int)Kind];
}
/// GetImplicitConversionName - Return the name of this kind of
/// implicit conversion.
static const char* GetImplicitConversionName(ImplicitConversionKind Kind) {
- static const char* const Name[(int)ICK_Num_Conversion_Kinds] = {
+ static const char* const Name[] = {
"No conversion",
"Lvalue-to-rvalue",
"Array-to-pointer",
@@ -176,15 +189,19 @@ static const char* GetImplicitConversionName(ImplicitConversionKind Kind) {
"Derived-to-base conversion",
"Vector conversion",
"SVE Vector conversion",
+ "RVV Vector conversion",
"Vector splat",
"Complex-real conversion",
"Block Pointer conversion",
"Transparent Union Conversion",
"Writeback conversion",
"OpenCL Zero Event Conversion",
+ "OpenCL Zero Queue Conversion",
"C specific type conversion",
- "Incompatible pointer conversion"
+ "Incompatible pointer conversion",
+ "Fixed point conversion",
};
+ static_assert(std::size(Name) == (int)ICK_Num_Conversion_Kinds);
return Name[Kind];
}
@@ -355,7 +372,7 @@ NarrowingKind StandardConversionSequence::getNarrowingKind(
if (Initializer->isValueDependent())
return NK_Dependent_Narrowing;
- if (Optional<llvm::APSInt> IntConstantValue =
+ if (std::optional<llvm::APSInt> IntConstantValue =
Initializer->getIntegerConstantExpr(Ctx)) {
// Convert the integer to the floating type.
llvm::APFloat Result(Ctx.getFloatTypeSemantics(ToType));
@@ -438,7 +455,7 @@ NarrowingKind StandardConversionSequence::getNarrowingKind(
if (Initializer->isValueDependent())
return NK_Dependent_Narrowing;
- Optional<llvm::APSInt> OptInitializerValue;
+ std::optional<llvm::APSInt> OptInitializerValue;
if (!(OptInitializerValue = Initializer->getIntegerConstantExpr(Ctx))) {
// Such conversions on variables are always narrowing.
return NK_Variable_Narrowing;
@@ -541,8 +558,8 @@ void UserDefinedConversionSequence::dump() const {
/// error. Useful for debugging overloading issues.
void ImplicitConversionSequence::dump() const {
raw_ostream &OS = llvm::errs();
- if (isStdInitializerListElement())
- OS << "Worst std::initializer_list element conversion: ";
+ if (hasInitializerListContainerType())
+ OS << "Worst list element conversion: ";
switch (ConversionKind) {
case StandardConversion:
OS << "Standard conversion: ";
@@ -637,7 +654,7 @@ clang::MakeDeductionFailureInfo(ASTContext &Context,
auto *Saved = new (Context) DFIDeducedMismatchArgs;
Saved->FirstArg = Info.FirstArg;
Saved->SecondArg = Info.SecondArg;
- Saved->TemplateArgs = Info.take();
+ Saved->TemplateArgs = Info.takeSugared();
Saved->CallArgIndex = Info.CallArgIndex;
Result.Data = Saved;
break;
@@ -666,7 +683,7 @@ clang::MakeDeductionFailureInfo(ASTContext &Context,
}
case Sema::TDK_SubstitutionFailure:
- Result.Data = Info.take();
+ Result.Data = Info.takeSugared();
if (Info.hasSFINAEDiagnostic()) {
PartialDiagnosticAt *Diag = new (Result.Diagnostic) PartialDiagnosticAt(
SourceLocation(), PartialDiagnostic::NullDiagnostic());
@@ -677,7 +694,7 @@ clang::MakeDeductionFailureInfo(ASTContext &Context,
case Sema::TDK_ConstraintsNotSatisfied: {
CNSInfo *Saved = new (Context) CNSInfo;
- Saved->TemplateArgs = Info.take();
+ Saved->TemplateArgs = Info.takeSugared();
Saved->Satisfaction = Info.AssociatedConstraintsSatisfaction;
Result.Data = Saved;
break;
@@ -685,6 +702,7 @@ clang::MakeDeductionFailureInfo(ASTContext &Context,
case Sema::TDK_Success:
case Sema::TDK_NonDependentConversionFailure:
+ case Sema::TDK_AlreadyDiagnosed:
llvm_unreachable("not a deduction failure");
}
@@ -734,6 +752,7 @@ void DeductionFailureInfo::Destroy() {
// Unhandled
case Sema::TDK_MiscellaneousDeductionFailure:
+ case Sema::TDK_AlreadyDiagnosed:
break;
}
}
@@ -771,6 +790,7 @@ TemplateParameter DeductionFailureInfo::getTemplateParameter() {
// Unhandled
case Sema::TDK_MiscellaneousDeductionFailure:
+ case Sema::TDK_AlreadyDiagnosed:
break;
}
@@ -806,6 +826,7 @@ TemplateArgumentList *DeductionFailureInfo::getTemplateArgumentList() {
// Unhandled
case Sema::TDK_MiscellaneousDeductionFailure:
+ case Sema::TDK_AlreadyDiagnosed:
break;
}
@@ -837,6 +858,7 @@ const TemplateArgument *DeductionFailureInfo::getFirstArg() {
// Unhandled
case Sema::TDK_MiscellaneousDeductionFailure:
+ case Sema::TDK_AlreadyDiagnosed:
break;
}
@@ -868,24 +890,93 @@ const TemplateArgument *DeductionFailureInfo::getSecondArg() {
// Unhandled
case Sema::TDK_MiscellaneousDeductionFailure:
+ case Sema::TDK_AlreadyDiagnosed:
break;
}
return nullptr;
}
-llvm::Optional<unsigned> DeductionFailureInfo::getCallArgIndex() {
+std::optional<unsigned> DeductionFailureInfo::getCallArgIndex() {
switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
case Sema::TDK_DeducedMismatch:
case Sema::TDK_DeducedMismatchNested:
return static_cast<DFIDeducedMismatchArgs*>(Data)->CallArgIndex;
default:
- return llvm::None;
+ return std::nullopt;
}
}
-bool OverloadCandidateSet::OperatorRewriteInfo::shouldAddReversed(
+static bool FunctionsCorrespond(ASTContext &Ctx, const FunctionDecl *X,
+ const FunctionDecl *Y) {
+ if (!X || !Y)
+ return false;
+ if (X->getNumParams() != Y->getNumParams())
+ return false;
+ // FIXME: when do rewritten comparison operators
+ // with explicit object parameters correspond?
+ // https://cplusplus.github.io/CWG/issues/2797.html
+ for (unsigned I = 0; I < X->getNumParams(); ++I)
+ if (!Ctx.hasSameUnqualifiedType(X->getParamDecl(I)->getType(),
+ Y->getParamDecl(I)->getType()))
+ return false;
+ if (auto *FTX = X->getDescribedFunctionTemplate()) {
+ auto *FTY = Y->getDescribedFunctionTemplate();
+ if (!FTY)
+ return false;
+ if (!Ctx.isSameTemplateParameterList(FTX->getTemplateParameters(),
+ FTY->getTemplateParameters()))
+ return false;
+ }
+ return true;
+}
+
+static bool shouldAddReversedEqEq(Sema &S, SourceLocation OpLoc,
+ Expr *FirstOperand, FunctionDecl *EqFD) {
+ assert(EqFD->getOverloadedOperator() ==
+ OverloadedOperatorKind::OO_EqualEqual);
+ // C++2a [over.match.oper]p4:
+ // A non-template function or function template F named operator== is a
+ // rewrite target with first operand o unless a search for the name operator!=
+ // in the scope S from the instantiation context of the operator expression
+ // finds a function or function template that would correspond
+ // ([basic.scope.scope]) to F if its name were operator==, where S is the
+ // scope of the class type of o if F is a class member, and the namespace
+ // scope of which F is a member otherwise. A function template specialization
+ // named operator== is a rewrite target if its function template is a rewrite
+ // target.
+ DeclarationName NotEqOp = S.Context.DeclarationNames.getCXXOperatorName(
+ OverloadedOperatorKind::OO_ExclaimEqual);
+ if (isa<CXXMethodDecl>(EqFD)) {
+ // If F is a class member, search scope is class type of first operand.
+ QualType RHS = FirstOperand->getType();
+ auto *RHSRec = RHS->getAs<RecordType>();
+ if (!RHSRec)
+ return true;
+ LookupResult Members(S, NotEqOp, OpLoc,
+ Sema::LookupNameKind::LookupMemberName);
+ S.LookupQualifiedName(Members, RHSRec->getDecl());
+ Members.suppressAccessDiagnostics();
+ for (NamedDecl *Op : Members)
+ if (FunctionsCorrespond(S.Context, EqFD, Op->getAsFunction()))
+ return false;
+ return true;
+ }
+ // Otherwise the search scope is the namespace scope of which F is a member.
+ for (NamedDecl *Op : EqFD->getEnclosingNamespaceContext()->lookup(NotEqOp)) {
+ auto *NotEqFD = Op->getAsFunction();
+ if (auto *UD = dyn_cast<UsingShadowDecl>(Op))
+ NotEqFD = UD->getUnderlyingDecl()->getAsFunction();
+ if (FunctionsCorrespond(S.Context, EqFD, NotEqFD) && S.isVisible(NotEqFD) &&
+ declaresSameEntity(cast<Decl>(EqFD->getEnclosingNamespaceContext()),
+ cast<Decl>(Op->getLexicalDeclContext())))
+ return false;
+ }
+ return true;
+}
+
+bool OverloadCandidateSet::OperatorRewriteInfo::allowsReversed(
OverloadedOperatorKind Op) {
if (!AllowRewrittenCandidates)
return false;
@@ -893,14 +984,21 @@ bool OverloadCandidateSet::OperatorRewriteInfo::shouldAddReversed(
}
bool OverloadCandidateSet::OperatorRewriteInfo::shouldAddReversed(
- ASTContext &Ctx, const FunctionDecl *FD) {
- if (!shouldAddReversed(FD->getDeclName().getCXXOverloadedOperator()))
+ Sema &S, ArrayRef<Expr *> OriginalArgs, FunctionDecl *FD) {
+ auto Op = FD->getOverloadedOperator();
+ if (!allowsReversed(Op))
return false;
+ if (Op == OverloadedOperatorKind::OO_EqualEqual) {
+ assert(OriginalArgs.size() == 2);
+ if (!shouldAddReversedEqEq(
+ S, OpLoc, /*FirstOperand in reversed args*/ OriginalArgs[1], FD))
+ return false;
+ }
// Don't bother adding a reversed candidate that can never be a better
// match than the non-reversed version.
- return FD->getNumParams() != 2 ||
- !Ctx.hasSameUnqualifiedType(FD->getParamDecl(0)->getType(),
- FD->getParamDecl(1)->getType()) ||
+ return FD->getNumNonObjectParams() != 2 ||
+ !S.Context.hasSameUnqualifiedType(FD->getParamDecl(0)->getType(),
+ FD->getParamDecl(1)->getType()) ||
FD->hasAttr<EnableIfAttr>();
}
@@ -984,8 +1082,7 @@ checkPlaceholderForOverload(Sema &S, Expr *&E,
/// checkArgPlaceholdersForOverload - Check a set of call operands for
/// placeholders.
-static bool checkArgPlaceholdersForOverload(Sema &S,
- MultiExprArg Args,
+static bool checkArgPlaceholdersForOverload(Sema &S, MultiExprArg Args,
UnbridgedCastsSet &unbridged) {
for (unsigned i = 0, e = Args.size(); i != e; ++i)
if (checkPlaceholderForOverload(S, Args[i], &unbridged))
@@ -1140,9 +1237,11 @@ Sema::CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &Old,
return Ovl_Overload;
}
-bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
- bool UseMemberUsingDeclRules, bool ConsiderCudaAttrs,
- bool ConsiderRequiresClauses) {
+static bool IsOverloadOrOverrideImpl(Sema &SemaRef, FunctionDecl *New,
+ FunctionDecl *Old,
+ bool UseMemberUsingDeclRules,
+ bool ConsiderCudaAttrs,
+ bool UseOverrideRules = false) {
// C++ [basic.start.main]p2: This function shall not be overloaded.
if (New->isMain())
return false;
@@ -1161,8 +1260,8 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
return true;
// Is the function New an overload of the function Old?
- QualType OldQType = Context.getCanonicalType(Old->getType());
- QualType NewQType = Context.getCanonicalType(New->getType());
+ QualType OldQType = SemaRef.Context.getCanonicalType(Old->getType());
+ QualType NewQType = SemaRef.Context.getCanonicalType(New->getType());
// Compare the signatures (C++ 1.3.10) of the two functions to
// determine whether they are overloads. If we find any mismatch
@@ -1174,80 +1273,199 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
isa<FunctionNoProtoType>(NewQType.getTypePtr()))
return false;
- const FunctionProtoType *OldType = cast<FunctionProtoType>(OldQType);
- const FunctionProtoType *NewType = cast<FunctionProtoType>(NewQType);
+ const auto *OldType = cast<FunctionProtoType>(OldQType);
+ const auto *NewType = cast<FunctionProtoType>(NewQType);
// The signature of a function includes the types of its
// parameters (C++ 1.3.10), which includes the presence or absence
// of the ellipsis; see C++ DR 357).
- if (OldQType != NewQType &&
- (OldType->getNumParams() != NewType->getNumParams() ||
- OldType->isVariadic() != NewType->isVariadic() ||
- !FunctionParamTypesAreEqual(OldType, NewType)))
+ if (OldQType != NewQType && OldType->isVariadic() != NewType->isVariadic())
return true;
- // C++ [temp.over.link]p4:
- // The signature of a function template consists of its function
- // signature, its return type and its template parameter list. The names
- // of the template parameters are significant only for establishing the
- // relationship between the template parameters and the rest of the
- // signature.
- //
- // We check the return type and template parameter lists for function
- // templates first; the remaining checks follow.
- //
- // However, we don't consider either of these when deciding whether
- // a member introduced by a shadow declaration is hidden.
- if (!UseMemberUsingDeclRules && NewTemplate &&
- (!TemplateParameterListsAreEqual(NewTemplate->getTemplateParameters(),
- OldTemplate->getTemplateParameters(),
- false, TPL_TemplateMatch) ||
- !Context.hasSameType(Old->getDeclaredReturnType(),
- New->getDeclaredReturnType())))
+ // For member-like friends, the enclosing class is part of the signature.
+ if ((New->isMemberLikeConstrainedFriend() ||
+ Old->isMemberLikeConstrainedFriend()) &&
+ !New->getLexicalDeclContext()->Equals(Old->getLexicalDeclContext()))
return true;
- // If the function is a class member, its signature includes the
- // cv-qualifiers (if any) and ref-qualifier (if any) on the function itself.
- //
- // As part of this, also check whether one of the member functions
- // is static, in which case they are not overloads (C++
- // 13.1p2). While not part of the definition of the signature,
- // this check is important to determine whether these functions
- // can be overloaded.
- CXXMethodDecl *OldMethod = dyn_cast<CXXMethodDecl>(Old);
- CXXMethodDecl *NewMethod = dyn_cast<CXXMethodDecl>(New);
- if (OldMethod && NewMethod &&
- !OldMethod->isStatic() && !NewMethod->isStatic()) {
- if (OldMethod->getRefQualifier() != NewMethod->getRefQualifier()) {
- if (!UseMemberUsingDeclRules &&
- (OldMethod->getRefQualifier() == RQ_None ||
- NewMethod->getRefQualifier() == RQ_None)) {
- // C++0x [over.load]p2:
- // - Member function declarations with the same name and the same
- // parameter-type-list as well as member function template
- // declarations with the same name, the same parameter-type-list, and
- // the same template parameter lists cannot be overloaded if any of
- // them, but not all, have a ref-qualifier (8.3.5).
- Diag(NewMethod->getLocation(), diag::err_ref_qualifier_overload)
- << NewMethod->getRefQualifier() << OldMethod->getRefQualifier();
- Diag(OldMethod->getLocation(), diag::note_previous_declaration);
- }
+ // Compare the parameter lists.
+ // This can only be done once we have establish that friend functions
+ // inhabit the same context, otherwise we might tried to instantiate
+ // references to non-instantiated entities during constraint substitution.
+ // GH78101.
+ if (NewTemplate) {
+ // C++ [temp.over.link]p4:
+ // The signature of a function template consists of its function
+ // signature, its return type and its template parameter list. The names
+ // of the template parameters are significant only for establishing the
+ // relationship between the template parameters and the rest of the
+ // signature.
+ //
+ // We check the return type and template parameter lists for function
+ // templates first; the remaining checks follow.
+ bool SameTemplateParameterList = SemaRef.TemplateParameterListsAreEqual(
+ NewTemplate, NewTemplate->getTemplateParameters(), OldTemplate,
+ OldTemplate->getTemplateParameters(), false, Sema::TPL_TemplateMatch);
+ bool SameReturnType = SemaRef.Context.hasSameType(
+ Old->getDeclaredReturnType(), New->getDeclaredReturnType());
+ // FIXME(GH58571): Match template parameter list even for non-constrained
+ // template heads. This currently ensures that the code prior to C++20 is
+ // not newly broken.
+ bool ConstraintsInTemplateHead =
+ NewTemplate->getTemplateParameters()->hasAssociatedConstraints() ||
+ OldTemplate->getTemplateParameters()->hasAssociatedConstraints();
+ // C++ [namespace.udecl]p11:
+ // The set of declarations named by a using-declarator that inhabits a
+ // class C does not include member functions and member function
+ // templates of a base class that "correspond" to (and thus would
+ // conflict with) a declaration of a function or function template in
+ // C.
+ // Comparing return types is not required for the "correspond" check to
+ // decide whether a member introduced by a shadow declaration is hidden.
+ if (UseMemberUsingDeclRules && ConstraintsInTemplateHead &&
+ !SameTemplateParameterList)
return true;
- }
+ if (!UseMemberUsingDeclRules &&
+ (!SameTemplateParameterList || !SameReturnType))
+ return true;
+ }
+
+ const auto *OldMethod = dyn_cast<CXXMethodDecl>(Old);
+ const auto *NewMethod = dyn_cast<CXXMethodDecl>(New);
+
+ int OldParamsOffset = 0;
+ int NewParamsOffset = 0;
+
+ // When determining if a method is an overload from a base class, act as if
+ // the implicit object parameter are of the same type.
+
+ auto NormalizeQualifiers = [&](const CXXMethodDecl *M, Qualifiers Q) {
+ if (M->isExplicitObjectMemberFunction())
+ return Q;
+
+ // We do not allow overloading based off of '__restrict'.
+ Q.removeRestrict();
// We may not have applied the implicit const for a constexpr member
// function yet (because we haven't yet resolved whether this is a static
// or non-static member function). Add it now, on the assumption that this
// is a redeclaration of OldMethod.
- auto OldQuals = OldMethod->getMethodQualifiers();
- auto NewQuals = NewMethod->getMethodQualifiers();
- if (!getLangOpts().CPlusPlus14 && NewMethod->isConstexpr() &&
+ if (!SemaRef.getLangOpts().CPlusPlus14 &&
+ (M->isConstexpr() || M->isConsteval()) &&
!isa<CXXConstructorDecl>(NewMethod))
- NewQuals.addConst();
- // We do not allow overloading based off of '__restrict'.
- OldQuals.removeRestrict();
- NewQuals.removeRestrict();
- if (OldQuals != NewQuals)
+ Q.addConst();
+ return Q;
+ };
+
+ auto CompareType = [&](QualType Base, QualType D) {
+ auto BS = Base.getNonReferenceType().getCanonicalType().split();
+ BS.Quals = NormalizeQualifiers(OldMethod, BS.Quals);
+
+ auto DS = D.getNonReferenceType().getCanonicalType().split();
+ DS.Quals = NormalizeQualifiers(NewMethod, DS.Quals);
+
+ if (BS.Quals != DS.Quals)
+ return false;
+
+ if (OldMethod->isImplicitObjectMemberFunction() &&
+ OldMethod->getParent() != NewMethod->getParent()) {
+ QualType ParentType =
+ SemaRef.Context.getTypeDeclType(OldMethod->getParent())
+ .getCanonicalType();
+ if (ParentType.getTypePtr() != BS.Ty)
+ return false;
+ BS.Ty = DS.Ty;
+ }
+
+ // FIXME: should we ignore some type attributes here?
+ if (BS.Ty != DS.Ty)
+ return false;
+
+ if (Base->isLValueReferenceType())
+ return D->isLValueReferenceType();
+ return Base->isRValueReferenceType() == D->isRValueReferenceType();
+ };
+
+ // If the function is a class member, its signature includes the
+ // cv-qualifiers (if any) and ref-qualifier (if any) on the function itself.
+ auto DiagnoseInconsistentRefQualifiers = [&]() {
+ if (SemaRef.LangOpts.CPlusPlus23)
+ return false;
+ if (OldMethod->getRefQualifier() == NewMethod->getRefQualifier())
+ return false;
+ if (OldMethod->isExplicitObjectMemberFunction() ||
+ NewMethod->isExplicitObjectMemberFunction())
+ return false;
+ if (!UseMemberUsingDeclRules && (OldMethod->getRefQualifier() == RQ_None ||
+ NewMethod->getRefQualifier() == RQ_None)) {
+ SemaRef.Diag(NewMethod->getLocation(), diag::err_ref_qualifier_overload)
+ << NewMethod->getRefQualifier() << OldMethod->getRefQualifier();
+ SemaRef.Diag(OldMethod->getLocation(), diag::note_previous_declaration);
+ return true;
+ }
+ return false;
+ };
+
+ if (OldMethod && OldMethod->isExplicitObjectMemberFunction())
+ OldParamsOffset++;
+ if (NewMethod && NewMethod->isExplicitObjectMemberFunction())
+ NewParamsOffset++;
+
+ if (OldType->getNumParams() - OldParamsOffset !=
+ NewType->getNumParams() - NewParamsOffset ||
+ !SemaRef.FunctionParamTypesAreEqual(
+ {OldType->param_type_begin() + OldParamsOffset,
+ OldType->param_type_end()},
+ {NewType->param_type_begin() + NewParamsOffset,
+ NewType->param_type_end()},
+ nullptr)) {
+ return true;
+ }
+
+ if (OldMethod && NewMethod && !OldMethod->isStatic() &&
+ !OldMethod->isStatic()) {
+ bool HaveCorrespondingObjectParameters = [&](const CXXMethodDecl *Old,
+ const CXXMethodDecl *New) {
+ auto NewObjectType = New->getFunctionObjectParameterReferenceType();
+ auto OldObjectType = Old->getFunctionObjectParameterReferenceType();
+
+ auto IsImplicitWithNoRefQual = [](const CXXMethodDecl *F) {
+ return F->getRefQualifier() == RQ_None &&
+ !F->isExplicitObjectMemberFunction();
+ };
+
+ if (IsImplicitWithNoRefQual(Old) != IsImplicitWithNoRefQual(New) &&
+ CompareType(OldObjectType.getNonReferenceType(),
+ NewObjectType.getNonReferenceType()))
+ return true;
+ return CompareType(OldObjectType, NewObjectType);
+ }(OldMethod, NewMethod);
+
+ if (!HaveCorrespondingObjectParameters) {
+ if (DiagnoseInconsistentRefQualifiers())
+ return true;
+ // CWG2554
+ // and, if at least one is an explicit object member function, ignoring
+ // object parameters
+ if (!UseOverrideRules || (!NewMethod->isExplicitObjectMemberFunction() &&
+ !OldMethod->isExplicitObjectMemberFunction()))
+ return true;
+ }
+ }
+
+ if (!UseOverrideRules) {
+ Expr *NewRC = New->getTrailingRequiresClause(),
+ *OldRC = Old->getTrailingRequiresClause();
+ if ((NewRC != nullptr) != (OldRC != nullptr))
+ return true;
+
+ if (NewRC && !SemaRef.AreConstraintExpressionsEqual(Old, OldRC, New, NewRC))
+ return true;
+ }
+
+ if (NewMethod && OldMethod && OldMethod->isImplicitObjectMemberFunction() &&
+ NewMethod->isImplicitObjectMemberFunction()) {
+ if (DiagnoseInconsistentRefQualifiers())
return true;
}
@@ -1269,20 +1487,20 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
if (NewI == NewE || OldI == OldE)
return true;
llvm::FoldingSetNodeID NewID, OldID;
- NewI->getCond()->Profile(NewID, Context, true);
- OldI->getCond()->Profile(OldID, Context, true);
+ NewI->getCond()->Profile(NewID, SemaRef.Context, true);
+ OldI->getCond()->Profile(OldID, SemaRef.Context, true);
if (NewID != OldID)
return true;
}
- if (getLangOpts().CUDA && ConsiderCudaAttrs) {
+ if (SemaRef.getLangOpts().CUDA && ConsiderCudaAttrs) {
// Don't allow overloading of destructors. (In theory we could, but it
// would be a giant change to clang.)
if (!isa<CXXDestructorDecl>(New)) {
- CUDAFunctionTarget NewTarget = IdentifyCUDATarget(New),
- OldTarget = IdentifyCUDATarget(Old);
- if (NewTarget != CFT_InvalidTarget) {
- assert((OldTarget != CFT_InvalidTarget) &&
+ Sema::CUDAFunctionTarget NewTarget = SemaRef.IdentifyCUDATarget(New),
+ OldTarget = SemaRef.IdentifyCUDATarget(Old);
+ if (NewTarget != Sema::CFT_InvalidTarget) {
+ assert((OldTarget != Sema::CFT_InvalidTarget) &&
"Unexpected invalid target.");
// Allow overloading of functions with same signature and different CUDA
@@ -1293,27 +1511,24 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
}
}
- if (ConsiderRequiresClauses) {
- Expr *NewRC = New->getTrailingRequiresClause(),
- *OldRC = Old->getTrailingRequiresClause();
- if ((NewRC != nullptr) != (OldRC != nullptr))
- // RC are most certainly different - these are overloads.
- return true;
-
- if (NewRC) {
- llvm::FoldingSetNodeID NewID, OldID;
- NewRC->Profile(NewID, Context, /*Canonical=*/true);
- OldRC->Profile(OldID, Context, /*Canonical=*/true);
- if (NewID != OldID)
- // RCs are not equivalent - these are overloads.
- return true;
- }
- }
-
// The signatures match; this is not an overload.
return false;
}
+bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
+ bool UseMemberUsingDeclRules, bool ConsiderCudaAttrs) {
+ return IsOverloadOrOverrideImpl(*this, New, Old, UseMemberUsingDeclRules,
+ ConsiderCudaAttrs);
+}
+
+bool Sema::IsOverride(FunctionDecl *MD, FunctionDecl *BaseMD,
+ bool UseMemberUsingDeclRules, bool ConsiderCudaAttrs) {
+ return IsOverloadOrOverrideImpl(*this, MD, BaseMD,
+ /*UseMemberUsingDeclRules=*/false,
+ /*ConsiderCudaAttrs=*/true,
+ /*UseOverrideRules=*/true);
+}
+
/// Tries a user-defined conversion from From to ToType.
///
/// Produces an implicit conversion sequence for when a standard conversion
@@ -1619,8 +1834,9 @@ bool Sema::IsFunctionConversion(QualType FromType, QualType ToType,
///
/// \param ICK Will be set to the vector conversion kind, if this is a vector
/// conversion.
-static bool IsVectorConversion(Sema &S, QualType FromType,
- QualType ToType, ImplicitConversionKind &ICK) {
+static bool IsVectorConversion(Sema &S, QualType FromType, QualType ToType,
+ ImplicitConversionKind &ICK, Expr *From,
+ bool InOverloadResolution, bool CStyle) {
// We need at least one of these types to be a vector type to have a vector
// conversion.
if (!ToType->isVectorType() && !FromType->isVectorType())
@@ -1644,13 +1860,22 @@ static bool IsVectorConversion(Sema &S, QualType FromType,
}
}
- if (ToType->isSizelessBuiltinType() || FromType->isSizelessBuiltinType())
+ if (ToType->isSVESizelessBuiltinType() ||
+ FromType->isSVESizelessBuiltinType())
if (S.Context.areCompatibleSveTypes(FromType, ToType) ||
S.Context.areLaxCompatibleSveTypes(FromType, ToType)) {
ICK = ICK_SVE_Vector_Conversion;
return true;
}
+ if (ToType->isRVVSizelessBuiltinType() ||
+ FromType->isRVVSizelessBuiltinType())
+ if (S.Context.areCompatibleRVVTypes(FromType, ToType) ||
+ S.Context.areLaxCompatibleRVVTypes(FromType, ToType)) {
+ ICK = ICK_RVV_Vector_Conversion;
+ return true;
+ }
+
// We can perform the conversion between vector types in the following cases:
// 1)vector types are equivalent AltiVec and GCC vector types
// 2)lax vector conversions are permitted and the vector types are of the
@@ -1662,6 +1887,14 @@ static bool IsVectorConversion(Sema &S, QualType FromType,
if (S.Context.areCompatibleVectorTypes(FromType, ToType) ||
(S.isLaxVectorConversion(FromType, ToType) &&
!ToType->hasAttr(attr::ArmMveStrictPolymorphism))) {
+ if (S.getASTContext().getTargetInfo().getTriple().isPPC() &&
+ S.isLaxVectorConversion(FromType, ToType) &&
+ S.anyAltivecTypes(FromType, ToType) &&
+ !S.Context.areCompatibleVectorTypes(FromType, ToType) &&
+ !InOverloadResolution && !CStyle) {
+ S.Diag(From->getBeginLoc(), diag::warn_deprecated_lax_vec_conv_all)
+ << FromType << ToType;
+ }
ICK = ICK_Vector_Conversion;
return true;
}
@@ -1734,7 +1967,8 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
// fact that non-static member functions *must* have such an address-of
// expression.
CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn);
- if (Method && !Method->isStatic()) {
+ if (Method && !Method->isStatic() &&
+ !Method->isExplicitObjectMemberFunction()) {
assert(isa<UnaryOperator>(From->IgnoreParens()) &&
"Non-unary operator on non-static member address");
assert(cast<UnaryOperator>(From->IgnoreParens())->getOpcode()
@@ -1749,13 +1983,6 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
"Non-address-of operator for overloaded function expression");
FromType = S.Context.getPointerType(FromType);
}
-
- // Check that we've computed the proper type after overload resolution.
- // FIXME: FixOverloadedFunctionReference has side-effects; we shouldn't
- // be calling it from within an NDEBUG block.
- assert(S.Context.hasSameType(
- FromType,
- S.FixOverloadedFunctionReference(From, AccessPair, Fn)->getType()));
} else {
return false;
}
@@ -1869,24 +2096,28 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
SCS.Second = ICK_Complex_Real;
FromType = ToType.getUnqualifiedType();
} else if (FromType->isRealFloatingType() && ToType->isRealFloatingType()) {
- // FIXME: disable conversions between long double and __float128 if
- // their representation is different until there is back end support
+ // FIXME: disable conversions between long double, __ibm128 and __float128
+ // if their representation is different until there is back end support
// We of course allow this conversion if long double is really double.
- // Conversions between bfloat and other floats are not permitted.
- if (FromType == S.Context.BFloat16Ty || ToType == S.Context.BFloat16Ty)
+ // Conversions between bfloat16 and float16 are currently not supported.
+ if ((FromType->isBFloat16Type() &&
+ (ToType->isFloat16Type() || ToType->isHalfType())) ||
+ (ToType->isBFloat16Type() &&
+ (FromType->isFloat16Type() || FromType->isHalfType())))
return false;
- if (&S.Context.getFloatTypeSemantics(FromType) !=
- &S.Context.getFloatTypeSemantics(ToType)) {
- bool Float128AndLongDouble = ((FromType == S.Context.Float128Ty &&
- ToType == S.Context.LongDoubleTy) ||
- (FromType == S.Context.LongDoubleTy &&
- ToType == S.Context.Float128Ty));
- if (Float128AndLongDouble &&
- (&S.Context.getFloatTypeSemantics(S.Context.LongDoubleTy) ==
- &llvm::APFloat::PPCDoubleDouble()))
- return false;
- }
+
+ // Conversions between IEEE-quad and IBM-extended semantics are not
+ // permitted.
+ const llvm::fltSemantics &FromSem =
+ S.Context.getFloatTypeSemantics(FromType);
+ const llvm::fltSemantics &ToSem = S.Context.getFloatTypeSemantics(ToType);
+ if ((&FromSem == &llvm::APFloat::PPCDoubleDouble() &&
+ &ToSem == &llvm::APFloat::IEEEquad()) ||
+ (&FromSem == &llvm::APFloat::IEEEquad() &&
+ &ToSem == &llvm::APFloat::PPCDoubleDouble()))
+ return false;
+
// Floating point conversions (C++ 4.8).
SCS.Second = ICK_Floating_Conversion;
FromType = ToType.getUnqualifiedType();
@@ -1894,9 +2125,6 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
ToType->isIntegralType(S.Context)) ||
(FromType->isIntegralOrUnscopedEnumerationType() &&
ToType->isRealFloatingType())) {
- // Conversions between bfloat and int are not permitted.
- if (FromType->isBFloat16Type() || ToType->isBFloat16Type())
- return false;
// Floating-integral conversions (C++ 4.9).
SCS.Second = ICK_Floating_Integral;
@@ -1916,7 +2144,8 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
InOverloadResolution, FromType)) {
// Pointer to member conversions (4.11).
SCS.Second = ICK_Pointer_Member;
- } else if (IsVectorConversion(S, FromType, ToType, SecondICK)) {
+ } else if (IsVectorConversion(S, FromType, ToType, SecondICK, From,
+ InOverloadResolution, CStyle)) {
SCS.Second = SecondICK;
FromType = ToType.getUnqualifiedType();
} else if (!S.getLangOpts().CPlusPlus &&
@@ -1948,6 +2177,9 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
From->isIntegerConstantExpr(S.getASTContext())) {
SCS.Second = ICK_Compatible_Conversion;
FromType = ToType;
+ } else if (ToType->isFixedPointType() || FromType->isFixedPointType()) {
+ SCS.Second = ICK_Fixed_Point_Conversion;
+ FromType = ToType;
} else {
// No second conversion required.
SCS.Second = ICK_Identity;
@@ -2071,9 +2303,9 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) {
// int can represent all the values of the source type; otherwise,
// the source rvalue can be converted to an rvalue of type unsigned
// int (C++ 4.5p1).
- if (FromType->isPromotableIntegerType() && !FromType->isBooleanType() &&
+ if (Context.isPromotableIntegerType(FromType) && !FromType->isBooleanType() &&
!FromType->isEnumeralType()) {
- if (// We can promote any signed, promotable integer type to an int
+ if ( // We can promote any signed, promotable integer type to an int
(FromType->isSignedIntegerType() ||
// We can promote any unsigned integer type whose size is
// less than int to an int.
@@ -2185,7 +2417,7 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) {
// compatibility.
if (From) {
if (FieldDecl *MemberDecl = From->getSourceBitField()) {
- Optional<llvm::APSInt> BitWidth;
+ std::optional<llvm::APSInt> BitWidth;
if (FromType->isIntegralType(Context) &&
(BitWidth =
MemberDecl->getBitWidth()->getIntegerConstantExpr(Context))) {
@@ -2237,7 +2469,8 @@ bool Sema::IsFloatingPointPromotion(QualType FromType, QualType ToType) {
(FromBuiltin->getKind() == BuiltinType::Float ||
FromBuiltin->getKind() == BuiltinType::Double) &&
(ToBuiltin->getKind() == BuiltinType::LongDouble ||
- ToBuiltin->getKind() == BuiltinType::Float128))
+ ToBuiltin->getKind() == BuiltinType::Float128 ||
+ ToBuiltin->getKind() == BuiltinType::Ibm128))
return true;
// Half can be promoted to float.
@@ -2403,9 +2636,8 @@ bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
if (FromType->isObjCObjectPointerType() && ToPointeeType->isVoidType() &&
!getLangOpts().ObjCAutoRefCount) {
ConvertedType = BuildSimilarlyQualifiedPointerType(
- FromType->getAs<ObjCObjectPointerType>(),
- ToPointeeType,
- ToType, Context);
+ FromType->castAs<ObjCObjectPointerType>(), ToPointeeType, ToType,
+ Context);
return true;
}
const PointerType *FromTypePtr = FromType->getAs<PointerType>();
@@ -2953,30 +3185,68 @@ void Sema::HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
}
/// FunctionParamTypesAreEqual - This routine checks two function proto types
-/// for equality of their argument types. Caller has already checked that
-/// they have same number of arguments. If the parameters are different,
+/// for equality of their parameter types. Caller has already checked that
+/// they have same number of parameters. If the parameters are different,
/// ArgPos will have the parameter index of the first different parameter.
-bool Sema::FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
- const FunctionProtoType *NewType,
- unsigned *ArgPos) {
- for (FunctionProtoType::param_type_iterator O = OldType->param_type_begin(),
- N = NewType->param_type_begin(),
- E = OldType->param_type_end();
- O && (O != E); ++O, ++N) {
+/// If `Reversed` is true, the parameters of `NewType` will be compared in
+/// reverse order. That's useful if one of the functions is being used as a C++20
+/// synthesized operator overload with a reversed parameter order.
+bool Sema::FunctionParamTypesAreEqual(ArrayRef<QualType> Old,
+ ArrayRef<QualType> New, unsigned *ArgPos,
+ bool Reversed) {
+ assert(llvm::size(Old) == llvm::size(New) &&
+ "Can't compare parameters of functions with different number of "
+ "parameters!");
+
+ for (auto &&[Idx, Type] : llvm::enumerate(Old)) {
+ // Reverse iterate over the parameters of `OldType` if `Reversed` is true.
+ size_t J = Reversed ? (llvm::size(New) - Idx - 1) : Idx;
+
// Ignore address spaces in pointee type. This is to disallow overloading
// on __ptr32/__ptr64 address spaces.
- QualType Old = Context.removePtrSizeAddrSpace(O->getUnqualifiedType());
- QualType New = Context.removePtrSizeAddrSpace(N->getUnqualifiedType());
+ QualType OldType =
+ Context.removePtrSizeAddrSpace(Type.getUnqualifiedType());
+ QualType NewType =
+ Context.removePtrSizeAddrSpace((New.begin() + J)->getUnqualifiedType());
- if (!Context.hasSameType(Old, New)) {
+ if (!Context.hasSameType(OldType, NewType)) {
if (ArgPos)
- *ArgPos = O - OldType->param_type_begin();
+ *ArgPos = Idx;
return false;
}
}
return true;
}
+bool Sema::FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
+ const FunctionProtoType *NewType,
+ unsigned *ArgPos, bool Reversed) {
+ return FunctionParamTypesAreEqual(OldType->param_types(),
+ NewType->param_types(), ArgPos, Reversed);
+}
+
+bool Sema::FunctionNonObjectParamTypesAreEqual(const FunctionDecl *OldFunction,
+ const FunctionDecl *NewFunction,
+ unsigned *ArgPos,
+ bool Reversed) {
+
+ if (OldFunction->getNumNonObjectParams() !=
+ NewFunction->getNumNonObjectParams())
+ return false;
+
+ unsigned OldIgnore =
+ unsigned(OldFunction->hasCXXExplicitFunctionObjectParameter());
+ unsigned NewIgnore =
+ unsigned(NewFunction->hasCXXExplicitFunctionObjectParameter());
+
+ auto *OldPT = cast<FunctionProtoType>(OldFunction->getFunctionType());
+ auto *NewPT = cast<FunctionProtoType>(NewFunction->getFunctionType());
+
+ return FunctionParamTypesAreEqual(OldPT->param_types().slice(OldIgnore),
+ NewPT->param_types().slice(NewIgnore),
+ ArgPos, Reversed);
+}
+
/// CheckPointerConversion - Check the pointer conversion from the
/// expression From to the type ToType. This routine checks for
/// ambiguous or inaccessible derived-to-base pointer
@@ -3196,9 +3466,8 @@ static bool isQualificationConversionStep(QualType FromType, QualType ToType,
Qualifiers FromQuals = FromType.getQualifiers();
Qualifiers ToQuals = ToType.getQualifiers();
- // Ignore __unaligned qualifier if this type is void.
- if (ToType.getUnqualifiedType()->isVoidType())
- FromQuals.removeUnaligned();
+ // Ignore __unaligned qualifier.
+ FromQuals.removeUnaligned();
// Objective-C ARC:
// Check Objective-C lifetime conversions.
@@ -3244,6 +3513,19 @@ static bool isQualificationConversionStep(QualType FromType, QualType ToType,
!PreviousToQualsIncludeConst)
return false;
+ // The following wording is from C++20, where the result of the conversion
+ // is T3, not T2.
+ // -- if [...] P1,i [...] is "array of unknown bound of", P3,i is
+ // "array of unknown bound of"
+ if (FromType->isIncompleteArrayType() && !ToType->isIncompleteArrayType())
+ return false;
+
+ // -- if the resulting P3,i is different from P1,i [...], then const is
+ // added to every cv 3_k for 0 < k < i.
+ if (!CStyle && FromType->isConstantArrayType() &&
+ ToType->isIncompleteArrayType() && !PreviousToQualsIncludeConst)
+ return false;
+
// Keep track of whether all prior cv-qualifiers in the "to" type
// include const.
PreviousToQualsIncludeConst =
@@ -3369,14 +3651,14 @@ IsInitializerListConstructorConversion(Sema &S, Expr *From, QualType ToType,
case OR_Success: {
// Record the standard conversion we used and the conversion function.
CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Best->Function);
- QualType ThisType = Constructor->getThisType();
+ QualType ThisType = Constructor->getFunctionObjectParameterType();
// Initializer lists don't have conversions as such.
User.Before.setAsIdentityConversion();
User.HadMultipleCandidates = HadMultipleCandidates;
User.ConversionFunction = Constructor;
User.FoundConversionFunction = Best->FoundDecl;
User.After.setAsIdentityConversion();
- User.After.setFromType(ThisType->castAs<PointerType>()->getPointeeType());
+ User.After.setFromType(ThisType);
User.After.setAllToTypes(ToType);
return Result;
}
@@ -3486,7 +3768,7 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
if (Info.ConstructorTmpl)
S.AddTemplateOverloadCandidate(
Info.ConstructorTmpl, Info.FoundDecl,
- /*ExplicitArgs*/ nullptr, llvm::makeArrayRef(Args, NumArgs),
+ /*ExplicitArgs*/ nullptr, llvm::ArrayRef(Args, NumArgs),
CandidateSet, SuppressUserConversions,
/*PartialOverloading*/ false,
AllowExplicit == AllowedExplicit::All);
@@ -3494,8 +3776,8 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
// Allow one user-defined conversion when user specifies a
// From->ToType conversion via an static cast (c-style, etc).
S.AddOverloadCandidate(Info.Constructor, Info.FoundDecl,
- llvm::makeArrayRef(Args, NumArgs),
- CandidateSet, SuppressUserConversions,
+ llvm::ArrayRef(Args, NumArgs), CandidateSet,
+ SuppressUserConversions,
/*PartialOverloading*/ false,
AllowExplicit == AllowedExplicit::All);
}
@@ -3556,7 +3838,6 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
// sequence converts the source type to the type required by
// the argument of the constructor.
//
- QualType ThisType = Constructor->getThisType();
if (isa<InitListExpr>(From)) {
// Initializer lists don't have conversions as such.
User.Before.setAsIdentityConversion();
@@ -3572,7 +3853,7 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
User.ConversionFunction = Constructor;
User.FoundConversionFunction = Best->FoundDecl;
User.After.setAsIdentityConversion();
- User.After.setFromType(ThisType->castAs<PointerType>()->getPointeeType());
+ User.After.setFromType(Constructor->getFunctionObjectParameterType());
User.After.setAllToTypes(ToType);
return Result;
}
@@ -3703,8 +3984,7 @@ compareConversionFunctions(Sema &S, FunctionDecl *Function1,
CallingConv Conv2CC = Conv2FuncRet->getCallConv();
CXXMethodDecl *CallOp = Conv2->getParent()->getLambdaCallOperator();
- const FunctionProtoType *CallOpProto =
- CallOp->getType()->getAs<FunctionProtoType>();
+ const auto *CallOpProto = CallOp->getType()->castAs<FunctionProtoType>();
CallingConv CallOpCC =
CallOp->getType()->castAs<FunctionType>()->getCallConv();
@@ -3775,7 +4055,9 @@ CompareImplicitConversionSequences(Sema &S, SourceLocation Loc,
if (S.getLangOpts().CPlusPlus11 && !S.getLangOpts().WritableStrings &&
hasDeprecatedStringLiteralToCharPtrConversion(ICS1) !=
- hasDeprecatedStringLiteralToCharPtrConversion(ICS2))
+ hasDeprecatedStringLiteralToCharPtrConversion(ICS2) &&
+ // Ill-formedness must not differ
+ ICS1.isBad() == ICS2.isBad())
return hasDeprecatedStringLiteralToCharPtrConversion(ICS1)
? ImplicitConversionSequence::Worse
: ImplicitConversionSequence::Better;
@@ -3801,16 +4083,45 @@ CompareImplicitConversionSequences(Sema &S, SourceLocation Loc,
// list-initialization sequence L2 if:
// - L1 converts to std::initializer_list<X> for some X and L2 does not, or,
// if not that,
- // - L1 converts to type "array of N1 T", L2 converts to type "array of N2 T",
- // and N1 is smaller than N2.,
+ // — L1 and L2 convert to arrays of the same element type, and either the
+ // number of elements n_1 initialized by L1 is less than the number of
+ // elements n_2 initialized by L2, or (C++20) n_1 = n_2 and L2 converts to
+ // an array of unknown bound and L1 does not,
// even if one of the other rules in this paragraph would otherwise apply.
if (!ICS1.isBad()) {
- if (ICS1.isStdInitializerListElement() &&
- !ICS2.isStdInitializerListElement())
- return ImplicitConversionSequence::Better;
- if (!ICS1.isStdInitializerListElement() &&
- ICS2.isStdInitializerListElement())
- return ImplicitConversionSequence::Worse;
+ bool StdInit1 = false, StdInit2 = false;
+ if (ICS1.hasInitializerListContainerType())
+ StdInit1 = S.isStdInitializerList(ICS1.getInitializerListContainerType(),
+ nullptr);
+ if (ICS2.hasInitializerListContainerType())
+ StdInit2 = S.isStdInitializerList(ICS2.getInitializerListContainerType(),
+ nullptr);
+ if (StdInit1 != StdInit2)
+ return StdInit1 ? ImplicitConversionSequence::Better
+ : ImplicitConversionSequence::Worse;
+
+ if (ICS1.hasInitializerListContainerType() &&
+ ICS2.hasInitializerListContainerType())
+ if (auto *CAT1 = S.Context.getAsConstantArrayType(
+ ICS1.getInitializerListContainerType()))
+ if (auto *CAT2 = S.Context.getAsConstantArrayType(
+ ICS2.getInitializerListContainerType())) {
+ if (S.Context.hasSameUnqualifiedType(CAT1->getElementType(),
+ CAT2->getElementType())) {
+ // Both to arrays of the same element type
+ if (CAT1->getSize() != CAT2->getSize())
+ // Different sized, the smaller wins
+ return CAT1->getSize().ult(CAT2->getSize())
+ ? ImplicitConversionSequence::Better
+ : ImplicitConversionSequence::Worse;
+ if (ICS1.isInitializerListOfIncompleteArray() !=
+ ICS2.isInitializerListOfIncompleteArray())
+ // One is incomplete, it loses
+ return ICS2.isInitializerListOfIncompleteArray()
+ ? ImplicitConversionSequence::Better
+ : ImplicitConversionSequence::Worse;
+ }
+ }
}
if (ICS1.isStandard())
@@ -4166,6 +4477,20 @@ CompareStandardConversionSequences(Sema &S, SourceLocation Loc,
: ImplicitConversionSequence::Worse;
}
+ if (SCS1.Second == ICK_RVV_Vector_Conversion &&
+ SCS2.Second == ICK_RVV_Vector_Conversion) {
+ bool SCS1IsCompatibleRVVVectorConversion =
+ S.Context.areCompatibleRVVTypes(SCS1.getFromType(), SCS1.getToType(2));
+ bool SCS2IsCompatibleRVVVectorConversion =
+ S.Context.areCompatibleRVVTypes(SCS2.getFromType(), SCS2.getToType(2));
+
+ if (SCS1IsCompatibleRVVVectorConversion !=
+ SCS2IsCompatibleRVVVectorConversion)
+ return SCS1IsCompatibleRVVVectorConversion
+ ? ImplicitConversionSequence::Better
+ : ImplicitConversionSequence::Worse;
+ }
+
return ImplicitConversionSequence::Indistinguishable;
}
@@ -4176,12 +4501,15 @@ static ImplicitConversionSequence::CompareKind
CompareQualificationConversions(Sema &S,
const StandardConversionSequence& SCS1,
const StandardConversionSequence& SCS2) {
- // C++ 13.3.3.2p3:
+ // C++ [over.ics.rank]p3:
// -- S1 and S2 differ only in their qualification conversion and
- // yield similar types T1 and T2 (C++ 4.4), respectively, and the
- // cv-qualification signature of type T1 is a proper subset of
- // the cv-qualification signature of type T2, and S1 is not the
+ // yield similar types T1 and T2 (C++ 4.4), respectively, [...]
+ // [C++98]
+ // [...] and the cv-qualification signature of type T1 is a proper subset
+ // of the cv-qualification signature of type T2, and S1 is not the
// deprecated string literal array-to-pointer conversion (4.2).
+ // [C++2a]
+ // [...] where T1 can be converted to T2 by a qualification conversion.
if (SCS1.First != SCS2.First || SCS1.Second != SCS2.Second ||
SCS1.Third != SCS2.Third || SCS1.Third != ICK_Qualification)
return ImplicitConversionSequence::Indistinguishable;
@@ -4202,79 +4530,35 @@ CompareQualificationConversions(Sema &S,
if (UnqualT1 == UnqualT2)
return ImplicitConversionSequence::Indistinguishable;
- ImplicitConversionSequence::CompareKind Result
- = ImplicitConversionSequence::Indistinguishable;
+ // Don't ever prefer a standard conversion sequence that uses the deprecated
+ // string literal array to pointer conversion.
+ bool CanPick1 = !SCS1.DeprecatedStringLiteralToCharPtr;
+ bool CanPick2 = !SCS2.DeprecatedStringLiteralToCharPtr;
// Objective-C++ ARC:
// Prefer qualification conversions not involving a change in lifetime
- // to qualification conversions that do not change lifetime.
- if (SCS1.QualificationIncludesObjCLifetime !=
- SCS2.QualificationIncludesObjCLifetime) {
- Result = SCS1.QualificationIncludesObjCLifetime
- ? ImplicitConversionSequence::Worse
- : ImplicitConversionSequence::Better;
- }
-
- while (S.Context.UnwrapSimilarTypes(T1, T2)) {
- // Within each iteration of the loop, we check the qualifiers to
- // determine if this still looks like a qualification
- // conversion. Then, if all is well, we unwrap one more level of
- // pointers or pointers-to-members and do it all again
- // until there are no more pointers or pointers-to-members left
- // to unwrap. This essentially mimics what
- // IsQualificationConversion does, but here we're checking for a
- // strict subset of qualifiers.
- if (T1.getQualifiers().withoutObjCLifetime() ==
- T2.getQualifiers().withoutObjCLifetime())
- // The qualifiers are the same, so this doesn't tell us anything
- // about how the sequences rank.
- // ObjC ownership quals are omitted above as they interfere with
- // the ARC overload rule.
- ;
- else if (T2.isMoreQualifiedThan(T1)) {
- // T1 has fewer qualifiers, so it could be the better sequence.
- if (Result == ImplicitConversionSequence::Worse)
- // Neither has qualifiers that are a subset of the other's
- // qualifiers.
- return ImplicitConversionSequence::Indistinguishable;
-
- Result = ImplicitConversionSequence::Better;
- } else if (T1.isMoreQualifiedThan(T2)) {
- // T2 has fewer qualifiers, so it could be the better sequence.
- if (Result == ImplicitConversionSequence::Better)
- // Neither has qualifiers that are a subset of the other's
- // qualifiers.
- return ImplicitConversionSequence::Indistinguishable;
-
- Result = ImplicitConversionSequence::Worse;
- } else {
- // Qualifiers are disjoint.
- return ImplicitConversionSequence::Indistinguishable;
- }
-
- // If the types after this point are equivalent, we're done.
- if (S.Context.hasSameUnqualifiedType(T1, T2))
- break;
- }
-
- // Check that the winning standard conversion sequence isn't using
- // the deprecated string literal array to pointer conversion.
- switch (Result) {
- case ImplicitConversionSequence::Better:
- if (SCS1.DeprecatedStringLiteralToCharPtr)
- Result = ImplicitConversionSequence::Indistinguishable;
- break;
-
- case ImplicitConversionSequence::Indistinguishable:
- break;
-
- case ImplicitConversionSequence::Worse:
- if (SCS2.DeprecatedStringLiteralToCharPtr)
- Result = ImplicitConversionSequence::Indistinguishable;
- break;
- }
+ // to qualification conversions that do change lifetime.
+ if (SCS1.QualificationIncludesObjCLifetime &&
+ !SCS2.QualificationIncludesObjCLifetime)
+ CanPick1 = false;
+ if (SCS2.QualificationIncludesObjCLifetime &&
+ !SCS1.QualificationIncludesObjCLifetime)
+ CanPick2 = false;
- return Result;
+ bool ObjCLifetimeConversion;
+ if (CanPick1 &&
+ !S.IsQualificationConversion(T1, T2, false, ObjCLifetimeConversion))
+ CanPick1 = false;
+ // FIXME: In Objective-C ARC, we can have qualification conversions in both
+ // directions, so we can't short-cut this second check in general.
+ if (CanPick2 &&
+ !S.IsQualificationConversion(T2, T1, false, ObjCLifetimeConversion))
+ CanPick2 = false;
+
+ if (CanPick1 != CanPick2)
+ return CanPick1 ? ImplicitConversionSequence::Better
+ : ImplicitConversionSequence::Worse;
+ return ImplicitConversionSequence::Indistinguishable;
}
/// CompareDerivedToBaseConversions - Compares two standard conversion
@@ -4486,15 +4770,6 @@ CompareDerivedToBaseConversions(Sema &S, SourceLocation Loc,
return ImplicitConversionSequence::Indistinguishable;
}
-/// Determine whether the given type is valid, e.g., it is not an invalid
-/// C++ class.
-static bool isTypeValid(QualType T) {
- if (CXXRecordDecl *Record = T->getAsCXXRecordDecl())
- return !Record->isInvalidDecl();
-
- return true;
-}
-
static QualType withoutUnaligned(ASTContext &Ctx, QualType T) {
if (!T.getQualifiers().hasUnaligned())
return T;
@@ -4544,7 +4819,6 @@ Sema::CompareReferenceRelationship(SourceLocation Loc,
if (UnqualT1 == UnqualT2) {
// Nothing to do.
} else if (isCompleteType(Loc, OrigT2) &&
- isTypeValid(UnqualT1) && isTypeValid(UnqualT2) &&
IsDerivedFrom(Loc, UnqualT2, UnqualT1))
Conv |= ReferenceConversions::DerivedToBase;
else if (UnqualT1->isObjCObjectOrInterfaceType() &&
@@ -5009,9 +5283,27 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
ImplicitConversionSequence Result;
Result.setBad(BadConversionSequence::no_conversion, From, ToType);
- // We need a complete type for what follows. Incomplete types can never be
- // initialized from init lists.
- if (!S.isCompleteType(From->getBeginLoc(), ToType))
+ // We need a complete type for what follows. With one C++20 exception,
+ // incomplete types can never be initialized from init lists.
+ QualType InitTy = ToType;
+ const ArrayType *AT = S.Context.getAsArrayType(ToType);
+ if (AT && S.getLangOpts().CPlusPlus20)
+ if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT))
+ // C++20 allows list initialization of an incomplete array type.
+ InitTy = IAT->getElementType();
+ if (!S.isCompleteType(From->getBeginLoc(), InitTy))
+ return Result;
+
+ // C++20 [over.ics.list]/2:
+ // If the initializer list is a designated-initializer-list, a conversion
+ // is only possible if the parameter has an aggregate type
+ //
+ // FIXME: The exception for reference initialization here is not part of the
+ // language rules, but follow other compilers in adding it as a tentative DR
+ // resolution.
+ bool IsDesignatedInit = From->hasDesignatedInit();
+ if (!ToType->isAggregateType() && !ToType->isReferenceType() &&
+ IsDesignatedInit)
return Result;
// Per DR1467:
@@ -5024,7 +5316,7 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
// and the initializer list has a single element that is an
// appropriately-typed string literal (8.5.2 [dcl.init.string]), the
// implicit conversion sequence is the identity conversion.
- if (From->getNumInits() == 1) {
+ if (From->getNumInits() == 1 && !IsDesignatedInit) {
if (ToType->isRecordType()) {
QualType InitType = From->getInit(0)->getType();
if (S.Context.hasSameUnqualifiedType(InitType, ToType) ||
@@ -5035,18 +5327,16 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
AllowObjCWritebackConversion);
}
- if (const auto *AT = S.Context.getAsArrayType(ToType)) {
- if (S.IsStringInit(From->getInit(0), AT)) {
- InitializedEntity Entity =
+ if (AT && S.IsStringInit(From->getInit(0), AT)) {
+ InitializedEntity Entity =
InitializedEntity::InitializeParameter(S.Context, ToType,
/*Consumed=*/false);
- if (S.CanPerformCopyInitialization(Entity, From)) {
- Result.setStandard();
- Result.Standard.setAsIdentityConversion();
- Result.Standard.setFromType(ToType);
- Result.Standard.setAllToTypes(ToType);
- return Result;
- }
+ if (S.CanPerformCopyInitialization(Entity, From)) {
+ Result.setStandard();
+ Result.Standard.setAsIdentityConversion();
+ Result.Standard.setFromType(ToType);
+ Result.Standard.setAllToTypes(ToType);
+ return Result;
}
}
}
@@ -5064,43 +5354,89 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
// default-constructible, and if all the elements of the initializer list
// can be implicitly converted to X, the implicit conversion sequence is
// the worst conversion necessary to convert an element of the list to X.
- //
- // FIXME: We're missing a lot of these checks.
- bool toStdInitializerList = false;
- QualType X;
- if (ToType->isArrayType())
- X = S.Context.getAsArrayType(ToType)->getElementType();
- else
- toStdInitializerList = S.isStdInitializerList(ToType, &X);
- if (!X.isNull()) {
- for (unsigned i = 0, e = From->getNumInits(); i < e; ++i) {
- Expr *Init = From->getInit(i);
- ImplicitConversionSequence ICS =
- TryCopyInitialization(S, Init, X, SuppressUserConversions,
- InOverloadResolution,
- AllowObjCWritebackConversion);
- // If a single element isn't convertible, fail.
- if (ICS.isBad()) {
- Result = ICS;
- break;
+ if ((AT || S.isStdInitializerList(ToType, &InitTy)) && !IsDesignatedInit) {
+ unsigned e = From->getNumInits();
+ ImplicitConversionSequence DfltElt;
+ DfltElt.setBad(BadConversionSequence::no_conversion, QualType(),
+ QualType());
+ QualType ContTy = ToType;
+ bool IsUnbounded = false;
+ if (AT) {
+ InitTy = AT->getElementType();
+ if (ConstantArrayType const *CT = dyn_cast<ConstantArrayType>(AT)) {
+ if (CT->getSize().ult(e)) {
+ // Too many inits, fatally bad
+ Result.setBad(BadConversionSequence::too_many_initializers, From,
+ ToType);
+ Result.setInitializerListContainerType(ContTy, IsUnbounded);
+ return Result;
+ }
+ if (CT->getSize().ugt(e)) {
+ // Need an init from empty {}, is there one?
+ InitListExpr EmptyList(S.Context, From->getEndLoc(), std::nullopt,
+ From->getEndLoc());
+ EmptyList.setType(S.Context.VoidTy);
+ DfltElt = TryListConversion(
+ S, &EmptyList, InitTy, SuppressUserConversions,
+ InOverloadResolution, AllowObjCWritebackConversion);
+ if (DfltElt.isBad()) {
+ // No {} init, fatally bad
+ Result.setBad(BadConversionSequence::too_few_initializers, From,
+ ToType);
+ Result.setInitializerListContainerType(ContTy, IsUnbounded);
+ return Result;
+ }
+ }
+ } else {
+ assert(isa<IncompleteArrayType>(AT) && "Expected incomplete array");
+ IsUnbounded = true;
+ if (!e) {
+ // Cannot convert to zero-sized.
+ Result.setBad(BadConversionSequence::too_few_initializers, From,
+ ToType);
+ Result.setInitializerListContainerType(ContTy, IsUnbounded);
+ return Result;
+ }
+ llvm::APInt Size(S.Context.getTypeSize(S.Context.getSizeType()), e);
+ ContTy = S.Context.getConstantArrayType(InitTy, Size, nullptr,
+ ArraySizeModifier::Normal, 0);
}
- // Otherwise, look for the worst conversion.
- if (Result.isBad() || CompareImplicitConversionSequences(
- S, From->getBeginLoc(), ICS, Result) ==
- ImplicitConversionSequence::Worse)
- Result = ICS;
}
- // For an empty list, we won't have computed any conversion sequence.
- // Introduce the identity conversion sequence.
- if (From->getNumInits() == 0) {
- Result.setStandard();
- Result.Standard.setAsIdentityConversion();
- Result.Standard.setFromType(ToType);
- Result.Standard.setAllToTypes(ToType);
+ Result.setStandard();
+ Result.Standard.setAsIdentityConversion();
+ Result.Standard.setFromType(InitTy);
+ Result.Standard.setAllToTypes(InitTy);
+ for (unsigned i = 0; i < e; ++i) {
+ Expr *Init = From->getInit(i);
+ ImplicitConversionSequence ICS = TryCopyInitialization(
+ S, Init, InitTy, SuppressUserConversions, InOverloadResolution,
+ AllowObjCWritebackConversion);
+
+ // Keep the worse conversion seen so far.
+ // FIXME: Sequences are not totally ordered, so 'worse' can be
+ // ambiguous. CWG has been informed.
+ if (CompareImplicitConversionSequences(S, From->getBeginLoc(), ICS,
+ Result) ==
+ ImplicitConversionSequence::Worse) {
+ Result = ICS;
+ // Bail as soon as we find something unconvertible.
+ if (Result.isBad()) {
+ Result.setInitializerListContainerType(ContTy, IsUnbounded);
+ return Result;
+ }
+ }
}
- Result.setStdInitializerListElement(toStdInitializerList);
+ // If we needed any implicit {} initialization, compare that now.
+ // over.ics.list/6 indicates we should compare that conversion. Again CWG
+ // has been informed that this might not be the best thing.
+ if (!DfltElt.isBad() && CompareImplicitConversionSequences(
+ S, From->getEndLoc(), DfltElt, Result) ==
+ ImplicitConversionSequence::Worse)
+ Result = DfltElt;
+ // Record the type being initialized so that we may compare sequences
+ Result.setInitializerListContainerType(ContTy, IsUnbounded);
return Result;
}
@@ -5160,7 +5496,7 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
// If the initializer list has a single element that is reference-related
// to the parameter type, we initialize the reference from that.
- if (From->getNumInits() == 1) {
+ if (From->getNumInits() == 1 && !IsDesignatedInit) {
Expr *Init = From->getInit(0);
QualType T2 = Init->getType();
@@ -5288,16 +5624,55 @@ static bool TryCopyInitialization(const CanQualType FromQTy,
/// TryObjectArgumentInitialization - Try to initialize the object
/// parameter of the given member function (@c Method) from the
/// expression @p From.
-static ImplicitConversionSequence
-TryObjectArgumentInitialization(Sema &S, SourceLocation Loc, QualType FromType,
- Expr::Classification FromClassification,
- CXXMethodDecl *Method,
- CXXRecordDecl *ActingContext) {
+static ImplicitConversionSequence TryObjectArgumentInitialization(
+ Sema &S, SourceLocation Loc, QualType FromType,
+ Expr::Classification FromClassification, CXXMethodDecl *Method,
+ const CXXRecordDecl *ActingContext, bool InOverloadResolution = false,
+ QualType ExplicitParameterType = QualType(),
+ bool SuppressUserConversion = false) {
+
+ // We need to have an object of class type.
+ if (const auto *PT = FromType->getAs<PointerType>()) {
+ FromType = PT->getPointeeType();
+
+ // When we had a pointer, it's implicitly dereferenced, so we
+ // better have an lvalue.
+ assert(FromClassification.isLValue());
+ }
+
+ auto ValueKindFromClassification = [](Expr::Classification C) {
+ if (C.isPRValue())
+ return clang::VK_PRValue;
+ if (C.isXValue())
+ return VK_XValue;
+ return clang::VK_LValue;
+ };
+
+ if (Method->isExplicitObjectMemberFunction()) {
+ if (ExplicitParameterType.isNull())
+ ExplicitParameterType = Method->getFunctionObjectParameterReferenceType();
+ OpaqueValueExpr TmpExpr(Loc, FromType.getNonReferenceType(),
+ ValueKindFromClassification(FromClassification));
+ ImplicitConversionSequence ICS = TryCopyInitialization(
+ S, &TmpExpr, ExplicitParameterType, SuppressUserConversion,
+ /*InOverloadResolution=*/true, false);
+ if (ICS.isBad())
+ ICS.Bad.FromExpr = nullptr;
+ return ICS;
+ }
+
+ assert(FromType->isRecordType());
+
QualType ClassType = S.Context.getTypeDeclType(ActingContext);
- // [class.dtor]p2: A destructor can be invoked for a const, volatile or
- // const volatile object.
+ // C++98 [class.dtor]p2:
+ // A destructor can be invoked for a const, volatile or const volatile
+ // object.
+ // C++98 [over.match.funcs]p4:
+ // For static member functions, the implicit object parameter is considered
+ // to match any object (since if the function is selected, the object is
+ // discarded).
Qualifiers Quals = Method->getMethodQualifiers();
- if (isa<CXXDestructorDecl>(Method)) {
+ if (isa<CXXDestructorDecl>(Method) || Method->isStatic()) {
Quals.addConst();
Quals.addVolatile();
}
@@ -5308,17 +5683,6 @@ TryObjectArgumentInitialization(Sema &S, SourceLocation Loc, QualType FromType,
// to exit early.
ImplicitConversionSequence ICS;
- // We need to have an object of class type.
- if (const PointerType *PT = FromType->getAs<PointerType>()) {
- FromType = PT->getPointeeType();
-
- // When we had a pointer, it's implicitly dereferenced, so we
- // better have an lvalue.
- assert(FromClassification.isLValue());
- }
-
- assert(FromType->isRecordType());
-
// C++0x [over.match.funcs]p4:
// For non-static member functions, the type of the implicit object
// parameter is
@@ -5339,9 +5703,11 @@ TryObjectArgumentInitialization(Sema &S, SourceLocation Loc, QualType FromType,
// First check the qualifiers.
QualType FromTypeCanon = S.Context.getCanonicalType(FromType);
- if (ImplicitParamType.getCVRQualifiers()
- != FromTypeCanon.getLocalCVRQualifiers() &&
- !ImplicitParamType.isAtLeastAsQualifiedAs(FromTypeCanon)) {
+ // MSVC ignores __unaligned qualifier for overload candidates; do the same.
+ if (ImplicitParamType.getCVRQualifiers() !=
+ FromTypeCanon.getLocalCVRQualifiers() &&
+ !ImplicitParamType.isAtLeastAsQualifiedAs(
+ withoutUnaligned(S.Context, FromTypeCanon))) {
ICS.setBad(BadConversionSequence::bad_qualifiers,
FromType, ImplicitParamType);
return ICS;
@@ -5363,9 +5729,9 @@ TryObjectArgumentInitialization(Sema &S, SourceLocation Loc, QualType FromType,
ImplicitConversionKind SecondKind;
if (ClassTypeCanon == FromTypeCanon.getLocalUnqualifiedType()) {
SecondKind = ICK_Identity;
- } else if (S.IsDerivedFrom(Loc, FromType, ClassType))
+ } else if (S.IsDerivedFrom(Loc, FromType, ClassType)) {
SecondKind = ICK_Derived_To_Base;
- else {
+ } else if (!Method->isExplicitObjectMemberFunction()) {
ICS.setBad(BadConversionSequence::unrelated_class,
FromType, ImplicitParamType);
return ICS;
@@ -5415,14 +5781,11 @@ TryObjectArgumentInitialization(Sema &S, SourceLocation Loc, QualType FromType,
/// PerformObjectArgumentInitialization - Perform initialization of
/// the implicit object parameter for the given Method with the given
/// expression.
-ExprResult
-Sema::PerformObjectArgumentInitialization(Expr *From,
- NestedNameSpecifier *Qualifier,
- NamedDecl *FoundDecl,
- CXXMethodDecl *Method) {
+ExprResult Sema::PerformImplicitObjectArgumentInitialization(
+ Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl,
+ CXXMethodDecl *Method) {
QualType FromRecordType, DestType;
- QualType ImplicitParamRecordType =
- Method->getThisType()->castAs<PointerType>()->getPointeeType();
+ QualType ImplicitParamRecordType = Method->getFunctionObjectParameterType();
Expr::Classification FromClassification;
if (const PointerType *PT = From->getType()->getAs<PointerType>()) {
@@ -5479,6 +5842,10 @@ Sema::PerformObjectArgumentInitialization(Expr *From,
case BadConversionSequence::no_conversion:
case BadConversionSequence::unrelated_class:
break;
+
+ case BadConversionSequence::too_few_initializers:
+ case BadConversionSequence::too_many_initializers:
+ llvm_unreachable("Lists are not objects");
}
return Diag(From->getBeginLoc(), diag::err_member_function_call_bad_type)
@@ -5589,6 +5956,7 @@ static bool CheckConvertedConstantConversions(Sema &S,
case ICK_Derived_To_Base:
case ICK_Vector_Conversion:
case ICK_SVE_Vector_Conversion:
+ case ICK_RVV_Vector_Conversion:
case ICK_Vector_Splat:
case ICK_Complex_Real:
case ICK_Block_Pointer_Conversion:
@@ -5597,6 +5965,7 @@ static bool CheckConvertedConstantConversions(Sema &S,
case ICK_Zero_Event_Conversion:
case ICK_C_Only_Conversion:
case ICK_Incompatible_Pointer_Conversion:
+ case ICK_Fixed_Point_Conversion:
return false;
case ICK_Lvalue_To_Rvalue:
@@ -5615,14 +5984,14 @@ static bool CheckConvertedConstantConversions(Sema &S,
llvm_unreachable("unknown conversion kind");
}
-/// CheckConvertedConstantExpression - Check that the expression From is a
-/// converted constant expression of type T, perform the conversion and produce
-/// the converted expression, per C++11 [expr.const]p3.
-static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
- QualType T, APValue &Value,
+/// BuildConvertedConstantExpression - Check that the expression From is a
+/// converted constant expression of type T, perform the conversion but
+/// does not evaluate the expression
+static ExprResult BuildConvertedConstantExpression(Sema &S, Expr *From,
+ QualType T,
Sema::CCEKind CCE,
- bool RequireInt,
- NamedDecl *Dest) {
+ NamedDecl *Dest,
+ APValue &PreNarrowingValue) {
assert(S.getLangOpts().CPlusPlus11 &&
"converted constant expression outside C++11");
@@ -5635,7 +6004,7 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
// expression is a constant expression and the implicit conversion
// sequence contains only [... list of conversions ...].
ImplicitConversionSequence ICS =
- CCE == Sema::CCEK_ExplicitBool
+ (CCE == Sema::CCEK_ExplicitBool || CCE == Sema::CCEK_Noexcept)
? TryContextuallyConvertToBool(S, From)
: TryCopyInitialization(S, From, T,
/*SuppressUserConversions=*/false,
@@ -5662,7 +6031,8 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
return ExprError();
case ImplicitConversionSequence::EllipsisConversion:
- llvm_unreachable("ellipsis conversion in converted constant expression");
+ case ImplicitConversionSequence::StaticObjectArgumentConversion:
+ llvm_unreachable("bad conversion in converted constant expression");
}
// Check that we would only use permitted conversions.
@@ -5677,6 +6047,16 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
diag::err_typecheck_converted_constant_expression_indirect)
<< From->getType() << From->getSourceRange() << T;
}
+ // 'TryCopyInitialization' returns incorrect info for attempts to bind
+ // a reference to a bit-field due to C++ [over.ics.ref]p4. Namely,
+ // 'SCS->DirectBinding' occurs to be set to 'true' despite it is not
+ // the direct binding according to C++ [dcl.init.ref]p5. Hence, check this
+ // case explicitly.
+ if (From->refersToBitField() && T.getTypePtr()->isReferenceType()) {
+ return S.Diag(From->getBeginLoc(),
+ diag::err_reference_bind_to_bitfield_in_cce)
+ << From->getSourceRange();
+ }
// Usually we can simply apply the ImplicitConversionSequence we formed
// earlier, but that's not guaranteed to work when initializing an object of
@@ -5697,15 +6077,14 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
// C++2a [intro.execution]p5:
// A full-expression is [...] a constant-expression [...]
- Result =
- S.ActOnFinishFullExpr(Result.get(), From->getExprLoc(),
- /*DiscardedValue=*/false, /*IsConstexpr=*/true);
+ Result = S.ActOnFinishFullExpr(Result.get(), From->getExprLoc(),
+ /*DiscardedValue=*/false, /*IsConstexpr=*/true,
+ CCE == Sema::CCEKind::CCEK_TemplateArg);
if (Result.isInvalid())
return Result;
// Check for a narrowing implicit conversion.
bool ReturnPreNarrowingValue = false;
- APValue PreNarrowingValue;
QualType PreNarrowingType;
switch (SCS->getNarrowingKind(S.Context, Result.get(), PreNarrowingValue,
PreNarrowingType)) {
@@ -5739,12 +6118,69 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
<< CCE << /*Constant*/ 0 << From->getType() << T;
break;
}
+ if (!ReturnPreNarrowingValue)
+ PreNarrowingValue = {};
- if (Result.get()->isValueDependent()) {
+ return Result;
+}
+
+/// CheckConvertedConstantExpression - Check that the expression From is a
+/// converted constant expression of type T, perform the conversion and produce
+/// the converted expression, per C++11 [expr.const]p3.
+static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
+ QualType T, APValue &Value,
+ Sema::CCEKind CCE,
+ bool RequireInt,
+ NamedDecl *Dest) {
+
+ APValue PreNarrowingValue;
+ ExprResult Result = BuildConvertedConstantExpression(S, From, T, CCE, Dest,
+ PreNarrowingValue);
+ if (Result.isInvalid() || Result.get()->isValueDependent()) {
Value = APValue();
return Result;
}
+ return S.EvaluateConvertedConstantExpression(Result.get(), T, Value, CCE,
+ RequireInt, PreNarrowingValue);
+}
+ExprResult Sema::BuildConvertedConstantExpression(Expr *From, QualType T,
+ CCEKind CCE,
+ NamedDecl *Dest) {
+ APValue PreNarrowingValue;
+ return ::BuildConvertedConstantExpression(*this, From, T, CCE, Dest,
+ PreNarrowingValue);
+}
+
+ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T,
+ APValue &Value, CCEKind CCE,
+ NamedDecl *Dest) {
+ return ::CheckConvertedConstantExpression(*this, From, T, Value, CCE, false,
+ Dest);
+}
+
+ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T,
+ llvm::APSInt &Value,
+ CCEKind CCE) {
+ assert(T->isIntegralOrEnumerationType() && "unexpected converted const type");
+
+ APValue V;
+ auto R = ::CheckConvertedConstantExpression(*this, From, T, V, CCE, true,
+ /*Dest=*/nullptr);
+ if (!R.isInvalid() && !R.get()->isValueDependent())
+ Value = V.getInt();
+ return R;
+}
+
+/// EvaluateConvertedConstantExpression - Evaluate an Expression
+/// That is a converted constant expression
+/// (which was built with BuildConvertedConstantExpression)
+ExprResult
+Sema::EvaluateConvertedConstantExpression(Expr *E, QualType T, APValue &Value,
+ Sema::CCEKind CCE, bool RequireInt,
+ const APValue &PreNarrowingValue) {
+
+ ExprResult Result = E;
// Check the expression is a constant expression.
SmallVector<PartialDiagnosticAt, 8> Notes;
Expr::EvalResult Eval;
@@ -5758,7 +6194,7 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
else
Kind = ConstantExprKind::Normal;
- if (!Result.get()->EvaluateAsConstantExpr(Eval, S.Context, Kind) ||
+ if (!E->EvaluateAsConstantExpr(Eval, Context, Kind) ||
(RequireInt && !Eval.Val.isInt())) {
// The expression can't be folded, so we can't keep it at this position in
// the AST.
@@ -5768,8 +6204,16 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
if (Notes.empty()) {
// It's a constant expression.
- Expr *E = ConstantExpr::Create(S.Context, Result.get(), Value);
- if (ReturnPreNarrowingValue)
+ Expr *E = Result.get();
+ if (const auto *CE = dyn_cast<ConstantExpr>(E)) {
+ // We expect a ConstantExpr to have a value associated with it
+ // by this point.
+ assert(CE->getResultStorageKind() != ConstantResultStorageKind::None &&
+ "ConstantExpr has no value associated with it");
+ } else {
+ E = ConstantExpr::Create(Context, Result.get(), Value);
+ }
+ if (!PreNarrowingValue.isAbsent())
Value = std::move(PreNarrowingValue);
return E;
}
@@ -5778,42 +6222,21 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
// It's not a constant expression. Produce an appropriate diagnostic.
if (Notes.size() == 1 &&
Notes[0].second.getDiagID() == diag::note_invalid_subexpr_in_const_expr) {
- S.Diag(Notes[0].first, diag::err_expr_not_cce) << CCE;
+ Diag(Notes[0].first, diag::err_expr_not_cce) << CCE;
} else if (!Notes.empty() && Notes[0].second.getDiagID() ==
diag::note_constexpr_invalid_template_arg) {
Notes[0].second.setDiagID(diag::err_constexpr_invalid_template_arg);
for (unsigned I = 0; I < Notes.size(); ++I)
- S.Diag(Notes[I].first, Notes[I].second);
+ Diag(Notes[I].first, Notes[I].second);
} else {
- S.Diag(From->getBeginLoc(), diag::err_expr_not_cce)
- << CCE << From->getSourceRange();
+ Diag(E->getBeginLoc(), diag::err_expr_not_cce)
+ << CCE << E->getSourceRange();
for (unsigned I = 0; I < Notes.size(); ++I)
- S.Diag(Notes[I].first, Notes[I].second);
+ Diag(Notes[I].first, Notes[I].second);
}
return ExprError();
}
-ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T,
- APValue &Value, CCEKind CCE,
- NamedDecl *Dest) {
- return ::CheckConvertedConstantExpression(*this, From, T, Value, CCE, false,
- Dest);
-}
-
-ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T,
- llvm::APSInt &Value,
- CCEKind CCE) {
- assert(T->isIntegralOrEnumerationType() && "unexpected converted const type");
-
- APValue V;
- auto R = ::CheckConvertedConstantExpression(*this, From, T, V, CCE, true,
- /*Dest=*/nullptr);
- if (!R.isInvalid() && !R.get()->isValueDependent())
- Value = V.getInt();
- return R;
-}
-
-
/// dropPointerConversions - If the given standard conversion sequence
/// involves any pointer conversions, remove them. This may change
/// the result type of the conversion sequence.
@@ -5846,6 +6269,7 @@ TryContextuallyConvertToObjCPointer(Sema &S, Expr *From) {
case ImplicitConversionSequence::BadConversion:
case ImplicitConversionSequence::AmbiguousConversion:
case ImplicitConversionSequence::EllipsisConversion:
+ case ImplicitConversionSequence::StaticObjectArgumentConversion:
break;
case ImplicitConversionSequence::UserDefinedConversion:
@@ -5875,6 +6299,64 @@ ExprResult Sema::PerformContextuallyConvertToObjCPointer(Expr *From) {
return ExprResult();
}
+static QualType GetExplicitObjectType(Sema &S, const Expr *MemExprE) {
+ const Expr *Base = nullptr;
+ assert((isa<UnresolvedMemberExpr, MemberExpr>(MemExprE)) &&
+ "expected a member expression");
+
+ if (const auto M = dyn_cast<UnresolvedMemberExpr>(MemExprE);
+ M && !M->isImplicitAccess())
+ Base = M->getBase();
+ else if (const auto M = dyn_cast<MemberExpr>(MemExprE);
+ M && !M->isImplicitAccess())
+ Base = M->getBase();
+
+ QualType T = Base ? Base->getType() : S.getCurrentThisType();
+
+ if (T->isPointerType())
+ T = T->getPointeeType();
+
+ return T;
+}
+
+static Expr *GetExplicitObjectExpr(Sema &S, Expr *Obj,
+ const FunctionDecl *Fun) {
+ QualType ObjType = Obj->getType();
+ if (ObjType->isPointerType()) {
+ ObjType = ObjType->getPointeeType();
+ Obj = UnaryOperator::Create(S.getASTContext(), Obj, UO_Deref, ObjType,
+ VK_LValue, OK_Ordinary, SourceLocation(),
+ /*CanOverflow=*/false, FPOptionsOverride());
+ }
+ if (Obj->Classify(S.getASTContext()).isPRValue()) {
+ Obj = S.CreateMaterializeTemporaryExpr(
+ ObjType, Obj,
+ !Fun->getParamDecl(0)->getType()->isRValueReferenceType());
+ }
+ return Obj;
+}
+
+ExprResult Sema::InitializeExplicitObjectArgument(Sema &S, Expr *Obj,
+ FunctionDecl *Fun) {
+ Obj = GetExplicitObjectExpr(S, Obj, Fun);
+ return S.PerformCopyInitialization(
+ InitializedEntity::InitializeParameter(S.Context, Fun->getParamDecl(0)),
+ Obj->getExprLoc(), Obj);
+}
+
+static void PrepareExplicitObjectArgument(Sema &S, CXXMethodDecl *Method,
+ Expr *Object, MultiExprArg &Args,
+ SmallVectorImpl<Expr *> &NewArgs) {
+ assert(Method->isExplicitObjectMemberFunction() &&
+ "Method is not an explicit member function");
+ assert(NewArgs.empty() && "NewArgs should be empty");
+ NewArgs.reserve(Args.size() + 1);
+ Expr *This = GetExplicitObjectExpr(S, Object, Method);
+ NewArgs.push_back(This);
+ NewArgs.append(Args.begin(), Args.end());
+ Args = NewArgs;
+}
+
/// Determine whether the provided type is an integral type, or an enumeration
/// type of a permitted flavor.
bool Sema::ICEConvertDiagnoser::match(QualType T) {
@@ -6043,10 +6525,12 @@ ExprResult Sema::PerformContextualImplicitConversion(
From = result.get();
}
+ // Try converting the expression to an Lvalue first, to get rid of qualifiers.
+ ExprResult Converted = DefaultLvalueConversion(From);
+ QualType T = Converted.isUsable() ? Converted.get()->getType() : QualType();
// If the expression already has a matching type, we're golden.
- QualType T = From->getType();
if (Converter.match(T))
- return DefaultLvalueConversion(From);
+ return Converted;
// FIXME: Check for missing '()' if T is a function type?
@@ -6179,7 +6663,7 @@ ExprResult Sema::PerformContextualImplicitConversion(
HadMultipleCandidates,
ExplicitConversions))
return ExprError();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case OR_Deleted:
// We'll complain below about a non-integral condition type.
break;
@@ -6264,7 +6748,7 @@ void Sema::AddOverloadCandidate(
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions,
bool PartialOverloading, bool AllowExplicit, bool AllowExplicitConversions,
ADLCallKind IsADLCandidate, ConversionSequenceList EarlyConversions,
- OverloadCandidateParamOrder PO) {
+ OverloadCandidateParamOrder PO, bool AggregateCandidateDeduction) {
const FunctionProtoType *Proto
= dyn_cast<FunctionProtoType>(Function->getType()->getAs<FunctionType>());
assert(Proto && "Functions without a prototype cannot be overloaded");
@@ -6338,8 +6822,29 @@ void Sema::AddOverloadCandidate(
return;
}
- if (Function->isMultiVersion() && Function->hasAttr<TargetAttr>() &&
- !Function->getAttr<TargetAttr>()->isDefaultVersion()) {
+ // Functions with internal linkage are only viable in the same module unit.
+ if (getLangOpts().CPlusPlusModules && Function->isInAnotherModuleUnit()) {
+ /// FIXME: Currently, the semantics of linkage in clang is slightly
+ /// different from the semantics in C++ spec. In C++ spec, only names
+ /// have linkage. So that all entities of the same should share one
+ /// linkage. But in clang, different entities of the same could have
+ /// different linkage.
+ NamedDecl *ND = Function;
+ if (auto *SpecInfo = Function->getTemplateSpecializationInfo())
+ ND = SpecInfo->getTemplate();
+
+ if (ND->getFormalLinkage() == Linkage::Internal) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_module_mismatched;
+ return;
+ }
+ }
+
+ if (Function->isMultiVersion() &&
+ ((Function->hasAttr<TargetAttr>() &&
+ !Function->getAttr<TargetAttr>()->isDefaultVersion()) ||
+ (Function->hasAttr<TargetVersionAttr>() &&
+ !Function->getAttr<TargetVersionAttr>()->isDefaultVersion()))) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_non_default_multiversion_function;
return;
@@ -6397,7 +6902,8 @@ void Sema::AddOverloadCandidate(
// parameters is viable only if it has an ellipsis in its parameter
// list (8.3.5).
if (TooManyArguments(NumParams, Args.size(), PartialOverloading) &&
- !Proto->isVariadic()) {
+ !Proto->isVariadic() &&
+ shouldEnforceArgLimit(PartialOverloading, Function)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_too_many_arguments;
return;
@@ -6409,7 +6915,8 @@ void Sema::AddOverloadCandidate(
// parameter list is truncated on the right, so that there are
// exactly m parameters.
unsigned MinRequiredArgs = Function->getMinRequiredArguments();
- if (Args.size() < MinRequiredArgs && !PartialOverloading) {
+ if (!AggregateCandidateDeduction && Args.size() < MinRequiredArgs &&
+ !PartialOverloading) {
// Not enough arguments.
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_too_few_arguments;
@@ -6417,21 +6924,24 @@ void Sema::AddOverloadCandidate(
}
// (CUDA B.1): Check for invalid calls between targets.
- if (getLangOpts().CUDA)
- if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext))
- // Skip the check for callers that are implicit members, because in this
- // case we may not yet know what the member's target is; the target is
- // inferred for the member automatically, based on the bases and fields of
- // the class.
- if (!Caller->isImplicit() && !IsAllowedCUDACall(Caller, Function)) {
- Candidate.Viable = false;
- Candidate.FailureKind = ovl_fail_bad_target;
- return;
- }
+ if (getLangOpts().CUDA) {
+ const FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
+ // Skip the check for callers that are implicit members, because in this
+ // case we may not yet know what the member's target is; the target is
+ // inferred for the member automatically, based on the bases and fields of
+ // the class.
+ if (!(Caller && Caller->isImplicit()) &&
+ !IsAllowedCUDACall(Caller, Function)) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_target;
+ return;
+ }
+ }
if (Function->getTrailingRequiresClause()) {
ConstraintSatisfaction Satisfaction;
- if (CheckFunctionConstraints(Function, Satisfaction) ||
+ if (CheckFunctionConstraints(Function, Satisfaction, /*Loc*/ {},
+ /*ForOverloadResolution*/ true) ||
!Satisfaction.IsSatisfied) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_constraints_not_satisfied;
@@ -6581,7 +7091,7 @@ static bool convertArgsForAvailabilityChecks(
assert(!isa<CXXConstructorDecl>(Method) &&
"Shouldn't have `this` for ctors!");
assert(!Method->isStatic() && "Shouldn't have `this` for static methods!");
- ExprResult R = S.PerformObjectArgumentInitialization(
+ ExprResult R = S.PerformImplicitObjectArgumentInitialization(
ThisArg, /*Qualifier=*/nullptr, Method, Method);
if (R.isInvalid())
return false;
@@ -6657,7 +7167,7 @@ EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function,
// very difficult. Ideally, we should handle them more gracefully.
if (EIA->getCond()->isValueDependent() ||
!EIA->getCond()->EvaluateWithSubstitution(
- Result, Context, Function, llvm::makeArrayRef(ConvertedArgs)))
+ Result, Context, Function, llvm::ArrayRef(ConvertedArgs)))
return EIA;
if (!Result.isInt() || !Result.getInt().getBoolValue())
@@ -6829,7 +7339,7 @@ void Sema::AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType,
} else {
AddMethodCandidate(cast<CXXMethodDecl>(Decl), FoundDecl, ActingContext,
ObjectType, ObjectClassification, Args, CandidateSet,
- SuppressUserConversions, false, None, PO);
+ SuppressUserConversions, false, std::nullopt, PO);
}
}
@@ -6881,13 +7391,15 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
Candidate.IgnoreObjectArgument = false;
Candidate.ExplicitCallArguments = Args.size();
- unsigned NumParams = Proto->getNumParams();
+ unsigned NumParams = Method->getNumExplicitParams();
+ unsigned ExplicitOffset = Method->isExplicitObjectMemberFunction() ? 1 : 0;
// (C++ 13.3.2p2): A candidate function having fewer than m
// parameters is viable only if it has an ellipsis in its parameter
// list (8.3.5).
if (TooManyArguments(NumParams, Args.size(), PartialOverloading) &&
- !Proto->isVariadic()) {
+ !Proto->isVariadic() &&
+ shouldEnforceArgLimit(PartialOverloading, Method)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_too_many_arguments;
return;
@@ -6898,7 +7410,7 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
// (8.3.6). For the purposes of overload resolution, the
// parameter list is truncated on the right, so that there are
// exactly m parameters.
- unsigned MinRequiredArgs = Method->getMinRequiredArguments();
+ unsigned MinRequiredArgs = Method->getMinRequiredExplicitArguments();
if (Args.size() < MinRequiredArgs && !PartialOverloading) {
// Not enough arguments.
Candidate.Viable = false;
@@ -6908,17 +7420,27 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
Candidate.Viable = true;
- if (Method->isStatic() || ObjectType.isNull())
- // The implicit object argument is ignored.
+ unsigned FirstConvIdx = PO == OverloadCandidateParamOrder::Reversed ? 1 : 0;
+ if (ObjectType.isNull())
Candidate.IgnoreObjectArgument = true;
- else {
- unsigned ConvIdx = PO == OverloadCandidateParamOrder::Reversed ? 1 : 0;
+ else if (Method->isStatic()) {
+ // [over.best.ics.general]p8
+ // When the parameter is the implicit object parameter of a static member
+ // function, the implicit conversion sequence is a standard conversion
+ // sequence that is neither better nor worse than any other standard
+ // conversion sequence.
+ //
+ // This is a rule that was introduced in C++23 to support static lambdas. We
+ // apply it retroactively because we want to support static lambdas as an
+ // extension and it doesn't hurt previous code.
+ Candidate.Conversions[FirstConvIdx].setStaticObjectArgument();
+ } else {
// Determine the implicit conversion sequence for the object
// parameter.
- Candidate.Conversions[ConvIdx] = TryObjectArgumentInitialization(
+ Candidate.Conversions[FirstConvIdx] = TryObjectArgumentInitialization(
*this, CandidateSet.getLocation(), ObjectType, ObjectClassification,
- Method, ActingContext);
- if (Candidate.Conversions[ConvIdx].isBad()) {
+ Method, ActingContext, /*InOverloadResolution=*/true);
+ if (Candidate.Conversions[FirstConvIdx].isBad()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_conversion;
return;
@@ -6927,16 +7449,16 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
// (CUDA B.1): Check for invalid calls between targets.
if (getLangOpts().CUDA)
- if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext))
- if (!IsAllowedCUDACall(Caller, Method)) {
- Candidate.Viable = false;
- Candidate.FailureKind = ovl_fail_bad_target;
- return;
- }
+ if (!IsAllowedCUDACall(getCurFunctionDecl(/*AllowLambda=*/true), Method)) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_target;
+ return;
+ }
if (Method->getTrailingRequiresClause()) {
ConstraintSatisfaction Satisfaction;
- if (CheckFunctionConstraints(Method, Satisfaction) ||
+ if (CheckFunctionConstraints(Method, Satisfaction, /*Loc*/ {},
+ /*ForOverloadResolution*/ true) ||
!Satisfaction.IsSatisfied) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_constraints_not_satisfied;
@@ -6957,7 +7479,7 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
// exist for each argument an implicit conversion sequence
// (13.3.3.1) that converts that argument to the corresponding
// parameter of F.
- QualType ParamType = Proto->getParamType(ArgIdx);
+ QualType ParamType = Proto->getParamType(ArgIdx + ExplicitOffset);
Candidate.Conversions[ConvIdx]
= TryCopyInitialization(*this, Args[ArgIdx], ParamType,
SuppressUserConversions,
@@ -6985,8 +7507,11 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
return;
}
- if (Method->isMultiVersion() && Method->hasAttr<TargetAttr>() &&
- !Method->getAttr<TargetAttr>()->isDefaultVersion()) {
+ if (Method->isMultiVersion() &&
+ ((Method->hasAttr<TargetAttr>() &&
+ !Method->getAttr<TargetAttr>()->isDefaultVersion()) ||
+ (Method->hasAttr<TargetVersionAttr>() &&
+ !Method->getAttr<TargetVersionAttr>()->isDefaultVersion()))) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_non_default_multiversion_function;
}
@@ -7019,7 +7544,8 @@ void Sema::AddMethodTemplateCandidate(
ConversionSequenceList Conversions;
if (TemplateDeductionResult Result = DeduceTemplateArguments(
MethodTmpl, ExplicitTemplateArgs, Args, Specialization, Info,
- PartialOverloading, [&](ArrayRef<QualType> ParamTypes) {
+ PartialOverloading, /*AggregateDeductionCandidate=*/false, ObjectType,
+ ObjectClassification, [&](ArrayRef<QualType> ParamTypes) {
return CheckNonDependentConversions(
MethodTmpl, ParamTypes, Args, CandidateSet, Conversions,
SuppressUserConversions, ActingContext, ObjectType,
@@ -7072,7 +7598,7 @@ void Sema::AddTemplateOverloadCandidate(
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions,
bool PartialOverloading, bool AllowExplicit, ADLCallKind IsADLCandidate,
- OverloadCandidateParamOrder PO) {
+ OverloadCandidateParamOrder PO, bool AggregateCandidateDeduction) {
if (!CandidateSet.isNewCandidate(FunctionTemplate, PO))
return;
@@ -7102,7 +7628,10 @@ void Sema::AddTemplateOverloadCandidate(
ConversionSequenceList Conversions;
if (TemplateDeductionResult Result = DeduceTemplateArguments(
FunctionTemplate, ExplicitTemplateArgs, Args, Specialization, Info,
- PartialOverloading, [&](ArrayRef<QualType> ParamTypes) {
+ PartialOverloading, AggregateCandidateDeduction,
+ /*ObjectType=*/QualType(),
+ /*ObjectClassification=*/Expr::Classification(),
+ [&](ArrayRef<QualType> ParamTypes) {
return CheckNonDependentConversions(
FunctionTemplate, ParamTypes, Args, CandidateSet, Conversions,
SuppressUserConversions, nullptr, QualType(), {}, PO);
@@ -7138,7 +7667,8 @@ void Sema::AddTemplateOverloadCandidate(
AddOverloadCandidate(
Specialization, FoundDecl, Args, CandidateSet, SuppressUserConversions,
PartialOverloading, AllowExplicit,
- /*AllowExplicitConversions*/ false, IsADLCandidate, Conversions, PO);
+ /*AllowExplicitConversions=*/false, IsADLCandidate, Conversions, PO,
+ Info.AggregateDeductionCandidateHasMismatchedArity);
}
/// Check that implicit conversion sequences can be formed for each argument
@@ -7173,20 +7703,38 @@ bool Sema::CheckNonDependentConversions(
if (HasThisConversion && !cast<CXXMethodDecl>(FD)->isStatic() &&
!ObjectType.isNull()) {
unsigned ConvIdx = PO == OverloadCandidateParamOrder::Reversed ? 1 : 0;
- Conversions[ConvIdx] = TryObjectArgumentInitialization(
- *this, CandidateSet.getLocation(), ObjectType, ObjectClassification,
- Method, ActingContext);
- if (Conversions[ConvIdx].isBad())
- return true;
+ if (!FD->hasCXXExplicitFunctionObjectParameter() ||
+ !ParamTypes[0]->isDependentType()) {
+ Conversions[ConvIdx] = TryObjectArgumentInitialization(
+ *this, CandidateSet.getLocation(), ObjectType, ObjectClassification,
+ Method, ActingContext, /*InOverloadResolution=*/true,
+ FD->hasCXXExplicitFunctionObjectParameter() ? ParamTypes[0]
+ : QualType());
+ if (Conversions[ConvIdx].isBad())
+ return true;
+ }
}
+ unsigned Offset =
+ Method && Method->hasCXXExplicitFunctionObjectParameter() ? 1 : 0;
+
for (unsigned I = 0, N = std::min(ParamTypes.size(), Args.size()); I != N;
++I) {
- QualType ParamType = ParamTypes[I];
+ QualType ParamType = ParamTypes[I + Offset];
if (!ParamType->isDependentType()) {
- unsigned ConvIdx = PO == OverloadCandidateParamOrder::Reversed
- ? 0
- : (ThisConversions + I);
+ unsigned ConvIdx;
+ if (PO == OverloadCandidateParamOrder::Reversed) {
+ ConvIdx = Args.size() - 1 - I;
+ assert(Args.size() + ThisConversions == 2 &&
+ "number of args (including 'this') must be exactly 2 for "
+ "reversed order");
+ // For members, there would be only one arg 'Args[0]' whose ConvIdx
+ // would also be 0. 'this' got ConvIdx = 1 previously.
+ assert(!HasThisConversion || (ConvIdx == 0 && I == 0));
+ } else {
+ // For members, 'this' got ConvIdx = 0 previously.
+ ConvIdx = ThisConversions + I;
+ }
Conversions[ConvIdx]
= TryCopyInitialization(*this, Args[I], ParamType,
SuppressUserConversions,
@@ -7315,15 +7863,21 @@ void Sema::AddConversionCandidate(
//
// Determine the implicit conversion sequence for the implicit
// object parameter.
- QualType ImplicitParamType = From->getType();
- if (const PointerType *FromPtrType = ImplicitParamType->getAs<PointerType>())
- ImplicitParamType = FromPtrType->getPointeeType();
- CXXRecordDecl *ConversionContext
- = cast<CXXRecordDecl>(ImplicitParamType->castAs<RecordType>()->getDecl());
-
+ QualType ObjectType = From->getType();
+ if (const auto *FromPtrType = ObjectType->getAs<PointerType>())
+ ObjectType = FromPtrType->getPointeeType();
+ const auto *ConversionContext =
+ cast<CXXRecordDecl>(ObjectType->castAs<RecordType>()->getDecl());
+
+ // C++23 [over.best.ics.general]
+ // However, if the target is [...]
+ // - the object parameter of a user-defined conversion function
+ // [...] user-defined conversion sequences are not considered.
Candidate.Conversions[0] = TryObjectArgumentInitialization(
*this, CandidateSet.getLocation(), From->getType(),
- From->Classify(Context), Conversion, ConversionContext);
+ From->Classify(Context), Conversion, ConversionContext,
+ /*InOverloadResolution*/ false, /*ExplicitParameterType=*/QualType(),
+ /*SuppressUserConversion*/ true);
if (Candidate.Conversions[0].isBad()) {
Candidate.Viable = false;
@@ -7432,15 +7986,18 @@ void Sema::AddConversionCandidate(
}
if (EnableIfAttr *FailedAttr =
- CheckEnableIf(Conversion, CandidateSet.getLocation(), None)) {
+ CheckEnableIf(Conversion, CandidateSet.getLocation(), std::nullopt)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_enable_if;
Candidate.DeductionFailure.Data = FailedAttr;
return;
}
- if (Conversion->isMultiVersion() && Conversion->hasAttr<TargetAttr>() &&
- !Conversion->getAttr<TargetAttr>()->isDefaultVersion()) {
+ if (Conversion->isMultiVersion() &&
+ ((Conversion->hasAttr<TargetAttr>() &&
+ !Conversion->getAttr<TargetAttr>()->isDefaultVersion()) ||
+ (Conversion->hasAttr<TargetVersionAttr>() &&
+ !Conversion->getAttr<TargetVersionAttr>()->isDefaultVersion()))) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_non_default_multiversion_function;
}
@@ -7474,11 +8031,14 @@ void Sema::AddTemplateConversionCandidate(
return;
}
+ QualType ObjectType = From->getType();
+ Expr::Classification ObjectClassification = From->Classify(getASTContext());
+
TemplateDeductionInfo Info(CandidateSet.getLocation());
CXXConversionDecl *Specialization = nullptr;
- if (TemplateDeductionResult Result
- = DeduceTemplateArguments(FunctionTemplate, ToType,
- Specialization, Info)) {
+ if (TemplateDeductionResult Result = DeduceTemplateArguments(
+ FunctionTemplate, ObjectType, ObjectClassification, ToType,
+ Specialization, Info)) {
OverloadCandidate &Candidate = CandidateSet.addCandidate();
Candidate.FoundDecl = FoundDecl;
Candidate.Function = FunctionTemplate->getTemplatedDecl();
@@ -7530,9 +8090,18 @@ void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
// Determine the implicit conversion sequence for the implicit
// object parameter.
- ImplicitConversionSequence ObjectInit = TryObjectArgumentInitialization(
- *this, CandidateSet.getLocation(), Object->getType(),
- Object->Classify(Context), Conversion, ActingContext);
+ ImplicitConversionSequence ObjectInit;
+ if (Conversion->hasCXXExplicitFunctionObjectParameter()) {
+ ObjectInit = TryCopyInitialization(*this, Object,
+ Conversion->getParamDecl(0)->getType(),
+ /*SuppressUserConversions=*/false,
+ /*InOverloadResolution=*/true, false);
+ } else {
+ ObjectInit = TryObjectArgumentInitialization(
+ *this, CandidateSet.getLocation(), Object->getType(),
+ Object->Classify(Context), Conversion, ActingContext);
+ }
+
if (ObjectInit.isBad()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_conversion;
@@ -7602,8 +8171,19 @@ void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
}
}
+ if (Conversion->getTrailingRequiresClause()) {
+ ConstraintSatisfaction Satisfaction;
+ if (CheckFunctionConstraints(Conversion, Satisfaction, /*Loc*/ {},
+ /*ForOverloadResolution*/ true) ||
+ !Satisfaction.IsSatisfied) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_constraints_not_satisfied;
+ return;
+ }
+ }
+
if (EnableIfAttr *FailedAttr =
- CheckEnableIf(Conversion, CandidateSet.getLocation(), None)) {
+ CheckEnableIf(Conversion, CandidateSet.getLocation(), std::nullopt)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_enable_if;
Candidate.DeductionFailure.Data = FailedAttr;
@@ -7635,7 +8215,7 @@ void Sema::AddNonMemberOperatorCandidates(
if (FunTmpl) {
AddTemplateOverloadCandidate(FunTmpl, F.getPair(), ExplicitTemplateArgs,
FunctionArgs, CandidateSet);
- if (CandidateSet.getRewriteInfo().shouldAddReversed(Context, FD))
+ if (CandidateSet.getRewriteInfo().shouldAddReversed(*this, Args, FD))
AddTemplateOverloadCandidate(
FunTmpl, F.getPair(), ExplicitTemplateArgs,
{FunctionArgs[1], FunctionArgs[0]}, CandidateSet, false, false,
@@ -7644,11 +8224,11 @@ void Sema::AddNonMemberOperatorCandidates(
if (ExplicitTemplateArgs)
continue;
AddOverloadCandidate(FD, F.getPair(), FunctionArgs, CandidateSet);
- if (CandidateSet.getRewriteInfo().shouldAddReversed(Context, FD))
- AddOverloadCandidate(FD, F.getPair(),
- {FunctionArgs[1], FunctionArgs[0]}, CandidateSet,
- false, false, true, false, ADLCallKind::NotADL,
- None, OverloadCandidateParamOrder::Reversed);
+ if (CandidateSet.getRewriteInfo().shouldAddReversed(*this, Args, FD))
+ AddOverloadCandidate(
+ FD, F.getPair(), {FunctionArgs[1], FunctionArgs[0]}, CandidateSet,
+ false, false, true, false, ADLCallKind::NotADL, std::nullopt,
+ OverloadCandidateParamOrder::Reversed);
}
}
}
@@ -7692,15 +8272,20 @@ void Sema::AddMemberOperatorCandidates(OverloadedOperatorKind Op,
LookupResult Operators(*this, OpName, OpLoc, LookupOrdinaryName);
LookupQualifiedName(Operators, T1Rec->getDecl());
- Operators.suppressDiagnostics();
+ Operators.suppressAccessDiagnostics();
for (LookupResult::iterator Oper = Operators.begin(),
- OperEnd = Operators.end();
- Oper != OperEnd;
- ++Oper)
+ OperEnd = Operators.end();
+ Oper != OperEnd; ++Oper) {
+ if (Oper->getAsFunction() &&
+ PO == OverloadCandidateParamOrder::Reversed &&
+ !CandidateSet.getRewriteInfo().shouldAddReversed(
+ *this, {Args[1], Args[0]}, Oper->getAsFunction()))
+ continue;
AddMethodCandidate(Oper.getPair(), Args[0]->getType(),
Args[0]->Classify(Context), Args.slice(1),
CandidateSet, /*SuppressUserConversion=*/false, PO);
+ }
}
}
@@ -7774,8 +8359,7 @@ namespace {
/// enumeration types.
class BuiltinCandidateTypeSet {
/// TypeSet - A set of types.
- typedef llvm::SetVector<QualType, SmallVector<QualType, 8>,
- llvm::SmallPtrSet<QualType, 8>> TypeSet;
+ typedef llvm::SmallSetVector<QualType, 8> TypeSet;
/// PointerTypes - The set of pointer types that will be used in the
/// built-in candidates.
@@ -8136,6 +8720,49 @@ static Qualifiers CollectVRQualifiers(ASTContext &Context, Expr* ArgExpr) {
return VRQuals;
}
+// Note: We're currently only handling qualifiers that are meaningful for the
+// LHS of compound assignment overloading.
+static void forAllQualifierCombinationsImpl(
+ QualifiersAndAtomic Available, QualifiersAndAtomic Applied,
+ llvm::function_ref<void(QualifiersAndAtomic)> Callback) {
+ // _Atomic
+ if (Available.hasAtomic()) {
+ Available.removeAtomic();
+ forAllQualifierCombinationsImpl(Available, Applied.withAtomic(), Callback);
+ forAllQualifierCombinationsImpl(Available, Applied, Callback);
+ return;
+ }
+
+ // volatile
+ if (Available.hasVolatile()) {
+ Available.removeVolatile();
+ assert(!Applied.hasVolatile());
+ forAllQualifierCombinationsImpl(Available, Applied.withVolatile(),
+ Callback);
+ forAllQualifierCombinationsImpl(Available, Applied, Callback);
+ return;
+ }
+
+ Callback(Applied);
+}
+
+static void forAllQualifierCombinations(
+ QualifiersAndAtomic Quals,
+ llvm::function_ref<void(QualifiersAndAtomic)> Callback) {
+ return forAllQualifierCombinationsImpl(Quals, QualifiersAndAtomic(),
+ Callback);
+}
+
+static QualType makeQualifiedLValueReferenceType(QualType Base,
+ QualifiersAndAtomic Quals,
+ Sema &S) {
+ if (Quals.hasAtomic())
+ Base = S.Context.getAtomicType(Base);
+ if (Quals.hasVolatile())
+ Base = S.Context.getVolatileType(Base);
+ return S.Context.getLValueReferenceType(Base);
+}
+
namespace {
/// Helper class to manage the addition of builtin operator overload
@@ -8146,7 +8773,7 @@ class BuiltinOperatorOverloadBuilder {
// Common instance state available to all overload candidate addition methods.
Sema &S;
ArrayRef<Expr *> Args;
- Qualifiers VisibleTypeConversionsQuals;
+ QualifiersAndAtomic VisibleTypeConversionsQuals;
bool HasArithmeticOrEnumeralCandidateType;
SmallVectorImpl<BuiltinCandidateTypeSet> &CandidateTypes;
OverloadCandidateSet &CandidateSet;
@@ -8173,6 +8800,8 @@ class BuiltinOperatorOverloadBuilder {
ArithmeticTypes.push_back(S.Context.LongDoubleTy);
if (S.Context.getTargetInfo().hasFloat128Type())
ArithmeticTypes.push_back(S.Context.Float128Ty);
+ if (S.Context.getTargetInfo().hasIbm128Type())
+ ArithmeticTypes.push_back(S.Context.Ibm128Ty);
// Start of integral types.
FirstIntegralType = ArithmeticTypes.size();
@@ -8268,7 +8897,7 @@ class BuiltinOperatorOverloadBuilder {
public:
BuiltinOperatorOverloadBuilder(
Sema &S, ArrayRef<Expr *> Args,
- Qualifiers VisibleTypeConversionsQuals,
+ QualifiersAndAtomic VisibleTypeConversionsQuals,
bool HasArithmeticOrEnumeralCandidateType,
SmallVectorImpl<BuiltinCandidateTypeSet> &CandidateTypes,
OverloadCandidateSet &CandidateSet)
@@ -8889,18 +9518,14 @@ public:
ParamTypes[1] = ArithmeticTypes[Right];
auto LeftBaseTy = AdjustAddressSpaceForBuiltinOperandType(
S, ArithmeticTypes[Left], Args[0]);
- // Add this built-in operator as a candidate (VQ is empty).
- ParamTypes[0] = S.Context.getLValueReferenceType(LeftBaseTy);
- S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
- /*IsAssignmentOperator=*/isEqualOp);
- // Add this built-in operator as a candidate (VQ is 'volatile').
- if (VisibleTypeConversionsQuals.hasVolatile()) {
- ParamTypes[0] = S.Context.getVolatileType(LeftBaseTy);
- ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
- S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
- /*IsAssignmentOperator=*/isEqualOp);
- }
+ forAllQualifierCombinations(
+ VisibleTypeConversionsQuals, [&](QualifiersAndAtomic Quals) {
+ ParamTypes[0] =
+ makeQualifiedLValueReferenceType(LeftBaseTy, Quals, S);
+ S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
+ /*IsAssignmentOperator=*/isEqualOp);
+ });
}
}
@@ -8947,16 +9572,13 @@ public:
ParamTypes[1] = ArithmeticTypes[Right];
auto LeftBaseTy = AdjustAddressSpaceForBuiltinOperandType(
S, ArithmeticTypes[Left], Args[0]);
- // Add this built-in operator as a candidate (VQ is empty).
- ParamTypes[0] = S.Context.getLValueReferenceType(LeftBaseTy);
- S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
- if (VisibleTypeConversionsQuals.hasVolatile()) {
- // Add this built-in operator as a candidate (VQ is 'volatile').
- ParamTypes[0] = LeftBaseTy;
- ParamTypes[0] = S.Context.getVolatileType(ParamTypes[0]);
- ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
- S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
- }
+
+ forAllQualifierCombinations(
+ VisibleTypeConversionsQuals, [&](QualifiersAndAtomic Quals) {
+ ParamTypes[0] =
+ makeQualifiedLValueReferenceType(LeftBaseTy, Quals, S);
+ S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
+ });
}
}
}
@@ -9120,10 +9742,13 @@ void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
// if the operator we're looking at has built-in operator candidates
// that make use of these types. Also record whether we encounter non-record
// candidate types or either arithmetic or enumeral candidate types.
- Qualifiers VisibleTypeConversionsQuals;
+ QualifiersAndAtomic VisibleTypeConversionsQuals;
VisibleTypeConversionsQuals.addConst();
- for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx)
+ for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) {
VisibleTypeConversionsQuals += CollectVRQualifiers(Context, Args[ArgIdx]);
+ if (Args[ArgIdx]->getType()->isAtomicType())
+ VisibleTypeConversionsQuals.addAtomic();
+ }
bool HasNonRecordCandidateType = false;
bool HasArithmeticOrEnumeralCandidateType = false;
@@ -9185,7 +9810,7 @@ void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
case OO_Plus: // '+' is either unary or binary
if (Args.size() == 1)
OpBuilder.addUnaryPlusPointerOverloads();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case OO_Minus: // '-' is either unary or binary
if (Args.size() == 1) {
@@ -9260,12 +9885,12 @@ void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
case OO_Equal:
OpBuilder.addAssignmentMemberPointerOrEnumeralOverloads();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case OO_PlusEqual:
case OO_MinusEqual:
OpBuilder.addAssignmentPointerOverloads(Op == OO_Equal);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case OO_StarEqual:
case OO_SlashEqual:
@@ -9291,7 +9916,8 @@ void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
break;
case OO_Subscript:
- OpBuilder.addSubscriptOverloads();
+ if (Args.size() == 2)
+ OpBuilder.addSubscriptOverloads();
break;
case OO_ArrowStar:
@@ -9353,13 +9979,14 @@ Sema::AddArgumentDependentLookupCandidates(DeclarationName Name,
AddOverloadCandidate(
FD, FoundDecl, Args, CandidateSet, /*SuppressUserConversions=*/false,
PartialOverloading, /*AllowExplicit=*/true,
- /*AllowExplicitConversions=*/false, ADLCallKind::UsesADL);
- if (CandidateSet.getRewriteInfo().shouldAddReversed(Context, FD)) {
+ /*AllowExplicitConversion=*/false, ADLCallKind::UsesADL);
+ if (CandidateSet.getRewriteInfo().shouldAddReversed(*this, Args, FD)) {
AddOverloadCandidate(
FD, FoundDecl, {Args[1], Args[0]}, CandidateSet,
/*SuppressUserConversions=*/false, PartialOverloading,
- /*AllowExplicit=*/true, /*AllowExplicitConversions=*/false,
- ADLCallKind::UsesADL, None, OverloadCandidateParamOrder::Reversed);
+ /*AllowExplicit=*/true, /*AllowExplicitConversion=*/false,
+ ADLCallKind::UsesADL, std::nullopt,
+ OverloadCandidateParamOrder::Reversed);
}
} else {
auto *FTD = cast<FunctionTemplateDecl>(*I);
@@ -9368,7 +9995,7 @@ Sema::AddArgumentDependentLookupCandidates(DeclarationName Name,
/*SuppressUserConversions=*/false, PartialOverloading,
/*AllowExplicit=*/true, ADLCallKind::UsesADL);
if (CandidateSet.getRewriteInfo().shouldAddReversed(
- Context, FTD->getTemplatedDecl())) {
+ *this, Args, FTD->getTemplatedDecl())) {
AddTemplateOverloadCandidate(
FTD, FoundDecl, ExplicitTemplateArgs, {Args[1], Args[0]},
CandidateSet, /*SuppressUserConversions=*/false, PartialOverloading,
@@ -9410,8 +10037,8 @@ static Comparison compareEnableIfAttrs(const Sema &S, const FunctionDecl *Cand1,
llvm::FoldingSetNodeID Cand1ID, Cand2ID;
for (auto Pair : zip_longest(Cand1Attrs, Cand2Attrs)) {
- Optional<EnableIfAttr *> Cand1A = std::get<0>(Pair);
- Optional<EnableIfAttr *> Cand2A = std::get<1>(Pair);
+ std::optional<EnableIfAttr *> Cand1A = std::get<0>(Pair);
+ std::optional<EnableIfAttr *> Cand2A = std::get<1>(Pair);
// It's impossible for Cand1 to be better than (or equal to) Cand2 if Cand1
// has fewer enable_if attributes than Cand2, and vice versa.
@@ -9489,43 +10116,97 @@ isBetterMultiversionCandidate(const OverloadCandidate &Cand1,
}
/// Compute the type of the implicit object parameter for the given function,
-/// if any. Returns None if there is no implicit object parameter, and a null
-/// QualType if there is a 'matches anything' implicit object parameter.
-static Optional<QualType> getImplicitObjectParamType(ASTContext &Context,
- const FunctionDecl *F) {
+/// if any. Returns std::nullopt if there is no implicit object parameter, and a
+/// null QualType if there is a 'matches anything' implicit object parameter.
+static std::optional<QualType>
+getImplicitObjectParamType(ASTContext &Context, const FunctionDecl *F) {
if (!isa<CXXMethodDecl>(F) || isa<CXXConstructorDecl>(F))
- return llvm::None;
+ return std::nullopt;
auto *M = cast<CXXMethodDecl>(F);
// Static member functions' object parameters match all types.
if (M->isStatic())
return QualType();
-
- QualType T = M->getThisObjectType();
- if (M->getRefQualifier() == RQ_RValue)
- return Context.getRValueReferenceType(T);
- return Context.getLValueReferenceType(T);
+ return M->getFunctionObjectParameterReferenceType();
}
-static bool haveSameParameterTypes(ASTContext &Context, const FunctionDecl *F1,
- const FunctionDecl *F2, unsigned NumParams) {
+// As a Clang extension, allow ambiguity among F1 and F2 if they represent
+// represent the same entity.
+static bool allowAmbiguity(ASTContext &Context, const FunctionDecl *F1,
+ const FunctionDecl *F2) {
if (declaresSameEntity(F1, F2))
return true;
-
+ auto PT1 = F1->getPrimaryTemplate();
+ auto PT2 = F2->getPrimaryTemplate();
+ if (PT1 && PT2) {
+ if (declaresSameEntity(PT1, PT2) ||
+ declaresSameEntity(PT1->getInstantiatedFromMemberTemplate(),
+ PT2->getInstantiatedFromMemberTemplate()))
+ return true;
+ }
+ // TODO: It is not clear whether comparing parameters is necessary (i.e.
+ // different functions with same params). Consider removing this (as no test
+ // fail w/o it).
auto NextParam = [&](const FunctionDecl *F, unsigned &I, bool First) {
if (First) {
- if (Optional<QualType> T = getImplicitObjectParamType(Context, F))
+ if (std::optional<QualType> T = getImplicitObjectParamType(Context, F))
return *T;
}
assert(I < F->getNumParams());
return F->getParamDecl(I++)->getType();
};
+ unsigned F1NumParams = F1->getNumParams() + isa<CXXMethodDecl>(F1);
+ unsigned F2NumParams = F2->getNumParams() + isa<CXXMethodDecl>(F2);
+
+ if (F1NumParams != F2NumParams)
+ return false;
+
unsigned I1 = 0, I2 = 0;
- for (unsigned I = 0; I != NumParams; ++I) {
+ for (unsigned I = 0; I != F1NumParams; ++I) {
QualType T1 = NextParam(F1, I1, I == 0);
QualType T2 = NextParam(F2, I2, I == 0);
- if (!T1.isNull() && !T1.isNull() && !Context.hasSameUnqualifiedType(T1, T2))
+ assert(!T1.isNull() && !T2.isNull() && "Unexpected null param types");
+ if (!Context.hasSameUnqualifiedType(T1, T2))
+ return false;
+ }
+ return true;
+}
+
+/// We're allowed to use constraints partial ordering only if the candidates
+/// have the same parameter types:
+/// [over.match.best.general]p2.6
+/// F1 and F2 are non-template functions with the same
+/// non-object-parameter-type-lists, and F1 is more constrained than F2 [...]
+static bool sameFunctionParameterTypeLists(Sema &S,
+ const OverloadCandidate &Cand1,
+ const OverloadCandidate &Cand2) {
+ if (!Cand1.Function || !Cand2.Function)
+ return false;
+
+ FunctionDecl *Fn1 = Cand1.Function;
+ FunctionDecl *Fn2 = Cand2.Function;
+
+ if (Fn1->isVariadic() != Fn1->isVariadic())
+ return false;
+
+ if (!S.FunctionNonObjectParamTypesAreEqual(
+ Fn1, Fn2, nullptr, Cand1.isReversed() ^ Cand2.isReversed()))
+ return false;
+
+ auto *Mem1 = dyn_cast<CXXMethodDecl>(Fn1);
+ auto *Mem2 = dyn_cast<CXXMethodDecl>(Fn2);
+ if (Mem1 && Mem2) {
+ // if they are member functions, both are direct members of the same class,
+ // and
+ if (Mem1->getParent() != Mem2->getParent())
+ return false;
+ // if both are non-static member functions, they have the same types for
+ // their object parameters
+ if (Mem1->isInstance() && Mem2->isInstance() &&
+ !S.getASTContext().hasSameType(
+ Mem1->getFunctionObjectParameterReferenceType(),
+ Mem1->getFunctionObjectParameterReferenceType()))
return false;
}
return true;
@@ -9579,7 +10260,7 @@ bool clang::isBetterOverloadCandidate(
// overloading resolution diagnostics.
if (S.getLangOpts().CUDA && Cand1.Function && Cand2.Function &&
S.getLangOpts().GPUExcludeWrongSideOverloads) {
- if (FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext)) {
+ if (FunctionDecl *Caller = S.getCurFunctionDecl(/*AllowLambda=*/true)) {
bool IsCallerImplicitHD = Sema::isCUDAImplicitHostDeviceFunction(Caller);
bool IsCand1ImplicitHD =
Sema::isCUDAImplicitHostDeviceFunction(Cand1.Function);
@@ -9612,7 +10293,7 @@ bool clang::isBetterOverloadCandidate(
}
}
- // C++ [over.match.best]p1:
+ // C++ [over.match.best]p1: (Changed in C++23)
//
// -- if F is a static member function, ICS1(F) is defined such
// that ICS1(F) is neither better nor worse than ICS1(G) for
@@ -9669,15 +10350,14 @@ bool clang::isBetterOverloadCandidate(
case ImplicitConversionSequence::Worse:
if (Cand1.Function && Cand2.Function &&
Cand1.isReversed() != Cand2.isReversed() &&
- haveSameParameterTypes(S.Context, Cand1.Function, Cand2.Function,
- NumArgs)) {
+ allowAmbiguity(S.Context, Cand1.Function, Cand2.Function)) {
// Work around large-scale breakage caused by considering reversed
// forms of operator== in C++20:
//
- // When comparing a function against a reversed function with the same
- // parameter types, if we have a better conversion for one argument and
- // a worse conversion for the other, the implicit conversion sequences
- // are treated as being equally good.
+ // When comparing a function against a reversed function, if we have a
+ // better conversion for one argument and a worse conversion for the
+ // other, the implicit conversion sequences are treated as being equally
+ // good.
//
// This prevents a comparison function from being considered ambiguous
// with a reversed form that is written in the same way.
@@ -9768,28 +10448,28 @@ bool clang::isBetterOverloadCandidate(
// -— F1 and F2 are non-template functions with the same
// parameter-type-lists, and F1 is more constrained than F2 [...],
- if (Cand1.Function && Cand2.Function && !Cand1IsSpecialization &&
- !Cand2IsSpecialization && Cand1.Function->hasPrototype() &&
- Cand2.Function->hasPrototype()) {
- auto *PT1 = cast<FunctionProtoType>(Cand1.Function->getFunctionType());
- auto *PT2 = cast<FunctionProtoType>(Cand2.Function->getFunctionType());
- if (PT1->getNumParams() == PT2->getNumParams() &&
- PT1->isVariadic() == PT2->isVariadic() &&
- S.FunctionParamTypesAreEqual(PT1, PT2)) {
- Expr *RC1 = Cand1.Function->getTrailingRequiresClause();
- Expr *RC2 = Cand2.Function->getTrailingRequiresClause();
- if (RC1 && RC2) {
- bool AtLeastAsConstrained1, AtLeastAsConstrained2;
- if (S.IsAtLeastAsConstrained(Cand1.Function, {RC1}, Cand2.Function,
- {RC2}, AtLeastAsConstrained1) ||
- S.IsAtLeastAsConstrained(Cand2.Function, {RC2}, Cand1.Function,
- {RC1}, AtLeastAsConstrained2))
- return false;
- if (AtLeastAsConstrained1 != AtLeastAsConstrained2)
- return AtLeastAsConstrained1;
- } else if (RC1 || RC2) {
- return RC1 != nullptr;
- }
+ if (!Cand1IsSpecialization && !Cand2IsSpecialization &&
+ sameFunctionParameterTypeLists(S, Cand1, Cand2)) {
+ FunctionDecl *Function1 = Cand1.Function;
+ FunctionDecl *Function2 = Cand2.Function;
+ if (FunctionDecl *MF = Function1->getInstantiatedFromMemberFunction())
+ Function1 = MF;
+ if (FunctionDecl *MF = Function2->getInstantiatedFromMemberFunction())
+ Function2 = MF;
+
+ const Expr *RC1 = Function1->getTrailingRequiresClause();
+ const Expr *RC2 = Function2->getTrailingRequiresClause();
+ if (RC1 && RC2) {
+ bool AtLeastAsConstrained1, AtLeastAsConstrained2;
+ if (S.IsAtLeastAsConstrained(Function1, RC1, Function2, RC2,
+ AtLeastAsConstrained1) ||
+ S.IsAtLeastAsConstrained(Function2, RC2, Function1, RC1,
+ AtLeastAsConstrained2))
+ return false;
+ if (AtLeastAsConstrained1 != AtLeastAsConstrained2)
+ return AtLeastAsConstrained1;
+ } else if (RC1 || RC2) {
+ return RC1 != nullptr;
}
}
@@ -9798,9 +10478,9 @@ bool clang::isBetterOverloadCandidate(
// F1 and F2 have the same type.
// FIXME: Implement the "all parameters have the same type" check.
bool Cand1IsInherited =
- dyn_cast_or_null<ConstructorUsingShadowDecl>(Cand1.FoundDecl.getDecl());
+ isa_and_nonnull<ConstructorUsingShadowDecl>(Cand1.FoundDecl.getDecl());
bool Cand2IsInherited =
- dyn_cast_or_null<ConstructorUsingShadowDecl>(Cand2.FoundDecl.getDecl());
+ isa_and_nonnull<ConstructorUsingShadowDecl>(Cand2.FoundDecl.getDecl());
if (Cand1IsInherited != Cand2IsInherited)
return Cand2IsInherited;
else if (Cand1IsInherited) {
@@ -9834,8 +10514,23 @@ bool clang::isBetterOverloadCandidate(
return Guide2->isImplicit();
// -- F1 is the copy deduction candidate(16.3.1.8) and F2 is not
- if (Guide1->isCopyDeductionCandidate())
+ if (Guide1->getDeductionCandidateKind() == DeductionCandidate::Copy)
return true;
+ if (Guide2->getDeductionCandidateKind() == DeductionCandidate::Copy)
+ return false;
+
+ // --F1 is generated from a non-template constructor and F2 is generated
+ // from a constructor template
+ const auto *Constructor1 = Guide1->getCorrespondingConstructor();
+ const auto *Constructor2 = Guide2->getCorrespondingConstructor();
+ if (Constructor1 && Constructor2) {
+ bool isC1Templated = Constructor1->getTemplatedKind() !=
+ FunctionDecl::TemplatedKind::TK_NonTemplate;
+ bool isC2Templated = Constructor2->getTemplatedKind() !=
+ FunctionDecl::TemplatedKind::TK_NonTemplate;
+ if (isC1Templated != isC2Templated)
+ return isC2Templated;
+ }
}
}
@@ -9862,7 +10557,7 @@ bool clang::isBetterOverloadCandidate(
// If other rules cannot determine which is better, CUDA preference is used
// to determine which is better.
if (S.getLangOpts().CUDA && Cand1.Function && Cand2.Function) {
- FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext);
+ FunctionDecl *Caller = S.getCurFunctionDecl(/*AllowLambda=*/true);
return S.IdentifyCUDAPreference(Caller, Cand1.Function) >
S.IdentifyCUDAPreference(Caller, Cand2.Function);
}
@@ -9879,7 +10574,7 @@ bool clang::isBetterOverloadCandidate(
if (AS1 != AS2) {
if (Qualifiers::isAddressSpaceSupersetOf(AS2, AS1))
return true;
- if (Qualifiers::isAddressSpaceSupersetOf(AS2, AS1))
+ if (Qualifiers::isAddressSpaceSupersetOf(AS1, AS2))
return false;
}
}
@@ -9954,6 +10649,13 @@ void Sema::diagnoseEquivalentInternalLinkageDeclarations(
}
}
+bool OverloadCandidate::NotValidBecauseConstraintExprHasError() const {
+ return FailureKind == ovl_fail_bad_deduction &&
+ DeductionFailure.Result == Sema::TDK_ConstraintsNotSatisfied &&
+ static_cast<CNSInfo *>(DeductionFailure.Data)
+ ->Satisfaction.ContainsErrors;
+}
+
/// Computes the best viable function (C++ 13.3.3)
/// within an overload candidate set.
///
@@ -9983,7 +10685,7 @@ OverloadCandidateSet::BestViableFunction(Sema &S, SourceLocation Loc,
// -fgpu-exclude-wrong-side-overloads is on, all candidates are compared
// uniformly in isBetterOverloadCandidate.
if (S.getLangOpts().CUDA && !S.getLangOpts().GPUExcludeWrongSideOverloads) {
- const FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext);
+ const FunctionDecl *Caller = S.getCurFunctionDecl(/*AllowLambda=*/true);
bool ContainsSameSideCandidate =
llvm::any_of(Candidates, [&](OverloadCandidate *Cand) {
// Check viable function only.
@@ -10006,10 +10708,18 @@ OverloadCandidateSet::BestViableFunction(Sema &S, SourceLocation Loc,
Best = end();
for (auto *Cand : Candidates) {
Cand->Best = false;
- if (Cand->Viable)
+ if (Cand->Viable) {
if (Best == end() ||
isBetterOverloadCandidate(S, *Cand, *Best, Loc, Kind))
Best = Cand;
+ } else if (Cand->NotValidBecauseConstraintExprHasError()) {
+ // This candidate has constraint that we were unable to evaluate because
+ // it referenced an expression that contained an error. Rather than fall
+ // back onto a potentially unintended candidate (made worse by
+ // subsuming constraints), treat this as 'no viable candidate'.
+ Best = end();
+ return OR_No_Viable_Function;
+ }
}
// If we didn't find any viable functions, abort.
@@ -10079,7 +10789,8 @@ enum OverloadCandidateSelect {
};
static std::pair<OverloadCandidateKind, OverloadCandidateSelect>
-ClassifyOverloadCandidate(Sema &S, NamedDecl *Found, FunctionDecl *Fn,
+ClassifyOverloadCandidate(Sema &S, const NamedDecl *Found,
+ const FunctionDecl *Fn,
OverloadCandidateRewriteKind CRK,
std::string &Description) {
@@ -10103,7 +10814,7 @@ ClassifyOverloadCandidate(Sema &S, NamedDecl *Found, FunctionDecl *Fn,
if (CRK & CRK_Reversed)
return oc_reversed_binary_operator;
- if (CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(Fn)) {
+ if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(Fn)) {
if (!Ctor->isImplicit()) {
if (isa<ConstructorUsingShadowDecl>(Found))
return oc_inherited_constructor;
@@ -10122,7 +10833,7 @@ ClassifyOverloadCandidate(Sema &S, NamedDecl *Found, FunctionDecl *Fn,
return oc_implicit_copy_constructor;
}
- if (CXXMethodDecl *Meth = dyn_cast<CXXMethodDecl>(Fn)) {
+ if (const auto *Meth = dyn_cast<CXXMethodDecl>(Fn)) {
// This actually gets spelled 'candidate function' for now, but
// it doesn't hurt to split it out.
if (!Meth->isImplicit())
@@ -10144,10 +10855,10 @@ ClassifyOverloadCandidate(Sema &S, NamedDecl *Found, FunctionDecl *Fn,
return std::make_pair(Kind, Select);
}
-void MaybeEmitInheritedConstructorNote(Sema &S, Decl *FoundDecl) {
+void MaybeEmitInheritedConstructorNote(Sema &S, const Decl *FoundDecl) {
// FIXME: It'd be nice to only emit a note once per using-decl per overload
// set.
- if (auto *Shadow = dyn_cast<ConstructorUsingShadowDecl>(FoundDecl))
+ if (const auto *Shadow = dyn_cast<ConstructorUsingShadowDecl>(FoundDecl))
S.Diag(FoundDecl->getLocation(),
diag::note_ovl_candidate_inherited_constructor)
<< Shadow->getNominatedBaseClass();
@@ -10196,10 +10907,19 @@ static bool checkAddressOfFunctionIsAvailable(Sema &S, const FunctionDecl *FD,
return false;
if (!Satisfaction.IsSatisfied) {
if (Complain) {
- if (InOverloadResolution)
+ if (InOverloadResolution) {
+ SmallString<128> TemplateArgString;
+ if (FunctionTemplateDecl *FunTmpl = FD->getPrimaryTemplate()) {
+ TemplateArgString += " ";
+ TemplateArgString += S.getTemplateArgumentBindingsText(
+ FunTmpl->getTemplateParameters(),
+ *FD->getTemplateSpecializationArgs());
+ }
+
S.Diag(FD->getBeginLoc(),
- diag::note_ovl_candidate_unsatisfied_constraints);
- else
+ diag::note_ovl_candidate_unsatisfied_constraints)
+ << TemplateArgString;
+ } else
S.Diag(Loc, diag::err_addrof_function_constraints_not_satisfied)
<< FD;
S.DiagnoseUnsatisfiedConstraint(Satisfaction);
@@ -10245,7 +10965,7 @@ bool Sema::checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
// Don't print candidates other than the one that matches the calling
// convention of the call operator, since that is guaranteed to exist.
-static bool shouldSkipNotingLambdaConversionDecl(FunctionDecl *Fn) {
+static bool shouldSkipNotingLambdaConversionDecl(const FunctionDecl *Fn) {
const auto *ConvD = dyn_cast<CXXConversionDecl>(Fn);
if (!ConvD)
@@ -10265,7 +10985,7 @@ static bool shouldSkipNotingLambdaConversionDecl(FunctionDecl *Fn) {
}
// Notes the location of an overload candidate.
-void Sema::NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
+void Sema::NoteOverloadCandidate(const NamedDecl *Found, const FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind,
QualType DestType, bool TakingAddress) {
if (TakingAddress && !checkAddressOfCandidateIsAvailable(*this, Fn))
@@ -10273,6 +10993,9 @@ void Sema::NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
if (Fn->isMultiVersion() && Fn->hasAttr<TargetAttr>() &&
!Fn->getAttr<TargetAttr>()->isDefaultVersion())
return;
+ if (Fn->isMultiVersion() && Fn->hasAttr<TargetVersionAttr>() &&
+ !Fn->getAttr<TargetVersionAttr>()->isDefaultVersion())
+ return;
if (shouldSkipNotingLambdaConversionDecl(Fn))
return;
@@ -10408,6 +11131,8 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
Expr *FromExpr = Conv.Bad.FromExpr;
QualType FromTy = Conv.Bad.getFromType();
QualType ToTy = Conv.Bad.getToType();
+ SourceRange ToParamRange =
+ !isObjectArgument ? Fn->getParamDecl(I)->getSourceRange() : SourceRange();
if (FromTy == S.Context.OverloadTy) {
assert(FromExpr && "overload set argument came from implicit argument?");
@@ -10418,8 +11143,7 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_overload)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << ToTy
- << Name << I + 1;
+ << ToParamRange << ToTy << Name << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
@@ -10448,14 +11172,12 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
if (isObjectArgument)
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_addrspace_this)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second
- << FnDesc << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
- << FromQs.getAddressSpace() << ToQs.getAddressSpace();
+ << FnDesc << FromQs.getAddressSpace() << ToQs.getAddressSpace();
else
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_addrspace)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second
- << FnDesc << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
- << FromQs.getAddressSpace() << ToQs.getAddressSpace()
- << ToTy->isReferenceType() << I + 1;
+ << FnDesc << ToParamRange << FromQs.getAddressSpace()
+ << ToQs.getAddressSpace() << ToTy->isReferenceType() << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
@@ -10463,9 +11185,8 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
if (FromQs.getObjCLifetime() != ToQs.getObjCLifetime()) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_ownership)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
- << FromQs.getObjCLifetime() << ToQs.getObjCLifetime()
- << (unsigned)isObjectArgument << I + 1;
+ << ToParamRange << FromTy << FromQs.getObjCLifetime()
+ << ToQs.getObjCLifetime() << (unsigned)isObjectArgument << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
@@ -10473,18 +11194,8 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
if (FromQs.getObjCGCAttr() != ToQs.getObjCGCAttr()) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_gc)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
- << FromQs.getObjCGCAttr() << ToQs.getObjCGCAttr()
- << (unsigned)isObjectArgument << I + 1;
- MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
- return;
- }
-
- if (FromQs.hasUnaligned() != ToQs.hasUnaligned()) {
- S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_unaligned)
- << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
- << FromQs.hasUnaligned() << I + 1;
+ << ToParamRange << FromTy << FromQs.getObjCGCAttr()
+ << ToQs.getObjCGCAttr() << (unsigned)isObjectArgument << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
@@ -10495,13 +11206,11 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
if (isObjectArgument) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_cvr_this)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
- << (CVR - 1);
+ << FromTy << (CVR - 1);
} else {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_cvr)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
- << (CVR - 1) << I + 1;
+ << ToParamRange << FromTy << (CVR - 1) << I + 1;
}
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
@@ -10513,7 +11222,7 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
<< (unsigned)isObjectArgument << I + 1
<< (Conv.Bad.Kind == BadConversionSequence::rvalue_ref_to_lvalue)
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange());
+ << ToParamRange;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
@@ -10523,8 +11232,11 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
if (FromExpr && isa<InitListExpr>(FromExpr)) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_list_argument)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
- << ToTy << (unsigned)isObjectArgument << I + 1;
+ << ToParamRange << FromTy << ToTy << (unsigned)isObjectArgument << I + 1
+ << (Conv.Bad.Kind == BadConversionSequence::too_few_initializers ? 1
+ : Conv.Bad.Kind == BadConversionSequence::too_many_initializers
+ ? 2
+ : 0);
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
@@ -10539,8 +11251,7 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
// Emit the generic diagnostic and, optionally, add the hints to it.
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_conv_incomplete)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
- << ToTy << (unsigned)isObjectArgument << I + 1
+ << ToParamRange << FromTy << ToTy << (unsigned)isObjectArgument << I + 1
<< (unsigned)(Cand->Fix.Kind);
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
@@ -10581,24 +11292,24 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
if (BaseToDerivedConversion) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_base_to_derived_conv)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
- << (BaseToDerivedConversion - 1) << FromTy << ToTy << I + 1;
+ << ToParamRange << (BaseToDerivedConversion - 1) << FromTy << ToTy
+ << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
if (isa<ObjCObjectPointerType>(CFromTy) &&
isa<PointerType>(CToTy)) {
- Qualifiers FromQs = CFromTy.getQualifiers();
- Qualifiers ToQs = CToTy.getQualifiers();
- if (FromQs.getObjCLifetime() != ToQs.getObjCLifetime()) {
- S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_arc_conv)
- << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second
- << FnDesc << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
- << FromTy << ToTy << (unsigned)isObjectArgument << I + 1;
- MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
- return;
- }
+ Qualifiers FromQs = CFromTy.getQualifiers();
+ Qualifiers ToQs = CToTy.getQualifiers();
+ if (FromQs.getObjCLifetime() != ToQs.getObjCLifetime()) {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_arc_conv)
+ << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
+ << ToParamRange << FromTy << ToTy << (unsigned)isObjectArgument
+ << I + 1;
+ MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
+ return;
+ }
}
if (TakingCandidateAddress &&
@@ -10608,14 +11319,16 @@ static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
// Emit the generic diagnostic and, optionally, add the hints to it.
PartialDiagnostic FDiag = S.PDiag(diag::note_ovl_candidate_bad_conv);
FDiag << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
- << (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
- << ToTy << (unsigned)isObjectArgument << I + 1
+ << ToParamRange << FromTy << ToTy << (unsigned)isObjectArgument << I + 1
<< (unsigned)(Cand->Fix.Kind);
- // If we can fix the conversion, suggest the FixIts.
- for (std::vector<FixItHint>::iterator HI = Cand->Fix.Hints.begin(),
- HE = Cand->Fix.Hints.end(); HI != HE; ++HI)
- FDiag << *HI;
+ // Check that location of Fn is not in system header.
+ if (!S.SourceMgr.isInSystemHeader(Fn->getLocation())) {
+ // If we can fix the conversion, suggest the FixIts.
+ for (const FixItHint &HI : Cand->Fix.Hints)
+ FDiag << HI;
+ }
+
S.Diag(Fn->getLocation(), FDiag);
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
@@ -10663,37 +11376,43 @@ static void DiagnoseArityMismatch(Sema &S, NamedDecl *Found, Decl *D,
// TODO: treat calls to a missing default constructor as a special case
const auto *FnTy = Fn->getType()->castAs<FunctionProtoType>();
- unsigned MinParams = Fn->getMinRequiredArguments();
+ unsigned MinParams = Fn->getMinRequiredExplicitArguments();
// at least / at most / exactly
+ bool HasExplicitObjectParam = Fn->hasCXXExplicitFunctionObjectParameter();
+ unsigned ParamCount = FnTy->getNumParams() - (HasExplicitObjectParam ? 1 : 0);
unsigned mode, modeCount;
if (NumFormalArgs < MinParams) {
- if (MinParams != FnTy->getNumParams() || FnTy->isVariadic() ||
+ if (MinParams != ParamCount || FnTy->isVariadic() ||
FnTy->isTemplateVariadic())
mode = 0; // "at least"
else
mode = 2; // "exactly"
modeCount = MinParams;
} else {
- if (MinParams != FnTy->getNumParams())
+ if (MinParams != ParamCount)
mode = 1; // "at most"
else
mode = 2; // "exactly"
- modeCount = FnTy->getNumParams();
+ modeCount = ParamCount;
}
std::string Description;
std::pair<OverloadCandidateKind, OverloadCandidateSelect> FnKindPair =
ClassifyOverloadCandidate(S, Found, Fn, CRK_None, Description);
- if (modeCount == 1 && Fn->getParamDecl(0)->getDeclName())
+ if (modeCount == 1 &&
+ Fn->getParamDecl(HasExplicitObjectParam ? 1 : 0)->getDeclName())
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_arity_one)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second
- << Description << mode << Fn->getParamDecl(0) << NumFormalArgs;
+ << Description << mode
+ << Fn->getParamDecl(HasExplicitObjectParam ? 1 : 0) << NumFormalArgs
+ << HasExplicitObjectParam << Fn->getParametersSourceRange();
else
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_arity)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second
- << Description << mode << modeCount << NumFormalArgs;
+ << Description << mode << modeCount << NumFormalArgs
+ << HasExplicitObjectParam << Fn->getParametersSourceRange();
MaybeEmitInheritedConstructorNote(S, Found);
}
@@ -11004,7 +11723,7 @@ static void DiagnoseBadDeduction(Sema &S, OverloadCandidate *Cand,
/// CUDA: diagnose an invalid call across targets.
static void DiagnoseBadTarget(Sema &S, OverloadCandidate *Cand) {
- FunctionDecl *Caller = cast<FunctionDecl>(S.CurContext);
+ FunctionDecl *Caller = S.getCurFunctionDecl(/*AllowLambda=*/true);
FunctionDecl *Callee = Cand->Function;
Sema::CUDAFunctionTarget CallerTarget = S.IdentifyCUDATarget(Caller),
@@ -11126,6 +11845,13 @@ static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
if (shouldSkipNotingLambdaConversionDecl(Fn))
return;
+ // There is no physical candidate declaration to point to for OpenCL builtins.
+ // Except for failed conversions, the notes are identical for each candidate,
+ // so do not generate such notes.
+ if (S.getLangOpts().OpenCL && Fn->isImplicit() &&
+ Cand->FailureKind != ovl_fail_bad_conversion)
+ return;
+
// Note deleted candidates, but only if they're viable.
if (Cand->Viable) {
if (Fn->isDeleted()) {
@@ -11268,8 +11994,18 @@ static void NoteSurrogateCandidate(Sema &S, OverloadCandidate *Cand) {
if (isRValueReference) FnType = S.Context.getRValueReferenceType(FnType);
if (isLValueReference) FnType = S.Context.getLValueReferenceType(FnType);
- S.Diag(Cand->Surrogate->getLocation(), diag::note_ovl_surrogate_cand)
- << FnType;
+ if (!Cand->Viable &&
+ Cand->FailureKind == ovl_fail_constraints_not_satisfied) {
+ S.Diag(Cand->Surrogate->getLocation(),
+ diag::note_ovl_surrogate_constraints_not_satisfied)
+ << Cand->Surrogate;
+ ConstraintSatisfaction Satisfaction;
+ if (S.CheckFunctionConstraints(Cand->Surrogate, Satisfaction))
+ S.DiagnoseUnsatisfiedConstraint(Satisfaction);
+ } else {
+ S.Diag(Cand->Surrogate->getLocation(), diag::note_ovl_surrogate_cand)
+ << FnType;
+ }
}
static void NoteBuiltinOperatorCandidate(Sema &S, StringRef Opc,
@@ -11314,6 +12050,7 @@ static unsigned RankDeductionFailure(const DeductionFailureInfo &DFI) {
switch ((Sema::TemplateDeductionResult)DFI.Result) {
case Sema::TDK_Success:
case Sema::TDK_NonDependentConversionFailure:
+ case Sema::TDK_AlreadyDiagnosed:
llvm_unreachable("non-deduction failure while diagnosing bad deduction");
case Sema::TDK_Invalid:
@@ -11348,6 +12085,7 @@ static unsigned RankDeductionFailure(const DeductionFailureInfo &DFI) {
}
namespace {
+
struct CompareOverloadCandidatesForDisplay {
Sema &S;
SourceLocation Loc;
@@ -11385,13 +12123,9 @@ struct CompareOverloadCandidatesForDisplay {
if (L->Viable) {
if (!R->Viable) return true;
- // TODO: introduce a tri-valued comparison for overload
- // candidates. Would be more worthwhile if we had a sort
- // that could exploit it.
- if (isBetterOverloadCandidate(S, *L, *R, SourceLocation(), CSK))
- return true;
- if (isBetterOverloadCandidate(S, *R, *L, SourceLocation(), CSK))
- return false;
+ if (int Ord = CompareConversions(*L, *R))
+ return Ord < 0;
+ // Use other tie breakers.
} else if (R->Viable)
return false;
@@ -11443,30 +12177,8 @@ struct CompareOverloadCandidatesForDisplay {
}
// If there's any ordering between the defined conversions...
- // FIXME: this might not be transitive.
- assert(L->Conversions.size() == R->Conversions.size());
-
- int leftBetter = 0;
- unsigned I = (L->IgnoreObjectArgument || R->IgnoreObjectArgument);
- for (unsigned E = L->Conversions.size(); I != E; ++I) {
- switch (CompareImplicitConversionSequences(S, Loc,
- L->Conversions[I],
- R->Conversions[I])) {
- case ImplicitConversionSequence::Better:
- leftBetter++;
- break;
-
- case ImplicitConversionSequence::Worse:
- leftBetter--;
- break;
-
- case ImplicitConversionSequence::Indistinguishable:
- break;
- }
- }
- if (leftBetter > 0) return true;
- if (leftBetter < 0) return false;
-
+ if (int Ord = CompareConversions(*L, *R))
+ return Ord < 0;
} else if (RFailureKind == ovl_fail_bad_conversion)
return false;
@@ -11474,9 +12186,12 @@ struct CompareOverloadCandidatesForDisplay {
if (RFailureKind != ovl_fail_bad_deduction)
return true;
- if (L->DeductionFailure.Result != R->DeductionFailure.Result)
- return RankDeductionFailure(L->DeductionFailure)
- < RankDeductionFailure(R->DeductionFailure);
+ if (L->DeductionFailure.Result != R->DeductionFailure.Result) {
+ unsigned LRank = RankDeductionFailure(L->DeductionFailure);
+ unsigned RRank = RankDeductionFailure(R->DeductionFailure);
+ if (LRank != RRank)
+ return LRank < RRank;
+ }
} else if (RFailureKind == ovl_fail_bad_deduction)
return false;
@@ -11488,10 +12203,66 @@ struct CompareOverloadCandidatesForDisplay {
SourceLocation RLoc = GetLocationForCandidate(R);
// Put candidates without locations (e.g. builtins) at the end.
- if (LLoc.isInvalid()) return false;
- if (RLoc.isInvalid()) return true;
+ if (LLoc.isValid() && RLoc.isValid())
+ return S.SourceMgr.isBeforeInTranslationUnit(LLoc, RLoc);
+ if (LLoc.isValid() && !RLoc.isValid())
+ return true;
+ if (RLoc.isValid() && !LLoc.isValid())
+ return false;
+ assert(!LLoc.isValid() && !RLoc.isValid());
+ // For builtins and other functions without locations, fallback to the order
+ // in which they were added into the candidate set.
+ return L < R;
+ }
- return S.SourceMgr.isBeforeInTranslationUnit(LLoc, RLoc);
+private:
+ struct ConversionSignals {
+ unsigned KindRank = 0;
+ ImplicitConversionRank Rank = ICR_Exact_Match;
+
+ static ConversionSignals ForSequence(ImplicitConversionSequence &Seq) {
+ ConversionSignals Sig;
+ Sig.KindRank = Seq.getKindRank();
+ if (Seq.isStandard())
+ Sig.Rank = Seq.Standard.getRank();
+ else if (Seq.isUserDefined())
+ Sig.Rank = Seq.UserDefined.After.getRank();
+ // We intend StaticObjectArgumentConversion to compare the same as
+ // StandardConversion with ICR_ExactMatch rank.
+ return Sig;
+ }
+
+ static ConversionSignals ForObjectArgument() {
+ // We intend StaticObjectArgumentConversion to compare the same as
+ // StandardConversion with ICR_ExactMatch rank. Default give us that.
+ return {};
+ }
+ };
+
+ // Returns -1 if conversions in L are considered better.
+ // 0 if they are considered indistinguishable.
+ // 1 if conversions in R are better.
+ int CompareConversions(const OverloadCandidate &L,
+ const OverloadCandidate &R) {
+ // We cannot use `isBetterOverloadCandidate` because it is defined
+ // according to the C++ standard and provides a partial order, but we need
+ // a total order as this function is used in sort.
+ assert(L.Conversions.size() == R.Conversions.size());
+ for (unsigned I = 0, N = L.Conversions.size(); I != N; ++I) {
+ auto LS = L.IgnoreObjectArgument && I == 0
+ ? ConversionSignals::ForObjectArgument()
+ : ConversionSignals::ForSequence(L.Conversions[I]);
+ auto RS = R.IgnoreObjectArgument
+ ? ConversionSignals::ForObjectArgument()
+ : ConversionSignals::ForSequence(R.Conversions[I]);
+ if (std::tie(LS.KindRank, LS.Rank) != std::tie(RS.KindRank, RS.Rank))
+ return std::tie(LS.KindRank, LS.Rank) < std::tie(RS.KindRank, RS.Rank)
+ ? -1
+ : 1;
+ }
+ // FIXME: find a way to compare templates for being more or less
+ // specialized that provides a strict weak ordering.
+ return 0;
}
};
}
@@ -11551,7 +12322,9 @@ CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
// Conversion 0 is 'this', which doesn't have a corresponding parameter.
ConvIdx = 1;
if (CSK == OverloadCandidateSet::CSK_Operator &&
- Cand->Function->getDeclName().getCXXOverloadedOperator() != OO_Call)
+ Cand->Function->getDeclName().getCXXOverloadedOperator() != OO_Call &&
+ Cand->Function->getDeclName().getCXXOverloadedOperator() !=
+ OO_Subscript)
// Argument 0 is 'this', which doesn't have a corresponding parameter.
ArgIdx = 1;
}
@@ -11661,7 +12434,16 @@ void OverloadCandidateSet::NoteCandidates(
S.Diag(PD.first, PD.second, shouldDeferDiags(S, Args, OpLoc));
- NoteCandidates(S, Args, Cands, Opc, OpLoc);
+ // In WebAssembly we don't want to emit further diagnostics if a table is
+ // passed as an argument to a function.
+ bool NoteCands = true;
+ for (const Expr *Arg : Args) {
+ if (Arg->getType()->isWebAssemblyTableType())
+ NoteCands = false;
+ }
+
+ if (NoteCands)
+ NoteCandidates(S, Args, Cands, Opc, OpLoc);
if (OCD == OCD_AmbiguousCandidates)
MaybeDiagnoseAmbiguousConstraints(S, {begin(), end()});
@@ -12008,7 +12790,9 @@ private:
= dyn_cast<CXXMethodDecl>(FunctionTemplate->getTemplatedDecl())) {
// Skip non-static function templates when converting to pointer, and
// static when converting to member pointer.
- if (Method->isStatic() == TargetTypeIsNonStaticMemberFunction)
+ bool CanConvertToFunctionPointer =
+ Method->isStatic() || Method->isExplicitObjectMemberFunction();
+ if (CanConvertToFunctionPointer == TargetTypeIsNonStaticMemberFunction)
return false;
}
else if (TargetTypeIsNonStaticMemberFunction)
@@ -12053,21 +12837,28 @@ private:
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn)) {
// Skip non-static functions when converting to pointer, and static
// when converting to member pointer.
- if (Method->isStatic() == TargetTypeIsNonStaticMemberFunction)
+ bool CanConvertToFunctionPointer =
+ Method->isStatic() || Method->isExplicitObjectMemberFunction();
+ if (CanConvertToFunctionPointer == TargetTypeIsNonStaticMemberFunction)
return false;
}
else if (TargetTypeIsNonStaticMemberFunction)
return false;
if (FunctionDecl *FunDecl = dyn_cast<FunctionDecl>(Fn)) {
- if (S.getLangOpts().CUDA)
- if (FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext))
- if (!Caller->isImplicit() && !S.IsAllowedCUDACall(Caller, FunDecl))
- return false;
+ if (S.getLangOpts().CUDA) {
+ FunctionDecl *Caller = S.getCurFunctionDecl(/*AllowLambda=*/true);
+ if (!(Caller && Caller->isImplicit()) &&
+ !S.IsAllowedCUDACall(Caller, FunDecl))
+ return false;
+ }
if (FunDecl->isMultiVersion()) {
const auto *TA = FunDecl->getAttr<TargetAttr>();
if (TA && !TA->isDefaultVersion())
return false;
+ const auto *TVA = FunDecl->getAttr<TargetVersionAttr>();
+ if (TVA && !TVA->isDefaultVersion())
+ return false;
}
// If any candidate has a placeholder return type, trigger its deduction
@@ -12178,7 +12969,8 @@ private:
}
void EliminateSuboptimalCudaMatches() {
- S.EraseUnwantedCUDAMatches(dyn_cast<FunctionDecl>(S.CurContext), Matches);
+ S.EraseUnwantedCUDAMatches(S.getCurFunctionDecl(/*AllowLambda=*/true),
+ Matches);
}
public:
@@ -12329,20 +13121,31 @@ Sema::resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &Pair) {
DeclAccessPair DAP;
SmallVector<FunctionDecl *, 2> AmbiguousDecls;
- auto CheckMoreConstrained =
- [&] (FunctionDecl *FD1, FunctionDecl *FD2) -> Optional<bool> {
- SmallVector<const Expr *, 1> AC1, AC2;
- FD1->getAssociatedConstraints(AC1);
- FD2->getAssociatedConstraints(AC2);
- bool AtLeastAsConstrained1, AtLeastAsConstrained2;
- if (IsAtLeastAsConstrained(FD1, AC1, FD2, AC2, AtLeastAsConstrained1))
- return None;
- if (IsAtLeastAsConstrained(FD2, AC2, FD1, AC1, AtLeastAsConstrained2))
- return None;
- if (AtLeastAsConstrained1 == AtLeastAsConstrained2)
- return None;
- return AtLeastAsConstrained1;
- };
+ // Return positive for better, negative for worse, 0 for equal preference.
+ auto CheckCUDAPreference = [&](FunctionDecl *FD1, FunctionDecl *FD2) {
+ FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
+ return static_cast<int>(IdentifyCUDAPreference(Caller, FD1)) -
+ static_cast<int>(IdentifyCUDAPreference(Caller, FD2));
+ };
+
+ auto CheckMoreConstrained = [&](FunctionDecl *FD1,
+ FunctionDecl *FD2) -> std::optional<bool> {
+ if (FunctionDecl *MF = FD1->getInstantiatedFromMemberFunction())
+ FD1 = MF;
+ if (FunctionDecl *MF = FD2->getInstantiatedFromMemberFunction())
+ FD2 = MF;
+ SmallVector<const Expr *, 1> AC1, AC2;
+ FD1->getAssociatedConstraints(AC1);
+ FD2->getAssociatedConstraints(AC2);
+ bool AtLeastAsConstrained1, AtLeastAsConstrained2;
+ if (IsAtLeastAsConstrained(FD1, AC1, FD2, AC2, AtLeastAsConstrained1))
+ return std::nullopt;
+ if (IsAtLeastAsConstrained(FD2, AC2, FD1, AC1, AtLeastAsConstrained2))
+ return std::nullopt;
+ if (AtLeastAsConstrained1 == AtLeastAsConstrained2)
+ return std::nullopt;
+ return AtLeastAsConstrained1;
+ };
// Don't use the AddressOfResolver because we're specifically looking for
// cases where we have one overload candidate that lacks
@@ -12355,11 +13158,33 @@ Sema::resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &Pair) {
if (!checkAddressOfFunctionIsAvailable(FD))
continue;
+ // If we found a better result, update Result.
+ auto FoundBetter = [&]() {
+ IsResultAmbiguous = false;
+ DAP = I.getPair();
+ Result = FD;
+ };
+
// We have more than one result - see if it is more constrained than the
// previous one.
if (Result) {
- Optional<bool> MoreConstrainedThanPrevious = CheckMoreConstrained(FD,
- Result);
+ // Check CUDA preference first. If the candidates have differennt CUDA
+ // preference, choose the one with higher CUDA preference. Otherwise,
+ // choose the one with more constraints.
+ if (getLangOpts().CUDA) {
+ int PreferenceByCUDA = CheckCUDAPreference(FD, Result);
+ // FD has different preference than Result.
+ if (PreferenceByCUDA != 0) {
+ // FD is more preferable than Result.
+ if (PreferenceByCUDA > 0)
+ FoundBetter();
+ continue;
+ }
+ }
+ // FD has the same CUDA prefernece than Result. Continue check
+ // constraints.
+ std::optional<bool> MoreConstrainedThanPrevious =
+ CheckMoreConstrained(FD, Result);
if (!MoreConstrainedThanPrevious) {
IsResultAmbiguous = true;
AmbiguousDecls.push_back(FD);
@@ -12369,9 +13194,7 @@ Sema::resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &Pair) {
continue;
// FD is more constrained - replace Result with it.
}
- IsResultAmbiguous = false;
- DAP = I.getPair();
- Result = FD;
+ FoundBetter();
}
if (IsResultAmbiguous)
@@ -12381,9 +13204,15 @@ Sema::resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &Pair) {
SmallVector<const Expr *, 1> ResultAC;
// We skipped over some ambiguous declarations which might be ambiguous with
// the selected result.
- for (FunctionDecl *Skipped : AmbiguousDecls)
- if (!CheckMoreConstrained(Skipped, Result).hasValue())
+ for (FunctionDecl *Skipped : AmbiguousDecls) {
+ // If skipped candidate has different CUDA preference than the result,
+ // there is no ambiguity. Otherwise check whether they have different
+ // constraints.
+ if (getLangOpts().CUDA && CheckCUDAPreference(Skipped, Result) != 0)
+ continue;
+ if (!CheckMoreConstrained(Skipped, Result))
return nullptr;
+ }
Pair = DAP;
}
return Result;
@@ -12397,7 +13226,7 @@ Sema::resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &Pair) {
/// Returns false if resolveAddressOfSingleOverloadCandidate fails.
/// Otherwise, returns true. This may emit diagnostics and return true.
bool Sema::resolveAndFixAddressOfSingleOverloadCandidate(
- ExprResult &SrcExpr, bool DoFunctionPointerConverion) {
+ ExprResult &SrcExpr, bool DoFunctionPointerConversion) {
Expr *E = SrcExpr.get();
assert(E->getType() == Context.OverloadTy && "SrcExpr must be an overload");
@@ -12412,8 +13241,11 @@ bool Sema::resolveAndFixAddressOfSingleOverloadCandidate(
// for both.
DiagnoseUseOfDecl(Found, E->getExprLoc());
CheckAddressOfMemberAccess(E, DAP);
- Expr *Fixed = FixOverloadedFunctionReference(E, DAP, Found);
- if (DoFunctionPointerConverion && Fixed->getType()->isFunctionType())
+ ExprResult Res = FixOverloadedFunctionReference(E, DAP, Found);
+ if (Res.isInvalid())
+ return false;
+ Expr *Fixed = Res.get();
+ if (DoFunctionPointerConversion && Fixed->getType()->isFunctionType())
SrcExpr = DefaultFunctionArrayConversion(Fixed, /*Diagnose=*/false);
else
SrcExpr = Fixed;
@@ -12430,10 +13262,9 @@ bool Sema::resolveAndFixAddressOfSingleOverloadCandidate(
///
/// If no template-ids are found, no diagnostics are emitted and NULL is
/// returned.
-FunctionDecl *
-Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
- bool Complain,
- DeclAccessPair *FoundResult) {
+FunctionDecl *Sema::ResolveSingleFunctionTemplateSpecialization(
+ OverloadExpr *ovl, bool Complain, DeclAccessPair *FoundResult,
+ TemplateSpecCandidateSet *FailedTSC) {
// C++ [over.over]p1:
// [...] [Note: any redundant set of parentheses surrounding the
// overloaded function name is ignored (5.1). ]
@@ -12447,7 +13278,6 @@ Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
TemplateArgumentListInfo ExplicitTemplateArgs;
ovl->copyTemplateArgumentsInto(ExplicitTemplateArgs);
- TemplateSpecCandidateSet FailedCandidates(ovl->getNameLoc());
// Look through all of the overloaded functions, searching for one
// whose type matches exactly.
@@ -12470,16 +13300,16 @@ Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
// function template specialization, which is added to the set of
// overloaded functions considered.
FunctionDecl *Specialization = nullptr;
- TemplateDeductionInfo Info(FailedCandidates.getLocation());
+ TemplateDeductionInfo Info(ovl->getNameLoc());
if (TemplateDeductionResult Result
= DeduceTemplateArguments(FunctionTemplate, &ExplicitTemplateArgs,
Specialization, Info,
/*IsAddressOfFunction*/true)) {
// Make a note of the failed deduction for diagnostics.
- // TODO: Actually use the failed-deduction info?
- FailedCandidates.addCandidate()
- .set(I.getPair(), FunctionTemplate->getTemplatedDecl(),
- MakeDeductionFailureInfo(Context, Result, Info));
+ if (FailedTSC)
+ FailedTSC->addCandidate().set(
+ I.getPair(), FunctionTemplate->getTemplatedDecl(),
+ MakeDeductionFailureInfo(Context, Result, Info));
continue;
}
@@ -12515,10 +13345,9 @@ Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
// expression, regardless of whether or not it succeeded. Always
// returns true if 'complain' is set.
bool Sema::ResolveAndFixSingleFunctionTemplateSpecialization(
- ExprResult &SrcExpr, bool doFunctionPointerConverion,
- bool complain, SourceRange OpRangeForComplaining,
- QualType DestTypeForComplaining,
- unsigned DiagIDForComplaining) {
+ ExprResult &SrcExpr, bool doFunctionPointerConversion, bool complain,
+ SourceRange OpRangeForComplaining, QualType DestTypeForComplaining,
+ unsigned DiagIDForComplaining) {
assert(SrcExpr.get()->getType() == Context.OverloadTy);
OverloadExpr::FindResult ovl = OverloadExpr::find(SrcExpr.get());
@@ -12559,7 +13388,7 @@ bool Sema::ResolveAndFixSingleFunctionTemplateSpecialization(
FixOverloadedFunctionReference(SrcExpr.get(), found, fn);
// If desired, do function-to-pointer decay.
- if (doFunctionPointerConverion) {
+ if (doFunctionPointerConversion) {
SingleFunctionExpression =
DefaultFunctionArrayLvalueConversion(SingleFunctionExpression.get());
if (SingleFunctionExpression.isInvalid()) {
@@ -12607,7 +13436,7 @@ static void AddOverloadedCallCandidate(Sema &S,
return;
}
// Prevent ill-formed function decls to be added as overload candidates.
- if (!dyn_cast<FunctionProtoType>(Func->getType()->getAs<FunctionType>()))
+ if (!isa<FunctionProtoType>(Func->getType()->getAs<FunctionType>()))
return;
S.AddOverloadCandidate(Func, FoundDecl, Args, CandidateSet,
@@ -12836,17 +13665,16 @@ DiagnoseTwoPhaseOperatorLookup(Sema &SemaRef, OverloadedOperatorKind Op,
namespace {
class BuildRecoveryCallExprRAII {
Sema &SemaRef;
+ Sema::SatisfactionStackResetRAII SatStack;
+
public:
- BuildRecoveryCallExprRAII(Sema &S) : SemaRef(S) {
+ BuildRecoveryCallExprRAII(Sema &S) : SemaRef(S), SatStack(S) {
assert(SemaRef.IsBuildingRecoveryCallExpr == false);
SemaRef.IsBuildingRecoveryCallExpr = true;
}
- ~BuildRecoveryCallExprRAII() {
- SemaRef.IsBuildingRecoveryCallExpr = false;
- }
+ ~BuildRecoveryCallExprRAII() { SemaRef.IsBuildingRecoveryCallExpr = false; }
};
-
}
/// Attempts to recover from a call where no functions were found.
@@ -13019,7 +13847,7 @@ bool Sema::buildOverloadedCallSet(Scope *S, Expr *Fn,
// Guess at what the return type for an unresolvable overload should be.
static QualType chooseRecoveryType(OverloadCandidateSet &CS,
OverloadCandidateSet::iterator *Best) {
- llvm::Optional<QualType> Result;
+ std::optional<QualType> Result;
// Adjust Type after seeing a candidate.
auto ConsiderCandidate = [&](const OverloadCandidate &Candidate) {
if (!Candidate.Function)
@@ -13053,7 +13881,7 @@ static QualType chooseRecoveryType(OverloadCandidateSet &CS,
if (!Result)
return QualType();
- auto Value = Result.getValue();
+ auto Value = *Result;
if (Value.isNull() || Value->isUndeducedType())
return QualType();
return Value;
@@ -13078,10 +13906,13 @@ static ExprResult FinishOverloadedCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
SemaRef.CheckUnresolvedLookupAccess(ULE, (*Best)->FoundDecl);
if (SemaRef.DiagnoseUseOfDecl(FDecl, ULE->getNameLoc()))
return ExprError();
- Fn = SemaRef.FixOverloadedFunctionReference(Fn, (*Best)->FoundDecl, FDecl);
- return SemaRef.BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, RParenLoc,
- ExecConfig, /*IsExecConfig=*/false,
- (*Best)->IsADLCandidate);
+ ExprResult Res =
+ SemaRef.FixOverloadedFunctionReference(Fn, (*Best)->FoundDecl, FDecl);
+ if (Res.isInvalid())
+ return ExprError();
+ return SemaRef.BuildResolvedCallExpr(
+ Res.get(), FDecl, LParenLoc, Args, RParenLoc, ExecConfig,
+ /*IsExecConfig=*/false, (*Best)->IsADLCandidate);
}
case OR_No_Viable_Function: {
@@ -13136,10 +13967,13 @@ static ExprResult FinishOverloadedCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
// We emitted an error for the unavailable/deleted function call but keep
// the call in the AST.
FunctionDecl *FDecl = (*Best)->Function;
- Fn = SemaRef.FixOverloadedFunctionReference(Fn, (*Best)->FoundDecl, FDecl);
- return SemaRef.BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, RParenLoc,
- ExecConfig, /*IsExecConfig=*/false,
- (*Best)->IsADLCandidate);
+ ExprResult Res =
+ SemaRef.FixOverloadedFunctionReference(Fn, (*Best)->FoundDecl, FDecl);
+ if (Res.isInvalid())
+ return ExprError();
+ return SemaRef.BuildResolvedCallExpr(
+ Res.get(), FDecl, LParenLoc, Args, RParenLoc, ExecConfig,
+ /*IsExecConfig=*/false, (*Best)->IsADLCandidate);
}
}
@@ -13192,6 +14026,22 @@ ExprResult Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn,
OverloadingResult OverloadResult =
CandidateSet.BestViableFunction(*this, Fn->getBeginLoc(), Best);
+ // Model the case with a call to a templated function whose definition
+ // encloses the call and whose return type contains a placeholder type as if
+ // the UnresolvedLookupExpr was type-dependent.
+ if (OverloadResult == OR_Success) {
+ const FunctionDecl *FDecl = Best->Function;
+ if (FDecl && FDecl->isTemplateInstantiation() &&
+ FDecl->getReturnType()->isUndeducedType()) {
+ if (const auto *TP =
+ FDecl->getTemplateInstantiationPattern(/*ForDefinition=*/false);
+ TP && TP->willHaveBody()) {
+ return CallExpr::Create(Context, Fn, Args, Context.DependentTy,
+ VK_PRValue, RParenLoc, CurFPFeatureOverrides());
+ }
+ }
+ }
+
return FinishOverloadedCallExpr(*this, S, Fn, ULE, LParenLoc, Args, RParenLoc,
ExecConfig, &CandidateSet, &Best,
OverloadResult, AllowTypoCorrection);
@@ -13213,6 +14063,83 @@ ExprResult Sema::CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass,
Fns.begin(), Fns.end());
}
+ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
+ CXXConversionDecl *Method,
+ bool HadMultipleCandidates) {
+ // Convert the expression to match the conversion function's implicit object
+ // parameter.
+ ExprResult Exp;
+ if (Method->isExplicitObjectMemberFunction())
+ Exp = InitializeExplicitObjectArgument(*this, E, Method);
+ else
+ Exp = PerformImplicitObjectArgumentInitialization(E, /*Qualifier=*/nullptr,
+ FoundDecl, Method);
+ if (Exp.isInvalid())
+ return true;
+
+ if (Method->getParent()->isLambda() &&
+ Method->getConversionType()->isBlockPointerType()) {
+ // This is a lambda conversion to block pointer; check if the argument
+ // was a LambdaExpr.
+ Expr *SubE = E;
+ auto *CE = dyn_cast<CastExpr>(SubE);
+ if (CE && CE->getCastKind() == CK_NoOp)
+ SubE = CE->getSubExpr();
+ SubE = SubE->IgnoreParens();
+ if (auto *BE = dyn_cast<CXXBindTemporaryExpr>(SubE))
+ SubE = BE->getSubExpr();
+ if (isa<LambdaExpr>(SubE)) {
+ // For the conversion to block pointer on a lambda expression, we
+ // construct a special BlockLiteral instead; this doesn't really make
+ // a difference in ARC, but outside of ARC the resulting block literal
+ // follows the normal lifetime rules for block literals instead of being
+ // autoreleased.
+ PushExpressionEvaluationContext(
+ ExpressionEvaluationContext::PotentiallyEvaluated);
+ ExprResult BlockExp = BuildBlockForLambdaConversion(
+ Exp.get()->getExprLoc(), Exp.get()->getExprLoc(), Method, Exp.get());
+ PopExpressionEvaluationContext();
+
+ // FIXME: This note should be produced by a CodeSynthesisContext.
+ if (BlockExp.isInvalid())
+ Diag(Exp.get()->getExprLoc(), diag::note_lambda_to_block_conv);
+ return BlockExp;
+ }
+ }
+ CallExpr *CE;
+ QualType ResultType = Method->getReturnType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultType);
+ ResultType = ResultType.getNonLValueExprType(Context);
+ if (Method->isExplicitObjectMemberFunction()) {
+ ExprResult FnExpr =
+ CreateFunctionRefExpr(*this, Method, FoundDecl, Exp.get(),
+ HadMultipleCandidates, E->getBeginLoc());
+ if (FnExpr.isInvalid())
+ return ExprError();
+ Expr *ObjectParam = Exp.get();
+ CE = CallExpr::Create(Context, FnExpr.get(), MultiExprArg(&ObjectParam, 1),
+ ResultType, VK, Exp.get()->getEndLoc(),
+ CurFPFeatureOverrides());
+ } else {
+ MemberExpr *ME =
+ BuildMemberExpr(Exp.get(), /*IsArrow=*/false, SourceLocation(),
+ NestedNameSpecifierLoc(), SourceLocation(), Method,
+ DeclAccessPair::make(FoundDecl, FoundDecl->getAccess()),
+ HadMultipleCandidates, DeclarationNameInfo(),
+ Context.BoundMemberTy, VK_PRValue, OK_Ordinary);
+
+ CE = CXXMemberCallExpr::Create(Context, ME, /*Args=*/{}, ResultType, VK,
+ Exp.get()->getEndLoc(),
+ CurFPFeatureOverrides());
+ }
+
+ if (CheckFunctionCall(Method, CE,
+ Method->getType()->castAs<FunctionProtoType>()))
+ return ExprError();
+
+ return CheckForImmediateInvocation(CE, CE->getDirectCallee());
+}
+
/// Create a unary operation that may resolve to an overloaded
/// operator.
///
@@ -13307,14 +14234,17 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
// Convert the arguments.
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FnDecl)) {
- CheckMemberOperatorAccess(OpLoc, Args[0], nullptr, Best->FoundDecl);
+ CheckMemberOperatorAccess(OpLoc, Input, nullptr, Best->FoundDecl);
- ExprResult InputRes =
- PerformObjectArgumentInitialization(Input, /*Qualifier=*/nullptr,
- Best->FoundDecl, Method);
- if (InputRes.isInvalid())
+ ExprResult InputInit;
+ if (Method->isExplicitObjectMemberFunction())
+ InputInit = InitializeExplicitObjectArgument(*this, Input, Method);
+ else
+ InputInit = PerformImplicitObjectArgumentInitialization(
+ Input, /*Qualifier=*/nullptr, Best->FoundDecl, Method);
+ if (InputInit.isInvalid())
return ExprError();
- Base = Input = InputRes.get();
+ Base = Input = InputInit.get();
} else {
// Convert the arguments.
ExprResult InputInit
@@ -13389,12 +14319,17 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
return ExprError();
case OR_Deleted:
+ // CreateOverloadedUnaryOp fills the first element of ArgsArray with the
+ // object whose method was called. Later in NoteCandidates size of ArgsArray
+ // is passed further and it eventually ends up compared to number of
+ // function candidate parameters which never includes the object parameter,
+ // so slice ArgsArray to make sure apples are compared to apples.
CandidateSet.NoteCandidates(
PartialDiagnosticAt(OpLoc, PDiag(diag::err_ovl_deleted_oper)
<< UnaryOperator::getOpcodeStr(Opc)
<< Input->getSourceRange()),
- *this, OCD_AllCandidates, ArgsArray, UnaryOperator::getOpcodeStr(Opc),
- OpLoc);
+ *this, OCD_AllCandidates, ArgsArray.drop_front(),
+ UnaryOperator::getOpcodeStr(Opc), OpLoc);
return ExprError();
}
@@ -13422,14 +14357,14 @@ void Sema::LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
// Add operator candidates that are member functions.
AddMemberOperatorCandidates(Op, OpLoc, Args, CandidateSet);
- if (CandidateSet.getRewriteInfo().shouldAddReversed(Op))
+ if (CandidateSet.getRewriteInfo().allowsReversed(Op))
AddMemberOperatorCandidates(Op, OpLoc, {Args[1], Args[0]}, CandidateSet,
OverloadCandidateParamOrder::Reversed);
// In C++20, also add any rewritten member candidates.
if (ExtraOp) {
AddMemberOperatorCandidates(ExtraOp, OpLoc, Args, CandidateSet);
- if (CandidateSet.getRewriteInfo().shouldAddReversed(ExtraOp))
+ if (CandidateSet.getRewriteInfo().allowsReversed(ExtraOp))
AddMemberOperatorCandidates(ExtraOp, OpLoc, {Args[1], Args[0]},
CandidateSet,
OverloadCandidateParamOrder::Reversed);
@@ -13535,6 +14470,23 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
CurFPFeatureOverrides());
}
+ // If this is the .* operator, which is not overloadable, just
+ // create a built-in binary operator.
+ if (Opc == BO_PtrMemD) {
+ auto CheckPlaceholder = [&](Expr *&Arg) {
+ ExprResult Res = CheckPlaceholderExpr(Arg);
+ if (Res.isUsable())
+ Arg = Res.get();
+ return !Res.isUsable();
+ };
+
+ // CreateBuiltinBinOp() doesn't like it if we tell it to create a '.*'
+ // expression that contains placeholders (in either the LHS or RHS).
+ if (CheckPlaceholder(Args[0]) || CheckPlaceholder(Args[1]))
+ return ExprError();
+ return CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]);
+ }
+
// Always do placeholder-like conversions on the RHS.
if (checkPlaceholderForOverload(*this, Args[1]))
return ExprError();
@@ -13554,15 +14506,10 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
if (Opc == BO_Assign && !Args[0]->getType()->isOverloadableType())
return CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]);
- // If this is the .* operator, which is not overloadable, just
- // create a built-in binary operator.
- if (Opc == BO_PtrMemD)
- return CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]);
-
// Build the overload set.
- OverloadCandidateSet CandidateSet(
- OpLoc, OverloadCandidateSet::CSK_Operator,
- OverloadCandidateSet::OperatorRewriteInfo(Op, AllowRewrittenCandidates));
+ OverloadCandidateSet CandidateSet(OpLoc, OverloadCandidateSet::CSK_Operator,
+ OverloadCandidateSet::OperatorRewriteInfo(
+ Op, OpLoc, AllowRewrittenCandidates));
if (DefaultedFn)
CandidateSet.exclude(DefaultedFn);
LookupOverloadedBinOp(CandidateSet, Op, Fns, Args, PerformADL);
@@ -13581,6 +14528,10 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
std::swap(Args[0], Args[1]);
if (FnDecl) {
+
+ if (FnDecl->isInvalidDecl())
+ return ExprError();
+
Expr *Base = nullptr;
// We matched an overloaded operator. Build a call to that
// operator.
@@ -13613,7 +14564,7 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
llvm::SmallVector<FunctionDecl*, 4> AmbiguousWith;
for (OverloadCandidate &Cand : CandidateSet) {
if (Cand.Viable && Cand.Function && Cand.isReversed() &&
- haveSameParameterTypes(Context, Cand.Function, FnDecl, 2)) {
+ allowAmbiguity(Context, Cand.Function, FnDecl)) {
for (unsigned ArgIdx = 0; ArgIdx < 2; ++ArgIdx) {
if (CompareImplicitConversionSequences(
*this, OpLoc, Cand.Conversions[ArgIdx],
@@ -13637,6 +14588,25 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
if (AmbiguousWithSelf) {
Diag(FnDecl->getLocation(),
diag::note_ovl_ambiguous_oper_binary_reversed_self);
+ // Mark member== const or provide matching != to disallow reversed
+ // args. Eg.
+ // struct S { bool operator==(const S&); };
+ // S()==S();
+ if (auto *MD = dyn_cast<CXXMethodDecl>(FnDecl))
+ if (Op == OverloadedOperatorKind::OO_EqualEqual &&
+ !MD->isConst() &&
+ !MD->hasCXXExplicitFunctionObjectParameter() &&
+ Context.hasSameUnqualifiedType(
+ MD->getFunctionObjectParameterType(),
+ MD->getParamDecl(0)->getType().getNonReferenceType()) &&
+ Context.hasSameUnqualifiedType(
+ MD->getFunctionObjectParameterType(),
+ Args[0]->getType()) &&
+ Context.hasSameUnqualifiedType(
+ MD->getFunctionObjectParameterType(),
+ Args[1]->getType()))
+ Diag(FnDecl->getLocation(),
+ diag::note_ovl_ambiguous_eqeq_reversed_self_non_const);
} else {
Diag(FnDecl->getLocation(),
diag::note_ovl_ambiguous_oper_binary_selected_candidate);
@@ -13652,19 +14622,22 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
// Best->Access is only meaningful for class members.
CheckMemberOperatorAccess(OpLoc, Args[0], Args[1], Best->FoundDecl);
- ExprResult Arg1 =
- PerformCopyInitialization(
- InitializedEntity::InitializeParameter(Context,
- FnDecl->getParamDecl(0)),
+ ExprResult Arg0, Arg1;
+ unsigned ParamIdx = 0;
+ if (Method->isExplicitObjectMemberFunction()) {
+ Arg0 = InitializeExplicitObjectArgument(*this, Args[0], FnDecl);
+ ParamIdx = 1;
+ } else {
+ Arg0 = PerformImplicitObjectArgumentInitialization(
+ Args[0], /*Qualifier=*/nullptr, Best->FoundDecl, Method);
+ }
+ Arg1 = PerformCopyInitialization(
+ InitializedEntity::InitializeParameter(
+ Context, FnDecl->getParamDecl(ParamIdx)),
SourceLocation(), Args[1]);
- if (Arg1.isInvalid())
+ if (Arg0.isInvalid() || Arg1.isInvalid())
return ExprError();
- ExprResult Arg0 =
- PerformObjectArgumentInitialization(Args[0], /*Qualifier=*/nullptr,
- Best->FoundDecl, Method);
- if (Arg0.isInvalid())
- return ExprError();
Base = Args[0] = Arg0.getAs<Expr>();
Args[1] = RHS = Arg1.getAs<Expr>();
} else {
@@ -13699,22 +14672,27 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
ExprValueKind VK = Expr::getValueKindForType(ResultTy);
ResultTy = ResultTy.getNonLValueExprType(Context);
- CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
+ CallExpr *TheCall;
+ ArrayRef<const Expr *> ArgsArray(Args, 2);
+ const Expr *ImplicitThis = nullptr;
+
+ // We always create a CXXOperatorCallExpr, even for explicit object
+ // members; CodeGen should take care not to emit the this pointer.
+ TheCall = CXXOperatorCallExpr::Create(
Context, ChosenOp, FnExpr.get(), Args, ResultTy, VK, OpLoc,
CurFPFeatureOverrides(), Best->IsADLCandidate);
- if (CheckCallReturnType(FnDecl->getReturnType(), OpLoc, TheCall,
- FnDecl))
- return ExprError();
-
- ArrayRef<const Expr *> ArgsArray(Args, 2);
- const Expr *ImplicitThis = nullptr;
- // Cut off the implicit 'this'.
- if (isa<CXXMethodDecl>(FnDecl)) {
+ if (const auto *Method = dyn_cast<CXXMethodDecl>(FnDecl);
+ Method && Method->isImplicitObjectMemberFunction()) {
+ // Cut off the implicit 'this'.
ImplicitThis = ArgsArray[0];
ArgsArray = ArgsArray.slice(1);
}
+ if (CheckCallReturnType(FnDecl->getReturnType(), OpLoc, TheCall,
+ FnDecl))
+ return ExprError();
+
// Check for a self move.
if (Op == OO_Equal)
DiagnoseSelfMove(Args[0], Args[1], OpLoc);
@@ -13722,7 +14700,7 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
if (ImplicitThis) {
QualType ThisType = Context.getPointerType(ImplicitThis->getType());
QualType ThisTypeFromDecl = Context.getPointerType(
- cast<CXXMethodDecl>(FnDecl)->getThisObjectType());
+ cast<CXXMethodDecl>(FnDecl)->getFunctionObjectParameterType());
CheckArgAlignment(OpLoc, FnDecl, "'this'", ThisType,
ThisTypeFromDecl);
@@ -13760,7 +14738,7 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
R = CreateOverloadedBinOp(
OpLoc, Opc, Fns, IsReversed ? ZeroLiteral : R.get(),
- IsReversed ? R.get() : ZeroLiteral, PerformADL,
+ IsReversed ? R.get() : ZeroLiteral, /*PerformADL=*/true,
/*AllowRewrittenCandidates=*/false);
popCodeSynthesisContext();
@@ -13989,17 +14967,65 @@ ExprResult Sema::BuildSynthesizedThreeWayComparison(
return PseudoObjectExpr::Create(Context, SyntacticForm, SemanticForm, 2);
}
-ExprResult
-Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
- SourceLocation RLoc,
- Expr *Base, Expr *Idx) {
- Expr *Args[2] = { Base, Idx };
+static bool PrepareArgumentsForCallToObjectOfClassType(
+ Sema &S, SmallVectorImpl<Expr *> &MethodArgs, CXXMethodDecl *Method,
+ MultiExprArg Args, SourceLocation LParenLoc) {
+
+ const auto *Proto = Method->getType()->castAs<FunctionProtoType>();
+ unsigned NumParams = Proto->getNumParams();
+ unsigned NumArgsSlots =
+ MethodArgs.size() + std::max<unsigned>(Args.size(), NumParams);
+ // Build the full argument list for the method call (the implicit object
+ // parameter is placed at the beginning of the list).
+ MethodArgs.reserve(MethodArgs.size() + NumArgsSlots);
+ bool IsError = false;
+ // Initialize the implicit object parameter.
+ // Check the argument types.
+ for (unsigned i = 0; i != NumParams; i++) {
+ Expr *Arg;
+ if (i < Args.size()) {
+ Arg = Args[i];
+ ExprResult InputInit =
+ S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
+ S.Context, Method->getParamDecl(i)),
+ SourceLocation(), Arg);
+ IsError |= InputInit.isInvalid();
+ Arg = InputInit.getAs<Expr>();
+ } else {
+ ExprResult DefArg =
+ S.BuildCXXDefaultArgExpr(LParenLoc, Method, Method->getParamDecl(i));
+ if (DefArg.isInvalid()) {
+ IsError = true;
+ break;
+ }
+ Arg = DefArg.getAs<Expr>();
+ }
+
+ MethodArgs.push_back(Arg);
+ }
+ return IsError;
+}
+
+ExprResult Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
+ SourceLocation RLoc,
+ Expr *Base,
+ MultiExprArg ArgExpr) {
+ SmallVector<Expr *, 2> Args;
+ Args.push_back(Base);
+ for (auto *e : ArgExpr) {
+ Args.push_back(e);
+ }
DeclarationName OpName =
Context.DeclarationNames.getCXXOperatorName(OO_Subscript);
+ SourceRange Range = ArgExpr.empty()
+ ? SourceRange{}
+ : SourceRange(ArgExpr.front()->getBeginLoc(),
+ ArgExpr.back()->getEndLoc());
+
// If either side is type-dependent, create an appropriate dependent
// expression.
- if (Args[0]->isTypeDependent() || Args[1]->isTypeDependent()) {
+ if (Expr::hasAnyTypeDependentArguments(Args)) {
CXXRecordDecl *NamingClass = nullptr; // lookup ignores member operators
// CHECKME: no 'operator' keyword?
@@ -14016,12 +15042,11 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
CurFPFeatureOverrides());
}
- // Handle placeholders on both operands.
- if (checkPlaceholderForOverload(*this, Args[0]))
- return ExprError();
- if (checkPlaceholderForOverload(*this, Args[1]))
+ // Handle placeholders
+ UnbridgedCastsSet UnbridgedCasts;
+ if (checkArgPlaceholdersForOverload(*this, Args, UnbridgedCasts)) {
return ExprError();
-
+ }
// Build an empty overload set.
OverloadCandidateSet CandidateSet(LLoc, OverloadCandidateSet::CSK_Operator);
@@ -14031,7 +15056,8 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
AddMemberOperatorCandidates(OO_Subscript, LLoc, Args, CandidateSet);
// Add builtin operator candidates.
- AddBuiltinOperatorCandidates(OO_Subscript, LLoc, Args, CandidateSet);
+ if (Args.size() == 2)
+ AddBuiltinOperatorCandidates(OO_Subscript, LLoc, Args, CandidateSet);
bool HadMultipleCandidates = (CandidateSet.size() > 1);
@@ -14046,38 +15072,40 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
// We matched an overloaded operator. Build a call to that
// operator.
- CheckMemberOperatorAccess(LLoc, Args[0], Args[1], Best->FoundDecl);
+ CheckMemberOperatorAccess(LLoc, Args[0], ArgExpr, Best->FoundDecl);
// Convert the arguments.
CXXMethodDecl *Method = cast<CXXMethodDecl>(FnDecl);
- ExprResult Arg0 =
- PerformObjectArgumentInitialization(Args[0], /*Qualifier=*/nullptr,
- Best->FoundDecl, Method);
- if (Arg0.isInvalid())
- return ExprError();
- Args[0] = Arg0.get();
+ SmallVector<Expr *, 2> MethodArgs;
- // Convert the arguments.
- ExprResult InputInit
- = PerformCopyInitialization(InitializedEntity::InitializeParameter(
- Context,
- FnDecl->getParamDecl(0)),
- SourceLocation(),
- Args[1]);
- if (InputInit.isInvalid())
- return ExprError();
+ // Initialize the object parameter.
+ if (Method->isExplicitObjectMemberFunction()) {
+ ExprResult Res =
+ InitializeExplicitObjectArgument(*this, Args[0], Method);
+ if (Res.isInvalid())
+ return ExprError();
+ Args[0] = Res.get();
+ ArgExpr = Args;
+ } else {
+ ExprResult Arg0 = PerformImplicitObjectArgumentInitialization(
+ Args[0], /*Qualifier=*/nullptr, Best->FoundDecl, Method);
+ if (Arg0.isInvalid())
+ return ExprError();
- Args[1] = InputInit.getAs<Expr>();
+ MethodArgs.push_back(Arg0.get());
+ }
+
+ bool IsError = PrepareArgumentsForCallToObjectOfClassType(
+ *this, MethodArgs, Method, ArgExpr, LLoc);
+ if (IsError)
+ return ExprError();
// Build the actual expression node.
DeclarationNameInfo OpLocInfo(OpName, LLoc);
OpLocInfo.setCXXOperatorNameRange(SourceRange(LLoc, RLoc));
- ExprResult FnExpr = CreateFunctionRefExpr(*this, FnDecl,
- Best->FoundDecl,
- Base,
- HadMultipleCandidates,
- OpLocInfo.getLoc(),
- OpLocInfo.getInfo());
+ ExprResult FnExpr = CreateFunctionRefExpr(
+ *this, FnDecl, Best->FoundDecl, Base, HadMultipleCandidates,
+ OpLocInfo.getLoc(), OpLocInfo.getInfo());
if (FnExpr.isInvalid())
return ExprError();
@@ -14086,9 +15114,10 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
ExprValueKind VK = Expr::getValueKindForType(ResultTy);
ResultTy = ResultTy.getNonLValueExprType(Context);
- CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
- Context, OO_Subscript, FnExpr.get(), Args, ResultTy, VK, RLoc,
+ CallExpr *TheCall = CXXOperatorCallExpr::Create(
+ Context, OO_Subscript, FnExpr.get(), MethodArgs, ResultTy, VK, RLoc,
CurFPFeatureOverrides());
+
if (CheckCallReturnType(FnDecl->getReturnType(), LLoc, TheCall, FnDecl))
return ExprError();
@@ -14096,7 +15125,8 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
Method->getType()->castAs<FunctionProtoType>()))
return ExprError();
- return MaybeBindToTemporary(TheCall);
+ return CheckForImmediateInvocation(MaybeBindToTemporary(TheCall),
+ FnDecl);
} else {
// We matched a built-in operator. Convert the arguments, then
// break out so that we will build the appropriate built-in
@@ -14120,33 +15150,41 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
}
case OR_No_Viable_Function: {
- PartialDiagnostic PD = CandidateSet.empty()
- ? (PDiag(diag::err_ovl_no_oper)
- << Args[0]->getType() << /*subscript*/ 0
- << Args[0]->getSourceRange() << Args[1]->getSourceRange())
- : (PDiag(diag::err_ovl_no_viable_subscript)
- << Args[0]->getType() << Args[0]->getSourceRange()
- << Args[1]->getSourceRange());
+ PartialDiagnostic PD =
+ CandidateSet.empty()
+ ? (PDiag(diag::err_ovl_no_oper)
+ << Args[0]->getType() << /*subscript*/ 0
+ << Args[0]->getSourceRange() << Range)
+ : (PDiag(diag::err_ovl_no_viable_subscript)
+ << Args[0]->getType() << Args[0]->getSourceRange() << Range);
CandidateSet.NoteCandidates(PartialDiagnosticAt(LLoc, PD), *this,
- OCD_AllCandidates, Args, "[]", LLoc);
+ OCD_AllCandidates, ArgExpr, "[]", LLoc);
return ExprError();
}
case OR_Ambiguous:
- CandidateSet.NoteCandidates(
- PartialDiagnosticAt(LLoc, PDiag(diag::err_ovl_ambiguous_oper_binary)
- << "[]" << Args[0]->getType()
- << Args[1]->getType()
- << Args[0]->getSourceRange()
- << Args[1]->getSourceRange()),
- *this, OCD_AmbiguousCandidates, Args, "[]", LLoc);
+ if (Args.size() == 2) {
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(
+ LLoc, PDiag(diag::err_ovl_ambiguous_oper_binary)
+ << "[]" << Args[0]->getType() << Args[1]->getType()
+ << Args[0]->getSourceRange() << Range),
+ *this, OCD_AmbiguousCandidates, Args, "[]", LLoc);
+ } else {
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(LLoc,
+ PDiag(diag::err_ovl_ambiguous_subscript_call)
+ << Args[0]->getType()
+ << Args[0]->getSourceRange() << Range),
+ *this, OCD_AmbiguousCandidates, Args, "[]", LLoc);
+ }
return ExprError();
case OR_Deleted:
CandidateSet.NoteCandidates(
PartialDiagnosticAt(LLoc, PDiag(diag::err_ovl_deleted_oper)
<< "[]" << Args[0]->getSourceRange()
- << Args[1]->getSourceRange()),
+ << Range),
*this, OCD_AllCandidates, Args, "[]", LLoc);
return ExprError();
}
@@ -14166,6 +15204,7 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
+ Expr *ExecConfig, bool IsExecConfig,
bool AllowRecovery) {
assert(MemExprE->getType() == Context.BoundMemberTy ||
MemExprE->getType() == Context.OverloadTy);
@@ -14230,7 +15269,7 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
if (!AllowRecovery)
return ExprError();
std::vector<Expr *> SubExprs = {MemExprE};
- llvm::for_each(Args, [&SubExprs](Expr *E) { SubExprs.push_back(E); });
+ llvm::append_range(SubExprs, Args);
return CreateRecoveryExpr(MemExprE->getBeginLoc(), RParenLoc, SubExprs,
Type);
};
@@ -14244,6 +15283,7 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
MemberExpr *MemExpr;
CXXMethodDecl *Method = nullptr;
+ bool HadMultipleCandidates = false;
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_public);
NestedNameSpecifier *Qualifier = nullptr;
if (isa<MemberExpr>(NakedMemExpr)) {
@@ -14275,11 +15315,24 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
for (UnresolvedMemberExpr::decls_iterator I = UnresExpr->decls_begin(),
E = UnresExpr->decls_end(); I != E; ++I) {
+ QualType ExplicitObjectType = ObjectType;
+
NamedDecl *Func = *I;
CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(Func->getDeclContext());
if (isa<UsingShadowDecl>(Func))
Func = cast<UsingShadowDecl>(Func)->getTargetDecl();
+ bool HasExplicitParameter = false;
+ if (const auto *M = dyn_cast<FunctionDecl>(Func);
+ M && M->hasCXXExplicitFunctionObjectParameter())
+ HasExplicitParameter = true;
+ else if (const auto *M = dyn_cast<FunctionTemplateDecl>(Func);
+ M &&
+ M->getTemplatedDecl()->hasCXXExplicitFunctionObjectParameter())
+ HasExplicitParameter = true;
+
+ if (HasExplicitParameter)
+ ExplicitObjectType = GetExplicitObjectType(*this, UnresExpr);
// Microsoft supports direct constructor calls.
if (getLangOpts().MicrosoftExt && isa<CXXConstructorDecl>(Func)) {
@@ -14292,17 +15345,20 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
if (TemplateArgs)
continue;
- AddMethodCandidate(Method, I.getPair(), ActingDC, ObjectType,
+ AddMethodCandidate(Method, I.getPair(), ActingDC, ExplicitObjectType,
ObjectClassification, Args, CandidateSet,
/*SuppressUserConversions=*/false);
} else {
- AddMethodTemplateCandidate(
- cast<FunctionTemplateDecl>(Func), I.getPair(), ActingDC,
- TemplateArgs, ObjectType, ObjectClassification, Args, CandidateSet,
- /*SuppressUserConversions=*/false);
+ AddMethodTemplateCandidate(cast<FunctionTemplateDecl>(Func),
+ I.getPair(), ActingDC, TemplateArgs,
+ ExplicitObjectType, ObjectClassification,
+ Args, CandidateSet,
+ /*SuppressUserConversions=*/false);
}
}
+ HadMultipleCandidates = (CandidateSet.size() > 1);
+
DeclarationName DeclName = UnresExpr->getMemberName();
UnbridgedCasts.restore();
@@ -14315,7 +15371,7 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
Method = cast<CXXMethodDecl>(Best->Function);
FoundDecl = Best->FoundDecl;
CheckUnresolvedMemberAccess(UnresExpr, Best->FoundDecl);
- if (DiagnoseUseOfDecl(Best->FoundDecl, UnresExpr->getNameLoc()))
+ if (DiagnoseUseOfOverloadedDecl(Best->FoundDecl, UnresExpr->getNameLoc()))
break;
// If FoundDecl is different from Method (such as if one is a template
// and the other a specialization), make sure DiagnoseUseOfDecl is
@@ -14324,7 +15380,7 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
// DiagnoseUseOfDecl to accept both the FoundDecl and the decl
// being used.
if (Method != FoundDecl.getDecl() &&
- DiagnoseUseOfDecl(Method, UnresExpr->getNameLoc()))
+ DiagnoseUseOfOverloadedDecl(Method, UnresExpr->getNameLoc()))
break;
Succeeded = true;
break;
@@ -14356,13 +15412,17 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
if (!Succeeded)
return BuildRecoveryExpr(chooseRecoveryType(CandidateSet, &Best));
- MemExprE = FixOverloadedFunctionReference(MemExprE, FoundDecl, Method);
+ ExprResult Res =
+ FixOverloadedFunctionReference(MemExprE, FoundDecl, Method);
+ if (Res.isInvalid())
+ return ExprError();
+ MemExprE = Res.get();
- // If overload resolution picked a static member, build a
- // non-member call based on that function.
+ // If overload resolution picked a static member
+ // build a non-member call based on that function.
if (Method->isStatic()) {
- return BuildResolvedCallExpr(MemExprE, Method, LParenLoc, Args,
- RParenLoc);
+ return BuildResolvedCallExpr(MemExprE, Method, LParenLoc, Args, RParenLoc,
+ ExecConfig, IsExecConfig);
}
MemExpr = cast<MemberExpr>(MemExprE->IgnoreParens());
@@ -14374,27 +15434,41 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
assert(Method && "Member call to something that isn't a method?");
const auto *Proto = Method->getType()->castAs<FunctionProtoType>();
- CXXMemberCallExpr *TheCall = CXXMemberCallExpr::Create(
- Context, MemExprE, Args, ResultType, VK, RParenLoc,
- CurFPFeatureOverrides(), Proto->getNumParams());
- // Check for a valid return type.
- if (CheckCallReturnType(Method->getReturnType(), MemExpr->getMemberLoc(),
- TheCall, Method))
- return BuildRecoveryExpr(ResultType);
+ CallExpr *TheCall = nullptr;
+ llvm::SmallVector<Expr *, 8> NewArgs;
+ if (Method->isExplicitObjectMemberFunction()) {
+ PrepareExplicitObjectArgument(*this, Method, MemExpr->getBase(), Args,
+ NewArgs);
+ // Build the actual expression node.
+ ExprResult FnExpr =
+ CreateFunctionRefExpr(*this, Method, FoundDecl, MemExpr,
+ HadMultipleCandidates, MemExpr->getExprLoc());
+ if (FnExpr.isInvalid())
+ return ExprError();
- // Convert the object argument (for a non-static member function call).
- // We only need to do this if there was actually an overload; otherwise
- // it was done at lookup.
- if (!Method->isStatic()) {
- ExprResult ObjectArg =
- PerformObjectArgumentInitialization(MemExpr->getBase(), Qualifier,
- FoundDecl, Method);
+ TheCall =
+ CallExpr::Create(Context, FnExpr.get(), Args, ResultType, VK, RParenLoc,
+ CurFPFeatureOverrides(), Proto->getNumParams());
+ } else {
+ // Convert the object argument (for a non-static member function call).
+ // We only need to do this if there was actually an overload; otherwise
+ // it was done at lookup.
+ ExprResult ObjectArg = PerformImplicitObjectArgumentInitialization(
+ MemExpr->getBase(), Qualifier, FoundDecl, Method);
if (ObjectArg.isInvalid())
return ExprError();
MemExpr->setBase(ObjectArg.get());
+ TheCall = CXXMemberCallExpr::Create(Context, MemExprE, Args, ResultType, VK,
+ RParenLoc, CurFPFeatureOverrides(),
+ Proto->getNumParams());
}
+ // Check for a valid return type.
+ if (CheckCallReturnType(Method->getReturnType(), MemExpr->getMemberLoc(),
+ TheCall, Method))
+ return BuildRecoveryExpr(ResultType);
+
// Convert the rest of the arguments
if (ConvertArgumentsForCall(TheCall, MemExpr, Method, Proto, Args,
RParenLoc))
@@ -14421,10 +15495,9 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
}
}
- if ((isa<CXXConstructorDecl>(CurContext) ||
- isa<CXXDestructorDecl>(CurContext)) &&
- TheCall->getMethodDecl()->isPure()) {
- const CXXMethodDecl *MD = TheCall->getMethodDecl();
+ if (isa<CXXConstructorDecl, CXXDestructorDecl>(CurContext) &&
+ TheCall->getDirectCallee()->isPureVirtual()) {
+ const FunctionDecl *MD = TheCall->getDirectCallee();
if (isa<CXXThisExpr>(MemExpr->getBase()->IgnoreParenCasts()) &&
MemExpr->performsVirtualDispatch(getLangOpts())) {
@@ -14440,8 +15513,7 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
}
}
- if (CXXDestructorDecl *DD =
- dyn_cast<CXXDestructorDecl>(TheCall->getMethodDecl())) {
+ if (auto *DD = dyn_cast<CXXDestructorDecl>(TheCall->getDirectCallee())) {
// a->A::f() doesn't go through the vtable, except in AppleKext mode.
bool CallCanBeVirtual = !MemExpr->hasQualifier() || getLangOpts().AppleKext;
CheckVirtualDtorCall(DD, MemExpr->getBeginLoc(), /*IsDelete=*/false,
@@ -14450,7 +15522,7 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
}
return CheckForImmediateInvocation(MaybeBindToTemporary(TheCall),
- TheCall->getMethodDecl());
+ TheCall->getDirectCallee());
}
/// BuildCallToObjectOfClassType - Build a call to an object of class
@@ -14491,7 +15563,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
const auto *Record = Object.get()->getType()->castAs<RecordType>();
LookupResult R(*this, OpName, LParenLoc, LookupOrdinaryName);
LookupQualifiedName(R, Record->getDecl());
- R.suppressDiagnostics();
+ R.suppressAccessDiagnostics();
for (LookupResult::iterator Oper = R.begin(), OperEnd = R.end();
Oper != OperEnd; ++Oper) {
@@ -14500,6 +15572,22 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
/*SuppressUserConversion=*/false);
}
+ // When calling a lambda, both the call operator, and
+ // the conversion operator to function pointer
+ // are considered. But when constraint checking
+ // on the call operator fails, it will also fail on the
+ // conversion operator as the constraints are always the same.
+ // As the user probably does not intend to perform a surrogate call,
+ // we filter them out to produce better error diagnostics, ie to avoid
+ // showing 2 failed overloads instead of one.
+ bool IgnoreSurrogateFunctions = false;
+ if (CandidateSet.size() == 1 && Record->getAsCXXRecordDecl()->isLambda()) {
+ const OverloadCandidate &Candidate = *CandidateSet.begin();
+ if (!Candidate.Viable &&
+ Candidate.FailureKind == ovl_fail_constraints_not_satisfied)
+ IgnoreSurrogateFunctions = true;
+ }
+
// C++ [over.call.object]p2:
// In addition, for each (non-explicit in C++0x) conversion function
// declared in T of the form
@@ -14519,7 +15607,8 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
// within T by another intervening declaration.
const auto &Conversions =
cast<CXXRecordDecl>(Record->getDecl())->getVisibleConversionFunctions();
- for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) {
+ for (auto I = Conversions.begin(), E = Conversions.end();
+ !IgnoreSurrogateFunctions && I != E; ++I) {
NamedDecl *D = *I;
CXXRecordDecl *ActingContext = cast<CXXRecordDecl>(D->getDeclContext());
if (isa<UsingShadowDecl>(D))
@@ -14571,12 +15660,13 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
break;
}
case OR_Ambiguous:
- CandidateSet.NoteCandidates(
- PartialDiagnosticAt(Object.get()->getBeginLoc(),
- PDiag(diag::err_ovl_ambiguous_object_call)
- << Object.get()->getType()
- << Object.get()->getSourceRange()),
- *this, OCD_AmbiguousCandidates, Args);
+ if (!R.isAmbiguous())
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(Object.get()->getBeginLoc(),
+ PDiag(diag::err_ovl_ambiguous_object_call)
+ << Object.get()->getType()
+ << Object.get()->getSourceRange()),
+ *this, OCD_AmbiguousCandidates, Args);
break;
case OR_Deleted:
@@ -14649,57 +15739,30 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
if (NewFn.isInvalid())
return true;
- // The number of argument slots to allocate in the call. If we have default
- // arguments we need to allocate space for them as well. We additionally
- // need one more slot for the object parameter.
- unsigned NumArgsSlots = 1 + std::max<unsigned>(Args.size(), NumParams);
-
- // Build the full argument list for the method call (the implicit object
- // parameter is placed at the beginning of the list).
- SmallVector<Expr *, 8> MethodArgs(NumArgsSlots);
+ SmallVector<Expr *, 8> MethodArgs;
+ MethodArgs.reserve(NumParams + 1);
bool IsError = false;
- // Initialize the implicit object parameter.
- ExprResult ObjRes =
- PerformObjectArgumentInitialization(Object.get(), /*Qualifier=*/nullptr,
- Best->FoundDecl, Method);
- if (ObjRes.isInvalid())
- IsError = true;
- else
- Object = ObjRes;
- MethodArgs[0] = Object.get();
-
- // Check the argument types.
- for (unsigned i = 0; i != NumParams; i++) {
- Expr *Arg;
- if (i < Args.size()) {
- Arg = Args[i];
-
- // Pass the argument.
-
- ExprResult InputInit
- = PerformCopyInitialization(InitializedEntity::InitializeParameter(
- Context,
- Method->getParamDecl(i)),
- SourceLocation(), Arg);
-
- IsError |= InputInit.isInvalid();
- Arg = InputInit.getAs<Expr>();
- } else {
- ExprResult DefArg
- = BuildCXXDefaultArgExpr(LParenLoc, Method, Method->getParamDecl(i));
- if (DefArg.isInvalid()) {
- IsError = true;
- break;
- }
-
- Arg = DefArg.getAs<Expr>();
- }
-
- MethodArgs[i + 1] = Arg;
+ // Initialize the object parameter.
+ llvm::SmallVector<Expr *, 8> NewArgs;
+ if (Method->isExplicitObjectMemberFunction()) {
+ // FIXME: we should do that during the definition of the lambda when we can.
+ DiagnoseInvalidExplicitObjectParameterInLambda(Method);
+ PrepareExplicitObjectArgument(*this, Method, Obj, Args, NewArgs);
+ } else {
+ ExprResult ObjRes = PerformImplicitObjectArgumentInitialization(
+ Object.get(), /*Qualifier=*/nullptr, Best->FoundDecl, Method);
+ if (ObjRes.isInvalid())
+ IsError = true;
+ else
+ Object = ObjRes;
+ MethodArgs.push_back(Object.get());
}
+ IsError |= PrepareArgumentsForCallToObjectOfClassType(
+ *this, MethodArgs, Method, Args, LParenLoc);
+
// If this is a variadic call, handle args passed through "...".
if (Proto->isVariadic()) {
// Promote the arguments (C99 6.5.2.2p7).
@@ -14707,7 +15770,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], VariadicMethod,
nullptr);
IsError |= Arg.isInvalid();
- MethodArgs[i + 1] = Arg.get();
+ MethodArgs.push_back(Arg.get());
}
}
@@ -14721,7 +15784,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
ExprValueKind VK = Expr::getValueKindForType(ResultTy);
ResultTy = ResultTy.getNonLValueExprType(Context);
- CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
+ CallExpr *TheCall = CXXOperatorCallExpr::Create(
Context, OO_Call, NewFn.get(), MethodArgs, ResultTy, VK, RParenLoc,
CurFPFeatureOverrides());
@@ -14764,12 +15827,13 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
LookupResult R(*this, OpName, OpLoc, LookupOrdinaryName);
LookupQualifiedName(R, Base->getType()->castAs<RecordType>()->getDecl());
- R.suppressDiagnostics();
+ R.suppressAccessDiagnostics();
for (LookupResult::iterator Oper = R.begin(), OperEnd = R.end();
Oper != OperEnd; ++Oper) {
AddMethodCandidate(Oper.getPair(), Base->getType(), Base->Classify(Context),
- None, CandidateSet, /*SuppressUserConversion=*/false);
+ std::nullopt, CandidateSet,
+ /*SuppressUserConversion=*/false);
}
bool HadMultipleCandidates = (CandidateSet.size() > 1);
@@ -14804,11 +15868,12 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
return ExprError();
}
case OR_Ambiguous:
- CandidateSet.NoteCandidates(
- PartialDiagnosticAt(OpLoc, PDiag(diag::err_ovl_ambiguous_oper_unary)
- << "->" << Base->getType()
- << Base->getSourceRange()),
- *this, OCD_AmbiguousCandidates, Base);
+ if (!R.isAmbiguous())
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(OpLoc, PDiag(diag::err_ovl_ambiguous_oper_unary)
+ << "->" << Base->getType()
+ << Base->getSourceRange()),
+ *this, OCD_AmbiguousCandidates, Base);
return ExprError();
case OR_Deleted:
@@ -14823,12 +15888,19 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
// Convert the object parameter.
CXXMethodDecl *Method = cast<CXXMethodDecl>(Best->Function);
- ExprResult BaseResult =
- PerformObjectArgumentInitialization(Base, /*Qualifier=*/nullptr,
- Best->FoundDecl, Method);
- if (BaseResult.isInvalid())
- return ExprError();
- Base = BaseResult.get();
+
+ if (Method->isExplicitObjectMemberFunction()) {
+ ExprResult R = InitializeExplicitObjectArgument(*this, Base, Method);
+ if (R.isInvalid())
+ return ExprError();
+ Base = R.get();
+ } else {
+ ExprResult BaseResult = PerformImplicitObjectArgumentInitialization(
+ Base, /*Qualifier=*/nullptr, Best->FoundDecl, Method);
+ if (BaseResult.isInvalid())
+ return ExprError();
+ Base = BaseResult.get();
+ }
// Build the operator call.
ExprResult FnExpr = CreateFunctionRefExpr(*this, Method, Best->FoundDecl,
@@ -14839,7 +15911,8 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
QualType ResultTy = Method->getReturnType();
ExprValueKind VK = Expr::getValueKindForType(ResultTy);
ResultTy = ResultTy.getNonLValueExprType(Context);
- CXXOperatorCallExpr *TheCall =
+
+ CallExpr *TheCall =
CXXOperatorCallExpr::Create(Context, OO_Arrow, FnExpr.get(), Base,
ResultTy, VK, OpLoc, CurFPFeatureOverrides());
@@ -14850,7 +15923,7 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
Method->getType()->castAs<FunctionProtoType>()))
return ExprError();
- return MaybeBindToTemporary(TheCall);
+ return CheckForImmediateInvocation(MaybeBindToTemporary(TheCall), Method);
}
/// BuildLiteralOperatorCall - Build a UserDefinedLiteral by creating a call to
@@ -14918,8 +15991,8 @@ ExprResult Sema::BuildLiteralOperatorCall(LookupResult &R,
ResultTy = ResultTy.getNonLValueExprType(Context);
UserDefinedLiteral *UDL = UserDefinedLiteral::Create(
- Context, Fn.get(), llvm::makeArrayRef(ConvArgs, Args.size()), ResultTy,
- VK, LitEndLoc, UDSuffixLoc, CurFPFeatureOverrides());
+ Context, Fn.get(), llvm::ArrayRef(ConvArgs, Args.size()), ResultTy, VK,
+ LitEndLoc, UDSuffixLoc, CurFPFeatureOverrides());
if (CheckCallReturnType(FD->getReturnType(), UDSuffixLoc, UDL, FD))
return ExprError();
@@ -14959,7 +16032,8 @@ Sema::BuildForRangeBeginEndCall(SourceLocation Loc,
*CallExpr = ExprError();
return FRS_DiagnosticIssued;
}
- *CallExpr = BuildCallExpr(S, MemberRef.get(), Loc, None, Loc, nullptr);
+ *CallExpr =
+ BuildCallExpr(S, MemberRef.get(), Loc, std::nullopt, Loc, nullptr);
if (CallExpr->isInvalid()) {
*CallExpr = ExprError();
return FRS_DiagnosticIssued;
@@ -15004,37 +16078,44 @@ Sema::BuildForRangeBeginEndCall(SourceLocation Loc,
/// perhaps a '&' around it). We have resolved the overloaded function
/// to the function declaration Fn, so patch up the expression E to
/// refer (possibly indirectly) to Fn. Returns the new expr.
-Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
- FunctionDecl *Fn) {
+ExprResult Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
+ FunctionDecl *Fn) {
if (ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
- Expr *SubExpr = FixOverloadedFunctionReference(PE->getSubExpr(),
- Found, Fn);
- if (SubExpr == PE->getSubExpr())
+ ExprResult SubExpr =
+ FixOverloadedFunctionReference(PE->getSubExpr(), Found, Fn);
+ if (SubExpr.isInvalid())
+ return ExprError();
+ if (SubExpr.get() == PE->getSubExpr())
return PE;
- return new (Context) ParenExpr(PE->getLParen(), PE->getRParen(), SubExpr);
+ return new (Context)
+ ParenExpr(PE->getLParen(), PE->getRParen(), SubExpr.get());
}
if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
- Expr *SubExpr = FixOverloadedFunctionReference(ICE->getSubExpr(),
- Found, Fn);
+ ExprResult SubExpr =
+ FixOverloadedFunctionReference(ICE->getSubExpr(), Found, Fn);
+ if (SubExpr.isInvalid())
+ return ExprError();
assert(Context.hasSameType(ICE->getSubExpr()->getType(),
- SubExpr->getType()) &&
+ SubExpr.get()->getType()) &&
"Implicit cast type cannot be determined from overload");
assert(ICE->path_empty() && "fixing up hierarchy conversion?");
- if (SubExpr == ICE->getSubExpr())
+ if (SubExpr.get() == ICE->getSubExpr())
return ICE;
return ImplicitCastExpr::Create(Context, ICE->getType(), ICE->getCastKind(),
- SubExpr, nullptr, ICE->getValueKind(),
+ SubExpr.get(), nullptr, ICE->getValueKind(),
CurFPFeatureOverrides());
}
if (auto *GSE = dyn_cast<GenericSelectionExpr>(E)) {
if (!GSE->isResultDependent()) {
- Expr *SubExpr =
+ ExprResult SubExpr =
FixOverloadedFunctionReference(GSE->getResultExpr(), Found, Fn);
- if (SubExpr == GSE->getResultExpr())
+ if (SubExpr.isInvalid())
+ return ExprError();
+ if (SubExpr.get() == GSE->getResultExpr())
return GSE;
// Replace the resulting type information before rebuilding the generic
@@ -15042,10 +16123,16 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
ArrayRef<Expr *> A = GSE->getAssocExprs();
SmallVector<Expr *, 4> AssocExprs(A.begin(), A.end());
unsigned ResultIdx = GSE->getResultIndex();
- AssocExprs[ResultIdx] = SubExpr;
-
+ AssocExprs[ResultIdx] = SubExpr.get();
+
+ if (GSE->isExprPredicate())
+ return GenericSelectionExpr::Create(
+ Context, GSE->getGenericLoc(), GSE->getControllingExpr(),
+ GSE->getAssocTypeSourceInfos(), AssocExprs, GSE->getDefaultLoc(),
+ GSE->getRParenLoc(), GSE->containsUnexpandedParameterPack(),
+ ResultIdx);
return GenericSelectionExpr::Create(
- Context, GSE->getGenericLoc(), GSE->getControllingExpr(),
+ Context, GSE->getGenericLoc(), GSE->getControllingType(),
GSE->getAssocTypeSourceInfos(), AssocExprs, GSE->getDefaultLoc(),
GSE->getRParenLoc(), GSE->containsUnexpandedParameterPack(),
ResultIdx);
@@ -15066,15 +16153,21 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
// Fix the subexpression, which really has to be an
// UnresolvedLookupExpr holding an overloaded member function
// or template.
- Expr *SubExpr = FixOverloadedFunctionReference(UnOp->getSubExpr(),
- Found, Fn);
- if (SubExpr == UnOp->getSubExpr())
+ ExprResult SubExpr =
+ FixOverloadedFunctionReference(UnOp->getSubExpr(), Found, Fn);
+ if (SubExpr.isInvalid())
+ return ExprError();
+ if (SubExpr.get() == UnOp->getSubExpr())
return UnOp;
- assert(isa<DeclRefExpr>(SubExpr)
- && "fixed to something other than a decl ref");
- assert(cast<DeclRefExpr>(SubExpr)->getQualifier()
- && "fixed to a member ref with no nested name qualifier");
+ if (CheckUseOfCXXMethodAsAddressOfOperand(UnOp->getBeginLoc(),
+ SubExpr.get(), Method))
+ return ExprError();
+
+ assert(isa<DeclRefExpr>(SubExpr.get()) &&
+ "fixed to something other than a decl ref");
+ assert(cast<DeclRefExpr>(SubExpr.get())->getQualifier() &&
+ "fixed to a member ref with no nested name qualifier");
// We have taken the address of a pointer to member
// function. Perform the computation here so that we get the
@@ -15087,20 +16180,21 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
if (Context.getTargetInfo().getCXXABI().isMicrosoft())
(void)isCompleteType(UnOp->getOperatorLoc(), MemPtrType);
- return UnaryOperator::Create(
- Context, SubExpr, UO_AddrOf, MemPtrType, VK_PRValue, OK_Ordinary,
- UnOp->getOperatorLoc(), false, CurFPFeatureOverrides());
+ return UnaryOperator::Create(Context, SubExpr.get(), UO_AddrOf,
+ MemPtrType, VK_PRValue, OK_Ordinary,
+ UnOp->getOperatorLoc(), false,
+ CurFPFeatureOverrides());
}
}
- Expr *SubExpr = FixOverloadedFunctionReference(UnOp->getSubExpr(),
- Found, Fn);
- if (SubExpr == UnOp->getSubExpr())
+ ExprResult SubExpr =
+ FixOverloadedFunctionReference(UnOp->getSubExpr(), Found, Fn);
+ if (SubExpr.isInvalid())
+ return ExprError();
+ if (SubExpr.get() == UnOp->getSubExpr())
return UnOp;
- return UnaryOperator::Create(
- Context, SubExpr, UO_AddrOf, Context.getPointerType(SubExpr->getType()),
- VK_PRValue, OK_Ordinary, UnOp->getOperatorLoc(), false,
- CurFPFeatureOverrides());
+ return CreateBuiltinUnaryOp(UnOp->getOperatorLoc(), UO_AddrOf,
+ SubExpr.get());
}
if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
@@ -15111,10 +16205,20 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
TemplateArgs = &TemplateArgsBuffer;
}
- DeclRefExpr *DRE =
- BuildDeclRefExpr(Fn, Fn->getType(), VK_LValue, ULE->getNameInfo(),
- ULE->getQualifierLoc(), Found.getDecl(),
- ULE->getTemplateKeywordLoc(), TemplateArgs);
+ QualType Type = Fn->getType();
+ ExprValueKind ValueKind = getLangOpts().CPlusPlus ? VK_LValue : VK_PRValue;
+
+ // FIXME: Duplicated from BuildDeclarationNameExpr.
+ if (unsigned BID = Fn->getBuiltinID()) {
+ if (!Context.BuiltinInfo.isDirectlyAddressable(BID)) {
+ Type = Context.BuiltinFnTy;
+ ValueKind = VK_PRValue;
+ }
+ }
+
+ DeclRefExpr *DRE = BuildDeclRefExpr(
+ Fn, Type, ValueKind, ULE->getNameInfo(), ULE->getQualifierLoc(),
+ Found.getDecl(), ULE->getTemplateKeywordLoc(), TemplateArgs);
DRE->setHadMultipleCandidates(ULE->getNumDecls() > 1);
return DRE;
}
@@ -15174,3 +16278,21 @@ ExprResult Sema::FixOverloadedFunctionReference(ExprResult E,
FunctionDecl *Fn) {
return FixOverloadedFunctionReference(E.get(), Found, Fn);
}
+
+bool clang::shouldEnforceArgLimit(bool PartialOverloading,
+ FunctionDecl *Function) {
+ if (!PartialOverloading || !Function)
+ return true;
+ if (Function->isVariadic())
+ return false;
+ if (const auto *Proto =
+ dyn_cast<FunctionProtoType>(Function->getFunctionType()))
+ if (Proto->isTemplateVariadic())
+ return false;
+ if (auto *Pattern = Function->getTemplateInstantiationPattern())
+ if (const auto *Proto =
+ dyn_cast<FunctionProtoType>(Pattern->getFunctionType()))
+ if (Proto->isTemplateVariadic())
+ return false;
+ return true;
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaPseudoObject.cpp b/contrib/llvm-project/clang/lib/Sema/SemaPseudoObject.cpp
index 7fdb34905b61..528c261c4a29 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaPseudoObject.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaPseudoObject.cpp
@@ -152,8 +152,13 @@ namespace {
assocTypes.push_back(assoc.getTypeSourceInfo());
}
+ if (gse->isExprPredicate())
+ return GenericSelectionExpr::Create(
+ S.Context, gse->getGenericLoc(), gse->getControllingExpr(),
+ assocTypes, assocExprs, gse->getDefaultLoc(), gse->getRParenLoc(),
+ gse->containsUnexpandedParameterPack(), resultIndex);
return GenericSelectionExpr::Create(
- S.Context, gse->getGenericLoc(), gse->getControllingExpr(),
+ S.Context, gse->getGenericLoc(), gse->getControllingType(),
assocTypes, assocExprs, gse->getDefaultLoc(), gse->getRParenLoc(),
gse->containsUnexpandedParameterPack(), resultIndex);
}
@@ -737,11 +742,11 @@ ExprResult ObjCPropertyOpBuilder::buildGet() {
assert(InstanceReceiver || RefExpr->isSuperReceiver());
msg = S.BuildInstanceMessageImplicit(InstanceReceiver, receiverType,
GenericLoc, Getter->getSelector(),
- Getter, None);
+ Getter, std::nullopt);
} else {
msg = S.BuildClassMessageImplicit(receiverType, RefExpr->isSuperReceiver(),
- GenericLoc, Getter->getSelector(),
- Getter, None);
+ GenericLoc, Getter->getSelector(), Getter,
+ std::nullopt);
}
return msg;
}
@@ -1190,7 +1195,7 @@ bool ObjCSubscriptOpBuilder::findAtIndexGetter() {
/*isPropertyAccessor=*/false,
/*isSynthesizedAccessorStub=*/false,
/*isImplicitlyDeclared=*/true, /*isDefined=*/false,
- ObjCMethodDecl::Required, false);
+ ObjCImplementationControl::Required, false);
ParmVarDecl *Argument = ParmVarDecl::Create(S.Context, AtIndexGetter,
SourceLocation(), SourceLocation(),
arrayRef ? &S.Context.Idents.get("index")
@@ -1200,7 +1205,7 @@ bool ObjCSubscriptOpBuilder::findAtIndexGetter() {
/*TInfo=*/nullptr,
SC_None,
nullptr);
- AtIndexGetter->setMethodParams(S.Context, Argument, None);
+ AtIndexGetter->setMethodParams(S.Context, Argument, std::nullopt);
}
if (!AtIndexGetter) {
@@ -1296,7 +1301,7 @@ bool ObjCSubscriptOpBuilder::findAtIndexSetter() {
/*isPropertyAccessor=*/false,
/*isSynthesizedAccessorStub=*/false,
/*isImplicitlyDeclared=*/true, /*isDefined=*/false,
- ObjCMethodDecl::Required, false);
+ ObjCImplementationControl::Required, false);
SmallVector<ParmVarDecl *, 2> Params;
ParmVarDecl *object = ParmVarDecl::Create(S.Context, AtIndexSetter,
SourceLocation(), SourceLocation(),
@@ -1316,7 +1321,7 @@ bool ObjCSubscriptOpBuilder::findAtIndexSetter() {
SC_None,
nullptr);
Params.push_back(key);
- AtIndexSetter->setMethodParams(S.Context, Params, None);
+ AtIndexSetter->setMethodParams(S.Context, Params, std::nullopt);
}
if (!AtIndexSetter) {
@@ -1446,7 +1451,8 @@ MSPropertyOpBuilder::getBaseMSProperty(MSPropertySubscriptExpr *E) {
Expr *MSPropertyOpBuilder::rebuildAndCaptureObject(Expr *syntacticBase) {
InstanceBase = capture(RefExpr->getBaseExpr());
- llvm::for_each(CallArgs, [this](Expr *&Arg) { Arg = capture(Arg); });
+ for (Expr *&Arg : CallArgs)
+ Arg = capture(Arg);
syntacticBase = Rebuilder(S, [=](Expr *, unsigned Idx) -> Expr * {
switch (Idx) {
case 0:
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaRISCVVectorLookup.cpp b/contrib/llvm-project/clang/lib/Sema/SemaRISCVVectorLookup.cpp
new file mode 100644
index 000000000000..00a5ea65f3f4
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Sema/SemaRISCVVectorLookup.cpp
@@ -0,0 +1,497 @@
+//==- SemaRISCVVectorLookup.cpp - Name Lookup for RISC-V Vector Intrinsic -==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements name lookup for RISC-V vector intrinsic.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/RISCVIntrinsicManager.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Support/RISCVVIntrinsicUtils.h"
+#include "llvm/ADT/SmallVector.h"
+#include <optional>
+#include <string>
+#include <vector>
+
+using namespace llvm;
+using namespace clang;
+using namespace clang::RISCV;
+
+using IntrinsicKind = sema::RISCVIntrinsicManager::IntrinsicKind;
+
+namespace {
+
+// Function definition of a RVV intrinsic.
+struct RVVIntrinsicDef {
+ /// Mapping to which clang built-in function, e.g. __builtin_rvv_vadd.
+ std::string BuiltinName;
+
+ /// Function signature, first element is return type.
+ RVVTypes Signature;
+};
+
+struct RVVOverloadIntrinsicDef {
+ // Indexes of RISCVIntrinsicManagerImpl::IntrinsicList.
+ SmallVector<uint32_t, 8> Indexes;
+};
+
+} // namespace
+
+static const PrototypeDescriptor RVVSignatureTable[] = {
+#define DECL_SIGNATURE_TABLE
+#include "clang/Basic/riscv_vector_builtin_sema.inc"
+#undef DECL_SIGNATURE_TABLE
+};
+
+static const PrototypeDescriptor RVSiFiveVectorSignatureTable[] = {
+#define DECL_SIGNATURE_TABLE
+#include "clang/Basic/riscv_sifive_vector_builtin_sema.inc"
+#undef DECL_SIGNATURE_TABLE
+};
+
+static const RVVIntrinsicRecord RVVIntrinsicRecords[] = {
+#define DECL_INTRINSIC_RECORDS
+#include "clang/Basic/riscv_vector_builtin_sema.inc"
+#undef DECL_INTRINSIC_RECORDS
+};
+
+static const RVVIntrinsicRecord RVSiFiveVectorIntrinsicRecords[] = {
+#define DECL_INTRINSIC_RECORDS
+#include "clang/Basic/riscv_sifive_vector_builtin_sema.inc"
+#undef DECL_INTRINSIC_RECORDS
+};
+
+// Get subsequence of signature table.
+static ArrayRef<PrototypeDescriptor>
+ProtoSeq2ArrayRef(IntrinsicKind K, uint16_t Index, uint8_t Length) {
+ switch (K) {
+ case IntrinsicKind::RVV:
+ return ArrayRef(&RVVSignatureTable[Index], Length);
+ case IntrinsicKind::SIFIVE_VECTOR:
+ return ArrayRef(&RVSiFiveVectorSignatureTable[Index], Length);
+ }
+ llvm_unreachable("Unhandled IntrinsicKind");
+}
+
+static QualType RVVType2Qual(ASTContext &Context, const RVVType *Type) {
+ QualType QT;
+ switch (Type->getScalarType()) {
+ case ScalarTypeKind::Void:
+ QT = Context.VoidTy;
+ break;
+ case ScalarTypeKind::Size_t:
+ QT = Context.getSizeType();
+ break;
+ case ScalarTypeKind::Ptrdiff_t:
+ QT = Context.getPointerDiffType();
+ break;
+ case ScalarTypeKind::UnsignedLong:
+ QT = Context.UnsignedLongTy;
+ break;
+ case ScalarTypeKind::SignedLong:
+ QT = Context.LongTy;
+ break;
+ case ScalarTypeKind::Boolean:
+ QT = Context.BoolTy;
+ break;
+ case ScalarTypeKind::SignedInteger:
+ QT = Context.getIntTypeForBitwidth(Type->getElementBitwidth(), true);
+ break;
+ case ScalarTypeKind::UnsignedInteger:
+ QT = Context.getIntTypeForBitwidth(Type->getElementBitwidth(), false);
+ break;
+ case ScalarTypeKind::BFloat:
+ QT = Context.BFloat16Ty;
+ break;
+ case ScalarTypeKind::Float:
+ switch (Type->getElementBitwidth()) {
+ case 64:
+ QT = Context.DoubleTy;
+ break;
+ case 32:
+ QT = Context.FloatTy;
+ break;
+ case 16:
+ QT = Context.Float16Ty;
+ break;
+ default:
+ llvm_unreachable("Unsupported floating point width.");
+ }
+ break;
+ case Invalid:
+ case Undefined:
+ llvm_unreachable("Unhandled type.");
+ }
+ if (Type->isVector()) {
+ if (Type->isTuple())
+ QT = Context.getScalableVectorType(QT, *Type->getScale(), Type->getNF());
+ else
+ QT = Context.getScalableVectorType(QT, *Type->getScale());
+ }
+
+ if (Type->isConstant())
+ QT = Context.getConstType(QT);
+
+ // Transform the type to a pointer as the last step, if necessary.
+ if (Type->isPointer())
+ QT = Context.getPointerType(QT);
+
+ return QT;
+}
+
+namespace {
+class RISCVIntrinsicManagerImpl : public sema::RISCVIntrinsicManager {
+private:
+ Sema &S;
+ ASTContext &Context;
+ RVVTypeCache TypeCache;
+ bool ConstructedRISCVVBuiltins;
+ bool ConstructedRISCVSiFiveVectorBuiltins;
+
+ // List of all RVV intrinsic.
+ std::vector<RVVIntrinsicDef> IntrinsicList;
+ // Mapping function name to index of IntrinsicList.
+ StringMap<uint32_t> Intrinsics;
+ // Mapping function name to RVVOverloadIntrinsicDef.
+ StringMap<RVVOverloadIntrinsicDef> OverloadIntrinsics;
+
+
+ // Create RVVIntrinsicDef.
+ void InitRVVIntrinsic(const RVVIntrinsicRecord &Record, StringRef SuffixStr,
+ StringRef OverloadedSuffixStr, bool IsMask,
+ RVVTypes &Types, bool HasPolicy, Policy PolicyAttrs);
+
+ // Create FunctionDecl for a vector intrinsic.
+ void CreateRVVIntrinsicDecl(LookupResult &LR, IdentifierInfo *II,
+ Preprocessor &PP, uint32_t Index,
+ bool IsOverload);
+
+ void ConstructRVVIntrinsics(ArrayRef<RVVIntrinsicRecord> Recs,
+ IntrinsicKind K);
+
+public:
+ RISCVIntrinsicManagerImpl(clang::Sema &S) : S(S), Context(S.Context) {
+ ConstructedRISCVVBuiltins = false;
+ ConstructedRISCVSiFiveVectorBuiltins = false;
+ }
+
+ // Initialize IntrinsicList
+ void InitIntrinsicList() override;
+
+ // Create RISC-V vector intrinsic and insert into symbol table if found, and
+ // return true, otherwise return false.
+ bool CreateIntrinsicIfFound(LookupResult &LR, IdentifierInfo *II,
+ Preprocessor &PP) override;
+};
+} // namespace
+
+void RISCVIntrinsicManagerImpl::ConstructRVVIntrinsics(
+ ArrayRef<RVVIntrinsicRecord> Recs, IntrinsicKind K) {
+ const TargetInfo &TI = Context.getTargetInfo();
+ static const std::pair<const char *, RVVRequire> FeatureCheckList[] = {
+ {"64bit", RVV_REQ_RV64},
+ {"xsfvcp", RVV_REQ_Xsfvcp},
+ {"xsfvfnrclipxfqf", RVV_REQ_Xsfvfnrclipxfqf},
+ {"xsfvfwmaccqqq", RVV_REQ_Xsfvfwmaccqqq},
+ {"xsfvqmaccdod", RVV_REQ_Xsfvqmaccdod},
+ {"xsfvqmaccqoq", RVV_REQ_Xsfvqmaccqoq},
+ {"zvbb", RVV_REQ_Zvbb},
+ {"zvbc", RVV_REQ_Zvbc},
+ {"zvkb", RVV_REQ_Zvkb},
+ {"zvkg", RVV_REQ_Zvkg},
+ {"zvkned", RVV_REQ_Zvkned},
+ {"zvknha", RVV_REQ_Zvknha},
+ {"zvknhb", RVV_REQ_Zvknhb},
+ {"zvksed", RVV_REQ_Zvksed},
+ {"zvksh", RVV_REQ_Zvksh},
+ {"experimental", RVV_REQ_Experimental}};
+
+ // Construction of RVVIntrinsicRecords need to sync with createRVVIntrinsics
+ // in RISCVVEmitter.cpp.
+ for (auto &Record : Recs) {
+ // Check requirements.
+ if (llvm::any_of(FeatureCheckList, [&](const auto &Item) {
+ return (Record.RequiredExtensions & Item.second) == Item.second &&
+ !TI.hasFeature(Item.first);
+ }))
+ continue;
+
+ // Create Intrinsics for each type and LMUL.
+ BasicType BaseType = BasicType::Unknown;
+ ArrayRef<PrototypeDescriptor> BasicProtoSeq =
+ ProtoSeq2ArrayRef(K, Record.PrototypeIndex, Record.PrototypeLength);
+ ArrayRef<PrototypeDescriptor> SuffixProto =
+ ProtoSeq2ArrayRef(K, Record.SuffixIndex, Record.SuffixLength);
+ ArrayRef<PrototypeDescriptor> OverloadedSuffixProto = ProtoSeq2ArrayRef(
+ K, Record.OverloadedSuffixIndex, Record.OverloadedSuffixSize);
+
+ PolicyScheme UnMaskedPolicyScheme =
+ static_cast<PolicyScheme>(Record.UnMaskedPolicyScheme);
+ PolicyScheme MaskedPolicyScheme =
+ static_cast<PolicyScheme>(Record.MaskedPolicyScheme);
+
+ const Policy DefaultPolicy;
+
+ llvm::SmallVector<PrototypeDescriptor> ProtoSeq =
+ RVVIntrinsic::computeBuiltinTypes(
+ BasicProtoSeq, /*IsMasked=*/false,
+ /*HasMaskedOffOperand=*/false, Record.HasVL, Record.NF,
+ UnMaskedPolicyScheme, DefaultPolicy, Record.IsTuple);
+
+ llvm::SmallVector<PrototypeDescriptor> ProtoMaskSeq;
+ if (Record.HasMasked)
+ ProtoMaskSeq = RVVIntrinsic::computeBuiltinTypes(
+ BasicProtoSeq, /*IsMasked=*/true, Record.HasMaskedOffOperand,
+ Record.HasVL, Record.NF, MaskedPolicyScheme, DefaultPolicy,
+ Record.IsTuple);
+
+ bool UnMaskedHasPolicy = UnMaskedPolicyScheme != PolicyScheme::SchemeNone;
+ bool MaskedHasPolicy = MaskedPolicyScheme != PolicyScheme::SchemeNone;
+ SmallVector<Policy> SupportedUnMaskedPolicies =
+ RVVIntrinsic::getSupportedUnMaskedPolicies();
+ SmallVector<Policy> SupportedMaskedPolicies =
+ RVVIntrinsic::getSupportedMaskedPolicies(Record.HasTailPolicy,
+ Record.HasMaskPolicy);
+
+ for (unsigned int TypeRangeMaskShift = 0;
+ TypeRangeMaskShift <= static_cast<unsigned int>(BasicType::MaxOffset);
+ ++TypeRangeMaskShift) {
+ unsigned int BaseTypeI = 1 << TypeRangeMaskShift;
+ BaseType = static_cast<BasicType>(BaseTypeI);
+
+ if ((BaseTypeI & Record.TypeRangeMask) != BaseTypeI)
+ continue;
+
+ if (BaseType == BasicType::Float16) {
+ if ((Record.RequiredExtensions & RVV_REQ_Zvfhmin) == RVV_REQ_Zvfhmin) {
+ if (!TI.hasFeature("zvfhmin"))
+ continue;
+ } else if (!TI.hasFeature("zvfh")) {
+ continue;
+ }
+ }
+
+ // Expanded with different LMUL.
+ for (int Log2LMUL = -3; Log2LMUL <= 3; Log2LMUL++) {
+ if (!(Record.Log2LMULMask & (1 << (Log2LMUL + 3))))
+ continue;
+
+ std::optional<RVVTypes> Types =
+ TypeCache.computeTypes(BaseType, Log2LMUL, Record.NF, ProtoSeq);
+
+ // Ignored to create new intrinsic if there are any illegal types.
+ if (!Types.has_value())
+ continue;
+
+ std::string SuffixStr = RVVIntrinsic::getSuffixStr(
+ TypeCache, BaseType, Log2LMUL, SuffixProto);
+ std::string OverloadedSuffixStr = RVVIntrinsic::getSuffixStr(
+ TypeCache, BaseType, Log2LMUL, OverloadedSuffixProto);
+
+ // Create non-masked intrinsic.
+ InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr, false, *Types,
+ UnMaskedHasPolicy, DefaultPolicy);
+
+ // Create non-masked policy intrinsic.
+ if (Record.UnMaskedPolicyScheme != PolicyScheme::SchemeNone) {
+ for (auto P : SupportedUnMaskedPolicies) {
+ llvm::SmallVector<PrototypeDescriptor> PolicyPrototype =
+ RVVIntrinsic::computeBuiltinTypes(
+ BasicProtoSeq, /*IsMasked=*/false,
+ /*HasMaskedOffOperand=*/false, Record.HasVL, Record.NF,
+ UnMaskedPolicyScheme, P, Record.IsTuple);
+ std::optional<RVVTypes> PolicyTypes = TypeCache.computeTypes(
+ BaseType, Log2LMUL, Record.NF, PolicyPrototype);
+ InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr,
+ /*IsMask=*/false, *PolicyTypes, UnMaskedHasPolicy,
+ P);
+ }
+ }
+ if (!Record.HasMasked)
+ continue;
+ // Create masked intrinsic.
+ std::optional<RVVTypes> MaskTypes =
+ TypeCache.computeTypes(BaseType, Log2LMUL, Record.NF, ProtoMaskSeq);
+ InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr, true,
+ *MaskTypes, MaskedHasPolicy, DefaultPolicy);
+ if (Record.MaskedPolicyScheme == PolicyScheme::SchemeNone)
+ continue;
+ // Create masked policy intrinsic.
+ for (auto P : SupportedMaskedPolicies) {
+ llvm::SmallVector<PrototypeDescriptor> PolicyPrototype =
+ RVVIntrinsic::computeBuiltinTypes(
+ BasicProtoSeq, /*IsMasked=*/true, Record.HasMaskedOffOperand,
+ Record.HasVL, Record.NF, MaskedPolicyScheme, P,
+ Record.IsTuple);
+ std::optional<RVVTypes> PolicyTypes = TypeCache.computeTypes(
+ BaseType, Log2LMUL, Record.NF, PolicyPrototype);
+ InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr,
+ /*IsMask=*/true, *PolicyTypes, MaskedHasPolicy, P);
+ }
+ } // End for different LMUL
+ } // End for different TypeRange
+ }
+}
+
+void RISCVIntrinsicManagerImpl::InitIntrinsicList() {
+
+ if (S.DeclareRISCVVBuiltins && !ConstructedRISCVVBuiltins) {
+ ConstructedRISCVVBuiltins = true;
+ ConstructRVVIntrinsics(RVVIntrinsicRecords,
+ IntrinsicKind::RVV);
+ }
+ if (S.DeclareRISCVSiFiveVectorBuiltins &&
+ !ConstructedRISCVSiFiveVectorBuiltins) {
+ ConstructedRISCVSiFiveVectorBuiltins = true;
+ ConstructRVVIntrinsics(RVSiFiveVectorIntrinsicRecords,
+ IntrinsicKind::SIFIVE_VECTOR);
+ }
+}
+
+// Compute name and signatures for intrinsic with practical types.
+void RISCVIntrinsicManagerImpl::InitRVVIntrinsic(
+ const RVVIntrinsicRecord &Record, StringRef SuffixStr,
+ StringRef OverloadedSuffixStr, bool IsMasked, RVVTypes &Signature,
+ bool HasPolicy, Policy PolicyAttrs) {
+ // Function name, e.g. vadd_vv_i32m1.
+ std::string Name = Record.Name;
+ if (!SuffixStr.empty())
+ Name += "_" + SuffixStr.str();
+
+ // Overloaded function name, e.g. vadd.
+ std::string OverloadedName;
+ if (!Record.OverloadedName)
+ OverloadedName = StringRef(Record.Name).split("_").first.str();
+ else
+ OverloadedName = Record.OverloadedName;
+ if (!OverloadedSuffixStr.empty())
+ OverloadedName += "_" + OverloadedSuffixStr.str();
+
+ // clang built-in function name, e.g. __builtin_rvv_vadd.
+ std::string BuiltinName = "__builtin_rvv_" + std::string(Record.Name);
+
+ RVVIntrinsic::updateNamesAndPolicy(IsMasked, HasPolicy, Name, BuiltinName,
+ OverloadedName, PolicyAttrs,
+ Record.HasFRMRoundModeOp);
+
+ // Put into IntrinsicList.
+ uint32_t Index = IntrinsicList.size();
+ IntrinsicList.push_back({BuiltinName, Signature});
+
+ // Creating mapping to Intrinsics.
+ Intrinsics.insert({Name, Index});
+
+ // Get the RVVOverloadIntrinsicDef.
+ RVVOverloadIntrinsicDef &OverloadIntrinsicDef =
+ OverloadIntrinsics[OverloadedName];
+
+ // And added the index.
+ OverloadIntrinsicDef.Indexes.push_back(Index);
+}
+
+void RISCVIntrinsicManagerImpl::CreateRVVIntrinsicDecl(LookupResult &LR,
+ IdentifierInfo *II,
+ Preprocessor &PP,
+ uint32_t Index,
+ bool IsOverload) {
+ ASTContext &Context = S.Context;
+ RVVIntrinsicDef &IDef = IntrinsicList[Index];
+ RVVTypes Sigs = IDef.Signature;
+ size_t SigLength = Sigs.size();
+ RVVType *ReturnType = Sigs[0];
+ QualType RetType = RVVType2Qual(Context, ReturnType);
+ SmallVector<QualType, 8> ArgTypes;
+ QualType BuiltinFuncType;
+
+ // Skip return type, and convert RVVType to QualType for arguments.
+ for (size_t i = 1; i < SigLength; ++i)
+ ArgTypes.push_back(RVVType2Qual(Context, Sigs[i]));
+
+ FunctionProtoType::ExtProtoInfo PI(
+ Context.getDefaultCallingConvention(false, false, true));
+
+ PI.Variadic = false;
+
+ SourceLocation Loc = LR.getNameLoc();
+ BuiltinFuncType = Context.getFunctionType(RetType, ArgTypes, PI);
+ DeclContext *Parent = Context.getTranslationUnitDecl();
+
+ FunctionDecl *RVVIntrinsicDecl = FunctionDecl::Create(
+ Context, Parent, Loc, Loc, II, BuiltinFuncType, /*TInfo=*/nullptr,
+ SC_Extern, S.getCurFPFeatures().isFPConstrained(),
+ /*isInlineSpecified*/ false,
+ /*hasWrittenPrototype*/ true);
+
+ // Create Decl objects for each parameter, adding them to the
+ // FunctionDecl.
+ const auto *FP = cast<FunctionProtoType>(BuiltinFuncType);
+ SmallVector<ParmVarDecl *, 8> ParmList;
+ for (unsigned IParm = 0, E = FP->getNumParams(); IParm != E; ++IParm) {
+ ParmVarDecl *Parm =
+ ParmVarDecl::Create(Context, RVVIntrinsicDecl, Loc, Loc, nullptr,
+ FP->getParamType(IParm), nullptr, SC_None, nullptr);
+ Parm->setScopeInfo(0, IParm);
+ ParmList.push_back(Parm);
+ }
+ RVVIntrinsicDecl->setParams(ParmList);
+
+ // Add function attributes.
+ if (IsOverload)
+ RVVIntrinsicDecl->addAttr(OverloadableAttr::CreateImplicit(Context));
+
+ // Setup alias to __builtin_rvv_*
+ IdentifierInfo &IntrinsicII = PP.getIdentifierTable().get(IDef.BuiltinName);
+ RVVIntrinsicDecl->addAttr(
+ BuiltinAliasAttr::CreateImplicit(S.Context, &IntrinsicII));
+
+ // Add to symbol table.
+ LR.addDecl(RVVIntrinsicDecl);
+}
+
+bool RISCVIntrinsicManagerImpl::CreateIntrinsicIfFound(LookupResult &LR,
+ IdentifierInfo *II,
+ Preprocessor &PP) {
+ StringRef Name = II->getName();
+
+ // Lookup the function name from the overload intrinsics first.
+ auto OvIItr = OverloadIntrinsics.find(Name);
+ if (OvIItr != OverloadIntrinsics.end()) {
+ const RVVOverloadIntrinsicDef &OvIntrinsicDef = OvIItr->second;
+ for (auto Index : OvIntrinsicDef.Indexes)
+ CreateRVVIntrinsicDecl(LR, II, PP, Index,
+ /*IsOverload*/ true);
+
+ // If we added overloads, need to resolve the lookup result.
+ LR.resolveKind();
+ return true;
+ }
+
+ // Lookup the function name from the intrinsics.
+ auto Itr = Intrinsics.find(Name);
+ if (Itr != Intrinsics.end()) {
+ CreateRVVIntrinsicDecl(LR, II, PP, Itr->second,
+ /*IsOverload*/ false);
+ return true;
+ }
+
+ // It's not an RVV intrinsics.
+ return false;
+}
+
+namespace clang {
+std::unique_ptr<clang::sema::RISCVIntrinsicManager>
+CreateRISCVIntrinsicManager(Sema &S) {
+ return std::make_unique<RISCVIntrinsicManagerImpl>(S);
+}
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp b/contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp
index 3b48a53efc0d..ca0254d29e7f 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaSYCL.cpp
@@ -33,50 +33,100 @@ Sema::SemaDiagnosticBuilder Sema::SYCLDiagIfDeviceCode(SourceLocation Loc,
return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, FD, *this);
}
-bool Sema::checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee) {
+static bool isZeroSizedArray(Sema &SemaRef, QualType Ty) {
+ if (const auto *CAT = SemaRef.getASTContext().getAsConstantArrayType(Ty))
+ return CAT->getSize() == 0;
+ return false;
+}
+
+void Sema::deepTypeCheckForSYCLDevice(SourceLocation UsedAt,
+ llvm::DenseSet<QualType> Visited,
+ ValueDecl *DeclToCheck) {
assert(getLangOpts().SYCLIsDevice &&
"Should only be called during SYCL compilation");
- assert(Callee && "Callee may not be null.");
+ // Emit notes only for the first discovered declaration of unsupported type
+ // to avoid mess of notes. This flag is to track that error already happened.
+ bool NeedToEmitNotes = true;
- // Errors in unevaluated context don't need to be generated,
- // so we can safely skip them.
- if (isUnevaluatedContext() || isConstantEvaluated())
- return true;
+ auto Check = [&](QualType TypeToCheck, const ValueDecl *D) {
+ bool ErrorFound = false;
+ if (isZeroSizedArray(*this, TypeToCheck)) {
+ SYCLDiagIfDeviceCode(UsedAt, diag::err_typecheck_zero_array_size) << 1;
+ ErrorFound = true;
+ }
+ // Checks for other types can also be done here.
+ if (ErrorFound) {
+ if (NeedToEmitNotes) {
+ if (auto *FD = dyn_cast<FieldDecl>(D))
+ SYCLDiagIfDeviceCode(FD->getLocation(),
+ diag::note_illegal_field_declared_here)
+ << FD->getType()->isPointerType() << FD->getType();
+ else
+ SYCLDiagIfDeviceCode(D->getLocation(), diag::note_declared_at);
+ }
+ }
- SemaDiagnosticBuilder::Kind DiagKind = SemaDiagnosticBuilder::K_Nop;
+ return ErrorFound;
+ };
- return DiagKind != SemaDiagnosticBuilder::K_Immediate &&
- DiagKind != SemaDiagnosticBuilder::K_ImmediateWithCallStack;
-}
+ // In case we have a Record used do the DFS for a bad field.
+ SmallVector<const ValueDecl *, 4> StackForRecursion;
+ StackForRecursion.push_back(DeclToCheck);
-// The SYCL kernel's 'object type' used for diagnostics and naming/mangling is
-// the first parameter to a sycl_kernel labeled function template. In SYCL1.2.1,
-// this was passed by value, and in SYCL2020, it is passed by reference.
-static QualType GetSYCLKernelObjectType(const FunctionDecl *KernelCaller) {
- assert(KernelCaller->getNumParams() > 0 && "Insufficient kernel parameters");
- QualType KernelParamTy = KernelCaller->getParamDecl(0)->getType();
+ // While doing DFS save how we get there to emit a nice set of notes.
+ SmallVector<const FieldDecl *, 4> History;
+ History.push_back(nullptr);
- // SYCL 2020 kernels are passed by reference.
- if (KernelParamTy->isReferenceType())
- return KernelParamTy->getPointeeType();
+ do {
+ const ValueDecl *Next = StackForRecursion.pop_back_val();
+ if (!Next) {
+ assert(!History.empty());
+ // Found a marker, we have gone up a level.
+ History.pop_back();
+ continue;
+ }
+ QualType NextTy = Next->getType();
- // SYCL 1.2.1
- return KernelParamTy;
-}
+ if (!Visited.insert(NextTy).second)
+ continue;
-void Sema::AddSYCLKernelLambda(const FunctionDecl *FD) {
- auto MangleCallback = [](ASTContext &Ctx,
- const NamedDecl *ND) -> llvm::Optional<unsigned> {
- if (const auto *RD = dyn_cast<CXXRecordDecl>(ND))
- Ctx.AddSYCLKernelNamingDecl(RD);
- // We always want to go into the lambda mangling (skipping the unnamed
- // struct version), so make sure we return a value here.
- return 1;
- };
+ auto EmitHistory = [&]() {
+ // The first element is always nullptr.
+ for (uint64_t Index = 1; Index < History.size(); ++Index) {
+ SYCLDiagIfDeviceCode(History[Index]->getLocation(),
+ diag::note_within_field_of_type)
+ << History[Index]->getType();
+ }
+ };
+
+ if (Check(NextTy, Next)) {
+ if (NeedToEmitNotes)
+ EmitHistory();
+ NeedToEmitNotes = false;
+ }
+
+ // In case pointer/array/reference type is met get pointee type, then
+ // proceed with that type.
+ while (NextTy->isAnyPointerType() || NextTy->isArrayType() ||
+ NextTy->isReferenceType()) {
+ if (NextTy->isArrayType())
+ NextTy = QualType{NextTy->getArrayElementTypeNoTypeQual(), 0};
+ else
+ NextTy = NextTy->getPointeeType();
+ if (Check(NextTy, Next)) {
+ if (NeedToEmitNotes)
+ EmitHistory();
+ NeedToEmitNotes = false;
+ }
+ }
- QualType Ty = GetSYCLKernelObjectType(FD);
- std::unique_ptr<MangleContext> Ctx{ItaniumMangleContext::create(
- Context, Context.getDiagnostics(), MangleCallback)};
- llvm::raw_null_ostream Out;
- Ctx->mangleTypeName(Ty, Out);
+ if (const auto *RecDecl = NextTy->getAsRecordDecl()) {
+ if (auto *NextFD = dyn_cast<FieldDecl>(Next))
+ History.push_back(NextFD);
+ // When nullptr is discovered, this means we've gone back up a level, so
+ // the history should be cleaned.
+ StackForRecursion.push_back(nullptr);
+ llvm::copy(RecDecl->fields(), std::back_inserter(StackForRecursion));
+ }
+ } while (!StackForRecursion.empty());
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp b/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp
index 03e9d7bc87a2..9e7c8c7e4e8c 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp
@@ -39,6 +39,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
using namespace clang;
using namespace sema;
@@ -216,9 +217,9 @@ static bool DiagnoseNoDiscard(Sema &S, const WarnUnusedResultAttr *A,
return S.Diag(Loc, diag::warn_unused_result_msg) << A << Msg << R1 << R2;
}
-void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
+void Sema::DiagnoseUnusedExprResult(const Stmt *S, unsigned DiagID) {
if (const LabelStmt *Label = dyn_cast_or_null<LabelStmt>(S))
- return DiagnoseUnusedExprResult(Label->getSubStmt());
+ return DiagnoseUnusedExprResult(Label->getSubStmt(), DiagID);
const Expr *E = dyn_cast_or_null<Expr>(S);
if (!E)
@@ -264,7 +265,6 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
// Okay, we have an unused result. Depending on what the base expression is,
// we might want to make a more specific diagnostic. Check for one of these
// cases now.
- unsigned DiagID = diag::warn_unused_expr;
if (const FullExpr *Temps = dyn_cast<FullExpr>(E))
E = Temps->getSubExpr();
if (const CXXBindTemporaryExpr *TempExpr = dyn_cast<CXXBindTemporaryExpr>(E))
@@ -339,10 +339,10 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
if (LangOpts.OpenMP && isa<CallExpr>(Source) &&
POE->getNumSemanticExprs() == 1 &&
isa<CallExpr>(POE->getSemanticExpr(0)))
- return DiagnoseUnusedExprResult(POE->getSemanticExpr(0));
+ return DiagnoseUnusedExprResult(POE->getSemanticExpr(0), DiagID);
if (isa<ObjCSubscriptRefExpr>(Source))
DiagID = diag::warn_unused_container_subscript_expr;
- else
+ else if (isa<ObjCPropertyRefExpr>(Source))
DiagID = diag::warn_unused_property_expr;
} else if (const CXXFunctionalCastExpr *FC
= dyn_cast<CXXFunctionalCastExpr>(E)) {
@@ -379,7 +379,12 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
return;
}
- DiagRuntimeBehavior(Loc, nullptr, PDiag(DiagID) << R1 << R2);
+ // Do not diagnose use of a comma operator in a SFINAE context because the
+ // type of the left operand could be used for SFINAE, so technically it is
+ // *used*.
+ if (DiagID != diag::warn_unused_comma_left_operand || !isSFINAEContext())
+ DiagIfReachable(Loc, S ? llvm::ArrayRef(S) : std::nullopt,
+ PDiag(DiagID) << R1 << R2);
}
void Sema::ActOnStartOfCompoundStmt(bool IsStmtExpr) {
@@ -406,9 +411,13 @@ StmtResult Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr) {
const unsigned NumElts = Elts.size();
- // If we're in C89 mode, check that we don't have any decls after stmts. If
- // so, emit an extension diagnostic.
- if (!getLangOpts().C99 && !getLangOpts().CPlusPlus) {
+ // If we're in C mode, check that we don't have any decls after stmts. If
+ // so, emit an extension diagnostic in C89 and potentially a warning in later
+ // versions.
+ const unsigned MixedDeclsCodeID = getLangOpts().C99
+ ? diag::warn_mixed_decls_code
+ : diag::ext_mixed_decls_code;
+ if (!getLangOpts().CPlusPlus && !Diags.isIgnored(MixedDeclsCodeID, L)) {
// Note that __extension__ can be around a decl.
unsigned i = 0;
// Skip over all declarations.
@@ -421,7 +430,7 @@ StmtResult Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R,
if (i != NumElts) {
Decl *D = *cast<DeclStmt>(Elts[i])->decl_begin();
- Diag(D->getLocation(), diag::ext_mixed_decls_code);
+ Diag(D->getLocation(), MixedDeclsCodeID);
}
}
@@ -434,7 +443,16 @@ StmtResult Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R,
DiagnoseEmptyLoopBody(Elts[i], Elts[i + 1]);
}
- return CompoundStmt::Create(Context, Elts, L, R);
+ // Calculate difference between FP options in this compound statement and in
+ // the enclosing one. If this is a function body, take the difference against
+ // default options. In this case the difference will indicate options that are
+ // changed upon entry to the statement.
+ FPOptions FPO = (getCurFunction()->CompoundScopes.size() == 1)
+ ? FPOptions(getLangOpts())
+ : getCurCompoundScope().InitialFPFeatures;
+ FPOptionsOverride FPDiff = getCurFPFeatures().getChangesFrom(FPO);
+
+ return CompoundStmt::Create(Context, Elts, FPDiff, L, R);
}
ExprResult
@@ -543,7 +561,7 @@ Sema::ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
}
ReservedIdentifierStatus Status = TheDecl->isReserved(getLangOpts());
- if (Status != ReservedIdentifierStatus::NotReserved &&
+ if (isReservedInAllContexts(Status) &&
!Context.getSourceManager().isInSystemHeader(IdentLoc))
Diag(IdentLoc, diag::warn_reserved_extern_symbol)
<< TheDecl << static_cast<int>(Status);
@@ -579,7 +597,7 @@ StmtResult Sema::BuildAttributedStmt(SourceLocation AttrsLoc,
return AttributedStmt::Create(Context, AttrsLoc, Attrs, SubStmt);
}
-StmtResult Sema::ActOnAttributedStmt(const ParsedAttributesWithRange &Attrs,
+StmtResult Sema::ActOnAttributedStmt(const ParsedAttributes &Attrs,
Stmt *SubStmt) {
SmallVector<const Attr *, 1> SemanticAttrs;
ProcessStmtAttributes(SubStmt, Attrs, SemanticAttrs);
@@ -671,7 +689,7 @@ bool Sema::checkMustTailAttr(const Stmt *St, const Attr &MTA) {
if (CMD->isStatic())
Type.MemberType = FuncType::ft_static_member;
else {
- Type.This = CMD->getThisType()->getPointeeType();
+ Type.This = CMD->getFunctionObjectParameterType();
Type.MemberType = FuncType::ft_non_static_member;
}
Type.Func = CMD->getType()->castAs<FunctionProtoType>();
@@ -768,6 +786,12 @@ bool Sema::checkMustTailAttr(const Stmt *St, const Attr &MTA) {
return false;
}
+ const auto *CalleeDecl = CE->getCalleeDecl();
+ if (CalleeDecl && CalleeDecl->hasAttr<CXX11NoReturnAttr>()) {
+ Diag(St->getBeginLoc(), diag::err_musttail_no_return) << &MTA;
+ return false;
+ }
+
// Caller and callee must match in whether they have a "this" parameter.
if (CallerType.This.isNull() != CalleeType.This.isNull()) {
if (const auto *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) {
@@ -858,38 +882,44 @@ public:
};
}
-StmtResult Sema::ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
+StmtResult Sema::ActOnIfStmt(SourceLocation IfLoc,
+ IfStatementKind StatementKind,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *thenStmt, SourceLocation ElseLoc,
Stmt *elseStmt) {
if (Cond.isInvalid())
- Cond = ConditionResult(
- *this, nullptr,
- MakeFullExpr(new (Context) OpaqueValueExpr(SourceLocation(),
- Context.BoolTy, VK_PRValue),
- IfLoc),
- false);
+ return StmtError();
+
+ bool ConstevalOrNegatedConsteval =
+ StatementKind == IfStatementKind::ConstevalNonNegated ||
+ StatementKind == IfStatementKind::ConstevalNegated;
Expr *CondExpr = Cond.get().second;
+ assert((CondExpr || ConstevalOrNegatedConsteval) &&
+ "If statement: missing condition");
// Only call the CommaVisitor when not C89 due to differences in scope flags.
- if ((getLangOpts().C99 || getLangOpts().CPlusPlus) &&
+ if (CondExpr && (getLangOpts().C99 || getLangOpts().CPlusPlus) &&
!Diags.isIgnored(diag::warn_comma_operator, CondExpr->getExprLoc()))
CommaVisitor(*this).Visit(CondExpr);
- if (!elseStmt)
- DiagnoseEmptyStmtBody(CondExpr->getEndLoc(), thenStmt,
- diag::warn_empty_if_body);
+ if (!ConstevalOrNegatedConsteval && !elseStmt)
+ DiagnoseEmptyStmtBody(RParenLoc, thenStmt, diag::warn_empty_if_body);
- if (IsConstexpr) {
+ if (ConstevalOrNegatedConsteval ||
+ StatementKind == IfStatementKind::Constexpr) {
auto DiagnoseLikelihood = [&](const Stmt *S) {
if (const Attr *A = Stmt::getLikelihoodAttr(S)) {
Diags.Report(A->getLocation(),
- diag::warn_attribute_has_no_effect_on_if_constexpr)
- << A << A->getRange();
+ diag::warn_attribute_has_no_effect_on_compile_time_if)
+ << A << ConstevalOrNegatedConsteval << A->getRange();
Diags.Report(IfLoc,
- diag::note_attribute_has_no_effect_on_if_constexpr_here)
- << SourceRange(IfLoc, LParenLoc.getLocWithOffset(-1));
+ diag::note_attribute_has_no_effect_on_compile_time_if_here)
+ << ConstevalOrNegatedConsteval
+ << SourceRange(IfLoc, (ConstevalOrNegatedConsteval
+ ? thenStmt->getBeginLoc()
+ : LParenLoc)
+ .getLocWithOffset(-1));
}
};
DiagnoseLikelihood(thenStmt);
@@ -908,11 +938,25 @@ StmtResult Sema::ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
}
}
- return BuildIfStmt(IfLoc, IsConstexpr, LParenLoc, InitStmt, Cond, RParenLoc,
+ if (ConstevalOrNegatedConsteval) {
+ bool Immediate = ExprEvalContexts.back().Context ==
+ ExpressionEvaluationContext::ImmediateFunctionContext;
+ if (CurContext->isFunctionOrMethod()) {
+ const auto *FD =
+ dyn_cast<FunctionDecl>(Decl::castFromDeclContext(CurContext));
+ if (FD && FD->isImmediateFunction())
+ Immediate = true;
+ }
+ if (isUnevaluatedContext() || Immediate)
+ Diags.Report(IfLoc, diag::warn_consteval_if_always_true) << Immediate;
+ }
+
+ return BuildIfStmt(IfLoc, StatementKind, LParenLoc, InitStmt, Cond, RParenLoc,
thenStmt, ElseLoc, elseStmt);
}
-StmtResult Sema::BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
+StmtResult Sema::BuildIfStmt(SourceLocation IfLoc,
+ IfStatementKind StatementKind,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *thenStmt, SourceLocation ElseLoc,
@@ -920,12 +964,13 @@ StmtResult Sema::BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
if (Cond.isInvalid())
return StmtError();
- if (IsConstexpr || isa<ObjCAvailabilityCheckExpr>(Cond.get().second))
+ if (StatementKind != IfStatementKind::Ordinary ||
+ isa<ObjCAvailabilityCheckExpr>(Cond.get().second))
setFunctionHasBranchProtectedScope();
- return IfStmt::Create(Context, IfLoc, IsConstexpr, InitStmt, Cond.get().first,
- Cond.get().second, LParenLoc, RParenLoc, thenStmt,
- ElseLoc, elseStmt);
+ return IfStmt::Create(Context, IfLoc, StatementKind, InitStmt,
+ Cond.get().first, Cond.get().second, LParenLoc,
+ RParenLoc, thenStmt, ElseLoc, elseStmt);
}
namespace {
@@ -1232,6 +1277,9 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
bool CaseListIsErroneous = false;
+ // FIXME: We'd better diagnose missing or duplicate default labels even
+ // in the dependent case. Because default labels themselves are never
+ // dependent.
for (SwitchCase *SC = SS->getSwitchCaseList(); SC && !HasDependentValue;
SC = SC->getNextSwitchCase()) {
@@ -1302,6 +1350,7 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
assert(!HasConstantCond ||
(ConstantCondValue.getBitWidth() == CondWidth &&
ConstantCondValue.isSigned() == CondIsSigned));
+ Diag(SwitchLoc, diag::warn_switch_default);
}
bool ShouldCheckConstantCond = HasConstantCond;
@@ -1462,7 +1511,7 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
// If switch has default case, then ignore it.
if (!CaseListIsErroneous && !CaseListIsIncomplete && !HasConstantCond &&
ET && ET->getDecl()->isCompleteDefinition() &&
- !empty(ET->getDecl()->enumerators())) {
+ !ET->getDecl()->enumerators().empty()) {
const EnumDecl *ED = ET->getDecl();
EnumValsTy EnumVals;
@@ -1563,7 +1612,7 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
auto DB = Diag(CondExpr->getExprLoc(), TheDefaultStmt
? diag::warn_def_missing_case
: diag::warn_missing_case)
- << (int)UnhandledNames.size();
+ << CondExpr->getSourceRange() << (int)UnhandledNames.size();
for (size_t I = 0, E = std::min(UnhandledNames.size(), (size_t)3);
I != E; ++I)
@@ -1691,9 +1740,7 @@ Sema::ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
namespace {
// Use SetVector since the diagnostic cares about the ordering of the Decl's.
- using DeclSetVector =
- llvm::SetVector<VarDecl *, llvm::SmallVector<VarDecl *, 8>,
- llvm::SmallPtrSet<VarDecl *, 8>>;
+ using DeclSetVector = llvm::SmallSetVector<VarDecl *, 8>;
// This visitor will traverse a conditional statement and store all
// the evaluated decls into a vector. Simple is set to true if none
@@ -2271,11 +2318,14 @@ Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc,
// If the type contained 'auto', deduce the 'auto' to 'id'.
if (FirstType->getContainedAutoType()) {
- OpaqueValueExpr OpaqueId(D->getLocation(), Context.getObjCIdType(),
- VK_PRValue);
+ SourceLocation Loc = D->getLocation();
+ OpaqueValueExpr OpaqueId(Loc, Context.getObjCIdType(), VK_PRValue);
Expr *DeducedInit = &OpaqueId;
- if (DeduceAutoType(D->getTypeSourceInfo(), DeducedInit, FirstType) ==
- DAR_Failed)
+ TemplateDeductionInfo Info(Loc);
+ FirstType = QualType();
+ TemplateDeductionResult Result = DeduceAutoType(
+ D->getTypeSourceInfo()->getTypeLoc(), DeducedInit, FirstType, Info);
+ if (Result != TDK_Success && Result != TDK_AlreadyDiagnosed)
DiagnoseAutoDeductionFailure(D, DeducedInit);
if (FirstType.isNull()) {
D->setInvalidDecl();
@@ -2339,10 +2389,16 @@ static bool FinishForRangeVarDecl(Sema &SemaRef, VarDecl *Decl, Expr *Init,
// Deduce the type for the iterator variable now rather than leaving it to
// AddInitializerToDecl, so we can produce a more suitable diagnostic.
QualType InitType;
- if ((!isa<InitListExpr>(Init) && Init->getType()->isVoidType()) ||
- SemaRef.DeduceAutoType(Decl->getTypeSourceInfo(), Init, InitType) ==
- Sema::DAR_Failed)
+ if (!isa<InitListExpr>(Init) && Init->getType()->isVoidType()) {
SemaRef.Diag(Loc, DiagID) << Init->getType();
+ } else {
+ TemplateDeductionInfo Info(Init->getExprLoc());
+ Sema::TemplateDeductionResult Result = SemaRef.DeduceAutoType(
+ Decl->getTypeSourceInfo()->getTypeLoc(), Init, InitType, Info);
+ if (Result != Sema::TDK_Success && Result != Sema::TDK_AlreadyDiagnosed)
+ SemaRef.Diag(Loc, DiagID) << Init->getType();
+ }
+
if (InitType.isNull()) {
Decl->setInvalidDecl();
return true;
@@ -2438,6 +2494,7 @@ StmtResult Sema::ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
Stmt *First, SourceLocation ColonLoc,
Expr *Range, SourceLocation RParenLoc,
BuildForRangeKind Kind) {
+ // FIXME: recover in order to allow the body to be parsed.
if (!First)
return StmtError();
@@ -2622,7 +2679,7 @@ BuildNonArrayForRange(Sema &SemaRef, Expr *BeginRange, Expr *EndRange,
SemaRef.PDiag(diag::err_for_range_invalid)
<< BeginRange->getType() << BEFFound),
SemaRef, OCD_AllCandidates, BeginRange);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Sema::FRS_DiagnosticIssued:
for (NamedDecl *D : OldFound) {
@@ -2729,7 +2786,7 @@ StmtResult Sema::BuildCXXForRangeStmt(SourceLocation ForLoc,
if (auto *DD = dyn_cast<DecompositionDecl>(LoopVar))
for (auto *Binding : DD->bindings())
Binding->setType(Context.DependentTy);
- LoopVar->setType(SubstAutoType(LoopVar->getType(), Context.DependentTy));
+ LoopVar->setType(SubstAutoTypeDependent(LoopVar->getType()));
}
} else if (!BeginDeclStmt.get()) {
SourceLocation RangeLoc = RangeVar->getLocation();
@@ -3149,7 +3206,7 @@ static void DiagnoseForRangeConstVariableCopies(Sema &SemaRef,
// (The function `getTypeSize` returns the size in bits.)
ASTContext &Ctx = SemaRef.Context;
if (Ctx.getTypeSize(VariableType) <= 64 * 8 &&
- (VariableType.isTriviallyCopyableType(Ctx) ||
+ (VariableType.isTriviallyCopyConstructibleType(Ctx) ||
hasTrivialABIAttr(VariableType)))
return;
@@ -3282,7 +3339,7 @@ Sema::ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope) {
// C99 6.8.6.2p1: A break shall appear only in or as a loop body.
return StmtError(Diag(ContinueLoc, diag::err_continue_not_in_loop));
}
- if (S->getFlags() & Scope::ConditionVarScope) {
+ if (S->isConditionVarScope()) {
// We cannot 'continue;' from within a statement expression in the
// initializer of a condition variable because we would jump past the
// initialization of that variable.
@@ -3316,8 +3373,8 @@ Sema::ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope) {
/// being thrown, or being co_returned from a coroutine. This expression
/// might be modified by the implementation.
///
-/// \param ForceCXX2b Overrides detection of current language mode
-/// and uses the rules for C++2b.
+/// \param Mode Overrides detection of current language mode
+/// and uses the rules for C++23.
///
/// \returns An aggregate which contains the Candidate and isMoveEligible
/// and isCopyElidable methods. If Candidate is non-null, it means
@@ -3338,7 +3395,7 @@ Sema::NamedReturnInfo Sema::getNamedReturnInfo(Expr *&E,
if (Res.Candidate && !E->isXValue() &&
(Mode == SimplerImplicitMoveMode::ForceOn ||
(Mode != SimplerImplicitMoveMode::ForceOff &&
- getLangOpts().CPlusPlus2b))) {
+ getLangOpts().CPlusPlus23))) {
E = ImplicitCastExpr::Create(Context, VD->getType().getNonReferenceType(),
CK_NoOp, E, nullptr, VK_XValue,
FPOptionsOverride());
@@ -3453,7 +3510,7 @@ const VarDecl *Sema::getCopyElisionCandidate(NamedReturnInfo &Info,
/// Verify that the initialization sequence that was picked for the
/// first overload resolution is permissible under C++98.
///
-/// Reject (possibly converting) contructors not taking an rvalue reference,
+/// Reject (possibly converting) constructors not taking an rvalue reference,
/// or user conversion operators which are not ref-qualified.
static bool
VerifyInitializationSequenceCXX98(const Sema &S,
@@ -3482,7 +3539,7 @@ ExprResult Sema::PerformMoveOrCopyInitialization(
const InitializedEntity &Entity, const NamedReturnInfo &NRInfo, Expr *Value,
bool SupressSimplerImplicitMoves) {
if (getLangOpts().CPlusPlus &&
- (!getLangOpts().CPlusPlus2b || SupressSimplerImplicitMoves) &&
+ (!getLangOpts().CPlusPlus23 || SupressSimplerImplicitMoves) &&
NRInfo.isMoveEligible()) {
ImplicitCastExpr AsRvalue(ImplicitCastExpr::OnStack, Value->getType(),
CK_NoOp, Value, VK_XValue, FPOptionsOverride());
@@ -3530,11 +3587,12 @@ StmtResult Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc,
CapturingScopeInfo *CurCap = cast<CapturingScopeInfo>(getCurFunction());
QualType FnRetType = CurCap->ReturnType;
LambdaScopeInfo *CurLambda = dyn_cast<LambdaScopeInfo>(CurCap);
+ if (CurLambda && CurLambda->CallOperator->getType().isNull())
+ return StmtError();
bool HasDeducedReturnType =
CurLambda && hasDeducedReturnType(CurLambda->CallOperator);
- if (ExprEvalContexts.back().Context ==
- ExpressionEvaluationContext::DiscardedStatement &&
+ if (ExprEvalContexts.back().isDiscardedStatementContext() &&
(HasDeducedReturnType || CurCap->HasImplicitReturnType)) {
if (RetValExp) {
ExprResult ER =
@@ -3684,6 +3742,11 @@ StmtResult Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc,
if (FunctionScopes.back()->FirstReturnLoc.isInvalid())
FunctionScopes.back()->FirstReturnLoc = ReturnLoc;
+ if (auto *CurBlock = dyn_cast<BlockScopeInfo>(CurCap);
+ CurBlock && CurCap->HasImplicitReturnType && RetValExp &&
+ RetValExp->containsErrors())
+ CurBlock->TheDecl->setInvalidDecl();
+
return Result;
}
@@ -3732,17 +3795,13 @@ TypeLoc Sema::getReturnTypeLoc(FunctionDecl *FD) const {
/// C++1y [dcl.spec.auto]p6.
bool Sema::DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
- Expr *&RetExpr,
- AutoType *AT) {
- // If this is the conversion function for a lambda, we choose to deduce it
+ Expr *RetExpr, const AutoType *AT) {
+ // If this is the conversion function for a lambda, we choose to deduce its
// type from the corresponding call operator, not from the synthesized return
// statement within it. See Sema::DeduceReturnType.
if (isLambdaConversionOperator(FD))
return false;
- TypeLoc OrigResultType = getReturnTypeLoc(FD);
- QualType Deduced;
-
if (RetExpr && isa<InitListExpr>(RetExpr)) {
// If the deduction is for a return statement and the initializer is
// a braced-init-list, the program is ill-formed.
@@ -3762,80 +3821,84 @@ bool Sema::DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
return false;
}
- if (RetExpr) {
- // Otherwise, [...] deduce a value for U using the rules of template
- // argument deduction.
- DeduceAutoResult DAR = DeduceAutoType(OrigResultType, RetExpr, Deduced);
-
- if (DAR == DAR_Failed && !FD->isInvalidDecl())
- Diag(RetExpr->getExprLoc(), diag::err_auto_fn_deduction_failure)
- << OrigResultType.getType() << RetExpr->getType();
-
- if (DAR != DAR_Succeeded)
- return true;
-
- // If a local type is part of the returned type, mark its fields as
- // referenced.
- LocalTypedefNameReferencer Referencer(*this);
- Referencer.TraverseType(RetExpr->getType());
- } else {
- // In the case of a return with no operand, the initializer is considered
- // to be void().
- //
- // Deduction here can only succeed if the return type is exactly 'cv auto'
- // or 'decltype(auto)', so just check for that case directly.
+ TypeLoc OrigResultType = getReturnTypeLoc(FD);
+ // In the case of a return with no operand, the initializer is considered
+ // to be void().
+ CXXScalarValueInitExpr VoidVal(Context.VoidTy, nullptr, SourceLocation());
+ if (!RetExpr) {
+ // For a function with a deduced result type to return with omitted
+ // expression, the result type as written must be 'auto' or
+ // 'decltype(auto)', possibly cv-qualified or constrained, but not
+ // ref-qualified.
if (!OrigResultType.getType()->getAs<AutoType>()) {
Diag(ReturnLoc, diag::err_auto_fn_return_void_but_not_auto)
- << OrigResultType.getType();
+ << OrigResultType.getType();
return true;
}
- // We always deduce U = void in this case.
- Deduced = SubstAutoType(OrigResultType.getType(), Context.VoidTy);
- if (Deduced.isNull())
- return true;
+ RetExpr = &VoidVal;
}
- // CUDA: Kernel function must have 'void' return type.
- if (getLangOpts().CUDA)
- if (FD->hasAttr<CUDAGlobalAttr>() && !Deduced->isVoidType()) {
- Diag(FD->getLocation(), diag::err_kern_type_not_void_return)
- << FD->getType() << FD->getSourceRange();
- return true;
+ QualType Deduced = AT->getDeducedType();
+ {
+ // Otherwise, [...] deduce a value for U using the rules of template
+ // argument deduction.
+ auto RetExprLoc = RetExpr->getExprLoc();
+ TemplateDeductionInfo Info(RetExprLoc);
+ SourceLocation TemplateSpecLoc;
+ if (RetExpr->getType() == Context.OverloadTy) {
+ auto FindResult = OverloadExpr::find(RetExpr);
+ if (FindResult.Expression)
+ TemplateSpecLoc = FindResult.Expression->getNameLoc();
}
-
- // If a function with a declared return type that contains a placeholder type
- // has multiple return statements, the return type is deduced for each return
- // statement. [...] if the type deduced is not the same in each deduction,
- // the program is ill-formed.
- QualType DeducedT = AT->getDeducedType();
- if (!DeducedT.isNull() && !FD->isInvalidDecl()) {
- AutoType *NewAT = Deduced->getContainedAutoType();
- // It is possible that NewAT->getDeducedType() is null. When that happens,
- // we should not crash, instead we ignore this deduction.
- if (NewAT->getDeducedType().isNull())
- return false;
-
- CanQualType OldDeducedType = Context.getCanonicalFunctionResultType(
- DeducedT);
- CanQualType NewDeducedType = Context.getCanonicalFunctionResultType(
- NewAT->getDeducedType());
- if (!FD->isDependentContext() && OldDeducedType != NewDeducedType) {
+ TemplateSpecCandidateSet FailedTSC(TemplateSpecLoc);
+ TemplateDeductionResult Res = DeduceAutoType(
+ OrigResultType, RetExpr, Deduced, Info, /*DependentDeduction=*/false,
+ /*IgnoreConstraints=*/false, &FailedTSC);
+ if (Res != TDK_Success && FD->isInvalidDecl())
+ return true;
+ switch (Res) {
+ case TDK_Success:
+ break;
+ case TDK_AlreadyDiagnosed:
+ return true;
+ case TDK_Inconsistent: {
+ // If a function with a declared return type that contains a placeholder
+ // type has multiple return statements, the return type is deduced for
+ // each return statement. [...] if the type deduced is not the same in
+ // each deduction, the program is ill-formed.
const LambdaScopeInfo *LambdaSI = getCurLambda();
- if (LambdaSI && LambdaSI->HasImplicitReturnType) {
+ if (LambdaSI && LambdaSI->HasImplicitReturnType)
Diag(ReturnLoc, diag::err_typecheck_missing_return_type_incompatible)
- << NewAT->getDeducedType() << DeducedT
- << true /*IsLambda*/;
- } else {
+ << Info.SecondArg << Info.FirstArg << true /*IsLambda*/;
+ else
Diag(ReturnLoc, diag::err_auto_fn_different_deductions)
- << (AT->isDecltypeAuto() ? 1 : 0)
- << NewAT->getDeducedType() << DeducedT;
- }
+ << (AT->isDecltypeAuto() ? 1 : 0) << Info.SecondArg
+ << Info.FirstArg;
+ return true;
+ }
+ default:
+ Diag(RetExpr->getExprLoc(), diag::err_auto_fn_deduction_failure)
+ << OrigResultType.getType() << RetExpr->getType();
+ FailedTSC.NoteCandidates(*this, RetExprLoc);
return true;
}
- } else if (!FD->isInvalidDecl()) {
+ }
+
+ // If a local type is part of the returned type, mark its fields as
+ // referenced.
+ LocalTypedefNameReferencer(*this).TraverseType(RetExpr->getType());
+
+ // CUDA: Kernel function must have 'void' return type.
+ if (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>() &&
+ !Deduced->isVoidType()) {
+ Diag(FD->getLocation(), diag::err_kern_type_not_void_return)
+ << FD->getType() << FD->getSourceRange();
+ return true;
+ }
+
+ if (!FD->isInvalidDecl() && AT->getDeducedType() != Deduced)
// Update all declarations of the function to have the deduced return type.
Context.adjustDeducedFunctionResultType(FD, Deduced);
- }
return false;
}
@@ -3849,17 +3912,15 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
RetValExp, nullptr, /*RecoverUncorrectedTypos=*/true);
if (RetVal.isInvalid())
return StmtError();
- StmtResult R = BuildReturnStmt(ReturnLoc, RetVal.get());
- if (R.isInvalid() || ExprEvalContexts.back().Context ==
- ExpressionEvaluationContext::DiscardedStatement)
+ StmtResult R =
+ BuildReturnStmt(ReturnLoc, RetVal.get(), /*AllowRecovery=*/true);
+ if (R.isInvalid() || ExprEvalContexts.back().isDiscardedStatementContext())
return R;
- if (VarDecl *VD =
- const_cast<VarDecl*>(cast<ReturnStmt>(R.get())->getNRVOCandidate())) {
- CurScope->addNRVOCandidate(VD);
- } else {
- CurScope->setNoNRVO();
- }
+ VarDecl *VD =
+ const_cast<VarDecl *>(cast<ReturnStmt>(R.get())->getNRVOCandidate());
+
+ CurScope->updateNRVOCandidate(VD);
CheckJumpOutOfSEHFinally(*this, ReturnLoc, *CurScope->getFnParent());
@@ -3868,7 +3929,7 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
static bool CheckSimplerImplicitMovesMSVCWorkaround(const Sema &S,
const Expr *E) {
- if (!E || !S.getLangOpts().CPlusPlus2b || !S.getLangOpts().MSVCCompat)
+ if (!E || !S.getLangOpts().CPlusPlus23 || !S.getLangOpts().MSVCCompat)
return false;
const Decl *D = E->getReferencedDeclOfCallee();
if (!D || !S.SourceMgr.isInSystemHeader(D->getLocation()))
@@ -3880,12 +3941,13 @@ static bool CheckSimplerImplicitMovesMSVCWorkaround(const Sema &S,
return false;
}
-StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
+StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
+ bool AllowRecovery) {
// Check for unexpanded parameter packs.
if (RetValExp && DiagnoseUnexpandedParameterPack(RetValExp))
return StmtError();
- // HACK: We supress simpler implicit move here in msvc compatibility mode
+ // HACK: We suppress simpler implicit move here in msvc compatibility mode
// just as a temporary work around, as the MSVC STL has issues with
// this change.
bool SupressSimplerImplicitMoves =
@@ -3934,10 +3996,17 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
} else // If we don't have a function/method context, bail.
return StmtError();
+ if (RetValExp) {
+ const auto *ATy = dyn_cast<ArrayType>(RetValExp->getType());
+ if (ATy && ATy->getElementType().isWebAssemblyReferenceType()) {
+ Diag(ReturnLoc, diag::err_wasm_table_art) << 1;
+ return StmtError();
+ }
+ }
+
// C++1z: discarded return statements are not considered when deducing a
// return type.
- if (ExprEvalContexts.back().Context ==
- ExpressionEvaluationContext::DiscardedStatement &&
+ if (ExprEvalContexts.back().isDiscardedStatementContext() &&
FnRetType->getContainedAutoType()) {
if (RetValExp) {
ExprResult ER =
@@ -3958,11 +4027,25 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
// If we've already decided this function is invalid, e.g. because
// we saw a `return` whose expression had an error, don't keep
// trying to deduce its return type.
- if (FD->isInvalidDecl())
- return StmtError();
- if (DeduceFunctionTypeFromReturnExpr(FD, ReturnLoc, RetValExp, AT)) {
+ // (Some return values may be needlessly wrapped in RecoveryExpr).
+ if (FD->isInvalidDecl() ||
+ DeduceFunctionTypeFromReturnExpr(FD, ReturnLoc, RetValExp, AT)) {
FD->setInvalidDecl();
- return StmtError();
+ if (!AllowRecovery)
+ return StmtError();
+ // The deduction failure is diagnosed and marked, try to recover.
+ if (RetValExp) {
+ // Wrap return value with a recovery expression of the previous type.
+ // If no deduction yet, use DependentTy.
+ auto Recovery = CreateRecoveryExpr(
+ RetValExp->getBeginLoc(), RetValExp->getEndLoc(), RetValExp,
+ AT->isDeduced() ? FnRetType : QualType());
+ if (Recovery.isInvalid())
+ return StmtError();
+ RetValExp = Recovery.get();
+ } else {
+ // Nothing to do: a ReturnStmt with no value is fine recovery.
+ }
} else {
FnRetType = FD->getReturnType();
}
@@ -3975,7 +4058,7 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
ReturnStmt *Result = nullptr;
if (FnRetType->isVoidType()) {
if (RetValExp) {
- if (isa<InitListExpr>(RetValExp)) {
+ if (auto *ILE = dyn_cast<InitListExpr>(RetValExp)) {
// We simply never allow init lists as the return value of void
// functions. This is compatible because this was never allowed before,
// so there's no legacy code to deal with.
@@ -3991,8 +4074,12 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
Diag(ReturnLoc, diag::err_return_init_list)
<< CurDecl << FunctionKind << RetValExp->getSourceRange();
- // Drop the expression.
- RetValExp = nullptr;
+ // Preserve the initializers in the AST.
+ RetValExp = AllowRecovery
+ ? CreateRecoveryExpr(ILE->getLBraceLoc(),
+ ILE->getRBraceLoc(), ILE->inits())
+ .get()
+ : nullptr;
} else if (!RetValExp->isTypeDependent()) {
// C99 6.8.6.4p1 (ext_ since GCC warns)
unsigned D = diag::ext_return_has_expr;
@@ -4051,7 +4138,9 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
} else if (!RetValExp && !HasDependentReturnType) {
FunctionDecl *FD = getCurFunctionDecl();
- if (getLangOpts().CPlusPlus11 && FD && FD->isConstexpr()) {
+ if ((FD && FD->isInvalidDecl()) || FnRetType->containsErrors()) {
+ // The intended return type might have been "void", so don't warn.
+ } else if (getLangOpts().CPlusPlus11 && FD && FD->isConstexpr()) {
// C++11 [stmt.return]p2
Diag(ReturnLoc, diag::err_constexpr_return_missing_expr)
<< FD << FD->isConsteval();
@@ -4089,6 +4178,9 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
InitializedEntity::InitializeResult(ReturnLoc, RetType);
ExprResult Res = PerformMoveOrCopyInitialization(
Entity, NRInfo, RetValExp, SupressSimplerImplicitMoves);
+ if (Res.isInvalid() && AllowRecovery)
+ Res = CreateRecoveryExpr(RetValExp->getBeginLoc(),
+ RetValExp->getEndLoc(), RetValExp, RetType);
if (Res.isInvalid()) {
// FIXME: Clean up temporaries here anyway?
return StmtError();
@@ -4157,7 +4249,14 @@ Sema::ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
if (!getLangOpts().ObjCExceptions)
Diag(AtLoc, diag::err_objc_exceptions_disabled) << "@try";
- setFunctionHasBranchProtectedScope();
+ // Objective-C try is incompatible with SEH __try.
+ sema::FunctionScopeInfo *FSI = getCurFunction();
+ if (FSI->FirstSEHTryLoc.isValid()) {
+ Diag(AtLoc, diag::err_mixing_cxx_try_seh_try) << 1;
+ Diag(FSI->FirstSEHTryLoc, diag::note_conflicting_try_here) << "'__try'";
+ }
+
+ FSI->setHasObjCTry(AtLoc);
unsigned NumCatchStmts = CatchStmts.size();
return ObjCAtTryStmt::Create(Context, AtLoc, Try, CatchStmts.data(),
NumCatchStmts, Finally);
@@ -4287,9 +4386,9 @@ public:
if (QT->isPointerType())
IsPointer = true;
+ QT = QT.getUnqualifiedType();
if (IsPointer || QT->isReferenceType())
QT = QT->getPointeeType();
- QT = QT.getUnqualifiedType();
}
/// Used when creating a CatchHandlerType from a base class type; pretends the
@@ -4337,32 +4436,42 @@ template <> struct DenseMapInfo<CatchHandlerType> {
namespace {
class CatchTypePublicBases {
- ASTContext &Ctx;
- const llvm::DenseMap<CatchHandlerType, CXXCatchStmt *> &TypesToCheck;
- const bool CheckAgainstPointer;
+ const llvm::DenseMap<QualType, CXXCatchStmt *> &TypesToCheck;
CXXCatchStmt *FoundHandler;
- CanQualType FoundHandlerType;
+ QualType FoundHandlerType;
+ QualType TestAgainstType;
public:
- CatchTypePublicBases(
- ASTContext &Ctx,
- const llvm::DenseMap<CatchHandlerType, CXXCatchStmt *> &T, bool C)
- : Ctx(Ctx), TypesToCheck(T), CheckAgainstPointer(C),
- FoundHandler(nullptr) {}
+ CatchTypePublicBases(const llvm::DenseMap<QualType, CXXCatchStmt *> &T,
+ QualType QT)
+ : TypesToCheck(T), FoundHandler(nullptr), TestAgainstType(QT) {}
CXXCatchStmt *getFoundHandler() const { return FoundHandler; }
- CanQualType getFoundHandlerType() const { return FoundHandlerType; }
+ QualType getFoundHandlerType() const { return FoundHandlerType; }
bool operator()(const CXXBaseSpecifier *S, CXXBasePath &) {
if (S->getAccessSpecifier() == AccessSpecifier::AS_public) {
- CatchHandlerType Check(S->getType(), CheckAgainstPointer);
+ QualType Check = S->getType().getCanonicalType();
const auto &M = TypesToCheck;
auto I = M.find(Check);
if (I != M.end()) {
- FoundHandler = I->second;
- FoundHandlerType = Ctx.getCanonicalType(S->getType());
- return true;
+ // We're pretty sure we found what we need to find. However, we still
+ // need to make sure that we properly compare for pointers and
+ // references, to handle cases like:
+ //
+ // } catch (Base *b) {
+ // } catch (Derived &d) {
+ // }
+ //
+ // where there is a qualification mismatch that disqualifies this
+ // handler as a potential problem.
+ if (I->second->getCaughtType()->isPointerType() ==
+ TestAgainstType->isPointerType()) {
+ FoundHandler = I->second;
+ FoundHandlerType = Check;
+ return true;
+ }
}
}
return false;
@@ -4374,13 +4483,22 @@ public:
/// handlers and creates a try statement from them.
StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers) {
- // Don't report an error if 'try' is used in system headers.
- if (!getLangOpts().CXXExceptions &&
+ const llvm::Triple &T = Context.getTargetInfo().getTriple();
+ const bool IsOpenMPGPUTarget =
+ getLangOpts().OpenMPIsTargetDevice && (T.isNVPTX() || T.isAMDGCN());
+ // Don't report an error if 'try' is used in system headers or in an OpenMP
+ // target region compiled for a GPU architecture.
+ if (!IsOpenMPGPUTarget && !getLangOpts().CXXExceptions &&
!getSourceManager().isInSystemHeader(TryLoc) && !getLangOpts().CUDA) {
// Delay error emission for the OpenMP device code.
targetDiag(TryLoc, diag::err_exceptions_disabled) << "try";
}
+ // In OpenMP target regions, we assume that catch is never reached on GPU
+ // targets.
+ if (IsOpenMPGPUTarget)
+ targetDiag(TryLoc, diag::warn_try_not_valid_on_target) << T.str();
+
// Exceptions aren't allowed in CUDA device code.
if (getLangOpts().CUDA)
CUDADiagIfDeviceCode(TryLoc, diag::err_cuda_device_exceptions)
@@ -4393,7 +4511,7 @@ StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
// C++ try is incompatible with SEH __try.
if (!getLangOpts().Borland && FSI->FirstSEHTryLoc.isValid()) {
- Diag(TryLoc, diag::err_mixing_cxx_try_seh_try);
+ Diag(TryLoc, diag::err_mixing_cxx_try_seh_try) << 0;
Diag(FSI->FirstSEHTryLoc, diag::note_conflicting_try_here) << "'__try'";
}
@@ -4401,6 +4519,7 @@ StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
assert(!Handlers.empty() &&
"The parser shouldn't call this if there are no handlers.");
+ llvm::DenseMap<QualType, CXXCatchStmt *> HandledBaseTypes;
llvm::DenseMap<CatchHandlerType, CXXCatchStmt *> HandledTypes;
for (unsigned i = 0; i < NumHandlers; ++i) {
CXXCatchStmt *H = cast<CXXCatchStmt>(Handlers[i]);
@@ -4418,8 +4537,7 @@ StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
// Walk the type hierarchy to diagnose when this type has already been
// handled (duplication), or cannot be handled (derivation inversion). We
// ignore top-level cv-qualifiers, per [except.handle]p3
- CatchHandlerType HandlerCHT =
- (QualType)Context.getCanonicalType(H->getCaughtType());
+ CatchHandlerType HandlerCHT = H->getCaughtType().getCanonicalType();
// We can ignore whether the type is a reference or a pointer; we need the
// underlying declaration type in order to get at the underlying record
@@ -4435,10 +4553,12 @@ StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
// as the original type.
CXXBasePaths Paths;
Paths.setOrigin(RD);
- CatchTypePublicBases CTPB(Context, HandledTypes, HandlerCHT.isPointer());
+ CatchTypePublicBases CTPB(HandledBaseTypes,
+ H->getCaughtType().getCanonicalType());
if (RD->lookupInBases(CTPB, Paths)) {
const CXXCatchStmt *Problem = CTPB.getFoundHandler();
- if (!Paths.isAmbiguous(CTPB.getFoundHandlerType())) {
+ if (!Paths.isAmbiguous(
+ CanQualType::CreateUnsafe(CTPB.getFoundHandlerType()))) {
Diag(H->getExceptionDecl()->getTypeSpecStartLoc(),
diag::warn_exception_caught_by_earlier_handler)
<< H->getCaughtType();
@@ -4447,11 +4567,16 @@ StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
<< Problem->getCaughtType();
}
}
+ // Strip the qualifiers here because we're going to be comparing this
+ // type to the base type specifiers of a class, which are ignored in a
+ // base specifier per [class.derived.general]p2.
+ HandledBaseTypes[Underlying.getUnqualifiedType()] = H;
}
// Add the type the list of ones we have handled; diagnose if we've already
// handled it.
- auto R = HandledTypes.insert(std::make_pair(H->getCaughtType(), H));
+ auto R = HandledTypes.insert(
+ std::make_pair(H->getCaughtType().getCanonicalType(), H));
if (!R.second) {
const CXXCatchStmt *Problem = R.first->second;
Diag(H->getExceptionDecl()->getTypeSpecStartLoc(),
@@ -4465,7 +4590,8 @@ StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
FSI->setHasCXXTry(TryLoc);
- return CXXTryStmt::Create(Context, TryLoc, TryBlock, Handlers);
+ return CXXTryStmt::Create(Context, TryLoc, cast<CompoundStmt>(TryBlock),
+ Handlers);
}
StmtResult Sema::ActOnSEHTryBlock(bool IsCXXTry, SourceLocation TryLoc,
@@ -4477,9 +4603,12 @@ StmtResult Sema::ActOnSEHTryBlock(bool IsCXXTry, SourceLocation TryLoc,
// SEH __try is incompatible with C++ try. Borland appears to support this,
// however.
if (!getLangOpts().Borland) {
- if (FSI->FirstCXXTryLoc.isValid()) {
- Diag(TryLoc, diag::err_mixing_cxx_try_seh_try);
- Diag(FSI->FirstCXXTryLoc, diag::note_conflicting_try_here) << "'try'";
+ if (FSI->FirstCXXOrObjCTryLoc.isValid()) {
+ Diag(TryLoc, diag::err_mixing_cxx_try_seh_try) << FSI->FirstTryType;
+ Diag(FSI->FirstCXXOrObjCTryLoc, diag::note_conflicting_try_here)
+ << (FSI->FirstTryType == sema::FunctionScopeInfo::TryLocIsCXX
+ ? "'try'"
+ : "'@try'");
}
}
@@ -4573,10 +4702,11 @@ Sema::CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc,
RecordDecl *RD = nullptr;
if (getLangOpts().CPlusPlus)
- RD = CXXRecordDecl::Create(Context, TTK_Struct, DC, Loc, Loc,
+ RD = CXXRecordDecl::Create(Context, TagTypeKind::Struct, DC, Loc, Loc,
/*Id=*/nullptr);
else
- RD = RecordDecl::Create(Context, TTK_Struct, DC, Loc, Loc, /*Id=*/nullptr);
+ RD = RecordDecl::Create(Context, TagTypeKind::Struct, DC, Loc, Loc,
+ /*Id=*/nullptr);
RD->setCapturedRecord();
DC->addDecl(RD);
@@ -4620,11 +4750,11 @@ buildCapturedStmtCaptureList(Sema &S, CapturedRegionScopeInfo *RSI,
if (S.getLangOpts().OpenMP && RSI->CapRegionKind == CR_OpenMP)
S.setOpenMPCaptureKind(Field, Cap.getVariable(), RSI->OpenMPLevel);
- Captures.push_back(CapturedStmt::Capture(Cap.getLocation(),
- Cap.isReferenceCapture()
- ? CapturedStmt::VCK_ByRef
- : CapturedStmt::VCK_ByCopy,
- Cap.getVariable()));
+ Captures.push_back(CapturedStmt::Capture(
+ Cap.getLocation(),
+ Cap.isReferenceCapture() ? CapturedStmt::VCK_ByRef
+ : CapturedStmt::VCK_ByCopy,
+ cast<VarDecl>(Cap.getVariable())));
}
CaptureInits.push_back(Init.get());
}
@@ -4643,7 +4773,7 @@ void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
QualType ParamType = Context.getPointerType(Context.getTagDeclType(RD));
auto *Param =
ImplicitParamDecl::Create(Context, DC, Loc, ParamName, ParamType,
- ImplicitParamDecl::CapturedContext);
+ ImplicitParamKind::CapturedContext);
DC->addDecl(Param);
CD->setContextParam(0, Param);
@@ -4658,6 +4788,7 @@ void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
PushExpressionEvaluationContext(
ExpressionEvaluationContext::PotentiallyEvaluated);
+ ExprEvalContexts.back().InImmediateEscalatingFunctionContext = false;
}
void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
@@ -4683,7 +4814,7 @@ void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
.withRestrict();
auto *Param =
ImplicitParamDecl::Create(Context, DC, Loc, ParamName, ParamType,
- ImplicitParamDecl::CapturedContext);
+ ImplicitParamKind::CapturedContext);
DC->addDecl(Param);
CD->setContextParam(ParamNum, Param);
ContextIsFound = true;
@@ -4691,7 +4822,7 @@ void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
IdentifierInfo *ParamName = &Context.Idents.get(I->first);
auto *Param =
ImplicitParamDecl::Create(Context, DC, Loc, ParamName, I->second,
- ImplicitParamDecl::CapturedContext);
+ ImplicitParamKind::CapturedContext);
DC->addDecl(Param);
CD->setParam(ParamNum, Param);
}
@@ -4703,7 +4834,7 @@ void Sema::ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
QualType ParamType = Context.getPointerType(Context.getTagDeclType(RD));
auto *Param =
ImplicitParamDecl::Create(Context, DC, Loc, ParamName, ParamType,
- ImplicitParamDecl::CapturedContext);
+ ImplicitParamKind::CapturedContext);
DC->addDecl(Param);
CD->setContextParam(ParamNum, Param);
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp b/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp
index 243d0b921cd7..83351b703c15 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp
@@ -22,8 +22,10 @@
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
+#include <optional>
using namespace clang;
using namespace sema;
@@ -254,7 +256,7 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
// The parser verifies that there is a string literal here.
- assert(AsmString->isAscii());
+ assert(AsmString->isOrdinary());
FunctionDecl *FD = dyn_cast<FunctionDecl>(getCurLexicalContext());
llvm::StringMap<bool> FeatureMap;
@@ -262,14 +264,15 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
for (unsigned i = 0; i != NumOutputs; i++) {
StringLiteral *Literal = Constraints[i];
- assert(Literal->isAscii());
+ assert(Literal->isOrdinary());
StringRef OutputName;
if (Names[i])
OutputName = Names[i]->getName();
TargetInfo::ConstraintInfo Info(Literal->getString(), OutputName);
- if (!Context.getTargetInfo().validateOutputConstraint(Info)) {
+ if (!Context.getTargetInfo().validateOutputConstraint(Info) &&
+ !(LangOpts.HIPStdPar && LangOpts.CUDAIsDevice)) {
targetDiag(Literal->getBeginLoc(),
diag::err_asm_invalid_output_constraint)
<< Info.getConstraintStr();
@@ -296,9 +299,9 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
checkExprMemoryConstraintCompat(*this, OutputExpr, Info, false))
return StmtError();
- // Disallow _ExtInt, since the backends tend to have difficulties with
- // non-normal sizes.
- if (OutputExpr->getType()->isExtIntType())
+ // Disallow bit-precise integer types, since the backends tend to have
+ // difficulties with abnormal sizes.
+ if (OutputExpr->getType()->isBitIntType())
return StmtError(
Diag(OutputExpr->getBeginLoc(), diag::err_asm_invalid_type)
<< OutputExpr->getType() << 0 /*Input*/
@@ -330,7 +333,7 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
if (RequireCompleteType(OutputExpr->getBeginLoc(), Exprs[i]->getType(),
diag::err_dereference_incomplete_type))
return StmtError();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
return StmtError(Diag(OutputExpr->getBeginLoc(),
diag::err_asm_invalid_lvalue_in_output)
@@ -353,7 +356,7 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
for (unsigned i = NumOutputs, e = NumOutputs + NumInputs; i != e; i++) {
StringLiteral *Literal = Constraints[i];
- assert(Literal->isAscii());
+ assert(Literal->isOrdinary());
StringRef InputName;
if (Names[i])
@@ -377,6 +380,11 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
Expr *InputExpr = Exprs[i];
+ if (InputExpr->getType()->isMemberPointerType())
+ return StmtError(Diag(InputExpr->getBeginLoc(),
+ diag::err_asm_pmf_through_constraint_not_permitted)
+ << InputExpr->getSourceRange());
+
// Referring to parameters is not allowed in naked functions.
if (CheckNakedParmReference(InputExpr, *this))
return StmtError();
@@ -393,30 +401,31 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
diag::err_asm_invalid_lvalue_in_input)
<< Info.getConstraintStr()
<< InputExpr->getSourceRange());
- } else if (Info.requiresImmediateConstant() && !Info.allowsRegister()) {
- if (!InputExpr->isValueDependent()) {
- Expr::EvalResult EVResult;
- if (InputExpr->EvaluateAsRValue(EVResult, Context, true)) {
- // For compatibility with GCC, we also allow pointers that would be
- // integral constant expressions if they were cast to int.
- llvm::APSInt IntResult;
- if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
- Context))
- if (!Info.isValidAsmImmediate(IntResult))
- return StmtError(Diag(InputExpr->getBeginLoc(),
- diag::err_invalid_asm_value_for_constraint)
- << toString(IntResult, 10)
- << Info.getConstraintStr()
- << InputExpr->getSourceRange());
- }
- }
-
} else {
ExprResult Result = DefaultFunctionArrayLvalueConversion(Exprs[i]);
if (Result.isInvalid())
return StmtError();
- Exprs[i] = Result.get();
+ InputExpr = Exprs[i] = Result.get();
+
+ if (Info.requiresImmediateConstant() && !Info.allowsRegister()) {
+ if (!InputExpr->isValueDependent()) {
+ Expr::EvalResult EVResult;
+ if (InputExpr->EvaluateAsRValue(EVResult, Context, true)) {
+ // For compatibility with GCC, we also allow pointers that would be
+ // integral constant expressions if they were cast to int.
+ llvm::APSInt IntResult;
+ if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
+ Context))
+ if (!Info.isValidAsmImmediate(IntResult))
+ return StmtError(
+ Diag(InputExpr->getBeginLoc(),
+ diag::err_invalid_asm_value_for_constraint)
+ << toString(IntResult, 10) << Info.getConstraintStr()
+ << InputExpr->getSourceRange());
+ }
+ }
+ }
}
if (Info.allowsRegister()) {
@@ -428,7 +437,7 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
}
}
- if (InputExpr->getType()->isExtIntType())
+ if (InputExpr->getType()->isBitIntType())
return StmtError(
Diag(InputExpr->getBeginLoc(), diag::err_asm_invalid_type)
<< InputExpr->getType() << 1 /*Output*/
@@ -453,12 +462,12 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
<< Info.getConstraintStr();
}
- Optional<SourceLocation> UnwindClobberLoc;
+ std::optional<SourceLocation> UnwindClobberLoc;
// Check that the clobbers are valid.
for (unsigned i = 0; i != NumClobbers; i++) {
StringLiteral *Literal = Clobbers[i];
- assert(Literal->isAscii());
+ assert(Literal->isOrdinary());
StringRef Clobber = Literal->getString();
@@ -666,8 +675,17 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
// output was a register, just extend the shorter one to the size of the
// larger one.
if (!SmallerValueMentioned && InputDomain != AD_Other &&
- OutputConstraintInfos[TiedTo].allowsRegister())
+ OutputConstraintInfos[TiedTo].allowsRegister()) {
+ // FIXME: GCC supports the OutSize to be 128 at maximum. Currently codegen
+ // crash when the size larger than the register size. So we limit it here.
+ if (OutTy->isStructureType() &&
+ Context.getIntTypeForBitwidth(OutSize, /*Signed*/ false).isNull()) {
+ targetDiag(OutputExpr->getExprLoc(), diag::err_store_value_to_reg);
+ return NS;
+ }
+
continue;
+ }
// Either both of the operands were mentioned or the smaller one was
// mentioned. One more special case that we'll allow: if the tied input is
@@ -706,10 +724,7 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
NamedOperandList.emplace_back(
std::make_pair(Names[i]->getName(), Exprs[i]));
// Sort NamedOperandList.
- std::stable_sort(NamedOperandList.begin(), NamedOperandList.end(),
- [](const NamedOperand &LHS, const NamedOperand &RHS) {
- return LHS.first < RHS.first;
- });
+ llvm::stable_sort(NamedOperandList, llvm::less_first());
// Find adjacent duplicate operand.
SmallVector<NamedOperand, 4>::iterator Found =
std::adjacent_find(begin(NamedOperandList), end(NamedOperandList),
@@ -726,6 +741,9 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
}
if (NS->isAsmGoto())
setFunctionHasBranchIntoScope();
+
+ CleanupVarDeclMarking();
+ DiscardCleanupsInEvaluationContext();
return NS;
}
@@ -922,13 +940,24 @@ StmtResult Sema::ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
bool IsSimple = (NumOutputs != 0 || NumInputs != 0);
setFunctionHasBranchProtectedScope();
+ bool InvalidOperand = false;
for (uint64_t I = 0; I < NumOutputs + NumInputs; ++I) {
- if (Exprs[I]->getType()->isExtIntType())
- return StmtError(
- Diag(Exprs[I]->getBeginLoc(), diag::err_asm_invalid_type)
- << Exprs[I]->getType() << (I < NumOutputs)
- << Exprs[I]->getSourceRange());
+ Expr *E = Exprs[I];
+ if (E->getType()->isBitIntType()) {
+ InvalidOperand = true;
+ Diag(E->getBeginLoc(), diag::err_asm_invalid_type)
+ << E->getType() << (I < NumOutputs)
+ << E->getSourceRange();
+ } else if (E->refersToBitField()) {
+ InvalidOperand = true;
+ FieldDecl *BitField = E->getSourceBitField();
+ Diag(E->getBeginLoc(), diag::err_ms_asm_bitfield_unsupported)
+ << E->getSourceRange();
+ Diag(BitField->getLocation(), diag::note_bitfield_decl);
+ }
}
+ if (InvalidOperand)
+ return StmtError();
MSAsmStmt *NS =
new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, IsSimple,
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp
index 4f2977f89ce1..e6a4d3e63e4a 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp
@@ -19,6 +19,7 @@
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/StringExtras.h"
+#include <optional>
using namespace clang;
using namespace sema;
@@ -52,6 +53,13 @@ static Attr *handleFallThroughAttr(Sema &S, Stmt *St, const ParsedAttr &A,
static Attr *handleSuppressAttr(Sema &S, Stmt *St, const ParsedAttr &A,
SourceRange Range) {
+ if (A.getAttributeSpellingListIndex() == SuppressAttr::CXX11_gsl_suppress &&
+ A.getNumArgs() < 1) {
+ // Suppression attribute with GSL spelling requires at least 1 argument.
+ S.Diag(A.getLoc(), diag::err_attribute_too_few_arguments) << A << 1;
+ return nullptr;
+ }
+
std::vector<StringRef> DiagnosticIdentifiers;
for (unsigned I = 0, E = A.getNumArgs(); I != E; ++I) {
StringRef RuleName;
@@ -59,8 +67,6 @@ static Attr *handleSuppressAttr(Sema &S, Stmt *St, const ParsedAttr &A,
if (!S.checkStringLiteralArgumentAttr(A, I, RuleName, nullptr))
return nullptr;
- // FIXME: Warn if the rule name is unknown. This is tricky because only
- // clang-tidy knows about available rules.
DiagnosticIdentifiers.push_back(RuleName);
}
@@ -175,17 +181,22 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const ParsedAttr &A,
namespace {
class CallExprFinder : public ConstEvaluatedExprVisitor<CallExprFinder> {
- bool FoundCallExpr = false;
+ bool FoundAsmStmt = false;
+ std::vector<const CallExpr *> CallExprs;
public:
typedef ConstEvaluatedExprVisitor<CallExprFinder> Inherited;
CallExprFinder(Sema &S, const Stmt *St) : Inherited(S.Context) { Visit(St); }
- bool foundCallExpr() { return FoundCallExpr; }
+ bool foundCallExpr() { return !CallExprs.empty(); }
+ const std::vector<const CallExpr *> &getCallExprs() { return CallExprs; }
+
+ bool foundAsmStmt() { return FoundAsmStmt; }
+
+ void VisitCallExpr(const CallExpr *E) { CallExprs.push_back(E); }
- void VisitCallExpr(const CallExpr *E) { FoundCallExpr = true; }
- void VisitAsmStmt(const AsmStmt *S) { FoundCallExpr = true; }
+ void VisitAsmStmt(const AsmStmt *S) { FoundAsmStmt = true; }
void Visit(const Stmt *St) {
if (!St)
@@ -200,15 +211,98 @@ static Attr *handleNoMergeAttr(Sema &S, Stmt *St, const ParsedAttr &A,
NoMergeAttr NMA(S.Context, A);
CallExprFinder CEF(S, St);
- if (!CEF.foundCallExpr()) {
- S.Diag(St->getBeginLoc(), diag::warn_nomerge_attribute_ignored_in_stmt)
- << NMA.getSpelling();
+ if (!CEF.foundCallExpr() && !CEF.foundAsmStmt()) {
+ S.Diag(St->getBeginLoc(), diag::warn_attribute_ignored_no_calls_in_stmt)
+ << A;
return nullptr;
}
return ::new (S.Context) NoMergeAttr(S.Context, A);
}
+template <typename OtherAttr, int DiagIdx>
+static bool CheckStmtInlineAttr(Sema &SemaRef, const Stmt *OrigSt,
+ const Stmt *CurSt,
+ const AttributeCommonInfo &A) {
+ CallExprFinder OrigCEF(SemaRef, OrigSt);
+ CallExprFinder CEF(SemaRef, CurSt);
+
+ // If the call expressions lists are equal in size, we can skip
+ // previously emitted diagnostics. However, if the statement has a pack
+ // expansion, we have no way of telling which CallExpr is the instantiated
+ // version of the other. In this case, we will end up re-diagnosing in the
+ // instantiation.
+ // ie: [[clang::always_inline]] non_dependent(), (other_call<Pack>()...)
+ // will diagnose nondependent again.
+ bool CanSuppressDiag =
+ OrigSt && CEF.getCallExprs().size() == OrigCEF.getCallExprs().size();
+
+ if (!CEF.foundCallExpr()) {
+ return SemaRef.Diag(CurSt->getBeginLoc(),
+ diag::warn_attribute_ignored_no_calls_in_stmt)
+ << A;
+ }
+
+ for (const auto &Tup :
+ llvm::zip_longest(OrigCEF.getCallExprs(), CEF.getCallExprs())) {
+ // If the original call expression already had a callee, we already
+ // diagnosed this, so skip it here. We can't skip if there isn't a 1:1
+ // relationship between the two lists of call expressions.
+ if (!CanSuppressDiag || !(*std::get<0>(Tup))->getCalleeDecl()) {
+ const Decl *Callee = (*std::get<1>(Tup))->getCalleeDecl();
+ if (Callee &&
+ (Callee->hasAttr<OtherAttr>() || Callee->hasAttr<FlattenAttr>())) {
+ SemaRef.Diag(CurSt->getBeginLoc(),
+ diag::warn_function_stmt_attribute_precedence)
+ << A << (Callee->hasAttr<OtherAttr>() ? DiagIdx : 1);
+ SemaRef.Diag(Callee->getBeginLoc(), diag::note_conflicting_attribute);
+ }
+ }
+ }
+
+ return false;
+}
+
+bool Sema::CheckNoInlineAttr(const Stmt *OrigSt, const Stmt *CurSt,
+ const AttributeCommonInfo &A) {
+ return CheckStmtInlineAttr<AlwaysInlineAttr, 0>(*this, OrigSt, CurSt, A);
+}
+
+bool Sema::CheckAlwaysInlineAttr(const Stmt *OrigSt, const Stmt *CurSt,
+ const AttributeCommonInfo &A) {
+ return CheckStmtInlineAttr<NoInlineAttr, 2>(*this, OrigSt, CurSt, A);
+}
+
+static Attr *handleNoInlineAttr(Sema &S, Stmt *St, const ParsedAttr &A,
+ SourceRange Range) {
+ NoInlineAttr NIA(S.Context, A);
+ if (!NIA.isClangNoInline()) {
+ S.Diag(St->getBeginLoc(), diag::warn_function_attribute_ignored_in_stmt)
+ << "[[clang::noinline]]";
+ return nullptr;
+ }
+
+ if (S.CheckNoInlineAttr(/*OrigSt=*/nullptr, St, A))
+ return nullptr;
+
+ return ::new (S.Context) NoInlineAttr(S.Context, A);
+}
+
+static Attr *handleAlwaysInlineAttr(Sema &S, Stmt *St, const ParsedAttr &A,
+ SourceRange Range) {
+ AlwaysInlineAttr AIA(S.Context, A);
+ if (!AIA.isClangAlwaysInline()) {
+ S.Diag(St->getBeginLoc(), diag::warn_function_attribute_ignored_in_stmt)
+ << "[[clang::always_inline]]";
+ return nullptr;
+ }
+
+ if (S.CheckAlwaysInlineAttr(/*OrigSt=*/nullptr, St, A))
+ return nullptr;
+
+ return ::new (S.Context) AlwaysInlineAttr(S.Context, A);
+}
+
static Attr *handleMustTailAttr(Sema &S, Stmt *St, const ParsedAttr &A,
SourceRange Range) {
// Validation is in Sema::ActOnAttributedStmt().
@@ -233,6 +327,90 @@ static Attr *handleUnlikely(Sema &S, Stmt *St, const ParsedAttr &A,
return ::new (S.Context) UnlikelyAttr(S.Context, A);
}
+CodeAlignAttr *Sema::BuildCodeAlignAttr(const AttributeCommonInfo &CI,
+ Expr *E) {
+ if (!E->isValueDependent()) {
+ llvm::APSInt ArgVal;
+ ExprResult Res = VerifyIntegerConstantExpression(E, &ArgVal);
+ if (Res.isInvalid())
+ return nullptr;
+ E = Res.get();
+
+ // This attribute requires an integer argument which is a constant power of
+ // two between 1 and 4096 inclusive.
+ if (ArgVal < CodeAlignAttr::MinimumAlignment ||
+ ArgVal > CodeAlignAttr::MaximumAlignment || !ArgVal.isPowerOf2()) {
+ if (std::optional<int64_t> Value = ArgVal.trySExtValue())
+ Diag(CI.getLoc(), diag::err_attribute_power_of_two_in_range)
+ << CI << CodeAlignAttr::MinimumAlignment
+ << CodeAlignAttr::MaximumAlignment << Value.value();
+ else
+ Diag(CI.getLoc(), diag::err_attribute_power_of_two_in_range)
+ << CI << CodeAlignAttr::MinimumAlignment
+ << CodeAlignAttr::MaximumAlignment << E;
+ return nullptr;
+ }
+ }
+ return new (Context) CodeAlignAttr(Context, CI, E);
+}
+
+static Attr *handleCodeAlignAttr(Sema &S, Stmt *St, const ParsedAttr &A) {
+
+ Expr *E = A.getArgAsExpr(0);
+ return S.BuildCodeAlignAttr(A, E);
+}
+
+// Diagnose non-identical duplicates as a 'conflicting' loop attributes
+// and suppress duplicate errors in cases where the two match.
+template <typename LoopAttrT>
+static void CheckForDuplicateLoopAttrs(Sema &S, ArrayRef<const Attr *> Attrs) {
+ auto FindFunc = [](const Attr *A) { return isa<const LoopAttrT>(A); };
+ const auto *FirstItr = std::find_if(Attrs.begin(), Attrs.end(), FindFunc);
+
+ if (FirstItr == Attrs.end()) // no attributes found
+ return;
+
+ const auto *LastFoundItr = FirstItr;
+ std::optional<llvm::APSInt> FirstValue;
+
+ const auto *CAFA =
+ dyn_cast<ConstantExpr>(cast<LoopAttrT>(*FirstItr)->getAlignment());
+ // Return early if first alignment expression is dependent (since we don't
+ // know what the effective size will be), and skip the loop entirely.
+ if (!CAFA)
+ return;
+
+ while (Attrs.end() != (LastFoundItr = std::find_if(LastFoundItr + 1,
+ Attrs.end(), FindFunc))) {
+ const auto *CASA =
+ dyn_cast<ConstantExpr>(cast<LoopAttrT>(*LastFoundItr)->getAlignment());
+ // If the value is dependent, we can not test anything.
+ if (!CASA)
+ return;
+ // Test the attribute values.
+ llvm::APSInt SecondValue = CASA->getResultAsAPSInt();
+ if (!FirstValue)
+ FirstValue = CAFA->getResultAsAPSInt();
+
+ if (FirstValue != SecondValue) {
+ S.Diag((*LastFoundItr)->getLocation(), diag::err_loop_attr_conflict)
+ << *FirstItr;
+ S.Diag((*FirstItr)->getLocation(), diag::note_previous_attribute);
+ }
+ return;
+ }
+}
+
+static Attr *handleMSConstexprAttr(Sema &S, Stmt *St, const ParsedAttr &A,
+ SourceRange Range) {
+ if (!S.getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2022_3)) {
+ S.Diag(A.getLoc(), diag::warn_unknown_attribute_ignored)
+ << A << A.getRange();
+ return nullptr;
+ }
+ return ::new (S.Context) MSConstexprAttr(S.Context, A);
+}
+
#define WANT_STMT_MERGE_LOGIC
#include "clang/Sema/AttrParsedAttrImpl.inc"
#undef WANT_STMT_MERGE_LOGIC
@@ -249,21 +427,33 @@ CheckForIncompatibleAttributes(Sema &S,
if (!DiagnoseMutualExclusions(S, Attrs))
return;
- // There are 6 categories of loop hints attributes: vectorize, interleave,
- // unroll, unroll_and_jam, pipeline and distribute. Except for distribute they
- // come in two variants: a state form and a numeric form. The state form
- // selectively defaults/enables/disables the transformation for the loop
- // (for unroll, default indicates full unrolling rather than enabling the
- // transformation). The numeric form form provides an integer hint (for
- // example, unroll count) to the transformer. The following array accumulates
- // the hints encountered while iterating through the attributes to check for
- // compatibility.
+ enum CategoryType {
+ // For the following categories, they come in two variants: a state form and
+ // a numeric form. The state form may be one of default, enable, and
+ // disable. The numeric form provides an integer hint (for example, unroll
+ // count) to the transformer.
+ Vectorize,
+ Interleave,
+ UnrollAndJam,
+ Pipeline,
+ // For unroll, default indicates full unrolling rather than enabling the
+ // transformation.
+ Unroll,
+ // The loop distribution transformation only has a state form that is
+ // exposed by #pragma clang loop distribute (enable | disable).
+ Distribute,
+ // The vector predication only has a state form that is exposed by
+ // #pragma clang loop vectorize_predicate (enable | disable).
+ VectorizePredicate,
+ // This serves as a indicator to how many category are listed in this enum.
+ NumberOfCategories
+ };
+ // The following array accumulates the hints encountered while iterating
+ // through the attributes to check for compatibility.
struct {
const LoopHintAttr *StateAttr;
const LoopHintAttr *NumericAttr;
- } HintAttrs[] = {{nullptr, nullptr}, {nullptr, nullptr}, {nullptr, nullptr},
- {nullptr, nullptr}, {nullptr, nullptr}, {nullptr, nullptr},
- {nullptr, nullptr}};
+ } HintAttrs[CategoryType::NumberOfCategories] = {};
for (const auto *I : Attrs) {
const LoopHintAttr *LH = dyn_cast<LoopHintAttr>(I);
@@ -272,16 +462,8 @@ CheckForIncompatibleAttributes(Sema &S,
if (!LH)
continue;
+ CategoryType Category = CategoryType::NumberOfCategories;
LoopHintAttr::OptionType Option = LH->getOption();
- enum {
- Vectorize,
- Interleave,
- Unroll,
- UnrollAndJam,
- Distribute,
- Pipeline,
- VectorizePredicate
- } Category;
switch (Option) {
case LoopHintAttr::Vectorize:
case LoopHintAttr::VectorizeWidth:
@@ -312,7 +494,7 @@ CheckForIncompatibleAttributes(Sema &S,
break;
};
- assert(Category < sizeof(HintAttrs) / sizeof(HintAttrs[0]));
+ assert(Category != NumberOfCategories && "Unhandled loop hint option");
auto &CategoryState = HintAttrs[Category];
const LoopHintAttr *PrevAttr;
if (Option == LoopHintAttr::Vectorize ||
@@ -363,7 +545,7 @@ static Attr *handleOpenCLUnrollHint(Sema &S, Stmt *St, const ParsedAttr &A,
unsigned UnrollFactor = 0;
if (A.getNumArgs() == 1) {
Expr *E = A.getArgAsExpr(0);
- Optional<llvm::APSInt> ArgVal;
+ std::optional<llvm::APSInt> ArgVal;
if (!(ArgVal = E->getIntegerConstantExpr(S.Context))) {
S.Diag(A.getLoc(), diag::err_attribute_argument_type)
@@ -397,7 +579,9 @@ static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const ParsedAttr &A,
!(A.existsInTarget(S.Context.getTargetInfo()) ||
(S.Context.getLangOpts().SYCLIsDevice && Aux &&
A.existsInTarget(*Aux)))) {
- S.Diag(A.getLoc(), A.isDeclspecAttribute()
+ S.Diag(A.getLoc(), A.isRegularKeywordAttribute()
+ ? (unsigned)diag::err_keyword_not_supported_on_target
+ : A.isDeclspecAttribute()
? (unsigned)diag::warn_unhandled_ms_attribute_ignored
: (unsigned)diag::warn_unknown_attribute_ignored)
<< A << A.getRange();
@@ -408,6 +592,8 @@ static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const ParsedAttr &A,
return nullptr;
switch (A.getKind()) {
+ case ParsedAttr::AT_AlwaysInline:
+ return handleAlwaysInlineAttr(S, St, A, Range);
case ParsedAttr::AT_FallThrough:
return handleFallThroughAttr(S, St, A, Range);
case ParsedAttr::AT_LoopHint:
@@ -418,24 +604,29 @@ static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const ParsedAttr &A,
return handleSuppressAttr(S, St, A, Range);
case ParsedAttr::AT_NoMerge:
return handleNoMergeAttr(S, St, A, Range);
+ case ParsedAttr::AT_NoInline:
+ return handleNoInlineAttr(S, St, A, Range);
case ParsedAttr::AT_MustTail:
return handleMustTailAttr(S, St, A, Range);
case ParsedAttr::AT_Likely:
return handleLikely(S, St, A, Range);
case ParsedAttr::AT_Unlikely:
return handleUnlikely(S, St, A, Range);
+ case ParsedAttr::AT_CodeAlign:
+ return handleCodeAlignAttr(S, St, A);
+ case ParsedAttr::AT_MSConstexpr:
+ return handleMSConstexprAttr(S, St, A, Range);
default:
// N.B., ClangAttrEmitter.cpp emits a diagnostic helper that ensures a
// declaration attribute is not written on a statement, but this code is
// needed for attributes in Attr.td that do not list any subjects.
S.Diag(A.getRange().getBegin(), diag::err_decl_attribute_invalid_on_stmt)
- << A << St->getBeginLoc();
+ << A << A.isRegularKeywordAttribute() << St->getBeginLoc();
return nullptr;
}
}
-void Sema::ProcessStmtAttributes(Stmt *S,
- const ParsedAttributesWithRange &InAttrs,
+void Sema::ProcessStmtAttributes(Stmt *S, const ParsedAttributes &InAttrs,
SmallVectorImpl<const Attr *> &OutAttrs) {
for (const ParsedAttr &AL : InAttrs) {
if (const Attr *A = ProcessStmtAttribute(*this, S, AL, InAttrs.Range))
@@ -443,4 +634,10 @@ void Sema::ProcessStmtAttributes(Stmt *S,
}
CheckForIncompatibleAttributes(*this, OutAttrs);
+ CheckForDuplicateLoopAttrs<CodeAlignAttr>(*this, OutAttrs);
+}
+
+bool Sema::CheckRebuiltStmtAttributes(ArrayRef<const Attr *> Attrs) {
+ CheckForDuplicateLoopAttrs<CodeAlignAttr>(*this, Attrs);
+ return false;
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
index 5d26f2d2c11a..b619f5d729e8 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
@@ -11,18 +11,23 @@
#include "TreeTransform.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/TemplateName.h"
#include "clang/AST/TypeVisitor.h"
#include "clang/Basic/Builtins.h"
+#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Stack.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Overload.h"
@@ -36,6 +41,7 @@
#include "llvm/ADT/StringExtras.h"
#include <iterator>
+#include <optional>
using namespace clang;
using namespace sema;
@@ -106,7 +112,7 @@ NamedDecl *Sema::getAsTemplateNameDecl(NamedDecl *D,
return D;
}
- if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) {
+ if (const auto *Record = dyn_cast<CXXRecordDecl>(D)) {
// C++ [temp.local]p1:
// Like normal (non-template) classes, class templates have an
// injected-class-name (Clause 9). The injected-class-name
@@ -123,8 +129,7 @@ NamedDecl *Sema::getAsTemplateNameDecl(NamedDecl *D,
if (Record->getDescribedClassTemplate())
return Record->getDescribedClassTemplate();
- if (ClassTemplateSpecializationDecl *Spec
- = dyn_cast<ClassTemplateSpecializationDecl>(Record))
+ if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(Record))
return Spec->getSpecializedTemplate();
}
@@ -223,6 +228,7 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
return TNK_Non_template;
NamedDecl *D = nullptr;
+ UsingShadowDecl *FoundUsingShadow = dyn_cast<UsingShadowDecl>(*R.begin());
if (R.isAmbiguous()) {
// If we got an ambiguity involving a non-function template, treat this
// as a template name, and pick an arbitrary template for error recovery.
@@ -233,6 +239,7 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
AnyFunctionTemplates = true;
else {
D = FoundTemplate;
+ FoundUsingShadow = dyn_cast<UsingShadowDecl>(FoundD);
break;
}
}
@@ -280,13 +287,13 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
}
TemplateDecl *TD = cast<TemplateDecl>(D);
-
+ Template =
+ FoundUsingShadow ? TemplateName(FoundUsingShadow) : TemplateName(TD);
+ assert(!FoundUsingShadow || FoundUsingShadow->getTargetDecl() == TD);
if (SS.isSet() && !SS.isInvalid()) {
NestedNameSpecifier *Qualifier = SS.getScopeRep();
- Template = Context.getQualifiedTemplateName(Qualifier,
- hasTemplateKeyword, TD);
- } else {
- Template = TemplateName(TD);
+ Template = Context.getQualifiedTemplateName(Qualifier, hasTemplateKeyword,
+ Template);
}
if (isa<FunctionTemplateDecl>(TD)) {
@@ -310,9 +317,8 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
}
bool Sema::isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
- SourceLocation NameLoc,
- ParsedTemplateTy *Template) {
- CXXScopeSpec SS;
+ SourceLocation NameLoc, CXXScopeSpec &SS,
+ ParsedTemplateTy *Template /*=nullptr*/) {
bool MemberOfUnknownSpecialization = false;
// We could use redeclaration lookup here, but we don't need to: the
@@ -394,6 +400,7 @@ bool Sema::LookupTemplateName(LookupResult &Found,
LookupCtx = computeDeclContext(ObjectType);
IsDependent = !LookupCtx && ObjectType->isDependentType();
assert((IsDependent || !ObjectType->isIncompleteType() ||
+ !ObjectType->getAs<TagType>() ||
ObjectType->castAs<TagType>()->isBeingDefined()) &&
"Caller should have completed object type");
@@ -485,8 +492,7 @@ bool Sema::LookupTemplateName(LookupResult &Found,
// all language modes, and diagnose the empty lookup in ActOnCallExpr if we
// successfully form a call to an undeclared template-id.
bool AllFunctions =
- getLangOpts().CPlusPlus20 &&
- std::all_of(Found.begin(), Found.end(), [](NamedDecl *ND) {
+ getLangOpts().CPlusPlus20 && llvm::all_of(Found, [](NamedDecl *ND) {
return isa<FunctionDecl>(ND->getUnderlyingDecl());
});
if (AllFunctions || (Found.empty() && !IsDependent)) {
@@ -588,7 +594,7 @@ bool Sema::LookupTemplateName(LookupResult &Found,
// postfix-expression and does not name a class template, the name
// found in the class of the object expression is used, otherwise
FoundOuter.clear();
- } else if (!Found.isSuppressingDiagnostics()) {
+ } else if (!Found.isSuppressingAmbiguousDiagnostics()) {
// - if the name found is a class template, it must refer to the same
// entity as the one found in the class of the object expression,
// otherwise the program is ill-formed.
@@ -744,19 +750,21 @@ Sema::ActOnDependentIdExpression(const CXXScopeSpec &SS,
// Check if the nested name specifier is an enum type.
bool IsEnum = false;
if (NestedNameSpecifier *NNS = SS.getScopeRep())
- IsEnum = dyn_cast_or_null<EnumType>(NNS->getAsType());
+ IsEnum = isa_and_nonnull<EnumType>(NNS->getAsType());
if (!MightBeCxx11UnevalField && !isAddressOfOperand && !IsEnum &&
- isa<CXXMethodDecl>(DC) && cast<CXXMethodDecl>(DC)->isInstance()) {
- QualType ThisType = cast<CXXMethodDecl>(DC)->getThisType();
+ isa<CXXMethodDecl>(DC) &&
+ cast<CXXMethodDecl>(DC)->isImplicitObjectMemberFunction()) {
+ QualType ThisType = cast<CXXMethodDecl>(DC)->getThisType().getNonReferenceType();
// Since the 'this' expression is synthesized, we don't need to
// perform the double-lookup check.
NamedDecl *FirstQualifierInScope = nullptr;
return CXXDependentScopeMemberExpr::Create(
- Context, /*This*/ nullptr, ThisType, /*IsArrow*/ true,
- /*Op*/ SourceLocation(), SS.getWithLocInContext(Context), TemplateKWLoc,
+ Context, /*This=*/nullptr, ThisType,
+ /*IsArrow=*/!Context.getLangOpts().HLSL,
+ /*Op=*/SourceLocation(), SS.getWithLocInContext(Context), TemplateKWLoc,
FirstQualifierInScope, NameInfo, TemplateArgs);
}
@@ -796,8 +804,9 @@ bool Sema::DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
if (PatternDef && !IsEntityBeingDefined) {
NamedDecl *SuggestedDef = nullptr;
- if (!hasVisibleDefinition(const_cast<NamedDecl*>(PatternDef), &SuggestedDef,
- /*OnlyNeedComplete*/false)) {
+ if (!hasReachableDefinition(const_cast<NamedDecl *>(PatternDef),
+ &SuggestedDef,
+ /*OnlyNeedComplete*/ false)) {
// If we're allowed to diagnose this and recover, do so.
bool Recover = Complain && !isSFINAEContext();
if (Complain)
@@ -811,7 +820,6 @@ bool Sema::DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
if (!Complain || (PatternDef && PatternDef->isInvalidDecl()))
return true;
- llvm::Optional<unsigned> Note;
QualType InstantiationTy;
if (TagDecl *TD = dyn_cast<TagDecl>(Instantiation))
InstantiationTy = Context.getTypeDeclType(TD);
@@ -829,25 +837,25 @@ bool Sema::DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
diag::err_explicit_instantiation_undefined_member)
<< /*member function*/ 1 << Instantiation->getDeclName()
<< Instantiation->getDeclContext();
- Note = diag::note_explicit_instantiation_here;
+ Diag(Pattern->getLocation(), diag::note_explicit_instantiation_here);
} else {
assert(isa<TagDecl>(Instantiation) && "Must be a TagDecl!");
Diag(PointOfInstantiation,
diag::err_implicit_instantiate_member_undefined)
<< InstantiationTy;
- Note = diag::note_member_declared_at;
+ Diag(Pattern->getLocation(), diag::note_member_declared_at);
}
} else {
if (isa<FunctionDecl>(Instantiation)) {
Diag(PointOfInstantiation,
diag::err_explicit_instantiation_undefined_func_template)
<< Pattern;
- Note = diag::note_explicit_instantiation_here;
+ Diag(Pattern->getLocation(), diag::note_explicit_instantiation_here);
} else if (isa<TagDecl>(Instantiation)) {
Diag(PointOfInstantiation, diag::err_template_instantiate_undefined)
<< (TSK != TSK_ImplicitInstantiation)
<< InstantiationTy;
- Note = diag::note_template_decl_here;
+ NoteTemplateLocation(*Pattern);
} else {
assert(isa<VarDecl>(Instantiation) && "Must be a VarDecl!");
if (isa<VarTemplateSpecializationDecl>(Instantiation)) {
@@ -860,11 +868,9 @@ bool Sema::DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
diag::err_explicit_instantiation_undefined_member)
<< /*static data member*/ 2 << Instantiation->getDeclName()
<< Instantiation->getDeclContext();
- Note = diag::note_explicit_instantiation_here;
+ Diag(Pattern->getLocation(), diag::note_explicit_instantiation_here);
}
}
- if (Note) // Diagnostics were emitted.
- Diag(Pattern->getLocation(), Note.getValue());
// In general, Instantiation isn't marked invalid to get more than one
// error for multiple undefined instantiations. But the code that does
@@ -889,8 +895,9 @@ void Sema::DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl) {
// Make this a warning when MSVC compatibility is requested.
unsigned DiagId = getLangOpts().MSVCCompat ? diag::ext_template_param_shadow
: diag::err_template_param_shadow;
- Diag(Loc, DiagId) << cast<NamedDecl>(PrevDecl)->getDeclName();
- Diag(PrevDecl->getLocation(), diag::note_template_param_here);
+ const auto *ND = cast<NamedDecl>(PrevDecl);
+ Diag(Loc, DiagId) << ND->getDeclName();
+ NoteTemplateParameterLocation(*ND);
}
/// AdjustDeclIfTemplate - If the given decl happens to be a template, reset
@@ -936,7 +943,7 @@ static TemplateArgumentLoc translateTemplateArgument(Sema &SemaRef,
TemplateName Template = Arg.getAsTemplate().get();
TemplateArgument TArg;
if (Arg.getEllipsisLoc().isValid())
- TArg = TemplateArgument(Template, Optional<unsigned int>());
+ TArg = TemplateArgument(Template, std::optional<unsigned int>());
else
TArg = Template;
return TemplateArgumentLoc(
@@ -998,8 +1005,8 @@ ParsedTemplateArgument Sema::ActOnTemplateTypeArgument(TypeResult ParsedType) {
TemplateName Name = DTST.getTypePtr()->getTemplateName();
if (SS.isSet())
Name = Context.getQualifiedTemplateName(SS.getScopeRep(),
- /*HasTemplateKeyword*/ false,
- Name.getAsTemplateDecl());
+ /*HasTemplateKeyword=*/false,
+ Name);
ParsedTemplateArgument Result(SS, TemplateTy::make(Name),
DTST.getTemplateNameLoc());
if (EllipsisLoc.isValid())
@@ -1101,19 +1108,8 @@ makeTemplateArgumentListInfo(Sema &S, TemplateIdAnnotation &TemplateId) {
return TemplateArgs;
}
-bool Sema::ActOnTypeConstraint(const CXXScopeSpec &SS,
- TemplateIdAnnotation *TypeConstr,
- TemplateTypeParmDecl *ConstrainedParameter,
- SourceLocation EllipsisLoc) {
- return BuildTypeConstraint(SS, TypeConstr, ConstrainedParameter, EllipsisLoc,
- false);
-}
+bool Sema::CheckTypeConstraint(TemplateIdAnnotation *TypeConstr) {
-bool Sema::BuildTypeConstraint(const CXXScopeSpec &SS,
- TemplateIdAnnotation *TypeConstr,
- TemplateTypeParmDecl *ConstrainedParameter,
- SourceLocation EllipsisLoc,
- bool AllowUnexpandedPack) {
TemplateName TN = TypeConstr->Template.get();
ConceptDecl *CD = cast<ConceptDecl>(TN.getAsTemplateDecl());
@@ -1131,9 +1127,32 @@ bool Sema::BuildTypeConstraint(const CXXScopeSpec &SS,
if (!WereArgsSpecified &&
CD->getTemplateParameters()->getMinRequiredArguments() > 1) {
Diag(TypeConstr->TemplateNameLoc,
- diag::err_type_constraint_missing_arguments) << CD;
+ diag::err_type_constraint_missing_arguments)
+ << CD;
return true;
}
+ return false;
+}
+
+bool Sema::ActOnTypeConstraint(const CXXScopeSpec &SS,
+ TemplateIdAnnotation *TypeConstr,
+ TemplateTypeParmDecl *ConstrainedParameter,
+ SourceLocation EllipsisLoc) {
+ return BuildTypeConstraint(SS, TypeConstr, ConstrainedParameter, EllipsisLoc,
+ false);
+}
+
+bool Sema::BuildTypeConstraint(const CXXScopeSpec &SS,
+ TemplateIdAnnotation *TypeConstr,
+ TemplateTypeParmDecl *ConstrainedParameter,
+ SourceLocation EllipsisLoc,
+ bool AllowUnexpandedPack) {
+
+ if (CheckTypeConstraint(TypeConstr))
+ return true;
+
+ TemplateName TN = TypeConstr->Template.get();
+ ConceptDecl *CD = cast<ConceptDecl>(TN.getAsTemplateDecl());
DeclarationNameInfo ConceptName(DeclarationName(TypeConstr->Name),
TypeConstr->TemplateNameLoc);
@@ -1202,11 +1221,11 @@ static ExprResult formImmediatelyDeclaredConstraint(
ImmediatelyDeclaredConstraint.get(), BO_LAnd,
EllipsisLoc, /*RHS=*/nullptr,
/*RParenLoc=*/SourceLocation(),
- /*NumExpansions=*/None);
+ /*NumExpansions=*/std::nullopt);
}
/// Attach a type-constraint to a template parameter.
-/// \returns true if an error occured. This can happen if the
+/// \returns true if an error occurred. This can happen if the
/// immediately-declared constraint could not be formed (e.g. incorrect number
/// of arguments for the named concept).
bool Sema::AttachTypeConstraint(NestedNameSpecifierLoc NS,
@@ -1238,42 +1257,52 @@ bool Sema::AttachTypeConstraint(NestedNameSpecifierLoc NS,
if (ImmediatelyDeclaredConstraint.isInvalid())
return true;
- ConstrainedParameter->setTypeConstraint(NS, NameInfo,
- /*FoundDecl=*/NamedConcept,
- NamedConcept, ArgsAsWritten,
+ auto *CL = ConceptReference::Create(Context, /*NNS=*/NS,
+ /*TemplateKWLoc=*/SourceLocation{},
+ /*ConceptNameInfo=*/NameInfo,
+ /*FoundDecl=*/NamedConcept,
+ /*NamedConcept=*/NamedConcept,
+ /*ArgsWritten=*/ArgsAsWritten);
+ ConstrainedParameter->setTypeConstraint(CL,
ImmediatelyDeclaredConstraint.get());
return false;
}
-bool Sema::AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *NTTP,
+bool Sema::AttachTypeConstraint(AutoTypeLoc TL,
+ NonTypeTemplateParmDecl *NewConstrainedParm,
+ NonTypeTemplateParmDecl *OrigConstrainedParm,
SourceLocation EllipsisLoc) {
- if (NTTP->getType() != TL.getType() ||
+ if (NewConstrainedParm->getType() != TL.getType() ||
TL.getAutoKeyword() != AutoTypeKeyword::Auto) {
- Diag(NTTP->getTypeSourceInfo()->getTypeLoc().getBeginLoc(),
+ Diag(NewConstrainedParm->getTypeSourceInfo()->getTypeLoc().getBeginLoc(),
diag::err_unsupported_placeholder_constraint)
- << NTTP->getTypeSourceInfo()->getTypeLoc().getSourceRange();
+ << NewConstrainedParm->getTypeSourceInfo()
+ ->getTypeLoc()
+ .getSourceRange();
return true;
}
// FIXME: Concepts: This should be the type of the placeholder, but this is
// unclear in the wording right now.
DeclRefExpr *Ref =
- BuildDeclRefExpr(NTTP, NTTP->getType(), VK_PRValue, NTTP->getLocation());
+ BuildDeclRefExpr(OrigConstrainedParm, OrigConstrainedParm->getType(),
+ VK_PRValue, OrigConstrainedParm->getLocation());
if (!Ref)
return true;
- ExprResult ImmediatelyDeclaredConstraint =
- formImmediatelyDeclaredConstraint(
- *this, TL.getNestedNameSpecifierLoc(), TL.getConceptNameInfo(),
- TL.getNamedConcept(), TL.getLAngleLoc(), TL.getRAngleLoc(),
- BuildDecltypeType(Ref, NTTP->getLocation()), NTTP->getLocation(),
- [&] (TemplateArgumentListInfo &ConstraintArgs) {
- for (unsigned I = 0, C = TL.getNumArgs(); I != C; ++I)
- ConstraintArgs.addArgument(TL.getArgLoc(I));
- }, EllipsisLoc);
+ ExprResult ImmediatelyDeclaredConstraint = formImmediatelyDeclaredConstraint(
+ *this, TL.getNestedNameSpecifierLoc(), TL.getConceptNameInfo(),
+ TL.getNamedConcept(), TL.getLAngleLoc(), TL.getRAngleLoc(),
+ BuildDecltypeType(Ref), OrigConstrainedParm->getLocation(),
+ [&](TemplateArgumentListInfo &ConstraintArgs) {
+ for (unsigned I = 0, C = TL.getNumArgs(); I != C; ++I)
+ ConstraintArgs.addArgument(TL.getArgLoc(I));
+ },
+ EllipsisLoc);
if (ImmediatelyDeclaredConstraint.isInvalid() ||
- !ImmediatelyDeclaredConstraint.isUsable())
+ !ImmediatelyDeclaredConstraint.isUsable())
return true;
- NTTP->setPlaceholderTypeConstraint(ImmediatelyDeclaredConstraint.get());
+ NewConstrainedParm->setPlaceholderTypeConstraint(
+ ImmediatelyDeclaredConstraint.get());
return false;
}
@@ -1290,7 +1319,7 @@ QualType Sema::CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
// - an identifier associated by name lookup with a non-type
// template-parameter declared with a type that contains a
// placeholder type (7.1.7.4),
- TSI = SubstAutoTypeSourceInfo(TSI, Context.DependentTy);
+ TSI = SubstAutoTypeSourceInfoDependent(TSI);
}
return CheckNonTypeTemplateParameterType(TSI->getType(), Loc);
@@ -1469,7 +1498,7 @@ NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Position,
SourceLocation EqualLoc,
Expr *Default) {
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
// Check that we have valid decl-specifiers specified.
auto CheckValidDeclSpecifiers = [this, &D] {
@@ -1525,11 +1554,11 @@ NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
CheckValidDeclSpecifiers();
- if (TInfo->getType()->isUndeducedType()) {
- Diag(D.getIdentifierLoc(),
- diag::warn_cxx14_compat_template_nontype_parm_auto_type)
- << QualType(TInfo->getType()->getContainedAutoType(), 0);
- }
+ if (const auto *T = TInfo->getType()->getContainedDeducedType())
+ if (isa<AutoType>(T))
+ Diag(D.getIdentifierLoc(),
+ diag::warn_cxx14_compat_template_nontype_parm_auto_type)
+ << QualType(TInfo->getType()->getContainedAutoType(), 0);
assert(S->isTemplateParamScope() &&
"Non-type template parameter not in template parameter scope!");
@@ -1553,7 +1582,7 @@ NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
if (AutoTypeLoc TL = TInfo->getTypeLoc().getContainedAutoTypeLoc())
if (TL.isConstrained())
- if (AttachTypeConstraint(TL, Param, D.getEllipsisLoc()))
+ if (AttachTypeConstraint(TL, Param, Param, D.getEllipsisLoc()))
Invalid = true;
if (Invalid)
@@ -1586,15 +1615,6 @@ NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
if (DiagnoseUnexpandedParameterPack(Default, UPPC_DefaultArgument))
return Param;
- TemplateArgument Converted;
- ExprResult DefaultRes =
- CheckTemplateArgument(Param, Param->getType(), Default, Converted);
- if (DefaultRes.isInvalid()) {
- Param->setInvalidDecl();
- return Param;
- }
- Default = DefaultRes.get();
-
Param->setDefaultArgument(Default);
}
@@ -1681,6 +1701,101 @@ NamedDecl *Sema::ActOnTemplateTemplateParameter(Scope* S,
return Param;
}
+namespace {
+class ConstraintRefersToContainingTemplateChecker
+ : public TreeTransform<ConstraintRefersToContainingTemplateChecker> {
+ bool Result = false;
+ const FunctionDecl *Friend = nullptr;
+ unsigned TemplateDepth = 0;
+
+ // Check a record-decl that we've seen to see if it is a lexical parent of the
+ // Friend, likely because it was referred to without its template arguments.
+ void CheckIfContainingRecord(const CXXRecordDecl *CheckingRD) {
+ CheckingRD = CheckingRD->getMostRecentDecl();
+ if (!CheckingRD->isTemplated())
+ return;
+
+ for (const DeclContext *DC = Friend->getLexicalDeclContext();
+ DC && !DC->isFileContext(); DC = DC->getParent())
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(DC))
+ if (CheckingRD == RD->getMostRecentDecl())
+ Result = true;
+ }
+
+ void CheckNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
+ assert(D->getDepth() <= TemplateDepth &&
+ "Nothing should reference a value below the actual template depth, "
+ "depth is likely wrong");
+ if (D->getDepth() != TemplateDepth)
+ Result = true;
+
+ // Necessary because the type of the NTTP might be what refers to the parent
+ // constriant.
+ TransformType(D->getType());
+ }
+
+public:
+ using inherited = TreeTransform<ConstraintRefersToContainingTemplateChecker>;
+
+ ConstraintRefersToContainingTemplateChecker(Sema &SemaRef,
+ const FunctionDecl *Friend,
+ unsigned TemplateDepth)
+ : inherited(SemaRef), Friend(Friend), TemplateDepth(TemplateDepth) {}
+ bool getResult() const { return Result; }
+
+ // This should be the only template parm type that we have to deal with.
+ // SubstTempalteTypeParmPack, SubstNonTypeTemplateParmPack, and
+ // FunctionParmPackExpr are all partially substituted, which cannot happen
+ // with concepts at this point in translation.
+ using inherited::TransformTemplateTypeParmType;
+ QualType TransformTemplateTypeParmType(TypeLocBuilder &TLB,
+ TemplateTypeParmTypeLoc TL, bool) {
+ assert(TL.getDecl()->getDepth() <= TemplateDepth &&
+ "Nothing should reference a value below the actual template depth, "
+ "depth is likely wrong");
+ if (TL.getDecl()->getDepth() != TemplateDepth)
+ Result = true;
+ return inherited::TransformTemplateTypeParmType(
+ TLB, TL,
+ /*SuppressObjCLifetime=*/false);
+ }
+
+ Decl *TransformDecl(SourceLocation Loc, Decl *D) {
+ if (!D)
+ return D;
+ // FIXME : This is possibly an incomplete list, but it is unclear what other
+ // Decl kinds could be used to refer to the template parameters. This is a
+ // best guess so far based on examples currently available, but the
+ // unreachable should catch future instances/cases.
+ if (auto *TD = dyn_cast<TypedefNameDecl>(D))
+ TransformType(TD->getUnderlyingType());
+ else if (auto *NTTPD = dyn_cast<NonTypeTemplateParmDecl>(D))
+ CheckNonTypeTemplateParmDecl(NTTPD);
+ else if (auto *VD = dyn_cast<ValueDecl>(D))
+ TransformType(VD->getType());
+ else if (auto *TD = dyn_cast<TemplateDecl>(D))
+ TransformTemplateParameterList(TD->getTemplateParameters());
+ else if (auto *RD = dyn_cast<CXXRecordDecl>(D))
+ CheckIfContainingRecord(RD);
+ else if (isa<NamedDecl>(D)) {
+ // No direct types to visit here I believe.
+ } else
+ llvm_unreachable("Don't know how to handle this declaration type yet");
+ return D;
+ }
+};
+} // namespace
+
+bool Sema::ConstraintExpressionDependsOnEnclosingTemplate(
+ const FunctionDecl *Friend, unsigned TemplateDepth,
+ const Expr *Constraint) {
+ assert(Friend->getFriendObjectKind() && "Only works on a friend");
+ ConstraintRefersToContainingTemplateChecker Checker(*this, Friend,
+ TemplateDepth);
+ Checker.TransformExpr(const_cast<Expr *>(Constraint));
+ return Checker.getResult();
+}
+
/// ActOnTemplateParameterList - Builds a TemplateParameterList, optionally
/// constrained by RequiresClause, that contains the template parameters in
/// Params.
@@ -1700,8 +1815,7 @@ Sema::ActOnTemplateParameterList(unsigned Depth,
return TemplateParameterList::Create(
Context, TemplateLoc, LAngleLoc,
- llvm::makeArrayRef(Params.data(), Params.size()),
- RAngleLoc, RequiresClause);
+ llvm::ArrayRef(Params.data(), Params.size()), RAngleLoc, RequiresClause);
}
static void SetNestedNameSpecifier(Sema &S, TagDecl *T,
@@ -1710,6 +1824,35 @@ static void SetNestedNameSpecifier(Sema &S, TagDecl *T,
T->setQualifierInfo(SS.getWithLocInContext(S.Context));
}
+// Returns the template parameter list with all default template argument
+// information.
+static TemplateParameterList *GetTemplateParameterList(TemplateDecl *TD) {
+ // Make sure we get the template parameter list from the most
+ // recent declaration, since that is the only one that is guaranteed to
+ // have all the default template argument information.
+ Decl *D = TD->getMostRecentDecl();
+ // C++11 [temp.param]p12:
+ // A default template argument shall not be specified in a friend class
+ // template declaration.
+ //
+ // Skip past friend *declarations* because they are not supposed to contain
+ // default template arguments. Moreover, these declarations may introduce
+ // template parameters living in different template depths than the
+ // corresponding template parameters in TD, causing unmatched constraint
+ // substitution.
+ //
+ // FIXME: Diagnose such cases within a class template:
+ // template <class T>
+ // struct S {
+ // template <class = void> friend struct C;
+ // };
+ // template struct S<int>;
+ while (D->getFriendObjectKind() != Decl::FriendObjectKind::FOK_None &&
+ D->getPreviousDecl())
+ D = D->getPreviousDecl();
+ return cast<TemplateDecl>(D)->getTemplateParameters();
+}
+
DeclResult Sema::CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
@@ -1727,7 +1870,8 @@ DeclResult Sema::CheckClassTemplate(
return true;
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
- assert(Kind != TTK_Enum && "can't build template of enumerated type");
+ assert(Kind != TagTypeKind::Enum &&
+ "can't build template of enumerated type");
// There is no such thing as an unnamed class template.
if (!Name) {
@@ -1881,10 +2025,13 @@ DeclResult Sema::CheckClassTemplate(
// for a friend in a dependent context: the template parameter list itself
// could be dependent.
if (!(TUK == TUK_Friend && CurContext->isDependentContext()) &&
- !TemplateParameterListsAreEqual(TemplateParams,
- PrevClassTemplate->getTemplateParameters(),
- /*Complain=*/true,
- TPL_TemplateMatch))
+ !TemplateParameterListsAreEqual(
+ TemplateCompareNewDeclInfo(SemanticContext ? SemanticContext
+ : CurContext,
+ CurContext, KWLoc),
+ TemplateParams, PrevClassTemplate,
+ PrevClassTemplate->getTemplateParameters(), /*Complain=*/true,
+ TPL_TemplateMatch))
return true;
// C++ [temp.class]p4:
@@ -1943,13 +2090,13 @@ DeclResult Sema::CheckClassTemplate(
if (!(TUK == TUK_Friend && CurContext->isDependentContext()) &&
CheckTemplateParameterList(
TemplateParams,
- PrevClassTemplate
- ? PrevClassTemplate->getMostRecentDecl()->getTemplateParameters()
- : nullptr,
+ PrevClassTemplate ? GetTemplateParameterList(PrevClassTemplate)
+ : nullptr,
(SS.isSet() && SemanticContext && SemanticContext->isRecord() &&
SemanticContext->isDependentContext())
? TPC_ClassTemplateMember
- : TUK == TUK_Friend ? TPC_FriendClassTemplate : TPC_ClassTemplate,
+ : TUK == TUK_Friend ? TPC_FriendClassTemplate
+ : TPC_ClassTemplate,
SkipBody))
Invalid = true;
@@ -1980,8 +2127,8 @@ DeclResult Sema::CheckClassTemplate(
SetNestedNameSpecifier(*this, NewClass, SS);
if (NumOuterTemplateParamLists > 0)
NewClass->setTemplateParameterListsInfo(
- Context, llvm::makeArrayRef(OuterTemplateParamLists,
- NumOuterTemplateParamLists));
+ Context,
+ llvm::ArrayRef(OuterTemplateParamLists, NumOuterTemplateParamLists));
// Add alignment attributes if necessary; these attributes are checked when
// the ASTContext lays out the structure.
@@ -2064,7 +2211,7 @@ DeclResult Sema::CheckClassTemplate(
}
if (PrevClassTemplate)
- CheckRedeclarationModuleOwnership(NewTemplate, PrevClassTemplate);
+ CheckRedeclarationInModule(NewTemplate, PrevClassTemplate);
if (Invalid) {
NewTemplate->setInvalidDecl();
@@ -2132,10 +2279,24 @@ public:
struct ConvertConstructorToDeductionGuideTransform {
ConvertConstructorToDeductionGuideTransform(Sema &S,
ClassTemplateDecl *Template)
- : SemaRef(S), Template(Template) {}
+ : SemaRef(S), Template(Template) {
+ // If the template is nested, then we need to use the original
+ // pattern to iterate over the constructors.
+ ClassTemplateDecl *Pattern = Template;
+ while (Pattern->getInstantiatedFromMemberTemplate()) {
+ if (Pattern->isMemberSpecialization())
+ break;
+ Pattern = Pattern->getInstantiatedFromMemberTemplate();
+ NestedPattern = Pattern;
+ }
+
+ if (NestedPattern)
+ OuterInstantiationArgs = SemaRef.getTemplateInstantiationArgs(Template);
+ }
Sema &SemaRef;
ClassTemplateDecl *Template;
+ ClassTemplateDecl *NestedPattern = nullptr;
DeclContext *DC = Template->getDeclContext();
CXXRecordDecl *Primary = Template->getTemplatedDecl();
@@ -2148,6 +2309,10 @@ struct ConvertConstructorToDeductionGuideTransform {
// depth-0 template parameters.
unsigned Depth1IndexAdjustment = Template->getTemplateParameters()->size();
+ // Instantiation arguments for the outermost depth-1 templates
+ // when the template is nested
+ MultiLevelTemplateArgumentList OuterInstantiationArgs;
+
/// Transform a constructor declaration into a deduction guide.
NamedDecl *transformConstructor(FunctionTemplateDecl *FTD,
CXXConstructorDecl *CD) {
@@ -2162,33 +2327,71 @@ struct ConvertConstructorToDeductionGuideTransform {
// -- The template parameters are the template parameters of the class
// template followed by the template parameters (including default
// template arguments) of the constructor, if any.
- TemplateParameterList *TemplateParams = Template->getTemplateParameters();
+ TemplateParameterList *TemplateParams = GetTemplateParameterList(Template);
if (FTD) {
TemplateParameterList *InnerParams = FTD->getTemplateParameters();
SmallVector<NamedDecl *, 16> AllParams;
+ SmallVector<TemplateArgument, 16> Depth1Args;
AllParams.reserve(TemplateParams->size() + InnerParams->size());
AllParams.insert(AllParams.begin(),
TemplateParams->begin(), TemplateParams->end());
SubstArgs.reserve(InnerParams->size());
+ Depth1Args.reserve(InnerParams->size());
// Later template parameters could refer to earlier ones, so build up
// a list of substituted template arguments as we go.
for (NamedDecl *Param : *InnerParams) {
MultiLevelTemplateArgumentList Args;
Args.setKind(TemplateSubstitutionKind::Rewrite);
- Args.addOuterTemplateArguments(SubstArgs);
+ Args.addOuterTemplateArguments(Depth1Args);
Args.addOuterRetainedLevel();
+ if (NestedPattern)
+ Args.addOuterRetainedLevels(NestedPattern->getTemplateDepth());
NamedDecl *NewParam = transformTemplateParameter(Param, Args);
if (!NewParam)
return nullptr;
+
+ // Constraints require that we substitute depth-1 arguments
+ // to match depths when substituted for evaluation later
+ Depth1Args.push_back(SemaRef.Context.getCanonicalTemplateArgument(
+ SemaRef.Context.getInjectedTemplateArg(NewParam)));
+
+ if (NestedPattern) {
+ TemplateDeclInstantiator Instantiator(SemaRef, DC,
+ OuterInstantiationArgs);
+ Instantiator.setEvaluateConstraints(false);
+ SemaRef.runWithSufficientStackSpace(NewParam->getLocation(), [&] {
+ NewParam = cast<NamedDecl>(Instantiator.Visit(NewParam));
+ });
+ }
+
+ assert(NewParam->getTemplateDepth() == 0 &&
+ "Unexpected template parameter depth");
+
AllParams.push_back(NewParam);
SubstArgs.push_back(SemaRef.Context.getCanonicalTemplateArgument(
SemaRef.Context.getInjectedTemplateArg(NewParam)));
}
+
+ // Substitute new template parameters into requires-clause if present.
+ Expr *RequiresClause = nullptr;
+ if (Expr *InnerRC = InnerParams->getRequiresClause()) {
+ MultiLevelTemplateArgumentList Args;
+ Args.setKind(TemplateSubstitutionKind::Rewrite);
+ Args.addOuterTemplateArguments(Depth1Args);
+ Args.addOuterRetainedLevel();
+ if (NestedPattern)
+ Args.addOuterRetainedLevels(NestedPattern->getTemplateDepth());
+ ExprResult E = SemaRef.SubstExpr(InnerRC, Args);
+ if (E.isInvalid())
+ return nullptr;
+ RequiresClause = E.getAs<Expr>();
+ }
+
TemplateParams = TemplateParameterList::Create(
SemaRef.Context, InnerParams->getTemplateLoc(),
InnerParams->getLAngleLoc(), AllParams, InnerParams->getRAngleLoc(),
- /*FIXME: RequiresClause*/ nullptr);
+ RequiresClause);
}
// If we built a new template-parameter-list, track that we need to
@@ -2201,6 +2404,9 @@ struct ConvertConstructorToDeductionGuideTransform {
Args.addOuterRetainedLevel();
}
+ if (NestedPattern)
+ Args.addOuterRetainedLevels(NestedPattern->getTemplateDepth());
+
FunctionProtoTypeLoc FPTL = CD->getTypeSourceInfo()->getTypeLoc()
.getAsAdjusted<FunctionProtoTypeLoc>();
assert(FPTL && "no prototype for constructor declaration");
@@ -2232,6 +2438,9 @@ struct ConvertConstructorToDeductionGuideTransform {
QualType Result = SemaRef.BuildFunctionType(DeducedType, ParamTypes, Loc,
DeductionGuideName, EPI);
TypeSourceInfo *TSI = SemaRef.Context.getTrivialTypeSourceInfo(Result, Loc);
+ if (NestedPattern)
+ TSI = SemaRef.SubstType(TSI, OuterInstantiationArgs, Loc,
+ DeductionGuideName);
FunctionProtoTypeLoc FPTL =
TSI->getTypeLoc().castAs<FunctionProtoTypeLoc>();
@@ -2239,15 +2448,19 @@ struct ConvertConstructorToDeductionGuideTransform {
// Build the parameters, needed during deduction / substitution.
SmallVector<ParmVarDecl*, 4> Params;
for (auto T : ParamTypes) {
- ParmVarDecl *NewParam = ParmVarDecl::Create(
- SemaRef.Context, DC, Loc, Loc, nullptr, T,
- SemaRef.Context.getTrivialTypeSourceInfo(T, Loc), SC_None, nullptr);
+ auto *TSI = SemaRef.Context.getTrivialTypeSourceInfo(T, Loc);
+ if (NestedPattern)
+ TSI = SemaRef.SubstType(TSI, OuterInstantiationArgs, Loc,
+ DeclarationName());
+ ParmVarDecl *NewParam =
+ ParmVarDecl::Create(SemaRef.Context, DC, Loc, Loc, nullptr,
+ TSI->getType(), TSI, SC_None, nullptr);
NewParam->setScopeInfo(0, Params.size());
FPTL.setParam(Params.size(), NewParam);
Params.push_back(NewParam);
}
- return buildDeductionGuide(Template->getTemplateParameters(), nullptr,
+ return buildDeductionGuide(GetTemplateParameterList(Template), nullptr,
ExplicitSpecifier(), TSI, Loc, Loc, Loc);
}
@@ -2262,27 +2475,15 @@ private:
// substitute it directly.
auto *NewTTP = TemplateTypeParmDecl::Create(
SemaRef.Context, DC, TTP->getBeginLoc(), TTP->getLocation(),
- /*Depth*/ 0, Depth1IndexAdjustment + TTP->getIndex(),
+ TTP->getDepth() - 1, Depth1IndexAdjustment + TTP->getIndex(),
TTP->getIdentifier(), TTP->wasDeclaredWithTypename(),
TTP->isParameterPack(), TTP->hasTypeConstraint(),
- TTP->isExpandedParameterPack() ?
- llvm::Optional<unsigned>(TTP->getNumExpansionParameters()) : None);
- if (const auto *TC = TTP->getTypeConstraint()) {
- TemplateArgumentListInfo TransformedArgs;
- const auto *ArgsAsWritten = TC->getTemplateArgsAsWritten();
- if (!ArgsAsWritten ||
- SemaRef.Subst(ArgsAsWritten->getTemplateArgs(),
- ArgsAsWritten->NumTemplateArgs, TransformedArgs,
- Args))
- SemaRef.AttachTypeConstraint(
- TC->getNestedNameSpecifierLoc(), TC->getConceptNameInfo(),
- TC->getNamedConcept(), ArgsAsWritten ? &TransformedArgs : nullptr,
- NewTTP,
- NewTTP->isParameterPack()
- ? cast<CXXFoldExpr>(TC->getImmediatelyDeclaredConstraint())
- ->getEllipsisLoc()
- : SourceLocation());
- }
+ TTP->isExpandedParameterPack()
+ ? std::optional<unsigned>(TTP->getNumExpansionParameters())
+ : std::nullopt);
+ if (const auto *TC = TTP->getTypeConstraint())
+ SemaRef.SubstTypeConstraint(NewTTP, TC, Args,
+ /*EvaluateConstraint*/ true);
if (TTP->hasDefaultArgument()) {
TypeSourceInfo *InstantiatedDefaultArg =
SemaRef.SubstType(TTP->getDefaultArgumentInfo(), Args,
@@ -2309,7 +2510,8 @@ private:
// the index of the parameter once it's done.
auto *NewParam =
cast<TemplateParmDecl>(SemaRef.SubstDecl(OldParam, DC, Args));
- assert(NewParam->getDepth() == 0 && "unexpected template param depth");
+ assert(NewParam->getDepth() == OldParam->getDepth() - 1 &&
+ "unexpected template param depth");
NewParam->setPosition(NewParam->getPosition() + Depth1IndexAdjustment);
return NewParam;
}
@@ -2326,6 +2528,9 @@ private:
for (auto *OldParam : TL.getParams()) {
ParmVarDecl *NewParam =
transformFunctionTypeParam(OldParam, Args, MaterializedTypedefs);
+ if (NestedPattern && NewParam)
+ NewParam = transformFunctionTypeParam(NewParam, OuterInstantiationArgs,
+ MaterializedTypedefs);
if (!NewParam)
return QualType();
ParamTypes.push_back(NewParam->getType());
@@ -2409,15 +2614,15 @@ private:
: ParamTy->isRValueReferenceType() ? VK_XValue
: VK_PRValue);
}
+ // Handle arrays and functions decay.
+ auto NewType = NewDI->getType();
+ if (NewType->isArrayType() || NewType->isFunctionType())
+ NewType = SemaRef.Context.getDecayedType(NewType);
- ParmVarDecl *NewParam = ParmVarDecl::Create(SemaRef.Context, DC,
- OldParam->getInnerLocStart(),
- OldParam->getLocation(),
- OldParam->getIdentifier(),
- NewDI->getType(),
- NewDI,
- OldParam->getStorageClass(),
- NewDefArg.get());
+ ParmVarDecl *NewParam = ParmVarDecl::Create(
+ SemaRef.Context, DC, OldParam->getInnerLocStart(),
+ OldParam->getLocation(), OldParam->getIdentifier(), NewType, NewDI,
+ OldParam->getStorageClass(), NewDefArg.get());
NewParam->setScopeInfo(OldParam->getFunctionScopeDepth(),
OldParam->getFunctionScopeIndex());
SemaRef.CurrentInstantiationScope->InstantiatedLocal(OldParam, NewParam);
@@ -2461,12 +2666,53 @@ private:
};
}
+FunctionTemplateDecl *Sema::DeclareImplicitDeductionGuideFromInitList(
+ TemplateDecl *Template, MutableArrayRef<QualType> ParamTypes,
+ SourceLocation Loc) {
+ if (CXXRecordDecl *DefRecord =
+ cast<CXXRecordDecl>(Template->getTemplatedDecl())->getDefinition()) {
+ if (TemplateDecl *DescribedTemplate =
+ DefRecord->getDescribedClassTemplate())
+ Template = DescribedTemplate;
+ }
+
+ DeclContext *DC = Template->getDeclContext();
+ if (DC->isDependentContext())
+ return nullptr;
+
+ ConvertConstructorToDeductionGuideTransform Transform(
+ *this, cast<ClassTemplateDecl>(Template));
+ if (!isCompleteType(Loc, Transform.DeducedType))
+ return nullptr;
+
+ // In case we were expanding a pack when we attempted to declare deduction
+ // guides, turn off pack expansion for everything we're about to do.
+ ArgumentPackSubstitutionIndexRAII SubstIndex(*this,
+ /*NewSubstitutionIndex=*/-1);
+ // Create a template instantiation record to track the "instantiation" of
+ // constructors into deduction guides.
+ InstantiatingTemplate BuildingDeductionGuides(
+ *this, Loc, Template,
+ Sema::InstantiatingTemplate::BuildingDeductionGuidesTag{});
+ if (BuildingDeductionGuides.isInvalid())
+ return nullptr;
+
+ ClassTemplateDecl *Pattern =
+ Transform.NestedPattern ? Transform.NestedPattern : Transform.Template;
+ ContextRAII SavedContext(*this, Pattern->getTemplatedDecl());
+
+ auto *DG = cast<FunctionTemplateDecl>(
+ Transform.buildSimpleDeductionGuide(ParamTypes));
+ SavedContext.pop();
+ return DG;
+}
+
void Sema::DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc) {
if (CXXRecordDecl *DefRecord =
cast<CXXRecordDecl>(Template->getTemplatedDecl())->getDefinition()) {
- TemplateDecl *DescribedTemplate = DefRecord->getDescribedClassTemplate();
- Template = DescribedTemplate ? DescribedTemplate : Template;
+ if (TemplateDecl *DescribedTemplate = DefRecord->getDescribedClassTemplate())
+ Template = DescribedTemplate;
}
DeclContext *DC = Template->getDeclContext();
@@ -2490,9 +2736,9 @@ void Sema::DeclareImplicitDeductionGuides(TemplateDecl *Template,
ArgumentPackSubstitutionIndexRAII SubstIndex(*this, -1);
// Create a template instantiation record to track the "instantiation" of
// constructors into deduction guides.
- // FIXME: Add a kind for this to give more meaningful diagnostics. But can
- // this substitution process actually fail?
- InstantiatingTemplate BuildingDeductionGuides(*this, Loc, Template);
+ InstantiatingTemplate BuildingDeductionGuides(
+ *this, Loc, Template,
+ Sema::InstantiatingTemplate::BuildingDeductionGuidesTag{});
if (BuildingDeductionGuides.isInvalid())
return;
@@ -2500,13 +2746,24 @@ void Sema::DeclareImplicitDeductionGuides(TemplateDecl *Template,
// FIXME: Skip constructors for which deduction must necessarily fail (those
// for which some class template parameter without a default argument never
// appears in a deduced context).
+ ClassTemplateDecl *Pattern =
+ Transform.NestedPattern ? Transform.NestedPattern : Transform.Template;
+ ContextRAII SavedContext(*this, Pattern->getTemplatedDecl());
+ llvm::SmallPtrSet<NamedDecl *, 8> ProcessedCtors;
bool AddedAny = false;
- for (NamedDecl *D : LookupConstructors(Transform.Primary)) {
+ for (NamedDecl *D : LookupConstructors(Pattern->getTemplatedDecl())) {
D = D->getUnderlyingDecl();
if (D->isInvalidDecl() || D->isImplicit())
continue;
+
D = cast<NamedDecl>(D->getCanonicalDecl());
+ // Within C++20 modules, we may have multiple same constructors in
+ // multiple same RecordDecls. And it doesn't make sense to create
+ // duplicated deduction guides for the duplicated constructors.
+ if (ProcessedCtors.count(D))
+ continue;
+
auto *FTD = dyn_cast<FunctionTemplateDecl>(D);
auto *CD =
dyn_cast_or_null<CXXConstructorDecl>(FTD ? FTD->getTemplatedDecl() : D);
@@ -2516,11 +2773,12 @@ void Sema::DeclareImplicitDeductionGuides(TemplateDecl *Template,
continue;
// Cannot make a deduction guide when unparsed arguments are present.
- if (std::any_of(CD->param_begin(), CD->param_end(), [](ParmVarDecl *P) {
+ if (llvm::any_of(CD->parameters(), [](ParmVarDecl *P) {
return !P || P->hasUnparsedDefaultArg();
}))
continue;
+ ProcessedCtors.insert(D);
Transform.transformConstructor(FTD, CD);
AddedAny = true;
}
@@ -2530,7 +2788,7 @@ void Sema::DeclareImplicitDeductionGuides(TemplateDecl *Template,
// additional function template derived as above from a hypothetical
// constructor C().
if (!AddedAny)
- Transform.buildSimpleDeductionGuide(None);
+ Transform.buildSimpleDeductionGuide(std::nullopt);
// -- An additional function template derived as above from a hypothetical
// constructor C(C), called the copy deduction candidate.
@@ -2538,7 +2796,9 @@ void Sema::DeclareImplicitDeductionGuides(TemplateDecl *Template,
cast<FunctionTemplateDecl>(
Transform.buildSimpleDeductionGuide(Transform.DeducedType))
->getTemplatedDecl())
- ->setIsCopyDeductionCandidate();
+ ->setDeductionCandidateKind(DeductionCandidate::Copy);
+
+ SavedContext.pop();
}
/// Diagnose the presence of a default template argument on a
@@ -2691,8 +2951,15 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
for (TemplateParameterList::iterator NewParam = NewParams->begin(),
NewParamEnd = NewParams->end();
NewParam != NewParamEnd; ++NewParam) {
- // Variables used to diagnose redundant default arguments
+ // Whether we've seen a duplicate default argument in the same translation
+ // unit.
bool RedundantDefaultArg = false;
+ // Whether we've found inconsis inconsitent default arguments in different
+ // translation unit.
+ bool InconsistentDefaultArg = false;
+ // The name of the module which contains the inconsistent default argument.
+ std::string PrevModuleName;
+
SourceLocation OldDefaultLoc;
SourceLocation NewDefaultLoc;
@@ -2725,7 +2992,15 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
OldDefaultLoc = OldTypeParm->getDefaultArgumentLoc();
NewDefaultLoc = NewTypeParm->getDefaultArgumentLoc();
SawDefaultArgument = true;
- RedundantDefaultArg = true;
+
+ if (!OldTypeParm->getOwningModule())
+ RedundantDefaultArg = true;
+ else if (!getASTContext().isSameDefaultTemplateArgument(OldTypeParm,
+ NewTypeParm)) {
+ InconsistentDefaultArg = true;
+ PrevModuleName =
+ OldTypeParm->getImportedOwningModule()->getFullModuleName();
+ }
PreviousDefaultArgLoc = NewDefaultLoc;
} else if (OldTypeParm && OldTypeParm->hasDefaultArgument()) {
// Merge the default argument from the old declaration to the
@@ -2770,7 +3045,14 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
OldDefaultLoc = OldNonTypeParm->getDefaultArgumentLoc();
NewDefaultLoc = NewNonTypeParm->getDefaultArgumentLoc();
SawDefaultArgument = true;
- RedundantDefaultArg = true;
+ if (!OldNonTypeParm->getOwningModule())
+ RedundantDefaultArg = true;
+ else if (!getASTContext().isSameDefaultTemplateArgument(
+ OldNonTypeParm, NewNonTypeParm)) {
+ InconsistentDefaultArg = true;
+ PrevModuleName =
+ OldNonTypeParm->getImportedOwningModule()->getFullModuleName();
+ }
PreviousDefaultArgLoc = NewDefaultLoc;
} else if (OldNonTypeParm && OldNonTypeParm->hasDefaultArgument()) {
// Merge the default argument from the old declaration to the
@@ -2814,7 +3096,14 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
OldDefaultLoc = OldTemplateParm->getDefaultArgument().getLocation();
NewDefaultLoc = NewTemplateParm->getDefaultArgument().getLocation();
SawDefaultArgument = true;
- RedundantDefaultArg = true;
+ if (!OldTemplateParm->getOwningModule())
+ RedundantDefaultArg = true;
+ else if (!getASTContext().isSameDefaultTemplateArgument(
+ OldTemplateParm, NewTemplateParm)) {
+ InconsistentDefaultArg = true;
+ PrevModuleName =
+ OldTemplateParm->getImportedOwningModule()->getFullModuleName();
+ }
PreviousDefaultArgLoc = NewDefaultLoc;
} else if (OldTemplateParm && OldTemplateParm->hasDefaultArgument()) {
// Merge the default argument from the old declaration to the
@@ -2841,13 +3130,32 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
Invalid = true;
}
+ // [basic.def.odr]/13:
+ // There can be more than one definition of a
+ // ...
+ // default template argument
+ // ...
+ // in a program provided that each definition appears in a different
+ // translation unit and the definitions satisfy the [same-meaning
+ // criteria of the ODR].
+ //
+ // Simply, the design of modules allows the definition of template default
+ // argument to be repeated across translation unit. Note that the ODR is
+ // checked elsewhere. But it is still not allowed to repeat template default
+ // argument in the same translation unit.
if (RedundantDefaultArg) {
- // C++ [temp.param]p12:
- // A template-parameter shall not be given default arguments
- // by two different declarations in the same scope.
Diag(NewDefaultLoc, diag::err_template_param_default_arg_redefinition);
Diag(OldDefaultLoc, diag::note_template_param_prev_default_arg);
Invalid = true;
+ } else if (InconsistentDefaultArg) {
+ // We could only diagnose about the case that the OldParam is imported.
+ // The case NewParam is imported should be handled in ASTReader.
+ Diag(NewDefaultLoc,
+ diag::err_template_param_default_arg_inconsistent_redefinition);
+ Diag(OldDefaultLoc,
+ diag::note_template_param_prev_default_arg_in_other_module)
+ << PrevModuleName;
+ Invalid = true;
} else if (MissingDefaultArg && TPC != TPC_FunctionTemplate) {
// C++ [temp.param]p11:
// If a template-parameter of a class template has a default
@@ -3355,7 +3663,7 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
// Fabricate an empty template parameter list for the invented header.
return TemplateParameterList::Create(Context, SourceLocation(),
- SourceLocation(), None,
+ SourceLocation(), std::nullopt,
SourceLocation(), nullptr);
}
@@ -3440,45 +3748,54 @@ void Sema::NoteAllFoundTemplates(TemplateName Name) {
static QualType
checkBuiltinTemplateIdType(Sema &SemaRef, BuiltinTemplateDecl *BTD,
- const SmallVectorImpl<TemplateArgument> &Converted,
+ ArrayRef<TemplateArgument> Converted,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs) {
ASTContext &Context = SemaRef.getASTContext();
+
switch (BTD->getBuiltinTemplateKind()) {
case BTK__make_integer_seq: {
// Specializations of __make_integer_seq<S, T, N> are treated like
// S<T, 0, ..., N-1>.
+ QualType OrigType = Converted[1].getAsType();
// C++14 [inteseq.intseq]p1:
// T shall be an integer type.
- if (!Converted[1].getAsType()->isIntegralType(Context)) {
+ if (!OrigType->isDependentType() && !OrigType->isIntegralType(Context)) {
SemaRef.Diag(TemplateArgs[1].getLocation(),
diag::err_integer_sequence_integral_element_type);
return QualType();
}
- // C++14 [inteseq.make]p1:
- // If N is negative the program is ill-formed.
TemplateArgument NumArgsArg = Converted[2];
- llvm::APSInt NumArgs = NumArgsArg.getAsIntegral();
- if (NumArgs < 0) {
+ if (NumArgsArg.isDependent())
+ return Context.getCanonicalTemplateSpecializationType(TemplateName(BTD),
+ Converted);
+
+ TemplateArgumentListInfo SyntheticTemplateArgs;
+ // The type argument, wrapped in substitution sugar, gets reused as the
+ // first template argument in the synthetic template argument list.
+ SyntheticTemplateArgs.addArgument(
+ TemplateArgumentLoc(TemplateArgument(OrigType),
+ SemaRef.Context.getTrivialTypeSourceInfo(
+ OrigType, TemplateArgs[1].getLocation())));
+
+ if (llvm::APSInt NumArgs = NumArgsArg.getAsIntegral(); NumArgs >= 0) {
+ // Expand N into 0 ... N-1.
+ for (llvm::APSInt I(NumArgs.getBitWidth(), NumArgs.isUnsigned());
+ I < NumArgs; ++I) {
+ TemplateArgument TA(Context, I, OrigType);
+ SyntheticTemplateArgs.addArgument(SemaRef.getTrivialTemplateArgumentLoc(
+ TA, OrigType, TemplateArgs[2].getLocation()));
+ }
+ } else {
+ // C++14 [inteseq.make]p1:
+ // If N is negative the program is ill-formed.
SemaRef.Diag(TemplateArgs[2].getLocation(),
diag::err_integer_sequence_negative_length);
return QualType();
}
- QualType ArgTy = NumArgsArg.getIntegralType();
- TemplateArgumentListInfo SyntheticTemplateArgs;
- // The type argument gets reused as the first template argument in the
- // synthetic template argument list.
- SyntheticTemplateArgs.addArgument(TemplateArgs[1]);
- // Expand N into 0 ... N-1.
- for (llvm::APSInt I(NumArgs.getBitWidth(), NumArgs.isUnsigned());
- I < NumArgs; ++I) {
- TemplateArgument TA(Context, I, ArgTy);
- SyntheticTemplateArgs.addArgument(SemaRef.getTrivialTemplateArgumentLoc(
- TA, ArgTy, TemplateArgs[2].getLocation()));
- }
// The first template argument will be reused as the template decl that
// our synthetic template arguments will be applied to.
return SemaRef.CheckTemplateIdType(Converted[0].getAsTemplate(),
@@ -3492,11 +3809,15 @@ checkBuiltinTemplateIdType(Sema &SemaRef, BuiltinTemplateDecl *BTD,
assert(Converted.size() == 2 &&
"__type_pack_element should be given an index and a parameter pack");
- // If the Index is out of bounds, the program is ill-formed.
TemplateArgument IndexArg = Converted[0], Ts = Converted[1];
+ if (IndexArg.isDependent() || Ts.isDependent())
+ return Context.getCanonicalTemplateSpecializationType(TemplateName(BTD),
+ Converted);
+
llvm::APSInt Index = IndexArg.getAsIntegral();
assert(Index >= 0 && "the index used with __type_pack_element should be of "
"type std::size_t, and hence be non-negative");
+ // If the Index is out of bounds, the program is ill-formed.
if (Index >= Ts.pack_size()) {
SemaRef.Diag(TemplateArgs[0].getLocation(),
diag::err_type_pack_element_out_of_bounds);
@@ -3504,15 +3825,17 @@ checkBuiltinTemplateIdType(Sema &SemaRef, BuiltinTemplateDecl *BTD,
}
// We simply return the type at index `Index`.
- auto Nth = std::next(Ts.pack_begin(), Index.getExtValue());
- return Nth->getAsType();
+ int64_t N = Index.getExtValue();
+ return Ts.getPackAsArray()[N].getAsType();
}
llvm_unreachable("unexpected BuiltinTemplateDecl!");
}
/// Determine whether this alias template is "enable_if_t".
+/// libc++ >=14 uses "__enable_if_t" in C++11 mode.
static bool isEnableIfAliasTemplate(TypeAliasTemplateDecl *AliasTemplate) {
- return AliasTemplate->getName().equals("enable_if_t");
+ return AliasTemplate->getName().equals("enable_if_t") ||
+ AliasTemplate->getName().equals("__enable_if_t");
}
/// Collect all of the separable terms in the given condition, which
@@ -3527,9 +3850,8 @@ static void collectConjunctionTerms(Expr *Clause,
if (BinOp->getOpcode() == BO_LAnd) {
collectConjunctionTerms(BinOp->getLHS(), Terms);
collectConjunctionTerms(BinOp->getRHS(), Terms);
+ return;
}
-
- return;
}
Terms.push_back(Clause);
@@ -3657,10 +3979,9 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
// assume the template is a type template. Either our assumption is
// correct, or the code is ill-formed and will be diagnosed when the
// dependent name is substituted.
- return Context.getDependentTemplateSpecializationType(ETK_None,
- DTN->getQualifier(),
- DTN->getIdentifier(),
- TemplateArgs);
+ return Context.getDependentTemplateSpecializationType(
+ ElaboratedTypeKeyword::None, DTN->getQualifier(), DTN->getIdentifier(),
+ TemplateArgs.arguments());
if (Name.getAsAssumedTemplateName() &&
resolveAssumedTemplateNameAsType(/*Scope*/nullptr, Name, TemplateLoc))
@@ -3672,7 +3993,8 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
// We might have a substituted template template parameter pack. If so,
// build a template specialization type for it.
if (Name.getAsSubstTemplateTemplateParmPack())
- return Context.getTemplateSpecializationType(Name, TemplateArgs);
+ return Context.getTemplateSpecializationType(Name,
+ TemplateArgs.arguments());
Diag(TemplateLoc, diag::err_template_id_not_a_type)
<< Name;
@@ -3682,10 +4004,10 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
// Check that the template argument list is well-formed for this
// template.
- SmallVector<TemplateArgument, 4> Converted;
- if (CheckTemplateArgumentList(Template, TemplateLoc, TemplateArgs,
- false, Converted,
- /*UpdateArgsWithConversion=*/true))
+ SmallVector<TemplateArgument, 4> SugaredConverted, CanonicalConverted;
+ if (CheckTemplateArgumentList(Template, TemplateLoc, TemplateArgs, false,
+ SugaredConverted, CanonicalConverted,
+ /*UpdateArgsWithConversions=*/true))
return QualType();
QualType CanonType;
@@ -3698,12 +4020,10 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
if (Pattern->isInvalidDecl())
return QualType();
- TemplateArgumentList StackTemplateArgs(TemplateArgumentList::OnStack,
- Converted);
-
// Only substitute for the innermost template argument list.
MultiLevelTemplateArgumentList TemplateArgLists;
- TemplateArgLists.addOuterTemplateArguments(&StackTemplateArgs);
+ TemplateArgLists.addOuterTemplateArguments(Template, CanonicalConverted,
+ /*Final=*/false);
TemplateArgLists.addOuterRetainedLevels(
AliasTemplate->getTemplateParameters()->getDepth());
@@ -3750,9 +4070,12 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
return QualType();
}
+ } else if (auto *BTD = dyn_cast<BuiltinTemplateDecl>(Template)) {
+ CanonType = checkBuiltinTemplateIdType(*this, BTD, SugaredConverted,
+ TemplateLoc, TemplateArgs);
} else if (Name.isDependent() ||
TemplateSpecializationType::anyDependentTemplateArguments(
- TemplateArgs, Converted)) {
+ TemplateArgs, CanonicalConverted)) {
// This class template specialization is a dependent
// type. Therefore, its canonical type is another class template
// specialization type that contains all of the converted
@@ -3760,7 +4083,8 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
// A<T, T> have identical types when A is declared as:
//
// template<typename T, typename U = T> struct A;
- CanonType = Context.getCanonicalTemplateSpecializationType(Name, Converted);
+ CanonType = Context.getCanonicalTemplateSpecializationType(
+ Name, CanonicalConverted);
// This might work out to be a current instantiation, in which
// case the canonical type needs to be the InjectedClassNameType.
@@ -3799,13 +4123,13 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
break;
}
}
- } else if (ClassTemplateDecl *ClassTemplate
- = dyn_cast<ClassTemplateDecl>(Template)) {
+ } else if (ClassTemplateDecl *ClassTemplate =
+ dyn_cast<ClassTemplateDecl>(Template)) {
// Find the class template specialization declaration that
// corresponds to these arguments.
void *InsertPos = nullptr;
- ClassTemplateSpecializationDecl *Decl
- = ClassTemplate->findSpecialization(Converted, InsertPos);
+ ClassTemplateSpecializationDecl *Decl =
+ ClassTemplate->findSpecialization(CanonicalConverted, InsertPos);
if (!Decl) {
// This is the first time we have referenced this class template
// specialization. Create the canonical declaration and add it to
@@ -3814,7 +4138,8 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
Context, ClassTemplate->getTemplatedDecl()->getTagKind(),
ClassTemplate->getDeclContext(),
ClassTemplate->getTemplatedDecl()->getBeginLoc(),
- ClassTemplate->getLocation(), ClassTemplate, Converted, nullptr);
+ ClassTemplate->getLocation(), ClassTemplate, CanonicalConverted,
+ nullptr);
ClassTemplate->AddSpecialization(Decl, InsertPos);
if (ClassTemplate->isOutOfLine())
Decl->setLexicalDeclContext(ClassTemplate->getLexicalDeclContext());
@@ -3824,8 +4149,9 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
ClassTemplate->getTemplatedDecl()->hasAttrs()) {
InstantiatingTemplate Inst(*this, TemplateLoc, Decl);
if (!Inst.isInvalid()) {
- MultiLevelTemplateArgumentList TemplateArgLists;
- TemplateArgLists.addOuterTemplateArguments(Converted);
+ MultiLevelTemplateArgumentList TemplateArgLists(Template,
+ CanonicalConverted,
+ /*Final=*/false);
InstantiateAttrsForDecl(TemplateArgLists,
ClassTemplate->getTemplatedDecl(), Decl);
}
@@ -3837,15 +4163,15 @@ QualType Sema::CheckTemplateIdType(TemplateName Name,
CanonType = Context.getTypeDeclType(Decl);
assert(isa<RecordType>(CanonType) &&
"type of non-dependent specialization is not a RecordType");
- } else if (auto *BTD = dyn_cast<BuiltinTemplateDecl>(Template)) {
- CanonType = checkBuiltinTemplateIdType(*this, BTD, Converted, TemplateLoc,
- TemplateArgs);
+ } else {
+ llvm_unreachable("Unhandled template kind");
}
// Build the fully-sugared type for this class template
// specialization, which refers back to the class template
// specialization we created or found.
- return Context.getTemplateSpecializationType(Name, TemplateArgs, CanonType);
+ return Context.getTemplateSpecializationType(Name, TemplateArgs.arguments(),
+ CanonType);
}
void Sema::ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &ParsedName,
@@ -3906,7 +4232,8 @@ TypeResult Sema::ActOnTemplateIdType(
TemplateTy TemplateD, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc,
- bool IsCtorOrDtorName, bool IsClassName) {
+ bool IsCtorOrDtorName, bool IsClassName,
+ ImplicitTypenameContext AllowImplicitTypename) {
if (SS.isInvalid())
return true;
@@ -3920,9 +4247,18 @@ TypeResult Sema::ActOnTemplateIdType(
// qualified-id denotes a type, forming an
// elaborated-type-specifier (7.1.5.3).
if (!LookupCtx && isDependentScopeSpecifier(SS)) {
- Diag(SS.getBeginLoc(), diag::err_typename_missing_template)
- << SS.getScopeRep() << TemplateII->getName();
- // Recover as if 'typename' were specified.
+ // C++2a relaxes some of those restrictions in [temp.res]p5.
+ if (AllowImplicitTypename == ImplicitTypenameContext::Yes) {
+ if (getLangOpts().CPlusPlus20)
+ Diag(SS.getBeginLoc(), diag::warn_cxx17_compat_implicit_typename);
+ else
+ Diag(SS.getBeginLoc(), diag::ext_implicit_typename)
+ << SS.getScopeRep() << TemplateII->getName()
+ << FixItHint::CreateInsertion(SS.getBeginLoc(), "typename ");
+ } else
+ Diag(SS.getBeginLoc(), diag::err_typename_missing_template)
+ << SS.getScopeRep() << TemplateII->getName();
+
// FIXME: This is not quite correct recovery as we don't transform SS
// into the corresponding dependent form (and we don't diagnose missing
// 'template' keywords within SS as a result).
@@ -3956,11 +4292,10 @@ TypeResult Sema::ActOnTemplateIdType(
translateTemplateArguments(TemplateArgsIn, TemplateArgs);
if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
- QualType T
- = Context.getDependentTemplateSpecializationType(ETK_None,
- DTN->getQualifier(),
- DTN->getIdentifier(),
- TemplateArgs);
+ assert(SS.getScopeRep() == DTN->getQualifier());
+ QualType T = Context.getDependentTemplateSpecializationType(
+ ElaboratedTypeKeyword::None, DTN->getQualifier(), DTN->getIdentifier(),
+ TemplateArgs.arguments());
// Build type-source information.
TypeLocBuilder TLB;
DependentTemplateSpecializationTypeLoc SpecTL
@@ -3976,14 +4311,14 @@ TypeResult Sema::ActOnTemplateIdType(
return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
}
- QualType Result = CheckTemplateIdType(Template, TemplateIILoc, TemplateArgs);
- if (Result.isNull())
+ QualType SpecTy = CheckTemplateIdType(Template, TemplateIILoc, TemplateArgs);
+ if (SpecTy.isNull())
return true;
// Build type-source information.
TypeLocBuilder TLB;
- TemplateSpecializationTypeLoc SpecTL
- = TLB.push<TemplateSpecializationTypeLoc>(Result);
+ TemplateSpecializationTypeLoc SpecTL =
+ TLB.push<TemplateSpecializationTypeLoc>(SpecTy);
SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
SpecTL.setTemplateNameLoc(TemplateIILoc);
SpecTL.setLAngleLoc(LAngleLoc);
@@ -3991,18 +4326,15 @@ TypeResult Sema::ActOnTemplateIdType(
for (unsigned i = 0, e = SpecTL.getNumArgs(); i != e; ++i)
SpecTL.setArgLocInfo(i, TemplateArgs[i].getLocInfo());
- // NOTE: avoid constructing an ElaboratedTypeLoc if this is a
- // constructor or destructor name (in such a case, the scope specifier
- // will be attached to the enclosing Decl or Expr node).
- if (SS.isNotEmpty() && !IsCtorOrDtorName) {
- // Create an elaborated-type-specifier containing the nested-name-specifier.
- Result = Context.getElaboratedType(ETK_None, SS.getScopeRep(), Result);
- ElaboratedTypeLoc ElabTL = TLB.push<ElaboratedTypeLoc>(Result);
- ElabTL.setElaboratedKeywordLoc(SourceLocation());
+ // Create an elaborated-type-specifier containing the nested-name-specifier.
+ QualType ElTy =
+ getElaboratedType(ElaboratedTypeKeyword::None,
+ !IsCtorOrDtorName ? SS : CXXScopeSpec(), SpecTy);
+ ElaboratedTypeLoc ElabTL = TLB.push<ElaboratedTypeLoc>(ElTy);
+ ElabTL.setElaboratedKeywordLoc(SourceLocation());
+ if (!ElabTL.isEmpty())
ElabTL.setQualifierLoc(SS.getWithLocInContext(Context));
- }
-
- return CreateParsedType(Result, TLB.getTypeSourceInfo(Context, Result));
+ return CreateParsedType(ElTy, TLB.getTypeSourceInfo(Context, ElTy));
}
TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
@@ -4030,10 +4362,10 @@ TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
= TypeWithKeyword::getKeywordForTagTypeKind(TagKind);
if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
- QualType T = Context.getDependentTemplateSpecializationType(Keyword,
- DTN->getQualifier(),
- DTN->getIdentifier(),
- TemplateArgs);
+ assert(SS.getScopeRep() == DTN->getQualifier());
+ QualType T = Context.getDependentTemplateSpecializationType(
+ Keyword, DTN->getQualifier(), DTN->getIdentifier(),
+ TemplateArgs.arguments());
// Build type-source information.
TypeLocBuilder TLB;
@@ -4057,7 +4389,7 @@ TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
// resolves to an alias template specialization, the
// elaborated-type-specifier is ill-formed.
Diag(TemplateLoc, diag::err_tag_reference_non_tag)
- << TAT << NTK_TypeAliasTemplate << TagKind;
+ << TAT << NTK_TypeAliasTemplate << llvm::to_underlying(TagKind);
Diag(TAT->getLocation(), diag::note_declared_at);
}
@@ -4115,6 +4447,7 @@ static bool isTemplateArgumentTemplateParameter(
case TemplateArgument::NullPtr:
case TemplateArgument::Integral:
case TemplateArgument::Declaration:
+ case TemplateArgument::StructuralValue:
case TemplateArgument::Pack:
case TemplateArgument::TemplateExpansion:
return false;
@@ -4198,7 +4531,7 @@ static void checkMoreSpecializedThanPrimary(Sema &S, PartialSpecDecl *Partial) {
<< SFINAEArgString;
}
- S.Diag(Template->getLocation(), diag::note_template_decl_here);
+ S.NoteTemplateLocation(*Template);
SmallVector<const Expr *, 3> PartialAC, TemplateAC;
Template->getAssociatedConstraints(TemplateAC);
Partial->getAssociatedConstraints(PartialAC);
@@ -4293,7 +4626,7 @@ DeclResult Sema::ActOnVarTemplateSpecialization(
bool IsPartialSpecialization) {
// D must be variable template id.
assert(D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId &&
- "Variable template specialization is declared with a template it.");
+ "Variable template specialization is declared with a template id.");
TemplateIdAnnotation *TemplateId = D.getName().TemplateId;
TemplateArgumentListInfo TemplateArgs =
@@ -4323,36 +4656,39 @@ DeclResult Sema::ActOnVarTemplateSpecialization(
// Check for unexpanded parameter packs in any of the template arguments.
for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
if (DiagnoseUnexpandedParameterPack(TemplateArgs[I],
- UPPC_PartialSpecialization))
+ IsPartialSpecialization
+ ? UPPC_PartialSpecialization
+ : UPPC_ExplicitSpecialization))
return true;
// Check that the template argument list is well-formed for this
// template.
- SmallVector<TemplateArgument, 4> Converted;
+ SmallVector<TemplateArgument, 4> SugaredConverted, CanonicalConverted;
if (CheckTemplateArgumentList(VarTemplate, TemplateNameLoc, TemplateArgs,
- false, Converted,
- /*UpdateArgsWithConversion=*/true))
+ false, SugaredConverted, CanonicalConverted,
+ /*UpdateArgsWithConversions=*/true))
return true;
// Find the variable template (partial) specialization declaration that
// corresponds to these arguments.
if (IsPartialSpecialization) {
if (CheckTemplatePartialSpecializationArgs(TemplateNameLoc, VarTemplate,
- TemplateArgs.size(), Converted))
+ TemplateArgs.size(),
+ CanonicalConverted))
return true;
// FIXME: Move these checks to CheckTemplatePartialSpecializationArgs so we
// also do them during instantiation.
if (!Name.isDependent() &&
- !TemplateSpecializationType::anyDependentTemplateArguments(TemplateArgs,
- Converted)) {
+ !TemplateSpecializationType::anyDependentTemplateArguments(
+ TemplateArgs, CanonicalConverted)) {
Diag(TemplateNameLoc, diag::err_partial_spec_fully_specialized)
<< VarTemplate->getDeclName();
IsPartialSpecialization = false;
}
if (isSameAsPrimaryTemplate(VarTemplate->getTemplateParameters(),
- Converted) &&
+ CanonicalConverted) &&
(!Context.getLangOpts().CPlusPlus20 ||
!TemplateParams->hasAssociatedConstraints())) {
// C++ [temp.class.spec]p9b3:
@@ -4373,10 +4709,10 @@ DeclResult Sema::ActOnVarTemplateSpecialization(
VarTemplateSpecializationDecl *PrevDecl = nullptr;
if (IsPartialSpecialization)
- PrevDecl = VarTemplate->findPartialSpecialization(Converted, TemplateParams,
- InsertPos);
+ PrevDecl = VarTemplate->findPartialSpecialization(
+ CanonicalConverted, TemplateParams, InsertPos);
else
- PrevDecl = VarTemplate->findSpecialization(Converted, InsertPos);
+ PrevDecl = VarTemplate->findSpecialization(CanonicalConverted, InsertPos);
VarTemplateSpecializationDecl *Specialization = nullptr;
@@ -4403,7 +4739,7 @@ DeclResult Sema::ActOnVarTemplateSpecialization(
VarTemplatePartialSpecializationDecl::Create(
Context, VarTemplate->getDeclContext(), TemplateKWLoc,
TemplateNameLoc, TemplateParams, VarTemplate, DI->getType(), DI, SC,
- Converted, TemplateArgs);
+ CanonicalConverted, TemplateArgs);
if (!PrevPartial)
VarTemplate->AddPartialSpecialization(Partial, InsertPos);
@@ -4420,7 +4756,7 @@ DeclResult Sema::ActOnVarTemplateSpecialization(
// this explicit specialization or friend declaration.
Specialization = VarTemplateSpecializationDecl::Create(
Context, VarTemplate->getDeclContext(), TemplateKWLoc, TemplateNameLoc,
- VarTemplate, DI->getType(), DI, SC, Converted);
+ VarTemplate, DI->getType(), DI, SC, CanonicalConverted);
Specialization->setTemplateArgsInfo(TemplateArgs);
if (!PrevDecl)
@@ -4498,25 +4834,26 @@ Sema::CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc,
assert(Template && "A variable template id without template?");
// Check that the template argument list is well-formed for this template.
- SmallVector<TemplateArgument, 4> Converted;
+ SmallVector<TemplateArgument, 4> SugaredConverted, CanonicalConverted;
if (CheckTemplateArgumentList(
Template, TemplateNameLoc,
const_cast<TemplateArgumentListInfo &>(TemplateArgs), false,
- Converted, /*UpdateArgsWithConversion=*/true))
+ SugaredConverted, CanonicalConverted,
+ /*UpdateArgsWithConversions=*/true))
return true;
// Produce a placeholder value if the specialization is dependent.
if (Template->getDeclContext()->isDependentContext() ||
- TemplateSpecializationType::anyDependentTemplateArguments(TemplateArgs,
- Converted))
+ TemplateSpecializationType::anyDependentTemplateArguments(
+ TemplateArgs, CanonicalConverted))
return DeclResult();
// Find the variable template specialization declaration that
// corresponds to these arguments.
void *InsertPos = nullptr;
- if (VarTemplateSpecializationDecl *Spec = Template->findSpecialization(
- Converted, InsertPos)) {
- checkSpecializationVisibility(TemplateNameLoc, Spec);
+ if (VarTemplateSpecializationDecl *Spec =
+ Template->findSpecialization(CanonicalConverted, InsertPos)) {
+ checkSpecializationReachability(TemplateNameLoc, Spec);
// If we already have a variable template specialization, return it.
return Spec;
}
@@ -4527,7 +4864,7 @@ Sema::CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc,
// that it represents. That is,
VarDecl *InstantiationPattern = Template->getTemplatedDecl();
TemplateArgumentList TemplateArgList(TemplateArgumentList::OnStack,
- Converted);
+ CanonicalConverted);
TemplateArgumentList *InstantiationArgs = &TemplateArgList;
bool AmbiguousPartialSpec = false;
typedef PartialSpecMatchResult MatchResult;
@@ -4559,7 +4896,7 @@ Sema::CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc,
} else {
Matched.push_back(PartialSpecMatchResult());
Matched.back().Partial = Partial;
- Matched.back().Args = Info.take();
+ Matched.back().Args = Info.takeCanonical();
}
}
@@ -4615,7 +4952,7 @@ Sema::CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc,
// FIXME: LateAttrs et al.?
VarTemplateSpecializationDecl *Decl = BuildVarTemplateInstantiation(
Template, InstantiationPattern, *InstantiationArgs, TemplateArgs,
- Converted, TemplateNameLoc /*, LateAttrs, StartingScope*/);
+ CanonicalConverted, TemplateNameLoc /*, LateAttrs, StartingScope*/);
if (!Decl)
return true;
@@ -4637,7 +4974,7 @@ Sema::CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc,
dyn_cast<VarTemplatePartialSpecializationDecl>(InstantiationPattern))
Decl->setInstantiationOf(D, InstantiationArgs);
- checkSpecializationVisibility(TemplateNameLoc, Decl);
+ checkSpecializationReachability(TemplateNameLoc, Decl);
assert(Decl && "No variable template specialization?");
return Decl;
@@ -4672,8 +5009,7 @@ void Sema::diagnoseMissingTemplateArguments(TemplateName Name,
Diag(Loc, diag::err_template_missing_args)
<< (int)getTemplateNameKindForDiagnostics(Name) << Name;
if (TemplateDecl *TD = Name.getAsTemplateDecl()) {
- Diag(TD->getLocation(), diag::note_template_decl_here)
- << TD->getTemplateParameters()->getSourceRange();
+ NoteTemplateLocation(*TD, TD->getTemplateParameters()->getSourceRange());
}
}
@@ -4686,30 +5022,42 @@ Sema::CheckConceptTemplateId(const CXXScopeSpec &SS,
const TemplateArgumentListInfo *TemplateArgs) {
assert(NamedConcept && "A concept template id without a template?");
- llvm::SmallVector<TemplateArgument, 4> Converted;
- if (CheckTemplateArgumentList(NamedConcept, ConceptNameInfo.getLoc(),
- const_cast<TemplateArgumentListInfo&>(*TemplateArgs),
- /*PartialTemplateArgs=*/false, Converted,
- /*UpdateArgsWithConversion=*/false))
+ llvm::SmallVector<TemplateArgument, 4> SugaredConverted, CanonicalConverted;
+ if (CheckTemplateArgumentList(
+ NamedConcept, ConceptNameInfo.getLoc(),
+ const_cast<TemplateArgumentListInfo &>(*TemplateArgs),
+ /*PartialTemplateArgs=*/false, SugaredConverted, CanonicalConverted,
+ /*UpdateArgsWithConversions=*/false))
return ExprError();
+ auto *CSD = ImplicitConceptSpecializationDecl::Create(
+ Context, NamedConcept->getDeclContext(), NamedConcept->getLocation(),
+ CanonicalConverted);
ConstraintSatisfaction Satisfaction;
bool AreArgsDependent =
- TemplateSpecializationType::anyDependentTemplateArguments(*TemplateArgs,
- Converted);
+ TemplateSpecializationType::anyDependentTemplateArguments(
+ *TemplateArgs, CanonicalConverted);
+ MultiLevelTemplateArgumentList MLTAL(NamedConcept, CanonicalConverted,
+ /*Final=*/false);
+ LocalInstantiationScope Scope(*this);
+
+ EnterExpressionEvaluationContext EECtx{
+ *this, ExpressionEvaluationContext::ConstantEvaluated, CSD};
+
if (!AreArgsDependent &&
CheckConstraintSatisfaction(
- NamedConcept, {NamedConcept->getConstraintExpr()}, Converted,
+ NamedConcept, {NamedConcept->getConstraintExpr()}, MLTAL,
SourceRange(SS.isSet() ? SS.getBeginLoc() : ConceptNameInfo.getLoc(),
TemplateArgs->getRAngleLoc()),
Satisfaction))
return ExprError();
-
- return ConceptSpecializationExpr::Create(Context,
+ auto *CL = ConceptReference::Create(
+ Context,
SS.isSet() ? SS.getWithLocInContext(Context) : NestedNameSpecifierLoc{},
TemplateKWLoc, ConceptNameInfo, FoundDecl, NamedConcept,
- ASTTemplateArgumentListInfo::Create(Context, *TemplateArgs), Converted,
- AreArgsDependent ? nullptr : &Satisfaction);
+ ASTTemplateArgumentListInfo::Create(Context, *TemplateArgs));
+ return ConceptSpecializationExpr::Create(
+ Context, CL, CSD, AreArgsDependent ? nullptr : &Satisfaction);
}
ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
@@ -4737,7 +5085,7 @@ ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
return ExprError();
}
}
-
+ bool KnownDependent = false;
// In C++1y, check variable template ids.
if (R.getAsSingle<VarTemplateDecl>()) {
ExprResult Res = CheckVarTemplateId(SS, R.getLookupNameInfo(),
@@ -4746,6 +5094,7 @@ ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
if (Res.isInvalid() || Res.isUsable())
return Res;
// Result is dependent. Carry on to build an UnresolvedLookupEpxr.
+ KnownDependent = true;
}
if (R.getAsSingle<ConceptDecl>()) {
@@ -4757,13 +5106,10 @@ ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
// We don't want lookup warnings at this point.
R.suppressDiagnostics();
- UnresolvedLookupExpr *ULE
- = UnresolvedLookupExpr::Create(Context, R.getNamingClass(),
- SS.getWithLocInContext(Context),
- TemplateKWLoc,
- R.getLookupNameInfo(),
- RequiresADL, TemplateArgs,
- R.begin(), R.end());
+ UnresolvedLookupExpr *ULE = UnresolvedLookupExpr::Create(
+ Context, R.getNamingClass(), SS.getWithLocInContext(Context),
+ TemplateKWLoc, R.getLookupNameInfo(), RequiresADL, TemplateArgs,
+ R.begin(), R.end(), KnownDependent);
return ULE;
}
@@ -4798,13 +5144,20 @@ Sema::BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
return ExprError();
}
- if (ClassTemplateDecl *Temp = R.getAsSingle<ClassTemplateDecl>()) {
- Diag(NameInfo.getLoc(), diag::err_template_kw_refers_to_class_template)
- << SS.getScopeRep()
- << NameInfo.getName().getAsString() << SS.getRange();
- Diag(Temp->getLocation(), diag::note_referenced_class_template);
+ auto DiagnoseTypeTemplateDecl = [&](TemplateDecl *Temp,
+ bool isTypeAliasTemplateDecl) {
+ Diag(NameInfo.getLoc(), diag::err_template_kw_refers_to_type_template)
+ << SS.getScopeRep() << NameInfo.getName().getAsString() << SS.getRange()
+ << isTypeAliasTemplateDecl;
+ Diag(Temp->getLocation(), diag::note_referenced_type_template) << 0;
return ExprError();
- }
+ };
+
+ if (ClassTemplateDecl *Temp = R.getAsSingle<ClassTemplateDecl>())
+ return DiagnoseTypeTemplateDecl(Temp, false);
+
+ if (TypeAliasTemplateDecl *Temp = R.getAsSingle<TypeAliasTemplateDecl>())
+ return DiagnoseTypeTemplateDecl(Temp, true);
return BuildTemplateIdExpr(SS, TemplateKWLoc, R, /*ADL*/ false, TemplateArgs);
}
@@ -4950,9 +5303,10 @@ TemplateNameKind Sema::ActOnTemplateName(Scope *S,
return TNK_Non_template;
}
-bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
- TemplateArgumentLoc &AL,
- SmallVectorImpl<TemplateArgument> &Converted) {
+bool Sema::CheckTemplateTypeArgument(
+ TemplateTypeParmDecl *Param, TemplateArgumentLoc &AL,
+ SmallVectorImpl<TemplateArgument> &SugaredConverted,
+ SmallVectorImpl<TemplateArgument> &CanonicalConverted) {
const TemplateArgument &Arg = AL.getArgument();
QualType ArgType;
TypeSourceInfo *TSI = nullptr;
@@ -5007,12 +5361,12 @@ bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
? diag::ext_ms_template_type_arg_missing_typename
: diag::err_template_arg_must_be_type_suggest)
<< FixItHint::CreateInsertion(Loc, "typename ");
- Diag(Param->getLocation(), diag::note_template_param_here);
+ NoteTemplateParameterLocation(*Param);
// Recover by synthesizing a type using the location information that we
// already have.
- ArgType =
- Context.getDependentNameType(ETK_Typename, SS.getScopeRep(), II);
+ ArgType = Context.getDependentNameType(ElaboratedTypeKeyword::Typename,
+ SS.getScopeRep(), II);
TypeLocBuilder TLB;
DependentNameTypeLoc TL = TLB.push<DependentNameTypeLoc>(ArgType);
TL.setElaboratedKeywordLoc(SourceLocation(/*synthesized*/));
@@ -5029,14 +5383,14 @@ bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
}
}
// fallthrough
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
default: {
// We have a template type parameter but the template argument
// is not a type.
SourceRange SR = AL.getSourceRange();
Diag(SR.getBegin(), diag::err_template_arg_must_be_type) << SR;
- Diag(Param->getLocation(), diag::note_template_param_here);
+ NoteTemplateParameterLocation(*Param);
return true;
}
@@ -5045,9 +5399,6 @@ bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
if (CheckTemplateArgument(TSI))
return true;
- // Add the converted template type argument.
- ArgType = Context.getCanonicalType(ArgType);
-
// Objective-C ARC:
// If an explicitly-specified template argument type is a lifetime type
// with no lifetime qualifier, the __strong lifetime qualifier is inferred.
@@ -5059,7 +5410,9 @@ bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
ArgType = Context.getQualifiedType(ArgType, Qs);
}
- Converted.push_back(TemplateArgument(ArgType));
+ SugaredConverted.push_back(TemplateArgument(ArgType));
+ CanonicalConverted.push_back(
+ TemplateArgument(Context.getCanonicalType(ArgType)));
return false;
}
@@ -5084,33 +5437,33 @@ bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
/// \param Converted the list of template arguments provided for template
/// parameters that precede \p Param in the template parameter list.
/// \returns the substituted template argument, or NULL if an error occurred.
-static TypeSourceInfo *
-SubstDefaultTemplateArgument(Sema &SemaRef,
- TemplateDecl *Template,
- SourceLocation TemplateLoc,
- SourceLocation RAngleLoc,
- TemplateTypeParmDecl *Param,
- SmallVectorImpl<TemplateArgument> &Converted) {
+static TypeSourceInfo *SubstDefaultTemplateArgument(
+ Sema &SemaRef, TemplateDecl *Template, SourceLocation TemplateLoc,
+ SourceLocation RAngleLoc, TemplateTypeParmDecl *Param,
+ ArrayRef<TemplateArgument> SugaredConverted,
+ ArrayRef<TemplateArgument> CanonicalConverted) {
TypeSourceInfo *ArgType = Param->getDefaultArgumentInfo();
// If the argument type is dependent, instantiate it now based
// on the previously-computed template arguments.
if (ArgType->getType()->isInstantiationDependentType()) {
- Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc,
- Param, Template, Converted,
+ Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc, Param, Template,
+ SugaredConverted,
SourceRange(TemplateLoc, RAngleLoc));
if (Inst.isInvalid())
return nullptr;
- TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack, Converted);
-
// Only substitute for the innermost template argument list.
- MultiLevelTemplateArgumentList TemplateArgLists;
- TemplateArgLists.addOuterTemplateArguments(&TemplateArgs);
+ MultiLevelTemplateArgumentList TemplateArgLists(Template, SugaredConverted,
+ /*Final=*/true);
for (unsigned i = 0, e = Param->getDepth(); i != e; ++i)
- TemplateArgLists.addOuterTemplateArguments(None);
+ TemplateArgLists.addOuterTemplateArguments(std::nullopt);
- Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext());
+ bool ForLambdaCallOperator = false;
+ if (const auto *Rec = dyn_cast<CXXRecordDecl>(Template->getDeclContext()))
+ ForLambdaCallOperator = Rec->isLambda();
+ Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext(),
+ !ForLambdaCallOperator);
ArgType =
SemaRef.SubstType(ArgType, TemplateArgLists,
Param->getDefaultArgumentLoc(), Param->getDeclName());
@@ -5141,26 +5494,22 @@ SubstDefaultTemplateArgument(Sema &SemaRef,
/// parameters that precede \p Param in the template parameter list.
///
/// \returns the substituted template argument, or NULL if an error occurred.
-static ExprResult
-SubstDefaultTemplateArgument(Sema &SemaRef,
- TemplateDecl *Template,
- SourceLocation TemplateLoc,
- SourceLocation RAngleLoc,
- NonTypeTemplateParmDecl *Param,
- SmallVectorImpl<TemplateArgument> &Converted) {
- Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc,
- Param, Template, Converted,
+static ExprResult SubstDefaultTemplateArgument(
+ Sema &SemaRef, TemplateDecl *Template, SourceLocation TemplateLoc,
+ SourceLocation RAngleLoc, NonTypeTemplateParmDecl *Param,
+ ArrayRef<TemplateArgument> SugaredConverted,
+ ArrayRef<TemplateArgument> CanonicalConverted) {
+ Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc, Param, Template,
+ SugaredConverted,
SourceRange(TemplateLoc, RAngleLoc));
if (Inst.isInvalid())
return ExprError();
- TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack, Converted);
-
// Only substitute for the innermost template argument list.
- MultiLevelTemplateArgumentList TemplateArgLists;
- TemplateArgLists.addOuterTemplateArguments(&TemplateArgs);
+ MultiLevelTemplateArgumentList TemplateArgLists(Template, SugaredConverted,
+ /*Final=*/true);
for (unsigned i = 0, e = Param->getDepth(); i != e; ++i)
- TemplateArgLists.addOuterTemplateArguments(None);
+ TemplateArgLists.addOuterTemplateArguments(std::nullopt);
Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext());
EnterExpressionEvaluationContext ConstantEvaluated(
@@ -5193,27 +5542,23 @@ SubstDefaultTemplateArgument(Sema &SemaRef,
/// source-location information) that precedes the template name.
///
/// \returns the substituted template argument, or NULL if an error occurred.
-static TemplateName
-SubstDefaultTemplateArgument(Sema &SemaRef,
- TemplateDecl *Template,
- SourceLocation TemplateLoc,
- SourceLocation RAngleLoc,
- TemplateTemplateParmDecl *Param,
- SmallVectorImpl<TemplateArgument> &Converted,
- NestedNameSpecifierLoc &QualifierLoc) {
+static TemplateName SubstDefaultTemplateArgument(
+ Sema &SemaRef, TemplateDecl *Template, SourceLocation TemplateLoc,
+ SourceLocation RAngleLoc, TemplateTemplateParmDecl *Param,
+ ArrayRef<TemplateArgument> SugaredConverted,
+ ArrayRef<TemplateArgument> CanonicalConverted,
+ NestedNameSpecifierLoc &QualifierLoc) {
Sema::InstantiatingTemplate Inst(
- SemaRef, TemplateLoc, TemplateParameter(Param), Template, Converted,
- SourceRange(TemplateLoc, RAngleLoc));
+ SemaRef, TemplateLoc, TemplateParameter(Param), Template,
+ SugaredConverted, SourceRange(TemplateLoc, RAngleLoc));
if (Inst.isInvalid())
return TemplateName();
- TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack, Converted);
-
// Only substitute for the innermost template argument list.
- MultiLevelTemplateArgumentList TemplateArgLists;
- TemplateArgLists.addOuterTemplateArguments(&TemplateArgs);
+ MultiLevelTemplateArgumentList TemplateArgLists(Template, SugaredConverted,
+ /*Final=*/true);
for (unsigned i = 0, e = Param->getDepth(); i != e; ++i)
- TemplateArgLists.addOuterTemplateArguments(None);
+ TemplateArgLists.addOuterTemplateArguments(std::nullopt);
Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext());
// Substitute into the nested-name-specifier first,
@@ -5235,26 +5580,21 @@ SubstDefaultTemplateArgument(Sema &SemaRef,
/// If the given template parameter has a default template
/// argument, substitute into that default template argument and
/// return the corresponding template argument.
-TemplateArgumentLoc
-Sema::SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
- SourceLocation TemplateLoc,
- SourceLocation RAngleLoc,
- Decl *Param,
- SmallVectorImpl<TemplateArgument>
- &Converted,
- bool &HasDefaultArg) {
+TemplateArgumentLoc Sema::SubstDefaultTemplateArgumentIfAvailable(
+ TemplateDecl *Template, SourceLocation TemplateLoc,
+ SourceLocation RAngleLoc, Decl *Param,
+ ArrayRef<TemplateArgument> SugaredConverted,
+ ArrayRef<TemplateArgument> CanonicalConverted, bool &HasDefaultArg) {
HasDefaultArg = false;
if (TemplateTypeParmDecl *TypeParm = dyn_cast<TemplateTypeParmDecl>(Param)) {
- if (!hasVisibleDefaultArgument(TypeParm))
+ if (!hasReachableDefaultArgument(TypeParm))
return TemplateArgumentLoc();
HasDefaultArg = true;
- TypeSourceInfo *DI = SubstDefaultTemplateArgument(*this, Template,
- TemplateLoc,
- RAngleLoc,
- TypeParm,
- Converted);
+ TypeSourceInfo *DI = SubstDefaultTemplateArgument(
+ *this, Template, TemplateLoc, RAngleLoc, TypeParm, SugaredConverted,
+ CanonicalConverted);
if (DI)
return TemplateArgumentLoc(TemplateArgument(DI->getType()), DI);
@@ -5263,15 +5603,13 @@ Sema::SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
if (NonTypeTemplateParmDecl *NonTypeParm
= dyn_cast<NonTypeTemplateParmDecl>(Param)) {
- if (!hasVisibleDefaultArgument(NonTypeParm))
+ if (!hasReachableDefaultArgument(NonTypeParm))
return TemplateArgumentLoc();
HasDefaultArg = true;
- ExprResult Arg = SubstDefaultTemplateArgument(*this, Template,
- TemplateLoc,
- RAngleLoc,
- NonTypeParm,
- Converted);
+ ExprResult Arg = SubstDefaultTemplateArgument(
+ *this, Template, TemplateLoc, RAngleLoc, NonTypeParm, SugaredConverted,
+ CanonicalConverted);
if (Arg.isInvalid())
return TemplateArgumentLoc();
@@ -5281,17 +5619,14 @@ Sema::SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
TemplateTemplateParmDecl *TempTempParm
= cast<TemplateTemplateParmDecl>(Param);
- if (!hasVisibleDefaultArgument(TempTempParm))
+ if (!hasReachableDefaultArgument(TempTempParm))
return TemplateArgumentLoc();
HasDefaultArg = true;
NestedNameSpecifierLoc QualifierLoc;
- TemplateName TName = SubstDefaultTemplateArgument(*this, Template,
- TemplateLoc,
- RAngleLoc,
- TempTempParm,
- Converted,
- QualifierLoc);
+ TemplateName TName = SubstDefaultTemplateArgument(
+ *this, Template, TemplateLoc, RAngleLoc, TempTempParm, SugaredConverted,
+ CanonicalConverted, QualifierLoc);
if (TName.isNull())
return TemplateArgumentLoc();
@@ -5309,7 +5644,7 @@ convertTypeTemplateArgumentToTemplate(ASTContext &Context, TypeLoc TLoc) {
// Extract and step over any surrounding nested-name-specifier.
NestedNameSpecifierLoc QualLoc;
if (auto ETLoc = TLoc.getAs<ElaboratedTypeLoc>()) {
- if (ETLoc.getTypePtr()->getKeyword() != ETK_None)
+ if (ETLoc.getTypePtr()->getKeyword() != ElaboratedTypeKeyword::None)
return TemplateArgumentLoc();
QualLoc = ETLoc.getQualifierLoc();
@@ -5361,17 +5696,17 @@ convertTypeTemplateArgumentToTemplate(ASTContext &Context, TypeLoc TLoc) {
/// explicitly written, deduced, etc.
///
/// \returns true on error, false otherwise.
-bool Sema::CheckTemplateArgument(NamedDecl *Param,
- TemplateArgumentLoc &Arg,
- NamedDecl *Template,
- SourceLocation TemplateLoc,
- SourceLocation RAngleLoc,
- unsigned ArgumentPackIndex,
- SmallVectorImpl<TemplateArgument> &Converted,
- CheckTemplateArgumentKind CTAK) {
+bool Sema::CheckTemplateArgument(
+ NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template,
+ SourceLocation TemplateLoc, SourceLocation RAngleLoc,
+ unsigned ArgumentPackIndex,
+ SmallVectorImpl<TemplateArgument> &SugaredConverted,
+ SmallVectorImpl<TemplateArgument> &CanonicalConverted,
+ CheckTemplateArgumentKind CTAK) {
// Check template type parameters.
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Param))
- return CheckTemplateTypeArgument(TTP, Arg, Converted);
+ return CheckTemplateTypeArgument(TTP, Arg, SugaredConverted,
+ CanonicalConverted);
// Check non-type template parameters.
if (NonTypeTemplateParmDecl *NTTP =dyn_cast<NonTypeTemplateParmDecl>(Param)) {
@@ -5386,27 +5721,22 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
!isa<TemplateTemplateParmDecl>(Template) &&
!Template->getDeclContext()->isDependentContext()) {
// Do substitution on the type of the non-type template parameter.
- InstantiatingTemplate Inst(*this, TemplateLoc, Template,
- NTTP, Converted,
+ InstantiatingTemplate Inst(*this, TemplateLoc, Template, NTTP,
+ SugaredConverted,
SourceRange(TemplateLoc, RAngleLoc));
if (Inst.isInvalid())
return true;
- TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
- Converted);
-
+ MultiLevelTemplateArgumentList MLTAL(Template, SugaredConverted,
+ /*Final=*/true);
// If the parameter is a pack expansion, expand this slice of the pack.
if (auto *PET = NTTPType->getAs<PackExpansionType>()) {
Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(*this,
ArgumentPackIndex);
- NTTPType = SubstType(PET->getPattern(),
- MultiLevelTemplateArgumentList(TemplateArgs),
- NTTP->getLocation(),
+ NTTPType = SubstType(PET->getPattern(), MLTAL, NTTP->getLocation(),
NTTP->getDeclName());
} else {
- NTTPType = SubstType(NTTPType,
- MultiLevelTemplateArgumentList(TemplateArgs),
- NTTP->getLocation(),
+ NTTPType = SubstType(NTTPType, MLTAL, NTTP->getLocation(),
NTTP->getDeclName());
}
@@ -5424,11 +5754,11 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
llvm_unreachable("Should never see a NULL template argument here");
case TemplateArgument::Expression: {
- TemplateArgument Result;
+ Expr *E = Arg.getArgument().getAsExpr();
+ TemplateArgument SugaredResult, CanonicalResult;
unsigned CurSFINAEErrors = NumSFINAEErrors;
- ExprResult Res =
- CheckTemplateArgument(NTTP, NTTPType, Arg.getArgument().getAsExpr(),
- Result, CTAK);
+ ExprResult Res = CheckTemplateArgument(NTTP, NTTPType, E, SugaredResult,
+ CanonicalResult, CTAK);
if (Res.isInvalid())
return true;
// If the current template argument causes an error, give up now.
@@ -5437,21 +5767,25 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
// If the resulting expression is new, then use it in place of the
// old expression in the template argument.
- if (Res.get() != Arg.getArgument().getAsExpr()) {
+ if (Res.get() != E) {
TemplateArgument TA(Res.get());
Arg = TemplateArgumentLoc(TA, Res.get());
}
- Converted.push_back(Result);
+ SugaredConverted.push_back(SugaredResult);
+ CanonicalConverted.push_back(CanonicalResult);
break;
}
case TemplateArgument::Declaration:
case TemplateArgument::Integral:
+ case TemplateArgument::StructuralValue:
case TemplateArgument::NullPtr:
// We've already checked this template argument, so just copy
// it to the list of converted arguments.
- Converted.push_back(Arg.getArgument());
+ SugaredConverted.push_back(Arg.getArgument());
+ CanonicalConverted.push_back(
+ Context.getCanonicalTemplateArgument(Arg.getArgument()));
break;
case TemplateArgument::Template:
@@ -5487,12 +5821,14 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
return true;
}
- TemplateArgument Result;
- E = CheckTemplateArgument(NTTP, NTTPType, E.get(), Result);
+ TemplateArgument SugaredResult, CanonicalResult;
+ E = CheckTemplateArgument(NTTP, NTTPType, E.get(), SugaredResult,
+ CanonicalResult, CTAK_Specified);
if (E.isInvalid())
return true;
- Converted.push_back(Result);
+ SugaredConverted.push_back(SugaredResult);
+ CanonicalConverted.push_back(CanonicalResult);
break;
}
@@ -5501,8 +5837,8 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
// therefore cannot be a non-type template argument.
Diag(Arg.getLocation(), diag::err_template_arg_must_be_expr)
<< Arg.getSourceRange();
+ NoteTemplateParameterLocation(*Param);
- Diag(Param->getLocation(), diag::note_template_param_here);
return true;
case TemplateArgument::Type: {
@@ -5522,7 +5858,7 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
Diag(SR.getBegin(), diag::err_template_arg_nontype_ambig) << SR << T;
else
Diag(SR.getBegin(), diag::err_template_arg_must_be_expr) << SR;
- Diag(Param->getLocation(), diag::note_template_param_here);
+ NoteTemplateParameterLocation(*Param);
return true;
}
@@ -5549,15 +5885,17 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
{
// Set up a template instantiation context.
LocalInstantiationScope Scope(*this);
- InstantiatingTemplate Inst(*this, TemplateLoc, Template,
- TempParm, Converted,
+ InstantiatingTemplate Inst(*this, TemplateLoc, Template, TempParm,
+ SugaredConverted,
SourceRange(TemplateLoc, RAngleLoc));
if (Inst.isInvalid())
return true;
- TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack, Converted);
- Params = SubstTemplateParams(Params, CurContext,
- MultiLevelTemplateArgumentList(TemplateArgs));
+ Params =
+ SubstTemplateParams(Params, CurContext,
+ MultiLevelTemplateArgumentList(
+ Template, SugaredConverted, /*Final=*/true),
+ /*EvaluateConstraints=*/false);
if (!Params)
return true;
}
@@ -5582,7 +5920,9 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
if (CheckTemplateTemplateArgument(TempParm, Params, Arg))
return true;
- Converted.push_back(Arg.getArgument());
+ SugaredConverted.push_back(Arg.getArgument());
+ CanonicalConverted.push_back(
+ Context.getCanonicalTemplateArgument(Arg.getArgument()));
break;
case TemplateArgument::Expression:
@@ -5594,11 +5934,10 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
return true;
case TemplateArgument::Declaration:
- llvm_unreachable("Declaration argument with template template parameter");
case TemplateArgument::Integral:
- llvm_unreachable("Integral argument with template template parameter");
+ case TemplateArgument::StructuralValue:
case TemplateArgument::NullPtr:
- llvm_unreachable("Null pointer argument with template template parameter");
+ llvm_unreachable("non-type argument with template template parameter");
case TemplateArgument::Pack:
llvm_unreachable("Caller must expand template argument packs");
@@ -5619,10 +5958,10 @@ static bool diagnoseMissingArgument(Sema &S, SourceLocation Loc,
->getTemplateParameters()
->getParam(D->getIndex()));
- // If there's a default argument that's not visible, diagnose that we're
+ // If there's a default argument that's not reachable, diagnose that we're
// missing a module import.
llvm::SmallVector<Module*, 8> Modules;
- if (D->hasDefaultArgument() && !S.hasVisibleDefaultArgument(D, &Modules)) {
+ if (D->hasDefaultArgument() && !S.hasReachableDefaultArgument(D, &Modules)) {
S.diagnoseMissingImport(Loc, cast<NamedDecl>(TD),
D->getDefaultArgumentLoc(), Modules,
Sema::MissingImportKind::DefaultArgument,
@@ -5639,8 +5978,7 @@ static bool diagnoseMissingArgument(Sema &S, SourceLocation Loc,
<< /*not enough args*/0
<< (int)S.getTemplateNameKindForDiagnostics(TemplateName(TD))
<< TD;
- S.Diag(TD->getLocation(), diag::note_template_decl_here)
- << Params->getSourceRange();
+ S.NoteTemplateLocation(*TD, Params->getSourceRange());
return true;
}
@@ -5649,7 +5987,8 @@ static bool diagnoseMissingArgument(Sema &S, SourceLocation Loc,
bool Sema::CheckTemplateArgumentList(
TemplateDecl *Template, SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs,
- SmallVectorImpl<TemplateArgument> &Converted,
+ SmallVectorImpl<TemplateArgument> &SugaredConverted,
+ SmallVectorImpl<TemplateArgument> &CanonicalConverted,
bool UpdateArgsWithConversions, bool *ConstraintsNotSatisfied) {
if (ConstraintsNotSatisfied)
@@ -5660,12 +5999,7 @@ bool Sema::CheckTemplateArgumentList(
// template.
TemplateArgumentListInfo NewArgs = TemplateArgs;
- // Make sure we get the template parameter list from the most
- // recent declaration, since that is the only one that is guaranteed to
- // have all the default template argument information.
- TemplateParameterList *Params =
- cast<TemplateDecl>(Template->getMostRecentDecl())
- ->getTemplateParameters();
+ TemplateParameterList *Params = GetTemplateParameterList(Template);
SourceLocation RAngleLoc = NewArgs.getRAngleLoc();
@@ -5675,7 +6009,8 @@ bool Sema::CheckTemplateArgumentList(
// corresponding parameter declared by the template in its
// template-parameter-list.
bool isTemplateTemplateParameter = isa<TemplateTemplateParmDecl>(Template);
- SmallVector<TemplateArgument, 2> ArgumentPack;
+ SmallVector<TemplateArgument, 2> SugaredArgumentPack;
+ SmallVector<TemplateArgument, 2> CanonicalArgumentPack;
unsigned ArgIdx = 0, NumArgs = NewArgs.size();
LocalInstantiationScope InstScope(*this, true);
for (TemplateParameterList::iterator Param = Params->begin(),
@@ -5683,13 +6018,17 @@ bool Sema::CheckTemplateArgumentList(
Param != ParamEnd; /* increment in loop */) {
// If we have an expanded parameter pack, make sure we don't have too
// many arguments.
- if (Optional<unsigned> Expansions = getExpandedPackSize(*Param)) {
- if (*Expansions == ArgumentPack.size()) {
+ if (std::optional<unsigned> Expansions = getExpandedPackSize(*Param)) {
+ if (*Expansions == SugaredArgumentPack.size()) {
// We're done with this parameter pack. Pack up its arguments and add
// them to the list.
- Converted.push_back(
- TemplateArgument::CreatePackCopy(Context, ArgumentPack));
- ArgumentPack.clear();
+ SugaredConverted.push_back(
+ TemplateArgument::CreatePackCopy(Context, SugaredArgumentPack));
+ SugaredArgumentPack.clear();
+
+ CanonicalConverted.push_back(
+ TemplateArgument::CreatePackCopy(Context, CanonicalArgumentPack));
+ CanonicalArgumentPack.clear();
// This argument is assigned to the next parameter.
++Param;
@@ -5700,19 +6039,24 @@ bool Sema::CheckTemplateArgumentList(
<< /*not enough args*/0
<< (int)getTemplateNameKindForDiagnostics(TemplateName(Template))
<< Template;
- Diag(Template->getLocation(), diag::note_template_decl_here)
- << Params->getSourceRange();
+ NoteTemplateLocation(*Template, Params->getSourceRange());
return true;
}
}
if (ArgIdx < NumArgs) {
// Check the template argument we were given.
- if (CheckTemplateArgument(*Param, NewArgs[ArgIdx], Template,
- TemplateLoc, RAngleLoc,
- ArgumentPack.size(), Converted))
+ if (CheckTemplateArgument(*Param, NewArgs[ArgIdx], Template, TemplateLoc,
+ RAngleLoc, SugaredArgumentPack.size(),
+ SugaredConverted, CanonicalConverted,
+ CTAK_Specified))
return true;
+ CanonicalConverted.back().setIsDefaulted(
+ clang::isSubstitutedDefaultArgument(
+ Context, NewArgs[ArgIdx].getArgument(), *Param,
+ CanonicalConverted, Params->getDepth()));
+
bool PackExpansionIntoNonPack =
NewArgs[ArgIdx].getArgument().isPackExpansion() &&
(!(*Param)->isTemplateParameterPack() || getExpandedPackSize(*Param));
@@ -5727,7 +6071,7 @@ bool Sema::CheckTemplateArgumentList(
diag::err_template_expansion_into_fixed_list)
<< (isa<ConceptDecl>(Template) ? 1 : 0)
<< NewArgs[ArgIdx].getSourceRange();
- Diag((*Param)->getLocation(), diag::note_template_param_here);
+ NoteTemplateParameterLocation(**Param);
return true;
}
@@ -5739,7 +6083,8 @@ bool Sema::CheckTemplateArgumentList(
// deduced argument and place it on the argument pack. Note that we
// stay on the same template parameter so that we can deduce more
// arguments.
- ArgumentPack.push_back(Converted.pop_back_val());
+ SugaredArgumentPack.push_back(SugaredConverted.pop_back_val());
+ CanonicalArgumentPack.push_back(CanonicalConverted.pop_back_val());
} else {
// Move to the next template parameter.
++Param;
@@ -5749,16 +6094,25 @@ bool Sema::CheckTemplateArgumentList(
// the remaining arguments, because we don't know what parameters they'll
// match up with.
if (PackExpansionIntoNonPack) {
- if (!ArgumentPack.empty()) {
+ if (!SugaredArgumentPack.empty()) {
// If we were part way through filling in an expanded parameter pack,
// fall back to just producing individual arguments.
- Converted.insert(Converted.end(),
- ArgumentPack.begin(), ArgumentPack.end());
- ArgumentPack.clear();
+ SugaredConverted.insert(SugaredConverted.end(),
+ SugaredArgumentPack.begin(),
+ SugaredArgumentPack.end());
+ SugaredArgumentPack.clear();
+
+ CanonicalConverted.insert(CanonicalConverted.end(),
+ CanonicalArgumentPack.begin(),
+ CanonicalArgumentPack.end());
+ CanonicalArgumentPack.clear();
}
while (ArgIdx < NumArgs) {
- Converted.push_back(NewArgs[ArgIdx].getArgument());
+ const TemplateArgument &Arg = NewArgs[ArgIdx].getArgument();
+ SugaredConverted.push_back(Arg);
+ CanonicalConverted.push_back(
+ Context.getCanonicalTemplateArgument(Arg));
++ArgIdx;
}
@@ -5770,9 +6124,12 @@ bool Sema::CheckTemplateArgumentList(
// If we're checking a partial template argument list, we're done.
if (PartialTemplateArgs) {
- if ((*Param)->isTemplateParameterPack() && !ArgumentPack.empty())
- Converted.push_back(
- TemplateArgument::CreatePackCopy(Context, ArgumentPack));
+ if ((*Param)->isTemplateParameterPack() && !SugaredArgumentPack.empty()) {
+ SugaredConverted.push_back(
+ TemplateArgument::CreatePackCopy(Context, SugaredArgumentPack));
+ CanonicalConverted.push_back(
+ TemplateArgument::CreatePackCopy(Context, CanonicalArgumentPack));
+ }
return false;
}
@@ -5785,12 +6142,20 @@ bool Sema::CheckTemplateArgumentList(
// A non-expanded parameter pack before the end of the parameter list
// only occurs for an ill-formed template parameter list, unless we've
// got a partial argument list for a function template, so just bail out.
- if (Param + 1 != ParamEnd)
+ if (Param + 1 != ParamEnd) {
+ assert(
+ (Template->getMostRecentDecl()->getKind() != Decl::Kind::Concept) &&
+ "Concept templates must have parameter packs at the end.");
return true;
+ }
- Converted.push_back(
- TemplateArgument::CreatePackCopy(Context, ArgumentPack));
- ArgumentPack.clear();
+ SugaredConverted.push_back(
+ TemplateArgument::CreatePackCopy(Context, SugaredArgumentPack));
+ SugaredArgumentPack.clear();
+
+ CanonicalConverted.push_back(
+ TemplateArgument::CreatePackCopy(Context, CanonicalArgumentPack));
+ CanonicalArgumentPack.clear();
++Param;
continue;
@@ -5805,16 +6170,13 @@ bool Sema::CheckTemplateArgumentList(
// (when the template parameter was part of a nested template) into
// the default argument.
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*Param)) {
- if (!hasVisibleDefaultArgument(TTP))
+ if (!hasReachableDefaultArgument(TTP))
return diagnoseMissingArgument(*this, TemplateLoc, Template, TTP,
NewArgs);
- TypeSourceInfo *ArgType = SubstDefaultTemplateArgument(*this,
- Template,
- TemplateLoc,
- RAngleLoc,
- TTP,
- Converted);
+ TypeSourceInfo *ArgType = SubstDefaultTemplateArgument(
+ *this, Template, TemplateLoc, RAngleLoc, TTP, SugaredConverted,
+ CanonicalConverted);
if (!ArgType)
return true;
@@ -5822,15 +6184,13 @@ bool Sema::CheckTemplateArgumentList(
ArgType);
} else if (NonTypeTemplateParmDecl *NTTP
= dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
- if (!hasVisibleDefaultArgument(NTTP))
+ if (!hasReachableDefaultArgument(NTTP))
return diagnoseMissingArgument(*this, TemplateLoc, Template, NTTP,
NewArgs);
- ExprResult E = SubstDefaultTemplateArgument(*this, Template,
- TemplateLoc,
- RAngleLoc,
- NTTP,
- Converted);
+ ExprResult E = SubstDefaultTemplateArgument(
+ *this, Template, TemplateLoc, RAngleLoc, NTTP, SugaredConverted,
+ CanonicalConverted);
if (E.isInvalid())
return true;
@@ -5840,17 +6200,14 @@ bool Sema::CheckTemplateArgumentList(
TemplateTemplateParmDecl *TempParm
= cast<TemplateTemplateParmDecl>(*Param);
- if (!hasVisibleDefaultArgument(TempParm))
+ if (!hasReachableDefaultArgument(TempParm))
return diagnoseMissingArgument(*this, TemplateLoc, Template, TempParm,
NewArgs);
NestedNameSpecifierLoc QualifierLoc;
- TemplateName Name = SubstDefaultTemplateArgument(*this, Template,
- TemplateLoc,
- RAngleLoc,
- TempParm,
- Converted,
- QualifierLoc);
+ TemplateName Name = SubstDefaultTemplateArgument(
+ *this, Template, TemplateLoc, RAngleLoc, TempParm, SugaredConverted,
+ CanonicalConverted, QualifierLoc);
if (Name.isNull())
return true;
@@ -5863,16 +6220,20 @@ bool Sema::CheckTemplateArgumentList(
// the default template argument. We're not actually instantiating a
// template here, we just create this object to put a note into the
// context stack.
- InstantiatingTemplate Inst(*this, RAngleLoc, Template, *Param, Converted,
+ InstantiatingTemplate Inst(*this, RAngleLoc, Template, *Param,
+ SugaredConverted,
SourceRange(TemplateLoc, RAngleLoc));
if (Inst.isInvalid())
return true;
// Check the default template argument.
- if (CheckTemplateArgument(*Param, Arg, Template, TemplateLoc,
- RAngleLoc, 0, Converted))
+ if (CheckTemplateArgument(*Param, Arg, Template, TemplateLoc, RAngleLoc, 0,
+ SugaredConverted, CanonicalConverted,
+ CTAK_Specified))
return true;
+ CanonicalConverted.back().setIsDefaulted(true);
+
// Core issue 150 (assumed resolution): if this is a template template
// parameter, keep track of the default template arguments from the
// template definition.
@@ -5890,8 +6251,12 @@ bool Sema::CheckTemplateArgumentList(
// still dependent).
if (ArgIdx < NumArgs && CurrentInstantiationScope &&
CurrentInstantiationScope->getPartiallySubstitutedPack()) {
- while (ArgIdx < NumArgs && NewArgs[ArgIdx].getArgument().isPackExpansion())
- Converted.push_back(NewArgs[ArgIdx++].getArgument());
+ while (ArgIdx < NumArgs &&
+ NewArgs[ArgIdx].getArgument().isPackExpansion()) {
+ const TemplateArgument &Arg = NewArgs[ArgIdx++].getArgument();
+ SugaredConverted.push_back(Arg);
+ CanonicalConverted.push_back(Context.getCanonicalTemplateArgument(Arg));
+ }
}
// If we have any leftover arguments, then there were too many arguments.
@@ -5902,8 +6267,7 @@ bool Sema::CheckTemplateArgumentList(
<< (int)getTemplateNameKindForDiagnostics(TemplateName(Template))
<< Template
<< SourceRange(NewArgs[ArgIdx].getLocation(), NewArgs.getRAngleLoc());
- Diag(Template->getLocation(), diag::note_template_decl_here)
- << Params->getSourceRange();
+ NoteTemplateLocation(*Template, Params->getSourceRange());
return true;
}
@@ -5912,13 +6276,39 @@ bool Sema::CheckTemplateArgumentList(
if (UpdateArgsWithConversions)
TemplateArgs = std::move(NewArgs);
- if (!PartialTemplateArgs &&
- EnsureTemplateArgumentListConstraints(
- Template, Converted, SourceRange(TemplateLoc,
- TemplateArgs.getRAngleLoc()))) {
- if (ConstraintsNotSatisfied)
- *ConstraintsNotSatisfied = true;
- return true;
+ if (!PartialTemplateArgs) {
+ TemplateArgumentList StackTemplateArgs(TemplateArgumentList::OnStack,
+ CanonicalConverted);
+ // Setup the context/ThisScope for the case where we are needing to
+ // re-instantiate constraints outside of normal instantiation.
+ DeclContext *NewContext = Template->getDeclContext();
+
+ // If this template is in a template, make sure we extract the templated
+ // decl.
+ if (auto *TD = dyn_cast<TemplateDecl>(NewContext))
+ NewContext = Decl::castToDeclContext(TD->getTemplatedDecl());
+ auto *RD = dyn_cast<CXXRecordDecl>(NewContext);
+
+ Qualifiers ThisQuals;
+ if (const auto *Method =
+ dyn_cast_or_null<CXXMethodDecl>(Template->getTemplatedDecl()))
+ ThisQuals = Method->getMethodQualifiers();
+
+ ContextRAII Context(*this, NewContext);
+ CXXThisScopeRAII(*this, RD, ThisQuals, RD != nullptr);
+
+ MultiLevelTemplateArgumentList MLTAL = getTemplateInstantiationArgs(
+ Template, NewContext, /*Final=*/false, &StackTemplateArgs,
+ /*RelativeToPrimary=*/true,
+ /*Pattern=*/nullptr,
+ /*ForConceptInstantiation=*/true);
+ if (EnsureTemplateArgumentListConstraints(
+ Template, MLTAL,
+ SourceRange(TemplateLoc, TemplateArgs.getRAngleLoc()))) {
+ if (ConstraintsNotSatisfied)
+ *ConstraintsNotSatisfied = true;
+ return true;
+ }
}
return false;
@@ -6063,7 +6453,7 @@ bool UnnamedLocalNoLinkageFinder::VisitTypeOfExprType(const TypeOfExprType*) {
}
bool UnnamedLocalNoLinkageFinder::VisitTypeOfType(const TypeOfType* T) {
- return Visit(T->getUnderlyingType());
+ return Visit(T->getUnmodifiedType());
}
bool UnnamedLocalNoLinkageFinder::VisitDecltypeType(const DecltypeType*) {
@@ -6151,12 +6541,12 @@ bool UnnamedLocalNoLinkageFinder::VisitPipeType(const PipeType* T) {
return false;
}
-bool UnnamedLocalNoLinkageFinder::VisitExtIntType(const ExtIntType *T) {
+bool UnnamedLocalNoLinkageFinder::VisitBitIntType(const BitIntType *T) {
return false;
}
-bool UnnamedLocalNoLinkageFinder::VisitDependentExtIntType(
- const DependentExtIntType *T) {
+bool UnnamedLocalNoLinkageFinder::VisitDependentBitIntType(
+ const DependentBitIntType *T) {
return false;
}
@@ -6212,8 +6602,9 @@ bool Sema::CheckTemplateArgument(TypeSourceInfo *ArgInfo) {
assert(ArgInfo && "invalid TypeSourceInfo");
QualType Arg = ArgInfo->getType();
SourceRange SR = ArgInfo->getTypeLoc().getSourceRange();
+ QualType CanonArg = Context.getCanonicalType(Arg);
- if (Arg->isVariablyModifiedType()) {
+ if (CanonArg->isVariablyModifiedType()) {
return Diag(SR.getBegin(), diag::err_variably_modified_template_arg) << Arg;
} else if (Context.hasSameUnqualifiedType(Arg, Context.OverloadTy)) {
return Diag(SR.getBegin(), diag::err_template_arg_overload_type) << SR;
@@ -6226,9 +6617,9 @@ bool Sema::CheckTemplateArgument(TypeSourceInfo *ArgInfo) {
//
// C++11 allows these, and even in C++03 we allow them as an extension with
// a warning.
- if (LangOpts.CPlusPlus11 || Arg->hasUnnamedOrLocalType()) {
+ if (LangOpts.CPlusPlus11 || CanonArg->hasUnnamedOrLocalType()) {
UnnamedLocalNoLinkageFinder Finder(*this, SR);
- (void)Finder.Visit(Context.getCanonicalType(Arg));
+ (void)Finder.Visit(CanonArg);
}
return false;
@@ -6288,7 +6679,7 @@ isNullPointerValueTemplateArgument(Sema &S, NonTypeTemplateParmDecl *Param,
for (unsigned I = 0, N = Notes.size(); I != N; ++I)
S.Diag(Notes[I].first, Notes[I].second);
- S.Diag(Param->getLocation(), diag::note_template_param_here);
+ S.NoteTemplateParameterLocation(*Param);
return NPV_Error;
}
@@ -6300,7 +6691,7 @@ isNullPointerValueTemplateArgument(Sema &S, NonTypeTemplateParmDecl *Param,
// - a constant expression that evaluates to a null pointer value (4.10); or
// - a constant expression that evaluates to a null member pointer value
// (4.11); or
- if ((EvalResult.Val.isLValue() && !EvalResult.Val.getLValueBase()) ||
+ if ((EvalResult.Val.isLValue() && EvalResult.Val.isNullPointer()) ||
(EvalResult.Val.isMemberPointer() &&
!EvalResult.Val.getMemberPointerDecl())) {
// If our expression has an appropriate type, we've succeeded.
@@ -6314,10 +6705,20 @@ isNullPointerValueTemplateArgument(Sema &S, NonTypeTemplateParmDecl *Param,
// then recover as if the types were correct.
S.Diag(Arg->getExprLoc(), diag::err_template_arg_wrongtype_null_constant)
<< Arg->getType() << ParamType << Arg->getSourceRange();
- S.Diag(Param->getLocation(), diag::note_template_param_here);
+ S.NoteTemplateParameterLocation(*Param);
return NPV_NullPointer;
}
+ if (EvalResult.Val.isLValue() && !EvalResult.Val.getLValueBase()) {
+ // We found a pointer that isn't null, but doesn't refer to an object.
+ // We could just return NPV_NotNullPointer, but we can print a better
+ // message with the information we have here.
+ S.Diag(Arg->getExprLoc(), diag::err_template_arg_invalid)
+ << EvalResult.Val.getAsString(S.Context, ParamType);
+ S.NoteTemplateParameterLocation(*Param);
+ return NPV_Error;
+ }
+
// If we don't have a null pointer value, but we do have a NULL pointer
// constant, suggest a cast to the appropriate type.
if (Arg->isNullPointerConstant(S.Context, Expr::NPC_NeverValueDependent)) {
@@ -6326,7 +6727,7 @@ isNullPointerValueTemplateArgument(Sema &S, NonTypeTemplateParmDecl *Param,
<< ParamType << FixItHint::CreateInsertion(Arg->getBeginLoc(), Code)
<< FixItHint::CreateInsertion(S.getLocForEndOfToken(Arg->getEndLoc()),
")");
- S.Diag(Param->getLocation(), diag::note_template_param_here);
+ S.NoteTemplateParameterLocation(*Param);
return NPV_NullPointer;
}
@@ -6367,7 +6768,7 @@ static bool CheckTemplateArgumentIsCompatibleWithParameter(
S.Diag(Arg->getBeginLoc(),
diag::err_template_arg_ref_bind_ignores_quals)
<< ParamType << Arg->getType() << Arg->getSourceRange();
- S.Diag(Param->getLocation(), diag::note_template_param_here);
+ S.NoteTemplateParameterLocation(*Param);
return true;
}
}
@@ -6385,7 +6786,7 @@ static bool CheckTemplateArgumentIsCompatibleWithParameter(
else
S.Diag(Arg->getBeginLoc(), diag::err_template_arg_not_convertible)
<< ArgIn->getType() << ParamType << Arg->getSourceRange();
- S.Diag(Param->getLocation(), diag::note_template_param_here);
+ S.NoteTemplateParameterLocation(*Param);
return true;
}
}
@@ -6395,12 +6796,9 @@ static bool CheckTemplateArgumentIsCompatibleWithParameter(
/// Checks whether the given template argument is the address
/// of an object or function according to C++ [temp.arg.nontype]p1.
-static bool
-CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
- NonTypeTemplateParmDecl *Param,
- QualType ParamType,
- Expr *ArgIn,
- TemplateArgument &Converted) {
+static bool CheckTemplateArgumentAddressOfObjectOrFunction(
+ Sema &S, NonTypeTemplateParmDecl *Param, QualType ParamType, Expr *ArgIn,
+ TemplateArgument &SugaredConverted, TemplateArgument &CanonicalConverted) {
bool Invalid = false;
Expr *Arg = ArgIn;
QualType ArgType = Arg->getType();
@@ -6504,8 +6902,11 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
Entity)) {
case NPV_NullPointer:
S.Diag(Arg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null);
- Converted = TemplateArgument(S.Context.getCanonicalType(ParamType),
- /*isNullPtr=*/true);
+ SugaredConverted = TemplateArgument(ParamType,
+ /*isNullPtr=*/true);
+ CanonicalConverted =
+ TemplateArgument(S.Context.getCanonicalType(ParamType),
+ /*isNullPtr=*/true);
return false;
case NPV_Error:
@@ -6519,14 +6920,16 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
// Stop checking the precise nature of the argument if it is value dependent,
// it should be checked when instantiated.
if (Arg->isValueDependent()) {
- Converted = TemplateArgument(ArgIn);
+ SugaredConverted = TemplateArgument(ArgIn);
+ CanonicalConverted =
+ S.Context.getCanonicalTemplateArgument(SugaredConverted);
return false;
}
if (!Entity) {
S.Diag(Arg->getBeginLoc(), diag::err_template_arg_not_decl_ref)
<< Arg->getSourceRange();
- S.Diag(Param->getLocation(), diag::note_template_param_here);
+ S.NoteTemplateParameterLocation(*Param);
return true;
}
@@ -6534,7 +6937,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
if (isa<FieldDecl>(Entity) || isa<IndirectFieldDecl>(Entity)) {
S.Diag(Arg->getBeginLoc(), diag::err_template_arg_field)
<< Entity << Arg->getSourceRange();
- S.Diag(Param->getLocation(), diag::note_template_param_here);
+ S.NoteTemplateParameterLocation(*Param);
return true;
}
@@ -6543,7 +6946,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
if (!Method->isStatic()) {
S.Diag(Arg->getBeginLoc(), diag::err_template_arg_method)
<< Method << Arg->getSourceRange();
- S.Diag(Param->getLocation(), diag::note_template_param_here);
+ S.NoteTemplateParameterLocation(*Param);
return true;
}
}
@@ -6562,7 +6965,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
}
// Address / reference template args must have external linkage in C++98.
- if (Entity->getFormalLinkage() == InternalLinkage) {
+ if (Entity->getFormalLinkage() == Linkage::Internal) {
S.Diag(Arg->getBeginLoc(),
S.getLangOpts().CPlusPlus11
? diag::warn_cxx98_compat_template_arg_object_internal
@@ -6583,7 +6986,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
if (Var->getType()->isReferenceType()) {
S.Diag(Arg->getBeginLoc(), diag::err_template_arg_reference_var)
<< Var->getType() << Arg->getSourceRange();
- S.Diag(Param->getLocation(), diag::note_template_param_here);
+ S.NoteTemplateParameterLocation(*Param);
return true;
}
@@ -6604,14 +7007,14 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
ParamType.getNonReferenceType())) {
S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
<< ParamType;
- S.Diag(Param->getLocation(), diag::note_template_param_here);
+ S.NoteTemplateParameterLocation(*Param);
return true;
}
S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
<< ParamType
<< FixItHint::CreateRemoval(AddrOpLoc);
- S.Diag(Param->getLocation(), diag::note_template_param_here);
+ S.NoteTemplateParameterLocation(*Param);
ArgType = Entity->getType();
}
@@ -6633,14 +7036,14 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
if (!S.Context.hasSameUnqualifiedType(ArgType, ParamType)) {
S.Diag(Arg->getBeginLoc(), diag::err_template_arg_not_address_of)
<< ParamType;
- S.Diag(Param->getLocation(), diag::note_template_param_here);
+ S.NoteTemplateParameterLocation(*Param);
return true;
}
S.Diag(Arg->getBeginLoc(), diag::err_template_arg_not_address_of)
<< ParamType << FixItHint::CreateInsertion(Arg->getBeginLoc(), "&");
- S.Diag(Param->getLocation(), diag::note_template_param_here);
+ S.NoteTemplateParameterLocation(*Param);
}
}
@@ -6649,19 +7052,21 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
return true;
// Create the template argument.
- Converted = TemplateArgument(cast<ValueDecl>(Entity->getCanonicalDecl()),
- S.Context.getCanonicalType(ParamType));
+ SugaredConverted = TemplateArgument(Entity, ParamType);
+ CanonicalConverted =
+ TemplateArgument(cast<ValueDecl>(Entity->getCanonicalDecl()),
+ S.Context.getCanonicalType(ParamType));
S.MarkAnyDeclReferenced(Arg->getBeginLoc(), Entity, false);
return false;
}
/// Checks whether the given template argument is a pointer to
/// member constant according to C++ [temp.arg.nontype]p1.
-static bool CheckTemplateArgumentPointerToMember(Sema &S,
- NonTypeTemplateParmDecl *Param,
- QualType ParamType,
- Expr *&ResultArg,
- TemplateArgument &Converted) {
+static bool
+CheckTemplateArgumentPointerToMember(Sema &S, NonTypeTemplateParmDecl *Param,
+ QualType ParamType, Expr *&ResultArg,
+ TemplateArgument &SugaredConverted,
+ TemplateArgument &CanonicalConverted) {
bool Invalid = false;
Expr *Arg = ResultArg;
@@ -6709,10 +7114,14 @@ static bool CheckTemplateArgumentPointerToMember(Sema &S,
if (VD->getType()->isMemberPointerType()) {
if (isa<NonTypeTemplateParmDecl>(VD)) {
if (Arg->isTypeDependent() || Arg->isValueDependent()) {
- Converted = TemplateArgument(Arg);
+ SugaredConverted = TemplateArgument(Arg);
+ CanonicalConverted =
+ S.Context.getCanonicalTemplateArgument(SugaredConverted);
} else {
- VD = cast<ValueDecl>(VD->getCanonicalDecl());
- Converted = TemplateArgument(VD, ParamType);
+ SugaredConverted = TemplateArgument(VD, ParamType);
+ CanonicalConverted =
+ TemplateArgument(cast<ValueDecl>(VD->getCanonicalDecl()),
+ S.Context.getCanonicalType(ParamType));
}
return Invalid;
}
@@ -6730,8 +7139,10 @@ static bool CheckTemplateArgumentPointerToMember(Sema &S,
return true;
case NPV_NullPointer:
S.Diag(ResultArg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null);
- Converted = TemplateArgument(S.Context.getCanonicalType(ParamType),
- /*isNullPtr*/true);
+ SugaredConverted = TemplateArgument(ParamType,
+ /*isNullPtr*/ true);
+ CanonicalConverted = TemplateArgument(S.Context.getCanonicalType(ParamType),
+ /*isNullPtr*/ true);
return false;
case NPV_NotNullPointer:
break;
@@ -6748,7 +7159,7 @@ static bool CheckTemplateArgumentPointerToMember(Sema &S,
// We can't perform this conversion.
S.Diag(ResultArg->getBeginLoc(), diag::err_template_arg_not_convertible)
<< ResultArg->getType() << ParamType << ResultArg->getSourceRange();
- S.Diag(Param->getLocation(), diag::note_template_param_here);
+ S.NoteTemplateParameterLocation(*Param);
return true;
}
@@ -6762,16 +7173,22 @@ static bool CheckTemplateArgumentPointerToMember(Sema &S,
isa<CXXMethodDecl>(DRE->getDecl())) {
assert((isa<FieldDecl>(DRE->getDecl()) ||
isa<IndirectFieldDecl>(DRE->getDecl()) ||
- !cast<CXXMethodDecl>(DRE->getDecl())->isStatic()) &&
+ cast<CXXMethodDecl>(DRE->getDecl())
+ ->isImplicitObjectMemberFunction()) &&
"Only non-static member pointers can make it here");
// Okay: this is the address of a non-static member, and therefore
// a member pointer constant.
if (Arg->isTypeDependent() || Arg->isValueDependent()) {
- Converted = TemplateArgument(Arg);
+ SugaredConverted = TemplateArgument(Arg);
+ CanonicalConverted =
+ S.Context.getCanonicalTemplateArgument(SugaredConverted);
} else {
- ValueDecl *D = cast<ValueDecl>(DRE->getDecl()->getCanonicalDecl());
- Converted = TemplateArgument(D, S.Context.getCanonicalType(ParamType));
+ ValueDecl *D = DRE->getDecl();
+ SugaredConverted = TemplateArgument(D, ParamType);
+ CanonicalConverted =
+ TemplateArgument(cast<ValueDecl>(D->getCanonicalDecl()),
+ S.Context.getCanonicalType(ParamType));
}
return Invalid;
}
@@ -6792,7 +7209,8 @@ static bool CheckTemplateArgumentPointerToMember(Sema &S,
/// type of the non-type template parameter after it has been instantiated.
ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType ParamType, Expr *Arg,
- TemplateArgument &Converted,
+ TemplateArgument &SugaredConverted,
+ TemplateArgument &CanonicalConverted,
CheckTemplateArgumentKind CTAK) {
SourceLocation StartLoc = Arg->getBeginLoc();
@@ -6807,7 +7225,9 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
if (CTAK == CTAK_Deduced && Arg->isTypeDependent()) {
auto *AT = dyn_cast<AutoType>(DeducedT);
if (AT && AT->isDecltypeAuto()) {
- Converted = TemplateArgument(Arg);
+ SugaredConverted = TemplateArgument(Arg);
+ CanonicalConverted = TemplateArgument(
+ Context.getCanonicalTemplateArgument(SugaredConverted));
return Arg;
}
}
@@ -6815,7 +7235,6 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// When checking a deduced template argument, deduce from its type even if
// the type is dependent, in order to check the types of non-type template
// arguments line up properly in partial ordering.
- Optional<unsigned> Depth = Param->getDepth() + 1;
Expr *DeductionArg = Arg;
if (auto *PE = dyn_cast<PackExpansionExpr>(DeductionArg))
DeductionArg = PE->getPattern();
@@ -6831,20 +7250,30 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
DeduceTemplateSpecializationFromInitializer(TSI, Entity, Kind, Inits);
if (ParamType.isNull())
return ExprError();
- } else if (DeduceAutoType(
- TSI, DeductionArg, ParamType, Depth,
- // We do not check constraints right now because the
- // immediately-declared constraint of the auto type is also
- // an associated constraint, and will be checked along with
- // the other associated constraints after checking the
- // template argument list.
- /*IgnoreConstraints=*/true) == DAR_Failed) {
- Diag(Arg->getExprLoc(),
- diag::err_non_type_template_parm_type_deduction_failure)
- << Param->getDeclName() << Param->getType() << Arg->getType()
- << Arg->getSourceRange();
- Diag(Param->getLocation(), diag::note_template_param_here);
- return ExprError();
+ } else {
+ TemplateDeductionInfo Info(DeductionArg->getExprLoc(),
+ Param->getDepth() + 1);
+ ParamType = QualType();
+ TemplateDeductionResult Result =
+ DeduceAutoType(TSI->getTypeLoc(), DeductionArg, ParamType, Info,
+ /*DependentDeduction=*/true,
+ // We do not check constraints right now because the
+ // immediately-declared constraint of the auto type is
+ // also an associated constraint, and will be checked
+ // along with the other associated constraints after
+ // checking the template argument list.
+ /*IgnoreConstraints=*/true);
+ if (Result == TDK_AlreadyDiagnosed) {
+ if (ParamType.isNull())
+ return ExprError();
+ } else if (Result != TDK_Success) {
+ Diag(Arg->getExprLoc(),
+ diag::err_non_type_template_parm_type_deduction_failure)
+ << Param->getDeclName() << Param->getType() << Arg->getType()
+ << Arg->getSourceRange();
+ NoteTemplateParameterLocation(*Param);
+ return ExprError();
+ }
}
// CheckNonTypeTemplateParameterType will produce a diagnostic if there's
// an error. The error message normally references the parameter
@@ -6852,7 +7281,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// where the parameter type is deduced.
ParamType = CheckNonTypeTemplateParameterType(ParamType, Arg->getExprLoc());
if (ParamType.isNull()) {
- Diag(Param->getLocation(), diag::note_template_param_here);
+ NoteTemplateParameterLocation(*Param);
return ExprError();
}
}
@@ -6876,7 +7305,9 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// work. Similarly for CTAD, when comparing 'A<x>' against 'A'.
if ((ParamType->isDependentType() || Arg->isTypeDependent()) &&
!Arg->getType()->getContainedDeducedType()) {
- Converted = TemplateArgument(Arg);
+ SugaredConverted = TemplateArgument(Arg);
+ CanonicalConverted = TemplateArgument(
+ Context.getCanonicalTemplateArgument(SugaredConverted));
return Arg;
}
// FIXME: This attempts to implement C++ [temp.deduct.type]p17. Per DR1770,
@@ -6886,16 +7317,13 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
Diag(StartLoc, diag::err_deduced_non_type_template_arg_type_mismatch)
<< Arg->getType()
<< ParamType.getUnqualifiedType();
- Diag(Param->getLocation(), diag::note_template_param_here);
+ NoteTemplateParameterLocation(*Param);
return ExprError();
}
// If either the parameter has a dependent type or the argument is
- // type-dependent, there's nothing we can check now. The argument only
- // contains an unexpanded pack during partial ordering, and there's
- // nothing more we can check in that case.
- if (ParamType->isDependentType() || Arg->isTypeDependent() ||
- Arg->containsUnexpandedParameterPack()) {
+ // type-dependent, there's nothing we can check now.
+ if (ParamType->isDependentType() || Arg->isTypeDependent()) {
// Force the argument to the type of the parameter to maintain invariants.
auto *PE = dyn_cast<PackExpansionExpr>(Arg);
if (PE)
@@ -6913,87 +7341,100 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
PackExpansionExpr(E.get()->getType(), E.get(), PE->getEllipsisLoc(),
PE->getNumExpansions());
}
- Converted = TemplateArgument(E.get());
+ SugaredConverted = TemplateArgument(E.get());
+ CanonicalConverted = TemplateArgument(
+ Context.getCanonicalTemplateArgument(SugaredConverted));
return E;
}
+ QualType CanonParamType = Context.getCanonicalType(ParamType);
+ // Avoid making a copy when initializing a template parameter of class type
+ // from a template parameter object of the same type. This is going beyond
+ // the standard, but is required for soundness: in
+ // template<A a> struct X { X *p; X<a> *q; };
+ // ... we need p and q to have the same type.
+ //
+ // Similarly, don't inject a call to a copy constructor when initializing
+ // from a template parameter of the same type.
+ Expr *InnerArg = Arg->IgnoreParenImpCasts();
+ if (ParamType->isRecordType() && isa<DeclRefExpr>(InnerArg) &&
+ Context.hasSameUnqualifiedType(ParamType, InnerArg->getType())) {
+ NamedDecl *ND = cast<DeclRefExpr>(InnerArg)->getDecl();
+ if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
+
+ SugaredConverted = TemplateArgument(TPO, ParamType);
+ CanonicalConverted =
+ TemplateArgument(TPO->getCanonicalDecl(), CanonParamType);
+ return Arg;
+ }
+ if (isa<NonTypeTemplateParmDecl>(ND)) {
+ SugaredConverted = TemplateArgument(Arg);
+ CanonicalConverted =
+ Context.getCanonicalTemplateArgument(SugaredConverted);
+ return Arg;
+ }
+ }
+
// The initialization of the parameter from the argument is
// a constant-evaluated context.
EnterExpressionEvaluationContext ConstantEvaluated(
*this, Sema::ExpressionEvaluationContext::ConstantEvaluated);
- if (getLangOpts().CPlusPlus17) {
- QualType CanonParamType = Context.getCanonicalType(ParamType);
-
- // Avoid making a copy when initializing a template parameter of class type
- // from a template parameter object of the same type. This is going beyond
- // the standard, but is required for soundness: in
- // template<A a> struct X { X *p; X<a> *q; };
- // ... we need p and q to have the same type.
- //
- // Similarly, don't inject a call to a copy constructor when initializing
- // from a template parameter of the same type.
- Expr *InnerArg = Arg->IgnoreParenImpCasts();
- if (ParamType->isRecordType() && isa<DeclRefExpr>(InnerArg) &&
- Context.hasSameUnqualifiedType(ParamType, InnerArg->getType())) {
- NamedDecl *ND = cast<DeclRefExpr>(InnerArg)->getDecl();
- if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
- Converted = TemplateArgument(TPO, CanonParamType);
- return Arg;
- }
- if (isa<NonTypeTemplateParmDecl>(ND)) {
- Converted = TemplateArgument(Arg);
- return Arg;
- }
- }
+ bool IsConvertedConstantExpression = true;
+ if (isa<InitListExpr>(Arg) || ParamType->isRecordType()) {
+ InitializationKind Kind = InitializationKind::CreateForInit(
+ Arg->getBeginLoc(), /*DirectInit=*/false, Arg);
+ Expr *Inits[1] = {Arg};
+ InitializedEntity Entity =
+ InitializedEntity::InitializeTemplateParameter(ParamType, Param);
+ InitializationSequence InitSeq(*this, Entity, Kind, Inits);
+ ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Inits);
+ if (Result.isInvalid() || !Result.get())
+ return ExprError();
+ Result = ActOnConstantExpression(Result.get());
+ if (Result.isInvalid() || !Result.get())
+ return ExprError();
+ Arg = ActOnFinishFullExpr(Result.get(), Arg->getBeginLoc(),
+ /*DiscardedValue=*/false,
+ /*IsConstexpr=*/true, /*IsTemplateArgument=*/true)
+ .get();
+ IsConvertedConstantExpression = false;
+ }
+ if (getLangOpts().CPlusPlus17) {
// C++17 [temp.arg.nontype]p1:
// A template-argument for a non-type template parameter shall be
// a converted constant expression of the type of the template-parameter.
APValue Value;
- ExprResult ArgResult = CheckConvertedConstantExpression(
- Arg, ParamType, Value, CCEK_TemplateArg, Param);
- if (ArgResult.isInvalid())
- return ExprError();
+ ExprResult ArgResult;
+ if (IsConvertedConstantExpression) {
+ ArgResult = BuildConvertedConstantExpression(Arg, ParamType,
+ CCEK_TemplateArg, Param);
+ if (ArgResult.isInvalid())
+ return ExprError();
+ } else {
+ ArgResult = Arg;
+ }
// For a value-dependent argument, CheckConvertedConstantExpression is
// permitted (and expected) to be unable to determine a value.
if (ArgResult.get()->isValueDependent()) {
- Converted = TemplateArgument(ArgResult.get());
+ SugaredConverted = TemplateArgument(ArgResult.get());
+ CanonicalConverted =
+ Context.getCanonicalTemplateArgument(SugaredConverted);
return ArgResult;
}
- // Convert the APValue to a TemplateArgument.
- switch (Value.getKind()) {
- case APValue::None:
- assert(ParamType->isNullPtrType());
- Converted = TemplateArgument(CanonParamType, /*isNullPtr*/true);
- break;
- case APValue::Indeterminate:
- llvm_unreachable("result of constant evaluation should be initialized");
- break;
- case APValue::Int:
- assert(ParamType->isIntegralOrEnumerationType());
- Converted = TemplateArgument(Context, Value.getInt(), CanonParamType);
- break;
- case APValue::MemberPointer: {
- assert(ParamType->isMemberPointerType());
-
- // FIXME: We need TemplateArgument representation and mangling for these.
- if (!Value.getMemberPointerPath().empty()) {
- Diag(Arg->getBeginLoc(),
- diag::err_template_arg_member_ptr_base_derived_not_supported)
- << Value.getMemberPointerDecl() << ParamType
- << Arg->getSourceRange();
- return ExprError();
- }
+ APValue PreNarrowingValue;
+ ArgResult = EvaluateConvertedConstantExpression(
+ ArgResult.get(), ParamType, Value, CCEK_TemplateArg, /*RequireInt=*/
+ false, PreNarrowingValue);
+ if (ArgResult.isInvalid())
+ return ExprError();
- auto *VD = const_cast<ValueDecl*>(Value.getMemberPointerDecl());
- Converted = VD ? TemplateArgument(VD, CanonParamType)
- : TemplateArgument(CanonParamType, /*isNullPtr*/true);
- break;
- }
- case APValue::LValue: {
+ if (Value.isLValue()) {
+ APValue::LValueBase Base = Value.getLValueBase();
+ auto *VD = const_cast<ValueDecl *>(Base.dyn_cast<const ValueDecl *>());
// For a non-type template-parameter of pointer or reference type,
// the value of the constant expression shall not refer to
assert(ParamType->isPointerType() || ParamType->isReferenceType() ||
@@ -7002,55 +7443,44 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// -- a string literal
// -- the result of a typeid expression, or
// -- a predefined __func__ variable
- APValue::LValueBase Base = Value.getLValueBase();
- auto *VD = const_cast<ValueDecl *>(Base.dyn_cast<const ValueDecl *>());
- if (Base && (!VD || isa<LifetimeExtendedTemporaryDecl>(VD))) {
+ if (Base &&
+ (!VD ||
+ isa<LifetimeExtendedTemporaryDecl, UnnamedGlobalConstantDecl>(VD))) {
Diag(Arg->getBeginLoc(), diag::err_template_arg_not_decl_ref)
<< Arg->getSourceRange();
return ExprError();
}
- // -- a subobject
- // FIXME: Until C++20
- if (Value.hasLValuePath() && Value.getLValuePath().size() == 1 &&
- VD && VD->getType()->isArrayType() &&
+
+ if (Value.hasLValuePath() && Value.getLValuePath().size() == 1 && VD &&
+ VD->getType()->isArrayType() &&
Value.getLValuePath()[0].getAsArrayIndex() == 0 &&
!Value.isLValueOnePastTheEnd() && ParamType->isPointerType()) {
- // Per defect report (no number yet):
- // ... other than a pointer to the first element of a complete array
- // object.
- } else if (!Value.hasLValuePath() || Value.getLValuePath().size() ||
- Value.isLValueOnePastTheEnd()) {
- Diag(StartLoc, diag::err_non_type_template_arg_subobject)
- << Value.getAsString(Context, ParamType);
- return ExprError();
+ SugaredConverted = TemplateArgument(VD, ParamType);
+ CanonicalConverted = TemplateArgument(
+ cast<ValueDecl>(VD->getCanonicalDecl()), CanonParamType);
+ return ArgResult.get();
+ }
+
+ // -- a subobject [until C++20]
+ if (!getLangOpts().CPlusPlus20) {
+ if (!Value.hasLValuePath() || Value.getLValuePath().size() ||
+ Value.isLValueOnePastTheEnd()) {
+ Diag(StartLoc, diag::err_non_type_template_arg_subobject)
+ << Value.getAsString(Context, ParamType);
+ return ExprError();
+ }
+ assert((VD || !ParamType->isReferenceType()) &&
+ "null reference should not be a constant expression");
+ assert((!VD || !ParamType->isNullPtrType()) &&
+ "non-null value of type nullptr_t?");
}
- assert((VD || !ParamType->isReferenceType()) &&
- "null reference should not be a constant expression");
- assert((!VD || !ParamType->isNullPtrType()) &&
- "non-null value of type nullptr_t?");
- Converted = VD ? TemplateArgument(VD, CanonParamType)
- : TemplateArgument(CanonParamType, /*isNullPtr*/true);
- break;
}
- case APValue::Struct:
- case APValue::Union:
- // Get or create the corresponding template parameter object.
- Converted = TemplateArgument(
- Context.getTemplateParamObjectDecl(CanonParamType, Value),
- CanonParamType);
- break;
- case APValue::AddrLabelDiff:
+
+ if (Value.isAddrLabelDiff())
return Diag(StartLoc, diag::err_non_type_template_arg_addr_label_diff);
- case APValue::FixedPoint:
- case APValue::Float:
- case APValue::ComplexInt:
- case APValue::ComplexFloat:
- case APValue::Vector:
- case APValue::Array:
- return Diag(StartLoc, diag::err_non_type_template_arg_unsupported)
- << ParamType;
- }
+ SugaredConverted = TemplateArgument(Context, ParamType, Value);
+ CanonicalConverted = TemplateArgument(Context, CanonParamType, Value);
return ArgResult.get();
}
@@ -7088,7 +7518,9 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// We can't check arbitrary value-dependent arguments.
if (ArgResult.get()->isValueDependent()) {
- Converted = TemplateArgument(ArgResult.get());
+ SugaredConverted = TemplateArgument(ArgResult.get());
+ CanonicalConverted =
+ Context.getCanonicalTemplateArgument(SugaredConverted);
return ArgResult;
}
@@ -7098,12 +7530,13 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType IntegerType = ParamType;
if (const EnumType *Enum = IntegerType->getAs<EnumType>())
IntegerType = Enum->getDecl()->getIntegerType();
- Value = Value.extOrTrunc(IntegerType->isExtIntType()
+ Value = Value.extOrTrunc(IntegerType->isBitIntType()
? Context.getIntWidth(IntegerType)
: Context.getTypeSize(IntegerType));
- Converted = TemplateArgument(Context, Value,
- Context.getCanonicalType(ParamType));
+ SugaredConverted = TemplateArgument(Context, Value, ParamType);
+ CanonicalConverted =
+ TemplateArgument(Context, Value, Context.getCanonicalType(ParamType));
return ArgResult;
}
@@ -7125,7 +7558,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
if (!ArgType->isIntegralOrEnumerationType()) {
Diag(Arg->getBeginLoc(), diag::err_template_arg_not_integral_or_enumeral)
<< ArgType << Arg->getSourceRange();
- Diag(Param->getLocation(), diag::note_template_param_here);
+ NoteTemplateParameterLocation(*Param);
return ExprError();
} else if (!Arg->isValueDependent()) {
class TmplArgICEDiagnoser : public VerifyICEDiagnoser {
@@ -7163,7 +7596,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// We can't perform this conversion.
Diag(Arg->getBeginLoc(), diag::err_template_arg_not_convertible)
<< Arg->getType() << ParamType << Arg->getSourceRange();
- Diag(Param->getLocation(), diag::note_template_param_here);
+ NoteTemplateParameterLocation(*Param);
return ExprError();
}
@@ -7173,13 +7606,16 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
if (Arg->isValueDependent()) {
// The argument is value-dependent. Create a new
// TemplateArgument with the converted expression.
- Converted = TemplateArgument(Arg);
+ SugaredConverted = TemplateArgument(Arg);
+ CanonicalConverted =
+ Context.getCanonicalTemplateArgument(SugaredConverted);
return Arg;
}
- QualType IntegerType = Context.getCanonicalType(ParamType);
- if (const EnumType *Enum = IntegerType->getAs<EnumType>())
- IntegerType = Context.getCanonicalType(Enum->getDecl()->getIntegerType());
+ QualType IntegerType = ParamType;
+ if (const EnumType *Enum = IntegerType->getAs<EnumType>()) {
+ IntegerType = Enum->getDecl()->getIntegerType();
+ }
if (ParamType->isBooleanType()) {
// Value must be zero or one.
@@ -7193,7 +7629,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// Coerce the template argument's value to the value it will have
// based on the template parameter's type.
- unsigned AllowedBits = IntegerType->isExtIntType()
+ unsigned AllowedBits = IntegerType->isBitIntType()
? Context.getIntWidth(IntegerType)
: Context.getTypeSize(IntegerType);
if (Value.getBitWidth() != AllowedBits)
@@ -7206,7 +7642,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
Diag(Arg->getBeginLoc(), diag::warn_template_arg_negative)
<< toString(OldValue, 10) << toString(Value, 10) << Param->getType()
<< Arg->getSourceRange();
- Diag(Param->getLocation(), diag::note_template_param_here);
+ NoteTemplateParameterLocation(*Param);
}
// Complain if we overflowed the template parameter's type.
@@ -7216,19 +7652,19 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
else if (OldValue.isUnsigned())
RequiredBits = OldValue.getActiveBits() + 1;
else
- RequiredBits = OldValue.getMinSignedBits();
+ RequiredBits = OldValue.getSignificantBits();
if (RequiredBits > AllowedBits) {
Diag(Arg->getBeginLoc(), diag::warn_template_arg_too_large)
<< toString(OldValue, 10) << toString(Value, 10) << Param->getType()
<< Arg->getSourceRange();
- Diag(Param->getLocation(), diag::note_template_param_here);
+ NoteTemplateParameterLocation(*Param);
}
}
- Converted = TemplateArgument(Context, Value,
- ParamType->isEnumeralType()
- ? Context.getCanonicalType(ParamType)
- : IntegerType);
+ QualType T = ParamType->isEnumeralType() ? ParamType : IntegerType;
+ SugaredConverted = TemplateArgument(Context, Value, T);
+ CanonicalConverted =
+ TemplateArgument(Context, Value, Context.getCanonicalType(T));
return Arg;
}
@@ -7266,22 +7702,25 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
if (DiagnoseUseOfDecl(Fn, Arg->getBeginLoc()))
return ExprError();
- Arg = FixOverloadedFunctionReference(Arg, FoundResult, Fn);
+ ExprResult Res = FixOverloadedFunctionReference(Arg, FoundResult, Fn);
+ if (Res.isInvalid())
+ return ExprError();
+ Arg = Res.get();
ArgType = Arg->getType();
} else
return ExprError();
}
if (!ParamType->isMemberPointerType()) {
- if (CheckTemplateArgumentAddressOfObjectOrFunction(*this, Param,
- ParamType,
- Arg, Converted))
+ if (CheckTemplateArgumentAddressOfObjectOrFunction(
+ *this, Param, ParamType, Arg, SugaredConverted,
+ CanonicalConverted))
return ExprError();
return Arg;
}
- if (CheckTemplateArgumentPointerToMember(*this, Param, ParamType, Arg,
- Converted))
+ if (CheckTemplateArgumentPointerToMember(
+ *this, Param, ParamType, Arg, SugaredConverted, CanonicalConverted))
return ExprError();
return Arg;
}
@@ -7294,9 +7733,8 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
assert(ParamType->getPointeeType()->isIncompleteOrObjectType() &&
"Only object pointers allowed here");
- if (CheckTemplateArgumentAddressOfObjectOrFunction(*this, Param,
- ParamType,
- Arg, Converted))
+ if (CheckTemplateArgumentAddressOfObjectOrFunction(
+ *this, Param, ParamType, Arg, SugaredConverted, CanonicalConverted))
return ExprError();
return Arg;
}
@@ -7318,16 +7756,17 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
FoundResult)) {
if (DiagnoseUseOfDecl(Fn, Arg->getBeginLoc()))
return ExprError();
-
- Arg = FixOverloadedFunctionReference(Arg, FoundResult, Fn);
+ ExprResult Res = FixOverloadedFunctionReference(Arg, FoundResult, Fn);
+ if (Res.isInvalid())
+ return ExprError();
+ Arg = Res.get();
ArgType = Arg->getType();
} else
return ExprError();
}
- if (CheckTemplateArgumentAddressOfObjectOrFunction(*this, Param,
- ParamType,
- Arg, Converted))
+ if (CheckTemplateArgumentAddressOfObjectOrFunction(
+ *this, Param, ParamType, Arg, SugaredConverted, CanonicalConverted))
return ExprError();
return Arg;
}
@@ -7335,7 +7774,9 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// Deal with parameters of type std::nullptr_t.
if (ParamType->isNullPtrType()) {
if (Arg->isTypeDependent() || Arg->isValueDependent()) {
- Converted = TemplateArgument(Arg);
+ SugaredConverted = TemplateArgument(Arg);
+ CanonicalConverted =
+ Context.getCanonicalTemplateArgument(SugaredConverted);
return Arg;
}
@@ -7343,7 +7784,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
case NPV_NotNullPointer:
Diag(Arg->getExprLoc(), diag::err_template_arg_not_convertible)
<< Arg->getType() << ParamType;
- Diag(Param->getLocation(), diag::note_template_param_here);
+ NoteTemplateParameterLocation(*Param);
return ExprError();
case NPV_Error:
@@ -7351,8 +7792,10 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
case NPV_NullPointer:
Diag(Arg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null);
- Converted = TemplateArgument(Context.getCanonicalType(ParamType),
- /*isNullPtr*/true);
+ SugaredConverted = TemplateArgument(ParamType,
+ /*isNullPtr=*/true);
+ CanonicalConverted = TemplateArgument(Context.getCanonicalType(ParamType),
+ /*isNullPtr=*/true);
return Arg;
}
}
@@ -7361,8 +7804,8 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// member, qualification conversions (4.4) are applied.
assert(ParamType->isMemberPointerType() && "Only pointers to members remain");
- if (CheckTemplateArgumentPointerToMember(*this, Param, ParamType, Arg,
- Converted))
+ if (CheckTemplateArgumentPointerToMember(
+ *this, Param, ParamType, Arg, SugaredConverted, CanonicalConverted))
return ExprError();
return Arg;
}
@@ -7435,10 +7878,10 @@ bool Sema::CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
if (isTemplateTemplateParameterAtLeastAsSpecializedAs(Params, Template,
Arg.getLocation())) {
- // C++2a[temp.func.order]p2
+ // P2113
+ // C++20[temp.func.order]p2
// [...] If both deductions succeed, the partial ordering selects the
- // more constrained template as described by the rules in
- // [temp.constr.order].
+ // more constrained template (if one exists) as determined below.
SmallVector<const Expr *, 3> ParamsAC, TemplateAC;
Params->getAssociatedConstraints(ParamsAC);
// C++2a[temp.arg.template]p3
@@ -7446,7 +7889,9 @@ bool Sema::CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
// are not considered.
if (ParamsAC.empty())
return false;
+
Template->getAssociatedConstraints(TemplateAC);
+
bool IsParamAtLeastAsConstrained;
if (IsAtLeastAsConstrained(Param, ParamsAC, Template, TemplateAC,
IsParamAtLeastAsConstrained))
@@ -7474,6 +7919,37 @@ bool Sema::CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
Arg.getLocation());
}
+static Sema::SemaDiagnosticBuilder noteLocation(Sema &S, const NamedDecl &Decl,
+ unsigned HereDiagID,
+ unsigned ExternalDiagID) {
+ if (Decl.getLocation().isValid())
+ return S.Diag(Decl.getLocation(), HereDiagID);
+
+ SmallString<128> Str;
+ llvm::raw_svector_ostream Out(Str);
+ PrintingPolicy PP = S.getPrintingPolicy();
+ PP.TerseOutput = 1;
+ Decl.print(Out, PP);
+ return S.Diag(Decl.getLocation(), ExternalDiagID) << Out.str();
+}
+
+void Sema::NoteTemplateLocation(const NamedDecl &Decl,
+ std::optional<SourceRange> ParamRange) {
+ SemaDiagnosticBuilder DB =
+ noteLocation(*this, Decl, diag::note_template_decl_here,
+ diag::note_template_decl_external);
+ if (ParamRange && ParamRange->isValid()) {
+ assert(Decl.getLocation().isValid() &&
+ "Parameter range has location when Decl does not");
+ DB << *ParamRange;
+ }
+}
+
+void Sema::NoteTemplateParameterLocation(const NamedDecl &Decl) {
+ noteLocation(*this, Decl, diag::note_template_param_here,
+ diag::note_template_param_external);
+}
+
/// Given a non-type template argument that refers to a
/// declaration and the type of its corresponding non-type template
/// parameter, produce an expression that properly refers to that
@@ -7588,12 +8064,9 @@ Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
/// This routine takes care of the mapping from an integral template
/// argument (which may have any integral type) to the appropriate
/// literal value.
-ExprResult
-Sema::BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
- SourceLocation Loc) {
- assert(Arg.getKind() == TemplateArgument::Integral &&
- "Operation is only valid for integral template arguments");
- QualType OrigT = Arg.getIntegralType();
+static Expr *BuildExpressionFromIntegralTemplateArgumentValue(
+ Sema &S, QualType OrigT, const llvm::APSInt &Int, SourceLocation Loc) {
+ assert(OrigT->isIntegralOrEnumerationType());
// If this is an enum type that we're instantiating, we need to use an integer
// type the same size as the enumerator. We don't want to build an
@@ -7606,46 +8079,151 @@ Sema::BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
Expr *E;
if (T->isAnyCharacterType()) {
- CharacterLiteral::CharacterKind Kind;
+ CharacterLiteralKind Kind;
if (T->isWideCharType())
- Kind = CharacterLiteral::Wide;
- else if (T->isChar8Type() && getLangOpts().Char8)
- Kind = CharacterLiteral::UTF8;
+ Kind = CharacterLiteralKind::Wide;
+ else if (T->isChar8Type() && S.getLangOpts().Char8)
+ Kind = CharacterLiteralKind::UTF8;
else if (T->isChar16Type())
- Kind = CharacterLiteral::UTF16;
+ Kind = CharacterLiteralKind::UTF16;
else if (T->isChar32Type())
- Kind = CharacterLiteral::UTF32;
+ Kind = CharacterLiteralKind::UTF32;
else
- Kind = CharacterLiteral::Ascii;
+ Kind = CharacterLiteralKind::Ascii;
- E = new (Context) CharacterLiteral(Arg.getAsIntegral().getZExtValue(),
- Kind, T, Loc);
+ E = new (S.Context) CharacterLiteral(Int.getZExtValue(), Kind, T, Loc);
} else if (T->isBooleanType()) {
- E = new (Context) CXXBoolLiteralExpr(Arg.getAsIntegral().getBoolValue(),
- T, Loc);
- } else if (T->isNullPtrType()) {
- E = new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc);
+ E = CXXBoolLiteralExpr::Create(S.Context, Int.getBoolValue(), T, Loc);
} else {
- E = IntegerLiteral::Create(Context, Arg.getAsIntegral(), T, Loc);
+ E = IntegerLiteral::Create(S.Context, Int, T, Loc);
}
if (OrigT->isEnumeralType()) {
// FIXME: This is a hack. We need a better way to handle substituted
// non-type template parameters.
- E = CStyleCastExpr::Create(Context, OrigT, VK_PRValue, CK_IntegralCast, E,
- nullptr, CurFPFeatureOverrides(),
- Context.getTrivialTypeSourceInfo(OrigT, Loc),
+ E = CStyleCastExpr::Create(S.Context, OrigT, VK_PRValue, CK_IntegralCast, E,
+ nullptr, S.CurFPFeatureOverrides(),
+ S.Context.getTrivialTypeSourceInfo(OrigT, Loc),
Loc, Loc);
}
return E;
}
+static Expr *BuildExpressionFromNonTypeTemplateArgumentValue(
+ Sema &S, QualType T, const APValue &Val, SourceLocation Loc) {
+ auto MakeInitList = [&](ArrayRef<Expr *> Elts) -> Expr * {
+ auto *ILE = new (S.Context) InitListExpr(S.Context, Loc, Elts, Loc);
+ ILE->setType(T);
+ return ILE;
+ };
+
+ switch (Val.getKind()) {
+ case APValue::AddrLabelDiff:
+ // This cannot occur in a template argument at all.
+ case APValue::Array:
+ case APValue::Struct:
+ case APValue::Union:
+ // These can only occur within a template parameter object, which is
+ // represented as a TemplateArgument::Declaration.
+ llvm_unreachable("unexpected template argument value");
+
+ case APValue::Int:
+ return BuildExpressionFromIntegralTemplateArgumentValue(S, T, Val.getInt(),
+ Loc);
+
+ case APValue::Float:
+ return FloatingLiteral::Create(S.Context, Val.getFloat(), /*IsExact=*/true,
+ T, Loc);
+
+ case APValue::FixedPoint:
+ return FixedPointLiteral::CreateFromRawInt(
+ S.Context, Val.getFixedPoint().getValue(), T, Loc,
+ Val.getFixedPoint().getScale());
+
+ case APValue::ComplexInt: {
+ QualType ElemT = T->castAs<ComplexType>()->getElementType();
+ return MakeInitList({BuildExpressionFromIntegralTemplateArgumentValue(
+ S, ElemT, Val.getComplexIntReal(), Loc),
+ BuildExpressionFromIntegralTemplateArgumentValue(
+ S, ElemT, Val.getComplexIntImag(), Loc)});
+ }
+
+ case APValue::ComplexFloat: {
+ QualType ElemT = T->castAs<ComplexType>()->getElementType();
+ return MakeInitList(
+ {FloatingLiteral::Create(S.Context, Val.getComplexFloatReal(), true,
+ ElemT, Loc),
+ FloatingLiteral::Create(S.Context, Val.getComplexFloatImag(), true,
+ ElemT, Loc)});
+ }
+
+ case APValue::Vector: {
+ QualType ElemT = T->castAs<VectorType>()->getElementType();
+ llvm::SmallVector<Expr *, 8> Elts;
+ for (unsigned I = 0, N = Val.getVectorLength(); I != N; ++I)
+ Elts.push_back(BuildExpressionFromNonTypeTemplateArgumentValue(
+ S, ElemT, Val.getVectorElt(I), Loc));
+ return MakeInitList(Elts);
+ }
+
+ case APValue::None:
+ case APValue::Indeterminate:
+ llvm_unreachable("Unexpected APValue kind.");
+ case APValue::LValue:
+ case APValue::MemberPointer:
+ // There isn't necessarily a valid equivalent source-level syntax for
+ // these; in particular, a naive lowering might violate access control.
+ // So for now we lower to a ConstantExpr holding the value, wrapped around
+ // an OpaqueValueExpr.
+ // FIXME: We should have a better representation for this.
+ ExprValueKind VK = VK_PRValue;
+ if (T->isReferenceType()) {
+ T = T->getPointeeType();
+ VK = VK_LValue;
+ }
+ auto *OVE = new (S.Context) OpaqueValueExpr(Loc, T, VK);
+ return ConstantExpr::Create(S.Context, OVE, Val);
+ }
+ llvm_unreachable("Unhandled APValue::ValueKind enum");
+}
+
+ExprResult
+Sema::BuildExpressionFromNonTypeTemplateArgument(const TemplateArgument &Arg,
+ SourceLocation Loc) {
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ case TemplateArgument::Type:
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ case TemplateArgument::Pack:
+ llvm_unreachable("not a non-type template argument");
+
+ case TemplateArgument::Expression:
+ return Arg.getAsExpr();
+
+ case TemplateArgument::NullPtr:
+ case TemplateArgument::Declaration:
+ return BuildExpressionFromDeclTemplateArgument(
+ Arg, Arg.getNonTypeTemplateArgumentType(), Loc);
+
+ case TemplateArgument::Integral:
+ return BuildExpressionFromIntegralTemplateArgumentValue(
+ *this, Arg.getIntegralType(), Arg.getAsIntegral(), Loc);
+
+ case TemplateArgument::StructuralValue:
+ return BuildExpressionFromNonTypeTemplateArgumentValue(
+ *this, Arg.getStructuralValueType(), Arg.getAsStructuralValue(), Loc);
+ }
+ llvm_unreachable("Unhandled TemplateArgument::ArgKind enum");
+}
+
/// Match two template parameters within template parameter lists.
-static bool MatchTemplateParameterKind(Sema &S, NamedDecl *New, NamedDecl *Old,
- bool Complain,
- Sema::TemplateParameterListEqualKind Kind,
- SourceLocation TemplateArgLoc) {
+static bool MatchTemplateParameterKind(
+ Sema &S, NamedDecl *New,
+ const Sema::TemplateCompareNewDeclInfo &NewInstFrom, NamedDecl *Old,
+ const NamedDecl *OldInstFrom, bool Complain,
+ Sema::TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc) {
// Check the actual kind (type, non-type, template).
if (Old->getKind() != New->getKind()) {
if (Complain) {
@@ -7701,8 +8279,14 @@ static bool MatchTemplateParameterKind(Sema &S, NamedDecl *New, NamedDecl *Old,
// to actually compare the arguments.
if (Kind != Sema::TPL_TemplateTemplateArgumentMatch ||
(!OldNTTP->getType()->isDependentType() &&
- !NewNTTP->getType()->isDependentType()))
- if (!S.Context.hasSameType(OldNTTP->getType(), NewNTTP->getType())) {
+ !NewNTTP->getType()->isDependentType())) {
+ // C++20 [temp.over.link]p6:
+ // Two [non-type] template-parameters are equivalent [if] they have
+ // equivalent types ignoring the use of type-constraints for
+ // placeholder types
+ QualType OldType = S.Context.getUnconstrainedType(OldNTTP->getType());
+ QualType NewType = S.Context.getUnconstrainedType(NewNTTP->getType());
+ if (!S.Context.hasSameType(OldType, NewType)) {
if (Complain) {
unsigned NextDiag = diag::err_template_nontype_parm_different_type;
if (TemplateArgLoc.isValid()) {
@@ -7720,27 +8304,43 @@ static bool MatchTemplateParameterKind(Sema &S, NamedDecl *New, NamedDecl *Old,
return false;
}
+ }
}
// For template template parameters, check the template parameter types.
// The template parameter lists of template template
// parameters must agree.
- else if (TemplateTemplateParmDecl *OldTTP
- = dyn_cast<TemplateTemplateParmDecl>(Old)) {
+ else if (TemplateTemplateParmDecl *OldTTP =
+ dyn_cast<TemplateTemplateParmDecl>(Old)) {
TemplateTemplateParmDecl *NewTTP = cast<TemplateTemplateParmDecl>(New);
- if (!S.TemplateParameterListsAreEqual(NewTTP->getTemplateParameters(),
- OldTTP->getTemplateParameters(),
- Complain,
- (Kind == Sema::TPL_TemplateMatch
- ? Sema::TPL_TemplateTemplateParmMatch
- : Kind),
- TemplateArgLoc))
+ if (!S.TemplateParameterListsAreEqual(
+ NewInstFrom, NewTTP->getTemplateParameters(), OldInstFrom,
+ OldTTP->getTemplateParameters(), Complain,
+ (Kind == Sema::TPL_TemplateMatch
+ ? Sema::TPL_TemplateTemplateParmMatch
+ : Kind),
+ TemplateArgLoc))
return false;
- } else if (Kind != Sema::TPL_TemplateTemplateArgumentMatch) {
+ }
+
+ if (Kind != Sema::TPL_TemplateParamsEquivalent &&
+ Kind != Sema::TPL_TemplateTemplateArgumentMatch &&
+ !isa<TemplateTemplateParmDecl>(Old)) {
const Expr *NewC = nullptr, *OldC = nullptr;
- if (const auto *TC = cast<TemplateTypeParmDecl>(New)->getTypeConstraint())
- NewC = TC->getImmediatelyDeclaredConstraint();
- if (const auto *TC = cast<TemplateTypeParmDecl>(Old)->getTypeConstraint())
- OldC = TC->getImmediatelyDeclaredConstraint();
+
+ if (isa<TemplateTypeParmDecl>(New)) {
+ if (const auto *TC = cast<TemplateTypeParmDecl>(New)->getTypeConstraint())
+ NewC = TC->getImmediatelyDeclaredConstraint();
+ if (const auto *TC = cast<TemplateTypeParmDecl>(Old)->getTypeConstraint())
+ OldC = TC->getImmediatelyDeclaredConstraint();
+ } else if (isa<NonTypeTemplateParmDecl>(New)) {
+ if (const Expr *E = cast<NonTypeTemplateParmDecl>(New)
+ ->getPlaceholderTypeConstraint())
+ NewC = E;
+ if (const Expr *E = cast<NonTypeTemplateParmDecl>(Old)
+ ->getPlaceholderTypeConstraint())
+ OldC = E;
+ } else
+ llvm_unreachable("unexpected template parameter type");
auto Diagnose = [&] {
S.Diag(NewC ? NewC->getBeginLoc() : New->getBeginLoc(),
@@ -7756,10 +8356,8 @@ static bool MatchTemplateParameterKind(Sema &S, NamedDecl *New, NamedDecl *Old,
}
if (NewC) {
- llvm::FoldingSetNodeID OldCID, NewCID;
- OldC->Profile(OldCID, S.Context, /*Canonical=*/true);
- NewC->Profile(NewCID, S.Context, /*Canonical=*/true);
- if (OldCID != NewCID) {
+ if (!S.AreConstraintExpressionsEqual(OldInstFrom, OldC, NewInstFrom,
+ NewC)) {
if (Complain)
Diagnose();
return false;
@@ -7815,12 +8413,10 @@ void DiagnoseTemplateParameterListArityMismatch(Sema &S,
///
/// \returns True if the template parameter lists are equal, false
/// otherwise.
-bool
-Sema::TemplateParameterListsAreEqual(TemplateParameterList *New,
- TemplateParameterList *Old,
- bool Complain,
- TemplateParameterListEqualKind Kind,
- SourceLocation TemplateArgLoc) {
+bool Sema::TemplateParameterListsAreEqual(
+ const TemplateCompareNewDeclInfo &NewInstFrom, TemplateParameterList *New,
+ const NamedDecl *OldInstFrom, TemplateParameterList *Old, bool Complain,
+ TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc) {
if (Old->size() != New->size() && Kind != TPL_TemplateTemplateArgumentMatch) {
if (Complain)
DiagnoseTemplateParameterListArityMismatch(*this, New, Old, Kind,
@@ -7850,8 +8446,9 @@ Sema::TemplateParameterListsAreEqual(TemplateParameterList *New,
return false;
}
- if (!MatchTemplateParameterKind(*this, *NewParm, *OldParm, Complain,
- Kind, TemplateArgLoc))
+ if (!MatchTemplateParameterKind(*this, *NewParm, NewInstFrom, *OldParm,
+ OldInstFrom, Complain, Kind,
+ TemplateArgLoc))
return false;
++NewParm;
@@ -7866,8 +8463,9 @@ Sema::TemplateParameterListsAreEqual(TemplateParameterList *New,
// template parameter pack in P (ignoring whether those template
// parameters are template parameter packs).
for (; NewParm != NewParmEnd; ++NewParm) {
- if (!MatchTemplateParameterKind(*this, *NewParm, *OldParm, Complain,
- Kind, TemplateArgLoc))
+ if (!MatchTemplateParameterKind(*this, *NewParm, NewInstFrom, *OldParm,
+ OldInstFrom, Complain, Kind,
+ TemplateArgLoc))
return false;
}
}
@@ -7881,7 +8479,8 @@ Sema::TemplateParameterListsAreEqual(TemplateParameterList *New,
return false;
}
- if (Kind != TPL_TemplateTemplateArgumentMatch) {
+ if (Kind != TPL_TemplateTemplateArgumentMatch &&
+ Kind != TPL_TemplateParamsEquivalent) {
const Expr *NewRC = New->getRequiresClause();
const Expr *OldRC = Old->getRequiresClause();
@@ -7899,10 +8498,8 @@ Sema::TemplateParameterListsAreEqual(TemplateParameterList *New,
}
if (NewRC) {
- llvm::FoldingSetNodeID OldRCID, NewRCID;
- OldRC->Profile(OldRCID, Context, /*Canonical=*/true);
- NewRC->Profile(NewRCID, Context, /*Canonical=*/true);
- if (OldRCID != NewRCID) {
+ if (!AreConstraintExpressionsEqual(OldInstFrom, OldRC, NewInstFrom,
+ NewRC)) {
if (Complain)
Diagnose();
return false;
@@ -8179,9 +8776,7 @@ static bool CheckNonTypeTemplatePartialSpecializationArgs(
S.Diag(IsDefaultArgument ? TemplateNameLoc : ArgExpr->getBeginLoc(),
diag::err_dependent_typed_non_type_arg_in_partial_spec)
<< Param->getType();
- S.Diag(Param->getLocation(), diag::note_template_param_here)
- << (IsDefaultArgument ? ParamUseRange : SourceRange())
- << ParamUseRange;
+ S.NoteTemplateParameterLocation(*Param);
return true;
}
}
@@ -8324,7 +8919,8 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
// Check that the specialization uses the same tag kind as the
// original template.
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
- assert(Kind != TTK_Enum && "Invalid enum tag in class template spec!");
+ assert(Kind != TagTypeKind::Enum &&
+ "Invalid enum tag in class template spec!");
if (!isAcceptableTagRedeclaration(ClassTemplate->getTemplatedDecl(),
Kind, TUK == TUK_Definition, KWLoc,
ClassTemplate->getIdentifier())) {
@@ -8344,29 +8940,32 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
// Check for unexpanded parameter packs in any of the template arguments.
for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
if (DiagnoseUnexpandedParameterPack(TemplateArgs[I],
- UPPC_PartialSpecialization))
+ isPartialSpecialization
+ ? UPPC_PartialSpecialization
+ : UPPC_ExplicitSpecialization))
return true;
// Check that the template argument list is well-formed for this
// template.
- SmallVector<TemplateArgument, 4> Converted;
- if (CheckTemplateArgumentList(ClassTemplate, TemplateNameLoc,
- TemplateArgs, false, Converted,
- /*UpdateArgsWithConversion=*/true))
+ SmallVector<TemplateArgument, 4> SugaredConverted, CanonicalConverted;
+ if (CheckTemplateArgumentList(ClassTemplate, TemplateNameLoc, TemplateArgs,
+ false, SugaredConverted, CanonicalConverted,
+ /*UpdateArgsWithConversions=*/true))
return true;
// Find the class template (partial) specialization declaration that
// corresponds to these arguments.
if (isPartialSpecialization) {
if (CheckTemplatePartialSpecializationArgs(TemplateNameLoc, ClassTemplate,
- TemplateArgs.size(), Converted))
+ TemplateArgs.size(),
+ CanonicalConverted))
return true;
// FIXME: Move this to CheckTemplatePartialSpecializationArgs so we
// also do it during instantiation.
if (!Name.isDependent() &&
- !TemplateSpecializationType::anyDependentTemplateArguments(TemplateArgs,
- Converted)) {
+ !TemplateSpecializationType::anyDependentTemplateArguments(
+ TemplateArgs, CanonicalConverted)) {
Diag(TemplateNameLoc, diag::err_partial_spec_fully_specialized)
<< ClassTemplate->getDeclName();
isPartialSpecialization = false;
@@ -8377,11 +8976,10 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
ClassTemplateSpecializationDecl *PrevDecl = nullptr;
if (isPartialSpecialization)
- PrevDecl = ClassTemplate->findPartialSpecialization(Converted,
- TemplateParams,
- InsertPos);
+ PrevDecl = ClassTemplate->findPartialSpecialization(
+ CanonicalConverted, TemplateParams, InsertPos);
else
- PrevDecl = ClassTemplate->findSpecialization(Converted, InsertPos);
+ PrevDecl = ClassTemplate->findSpecialization(CanonicalConverted, InsertPos);
ClassTemplateSpecializationDecl *Specialization = nullptr;
@@ -8400,7 +8998,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
// arguments of the class template partial specialization.
TemplateName CanonTemplate = Context.getCanonicalTemplateName(Name);
CanonType = Context.getTemplateSpecializationType(CanonTemplate,
- Converted);
+ CanonicalConverted);
if (Context.hasSameType(CanonType,
ClassTemplate->getInjectedClassNameSpecialization()) &&
@@ -8430,16 +9028,11 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
// Create a new class template partial specialization declaration node.
ClassTemplatePartialSpecializationDecl *PrevPartial
= cast_or_null<ClassTemplatePartialSpecializationDecl>(PrevDecl);
- ClassTemplatePartialSpecializationDecl *Partial
- = ClassTemplatePartialSpecializationDecl::Create(Context, Kind,
- ClassTemplate->getDeclContext(),
- KWLoc, TemplateNameLoc,
- TemplateParams,
- ClassTemplate,
- Converted,
- TemplateArgs,
- CanonType,
- PrevPartial);
+ ClassTemplatePartialSpecializationDecl *Partial =
+ ClassTemplatePartialSpecializationDecl::Create(
+ Context, Kind, ClassTemplate->getDeclContext(), KWLoc,
+ TemplateNameLoc, TemplateParams, ClassTemplate, CanonicalConverted,
+ TemplateArgs, CanonType, PrevPartial);
SetNestedNameSpecifier(*this, Partial, SS);
if (TemplateParameterLists.size() > 1 && SS.isSet()) {
Partial->setTemplateParameterListsInfo(
@@ -8459,13 +9052,9 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
} else {
// Create a new class template specialization declaration node for
// this explicit specialization or friend declaration.
- Specialization
- = ClassTemplateSpecializationDecl::Create(Context, Kind,
- ClassTemplate->getDeclContext(),
- KWLoc, TemplateNameLoc,
- ClassTemplate,
- Converted,
- PrevDecl);
+ Specialization = ClassTemplateSpecializationDecl::Create(
+ Context, Kind, ClassTemplate->getDeclContext(), KWLoc, TemplateNameLoc,
+ ClassTemplate, CanonicalConverted, PrevDecl);
SetNestedNameSpecifier(*this, Specialization, SS);
if (TemplateParameterLists.size() > 0) {
Specialization->setTemplateParameterListsInfo(Context,
@@ -8477,8 +9066,8 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
if (CurContext->isDependentContext()) {
TemplateName CanonTemplate = Context.getCanonicalTemplateName(Name);
- CanonType = Context.getTemplateSpecializationType(
- CanonTemplate, Converted);
+ CanonType = Context.getTemplateSpecializationType(CanonTemplate,
+ CanonicalConverted);
} else {
CanonType = Context.getTypeDeclType(Specialization);
}
@@ -8622,17 +9211,33 @@ Decl *Sema::ActOnConceptDefinition(Scope *S,
return nullptr;
}
- if (TemplateParameterLists.front()->size() == 0) {
+ TemplateParameterList *Params = TemplateParameterLists.front();
+
+ if (Params->size() == 0) {
Diag(NameLoc, diag::err_concept_no_parameters);
return nullptr;
}
+ // Ensure that the parameter pack, if present, is the last parameter in the
+ // template.
+ for (TemplateParameterList::const_iterator ParamIt = Params->begin(),
+ ParamEnd = Params->end();
+ ParamIt != ParamEnd; ++ParamIt) {
+ Decl const *Param = *ParamIt;
+ if (Param->isParameterPack()) {
+ if (++ParamIt == ParamEnd)
+ break;
+ Diag(Param->getLocation(),
+ diag::err_template_param_pack_must_be_last_template_parameter);
+ return nullptr;
+ }
+ }
+
if (DiagnoseUnexpandedParameterPack(ConstraintExpr))
return nullptr;
- ConceptDecl *NewDecl = ConceptDecl::Create(Context, DC, NameLoc, Name,
- TemplateParameterLists.front(),
- ConstraintExpr);
+ ConceptDecl *NewDecl =
+ ConceptDecl::Create(Context, DC, NameLoc, Name, Params, ConstraintExpr);
if (NewDecl->hasAssociatedConstraints()) {
// C++2a [temp.concept]p4:
@@ -8644,28 +9249,67 @@ Decl *Sema::ActOnConceptDefinition(Scope *S,
// Check for conflicting previous declaration.
DeclarationNameInfo NameInfo(NewDecl->getDeclName(), NameLoc);
LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
- ForVisibleRedeclaration);
+ forRedeclarationInCurContext());
LookupName(Previous, S);
-
FilterLookupForScope(Previous, DC, S, /*ConsiderLinkage=*/false,
/*AllowInlineNamespace*/false);
- if (!Previous.empty()) {
- auto *Old = Previous.getRepresentativeDecl();
- Diag(NameLoc, isa<ConceptDecl>(Old) ? diag::err_redefinition :
- diag::err_redefinition_different_kind) << NewDecl->getDeclName();
- Diag(Old->getLocation(), diag::note_previous_definition);
- }
+ bool AddToScope = true;
+ CheckConceptRedefinition(NewDecl, Previous, AddToScope);
ActOnDocumentableDecl(NewDecl);
- PushOnScopeChains(NewDecl, S);
+ if (AddToScope)
+ PushOnScopeChains(NewDecl, S);
return NewDecl;
}
+void Sema::CheckConceptRedefinition(ConceptDecl *NewDecl,
+ LookupResult &Previous, bool &AddToScope) {
+ AddToScope = true;
+
+ if (Previous.empty())
+ return;
+
+ auto *OldConcept = dyn_cast<ConceptDecl>(Previous.getRepresentativeDecl()->getUnderlyingDecl());
+ if (!OldConcept) {
+ auto *Old = Previous.getRepresentativeDecl();
+ Diag(NewDecl->getLocation(), diag::err_redefinition_different_kind)
+ << NewDecl->getDeclName();
+ notePreviousDefinition(Old, NewDecl->getLocation());
+ AddToScope = false;
+ return;
+ }
+ // Check if we can merge with a concept declaration.
+ bool IsSame = Context.isSameEntity(NewDecl, OldConcept);
+ if (!IsSame) {
+ Diag(NewDecl->getLocation(), diag::err_redefinition_different_concept)
+ << NewDecl->getDeclName();
+ notePreviousDefinition(OldConcept, NewDecl->getLocation());
+ AddToScope = false;
+ return;
+ }
+ if (hasReachableDefinition(OldConcept) &&
+ IsRedefinitionInModule(NewDecl, OldConcept)) {
+ Diag(NewDecl->getLocation(), diag::err_redefinition)
+ << NewDecl->getDeclName();
+ notePreviousDefinition(OldConcept, NewDecl->getLocation());
+ AddToScope = false;
+ return;
+ }
+ if (!Previous.isSingleResult()) {
+ // FIXME: we should produce an error in case of ambig and failed lookups.
+ // Other decls (e.g. namespaces) also have this shortcoming.
+ return;
+ }
+ // We unwrap canonical decl late to check for module visibility.
+ Context.setPrimaryMergedDecl(NewDecl, OldConcept->getCanonicalDecl());
+}
+
/// \brief Strips various properties off an implicit instantiation
/// that has just been explicitly specialized.
-static void StripImplicitInstantiation(NamedDecl *D) {
- D->dropAttr<DLLImportAttr>();
- D->dropAttr<DLLExportAttr>();
+static void StripImplicitInstantiation(NamedDecl *D, bool MinGW) {
+ if (MinGW || (isa<FunctionDecl>(D) &&
+ cast<FunctionDecl>(D)->isFunctionTemplateSpecialization()))
+ D->dropAttrs<DLLImportAttr, DLLExportAttr>();
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
FD->setInlineSpecified(false);
@@ -8702,7 +9346,7 @@ static SourceLocation DiagLocForExplicitInstantiation(
///
/// \param PrevTSK the kind of the old explicit specialization or instantiatin.
///
-/// \param PrevPointOfInstantiation if valid, indicates where the previus
+/// \param PrevPointOfInstantiation if valid, indicates where the previous
/// declaration was instantiated (either implicitly or explicitly).
///
/// \param HasNoEffect will be set to true to indicate that the new
@@ -8740,11 +9384,13 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
if (PrevPointOfInstantiation.isInvalid()) {
// The declaration itself has not actually been instantiated, so it is
// still okay to specialize it.
- StripImplicitInstantiation(PrevDecl);
+ StripImplicitInstantiation(
+ PrevDecl,
+ Context.getTargetInfo().getTriple().isWindowsGNUEnvironment());
return false;
}
// Fall through
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case TSK_ExplicitInstantiationDeclaration:
case TSK_ExplicitInstantiationDefinition:
@@ -8889,10 +9535,9 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
///
/// There really isn't any useful analysis we can do here, so we
/// just store the information.
-bool
-Sema::CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
- const TemplateArgumentListInfo &ExplicitTemplateArgs,
- LookupResult &Previous) {
+bool Sema::CheckDependentFunctionTemplateSpecialization(
+ FunctionDecl *FD, const TemplateArgumentListInfo *ExplicitTemplateArgs,
+ LookupResult &Previous) {
// Remove anything from Previous that isn't a function template in
// the correct context.
DeclContext *FDLookupContext = FD->getDeclContext()->getRedeclContext();
@@ -8916,13 +9561,14 @@ Sema::CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
}
F.done();
+ bool IsFriend = FD->getFriendObjectKind() != Decl::FOK_None;
if (Previous.empty()) {
- Diag(FD->getLocation(),
- diag::err_dependent_function_template_spec_no_match);
+ Diag(FD->getLocation(), diag::err_dependent_function_template_spec_no_match)
+ << IsFriend;
for (auto &P : DiscardedCandidates)
Diag(P.second->getLocation(),
diag::note_dependent_function_template_spec_discard_reason)
- << P.first;
+ << P.first << IsFriend;
return true;
}
@@ -9448,7 +10094,7 @@ static bool CheckExplicitInstantiation(Sema &S, NamedDecl *D,
// An explicit instantiation declaration shall not name a specialization of
// a template with internal linkage.
if (TSK == TSK_ExplicitInstantiationDeclaration &&
- D->getFormalLinkage() == InternalLinkage) {
+ D->getFormalLinkage() == Linkage::Internal) {
S.Diag(InstLoc, diag::err_explicit_instantiation_internal_linkage) << D;
return true;
}
@@ -9520,14 +10166,15 @@ DeclResult Sema::ActOnExplicitInstantiation(
// Check that the specialization uses the same tag kind as the
// original template.
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
- assert(Kind != TTK_Enum &&
+ assert(Kind != TagTypeKind::Enum &&
"Invalid enum tag in class template explicit instantiation!");
ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(TD);
if (!ClassTemplate) {
NonTagKind NTK = getNonTagTypeDeclKind(TD, Kind);
- Diag(TemplateNameLoc, diag::err_tag_reference_non_tag) << TD << NTK << Kind;
+ Diag(TemplateNameLoc, diag::err_tag_reference_non_tag)
+ << TD << NTK << llvm::to_underlying(Kind);
Diag(TD->getLocation(), diag::note_previous_use);
return true;
}
@@ -9601,17 +10248,17 @@ DeclResult Sema::ActOnExplicitInstantiation(
// Check that the template argument list is well-formed for this
// template.
- SmallVector<TemplateArgument, 4> Converted;
- if (CheckTemplateArgumentList(ClassTemplate, TemplateNameLoc,
- TemplateArgs, false, Converted,
- /*UpdateArgsWithConversion=*/true))
+ SmallVector<TemplateArgument, 4> SugaredConverted, CanonicalConverted;
+ if (CheckTemplateArgumentList(ClassTemplate, TemplateNameLoc, TemplateArgs,
+ false, SugaredConverted, CanonicalConverted,
+ /*UpdateArgsWithConversions=*/true))
return true;
// Find the class template specialization declaration that
// corresponds to these arguments.
void *InsertPos = nullptr;
- ClassTemplateSpecializationDecl *PrevDecl
- = ClassTemplate->findSpecialization(Converted, InsertPos);
+ ClassTemplateSpecializationDecl *PrevDecl =
+ ClassTemplate->findSpecialization(CanonicalConverted, InsertPos);
TemplateSpecializationKind PrevDecl_TSK
= PrevDecl ? PrevDecl->getTemplateSpecializationKind() : TSK_Undeclared;
@@ -9668,15 +10315,22 @@ DeclResult Sema::ActOnExplicitInstantiation(
if (!Specialization) {
// Create a new class template specialization declaration node for
// this explicit specialization.
- Specialization
- = ClassTemplateSpecializationDecl::Create(Context, Kind,
- ClassTemplate->getDeclContext(),
- KWLoc, TemplateNameLoc,
- ClassTemplate,
- Converted,
- PrevDecl);
+ Specialization = ClassTemplateSpecializationDecl::Create(
+ Context, Kind, ClassTemplate->getDeclContext(), KWLoc, TemplateNameLoc,
+ ClassTemplate, CanonicalConverted, PrevDecl);
SetNestedNameSpecifier(*this, Specialization, SS);
+ // A MSInheritanceAttr attached to the previous declaration must be
+ // propagated to the new node prior to instantiation.
+ if (PrevDecl) {
+ if (const auto *A = PrevDecl->getAttr<MSInheritanceAttr>()) {
+ auto *Clone = A->clone(getASTContext());
+ Clone->setInherited(true);
+ Specialization->addAttr(Clone);
+ Consumer.AssignInheritanceModel(Specialization);
+ }
+ }
+
if (!HasNoEffect && !PrevDecl) {
// Insert the new specialization.
ClassTemplate->AddSpecialization(Specialization, InsertPos);
@@ -9749,7 +10403,7 @@ DeclResult Sema::ActOnExplicitInstantiation(
if (!getDLLAttr(Def) && getDLLAttr(Specialization) &&
(Context.getTargetInfo().shouldDLLImportComdatSymbols() &&
- !Context.getTargetInfo().getTriple().isPS4CPU())) {
+ !Context.getTargetInfo().getTriple().isPS())) {
// An explicit instantiation definition can add a dll attribute to a
// template with a previous instantiation declaration. MinGW doesn't
// allow this.
@@ -9767,7 +10421,7 @@ DeclResult Sema::ActOnExplicitInstantiation(
!PreviouslyDLLExported && Specialization->hasAttr<DLLExportAttr>();
if (Old_TSK == TSK_ImplicitInstantiation && NewlyDLLExported &&
(Context.getTargetInfo().shouldDLLImportComdatSymbols() &&
- !Context.getTargetInfo().getTriple().isPS4CPU())) {
+ !Context.getTargetInfo().getTriple().isPS())) {
// An explicit instantiation definition can add a dll attribute to a
// template with a previous implicit instantiation. MinGW doesn't allow
// this. We limit clang to only adding dllexport, to avoid potentially
@@ -9793,11 +10447,6 @@ DeclResult Sema::ActOnExplicitInstantiation(
dllExportImportClassTemplateSpecialization(*this, Def);
}
- if (Def->hasAttr<MSInheritanceAttr>()) {
- Specialization->addAttr(Def->getAttr<MSInheritanceAttr>());
- Consumer.AssignInheritanceModel(Specialization);
- }
-
// Set the template specialization kind. Make sure it is set before
// instantiating the members which will trigger ASTConsumer callbacks.
Specialization->setTemplateSpecializationKind(TSK);
@@ -9821,13 +10470,11 @@ Sema::ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
bool Owned = false;
bool IsDependent = false;
- Decl *TagD = ActOnTag(S, TagSpec, Sema::TUK_Reference,
- KWLoc, SS, Name, NameLoc, Attr, AS_none,
- /*ModulePrivateLoc=*/SourceLocation(),
- MultiTemplateParamsArg(), Owned, IsDependent,
- SourceLocation(), false, TypeResult(),
- /*IsTypeSpecifier*/false,
- /*IsTemplateParamOrArg*/false);
+ Decl *TagD = ActOnTag(S, TagSpec, Sema::TUK_Reference, KWLoc, SS, Name,
+ NameLoc, Attr, AS_none, /*ModulePrivateLoc=*/SourceLocation(),
+ MultiTemplateParamsArg(), Owned, IsDependent, SourceLocation(),
+ false, TypeResult(), /*IsTypeSpecifier*/ false,
+ /*IsTemplateParamOrArg*/ false, /*OOK=*/OOK_Outside).get();
assert(!IsDependent && "explicit instantiation of dependent name not yet handled");
if (!TagD)
@@ -9951,7 +10598,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
S = S->getParent();
// Determine the type of the declaration.
- TypeSourceInfo *T = GetTypeForDeclarator(D, S);
+ TypeSourceInfo *T = GetTypeForDeclarator(D);
QualType R = T->getType();
if (R.isNull())
return true;
@@ -10125,7 +10772,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
}
// Check the new variable specialization against the parsed input.
- if (PrevTemplate && Prev && !Context.hasSameType(Prev->getType(), R)) {
+ if (PrevTemplate && !Context.hasSameType(Prev->getType(), R)) {
Diag(T->getTypeLoc().getBeginLoc(),
diag::err_invalid_var_template_spec_type)
<< 0 << PrevTemplate << R << Prev->getType();
@@ -10352,7 +10999,8 @@ Sema::ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
if (TUK == TUK_Declaration || TUK == TUK_Definition) {
Diag(NameLoc, diag::err_dependent_tag_decl)
- << (TUK == TUK_Definition) << Kind << SS.getRange();
+ << (TUK == TUK_Definition) << llvm::to_underlying(Kind)
+ << SS.getRange();
return true;
}
@@ -10369,10 +11017,11 @@ Sema::ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
return CreateParsedType(Result, TLB.getTypeSourceInfo(Context, Result));
}
-TypeResult
-Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
- const CXXScopeSpec &SS, const IdentifierInfo &II,
- SourceLocation IdLoc) {
+TypeResult Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
+ const CXXScopeSpec &SS,
+ const IdentifierInfo &II,
+ SourceLocation IdLoc,
+ ImplicitTypenameContext IsImplicitTypename) {
if (SS.isInvalid())
return true;
@@ -10385,9 +11034,13 @@ Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
TypeSourceInfo *TSI = nullptr;
- QualType T = CheckTypenameType(TypenameLoc.isValid()? ETK_Typename : ETK_None,
- TypenameLoc, QualifierLoc, II, IdLoc, &TSI,
- /*DeducedTSTContext=*/true);
+ QualType T =
+ CheckTypenameType((TypenameLoc.isValid() ||
+ IsImplicitTypename == ImplicitTypenameContext::Yes)
+ ? ElaboratedTypeKeyword::Typename
+ : ElaboratedTypeKeyword::None,
+ TypenameLoc, QualifierLoc, II, IdLoc, &TSI,
+ /*DeducedTSTContext=*/true);
if (T.isNull())
return true;
return CreateParsedType(T, TSI);
@@ -10433,10 +11086,9 @@ Sema::ActOnTypenameType(Scope *S,
// Construct a dependent template specialization type.
assert(DTN && "dependent template has non-dependent name?");
assert(DTN->getQualifier() == SS.getScopeRep());
- QualType T = Context.getDependentTemplateSpecializationType(ETK_Typename,
- DTN->getQualifier(),
- DTN->getIdentifier(),
- TemplateArgs);
+ QualType T = Context.getDependentTemplateSpecializationType(
+ ElaboratedTypeKeyword::Typename, DTN->getQualifier(),
+ DTN->getIdentifier(), TemplateArgs.arguments());
// Create source-location information for this type.
TypeLocBuilder Builder;
@@ -10468,7 +11120,8 @@ Sema::ActOnTypenameType(Scope *S,
for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
- T = Context.getElaboratedType(ETK_Typename, SS.getScopeRep(), T);
+ T = Context.getElaboratedType(ElaboratedTypeKeyword::Typename,
+ SS.getScopeRep(), T);
ElaboratedTypeLoc TL = Builder.push<ElaboratedTypeLoc>(T);
TL.setElaboratedKeywordLoc(TypenameLoc);
TL.setQualifierLoc(SS.getWithLocInContext(Context));
@@ -10645,7 +11298,7 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
}
// Fall through to create a dependent typename type, from which we can recover
// better.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case LookupResult::NotFoundInCurrentInstantiation:
// Okay, it's a member of an unknown instantiation.
@@ -10671,7 +11324,7 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
// ignore functions, but that appears to be an oversight.
auto *LookupRD = dyn_cast_or_null<CXXRecordDecl>(Ctx);
auto *FoundRD = dyn_cast<CXXRecordDecl>(Type);
- if (Keyword == ETK_Typename && LookupRD && FoundRD &&
+ if (Keyword == ElaboratedTypeKeyword::Typename && LookupRD && FoundRD &&
FoundRD->isInjectedClassName() &&
declaresSameEntity(LookupRD, cast<Decl>(FoundRD->getParent())))
Diag(IILoc, diag::ext_out_of_line_qualified_id_type_names_constructor)
@@ -10701,7 +11354,7 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
else
Diag(IILoc, diag::err_deduced_tst)
<< (int)getTemplateNameKindForDiagnostics(TemplateName(TD));
- Diag(TD->getLocation(), diag::note_template_decl_here);
+ NoteTemplateLocation(*TD);
return QualType();
}
return Context.getElaboratedType(
@@ -10882,7 +11535,7 @@ bool Sema::RebuildTemplateParamsInCurrentInstantiation(
// - an identifier associated by name lookup with a non-type
// template-parameter declared with a type that contains a
// placeholder type (7.1.7.4),
- NewTSI = SubstAutoTypeSourceInfo(NewTSI, Context.DependentTy);
+ NewTSI = SubstAutoTypeSourceInfoDependent(NewTSI);
}
if (NewTSI != NTTP->getTypeSourceInfo()) {
@@ -10928,9 +11581,9 @@ Sema::getTemplateArgumentBindingsText(const TemplateParameterList *Params,
}
Out << " = ";
- Args[I].print(
- getPrintingPolicy(), Out,
- TemplateParameterList::shouldIncludeTypeForArgument(Params, I));
+ Args[I].print(getPrintingPolicy(), Out,
+ TemplateParameterList::shouldIncludeTypeForArgument(
+ getPrintingPolicy(), Params, I));
}
Out << ']';
@@ -10947,6 +11600,7 @@ void Sema::MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
// Take tokens to avoid allocations
LPT->Toks.swap(Toks);
LPT->D = FnD;
+ LPT->FPO = getCurFPFeatures();
LateParsedTemplateMap.insert(std::make_pair(FD, std::move(LPT)));
FD->setLateTemplateParsed(true);
@@ -10995,10 +11649,12 @@ class ExplicitSpecializationVisibilityChecker {
Sema &S;
SourceLocation Loc;
llvm::SmallVector<Module *, 8> Modules;
+ Sema::AcceptableKind Kind;
public:
- ExplicitSpecializationVisibilityChecker(Sema &S, SourceLocation Loc)
- : S(S), Loc(Loc) {}
+ ExplicitSpecializationVisibilityChecker(Sema &S, SourceLocation Loc,
+ Sema::AcceptableKind Kind)
+ : S(S), Loc(Loc), Kind(Kind) {}
void check(NamedDecl *ND) {
if (auto *FD = dyn_cast<FunctionDecl>(ND))
@@ -11026,6 +11682,23 @@ private:
S.diagnoseMissingImport(Loc, D, D->getLocation(), Modules, Kind, Recover);
}
+ bool CheckMemberSpecialization(const NamedDecl *D) {
+ return Kind == Sema::AcceptableKind::Visible
+ ? S.hasVisibleMemberSpecialization(D)
+ : S.hasReachableMemberSpecialization(D);
+ }
+
+ bool CheckExplicitSpecialization(const NamedDecl *D) {
+ return Kind == Sema::AcceptableKind::Visible
+ ? S.hasVisibleExplicitSpecialization(D)
+ : S.hasReachableExplicitSpecialization(D);
+ }
+
+ bool CheckDeclaration(const NamedDecl *D) {
+ return Kind == Sema::AcceptableKind::Visible ? S.hasVisibleDeclaration(D)
+ : S.hasReachableDeclaration(D);
+ }
+
// Check a specific declaration. There are three problematic cases:
//
// 1) The declaration is an explicit specialization of a template
@@ -11042,10 +11715,9 @@ private:
void checkImpl(SpecDecl *Spec) {
bool IsHiddenExplicitSpecialization = false;
if (Spec->getTemplateSpecializationKind() == TSK_ExplicitSpecialization) {
- IsHiddenExplicitSpecialization =
- Spec->getMemberSpecializationInfo()
- ? !S.hasVisibleMemberSpecialization(Spec, &Modules)
- : !S.hasVisibleExplicitSpecialization(Spec, &Modules);
+ IsHiddenExplicitSpecialization = Spec->getMemberSpecializationInfo()
+ ? !CheckMemberSpecialization(Spec)
+ : !CheckExplicitSpecialization(Spec);
} else {
checkInstantiated(Spec);
}
@@ -11069,7 +11741,7 @@ private:
checkTemplate(TD);
else if (auto *TD =
From.dyn_cast<ClassTemplatePartialSpecializationDecl *>()) {
- if (!S.hasVisibleDeclaration(TD))
+ if (!CheckDeclaration(TD))
diagnose(TD, true);
checkTemplate(TD);
}
@@ -11085,7 +11757,7 @@ private:
checkTemplate(TD);
else if (auto *TD =
From.dyn_cast<VarTemplatePartialSpecializationDecl *>()) {
- if (!S.hasVisibleDeclaration(TD))
+ if (!CheckDeclaration(TD))
diagnose(TD, true);
checkTemplate(TD);
}
@@ -11096,7 +11768,7 @@ private:
template<typename TemplDecl>
void checkTemplate(TemplDecl *TD) {
if (TD->isMemberSpecialization()) {
- if (!S.hasVisibleMemberSpecialization(TD, &Modules))
+ if (!CheckMemberSpecialization(TD))
diagnose(TD->getMostRecentDecl(), false);
}
}
@@ -11107,5 +11779,39 @@ void Sema::checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec) {
if (!getLangOpts().Modules)
return;
- ExplicitSpecializationVisibilityChecker(*this, Loc).check(Spec);
+ ExplicitSpecializationVisibilityChecker(*this, Loc,
+ Sema::AcceptableKind::Visible)
+ .check(Spec);
+}
+
+void Sema::checkSpecializationReachability(SourceLocation Loc,
+ NamedDecl *Spec) {
+ if (!getLangOpts().CPlusPlusModules)
+ return checkSpecializationVisibility(Loc, Spec);
+
+ ExplicitSpecializationVisibilityChecker(*this, Loc,
+ Sema::AcceptableKind::Reachable)
+ .check(Spec);
+}
+
+/// Returns the top most location responsible for the definition of \p N.
+/// If \p N is a a template specialization, this is the location
+/// of the top of the instantiation stack.
+/// Otherwise, the location of \p N is returned.
+SourceLocation Sema::getTopMostPointOfInstantiation(const NamedDecl *N) const {
+ if (!getLangOpts().CPlusPlus || CodeSynthesisContexts.empty())
+ return N->getLocation();
+ if (const auto *FD = dyn_cast<FunctionDecl>(N)) {
+ if (!FD->isFunctionTemplateSpecialization())
+ return FD->getLocation();
+ } else if (!isa<ClassTemplateSpecializationDecl,
+ VarTemplateSpecializationDecl>(N)) {
+ return N->getLocation();
+ }
+ for (const CodeSynthesisContext &CSC : CodeSynthesisContexts) {
+ if (!CSC.isInstantiationRecord() || CSC.PointOfInstantiation.isInvalid())
+ continue;
+ return CSC.PointOfInstantiation;
+ }
+ return N->getLocation();
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
index 08e798304b0c..e9e7ab5bb669 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -10,7 +10,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/TemplateDeduction.h"
#include "TreeTransform.h"
#include "TypeLocBuilder.h"
#include "clang/AST/ASTContext.h"
@@ -37,15 +36,16 @@
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/Template.h"
+#include "clang/Sema/TemplateDeduction.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
@@ -54,7 +54,9 @@
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
+#include <optional>
#include <tuple>
+#include <type_traits>
#include <utility>
namespace clang {
@@ -131,30 +133,16 @@ static bool hasSameExtendedValue(llvm::APSInt X, llvm::APSInt Y) {
return X == Y;
}
-static Sema::TemplateDeductionResult
-DeduceTemplateArguments(Sema &S,
- TemplateParameterList *TemplateParams,
- const TemplateArgument &Param,
- TemplateArgument Arg,
- TemplateDeductionInfo &Info,
- SmallVectorImpl<DeducedTemplateArgument> &Deduced);
-
-static Sema::TemplateDeductionResult
-DeduceTemplateArgumentsByTypeMatch(Sema &S,
- TemplateParameterList *TemplateParams,
- QualType Param,
- QualType Arg,
- TemplateDeductionInfo &Info,
- SmallVectorImpl<DeducedTemplateArgument> &
- Deduced,
- unsigned TDF,
- bool PartialOrdering = false,
- bool DeducedFromArrayBound = false);
+static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
+ Sema &S, TemplateParameterList *TemplateParams, QualType Param,
+ QualType Arg, TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned TDF,
+ bool PartialOrdering = false, bool DeducedFromArrayBound = false);
static Sema::TemplateDeductionResult
DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
- ArrayRef<TemplateArgument> Params,
- ArrayRef<TemplateArgument> Args,
+ ArrayRef<TemplateArgument> Ps,
+ ArrayRef<TemplateArgument> As,
TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
bool NumberOfArgumentsMustMatch);
@@ -224,7 +212,8 @@ static bool isSameDeclaration(Decl *X, Decl *Y) {
static DeducedTemplateArgument
checkDeducedTemplateArguments(ASTContext &Context,
const DeducedTemplateArgument &X,
- const DeducedTemplateArgument &Y) {
+ const DeducedTemplateArgument &Y,
+ bool AggregateCandidateDeduction = false) {
// We have no deduction for one or both of the arguments; they're compatible.
if (X.isNull())
return Y;
@@ -249,11 +238,13 @@ checkDeducedTemplateArguments(ASTContext &Context,
case TemplateArgument::Null:
llvm_unreachable("Non-deduced template arguments handled above");
- case TemplateArgument::Type:
+ case TemplateArgument::Type: {
// If two template type arguments have the same type, they're compatible.
- if (Y.getKind() == TemplateArgument::Type &&
- Context.hasSameType(X.getAsType(), Y.getAsType()))
- return X;
+ QualType TX = X.getAsType(), TY = Y.getAsType();
+ if (Y.getKind() == TemplateArgument::Type && Context.hasSameType(TX, TY))
+ return DeducedTemplateArgument(Context.getCommonSugaredType(TX, TY),
+ X.wasDeducedFromArrayBound() ||
+ Y.wasDeducedFromArrayBound());
// If one of the two arguments was deduced from an array bound, the other
// supersedes it.
@@ -262,6 +253,7 @@ checkDeducedTemplateArguments(ASTContext &Context,
// The arguments are not compatible.
return DeducedTemplateArgument();
+ }
case TemplateArgument::Integral:
// If we deduced a constant in one case and either a dependent expression or
@@ -276,6 +268,16 @@ checkDeducedTemplateArguments(ASTContext &Context,
// All other combinations are incompatible.
return DeducedTemplateArgument();
+ case TemplateArgument::StructuralValue:
+ // If we deduced a value and a dependent expression, keep the value.
+ if (Y.getKind() == TemplateArgument::Expression ||
+ (Y.getKind() == TemplateArgument::StructuralValue &&
+ X.structurallyEquals(Y)))
+ return X;
+
+ // All other combinations are incompatible.
+ return DeducedTemplateArgument();
+
case TemplateArgument::Template:
if (Y.getKind() == TemplateArgument::Template &&
Context.hasSameTemplateName(X.getAsTemplate(), Y.getAsTemplate()))
@@ -339,7 +341,9 @@ checkDeducedTemplateArguments(ASTContext &Context,
// If we deduced a null pointer and a dependent expression, keep the
// null pointer.
if (Y.getKind() == TemplateArgument::Expression)
- return X;
+ return TemplateArgument(Context.getCommonSugaredType(
+ X.getNullPtrType(), Y.getAsExpr()->getType()),
+ true);
// If we deduced a null pointer and an integral constant, keep the
// integral constant.
@@ -348,27 +352,33 @@ checkDeducedTemplateArguments(ASTContext &Context,
// If we deduced two null pointers, they are the same.
if (Y.getKind() == TemplateArgument::NullPtr)
- return X;
+ return TemplateArgument(
+ Context.getCommonSugaredType(X.getNullPtrType(), Y.getNullPtrType()),
+ true);
// All other combinations are incompatible.
return DeducedTemplateArgument();
case TemplateArgument::Pack: {
if (Y.getKind() != TemplateArgument::Pack ||
- X.pack_size() != Y.pack_size())
+ (!AggregateCandidateDeduction && X.pack_size() != Y.pack_size()))
return DeducedTemplateArgument();
llvm::SmallVector<TemplateArgument, 8> NewPack;
- for (TemplateArgument::pack_iterator XA = X.pack_begin(),
- XAEnd = X.pack_end(),
- YA = Y.pack_begin();
+ for (TemplateArgument::pack_iterator
+ XA = X.pack_begin(),
+ XAEnd = X.pack_end(), YA = Y.pack_begin(), YAEnd = Y.pack_end();
XA != XAEnd; ++XA, ++YA) {
- TemplateArgument Merged = checkDeducedTemplateArguments(
- Context, DeducedTemplateArgument(*XA, X.wasDeducedFromArrayBound()),
- DeducedTemplateArgument(*YA, Y.wasDeducedFromArrayBound()));
- if (Merged.isNull() && !(XA->isNull() && YA->isNull()))
- return DeducedTemplateArgument();
- NewPack.push_back(Merged);
+ if (YA != YAEnd) {
+ TemplateArgument Merged = checkDeducedTemplateArguments(
+ Context, DeducedTemplateArgument(*XA, X.wasDeducedFromArrayBound()),
+ DeducedTemplateArgument(*YA, Y.wasDeducedFromArrayBound()));
+ if (Merged.isNull() && !(XA->isNull() && YA->isNull()))
+ return DeducedTemplateArgument();
+ NewPack.push_back(Merged);
+ } else {
+ NewPack.push_back(*XA);
+ }
}
return DeducedTemplateArgument(
@@ -547,9 +557,9 @@ DeduceTemplateArguments(Sema &S,
///
/// \param TemplateParams the template parameters that we are deducing
///
-/// \param Param the parameter type
+/// \param P the parameter type
///
-/// \param Arg the argument type
+/// \param A the argument type
///
/// \param Info information about the template argument deduction itself
///
@@ -559,75 +569,78 @@ DeduceTemplateArguments(Sema &S,
/// "success" result means that template argument deduction has not yet failed,
/// but it may still fail, later, for other reasons.
static Sema::TemplateDeductionResult
-DeduceTemplateArguments(Sema &S,
- TemplateParameterList *TemplateParams,
- const TemplateSpecializationType *Param,
- QualType Arg,
- TemplateDeductionInfo &Info,
- SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
- assert(Arg.isCanonical() && "Argument type must be canonical");
+DeduceTemplateSpecArguments(Sema &S, TemplateParameterList *TemplateParams,
+ const QualType P, QualType A,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+ QualType UP = P;
+ if (const auto *IP = P->getAs<InjectedClassNameType>())
+ UP = IP->getInjectedSpecializationType();
+ // FIXME: Try to preserve type sugar here, which is hard
+ // because of the unresolved template arguments.
+ const auto *TP = UP.getCanonicalType()->castAs<TemplateSpecializationType>();
+ TemplateName TNP = TP->getTemplateName();
+
+ // If the parameter is an alias template, there is nothing to deduce.
+ if (const auto *TD = TNP.getAsTemplateDecl(); TD && TD->isTypeAlias())
+ return Sema::TDK_Success;
+
+ ArrayRef<TemplateArgument> PResolved = TP->template_arguments();
+ QualType UA = A;
// Treat an injected-class-name as its underlying template-id.
- if (auto *Injected = dyn_cast<InjectedClassNameType>(Arg))
- Arg = Injected->getInjectedSpecializationType();
+ if (const auto *Injected = A->getAs<InjectedClassNameType>())
+ UA = Injected->getInjectedSpecializationType();
// Check whether the template argument is a dependent template-id.
- if (const TemplateSpecializationType *SpecArg
- = dyn_cast<TemplateSpecializationType>(Arg)) {
- // Perform template argument deduction for the template name.
- if (Sema::TemplateDeductionResult Result
- = DeduceTemplateArguments(S, TemplateParams,
- Param->getTemplateName(),
- SpecArg->getTemplateName(),
- Info, Deduced))
- return Result;
+ // FIXME: Should not lose sugar here.
+ if (const auto *SA =
+ dyn_cast<TemplateSpecializationType>(UA.getCanonicalType())) {
+ TemplateName TNA = SA->getTemplateName();
+ // If the argument is an alias template, there is nothing to deduce.
+ if (const auto *TD = TNA.getAsTemplateDecl(); TD && TD->isTypeAlias())
+ return Sema::TDK_Success;
+ // Perform template argument deduction for the template name.
+ if (auto Result =
+ DeduceTemplateArguments(S, TemplateParams, TNP, TNA, Info, Deduced))
+ return Result;
// Perform template argument deduction on each template
// argument. Ignore any missing/extra arguments, since they could be
// filled in by default arguments.
- return DeduceTemplateArguments(S, TemplateParams,
- Param->template_arguments(),
- SpecArg->template_arguments(), Info, Deduced,
+ return DeduceTemplateArguments(S, TemplateParams, PResolved,
+ SA->template_arguments(), Info, Deduced,
/*NumberOfArgumentsMustMatch=*/false);
}
// If the argument type is a class template specialization, we
// perform template argument deduction using its template
// arguments.
- const RecordType *RecordArg = dyn_cast<RecordType>(Arg);
- if (!RecordArg) {
- Info.FirstArg = TemplateArgument(QualType(Param, 0));
- Info.SecondArg = TemplateArgument(Arg);
- return Sema::TDK_NonDeducedMismatch;
- }
-
- ClassTemplateSpecializationDecl *SpecArg
- = dyn_cast<ClassTemplateSpecializationDecl>(RecordArg->getDecl());
- if (!SpecArg) {
- Info.FirstArg = TemplateArgument(QualType(Param, 0));
- Info.SecondArg = TemplateArgument(Arg);
+ const auto *RA = UA->getAs<RecordType>();
+ const auto *SA =
+ RA ? dyn_cast<ClassTemplateSpecializationDecl>(RA->getDecl()) : nullptr;
+ if (!SA) {
+ Info.FirstArg = TemplateArgument(P);
+ Info.SecondArg = TemplateArgument(A);
return Sema::TDK_NonDeducedMismatch;
}
// Perform template argument deduction for the template name.
- if (Sema::TemplateDeductionResult Result
- = DeduceTemplateArguments(S,
- TemplateParams,
- Param->getTemplateName(),
- TemplateName(SpecArg->getSpecializedTemplate()),
- Info, Deduced))
+ if (auto Result = DeduceTemplateArguments(
+ S, TemplateParams, TP->getTemplateName(),
+ TemplateName(SA->getSpecializedTemplate()), Info, Deduced))
return Result;
// Perform template argument deduction for the template arguments.
- return DeduceTemplateArguments(S, TemplateParams, Param->template_arguments(),
- SpecArg->getTemplateArgs().asArray(), Info,
- Deduced, /*NumberOfArgumentsMustMatch=*/true);
+ return DeduceTemplateArguments(S, TemplateParams, PResolved,
+ SA->getTemplateArgs().asArray(), Info, Deduced,
+ /*NumberOfArgumentsMustMatch=*/true);
}
-/// Determines whether the given type is an opaque type that
-/// might be more qualified when instantiated.
-static bool IsPossiblyOpaquelyQualifiedType(QualType T) {
+static bool IsPossiblyOpaquelyQualifiedTypeInternal(const Type *T) {
+ assert(T->isCanonicalUnqualified());
+
switch (T->getTypeClass()) {
case Type::TypeOfExpr:
case Type::TypeOf:
@@ -641,14 +654,21 @@ static bool IsPossiblyOpaquelyQualifiedType(QualType T) {
case Type::IncompleteArray:
case Type::VariableArray:
case Type::DependentSizedArray:
- return IsPossiblyOpaquelyQualifiedType(
- cast<ArrayType>(T)->getElementType());
+ return IsPossiblyOpaquelyQualifiedTypeInternal(
+ cast<ArrayType>(T)->getElementType().getTypePtr());
default:
return false;
}
}
+/// Determines whether the given type is an opaque type that
+/// might be more qualified when instantiated.
+static bool IsPossiblyOpaquelyQualifiedType(QualType T) {
+ return IsPossiblyOpaquelyQualifiedTypeInternal(
+ T->getCanonicalTypeInternal().getTypePtr());
+}
+
/// Helper function to build a TemplateParameter when we don't
/// know its type statically.
static TemplateParameter makeTemplateParameter(Decl *D) {
@@ -689,8 +709,10 @@ public:
/// Prepare to deduce the packs named within Pattern.
PackDeductionScope(Sema &S, TemplateParameterList *TemplateParams,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
- TemplateDeductionInfo &Info, TemplateArgument Pattern)
- : S(S), TemplateParams(TemplateParams), Deduced(Deduced), Info(Info) {
+ TemplateDeductionInfo &Info, TemplateArgument Pattern,
+ bool DeducePackIfNotAlreadyDeduced = false)
+ : S(S), TemplateParams(TemplateParams), Deduced(Deduced), Info(Info),
+ DeducePackIfNotAlreadyDeduced(DeducePackIfNotAlreadyDeduced){
unsigned NumNamedPacks = addPacks(Pattern);
finishConstruction(NumNamedPacks);
}
@@ -715,7 +737,7 @@ private:
// FIXME: What if we encounter multiple packs with different numbers of
// pre-expanded expansions? (This should already have been diagnosed
// during substitution.)
- if (Optional<unsigned> ExpandedPackExpansions =
+ if (std::optional<unsigned> ExpandedPackExpansions =
getExpandedPackSize(TemplateParams->getParam(Index)))
FixedNumExpansions = ExpandedPackExpansions;
@@ -843,7 +865,7 @@ public:
/// Determine whether this pack expansion scope has a known, fixed arity.
/// This happens if it involves a pack from an outer template that has
/// (notionally) already been expanded.
- bool hasFixedArity() { return FixedNumExpansions.hasValue(); }
+ bool hasFixedArity() { return FixedNumExpansions.has_value(); }
/// Determine whether the next element of the argument is still part of this
/// pack. This is the case unless the pack is already expanded to a fixed
@@ -909,7 +931,7 @@ public:
new (S.Context) TemplateArgument[Pack.New.size()];
std::copy(Pack.New.begin(), Pack.New.end(), ArgumentPack);
NewPack = DeducedTemplateArgument(
- TemplateArgument(llvm::makeArrayRef(ArgumentPack, Pack.New.size())),
+ TemplateArgument(llvm::ArrayRef(ArgumentPack, Pack.New.size())),
// FIXME: This is wrong, it's possible that some pack elements are
// deduced from an array bound and others are not:
// template<typename ...T, T ...V> void g(const T (&...p)[V]);
@@ -934,8 +956,13 @@ public:
// Check the new pack matches any previous value.
DeducedTemplateArgument OldPack = *Loc;
- DeducedTemplateArgument Result =
- checkDeducedTemplateArguments(S.Context, OldPack, NewPack);
+ DeducedTemplateArgument Result = checkDeducedTemplateArguments(
+ S.Context, OldPack, NewPack, DeducePackIfNotAlreadyDeduced);
+
+ Info.AggregateDeductionCandidateHasMismatchedArity =
+ OldPack.getKind() == TemplateArgument::Pack &&
+ NewPack.getKind() == TemplateArgument::Pack &&
+ OldPack.pack_size() != NewPack.pack_size() && !Result.isNull();
// If we deferred a deduction of this pack, check that one now too.
if (!Result.isNull() && !Pack.DeferredDeduction.isNull()) {
@@ -954,7 +981,7 @@ public:
// If we have a pre-expanded pack and we didn't deduce enough elements
// for it, fail deduction.
- if (Optional<unsigned> Expansions = getExpandedPackSize(Param)) {
+ if (std::optional<unsigned> Expansions = getExpandedPackSize(Param)) {
if (*Expansions != PackElements) {
Info.Param = makeTemplateParameter(Param);
Info.FirstArg = Result;
@@ -975,8 +1002,9 @@ private:
TemplateDeductionInfo &Info;
unsigned PackElements = 0;
bool IsPartiallyExpanded = false;
+ bool DeducePackIfNotAlreadyDeduced = false;
/// The number of expansions, if we have a fully-expanded pack in this scope.
- Optional<unsigned> FixedNumExpansions;
+ std::optional<unsigned> FixedNumExpansions;
SmallVector<DeducedPack, 2> Packs;
};
@@ -1047,11 +1075,12 @@ DeduceTemplateArguments(Sema &S,
return Sema::TDK_MiscellaneousDeductionFailure;
}
- if (Sema::TemplateDeductionResult Result
- = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
- Params[ParamIdx], Args[ArgIdx],
- Info, Deduced, TDF,
- PartialOrdering))
+ if (Sema::TemplateDeductionResult Result =
+ DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, Params[ParamIdx].getUnqualifiedType(),
+ Args[ArgIdx].getUnqualifiedType(), Info, Deduced, TDF,
+ PartialOrdering,
+ /*DeducedFromArrayBound=*/false))
return Result;
++ArgIdx;
@@ -1073,10 +1102,11 @@ DeduceTemplateArguments(Sema &S,
if (ParamIdx + 1 == NumParams || PackScope.hasFixedArity()) {
for (; ArgIdx < NumArgs && PackScope.hasNextElement(); ++ArgIdx) {
// Deduce template arguments from the pattern.
- if (Sema::TemplateDeductionResult Result
- = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, Pattern,
- Args[ArgIdx], Info, Deduced,
- TDF, PartialOrdering))
+ if (Sema::TemplateDeductionResult Result =
+ DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, Pattern.getUnqualifiedType(),
+ Args[ArgIdx].getUnqualifiedType(), Info, Deduced, TDF,
+ PartialOrdering, /*DeducedFromArrayBound=*/false))
return Result;
PackScope.nextPackElement();
@@ -1099,7 +1129,7 @@ DeduceTemplateArguments(Sema &S,
// If the parameter type contains an explicitly-specified pack that we
// could not expand, skip the number of parameters notionally created
// by the expansion.
- Optional<unsigned> NumExpansions = Expansion->getNumExpansions();
+ std::optional<unsigned> NumExpansions = Expansion->getNumExpansions();
if (NumExpansions && !PackScope.isPartiallyExpanded()) {
for (unsigned I = 0; I != *NumExpansions && ArgIdx < NumArgs;
++I, ++ArgIdx)
@@ -1113,6 +1143,16 @@ DeduceTemplateArguments(Sema &S,
return Result;
}
+ // DR692, DR1395
+ // C++0x [temp.deduct.type]p10:
+ // If the parameter-declaration corresponding to P_i ...
+ // During partial ordering, if Ai was originally a function parameter pack:
+ // - if P does not contain a function parameter type corresponding to Ai then
+ // Ai is ignored;
+ if (PartialOrdering && ArgIdx + 1 == NumArgs &&
+ isa<PackExpansionType>(Args[ArgIdx]))
+ return Sema::TDK_Success;
+
// Make sure we don't have any extra arguments.
if (ArgIdx < NumArgs)
return Sema::TDK_MiscellaneousDeductionFailure;
@@ -1155,26 +1195,25 @@ static bool hasInconsistentOrSupersetQualifiersOf(QualType ParamType,
/// function types (noreturn adjustment, implicit calling conventions). If any
/// of parameter and argument is not a function, just perform type comparison.
///
-/// \param Param the template parameter type.
+/// \param P the template parameter type.
///
-/// \param Arg the argument type.
-bool Sema::isSameOrCompatibleFunctionType(CanQualType Param,
- CanQualType Arg) {
- const FunctionType *ParamFunction = Param->getAs<FunctionType>(),
- *ArgFunction = Arg->getAs<FunctionType>();
+/// \param A the argument type.
+bool Sema::isSameOrCompatibleFunctionType(QualType P, QualType A) {
+ const FunctionType *PF = P->getAs<FunctionType>(),
+ *AF = A->getAs<FunctionType>();
// Just compare if not functions.
- if (!ParamFunction || !ArgFunction)
- return Param == Arg;
+ if (!PF || !AF)
+ return Context.hasSameType(P, A);
// Noreturn and noexcept adjustment.
QualType AdjustedParam;
- if (IsFunctionConversion(Param, Arg, AdjustedParam))
- return Arg == Context.getCanonicalType(AdjustedParam);
+ if (IsFunctionConversion(P, A, AdjustedParam))
+ return Context.hasSameType(AdjustedParam, A);
// FIXME: Compatible calling conventions.
- return Param == Arg;
+ return Context.hasSameType(P, A);
}
/// Get the index of the first template parameter that was originally from the
@@ -1203,16 +1242,21 @@ static bool isForwardingReference(QualType Param, unsigned FirstInnerIndex) {
return false;
}
+static CXXRecordDecl *getCanonicalRD(QualType T) {
+ return cast<CXXRecordDecl>(
+ T->castAs<RecordType>()->getDecl()->getCanonicalDecl());
+}
+
/// Attempt to deduce the template arguments by checking the base types
/// according to (C++20 [temp.deduct.call] p4b3.
///
/// \param S the semantic analysis object within which we are deducing.
///
-/// \param RecordT the top level record object we are deducing against.
+/// \param RD the top level record object we are deducing against.
///
/// \param TemplateParams the template parameters that we are deducing.
///
-/// \param SpecParam the template specialization parameter type.
+/// \param P the template specialization parameter type.
///
/// \param Info information about the template argument deduction itself.
///
@@ -1221,10 +1265,11 @@ static bool isForwardingReference(QualType Param, unsigned FirstInnerIndex) {
/// \returns the result of template argument deduction with the bases. "invalid"
/// means no matches, "success" found a single item, and the
/// "MiscellaneousDeductionFailure" result happens when the match is ambiguous.
-static Sema::TemplateDeductionResult DeduceTemplateBases(
- Sema &S, const RecordType *RecordT, TemplateParameterList *TemplateParams,
- const TemplateSpecializationType *SpecParam, TemplateDeductionInfo &Info,
- SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+static Sema::TemplateDeductionResult
+DeduceTemplateBases(Sema &S, const CXXRecordDecl *RD,
+ TemplateParameterList *TemplateParams, QualType P,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
// C++14 [temp.deduct.call] p4b3:
// If P is a class and P has the form simple-template-id, then the
// transformed A can be a derived class of the deduced A. Likewise if
@@ -1244,45 +1289,44 @@ static Sema::TemplateDeductionResult DeduceTemplateBases(
// visited, while ToVisit is our stack of records that we still need to
// visit. Matches contains a list of matches that have yet to be
// disqualified.
- llvm::SmallPtrSet<const RecordType *, 8> Visited;
- SmallVector<const RecordType *, 8> ToVisit;
+ llvm::SmallPtrSet<const CXXRecordDecl *, 8> Visited;
+ SmallVector<QualType, 8> ToVisit;
// We iterate over this later, so we have to use MapVector to ensure
// determinism.
- llvm::MapVector<const RecordType *, SmallVector<DeducedTemplateArgument, 8>>
+ llvm::MapVector<const CXXRecordDecl *,
+ SmallVector<DeducedTemplateArgument, 8>>
Matches;
- auto AddBases = [&Visited, &ToVisit](const RecordType *RT) {
- CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ auto AddBases = [&Visited, &ToVisit](const CXXRecordDecl *RD) {
for (const auto &Base : RD->bases()) {
- assert(Base.getType()->isRecordType() &&
- "Base class that isn't a record?");
- const RecordType *RT = Base.getType()->getAs<RecordType>();
- if (Visited.insert(RT).second)
- ToVisit.push_back(Base.getType()->getAs<RecordType>());
+ QualType T = Base.getType();
+ assert(T->isRecordType() && "Base class that isn't a record?");
+ if (Visited.insert(::getCanonicalRD(T)).second)
+ ToVisit.push_back(T);
}
};
// Set up the loop by adding all the bases.
- AddBases(RecordT);
+ AddBases(RD);
// Search each path of bases until we either run into a successful match
// (where all bases of it are invalid), or we run out of bases.
while (!ToVisit.empty()) {
- const RecordType *NextT = ToVisit.pop_back_val();
+ QualType NextT = ToVisit.pop_back_val();
SmallVector<DeducedTemplateArgument, 8> DeducedCopy(Deduced.begin(),
Deduced.end());
TemplateDeductionInfo BaseInfo(TemplateDeductionInfo::ForBase, Info);
- Sema::TemplateDeductionResult BaseResult =
- DeduceTemplateArguments(S, TemplateParams, SpecParam,
- QualType(NextT, 0), BaseInfo, DeducedCopy);
+ Sema::TemplateDeductionResult BaseResult = DeduceTemplateSpecArguments(
+ S, TemplateParams, P, NextT, BaseInfo, DeducedCopy);
// If this was a successful deduction, add it to the list of matches,
// otherwise we need to continue searching its bases.
+ const CXXRecordDecl *RD = ::getCanonicalRD(NextT);
if (BaseResult == Sema::TDK_Success)
- Matches.insert({NextT, DeducedCopy});
+ Matches.insert({RD, DeducedCopy});
else
- AddBases(NextT);
+ AddBases(RD);
}
// At this point, 'Matches' contains a list of seemingly valid bases, however
@@ -1297,14 +1341,14 @@ static Sema::TemplateDeductionResult DeduceTemplateBases(
AddBases(Match.first);
// We can give up once we have a single item (or have run out of things to
- // search) since cyclical inheritence isn't valid.
+ // search) since cyclical inheritance isn't valid.
while (Matches.size() > 1 && !ToVisit.empty()) {
- const RecordType *NextT = ToVisit.pop_back_val();
- Matches.erase(NextT);
+ const CXXRecordDecl *RD = ::getCanonicalRD(ToVisit.pop_back_val());
+ Matches.erase(RD);
- // Always add all bases, since the inheritence tree can contain
+ // Always add all bases, since the inheritance tree can contain
// disqualifications for multiple matches.
- AddBases(NextT);
+ AddBases(RD);
}
}
@@ -1324,9 +1368,9 @@ static Sema::TemplateDeductionResult DeduceTemplateBases(
///
/// \param TemplateParams the template parameters that we are deducing
///
-/// \param ParamIn the parameter type
+/// \param P the parameter type
///
-/// \param ArgIn the argument type
+/// \param A the argument type
///
/// \param Info information about the template argument deduction itself
///
@@ -1341,41 +1385,33 @@ static Sema::TemplateDeductionResult DeduceTemplateBases(
/// \returns the result of template argument deduction so far. Note that a
/// "success" result means that template argument deduction has not yet failed,
/// but it may still fail, later, for other reasons.
-static Sema::TemplateDeductionResult
-DeduceTemplateArgumentsByTypeMatch(Sema &S,
- TemplateParameterList *TemplateParams,
- QualType ParamIn, QualType ArgIn,
- TemplateDeductionInfo &Info,
- SmallVectorImpl<DeducedTemplateArgument> &Deduced,
- unsigned TDF,
- bool PartialOrdering,
- bool DeducedFromArrayBound) {
- // We only want to look at the canonical types, since typedefs and
- // sugar are not part of template argument deduction.
- QualType Param = S.Context.getCanonicalType(ParamIn);
- QualType Arg = S.Context.getCanonicalType(ArgIn);
+static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(
+ Sema &S, TemplateParameterList *TemplateParams, QualType P, QualType A,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned TDF,
+ bool PartialOrdering, bool DeducedFromArrayBound) {
// If the argument type is a pack expansion, look at its pattern.
// This isn't explicitly called out
- if (const PackExpansionType *ArgExpansion
- = dyn_cast<PackExpansionType>(Arg))
- Arg = ArgExpansion->getPattern();
+ if (const auto *AExp = dyn_cast<PackExpansionType>(A))
+ A = AExp->getPattern();
+ assert(!isa<PackExpansionType>(A.getCanonicalType()));
if (PartialOrdering) {
// C++11 [temp.deduct.partial]p5:
// Before the partial ordering is done, certain transformations are
// performed on the types used for partial ordering:
// - If P is a reference type, P is replaced by the type referred to.
- const ReferenceType *ParamRef = Param->getAs<ReferenceType>();
- if (ParamRef)
- Param = ParamRef->getPointeeType();
+ const ReferenceType *PRef = P->getAs<ReferenceType>();
+ if (PRef)
+ P = PRef->getPointeeType();
// - If A is a reference type, A is replaced by the type referred to.
- const ReferenceType *ArgRef = Arg->getAs<ReferenceType>();
- if (ArgRef)
- Arg = ArgRef->getPointeeType();
+ const ReferenceType *ARef = A->getAs<ReferenceType>();
+ if (ARef)
+ A = A->getPointeeType();
- if (ParamRef && ArgRef && S.Context.hasSameUnqualifiedType(Param, Arg)) {
+ if (PRef && ARef && S.Context.hasSameUnqualifiedType(P, A)) {
// C++11 [temp.deduct.partial]p9:
// If, for a given type, deduction succeeds in both directions (i.e.,
// the types are identical after the transformations above) and both
@@ -1395,29 +1431,26 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
// succeeds, so we model this as a deduction failure. Note that
// [the first type] is P and [the other type] is A here; the standard
// gets this backwards.
- Qualifiers ParamQuals = Param.getQualifiers();
- Qualifiers ArgQuals = Arg.getQualifiers();
- if ((ParamRef->isLValueReferenceType() &&
- !ArgRef->isLValueReferenceType()) ||
- ParamQuals.isStrictSupersetOf(ArgQuals) ||
- (ParamQuals.hasNonTrivialObjCLifetime() &&
- ArgQuals.getObjCLifetime() == Qualifiers::OCL_ExplicitNone &&
- ParamQuals.withoutObjCLifetime() ==
- ArgQuals.withoutObjCLifetime())) {
- Info.FirstArg = TemplateArgument(ParamIn);
- Info.SecondArg = TemplateArgument(ArgIn);
+ Qualifiers PQuals = P.getQualifiers(), AQuals = A.getQualifiers();
+ if ((PRef->isLValueReferenceType() && !ARef->isLValueReferenceType()) ||
+ PQuals.isStrictSupersetOf(AQuals) ||
+ (PQuals.hasNonTrivialObjCLifetime() &&
+ AQuals.getObjCLifetime() == Qualifiers::OCL_ExplicitNone &&
+ PQuals.withoutObjCLifetime() == AQuals.withoutObjCLifetime())) {
+ Info.FirstArg = TemplateArgument(P);
+ Info.SecondArg = TemplateArgument(A);
return Sema::TDK_NonDeducedMismatch;
}
}
-
+ Qualifiers DiscardedQuals;
// C++11 [temp.deduct.partial]p7:
// Remove any top-level cv-qualifiers:
// - If P is a cv-qualified type, P is replaced by the cv-unqualified
// version of P.
- Param = Param.getUnqualifiedType();
+ P = S.Context.getUnqualifiedArrayType(P, DiscardedQuals);
// - If A is a cv-qualified type, A is replaced by the cv-unqualified
// version of A.
- Arg = Arg.getUnqualifiedType();
+ A = S.Context.getUnqualifiedArrayType(A, DiscardedQuals);
} else {
// C++0x [temp.deduct.call]p4 bullet 1:
// - If the original P is a reference type, the deduced A (i.e., the type
@@ -1425,13 +1458,12 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
// transformed A.
if (TDF & TDF_ParamWithReferenceType) {
Qualifiers Quals;
- QualType UnqualParam = S.Context.getUnqualifiedArrayType(Param, Quals);
- Quals.setCVRQualifiers(Quals.getCVRQualifiers() &
- Arg.getCVRQualifiers());
- Param = S.Context.getQualifiedType(UnqualParam, Quals);
+ QualType UnqualP = S.Context.getUnqualifiedArrayType(P, Quals);
+ Quals.setCVRQualifiers(Quals.getCVRQualifiers() & A.getCVRQualifiers());
+ P = S.Context.getQualifiedType(UnqualP, Quals);
}
- if ((TDF & TDF_TopLevelParameterTypeList) && !Param->isFunctionType()) {
+ if ((TDF & TDF_TopLevelParameterTypeList) && !P->isFunctionType()) {
// C++0x [temp.deduct.type]p10:
// If P and A are function types that originated from deduction when
// taking the address of a function template (14.8.2.2) or when deducing
@@ -1444,8 +1476,9 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
// Pi is T&& and Ai is X&, the adjusted Pi will be T, causing T to be
// deduced as X&. - end note ]
TDF &= ~TDF_TopLevelParameterTypeList;
- if (isForwardingReference(Param, 0) && Arg->isLValueReferenceType())
- Param = Param->getPointeeType();
+ if (isForwardingReference(P, /*FirstInnerIndex=*/0) &&
+ A->isLValueReferenceType())
+ P = P->getPointeeType();
}
}
@@ -1456,53 +1489,48 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
//
// T
// cv-list T
- if (const TemplateTypeParmType *TemplateTypeParm
- = Param->getAs<TemplateTypeParmType>()) {
+ if (const auto *TTP = P->getAs<TemplateTypeParmType>()) {
// Just skip any attempts to deduce from a placeholder type or a parameter
// at a different depth.
- if (Arg->isPlaceholderType() ||
- Info.getDeducedDepth() != TemplateTypeParm->getDepth())
+ if (A->isPlaceholderType() || Info.getDeducedDepth() != TTP->getDepth())
return Sema::TDK_Success;
- unsigned Index = TemplateTypeParm->getIndex();
- bool RecanonicalizeArg = false;
+ unsigned Index = TTP->getIndex();
// If the argument type is an array type, move the qualifiers up to the
// top level, so they can be matched with the qualifiers on the parameter.
- if (isa<ArrayType>(Arg)) {
+ if (A->isArrayType()) {
Qualifiers Quals;
- Arg = S.Context.getUnqualifiedArrayType(Arg, Quals);
- if (Quals) {
- Arg = S.Context.getQualifiedType(Arg, Quals);
- RecanonicalizeArg = true;
- }
+ A = S.Context.getUnqualifiedArrayType(A, Quals);
+ if (Quals)
+ A = S.Context.getQualifiedType(A, Quals);
}
// The argument type can not be less qualified than the parameter
// type.
if (!(TDF & TDF_IgnoreQualifiers) &&
- hasInconsistentOrSupersetQualifiersOf(Param, Arg)) {
+ hasInconsistentOrSupersetQualifiersOf(P, A)) {
Info.Param = cast<TemplateTypeParmDecl>(TemplateParams->getParam(Index));
- Info.FirstArg = TemplateArgument(Param);
- Info.SecondArg = TemplateArgument(Arg);
+ Info.FirstArg = TemplateArgument(P);
+ Info.SecondArg = TemplateArgument(A);
return Sema::TDK_Underqualified;
}
// Do not match a function type with a cv-qualified type.
// http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#1584
- if (Arg->isFunctionType() && Param.hasQualifiers()) {
+ if (A->isFunctionType() && P.hasQualifiers())
return Sema::TDK_NonDeducedMismatch;
- }
- assert(TemplateTypeParm->getDepth() == Info.getDeducedDepth() &&
+ assert(TTP->getDepth() == Info.getDeducedDepth() &&
"saw template type parameter with wrong depth");
- assert(Arg != S.Context.OverloadTy && "Unresolved overloaded function");
- QualType DeducedType = Arg;
+ assert(A->getCanonicalTypeInternal() != S.Context.OverloadTy &&
+ "Unresolved overloaded function");
+ QualType DeducedType = A;
// Remove any qualifiers on the parameter from the deduced type.
// We checked the qualifiers for consistency above.
Qualifiers DeducedQs = DeducedType.getQualifiers();
- Qualifiers ParamQs = Param.getQualifiers();
+ Qualifiers ParamQs = P.getQualifiers();
DeducedQs.removeCVRQualifiers(ParamQs.getCVRQualifiers());
if (ParamQs.hasObjCGCAttr())
DeducedQs.removeObjCGCAttr();
@@ -1517,29 +1545,24 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
if (ParamQs.hasObjCLifetime() && !DeducedType->isObjCLifetimeType() &&
!DeducedType->isDependentType()) {
Info.Param = cast<TemplateTypeParmDecl>(TemplateParams->getParam(Index));
- Info.FirstArg = TemplateArgument(Param);
- Info.SecondArg = TemplateArgument(Arg);
+ Info.FirstArg = TemplateArgument(P);
+ Info.SecondArg = TemplateArgument(A);
return Sema::TDK_Underqualified;
}
// Objective-C ARC:
// If template deduction would produce an argument type with lifetime type
// but no lifetime qualifier, the __strong lifetime qualifier is inferred.
- if (S.getLangOpts().ObjCAutoRefCount &&
- DeducedType->isObjCLifetimeType() &&
+ if (S.getLangOpts().ObjCAutoRefCount && DeducedType->isObjCLifetimeType() &&
!DeducedQs.hasObjCLifetime())
DeducedQs.setObjCLifetime(Qualifiers::OCL_Strong);
- DeducedType = S.Context.getQualifiedType(DeducedType.getUnqualifiedType(),
- DeducedQs);
-
- if (RecanonicalizeArg)
- DeducedType = S.Context.getCanonicalType(DeducedType);
+ DeducedType =
+ S.Context.getQualifiedType(DeducedType.getUnqualifiedType(), DeducedQs);
DeducedTemplateArgument NewDeduced(DeducedType, DeducedFromArrayBound);
- DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
- Deduced[Index],
- NewDeduced);
+ DeducedTemplateArgument Result =
+ checkDeducedTemplateArguments(S.Context, Deduced[Index], NewDeduced);
if (Result.isNull()) {
Info.Param = cast<TemplateTypeParmDecl>(TemplateParams->getParam(Index));
Info.FirstArg = Deduced[Index];
@@ -1552,69 +1575,57 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
}
// Set up the template argument deduction information for a failure.
- Info.FirstArg = TemplateArgument(ParamIn);
- Info.SecondArg = TemplateArgument(ArgIn);
+ Info.FirstArg = TemplateArgument(P);
+ Info.SecondArg = TemplateArgument(A);
// If the parameter is an already-substituted template parameter
// pack, do nothing: we don't know which of its arguments to look
// at, so we have to wait until all of the parameter packs in this
// expansion have arguments.
- if (isa<SubstTemplateTypeParmPackType>(Param))
+ if (P->getAs<SubstTemplateTypeParmPackType>())
return Sema::TDK_Success;
// Check the cv-qualifiers on the parameter and argument types.
- CanQualType CanParam = S.Context.getCanonicalType(Param);
- CanQualType CanArg = S.Context.getCanonicalType(Arg);
if (!(TDF & TDF_IgnoreQualifiers)) {
if (TDF & TDF_ParamWithReferenceType) {
- if (hasInconsistentOrSupersetQualifiersOf(Param, Arg))
+ if (hasInconsistentOrSupersetQualifiersOf(P, A))
return Sema::TDK_NonDeducedMismatch;
} else if (TDF & TDF_ArgWithReferenceType) {
// C++ [temp.deduct.conv]p4:
// If the original A is a reference type, A can be more cv-qualified
// than the deduced A
- if (!Arg.getQualifiers().compatiblyIncludes(Param.getQualifiers()))
+ if (!A.getQualifiers().compatiblyIncludes(P.getQualifiers()))
return Sema::TDK_NonDeducedMismatch;
// Strip out all extra qualifiers from the argument to figure out the
// type we're converting to, prior to the qualification conversion.
Qualifiers Quals;
- Arg = S.Context.getUnqualifiedArrayType(Arg, Quals);
- Arg = S.Context.getQualifiedType(Arg, Param.getQualifiers());
- } else if (!IsPossiblyOpaquelyQualifiedType(Param)) {
- if (Param.getCVRQualifiers() != Arg.getCVRQualifiers())
+ A = S.Context.getUnqualifiedArrayType(A, Quals);
+ A = S.Context.getQualifiedType(A, P.getQualifiers());
+ } else if (!IsPossiblyOpaquelyQualifiedType(P)) {
+ if (P.getCVRQualifiers() != A.getCVRQualifiers())
return Sema::TDK_NonDeducedMismatch;
}
+ }
- // If the parameter type is not dependent, there is nothing to deduce.
- if (!Param->isDependentType()) {
- if (!(TDF & TDF_SkipNonDependent)) {
- bool NonDeduced =
- (TDF & TDF_AllowCompatibleFunctionType)
- ? !S.isSameOrCompatibleFunctionType(CanParam, CanArg)
- : Param != Arg;
- if (NonDeduced) {
- return Sema::TDK_NonDeducedMismatch;
- }
- }
+ // If the parameter type is not dependent, there is nothing to deduce.
+ if (!P->isDependentType()) {
+ if (TDF & TDF_SkipNonDependent)
return Sema::TDK_Success;
- }
- } else if (!Param->isDependentType()) {
- if (!(TDF & TDF_SkipNonDependent)) {
- CanQualType ParamUnqualType = CanParam.getUnqualifiedType(),
- ArgUnqualType = CanArg.getUnqualifiedType();
- bool Success =
- (TDF & TDF_AllowCompatibleFunctionType)
- ? S.isSameOrCompatibleFunctionType(ParamUnqualType, ArgUnqualType)
- : ParamUnqualType == ArgUnqualType;
- if (Success)
- return Sema::TDK_Success;
- } else {
+ if ((TDF & TDF_IgnoreQualifiers) ? S.Context.hasSameUnqualifiedType(P, A)
+ : S.Context.hasSameType(P, A))
return Sema::TDK_Success;
- }
+ if (TDF & TDF_AllowCompatibleFunctionType &&
+ S.isSameOrCompatibleFunctionType(P, A))
+ return Sema::TDK_Success;
+ if (!(TDF & TDF_IgnoreQualifiers))
+ return Sema::TDK_NonDeducedMismatch;
+ // Otherwise, when ignoring qualifiers, the types not having the same
+ // unqualified type does not mean they do not match, so in this case we
+ // must keep going and analyze with a non-dependent parameter type.
}
- switch (Param->getTypeClass()) {
+ switch (P.getCanonicalType()->getTypeClass()) {
// Non-canonical types cannot appear here.
#define NON_CANONICAL_TYPE(Class, Base) \
case Type::Class: llvm_unreachable("deducing non-canonical type: " #Class);
@@ -1625,8 +1636,15 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
case Type::SubstTemplateTypeParmPack:
llvm_unreachable("Type nodes handled above");
- // These types cannot be dependent, so simply check whether the types are
- // the same.
+ case Type::Auto:
+ // C++23 [temp.deduct.funcaddr]/3:
+ // A placeholder type in the return type of a function template is a
+ // non-deduced context.
+ // There's no corresponding wording for [temp.deduct.decl], but we treat
+ // it the same to match other compilers.
+ if (P->isDependentType())
+ return Sema::TDK_Success;
+ [[fallthrough]];
case Type::Builtin:
case Type::VariableArray:
case Type::Vector:
@@ -1636,135 +1654,118 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
- case Type::ExtInt:
- if (TDF & TDF_SkipNonDependent)
- return Sema::TDK_Success;
-
- if (TDF & TDF_IgnoreQualifiers) {
- Param = Param.getUnqualifiedType();
- Arg = Arg.getUnqualifiedType();
- }
-
- return Param == Arg? Sema::TDK_Success : Sema::TDK_NonDeducedMismatch;
+ case Type::BitInt:
+ return (TDF & TDF_SkipNonDependent) ||
+ ((TDF & TDF_IgnoreQualifiers)
+ ? S.Context.hasSameUnqualifiedType(P, A)
+ : S.Context.hasSameType(P, A))
+ ? Sema::TDK_Success
+ : Sema::TDK_NonDeducedMismatch;
// _Complex T [placeholder extension]
- case Type::Complex:
- if (const ComplexType *ComplexArg = Arg->getAs<ComplexType>())
- return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
- cast<ComplexType>(Param)->getElementType(),
- ComplexArg->getElementType(),
- Info, Deduced, TDF);
-
- return Sema::TDK_NonDeducedMismatch;
+ case Type::Complex: {
+ const auto *CP = P->castAs<ComplexType>(), *CA = A->getAs<ComplexType>();
+ if (!CA)
+ return Sema::TDK_NonDeducedMismatch;
+ return DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, CP->getElementType(), CA->getElementType(), Info,
+ Deduced, TDF);
+ }
// _Atomic T [extension]
- case Type::Atomic:
- if (const AtomicType *AtomicArg = Arg->getAs<AtomicType>())
- return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
- cast<AtomicType>(Param)->getValueType(),
- AtomicArg->getValueType(),
- Info, Deduced, TDF);
-
- return Sema::TDK_NonDeducedMismatch;
+ case Type::Atomic: {
+ const auto *PA = P->castAs<AtomicType>(), *AA = A->getAs<AtomicType>();
+ if (!AA)
+ return Sema::TDK_NonDeducedMismatch;
+ return DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, PA->getValueType(), AA->getValueType(), Info,
+ Deduced, TDF);
+ }
// T *
case Type::Pointer: {
QualType PointeeType;
- if (const PointerType *PointerArg = Arg->getAs<PointerType>()) {
- PointeeType = PointerArg->getPointeeType();
- } else if (const ObjCObjectPointerType *PointerArg
- = Arg->getAs<ObjCObjectPointerType>()) {
- PointeeType = PointerArg->getPointeeType();
+ if (const auto *PA = A->getAs<PointerType>()) {
+ PointeeType = PA->getPointeeType();
+ } else if (const auto *PA = A->getAs<ObjCObjectPointerType>()) {
+ PointeeType = PA->getPointeeType();
} else {
return Sema::TDK_NonDeducedMismatch;
}
-
- unsigned SubTDF = TDF & (TDF_IgnoreQualifiers | TDF_DerivedClass);
- return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
- cast<PointerType>(Param)->getPointeeType(),
- PointeeType,
- Info, Deduced, SubTDF);
+ return DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, P->castAs<PointerType>()->getPointeeType(),
+ PointeeType, Info, Deduced,
+ TDF & (TDF_IgnoreQualifiers | TDF_DerivedClass));
}
// T &
case Type::LValueReference: {
- const LValueReferenceType *ReferenceArg =
- Arg->getAs<LValueReferenceType>();
- if (!ReferenceArg)
+ const auto *RP = P->castAs<LValueReferenceType>(),
+ *RA = A->getAs<LValueReferenceType>();
+ if (!RA)
return Sema::TDK_NonDeducedMismatch;
- return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
- cast<LValueReferenceType>(Param)->getPointeeType(),
- ReferenceArg->getPointeeType(), Info, Deduced, 0);
+ return DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, RP->getPointeeType(), RA->getPointeeType(), Info,
+ Deduced, 0);
}
// T && [C++0x]
case Type::RValueReference: {
- const RValueReferenceType *ReferenceArg =
- Arg->getAs<RValueReferenceType>();
- if (!ReferenceArg)
+ const auto *RP = P->castAs<RValueReferenceType>(),
+ *RA = A->getAs<RValueReferenceType>();
+ if (!RA)
return Sema::TDK_NonDeducedMismatch;
- return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
- cast<RValueReferenceType>(Param)->getPointeeType(),
- ReferenceArg->getPointeeType(),
- Info, Deduced, 0);
+ return DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, RP->getPointeeType(), RA->getPointeeType(), Info,
+ Deduced, 0);
}
// T [] (implied, but not stated explicitly)
case Type::IncompleteArray: {
- const IncompleteArrayType *IncompleteArrayArg =
- S.Context.getAsIncompleteArrayType(Arg);
- if (!IncompleteArrayArg)
+ const auto *IAA = S.Context.getAsIncompleteArrayType(A);
+ if (!IAA)
return Sema::TDK_NonDeducedMismatch;
- unsigned SubTDF = TDF & TDF_IgnoreQualifiers;
- return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
- S.Context.getAsIncompleteArrayType(Param)->getElementType(),
- IncompleteArrayArg->getElementType(),
- Info, Deduced, SubTDF);
+ const auto *IAP = S.Context.getAsIncompleteArrayType(P);
+ assert(IAP && "Template parameter not of incomplete array type");
+
+ return DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, IAP->getElementType(), IAA->getElementType(), Info,
+ Deduced, TDF & TDF_IgnoreQualifiers);
}
// T [integer-constant]
case Type::ConstantArray: {
- const ConstantArrayType *ConstantArrayArg =
- S.Context.getAsConstantArrayType(Arg);
- if (!ConstantArrayArg)
- return Sema::TDK_NonDeducedMismatch;
-
- const ConstantArrayType *ConstantArrayParm =
- S.Context.getAsConstantArrayType(Param);
- if (ConstantArrayArg->getSize() != ConstantArrayParm->getSize())
+ const auto *CAA = S.Context.getAsConstantArrayType(A),
+ *CAP = S.Context.getAsConstantArrayType(P);
+ assert(CAP);
+ if (!CAA || CAA->getSize() != CAP->getSize())
return Sema::TDK_NonDeducedMismatch;
- unsigned SubTDF = TDF & TDF_IgnoreQualifiers;
- return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
- ConstantArrayParm->getElementType(),
- ConstantArrayArg->getElementType(),
- Info, Deduced, SubTDF);
+ return DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, CAP->getElementType(), CAA->getElementType(), Info,
+ Deduced, TDF & TDF_IgnoreQualifiers);
}
// type [i]
case Type::DependentSizedArray: {
- const ArrayType *ArrayArg = S.Context.getAsArrayType(Arg);
- if (!ArrayArg)
+ const auto *AA = S.Context.getAsArrayType(A);
+ if (!AA)
return Sema::TDK_NonDeducedMismatch;
- unsigned SubTDF = TDF & TDF_IgnoreQualifiers;
-
// Check the element type of the arrays
- const DependentSizedArrayType *DependentArrayParm
- = S.Context.getAsDependentSizedArrayType(Param);
- if (Sema::TemplateDeductionResult Result
- = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
- DependentArrayParm->getElementType(),
- ArrayArg->getElementType(),
- Info, Deduced, SubTDF))
+ const auto *DAP = S.Context.getAsDependentSizedArrayType(P);
+ assert(DAP);
+ if (auto Result = DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, DAP->getElementType(), AA->getElementType(),
+ Info, Deduced, TDF & TDF_IgnoreQualifiers))
return Result;
// Determine the array bound is something we can deduce.
- const NonTypeTemplateParmDecl *NTTP
- = getDeducedParameterFromExpr(Info, DependentArrayParm->getSizeExpr());
+ const NonTypeTemplateParmDecl *NTTP =
+ getDeducedParameterFromExpr(Info, DAP->getSizeExpr());
if (!NTTP)
return Sema::TDK_Success;
@@ -1772,20 +1773,16 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
// template parameter.
assert(NTTP->getDepth() == Info.getDeducedDepth() &&
"saw non-type template parameter with wrong depth");
- if (const ConstantArrayType *ConstantArrayArg
- = dyn_cast<ConstantArrayType>(ArrayArg)) {
- llvm::APSInt Size(ConstantArrayArg->getSize());
- return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP, Size,
- S.Context.getSizeType(),
- /*ArrayBound=*/true,
- Info, Deduced);
+ if (const auto *CAA = dyn_cast<ConstantArrayType>(AA)) {
+ llvm::APSInt Size(CAA->getSize());
+ return DeduceNonTypeTemplateArgument(
+ S, TemplateParams, NTTP, Size, S.Context.getSizeType(),
+ /*ArrayBound=*/true, Info, Deduced);
}
- if (const DependentSizedArrayType *DependentArrayArg
- = dyn_cast<DependentSizedArrayType>(ArrayArg))
- if (DependentArrayArg->getSizeExpr())
- return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP,
- DependentArrayArg->getSizeExpr(),
- Info, Deduced);
+ if (const auto *DAA = dyn_cast<DependentSizedArrayType>(AA))
+ if (DAA->getSizeExpr())
+ return DeduceNonTypeTemplateArgument(
+ S, TemplateParams, NTTP, DAA->getSizeExpr(), Info, Deduced);
// Incomplete type does not match a dependently-sized array type
return Sema::TDK_NonDeducedMismatch;
@@ -1795,34 +1792,29 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
// T(*)()
// T(*)(T)
case Type::FunctionProto: {
- unsigned SubTDF = TDF & TDF_TopLevelParameterTypeList;
- const FunctionProtoType *FunctionProtoArg =
- dyn_cast<FunctionProtoType>(Arg);
- if (!FunctionProtoArg)
+ const auto *FPP = P->castAs<FunctionProtoType>(),
+ *FPA = A->getAs<FunctionProtoType>();
+ if (!FPA)
return Sema::TDK_NonDeducedMismatch;
- const FunctionProtoType *FunctionProtoParam =
- cast<FunctionProtoType>(Param);
-
- if (FunctionProtoParam->getMethodQuals()
- != FunctionProtoArg->getMethodQuals() ||
- FunctionProtoParam->getRefQualifier()
- != FunctionProtoArg->getRefQualifier() ||
- FunctionProtoParam->isVariadic() != FunctionProtoArg->isVariadic())
+ if (FPP->getMethodQuals() != FPA->getMethodQuals() ||
+ FPP->getRefQualifier() != FPA->getRefQualifier() ||
+ FPP->isVariadic() != FPA->isVariadic())
return Sema::TDK_NonDeducedMismatch;
// Check return types.
if (auto Result = DeduceTemplateArgumentsByTypeMatch(
- S, TemplateParams, FunctionProtoParam->getReturnType(),
- FunctionProtoArg->getReturnType(), Info, Deduced, 0))
+ S, TemplateParams, FPP->getReturnType(), FPA->getReturnType(),
+ Info, Deduced, 0,
+ /*PartialOrdering=*/false,
+ /*DeducedFromArrayBound=*/false))
return Result;
// Check parameter types.
if (auto Result = DeduceTemplateArguments(
- S, TemplateParams, FunctionProtoParam->param_type_begin(),
- FunctionProtoParam->getNumParams(),
- FunctionProtoArg->param_type_begin(),
- FunctionProtoArg->getNumParams(), Info, Deduced, SubTDF))
+ S, TemplateParams, FPP->param_type_begin(), FPP->getNumParams(),
+ FPA->param_type_begin(), FPA->getNumParams(), Info, Deduced,
+ TDF & TDF_TopLevelParameterTypeList, PartialOrdering))
return Result;
if (TDF & TDF_AllowCompatibleFunctionType)
@@ -1831,28 +1823,28 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
// FIXME: Per core-2016/10/1019 (no corresponding core issue yet), permit
// deducing through the noexcept-specifier if it's part of the canonical
// type. libstdc++ relies on this.
- Expr *NoexceptExpr = FunctionProtoParam->getNoexceptExpr();
+ Expr *NoexceptExpr = FPP->getNoexceptExpr();
if (const NonTypeTemplateParmDecl *NTTP =
- NoexceptExpr ? getDeducedParameterFromExpr(Info, NoexceptExpr)
- : nullptr) {
+ NoexceptExpr ? getDeducedParameterFromExpr(Info, NoexceptExpr)
+ : nullptr) {
assert(NTTP->getDepth() == Info.getDeducedDepth() &&
"saw non-type template parameter with wrong depth");
llvm::APSInt Noexcept(1);
- switch (FunctionProtoArg->canThrow()) {
+ switch (FPA->canThrow()) {
case CT_Cannot:
Noexcept = 1;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CT_Can:
// We give E in noexcept(E) the "deduced from array bound" treatment.
// FIXME: Should we?
return DeduceNonTypeTemplateArgument(
S, TemplateParams, NTTP, Noexcept, S.Context.BoolTy,
- /*ArrayBound*/true, Info, Deduced);
+ /*DeducedFromArrayBound=*/true, Info, Deduced);
case CT_Dependent:
- if (Expr *ArgNoexceptExpr = FunctionProtoArg->getNoexceptExpr())
+ if (Expr *ArgNoexceptExpr = FPA->getNoexceptExpr())
return DeduceNonTypeTemplateArgument(
S, TemplateParams, NTTP, ArgNoexceptExpr, Info, Deduced);
// Can't deduce anything from throw(T...).
@@ -1870,11 +1862,6 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
case Type::InjectedClassName:
// Treat a template's injected-class-name as if the template
// specialization type had been used.
- Param = cast<InjectedClassNameType>(Param)
- ->getInjectedSpecializationType();
- assert(isa<TemplateSpecializationType>(Param) &&
- "injected class name is not a template specialization type");
- LLVM_FALLTHROUGH;
// template-name<T> (where template-name refers to a class template)
// template-name<i>
@@ -1882,41 +1869,33 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
// TT<i>
// TT<>
case Type::TemplateSpecialization: {
- const TemplateSpecializationType *SpecParam =
- cast<TemplateSpecializationType>(Param);
-
// When Arg cannot be a derived class, we can just try to deduce template
// arguments from the template-id.
- const RecordType *RecordT = Arg->getAs<RecordType>();
- if (!(TDF & TDF_DerivedClass) || !RecordT)
- return DeduceTemplateArguments(S, TemplateParams, SpecParam, Arg, Info,
- Deduced);
+ if (!(TDF & TDF_DerivedClass) || !A->isRecordType())
+ return DeduceTemplateSpecArguments(S, TemplateParams, P, A, Info,
+ Deduced);
SmallVector<DeducedTemplateArgument, 8> DeducedOrig(Deduced.begin(),
Deduced.end());
- Sema::TemplateDeductionResult Result = DeduceTemplateArguments(
- S, TemplateParams, SpecParam, Arg, Info, Deduced);
-
+ auto Result =
+ DeduceTemplateSpecArguments(S, TemplateParams, P, A, Info, Deduced);
if (Result == Sema::TDK_Success)
return Result;
// We cannot inspect base classes as part of deduction when the type
// is incomplete, so either instantiate any templates necessary to
// complete the type, or skip over it if it cannot be completed.
- if (!S.isCompleteType(Info.getLocation(), Arg))
+ if (!S.isCompleteType(Info.getLocation(), A))
return Result;
// Reset the incorrectly deduced argument from above.
Deduced = DeducedOrig;
// Check bases according to C++14 [temp.deduct.call] p4b3:
- Sema::TemplateDeductionResult BaseResult = DeduceTemplateBases(
- S, RecordT, TemplateParams, SpecParam, Info, Deduced);
-
- if (BaseResult != Sema::TDK_Invalid)
- return BaseResult;
- return Result;
+ auto BaseResult = DeduceTemplateBases(S, getCanonicalRD(A),
+ TemplateParams, P, Info, Deduced);
+ return BaseResult != Sema::TDK_Invalid ? BaseResult : Result;
}
// T type::*
@@ -1929,33 +1908,27 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
// T (T::*)()
// T (T::*)(T)
case Type::MemberPointer: {
- const MemberPointerType *MemPtrParam = cast<MemberPointerType>(Param);
- const MemberPointerType *MemPtrArg = dyn_cast<MemberPointerType>(Arg);
- if (!MemPtrArg)
+ const auto *MPP = P->castAs<MemberPointerType>(),
+ *MPA = A->getAs<MemberPointerType>();
+ if (!MPA)
return Sema::TDK_NonDeducedMismatch;
- QualType ParamPointeeType = MemPtrParam->getPointeeType();
- if (ParamPointeeType->isFunctionType())
- S.adjustMemberFunctionCC(ParamPointeeType, /*IsStatic=*/true,
+ QualType PPT = MPP->getPointeeType();
+ if (PPT->isFunctionType())
+ S.adjustMemberFunctionCC(PPT, /*HasThisPointer=*/false,
/*IsCtorOrDtor=*/false, Info.getLocation());
- QualType ArgPointeeType = MemPtrArg->getPointeeType();
- if (ArgPointeeType->isFunctionType())
- S.adjustMemberFunctionCC(ArgPointeeType, /*IsStatic=*/true,
+ QualType APT = MPA->getPointeeType();
+ if (APT->isFunctionType())
+ S.adjustMemberFunctionCC(APT, /*HasThisPointer=*/false,
/*IsCtorOrDtor=*/false, Info.getLocation());
- if (Sema::TemplateDeductionResult Result
- = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
- ParamPointeeType,
- ArgPointeeType,
- Info, Deduced,
- TDF & TDF_IgnoreQualifiers))
+ unsigned SubTDF = TDF & TDF_IgnoreQualifiers;
+ if (auto Result = DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, PPT, APT, Info, Deduced, SubTDF))
return Result;
-
- return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
- QualType(MemPtrParam->getClass(), 0),
- QualType(MemPtrArg->getClass(), 0),
- Info, Deduced,
- TDF & TDF_IgnoreQualifiers);
+ return DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, QualType(MPP->getClass(), 0),
+ QualType(MPA->getClass(), 0), Info, Deduced, SubTDF);
}
// (clang extension)
@@ -1964,70 +1937,58 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
// T(^)()
// T(^)(T)
case Type::BlockPointer: {
- const BlockPointerType *BlockPtrParam = cast<BlockPointerType>(Param);
- const BlockPointerType *BlockPtrArg = dyn_cast<BlockPointerType>(Arg);
-
- if (!BlockPtrArg)
+ const auto *BPP = P->castAs<BlockPointerType>(),
+ *BPA = A->getAs<BlockPointerType>();
+ if (!BPA)
return Sema::TDK_NonDeducedMismatch;
-
- return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
- BlockPtrParam->getPointeeType(),
- BlockPtrArg->getPointeeType(),
- Info, Deduced, 0);
+ return DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, BPP->getPointeeType(), BPA->getPointeeType(), Info,
+ Deduced, 0);
}
// (clang extension)
//
// T __attribute__(((ext_vector_type(<integral constant>))))
case Type::ExtVector: {
- const ExtVectorType *VectorParam = cast<ExtVectorType>(Param);
- if (const ExtVectorType *VectorArg = dyn_cast<ExtVectorType>(Arg)) {
+ const auto *VP = P->castAs<ExtVectorType>();
+ QualType ElementType;
+ if (const auto *VA = A->getAs<ExtVectorType>()) {
// Make sure that the vectors have the same number of elements.
- if (VectorParam->getNumElements() != VectorArg->getNumElements())
+ if (VP->getNumElements() != VA->getNumElements())
return Sema::TDK_NonDeducedMismatch;
-
- // Perform deduction on the element types.
- return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
- VectorParam->getElementType(),
- VectorArg->getElementType(),
- Info, Deduced, TDF);
- }
-
- if (const DependentSizedExtVectorType *VectorArg
- = dyn_cast<DependentSizedExtVectorType>(Arg)) {
+ ElementType = VA->getElementType();
+ } else if (const auto *VA = A->getAs<DependentSizedExtVectorType>()) {
// We can't check the number of elements, since the argument has a
// dependent number of elements. This can only occur during partial
// ordering.
-
- // Perform deduction on the element types.
- return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
- VectorParam->getElementType(),
- VectorArg->getElementType(),
- Info, Deduced, TDF);
+ ElementType = VA->getElementType();
+ } else {
+ return Sema::TDK_NonDeducedMismatch;
}
-
- return Sema::TDK_NonDeducedMismatch;
+ // Perform deduction on the element types.
+ return DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, VP->getElementType(), ElementType, Info, Deduced,
+ TDF);
}
case Type::DependentVector: {
- const auto *VectorParam = cast<DependentVectorType>(Param);
+ const auto *VP = P->castAs<DependentVectorType>();
- if (const auto *VectorArg = dyn_cast<VectorType>(Arg)) {
+ if (const auto *VA = A->getAs<VectorType>()) {
// Perform deduction on the element types.
- if (Sema::TemplateDeductionResult Result =
- DeduceTemplateArgumentsByTypeMatch(
- S, TemplateParams, VectorParam->getElementType(),
- VectorArg->getElementType(), Info, Deduced, TDF))
+ if (auto Result = DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, VP->getElementType(), VA->getElementType(),
+ Info, Deduced, TDF))
return Result;
// Perform deduction on the vector size, if we can.
const NonTypeTemplateParmDecl *NTTP =
- getDeducedParameterFromExpr(Info, VectorParam->getSizeExpr());
+ getDeducedParameterFromExpr(Info, VP->getSizeExpr());
if (!NTTP)
return Sema::TDK_Success;
llvm::APSInt ArgSize(S.Context.getTypeSize(S.Context.IntTy), false);
- ArgSize = VectorArg->getNumElements();
+ ArgSize = VA->getNumElements();
// Note that we use the "array bound" rules here; just like in that
// case, we don't have any particular type for the vector size, but
// we can provide one if necessary.
@@ -2036,22 +1997,21 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
Info, Deduced);
}
- if (const auto *VectorArg = dyn_cast<DependentVectorType>(Arg)) {
+ if (const auto *VA = A->getAs<DependentVectorType>()) {
// Perform deduction on the element types.
- if (Sema::TemplateDeductionResult Result =
- DeduceTemplateArgumentsByTypeMatch(
- S, TemplateParams, VectorParam->getElementType(),
- VectorArg->getElementType(), Info, Deduced, TDF))
+ if (auto Result = DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, VP->getElementType(), VA->getElementType(),
+ Info, Deduced, TDF))
return Result;
// Perform deduction on the vector size, if we can.
- const NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr(
- Info, VectorParam->getSizeExpr());
+ const NonTypeTemplateParmDecl *NTTP =
+ getDeducedParameterFromExpr(Info, VP->getSizeExpr());
if (!NTTP)
return Sema::TDK_Success;
- return DeduceNonTypeTemplateArgument(
- S, TemplateParams, NTTP, VectorArg->getSizeExpr(), Info, Deduced);
+ return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP,
+ VA->getSizeExpr(), Info, Deduced);
}
return Sema::TDK_NonDeducedMismatch;
@@ -2061,26 +2021,23 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
//
// T __attribute__(((ext_vector_type(N))))
case Type::DependentSizedExtVector: {
- const DependentSizedExtVectorType *VectorParam
- = cast<DependentSizedExtVectorType>(Param);
+ const auto *VP = P->castAs<DependentSizedExtVectorType>();
- if (const ExtVectorType *VectorArg = dyn_cast<ExtVectorType>(Arg)) {
+ if (const auto *VA = A->getAs<ExtVectorType>()) {
// Perform deduction on the element types.
- if (Sema::TemplateDeductionResult Result
- = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
- VectorParam->getElementType(),
- VectorArg->getElementType(),
- Info, Deduced, TDF))
+ if (auto Result = DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, VP->getElementType(), VA->getElementType(),
+ Info, Deduced, TDF))
return Result;
// Perform deduction on the vector size, if we can.
const NonTypeTemplateParmDecl *NTTP =
- getDeducedParameterFromExpr(Info, VectorParam->getSizeExpr());
+ getDeducedParameterFromExpr(Info, VP->getSizeExpr());
if (!NTTP)
return Sema::TDK_Success;
llvm::APSInt ArgSize(S.Context.getTypeSize(S.Context.IntTy), false);
- ArgSize = VectorArg->getNumElements();
+ ArgSize = VA->getNumElements();
// Note that we use the "array bound" rules here; just like in that
// case, we don't have any particular type for the vector size, but
// we can provide one if necessary.
@@ -2089,25 +2046,21 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
Deduced);
}
- if (const DependentSizedExtVectorType *VectorArg
- = dyn_cast<DependentSizedExtVectorType>(Arg)) {
+ if (const auto *VA = A->getAs<DependentSizedExtVectorType>()) {
// Perform deduction on the element types.
- if (Sema::TemplateDeductionResult Result
- = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
- VectorParam->getElementType(),
- VectorArg->getElementType(),
- Info, Deduced, TDF))
+ if (auto Result = DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, VP->getElementType(), VA->getElementType(),
+ Info, Deduced, TDF))
return Result;
// Perform deduction on the vector size, if we can.
const NonTypeTemplateParmDecl *NTTP =
- getDeducedParameterFromExpr(Info, VectorParam->getSizeExpr());
+ getDeducedParameterFromExpr(Info, VP->getSizeExpr());
if (!NTTP)
return Sema::TDK_Success;
return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP,
- VectorArg->getSizeExpr(),
- Info, Deduced);
+ VA->getSizeExpr(), Info, Deduced);
}
return Sema::TDK_NonDeducedMismatch;
@@ -2118,62 +2071,59 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
// T __attribute__((matrix_type(<integral constant>,
// <integral constant>)))
case Type::ConstantMatrix: {
- const ConstantMatrixType *MatrixArg = dyn_cast<ConstantMatrixType>(Arg);
- if (!MatrixArg)
+ const auto *MP = P->castAs<ConstantMatrixType>(),
+ *MA = A->getAs<ConstantMatrixType>();
+ if (!MA)
return Sema::TDK_NonDeducedMismatch;
- const ConstantMatrixType *MatrixParam = cast<ConstantMatrixType>(Param);
// Check that the dimensions are the same
- if (MatrixParam->getNumRows() != MatrixArg->getNumRows() ||
- MatrixParam->getNumColumns() != MatrixArg->getNumColumns()) {
+ if (MP->getNumRows() != MA->getNumRows() ||
+ MP->getNumColumns() != MA->getNumColumns()) {
return Sema::TDK_NonDeducedMismatch;
}
// Perform deduction on element types.
return DeduceTemplateArgumentsByTypeMatch(
- S, TemplateParams, MatrixParam->getElementType(),
- MatrixArg->getElementType(), Info, Deduced, TDF);
+ S, TemplateParams, MP->getElementType(), MA->getElementType(), Info,
+ Deduced, TDF);
}
case Type::DependentSizedMatrix: {
- const MatrixType *MatrixArg = dyn_cast<MatrixType>(Arg);
- if (!MatrixArg)
+ const auto *MP = P->castAs<DependentSizedMatrixType>();
+ const auto *MA = A->getAs<MatrixType>();
+ if (!MA)
return Sema::TDK_NonDeducedMismatch;
// Check the element type of the matrixes.
- const DependentSizedMatrixType *MatrixParam =
- cast<DependentSizedMatrixType>(Param);
- if (Sema::TemplateDeductionResult Result =
- DeduceTemplateArgumentsByTypeMatch(
- S, TemplateParams, MatrixParam->getElementType(),
- MatrixArg->getElementType(), Info, Deduced, TDF))
+ if (auto Result = DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, MP->getElementType(), MA->getElementType(),
+ Info, Deduced, TDF))
return Result;
// Try to deduce a matrix dimension.
auto DeduceMatrixArg =
[&S, &Info, &Deduced, &TemplateParams](
- Expr *ParamExpr, const MatrixType *Arg,
+ Expr *ParamExpr, const MatrixType *A,
unsigned (ConstantMatrixType::*GetArgDimension)() const,
Expr *(DependentSizedMatrixType::*GetArgDimensionExpr)() const) {
- const auto *ArgConstMatrix = dyn_cast<ConstantMatrixType>(Arg);
- const auto *ArgDepMatrix = dyn_cast<DependentSizedMatrixType>(Arg);
+ const auto *ACM = dyn_cast<ConstantMatrixType>(A);
+ const auto *ADM = dyn_cast<DependentSizedMatrixType>(A);
if (!ParamExpr->isValueDependent()) {
- Optional<llvm::APSInt> ParamConst =
+ std::optional<llvm::APSInt> ParamConst =
ParamExpr->getIntegerConstantExpr(S.Context);
if (!ParamConst)
return Sema::TDK_NonDeducedMismatch;
- if (ArgConstMatrix) {
- if ((ArgConstMatrix->*GetArgDimension)() == *ParamConst)
+ if (ACM) {
+ if ((ACM->*GetArgDimension)() == *ParamConst)
return Sema::TDK_Success;
return Sema::TDK_NonDeducedMismatch;
}
- Expr *ArgExpr = (ArgDepMatrix->*GetArgDimensionExpr)();
- if (!ArgExpr->isValueDependent())
- if (Optional<llvm::APSInt> ArgConst =
- ArgExpr->getIntegerConstantExpr(S.Context))
- if (*ArgConst == *ParamConst)
- return Sema::TDK_Success;
+ Expr *ArgExpr = (ADM->*GetArgDimensionExpr)();
+ if (std::optional<llvm::APSInt> ArgConst =
+ ArgExpr->getIntegerConstantExpr(S.Context))
+ if (*ArgConst == *ParamConst)
+ return Sema::TDK_Success;
return Sema::TDK_NonDeducedMismatch;
}
@@ -2182,27 +2132,26 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
if (!NTTP)
return Sema::TDK_Success;
- if (ArgConstMatrix) {
+ if (ACM) {
llvm::APSInt ArgConst(
S.Context.getTypeSize(S.Context.getSizeType()));
- ArgConst = (ArgConstMatrix->*GetArgDimension)();
+ ArgConst = (ACM->*GetArgDimension)();
return DeduceNonTypeTemplateArgument(
S, TemplateParams, NTTP, ArgConst, S.Context.getSizeType(),
/*ArrayBound=*/true, Info, Deduced);
}
- return DeduceNonTypeTemplateArgument(
- S, TemplateParams, NTTP, (ArgDepMatrix->*GetArgDimensionExpr)(),
- Info, Deduced);
+ return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP,
+ (ADM->*GetArgDimensionExpr)(),
+ Info, Deduced);
};
- auto Result = DeduceMatrixArg(MatrixParam->getRowExpr(), MatrixArg,
- &ConstantMatrixType::getNumRows,
- &DependentSizedMatrixType::getRowExpr);
- if (Result)
+ if (auto Result = DeduceMatrixArg(MP->getRowExpr(), MA,
+ &ConstantMatrixType::getNumRows,
+ &DependentSizedMatrixType::getRowExpr))
return Result;
- return DeduceMatrixArg(MatrixParam->getColumnExpr(), MatrixArg,
+ return DeduceMatrixArg(MP->getColumnExpr(), MA,
&ConstantMatrixType::getNumColumns,
&DependentSizedMatrixType::getColumnExpr);
}
@@ -2211,44 +2160,39 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
//
// T __attribute__(((address_space(N))))
case Type::DependentAddressSpace: {
- const DependentAddressSpaceType *AddressSpaceParam =
- cast<DependentAddressSpaceType>(Param);
+ const auto *ASP = P->castAs<DependentAddressSpaceType>();
- if (const DependentAddressSpaceType *AddressSpaceArg =
- dyn_cast<DependentAddressSpaceType>(Arg)) {
+ if (const auto *ASA = A->getAs<DependentAddressSpaceType>()) {
// Perform deduction on the pointer type.
- if (Sema::TemplateDeductionResult Result =
- DeduceTemplateArgumentsByTypeMatch(
- S, TemplateParams, AddressSpaceParam->getPointeeType(),
- AddressSpaceArg->getPointeeType(), Info, Deduced, TDF))
+ if (auto Result = DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, ASP->getPointeeType(), ASA->getPointeeType(),
+ Info, Deduced, TDF))
return Result;
// Perform deduction on the address space, if we can.
- const NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr(
- Info, AddressSpaceParam->getAddrSpaceExpr());
+ const NonTypeTemplateParmDecl *NTTP =
+ getDeducedParameterFromExpr(Info, ASP->getAddrSpaceExpr());
if (!NTTP)
return Sema::TDK_Success;
return DeduceNonTypeTemplateArgument(
- S, TemplateParams, NTTP, AddressSpaceArg->getAddrSpaceExpr(), Info,
- Deduced);
+ S, TemplateParams, NTTP, ASA->getAddrSpaceExpr(), Info, Deduced);
}
- if (isTargetAddressSpace(Arg.getAddressSpace())) {
+ if (isTargetAddressSpace(A.getAddressSpace())) {
llvm::APSInt ArgAddressSpace(S.Context.getTypeSize(S.Context.IntTy),
false);
- ArgAddressSpace = toTargetAddressSpace(Arg.getAddressSpace());
+ ArgAddressSpace = toTargetAddressSpace(A.getAddressSpace());
// Perform deduction on the pointer types.
- if (Sema::TemplateDeductionResult Result =
- DeduceTemplateArgumentsByTypeMatch(
- S, TemplateParams, AddressSpaceParam->getPointeeType(),
- S.Context.removeAddrSpaceQualType(Arg), Info, Deduced, TDF))
+ if (auto Result = DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, ASP->getPointeeType(),
+ S.Context.removeAddrSpaceQualType(A), Info, Deduced, TDF))
return Result;
// Perform deduction on the address space, if we can.
- const NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr(
- Info, AddressSpaceParam->getAddrSpaceExpr());
+ const NonTypeTemplateParmDecl *NTTP =
+ getDeducedParameterFromExpr(Info, ASP->getAddrSpaceExpr());
if (!NTTP)
return Sema::TDK_Success;
@@ -2259,31 +2203,32 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
return Sema::TDK_NonDeducedMismatch;
}
- case Type::DependentExtInt: {
- const auto *IntParam = cast<DependentExtIntType>(Param);
+ case Type::DependentBitInt: {
+ const auto *IP = P->castAs<DependentBitIntType>();
- if (const auto *IntArg = dyn_cast<ExtIntType>(Arg)){
- if (IntParam->isUnsigned() != IntArg->isUnsigned())
+ if (const auto *IA = A->getAs<BitIntType>()) {
+ if (IP->isUnsigned() != IA->isUnsigned())
return Sema::TDK_NonDeducedMismatch;
const NonTypeTemplateParmDecl *NTTP =
- getDeducedParameterFromExpr(Info, IntParam->getNumBitsExpr());
+ getDeducedParameterFromExpr(Info, IP->getNumBitsExpr());
if (!NTTP)
return Sema::TDK_Success;
llvm::APSInt ArgSize(S.Context.getTypeSize(S.Context.IntTy), false);
- ArgSize = IntArg->getNumBits();
+ ArgSize = IA->getNumBits();
return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP, ArgSize,
S.Context.IntTy, true, Info,
Deduced);
}
- if (const auto *IntArg = dyn_cast<DependentExtIntType>(Arg)) {
- if (IntParam->isUnsigned() != IntArg->isUnsigned())
+ if (const auto *IA = A->getAs<DependentBitIntType>()) {
+ if (IP->isUnsigned() != IA->isUnsigned())
return Sema::TDK_NonDeducedMismatch;
return Sema::TDK_Success;
}
+
return Sema::TDK_NonDeducedMismatch;
}
@@ -2293,125 +2238,121 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
case Type::UnresolvedUsing:
case Type::Decltype:
case Type::UnaryTransform:
- case Type::Auto:
case Type::DeducedTemplateSpecialization:
case Type::DependentTemplateSpecialization:
case Type::PackExpansion:
case Type::Pipe:
// No template argument deduction for these types
return Sema::TDK_Success;
- }
+ }
llvm_unreachable("Invalid Type Class!");
}
static Sema::TemplateDeductionResult
-DeduceTemplateArguments(Sema &S,
- TemplateParameterList *TemplateParams,
- const TemplateArgument &Param,
- TemplateArgument Arg,
+DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
+ const TemplateArgument &P, TemplateArgument A,
TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
// If the template argument is a pack expansion, perform template argument
// deduction against the pattern of that expansion. This only occurs during
// partial ordering.
- if (Arg.isPackExpansion())
- Arg = Arg.getPackExpansionPattern();
+ if (A.isPackExpansion())
+ A = A.getPackExpansionPattern();
- switch (Param.getKind()) {
+ switch (P.getKind()) {
case TemplateArgument::Null:
llvm_unreachable("Null template argument in parameter list");
case TemplateArgument::Type:
- if (Arg.getKind() == TemplateArgument::Type)
- return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
- Param.getAsType(),
- Arg.getAsType(),
- Info, Deduced, 0);
- Info.FirstArg = Param;
- Info.SecondArg = Arg;
+ if (A.getKind() == TemplateArgument::Type)
+ return DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, P.getAsType(), A.getAsType(), Info, Deduced, 0);
+ Info.FirstArg = P;
+ Info.SecondArg = A;
return Sema::TDK_NonDeducedMismatch;
case TemplateArgument::Template:
- if (Arg.getKind() == TemplateArgument::Template)
- return DeduceTemplateArguments(S, TemplateParams,
- Param.getAsTemplate(),
- Arg.getAsTemplate(), Info, Deduced);
- Info.FirstArg = Param;
- Info.SecondArg = Arg;
+ if (A.getKind() == TemplateArgument::Template)
+ return DeduceTemplateArguments(S, TemplateParams, P.getAsTemplate(),
+ A.getAsTemplate(), Info, Deduced);
+ Info.FirstArg = P;
+ Info.SecondArg = A;
return Sema::TDK_NonDeducedMismatch;
case TemplateArgument::TemplateExpansion:
llvm_unreachable("caller should handle pack expansions");
case TemplateArgument::Declaration:
- if (Arg.getKind() == TemplateArgument::Declaration &&
- isSameDeclaration(Param.getAsDecl(), Arg.getAsDecl()))
+ if (A.getKind() == TemplateArgument::Declaration &&
+ isSameDeclaration(P.getAsDecl(), A.getAsDecl()))
return Sema::TDK_Success;
- Info.FirstArg = Param;
- Info.SecondArg = Arg;
+ Info.FirstArg = P;
+ Info.SecondArg = A;
return Sema::TDK_NonDeducedMismatch;
case TemplateArgument::NullPtr:
- if (Arg.getKind() == TemplateArgument::NullPtr &&
- S.Context.hasSameType(Param.getNullPtrType(), Arg.getNullPtrType()))
+ if (A.getKind() == TemplateArgument::NullPtr &&
+ S.Context.hasSameType(P.getNullPtrType(), A.getNullPtrType()))
return Sema::TDK_Success;
- Info.FirstArg = Param;
- Info.SecondArg = Arg;
+ Info.FirstArg = P;
+ Info.SecondArg = A;
return Sema::TDK_NonDeducedMismatch;
case TemplateArgument::Integral:
- if (Arg.getKind() == TemplateArgument::Integral) {
- if (hasSameExtendedValue(Param.getAsIntegral(), Arg.getAsIntegral()))
+ if (A.getKind() == TemplateArgument::Integral) {
+ if (hasSameExtendedValue(P.getAsIntegral(), A.getAsIntegral()))
return Sema::TDK_Success;
-
- Info.FirstArg = Param;
- Info.SecondArg = Arg;
- return Sema::TDK_NonDeducedMismatch;
}
+ Info.FirstArg = P;
+ Info.SecondArg = A;
+ return Sema::TDK_NonDeducedMismatch;
- if (Arg.getKind() == TemplateArgument::Expression) {
- Info.FirstArg = Param;
- Info.SecondArg = Arg;
- return Sema::TDK_NonDeducedMismatch;
- }
+ case TemplateArgument::StructuralValue:
+ if (A.getKind() == TemplateArgument::StructuralValue &&
+ A.structurallyEquals(P))
+ return Sema::TDK_Success;
- Info.FirstArg = Param;
- Info.SecondArg = Arg;
+ Info.FirstArg = P;
+ Info.SecondArg = A;
return Sema::TDK_NonDeducedMismatch;
case TemplateArgument::Expression:
if (const NonTypeTemplateParmDecl *NTTP =
- getDeducedParameterFromExpr(Info, Param.getAsExpr())) {
- if (Arg.getKind() == TemplateArgument::Integral)
- return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP,
- Arg.getAsIntegral(),
- Arg.getIntegralType(),
- /*ArrayBound=*/false,
- Info, Deduced);
- if (Arg.getKind() == TemplateArgument::NullPtr)
+ getDeducedParameterFromExpr(Info, P.getAsExpr())) {
+ switch (A.getKind()) {
+ case TemplateArgument::Integral:
+ case TemplateArgument::Expression:
+ case TemplateArgument::StructuralValue:
+ return DeduceNonTypeTemplateArgument(
+ S, TemplateParams, NTTP, DeducedTemplateArgument(A),
+ A.getNonTypeTemplateArgumentType(), Info, Deduced);
+
+ case TemplateArgument::NullPtr:
return DeduceNullPtrTemplateArgument(S, TemplateParams, NTTP,
- Arg.getNullPtrType(),
- Info, Deduced);
- if (Arg.getKind() == TemplateArgument::Expression)
- return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP,
- Arg.getAsExpr(), Info, Deduced);
- if (Arg.getKind() == TemplateArgument::Declaration)
- return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP,
- Arg.getAsDecl(),
- Arg.getParamTypeForDecl(),
- Info, Deduced);
+ A.getNullPtrType(), Info, Deduced);
- Info.FirstArg = Param;
- Info.SecondArg = Arg;
- return Sema::TDK_NonDeducedMismatch;
+ case TemplateArgument::Declaration:
+ return DeduceNonTypeTemplateArgument(
+ S, TemplateParams, NTTP, A.getAsDecl(), A.getParamTypeForDecl(),
+ Info, Deduced);
+
+ case TemplateArgument::Null:
+ case TemplateArgument::Type:
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ case TemplateArgument::Pack:
+ Info.FirstArg = P;
+ Info.SecondArg = A;
+ return Sema::TDK_NonDeducedMismatch;
+ }
+ llvm_unreachable("Unknown template argument kind");
}
// Can't deduce anything, but that's okay.
return Sema::TDK_Success;
-
case TemplateArgument::Pack:
llvm_unreachable("Argument packs should be expanded by the caller!");
}
@@ -2464,8 +2405,8 @@ static bool hasPackExpansionBeforeEnd(ArrayRef<TemplateArgument> Args) {
static Sema::TemplateDeductionResult
DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
- ArrayRef<TemplateArgument> Params,
- ArrayRef<TemplateArgument> Args,
+ ArrayRef<TemplateArgument> Ps,
+ ArrayRef<TemplateArgument> As,
TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
bool NumberOfArgumentsMustMatch) {
@@ -2473,7 +2414,7 @@ DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
// If the template argument list of P contains a pack expansion that is not
// the last template argument, the entire template argument list is a
// non-deduced context.
- if (hasPackExpansionBeforeEnd(Params))
+ if (hasPackExpansionBeforeEnd(Ps))
return Sema::TDK_Success;
// C++0x [temp.deduct.type]p9:
@@ -2481,12 +2422,13 @@ DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
// respective template argument list P is compared with the corresponding
// argument Ai of the corresponding template argument list of A.
unsigned ArgIdx = 0, ParamIdx = 0;
- for (; hasTemplateArgumentForDeduction(Params, ParamIdx); ++ParamIdx) {
- if (!Params[ParamIdx].isPackExpansion()) {
+ for (; hasTemplateArgumentForDeduction(Ps, ParamIdx); ++ParamIdx) {
+ const TemplateArgument &P = Ps[ParamIdx];
+ if (!P.isPackExpansion()) {
// The simple case: deduce template arguments by matching Pi and Ai.
// Check whether we have enough arguments.
- if (!hasTemplateArgumentForDeduction(Args, ArgIdx))
+ if (!hasTemplateArgumentForDeduction(As, ArgIdx))
return NumberOfArgumentsMustMatch
? Sema::TDK_MiscellaneousDeductionFailure
: Sema::TDK_Success;
@@ -2494,14 +2436,12 @@ DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
// C++1z [temp.deduct.type]p9:
// During partial ordering, if Ai was originally a pack expansion [and]
// Pi is not a pack expansion, template argument deduction fails.
- if (Args[ArgIdx].isPackExpansion())
+ if (As[ArgIdx].isPackExpansion())
return Sema::TDK_MiscellaneousDeductionFailure;
// Perform deduction for this Pi/Ai pair.
- if (Sema::TemplateDeductionResult Result
- = DeduceTemplateArguments(S, TemplateParams,
- Params[ParamIdx], Args[ArgIdx],
- Info, Deduced))
+ if (auto Result = DeduceTemplateArguments(S, TemplateParams, P,
+ As[ArgIdx], Info, Deduced))
return Result;
// Move to the next argument.
@@ -2516,7 +2456,7 @@ DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
// each remaining argument in the template argument list of A. Each
// comparison deduces template arguments for subsequent positions in the
// template parameter packs expanded by Pi.
- TemplateArgument Pattern = Params[ParamIdx].getPackExpansionPattern();
+ TemplateArgument Pattern = P.getPackExpansionPattern();
// Prepare to deduce the packs within the pattern.
PackDeductionScope PackScope(S, TemplateParams, Deduced, Info, Pattern);
@@ -2524,13 +2464,12 @@ DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
// Keep track of the deduced template arguments for each parameter pack
// expanded by this pack expansion (the outer index) and for each
// template argument (the inner SmallVectors).
- for (; hasTemplateArgumentForDeduction(Args, ArgIdx) &&
+ for (; hasTemplateArgumentForDeduction(As, ArgIdx) &&
PackScope.hasNextElement();
++ArgIdx) {
// Deduce template arguments from the pattern.
- if (Sema::TemplateDeductionResult Result
- = DeduceTemplateArguments(S, TemplateParams, Pattern, Args[ArgIdx],
- Info, Deduced))
+ if (auto Result = DeduceTemplateArguments(S, TemplateParams, Pattern,
+ As[ArgIdx], Info, Deduced))
return Result;
PackScope.nextPackElement();
@@ -2546,21 +2485,21 @@ DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
}
static Sema::TemplateDeductionResult
-DeduceTemplateArguments(Sema &S,
- TemplateParameterList *TemplateParams,
+DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
const TemplateArgumentList &ParamList,
const TemplateArgumentList &ArgList,
TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
return DeduceTemplateArguments(S, TemplateParams, ParamList.asArray(),
ArgList.asArray(), Info, Deduced,
- /*NumberOfArgumentsMustMatch*/false);
+ /*NumberOfArgumentsMustMatch=*/false);
}
/// Determine whether two template arguments are the same.
static bool isSameTemplateArg(ASTContext &Context,
TemplateArgument X,
const TemplateArgument &Y,
+ bool PartialOrdering,
bool PackExpansionMatchesPack = false) {
// If we're checking deduced arguments (X) against original arguments (Y),
// we will have flattened packs to non-expansions in X.
@@ -2594,6 +2533,9 @@ static bool isSameTemplateArg(ASTContext &Context,
case TemplateArgument::Integral:
return hasSameExtendedValue(X.getAsIntegral(), Y.getAsIntegral());
+ case TemplateArgument::StructuralValue:
+ return X.structurallyEquals(Y);
+
case TemplateArgument::Expression: {
llvm::FoldingSetNodeID XID, YID;
X.getAsExpr()->Profile(XID, Context, true);
@@ -2601,18 +2543,33 @@ static bool isSameTemplateArg(ASTContext &Context,
return XID == YID;
}
- case TemplateArgument::Pack:
- if (X.pack_size() != Y.pack_size())
- return false;
+ case TemplateArgument::Pack: {
+ unsigned PackIterationSize = X.pack_size();
+ if (X.pack_size() != Y.pack_size()) {
+ if (!PartialOrdering)
+ return false;
- for (TemplateArgument::pack_iterator XP = X.pack_begin(),
- XPEnd = X.pack_end(),
- YP = Y.pack_begin();
- XP != XPEnd; ++XP, ++YP)
- if (!isSameTemplateArg(Context, *XP, *YP, PackExpansionMatchesPack))
+ // C++0x [temp.deduct.type]p9:
+ // During partial ordering, if Ai was originally a pack expansion:
+ // - if P does not contain a template argument corresponding to Ai
+ // then Ai is ignored;
+ bool XHasMoreArg = X.pack_size() > Y.pack_size();
+ if (!(XHasMoreArg && X.pack_elements().back().isPackExpansion()) &&
+ !(!XHasMoreArg && Y.pack_elements().back().isPackExpansion()))
return false;
+ if (XHasMoreArg)
+ PackIterationSize = Y.pack_size();
+ }
+
+ ArrayRef<TemplateArgument> XP = X.pack_elements();
+ ArrayRef<TemplateArgument> YP = Y.pack_elements();
+ for (unsigned i = 0; i < PackIterationSize; ++i)
+ if (!isSameTemplateArg(Context, XP[i], YP[i], PartialOrdering,
+ PackExpansionMatchesPack))
+ return false;
return true;
+ }
}
llvm_unreachable("Invalid TemplateArgument Kind!");
@@ -2659,9 +2616,9 @@ Sema::getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
E);
}
- case TemplateArgument::Integral: {
- Expr *E =
- BuildExpressionFromIntegralTemplateArgument(Arg, Loc).getAs<Expr>();
+ case TemplateArgument::Integral:
+ case TemplateArgument::StructuralValue: {
+ Expr *E = BuildExpressionFromNonTypeTemplateArgument(Arg, Loc).get();
return TemplateArgumentLoc(TemplateArgument(E), E);
}
@@ -2702,13 +2659,11 @@ Sema::getIdentityTemplateArgumentLoc(NamedDecl *TemplateParm,
/// Convert the given deduced template argument and add it to the set of
/// fully-converted template arguments.
-static bool
-ConvertDeducedTemplateArgument(Sema &S, NamedDecl *Param,
- DeducedTemplateArgument Arg,
- NamedDecl *Template,
- TemplateDeductionInfo &Info,
- bool IsDeduced,
- SmallVectorImpl<TemplateArgument> &Output) {
+static bool ConvertDeducedTemplateArgument(
+ Sema &S, NamedDecl *Param, DeducedTemplateArgument Arg, NamedDecl *Template,
+ TemplateDeductionInfo &Info, bool IsDeduced,
+ SmallVectorImpl<TemplateArgument> &SugaredOutput,
+ SmallVectorImpl<TemplateArgument> &CanonicalOutput) {
auto ConvertArg = [&](DeducedTemplateArgument Arg,
unsigned ArgumentPackIndex) {
// Convert the deduced template argument into a template
@@ -2720,7 +2675,8 @@ ConvertDeducedTemplateArgument(Sema &S, NamedDecl *Param,
// Check the template argument, converting it as necessary.
return S.CheckTemplateArgument(
Param, ArgLoc, Template, Template->getLocation(),
- Template->getSourceRange().getEnd(), ArgumentPackIndex, Output,
+ Template->getSourceRange().getEnd(), ArgumentPackIndex, SugaredOutput,
+ CanonicalOutput,
IsDeduced
? (Arg.wasDeducedFromArrayBound() ? Sema::CTAK_DeducedFromArrayBound
: Sema::CTAK_Deduced)
@@ -2730,7 +2686,8 @@ ConvertDeducedTemplateArgument(Sema &S, NamedDecl *Param,
if (Arg.getKind() == TemplateArgument::Pack) {
// This is a template argument pack, so check each of its arguments against
// the template parameter.
- SmallVector<TemplateArgument, 2> PackedArgsBuilder;
+ SmallVector<TemplateArgument, 2> SugaredPackedArgsBuilder,
+ CanonicalPackedArgsBuilder;
for (const auto &P : Arg.pack_elements()) {
// When converting the deduced template argument, append it to the
// general output list. We need to do this so that the template argument
@@ -2749,23 +2706,24 @@ ConvertDeducedTemplateArgument(Sema &S, NamedDecl *Param,
<< Arg << Param;
return true;
}
- if (ConvertArg(InnerArg, PackedArgsBuilder.size()))
+ if (ConvertArg(InnerArg, SugaredPackedArgsBuilder.size()))
return true;
// Move the converted template argument into our argument pack.
- PackedArgsBuilder.push_back(Output.pop_back_val());
+ SugaredPackedArgsBuilder.push_back(SugaredOutput.pop_back_val());
+ CanonicalPackedArgsBuilder.push_back(CanonicalOutput.pop_back_val());
}
// If the pack is empty, we still need to substitute into the parameter
// itself, in case that substitution fails.
- if (PackedArgsBuilder.empty()) {
+ if (SugaredPackedArgsBuilder.empty()) {
LocalInstantiationScope Scope(S);
- TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack, Output);
- MultiLevelTemplateArgumentList Args(TemplateArgs);
+ MultiLevelTemplateArgumentList Args(Template, SugaredOutput,
+ /*Final=*/true);
if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
Sema::InstantiatingTemplate Inst(S, Template->getLocation(), Template,
- NTTP, Output,
+ NTTP, SugaredOutput,
Template->getSourceRange());
if (Inst.isInvalid() ||
S.SubstType(NTTP->getType(), Args, NTTP->getLocation(),
@@ -2773,7 +2731,7 @@ ConvertDeducedTemplateArgument(Sema &S, NamedDecl *Param,
return true;
} else if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Param)) {
Sema::InstantiatingTemplate Inst(S, Template->getLocation(), Template,
- TTP, Output,
+ TTP, SugaredOutput,
Template->getSourceRange());
if (Inst.isInvalid() || !S.SubstDecl(TTP, S.CurContext, Args))
return true;
@@ -2782,8 +2740,10 @@ ConvertDeducedTemplateArgument(Sema &S, NamedDecl *Param,
}
// Create the resulting argument pack.
- Output.push_back(
- TemplateArgument::CreatePackCopy(S.Context, PackedArgsBuilder));
+ SugaredOutput.push_back(
+ TemplateArgument::CreatePackCopy(S.Context, SugaredPackedArgsBuilder));
+ CanonicalOutput.push_back(TemplateArgument::CreatePackCopy(
+ S.Context, CanonicalPackedArgsBuilder));
return false;
}
@@ -2793,11 +2753,13 @@ ConvertDeducedTemplateArgument(Sema &S, NamedDecl *Param,
// FIXME: This should not be a template, but
// ClassTemplatePartialSpecializationDecl sadly does not derive from
// TemplateDecl.
-template<typename TemplateDeclT>
+template <typename TemplateDeclT>
static Sema::TemplateDeductionResult ConvertDeducedTemplateArguments(
Sema &S, TemplateDeclT *Template, bool IsDeduced,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
- TemplateDeductionInfo &Info, SmallVectorImpl<TemplateArgument> &Builder,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<TemplateArgument> &SugaredBuilder,
+ SmallVectorImpl<TemplateArgument> &CanonicalBuilder,
LocalInstantiationScope *CurrentInstantiationScope = nullptr,
unsigned NumAlreadyConverted = 0, bool PartialOverloading = false) {
TemplateParameterList *TemplateParams = Template->getTemplateParameters();
@@ -2831,7 +2793,9 @@ static Sema::TemplateDeductionResult ConvertDeducedTemplateArguments(
// We have already fully type-checked and converted this
// argument, because it was explicitly-specified. Just record the
// presence of this argument.
- Builder.push_back(Deduced[I]);
+ SugaredBuilder.push_back(Deduced[I]);
+ CanonicalBuilder.push_back(
+ S.Context.getCanonicalTemplateArgument(Deduced[I]));
continue;
}
}
@@ -2839,10 +2803,13 @@ static Sema::TemplateDeductionResult ConvertDeducedTemplateArguments(
// We may have deduced this argument, so it still needs to be
// checked and converted.
if (ConvertDeducedTemplateArgument(S, Param, Deduced[I], Template, Info,
- IsDeduced, Builder)) {
+ IsDeduced, SugaredBuilder,
+ CanonicalBuilder)) {
Info.Param = makeTemplateParameter(Param);
// FIXME: These template arguments are temporary. Free them!
- Info.reset(TemplateArgumentList::CreateCopy(S.Context, Builder));
+ Info.reset(
+ TemplateArgumentList::CreateCopy(S.Context, SugaredBuilder),
+ TemplateArgumentList::CreateCopy(S.Context, CanonicalBuilder));
return Sema::TDK_SubstitutionFailure;
}
@@ -2858,15 +2825,31 @@ static Sema::TemplateDeductionResult ConvertDeducedTemplateArguments(
return Sema::TDK_Incomplete;
}
- TemplateArgumentLoc DefArg = S.SubstDefaultTemplateArgumentIfAvailable(
- TD, TD->getLocation(), TD->getSourceRange().getEnd(), Param, Builder,
- HasDefaultArg);
+ TemplateArgumentLoc DefArg;
+ {
+ Qualifiers ThisTypeQuals;
+ CXXRecordDecl *ThisContext = nullptr;
+ if (auto *Rec = dyn_cast<CXXRecordDecl>(TD->getDeclContext()))
+ if (Rec->isLambda())
+ if (auto *Method = dyn_cast<CXXMethodDecl>(Rec->getDeclContext())) {
+ ThisContext = Method->getParent();
+ ThisTypeQuals = Method->getMethodQualifiers();
+ }
+
+ Sema::CXXThisScopeRAII ThisScope(S, ThisContext, ThisTypeQuals,
+ S.getLangOpts().CPlusPlus17);
+
+ DefArg = S.SubstDefaultTemplateArgumentIfAvailable(
+ TD, TD->getLocation(), TD->getSourceRange().getEnd(), Param,
+ SugaredBuilder, CanonicalBuilder, HasDefaultArg);
+ }
// If there was no default argument, deduction is incomplete.
if (DefArg.getArgument().isNull()) {
Info.Param = makeTemplateParameter(
const_cast<NamedDecl *>(TemplateParams->getParam(I)));
- Info.reset(TemplateArgumentList::CreateCopy(S.Context, Builder));
+ Info.reset(TemplateArgumentList::CreateCopy(S.Context, SugaredBuilder),
+ TemplateArgumentList::CreateCopy(S.Context, CanonicalBuilder));
if (PartialOverloading) break;
return HasDefaultArg ? Sema::TDK_SubstitutionFailure
@@ -2874,13 +2857,14 @@ static Sema::TemplateDeductionResult ConvertDeducedTemplateArguments(
}
// Check whether we can actually use the default argument.
- if (S.CheckTemplateArgument(Param, DefArg, TD, TD->getLocation(),
- TD->getSourceRange().getEnd(), 0, Builder,
- Sema::CTAK_Specified)) {
+ if (S.CheckTemplateArgument(
+ Param, DefArg, TD, TD->getLocation(), TD->getSourceRange().getEnd(),
+ 0, SugaredBuilder, CanonicalBuilder, Sema::CTAK_Specified)) {
Info.Param = makeTemplateParameter(
const_cast<NamedDecl *>(TemplateParams->getParam(I)));
// FIXME: These template arguments are temporary. Free them!
- Info.reset(TemplateArgumentList::CreateCopy(S.Context, Builder));
+ Info.reset(TemplateArgumentList::CreateCopy(S.Context, SugaredBuilder),
+ TemplateArgumentList::CreateCopy(S.Context, CanonicalBuilder));
return Sema::TDK_SubstitutionFailure;
}
@@ -2907,19 +2891,54 @@ template<>
struct IsPartialSpecialization<VarTemplatePartialSpecializationDecl> {
static constexpr bool value = true;
};
+template <typename TemplateDeclT>
+static bool DeducedArgsNeedReplacement(TemplateDeclT *Template) {
+ return false;
+}
+template <>
+bool DeducedArgsNeedReplacement<VarTemplatePartialSpecializationDecl>(
+ VarTemplatePartialSpecializationDecl *Spec) {
+ return !Spec->isClassScopeExplicitSpecialization();
+}
+template <>
+bool DeducedArgsNeedReplacement<ClassTemplatePartialSpecializationDecl>(
+ ClassTemplatePartialSpecializationDecl *Spec) {
+ return !Spec->isClassScopeExplicitSpecialization();
+}
-template<typename TemplateDeclT>
+template <typename TemplateDeclT>
static Sema::TemplateDeductionResult
-CheckDeducedArgumentConstraints(Sema& S, TemplateDeclT *Template,
- ArrayRef<TemplateArgument> DeducedArgs,
- TemplateDeductionInfo& Info) {
+CheckDeducedArgumentConstraints(Sema &S, TemplateDeclT *Template,
+ ArrayRef<TemplateArgument> SugaredDeducedArgs,
+ ArrayRef<TemplateArgument> CanonicalDeducedArgs,
+ TemplateDeductionInfo &Info) {
llvm::SmallVector<const Expr *, 3> AssociatedConstraints;
Template->getAssociatedConstraints(AssociatedConstraints);
- if (S.CheckConstraintSatisfaction(Template, AssociatedConstraints,
- DeducedArgs, Info.getLocation(),
+
+ bool NeedsReplacement = DeducedArgsNeedReplacement(Template);
+ TemplateArgumentList DeducedTAL{TemplateArgumentList::OnStack,
+ CanonicalDeducedArgs};
+
+ MultiLevelTemplateArgumentList MLTAL = S.getTemplateInstantiationArgs(
+ Template, Template->getDeclContext(), /*Final=*/false,
+ /*InnerMost=*/NeedsReplacement ? nullptr : &DeducedTAL,
+ /*RelativeToPrimary=*/true, /*Pattern=*/
+ nullptr, /*ForConstraintInstantiation=*/true);
+
+ // getTemplateInstantiationArgs picks up the non-deduced version of the
+ // template args when this is a variable template partial specialization and
+ // not class-scope explicit specialization, so replace with Deduced Args
+ // instead of adding to inner-most.
+ if (NeedsReplacement)
+ MLTAL.replaceInnermostTemplateArguments(Template, CanonicalDeducedArgs);
+
+ if (S.CheckConstraintSatisfaction(Template, AssociatedConstraints, MLTAL,
+ Info.getLocation(),
Info.AssociatedConstraintsSatisfaction) ||
!Info.AssociatedConstraintsSatisfaction.IsSatisfied) {
- Info.reset(TemplateArgumentList::CreateCopy(S.Context, DeducedArgs));
+ Info.reset(
+ TemplateArgumentList::CreateCopy(S.Context, SugaredDeducedArgs),
+ TemplateArgumentList::CreateCopy(S.Context, CanonicalDeducedArgs));
return Sema::TDK_ConstraintsNotSatisfied;
}
return Sema::TDK_Success;
@@ -2944,16 +2963,19 @@ FinishTemplateArgumentDeduction(
// C++ [temp.deduct.type]p2:
// [...] or if any template argument remains neither deduced nor
// explicitly specified, template argument deduction fails.
- SmallVector<TemplateArgument, 4> Builder;
+ SmallVector<TemplateArgument, 4> SugaredBuilder, CanonicalBuilder;
if (auto Result = ConvertDeducedTemplateArguments(
- S, Partial, IsPartialOrdering, Deduced, Info, Builder))
+ S, Partial, IsPartialOrdering, Deduced, Info, SugaredBuilder,
+ CanonicalBuilder))
return Result;
// Form the template argument list from the deduced template arguments.
- TemplateArgumentList *DeducedArgumentList
- = TemplateArgumentList::CreateCopy(S.Context, Builder);
+ TemplateArgumentList *SugaredDeducedArgumentList =
+ TemplateArgumentList::CreateCopy(S.Context, SugaredBuilder);
+ TemplateArgumentList *CanonicalDeducedArgumentList =
+ TemplateArgumentList::CreateCopy(S.Context, CanonicalBuilder);
- Info.reset(DeducedArgumentList);
+ Info.reset(SugaredDeducedArgumentList, CanonicalDeducedArgumentList);
// Substitute the deduced template arguments into the template
// arguments of the class template partial specialization, and
@@ -2964,14 +2986,15 @@ FinishTemplateArgumentDeduction(
auto *Template = Partial->getSpecializedTemplate();
const ASTTemplateArgumentListInfo *PartialTemplArgInfo =
Partial->getTemplateArgsAsWritten();
- const TemplateArgumentLoc *PartialTemplateArgs =
- PartialTemplArgInfo->getTemplateArgs();
TemplateArgumentListInfo InstArgs(PartialTemplArgInfo->LAngleLoc,
PartialTemplArgInfo->RAngleLoc);
- if (S.Subst(PartialTemplateArgs, PartialTemplArgInfo->NumTemplateArgs,
- InstArgs, MultiLevelTemplateArgumentList(*DeducedArgumentList))) {
+ if (S.SubstTemplateArguments(PartialTemplArgInfo->arguments(),
+ MultiLevelTemplateArgumentList(Partial,
+ SugaredBuilder,
+ /*Final=*/true),
+ InstArgs)) {
unsigned ArgIdx = InstArgs.size(), ParamIdx = ArgIdx;
if (ParamIdx >= Partial->getTemplateParameters()->size())
ParamIdx = Partial->getTemplateParameters()->size() - 1;
@@ -2979,23 +3002,25 @@ FinishTemplateArgumentDeduction(
Decl *Param = const_cast<NamedDecl *>(
Partial->getTemplateParameters()->getParam(ParamIdx));
Info.Param = makeTemplateParameter(Param);
- Info.FirstArg = PartialTemplateArgs[ArgIdx].getArgument();
+ Info.FirstArg = (*PartialTemplArgInfo)[ArgIdx].getArgument();
return Sema::TDK_SubstitutionFailure;
}
bool ConstraintsNotSatisfied;
- SmallVector<TemplateArgument, 4> ConvertedInstArgs;
- if (S.CheckTemplateArgumentList(Template, Partial->getLocation(), InstArgs,
- false, ConvertedInstArgs,
- /*UpdateArgsWithConversions=*/true,
- &ConstraintsNotSatisfied))
- return ConstraintsNotSatisfied ? Sema::TDK_ConstraintsNotSatisfied :
- Sema::TDK_SubstitutionFailure;
+ SmallVector<TemplateArgument, 4> SugaredConvertedInstArgs,
+ CanonicalConvertedInstArgs;
+ if (S.CheckTemplateArgumentList(
+ Template, Partial->getLocation(), InstArgs, false,
+ SugaredConvertedInstArgs, CanonicalConvertedInstArgs,
+ /*UpdateArgsWithConversions=*/true, &ConstraintsNotSatisfied))
+ return ConstraintsNotSatisfied ? Sema::TDK_ConstraintsNotSatisfied
+ : Sema::TDK_SubstitutionFailure;
TemplateParameterList *TemplateParams = Template->getTemplateParameters();
for (unsigned I = 0, E = TemplateParams->size(); I != E; ++I) {
- TemplateArgument InstArg = ConvertedInstArgs.data()[I];
- if (!isSameTemplateArg(S.Context, TemplateArgs[I], InstArg)) {
+ TemplateArgument InstArg = SugaredConvertedInstArgs.data()[I];
+ if (!isSameTemplateArg(S.Context, TemplateArgs[I], InstArg,
+ IsPartialOrdering)) {
Info.Param = makeTemplateParameter(TemplateParams->getParam(I));
Info.FirstArg = TemplateArgs[I];
Info.SecondArg = InstArg;
@@ -3006,7 +3031,8 @@ FinishTemplateArgumentDeduction(
if (Trap.hasErrorOccurred())
return Sema::TDK_SubstitutionFailure;
- if (auto Result = CheckDeducedArgumentConstraints(S, Partial, Builder, Info))
+ if (auto Result = CheckDeducedArgumentConstraints(S, Partial, SugaredBuilder,
+ CanonicalBuilder, Info))
return Result;
return Sema::TDK_Success;
@@ -3030,17 +3056,20 @@ static Sema::TemplateDeductionResult FinishTemplateArgumentDeduction(
// C++ [temp.deduct.type]p2:
// [...] or if any template argument remains neither deduced nor
// explicitly specified, template argument deduction fails.
- SmallVector<TemplateArgument, 4> Builder;
+ SmallVector<TemplateArgument, 4> SugaredBuilder, CanonicalBuilder;
if (auto Result = ConvertDeducedTemplateArguments(
- S, Template, /*IsDeduced*/PartialOrdering, Deduced, Info, Builder))
+ S, Template, /*IsDeduced*/ PartialOrdering, Deduced, Info,
+ SugaredBuilder, CanonicalBuilder,
+ /*CurrentInstantiationScope=*/nullptr,
+ /*NumAlreadyConverted=*/0U, /*PartialOverloading=*/false))
return Result;
// Check that we produced the correct argument list.
TemplateParameterList *TemplateParams = Template->getTemplateParameters();
for (unsigned I = 0, E = TemplateParams->size(); I != E; ++I) {
- TemplateArgument InstArg = Builder[I];
- if (!isSameTemplateArg(S.Context, TemplateArgs[I], InstArg,
- /*PackExpansionMatchesPack*/true)) {
+ TemplateArgument InstArg = CanonicalBuilder[I];
+ if (!isSameTemplateArg(S.Context, TemplateArgs[I], InstArg, PartialOrdering,
+ /*PackExpansionMatchesPack=*/true)) {
Info.Param = makeTemplateParameter(TemplateParams->getParam(I));
Info.FirstArg = TemplateArgs[I];
Info.SecondArg = InstArg;
@@ -3051,8 +3080,8 @@ static Sema::TemplateDeductionResult FinishTemplateArgumentDeduction(
if (Trap.hasErrorOccurred())
return Sema::TDK_SubstitutionFailure;
- if (auto Result = CheckDeducedArgumentConstraints(S, Template, Builder,
- Info))
+ if (auto Result = CheckDeducedArgumentConstraints(S, Template, SugaredBuilder,
+ CanonicalBuilder, Info))
return Result;
return Sema::TDK_Success;
@@ -3204,14 +3233,12 @@ static bool isSimpleTemplateIdType(QualType T) {
///
/// \returns TDK_Success if substitution was successful, or some failure
/// condition.
-Sema::TemplateDeductionResult
-Sema::SubstituteExplicitTemplateArguments(
- FunctionTemplateDecl *FunctionTemplate,
- TemplateArgumentListInfo &ExplicitTemplateArgs,
- SmallVectorImpl<DeducedTemplateArgument> &Deduced,
- SmallVectorImpl<QualType> &ParamTypes,
- QualType *FunctionType,
- TemplateDeductionInfo &Info) {
+Sema::TemplateDeductionResult Sema::SubstituteExplicitTemplateArguments(
+ FunctionTemplateDecl *FunctionTemplate,
+ TemplateArgumentListInfo &ExplicitTemplateArgs,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
+ TemplateDeductionInfo &Info) {
FunctionDecl *Function = FunctionTemplate->getTemplatedDecl();
TemplateParameterList *TemplateParams
= FunctionTemplate->getTemplateParameters();
@@ -3219,7 +3246,7 @@ Sema::SubstituteExplicitTemplateArguments(
if (ExplicitTemplateArgs.size() == 0) {
// No arguments to substitute; just copy over the parameter types and
// fill in the function type.
- for (auto P : Function->parameters())
+ for (auto *P : Function->parameters())
ParamTypes.push_back(P->getType());
if (FunctionType)
@@ -3237,7 +3264,7 @@ Sema::SubstituteExplicitTemplateArguments(
// declaration order of their corresponding template-parameters. The
// template argument list shall not specify more template-arguments than
// there are corresponding template-parameters.
- SmallVector<TemplateArgument, 4> Builder;
+ SmallVector<TemplateArgument, 4> SugaredBuilder, CanonicalBuilder;
// Enter a new template instantiation context where we check the
// explicitly-specified template arguments against this function template,
@@ -3250,9 +3277,11 @@ Sema::SubstituteExplicitTemplateArguments(
return TDK_InstantiationDepth;
if (CheckTemplateArgumentList(FunctionTemplate, SourceLocation(),
- ExplicitTemplateArgs, true, Builder, false) ||
+ ExplicitTemplateArgs, true, SugaredBuilder,
+ CanonicalBuilder,
+ /*UpdateArgsWithConversions=*/false) ||
Trap.hasErrorOccurred()) {
- unsigned Index = Builder.size();
+ unsigned Index = SugaredBuilder.size();
if (Index >= TemplateParams->size())
return TDK_SubstitutionFailure;
Info.Param = makeTemplateParameter(TemplateParams->getParam(Index));
@@ -3261,9 +3290,12 @@ Sema::SubstituteExplicitTemplateArguments(
// Form the template argument list from the explicitly-specified
// template arguments.
- TemplateArgumentList *ExplicitArgumentList
- = TemplateArgumentList::CreateCopy(Context, Builder);
- Info.setExplicitArgs(ExplicitArgumentList);
+ TemplateArgumentList *SugaredExplicitArgumentList =
+ TemplateArgumentList::CreateCopy(Context, SugaredBuilder);
+ TemplateArgumentList *CanonicalExplicitArgumentList =
+ TemplateArgumentList::CreateCopy(Context, CanonicalBuilder);
+ Info.setExplicitArgs(SugaredExplicitArgumentList,
+ CanonicalExplicitArgumentList);
// Template argument deduction and the final substitution should be
// done in the context of the templated declaration. Explicit
@@ -3276,15 +3308,15 @@ Sema::SubstituteExplicitTemplateArguments(
// the explicit template arguments. They'll be used as part of deduction
// for this template parameter pack.
unsigned PartiallySubstitutedPackIndex = -1u;
- if (!Builder.empty()) {
- const TemplateArgument &Arg = Builder.back();
+ if (!CanonicalBuilder.empty()) {
+ const TemplateArgument &Arg = CanonicalBuilder.back();
if (Arg.getKind() == TemplateArgument::Pack) {
- auto *Param = TemplateParams->getParam(Builder.size() - 1);
+ auto *Param = TemplateParams->getParam(CanonicalBuilder.size() - 1);
// If this is a fully-saturated fixed-size pack, it should be
// fully-substituted, not partially-substituted.
- Optional<unsigned> Expansions = getExpandedPackSize(Param);
+ std::optional<unsigned> Expansions = getExpandedPackSize(Param);
if (!Expansions || Arg.pack_size() < *Expansions) {
- PartiallySubstitutedPackIndex = Builder.size() - 1;
+ PartiallySubstitutedPackIndex = CanonicalBuilder.size() - 1;
CurrentInstantiationScope->SetPartiallySubstitutedPack(
Param, Arg.pack_begin(), Arg.pack_size());
}
@@ -3300,15 +3332,18 @@ Sema::SubstituteExplicitTemplateArguments(
ExtParameterInfoBuilder ExtParamInfos;
+ MultiLevelTemplateArgumentList MLTAL(FunctionTemplate,
+ SugaredExplicitArgumentList->asArray(),
+ /*Final=*/true);
+
// Instantiate the types of each of the function parameters given the
// explicitly-specified template arguments. If the function has a trailing
// return type, substitute it after the arguments to ensure we substitute
// in lexical order.
if (Proto->hasTrailingReturn()) {
if (SubstParmTypes(Function->getLocation(), Function->parameters(),
- Proto->getExtParameterInfosOrNull(),
- MultiLevelTemplateArgumentList(*ExplicitArgumentList),
- ParamTypes, /*params*/ nullptr, ExtParamInfos))
+ Proto->getExtParameterInfosOrNull(), MLTAL, ParamTypes,
+ /*params=*/nullptr, ExtParamInfos))
return TDK_SubstitutionFailure;
}
@@ -3332,8 +3367,7 @@ Sema::SubstituteExplicitTemplateArguments(
getLangOpts().CPlusPlus11);
ResultType =
- SubstType(Proto->getReturnType(),
- MultiLevelTemplateArgumentList(*ExplicitArgumentList),
+ SubstType(Proto->getReturnType(), MLTAL,
Function->getTypeSpecStartLoc(), Function->getDeclName());
if (ResultType.isNull() || Trap.hasErrorOccurred())
return TDK_SubstitutionFailure;
@@ -3350,9 +3384,8 @@ Sema::SubstituteExplicitTemplateArguments(
// explicitly-specified template arguments if we didn't do so earlier.
if (!Proto->hasTrailingReturn() &&
SubstParmTypes(Function->getLocation(), Function->parameters(),
- Proto->getExtParameterInfosOrNull(),
- MultiLevelTemplateArgumentList(*ExplicitArgumentList),
- ParamTypes, /*params*/ nullptr, ExtParamInfos))
+ Proto->getExtParameterInfosOrNull(), MLTAL, ParamTypes,
+ /*params*/ nullptr, ExtParamInfos))
return TDK_SubstitutionFailure;
if (FunctionType) {
@@ -3364,9 +3397,15 @@ Sema::SubstituteExplicitTemplateArguments(
// specification.
SmallVector<QualType, 4> ExceptionStorage;
if (getLangOpts().CPlusPlus17 &&
- SubstExceptionSpec(
- Function->getLocation(), EPI.ExceptionSpec, ExceptionStorage,
- MultiLevelTemplateArgumentList(*ExplicitArgumentList)))
+ SubstExceptionSpec(Function->getLocation(), EPI.ExceptionSpec,
+ ExceptionStorage,
+ getTemplateInstantiationArgs(
+ FunctionTemplate, nullptr, /*Final=*/true,
+ /*Innermost=*/SugaredExplicitArgumentList,
+ /*RelativeToPrimary=*/false,
+ /*Pattern=*/nullptr,
+ /*ForConstraintInstantiation=*/false,
+ /*SkipForSpecialization=*/true)))
return TDK_SubstitutionFailure;
*FunctionType = BuildFunctionType(ResultType, ParamTypes,
@@ -3388,8 +3427,8 @@ Sema::SubstituteExplicitTemplateArguments(
// parameter pack, however, will be set to NULL since the deduction
// mechanism handles the partially-substituted argument pack directly.
Deduced.reserve(TemplateParams->size());
- for (unsigned I = 0, N = ExplicitArgumentList->size(); I != N; ++I) {
- const TemplateArgument &Arg = ExplicitArgumentList->get(I);
+ for (unsigned I = 0, N = SugaredExplicitArgumentList->size(); I != N; ++I) {
+ const TemplateArgument &Arg = SugaredExplicitArgumentList->get(I);
if (I == PartiallySubstitutedPackIndex)
Deduced.push_back(DeducedTemplateArgument());
else
@@ -3531,7 +3570,7 @@ static unsigned getPackIndexForParam(Sema &S,
for (auto *PD : FunctionTemplate->getTemplatedDecl()->parameters()) {
if (PD->isParameterPack()) {
unsigned NumExpansions =
- S.getNumArgumentsInExpansion(PD->getType(), Args).getValueOr(1);
+ S.getNumArgumentsInExpansion(PD->getType(), Args).value_or(1);
if (Idx + NumExpansions > ParamIdx)
return ParamIdx - Idx;
Idx += NumExpansions;
@@ -3545,6 +3584,48 @@ static unsigned getPackIndexForParam(Sema &S,
llvm_unreachable("parameter index would not be produced from template");
}
+// if `Specialization` is a `CXXConstructorDecl` or `CXXConversionDecl`,
+// we'll try to instantiate and update its explicit specifier after constraint
+// checking.
+static Sema::TemplateDeductionResult instantiateExplicitSpecifierDeferred(
+ Sema &S, FunctionDecl *Specialization,
+ const MultiLevelTemplateArgumentList &SubstArgs,
+ TemplateDeductionInfo &Info, FunctionTemplateDecl *FunctionTemplate,
+ ArrayRef<TemplateArgument> DeducedArgs) {
+ auto GetExplicitSpecifier = [](FunctionDecl *D) {
+ return isa<CXXConstructorDecl>(D)
+ ? cast<CXXConstructorDecl>(D)->getExplicitSpecifier()
+ : cast<CXXConversionDecl>(D)->getExplicitSpecifier();
+ };
+ auto SetExplicitSpecifier = [](FunctionDecl *D, ExplicitSpecifier ES) {
+ isa<CXXConstructorDecl>(D)
+ ? cast<CXXConstructorDecl>(D)->setExplicitSpecifier(ES)
+ : cast<CXXConversionDecl>(D)->setExplicitSpecifier(ES);
+ };
+
+ ExplicitSpecifier ES = GetExplicitSpecifier(Specialization);
+ Expr *ExplicitExpr = ES.getExpr();
+ if (!ExplicitExpr)
+ return Sema::TDK_Success;
+ if (!ExplicitExpr->isValueDependent())
+ return Sema::TDK_Success;
+
+ Sema::InstantiatingTemplate Inst(
+ S, Info.getLocation(), FunctionTemplate, DeducedArgs,
+ Sema::CodeSynthesisContext::DeducedTemplateArgumentSubstitution, Info);
+ if (Inst.isInvalid())
+ return Sema::TDK_InstantiationDepth;
+ Sema::SFINAETrap Trap(S);
+ const ExplicitSpecifier InstantiatedES =
+ S.instantiateExplicitSpecifier(SubstArgs, ES);
+ if (InstantiatedES.isInvalid() || Trap.hasErrorOccurred()) {
+ Specialization->setInvalidDecl(true);
+ return Sema::TDK_SubstitutionFailure;
+ }
+ SetExplicitSpecifier(Specialization, InstantiatedES);
+ return Sema::TDK_Success;
+}
+
/// Finish template argument deduction for a function template,
/// checking the deduced template arguments for completeness and forming
/// the function template specialization.
@@ -3577,11 +3658,11 @@ Sema::TemplateDeductionResult Sema::FinishTemplateArgumentDeduction(
// C++ [temp.deduct.type]p2:
// [...] or if any template argument remains neither deduced nor
// explicitly specified, template argument deduction fails.
- SmallVector<TemplateArgument, 4> Builder;
+ SmallVector<TemplateArgument, 4> SugaredBuilder, CanonicalBuilder;
if (auto Result = ConvertDeducedTemplateArguments(
- *this, FunctionTemplate, /*IsDeduced*/true, Deduced, Info, Builder,
- CurrentInstantiationScope, NumExplicitlySpecified,
- PartialOverloading))
+ *this, FunctionTemplate, /*IsDeduced*/ true, Deduced, Info,
+ SugaredBuilder, CanonicalBuilder, CurrentInstantiationScope,
+ NumExplicitlySpecified, PartialOverloading))
return Result;
// C++ [temp.deduct.call]p10: [DR1391]
@@ -3597,18 +3678,39 @@ Sema::TemplateDeductionResult Sema::FinishTemplateArgumentDeduction(
return TDK_NonDependentConversionFailure;
// Form the template argument list from the deduced template arguments.
- TemplateArgumentList *DeducedArgumentList
- = TemplateArgumentList::CreateCopy(Context, Builder);
- Info.reset(DeducedArgumentList);
+ TemplateArgumentList *SugaredDeducedArgumentList =
+ TemplateArgumentList::CreateCopy(Context, SugaredBuilder);
+ TemplateArgumentList *CanonicalDeducedArgumentList =
+ TemplateArgumentList::CreateCopy(Context, CanonicalBuilder);
+ Info.reset(SugaredDeducedArgumentList, CanonicalDeducedArgumentList);
// Substitute the deduced template arguments into the function template
// declaration to produce the function template specialization.
DeclContext *Owner = FunctionTemplate->getDeclContext();
if (FunctionTemplate->getFriendObjectKind())
Owner = FunctionTemplate->getLexicalDeclContext();
- MultiLevelTemplateArgumentList SubstArgs(*DeducedArgumentList);
+ FunctionDecl *FD = FunctionTemplate->getTemplatedDecl();
+ // additional check for inline friend,
+ // ```
+ // template <class F1> int foo(F1 X);
+ // template <int A1> struct A {
+ // template <class F1> friend int foo(F1 X) { return A1; }
+ // };
+ // template struct A<1>;
+ // int a = foo(1.0);
+ // ```
+ const FunctionDecl *FDFriend;
+ if (FD->getFriendObjectKind() == Decl::FriendObjectKind::FOK_None &&
+ FD->isDefined(FDFriend, /*CheckForPendingFriendDefinition*/ true) &&
+ FDFriend->getFriendObjectKind() != Decl::FriendObjectKind::FOK_None) {
+ FD = const_cast<FunctionDecl *>(FDFriend);
+ Owner = FD->getLexicalDeclContext();
+ }
+ MultiLevelTemplateArgumentList SubstArgs(
+ FunctionTemplate, CanonicalDeducedArgumentList->asArray(),
+ /*Final=*/false);
Specialization = cast_or_null<FunctionDecl>(
- SubstDecl(FunctionTemplate->getTemplatedDecl(), Owner, SubstArgs));
+ SubstDecl(FD, Owner, SubstArgs));
if (!Specialization || Specialization->isInvalidDecl())
return TDK_SubstitutionFailure;
@@ -3617,9 +3719,10 @@ Sema::TemplateDeductionResult Sema::FinishTemplateArgumentDeduction(
// If the template argument list is owned by the function template
// specialization, release it.
- if (Specialization->getTemplateSpecializationArgs() == DeducedArgumentList &&
+ if (Specialization->getTemplateSpecializationArgs() ==
+ CanonicalDeducedArgumentList &&
!Trap.hasErrorOccurred())
- Info.take();
+ Info.takeCanonical();
// There may have been an error that did not prevent us from constructing a
// declaration. Mark the declaration invalid and return with a substitution
@@ -3638,17 +3741,31 @@ Sema::TemplateDeductionResult Sema::FinishTemplateArgumentDeduction(
// ([temp.constr.constr]). If the constraints are not satisfied, type
// deduction fails.
if (!PartialOverloading ||
- (Builder.size() == FunctionTemplate->getTemplateParameters()->size())) {
- if (CheckInstantiatedFunctionTemplateConstraints(Info.getLocation(),
- Specialization, Builder, Info.AssociatedConstraintsSatisfaction))
+ (CanonicalBuilder.size() ==
+ FunctionTemplate->getTemplateParameters()->size())) {
+ if (CheckInstantiatedFunctionTemplateConstraints(
+ Info.getLocation(), Specialization, CanonicalBuilder,
+ Info.AssociatedConstraintsSatisfaction))
return TDK_MiscellaneousDeductionFailure;
if (!Info.AssociatedConstraintsSatisfaction.IsSatisfied) {
- Info.reset(TemplateArgumentList::CreateCopy(Context, Builder));
+ Info.reset(Info.takeSugared(),
+ TemplateArgumentList::CreateCopy(Context, CanonicalBuilder));
return TDK_ConstraintsNotSatisfied;
}
}
+ // We skipped the instantiation of the explicit-specifier during the
+ // substitution of `FD` before. So, we try to instantiate it back if
+ // `Specialization` is either a constructor or a conversion function.
+ if (isa<CXXConstructorDecl, CXXConversionDecl>(Specialization)) {
+ if (TDK_Success != instantiateExplicitSpecifierDeferred(
+ *this, Specialization, SubstArgs, Info,
+ FunctionTemplate, DeducedArgs)) {
+ return TDK_SubstitutionFailure;
+ }
+ }
+
if (OriginalCallArgs) {
// C++ [temp.deduct.call]p4:
// In general, the deduction process attempts to find template argument
@@ -3659,7 +3776,9 @@ Sema::TemplateDeductionResult Sema::FinishTemplateArgumentDeduction(
OriginalCallArg OriginalArg = (*OriginalCallArgs)[I];
auto ParamIdx = OriginalArg.ArgIdx;
- if (ParamIdx >= Specialization->getNumParams())
+ unsigned ExplicitOffset =
+ Specialization->hasCXXExplicitFunctionObjectParameter() ? 1 : 0;
+ if (ParamIdx >= Specialization->getNumParams() - ExplicitOffset)
// FIXME: This presumably means a pack ended up smaller than we
// expected while deducing. Should this not result in deduction
// failure? Can it even happen?
@@ -3669,7 +3788,8 @@ Sema::TemplateDeductionResult Sema::FinishTemplateArgumentDeduction(
if (!OriginalArg.DecomposedParam) {
// P is one of the function parameters, just look up its substituted
// type.
- DeducedA = Specialization->getParamDecl(ParamIdx)->getType();
+ DeducedA =
+ Specialization->getParamDecl(ParamIdx + ExplicitOffset)->getType();
} else {
// P is a decomposed element of a parameter corresponding to a
// braced-init-list argument. Substitute back into P to find the
@@ -3719,7 +3839,7 @@ static QualType GetTypeOfFunction(Sema &S, const OverloadExpr::FindResult &R,
return {};
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn))
- if (Method->isInstance()) {
+ if (Method->isImplicitObjectMemberFunction()) {
// An instance method that's referenced in a form that doesn't
// look like a member pointer is just invalid.
if (!R.HasFormOfMemberPointer)
@@ -3740,7 +3860,8 @@ static QualType GetTypeOfFunction(Sema &S, const OverloadExpr::FindResult &R,
static QualType
ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
Expr *Arg, QualType ParamType,
- bool ParamWasReference) {
+ bool ParamWasReference,
+ TemplateSpecCandidateSet *FailedTSC = nullptr) {
OverloadExpr::FindResult R = OverloadExpr::find(Arg);
@@ -3762,8 +3883,10 @@ ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
!ParamType->isMemberFunctionPointerType()) {
if (Ovl->hasExplicitTemplateArgs()) {
// But we can still look for an explicit specialization.
- if (FunctionDecl *ExplicitSpec
- = S.ResolveSingleFunctionTemplateSpecialization(Ovl))
+ if (FunctionDecl *ExplicitSpec =
+ S.ResolveSingleFunctionTemplateSpecialization(
+ Ovl, /*Complain=*/false,
+ /*FoundDeclAccessPair=*/nullptr, FailedTSC))
return GetTypeOfFunction(S, R, ExplicitSpec);
}
@@ -3845,7 +3968,9 @@ ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
/// overloaded function set that could not be resolved.
static bool AdjustFunctionParmAndArgTypesForDeduction(
Sema &S, TemplateParameterList *TemplateParams, unsigned FirstInnerIndex,
- QualType &ParamType, QualType &ArgType, Expr *Arg, unsigned &TDF) {
+ QualType &ParamType, QualType &ArgType,
+ Expr::Classification ArgClassification, Expr *Arg, unsigned &TDF,
+ TemplateSpecCandidateSet *FailedTSC = nullptr) {
// C++0x [temp.deduct.call]p3:
// If P is a cv-qualified type, the top level cv-qualifiers of P's type
// are ignored for type deduction.
@@ -3862,25 +3987,28 @@ static bool AdjustFunctionParmAndArgTypesForDeduction(
// but there are sometimes special circumstances. Typically
// involving a template-id-expr.
if (ArgType == S.Context.OverloadTy) {
- ArgType = ResolveOverloadForDeduction(S, TemplateParams,
- Arg, ParamType,
- ParamRefType != nullptr);
+ assert(Arg && "expected a non-null arg expression");
+ ArgType = ResolveOverloadForDeduction(S, TemplateParams, Arg, ParamType,
+ ParamRefType != nullptr, FailedTSC);
if (ArgType.isNull())
return true;
}
if (ParamRefType) {
// If the argument has incomplete array type, try to complete its type.
- if (ArgType->isIncompleteArrayType())
+ if (ArgType->isIncompleteArrayType()) {
+ assert(Arg && "expected a non-null arg expression");
ArgType = S.getCompletedType(Arg);
+ }
// C++1z [temp.deduct.call]p3:
// If P is a forwarding reference and the argument is an lvalue, the type
// "lvalue reference to A" is used in place of A for type deduction.
if (isForwardingReference(QualType(ParamRefType, 0), FirstInnerIndex) &&
- Arg->isLValue()) {
- if (S.getLangOpts().OpenCL && !ArgType.hasAddressSpace())
- ArgType = S.Context.getAddrSpaceQualType(ArgType, LangAS::opencl_generic);
+ ArgClassification.isLValue()) {
+ if (S.getLangOpts().OpenCL && !ArgType.hasAddressSpace())
+ ArgType = S.Context.getAddrSpaceQualType(
+ ArgType, S.Context.getDefaultOpenCLPointeeAddrSpace());
ArgType = S.Context.getLValueReferenceType(ArgType);
}
} else {
@@ -3889,13 +4017,11 @@ static bool AdjustFunctionParmAndArgTypesForDeduction(
// - If A is an array type, the pointer type produced by the
// array-to-pointer standard conversion (4.2) is used in place of
// A for type deduction; otherwise,
- if (ArgType->isArrayType())
- ArgType = S.Context.getArrayDecayedType(ArgType);
// - If A is a function type, the pointer type produced by the
// function-to-pointer standard conversion (4.3) is used in place
// of A for type deduction; otherwise,
- else if (ArgType->isFunctionType())
- ArgType = S.Context.getPointerType(ArgType);
+ if (ArgType->canDecayToPointerType())
+ ArgType = S.Context.getDecayedType(ArgType);
else {
// - If A is a cv-qualified type, the top level cv-qualifiers of A's
// type are ignored for type deduction.
@@ -3940,10 +4066,13 @@ hasDeducibleTemplateParameters(Sema &S, FunctionTemplateDecl *FunctionTemplate,
static Sema::TemplateDeductionResult DeduceTemplateArgumentsFromCallArgument(
Sema &S, TemplateParameterList *TemplateParams, unsigned FirstInnerIndex,
- QualType ParamType, Expr *Arg, TemplateDeductionInfo &Info,
+ QualType ParamType, QualType ArgType,
+ Expr::Classification ArgClassification, Expr *Arg,
+ TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<Sema::OriginalCallArg> &OriginalCallArgs,
- bool DecomposedParam, unsigned ArgIdx, unsigned TDF);
+ bool DecomposedParam, unsigned ArgIdx, unsigned TDF,
+ TemplateSpecCandidateSet *FailedTSC = nullptr);
/// Attempt template argument deduction from an initializer list
/// deemed to be an argument in a function call.
@@ -3984,8 +4113,9 @@ static Sema::TemplateDeductionResult DeduceFromInitializerList(
if (ElTy->isDependentType()) {
for (Expr *E : ILE->inits()) {
if (auto Result = DeduceTemplateArgumentsFromCallArgument(
- S, TemplateParams, 0, ElTy, E, Info, Deduced, OriginalCallArgs, true,
- ArgIdx, TDF))
+ S, TemplateParams, 0, ElTy, E->getType(),
+ E->Classify(S.getASTContext()), E, Info, Deduced,
+ OriginalCallArgs, true, ArgIdx, TDF))
return Result;
}
}
@@ -4016,21 +4146,25 @@ static Sema::TemplateDeductionResult DeduceFromInitializerList(
/// single parameter / argument pair.
static Sema::TemplateDeductionResult DeduceTemplateArgumentsFromCallArgument(
Sema &S, TemplateParameterList *TemplateParams, unsigned FirstInnerIndex,
- QualType ParamType, Expr *Arg, TemplateDeductionInfo &Info,
+ QualType ParamType, QualType ArgType,
+ Expr::Classification ArgClassification, Expr *Arg,
+ TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<Sema::OriginalCallArg> &OriginalCallArgs,
- bool DecomposedParam, unsigned ArgIdx, unsigned TDF) {
- QualType ArgType = Arg->getType();
+ bool DecomposedParam, unsigned ArgIdx, unsigned TDF,
+ TemplateSpecCandidateSet *FailedTSC) {
+
QualType OrigParamType = ParamType;
// If P is a reference type [...]
// If P is a cv-qualified type [...]
if (AdjustFunctionParmAndArgTypesForDeduction(
- S, TemplateParams, FirstInnerIndex, ParamType, ArgType, Arg, TDF))
+ S, TemplateParams, FirstInnerIndex, ParamType, ArgType,
+ ArgClassification, Arg, TDF, FailedTSC))
return Sema::TDK_Success;
// If [...] the argument is a non-empty initializer list [...]
- if (InitListExpr *ILE = dyn_cast<InitListExpr>(Arg))
+ if (InitListExpr *ILE = dyn_cast_if_present<InitListExpr>(Arg))
return DeduceFromInitializerList(S, TemplateParams, ParamType, ILE, Info,
Deduced, OriginalCallArgs, ArgIdx, TDF);
@@ -4039,8 +4173,9 @@ static Sema::TemplateDeductionResult DeduceTemplateArgumentsFromCallArgument(
//
// Keep track of the argument type and corresponding parameter index,
// so we can check for compatibility between the deduced A and A.
- OriginalCallArgs.push_back(
- Sema::OriginalCallArg(OrigParamType, DecomposedParam, ArgIdx, ArgType));
+ if (Arg)
+ OriginalCallArgs.push_back(
+ Sema::OriginalCallArg(OrigParamType, DecomposedParam, ArgIdx, ArgType));
return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, ParamType,
ArgType, Info, Deduced, TDF);
}
@@ -4074,13 +4209,20 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, TemplateDeductionInfo &Info,
- bool PartialOverloading,
+ bool PartialOverloading, bool AggregateDeductionCandidate,
+ QualType ObjectType, Expr::Classification ObjectClassification,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent) {
if (FunctionTemplate->isInvalidDecl())
return TDK_Invalid;
FunctionDecl *Function = FunctionTemplate->getTemplatedDecl();
unsigned NumParams = Function->getNumParams();
+ bool HasExplicitObject = false;
+ int ExplicitObjectOffset = 0;
+ if (Function->hasCXXExplicitFunctionObjectParameter()) {
+ HasExplicitObject = true;
+ ExplicitObjectOffset = 1;
+ }
unsigned FirstInnerIndex = getFirstInnerIndex(FunctionTemplate);
@@ -4088,9 +4230,11 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
// Template argument deduction is done by comparing each function template
// parameter type (call it P) with the type of the corresponding argument
// of the call (call it A) as described below.
- if (Args.size() < Function->getMinRequiredArguments() && !PartialOverloading)
+ if (Args.size() < Function->getMinRequiredExplicitArguments() &&
+ !PartialOverloading)
return TDK_TooFewArguments;
- else if (TooManyArguments(NumParams, Args.size(), PartialOverloading)) {
+ else if (TooManyArguments(NumParams, Args.size() + ExplicitObjectOffset,
+ PartialOverloading)) {
const auto *Proto = Function->getType()->castAs<FunctionProtoType>();
if (Proto->isTemplateVariadic())
/* Do nothing */;
@@ -4126,7 +4270,8 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
SmallVector<OriginalCallArg, 8> OriginalCallArgs;
// Deduce an argument of type ParamType from an expression with index ArgIdx.
- auto DeduceCallArgument = [&](QualType ParamType, unsigned ArgIdx) {
+ auto DeduceCallArgument = [&](QualType ParamType, unsigned ArgIdx,
+ bool ExplicitObjetArgument) {
// C++ [demp.deduct.call]p1: (DR1391)
// Template argument deduction is done by comparing each function template
// parameter that contains template-parameters that participate in
@@ -4134,10 +4279,21 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
if (!hasDeducibleTemplateParameters(*this, FunctionTemplate, ParamType))
return Sema::TDK_Success;
+ if (ExplicitObjetArgument) {
+ // ... with the type of the corresponding argument
+ return DeduceTemplateArgumentsFromCallArgument(
+ *this, TemplateParams, FirstInnerIndex, ParamType, ObjectType,
+ ObjectClassification,
+ /*Arg=*/nullptr, Info, Deduced, OriginalCallArgs,
+ /*Decomposed*/ false, ArgIdx, /*TDF*/ 0);
+ }
+
// ... with the type of the corresponding argument
return DeduceTemplateArgumentsFromCallArgument(
- *this, TemplateParams, FirstInnerIndex, ParamType, Args[ArgIdx], Info, Deduced,
- OriginalCallArgs, /*Decomposed*/false, ArgIdx, /*TDF*/ 0);
+ *this, TemplateParams, FirstInnerIndex, ParamType,
+ Args[ArgIdx]->getType(), Args[ArgIdx]->Classify(getASTContext()),
+ Args[ArgIdx], Info, Deduced, OriginalCallArgs, /*Decomposed*/ false,
+ ArgIdx, /*TDF*/ 0);
};
// Deduce template arguments from the function parameters.
@@ -4151,19 +4307,31 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
dyn_cast<PackExpansionType>(ParamType);
if (!ParamExpansion) {
// Simple case: matching a function parameter to a function argument.
- if (ArgIdx >= Args.size())
+ if (ArgIdx >= Args.size() && !(HasExplicitObject && ParamIdx == 0))
break;
ParamTypesForArgChecking.push_back(ParamType);
- if (auto Result = DeduceCallArgument(ParamType, ArgIdx++))
+
+ if (ParamIdx == 0 && HasExplicitObject) {
+ if (auto Result = DeduceCallArgument(ParamType, 0,
+ /*ExplicitObjetArgument=*/true))
+ return Result;
+ continue;
+ }
+
+ if (auto Result = DeduceCallArgument(ParamType, ArgIdx++,
+ /*ExplicitObjetArgument=*/false))
return Result;
continue;
}
+ bool IsTrailingPack = ParamIdx + 1 == NumParamTypes;
+
QualType ParamPattern = ParamExpansion->getPattern();
PackDeductionScope PackScope(*this, TemplateParams, Deduced, Info,
- ParamPattern);
+ ParamPattern,
+ AggregateDeductionCandidate && IsTrailingPack);
// C++0x [temp.deduct.call]p1:
// For a function parameter pack that occurs at the end of the
@@ -4181,18 +4349,20 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
// the length of the explicitly-specified pack if it's expanded by the
// parameter pack and 0 otherwise, and we treat each deduction as a
// non-deduced context.
- if (ParamIdx + 1 == NumParamTypes || PackScope.hasFixedArity()) {
+ if (IsTrailingPack || PackScope.hasFixedArity()) {
for (; ArgIdx < Args.size() && PackScope.hasNextElement();
PackScope.nextPackElement(), ++ArgIdx) {
ParamTypesForArgChecking.push_back(ParamPattern);
- if (auto Result = DeduceCallArgument(ParamPattern, ArgIdx))
+ if (auto Result = DeduceCallArgument(ParamPattern, ArgIdx,
+ /*ExplicitObjetArgument=*/false))
return Result;
}
} else {
// If the parameter type contains an explicitly-specified pack that we
// could not expand, skip the number of parameters notionally created
// by the expansion.
- Optional<unsigned> NumExpansions = ParamExpansion->getNumExpansions();
+ std::optional<unsigned> NumExpansions =
+ ParamExpansion->getNumExpansions();
if (NumExpansions && !PackScope.isPartiallyExpanded()) {
for (unsigned I = 0; I != *NumExpansions && ArgIdx < Args.size();
++I, ++ArgIdx) {
@@ -4336,17 +4506,15 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
Deduced.resize(TemplateParams->size());
// If the function has a deduced return type, substitute it for a dependent
- // type so that we treat it as a non-deduced context in what follows. If we
- // are looking up by signature, the signature type should also have a deduced
- // return type, which we instead expect to exactly match.
+ // type so that we treat it as a non-deduced context in what follows.
bool HasDeducedReturnType = false;
- if (getLangOpts().CPlusPlus14 && IsAddressOfFunction &&
+ if (getLangOpts().CPlusPlus14 &&
Function->getReturnType()->getContainedAutoType()) {
- FunctionType = SubstAutoType(FunctionType, Context.DependentTy);
+ FunctionType = SubstAutoTypeDependent(FunctionType);
HasDeducedReturnType = true;
}
- if (!ArgFunctionType.isNull()) {
+ if (!ArgFunctionType.isNull() && !FunctionType.isNull()) {
unsigned TDF =
TDF_TopLevelParameterTypeList | TDF_AllowCompatibleFunctionType;
// Deduce template arguments from the function type.
@@ -4368,11 +4536,17 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
// If the function has a deduced return type, deduce it now, so we can check
// that the deduced function type matches the requested type.
- if (HasDeducedReturnType &&
+ if (HasDeducedReturnType && IsAddressOfFunction &&
Specialization->getReturnType()->isUndeducedType() &&
DeduceReturnType(Specialization, Info.getLocation(), false))
return TDK_MiscellaneousDeductionFailure;
+ if (IsAddressOfFunction && getLangOpts().CPlusPlus20 &&
+ Specialization->isImmediateEscalating() &&
+ CheckIfFunctionSpecializationIsImmediate(Specialization,
+ Info.getLocation()))
+ return TDK_MiscellaneousDeductionFailure;
+
// If the function has a dependent exception specification, resolve it now,
// so we can check that the exception specification matches.
auto *SpecializationFPT =
@@ -4387,23 +4561,31 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
// noreturn can't be dependent, so we don't actually need this for them
// right now.)
QualType SpecializationType = Specialization->getType();
- if (!IsAddressOfFunction)
+ if (!IsAddressOfFunction) {
ArgFunctionType = adjustCCAndNoReturn(ArgFunctionType, SpecializationType,
/*AdjustExceptionSpec*/true);
+ // Revert placeholder types in the return type back to undeduced types so
+ // that the comparison below compares the declared return types.
+ if (HasDeducedReturnType) {
+ SpecializationType = SubstAutoType(SpecializationType, QualType());
+ ArgFunctionType = SubstAutoType(ArgFunctionType, QualType());
+ }
+ }
+
// If the requested function type does not match the actual type of the
// specialization with respect to arguments of compatible pointer to function
// types, template argument deduction fails.
if (!ArgFunctionType.isNull()) {
- if (IsAddressOfFunction &&
- !isSameOrCompatibleFunctionType(
- Context.getCanonicalType(SpecializationType),
- Context.getCanonicalType(ArgFunctionType)))
- return TDK_MiscellaneousDeductionFailure;
-
- if (!IsAddressOfFunction &&
- !Context.hasSameType(SpecializationType, ArgFunctionType))
- return TDK_MiscellaneousDeductionFailure;
+ if (IsAddressOfFunction
+ ? !isSameOrCompatibleFunctionType(
+ Context.getCanonicalType(SpecializationType),
+ Context.getCanonicalType(ArgFunctionType))
+ : !Context.hasSameType(SpecializationType, ArgFunctionType)) {
+ Info.FirstArg = TemplateArgument(SpecializationType);
+ Info.SecondArg = TemplateArgument(ArgFunctionType);
+ return TDK_NonDeducedMismatch;
+ }
}
return TDK_Success;
@@ -4412,11 +4594,10 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
/// Deduce template arguments for a templated conversion
/// function (C++ [temp.deduct.conv]) and, if successful, produce a
/// conversion function template specialization.
-Sema::TemplateDeductionResult
-Sema::DeduceTemplateArguments(FunctionTemplateDecl *ConversionTemplate,
- QualType ToType,
- CXXConversionDecl *&Specialization,
- TemplateDeductionInfo &Info) {
+Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
+ FunctionTemplateDecl *ConversionTemplate, QualType ObjectType,
+ Expr::Classification ObjectClassification, QualType ToType,
+ CXXConversionDecl *&Specialization, TemplateDeductionInfo &Info) {
if (ConversionTemplate->isInvalidDecl())
return TDK_Invalid;
@@ -4511,6 +4692,19 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *ConversionTemplate,
if ((P->isPointerType() && A->isPointerType()) ||
(P->isMemberPointerType() && A->isMemberPointerType()))
TDF |= TDF_IgnoreQualifiers;
+
+ SmallVector<Sema::OriginalCallArg, 1> OriginalCallArgs;
+ if (ConversionGeneric->isExplicitObjectMemberFunction()) {
+ QualType ParamType = ConversionGeneric->getParamDecl(0)->getType();
+ if (TemplateDeductionResult Result =
+ DeduceTemplateArgumentsFromCallArgument(
+ *this, TemplateParams, getFirstInnerIndex(ConversionTemplate),
+ ParamType, ObjectType, ObjectClassification,
+ /*Arg=*/nullptr, Info, Deduced, OriginalCallArgs,
+ /*Decomposed*/ false, 0, /*TDF*/ 0))
+ return Result;
+ }
+
if (TemplateDeductionResult Result
= DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams,
P, A, Info, Deduced, TDF))
@@ -4523,7 +4717,8 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *ConversionTemplate,
TemplateDeductionResult Result;
runWithSufficientStackSpace(Info.getLocation(), [&] {
Result = FinishTemplateArgumentDeduction(ConversionTemplate, Deduced, 0,
- ConversionSpecialized, Info);
+ ConversionSpecialized, Info,
+ &OriginalCallArgs);
});
Specialization = cast_or_null<CXXConversionDecl>(ConversionSpecialized);
return Result;
@@ -4573,10 +4768,11 @@ namespace {
QualType Replacement;
bool ReplacementIsPack;
bool UseTypeSugar;
+ using inherited = TreeTransform<SubstituteDeducedTypeTransform>;
public:
SubstituteDeducedTypeTransform(Sema &SemaRef, DependentAuto DA)
- : TreeTransform<SubstituteDeducedTypeTransform>(SemaRef), Replacement(),
+ : TreeTransform<SubstituteDeducedTypeTransform>(SemaRef),
ReplacementIsPack(DA.IsPack), UseTypeSugar(true) {}
SubstituteDeducedTypeTransform(Sema &SemaRef, QualType Replacement,
@@ -4633,6 +4829,16 @@ namespace {
// Lambdas never need to be transformed.
return E;
}
+ bool TransformExceptionSpec(SourceLocation Loc,
+ FunctionProtoType::ExceptionSpecInfo &ESI,
+ SmallVectorImpl<QualType> &Exceptions,
+ bool &Changed) {
+ if (ESI.Type == EST_Uninstantiated) {
+ ESI.instantiate();
+ Changed = true;
+ }
+ return inherited::TransformExceptionSpec(Loc, ESI, Exceptions, Changed);
+ }
QualType Apply(TypeLoc TL) {
// Create some scratch storage for the transformed type locations.
@@ -4645,42 +4851,9 @@ namespace {
} // namespace
-Sema::DeduceAutoResult
-Sema::DeduceAutoType(TypeSourceInfo *Type, Expr *&Init, QualType &Result,
- Optional<unsigned> DependentDeductionDepth,
- bool IgnoreConstraints) {
- return DeduceAutoType(Type->getTypeLoc(), Init, Result,
- DependentDeductionDepth, IgnoreConstraints);
-}
-
-/// Attempt to produce an informative diagostic explaining why auto deduction
-/// failed.
-/// \return \c true if diagnosed, \c false if not.
-static bool diagnoseAutoDeductionFailure(Sema &S,
- Sema::TemplateDeductionResult TDK,
- TemplateDeductionInfo &Info,
- ArrayRef<SourceRange> Ranges) {
- switch (TDK) {
- case Sema::TDK_Inconsistent: {
- // Inconsistent deduction means we were deducing from an initializer list.
- auto D = S.Diag(Info.getLocation(), diag::err_auto_inconsistent_deduction);
- D << Info.FirstArg << Info.SecondArg;
- for (auto R : Ranges)
- D << R;
- return true;
- }
-
- // FIXME: Are there other cases for which a custom diagnostic is more useful
- // than the basic "types don't match" diagnostic?
-
- default:
- return false;
- }
-}
-
-static Sema::DeduceAutoResult
-CheckDeducedPlaceholderConstraints(Sema &S, const AutoType &Type,
- AutoTypeLoc TypeLoc, QualType Deduced) {
+static bool CheckDeducedPlaceholderConstraints(Sema &S, const AutoType &Type,
+ AutoTypeLoc TypeLoc,
+ QualType Deduced) {
ConstraintSatisfaction Satisfaction;
ConceptDecl *Concept = Type.getTypeConstraintConcept();
TemplateArgumentListInfo TemplateArgs(TypeLoc.getLAngleLoc(),
@@ -4692,14 +4865,17 @@ CheckDeducedPlaceholderConstraints(Sema &S, const AutoType &Type,
for (unsigned I = 0, C = TypeLoc.getNumArgs(); I != C; ++I)
TemplateArgs.addArgument(TypeLoc.getArgLoc(I));
- llvm::SmallVector<TemplateArgument, 4> Converted;
+ llvm::SmallVector<TemplateArgument, 4> SugaredConverted, CanonicalConverted;
if (S.CheckTemplateArgumentList(Concept, SourceLocation(), TemplateArgs,
- /*PartialTemplateArgs=*/false, Converted))
- return Sema::DAR_FailedAlreadyDiagnosed;
+ /*PartialTemplateArgs=*/false,
+ SugaredConverted, CanonicalConverted))
+ return true;
+ MultiLevelTemplateArgumentList MLTAL(Concept, CanonicalConverted,
+ /*Final=*/false);
if (S.CheckConstraintSatisfaction(Concept, {Concept->getConstraintExpr()},
- Converted, TypeLoc.getLocalSourceRange(),
+ MLTAL, TypeLoc.getLocalSourceRange(),
Satisfaction))
- return Sema::DAR_FailedAlreadyDiagnosed;
+ return true;
if (!Satisfaction.IsSatisfied) {
std::string Buf;
llvm::raw_string_ostream OS(Buf);
@@ -4713,11 +4889,11 @@ CheckDeducedPlaceholderConstraints(Sema &S, const AutoType &Type,
OS.flush();
S.Diag(TypeLoc.getConceptNameLoc(),
diag::err_placeholder_constraints_not_satisfied)
- << Deduced << Buf << TypeLoc.getLocalSourceRange();
+ << Deduced << Buf << TypeLoc.getLocalSourceRange();
S.DiagnoseUnsatisfiedConstraint(Satisfaction);
- return Sema::DAR_FailedAlreadyDiagnosed;
+ return true;
}
- return Sema::DAR_Succeeded;
+ return false;
}
/// Deduce the type for an auto type-specifier (C++11 [dcl.spec.auto]p6)
@@ -4730,187 +4906,184 @@ CheckDeducedPlaceholderConstraints(Sema &S, const AutoType &Type,
/// \param Init the initializer for the variable whose type is to be deduced.
/// \param Result if type deduction was successful, this will be set to the
/// deduced type.
-/// \param DependentDeductionDepth Set if we should permit deduction in
+/// \param Info the argument will be updated to provide additional information
+/// about template argument deduction.
+/// \param DependentDeduction Set if we should permit deduction in
/// dependent cases. This is necessary for template partial ordering with
-/// 'auto' template parameters. The value specified is the template
-/// parameter depth at which we should perform 'auto' deduction.
+/// 'auto' template parameters. The template parameter depth to be used
+/// should be specified in the 'Info' parameter.
/// \param IgnoreConstraints Set if we should not fail if the deduced type does
/// not satisfy the type-constraint in the auto type.
-Sema::DeduceAutoResult
-Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result,
- Optional<unsigned> DependentDeductionDepth,
- bool IgnoreConstraints) {
+Sema::TemplateDeductionResult
+Sema::DeduceAutoType(TypeLoc Type, Expr *Init, QualType &Result,
+ TemplateDeductionInfo &Info, bool DependentDeduction,
+ bool IgnoreConstraints,
+ TemplateSpecCandidateSet *FailedTSC) {
+ assert(DependentDeduction || Info.getDeducedDepth() == 0);
if (Init->containsErrors())
- return DAR_FailedAlreadyDiagnosed;
- if (Init->getType()->isNonOverloadPlaceholderType()) {
+ return TDK_AlreadyDiagnosed;
+
+ const AutoType *AT = Type.getType()->getContainedAutoType();
+ assert(AT);
+
+ if (Init->getType()->isNonOverloadPlaceholderType() || AT->isDecltypeAuto()) {
ExprResult NonPlaceholder = CheckPlaceholderExpr(Init);
if (NonPlaceholder.isInvalid())
- return DAR_FailedAlreadyDiagnosed;
+ return TDK_AlreadyDiagnosed;
Init = NonPlaceholder.get();
}
DependentAuto DependentResult = {
/*.IsPack = */ (bool)Type.getAs<PackExpansionTypeLoc>()};
- if (!DependentDeductionDepth &&
+ if (!DependentDeduction &&
(Type.getType()->isDependentType() || Init->isTypeDependent() ||
Init->containsUnexpandedParameterPack())) {
Result = SubstituteDeducedTypeTransform(*this, DependentResult).Apply(Type);
assert(!Result.isNull() && "substituting DependentTy can't fail");
- return DAR_Succeeded;
+ return TDK_Success;
}
- // Find the depth of template parameter to synthesize.
- unsigned Depth = DependentDeductionDepth.getValueOr(0);
-
- // If this is a 'decltype(auto)' specifier, do the decltype dance.
- // Since 'decltype(auto)' can only occur at the top of the type, we
- // don't need to go digging for it.
- if (const AutoType *AT = Type.getType()->getAs<AutoType>()) {
- if (AT->isDecltypeAuto()) {
- if (isa<InitListExpr>(Init)) {
- Diag(Init->getBeginLoc(), diag::err_decltype_auto_initializer_list);
- return DAR_FailedAlreadyDiagnosed;
- }
-
- ExprResult ER = CheckPlaceholderExpr(Init);
- if (ER.isInvalid())
- return DAR_FailedAlreadyDiagnosed;
- Init = ER.get();
- QualType Deduced = BuildDecltypeType(Init, Init->getBeginLoc(), false);
- if (Deduced.isNull())
- return DAR_FailedAlreadyDiagnosed;
- // FIXME: Support a non-canonical deduced type for 'auto'.
- Deduced = Context.getCanonicalType(Deduced);
- if (AT->isConstrained() && !IgnoreConstraints) {
- auto ConstraintsResult =
- CheckDeducedPlaceholderConstraints(*this, *AT,
- Type.getContainedAutoTypeLoc(),
- Deduced);
- if (ConstraintsResult != DAR_Succeeded)
- return ConstraintsResult;
- }
- Result = SubstituteDeducedTypeTransform(*this, Deduced).Apply(Type);
- if (Result.isNull())
- return DAR_FailedAlreadyDiagnosed;
- return DAR_Succeeded;
- } else if (!getLangOpts().CPlusPlus) {
- if (isa<InitListExpr>(Init)) {
- Diag(Init->getBeginLoc(), diag::err_auto_init_list_from_c);
- return DAR_FailedAlreadyDiagnosed;
- }
- }
+ // Make sure that we treat 'char[]' equaly as 'char*' in C23 mode.
+ auto *String = dyn_cast<StringLiteral>(Init);
+ if (getLangOpts().C23 && String && Type.getType()->isArrayType()) {
+ Diag(Type.getBeginLoc(), diag::ext_c23_auto_non_plain_identifier);
+ TypeLoc TL = TypeLoc(Init->getType(), Type.getOpaqueData());
+ Result = SubstituteDeducedTypeTransform(*this, DependentResult).Apply(TL);
+ assert(!Result.isNull() && "substituting DependentTy can't fail");
+ return TDK_Success;
}
- SourceLocation Loc = Init->getExprLoc();
-
- LocalInstantiationScope InstScope(*this);
+ // Emit a warning if 'auto*' is used in pedantic and in C23 mode.
+ if (getLangOpts().C23 && Type.getType()->isPointerType()) {
+ Diag(Type.getBeginLoc(), diag::ext_c23_auto_non_plain_identifier);
+ }
- // Build template<class TemplParam> void Func(FuncParam);
- TemplateTypeParmDecl *TemplParam = TemplateTypeParmDecl::Create(
- Context, nullptr, SourceLocation(), Loc, Depth, 0, nullptr, false, false,
- false);
- QualType TemplArg = QualType(TemplParam->getTypeForDecl(), 0);
- NamedDecl *TemplParamPtr = TemplParam;
- FixedSizeTemplateParameterListStorage<1, false> TemplateParamsSt(
- Context, Loc, Loc, TemplParamPtr, Loc, nullptr);
-
- QualType FuncParam =
- SubstituteDeducedTypeTransform(*this, TemplArg, /*UseTypeSugar*/false)
- .Apply(Type);
- assert(!FuncParam.isNull() &&
- "substituting template parameter for 'auto' failed");
+ auto *InitList = dyn_cast<InitListExpr>(Init);
+ if (!getLangOpts().CPlusPlus && InitList) {
+ Diag(Init->getBeginLoc(), diag::err_auto_init_list_from_c)
+ << (int)AT->getKeyword() << getLangOpts().C23;
+ return TDK_AlreadyDiagnosed;
+ }
// Deduce type of TemplParam in Func(Init)
SmallVector<DeducedTemplateArgument, 1> Deduced;
Deduced.resize(1);
- TemplateDeductionInfo Info(Loc, Depth);
-
// If deduction failed, don't diagnose if the initializer is dependent; it
// might acquire a matching type in the instantiation.
- auto DeductionFailed = [&](TemplateDeductionResult TDK,
- ArrayRef<SourceRange> Ranges) -> DeduceAutoResult {
+ auto DeductionFailed = [&](TemplateDeductionResult TDK) {
if (Init->isTypeDependent()) {
Result =
SubstituteDeducedTypeTransform(*this, DependentResult).Apply(Type);
assert(!Result.isNull() && "substituting DependentTy can't fail");
- return DAR_Succeeded;
+ return TDK_Success;
}
- if (diagnoseAutoDeductionFailure(*this, TDK, Info, Ranges))
- return DAR_FailedAlreadyDiagnosed;
- return DAR_Failed;
+ return TDK;
};
SmallVector<OriginalCallArg, 4> OriginalCallArgs;
- InitListExpr *InitList = dyn_cast<InitListExpr>(Init);
- if (InitList) {
- // Notionally, we substitute std::initializer_list<T> for 'auto' and deduce
- // against that. Such deduction only succeeds if removing cv-qualifiers and
- // references results in std::initializer_list<T>.
- if (!Type.getType().getNonReferenceType()->getAs<AutoType>())
- return DAR_Failed;
-
- // Resolving a core issue: a braced-init-list containing any designators is
- // a non-deduced context.
- for (Expr *E : InitList->inits())
- if (isa<DesignatedInitExpr>(E))
- return DAR_Failed;
+ QualType DeducedType;
+ // If this is a 'decltype(auto)' specifier, do the decltype dance.
+ if (AT->isDecltypeAuto()) {
+ if (InitList) {
+ Diag(Init->getBeginLoc(), diag::err_decltype_auto_initializer_list);
+ return TDK_AlreadyDiagnosed;
+ }
- SourceRange DeducedFromInitRange;
- for (unsigned i = 0, e = InitList->getNumInits(); i < e; ++i) {
- Expr *Init = InitList->getInit(i);
+ DeducedType = getDecltypeForExpr(Init);
+ assert(!DeducedType.isNull());
+ } else {
+ LocalInstantiationScope InstScope(*this);
+
+ // Build template<class TemplParam> void Func(FuncParam);
+ SourceLocation Loc = Init->getExprLoc();
+ TemplateTypeParmDecl *TemplParam = TemplateTypeParmDecl::Create(
+ Context, nullptr, SourceLocation(), Loc, Info.getDeducedDepth(), 0,
+ nullptr, false, false, false);
+ QualType TemplArg = QualType(TemplParam->getTypeForDecl(), 0);
+ NamedDecl *TemplParamPtr = TemplParam;
+ FixedSizeTemplateParameterListStorage<1, false> TemplateParamsSt(
+ Context, Loc, Loc, TemplParamPtr, Loc, nullptr);
+
+ if (InitList) {
+ // Notionally, we substitute std::initializer_list<T> for 'auto' and
+ // deduce against that. Such deduction only succeeds if removing
+ // cv-qualifiers and references results in std::initializer_list<T>.
+ if (!Type.getType().getNonReferenceType()->getAs<AutoType>())
+ return TDK_Invalid;
+
+ SourceRange DeducedFromInitRange;
+ for (Expr *Init : InitList->inits()) {
+ // Resolving a core issue: a braced-init-list containing any designators
+ // is a non-deduced context.
+ if (isa<DesignatedInitExpr>(Init))
+ return TDK_Invalid;
+ if (auto TDK = DeduceTemplateArgumentsFromCallArgument(
+ *this, TemplateParamsSt.get(), 0, TemplArg, Init->getType(),
+ Init->Classify(getASTContext()), Init, Info, Deduced,
+ OriginalCallArgs, /*Decomposed=*/true,
+ /*ArgIdx=*/0, /*TDF=*/0)) {
+ if (TDK == TDK_Inconsistent) {
+ Diag(Info.getLocation(), diag::err_auto_inconsistent_deduction)
+ << Info.FirstArg << Info.SecondArg << DeducedFromInitRange
+ << Init->getSourceRange();
+ return DeductionFailed(TDK_AlreadyDiagnosed);
+ }
+ return DeductionFailed(TDK);
+ }
+ if (DeducedFromInitRange.isInvalid() &&
+ Deduced[0].getKind() != TemplateArgument::Null)
+ DeducedFromInitRange = Init->getSourceRange();
+ }
+ } else {
+ if (!getLangOpts().CPlusPlus && Init->refersToBitField()) {
+ Diag(Loc, diag::err_auto_bitfield);
+ return TDK_AlreadyDiagnosed;
+ }
+ QualType FuncParam =
+ SubstituteDeducedTypeTransform(*this, TemplArg).Apply(Type);
+ assert(!FuncParam.isNull() &&
+ "substituting template parameter for 'auto' failed");
if (auto TDK = DeduceTemplateArgumentsFromCallArgument(
- *this, TemplateParamsSt.get(), 0, TemplArg, Init,
- Info, Deduced, OriginalCallArgs, /*Decomposed*/ true,
- /*ArgIdx*/ 0, /*TDF*/ 0))
- return DeductionFailed(TDK, {DeducedFromInitRange,
- Init->getSourceRange()});
-
- if (DeducedFromInitRange.isInvalid() &&
- Deduced[0].getKind() != TemplateArgument::Null)
- DeducedFromInitRange = Init->getSourceRange();
+ *this, TemplateParamsSt.get(), 0, FuncParam, Init->getType(),
+ Init->Classify(getASTContext()), Init, Info, Deduced,
+ OriginalCallArgs, /*Decomposed=*/false, /*ArgIdx=*/0, /*TDF=*/0,
+ FailedTSC))
+ return DeductionFailed(TDK);
}
- } else {
- if (!getLangOpts().CPlusPlus && Init->refersToBitField()) {
- Diag(Loc, diag::err_auto_bitfield);
- return DAR_FailedAlreadyDiagnosed;
- }
-
- if (auto TDK = DeduceTemplateArgumentsFromCallArgument(
- *this, TemplateParamsSt.get(), 0, FuncParam, Init, Info, Deduced,
- OriginalCallArgs, /*Decomposed*/ false, /*ArgIdx*/ 0, /*TDF*/ 0))
- return DeductionFailed(TDK, {});
- }
- // Could be null if somehow 'auto' appears in a non-deduced context.
- if (Deduced[0].getKind() != TemplateArgument::Type)
- return DeductionFailed(TDK_Incomplete, {});
+ // Could be null if somehow 'auto' appears in a non-deduced context.
+ if (Deduced[0].getKind() != TemplateArgument::Type)
+ return DeductionFailed(TDK_Incomplete);
+ DeducedType = Deduced[0].getAsType();
- QualType DeducedType = Deduced[0].getAsType();
-
- if (InitList) {
- DeducedType = BuildStdInitializerList(DeducedType, Loc);
- if (DeducedType.isNull())
- return DAR_FailedAlreadyDiagnosed;
+ if (InitList) {
+ DeducedType = BuildStdInitializerList(DeducedType, Loc);
+ if (DeducedType.isNull())
+ return TDK_AlreadyDiagnosed;
+ }
}
- if (const auto *AT = Type.getType()->getAs<AutoType>()) {
- if (AT->isConstrained() && !IgnoreConstraints) {
- auto ConstraintsResult =
- CheckDeducedPlaceholderConstraints(*this, *AT,
- Type.getContainedAutoTypeLoc(),
- DeducedType);
- if (ConstraintsResult != DAR_Succeeded)
- return ConstraintsResult;
+ if (!Result.isNull()) {
+ if (!Context.hasSameType(DeducedType, Result)) {
+ Info.FirstArg = Result;
+ Info.SecondArg = DeducedType;
+ return DeductionFailed(TDK_Inconsistent);
}
+ DeducedType = Context.getCommonSugaredType(Result, DeducedType);
}
+ if (AT->isConstrained() && !IgnoreConstraints &&
+ CheckDeducedPlaceholderConstraints(
+ *this, *AT, Type.getContainedAutoTypeLoc(), DeducedType))
+ return TDK_AlreadyDiagnosed;
+
Result = SubstituteDeducedTypeTransform(*this, DeducedType).Apply(Type);
if (Result.isNull())
- return DAR_FailedAlreadyDiagnosed;
+ return TDK_AlreadyDiagnosed;
// Check that the deduced argument type is compatible with the original
// argument type per C++ [temp.deduct.call]p4.
@@ -4921,36 +5094,38 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result,
if (auto TDK =
CheckOriginalCallArgDeduction(*this, Info, OriginalArg, DeducedA)) {
Result = QualType();
- return DeductionFailed(TDK, {});
+ return DeductionFailed(TDK);
}
}
- return DAR_Succeeded;
+ return TDK_Success;
}
QualType Sema::SubstAutoType(QualType TypeWithAuto,
QualType TypeToReplaceAuto) {
- if (TypeToReplaceAuto->isDependentType())
- return SubstituteDeducedTypeTransform(
- *this, DependentAuto{
- TypeToReplaceAuto->containsUnexpandedParameterPack()})
- .TransformType(TypeWithAuto);
+ assert(TypeToReplaceAuto != Context.DependentTy);
return SubstituteDeducedTypeTransform(*this, TypeToReplaceAuto)
.TransformType(TypeWithAuto);
}
TypeSourceInfo *Sema::SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType TypeToReplaceAuto) {
- if (TypeToReplaceAuto->isDependentType())
- return SubstituteDeducedTypeTransform(
- *this,
- DependentAuto{
- TypeToReplaceAuto->containsUnexpandedParameterPack()})
- .TransformType(TypeWithAuto);
+ assert(TypeToReplaceAuto != Context.DependentTy);
return SubstituteDeducedTypeTransform(*this, TypeToReplaceAuto)
.TransformType(TypeWithAuto);
}
+QualType Sema::SubstAutoTypeDependent(QualType TypeWithAuto) {
+ return SubstituteDeducedTypeTransform(*this, DependentAuto{false})
+ .TransformType(TypeWithAuto);
+}
+
+TypeSourceInfo *
+Sema::SubstAutoTypeSourceInfoDependent(TypeSourceInfo *TypeWithAuto) {
+ return SubstituteDeducedTypeTransform(*this, DependentAuto{false})
+ .TransformType(TypeWithAuto);
+}
+
QualType Sema::ReplaceAutoType(QualType TypeWithAuto,
QualType TypeToReplaceAuto) {
return SubstituteDeducedTypeTransform(*this, TypeToReplaceAuto,
@@ -5043,6 +5218,33 @@ bool Sema::DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
return StillUndeduced;
}
+bool Sema::CheckIfFunctionSpecializationIsImmediate(FunctionDecl *FD,
+ SourceLocation Loc) {
+ assert(FD->isImmediateEscalating());
+
+ if (isLambdaConversionOperator(FD)) {
+ CXXRecordDecl *Lambda = cast<CXXMethodDecl>(FD)->getParent();
+ FunctionDecl *CallOp = Lambda->getLambdaCallOperator();
+
+ // For a generic lambda, instantiate the call operator if needed.
+ if (auto *Args = FD->getTemplateSpecializationArgs()) {
+ CallOp = InstantiateFunctionDeclaration(
+ CallOp->getDescribedFunctionTemplate(), Args, Loc);
+ if (!CallOp || CallOp->isInvalidDecl())
+ return true;
+ runWithSufficientStackSpace(
+ Loc, [&] { InstantiateFunctionDefinition(Loc, CallOp); });
+ }
+ return CallOp->isInvalidDecl();
+ }
+
+ if (FD->getTemplateInstantiationPattern()) {
+ runWithSufficientStackSpace(
+ Loc, [&] { InstantiateFunctionDefinition(Loc, FD); });
+ }
+ return false;
+}
+
/// If this is a non-static member function,
static void
AddImplicitObjectParameterType(ASTContext &Context,
@@ -5055,6 +5257,8 @@ AddImplicitObjectParameterType(ASTContext &Context,
//
// The standard doesn't say explicitly, but we pick the appropriate kind of
// reference type based on [over.match.funcs]p4.
+ assert(Method && Method->isImplicitObjectMemberFunction() &&
+ "expected an implicit objet function");
QualType ArgTy = Context.getTypeDeclType(Method->getParent());
ArgTy = Context.getQualifiedType(ArgTy, Method->getMethodQualifiers());
if (Method->getRefQualifier() == RQ_RValue)
@@ -5116,14 +5320,17 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
unsigned NumComparedArguments = NumCallArguments1;
- if (!Method2 && Method1 && !Method1->isStatic()) {
+ if (!Method2 && Method1 && Method1->isImplicitObjectMemberFunction()) {
// Compare 'this' from Method1 against first parameter from Method2.
AddImplicitObjectParameterType(S.Context, Method1, Args1);
++NumComparedArguments;
- } else if (!Method1 && Method2 && !Method2->isStatic()) {
+ } else if (!Method1 && Method2 &&
+ Method2->isImplicitObjectMemberFunction()) {
// Compare 'this' from Method2 against first parameter from Method1.
AddImplicitObjectParameterType(S.Context, Method2, Args2);
- } else if (Method1 && Method2 && Reversed) {
+ } else if (Method1 && Method2 && Reversed &&
+ Method1->isImplicitObjectMemberFunction() &&
+ Method2->isImplicitObjectMemberFunction()) {
// Compare 'this' from Method1 against second parameter from Method2
// and 'this' from Method2 against second parameter from Method1.
AddImplicitObjectParameterType(S.Context, Method1, Args1);
@@ -5145,6 +5352,7 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
Args2.resize(NumComparedArguments);
if (Reversed)
std::reverse(Args2.begin(), Args2.end());
+
if (DeduceTemplateArguments(S, TemplateParams, Args2.data(), Args2.size(),
Args1.data(), Args1.size(), Info, Deduced,
TDF_None, /*PartialOrdering=*/true))
@@ -5226,27 +5434,6 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
return true;
}
-/// Determine whether this a function template whose parameter-type-list
-/// ends with a function parameter pack.
-static bool isVariadicFunctionTemplate(FunctionTemplateDecl *FunTmpl) {
- FunctionDecl *Function = FunTmpl->getTemplatedDecl();
- unsigned NumParams = Function->getNumParams();
- if (NumParams == 0)
- return false;
-
- ParmVarDecl *Last = Function->getParamDecl(NumParams - 1);
- if (!Last->isParameterPack())
- return false;
-
- // Make sure that no previous parameter is a parameter pack.
- while (--NumParams > 0) {
- if (Function->getParamDecl(NumParams - 1)->isParameterPack())
- return false;
- }
-
- return true;
-}
-
/// Returns the more specialized function template according
/// to the rules of function template partial ordering (C++ [temp.func.order]).
///
@@ -5269,49 +5456,126 @@ static bool isVariadicFunctionTemplate(FunctionTemplateDecl *FunTmpl) {
///
/// \returns the more specialized function template. If neither
/// template is more specialized, returns NULL.
-FunctionTemplateDecl *
-Sema::getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
- FunctionTemplateDecl *FT2,
- SourceLocation Loc,
- TemplatePartialOrderingContext TPOC,
- unsigned NumCallArguments1,
- unsigned NumCallArguments2,
- bool Reversed) {
-
- auto JudgeByConstraints = [&] () -> FunctionTemplateDecl * {
- llvm::SmallVector<const Expr *, 3> AC1, AC2;
- FT1->getAssociatedConstraints(AC1);
- FT2->getAssociatedConstraints(AC2);
- bool AtLeastAsConstrained1, AtLeastAsConstrained2;
- if (IsAtLeastAsConstrained(FT1, AC1, FT2, AC2, AtLeastAsConstrained1))
- return nullptr;
- if (IsAtLeastAsConstrained(FT2, AC2, FT1, AC1, AtLeastAsConstrained2))
- return nullptr;
- if (AtLeastAsConstrained1 == AtLeastAsConstrained2)
- return nullptr;
- return AtLeastAsConstrained1 ? FT1 : FT2;
- };
+FunctionTemplateDecl *Sema::getMoreSpecializedTemplate(
+ FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
+ TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
+ unsigned NumCallArguments2, bool Reversed) {
bool Better1 = isAtLeastAsSpecializedAs(*this, Loc, FT1, FT2, TPOC,
NumCallArguments1, Reversed);
bool Better2 = isAtLeastAsSpecializedAs(*this, Loc, FT2, FT1, TPOC,
NumCallArguments2, Reversed);
+ // C++ [temp.deduct.partial]p10:
+ // F is more specialized than G if F is at least as specialized as G and G
+ // is not at least as specialized as F.
if (Better1 != Better2) // We have a clear winner
return Better1 ? FT1 : FT2;
if (!Better1 && !Better2) // Neither is better than the other
- return JudgeByConstraints();
+ return nullptr;
- // FIXME: This mimics what GCC implements, but doesn't match up with the
- // proposed resolution for core issue 692. This area needs to be sorted out,
- // but for now we attempt to maintain compatibility.
- bool Variadic1 = isVariadicFunctionTemplate(FT1);
- bool Variadic2 = isVariadicFunctionTemplate(FT2);
- if (Variadic1 != Variadic2)
- return Variadic1? FT2 : FT1;
+ // C++ [temp.deduct.partial]p11:
+ // ... and if G has a trailing function parameter pack for which F does not
+ // have a corresponding parameter, and if F does not have a trailing
+ // function parameter pack, then F is more specialized than G.
+ FunctionDecl *FD1 = FT1->getTemplatedDecl();
+ FunctionDecl *FD2 = FT2->getTemplatedDecl();
+ unsigned NumParams1 = FD1->getNumParams();
+ unsigned NumParams2 = FD2->getNumParams();
+ bool Variadic1 = NumParams1 && FD1->parameters().back()->isParameterPack();
+ bool Variadic2 = NumParams2 && FD2->parameters().back()->isParameterPack();
+ if (Variadic1 != Variadic2) {
+ if (Variadic1 && NumParams1 > NumParams2)
+ return FT2;
+ if (Variadic2 && NumParams2 > NumParams1)
+ return FT1;
+ }
- return JudgeByConstraints();
+ // This a speculative fix for CWG1432 (Similar to the fix for CWG1395) that
+ // there is no wording or even resolution for this issue.
+ for (int i = 0, e = std::min(NumParams1, NumParams2); i < e; ++i) {
+ QualType T1 = FD1->getParamDecl(i)->getType().getCanonicalType();
+ QualType T2 = FD2->getParamDecl(i)->getType().getCanonicalType();
+ auto *TST1 = dyn_cast<TemplateSpecializationType>(T1);
+ auto *TST2 = dyn_cast<TemplateSpecializationType>(T2);
+ if (!TST1 || !TST2)
+ continue;
+ const TemplateArgument &TA1 = TST1->template_arguments().back();
+ if (TA1.getKind() == TemplateArgument::Pack) {
+ assert(TST1->template_arguments().size() ==
+ TST2->template_arguments().size());
+ const TemplateArgument &TA2 = TST2->template_arguments().back();
+ assert(TA2.getKind() == TemplateArgument::Pack);
+ unsigned PackSize1 = TA1.pack_size();
+ unsigned PackSize2 = TA2.pack_size();
+ bool IsPackExpansion1 =
+ PackSize1 && TA1.pack_elements().back().isPackExpansion();
+ bool IsPackExpansion2 =
+ PackSize2 && TA2.pack_elements().back().isPackExpansion();
+ if (PackSize1 != PackSize2 && IsPackExpansion1 != IsPackExpansion2) {
+ if (PackSize1 > PackSize2 && IsPackExpansion1)
+ return FT2;
+ if (PackSize1 < PackSize2 && IsPackExpansion2)
+ return FT1;
+ }
+ }
+ }
+
+ if (!Context.getLangOpts().CPlusPlus20)
+ return nullptr;
+
+ // Match GCC on not implementing [temp.func.order]p6.2.1.
+
+ // C++20 [temp.func.order]p6:
+ // If deduction against the other template succeeds for both transformed
+ // templates, constraints can be considered as follows:
+
+ // C++20 [temp.func.order]p6.1:
+ // If their template-parameter-lists (possibly including template-parameters
+ // invented for an abbreviated function template ([dcl.fct])) or function
+ // parameter lists differ in length, neither template is more specialized
+ // than the other.
+ TemplateParameterList *TPL1 = FT1->getTemplateParameters();
+ TemplateParameterList *TPL2 = FT2->getTemplateParameters();
+ if (TPL1->size() != TPL2->size() || NumParams1 != NumParams2)
+ return nullptr;
+
+ // C++20 [temp.func.order]p6.2.2:
+ // Otherwise, if the corresponding template-parameters of the
+ // template-parameter-lists are not equivalent ([temp.over.link]) or if the
+ // function parameters that positionally correspond between the two
+ // templates are not of the same type, neither template is more specialized
+ // than the other.
+ if (!TemplateParameterListsAreEqual(TPL1, TPL2, false,
+ Sema::TPL_TemplateParamsEquivalent))
+ return nullptr;
+
+ for (unsigned i = 0; i < NumParams1; ++i)
+ if (!Context.hasSameType(FD1->getParamDecl(i)->getType(),
+ FD2->getParamDecl(i)->getType()))
+ return nullptr;
+
+ // C++20 [temp.func.order]p6.3:
+ // Otherwise, if the context in which the partial ordering is done is
+ // that of a call to a conversion function and the return types of the
+ // templates are not the same, then neither template is more specialized
+ // than the other.
+ if (TPOC == TPOC_Conversion &&
+ !Context.hasSameType(FD1->getReturnType(), FD2->getReturnType()))
+ return nullptr;
+
+ llvm::SmallVector<const Expr *, 3> AC1, AC2;
+ FT1->getAssociatedConstraints(AC1);
+ FT2->getAssociatedConstraints(AC2);
+ bool AtLeastAsConstrained1, AtLeastAsConstrained2;
+ if (IsAtLeastAsConstrained(FT1, AC1, FT2, AC2, AtLeastAsConstrained1))
+ return nullptr;
+ if (IsAtLeastAsConstrained(FT2, AC2, FT1, AC1, AtLeastAsConstrained2))
+ return nullptr;
+ if (AtLeastAsConstrained1 == AtLeastAsConstrained2)
+ return nullptr;
+ return AtLeastAsConstrained1 ? FT1 : FT2;
}
/// Determine if the two templates are equivalent.
@@ -5476,7 +5740,7 @@ static bool isAtLeastAsSpecializedAs(Sema &S, QualType T1, QualType T2,
if (Inst.isInvalid())
return false;
- auto *TST1 = T1->castAs<TemplateSpecializationType>();
+ const auto *TST1 = cast<TemplateSpecializationType>(T1);
bool AtLeastAsSpecialized;
S.runWithSufficientStackSpace(Info.getLocation(), [&] {
AtLeastAsSpecialized = !FinishTemplateArgumentDeduction(
@@ -5488,6 +5752,180 @@ static bool isAtLeastAsSpecializedAs(Sema &S, QualType T1, QualType T2,
return AtLeastAsSpecialized;
}
+namespace {
+// A dummy class to return nullptr instead of P2 when performing "more
+// specialized than primary" check.
+struct GetP2 {
+ template <typename T1, typename T2,
+ std::enable_if_t<std::is_same_v<T1, T2>, bool> = true>
+ T2 *operator()(T1 *, T2 *P2) {
+ return P2;
+ }
+ template <typename T1, typename T2,
+ std::enable_if_t<!std::is_same_v<T1, T2>, bool> = true>
+ T1 *operator()(T1 *, T2 *) {
+ return nullptr;
+ }
+};
+
+// The assumption is that two template argument lists have the same size.
+struct TemplateArgumentListAreEqual {
+ ASTContext &Ctx;
+ TemplateArgumentListAreEqual(ASTContext &Ctx) : Ctx(Ctx) {}
+
+ template <typename T1, typename T2,
+ std::enable_if_t<std::is_same_v<T1, T2>, bool> = true>
+ bool operator()(T1 *PS1, T2 *PS2) {
+ ArrayRef<TemplateArgument> Args1 = PS1->getTemplateArgs().asArray(),
+ Args2 = PS2->getTemplateArgs().asArray();
+
+ for (unsigned I = 0, E = Args1.size(); I < E; ++I) {
+ // We use profile, instead of structural comparison of the arguments,
+ // because canonicalization can't do the right thing for dependent
+ // expressions.
+ llvm::FoldingSetNodeID IDA, IDB;
+ Args1[I].Profile(IDA, Ctx);
+ Args2[I].Profile(IDB, Ctx);
+ if (IDA != IDB)
+ return false;
+ }
+ return true;
+ }
+
+ template <typename T1, typename T2,
+ std::enable_if_t<!std::is_same_v<T1, T2>, bool> = true>
+ bool operator()(T1 *Spec, T2 *Primary) {
+ ArrayRef<TemplateArgument> Args1 = Spec->getTemplateArgs().asArray(),
+ Args2 = Primary->getInjectedTemplateArgs();
+
+ for (unsigned I = 0, E = Args1.size(); I < E; ++I) {
+ // We use profile, instead of structural comparison of the arguments,
+ // because canonicalization can't do the right thing for dependent
+ // expressions.
+ llvm::FoldingSetNodeID IDA, IDB;
+ Args1[I].Profile(IDA, Ctx);
+ // Unlike the specialization arguments, the injected arguments are not
+ // always canonical.
+ Ctx.getCanonicalTemplateArgument(Args2[I]).Profile(IDB, Ctx);
+ if (IDA != IDB)
+ return false;
+ }
+ return true;
+ }
+};
+} // namespace
+
+/// Returns the more specialized template specialization between T1/P1 and
+/// T2/P2.
+/// - If IsMoreSpecialThanPrimaryCheck is true, T1/P1 is the partial
+/// specialization and T2/P2 is the primary template.
+/// - otherwise, both T1/P1 and T2/P2 are the partial specialization.
+///
+/// \param T1 the type of the first template partial specialization
+///
+/// \param T2 if IsMoreSpecialThanPrimaryCheck is true, the type of the second
+/// template partial specialization; otherwise, the type of the
+/// primary template.
+///
+/// \param P1 the first template partial specialization
+///
+/// \param P2 if IsMoreSpecialThanPrimaryCheck is true, the second template
+/// partial specialization; otherwise, the primary template.
+///
+/// \returns - If IsMoreSpecialThanPrimaryCheck is true, returns P1 if P1 is
+/// more specialized, returns nullptr if P1 is not more specialized.
+/// - otherwise, returns the more specialized template partial
+/// specialization. If neither partial specialization is more
+/// specialized, returns NULL.
+template <typename TemplateLikeDecl, typename PrimaryDel>
+static TemplateLikeDecl *
+getMoreSpecialized(Sema &S, QualType T1, QualType T2, TemplateLikeDecl *P1,
+ PrimaryDel *P2, TemplateDeductionInfo &Info) {
+ constexpr bool IsMoreSpecialThanPrimaryCheck =
+ !std::is_same_v<TemplateLikeDecl, PrimaryDel>;
+
+ bool Better1 = isAtLeastAsSpecializedAs(S, T1, T2, P2, Info);
+ if (IsMoreSpecialThanPrimaryCheck && !Better1)
+ return nullptr;
+
+ bool Better2 = isAtLeastAsSpecializedAs(S, T2, T1, P1, Info);
+ if (IsMoreSpecialThanPrimaryCheck && !Better2)
+ return P1;
+
+ // C++ [temp.deduct.partial]p10:
+ // F is more specialized than G if F is at least as specialized as G and G
+ // is not at least as specialized as F.
+ if (Better1 != Better2) // We have a clear winner
+ return Better1 ? P1 : GetP2()(P1, P2);
+
+ if (!Better1 && !Better2)
+ return nullptr;
+
+ // This a speculative fix for CWG1432 (Similar to the fix for CWG1395) that
+ // there is no wording or even resolution for this issue.
+ auto *TST1 = cast<TemplateSpecializationType>(T1);
+ auto *TST2 = cast<TemplateSpecializationType>(T2);
+ const TemplateArgument &TA1 = TST1->template_arguments().back();
+ if (TA1.getKind() == TemplateArgument::Pack) {
+ assert(TST1->template_arguments().size() ==
+ TST2->template_arguments().size());
+ const TemplateArgument &TA2 = TST2->template_arguments().back();
+ assert(TA2.getKind() == TemplateArgument::Pack);
+ unsigned PackSize1 = TA1.pack_size();
+ unsigned PackSize2 = TA2.pack_size();
+ bool IsPackExpansion1 =
+ PackSize1 && TA1.pack_elements().back().isPackExpansion();
+ bool IsPackExpansion2 =
+ PackSize2 && TA2.pack_elements().back().isPackExpansion();
+ if (PackSize1 != PackSize2 && IsPackExpansion1 != IsPackExpansion2) {
+ if (PackSize1 > PackSize2 && IsPackExpansion1)
+ return GetP2()(P1, P2);
+ if (PackSize1 < PackSize2 && IsPackExpansion2)
+ return P1;
+ }
+ }
+
+ if (!S.Context.getLangOpts().CPlusPlus20)
+ return nullptr;
+
+ // Match GCC on not implementing [temp.func.order]p6.2.1.
+
+ // C++20 [temp.func.order]p6:
+ // If deduction against the other template succeeds for both transformed
+ // templates, constraints can be considered as follows:
+
+ TemplateParameterList *TPL1 = P1->getTemplateParameters();
+ TemplateParameterList *TPL2 = P2->getTemplateParameters();
+ if (TPL1->size() != TPL2->size())
+ return nullptr;
+
+ // C++20 [temp.func.order]p6.2.2:
+ // Otherwise, if the corresponding template-parameters of the
+ // template-parameter-lists are not equivalent ([temp.over.link]) or if the
+ // function parameters that positionally correspond between the two
+ // templates are not of the same type, neither template is more specialized
+ // than the other.
+ if (!S.TemplateParameterListsAreEqual(TPL1, TPL2, false,
+ Sema::TPL_TemplateParamsEquivalent))
+ return nullptr;
+
+ if (!TemplateArgumentListAreEqual(S.getASTContext())(P1, P2))
+ return nullptr;
+
+ llvm::SmallVector<const Expr *, 3> AC1, AC2;
+ P1->getAssociatedConstraints(AC1);
+ P2->getAssociatedConstraints(AC2);
+ bool AtLeastAsConstrained1, AtLeastAsConstrained2;
+ if (S.IsAtLeastAsConstrained(P1, AC1, P2, AC2, AtLeastAsConstrained1) ||
+ (IsMoreSpecialThanPrimaryCheck && !AtLeastAsConstrained1))
+ return nullptr;
+ if (S.IsAtLeastAsConstrained(P2, AC2, P1, AC1, AtLeastAsConstrained2))
+ return nullptr;
+ if (AtLeastAsConstrained1 == AtLeastAsConstrained2)
+ return nullptr;
+ return AtLeastAsConstrained1 ? P1 : GetP2()(P1, P2);
+}
+
/// Returns the more specialized class template partial specialization
/// according to the rules of partial ordering of class template partial
/// specializations (C++ [temp.class.order]).
@@ -5507,26 +5945,7 @@ Sema::getMoreSpecializedPartialSpecialization(
QualType PT2 = PS2->getInjectedSpecializationType();
TemplateDeductionInfo Info(Loc);
- bool Better1 = isAtLeastAsSpecializedAs(*this, PT1, PT2, PS2, Info);
- bool Better2 = isAtLeastAsSpecializedAs(*this, PT2, PT1, PS1, Info);
-
- if (!Better1 && !Better2)
- return nullptr;
- if (Better1 && Better2) {
- llvm::SmallVector<const Expr *, 3> AC1, AC2;
- PS1->getAssociatedConstraints(AC1);
- PS2->getAssociatedConstraints(AC2);
- bool AtLeastAsConstrained1, AtLeastAsConstrained2;
- if (IsAtLeastAsConstrained(PS1, AC1, PS2, AC2, AtLeastAsConstrained1))
- return nullptr;
- if (IsAtLeastAsConstrained(PS2, AC2, PS1, AC1, AtLeastAsConstrained2))
- return nullptr;
- if (AtLeastAsConstrained1 == AtLeastAsConstrained2)
- return nullptr;
- return AtLeastAsConstrained1 ? PS1 : PS2;
- }
-
- return Better1 ? PS1 : PS2;
+ return getMoreSpecialized(*this, PT1, PT2, PS1, PS2, Info);
}
bool Sema::isMoreSpecializedThanPrimary(
@@ -5534,24 +5953,12 @@ bool Sema::isMoreSpecializedThanPrimary(
ClassTemplateDecl *Primary = Spec->getSpecializedTemplate();
QualType PrimaryT = Primary->getInjectedClassNameSpecialization();
QualType PartialT = Spec->getInjectedSpecializationType();
- if (!isAtLeastAsSpecializedAs(*this, PartialT, PrimaryT, Primary, Info))
- return false;
- if (!isAtLeastAsSpecializedAs(*this, PrimaryT, PartialT, Spec, Info))
- return true;
- Info.clearSFINAEDiagnostic();
- llvm::SmallVector<const Expr *, 3> PrimaryAC, SpecAC;
- Primary->getAssociatedConstraints(PrimaryAC);
- Spec->getAssociatedConstraints(SpecAC);
- bool AtLeastAsConstrainedPrimary, AtLeastAsConstrainedSpec;
- if (IsAtLeastAsConstrained(Spec, SpecAC, Primary, PrimaryAC,
- AtLeastAsConstrainedSpec))
- return false;
- if (!AtLeastAsConstrainedSpec)
- return false;
- if (IsAtLeastAsConstrained(Primary, PrimaryAC, Spec, SpecAC,
- AtLeastAsConstrainedPrimary))
- return false;
- return !AtLeastAsConstrainedPrimary;
+
+ ClassTemplatePartialSpecializationDecl *MaybeSpec =
+ getMoreSpecialized(*this, PartialT, PrimaryT, Spec, Primary, Info);
+ if (MaybeSpec)
+ Info.clearSFINAEDiagnostic();
+ return MaybeSpec;
}
VarTemplatePartialSpecializationDecl *
@@ -5571,62 +5978,24 @@ Sema::getMoreSpecializedPartialSpecialization(
CanonTemplate, PS2->getTemplateArgs().asArray());
TemplateDeductionInfo Info(Loc);
- bool Better1 = isAtLeastAsSpecializedAs(*this, PT1, PT2, PS2, Info);
- bool Better2 = isAtLeastAsSpecializedAs(*this, PT2, PT1, PS1, Info);
-
- if (!Better1 && !Better2)
- return nullptr;
- if (Better1 && Better2) {
- llvm::SmallVector<const Expr *, 3> AC1, AC2;
- PS1->getAssociatedConstraints(AC1);
- PS2->getAssociatedConstraints(AC2);
- bool AtLeastAsConstrained1, AtLeastAsConstrained2;
- if (IsAtLeastAsConstrained(PS1, AC1, PS2, AC2, AtLeastAsConstrained1))
- return nullptr;
- if (IsAtLeastAsConstrained(PS2, AC2, PS1, AC1, AtLeastAsConstrained2))
- return nullptr;
- if (AtLeastAsConstrained1 == AtLeastAsConstrained2)
- return nullptr;
- return AtLeastAsConstrained1 ? PS1 : PS2;
- }
-
- return Better1 ? PS1 : PS2;
+ return getMoreSpecialized(*this, PT1, PT2, PS1, PS2, Info);
}
bool Sema::isMoreSpecializedThanPrimary(
VarTemplatePartialSpecializationDecl *Spec, TemplateDeductionInfo &Info) {
- TemplateDecl *Primary = Spec->getSpecializedTemplate();
- // FIXME: Cache the injected template arguments rather than recomputing
- // them for each partial specialization.
- SmallVector<TemplateArgument, 8> PrimaryArgs;
- Context.getInjectedTemplateArgs(Primary->getTemplateParameters(),
- PrimaryArgs);
-
+ VarTemplateDecl *Primary = Spec->getSpecializedTemplate();
TemplateName CanonTemplate =
Context.getCanonicalTemplateName(TemplateName(Primary));
QualType PrimaryT = Context.getTemplateSpecializationType(
- CanonTemplate, PrimaryArgs);
+ CanonTemplate, Primary->getInjectedTemplateArgs());
QualType PartialT = Context.getTemplateSpecializationType(
CanonTemplate, Spec->getTemplateArgs().asArray());
- if (!isAtLeastAsSpecializedAs(*this, PartialT, PrimaryT, Primary, Info))
- return false;
- if (!isAtLeastAsSpecializedAs(*this, PrimaryT, PartialT, Spec, Info))
- return true;
- Info.clearSFINAEDiagnostic();
- llvm::SmallVector<const Expr *, 3> PrimaryAC, SpecAC;
- Primary->getAssociatedConstraints(PrimaryAC);
- Spec->getAssociatedConstraints(SpecAC);
- bool AtLeastAsConstrainedPrimary, AtLeastAsConstrainedSpec;
- if (IsAtLeastAsConstrained(Spec, SpecAC, Primary, PrimaryAC,
- AtLeastAsConstrainedSpec))
- return false;
- if (!AtLeastAsConstrainedSpec)
- return false;
- if (IsAtLeastAsConstrained(Primary, PrimaryAC, Spec, SpecAC,
- AtLeastAsConstrainedPrimary))
- return false;
- return !AtLeastAsConstrainedPrimary;
+ VarTemplatePartialSpecializationDecl *MaybeSpec =
+ getMoreSpecialized(*this, PartialT, PrimaryT, Spec, Primary, Info);
+ if (MaybeSpec)
+ Info.clearSFINAEDiagnostic();
+ return MaybeSpec;
}
bool Sema::isTemplateTemplateParameterAtLeastAsSpecializedAs(
@@ -5678,13 +6047,15 @@ bool Sema::isTemplateTemplateParameterAtLeastAsSpecializedAs(
// C++1z [temp.arg.template]p3:
// If the rewrite produces an invalid type, then P is not at least as
// specialized as A.
- if (CheckTemplateArgumentList(AArg, Loc, PArgList, false, PArgs) ||
+ SmallVector<TemplateArgument, 4> SugaredPArgs;
+ if (CheckTemplateArgumentList(AArg, Loc, PArgList, false, SugaredPArgs,
+ PArgs) ||
Trap.hasErrorOccurred())
return false;
}
- QualType AType = Context.getTemplateSpecializationType(X, AArgs);
- QualType PType = Context.getTemplateSpecializationType(X, PArgs);
+ QualType AType = Context.getCanonicalTemplateSpecializationType(X, AArgs);
+ QualType PType = Context.getCanonicalTemplateSpecializationType(X, PArgs);
// ... the function template corresponding to P is at least as specialized
// as the function template corresponding to A according to the partial
@@ -5710,8 +6081,8 @@ struct MarkUsedTemplateParameterVisitor :
}
bool TraverseTemplateName(TemplateName Template) {
- if (auto *TTP =
- dyn_cast<TemplateTemplateParmDecl>(Template.getAsTemplateDecl()))
+ if (auto *TTP = llvm::dyn_cast_or_null<TemplateTemplateParmDecl>(
+ Template.getAsTemplateDecl()))
if (TTP->getDepth() == Depth)
Used[TTP->getIndex()] = true;
RecursiveASTVisitor<MarkUsedTemplateParameterVisitor>::
@@ -5856,7 +6227,7 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
cast<DependentSizedArrayType>(T)->getSizeExpr(),
OnlyDeduced, Depth, Used);
// Fall through to check the element type
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Type::ConstantArray:
case Type::IncompleteArray:
@@ -5956,9 +6327,8 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
case Type::SubstTemplateTypeParmPack: {
const SubstTemplateTypeParmPackType *Subst
= cast<SubstTemplateTypeParmPackType>(T);
- MarkUsedTemplateParameters(Ctx,
- QualType(Subst->getReplacedParameter(), 0),
- OnlyDeduced, Depth, Used);
+ if (Subst->getReplacedParameter()->getDepth() == Depth)
+ Used[Subst->getIndex()] = true;
MarkUsedTemplateParameters(Ctx, Subst->getArgumentPack(),
OnlyDeduced, Depth, Used);
break;
@@ -5966,7 +6336,7 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
case Type::InjectedClassName:
T = cast<InjectedClassNameType>(T)->getInjectedSpecializationType();
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Type::TemplateSpecialization: {
const TemplateSpecializationType *Spec
@@ -5982,9 +6352,8 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
hasPackExpansionBeforeEnd(Spec->template_arguments()))
break;
- for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I)
- MarkUsedTemplateParameters(Ctx, Spec->getArg(I), OnlyDeduced, Depth,
- Used);
+ for (const auto &Arg : Spec->template_arguments())
+ MarkUsedTemplateParameters(Ctx, Arg, OnlyDeduced, Depth, Used);
break;
}
@@ -6028,16 +6397,14 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
MarkUsedTemplateParameters(Ctx, Spec->getQualifier(),
OnlyDeduced, Depth, Used);
- for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I)
- MarkUsedTemplateParameters(Ctx, Spec->getArg(I), OnlyDeduced, Depth,
- Used);
+ for (const auto &Arg : Spec->template_arguments())
+ MarkUsedTemplateParameters(Ctx, Arg, OnlyDeduced, Depth, Used);
break;
}
case Type::TypeOf:
if (!OnlyDeduced)
- MarkUsedTemplateParameters(Ctx,
- cast<TypeOfType>(T)->getUnderlyingType(),
+ MarkUsedTemplateParameters(Ctx, cast<TypeOfType>(T)->getUnmodifiedType(),
OnlyDeduced, Depth, Used);
break;
@@ -6074,9 +6441,9 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
cast<DeducedType>(T)->getDeducedType(),
OnlyDeduced, Depth, Used);
break;
- case Type::DependentExtInt:
+ case Type::DependentBitInt:
MarkUsedTemplateParameters(Ctx,
- cast<DependentExtIntType>(T)->getNumBitsExpr(),
+ cast<DependentBitIntType>(T)->getNumBitsExpr(),
OnlyDeduced, Depth, Used);
break;
@@ -6091,7 +6458,7 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
case Type::ObjCObjectPointer:
case Type::UnresolvedUsing:
case Type::Pipe:
- case Type::ExtInt:
+ case Type::BitInt:
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base)
@@ -6113,11 +6480,8 @@ MarkUsedTemplateParameters(ASTContext &Ctx,
case TemplateArgument::Null:
case TemplateArgument::Integral:
case TemplateArgument::Declaration:
- break;
-
case TemplateArgument::NullPtr:
- MarkUsedTemplateParameters(Ctx, TemplateArg.getNullPtrType(), OnlyDeduced,
- Depth, Used);
+ case TemplateArgument::StructuralValue:
break;
case TemplateArgument::Type:
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp
index 74889aa3ca88..e12186d7d82f 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -10,26 +10,35 @@
//===----------------------------------------------------------------------===/
#include "TreeTransform.h"
+#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/AST/PrettyDeclStackTrace.h"
+#include "clang/AST/Type.h"
#include "clang/AST/TypeVisitor.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Stack.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/TemplateInstCallback.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TimeProfiler.h"
+#include <optional>
using namespace clang;
using namespace sema;
@@ -38,13 +47,279 @@ using namespace sema;
// Template Instantiation Support
//===----------------------------------------------------------------------===/
+namespace {
+namespace TemplateInstArgsHelpers {
+struct Response {
+ const Decl *NextDecl = nullptr;
+ bool IsDone = false;
+ bool ClearRelativeToPrimary = true;
+ static Response Done() {
+ Response R;
+ R.IsDone = true;
+ return R;
+ }
+ static Response ChangeDecl(const Decl *ND) {
+ Response R;
+ R.NextDecl = ND;
+ return R;
+ }
+ static Response ChangeDecl(const DeclContext *Ctx) {
+ Response R;
+ R.NextDecl = Decl::castFromDeclContext(Ctx);
+ return R;
+ }
+
+ static Response UseNextDecl(const Decl *CurDecl) {
+ return ChangeDecl(CurDecl->getDeclContext());
+ }
+
+ static Response DontClearRelativeToPrimaryNextDecl(const Decl *CurDecl) {
+ Response R = Response::UseNextDecl(CurDecl);
+ R.ClearRelativeToPrimary = false;
+ return R;
+ }
+};
+// Add template arguments from a variable template instantiation.
+Response
+HandleVarTemplateSpec(const VarTemplateSpecializationDecl *VarTemplSpec,
+ MultiLevelTemplateArgumentList &Result,
+ bool SkipForSpecialization) {
+ // For a class-scope explicit specialization, there are no template arguments
+ // at this level, but there may be enclosing template arguments.
+ if (VarTemplSpec->isClassScopeExplicitSpecialization())
+ return Response::DontClearRelativeToPrimaryNextDecl(VarTemplSpec);
+
+ // We're done when we hit an explicit specialization.
+ if (VarTemplSpec->getSpecializationKind() == TSK_ExplicitSpecialization &&
+ !isa<VarTemplatePartialSpecializationDecl>(VarTemplSpec))
+ return Response::Done();
+
+ // If this variable template specialization was instantiated from a
+ // specialized member that is a variable template, we're done.
+ assert(VarTemplSpec->getSpecializedTemplate() && "No variable template?");
+ llvm::PointerUnion<VarTemplateDecl *, VarTemplatePartialSpecializationDecl *>
+ Specialized = VarTemplSpec->getSpecializedTemplateOrPartial();
+ if (VarTemplatePartialSpecializationDecl *Partial =
+ Specialized.dyn_cast<VarTemplatePartialSpecializationDecl *>()) {
+ if (!SkipForSpecialization)
+ Result.addOuterTemplateArguments(
+ Partial, VarTemplSpec->getTemplateInstantiationArgs().asArray(),
+ /*Final=*/false);
+ if (Partial->isMemberSpecialization())
+ return Response::Done();
+ } else {
+ VarTemplateDecl *Tmpl = Specialized.get<VarTemplateDecl *>();
+ if (!SkipForSpecialization)
+ Result.addOuterTemplateArguments(
+ Tmpl, VarTemplSpec->getTemplateInstantiationArgs().asArray(),
+ /*Final=*/false);
+ if (Tmpl->isMemberSpecialization())
+ return Response::Done();
+ }
+ return Response::DontClearRelativeToPrimaryNextDecl(VarTemplSpec);
+}
+
+// If we have a template template parameter with translation unit context,
+// then we're performing substitution into a default template argument of
+// this template template parameter before we've constructed the template
+// that will own this template template parameter. In this case, we
+// use empty template parameter lists for all of the outer templates
+// to avoid performing any substitutions.
+Response
+HandleDefaultTempArgIntoTempTempParam(const TemplateTemplateParmDecl *TTP,
+ MultiLevelTemplateArgumentList &Result) {
+ for (unsigned I = 0, N = TTP->getDepth() + 1; I != N; ++I)
+ Result.addOuterTemplateArguments(std::nullopt);
+ return Response::Done();
+}
+
+Response HandlePartialClassTemplateSpec(
+ const ClassTemplatePartialSpecializationDecl *PartialClassTemplSpec,
+ MultiLevelTemplateArgumentList &Result, bool SkipForSpecialization) {
+ if (!SkipForSpecialization)
+ Result.addOuterRetainedLevels(PartialClassTemplSpec->getTemplateDepth());
+ return Response::Done();
+}
+
+// Add template arguments from a class template instantiation.
+Response
+HandleClassTemplateSpec(const ClassTemplateSpecializationDecl *ClassTemplSpec,
+ MultiLevelTemplateArgumentList &Result,
+ bool SkipForSpecialization) {
+ if (!ClassTemplSpec->isClassScopeExplicitSpecialization()) {
+ // We're done when we hit an explicit specialization.
+ if (ClassTemplSpec->getSpecializationKind() == TSK_ExplicitSpecialization &&
+ !isa<ClassTemplatePartialSpecializationDecl>(ClassTemplSpec))
+ return Response::Done();
+
+ if (!SkipForSpecialization)
+ Result.addOuterTemplateArguments(
+ const_cast<ClassTemplateSpecializationDecl *>(ClassTemplSpec),
+ ClassTemplSpec->getTemplateInstantiationArgs().asArray(),
+ /*Final=*/false);
+
+ // If this class template specialization was instantiated from a
+ // specialized member that is a class template, we're done.
+ assert(ClassTemplSpec->getSpecializedTemplate() && "No class template?");
+ if (ClassTemplSpec->getSpecializedTemplate()->isMemberSpecialization())
+ return Response::Done();
+
+ // If this was instantiated from a partial template specialization, we need
+ // to get the next level of declaration context from the partial
+ // specialization, as the ClassTemplateSpecializationDecl's
+ // DeclContext/LexicalDeclContext will be for the primary template.
+ if (auto *InstFromPartialTempl = ClassTemplSpec->getSpecializedTemplateOrPartial()
+ .dyn_cast<ClassTemplatePartialSpecializationDecl *>())
+ return Response::ChangeDecl(InstFromPartialTempl->getLexicalDeclContext());
+ }
+ return Response::UseNextDecl(ClassTemplSpec);
+}
+
+Response HandleFunction(const FunctionDecl *Function,
+ MultiLevelTemplateArgumentList &Result,
+ const FunctionDecl *Pattern, bool RelativeToPrimary,
+ bool ForConstraintInstantiation) {
+ // Add template arguments from a function template specialization.
+ if (!RelativeToPrimary &&
+ Function->getTemplateSpecializationKindForInstantiation() ==
+ TSK_ExplicitSpecialization)
+ return Response::Done();
+
+ if (!RelativeToPrimary &&
+ Function->getTemplateSpecializationKind() == TSK_ExplicitSpecialization) {
+ // This is an implicit instantiation of an explicit specialization. We
+ // don't get any template arguments from this function but might get
+ // some from an enclosing template.
+ return Response::UseNextDecl(Function);
+ } else if (const TemplateArgumentList *TemplateArgs =
+ Function->getTemplateSpecializationArgs()) {
+ // Add the template arguments for this specialization.
+ Result.addOuterTemplateArguments(const_cast<FunctionDecl *>(Function),
+ TemplateArgs->asArray(),
+ /*Final=*/false);
+
+ // If this function was instantiated from a specialized member that is
+ // a function template, we're done.
+ assert(Function->getPrimaryTemplate() && "No function template?");
+ if (Function->getPrimaryTemplate()->isMemberSpecialization())
+ return Response::Done();
+
+ // If this function is a generic lambda specialization, we are done.
+ if (!ForConstraintInstantiation &&
+ isGenericLambdaCallOperatorOrStaticInvokerSpecialization(Function))
+ return Response::Done();
+
+ } else if (Function->getDescribedFunctionTemplate()) {
+ assert(
+ (ForConstraintInstantiation || Result.getNumSubstitutedLevels() == 0) &&
+ "Outer template not instantiated?");
+ }
+ // If this is a friend or local declaration and it declares an entity at
+ // namespace scope, take arguments from its lexical parent
+ // instead of its semantic parent, unless of course the pattern we're
+ // instantiating actually comes from the file's context!
+ if ((Function->getFriendObjectKind() || Function->isLocalExternDecl()) &&
+ Function->getNonTransparentDeclContext()->isFileContext() &&
+ (!Pattern || !Pattern->getLexicalDeclContext()->isFileContext())) {
+ return Response::ChangeDecl(Function->getLexicalDeclContext());
+ }
+
+ if (ForConstraintInstantiation && Function->getFriendObjectKind())
+ return Response::ChangeDecl(Function->getLexicalDeclContext());
+ return Response::UseNextDecl(Function);
+}
+
+Response HandleFunctionTemplateDecl(const FunctionTemplateDecl *FTD,
+ MultiLevelTemplateArgumentList &Result) {
+ if (!isa<ClassTemplateSpecializationDecl>(FTD->getDeclContext())) {
+ Result.addOuterTemplateArguments(
+ const_cast<FunctionTemplateDecl *>(FTD),
+ const_cast<FunctionTemplateDecl *>(FTD)->getInjectedTemplateArgs(),
+ /*Final=*/false);
+
+ NestedNameSpecifier *NNS = FTD->getTemplatedDecl()->getQualifier();
+
+ while (const Type *Ty = NNS ? NNS->getAsType() : nullptr) {
+ if (NNS->isInstantiationDependent()) {
+ if (const auto *TSTy = Ty->getAs<TemplateSpecializationType>())
+ Result.addOuterTemplateArguments(
+ const_cast<FunctionTemplateDecl *>(FTD), TSTy->template_arguments(),
+ /*Final=*/false);
+ }
+
+ NNS = NNS->getPrefix();
+ }
+ }
+
+ return Response::ChangeDecl(FTD->getLexicalDeclContext());
+}
+
+Response HandleRecordDecl(const CXXRecordDecl *Rec,
+ MultiLevelTemplateArgumentList &Result,
+ ASTContext &Context,
+ bool ForConstraintInstantiation) {
+ if (ClassTemplateDecl *ClassTemplate = Rec->getDescribedClassTemplate()) {
+ assert(
+ (ForConstraintInstantiation || Result.getNumSubstitutedLevels() == 0) &&
+ "Outer template not instantiated?");
+ if (ClassTemplate->isMemberSpecialization())
+ return Response::Done();
+ if (ForConstraintInstantiation)
+ Result.addOuterTemplateArguments(const_cast<CXXRecordDecl *>(Rec),
+ ClassTemplate->getInjectedTemplateArgs(),
+ /*Final=*/false);
+ }
+
+ if (const MemberSpecializationInfo *MSInfo =
+ Rec->getMemberSpecializationInfo())
+ if (MSInfo->getTemplateSpecializationKind() == TSK_ExplicitSpecialization)
+ return Response::Done();
+
+ bool IsFriend = Rec->getFriendObjectKind() ||
+ (Rec->getDescribedClassTemplate() &&
+ Rec->getDescribedClassTemplate()->getFriendObjectKind());
+ if (ForConstraintInstantiation && IsFriend &&
+ Rec->getNonTransparentDeclContext()->isFileContext()) {
+ return Response::ChangeDecl(Rec->getLexicalDeclContext());
+ }
+
+ // This is to make sure we pick up the VarTemplateSpecializationDecl that this
+ // lambda is defined inside of.
+ if (Rec->isLambda())
+ if (const Decl *LCD = Rec->getLambdaContextDecl())
+ return Response::ChangeDecl(LCD);
+
+ return Response::UseNextDecl(Rec);
+}
+
+Response HandleImplicitConceptSpecializationDecl(
+ const ImplicitConceptSpecializationDecl *CSD,
+ MultiLevelTemplateArgumentList &Result) {
+ Result.addOuterTemplateArguments(
+ const_cast<ImplicitConceptSpecializationDecl *>(CSD),
+ CSD->getTemplateArguments(),
+ /*Final=*/false);
+ return Response::UseNextDecl(CSD);
+}
+
+Response HandleGenericDeclContext(const Decl *CurDecl) {
+ return Response::UseNextDecl(CurDecl);
+}
+} // namespace TemplateInstArgsHelpers
+} // namespace
+
/// Retrieve the template argument list(s) that should be used to
/// instantiate the definition of the given declaration.
///
-/// \param D the declaration for which we are computing template instantiation
+/// \param ND the declaration for which we are computing template instantiation
/// arguments.
///
-/// \param Innermost if non-NULL, the innermost template argument list.
+/// \param DC In the event we don't HAVE a declaration yet, we instead provide
+/// the decl context where it will be created. In this case, the `Innermost`
+/// should likely be provided. If ND is non-null, this is ignored.
+///
+/// \param Innermost if non-NULL, specifies a template argument list for the
+/// template declaration passed as ND.
///
/// \param RelativeToPrimary true if we should get the template
/// arguments relative to the primary template, even when we're
@@ -52,146 +327,86 @@ using namespace sema;
/// template specializations.
///
/// \param Pattern If non-NULL, indicates the pattern from which we will be
-/// instantiating the definition of the given declaration, \p D. This is
+/// instantiating the definition of the given declaration, \p ND. This is
/// used to determine the proper set of template instantiation arguments for
/// friend function template specializations.
-MultiLevelTemplateArgumentList
-Sema::getTemplateInstantiationArgs(NamedDecl *D,
- const TemplateArgumentList *Innermost,
- bool RelativeToPrimary,
- const FunctionDecl *Pattern) {
+///
+/// \param ForConstraintInstantiation when collecting arguments,
+/// ForConstraintInstantiation indicates we should continue looking when
+/// encountering a lambda generic call operator, and continue looking for
+/// arguments on an enclosing class template.
+
+MultiLevelTemplateArgumentList Sema::getTemplateInstantiationArgs(
+ const NamedDecl *ND, const DeclContext *DC, bool Final,
+ const TemplateArgumentList *Innermost, bool RelativeToPrimary,
+ const FunctionDecl *Pattern, bool ForConstraintInstantiation,
+ bool SkipForSpecialization) {
+ assert((ND || DC) && "Can't find arguments for a decl if one isn't provided");
// Accumulate the set of template argument lists in this structure.
MultiLevelTemplateArgumentList Result;
- if (Innermost)
- Result.addOuterTemplateArguments(Innermost);
-
- DeclContext *Ctx = dyn_cast<DeclContext>(D);
- if (!Ctx) {
- Ctx = D->getDeclContext();
-
- // Add template arguments from a variable template instantiation. For a
- // class-scope explicit specialization, there are no template arguments
- // at this level, but there may be enclosing template arguments.
- VarTemplateSpecializationDecl *Spec =
- dyn_cast<VarTemplateSpecializationDecl>(D);
- if (Spec && !Spec->isClassScopeExplicitSpecialization()) {
- // We're done when we hit an explicit specialization.
- if (Spec->getSpecializationKind() == TSK_ExplicitSpecialization &&
- !isa<VarTemplatePartialSpecializationDecl>(Spec))
- return Result;
-
- Result.addOuterTemplateArguments(&Spec->getTemplateInstantiationArgs());
-
- // If this variable template specialization was instantiated from a
- // specialized member that is a variable template, we're done.
- assert(Spec->getSpecializedTemplate() && "No variable template?");
- llvm::PointerUnion<VarTemplateDecl*,
- VarTemplatePartialSpecializationDecl*> Specialized
- = Spec->getSpecializedTemplateOrPartial();
- if (VarTemplatePartialSpecializationDecl *Partial =
- Specialized.dyn_cast<VarTemplatePartialSpecializationDecl *>()) {
- if (Partial->isMemberSpecialization())
- return Result;
- } else {
- VarTemplateDecl *Tmpl = Specialized.get<VarTemplateDecl *>();
- if (Tmpl->isMemberSpecialization())
- return Result;
- }
- }
-
- // If we have a template template parameter with translation unit context,
- // then we're performing substitution into a default template argument of
- // this template template parameter before we've constructed the template
- // that will own this template template parameter. In this case, we
- // use empty template parameter lists for all of the outer templates
- // to avoid performing any substitutions.
- if (Ctx->isTranslationUnit()) {
- if (TemplateTemplateParmDecl *TTP
- = dyn_cast<TemplateTemplateParmDecl>(D)) {
- for (unsigned I = 0, N = TTP->getDepth() + 1; I != N; ++I)
- Result.addOuterTemplateArguments(None);
- return Result;
- }
- }
+ using namespace TemplateInstArgsHelpers;
+ const Decl *CurDecl = ND;
+
+ if (!CurDecl)
+ CurDecl = Decl::castFromDeclContext(DC);
+
+ if (Innermost) {
+ Result.addOuterTemplateArguments(const_cast<NamedDecl *>(ND),
+ Innermost->asArray(), Final);
+ // Populate placeholder template arguments for TemplateTemplateParmDecls.
+ // This is essential for the case e.g.
+ //
+ // template <class> concept Concept = false;
+ // template <template <Concept C> class T> void foo(T<int>)
+ //
+ // where parameter C has a depth of 1 but the substituting argument `int`
+ // has a depth of 0.
+ if (const auto *TTP = dyn_cast<TemplateTemplateParmDecl>(CurDecl))
+ HandleDefaultTempArgIntoTempTempParam(TTP, Result);
+ CurDecl = Response::UseNextDecl(CurDecl).NextDecl;
}
- while (!Ctx->isFileContext()) {
- // Add template arguments from a class template instantiation.
- ClassTemplateSpecializationDecl *Spec
- = dyn_cast<ClassTemplateSpecializationDecl>(Ctx);
- if (Spec && !Spec->isClassScopeExplicitSpecialization()) {
- // We're done when we hit an explicit specialization.
- if (Spec->getSpecializationKind() == TSK_ExplicitSpecialization &&
- !isa<ClassTemplatePartialSpecializationDecl>(Spec))
- break;
-
- Result.addOuterTemplateArguments(&Spec->getTemplateInstantiationArgs());
-
- // If this class template specialization was instantiated from a
- // specialized member that is a class template, we're done.
- assert(Spec->getSpecializedTemplate() && "No class template?");
- if (Spec->getSpecializedTemplate()->isMemberSpecialization())
- break;
- }
- // Add template arguments from a function template specialization.
- else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Ctx)) {
- if (!RelativeToPrimary &&
- Function->getTemplateSpecializationKindForInstantiation() ==
- TSK_ExplicitSpecialization)
- break;
-
- if (!RelativeToPrimary && Function->getTemplateSpecializationKind() ==
- TSK_ExplicitSpecialization) {
- // This is an implicit instantiation of an explicit specialization. We
- // don't get any template arguments from this function but might get
- // some from an enclosing template.
- } else if (const TemplateArgumentList *TemplateArgs
- = Function->getTemplateSpecializationArgs()) {
- // Add the template arguments for this specialization.
- Result.addOuterTemplateArguments(TemplateArgs);
-
- // If this function was instantiated from a specialized member that is
- // a function template, we're done.
- assert(Function->getPrimaryTemplate() && "No function template?");
- if (Function->getPrimaryTemplate()->isMemberSpecialization())
- break;
-
- // If this function is a generic lambda specialization, we are done.
- if (isGenericLambdaCallOperatorOrStaticInvokerSpecialization(Function))
- break;
-
- } else if (FunctionTemplateDecl *FunTmpl
- = Function->getDescribedFunctionTemplate()) {
- // Add the "injected" template arguments.
- Result.addOuterTemplateArguments(FunTmpl->getInjectedTemplateArgs());
- }
-
- // If this is a friend declaration and it declares an entity at
- // namespace scope, take arguments from its lexical parent
- // instead of its semantic parent, unless of course the pattern we're
- // instantiating actually comes from the file's context!
- if (Function->getFriendObjectKind() &&
- Function->getDeclContext()->isFileContext() &&
- (!Pattern || !Pattern->getLexicalDeclContext()->isFileContext())) {
- Ctx = Function->getLexicalDeclContext();
- RelativeToPrimary = false;
- continue;
- }
- } else if (CXXRecordDecl *Rec = dyn_cast<CXXRecordDecl>(Ctx)) {
- if (ClassTemplateDecl *ClassTemplate = Rec->getDescribedClassTemplate()) {
- QualType T = ClassTemplate->getInjectedClassNameSpecialization();
- const TemplateSpecializationType *TST =
- cast<TemplateSpecializationType>(Context.getCanonicalType(T));
- Result.addOuterTemplateArguments(
- llvm::makeArrayRef(TST->getArgs(), TST->getNumArgs()));
- if (ClassTemplate->isMemberSpecialization())
- break;
+ while (!CurDecl->isFileContextDecl()) {
+ Response R;
+ if (const auto *VarTemplSpec =
+ dyn_cast<VarTemplateSpecializationDecl>(CurDecl)) {
+ R = HandleVarTemplateSpec(VarTemplSpec, Result, SkipForSpecialization);
+ } else if (const auto *PartialClassTemplSpec =
+ dyn_cast<ClassTemplatePartialSpecializationDecl>(CurDecl)) {
+ R = HandlePartialClassTemplateSpec(PartialClassTemplSpec, Result,
+ SkipForSpecialization);
+ } else if (const auto *ClassTemplSpec =
+ dyn_cast<ClassTemplateSpecializationDecl>(CurDecl)) {
+ R = HandleClassTemplateSpec(ClassTemplSpec, Result,
+ SkipForSpecialization);
+ } else if (const auto *Function = dyn_cast<FunctionDecl>(CurDecl)) {
+ R = HandleFunction(Function, Result, Pattern, RelativeToPrimary,
+ ForConstraintInstantiation);
+ } else if (const auto *Rec = dyn_cast<CXXRecordDecl>(CurDecl)) {
+ R = HandleRecordDecl(Rec, Result, Context, ForConstraintInstantiation);
+ } else if (const auto *CSD =
+ dyn_cast<ImplicitConceptSpecializationDecl>(CurDecl)) {
+ R = HandleImplicitConceptSpecializationDecl(CSD, Result);
+ } else if (const auto *FTD = dyn_cast<FunctionTemplateDecl>(CurDecl)) {
+ R = HandleFunctionTemplateDecl(FTD, Result);
+ } else if (const auto *CTD = dyn_cast<ClassTemplateDecl>(CurDecl)) {
+ R = Response::ChangeDecl(CTD->getLexicalDeclContext());
+ } else if (!isa<DeclContext>(CurDecl)) {
+ R = Response::DontClearRelativeToPrimaryNextDecl(CurDecl);
+ if (const auto *TTP = dyn_cast<TemplateTemplateParmDecl>(CurDecl)) {
+ R = HandleDefaultTempArgIntoTempTempParam(TTP, Result);
}
+ } else {
+ R = HandleGenericDeclContext(CurDecl);
}
- Ctx = Ctx->getParent();
- RelativeToPrimary = false;
+ if (R.IsDone)
+ return Result;
+ if (R.ClearRelativeToPrimary)
+ RelativeToPrimary = false;
+ assert(R.NextDecl);
+ CurDecl = R.NextDecl;
}
return Result;
@@ -211,6 +426,7 @@ bool Sema::CodeSynthesisContext::isInstantiationRecord() const {
return true;
case RequirementInstantiation:
+ case RequirementParameterInstantiation:
case DefaultTemplateArgumentChecking:
case DeclaringSpecialMember:
case DeclaringImplicitEqualityComparison:
@@ -222,6 +438,9 @@ bool Sema::CodeSynthesisContext::isInstantiationRecord() const {
case RewritingOperatorAsSpaceship:
case InitializingStructuredBinding:
case MarkingClassDllexported:
+ case BuildingBuiltinDumpStructCall:
+ case LambdaExpressionSubstitution:
+ case BuildingDeductionGuides:
return false;
// This function should never be called when Kind's value is Memoization.
@@ -383,8 +602,8 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(
: InstantiatingTemplate(
SemaRef, CodeSynthesisContext::RequirementInstantiation,
PointOfInstantiation, InstantiationRange, /*Entity=*/nullptr,
- /*Template=*/nullptr, /*TemplateArgs=*/None, &DeductionInfo) {}
-
+ /*Template=*/nullptr, /*TemplateArgs=*/std::nullopt, &DeductionInfo) {
+}
Sema::InstantiatingTemplate::InstantiatingTemplate(
Sema &SemaRef, SourceLocation PointOfInstantiation,
@@ -393,8 +612,16 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(
: InstantiatingTemplate(
SemaRef, CodeSynthesisContext::NestedRequirementConstraintsCheck,
PointOfInstantiation, InstantiationRange, /*Entity=*/nullptr,
- /*Template=*/nullptr, /*TemplateArgs=*/None) {}
+ /*Template=*/nullptr, /*TemplateArgs=*/std::nullopt) {}
+Sema::InstantiatingTemplate::InstantiatingTemplate(
+ Sema &SemaRef, SourceLocation PointOfInstantiation, const RequiresExpr *RE,
+ sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange)
+ : InstantiatingTemplate(
+ SemaRef, CodeSynthesisContext::RequirementParameterInstantiation,
+ PointOfInstantiation, InstantiationRange, /*Entity=*/nullptr,
+ /*Template=*/nullptr, /*TemplateArgs=*/std::nullopt, &DeductionInfo) {
+}
Sema::InstantiatingTemplate::InstantiatingTemplate(
Sema &SemaRef, SourceLocation PointOfInstantiation,
@@ -430,6 +657,14 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(
SemaRef, CodeSynthesisContext::ParameterMappingSubstitution,
PointOfInstantiation, InstantiationRange, Template) {}
+Sema::InstantiatingTemplate::InstantiatingTemplate(
+ Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Entity,
+ BuildingDeductionGuidesTag, SourceRange InstantiationRange)
+ : InstantiatingTemplate(
+ SemaRef, CodeSynthesisContext::BuildingDeductionGuides,
+ PointOfInstantiation, InstantiationRange, Entity) {}
+
+
void Sema::pushCodeSynthesisContext(CodeSynthesisContext Ctx) {
Ctx.SavedInNonInstantiationSFINAEContext = InNonInstantiationSFINAEContext;
InNonInstantiationSFINAEContext = false;
@@ -491,6 +726,19 @@ void Sema::InstantiatingTemplate::Clear() {
}
}
+static std::string convertCallArgsToString(Sema &S,
+ llvm::ArrayRef<const Expr *> Args) {
+ std::string Result;
+ llvm::raw_string_ostream OS(Result);
+ llvm::ListSeparator Comma;
+ for (const Expr *Arg : Args) {
+ OS << Comma;
+ Arg->IgnoreParens()->printPretty(OS, nullptr,
+ S.Context.getPrintingPolicy());
+ }
+ return Result;
+}
+
bool Sema::InstantiatingTemplate::CheckInstantiationDepth(
SourceLocation PointOfInstantiation,
SourceRange InstantiationRange) {
@@ -573,6 +821,10 @@ void Sema::PrintInstantiationStack() {
Diags.Report(Active->PointOfInstantiation,
diag::note_template_nsdmi_here)
<< FD << Active->InstantiationRange;
+ } else if (ClassTemplateDecl *CTD = dyn_cast<ClassTemplateDecl>(D)) {
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_template_class_instantiation_here)
+ << CTD << Active->InstantiationRange;
} else {
Diags.Report(Active->PointOfInstantiation,
diag::note_template_type_alias_instantiation_here)
@@ -586,7 +838,7 @@ void Sema::PrintInstantiationStack() {
TemplateDecl *Template = cast<TemplateDecl>(Active->Template);
SmallString<128> TemplateArgsStr;
llvm::raw_svector_ostream OS(TemplateArgsStr);
- Template->printName(OS);
+ Template->printName(OS, getPrintingPolicy());
printTemplateArgumentList(OS, Active->template_arguments(),
getPrintingPolicy());
Diags.Report(Active->PointOfInstantiation,
@@ -652,7 +904,7 @@ void Sema::PrintInstantiationStack() {
SmallString<128> TemplateArgsStr;
llvm::raw_svector_ostream OS(TemplateArgsStr);
- FD->printName(OS);
+ FD->printName(OS, getPrintingPolicy());
printTemplateArgumentList(OS, Active->template_arguments(),
getPrintingPolicy());
Diags.Report(Active->PointOfInstantiation,
@@ -722,6 +974,11 @@ void Sema::PrintInstantiationStack() {
diag::note_template_requirement_instantiation_here)
<< Active->InstantiationRange;
break;
+ case CodeSynthesisContext::RequirementParameterInstantiation:
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_template_requirement_params_instantiation_here)
+ << Active->InstantiationRange;
+ break;
case CodeSynthesisContext::NestedRequirementConstraintsCheck:
Diags.Report(Active->PointOfInstantiation,
@@ -753,11 +1010,13 @@ void Sema::PrintInstantiationStack() {
<< MD->isExplicitlyDefaulted() << DFK.asSpecialMember()
<< Context.getTagDeclType(MD->getParent());
} else if (DFK.isComparison()) {
+ QualType RecordType = FD->getParamDecl(0)
+ ->getType()
+ .getNonReferenceType()
+ .getUnqualifiedType();
Diags.Report(Active->PointOfInstantiation,
diag::note_comparison_synthesized_at)
- << (int)DFK.asComparison()
- << Context.getTagDeclType(
- cast<CXXRecordDecl>(FD->getLexicalDeclContext()));
+ << (int)DFK.asComparison() << RecordType;
}
break;
}
@@ -779,9 +1038,20 @@ void Sema::PrintInstantiationStack() {
<< cast<CXXRecordDecl>(Active->Entity) << !getLangOpts().CPlusPlus11;
break;
+ case CodeSynthesisContext::BuildingBuiltinDumpStructCall:
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_building_builtin_dump_struct_call)
+ << convertCallArgsToString(
+ *this, llvm::ArrayRef(Active->CallArgs, Active->NumCallArgs));
+ break;
+
case CodeSynthesisContext::Memoization:
break;
+ case CodeSynthesisContext::LambdaExpressionSubstitution:
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_lambda_substitution_here);
+ break;
case CodeSynthesisContext::ConstraintsCheck: {
unsigned DiagID = 0;
if (!Active->Entity) {
@@ -804,7 +1074,7 @@ void Sema::PrintInstantiationStack() {
}
SmallString<128> TemplateArgsStr;
llvm::raw_svector_ostream OS(TemplateArgsStr);
- cast<NamedDecl>(Active->Entity)->printName(OS);
+ cast<NamedDecl>(Active->Entity)->printName(OS, getPrintingPolicy());
if (!isa<FunctionDecl>(Active->Entity)) {
printTemplateArgumentList(OS, Active->template_arguments(),
getPrintingPolicy());
@@ -829,13 +1099,17 @@ void Sema::PrintInstantiationStack() {
diag::note_parameter_mapping_substitution_here)
<< Active->InstantiationRange;
break;
+ case CodeSynthesisContext::BuildingDeductionGuides:
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_building_deduction_guide_here);
+ break;
}
}
}
-Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
+std::optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
if (InNonInstantiationSFINAEContext)
- return Optional<TemplateDeductionInfo *>(nullptr);
+ return std::optional<TemplateDeductionInfo *>(nullptr);
for (SmallVectorImpl<CodeSynthesisContext>::const_reverse_iterator
Active = CodeSynthesisContexts.rbegin(),
@@ -849,7 +1123,7 @@ Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
// context, depending on what else is on the stack.
if (isa<TypeAliasTemplateDecl>(Active->Entity))
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CodeSynthesisContext::DefaultFunctionArgumentInstantiation:
case CodeSynthesisContext::ExceptionSpecInstantiation:
case CodeSynthesisContext::ConstraintsCheck:
@@ -857,7 +1131,14 @@ Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
case CodeSynthesisContext::ConstraintNormalization:
case CodeSynthesisContext::NestedRequirementConstraintsCheck:
// This is a template instantiation, so there is no SFINAE.
- return None;
+ return std::nullopt;
+ case CodeSynthesisContext::LambdaExpressionSubstitution:
+ // [temp.deduct]p9
+ // A lambda-expression appearing in a function type or a template
+ // parameter is not considered part of the immediate context for the
+ // purposes of template argument deduction.
+ // CWG2672: A lambda-expression body is never in the immediate context.
+ return std::nullopt;
case CodeSynthesisContext::DefaultTemplateArgumentInstantiation:
case CodeSynthesisContext::PriorTemplateArgumentSubstitution:
@@ -870,11 +1151,14 @@ Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
case CodeSynthesisContext::ExplicitTemplateArgumentSubstitution:
case CodeSynthesisContext::DeducedTemplateArgumentSubstitution:
+ // We're either substituting explicitly-specified template arguments,
+ // deduced template arguments. SFINAE applies unless we are in a lambda
+ // body, see [temp.deduct]p9.
case CodeSynthesisContext::ConstraintSubstitution:
case CodeSynthesisContext::RequirementInstantiation:
- // We're either substituting explicitly-specified template arguments,
- // deduced template arguments, a constraint expression or a requirement
- // in a requires expression, so SFINAE applies.
+ case CodeSynthesisContext::RequirementParameterInstantiation:
+ // SFINAE always applies in a constraint expression or a requirement
+ // in a requires expression.
assert(Active->DeductionInfo && "Missing deduction info pointer");
return Active->DeductionInfo;
@@ -883,9 +1167,11 @@ Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
case CodeSynthesisContext::DefiningSynthesizedFunction:
case CodeSynthesisContext::InitializingStructuredBinding:
case CodeSynthesisContext::MarkingClassDllexported:
+ case CodeSynthesisContext::BuildingBuiltinDumpStructCall:
+ case CodeSynthesisContext::BuildingDeductionGuides:
// This happens in a context unrelated to template instantiation, so
// there is no SFINAE.
- return None;
+ return std::nullopt;
case CodeSynthesisContext::ExceptionSpecEvaluation:
// FIXME: This should not be treated as a SFINAE context, because
@@ -900,10 +1186,10 @@ Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
// The inner context was transparent for SFINAE. If it occurred within a
// non-instantiation SFINAE context, then SFINAE applies.
if (Active->SavedInNonInstantiationSFINAEContext)
- return Optional<TemplateDeductionInfo *>(nullptr);
+ return std::optional<TemplateDeductionInfo *>(nullptr);
}
- return None;
+ return std::nullopt;
}
//===----------------------------------------------------------------------===/
@@ -914,16 +1200,24 @@ namespace {
const MultiLevelTemplateArgumentList &TemplateArgs;
SourceLocation Loc;
DeclarationName Entity;
+ // Whether to evaluate the C++20 constraints or simply substitute into them.
+ bool EvaluateConstraints = true;
public:
typedef TreeTransform<TemplateInstantiator> inherited;
TemplateInstantiator(Sema &SemaRef,
const MultiLevelTemplateArgumentList &TemplateArgs,
- SourceLocation Loc,
- DeclarationName Entity)
- : inherited(SemaRef), TemplateArgs(TemplateArgs), Loc(Loc),
- Entity(Entity) { }
+ SourceLocation Loc, DeclarationName Entity)
+ : inherited(SemaRef), TemplateArgs(TemplateArgs), Loc(Loc),
+ Entity(Entity) {}
+
+ void setEvaluateConstraints(bool B) {
+ EvaluateConstraints = B;
+ }
+ bool getEvaluateConstraints() {
+ return EvaluateConstraints;
+ }
/// Determine whether the given type \p T has already been
/// transformed.
@@ -949,11 +1243,18 @@ namespace {
return TemplateArgs.getNewDepth(Depth);
}
+ std::optional<unsigned> getPackIndex(TemplateArgument Pack) {
+ int Index = getSema().ArgumentPackSubstitutionIndex;
+ if (Index == -1)
+ return std::nullopt;
+ return Pack.pack_size() - 1 - Index;
+ }
+
bool TryExpandParameterPacks(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
bool &ShouldExpand, bool &RetainExpansion,
- Optional<unsigned> &NumExpansions) {
+ std::optional<unsigned> &NumExpansions) {
return getSema().CheckParameterPacksForExpansion(EllipsisLoc,
PatternRange, Unexpanded,
TemplateArgs,
@@ -1036,7 +1337,8 @@ namespace {
// We recreated a local declaration, but not by instantiating it. There
// may be pending dependent diagnostics to produce.
- if (auto *DC = dyn_cast<DeclContext>(Old))
+ if (auto *DC = dyn_cast<DeclContext>(Old);
+ DC && DC->isDependentContext() && DC->isFunctionOrMethod())
SemaRef.PerformDependentDiagnostics(DC, TemplateArgs);
}
@@ -1048,6 +1350,11 @@ namespace {
/// declaration.
NamedDecl *TransformFirstQualifierInScope(NamedDecl *D, SourceLocation Loc);
+ bool TransformExceptionSpec(SourceLocation Loc,
+ FunctionProtoType::ExceptionSpecInfo &ESI,
+ SmallVectorImpl<QualType> &Exceptions,
+ bool &Changed);
+
/// Rebuild the exception declaration and register the declaration
/// as an instantiated local.
VarDecl *RebuildExceptionDecl(VarDecl *ExceptionDecl,
@@ -1076,7 +1383,13 @@ namespace {
bool AllowInjectedClassName = false);
const LoopHintAttr *TransformLoopHintAttr(const LoopHintAttr *LH);
-
+ const NoInlineAttr *TransformStmtNoInlineAttr(const Stmt *OrigS,
+ const Stmt *InstS,
+ const NoInlineAttr *A);
+ const AlwaysInlineAttr *
+ TransformStmtAlwaysInlineAttr(const Stmt *OrigS, const Stmt *InstS,
+ const AlwaysInlineAttr *A);
+ const CodeAlignAttr *TransformCodeAlignAttr(const CodeAlignAttr *CA);
ExprResult TransformPredefinedExpr(PredefinedExpr *E);
ExprResult TransformDeclRefExpr(DeclRefExpr *E);
ExprResult TransformCXXDefaultArgExpr(CXXDefaultArgExpr *E);
@@ -1112,30 +1425,98 @@ namespace {
Qualifiers ThisTypeQuals,
Fn TransformExceptionSpec);
- ParmVarDecl *TransformFunctionTypeParam(ParmVarDecl *OldParm,
- int indexAdjustment,
- Optional<unsigned> NumExpansions,
- bool ExpectParameterPack);
+ ParmVarDecl *
+ TransformFunctionTypeParam(ParmVarDecl *OldParm, int indexAdjustment,
+ std::optional<unsigned> NumExpansions,
+ bool ExpectParameterPack);
+ using inherited::TransformTemplateTypeParmType;
/// Transforms a template type parameter type by performing
/// substitution of the corresponding template type argument.
QualType TransformTemplateTypeParmType(TypeLocBuilder &TLB,
- TemplateTypeParmTypeLoc TL);
+ TemplateTypeParmTypeLoc TL,
+ bool SuppressObjCLifetime);
+
+ QualType BuildSubstTemplateTypeParmType(
+ TypeLocBuilder &TLB, bool SuppressObjCLifetime, bool Final,
+ Decl *AssociatedDecl, unsigned Index, std::optional<unsigned> PackIndex,
+ TemplateArgument Arg, SourceLocation NameLoc);
/// Transforms an already-substituted template type parameter pack
/// into either itself (if we aren't substituting into its pack expansion)
/// or the appropriate substituted argument.
- QualType TransformSubstTemplateTypeParmPackType(TypeLocBuilder &TLB,
- SubstTemplateTypeParmPackTypeLoc TL);
+ using inherited::TransformSubstTemplateTypeParmPackType;
+ QualType
+ TransformSubstTemplateTypeParmPackType(TypeLocBuilder &TLB,
+ SubstTemplateTypeParmPackTypeLoc TL,
+ bool SuppressObjCLifetime);
ExprResult TransformLambdaExpr(LambdaExpr *E) {
LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true);
- return TreeTransform<TemplateInstantiator>::TransformLambdaExpr(E);
+ Sema::ConstraintEvalRAII<TemplateInstantiator> RAII(*this);
+
+ ExprResult Result = inherited::TransformLambdaExpr(E);
+ if (Result.isInvalid())
+ return Result;
+
+ CXXMethodDecl *MD = Result.getAs<LambdaExpr>()->getCallOperator();
+ for (ParmVarDecl *PVD : MD->parameters()) {
+ assert(PVD && "null in a parameter list");
+ if (!PVD->hasDefaultArg())
+ continue;
+ Expr *UninstExpr = PVD->getUninstantiatedDefaultArg();
+ // FIXME: Obtain the source location for the '=' token.
+ SourceLocation EqualLoc = UninstExpr->getBeginLoc();
+ if (SemaRef.SubstDefaultArgument(EqualLoc, PVD, TemplateArgs)) {
+ // If substitution fails, the default argument is set to a
+ // RecoveryExpr that wraps the uninstantiated default argument so
+ // that downstream diagnostics are omitted.
+ ExprResult ErrorResult = SemaRef.CreateRecoveryExpr(
+ UninstExpr->getBeginLoc(), UninstExpr->getEndLoc(),
+ { UninstExpr }, UninstExpr->getType());
+ if (ErrorResult.isUsable())
+ PVD->setDefaultArg(ErrorResult.get());
+ }
+ }
+
+ return Result;
+ }
+
+ StmtResult TransformLambdaBody(LambdaExpr *E, Stmt *Body) {
+ // Currently, we instantiate the body when instantiating the lambda
+ // expression. However, `EvaluateConstraints` is disabled during the
+ // instantiation of the lambda expression, causing the instantiation
+ // failure of the return type requirement in the body. If p0588r1 is fully
+ // implemented, the body will be lazily instantiated, and this problem
+ // will not occur. Here, `EvaluateConstraints` is temporarily set to
+ // `true` to temporarily fix this issue.
+ // FIXME: This temporary fix can be removed after fully implementing
+ // p0588r1.
+ bool Prev = EvaluateConstraints;
+ EvaluateConstraints = true;
+ StmtResult Stmt = inherited::TransformLambdaBody(E, Body);
+ EvaluateConstraints = Prev;
+ return Stmt;
}
ExprResult TransformRequiresExpr(RequiresExpr *E) {
LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true);
- return TreeTransform<TemplateInstantiator>::TransformRequiresExpr(E);
+ ExprResult TransReq = inherited::TransformRequiresExpr(E);
+ if (TransReq.isInvalid())
+ return TransReq;
+ assert(TransReq.get() != E &&
+ "Do not change value of isSatisfied for the existing expression. "
+ "Create a new expression instead.");
+ if (E->getBody()->isDependentContext()) {
+ Sema::SFINAETrap Trap(SemaRef);
+ // We recreate the RequiresExpr body, but not by instantiating it.
+ // Produce pending diagnostics for dependent access check.
+ SemaRef.PerformDependentDiagnostics(E->getBody(), TemplateArgs);
+ // FIXME: Store SFINAE diagnostics in RequiresExpr for diagnosis.
+ if (Trap.hasErrorOccurred())
+ TransReq.getAs<RequiresExpr>()->setSatisfied(false);
+ }
+ return TransReq;
}
bool TransformRequiresExprRequirements(
@@ -1175,6 +1556,7 @@ namespace {
DeclContext *Owner = OrigTPL->getParam(0)->getDeclContext();
TemplateDeclInstantiator DeclInstantiator(getSema(),
/* DeclContext *Owner */ Owner, TemplateArgs);
+ DeclInstantiator.setEvaluateConstraints(EvaluateConstraints);
return DeclInstantiator.SubstTemplateParams(OrigTPL);
}
@@ -1184,11 +1566,19 @@ namespace {
TransformExprRequirement(concepts::ExprRequirement *Req);
concepts::NestedRequirement *
TransformNestedRequirement(concepts::NestedRequirement *Req);
+ ExprResult TransformRequiresTypeParams(
+ SourceLocation KWLoc, SourceLocation RBraceLoc, const RequiresExpr *RE,
+ RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> Params,
+ SmallVectorImpl<QualType> &PTypes,
+ SmallVectorImpl<ParmVarDecl *> &TransParams,
+ Sema::ExtParameterInfoBuilder &PInfos);
private:
- ExprResult transformNonTypeTemplateParmRef(NonTypeTemplateParmDecl *parm,
- SourceLocation loc,
- TemplateArgument arg);
+ ExprResult
+ transformNonTypeTemplateParmRef(Decl *AssociatedDecl,
+ const NonTypeTemplateParmDecl *parm,
+ SourceLocation loc, TemplateArgument arg,
+ std::optional<unsigned> PackIndex);
};
}
@@ -1257,6 +1647,16 @@ Decl *TemplateInstantiator::TransformDefinition(SourceLocation Loc, Decl *D) {
return Inst;
}
+bool TemplateInstantiator::TransformExceptionSpec(
+ SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI,
+ SmallVectorImpl<QualType> &Exceptions, bool &Changed) {
+ if (ESI.Type == EST_Uninstantiated) {
+ ESI.instantiate();
+ Changed = true;
+ }
+ return inherited::TransformExceptionSpec(Loc, ESI, Exceptions, Changed);
+}
+
NamedDecl *
TemplateInstantiator::TransformFirstQualifierInScope(NamedDecl *D,
SourceLocation Loc) {
@@ -1332,7 +1732,8 @@ TemplateInstantiator::RebuildElaboratedType(SourceLocation KeywordLoc,
// TODO: should we even warn on struct/class mismatches for this? Seems
// like it's likely to produce a lot of spurious errors.
- if (Id && Keyword != ETK_None && Keyword != ETK_Typename) {
+ if (Id && Keyword != ElaboratedTypeKeyword::None &&
+ Keyword != ElaboratedTypeKeyword::Typename) {
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForKeyword(Keyword);
if (!SemaRef.isAcceptableTagRedeclaration(TD, Kind, /*isDefinition*/false,
TagLocation, Id)) {
@@ -1345,10 +1746,7 @@ TemplateInstantiator::RebuildElaboratedType(SourceLocation KeywordLoc,
}
}
- return TreeTransform<TemplateInstantiator>::RebuildElaboratedType(KeywordLoc,
- Keyword,
- QualifierLoc,
- T);
+ return inherited::RebuildElaboratedType(KeywordLoc, Keyword, QualifierLoc, T);
}
TemplateName TemplateInstantiator::TransformTemplateName(
@@ -1381,6 +1779,9 @@ TemplateName TemplateInstantiator::TransformTemplateName(
return Arg.getAsTemplate();
}
+ auto [AssociatedDecl, Final] =
+ TemplateArgs.getAssociatedDecl(TTP->getDepth());
+ std::optional<unsigned> PackIndex;
if (TTP->isParameterPack()) {
assert(Arg.getKind() == TemplateArgument::Pack &&
"Missing argument pack");
@@ -1389,9 +1790,11 @@ TemplateName TemplateInstantiator::TransformTemplateName(
// We have the template argument pack to substitute, but we're not
// actually expanding the enclosing pack expansion yet. So, just
// keep the entire argument pack.
- return getSema().Context.getSubstTemplateTemplateParmPack(TTP, Arg);
+ return getSema().Context.getSubstTemplateTemplateParmPack(
+ Arg, AssociatedDecl, TTP->getIndex(), Final);
}
+ PackIndex = getPackIndex(Arg);
Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
}
@@ -1400,8 +1803,10 @@ TemplateName TemplateInstantiator::TransformTemplateName(
assert(!Template.getAsQualifiedTemplateName() &&
"template decl to substitute is qualified?");
- Template = getSema().Context.getSubstTemplateTemplateParm(TTP, Template);
- return Template;
+ if (Final)
+ return Template;
+ return getSema().Context.getSubstTemplateTemplateParm(
+ Template, AssociatedDecl, TTP->getIndex(), PackIndex);
}
}
@@ -1410,9 +1815,14 @@ TemplateName TemplateInstantiator::TransformTemplateName(
if (getSema().ArgumentPackSubstitutionIndex == -1)
return Name;
- TemplateArgument Arg = SubstPack->getArgumentPack();
- Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
- return Arg.getAsTemplate().getNameToSubstitute();
+ TemplateArgument Pack = SubstPack->getArgumentPack();
+ TemplateName Template =
+ getPackSubstitutedTemplateArgument(getSema(), Pack).getAsTemplate();
+ if (SubstPack->getFinal())
+ return Template;
+ return getSema().Context.getSubstTemplateTemplateParm(
+ Template.getNameToSubstitute(), SubstPack->getAssociatedDecl(),
+ SubstPack->getIndex(), getPackIndex(Pack));
}
return inherited::TransformTemplateName(SS, Name, NameLoc, ObjectType,
@@ -1456,6 +1866,8 @@ TemplateInstantiator::TransformTemplateParmRefExpr(DeclRefExpr *E,
return Arg.getAsExpr();
}
+ auto [AssociatedDecl, _] = TemplateArgs.getAssociatedDecl(NTTP->getDepth());
+ std::optional<unsigned> PackIndex;
if (NTTP->isParameterPack()) {
assert(Arg.getKind() == TemplateArgument::Pack &&
"Missing argument pack");
@@ -1473,16 +1885,17 @@ TemplateInstantiator::TransformTemplateParmRefExpr(DeclRefExpr *E,
QualType ExprType = TargetType.getNonLValueExprType(SemaRef.Context);
if (TargetType->isRecordType())
ExprType.addConst();
-
+ // FIXME: Pass in Final.
return new (SemaRef.Context) SubstNonTypeTemplateParmPackExpr(
ExprType, TargetType->isReferenceType() ? VK_LValue : VK_PRValue,
- NTTP, E->getLocation(), Arg);
+ E->getLocation(), Arg, AssociatedDecl, NTTP->getPosition());
}
-
+ PackIndex = getPackIndex(Arg);
Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
}
-
- return transformNonTypeTemplateParmRef(NTTP, E->getLocation(), Arg);
+ // FIXME: Don't put subst node on Final replacement.
+ return transformNonTypeTemplateParmRef(AssociatedDecl, NTTP, E->getLocation(),
+ Arg, PackIndex);
}
const LoopHintAttr *
@@ -1501,11 +1914,31 @@ TemplateInstantiator::TransformLoopHintAttr(const LoopHintAttr *LH) {
return LoopHintAttr::CreateImplicit(getSema().Context, LH->getOption(),
LH->getState(), TransformedExpr, *LH);
}
+const NoInlineAttr *TemplateInstantiator::TransformStmtNoInlineAttr(
+ const Stmt *OrigS, const Stmt *InstS, const NoInlineAttr *A) {
+ if (!A || getSema().CheckNoInlineAttr(OrigS, InstS, *A))
+ return nullptr;
+
+ return A;
+}
+const AlwaysInlineAttr *TemplateInstantiator::TransformStmtAlwaysInlineAttr(
+ const Stmt *OrigS, const Stmt *InstS, const AlwaysInlineAttr *A) {
+ if (!A || getSema().CheckAlwaysInlineAttr(OrigS, InstS, *A))
+ return nullptr;
+
+ return A;
+}
+
+const CodeAlignAttr *
+TemplateInstantiator::TransformCodeAlignAttr(const CodeAlignAttr *CA) {
+ Expr *TransformedExpr = getDerived().TransformExpr(CA->getAlignment()).get();
+ return getSema().BuildCodeAlignAttr(*CA, TransformedExpr);
+}
ExprResult TemplateInstantiator::transformNonTypeTemplateParmRef(
- NonTypeTemplateParmDecl *parm,
- SourceLocation loc,
- TemplateArgument arg) {
+ Decl *AssociatedDecl, const NonTypeTemplateParmDecl *parm,
+ SourceLocation loc, TemplateArgument arg,
+ std::optional<unsigned> PackIndex) {
ExprResult result;
// Determine the substituted parameter type. We can usually infer this from
@@ -1542,9 +1975,8 @@ ExprResult TemplateInstantiator::transformNonTypeTemplateParmRef(
}
} else if (arg.getKind() == TemplateArgument::Declaration ||
arg.getKind() == TemplateArgument::NullPtr) {
- ValueDecl *VD;
if (arg.getKind() == TemplateArgument::Declaration) {
- VD = arg.getAsDecl();
+ ValueDecl *VD = arg.getAsDecl();
// Find the instantiation of the template argument. This is
// required for nested templates.
@@ -1552,30 +1984,30 @@ ExprResult TemplateInstantiator::transformNonTypeTemplateParmRef(
getSema().FindInstantiatedDecl(loc, VD, TemplateArgs));
if (!VD)
return ExprError();
- } else {
- // Propagate NULL template argument.
- VD = nullptr;
}
- QualType paramType = VD ? arg.getParamTypeForDecl() : arg.getNullPtrType();
+ QualType paramType = arg.getNonTypeTemplateArgumentType();
assert(!paramType.isNull() && "type substitution failed for param type");
assert(!paramType->isDependentType() && "param type still dependent");
result = SemaRef.BuildExpressionFromDeclTemplateArgument(arg, paramType, loc);
refParam = paramType->isReferenceType();
} else {
- result = SemaRef.BuildExpressionFromIntegralTemplateArgument(arg, loc);
+ QualType paramType = arg.getNonTypeTemplateArgumentType();
+ result = SemaRef.BuildExpressionFromNonTypeTemplateArgument(arg, loc);
+ refParam = paramType->isReferenceType();
assert(result.isInvalid() ||
SemaRef.Context.hasSameType(result.get()->getType(),
- arg.getIntegralType()));
+ paramType.getNonReferenceType()));
}
if (result.isInvalid())
return ExprError();
Expr *resultExpr = result.get();
+ // FIXME: Don't put subst node on final replacement.
return new (SemaRef.Context) SubstNonTypeTemplateParmExpr(
- resultExpr->getType(), resultExpr->getValueKind(), loc, parm, refParam,
- resultExpr);
+ resultExpr->getType(), resultExpr->getValueKind(), loc, resultExpr,
+ AssociatedDecl, parm->getIndex(), PackIndex, refParam);
}
ExprResult
@@ -1586,11 +2018,12 @@ TemplateInstantiator::TransformSubstNonTypeTemplateParmPackExpr(
return E;
}
- TemplateArgument Arg = E->getArgumentPack();
- Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
- return transformNonTypeTemplateParmRef(E->getParameterPack(),
- E->getParameterPackLocation(),
- Arg);
+ TemplateArgument Pack = E->getArgumentPack();
+ TemplateArgument Arg = getPackSubstitutedTemplateArgument(getSema(), Pack);
+ // FIXME: Don't put subst node on final replacement.
+ return transformNonTypeTemplateParmRef(
+ E->getAssociatedDecl(), E->getParameterPack(),
+ E->getParameterPackLocation(), Arg, getPackIndex(Pack));
}
ExprResult
@@ -1624,13 +2057,16 @@ TemplateInstantiator::TransformSubstNonTypeTemplateParmExpr(
// Type=char)),
// Type=decltype(2)))
// The call to CheckTemplateArgument here produces the ImpCast.
- TemplateArgument Converted;
- if (SemaRef.CheckTemplateArgument(E->getParameter(), SubstType,
- SubstReplacement.get(),
- Converted).isInvalid())
+ TemplateArgument SugaredConverted, CanonicalConverted;
+ if (SemaRef
+ .CheckTemplateArgument(E->getParameter(), SubstType,
+ SubstReplacement.get(), SugaredConverted,
+ CanonicalConverted, Sema::CTAK_Specified)
+ .isInvalid())
return true;
- return transformNonTypeTemplateParmRef(E->getParameter(),
- E->getExprLoc(), Converted);
+ return transformNonTypeTemplateParmRef(E->getAssociatedDecl(),
+ E->getParameter(), E->getExprLoc(),
+ SugaredConverted, E->getPackIndex());
}
ExprResult TemplateInstantiator::RebuildVarDeclRefExpr(VarDecl *PD,
@@ -1723,7 +2159,7 @@ TemplateInstantiator::TransformDeclRefExpr(DeclRefExpr *E) {
if (PD->isParameterPack())
return TransformFunctionParmPackRefExpr(E, PD);
- return TreeTransform<TemplateInstantiator>::TransformDeclRefExpr(E);
+ return inherited::TransformDeclRefExpr(E);
}
ExprResult TemplateInstantiator::TransformCXXDefaultArgExpr(
@@ -1731,9 +2167,9 @@ ExprResult TemplateInstantiator::TransformCXXDefaultArgExpr(
assert(!cast<FunctionDecl>(E->getParam()->getDeclContext())->
getDescribedFunctionTemplate() &&
"Default arg expressions are never formed in dependent cases.");
- return SemaRef.BuildCXXDefaultArgExpr(E->getUsedLocation(),
- cast<FunctionDecl>(E->getParam()->getDeclContext()),
- E->getParam());
+ return SemaRef.BuildCXXDefaultArgExpr(
+ E->getUsedLocation(), cast<FunctionDecl>(E->getParam()->getDeclContext()),
+ E->getParam());
}
template<typename Fn>
@@ -1748,22 +2184,50 @@ QualType TemplateInstantiator::TransformFunctionProtoType(TypeLocBuilder &TLB,
TLB, TL, ThisContext, ThisTypeQuals, TransformExceptionSpec);
}
-ParmVarDecl *
-TemplateInstantiator::TransformFunctionTypeParam(ParmVarDecl *OldParm,
- int indexAdjustment,
- Optional<unsigned> NumExpansions,
- bool ExpectParameterPack) {
- auto NewParm =
- SemaRef.SubstParmVarDecl(OldParm, TemplateArgs, indexAdjustment,
- NumExpansions, ExpectParameterPack);
+ParmVarDecl *TemplateInstantiator::TransformFunctionTypeParam(
+ ParmVarDecl *OldParm, int indexAdjustment,
+ std::optional<unsigned> NumExpansions, bool ExpectParameterPack) {
+ auto NewParm = SemaRef.SubstParmVarDecl(
+ OldParm, TemplateArgs, indexAdjustment, NumExpansions,
+ ExpectParameterPack, EvaluateConstraints);
if (NewParm && SemaRef.getLangOpts().OpenCL)
SemaRef.deduceOpenCLAddressSpace(NewParm);
return NewParm;
}
+QualType TemplateInstantiator::BuildSubstTemplateTypeParmType(
+ TypeLocBuilder &TLB, bool SuppressObjCLifetime, bool Final,
+ Decl *AssociatedDecl, unsigned Index, std::optional<unsigned> PackIndex,
+ TemplateArgument Arg, SourceLocation NameLoc) {
+ QualType Replacement = Arg.getAsType();
+
+ // If the template parameter had ObjC lifetime qualifiers,
+ // then any such qualifiers on the replacement type are ignored.
+ if (SuppressObjCLifetime) {
+ Qualifiers RQs;
+ RQs = Replacement.getQualifiers();
+ RQs.removeObjCLifetime();
+ Replacement =
+ SemaRef.Context.getQualifiedType(Replacement.getUnqualifiedType(), RQs);
+ }
+
+ if (Final) {
+ TLB.pushTrivial(SemaRef.Context, Replacement, NameLoc);
+ return Replacement;
+ }
+ // TODO: only do this uniquing once, at the start of instantiation.
+ QualType Result = getSema().Context.getSubstTemplateTypeParmType(
+ Replacement, AssociatedDecl, Index, PackIndex);
+ SubstTemplateTypeParmTypeLoc NewTL =
+ TLB.push<SubstTemplateTypeParmTypeLoc>(Result);
+ NewTL.setNameLoc(NameLoc);
+ return Result;
+}
+
QualType
TemplateInstantiator::TransformTemplateTypeParmType(TypeLocBuilder &TLB,
- TemplateTypeParmTypeLoc TL) {
+ TemplateTypeParmTypeLoc TL,
+ bool SuppressObjCLifetime) {
const TemplateTypeParmType *T = TL.getTypePtr();
if (T->getDepth() < TemplateArgs.getNumLevels()) {
// Replace the template type parameter with its corresponding
@@ -1800,6 +2264,9 @@ TemplateInstantiator::TransformTemplateTypeParmType(TypeLocBuilder &TLB,
return NewT;
}
+ auto [AssociatedDecl, Final] =
+ TemplateArgs.getAssociatedDecl(T->getDepth());
+ std::optional<unsigned> PackIndex;
if (T->isParameterPack()) {
assert(Arg.getKind() == TemplateArgument::Pack &&
"Missing argument pack");
@@ -1808,29 +2275,25 @@ TemplateInstantiator::TransformTemplateTypeParmType(TypeLocBuilder &TLB,
// We have the template argument pack, but we're not expanding the
// enclosing pack expansion yet. Just save the template argument
// pack for later substitution.
- QualType Result
- = getSema().Context.getSubstTemplateTypeParmPackType(T, Arg);
+ QualType Result = getSema().Context.getSubstTemplateTypeParmPackType(
+ AssociatedDecl, T->getIndex(), Final, Arg);
SubstTemplateTypeParmPackTypeLoc NewTL
= TLB.push<SubstTemplateTypeParmPackTypeLoc>(Result);
NewTL.setNameLoc(TL.getNameLoc());
return Result;
}
+ // PackIndex starts from last element.
+ PackIndex = getPackIndex(Arg);
Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
}
assert(Arg.getKind() == TemplateArgument::Type &&
"Template argument kind mismatch");
- QualType Replacement = Arg.getAsType();
-
- // TODO: only do this uniquing once, at the start of instantiation.
- QualType Result
- = getSema().Context.getSubstTemplateTypeParmType(T, Replacement);
- SubstTemplateTypeParmTypeLoc NewTL
- = TLB.push<SubstTemplateTypeParmTypeLoc>(Result);
- NewTL.setNameLoc(TL.getNameLoc());
- return Result;
+ return BuildSubstTemplateTypeParmType(TLB, SuppressObjCLifetime, Final,
+ AssociatedDecl, T->getIndex(),
+ PackIndex, Arg, TL.getNameLoc());
}
// The template type parameter comes from an inner template (e.g.,
@@ -1840,8 +2303,7 @@ TemplateInstantiator::TransformTemplateTypeParmType(TypeLocBuilder &TLB,
TemplateTypeParmDecl *NewTTPDecl = nullptr;
if (TemplateTypeParmDecl *OldTTPDecl = T->getDecl())
NewTTPDecl = cast_or_null<TemplateTypeParmDecl>(
- TransformDecl(TL.getNameLoc(), OldTTPDecl));
-
+ TransformDecl(TL.getNameLoc(), OldTTPDecl));
QualType Result = getSema().Context.getTemplateTypeParmType(
T->getDepth() - TemplateArgs.getNumSubstitutedLevels(), T->getIndex(),
T->isParameterPack(), NewTTPDecl);
@@ -1850,34 +2312,35 @@ TemplateInstantiator::TransformTemplateTypeParmType(TypeLocBuilder &TLB,
return Result;
}
-QualType
-TemplateInstantiator::TransformSubstTemplateTypeParmPackType(
- TypeLocBuilder &TLB,
- SubstTemplateTypeParmPackTypeLoc TL) {
+QualType TemplateInstantiator::TransformSubstTemplateTypeParmPackType(
+ TypeLocBuilder &TLB, SubstTemplateTypeParmPackTypeLoc TL,
+ bool SuppressObjCLifetime) {
+ const SubstTemplateTypeParmPackType *T = TL.getTypePtr();
+
+ Decl *NewReplaced = TransformDecl(TL.getNameLoc(), T->getAssociatedDecl());
+
if (getSema().ArgumentPackSubstitutionIndex == -1) {
// We aren't expanding the parameter pack, so just return ourselves.
- SubstTemplateTypeParmPackTypeLoc NewTL
- = TLB.push<SubstTemplateTypeParmPackTypeLoc>(TL.getType());
+ QualType Result = TL.getType();
+ if (NewReplaced != T->getAssociatedDecl())
+ Result = getSema().Context.getSubstTemplateTypeParmPackType(
+ NewReplaced, T->getIndex(), T->getFinal(), T->getArgumentPack());
+ SubstTemplateTypeParmPackTypeLoc NewTL =
+ TLB.push<SubstTemplateTypeParmPackTypeLoc>(Result);
NewTL.setNameLoc(TL.getNameLoc());
- return TL.getType();
+ return Result;
}
- TemplateArgument Arg = TL.getTypePtr()->getArgumentPack();
- Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
- QualType Result = Arg.getAsType();
-
- Result = getSema().Context.getSubstTemplateTypeParmType(
- TL.getTypePtr()->getReplacedParameter(),
- Result);
- SubstTemplateTypeParmTypeLoc NewTL
- = TLB.push<SubstTemplateTypeParmTypeLoc>(Result);
- NewTL.setNameLoc(TL.getNameLoc());
- return Result;
+ TemplateArgument Pack = T->getArgumentPack();
+ TemplateArgument Arg = getPackSubstitutedTemplateArgument(getSema(), Pack);
+ return BuildSubstTemplateTypeParmType(
+ TLB, SuppressObjCLifetime, T->getFinal(), NewReplaced, T->getIndex(),
+ getPackIndex(Pack), Arg, TL.getNameLoc());
}
-template<typename EntityPrinter>
static concepts::Requirement::SubstitutionDiagnostic *
-createSubstDiag(Sema &S, TemplateDeductionInfo &Info, EntityPrinter Printer) {
+createSubstDiag(Sema &S, TemplateDeductionInfo &Info,
+ concepts::EntityPrinter Printer) {
SmallString<128> Message;
SourceLocation ErrorLoc;
if (Info.hasSFINAEDiagnostic()) {
@@ -1901,6 +2364,51 @@ createSubstDiag(Sema &S, TemplateDeductionInfo &Info, EntityPrinter Printer) {
StringRef(MessageBuf, Message.size())};
}
+concepts::Requirement::SubstitutionDiagnostic *
+concepts::createSubstDiagAt(Sema &S, SourceLocation Location,
+ EntityPrinter Printer) {
+ SmallString<128> Entity;
+ llvm::raw_svector_ostream OS(Entity);
+ Printer(OS);
+ char *EntityBuf = new (S.Context) char[Entity.size()];
+ llvm::copy(Entity, EntityBuf);
+ return new (S.Context) concepts::Requirement::SubstitutionDiagnostic{
+ /*SubstitutedEntity=*/StringRef(EntityBuf, Entity.size()),
+ /*DiagLoc=*/Location, /*DiagMessage=*/StringRef()};
+}
+
+ExprResult TemplateInstantiator::TransformRequiresTypeParams(
+ SourceLocation KWLoc, SourceLocation RBraceLoc, const RequiresExpr *RE,
+ RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> Params,
+ SmallVectorImpl<QualType> &PTypes,
+ SmallVectorImpl<ParmVarDecl *> &TransParams,
+ Sema::ExtParameterInfoBuilder &PInfos) {
+
+ TemplateDeductionInfo Info(KWLoc);
+ Sema::InstantiatingTemplate TypeInst(SemaRef, KWLoc,
+ RE, Info,
+ SourceRange{KWLoc, RBraceLoc});
+ Sema::SFINAETrap Trap(SemaRef);
+
+ unsigned ErrorIdx;
+ if (getDerived().TransformFunctionTypeParams(
+ KWLoc, Params, /*ParamTypes=*/nullptr, /*ParamInfos=*/nullptr, PTypes,
+ &TransParams, PInfos, &ErrorIdx) ||
+ Trap.hasErrorOccurred()) {
+ SmallVector<concepts::Requirement *, 4> TransReqs;
+ ParmVarDecl *FailedDecl = Params[ErrorIdx];
+ // Add a 'failed' Requirement to contain the error that caused the failure
+ // here.
+ TransReqs.push_back(RebuildTypeRequirement(createSubstDiag(
+ SemaRef, Info, [&](llvm::raw_ostream &OS) { OS << *FailedDecl; })));
+ return getDerived().RebuildRequiresExpr(KWLoc, Body, RE->getLParenLoc(),
+ TransParams, RE->getRParenLoc(),
+ TransReqs, RBraceLoc);
+ }
+
+ return ExprResult{};
+}
+
concepts::TypeRequirement *
TemplateInstantiator::TransformTypeRequirement(concepts::TypeRequirement *Req) {
if (!Req->isDependent() && !AlwaysRebuild())
@@ -1947,6 +2455,9 @@ TemplateInstantiator::TransformExprRequirement(concepts::ExprRequirement *Req) {
if (ExprInst.isInvalid())
return nullptr;
ExprResult TransExprRes = TransformExpr(E);
+ if (!TransExprRes.isInvalid() && !Trap.hasErrorOccurred() &&
+ TransExprRes.get()->hasPlaceholderType())
+ TransExprRes = SemaRef.CheckPlaceholderExpr(TransExprRes.get());
if (TransExprRes.isInvalid() || Trap.hasErrorOccurred())
TransExpr = createSubstDiag(SemaRef, Info, [&](llvm::raw_ostream &OS) {
E->printPretty(OS, nullptr, SemaRef.getPrintingPolicy());
@@ -1955,7 +2466,7 @@ TemplateInstantiator::TransformExprRequirement(concepts::ExprRequirement *Req) {
TransExpr = TransExprRes.get();
}
- llvm::Optional<concepts::ExprRequirement::ReturnTypeRequirement> TransRetReq;
+ std::optional<concepts::ExprRequirement::ReturnTypeRequirement> TransRetReq;
const auto &RetReq = Req->getReturnTypeRequirement();
if (RetReq.isEmpty())
TransRetReq.emplace();
@@ -1969,8 +2480,7 @@ TemplateInstantiator::TransformExprRequirement(concepts::ExprRequirement *Req) {
Req, Info, OrigTPL->getSourceRange());
if (TPLInst.isInvalid())
return nullptr;
- TemplateParameterList *TPL =
- TransformTemplateParameterList(OrigTPL);
+ TemplateParameterList *TPL = TransformTemplateParameterList(OrigTPL);
if (!TPL)
TransRetReq.emplace(createSubstDiag(SemaRef, Info,
[&] (llvm::raw_ostream& OS) {
@@ -1982,8 +2492,7 @@ TemplateInstantiator::TransformExprRequirement(concepts::ExprRequirement *Req) {
TransRetReq.emplace(TPL);
}
}
- assert(TransRetReq.hasValue() &&
- "All code paths leading here must set TransRetReq");
+ assert(TransRetReq && "All code paths leading here must set TransRetReq");
if (Expr *E = TransExpr.dyn_cast<Expr *>())
return RebuildExprRequirement(E, Req->isSimple(), Req->getNoexceptLoc(),
std::move(*TransRetReq));
@@ -1997,18 +2506,30 @@ TemplateInstantiator::TransformNestedRequirement(
concepts::NestedRequirement *Req) {
if (!Req->isDependent() && !AlwaysRebuild())
return Req;
- if (Req->isSubstitutionFailure()) {
+ if (Req->hasInvalidConstraint()) {
if (AlwaysRebuild())
- return RebuildNestedRequirement(
- Req->getSubstitutionDiagnostic());
+ return RebuildNestedRequirement(Req->getInvalidConstraintEntity(),
+ Req->getConstraintSatisfaction());
return Req;
}
Sema::InstantiatingTemplate ReqInst(SemaRef,
Req->getConstraintExpr()->getBeginLoc(), Req,
Sema::InstantiatingTemplate::ConstraintsCheck{},
Req->getConstraintExpr()->getSourceRange());
+ if (!getEvaluateConstraints()) {
+ ExprResult TransConstraint = TransformExpr(Req->getConstraintExpr());
+ if (TransConstraint.isInvalid() || !TransConstraint.get())
+ return nullptr;
+ if (TransConstraint.get()->isInstantiationDependent())
+ return new (SemaRef.Context)
+ concepts::NestedRequirement(TransConstraint.get());
+ ConstraintSatisfaction Satisfaction;
+ return new (SemaRef.Context) concepts::NestedRequirement(
+ SemaRef.Context, TransConstraint.get(), Satisfaction);
+ }
ExprResult TransConstraint;
+ ConstraintSatisfaction Satisfaction;
TemplateDeductionInfo Info(Req->getConstraintExpr()->getBeginLoc());
{
EnterExpressionEvaluationContext ContextRAII(
@@ -2019,15 +2540,32 @@ TemplateInstantiator::TransformNestedRequirement(
Req->getConstraintExpr()->getSourceRange());
if (ConstrInst.isInvalid())
return nullptr;
- TransConstraint = TransformExpr(Req->getConstraintExpr());
- if (TransConstraint.isInvalid() || Trap.hasErrorOccurred())
- return RebuildNestedRequirement(createSubstDiag(SemaRef, Info,
- [&] (llvm::raw_ostream& OS) {
- Req->getConstraintExpr()->printPretty(OS, nullptr,
- SemaRef.getPrintingPolicy());
- }));
+ llvm::SmallVector<Expr *> Result;
+ if (!SemaRef.CheckConstraintSatisfaction(
+ nullptr, {Req->getConstraintExpr()}, Result, TemplateArgs,
+ Req->getConstraintExpr()->getSourceRange(), Satisfaction) &&
+ !Result.empty())
+ TransConstraint = Result[0];
+ assert(!Trap.hasErrorOccurred() && "Substitution failures must be handled "
+ "by CheckConstraintSatisfaction.");
}
- return RebuildNestedRequirement(TransConstraint.get());
+ if (TransConstraint.isUsable() &&
+ TransConstraint.get()->isInstantiationDependent())
+ return new (SemaRef.Context)
+ concepts::NestedRequirement(TransConstraint.get());
+ if (TransConstraint.isInvalid() || !TransConstraint.get() ||
+ Satisfaction.HasSubstitutionFailure()) {
+ SmallString<128> Entity;
+ llvm::raw_svector_ostream OS(Entity);
+ Req->getConstraintExpr()->printPretty(OS, nullptr,
+ SemaRef.getPrintingPolicy());
+ char *EntityBuf = new (SemaRef.Context) char[Entity.size()];
+ std::copy(Entity.begin(), Entity.end(), EntityBuf);
+ return new (SemaRef.Context) concepts::NestedRequirement(
+ SemaRef.Context, StringRef(EntityBuf, Entity.size()), Satisfaction);
+ }
+ return new (SemaRef.Context) concepts::NestedRequirement(
+ SemaRef.Context, TransConstraint.get(), Satisfaction);
}
@@ -2157,7 +2695,8 @@ TypeSourceInfo *Sema::SubstFunctionDeclType(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
- Qualifiers ThisTypeQuals) {
+ Qualifiers ThisTypeQuals,
+ bool EvaluateConstraints) {
assert(!CodeSynthesisContexts.empty() &&
"Cannot perform an instantiation without some context on the "
"instantiation stack");
@@ -2166,6 +2705,7 @@ TypeSourceInfo *Sema::SubstFunctionDeclType(TypeSourceInfo *T,
return T;
TemplateInstantiator Instantiator(*this, Args, Loc, Entity);
+ Instantiator.setEvaluateConstraints(EvaluateConstraints);
TypeLocBuilder TLB;
@@ -2188,7 +2728,9 @@ TypeSourceInfo *Sema::SubstFunctionDeclType(TypeSourceInfo *T,
} else {
Result = Instantiator.TransformType(TLB, TL);
}
- if (Result.isNull())
+ // When there are errors resolving types, clang may use IntTy as a fallback,
+ // breaking our assumption that function declarations have function types.
+ if (Result.isNull() || !Result->isFunctionType())
return nullptr;
return TLB.getTypeSourceInfo(Context, Result);
@@ -2198,8 +2740,6 @@ bool Sema::SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args) {
- assert(ESI.Type != EST_Uninstantiated);
-
bool Changed = false;
TemplateInstantiator Instantiator(*this, Args, Loc, DeclarationName());
return Instantiator.TransformExceptionSpec(Loc, ESI, ExceptionStorage,
@@ -2308,11 +2848,41 @@ namespace {
} // namespace
-ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
- const MultiLevelTemplateArgumentList &TemplateArgs,
- int indexAdjustment,
- Optional<unsigned> NumExpansions,
- bool ExpectParameterPack) {
+bool Sema::SubstTypeConstraint(
+ TemplateTypeParmDecl *Inst, const TypeConstraint *TC,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ bool EvaluateConstraints) {
+ const ASTTemplateArgumentListInfo *TemplArgInfo =
+ TC->getTemplateArgsAsWritten();
+
+ if (!EvaluateConstraints) {
+ Inst->setTypeConstraint(TC->getConceptReference(),
+ TC->getImmediatelyDeclaredConstraint());
+ return false;
+ }
+
+ TemplateArgumentListInfo InstArgs;
+
+ if (TemplArgInfo) {
+ InstArgs.setLAngleLoc(TemplArgInfo->LAngleLoc);
+ InstArgs.setRAngleLoc(TemplArgInfo->RAngleLoc);
+ if (SubstTemplateArguments(TemplArgInfo->arguments(), TemplateArgs,
+ InstArgs))
+ return true;
+ }
+ return AttachTypeConstraint(
+ TC->getNestedNameSpecifierLoc(), TC->getConceptNameInfo(),
+ TC->getNamedConcept(), &InstArgs, Inst,
+ Inst->isParameterPack()
+ ? cast<CXXFoldExpr>(TC->getImmediatelyDeclaredConstraint())
+ ->getEllipsisLoc()
+ : SourceLocation());
+}
+
+ParmVarDecl *Sema::SubstParmVarDecl(
+ ParmVarDecl *OldParm, const MultiLevelTemplateArgumentList &TemplateArgs,
+ int indexAdjustment, std::optional<unsigned> NumExpansions,
+ bool ExpectParameterPack, bool EvaluateConstraint) {
TypeSourceInfo *OldDI = OldParm->getTypeSourceInfo();
TypeSourceInfo *NewDI = nullptr;
@@ -2370,26 +2940,7 @@ ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
// template's described function, but we might also get here later.
// Make sure we do not instantiate the TypeConstraint more than once.
if (Inst && !Inst->getTypeConstraint()) {
- // TODO: Concepts: do not instantiate the constraint (delayed constraint
- // substitution)
- const ASTTemplateArgumentListInfo *TemplArgInfo
- = TC->getTemplateArgsAsWritten();
- TemplateArgumentListInfo InstArgs;
-
- if (TemplArgInfo) {
- InstArgs.setLAngleLoc(TemplArgInfo->LAngleLoc);
- InstArgs.setRAngleLoc(TemplArgInfo->RAngleLoc);
- if (Subst(TemplArgInfo->getTemplateArgs(),
- TemplArgInfo->NumTemplateArgs, InstArgs, TemplateArgs))
- return nullptr;
- }
- if (AttachTypeConstraint(
- TC->getNestedNameSpecifierLoc(), TC->getConceptNameInfo(),
- TC->getNamedConcept(), TemplArgInfo ? &InstArgs : nullptr, Inst,
- TTP->isParameterPack()
- ? cast<CXXFoldExpr>(TC->getImmediatelyDeclaredConstraint())
- ->getEllipsisLoc()
- : SourceLocation()))
+ if (SubstTypeConstraint(Inst, TC, TemplateArgs, EvaluateConstraint))
return nullptr;
}
}
@@ -2412,31 +2963,21 @@ ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
NewParm->setUnparsedDefaultArg();
UnparsedDefaultArgInstantiations[OldParm].push_back(NewParm);
} else if (Expr *Arg = OldParm->getDefaultArg()) {
- FunctionDecl *OwningFunc = cast<FunctionDecl>(OldParm->getDeclContext());
- if (OwningFunc->isInLocalScopeForInstantiation()) {
- // Instantiate default arguments for methods of local classes (DR1484)
- // and non-defining declarations.
- Sema::ContextRAII SavedContext(*this, OwningFunc);
- LocalInstantiationScope Local(*this, true);
- ExprResult NewArg = SubstExpr(Arg, TemplateArgs);
- if (NewArg.isUsable()) {
- // It would be nice if we still had this.
- SourceLocation EqualLoc = NewArg.get()->getBeginLoc();
- ExprResult Result =
- ConvertParamDefaultArgument(NewParm, NewArg.get(), EqualLoc);
- if (Result.isInvalid())
- return nullptr;
-
- SetParamDefaultArgument(NewParm, Result.getAs<Expr>(), EqualLoc);
- }
- } else {
- // FIXME: if we non-lazily instantiated non-dependent default args for
- // non-dependent parameter types we could remove a bunch of duplicate
- // conversion warnings for such arguments.
- NewParm->setUninstantiatedDefaultArg(Arg);
- }
+ // Default arguments cannot be substituted until the declaration context
+ // for the associated function or lambda capture class is available.
+ // This is necessary for cases like the following where construction of
+ // the lambda capture class for the outer lambda is dependent on the
+ // parameter types but where the default argument is dependent on the
+ // outer lambda's declaration context.
+ // template <typename T>
+ // auto f() {
+ // return [](T = []{ return T{}; }()) { return 0; };
+ // }
+ NewParm->setUninstantiatedDefaultArg(Arg);
}
+ NewParm->setExplicitObjectParameterLoc(
+ OldParm->getExplicitObjectParamThisLoc());
NewParm->setHasInheritedDefaultArg(OldParm->hasInheritedDefaultArg());
if (OldParm->isParameterPack() && !NewParm->isParameterPack()) {
@@ -2479,6 +3020,88 @@ bool Sema::SubstParmTypes(
Loc, Params, nullptr, ExtParamInfos, ParamTypes, OutParams, ParamInfos);
}
+/// Substitute the given template arguments into the default argument.
+bool Sema::SubstDefaultArgument(
+ SourceLocation Loc,
+ ParmVarDecl *Param,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ bool ForCallExpr) {
+ FunctionDecl *FD = cast<FunctionDecl>(Param->getDeclContext());
+ Expr *PatternExpr = Param->getUninstantiatedDefaultArg();
+
+ EnterExpressionEvaluationContext EvalContext(
+ *this, ExpressionEvaluationContext::PotentiallyEvaluated, Param);
+
+ InstantiatingTemplate Inst(*this, Loc, Param, TemplateArgs.getInnermost());
+ if (Inst.isInvalid())
+ return true;
+ if (Inst.isAlreadyInstantiating()) {
+ Diag(Param->getBeginLoc(), diag::err_recursive_default_argument) << FD;
+ Param->setInvalidDecl();
+ return true;
+ }
+
+ ExprResult Result;
+ {
+ // C++ [dcl.fct.default]p5:
+ // The names in the [default argument] expression are bound, and
+ // the semantic constraints are checked, at the point where the
+ // default argument expression appears.
+ ContextRAII SavedContext(*this, FD);
+ std::unique_ptr<LocalInstantiationScope> LIS;
+
+ if (ForCallExpr) {
+ // When instantiating a default argument due to use in a call expression,
+ // an instantiation scope that includes the parameters of the callee is
+ // required to satisfy references from the default argument. For example:
+ // template<typename T> void f(T a, int = decltype(a)());
+ // void g() { f(0); }
+ LIS = std::make_unique<LocalInstantiationScope>(*this);
+ FunctionDecl *PatternFD = FD->getTemplateInstantiationPattern(
+ /*ForDefinition*/ false);
+ if (addInstantiatedParametersToScope(FD, PatternFD, *LIS, TemplateArgs))
+ return true;
+ }
+
+ runWithSufficientStackSpace(Loc, [&] {
+ Result = SubstInitializer(PatternExpr, TemplateArgs,
+ /*DirectInit*/false);
+ });
+ }
+ if (Result.isInvalid())
+ return true;
+
+ if (ForCallExpr) {
+ // Check the expression as an initializer for the parameter.
+ InitializedEntity Entity
+ = InitializedEntity::InitializeParameter(Context, Param);
+ InitializationKind Kind = InitializationKind::CreateCopy(
+ Param->getLocation(),
+ /*FIXME:EqualLoc*/ PatternExpr->getBeginLoc());
+ Expr *ResultE = Result.getAs<Expr>();
+
+ InitializationSequence InitSeq(*this, Entity, Kind, ResultE);
+ Result = InitSeq.Perform(*this, Entity, Kind, ResultE);
+ if (Result.isInvalid())
+ return true;
+
+ Result =
+ ActOnFinishFullExpr(Result.getAs<Expr>(), Param->getOuterLocStart(),
+ /*DiscardedValue*/ false);
+ } else {
+ // FIXME: Obtain the source location for the '=' token.
+ SourceLocation EqualLoc = PatternExpr->getBeginLoc();
+ Result = ConvertParamDefaultArgument(Param, Result.getAs<Expr>(), EqualLoc);
+ }
+ if (Result.isInvalid())
+ return true;
+
+ // Remember the instantiated default argument.
+ Param->setDefaultArg(Result.getAs<Expr>());
+
+ return false;
+}
+
/// Perform substitution on the base class specifiers of the
/// given class template specialization.
///
@@ -2511,7 +3134,7 @@ Sema::SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
Unexpanded);
bool ShouldExpand = false;
bool RetainExpansion = false;
- Optional<unsigned> NumExpansions;
+ std::optional<unsigned> NumExpansions;
if (CheckParameterPacksForExpansion(Base.getEllipsisLoc(),
Base.getSourceRange(),
Unexpanded,
@@ -2698,6 +3321,7 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
Instantiation->setInvalidDecl();
TemplateDeclInstantiator Instantiator(*this, Instantiation, TemplateArgs);
+ Instantiator.setEvaluateConstraints(false);
SmallVector<Decl*, 4> Fields;
// Delay instantiation of late parsed attributes.
LateInstantiatedAttrVec LateAttrs;
@@ -2788,11 +3412,10 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
CurrentInstantiationScope = I->Scope;
// Allow 'this' within late-parsed attributes.
- NamedDecl *ND = dyn_cast<NamedDecl>(I->NewDecl);
- CXXRecordDecl *ThisContext =
- dyn_cast_or_null<CXXRecordDecl>(ND->getDeclContext());
+ auto *ND = cast<NamedDecl>(I->NewDecl);
+ auto *ThisContext = dyn_cast_or_null<CXXRecordDecl>(ND->getDeclContext());
CXXThisScopeRAII ThisScope(*this, ThisContext, Qualifiers(),
- ND && ND->isCXXInstanceMember());
+ ND->isCXXInstanceMember());
Attr *NewAttr =
instantiateTemplateAttribute(I->TmplAttr, Context, *this, TemplateArgs);
@@ -2989,6 +3612,8 @@ bool Sema::InstantiateInClassInitializer(
ContextRAII SavedContext(*this, Instantiation->getParent());
EnterExpressionEvaluationContext EvalContext(
*this, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
+ ExprEvalContexts.back().DelayedDefaultInitializationContext = {
+ PointOfInstantiation, Instantiation, CurContext};
LocalInstantiationScope Scope(*this, true);
@@ -3086,7 +3711,7 @@ getPatternForClassTemplateSpecialization(
} else {
Matched.push_back(PartialSpecMatchResult());
Matched.back().Partial = Partial;
- Matched.back().Args = Info.take();
+ Matched.back().Args = Info.takeCanonical();
}
}
@@ -3231,6 +3856,17 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
if (FunctionDecl *Pattern =
Function->getInstantiatedFromMemberFunction()) {
+ if (Function->isIneligibleOrNotSelected())
+ continue;
+
+ if (Function->getTrailingRequiresClause()) {
+ ConstraintSatisfaction Satisfaction;
+ if (CheckFunctionConstraints(Function, Satisfaction) ||
+ !Satisfaction.IsSatisfied) {
+ continue;
+ }
+ }
+
if (Function->hasAttr<ExcludeFromExplicitInstantiationAttr>())
continue;
@@ -3466,11 +4102,9 @@ bool Sema::SubstTemplateArguments(
ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Out) {
- TemplateInstantiator Instantiator(*this, TemplateArgs,
- SourceLocation(),
+ TemplateInstantiator Instantiator(*this, TemplateArgs, SourceLocation(),
DeclarationName());
- return Instantiator.TransformTemplateArguments(Args.begin(), Args.end(),
- Out);
+ return Instantiator.TransformTemplateArguments(Args.begin(), Args.end(), Out);
}
ExprResult
@@ -3484,11 +4118,29 @@ Sema::SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs) {
return Instantiator.TransformExpr(E);
}
+ExprResult
+Sema::SubstConstraintExpr(Expr *E,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ // FIXME: should call SubstExpr directly if this function is equivalent or
+ // should it be different?
+ return SubstExpr(E, TemplateArgs);
+}
+
+ExprResult Sema::SubstConstraintExprWithoutSatisfaction(
+ Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs) {
+ if (!E)
+ return E;
+
+ TemplateInstantiator Instantiator(*this, TemplateArgs, SourceLocation(),
+ DeclarationName());
+ Instantiator.setEvaluateConstraints(false);
+ return Instantiator.TransformExpr(E);
+}
+
ExprResult Sema::SubstInitializer(Expr *Init,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit) {
- TemplateInstantiator Instantiator(*this, TemplateArgs,
- SourceLocation(),
+ TemplateInstantiator Instantiator(*this, TemplateArgs, SourceLocation(),
DeclarationName());
return Instantiator.TransformInitializer(Init, CXXDirectInit);
}
@@ -3537,15 +4189,6 @@ Sema::SubstTemplateName(NestedNameSpecifierLoc QualifierLoc,
return Instantiator.TransformTemplateName(SS, Name, Loc);
}
-bool Sema::Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
- TemplateArgumentListInfo &Result,
- const MultiLevelTemplateArgumentList &TemplateArgs) {
- TemplateInstantiator Instantiator(*this, TemplateArgs, SourceLocation(),
- DeclarationName());
-
- return Instantiator.TransformTemplateArguments(Args, NumArgs, Result);
-}
-
static const Decl *getCanonicalParmVarDecl(const Decl *D) {
// When storing ParmVarDecls in the local instantiation scope, we always
// want to use the ParmVarDecl from the canonical function declaration,
@@ -3628,7 +4271,7 @@ void LocalInstantiationScope::InstantiatedLocal(const Decl *D, Decl *Inst) {
LocalInstantiationScope *Current = this;
while (Current->CombineWithOuterScope && Current->Outer) {
Current = Current->Outer;
- assert(Current->LocalDecls.find(D) == Current->LocalDecls.end() &&
+ assert(!Current->LocalDecls.contains(D) &&
"Instantiated local in inner and outer scopes");
}
#endif
@@ -3652,7 +4295,7 @@ void LocalInstantiationScope::MakeInstantiatedLocalArgPack(const Decl *D) {
// This should be the first time we've been told about this decl.
for (LocalInstantiationScope *Current = this;
Current && Current->CombineWithOuterScope; Current = Current->Outer)
- assert(Current->LocalDecls.find(D) == Current->LocalDecls.end() &&
+ assert(!Current->LocalDecls.contains(D) &&
"Creating local pack after instantiation of local");
#endif
@@ -3665,7 +4308,7 @@ void LocalInstantiationScope::MakeInstantiatedLocalArgPack(const Decl *D) {
bool LocalInstantiationScope::isLocalPackExpansion(const Decl *D) {
for (DeclArgumentPack *Pack : ArgumentPacks)
- if (std::find(Pack->begin(), Pack->end(), D) != Pack->end())
+ if (llvm::is_contained(*Pack, D))
return true;
return false;
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index 25f134868758..fbc8572ea0e0 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -22,6 +22,7 @@
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ScopeInfo.h"
@@ -29,6 +30,7 @@
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateInstCallback.h"
#include "llvm/Support/TimeProfiler.h"
+#include <optional>
using namespace clang;
@@ -93,11 +95,14 @@ static void instantiateDependentAlignedAttr(
if (!Result.isInvalid())
S.AddAlignedAttr(New, *Aligned, Result.getAs<Expr>(), IsPackExpansion);
} else {
- TypeSourceInfo *Result = S.SubstType(Aligned->getAlignmentType(),
- TemplateArgs, Aligned->getLocation(),
- DeclarationName());
- if (Result)
- S.AddAlignedAttr(New, *Aligned, Result, IsPackExpansion);
+ if (TypeSourceInfo *Result =
+ S.SubstType(Aligned->getAlignmentType(), TemplateArgs,
+ Aligned->getLocation(), DeclarationName())) {
+ if (!S.CheckAlignasTypeArgument(Aligned->getSpelling(), Result,
+ Aligned->getLocation(),
+ Result->getTypeLoc().getSourceRange()))
+ S.AddAlignedAttr(New, *Aligned, Result, IsPackExpansion);
+ }
}
}
@@ -120,7 +125,7 @@ static void instantiateDependentAlignedAttr(
// Determine whether we can expand this attribute pack yet.
bool Expand = true, RetainExpansion = false;
- Optional<unsigned> NumExpansions;
+ std::optional<unsigned> NumExpansions;
// FIXME: Use the actual location of the ellipsis.
SourceLocation EllipsisLoc = Aligned->getLocation();
if (S.CheckParameterPacksForExpansion(EllipsisLoc, Aligned->getRange(),
@@ -188,15 +193,37 @@ static void instantiateDependentAnnotationAttr(
const AnnotateAttr *Attr, Decl *New) {
EnterExpressionEvaluationContext Unevaluated(
S, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+
+ // If the attribute has delayed arguments it will have to instantiate those
+ // and handle them as new arguments for the attribute.
+ bool HasDelayedArgs = Attr->delayedArgs_size();
+
+ ArrayRef<Expr *> ArgsToInstantiate =
+ HasDelayedArgs
+ ? ArrayRef<Expr *>{Attr->delayedArgs_begin(), Attr->delayedArgs_end()}
+ : ArrayRef<Expr *>{Attr->args_begin(), Attr->args_end()};
+
SmallVector<Expr *, 4> Args;
- Args.reserve(Attr->args_size());
- for (auto *E : Attr->args()) {
- ExprResult Result = S.SubstExpr(E, TemplateArgs);
- if (!Result.isUsable())
+ if (S.SubstExprs(ArgsToInstantiate,
+ /*IsCall=*/false, TemplateArgs, Args))
+ return;
+
+ StringRef Str = Attr->getAnnotation();
+ if (HasDelayedArgs) {
+ if (Args.size() < 1) {
+ S.Diag(Attr->getLoc(), diag::err_attribute_too_few_arguments)
+ << Attr << 1;
+ return;
+ }
+
+ if (!S.checkStringLiteralArgumentAttr(*Attr, Args[0], Str))
return;
- Args.push_back(Result.get());
+
+ llvm::SmallVector<Expr *, 4> ActualArgs;
+ ActualArgs.insert(ActualArgs.begin(), Args.begin() + 1, Args.end());
+ std::swap(Args, ActualArgs);
}
- S.AddAnnotationAttr(New, *Attr, Attr->getAnnotation(), Args);
+ S.AddAnnotationAttr(New, *Attr, Str, Args);
}
static Expr *instantiateDependentFunctionAttrCondition(
@@ -275,7 +302,15 @@ static void instantiateDependentCUDALaunchBoundsAttr(
MinBlocks = Result.getAs<Expr>();
}
- S.AddLaunchBoundsAttr(New, Attr, MaxThreads, MinBlocks);
+ Expr *MaxBlocks = nullptr;
+ if (Attr.getMaxBlocks()) {
+ Result = S.SubstExpr(Attr.getMaxBlocks(), TemplateArgs);
+ if (Result.isInvalid())
+ return;
+ MaxBlocks = Result.getAs<Expr>();
+ }
+
+ S.AddLaunchBoundsAttr(New, Attr, MaxThreads, MinBlocks, MaxBlocks);
}
static void
@@ -436,18 +471,19 @@ static void instantiateOMPDeclareVariantAttr(
return;
Expr *E = VariantFuncRef.get();
+
// Check function/variant ref for `omp declare variant` but not for `omp
// begin declare variant` (which use implicit attributes).
- Optional<std::pair<FunctionDecl *, Expr *>> DeclVarData =
- S.checkOpenMPDeclareVariantFunction(S.ConvertDeclToDeclGroup(New),
- VariantFuncRef.get(), TI,
+ std::optional<std::pair<FunctionDecl *, Expr *>> DeclVarData =
+ S.checkOpenMPDeclareVariantFunction(S.ConvertDeclToDeclGroup(New), E, TI,
+ Attr.appendArgs_size(),
Attr.getRange());
if (!DeclVarData)
return;
- E = DeclVarData.getValue().second;
- FD = DeclVarData.getValue().first;
+ E = DeclVarData->second;
+ FD = DeclVarData->first;
if (auto *VariantDRE = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
if (auto *VariantFD = dyn_cast<FunctionDecl>(VariantDRE->getDecl())) {
@@ -481,7 +517,30 @@ static void instantiateOMPDeclareVariantAttr(
}
}
- S.ActOnOpenMPDeclareVariantDirective(FD, E, TI, Attr.getRange());
+ SmallVector<Expr *, 8> NothingExprs;
+ SmallVector<Expr *, 8> NeedDevicePtrExprs;
+ SmallVector<OMPInteropInfo, 4> AppendArgs;
+
+ for (Expr *E : Attr.adjustArgsNothing()) {
+ ExprResult ER = Subst(E);
+ if (ER.isInvalid())
+ continue;
+ NothingExprs.push_back(ER.get());
+ }
+ for (Expr *E : Attr.adjustArgsNeedDevicePtr()) {
+ ExprResult ER = Subst(E);
+ if (ER.isInvalid())
+ continue;
+ NeedDevicePtrExprs.push_back(ER.get());
+ }
+ for (OMPInteropInfo &II : Attr.appendArgs()) {
+ // When prefer_type is implemented for append_args handle them here too.
+ AppendArgs.emplace_back(II.IsTarget, II.IsTargetSync);
+ }
+
+ S.ActOnOpenMPDeclareVariantDirective(
+ FD, E, TI, NothingExprs, NeedDevicePtrExprs, AppendArgs, SourceLocation(),
+ SourceLocation(), Attr.getRange());
}
static void instantiateDependentAMDGPUFlatWorkGroupSizeAttr(
@@ -504,18 +563,16 @@ static void instantiateDependentAMDGPUFlatWorkGroupSizeAttr(
S.addAMDGPUFlatWorkGroupSizeAttr(New, Attr, MinExpr, MaxExpr);
}
-static ExplicitSpecifier
-instantiateExplicitSpecifier(Sema &S,
- const MultiLevelTemplateArgumentList &TemplateArgs,
- ExplicitSpecifier ES, FunctionDecl *New) {
+ExplicitSpecifier Sema::instantiateExplicitSpecifier(
+ const MultiLevelTemplateArgumentList &TemplateArgs, ExplicitSpecifier ES) {
if (!ES.getExpr())
return ES;
Expr *OldCond = ES.getExpr();
Expr *Cond = nullptr;
{
EnterExpressionEvaluationContext Unevaluated(
- S, Sema::ExpressionEvaluationContext::ConstantEvaluated);
- ExprResult SubstResult = S.SubstExpr(OldCond, TemplateArgs);
+ *this, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+ ExprResult SubstResult = SubstExpr(OldCond, TemplateArgs);
if (SubstResult.isInvalid()) {
return ExplicitSpecifier::Invalid();
}
@@ -523,7 +580,7 @@ instantiateExplicitSpecifier(Sema &S,
}
ExplicitSpecifier Result(Cond, ES.getKind());
if (!Cond->isTypeDependent())
- S.tryResolveExplicitSpecifier(Result);
+ tryResolveExplicitSpecifier(Result);
return Result;
}
@@ -556,30 +613,10 @@ static void instantiateDependentAMDGPUWavesPerEUAttr(
static void instantiateDependentSYCLKernelAttr(
Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs,
const SYCLKernelAttr &Attr, Decl *New) {
- // Functions cannot be partially specialized, so if we are being instantiated,
- // we are obviously a complete specialization. Since this attribute is only
- // valid on function template declarations, we know that this is a full
- // instantiation of a kernel.
- S.AddSYCLKernelLambda(cast<FunctionDecl>(New));
-
- // Evaluate whether this would change any of the already evaluated
- // __builtin_sycl_unique_stable_name values.
- for (auto &Itr : S.Context.SYCLUniqueStableNameEvaluatedValues) {
- const std::string &CurName = Itr.first->ComputeName(S.Context);
- if (Itr.second != CurName) {
- S.Diag(New->getLocation(),
- diag::err_kernel_invalidates_sycl_unique_stable_name);
- S.Diag(Itr.first->getLocation(),
- diag::note_sycl_unique_stable_name_evaluated_here);
- // Update this so future diagnostics work correctly.
- Itr.second = CurName;
- }
- }
-
New->addAttr(Attr.clone(S.getASTContext()));
}
-/// Determine whether the attribute A might be relevent to the declaration D.
+/// Determine whether the attribute A might be relevant to the declaration D.
/// If not, we can skip instantiating it. The attribute may or may not have
/// been instantiated yet.
static bool isRelevantAttr(Sema &S, const Decl *D, const Attr *A) {
@@ -598,9 +635,42 @@ static bool isRelevantAttr(Sema &S, const Decl *D, const Attr *A) {
return true;
}
+ if (const auto *BA = dyn_cast<BuiltinAttr>(A)) {
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ switch (BA->getID()) {
+ case Builtin::BIforward:
+ // Do not treat 'std::forward' as a builtin if it takes an rvalue reference
+ // type and returns an lvalue reference type. The library implementation
+ // will produce an error in this case; don't get in its way.
+ if (FD && FD->getNumParams() >= 1 &&
+ FD->getParamDecl(0)->getType()->isRValueReferenceType() &&
+ FD->getReturnType()->isLValueReferenceType()) {
+ return false;
+ }
+ [[fallthrough]];
+ case Builtin::BImove:
+ case Builtin::BImove_if_noexcept:
+ // HACK: Super-old versions of libc++ (3.1 and earlier) provide
+ // std::forward and std::move overloads that sometimes return by value
+ // instead of by reference when building in C++98 mode. Don't treat such
+ // cases as builtins.
+ if (FD && !FD->getReturnType()->isReferenceType())
+ return false;
+ break;
+ }
+ }
+
return true;
}
+static void instantiateDependentHLSLParamModifierAttr(
+ Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs,
+ const HLSLParamModifierAttr *Attr, Decl *New) {
+ ParmVarDecl *P = cast<ParmVarDecl>(New);
+ P->addAttr(Attr->clone(S.getASTContext()));
+ P->setType(S.getASTContext().getLValueReferenceType(P->getType()));
+}
+
void Sema::InstantiateAttrsForDecl(
const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Tmpl,
Decl *New, LateInstantiatedAttrVec *LateAttrs,
@@ -722,6 +792,12 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
*AMDGPUFlatWorkGroupSize, New);
}
+ if (const auto *ParamAttr = dyn_cast<HLSLParamModifierAttr>(TmplAttr)) {
+ instantiateDependentHLSLParamModifierAttr(*this, TemplateArgs, ParamAttr,
+ New);
+ continue;
+ }
+
// Existing DLL attribute on the instantiation takes precedence.
if (TmplAttr->getKind() == attr::DLLExport ||
TmplAttr->getKind() == attr::DLLImport) {
@@ -782,6 +858,22 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
}
}
+/// Update instantiation attributes after template was late parsed.
+///
+/// Some attributes are evaluated based on the body of template. If it is
+/// late parsed, such attributes cannot be evaluated when declaration is
+/// instantiated. This function is used to update instantiation attributes when
+/// template definition is ready.
+void Sema::updateAttrsForLateParsedTemplate(const Decl *Pattern, Decl *Inst) {
+ for (const auto *Attr : Pattern->attrs()) {
+ if (auto *A = dyn_cast<StrictFPAttr>(Attr)) {
+ if (!Inst->hasAttr<StrictFPAttr>())
+ Inst->addAttr(A->clone(getASTContext()));
+ continue;
+ }
+ }
+}
+
/// In the MS ABI, we need to instantiate default arguments of dllexported
/// default constructors along with the constructor definition. This allows IR
/// gen to emit a constructor closure which calls the default constructor with
@@ -798,7 +890,7 @@ void Sema::InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor) {
for (unsigned I = 0; I != NumParams; ++I) {
(void)CheckCXXDefaultArgExpr(Attr->getLocation(), Ctor,
Ctor->getParamDecl(I));
- DiscardCleanupsInEvaluationContext();
+ CleanupVarDeclMarking();
}
}
@@ -825,6 +917,10 @@ TemplateDeclInstantiator::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
llvm_unreachable("Translation units cannot be instantiated");
}
+Decl *TemplateDeclInstantiator::VisitHLSLBufferDecl(HLSLBufferDecl *Decl) {
+ llvm_unreachable("HLSL buffer declarations cannot be instantiated");
+}
+
Decl *
TemplateDeclInstantiator::VisitPragmaCommentDecl(PragmaCommentDecl *D) {
llvm_unreachable("pragma comment cannot be instantiated");
@@ -844,6 +940,11 @@ Decl *TemplateDeclInstantiator::VisitMSGuidDecl(MSGuidDecl *D) {
llvm_unreachable("GUID declaration cannot be instantiated");
}
+Decl *TemplateDeclInstantiator::VisitUnnamedGlobalConstantDecl(
+ UnnamedGlobalConstantDecl *D) {
+ llvm_unreachable("UnnamedGlobalConstantDecl cannot be instantiated");
+}
+
Decl *TemplateDeclInstantiator::VisitTemplateParamObjectDecl(
TemplateParamObjectDecl *D) {
llvm_unreachable("template parameter objects cannot be instantiated");
@@ -951,6 +1052,7 @@ Decl *TemplateDeclInstantiator::InstantiateTypedefNameDecl(TypedefNameDecl *D,
SemaRef.inferGslPointerAttribute(Typedef);
Typedef->setAccess(D->getAccess());
+ Typedef->setReferenced(D->isReferenced());
return Typedef;
}
@@ -1118,6 +1220,9 @@ Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D,
if (Var->isStaticLocal())
SemaRef.CheckStaticLocalForDllExport(Var);
+ if (Var->getTLSKind())
+ SemaRef.CheckThreadLocalForLargeAlignment(Var);
+
return Var;
}
@@ -1344,11 +1449,14 @@ Decl *TemplateDeclInstantiator::VisitStaticAssertDecl(StaticAssertDecl *D) {
if (InstantiatedAssertExpr.isInvalid())
return nullptr;
- return SemaRef.BuildStaticAssertDeclaration(D->getLocation(),
- InstantiatedAssertExpr.get(),
- D->getMessage(),
- D->getRParenLoc(),
- D->isFailed());
+ ExprResult InstantiatedMessageExpr =
+ SemaRef.SubstExpr(D->getMessage(), TemplateArgs);
+ if (InstantiatedMessageExpr.isInvalid())
+ return nullptr;
+
+ return SemaRef.BuildStaticAssertDeclaration(
+ D->getLocation(), InstantiatedAssertExpr.get(),
+ InstantiatedMessageExpr.get(), D->getRParenLoc(), D->isFailed());
}
Decl *TemplateDeclInstantiator::VisitEnumDecl(EnumDecl *D) {
@@ -1568,33 +1676,16 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
if (!PrevClassTemplate && QualifierLoc) {
SemaRef.Diag(Pattern->getLocation(), diag::err_not_tag_in_scope)
- << D->getTemplatedDecl()->getTagKind() << Pattern->getDeclName() << DC
- << QualifierLoc.getSourceRange();
+ << llvm::to_underlying(D->getTemplatedDecl()->getTagKind())
+ << Pattern->getDeclName() << DC << QualifierLoc.getSourceRange();
return nullptr;
}
-
- if (PrevClassTemplate) {
- TemplateParameterList *PrevParams
- = PrevClassTemplate->getMostRecentDecl()->getTemplateParameters();
-
- // Make sure the parameter lists match.
- if (!SemaRef.TemplateParameterListsAreEqual(InstParams, PrevParams, true,
- Sema::TPL_TemplateMatch))
- return nullptr;
-
- // Do some additional validation, then merge default arguments
- // from the existing declarations.
- if (SemaRef.CheckTemplateParameterList(InstParams, PrevParams,
- Sema::TPC_ClassTemplate))
- return nullptr;
- }
}
CXXRecordDecl *RecordInst = CXXRecordDecl::Create(
SemaRef.Context, Pattern->getTagKind(), DC, Pattern->getBeginLoc(),
Pattern->getLocation(), Pattern->getIdentifier(), PrevDecl,
/*DelayTypeCreation=*/true);
-
if (QualifierLoc)
RecordInst->setQualifierInfo(QualifierLoc);
@@ -1604,16 +1695,38 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
ClassTemplateDecl *Inst
= ClassTemplateDecl::Create(SemaRef.Context, DC, D->getLocation(),
D->getIdentifier(), InstParams, RecordInst);
- assert(!(isFriend && Owner->isDependentContext()));
- Inst->setPreviousDecl(PrevClassTemplate);
-
RecordInst->setDescribedClassTemplate(Inst);
if (isFriend) {
- if (PrevClassTemplate)
+ assert(!Owner->isDependentContext());
+ Inst->setLexicalDeclContext(Owner);
+ RecordInst->setLexicalDeclContext(Owner);
+
+ if (PrevClassTemplate) {
+ Inst->setCommonPtr(PrevClassTemplate->getCommonPtr());
+ RecordInst->setTypeForDecl(
+ PrevClassTemplate->getTemplatedDecl()->getTypeForDecl());
+ const ClassTemplateDecl *MostRecentPrevCT =
+ PrevClassTemplate->getMostRecentDecl();
+ TemplateParameterList *PrevParams =
+ MostRecentPrevCT->getTemplateParameters();
+
+ // Make sure the parameter lists match.
+ if (!SemaRef.TemplateParameterListsAreEqual(
+ RecordInst, InstParams, MostRecentPrevCT->getTemplatedDecl(),
+ PrevParams, true, Sema::TPL_TemplateMatch))
+ return nullptr;
+
+ // Do some additional validation, then merge default arguments
+ // from the existing declarations.
+ if (SemaRef.CheckTemplateParameterList(InstParams, PrevParams,
+ Sema::TPC_ClassTemplate))
+ return nullptr;
+
Inst->setAccess(PrevClassTemplate->getAccess());
- else
+ } else {
Inst->setAccess(D->getAccess());
+ }
Inst->setObjectOfFriendDecl();
// TODO: do we want to track the instantiation progeny of this
@@ -1624,15 +1737,15 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
Inst->setInstantiatedFromMemberTemplate(D);
}
+ Inst->setPreviousDecl(PrevClassTemplate);
+
// Trigger creation of the type for the instantiation.
- SemaRef.Context.getInjectedClassNameType(RecordInst,
- Inst->getInjectedClassNameSpecialization());
+ SemaRef.Context.getInjectedClassNameType(
+ RecordInst, Inst->getInjectedClassNameSpecialization());
// Finish handling of friends.
if (isFriend) {
DC->makeDeclVisibleInContext(Inst);
- Inst->setLexicalDeclContext(Owner);
- RecordInst->setLexicalDeclContext(Owner);
return Inst;
}
@@ -1769,6 +1882,7 @@ TemplateDeclInstantiator::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
// merged with the local instantiation scope for the function template
// itself.
LocalInstantiationScope Scope(SemaRef);
+ Sema::ConstraintEvalRAII<TemplateDeclInstantiator> RAII(*this);
TemplateParameterList *TempParams = D->getTemplateParameters();
TemplateParameterList *InstParams = SubstTemplateParams(TempParams);
@@ -1816,9 +1930,7 @@ TemplateDeclInstantiator::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) {
CXXRecordDecl *PrevDecl = nullptr;
- if (D->isInjectedClassName())
- PrevDecl = cast<CXXRecordDecl>(Owner);
- else if (CXXRecordDecl *PatternPrev = getPreviousDeclForInstantiation(D)) {
+ if (CXXRecordDecl *PatternPrev = getPreviousDeclForInstantiation(D)) {
NamedDecl *Prev = SemaRef.FindInstantiatedDecl(D->getLocation(),
PatternPrev,
TemplateArgs);
@@ -1827,15 +1939,20 @@ Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) {
}
CXXRecordDecl *Record = nullptr;
+ bool IsInjectedClassName = D->isInjectedClassName();
if (D->isLambda())
Record = CXXRecordDecl::CreateLambda(
SemaRef.Context, Owner, D->getLambdaTypeInfo(), D->getLocation(),
- D->isDependentLambda(), D->isGenericLambda(),
+ D->getLambdaDependencyKind(), D->isGenericLambda(),
D->getLambdaCaptureDefault());
else
Record = CXXRecordDecl::Create(SemaRef.Context, D->getTagKind(), Owner,
D->getBeginLoc(), D->getLocation(),
- D->getIdentifier(), PrevDecl);
+ D->getIdentifier(), PrevDecl,
+ /*DelayTypeCreation=*/IsInjectedClassName);
+ // Link the type of the injected-class-name to that of the outer class.
+ if (IsInjectedClassName)
+ (void)SemaRef.Context.getTypeDeclType(Record, cast<CXXRecordDecl>(Owner));
// Substitute the nested name specifier, if any.
if (SubstQualifier(D, Record))
@@ -1850,7 +1967,7 @@ Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) {
// specifier. Remove once this area of the code gets sorted out.
if (D->getAccess() != AS_none)
Record->setAccess(D->getAccess());
- if (!D->isInjectedClassName())
+ if (!IsInjectedClassName)
Record->setInstantiationOfMemberClass(D, TSK_ImplicitInstantiation);
// If the original function was part of a friend declaration,
@@ -1903,6 +2020,9 @@ Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) {
SemaRef.DiagnoseUnusedNestedTypedefs(Record);
+ if (IsInjectedClassName)
+ assert(Record->isInjectedClassName() && "Broken injected-class-name");
+
return Record;
}
@@ -1931,7 +2051,7 @@ static QualType adjustFunctionTypeForInstantiation(ASTContext &Context,
/// Normal class members are of more specific types and therefore
/// don't make it here. This function serves three purposes:
/// 1) instantiating function templates
-/// 2) substituting friend declarations
+/// 2) substituting friend and local function declarations
/// 3) substituting deduction guide declarations for nested class templates
Decl *TemplateDeclInstantiator::VisitFunctionDecl(
FunctionDecl *D, TemplateParameterList *TemplateParams,
@@ -1965,8 +2085,8 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
ExplicitSpecifier InstantiatedExplicitSpecifier;
if (auto *DGuide = dyn_cast<CXXDeductionGuideDecl>(D)) {
- InstantiatedExplicitSpecifier = instantiateExplicitSpecifier(
- SemaRef, TemplateArgs, DGuide->getExplicitSpecifier(), DGuide);
+ InstantiatedExplicitSpecifier = SemaRef.instantiateExplicitSpecifier(
+ TemplateArgs, DGuide->getExplicitSpecifier());
if (InstantiatedExplicitSpecifier.isInvalid())
return nullptr;
}
@@ -2002,19 +2122,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
return nullptr;
}
- // FIXME: Concepts: Do not substitute into constraint expressions
Expr *TrailingRequiresClause = D->getTrailingRequiresClause();
- if (TrailingRequiresClause) {
- EnterExpressionEvaluationContext ConstantEvaluated(
- SemaRef, Sema::ExpressionEvaluationContext::Unevaluated);
- ExprResult SubstRC = SemaRef.SubstExpr(TrailingRequiresClause,
- TemplateArgs);
- if (SubstRC.isInvalid())
- return nullptr;
- TrailingRequiresClause = SubstRC.get();
- if (!SemaRef.CheckConstraintExpression(TrailingRequiresClause))
- return nullptr;
- }
// If we're instantiating a local function declaration, put the result
// in the enclosing namespace; otherwise we need to find the instantiated
@@ -2044,16 +2152,17 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
Function = CXXDeductionGuideDecl::Create(
SemaRef.Context, DC, D->getInnerLocStart(),
InstantiatedExplicitSpecifier, NameInfo, T, TInfo,
- D->getSourceRange().getEnd());
- if (DGuide->isCopyDeductionCandidate())
- cast<CXXDeductionGuideDecl>(Function)->setIsCopyDeductionCandidate();
+ D->getSourceRange().getEnd(), DGuide->getCorrespondingConstructor(),
+ DGuide->getDeductionCandidateKind());
Function->setAccess(D->getAccess());
} else {
Function = FunctionDecl::Create(
SemaRef.Context, DC, D->getInnerLocStart(), NameInfo, T, TInfo,
- D->getCanonicalDecl()->getStorageClass(), D->isInlineSpecified(),
- D->hasWrittenPrototype(), D->getConstexprKind(),
+ D->getCanonicalDecl()->getStorageClass(), D->UsesFPIntrin(),
+ D->isInlineSpecified(), D->hasWrittenPrototype(), D->getConstexprKind(),
TrailingRequiresClause);
+ Function->setFriendConstraintRefersToEnclosingTemplate(
+ D->FriendConstraintRefersToEnclosingTemplate());
Function->setRangeEnd(D->getSourceRange().getEnd());
}
@@ -2071,6 +2180,9 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
assert(D->getDeclContext()->isFileContext());
LexicalDC = D->getDeclContext();
}
+ else if (D->isLocalExternDecl()) {
+ LexicalDC = SemaRef.CurContext;
+ }
Function->setLexicalDeclContext(LexicalDC);
@@ -2122,6 +2234,11 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
// definition. We don't want non-template functions to be marked as being
// template instantiations.
Function->setInstantiationOfMemberFunction(D, TSK_ImplicitInstantiation);
+ } else if (!isFriend) {
+ // If this is not a function template, and this is not a friend (that is,
+ // this is a locally declared function), save the instantiation relationship
+ // for the purposes of constraint instantiation.
+ Function->setInstantiatedFromDecl(D);
}
if (isFriend) {
@@ -2142,43 +2259,47 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
D->isLocalExternDecl() ? Sema::ForExternalRedeclaration
: SemaRef.forRedeclarationInCurContext());
- if (DependentFunctionTemplateSpecializationInfo *Info
- = D->getDependentSpecializationInfo()) {
- assert(isFriend && "non-friend has dependent specialization info?");
+ if (DependentFunctionTemplateSpecializationInfo *DFTSI =
+ D->getDependentSpecializationInfo()) {
+ assert(isFriend && "dependent specialization info on "
+ "non-member non-friend function?");
// Instantiate the explicit template arguments.
- TemplateArgumentListInfo ExplicitArgs(Info->getLAngleLoc(),
- Info->getRAngleLoc());
- if (SemaRef.Subst(Info->getTemplateArgs(), Info->getNumTemplateArgs(),
- ExplicitArgs, TemplateArgs))
- return nullptr;
-
- // Map the candidate templates to their instantiations.
- for (unsigned I = 0, E = Info->getNumTemplates(); I != E; ++I) {
- Decl *Temp = SemaRef.FindInstantiatedDecl(D->getLocation(),
- Info->getTemplate(I),
- TemplateArgs);
- if (!Temp) return nullptr;
+ TemplateArgumentListInfo ExplicitArgs;
+ if (const auto *ArgsWritten = DFTSI->TemplateArgumentsAsWritten) {
+ ExplicitArgs.setLAngleLoc(ArgsWritten->getLAngleLoc());
+ ExplicitArgs.setRAngleLoc(ArgsWritten->getRAngleLoc());
+ if (SemaRef.SubstTemplateArguments(ArgsWritten->arguments(), TemplateArgs,
+ ExplicitArgs))
+ return nullptr;
+ }
- Previous.addDecl(cast<FunctionTemplateDecl>(Temp));
+ // Map the candidates for the primary template to their instantiations.
+ for (FunctionTemplateDecl *FTD : DFTSI->getCandidates()) {
+ if (NamedDecl *ND =
+ SemaRef.FindInstantiatedDecl(D->getLocation(), FTD, TemplateArgs))
+ Previous.addDecl(ND);
+ else
+ return nullptr;
}
- if (SemaRef.CheckFunctionTemplateSpecialization(Function,
- &ExplicitArgs,
- Previous))
+ if (SemaRef.CheckFunctionTemplateSpecialization(
+ Function,
+ DFTSI->TemplateArgumentsAsWritten ? &ExplicitArgs : nullptr,
+ Previous))
Function->setInvalidDecl();
IsExplicitSpecialization = true;
- } else if (const ASTTemplateArgumentListInfo *Info =
+ } else if (const ASTTemplateArgumentListInfo *ArgsWritten =
D->getTemplateSpecializationArgsAsWritten()) {
// The name of this function was written as a template-id.
SemaRef.LookupQualifiedName(Previous, DC);
// Instantiate the explicit template arguments.
- TemplateArgumentListInfo ExplicitArgs(Info->getLAngleLoc(),
- Info->getRAngleLoc());
- if (SemaRef.Subst(Info->getTemplateArgs(), Info->getNumTemplateArgs(),
- ExplicitArgs, TemplateArgs))
+ TemplateArgumentListInfo ExplicitArgs(ArgsWritten->getLAngleLoc(),
+ ArgsWritten->getRAngleLoc());
+ if (SemaRef.SubstTemplateArguments(ArgsWritten->arguments(), TemplateArgs,
+ ExplicitArgs))
return nullptr;
if (SemaRef.CheckFunctionTemplateSpecialization(Function,
@@ -2195,7 +2316,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
// In C++, the previous declaration we find might be a tag type
// (class or enum). In this case, the new declaration will hide the
- // tag type. Note that this does does not apply if we're declaring a
+ // tag type. Note that this does not apply if we're declaring a
// typedef (C++ [dcl.typedef]p4).
if (Previous.isSingleTagDecl())
Previous.clear();
@@ -2203,13 +2324,47 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
// Filter out previous declarations that don't match the scope. The only
// effect this has is to remove declarations found in inline namespaces
// for friend declarations with unqualified names.
- SemaRef.FilterLookupForScope(Previous, DC, /*Scope*/ nullptr,
- /*ConsiderLinkage*/ true,
- QualifierLoc.hasQualifier());
+ if (isFriend && !QualifierLoc) {
+ SemaRef.FilterLookupForScope(Previous, DC, /*Scope=*/ nullptr,
+ /*ConsiderLinkage=*/ true,
+ QualifierLoc.hasQualifier());
+ }
+ }
+
+ // Per [temp.inst], default arguments in function declarations at local scope
+ // are instantiated along with the enclosing declaration. For example:
+ //
+ // template<typename T>
+ // void ft() {
+ // void f(int = []{ return T::value; }());
+ // }
+ // template void ft<int>(); // error: type 'int' cannot be used prior
+ // to '::' because it has no members
+ //
+ // The error is issued during instantiation of ft<int>() because substitution
+ // into the default argument fails; the default argument is instantiated even
+ // though it is never used.
+ if (Function->isLocalExternDecl()) {
+ for (ParmVarDecl *PVD : Function->parameters()) {
+ if (!PVD->hasDefaultArg())
+ continue;
+ if (SemaRef.SubstDefaultArgument(D->getInnerLocStart(), PVD, TemplateArgs)) {
+ // If substitution fails, the default argument is set to a
+ // RecoveryExpr that wraps the uninstantiated default argument so
+ // that downstream diagnostics are omitted.
+ Expr *UninstExpr = PVD->getUninstantiatedDefaultArg();
+ ExprResult ErrorResult = SemaRef.CreateRecoveryExpr(
+ UninstExpr->getBeginLoc(), UninstExpr->getEndLoc(),
+ { UninstExpr }, UninstExpr->getType());
+ if (ErrorResult.isUsable())
+ PVD->setDefaultArg(ErrorResult.get());
+ }
+ }
}
SemaRef.CheckFunctionDeclaration(/*Scope*/ nullptr, Function, Previous,
- IsExplicitSpecialization);
+ IsExplicitSpecialization,
+ Function->isThisDeclarationADefinition());
// Check the template parameter list against the previous declaration. The
// goal here is to pick up default arguments added since the friend was
@@ -2265,7 +2420,6 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
CXXMethodDecl *D, TemplateParameterList *TemplateParams,
- Optional<const ASTTemplateArgumentListInfo *> ClassScopeSpecializationArgs,
RewriteKind FunctionRewriteKind) {
FunctionTemplateDecl *FunctionTemplate = D->getDescribedFunctionTemplate();
if (FunctionTemplate && !TemplateParams) {
@@ -2294,6 +2448,9 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
cast<Decl>(Owner)->isDefinedOutsideFunctionOrMethod());
LocalInstantiationScope Scope(SemaRef, MergeWithParentScope);
+ Sema::LambdaScopeForCallOperatorInstantiationRAII LambdaScope(
+ SemaRef, const_cast<CXXMethodDecl *>(D), TemplateArgs, Scope);
+
// Instantiate enclosing template arguments for friends.
SmallVector<TemplateParameterList *, 4> TempParamLists;
unsigned NumTempParamLists = 0;
@@ -2308,11 +2465,25 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
}
}
- ExplicitSpecifier InstantiatedExplicitSpecifier =
- instantiateExplicitSpecifier(SemaRef, TemplateArgs,
- ExplicitSpecifier::getFromDecl(D), D);
- if (InstantiatedExplicitSpecifier.isInvalid())
- return nullptr;
+ auto InstantiatedExplicitSpecifier = ExplicitSpecifier::getFromDecl(D);
+ // deduction guides need this
+ const bool CouldInstantiate =
+ InstantiatedExplicitSpecifier.getExpr() == nullptr ||
+ !InstantiatedExplicitSpecifier.getExpr()->isValueDependent();
+
+ // Delay the instantiation of the explicit-specifier until after the
+ // constraints are checked during template argument deduction.
+ if (CouldInstantiate ||
+ SemaRef.CodeSynthesisContexts.back().Kind !=
+ Sema::CodeSynthesisContext::DeducedTemplateArgumentSubstitution) {
+ InstantiatedExplicitSpecifier = SemaRef.instantiateExplicitSpecifier(
+ TemplateArgs, InstantiatedExplicitSpecifier);
+
+ if (InstantiatedExplicitSpecifier.isInvalid())
+ return nullptr;
+ } else {
+ InstantiatedExplicitSpecifier.setKind(ExplicitSpecKind::Unresolved);
+ }
// Implicit destructors/constructors created for local classes in
// DeclareImplicit* (see SemaDeclCXX.cpp) might not have an associated TSI.
@@ -2359,23 +2530,6 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
return nullptr;
}
- // FIXME: Concepts: Do not substitute into constraint expressions
- Expr *TrailingRequiresClause = D->getTrailingRequiresClause();
- if (TrailingRequiresClause) {
- EnterExpressionEvaluationContext ConstantEvaluated(
- SemaRef, Sema::ExpressionEvaluationContext::Unevaluated);
- auto *ThisContext = dyn_cast_or_null<CXXRecordDecl>(Owner);
- Sema::CXXThisScopeRAII ThisScope(SemaRef, ThisContext,
- D->getMethodQualifiers(), ThisContext);
- ExprResult SubstRC = SemaRef.SubstExpr(TrailingRequiresClause,
- TemplateArgs);
- if (SubstRC.isInvalid())
- return nullptr;
- TrailingRequiresClause = SubstRC.get();
- if (!SemaRef.CheckConstraintExpression(TrailingRequiresClause))
- return nullptr;
- }
-
DeclContext *DC = Owner;
if (isFriend) {
if (QualifierLoc) {
@@ -2393,6 +2547,9 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
if (!DC) return nullptr;
}
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(DC);
+ Expr *TrailingRequiresClause = D->getTrailingRequiresClause();
+
DeclarationNameInfo NameInfo
= SemaRef.SubstDeclarationNameInfo(D->getNameInfo(), TemplateArgs);
@@ -2400,22 +2557,23 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
adjustForRewrite(FunctionRewriteKind, D, T, TInfo, NameInfo);
// Build the instantiated method declaration.
- CXXRecordDecl *Record = cast<CXXRecordDecl>(DC);
CXXMethodDecl *Method = nullptr;
SourceLocation StartLoc = D->getInnerLocStart();
if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
Method = CXXConstructorDecl::Create(
SemaRef.Context, Record, StartLoc, NameInfo, T, TInfo,
- InstantiatedExplicitSpecifier, Constructor->isInlineSpecified(), false,
+ InstantiatedExplicitSpecifier, Constructor->UsesFPIntrin(),
+ Constructor->isInlineSpecified(), false,
Constructor->getConstexprKind(), InheritedConstructor(),
TrailingRequiresClause);
Method->setRangeEnd(Constructor->getEndLoc());
} else if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(D)) {
Method = CXXDestructorDecl::Create(
SemaRef.Context, Record, StartLoc, NameInfo, T, TInfo,
- Destructor->isInlineSpecified(), false, Destructor->getConstexprKind(),
- TrailingRequiresClause);
+ Destructor->UsesFPIntrin(), Destructor->isInlineSpecified(), false,
+ Destructor->getConstexprKind(), TrailingRequiresClause);
+ Method->setIneligibleOrNotSelected(true);
Method->setRangeEnd(Destructor->getEndLoc());
Method->setDeclName(SemaRef.Context.DeclarationNames.getCXXDestructorName(
SemaRef.Context.getCanonicalType(
@@ -2423,15 +2581,15 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
} else if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(D)) {
Method = CXXConversionDecl::Create(
SemaRef.Context, Record, StartLoc, NameInfo, T, TInfo,
- Conversion->isInlineSpecified(), InstantiatedExplicitSpecifier,
- Conversion->getConstexprKind(), Conversion->getEndLoc(),
- TrailingRequiresClause);
+ Conversion->UsesFPIntrin(), Conversion->isInlineSpecified(),
+ InstantiatedExplicitSpecifier, Conversion->getConstexprKind(),
+ Conversion->getEndLoc(), TrailingRequiresClause);
} else {
StorageClass SC = D->isStatic() ? SC_Static : SC_None;
- Method = CXXMethodDecl::Create(SemaRef.Context, Record, StartLoc, NameInfo,
- T, TInfo, SC, D->isInlineSpecified(),
- D->getConstexprKind(), D->getEndLoc(),
- TrailingRequiresClause);
+ Method = CXXMethodDecl::Create(
+ SemaRef.Context, Record, StartLoc, NameInfo, T, TInfo, SC,
+ D->UsesFPIntrin(), D->isInlineSpecified(), D->getConstexprKind(),
+ D->getEndLoc(), TrailingRequiresClause);
}
if (D->isInlined())
@@ -2483,7 +2641,7 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
if (NumTempParamLists)
Method->setTemplateParameterListsInfo(
SemaRef.Context,
- llvm::makeArrayRef(TempParamLists.data(), NumTempParamLists));
+ llvm::ArrayRef(TempParamLists.data(), NumTempParamLists));
Method->setLexicalDeclContext(Owner);
Method->setObjectOfFriendDecl();
@@ -2505,42 +2663,42 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
// If the name of this function was written as a template-id, instantiate
// the explicit template arguments.
- if (DependentFunctionTemplateSpecializationInfo *Info
- = D->getDependentSpecializationInfo()) {
- assert(isFriend && "non-friend has dependent specialization info?");
-
+ if (DependentFunctionTemplateSpecializationInfo *DFTSI =
+ D->getDependentSpecializationInfo()) {
// Instantiate the explicit template arguments.
- TemplateArgumentListInfo ExplicitArgs(Info->getLAngleLoc(),
- Info->getRAngleLoc());
- if (SemaRef.Subst(Info->getTemplateArgs(), Info->getNumTemplateArgs(),
- ExplicitArgs, TemplateArgs))
- return nullptr;
-
- // Map the candidate templates to their instantiations.
- for (unsigned I = 0, E = Info->getNumTemplates(); I != E; ++I) {
- Decl *Temp = SemaRef.FindInstantiatedDecl(D->getLocation(),
- Info->getTemplate(I),
- TemplateArgs);
- if (!Temp) return nullptr;
+ TemplateArgumentListInfo ExplicitArgs;
+ if (const auto *ArgsWritten = DFTSI->TemplateArgumentsAsWritten) {
+ ExplicitArgs.setLAngleLoc(ArgsWritten->getLAngleLoc());
+ ExplicitArgs.setRAngleLoc(ArgsWritten->getRAngleLoc());
+ if (SemaRef.SubstTemplateArguments(ArgsWritten->arguments(), TemplateArgs,
+ ExplicitArgs))
+ return nullptr;
+ }
- Previous.addDecl(cast<FunctionTemplateDecl>(Temp));
+ // Map the candidates for the primary template to their instantiations.
+ for (FunctionTemplateDecl *FTD : DFTSI->getCandidates()) {
+ if (NamedDecl *ND =
+ SemaRef.FindInstantiatedDecl(D->getLocation(), FTD, TemplateArgs))
+ Previous.addDecl(ND);
+ else
+ return nullptr;
}
- if (SemaRef.CheckFunctionTemplateSpecialization(Method,
- &ExplicitArgs,
- Previous))
+ if (SemaRef.CheckFunctionTemplateSpecialization(
+ Method, DFTSI->TemplateArgumentsAsWritten ? &ExplicitArgs : nullptr,
+ Previous))
Method->setInvalidDecl();
IsExplicitSpecialization = true;
- } else if (const ASTTemplateArgumentListInfo *Info =
- ClassScopeSpecializationArgs.getValueOr(
- D->getTemplateSpecializationArgsAsWritten())) {
+ } else if (const ASTTemplateArgumentListInfo *ArgsWritten =
+ D->getTemplateSpecializationArgsAsWritten()) {
SemaRef.LookupQualifiedName(Previous, DC);
- TemplateArgumentListInfo ExplicitArgs(Info->getLAngleLoc(),
- Info->getRAngleLoc());
- if (SemaRef.Subst(Info->getTemplateArgs(), Info->getNumTemplateArgs(),
- ExplicitArgs, TemplateArgs))
+ TemplateArgumentListInfo ExplicitArgs(ArgsWritten->getLAngleLoc(),
+ ArgsWritten->getRAngleLoc());
+
+ if (SemaRef.SubstTemplateArguments(ArgsWritten->arguments(), TemplateArgs,
+ ExplicitArgs))
return nullptr;
if (SemaRef.CheckFunctionTemplateSpecialization(Method,
@@ -2549,29 +2707,55 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
Method->setInvalidDecl();
IsExplicitSpecialization = true;
- } else if (ClassScopeSpecializationArgs) {
- // Class-scope explicit specialization written without explicit template
- // arguments.
- SemaRef.LookupQualifiedName(Previous, DC);
- if (SemaRef.CheckFunctionTemplateSpecialization(Method, nullptr, Previous))
- Method->setInvalidDecl();
-
- IsExplicitSpecialization = true;
} else if (!FunctionTemplate || TemplateParams || isFriend) {
SemaRef.LookupQualifiedName(Previous, Record);
// In C++, the previous declaration we find might be a tag type
// (class or enum). In this case, the new declaration will hide the
- // tag type. Note that this does does not apply if we're declaring a
+ // tag type. Note that this does not apply if we're declaring a
// typedef (C++ [dcl.typedef]p4).
if (Previous.isSingleTagDecl())
Previous.clear();
}
+ // Per [temp.inst], default arguments in member functions of local classes
+ // are instantiated along with the member function declaration. For example:
+ //
+ // template<typename T>
+ // void ft() {
+ // struct lc {
+ // int operator()(int p = []{ return T::value; }());
+ // };
+ // }
+ // template void ft<int>(); // error: type 'int' cannot be used prior
+ // to '::'because it has no members
+ //
+ // The error is issued during instantiation of ft<int>()::lc::operator()
+ // because substitution into the default argument fails; the default argument
+ // is instantiated even though it is never used.
+ if (D->isInLocalScopeForInstantiation()) {
+ for (unsigned P = 0; P < Params.size(); ++P) {
+ if (!Params[P]->hasDefaultArg())
+ continue;
+ if (SemaRef.SubstDefaultArgument(StartLoc, Params[P], TemplateArgs)) {
+ // If substitution fails, the default argument is set to a
+ // RecoveryExpr that wraps the uninstantiated default argument so
+ // that downstream diagnostics are omitted.
+ Expr *UninstExpr = Params[P]->getUninstantiatedDefaultArg();
+ ExprResult ErrorResult = SemaRef.CreateRecoveryExpr(
+ UninstExpr->getBeginLoc(), UninstExpr->getEndLoc(),
+ { UninstExpr }, UninstExpr->getType());
+ if (ErrorResult.isUsable())
+ Params[P]->setDefaultArg(ErrorResult.get());
+ }
+ }
+ }
+
SemaRef.CheckFunctionDeclaration(nullptr, Method, Previous,
- IsExplicitSpecialization);
+ IsExplicitSpecialization,
+ Method->isThisDeclarationADefinition());
- if (D->isPure())
+ if (D->isPureVirtual())
SemaRef.CheckPureMethod(Method, SourceRange());
// Propagate access. For a non-friend declaration, the access is
@@ -2600,6 +2784,22 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
if (IsExplicitSpecialization && !isFriend)
SemaRef.CompleteMemberSpecialization(Method, Previous);
+ // If the method is a special member function, we need to mark it as
+ // ineligible so that Owner->addDecl() won't mark the class as non trivial.
+ // At the end of the class instantiation, we calculate eligibility again and
+ // then we adjust trivility if needed.
+ // We need this check to happen only after the method parameters are set,
+ // because being e.g. a copy constructor depends on the instantiated
+ // arguments.
+ if (auto *Constructor = dyn_cast<CXXConstructorDecl>(Method)) {
+ if (Constructor->isDefaultConstructor() ||
+ Constructor->isCopyOrMoveConstructor())
+ Method->setIneligibleOrNotSelected(true);
+ } else if (Method->isCopyAssignmentOperator() ||
+ Method->isMoveAssignmentOperator()) {
+ Method->setIneligibleOrNotSelected(true);
+ }
+
// If there's a function template, let our caller handle it.
if (FunctionTemplate) {
// do nothing
@@ -2654,15 +2854,16 @@ Decl *TemplateDeclInstantiator::VisitCXXConversionDecl(CXXConversionDecl *D) {
}
Decl *TemplateDeclInstantiator::VisitParmVarDecl(ParmVarDecl *D) {
- return SemaRef.SubstParmVarDecl(D, TemplateArgs, /*indexAdjustment*/ 0, None,
- /*ExpectParameterPack=*/ false);
+ return SemaRef.SubstParmVarDecl(D, TemplateArgs, /*indexAdjustment*/ 0,
+ std::nullopt,
+ /*ExpectParameterPack=*/false);
}
Decl *TemplateDeclInstantiator::VisitTemplateTypeParmDecl(
TemplateTypeParmDecl *D) {
assert(D->getTypeForDecl()->isTemplateTypeParmType());
- Optional<unsigned> NumExpanded;
+ std::optional<unsigned> NumExpanded;
if (const TypeConstraint *TC = D->getTypeConstraint()) {
if (D->isPackExpansion() && !D->isExpandedParameterPack()) {
@@ -2702,31 +2903,11 @@ Decl *TemplateDeclInstantiator::VisitTemplateTypeParmDecl(
Inst->setImplicit(D->isImplicit());
if (auto *TC = D->getTypeConstraint()) {
if (!D->isImplicit()) {
- // Invented template parameter type constraints will be instantiated with
- // the corresponding auto-typed parameter as it might reference other
- // parameters.
-
- // TODO: Concepts: do not instantiate the constraint (delayed constraint
- // substitution)
- const ASTTemplateArgumentListInfo *TemplArgInfo
- = TC->getTemplateArgsAsWritten();
- TemplateArgumentListInfo InstArgs;
-
- if (TemplArgInfo) {
- InstArgs.setLAngleLoc(TemplArgInfo->LAngleLoc);
- InstArgs.setRAngleLoc(TemplArgInfo->RAngleLoc);
- if (SemaRef.Subst(TemplArgInfo->getTemplateArgs(),
- TemplArgInfo->NumTemplateArgs,
- InstArgs, TemplateArgs))
- return nullptr;
- }
- if (SemaRef.AttachTypeConstraint(
- TC->getNestedNameSpecifierLoc(), TC->getConceptNameInfo(),
- TC->getNamedConcept(), &InstArgs, Inst,
- D->isParameterPack()
- ? cast<CXXFoldExpr>(TC->getImmediatelyDeclaredConstraint())
- ->getEllipsisLoc()
- : SourceLocation()))
+ // Invented template parameter type constraints will be instantiated
+ // with the corresponding auto-typed parameter as it might reference
+ // other parameters.
+ if (SemaRef.SubstTypeConstraint(Inst, TC, TemplateArgs,
+ EvaluateConstraints))
return nullptr;
}
}
@@ -2793,9 +2974,9 @@ Decl *TemplateDeclInstantiator::VisitNonTypeTemplateParmDecl(
// be expanded.
bool Expand = true;
bool RetainExpansion = false;
- Optional<unsigned> OrigNumExpansions
- = Expansion.getTypePtr()->getNumExpansions();
- Optional<unsigned> NumExpansions = OrigNumExpansions;
+ std::optional<unsigned> OrigNumExpansions =
+ Expansion.getTypePtr()->getNumExpansions();
+ std::optional<unsigned> NumExpansions = OrigNumExpansions;
if (SemaRef.CheckParameterPacksForExpansion(Expansion.getEllipsisLoc(),
Pattern.getSourceRange(),
Unexpanded,
@@ -2875,14 +3056,21 @@ Decl *TemplateDeclInstantiator::VisitNonTypeTemplateParmDecl(
D->getPosition(), D->getIdentifier(), T, D->isParameterPack(), DI);
if (AutoTypeLoc AutoLoc = DI->getTypeLoc().getContainedAutoTypeLoc())
- if (AutoLoc.isConstrained())
- if (SemaRef.AttachTypeConstraint(
- AutoLoc, Param,
- IsExpandedParameterPack
- ? DI->getTypeLoc().getAs<PackExpansionTypeLoc>()
- .getEllipsisLoc()
- : SourceLocation()))
+ if (AutoLoc.isConstrained()) {
+ SourceLocation EllipsisLoc;
+ if (IsExpandedParameterPack)
+ EllipsisLoc =
+ DI->getTypeLoc().getAs<PackExpansionTypeLoc>().getEllipsisLoc();
+ else if (auto *Constraint = dyn_cast_if_present<CXXFoldExpr>(
+ D->getPlaceholderTypeConstraint()))
+ EllipsisLoc = Constraint->getEllipsisLoc();
+ // Note: We attach the uninstantiated constriant here, so that it can be
+ // instantiated relative to the top level, like all our other
+ // constraints.
+ if (SemaRef.AttachTypeConstraint(AutoLoc, /*NewConstrainedParm=*/Param,
+ /*OrigConstrainedParm=*/D, EllipsisLoc))
Invalid = true;
+ }
Param->setAccess(AS_public);
Param->setImplicit(D->isImplicit());
@@ -2958,7 +3146,7 @@ TemplateDeclInstantiator::VisitTemplateTemplateParmDecl(
// be expanded.
bool Expand = true;
bool RetainExpansion = false;
- Optional<unsigned> NumExpansions;
+ std::optional<unsigned> NumExpansions;
if (SemaRef.CheckParameterPacksForExpansion(D->getLocation(),
TempParams->getSourceRange(),
Unexpanded,
@@ -3184,9 +3372,11 @@ Decl *TemplateDeclInstantiator::VisitUsingEnumDecl(UsingEnumDecl *D) {
if (SemaRef.RequireCompleteEnumDecl(EnumD, EnumD->getLocation()))
return nullptr;
+ TypeSourceInfo *TSI = SemaRef.SubstType(D->getEnumType(), TemplateArgs,
+ D->getLocation(), D->getDeclName());
UsingEnumDecl *NewUD =
UsingEnumDecl::Create(SemaRef.Context, Owner, D->getUsingLoc(),
- D->getEnumLoc(), D->getLocation(), EnumD);
+ D->getEnumLoc(), D->getLocation(), TSI);
SemaRef.Context.setInstantiatedFromUsingEnumDecl(NewUD, D);
NewUD->setAccess(D->getAccess());
@@ -3227,7 +3417,7 @@ Decl *TemplateDeclInstantiator::instantiateUnresolvedUsingDecl(
// be expanded.
bool Expand = true;
bool RetainExpansion = false;
- Optional<unsigned> NumExpansions;
+ std::optional<unsigned> NumExpansions;
if (SemaRef.CheckParameterPacksForExpansion(
D->getEllipsisLoc(), D->getSourceRange(), Unexpanded, TemplateArgs,
Expand, RetainExpansion, NumExpansions))
@@ -3345,13 +3535,6 @@ Decl *TemplateDeclInstantiator::VisitUsingPackDecl(UsingPackDecl *D) {
return NewD;
}
-Decl *TemplateDeclInstantiator::VisitClassScopeFunctionSpecializationDecl(
- ClassScopeFunctionSpecializationDecl *Decl) {
- CXXMethodDecl *OldFD = Decl->getSpecialization();
- return cast_or_null<CXXMethodDecl>(
- VisitCXXMethodDecl(OldFD, nullptr, Decl->getTemplateArgsAsWritten()));
-}
-
Decl *TemplateDeclInstantiator::VisitOMPThreadPrivateDecl(
OMPThreadPrivateDecl *D) {
SmallVector<Expr *, 5> Vars;
@@ -3380,12 +3563,23 @@ Decl *TemplateDeclInstantiator::VisitOMPAllocateDecl(OMPAllocateDecl *D) {
SmallVector<OMPClause *, 4> Clauses;
// Copy map clauses from the original mapper.
for (OMPClause *C : D->clauselists()) {
- auto *AC = cast<OMPAllocatorClause>(C);
- ExprResult NewE = SemaRef.SubstExpr(AC->getAllocator(), TemplateArgs);
- if (!NewE.isUsable())
- continue;
- OMPClause *IC = SemaRef.ActOnOpenMPAllocatorClause(
- NewE.get(), AC->getBeginLoc(), AC->getLParenLoc(), AC->getEndLoc());
+ OMPClause *IC = nullptr;
+ if (auto *AC = dyn_cast<OMPAllocatorClause>(C)) {
+ ExprResult NewE = SemaRef.SubstExpr(AC->getAllocator(), TemplateArgs);
+ if (!NewE.isUsable())
+ continue;
+ IC = SemaRef.ActOnOpenMPAllocatorClause(
+ NewE.get(), AC->getBeginLoc(), AC->getLParenLoc(), AC->getEndLoc());
+ } else if (auto *AC = dyn_cast<OMPAlignClause>(C)) {
+ ExprResult NewE = SemaRef.SubstExpr(AC->getAlignment(), TemplateArgs);
+ if (!NewE.isUsable())
+ continue;
+ IC = SemaRef.ActOnOpenMPAlignClause(NewE.get(), AC->getBeginLoc(),
+ AC->getLParenLoc(), AC->getEndLoc());
+ // If align clause value ends up being invalid, this can end up null.
+ if (!IC)
+ continue;
+ }
Clauses.push_back(IC);
}
@@ -3464,7 +3658,7 @@ Decl *TemplateDeclInstantiator::VisitOMPDeclareReductionDecl(
SemaRef.CurrentInstantiationScope->InstantiatedLocal(
cast<DeclRefExpr>(D->getInitPriv())->getDecl(),
cast<DeclRefExpr>(NewDRD->getInitPriv())->getDecl());
- if (D->getInitializerKind() == OMPDeclareReductionDecl::CallInit) {
+ if (D->getInitializerKind() == OMPDeclareReductionInitKind::Call) {
SubstInitializer = SemaRef.SubstExpr(Init, TemplateArgs).get();
} else {
auto *OldPrivParm =
@@ -3479,9 +3673,9 @@ Decl *TemplateDeclInstantiator::VisitOMPDeclareReductionDecl(
}
IsCorrect = IsCorrect && SubstCombiner &&
(!Init ||
- (D->getInitializerKind() == OMPDeclareReductionDecl::CallInit &&
+ (D->getInitializerKind() == OMPDeclareReductionInitKind::Call &&
SubstInitializer) ||
- (D->getInitializerKind() != OMPDeclareReductionDecl::CallInit &&
+ (D->getInitializerKind() != OMPDeclareReductionInitKind::Call &&
!SubstInitializer));
(void)SemaRef.ActOnOpenMPDeclareReductionDirectiveEnd(
@@ -3555,9 +3749,10 @@ TemplateDeclInstantiator::VisitOMPDeclareMapperDecl(OMPDeclareMapperDecl *D) {
OMPVarListLocTy Locs(OldC->getBeginLoc(), OldC->getLParenLoc(),
OldC->getEndLoc());
OMPClause *NewC = SemaRef.ActOnOpenMPMapClause(
- OldC->getMapTypeModifiers(), OldC->getMapTypeModifiersLoc(), SS,
- NewNameInfo, OldC->getMapType(), OldC->isImplicitMapType(),
- OldC->getMapLoc(), OldC->getColonLoc(), NewVars, Locs);
+ OldC->getIteratorModifier(), OldC->getMapTypeModifiers(),
+ OldC->getMapTypeModifiersLoc(), SS, NewNameInfo, OldC->getMapType(),
+ OldC->isImplicitMapType(), OldC->getMapLoc(), OldC->getColonLoc(),
+ NewVars, Locs);
Clauses.push_back(NewC);
}
SemaRef.EndOpenMPDSABlock(nullptr);
@@ -3624,26 +3819,23 @@ TemplateDeclInstantiator::VisitClassTemplateSpecializationDecl(
SmallVector<TemplateArgumentLoc, 4> ArgLocs;
for (unsigned I = 0; I != Loc.getNumArgs(); ++I)
ArgLocs.push_back(Loc.getArgLoc(I));
- if (SemaRef.Subst(ArgLocs.data(), ArgLocs.size(),
- InstTemplateArgs, TemplateArgs))
+ if (SemaRef.SubstTemplateArguments(ArgLocs, TemplateArgs, InstTemplateArgs))
return nullptr;
// Check that the template argument list is well-formed for this
// class template.
- SmallVector<TemplateArgument, 4> Converted;
- if (SemaRef.CheckTemplateArgumentList(InstClassTemplate,
- D->getLocation(),
- InstTemplateArgs,
- false,
- Converted,
- /*UpdateArgsWithConversion=*/true))
+ SmallVector<TemplateArgument, 4> SugaredConverted, CanonicalConverted;
+ if (SemaRef.CheckTemplateArgumentList(InstClassTemplate, D->getLocation(),
+ InstTemplateArgs, false,
+ SugaredConverted, CanonicalConverted,
+ /*UpdateArgsWithConversions=*/true))
return nullptr;
// Figure out where to insert this class template explicit specialization
// in the member template's set of class template explicit specializations.
void *InsertPos = nullptr;
ClassTemplateSpecializationDecl *PrevDecl =
- InstClassTemplate->findSpecialization(Converted, InsertPos);
+ InstClassTemplate->findSpecialization(CanonicalConverted, InsertPos);
// Check whether we've already seen a conflicting instantiation of this
// declaration (for instance, if there was a prior implicit instantiation).
@@ -3681,7 +3873,7 @@ TemplateDeclInstantiator::VisitClassTemplateSpecializationDecl(
ClassTemplateSpecializationDecl *InstD =
ClassTemplateSpecializationDecl::Create(
SemaRef.Context, D->getTagKind(), Owner, D->getBeginLoc(),
- D->getLocation(), InstClassTemplate, Converted, PrevDecl);
+ D->getLocation(), InstClassTemplate, CanonicalConverted, PrevDecl);
// Add this partial specialization to the set of class template partial
// specializations.
@@ -3695,7 +3887,7 @@ TemplateDeclInstantiator::VisitClassTemplateSpecializationDecl(
// Build the canonical type that describes the converted template
// arguments of the class template explicit specialization.
QualType CanonType = SemaRef.Context.getTemplateSpecializationType(
- TemplateName(InstClassTemplate), Converted,
+ TemplateName(InstClassTemplate), CanonicalConverted,
SemaRef.Context.getRecordType(InstD));
// Build the fully-sugared type for this class template
@@ -3746,25 +3938,28 @@ Decl *TemplateDeclInstantiator::VisitVarTemplateSpecializationDecl(
return nullptr;
// Substitute the current template arguments.
- const TemplateArgumentListInfo &TemplateArgsInfo = D->getTemplateArgsInfo();
- VarTemplateArgsInfo.setLAngleLoc(TemplateArgsInfo.getLAngleLoc());
- VarTemplateArgsInfo.setRAngleLoc(TemplateArgsInfo.getRAngleLoc());
+ if (const ASTTemplateArgumentListInfo *TemplateArgsInfo =
+ D->getTemplateArgsInfo()) {
+ VarTemplateArgsInfo.setLAngleLoc(TemplateArgsInfo->getLAngleLoc());
+ VarTemplateArgsInfo.setRAngleLoc(TemplateArgsInfo->getRAngleLoc());
- if (SemaRef.Subst(TemplateArgsInfo.getArgumentArray(),
- TemplateArgsInfo.size(), VarTemplateArgsInfo, TemplateArgs))
- return nullptr;
+ if (SemaRef.SubstTemplateArguments(TemplateArgsInfo->arguments(),
+ TemplateArgs, VarTemplateArgsInfo))
+ return nullptr;
+ }
// Check that the template argument list is well-formed for this template.
- SmallVector<TemplateArgument, 4> Converted;
+ SmallVector<TemplateArgument, 4> SugaredConverted, CanonicalConverted;
if (SemaRef.CheckTemplateArgumentList(InstVarTemplate, D->getLocation(),
- VarTemplateArgsInfo, false, Converted,
- /*UpdateArgsWithConversion=*/true))
+ VarTemplateArgsInfo, false,
+ SugaredConverted, CanonicalConverted,
+ /*UpdateArgsWithConversions=*/true))
return nullptr;
// Check whether we've already seen a declaration of this specialization.
void *InsertPos = nullptr;
VarTemplateSpecializationDecl *PrevDecl =
- InstVarTemplate->findSpecialization(Converted, InsertPos);
+ InstVarTemplate->findSpecialization(CanonicalConverted, InsertPos);
// Check whether we've already seen a conflicting instantiation of this
// declaration (for instance, if there was a prior implicit instantiation).
@@ -3776,7 +3971,7 @@ Decl *TemplateDeclInstantiator::VisitVarTemplateSpecializationDecl(
return nullptr;
return VisitVarTemplateSpecializationDecl(
- InstVarTemplate, D, VarTemplateArgsInfo, Converted, PrevDecl);
+ InstVarTemplate, D, VarTemplateArgsInfo, CanonicalConverted, PrevDecl);
}
Decl *TemplateDeclInstantiator::VisitVarTemplateSpecializationDecl(
@@ -3841,6 +4036,11 @@ Decl *TemplateDeclInstantiator::VisitConceptDecl(ConceptDecl *D) {
llvm_unreachable("Concept definitions cannot reside inside a template");
}
+Decl *TemplateDeclInstantiator::VisitImplicitConceptSpecializationDecl(
+ ImplicitConceptSpecializationDecl *D) {
+ llvm_unreachable("Concept specializations cannot reside inside a template");
+}
+
Decl *
TemplateDeclInstantiator::VisitRequiresExprBodyDecl(RequiresExprBodyDecl *D) {
return RequiresExprBodyDecl::Create(SemaRef.Context, D->getDeclContext(),
@@ -3912,14 +4112,14 @@ FunctionDecl *Sema::SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
Decl *R;
if (auto *MD = dyn_cast<CXXMethodDecl>(Spaceship)) {
R = Instantiator.VisitCXXMethodDecl(
- MD, nullptr, None,
+ MD, /*TemplateParams=*/nullptr,
TemplateDeclInstantiator::RewriteKind::RewriteSpaceshipAsEqualEqual);
} else {
assert(Spaceship->getFriendObjectKind() &&
"defaulted spaceship is neither a member nor a friend");
R = Instantiator.VisitFunctionDecl(
- Spaceship, nullptr,
+ Spaceship, /*TemplateParams=*/nullptr,
TemplateDeclInstantiator::RewriteKind::RewriteSpaceshipAsEqualEqual);
if (!R)
return nullptr;
@@ -3958,18 +4158,7 @@ TemplateDeclInstantiator::SubstTemplateParams(TemplateParameterList *L) {
if (Invalid)
return nullptr;
- // FIXME: Concepts: Substitution into requires clause should only happen when
- // checking satisfaction.
- Expr *InstRequiresClause = nullptr;
- if (Expr *E = L->getRequiresClause()) {
- EnterExpressionEvaluationContext ConstantEvaluated(
- SemaRef, Sema::ExpressionEvaluationContext::Unevaluated);
- ExprResult Res = SemaRef.SubstExpr(E, TemplateArgs);
- if (Res.isInvalid() || !Res.isUsable()) {
- return nullptr;
- }
- InstRequiresClause = Res.get();
- }
+ Expr *InstRequiresClause = L->getRequiresClause();
TemplateParameterList *InstL
= TemplateParameterList::Create(SemaRef.Context, L->getTemplateLoc(),
@@ -3980,8 +4169,10 @@ TemplateDeclInstantiator::SubstTemplateParams(TemplateParameterList *L) {
TemplateParameterList *
Sema::SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
- const MultiLevelTemplateArgumentList &TemplateArgs) {
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ bool EvaluateConstraints) {
TemplateDeclInstantiator Instantiator(*this, Owner, TemplateArgs);
+ Instantiator.setEvaluateConstraints(EvaluateConstraints);
return Instantiator.SubstTemplateParams(Params);
}
@@ -4018,39 +4209,35 @@ TemplateDeclInstantiator::InstantiateClassTemplatePartialSpecialization(
= PartialSpec->getTemplateArgsAsWritten();
TemplateArgumentListInfo InstTemplateArgs(TemplArgInfo->LAngleLoc,
TemplArgInfo->RAngleLoc);
- if (SemaRef.Subst(TemplArgInfo->getTemplateArgs(),
- TemplArgInfo->NumTemplateArgs,
- InstTemplateArgs, TemplateArgs))
+ if (SemaRef.SubstTemplateArguments(TemplArgInfo->arguments(), TemplateArgs,
+ InstTemplateArgs))
return nullptr;
// Check that the template argument list is well-formed for this
// class template.
- SmallVector<TemplateArgument, 4> Converted;
- if (SemaRef.CheckTemplateArgumentList(ClassTemplate,
- PartialSpec->getLocation(),
- InstTemplateArgs,
- false,
- Converted))
+ SmallVector<TemplateArgument, 4> SugaredConverted, CanonicalConverted;
+ if (SemaRef.CheckTemplateArgumentList(
+ ClassTemplate, PartialSpec->getLocation(), InstTemplateArgs,
+ /*PartialTemplateArgs=*/false, SugaredConverted, CanonicalConverted))
return nullptr;
// Check these arguments are valid for a template partial specialization.
if (SemaRef.CheckTemplatePartialSpecializationArgs(
PartialSpec->getLocation(), ClassTemplate, InstTemplateArgs.size(),
- Converted))
+ CanonicalConverted))
return nullptr;
// Figure out where to insert this class template partial specialization
// in the member template's set of class template partial specializations.
void *InsertPos = nullptr;
- ClassTemplateSpecializationDecl *PrevDecl
- = ClassTemplate->findPartialSpecialization(Converted, InstParams,
+ ClassTemplateSpecializationDecl *PrevDecl =
+ ClassTemplate->findPartialSpecialization(CanonicalConverted, InstParams,
InsertPos);
// Build the canonical type that describes the converted template
// arguments of the class template partial specialization.
- QualType CanonType
- = SemaRef.Context.getTemplateSpecializationType(TemplateName(ClassTemplate),
- Converted);
+ QualType CanonType = SemaRef.Context.getTemplateSpecializationType(
+ TemplateName(ClassTemplate), CanonicalConverted);
// Build the fully-sugared type for this class template
// specialization as the user wrote in the specialization
@@ -4095,7 +4282,8 @@ TemplateDeclInstantiator::InstantiateClassTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl::Create(
SemaRef.Context, PartialSpec->getTagKind(), Owner,
PartialSpec->getBeginLoc(), PartialSpec->getLocation(), InstParams,
- ClassTemplate, Converted, InstTemplateArgs, CanonType, nullptr);
+ ClassTemplate, CanonicalConverted, InstTemplateArgs, CanonType,
+ nullptr);
// Substitute the nested name specifier, if any.
if (SubstQualifier(PartialSpec, InstPartialSpec))
return nullptr;
@@ -4146,34 +4334,35 @@ TemplateDeclInstantiator::InstantiateVarTemplatePartialSpecialization(
= PartialSpec->getTemplateArgsAsWritten();
TemplateArgumentListInfo InstTemplateArgs(TemplArgInfo->LAngleLoc,
TemplArgInfo->RAngleLoc);
- if (SemaRef.Subst(TemplArgInfo->getTemplateArgs(),
- TemplArgInfo->NumTemplateArgs,
- InstTemplateArgs, TemplateArgs))
+ if (SemaRef.SubstTemplateArguments(TemplArgInfo->arguments(), TemplateArgs,
+ InstTemplateArgs))
return nullptr;
// Check that the template argument list is well-formed for this
// class template.
- SmallVector<TemplateArgument, 4> Converted;
- if (SemaRef.CheckTemplateArgumentList(VarTemplate, PartialSpec->getLocation(),
- InstTemplateArgs, false, Converted))
+ SmallVector<TemplateArgument, 4> SugaredConverted, CanonicalConverted;
+ if (SemaRef.CheckTemplateArgumentList(
+ VarTemplate, PartialSpec->getLocation(), InstTemplateArgs,
+ /*PartialTemplateArgs=*/false, SugaredConverted, CanonicalConverted))
return nullptr;
// Check these arguments are valid for a template partial specialization.
if (SemaRef.CheckTemplatePartialSpecializationArgs(
PartialSpec->getLocation(), VarTemplate, InstTemplateArgs.size(),
- Converted))
+ CanonicalConverted))
return nullptr;
// Figure out where to insert this variable template partial specialization
// in the member template's set of variable template partial specializations.
void *InsertPos = nullptr;
VarTemplateSpecializationDecl *PrevDecl =
- VarTemplate->findPartialSpecialization(Converted, InstParams, InsertPos);
+ VarTemplate->findPartialSpecialization(CanonicalConverted, InstParams,
+ InsertPos);
// Build the canonical type that describes the converted template
// arguments of the variable template partial specialization.
QualType CanonType = SemaRef.Context.getTemplateSpecializationType(
- TemplateName(VarTemplate), Converted);
+ TemplateName(VarTemplate), CanonicalConverted);
// Build the fully-sugared type for this variable template
// specialization as the user wrote in the specialization
@@ -4229,7 +4418,8 @@ TemplateDeclInstantiator::InstantiateVarTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl::Create(
SemaRef.Context, Owner, PartialSpec->getInnerLocStart(),
PartialSpec->getLocation(), InstParams, VarTemplate, DI->getType(),
- DI, PartialSpec->getStorageClass(), Converted, InstTemplateArgs);
+ DI, PartialSpec->getStorageClass(), CanonicalConverted,
+ InstTemplateArgs);
// Substitute the nested name specifier, if any.
if (SubstQualifier(PartialSpec, InstPartialSpec))
@@ -4262,14 +4452,12 @@ TemplateDeclInstantiator::SubstFunctionType(FunctionDecl *D,
Qualifiers ThisTypeQuals;
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
ThisContext = cast<CXXRecordDecl>(Owner);
- ThisTypeQuals = Method->getMethodQualifiers();
+ ThisTypeQuals = Method->getFunctionObjectParameterType().getQualifiers();
}
- TypeSourceInfo *NewTInfo
- = SemaRef.SubstFunctionDeclType(OldTInfo, TemplateArgs,
- D->getTypeSpecStartLoc(),
- D->getDeclName(),
- ThisContext, ThisTypeQuals);
+ TypeSourceInfo *NewTInfo = SemaRef.SubstFunctionDeclType(
+ OldTInfo, TemplateArgs, D->getTypeSpecStartLoc(), D->getDeclName(),
+ ThisContext, ThisTypeQuals, EvaluateConstraints);
if (!NewTInfo)
return nullptr;
@@ -4288,7 +4476,7 @@ TemplateDeclInstantiator::SubstFunctionType(FunctionDecl *D,
LocalInstantiationScope *Scope = SemaRef.CurrentInstantiationScope;
- Optional<unsigned> NumArgumentsInExpansion;
+ std::optional<unsigned> NumArgumentsInExpansion;
if (OldParam->isParameterPack())
NumArgumentsInExpansion =
SemaRef.getNumArgumentsInExpansion(OldParam->getType(),
@@ -4353,13 +4541,43 @@ TemplateDeclInstantiator::SubstFunctionType(FunctionDecl *D,
return NewTInfo;
}
+/// Introduce the instantiated local variables into the local
+/// instantiation scope.
+void Sema::addInstantiatedLocalVarsToScope(FunctionDecl *Function,
+ const FunctionDecl *PatternDecl,
+ LocalInstantiationScope &Scope) {
+ LambdaScopeInfo *LSI = cast<LambdaScopeInfo>(getFunctionScopes().back());
+
+ for (auto *decl : PatternDecl->decls()) {
+ if (!isa<VarDecl>(decl) || isa<ParmVarDecl>(decl))
+ continue;
+
+ VarDecl *VD = cast<VarDecl>(decl);
+ IdentifierInfo *II = VD->getIdentifier();
+
+ auto it = llvm::find_if(Function->decls(), [&](Decl *inst) {
+ VarDecl *InstVD = dyn_cast<VarDecl>(inst);
+ return InstVD && InstVD->isLocalVarDecl() &&
+ InstVD->getIdentifier() == II;
+ });
+
+ if (it == Function->decls().end())
+ continue;
+
+ Scope.InstantiatedLocal(VD, *it);
+ LSI->addCapture(cast<VarDecl>(*it), /*isBlock=*/false, /*isByref=*/false,
+ /*isNested=*/false, VD->getLocation(), SourceLocation(),
+ VD->getType(), /*Invalid=*/false);
+ }
+}
+
/// Introduce the instantiated function parameters into the local
/// instantiation scope, and set the parameter names to those used
/// in the template.
-static bool addInstantiatedParametersToScope(Sema &S, FunctionDecl *Function,
- const FunctionDecl *PatternDecl,
- LocalInstantiationScope &Scope,
- const MultiLevelTemplateArgumentList &TemplateArgs) {
+bool Sema::addInstantiatedParametersToScope(
+ FunctionDecl *Function, const FunctionDecl *PatternDecl,
+ LocalInstantiationScope &Scope,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
unsigned FParamIdx = 0;
for (unsigned I = 0, N = PatternDecl->getNumParams(); I != N; ++I) {
const ParmVarDecl *PatternParam = PatternDecl->getParamDecl(I);
@@ -4375,9 +4593,9 @@ static bool addInstantiatedParametersToScope(Sema &S, FunctionDecl *Function,
// it's instantiation-dependent.
// FIXME: Updating the type to work around this is at best fragile.
if (!PatternDecl->getType()->isDependentType()) {
- QualType T = S.SubstType(PatternParam->getType(), TemplateArgs,
- FunctionParam->getLocation(),
- FunctionParam->getDeclName());
+ QualType T = SubstType(PatternParam->getType(), TemplateArgs,
+ FunctionParam->getLocation(),
+ FunctionParam->getDeclName());
if (T.isNull())
return true;
FunctionParam->setType(T);
@@ -4390,8 +4608,8 @@ static bool addInstantiatedParametersToScope(Sema &S, FunctionDecl *Function,
// Expand the parameter pack.
Scope.MakeInstantiatedLocalArgPack(PatternParam);
- Optional<unsigned> NumArgumentsInExpansion
- = S.getNumArgumentsInExpansion(PatternParam->getType(), TemplateArgs);
+ std::optional<unsigned> NumArgumentsInExpansion =
+ getNumArgumentsInExpansion(PatternParam->getType(), TemplateArgs);
if (NumArgumentsInExpansion) {
QualType PatternType =
PatternParam->getType()->castAs<PackExpansionType>()->getPattern();
@@ -4399,10 +4617,10 @@ static bool addInstantiatedParametersToScope(Sema &S, FunctionDecl *Function,
ParmVarDecl *FunctionParam = Function->getParamDecl(FParamIdx);
FunctionParam->setDeclName(PatternParam->getDeclName());
if (!PatternDecl->getType()->isDependentType()) {
- Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, Arg);
- QualType T = S.SubstType(PatternType, TemplateArgs,
- FunctionParam->getLocation(),
- FunctionParam->getDeclName());
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(*this, Arg);
+ QualType T =
+ SubstType(PatternType, TemplateArgs, FunctionParam->getLocation(),
+ FunctionParam->getDeclName());
if (T.isNull())
return true;
FunctionParam->setType(T);
@@ -4420,10 +4638,6 @@ static bool addInstantiatedParametersToScope(Sema &S, FunctionDecl *Function,
bool Sema::InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param) {
assert(Param->hasUninstantiatedDefaultArg());
- Expr *UninstExpr = Param->getUninstantiatedDefaultArg();
-
- EnterExpressionEvaluationContext EvalContext(
- *this, ExpressionEvaluationContext::PotentiallyEvaluated, Param);
// Instantiate the expression.
//
@@ -4442,63 +4656,13 @@ bool Sema::InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
//
// template<typename T>
// A<T> Foo(int a = A<T>::FooImpl());
- MultiLevelTemplateArgumentList TemplateArgs
- = getTemplateInstantiationArgs(FD, nullptr, /*RelativeToPrimary=*/true);
-
- InstantiatingTemplate Inst(*this, CallLoc, Param,
- TemplateArgs.getInnermost());
- if (Inst.isInvalid())
- return true;
- if (Inst.isAlreadyInstantiating()) {
- Diag(Param->getBeginLoc(), diag::err_recursive_default_argument) << FD;
- Param->setInvalidDecl();
- return true;
- }
-
- ExprResult Result;
- {
- // C++ [dcl.fct.default]p5:
- // The names in the [default argument] expression are bound, and
- // the semantic constraints are checked, at the point where the
- // default argument expression appears.
- ContextRAII SavedContext(*this, FD);
- LocalInstantiationScope Local(*this);
-
- FunctionDecl *Pattern = FD->getTemplateInstantiationPattern(
- /*ForDefinition*/ false);
- if (addInstantiatedParametersToScope(*this, FD, Pattern, Local,
- TemplateArgs))
- return true;
-
- runWithSufficientStackSpace(CallLoc, [&] {
- Result = SubstInitializer(UninstExpr, TemplateArgs,
- /*DirectInit*/false);
- });
- }
- if (Result.isInvalid())
- return true;
-
- // Check the expression as an initializer for the parameter.
- InitializedEntity Entity
- = InitializedEntity::InitializeParameter(Context, Param);
- InitializationKind Kind = InitializationKind::CreateCopy(
- Param->getLocation(),
- /*FIXME:EqualLoc*/ UninstExpr->getBeginLoc());
- Expr *ResultE = Result.getAs<Expr>();
+ MultiLevelTemplateArgumentList TemplateArgs = getTemplateInstantiationArgs(
+ FD, FD->getLexicalDeclContext(), /*Final=*/false, nullptr,
+ /*RelativeToPrimary=*/true);
- InitializationSequence InitSeq(*this, Entity, Kind, ResultE);
- Result = InitSeq.Perform(*this, Entity, Kind, ResultE);
- if (Result.isInvalid())
- return true;
-
- Result =
- ActOnFinishFullExpr(Result.getAs<Expr>(), Param->getOuterLocStart(),
- /*DiscardedValue*/ false);
- if (Result.isInvalid())
+ if (SubstDefaultArgument(CallLoc, Param, TemplateArgs, /*ForCallExpr*/ true))
return true;
- // Remember the instantiated default argument.
- Param->setDefaultArg(Result.getAs<Expr>());
if (ASTMutationListener *L = getASTMutationListener())
L->DefaultArgumentInstantiated(Param);
@@ -4532,16 +4696,16 @@ void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
Sema::ContextRAII savedContext(*this, Decl);
LocalInstantiationScope Scope(*this);
- MultiLevelTemplateArgumentList TemplateArgs =
- getTemplateInstantiationArgs(Decl, nullptr, /*RelativeToPrimary*/true);
+ MultiLevelTemplateArgumentList TemplateArgs = getTemplateInstantiationArgs(
+ Decl, Decl->getLexicalDeclContext(), /*Final=*/false, nullptr,
+ /*RelativeToPrimary*/ true);
// FIXME: We can't use getTemplateInstantiationPattern(false) in general
// here, because for a non-defining friend declaration in a class template,
// we don't store enough information to map back to the friend declaration in
// the template.
FunctionDecl *Template = Proto->getExceptionSpecTemplate();
- if (addInstantiatedParametersToScope(*this, Decl, Template, Scope,
- TemplateArgs)) {
+ if (addInstantiatedParametersToScope(Decl, Template, Scope, TemplateArgs)) {
UpdateExceptionSpec(Decl, EST_None);
return;
}
@@ -4550,53 +4714,6 @@ void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
TemplateArgs);
}
-bool Sema::CheckInstantiatedFunctionTemplateConstraints(
- SourceLocation PointOfInstantiation, FunctionDecl *Decl,
- ArrayRef<TemplateArgument> TemplateArgs,
- ConstraintSatisfaction &Satisfaction) {
- // In most cases we're not going to have constraints, so check for that first.
- FunctionTemplateDecl *Template = Decl->getPrimaryTemplate();
- // Note - code synthesis context for the constraints check is created
- // inside CheckConstraintsSatisfaction.
- SmallVector<const Expr *, 3> TemplateAC;
- Template->getAssociatedConstraints(TemplateAC);
- if (TemplateAC.empty()) {
- Satisfaction.IsSatisfied = true;
- return false;
- }
-
- // Enter the scope of this instantiation. We don't use
- // PushDeclContext because we don't have a scope.
- Sema::ContextRAII savedContext(*this, Decl);
- LocalInstantiationScope Scope(*this);
-
- // If this is not an explicit specialization - we need to get the instantiated
- // version of the template arguments and add them to scope for the
- // substitution.
- if (Decl->isTemplateInstantiation()) {
- InstantiatingTemplate Inst(*this, Decl->getPointOfInstantiation(),
- InstantiatingTemplate::ConstraintsCheck{}, Decl->getPrimaryTemplate(),
- TemplateArgs, SourceRange());
- if (Inst.isInvalid())
- return true;
- MultiLevelTemplateArgumentList MLTAL(
- *Decl->getTemplateSpecializationArgs());
- if (addInstantiatedParametersToScope(
- *this, Decl, Decl->getPrimaryTemplate()->getTemplatedDecl(),
- Scope, MLTAL))
- return true;
- }
- Qualifiers ThisQuals;
- CXXRecordDecl *Record = nullptr;
- if (auto *Method = dyn_cast<CXXMethodDecl>(Decl)) {
- ThisQuals = Method->getMethodQualifiers();
- Record = Method->getParent();
- }
- CXXThisScopeRAII ThisScope(*this, Record, ThisQuals, Record != nullptr);
- return CheckConstraintSatisfaction(Template, TemplateAC, TemplateArgs,
- PointOfInstantiation, Satisfaction);
-}
-
/// Initializes the common fields of an instantiation function
/// declaration (New) from the corresponding fields of its template (Tmpl).
///
@@ -4624,11 +4741,7 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
ActiveInstType &ActiveInst = SemaRef.CodeSynthesisContexts.back();
if (ActiveInst.Kind == ActiveInstType::ExplicitTemplateArgumentSubstitution ||
ActiveInst.Kind == ActiveInstType::DeducedTemplateArgumentSubstitution) {
- if (FunctionTemplateDecl *FunTmpl
- = dyn_cast<FunctionTemplateDecl>(ActiveInst.Entity)) {
- assert(FunTmpl->getTemplatedDecl() == Tmpl &&
- "Deduction from the wrong function template?");
- (void) FunTmpl;
+ if (isa<FunctionTemplateDecl>(ActiveInst.Entity)) {
SemaRef.InstantiatingSpecializations.erase(
{ActiveInst.Entity->getCanonicalDecl(), ActiveInst.Kind});
atTemplateEnd(SemaRef.TemplateInstCallbacks, SemaRef, ActiveInst);
@@ -4755,7 +4868,8 @@ Sema::InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
return nullptr;
ContextRAII SavedContext(*this, FD);
- MultiLevelTemplateArgumentList MArgs(*Args);
+ MultiLevelTemplateArgumentList MArgs(FTD, Args->asArray(),
+ /*Final=*/false);
return cast_or_null<FunctionDecl>(SubstDecl(FD, FD->getParent(), MArgs));
}
@@ -4792,6 +4906,12 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
if (TSK == TSK_ExplicitSpecialization)
return;
+ // Never implicitly instantiate a builtin; we don't actually need a function
+ // body.
+ if (Function->getBuiltinID() && TSK == TSK_ImplicitInstantiation &&
+ !DefinitionRequired)
+ return;
+
// Don't instantiate a definition if we already have one.
const FunctionDecl *ExistingDefn = nullptr;
if (Function->isDefined(ExistingDefn,
@@ -4827,7 +4947,8 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
/*Complain*/DefinitionRequired)) {
if (DefinitionRequired)
Function->setInvalidDecl();
- else if (TSK == TSK_ExplicitInstantiationDefinition) {
+ else if (TSK == TSK_ExplicitInstantiationDefinition ||
+ (Function->isConstexpr() && !Recursive)) {
// Try again at the end of the translation unit (at which point a
// definition will be required).
assert(!Recursive);
@@ -4842,7 +4963,7 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
Diag(PatternDecl->getLocation(), diag::note_forward_template_decl);
if (getLangOpts().CPlusPlus11)
Diag(PointOfInstantiation, diag::note_inst_declaration_hint)
- << Function;
+ << Function;
}
}
@@ -4888,6 +5009,7 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
"missing LateParsedTemplate");
LateTemplateParser(OpaqueParser, *LPTIter->second);
Pattern = PatternDecl->getBody(PatternDecl);
+ updateAttrsForLateParsedTemplate(PatternDecl, Function);
}
// Note, we should never try to instantiate a deleted function template.
@@ -4926,8 +5048,10 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
// unimported module.
Function->setVisibleDespiteOwningModule();
- // Copy the inner loc start from the pattern.
+ // Copy the source locations from the pattern.
+ Function->setLocation(PatternDecl->getLocation());
Function->setInnerLocStart(PatternDecl->getInnerLocStart());
+ Function->setRangeEnd(PatternDecl->getEndLoc());
EnterExpressionEvaluationContext EvalContext(
*this, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
@@ -5001,6 +5125,7 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
} IR{*this, PatternRec, NewRec};
TypeSourceInfo *NewSI = IR.TransformType(Function->getTypeSourceInfo());
+ assert(NewSI && "Type Transform failed?");
Function->setType(NewSI->getType());
Function->setTypeSourceInfo(NewSI);
@@ -5014,8 +5139,9 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
RebuildTypeSourceInfoForDefaultSpecialMembers();
SetDeclDefaulted(Function, PatternDecl->getLocation());
} else {
- MultiLevelTemplateArgumentList TemplateArgs =
- getTemplateInstantiationArgs(Function, nullptr, false, PatternDecl);
+ MultiLevelTemplateArgumentList TemplateArgs = getTemplateInstantiationArgs(
+ Function, Function->getLexicalDeclContext(), /*Final=*/false, nullptr,
+ false, PatternDecl);
// Substitute into the qualifier; we can get a substitution failure here
// through evil use of alias templates.
@@ -5029,7 +5155,11 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
// PushDeclContext because we don't have a scope.
Sema::ContextRAII savedContext(*this, Function);
- if (addInstantiatedParametersToScope(*this, Function, PatternDecl, Scope,
+ FPFeaturesStateRAII SavedFPFeatures(*this);
+ CurFPFeatures = FPOptions(getLangOpts());
+ FpPragmaStack.CurrentValue = FPOptionsOverride();
+
+ if (addInstantiatedParametersToScope(Function, PatternDecl, Scope,
TemplateArgs))
return;
@@ -5084,8 +5214,7 @@ VarTemplateSpecializationDecl *Sema::BuildVarTemplateInstantiation(
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
- SourceLocation PointOfInstantiation,
- LateInstantiatedAttrVec *LateAttrs,
+ SourceLocation PointOfInstantiation, LateInstantiatedAttrVec *LateAttrs,
LocalInstantiationScope *StartingScope) {
if (FromVar->isInvalidDecl())
return nullptr;
@@ -5094,9 +5223,6 @@ VarTemplateSpecializationDecl *Sema::BuildVarTemplateInstantiation(
if (Inst.isInvalid())
return nullptr;
- MultiLevelTemplateArgumentList TemplateArgLists;
- TemplateArgLists.addOuterTemplateArguments(&TemplateArgList);
-
// Instantiate the first declaration of the variable template: for a partial
// specialization of a static data member template, the first declaration may
// or may not be the declaration in the class; if it's in the class, we want
@@ -5107,15 +5233,21 @@ VarTemplateSpecializationDecl *Sema::BuildVarTemplateInstantiation(
// partial specialization, don't do this. The member specialization completely
// replaces the original declaration in this case.
bool IsMemberSpec = false;
- if (VarTemplatePartialSpecializationDecl *PartialSpec =
- dyn_cast<VarTemplatePartialSpecializationDecl>(FromVar))
+ MultiLevelTemplateArgumentList MultiLevelList;
+ if (auto *PartialSpec =
+ dyn_cast<VarTemplatePartialSpecializationDecl>(FromVar)) {
IsMemberSpec = PartialSpec->isMemberSpecialization();
- else if (VarTemplateDecl *FromTemplate = FromVar->getDescribedVarTemplate())
- IsMemberSpec = FromTemplate->isMemberSpecialization();
+ MultiLevelList.addOuterTemplateArguments(
+ PartialSpec, TemplateArgList.asArray(), /*Final=*/false);
+ } else {
+ assert(VarTemplate == FromVar->getDescribedVarTemplate());
+ IsMemberSpec = VarTemplate->isMemberSpecialization();
+ MultiLevelList.addOuterTemplateArguments(
+ VarTemplate, TemplateArgList.asArray(), /*Final=*/false);
+ }
if (!IsMemberSpec)
FromVar = FromVar->getFirstDecl();
- MultiLevelTemplateArgumentList MultiLevelList(TemplateArgList);
TemplateDeclInstantiator Instantiator(*this, FromVar->getDeclContext(),
MultiLevelList);
@@ -5544,8 +5676,18 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
// declaration of the definition.
TemplateDeclInstantiator Instantiator(*this, Var->getDeclContext(),
TemplateArgs);
+
+ TemplateArgumentListInfo TemplateArgInfo;
+ if (const ASTTemplateArgumentListInfo *ArgInfo =
+ VarSpec->getTemplateArgsInfo()) {
+ TemplateArgInfo.setLAngleLoc(ArgInfo->getLAngleLoc());
+ TemplateArgInfo.setRAngleLoc(ArgInfo->getRAngleLoc());
+ for (const TemplateArgumentLoc &Arg : ArgInfo->arguments())
+ TemplateArgInfo.addArgument(Arg);
+ }
+
Var = cast_or_null<VarDecl>(Instantiator.VisitVarTemplateSpecializationDecl(
- VarSpec->getSpecializedTemplate(), Def, VarSpec->getTemplateArgsInfo(),
+ VarSpec->getSpecializedTemplate(), Def, TemplateArgInfo,
VarSpec->getTemplateArgs().asArray(), VarSpec));
if (Var) {
llvm::PointerUnion<VarTemplateDecl *,
@@ -5604,7 +5746,7 @@ Sema::InstantiateMemInitializers(CXXConstructorDecl *New,
collectUnexpandedParameterPacks(Init->getInit(), Unexpanded);
bool ShouldExpand = false;
bool RetainExpansion = false;
- Optional<unsigned> NumExpansions;
+ std::optional<unsigned> NumExpansions;
if (CheckParameterPacksForExpansion(Init->getEllipsisLoc(),
BaseTL.getSourceRange(),
Unexpanded,
@@ -5985,11 +6127,11 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext) {
DeclContext *ParentDC = D->getDeclContext();
- // Determine whether our parent context depends on any of the tempalte
+ // Determine whether our parent context depends on any of the template
// arguments we're currently substituting.
bool ParentDependsOnArgs = isDependentContextAtLevel(
ParentDC, TemplateArgs.getNumRetainedOuterLevels());
- // FIXME: Parmeters of pointer to functions (y below) that are themselves
+ // FIXME: Parameters of pointer to functions (y below) that are themselves
// parameters (p below) can have their ParentDC set to the translation-unit
// - thus we can not consistently check if the ParentDC of such a parameter
// is Dependent or/and a FunctionOrMethod.
@@ -6013,7 +6155,9 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
(ParentDependsOnArgs && (ParentDC->isFunctionOrMethod() ||
isa<OMPDeclareReductionDecl>(ParentDC) ||
isa<OMPDeclareMapperDecl>(ParentDC))) ||
- (isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda())) {
+ (isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda() &&
+ cast<CXXRecordDecl>(D)->getTemplateDepth() >
+ TemplateArgs.getNumRetainedOuterLevels())) {
// D is a local of some kind. Look into the map of local
// declarations to their instantiations.
if (CurrentInstantiationScope) {
@@ -6082,13 +6226,13 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
return D;
// Determine whether this record is the "templated" declaration describing
- // a class template or class template partial specialization.
+ // a class template or class template specialization.
ClassTemplateDecl *ClassTemplate = Record->getDescribedClassTemplate();
if (ClassTemplate)
ClassTemplate = ClassTemplate->getCanonicalDecl();
- else if (ClassTemplatePartialSpecializationDecl *PartialSpec
- = dyn_cast<ClassTemplatePartialSpecializationDecl>(Record))
- ClassTemplate = PartialSpec->getSpecializedTemplate()->getCanonicalDecl();
+ else if (ClassTemplateSpecializationDecl *Spec =
+ dyn_cast<ClassTemplateSpecializationDecl>(Record))
+ ClassTemplate = Spec->getSpecializedTemplate()->getCanonicalDecl();
// Walk the current context to find either the record or an instantiation of
// it.
@@ -6116,7 +6260,8 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
// Move to the outer template scope.
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(DC)) {
- if (FD->getFriendObjectKind() && FD->getDeclContext()->isFileContext()){
+ if (FD->getFriendObjectKind() &&
+ FD->getNonTransparentDeclContext()->isFileContext()) {
DC = FD->getLexicalDeclContext();
continue;
}
@@ -6357,7 +6502,7 @@ void Sema::PerformPendingInstantiations(bool LocalOnly) {
void Sema::PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs) {
- for (auto DD : Pattern->ddiags()) {
+ for (auto *DD : Pattern->ddiags()) {
switch (DD->getKind()) {
case DependentDiagnostic::Access:
HandleDependentAccessCheck(*DD, TemplateArgs);
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp
index 1951aec3d17d..4a7872b2cc73 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp
@@ -18,6 +18,7 @@
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
+#include <optional>
using namespace clang;
@@ -308,8 +309,7 @@ Sema::DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
}
return declaresSameEntity(Pack.first.get<NamedDecl *>(), LocalPack);
};
- if (std::find_if(LSI->LocalPacks.begin(), LSI->LocalPacks.end(),
- DeclaresThisPack) != LSI->LocalPacks.end())
+ if (llvm::any_of(LSI->LocalPacks, DeclaresThisPack))
LambdaParamPackReferences.push_back(Pack);
}
@@ -328,8 +328,8 @@ Sema::DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
bool EnclosingStmtExpr = false;
for (unsigned N = FunctionScopes.size(); N; --N) {
sema::FunctionScopeInfo *Func = FunctionScopes[N-1];
- if (std::any_of(
- Func->CompoundScopes.begin(), Func->CompoundScopes.end(),
+ if (llvm::any_of(
+ Func->CompoundScopes,
[](sema::CompoundScopeInfo &CSI) { return CSI.IsStmtExpr; })) {
EnclosingStmtExpr = true;
break;
@@ -402,6 +402,13 @@ bool Sema::DiagnoseUnexpandedParameterPack(Expr *E,
if (!E->containsUnexpandedParameterPack())
return false;
+ // CollectUnexpandedParameterPacksVisitor does not expect to see a
+ // FunctionParmPackExpr, but diagnosing unexpected parameter packs may still
+ // see such an expression in a lambda body.
+ // We'll bail out early in this case to avoid triggering an assertion.
+ if (isa<FunctionParmPackExpr>(E) && getEnclosingLambda())
+ return false;
+
SmallVector<UnexpandedParameterPack, 2> Unexpanded;
CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseStmt(E);
assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
@@ -422,7 +429,7 @@ bool Sema::DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE) {
llvm::SmallPtrSet<NamedDecl*, 8> ParmSet(Parms.begin(), Parms.end());
SmallVector<UnexpandedParameterPack, 2> UnexpandedParms;
for (auto Parm : Unexpanded)
- if (ParmSet.contains(Parm.first.dyn_cast<NamedDecl*>()))
+ if (ParmSet.contains(Parm.first.dyn_cast<NamedDecl *>()))
UnexpandedParms.push_back(Parm);
if (UnexpandedParms.empty())
return false;
@@ -595,7 +602,8 @@ TypeResult Sema::ActOnPackExpansion(ParsedType Type,
if (!TSInfo)
return true;
- TypeSourceInfo *TSResult = CheckPackExpansion(TSInfo, EllipsisLoc, None);
+ TypeSourceInfo *TSResult =
+ CheckPackExpansion(TSInfo, EllipsisLoc, std::nullopt);
if (!TSResult)
return true;
@@ -604,7 +612,7 @@ TypeResult Sema::ActOnPackExpansion(ParsedType Type,
TypeSourceInfo *
Sema::CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc,
- Optional<unsigned> NumExpansions) {
+ std::optional<unsigned> NumExpansions) {
// Create the pack expansion type and source-location information.
QualType Result = CheckPackExpansion(Pattern->getType(),
Pattern->getTypeLoc().getSourceRange(),
@@ -622,7 +630,7 @@ Sema::CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc,
QualType Sema::CheckPackExpansion(QualType Pattern, SourceRange PatternRange,
SourceLocation EllipsisLoc,
- Optional<unsigned> NumExpansions) {
+ std::optional<unsigned> NumExpansions) {
// C++11 [temp.variadic]p5:
// The pattern of a pack expansion shall name one or more
// parameter packs that are not expanded by a nested pack
@@ -642,11 +650,11 @@ QualType Sema::CheckPackExpansion(QualType Pattern, SourceRange PatternRange,
}
ExprResult Sema::ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc) {
- return CheckPackExpansion(Pattern, EllipsisLoc, None);
+ return CheckPackExpansion(Pattern, EllipsisLoc, std::nullopt);
}
ExprResult Sema::CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
- Optional<unsigned> NumExpansions) {
+ std::optional<unsigned> NumExpansions) {
if (!Pattern)
return ExprError();
@@ -670,29 +678,27 @@ bool Sema::CheckParameterPacksForExpansion(
SourceLocation EllipsisLoc, SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand,
- bool &RetainExpansion, Optional<unsigned> &NumExpansions) {
+ bool &RetainExpansion, std::optional<unsigned> &NumExpansions) {
ShouldExpand = true;
RetainExpansion = false;
std::pair<IdentifierInfo *, SourceLocation> FirstPack;
bool HaveFirstPack = false;
- Optional<unsigned> NumPartialExpansions;
+ std::optional<unsigned> NumPartialExpansions;
SourceLocation PartiallySubstitutedPackLoc;
- for (ArrayRef<UnexpandedParameterPack>::iterator i = Unexpanded.begin(),
- end = Unexpanded.end();
- i != end; ++i) {
+ for (UnexpandedParameterPack ParmPack : Unexpanded) {
// Compute the depth and index for this parameter pack.
unsigned Depth = 0, Index = 0;
IdentifierInfo *Name;
bool IsVarDeclPack = false;
- if (const TemplateTypeParmType *TTP
- = i->first.dyn_cast<const TemplateTypeParmType *>()) {
+ if (const TemplateTypeParmType *TTP =
+ ParmPack.first.dyn_cast<const TemplateTypeParmType *>()) {
Depth = TTP->getDepth();
Index = TTP->getIndex();
Name = TTP->getIdentifier();
} else {
- NamedDecl *ND = i->first.get<NamedDecl *>();
+ NamedDecl *ND = ParmPack.first.get<NamedDecl *>();
if (isa<VarDecl>(ND))
IsVarDeclPack = true;
else
@@ -707,9 +713,9 @@ bool Sema::CheckParameterPacksForExpansion(
// Figure out whether we're instantiating to an argument pack or not.
typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
- llvm::PointerUnion<Decl *, DeclArgumentPack *> *Instantiation
- = CurrentInstantiationScope->findInstantiationOf(
- i->first.get<NamedDecl *>());
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> *Instantiation =
+ CurrentInstantiationScope->findInstantiationOf(
+ ParmPack.first.get<NamedDecl *>());
if (Instantiation->is<DeclArgumentPack *>()) {
// We could expand this function parameter pack.
NewPackSize = Instantiation->get<DeclArgumentPack *>()->size();
@@ -738,15 +744,15 @@ bool Sema::CheckParameterPacksForExpansion(
// arguments corresponding to a template parameter pack, even when the
// sequence contains explicitly specified template arguments.
if (!IsVarDeclPack && CurrentInstantiationScope) {
- if (NamedDecl *PartialPack
- = CurrentInstantiationScope->getPartiallySubstitutedPack()){
+ if (NamedDecl *PartialPack =
+ CurrentInstantiationScope->getPartiallySubstitutedPack()) {
unsigned PartialDepth, PartialIndex;
std::tie(PartialDepth, PartialIndex) = getDepthAndIndex(PartialPack);
if (PartialDepth == Depth && PartialIndex == Index) {
RetainExpansion = true;
// We don't actually know the new pack size yet.
NumPartialExpansions = NewPackSize;
- PartiallySubstitutedPackLoc = i->second;
+ PartiallySubstitutedPackLoc = ParmPack.second;
continue;
}
}
@@ -757,7 +763,7 @@ bool Sema::CheckParameterPacksForExpansion(
// Record it.
NumExpansions = NewPackSize;
FirstPack.first = Name;
- FirstPack.second = i->second;
+ FirstPack.second = ParmPack.second;
HaveFirstPack = true;
continue;
}
@@ -768,12 +774,12 @@ bool Sema::CheckParameterPacksForExpansion(
// the same number of arguments specified.
if (HaveFirstPack)
Diag(EllipsisLoc, diag::err_pack_expansion_length_conflict)
- << FirstPack.first << Name << *NumExpansions << NewPackSize
- << SourceRange(FirstPack.second) << SourceRange(i->second);
+ << FirstPack.first << Name << *NumExpansions << NewPackSize
+ << SourceRange(FirstPack.second) << SourceRange(ParmPack.second);
else
Diag(EllipsisLoc, diag::err_pack_expansion_length_conflict_multilevel)
- << Name << *NumExpansions << NewPackSize
- << SourceRange(i->second);
+ << Name << *NumExpansions << NewPackSize
+ << SourceRange(ParmPack.second);
return true;
}
}
@@ -792,8 +798,8 @@ bool Sema::CheckParameterPacksForExpansion(
NamedDecl *PartialPack =
CurrentInstantiationScope->getPartiallySubstitutedPack();
Diag(EllipsisLoc, diag::err_pack_expansion_length_conflict_partial)
- << PartialPack << *NumPartialExpansions << *NumExpansions
- << SourceRange(PartiallySubstitutedPackLoc);
+ << PartialPack << *NumPartialExpansions << *NumExpansions
+ << SourceRange(PartiallySubstitutedPackLoc);
return true;
}
@@ -803,20 +809,20 @@ bool Sema::CheckParameterPacksForExpansion(
return false;
}
-Optional<unsigned> Sema::getNumArgumentsInExpansion(QualType T,
- const MultiLevelTemplateArgumentList &TemplateArgs) {
+std::optional<unsigned> Sema::getNumArgumentsInExpansion(
+ QualType T, const MultiLevelTemplateArgumentList &TemplateArgs) {
QualType Pattern = cast<PackExpansionType>(T)->getPattern();
SmallVector<UnexpandedParameterPack, 2> Unexpanded;
CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseType(Pattern);
- Optional<unsigned> Result;
+ std::optional<unsigned> Result;
for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
// Compute the depth and index for this parameter pack.
unsigned Depth;
unsigned Index;
- if (const TemplateTypeParmType *TTP
- = Unexpanded[I].first.dyn_cast<const TemplateTypeParmType *>()) {
+ if (const TemplateTypeParmType *TTP =
+ Unexpanded[I].first.dyn_cast<const TemplateTypeParmType *>()) {
Depth = TTP->getDepth();
Index = TTP->getIndex();
} else {
@@ -825,13 +831,13 @@ Optional<unsigned> Sema::getNumArgumentsInExpansion(QualType T,
// Function parameter pack or init-capture pack.
typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
- llvm::PointerUnion<Decl *, DeclArgumentPack *> *Instantiation
- = CurrentInstantiationScope->findInstantiationOf(
- Unexpanded[I].first.get<NamedDecl *>());
- if (Instantiation->is<Decl*>())
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> *Instantiation =
+ CurrentInstantiationScope->findInstantiationOf(
+ Unexpanded[I].first.get<NamedDecl *>());
+ if (Instantiation->is<Decl *>())
// The pattern refers to an unexpanded pack. We're not ready to expand
// this pack yet.
- return None;
+ return std::nullopt;
unsigned Size = Instantiation->get<DeclArgumentPack *>()->size();
assert((!Result || *Result == Size) && "inconsistent pack sizes");
@@ -845,7 +851,7 @@ Optional<unsigned> Sema::getNumArgumentsInExpansion(QualType T,
!TemplateArgs.hasTemplateArgument(Depth, Index))
// The pattern refers to an unknown template argument. We're not ready to
// expand this pack yet.
- return None;
+ return std::nullopt;
// Determine the size of the argument pack.
unsigned Size = TemplateArgs(Depth, Index).pack_size();
@@ -860,8 +866,10 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
const DeclSpec &DS = D.getDeclSpec();
switch (DS.getTypeSpecType()) {
case TST_typename:
+ case TST_typeof_unqualType:
case TST_typeofType:
- case TST_underlyingType:
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) case TST_##Trait:
+#include "clang/Basic/TransformTypeTraits.def"
case TST_atomic: {
QualType T = DS.getRepAsType().get();
if (!T.isNull() && T->containsUnexpandedParameterPack())
@@ -869,9 +877,10 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
break;
}
+ case TST_typeof_unqualExpr:
case TST_typeofExpr:
case TST_decltype:
- case TST_extint:
+ case TST_bitint:
if (DS.getRepAsExpr() &&
DS.getRepAsExpr()->containsUnexpandedParameterPack())
return true;
@@ -893,6 +902,7 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
case TST_Fract:
case TST_Float16:
case TST_float128:
+ case TST_ibm128:
case TST_bool:
case TST_decimal32:
case TST_decimal64:
@@ -1051,10 +1061,9 @@ ExprResult Sema::ActOnSizeofParameterPackExpr(Scope *S,
RParenLoc);
}
-TemplateArgumentLoc
-Sema::getTemplateArgumentPackExpansionPattern(
- TemplateArgumentLoc OrigLoc,
- SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const {
+TemplateArgumentLoc Sema::getTemplateArgumentPackExpansionPattern(
+ TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis,
+ std::optional<unsigned> &NumExpansions) const {
const TemplateArgument &Argument = OrigLoc.getArgument();
assert(Argument.isPackExpansion());
switch (Argument.getKind()) {
@@ -1103,6 +1112,7 @@ Sema::getTemplateArgumentPackExpansionPattern(
case TemplateArgument::NullPtr:
case TemplateArgument::Template:
case TemplateArgument::Integral:
+ case TemplateArgument::StructuralValue:
case TemplateArgument::Pack:
case TemplateArgument::Null:
return TemplateArgumentLoc();
@@ -1111,7 +1121,7 @@ Sema::getTemplateArgumentPackExpansionPattern(
llvm_unreachable("Invalid TemplateArgument Kind!");
}
-Optional<unsigned> Sema::getFullyPackExpandedSize(TemplateArgument Arg) {
+std::optional<unsigned> Sema::getFullyPackExpandedSize(TemplateArgument Arg) {
assert(Arg.containsUnexpandedParameterPack());
// If this is a substituted pack, grab that pack. If not, we don't know
@@ -1125,7 +1135,7 @@ Optional<unsigned> Sema::getFullyPackExpandedSize(TemplateArgument Arg) {
if (auto *Subst = Arg.getAsType()->getAs<SubstTemplateTypeParmPackType>())
Pack = Subst->getArgumentPack();
else
- return None;
+ return std::nullopt;
break;
case TemplateArgument::Expression:
@@ -1135,10 +1145,10 @@ Optional<unsigned> Sema::getFullyPackExpandedSize(TemplateArgument Arg) {
else if (auto *Subst = dyn_cast<FunctionParmPackExpr>(Arg.getAsExpr())) {
for (VarDecl *PD : *Subst)
if (PD->isParameterPack())
- return None;
+ return std::nullopt;
return Subst->getNumExpansions();
} else
- return None;
+ return std::nullopt;
break;
case TemplateArgument::Template:
@@ -1146,16 +1156,17 @@ Optional<unsigned> Sema::getFullyPackExpandedSize(TemplateArgument Arg) {
Arg.getAsTemplate().getAsSubstTemplateTemplateParmPack())
Pack = Subst->getArgumentPack();
else
- return None;
+ return std::nullopt;
break;
case TemplateArgument::Declaration:
case TemplateArgument::NullPtr:
case TemplateArgument::TemplateExpansion:
case TemplateArgument::Integral:
+ case TemplateArgument::StructuralValue:
case TemplateArgument::Pack:
case TemplateArgument::Null:
- return None;
+ return std::nullopt;
}
// Check that no argument in the pack is itself a pack expansion.
@@ -1163,7 +1174,7 @@ Optional<unsigned> Sema::getFullyPackExpandedSize(TemplateArgument Arg) {
// There's no point recursing in this case; we would have already
// expanded this pack expansion into the enclosing pack if we could.
if (Elem.isPackExpansion())
- return None;
+ return std::nullopt;
}
return Pack.pack_size();
}
@@ -1218,10 +1229,11 @@ ExprResult Sema::ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
if (!LHS || !RHS) {
Expr *Pack = LHS ? LHS : RHS;
assert(Pack && "fold expression with neither LHS nor RHS");
- DiscardOperands();
- if (!Pack->containsUnexpandedParameterPack())
+ if (!Pack->containsUnexpandedParameterPack()) {
+ DiscardOperands();
return Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
<< Pack->getSourceRange();
+ }
}
BinaryOperatorKind Opc = ConvertTokenKindToBinaryOpcode(Operator);
@@ -1244,7 +1256,7 @@ ExprResult Sema::ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
}
return BuildCXXFoldExpr(ULE, LParenLoc, LHS, Opc, EllipsisLoc, RHS, RParenLoc,
- None);
+ std::nullopt);
}
ExprResult Sema::BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
@@ -1252,7 +1264,7 @@ ExprResult Sema::BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
- Optional<unsigned> NumExpansions) {
+ std::optional<unsigned> NumExpansions) {
return new (Context)
CXXFoldExpr(Context.DependentTy, Callee, LParenLoc, LHS, Operator,
EllipsisLoc, RHS, RParenLoc, NumExpansions);
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaType.cpp b/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
index bca21b351c91..92086d7277fd 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
@@ -16,12 +16,16 @@
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/ASTStructuralEquivalence.h"
#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeLocVisitor.h"
#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
@@ -32,12 +36,15 @@
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateInstCallback.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <bitset>
+#include <optional>
using namespace clang;
@@ -98,8 +105,10 @@ static void diagnoseBadTypeAttribute(Sema &S, const ParsedAttr &attr,
}
}
- S.Diag(loc, diag::warn_type_attribute_wrong_type) << name << WhichType
- << type;
+ S.Diag(loc, attr.isRegularKeywordAttribute()
+ ? diag::err_type_attribute_wrong_type
+ : diag::warn_type_attribute_wrong_type)
+ << name << WhichType << type;
}
// objc_gc applies to Objective-C pointers or, otherwise, to the
@@ -120,12 +129,15 @@ static void diagnoseBadTypeAttribute(Sema &S, const ParsedAttr &attr,
case ParsedAttr::AT_SwiftAsyncCall: \
case ParsedAttr::AT_VectorCall: \
case ParsedAttr::AT_AArch64VectorPcs: \
+ case ParsedAttr::AT_AArch64SVEPcs: \
+ case ParsedAttr::AT_AMDGPUKernelCall: \
case ParsedAttr::AT_MSABI: \
case ParsedAttr::AT_SysVABI: \
case ParsedAttr::AT_Pcs: \
case ParsedAttr::AT_IntelOclBicc: \
case ParsedAttr::AT_PreserveMost: \
- case ParsedAttr::AT_PreserveAll
+ case ParsedAttr::AT_PreserveAll: \
+ case ParsedAttr::AT_M68kRTD
// Function type attributes.
#define FUNCTION_TYPE_ATTRS_CASELIST \
@@ -133,6 +145,12 @@ static void diagnoseBadTypeAttribute(Sema &S, const ParsedAttr &attr,
case ParsedAttr::AT_NoReturn: \
case ParsedAttr::AT_Regparm: \
case ParsedAttr::AT_CmseNSCall: \
+ case ParsedAttr::AT_ArmStreaming: \
+ case ParsedAttr::AT_ArmStreamingCompatible: \
+ case ParsedAttr::AT_ArmPreserves: \
+ case ParsedAttr::AT_ArmIn: \
+ case ParsedAttr::AT_ArmOut: \
+ case ParsedAttr::AT_ArmInOut: \
case ParsedAttr::AT_AnyX86NoCallerSavedRegisters: \
case ParsedAttr::AT_AnyX86NoCfCheck: \
CALLING_CONV_ATTRS_CASELIST
@@ -165,12 +183,6 @@ namespace {
/// DeclSpec.
unsigned chunkIndex;
- /// Whether there are non-trivial modifications to the decl spec.
- bool trivial;
-
- /// Whether we saved the attributes in the decl spec.
- bool hasSavedAttrs;
-
/// The original set of attributes on the DeclSpec.
SmallVector<ParsedAttr *, 2> savedAttrs;
@@ -199,8 +211,7 @@ namespace {
public:
TypeProcessingState(Sema &sema, Declarator &declarator)
: sema(sema), declarator(declarator),
- chunkIndex(declarator.getNumTypeObjects()), trivial(true),
- hasSavedAttrs(false), parsedNoDeref(false) {}
+ chunkIndex(declarator.getNumTypeObjects()), parsedNoDeref(false) {}
Sema &getSema() const {
return sema;
@@ -232,13 +243,12 @@ namespace {
/// Save the current set of attributes on the DeclSpec.
void saveDeclSpecAttrs() {
// Don't try to save them multiple times.
- if (hasSavedAttrs) return;
+ if (!savedAttrs.empty())
+ return;
DeclSpec &spec = getMutableDeclSpec();
- for (ParsedAttr &AL : spec.getAttributes())
- savedAttrs.push_back(&AL);
- trivial &= savedAttrs.empty();
- hasSavedAttrs = true;
+ llvm::append_range(savedAttrs,
+ llvm::make_pointer_range(spec.getAttributes()));
}
/// Record that we had nowhere to put the given type attribute.
@@ -265,6 +275,12 @@ namespace {
return T;
}
+ /// Get a BTFTagAttributed type for the btf_type_tag attribute.
+ QualType getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr,
+ QualType WrappedType) {
+ return sema.Context.getBTFTagAttributedType(BTFAttr, WrappedType);
+ }
+
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. Also replace \p TypeWithAuto in \c TypeAttrPair if
/// necessary.
@@ -323,23 +339,18 @@ namespace {
bool didParseNoDeref() const { return parsedNoDeref; }
~TypeProcessingState() {
- if (trivial) return;
+ if (savedAttrs.empty())
+ return;
- restoreDeclSpecAttrs();
+ getMutableDeclSpec().getAttributes().clearListOnly();
+ for (ParsedAttr *AL : savedAttrs)
+ getMutableDeclSpec().getAttributes().addAtEnd(AL);
}
private:
DeclSpec &getMutableDeclSpec() const {
return const_cast<DeclSpec&>(declarator.getDeclSpec());
}
-
- void restoreDeclSpecAttrs() {
- assert(hasSavedAttrs);
-
- getMutableDeclSpec().getAttributes().clearListOnly();
- for (ParsedAttr *AL : savedAttrs)
- getMutableDeclSpec().getAttributes().addAtEnd(AL);
- }
};
} // end anonymous namespace
@@ -360,11 +371,14 @@ enum TypeAttrLocation {
TAL_DeclName
};
-static void processTypeAttrs(TypeProcessingState &state, QualType &type,
- TypeAttrLocation TAL, ParsedAttributesView &attrs);
+static void
+processTypeAttrs(TypeProcessingState &state, QualType &type,
+ TypeAttrLocation TAL, const ParsedAttributesView &attrs,
+ Sema::CUDAFunctionTarget CFT = Sema::CFT_HostDevice);
static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
- QualType &type);
+ QualType &type,
+ Sema::CUDAFunctionTarget CFT);
static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &state,
ParsedAttr &attr, QualType &type);
@@ -433,7 +447,7 @@ static DeclaratorChunk *maybeMovePastReturnType(Declarator &declarator,
if (onlyBlockPointers)
continue;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case DeclaratorChunk::BlockPointer:
result = &ptrChunk;
@@ -610,7 +624,8 @@ static void distributeFunctionTypeAttr(TypeProcessingState &state,
/// distributed, false if no location was found.
static bool distributeFunctionTypeAttrToInnermost(
TypeProcessingState &state, ParsedAttr &attr,
- ParsedAttributesView &attrList, QualType &declSpecType) {
+ ParsedAttributesView &attrList, QualType &declSpecType,
+ Sema::CUDAFunctionTarget CFT) {
Declarator &declarator = state.getDeclarator();
// Put it on the innermost function chunk, if there is one.
@@ -622,28 +637,20 @@ static bool distributeFunctionTypeAttrToInnermost(
return true;
}
- return handleFunctionTypeAttr(state, attr, declSpecType);
+ return handleFunctionTypeAttr(state, attr, declSpecType, CFT);
}
/// A function type attribute was written in the decl spec. Try to
/// apply it somewhere.
-static void distributeFunctionTypeAttrFromDeclSpec(TypeProcessingState &state,
- ParsedAttr &attr,
- QualType &declSpecType) {
+static void
+distributeFunctionTypeAttrFromDeclSpec(TypeProcessingState &state,
+ ParsedAttr &attr, QualType &declSpecType,
+ Sema::CUDAFunctionTarget CFT) {
state.saveDeclSpecAttrs();
- // C++11 attributes before the decl specifiers actually appertain to
- // the declarators. Move them straight there. We don't support the
- // 'put them wherever you like' semantics we allow for GNU attributes.
- if (attr.isStandardAttributeSyntax()) {
- moveAttrFromListToList(attr, state.getCurrentAttributes(),
- state.getDeclarator().getAttributes());
- return;
- }
-
// Try to distribute to the innermost.
if (distributeFunctionTypeAttrToInnermost(
- state, attr, state.getCurrentAttributes(), declSpecType))
+ state, attr, state.getCurrentAttributes(), declSpecType, CFT))
return;
// If that failed, diagnose the bad attribute when the declarator is
@@ -651,16 +658,18 @@ static void distributeFunctionTypeAttrFromDeclSpec(TypeProcessingState &state,
state.addIgnoredTypeAttr(attr);
}
-/// A function type attribute was written on the declarator. Try to
-/// apply it somewhere.
-static void distributeFunctionTypeAttrFromDeclarator(TypeProcessingState &state,
- ParsedAttr &attr,
- QualType &declSpecType) {
+/// A function type attribute was written on the declarator or declaration.
+/// Try to apply it somewhere.
+/// `Attrs` is the attribute list containing the declaration (either of the
+/// declarator or the declaration).
+static void distributeFunctionTypeAttrFromDeclarator(
+ TypeProcessingState &state, ParsedAttr &attr, QualType &declSpecType,
+ Sema::CUDAFunctionTarget CFT) {
Declarator &declarator = state.getDeclarator();
// Try to distribute to the innermost.
if (distributeFunctionTypeAttrToInnermost(
- state, attr, declarator.getAttributes(), declSpecType))
+ state, attr, declarator.getAttributes(), declSpecType, CFT))
return;
// If that failed, diagnose the bad attribute when the declarator is
@@ -669,7 +678,7 @@ static void distributeFunctionTypeAttrFromDeclarator(TypeProcessingState &state,
state.addIgnoredTypeAttr(attr);
}
-/// Given that there are attributes written on the declarator
+/// Given that there are attributes written on the declarator or declaration
/// itself, try to distribute any type attributes to the appropriate
/// declarator chunk.
///
@@ -678,11 +687,12 @@ static void distributeFunctionTypeAttrFromDeclarator(TypeProcessingState &state,
/// int (f ATTR)();
/// but not necessarily this:
/// int f() ATTR;
+///
+/// `Attrs` is the attribute list containing the declaration (either of the
+/// declarator or the declaration).
static void distributeTypeAttrsFromDeclarator(TypeProcessingState &state,
- QualType &declSpecType) {
- // Collect all the type attributes from the declarator itself.
- assert(!state.getDeclarator().getAttributes().empty() &&
- "declarator has no attrs!");
+ QualType &declSpecType,
+ Sema::CUDAFunctionTarget CFT) {
// The called functions in this loop actually remove things from the current
// list, so iterating over the existing list isn't possible. Instead, make a
// non-owning copy and iterate over that.
@@ -690,7 +700,7 @@ static void distributeTypeAttrsFromDeclarator(TypeProcessingState &state,
for (ParsedAttr &attr : AttrsCopy) {
// Do not distribute [[]] attributes. They have strict rules for what
// they appertain to.
- if (attr.isStandardAttributeSyntax())
+ if (attr.isStandardAttributeSyntax() || attr.isRegularKeywordAttribute())
continue;
switch (attr.getKind()) {
@@ -699,7 +709,7 @@ static void distributeTypeAttrsFromDeclarator(TypeProcessingState &state,
break;
FUNCTION_TYPE_ATTRS_CASELIST:
- distributeFunctionTypeAttrFromDeclarator(state, attr, declSpecType);
+ distributeFunctionTypeAttrFromDeclarator(state, attr, declSpecType, CFT);
break;
MS_TYPE_ATTRS_CASELIST:
@@ -769,7 +779,7 @@ static void maybeSynthesizeBlockSignature(TypeProcessingState &state,
/*NumExceptions=*/0,
/*NoexceptExpr=*/nullptr,
/*ExceptionSpecTokens=*/nullptr,
- /*DeclsInPrototype=*/None, loc, loc, declarator));
+ /*DeclsInPrototype=*/std::nullopt, loc, loc, declarator));
// For consistency, make sure the state still has us as processing
// the decl spec.
@@ -837,8 +847,8 @@ static bool checkOmittedBlockReturnType(Sema &S, Declarator &declarator,
/// Apply Objective-C type arguments to the given type.
static QualType applyObjCTypeArgs(Sema &S, SourceLocation loc, QualType type,
ArrayRef<TypeSourceInfo *> typeArgs,
- SourceRange typeArgsRange,
- bool failOnError = false) {
+ SourceRange typeArgsRange, bool failOnError,
+ bool rebuilding) {
// We can only apply type arguments to an Objective-C class type.
const auto *objcObjectType = type->getAs<ObjCObjectType>();
if (!objcObjectType || !objcObjectType->getInterface()) {
@@ -902,7 +912,9 @@ static QualType applyObjCTypeArgs(Sema &S, SourceLocation loc, QualType type,
}
}
- if (!diagnosed) {
+ // When rebuilding, qualifiers might have gotten here through a
+ // final substitution.
+ if (!rebuilding && !diagnosed) {
S.Diag(qual.getBeginLoc(), diag::err_objc_type_arg_qualified)
<< typeArg << typeArg.getQualifiers().getAsString()
<< FixItHint::CreateRemoval(rangeToRemove);
@@ -951,7 +963,7 @@ static QualType applyObjCTypeArgs(Sema &S, SourceLocation loc, QualType type,
// Retrieve the bound.
QualType bound = typeParam->getUnderlyingType();
- const auto *boundObjC = bound->getAs<ObjCObjectPointerType>();
+ const auto *boundObjC = bound->castAs<ObjCObjectPointerType>();
// Determine whether the type argument is substitutable for the bound.
if (typeArgObjC->isObjCIdType()) {
@@ -1064,22 +1076,18 @@ QualType Sema::BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
return Result;
}
-QualType Sema::BuildObjCObjectType(QualType BaseType,
- SourceLocation Loc,
- SourceLocation TypeArgsLAngleLoc,
- ArrayRef<TypeSourceInfo *> TypeArgs,
- SourceLocation TypeArgsRAngleLoc,
- SourceLocation ProtocolLAngleLoc,
- ArrayRef<ObjCProtocolDecl *> Protocols,
- ArrayRef<SourceLocation> ProtocolLocs,
- SourceLocation ProtocolRAngleLoc,
- bool FailOnError) {
+QualType Sema::BuildObjCObjectType(
+ QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc,
+ ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc,
+ SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols,
+ ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc,
+ bool FailOnError, bool Rebuilding) {
QualType Result = BaseType;
if (!TypeArgs.empty()) {
- Result = applyObjCTypeArgs(*this, Loc, Result, TypeArgs,
- SourceRange(TypeArgsLAngleLoc,
- TypeArgsRAngleLoc),
- FailOnError);
+ Result =
+ applyObjCTypeArgs(*this, Loc, Result, TypeArgs,
+ SourceRange(TypeArgsLAngleLoc, TypeArgsRAngleLoc),
+ FailOnError, Rebuilding);
if (FailOnError && Result.isNull())
return QualType();
}
@@ -1107,11 +1115,10 @@ TypeResult Sema::actOnObjCProtocolQualifierType(
SourceLocation rAngleLoc) {
// Form id<protocol-list>.
QualType Result = Context.getObjCObjectType(
- Context.ObjCBuiltinIdTy, { },
- llvm::makeArrayRef(
- (ObjCProtocolDecl * const *)protocols.data(),
- protocols.size()),
- false);
+ Context.ObjCBuiltinIdTy, {},
+ llvm::ArrayRef((ObjCProtocolDecl *const *)protocols.data(),
+ protocols.size()),
+ false);
Result = Context.getObjCObjectPointerType(Result);
TypeSourceInfo *ResultTInfo = Context.CreateTypeSourceInfo(Result);
@@ -1178,10 +1185,11 @@ TypeResult Sema::actOnObjCTypeArgsAndProtocolQualifiers(
T, BaseTypeInfo->getTypeLoc().getSourceRange().getBegin(),
TypeArgsLAngleLoc, ActualTypeArgInfos, TypeArgsRAngleLoc,
ProtocolLAngleLoc,
- llvm::makeArrayRef((ObjCProtocolDecl * const *)Protocols.data(),
- Protocols.size()),
+ llvm::ArrayRef((ObjCProtocolDecl *const *)Protocols.data(),
+ Protocols.size()),
ProtocolLocs, ProtocolRAngleLoc,
- /*FailOnError=*/false);
+ /*FailOnError=*/false,
+ /*Rebuilding=*/false);
if (Result == T)
return BaseType;
@@ -1257,6 +1265,18 @@ getImageAccess(const ParsedAttributesView &Attrs) {
return OpenCLAccessAttr::Keyword_read_only;
}
+static UnaryTransformType::UTTKind
+TSTToUnaryTransformType(DeclSpec::TST SwitchTST) {
+ switch (SwitchTST) {
+#define TRANSFORM_TYPE_TRAIT_DEF(Enum, Trait) \
+ case TST_##Trait: \
+ return UnaryTransformType::Enum;
+#include "clang/Basic/TransformTypeTraits.def"
+ default:
+ llvm_unreachable("attempted to parse a non-unary transform builtin");
+ }
+}
+
/// Convert the specified declspec to the appropriate type
/// object.
/// \param state Specifies the declarator containing the declaration specifier
@@ -1348,40 +1368,38 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// allowed to be completely missing a declspec. This is handled in the
// parser already though by it pretending to have seen an 'int' in this
// case.
- if (S.getLangOpts().ImplicitInt) {
- // In C89 mode, we only warn if there is a completely missing declspec
- // when one is not allowed.
- if (DS.isEmpty()) {
- S.Diag(DeclLoc, diag::ext_missing_declspec)
- << DS.getSourceRange()
- << FixItHint::CreateInsertion(DS.getBeginLoc(), "int");
- }
+ if (S.getLangOpts().isImplicitIntRequired()) {
+ S.Diag(DeclLoc, diag::warn_missing_type_specifier)
+ << DS.getSourceRange()
+ << FixItHint::CreateInsertion(DS.getBeginLoc(), "int");
} else if (!DS.hasTypeSpecifier()) {
// C99 and C++ require a type specifier. For example, C99 6.7.2p2 says:
// "At least one type specifier shall be given in the declaration
// specifiers in each declaration, and in the specifier-qualifier list in
// each struct declaration and type name."
- if (S.getLangOpts().CPlusPlus && !DS.isTypeSpecPipe()) {
+ if (!S.getLangOpts().isImplicitIntAllowed() && !DS.isTypeSpecPipe()) {
S.Diag(DeclLoc, diag::err_missing_type_specifier)
- << DS.getSourceRange();
+ << DS.getSourceRange();
- // When this occurs in C++ code, often something is very broken with the
- // value being declared, poison it as invalid so we don't get chains of
+ // When this occurs, often something is very broken with the value
+ // being declared, poison it as invalid so we don't get chains of
// errors.
declarator.setInvalidType(true);
- } else if ((S.getLangOpts().OpenCLVersion >= 200 ||
- S.getLangOpts().OpenCLCPlusPlus) &&
+ } else if (S.getLangOpts().getOpenCLCompatibleVersion() >= 200 &&
DS.isTypeSpecPipe()) {
S.Diag(DeclLoc, diag::err_missing_actual_pipe_type)
- << DS.getSourceRange();
+ << DS.getSourceRange();
declarator.setInvalidType(true);
} else {
+ assert(S.getLangOpts().isImplicitIntAllowed() &&
+ "implicit int is disabled?");
S.Diag(DeclLoc, diag::ext_missing_type_specifier)
- << DS.getSourceRange();
+ << DS.getSourceRange()
+ << FixItHint::CreateInsertion(DS.getBeginLoc(), "int");
}
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case DeclSpec::TST_int: {
if (DS.getTypeSpecSign() != TypeSpecifierSign::Unsigned) {
switch (DS.getTypeSpecWidth()) {
@@ -1436,12 +1454,11 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
}
break;
}
- case DeclSpec::TST_extint: {
- if (!S.Context.getTargetInfo().hasExtIntType())
- S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
- << "_ExtInt";
+ case DeclSpec::TST_bitint: {
+ if (!S.Context.getTargetInfo().hasBitIntType())
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported) << "_BitInt";
Result =
- S.BuildExtIntType(DS.getTypeSpecSign() == TypeSpecifierSign::Unsigned,
+ S.BuildBitIntType(DS.getTypeSpecSign() == TypeSpecifierSign::Unsigned,
DS.getRepAsExpr(), DS.getBeginLoc());
if (Result.isNull()) {
Result = Context.IntTy;
@@ -1497,8 +1514,8 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
}
case DeclSpec::TST_int128:
if (!S.Context.getTargetInfo().hasInt128Type() &&
- !S.getLangOpts().SYCLIsDevice &&
- !(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsDevice))
+ !(S.getLangOpts().SYCLIsDevice || S.getLangOpts().CUDAIsDevice ||
+ (S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsTargetDevice)))
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
<< "__int128";
if (DS.getTypeSpecSign() == TypeSpecifierSign::Unsigned)
@@ -1511,16 +1528,17 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// do not diagnose _Float16 usage to avoid false alarm.
// ToDo: more precise diagnostics for CUDA.
if (!S.Context.getTargetInfo().hasFloat16Type() && !S.getLangOpts().CUDA &&
- !(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsDevice))
+ !(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsTargetDevice))
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
<< "_Float16";
Result = Context.Float16Ty;
break;
case DeclSpec::TST_half: Result = Context.HalfTy; break;
case DeclSpec::TST_BFloat16:
- if (!S.Context.getTargetInfo().hasBFloat16Type())
- S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
- << "__bf16";
+ if (!S.Context.getTargetInfo().hasBFloat16Type() &&
+ !(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsTargetDevice) &&
+ !S.getLangOpts().SYCLIsDevice)
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported) << "__bf16";
Result = Context.BFloat16Ty;
break;
case DeclSpec::TST_float: Result = Context.FloatTy; break;
@@ -1533,7 +1551,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
if (!S.getOpenCLOptions().isSupported("cl_khr_fp64", S.getLangOpts()))
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_opencl_requires_extension)
<< 0 << Result
- << (S.getLangOpts().OpenCLVersion == 300
+ << (S.getLangOpts().getOpenCLCompatibleVersion() == 300
? "cl_khr_fp64 and __opencl_c_fp64"
: "cl_khr_fp64");
else if (!S.getOpenCLOptions().isAvailableOption("cl_khr_fp64", S.getLangOpts()))
@@ -1543,11 +1561,18 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
case DeclSpec::TST_float128:
if (!S.Context.getTargetInfo().hasFloat128Type() &&
!S.getLangOpts().SYCLIsDevice &&
- !(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsDevice))
+ !(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsTargetDevice))
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported)
<< "__float128";
Result = Context.Float128Ty;
break;
+ case DeclSpec::TST_ibm128:
+ if (!S.Context.getTargetInfo().hasIbm128Type() &&
+ !S.getLangOpts().SYCLIsDevice &&
+ !(S.getLangOpts().OpenMP && S.getLangOpts().OpenMPIsTargetDevice))
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_unsupported) << "__ibm128";
+ Result = Context.Ibm128Ty;
+ break;
case DeclSpec::TST_bool:
Result = Context.BoolTy; // _Bool or bool
break;
@@ -1602,6 +1627,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// TypeQuals handled by caller.
break;
}
+ case DeclSpec::TST_typeof_unqualType:
case DeclSpec::TST_typeofType:
// FIXME: Preserve type source info.
Result = S.GetTypeFromParser(DS.getRepAsType());
@@ -1610,13 +1636,20 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
if (const TagType *TT = Result->getAs<TagType>())
S.DiagnoseUseOfDecl(TT->getDecl(), DS.getTypeSpecTypeLoc());
// TypeQuals handled by caller.
- Result = Context.getTypeOfType(Result);
+ Result = Context.getTypeOfType(
+ Result, DS.getTypeSpecType() == DeclSpec::TST_typeof_unqualType
+ ? TypeOfKind::Unqualified
+ : TypeOfKind::Qualified);
break;
+ case DeclSpec::TST_typeof_unqualExpr:
case DeclSpec::TST_typeofExpr: {
Expr *E = DS.getRepAsExpr();
assert(E && "Didn't get an expression for typeof?");
// TypeQuals handled by caller.
- Result = S.BuildTypeofExprType(E, DS.getTypeSpecTypeLoc());
+ Result = S.BuildTypeofExprType(E, DS.getTypeSpecType() ==
+ DeclSpec::TST_typeof_unqualExpr
+ ? TypeOfKind::Unqualified
+ : TypeOfKind::Qualified);
if (Result.isNull()) {
Result = Context.IntTy;
declarator.setInvalidType(true);
@@ -1627,19 +1660,20 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
Expr *E = DS.getRepAsExpr();
assert(E && "Didn't get an expression for decltype?");
// TypeQuals handled by caller.
- Result = S.BuildDecltypeType(E, DS.getTypeSpecTypeLoc());
+ Result = S.BuildDecltypeType(E);
if (Result.isNull()) {
Result = Context.IntTy;
declarator.setInvalidType(true);
}
break;
}
- case DeclSpec::TST_underlyingType:
+#define TRANSFORM_TYPE_TRAIT_DEF(_, Trait) case DeclSpec::TST_##Trait:
+#include "clang/Basic/TransformTypeTraits.def"
Result = S.GetTypeFromParser(DS.getRepAsType());
- assert(!Result.isNull() && "Didn't get a type for __underlying_type?");
- Result = S.BuildUnaryTransformType(Result,
- UnaryTransformType::EnumUnderlyingType,
- DS.getTypeSpecTypeLoc());
+ assert(!Result.isNull() && "Didn't get a type for the transformation?");
+ Result = S.BuildUnaryTransformType(
+ Result, TSTToUnaryTransformType(DS.getTypeSpecType()),
+ DS.getTypeSpecTypeLoc());
if (Result.isNull()) {
Result = Context.IntTy;
declarator.setInvalidType(true);
@@ -1726,7 +1760,8 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
if (S.getLangOpts().OpenCL) {
const auto &OpenCLOptions = S.getOpenCLOptions();
- bool IsOpenCLC30 = (S.getLangOpts().OpenCLVersion == 300);
+ bool IsOpenCLC30Compatible =
+ S.getLangOpts().getOpenCLCompatibleVersion() == 300;
// OpenCL C v3.0 s6.3.3 - OpenCL image types require __opencl_c_images
// support.
// OpenCL C v3.0 s6.2.1 - OpenCL 3d image write types requires support
@@ -1735,7 +1770,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// that support OpenCL 3.0, cl_khr_3d_image_writes must be returned when and
// only when the optional feature is supported
if ((Result->isImageType() || Result->isSamplerT()) &&
- (IsOpenCLC30 &&
+ (IsOpenCLC30Compatible &&
!OpenCLOptions.isSupported("__opencl_c_images", S.getLangOpts()))) {
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_opencl_requires_extension)
<< 0 << Result << "__opencl_c_images";
@@ -1745,7 +1780,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
S.getLangOpts())) {
S.Diag(DS.getTypeSpecTypeLoc(), diag::err_opencl_requires_extension)
<< 0 << Result
- << (IsOpenCLC30
+ << (IsOpenCLC30Compatible
? "cl_khr_3d_image_writes and __opencl_c_3d_image_writes"
: "cl_khr_3d_image_writes");
declarator.setInvalidType();
@@ -1769,11 +1804,11 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
} else if (DS.isTypeAltiVecVector()) {
unsigned typeSize = static_cast<unsigned>(Context.getTypeSize(Result));
assert(typeSize > 0 && "type size for vector must be greater than 0 bits");
- VectorType::VectorKind VecKind = VectorType::AltiVecVector;
+ VectorKind VecKind = VectorKind::AltiVecVector;
if (DS.isTypeAltiVecPixel())
- VecKind = VectorType::AltiVecPixel;
+ VecKind = VectorKind::AltiVecPixel;
else if (DS.isTypeAltiVecBool())
- VecKind = VectorType::AltiVecBool;
+ VecKind = VectorKind::AltiVecBool;
Result = Context.getVectorType(Result, 128/typeSize, VecKind);
}
@@ -1790,8 +1825,42 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// list of type attributes to be temporarily saved while the type
// attributes are pushed around.
// pipe attributes will be handled later ( at GetFullTypeForDeclarator )
- if (!DS.isTypeSpecPipe())
+ if (!DS.isTypeSpecPipe()) {
+ // We also apply declaration attributes that "slide" to the decl spec.
+ // Ordering can be important for attributes. The decalaration attributes
+ // come syntactically before the decl spec attributes, so we process them
+ // in that order.
+ ParsedAttributesView SlidingAttrs;
+ for (ParsedAttr &AL : declarator.getDeclarationAttributes()) {
+ if (AL.slidesFromDeclToDeclSpecLegacyBehavior()) {
+ SlidingAttrs.addAtEnd(&AL);
+
+ // For standard syntax attributes, which would normally appertain to the
+ // declaration here, suggest moving them to the type instead. But only
+ // do this for our own vendor attributes; moving other vendors'
+ // attributes might hurt portability.
+ // There's one special case that we need to deal with here: The
+ // `MatrixType` attribute may only be used in a typedef declaration. If
+ // it's being used anywhere else, don't output the warning as
+ // ProcessDeclAttributes() will output an error anyway.
+ if (AL.isStandardAttributeSyntax() && AL.isClangScope() &&
+ !(AL.getKind() == ParsedAttr::AT_MatrixType &&
+ DS.getStorageClassSpec() != DeclSpec::SCS_typedef)) {
+ S.Diag(AL.getLoc(), diag::warn_type_attribute_deprecated_on_decl)
+ << AL;
+ }
+ }
+ }
+ // During this call to processTypeAttrs(),
+ // TypeProcessingState::getCurrentAttributes() will erroneously return a
+ // reference to the DeclSpec attributes, rather than the declaration
+ // attributes. However, this doesn't matter, as getCurrentAttributes()
+ // is only called when distributing attributes from one attribute list
+ // to another. Declaration attributes are always C++11 attributes, and these
+ // are never distributed.
+ processTypeAttrs(state, Result, TAL_DeclSpec, SlidingAttrs);
processTypeAttrs(state, Result, TAL_DeclSpec, DS.getAttributes());
+ }
// Apply const/volatile/restrict qualifiers to T.
if (unsigned TypeQuals = DS.getTypeQualifiers()) {
@@ -1867,6 +1936,14 @@ static std::string getPrintableNameForEntity(DeclarationName Entity) {
return "type name";
}
+static bool isDependentOrGNUAutoType(QualType T) {
+ if (T->isDependentType())
+ return true;
+
+ const auto *AT = dyn_cast<AutoType>(T);
+ return AT && AT->isGNUAutoType();
+}
+
QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc,
Qualifiers Qs, const DeclSpec *DS) {
if (T.isNull())
@@ -1900,7 +1977,10 @@ QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc,
DiagID = diag::err_typecheck_invalid_restrict_invalid_pointee;
ProblemTy = EltTy;
}
- } else if (!T->isDependentType()) {
+ } else if (!isDependentOrGNUAutoType(T)) {
+ // For an __auto_type variable, we may not have seen the initializer yet
+ // and so have no idea whether the underlying type is a pointer type or
+ // not.
DiagID = diag::err_typecheck_invalid_restrict_not_pointer;
ProblemTy = T;
}
@@ -2085,9 +2165,7 @@ static QualType deduceOpenCLPointeeAddrSpace(Sema &S, QualType PointeeType) {
!PointeeType->isSamplerT() &&
!PointeeType.hasAddressSpace())
PointeeType = S.getASTContext().getAddrSpaceQualType(
- PointeeType, S.getLangOpts().OpenCLGenericAddressSpace
- ? LangAS::opencl_generic
- : LangAS::opencl_private);
+ PointeeType, S.getASTContext().getDefaultOpenCLPointeeAddrSpace());
return PointeeType;
}
@@ -2120,6 +2198,11 @@ QualType Sema::BuildPointerType(QualType T,
return QualType();
}
+ if (getLangOpts().HLSL && Loc.isValid()) {
+ Diag(Loc, diag::err_hlsl_pointers_unsupported) << 0;
+ return QualType();
+ }
+
if (checkQualifiedFunction(*this, T, Loc, QFK_Pointer))
return QualType();
@@ -2132,6 +2215,21 @@ QualType Sema::BuildPointerType(QualType T,
if (getLangOpts().OpenCL)
T = deduceOpenCLPointeeAddrSpace(*this, T);
+ // In WebAssembly, pointers to reference types and pointers to tables are
+ // illegal.
+ if (getASTContext().getTargetInfo().getTriple().isWasm()) {
+ if (T.isWebAssemblyReferenceType()) {
+ Diag(Loc, diag::err_wasm_reference_pr) << 0;
+ return QualType();
+ }
+
+ // We need to desugar the type here in case T is a ParenType.
+ if (T->getUnqualifiedDesugaredType()->isWebAssemblyTableType()) {
+ Diag(Loc, diag::err_wasm_table_pr) << 0;
+ return QualType();
+ }
+ }
+
// Build the pointer type.
return Context.getPointerType(T);
}
@@ -2185,6 +2283,11 @@ QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
return QualType();
}
+ if (getLangOpts().HLSL && Loc.isValid()) {
+ Diag(Loc, diag::err_hlsl_pointers_unsupported) << 1;
+ return QualType();
+ }
+
if (checkQualifiedFunction(*this, T, Loc, QFK_Reference))
return QualType();
@@ -2202,6 +2305,17 @@ QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
if (getLangOpts().OpenCL)
T = deduceOpenCLPointeeAddrSpace(*this, T);
+ // In WebAssembly, references to reference types and tables are illegal.
+ if (getASTContext().getTargetInfo().getTriple().isWasm() &&
+ T.isWebAssemblyReferenceType()) {
+ Diag(Loc, diag::err_wasm_reference_pr) << 1;
+ return QualType();
+ }
+ if (T->isWebAssemblyTableType()) {
+ Diag(Loc, diag::err_wasm_table_pr) << 1;
+ return QualType();
+ }
+
// Handle restrict on references.
if (LValueRef)
return Context.getLValueReferenceType(T, SpelledAsLValue);
@@ -2232,7 +2346,7 @@ QualType Sema::BuildWritePipeType(QualType T, SourceLocation Loc) {
return Context.getWritePipeType(T);
}
-/// Build a extended int type.
+/// Build a bit-precise integer type.
///
/// \param IsUnsigned Boolean representing the signedness of the type.
///
@@ -2240,10 +2354,10 @@ QualType Sema::BuildWritePipeType(QualType T, SourceLocation Loc) {
/// that.
///
/// \param Loc Location of the keyword.
-QualType Sema::BuildExtIntType(bool IsUnsigned, Expr *BitWidth,
+QualType Sema::BuildBitIntType(bool IsUnsigned, Expr *BitWidth,
SourceLocation Loc) {
if (BitWidth->isInstantiationDependent())
- return Context.getDependentExtIntType(IsUnsigned, BitWidth);
+ return Context.getDependentBitIntType(IsUnsigned, BitWidth);
llvm::APSInt Bits(32);
ExprResult ICE =
@@ -2252,24 +2366,25 @@ QualType Sema::BuildExtIntType(bool IsUnsigned, Expr *BitWidth,
if (ICE.isInvalid())
return QualType();
- int64_t NumBits = Bits.getSExtValue();
+ size_t NumBits = Bits.getZExtValue();
if (!IsUnsigned && NumBits < 2) {
- Diag(Loc, diag::err_ext_int_bad_size) << 0;
+ Diag(Loc, diag::err_bit_int_bad_size) << 0;
return QualType();
}
if (IsUnsigned && NumBits < 1) {
- Diag(Loc, diag::err_ext_int_bad_size) << 1;
+ Diag(Loc, diag::err_bit_int_bad_size) << 1;
return QualType();
}
- if (NumBits > llvm::IntegerType::MAX_INT_BITS) {
- Diag(Loc, diag::err_ext_int_max_size) << IsUnsigned
- << llvm::IntegerType::MAX_INT_BITS;
+ const TargetInfo &TI = getASTContext().getTargetInfo();
+ if (NumBits > TI.getMaxBitIntWidth()) {
+ Diag(Loc, diag::err_bit_int_max_size)
+ << IsUnsigned << static_cast<uint64_t>(TI.getMaxBitIntWidth());
return QualType();
}
- return Context.getExtIntType(IsUnsigned, NumBits);
+ return Context.getBitIntType(IsUnsigned, NumBits);
}
/// Check whether the specified array bound can be evaluated using the relevant
@@ -2330,6 +2445,23 @@ static ExprResult checkArraySize(Sema &S, Expr *&ArraySize,
return R;
}
+bool Sema::checkArrayElementAlignment(QualType EltTy, SourceLocation Loc) {
+ EltTy = Context.getBaseElementType(EltTy);
+ if (EltTy->isIncompleteType() || EltTy->isDependentType() ||
+ EltTy->isUndeducedType())
+ return true;
+
+ CharUnits Size = Context.getTypeSizeInChars(EltTy);
+ CharUnits Alignment = Context.getTypeAlignInChars(EltTy);
+
+ if (Size.isMultipleOf(Alignment))
+ return true;
+
+ Diag(Loc, diag::err_array_element_alignment)
+ << EltTy << Size.getQuantity() << Alignment.getQuantity();
+ return false;
+}
+
/// Build an array type.
///
/// \param T The type of each element in the array.
@@ -2345,7 +2477,7 @@ static ExprResult checkArraySize(Sema &S, Expr *&ArraySize,
///
/// \returns A suitable array type, if there are no errors. Otherwise,
/// returns a NULL type.
-QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
+QualType Sema::BuildArrayType(QualType T, ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity) {
@@ -2387,12 +2519,22 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
} else {
// C99 6.7.5.2p1: If the element type is an incomplete or function type,
// reject it (e.g. void ary[7], struct foo ary[7], void ary[7]())
- if (RequireCompleteSizedType(Loc, T,
+ if (!T.isWebAssemblyReferenceType() &&
+ RequireCompleteSizedType(Loc, T,
diag::err_array_incomplete_or_sizeless_type))
return QualType();
}
- if (T->isSizelessType()) {
+ // Multi-dimensional arrays of WebAssembly references are not allowed.
+ if (Context.getTargetInfo().getTriple().isWasm() && T->isArrayType()) {
+ const auto *ATy = dyn_cast<ArrayType>(T);
+ if (ATy && ATy->getElementType().isWebAssemblyReferenceType()) {
+ Diag(Loc, diag::err_wasm_reftype_multidimensional_array);
+ return QualType();
+ }
+ }
+
+ if (T->isSizelessType() && !T.isWebAssemblyReferenceType()) {
Diag(Loc, diag::err_array_incomplete_or_sizeless_type) << 1 << T;
return QualType();
}
@@ -2413,6 +2555,9 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
return QualType();
}
+ if (!checkArrayElementAlignment(T, Loc))
+ return QualType();
+
// Do placeholder conversions on the array size expression.
if (ArraySize && ArraySize->hasPlaceholderType()) {
ExprResult Result = CheckPlaceholderExpr(ArraySize);
@@ -2439,6 +2584,27 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
return QualType();
}
+ auto IsStaticAssertLike = [](const Expr *ArraySize, ASTContext &Context) {
+ if (!ArraySize)
+ return false;
+
+ // If the array size expression is a conditional expression whose branches
+ // are both integer constant expressions, one negative and one positive,
+ // then it's assumed to be like an old-style static assertion. e.g.,
+ // int old_style_assert[expr ? 1 : -1];
+ // We will accept any integer constant expressions instead of assuming the
+ // values 1 and -1 are always used.
+ if (const auto *CondExpr = dyn_cast_if_present<ConditionalOperator>(
+ ArraySize->IgnoreParenImpCasts())) {
+ std::optional<llvm::APSInt> LHS =
+ CondExpr->getLHS()->getIntegerConstantExpr(Context);
+ std::optional<llvm::APSInt> RHS =
+ CondExpr->getRHS()->getIntegerConstantExpr(Context);
+ return LHS && RHS && LHS->isNegative() != RHS->isNegative();
+ }
+ return false;
+ };
+
// VLAs always produce at least a -Wvla diagnostic, sometimes an error.
unsigned VLADiag;
bool VLAIsError;
@@ -2452,6 +2618,18 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
} else if (isSFINAEContext()) {
VLADiag = diag::err_vla_in_sfinae;
VLAIsError = true;
+ } else if (getLangOpts().OpenMP && isInOpenMPTaskUntiedContext()) {
+ VLADiag = diag::err_openmp_vla_in_task_untied;
+ VLAIsError = true;
+ } else if (getLangOpts().CPlusPlus) {
+ if (getLangOpts().CPlusPlus11 && IsStaticAssertLike(ArraySize, Context))
+ VLADiag = getLangOpts().GNUMode
+ ? diag::ext_vla_cxx_in_gnu_mode_static_assert
+ : diag::ext_vla_cxx_static_assert;
+ else
+ VLADiag = getLangOpts().GNUMode ? diag::ext_vla_cxx_in_gnu_mode
+ : diag::ext_vla_cxx;
+ VLAIsError = false;
} else {
VLADiag = diag::ext_vla;
VLAIsError = false;
@@ -2459,7 +2637,7 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
llvm::APSInt ConstVal(Context.getTypeSize(Context.getSizeType()));
if (!ArraySize) {
- if (ASM == ArrayType::Star) {
+ if (ASM == ArraySizeModifier::Star) {
Diag(Loc, VLADiag);
if (VLAIsError)
return QualType();
@@ -2505,13 +2683,13 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
<< ArraySize->getSourceRange();
return QualType();
}
- if (ConstVal == 0) {
+ if (ConstVal == 0 && !T.isWebAssemblyReferenceType()) {
// GCC accepts zero sized static arrays. We allow them when
// we're not in a SFINAE context.
Diag(ArraySize->getBeginLoc(),
isSFINAEContext() ? diag::err_typecheck_zero_array_size
: diag::ext_typecheck_zero_array_size)
- << ArraySize->getSourceRange();
+ << 0 << ArraySize->getSourceRange();
}
// Is the array too large?
@@ -2530,22 +2708,26 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
}
}
- if (T->isVariableArrayType() && !Context.getTargetInfo().isVLASupported()) {
- // CUDA device code and some other targets don't support VLAs.
- targetDiag(Loc, (getLangOpts().CUDA && getLangOpts().CUDAIsDevice)
- ? diag::err_cuda_vla
- : diag::err_vla_unsupported)
- << ((getLangOpts().CUDA && getLangOpts().CUDAIsDevice)
- ? CurrentCUDATarget()
- : CFT_InvalidTarget);
+ if (T->isVariableArrayType()) {
+ if (!Context.getTargetInfo().isVLASupported()) {
+ // CUDA device code and some other targets don't support VLAs.
+ bool IsCUDADevice = (getLangOpts().CUDA && getLangOpts().CUDAIsDevice);
+ targetDiag(Loc,
+ IsCUDADevice ? diag::err_cuda_vla : diag::err_vla_unsupported)
+ << (IsCUDADevice ? CurrentCUDATarget() : 0);
+ } else if (sema::FunctionScopeInfo *FSI = getCurFunction()) {
+ // VLAs are supported on this target, but we may need to do delayed
+ // checking that the VLA is not being used within a coroutine.
+ FSI->setHasVLA(Loc);
+ }
}
// If this is not C99, diagnose array size modifiers on non-VLAs.
if (!getLangOpts().C99 && !T->isVariableArrayType() &&
- (ASM != ArrayType::Normal || Quals != 0)) {
+ (ASM != ArraySizeModifier::Normal || Quals != 0)) {
Diag(Loc, getLangOpts().CPlusPlus ? diag::err_c99_array_usage_cxx
: diag::ext_c99_array_usage)
- << ASM;
+ << llvm::to_underlying(ASM);
}
// OpenCL v2.0 s6.12.5 - Arrays of blocks are not supported.
@@ -2569,17 +2751,28 @@ QualType Sema::BuildVectorType(QualType CurType, Expr *SizeExpr,
// can't already be a vector.
if ((!CurType->isDependentType() &&
(!CurType->isBuiltinType() || CurType->isBooleanType() ||
- (!CurType->isIntegerType() && !CurType->isRealFloatingType()))) ||
+ (!CurType->isIntegerType() && !CurType->isRealFloatingType())) &&
+ !CurType->isBitIntType()) ||
CurType->isArrayType()) {
Diag(AttrLoc, diag::err_attribute_invalid_vector_type) << CurType;
return QualType();
}
+ // Only support _BitInt elements with byte-sized power of 2 NumBits.
+ if (const auto *BIT = CurType->getAs<BitIntType>()) {
+ unsigned NumBits = BIT->getNumBits();
+ if (!llvm::isPowerOf2_32(NumBits) || NumBits < 8) {
+ Diag(AttrLoc, diag::err_attribute_invalid_bitint_vector_type)
+ << (NumBits < 8);
+ return QualType();
+ }
+ }
if (SizeExpr->isTypeDependent() || SizeExpr->isValueDependent())
return Context.getDependentVectorType(CurType, SizeExpr, AttrLoc,
- VectorType::GenericVector);
+ VectorKind::Generic);
- Optional<llvm::APSInt> VecSize = SizeExpr->getIntegerConstantExpr(Context);
+ std::optional<llvm::APSInt> VecSize =
+ SizeExpr->getIntegerConstantExpr(Context);
if (!VecSize) {
Diag(AttrLoc, diag::err_attribute_argument_type)
<< "vector_size" << AANT_ArgumentIntegerConstant
@@ -2589,7 +2782,7 @@ QualType Sema::BuildVectorType(QualType CurType, Expr *SizeExpr,
if (CurType->isDependentType())
return Context.getDependentVectorType(CurType, SizeExpr, AttrLoc,
- VectorType::GenericVector);
+ VectorKind::Generic);
// vecSize is specified in bytes - convert to bits.
if (!VecSize->isIntN(61)) {
@@ -2607,7 +2800,7 @@ QualType Sema::BuildVectorType(QualType CurType, Expr *SizeExpr,
return QualType();
}
- if (VectorSizeBits % TypeSize) {
+ if (!TypeSize || VectorSizeBits % TypeSize) {
Diag(AttrLoc, diag::err_attribute_invalid_size)
<< SizeExpr->getSourceRange();
return QualType();
@@ -2620,7 +2813,7 @@ QualType Sema::BuildVectorType(QualType CurType, Expr *SizeExpr,
}
return Context.getVectorType(CurType, VectorSizeBits / TypeSize,
- VectorType::GenericVector);
+ VectorKind::Generic);
}
/// Build an ext-vector type.
@@ -2635,15 +2828,29 @@ QualType Sema::BuildExtVectorType(QualType T, Expr *ArraySize,
// reserved data type under OpenCL v2.0 s6.1.4), we don't support selects
// on bitvectors, and we have no well-defined ABI for bitvectors, so vectors
// of bool aren't allowed.
+ //
+ // We explictly allow bool elements in ext_vector_type for C/C++.
+ bool IsNoBoolVecLang = getLangOpts().OpenCL || getLangOpts().OpenCLCPlusPlus;
if ((!T->isDependentType() && !T->isIntegerType() &&
!T->isRealFloatingType()) ||
- T->isBooleanType()) {
+ (IsNoBoolVecLang && T->isBooleanType())) {
Diag(AttrLoc, diag::err_attribute_invalid_vector_type) << T;
return QualType();
}
+ // Only support _BitInt elements with byte-sized power of 2 NumBits.
+ if (T->isBitIntType()) {
+ unsigned NumBits = T->castAs<BitIntType>()->getNumBits();
+ if (!llvm::isPowerOf2_32(NumBits) || NumBits < 8) {
+ Diag(AttrLoc, diag::err_attribute_invalid_bitint_vector_type)
+ << (NumBits < 8);
+ return QualType();
+ }
+ }
+
if (!ArraySize->isTypeDependent() && !ArraySize->isValueDependent()) {
- Optional<llvm::APSInt> vecSize = ArraySize->getIntegerConstantExpr(Context);
+ std::optional<llvm::APSInt> vecSize =
+ ArraySize->getIntegerConstantExpr(Context);
if (!vecSize) {
Diag(AttrLoc, diag::err_attribute_argument_type)
<< "ext_vector_type" << AANT_ArgumentIntegerConstant
@@ -2689,8 +2896,9 @@ QualType Sema::BuildMatrixType(QualType ElementTy, Expr *NumRows, Expr *NumCols,
return Context.getDependentSizedMatrixType(ElementTy, NumRows, NumCols,
AttrLoc);
- Optional<llvm::APSInt> ValueRows = NumRows->getIntegerConstantExpr(Context);
- Optional<llvm::APSInt> ValueColumns =
+ std::optional<llvm::APSInt> ValueRows =
+ NumRows->getIntegerConstantExpr(Context);
+ std::optional<llvm::APSInt> ValueColumns =
NumCols->getIntegerConstantExpr(Context);
auto const RowRange = NumRows->getSourceRange();
@@ -2755,7 +2963,8 @@ bool Sema::CheckFunctionReturnType(QualType T, SourceLocation Loc) {
}
// Functions cannot return half FP.
- if (T->isHalfType() && !getLangOpts().HalfArgsAndReturns) {
+ if (T->isHalfType() && !getLangOpts().NativeHalfArgsAndReturns &&
+ !Context.getTargetInfo().allowHalfArgsAndReturns()) {
Diag(Loc, diag::err_parameters_retval_cannot_have_fp16_type) << 1 <<
FixItHint::CreateInsertion(Loc, "*");
return true;
@@ -2779,6 +2988,8 @@ bool Sema::CheckFunctionReturnType(QualType T, SourceLocation Loc) {
if (T.isVolatileQualified() && getLangOpts().CPlusPlus20)
Diag(Loc, diag::warn_deprecated_volatile_return) << T;
+ if (T.getAddressSpace() != LangAS::Default && getLangOpts().HLSL)
+ return true;
return false;
}
@@ -2861,11 +3072,15 @@ QualType Sema::BuildFunctionType(QualType T,
if (ParamType->isVoidType()) {
Diag(Loc, diag::err_param_with_void_type);
Invalid = true;
- } else if (ParamType->isHalfType() && !getLangOpts().HalfArgsAndReturns) {
+ } else if (ParamType->isHalfType() && !getLangOpts().NativeHalfArgsAndReturns &&
+ !Context.getTargetInfo().allowHalfArgsAndReturns()) {
// Disallow half FP arguments.
Diag(Loc, diag::err_parameters_retval_cannot_have_fp16_type) << 0 <<
FixItHint::CreateInsertion(Loc, "*");
Invalid = true;
+ } else if (ParamType->isWebAssemblyTableType()) {
+ Diag(Loc, diag::err_wasm_table_as_function_parameter);
+ Invalid = true;
}
// C++2a [dcl.fct]p4:
@@ -2937,13 +3152,18 @@ QualType Sema::BuildMemberPointerType(QualType T, QualType Class,
return QualType();
}
+ if (getLangOpts().HLSL && Loc.isValid()) {
+ Diag(Loc, diag::err_hlsl_pointers_unsupported) << 0;
+ return QualType();
+ }
+
// Adjust the default free function calling convention to the default method
// calling convention.
bool IsCtorOrDtor =
(Entity.getNameKind() == DeclarationName::CXXConstructorName) ||
(Entity.getNameKind() == DeclarationName::CXXDestructorName);
if (T->isFunctionType())
- adjustMemberFunctionCC(T, /*IsStatic=*/false, IsCtorOrDtor, Loc);
+ adjustMemberFunctionCC(T, /*HasThisPointer=*/true, IsCtorOrDtor, Loc);
return Context.getMemberPointerType(T, Class.getTypePtr());
}
@@ -3367,8 +3587,11 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
break;
}
- if (!D.getAttributes().empty())
- distributeTypeAttrsFromDeclarator(state, T);
+ // Note: We don't need to distribute declaration attributes (i.e.
+ // D.getDeclarationAttributes()) because those are always C++11 attributes,
+ // and those don't get distributed.
+ distributeTypeAttrsFromDeclarator(
+ state, T, SemaRef.IdentifyCUDATarget(D.getAttributes()));
// Find the deduced type in this type. Look in the trailing return type if we
// have one, otherwise in the DeclSpec type.
@@ -3446,11 +3669,20 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
Error = 6; // Interface member.
} else {
switch (cast<TagDecl>(SemaRef.CurContext)->getTagKind()) {
- case TTK_Enum: llvm_unreachable("unhandled tag kind");
- case TTK_Struct: Error = Cxx ? 1 : 2; /* Struct member */ break;
- case TTK_Union: Error = Cxx ? 3 : 4; /* Union member */ break;
- case TTK_Class: Error = 5; /* Class member */ break;
- case TTK_Interface: Error = 6; /* Interface member */ break;
+ case TagTypeKind::Enum:
+ llvm_unreachable("unhandled tag kind");
+ case TagTypeKind::Struct:
+ Error = Cxx ? 1 : 2; /* Struct member */
+ break;
+ case TagTypeKind::Union:
+ Error = Cxx ? 3 : 4; /* Union member */
+ break;
+ case TagTypeKind::Class:
+ Error = 5; /* Class member */
+ break;
+ case TagTypeKind::Interface:
+ Error = 6; /* Interface member */
+ break;
}
}
if (D.getDeclSpec().isFriendSpecified())
@@ -3478,7 +3710,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
!D.getNumTypeObjects() &&
D.getDeclSpec().getParsedSpecifiers() == DeclSpec::PQ_TypeSpecifier)
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case DeclaratorContext::TemplateTypeArg:
Error = 10; // Template type argument
break;
@@ -3500,8 +3732,12 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
case DeclaratorContext::FunctionalCast:
if (isa<DeducedTemplateSpecializationType>(Deduced))
break;
- LLVM_FALLTHROUGH;
+ if (SemaRef.getLangOpts().CPlusPlus23 && IsCXXAutoType &&
+ !Auto->isDecltypeAuto())
+ break; // auto(x)
+ [[fallthrough]];
case DeclaratorContext::TypeName:
+ case DeclaratorContext::Association:
Error = 15; // Generic
break;
case DeclaratorContext::File:
@@ -3558,7 +3794,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
<< Kind << Error << (int)SemaRef.getTemplateNameKindForDiagnostics(TN)
<< QualType(Deduced, 0) << AutoRange;
if (auto *TD = TN.getAsTemplateDecl())
- SemaRef.Diag(TD->getLocation(), diag::note_template_decl_here);
+ SemaRef.NoteTemplateLocation(*TD);
T = SemaRef.Context.IntTy;
D.setInvalidType(true);
@@ -3612,6 +3848,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
case DeclaratorContext::ObjCCatch:
case DeclaratorContext::TemplateArg:
case DeclaratorContext::TemplateTypeArg:
+ case DeclaratorContext::Association:
DiagID = diag::err_type_defined_in_type_specifier;
break;
case DeclaratorContext::Prototype:
@@ -3767,7 +4004,7 @@ static void warnAboutRedundantParens(Sema &S, Declarator &D, QualType T) {
case DeclaratorChunk::Paren:
if (&C == &Paren)
continue;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case DeclaratorChunk::Pointer:
StartsWithDeclaratorId = false;
continue;
@@ -3874,7 +4111,8 @@ static CallingConv getCCForDeclaratorChunk(
// function type. We'll diagnose the failure to apply them in
// handleFunctionTypeAttr.
CallingConv CC;
- if (!S.CheckCallingConvAttr(AL, CC) &&
+ if (!S.CheckCallingConvAttr(AL, CC, /*FunctionDecl=*/nullptr,
+ S.IdentifyCUDATarget(D.getAttributes())) &&
(!FTI.isVariadic || supportsVariadicCall(CC))) {
return CC;
}
@@ -3907,8 +4145,9 @@ static CallingConv getCCForDeclaratorChunk(
D.getTypeObject(I).Kind == DeclaratorChunk::MemberPointer;
} else if (D.getContext() == DeclaratorContext::LambdaExpr) {
// This can only be a call operator for a lambda, which is an instance
- // method.
- IsCXXInstanceMethod = true;
+ // method, unless explicitly specified as 'static'.
+ IsCXXInstanceMethod =
+ D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static;
} else {
// We're the innermost decl chunk, so must be a function declarator.
assert(D.isFunctionDeclarator());
@@ -3936,6 +4175,20 @@ static CallingConv getCCForDeclaratorChunk(
break;
}
}
+ } else if (S.getLangOpts().CUDA) {
+ // If we're compiling CUDA/HIP code and targeting SPIR-V we need to make
+ // sure the kernels will be marked with the right calling convention so that
+ // they will be visible by the APIs that ingest SPIR-V.
+ llvm::Triple Triple = S.Context.getTargetInfo().getTriple();
+ if (Triple.getArch() == llvm::Triple::spirv32 ||
+ Triple.getArch() == llvm::Triple::spirv64) {
+ for (const ParsedAttr &AL : D.getDeclSpec().getAttributes()) {
+ if (AL.getKind() == ParsedAttr::AT_CUDAGlobal) {
+ CC = CC_OpenCLKernel;
+ break;
+ }
+ }
+ }
}
return CC;
@@ -4171,7 +4424,7 @@ bool Sema::isCFError(RecordDecl *RD) {
// NSError. CFErrorRef used to be declared with "objc_bridge" but is now
// declared with "objc_bridge_mutable", so look for either one of the two
// attributes.
- if (RD->getTagKind() == TTK_Struct) {
+ if (RD->getTagKind() == TagTypeKind::Struct) {
IdentifierInfo *bridgedType = nullptr;
if (auto bridgeAttr = RD->getAttr<ObjCBridgeAttr>())
bridgedType = bridgeAttr->getBridgedType();
@@ -4254,8 +4507,8 @@ static void fixItNullability(Sema &S, DiagBuilderT &Diag,
InsertionText = InsertionText.drop_back().drop_front();
else
InsertionText = InsertionText.drop_front();
- } else if (!isIdentifierBody(NextChar[0], /*allow dollar*/true) &&
- !isIdentifierBody(NextChar[-1], /*allow dollar*/true)) {
+ } else if (!isAsciiIdentifierContinue(NextChar[0], /*allow dollar*/ true) &&
+ !isAsciiIdentifierContinue(NextChar[-1], /*allow dollar*/ true)) {
InsertionText = InsertionText.drop_back().drop_front();
}
@@ -4390,7 +4643,7 @@ static bool hasOuterPointerLikeChunk(const Declarator &D, unsigned endIndex) {
return false;
}
-static bool IsNoDerefableChunk(DeclaratorChunk Chunk) {
+static bool IsNoDerefableChunk(const DeclaratorChunk &Chunk) {
return (Chunk.Kind == DeclaratorChunk::Pointer ||
Chunk.Kind == DeclaratorChunk::Array);
}
@@ -4523,7 +4776,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
}
// Determine whether we should infer _Nonnull on pointer types.
- Optional<NullabilityKind> inferNullability;
+ std::optional<NullabilityKind> inferNullability;
bool inferNullabilityCS = false;
bool inferNullabilityInnerOnly = false;
bool inferNullabilityInnerOnlyComplete = false;
@@ -4556,8 +4809,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// inner pointers.
complainAboutMissingNullability = CAMN_InnerPointers;
- if (T->canHaveNullability(/*ResultIfUnknown*/false) &&
- !T->getNullability(S.Context)) {
+ if (T->canHaveNullability(/*ResultIfUnknown*/ false) &&
+ !T->getNullability()) {
// Note that we allow but don't require nullability on dependent types.
++NumPointersRemaining;
}
@@ -4593,7 +4846,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
case DeclaratorContext::TrailingReturn:
case DeclaratorContext::TrailingReturnVar:
isFunctionOrMethod = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case DeclaratorContext::Member:
if (state.getDeclarator().isObjCIvar() && !isFunctionOrMethod) {
@@ -4602,12 +4855,17 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
}
// Weak properties are inferred to be nullable.
- if (state.getDeclarator().isObjCWeakProperty() && inAssumeNonNullRegion) {
- inferNullability = NullabilityKind::Nullable;
+ if (state.getDeclarator().isObjCWeakProperty()) {
+ // Weak properties cannot be nonnull, and should not complain about
+ // missing nullable attributes during completeness checks.
+ complainAboutMissingNullability = CAMN_No;
+ if (inAssumeNonNullRegion) {
+ inferNullability = NullabilityKind::Nullable;
+ }
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case DeclaratorContext::File:
case DeclaratorContext::KNRTypeList: {
@@ -4650,7 +4908,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
AttrList.hasAttribute(ParsedAttr::AT_CFReturnsNotRetained);
};
if (const auto *InnermostChunk = D.getInnermostNonParenChunk()) {
- if (hasCFReturnsAttr(D.getAttributes()) ||
+ if (hasCFReturnsAttr(D.getDeclarationAttributes()) ||
+ hasCFReturnsAttr(D.getAttributes()) ||
hasCFReturnsAttr(InnermostChunk->getAttrs()) ||
hasCFReturnsAttr(D.getDeclSpec().getAttributes())) {
inferNullability = NullabilityKind::Nullable;
@@ -4685,6 +4944,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
case DeclaratorContext::TypeName:
case DeclaratorContext::FunctionalCast:
case DeclaratorContext::RequiresExpr:
+ case DeclaratorContext::Association:
// Don't infer in these contexts.
break;
}
@@ -4723,12 +4983,14 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// If we're supposed to infer nullability, do so now.
if (inferNullability && !inferNullabilityInnerOnlyComplete) {
- ParsedAttr::Syntax syntax = inferNullabilityCS
- ? ParsedAttr::AS_ContextSensitiveKeyword
- : ParsedAttr::AS_Keyword;
+ ParsedAttr::Form form =
+ inferNullabilityCS
+ ? ParsedAttr::Form::ContextSensitiveKeyword()
+ : ParsedAttr::Form::Keyword(false /*IsAlignAs*/,
+ false /*IsRegularKeywordAttribute*/);
ParsedAttr *nullabilityAttr = Pool.create(
S.getNullabilityKeyword(*inferNullability), SourceRange(pointerLoc),
- nullptr, SourceLocation(), nullptr, 0, syntax);
+ nullptr, SourceLocation(), nullptr, 0, form);
attrs.addAtEnd(nullabilityAttr);
@@ -4760,7 +5022,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
case CAMN_InnerPointers:
if (NumPointersRemaining == 0)
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CAMN_Yes:
checkNullabilityConsistency(S, pointerKind, pointerLoc, pointerEndLoc);
@@ -4771,8 +5033,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// If the type itself could have nullability but does not, infer pointer
// nullability and perform consistency checking.
if (S.CodeSynthesisContexts.empty()) {
- if (T->canHaveNullability(/*ResultIfUnknown*/false) &&
- !T->getNullability(S.Context)) {
+ if (T->canHaveNullability(/*ResultIfUnknown*/ false) &&
+ !T->getNullability()) {
if (isVaList(T)) {
// Record that we've seen a pointer, but do nothing else.
if (NumPointersRemaining > 0)
@@ -4795,9 +5057,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
}
}
- if (complainAboutMissingNullability == CAMN_Yes &&
- T->isArrayType() && !T->getNullability(S.Context) && !isVaList(T) &&
- D.isPrototypeContext() &&
+ if (complainAboutMissingNullability == CAMN_Yes && T->isArrayType() &&
+ !T->getNullability() && !isVaList(T) && D.isPrototypeContext() &&
!hasOuterPointerLikeChunk(D, D.getNumTypeObjects())) {
checkNullabilityConsistency(S, SimplePointerKind::Array,
D.getDeclSpec().getTypeSpecTypeLoc());
@@ -4810,6 +5071,12 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Walk the DeclTypeInfo, building the recursive type as we go.
// DeclTypeInfos are ordered from the identifier out, which is
// opposite of what we want :).
+
+ // Track if the produced type matches the structure of the declarator.
+ // This is used later to decide if we can fill `TypeLoc` from
+ // `DeclaratorChunk`s. E.g. it must be false if Clang recovers from
+ // an error by replacing the type with `int`.
+ bool AreDeclaratorChunksValid = true;
for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
unsigned chunkIndex = e - i - 1;
state.setCurrentChunkIndex(chunkIndex);
@@ -4900,33 +5167,47 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
}
DeclaratorChunk::ArrayTypeInfo &ATI = DeclType.Arr;
Expr *ArraySize = static_cast<Expr*>(ATI.NumElts);
- ArrayType::ArraySizeModifier ASM;
+ ArraySizeModifier ASM;
+
+ // Microsoft property fields can have multiple sizeless array chunks
+ // (i.e. int x[][][]). Skip all of these except one to avoid creating
+ // bad incomplete array types.
+ if (chunkIndex != 0 && !ArraySize &&
+ D.getDeclSpec().getAttributes().hasMSPropertyAttr()) {
+ // This is a sizeless chunk. If the next is also, skip this one.
+ DeclaratorChunk &NextDeclType = D.getTypeObject(chunkIndex - 1);
+ if (NextDeclType.Kind == DeclaratorChunk::Array &&
+ !NextDeclType.Arr.NumElts)
+ break;
+ }
+
if (ATI.isStar)
- ASM = ArrayType::Star;
+ ASM = ArraySizeModifier::Star;
else if (ATI.hasStatic)
- ASM = ArrayType::Static;
+ ASM = ArraySizeModifier::Static;
else
- ASM = ArrayType::Normal;
- if (ASM == ArrayType::Star && !D.isPrototypeContext()) {
+ ASM = ArraySizeModifier::Normal;
+ if (ASM == ArraySizeModifier::Star && !D.isPrototypeContext()) {
// FIXME: This check isn't quite right: it allows star in prototypes
// for function definitions, and disallows some edge cases detailed
// in http://gcc.gnu.org/ml/gcc-patches/2009-02/msg00133.html
S.Diag(DeclType.Loc, diag::err_array_star_outside_prototype);
- ASM = ArrayType::Normal;
+ ASM = ArraySizeModifier::Normal;
D.setInvalidType(true);
}
// C99 6.7.5.2p1: The optional type qualifiers and the keyword static
// shall appear only in a declaration of a function parameter with an
// array type, ...
- if (ASM == ArrayType::Static || ATI.TypeQuals) {
+ if (ASM == ArraySizeModifier::Static || ATI.TypeQuals) {
if (!(D.isPrototypeContext() ||
D.getContext() == DeclaratorContext::KNRTypeList)) {
- S.Diag(DeclType.Loc, diag::err_array_static_outside_prototype) <<
- (ASM == ArrayType::Static ? "'static'" : "type qualifier");
+ S.Diag(DeclType.Loc, diag::err_array_static_outside_prototype)
+ << (ASM == ArraySizeModifier::Static ? "'static'"
+ : "type qualifier");
// Remove the 'static' and the type qualifiers.
- if (ASM == ArrayType::Static)
- ASM = ArrayType::Normal;
+ if (ASM == ArraySizeModifier::Static)
+ ASM = ArraySizeModifier::Normal;
ATI.TypeQuals = 0;
D.setInvalidType(true);
}
@@ -4934,32 +5215,21 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// C99 6.7.5.2p1: ... and then only in the outermost array type
// derivation.
if (hasOuterPointerLikeChunk(D, chunkIndex)) {
- S.Diag(DeclType.Loc, diag::err_array_static_not_outermost) <<
- (ASM == ArrayType::Static ? "'static'" : "type qualifier");
- if (ASM == ArrayType::Static)
- ASM = ArrayType::Normal;
+ S.Diag(DeclType.Loc, diag::err_array_static_not_outermost)
+ << (ASM == ArraySizeModifier::Static ? "'static'"
+ : "type qualifier");
+ if (ASM == ArraySizeModifier::Static)
+ ASM = ArraySizeModifier::Normal;
ATI.TypeQuals = 0;
D.setInvalidType(true);
}
}
- const AutoType *AT = T->getContainedAutoType();
- // Allow arrays of auto if we are a generic lambda parameter.
- // i.e. [](auto (&array)[5]) { return array[0]; }; OK
- if (AT && D.getContext() != DeclaratorContext::LambdaExprParameter) {
- // We've already diagnosed this for decltype(auto).
- if (!AT->isDecltypeAuto())
- S.Diag(DeclType.Loc, diag::err_illegal_decl_array_of_auto)
- << getPrintableNameForEntity(Name) << T;
- T = QualType();
- break;
- }
// Array parameters can be marked nullable as well, although it's not
// necessary if they're marked 'static'.
if (complainAboutMissingNullability == CAMN_Yes &&
!hasNullabilityAttr(DeclType.getAttrs()) &&
- ASM != ArrayType::Static &&
- D.isPrototypeContext() &&
+ ASM != ArraySizeModifier::Static && D.isPrototypeContext() &&
!hasOuterPointerLikeChunk(D, chunkIndex)) {
checkNullabilityConsistency(S, SimplePointerKind::Array, DeclType.Loc);
}
@@ -4990,6 +5260,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
: diag::err_deduced_return_type);
T = Context.IntTy;
D.setInvalidType(true);
+ AreDeclaratorChunksValid = false;
} else {
S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
diag::warn_cxx11_compat_deduced_return_type);
@@ -5000,6 +5271,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
S.Diag(D.getBeginLoc(), diag::err_trailing_return_in_parens)
<< T << D.getSourceRange();
D.setInvalidType(true);
+ // FIXME: recover and fill decls in `TypeLoc`s.
+ AreDeclaratorChunksValid = false;
} else if (D.getName().getKind() ==
UnqualifiedIdKind::IK_DeductionGuideName) {
if (T != Context.DependentTy) {
@@ -5007,6 +5280,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
diag::err_deduction_guide_with_complex_decl)
<< D.getSourceRange();
D.setInvalidType(true);
+ // FIXME: recover and fill decls in `TypeLoc`s.
+ AreDeclaratorChunksValid = false;
}
} else if (D.getContext() != DeclaratorContext::LambdaExpr &&
(T.hasQualifiers() || !isa<AutoType>(T) ||
@@ -5017,6 +5292,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
diag::err_trailing_return_without_auto)
<< T << D.getDeclSpec().getSourceRange();
D.setInvalidType(true);
+ // FIXME: recover and fill decls in `TypeLoc`s.
+ AreDeclaratorChunksValid = false;
}
T = S.GetTypeFromParser(FTI.getTrailingReturnType(), &TInfo);
if (T.isNull()) {
@@ -5057,6 +5334,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
S.Diag(DeclType.Loc, diagID) << T->isFunctionType() << T;
T = Context.IntTy;
D.setInvalidType(true);
+ AreDeclaratorChunksValid = false;
}
// Do not allow returning half FP value.
@@ -5069,7 +5347,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
<< T << 0 /*pointer hint*/;
D.setInvalidType(true);
}
- } else if (!S.getLangOpts().HalfArgsAndReturns) {
+ } else if (!S.getLangOpts().NativeHalfArgsAndReturns &&
+ !S.Context.getTargetInfo().allowHalfArgsAndReturns()) {
S.Diag(D.getIdentifierLoc(),
diag::err_parameters_retval_cannot_have_fp16_type) << 1;
D.setInvalidType(true);
@@ -5093,8 +5372,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
"__cl_clang_variadic_functions", S.getLangOpts()) &&
!(D.getIdentifier() &&
((D.getIdentifier()->getName() == "printf" &&
- (LangOpts.OpenCLCPlusPlus || LangOpts.OpenCLVersion >= 120)) ||
- D.getIdentifier()->getName().startswith("__")))) {
+ LangOpts.getOpenCLCompatibleVersion() >= 120) ||
+ D.getIdentifier()->getName().starts_with("__")))) {
S.Diag(D.getIdentifierLoc(), diag::err_opencl_variadic_function);
D.setInvalidType(true);
}
@@ -5122,6 +5401,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
ObjCObjectPointerTypeLoc TLoc = TLB.push<ObjCObjectPointerTypeLoc>(T);
TLoc.setStarLoc(FixitLoc);
TInfo = TLB.getTypeSourceInfo(Context, T);
+ } else {
+ AreDeclaratorChunksValid = false;
}
D.setInvalidType(true);
@@ -5208,17 +5489,29 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
FunctionType::ExtInfo EI(
getCCForDeclaratorChunk(S, D, DeclType.getAttrs(), FTI, chunkIndex));
- if (!FTI.NumParams && !FTI.isVariadic && !LangOpts.CPlusPlus
- && !LangOpts.OpenCL) {
+ // OpenCL disallows functions without a prototype, but it doesn't enforce
+ // strict prototypes as in C23 because it allows a function definition to
+ // have an identifier list. See OpenCL 3.0 6.11/g for more details.
+ if (!FTI.NumParams && !FTI.isVariadic &&
+ !LangOpts.requiresStrictPrototypes() && !LangOpts.OpenCL) {
// Simple void foo(), where the incoming T is the result type.
T = Context.getFunctionNoProtoType(T, EI);
} else {
// We allow a zero-parameter variadic function in C if the
// function is marked with the "overloadable" attribute. Scan
- // for this attribute now.
- if (!FTI.NumParams && FTI.isVariadic && !LangOpts.CPlusPlus)
- if (!D.getAttributes().hasAttribute(ParsedAttr::AT_Overloadable))
+ // for this attribute now. We also allow it in C23 per WG14 N2975.
+ if (!FTI.NumParams && FTI.isVariadic && !LangOpts.CPlusPlus) {
+ if (LangOpts.C23)
+ S.Diag(FTI.getEllipsisLoc(),
+ diag::warn_c17_compat_ellipsis_only_parameter);
+ else if (!D.getDeclarationAttributes().hasAttribute(
+ ParsedAttr::AT_Overloadable) &&
+ !D.getAttributes().hasAttribute(
+ ParsedAttr::AT_Overloadable) &&
+ !D.getDeclSpec().getAttributes().hasAttribute(
+ ParsedAttr::AT_Overloadable))
S.Diag(FTI.getEllipsisLoc(), diag::err_ellipsis_first_param);
+ }
if (FTI.NumParams && FTI.Params[0].Param == nullptr) {
// C99 6.7.5.3p3: Reject int(x,y,z) when it's not a function
@@ -5226,8 +5519,11 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
S.Diag(FTI.Params[0].IdentLoc,
diag::err_ident_list_in_fn_declaration);
D.setInvalidType(true);
- // Recover by creating a K&R-style function type.
- T = Context.getFunctionNoProtoType(T, EI);
+ // Recover by creating a K&R-style function type, if possible.
+ T = (!LangOpts.requiresStrictPrototypes() && !LangOpts.OpenCL)
+ ? Context.getFunctionNoProtoType(T, EI)
+ : Context.IntTy;
+ AreDeclaratorChunksValid = false;
break;
}
@@ -5292,16 +5588,17 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
D.setInvalidType();
Param->setInvalidDecl();
}
- } else if (!S.getLangOpts().HalfArgsAndReturns) {
+ } else if (!S.getLangOpts().NativeHalfArgsAndReturns &&
+ !S.Context.getTargetInfo().allowHalfArgsAndReturns()) {
S.Diag(Param->getLocation(),
diag::err_parameters_retval_cannot_have_fp16_type) << 0;
D.setInvalidType();
}
} else if (!FTI.hasPrototype) {
- if (ParamTy->isPromotableIntegerType()) {
+ if (Context.isPromotableIntegerType(ParamTy)) {
ParamTy = Context.getPromotedIntegerType(ParamTy);
Param->setKNRPromoted(true);
- } else if (const BuiltinType* BTy = ParamTy->getAs<BuiltinType>()) {
+ } else if (const BuiltinType *BTy = ParamTy->getAs<BuiltinType>()) {
if (BTy->getKind() == BuiltinType::Float) {
ParamTy = Context.DoubleTy;
Param->setKNRPromoted(true);
@@ -5423,13 +5720,13 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Avoid emitting extra errors if we already errored on the scope.
D.setInvalidType(true);
} else if (S.isDependentScopeSpecifier(SS) ||
- dyn_cast_or_null<CXXRecordDecl>(S.computeDeclContext(SS))) {
+ isa_and_nonnull<CXXRecordDecl>(S.computeDeclContext(SS))) {
NestedNameSpecifier *NNS = SS.getScopeRep();
NestedNameSpecifier *NNSPrefix = NNS->getPrefix();
switch (NNS->getKind()) {
case NestedNameSpecifier::Identifier:
- ClsType = Context.getDependentNameType(ETK_None, NNSPrefix,
- NNS->getAsIdentifier());
+ ClsType = Context.getDependentNameType(
+ ElaboratedTypeKeyword::None, NNSPrefix, NNS->getAsIdentifier());
break;
case NestedNameSpecifier::Namespace:
@@ -5446,8 +5743,9 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// in ClsType; hence we wrap ClsType into an ElaboratedType.
// NOTE: in particular, no wrap occurs if ClsType already is an
// Elaborated, DependentName, or DependentTemplateSpecialization.
- if (NNSPrefix && isa<TemplateSpecializationType>(NNS->getAsType()))
- ClsType = Context.getElaboratedType(ETK_None, NNSPrefix, ClsType);
+ if (isa<TemplateSpecializationType>(NNS->getAsType()))
+ ClsType = Context.getElaboratedType(ElaboratedTypeKeyword::None,
+ NNSPrefix, ClsType);
break;
}
} else {
@@ -5461,9 +5759,13 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
if (!ClsType.isNull())
T = S.BuildMemberPointerType(T, ClsType, DeclType.Loc,
D.getIdentifier());
+ else
+ AreDeclaratorChunksValid = false;
+
if (T.isNull()) {
T = Context.IntTy;
D.setInvalidType(true);
+ AreDeclaratorChunksValid = false;
} else if (DeclType.Mem.TypeQuals) {
T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Mem.TypeQuals);
}
@@ -5481,10 +5783,12 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
if (T.isNull()) {
D.setInvalidType(true);
T = Context.IntTy;
+ AreDeclaratorChunksValid = false;
}
// See if there are any attributes on this declarator chunk.
- processTypeAttrs(state, T, TAL_DeclChunk, DeclType.getAttrs());
+ processTypeAttrs(state, T, TAL_DeclChunk, DeclType.getAttrs(),
+ S.IdentifyCUDATarget(D.getAttributes()));
if (DeclType.Kind != DeclaratorChunk::Paren) {
if (ExpectNoDerefChunk && !IsNoDerefableChunk(DeclType))
@@ -5499,15 +5803,16 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
diag::warn_noderef_on_non_pointer_or_array);
// GNU warning -Wstrict-prototypes
- // Warn if a function declaration is without a prototype.
+ // Warn if a function declaration or definition is without a prototype.
// This warning is issued for all kinds of unprototyped function
// declarations (i.e. function type typedef, function pointer etc.)
// C99 6.7.5.3p14:
// The empty list in a function declarator that is not part of a definition
// of that function specifies that no information about the number or types
// of the parameters is supplied.
- if (!LangOpts.CPlusPlus &&
- D.getFunctionDefinitionKind() == FunctionDefinitionKind::Declaration) {
+ // See ActOnFinishFunctionBody() and MergeFunctionDecl() for handling of
+ // function declarations whose behavior changes in C23.
+ if (!LangOpts.requiresStrictPrototypes()) {
bool IsBlock = false;
for (const DeclaratorChunk &DeclType : D.type_objects()) {
switch (DeclType.Kind) {
@@ -5516,10 +5821,12 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
break;
case DeclaratorChunk::Function: {
const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun;
- // We supress the warning when there's no LParen location, as this
+ // We suppress the warning when there's no LParen location, as this
// indicates the declaration was an implicit declaration, which gets
- // warned about separately via -Wimplicit-function-declaration.
- if (FTI.NumParams == 0 && !FTI.isVariadic && FTI.getLParenLoc().isValid())
+ // warned about separately via -Wimplicit-function-declaration. We also
+ // suppress the warning when we know the function has a prototype.
+ if (!FTI.hasPrototype && FTI.NumParams == 0 && !FTI.isVariadic &&
+ FTI.getLParenLoc().isValid())
S.Diag(DeclType.Loc, diag::warn_strict_prototypes)
<< IsBlock
<< FixItHint::CreateInsertion(FTI.getRParenLoc(), "void");
@@ -5546,7 +5853,12 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
//
// Core issue 547 also allows cv-qualifiers on function types that are
// top-level template type arguments.
- enum { NonMember, Member, DeductionGuide } Kind = NonMember;
+ enum {
+ NonMember,
+ Member,
+ ExplicitObjectMember,
+ DeductionGuide
+ } Kind = NonMember;
if (D.getName().getKind() == UnqualifiedIdKind::IK_DeductionGuideName)
Kind = DeductionGuide;
else if (!D.getCXXScopeSpec().isSet()) {
@@ -5560,6 +5872,18 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
Kind = Member;
}
+ if (Kind == Member) {
+ unsigned I;
+ if (D.isFunctionDeclarator(I)) {
+ const DeclaratorChunk &Chunk = D.getTypeObject(I);
+ if (Chunk.Fun.NumParams) {
+ auto *P = dyn_cast_or_null<ParmVarDecl>(Chunk.Fun.Params->Param);
+ if (P && P->isExplicitObjectParameter())
+ Kind = ExplicitObjectMember;
+ }
+ }
+ }
+
// C++11 [dcl.fct]p6 (w/DR1417):
// An attempt to specify a function type with a cv-qualifier-seq or a
// ref-qualifier (including by typedef-name) is ill-formed unless it is:
@@ -5577,7 +5901,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
//
// ... for instance.
if (IsQualifiedFunction &&
- !(Kind == Member &&
+ !(Kind == Member && !D.isExplicitObjectMemberFunction() &&
D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static) &&
!IsTypedefName && D.getContext() != DeclaratorContext::TemplateArg &&
D.getContext() != DeclaratorContext::TemplateTypeArg) {
@@ -5626,7 +5950,14 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
}
}
- // Apply any undistributed attributes from the declarator.
+ // Apply any undistributed attributes from the declaration or declarator.
+ ParsedAttributesView NonSlidingAttrs;
+ for (ParsedAttr &AL : D.getDeclarationAttributes()) {
+ if (!AL.slidesFromDeclToDeclSpecLegacyBehavior()) {
+ NonSlidingAttrs.addAtEnd(&AL);
+ }
+ }
+ processTypeAttrs(state, T, TAL_DeclName, NonSlidingAttrs);
processTypeAttrs(state, T, TAL_DeclName, D.getAttributes());
// Diagnose any ignored type attributes.
@@ -5673,7 +6004,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
<< T << D.getSourceRange();
D.setEllipsisLoc(SourceLocation());
} else {
- T = Context.getPackExpansionType(T, None, /*ExpectPackInType=*/false);
+ T = Context.getPackExpansionType(T, std::nullopt,
+ /*ExpectPackInType=*/false);
}
break;
case DeclaratorContext::TemplateParam:
@@ -5686,7 +6018,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// parameter packs in the type of the non-type template parameter, then
// it expands those parameter packs.
if (T->containsUnexpandedParameterPack())
- T = Context.getPackExpansionType(T, None);
+ T = Context.getPackExpansionType(T, std::nullopt);
else
S.Diag(D.getEllipsisLoc(),
LangOpts.CPlusPlus11
@@ -5717,6 +6049,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
case DeclaratorContext::TrailingReturnVar:
case DeclaratorContext::TemplateArg:
case DeclaratorContext::TemplateTypeArg:
+ case DeclaratorContext::Association:
// FIXME: We may want to allow parameter packs in block-literal contexts
// in the future.
S.Diag(D.getEllipsisLoc(),
@@ -5727,9 +6060,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
}
assert(!T.isNull() && "T must not be null at the end of this function");
- if (D.isInvalidType())
+ if (!AreDeclaratorChunksValid)
return Context.getTrivialTypeSourceInfo(T);
-
return GetTypeSourceInfoForDeclarator(state, T, TInfo);
}
@@ -5738,7 +6070,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
///
/// The result of this call will never be null, but the associated
/// type may be a null type if there's an unrecoverable error.
-TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S) {
+TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D) {
// Determine the type of the declarator. Not all forms of declarator
// have a type.
@@ -5794,7 +6126,7 @@ static void transferARCOwnershipToDeclaratorChunk(TypeProcessingState &state,
ParsedAttr *attr = D.getAttributePool().create(
&S.Context.Idents.get("objc_ownership"), SourceLocation(),
/*scope*/ nullptr, SourceLocation(),
- /*args*/ &Args, 1, ParsedAttr::AS_GNU);
+ /*args*/ &Args, 1, ParsedAttr::Form::GNU());
chunk.getAttrs().addAtEnd(attr);
// TODO: mark whether we did this inference?
}
@@ -5871,6 +6203,21 @@ static void fillAttributedTypeLoc(AttributedTypeLoc TL,
TL.setAttr(State.takeAttrForAttributedType(TL.getTypePtr()));
}
+static void fillMatrixTypeLoc(MatrixTypeLoc MTL,
+ const ParsedAttributesView &Attrs) {
+ for (const ParsedAttr &AL : Attrs) {
+ if (AL.getKind() == ParsedAttr::AT_MatrixType) {
+ MTL.setAttrNameLoc(AL.getLoc());
+ MTL.setAttrRowOperand(AL.getArgAsExpr(0));
+ MTL.setAttrColumnOperand(AL.getArgAsExpr(1));
+ MTL.setAttrOperandParensRange(SourceRange());
+ return;
+ }
+ }
+
+ llvm_unreachable("no matrix_type attribute found at the expected location!");
+}
+
namespace {
class TypeSpecLocFiller : public TypeLocVisitor<TypeSpecLocFiller> {
Sema &SemaRef;
@@ -5887,6 +6234,9 @@ namespace {
Visit(TL.getModifiedLoc());
fillAttributedTypeLoc(TL, State);
}
+ void VisitBTFTagAttributedTypeLoc(BTFTagAttributedTypeLoc TL) {
+ Visit(TL.getWrappedLoc());
+ }
void VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc TL) {
Visit(TL.getInnerLoc());
TL.setExpansionLoc(
@@ -5895,13 +6245,16 @@ namespace {
void VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
Visit(TL.getUnqualifiedLoc());
}
+ // Allow to fill pointee's type locations, e.g.,
+ // int __attr * __attr * __attr *p;
+ void VisitPointerTypeLoc(PointerTypeLoc TL) { Visit(TL.getNextTypeLoc()); }
void VisitTypedefTypeLoc(TypedefTypeLoc TL) {
TL.setNameLoc(DS.getTypeSpecTypeLoc());
}
void VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
TL.setNameLoc(DS.getTypeSpecTypeLoc());
// FIXME. We should have DS.getTypeSpecTypeEndLoc(). But, it requires
- // addition field. What we have is good enough for dispay of location
+ // addition field. What we have is good enough for display of location
// of 'fixit' on interface name.
TL.setNameEndLoc(DS.getEndLoc());
}
@@ -5939,22 +6292,28 @@ namespace {
}
void VisitTypeOfExprTypeLoc(TypeOfExprTypeLoc TL) {
- assert(DS.getTypeSpecType() == DeclSpec::TST_typeofExpr);
+ assert(DS.getTypeSpecType() == DeclSpec::TST_typeofExpr ||
+ DS.getTypeSpecType() == DeclSpec::TST_typeof_unqualExpr);
TL.setTypeofLoc(DS.getTypeSpecTypeLoc());
TL.setParensRange(DS.getTypeofParensRange());
}
void VisitTypeOfTypeLoc(TypeOfTypeLoc TL) {
- assert(DS.getTypeSpecType() == DeclSpec::TST_typeofType);
+ assert(DS.getTypeSpecType() == DeclSpec::TST_typeofType ||
+ DS.getTypeSpecType() == DeclSpec::TST_typeof_unqualType);
TL.setTypeofLoc(DS.getTypeSpecTypeLoc());
TL.setParensRange(DS.getTypeofParensRange());
assert(DS.getRepAsType());
TypeSourceInfo *TInfo = nullptr;
Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
- TL.setUnderlyingTInfo(TInfo);
+ TL.setUnmodifiedTInfo(TInfo);
+ }
+ void VisitDecltypeTypeLoc(DecltypeTypeLoc TL) {
+ assert(DS.getTypeSpecType() == DeclSpec::TST_decltype);
+ TL.setDecltypeLoc(DS.getTypeSpecTypeLoc());
+ TL.setRParenLoc(DS.getTypeofParensRange().getEnd());
}
void VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) {
- // FIXME: This holds only because we only have one unary transform.
- assert(DS.getTypeSpecType() == DeclSpec::TST_underlyingType);
+ assert(DS.isTransformTypeTrait(DS.getTypeSpecType()));
TL.setKWLoc(DS.getTypeSpecTypeLoc());
TL.setParensRange(DS.getTypeofParensRange());
assert(DS.getRepAsType());
@@ -5976,19 +6335,19 @@ namespace {
}
}
void VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
- ElaboratedTypeKeyword Keyword
- = TypeWithKeyword::getKeywordForTypeSpec(DS.getTypeSpecType());
if (DS.getTypeSpecType() == TST_typename) {
TypeSourceInfo *TInfo = nullptr;
Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
- if (TInfo) {
- TL.copy(TInfo->getTypeLoc().castAs<ElaboratedTypeLoc>());
- return;
- }
+ if (TInfo)
+ if (auto ETL = TInfo->getTypeLoc().getAs<ElaboratedTypeLoc>()) {
+ TL.copy(ETL);
+ return;
+ }
}
- TL.setElaboratedKeywordLoc(Keyword != ETK_None
- ? DS.getTypeSpecTypeLoc()
- : SourceLocation());
+ const ElaboratedType *T = TL.getTypePtr();
+ TL.setElaboratedKeywordLoc(T->getKeyword() != ElaboratedTypeKeyword::None
+ ? DS.getTypeSpecTypeLoc()
+ : SourceLocation());
const CXXScopeSpec& SS = DS.getTypeSpecScope();
TL.setQualifierLoc(SS.getWithLocInContext(Context));
Visit(TL.getNextTypeLoc().getUnqualifiedLoc());
@@ -6015,29 +6374,34 @@ namespace {
DS.getTypeSpecType() == TST_auto_type ||
DS.getTypeSpecType() == TST_unspecified);
TL.setNameLoc(DS.getTypeSpecTypeLoc());
+ if (DS.getTypeSpecType() == TST_decltype_auto)
+ TL.setRParenLoc(DS.getTypeofParensRange().getEnd());
if (!DS.isConstrainedAuto())
return;
TemplateIdAnnotation *TemplateId = DS.getRepAsTemplateId();
if (!TemplateId)
return;
- if (DS.getTypeSpecScope().isNotEmpty())
- TL.setNestedNameSpecifierLoc(
- DS.getTypeSpecScope().getWithLocInContext(Context));
- else
- TL.setNestedNameSpecifierLoc(NestedNameSpecifierLoc());
- TL.setTemplateKWLoc(TemplateId->TemplateKWLoc);
- TL.setConceptNameLoc(TemplateId->TemplateNameLoc);
- TL.setFoundDecl(nullptr);
- TL.setLAngleLoc(TemplateId->LAngleLoc);
- TL.setRAngleLoc(TemplateId->RAngleLoc);
- if (TemplateId->NumArgs == 0)
- return;
- TemplateArgumentListInfo TemplateArgsInfo;
- ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
- TemplateId->NumArgs);
- SemaRef.translateTemplateArguments(TemplateArgsPtr, TemplateArgsInfo);
- for (unsigned I = 0; I < TemplateId->NumArgs; ++I)
- TL.setArgLocInfo(I, TemplateArgsInfo.arguments()[I].getLocInfo());
+
+ NestedNameSpecifierLoc NNS =
+ (DS.getTypeSpecScope().isNotEmpty()
+ ? DS.getTypeSpecScope().getWithLocInContext(Context)
+ : NestedNameSpecifierLoc());
+ TemplateArgumentListInfo TemplateArgsInfo(TemplateId->LAngleLoc,
+ TemplateId->RAngleLoc);
+ if (TemplateId->NumArgs > 0) {
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+ SemaRef.translateTemplateArguments(TemplateArgsPtr, TemplateArgsInfo);
+ }
+ DeclarationNameInfo DNI = DeclarationNameInfo(
+ TL.getTypePtr()->getTypeConstraintConcept()->getDeclName(),
+ TemplateId->TemplateNameLoc);
+ auto *CR = ConceptReference::Create(
+ Context, NNS, TemplateId->TemplateKWLoc, DNI,
+ /*FoundDecl=*/nullptr,
+ /*NamedDecl=*/TL.getTypePtr()->getTypeConstraintConcept(),
+ ASTTemplateArgumentListInfo::Create(Context, TemplateArgsInfo));
+ TL.setConceptReference(CR);
}
void VisitTagTypeLoc(TagTypeLoc TL) {
TL.setNameLoc(DS.getTypeSpecTypeNameLoc());
@@ -6069,11 +6433,11 @@ namespace {
TL.getValueLoc().initializeFullCopy(TInfo->getTypeLoc());
}
- void VisitExtIntTypeLoc(ExtIntTypeLoc TL) {
+ void VisitExtIntTypeLoc(BitIntTypeLoc TL) {
TL.setNameLoc(DS.getTypeSpecTypeLoc());
}
- void VisitDependentExtIntTypeLoc(DependentExtIntTypeLoc TL) {
+ void VisitDependentExtIntTypeLoc(DependentBitIntTypeLoc TL) {
TL.setNameLoc(DS.getTypeSpecTypeLoc());
}
@@ -6103,6 +6467,9 @@ namespace {
void VisitAttributedTypeLoc(AttributedTypeLoc TL) {
fillAttributedTypeLoc(TL, State);
}
+ void VisitBTFTagAttributedTypeLoc(BTFTagAttributedTypeLoc TL) {
+ // nothing
+ }
void VisitAdjustedTypeLoc(AdjustedTypeLoc TL) {
// nothing
}
@@ -6203,7 +6570,7 @@ namespace {
assert(Chunk.Kind == DeclaratorChunk::Pipe);
TL.setKWLoc(Chunk.Loc);
}
- void VisitExtIntTypeLoc(ExtIntTypeLoc TL) {
+ void VisitBitIntTypeLoc(BitIntTypeLoc TL) {
TL.setNameLoc(Chunk.Loc);
}
void VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc TL) {
@@ -6220,6 +6587,9 @@ namespace {
VisitDependentSizedExtVectorTypeLoc(DependentSizedExtVectorTypeLoc TL) {
TL.setNameLoc(Chunk.Loc);
}
+ void VisitMatrixTypeLoc(MatrixTypeLoc TL) {
+ fillMatrixTypeLoc(TL, Chunk.getAttrs());
+ }
void VisitTypeLoc(TypeLoc TL) {
llvm_unreachable("unsupported TypeLoc kind in declarator!");
@@ -6267,21 +6637,6 @@ fillDependentAddressSpaceTypeLoc(DependentAddressSpaceTypeLoc DASTL,
"no address_space attribute found at the expected location!");
}
-static void fillMatrixTypeLoc(MatrixTypeLoc MTL,
- const ParsedAttributesView &Attrs) {
- for (const ParsedAttr &AL : Attrs) {
- if (AL.getKind() == ParsedAttr::AT_MatrixType) {
- MTL.setAttrNameLoc(AL.getLoc());
- MTL.setAttrRowOperand(AL.getArgAsExpr(0));
- MTL.setAttrColumnOperand(AL.getArgAsExpr(1));
- MTL.setAttrOperandParensRange(SourceRange());
- return;
- }
- }
-
- llvm_unreachable("no matrix_type attribute found at the expected location!");
-}
-
/// Create and instantiate a TypeSourceInfo with type source information.
///
/// \param T QualType referring to the type as written in source code.
@@ -6306,6 +6661,12 @@ GetTypeSourceInfoForDeclarator(TypeProcessingState &State,
}
for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
+ // Microsoft property fields can have multiple sizeless array chunks
+ // (i.e. int x[][][]). Don't create more than one level of incomplete array.
+ if (CurrTL.getTypeLocClass() == TypeLoc::IncompleteArray && e != 1 &&
+ D.getDeclSpec().getAttributes().hasMSPropertyAttr())
+ continue;
+
// An AtomicTypeLoc might be produced by an atomic qualifier in this
// declarator chunk.
if (AtomicTypeLoc ATL = CurrTL.getAs<AtomicTypeLoc>()) {
@@ -6313,29 +6674,42 @@ GetTypeSourceInfoForDeclarator(TypeProcessingState &State,
CurrTL = ATL.getValueLoc().getUnqualifiedLoc();
}
- while (MacroQualifiedTypeLoc TL = CurrTL.getAs<MacroQualifiedTypeLoc>()) {
- TL.setExpansionLoc(
- State.getExpansionLocForMacroQualifiedType(TL.getTypePtr()));
- CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
- }
+ bool HasDesugaredTypeLoc = true;
+ while (HasDesugaredTypeLoc) {
+ switch (CurrTL.getTypeLocClass()) {
+ case TypeLoc::MacroQualified: {
+ auto TL = CurrTL.castAs<MacroQualifiedTypeLoc>();
+ TL.setExpansionLoc(
+ State.getExpansionLocForMacroQualifiedType(TL.getTypePtr()));
+ CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
+ break;
+ }
- while (AttributedTypeLoc TL = CurrTL.getAs<AttributedTypeLoc>()) {
- fillAttributedTypeLoc(TL, State);
- CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
- }
+ case TypeLoc::Attributed: {
+ auto TL = CurrTL.castAs<AttributedTypeLoc>();
+ fillAttributedTypeLoc(TL, State);
+ CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
+ break;
+ }
- while (DependentAddressSpaceTypeLoc TL =
- CurrTL.getAs<DependentAddressSpaceTypeLoc>()) {
- fillDependentAddressSpaceTypeLoc(TL, D.getTypeObject(i).getAttrs());
- CurrTL = TL.getPointeeTypeLoc().getUnqualifiedLoc();
- }
+ case TypeLoc::Adjusted:
+ case TypeLoc::BTFTagAttributed: {
+ CurrTL = CurrTL.getNextTypeLoc().getUnqualifiedLoc();
+ break;
+ }
- if (MatrixTypeLoc TL = CurrTL.getAs<MatrixTypeLoc>())
- fillMatrixTypeLoc(TL, D.getTypeObject(i).getAttrs());
+ case TypeLoc::DependentAddressSpace: {
+ auto TL = CurrTL.castAs<DependentAddressSpaceTypeLoc>();
+ fillDependentAddressSpaceTypeLoc(TL, D.getTypeObject(i).getAttrs());
+ CurrTL = TL.getPointeeTypeLoc().getUnqualifiedLoc();
+ break;
+ }
- // FIXME: Ordering here?
- while (AdjustedTypeLoc TL = CurrTL.getAs<AdjustedTypeLoc>())
- CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
+ default:
+ HasDesugaredTypeLoc = false;
+ break;
+ }
+ }
DeclaratorLocFiller(S.Context, State, D.getTypeObject(i)).Visit(CurrTL);
CurrTL = CurrTL.getNextTypeLoc().getUnqualifiedLoc();
@@ -6359,8 +6733,8 @@ ParsedType Sema::CreateParsedType(QualType T, TypeSourceInfo *TInfo) {
// FIXME: LocInfoTypes are "transient", only needed for passing to/from Parser
// and Sema during declaration parsing. Try deallocating/caching them when
// it's appropriate, instead of allocating them and keeping them around.
- LocInfoType *LocT = (LocInfoType*)BumpAlloc.Allocate(sizeof(LocInfoType),
- TypeAlignment);
+ LocInfoType *LocT = (LocInfoType *)BumpAlloc.Allocate(sizeof(LocInfoType),
+ alignof(LocInfoType));
new (LocT) LocInfoType(T, TInfo);
assert(LocT->getTypeClass() != T->getTypeClass() &&
"LocInfoType's TypeClass conflicts with an existing Type class");
@@ -6374,13 +6748,13 @@ void LocInfoType::getAsStringInternal(std::string &Str,
" GetTypeFromParser");
}
-TypeResult Sema::ActOnTypeName(Scope *S, Declarator &D) {
+TypeResult Sema::ActOnTypeName(Declarator &D) {
// C99 6.7.6: Type names have no identifier. This is already validated by
// the parser.
assert(D.getIdentifier() == nullptr &&
"Type name should have no identifier!");
- TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D);
QualType T = TInfo->getType();
if (D.isInvalidType())
return true;
@@ -6420,7 +6794,7 @@ static bool BuildAddressSpaceIndex(Sema &S, LangAS &ASIdx,
const Expr *AddrSpace,
SourceLocation AttrLoc) {
if (!AddrSpace->isValueDependent()) {
- Optional<llvm::APSInt> OptAddrSpace =
+ std::optional<llvm::APSInt> OptAddrSpace =
AddrSpace->getIntegerConstantExpr(S.Context);
if (!OptAddrSpace) {
S.Diag(AttrLoc, diag::err_attribute_argument_type)
@@ -6495,6 +6869,33 @@ QualType Sema::BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
return BuildAddressSpaceAttr(T, ASIdx, AddrSpace, AttrLoc);
}
+static void HandleBTFTypeTagAttribute(QualType &Type, const ParsedAttr &Attr,
+ TypeProcessingState &State) {
+ Sema &S = State.getSema();
+
+ // Check the number of attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << Attr << 1;
+ Attr.setInvalid();
+ return;
+ }
+
+ // Ensure the argument is a string.
+ auto *StrLiteral = dyn_cast<StringLiteral>(Attr.getArgAsExpr(0));
+ if (!StrLiteral) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
+ << Attr << AANT_ArgumentString;
+ Attr.setInvalid();
+ return;
+ }
+
+ ASTContext &Ctx = S.Context;
+ StringRef BTFTypeTag = StrLiteral->getString();
+ Type = State.getBTFTagAttributedType(
+ ::new (Ctx) BTFTypeTagAttr(Ctx, Attr, BTFTypeTag), Type);
+}
+
/// HandleAddressSpaceTypeAttribute - Process an address_space attribute on the
/// specified type. The attribute contains 1 argument, the id of the address
/// space for the type.
@@ -6561,6 +6962,8 @@ static void HandleAddressSpaceTypeAttribute(QualType &Type,
// The keyword-based type attributes imply which address space to use.
ASIdx = S.getLangOpts().SYCLIsDevice ? Attr.asSYCLLangAS()
: Attr.asOpenCLLangAS();
+ if (S.getLangOpts().HLSL)
+ ASIdx = Attr.asHLSLLangAS();
if (ASIdx == LangAS::Default)
llvm_unreachable("Invalid address space");
@@ -7012,17 +7415,25 @@ static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &State,
}
std::bitset<attr::LastAttr> Attrs;
- attr::Kind NewAttrKind = A->getKind();
QualType Desugared = Type;
- const AttributedType *AT = dyn_cast<AttributedType>(Type);
- while (AT) {
+ for (;;) {
+ if (const TypedefType *TT = dyn_cast<TypedefType>(Desugared)) {
+ Desugared = TT->desugar();
+ continue;
+ } else if (const ElaboratedType *ET = dyn_cast<ElaboratedType>(Desugared)) {
+ Desugared = ET->desugar();
+ continue;
+ }
+ const AttributedType *AT = dyn_cast<AttributedType>(Desugared);
+ if (!AT)
+ break;
Attrs[AT->getAttrKind()] = true;
Desugared = AT->getModifiedType();
- AT = dyn_cast<AttributedType>(Desugared);
}
// You cannot specify duplicate type attributes, so if the attribute has
// already been applied, flag it.
+ attr::Kind NewAttrKind = A->getKind();
if (Attrs[NewAttrKind]) {
S.Diag(PAttr.getLoc(), diag::warn_duplicate_attribute_exact) << PAttr;
return true;
@@ -7034,23 +7445,20 @@ static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &State,
if (Attrs[attr::Ptr32] && Attrs[attr::Ptr64]) {
S.Diag(PAttr.getLoc(), diag::err_attributes_are_not_compatible)
<< "'__ptr32'"
- << "'__ptr64'";
+ << "'__ptr64'" << /*isRegularKeyword=*/0;
return true;
} else if (Attrs[attr::SPtr] && Attrs[attr::UPtr]) {
S.Diag(PAttr.getLoc(), diag::err_attributes_are_not_compatible)
<< "'__sptr'"
- << "'__uptr'";
+ << "'__uptr'" << /*isRegularKeyword=*/0;
return true;
}
- // Pointer type qualifiers can only operate on pointer types, but not
- // pointer-to-member types.
- //
- // FIXME: Should we really be disallowing this attribute if there is any
- // type sugar between it and the pointer (other than attributes)? Eg, this
- // disallows the attribute on a parenthesized pointer.
- // And if so, should we really allow *any* type attribute?
+ // Check the raw (i.e., desugared) Canonical type to see if it
+ // is a pointer type.
if (!isa<PointerType>(Desugared)) {
+ // Pointer type qualifiers can only operate on pointer types, but not
+ // pointer-to-member types.
if (Type->isMemberPointerType())
S.Diag(PAttr.getLoc(), diag::err_attribute_no_member_pointers) << PAttr;
else
@@ -7060,7 +7468,8 @@ static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &State,
// Add address space to type based on its attributes.
LangAS ASIdx = LangAS::Default;
- uint64_t PtrWidth = S.Context.getTargetInfo().getPointerWidth(0);
+ uint64_t PtrWidth =
+ S.Context.getTargetInfo().getPointerWidth(LangAS::Default);
if (PtrWidth == 32) {
if (Attrs[attr::Ptr64])
ASIdx = LangAS::ptr64;
@@ -7081,6 +7490,56 @@ static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &State,
return false;
}
+static bool HandleWebAssemblyFuncrefAttr(TypeProcessingState &State,
+ QualType &QT, ParsedAttr &PAttr) {
+ assert(PAttr.getKind() == ParsedAttr::AT_WebAssemblyFuncref);
+
+ Sema &S = State.getSema();
+ Attr *A = createSimpleAttr<WebAssemblyFuncrefAttr>(S.Context, PAttr);
+
+ std::bitset<attr::LastAttr> Attrs;
+ attr::Kind NewAttrKind = A->getKind();
+ const auto *AT = dyn_cast<AttributedType>(QT);
+ while (AT) {
+ Attrs[AT->getAttrKind()] = true;
+ AT = dyn_cast<AttributedType>(AT->getModifiedType());
+ }
+
+ // You cannot specify duplicate type attributes, so if the attribute has
+ // already been applied, flag it.
+ if (Attrs[NewAttrKind]) {
+ S.Diag(PAttr.getLoc(), diag::warn_duplicate_attribute_exact) << PAttr;
+ return true;
+ }
+
+ // Add address space to type based on its attributes.
+ LangAS ASIdx = LangAS::wasm_funcref;
+ QualType Pointee = QT->getPointeeType();
+ Pointee = S.Context.getAddrSpaceQualType(
+ S.Context.removeAddrSpaceQualType(Pointee), ASIdx);
+ QT = State.getAttributedType(A, QT, S.Context.getPointerType(Pointee));
+ return false;
+}
+
+/// Rebuild an attributed type without the nullability attribute on it.
+static QualType rebuildAttributedTypeWithoutNullability(ASTContext &Ctx,
+ QualType Type) {
+ auto Attributed = dyn_cast<AttributedType>(Type.getTypePtr());
+ if (!Attributed)
+ return Type;
+
+ // Skip the nullability attribute; we're done.
+ if (Attributed->getImmediateNullability())
+ return Attributed->getModifiedType();
+
+ // Build the modified type.
+ QualType Modified = rebuildAttributedTypeWithoutNullability(
+ Ctx, Attributed->getModifiedType());
+ assert(Modified.getTypePtr() != Attributed->getModifiedType().getTypePtr());
+ return Ctx.getAttributedType(Attributed->getAttrKind(), Modified,
+ Attributed->getEquivalentType());
+}
+
/// Map a nullability attribute kind to a nullability kind.
static NullabilityKind mapNullabilityAttrKind(ParsedAttr::Kind kind) {
switch (kind) {
@@ -7101,74 +7560,65 @@ static NullabilityKind mapNullabilityAttrKind(ParsedAttr::Kind kind) {
}
}
-/// Applies a nullability type specifier to the given type, if possible.
-///
-/// \param state The type processing state.
-///
-/// \param type The type to which the nullability specifier will be
-/// added. On success, this type will be updated appropriately.
-///
-/// \param attr The attribute as written on the type.
-///
-/// \param allowOnArrayType Whether to accept nullability specifiers on an
-/// array type (e.g., because it will decay to a pointer).
-///
-/// \returns true if a problem has been diagnosed, false on success.
-static bool checkNullabilityTypeSpecifier(TypeProcessingState &state,
- QualType &type,
- ParsedAttr &attr,
- bool allowOnArrayType) {
- Sema &S = state.getSema();
-
- NullabilityKind nullability = mapNullabilityAttrKind(attr.getKind());
- SourceLocation nullabilityLoc = attr.getLoc();
- bool isContextSensitive = attr.isContextSensitiveKeywordAttribute();
-
- recordNullabilitySeen(S, nullabilityLoc);
+static bool CheckNullabilityTypeSpecifier(
+ Sema &S, TypeProcessingState *State, ParsedAttr *PAttr, QualType &QT,
+ NullabilityKind Nullability, SourceLocation NullabilityLoc,
+ bool IsContextSensitive, bool AllowOnArrayType, bool OverrideExisting) {
+ bool Implicit = (State == nullptr);
+ if (!Implicit)
+ recordNullabilitySeen(S, NullabilityLoc);
// Check for existing nullability attributes on the type.
- QualType desugared = type;
- while (auto attributed = dyn_cast<AttributedType>(desugared.getTypePtr())) {
+ QualType Desugared = QT;
+ while (auto *Attributed = dyn_cast<AttributedType>(Desugared.getTypePtr())) {
// Check whether there is already a null
- if (auto existingNullability = attributed->getImmediateNullability()) {
+ if (auto ExistingNullability = Attributed->getImmediateNullability()) {
// Duplicated nullability.
- if (nullability == *existingNullability) {
- S.Diag(nullabilityLoc, diag::warn_nullability_duplicate)
- << DiagNullabilityKind(nullability, isContextSensitive)
- << FixItHint::CreateRemoval(nullabilityLoc);
+ if (Nullability == *ExistingNullability) {
+ if (Implicit)
+ break;
+
+ S.Diag(NullabilityLoc, diag::warn_nullability_duplicate)
+ << DiagNullabilityKind(Nullability, IsContextSensitive)
+ << FixItHint::CreateRemoval(NullabilityLoc);
break;
}
- // Conflicting nullability.
- S.Diag(nullabilityLoc, diag::err_nullability_conflicting)
- << DiagNullabilityKind(nullability, isContextSensitive)
- << DiagNullabilityKind(*existingNullability, false);
- return true;
+ if (!OverrideExisting) {
+ // Conflicting nullability.
+ S.Diag(NullabilityLoc, diag::err_nullability_conflicting)
+ << DiagNullabilityKind(Nullability, IsContextSensitive)
+ << DiagNullabilityKind(*ExistingNullability, false);
+ return true;
+ }
+
+ // Rebuild the attributed type, dropping the existing nullability.
+ QT = rebuildAttributedTypeWithoutNullability(S.Context, QT);
}
- desugared = attributed->getModifiedType();
+ Desugared = Attributed->getModifiedType();
}
// If there is already a different nullability specifier, complain.
// This (unlike the code above) looks through typedefs that might
// have nullability specifiers on them, which means we cannot
// provide a useful Fix-It.
- if (auto existingNullability = desugared->getNullability(S.Context)) {
- if (nullability != *existingNullability) {
- S.Diag(nullabilityLoc, diag::err_nullability_conflicting)
- << DiagNullabilityKind(nullability, isContextSensitive)
- << DiagNullabilityKind(*existingNullability, false);
+ if (auto ExistingNullability = Desugared->getNullability()) {
+ if (Nullability != *ExistingNullability && !Implicit) {
+ S.Diag(NullabilityLoc, diag::err_nullability_conflicting)
+ << DiagNullabilityKind(Nullability, IsContextSensitive)
+ << DiagNullabilityKind(*ExistingNullability, false);
// Try to find the typedef with the existing nullability specifier.
- if (auto typedefType = desugared->getAs<TypedefType>()) {
- TypedefNameDecl *typedefDecl = typedefType->getDecl();
+ if (auto TT = Desugared->getAs<TypedefType>()) {
+ TypedefNameDecl *typedefDecl = TT->getDecl();
QualType underlyingType = typedefDecl->getUnderlyingType();
- if (auto typedefNullability
- = AttributedType::stripOuterNullability(underlyingType)) {
- if (*typedefNullability == *existingNullability) {
+ if (auto typedefNullability =
+ AttributedType::stripOuterNullability(underlyingType)) {
+ if (*typedefNullability == *ExistingNullability) {
S.Diag(typedefDecl->getLocation(), diag::note_nullability_here)
- << DiagNullabilityKind(*existingNullability, false);
+ << DiagNullabilityKind(*ExistingNullability, false);
}
}
}
@@ -7178,44 +7628,73 @@ static bool checkNullabilityTypeSpecifier(TypeProcessingState &state,
}
// If this definitely isn't a pointer type, reject the specifier.
- if (!desugared->canHaveNullability() &&
- !(allowOnArrayType && desugared->isArrayType())) {
- S.Diag(nullabilityLoc, diag::err_nullability_nonpointer)
- << DiagNullabilityKind(nullability, isContextSensitive) << type;
+ if (!Desugared->canHaveNullability() &&
+ !(AllowOnArrayType && Desugared->isArrayType())) {
+ if (!Implicit)
+ S.Diag(NullabilityLoc, diag::err_nullability_nonpointer)
+ << DiagNullabilityKind(Nullability, IsContextSensitive) << QT;
+
return true;
}
// For the context-sensitive keywords/Objective-C property
// attributes, require that the type be a single-level pointer.
- if (isContextSensitive) {
+ if (IsContextSensitive) {
// Make sure that the pointee isn't itself a pointer type.
const Type *pointeeType = nullptr;
- if (desugared->isArrayType())
- pointeeType = desugared->getArrayElementTypeNoTypeQual();
- else if (desugared->isAnyPointerType())
- pointeeType = desugared->getPointeeType().getTypePtr();
+ if (Desugared->isArrayType())
+ pointeeType = Desugared->getArrayElementTypeNoTypeQual();
+ else if (Desugared->isAnyPointerType())
+ pointeeType = Desugared->getPointeeType().getTypePtr();
if (pointeeType && (pointeeType->isAnyPointerType() ||
pointeeType->isObjCObjectPointerType() ||
pointeeType->isMemberPointerType())) {
- S.Diag(nullabilityLoc, diag::err_nullability_cs_multilevel)
- << DiagNullabilityKind(nullability, true)
- << type;
- S.Diag(nullabilityLoc, diag::note_nullability_type_specifier)
- << DiagNullabilityKind(nullability, false)
- << type
- << FixItHint::CreateReplacement(nullabilityLoc,
- getNullabilitySpelling(nullability));
+ S.Diag(NullabilityLoc, diag::err_nullability_cs_multilevel)
+ << DiagNullabilityKind(Nullability, true) << QT;
+ S.Diag(NullabilityLoc, diag::note_nullability_type_specifier)
+ << DiagNullabilityKind(Nullability, false) << QT
+ << FixItHint::CreateReplacement(NullabilityLoc,
+ getNullabilitySpelling(Nullability));
return true;
}
}
// Form the attributed type.
- type = state.getAttributedType(
- createNullabilityAttr(S.Context, attr, nullability), type, type);
+ if (State) {
+ assert(PAttr);
+ Attr *A = createNullabilityAttr(S.Context, *PAttr, Nullability);
+ QT = State->getAttributedType(A, QT, QT);
+ } else {
+ attr::Kind attrKind = AttributedType::getNullabilityAttrKind(Nullability);
+ QT = S.Context.getAttributedType(attrKind, QT, QT);
+ }
return false;
}
+static bool CheckNullabilityTypeSpecifier(TypeProcessingState &State,
+ QualType &Type, ParsedAttr &Attr,
+ bool AllowOnArrayType) {
+ NullabilityKind Nullability = mapNullabilityAttrKind(Attr.getKind());
+ SourceLocation NullabilityLoc = Attr.getLoc();
+ bool IsContextSensitive = Attr.isContextSensitiveKeywordAttribute();
+
+ return CheckNullabilityTypeSpecifier(State.getSema(), &State, &Attr, Type,
+ Nullability, NullabilityLoc,
+ IsContextSensitive, AllowOnArrayType,
+ /*overrideExisting*/ false);
+}
+
+bool Sema::CheckImplicitNullabilityTypeSpecifier(QualType &Type,
+ NullabilityKind Nullability,
+ SourceLocation DiagLoc,
+ bool AllowArrayTypes,
+ bool OverrideExisting) {
+ return CheckNullabilityTypeSpecifier(
+ *this, nullptr, nullptr, Type, Nullability, DiagLoc,
+ /*isContextSensitive*/ false, AllowArrayTypes, OverrideExisting);
+}
+
/// Check the application of the Objective-C '__kindof' qualifier to
/// the given type.
static bool checkObjCKindOfType(TypeProcessingState &state, QualType &type,
@@ -7253,7 +7732,7 @@ static bool checkObjCKindOfType(TypeProcessingState &state, QualType &type,
// If we started with an object pointer type, rebuild it.
if (ptrType) {
equivType = S.Context.getObjCObjectPointerType(equivType);
- if (auto nullability = type->getNullability(S.Context)) {
+ if (auto nullability = type->getNullability()) {
// We create a nullability attribute from the __kindof attribute.
// Make sure that will make sense.
assert(attr.getAttributeSpellingListIndex() == 0 &&
@@ -7382,6 +7861,12 @@ static Attr *getCCTypeAttr(ASTContext &Ctx, ParsedAttr &Attr) {
return createSimpleAttr<VectorCallAttr>(Ctx, Attr);
case ParsedAttr::AT_AArch64VectorPcs:
return createSimpleAttr<AArch64VectorPcsAttr>(Ctx, Attr);
+ case ParsedAttr::AT_AArch64SVEPcs:
+ return createSimpleAttr<AArch64SVEPcsAttr>(Ctx, Attr);
+ case ParsedAttr::AT_ArmStreaming:
+ return createSimpleAttr<ArmStreamingAttr>(Ctx, Attr);
+ case ParsedAttr::AT_AMDGPUKernelCall:
+ return createSimpleAttr<AMDGPUKernelCallAttr>(Ctx, Attr);
case ParsedAttr::AT_Pcs: {
// The attribute may have had a fixit applied where we treated an
// identifier as a string literal. The contents of the string are valid,
@@ -7406,14 +7891,83 @@ static Attr *getCCTypeAttr(ASTContext &Ctx, ParsedAttr &Attr) {
return createSimpleAttr<PreserveMostAttr>(Ctx, Attr);
case ParsedAttr::AT_PreserveAll:
return createSimpleAttr<PreserveAllAttr>(Ctx, Attr);
+ case ParsedAttr::AT_M68kRTD:
+ return createSimpleAttr<M68kRTDAttr>(Ctx, Attr);
}
llvm_unreachable("unexpected attribute kind!");
}
+static bool checkMutualExclusion(TypeProcessingState &state,
+ const FunctionProtoType::ExtProtoInfo &EPI,
+ ParsedAttr &Attr,
+ AttributeCommonInfo::Kind OtherKind) {
+ auto OtherAttr = std::find_if(
+ state.getCurrentAttributes().begin(), state.getCurrentAttributes().end(),
+ [OtherKind](const ParsedAttr &A) { return A.getKind() == OtherKind; });
+ if (OtherAttr == state.getCurrentAttributes().end() || OtherAttr->isInvalid())
+ return false;
+
+ Sema &S = state.getSema();
+ S.Diag(Attr.getLoc(), diag::err_attributes_are_not_compatible)
+ << *OtherAttr << Attr
+ << (OtherAttr->isRegularKeywordAttribute() ||
+ Attr.isRegularKeywordAttribute());
+ S.Diag(OtherAttr->getLoc(), diag::note_conflicting_attribute);
+ Attr.setInvalid();
+ return true;
+}
+
+static bool handleArmStateAttribute(Sema &S,
+ FunctionProtoType::ExtProtoInfo &EPI,
+ ParsedAttr &Attr,
+ FunctionType::ArmStateValue State) {
+ if (!Attr.getNumArgs()) {
+ S.Diag(Attr.getLoc(), diag::err_missing_arm_state) << Attr;
+ Attr.setInvalid();
+ return true;
+ }
+
+ for (unsigned I = 0; I < Attr.getNumArgs(); ++I) {
+ StringRef StateName;
+ SourceLocation LiteralLoc;
+ if (!S.checkStringLiteralArgumentAttr(Attr, I, StateName, &LiteralLoc))
+ return true;
+
+ unsigned Shift;
+ FunctionType::ArmStateValue ExistingState;
+ if (StateName == "za") {
+ Shift = FunctionType::SME_ZAShift;
+ ExistingState = FunctionType::getArmZAState(EPI.AArch64SMEAttributes);
+ } else if (StateName == "zt0") {
+ Shift = FunctionType::SME_ZT0Shift;
+ ExistingState = FunctionType::getArmZT0State(EPI.AArch64SMEAttributes);
+ } else {
+ S.Diag(LiteralLoc, diag::err_unknown_arm_state) << StateName;
+ Attr.setInvalid();
+ return true;
+ }
+
+ // __arm_in(S), __arm_out(S), __arm_inout(S) and __arm_preserves(S)
+ // are all mutually exclusive for the same S, so check if there are
+ // conflicting attributes.
+ if (ExistingState != FunctionType::ARM_None && ExistingState != State) {
+ S.Diag(LiteralLoc, diag::err_conflicting_attributes_arm_state)
+ << StateName;
+ Attr.setInvalid();
+ return true;
+ }
+
+ EPI.setArmSMEAttribute(
+ (FunctionType::AArch64SMETypeAttributes)((State << Shift)));
+ }
+ return false;
+}
+
/// Process an individual function attribute. Returns true to
/// indicate that the attribute was handled, false if it wasn't.
static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
- QualType &type) {
+ QualType &type,
+ Sema::CUDAFunctionTarget CFT) {
Sema &S = state.getSema();
FunctionTypeUnwrapper unwrapped(S, type);
@@ -7527,8 +8081,8 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
CallingConv CC = fn->getCallConv();
if (CC == CC_X86FastCall) {
S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible)
- << FunctionType::getNameForCallConv(CC)
- << "regparm";
+ << FunctionType::getNameForCallConv(CC) << "regparm"
+ << attr.isRegularKeywordAttribute();
attr.setInvalid();
return true;
}
@@ -7539,6 +8093,72 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
return true;
}
+ if (attr.getKind() == ParsedAttr::AT_ArmStreaming ||
+ attr.getKind() == ParsedAttr::AT_ArmStreamingCompatible ||
+ attr.getKind() == ParsedAttr::AT_ArmPreserves ||
+ attr.getKind() == ParsedAttr::AT_ArmIn ||
+ attr.getKind() == ParsedAttr::AT_ArmOut ||
+ attr.getKind() == ParsedAttr::AT_ArmInOut) {
+ if (S.CheckAttrTarget(attr))
+ return true;
+
+ if (attr.getKind() == ParsedAttr::AT_ArmStreaming ||
+ attr.getKind() == ParsedAttr::AT_ArmStreamingCompatible)
+ if (S.CheckAttrNoArgs(attr))
+ return true;
+
+ if (!unwrapped.isFunctionType())
+ return false;
+
+ const auto *FnTy = unwrapped.get()->getAs<FunctionProtoType>();
+ if (!FnTy) {
+ // SME ACLE attributes are not supported on K&R-style unprototyped C
+ // functions.
+ S.Diag(attr.getLoc(), diag::warn_attribute_wrong_decl_type) <<
+ attr << attr.isRegularKeywordAttribute() << ExpectedFunctionWithProtoType;
+ attr.setInvalid();
+ return false;
+ }
+
+ FunctionProtoType::ExtProtoInfo EPI = FnTy->getExtProtoInfo();
+ switch (attr.getKind()) {
+ case ParsedAttr::AT_ArmStreaming:
+ if (checkMutualExclusion(state, EPI, attr,
+ ParsedAttr::AT_ArmStreamingCompatible))
+ return true;
+ EPI.setArmSMEAttribute(FunctionType::SME_PStateSMEnabledMask);
+ break;
+ case ParsedAttr::AT_ArmStreamingCompatible:
+ if (checkMutualExclusion(state, EPI, attr, ParsedAttr::AT_ArmStreaming))
+ return true;
+ EPI.setArmSMEAttribute(FunctionType::SME_PStateSMCompatibleMask);
+ break;
+ case ParsedAttr::AT_ArmPreserves:
+ if (handleArmStateAttribute(S, EPI, attr, FunctionType::ARM_Preserves))
+ return true;
+ break;
+ case ParsedAttr::AT_ArmIn:
+ if (handleArmStateAttribute(S, EPI, attr, FunctionType::ARM_In))
+ return true;
+ break;
+ case ParsedAttr::AT_ArmOut:
+ if (handleArmStateAttribute(S, EPI, attr, FunctionType::ARM_Out))
+ return true;
+ break;
+ case ParsedAttr::AT_ArmInOut:
+ if (handleArmStateAttribute(S, EPI, attr, FunctionType::ARM_InOut))
+ return true;
+ break;
+ default:
+ llvm_unreachable("Unsupported attribute");
+ }
+
+ QualType newtype = S.Context.getFunctionType(FnTy->getReturnType(),
+ FnTy->getParamTypes(), EPI);
+ type = unwrapped.wrap(S, newtype->getAs<FunctionType>());
+ return true;
+ }
+
if (attr.getKind() == ParsedAttr::AT_NoThrow) {
// Delay if this is not a function type.
if (!unwrapped.isFunctionType())
@@ -7564,7 +8184,7 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
case EST_NoexceptTrue:
case EST_NoThrow:
// Exception spec doesn't conflict with nothrow, so don't warn.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case EST_Unparsed:
case EST_Uninstantiated:
case EST_DependentNoexcept:
@@ -7595,7 +8215,7 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
// Otherwise, a calling convention.
CallingConv CC;
- if (S.CheckCallingConvAttr(attr, CC))
+ if (S.CheckCallingConvAttr(attr, CC, /*FunctionDecl=*/nullptr, CFT))
return true;
const FunctionType *fn = unwrapped.get();
@@ -7607,8 +8227,9 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
// and the CCs don't match.
if (S.getCallingConvAttributedType(type)) {
S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible)
- << FunctionType::getNameForCallConv(CC)
- << FunctionType::getNameForCallConv(CCOld);
+ << FunctionType::getNameForCallConv(CC)
+ << FunctionType::getNameForCallConv(CCOld)
+ << attr.isRegularKeywordAttribute();
attr.setInvalid();
return true;
}
@@ -7640,7 +8261,8 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
// Also diagnose fastcall with regparm.
if (CC == CC_X86FastCall && fn->getHasRegParm()) {
S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible)
- << "regparm" << FunctionType::getNameForCallConv(CC_X86FastCall);
+ << "regparm" << FunctionType::getNameForCallConv(CC_X86FastCall)
+ << attr.isRegularKeywordAttribute();
attr.setInvalid();
return true;
}
@@ -7674,14 +8296,15 @@ bool Sema::hasExplicitCallingConv(QualType T) {
return false;
}
-void Sema::adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
- SourceLocation Loc) {
+void Sema::adjustMemberFunctionCC(QualType &T, bool HasThisPointer,
+ bool IsCtorOrDtor, SourceLocation Loc) {
FunctionTypeUnwrapper Unwrapped(*this, T);
const FunctionType *FT = Unwrapped.get();
bool IsVariadic = (isa<FunctionProtoType>(FT) &&
cast<FunctionProtoType>(FT)->isVariadic());
CallingConv CurCC = FT->getCallConv();
- CallingConv ToCC = Context.getDefaultCallingConvention(IsVariadic, !IsStatic);
+ CallingConv ToCC =
+ Context.getDefaultCallingConvention(IsVariadic, HasThisPointer);
if (CurCC == ToCC)
return;
@@ -7701,9 +8324,9 @@ void Sema::adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
// we should adjust a __cdecl type to __thiscall for instance methods, and a
// __thiscall type to __cdecl for static methods.
CallingConv DefaultCC =
- Context.getDefaultCallingConvention(IsVariadic, IsStatic);
+ Context.getDefaultCallingConvention(IsVariadic, !HasThisPointer);
- if (CurCC != DefaultCC || DefaultCC == ToCC)
+ if (CurCC != DefaultCC)
return;
if (hasExplicitCallingConv(T))
@@ -7757,8 +8380,7 @@ static void HandleExtVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr,
CurType = T;
}
-static bool isPermittedNeonBaseType(QualType &Ty,
- VectorType::VectorKind VecKind, Sema &S) {
+static bool isPermittedNeonBaseType(QualType &Ty, VectorKind VecKind, Sema &S) {
const BuiltinType *BTy = Ty->getAs<BuiltinType>();
if (!BTy)
return false;
@@ -7770,7 +8392,7 @@ static bool isPermittedNeonBaseType(QualType &Ty,
bool IsPolyUnsigned = Triple.getArch() == llvm::Triple::aarch64 ||
Triple.getArch() == llvm::Triple::aarch64_32 ||
Triple.getArch() == llvm::Triple::aarch64_be;
- if (VecKind == VectorType::NeonPolyVector) {
+ if (VecKind == VectorKind::NeonPoly) {
if (IsPolyUnsigned) {
// AArch64 polynomial vectors are unsigned.
return BTy->getKind() == BuiltinType::UChar ||
@@ -7809,8 +8431,8 @@ static bool isPermittedNeonBaseType(QualType &Ty,
static bool verifyValidIntegerConstantExpr(Sema &S, const ParsedAttr &Attr,
llvm::APSInt &Result) {
const auto *AttrExpr = Attr.getArgAsExpr(0);
- if (!AttrExpr->isTypeDependent() && !AttrExpr->isValueDependent()) {
- if (Optional<llvm::APSInt> Res =
+ if (!AttrExpr->isTypeDependent()) {
+ if (std::optional<llvm::APSInt> Res =
AttrExpr->getIntegerConstantExpr(S.Context)) {
Result = *Res;
return true;
@@ -7830,20 +8452,41 @@ static bool verifyValidIntegerConstantExpr(Sema &S, const ParsedAttr &Attr,
/// not the vector size in bytes. The vector width and element type must
/// match one of the standard Neon vector types.
static void HandleNeonVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr,
- Sema &S, VectorType::VectorKind VecKind) {
+ Sema &S, VectorKind VecKind) {
+ bool IsTargetCUDAAndHostARM = false;
+ if (S.getLangOpts().CUDAIsDevice) {
+ const TargetInfo *AuxTI = S.getASTContext().getAuxTargetInfo();
+ IsTargetCUDAAndHostARM =
+ AuxTI && (AuxTI->getTriple().isAArch64() || AuxTI->getTriple().isARM());
+ }
+
// Target must have NEON (or MVE, whose vectors are similar enough
// not to need a separate attribute)
- if (!S.Context.getTargetInfo().hasFeature("neon") &&
- !S.Context.getTargetInfo().hasFeature("mve")) {
+ if (!(S.Context.getTargetInfo().hasFeature("neon") ||
+ S.Context.getTargetInfo().hasFeature("mve") ||
+ S.Context.getTargetInfo().hasFeature("sve") ||
+ S.Context.getTargetInfo().hasFeature("sme") ||
+ IsTargetCUDAAndHostARM) &&
+ VecKind == VectorKind::Neon) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_unsupported)
+ << Attr << "'neon', 'mve', 'sve' or 'sme'";
+ Attr.setInvalid();
+ return;
+ }
+ if (!(S.Context.getTargetInfo().hasFeature("neon") ||
+ S.Context.getTargetInfo().hasFeature("mve") ||
+ IsTargetCUDAAndHostARM) &&
+ VecKind == VectorKind::NeonPoly) {
S.Diag(Attr.getLoc(), diag::err_attribute_unsupported)
<< Attr << "'neon' or 'mve'";
Attr.setInvalid();
return;
}
+
// Check the attribute arguments.
if (Attr.getNumArgs() != 1) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Attr
- << 1;
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << Attr << 1;
Attr.setInvalid();
return;
}
@@ -7853,7 +8496,8 @@ static void HandleNeonVectorTypeAttr(QualType &CurType, const ParsedAttr &Attr,
return;
// Only certain element types are supported for Neon vectors.
- if (!isPermittedNeonBaseType(CurType, VecKind, S)) {
+ if (!isPermittedNeonBaseType(CurType, VecKind, S) &&
+ !IsTargetCUDAAndHostARM) {
S.Diag(Attr.getLoc(), diag::err_attribute_invalid_vector_type) << CurType;
Attr.setInvalid();
return;
@@ -7884,8 +8528,10 @@ static void HandleArmSveVectorBitsTypeAttr(QualType &CurType, ParsedAttr &Attr,
return;
}
- // Attribute is unsupported if '-msve-vector-bits=<bits>' isn't specified.
- if (!S.getLangOpts().ArmSveVectorBits) {
+ // Attribute is unsupported if '-msve-vector-bits=<bits>' isn't specified, or
+ // if <bits>+ syntax is used.
+ if (!S.getLangOpts().VScaleMin ||
+ S.getLangOpts().VScaleMin != S.getLangOpts().VScaleMax) {
S.Diag(Attr.getLoc(), diag::err_attribute_arm_feature_sve_bits_unsupported)
<< Attr;
Attr.setInvalid();
@@ -7908,15 +8554,15 @@ static void HandleArmSveVectorBitsTypeAttr(QualType &CurType, ParsedAttr &Attr,
unsigned VecSize = static_cast<unsigned>(SveVectorSizeInBits.getZExtValue());
// The attribute vector size must match -msve-vector-bits.
- if (VecSize != S.getLangOpts().ArmSveVectorBits) {
+ if (VecSize != S.getLangOpts().VScaleMin * 128) {
S.Diag(Attr.getLoc(), diag::err_attribute_bad_sve_vector_size)
- << VecSize << S.getLangOpts().ArmSveVectorBits;
+ << VecSize << S.getLangOpts().VScaleMin * 128;
Attr.setInvalid();
return;
}
// Attribute can only be attached to a single SVE vector or predicate type.
- if (!CurType->isVLSTBuiltinType()) {
+ if (!CurType->isSveVLSBuiltinType()) {
S.Diag(Attr.getLoc(), diag::err_attribute_invalid_sve_type)
<< Attr << CurType;
Attr.setInvalid();
@@ -7927,11 +8573,11 @@ static void HandleArmSveVectorBitsTypeAttr(QualType &CurType, ParsedAttr &Attr,
QualType EltType = CurType->getSveEltType(S.Context);
unsigned TypeSize = S.Context.getTypeSize(EltType);
- VectorType::VectorKind VecKind = VectorType::SveFixedLengthDataVector;
+ VectorKind VecKind = VectorKind::SveFixedLengthData;
if (BT->getKind() == BuiltinType::SveBool) {
// Predicates are represented as i8.
VecSize /= S.Context.getCharWidth() * S.Context.getCharWidth();
- VecKind = VectorType::SveFixedLengthPredicateVector;
+ VecKind = VectorKind::SveFixedLengthPredicate;
} else
VecSize /= TypeSize;
CurType = S.Context.getVectorType(EltType, VecSize, VecKind);
@@ -7941,7 +8587,7 @@ static void HandleArmMveStrictPolymorphismAttr(TypeProcessingState &State,
QualType &CurType,
ParsedAttr &Attr) {
const VectorType *VT = dyn_cast<VectorType>(CurType);
- if (!VT || VT->getVectorKind() != VectorType::NeonVector) {
+ if (!VT || VT->getVectorKind() != VectorKind::Neon) {
State.getSema().Diag(Attr.getLoc(),
diag::err_attribute_arm_mve_polymorphism);
Attr.setInvalid();
@@ -7954,6 +8600,78 @@ static void HandleArmMveStrictPolymorphismAttr(TypeProcessingState &State,
CurType, CurType);
}
+/// HandleRISCVRVVVectorBitsTypeAttr - The "riscv_rvv_vector_bits" attribute is
+/// used to create fixed-length versions of sizeless RVV types such as
+/// vint8m1_t_t.
+static void HandleRISCVRVVVectorBitsTypeAttr(QualType &CurType,
+ ParsedAttr &Attr, Sema &S) {
+ // Target must have vector extension.
+ if (!S.Context.getTargetInfo().hasFeature("zve32x")) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_unsupported)
+ << Attr << "'zve32x'";
+ Attr.setInvalid();
+ return;
+ }
+
+ auto VScale = S.Context.getTargetInfo().getVScaleRange(S.getLangOpts());
+ if (!VScale || !VScale->first || VScale->first != VScale->second) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_riscv_rvv_bits_unsupported)
+ << Attr;
+ Attr.setInvalid();
+ return;
+ }
+
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << Attr << 1;
+ Attr.setInvalid();
+ return;
+ }
+
+ // The vector size must be an integer constant expression.
+ llvm::APSInt RVVVectorSizeInBits(32);
+ if (!verifyValidIntegerConstantExpr(S, Attr, RVVVectorSizeInBits))
+ return;
+
+ // Attribute can only be attached to a single RVV vector type.
+ if (!CurType->isRVVVLSBuiltinType()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_invalid_rvv_type)
+ << Attr << CurType;
+ Attr.setInvalid();
+ return;
+ }
+
+ unsigned VecSize = static_cast<unsigned>(RVVVectorSizeInBits.getZExtValue());
+
+ ASTContext::BuiltinVectorTypeInfo Info =
+ S.Context.getBuiltinVectorTypeInfo(CurType->castAs<BuiltinType>());
+ unsigned MinElts = Info.EC.getKnownMinValue();
+
+ VectorKind VecKind = VectorKind::RVVFixedLengthData;
+ unsigned ExpectedSize = VScale->first * MinElts;
+ QualType EltType = CurType->getRVVEltType(S.Context);
+ unsigned EltSize = S.Context.getTypeSize(EltType);
+ unsigned NumElts;
+ if (Info.ElementType == S.Context.BoolTy) {
+ NumElts = VecSize / S.Context.getCharWidth();
+ VecKind = VectorKind::RVVFixedLengthMask;
+ } else {
+ ExpectedSize *= EltSize;
+ NumElts = VecSize / EltSize;
+ }
+
+ // The attribute vector size must match -mrvv-vector-bits.
+ if (ExpectedSize % 8 != 0 || VecSize != ExpectedSize) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_bad_rvv_vector_size)
+ << VecSize << ExpectedSize;
+ Attr.setInvalid();
+ return;
+ }
+
+ CurType = S.Context.getVectorType(EltType, NumElts, VecKind);
+}
+
/// Handle OpenCL Access Qualifier Attribute.
static void HandleOpenCLAccessAttr(QualType &CurType, const ParsedAttr &Attr,
Sema &S) {
@@ -8031,6 +8749,34 @@ static void HandleMatrixTypeAttr(QualType &CurType, const ParsedAttr &Attr,
CurType = T;
}
+static void HandleAnnotateTypeAttr(TypeProcessingState &State,
+ QualType &CurType, const ParsedAttr &PA) {
+ Sema &S = State.getSema();
+
+ if (PA.getNumArgs() < 1) {
+ S.Diag(PA.getLoc(), diag::err_attribute_too_few_arguments) << PA << 1;
+ return;
+ }
+
+ // Make sure that there is a string literal as the annotation's first
+ // argument.
+ StringRef Str;
+ if (!S.checkStringLiteralArgumentAttr(PA, 0, Str))
+ return;
+
+ llvm::SmallVector<Expr *, 4> Args;
+ Args.reserve(PA.getNumArgs() - 1);
+ for (unsigned Idx = 1; Idx < PA.getNumArgs(); Idx++) {
+ assert(!PA.isArgIdent(Idx));
+ Args.push_back(PA.getArgAsExpr(Idx));
+ }
+ if (!S.ConstantFoldAttrArgs(PA, Args))
+ return;
+ auto *AnnotateTypeAttr =
+ AnnotateTypeAttr::Create(S.Context, Str, Args.data(), Args.size(), PA);
+ CurType = State.getAttributedType(AnnotateTypeAttr, CurType, CurType);
+}
+
static void HandleLifetimeBoundAttr(TypeProcessingState &State,
QualType &CurType,
ParsedAttr &Attr) {
@@ -8041,22 +8787,26 @@ static void HandleLifetimeBoundAttr(TypeProcessingState &State,
}
}
-static bool isAddressSpaceKind(const ParsedAttr &attr) {
- auto attrKind = attr.getKind();
-
- return attrKind == ParsedAttr::AT_AddressSpace ||
- attrKind == ParsedAttr::AT_OpenCLPrivateAddressSpace ||
- attrKind == ParsedAttr::AT_OpenCLGlobalAddressSpace ||
- attrKind == ParsedAttr::AT_OpenCLGlobalDeviceAddressSpace ||
- attrKind == ParsedAttr::AT_OpenCLGlobalHostAddressSpace ||
- attrKind == ParsedAttr::AT_OpenCLLocalAddressSpace ||
- attrKind == ParsedAttr::AT_OpenCLConstantAddressSpace ||
- attrKind == ParsedAttr::AT_OpenCLGenericAddressSpace;
+static void HandleHLSLParamModifierAttr(QualType &CurType,
+ const ParsedAttr &Attr, Sema &S) {
+ // Don't apply this attribute to template dependent types. It is applied on
+ // substitution during template instantiation.
+ if (CurType->isDependentType())
+ return;
+ if (Attr.getSemanticSpelling() == HLSLParamModifierAttr::Keyword_inout ||
+ Attr.getSemanticSpelling() == HLSLParamModifierAttr::Keyword_out)
+ CurType = S.getASTContext().getLValueReferenceType(CurType);
}
static void processTypeAttrs(TypeProcessingState &state, QualType &type,
TypeAttrLocation TAL,
- ParsedAttributesView &attrs) {
+ const ParsedAttributesView &attrs,
+ Sema::CUDAFunctionTarget CFT) {
+
+ state.setParsedNoDeref(false);
+ if (attrs.empty())
+ return;
+
// Scan through and apply attributes to this type where it makes sense. Some
// attributes (such as __address_space__, __vector_size__, etc) apply to the
// type, but others can be present in the type specifiers even though they
@@ -8066,21 +8816,19 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
// sure we visit every element once. Copy the attributes list, and iterate
// over that.
ParsedAttributesView AttrsCopy{attrs};
-
- state.setParsedNoDeref(false);
-
for (ParsedAttr &attr : AttrsCopy) {
// Skip attributes that were marked to be invalid.
if (attr.isInvalid())
continue;
- if (attr.isStandardAttributeSyntax()) {
+ if (attr.isStandardAttributeSyntax() || attr.isRegularKeywordAttribute()) {
// [[gnu::...]] attributes are treated as declaration attributes, so may
// not appertain to a DeclaratorChunk. If we handle them as type
// attributes, accept them in that position and diagnose the GCC
// incompatibility.
if (attr.isGNUScope()) {
+ assert(attr.isStandardAttributeSyntax());
bool IsTypeAttr = attr.isTypeAttr();
if (TAL == TAL_DeclChunk) {
state.getSema().Diag(attr.getLoc(),
@@ -8091,11 +8839,14 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
if (!IsTypeAttr)
continue;
}
- } else if (TAL != TAL_DeclChunk && !isAddressSpaceKind(attr)) {
+ } else if (TAL != TAL_DeclSpec && TAL != TAL_DeclChunk &&
+ !attr.isTypeAttr()) {
// Otherwise, only consider type processing for a C++11 attribute if
- // it's actually been applied to a type.
- // We also allow C++11 address_space and
- // OpenCL language address space attributes to pass through.
+ // - it has actually been applied to a type (decl-specifier-seq or
+ // declarator chunk), or
+ // - it is a type attribute, irrespective of where it was applied (so
+ // that we can support the legacy behavior of some type attributes
+ // that can be applied to the declaration name).
continue;
}
}
@@ -8105,23 +8856,34 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
switch (attr.getKind()) {
default:
// A [[]] attribute on a declarator chunk must appertain to a type.
- if (attr.isStandardAttributeSyntax() && TAL == TAL_DeclChunk) {
+ if ((attr.isStandardAttributeSyntax() ||
+ attr.isRegularKeywordAttribute()) &&
+ TAL == TAL_DeclChunk) {
state.getSema().Diag(attr.getLoc(), diag::err_attribute_not_type_attr)
- << attr;
+ << attr << attr.isRegularKeywordAttribute();
attr.setUsedAsTypeAttr();
}
break;
case ParsedAttr::UnknownAttribute:
- if (attr.isStandardAttributeSyntax() && TAL == TAL_DeclChunk)
+ if (attr.isStandardAttributeSyntax()) {
state.getSema().Diag(attr.getLoc(),
diag::warn_unknown_attribute_ignored)
<< attr << attr.getRange();
+ // Mark the attribute as invalid so we don't emit the same diagnostic
+ // multiple times.
+ attr.setInvalid();
+ }
break;
case ParsedAttr::IgnoredAttribute:
break;
+ case ParsedAttr::AT_BTFTypeTag:
+ HandleBTFTypeTagAttribute(type, attr, state);
+ attr.setUsedAsTypeAttr();
+ break;
+
case ParsedAttr::AT_MayAlias:
// FIXME: This attribute needs to actually be handled, but if we ignore
// it it breaks large amounts of Linux software.
@@ -8134,6 +8896,7 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
case ParsedAttr::AT_OpenCLLocalAddressSpace:
case ParsedAttr::AT_OpenCLConstantAddressSpace:
case ParsedAttr::AT_OpenCLGenericAddressSpace:
+ case ParsedAttr::AT_HLSLGroupSharedAddressSpace:
case ParsedAttr::AT_AddressSpace:
HandleAddressSpaceTypeAttribute(type, attr, state);
attr.setUsedAsTypeAttr();
@@ -8152,13 +8915,12 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
attr.setUsedAsTypeAttr();
break;
case ParsedAttr::AT_NeonVectorType:
- HandleNeonVectorTypeAttr(type, attr, state.getSema(),
- VectorType::NeonVector);
+ HandleNeonVectorTypeAttr(type, attr, state.getSema(), VectorKind::Neon);
attr.setUsedAsTypeAttr();
break;
case ParsedAttr::AT_NeonPolyVectorType:
HandleNeonVectorTypeAttr(type, attr, state.getSema(),
- VectorType::NeonPolyVector);
+ VectorKind::NeonPoly);
attr.setUsedAsTypeAttr();
break;
case ParsedAttr::AT_ArmSveVectorBits:
@@ -8170,6 +8932,10 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
attr.setUsedAsTypeAttr();
break;
}
+ case ParsedAttr::AT_RISCVRVVVectorBits:
+ HandleRISCVRVVVectorBitsTypeAttr(type, attr, state.getSema());
+ attr.setUsedAsTypeAttr();
+ break;
case ParsedAttr::AT_OpenCLAccess:
HandleOpenCLAccessAttr(type, attr, state.getSema());
attr.setUsedAsTypeAttr();
@@ -8180,6 +8946,15 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
break;
case ParsedAttr::AT_NoDeref: {
+ // FIXME: `noderef` currently doesn't work correctly in [[]] syntax.
+ // See https://github.com/llvm/llvm-project/issues/55790 for details.
+ // For the time being, we simply emit a warning that the attribute is
+ // ignored.
+ if (attr.isStandardAttributeSyntax()) {
+ state.getSema().Diag(attr.getLoc(), diag::warn_attribute_ignored)
+ << attr;
+ break;
+ }
ASTContext &Ctx = state.getSema().Context;
type = state.getAttributedType(createSimpleAttr<NoDerefAttr>(Ctx, attr),
type, type);
@@ -8193,6 +8968,18 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
attr.setUsedAsTypeAttr();
break;
+ case ParsedAttr::AT_WebAssemblyFuncref: {
+ if (!HandleWebAssemblyFuncrefAttr(state, type, attr))
+ attr.setUsedAsTypeAttr();
+ break;
+ }
+
+ case ParsedAttr::AT_HLSLParamModifier: {
+ HandleHLSLParamModifierAttr(type, attr, state.getSema());
+ attr.setUsedAsTypeAttr();
+ break;
+ }
+
MS_TYPE_ATTRS_CASELIST:
if (!handleMSPointerTypeQualifierAttr(state, attr, type))
attr.setUsedAsTypeAttr();
@@ -8214,11 +9001,8 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
bool allowOnArrayType =
state.getDeclarator().isPrototypeContext() &&
!hasOuterPointerLikeChunk(state.getDeclarator(), endIndex);
- if (checkNullabilityTypeSpecifier(
- state,
- type,
- attr,
- allowOnArrayType)) {
+ if (CheckNullabilityTypeSpecifier(state, type, attr,
+ allowOnArrayType)) {
attr.setInvalid();
}
@@ -8253,17 +9037,28 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
// clang, so revert to attribute-based handling for C.
if (!state.getSema().getLangOpts().CPlusPlus)
break;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
FUNCTION_TYPE_ATTRS_CASELIST:
attr.setUsedAsTypeAttr();
+ // Attributes with standard syntax have strict rules for what they
+ // appertain to and hence should not use the "distribution" logic below.
+ if (attr.isStandardAttributeSyntax() ||
+ attr.isRegularKeywordAttribute()) {
+ if (!handleFunctionTypeAttr(state, attr, type, CFT)) {
+ diagnoseBadTypeAttribute(state.getSema(), attr, type);
+ attr.setInvalid();
+ }
+ break;
+ }
+
// Never process function type attributes as part of the
// declaration-specifiers.
if (TAL == TAL_DeclSpec)
- distributeFunctionTypeAttrFromDeclSpec(state, attr, type);
+ distributeFunctionTypeAttrFromDeclSpec(state, attr, type, CFT);
// Otherwise, handle the possible delays.
- else if (!handleFunctionTypeAttr(state, attr, type))
+ else if (!handleFunctionTypeAttr(state, attr, type, CFT))
distributeFunctionTypeAttr(state, attr, type);
break;
case ParsedAttr::AT_AcquireHandle: {
@@ -8287,6 +9082,11 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
attr.setUsedAsTypeAttr();
break;
}
+ case ParsedAttr::AT_AnnotateType: {
+ HandleAnnotateTypeAttr(state, type, attr);
+ attr.setUsedAsTypeAttr();
+ break;
+ }
}
// Handle attributes that are defined in a macro. We do not want this to be
@@ -8303,10 +9103,6 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
attr.getMacroExpansionLoc());
}
}
-
- if (!state.getSema().getLangOpts().OpenCL ||
- type.getAddressSpace() != LangAS::Default)
- return;
}
void Sema::completeExprArrayBound(Expr *E) {
@@ -8441,17 +9237,8 @@ bool Sema::hasStructuralCompatLayout(Decl *D, Decl *Suggested) {
return Ctx.IsEquivalent(D, Suggested);
}
-/// Determine whether there is any declaration of \p D that was ever a
-/// definition (perhaps before module merging) and is currently visible.
-/// \param D The definition of the entity.
-/// \param Suggested Filled in with the declaration that should be made visible
-/// in order to provide a definition of this entity.
-/// \param OnlyNeedComplete If \c true, we only need the type to be complete,
-/// not defined. This only matters for enums with a fixed underlying
-/// type, since in all other cases, a type is complete if and only if it
-/// is defined.
-bool Sema::hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
- bool OnlyNeedComplete) {
+bool Sema::hasAcceptableDefinition(NamedDecl *D, NamedDecl **Suggested,
+ AcceptableKind Kind, bool OnlyNeedComplete) {
// Easy case: if we don't have modules, all declarations are visible.
if (!getLangOpts().Modules && !getLangOpts().ModulesLocalVisibility)
return true;
@@ -8477,12 +9264,13 @@ bool Sema::hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
// of it will do.
*Suggested = nullptr;
for (auto *Redecl : ED->redecls()) {
- if (isVisible(Redecl))
+ if (isAcceptable(Redecl, Kind))
return true;
if (Redecl->isThisDeclarationADefinition() ||
(Redecl->isCanonicalDecl() && !*Suggested))
*Suggested = Redecl;
}
+
return false;
}
D = ED->getDefinition();
@@ -8495,13 +9283,14 @@ bool Sema::hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
VD = Pattern;
D = VD->getDefinition();
}
+
assert(D && "missing definition for pattern of instantiated definition");
*Suggested = D;
- auto DefinitionIsVisible = [&] {
+ auto DefinitionIsAcceptable = [&] {
// The (primary) definition might be in a visible module.
- if (isVisible(D))
+ if (isAcceptable(D, Kind))
return true;
// A visible module might have a merged definition instead.
@@ -8519,19 +9308,51 @@ bool Sema::hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
return false;
};
- if (DefinitionIsVisible())
+ if (DefinitionIsAcceptable())
return true;
// The external source may have additional definitions of this entity that are
// visible, so complete the redeclaration chain now and ask again.
if (auto *Source = Context.getExternalSource()) {
Source->CompleteRedeclChain(D);
- return DefinitionIsVisible();
+ return DefinitionIsAcceptable();
}
return false;
}
+/// Determine whether there is any declaration of \p D that was ever a
+/// definition (perhaps before module merging) and is currently visible.
+/// \param D The definition of the entity.
+/// \param Suggested Filled in with the declaration that should be made visible
+/// in order to provide a definition of this entity.
+/// \param OnlyNeedComplete If \c true, we only need the type to be complete,
+/// not defined. This only matters for enums with a fixed underlying
+/// type, since in all other cases, a type is complete if and only if it
+/// is defined.
+bool Sema::hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
+ bool OnlyNeedComplete) {
+ return hasAcceptableDefinition(D, Suggested, Sema::AcceptableKind::Visible,
+ OnlyNeedComplete);
+}
+
+/// Determine whether there is any declaration of \p D that was ever a
+/// definition (perhaps before module merging) and is currently
+/// reachable.
+/// \param D The definition of the entity.
+/// \param Suggested Filled in with the declaration that should be made
+/// reachable
+/// in order to provide a definition of this entity.
+/// \param OnlyNeedComplete If \c true, we only need the type to be complete,
+/// not defined. This only matters for enums with a fixed underlying
+/// type, since in all other cases, a type is complete if and only if it
+/// is defined.
+bool Sema::hasReachableDefinition(NamedDecl *D, NamedDecl **Suggested,
+ bool OnlyNeedComplete) {
+ return hasAcceptableDefinition(D, Suggested, Sema::AcceptableKind::Reachable,
+ OnlyNeedComplete);
+}
+
/// Locks in the inheritance model for the given class and all of its bases.
static void assignInheritanceModel(Sema &S, CXXRecordDecl *RD) {
RD = RD->getMostRecentNonInjectedDecl();
@@ -8558,8 +9379,7 @@ static void assignInheritanceModel(Sema &S, CXXRecordDecl *RD) {
? S.ImplicitMSInheritanceAttrLoc
: RD->getSourceRange();
RD->addAttr(MSInheritanceAttr::CreateImplicit(
- S.getASTContext(), BestCase, Loc, AttributeCommonInfo::AS_Microsoft,
- MSInheritanceAttr::Spelling(IM)));
+ S.getASTContext(), BestCase, Loc, MSInheritanceAttr::Spelling(IM)));
S.Consumer.AssignInheritanceModel(RD);
}
}
@@ -8601,20 +9421,19 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
// Check that any necessary explicit specializations are visible. For an
// enum, we just need the declaration, so don't check this.
if (Def && !isa<EnumDecl>(Def))
- checkSpecializationVisibility(Loc, Def);
+ checkSpecializationReachability(Loc, Def);
// If we have a complete type, we're done.
if (!Incomplete) {
- // If we know about the definition but it is not visible, complain.
- NamedDecl *SuggestedDef = nullptr;
+ NamedDecl *Suggested = nullptr;
if (Def &&
- !hasVisibleDefinition(Def, &SuggestedDef, /*OnlyNeedComplete*/true)) {
+ !hasReachableDefinition(Def, &Suggested, /*OnlyNeedComplete=*/true)) {
// If the user is going to see an error here, recover by making the
// definition visible.
bool TreatAsComplete = Diagnoser && !isSFINAEContext();
- if (Diagnoser && SuggestedDef)
- diagnoseMissingImport(Loc, SuggestedDef, MissingImportKind::Definition,
- /*Recover*/TreatAsComplete);
+ if (Diagnoser && Suggested)
+ diagnoseMissingImport(Loc, Suggested, MissingImportKind::Definition,
+ /*Recover*/ TreatAsComplete);
return !TreatAsComplete;
} else if (Def && !TemplateInstCallbacks.empty()) {
CodeSynthesisContext TempInst;
@@ -8747,9 +9566,12 @@ bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
/// \returns diagnostic %select index.
static unsigned getLiteralDiagFromTagKind(TagTypeKind Tag) {
switch (Tag) {
- case TTK_Struct: return 0;
- case TTK_Interface: return 1;
- case TTK_Class: return 2;
+ case TagTypeKind::Struct:
+ return 0;
+ case TagTypeKind::Interface:
+ return 1;
+ case TagTypeKind::Class:
+ return 2;
default: llvm_unreachable("Invalid tag kind for literal type diagnostic!");
}
}
@@ -8875,60 +9697,31 @@ QualType Sema::getElaboratedType(ElaboratedTypeKeyword Keyword,
TagDecl *OwnedTagDecl) {
if (T.isNull())
return T;
- NestedNameSpecifier *NNS;
- if (SS.isValid())
- NNS = SS.getScopeRep();
- else {
- if (Keyword == ETK_None)
- return T;
- NNS = nullptr;
- }
- return Context.getElaboratedType(Keyword, NNS, T, OwnedTagDecl);
+ return Context.getElaboratedType(
+ Keyword, SS.isValid() ? SS.getScopeRep() : nullptr, T, OwnedTagDecl);
}
-QualType Sema::BuildTypeofExprType(Expr *E, SourceLocation Loc) {
+QualType Sema::BuildTypeofExprType(Expr *E, TypeOfKind Kind) {
assert(!E->hasPlaceholderType() && "unexpected placeholder");
if (!getLangOpts().CPlusPlus && E->refersToBitField())
- Diag(E->getExprLoc(), diag::err_sizeof_alignof_typeof_bitfield) << 2;
+ Diag(E->getExprLoc(), diag::err_sizeof_alignof_typeof_bitfield)
+ << (Kind == TypeOfKind::Unqualified ? 3 : 2);
if (!E->isTypeDependent()) {
QualType T = E->getType();
if (const TagType *TT = T->getAs<TagType>())
DiagnoseUseOfDecl(TT->getDecl(), E->getExprLoc());
}
- return Context.getTypeOfExprType(E);
-}
-
-/// getDecltypeForParenthesizedExpr - Given an expr, will return the type for
-/// that expression, as in [dcl.type.simple]p4 but without taking id-expressions
-/// and class member access into account.
-QualType Sema::getDecltypeForParenthesizedExpr(Expr *E) {
- // C++11 [dcl.type.simple]p4:
- // [...]
- QualType T = E->getType();
- switch (E->getValueKind()) {
- // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the
- // type of e;
- case VK_XValue:
- return Context.getRValueReferenceType(T);
- // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the
- // type of e;
- case VK_LValue:
- return Context.getLValueReferenceType(T);
- // - otherwise, decltype(e) is the type of e.
- case VK_PRValue:
- return T;
- }
- llvm_unreachable("Unknown value kind");
+ return Context.getTypeOfExprType(E, Kind);
}
/// getDecltypeForExpr - Given an expr, will return the decltype for
/// that expression, according to the rules in C++11
/// [dcl.type.simple]p4 and C++11 [expr.lambda.prim]p18.
-static QualType getDecltypeForExpr(Sema &S, Expr *E) {
+QualType Sema::getDecltypeForExpr(Expr *E) {
if (E->isTypeDependent())
- return S.Context.DependentTy;
+ return Context.DependentTy;
Expr *IDExpr = E;
if (auto *ImplCastExpr = dyn_cast<ImplicitCastExpr>(E))
@@ -8945,7 +9738,7 @@ static QualType getDecltypeForExpr(Sema &S, Expr *E) {
// parameter object. This rule makes no difference before C++20 so we apply
// it unconditionally.
if (const auto *SNTTPE = dyn_cast<SubstNonTypeTemplateParmExpr>(IDExpr))
- return SNTTPE->getParameterType(S.Context);
+ return SNTTPE->getParameterType(Context);
// - if e is an unparenthesized id-expression or an unparenthesized class
// member access (5.2.5), decltype(e) is the type of the entity named
@@ -8953,22 +9746,21 @@ static QualType getDecltypeForExpr(Sema &S, Expr *E) {
// functions, the program is ill-formed;
//
// We apply the same rules for Objective-C ivar and property references.
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(IDExpr)) {
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(IDExpr)) {
const ValueDecl *VD = DRE->getDecl();
- if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(VD))
- return TPO->getType().getUnqualifiedType();
- return VD->getType();
- } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(IDExpr)) {
- if (const ValueDecl *VD = ME->getMemberDecl())
+ QualType T = VD->getType();
+ return isa<TemplateParamObjectDecl>(VD) ? T.getUnqualifiedType() : T;
+ }
+ if (const auto *ME = dyn_cast<MemberExpr>(IDExpr)) {
+ if (const auto *VD = ME->getMemberDecl())
if (isa<FieldDecl>(VD) || isa<VarDecl>(VD))
return VD->getType();
- } else if (const ObjCIvarRefExpr *IR = dyn_cast<ObjCIvarRefExpr>(IDExpr)) {
+ } else if (const auto *IR = dyn_cast<ObjCIvarRefExpr>(IDExpr)) {
return IR->getDecl()->getType();
- } else if (const ObjCPropertyRefExpr *PR =
- dyn_cast<ObjCPropertyRefExpr>(IDExpr)) {
+ } else if (const auto *PR = dyn_cast<ObjCPropertyRefExpr>(IDExpr)) {
if (PR->isExplicitProperty())
return PR->getExplicitProperty()->getType();
- } else if (auto *PE = dyn_cast<PredefinedExpr>(IDExpr)) {
+ } else if (const auto *PE = dyn_cast<PredefinedExpr>(IDExpr)) {
return PE->getType();
}
@@ -8979,24 +9771,20 @@ static QualType getDecltypeForExpr(Sema &S, Expr *E) {
// access to a corresponding data member of the closure type that
// would have been declared if x were an odr-use of the denoted
// entity.
- using namespace sema;
- if (S.getCurLambda()) {
- if (isa<ParenExpr>(IDExpr)) {
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(IDExpr->IgnoreParens())) {
- if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) {
- QualType T = S.getCapturedDeclRefType(Var, DRE->getLocation());
- if (!T.isNull())
- return S.Context.getLValueReferenceType(T);
- }
+ if (getCurLambda() && isa<ParenExpr>(IDExpr)) {
+ if (auto *DRE = dyn_cast<DeclRefExpr>(IDExpr->IgnoreParens())) {
+ if (auto *Var = dyn_cast<VarDecl>(DRE->getDecl())) {
+ QualType T = getCapturedDeclRefType(Var, DRE->getLocation());
+ if (!T.isNull())
+ return Context.getLValueReferenceType(T);
}
}
}
- return S.getDecltypeForParenthesizedExpr(E);
+ return Context.getReferenceQualifiedType(E);
}
-QualType Sema::BuildDecltypeType(Expr *E, SourceLocation Loc,
- bool AsUnevaluated) {
+QualType Sema::BuildDecltypeType(Expr *E, bool AsUnevaluated) {
assert(!E->hasPlaceholderType() && "unexpected placeholder");
if (AsUnevaluated && CodeSynthesisContexts.empty() &&
@@ -9007,47 +9795,264 @@ QualType Sema::BuildDecltypeType(Expr *E, SourceLocation Loc,
// used to build SFINAE gadgets.
Diag(E->getExprLoc(), diag::warn_side_effects_unevaluated_context);
}
+ return Context.getDecltypeType(E, getDecltypeForExpr(E));
+}
+
+static QualType GetEnumUnderlyingType(Sema &S, QualType BaseType,
+ SourceLocation Loc) {
+ assert(BaseType->isEnumeralType());
+ EnumDecl *ED = BaseType->castAs<EnumType>()->getDecl();
+ assert(ED && "EnumType has no EnumDecl");
- return Context.getDecltypeType(E, getDecltypeForExpr(*this, E));
+ S.DiagnoseUseOfDecl(ED, Loc);
+
+ QualType Underlying = ED->getIntegerType();
+ assert(!Underlying.isNull());
+
+ return Underlying;
}
-QualType Sema::BuildUnaryTransformType(QualType BaseType,
- UnaryTransformType::UTTKind UKind,
- SourceLocation Loc) {
- switch (UKind) {
- case UnaryTransformType::EnumUnderlyingType:
- if (!BaseType->isDependentType() && !BaseType->isEnumeralType()) {
- Diag(Loc, diag::err_only_enums_have_underlying_types);
- return QualType();
- } else {
- QualType Underlying = BaseType;
- if (!BaseType->isDependentType()) {
- // The enum could be incomplete if we're parsing its definition or
- // recovering from an error.
- NamedDecl *FwdDecl = nullptr;
- if (BaseType->isIncompleteType(&FwdDecl)) {
- Diag(Loc, diag::err_underlying_type_of_incomplete_enum) << BaseType;
- Diag(FwdDecl->getLocation(), diag::note_forward_declaration) << FwdDecl;
- return QualType();
- }
+QualType Sema::BuiltinEnumUnderlyingType(QualType BaseType,
+ SourceLocation Loc) {
+ if (!BaseType->isEnumeralType()) {
+ Diag(Loc, diag::err_only_enums_have_underlying_types);
+ return QualType();
+ }
- EnumDecl *ED = BaseType->castAs<EnumType>()->getDecl();
- assert(ED && "EnumType has no EnumDecl");
+ // The enum could be incomplete if we're parsing its definition or
+ // recovering from an error.
+ NamedDecl *FwdDecl = nullptr;
+ if (BaseType->isIncompleteType(&FwdDecl)) {
+ Diag(Loc, diag::err_underlying_type_of_incomplete_enum) << BaseType;
+ Diag(FwdDecl->getLocation(), diag::note_forward_declaration) << FwdDecl;
+ return QualType();
+ }
- DiagnoseUseOfDecl(ED, Loc);
+ return GetEnumUnderlyingType(*this, BaseType, Loc);
+}
- Underlying = ED->getIntegerType();
- assert(!Underlying.isNull());
- }
- return Context.getUnaryTransformType(BaseType, Underlying,
- UnaryTransformType::EnumUnderlyingType);
+QualType Sema::BuiltinAddPointer(QualType BaseType, SourceLocation Loc) {
+ QualType Pointer = BaseType.isReferenceable() || BaseType->isVoidType()
+ ? BuildPointerType(BaseType.getNonReferenceType(), Loc,
+ DeclarationName())
+ : BaseType;
+
+ return Pointer.isNull() ? QualType() : Pointer;
+}
+
+QualType Sema::BuiltinRemovePointer(QualType BaseType, SourceLocation Loc) {
+ // We don't want block pointers or ObjectiveC's id type.
+ if (!BaseType->isAnyPointerType() || BaseType->isObjCIdType())
+ return BaseType;
+
+ return BaseType->getPointeeType();
+}
+
+QualType Sema::BuiltinDecay(QualType BaseType, SourceLocation Loc) {
+ QualType Underlying = BaseType.getNonReferenceType();
+ if (Underlying->isArrayType())
+ return Context.getDecayedType(Underlying);
+
+ if (Underlying->isFunctionType())
+ return BuiltinAddPointer(BaseType, Loc);
+
+ SplitQualType Split = Underlying.getSplitUnqualifiedType();
+ // std::decay is supposed to produce 'std::remove_cv', but since 'restrict' is
+ // in the same group of qualifiers as 'const' and 'volatile', we're extending
+ // '__decay(T)' so that it removes all qualifiers.
+ Split.Quals.removeCVRQualifiers();
+ return Context.getQualifiedType(Split);
+}
+
+QualType Sema::BuiltinAddReference(QualType BaseType, UTTKind UKind,
+ SourceLocation Loc) {
+ assert(LangOpts.CPlusPlus);
+ QualType Reference =
+ BaseType.isReferenceable()
+ ? BuildReferenceType(BaseType,
+ UKind == UnaryTransformType::AddLvalueReference,
+ Loc, DeclarationName())
+ : BaseType;
+ return Reference.isNull() ? QualType() : Reference;
+}
+
+QualType Sema::BuiltinRemoveExtent(QualType BaseType, UTTKind UKind,
+ SourceLocation Loc) {
+ if (UKind == UnaryTransformType::RemoveAllExtents)
+ return Context.getBaseElementType(BaseType);
+
+ if (const auto *AT = Context.getAsArrayType(BaseType))
+ return AT->getElementType();
+
+ return BaseType;
+}
+
+QualType Sema::BuiltinRemoveReference(QualType BaseType, UTTKind UKind,
+ SourceLocation Loc) {
+ assert(LangOpts.CPlusPlus);
+ QualType T = BaseType.getNonReferenceType();
+ if (UKind == UTTKind::RemoveCVRef &&
+ (T.isConstQualified() || T.isVolatileQualified())) {
+ Qualifiers Quals;
+ QualType Unqual = Context.getUnqualifiedArrayType(T, Quals);
+ Quals.removeConst();
+ Quals.removeVolatile();
+ T = Context.getQualifiedType(Unqual, Quals);
+ }
+ return T;
+}
+
+QualType Sema::BuiltinChangeCVRQualifiers(QualType BaseType, UTTKind UKind,
+ SourceLocation Loc) {
+ if ((BaseType->isReferenceType() && UKind != UTTKind::RemoveRestrict) ||
+ BaseType->isFunctionType())
+ return BaseType;
+
+ Qualifiers Quals;
+ QualType Unqual = Context.getUnqualifiedArrayType(BaseType, Quals);
+
+ if (UKind == UTTKind::RemoveConst || UKind == UTTKind::RemoveCV)
+ Quals.removeConst();
+ if (UKind == UTTKind::RemoveVolatile || UKind == UTTKind::RemoveCV)
+ Quals.removeVolatile();
+ if (UKind == UTTKind::RemoveRestrict)
+ Quals.removeRestrict();
+
+ return Context.getQualifiedType(Unqual, Quals);
+}
+
+static QualType ChangeIntegralSignedness(Sema &S, QualType BaseType,
+ bool IsMakeSigned,
+ SourceLocation Loc) {
+ if (BaseType->isEnumeralType()) {
+ QualType Underlying = GetEnumUnderlyingType(S, BaseType, Loc);
+ if (auto *BitInt = dyn_cast<BitIntType>(Underlying)) {
+ unsigned int Bits = BitInt->getNumBits();
+ if (Bits > 1)
+ return S.Context.getBitIntType(!IsMakeSigned, Bits);
+
+ S.Diag(Loc, diag::err_make_signed_integral_only)
+ << IsMakeSigned << /*_BitInt(1)*/ true << BaseType << 1 << Underlying;
+ return QualType();
+ }
+ if (Underlying->isBooleanType()) {
+ S.Diag(Loc, diag::err_make_signed_integral_only)
+ << IsMakeSigned << /*_BitInt(1)*/ false << BaseType << 1
+ << Underlying;
+ return QualType();
}
}
- llvm_unreachable("unknown unary transform type");
+
+ bool Int128Unsupported = !S.Context.getTargetInfo().hasInt128Type();
+ std::array<CanQualType *, 6> AllSignedIntegers = {
+ &S.Context.SignedCharTy, &S.Context.ShortTy, &S.Context.IntTy,
+ &S.Context.LongTy, &S.Context.LongLongTy, &S.Context.Int128Ty};
+ ArrayRef<CanQualType *> AvailableSignedIntegers(
+ AllSignedIntegers.data(), AllSignedIntegers.size() - Int128Unsupported);
+ std::array<CanQualType *, 6> AllUnsignedIntegers = {
+ &S.Context.UnsignedCharTy, &S.Context.UnsignedShortTy,
+ &S.Context.UnsignedIntTy, &S.Context.UnsignedLongTy,
+ &S.Context.UnsignedLongLongTy, &S.Context.UnsignedInt128Ty};
+ ArrayRef<CanQualType *> AvailableUnsignedIntegers(AllUnsignedIntegers.data(),
+ AllUnsignedIntegers.size() -
+ Int128Unsupported);
+ ArrayRef<CanQualType *> *Consider =
+ IsMakeSigned ? &AvailableSignedIntegers : &AvailableUnsignedIntegers;
+
+ uint64_t BaseSize = S.Context.getTypeSize(BaseType);
+ auto *Result =
+ llvm::find_if(*Consider, [&S, BaseSize](const CanQual<Type> *T) {
+ return BaseSize == S.Context.getTypeSize(T->getTypePtr());
+ });
+
+ assert(Result != Consider->end());
+ return QualType((*Result)->getTypePtr(), 0);
+}
+
+QualType Sema::BuiltinChangeSignedness(QualType BaseType, UTTKind UKind,
+ SourceLocation Loc) {
+ bool IsMakeSigned = UKind == UnaryTransformType::MakeSigned;
+ if ((!BaseType->isIntegerType() && !BaseType->isEnumeralType()) ||
+ BaseType->isBooleanType() ||
+ (BaseType->isBitIntType() &&
+ BaseType->getAs<BitIntType>()->getNumBits() < 2)) {
+ Diag(Loc, diag::err_make_signed_integral_only)
+ << IsMakeSigned << BaseType->isBitIntType() << BaseType << 0;
+ return QualType();
+ }
+
+ bool IsNonIntIntegral =
+ BaseType->isChar16Type() || BaseType->isChar32Type() ||
+ BaseType->isWideCharType() || BaseType->isEnumeralType();
+
+ QualType Underlying =
+ IsNonIntIntegral
+ ? ChangeIntegralSignedness(*this, BaseType, IsMakeSigned, Loc)
+ : IsMakeSigned ? Context.getCorrespondingSignedType(BaseType)
+ : Context.getCorrespondingUnsignedType(BaseType);
+ if (Underlying.isNull())
+ return Underlying;
+ return Context.getQualifiedType(Underlying, BaseType.getQualifiers());
+}
+
+QualType Sema::BuildUnaryTransformType(QualType BaseType, UTTKind UKind,
+ SourceLocation Loc) {
+ if (BaseType->isDependentType())
+ return Context.getUnaryTransformType(BaseType, BaseType, UKind);
+ QualType Result;
+ switch (UKind) {
+ case UnaryTransformType::EnumUnderlyingType: {
+ Result = BuiltinEnumUnderlyingType(BaseType, Loc);
+ break;
+ }
+ case UnaryTransformType::AddPointer: {
+ Result = BuiltinAddPointer(BaseType, Loc);
+ break;
+ }
+ case UnaryTransformType::RemovePointer: {
+ Result = BuiltinRemovePointer(BaseType, Loc);
+ break;
+ }
+ case UnaryTransformType::Decay: {
+ Result = BuiltinDecay(BaseType, Loc);
+ break;
+ }
+ case UnaryTransformType::AddLvalueReference:
+ case UnaryTransformType::AddRvalueReference: {
+ Result = BuiltinAddReference(BaseType, UKind, Loc);
+ break;
+ }
+ case UnaryTransformType::RemoveAllExtents:
+ case UnaryTransformType::RemoveExtent: {
+ Result = BuiltinRemoveExtent(BaseType, UKind, Loc);
+ break;
+ }
+ case UnaryTransformType::RemoveCVRef:
+ case UnaryTransformType::RemoveReference: {
+ Result = BuiltinRemoveReference(BaseType, UKind, Loc);
+ break;
+ }
+ case UnaryTransformType::RemoveConst:
+ case UnaryTransformType::RemoveCV:
+ case UnaryTransformType::RemoveRestrict:
+ case UnaryTransformType::RemoveVolatile: {
+ Result = BuiltinChangeCVRQualifiers(BaseType, UKind, Loc);
+ break;
+ }
+ case UnaryTransformType::MakeSigned:
+ case UnaryTransformType::MakeUnsigned: {
+ Result = BuiltinChangeSignedness(BaseType, UKind, Loc);
+ break;
+ }
+ }
+
+ return !Result.isNull()
+ ? Context.getUnaryTransformType(BaseType, Result, UKind)
+ : Result;
}
QualType Sema::BuildAtomicType(QualType T, SourceLocation Loc) {
- if (!T->isDependentType()) {
+ if (!isDependentOrGNUAutoType(T)) {
// FIXME: It isn't entirely clear whether incomplete atomic types
// are allowed or not; for simplicity, ban them for the moment.
if (RequireCompleteType(Loc, T, diag::err_atomic_specifier_bad_type, 0))
@@ -9066,12 +10071,14 @@ QualType Sema::BuildAtomicType(QualType T, SourceLocation Loc) {
DisallowedKind = 5;
else if (T->isSizelessType())
DisallowedKind = 6;
- else if (!T.isTriviallyCopyableType(Context))
+ else if (!T.isTriviallyCopyableType(Context) && getLangOpts().CPlusPlus)
// Some other non-trivially-copyable type (probably a C++ class)
DisallowedKind = 7;
- else if (T->isExtIntType()) {
- DisallowedKind = 8;
- }
+ else if (T->isBitIntType())
+ DisallowedKind = 8;
+ else if (getLangOpts().C23 && T->isUndeducedAutoType())
+ // _Atomic auto is prohibited in C23
+ DisallowedKind = 9;
if (DisallowedKind != -1) {
Diag(Loc, diag::err_atomic_specifier_bad_type) << DisallowedKind << T;
diff --git a/contrib/llvm-project/clang/lib/Sema/TreeTransform.h b/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
index d8a5b6ad4f94..2f012cade6b9 100644
--- a/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
+++ b/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
@@ -19,8 +19,8 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
-#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/OpenMPClause.h"
@@ -31,6 +31,7 @@
#include "clang/Basic/DiagnosticParse.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Sema/Designator.h"
+#include "clang/Sema/EnterExpressionEvaluationContext.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/ParsedTemplate.h"
@@ -40,6 +41,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
+#include <optional>
using namespace llvm::omp;
@@ -161,7 +163,7 @@ public:
/// Wether CXXConstructExpr can be skipped when they are implicit.
/// They will be reconstructed when used if needed.
- /// This is usefull when the user that cause rebuilding of the
+ /// This is useful when the user that cause rebuilding of the
/// CXXConstructExpr is outside of the expression at which the TreeTransform
/// started.
bool AllowSkippingCXXConstructExpr() { return true; }
@@ -279,9 +281,8 @@ public:
bool TryExpandParameterPacks(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
- bool &ShouldExpand,
- bool &RetainExpansion,
- Optional<unsigned> &NumExpansions) {
+ bool &ShouldExpand, bool &RetainExpansion,
+ std::optional<unsigned> &NumExpansions) {
ShouldExpand = false;
return false;
}
@@ -377,22 +378,43 @@ public:
/// By default, this routine transforms a statement by delegating to the
/// appropriate TransformXXXAttr function to transform a specific kind
/// of attribute. Subclasses may override this function to transform
- /// attributed statements using some other mechanism.
+ /// attributed statements/types using some other mechanism.
///
/// \returns the transformed attribute
const Attr *TransformAttr(const Attr *S);
-/// Transform the specified attribute.
-///
-/// Subclasses should override the transformation of attributes with a pragma
-/// spelling to transform expressions stored within the attribute.
-///
-/// \returns the transformed attribute.
-#define ATTR(X)
-#define PRAGMA_SPELLING_ATTR(X) \
+ // Transform the given statement attribute.
+ //
+ // Delegates to the appropriate TransformXXXAttr function to transform a
+ // specific kind of statement attribute. Unlike the non-statement taking
+ // version of this, this implements all attributes, not just pragmas.
+ const Attr *TransformStmtAttr(const Stmt *OrigS, const Stmt *InstS,
+ const Attr *A);
+
+ // Transform the specified attribute.
+ //
+ // Subclasses should override the transformation of attributes with a pragma
+ // spelling to transform expressions stored within the attribute.
+ //
+ // \returns the transformed attribute.
+#define ATTR(X) \
const X##Attr *Transform##X##Attr(const X##Attr *R) { return R; }
#include "clang/Basic/AttrList.inc"
+ // Transform the specified attribute.
+ //
+ // Subclasses should override the transformation of attributes to do
+ // transformation and checking of statement attributes. By default, this
+ // delegates to the non-statement taking version.
+ //
+ // \returns the transformed attribute.
+#define ATTR(X) \
+ const X##Attr *TransformStmt##X##Attr(const Stmt *, const Stmt *, \
+ const X##Attr *A) { \
+ return getDerived().Transform##X##Attr(A); \
+ }
+#include "clang/Basic/AttrList.inc"
+
/// Transform the given expression.
///
/// By default, this routine transforms an expression by delegating to the
@@ -522,12 +544,13 @@ public:
///
/// By default, transforms the types of conversion function, constructor,
/// and destructor names and then (if needed) rebuilds the declaration name.
- /// Identifiers and selectors are returned unmodified. Sublcasses may
+ /// Identifiers and selectors are returned unmodified. Subclasses may
/// override this function to provide alternate behavior.
DeclarationNameInfo
TransformDeclarationNameInfo(const DeclarationNameInfo &NameInfo);
- bool TransformRequiresExprRequirements(ArrayRef<concepts::Requirement *> Reqs,
+ bool TransformRequiresExprRequirements(
+ ArrayRef<concepts::Requirement *> Reqs,
llvm::SmallVectorImpl<concepts::Requirement *> &Transformed);
concepts::TypeRequirement *
TransformTypeRequirement(concepts::TypeRequirement *Req);
@@ -636,6 +659,14 @@ public:
QualType Transform##CLASS##Type(TypeLocBuilder &TLB, CLASS##TypeLoc T);
#include "clang/AST/TypeLocNodes.def"
+ QualType TransformTemplateTypeParmType(TypeLocBuilder &TLB,
+ TemplateTypeParmTypeLoc TL,
+ bool SuppressObjCLifetime);
+ QualType
+ TransformSubstTemplateTypeParmPackType(TypeLocBuilder &TLB,
+ SubstTemplateTypeParmPackTypeLoc TL,
+ bool SuppressObjCLifetime);
+
template<typename Fn>
QualType TransformFunctionProtoType(TypeLocBuilder &TLB,
FunctionProtoTypeLoc TL,
@@ -643,6 +674,10 @@ public:
Qualifiers ThisTypeQuals,
Fn TransformExceptionSpec);
+ template <typename Fn>
+ QualType TransformAttributedType(TypeLocBuilder &TLB, AttributedTypeLoc TL,
+ Fn TransformModifiedType);
+
bool TransformExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &Exceptions,
@@ -671,13 +706,49 @@ public:
/// The result vectors should be kept in sync; null entries in the
/// variables vector are acceptable.
///
+ /// LastParamTransformed, if non-null, will be set to the index of the last
+ /// parameter on which transfromation was started. In the event of an error,
+ /// this will contain the parameter which failed to instantiate.
+ ///
/// Return true on error.
bool TransformFunctionTypeParams(
SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const QualType *ParamTypes,
const FunctionProtoType::ExtParameterInfo *ParamInfos,
SmallVectorImpl<QualType> &PTypes, SmallVectorImpl<ParmVarDecl *> *PVars,
- Sema::ExtParameterInfoBuilder &PInfos);
+ Sema::ExtParameterInfoBuilder &PInfos, unsigned *LastParamTransformed);
+
+ bool TransformFunctionTypeParams(
+ SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
+ const QualType *ParamTypes,
+ const FunctionProtoType::ExtParameterInfo *ParamInfos,
+ SmallVectorImpl<QualType> &PTypes, SmallVectorImpl<ParmVarDecl *> *PVars,
+ Sema::ExtParameterInfoBuilder &PInfos) {
+ return getDerived().TransformFunctionTypeParams(
+ Loc, Params, ParamTypes, ParamInfos, PTypes, PVars, PInfos, nullptr);
+ }
+
+ /// Transforms the parameters of a requires expresison into the given vectors.
+ ///
+ /// The result vectors should be kept in sync; null entries in the
+ /// variables vector are acceptable.
+ ///
+ /// Returns an unset ExprResult on success. Returns an ExprResult the 'not
+ /// satisfied' RequiresExpr if subsitution failed, OR an ExprError, both of
+ /// which are cases where transformation shouldn't continue.
+ ExprResult TransformRequiresTypeParams(
+ SourceLocation KWLoc, SourceLocation RBraceLoc, const RequiresExpr *RE,
+ RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> Params,
+ SmallVectorImpl<QualType> &PTypes,
+ SmallVectorImpl<ParmVarDecl *> &TransParams,
+ Sema::ExtParameterInfoBuilder &PInfos) {
+ if (getDerived().TransformFunctionTypeParams(
+ KWLoc, Params, /*ParamTypes=*/nullptr,
+ /*ParamInfos=*/nullptr, PTypes, &TransParams, PInfos))
+ return ExprError();
+
+ return ExprResult{};
+ }
/// Transforms a single function-type parameter. Return null
/// on error.
@@ -686,7 +757,7 @@ public:
/// scope index; can be negative
ParmVarDecl *TransformFunctionTypeParam(ParmVarDecl *OldParm,
int indexAdjustment,
- Optional<unsigned> NumExpansions,
+ std::optional<unsigned> NumExpansions,
bool ExpectParameterPack);
/// Transform the body of a lambda-expression.
@@ -811,12 +882,9 @@ public:
/// By default, performs semantic analysis when building the array type.
/// Subclasses may override this routine to provide different behavior.
/// Also by default, all of the other Rebuild*Array
- QualType RebuildArrayType(QualType ElementType,
- ArrayType::ArraySizeModifier SizeMod,
- const llvm::APInt *Size,
- Expr *SizeExpr,
- unsigned IndexTypeQuals,
- SourceRange BracketsRange);
+ QualType RebuildArrayType(QualType ElementType, ArraySizeModifier SizeMod,
+ const llvm::APInt *Size, Expr *SizeExpr,
+ unsigned IndexTypeQuals, SourceRange BracketsRange);
/// Build a new constant array type given the element type, size
/// modifier, (known) size of the array, and index type qualifiers.
@@ -824,9 +892,8 @@ public:
/// By default, performs semantic analysis when building the array type.
/// Subclasses may override this routine to provide different behavior.
QualType RebuildConstantArrayType(QualType ElementType,
- ArrayType::ArraySizeModifier SizeMod,
- const llvm::APInt &Size,
- Expr *SizeExpr,
+ ArraySizeModifier SizeMod,
+ const llvm::APInt &Size, Expr *SizeExpr,
unsigned IndexTypeQuals,
SourceRange BracketsRange);
@@ -836,7 +903,7 @@ public:
/// By default, performs semantic analysis when building the array type.
/// Subclasses may override this routine to provide different behavior.
QualType RebuildIncompleteArrayType(QualType ElementType,
- ArrayType::ArraySizeModifier SizeMod,
+ ArraySizeModifier SizeMod,
unsigned IndexTypeQuals,
SourceRange BracketsRange);
@@ -846,8 +913,7 @@ public:
/// By default, performs semantic analysis when building the array type.
/// Subclasses may override this routine to provide different behavior.
QualType RebuildVariableArrayType(QualType ElementType,
- ArrayType::ArraySizeModifier SizeMod,
- Expr *SizeExpr,
+ ArraySizeModifier SizeMod, Expr *SizeExpr,
unsigned IndexTypeQuals,
SourceRange BracketsRange);
@@ -857,7 +923,7 @@ public:
/// By default, performs semantic analysis when building the array type.
/// Subclasses may override this routine to provide different behavior.
QualType RebuildDependentSizedArrayType(QualType ElementType,
- ArrayType::ArraySizeModifier SizeMod,
+ ArraySizeModifier SizeMod,
Expr *SizeExpr,
unsigned IndexTypeQuals,
SourceRange BracketsRange);
@@ -868,7 +934,7 @@ public:
/// By default, performs semantic analysis when building the vector type.
/// Subclasses may override this routine to provide different behavior.
QualType RebuildVectorType(QualType ElementType, unsigned NumElements,
- VectorType::VectorKind VecKind);
+ VectorKind VecKind);
/// Build a new potentially dependently-sized extended vector type
/// given the element type and number of elements.
@@ -876,8 +942,7 @@ public:
/// By default, performs semantic analysis when building the vector type.
/// Subclasses may override this routine to provide different behavior.
QualType RebuildDependentVectorType(QualType ElementType, Expr *SizeExpr,
- SourceLocation AttributeLoc,
- VectorType::VectorKind);
+ SourceLocation AttributeLoc, VectorKind);
/// Build a new extended vector type given the element type and
/// number of elements.
@@ -933,6 +998,11 @@ public:
/// the UnresolvedUsingTypenameDecl was transformed to.
QualType RebuildUnresolvedUsingType(SourceLocation NameLoc, Decl *D);
+ /// Build a new type found via an alias.
+ QualType RebuildUsingType(UsingShadowDecl *Found, QualType Underlying) {
+ return SemaRef.Context.getUsingType(Found, Underlying);
+ }
+
/// Build a new typedef type.
QualType RebuildTypedefType(TypedefNameDecl *Typedef) {
return SemaRef.Context.getTypeDeclType(Typedef);
@@ -958,12 +1028,13 @@ public:
///
/// By default, performs semantic analysis when building the typeof type.
/// Subclasses may override this routine to provide different behavior.
- QualType RebuildTypeOfExprType(Expr *Underlying, SourceLocation Loc);
+ QualType RebuildTypeOfExprType(Expr *Underlying, SourceLocation Loc,
+ TypeOfKind Kind);
/// Build a new typeof(type) type.
///
/// By default, builds a new TypeOfType with the given underlying type.
- QualType RebuildTypeOfType(QualType Underlying);
+ QualType RebuildTypeOfType(QualType Underlying, TypeOfKind Kind);
/// Build a new unary transform type.
QualType RebuildUnaryTransformType(QualType BaseType,
@@ -1056,23 +1127,18 @@ public:
// If it's still dependent, make a dependent specialization.
if (InstName.getAsDependentTemplateName())
- return SemaRef.Context.getDependentTemplateSpecializationType(Keyword,
- QualifierLoc.getNestedNameSpecifier(),
- Name,
- Args);
+ return SemaRef.Context.getDependentTemplateSpecializationType(
+ Keyword, QualifierLoc.getNestedNameSpecifier(), Name,
+ Args.arguments());
// Otherwise, make an elaborated type wrapping a non-dependent
// specialization.
QualType T =
- getDerived().RebuildTemplateSpecializationType(InstName, NameLoc, Args);
- if (T.isNull()) return QualType();
-
- if (Keyword == ETK_None && QualifierLoc.getNestedNameSpecifier() == nullptr)
- return T;
-
- return SemaRef.Context.getElaboratedType(Keyword,
- QualifierLoc.getNestedNameSpecifier(),
- T);
+ getDerived().RebuildTemplateSpecializationType(InstName, NameLoc, Args);
+ if (T.isNull())
+ return QualType();
+ return SemaRef.Context.getElaboratedType(
+ Keyword, QualifierLoc.getNestedNameSpecifier(), T);
}
/// Build a new typename type that refers to an identifier.
@@ -1097,7 +1163,8 @@ public:
Id);
}
- if (Keyword == ETK_None || Keyword == ETK_Typename) {
+ if (Keyword == ElaboratedTypeKeyword::None ||
+ Keyword == ElaboratedTypeKeyword::Typename) {
return SemaRef.CheckTypenameType(Keyword, KeywordLoc, QualifierLoc,
*Id, IdLoc, DeducedTSTContext);
}
@@ -1146,14 +1213,15 @@ public:
case LookupResult::FoundUnresolvedValue: {
NamedDecl *SomeDecl = Result.getRepresentativeDecl();
Sema::NonTagKind NTK = SemaRef.getNonTagTypeDeclKind(SomeDecl, Kind);
- SemaRef.Diag(IdLoc, diag::err_tag_reference_non_tag) << SomeDecl
- << NTK << Kind;
+ SemaRef.Diag(IdLoc, diag::err_tag_reference_non_tag)
+ << SomeDecl << NTK << llvm::to_underlying(Kind);
SemaRef.Diag(SomeDecl->getLocation(), diag::note_declared_at);
break;
}
default:
SemaRef.Diag(IdLoc, diag::err_not_tag_in_scope)
- << Kind << Id << DC << QualifierLoc.getSourceRange();
+ << llvm::to_underlying(Kind) << Id << DC
+ << QualifierLoc.getSourceRange();
break;
}
return QualType();
@@ -1177,10 +1245,9 @@ public:
///
/// By default, builds a new PackExpansionType type from the given pattern.
/// Subclasses may override this routine to provide different behavior.
- QualType RebuildPackExpansionType(QualType Pattern,
- SourceRange PatternRange,
+ QualType RebuildPackExpansionType(QualType Pattern, SourceRange PatternRange,
SourceLocation EllipsisLoc,
- Optional<unsigned> NumExpansions) {
+ std::optional<unsigned> NumExpansions) {
return getSema().CheckPackExpansion(Pattern, PatternRange, EllipsisLoc,
NumExpansions);
}
@@ -1195,12 +1262,12 @@ public:
QualType RebuildPipeType(QualType ValueType, SourceLocation KWLoc,
bool isReadPipe);
- /// Build an extended int given its value type.
- QualType RebuildExtIntType(bool IsUnsigned, unsigned NumBits,
+ /// Build a bit-precise int given its value type.
+ QualType RebuildBitIntType(bool IsUnsigned, unsigned NumBits,
SourceLocation Loc);
- /// Build a dependent extended int given its value type.
- QualType RebuildDependentExtIntType(bool IsUnsigned, Expr *NumBitsExpr,
+ /// Build a dependent bit-precise int given its value type.
+ QualType RebuildDependentBitIntType(bool IsUnsigned, Expr *NumBitsExpr,
SourceLocation Loc);
/// Build a new template name given a nested name specifier, a flag
@@ -1247,9 +1314,11 @@ public:
/// be resolved to a specific template, then builds the appropriate kind of
/// template name. Subclasses may override this routine to provide different
/// behavior.
- TemplateName RebuildTemplateName(TemplateTemplateParmDecl *Param,
- const TemplateArgument &ArgPack) {
- return getSema().Context.getSubstTemplateTemplateParmPack(Param, ArgPack);
+ TemplateName RebuildTemplateName(const TemplateArgument &ArgPack,
+ Decl *AssociatedDecl, unsigned Index,
+ bool Final) {
+ return getSema().Context.getSubstTemplateTemplateParmPack(
+ ArgPack, AssociatedDecl, Index, Final);
}
/// Build a new compound statement.
@@ -1313,6 +1382,8 @@ public:
StmtResult RebuildAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr *> Attrs,
Stmt *SubStmt) {
+ if (SemaRef.CheckRebuiltStmtAttributes(Attrs))
+ return StmtError();
return SemaRef.BuildAttributedStmt(AttrLoc, Attrs, SubStmt);
}
@@ -1320,12 +1391,12 @@ public:
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- StmtResult RebuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
+ StmtResult RebuildIfStmt(SourceLocation IfLoc, IfStatementKind Kind,
SourceLocation LParenLoc, Sema::ConditionResult Cond,
SourceLocation RParenLoc, Stmt *Init, Stmt *Then,
SourceLocation ElseLoc, Stmt *Else) {
- return getSema().ActOnIfStmt(IfLoc, IsConstexpr, LParenLoc, Init, Cond,
- RParenLoc, Then, ElseLoc, Else);
+ return getSema().ActOnIfStmt(IfLoc, Kind, LParenLoc, Init, Cond, RParenLoc,
+ Then, ElseLoc, Else);
}
/// Start building a new switch statement.
@@ -1465,9 +1536,28 @@ public:
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildCoawaitExpr(SourceLocation CoawaitLoc, Expr *Result,
+ ExprResult RebuildCoawaitExpr(SourceLocation CoawaitLoc, Expr *Operand,
+ UnresolvedLookupExpr *OpCoawaitLookup,
bool IsImplicit) {
- return getSema().BuildResolvedCoawaitExpr(CoawaitLoc, Result, IsImplicit);
+ // This function rebuilds a coawait-expr given its operator.
+ // For an explicit coawait-expr, the rebuild involves the full set
+ // of transformations performed by BuildUnresolvedCoawaitExpr(),
+ // including calling await_transform().
+ // For an implicit coawait-expr, we need to rebuild the "operator
+ // coawait" but not await_transform(), so use BuildResolvedCoawaitExpr().
+ // This mirrors how the implicit CoawaitExpr is originally created
+ // in Sema::ActOnCoroutineBodyStart().
+ if (IsImplicit) {
+ ExprResult Suspend = getSema().BuildOperatorCoawaitCall(
+ CoawaitLoc, Operand, OpCoawaitLookup);
+ if (Suspend.isInvalid())
+ return ExprError();
+ return getSema().BuildResolvedCoawaitExpr(CoawaitLoc, Operand,
+ Suspend.get(), true);
+ }
+
+ return getSema().BuildUnresolvedCoawaitExpr(CoawaitLoc, Operand,
+ OpCoawaitLookup);
}
/// Build a new co_await expression.
@@ -1558,14 +1648,15 @@ public:
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- StmtResult RebuildOMPExecutableDirective(OpenMPDirectiveKind Kind,
- DeclarationNameInfo DirName,
- OpenMPDirectiveKind CancelRegion,
- ArrayRef<OMPClause *> Clauses,
- Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc) {
+ StmtResult RebuildOMPExecutableDirective(
+ OpenMPDirectiveKind Kind, DeclarationNameInfo DirName,
+ OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc,
+ OpenMPDirectiveKind PrevMappedDirective = OMPD_unknown) {
+
return getSema().ActOnOpenMPExecutableDirective(
- Kind, DirName, CancelRegion, Clauses, AStmt, StartLoc, EndLoc);
+ Kind, DirName, CancelRegion, Clauses, AStmt, StartLoc, EndLoc,
+ PrevMappedDirective);
}
/// Build a new OpenMP 'if' clause.
@@ -1819,16 +1910,14 @@ public:
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
- OMPClause *RebuildOMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- OpenMPLinearClauseKind Modifier,
- SourceLocation ModifierLoc,
- SourceLocation ColonLoc,
- SourceLocation EndLoc) {
+ OMPClause *RebuildOMPLinearClause(
+ ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc,
+ SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier,
+ SourceLocation ModifierLoc, SourceLocation ColonLoc,
+ SourceLocation StepModifierLoc, SourceLocation EndLoc) {
return getSema().ActOnOpenMPLinearClause(VarList, Step, StartLoc, LParenLoc,
Modifier, ModifierLoc, ColonLoc,
- EndLoc);
+ StepModifierLoc, EndLoc);
}
/// Build a new OpenMP 'aligned' clause.
@@ -1895,14 +1984,13 @@ public:
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
- OMPClause *
- RebuildOMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
- SourceLocation DepLoc, SourceLocation ColonLoc,
- ArrayRef<Expr *> VarList, SourceLocation StartLoc,
- SourceLocation LParenLoc, SourceLocation EndLoc) {
- return getSema().ActOnOpenMPDependClause(DepModifier, DepKind, DepLoc,
- ColonLoc, VarList, StartLoc,
- LParenLoc, EndLoc);
+ OMPClause *RebuildOMPDependClause(OMPDependClause::DependDataTy Data,
+ Expr *DepModifier, ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPDependClause(Data, DepModifier, VarList,
+ StartLoc, LParenLoc, EndLoc);
}
/// Build a new OpenMP 'device' clause.
@@ -1923,16 +2011,17 @@ public:
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
OMPClause *RebuildOMPMapClause(
- ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
+ Expr *IteratorModifier, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec MapperIdScopeSpec, DeclarationNameInfo MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers) {
- return getSema().ActOnOpenMPMapClause(MapTypeModifiers, MapTypeModifiersLoc,
- MapperIdScopeSpec, MapperId, MapType,
- IsMapTypeImplicit, MapLoc, ColonLoc,
- VarList, Locs, UnresolvedMappers);
+ return getSema().ActOnOpenMPMapClause(
+ IteratorModifier, MapTypeModifiers, MapTypeModifiersLoc,
+ MapperIdScopeSpec, MapperId, MapType, IsMapTypeImplicit, MapLoc,
+ ColonLoc, VarList, Locs,
+ /*NoDiagnose=*/false, UnresolvedMappers);
}
/// Build a new OpenMP 'allocate' clause.
@@ -1986,22 +2075,26 @@ public:
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- OMPClause *RebuildOMPGrainsizeClause(Expr *Grainsize, SourceLocation StartLoc,
+ OMPClause *RebuildOMPGrainsizeClause(OpenMPGrainsizeClauseModifier Modifier,
+ Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
+ SourceLocation ModifierLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPGrainsizeClause(Grainsize, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().ActOnOpenMPGrainsizeClause(Modifier, Device, StartLoc,
+ LParenLoc, ModifierLoc, EndLoc);
}
/// Build a new OpenMP 'num_tasks' clause.
///
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
- OMPClause *RebuildOMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
+ OMPClause *RebuildOMPNumTasksClause(OpenMPNumTasksClauseModifier Modifier,
+ Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
+ SourceLocation ModifierLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPNumTasksClause(NumTasks, StartLoc, LParenLoc,
- EndLoc);
+ return getSema().ActOnOpenMPNumTasksClause(Modifier, NumTasks, StartLoc,
+ LParenLoc, ModifierLoc, EndLoc);
}
/// Build a new OpenMP 'hint' clause.
@@ -2096,6 +2189,15 @@ public:
return getSema().ActOnOpenMPIsDevicePtrClause(VarList, Locs);
}
+ /// Build a new OpenMP 'has_device_addr' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPHasDeviceAddrClause(ArrayRef<Expr *> VarList,
+ const OMPVarListLocTy &Locs) {
+ return getSema().ActOnOpenMPHasDeviceAddrClause(VarList, Locs);
+ }
+
/// Build a new OpenMP 'defaultmap' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
@@ -2175,28 +2277,25 @@ public:
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
- OMPClause *RebuildOMPOrderClause(OpenMPOrderClauseKind Kind,
- SourceLocation KindKwLoc,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc) {
- return getSema().ActOnOpenMPOrderClause(Kind, KindKwLoc, StartLoc,
- LParenLoc, EndLoc);
+ OMPClause *RebuildOMPOrderClause(
+ OpenMPOrderClauseKind Kind, SourceLocation KindKwLoc,
+ SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc,
+ OpenMPOrderClauseModifier Modifier, SourceLocation ModifierKwLoc) {
+ return getSema().ActOnOpenMPOrderClause(Modifier, Kind, StartLoc, LParenLoc,
+ ModifierKwLoc, KindKwLoc, EndLoc);
}
/// Build a new OpenMP 'init' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
- OMPClause *RebuildOMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs,
- bool IsTarget, bool IsTargetSync,
+ OMPClause *RebuildOMPInitClause(Expr *InteropVar, OMPInteropInfo &InteropInfo,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc,
SourceLocation EndLoc) {
- return getSema().ActOnOpenMPInitClause(InteropVar, PrefExprs, IsTarget,
- IsTargetSync, StartLoc, LParenLoc,
- VarLoc, EndLoc);
+ return getSema().ActOnOpenMPInitClause(InteropVar, InteropInfo, StartLoc,
+ LParenLoc, VarLoc, EndLoc);
}
/// Build a new OpenMP 'use' clause.
@@ -2256,6 +2355,109 @@ public:
EndLoc);
}
+ /// Build a new OpenMP 'bind' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPBindClause(OpenMPBindClauseKind Kind,
+ SourceLocation KindLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPBindClause(Kind, KindLoc, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// Build a new OpenMP 'ompx_dyn_cgroup_mem' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPXDynCGroupMemClause(Expr *Size, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPXDynCGroupMemClause(Size, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// Build a new OpenMP 'ompx_attribute' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPXAttributeClause(ArrayRef<const Attr *> Attrs,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPXAttributeClause(Attrs, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// Build a new OpenMP 'ompx_bare' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPXBareClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPXBareClause(StartLoc, EndLoc);
+ }
+
+ /// Build a new OpenMP 'align' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPAlignClause(Expr *A, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPAlignClause(A, StartLoc, LParenLoc, EndLoc);
+ }
+
+ /// Build a new OpenMP 'at' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPAtClause(OpenMPAtClauseKind Kind, SourceLocation KwLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPAtClause(Kind, KwLoc, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// Build a new OpenMP 'severity' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPSeverityClause(OpenMPSeverityClauseKind Kind,
+ SourceLocation KwLoc,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPSeverityClause(Kind, KwLoc, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// Build a new OpenMP 'message' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPMessageClause(Expr *MS, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPMessageClause(MS, StartLoc, LParenLoc, EndLoc);
+ }
+
+ /// Build a new OpenMP 'doacross' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *
+ RebuildOMPDoacrossClause(OpenMPDoacrossClauseModifier DepType,
+ SourceLocation DepLoc, SourceLocation ColonLoc,
+ ArrayRef<Expr *> VarList, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPDoacrossClause(
+ DepType, DepLoc, ColonLoc, VarList, StartLoc, LParenLoc, EndLoc);
+ }
+
/// Rebuild the operand to an Objective-C \@synchronized statement.
///
/// By default, performs semantic analysis to build the new statement.
@@ -2424,8 +2626,7 @@ public:
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildPredefinedExpr(SourceLocation Loc,
- PredefinedExpr::IdentKind IK) {
+ ExprResult RebuildPredefinedExpr(SourceLocation Loc, PredefinedIdentKind IK) {
return getSema().BuildPredefinedExpr(Loc, IK);
}
@@ -2601,6 +2802,13 @@ public:
/*Scope=*/nullptr, Callee, LParenLoc, Args, RParenLoc, ExecConfig);
}
+ ExprResult RebuildCxxSubscriptExpr(Expr *Callee, SourceLocation LParenLoc,
+ MultiExprArg Args,
+ SourceLocation RParenLoc) {
+ return getSema().ActOnArraySubscriptExpr(
+ /*Scope=*/nullptr, Callee, LParenLoc, Args, RParenLoc);
+ }
+
/// Build a new member access expression.
///
/// By default, performs semantic analysis to build the new expression.
@@ -2652,6 +2860,21 @@ public:
R.addDecl(FoundDecl);
R.resolveKind();
+ if (getSema().isUnevaluatedContext() && Base->isImplicitCXXThis() &&
+ isa<FieldDecl, IndirectFieldDecl, MSPropertyDecl>(Member)) {
+ if (auto *ThisClass = cast<CXXThisExpr>(Base)
+ ->getType()
+ ->getPointeeType()
+ ->getAsCXXRecordDecl()) {
+ auto *Class = cast<CXXRecordDecl>(Member->getDeclContext());
+ // In unevaluated contexts, an expression supposed to be a member access
+ // might reference a member in an unrelated class.
+ if (!ThisClass->Equals(Class) && !ThisClass->isDerivedFrom(Class))
+ return getSema().BuildDeclRefExpr(Member, Member->getType(),
+ VK_LValue, Member->getLocation());
+ }
+ }
+
return getSema().BuildMemberReferenceExpr(Base, BaseType, OpLoc, isArrow,
SS, TemplateKWLoc,
FirstQualifierInScope,
@@ -2721,20 +2944,18 @@ public:
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildExtVectorElementExpr(Expr *Base,
- SourceLocation OpLoc,
- SourceLocation AccessorLoc,
- IdentifierInfo &Accessor) {
+ ExprResult RebuildExtVectorElementExpr(Expr *Base, SourceLocation OpLoc,
+ bool IsArrow,
+ SourceLocation AccessorLoc,
+ IdentifierInfo &Accessor) {
CXXScopeSpec SS;
DeclarationNameInfo NameInfo(&Accessor, AccessorLoc);
- return getSema().BuildMemberReferenceExpr(Base, Base->getType(),
- OpLoc, /*IsArrow*/ false,
- SS, SourceLocation(),
- /*FirstQualifierInScope*/ nullptr,
- NameInfo,
- /* TemplateArgs */ nullptr,
- /*S*/ nullptr);
+ return getSema().BuildMemberReferenceExpr(
+ Base, Base->getType(), OpLoc, IsArrow, SS, SourceLocation(),
+ /*FirstQualifierInScope*/ nullptr, NameInfo,
+ /* TemplateArgs */ nullptr,
+ /*S*/ nullptr);
}
/// Build a new initializer list expression.
@@ -2828,7 +3049,7 @@ public:
RParenLoc);
}
- /// Build a new generic selection expression.
+ /// Build a new generic selection expression with an expression predicate.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
@@ -2839,9 +3060,25 @@ public:
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs) {
return getSema().CreateGenericSelectionExpr(KeyLoc, DefaultLoc, RParenLoc,
+ /*PredicateIsExpr=*/true,
ControllingExpr, Types, Exprs);
}
+ /// Build a new generic selection expression with a type predicate.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildGenericSelectionExpr(SourceLocation KeyLoc,
+ SourceLocation DefaultLoc,
+ SourceLocation RParenLoc,
+ TypeSourceInfo *ControllingType,
+ ArrayRef<TypeSourceInfo *> Types,
+ ArrayRef<Expr *> Exprs) {
+ return getSema().CreateGenericSelectionExpr(KeyLoc, DefaultLoc, RParenLoc,
+ /*PredicateIsExpr=*/false,
+ ControllingType, Types, Exprs);
+ }
+
/// Build a new overloaded operator call expression.
///
/// By default, performs semantic analysis to build the new expression.
@@ -2851,10 +3088,11 @@ public:
/// argument-dependent lookup, etc. Subclasses may override this routine to
/// provide different behavior.
ExprResult RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
- SourceLocation OpLoc,
- Expr *Callee,
- Expr *First,
- Expr *Second);
+ SourceLocation OpLoc,
+ SourceLocation CalleeLoc,
+ bool RequiresADL,
+ const UnresolvedSetImpl &Functions,
+ Expr *First, Expr *Second);
/// Build a new C++ "named" cast expression, such as static_cast or
/// reinterpret_cast.
@@ -2988,6 +3226,13 @@ public:
Expr *Sub,
SourceLocation RParenLoc,
bool ListInitialization) {
+ // If Sub is a ParenListExpr, then Sub is the syntatic form of a
+ // CXXParenListInitExpr. Pass its expanded arguments so that the
+ // CXXParenListInitExpr can be rebuilt.
+ if (auto *PLE = dyn_cast<ParenListExpr>(Sub))
+ return getSema().BuildCXXTypeConstructExpr(
+ TInfo, LParenLoc, MultiExprArg(PLE->getExprs(), PLE->getNumExprs()),
+ RParenLoc, ListInitialization);
return getSema().BuildCXXTypeConstructExpr(TInfo, LParenLoc,
MultiExprArg(&Sub, 1), RParenLoc,
ListInitialization);
@@ -3072,9 +3317,10 @@ public:
/// By default, builds a new default-argument expression, which does not
/// require any semantic analysis. Subclasses may override this routine to
/// provide different behavior.
- ExprResult RebuildCXXDefaultArgExpr(SourceLocation Loc, ParmVarDecl *Param) {
+ ExprResult RebuildCXXDefaultArgExpr(SourceLocation Loc, ParmVarDecl *Param,
+ Expr *RewrittenExpr) {
return CXXDefaultArgExpr::Create(getSema().Context, Loc, Param,
- getSema().CurContext);
+ RewrittenExpr, getSema().CurContext);
}
/// Build a new C++11 default-initialization expression.
@@ -3084,8 +3330,7 @@ public:
/// routine to provide different behavior.
ExprResult RebuildCXXDefaultInitExpr(SourceLocation Loc,
FieldDecl *Field) {
- return CXXDefaultInitExpr::Create(getSema().Context, Loc, Field,
- getSema().CurContext);
+ return getSema().BuildCXXDefaultInitExpr(Loc, Field);
}
/// Build a new C++ zero-initialization expression.
@@ -3095,25 +3340,23 @@ public:
ExprResult RebuildCXXScalarValueInitExpr(TypeSourceInfo *TSInfo,
SourceLocation LParenLoc,
SourceLocation RParenLoc) {
- return getSema().BuildCXXTypeConstructExpr(
- TSInfo, LParenLoc, None, RParenLoc, /*ListInitialization=*/false);
+ return getSema().BuildCXXTypeConstructExpr(TSInfo, LParenLoc, std::nullopt,
+ RParenLoc,
+ /*ListInitialization=*/false);
}
/// Build a new C++ "new" expression.
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildCXXNewExpr(SourceLocation StartLoc,
- bool UseGlobal,
+ ExprResult RebuildCXXNewExpr(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
- SourceRange TypeIdParens,
- QualType AllocatedType,
+ SourceRange TypeIdParens, QualType AllocatedType,
TypeSourceInfo *AllocatedTypeInfo,
- Optional<Expr *> ArraySize,
- SourceRange DirectInitRange,
- Expr *Initializer) {
+ std::optional<Expr *> ArraySize,
+ SourceRange DirectInitRange, Expr *Initializer) {
return getSema().BuildCXXNew(StartLoc, UseGlobal,
PlacementLParen,
PlacementArgs,
@@ -3212,17 +3455,12 @@ public:
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildCXXConstructExpr(QualType T,
- SourceLocation Loc,
- CXXConstructorDecl *Constructor,
- bool IsElidable,
- MultiExprArg Args,
- bool HadMultipleCandidates,
- bool ListInitialization,
- bool StdInitListInitialization,
- bool RequiresZeroInit,
- CXXConstructExpr::ConstructionKind ConstructKind,
- SourceRange ParenRange) {
+ ExprResult RebuildCXXConstructExpr(
+ QualType T, SourceLocation Loc, CXXConstructorDecl *Constructor,
+ bool IsElidable, MultiExprArg Args, bool HadMultipleCandidates,
+ bool ListInitialization, bool StdInitListInitialization,
+ bool RequiresZeroInit, CXXConstructionKind ConstructKind,
+ SourceRange ParenRange) {
// Reconstruct the constructor we originally found, which might be
// different if this is a call to an inherited constructor.
CXXConstructorDecl *FoundCtor = Constructor;
@@ -3335,11 +3573,10 @@ public:
}
/// Build a new expression to compute the length of a parameter pack.
- ExprResult RebuildSizeOfPackExpr(SourceLocation OperatorLoc,
- NamedDecl *Pack,
+ ExprResult RebuildSizeOfPackExpr(SourceLocation OperatorLoc, NamedDecl *Pack,
SourceLocation PackLoc,
SourceLocation RParenLoc,
- Optional<unsigned> Length,
+ std::optional<unsigned> Length,
ArrayRef<TemplateArgument> PartialArgs) {
return SizeOfPackExpr::Create(SemaRef.Context, OperatorLoc, Pack, PackLoc,
RParenLoc, Length, PartialArgs);
@@ -3350,11 +3587,12 @@ public:
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
+ ExprResult RebuildSourceLocExpr(SourceLocIdentKind Kind, QualType ResultTy,
SourceLocation BuiltinLoc,
SourceLocation RPLoc,
DeclContext *ParentContext) {
- return getSema().BuildSourceLocExpr(Kind, BuiltinLoc, RPLoc, ParentContext);
+ return getSema().BuildSourceLocExpr(Kind, ResultTy, BuiltinLoc, RPLoc,
+ ParentContext);
}
/// Build a new Objective-C boxed expression.
@@ -3382,11 +3620,14 @@ public:
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
+ SourceLocation LParenLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
+ SourceLocation RParenLoc,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc) {
- return RequiresExpr::Create(SemaRef.Context, RequiresKWLoc, Body,
- LocalParameters, Requirements, ClosingBraceLoc);
+ return RequiresExpr::Create(SemaRef.Context, RequiresKWLoc, Body, LParenLoc,
+ LocalParameters, RParenLoc, Requirements,
+ ClosingBraceLoc);
}
concepts::TypeRequirement *
@@ -3416,9 +3657,10 @@ public:
}
concepts::NestedRequirement *
- RebuildNestedRequirement(
- concepts::Requirement::SubstitutionDiagnostic *SubstDiag) {
- return SemaRef.BuildNestedRequirement(SubstDiag);
+ RebuildNestedRequirement(StringRef InvalidConstraintEntity,
+ const ASTConstraintSatisfaction &Satisfaction) {
+ return SemaRef.BuildNestedRequirement(InvalidConstraintEntity,
+ Satisfaction);
}
concepts::NestedRequirement *RebuildNestedRequirement(Expr *Constraint) {
@@ -3641,9 +3883,9 @@ public:
/// By default, performs semantic analysis to build a new pack expansion
/// for a template argument. Subclasses may override this routine to provide
/// different behavior.
- TemplateArgumentLoc RebuildPackExpansion(TemplateArgumentLoc Pattern,
- SourceLocation EllipsisLoc,
- Optional<unsigned> NumExpansions) {
+ TemplateArgumentLoc
+ RebuildPackExpansion(TemplateArgumentLoc Pattern, SourceLocation EllipsisLoc,
+ std::optional<unsigned> NumExpansions) {
switch (Pattern.getArgument().getKind()) {
case TemplateArgument::Expression: {
ExprResult Result
@@ -3666,6 +3908,7 @@ public:
case TemplateArgument::Null:
case TemplateArgument::Integral:
case TemplateArgument::Declaration:
+ case TemplateArgument::StructuralValue:
case TemplateArgument::Pack:
case TemplateArgument::TemplateExpansion:
case TemplateArgument::NullPtr:
@@ -3690,7 +3933,7 @@ public:
/// for an expression. Subclasses may override this routine to provide
/// different behavior.
ExprResult RebuildPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
- Optional<unsigned> NumExpansions) {
+ std::optional<unsigned> NumExpansions) {
return getSema().CheckPackExpansion(Pattern, EllipsisLoc, NumExpansions);
}
@@ -3703,7 +3946,7 @@ public:
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
- Optional<unsigned> NumExpansions) {
+ std::optional<unsigned> NumExpansions) {
return getSema().BuildCXXFoldExpr(ULE, LParenLoc, LHS, Operator,
EllipsisLoc, RHS, RParenLoc,
NumExpansions);
@@ -3840,8 +4083,10 @@ ExprResult TreeTransform<Derived>::TransformInitializer(Expr *Init,
if (auto *FE = dyn_cast<FullExpr>(Init))
Init = FE->getSubExpr();
- if (auto *AIL = dyn_cast<ArrayInitLoopExpr>(Init))
- Init = AIL->getCommonExpr();
+ if (auto *AIL = dyn_cast<ArrayInitLoopExpr>(Init)) {
+ OpaqueValueExpr *OVE = AIL->getCommonExpr();
+ Init = OVE->getSourceExpr();
+ }
if (MaterializeTemporaryExpr *MTE = dyn_cast<MaterializeTemporaryExpr>(Init))
Init = MTE->getSubExpr();
@@ -3866,13 +4111,13 @@ ExprResult TreeTransform<Derived>::TransformInitializer(Expr *Init,
// Revert value-initialization back to empty parens.
if (CXXScalarValueInitExpr *VIE = dyn_cast<CXXScalarValueInitExpr>(Init)) {
SourceRange Parens = VIE->getSourceRange();
- return getDerived().RebuildParenListExpr(Parens.getBegin(), None,
+ return getDerived().RebuildParenListExpr(Parens.getBegin(), std::nullopt,
Parens.getEnd());
}
// FIXME: We shouldn't build ImplicitValueInitExprs for direct-initialization.
if (isa<ImplicitValueInitExpr>(Init))
- return getDerived().RebuildParenListExpr(SourceLocation(), None,
+ return getDerived().RebuildParenListExpr(SourceLocation(), std::nullopt,
SourceLocation());
// Revert initialization by constructor back to a parenthesized or braced list
@@ -3940,8 +4185,8 @@ bool TreeTransform<Derived>::TransformExprs(Expr *const *Inputs,
// be expanded.
bool Expand = true;
bool RetainExpansion = false;
- Optional<unsigned> OrigNumExpansions = Expansion->getNumExpansions();
- Optional<unsigned> NumExpansions = OrigNumExpansions;
+ std::optional<unsigned> OrigNumExpansions = Expansion->getNumExpansions();
+ std::optional<unsigned> NumExpansions = OrigNumExpansions;
if (getDerived().TryExpandParameterPacks(Expansion->getEllipsisLoc(),
Pattern->getSourceRange(),
Unexpanded,
@@ -4046,7 +4291,8 @@ Sema::ConditionResult TreeTransform<Derived>::TransformCondition(
if (CondExpr.isInvalid())
return Sema::ConditionError();
- return getSema().ActOnCondition(nullptr, Loc, CondExpr.get(), Kind);
+ return getSema().ActOnCondition(nullptr, Loc, CondExpr.get(), Kind,
+ /*MissingOK=*/true);
}
return Sema::ConditionResult();
@@ -4057,9 +4303,13 @@ NestedNameSpecifierLoc TreeTransform<Derived>::TransformNestedNameSpecifierLoc(
NestedNameSpecifierLoc NNS, QualType ObjectType,
NamedDecl *FirstQualifierInScope) {
SmallVector<NestedNameSpecifierLoc, 4> Qualifiers;
- for (NestedNameSpecifierLoc Qualifier = NNS; Qualifier;
- Qualifier = Qualifier.getPrefix())
- Qualifiers.push_back(Qualifier);
+
+ auto insertNNS = [&Qualifiers](NestedNameSpecifierLoc NNS) {
+ for (NestedNameSpecifierLoc Qualifier = NNS; Qualifier;
+ Qualifier = Qualifier.getPrefix())
+ Qualifiers.push_back(Qualifier);
+ };
+ insertNNS(NNS);
CXXScopeSpec SS;
while (!Qualifiers.empty()) {
@@ -4116,24 +4366,27 @@ NestedNameSpecifierLoc TreeTransform<Derived>::TransformNestedNameSpecifierLoc(
if (!TL)
return NestedNameSpecifierLoc();
- if (TL.getType()->isDependentType() || TL.getType()->isRecordType() ||
- (SemaRef.getLangOpts().CPlusPlus11 &&
- TL.getType()->isEnumeralType())) {
- assert(!TL.getType().hasLocalQualifiers() &&
- "Can't get cv-qualifiers here");
- if (TL.getType()->isEnumeralType())
+ QualType T = TL.getType();
+ if (T->isDependentType() || T->isRecordType() ||
+ (SemaRef.getLangOpts().CPlusPlus11 && T->isEnumeralType())) {
+ if (T->isEnumeralType())
SemaRef.Diag(TL.getBeginLoc(),
diag::warn_cxx98_compat_enum_nested_name_spec);
+
+ if (const auto ETL = TL.getAs<ElaboratedTypeLoc>()) {
+ SS.Adopt(ETL.getQualifierLoc());
+ TL = ETL.getNamedTypeLoc();
+ }
SS.Extend(SemaRef.Context, /*FIXME:*/ SourceLocation(), TL,
Q.getLocalEndLoc());
break;
}
// If the nested-name-specifier is an invalid type def, don't emit an
// error because a previous error should have already been emitted.
- TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>();
+ TypedefTypeLoc TTL = TL.getAsAdjusted<TypedefTypeLoc>();
if (!TTL || !TTL.getTypedefNameDecl()->isInvalidDecl()) {
SemaRef.Diag(TL.getBeginLoc(), diag::err_nested_name_spec_non_tag)
- << TL.getType() << SS.getRange();
+ << T << SS.getRange();
}
return NestedNameSpecifierLoc();
}
@@ -4232,7 +4485,7 @@ TreeTransform<Derived>::TransformTemplateName(CXXScopeSpec &SS,
NamedDecl *FirstQualifierInScope,
bool AllowInjectedClassName) {
if (QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName()) {
- TemplateDecl *Template = QTN->getTemplateDecl();
+ TemplateDecl *Template = QTN->getUnderlyingTemplate().getAsTemplateDecl();
assert(Template && "qualified template name must refer to a template");
TemplateDecl *TransTemplate
@@ -4296,18 +4549,9 @@ TreeTransform<Derived>::TransformTemplateName(CXXScopeSpec &SS,
if (SubstTemplateTemplateParmPackStorage *SubstPack
= Name.getAsSubstTemplateTemplateParmPack()) {
- TemplateTemplateParmDecl *TransParam
- = cast_or_null<TemplateTemplateParmDecl>(
- getDerived().TransformDecl(NameLoc, SubstPack->getParameterPack()));
- if (!TransParam)
- return TemplateName();
-
- if (!getDerived().AlwaysRebuild() &&
- TransParam == SubstPack->getParameterPack())
- return Name;
-
- return getDerived().RebuildTemplateName(TransParam,
- SubstPack->getArgumentPack());
+ return getDerived().RebuildTemplateName(
+ SubstPack->getArgumentPack(), SubstPack->getAssociatedDecl(),
+ SubstPack->getIndex(), SubstPack->getFinal());
}
// These should be getting filtered out before they reach the AST.
@@ -4334,7 +4578,8 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
case TemplateArgument::Integral:
case TemplateArgument::NullPtr:
- case TemplateArgument::Declaration: {
+ case TemplateArgument::Declaration:
+ case TemplateArgument::StructuralValue: {
// Transform a resolved template argument straight to a resolved template
// argument. We get here when substituting into an already-substituted
// template type argument during concept satisfaction checking.
@@ -4361,9 +4606,15 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
else if (Arg.getKind() == TemplateArgument::NullPtr)
Output = TemplateArgumentLoc(TemplateArgument(NewT, /*IsNullPtr=*/true),
TemplateArgumentLocInfo());
- else
+ else if (Arg.getKind() == TemplateArgument::Declaration)
Output = TemplateArgumentLoc(TemplateArgument(NewD, NewT),
TemplateArgumentLocInfo());
+ else if (Arg.getKind() == TemplateArgument::StructuralValue)
+ Output = TemplateArgumentLoc(
+ TemplateArgument(getSema().Context, NewT, Arg.getAsStructuralValue()),
+ TemplateArgumentLocInfo());
+ else
+ llvm_unreachable("unexpected template argument kind");
return false;
}
@@ -4410,7 +4661,7 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
getSema(),
Uneval ? Sema::ExpressionEvaluationContext::Unevaluated
: Sema::ExpressionEvaluationContext::ConstantEvaluated,
- /*LambdaContextDecl=*/nullptr, /*ExprContext=*/
+ Sema::ReuseLambdaContextDecl, /*ExprContext=*/
Sema::ExpressionEvaluationContextRecord::EK_TemplateArgument);
Expr *InputExpr = Input.getSourceExpression();
@@ -4521,7 +4772,7 @@ bool TreeTransform<Derived>::TransformTemplateArguments(
// We have a pack expansion, for which we will be substituting into
// the pattern.
SourceLocation Ellipsis;
- Optional<unsigned> OrigNumExpansions;
+ std::optional<unsigned> OrigNumExpansions;
TemplateArgumentLoc Pattern
= getSema().getTemplateArgumentPackExpansionPattern(
In, Ellipsis, OrigNumExpansions);
@@ -4534,7 +4785,7 @@ bool TreeTransform<Derived>::TransformTemplateArguments(
// be expanded.
bool Expand = true;
bool RetainExpansion = false;
- Optional<unsigned> NumExpansions = OrigNumExpansions;
+ std::optional<unsigned> NumExpansions = OrigNumExpansions;
if (getDerived().TryExpandParameterPacks(Ellipsis,
Pattern.getSourceRange(),
Unexpanded,
@@ -4721,7 +4972,20 @@ template<typename Derived>
QualType
TreeTransform<Derived>::TransformQualifiedType(TypeLocBuilder &TLB,
QualifiedTypeLoc T) {
- QualType Result = getDerived().TransformType(TLB, T.getUnqualifiedLoc());
+ QualType Result;
+ TypeLoc UnqualTL = T.getUnqualifiedLoc();
+ auto SuppressObjCLifetime =
+ T.getType().getLocalQualifiers().hasObjCLifetime();
+ if (auto TTP = UnqualTL.getAs<TemplateTypeParmTypeLoc>()) {
+ Result = getDerived().TransformTemplateTypeParmType(TLB, TTP,
+ SuppressObjCLifetime);
+ } else if (auto STTP = UnqualTL.getAs<SubstTemplateTypeParmPackTypeLoc>()) {
+ Result = getDerived().TransformSubstTemplateTypeParmPackType(
+ TLB, STTP, SuppressObjCLifetime);
+ } else {
+ Result = getDerived().TransformType(TLB, UnqualTL);
+ }
+
if (Result.isNull())
return QualType();
@@ -4745,8 +5009,8 @@ QualType TreeTransform<Derived>::RebuildQualifiedType(QualType T,
SourceLocation Loc = TL.getBeginLoc();
Qualifiers Quals = TL.getType().getLocalQualifiers();
- if (((T.getAddressSpace() != LangAS::Default &&
- Quals.getAddressSpace() != LangAS::Default)) &&
+ if ((T.getAddressSpace() != LangAS::Default &&
+ Quals.getAddressSpace() != LangAS::Default) &&
T.getAddressSpace() != Quals.getAddressSpace()) {
SemaRef.Diag(Loc, diag::err_address_space_mismatch_templ_inst)
<< TL.getType() << T;
@@ -4784,16 +5048,7 @@ QualType TreeTransform<Derived>::RebuildQualifiedType(QualType T,
// A lifetime qualifier applied to a substituted template parameter
// overrides the lifetime qualifier from the template argument.
const AutoType *AutoTy;
- if (const SubstTemplateTypeParmType *SubstTypeParam
- = dyn_cast<SubstTemplateTypeParmType>(T)) {
- QualType Replacement = SubstTypeParam->getReplacementType();
- Qualifiers Qs = Replacement.getQualifiers();
- Qs.removeObjCLifetime();
- Replacement = SemaRef.Context.getQualifiedType(
- Replacement.getUnqualifiedType(), Qs);
- T = SemaRef.Context.getSubstTemplateTypeParmType(
- SubstTypeParam->getReplacedParameter(), Replacement);
- } else if ((AutoTy = dyn_cast<AutoType>(T)) && AutoTy->isDeduced()) {
+ if ((AutoTy = dyn_cast<AutoType>(T)) && AutoTy->isDeduced()) {
// 'auto' types behave the same way as template parameters.
QualType Deduced = AutoTy->getDeducedType();
Qualifiers Qs = Deduced.getQualifiers();
@@ -5246,6 +5501,9 @@ TreeTransform<Derived>::TransformDependentSizedArrayType(TypeLocBuilder &TLB,
EnterExpressionEvaluationContext Unevaluated(
SemaRef, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+ // If we have a VLA then it won't be a constant.
+ SemaRef.ExprEvalContexts.back().InConditionallyConstantEvaluateContext = true;
+
// Prefer the expression from the TypeLoc; the other may have been uniqued.
Expr *origSize = TL.getSizeExpr();
if (!origSize) origSize = T->getSizeExpr();
@@ -5538,8 +5796,8 @@ QualType TreeTransform<Derived>::TransformExtVectorType(TypeLocBuilder &TLB,
template <typename Derived>
ParmVarDecl *TreeTransform<Derived>::TransformFunctionTypeParam(
- ParmVarDecl *OldParm, int indexAdjustment, Optional<unsigned> NumExpansions,
- bool ExpectParameterPack) {
+ ParmVarDecl *OldParm, int indexAdjustment,
+ std::optional<unsigned> NumExpansions, bool ExpectParameterPack) {
TypeSourceInfo *OldDI = OldParm->getTypeSourceInfo();
TypeSourceInfo *NewDI = nullptr;
@@ -5599,15 +5857,18 @@ bool TreeTransform<Derived>::TransformFunctionTypeParams(
const FunctionProtoType::ExtParameterInfo *ParamInfos,
SmallVectorImpl<QualType> &OutParamTypes,
SmallVectorImpl<ParmVarDecl *> *PVars,
- Sema::ExtParameterInfoBuilder &PInfos) {
+ Sema::ExtParameterInfoBuilder &PInfos,
+ unsigned *LastParamTransformed) {
int indexAdjustment = 0;
unsigned NumParams = Params.size();
for (unsigned i = 0; i != NumParams; ++i) {
+ if (LastParamTransformed)
+ *LastParamTransformed = i;
if (ParmVarDecl *OldParm = Params[i]) {
assert(OldParm->getFunctionScopeIndex() == i);
- Optional<unsigned> NumExpansions;
+ std::optional<unsigned> NumExpansions;
ParmVarDecl *NewParm = nullptr;
if (OldParm->isParameterPack()) {
// We have a function parameter pack that may need to be expanded.
@@ -5622,7 +5883,7 @@ bool TreeTransform<Derived>::TransformFunctionTypeParams(
// Determine whether we should expand the parameter packs.
bool ShouldExpand = false;
bool RetainExpansion = false;
- Optional<unsigned> OrigNumExpansions;
+ std::optional<unsigned> OrigNumExpansions;
if (Unexpanded.size() > 0) {
OrigNumExpansions = ExpansionTL.getTypePtr()->getNumExpansions();
NumExpansions = OrigNumExpansions;
@@ -5705,7 +5966,8 @@ bool TreeTransform<Derived>::TransformFunctionTypeParams(
"transformation.");
} else {
NewParm = getDerived().TransformFunctionTypeParam(
- OldParm, indexAdjustment, None, /*ExpectParameterPack=*/ false);
+ OldParm, indexAdjustment, std::nullopt,
+ /*ExpectParameterPack=*/false);
}
if (!NewParm)
@@ -5721,9 +5983,10 @@ bool TreeTransform<Derived>::TransformFunctionTypeParams(
// Deal with the possibility that we don't have a parameter
// declaration for this parameter.
+ assert(ParamTypes);
QualType OldType = ParamTypes[i];
bool IsPackExpansion = false;
- Optional<unsigned> NumExpansions;
+ std::optional<unsigned> NumExpansions;
QualType NewType;
if (const PackExpansionType *Expansion
= dyn_cast<PackExpansionType>(OldType)) {
@@ -5753,8 +6016,8 @@ bool TreeTransform<Derived>::TransformFunctionTypeParams(
return true;
if (NewType->containsUnexpandedParameterPack()) {
- NewType =
- getSema().getASTContext().getPackExpansionType(NewType, None);
+ NewType = getSema().getASTContext().getPackExpansionType(
+ NewType, std::nullopt);
if (NewType.isNull())
return true;
@@ -5826,12 +6089,11 @@ QualType
TreeTransform<Derived>::TransformFunctionProtoType(TypeLocBuilder &TLB,
FunctionProtoTypeLoc TL) {
SmallVector<QualType, 4> ExceptionStorage;
- TreeTransform *This = this; // Work around gcc.gnu.org/PR56135.
return getDerived().TransformFunctionProtoType(
TLB, TL, nullptr, Qualifiers(),
[&](FunctionProtoType::ExceptionSpecInfo &ESI, bool &Changed) {
- return This->getDerived().TransformExceptionSpec(
- TL.getBeginLoc(), ESI, ExceptionStorage, Changed);
+ return getDerived().TransformExceptionSpec(TL.getBeginLoc(), ESI,
+ ExceptionStorage, Changed);
});
}
@@ -5899,8 +6161,8 @@ QualType TreeTransform<Derived>::TransformFunctionProtoType(
if (auto NewExtParamInfos =
ExtParamInfos.getPointerOrNull(ParamTypes.size())) {
if (!EPI.ExtParameterInfos ||
- llvm::makeArrayRef(EPI.ExtParameterInfos, TL.getNumParams())
- != llvm::makeArrayRef(NewExtParamInfos, ParamTypes.size())) {
+ llvm::ArrayRef(EPI.ExtParameterInfos, TL.getNumParams()) !=
+ llvm::ArrayRef(NewExtParamInfos, ParamTypes.size())) {
EPIChanged = true;
}
EPI.ExtParameterInfos = NewExtParamInfos;
@@ -5911,7 +6173,7 @@ QualType TreeTransform<Derived>::TransformFunctionProtoType(
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() || ResultType != T->getReturnType() ||
- T->getParamTypes() != llvm::makeArrayRef(ParamTypes) || EPIChanged) {
+ T->getParamTypes() != llvm::ArrayRef(ParamTypes) || EPIChanged) {
Result = getDerived().RebuildFunctionProtoType(ResultType, ParamTypes, EPI);
if (Result.isNull())
return QualType();
@@ -5937,6 +6199,13 @@ bool TreeTransform<Derived>::TransformExceptionSpec(
// Instantiate a dynamic noexcept expression, if any.
if (isComputedNoexcept(ESI.Type)) {
+ // Update this scrope because ContextDecl in Sema will be used in
+ // TransformExpr.
+ auto *Method = dyn_cast_if_present<CXXMethodDecl>(ESI.SourceTemplate);
+ Sema::CXXThisScopeRAII ThisScope(
+ SemaRef, Method ? Method->getParent() : nullptr,
+ Method ? Method->getMethodQualifiers() : Qualifiers{},
+ Method != nullptr);
EnterExpressionEvaluationContext Unevaluated(
getSema(), Sema::ExpressionEvaluationContext::ConstantEvaluated);
ExprResult NoexceptExpr = getDerived().TransformExpr(ESI.NoexceptExpr);
@@ -5945,7 +6214,7 @@ bool TreeTransform<Derived>::TransformExceptionSpec(
ExceptionSpecificationType EST = ESI.Type;
NoexceptExpr =
- getSema().ActOnNoexceptSpec(Loc, NoexceptExpr.get(), EST);
+ getSema().ActOnNoexceptSpec(NoexceptExpr.get(), EST);
if (NoexceptExpr.isInvalid())
return true;
@@ -5975,7 +6244,7 @@ bool TreeTransform<Derived>::TransformExceptionSpec(
// be expanded.
bool Expand = false;
bool RetainExpansion = false;
- Optional<unsigned> NumExpansions = PackExpansion->getNumExpansions();
+ std::optional<unsigned> NumExpansions = PackExpansion->getNumExpansions();
// FIXME: Track the location of the ellipsis (and track source location
// information for the types in the exception specification in general).
if (getDerived().TryExpandParameterPacks(
@@ -6047,9 +6316,9 @@ QualType TreeTransform<Derived>::TransformFunctionNoProtoType(
return Result;
}
-template<typename Derived> QualType
-TreeTransform<Derived>::TransformUnresolvedUsingType(TypeLocBuilder &TLB,
- UnresolvedUsingTypeLoc TL) {
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformUnresolvedUsingType(
+ TypeLocBuilder &TLB, UnresolvedUsingTypeLoc TL) {
const UnresolvedUsingType *T = TL.getTypePtr();
Decl *D = getDerived().TransformDecl(TL.getNameLoc(), T->getDecl());
if (!D)
@@ -6070,6 +6339,32 @@ TreeTransform<Derived>::TransformUnresolvedUsingType(TypeLocBuilder &TLB,
return Result;
}
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformUsingType(TypeLocBuilder &TLB,
+ UsingTypeLoc TL) {
+ const UsingType *T = TL.getTypePtr();
+
+ auto *Found = cast_or_null<UsingShadowDecl>(getDerived().TransformDecl(
+ TL.getLocalSourceRange().getBegin(), T->getFoundDecl()));
+ if (!Found)
+ return QualType();
+
+ QualType Underlying = getDerived().TransformType(T->desugar());
+ if (Underlying.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() || Found != T->getFoundDecl() ||
+ Underlying != T->getUnderlyingType()) {
+ Result = getDerived().RebuildUsingType(Found, Underlying);
+ if (Result.isNull())
+ return QualType();
+ }
+
+ TLB.pushTypeSpec(Result).setNameLoc(TL.getNameLoc());
+ return Result;
+}
+
template<typename Derived>
QualType TreeTransform<Derived>::TransformTypedefType(TypeLocBuilder &TLB,
TypedefTypeLoc TL) {
@@ -6111,13 +6406,13 @@ QualType TreeTransform<Derived>::TransformTypeOfExprType(TypeLocBuilder &TLB,
return QualType();
QualType Result = TL.getType();
- if (getDerived().AlwaysRebuild() ||
- E.get() != TL.getUnderlyingExpr()) {
- Result = getDerived().RebuildTypeOfExprType(E.get(), TL.getTypeofLoc());
+ TypeOfKind Kind = Result->getAs<TypeOfExprType>()->getKind();
+ if (getDerived().AlwaysRebuild() || E.get() != TL.getUnderlyingExpr()) {
+ Result =
+ getDerived().RebuildTypeOfExprType(E.get(), TL.getTypeofLoc(), Kind);
if (Result.isNull())
return QualType();
}
- else E.get();
TypeOfExprTypeLoc NewTL = TLB.push<TypeOfExprTypeLoc>(Result);
NewTL.setTypeofLoc(TL.getTypeofLoc());
@@ -6130,14 +6425,15 @@ QualType TreeTransform<Derived>::TransformTypeOfExprType(TypeLocBuilder &TLB,
template<typename Derived>
QualType TreeTransform<Derived>::TransformTypeOfType(TypeLocBuilder &TLB,
TypeOfTypeLoc TL) {
- TypeSourceInfo* Old_Under_TI = TL.getUnderlyingTInfo();
+ TypeSourceInfo* Old_Under_TI = TL.getUnmodifiedTInfo();
TypeSourceInfo* New_Under_TI = getDerived().TransformType(Old_Under_TI);
if (!New_Under_TI)
return QualType();
QualType Result = TL.getType();
+ TypeOfKind Kind = Result->getAs<TypeOfType>()->getKind();
if (getDerived().AlwaysRebuild() || New_Under_TI != Old_Under_TI) {
- Result = getDerived().RebuildTypeOfType(New_Under_TI->getType());
+ Result = getDerived().RebuildTypeOfType(New_Under_TI->getType(), Kind);
if (Result.isNull())
return QualType();
}
@@ -6146,7 +6442,7 @@ QualType TreeTransform<Derived>::TransformTypeOfType(TypeLocBuilder &TLB,
NewTL.setTypeofLoc(TL.getTypeofLoc());
NewTL.setLParenLoc(TL.getLParenLoc());
NewTL.setRParenLoc(TL.getRParenLoc());
- NewTL.setUnderlyingTInfo(New_Under_TI);
+ NewTL.setUnmodifiedTInfo(New_Under_TI);
return Result;
}
@@ -6172,15 +6468,15 @@ QualType TreeTransform<Derived>::TransformDecltypeType(TypeLocBuilder &TLB,
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() ||
E.get() != T->getUnderlyingExpr()) {
- Result = getDerived().RebuildDecltypeType(E.get(), TL.getNameLoc());
+ Result = getDerived().RebuildDecltypeType(E.get(), TL.getDecltypeLoc());
if (Result.isNull())
return QualType();
}
else E.get();
DecltypeTypeLoc NewTL = TLB.push<DecltypeTypeLoc>(Result);
- NewTL.setNameLoc(TL.getNameLoc());
-
+ NewTL.setDecltypeLoc(TL.getDecltypeLoc());
+ NewTL.setRParenLoc(TL.getRParenLoc());
return Result;
}
@@ -6303,6 +6599,14 @@ template<typename Derived>
QualType TreeTransform<Derived>::TransformTemplateTypeParmType(
TypeLocBuilder &TLB,
TemplateTypeParmTypeLoc TL) {
+ return getDerived().TransformTemplateTypeParmType(
+ TLB, TL,
+ /*SuppressObjCLifetime=*/false);
+}
+
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformTemplateTypeParmType(
+ TypeLocBuilder &TLB, TemplateTypeParmTypeLoc TL, bool) {
return TransformTypeSpecType(TLB, TL);
}
@@ -6312,6 +6616,9 @@ QualType TreeTransform<Derived>::TransformSubstTemplateTypeParmType(
SubstTemplateTypeParmTypeLoc TL) {
const SubstTemplateTypeParmType *T = TL.getTypePtr();
+ Decl *NewReplaced =
+ getDerived().TransformDecl(TL.getNameLoc(), T->getAssociatedDecl());
+
// Substitute into the replacement type, which itself might involve something
// that needs to be transformed. This only tends to occur with default
// template arguments of template template parameters.
@@ -6320,11 +6627,8 @@ QualType TreeTransform<Derived>::TransformSubstTemplateTypeParmType(
if (Replacement.isNull())
return QualType();
- // Always canonicalize the replacement type.
- Replacement = SemaRef.Context.getCanonicalType(Replacement);
- QualType Result
- = SemaRef.Context.getSubstTemplateTypeParmType(T->getReplacedParameter(),
- Replacement);
+ QualType Result = SemaRef.Context.getSubstTemplateTypeParmType(
+ Replacement, NewReplaced, T->getIndex(), T->getPackIndex());
// Propagate type-source information.
SubstTemplateTypeParmTypeLoc NewTL
@@ -6338,6 +6642,13 @@ template<typename Derived>
QualType TreeTransform<Derived>::TransformSubstTemplateTypeParmPackType(
TypeLocBuilder &TLB,
SubstTemplateTypeParmPackTypeLoc TL) {
+ return getDerived().TransformSubstTemplateTypeParmPackType(
+ TLB, TL, /*SuppressObjCLifetime=*/false);
+}
+
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformSubstTemplateTypeParmPackType(
+ TypeLocBuilder &TLB, SubstTemplateTypeParmPackTypeLoc TL, bool) {
return TransformTypeSpecType(TLB, TL);
}
@@ -6405,27 +6716,27 @@ QualType TreeTransform<Derived>::TransformPipeType(TypeLocBuilder &TLB,
}
template <typename Derived>
-QualType TreeTransform<Derived>::TransformExtIntType(TypeLocBuilder &TLB,
- ExtIntTypeLoc TL) {
- const ExtIntType *EIT = TL.getTypePtr();
+QualType TreeTransform<Derived>::TransformBitIntType(TypeLocBuilder &TLB,
+ BitIntTypeLoc TL) {
+ const BitIntType *EIT = TL.getTypePtr();
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild()) {
- Result = getDerived().RebuildExtIntType(EIT->isUnsigned(),
+ Result = getDerived().RebuildBitIntType(EIT->isUnsigned(),
EIT->getNumBits(), TL.getNameLoc());
if (Result.isNull())
return QualType();
}
- ExtIntTypeLoc NewTL = TLB.push<ExtIntTypeLoc>(Result);
+ BitIntTypeLoc NewTL = TLB.push<BitIntTypeLoc>(Result);
NewTL.setNameLoc(TL.getNameLoc());
return Result;
}
template <typename Derived>
-QualType TreeTransform<Derived>::TransformDependentExtIntType(
- TypeLocBuilder &TLB, DependentExtIntTypeLoc TL) {
- const DependentExtIntType *EIT = TL.getTypePtr();
+QualType TreeTransform<Derived>::TransformDependentBitIntType(
+ TypeLocBuilder &TLB, DependentBitIntTypeLoc TL) {
+ const DependentBitIntType *EIT = TL.getTypePtr();
EnterExpressionEvaluationContext Unevaluated(
SemaRef, Sema::ExpressionEvaluationContext::ConstantEvaluated);
@@ -6438,18 +6749,18 @@ QualType TreeTransform<Derived>::TransformDependentExtIntType(
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() || BitsExpr.get() != EIT->getNumBitsExpr()) {
- Result = getDerived().RebuildDependentExtIntType(
+ Result = getDerived().RebuildDependentBitIntType(
EIT->isUnsigned(), BitsExpr.get(), TL.getNameLoc());
if (Result.isNull())
return QualType();
}
- if (isa<DependentExtIntType>(Result)) {
- DependentExtIntTypeLoc NewTL = TLB.push<DependentExtIntTypeLoc>(Result);
+ if (isa<DependentBitIntType>(Result)) {
+ DependentBitIntTypeLoc NewTL = TLB.push<DependentBitIntTypeLoc>(Result);
NewTL.setNameLoc(TL.getNameLoc());
} else {
- ExtIntTypeLoc NewTL = TLB.push<ExtIntTypeLoc>(Result);
+ BitIntTypeLoc NewTL = TLB.push<BitIntTypeLoc>(Result);
NewTL.setNameLoc(TL.getNameLoc());
}
return Result;
@@ -6535,16 +6846,16 @@ QualType TreeTransform<Derived>::TransformAutoType(TypeLocBuilder &TLB,
TemplateArgumentListInfo NewTemplateArgs;
NestedNameSpecifierLoc NewNestedNameSpec;
if (T->isConstrained()) {
+ assert(TL.getConceptReference());
NewCD = cast_or_null<ConceptDecl>(getDerived().TransformDecl(
TL.getConceptNameLoc(), T->getTypeConstraintConcept()));
NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc());
NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc());
typedef TemplateArgumentLocContainerIterator<AutoTypeLoc> ArgIterator;
- if (getDerived().TransformTemplateArguments(ArgIterator(TL, 0),
- ArgIterator(TL,
- TL.getNumArgs()),
- NewTemplateArgs))
+ if (getDerived().TransformTemplateArguments(
+ ArgIterator(TL, 0), ArgIterator(TL, TL.getNumArgs()),
+ NewTemplateArgs))
return QualType();
if (TL.getNestedNameSpecifierLoc()) {
@@ -6561,7 +6872,7 @@ QualType TreeTransform<Derived>::TransformAutoType(TypeLocBuilder &TLB,
T->isDependentType() || T->isConstrained()) {
// FIXME: Maybe don't rebuild if all template arguments are the same.
llvm::SmallVector<TemplateArgument, 4> NewArgList;
- NewArgList.reserve(NewArgList.size());
+ NewArgList.reserve(NewTemplateArgs.size());
for (const auto &ArgLoc : NewTemplateArgs.arguments())
NewArgList.push_back(ArgLoc.getArgument());
Result = getDerived().RebuildAutoType(NewDeduced, T->getKeyword(), NewCD,
@@ -6572,14 +6883,20 @@ QualType TreeTransform<Derived>::TransformAutoType(TypeLocBuilder &TLB,
AutoTypeLoc NewTL = TLB.push<AutoTypeLoc>(Result);
NewTL.setNameLoc(TL.getNameLoc());
- NewTL.setNestedNameSpecifierLoc(NewNestedNameSpec);
- NewTL.setTemplateKWLoc(TL.getTemplateKWLoc());
- NewTL.setConceptNameLoc(TL.getConceptNameLoc());
- NewTL.setFoundDecl(TL.getFoundDecl());
- NewTL.setLAngleLoc(TL.getLAngleLoc());
- NewTL.setRAngleLoc(TL.getRAngleLoc());
- for (unsigned I = 0; I < NewTL.getNumArgs(); ++I)
- NewTL.setArgLocInfo(I, NewTemplateArgs.arguments()[I].getLocInfo());
+ NewTL.setRParenLoc(TL.getRParenLoc());
+ NewTL.setConceptReference(nullptr);
+
+ if (T->isConstrained()) {
+ DeclarationNameInfo DNI = DeclarationNameInfo(
+ TL.getTypePtr()->getTypeConstraintConcept()->getDeclName(),
+ TL.getConceptNameLoc(),
+ TL.getTypePtr()->getTypeConstraintConcept()->getDeclName());
+ auto *CR = ConceptReference::Create(
+ SemaRef.Context, NewNestedNameSpec, TL.getTemplateKWLoc(), DNI,
+ TL.getFoundDecl(), TL.getTypePtr()->getTypeConstraintConcept(),
+ ASTTemplateArgumentListInfo::Create(SemaRef.Context, NewTemplateArgs));
+ NewTL.setConceptReference(CR);
+ }
return Result;
}
@@ -6657,12 +6974,9 @@ QualType TreeTransform<Derived>::TransformDependentTemplateSpecializationType(
// FIXME: maybe don't rebuild if all the template arguments are the same.
if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
- QualType Result
- = getSema().Context.getDependentTemplateSpecializationType(
- TL.getTypePtr()->getKeyword(),
- DTN->getQualifier(),
- DTN->getIdentifier(),
- NewTemplateArgs);
+ QualType Result = getSema().Context.getDependentTemplateSpecializationType(
+ TL.getTypePtr()->getKeyword(), DTN->getQualifier(),
+ DTN->getIdentifier(), NewTemplateArgs.arguments());
DependentTemplateSpecializationTypeLoc NewTL
= TLB.push<DependentTemplateSpecializationTypeLoc>(Result);
@@ -6720,7 +7034,8 @@ TreeTransform<Derived>::TransformElaboratedType(TypeLocBuilder &TLB,
// If the identifier resolves to a typedef-name or the simple-template-id
// resolves to an alias template specialization, the
// elaborated-type-specifier is ill-formed.
- if (T->getKeyword() != ETK_None && T->getKeyword() != ETK_Typename) {
+ if (T->getKeyword() != ElaboratedTypeKeyword::None &&
+ T->getKeyword() != ElaboratedTypeKeyword::Typename) {
if (const TemplateSpecializationType *TST =
NamedT->getAs<TemplateSpecializationType>()) {
TemplateName Template = TST->getTemplateName();
@@ -6729,7 +7044,8 @@ TreeTransform<Derived>::TransformElaboratedType(TypeLocBuilder &TLB,
SemaRef.Diag(TL.getNamedTypeLoc().getBeginLoc(),
diag::err_tag_reference_non_tag)
<< TAT << Sema::NTK_TypeAliasTemplate
- << ElaboratedType::getTagTypeKindForKeyword(T->getKeyword());
+ << llvm::to_underlying(
+ ElaboratedType::getTagTypeKindForKeyword(T->getKeyword()));
SemaRef.Diag(TAT->getLocation(), diag::note_declared_at);
}
}
@@ -6752,12 +7068,12 @@ TreeTransform<Derived>::TransformElaboratedType(TypeLocBuilder &TLB,
return Result;
}
-template<typename Derived>
+template <typename Derived>
+template <typename Fn>
QualType TreeTransform<Derived>::TransformAttributedType(
- TypeLocBuilder &TLB,
- AttributedTypeLoc TL) {
+ TypeLocBuilder &TLB, AttributedTypeLoc TL, Fn TransformModifiedTypeFn) {
const AttributedType *oldType = TL.getTypePtr();
- QualType modifiedType = getDerived().TransformType(TLB, TL.getModifiedLoc());
+ QualType modifiedType = TransformModifiedTypeFn(TLB, TL.getModifiedLoc());
if (modifiedType.isNull())
return QualType();
@@ -6783,7 +7099,8 @@ QualType TreeTransform<Derived>::TransformAttributedType(
// type sugar, and therefore cannot be diagnosed in any other way.
if (auto nullability = oldType->getImmediateNullability()) {
if (!modifiedType->canHaveNullability()) {
- SemaRef.Diag(TL.getAttr()->getLocation(),
+ SemaRef.Diag((TL.getAttr() ? TL.getAttr()->getLocation()
+ : TL.getModifiedLoc().getBeginLoc()),
diag::err_nullability_nonpointer)
<< DiagNullabilityKind(*nullability, false) << modifiedType;
return QualType();
@@ -6800,6 +7117,22 @@ QualType TreeTransform<Derived>::TransformAttributedType(
return result;
}
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformAttributedType(TypeLocBuilder &TLB,
+ AttributedTypeLoc TL) {
+ return getDerived().TransformAttributedType(
+ TLB, TL, [&](TypeLocBuilder &TLB, TypeLoc ModifiedLoc) -> QualType {
+ return getDerived().TransformType(TLB, ModifiedLoc);
+ });
+}
+
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformBTFTagAttributedType(
+ TypeLocBuilder &TLB, BTFTagAttributedTypeLoc TL) {
+ // The BTFTagAttributedType is available for C only.
+ llvm_unreachable("Unexpected TreeTransform for BTFTagAttributedType");
+}
+
template<typename Derived>
QualType
TreeTransform<Derived>::TransformParenType(TypeLocBuilder &TLB,
@@ -7013,12 +7346,10 @@ TreeTransform<Derived>::TransformObjCTypeParamType(TypeLocBuilder &TLB,
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() ||
OTP != T->getDecl()) {
- Result = getDerived().RebuildObjCTypeParamType(OTP,
- TL.getProtocolLAngleLoc(),
- llvm::makeArrayRef(TL.getTypePtr()->qual_begin(),
- TL.getNumProtocols()),
- TL.getProtocolLocs(),
- TL.getProtocolRAngleLoc());
+ Result = getDerived().RebuildObjCTypeParamType(
+ OTP, TL.getProtocolLAngleLoc(),
+ llvm::ArrayRef(TL.getTypePtr()->qual_begin(), TL.getNumProtocols()),
+ TL.getProtocolLocs(), TL.getProtocolRAngleLoc());
if (Result.isNull())
return QualType();
}
@@ -7066,7 +7397,7 @@ TreeTransform<Derived>::TransformObjCObjectType(TypeLocBuilder &TLB,
TypeLoc PatternLoc = PackExpansionLoc.getPatternLoc();
bool Expand = false;
bool RetainExpansion = false;
- Optional<unsigned> NumExpansions = PackExpansion->getNumExpansions();
+ std::optional<unsigned> NumExpansions = PackExpansion->getNumExpansions();
if (getDerived().TryExpandParameterPacks(
PackExpansionLoc.getEllipsisLoc(), PatternLoc.getSourceRange(),
Unexpanded, Expand, RetainExpansion, NumExpansions))
@@ -7116,7 +7447,8 @@ TreeTransform<Derived>::TransformObjCObjectType(TypeLocBuilder &TLB,
TypeLocBuilder TypeArgBuilder;
TypeArgBuilder.reserve(TypeArgLoc.getFullDataSize());
- QualType NewTypeArg = getDerived().TransformType(TypeArgBuilder, TypeArgLoc);
+ QualType NewTypeArg =
+ getDerived().TransformType(TypeArgBuilder, TypeArgLoc);
if (NewTypeArg.isNull())
return QualType();
@@ -7137,7 +7469,7 @@ TreeTransform<Derived>::TransformObjCObjectType(TypeLocBuilder &TLB,
Result = getDerived().RebuildObjCObjectType(
BaseType, TL.getBeginLoc(), TL.getTypeArgsLAngleLoc(), NewTypeArgInfos,
TL.getTypeArgsRAngleLoc(), TL.getProtocolLAngleLoc(),
- llvm::makeArrayRef(TL.getTypePtr()->qual_begin(), TL.getNumProtocols()),
+ llvm::ArrayRef(TL.getTypePtr()->qual_begin(), TL.getNumProtocols()),
TL.getProtocolLocs(), TL.getProtocolRAngleLoc());
if (Result.isNull())
@@ -7199,6 +7531,10 @@ StmtResult
TreeTransform<Derived>::TransformCompoundStmt(CompoundStmt *S,
bool IsStmtExpr) {
Sema::CompoundScopeRAII CompoundScope(getSema());
+ Sema::FPFeaturesStateRAII FPSave(getSema());
+ if (S->hasStoredFPFeatures())
+ getSema().resetFPOptions(
+ S->getStoredFPFeatures().applyOverrides(getSema().getLangOpts()));
const Stmt *ExprResult = S->getStmtExprResult();
bool SubStmtInvalid = false;
@@ -7321,36 +7657,52 @@ const Attr *TreeTransform<Derived>::TransformAttr(const Attr *R) {
return R;
switch (R->getKind()) {
-// Transform attributes with a pragma spelling by calling TransformXXXAttr.
-#define ATTR(X)
-#define PRAGMA_SPELLING_ATTR(X) \
+// Transform attributes by calling TransformXXXAttr.
+#define ATTR(X) \
case attr::X: \
return getDerived().Transform##X##Attr(cast<X##Attr>(R));
#include "clang/Basic/AttrList.inc"
- default:
+ }
+ return R;
+}
+
+template <typename Derived>
+const Attr *TreeTransform<Derived>::TransformStmtAttr(const Stmt *OrigS,
+ const Stmt *InstS,
+ const Attr *R) {
+ if (!R)
return R;
+
+ switch (R->getKind()) {
+// Transform attributes by calling TransformStmtXXXAttr.
+#define ATTR(X) \
+ case attr::X: \
+ return getDerived().TransformStmt##X##Attr(OrigS, InstS, cast<X##Attr>(R));
+#include "clang/Basic/AttrList.inc"
}
+ return TransformAttr(R);
}
template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformAttributedStmt(AttributedStmt *S,
StmtDiscardKind SDK) {
+ StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt(), SDK);
+ if (SubStmt.isInvalid())
+ return StmtError();
+
bool AttrsChanged = false;
SmallVector<const Attr *, 1> Attrs;
// Visit attributes and keep track if any are transformed.
for (const auto *I : S->getAttrs()) {
- const Attr *R = getDerived().TransformAttr(I);
+ const Attr *R =
+ getDerived().TransformStmtAttr(S->getSubStmt(), SubStmt.get(), I);
AttrsChanged |= (I != R);
if (R)
Attrs.push_back(R);
}
- StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt(), SDK);
- if (SubStmt.isInvalid())
- return StmtError();
-
if (SubStmt.get() == S->getSubStmt() && !AttrsChanged)
return S;
@@ -7371,16 +7723,19 @@ TreeTransform<Derived>::TransformIfStmt(IfStmt *S) {
if (Init.isInvalid())
return StmtError();
- // Transform the condition
- Sema::ConditionResult Cond = getDerived().TransformCondition(
- S->getIfLoc(), S->getConditionVariable(), S->getCond(),
- S->isConstexpr() ? Sema::ConditionKind::ConstexprIf
- : Sema::ConditionKind::Boolean);
- if (Cond.isInvalid())
- return StmtError();
+ Sema::ConditionResult Cond;
+ if (!S->isConsteval()) {
+ // Transform the condition
+ Cond = getDerived().TransformCondition(
+ S->getIfLoc(), S->getConditionVariable(), S->getCond(),
+ S->isConstexpr() ? Sema::ConditionKind::ConstexprIf
+ : Sema::ConditionKind::Boolean);
+ if (Cond.isInvalid())
+ return StmtError();
+ }
// If this is a constexpr if, determine which arm we should instantiate.
- llvm::Optional<bool> ConstexprConditionValue;
+ std::optional<bool> ConstexprConditionValue;
if (S->isConstexpr())
ConstexprConditionValue = Cond.getKnownValue();
@@ -7391,7 +7746,11 @@ TreeTransform<Derived>::TransformIfStmt(IfStmt *S) {
if (Then.isInvalid())
return StmtError();
} else {
- Then = new (getSema().Context) NullStmt(S->getThen()->getBeginLoc());
+ // Discarded branch is replaced with empty CompoundStmt so we can keep
+ // proper source location for start and end of original branch, so
+ // subsequent transformations like CoverageMapping work properly
+ Then = new (getSema().Context)
+ CompoundStmt(S->getThen()->getBeginLoc(), S->getThen()->getEndLoc());
}
// Transform the "else" branch.
@@ -7400,6 +7759,13 @@ TreeTransform<Derived>::TransformIfStmt(IfStmt *S) {
Else = getDerived().TransformStmt(S->getElse());
if (Else.isInvalid())
return StmtError();
+ } else if (S->getElse() && ConstexprConditionValue &&
+ *ConstexprConditionValue) {
+ // Same thing here as with <then> branch, we are discarding it, we can't
+ // replace it with NULL nor NullStmt as we need to keep for source location
+ // range, for CoverageMapping
+ Else = new (getSema().Context)
+ CompoundStmt(S->getElse()->getBeginLoc(), S->getElse()->getEndLoc());
}
if (!getDerived().AlwaysRebuild() &&
@@ -7410,7 +7776,7 @@ TreeTransform<Derived>::TransformIfStmt(IfStmt *S) {
return S;
return getDerived().RebuildIfStmt(
- S->getIfLoc(), S->isConstexpr(), S->getLParenLoc(), Cond,
+ S->getIfLoc(), S->getStatementKind(), S->getLParenLoc(), Cond,
S->getRParenLoc(), Init.get(), Then.get(), S->getElseLoc(), Else.get());
}
@@ -7697,8 +8063,7 @@ TreeTransform<Derived>::TransformGCCAsmStmt(GCCAsmStmt *S) {
template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformMSAsmStmt(MSAsmStmt *S) {
- ArrayRef<Token> AsmToks =
- llvm::makeArrayRef(S->getAsmToks(), S->getNumAsmToks());
+ ArrayRef<Token> AsmToks = llvm::ArrayRef(S->getAsmToks(), S->getNumAsmToks());
bool HadError = false, HadChange = false;
@@ -7726,8 +8091,7 @@ TreeTransform<Derived>::TransformMSAsmStmt(MSAsmStmt *S) {
TransformedExprs, S->getEndLoc());
}
-// C++ Coroutines TS
-
+// C++ Coroutines
template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformCoroutineBodyStmt(CoroutineBodyStmt *S) {
@@ -7835,11 +8199,12 @@ TreeTransform<Derived>::TransformCoroutineBodyStmt(CoroutineBodyStmt *S) {
return StmtError();
Builder.Deallocate = DeallocRes.get();
- assert(S->getResultDecl() && "ResultDecl must already be built");
- StmtResult ResultDecl = getDerived().TransformStmt(S->getResultDecl());
- if (ResultDecl.isInvalid())
- return StmtError();
- Builder.ResultDecl = ResultDecl.get();
+ if (auto *ResultDecl = S->getResultDecl()) {
+ StmtResult Res = getDerived().TransformStmt(ResultDecl);
+ if (Res.isInvalid())
+ return StmtError();
+ Builder.ResultDecl = Res.get();
+ }
if (auto *ReturnStmt = S->getReturnStmt()) {
StmtResult Res = getDerived().TransformStmt(ReturnStmt);
@@ -7866,18 +8231,27 @@ TreeTransform<Derived>::TransformCoreturnStmt(CoreturnStmt *S) {
S->isImplicit());
}
-template<typename Derived>
-ExprResult
-TreeTransform<Derived>::TransformCoawaitExpr(CoawaitExpr *E) {
- ExprResult Result = getDerived().TransformInitializer(E->getOperand(),
- /*NotCopyInit*/false);
- if (Result.isInvalid())
+template <typename Derived>
+ExprResult TreeTransform<Derived>::TransformCoawaitExpr(CoawaitExpr *E) {
+ ExprResult Operand = getDerived().TransformInitializer(E->getOperand(),
+ /*NotCopyInit*/ false);
+ if (Operand.isInvalid())
return ExprError();
+ // Rebuild the common-expr from the operand rather than transforming it
+ // separately.
+
+ // FIXME: getCurScope() should not be used during template instantiation.
+ // We should pick up the set of unqualified lookup results for operator
+ // co_await during the initial parse.
+ ExprResult Lookup = getSema().BuildOperatorCoawaitLookupExpr(
+ getSema().getCurScope(), E->getKeywordLoc());
+
// Always rebuild; we don't know if this needs to be injected into a new
// context or if the promise type has changed.
- return getDerived().RebuildCoawaitExpr(E->getKeywordLoc(), Result.get(),
- E->isImplicit());
+ return getDerived().RebuildCoawaitExpr(
+ E->getKeywordLoc(), Operand.get(),
+ cast<UnresolvedLookupExpr>(Lookup.get()), E->isImplicit());
}
template <typename Derived>
@@ -8501,7 +8875,17 @@ StmtResult TreeTransform<Derived>::TransformOMPExecutableDirective(
return getDerived().RebuildOMPExecutableDirective(
D->getDirectiveKind(), DirName, CancelRegion, TClauses,
- AssociatedStmt.get(), D->getBeginLoc(), D->getEndLoc());
+ AssociatedStmt.get(), D->getBeginLoc(), D->getEndLoc(),
+ D->getMappedDirective());
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPMetaDirective(OMPMetaDirective *D) {
+ // TODO: Fix This
+ SemaRef.Diag(D->getBeginLoc(), diag::err_omp_instantiation_not_supported)
+ << getOpenMPDirectiveName(D->getDirectiveKind());
+ return StmtError();
}
template <typename Derived>
@@ -8594,6 +8978,17 @@ TreeTransform<Derived>::TransformOMPSectionDirective(OMPSectionDirective *D) {
template <typename Derived>
StmtResult
+TreeTransform<Derived>::TransformOMPScopeDirective(OMPScopeDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_scope, DirName, nullptr,
+ D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
TreeTransform<Derived>::TransformOMPSingleDirective(OMPSingleDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_single, DirName, nullptr,
@@ -8658,6 +9053,17 @@ StmtResult TreeTransform<Derived>::TransformOMPParallelMasterDirective(
}
template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPParallelMaskedDirective(
+ OMPParallelMaskedDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel_masked, DirName,
+ nullptr, D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPParallelSectionsDirective(
OMPParallelSectionsDirective *D) {
DeclarationNameInfo DirName;
@@ -8713,6 +9119,17 @@ TreeTransform<Derived>::TransformOMPTaskwaitDirective(OMPTaskwaitDirective *D) {
}
template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPErrorDirective(OMPErrorDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_error, DirName, nullptr,
+ D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPTaskgroupDirective(
OMPTaskgroupDirective *D) {
DeclarationNameInfo DirName;
@@ -8922,6 +9339,17 @@ StmtResult TreeTransform<Derived>::TransformOMPMasterTaskLoopDirective(
}
template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPMaskedTaskLoopDirective(
+ OMPMaskedTaskLoopDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_masked_taskloop, DirName,
+ nullptr, D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPMasterTaskLoopSimdDirective(
OMPMasterTaskLoopSimdDirective *D) {
DeclarationNameInfo DirName;
@@ -8933,6 +9361,17 @@ StmtResult TreeTransform<Derived>::TransformOMPMasterTaskLoopSimdDirective(
}
template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPMaskedTaskLoopSimdDirective(
+ OMPMaskedTaskLoopSimdDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_masked_taskloop_simd, DirName,
+ nullptr, D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPParallelMasterTaskLoopDirective(
OMPParallelMasterTaskLoopDirective *D) {
DeclarationNameInfo DirName;
@@ -8944,6 +9383,17 @@ StmtResult TreeTransform<Derived>::TransformOMPParallelMasterTaskLoopDirective(
}
template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPParallelMaskedTaskLoopDirective(
+ OMPParallelMaskedTaskLoopDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(
+ OMPD_parallel_masked_taskloop, DirName, nullptr, D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPParallelMasterTaskLoopSimdDirective(
OMPParallelMasterTaskLoopSimdDirective *D) {
@@ -8956,6 +9406,18 @@ TreeTransform<Derived>::TransformOMPParallelMasterTaskLoopSimdDirective(
}
template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPParallelMaskedTaskLoopSimdDirective(
+ OMPParallelMaskedTaskLoopSimdDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(
+ OMPD_parallel_masked_taskloop_simd, DirName, nullptr, D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPDistributeDirective(
OMPDistributeDirective *D) {
DeclarationNameInfo DirName;
@@ -9160,6 +9622,62 @@ TreeTransform<Derived>::TransformOMPMaskedDirective(OMPMaskedDirective *D) {
return Res;
}
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPGenericLoopDirective(
+ OMPGenericLoopDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_loop, DirName, nullptr,
+ D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPTeamsGenericLoopDirective(
+ OMPTeamsGenericLoopDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_teams_loop, DirName, nullptr,
+ D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPTargetTeamsGenericLoopDirective(
+ OMPTargetTeamsGenericLoopDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_target_teams_loop, DirName,
+ nullptr, D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPParallelGenericLoopDirective(
+ OMPParallelGenericLoopDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel_loop, DirName,
+ nullptr, D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPTargetParallelGenericLoopDirective(
+ OMPTargetParallelGenericLoopDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_target_parallel_loop, DirName,
+ nullptr, D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
//===----------------------------------------------------------------------===//
// OpenMP clause transformation
//===----------------------------------------------------------------------===//
@@ -9383,6 +9901,19 @@ TreeTransform<Derived>::TransformOMPCaptureClause(OMPCaptureClause *C) {
template <typename Derived>
OMPClause *
+TreeTransform<Derived>::TransformOMPCompareClause(OMPCompareClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPFailClause(OMPFailClause *C) {
+ // No need to rebuild this clause, no template-dependent parameters.
+ return C;
+}
+
+template <typename Derived>
+OMPClause *
TreeTransform<Derived>::TransformOMPSeqCstClause(OMPSeqCstClause *C) {
// No need to rebuild this clause, no template-dependent parameters.
return C;
@@ -9442,17 +9973,17 @@ OMPClause *TreeTransform<Derived>::TransformOMPInitClause(OMPInitClause *C) {
if (IVR.isInvalid())
return nullptr;
- llvm::SmallVector<Expr *, 8> PrefExprs;
- PrefExprs.reserve(C->varlist_size() - 1);
+ OMPInteropInfo InteropInfo(C->getIsTarget(), C->getIsTargetSync());
+ InteropInfo.PreferTypes.reserve(C->varlist_size() - 1);
for (Expr *E : llvm::drop_begin(C->varlists())) {
ExprResult ER = getDerived().TransformExpr(cast<Expr>(E));
if (ER.isInvalid())
return nullptr;
- PrefExprs.push_back(ER.get());
+ InteropInfo.PreferTypes.push_back(ER.get());
}
- return getDerived().RebuildOMPInitClause(
- IVR.get(), PrefExprs, C->getIsTarget(), C->getIsTargetSync(),
- C->getBeginLoc(), C->getLParenLoc(), C->getVarLoc(), C->getEndLoc());
+ return getDerived().RebuildOMPInitClause(IVR.get(), InteropInfo,
+ C->getBeginLoc(), C->getLParenLoc(),
+ C->getVarLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -9510,6 +10041,15 @@ TreeTransform<Derived>::TransformOMPFilterClause(OMPFilterClause *C) {
}
template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPAlignClause(OMPAlignClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getAlignment());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPAlignClause(E.get(), C->getBeginLoc(),
+ C->getLParenLoc(), C->getEndLoc());
+}
+
+template <typename Derived>
OMPClause *TreeTransform<Derived>::TransformOMPUnifiedAddressClause(
OMPUnifiedAddressClause *C) {
llvm_unreachable("unified_address clause cannot appear in dependent context");
@@ -9543,6 +10083,32 @@ OMPClause *TreeTransform<Derived>::TransformOMPAtomicDefaultMemOrderClause(
}
template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPAtClause(OMPAtClause *C) {
+ return getDerived().RebuildOMPAtClause(C->getAtKind(), C->getAtKindKwLoc(),
+ C->getBeginLoc(), C->getLParenLoc(),
+ C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPSeverityClause(OMPSeverityClause *C) {
+ return getDerived().RebuildOMPSeverityClause(
+ C->getSeverityKind(), C->getSeverityKindKwLoc(), C->getBeginLoc(),
+ C->getLParenLoc(), C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPMessageClause(OMPMessageClause *C) {
+ ExprResult E = getDerived().TransformExpr(C->getMessageString());
+ if (E.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPMessageClause(
+ C->getMessageString(), C->getBeginLoc(), C->getLParenLoc(),
+ C->getEndLoc());
+}
+
+template <typename Derived>
OMPClause *
TreeTransform<Derived>::TransformOMPPrivateClause(OMPPrivateClause *C) {
llvm::SmallVector<Expr *, 16> Vars;
@@ -9757,7 +10323,8 @@ TreeTransform<Derived>::TransformOMPLinearClause(OMPLinearClause *C) {
return nullptr;
return getDerived().RebuildOMPLinearClause(
Vars, Step.get(), C->getBeginLoc(), C->getLParenLoc(), C->getModifier(),
- C->getModifierLoc(), C->getColonLoc(), C->getEndLoc());
+ C->getModifierLoc(), C->getColonLoc(), C->getStepModifierLoc(),
+ C->getEndLoc());
}
template <typename Derived>
@@ -9852,9 +10419,9 @@ TreeTransform<Derived>::TransformOMPDependClause(OMPDependClause *C) {
Vars.push_back(EVar.get());
}
return getDerived().RebuildOMPDependClause(
- DepModifier, C->getDependencyKind(), C->getDependencyLoc(),
- C->getColonLoc(), Vars, C->getBeginLoc(), C->getLParenLoc(),
- C->getEndLoc());
+ {C->getDependencyKind(), C->getDependencyLoc(), C->getColonLoc(),
+ C->getOmpAllMemoryLoc()},
+ DepModifier, Vars, C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -9925,6 +10492,13 @@ template <typename Derived>
OMPClause *TreeTransform<Derived>::TransformOMPMapClause(OMPMapClause *C) {
OMPVarListLocTy Locs(C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
llvm::SmallVector<Expr *, 16> Vars;
+ Expr *IteratorModifier = C->getIteratorModifier();
+ if (IteratorModifier) {
+ ExprResult MapModRes = getDerived().TransformExpr(IteratorModifier);
+ if (MapModRes.isInvalid())
+ return nullptr;
+ IteratorModifier = MapModRes.get();
+ }
CXXScopeSpec MapperIdScopeSpec;
DeclarationNameInfo MapperIdInfo;
llvm::SmallVector<Expr *, 16> UnresolvedMappers;
@@ -9932,9 +10506,9 @@ OMPClause *TreeTransform<Derived>::TransformOMPMapClause(OMPMapClause *C) {
*this, C, Vars, MapperIdScopeSpec, MapperIdInfo, UnresolvedMappers))
return nullptr;
return getDerived().RebuildOMPMapClause(
- C->getMapTypeModifiers(), C->getMapTypeModifiersLoc(), MapperIdScopeSpec,
- MapperIdInfo, C->getMapType(), C->isImplicitMapType(), C->getMapLoc(),
- C->getColonLoc(), Vars, Locs, UnresolvedMappers);
+ IteratorModifier, C->getMapTypeModifiers(), C->getMapTypeModifiersLoc(),
+ MapperIdScopeSpec, MapperIdInfo, C->getMapType(), C->isImplicitMapType(),
+ C->getMapLoc(), C->getColonLoc(), Vars, Locs, UnresolvedMappers);
}
template <typename Derived>
@@ -9997,7 +10571,8 @@ TreeTransform<Derived>::TransformOMPGrainsizeClause(OMPGrainsizeClause *C) {
if (E.isInvalid())
return nullptr;
return getDerived().RebuildOMPGrainsizeClause(
- E.get(), C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+ C->getModifier(), E.get(), C->getBeginLoc(), C->getLParenLoc(),
+ C->getModifierLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -10007,7 +10582,8 @@ TreeTransform<Derived>::TransformOMPNumTasksClause(OMPNumTasksClause *C) {
if (E.isInvalid())
return nullptr;
return getDerived().RebuildOMPNumTasksClause(
- E.get(), C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+ C->getModifier(), E.get(), C->getBeginLoc(), C->getLParenLoc(),
+ C->getModifierLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -10120,6 +10696,21 @@ TreeTransform<Derived>::TransformOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
}
template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPHasDeviceAddrClause(
+ OMPHasDeviceAddrClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ OMPVarListLocTy Locs(C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+ return getDerived().RebuildOMPHasDeviceAddrClause(Vars, Locs);
+}
+
+template <typename Derived>
OMPClause *
TreeTransform<Derived>::TransformOMPNontemporalClause(OMPNontemporalClause *C) {
llvm::SmallVector<Expr *, 16> Vars;
@@ -10214,9 +10805,57 @@ TreeTransform<Derived>::TransformOMPAffinityClause(OMPAffinityClause *C) {
template <typename Derived>
OMPClause *TreeTransform<Derived>::TransformOMPOrderClause(OMPOrderClause *C) {
- return getDerived().RebuildOMPOrderClause(C->getKind(), C->getKindKwLoc(),
- C->getBeginLoc(), C->getLParenLoc(),
- C->getEndLoc());
+ return getDerived().RebuildOMPOrderClause(
+ C->getKind(), C->getKindKwLoc(), C->getBeginLoc(), C->getLParenLoc(),
+ C->getEndLoc(), C->getModifier(), C->getModifierKwLoc());
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPBindClause(OMPBindClause *C) {
+ return getDerived().RebuildOMPBindClause(
+ C->getBindKind(), C->getBindKindLoc(), C->getBeginLoc(),
+ C->getLParenLoc(), C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPXDynCGroupMemClause(
+ OMPXDynCGroupMemClause *C) {
+ ExprResult Size = getDerived().TransformExpr(C->getSize());
+ if (Size.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPXDynCGroupMemClause(
+ Size.get(), C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPDoacrossClause(OMPDoacrossClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ return getDerived().RebuildOMPDoacrossClause(
+ C->getDependenceType(), C->getDependenceLoc(), C->getColonLoc(), Vars,
+ C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPXAttributeClause(OMPXAttributeClause *C) {
+ SmallVector<const Attr *> NewAttrs;
+ for (auto *A : C->getAttrs())
+ NewAttrs.push_back(getDerived().TransformAttr(A));
+ return getDerived().RebuildOMPXAttributeClause(
+ NewAttrs, C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPXBareClause(OMPXBareClause *C) {
+ return getDerived().RebuildOMPXBareClause(C->getBeginLoc(), C->getEndLoc());
}
//===----------------------------------------------------------------------===//
@@ -10356,17 +10995,20 @@ TreeTransform<Derived>::TransformCharacterLiteral(CharacterLiteral *E) {
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformUserDefinedLiteral(UserDefinedLiteral *E) {
- if (FunctionDecl *FD = E->getDirectCallee())
- SemaRef.MarkFunctionReferenced(E->getBeginLoc(), FD);
- return SemaRef.MaybeBindToTemporary(E);
+ return getDerived().TransformCallExpr(E);
}
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformGenericSelectionExpr(GenericSelectionExpr *E) {
- ExprResult ControllingExpr =
- getDerived().TransformExpr(E->getControllingExpr());
- if (ControllingExpr.isInvalid())
+ ExprResult ControllingExpr;
+ TypeSourceInfo *ControllingType = nullptr;
+ if (E->isExprPredicate())
+ ControllingExpr = getDerived().TransformExpr(E->getControllingExpr());
+ else
+ ControllingType = getDerived().TransformType(E->getControllingType());
+
+ if (ControllingExpr.isInvalid() && !ControllingType)
return ExprError();
SmallVector<Expr *, 4> AssocExprs;
@@ -10389,12 +11031,16 @@ TreeTransform<Derived>::TransformGenericSelectionExpr(GenericSelectionExpr *E) {
AssocExprs.push_back(AssocExpr.get());
}
+ if (!ControllingType)
return getDerived().RebuildGenericSelectionExpr(E->getGenericLoc(),
E->getDefaultLoc(),
E->getRParenLoc(),
ControllingExpr.get(),
AssocTypes,
AssocExprs);
+ return getDerived().RebuildGenericSelectionExpr(
+ E->getGenericLoc(), E->getDefaultLoc(), E->getRParenLoc(),
+ ControllingType, AssocTypes, AssocExprs);
}
template<typename Derived>
@@ -10857,11 +11503,15 @@ TreeTransform<Derived>::TransformMemberExpr(MemberExpr *E) {
FoundDecl == E->getFoundDecl() &&
!E->hasExplicitTemplateArgs()) {
- // Mark it referenced in the new context regardless.
- // FIXME: this is a bit instantiation-specific.
- SemaRef.MarkMemberReferenced(E);
-
- return E;
+ // Skip for member expression of (this->f), rebuilt thisi->f is needed
+ // for Openmp where the field need to be privatizized in the case.
+ if (!(isa<CXXThisExpr>(E->getBase()) &&
+ getSema().isOpenMPRebuildMemberExpr(cast<ValueDecl>(Member)))) {
+ // Mark it referenced in the new context regardless.
+ // FIXME: this is a bit instantiation-specific.
+ SemaRef.MarkMemberReferenced(E);
+ return E;
+ }
}
TemplateArgumentListInfo TransArgs;
@@ -10909,7 +11559,8 @@ TreeTransform<Derived>::TransformBinaryOperator(BinaryOperator *E) {
if (LHS.isInvalid())
return ExprError();
- ExprResult RHS = getDerived().TransformExpr(E->getRHS());
+ ExprResult RHS =
+ getDerived().TransformInitializer(E->getRHS(), /*NotCopyInit=*/false);
if (RHS.isInvalid())
return ExprError();
@@ -10923,7 +11574,7 @@ TreeTransform<Derived>::TransformBinaryOperator(BinaryOperator *E) {
return getDerived().RebuildBinaryOperator(
E->getOperatorLoc(), E->getOpcode(), LHS.get(), RHS.get());
Sema::FPFeaturesStateRAII FPFeaturesState(getSema());
- FPOptionsOverride NewOverrides(E->getFPFeatures(getSema().getLangOpts()));
+ FPOptionsOverride NewOverrides(E->getFPFeatures());
getSema().CurFPFeatures =
NewOverrides.applyOverrides(getSema().getLangOpts());
getSema().FpPragmaStack.CurrentValue = NewOverrides;
@@ -10944,14 +11595,10 @@ ExprResult TreeTransform<Derived>::TransformCXXRewrittenBinaryOperator(
if (RHS.isInvalid())
return ExprError();
- if (!getDerived().AlwaysRebuild() &&
- LHS.get() == Decomp.LHS &&
- RHS.get() == Decomp.RHS)
- return E;
-
// Extract the already-resolved callee declarations so that we can restrict
// ourselves to using them as the unqualified lookup results when rebuilding.
UnresolvedSet<2> UnqualLookups;
+ bool ChangedAnyLookups = false;
Expr *PossibleBinOps[] = {E->getSemanticForm(),
const_cast<Expr *>(Decomp.InnerBinOp)};
for (Expr *PossibleBinOp : PossibleBinOps) {
@@ -10968,9 +11615,23 @@ ExprResult TreeTransform<Derived>::TransformCXXRewrittenBinaryOperator(
E->getOperatorLoc(), Callee->getFoundDecl()));
if (!Found)
return ExprError();
+ if (Found != Callee->getFoundDecl())
+ ChangedAnyLookups = true;
UnqualLookups.addDecl(Found);
}
+ if (!getDerived().AlwaysRebuild() && !ChangedAnyLookups &&
+ LHS.get() == Decomp.LHS && RHS.get() == Decomp.RHS) {
+ // Mark all functions used in the rewrite as referenced. Note that when
+ // a < b is rewritten to (a <=> b) < 0, both the <=> and the < might be
+ // function calls, and/or there might be a user-defined conversion sequence
+ // applied to the operands of the <.
+ // FIXME: this is a bit instantiation-specific.
+ const Expr *StopAt[] = {Decomp.LHS, Decomp.RHS};
+ SemaRef.MarkDeclarationsReferencedInExpr(E, false, StopAt);
+ return E;
+ }
+
return getDerived().RebuildCXXRewrittenBinaryOperator(
E->getOperatorLoc(), Decomp.Opcode, UnqualLookups, LHS.get(), RHS.get());
}
@@ -10980,7 +11641,7 @@ ExprResult
TreeTransform<Derived>::TransformCompoundAssignOperator(
CompoundAssignOperator *E) {
Sema::FPFeaturesStateRAII FPFeaturesState(getSema());
- FPOptionsOverride NewOverrides(E->getFPFeatures(getSema().getLangOpts()));
+ FPOptionsOverride NewOverrides(E->getFPFeatures());
getSema().CurFPFeatures =
NewOverrides.applyOverrides(getSema().getLangOpts());
getSema().FpPragmaStack.CurrentValue = NewOverrides;
@@ -11112,9 +11773,9 @@ TreeTransform<Derived>::TransformExtVectorElementExpr(ExtVectorElementExpr *E) {
// FIXME: Bad source location
SourceLocation FakeOperatorLoc =
SemaRef.getLocForEndOfToken(E->getBase()->getEndLoc());
- return getDerived().RebuildExtVectorElementExpr(Base.get(), FakeOperatorLoc,
- E->getAccessorLoc(),
- E->getAccessor());
+ return getDerived().RebuildExtVectorElementExpr(
+ Base.get(), FakeOperatorLoc, E->isArrow(), E->getAccessorLoc(),
+ E->getAccessor());
}
template<typename Derived>
@@ -11159,22 +11820,23 @@ TreeTransform<Derived>::TransformDesignatedInitExpr(DesignatedInitExpr *E) {
bool ExprChanged = false;
for (const DesignatedInitExpr::Designator &D : E->designators()) {
if (D.isFieldDesignator()) {
- Desig.AddDesignator(Designator::getField(D.getFieldName(),
- D.getDotLoc(),
- D.getFieldLoc()));
- if (D.getField()) {
+ if (D.getFieldDecl()) {
FieldDecl *Field = cast_or_null<FieldDecl>(
- getDerived().TransformDecl(D.getFieldLoc(), D.getField()));
- if (Field != D.getField())
+ getDerived().TransformDecl(D.getFieldLoc(), D.getFieldDecl()));
+ if (Field != D.getFieldDecl())
// Rebuild the expression when the transformed FieldDecl is
// different to the already assigned FieldDecl.
ExprChanged = true;
+ if (Field->isAnonymousStructOrUnion())
+ continue;
} else {
// Ensure that the designator expression is rebuilt when there isn't
// a resolved FieldDecl in the designator as we don't want to assign
// a FieldDecl to a pattern designator that will be instantiated again.
ExprChanged = true;
}
+ Desig.AddDesignator(Designator::CreateFieldDesignator(
+ D.getFieldName(), D.getDotLoc(), D.getFieldLoc()));
continue;
}
@@ -11184,7 +11846,7 @@ TreeTransform<Derived>::TransformDesignatedInitExpr(DesignatedInitExpr *E) {
return ExprError();
Desig.AddDesignator(
- Designator::getArray(Index.get(), D.getLBracketLoc()));
+ Designator::CreateArrayDesignator(Index.get(), D.getLBracketLoc()));
ExprChanged = ExprChanged || Init.get() != E->getArrayIndex(D);
ArrayExprs.push_back(Index.get());
@@ -11201,10 +11863,8 @@ TreeTransform<Derived>::TransformDesignatedInitExpr(DesignatedInitExpr *E) {
if (End.isInvalid())
return ExprError();
- Desig.AddDesignator(Designator::getArrayRange(Start.get(),
- End.get(),
- D.getLBracketLoc(),
- D.getEllipsisLoc()));
+ Desig.AddDesignator(Designator::CreateArrayRangeDesignator(
+ Start.get(), End.get(), D.getLBracketLoc(), D.getEllipsisLoc()));
ExprChanged = ExprChanged || Start.get() != E->getArrayRangeStart(D) ||
End.get() != E->getArrayRangeEnd(D);
@@ -11393,6 +12053,7 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
case OO_Array_Delete:
llvm_unreachable("new and delete operators cannot use CXXOperatorCallExpr");
+ case OO_Subscript:
case OO_Call: {
// This is a call to an object's operator().
assert(E->getNumArgs() >= 1 && "Object call is missing arguments");
@@ -11412,17 +12073,20 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
Args))
return ExprError();
+ if (E->getOperator() == OO_Subscript)
+ return getDerived().RebuildCxxSubscriptExpr(Object.get(), FakeLParenLoc,
+ Args, E->getEndLoc());
+
return getDerived().RebuildCallExpr(Object.get(), FakeLParenLoc, Args,
E->getEndLoc());
}
-#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
- case OO_##Name:
+#define OVERLOADED_OPERATOR(Name, Spelling, Token, Unary, Binary, MemberOnly) \
+ case OO_##Name: \
+ break;
+
#define OVERLOADED_OPERATOR_MULTI(Name,Spelling,Unary,Binary,MemberOnly)
#include "clang/Basic/OperatorKinds.def"
- case OO_Subscript:
- // Handled below.
- break;
case OO_Conditional:
llvm_unreachable("conditional operator is not actually overloadable");
@@ -11432,10 +12096,6 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
llvm_unreachable("not an overloaded operator?");
}
- ExprResult Callee = getDerived().TransformExpr(E->getCallee());
- if (Callee.isInvalid())
- return ExprError();
-
ExprResult First;
if (E->getOperator() == OO_Amp)
First = getDerived().TransformAddressOfOperand(E->getArg(0));
@@ -11446,28 +12106,45 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
ExprResult Second;
if (E->getNumArgs() == 2) {
- Second = getDerived().TransformExpr(E->getArg(1));
+ Second =
+ getDerived().TransformInitializer(E->getArg(1), /*NotCopyInit=*/false);
if (Second.isInvalid())
return ExprError();
}
- if (!getDerived().AlwaysRebuild() &&
- Callee.get() == E->getCallee() &&
- First.get() == E->getArg(0) &&
- (E->getNumArgs() != 2 || Second.get() == E->getArg(1)))
- return SemaRef.MaybeBindToTemporary(E);
-
Sema::FPFeaturesStateRAII FPFeaturesState(getSema());
FPOptionsOverride NewOverrides(E->getFPFeatures());
getSema().CurFPFeatures =
NewOverrides.applyOverrides(getSema().getLangOpts());
getSema().FpPragmaStack.CurrentValue = NewOverrides;
- return getDerived().RebuildCXXOperatorCallExpr(E->getOperator(),
- E->getOperatorLoc(),
- Callee.get(),
- First.get(),
- Second.get());
+ Expr *Callee = E->getCallee();
+ if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(Callee)) {
+ LookupResult R(SemaRef, ULE->getName(), ULE->getNameLoc(),
+ Sema::LookupOrdinaryName);
+ if (getDerived().TransformOverloadExprDecls(ULE, ULE->requiresADL(), R))
+ return ExprError();
+
+ return getDerived().RebuildCXXOperatorCallExpr(
+ E->getOperator(), E->getOperatorLoc(), Callee->getBeginLoc(),
+ ULE->requiresADL(), R.asUnresolvedSet(), First.get(), Second.get());
+ }
+
+ UnresolvedSet<1> Functions;
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Callee))
+ Callee = ICE->getSubExprAsWritten();
+ NamedDecl *DR = cast<DeclRefExpr>(Callee)->getDecl();
+ ValueDecl *VD = cast_or_null<ValueDecl>(
+ getDerived().TransformDecl(DR->getLocation(), DR));
+ if (!VD)
+ return ExprError();
+
+ if (!isa<CXXMethodDecl>(VD))
+ Functions.addDecl(VD);
+
+ return getDerived().RebuildCXXOperatorCallExpr(
+ E->getOperator(), E->getOperatorLoc(), Callee->getBeginLoc(),
+ /*RequiresADL=*/false, Functions, First.get(), Second.get());
}
template<typename Derived>
@@ -11478,14 +12155,14 @@ TreeTransform<Derived>::TransformCXXMemberCallExpr(CXXMemberCallExpr *E) {
template <typename Derived>
ExprResult TreeTransform<Derived>::TransformSourceLocExpr(SourceLocExpr *E) {
- bool NeedRebuildFunc = E->getIdentKind() == SourceLocExpr::Function &&
+ bool NeedRebuildFunc = SourceLocExpr::MayBeDependent(E->getIdentKind()) &&
getSema().CurContext != E->getParentContext();
if (!getDerived().AlwaysRebuild() && !NeedRebuildFunc)
return E;
- return getDerived().RebuildSourceLocExpr(E->getIdentKind(), E->getBeginLoc(),
- E->getEndLoc(),
+ return getDerived().RebuildSourceLocExpr(E->getIdentKind(), E->getType(),
+ E->getBeginLoc(), E->getEndLoc(),
getSema().CurContext);
}
@@ -11708,7 +12385,16 @@ TreeTransform<Derived>::TransformCXXNullPtrLiteralExpr(
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCXXThisExpr(CXXThisExpr *E) {
- QualType T = getSema().getCurrentThisType();
+
+ // In lambdas, the qualifiers of the type depends of where in
+ // the call operator `this` appear, and we do not have a good way to
+ // rebuild this information, so we transform the type.
+ //
+ // In other contexts, the type of `this` may be overrided
+ // for type deduction, so we need to recompute it.
+ QualType T = getSema().getCurLambda() ?
+ getDerived().TransformType(E->getType())
+ : getSema().getCurrentThisType();
if (!getDerived().AlwaysRebuild() && T == E->getType()) {
// Mark it referenced in the new context regardless.
@@ -11743,11 +12429,20 @@ TreeTransform<Derived>::TransformCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
if (!Param)
return ExprError();
+ ExprResult InitRes;
+ if (E->hasRewrittenInit()) {
+ InitRes = getDerived().TransformExpr(E->getRewrittenExpr());
+ if (InitRes.isInvalid())
+ return ExprError();
+ }
+
if (!getDerived().AlwaysRebuild() && Param == E->getParam() &&
- E->getUsedContext() == SemaRef.CurContext)
+ E->getUsedContext() == SemaRef.CurContext &&
+ InitRes.get() == E->getRewrittenExpr())
return E;
- return getDerived().RebuildCXXDefaultArgExpr(E->getUsedLocation(), Param);
+ return getDerived().RebuildCXXDefaultArgExpr(E->getUsedLocation(), Param,
+ InitRes.get());
}
template<typename Derived>
@@ -11792,10 +12487,10 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
return ExprError();
// Transform the size of the array we're allocating (if any).
- Optional<Expr *> ArraySize;
- if (Optional<Expr *> OldArraySize = E->getArraySize()) {
+ std::optional<Expr *> ArraySize;
+ if (E->isArray()) {
ExprResult NewArraySize;
- if (*OldArraySize) {
+ if (std::optional<Expr *> OldArraySize = E->getArraySize()) {
NewArraySize = getDerived().TransformExpr(*OldArraySize);
if (NewArraySize.isInvalid())
return ExprError();
@@ -11981,12 +12676,9 @@ TreeTransform<Derived>::TransformCXXPseudoDestructorExpr(
E->getDestroyedTypeLoc());
} else {
// Look for a destructor known with the given name.
- ParsedType T = SemaRef.getDestructorName(E->getTildeLoc(),
- *E->getDestroyedTypeIdentifier(),
- E->getDestroyedTypeLoc(),
- /*Scope=*/nullptr,
- SS, ObjectTypePtr,
- false);
+ ParsedType T = SemaRef.getDestructorName(
+ *E->getDestroyedTypeIdentifier(), E->getDestroyedTypeLoc(),
+ /*Scope=*/nullptr, SS, ObjectTypePtr, false);
if (!T)
return ExprError();
@@ -12174,9 +12866,9 @@ TreeTransform<Derived>::TransformTypeTraitExpr(TypeTraitExpr *E) {
// be expanded.
bool Expand = true;
bool RetainExpansion = false;
- Optional<unsigned> OrigNumExpansions =
+ std::optional<unsigned> OrigNumExpansions =
ExpansionTL.getTypePtr()->getNumExpansions();
- Optional<unsigned> NumExpansions = OrigNumExpansions;
+ std::optional<unsigned> NumExpansions = OrigNumExpansions;
if (getDerived().TryExpandParameterPacks(ExpansionTL.getEllipsisLoc(),
PatternTL.getSourceRange(),
Unexpanded,
@@ -12297,7 +12989,8 @@ TreeTransform<Derived>::TransformRequiresExpr(RequiresExpr *E) {
// C++2a [expr.prim.req]p2
// Expressions appearing within a requirement-body are unevaluated operands.
EnterExpressionEvaluationContext Ctx(
- SemaRef, Sema::ExpressionEvaluationContext::Unevaluated);
+ SemaRef, Sema::ExpressionEvaluationContext::Unevaluated,
+ Sema::ReuseLambdaContextDecl);
RequiresExprBodyDecl *Body = RequiresExprBodyDecl::Create(
getSema().Context, getSema().CurContext,
@@ -12305,16 +12998,20 @@ TreeTransform<Derived>::TransformRequiresExpr(RequiresExpr *E) {
Sema::ContextRAII SavedContext(getSema(), Body, /*NewThisContext*/false);
- if (getDerived().TransformFunctionTypeParams(E->getRequiresKWLoc(),
- E->getLocalParameters(),
- /*ParamTypes=*/nullptr,
- /*ParamInfos=*/nullptr,
- TransParamTypes, &TransParams,
- ExtParamInfos))
- return ExprError();
+ ExprResult TypeParamResult = getDerived().TransformRequiresTypeParams(
+ E->getRequiresKWLoc(), E->getRBraceLoc(), E, Body,
+ E->getLocalParameters(), TransParamTypes, TransParams, ExtParamInfos);
for (ParmVarDecl *Param : TransParams)
- Param->setDeclContext(Body);
+ if (Param)
+ Param->setDeclContext(Body);
+
+ // On failure to transform, TransformRequiresTypeParams returns an expression
+ // in the event that the transformation of the type params failed in some way.
+ // It is expected that this will result in a 'not satisfied' Requires clause
+ // when instantiating.
+ if (!TypeParamResult.isUnset())
+ return TypeParamResult;
SmallVector<concepts::Requirement *, 4> TransReqs;
if (getDerived().TransformRequiresExprRequirements(E->getRequirements(),
@@ -12331,9 +13028,9 @@ TreeTransform<Derived>::TransformRequiresExpr(RequiresExpr *E) {
}
}
- return getDerived().RebuildRequiresExpr(E->getRequiresKWLoc(), Body,
- TransParams, TransReqs,
- E->getRBraceLoc());
+ return getDerived().RebuildRequiresExpr(
+ E->getRequiresKWLoc(), Body, E->getLParenLoc(), TransParams,
+ E->getRParenLoc(), TransReqs, E->getRBraceLoc());
}
template<typename Derived>
@@ -12380,12 +13077,14 @@ TreeTransform<Derived>::TransformExprRequirement(concepts::ExprRequirement *Req)
TransExpr = Req->getExprSubstitutionDiagnostic();
else {
ExprResult TransExprRes = getDerived().TransformExpr(Req->getExpr());
+ if (TransExprRes.isUsable() && TransExprRes.get()->hasPlaceholderType())
+ TransExprRes = SemaRef.CheckPlaceholderExpr(TransExprRes.get());
if (TransExprRes.isInvalid())
return nullptr;
TransExpr = TransExprRes.get();
}
- llvm::Optional<concepts::ExprRequirement::ReturnTypeRequirement> TransRetReq;
+ std::optional<concepts::ExprRequirement::ReturnTypeRequirement> TransRetReq;
const auto &RetReq = Req->getReturnTypeRequirement();
if (RetReq.isEmpty())
TransRetReq.emplace();
@@ -12400,8 +13099,7 @@ TreeTransform<Derived>::TransformExprRequirement(concepts::ExprRequirement *Req)
return nullptr;
TransRetReq.emplace(TPL);
}
- assert(TransRetReq.hasValue() &&
- "All code paths leading here must set TransRetReq");
+ assert(TransRetReq && "All code paths leading here must set TransRetReq");
if (Expr *E = TransExpr.dyn_cast<Expr *>())
return getDerived().RebuildExprRequirement(E, Req->isSimple(),
Req->getNoexceptLoc(),
@@ -12415,10 +13113,10 @@ template<typename Derived>
concepts::NestedRequirement *
TreeTransform<Derived>::TransformNestedRequirement(
concepts::NestedRequirement *Req) {
- if (Req->isSubstitutionFailure()) {
+ if (Req->hasInvalidConstraint()) {
if (getDerived().AlwaysRebuild())
return getDerived().RebuildNestedRequirement(
- Req->getSubstitutionDiagnostic());
+ Req->getInvalidConstraintEntity(), Req->getConstraintSatisfaction());
return Req;
}
ExprResult TransConstraint =
@@ -12627,6 +13325,9 @@ ExprResult TreeTransform<Derived>::TransformCXXInheritedCtorInitExpr(
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ if (auto *Dtor = E->getTemporary()->getDestructor())
+ SemaRef.MarkFunctionReferenced(E->getBeginLoc(),
+ const_cast<CXXDestructorDecl *>(Dtor));
return getDerived().TransformExpr(E->getSubExpr());
}
@@ -12705,10 +13406,10 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
continue;
TransformedInitCapture &Result = InitCaptures[C - E->capture_begin()];
- VarDecl *OldVD = C->getCapturedVar();
+ auto *OldVD = cast<VarDecl>(C->getCapturedVar());
auto SubstInitCapture = [&](SourceLocation EllipsisLoc,
- Optional<unsigned> NumExpansions) {
+ std::optional<unsigned> NumExpansions) {
ExprResult NewExprInitResult = getDerived().TransformInitializer(
OldVD->getInit(), OldVD->getInitStyle() == VarDecl::CallInit);
@@ -12720,9 +13421,10 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
QualType NewInitCaptureType =
getSema().buildLambdaInitCaptureInitialization(
- C->getLocation(), OldVD->getType()->isReferenceType(),
+ C->getLocation(), C->getCaptureKind() == LCK_ByRef,
EllipsisLoc, NumExpansions, OldVD->getIdentifier(),
- C->getCapturedVar()->getInitStyle() != VarDecl::CInit,
+ cast<VarDecl>(C->getCapturedVar())->getInitStyle() !=
+ VarDecl::CInit,
NewExprInit);
Result.Expansions.push_back(
InitCaptureInfoTy(NewExprInit, NewInitCaptureType));
@@ -12740,9 +13442,9 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
// be expanded.
bool Expand = true;
bool RetainExpansion = false;
- Optional<unsigned> OrigNumExpansions =
+ std::optional<unsigned> OrigNumExpansions =
ExpansionTL.getTypePtr()->getNumExpansions();
- Optional<unsigned> NumExpansions = OrigNumExpansions;
+ std::optional<unsigned> NumExpansions = OrigNumExpansions;
if (getDerived().TryExpandParameterPacks(
ExpansionTL.getEllipsisLoc(),
OldVD->getInit()->getSourceRange(), Unexpanded, Expand,
@@ -12751,7 +13453,7 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
if (Expand) {
for (unsigned I = 0; I != *NumExpansions; ++I) {
Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
- SubstInitCapture(SourceLocation(), None);
+ SubstInitCapture(SourceLocation(), std::nullopt);
}
}
if (!Expand || RetainExpansion) {
@@ -12760,98 +13462,47 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
Result.EllipsisLoc = ExpansionTL.getEllipsisLoc();
}
} else {
- SubstInitCapture(SourceLocation(), None);
+ SubstInitCapture(SourceLocation(), std::nullopt);
}
}
LambdaScopeInfo *LSI = getSema().PushLambdaScope();
Sema::FunctionScopeRAII FuncScopeCleanup(getSema());
- // Transform the template parameters, and add them to the current
- // instantiation scope. The null case is handled correctly.
- auto TPL = getDerived().TransformTemplateParameterList(
- E->getTemplateParameterList());
- LSI->GLTemplateParameterList = TPL;
-
- // Transform the type of the original lambda's call operator.
- // The transformation MUST be done in the CurrentInstantiationScope since
- // it introduces a mapping of the original to the newly created
- // transformed parameters.
- TypeSourceInfo *NewCallOpTSI = nullptr;
- {
- TypeSourceInfo *OldCallOpTSI = E->getCallOperator()->getTypeSourceInfo();
- FunctionProtoTypeLoc OldCallOpFPTL =
- OldCallOpTSI->getTypeLoc().getAs<FunctionProtoTypeLoc>();
-
- TypeLocBuilder NewCallOpTLBuilder;
- SmallVector<QualType, 4> ExceptionStorage;
- TreeTransform *This = this; // Work around gcc.gnu.org/PR56135.
- QualType NewCallOpType = TransformFunctionProtoType(
- NewCallOpTLBuilder, OldCallOpFPTL, nullptr, Qualifiers(),
- [&](FunctionProtoType::ExceptionSpecInfo &ESI, bool &Changed) {
- return This->TransformExceptionSpec(OldCallOpFPTL.getBeginLoc(), ESI,
- ExceptionStorage, Changed);
- });
- if (NewCallOpType.isNull())
- return ExprError();
- NewCallOpTSI = NewCallOpTLBuilder.getTypeSourceInfo(getSema().Context,
- NewCallOpType);
- }
+ // Create the local class that will describe the lambda.
- // Transform the trailing requires clause
- ExprResult NewTrailingRequiresClause;
- if (Expr *TRC = E->getCallOperator()->getTrailingRequiresClause())
- // FIXME: Concepts: Substitution into requires clause should only happen
- // when checking satisfaction.
- NewTrailingRequiresClause = getDerived().TransformExpr(TRC);
+ // FIXME: DependencyKind below is wrong when substituting inside a templated
+ // context that isn't a DeclContext (such as a variable template), or when
+ // substituting an unevaluated lambda inside of a function's parameter's type
+ // - as parameter types are not instantiated from within a function's DC. We
+ // use evaluation contexts to distinguish the function parameter case.
+ CXXRecordDecl::LambdaDependencyKind DependencyKind =
+ CXXRecordDecl::LDK_Unknown;
+ if ((getSema().isUnevaluatedContext() ||
+ getSema().isConstantEvaluatedContext()) &&
+ (getSema().CurContext->isFileContext() ||
+ !getSema().CurContext->getParent()->isDependentContext()))
+ DependencyKind = CXXRecordDecl::LDK_NeverDependent;
- // Create the local class that will describe the lambda.
- // FIXME: KnownDependent below is wrong when substituting inside a templated
- // context that isn't a DeclContext (such as a variable template).
CXXRecordDecl *OldClass = E->getLambdaClass();
- CXXRecordDecl *Class
- = getSema().createLambdaClosureType(E->getIntroducerRange(),
- NewCallOpTSI,
- /*KnownDependent=*/false,
- E->getCaptureDefault());
+ CXXRecordDecl *Class = getSema().createLambdaClosureType(
+ E->getIntroducerRange(), /*Info=*/nullptr, DependencyKind,
+ E->getCaptureDefault());
getDerived().transformedLocalDecl(OldClass, {Class});
- Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling;
- if (getDerived().ReplacingOriginal())
- Mangling = std::make_tuple(OldClass->hasKnownLambdaInternalLinkage(),
- OldClass->getLambdaManglingNumber(),
- OldClass->getDeviceLambdaManglingNumber(),
- OldClass->getLambdaContextDecl());
-
- // Build the call operator.
- CXXMethodDecl *NewCallOperator = getSema().startLambdaDefinition(
- Class, E->getIntroducerRange(), NewCallOpTSI,
- E->getCallOperator()->getEndLoc(),
- NewCallOpTSI->getTypeLoc().castAs<FunctionProtoTypeLoc>().getParams(),
- E->getCallOperator()->getConstexprKind(),
- NewTrailingRequiresClause.get());
-
- LSI->CallOperator = NewCallOperator;
-
- getDerived().transformAttrs(E->getCallOperator(), NewCallOperator);
- getDerived().transformedLocalDecl(E->getCallOperator(), {NewCallOperator});
+ CXXMethodDecl *NewCallOperator =
+ getSema().CreateLambdaCallOperator(E->getIntroducerRange(), Class);
+ NewCallOperator->setLexicalDeclContext(getSema().CurContext);
- // Number the lambda for linkage purposes if necessary.
- getSema().handleLambdaNumbering(Class, NewCallOperator, Mangling);
+ // Enter the scope of the lambda.
+ getSema().buildLambdaScope(LSI, NewCallOperator, E->getIntroducerRange(),
+ E->getCaptureDefault(), E->getCaptureDefaultLoc(),
+ E->hasExplicitParameters(), E->isMutable());
// Introduce the context of the call operator.
Sema::ContextRAII SavedContext(getSema(), NewCallOperator,
/*NewThisContext*/false);
- // Enter the scope of the lambda.
- getSema().buildLambdaScope(LSI, NewCallOperator,
- E->getIntroducerRange(),
- E->getCaptureDefault(),
- E->getCaptureDefaultLoc(),
- E->hasExplicitParameters(),
- E->hasExplicitResultType(),
- E->isMutable());
-
bool Invalid = false;
// Transform captures.
@@ -12865,6 +13516,16 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
// Capturing 'this' is trivial.
if (C->capturesThis()) {
+ // If this is a lambda that is part of a default member initialiser
+ // and which we're instantiating outside the class that 'this' is
+ // supposed to refer to, adjust the type of 'this' accordingly.
+ //
+ // Otherwise, leave the type of 'this' as-is.
+ Sema::CXXThisScopeRAII ThisScope(
+ getSema(),
+ dyn_cast_if_present<CXXRecordDecl>(
+ getSema().getFunctionLevelDeclContext()),
+ Qualifiers());
getSema().CheckCXXThisCapture(C->getLocation(), C->isExplicit(),
/*BuildAndDiagnose*/ true, nullptr,
C->getCaptureKind() == LCK_StarThis);
@@ -12879,7 +13540,7 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
if (E->isInitCapture(C)) {
TransformedInitCapture &NewC = InitCaptures[C - E->capture_begin()];
- VarDecl *OldVD = C->getCapturedVar();
+ auto *OldVD = cast<VarDecl>(C->getCapturedVar());
llvm::SmallVector<Decl*, 4> NewVDs;
for (InitCaptureInfoTy &Info : NewC.Expansions) {
@@ -12891,13 +13552,14 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
}
VarDecl *NewVD = getSema().createLambdaInitCaptureVarDecl(
OldVD->getLocation(), InitQualType, NewC.EllipsisLoc,
- OldVD->getIdentifier(), OldVD->getInitStyle(), Init.get());
+ OldVD->getIdentifier(), OldVD->getInitStyle(), Init.get(),
+ getSema().CurContext);
if (!NewVD) {
Invalid = true;
break;
}
NewVDs.push_back(NewVD);
- getSema().addInitCapture(LSI, NewVD);
+ getSema().addInitCapture(LSI, NewVD, C->getCaptureKind() == LCK_ByRef);
}
if (Invalid)
@@ -12920,7 +13582,7 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
UnexpandedParameterPack Unexpanded(C->getCapturedVar(), C->getLocation());
bool ShouldExpand = false;
bool RetainExpansion = false;
- Optional<unsigned> NumExpansions;
+ std::optional<unsigned> NumExpansions;
if (getDerived().TryExpandParameterPacks(C->getEllipsisLoc(),
C->getLocation(),
Unexpanded,
@@ -12934,7 +13596,7 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
// The transform has determined that we should perform an expansion;
// transform and capture each of the arguments.
// expansion of the pattern. Do so.
- VarDecl *Pack = C->getCapturedVar();
+ auto *Pack = cast<VarDecl>(C->getCapturedVar());
for (unsigned I = 0; I != *NumExpansions; ++I) {
Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
VarDecl *CapturedVar
@@ -12958,9 +13620,8 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
}
// Transform the captured variable.
- VarDecl *CapturedVar
- = cast_or_null<VarDecl>(getDerived().TransformDecl(C->getLocation(),
- C->getCapturedVar()));
+ auto *CapturedVar = cast_or_null<ValueDecl>(
+ getDerived().TransformDecl(C->getLocation(), C->getCapturedVar()));
if (!CapturedVar || CapturedVar->isInvalidDecl()) {
Invalid = true;
continue;
@@ -12972,15 +13633,103 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
}
getSema().finishLambdaExplicitCaptures(LSI);
+ // Transform the template parameters, and add them to the current
+ // instantiation scope. The null case is handled correctly.
+ auto TPL = getDerived().TransformTemplateParameterList(
+ E->getTemplateParameterList());
+ LSI->GLTemplateParameterList = TPL;
+ if (TPL)
+ getSema().AddTemplateParametersToLambdaCallOperator(NewCallOperator, Class,
+ TPL);
+
+ // Transform the type of the original lambda's call operator.
+ // The transformation MUST be done in the CurrentInstantiationScope since
+ // it introduces a mapping of the original to the newly created
+ // transformed parameters.
+ TypeSourceInfo *NewCallOpTSI = nullptr;
+ {
+ auto OldCallOpTypeLoc =
+ E->getCallOperator()->getTypeSourceInfo()->getTypeLoc();
+
+ auto TransformFunctionProtoTypeLoc =
+ [this](TypeLocBuilder &TLB, FunctionProtoTypeLoc FPTL) -> QualType {
+ SmallVector<QualType, 4> ExceptionStorage;
+ return this->TransformFunctionProtoType(
+ TLB, FPTL, nullptr, Qualifiers(),
+ [&](FunctionProtoType::ExceptionSpecInfo &ESI, bool &Changed) {
+ return TransformExceptionSpec(FPTL.getBeginLoc(), ESI,
+ ExceptionStorage, Changed);
+ });
+ };
+
+ QualType NewCallOpType;
+ TypeLocBuilder NewCallOpTLBuilder;
+
+ if (auto ATL = OldCallOpTypeLoc.getAs<AttributedTypeLoc>()) {
+ NewCallOpType = this->TransformAttributedType(
+ NewCallOpTLBuilder, ATL,
+ [&](TypeLocBuilder &TLB, TypeLoc TL) -> QualType {
+ return TransformFunctionProtoTypeLoc(
+ TLB, TL.castAs<FunctionProtoTypeLoc>());
+ });
+ } else {
+ auto FPTL = OldCallOpTypeLoc.castAs<FunctionProtoTypeLoc>();
+ NewCallOpType = TransformFunctionProtoTypeLoc(NewCallOpTLBuilder, FPTL);
+ }
+
+ if (NewCallOpType.isNull())
+ return ExprError();
+ NewCallOpTSI =
+ NewCallOpTLBuilder.getTypeSourceInfo(getSema().Context, NewCallOpType);
+ }
+
+ ArrayRef<ParmVarDecl *> Params;
+ if (auto ATL = NewCallOpTSI->getTypeLoc().getAs<AttributedTypeLoc>()) {
+ Params = ATL.getModifiedLoc().castAs<FunctionProtoTypeLoc>().getParams();
+ } else {
+ auto FPTL = NewCallOpTSI->getTypeLoc().castAs<FunctionProtoTypeLoc>();
+ Params = FPTL.getParams();
+ }
+
+ getSema().CompleteLambdaCallOperator(
+ NewCallOperator, E->getCallOperator()->getLocation(),
+ E->getCallOperator()->getInnerLocStart(),
+ E->getCallOperator()->getTrailingRequiresClause(), NewCallOpTSI,
+ E->getCallOperator()->getConstexprKind(),
+ E->getCallOperator()->getStorageClass(), Params,
+ E->hasExplicitResultType());
+
+ getDerived().transformAttrs(E->getCallOperator(), NewCallOperator);
+ getDerived().transformedLocalDecl(E->getCallOperator(), {NewCallOperator});
+
+ {
+ // Number the lambda for linkage purposes if necessary.
+ Sema::ContextRAII ManglingContext(getSema(), Class->getDeclContext());
+
+ std::optional<CXXRecordDecl::LambdaNumbering> Numbering;
+ if (getDerived().ReplacingOriginal()) {
+ Numbering = OldClass->getLambdaNumbering();
+ }
+
+ getSema().handleLambdaNumbering(Class, NewCallOperator, Numbering);
+ }
+
// FIXME: Sema's lambda-building mechanism expects us to push an expression
// evaluation context even if we're not transforming the function body.
getSema().PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
+ Sema::CodeSynthesisContext C;
+ C.Kind = clang::Sema::CodeSynthesisContext::LambdaExpressionSubstitution;
+ C.PointOfInstantiation = E->getBody()->getBeginLoc();
+ getSema().pushCodeSynthesisContext(C);
+
// Instantiate the body of the lambda expression.
StmtResult Body =
Invalid ? StmtError() : getDerived().TransformLambdaBody(E, E->getBody());
+ getSema().popCodeSynthesisContext();
+
// ActOnLambda* will pop the function scope for us.
FuncScopeCleanup.disable();
@@ -13297,7 +14046,7 @@ TreeTransform<Derived>::TransformSizeOfPackExpr(SizeOfPackExpr *E) {
UnexpandedParameterPack Unexpanded(E->getPack(), E->getPackLoc());
bool ShouldExpand = false;
bool RetainExpansion = false;
- Optional<unsigned> NumExpansions;
+ std::optional<unsigned> NumExpansions;
if (getDerived().TryExpandParameterPacks(E->getOperatorLoc(), E->getPackLoc(),
Unexpanded,
ShouldExpand, RetainExpansion,
@@ -13310,9 +14059,9 @@ TreeTransform<Derived>::TransformSizeOfPackExpr(SizeOfPackExpr *E) {
auto *Pack = E->getPack();
if (auto *TTPD = dyn_cast<TemplateTypeParmDecl>(Pack)) {
ArgStorage = getSema().Context.getPackExpansionType(
- getSema().Context.getTypeDeclType(TTPD), None);
+ getSema().Context.getTypeDeclType(TTPD), std::nullopt);
} else if (auto *TTPD = dyn_cast<TemplateTemplateParmDecl>(Pack)) {
- ArgStorage = TemplateArgument(TemplateName(TTPD), None);
+ ArgStorage = TemplateArgument(TemplateName(TTPD), std::nullopt);
} else {
auto *VD = cast<ValueDecl>(Pack);
ExprResult DRE = getSema().BuildDeclRefExpr(
@@ -13321,8 +14070,9 @@ TreeTransform<Derived>::TransformSizeOfPackExpr(SizeOfPackExpr *E) {
E->getPackLoc());
if (DRE.isInvalid())
return ExprError();
- ArgStorage = new (getSema().Context) PackExpansionExpr(
- getSema().Context.DependentTy, DRE.get(), E->getPackLoc(), None);
+ ArgStorage = new (getSema().Context)
+ PackExpansionExpr(getSema().Context.DependentTy, DRE.get(),
+ E->getPackLoc(), std::nullopt);
}
PackArgs = ArgStorage;
}
@@ -13334,13 +14084,13 @@ TreeTransform<Derived>::TransformSizeOfPackExpr(SizeOfPackExpr *E) {
getDerived().TransformDecl(E->getPackLoc(), E->getPack()));
if (!Pack)
return ExprError();
- return getDerived().RebuildSizeOfPackExpr(E->getOperatorLoc(), Pack,
- E->getPackLoc(),
- E->getRParenLoc(), None, None);
+ return getDerived().RebuildSizeOfPackExpr(
+ E->getOperatorLoc(), Pack, E->getPackLoc(), E->getRParenLoc(),
+ std::nullopt, std::nullopt);
}
// Try to compute the result without performing a partial substitution.
- Optional<unsigned> Result = 0;
+ std::optional<unsigned> Result = 0;
for (const TemplateArgument &Arg : PackArgs) {
if (!Arg.isPackExpansion()) {
Result = *Result + 1;
@@ -13352,7 +14102,7 @@ TreeTransform<Derived>::TransformSizeOfPackExpr(SizeOfPackExpr *E) {
// Find the pattern of the pack expansion.
SourceLocation Ellipsis;
- Optional<unsigned> OrigNumExpansions;
+ std::optional<unsigned> OrigNumExpansions;
TemplateArgumentLoc Pattern =
getSema().getTemplateArgumentPackExpansionPattern(ArgLoc, Ellipsis,
OrigNumExpansions);
@@ -13365,12 +14115,12 @@ TreeTransform<Derived>::TransformSizeOfPackExpr(SizeOfPackExpr *E) {
return true;
// See if we can determine the number of arguments from the result.
- Optional<unsigned> NumExpansions =
+ std::optional<unsigned> NumExpansions =
getSema().getFullyPackExpandedSize(OutPattern.getArgument());
if (!NumExpansions) {
// No: we must be in an alias template expansion, and we're going to need
// to actually expand the packs.
- Result = None;
+ Result = std::nullopt;
break;
}
@@ -13380,9 +14130,9 @@ TreeTransform<Derived>::TransformSizeOfPackExpr(SizeOfPackExpr *E) {
// Common case: we could determine the number of expansions without
// substituting.
if (Result)
- return getDerived().RebuildSizeOfPackExpr(E->getOperatorLoc(), E->getPack(),
- E->getPackLoc(),
- E->getRParenLoc(), *Result, None);
+ return getDerived().RebuildSizeOfPackExpr(
+ E->getOperatorLoc(), E->getPack(), E->getPackLoc(), E->getRParenLoc(),
+ *Result, std::nullopt);
TemplateArgumentListInfo TransformedPackArgs(E->getPackLoc(),
E->getPackLoc());
@@ -13407,13 +14157,13 @@ TreeTransform<Derived>::TransformSizeOfPackExpr(SizeOfPackExpr *E) {
}
if (PartialSubstitution)
- return getDerived().RebuildSizeOfPackExpr(E->getOperatorLoc(), E->getPack(),
- E->getPackLoc(),
- E->getRParenLoc(), None, Args);
+ return getDerived().RebuildSizeOfPackExpr(
+ E->getOperatorLoc(), E->getPack(), E->getPackLoc(), E->getRParenLoc(),
+ std::nullopt, Args);
return getDerived().RebuildSizeOfPackExpr(E->getOperatorLoc(), E->getPack(),
E->getPackLoc(), E->getRParenLoc(),
- Args.size(), None);
+ Args.size(), std::nullopt);
}
template<typename Derived>
@@ -13467,8 +14217,8 @@ TreeTransform<Derived>::TransformCXXFoldExpr(CXXFoldExpr *E) {
// be expanded.
bool Expand = true;
bool RetainExpansion = false;
- Optional<unsigned> OrigNumExpansions = E->getNumExpansions(),
- NumExpansions = OrigNumExpansions;
+ std::optional<unsigned> OrigNumExpansions = E->getNumExpansions(),
+ NumExpansions = OrigNumExpansions;
if (getDerived().TryExpandParameterPacks(E->getEllipsisLoc(),
Pattern->getSourceRange(),
Unexpanded,
@@ -13553,13 +14303,17 @@ TreeTransform<Derived>::TransformCXXFoldExpr(CXXFoldExpr *E) {
// We've got down to a single element; build a binary operator.
Expr *LHS = LeftFold ? Result.get() : Out.get();
Expr *RHS = LeftFold ? Out.get() : Result.get();
- if (Callee)
+ if (Callee) {
+ UnresolvedSet<16> Functions;
+ Functions.append(Callee->decls_begin(), Callee->decls_end());
Result = getDerived().RebuildCXXOperatorCallExpr(
BinaryOperator::getOverloadedOperator(E->getOperator()),
- E->getEllipsisLoc(), Callee, LHS, RHS);
- else
+ E->getEllipsisLoc(), Callee->getBeginLoc(), Callee->requiresADL(),
+ Functions, LHS, RHS);
+ } else {
Result = getDerived().RebuildBinaryOperator(E->getEllipsisLoc(),
E->getOperator(), LHS, RHS);
+ }
} else
Result = Out;
@@ -13592,6 +14346,19 @@ TreeTransform<Derived>::TransformCXXFoldExpr(CXXFoldExpr *E) {
return Result;
}
+template <typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXParenListInitExpr(CXXParenListInitExpr *E) {
+ SmallVector<Expr *, 4> TransformedInits;
+ ArrayRef<Expr *> InitExprs = E->getInitExprs();
+ if (TransformExprs(InitExprs.data(), InitExprs.size(), true,
+ TransformedInits))
+ return ExprError();
+
+ return getDerived().RebuildParenListExpr(E->getBeginLoc(), TransformedInits,
+ E->getEndLoc());
+}
+
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCXXStdInitializerListExpr(
@@ -13664,8 +14431,8 @@ TreeTransform<Derived>::TransformObjCDictionaryLiteral(
// and should be expanded.
bool Expand = true;
bool RetainExpansion = false;
- Optional<unsigned> OrigNumExpansions = OrigElement.NumExpansions;
- Optional<unsigned> NumExpansions = OrigNumExpansions;
+ std::optional<unsigned> OrigNumExpansions = OrigElement.NumExpansions;
+ std::optional<unsigned> NumExpansions = OrigNumExpansions;
SourceRange PatternRange(OrigElement.Key->getBeginLoc(),
OrigElement.Value->getEndLoc());
if (getDerived().TryExpandParameterPacks(OrigElement.EllipsisLoc,
@@ -13752,9 +14519,8 @@ TreeTransform<Derived>::TransformObjCDictionaryLiteral(
if (Value.get() != OrigElement.Value)
ArgChanged = true;
- ObjCDictionaryElement Element = {
- Key.get(), Value.get(), SourceLocation(), None
- };
+ ObjCDictionaryElement Element = {Key.get(), Value.get(), SourceLocation(),
+ std::nullopt};
Elements.push_back(Element);
}
@@ -14112,7 +14878,12 @@ TreeTransform<Derived>::TransformBlockExpr(BlockExpr *E) {
oldCapture));
assert(blockScope->CaptureMap.count(newCapture));
}
- assert(oldBlock->capturesCXXThis() == blockScope->isCXXThisCaptured());
+
+ // The this pointer may not be captured by the instantiated block, even when
+ // it's captured by the original block, if the expression causing the
+ // capture is in the discarded branch of a constexpr if statement.
+ assert((!blockScope->isCXXThisCaptured() || oldBlock->capturesCXXThis()) &&
+ "this pointer isn't captured in the old block");
}
#endif
@@ -14211,11 +14982,11 @@ QualType TreeTransform<Derived>::RebuildObjCObjectType(
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc) {
- return SemaRef.BuildObjCObjectType(BaseType, Loc, TypeArgsLAngleLoc,
- TypeArgs, TypeArgsRAngleLoc,
- ProtocolLAngleLoc, Protocols, ProtocolLocs,
- ProtocolRAngleLoc,
- /*FailOnError=*/true);
+ return SemaRef.BuildObjCObjectType(BaseType, Loc, TypeArgsLAngleLoc, TypeArgs,
+ TypeArgsRAngleLoc, ProtocolLAngleLoc,
+ Protocols, ProtocolLocs, ProtocolRAngleLoc,
+ /*FailOnError=*/true,
+ /*Rebuilding=*/true);
}
template<typename Derived>
@@ -14225,14 +14996,10 @@ QualType TreeTransform<Derived>::RebuildObjCObjectPointerType(
return SemaRef.Context.getObjCObjectPointerType(PointeeType);
}
-template<typename Derived>
-QualType
-TreeTransform<Derived>::RebuildArrayType(QualType ElementType,
- ArrayType::ArraySizeModifier SizeMod,
- const llvm::APInt *Size,
- Expr *SizeExpr,
- unsigned IndexTypeQuals,
- SourceRange BracketsRange) {
+template <typename Derived>
+QualType TreeTransform<Derived>::RebuildArrayType(
+ QualType ElementType, ArraySizeModifier SizeMod, const llvm::APInt *Size,
+ Expr *SizeExpr, unsigned IndexTypeQuals, SourceRange BracketsRange) {
if (SizeExpr || !Size)
return SemaRef.BuildArrayType(ElementType, SizeMod, SizeExpr,
IndexTypeQuals, BracketsRange,
@@ -14243,11 +15010,10 @@ TreeTransform<Derived>::RebuildArrayType(QualType ElementType,
SemaRef.Context.UnsignedIntTy, SemaRef.Context.UnsignedLongTy,
SemaRef.Context.UnsignedLongLongTy, SemaRef.Context.UnsignedInt128Ty
};
- const unsigned NumTypes = llvm::array_lengthof(Types);
QualType SizeType;
- for (unsigned I = 0; I != NumTypes; ++I)
- if (Size->getBitWidth() == SemaRef.Context.getIntWidth(Types[I])) {
- SizeType = Types[I];
+ for (const auto &T : Types)
+ if (Size->getBitWidth() == SemaRef.Context.getIntWidth(T)) {
+ SizeType = T;
break;
}
@@ -14261,47 +15027,35 @@ TreeTransform<Derived>::RebuildArrayType(QualType ElementType,
getDerived().getBaseEntity());
}
-template<typename Derived>
-QualType
-TreeTransform<Derived>::RebuildConstantArrayType(QualType ElementType,
- ArrayType::ArraySizeModifier SizeMod,
- const llvm::APInt &Size,
- Expr *SizeExpr,
- unsigned IndexTypeQuals,
- SourceRange BracketsRange) {
+template <typename Derived>
+QualType TreeTransform<Derived>::RebuildConstantArrayType(
+ QualType ElementType, ArraySizeModifier SizeMod, const llvm::APInt &Size,
+ Expr *SizeExpr, unsigned IndexTypeQuals, SourceRange BracketsRange) {
return getDerived().RebuildArrayType(ElementType, SizeMod, &Size, SizeExpr,
IndexTypeQuals, BracketsRange);
}
-template<typename Derived>
-QualType
-TreeTransform<Derived>::RebuildIncompleteArrayType(QualType ElementType,
- ArrayType::ArraySizeModifier SizeMod,
- unsigned IndexTypeQuals,
- SourceRange BracketsRange) {
+template <typename Derived>
+QualType TreeTransform<Derived>::RebuildIncompleteArrayType(
+ QualType ElementType, ArraySizeModifier SizeMod, unsigned IndexTypeQuals,
+ SourceRange BracketsRange) {
return getDerived().RebuildArrayType(ElementType, SizeMod, nullptr, nullptr,
IndexTypeQuals, BracketsRange);
}
-template<typename Derived>
-QualType
-TreeTransform<Derived>::RebuildVariableArrayType(QualType ElementType,
- ArrayType::ArraySizeModifier SizeMod,
- Expr *SizeExpr,
- unsigned IndexTypeQuals,
- SourceRange BracketsRange) {
+template <typename Derived>
+QualType TreeTransform<Derived>::RebuildVariableArrayType(
+ QualType ElementType, ArraySizeModifier SizeMod, Expr *SizeExpr,
+ unsigned IndexTypeQuals, SourceRange BracketsRange) {
return getDerived().RebuildArrayType(ElementType, SizeMod, nullptr,
SizeExpr,
IndexTypeQuals, BracketsRange);
}
-template<typename Derived>
-QualType
-TreeTransform<Derived>::RebuildDependentSizedArrayType(QualType ElementType,
- ArrayType::ArraySizeModifier SizeMod,
- Expr *SizeExpr,
- unsigned IndexTypeQuals,
- SourceRange BracketsRange) {
+template <typename Derived>
+QualType TreeTransform<Derived>::RebuildDependentSizedArrayType(
+ QualType ElementType, ArraySizeModifier SizeMod, Expr *SizeExpr,
+ unsigned IndexTypeQuals, SourceRange BracketsRange) {
return getDerived().RebuildArrayType(ElementType, SizeMod, nullptr,
SizeExpr,
IndexTypeQuals, BracketsRange);
@@ -14315,10 +15069,9 @@ QualType TreeTransform<Derived>::RebuildDependentAddressSpaceType(
}
template <typename Derived>
-QualType
-TreeTransform<Derived>::RebuildVectorType(QualType ElementType,
- unsigned NumElements,
- VectorType::VectorKind VecKind) {
+QualType TreeTransform<Derived>::RebuildVectorType(QualType ElementType,
+ unsigned NumElements,
+ VectorKind VecKind) {
// FIXME: semantic checking!
return SemaRef.Context.getVectorType(ElementType, NumElements, VecKind);
}
@@ -14326,7 +15079,7 @@ TreeTransform<Derived>::RebuildVectorType(QualType ElementType,
template <typename Derived>
QualType TreeTransform<Derived>::RebuildDependentVectorType(
QualType ElementType, Expr *SizeExpr, SourceLocation AttributeLoc,
- VectorType::VectorKind VecKind) {
+ VectorKind VecKind) {
return SemaRef.BuildVectorType(ElementType, SizeExpr, AttributeLoc);
}
@@ -14388,7 +15141,6 @@ QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(SourceLocation Loc,
if (D->isInvalidDecl()) return QualType();
// FIXME: Doesn't account for ObjCInterfaceDecl!
- TypeDecl *Ty;
if (auto *UPD = dyn_cast<UsingPackDecl>(D)) {
// A valid resolved using typename pack expansion decl can have multiple
// UsingDecls, but they must each have exactly one type, and it must be
@@ -14424,34 +15176,35 @@ QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(SourceLocation Loc,
// A valid resolved using typename decl points to exactly one type decl.
assert(++Using->shadow_begin() == Using->shadow_end());
- NamedDecl *Target = Using->shadow_begin()->getTargetDecl();
- if (SemaRef.DiagnoseUseOfDecl(Target, Loc))
+ UsingShadowDecl *Shadow = *Using->shadow_begin();
+ if (SemaRef.DiagnoseUseOfDecl(Shadow->getTargetDecl(), Loc))
return QualType();
- Ty = cast<TypeDecl>(Target);
+ return SemaRef.Context.getUsingType(
+ Shadow, SemaRef.Context.getTypeDeclType(
+ cast<TypeDecl>(Shadow->getTargetDecl())));
} else {
assert(isa<UnresolvedUsingTypenameDecl>(D) &&
"UnresolvedUsingTypenameDecl transformed to non-using decl");
- Ty = cast<UnresolvedUsingTypenameDecl>(D);
+ return SemaRef.Context.getTypeDeclType(
+ cast<UnresolvedUsingTypenameDecl>(D));
}
-
- return SemaRef.Context.getTypeDeclType(Ty);
}
-template<typename Derived>
-QualType TreeTransform<Derived>::RebuildTypeOfExprType(Expr *E,
- SourceLocation Loc) {
- return SemaRef.BuildTypeofExprType(E, Loc);
+template <typename Derived>
+QualType TreeTransform<Derived>::RebuildTypeOfExprType(Expr *E, SourceLocation,
+ TypeOfKind Kind) {
+ return SemaRef.BuildTypeofExprType(E, Kind);
}
template<typename Derived>
-QualType TreeTransform<Derived>::RebuildTypeOfType(QualType Underlying) {
- return SemaRef.Context.getTypeOfType(Underlying);
+QualType TreeTransform<Derived>::RebuildTypeOfType(QualType Underlying,
+ TypeOfKind Kind) {
+ return SemaRef.Context.getTypeOfType(Underlying, Kind);
}
-template<typename Derived>
-QualType TreeTransform<Derived>::RebuildDecltypeType(Expr *E,
- SourceLocation Loc) {
- return SemaRef.BuildDecltypeType(E, Loc);
+template <typename Derived>
+QualType TreeTransform<Derived>::RebuildDecltypeType(Expr *E, SourceLocation) {
+ return SemaRef.BuildDecltypeType(E);
}
template<typename Derived>
@@ -14484,20 +15237,20 @@ QualType TreeTransform<Derived>::RebuildPipeType(QualType ValueType,
}
template <typename Derived>
-QualType TreeTransform<Derived>::RebuildExtIntType(bool IsUnsigned,
+QualType TreeTransform<Derived>::RebuildBitIntType(bool IsUnsigned,
unsigned NumBits,
SourceLocation Loc) {
llvm::APInt NumBitsAP(SemaRef.Context.getIntWidth(SemaRef.Context.IntTy),
NumBits, true);
IntegerLiteral *Bits = IntegerLiteral::Create(SemaRef.Context, NumBitsAP,
SemaRef.Context.IntTy, Loc);
- return SemaRef.BuildExtIntType(IsUnsigned, Bits, Loc);
+ return SemaRef.BuildBitIntType(IsUnsigned, Bits, Loc);
}
template <typename Derived>
-QualType TreeTransform<Derived>::RebuildDependentExtIntType(
+QualType TreeTransform<Derived>::RebuildDependentBitIntType(
bool IsUnsigned, Expr *NumBitsExpr, SourceLocation Loc) {
- return SemaRef.BuildExtIntType(IsUnsigned, NumBitsExpr, Loc);
+ return SemaRef.BuildBitIntType(IsUnsigned, NumBitsExpr, Loc);
}
template<typename Derived>
@@ -14506,7 +15259,7 @@ TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
bool TemplateKW,
TemplateDecl *Template) {
return SemaRef.Context.getQualifiedTemplateName(SS.getScopeRep(), TemplateKW,
- Template);
+ TemplateName(Template));
}
template<typename Derived>
@@ -14547,14 +15300,11 @@ TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
return Template.get();
}
-template<typename Derived>
-ExprResult
-TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
- SourceLocation OpLoc,
- Expr *OrigCallee,
- Expr *First,
- Expr *Second) {
- Expr *Callee = OrigCallee->IgnoreParenCasts();
+template <typename Derived>
+ExprResult TreeTransform<Derived>::RebuildCXXOperatorCallExpr(
+ OverloadedOperatorKind Op, SourceLocation OpLoc, SourceLocation CalleeLoc,
+ bool RequiresADL, const UnresolvedSetImpl &Functions, Expr *First,
+ Expr *Second) {
bool isPostIncDec = Second && (Op == OO_PlusPlus || Op == OO_MinusMinus);
if (First->getObjectKind() == OK_ObjCProperty) {
@@ -14579,9 +15329,13 @@ TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
if (Op == OO_Subscript) {
if (!First->getType()->isOverloadableType() &&
!Second->getType()->isOverloadableType())
- return getSema().CreateBuiltinArraySubscriptExpr(
- First, Callee->getBeginLoc(), Second, OpLoc);
+ return getSema().CreateBuiltinArraySubscriptExpr(First, CalleeLoc, Second,
+ OpLoc);
} else if (Op == OO_Arrow) {
+ // It is possible that the type refers to a RecoveryExpr created earlier
+ // in the tree transformation.
+ if (First->getType()->isDependentType())
+ return ExprError();
// -> is never a builtin operation.
return SemaRef.BuildOverloadedArrowExpr(nullptr, First, OpLoc);
} else if (Second == nullptr || isPostIncDec) {
@@ -14610,27 +15364,6 @@ TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
}
}
- // Compute the transformed set of functions (and function templates) to be
- // used during overload resolution.
- UnresolvedSet<16> Functions;
- bool RequiresADL;
-
- if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(Callee)) {
- Functions.append(ULE->decls_begin(), ULE->decls_end());
- // If the overload could not be resolved in the template definition
- // (because we had a dependent argument), ADL is performed as part of
- // template instantiation.
- RequiresADL = ULE->requiresADL();
- } else {
- // If we've resolved this to a particular non-member function, just call
- // that function. If we resolved it to a member function,
- // CreateOverloaded* will find that function for us.
- NamedDecl *ND = cast<DeclRefExpr>(Callee)->getDecl();
- if (!isa<CXXMethodDecl>(ND))
- Functions.addDecl(ND);
- RequiresADL = false;
- }
-
// Add any functions found via argument-dependent lookup.
Expr *Args[2] = { First, Second };
unsigned NumArgs = 1 + (Second != nullptr);
@@ -14643,23 +15376,6 @@ TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
RequiresADL);
}
- if (Op == OO_Subscript) {
- SourceLocation LBrace;
- SourceLocation RBrace;
-
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Callee)) {
- DeclarationNameLoc NameLoc = DRE->getNameInfo().getInfo();
- LBrace = NameLoc.getCXXOperatorNameBeginLoc();
- RBrace = NameLoc.getCXXOperatorNameEndLoc();
- } else {
- LBrace = Callee->getBeginLoc();
- RBrace = OpLoc;
- }
-
- return SemaRef.CreateOverloadedArraySubscriptExpr(LBrace, RBrace,
- First, Second);
- }
-
// Create the overloaded operator invocation for binary operators.
BinaryOperatorKind Opc = BinaryOperator::getOverloadedOpcode(Op);
ExprResult Result = SemaRef.CreateOverloadedBinOp(
diff --git a/contrib/llvm-project/clang/lib/Sema/TypeLocBuilder.cpp b/contrib/llvm-project/clang/lib/Sema/TypeLocBuilder.cpp
index 2dcbbd83c691..fcd090ff2020 100644
--- a/contrib/llvm-project/clang/lib/Sema/TypeLocBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/TypeLocBuilder.cpp
@@ -41,6 +41,29 @@ void TypeLocBuilder::pushFullCopy(TypeLoc L) {
}
}
+void TypeLocBuilder::pushTrivial(ASTContext &Context, QualType T,
+ SourceLocation Loc) {
+ auto L = TypeLoc(T, nullptr);
+ reserve(L.getFullDataSize());
+
+ SmallVector<TypeLoc, 4> TypeLocs;
+ for (auto CurTL = L; CurTL; CurTL = CurTL.getNextTypeLoc())
+ TypeLocs.push_back(CurTL);
+
+ for (const auto &CurTL : llvm::reverse(TypeLocs)) {
+ switch (CurTL.getTypeLocClass()) {
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ case TypeLoc::CLASS: { \
+ auto NewTL = push<class CLASS##TypeLoc>(CurTL.getType()); \
+ NewTL.initializeLocal(Context, Loc); \
+ break; \
+ }
+#include "clang/AST/TypeLocNodes.def"
+ }
+ }
+}
+
void TypeLocBuilder::grow(size_t NewCapacity) {
assert(NewCapacity > Capacity);
@@ -85,7 +108,7 @@ TypeLoc TypeLocBuilder::pushImpl(QualType T, size_t LocalSize, unsigned LocalAli
// FIXME: 4 and 8 are sufficient at the moment, but it's pretty ugly to
// hardcode them.
if (LocalAlignment == 4) {
- if (NumBytesAtAlign8 == 0) {
+ if (!AtAlign8) {
NumBytesAtAlign4 += LocalSize;
} else {
unsigned Padding = NumBytesAtAlign4 % 8;
@@ -114,7 +137,7 @@ TypeLoc TypeLocBuilder::pushImpl(QualType T, size_t LocalSize, unsigned LocalAli
NumBytesAtAlign4 += LocalSize;
}
} else if (LocalAlignment == 8) {
- if (NumBytesAtAlign8 == 0) {
+ if (!AtAlign8) {
// We have not seen any 8-byte aligned element yet. We insert a padding
// only if the new Index is not 8-byte-aligned.
if ((Index - LocalSize) % 8 != 0) {
@@ -149,7 +172,7 @@ TypeLoc TypeLocBuilder::pushImpl(QualType T, size_t LocalSize, unsigned LocalAli
// Forget about any padding.
NumBytesAtAlign4 = 0;
- NumBytesAtAlign8 += LocalSize;
+ AtAlign8 = true;
} else {
assert(LocalSize == 0);
}
diff --git a/contrib/llvm-project/clang/lib/Sema/TypeLocBuilder.h b/contrib/llvm-project/clang/lib/Sema/TypeLocBuilder.h
index 738f731c9fe2..15eb2226cdf7 100644
--- a/contrib/llvm-project/clang/lib/Sema/TypeLocBuilder.h
+++ b/contrib/llvm-project/clang/lib/Sema/TypeLocBuilder.h
@@ -40,18 +40,22 @@ class TypeLocBuilder {
/// The inline buffer.
enum { BufferMaxAlignment = alignof(void *) };
alignas(BufferMaxAlignment) char InlineBuffer[InlineCapacity];
- unsigned NumBytesAtAlign4, NumBytesAtAlign8;
+ unsigned NumBytesAtAlign4;
+ bool AtAlign8;
public:
TypeLocBuilder()
: Buffer(InlineBuffer), Capacity(InlineCapacity), Index(InlineCapacity),
- NumBytesAtAlign4(0), NumBytesAtAlign8(0) {}
+ NumBytesAtAlign4(0), AtAlign8(false) {}
~TypeLocBuilder() {
if (Buffer != InlineBuffer)
delete[] Buffer;
}
+ TypeLocBuilder(const TypeLocBuilder &) = delete;
+ TypeLocBuilder &operator=(const TypeLocBuilder &) = delete;
+
/// Ensures that this buffer has at least as much capacity as described.
void reserve(size_t Requested) {
if (Requested > Capacity)
@@ -63,6 +67,10 @@ public:
/// must be empty for this to work.
void pushFullCopy(TypeLoc L);
+ /// Pushes 'T' with all locations pointing to 'Loc'.
+ /// The builder must be empty for this to work.
+ void pushTrivial(ASTContext &Context, QualType T, SourceLocation Loc);
+
/// Pushes space for a typespec TypeLoc. Invalidates any TypeLocs
/// previously retrieved from this builder.
TypeSpecTypeLoc pushTypeSpec(QualType T) {
@@ -77,7 +85,8 @@ public:
LastTy = QualType();
#endif
Index = Capacity;
- NumBytesAtAlign4 = NumBytesAtAlign8 = 0;
+ NumBytesAtAlign4 = 0;
+ AtAlign8 = false;
}
/// Tell the TypeLocBuilder that the type it is storing has been
diff --git a/contrib/llvm-project/clang/lib/Sema/UsedDeclVisitor.h b/contrib/llvm-project/clang/lib/Sema/UsedDeclVisitor.h
index c33d30478e2a..580d702f96fe 100644
--- a/contrib/llvm-project/clang/lib/Sema/UsedDeclVisitor.h
+++ b/contrib/llvm-project/clang/lib/Sema/UsedDeclVisitor.h
@@ -72,7 +72,8 @@ public:
QualType Destroyed = S.Context.getBaseElementType(DestroyedOrNull);
if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
- asImpl().visitUsedDecl(E->getBeginLoc(), S.LookupDestructor(Record));
+ if (Record->getDefinition())
+ asImpl().visitUsedDecl(E->getBeginLoc(), S.LookupDestructor(Record));
}
}
@@ -81,11 +82,28 @@ public:
void VisitCXXConstructExpr(CXXConstructExpr *E) {
asImpl().visitUsedDecl(E->getBeginLoc(), E->getConstructor());
+ CXXConstructorDecl *D = E->getConstructor();
+ for (const CXXCtorInitializer *Init : D->inits()) {
+ if (Init->isInClassMemberInitializer())
+ asImpl().Visit(Init->getInit());
+ }
Inherited::VisitCXXConstructExpr(E);
}
void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
asImpl().Visit(E->getExpr());
+ Inherited::VisitCXXDefaultArgExpr(E);
+ }
+
+ void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E) {
+ asImpl().Visit(E->getExpr());
+ Inherited::VisitCXXDefaultInitExpr(E);
+ }
+
+ void VisitInitListExpr(InitListExpr *ILE) {
+ if (ILE->hasArrayFiller())
+ asImpl().Visit(ILE->getArrayFiller());
+ Inherited::VisitInitListExpr(ILE);
}
void visitUsedDecl(SourceLocation Loc, Decl *D) {
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp
index 5fe1f96327dd..6110e287b7fb 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp
@@ -168,6 +168,9 @@ serialization::TypeIdxFromBuiltin(const BuiltinType *BT) {
case BuiltinType::Float128:
ID = PREDEF_TYPE_FLOAT128_ID;
break;
+ case BuiltinType::Ibm128:
+ ID = PREDEF_TYPE_IBM128_ID;
+ break;
case BuiltinType::NullPtr:
ID = PREDEF_TYPE_NULLPTR_ID;
break;
@@ -247,6 +250,11 @@ serialization::TypeIdxFromBuiltin(const BuiltinType *BT) {
ID = PREDEF_TYPE_##Id##_ID; \
break;
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) \
+ case BuiltinType::Id: \
+ ID = PREDEF_TYPE_##Id##_ID; \
+ break;
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
case BuiltinType::BuiltinFn:
ID = PREDEF_TYPE_BUILTIN_FN;
break;
@@ -388,6 +396,7 @@ bool serialization::isRedeclarableDeclKind(unsigned Kind) {
case Decl::Field:
case Decl::MSProperty:
case Decl::MSGuid:
+ case Decl::UnnamedGlobalConstant:
case Decl::TemplateParamObject:
case Decl::ObjCIvar:
case Decl::ObjCAtDefsField:
@@ -408,13 +417,13 @@ bool serialization::isRedeclarableDeclKind(unsigned Kind) {
case Decl::PragmaComment:
case Decl::PragmaDetectMismatch:
case Decl::FileScopeAsm:
+ case Decl::TopLevelStmt:
case Decl::AccessSpec:
case Decl::Friend:
case Decl::FriendTemplate:
case Decl::StaticAssert:
case Decl::Block:
case Decl::Captured:
- case Decl::ClassScopeFunctionSpecialization:
case Decl::Import:
case Decl::OMPThreadPrivate:
case Decl::OMPAllocate:
@@ -426,9 +435,11 @@ bool serialization::isRedeclarableDeclKind(unsigned Kind) {
case Decl::Decomposition:
case Decl::Binding:
case Decl::Concept:
+ case Decl::ImplicitConceptSpecialization:
case Decl::LifetimeExtendedTemporary:
case Decl::RequiresExprBody:
case Decl::UnresolvedUsingIfExists:
+ case Decl::HLSLBuffer:
return false;
// These indirectly derive from Redeclarable<T> but are not actually
@@ -468,13 +479,14 @@ bool serialization::needsAnonymousDeclarationNumber(const NamedDecl *D) {
if (auto *VD = dyn_cast<VarDecl>(D))
return VD->isStaticLocal();
// FIXME: What about CapturedDecls (and declarations nested within them)?
- return isa<TagDecl>(D) || isa<BlockDecl>(D);
+ return isa<TagDecl, BlockDecl>(D);
}
// Otherwise, we only care about anonymous class members / block-scope decls.
- // FIXME: We need to handle lambdas and blocks within inline / templated
- // variables too.
- if (D->getDeclName() || !isa<CXXRecordDecl>(D->getLexicalDeclContext()))
+ // FIXME: We need to handle blocks within inline / templated variables too.
+ if (D->getDeclName())
+ return false;
+ if (!isa<RecordDecl, ObjCInterfaceDecl>(D->getLexicalDeclContext()))
return false;
- return isa<TagDecl>(D) || isa<FieldDecl>(D);
+ return isa<TagDecl, FieldDecl>(D);
}
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp
index 1722572f1a27..490b8cb10a48 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp
@@ -10,15 +10,14 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Basic/OpenMPKinds.h"
-#include "clang/Serialization/ASTRecordReader.h"
#include "ASTCommon.h"
#include "ASTReaderInternals.h"
-#include "clang/AST/AbstractTypeReader.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/ASTStructuralEquivalence.h"
#include "clang/AST/ASTUnresolvedSet.h"
+#include "clang/AST/AbstractTypeReader.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
@@ -31,8 +30,9 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/NestedNameSpecifier.h"
-#include "clang/AST/OpenMPClause.h"
+#include "clang/AST/ODRDiagsEmitter.h"
#include "clang/AST/ODRHash.h"
+#include "clang/AST/OpenMPClause.h"
#include "clang/AST/RawCommentList.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
@@ -42,7 +42,9 @@
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/CommentOptions.h"
#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/DiagnosticError.h"
#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/FileSystemOptions.h"
@@ -51,6 +53,7 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/ObjCRuntime.h"
+#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Sanitizers.h"
@@ -76,6 +79,7 @@
#include "clang/Sema/Weak.h"
#include "clang/Serialization/ASTBitCodes.h"
#include "clang/Serialization/ASTDeserializationListener.h"
+#include "clang/Serialization/ASTRecordReader.h"
#include "clang/Serialization/ContinuousRangeMap.h"
#include "clang/Serialization/GlobalModuleIndex.h"
#include "clang/Serialization/InMemoryModuleCache.h"
@@ -93,8 +97,6 @@
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -103,7 +105,6 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Bitstream/BitstreamReader.h"
#include "llvm/Support/Casting.h"
@@ -118,9 +119,11 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SaveAndRestore.h"
+#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/VersionTuple.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
@@ -131,6 +134,7 @@
#include <limits>
#include <map>
#include <memory>
+#include <optional>
#include <string>
#include <system_error>
#include <tuple>
@@ -141,7 +145,6 @@ using namespace clang;
using namespace clang::serialization;
using namespace clang::serialization::reader;
using llvm::BitstreamCursor;
-using llvm::RoundingMode;
//===----------------------------------------------------------------------===//
// ChainedASTReaderListener implementation
@@ -205,11 +208,12 @@ bool ChainedASTReaderListener::ReadHeaderSearchOptions(
}
bool ChainedASTReaderListener::ReadPreprocessorOptions(
- const PreprocessorOptions &PPOpts, bool Complain,
+ const PreprocessorOptions &PPOpts, bool ReadMacros, bool Complain,
std::string &SuggestedPredefines) {
- return First->ReadPreprocessorOptions(PPOpts, Complain,
+ return First->ReadPreprocessorOptions(PPOpts, ReadMacros, Complain,
SuggestedPredefines) ||
- Second->ReadPreprocessorOptions(PPOpts, Complain, SuggestedPredefines);
+ Second->ReadPreprocessorOptions(PPOpts, ReadMacros, Complain,
+ SuggestedPredefines);
}
void ChainedASTReaderListener::ReadCounter(const serialization::ModuleFile &M,
@@ -274,12 +278,17 @@ static bool checkLanguageOptions(const LangOptions &LangOpts,
const LangOptions &ExistingLangOpts,
DiagnosticsEngine *Diags,
bool AllowCompatibleDifferences = true) {
-#define LANGOPT(Name, Bits, Default, Description) \
- if (ExistingLangOpts.Name != LangOpts.Name) { \
- if (Diags) \
- Diags->Report(diag::err_pch_langopt_mismatch) \
- << Description << LangOpts.Name << ExistingLangOpts.Name; \
- return true; \
+#define LANGOPT(Name, Bits, Default, Description) \
+ if (ExistingLangOpts.Name != LangOpts.Name) { \
+ if (Diags) { \
+ if (Bits == 1) \
+ Diags->Report(diag::err_pch_langopt_mismatch) \
+ << Description << LangOpts.Name << ExistingLangOpts.Name; \
+ else \
+ Diags->Report(diag::err_pch_langopt_value_mismatch) \
+ << Description; \
+ } \
+ return true; \
}
#define VALUE_LANGOPT(Name, Bits, Default, Description) \
@@ -312,7 +321,7 @@ static bool checkLanguageOptions(const LangOptions &LangOpts,
#define BENIGN_LANGOPT(Name, Bits, Default, Description)
#define BENIGN_ENUM_LANGOPT(Name, Type, Bits, Default, Description)
-#define BENIGN_VALUE_LANGOPT(Name, Type, Bits, Default, Description)
+#define BENIGN_VALUE_LANGOPT(Name, Bits, Default, Description)
#include "clang/Basic/LangOptions.def"
if (ExistingLangOpts.ModuleFeatures != LangOpts.ModuleFeatures) {
@@ -499,14 +508,17 @@ static bool isExtHandlingFromDiagsError(DiagnosticsEngine &Diags) {
}
static bool checkDiagnosticMappings(DiagnosticsEngine &StoredDiags,
- DiagnosticsEngine &Diags,
- bool IsSystem, bool Complain) {
+ DiagnosticsEngine &Diags, bool IsSystem,
+ bool SystemHeaderWarningsInModule,
+ bool Complain) {
// Top-level options
if (IsSystem) {
if (Diags.getSuppressSystemWarnings())
return false;
- // If -Wsystem-headers was not enabled before, be conservative
- if (StoredDiags.getSuppressSystemWarnings()) {
+ // If -Wsystem-headers was not enabled before, and it was not explicit,
+ // be conservative
+ if (StoredDiags.getSuppressSystemWarnings() &&
+ !SystemHeaderWarningsInModule) {
if (Complain)
Diags.Report(diag::err_pch_diagopt_mismatch) << "-Wsystem-headers";
return true;
@@ -555,7 +567,8 @@ static Module *getTopImportImplicitModule(ModuleManager &ModuleMgr,
StringRef ModuleName = TopImport->ModuleName;
assert(!ModuleName.empty() && "diagnostic options read before module name");
- Module *M = PP.getHeaderSearchInfo().lookupModule(ModuleName);
+ Module *M =
+ PP.getHeaderSearchInfo().lookupModule(ModuleName, TopImport->ImportLoc);
assert(M && "missing module");
return M;
}
@@ -577,10 +590,17 @@ bool PCHValidator::ReadDiagnosticOptions(
if (!TopM)
return false;
+ Module *Importer = PP.getCurrentModule();
+
+ DiagnosticOptions &ExistingOpts = ExistingDiags.getDiagnosticOptions();
+ bool SystemHeaderWarningsInModule =
+ Importer && llvm::is_contained(ExistingOpts.SystemHeaderWarningsModules,
+ Importer->Name);
+
// FIXME: if the diagnostics are incompatible, save a DiagnosticOptions that
// contains the union of their flags.
return checkDiagnosticMappings(*Diags, ExistingDiags, TopM->IsSystem,
- Complain);
+ SystemHeaderWarningsInModule, Complain);
}
/// Collect the macro definitions provided by the given preprocessor
@@ -621,79 +641,119 @@ collectMacroDefinitions(const PreprocessorOptions &PPOpts,
}
}
+enum OptionValidation {
+ OptionValidateNone,
+ OptionValidateContradictions,
+ OptionValidateStrictMatches,
+};
+
/// Check the preprocessor options deserialized from the control block
/// against the preprocessor options in an existing preprocessor.
///
/// \param Diags If non-null, produce diagnostics for any mismatches incurred.
-/// \param Validate If true, validate preprocessor options. If false, allow
-/// macros defined by \p ExistingPPOpts to override those defined by
-/// \p PPOpts in SuggestedPredefines.
-static bool checkPreprocessorOptions(const PreprocessorOptions &PPOpts,
- const PreprocessorOptions &ExistingPPOpts,
- DiagnosticsEngine *Diags,
- FileManager &FileMgr,
- std::string &SuggestedPredefines,
- const LangOptions &LangOpts,
- bool Validate = true) {
- // Check macro definitions.
- MacroDefinitionsMap ASTFileMacros;
- collectMacroDefinitions(PPOpts, ASTFileMacros);
- MacroDefinitionsMap ExistingMacros;
- SmallVector<StringRef, 4> ExistingMacroNames;
- collectMacroDefinitions(ExistingPPOpts, ExistingMacros, &ExistingMacroNames);
-
- for (unsigned I = 0, N = ExistingMacroNames.size(); I != N; ++I) {
- // Dig out the macro definition in the existing preprocessor options.
- StringRef MacroName = ExistingMacroNames[I];
- std::pair<StringRef, bool> Existing = ExistingMacros[MacroName];
-
- // Check whether we know anything about this macro name or not.
- llvm::StringMap<std::pair<StringRef, bool /*IsUndef*/>>::iterator Known =
- ASTFileMacros.find(MacroName);
- if (!Validate || Known == ASTFileMacros.end()) {
- // FIXME: Check whether this identifier was referenced anywhere in the
- // AST file. If so, we should reject the AST file. Unfortunately, this
- // information isn't in the control block. What shall we do about it?
-
- if (Existing.second) {
- SuggestedPredefines += "#undef ";
- SuggestedPredefines += MacroName.str();
- SuggestedPredefines += '\n';
- } else {
- SuggestedPredefines += "#define ";
- SuggestedPredefines += MacroName.str();
- SuggestedPredefines += ' ';
- SuggestedPredefines += Existing.first.str();
- SuggestedPredefines += '\n';
+/// \param Validation If set to OptionValidateNone, ignore differences in
+/// preprocessor options. If set to OptionValidateContradictions,
+/// require that options passed both in the AST file and on the command
+/// line (-D or -U) match, but tolerate options missing in one or the
+/// other. If set to OptionValidateContradictions, require that there
+/// are no differences in the options between the two.
+static bool checkPreprocessorOptions(
+ const PreprocessorOptions &PPOpts,
+ const PreprocessorOptions &ExistingPPOpts, bool ReadMacros,
+ DiagnosticsEngine *Diags, FileManager &FileMgr,
+ std::string &SuggestedPredefines, const LangOptions &LangOpts,
+ OptionValidation Validation = OptionValidateContradictions) {
+ if (ReadMacros) {
+ // Check macro definitions.
+ MacroDefinitionsMap ASTFileMacros;
+ collectMacroDefinitions(PPOpts, ASTFileMacros);
+ MacroDefinitionsMap ExistingMacros;
+ SmallVector<StringRef, 4> ExistingMacroNames;
+ collectMacroDefinitions(ExistingPPOpts, ExistingMacros,
+ &ExistingMacroNames);
+
+ // Use a line marker to enter the <command line> file, as the defines and
+ // undefines here will have come from the command line.
+ SuggestedPredefines += "# 1 \"<command line>\" 1\n";
+
+ for (unsigned I = 0, N = ExistingMacroNames.size(); I != N; ++I) {
+ // Dig out the macro definition in the existing preprocessor options.
+ StringRef MacroName = ExistingMacroNames[I];
+ std::pair<StringRef, bool> Existing = ExistingMacros[MacroName];
+
+ // Check whether we know anything about this macro name or not.
+ llvm::StringMap<std::pair<StringRef, bool /*IsUndef*/>>::iterator Known =
+ ASTFileMacros.find(MacroName);
+ if (Validation == OptionValidateNone || Known == ASTFileMacros.end()) {
+ if (Validation == OptionValidateStrictMatches) {
+ // If strict matches are requested, don't tolerate any extra defines
+ // on the command line that are missing in the AST file.
+ if (Diags) {
+ Diags->Report(diag::err_pch_macro_def_undef) << MacroName << true;
+ }
+ return true;
+ }
+ // FIXME: Check whether this identifier was referenced anywhere in the
+ // AST file. If so, we should reject the AST file. Unfortunately, this
+ // information isn't in the control block. What shall we do about it?
+
+ if (Existing.second) {
+ SuggestedPredefines += "#undef ";
+ SuggestedPredefines += MacroName.str();
+ SuggestedPredefines += '\n';
+ } else {
+ SuggestedPredefines += "#define ";
+ SuggestedPredefines += MacroName.str();
+ SuggestedPredefines += ' ';
+ SuggestedPredefines += Existing.first.str();
+ SuggestedPredefines += '\n';
+ }
+ continue;
+ }
+
+ // If the macro was defined in one but undef'd in the other, we have a
+ // conflict.
+ if (Existing.second != Known->second.second) {
+ if (Diags) {
+ Diags->Report(diag::err_pch_macro_def_undef)
+ << MacroName << Known->second.second;
+ }
+ return true;
+ }
+
+ // If the macro was #undef'd in both, or if the macro bodies are
+ // identical, it's fine.
+ if (Existing.second || Existing.first == Known->second.first) {
+ ASTFileMacros.erase(Known);
+ continue;
}
- continue;
- }
- // If the macro was defined in one but undef'd in the other, we have a
- // conflict.
- if (Existing.second != Known->second.second) {
+ // The macro bodies differ; complain.
if (Diags) {
- Diags->Report(diag::err_pch_macro_def_undef)
- << MacroName << Known->second.second;
+ Diags->Report(diag::err_pch_macro_def_conflict)
+ << MacroName << Known->second.first << Existing.first;
}
return true;
}
- // If the macro was #undef'd in both, or if the macro bodies are identical,
- // it's fine.
- if (Existing.second || Existing.first == Known->second.first)
- continue;
+ // Leave the <command line> file and return to <built-in>.
+ SuggestedPredefines += "# 1 \"<built-in>\" 2\n";
- // The macro bodies differ; complain.
- if (Diags) {
- Diags->Report(diag::err_pch_macro_def_conflict)
- << MacroName << Known->second.first << Existing.first;
+ if (Validation == OptionValidateStrictMatches) {
+ // If strict matches are requested, don't tolerate any extra defines in
+ // the AST file that are missing on the command line.
+ for (const auto &MacroName : ASTFileMacros.keys()) {
+ if (Diags) {
+ Diags->Report(diag::err_pch_macro_def_undef) << MacroName << false;
+ }
+ return true;
+ }
}
- return true;
}
// Check whether we're using predefines.
- if (PPOpts.UsePredefines != ExistingPPOpts.UsePredefines && Validate) {
+ if (PPOpts.UsePredefines != ExistingPPOpts.UsePredefines &&
+ Validation != OptionValidateNone) {
if (Diags) {
Diags->Report(diag::err_pch_undef) << ExistingPPOpts.UsePredefines;
}
@@ -702,7 +762,8 @@ static bool checkPreprocessorOptions(const PreprocessorOptions &PPOpts,
// Detailed record is important since it is used for the module cache hash.
if (LangOpts.Modules &&
- PPOpts.DetailedRecord != ExistingPPOpts.DetailedRecord && Validate) {
+ PPOpts.DetailedRecord != ExistingPPOpts.DetailedRecord &&
+ Validation != OptionValidateNone) {
if (Diags) {
Diags->Report(diag::err_pch_pp_detailed_record) << PPOpts.DetailedRecord;
}
@@ -726,8 +787,7 @@ static bool checkPreprocessorOptions(const PreprocessorOptions &PPOpts,
if (File == ExistingPPOpts.ImplicitPCHInclude)
continue;
- if (std::find(PPOpts.Includes.begin(), PPOpts.Includes.end(), File)
- != PPOpts.Includes.end())
+ if (llvm::is_contained(PPOpts.Includes, File))
continue;
SuggestedPredefines += "#include \"";
@@ -737,9 +797,7 @@ static bool checkPreprocessorOptions(const PreprocessorOptions &PPOpts,
for (unsigned I = 0, N = ExistingPPOpts.MacroIncludes.size(); I != N; ++I) {
StringRef File = ExistingPPOpts.MacroIncludes[I];
- if (std::find(PPOpts.MacroIncludes.begin(), PPOpts.MacroIncludes.end(),
- File)
- != PPOpts.MacroIncludes.end())
+ if (llvm::is_contained(PPOpts.MacroIncludes, File))
continue;
SuggestedPredefines += "#__include_macros \"";
@@ -751,28 +809,22 @@ static bool checkPreprocessorOptions(const PreprocessorOptions &PPOpts,
}
bool PCHValidator::ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
- bool Complain,
+ bool ReadMacros, bool Complain,
std::string &SuggestedPredefines) {
const PreprocessorOptions &ExistingPPOpts = PP.getPreprocessorOpts();
- return checkPreprocessorOptions(PPOpts, ExistingPPOpts,
- Complain? &Reader.Diags : nullptr,
- PP.getFileManager(),
- SuggestedPredefines,
- PP.getLangOpts());
+ return checkPreprocessorOptions(
+ PPOpts, ExistingPPOpts, ReadMacros, Complain ? &Reader.Diags : nullptr,
+ PP.getFileManager(), SuggestedPredefines, PP.getLangOpts());
}
bool SimpleASTReaderListener::ReadPreprocessorOptions(
- const PreprocessorOptions &PPOpts,
- bool Complain,
- std::string &SuggestedPredefines) {
- return checkPreprocessorOptions(PPOpts,
- PP.getPreprocessorOpts(),
- nullptr,
- PP.getFileManager(),
- SuggestedPredefines,
- PP.getLangOpts(),
- false);
+ const PreprocessorOptions &PPOpts, bool ReadMacros, bool Complain,
+ std::string &SuggestedPredefines) {
+ return checkPreprocessorOptions(PPOpts, PP.getPreprocessorOpts(), ReadMacros,
+ nullptr, PP.getFileManager(),
+ SuggestedPredefines, PP.getLangOpts(),
+ OptionValidateNone);
}
/// Check the header search options deserialized from the control block
@@ -860,9 +912,10 @@ ASTSelectorLookupTrait::ReadKey(const unsigned char* d, unsigned) {
using namespace llvm::support;
SelectorTable &SelTable = Reader.getContext().Selectors;
- unsigned N = endian::readNext<uint16_t, little, unaligned>(d);
+ unsigned N =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(d);
IdentifierInfo *FirstII = Reader.getLocalIdentifier(
- F, endian::readNext<uint32_t, little, unaligned>(d));
+ F, endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d));
if (N == 0)
return SelTable.getNullarySelector(FirstII);
else if (N == 1)
@@ -872,7 +925,7 @@ ASTSelectorLookupTrait::ReadKey(const unsigned char* d, unsigned) {
Args.push_back(FirstII);
for (unsigned I = 1; I != N; ++I)
Args.push_back(Reader.getLocalIdentifier(
- F, endian::readNext<uint32_t, little, unaligned>(d)));
+ F, endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d)));
return SelTable.getSelector(N, Args.data());
}
@@ -885,9 +938,11 @@ ASTSelectorLookupTrait::ReadData(Selector, const unsigned char* d,
data_type Result;
Result.ID = Reader.getGlobalSelectorID(
- F, endian::readNext<uint32_t, little, unaligned>(d));
- unsigned FullInstanceBits = endian::readNext<uint16_t, little, unaligned>(d);
- unsigned FullFactoryBits = endian::readNext<uint16_t, little, unaligned>(d);
+ F, endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d));
+ unsigned FullInstanceBits =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(d);
+ unsigned FullFactoryBits =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(d);
Result.InstanceBits = FullInstanceBits & 0x3;
Result.InstanceHasMoreThanOneDecl = (FullInstanceBits >> 2) & 0x1;
Result.FactoryBits = FullFactoryBits & 0x3;
@@ -898,14 +953,16 @@ ASTSelectorLookupTrait::ReadData(Selector, const unsigned char* d,
// Load instance methods
for (unsigned I = 0; I != NumInstanceMethods; ++I) {
if (ObjCMethodDecl *Method = Reader.GetLocalDeclAs<ObjCMethodDecl>(
- F, endian::readNext<uint32_t, little, unaligned>(d)))
+ F,
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d)))
Result.Instance.push_back(Method);
}
// Load factory methods
for (unsigned I = 0; I != NumFactoryMethods; ++I) {
if (ObjCMethodDecl *Method = Reader.GetLocalDeclAs<ObjCMethodDecl>(
- F, endian::readNext<uint32_t, little, unaligned>(d)))
+ F,
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d)))
Result.Factory.push_back(Method);
}
@@ -946,7 +1003,8 @@ static bool readBit(unsigned &Bits) {
IdentID ASTIdentifierLookupTrait::ReadIdentifierID(const unsigned char *d) {
using namespace llvm::support;
- unsigned RawID = endian::readNext<uint32_t, little, unaligned>(d);
+ unsigned RawID =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d);
return Reader.getGlobalIdentifierID(F, RawID >> 1);
}
@@ -964,7 +1022,8 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
unsigned DataLen) {
using namespace llvm::support;
- unsigned RawID = endian::readNext<uint32_t, little, unaligned>(d);
+ unsigned RawID =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d);
bool IsInteresting = RawID & 0x01;
// Wipe out the "is interesting" bit.
@@ -987,8 +1046,10 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
return II;
}
- unsigned ObjCOrBuiltinID = endian::readNext<uint16_t, little, unaligned>(d);
- unsigned Bits = endian::readNext<uint16_t, little, unaligned>(d);
+ unsigned ObjCOrBuiltinID =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(d);
+ unsigned Bits =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(d);
bool CPlusPlusOperatorKeyword = readBit(Bits);
bool HasRevertedTokenIDToIdentifier = readBit(Bits);
bool Poisoned = readBit(Bits);
@@ -1017,7 +1078,7 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
// definition.
if (HadMacroDefinition) {
uint32_t MacroDirectivesOffset =
- endian::readNext<uint32_t, little, unaligned>(d);
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d);
DataLen -= 4;
Reader.addPendingMacro(II, &F, MacroDirectivesOffset);
@@ -1031,7 +1092,8 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
SmallVector<uint32_t, 4> DeclIDs;
for (; DataLen > 0; DataLen -= 4)
DeclIDs.push_back(Reader.getGlobalDeclID(
- F, endian::readNext<uint32_t, little, unaligned>(d)));
+ F,
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d)));
Reader.SetGloballyVisibleDecls(II, DeclIDs);
}
@@ -1100,7 +1162,8 @@ ModuleFile *
ASTDeclContextNameLookupTrait::ReadFileRef(const unsigned char *&d) {
using namespace llvm::support;
- uint32_t ModuleFileID = endian::readNext<uint32_t, little, unaligned>(d);
+ uint32_t ModuleFileID =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d);
return Reader.getLocalModuleFile(F, ModuleFileID);
}
@@ -1120,15 +1183,18 @@ ASTDeclContextNameLookupTrait::ReadKey(const unsigned char *d, unsigned) {
case DeclarationName::CXXLiteralOperatorName:
case DeclarationName::CXXDeductionGuideName:
Data = (uint64_t)Reader.getLocalIdentifier(
- F, endian::readNext<uint32_t, little, unaligned>(d));
+ F, endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d));
break;
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
Data =
- (uint64_t)Reader.getLocalSelector(
- F, endian::readNext<uint32_t, little, unaligned>(
- d)).getAsOpaquePtr();
+ (uint64_t)Reader
+ .getLocalSelector(
+ F,
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(
+ d))
+ .getAsOpaquePtr();
break;
case DeclarationName::CXXOperatorName:
Data = *d++; // OverloadedOperatorKind
@@ -1151,7 +1217,8 @@ void ASTDeclContextNameLookupTrait::ReadDataInto(internal_key_type,
using namespace llvm::support;
for (unsigned NumDecls = DataLen / 4; NumDecls; --NumDecls) {
- uint32_t LocalID = endian::readNext<uint32_t, little, unaligned>(d);
+ uint32_t LocalID =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d);
Val.insert(Reader.getGlobalDeclID(F, LocalID));
}
}
@@ -1197,7 +1264,7 @@ bool ASTReader::ReadLexicalDeclContextStorage(ModuleFile &M,
auto &Lex = LexicalDecls[DC];
if (!Lex.first) {
Lex = std::make_pair(
- &M, llvm::makeArrayRef(
+ &M, llvm::ArrayRef(
reinterpret_cast<const llvm::support::unaligned_uint32_t *>(
Blob.data()),
Blob.size() / 4));
@@ -1263,7 +1330,29 @@ void ASTReader::Error(unsigned DiagID, StringRef Arg1, StringRef Arg2,
}
void ASTReader::Error(llvm::Error &&Err) const {
- Error(toString(std::move(Err)));
+ llvm::Error RemainingErr =
+ handleErrors(std::move(Err), [this](const DiagnosticError &E) {
+ auto Diag = E.getDiagnostic().second;
+
+ // Ideally we'd just emit it, but have to handle a possible in-flight
+ // diagnostic. Note that the location is currently ignored as well.
+ auto NumArgs = Diag.getStorage()->NumDiagArgs;
+ assert(NumArgs <= 3 && "Can only have up to 3 arguments");
+ StringRef Arg1, Arg2, Arg3;
+ switch (NumArgs) {
+ case 3:
+ Arg3 = Diag.getStringArg(2);
+ [[fallthrough]];
+ case 2:
+ Arg2 = Diag.getStringArg(1);
+ [[fallthrough]];
+ case 1:
+ Arg1 = Diag.getStringArg(0);
+ }
+ Error(Diag.getDiagID(), Arg1, Arg2, Arg3);
+ });
+ if (RemainingErr)
+ Error(toString(std::move(RemainingErr)));
}
//===----------------------------------------------------------------------===//
@@ -1271,9 +1360,7 @@ void ASTReader::Error(llvm::Error &&Err) const {
//===----------------------------------------------------------------------===//
/// Read the line table in the source manager block.
-/// \returns true if there was an error.
-bool ASTReader::ParseLineTable(ModuleFile &F,
- const RecordData &Record) {
+void ASTReader::ParseLineTable(ModuleFile &F, const RecordData &Record) {
unsigned Idx = 0;
LineTableInfo &LineTable = SourceMgr.getLineTable();
@@ -1290,10 +1377,7 @@ bool ASTReader::ParseLineTable(ModuleFile &F,
// Parse the line entries
std::vector<LineEntry> Entries;
while (Idx < Record.size()) {
- int FID = Record[Idx++];
- assert(FID >= 0 && "Serialized line entries for non-local file.");
- // Remap FileID from 1-based old view.
- FID += F.SLocEntryBaseID - 1;
+ FileID FID = ReadFileID(F, Record, Idx);
// Extract the line entries
unsigned NumEntries = Record[Idx++];
@@ -1310,14 +1394,12 @@ bool ASTReader::ParseLineTable(ModuleFile &F,
Entries.push_back(LineEntry::get(FileOffset, LineNo, FilenameID,
FileKind, IncludeOffset));
}
- LineTable.AddEntry(FileID::get(FID), Entries);
+ LineTable.AddEntry(FID, Entries);
}
-
- return false;
}
/// Read a source manager block
-bool ASTReader::ReadSourceManagerBlock(ModuleFile &F) {
+llvm::Error ASTReader::ReadSourceManagerBlock(ModuleFile &F) {
using namespace SrcMgr;
BitstreamCursor &SLocEntryCursor = F.SLocEntryCursor;
@@ -1329,36 +1411,29 @@ bool ASTReader::ReadSourceManagerBlock(ModuleFile &F) {
SLocEntryCursor = F.Stream;
// The stream itself is going to skip over the source manager block.
- if (llvm::Error Err = F.Stream.SkipBlock()) {
- Error(std::move(Err));
- return true;
- }
+ if (llvm::Error Err = F.Stream.SkipBlock())
+ return Err;
// Enter the source manager block.
- if (llvm::Error Err =
- SLocEntryCursor.EnterSubBlock(SOURCE_MANAGER_BLOCK_ID)) {
- Error(std::move(Err));
- return true;
- }
+ if (llvm::Error Err = SLocEntryCursor.EnterSubBlock(SOURCE_MANAGER_BLOCK_ID))
+ return Err;
F.SourceManagerBlockStartOffset = SLocEntryCursor.GetCurrentBitNo();
RecordData Record;
while (true) {
Expected<llvm::BitstreamEntry> MaybeE =
SLocEntryCursor.advanceSkippingSubblocks();
- if (!MaybeE) {
- Error(MaybeE.takeError());
- return true;
- }
+ if (!MaybeE)
+ return MaybeE.takeError();
llvm::BitstreamEntry E = MaybeE.get();
switch (E.Kind) {
case llvm::BitstreamEntry::SubBlock: // Handled for us already.
case llvm::BitstreamEntry::Error:
- Error("malformed block record in AST file");
- return true;
+ return llvm::createStringError(std::errc::illegal_byte_sequence,
+ "malformed block record in AST file");
case llvm::BitstreamEntry::EndBlock:
- return false;
+ return llvm::Error::success();
case llvm::BitstreamEntry::Record:
// The interesting case.
break;
@@ -1369,10 +1444,8 @@ bool ASTReader::ReadSourceManagerBlock(ModuleFile &F) {
StringRef Blob;
Expected<unsigned> MaybeRecord =
SLocEntryCursor.readRecord(E.ID, Record, &Blob);
- if (!MaybeRecord) {
- Error(MaybeRecord.takeError());
- return true;
- }
+ if (!MaybeRecord)
+ return MaybeRecord.takeError();
switch (MaybeRecord.get()) {
default: // Default behavior: ignore.
break;
@@ -1381,44 +1454,82 @@ bool ASTReader::ReadSourceManagerBlock(ModuleFile &F) {
case SM_SLOC_BUFFER_ENTRY:
case SM_SLOC_EXPANSION_ENTRY:
// Once we hit one of the source location entries, we're done.
- return false;
+ return llvm::Error::success();
}
}
}
-/// If a header file is not found at the path that we expect it to be
-/// and the PCH file was moved from its original location, try to resolve the
-/// file by assuming that header+PCH were moved together and the header is in
-/// the same place relative to the PCH.
-static std::string
-resolveFileRelativeToOriginalDir(const std::string &Filename,
- const std::string &OriginalDir,
- const std::string &CurrDir) {
- assert(OriginalDir != CurrDir &&
- "No point trying to resolve the file if the PCH dir didn't change");
+llvm::Expected<SourceLocation::UIntTy>
+ASTReader::readSLocOffset(ModuleFile *F, unsigned Index) {
+ BitstreamCursor &Cursor = F->SLocEntryCursor;
+ SavedStreamPosition SavedPosition(Cursor);
+ if (llvm::Error Err = Cursor.JumpToBit(F->SLocEntryOffsetsBase +
+ F->SLocEntryOffsets[Index]))
+ return std::move(Err);
- using namespace llvm::sys;
+ Expected<llvm::BitstreamEntry> MaybeEntry = Cursor.advance();
+ if (!MaybeEntry)
+ return MaybeEntry.takeError();
+
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
+ if (Entry.Kind != llvm::BitstreamEntry::Record)
+ return llvm::createStringError(
+ std::errc::illegal_byte_sequence,
+ "incorrectly-formatted source location entry in AST file");
- SmallString<128> filePath(Filename);
- fs::make_absolute(filePath);
- assert(path::is_absolute(OriginalDir));
- SmallString<128> currPCHPath(CurrDir);
+ RecordData Record;
+ StringRef Blob;
+ Expected<unsigned> MaybeSLOC = Cursor.readRecord(Entry.ID, Record, &Blob);
+ if (!MaybeSLOC)
+ return MaybeSLOC.takeError();
- path::const_iterator fileDirI = path::begin(path::parent_path(filePath)),
- fileDirE = path::end(path::parent_path(filePath));
- path::const_iterator origDirI = path::begin(OriginalDir),
- origDirE = path::end(OriginalDir);
- // Skip the common path components from filePath and OriginalDir.
- while (fileDirI != fileDirE && origDirI != origDirE &&
- *fileDirI == *origDirI) {
- ++fileDirI;
- ++origDirI;
+ switch (MaybeSLOC.get()) {
+ default:
+ return llvm::createStringError(
+ std::errc::illegal_byte_sequence,
+ "incorrectly-formatted source location entry in AST file");
+ case SM_SLOC_FILE_ENTRY:
+ case SM_SLOC_BUFFER_ENTRY:
+ case SM_SLOC_EXPANSION_ENTRY:
+ return F->SLocEntryBaseOffset + Record[0];
}
- for (; origDirI != origDirE; ++origDirI)
- path::append(currPCHPath, "..");
- path::append(currPCHPath, fileDirI, fileDirE);
- path::append(currPCHPath, path::filename(Filename));
- return std::string(currPCHPath.str());
+}
+
+int ASTReader::getSLocEntryID(SourceLocation::UIntTy SLocOffset) {
+ auto SLocMapI =
+ GlobalSLocOffsetMap.find(SourceManager::MaxLoadedOffset - SLocOffset - 1);
+ assert(SLocMapI != GlobalSLocOffsetMap.end() &&
+ "Corrupted global sloc offset map");
+ ModuleFile *F = SLocMapI->second;
+
+ bool Invalid = false;
+
+ auto It = llvm::upper_bound(
+ llvm::index_range(0, F->LocalNumSLocEntries), SLocOffset,
+ [&](SourceLocation::UIntTy Offset, std::size_t LocalIndex) {
+ int ID = F->SLocEntryBaseID + LocalIndex;
+ std::size_t Index = -ID - 2;
+ if (!SourceMgr.SLocEntryOffsetLoaded[Index]) {
+ assert(!SourceMgr.SLocEntryLoaded[Index]);
+ auto MaybeEntryOffset = readSLocOffset(F, LocalIndex);
+ if (!MaybeEntryOffset) {
+ Error(MaybeEntryOffset.takeError());
+ Invalid = true;
+ return true;
+ }
+ SourceMgr.LoadedSLocEntryTable[Index] =
+ SrcMgr::SLocEntry::getOffsetOnly(*MaybeEntryOffset);
+ SourceMgr.SLocEntryOffsetLoaded[Index] = true;
+ }
+ return Offset < SourceMgr.LoadedSLocEntryTable[Index].getOffset();
+ });
+
+ if (Invalid)
+ return 0;
+
+ // The iterator points to the first entry with start offset greater than the
+ // offset of interest. The previous entry must contain the offset of interest.
+ return F->SLocEntryBaseID + *std::prev(It);
}
bool ASTReader::ReadSLocEntry(int ID) {
@@ -1453,18 +1564,25 @@ bool ASTReader::ReadSLocEntry(int ID) {
unsigned RecCode = MaybeRecCode.get();
if (RecCode == SM_SLOC_BUFFER_BLOB_COMPRESSED) {
- if (!llvm::zlib::isAvailable()) {
- Error("zlib is not available");
+ // Inspect the first byte to differentiate zlib (\x78) and zstd
+ // (little-endian 0xFD2FB528).
+ const llvm::compression::Format F =
+ Blob.size() > 0 && Blob.data()[0] == 0x78
+ ? llvm::compression::Format::Zlib
+ : llvm::compression::Format::Zstd;
+ if (const char *Reason = llvm::compression::getReasonIfUnsupported(F)) {
+ Error(Reason);
return nullptr;
}
- SmallString<0> Uncompressed;
- if (llvm::Error E =
- llvm::zlib::uncompress(Blob, Uncompressed, Record[0])) {
+ SmallVector<uint8_t, 0> Decompressed;
+ if (llvm::Error E = llvm::compression::decompress(
+ F, llvm::arrayRefFromStringRef(Blob), Decompressed, Record[0])) {
Error("could not decompress embedded file contents: " +
llvm::toString(std::move(E)));
return nullptr;
}
- return llvm::MemoryBuffer::getMemBufferCopy(Uncompressed, Name);
+ return llvm::MemoryBuffer::getMemBufferCopy(
+ llvm::toStringRef(Decompressed), Name);
} else if (RecCode == SM_SLOC_BUFFER_BLOB) {
return llvm::MemoryBuffer::getMemBuffer(Blob.drop_back(1), Name, true);
} else {
@@ -1515,7 +1633,7 @@ bool ASTReader::ReadSLocEntry(int ID) {
// we will also try to fail gracefully by setting up the SLocEntry.
unsigned InputID = Record[4];
InputFile IF = getInputFile(*F, InputID);
- Optional<FileEntryRef> File = IF.getFile();
+ OptionalFileEntryRef File = IF.getFile();
bool OverriddenBuffer = IF.isOverridden();
// Note that we only check if a File was returned. If it was out-of-date
@@ -1543,8 +1661,8 @@ bool ASTReader::ReadSLocEntry(int ID) {
if (NumFileDecls && ContextObj) {
const DeclID *FirstDecl = F->FileSortedDecls + Record[6];
assert(F->FileSortedDecls && "FILE_SORTED_DECLS not encountered yet ?");
- FileDeclIDs[FID] = FileDeclsInfo(F, llvm::makeArrayRef(FirstDecl,
- NumFileDecls));
+ FileDeclIDs[FID] =
+ FileDeclsInfo(F, llvm::ArrayRef(FirstDecl, NumFileDecls));
}
const SrcMgr::ContentCache &ContentCache =
@@ -1574,20 +1692,24 @@ bool ASTReader::ReadSLocEntry(int ID) {
auto Buffer = ReadBuffer(SLocEntryCursor, Name);
if (!Buffer)
return true;
- SourceMgr.createFileID(std::move(Buffer), FileCharacter, ID,
- BaseOffset + Offset, IncludeLoc);
+ FileID FID = SourceMgr.createFileID(std::move(Buffer), FileCharacter, ID,
+ BaseOffset + Offset, IncludeLoc);
+ if (Record[3]) {
+ auto &FileInfo =
+ const_cast<SrcMgr::FileInfo &>(SourceMgr.getSLocEntry(FID).getFile());
+ FileInfo.setHasLineDirectives();
+ }
break;
}
case SM_SLOC_EXPANSION_ENTRY: {
- SourceLocation SpellingLoc = ReadSourceLocation(*F, Record[1]);
- SourceMgr.createExpansionLoc(SpellingLoc,
- ReadSourceLocation(*F, Record[2]),
- ReadSourceLocation(*F, Record[3]),
- Record[5],
- Record[4],
- ID,
- BaseOffset + Record[0]);
+ LocSeq::State Seq;
+ SourceLocation SpellingLoc = ReadSourceLocation(*F, Record[1], Seq);
+ SourceLocation ExpansionBegin = ReadSourceLocation(*F, Record[2], Seq);
+ SourceLocation ExpansionEnd = ReadSourceLocation(*F, Record[3], Seq);
+ SourceMgr.createExpansionLoc(SpellingLoc, ExpansionBegin, ExpansionEnd,
+ Record[5], Record[4], ID,
+ BaseOffset + Record[0]);
break;
}
}
@@ -1632,13 +1754,11 @@ SourceLocation ASTReader::getImportLocation(ModuleFile *F) {
/// Enter a subblock of the specified BlockID with the specified cursor. Read
/// the abbreviations that are at the top of the block and then leave the cursor
/// pointing into the block.
-bool ASTReader::ReadBlockAbbrevs(BitstreamCursor &Cursor, unsigned BlockID,
- uint64_t *StartOfBlockOffset) {
- if (llvm::Error Err = Cursor.EnterSubBlock(BlockID)) {
- // FIXME this drops errors on the floor.
- consumeError(std::move(Err));
- return true;
- }
+llvm::Error ASTReader::ReadBlockAbbrevs(BitstreamCursor &Cursor,
+ unsigned BlockID,
+ uint64_t *StartOfBlockOffset) {
+ if (llvm::Error Err = Cursor.EnterSubBlock(BlockID))
+ return Err;
if (StartOfBlockOffset)
*StartOfBlockOffset = Cursor.GetCurrentBitNo();
@@ -1646,40 +1766,70 @@ bool ASTReader::ReadBlockAbbrevs(BitstreamCursor &Cursor, unsigned BlockID,
while (true) {
uint64_t Offset = Cursor.GetCurrentBitNo();
Expected<unsigned> MaybeCode = Cursor.ReadCode();
- if (!MaybeCode) {
- // FIXME this drops errors on the floor.
- consumeError(MaybeCode.takeError());
- return true;
- }
+ if (!MaybeCode)
+ return MaybeCode.takeError();
unsigned Code = MaybeCode.get();
// We expect all abbrevs to be at the start of the block.
if (Code != llvm::bitc::DEFINE_ABBREV) {
- if (llvm::Error Err = Cursor.JumpToBit(Offset)) {
- // FIXME this drops errors on the floor.
- consumeError(std::move(Err));
- return true;
- }
- return false;
- }
- if (llvm::Error Err = Cursor.ReadAbbrevRecord()) {
- // FIXME this drops errors on the floor.
- consumeError(std::move(Err));
- return true;
+ if (llvm::Error Err = Cursor.JumpToBit(Offset))
+ return Err;
+ return llvm::Error::success();
}
+ if (llvm::Error Err = Cursor.ReadAbbrevRecord())
+ return Err;
}
}
-Token ASTReader::ReadToken(ModuleFile &F, const RecordDataImpl &Record,
+Token ASTReader::ReadToken(ModuleFile &M, const RecordDataImpl &Record,
unsigned &Idx) {
Token Tok;
Tok.startToken();
- Tok.setLocation(ReadSourceLocation(F, Record, Idx));
- Tok.setLength(Record[Idx++]);
- if (IdentifierInfo *II = getLocalIdentifier(F, Record[Idx++]))
- Tok.setIdentifierInfo(II);
+ Tok.setLocation(ReadSourceLocation(M, Record, Idx));
Tok.setKind((tok::TokenKind)Record[Idx++]);
Tok.setFlag((Token::TokenFlags)Record[Idx++]);
+
+ if (Tok.isAnnotation()) {
+ Tok.setAnnotationEndLoc(ReadSourceLocation(M, Record, Idx));
+ switch (Tok.getKind()) {
+ case tok::annot_pragma_loop_hint: {
+ auto *Info = new (PP.getPreprocessorAllocator()) PragmaLoopHintInfo;
+ Info->PragmaName = ReadToken(M, Record, Idx);
+ Info->Option = ReadToken(M, Record, Idx);
+ unsigned NumTokens = Record[Idx++];
+ SmallVector<Token, 4> Toks;
+ Toks.reserve(NumTokens);
+ for (unsigned I = 0; I < NumTokens; ++I)
+ Toks.push_back(ReadToken(M, Record, Idx));
+ Info->Toks = llvm::ArrayRef(Toks).copy(PP.getPreprocessorAllocator());
+ Tok.setAnnotationValue(static_cast<void *>(Info));
+ break;
+ }
+ case tok::annot_pragma_pack: {
+ auto *Info = new (PP.getPreprocessorAllocator()) Sema::PragmaPackInfo;
+ Info->Action = static_cast<Sema::PragmaMsStackAction>(Record[Idx++]);
+ auto SlotLabel = ReadString(Record, Idx);
+ Info->SlotLabel =
+ llvm::StringRef(SlotLabel).copy(PP.getPreprocessorAllocator());
+ Info->Alignment = ReadToken(M, Record, Idx);
+ Tok.setAnnotationValue(static_cast<void *>(Info));
+ break;
+ }
+ // Some annotation tokens do not use the PtrData field.
+ case tok::annot_pragma_openmp:
+ case tok::annot_pragma_openmp_end:
+ case tok::annot_pragma_unused:
+ case tok::annot_pragma_openacc:
+ case tok::annot_pragma_openacc_end:
+ break;
+ default:
+ llvm_unreachable("missing deserialization code for annotation token");
+ }
+ } else {
+ Tok.setLength(Record[Idx++]);
+ if (IdentifierInfo *II = getLocalIdentifier(M, Record[Idx++]))
+ Tok.setIdentifierInfo(II);
+ }
return Tok;
}
@@ -1698,6 +1848,7 @@ MacroInfo *ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) {
RecordData Record;
SmallVector<IdentifierInfo*, 16> MacroParams;
MacroInfo *Macro = nullptr;
+ llvm::MutableArrayRef<Token> MacroTokens;
while (true) {
// Advance to the next record, but if we get to the end of the block, don't
@@ -1752,7 +1903,8 @@ MacroInfo *ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) {
MI->setDefinitionEndLoc(ReadSourceLocation(F, Record, NextIndex));
MI->setIsUsed(Record[NextIndex++]);
MI->setUsedForHeaderGuard(Record[NextIndex++]);
-
+ MacroTokens = MI->allocateTokens(Record[NextIndex++],
+ PP.getPreprocessorAllocator());
if (RecType == PP_MACRO_FUNCTION_LIKE) {
// Decode function-like macro info.
bool isC99VarArgs = Record[NextIndex++];
@@ -1797,10 +1949,14 @@ MacroInfo *ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) {
// If we see a TOKEN before a PP_MACRO_*, then the file is
// erroneous, just pretend we didn't see this.
if (!Macro) break;
+ if (MacroTokens.empty()) {
+ Error("unexpected number of macro tokens for a macro in AST file");
+ return Macro;
+ }
unsigned Idx = 0;
- Token Tok = ReadToken(F, Record, Idx);
- Macro->AddTokenToBody(Tok);
+ MacroTokens[0] = ReadToken(F, Record, Idx);
+ MacroTokens = MacroTokens.drop_front();
break;
}
}
@@ -1821,15 +1977,30 @@ ASTReader::getGlobalPreprocessedEntityID(ModuleFile &M,
return LocalID + I->second;
}
+const FileEntry *HeaderFileInfoTrait::getFile(const internal_key_type &Key) {
+ FileManager &FileMgr = Reader.getFileManager();
+ if (!Key.Imported) {
+ if (auto File = FileMgr.getFile(Key.Filename))
+ return *File;
+ return nullptr;
+ }
+
+ std::string Resolved = std::string(Key.Filename);
+ Reader.ResolveImportedPath(M, Resolved);
+ if (auto File = FileMgr.getFile(Resolved))
+ return *File;
+ return nullptr;
+}
+
unsigned HeaderFileInfoTrait::ComputeHash(internal_key_ref ikey) {
return llvm::hash_combine(ikey.Size, ikey.ModTime);
}
HeaderFileInfoTrait::internal_key_type
-HeaderFileInfoTrait::GetInternalKey(const FileEntry *FE) {
- internal_key_type ikey = {FE->getSize(),
- M.HasTimestamps ? FE->getModificationTime() : 0,
- FE->getName(), /*Imported*/ false};
+HeaderFileInfoTrait::GetInternalKey(external_key_type ekey) {
+ internal_key_type ikey = {ekey.getSize(),
+ M.HasTimestamps ? ekey.getModificationTime() : 0,
+ ekey.getName(), /*Imported*/ false};
return ikey;
}
@@ -1841,23 +2012,8 @@ bool HeaderFileInfoTrait::EqualKey(internal_key_ref a, internal_key_ref b) {
return true;
// Determine whether the actual files are equivalent.
- FileManager &FileMgr = Reader.getFileManager();
- auto GetFile = [&](const internal_key_type &Key) -> const FileEntry* {
- if (!Key.Imported) {
- if (auto File = FileMgr.getFile(Key.Filename))
- return *File;
- return nullptr;
- }
-
- std::string Resolved = std::string(Key.Filename);
- Reader.ResolveImportedPath(M, Resolved);
- if (auto File = FileMgr.getFile(Resolved))
- return *File;
- return nullptr;
- };
-
- const FileEntry *FEA = GetFile(a);
- const FileEntry *FEB = GetFile(b);
+ const FileEntry *FEA = getFile(a);
+ const FileEntry *FEB = getFile(b);
return FEA && FEA == FEB;
}
@@ -1871,8 +2027,10 @@ HeaderFileInfoTrait::ReadKey(const unsigned char *d, unsigned) {
using namespace llvm::support;
internal_key_type ikey;
- ikey.Size = off_t(endian::readNext<uint64_t, little, unaligned>(d));
- ikey.ModTime = time_t(endian::readNext<uint64_t, little, unaligned>(d));
+ ikey.Size =
+ off_t(endian::readNext<uint64_t, llvm::endianness::little, unaligned>(d));
+ ikey.ModTime = time_t(
+ endian::readNext<uint64_t, llvm::endianness::little, unaligned>(d));
ikey.Filename = (const char *)d;
ikey.Imported = true;
return ikey;
@@ -1886,19 +2044,23 @@ HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d,
const unsigned char *End = d + DataLen;
HeaderFileInfo HFI;
unsigned Flags = *d++;
+
+ bool Included = (Flags >> 6) & 0x01;
+ if (Included)
+ if (const FileEntry *FE = getFile(key))
+ // Not using \c Preprocessor::markIncluded(), since that would attempt to
+ // deserialize this header file info again.
+ Reader.getPreprocessor().getIncludedFiles().insert(FE);
+
// FIXME: Refactor with mergeHeaderFileInfo in HeaderSearch.cpp.
HFI.isImport |= (Flags >> 5) & 0x01;
HFI.isPragmaOnce |= (Flags >> 4) & 0x01;
HFI.DirInfo = (Flags >> 1) & 0x07;
HFI.IndexHeaderMapHeader = Flags & 0x01;
- // FIXME: Find a better way to handle this. Maybe just store a
- // "has been included" flag?
- HFI.NumIncludes = std::max(endian::readNext<uint16_t, little, unaligned>(d),
- HFI.NumIncludes);
HFI.ControllingMacroID = Reader.getGlobalIdentifierID(
- M, endian::readNext<uint32_t, little, unaligned>(d));
+ M, endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d));
if (unsigned FrameworkOffset =
- endian::readNext<uint32_t, little, unaligned>(d)) {
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d)) {
// The framework offset is 1 greater than the actual offset,
// since 0 is used as an indicator for "no framework name".
StringRef FrameworkName(FrameworkStrings + FrameworkOffset - 1);
@@ -1908,9 +2070,10 @@ HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d,
assert((End - d) % 4 == 0 &&
"Wrong data length in HeaderFileInfo deserialization");
while (d != End) {
- uint32_t LocalSMID = endian::readNext<uint32_t, little, unaligned>(d);
- auto HeaderRole = static_cast<ModuleMap::ModuleHeaderRole>(LocalSMID & 3);
- LocalSMID >>= 2;
+ uint32_t LocalSMID =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d);
+ auto HeaderRole = static_cast<ModuleMap::ModuleHeaderRole>(LocalSMID & 7);
+ LocalSMID >>= 3;
// This header is part of a module. Associate it with the module to enable
// implicit module import.
@@ -1923,11 +2086,12 @@ HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d,
std::string Filename = std::string(key.Filename);
if (key.Imported)
Reader.ResolveImportedPath(M, Filename);
- // FIXME: NameAsWritten
- Module::Header H = {std::string(key.Filename), "",
- *FileMgr.getFile(Filename)};
- ModMap.addHeader(Mod, H, HeaderRole, /*Imported*/true);
- HFI.isModuleHeader |= !(HeaderRole & ModuleMap::TextualHeader);
+ if (auto FE = FileMgr.getOptionalFileRef(Filename)) {
+ // FIXME: NameAsWritten
+ Module::Header H = {std::string(key.Filename), "", *FE};
+ ModMap.addHeader(Mod, H, HeaderRole, /*Imported=*/true);
+ }
+ HFI.isModuleHeader |= ModuleMap::isModular(HeaderRole);
}
// This HeaderFileInfo was externally loaded.
@@ -2227,7 +2391,7 @@ bool ASTReader::shouldDisableValidationForFile(
// If a PCH is loaded and validation is disabled for PCH then disable
// validation for the PCH and the modules it loads.
- ModuleKind K = CurrentDeserializingModuleKind.getValueOr(M.Kind);
+ ModuleKind K = CurrentDeserializingModuleKind.value_or(M.Kind);
switch (K) {
case MK_MainFile:
@@ -2243,12 +2407,20 @@ bool ASTReader::shouldDisableValidationForFile(
return false;
}
-ASTReader::InputFileInfo
-ASTReader::readInputFileInfo(ModuleFile &F, unsigned ID) {
+InputFileInfo ASTReader::getInputFileInfo(ModuleFile &F, unsigned ID) {
+ // If this ID is bogus, just return an empty input file.
+ if (ID == 0 || ID > F.InputFileInfosLoaded.size())
+ return InputFileInfo();
+
+ // If we've already loaded this input file, return it.
+ if (!F.InputFileInfosLoaded[ID - 1].Filename.empty())
+ return F.InputFileInfosLoaded[ID - 1];
+
// Go find this input file.
BitstreamCursor &Cursor = F.InputFilesCursor;
SavedStreamPosition SavedPosition(Cursor);
- if (llvm::Error Err = Cursor.JumpToBit(F.InputFileOffsets[ID - 1])) {
+ if (llvm::Error Err = Cursor.JumpToBit(F.InputFilesOffsetBase +
+ F.InputFileOffsets[ID - 1])) {
// FIXME this drops errors on the floor.
consumeError(std::move(Err));
}
@@ -2276,9 +2448,22 @@ ASTReader::readInputFileInfo(ModuleFile &F, unsigned ID) {
R.StoredTime = static_cast<time_t>(Record[2]);
R.Overridden = static_cast<bool>(Record[3]);
R.Transient = static_cast<bool>(Record[4]);
- R.TopLevelModuleMap = static_cast<bool>(Record[5]);
- R.Filename = std::string(Blob);
- ResolveImportedPath(F, R.Filename);
+ R.TopLevel = static_cast<bool>(Record[5]);
+ R.ModuleMap = static_cast<bool>(Record[6]);
+ std::tie(R.FilenameAsRequested, R.Filename) = [&]() {
+ uint16_t AsRequestedLength = Record[7];
+
+ std::string NameAsRequested = Blob.substr(0, AsRequestedLength).str();
+ std::string Name = Blob.substr(AsRequestedLength).str();
+
+ ResolveImportedPath(F, NameAsRequested);
+ ResolveImportedPath(F, Name);
+
+ if (Name.empty())
+ Name = NameAsRequested;
+
+ return std::make_pair(std::move(NameAsRequested), std::move(Name));
+ }();
Expected<llvm::BitstreamEntry> MaybeEntry = Cursor.advance();
if (!MaybeEntry) // FIXME this drops errors on the floor.
@@ -2297,6 +2482,9 @@ ASTReader::readInputFileInfo(ModuleFile &F, unsigned ID) {
}
R.ContentHash = (static_cast<uint64_t>(Record[1]) << 32) |
static_cast<uint64_t>(Record[0]);
+
+ // Note that we've loaded this input file info.
+ F.InputFileInfosLoaded[ID - 1] = R;
return R;
}
@@ -2316,35 +2504,38 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
// Go find this input file.
BitstreamCursor &Cursor = F.InputFilesCursor;
SavedStreamPosition SavedPosition(Cursor);
- if (llvm::Error Err = Cursor.JumpToBit(F.InputFileOffsets[ID - 1])) {
+ if (llvm::Error Err = Cursor.JumpToBit(F.InputFilesOffsetBase +
+ F.InputFileOffsets[ID - 1])) {
// FIXME this drops errors on the floor.
consumeError(std::move(Err));
}
- InputFileInfo FI = readInputFileInfo(F, ID);
+ InputFileInfo FI = getInputFileInfo(F, ID);
off_t StoredSize = FI.StoredSize;
time_t StoredTime = FI.StoredTime;
bool Overridden = FI.Overridden;
bool Transient = FI.Transient;
- StringRef Filename = FI.Filename;
+ StringRef Filename = FI.FilenameAsRequested;
uint64_t StoredContentHash = FI.ContentHash;
- OptionalFileEntryRefDegradesToFileEntryPtr File =
- expectedToOptional(FileMgr.getFileRef(Filename, /*OpenFile=*/false));
+ // For standard C++ modules, we don't need to check the inputs.
+ bool SkipChecks = F.StandardCXXModule;
+
+ const HeaderSearchOptions &HSOpts =
+ PP.getHeaderSearchInfo().getHeaderSearchOpts();
- // If we didn't find the file, resolve it relative to the
- // original directory from which this AST file was created.
- if (!File && !F.OriginalDir.empty() && !F.BaseDirectory.empty() &&
- F.OriginalDir != F.BaseDirectory) {
- std::string Resolved = resolveFileRelativeToOriginalDir(
- std::string(Filename), F.OriginalDir, F.BaseDirectory);
- if (!Resolved.empty())
- File = expectedToOptional(FileMgr.getFileRef(Resolved));
+ // The option ForceCheckCXX20ModulesInputFiles is only meaningful for C++20
+ // modules.
+ if (F.StandardCXXModule && HSOpts.ForceCheckCXX20ModulesInputFiles) {
+ SkipChecks = false;
+ Overridden = false;
}
+ auto File = FileMgr.getOptionalFileRef(Filename, /*OpenFile=*/false);
+
// For an overridden file, create a virtual file with the stored
// size/timestamp.
- if ((Overridden || Transient) && !File)
+ if ((Overridden || Transient || SkipChecks) && !File)
File = FileMgr.getVirtualFileRef(Filename, StoredSize, StoredTime);
if (!File) {
@@ -2367,7 +2558,8 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
// PCH.
SourceManager &SM = getSourceManager();
// FIXME: Reject if the overrides are different.
- if ((!Overridden && !Transient) && SM.isFileOverridden(File)) {
+ if ((!Overridden && !Transient) && !SkipChecks &&
+ SM.isFileOverridden(*File)) {
if (Complain)
Error(diag::err_fe_pch_file_overridden, Filename);
@@ -2380,46 +2572,71 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
}
}
- enum ModificationType {
- Size,
- ModTime,
- Content,
- None,
+ struct Change {
+ enum ModificationKind {
+ Size,
+ ModTime,
+ Content,
+ None,
+ } Kind;
+ std::optional<int64_t> Old = std::nullopt;
+ std::optional<int64_t> New = std::nullopt;
+ };
+ auto HasInputContentChanged = [&](Change OriginalChange) {
+ assert(ValidateASTInputFilesContent &&
+ "We should only check the content of the inputs with "
+ "ValidateASTInputFilesContent enabled.");
+
+ if (StoredContentHash == static_cast<uint64_t>(llvm::hash_code(-1)))
+ return OriginalChange;
+
+ auto MemBuffOrError = FileMgr.getBufferForFile(*File);
+ if (!MemBuffOrError) {
+ if (!Complain)
+ return OriginalChange;
+ std::string ErrorStr = "could not get buffer for file '";
+ ErrorStr += File->getName();
+ ErrorStr += "'";
+ Error(ErrorStr);
+ return OriginalChange;
+ }
+
+ // FIXME: hash_value is not guaranteed to be stable!
+ auto ContentHash = hash_value(MemBuffOrError.get()->getBuffer());
+ if (StoredContentHash == static_cast<uint64_t>(ContentHash))
+ return Change{Change::None};
+
+ return Change{Change::Content};
};
auto HasInputFileChanged = [&]() {
if (StoredSize != File->getSize())
- return ModificationType::Size;
+ return Change{Change::Size, StoredSize, File->getSize()};
if (!shouldDisableValidationForFile(F) && StoredTime &&
StoredTime != File->getModificationTime()) {
+ Change MTimeChange = {Change::ModTime, StoredTime,
+ File->getModificationTime()};
+
// In case the modification time changes but not the content,
// accept the cached file as legit.
- if (ValidateASTInputFilesContent &&
- StoredContentHash != static_cast<uint64_t>(llvm::hash_code(-1))) {
- auto MemBuffOrError = FileMgr.getBufferForFile(File);
- if (!MemBuffOrError) {
- if (!Complain)
- return ModificationType::ModTime;
- std::string ErrorStr = "could not get buffer for file '";
- ErrorStr += File->getName();
- ErrorStr += "'";
- Error(ErrorStr);
- return ModificationType::ModTime;
- }
+ if (ValidateASTInputFilesContent)
+ return HasInputContentChanged(MTimeChange);
- auto ContentHash = hash_value(MemBuffOrError.get()->getBuffer());
- if (StoredContentHash == static_cast<uint64_t>(ContentHash))
- return ModificationType::None;
- return ModificationType::Content;
- }
- return ModificationType::ModTime;
+ return MTimeChange;
}
- return ModificationType::None;
+ return Change{Change::None};
};
bool IsOutOfDate = false;
- auto FileChange = HasInputFileChanged();
+ auto FileChange = SkipChecks ? Change{Change::None} : HasInputFileChanged();
+ // When ForceCheckCXX20ModulesInputFiles and ValidateASTInputFilesContent
+ // enabled, it is better to check the contents of the inputs. Since we can't
+ // get correct modified time information for inputs from overriden inputs.
+ if (HSOpts.ForceCheckCXX20ModulesInputFiles && ValidateASTInputFilesContent &&
+ F.StandardCXXModule && FileChange.Kind == Change::None)
+ FileChange = HasInputContentChanged(FileChange);
+
// For an overridden file, there is nothing to validate.
- if (!Overridden && FileChange != ModificationType::None) {
+ if (!Overridden && FileChange.Kind != Change::None) {
if (Complain && !Diags.isDiagnosticInFlight()) {
// Build a list of the PCH imports that got us here (in reverse).
SmallVector<ModuleFile *, 4> ImportStack(1, &F);
@@ -2430,7 +2647,10 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
StringRef TopLevelPCHName(ImportStack.back()->FileName);
Diag(diag::err_fe_ast_file_modified)
<< Filename << moduleKindForDiagnostic(ImportStack.back()->Kind)
- << TopLevelPCHName << FileChange;
+ << TopLevelPCHName << FileChange.Kind
+ << (FileChange.Old && FileChange.New)
+ << llvm::itostr(FileChange.Old.value_or(0))
+ << llvm::itostr(FileChange.New.value_or(0));
// Print the import stack.
if (ImportStack.size() > 1) {
@@ -2466,7 +2686,8 @@ void ASTReader::ResolveImportedPath(ModuleFile &M, std::string &Filename) {
}
void ASTReader::ResolveImportedPath(std::string &Filename, StringRef Prefix) {
- if (Filename.empty() || llvm::sys::path::is_absolute(Filename))
+ if (Filename.empty() || llvm::sys::path::is_absolute(Filename) ||
+ Filename == "<built-in>" || Filename == "<command line>")
return;
SmallString<128> Buffer;
@@ -2645,12 +2866,11 @@ ASTReader::ReadControlBlock(ModuleFile &F,
// so we verify all input files. Otherwise, verify only user input
// files.
- unsigned N = NumUserInputs;
- if (ValidateSystemInputs ||
- (HSOpts.ModulesValidateOncePerBuildSession &&
- F.InputFilesValidationTimestamp <= HSOpts.BuildSessionTimestamp &&
- F.Kind == MK_ImplicitModule))
- N = NumInputs;
+ unsigned N = ValidateSystemInputs ? NumInputs : NumUserInputs;
+ if (HSOpts.ModulesValidateOncePerBuildSession &&
+ F.InputFilesValidationTimestamp > HSOpts.BuildSessionTimestamp &&
+ F.Kind == MK_ImplicitModule)
+ N = NumUserInputs;
for (unsigned I = 0; I < N; ++I) {
InputFile IF = getInputFile(F, I+1, Complain);
@@ -2667,10 +2887,10 @@ ASTReader::ReadControlBlock(ModuleFile &F,
: NumUserInputs;
for (unsigned I = 0; I < N; ++I) {
bool IsSystem = I >= NumUserInputs;
- InputFileInfo FI = readInputFileInfo(F, I+1);
- Listener->visitInputFile(FI.Filename, IsSystem, FI.Overridden,
- F.Kind == MK_ExplicitModule ||
- F.Kind == MK_PrebuiltModule);
+ InputFileInfo FI = getInputFileInfo(F, I + 1);
+ Listener->visitInputFile(
+ FI.FilenameAsRequested, IsSystem, FI.Overridden,
+ F.Kind == MK_ExplicitModule || F.Kind == MK_PrebuiltModule);
}
}
@@ -2689,6 +2909,7 @@ ASTReader::ReadControlBlock(ModuleFile &F,
Error("malformed block record in AST file");
return Failure;
}
+ F.InputFilesOffsetBase = F.InputFilesCursor.GetCurrentBitNo();
continue;
case OPTIONS_BLOCK_ID:
@@ -2759,7 +2980,7 @@ ASTReader::ReadControlBlock(ModuleFile &F,
return VersionMismatch;
}
- bool hasErrors = Record[6];
+ bool hasErrors = Record[7];
if (hasErrors && !DisableValidation) {
// If requested by the caller and the module hasn't already been read
// or compiled, mark modules on error as out-of-date.
@@ -2783,7 +3004,9 @@ ASTReader::ReadControlBlock(ModuleFile &F,
if (F.RelocatablePCH)
F.BaseDirectory = isysroot.empty() ? "/" : isysroot;
- F.HasTimestamps = Record[5];
+ F.StandardCXXModule = Record[5];
+
+ F.HasTimestamps = Record[6];
const std::string &CurBranch = getClangFullRepositoryVersion();
StringRef ASTBranch = Blob;
@@ -2807,36 +3030,51 @@ ASTReader::ReadControlBlock(ModuleFile &F,
while (Idx < N) {
// Read information about the AST file.
ModuleKind ImportedKind = (ModuleKind)Record[Idx++];
+ // Whether we're importing a standard c++ module.
+ bool IsImportingStdCXXModule = Record[Idx++];
// The import location will be the local one for now; we will adjust
// all import locations of module imports after the global source
// location info are setup, in ReadAST.
SourceLocation ImportLoc =
ReadUntranslatedSourceLocation(Record[Idx++]);
- off_t StoredSize = (off_t)Record[Idx++];
- time_t StoredModTime = (time_t)Record[Idx++];
- auto FirstSignatureByte = Record.begin() + Idx;
- ASTFileSignature StoredSignature = ASTFileSignature::create(
- FirstSignatureByte, FirstSignatureByte + ASTFileSignature::size);
- Idx += ASTFileSignature::size;
+ off_t StoredSize = !IsImportingStdCXXModule ? (off_t)Record[Idx++] : 0;
+ time_t StoredModTime =
+ !IsImportingStdCXXModule ? (time_t)Record[Idx++] : 0;
+
+ ASTFileSignature StoredSignature;
+ if (!IsImportingStdCXXModule) {
+ auto FirstSignatureByte = Record.begin() + Idx;
+ StoredSignature = ASTFileSignature::create(
+ FirstSignatureByte, FirstSignatureByte + ASTFileSignature::size);
+ Idx += ASTFileSignature::size;
+ }
std::string ImportedName = ReadString(Record, Idx);
std::string ImportedFile;
// For prebuilt and explicit modules first consult the file map for
// an override. Note that here we don't search prebuilt module
- // directories, only the explicit name to file mappings. Also, we will
- // still verify the size/signature making sure it is essentially the
- // same file but perhaps in a different location.
+ // directories if we're not importing standard c++ module, only the
+ // explicit name to file mappings. Also, we will still verify the
+ // size/signature making sure it is essentially the same file but
+ // perhaps in a different location.
if (ImportedKind == MK_PrebuiltModule || ImportedKind == MK_ExplicitModule)
ImportedFile = PP.getHeaderSearchInfo().getPrebuiltModuleFileName(
- ImportedName, /*FileMapOnly*/ true);
-
- if (ImportedFile.empty())
- // Use BaseDirectoryAsWritten to ensure we use the same path in the
- // ModuleCache as when writing.
- ImportedFile = ReadPath(BaseDirectoryAsWritten, Record, Idx);
- else
- SkipPath(Record, Idx);
+ ImportedName, /*FileMapOnly*/ !IsImportingStdCXXModule);
+
+ // For C++20 Modules, we won't record the path to the imported modules
+ // in the BMI
+ if (!IsImportingStdCXXModule) {
+ if (ImportedFile.empty()) {
+ // Use BaseDirectoryAsWritten to ensure we use the same path in the
+ // ModuleCache as when writing.
+ ImportedFile = ReadPath(BaseDirectoryAsWritten, Record, Idx);
+ } else
+ SkipPath(Record, Idx);
+ } else if (ImportedFile.empty()) {
+ Diag(clang::diag::err_failed_to_find_module_file) << ImportedName;
+ return Missing;
+ }
// If our client can't cope with us being out of date, we can't cope with
// our dependency being missing.
@@ -2884,10 +3122,6 @@ ASTReader::ReadControlBlock(ModuleFile &F,
F.OriginalSourceFileID = FileID::get(Record[0]);
break;
- case ORIGINAL_PCH_DIR:
- F.OriginalDir = std::string(Blob);
- break;
-
case MODULE_NAME:
F.ModuleName = std::string(Blob);
Diag(diag::remark_module_import)
@@ -2909,10 +3143,13 @@ ASTReader::ReadControlBlock(ModuleFile &F,
BaseDirectoryAsWritten = Blob;
assert(!F.ModuleName.empty() &&
"MODULE_DIRECTORY found before MODULE_NAME");
+ F.BaseDirectory = std::string(Blob);
+ if (!PP.getPreprocessorOpts().ModulesCheckRelocated)
+ break;
// If we've already loaded a module map file covering this module, we may
// have a better path for it (relative to the current build).
Module *M = PP.getHeaderSearchInfo().lookupModule(
- F.ModuleName, /*AllowSearch*/ true,
+ F.ModuleName, SourceLocation(), /*AllowSearch*/ true,
/*AllowExtraModuleMapSearch*/ true);
if (M && M->Directory) {
// If we're implicitly loading a module, the base directory can't
@@ -2921,7 +3158,7 @@ ASTReader::ReadControlBlock(ModuleFile &F,
if (!bool(PP.getPreprocessorOpts().DisablePCHOrModuleValidation &
DisableValidationForModuleKind::Module) &&
F.Kind != MK_ExplicitModule && F.Kind != MK_PrebuiltModule) {
- auto BuildDir = PP.getFileManager().getDirectory(Blob);
+ auto BuildDir = PP.getFileManager().getOptionalDirectoryRef(Blob);
if (!BuildDir || *BuildDir != M->Directory) {
if (!canRecoverFromOutOfDate(F.FileName, ClientLoadCapabilities))
Diag(diag::err_imported_module_relocated)
@@ -2930,8 +3167,6 @@ ASTReader::ReadControlBlock(ModuleFile &F,
}
}
F.BaseDirectory = std::string(M->Directory->getName());
- } else {
- F.BaseDirectory = std::string(Blob);
}
break;
}
@@ -2948,36 +3183,34 @@ ASTReader::ReadControlBlock(ModuleFile &F,
F.InputFileOffsets =
(const llvm::support::unaligned_uint64_t *)Blob.data();
F.InputFilesLoaded.resize(NumInputs);
+ F.InputFileInfosLoaded.resize(NumInputs);
F.NumUserInputFiles = NumUserInputs;
break;
}
}
}
-ASTReader::ASTReadResult
-ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
+llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
+ unsigned ClientLoadCapabilities) {
BitstreamCursor &Stream = F.Stream;
- if (llvm::Error Err = Stream.EnterSubBlock(AST_BLOCK_ID)) {
- Error(std::move(Err));
- return Failure;
- }
+ if (llvm::Error Err = Stream.EnterSubBlock(AST_BLOCK_ID))
+ return Err;
F.ASTBlockStartOffset = Stream.GetCurrentBitNo();
// Read all of the records and blocks for the AST file.
RecordData Record;
while (true) {
Expected<llvm::BitstreamEntry> MaybeEntry = Stream.advance();
- if (!MaybeEntry) {
- Error(MaybeEntry.takeError());
- return Failure;
- }
+ if (!MaybeEntry)
+ return MaybeEntry.takeError();
llvm::BitstreamEntry Entry = MaybeEntry.get();
switch (Entry.Kind) {
case llvm::BitstreamEntry::Error:
- Error("error at end of module block in AST file");
- return Failure;
+ return llvm::createStringError(
+ std::errc::illegal_byte_sequence,
+ "error at end of module block in AST file");
case llvm::BitstreamEntry::EndBlock:
// Outside of C++, we do not store a lookup map for the translation unit.
// Instead, mark it as needing a lookup map to be built if this module
@@ -2990,7 +3223,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
DC->setMustBuildLookupTable();
}
- return Success;
+ return llvm::Error::success();
case llvm::BitstreamEntry::SubBlock:
switch (Entry.ID) {
case DECLTYPES_BLOCK_ID:
@@ -2999,15 +3232,11 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
// cursor to it, enter the block and read the abbrevs in that block.
// With the main cursor, we just skip over it.
F.DeclsCursor = Stream;
- if (llvm::Error Err = Stream.SkipBlock()) {
- Error(std::move(Err));
- return Failure;
- }
- if (ReadBlockAbbrevs(F.DeclsCursor, DECLTYPES_BLOCK_ID,
- &F.DeclsBlockStartOffset)) {
- Error("malformed block record in AST file");
- return Failure;
- }
+ if (llvm::Error Err = Stream.SkipBlock())
+ return Err;
+ if (llvm::Error Err = ReadBlockAbbrevs(
+ F.DeclsCursor, DECLTYPES_BLOCK_ID, &F.DeclsBlockStartOffset))
+ return Err;
break;
case PREPROCESSOR_BLOCK_ID:
@@ -3015,14 +3244,11 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
if (!PP.getExternalSource())
PP.setExternalSource(this);
- if (llvm::Error Err = Stream.SkipBlock()) {
- Error(std::move(Err));
- return Failure;
- }
- if (ReadBlockAbbrevs(F.MacroCursor, PREPROCESSOR_BLOCK_ID)) {
- Error("malformed block record in AST file");
- return Failure;
- }
+ if (llvm::Error Err = Stream.SkipBlock())
+ return Err;
+ if (llvm::Error Err =
+ ReadBlockAbbrevs(F.MacroCursor, PREPROCESSOR_BLOCK_ID))
+ return Err;
F.MacroStartOffset = F.MacroCursor.GetCurrentBitNo();
break;
@@ -3030,14 +3256,11 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
F.PreprocessorDetailCursor = Stream;
if (llvm::Error Err = Stream.SkipBlock()) {
- Error(std::move(Err));
- return Failure;
- }
- if (ReadBlockAbbrevs(F.PreprocessorDetailCursor,
- PREPROCESSOR_DETAIL_BLOCK_ID)) {
- Error("malformed preprocessor detail record in AST file");
- return Failure;
+ return Err;
}
+ if (llvm::Error Err = ReadBlockAbbrevs(F.PreprocessorDetailCursor,
+ PREPROCESSOR_DETAIL_BLOCK_ID))
+ return Err;
F.PreprocessorDetailStartOffset
= F.PreprocessorDetailCursor.GetCurrentBitNo();
@@ -3048,36 +3271,29 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
case SOURCE_MANAGER_BLOCK_ID:
- if (ReadSourceManagerBlock(F))
- return Failure;
+ if (llvm::Error Err = ReadSourceManagerBlock(F))
+ return Err;
break;
case SUBMODULE_BLOCK_ID:
- if (ASTReadResult Result =
- ReadSubmoduleBlock(F, ClientLoadCapabilities))
- return Result;
+ if (llvm::Error Err = ReadSubmoduleBlock(F, ClientLoadCapabilities))
+ return Err;
break;
case COMMENTS_BLOCK_ID: {
BitstreamCursor C = Stream;
- if (llvm::Error Err = Stream.SkipBlock()) {
- Error(std::move(Err));
- return Failure;
- }
- if (ReadBlockAbbrevs(C, COMMENTS_BLOCK_ID)) {
- Error("malformed comments block in AST file");
- return Failure;
- }
+ if (llvm::Error Err = Stream.SkipBlock())
+ return Err;
+ if (llvm::Error Err = ReadBlockAbbrevs(C, COMMENTS_BLOCK_ID))
+ return Err;
CommentsCursors.push_back(std::make_pair(C, &F));
break;
}
default:
- if (llvm::Error Err = Stream.SkipBlock()) {
- Error(std::move(Err));
- return Failure;
- }
+ if (llvm::Error Err = Stream.SkipBlock())
+ return Err;
break;
}
continue;
@@ -3092,10 +3308,8 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
StringRef Blob;
Expected<unsigned> MaybeRecordType =
Stream.readRecord(Entry.ID, Record, &Blob);
- if (!MaybeRecordType) {
- Error(MaybeRecordType.takeError());
- return Failure;
- }
+ if (!MaybeRecordType)
+ return MaybeRecordType.takeError();
ASTRecordTypes RecordType = (ASTRecordTypes)MaybeRecordType.get();
// If we're not loading an AST context, we don't care about most records.
@@ -3105,12 +3319,12 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
case IDENTIFIER_OFFSET:
case INTERESTING_IDENTIFIERS:
case STATISTICS:
+ case PP_ASSUME_NONNULL_LOC:
case PP_CONDITIONAL_STACK:
case PP_COUNTER_VALUE:
case SOURCE_LOCATION_OFFSETS:
case MODULE_OFFSET_MAP:
case SOURCE_MANAGER_LINE_TABLE:
- case SOURCE_LOCATION_PRELOADS:
case PPD_ENTITIES_OFFSETS:
case HEADER_SEARCH_TABLE:
case IMPORTED_MODULES:
@@ -3126,10 +3340,10 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
case TYPE_OFFSET: {
- if (F.LocalNumTypes != 0) {
- Error("duplicate TYPE_OFFSET record in AST file");
- return Failure;
- }
+ if (F.LocalNumTypes != 0)
+ return llvm::createStringError(
+ std::errc::illegal_byte_sequence,
+ "duplicate TYPE_OFFSET record in AST file");
F.TypeOffsets = reinterpret_cast<const UnderalignedInt64 *>(Blob.data());
F.LocalNumTypes = Record[0];
unsigned LocalBaseTypeIndex = Record[1];
@@ -3150,10 +3364,10 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
case DECL_OFFSET: {
- if (F.LocalNumDecls != 0) {
- Error("duplicate DECL_OFFSET record in AST file");
- return Failure;
- }
+ if (F.LocalNumDecls != 0)
+ return llvm::createStringError(
+ std::errc::illegal_byte_sequence,
+ "duplicate DECL_OFFSET record in AST file");
F.DeclOffsets = (const DeclOffset *)Blob.data();
F.LocalNumDecls = Record[0];
unsigned LocalBaseDeclID = Record[1];
@@ -3218,10 +3432,10 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
case IDENTIFIER_OFFSET: {
- if (F.LocalNumIdentifiers != 0) {
- Error("duplicate IDENTIFIER_OFFSET record in AST file");
- return Failure;
- }
+ if (F.LocalNumIdentifiers != 0)
+ return llvm::createStringError(
+ std::errc::illegal_byte_sequence,
+ "duplicate IDENTIFIER_OFFSET record in AST file");
F.IdentifierOffsets = (const uint32_t *)Blob.data();
F.LocalNumIdentifiers = Record[0];
unsigned LocalBaseIdentifierID = Record[1];
@@ -3272,10 +3486,9 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
}
- if (SpecialTypes.size() != Record.size()) {
- Error("invalid special-types record");
- return Failure;
- }
+ if (SpecialTypes.size() != Record.size())
+ return llvm::createStringError(std::errc::illegal_byte_sequence,
+ "invalid special-types record");
for (unsigned I = 0, N = Record.size(); I != N; ++I) {
serialization::TypeID ID = getGlobalTypeID(F, Record[I]);
@@ -3304,10 +3517,9 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
case WEAK_UNDECLARED_IDENTIFIERS:
- if (Record.size() % 4 != 0) {
- Error("invalid weak identifiers record");
- return Failure;
- }
+ if (Record.size() % 3 != 0)
+ return llvm::createStringError(std::errc::illegal_byte_sequence,
+ "invalid weak identifiers record");
// FIXME: Ignore weak undeclared identifiers from non-original PCH
// files. This isn't the way to do it :)
@@ -3320,8 +3532,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
WeakUndeclaredIdentifiers.push_back(
getGlobalIdentifierID(F, Record[I++]));
WeakUndeclaredIdentifiers.push_back(
- ReadSourceLocation(F, Record, I).getRawEncoding());
- WeakUndeclaredIdentifiers.push_back(Record[I++]);
+ ReadSourceLocation(F, Record, I).getRawEncoding());
}
break;
@@ -3369,11 +3580,19 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
break;
+ case PP_ASSUME_NONNULL_LOC: {
+ unsigned Idx = 0;
+ if (!Record.empty())
+ PP.setPreambleRecordedPragmaAssumeNonNullLoc(
+ ReadSourceLocation(F, Record, Idx));
+ break;
+ }
+
case PP_CONDITIONAL_STACK:
if (!Record.empty()) {
unsigned Idx = 0, End = Record.size() - 1;
bool ReachedEOFWhileSkipping = Record[Idx++];
- llvm::Optional<Preprocessor::PreambleSkipInfo> SkipInfo;
+ std::optional<Preprocessor::PreambleSkipInfo> SkipInfo;
if (ReachedEOFWhileSkipping) {
SourceLocation HashToken = ReadSourceLocation(F, Record, Idx);
SourceLocation IfTokenLoc = ReadSourceLocation(F, Record, Idx);
@@ -3415,8 +3634,12 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
SourceMgr.AllocateLoadedSLocEntries(F.LocalNumSLocEntries,
SLocSpaceSize);
if (!F.SLocEntryBaseID) {
- Error("ran out of source locations");
- break;
+ if (!Diags.isDiagnosticInFlight()) {
+ Diags.Report(SourceLocation(), diag::remark_sloc_usage);
+ SourceMgr.noteSLocAddressSpaceUsage(Diags);
+ }
+ return llvm::createStringError(std::errc::invalid_argument,
+ "ran out of source locations");
}
// Make our entry in the range map. BaseID is negative and growing, so
// we invert it. Because we invert it, though, we need the other end of
@@ -3448,34 +3671,18 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
case SOURCE_MANAGER_LINE_TABLE:
- if (ParseLineTable(F, Record)) {
- Error("malformed SOURCE_MANAGER_LINE_TABLE in AST file");
- return Failure;
- }
+ ParseLineTable(F, Record);
break;
- case SOURCE_LOCATION_PRELOADS: {
- // Need to transform from the local view (1-based IDs) to the global view,
- // which is based off F.SLocEntryBaseID.
- if (!F.PreloadSLocEntries.empty()) {
- Error("Multiple SOURCE_LOCATION_PRELOADS records in AST file");
- return Failure;
- }
-
- F.PreloadSLocEntries.swap(Record);
- break;
- }
-
case EXT_VECTOR_DECLS:
for (unsigned I = 0, N = Record.size(); I != N; ++I)
ExtVectorDecls.push_back(getGlobalDeclID(F, Record[I]));
break;
case VTABLE_USES:
- if (Record.size() % 3 != 0) {
- Error("Invalid VTABLE_USES record");
- return Failure;
- }
+ if (Record.size() % 3 != 0)
+ return llvm::createStringError(std::errc::illegal_byte_sequence,
+ "Invalid VTABLE_USES record");
// Later tables overwrite earlier ones.
// FIXME: Modules will have some trouble with this. This is clearly not
@@ -3491,15 +3698,15 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
case PENDING_IMPLICIT_INSTANTIATIONS:
- if (PendingInstantiations.size() % 2 != 0) {
- Error("Invalid existing PendingInstantiations");
- return Failure;
- }
+ if (PendingInstantiations.size() % 2 != 0)
+ return llvm::createStringError(
+ std::errc::illegal_byte_sequence,
+ "Invalid existing PendingInstantiations");
- if (Record.size() % 2 != 0) {
- Error("Invalid PENDING_IMPLICIT_INSTANTIATIONS block");
- return Failure;
- }
+ if (Record.size() % 2 != 0)
+ return llvm::createStringError(
+ std::errc::illegal_byte_sequence,
+ "Invalid PENDING_IMPLICIT_INSTANTIATIONS block");
for (unsigned I = 0, N = Record.size(); I != N; /* in loop */) {
PendingInstantiations.push_back(getGlobalDeclID(F, Record[I++]));
@@ -3509,10 +3716,9 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
case SEMA_DECL_REFS:
- if (Record.size() != 3) {
- Error("Invalid SEMA_DECL_REFS block");
- return Failure;
- }
+ if (Record.size() != 3)
+ return llvm::createStringError(std::errc::illegal_byte_sequence,
+ "Invalid SEMA_DECL_REFS block");
for (unsigned I = 0, N = Record.size(); I != N; ++I)
SemaDeclRefs.push_back(getGlobalDeclID(F, Record[I]));
break;
@@ -3568,10 +3774,10 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
case DECL_UPDATE_OFFSETS:
- if (Record.size() % 2 != 0) {
- Error("invalid DECL_UPDATE_OFFSETS block in AST file");
- return Failure;
- }
+ if (Record.size() % 2 != 0)
+ return llvm::createStringError(
+ std::errc::illegal_byte_sequence,
+ "invalid DECL_UPDATE_OFFSETS block in AST file");
for (unsigned I = 0, N = Record.size(); I != N; I += 2) {
GlobalDeclID ID = getGlobalDeclID(F, Record[I]);
DeclUpdateOffsets[ID].push_back(std::make_pair(&F, Record[I + 1]));
@@ -3585,10 +3791,10 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
case OBJC_CATEGORIES_MAP:
- if (F.LocalNumObjCCategoriesInMap != 0) {
- Error("duplicate OBJC_CATEGORIES_MAP record in AST file");
- return Failure;
- }
+ if (F.LocalNumObjCCategoriesInMap != 0)
+ return llvm::createStringError(
+ std::errc::illegal_byte_sequence,
+ "duplicate OBJC_CATEGORIES_MAP record in AST file");
F.LocalNumObjCCategoriesInMap = Record[0];
F.ObjCCategoriesMap = (const ObjCCategoriesInfo *)Blob.data();
@@ -3653,15 +3859,13 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
case UNDEFINED_BUT_USED:
- if (UndefinedButUsed.size() % 2 != 0) {
- Error("Invalid existing UndefinedButUsed");
- return Failure;
- }
+ if (UndefinedButUsed.size() % 2 != 0)
+ return llvm::createStringError(std::errc::illegal_byte_sequence,
+ "Invalid existing UndefinedButUsed");
- if (Record.size() % 2 != 0) {
- Error("invalid undefined-but-used record");
- return Failure;
- }
+ if (Record.size() % 2 != 0)
+ return llvm::createStringError(std::errc::illegal_byte_sequence,
+ "invalid undefined-but-used record");
for (unsigned I = 0, N = Record.size(); I != N; /* in loop */) {
UndefinedButUsed.push_back(getGlobalDeclID(F, Record[I++]));
UndefinedButUsed.push_back(
@@ -3691,7 +3895,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
unsigned GlobalID = getGlobalSubmoduleID(F, Record[I++]);
SourceLocation Loc = ReadSourceLocation(F, Record, I);
if (GlobalID) {
- ImportedModules.push_back(ImportedSubmodule(GlobalID, Loc));
+ PendingImportedModules.push_back(ImportedSubmodule(GlobalID, Loc));
if (DeserializationListener)
DeserializationListener->ModuleImportRead(GlobalID, Loc);
}
@@ -3700,10 +3904,10 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
case MACRO_OFFSET: {
- if (F.LocalNumMacros != 0) {
- Error("duplicate MACRO_OFFSET record in AST file");
- return Failure;
- }
+ if (F.LocalNumMacros != 0)
+ return llvm::createStringError(
+ std::errc::illegal_byte_sequence,
+ "duplicate MACRO_OFFSET record in AST file");
F.MacroOffsets = (const uint32_t *)Blob.data();
F.LocalNumMacros = Record[0];
unsigned LocalBaseMacroID = Record[1];
@@ -3731,26 +3935,24 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
case OPTIMIZE_PRAGMA_OPTIONS:
- if (Record.size() != 1) {
- Error("invalid pragma optimize record");
- return Failure;
- }
+ if (Record.size() != 1)
+ return llvm::createStringError(std::errc::illegal_byte_sequence,
+ "invalid pragma optimize record");
OptimizeOffPragmaLocation = ReadSourceLocation(F, Record[0]);
break;
case MSSTRUCT_PRAGMA_OPTIONS:
- if (Record.size() != 1) {
- Error("invalid pragma ms_struct record");
- return Failure;
- }
+ if (Record.size() != 1)
+ return llvm::createStringError(std::errc::illegal_byte_sequence,
+ "invalid pragma ms_struct record");
PragmaMSStructState = Record[0];
break;
case POINTERS_TO_MEMBERS_PRAGMA_OPTIONS:
- if (Record.size() != 2) {
- Error("invalid pragma ms_struct record");
- return Failure;
- }
+ if (Record.size() != 2)
+ return llvm::createStringError(
+ std::errc::illegal_byte_sequence,
+ "invalid pragma pointers to members record");
PragmaMSPointersToMembersState = Record[0];
PointersToMembersPragmaLocation = ReadSourceLocation(F, Record[1]);
break;
@@ -3762,18 +3964,16 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
case CUDA_PRAGMA_FORCE_HOST_DEVICE_DEPTH:
- if (Record.size() != 1) {
- Error("invalid cuda pragma options record");
- return Failure;
- }
+ if (Record.size() != 1)
+ return llvm::createStringError(std::errc::illegal_byte_sequence,
+ "invalid cuda pragma options record");
ForceCUDAHostDeviceDepth = Record[0];
break;
case ALIGN_PACK_PRAGMA_OPTIONS: {
- if (Record.size() < 3) {
- Error("invalid pragma pack record");
- return Failure;
- }
+ if (Record.size() < 3)
+ return llvm::createStringError(std::errc::illegal_byte_sequence,
+ "invalid pragma pack record");
PragmaAlignPackCurrentValue = ReadAlignPackInfo(Record[0]);
PragmaAlignPackCurrentLocation = ReadSourceLocation(F, Record[1]);
unsigned NumStackEntries = Record[2];
@@ -3793,10 +3993,9 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
case FLOAT_CONTROL_PRAGMA_OPTIONS: {
- if (Record.size() < 3) {
- Error("invalid pragma pack record");
- return Failure;
- }
+ if (Record.size() < 3)
+ return llvm::createStringError(std::errc::illegal_byte_sequence,
+ "invalid pragma float control record");
FpPragmaCurrentValue = FPOptionsOverride::getFromOpaqueInt(Record[0]);
FpPragmaCurrentLocation = ReadSourceLocation(F, Record[1]);
unsigned NumStackEntries = Record[2];
@@ -3857,8 +4056,9 @@ void ASTReader::ReadModuleOffsetMap(ModuleFile &F) const {
// how it goes...
using namespace llvm::support;
ModuleKind Kind = static_cast<ModuleKind>(
- endian::readNext<uint8_t, little, unaligned>(Data));
- uint16_t Len = endian::readNext<uint16_t, little, unaligned>(Data);
+ endian::readNext<uint8_t, llvm::endianness::little, unaligned>(Data));
+ uint16_t Len =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(Data);
StringRef Name = StringRef((const char*)Data, Len);
Data += Len;
ModuleFile *OM = (Kind == MK_PrebuiltModule || Kind == MK_ExplicitModule ||
@@ -3874,21 +4074,21 @@ void ASTReader::ReadModuleOffsetMap(ModuleFile &F) const {
}
SourceLocation::UIntTy SLocOffset =
- endian::readNext<uint32_t, little, unaligned>(Data);
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
uint32_t IdentifierIDOffset =
- endian::readNext<uint32_t, little, unaligned>(Data);
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
uint32_t MacroIDOffset =
- endian::readNext<uint32_t, little, unaligned>(Data);
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
uint32_t PreprocessedEntityIDOffset =
- endian::readNext<uint32_t, little, unaligned>(Data);
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
uint32_t SubmoduleIDOffset =
- endian::readNext<uint32_t, little, unaligned>(Data);
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
uint32_t SelectorIDOffset =
- endian::readNext<uint32_t, little, unaligned>(Data);
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
uint32_t DeclIDOffset =
- endian::readNext<uint32_t, little, unaligned>(Data);
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
uint32_t TypeIndexOffset =
- endian::readNext<uint32_t, little, unaligned>(Data);
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Data);
auto mapOffset = [&](uint32_t Offset, uint32_t BaseOffset,
RemapBuilder &Remap) {
@@ -3932,18 +4132,21 @@ ASTReader::ReadModuleMapFileBlock(RecordData &Record, ModuleFile &F,
// usable header search context.
assert(!F.ModuleName.empty() &&
"MODULE_NAME should come before MODULE_MAP_FILE");
- if (F.Kind == MK_ImplicitModule && ModuleMgr.begin()->Kind != MK_MainFile) {
+ if (PP.getPreprocessorOpts().ModulesCheckRelocated &&
+ F.Kind == MK_ImplicitModule && ModuleMgr.begin()->Kind != MK_MainFile) {
// An implicitly-loaded module file should have its module listed in some
// module map file that we've already loaded.
- Module *M = PP.getHeaderSearchInfo().lookupModule(F.ModuleName);
+ Module *M =
+ PP.getHeaderSearchInfo().lookupModule(F.ModuleName, F.ImportLoc);
auto &Map = PP.getHeaderSearchInfo().getModuleMap();
- const FileEntry *ModMap = M ? Map.getModuleMapFileForUniquing(M) : nullptr;
+ OptionalFileEntryRef ModMap =
+ M ? Map.getModuleMapFileForUniquing(M) : std::nullopt;
// Don't emit module relocation error if we have -fno-validate-pch
if (!bool(PP.getPreprocessorOpts().DisablePCHOrModuleValidation &
DisableValidationForModuleKind::Module) &&
!ModMap) {
if (!canRecoverFromOutOfDate(F.FileName, ClientLoadCapabilities)) {
- if (auto ASTFE = M ? M->getASTFile() : None) {
+ if (auto ASTFE = M ? M->getASTFile() : std::nullopt) {
// This module was defined by an imported (explicit) module.
Diag(diag::err_module_file_conflict) << F.ModuleName << F.FileName
<< ASTFE->getName();
@@ -3980,11 +4183,11 @@ ASTReader::ReadModuleMapFileBlock(RecordData &Record, ModuleFile &F,
return OutOfDate;
}
- llvm::SmallPtrSet<const FileEntry *, 1> AdditionalStoredMaps;
+ ModuleMap::AdditionalModMapsSet AdditionalStoredMaps;
for (unsigned I = 0, N = Record[Idx++]; I < N; ++I) {
// FIXME: we should use input files rather than storing names.
std::string Filename = ReadPath(F, Record, Idx);
- auto SF = FileMgr.getFile(Filename, false, false);
+ auto SF = FileMgr.getOptionalFileRef(Filename, false, false);
if (!SF) {
if (!canRecoverFromOutOfDate(F.FileName, ClientLoadCapabilities))
Error("could not find file '" + Filename +"' referenced by AST file");
@@ -3996,13 +4199,13 @@ ASTReader::ReadModuleMapFileBlock(RecordData &Record, ModuleFile &F,
// Check any additional module map files (e.g. module.private.modulemap)
// that are not in the pcm.
if (auto *AdditionalModuleMaps = Map.getAdditionalModuleMapFiles(M)) {
- for (const FileEntry *ModMap : *AdditionalModuleMaps) {
+ for (FileEntryRef ModMap : *AdditionalModuleMaps) {
// Remove files that match
// Note: SmallPtrSet::erase is really remove
if (!AdditionalStoredMaps.erase(ModMap)) {
if (!canRecoverFromOutOfDate(F.FileName, ClientLoadCapabilities))
Diag(diag::err_module_different_modmap)
- << F.ModuleName << /*new*/0 << ModMap->getName();
+ << F.ModuleName << /*new*/0 << ModMap.getName();
return OutOfDate;
}
}
@@ -4010,10 +4213,10 @@ ASTReader::ReadModuleMapFileBlock(RecordData &Record, ModuleFile &F,
// Check any additional module map files that are in the pcm, but not
// found in header search. Cases that match are already removed.
- for (const FileEntry *ModMap : AdditionalStoredMaps) {
+ for (FileEntryRef ModMap : AdditionalStoredMaps) {
if (!canRecoverFromOutOfDate(F.FileName, ClientLoadCapabilities))
Diag(diag::err_module_different_modmap)
- << F.ModuleName << /*not new*/1 << ModMap->getName();
+ << F.ModuleName << /*not new*/1 << ModMap.getName();
return OutOfDate;
}
}
@@ -4096,7 +4299,7 @@ void ASTReader::makeModuleVisible(Module *Mod,
auto HiddenNames = std::move(*Hidden);
HiddenNamesMap.erase(Hidden);
makeNamesVisible(HiddenNames.second, HiddenNames.first);
- assert(HiddenNamesMap.find(Mod) == HiddenNamesMap.end() &&
+ assert(!HiddenNamesMap.contains(Mod) &&
"making names visible added hidden names");
}
@@ -4221,14 +4424,14 @@ static bool SkipCursorToBlock(BitstreamCursor &Cursor, unsigned BlockID) {
}
}
-ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
- ModuleKind Type,
+ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName, ModuleKind Type,
SourceLocation ImportLoc,
unsigned ClientLoadCapabilities,
- SmallVectorImpl<ImportedSubmodule> *Imported) {
- llvm::SaveAndRestore<SourceLocation>
- SetCurImportLocRAII(CurrentImportLoc, ImportLoc);
- llvm::SaveAndRestore<Optional<ModuleKind>> SetCurModuleKindRAII(
+ ModuleFile **NewLoadedModuleFile) {
+ llvm::TimeTraceScope scope("ReadAST", FileName);
+
+ llvm::SaveAndRestore SetCurImportLocRAII(CurrentImportLoc, ImportLoc);
+ llvm::SaveAndRestore<std::optional<ModuleKind>> SetCurModuleKindRAII(
CurrentDeserializingModuleKind, Type);
// Defer any pending actions until we get to the end of reading the AST file.
@@ -4240,57 +4443,54 @@ ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
PreviousGeneration = incrementGeneration(*ContextObj);
unsigned NumModules = ModuleMgr.size();
- auto removeModulesAndReturn = [&](ASTReadResult ReadResult) {
- assert(ReadResult && "expected to return error");
- ModuleMgr.removeModules(ModuleMgr.begin() + NumModules,
- PP.getLangOpts().Modules
- ? &PP.getHeaderSearchInfo().getModuleMap()
- : nullptr);
+ SmallVector<ImportedModule, 4> Loaded;
+ if (ASTReadResult ReadResult =
+ ReadASTCore(FileName, Type, ImportLoc,
+ /*ImportedBy=*/nullptr, Loaded, 0, 0, ASTFileSignature(),
+ ClientLoadCapabilities)) {
+ ModuleMgr.removeModules(ModuleMgr.begin() + NumModules);
// If we find that any modules are unusable, the global index is going
// to be out-of-date. Just remove it.
GlobalIndex.reset();
ModuleMgr.setGlobalIndex(nullptr);
return ReadResult;
- };
-
- SmallVector<ImportedModule, 4> Loaded;
- switch (ASTReadResult ReadResult =
- ReadASTCore(FileName, Type, ImportLoc,
- /*ImportedBy=*/nullptr, Loaded, 0, 0,
- ASTFileSignature(), ClientLoadCapabilities)) {
- case Failure:
- case Missing:
- case OutOfDate:
- case VersionMismatch:
- case ConfigurationMismatch:
- case HadErrors:
- return removeModulesAndReturn(ReadResult);
- case Success:
- break;
}
- // Here comes stuff that we only do once the entire chain is loaded.
+ if (NewLoadedModuleFile && !Loaded.empty())
+ *NewLoadedModuleFile = Loaded.back().Mod;
+
+ // Here comes stuff that we only do once the entire chain is loaded. Do *not*
+ // remove modules from this point. Various fields are updated during reading
+ // the AST block and removing the modules would result in dangling pointers.
+ // They are generally only incidentally dereferenced, ie. a binary search
+ // runs over `GlobalSLocEntryMap`, which could cause an invalid module to
+ // be dereferenced but it wouldn't actually be used.
- // Load the AST blocks of all of the modules that we loaded. We can still
+ // Load the AST blocks of all of the modules that we loaded. We can still
// hit errors parsing the ASTs at this point.
for (ImportedModule &M : Loaded) {
ModuleFile &F = *M.Mod;
+ llvm::TimeTraceScope Scope2("Read Loaded AST", F.ModuleName);
// Read the AST block.
- if (ASTReadResult Result = ReadASTBlock(F, ClientLoadCapabilities))
- return removeModulesAndReturn(Result);
+ if (llvm::Error Err = ReadASTBlock(F, ClientLoadCapabilities)) {
+ Error(std::move(Err));
+ return Failure;
+ }
// The AST block should always have a definition for the main module.
if (F.isModule() && !F.DidReadTopLevelSubmodule) {
Error(diag::err_module_file_missing_top_level_submodule, F.FileName);
- return removeModulesAndReturn(Failure);
+ return Failure;
}
// Read the extension blocks.
while (!SkipCursorToBlock(F.Stream, EXTENSION_BLOCK_ID)) {
- if (ASTReadResult Result = ReadExtensionBlock(F))
- return removeModulesAndReturn(Result);
+ if (llvm::Error Err = ReadExtensionBlock(F)) {
+ Error(std::move(Err));
+ return Failure;
+ }
}
// Once read, set the ModuleFile bit base offset and update the size in
@@ -4304,44 +4504,61 @@ ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
for (ImportedModule &M : Loaded) {
ModuleFile &F = *M.Mod;
- // Preload SLocEntries.
- for (unsigned I = 0, N = F.PreloadSLocEntries.size(); I != N; ++I) {
- int Index = int(F.PreloadSLocEntries[I] - 1) + F.SLocEntryBaseID;
- // Load it through the SourceManager and don't call ReadSLocEntry()
- // directly because the entry may have already been loaded in which case
- // calling ReadSLocEntry() directly would trigger an assertion in
- // SourceManager.
- SourceMgr.getLoadedSLocEntryByID(Index);
- }
-
// Map the original source file ID into the ID space of the current
// compilation.
- if (F.OriginalSourceFileID.isValid()) {
- F.OriginalSourceFileID = FileID::get(
- F.SLocEntryBaseID + F.OriginalSourceFileID.getOpaqueValue() - 1);
- }
+ if (F.OriginalSourceFileID.isValid())
+ F.OriginalSourceFileID = TranslateFileID(F, F.OriginalSourceFileID);
- // Preload all the pending interesting identifiers by marking them out of
- // date.
for (auto Offset : F.PreloadIdentifierOffsets) {
const unsigned char *Data = F.IdentifierTableData + Offset;
ASTIdentifierLookupTrait Trait(*this, F);
auto KeyDataLen = Trait.ReadKeyDataLength(Data);
auto Key = Trait.ReadKey(Data, KeyDataLen.first);
- auto &II = PP.getIdentifierTable().getOwn(Key);
- II.setOutOfDate(true);
+
+ IdentifierInfo *II;
+ if (!PP.getLangOpts().CPlusPlus) {
+ // Identifiers present in both the module file and the importing
+ // instance are marked out-of-date so that they can be deserialized
+ // on next use via ASTReader::updateOutOfDateIdentifier().
+ // Identifiers present in the module file but not in the importing
+ // instance are ignored for now, preventing growth of the identifier
+ // table. They will be deserialized on first use via ASTReader::get().
+ auto It = PP.getIdentifierTable().find(Key);
+ if (It == PP.getIdentifierTable().end())
+ continue;
+ II = It->second;
+ } else {
+ // With C++ modules, not many identifiers are considered interesting.
+ // All identifiers in the module file can be placed into the identifier
+ // table of the importing instance and marked as out-of-date. This makes
+ // ASTReader::get() a no-op, and deserialization will take place on
+ // first/next use via ASTReader::updateOutOfDateIdentifier().
+ II = &PP.getIdentifierTable().getOwn(Key);
+ }
+
+ II->setOutOfDate(true);
// Mark this identifier as being from an AST file so that we can track
// whether we need to serialize it.
- markIdentifierFromAST(*this, II);
+ markIdentifierFromAST(*this, *II);
// Associate the ID with the identifier so that the writer can reuse it.
auto ID = Trait.ReadIdentifierID(Data + KeyDataLen.first);
- SetIdentifierInfo(ID, &II);
+ SetIdentifierInfo(ID, II);
}
}
+ // Builtins and library builtins have already been initialized. Mark all
+ // identifiers as out-of-date, so that they are deserialized on first use.
+ if (Type == MK_PCH || Type == MK_Preamble || Type == MK_MainFile)
+ for (auto &Id : PP.getIdentifierTable())
+ Id.second->setOutOfDate(true);
+
+ // Mark selectors as out of date.
+ for (const auto &Sel : SelectorGeneration)
+ SelectorOutOfDate[Sel.first] = true;
+
// Setup the import locations and notify the module manager that we've
// committed to these module files.
for (ImportedModule &M : Loaded) {
@@ -4359,25 +4576,6 @@ ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
F.ImportLoc = TranslateSourceLocation(*M.ImportedBy, M.ImportLoc);
}
- if (!PP.getLangOpts().CPlusPlus ||
- (Type != MK_ImplicitModule && Type != MK_ExplicitModule &&
- Type != MK_PrebuiltModule)) {
- // Mark all of the identifiers in the identifier table as being out of date,
- // so that various accessors know to check the loaded modules when the
- // identifier is used.
- //
- // For C++ modules, we don't need information on many identifiers (just
- // those that provide macros or are poisoned), so we mark all of
- // the interesting ones via PreloadIdentifierOffsets.
- for (IdentifierTable::iterator Id = PP.getIdentifierTable().begin(),
- IdEnd = PP.getIdentifierTable().end();
- Id != IdEnd; ++Id)
- Id->second->setOutOfDate(true);
- }
- // Mark selectors as out of date.
- for (auto Sel : SelectorGeneration)
- SelectorOutOfDate[Sel.first] = true;
-
// Resolve any unresolved module exports.
for (unsigned I = 0, N = UnresolvedModuleRefs.size(); I != N; ++I) {
UnresolvedModuleRef &Unresolved = UnresolvedModuleRefs[I];
@@ -4399,6 +4597,11 @@ ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
Unresolved.Mod->Imports.insert(ResolvedMod);
continue;
+ case UnresolvedModuleRef::Affecting:
+ if (ResolvedMod)
+ Unresolved.Mod->AffectingClangModules.insert(ResolvedMod);
+ continue;
+
case UnresolvedModuleRef::Export:
if (ResolvedMod || Unresolved.IsWildcard)
Unresolved.Mod->Exports.push_back(
@@ -4408,10 +4611,6 @@ ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
}
UnresolvedModuleRefs.clear();
- if (Imported)
- Imported->append(ImportedModules.begin(),
- ImportedModules.end());
-
// FIXME: How do we load the 'use'd modules? They may not be submodules.
// Might be unnecessary as use declarations are only used to build the
// module itself.
@@ -4447,18 +4646,16 @@ ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
}
}
- if (PP.getHeaderSearchInfo()
- .getHeaderSearchOpts()
- .ModulesValidateOncePerBuildSession) {
+ HeaderSearchOptions &HSOpts = PP.getHeaderSearchInfo().getHeaderSearchOpts();
+ if (HSOpts.ModulesValidateOncePerBuildSession) {
// Now we are certain that the module and all modules it depends on are
- // up to date. Create or update timestamp files for modules that are
- // located in the module cache (not for PCH files that could be anywhere
- // in the filesystem).
+ // up-to-date. For implicitly-built module files, ensure the corresponding
+ // timestamp files are up-to-date in this build session.
for (unsigned I = 0, N = Loaded.size(); I != N; ++I) {
ImportedModule &M = Loaded[I];
- if (M.Mod->Kind == MK_ImplicitModule) {
+ if (M.Mod->Kind == MK_ImplicitModule &&
+ M.Mod->InputFilesValidationTimestamp < HSOpts.BuildSessionTimestamp)
updateModuleTimestamp(*M.Mod);
- }
}
}
@@ -4638,12 +4835,6 @@ ASTReader::ReadASTCore(StringRef FileName,
ShouldFinalizePCM = true;
return Success;
- case UNHASHED_CONTROL_BLOCK_ID:
- // This block is handled using look-ahead during ReadControlBlock. We
- // shouldn't get here!
- Error("malformed block record in AST file");
- return Failure;
-
default:
if (llvm::Error Err = Stream.SkipBlock()) {
Error(std::move(Err));
@@ -4754,20 +4945,27 @@ ASTReader::ASTReadResult ASTReader::readUnhashedControlBlockImpl(
// Read and process a record.
Record.clear();
- Expected<unsigned> MaybeRecordType = Stream.readRecord(Entry.ID, Record);
+ StringRef Blob;
+ Expected<unsigned> MaybeRecordType =
+ Stream.readRecord(Entry.ID, Record, &Blob);
if (!MaybeRecordType) {
// FIXME this drops the error.
return Failure;
}
switch ((UnhashedControlBlockRecordTypes)MaybeRecordType.get()) {
case SIGNATURE:
- if (F)
- F->Signature = ASTFileSignature::create(Record.begin(), Record.end());
+ if (F) {
+ F->Signature = ASTFileSignature::create(Blob.begin(), Blob.end());
+ assert(F->Signature != ASTFileSignature::createDummy() &&
+ "Dummy AST file signature not backpatched in ASTWriter.");
+ }
break;
case AST_BLOCK_HASH:
- if (F)
- F->ASTBlockHash =
- ASTFileSignature::create(Record.begin(), Record.end());
+ if (F) {
+ F->ASTBlockHash = ASTFileSignature::create(Blob.begin(), Blob.end());
+ assert(F->ASTBlockHash != ASTFileSignature::createDummy() &&
+ "Dummy AST block hash not backpatched in ASTWriter.");
+ }
break;
case DIAGNOSTIC_OPTIONS: {
bool Complain = (ClientLoadCapabilities & ARR_OutOfDate) == 0;
@@ -4777,6 +4975,13 @@ ASTReader::ASTReadResult ASTReader::readUnhashedControlBlockImpl(
Result = OutOfDate; // Don't return early. Read the signature.
break;
}
+ case HEADER_SEARCH_PATHS: {
+ bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0;
+ if (!AllowCompatibleConfigurationMismatch &&
+ ParseHeaderSearchPaths(Record, Complain, *Listener))
+ Result = ConfigurationMismatch;
+ break;
+ }
case DIAG_PRAGMA_MAPPINGS:
if (!F)
break;
@@ -4786,6 +4991,17 @@ ASTReader::ASTReadResult ASTReader::readUnhashedControlBlockImpl(
F->PragmaDiagMappings.insert(F->PragmaDiagMappings.end(),
Record.begin(), Record.end());
break;
+ case HEADER_SEARCH_ENTRY_USAGE:
+ if (!F)
+ break;
+ unsigned Count = Record[0];
+ const char *Byte = Blob.data();
+ F->SearchPathUsage = llvm::BitVector(Count, false);
+ for (unsigned I = 0; I < Count; ++Byte)
+ for (unsigned Bit = 0; Bit < 8 && I < Count; ++Bit, ++I)
+ if (*Byte & (1 << Bit))
+ F->SearchPathUsage[I] = true;
+ break;
}
}
}
@@ -4811,32 +5027,26 @@ static bool parseModuleFileExtensionMetadata(
return false;
}
-ASTReader::ASTReadResult ASTReader::ReadExtensionBlock(ModuleFile &F) {
+llvm::Error ASTReader::ReadExtensionBlock(ModuleFile &F) {
BitstreamCursor &Stream = F.Stream;
RecordData Record;
while (true) {
Expected<llvm::BitstreamEntry> MaybeEntry = Stream.advance();
- if (!MaybeEntry) {
- Error(MaybeEntry.takeError());
- return Failure;
- }
+ if (!MaybeEntry)
+ return MaybeEntry.takeError();
llvm::BitstreamEntry Entry = MaybeEntry.get();
switch (Entry.Kind) {
case llvm::BitstreamEntry::SubBlock:
- if (llvm::Error Err = Stream.SkipBlock()) {
- Error(std::move(Err));
- return Failure;
- }
+ if (llvm::Error Err = Stream.SkipBlock())
+ return Err;
continue;
-
case llvm::BitstreamEntry::EndBlock:
- return Success;
-
+ return llvm::Error::success();
case llvm::BitstreamEntry::Error:
- return HadErrors;
-
+ return llvm::createStringError(std::errc::illegal_byte_sequence,
+ "malformed block record in AST file");
case llvm::BitstreamEntry::Record:
break;
}
@@ -4845,17 +5055,15 @@ ASTReader::ASTReadResult ASTReader::ReadExtensionBlock(ModuleFile &F) {
StringRef Blob;
Expected<unsigned> MaybeRecCode =
Stream.readRecord(Entry.ID, Record, &Blob);
- if (!MaybeRecCode) {
- Error(MaybeRecCode.takeError());
- return Failure;
- }
+ if (!MaybeRecCode)
+ return MaybeRecCode.takeError();
switch (MaybeRecCode.get()) {
case EXTENSION_METADATA: {
ModuleFileExtensionMetadata Metadata;
- if (parseModuleFileExtensionMetadata(Record, Blob, Metadata)) {
- Error("malformed EXTENSION_METADATA in AST file");
- return Failure;
- }
+ if (parseModuleFileExtensionMetadata(Record, Blob, Metadata))
+ return llvm::createStringError(
+ std::errc::illegal_byte_sequence,
+ "malformed EXTENSION_METADATA in AST file");
// Find a module file extension with this block name.
auto Known = ModuleFileExtensions.find(Metadata.BlockName);
@@ -4872,7 +5080,7 @@ ASTReader::ASTReadResult ASTReader::ReadExtensionBlock(ModuleFile &F) {
}
}
- return Success;
+ return llvm::Error::success();
}
void ASTReader::InitializeContext() {
@@ -5002,7 +5210,7 @@ void ASTReader::InitializeContext() {
// Re-export any modules that were imported by a non-module AST file.
// FIXME: This does not make macro-only imports visible again.
- for (auto &Import : ImportedModules) {
+ for (auto &Import : PendingImportedModules) {
if (Module *Imported = getSubmodule(Import.ID)) {
makeModuleVisible(Imported, Module::AllVisible,
/*ImportLoc=*/Import.ImportLoc);
@@ -5012,6 +5220,10 @@ void ASTReader::InitializeContext() {
// nullptr here, we do the same later, in UpdateSema().
}
}
+
+ // Hand off these modules to Sema.
+ PendingImportedModulesSema.append(PendingImportedModules);
+ PendingImportedModules.clear();
}
void ASTReader::finalizeForWriting() {
@@ -5055,9 +5267,12 @@ static ASTFileSignature readASTFileSignature(StringRef PCH) {
consumeError(MaybeRecord.takeError());
return ASTFileSignature();
}
- if (SIGNATURE == MaybeRecord.get())
- return ASTFileSignature::create(Record.begin(),
- Record.begin() + ASTFileSignature::size);
+ if (SIGNATURE == MaybeRecord.get()) {
+ auto Signature = ASTFileSignature::create(Blob.begin(), Blob.end());
+ assert(Signature != ASTFileSignature::createDummy() &&
+ "Dummy AST file signature not backpatched in ASTWriter.");
+ return Signature;
+ }
}
}
@@ -5068,7 +5283,8 @@ std::string ASTReader::getOriginalSourceFile(
const std::string &ASTFileName, FileManager &FileMgr,
const PCHContainerReader &PCHContainerRdr, DiagnosticsEngine &Diags) {
// Open the AST file.
- auto Buffer = FileMgr.getBufferForFile(ASTFileName);
+ auto Buffer = FileMgr.getBufferForFile(ASTFileName, /*IsVolatile=*/false,
+ /*RequiresNullTerminator=*/false);
if (!Buffer) {
Diags.Report(diag::err_fe_unable_to_read_pch_file)
<< ASTFileName << Buffer.getError().message();
@@ -5131,16 +5347,19 @@ namespace {
const PreprocessorOptions &ExistingPPOpts;
std::string ExistingModuleCachePath;
FileManager &FileMgr;
+ bool StrictOptionMatches;
public:
SimplePCHValidator(const LangOptions &ExistingLangOpts,
const TargetOptions &ExistingTargetOpts,
const PreprocessorOptions &ExistingPPOpts,
- StringRef ExistingModuleCachePath, FileManager &FileMgr)
+ StringRef ExistingModuleCachePath, FileManager &FileMgr,
+ bool StrictOptionMatches)
: ExistingLangOpts(ExistingLangOpts),
ExistingTargetOpts(ExistingTargetOpts),
ExistingPPOpts(ExistingPPOpts),
- ExistingModuleCachePath(ExistingModuleCachePath), FileMgr(FileMgr) {}
+ ExistingModuleCachePath(ExistingModuleCachePath), FileMgr(FileMgr),
+ StrictOptionMatches(StrictOptionMatches) {}
bool ReadLanguageOptions(const LangOptions &LangOpts, bool Complain,
bool AllowCompatibleDifferences) override {
@@ -5163,10 +5382,13 @@ namespace {
}
bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
- bool Complain,
+ bool ReadMacros, bool Complain,
std::string &SuggestedPredefines) override {
- return checkPreprocessorOptions(ExistingPPOpts, PPOpts, nullptr, FileMgr,
- SuggestedPredefines, ExistingLangOpts);
+ return checkPreprocessorOptions(
+ PPOpts, ExistingPPOpts, ReadMacros, /*Diags=*/nullptr, FileMgr,
+ SuggestedPredefines, ExistingLangOpts,
+ StrictOptionMatches ? OptionValidateStrictMatches
+ : OptionValidateContradictions);
}
};
@@ -5174,19 +5396,28 @@ namespace {
bool ASTReader::readASTFileControlBlock(
StringRef Filename, FileManager &FileMgr,
- const PCHContainerReader &PCHContainerRdr,
- bool FindModuleFileExtensions,
+ const InMemoryModuleCache &ModuleCache,
+ const PCHContainerReader &PCHContainerRdr, bool FindModuleFileExtensions,
ASTReaderListener &Listener, bool ValidateDiagnosticOptions) {
// Open the AST file.
- // FIXME: This allows use of the VFS; we do not allow use of the
- // VFS when actually loading a module.
- auto Buffer = FileMgr.getBufferForFile(Filename);
+ std::unique_ptr<llvm::MemoryBuffer> OwnedBuffer;
+ llvm::MemoryBuffer *Buffer = ModuleCache.lookupPCM(Filename);
if (!Buffer) {
- return true;
+ // FIXME: We should add the pcm to the InMemoryModuleCache if it could be
+ // read again later, but we do not have the context here to determine if it
+ // is safe to change the result of InMemoryModuleCache::getPCMState().
+
+ // FIXME: This allows use of the VFS; we do not allow use of the
+ // VFS when actually loading a module.
+ auto BufferOrErr = FileMgr.getBufferForFile(Filename);
+ if (!BufferOrErr)
+ return true;
+ OwnedBuffer = std::move(*BufferOrErr);
+ Buffer = OwnedBuffer.get();
}
// Initialize the stream
- StringRef Bytes = PCHContainerRdr.ExtractPCH(**Buffer);
+ StringRef Bytes = PCHContainerRdr.ExtractPCH(*Buffer);
BitstreamCursor Stream(Bytes);
// Sniff for the signature.
@@ -5203,6 +5434,7 @@ bool ASTReader::readASTFileControlBlock(
bool NeedsSystemInputFiles = Listener.needsSystemInputFileVisitation();
bool NeedsImports = Listener.needsImportVisitation();
BitstreamCursor InputFilesCursor;
+ uint64_t InputFilesOffsetBase = 0;
RecordData Record;
std::string ModuleDir;
@@ -5238,6 +5470,7 @@ bool ASTReader::readASTFileControlBlock(
if (NeedsInputFiles &&
ReadBlockAbbrevs(InputFilesCursor, INPUT_FILES_BLOCK_ID))
return true;
+ InputFilesOffsetBase = InputFilesCursor.GetCurrentBitNo();
break;
default:
@@ -5310,7 +5543,8 @@ bool ASTReader::readASTFileControlBlock(
BitstreamCursor &Cursor = InputFilesCursor;
SavedStreamPosition SavedPosition(Cursor);
- if (llvm::Error Err = Cursor.JumpToBit(InputFileOffs[I])) {
+ if (llvm::Error Err =
+ Cursor.JumpToBit(InputFilesOffsetBase + InputFileOffs[I])) {
// FIXME this drops errors on the floor.
consumeError(std::move(Err));
}
@@ -5355,9 +5589,24 @@ bool ASTReader::readASTFileControlBlock(
unsigned Idx = 0, N = Record.size();
while (Idx < N) {
// Read information about the AST file.
- Idx +=
- 1 + 1 + 1 + 1 +
- ASTFileSignature::size; // Kind, ImportLoc, Size, ModTime, Signature
+
+ // Skip Kind
+ Idx++;
+ bool IsStandardCXXModule = Record[Idx++];
+
+ // Skip ImportLoc
+ Idx++;
+
+ // In C++20 Modules, we don't record the path to imported
+ // modules in the BMI files.
+ if (IsStandardCXXModule) {
+ std::string ModuleName = ReadString(Record, Idx);
+ Listener.visitImport(ModuleName, /*Filename=*/"");
+ continue;
+ }
+
+ // Skip Size, ModTime and Signature
+ Idx += 1 + 1 + ASTFileSignature::size;
std::string ModuleName = ReadString(Record, Idx);
std::string Filename = ReadString(Record, Idx);
ResolveImportedPath(Filename, ModuleDir);
@@ -5439,26 +5688,27 @@ bool ASTReader::readASTFileControlBlock(
}
bool ASTReader::isAcceptableASTFile(StringRef Filename, FileManager &FileMgr,
+ const InMemoryModuleCache &ModuleCache,
const PCHContainerReader &PCHContainerRdr,
const LangOptions &LangOpts,
const TargetOptions &TargetOpts,
const PreprocessorOptions &PPOpts,
- StringRef ExistingModuleCachePath) {
+ StringRef ExistingModuleCachePath,
+ bool RequireStrictOptionMatches) {
SimplePCHValidator validator(LangOpts, TargetOpts, PPOpts,
- ExistingModuleCachePath, FileMgr);
- return !readASTFileControlBlock(Filename, FileMgr, PCHContainerRdr,
- /*FindModuleFileExtensions=*/false,
- validator,
+ ExistingModuleCachePath, FileMgr,
+ RequireStrictOptionMatches);
+ return !readASTFileControlBlock(Filename, FileMgr, ModuleCache,
+ PCHContainerRdr,
+ /*FindModuleFileExtensions=*/false, validator,
/*ValidateDiagnosticOptions=*/true);
}
-ASTReader::ASTReadResult
-ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
+llvm::Error ASTReader::ReadSubmoduleBlock(ModuleFile &F,
+ unsigned ClientLoadCapabilities) {
// Enter the submodule block.
- if (llvm::Error Err = F.Stream.EnterSubBlock(SUBMODULE_BLOCK_ID)) {
- Error(std::move(Err));
- return Failure;
- }
+ if (llvm::Error Err = F.Stream.EnterSubBlock(SUBMODULE_BLOCK_ID))
+ return Err;
ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();
bool First = true;
@@ -5467,19 +5717,17 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
while (true) {
Expected<llvm::BitstreamEntry> MaybeEntry =
F.Stream.advanceSkippingSubblocks();
- if (!MaybeEntry) {
- Error(MaybeEntry.takeError());
- return Failure;
- }
+ if (!MaybeEntry)
+ return MaybeEntry.takeError();
llvm::BitstreamEntry Entry = MaybeEntry.get();
switch (Entry.Kind) {
case llvm::BitstreamEntry::SubBlock: // Handled for us already.
case llvm::BitstreamEntry::Error:
- Error("malformed block record in AST file");
- return Failure;
+ return llvm::createStringError(std::errc::illegal_byte_sequence,
+ "malformed block record in AST file");
case llvm::BitstreamEntry::EndBlock:
- return Success;
+ return llvm::Error::success();
case llvm::BitstreamEntry::Record:
// The interesting case.
break;
@@ -5489,16 +5737,14 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
StringRef Blob;
Record.clear();
Expected<unsigned> MaybeKind = F.Stream.readRecord(Entry.ID, Record, &Blob);
- if (!MaybeKind) {
- Error(MaybeKind.takeError());
- return Failure;
- }
+ if (!MaybeKind)
+ return MaybeKind.takeError();
unsigned Kind = MaybeKind.get();
- if ((Kind == SUBMODULE_METADATA) != First) {
- Error("submodule metadata record should be at beginning of block");
- return Failure;
- }
+ if ((Kind == SUBMODULE_METADATA) != First)
+ return llvm::createStringError(
+ std::errc::illegal_byte_sequence,
+ "submodule metadata record should be at beginning of block");
First = false;
// Submodule information is only valid if we have a current module.
@@ -5512,16 +5758,16 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
case SUBMODULE_DEFINITION: {
- if (Record.size() < 12) {
- Error("malformed module definition");
- return Failure;
- }
+ if (Record.size() < 13)
+ return llvm::createStringError(std::errc::illegal_byte_sequence,
+ "malformed module definition");
StringRef Name = Blob;
unsigned Idx = 0;
SubmoduleID GlobalID = getGlobalSubmoduleID(F, Record[Idx++]);
SubmoduleID Parent = getGlobalSubmoduleID(F, Record[Idx++]);
Module::ModuleKind Kind = (Module::ModuleKind)Record[Idx++];
+ SourceLocation DefinitionLoc = ReadSourceLocation(F, Record[Idx++]);
bool IsFramework = Record[Idx++];
bool IsExplicit = Record[Idx++];
bool IsSystem = Record[Idx++];
@@ -5531,6 +5777,7 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
bool InferExportWildcard = Record[Idx++];
bool ConfigMacrosExhaustive = Record[Idx++];
bool ModuleMapIsPrivate = Record[Idx++];
+ bool NamedModuleHasInit = Record[Idx++];
Module *ParentModule = nullptr;
if (Parent)
@@ -5542,26 +5789,26 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
ModMap.findOrCreateModule(Name, ParentModule, IsFramework, IsExplicit)
.first;
- // FIXME: set the definition loc for CurrentModule, or call
- // ModMap.setInferredModuleAllowedBy()
+ // FIXME: Call ModMap.setInferredModuleAllowedBy()
SubmoduleID GlobalIndex = GlobalID - NUM_PREDEF_SUBMODULE_IDS;
if (GlobalIndex >= SubmodulesLoaded.size() ||
- SubmodulesLoaded[GlobalIndex]) {
- Error("too many submodules");
- return Failure;
- }
+ SubmodulesLoaded[GlobalIndex])
+ return llvm::createStringError(std::errc::invalid_argument,
+ "too many submodules");
if (!ParentModule) {
- if (const FileEntry *CurFile = CurrentModule->getASTFile()) {
+ if (OptionalFileEntryRef CurFile = CurrentModule->getASTFile()) {
// Don't emit module relocation error if we have -fno-validate-pch
if (!bool(PP.getPreprocessorOpts().DisablePCHOrModuleValidation &
DisableValidationForModuleKind::Module) &&
CurFile != F.File) {
- Error(diag::err_module_file_conflict,
- CurrentModule->getTopLevelModuleName(), CurFile->getName(),
- F.File->getName());
- return Failure;
+ auto ConflictError =
+ PartialDiagnostic(diag::err_module_file_conflict,
+ ContextObj->DiagAllocator)
+ << CurrentModule->getTopLevelModuleName() << CurFile->getName()
+ << F.File.getName();
+ return DiagnosticError::create(CurrentImportLoc, ConflictError);
}
}
@@ -5571,6 +5818,7 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
CurrentModule->Kind = Kind;
+ CurrentModule->DefinitionLoc = DefinitionLoc;
CurrentModule->Signature = F.Signature;
CurrentModule->IsFromModuleFile = true;
CurrentModule->IsSystem = IsSystem || CurrentModule->IsSystem;
@@ -5580,6 +5828,7 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
CurrentModule->InferExportWildcard = InferExportWildcard;
CurrentModule->ConfigMacrosExhaustive = ConfigMacrosExhaustive;
CurrentModule->ModuleMapIsPrivate = ModuleMapIsPrivate;
+ CurrentModule->NamedModuleHasInit = NamedModuleHasInit;
if (DeserializationListener)
DeserializationListener->ModuleRead(GlobalID, CurrentModule);
@@ -5605,17 +5854,20 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
case SUBMODULE_UMBRELLA_HEADER: {
+ // FIXME: This doesn't work for framework modules as `Filename` is the
+ // name as written in the module file and does not include
+ // `Headers/`, so this path will never exist.
std::string Filename = std::string(Blob);
ResolveImportedPath(F, Filename);
- if (auto Umbrella = PP.getFileManager().getFile(Filename)) {
- if (!CurrentModule->getUmbrellaHeader())
+ if (auto Umbrella = PP.getFileManager().getOptionalFileRef(Filename)) {
+ if (!CurrentModule->getUmbrellaHeaderAsWritten()) {
// FIXME: NameAsWritten
- ModMap.setUmbrellaHeader(CurrentModule, *Umbrella, Blob, "");
- else if (CurrentModule->getUmbrellaHeader().Entry != *Umbrella) {
- if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
- Error("mismatched umbrella headers in submodule");
- return OutOfDate;
+ ModMap.setUmbrellaHeaderAsWritten(CurrentModule, *Umbrella, Blob, "");
}
+ // Note that it's too late at this point to return out of date if the
+ // name from the PCM doesn't match up with the one in the module map,
+ // but also quite unlikely since we will have already checked the
+ // modification time and size of the module map file itself.
}
break;
}
@@ -5634,21 +5886,22 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
// them here.
break;
- case SUBMODULE_TOPHEADER:
- CurrentModule->addTopHeaderFilename(Blob);
+ case SUBMODULE_TOPHEADER: {
+ std::string HeaderName(Blob);
+ ResolveImportedPath(F, HeaderName);
+ CurrentModule->addTopHeaderFilename(HeaderName);
break;
+ }
case SUBMODULE_UMBRELLA_DIR: {
+ // See comments in SUBMODULE_UMBRELLA_HEADER
std::string Dirname = std::string(Blob);
ResolveImportedPath(F, Dirname);
- if (auto Umbrella = PP.getFileManager().getDirectory(Dirname)) {
- if (!CurrentModule->getUmbrellaDir())
+ if (auto Umbrella =
+ PP.getFileManager().getOptionalDirectoryRef(Dirname)) {
+ if (!CurrentModule->getUmbrellaDirAsWritten()) {
// FIXME: NameAsWritten
- ModMap.setUmbrellaDir(CurrentModule, *Umbrella, Blob, "");
- else if (CurrentModule->getUmbrellaDir().Entry != *Umbrella) {
- if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
- Error("mismatched umbrella directories in submodule");
- return OutOfDate;
+ ModMap.setUmbrellaDirAsWritten(CurrentModule, *Umbrella, Blob, "");
}
}
break;
@@ -5686,6 +5939,18 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
break;
+ case SUBMODULE_AFFECTING_MODULES:
+ for (unsigned Idx = 0; Idx != Record.size(); ++Idx) {
+ UnresolvedModuleRef Unresolved;
+ Unresolved.File = &F;
+ Unresolved.Mod = CurrentModule;
+ Unresolved.ID = Record[Idx];
+ Unresolved.Kind = UnresolvedModuleRef::Affecting;
+ Unresolved.IsWildcard = false;
+ UnresolvedModuleRefs.push_back(Unresolved);
+ }
+ break;
+
case SUBMODULE_EXPORTS:
for (unsigned Idx = 0; Idx + 1 < Record.size(); Idx += 2) {
UnresolvedModuleRef Unresolved;
@@ -5848,6 +6113,28 @@ bool ASTReader::ParseHeaderSearchOptions(const RecordData &Record,
unsigned Idx = 0;
HSOpts.Sysroot = ReadString(Record, Idx);
+ HSOpts.ResourceDir = ReadString(Record, Idx);
+ HSOpts.ModuleCachePath = ReadString(Record, Idx);
+ HSOpts.ModuleUserBuildPath = ReadString(Record, Idx);
+ HSOpts.DisableModuleHash = Record[Idx++];
+ HSOpts.ImplicitModuleMaps = Record[Idx++];
+ HSOpts.ModuleMapFileHomeIsCwd = Record[Idx++];
+ HSOpts.EnablePrebuiltImplicitModules = Record[Idx++];
+ HSOpts.UseBuiltinIncludes = Record[Idx++];
+ HSOpts.UseStandardSystemIncludes = Record[Idx++];
+ HSOpts.UseStandardCXXIncludes = Record[Idx++];
+ HSOpts.UseLibcxx = Record[Idx++];
+ std::string SpecificModuleCachePath = ReadString(Record, Idx);
+
+ return Listener.ReadHeaderSearchOptions(HSOpts, SpecificModuleCachePath,
+ Complain);
+}
+
+bool ASTReader::ParseHeaderSearchPaths(const RecordData &Record, bool Complain,
+ ASTReaderListener &Listener) {
+ HeaderSearchOptions HSOpts;
+ unsigned Idx = 0;
+
// Include entries.
for (unsigned N = Record[Idx++]; N; --N) {
std::string Path = ReadString(Record, Idx);
@@ -5866,21 +6153,13 @@ bool ASTReader::ParseHeaderSearchOptions(const RecordData &Record,
HSOpts.SystemHeaderPrefixes.emplace_back(std::move(Prefix), IsSystemHeader);
}
- HSOpts.ResourceDir = ReadString(Record, Idx);
- HSOpts.ModuleCachePath = ReadString(Record, Idx);
- HSOpts.ModuleUserBuildPath = ReadString(Record, Idx);
- HSOpts.DisableModuleHash = Record[Idx++];
- HSOpts.ImplicitModuleMaps = Record[Idx++];
- HSOpts.ModuleMapFileHomeIsCwd = Record[Idx++];
- HSOpts.EnablePrebuiltImplicitModules = Record[Idx++];
- HSOpts.UseBuiltinIncludes = Record[Idx++];
- HSOpts.UseStandardSystemIncludes = Record[Idx++];
- HSOpts.UseStandardCXXIncludes = Record[Idx++];
- HSOpts.UseLibcxx = Record[Idx++];
- std::string SpecificModuleCachePath = ReadString(Record, Idx);
+ // VFS overlay files.
+ for (unsigned N = Record[Idx++]; N; --N) {
+ std::string VFSOverlayFile = ReadString(Record, Idx);
+ HSOpts.VFSOverlayFiles.emplace_back(std::move(VFSOverlayFile));
+ }
- return Listener.ReadHeaderSearchOptions(HSOpts, SpecificModuleCachePath,
- Complain);
+ return Listener.ReadHeaderSearchPaths(HSOpts, Complain);
}
bool ASTReader::ParsePreprocessorOptions(const RecordData &Record,
@@ -5891,10 +6170,13 @@ bool ASTReader::ParsePreprocessorOptions(const RecordData &Record,
unsigned Idx = 0;
// Macro definitions/undefs
- for (unsigned N = Record[Idx++]; N; --N) {
- std::string Macro = ReadString(Record, Idx);
- bool IsUndef = Record[Idx++];
- PPOpts.Macros.push_back(std::make_pair(Macro, IsUndef));
+ bool ReadMacros = Record[Idx++];
+ if (ReadMacros) {
+ for (unsigned N = Record[Idx++]; N; --N) {
+ std::string Macro = ReadString(Record, Idx);
+ bool IsUndef = Record[Idx++];
+ PPOpts.Macros.push_back(std::make_pair(Macro, IsUndef));
+ }
}
// Includes
@@ -5913,7 +6195,7 @@ bool ASTReader::ParsePreprocessorOptions(const RecordData &Record,
PPOpts.ObjCXXARCStandardLibrary =
static_cast<ObjCXXARCStandardLibraryKind>(Record[Idx++]);
SuggestedPredefines.clear();
- return Listener.ReadPreprocessorOptions(PPOpts, Complain,
+ return Listener.ReadPreprocessorOptions(PPOpts, ReadMacros, Complain,
SuggestedPredefines);
}
@@ -6046,10 +6328,9 @@ PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) {
case PPD_INCLUSION_DIRECTIVE: {
const char *FullFileNameStart = Blob.data() + Record[0];
StringRef FullFileName(FullFileNameStart, Blob.size() - Record[0]);
- const FileEntry *File = nullptr;
+ OptionalFileEntryRef File;
if (!FullFileName.empty())
- if (auto FE = PP.getFileManager().getFile(FullFileName))
- File = *FE;
+ File = PP.getFileManager().getOptionalFileRef(FullFileName);
// FIXME: Stable encoding
InclusionDirective::InclusionKind Kind
@@ -6186,8 +6467,8 @@ std::pair<unsigned, unsigned>
/// Optionally returns true or false if the preallocated preprocessed
/// entity with index \arg Index came from file \arg FID.
-Optional<bool> ASTReader::isPreprocessedEntityInFileID(unsigned Index,
- FileID FID) {
+std::optional<bool> ASTReader::isPreprocessedEntityInFileID(unsigned Index,
+ FileID FID) {
if (FID.isInvalid())
return false;
@@ -6210,11 +6491,11 @@ namespace {
/// Visitor used to search for information about a header file.
class HeaderFileInfoVisitor {
- const FileEntry *FE;
- Optional<HeaderFileInfo> HFI;
+ FileEntryRef FE;
+ std::optional<HeaderFileInfo> HFI;
public:
- explicit HeaderFileInfoVisitor(const FileEntry *FE) : FE(FE) {}
+ explicit HeaderFileInfoVisitor(FileEntryRef FE) : FE(FE) {}
bool operator()(ModuleFile &M) {
HeaderFileInfoLookupTable *Table
@@ -6231,16 +6512,16 @@ namespace {
return true;
}
- Optional<HeaderFileInfo> getHeaderFileInfo() const { return HFI; }
+ std::optional<HeaderFileInfo> getHeaderFileInfo() const { return HFI; }
};
} // namespace
-HeaderFileInfo ASTReader::GetHeaderFileInfo(const FileEntry *FE) {
+HeaderFileInfo ASTReader::GetHeaderFileInfo(FileEntryRef FE) {
HeaderFileInfoVisitor Visitor(FE);
ModuleMgr.visit(Visitor);
- if (Optional<HeaderFileInfo> HFI = Visitor.getHeaderFileInfo())
- return *HFI;
+ if (std::optional<HeaderFileInfo> HFI = Visitor.getHeaderFileInfo())
+ return *HFI;
return HeaderFileInfo();
}
@@ -6257,9 +6538,8 @@ void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) {
DiagStates.clear();
- auto ReadDiagState =
- [&](const DiagState &BasedOn, SourceLocation Loc,
- bool IncludeNonPragmaStates) -> DiagnosticsEngine::DiagState * {
+ auto ReadDiagState = [&](const DiagState &BasedOn,
+ bool IncludeNonPragmaStates) {
unsigned BackrefID = Record[Idx++];
if (BackrefID != 0)
return DiagStates[BackrefID - 1];
@@ -6320,7 +6600,7 @@ void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) {
Initial.EnableAllWarnings = Flags & 1; Flags >>= 1;
Initial.IgnoreAllWarnings = Flags & 1; Flags >>= 1;
Initial.ExtBehavior = (diag::Severity)Flags;
- FirstState = ReadDiagState(Initial, SourceLocation(), true);
+ FirstState = ReadDiagState(Initial, true);
assert(F.OriginalSourceFileID.isValid());
@@ -6333,8 +6613,7 @@ void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) {
// For prefix ASTs, start with whatever the user configured on the
// command line.
Idx++; // Skip flags.
- FirstState = ReadDiagState(*Diag.DiagStatesByLoc.CurDiagState,
- SourceLocation(), false);
+ FirstState = ReadDiagState(*Diag.DiagStatesByLoc.CurDiagState, false);
}
// Read the state transitions.
@@ -6356,8 +6635,7 @@ void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) {
F.StateTransitions.reserve(F.StateTransitions.size() + Transitions);
for (unsigned I = 0; I != Transitions; ++I) {
unsigned Offset = Record[Idx++];
- auto *State =
- ReadDiagState(*FirstState, Loc.getLocWithOffset(Offset), false);
+ auto *State = ReadDiagState(*FirstState, false);
F.StateTransitions.push_back({State, Offset});
}
}
@@ -6365,9 +6643,8 @@ void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) {
// Read the final state.
assert(Idx < Record.size() &&
"Invalid data, missing final pragma diagnostic state");
- SourceLocation CurStateLoc =
- ReadSourceLocation(F, F.PragmaDiagMappings[Idx++]);
- auto *CurState = ReadDiagState(*FirstState, CurStateLoc, false);
+ SourceLocation CurStateLoc = ReadSourceLocation(F, Record[Idx++]);
+ auto *CurState = ReadDiagState(*FirstState, false);
if (!F.isModule()) {
Diag.DiagStatesByLoc.CurDiagState = CurState;
@@ -6398,12 +6675,13 @@ ASTReader::RecordLocation ASTReader::TypeCursorForIndex(unsigned Index) {
M->DeclsBlockStartOffset);
}
-static llvm::Optional<Type::TypeClass> getTypeClassForCode(TypeCode code) {
+static std::optional<Type::TypeClass> getTypeClassForCode(TypeCode code) {
switch (code) {
#define TYPE_BIT_CODE(CLASS_ID, CODE_ID, CODE_VALUE) \
case TYPE_##CODE_ID: return Type::CLASS_ID;
#include "clang/Serialization/TypeBitCodes.def"
- default: return llvm::None;
+ default:
+ return std::nullopt;
}
}
@@ -6463,11 +6741,13 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
namespace clang {
class TypeLocReader : public TypeLocVisitor<TypeLocReader> {
+ using LocSeq = SourceLocationSequence;
+
ASTRecordReader &Reader;
+ LocSeq *Seq;
- SourceLocation readSourceLocation() {
- return Reader.readSourceLocation();
- }
+ SourceLocation readSourceLocation() { return Reader.readSourceLocation(Seq); }
+ SourceRange readSourceRange() { return Reader.readSourceRange(Seq); }
TypeSourceInfo *GetTypeSourceInfo() {
return Reader.readTypeSourceInfo();
@@ -6482,7 +6762,8 @@ class TypeLocReader : public TypeLocVisitor<TypeLocReader> {
}
public:
- TypeLocReader(ASTRecordReader &Reader) : Reader(Reader) {}
+ TypeLocReader(ASTRecordReader &Reader, LocSeq *Seq)
+ : Reader(Reader), Seq(Seq) {}
// We want compile-time assurance that we've enumerated all of
// these, so unfortunately we have to declare them first, then
@@ -6579,7 +6860,7 @@ void TypeLocReader::VisitDependentAddressSpaceTypeLoc(
DependentAddressSpaceTypeLoc TL) {
TL.setAttrNameLoc(readSourceLocation());
- TL.setAttrOperandParensRange(Reader.readSourceRange());
+ TL.setAttrOperandParensRange(readSourceRange());
TL.setAttrExprOperand(Reader.readExpr());
}
@@ -6603,7 +6884,7 @@ void TypeLocReader::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
void TypeLocReader::VisitConstantMatrixTypeLoc(ConstantMatrixTypeLoc TL) {
TL.setAttrNameLoc(readSourceLocation());
- TL.setAttrOperandParensRange(Reader.readSourceRange());
+ TL.setAttrOperandParensRange(readSourceRange());
TL.setAttrRowOperand(Reader.readExpr());
TL.setAttrColumnOperand(Reader.readExpr());
}
@@ -6611,7 +6892,7 @@ void TypeLocReader::VisitConstantMatrixTypeLoc(ConstantMatrixTypeLoc TL) {
void TypeLocReader::VisitDependentSizedMatrixTypeLoc(
DependentSizedMatrixTypeLoc TL) {
TL.setAttrNameLoc(readSourceLocation());
- TL.setAttrOperandParensRange(Reader.readSourceRange());
+ TL.setAttrOperandParensRange(readSourceRange());
TL.setAttrRowOperand(Reader.readExpr());
TL.setAttrColumnOperand(Reader.readExpr());
}
@@ -6620,7 +6901,7 @@ void TypeLocReader::VisitFunctionTypeLoc(FunctionTypeLoc TL) {
TL.setLocalRangeBegin(readSourceLocation());
TL.setLParenLoc(readSourceLocation());
TL.setRParenLoc(readSourceLocation());
- TL.setExceptionSpecRange(Reader.readSourceRange());
+ TL.setExceptionSpecRange(readSourceRange());
TL.setLocalRangeEnd(readSourceLocation());
for (unsigned i = 0, e = TL.getNumParams(); i != e; ++i) {
TL.setParam(i, Reader.readDeclAs<ParmVarDecl>());
@@ -6639,6 +6920,10 @@ void TypeLocReader::VisitUnresolvedUsingTypeLoc(UnresolvedUsingTypeLoc TL) {
TL.setNameLoc(readSourceLocation());
}
+void TypeLocReader::VisitUsingTypeLoc(UsingTypeLoc TL) {
+ TL.setNameLoc(readSourceLocation());
+}
+
void TypeLocReader::VisitTypedefTypeLoc(TypedefTypeLoc TL) {
TL.setNameLoc(readSourceLocation());
}
@@ -6653,11 +6938,12 @@ void TypeLocReader::VisitTypeOfTypeLoc(TypeOfTypeLoc TL) {
TL.setTypeofLoc(readSourceLocation());
TL.setLParenLoc(readSourceLocation());
TL.setRParenLoc(readSourceLocation());
- TL.setUnderlyingTInfo(GetTypeSourceInfo());
+ TL.setUnmodifiedTInfo(GetTypeSourceInfo());
}
void TypeLocReader::VisitDecltypeTypeLoc(DecltypeTypeLoc TL) {
- TL.setNameLoc(readSourceLocation());
+ TL.setDecltypeLoc(readSourceLocation());
+ TL.setRParenLoc(readSourceLocation());
}
void TypeLocReader::VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) {
@@ -6667,19 +6953,24 @@ void TypeLocReader::VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) {
TL.setUnderlyingTInfo(GetTypeSourceInfo());
}
+ConceptReference *ASTRecordReader::readConceptReference() {
+ auto NNS = readNestedNameSpecifierLoc();
+ auto TemplateKWLoc = readSourceLocation();
+ auto ConceptNameLoc = readDeclarationNameInfo();
+ auto FoundDecl = readDeclAs<NamedDecl>();
+ auto NamedConcept = readDeclAs<ConceptDecl>();
+ auto *CR = ConceptReference::Create(
+ getContext(), NNS, TemplateKWLoc, ConceptNameLoc, FoundDecl, NamedConcept,
+ (readBool() ? readASTTemplateArgumentListInfo() : nullptr));
+ return CR;
+}
+
void TypeLocReader::VisitAutoTypeLoc(AutoTypeLoc TL) {
TL.setNameLoc(readSourceLocation());
- if (Reader.readBool()) {
- TL.setNestedNameSpecifierLoc(ReadNestedNameSpecifierLoc());
- TL.setTemplateKWLoc(readSourceLocation());
- TL.setConceptNameLoc(readSourceLocation());
- TL.setFoundDecl(Reader.readDeclAs<NamedDecl>());
- TL.setLAngleLoc(readSourceLocation());
- TL.setRAngleLoc(readSourceLocation());
- for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
- TL.setArgLocInfo(i, Reader.readTemplateArgumentLocInfo(
- TL.getTypePtr()->getArg(i).getKind()));
- }
+ if (Reader.readBool())
+ TL.setConceptReference(Reader.readConceptReference());
+ if (Reader.readBool())
+ TL.setRParenLoc(readSourceLocation());
}
void TypeLocReader::VisitDeducedTemplateSpecializationTypeLoc(
@@ -6699,6 +6990,10 @@ void TypeLocReader::VisitAttributedTypeLoc(AttributedTypeLoc TL) {
TL.setAttr(ReadAttr());
}
+void TypeLocReader::VisitBTFTagAttributedTypeLoc(BTFTagAttributedTypeLoc TL) {
+ // Nothing to do.
+}
+
void TypeLocReader::VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
TL.setNameLoc(readSourceLocation());
}
@@ -6720,10 +7015,9 @@ void TypeLocReader::VisitTemplateSpecializationTypeLoc(
TL.setLAngleLoc(readSourceLocation());
TL.setRAngleLoc(readSourceLocation());
for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
- TL.setArgLocInfo(
- i,
- Reader.readTemplateArgumentLocInfo(
- TL.getTypePtr()->getArg(i).getKind()));
+ TL.setArgLocInfo(i,
+ Reader.readTemplateArgumentLocInfo(
+ TL.getTypePtr()->template_arguments()[i].getKind()));
}
void TypeLocReader::VisitParenTypeLoc(ParenTypeLoc TL) {
@@ -6755,10 +7049,9 @@ void TypeLocReader::VisitDependentTemplateSpecializationTypeLoc(
TL.setLAngleLoc(readSourceLocation());
TL.setRAngleLoc(readSourceLocation());
for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I)
- TL.setArgLocInfo(
- I,
- Reader.readTemplateArgumentLocInfo(
- TL.getTypePtr()->getArg(I).getKind()));
+ TL.setArgLocInfo(I,
+ Reader.readTemplateArgumentLocInfo(
+ TL.getTypePtr()->template_arguments()[I].getKind()));
}
void TypeLocReader::VisitPackExpansionTypeLoc(PackExpansionTypeLoc TL) {
@@ -6767,6 +7060,7 @@ void TypeLocReader::VisitPackExpansionTypeLoc(PackExpansionTypeLoc TL) {
void TypeLocReader::VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
TL.setNameLoc(readSourceLocation());
+ TL.setNameEndLoc(readSourceLocation());
}
void TypeLocReader::VisitObjCTypeParamTypeLoc(ObjCTypeParamTypeLoc TL) {
@@ -6804,17 +7098,17 @@ void TypeLocReader::VisitPipeTypeLoc(PipeTypeLoc TL) {
TL.setKWLoc(readSourceLocation());
}
-void TypeLocReader::VisitExtIntTypeLoc(clang::ExtIntTypeLoc TL) {
+void TypeLocReader::VisitBitIntTypeLoc(clang::BitIntTypeLoc TL) {
TL.setNameLoc(readSourceLocation());
}
-void TypeLocReader::VisitDependentExtIntTypeLoc(
- clang::DependentExtIntTypeLoc TL) {
+void TypeLocReader::VisitDependentBitIntTypeLoc(
+ clang::DependentBitIntTypeLoc TL) {
TL.setNameLoc(readSourceLocation());
}
-
-void ASTRecordReader::readTypeLoc(TypeLoc TL) {
- TypeLocReader TLR(*this);
+void ASTRecordReader::readTypeLoc(TypeLoc TL, LocSeq *ParentSeq) {
+ LocSeq::State Seq(ParentSeq);
+ TypeLocReader TLR(*this, Seq);
for (; !TL.isNull(); TL = TL.getNextTypeLoc())
TLR.Visit(TL);
}
@@ -6839,6 +7133,10 @@ QualType ASTReader::GetType(TypeID ID) {
if (Index < NUM_PREDEF_TYPE_IDS) {
QualType T;
switch ((PredefinedTypeIDs)Index) {
+ case PREDEF_TYPE_LAST_ID:
+ // We should never use this one.
+ llvm_unreachable("Invalid predefined type");
+ break;
case PREDEF_TYPE_NULL_ID:
return QualType();
case PREDEF_TYPE_VOID_ID:
@@ -6984,6 +7282,9 @@ QualType ASTReader::GetType(TypeID ID) {
case PREDEF_TYPE_FLOAT128_ID:
T = Context.Float128Ty;
break;
+ case PREDEF_TYPE_IBM128_ID:
+ T = Context.Ibm128Ty;
+ break;
case PREDEF_TYPE_OVERLOAD_ID:
T = Context.OverloadTy;
break;
@@ -7084,6 +7385,11 @@ QualType ASTReader::GetType(TypeID ID) {
T = Context.SingletonId; \
break;
#include "clang/Basic/RISCVVTypes.def"
+#define WASM_TYPE(Name, Id, SingletonId) \
+ case PREDEF_TYPE_##Id##_ID: \
+ T = Context.SingletonId; \
+ break;
+#include "clang/Basic/WebAssemblyReferenceTypes.def"
}
assert(!T.isNull() && "Unknown predefined type");
@@ -7154,6 +7460,7 @@ ASTRecordReader::readTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind) {
case TemplateArgument::Integral:
case TemplateArgument::Declaration:
case TemplateArgument::NullPtr:
+ case TemplateArgument::StructuralValue:
case TemplateArgument::Pack:
// FIXME: Is this right?
return TemplateArgumentLocInfo();
@@ -7171,15 +7478,20 @@ TemplateArgumentLoc ASTRecordReader::readTemplateArgumentLoc() {
return TemplateArgumentLoc(Arg, readTemplateArgumentLocInfo(Arg.getKind()));
}
-const ASTTemplateArgumentListInfo *
-ASTRecordReader::readASTTemplateArgumentListInfo() {
- SourceLocation LAngleLoc = readSourceLocation();
- SourceLocation RAngleLoc = readSourceLocation();
+void ASTRecordReader::readTemplateArgumentListInfo(
+ TemplateArgumentListInfo &Result) {
+ Result.setLAngleLoc(readSourceLocation());
+ Result.setRAngleLoc(readSourceLocation());
unsigned NumArgsAsWritten = readInt();
- TemplateArgumentListInfo TemplArgsInfo(LAngleLoc, RAngleLoc);
for (unsigned i = 0; i != NumArgsAsWritten; ++i)
- TemplArgsInfo.addArgument(readTemplateArgumentLoc());
- return ASTTemplateArgumentListInfo::Create(getContext(), TemplArgsInfo);
+ Result.addArgument(readTemplateArgumentLoc());
+}
+
+const ASTTemplateArgumentListInfo *
+ASTRecordReader::readASTTemplateArgumentListInfo() {
+ TemplateArgumentListInfo Result;
+ readTemplateArgumentListInfo(Result);
+ return ASTTemplateArgumentListInfo::Create(getContext(), Result);
}
Decl *ASTReader::GetExternalDecl(uint32_t ID) {
@@ -7208,8 +7520,7 @@ void ASTReader::CompleteRedeclChain(const Decl *D) {
//
// FIXME: Merging a function definition should merge
// all mergeable entities within it.
- if (isa<TranslationUnitDecl>(DC) || isa<NamespaceDecl>(DC) ||
- isa<CXXRecordDecl>(DC) || isa<EnumDecl>(DC)) {
+ if (isa<TranslationUnitDecl, NamespaceDecl, RecordDecl, EnumDecl>(DC)) {
if (DeclarationName Name = cast<NamedDecl>(D)->getDeclName()) {
if (!getContext().getLangOpts().CPlusPlus &&
isa<TranslationUnitDecl>(DC)) {
@@ -7253,6 +7564,7 @@ ASTReader::GetExternalCXXCtorInitializers(uint64_t Offset) {
return nullptr;
}
ReadingKindTracker ReadingKind(Read_Decl, *this);
+ Deserializing D(this);
Expected<unsigned> MaybeCode = Cursor.ReadCode();
if (!MaybeCode) {
@@ -7287,6 +7599,7 @@ CXXBaseSpecifier *ASTReader::GetExternalCXXBaseSpecifiers(uint64_t Offset) {
return nullptr;
}
ReadingKindTracker ReadingKind(Read_Decl, *this);
+ Deserializing D(this);
Expected<unsigned> MaybeCode = Cursor.ReadCode();
if (!MaybeCode) {
@@ -7554,7 +7867,7 @@ void ASTReader::FindExternalLexicalDecls(
};
if (isa<TranslationUnitDecl>(DC)) {
- for (auto Lexical : TULexicalDecls)
+ for (const auto &Lexical : TULexicalDecls)
Visit(Lexical.first, Lexical.second);
} else {
auto I = LexicalDecls.find(DC);
@@ -7730,24 +8043,18 @@ void ASTReader::StartTranslationUnit(ASTConsumer *Consumer) {
void ASTReader::PrintStats() {
std::fprintf(stderr, "*** AST File Statistics:\n");
- unsigned NumTypesLoaded
- = TypesLoaded.size() - std::count(TypesLoaded.begin(), TypesLoaded.end(),
- QualType());
- unsigned NumDeclsLoaded
- = DeclsLoaded.size() - std::count(DeclsLoaded.begin(), DeclsLoaded.end(),
- (Decl *)nullptr);
- unsigned NumIdentifiersLoaded
- = IdentifiersLoaded.size() - std::count(IdentifiersLoaded.begin(),
- IdentifiersLoaded.end(),
- (IdentifierInfo *)nullptr);
- unsigned NumMacrosLoaded
- = MacrosLoaded.size() - std::count(MacrosLoaded.begin(),
- MacrosLoaded.end(),
- (MacroInfo *)nullptr);
- unsigned NumSelectorsLoaded
- = SelectorsLoaded.size() - std::count(SelectorsLoaded.begin(),
- SelectorsLoaded.end(),
- Selector());
+ unsigned NumTypesLoaded =
+ TypesLoaded.size() - llvm::count(TypesLoaded.materialized(), QualType());
+ unsigned NumDeclsLoaded =
+ DeclsLoaded.size() -
+ llvm::count(DeclsLoaded.materialized(), (Decl *)nullptr);
+ unsigned NumIdentifiersLoaded =
+ IdentifiersLoaded.size() -
+ llvm::count(IdentifiersLoaded, (IdentifierInfo *)nullptr);
+ unsigned NumMacrosLoaded =
+ MacrosLoaded.size() - llvm::count(MacrosLoaded, (MacroInfo *)nullptr);
+ unsigned NumSelectorsLoaded =
+ SelectorsLoaded.size() - llvm::count(SelectorsLoaded, Selector());
if (unsigned TotalNumSLocEntries = getTotalNumSLocs())
std::fprintf(stderr, " %u/%u source location entries read (%f%%)\n",
@@ -7950,8 +8257,8 @@ void ASTReader::UpdateSema() {
PragmaAlignPackStack.front().PushLocation);
DropFirst = true;
}
- for (const auto &Entry : llvm::makeArrayRef(PragmaAlignPackStack)
- .drop_front(DropFirst ? 1 : 0)) {
+ for (const auto &Entry :
+ llvm::ArrayRef(PragmaAlignPackStack).drop_front(DropFirst ? 1 : 0)) {
SemaObj->AlignPackStack.Stack.emplace_back(
Entry.SlotLabel, Entry.Value, Entry.Location, Entry.PushLocation);
}
@@ -7982,7 +8289,7 @@ void ASTReader::UpdateSema() {
DropFirst = true;
}
for (const auto &Entry :
- llvm::makeArrayRef(FpPragmaStack).drop_front(DropFirst ? 1 : 0))
+ llvm::ArrayRef(FpPragmaStack).drop_front(DropFirst ? 1 : 0))
SemaObj->FpPragmaStack.Stack.emplace_back(
Entry.SlotLabel, Entry.Value, Entry.Location, Entry.PushLocation);
if (FpPragmaCurrentLocation.isInvalid()) {
@@ -7996,13 +8303,14 @@ void ASTReader::UpdateSema() {
}
// For non-modular AST files, restore visiblity of modules.
- for (auto &Import : ImportedModules) {
+ for (auto &Import : PendingImportedModulesSema) {
if (Import.ImportLoc.isInvalid())
continue;
if (Module *Imported = getSubmodule(Import.ID)) {
SemaObj->makeModuleVisible(Imported, Import.ImportLoc);
}
}
+ PendingImportedModulesSema.clear();
}
IdentifierInfo *ASTReader::get(StringRef Name) {
@@ -8018,7 +8326,7 @@ IdentifierInfo *ASTReader::get(StringRef Name) {
// lookups). Perform the lookup in PCH files, though, since we don't build
// a complete initial identifier table if we're carrying on from a PCH.
if (PP.getLangOpts().CPlusPlus) {
- for (auto F : ModuleMgr.pch_modules())
+ for (auto *F : ModuleMgr.pch_modules())
if (Visitor(*F))
break;
} else {
@@ -8187,13 +8495,16 @@ namespace serialization {
if (Reader.DeserializationListener)
Reader.DeserializationListener->SelectorRead(Data.ID, Sel);
- InstanceMethods.append(Data.Instance.begin(), Data.Instance.end());
- FactoryMethods.append(Data.Factory.begin(), Data.Factory.end());
+ // Append methods in the reverse order, so that later we can process them
+ // in the order they appear in the source code by iterating through
+ // the vector in the reverse order.
+ InstanceMethods.append(Data.Instance.rbegin(), Data.Instance.rend());
+ FactoryMethods.append(Data.Factory.rbegin(), Data.Factory.rend());
InstanceBits = Data.InstanceBits;
FactoryBits = Data.FactoryBits;
InstanceHasMoreThanOneDecl = Data.InstanceHasMoreThanOneDecl;
FactoryHasMoreThanOneDecl = Data.FactoryHasMoreThanOneDecl;
- return true;
+ return false;
}
/// Retrieve the instance methods found by this visitor.
@@ -8222,9 +8533,8 @@ namespace serialization {
/// Add the given set of methods to the method list.
static void addMethodsToPool(Sema &S, ArrayRef<ObjCMethodDecl *> Methods,
ObjCMethodList &List) {
- for (unsigned I = 0, N = Methods.size(); I != N; ++I) {
- S.addMethodToGlobalList(&List, Methods[I]);
- }
+ for (ObjCMethodDecl *M : llvm::reverse(Methods))
+ S.addMethodToGlobalList(&List, M);
}
void ASTReader::ReadMethodPool(Selector Sel) {
@@ -8249,8 +8559,9 @@ void ASTReader::ReadMethodPool(Selector Sel) {
return;
Sema &S = *getSema();
- Sema::GlobalMethodPool::iterator Pos
- = S.MethodPool.insert(std::make_pair(Sel, Sema::GlobalMethods())).first;
+ Sema::GlobalMethodPool::iterator Pos =
+ S.MethodPool.insert(std::make_pair(Sel, Sema::GlobalMethodPool::Lists()))
+ .first;
Pos->second.first.setBits(Visitor.getInstanceBits());
Pos->second.first.setHasMoreThanOneDecl(Visitor.instanceHasMoreThanOneDecl());
@@ -8397,11 +8708,9 @@ void ASTReader::ReadWeakUndeclaredIdentifiers(
= DecodeIdentifierInfo(WeakUndeclaredIdentifiers[I++]);
IdentifierInfo *AliasId
= DecodeIdentifierInfo(WeakUndeclaredIdentifiers[I++]);
- SourceLocation Loc
- = SourceLocation::getFromRawEncoding(WeakUndeclaredIdentifiers[I++]);
- bool Used = WeakUndeclaredIdentifiers[I++];
+ SourceLocation Loc =
+ SourceLocation::getFromRawEncoding(WeakUndeclaredIdentifiers[I++]);
WeakInfo WI(AliasId, Loc);
- WI.setUsed(Used);
WeakIDs.push_back(std::make_pair(WeakId, WI));
}
WeakUndeclaredIdentifiers.clear();
@@ -8444,6 +8753,7 @@ void ASTReader::ReadLateParsedTemplates(
auto LT = std::make_unique<LateParsedTemplate>();
LT->D = GetLocalDecl(*FMod, LateParsed[Idx++]);
+ LT->FPO = FPOptions::getFromOpaqueInt(LateParsed[Idx++]);
ModuleFile *F = getOwningModuleFile(LT->D);
assert(F && "No module");
@@ -8460,6 +8770,17 @@ void ASTReader::ReadLateParsedTemplates(
LateParsedTemplates.clear();
}
+void ASTReader::AssignedLambdaNumbering(const CXXRecordDecl *Lambda) {
+ if (Lambda->getLambdaContextDecl()) {
+ // Keep track of this lambda so it can be merged with another lambda that
+ // is loaded later.
+ LambdaDeclarationsForMerging.insert(
+ {{Lambda->getLambdaContextDecl()->getCanonicalDecl(),
+ Lambda->getLambdaIndexInContext()},
+ const_cast<CXXRecordDecl *>(Lambda)});
+ }
+}
+
void ASTReader::LoadSelector(Selector Sel) {
// It would be complicated to avoid reading the methods anyway. So don't.
ReadMethodPool(Sel);
@@ -8647,10 +8968,10 @@ Module *ASTReader::getModule(unsigned ID) {
return getSubmodule(ID);
}
-ModuleFile *ASTReader::getLocalModuleFile(ModuleFile &F, unsigned ID) {
+ModuleFile *ASTReader::getLocalModuleFile(ModuleFile &M, unsigned ID) {
if (ID & 1) {
// It's a module, look it up by submodule ID.
- auto I = GlobalSubmoduleMap.find(getGlobalSubmoduleID(F, ID >> 1));
+ auto I = GlobalSubmoduleMap.find(getGlobalSubmoduleID(M, ID >> 1));
return I == GlobalSubmoduleMap.end() ? nullptr : I->second;
} else {
// It's a prefix (preamble, PCH, ...). Look it up by index.
@@ -8660,25 +8981,24 @@ ModuleFile *ASTReader::getLocalModuleFile(ModuleFile &F, unsigned ID) {
}
}
-unsigned ASTReader::getModuleFileID(ModuleFile *F) {
- if (!F)
+unsigned ASTReader::getModuleFileID(ModuleFile *M) {
+ if (!M)
return 1;
// For a file representing a module, use the submodule ID of the top-level
// module as the file ID. For any other kind of file, the number of such
// files loaded beforehand will be the same on reload.
// FIXME: Is this true even if we have an explicit module file and a PCH?
- if (F->isModule())
- return ((F->BaseSubmoduleID + NUM_PREDEF_SUBMODULE_IDS) << 1) | 1;
+ if (M->isModule())
+ return ((M->BaseSubmoduleID + NUM_PREDEF_SUBMODULE_IDS) << 1) | 1;
auto PCHModules = getModuleManager().pch_modules();
- auto I = llvm::find(PCHModules, F);
+ auto I = llvm::find(PCHModules, M);
assert(I != PCHModules.end() && "emitting reference to unknown file");
return (I - PCHModules.end()) << 1;
}
-llvm::Optional<ASTSourceDescriptor>
-ASTReader::getSourceDescriptor(unsigned ID) {
+std::optional<ASTSourceDescriptor> ASTReader::getSourceDescriptor(unsigned ID) {
if (Module *M = getSubmodule(ID))
return ASTSourceDescriptor(*M);
@@ -8689,10 +9009,11 @@ ASTReader::getSourceDescriptor(unsigned ID) {
ModuleFile &MF = ModuleMgr.getPrimaryModule();
StringRef ModuleName = llvm::sys::path::filename(MF.OriginalSourceFileName);
StringRef FileName = llvm::sys::path::filename(MF.FileName);
- return ASTSourceDescriptor(ModuleName, MF.OriginalDir, FileName,
- MF.Signature);
+ return ASTSourceDescriptor(ModuleName,
+ llvm::sys::path::parent_path(MF.FileName),
+ FileName, MF.Signature);
}
- return None;
+ return std::nullopt;
}
ExternalASTSource::ExtKind ASTReader::hasExternalDefinitions(const Decl *FD) {
@@ -8984,11 +9305,10 @@ ASTRecordReader::readNestedNameSpecifierLoc() {
return Builder.getWithLocInContext(Context);
}
-SourceRange
-ASTReader::ReadSourceRange(ModuleFile &F, const RecordData &Record,
- unsigned &Idx) {
- SourceLocation beg = ReadSourceLocation(F, Record, Idx);
- SourceLocation end = ReadSourceLocation(F, Record, Idx);
+SourceRange ASTReader::ReadSourceRange(ModuleFile &F, const RecordData &Record,
+ unsigned &Idx, LocSeq *Seq) {
+ SourceLocation beg = ReadSourceLocation(F, Record, Idx, Seq);
+ SourceLocation end = ReadSourceLocation(F, Record, Idx, Seq);
return SourceRange(beg, end);
}
@@ -8998,7 +9318,7 @@ llvm::APFloat ASTRecordReader::readAPFloat(const llvm::fltSemantics &Sem) {
}
// Read a string
-std::string ASTReader::ReadString(const RecordData &Record, unsigned &Idx) {
+std::string ASTReader::ReadString(const RecordDataImpl &Record, unsigned &Idx) {
unsigned Len = Record[Idx++];
std::string Result(Record.data() + Idx, Record.data() + Idx + Len);
Idx += Len;
@@ -9143,6 +9463,22 @@ void ASTReader::ReadComments() {
}
}
+void ASTReader::visitInputFileInfos(
+ serialization::ModuleFile &MF, bool IncludeSystem,
+ llvm::function_ref<void(const serialization::InputFileInfo &IFI,
+ bool IsSystem)>
+ Visitor) {
+ unsigned NumUserInputs = MF.NumUserInputFiles;
+ unsigned NumInputs = MF.InputFilesLoaded.size();
+ assert(NumUserInputs <= NumInputs);
+ unsigned N = IncludeSystem ? NumInputs : NumUserInputs;
+ for (unsigned I = 0; I < N; ++I) {
+ bool IsSystem = I >= NumUserInputs;
+ InputFileInfo IFI = getInputFileInfo(MF, I+1);
+ Visitor(IFI, IsSystem);
+ }
+}
+
void ASTReader::visitInputFiles(serialization::ModuleFile &MF,
bool IncludeSystem, bool Complain,
llvm::function_ref<void(const serialization::InputFile &IF,
@@ -9160,35 +9496,23 @@ void ASTReader::visitInputFiles(serialization::ModuleFile &MF,
void ASTReader::visitTopLevelModuleMaps(
serialization::ModuleFile &MF,
- llvm::function_ref<void(const FileEntry *FE)> Visitor) {
+ llvm::function_ref<void(FileEntryRef FE)> Visitor) {
unsigned NumInputs = MF.InputFilesLoaded.size();
for (unsigned I = 0; I < NumInputs; ++I) {
- InputFileInfo IFI = readInputFileInfo(MF, I + 1);
- if (IFI.TopLevelModuleMap)
- // FIXME: This unnecessarily re-reads the InputFileInfo.
+ InputFileInfo IFI = getInputFileInfo(MF, I + 1);
+ if (IFI.TopLevel && IFI.ModuleMap)
if (auto FE = getInputFile(MF, I + 1).getFile())
- Visitor(FE);
+ Visitor(*FE);
}
}
-std::string ASTReader::getOwningModuleNameForDiagnostic(const Decl *D) {
- // If we know the owning module, use it.
- if (Module *M = D->getImportedOwningModule())
- return M->getFullModuleName();
-
- // Otherwise, use the name of the top-level module the decl is within.
- if (ModuleFile *M = getOwningModuleFile(D))
- return M->ModuleName;
-
- // Not from a module.
- return {};
-}
-
void ASTReader::finishPendingActions() {
- while (!PendingIdentifierInfos.empty() || !PendingFunctionTypes.empty() ||
- !PendingIncompleteDeclChains.empty() || !PendingDeclChains.empty() ||
- !PendingMacroIDs.empty() || !PendingDeclContextInfos.empty() ||
- !PendingUpdateRecords.empty()) {
+ while (
+ !PendingIdentifierInfos.empty() || !PendingDeducedFunctionTypes.empty() ||
+ !PendingDeducedVarTypes.empty() || !PendingIncompleteDeclChains.empty() ||
+ !PendingDeclChains.empty() || !PendingMacroIDs.empty() ||
+ !PendingDeclContextInfos.empty() || !PendingUpdateRecords.empty() ||
+ !PendingObjCExtensionIvarRedeclarations.empty()) {
// If any identifiers with corresponding top-level declarations have
// been loaded, load those declarations now.
using TopLevelDeclsMap =
@@ -9206,18 +9530,35 @@ void ASTReader::finishPendingActions() {
// Load each function type that we deferred loading because it was a
// deduced type that might refer to a local type declared within itself.
- for (unsigned I = 0; I != PendingFunctionTypes.size(); ++I) {
- auto *FD = PendingFunctionTypes[I].first;
- FD->setType(GetType(PendingFunctionTypes[I].second));
+ for (unsigned I = 0; I != PendingDeducedFunctionTypes.size(); ++I) {
+ auto *FD = PendingDeducedFunctionTypes[I].first;
+ FD->setType(GetType(PendingDeducedFunctionTypes[I].second));
+
+ if (auto *DT = FD->getReturnType()->getContainedDeducedType()) {
+ // If we gave a function a deduced return type, remember that we need to
+ // propagate that along the redeclaration chain.
+ if (DT->isDeduced()) {
+ PendingDeducedTypeUpdates.insert(
+ {FD->getCanonicalDecl(), FD->getReturnType()});
+ continue;
+ }
- // If we gave a function a deduced return type, remember that we need to
- // propagate that along the redeclaration chain.
- auto *DT = FD->getReturnType()->getContainedDeducedType();
- if (DT && DT->isDeduced())
- PendingDeducedTypeUpdates.insert(
- {FD->getCanonicalDecl(), FD->getReturnType()});
+ // The function has undeduced DeduceType return type. We hope we can
+ // find the deduced type by iterating the redecls in other modules
+ // later.
+ PendingUndeducedFunctionDecls.push_back(FD);
+ continue;
+ }
}
- PendingFunctionTypes.clear();
+ PendingDeducedFunctionTypes.clear();
+
+ // Load each variable type that we deferred loading because it was a
+ // deduced type that might refer to a local type declared within itself.
+ for (unsigned I = 0; I != PendingDeducedVarTypes.size(); ++I) {
+ auto *VD = PendingDeducedVarTypes[I].first;
+ VD->setType(GetType(PendingDeducedVarTypes[I].second));
+ }
+ PendingDeducedVarTypes.clear();
// For each decl chain that we wanted to complete while deserializing, mark
// it as "still needs to be completed".
@@ -9279,6 +9620,43 @@ void ASTReader::finishPendingActions() {
ReadingKindTracker ReadingKind(Read_Decl, *this);
loadDeclUpdateRecords(Update);
}
+
+ while (!PendingObjCExtensionIvarRedeclarations.empty()) {
+ auto ExtensionsPair = PendingObjCExtensionIvarRedeclarations.back().first;
+ auto DuplicateIvars =
+ PendingObjCExtensionIvarRedeclarations.back().second;
+ llvm::DenseSet<std::pair<Decl *, Decl *>> NonEquivalentDecls;
+ StructuralEquivalenceContext Ctx(
+ ExtensionsPair.first->getASTContext(),
+ ExtensionsPair.second->getASTContext(), NonEquivalentDecls,
+ StructuralEquivalenceKind::Default, /*StrictTypeSpelling =*/false,
+ /*Complain =*/false,
+ /*ErrorOnTagTypeMismatch =*/true);
+ if (Ctx.IsEquivalent(ExtensionsPair.first, ExtensionsPair.second)) {
+ // Merge redeclared ivars with their predecessors.
+ for (auto IvarPair : DuplicateIvars) {
+ ObjCIvarDecl *Ivar = IvarPair.first, *PrevIvar = IvarPair.second;
+ // Change semantic DeclContext but keep the lexical one.
+ Ivar->setDeclContextsImpl(PrevIvar->getDeclContext(),
+ Ivar->getLexicalDeclContext(),
+ getContext());
+ getContext().setPrimaryMergedDecl(Ivar, PrevIvar->getCanonicalDecl());
+ }
+ // Invalidate duplicate extension and the cached ivar list.
+ ExtensionsPair.first->setInvalidDecl();
+ ExtensionsPair.second->getClassInterface()
+ ->getDefinition()
+ ->setIvarList(nullptr);
+ } else {
+ for (auto IvarPair : DuplicateIvars) {
+ Diag(IvarPair.first->getLocation(),
+ diag::err_duplicate_ivar_declaration)
+ << IvarPair.first->getIdentifier();
+ Diag(IvarPair.second->getLocation(), diag::note_previous_definition);
+ }
+ }
+ PendingObjCExtensionIvarRedeclarations.pop_back();
+ }
}
// At this point, all update records for loaded decls are in place, so any
@@ -9356,7 +9734,6 @@ void ASTReader::finishPendingActions() {
continue;
// FIXME: Check for =delete/=default?
- // FIXME: Complain about ODR violations here?
const FunctionDecl *Defn = nullptr;
if (!getContext().getLangOpts().Modules || !FD->hasBody(Defn)) {
FD->setLazyBody(PB->second);
@@ -9366,6 +9743,9 @@ void ASTReader::finishPendingActions() {
if (!FD->isLateTemplateParsed() &&
!NonConstDefn->isLateTemplateParsed() &&
+ // We only perform ODR checks for decls not in the explicit
+ // global module fragment.
+ !FD->shouldSkipCheckingODR() &&
FD->getODRHash() != NonConstDefn->getODRHash()) {
if (!isa<CXXMethodDecl>(FD)) {
PendingFunctionOdrMergeFailures[FD].push_back(NonConstDefn);
@@ -9387,6 +9767,12 @@ void ASTReader::finishPendingActions() {
}
PendingBodies.clear();
+ // Inform any classes that had members added that they now have more members.
+ for (auto [RD, MD] : PendingAddedClassMembers) {
+ RD->addedMember(MD);
+ }
+ PendingAddedClassMembers.clear();
+
// Do some cleanup.
for (auto *ND : PendingMergedDefinitionsToDeduplicate)
getContext().deduplicateMergedDefinitonsFor(ND);
@@ -9395,8 +9781,11 @@ void ASTReader::finishPendingActions() {
void ASTReader::diagnoseOdrViolations() {
if (PendingOdrMergeFailures.empty() && PendingOdrMergeChecks.empty() &&
+ PendingRecordOdrMergeFailures.empty() &&
PendingFunctionOdrMergeFailures.empty() &&
- PendingEnumOdrMergeFailures.empty())
+ PendingEnumOdrMergeFailures.empty() &&
+ PendingObjCInterfaceOdrMergeFailures.empty() &&
+ PendingObjCProtocolOdrMergeFailures.empty())
return;
// Trigger the import of the full definition of each class that had any
@@ -9418,6 +9807,25 @@ void ASTReader::diagnoseOdrViolations() {
}
}
+ // Trigger the import of the full definition of each record in C/ObjC.
+ auto RecordOdrMergeFailures = std::move(PendingRecordOdrMergeFailures);
+ PendingRecordOdrMergeFailures.clear();
+ for (auto &Merge : RecordOdrMergeFailures) {
+ Merge.first->decls_begin();
+ for (auto &D : Merge.second)
+ D->decls_begin();
+ }
+
+ // Trigger the import of the full interface definition.
+ auto ObjCInterfaceOdrMergeFailures =
+ std::move(PendingObjCInterfaceOdrMergeFailures);
+ PendingObjCInterfaceOdrMergeFailures.clear();
+ for (auto &Merge : ObjCInterfaceOdrMergeFailures) {
+ Merge.first->decls_begin();
+ for (auto &InterfacePair : Merge.second)
+ InterfacePair.first->decls_begin();
+ }
+
// Trigger the import of functions.
auto FunctionOdrMergeFailures = std::move(PendingFunctionOdrMergeFailures);
PendingFunctionOdrMergeFailures.clear();
@@ -9442,6 +9850,16 @@ void ASTReader::diagnoseOdrViolations() {
}
}
+ // Trigger the import of the full protocol definition.
+ auto ObjCProtocolOdrMergeFailures =
+ std::move(PendingObjCProtocolOdrMergeFailures);
+ PendingObjCProtocolOdrMergeFailures.clear();
+ for (auto &Merge : ObjCProtocolOdrMergeFailures) {
+ Merge.first->decls_begin();
+ for (auto &ProtocolPair : Merge.second)
+ ProtocolPair.first->decls_begin();
+ }
+
// For each declaration from a merged context, check that the canonical
// definition of that context also contains a declaration of the same
// entity.
@@ -9463,7 +9881,7 @@ void ASTReader::diagnoseOdrViolations() {
bool Found = false;
const Decl *DCanon = D->getCanonicalDecl();
- for (auto RI : D->redecls()) {
+ for (auto *RI : D->redecls()) {
if (RI->getLexicalDeclContext() == CanonDef) {
Found = true;
break;
@@ -9505,9 +9923,10 @@ void ASTReader::diagnoseOdrViolations() {
Deserializing RecursionGuard(this);
std::string CanonDefModule =
- getOwningModuleNameForDiagnostic(cast<Decl>(CanonDef));
+ ODRDiagsEmitter::getOwningModuleNameForDiagnostic(
+ cast<Decl>(CanonDef));
Diag(D->getLocation(), diag::err_module_odr_violation_missing_decl)
- << D << getOwningModuleNameForDiagnostic(D)
+ << D << ODRDiagsEmitter::getOwningModuleNameForDiagnostic(D)
<< CanonDef << CanonDefModule.empty() << CanonDefModule;
if (Candidates.empty())
@@ -9524,489 +9943,14 @@ void ASTReader::diagnoseOdrViolations() {
}
}
- if (OdrMergeFailures.empty() && FunctionOdrMergeFailures.empty() &&
- EnumOdrMergeFailures.empty())
+ if (OdrMergeFailures.empty() && RecordOdrMergeFailures.empty() &&
+ FunctionOdrMergeFailures.empty() && EnumOdrMergeFailures.empty() &&
+ ObjCInterfaceOdrMergeFailures.empty() &&
+ ObjCProtocolOdrMergeFailures.empty())
return;
- // Ensure we don't accidentally recursively enter deserialization while
- // we're producing our diagnostics.
- Deserializing RecursionGuard(this);
-
- // Common code for hashing helpers.
- ODRHash Hash;
- auto ComputeQualTypeODRHash = [&Hash](QualType Ty) {
- Hash.clear();
- Hash.AddQualType(Ty);
- return Hash.CalculateHash();
- };
-
- auto ComputeODRHash = [&Hash](const Stmt *S) {
- assert(S);
- Hash.clear();
- Hash.AddStmt(S);
- return Hash.CalculateHash();
- };
-
- auto ComputeSubDeclODRHash = [&Hash](const Decl *D) {
- assert(D);
- Hash.clear();
- Hash.AddSubDecl(D);
- return Hash.CalculateHash();
- };
-
- auto ComputeTemplateArgumentODRHash = [&Hash](const TemplateArgument &TA) {
- Hash.clear();
- Hash.AddTemplateArgument(TA);
- return Hash.CalculateHash();
- };
-
- auto ComputeTemplateParameterListODRHash =
- [&Hash](const TemplateParameterList *TPL) {
- assert(TPL);
- Hash.clear();
- Hash.AddTemplateParameterList(TPL);
- return Hash.CalculateHash();
- };
-
- // Used with err_module_odr_violation_mismatch_decl and
- // note_module_odr_violation_mismatch_decl
- // This list should be the same Decl's as in ODRHash::isDeclToBeProcessed
- enum ODRMismatchDecl {
- EndOfClass,
- PublicSpecifer,
- PrivateSpecifer,
- ProtectedSpecifer,
- StaticAssert,
- Field,
- CXXMethod,
- TypeAlias,
- TypeDef,
- Var,
- Friend,
- FunctionTemplate,
- Other
- };
-
- // Used with err_module_odr_violation_mismatch_decl_diff and
- // note_module_odr_violation_mismatch_decl_diff
- enum ODRMismatchDeclDifference {
- StaticAssertCondition,
- StaticAssertMessage,
- StaticAssertOnlyMessage,
- FieldName,
- FieldTypeName,
- FieldSingleBitField,
- FieldDifferentWidthBitField,
- FieldSingleMutable,
- FieldSingleInitializer,
- FieldDifferentInitializers,
- MethodName,
- MethodDeleted,
- MethodDefaulted,
- MethodVirtual,
- MethodStatic,
- MethodVolatile,
- MethodConst,
- MethodInline,
- MethodNumberParameters,
- MethodParameterType,
- MethodParameterName,
- MethodParameterSingleDefaultArgument,
- MethodParameterDifferentDefaultArgument,
- MethodNoTemplateArguments,
- MethodDifferentNumberTemplateArguments,
- MethodDifferentTemplateArgument,
- MethodSingleBody,
- MethodDifferentBody,
- TypedefName,
- TypedefType,
- VarName,
- VarType,
- VarSingleInitializer,
- VarDifferentInitializer,
- VarConstexpr,
- FriendTypeFunction,
- FriendType,
- FriendFunction,
- FunctionTemplateDifferentNumberParameters,
- FunctionTemplateParameterDifferentKind,
- FunctionTemplateParameterName,
- FunctionTemplateParameterSingleDefaultArgument,
- FunctionTemplateParameterDifferentDefaultArgument,
- FunctionTemplateParameterDifferentType,
- FunctionTemplatePackParameter,
- };
-
- // These lambdas have the common portions of the ODR diagnostics. This
- // has the same return as Diag(), so addition parameters can be passed
- // in with operator<<
- auto ODRDiagDeclError = [this](NamedDecl *FirstRecord, StringRef FirstModule,
- SourceLocation Loc, SourceRange Range,
- ODRMismatchDeclDifference DiffType) {
- return Diag(Loc, diag::err_module_odr_violation_mismatch_decl_diff)
- << FirstRecord << FirstModule.empty() << FirstModule << Range
- << DiffType;
- };
- auto ODRDiagDeclNote = [this](StringRef SecondModule, SourceLocation Loc,
- SourceRange Range, ODRMismatchDeclDifference DiffType) {
- return Diag(Loc, diag::note_module_odr_violation_mismatch_decl_diff)
- << SecondModule << Range << DiffType;
- };
-
- auto ODRDiagField = [this, &ODRDiagDeclError, &ODRDiagDeclNote,
- &ComputeQualTypeODRHash, &ComputeODRHash](
- NamedDecl *FirstRecord, StringRef FirstModule,
- StringRef SecondModule, FieldDecl *FirstField,
- FieldDecl *SecondField) {
- IdentifierInfo *FirstII = FirstField->getIdentifier();
- IdentifierInfo *SecondII = SecondField->getIdentifier();
- if (FirstII->getName() != SecondII->getName()) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
- FirstField->getSourceRange(), FieldName)
- << FirstII;
- ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
- SecondField->getSourceRange(), FieldName)
- << SecondII;
-
- return true;
- }
-
- assert(getContext().hasSameType(FirstField->getType(),
- SecondField->getType()));
-
- QualType FirstType = FirstField->getType();
- QualType SecondType = SecondField->getType();
- if (ComputeQualTypeODRHash(FirstType) !=
- ComputeQualTypeODRHash(SecondType)) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
- FirstField->getSourceRange(), FieldTypeName)
- << FirstII << FirstType;
- ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
- SecondField->getSourceRange(), FieldTypeName)
- << SecondII << SecondType;
-
- return true;
- }
-
- const bool IsFirstBitField = FirstField->isBitField();
- const bool IsSecondBitField = SecondField->isBitField();
- if (IsFirstBitField != IsSecondBitField) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
- FirstField->getSourceRange(), FieldSingleBitField)
- << FirstII << IsFirstBitField;
- ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
- SecondField->getSourceRange(), FieldSingleBitField)
- << SecondII << IsSecondBitField;
- return true;
- }
-
- if (IsFirstBitField && IsSecondBitField) {
- unsigned FirstBitWidthHash =
- ComputeODRHash(FirstField->getBitWidth());
- unsigned SecondBitWidthHash =
- ComputeODRHash(SecondField->getBitWidth());
- if (FirstBitWidthHash != SecondBitWidthHash) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
- FirstField->getSourceRange(),
- FieldDifferentWidthBitField)
- << FirstII << FirstField->getBitWidth()->getSourceRange();
- ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
- SecondField->getSourceRange(),
- FieldDifferentWidthBitField)
- << SecondII << SecondField->getBitWidth()->getSourceRange();
- return true;
- }
- }
-
- if (!PP.getLangOpts().CPlusPlus)
- return false;
-
- const bool IsFirstMutable = FirstField->isMutable();
- const bool IsSecondMutable = SecondField->isMutable();
- if (IsFirstMutable != IsSecondMutable) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
- FirstField->getSourceRange(), FieldSingleMutable)
- << FirstII << IsFirstMutable;
- ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
- SecondField->getSourceRange(), FieldSingleMutable)
- << SecondII << IsSecondMutable;
- return true;
- }
-
- const Expr *FirstInitializer = FirstField->getInClassInitializer();
- const Expr *SecondInitializer = SecondField->getInClassInitializer();
- if ((!FirstInitializer && SecondInitializer) ||
- (FirstInitializer && !SecondInitializer)) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
- FirstField->getSourceRange(), FieldSingleInitializer)
- << FirstII << (FirstInitializer != nullptr);
- ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
- SecondField->getSourceRange(), FieldSingleInitializer)
- << SecondII << (SecondInitializer != nullptr);
- return true;
- }
-
- if (FirstInitializer && SecondInitializer) {
- unsigned FirstInitHash = ComputeODRHash(FirstInitializer);
- unsigned SecondInitHash = ComputeODRHash(SecondInitializer);
- if (FirstInitHash != SecondInitHash) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
- FirstField->getSourceRange(),
- FieldDifferentInitializers)
- << FirstII << FirstInitializer->getSourceRange();
- ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
- SecondField->getSourceRange(),
- FieldDifferentInitializers)
- << SecondII << SecondInitializer->getSourceRange();
- return true;
- }
- }
-
- return false;
- };
-
- auto ODRDiagTypeDefOrAlias =
- [&ODRDiagDeclError, &ODRDiagDeclNote, &ComputeQualTypeODRHash](
- NamedDecl *FirstRecord, StringRef FirstModule, StringRef SecondModule,
- TypedefNameDecl *FirstTD, TypedefNameDecl *SecondTD,
- bool IsTypeAlias) {
- auto FirstName = FirstTD->getDeclName();
- auto SecondName = SecondTD->getDeclName();
- if (FirstName != SecondName) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstTD->getLocation(),
- FirstTD->getSourceRange(), TypedefName)
- << IsTypeAlias << FirstName;
- ODRDiagDeclNote(SecondModule, SecondTD->getLocation(),
- SecondTD->getSourceRange(), TypedefName)
- << IsTypeAlias << SecondName;
- return true;
- }
-
- QualType FirstType = FirstTD->getUnderlyingType();
- QualType SecondType = SecondTD->getUnderlyingType();
- if (ComputeQualTypeODRHash(FirstType) !=
- ComputeQualTypeODRHash(SecondType)) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstTD->getLocation(),
- FirstTD->getSourceRange(), TypedefType)
- << IsTypeAlias << FirstName << FirstType;
- ODRDiagDeclNote(SecondModule, SecondTD->getLocation(),
- SecondTD->getSourceRange(), TypedefType)
- << IsTypeAlias << SecondName << SecondType;
- return true;
- }
-
- return false;
- };
-
- auto ODRDiagVar = [&ODRDiagDeclError, &ODRDiagDeclNote,
- &ComputeQualTypeODRHash, &ComputeODRHash,
- this](NamedDecl *FirstRecord, StringRef FirstModule,
- StringRef SecondModule, VarDecl *FirstVD,
- VarDecl *SecondVD) {
- auto FirstName = FirstVD->getDeclName();
- auto SecondName = SecondVD->getDeclName();
- if (FirstName != SecondName) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstVD->getLocation(),
- FirstVD->getSourceRange(), VarName)
- << FirstName;
- ODRDiagDeclNote(SecondModule, SecondVD->getLocation(),
- SecondVD->getSourceRange(), VarName)
- << SecondName;
- return true;
- }
-
- QualType FirstType = FirstVD->getType();
- QualType SecondType = SecondVD->getType();
- if (ComputeQualTypeODRHash(FirstType) !=
- ComputeQualTypeODRHash(SecondType)) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstVD->getLocation(),
- FirstVD->getSourceRange(), VarType)
- << FirstName << FirstType;
- ODRDiagDeclNote(SecondModule, SecondVD->getLocation(),
- SecondVD->getSourceRange(), VarType)
- << SecondName << SecondType;
- return true;
- }
-
- if (!PP.getLangOpts().CPlusPlus)
- return false;
-
- const Expr *FirstInit = FirstVD->getInit();
- const Expr *SecondInit = SecondVD->getInit();
- if ((FirstInit == nullptr) != (SecondInit == nullptr)) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstVD->getLocation(),
- FirstVD->getSourceRange(), VarSingleInitializer)
- << FirstName << (FirstInit == nullptr)
- << (FirstInit ? FirstInit->getSourceRange() : SourceRange());
- ODRDiagDeclNote(SecondModule, SecondVD->getLocation(),
- SecondVD->getSourceRange(), VarSingleInitializer)
- << SecondName << (SecondInit == nullptr)
- << (SecondInit ? SecondInit->getSourceRange() : SourceRange());
- return true;
- }
-
- if (FirstInit && SecondInit &&
- ComputeODRHash(FirstInit) != ComputeODRHash(SecondInit)) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstVD->getLocation(),
- FirstVD->getSourceRange(), VarDifferentInitializer)
- << FirstName << FirstInit->getSourceRange();
- ODRDiagDeclNote(SecondModule, SecondVD->getLocation(),
- SecondVD->getSourceRange(), VarDifferentInitializer)
- << SecondName << SecondInit->getSourceRange();
- return true;
- }
-
- const bool FirstIsConstexpr = FirstVD->isConstexpr();
- const bool SecondIsConstexpr = SecondVD->isConstexpr();
- if (FirstIsConstexpr != SecondIsConstexpr) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstVD->getLocation(),
- FirstVD->getSourceRange(), VarConstexpr)
- << FirstName << FirstIsConstexpr;
- ODRDiagDeclNote(SecondModule, SecondVD->getLocation(),
- SecondVD->getSourceRange(), VarConstexpr)
- << SecondName << SecondIsConstexpr;
- return true;
- }
- return false;
- };
-
- auto DifferenceSelector = [](Decl *D) {
- assert(D && "valid Decl required");
- switch (D->getKind()) {
- default:
- return Other;
- case Decl::AccessSpec:
- switch (D->getAccess()) {
- case AS_public:
- return PublicSpecifer;
- case AS_private:
- return PrivateSpecifer;
- case AS_protected:
- return ProtectedSpecifer;
- case AS_none:
- break;
- }
- llvm_unreachable("Invalid access specifier");
- case Decl::StaticAssert:
- return StaticAssert;
- case Decl::Field:
- return Field;
- case Decl::CXXMethod:
- case Decl::CXXConstructor:
- case Decl::CXXDestructor:
- return CXXMethod;
- case Decl::TypeAlias:
- return TypeAlias;
- case Decl::Typedef:
- return TypeDef;
- case Decl::Var:
- return Var;
- case Decl::Friend:
- return Friend;
- case Decl::FunctionTemplate:
- return FunctionTemplate;
- }
- };
-
- using DeclHashes = llvm::SmallVector<std::pair<Decl *, unsigned>, 4>;
- auto PopulateHashes = [&ComputeSubDeclODRHash](DeclHashes &Hashes,
- RecordDecl *Record,
- const DeclContext *DC) {
- for (auto *D : Record->decls()) {
- if (!ODRHash::isDeclToBeProcessed(D, DC))
- continue;
- Hashes.emplace_back(D, ComputeSubDeclODRHash(D));
- }
- };
-
- struct DiffResult {
- Decl *FirstDecl = nullptr, *SecondDecl = nullptr;
- ODRMismatchDecl FirstDiffType = Other, SecondDiffType = Other;
- };
-
- // If there is a diagnoseable difference, FirstDiffType and
- // SecondDiffType will not be Other and FirstDecl and SecondDecl will be
- // filled in if not EndOfClass.
- auto FindTypeDiffs = [&DifferenceSelector](DeclHashes &FirstHashes,
- DeclHashes &SecondHashes) {
- DiffResult DR;
- auto FirstIt = FirstHashes.begin();
- auto SecondIt = SecondHashes.begin();
- while (FirstIt != FirstHashes.end() || SecondIt != SecondHashes.end()) {
- if (FirstIt != FirstHashes.end() && SecondIt != SecondHashes.end() &&
- FirstIt->second == SecondIt->second) {
- ++FirstIt;
- ++SecondIt;
- continue;
- }
-
- DR.FirstDecl = FirstIt == FirstHashes.end() ? nullptr : FirstIt->first;
- DR.SecondDecl =
- SecondIt == SecondHashes.end() ? nullptr : SecondIt->first;
-
- DR.FirstDiffType =
- DR.FirstDecl ? DifferenceSelector(DR.FirstDecl) : EndOfClass;
- DR.SecondDiffType =
- DR.SecondDecl ? DifferenceSelector(DR.SecondDecl) : EndOfClass;
- return DR;
- }
- return DR;
- };
-
- // Use this to diagnose that an unexpected Decl was encountered
- // or no difference was detected. This causes a generic error
- // message to be emitted.
- auto DiagnoseODRUnexpected = [this](DiffResult &DR, NamedDecl *FirstRecord,
- StringRef FirstModule,
- NamedDecl *SecondRecord,
- StringRef SecondModule) {
- Diag(FirstRecord->getLocation(),
- diag::err_module_odr_violation_different_definitions)
- << FirstRecord << FirstModule.empty() << FirstModule;
-
- if (DR.FirstDecl) {
- Diag(DR.FirstDecl->getLocation(), diag::note_first_module_difference)
- << FirstRecord << DR.FirstDecl->getSourceRange();
- }
-
- Diag(SecondRecord->getLocation(),
- diag::note_module_odr_violation_different_definitions)
- << SecondModule;
-
- if (DR.SecondDecl) {
- Diag(DR.SecondDecl->getLocation(), diag::note_second_module_difference)
- << DR.SecondDecl->getSourceRange();
- }
- };
-
- auto DiagnoseODRMismatch =
- [this](DiffResult &DR, NamedDecl *FirstRecord, StringRef FirstModule,
- NamedDecl *SecondRecord, StringRef SecondModule) {
- SourceLocation FirstLoc;
- SourceRange FirstRange;
- auto *FirstTag = dyn_cast<TagDecl>(FirstRecord);
- if (DR.FirstDiffType == EndOfClass && FirstTag) {
- FirstLoc = FirstTag->getBraceRange().getEnd();
- } else {
- FirstLoc = DR.FirstDecl->getLocation();
- FirstRange = DR.FirstDecl->getSourceRange();
- }
- Diag(FirstLoc, diag::err_module_odr_violation_mismatch_decl)
- << FirstRecord << FirstModule.empty() << FirstModule << FirstRange
- << DR.FirstDiffType;
-
- SourceLocation SecondLoc;
- SourceRange SecondRange;
- auto *SecondTag = dyn_cast<TagDecl>(SecondRecord);
- if (DR.SecondDiffType == EndOfClass && SecondTag) {
- SecondLoc = SecondTag->getBraceRange().getEnd();
- } else {
- SecondLoc = DR.SecondDecl->getLocation();
- SecondRange = DR.SecondDecl->getSourceRange();
- }
- Diag(SecondLoc, diag::note_module_odr_violation_mismatch_decl)
- << SecondModule << SecondRange << DR.SecondDiffType;
- };
+ ODRDiagsEmitter DiagsEmitter(Diags, getContext(),
+ getPreprocessor().getLangOpts());
// Issue any pending ODR-failure diagnostics.
for (auto &Merge : OdrMergeFailures) {
@@ -10017,1190 +9961,12 @@ void ASTReader::diagnoseOdrViolations() {
bool Diagnosed = false;
CXXRecordDecl *FirstRecord = Merge.first;
- std::string FirstModule = getOwningModuleNameForDiagnostic(FirstRecord);
for (auto &RecordPair : Merge.second) {
- CXXRecordDecl *SecondRecord = RecordPair.first;
- // Multiple different declarations got merged together; tell the user
- // where they came from.
- if (FirstRecord == SecondRecord)
- continue;
-
- std::string SecondModule = getOwningModuleNameForDiagnostic(SecondRecord);
-
- auto *FirstDD = FirstRecord->DefinitionData;
- auto *SecondDD = RecordPair.second;
-
- assert(FirstDD && SecondDD && "Definitions without DefinitionData");
-
- // Diagnostics from DefinitionData are emitted here.
- if (FirstDD != SecondDD) {
- enum ODRDefinitionDataDifference {
- NumBases,
- NumVBases,
- BaseType,
- BaseVirtual,
- BaseAccess,
- };
- auto ODRDiagBaseError = [FirstRecord, &FirstModule,
- this](SourceLocation Loc, SourceRange Range,
- ODRDefinitionDataDifference DiffType) {
- return Diag(Loc, diag::err_module_odr_violation_definition_data)
- << FirstRecord << FirstModule.empty() << FirstModule << Range
- << DiffType;
- };
- auto ODRDiagBaseNote = [&SecondModule,
- this](SourceLocation Loc, SourceRange Range,
- ODRDefinitionDataDifference DiffType) {
- return Diag(Loc, diag::note_module_odr_violation_definition_data)
- << SecondModule << Range << DiffType;
- };
-
- unsigned FirstNumBases = FirstDD->NumBases;
- unsigned FirstNumVBases = FirstDD->NumVBases;
- unsigned SecondNumBases = SecondDD->NumBases;
- unsigned SecondNumVBases = SecondDD->NumVBases;
-
- auto GetSourceRange = [](struct CXXRecordDecl::DefinitionData *DD) {
- unsigned NumBases = DD->NumBases;
- if (NumBases == 0) return SourceRange();
- auto bases = DD->bases();
- return SourceRange(bases[0].getBeginLoc(),
- bases[NumBases - 1].getEndLoc());
- };
-
- if (FirstNumBases != SecondNumBases) {
- ODRDiagBaseError(FirstRecord->getLocation(), GetSourceRange(FirstDD),
- NumBases)
- << FirstNumBases;
- ODRDiagBaseNote(SecondRecord->getLocation(), GetSourceRange(SecondDD),
- NumBases)
- << SecondNumBases;
- Diagnosed = true;
- break;
- }
-
- if (FirstNumVBases != SecondNumVBases) {
- ODRDiagBaseError(FirstRecord->getLocation(), GetSourceRange(FirstDD),
- NumVBases)
- << FirstNumVBases;
- ODRDiagBaseNote(SecondRecord->getLocation(), GetSourceRange(SecondDD),
- NumVBases)
- << SecondNumVBases;
- Diagnosed = true;
- break;
- }
-
- auto FirstBases = FirstDD->bases();
- auto SecondBases = SecondDD->bases();
- unsigned i = 0;
- for (i = 0; i < FirstNumBases; ++i) {
- auto FirstBase = FirstBases[i];
- auto SecondBase = SecondBases[i];
- if (ComputeQualTypeODRHash(FirstBase.getType()) !=
- ComputeQualTypeODRHash(SecondBase.getType())) {
- ODRDiagBaseError(FirstRecord->getLocation(),
- FirstBase.getSourceRange(), BaseType)
- << (i + 1) << FirstBase.getType();
- ODRDiagBaseNote(SecondRecord->getLocation(),
- SecondBase.getSourceRange(), BaseType)
- << (i + 1) << SecondBase.getType();
- break;
- }
-
- if (FirstBase.isVirtual() != SecondBase.isVirtual()) {
- ODRDiagBaseError(FirstRecord->getLocation(),
- FirstBase.getSourceRange(), BaseVirtual)
- << (i + 1) << FirstBase.isVirtual() << FirstBase.getType();
- ODRDiagBaseNote(SecondRecord->getLocation(),
- SecondBase.getSourceRange(), BaseVirtual)
- << (i + 1) << SecondBase.isVirtual() << SecondBase.getType();
- break;
- }
-
- if (FirstBase.getAccessSpecifierAsWritten() !=
- SecondBase.getAccessSpecifierAsWritten()) {
- ODRDiagBaseError(FirstRecord->getLocation(),
- FirstBase.getSourceRange(), BaseAccess)
- << (i + 1) << FirstBase.getType()
- << (int)FirstBase.getAccessSpecifierAsWritten();
- ODRDiagBaseNote(SecondRecord->getLocation(),
- SecondBase.getSourceRange(), BaseAccess)
- << (i + 1) << SecondBase.getType()
- << (int)SecondBase.getAccessSpecifierAsWritten();
- break;
- }
- }
-
- if (i != FirstNumBases) {
- Diagnosed = true;
- break;
- }
- }
-
- const ClassTemplateDecl *FirstTemplate =
- FirstRecord->getDescribedClassTemplate();
- const ClassTemplateDecl *SecondTemplate =
- SecondRecord->getDescribedClassTemplate();
-
- assert(!FirstTemplate == !SecondTemplate &&
- "Both pointers should be null or non-null");
-
- enum ODRTemplateDifference {
- ParamEmptyName,
- ParamName,
- ParamSingleDefaultArgument,
- ParamDifferentDefaultArgument,
- };
-
- if (FirstTemplate && SecondTemplate) {
- DeclHashes FirstTemplateHashes;
- DeclHashes SecondTemplateHashes;
-
- auto PopulateTemplateParameterHashs =
- [&ComputeSubDeclODRHash](DeclHashes &Hashes,
- const ClassTemplateDecl *TD) {
- for (auto *D : TD->getTemplateParameters()->asArray()) {
- Hashes.emplace_back(D, ComputeSubDeclODRHash(D));
- }
- };
-
- PopulateTemplateParameterHashs(FirstTemplateHashes, FirstTemplate);
- PopulateTemplateParameterHashs(SecondTemplateHashes, SecondTemplate);
-
- assert(FirstTemplateHashes.size() == SecondTemplateHashes.size() &&
- "Number of template parameters should be equal.");
-
- auto FirstIt = FirstTemplateHashes.begin();
- auto FirstEnd = FirstTemplateHashes.end();
- auto SecondIt = SecondTemplateHashes.begin();
- for (; FirstIt != FirstEnd; ++FirstIt, ++SecondIt) {
- if (FirstIt->second == SecondIt->second)
- continue;
-
- auto ODRDiagTemplateError = [FirstRecord, &FirstModule, this](
- SourceLocation Loc, SourceRange Range,
- ODRTemplateDifference DiffType) {
- return Diag(Loc, diag::err_module_odr_violation_template_parameter)
- << FirstRecord << FirstModule.empty() << FirstModule << Range
- << DiffType;
- };
- auto ODRDiagTemplateNote = [&SecondModule, this](
- SourceLocation Loc, SourceRange Range,
- ODRTemplateDifference DiffType) {
- return Diag(Loc, diag::note_module_odr_violation_template_parameter)
- << SecondModule << Range << DiffType;
- };
-
- const NamedDecl* FirstDecl = cast<NamedDecl>(FirstIt->first);
- const NamedDecl* SecondDecl = cast<NamedDecl>(SecondIt->first);
-
- assert(FirstDecl->getKind() == SecondDecl->getKind() &&
- "Parameter Decl's should be the same kind.");
-
- DeclarationName FirstName = FirstDecl->getDeclName();
- DeclarationName SecondName = SecondDecl->getDeclName();
-
- if (FirstName != SecondName) {
- const bool FirstNameEmpty =
- FirstName.isIdentifier() && !FirstName.getAsIdentifierInfo();
- const bool SecondNameEmpty =
- SecondName.isIdentifier() && !SecondName.getAsIdentifierInfo();
- assert((!FirstNameEmpty || !SecondNameEmpty) &&
- "Both template parameters cannot be unnamed.");
- ODRDiagTemplateError(FirstDecl->getLocation(),
- FirstDecl->getSourceRange(),
- FirstNameEmpty ? ParamEmptyName : ParamName)
- << FirstName;
- ODRDiagTemplateNote(SecondDecl->getLocation(),
- SecondDecl->getSourceRange(),
- SecondNameEmpty ? ParamEmptyName : ParamName)
- << SecondName;
- break;
- }
-
- switch (FirstDecl->getKind()) {
- default:
- llvm_unreachable("Invalid template parameter type.");
- case Decl::TemplateTypeParm: {
- const auto *FirstParam = cast<TemplateTypeParmDecl>(FirstDecl);
- const auto *SecondParam = cast<TemplateTypeParmDecl>(SecondDecl);
- const bool HasFirstDefaultArgument =
- FirstParam->hasDefaultArgument() &&
- !FirstParam->defaultArgumentWasInherited();
- const bool HasSecondDefaultArgument =
- SecondParam->hasDefaultArgument() &&
- !SecondParam->defaultArgumentWasInherited();
-
- if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagTemplateError(FirstDecl->getLocation(),
- FirstDecl->getSourceRange(),
- ParamSingleDefaultArgument)
- << HasFirstDefaultArgument;
- ODRDiagTemplateNote(SecondDecl->getLocation(),
- SecondDecl->getSourceRange(),
- ParamSingleDefaultArgument)
- << HasSecondDefaultArgument;
- break;
- }
-
- assert(HasFirstDefaultArgument && HasSecondDefaultArgument &&
- "Expecting default arguments.");
-
- ODRDiagTemplateError(FirstDecl->getLocation(),
- FirstDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
- ODRDiagTemplateNote(SecondDecl->getLocation(),
- SecondDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
-
- break;
- }
- case Decl::NonTypeTemplateParm: {
- const auto *FirstParam = cast<NonTypeTemplateParmDecl>(FirstDecl);
- const auto *SecondParam = cast<NonTypeTemplateParmDecl>(SecondDecl);
- const bool HasFirstDefaultArgument =
- FirstParam->hasDefaultArgument() &&
- !FirstParam->defaultArgumentWasInherited();
- const bool HasSecondDefaultArgument =
- SecondParam->hasDefaultArgument() &&
- !SecondParam->defaultArgumentWasInherited();
-
- if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagTemplateError(FirstDecl->getLocation(),
- FirstDecl->getSourceRange(),
- ParamSingleDefaultArgument)
- << HasFirstDefaultArgument;
- ODRDiagTemplateNote(SecondDecl->getLocation(),
- SecondDecl->getSourceRange(),
- ParamSingleDefaultArgument)
- << HasSecondDefaultArgument;
- break;
- }
-
- assert(HasFirstDefaultArgument && HasSecondDefaultArgument &&
- "Expecting default arguments.");
-
- ODRDiagTemplateError(FirstDecl->getLocation(),
- FirstDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
- ODRDiagTemplateNote(SecondDecl->getLocation(),
- SecondDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
-
- break;
- }
- case Decl::TemplateTemplateParm: {
- const auto *FirstParam = cast<TemplateTemplateParmDecl>(FirstDecl);
- const auto *SecondParam =
- cast<TemplateTemplateParmDecl>(SecondDecl);
- const bool HasFirstDefaultArgument =
- FirstParam->hasDefaultArgument() &&
- !FirstParam->defaultArgumentWasInherited();
- const bool HasSecondDefaultArgument =
- SecondParam->hasDefaultArgument() &&
- !SecondParam->defaultArgumentWasInherited();
-
- if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagTemplateError(FirstDecl->getLocation(),
- FirstDecl->getSourceRange(),
- ParamSingleDefaultArgument)
- << HasFirstDefaultArgument;
- ODRDiagTemplateNote(SecondDecl->getLocation(),
- SecondDecl->getSourceRange(),
- ParamSingleDefaultArgument)
- << HasSecondDefaultArgument;
- break;
- }
-
- assert(HasFirstDefaultArgument && HasSecondDefaultArgument &&
- "Expecting default arguments.");
-
- ODRDiagTemplateError(FirstDecl->getLocation(),
- FirstDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
- ODRDiagTemplateNote(SecondDecl->getLocation(),
- SecondDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
-
- break;
- }
- }
-
- break;
- }
-
- if (FirstIt != FirstEnd) {
- Diagnosed = true;
- break;
- }
- }
-
- DeclHashes FirstHashes;
- DeclHashes SecondHashes;
- const DeclContext *DC = FirstRecord;
- PopulateHashes(FirstHashes, FirstRecord, DC);
- PopulateHashes(SecondHashes, SecondRecord, DC);
-
- auto DR = FindTypeDiffs(FirstHashes, SecondHashes);
- ODRMismatchDecl FirstDiffType = DR.FirstDiffType;
- ODRMismatchDecl SecondDiffType = DR.SecondDiffType;
- Decl *FirstDecl = DR.FirstDecl;
- Decl *SecondDecl = DR.SecondDecl;
-
- if (FirstDiffType == Other || SecondDiffType == Other) {
- DiagnoseODRUnexpected(DR, FirstRecord, FirstModule, SecondRecord,
- SecondModule);
- Diagnosed = true;
- break;
- }
-
- if (FirstDiffType != SecondDiffType) {
- DiagnoseODRMismatch(DR, FirstRecord, FirstModule, SecondRecord,
- SecondModule);
- Diagnosed = true;
- break;
- }
-
- assert(FirstDiffType == SecondDiffType);
-
- switch (FirstDiffType) {
- case Other:
- case EndOfClass:
- case PublicSpecifer:
- case PrivateSpecifer:
- case ProtectedSpecifer:
- llvm_unreachable("Invalid diff type");
-
- case StaticAssert: {
- StaticAssertDecl *FirstSA = cast<StaticAssertDecl>(FirstDecl);
- StaticAssertDecl *SecondSA = cast<StaticAssertDecl>(SecondDecl);
-
- Expr *FirstExpr = FirstSA->getAssertExpr();
- Expr *SecondExpr = SecondSA->getAssertExpr();
- unsigned FirstODRHash = ComputeODRHash(FirstExpr);
- unsigned SecondODRHash = ComputeODRHash(SecondExpr);
- if (FirstODRHash != SecondODRHash) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstExpr->getBeginLoc(),
- FirstExpr->getSourceRange(), StaticAssertCondition);
- ODRDiagDeclNote(SecondModule, SecondExpr->getBeginLoc(),
- SecondExpr->getSourceRange(), StaticAssertCondition);
- Diagnosed = true;
- break;
- }
-
- StringLiteral *FirstStr = FirstSA->getMessage();
- StringLiteral *SecondStr = SecondSA->getMessage();
- assert((FirstStr || SecondStr) && "Both messages cannot be empty");
- if ((FirstStr && !SecondStr) || (!FirstStr && SecondStr)) {
- SourceLocation FirstLoc, SecondLoc;
- SourceRange FirstRange, SecondRange;
- if (FirstStr) {
- FirstLoc = FirstStr->getBeginLoc();
- FirstRange = FirstStr->getSourceRange();
- } else {
- FirstLoc = FirstSA->getBeginLoc();
- FirstRange = FirstSA->getSourceRange();
- }
- if (SecondStr) {
- SecondLoc = SecondStr->getBeginLoc();
- SecondRange = SecondStr->getSourceRange();
- } else {
- SecondLoc = SecondSA->getBeginLoc();
- SecondRange = SecondSA->getSourceRange();
- }
- ODRDiagDeclError(FirstRecord, FirstModule, FirstLoc, FirstRange,
- StaticAssertOnlyMessage)
- << (FirstStr == nullptr);
- ODRDiagDeclNote(SecondModule, SecondLoc, SecondRange,
- StaticAssertOnlyMessage)
- << (SecondStr == nullptr);
- Diagnosed = true;
- break;
- }
-
- if (FirstStr && SecondStr &&
- FirstStr->getString() != SecondStr->getString()) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstStr->getBeginLoc(),
- FirstStr->getSourceRange(), StaticAssertMessage);
- ODRDiagDeclNote(SecondModule, SecondStr->getBeginLoc(),
- SecondStr->getSourceRange(), StaticAssertMessage);
- Diagnosed = true;
- break;
- }
- break;
- }
- case Field: {
- Diagnosed = ODRDiagField(FirstRecord, FirstModule, SecondModule,
- cast<FieldDecl>(FirstDecl),
- cast<FieldDecl>(SecondDecl));
- break;
- }
- case CXXMethod: {
- enum {
- DiagMethod,
- DiagConstructor,
- DiagDestructor,
- } FirstMethodType,
- SecondMethodType;
- auto GetMethodTypeForDiagnostics = [](const CXXMethodDecl* D) {
- if (isa<CXXConstructorDecl>(D)) return DiagConstructor;
- if (isa<CXXDestructorDecl>(D)) return DiagDestructor;
- return DiagMethod;
- };
- const CXXMethodDecl *FirstMethod = cast<CXXMethodDecl>(FirstDecl);
- const CXXMethodDecl *SecondMethod = cast<CXXMethodDecl>(SecondDecl);
- FirstMethodType = GetMethodTypeForDiagnostics(FirstMethod);
- SecondMethodType = GetMethodTypeForDiagnostics(SecondMethod);
- auto FirstName = FirstMethod->getDeclName();
- auto SecondName = SecondMethod->getDeclName();
- if (FirstMethodType != SecondMethodType || FirstName != SecondName) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodName)
- << FirstMethodType << FirstName;
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodName)
- << SecondMethodType << SecondName;
-
- Diagnosed = true;
- break;
- }
-
- const bool FirstDeleted = FirstMethod->isDeletedAsWritten();
- const bool SecondDeleted = SecondMethod->isDeletedAsWritten();
- if (FirstDeleted != SecondDeleted) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodDeleted)
- << FirstMethodType << FirstName << FirstDeleted;
-
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodDeleted)
- << SecondMethodType << SecondName << SecondDeleted;
- Diagnosed = true;
- break;
- }
-
- const bool FirstDefaulted = FirstMethod->isExplicitlyDefaulted();
- const bool SecondDefaulted = SecondMethod->isExplicitlyDefaulted();
- if (FirstDefaulted != SecondDefaulted) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodDefaulted)
- << FirstMethodType << FirstName << FirstDefaulted;
-
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodDefaulted)
- << SecondMethodType << SecondName << SecondDefaulted;
- Diagnosed = true;
- break;
- }
-
- const bool FirstVirtual = FirstMethod->isVirtualAsWritten();
- const bool SecondVirtual = SecondMethod->isVirtualAsWritten();
- const bool FirstPure = FirstMethod->isPure();
- const bool SecondPure = SecondMethod->isPure();
- if ((FirstVirtual || SecondVirtual) &&
- (FirstVirtual != SecondVirtual || FirstPure != SecondPure)) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodVirtual)
- << FirstMethodType << FirstName << FirstPure << FirstVirtual;
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodVirtual)
- << SecondMethodType << SecondName << SecondPure << SecondVirtual;
- Diagnosed = true;
- break;
- }
-
- // CXXMethodDecl::isStatic uses the canonical Decl. With Decl merging,
- // FirstDecl is the canonical Decl of SecondDecl, so the storage
- // class needs to be checked instead.
- const auto FirstStorage = FirstMethod->getStorageClass();
- const auto SecondStorage = SecondMethod->getStorageClass();
- const bool FirstStatic = FirstStorage == SC_Static;
- const bool SecondStatic = SecondStorage == SC_Static;
- if (FirstStatic != SecondStatic) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodStatic)
- << FirstMethodType << FirstName << FirstStatic;
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodStatic)
- << SecondMethodType << SecondName << SecondStatic;
- Diagnosed = true;
- break;
- }
-
- const bool FirstVolatile = FirstMethod->isVolatile();
- const bool SecondVolatile = SecondMethod->isVolatile();
- if (FirstVolatile != SecondVolatile) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodVolatile)
- << FirstMethodType << FirstName << FirstVolatile;
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodVolatile)
- << SecondMethodType << SecondName << SecondVolatile;
- Diagnosed = true;
- break;
- }
-
- const bool FirstConst = FirstMethod->isConst();
- const bool SecondConst = SecondMethod->isConst();
- if (FirstConst != SecondConst) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodConst)
- << FirstMethodType << FirstName << FirstConst;
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodConst)
- << SecondMethodType << SecondName << SecondConst;
- Diagnosed = true;
- break;
- }
-
- const bool FirstInline = FirstMethod->isInlineSpecified();
- const bool SecondInline = SecondMethod->isInlineSpecified();
- if (FirstInline != SecondInline) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodInline)
- << FirstMethodType << FirstName << FirstInline;
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodInline)
- << SecondMethodType << SecondName << SecondInline;
- Diagnosed = true;
- break;
- }
-
- const unsigned FirstNumParameters = FirstMethod->param_size();
- const unsigned SecondNumParameters = SecondMethod->param_size();
- if (FirstNumParameters != SecondNumParameters) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(),
- MethodNumberParameters)
- << FirstMethodType << FirstName << FirstNumParameters;
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodNumberParameters)
- << SecondMethodType << SecondName << SecondNumParameters;
- Diagnosed = true;
- break;
- }
-
- // Need this status boolean to know when break out of the switch.
- bool ParameterMismatch = false;
- for (unsigned I = 0; I < FirstNumParameters; ++I) {
- const ParmVarDecl *FirstParam = FirstMethod->getParamDecl(I);
- const ParmVarDecl *SecondParam = SecondMethod->getParamDecl(I);
-
- QualType FirstParamType = FirstParam->getType();
- QualType SecondParamType = SecondParam->getType();
- if (FirstParamType != SecondParamType &&
- ComputeQualTypeODRHash(FirstParamType) !=
- ComputeQualTypeODRHash(SecondParamType)) {
- if (const DecayedType *ParamDecayedType =
- FirstParamType->getAs<DecayedType>()) {
- ODRDiagDeclError(
- FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodParameterType)
- << FirstMethodType << FirstName << (I + 1) << FirstParamType
- << true << ParamDecayedType->getOriginalType();
- } else {
- ODRDiagDeclError(
- FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodParameterType)
- << FirstMethodType << FirstName << (I + 1) << FirstParamType
- << false;
- }
-
- if (const DecayedType *ParamDecayedType =
- SecondParamType->getAs<DecayedType>()) {
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodParameterType)
- << SecondMethodType << SecondName << (I + 1)
- << SecondParamType << true
- << ParamDecayedType->getOriginalType();
- } else {
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodParameterType)
- << SecondMethodType << SecondName << (I + 1)
- << SecondParamType << false;
- }
- ParameterMismatch = true;
- break;
- }
-
- DeclarationName FirstParamName = FirstParam->getDeclName();
- DeclarationName SecondParamName = SecondParam->getDeclName();
- if (FirstParamName != SecondParamName) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodParameterName)
- << FirstMethodType << FirstName << (I + 1) << FirstParamName;
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodParameterName)
- << SecondMethodType << SecondName << (I + 1) << SecondParamName;
- ParameterMismatch = true;
- break;
- }
-
- const Expr *FirstInit = FirstParam->getInit();
- const Expr *SecondInit = SecondParam->getInit();
- if ((FirstInit == nullptr) != (SecondInit == nullptr)) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstMethod->getLocation(),
- FirstMethod->getSourceRange(),
- MethodParameterSingleDefaultArgument)
- << FirstMethodType << FirstName << (I + 1)
- << (FirstInit == nullptr)
- << (FirstInit ? FirstInit->getSourceRange() : SourceRange());
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodParameterSingleDefaultArgument)
- << SecondMethodType << SecondName << (I + 1)
- << (SecondInit == nullptr)
- << (SecondInit ? SecondInit->getSourceRange() : SourceRange());
- ParameterMismatch = true;
- break;
- }
-
- if (FirstInit && SecondInit &&
- ComputeODRHash(FirstInit) != ComputeODRHash(SecondInit)) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstMethod->getLocation(),
- FirstMethod->getSourceRange(),
- MethodParameterDifferentDefaultArgument)
- << FirstMethodType << FirstName << (I + 1)
- << FirstInit->getSourceRange();
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodParameterDifferentDefaultArgument)
- << SecondMethodType << SecondName << (I + 1)
- << SecondInit->getSourceRange();
- ParameterMismatch = true;
- break;
-
- }
- }
-
- if (ParameterMismatch) {
- Diagnosed = true;
- break;
- }
-
- const auto *FirstTemplateArgs =
- FirstMethod->getTemplateSpecializationArgs();
- const auto *SecondTemplateArgs =
- SecondMethod->getTemplateSpecializationArgs();
-
- if ((FirstTemplateArgs && !SecondTemplateArgs) ||
- (!FirstTemplateArgs && SecondTemplateArgs)) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(),
- MethodNoTemplateArguments)
- << FirstMethodType << FirstName << (FirstTemplateArgs != nullptr);
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodNoTemplateArguments)
- << SecondMethodType << SecondName
- << (SecondTemplateArgs != nullptr);
-
- Diagnosed = true;
- break;
- }
-
- if (FirstTemplateArgs && SecondTemplateArgs) {
- // Remove pack expansions from argument list.
- auto ExpandTemplateArgumentList =
- [](const TemplateArgumentList *TAL) {
- llvm::SmallVector<const TemplateArgument *, 8> ExpandedList;
- for (const TemplateArgument &TA : TAL->asArray()) {
- if (TA.getKind() != TemplateArgument::Pack) {
- ExpandedList.push_back(&TA);
- continue;
- }
- for (const TemplateArgument &PackTA : TA.getPackAsArray()) {
- ExpandedList.push_back(&PackTA);
- }
- }
- return ExpandedList;
- };
- llvm::SmallVector<const TemplateArgument *, 8> FirstExpandedList =
- ExpandTemplateArgumentList(FirstTemplateArgs);
- llvm::SmallVector<const TemplateArgument *, 8> SecondExpandedList =
- ExpandTemplateArgumentList(SecondTemplateArgs);
-
- if (FirstExpandedList.size() != SecondExpandedList.size()) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstMethod->getLocation(),
- FirstMethod->getSourceRange(),
- MethodDifferentNumberTemplateArguments)
- << FirstMethodType << FirstName
- << (unsigned)FirstExpandedList.size();
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodDifferentNumberTemplateArguments)
- << SecondMethodType << SecondName
- << (unsigned)SecondExpandedList.size();
-
- Diagnosed = true;
- break;
- }
-
- bool TemplateArgumentMismatch = false;
- for (unsigned i = 0, e = FirstExpandedList.size(); i != e; ++i) {
- const TemplateArgument &FirstTA = *FirstExpandedList[i],
- &SecondTA = *SecondExpandedList[i];
- if (ComputeTemplateArgumentODRHash(FirstTA) ==
- ComputeTemplateArgumentODRHash(SecondTA)) {
- continue;
- }
-
- ODRDiagDeclError(
- FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodDifferentTemplateArgument)
- << FirstMethodType << FirstName << FirstTA << i + 1;
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodDifferentTemplateArgument)
- << SecondMethodType << SecondName << SecondTA << i + 1;
-
- TemplateArgumentMismatch = true;
- break;
- }
-
- if (TemplateArgumentMismatch) {
- Diagnosed = true;
- break;
- }
- }
-
- // Compute the hash of the method as if it has no body.
- auto ComputeCXXMethodODRHash = [&Hash](const CXXMethodDecl *D) {
- Hash.clear();
- Hash.AddFunctionDecl(D, true /*SkipBody*/);
- return Hash.CalculateHash();
- };
-
- // Compare the hash generated to the hash stored. A difference means
- // that a body was present in the original source. Due to merging,
- // the stardard way of detecting a body will not work.
- const bool HasFirstBody =
- ComputeCXXMethodODRHash(FirstMethod) != FirstMethod->getODRHash();
- const bool HasSecondBody =
- ComputeCXXMethodODRHash(SecondMethod) != SecondMethod->getODRHash();
-
- if (HasFirstBody != HasSecondBody) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodSingleBody)
- << FirstMethodType << FirstName << HasFirstBody;
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodSingleBody)
- << SecondMethodType << SecondName << HasSecondBody;
- Diagnosed = true;
- break;
- }
-
- if (HasFirstBody && HasSecondBody) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodDifferentBody)
- << FirstMethodType << FirstName;
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodDifferentBody)
- << SecondMethodType << SecondName;
- Diagnosed = true;
- break;
- }
-
- break;
- }
- case TypeAlias:
- case TypeDef: {
- Diagnosed = ODRDiagTypeDefOrAlias(
- FirstRecord, FirstModule, SecondModule,
- cast<TypedefNameDecl>(FirstDecl), cast<TypedefNameDecl>(SecondDecl),
- FirstDiffType == TypeAlias);
- break;
- }
- case Var: {
- Diagnosed =
- ODRDiagVar(FirstRecord, FirstModule, SecondModule,
- cast<VarDecl>(FirstDecl), cast<VarDecl>(SecondDecl));
- break;
- }
- case Friend: {
- FriendDecl *FirstFriend = cast<FriendDecl>(FirstDecl);
- FriendDecl *SecondFriend = cast<FriendDecl>(SecondDecl);
-
- NamedDecl *FirstND = FirstFriend->getFriendDecl();
- NamedDecl *SecondND = SecondFriend->getFriendDecl();
-
- TypeSourceInfo *FirstTSI = FirstFriend->getFriendType();
- TypeSourceInfo *SecondTSI = SecondFriend->getFriendType();
-
- if (FirstND && SecondND) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstFriend->getFriendLoc(),
- FirstFriend->getSourceRange(), FriendFunction)
- << FirstND;
- ODRDiagDeclNote(SecondModule, SecondFriend->getFriendLoc(),
- SecondFriend->getSourceRange(), FriendFunction)
- << SecondND;
-
- Diagnosed = true;
- break;
- }
-
- if (FirstTSI && SecondTSI) {
- QualType FirstFriendType = FirstTSI->getType();
- QualType SecondFriendType = SecondTSI->getType();
- assert(ComputeQualTypeODRHash(FirstFriendType) !=
- ComputeQualTypeODRHash(SecondFriendType));
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstFriend->getFriendLoc(),
- FirstFriend->getSourceRange(), FriendType)
- << FirstFriendType;
- ODRDiagDeclNote(SecondModule, SecondFriend->getFriendLoc(),
- SecondFriend->getSourceRange(), FriendType)
- << SecondFriendType;
- Diagnosed = true;
- break;
- }
-
- ODRDiagDeclError(FirstRecord, FirstModule, FirstFriend->getFriendLoc(),
- FirstFriend->getSourceRange(), FriendTypeFunction)
- << (FirstTSI == nullptr);
- ODRDiagDeclNote(SecondModule, SecondFriend->getFriendLoc(),
- SecondFriend->getSourceRange(), FriendTypeFunction)
- << (SecondTSI == nullptr);
-
+ if (DiagsEmitter.diagnoseMismatch(FirstRecord, RecordPair.first,
+ RecordPair.second)) {
Diagnosed = true;
break;
}
- case FunctionTemplate: {
- FunctionTemplateDecl *FirstTemplate =
- cast<FunctionTemplateDecl>(FirstDecl);
- FunctionTemplateDecl *SecondTemplate =
- cast<FunctionTemplateDecl>(SecondDecl);
-
- TemplateParameterList *FirstTPL =
- FirstTemplate->getTemplateParameters();
- TemplateParameterList *SecondTPL =
- SecondTemplate->getTemplateParameters();
-
- if (FirstTPL->size() != SecondTPL->size()) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateDifferentNumberParameters)
- << FirstTemplate << FirstTPL->size();
- ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateDifferentNumberParameters)
- << SecondTemplate << SecondTPL->size();
-
- Diagnosed = true;
- break;
- }
-
- bool ParameterMismatch = false;
- for (unsigned i = 0, e = FirstTPL->size(); i != e; ++i) {
- NamedDecl *FirstParam = FirstTPL->getParam(i);
- NamedDecl *SecondParam = SecondTPL->getParam(i);
-
- if (FirstParam->getKind() != SecondParam->getKind()) {
- enum {
- TemplateTypeParameter,
- NonTypeTemplateParameter,
- TemplateTemplateParameter,
- };
- auto GetParamType = [](NamedDecl *D) {
- switch (D->getKind()) {
- default:
- llvm_unreachable("Unexpected template parameter type");
- case Decl::TemplateTypeParm:
- return TemplateTypeParameter;
- case Decl::NonTypeTemplateParm:
- return NonTypeTemplateParameter;
- case Decl::TemplateTemplateParm:
- return TemplateTemplateParameter;
- }
- };
-
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentKind)
- << FirstTemplate << (i + 1) << GetParamType(FirstParam);
- ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentKind)
- << SecondTemplate << (i + 1) << GetParamType(SecondParam);
-
- ParameterMismatch = true;
- break;
- }
-
- if (FirstParam->getName() != SecondParam->getName()) {
- ODRDiagDeclError(
- FirstRecord, FirstModule, FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(), FunctionTemplateParameterName)
- << FirstTemplate << (i + 1) << (bool)FirstParam->getIdentifier()
- << FirstParam;
- ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterName)
- << SecondTemplate << (i + 1)
- << (bool)SecondParam->getIdentifier() << SecondParam;
- ParameterMismatch = true;
- break;
- }
-
- if (isa<TemplateTypeParmDecl>(FirstParam) &&
- isa<TemplateTypeParmDecl>(SecondParam)) {
- TemplateTypeParmDecl *FirstTTPD =
- cast<TemplateTypeParmDecl>(FirstParam);
- TemplateTypeParmDecl *SecondTTPD =
- cast<TemplateTypeParmDecl>(SecondParam);
- bool HasFirstDefaultArgument =
- FirstTTPD->hasDefaultArgument() &&
- !FirstTTPD->defaultArgumentWasInherited();
- bool HasSecondDefaultArgument =
- SecondTTPD->hasDefaultArgument() &&
- !SecondTTPD->defaultArgumentWasInherited();
- if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
- << FirstTemplate << (i + 1) << HasFirstDefaultArgument;
- ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
- << SecondTemplate << (i + 1) << HasSecondDefaultArgument;
- ParameterMismatch = true;
- break;
- }
-
- if (HasFirstDefaultArgument && HasSecondDefaultArgument) {
- QualType FirstType = FirstTTPD->getDefaultArgument();
- QualType SecondType = SecondTTPD->getDefaultArgument();
- if (ComputeQualTypeODRHash(FirstType) !=
- ComputeQualTypeODRHash(SecondType)) {
- ODRDiagDeclError(
- FirstRecord, FirstModule, FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentDefaultArgument)
- << FirstTemplate << (i + 1) << FirstType;
- ODRDiagDeclNote(
- SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentDefaultArgument)
- << SecondTemplate << (i + 1) << SecondType;
- ParameterMismatch = true;
- break;
- }
- }
-
- if (FirstTTPD->isParameterPack() !=
- SecondTTPD->isParameterPack()) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
- << FirstTemplate << (i + 1) << FirstTTPD->isParameterPack();
- ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
- << SecondTemplate << (i + 1) << SecondTTPD->isParameterPack();
- ParameterMismatch = true;
- break;
- }
- }
-
- if (isa<TemplateTemplateParmDecl>(FirstParam) &&
- isa<TemplateTemplateParmDecl>(SecondParam)) {
- TemplateTemplateParmDecl *FirstTTPD =
- cast<TemplateTemplateParmDecl>(FirstParam);
- TemplateTemplateParmDecl *SecondTTPD =
- cast<TemplateTemplateParmDecl>(SecondParam);
-
- TemplateParameterList *FirstTPL =
- FirstTTPD->getTemplateParameters();
- TemplateParameterList *SecondTPL =
- SecondTTPD->getTemplateParameters();
-
- if (ComputeTemplateParameterListODRHash(FirstTPL) !=
- ComputeTemplateParameterListODRHash(SecondTPL)) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentType)
- << FirstTemplate << (i + 1);
- ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentType)
- << SecondTemplate << (i + 1);
- ParameterMismatch = true;
- break;
- }
-
- bool HasFirstDefaultArgument =
- FirstTTPD->hasDefaultArgument() &&
- !FirstTTPD->defaultArgumentWasInherited();
- bool HasSecondDefaultArgument =
- SecondTTPD->hasDefaultArgument() &&
- !SecondTTPD->defaultArgumentWasInherited();
- if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
- << FirstTemplate << (i + 1) << HasFirstDefaultArgument;
- ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
- << SecondTemplate << (i + 1) << HasSecondDefaultArgument;
- ParameterMismatch = true;
- break;
- }
-
- if (HasFirstDefaultArgument && HasSecondDefaultArgument) {
- TemplateArgument FirstTA =
- FirstTTPD->getDefaultArgument().getArgument();
- TemplateArgument SecondTA =
- SecondTTPD->getDefaultArgument().getArgument();
- if (ComputeTemplateArgumentODRHash(FirstTA) !=
- ComputeTemplateArgumentODRHash(SecondTA)) {
- ODRDiagDeclError(
- FirstRecord, FirstModule, FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentDefaultArgument)
- << FirstTemplate << (i + 1) << FirstTA;
- ODRDiagDeclNote(
- SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentDefaultArgument)
- << SecondTemplate << (i + 1) << SecondTA;
- ParameterMismatch = true;
- break;
- }
- }
-
- if (FirstTTPD->isParameterPack() !=
- SecondTTPD->isParameterPack()) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
- << FirstTemplate << (i + 1) << FirstTTPD->isParameterPack();
- ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
- << SecondTemplate << (i + 1) << SecondTTPD->isParameterPack();
- ParameterMismatch = true;
- break;
- }
- }
-
- if (isa<NonTypeTemplateParmDecl>(FirstParam) &&
- isa<NonTypeTemplateParmDecl>(SecondParam)) {
- NonTypeTemplateParmDecl *FirstNTTPD =
- cast<NonTypeTemplateParmDecl>(FirstParam);
- NonTypeTemplateParmDecl *SecondNTTPD =
- cast<NonTypeTemplateParmDecl>(SecondParam);
-
- QualType FirstType = FirstNTTPD->getType();
- QualType SecondType = SecondNTTPD->getType();
- if (ComputeQualTypeODRHash(FirstType) !=
- ComputeQualTypeODRHash(SecondType)) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentType)
- << FirstTemplate << (i + 1);
- ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentType)
- << SecondTemplate << (i + 1);
- ParameterMismatch = true;
- break;
- }
-
- bool HasFirstDefaultArgument =
- FirstNTTPD->hasDefaultArgument() &&
- !FirstNTTPD->defaultArgumentWasInherited();
- bool HasSecondDefaultArgument =
- SecondNTTPD->hasDefaultArgument() &&
- !SecondNTTPD->defaultArgumentWasInherited();
- if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
- << FirstTemplate << (i + 1) << HasFirstDefaultArgument;
- ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
- << SecondTemplate << (i + 1) << HasSecondDefaultArgument;
- ParameterMismatch = true;
- break;
- }
-
- if (HasFirstDefaultArgument && HasSecondDefaultArgument) {
- Expr *FirstDefaultArgument = FirstNTTPD->getDefaultArgument();
- Expr *SecondDefaultArgument = SecondNTTPD->getDefaultArgument();
- if (ComputeODRHash(FirstDefaultArgument) !=
- ComputeODRHash(SecondDefaultArgument)) {
- ODRDiagDeclError(
- FirstRecord, FirstModule, FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentDefaultArgument)
- << FirstTemplate << (i + 1) << FirstDefaultArgument;
- ODRDiagDeclNote(
- SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentDefaultArgument)
- << SecondTemplate << (i + 1) << SecondDefaultArgument;
- ParameterMismatch = true;
- break;
- }
- }
-
- if (FirstNTTPD->isParameterPack() !=
- SecondNTTPD->isParameterPack()) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
- << FirstTemplate << (i + 1) << FirstNTTPD->isParameterPack();
- ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
- << SecondTemplate << (i + 1)
- << SecondNTTPD->isParameterPack();
- ParameterMismatch = true;
- break;
- }
- }
- }
-
- if (ParameterMismatch) {
- Diagnosed = true;
- break;
- }
-
- break;
- }
- }
-
- if (Diagnosed)
- continue;
-
- Diag(FirstDecl->getLocation(),
- diag::err_module_odr_violation_mismatch_decl_unknown)
- << FirstRecord << FirstModule.empty() << FirstModule << FirstDiffType
- << FirstDecl->getSourceRange();
- Diag(SecondDecl->getLocation(),
- diag::note_module_odr_violation_mismatch_decl_unknown)
- << SecondModule << FirstDiffType << SecondDecl->getSourceRange();
- Diagnosed = true;
}
if (!Diagnosed) {
@@ -11212,160 +9978,39 @@ void ASTReader::diagnoseOdrViolations() {
// FIXME: How can this even happen?
Diag(Merge.first->getLocation(),
diag::err_module_odr_violation_different_instantiations)
- << Merge.first;
+ << Merge.first;
}
}
- // Issue ODR failures diagnostics for functions.
- for (auto &Merge : FunctionOdrMergeFailures) {
- enum ODRFunctionDifference {
- ReturnType,
- ParameterName,
- ParameterType,
- ParameterSingleDefaultArgument,
- ParameterDifferentDefaultArgument,
- FunctionBody,
- };
-
- FunctionDecl *FirstFunction = Merge.first;
- std::string FirstModule = getOwningModuleNameForDiagnostic(FirstFunction);
+ // Issue any pending ODR-failure diagnostics for RecordDecl in C/ObjC. Note
+ // that in C++ this is done as a part of CXXRecordDecl ODR checking.
+ for (auto &Merge : RecordOdrMergeFailures) {
+ // If we've already pointed out a specific problem with this class, don't
+ // bother issuing a general "something's different" diagnostic.
+ if (!DiagnosedOdrMergeFailures.insert(Merge.first).second)
+ continue;
+ RecordDecl *FirstRecord = Merge.first;
bool Diagnosed = false;
- for (auto &SecondFunction : Merge.second) {
-
- if (FirstFunction == SecondFunction)
- continue;
-
- std::string SecondModule =
- getOwningModuleNameForDiagnostic(SecondFunction);
-
- auto ODRDiagError = [FirstFunction, &FirstModule,
- this](SourceLocation Loc, SourceRange Range,
- ODRFunctionDifference DiffType) {
- return Diag(Loc, diag::err_module_odr_violation_function)
- << FirstFunction << FirstModule.empty() << FirstModule << Range
- << DiffType;
- };
- auto ODRDiagNote = [&SecondModule, this](SourceLocation Loc,
- SourceRange Range,
- ODRFunctionDifference DiffType) {
- return Diag(Loc, diag::note_module_odr_violation_function)
- << SecondModule << Range << DiffType;
- };
-
- if (ComputeQualTypeODRHash(FirstFunction->getReturnType()) !=
- ComputeQualTypeODRHash(SecondFunction->getReturnType())) {
- ODRDiagError(FirstFunction->getReturnTypeSourceRange().getBegin(),
- FirstFunction->getReturnTypeSourceRange(), ReturnType)
- << FirstFunction->getReturnType();
- ODRDiagNote(SecondFunction->getReturnTypeSourceRange().getBegin(),
- SecondFunction->getReturnTypeSourceRange(), ReturnType)
- << SecondFunction->getReturnType();
+ for (auto *SecondRecord : Merge.second) {
+ if (DiagsEmitter.diagnoseMismatch(FirstRecord, SecondRecord)) {
Diagnosed = true;
break;
}
+ }
+ (void)Diagnosed;
+ assert(Diagnosed && "Unable to emit ODR diagnostic.");
+ }
- assert(FirstFunction->param_size() == SecondFunction->param_size() &&
- "Merged functions with different number of parameters");
-
- auto ParamSize = FirstFunction->param_size();
- bool ParameterMismatch = false;
- for (unsigned I = 0; I < ParamSize; ++I) {
- auto *FirstParam = FirstFunction->getParamDecl(I);
- auto *SecondParam = SecondFunction->getParamDecl(I);
-
- assert(getContext().hasSameType(FirstParam->getType(),
- SecondParam->getType()) &&
- "Merged function has different parameter types.");
-
- if (FirstParam->getDeclName() != SecondParam->getDeclName()) {
- ODRDiagError(FirstParam->getLocation(), FirstParam->getSourceRange(),
- ParameterName)
- << I + 1 << FirstParam->getDeclName();
- ODRDiagNote(SecondParam->getLocation(), SecondParam->getSourceRange(),
- ParameterName)
- << I + 1 << SecondParam->getDeclName();
- ParameterMismatch = true;
- break;
- };
-
- QualType FirstParamType = FirstParam->getType();
- QualType SecondParamType = SecondParam->getType();
- if (FirstParamType != SecondParamType &&
- ComputeQualTypeODRHash(FirstParamType) !=
- ComputeQualTypeODRHash(SecondParamType)) {
- if (const DecayedType *ParamDecayedType =
- FirstParamType->getAs<DecayedType>()) {
- ODRDiagError(FirstParam->getLocation(),
- FirstParam->getSourceRange(), ParameterType)
- << (I + 1) << FirstParamType << true
- << ParamDecayedType->getOriginalType();
- } else {
- ODRDiagError(FirstParam->getLocation(),
- FirstParam->getSourceRange(), ParameterType)
- << (I + 1) << FirstParamType << false;
- }
-
- if (const DecayedType *ParamDecayedType =
- SecondParamType->getAs<DecayedType>()) {
- ODRDiagNote(SecondParam->getLocation(),
- SecondParam->getSourceRange(), ParameterType)
- << (I + 1) << SecondParamType << true
- << ParamDecayedType->getOriginalType();
- } else {
- ODRDiagNote(SecondParam->getLocation(),
- SecondParam->getSourceRange(), ParameterType)
- << (I + 1) << SecondParamType << false;
- }
- ParameterMismatch = true;
- break;
- }
-
- const Expr *FirstInit = FirstParam->getInit();
- const Expr *SecondInit = SecondParam->getInit();
- if ((FirstInit == nullptr) != (SecondInit == nullptr)) {
- ODRDiagError(FirstParam->getLocation(), FirstParam->getSourceRange(),
- ParameterSingleDefaultArgument)
- << (I + 1) << (FirstInit == nullptr)
- << (FirstInit ? FirstInit->getSourceRange() : SourceRange());
- ODRDiagNote(SecondParam->getLocation(), SecondParam->getSourceRange(),
- ParameterSingleDefaultArgument)
- << (I + 1) << (SecondInit == nullptr)
- << (SecondInit ? SecondInit->getSourceRange() : SourceRange());
- ParameterMismatch = true;
- break;
- }
-
- if (FirstInit && SecondInit &&
- ComputeODRHash(FirstInit) != ComputeODRHash(SecondInit)) {
- ODRDiagError(FirstParam->getLocation(), FirstParam->getSourceRange(),
- ParameterDifferentDefaultArgument)
- << (I + 1) << FirstInit->getSourceRange();
- ODRDiagNote(SecondParam->getLocation(), SecondParam->getSourceRange(),
- ParameterDifferentDefaultArgument)
- << (I + 1) << SecondInit->getSourceRange();
- ParameterMismatch = true;
- break;
- }
-
- assert(ComputeSubDeclODRHash(FirstParam) ==
- ComputeSubDeclODRHash(SecondParam) &&
- "Undiagnosed parameter difference.");
- }
-
- if (ParameterMismatch) {
+ // Issue ODR failures diagnostics for functions.
+ for (auto &Merge : FunctionOdrMergeFailures) {
+ FunctionDecl *FirstFunction = Merge.first;
+ bool Diagnosed = false;
+ for (auto &SecondFunction : Merge.second) {
+ if (DiagsEmitter.diagnoseMismatch(FirstFunction, SecondFunction)) {
Diagnosed = true;
break;
}
-
- // If no error has been generated before now, assume the problem is in
- // the body and generate a message.
- ODRDiagError(FirstFunction->getLocation(),
- FirstFunction->getSourceRange(), FunctionBody);
- ODRDiagNote(SecondFunction->getLocation(),
- SecondFunction->getSourceRange(), FunctionBody);
- Diagnosed = true;
- break;
}
(void)Diagnosed;
assert(Diagnosed && "Unable to emit ODR diagnostic.");
@@ -11373,188 +10018,57 @@ void ASTReader::diagnoseOdrViolations() {
// Issue ODR failures diagnostics for enums.
for (auto &Merge : EnumOdrMergeFailures) {
- enum ODREnumDifference {
- SingleScopedEnum,
- EnumTagKeywordMismatch,
- SingleSpecifiedType,
- DifferentSpecifiedTypes,
- DifferentNumberEnumConstants,
- EnumConstantName,
- EnumConstantSingleInitilizer,
- EnumConstantDifferentInitilizer,
- };
-
// If we've already pointed out a specific problem with this enum, don't
// bother issuing a general "something's different" diagnostic.
if (!DiagnosedOdrMergeFailures.insert(Merge.first).second)
continue;
EnumDecl *FirstEnum = Merge.first;
- std::string FirstModule = getOwningModuleNameForDiagnostic(FirstEnum);
-
- using DeclHashes =
- llvm::SmallVector<std::pair<EnumConstantDecl *, unsigned>, 4>;
- auto PopulateHashes = [&ComputeSubDeclODRHash, FirstEnum](
- DeclHashes &Hashes, EnumDecl *Enum) {
- for (auto *D : Enum->decls()) {
- // Due to decl merging, the first EnumDecl is the parent of
- // Decls in both records.
- if (!ODRHash::isDeclToBeProcessed(D, FirstEnum))
- continue;
- assert(isa<EnumConstantDecl>(D) && "Unexpected Decl kind");
- Hashes.emplace_back(cast<EnumConstantDecl>(D),
- ComputeSubDeclODRHash(D));
- }
- };
- DeclHashes FirstHashes;
- PopulateHashes(FirstHashes, FirstEnum);
bool Diagnosed = false;
for (auto &SecondEnum : Merge.second) {
-
- if (FirstEnum == SecondEnum)
- continue;
-
- std::string SecondModule =
- getOwningModuleNameForDiagnostic(SecondEnum);
-
- auto ODRDiagError = [FirstEnum, &FirstModule,
- this](SourceLocation Loc, SourceRange Range,
- ODREnumDifference DiffType) {
- return Diag(Loc, diag::err_module_odr_violation_enum)
- << FirstEnum << FirstModule.empty() << FirstModule << Range
- << DiffType;
- };
- auto ODRDiagNote = [&SecondModule, this](SourceLocation Loc,
- SourceRange Range,
- ODREnumDifference DiffType) {
- return Diag(Loc, diag::note_module_odr_violation_enum)
- << SecondModule << Range << DiffType;
- };
-
- if (FirstEnum->isScoped() != SecondEnum->isScoped()) {
- ODRDiagError(FirstEnum->getLocation(), FirstEnum->getSourceRange(),
- SingleScopedEnum)
- << FirstEnum->isScoped();
- ODRDiagNote(SecondEnum->getLocation(), SecondEnum->getSourceRange(),
- SingleScopedEnum)
- << SecondEnum->isScoped();
+ if (DiagsEmitter.diagnoseMismatch(FirstEnum, SecondEnum)) {
Diagnosed = true;
- continue;
- }
-
- if (FirstEnum->isScoped() && SecondEnum->isScoped()) {
- if (FirstEnum->isScopedUsingClassTag() !=
- SecondEnum->isScopedUsingClassTag()) {
- ODRDiagError(FirstEnum->getLocation(), FirstEnum->getSourceRange(),
- EnumTagKeywordMismatch)
- << FirstEnum->isScopedUsingClassTag();
- ODRDiagNote(SecondEnum->getLocation(), SecondEnum->getSourceRange(),
- EnumTagKeywordMismatch)
- << SecondEnum->isScopedUsingClassTag();
- Diagnosed = true;
- continue;
- }
- }
-
- QualType FirstUnderlyingType =
- FirstEnum->getIntegerTypeSourceInfo()
- ? FirstEnum->getIntegerTypeSourceInfo()->getType()
- : QualType();
- QualType SecondUnderlyingType =
- SecondEnum->getIntegerTypeSourceInfo()
- ? SecondEnum->getIntegerTypeSourceInfo()->getType()
- : QualType();
- if (FirstUnderlyingType.isNull() != SecondUnderlyingType.isNull()) {
- ODRDiagError(FirstEnum->getLocation(), FirstEnum->getSourceRange(),
- SingleSpecifiedType)
- << !FirstUnderlyingType.isNull();
- ODRDiagNote(SecondEnum->getLocation(), SecondEnum->getSourceRange(),
- SingleSpecifiedType)
- << !SecondUnderlyingType.isNull();
- Diagnosed = true;
- continue;
- }
-
- if (!FirstUnderlyingType.isNull() && !SecondUnderlyingType.isNull()) {
- if (ComputeQualTypeODRHash(FirstUnderlyingType) !=
- ComputeQualTypeODRHash(SecondUnderlyingType)) {
- ODRDiagError(FirstEnum->getLocation(), FirstEnum->getSourceRange(),
- DifferentSpecifiedTypes)
- << FirstUnderlyingType;
- ODRDiagNote(SecondEnum->getLocation(), SecondEnum->getSourceRange(),
- DifferentSpecifiedTypes)
- << SecondUnderlyingType;
- Diagnosed = true;
- continue;
- }
+ break;
}
+ }
+ (void)Diagnosed;
+ assert(Diagnosed && "Unable to emit ODR diagnostic.");
+ }
- DeclHashes SecondHashes;
- PopulateHashes(SecondHashes, SecondEnum);
+ for (auto &Merge : ObjCInterfaceOdrMergeFailures) {
+ // If we've already pointed out a specific problem with this interface,
+ // don't bother issuing a general "something's different" diagnostic.
+ if (!DiagnosedOdrMergeFailures.insert(Merge.first).second)
+ continue;
- if (FirstHashes.size() != SecondHashes.size()) {
- ODRDiagError(FirstEnum->getLocation(), FirstEnum->getSourceRange(),
- DifferentNumberEnumConstants)
- << (int)FirstHashes.size();
- ODRDiagNote(SecondEnum->getLocation(), SecondEnum->getSourceRange(),
- DifferentNumberEnumConstants)
- << (int)SecondHashes.size();
+ bool Diagnosed = false;
+ ObjCInterfaceDecl *FirstID = Merge.first;
+ for (auto &InterfacePair : Merge.second) {
+ if (DiagsEmitter.diagnoseMismatch(FirstID, InterfacePair.first,
+ InterfacePair.second)) {
Diagnosed = true;
- continue;
+ break;
}
+ }
+ (void)Diagnosed;
+ assert(Diagnosed && "Unable to emit ODR diagnostic.");
+ }
- for (unsigned I = 0; I < FirstHashes.size(); ++I) {
- if (FirstHashes[I].second == SecondHashes[I].second)
- continue;
- const EnumConstantDecl *FirstEnumConstant = FirstHashes[I].first;
- const EnumConstantDecl *SecondEnumConstant = SecondHashes[I].first;
-
- if (FirstEnumConstant->getDeclName() !=
- SecondEnumConstant->getDeclName()) {
-
- ODRDiagError(FirstEnumConstant->getLocation(),
- FirstEnumConstant->getSourceRange(), EnumConstantName)
- << I + 1 << FirstEnumConstant;
- ODRDiagNote(SecondEnumConstant->getLocation(),
- SecondEnumConstant->getSourceRange(), EnumConstantName)
- << I + 1 << SecondEnumConstant;
- Diagnosed = true;
- break;
- }
-
- const Expr *FirstInit = FirstEnumConstant->getInitExpr();
- const Expr *SecondInit = SecondEnumConstant->getInitExpr();
- if (!FirstInit && !SecondInit)
- continue;
-
- if (!FirstInit || !SecondInit) {
- ODRDiagError(FirstEnumConstant->getLocation(),
- FirstEnumConstant->getSourceRange(),
- EnumConstantSingleInitilizer)
- << I + 1 << FirstEnumConstant << (FirstInit != nullptr);
- ODRDiagNote(SecondEnumConstant->getLocation(),
- SecondEnumConstant->getSourceRange(),
- EnumConstantSingleInitilizer)
- << I + 1 << SecondEnumConstant << (SecondInit != nullptr);
- Diagnosed = true;
- break;
- }
+ for (auto &Merge : ObjCProtocolOdrMergeFailures) {
+ // If we've already pointed out a specific problem with this protocol,
+ // don't bother issuing a general "something's different" diagnostic.
+ if (!DiagnosedOdrMergeFailures.insert(Merge.first).second)
+ continue;
- if (ComputeODRHash(FirstInit) != ComputeODRHash(SecondInit)) {
- ODRDiagError(FirstEnumConstant->getLocation(),
- FirstEnumConstant->getSourceRange(),
- EnumConstantDifferentInitilizer)
- << I + 1 << FirstEnumConstant;
- ODRDiagNote(SecondEnumConstant->getLocation(),
- SecondEnumConstant->getSourceRange(),
- EnumConstantDifferentInitilizer)
- << I + 1 << SecondEnumConstant;
- Diagnosed = true;
- break;
- }
+ ObjCProtocolDecl *FirstProtocol = Merge.first;
+ bool Diagnosed = false;
+ for (auto &ProtocolPair : Merge.second) {
+ if (DiagsEmitter.diagnoseMismatch(FirstProtocol, ProtocolPair.first,
+ ProtocolPair.second)) {
+ Diagnosed = true;
+ break;
}
}
-
(void)Diagnosed;
assert(Diagnosed && "Unable to emit ODR diagnostic.");
}
@@ -11603,6 +10117,13 @@ void ASTReader::FinishedDeserializing() {
getContext().adjustDeducedFunctionResultType(Update.first,
Update.second);
}
+
+ auto UDTUpdates = std::move(PendingUndeducedFunctionDecls);
+ PendingUndeducedFunctionDecls.clear();
+ // We hope we can find the deduced type for the functions by iterating
+ // redeclarations in other modules.
+ for (FunctionDecl *UndeducedFD : UDTUpdates)
+ (void)UndeducedFD->getMostRecentDecl();
}
if (ReadTimer)
@@ -11637,8 +10158,7 @@ void ASTReader::pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name) {
// Adding the decl to IdResolver may have failed because it was already in
// (even though it was not added in scope). If it is already in, make sure
// it gets in the scope as well.
- if (std::find(SemaObj->IdResolver.begin(Name),
- SemaObj->IdResolver.end(), D) != SemaObj->IdResolver.end())
+ if (llvm::is_contained(SemaObj->IdResolver.decls(Name), D))
SemaObj->TUScope->AddDecl(D);
}
}
@@ -11794,6 +10314,12 @@ OMPClause *OMPClauseReader::readClause() {
case llvm::omp::OMPC_capture:
C = new (Context) OMPCaptureClause();
break;
+ case llvm::omp::OMPC_compare:
+ C = new (Context) OMPCompareClause();
+ break;
+ case llvm::omp::OMPC_fail:
+ C = new (Context) OMPFailClause();
+ break;
case llvm::omp::OMPC_seq_cst:
C = new (Context) OMPSeqCstClause();
break;
@@ -11833,7 +10359,16 @@ OMPClause *OMPClauseReader::readClause() {
case llvm::omp::OMPC_atomic_default_mem_order:
C = new (Context) OMPAtomicDefaultMemOrderClause();
break;
- case llvm::omp::OMPC_private:
+ case llvm::omp::OMPC_at:
+ C = new (Context) OMPAtClause();
+ break;
+ case llvm::omp::OMPC_severity:
+ C = new (Context) OMPSeverityClause();
+ break;
+ case llvm::omp::OMPC_message:
+ C = new (Context) OMPMessageClause();
+ break;
+ case llvm::omp::OMPC_private:
C = OMPPrivateClause::CreateEmpty(Context, Record.readInt());
break;
case llvm::omp::OMPC_firstprivate:
@@ -11962,6 +10497,15 @@ OMPClause *OMPClauseReader::readClause() {
C = OMPIsDevicePtrClause::CreateEmpty(Context, Sizes);
break;
}
+ case llvm::omp::OMPC_has_device_addr: {
+ OMPMappableExprListSizeTy Sizes;
+ Sizes.NumVars = Record.readInt();
+ Sizes.NumUniqueDeclarations = Record.readInt();
+ Sizes.NumComponentLists = Record.readInt();
+ Sizes.NumComponents = Record.readInt();
+ C = OMPHasDeviceAddrClause::CreateEmpty(Context, Sizes);
+ break;
+ }
case llvm::omp::OMPC_allocate:
C = OMPAllocateClause::CreateEmpty(Context, Record.readInt());
break;
@@ -12004,6 +10548,27 @@ OMPClause *OMPClauseReader::readClause() {
case llvm::omp::OMPC_filter:
C = new (Context) OMPFilterClause();
break;
+ case llvm::omp::OMPC_bind:
+ C = OMPBindClause::CreateEmpty(Context);
+ break;
+ case llvm::omp::OMPC_align:
+ C = new (Context) OMPAlignClause();
+ break;
+ case llvm::omp::OMPC_ompx_dyn_cgroup_mem:
+ C = new (Context) OMPXDynCGroupMemClause();
+ break;
+ case llvm::omp::OMPC_doacross: {
+ unsigned NumVars = Record.readInt();
+ unsigned NumLoops = Record.readInt();
+ C = OMPDoacrossClause::CreateEmpty(Context, NumVars, NumLoops);
+ break;
+ }
+ case llvm::omp::OMPC_ompx_attribute:
+ C = new (Context) OMPXAttributeClause();
+ break;
+ case llvm::omp::OMPC_ompx_bare:
+ C = new (Context) OMPXBareClause();
+ break;
#define OMP_CLAUSE_NO_CLASS(Enum, Str) \
case llvm::omp::Enum: \
break;
@@ -12146,6 +10711,18 @@ void OMPClauseReader::VisitOMPUpdateClause(OMPUpdateClause *C) {
void OMPClauseReader::VisitOMPCaptureClause(OMPCaptureClause *) {}
+void OMPClauseReader::VisitOMPCompareClause(OMPCompareClause *) {}
+
+// Read the parameter of fail clause. This will have been saved when
+// OMPClauseWriter is called.
+void OMPClauseReader::VisitOMPFailClause(OMPFailClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ SourceLocation FailParameterLoc = Record.readSourceLocation();
+ C->setFailParameterLoc(FailParameterLoc);
+ OpenMPClauseKind CKind = Record.readEnum<OpenMPClauseKind>();
+ C->setFailParameter(CKind);
+}
+
void OMPClauseReader::VisitOMPSeqCstClause(OMPSeqCstClause *) {}
void OMPClauseReader::VisitOMPAcqRelClause(OMPAcqRelClause *) {}
@@ -12218,6 +10795,23 @@ void OMPClauseReader::VisitOMPAtomicDefaultMemOrderClause(
C->setAtomicDefaultMemOrderKindKwLoc(Record.readSourceLocation());
}
+void OMPClauseReader::VisitOMPAtClause(OMPAtClause *C) {
+ C->setAtKind(static_cast<OpenMPAtClauseKind>(Record.readInt()));
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setAtKindKwLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPSeverityClause(OMPSeverityClause *C) {
+ C->setSeverityKind(static_cast<OpenMPSeverityClauseKind>(Record.readInt()));
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setSeverityKindKwLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPMessageClause(OMPMessageClause *C) {
+ C->setMessageString(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
void OMPClauseReader::VisitOMPPrivateClause(OMPPrivateClause *C) {
C->setLParenLoc(Record.readSourceLocation());
unsigned NumVars = C->varlist_size();
@@ -12523,6 +11117,7 @@ void OMPClauseReader::VisitOMPDependClause(OMPDependClause *C) {
static_cast<OpenMPDependClauseKind>(Record.readInt()));
C->setDependencyLoc(Record.readSourceLocation());
C->setColonLoc(Record.readSourceLocation());
+ C->setOmpAllMemoryLoc(Record.readSourceLocation());
unsigned NumVars = C->varlist_size();
SmallVector<Expr *, 16> Vars;
Vars.reserve(NumVars);
@@ -12543,10 +11138,13 @@ void OMPClauseReader::VisitOMPDeviceClause(OMPDeviceClause *C) {
void OMPClauseReader::VisitOMPMapClause(OMPMapClause *C) {
C->setLParenLoc(Record.readSourceLocation());
+ bool HasIteratorModifier = false;
for (unsigned I = 0; I < NumberOfOMPMapClauseModifiers; ++I) {
C->setMapTypeModifier(
I, static_cast<OpenMPMapModifierKind>(Record.readInt()));
C->setMapTypeModifierLoc(I, Record.readSourceLocation());
+ if (C->getMapTypeModifier(I) == OMPC_MAP_MODIFIER_iterator)
+ HasIteratorModifier = true;
}
C->setMapperQualifierLoc(Record.readNestedNameSpecifierLoc());
C->setMapperIdInfo(Record.readDeclarationNameInfo());
@@ -12571,6 +11169,9 @@ void OMPClauseReader::VisitOMPMapClause(OMPMapClause *C) {
UDMappers.push_back(Record.readExpr());
C->setUDMapperRefs(UDMappers);
+ if (HasIteratorModifier)
+ C->setIteratorModifier(Record.readExpr());
+
SmallVector<ValueDecl *, 16> Decls;
Decls.reserve(UniqueDecls);
for (unsigned i = 0; i < UniqueDecls; ++i)
@@ -12632,13 +11233,17 @@ void OMPClauseReader::VisitOMPPriorityClause(OMPPriorityClause *C) {
void OMPClauseReader::VisitOMPGrainsizeClause(OMPGrainsizeClause *C) {
VisitOMPClauseWithPreInit(C);
+ C->setModifier(Record.readEnum<OpenMPGrainsizeClauseModifier>());
C->setGrainsize(Record.readSubExpr());
+ C->setModifierLoc(Record.readSourceLocation());
C->setLParenLoc(Record.readSourceLocation());
}
void OMPClauseReader::VisitOMPNumTasksClause(OMPNumTasksClause *C) {
VisitOMPClauseWithPreInit(C);
+ C->setModifier(Record.readEnum<OpenMPNumTasksClauseModifier>());
C->setNumTasks(Record.readSubExpr());
+ C->setModifierLoc(Record.readSourceLocation());
C->setLParenLoc(Record.readSourceLocation());
}
@@ -12914,6 +11519,49 @@ void OMPClauseReader::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
C->setComponents(Components, ListSizes);
}
+void OMPClauseReader::VisitOMPHasDeviceAddrClause(OMPHasDeviceAddrClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ auto NumVars = C->varlist_size();
+ auto UniqueDecls = C->getUniqueDeclarationsNum();
+ auto TotalLists = C->getTotalComponentListNum();
+ auto TotalComponents = C->getTotalComponentsNum();
+
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned I = 0; I != NumVars; ++I)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+ Vars.clear();
+
+ SmallVector<ValueDecl *, 16> Decls;
+ Decls.reserve(UniqueDecls);
+ for (unsigned I = 0; I < UniqueDecls; ++I)
+ Decls.push_back(Record.readDeclAs<ValueDecl>());
+ C->setUniqueDecls(Decls);
+
+ SmallVector<unsigned, 16> ListsPerDecl;
+ ListsPerDecl.reserve(UniqueDecls);
+ for (unsigned I = 0; I < UniqueDecls; ++I)
+ ListsPerDecl.push_back(Record.readInt());
+ C->setDeclNumLists(ListsPerDecl);
+
+ SmallVector<unsigned, 32> ListSizes;
+ ListSizes.reserve(TotalLists);
+ for (unsigned i = 0; i < TotalLists; ++i)
+ ListSizes.push_back(Record.readInt());
+ C->setComponentListSizes(ListSizes);
+
+ SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
+ Components.reserve(TotalComponents);
+ for (unsigned I = 0; I < TotalComponents; ++I) {
+ Expr *AssociatedExpr = Record.readSubExpr();
+ auto *AssociatedDecl = Record.readDeclAs<ValueDecl>();
+ Components.emplace_back(AssociatedExpr, AssociatedDecl,
+ /*IsNonContiguous=*/false);
+ }
+ C->setComponents(Components, ListSizes);
+}
+
void OMPClauseReader::VisitOMPNontemporalClause(OMPNontemporalClause *C) {
C->setLParenLoc(Record.readSourceLocation());
unsigned NumVars = C->varlist_size();
@@ -12978,8 +11626,10 @@ void OMPClauseReader::VisitOMPAffinityClause(OMPAffinityClause *C) {
void OMPClauseReader::VisitOMPOrderClause(OMPOrderClause *C) {
C->setKind(Record.readEnum<OpenMPOrderClauseKind>());
+ C->setModifier(Record.readEnum<OpenMPOrderClauseModifier>());
C->setLParenLoc(Record.readSourceLocation());
C->setKindKwLoc(Record.readSourceLocation());
+ C->setModifierKwLoc(Record.readSourceLocation());
}
void OMPClauseReader::VisitOMPFilterClause(OMPFilterClause *C) {
@@ -12988,6 +11638,50 @@ void OMPClauseReader::VisitOMPFilterClause(OMPFilterClause *C) {
C->setLParenLoc(Record.readSourceLocation());
}
+void OMPClauseReader::VisitOMPBindClause(OMPBindClause *C) {
+ C->setBindKind(Record.readEnum<OpenMPBindClauseKind>());
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setBindKindLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPAlignClause(OMPAlignClause *C) {
+ C->setAlignment(Record.readExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPXDynCGroupMemClause(OMPXDynCGroupMemClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ C->setSize(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPDoacrossClause(OMPDoacrossClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setDependenceType(
+ static_cast<OpenMPDoacrossClauseModifier>(Record.readInt()));
+ C->setDependenceLoc(Record.readSourceLocation());
+ C->setColonLoc(Record.readSourceLocation());
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned I = 0; I != NumVars; ++I)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+ for (unsigned I = 0, E = C->getNumLoops(); I < E; ++I)
+ C->setLoopData(I, Record.readSubExpr());
+}
+
+void OMPClauseReader::VisitOMPXAttributeClause(OMPXAttributeClause *C) {
+ AttrVec Attrs;
+ Record.readAttributes(Attrs);
+ C->setAttrs(Attrs);
+ C->setLocStart(Record.readSourceLocation());
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setLocEnd(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPXBareClause(OMPXBareClause *C) {}
+
OMPTraitInfo *ASTRecordReader::readOMPTraitInfo() {
OMPTraitInfo &TI = getContext().getNewOMPTraitInfo();
TI.Sets.resize(readUInt32());
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
index ff79f91e5db1..110f55f8c0f4 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -13,7 +13,9 @@
#include "ASTCommon.h"
#include "ASTReaderInternals.h"
+#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTStructuralEquivalence.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/Decl.h"
@@ -36,6 +38,7 @@
#include "clang/AST/Type.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AttrKinds.h"
+#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
@@ -87,7 +90,7 @@ namespace clang {
using RecordData = ASTReader::RecordData;
TypeID DeferredTypeID = 0;
- unsigned AnonymousDeclNumber;
+ unsigned AnonymousDeclNumber = 0;
GlobalDeclID NamedDeclForTagDecl = 0;
IdentifierInfo *TypedefNameForLinkage = nullptr;
@@ -156,9 +159,12 @@ namespace clang {
return Record.getSubmodule(readSubmoduleID());
}
- void ReadCXXRecordDefinition(CXXRecordDecl *D, bool Update);
+ void ReadCXXRecordDefinition(CXXRecordDecl *D, bool Update,
+ Decl *LambdaContext = nullptr,
+ unsigned IndexInLambdaContext = 0);
void ReadCXXDefinitionData(struct CXXRecordDecl::DefinitionData &Data,
- const CXXRecordDecl *D);
+ const CXXRecordDecl *D, Decl *LambdaContext,
+ unsigned IndexInLambdaContext);
void MergeDefinitionData(CXXRecordDecl *D,
struct CXXRecordDecl::DefinitionData &&NewDD);
void ReadObjCDefinitionData(struct ObjCInterfaceDecl::DefinitionData &Data);
@@ -176,6 +182,13 @@ namespace clang {
static void setAnonymousDeclForMerging(ASTReader &Reader, DeclContext *DC,
unsigned Index, NamedDecl *D);
+ /// Commit to a primary definition of the class RD, which is known to be
+ /// a definition of the class. We might not have read the definition data
+ /// for it yet. If we haven't then allocate placeholder definition data
+ /// now too.
+ static CXXRecordDecl *getOrFakePrimaryClassDefinition(ASTReader &Reader,
+ CXXRecordDecl *RD);
+
/// Results from loading a RedeclarableDecl.
class RedeclarableResult {
Decl *MergeWith;
@@ -321,6 +334,7 @@ namespace clang {
void VisitNamedDecl(NamedDecl *ND);
void VisitLabelDecl(LabelDecl *LD);
void VisitNamespaceDecl(NamespaceDecl *D);
+ void VisitHLSLBufferDecl(HLSLBufferDecl *D);
void VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
void VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
void VisitTypeDecl(TypeDecl *TD);
@@ -332,7 +346,7 @@ namespace clang {
RedeclarableResult VisitTagDecl(TagDecl *TD);
void VisitEnumDecl(EnumDecl *ED);
RedeclarableResult VisitRecordDeclImpl(RecordDecl *RD);
- void VisitRecordDecl(RecordDecl *RD) { VisitRecordDeclImpl(RD); }
+ void VisitRecordDecl(RecordDecl *RD);
RedeclarableResult VisitCXXRecordDeclImpl(CXXRecordDecl *D);
void VisitCXXRecordDecl(CXXRecordDecl *D) { VisitCXXRecordDeclImpl(D); }
RedeclarableResult VisitClassTemplateSpecializationDeclImpl(
@@ -344,9 +358,7 @@ namespace clang {
}
void VisitClassTemplatePartialSpecializationDecl(
- ClassTemplatePartialSpecializationDecl *D);
- void VisitClassScopeFunctionSpecializationDecl(
- ClassScopeFunctionSpecializationDecl *D);
+ ClassTemplatePartialSpecializationDecl *D);
RedeclarableResult
VisitVarTemplateSpecializationDeclImpl(VarTemplateSpecializationDecl *D);
@@ -370,17 +382,21 @@ namespace clang {
void VisitFieldDecl(FieldDecl *FD);
void VisitMSPropertyDecl(MSPropertyDecl *FD);
void VisitMSGuidDecl(MSGuidDecl *D);
+ void VisitUnnamedGlobalConstantDecl(UnnamedGlobalConstantDecl *D);
void VisitTemplateParamObjectDecl(TemplateParamObjectDecl *D);
void VisitIndirectFieldDecl(IndirectFieldDecl *FD);
RedeclarableResult VisitVarDeclImpl(VarDecl *D);
+ void ReadVarDeclInit(VarDecl *VD);
void VisitVarDecl(VarDecl *VD) { VisitVarDeclImpl(VD); }
void VisitImplicitParamDecl(ImplicitParamDecl *PD);
void VisitParmVarDecl(ParmVarDecl *PD);
void VisitDecompositionDecl(DecompositionDecl *DD);
void VisitBindingDecl(BindingDecl *BD);
void VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
- DeclID VisitTemplateDecl(TemplateDecl *D);
+ void VisitTemplateDecl(TemplateDecl *D);
void VisitConceptDecl(ConceptDecl *D);
+ void VisitImplicitConceptSpecializationDecl(
+ ImplicitConceptSpecializationDecl *D);
void VisitRequiresExprBodyDecl(RequiresExprBodyDecl *D);
RedeclarableResult VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D);
void VisitClassTemplateDecl(ClassTemplateDecl *D);
@@ -397,6 +413,7 @@ namespace clang {
void VisitLinkageSpecDecl(LinkageSpecDecl *D);
void VisitExportDecl(ExportDecl *D);
void VisitFileScopeAsmDecl(FileScopeAsmDecl *AD);
+ void VisitTopLevelStmtDecl(TopLevelStmtDecl *D);
void VisitImportDecl(ImportDecl *D);
void VisitAccessSpecDecl(AccessSpecDecl *D);
void VisitFriendDecl(FriendDecl *D);
@@ -412,14 +429,18 @@ namespace clang {
template<typename T>
RedeclarableResult VisitRedeclarable(Redeclarable<T> *D);
- template<typename T>
- void mergeRedeclarable(Redeclarable<T> *D, RedeclarableResult &Redecl,
- DeclID TemplatePatternID = 0);
+ template <typename T>
+ void mergeRedeclarable(Redeclarable<T> *D, RedeclarableResult &Redecl);
- template<typename T>
+ void mergeLambda(CXXRecordDecl *D, RedeclarableResult &Redecl,
+ Decl *Context, unsigned Number);
+
+ void mergeRedeclarableTemplate(RedeclarableTemplateDecl *D,
+ RedeclarableResult &Redecl);
+
+ template <typename T>
void mergeRedeclarable(Redeclarable<T> *D, T *Existing,
- RedeclarableResult &Redecl,
- DeclID TemplatePatternID = 0);
+ RedeclarableResult &Redecl);
template<typename T>
void mergeMergeable(Mergeable<T> *D);
@@ -428,7 +449,7 @@ namespace clang {
void mergeTemplatePattern(RedeclarableTemplateDecl *D,
RedeclarableTemplateDecl *Existing,
- DeclID DsID, bool IsKeyDecl);
+ bool IsKeyDecl);
ObjCTypeParamList *ReadObjCTypeParamList();
@@ -461,9 +482,8 @@ namespace {
/// Iterator over the redeclarations of a declaration that have already
/// been merged into the same redeclaration chain.
-template<typename DeclT>
-class MergedRedeclIterator {
- DeclT *Start;
+template <typename DeclT> class MergedRedeclIterator {
+ DeclT *Start = nullptr;
DeclT *Canonical = nullptr;
DeclT *Current = nullptr;
@@ -551,17 +571,34 @@ void ASTDeclReader::Visit(Decl *D) {
ID->TypeForDecl = Reader.GetType(DeferredTypeID).getTypePtrOrNull();
} else if (auto *FD = dyn_cast<FunctionDecl>(D)) {
// FunctionDecl's body was written last after all other Stmts/Exprs.
- // We only read it if FD doesn't already have a body (e.g., from another
- // module).
- // FIXME: Can we diagnose ODR violations somehow?
if (Record.readInt())
ReadFunctionDefinition(FD);
+ } else if (auto *VD = dyn_cast<VarDecl>(D)) {
+ ReadVarDeclInit(VD);
+ } else if (auto *FD = dyn_cast<FieldDecl>(D)) {
+ if (FD->hasInClassInitializer() && Record.readInt()) {
+ FD->setLazyInClassInitializer(LazyDeclStmtPtr(GetCurrentCursorOffset()));
+ }
}
}
void ASTDeclReader::VisitDecl(Decl *D) {
+ BitsUnpacker DeclBits(Record.readInt());
+ auto ModuleOwnership =
+ (Decl::ModuleOwnershipKind)DeclBits.getNextBits(/*Width=*/3);
+ D->setReferenced(DeclBits.getNextBit());
+ D->Used = DeclBits.getNextBit();
+ IsDeclMarkedUsed |= D->Used;
+ D->setAccess((AccessSpecifier)DeclBits.getNextBits(/*Width=*/2));
+ D->setImplicit(DeclBits.getNextBit());
+ bool HasStandaloneLexicalDC = DeclBits.getNextBit();
+ bool HasAttrs = DeclBits.getNextBit();
+ D->setTopLevelDeclInObjCContainer(DeclBits.getNextBit());
+ D->InvalidDecl = DeclBits.getNextBit();
+ D->FromASTFile = true;
+
if (D->isTemplateParameter() || D->isTemplateParameterPack() ||
- isa<ParmVarDecl>(D) || isa<ObjCTypeParamDecl>(D)) {
+ isa<ParmVarDecl, ObjCTypeParamDecl>(D)) {
// We don't want to deserialize the DeclContext of a template
// parameter or of a parameter of a function template immediately. These
// entities might be used in the formulation of its DeclContext (for
@@ -569,7 +606,8 @@ void ASTDeclReader::VisitDecl(Decl *D) {
// return type of the function). Use the translation unit DeclContext as a
// placeholder.
GlobalDeclID SemaDCIDForTemplateParmDecl = readDeclID();
- GlobalDeclID LexicalDCIDForTemplateParmDecl = readDeclID();
+ GlobalDeclID LexicalDCIDForTemplateParmDecl =
+ HasStandaloneLexicalDC ? readDeclID() : 0;
if (!LexicalDCIDForTemplateParmDecl)
LexicalDCIDForTemplateParmDecl = SemaDCIDForTemplateParmDecl;
Reader.addPendingDeclContextInfo(D,
@@ -578,40 +616,50 @@ void ASTDeclReader::VisitDecl(Decl *D) {
D->setDeclContext(Reader.getContext().getTranslationUnitDecl());
} else {
auto *SemaDC = readDeclAs<DeclContext>();
- auto *LexicalDC = readDeclAs<DeclContext>();
+ auto *LexicalDC =
+ HasStandaloneLexicalDC ? readDeclAs<DeclContext>() : nullptr;
if (!LexicalDC)
LexicalDC = SemaDC;
- DeclContext *MergedSemaDC = Reader.MergedDeclContexts.lookup(SemaDC);
+ // If the context is a class, we might not have actually merged it yet, in
+ // the case where the definition comes from an update record.
+ DeclContext *MergedSemaDC;
+ if (auto *RD = dyn_cast<CXXRecordDecl>(SemaDC))
+ MergedSemaDC = getOrFakePrimaryClassDefinition(Reader, RD);
+ else
+ MergedSemaDC = Reader.MergedDeclContexts.lookup(SemaDC);
// Avoid calling setLexicalDeclContext() directly because it uses
// Decl::getASTContext() internally which is unsafe during derialization.
D->setDeclContextsImpl(MergedSemaDC ? MergedSemaDC : SemaDC, LexicalDC,
Reader.getContext());
}
D->setLocation(ThisDeclLoc);
- D->InvalidDecl = Record.readInt();
- if (Record.readInt()) { // hasAttrs
+
+ if (HasAttrs) {
AttrVec Attrs;
Record.readAttributes(Attrs);
// Avoid calling setAttrs() directly because it uses Decl::getASTContext()
// internally which is unsafe during derialization.
D->setAttrsImpl(Attrs, Reader.getContext());
}
- D->setImplicit(Record.readInt());
- D->Used = Record.readInt();
- IsDeclMarkedUsed |= D->Used;
- D->setReferenced(Record.readInt());
- D->setTopLevelDeclInObjCContainer(Record.readInt());
- D->setAccess((AccessSpecifier)Record.readInt());
- D->FromASTFile = true;
- bool ModulePrivate = Record.readInt();
// Determine whether this declaration is part of a (sub)module. If so, it
// may not yet be visible.
+ bool ModulePrivate =
+ (ModuleOwnership == Decl::ModuleOwnershipKind::ModulePrivate);
if (unsigned SubmoduleID = readSubmoduleID()) {
+ switch (ModuleOwnership) {
+ case Decl::ModuleOwnershipKind::Visible:
+ ModuleOwnership = Decl::ModuleOwnershipKind::VisibleWhenImported;
+ break;
+ case Decl::ModuleOwnershipKind::Unowned:
+ case Decl::ModuleOwnershipKind::VisibleWhenImported:
+ case Decl::ModuleOwnershipKind::ReachableWhenImported:
+ case Decl::ModuleOwnershipKind::ModulePrivate:
+ break;
+ }
+
+ D->setModuleOwnershipKind(ModuleOwnership);
// Store the owning submodule ID in the declaration.
- D->setModuleOwnershipKind(
- ModulePrivate ? Decl::ModuleOwnershipKind::ModulePrivate
- : Decl::ModuleOwnershipKind::VisibleWhenImported);
D->setOwningModuleID(SubmoduleID);
if (ModulePrivate) {
@@ -709,15 +757,17 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitTagDecl(TagDecl *TD) {
VisitTypeDecl(TD);
TD->IdentifierNamespace = Record.readInt();
- TD->setTagKind((TagDecl::TagKind)Record.readInt());
- if (!isa<CXXRecordDecl>(TD))
- TD->setCompleteDefinition(Record.readInt());
- TD->setEmbeddedInDeclarator(Record.readInt());
- TD->setFreeStanding(Record.readInt());
- TD->setCompleteDefinitionRequired(Record.readInt());
+
+ BitsUnpacker TagDeclBits(Record.readInt());
+ TD->setTagKind(
+ static_cast<TagTypeKind>(TagDeclBits.getNextBits(/*Width=*/3)));
+ TD->setCompleteDefinition(TagDeclBits.getNextBit());
+ TD->setEmbeddedInDeclarator(TagDeclBits.getNextBit());
+ TD->setFreeStanding(TagDeclBits.getNextBit());
+ TD->setCompleteDefinitionRequired(TagDeclBits.getNextBit());
TD->setBraceRange(readSourceRange());
- switch (Record.readInt()) {
+ switch (TagDeclBits.getNextBits(/*Width=*/2)) {
case 0:
break;
case 1: { // ExtInfo
@@ -746,14 +796,19 @@ void ASTDeclReader::VisitEnumDecl(EnumDecl *ED) {
else
ED->setIntegerType(Record.readType());
ED->setPromotionType(Record.readType());
- ED->setNumPositiveBits(Record.readInt());
- ED->setNumNegativeBits(Record.readInt());
- ED->setScoped(Record.readInt());
- ED->setScopedUsingClassTag(Record.readInt());
- ED->setFixed(Record.readInt());
- ED->setHasODRHash(true);
- ED->ODRHash = Record.readInt();
+ BitsUnpacker EnumDeclBits(Record.readInt());
+ ED->setNumPositiveBits(EnumDeclBits.getNextBits(/*Width=*/8));
+ ED->setNumNegativeBits(EnumDeclBits.getNextBits(/*Width=*/8));
+ bool ShouldSkipCheckingODR = EnumDeclBits.getNextBit();
+ ED->setScoped(EnumDeclBits.getNextBit());
+ ED->setScopedUsingClassTag(EnumDeclBits.getNextBit());
+ ED->setFixed(EnumDeclBits.getNextBit());
+
+ if (!ShouldSkipCheckingODR) {
+ ED->setHasODRHash(true);
+ ED->ODRHash = Record.readInt();
+ }
// If this is a definition subject to the ODR, and we already have a
// definition, merge this one into it.
@@ -773,9 +828,12 @@ void ASTDeclReader::VisitEnumDecl(EnumDecl *ED) {
}
if (OldDef) {
Reader.MergedDeclContexts.insert(std::make_pair(ED, OldDef));
- ED->setCompleteDefinition(false);
+ ED->demoteThisDefinitionToDeclaration();
Reader.mergeDefinitionVisibility(OldDef, ED);
- if (OldDef->getODRHash() != ED->getODRHash())
+ // We don't want to check the ODR hash value for declarations from global
+ // module fragment.
+ if (!ED->shouldSkipCheckingODR() &&
+ OldDef->getODRHash() != ED->getODRHash())
Reader.PendingEnumOdrMergeFailures[OldDef].push_back(ED);
} else {
OldDef = ED;
@@ -793,27 +851,65 @@ void ASTDeclReader::VisitEnumDecl(EnumDecl *ED) {
ASTDeclReader::RedeclarableResult
ASTDeclReader::VisitRecordDeclImpl(RecordDecl *RD) {
RedeclarableResult Redecl = VisitTagDecl(RD);
- RD->setHasFlexibleArrayMember(Record.readInt());
- RD->setAnonymousStructOrUnion(Record.readInt());
- RD->setHasObjectMember(Record.readInt());
- RD->setHasVolatileMember(Record.readInt());
- RD->setNonTrivialToPrimitiveDefaultInitialize(Record.readInt());
- RD->setNonTrivialToPrimitiveCopy(Record.readInt());
- RD->setNonTrivialToPrimitiveDestroy(Record.readInt());
- RD->setHasNonTrivialToPrimitiveDefaultInitializeCUnion(Record.readInt());
- RD->setHasNonTrivialToPrimitiveDestructCUnion(Record.readInt());
- RD->setHasNonTrivialToPrimitiveCopyCUnion(Record.readInt());
- RD->setParamDestroyedInCallee(Record.readInt());
- RD->setArgPassingRestrictions((RecordDecl::ArgPassingKind)Record.readInt());
+
+ BitsUnpacker RecordDeclBits(Record.readInt());
+ RD->setHasFlexibleArrayMember(RecordDeclBits.getNextBit());
+ RD->setAnonymousStructOrUnion(RecordDeclBits.getNextBit());
+ RD->setHasObjectMember(RecordDeclBits.getNextBit());
+ RD->setHasVolatileMember(RecordDeclBits.getNextBit());
+ RD->setNonTrivialToPrimitiveDefaultInitialize(RecordDeclBits.getNextBit());
+ RD->setNonTrivialToPrimitiveCopy(RecordDeclBits.getNextBit());
+ RD->setNonTrivialToPrimitiveDestroy(RecordDeclBits.getNextBit());
+ RD->setHasNonTrivialToPrimitiveDefaultInitializeCUnion(
+ RecordDeclBits.getNextBit());
+ RD->setHasNonTrivialToPrimitiveDestructCUnion(RecordDeclBits.getNextBit());
+ RD->setHasNonTrivialToPrimitiveCopyCUnion(RecordDeclBits.getNextBit());
+ RD->setParamDestroyedInCallee(RecordDeclBits.getNextBit());
+ RD->setArgPassingRestrictions(
+ (RecordArgPassingKind)RecordDeclBits.getNextBits(/*Width=*/2));
return Redecl;
}
+void ASTDeclReader::VisitRecordDecl(RecordDecl *RD) {
+ VisitRecordDeclImpl(RD);
+ // We should only reach here if we're in C/Objective-C. There is no
+ // global module fragment.
+ assert(!RD->shouldSkipCheckingODR());
+ RD->setODRHash(Record.readInt());
+
+ // Maintain the invariant of a redeclaration chain containing only
+ // a single definition.
+ if (RD->isCompleteDefinition()) {
+ RecordDecl *Canon = static_cast<RecordDecl *>(RD->getCanonicalDecl());
+ RecordDecl *&OldDef = Reader.RecordDefinitions[Canon];
+ if (!OldDef) {
+ // This is the first time we've seen an imported definition. Look for a
+ // local definition before deciding that we are the first definition.
+ for (auto *D : merged_redecls(Canon)) {
+ if (!D->isFromASTFile() && D->isCompleteDefinition()) {
+ OldDef = D;
+ break;
+ }
+ }
+ }
+ if (OldDef) {
+ Reader.MergedDeclContexts.insert(std::make_pair(RD, OldDef));
+ RD->demoteThisDefinitionToDeclaration();
+ Reader.mergeDefinitionVisibility(OldDef, RD);
+ if (OldDef->getODRHash() != RD->getODRHash())
+ Reader.PendingRecordOdrMergeFailures[OldDef].push_back(RD);
+ } else {
+ OldDef = RD;
+ }
+ }
+}
+
void ASTDeclReader::VisitValueDecl(ValueDecl *VD) {
VisitNamedDecl(VD);
- // For function declarations, defer reading the type in case the function has
- // a deduced return type that references an entity declared within the
- // function.
- if (isa<FunctionDecl>(VD))
+ // For function or variable declarations, defer reading the type in case the
+ // declaration has a deduced type that references an entity declared within
+ // the function definition or variable initializer.
+ if (isa<FunctionDecl, VarDecl>(VD))
DeferredTypeID = Record.getGlobalTypeID(Record.readInt());
else
VD->setType(Record.readType());
@@ -823,7 +919,7 @@ void ASTDeclReader::VisitEnumConstantDecl(EnumConstantDecl *ECD) {
VisitValueDecl(ECD);
if (Record.readInt())
ECD->setInitExpr(Record.readExpr());
- ECD->setInitVal(Record.readAPSInt());
+ ECD->setInitVal(Reader.getContext(), Record.readAPSInt());
mergeMergeable(ECD);
}
@@ -844,84 +940,27 @@ void ASTDeclReader::VisitDeclaratorDecl(DeclaratorDecl *DD) {
void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
RedeclarableResult Redecl = VisitRedeclarable(FD);
- VisitDeclaratorDecl(FD);
- // Attach a type to this function. Use the real type if possible, but fall
- // back to the type as written if it involves a deduced return type.
- if (FD->getTypeSourceInfo() &&
- FD->getTypeSourceInfo()->getType()->castAs<FunctionType>()
- ->getReturnType()->getContainedAutoType()) {
- // We'll set up the real type in Visit, once we've finished loading the
- // function.
- FD->setType(FD->getTypeSourceInfo()->getType());
- Reader.PendingFunctionTypes.push_back({FD, DeferredTypeID});
- } else {
- FD->setType(Reader.GetType(DeferredTypeID));
- }
- DeferredTypeID = 0;
-
- FD->DNLoc = Record.readDeclarationNameLoc(FD->getDeclName());
- FD->IdentifierNamespace = Record.readInt();
-
- // FunctionDecl's body is handled last at ASTDeclReader::Visit,
- // after everything else is read.
-
- FD->setStorageClass(static_cast<StorageClass>(Record.readInt()));
- FD->setInlineSpecified(Record.readInt());
- FD->setImplicitlyInline(Record.readInt());
- FD->setVirtualAsWritten(Record.readInt());
- // We defer calling `FunctionDecl::setPure()` here as for methods of
- // `CXXTemplateSpecializationDecl`s, we may not have connected up the
- // definition (which is required for `setPure`).
- const bool Pure = Record.readInt();
- FD->setHasInheritedPrototype(Record.readInt());
- FD->setHasWrittenPrototype(Record.readInt());
- FD->setDeletedAsWritten(Record.readInt());
- FD->setTrivial(Record.readInt());
- FD->setTrivialForCall(Record.readInt());
- FD->setDefaulted(Record.readInt());
- FD->setExplicitlyDefaulted(Record.readInt());
- FD->setHasImplicitReturnZero(Record.readInt());
- FD->setConstexprKind(static_cast<ConstexprSpecKind>(Record.readInt()));
- FD->setUsesSEHTry(Record.readInt());
- FD->setHasSkippedBody(Record.readInt());
- FD->setIsMultiVersion(Record.readInt());
- FD->setLateTemplateParsed(Record.readInt());
-
- FD->setCachedLinkage(static_cast<Linkage>(Record.readInt()));
- FD->EndRangeLoc = readSourceLocation();
-
- FD->ODRHash = Record.readInt();
- FD->setHasODRHash(true);
-
- if (FD->isDefaulted()) {
- if (unsigned NumLookups = Record.readInt()) {
- SmallVector<DeclAccessPair, 8> Lookups;
- for (unsigned I = 0; I != NumLookups; ++I) {
- NamedDecl *ND = Record.readDeclAs<NamedDecl>();
- AccessSpecifier AS = (AccessSpecifier)Record.readInt();
- Lookups.push_back(DeclAccessPair::make(ND, AS));
- }
- FD->setDefaultedFunctionInfo(FunctionDecl::DefaultedFunctionInfo::Create(
- Reader.getContext(), Lookups));
- }
- }
+ FunctionDecl *Existing = nullptr;
switch ((FunctionDecl::TemplatedKind)Record.readInt()) {
case FunctionDecl::TK_NonTemplate:
- mergeRedeclarable(FD, Redecl);
break;
- case FunctionDecl::TK_FunctionTemplate:
- // Merged when we merge the template.
- FD->setDescribedFunctionTemplate(readDeclAs<FunctionTemplateDecl>());
+ case FunctionDecl::TK_DependentNonTemplate:
+ FD->setInstantiatedFromDecl(readDeclAs<FunctionDecl>());
+ break;
+ case FunctionDecl::TK_FunctionTemplate: {
+ auto *Template = readDeclAs<FunctionTemplateDecl>();
+ Template->init(FD);
+ FD->setDescribedFunctionTemplate(Template);
break;
+ }
case FunctionDecl::TK_MemberSpecialization: {
auto *InstFD = readDeclAs<FunctionDecl>();
auto TSK = (TemplateSpecializationKind)Record.readInt();
SourceLocation POI = readSourceLocation();
FD->setInstantiationOfMemberFunction(Reader.getContext(), InstFD, TSK);
FD->getMemberSpecializationInfo()->setPointOfInstantiation(POI);
- mergeRedeclarable(FD, Redecl);
break;
}
case FunctionDecl::TK_FunctionTemplateSpecialization: {
@@ -933,27 +972,16 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
Record.readTemplateArgumentList(TemplArgs, /*Canonicalize*/ true);
// Template args as written.
- SmallVector<TemplateArgumentLoc, 8> TemplArgLocs;
- SourceLocation LAngleLoc, RAngleLoc;
- bool HasTemplateArgumentsAsWritten = Record.readInt();
- if (HasTemplateArgumentsAsWritten) {
- unsigned NumTemplateArgLocs = Record.readInt();
- TemplArgLocs.reserve(NumTemplateArgLocs);
- for (unsigned i = 0; i != NumTemplateArgLocs; ++i)
- TemplArgLocs.push_back(Record.readTemplateArgumentLoc());
-
- LAngleLoc = readSourceLocation();
- RAngleLoc = readSourceLocation();
- }
+ TemplateArgumentListInfo TemplArgsWritten;
+ bool HasTemplateArgumentsAsWritten = Record.readBool();
+ if (HasTemplateArgumentsAsWritten)
+ Record.readTemplateArgumentListInfo(TemplArgsWritten);
SourceLocation POI = readSourceLocation();
ASTContext &C = Reader.getContext();
- TemplateArgumentList *TemplArgList
- = TemplateArgumentList::CreateCopy(C, TemplArgs);
- TemplateArgumentListInfo TemplArgsInfo(LAngleLoc, RAngleLoc);
- for (unsigned i = 0, e = TemplArgLocs.size(); i != e; ++i)
- TemplArgsInfo.addArgument(TemplArgLocs[i]);
+ TemplateArgumentList *TemplArgList =
+ TemplateArgumentList::CreateCopy(C, TemplArgs);
MemberSpecializationInfo *MSInfo = nullptr;
if (Record.readInt()) {
@@ -968,7 +996,7 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
FunctionTemplateSpecializationInfo *FTInfo =
FunctionTemplateSpecializationInfo::Create(
C, FD, Template, TSK, TemplArgList,
- HasTemplateArgumentsAsWritten ? &TemplArgsInfo : nullptr, POI,
+ HasTemplateArgumentsAsWritten ? &TemplArgsWritten : nullptr, POI,
MSInfo);
FD->TemplateOrSpecialization = FTInfo;
@@ -992,37 +1020,134 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
else {
assert(Reader.getContext().getLangOpts().Modules &&
"already deserialized this template specialization");
- mergeRedeclarable(FD, ExistingInfo->getFunction(), Redecl);
+ Existing = ExistingInfo->getFunction();
}
}
break;
}
case FunctionDecl::TK_DependentFunctionTemplateSpecialization: {
// Templates.
- UnresolvedSet<8> TemplDecls;
- unsigned NumTemplates = Record.readInt();
- while (NumTemplates--)
- TemplDecls.addDecl(readDeclAs<NamedDecl>());
+ UnresolvedSet<8> Candidates;
+ unsigned NumCandidates = Record.readInt();
+ while (NumCandidates--)
+ Candidates.addDecl(readDeclAs<NamedDecl>());
// Templates args.
- TemplateArgumentListInfo TemplArgs;
- unsigned NumArgs = Record.readInt();
- while (NumArgs--)
- TemplArgs.addArgument(Record.readTemplateArgumentLoc());
- TemplArgs.setLAngleLoc(readSourceLocation());
- TemplArgs.setRAngleLoc(readSourceLocation());
-
- FD->setDependentTemplateSpecialization(Reader.getContext(),
- TemplDecls, TemplArgs);
+ TemplateArgumentListInfo TemplArgsWritten;
+ bool HasTemplateArgumentsAsWritten = Record.readBool();
+ if (HasTemplateArgumentsAsWritten)
+ Record.readTemplateArgumentListInfo(TemplArgsWritten);
+
+ FD->setDependentTemplateSpecialization(
+ Reader.getContext(), Candidates,
+ HasTemplateArgumentsAsWritten ? &TemplArgsWritten : nullptr);
// These are not merged; we don't need to merge redeclarations of dependent
// template friends.
break;
}
}
+ VisitDeclaratorDecl(FD);
+
+ // Attach a type to this function. Use the real type if possible, but fall
+ // back to the type as written if it involves a deduced return type.
+ if (FD->getTypeSourceInfo() && FD->getTypeSourceInfo()
+ ->getType()
+ ->castAs<FunctionType>()
+ ->getReturnType()
+ ->getContainedAutoType()) {
+ // We'll set up the real type in Visit, once we've finished loading the
+ // function.
+ FD->setType(FD->getTypeSourceInfo()->getType());
+ Reader.PendingDeducedFunctionTypes.push_back({FD, DeferredTypeID});
+ } else {
+ FD->setType(Reader.GetType(DeferredTypeID));
+ }
+ DeferredTypeID = 0;
+
+ FD->DNLoc = Record.readDeclarationNameLoc(FD->getDeclName());
+ FD->IdentifierNamespace = Record.readInt();
+
+ // FunctionDecl's body is handled last at ASTDeclReader::Visit,
+ // after everything else is read.
+ BitsUnpacker FunctionDeclBits(Record.readInt());
+
+ FD->setCachedLinkage((Linkage)FunctionDeclBits.getNextBits(/*Width=*/3));
+ FD->setStorageClass((StorageClass)FunctionDeclBits.getNextBits(/*Width=*/3));
+ bool ShouldSkipCheckingODR = FunctionDeclBits.getNextBit();
+ FD->setInlineSpecified(FunctionDeclBits.getNextBit());
+ FD->setImplicitlyInline(FunctionDeclBits.getNextBit());
+ FD->setHasSkippedBody(FunctionDeclBits.getNextBit());
+ FD->setVirtualAsWritten(FunctionDeclBits.getNextBit());
+ // We defer calling `FunctionDecl::setPure()` here as for methods of
+ // `CXXTemplateSpecializationDecl`s, we may not have connected up the
+ // definition (which is required for `setPure`).
+ const bool Pure = FunctionDeclBits.getNextBit();
+ FD->setHasInheritedPrototype(FunctionDeclBits.getNextBit());
+ FD->setHasWrittenPrototype(FunctionDeclBits.getNextBit());
+ FD->setDeletedAsWritten(FunctionDeclBits.getNextBit());
+ FD->setTrivial(FunctionDeclBits.getNextBit());
+ FD->setTrivialForCall(FunctionDeclBits.getNextBit());
+ FD->setDefaulted(FunctionDeclBits.getNextBit());
+ FD->setExplicitlyDefaulted(FunctionDeclBits.getNextBit());
+ FD->setIneligibleOrNotSelected(FunctionDeclBits.getNextBit());
+ FD->setConstexprKind(
+ (ConstexprSpecKind)FunctionDeclBits.getNextBits(/*Width=*/2));
+ FD->setHasImplicitReturnZero(FunctionDeclBits.getNextBit());
+ FD->setIsMultiVersion(FunctionDeclBits.getNextBit());
+ FD->setLateTemplateParsed(FunctionDeclBits.getNextBit());
+ FD->setFriendConstraintRefersToEnclosingTemplate(
+ FunctionDeclBits.getNextBit());
+ FD->setUsesSEHTry(FunctionDeclBits.getNextBit());
+
+ FD->EndRangeLoc = readSourceLocation();
+ if (FD->isExplicitlyDefaulted())
+ FD->setDefaultLoc(readSourceLocation());
+
+ if (!ShouldSkipCheckingODR) {
+ FD->ODRHash = Record.readInt();
+ FD->setHasODRHash(true);
+ }
+
+ if (FD->isDefaulted()) {
+ if (unsigned NumLookups = Record.readInt()) {
+ SmallVector<DeclAccessPair, 8> Lookups;
+ for (unsigned I = 0; I != NumLookups; ++I) {
+ NamedDecl *ND = Record.readDeclAs<NamedDecl>();
+ AccessSpecifier AS = (AccessSpecifier)Record.readInt();
+ Lookups.push_back(DeclAccessPair::make(ND, AS));
+ }
+ FD->setDefaultedFunctionInfo(FunctionDecl::DefaultedFunctionInfo::Create(
+ Reader.getContext(), Lookups));
+ }
+ }
+
+ if (Existing)
+ mergeRedeclarable(FD, Existing, Redecl);
+ else if (auto Kind = FD->getTemplatedKind();
+ Kind == FunctionDecl::TK_FunctionTemplate ||
+ Kind == FunctionDecl::TK_FunctionTemplateSpecialization) {
+ // Function Templates have their FunctionTemplateDecls merged instead of
+ // their FunctionDecls.
+ auto merge = [this, &Redecl, FD](auto &&F) {
+ auto *Existing = cast_or_null<FunctionDecl>(Redecl.getKnownMergeTarget());
+ RedeclarableResult NewRedecl(Existing ? F(Existing) : nullptr,
+ Redecl.getFirstID(), Redecl.isKeyDecl());
+ mergeRedeclarableTemplate(F(FD), NewRedecl);
+ };
+ if (Kind == FunctionDecl::TK_FunctionTemplate)
+ merge(
+ [](FunctionDecl *FD) { return FD->getDescribedFunctionTemplate(); });
+ else
+ merge([](FunctionDecl *FD) {
+ return FD->getTemplateSpecializationInfo()->getTemplate();
+ });
+ } else
+ mergeRedeclarable(FD, Redecl);
+
// Defer calling `setPure` until merging above has guaranteed we've set
// `DefinitionData` (as this will need to access it).
- FD->setPure(Pure);
+ FD->setIsPureVirtual(Pure);
// Read in the parameters.
unsigned NumParams = Record.readInt();
@@ -1057,7 +1182,8 @@ void ASTDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) {
Reader.getContext().setObjCMethodRedeclaration(MD,
readDeclAs<ObjCMethodDecl>());
- MD->setDeclImplementation((ObjCMethodDecl::ImplementationControl)Record.readInt());
+ MD->setDeclImplementation(
+ static_cast<ObjCImplementationControl>(Record.readInt()));
MD->setObjCDeclQualifier((Decl::ObjCDeclQualifier)Record.readInt());
MD->setRelatedResultType(Record.readInt());
MD->setReturnType(Record.readType());
@@ -1123,6 +1249,8 @@ void ASTDeclReader::ReadObjCDefinitionData(
Data.EndLoc = readSourceLocation();
Data.HasDesignatedInitializers = Record.readInt();
+ Data.ODRHash = Record.readInt();
+ Data.HasODRHash = true;
// Read the directly referenced protocols and their SourceLocations.
unsigned NumProtocols = Record.readInt();
@@ -1149,7 +1277,17 @@ void ASTDeclReader::ReadObjCDefinitionData(
void ASTDeclReader::MergeDefinitionData(ObjCInterfaceDecl *D,
struct ObjCInterfaceDecl::DefinitionData &&NewDD) {
- // FIXME: odr checking?
+ struct ObjCInterfaceDecl::DefinitionData &DD = D->data();
+ if (DD.Definition == NewDD.Definition)
+ return;
+
+ Reader.MergedDeclContexts.insert(
+ std::make_pair(NewDD.Definition, DD.Definition));
+ Reader.mergeDefinitionVisibility(DD.Definition, NewDD.Definition);
+
+ if (D->getODRHash() != NewDD.ODRHash)
+ Reader.PendingObjCInterfaceOdrMergeFailures[DD.Definition].push_back(
+ {NewDD.Definition, &NewDD});
}
void ASTDeclReader::VisitObjCInterfaceDecl(ObjCInterfaceDecl *ID) {
@@ -1196,6 +1334,39 @@ void ASTDeclReader::VisitObjCIvarDecl(ObjCIvarDecl *IVD) {
IVD->setNextIvar(nullptr);
bool synth = Record.readInt();
IVD->setSynthesize(synth);
+
+ // Check ivar redeclaration.
+ if (IVD->isInvalidDecl())
+ return;
+ // Don't check ObjCInterfaceDecl as interfaces are named and mismatches can be
+ // detected in VisitObjCInterfaceDecl. Here we are looking for redeclarations
+ // in extensions.
+ if (isa<ObjCInterfaceDecl>(IVD->getDeclContext()))
+ return;
+ ObjCInterfaceDecl *CanonIntf =
+ IVD->getContainingInterface()->getCanonicalDecl();
+ IdentifierInfo *II = IVD->getIdentifier();
+ ObjCIvarDecl *PrevIvar = CanonIntf->lookupInstanceVariable(II);
+ if (PrevIvar && PrevIvar != IVD) {
+ auto *ParentExt = dyn_cast<ObjCCategoryDecl>(IVD->getDeclContext());
+ auto *PrevParentExt =
+ dyn_cast<ObjCCategoryDecl>(PrevIvar->getDeclContext());
+ if (ParentExt && PrevParentExt) {
+ // Postpone diagnostic as we should merge identical extensions from
+ // different modules.
+ Reader
+ .PendingObjCExtensionIvarRedeclarations[std::make_pair(ParentExt,
+ PrevParentExt)]
+ .push_back(std::make_pair(IVD, PrevIvar));
+ } else if (ParentExt || PrevParentExt) {
+ // Duplicate ivars in extension + implementation are never compatible.
+ // Compatibility of implementation + implementation should be handled in
+ // VisitObjCImplementationDecl.
+ Reader.Diag(IVD->getLocation(), diag::err_duplicate_ivar_declaration)
+ << II;
+ Reader.Diag(PrevIvar->getLocation(), diag::note_previous_definition);
+ }
+ }
}
void ASTDeclReader::ReadObjCDefinitionData(
@@ -1211,11 +1382,23 @@ void ASTDeclReader::ReadObjCDefinitionData(
ProtoLocs.push_back(readSourceLocation());
Data.ReferencedProtocols.set(ProtoRefs.data(), NumProtoRefs,
ProtoLocs.data(), Reader.getContext());
+ Data.ODRHash = Record.readInt();
+ Data.HasODRHash = true;
}
-void ASTDeclReader::MergeDefinitionData(ObjCProtocolDecl *D,
- struct ObjCProtocolDecl::DefinitionData &&NewDD) {
- // FIXME: odr checking?
+void ASTDeclReader::MergeDefinitionData(
+ ObjCProtocolDecl *D, struct ObjCProtocolDecl::DefinitionData &&NewDD) {
+ struct ObjCProtocolDecl::DefinitionData &DD = D->data();
+ if (DD.Definition == NewDD.Definition)
+ return;
+
+ Reader.MergedDeclContexts.insert(
+ std::make_pair(NewDD.Definition, DD.Definition));
+ Reader.mergeDefinitionVisibility(DD.Definition, NewDD.Definition);
+
+ if (D->getODRHash() != NewDD.ODRHash)
+ Reader.PendingObjCProtocolOdrMergeFailures[DD.Definition].push_back(
+ {NewDD.Definition, &NewDD});
}
void ASTDeclReader::VisitObjCProtocolDecl(ObjCProtocolDecl *PD) {
@@ -1350,15 +1533,13 @@ void ASTDeclReader::VisitFieldDecl(FieldDecl *FD) {
VisitDeclaratorDecl(FD);
FD->Mutable = Record.readInt();
- if (auto ISK = static_cast<FieldDecl::InitStorageKind>(Record.readInt())) {
- FD->InitStorage.setInt(ISK);
- FD->InitStorage.setPointer(ISK == FieldDecl::ISK_CapturedVLAType
- ? Record.readType().getAsOpaquePtr()
- : Record.readExpr());
- }
-
- if (auto *BW = Record.readExpr())
- FD->setBitWidth(BW);
+ unsigned Bits = Record.readInt();
+ FD->StorageKind = Bits >> 1;
+ if (FD->StorageKind == FieldDecl::ISK_CapturedVLAType)
+ FD->CapturedVLAType =
+ cast<VariableArrayType>(Record.readType().getTypePtr());
+ else if (Bits & 1)
+ FD->setBitWidth(Record.readExpr());
if (!FD->getDeclName()) {
if (auto *Tmpl = readDeclAs<FieldDecl>())
@@ -1386,6 +1567,17 @@ void ASTDeclReader::VisitMSGuidDecl(MSGuidDecl *D) {
Reader.getContext().setPrimaryMergedDecl(D, Existing->getCanonicalDecl());
}
+void ASTDeclReader::VisitUnnamedGlobalConstantDecl(
+ UnnamedGlobalConstantDecl *D) {
+ VisitValueDecl(D);
+ D->Value = Record.readAPValue();
+
+ // Add this to the AST context's lookup structure, and merge if needed.
+ if (UnnamedGlobalConstantDecl *Existing =
+ Reader.getContext().UnnamedGlobalConstantDecls.GetOrInsertNode(D))
+ Reader.getContext().setPrimaryMergedDecl(D, Existing->getCanonicalDecl());
+}
+
void ASTDeclReader::VisitTemplateParamObjectDecl(TemplateParamObjectDecl *D) {
VisitValueDecl(D);
D->Value = Record.readAPValue();
@@ -1414,54 +1606,64 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
RedeclarableResult Redecl = VisitRedeclarable(VD);
VisitDeclaratorDecl(VD);
- VD->VarDeclBits.SClass = (StorageClass)Record.readInt();
- VD->VarDeclBits.TSCSpec = Record.readInt();
- VD->VarDeclBits.InitStyle = Record.readInt();
- VD->VarDeclBits.ARCPseudoStrong = Record.readInt();
+ BitsUnpacker VarDeclBits(Record.readInt());
+ auto VarLinkage = Linkage(VarDeclBits.getNextBits(/*Width=*/3));
+ bool DefGeneratedInModule = VarDeclBits.getNextBit();
+ VD->VarDeclBits.SClass = (StorageClass)VarDeclBits.getNextBits(/*Width=*/3);
+ VD->VarDeclBits.TSCSpec = VarDeclBits.getNextBits(/*Width=*/2);
+ VD->VarDeclBits.InitStyle = VarDeclBits.getNextBits(/*Width=*/2);
+ VD->VarDeclBits.ARCPseudoStrong = VarDeclBits.getNextBit();
+ bool HasDeducedType = false;
if (!isa<ParmVarDecl>(VD)) {
VD->NonParmVarDeclBits.IsThisDeclarationADemotedDefinition =
- Record.readInt();
- VD->NonParmVarDeclBits.ExceptionVar = Record.readInt();
- VD->NonParmVarDeclBits.NRVOVariable = Record.readInt();
- VD->NonParmVarDeclBits.CXXForRangeDecl = Record.readInt();
- VD->NonParmVarDeclBits.ObjCForDecl = Record.readInt();
- VD->NonParmVarDeclBits.IsInline = Record.readInt();
- VD->NonParmVarDeclBits.IsInlineSpecified = Record.readInt();
- VD->NonParmVarDeclBits.IsConstexpr = Record.readInt();
- VD->NonParmVarDeclBits.IsInitCapture = Record.readInt();
- VD->NonParmVarDeclBits.PreviousDeclInSameBlockScope = Record.readInt();
- VD->NonParmVarDeclBits.ImplicitParamKind = Record.readInt();
- VD->NonParmVarDeclBits.EscapingByref = Record.readInt();
- }
- auto VarLinkage = Linkage(Record.readInt());
+ VarDeclBits.getNextBit();
+ VD->NonParmVarDeclBits.ExceptionVar = VarDeclBits.getNextBit();
+ VD->NonParmVarDeclBits.NRVOVariable = VarDeclBits.getNextBit();
+ VD->NonParmVarDeclBits.CXXForRangeDecl = VarDeclBits.getNextBit();
+
+ VD->NonParmVarDeclBits.IsInline = VarDeclBits.getNextBit();
+ VD->NonParmVarDeclBits.IsInlineSpecified = VarDeclBits.getNextBit();
+ VD->NonParmVarDeclBits.IsConstexpr = VarDeclBits.getNextBit();
+ VD->NonParmVarDeclBits.IsInitCapture = VarDeclBits.getNextBit();
+ VD->NonParmVarDeclBits.PreviousDeclInSameBlockScope =
+ VarDeclBits.getNextBit();
+
+ VD->NonParmVarDeclBits.EscapingByref = VarDeclBits.getNextBit();
+ HasDeducedType = VarDeclBits.getNextBit();
+ VD->NonParmVarDeclBits.ImplicitParamKind =
+ VarDeclBits.getNextBits(/*Width*/ 3);
+
+ VD->NonParmVarDeclBits.ObjCForDecl = VarDeclBits.getNextBit();
+ }
+
+ // If this variable has a deduced type, defer reading that type until we are
+ // done deserializing this variable, because the type might refer back to the
+ // variable.
+ if (HasDeducedType)
+ Reader.PendingDeducedVarTypes.push_back({VD, DeferredTypeID});
+ else
+ VD->setType(Reader.GetType(DeferredTypeID));
+ DeferredTypeID = 0;
+
VD->setCachedLinkage(VarLinkage);
// Reconstruct the one piece of the IdentifierNamespace that we need.
- if (VD->getStorageClass() == SC_Extern && VarLinkage != NoLinkage &&
+ if (VD->getStorageClass() == SC_Extern && VarLinkage != Linkage::None &&
VD->getLexicalDeclContext()->isFunctionOrMethod())
VD->setLocalExternDecl();
- if (uint64_t Val = Record.readInt()) {
- VD->setInit(Record.readExpr());
- if (Val != 1) {
- EvaluatedStmt *Eval = VD->ensureEvaluatedStmt();
- Eval->HasConstantInitialization = (Val & 2) != 0;
- Eval->HasConstantDestruction = (Val & 4) != 0;
- }
+ if (DefGeneratedInModule) {
+ Reader.DefinitionSource[VD] =
+ Loc.F->Kind == ModuleKind::MK_MainFile ||
+ Reader.getContext().getLangOpts().BuildingPCHWithObjectFile;
}
- if (VD->hasAttr<BlocksAttr>() && VD->getType()->getAsCXXRecordDecl()) {
+ if (VD->hasAttr<BlocksAttr>()) {
Expr *CopyExpr = Record.readExpr();
if (CopyExpr)
Reader.getContext().setBlockVarCopyInit(VD, CopyExpr, Record.readInt());
}
- if (VD->getStorageDuration() == SD_Static && Record.readInt()) {
- Reader.DefinitionSource[VD] =
- Loc.F->Kind == ModuleKind::MK_MainFile ||
- Reader.getContext().getLangOpts().BuildingPCHWithObjectFile;
- }
-
enum VarKind {
VarNotTemplate = 0, VarTemplate, StaticDataMemberSpecialization
};
@@ -1490,16 +1692,37 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
return Redecl;
}
+void ASTDeclReader::ReadVarDeclInit(VarDecl *VD) {
+ if (uint64_t Val = Record.readInt()) {
+ EvaluatedStmt *Eval = VD->ensureEvaluatedStmt();
+ Eval->HasConstantInitialization = (Val & 2) != 0;
+ Eval->HasConstantDestruction = (Val & 4) != 0;
+ Eval->WasEvaluated = (Val & 8) != 0;
+ if (Eval->WasEvaluated) {
+ Eval->Evaluated = Record.readAPValue();
+ if (Eval->Evaluated.needsCleanup())
+ Reader.getContext().addDestruction(&Eval->Evaluated);
+ }
+
+ // Store the offset of the initializer. Don't deserialize it yet: it might
+ // not be needed, and might refer back to the variable, for example if it
+ // contains a lambda.
+ Eval->Value = GetCurrentCursorOffset();
+ }
+}
+
void ASTDeclReader::VisitImplicitParamDecl(ImplicitParamDecl *PD) {
VisitVarDecl(PD);
}
void ASTDeclReader::VisitParmVarDecl(ParmVarDecl *PD) {
VisitVarDecl(PD);
- unsigned isObjCMethodParam = Record.readInt();
- unsigned scopeDepth = Record.readInt();
+
unsigned scopeIndex = Record.readInt();
- unsigned declQualifier = Record.readInt();
+ BitsUnpacker ParmVarDeclBits(Record.readInt());
+ unsigned isObjCMethodParam = ParmVarDeclBits.getNextBit();
+ unsigned scopeDepth = ParmVarDeclBits.getNextBits(/*Width=*/7);
+ unsigned declQualifier = ParmVarDeclBits.getNextBits(/*Width=*/7);
if (isObjCMethodParam) {
assert(scopeDepth == 0);
PD->setObjCMethodScopeInfo(scopeIndex);
@@ -1507,11 +1730,15 @@ void ASTDeclReader::VisitParmVarDecl(ParmVarDecl *PD) {
} else {
PD->setScopeInfo(scopeDepth, scopeIndex);
}
- PD->ParmVarDeclBits.IsKNRPromoted = Record.readInt();
- PD->ParmVarDeclBits.HasInheritedDefaultArg = Record.readInt();
- if (Record.readInt()) // hasUninstantiatedDefaultArg.
+ PD->ParmVarDeclBits.IsKNRPromoted = ParmVarDeclBits.getNextBit();
+
+ PD->ParmVarDeclBits.HasInheritedDefaultArg = ParmVarDeclBits.getNextBit();
+ if (ParmVarDeclBits.getNextBit()) // hasUninstantiatedDefaultArg.
PD->setUninstantiatedDefaultArg(Record.readExpr());
+ if (ParmVarDeclBits.getNextBit()) // Valid explicit object parameter
+ PD->ExplicitObjectParameterIntroducerLoc = Record.readSourceLocation();
+
// FIXME: If this is a redeclaration of a function from another module, handle
// inheritance of default arguments.
}
@@ -1536,6 +1763,11 @@ void ASTDeclReader::VisitFileScopeAsmDecl(FileScopeAsmDecl *AD) {
AD->setRParenLoc(readSourceLocation());
}
+void ASTDeclReader::VisitTopLevelStmtDecl(TopLevelStmtDecl *D) {
+ VisitDecl(D);
+ D->Statement = Record.readStmt();
+}
+
void ASTDeclReader::VisitBlockDecl(BlockDecl *BD) {
VisitDecl(BD);
BD->setBody(cast_or_null<CompoundStmt>(Record.readStmt()));
@@ -1584,7 +1816,7 @@ void ASTDeclReader::VisitCapturedDecl(CapturedDecl *CD) {
void ASTDeclReader::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
VisitDecl(D);
- D->setLanguage((LinkageSpecDecl::LanguageIDs)Record.readInt());
+ D->setLanguage(static_cast<LinkageSpecLanguageIDs>(Record.readInt()));
D->setExternLoc(readSourceLocation());
D->setRBraceLoc(readSourceLocation());
}
@@ -1602,7 +1834,10 @@ void ASTDeclReader::VisitLabelDecl(LabelDecl *D) {
void ASTDeclReader::VisitNamespaceDecl(NamespaceDecl *D) {
RedeclarableResult Redecl = VisitRedeclarable(D);
VisitNamedDecl(D);
- D->setInline(Record.readInt());
+
+ BitsUnpacker NamespaceDeclBits(Record.readInt());
+ D->setInline(NamespaceDeclBits.getNextBit());
+ D->setNested(NamespaceDeclBits.getNextBit());
D->LocStart = readSourceLocation();
D->RBraceLoc = readSourceLocation();
@@ -1616,7 +1851,7 @@ void ASTDeclReader::VisitNamespaceDecl(NamespaceDecl *D) {
} else {
// Link this namespace back to the first declaration, which has already
// been deserialized.
- D->AnonOrFirstNamespaceAndInline.setPointer(D->getFirstDecl());
+ D->AnonOrFirstNamespaceAndFlags.setPointer(D->getFirstDecl());
}
mergeRedeclarable(D, Redecl);
@@ -1631,6 +1866,15 @@ void ASTDeclReader::VisitNamespaceDecl(NamespaceDecl *D) {
}
}
+void ASTDeclReader::VisitHLSLBufferDecl(HLSLBufferDecl *D) {
+ VisitNamedDecl(D);
+ VisitDeclContext(D);
+ D->IsCBuffer = Record.readBool();
+ D->KwLoc = readSourceLocation();
+ D->LBraceLoc = readSourceLocation();
+ D->RBraceLoc = readSourceLocation();
+}
+
void ASTDeclReader::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
RedeclarableResult Redecl = VisitRedeclarable(D);
VisitNamedDecl(D);
@@ -1657,7 +1901,7 @@ void ASTDeclReader::VisitUsingEnumDecl(UsingEnumDecl *D) {
VisitNamedDecl(D);
D->setUsingLoc(readSourceLocation());
D->setEnumLoc(readSourceLocation());
- D->Enum = readDeclAs<EnumDecl>();
+ D->setEnumType(Record.readTypeSourceInfo());
D->FirstUsingShadow.setPointer(readDeclAs<UsingShadowDecl>());
if (auto *Pattern = readDeclAs<UsingEnumDecl>())
Reader.getContext().setInstantiatedFromUsingEnumDecl(D, Pattern);
@@ -1726,67 +1970,98 @@ void ASTDeclReader::VisitUnresolvedUsingIfExistsDecl(
}
void ASTDeclReader::ReadCXXDefinitionData(
- struct CXXRecordDecl::DefinitionData &Data, const CXXRecordDecl *D) {
- #define FIELD(Name, Width, Merge) \
- Data.Name = Record.readInt();
- #include "clang/AST/CXXRecordDeclDefinitionBits.def"
+ struct CXXRecordDecl::DefinitionData &Data, const CXXRecordDecl *D,
+ Decl *LambdaContext, unsigned IndexInLambdaContext) {
- // Note: the caller has deserialized the IsLambda bit already.
- Data.ODRHash = Record.readInt();
- Data.HasODRHash = true;
+ BitsUnpacker CXXRecordDeclBits = Record.readInt();
+
+ bool ShouldSkipCheckingODR = CXXRecordDeclBits.getNextBit();
+
+#define FIELD(Name, Width, Merge) \
+ if (!CXXRecordDeclBits.canGetNextNBits(Width)) \
+ CXXRecordDeclBits.updateValue(Record.readInt()); \
+ Data.Name = CXXRecordDeclBits.getNextBits(Width);
+
+#include "clang/AST/CXXRecordDeclDefinitionBits.def"
+#undef FIELD
+
+ // We only perform ODR checks for decls not in GMF.
+ if (!ShouldSkipCheckingODR) {
+ // Note: the caller has deserialized the IsLambda bit already.
+ Data.ODRHash = Record.readInt();
+ Data.HasODRHash = true;
+ }
if (Record.readInt()) {
- Reader.DefinitionSource[D] =
+ Reader.DefinitionSource[D] =
Loc.F->Kind == ModuleKind::MK_MainFile ||
Reader.getContext().getLangOpts().BuildingPCHWithObjectFile;
}
- Data.NumBases = Record.readInt();
- if (Data.NumBases)
- Data.Bases = ReadGlobalOffset();
- Data.NumVBases = Record.readInt();
- if (Data.NumVBases)
- Data.VBases = ReadGlobalOffset();
-
Record.readUnresolvedSet(Data.Conversions);
Data.ComputedVisibleConversions = Record.readInt();
if (Data.ComputedVisibleConversions)
Record.readUnresolvedSet(Data.VisibleConversions);
assert(Data.Definition && "Data.Definition should be already set!");
- Data.FirstFriend = readDeclID();
- if (Data.IsLambda) {
+ if (!Data.IsLambda) {
+ assert(!LambdaContext && !IndexInLambdaContext &&
+ "given lambda context for non-lambda");
+
+ Data.NumBases = Record.readInt();
+ if (Data.NumBases)
+ Data.Bases = ReadGlobalOffset();
+
+ Data.NumVBases = Record.readInt();
+ if (Data.NumVBases)
+ Data.VBases = ReadGlobalOffset();
+
+ Data.FirstFriend = readDeclID();
+ } else {
using Capture = LambdaCapture;
auto &Lambda = static_cast<CXXRecordDecl::LambdaDefinitionData &>(Data);
- Lambda.Dependent = Record.readInt();
- Lambda.IsGenericLambda = Record.readInt();
- Lambda.CaptureDefault = Record.readInt();
- Lambda.NumCaptures = Record.readInt();
+
+ BitsUnpacker LambdaBits(Record.readInt());
+ Lambda.DependencyKind = LambdaBits.getNextBits(/*Width=*/2);
+ Lambda.IsGenericLambda = LambdaBits.getNextBit();
+ Lambda.CaptureDefault = LambdaBits.getNextBits(/*Width=*/2);
+ Lambda.NumCaptures = LambdaBits.getNextBits(/*Width=*/15);
+ Lambda.HasKnownInternalLinkage = LambdaBits.getNextBit();
+
Lambda.NumExplicitCaptures = Record.readInt();
- Lambda.HasKnownInternalLinkage = Record.readInt();
Lambda.ManglingNumber = Record.readInt();
- D->setDeviceLambdaManglingNumber(Record.readInt());
- Lambda.ContextDecl = readDeclID();
- Lambda.Captures = (Capture *)Reader.getContext().Allocate(
- sizeof(Capture) * Lambda.NumCaptures);
- Capture *ToCapture = Lambda.Captures;
+ if (unsigned DeviceManglingNumber = Record.readInt())
+ Reader.getContext().DeviceLambdaManglingNumbers[D] = DeviceManglingNumber;
+ Lambda.IndexInContext = IndexInLambdaContext;
+ Lambda.ContextDecl = LambdaContext;
+ Capture *ToCapture = nullptr;
+ if (Lambda.NumCaptures) {
+ ToCapture = (Capture *)Reader.getContext().Allocate(sizeof(Capture) *
+ Lambda.NumCaptures);
+ Lambda.AddCaptureList(Reader.getContext(), ToCapture);
+ }
Lambda.MethodTyInfo = readTypeSourceInfo();
for (unsigned I = 0, N = Lambda.NumCaptures; I != N; ++I) {
SourceLocation Loc = readSourceLocation();
- bool IsImplicit = Record.readInt();
- auto Kind = static_cast<LambdaCaptureKind>(Record.readInt());
+ BitsUnpacker CaptureBits(Record.readInt());
+ bool IsImplicit = CaptureBits.getNextBit();
+ auto Kind =
+ static_cast<LambdaCaptureKind>(CaptureBits.getNextBits(/*Width=*/3));
switch (Kind) {
case LCK_StarThis:
case LCK_This:
case LCK_VLAType:
- *ToCapture++ = Capture(Loc, IsImplicit, Kind, nullptr,SourceLocation());
+ new (ToCapture)
+ Capture(Loc, IsImplicit, Kind, nullptr, SourceLocation());
+ ToCapture++;
break;
case LCK_ByCopy:
case LCK_ByRef:
- auto *Var = readDeclAs<VarDecl>();
+ auto *Var = readDeclAs<ValueDecl>();
SourceLocation EllipsisLoc = readSourceLocation();
- *ToCapture++ = Capture(Loc, IsImplicit, Kind, Var, EllipsisLoc);
+ new (ToCapture) Capture(Loc, IsImplicit, Kind, Var, EllipsisLoc);
+ ToCapture++;
break;
}
}
@@ -1806,7 +2081,7 @@ void ASTDeclReader::MergeDefinitionData(
Reader.PendingDefinitions.erase(MergeDD.Definition);
MergeDD.Definition->setCompleteDefinition(false);
Reader.mergeDefinitionVisibility(DD.Definition, MergeDD.Definition);
- assert(Reader.Lookups.find(MergeDD.Definition) == Reader.Lookups.end() &&
+ assert(!Reader.Lookups.contains(MergeDD.Definition) &&
"already loaded pending lookups for merged definition");
}
@@ -1854,10 +2129,32 @@ void ASTDeclReader::MergeDefinitionData(
// lazily load it.
if (DD.IsLambda) {
- // FIXME: ODR-checking for merging lambdas (this happens, for instance,
- // when they occur within the body of a function template specialization).
+ auto &Lambda1 = static_cast<CXXRecordDecl::LambdaDefinitionData &>(DD);
+ auto &Lambda2 = static_cast<CXXRecordDecl::LambdaDefinitionData &>(MergeDD);
+ DetectedOdrViolation |= Lambda1.DependencyKind != Lambda2.DependencyKind;
+ DetectedOdrViolation |= Lambda1.IsGenericLambda != Lambda2.IsGenericLambda;
+ DetectedOdrViolation |= Lambda1.CaptureDefault != Lambda2.CaptureDefault;
+ DetectedOdrViolation |= Lambda1.NumCaptures != Lambda2.NumCaptures;
+ DetectedOdrViolation |=
+ Lambda1.NumExplicitCaptures != Lambda2.NumExplicitCaptures;
+ DetectedOdrViolation |=
+ Lambda1.HasKnownInternalLinkage != Lambda2.HasKnownInternalLinkage;
+ DetectedOdrViolation |= Lambda1.ManglingNumber != Lambda2.ManglingNumber;
+
+ if (Lambda1.NumCaptures && Lambda1.NumCaptures == Lambda2.NumCaptures) {
+ for (unsigned I = 0, N = Lambda1.NumCaptures; I != N; ++I) {
+ LambdaCapture &Cap1 = Lambda1.Captures.front()[I];
+ LambdaCapture &Cap2 = Lambda2.Captures.front()[I];
+ DetectedOdrViolation |= Cap1.getCaptureKind() != Cap2.getCaptureKind();
+ }
+ Lambda1.AddCaptureList(Reader.getContext(), Lambda2.Captures.front());
+ }
}
+ // We don't want to check ODR for decls in the global module fragment.
+ if (MergeDD.Definition->shouldSkipCheckingODR())
+ return;
+
if (D->getODRHash() != MergeDD.ODRHash) {
DetectedOdrViolation = true;
}
@@ -1867,16 +2164,20 @@ void ASTDeclReader::MergeDefinitionData(
{MergeDD.Definition, &MergeDD});
}
-void ASTDeclReader::ReadCXXRecordDefinition(CXXRecordDecl *D, bool Update) {
+void ASTDeclReader::ReadCXXRecordDefinition(CXXRecordDecl *D, bool Update,
+ Decl *LambdaContext,
+ unsigned IndexInLambdaContext) {
struct CXXRecordDecl::DefinitionData *DD;
ASTContext &C = Reader.getContext();
// Determine whether this is a lambda closure type, so that we can
// allocate the appropriate DefinitionData structure.
bool IsLambda = Record.readInt();
+ assert(!(IsLambda && Update) &&
+ "lambda definition should not be added by update record");
if (IsLambda)
- DD = new (C) CXXRecordDecl::LambdaDefinitionData(D, nullptr, false, false,
- LCD_None);
+ DD = new (C) CXXRecordDecl::LambdaDefinitionData(
+ D, nullptr, CXXRecordDecl::LDK_Unknown, false, LCD_None);
else
DD = new (C) struct CXXRecordDecl::DefinitionData(D);
@@ -1887,7 +2188,7 @@ void ASTDeclReader::ReadCXXRecordDefinition(CXXRecordDecl *D, bool Update) {
if (!Canon->DefinitionData)
Canon->DefinitionData = DD;
D->DefinitionData = Canon->DefinitionData;
- ReadCXXDefinitionData(*DD, D);
+ ReadCXXDefinitionData(*DD, D, LambdaContext, IndexInLambdaContext);
// We might already have a different definition for this record. This can
// happen either because we're reading an update record, or because we've
@@ -1914,8 +2215,15 @@ ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) {
ASTContext &C = Reader.getContext();
enum CXXRecKind {
- CXXRecNotTemplate = 0, CXXRecTemplate, CXXRecMemberSpecialization
+ CXXRecNotTemplate = 0,
+ CXXRecTemplate,
+ CXXRecMemberSpecialization,
+ CXXLambda
};
+
+ Decl *LambdaContext = nullptr;
+ unsigned IndexInLambdaContext = 0;
+
switch ((CXXRecKind)Record.readInt()) {
case CXXRecNotTemplate:
// Merged when we merge the folding set entry in the primary template.
@@ -1947,11 +2255,19 @@ ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) {
mergeRedeclarable(D, Redecl);
break;
}
+ case CXXLambda: {
+ LambdaContext = readDecl();
+ if (LambdaContext)
+ IndexInLambdaContext = Record.readInt();
+ mergeLambda(D, Redecl, LambdaContext, IndexInLambdaContext);
+ break;
+ }
}
bool WasDefinition = Record.readInt();
if (WasDefinition)
- ReadCXXRecordDefinition(D, /*Update*/false);
+ ReadCXXRecordDefinition(D, /*Update=*/false, LambdaContext,
+ IndexInLambdaContext);
else
// Propagate DefinitionData pointer from the canonical declaration.
D->DefinitionData = D->getCanonicalDecl()->DefinitionData;
@@ -1974,7 +2290,8 @@ void ASTDeclReader::VisitCXXDeductionGuideDecl(CXXDeductionGuideDecl *D) {
D->setExplicitSpecifier(Record.readExplicitSpec());
D->Ctor = readDeclAs<CXXConstructorDecl>();
VisitFunctionDecl(D);
- D->setIsCopyDeductionCandidate(Record.readInt());
+ D->setDeductionCandidateKind(
+ static_cast<DeductionCandidate>(Record.readInt()));
}
void ASTDeclReader::VisitCXXMethodDecl(CXXMethodDecl *D) {
@@ -2061,7 +2378,7 @@ void ASTDeclReader::VisitFriendTemplateDecl(FriendTemplateDecl *D) {
VisitDecl(D);
unsigned NumParams = Record.readInt();
D->NumParams = NumParams;
- D->Params = new TemplateParameterList*[NumParams];
+ D->Params = new (Reader.getContext()) TemplateParameterList *[NumParams];
for (unsigned i = 0; i != NumParams; ++i)
D->Params[i] = Record.readTemplateParameterList();
if (Record.readInt()) // HasFriendDecl
@@ -2071,15 +2388,12 @@ void ASTDeclReader::VisitFriendTemplateDecl(FriendTemplateDecl *D) {
D->FriendLoc = readSourceLocation();
}
-DeclID ASTDeclReader::VisitTemplateDecl(TemplateDecl *D) {
+void ASTDeclReader::VisitTemplateDecl(TemplateDecl *D) {
VisitNamedDecl(D);
- DeclID PatternID = readDeclID();
- auto *TemplatedDecl = cast_or_null<NamedDecl>(Reader.GetDecl(PatternID));
- TemplateParameterList *TemplateParams = Record.readTemplateParameterList();
- D->init(TemplatedDecl, TemplateParams);
-
- return PatternID;
+ assert(!D->TemplateParams && "TemplateParams already set!");
+ D->TemplateParams = Record.readTemplateParameterList();
+ D->init(readDeclAs<NamedDecl>());
}
void ASTDeclReader::VisitConceptDecl(ConceptDecl *D) {
@@ -2088,6 +2402,17 @@ void ASTDeclReader::VisitConceptDecl(ConceptDecl *D) {
mergeMergeable(D);
}
+void ASTDeclReader::VisitImplicitConceptSpecializationDecl(
+ ImplicitConceptSpecializationDecl *D) {
+ // The size of the template list was read during creation of the Decl, so we
+ // don't have to re-read it here.
+ VisitDecl(D);
+ llvm::SmallVector<TemplateArgument, 4> Args;
+ for (unsigned I = 0; I < D->NumTemplateArgs; ++I)
+ Args.push_back(Record.readTemplateArgument(/*Canonicalize=*/true));
+ D->setTemplateArguments(Args);
+}
+
void ASTDeclReader::VisitRequiresExprBodyDecl(RequiresExprBodyDecl *D) {
}
@@ -2116,21 +2441,15 @@ ASTDeclReader::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) {
}
}
- DeclID PatternID = VisitTemplateDecl(D);
+ VisitTemplateDecl(D);
D->IdentifierNamespace = Record.readInt();
- mergeRedeclarable(D, Redecl, PatternID);
-
- // If we merged the template with a prior declaration chain, merge the common
- // pointer.
- // FIXME: Actually merge here, don't just overwrite.
- D->Common = D->getCanonicalDecl()->Common;
-
return Redecl;
}
void ASTDeclReader::VisitClassTemplateDecl(ClassTemplateDecl *D) {
RedeclarableResult Redecl = VisitRedeclarableTemplateDecl(D);
+ mergeRedeclarableTemplate(D, Redecl);
if (ThisDeclID == Redecl.getFirstID()) {
// This ClassTemplateDecl owns a CommonPtr; read it to keep track of all of
@@ -2158,6 +2477,7 @@ void ASTDeclReader::VisitBuiltinTemplateDecl(BuiltinTemplateDecl *D) {
/// VarTemplateDecl beyond TemplateDecl...
void ASTDeclReader::VisitVarTemplateDecl(VarTemplateDecl *D) {
RedeclarableResult Redecl = VisitRedeclarableTemplateDecl(D);
+ mergeRedeclarableTemplate(D, Redecl);
if (ThisDeclID == Redecl.getFirstID()) {
// This VarTemplateDecl owns a CommonPtr; read it to keep track of all of
@@ -2259,14 +2579,6 @@ void ASTDeclReader::VisitClassTemplatePartialSpecializationDecl(
}
}
-void ASTDeclReader::VisitClassScopeFunctionSpecializationDecl(
- ClassScopeFunctionSpecializationDecl *D) {
- VisitDecl(D);
- D->Specialization = readDeclAs<CXXMethodDecl>();
- if (Record.readInt())
- D->TemplateArgs = Record.readASTTemplateArgumentListInfo();
-}
-
void ASTDeclReader::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
RedeclarableResult Redecl = VisitRedeclarableTemplateDecl(D);
@@ -2286,8 +2598,6 @@ void ASTDeclReader::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
ASTDeclReader::RedeclarableResult
ASTDeclReader::VisitVarTemplateSpecializationDeclImpl(
VarTemplateSpecializationDecl *D) {
- RedeclarableResult Redecl = VisitVarDeclImpl(D);
-
ASTContext &C = Reader.getContext();
if (Decl *InstD = readDecl()) {
if (auto *VTD = dyn_cast<VarTemplateDecl>(InstD)) {
@@ -2324,17 +2634,23 @@ ASTDeclReader::VisitVarTemplateSpecializationDeclImpl(
D->SpecializationKind = (TemplateSpecializationKind)Record.readInt();
D->IsCompleteDefinition = Record.readInt();
+ RedeclarableResult Redecl = VisitVarDeclImpl(D);
+
bool writtenAsCanonicalDecl = Record.readInt();
if (writtenAsCanonicalDecl) {
auto *CanonPattern = readDeclAs<VarTemplateDecl>();
if (D->isCanonicalDecl()) { // It's kept in the folding set.
- // FIXME: If it's already present, merge it.
+ VarTemplateSpecializationDecl *CanonSpec;
if (auto *Partial = dyn_cast<VarTemplatePartialSpecializationDecl>(D)) {
- CanonPattern->getCommonPtr()->PartialSpecializations
- .GetOrInsertNode(Partial);
+ CanonSpec = CanonPattern->getCommonPtr()
+ ->PartialSpecializations.GetOrInsertNode(Partial);
} else {
- CanonPattern->getCommonPtr()->Specializations.GetOrInsertNode(D);
+ CanonSpec =
+ CanonPattern->getCommonPtr()->Specializations.GetOrInsertNode(D);
}
+ // If we already have a matching specialization, merge it.
+ if (CanonSpec != D)
+ mergeRedeclarable<VarDecl>(D, CanonSpec, Redecl);
}
}
@@ -2367,16 +2683,13 @@ void ASTDeclReader::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
D->setDeclaredWithTypename(Record.readInt());
- if (Record.readBool()) {
- NestedNameSpecifierLoc NNS = Record.readNestedNameSpecifierLoc();
- DeclarationNameInfo DN = Record.readDeclarationNameInfo();
- ConceptDecl *NamedConcept = Record.readDeclAs<ConceptDecl>();
- const ASTTemplateArgumentListInfo *ArgsAsWritten = nullptr;
+ if (D->hasTypeConstraint()) {
+ ConceptReference *CR = nullptr;
if (Record.readBool())
- ArgsAsWritten = Record.readASTTemplateArgumentListInfo();
+ CR = Record.readConceptReference();
Expr *ImmediatelyDeclaredConstraint = Record.readExpr();
- D->setTypeConstraint(NNS, DN, /*FoundDecl=*/nullptr, NamedConcept,
- ArgsAsWritten, ImmediatelyDeclaredConstraint);
+
+ D->setTypeConstraint(CR, ImmediatelyDeclaredConstraint);
if ((D->ExpandedParameterPack = Record.readInt()))
D->NumExpanded = Record.readInt();
}
@@ -2427,7 +2740,8 @@ void ASTDeclReader::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
}
void ASTDeclReader::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
- VisitRedeclarableTemplateDecl(D);
+ RedeclarableResult Redecl = VisitRedeclarableTemplateDecl(D);
+ mergeRedeclarableTemplate(D, Redecl);
}
void ASTDeclReader::VisitStaticAssertDecl(StaticAssertDecl *D) {
@@ -2524,10 +2838,9 @@ ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) {
/// Attempts to merge the given declaration (D) with another declaration
/// of the same entity.
-template<typename T>
+template <typename T>
void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *DBase,
- RedeclarableResult &Redecl,
- DeclID TemplatePatternID) {
+ RedeclarableResult &Redecl) {
// If modules are not available, there is no reason to perform this merge.
if (!Reader.getContext().getLangOpts().Modules)
return;
@@ -2540,10 +2853,54 @@ void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *DBase,
if (auto *Existing = Redecl.getKnownMergeTarget())
// We already know of an existing declaration we should merge with.
- mergeRedeclarable(D, cast<T>(Existing), Redecl, TemplatePatternID);
+ mergeRedeclarable(D, cast<T>(Existing), Redecl);
else if (FindExistingResult ExistingRes = findExisting(D))
if (T *Existing = ExistingRes)
- mergeRedeclarable(D, Existing, Redecl, TemplatePatternID);
+ mergeRedeclarable(D, Existing, Redecl);
+}
+
+/// Attempt to merge D with a previous declaration of the same lambda, which is
+/// found by its index within its context declaration, if it has one.
+///
+/// We can't look up lambdas in their enclosing lexical or semantic context in
+/// general, because for lambdas in variables, both of those might be a
+/// namespace or the translation unit.
+void ASTDeclReader::mergeLambda(CXXRecordDecl *D, RedeclarableResult &Redecl,
+ Decl *Context, unsigned IndexInContext) {
+ // If we don't have a mangling context, treat this like any other
+ // declaration.
+ if (!Context)
+ return mergeRedeclarable(D, Redecl);
+
+ // If modules are not available, there is no reason to perform this merge.
+ if (!Reader.getContext().getLangOpts().Modules)
+ return;
+
+ // If we're not the canonical declaration, we don't need to merge.
+ if (!D->isFirstDecl())
+ return;
+
+ if (auto *Existing = Redecl.getKnownMergeTarget())
+ // We already know of an existing declaration we should merge with.
+ mergeRedeclarable(D, cast<TagDecl>(Existing), Redecl);
+
+ // Look up this lambda to see if we've seen it before. If so, merge with the
+ // one we already loaded.
+ NamedDecl *&Slot = Reader.LambdaDeclarationsForMerging[{
+ Context->getCanonicalDecl(), IndexInContext}];
+ if (Slot)
+ mergeRedeclarable(D, cast<TagDecl>(Slot), Redecl);
+ else
+ Slot = D;
+}
+
+void ASTDeclReader::mergeRedeclarableTemplate(RedeclarableTemplateDecl *D,
+ RedeclarableResult &Redecl) {
+ mergeRedeclarable(D, Redecl);
+ // If we merged the template with a prior declaration chain, merge the
+ // common pointer.
+ // FIXME: Actually merge here, don't just overwrite.
+ D->Common = D->getCanonicalDecl()->Common;
}
/// "Cast" to type T, asserting if we don't have an implicit conversion.
@@ -2558,7 +2915,7 @@ template<typename T> static T assert_cast(...) {
/// declarations.
void ASTDeclReader::mergeTemplatePattern(RedeclarableTemplateDecl *D,
RedeclarableTemplateDecl *Existing,
- DeclID DsID, bool IsKeyDecl) {
+ bool IsKeyDecl) {
auto *DPattern = D->getTemplatedDecl();
auto *ExistingPattern = Existing->getTemplatedDecl();
RedeclarableResult Result(/*MergeWith*/ ExistingPattern,
@@ -2598,17 +2955,13 @@ void ASTDeclReader::mergeTemplatePattern(RedeclarableTemplateDecl *D,
/// Attempts to merge the given declaration (D) with another declaration
/// of the same entity.
-template<typename T>
+template <typename T>
void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *DBase, T *Existing,
- RedeclarableResult &Redecl,
- DeclID TemplatePatternID) {
+ RedeclarableResult &Redecl) {
auto *D = static_cast<T *>(DBase);
T *ExistingCanon = Existing->getCanonicalDecl();
T *DCanon = D->getCanonicalDecl();
if (ExistingCanon != DCanon) {
- assert(DCanon->getGlobalID() == Redecl.getFirstID() &&
- "already merged this declaration");
-
// Have our redeclaration link point back at the canonical declaration
// of the existing declaration, so that this declaration has the
// appropriate canonical declaration.
@@ -2621,14 +2974,14 @@ void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *DBase, T *Existing,
// We cannot have loaded any redeclarations of this declaration yet, so
// there's nothing else that needs to be updated.
if (auto *Namespace = dyn_cast<NamespaceDecl>(D))
- Namespace->AnonOrFirstNamespaceAndInline.setPointer(
- assert_cast<NamespaceDecl*>(ExistingCanon));
+ Namespace->AnonOrFirstNamespaceAndFlags.setPointer(
+ assert_cast<NamespaceDecl *>(ExistingCanon));
// When we merge a template, merge its pattern.
if (auto *DTemplate = dyn_cast<RedeclarableTemplateDecl>(D))
mergeTemplatePattern(
- DTemplate, assert_cast<RedeclarableTemplateDecl*>(ExistingCanon),
- TemplatePatternID, Redecl.isKeyDecl());
+ DTemplate, assert_cast<RedeclarableTemplateDecl *>(ExistingCanon),
+ Redecl.isKeyDecl());
// If this declaration is a key declaration, make a note of that.
if (Redecl.isKeyDecl())
@@ -2645,7 +2998,7 @@ static bool allowODRLikeMergeInC(NamedDecl *ND) {
if (!ND)
return false;
// TODO: implement merge for other necessary decls.
- if (isa<EnumConstantDecl>(ND))
+ if (isa<EnumConstantDecl, FieldDecl, IndirectFieldDecl>(ND))
return true;
return false;
}
@@ -2720,7 +3073,7 @@ void ASTDeclReader::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) {
Expr *Priv = Record.readExpr();
D->setInitializerData(Orig, Priv);
Expr *Init = Record.readExpr();
- auto IK = static_cast<OMPDeclareReductionDecl::InitKind>(Record.readInt());
+ auto IK = static_cast<OMPDeclareReductionInitKind>(Record.readInt());
D->setInitializer(Init, IK);
D->PrevDeclInScope = readDeclID();
}
@@ -2751,6 +3104,8 @@ public:
return Reader.readInt();
}
+ bool readBool() { return Reader.readBool(); }
+
SourceRange readSourceRange() {
return Reader.readSourceRange();
}
@@ -2761,6 +3116,8 @@ public:
Expr *readExpr() { return Reader.readExpr(); }
+ Attr *readAttr() { return Reader.readAttr(); }
+
std::string readString() {
return Reader.readString();
}
@@ -2804,10 +3161,15 @@ Attr *ASTRecordReader::readAttr() {
unsigned ParsedKind = Record.readInt();
unsigned Syntax = Record.readInt();
unsigned SpellingIndex = Record.readInt();
+ bool IsAlignas = (ParsedKind == AttributeCommonInfo::AT_Aligned &&
+ Syntax == AttributeCommonInfo::AS_Keyword &&
+ SpellingIndex == AlignedAttr::Keyword_alignas);
+ bool IsRegularKeywordAttribute = Record.readBool();
AttributeCommonInfo Info(AttrName, ScopeName, AttrRange, ScopeLoc,
AttributeCommonInfo::Kind(ParsedKind),
- AttributeCommonInfo::Syntax(Syntax), SpellingIndex);
+ {AttributeCommonInfo::Syntax(Syntax), SpellingIndex,
+ IsAlignas, IsRegularKeywordAttribute});
#include "clang/Serialization/AttrPCHRead.inc"
@@ -2818,7 +3180,8 @@ Attr *ASTRecordReader::readAttr() {
/// Reads attributes from the current stream position.
void ASTRecordReader::readAttributes(AttrVec &Attrs) {
for (unsigned I = 0, E = readInt(); I != E; ++I)
- Attrs.push_back(readAttr());
+ if (auto *A = readAttr())
+ Attrs.push_back(A);
}
//===----------------------------------------------------------------------===//
@@ -2855,16 +3218,11 @@ static bool isConsumerInterestedIn(ASTContext &Ctx, Decl *D, bool HasBody) {
return false;
}
- if (isa<FileScopeAsmDecl>(D) ||
- isa<ObjCProtocolDecl>(D) ||
- isa<ObjCImplDecl>(D) ||
- isa<ImportDecl>(D) ||
- isa<PragmaCommentDecl>(D) ||
- isa<PragmaDetectMismatchDecl>(D))
+ if (isa<FileScopeAsmDecl, TopLevelStmtDecl, ObjCProtocolDecl, ObjCImplDecl,
+ ImportDecl, PragmaCommentDecl, PragmaDetectMismatchDecl>(D))
return true;
- if (isa<OMPThreadPrivateDecl>(D) || isa<OMPDeclareReductionDecl>(D) ||
- isa<OMPDeclareMapperDecl>(D) || isa<OMPAllocateDecl>(D) ||
- isa<OMPRequiresDecl>(D))
+ if (isa<OMPThreadPrivateDecl, OMPDeclareReductionDecl, OMPDeclareMapperDecl,
+ OMPAllocateDecl, OMPRequiresDecl>(D))
return !D->getDeclContext()->isFunctionOrMethod();
if (const auto *Var = dyn_cast<VarDecl>(D))
return Var->isFileVarDecl() &&
@@ -2903,385 +3261,30 @@ uint64_t ASTReader::getGlobalBitOffset(ModuleFile &M, uint64_t LocalOffset) {
return LocalOffset + M.GlobalBitOffset;
}
-static bool isSameTemplateParameterList(const ASTContext &C,
- const TemplateParameterList *X,
- const TemplateParameterList *Y);
-
-/// Determine whether two template parameters are similar enough
-/// that they may be used in declarations of the same template.
-static bool isSameTemplateParameter(const NamedDecl *X,
- const NamedDecl *Y) {
- if (X->getKind() != Y->getKind())
- return false;
-
- if (const auto *TX = dyn_cast<TemplateTypeParmDecl>(X)) {
- const auto *TY = cast<TemplateTypeParmDecl>(Y);
- if (TX->isParameterPack() != TY->isParameterPack())
- return false;
- if (TX->hasTypeConstraint() != TY->hasTypeConstraint())
- return false;
- const TypeConstraint *TXTC = TX->getTypeConstraint();
- const TypeConstraint *TYTC = TY->getTypeConstraint();
- if (!TXTC != !TYTC)
- return false;
- if (TXTC && TYTC) {
- if (TXTC->getNamedConcept() != TYTC->getNamedConcept())
- return false;
- if (TXTC->hasExplicitTemplateArgs() != TYTC->hasExplicitTemplateArgs())
- return false;
- if (TXTC->hasExplicitTemplateArgs()) {
- const auto *TXTCArgs = TXTC->getTemplateArgsAsWritten();
- const auto *TYTCArgs = TYTC->getTemplateArgsAsWritten();
- if (TXTCArgs->NumTemplateArgs != TYTCArgs->NumTemplateArgs)
- return false;
- llvm::FoldingSetNodeID XID, YID;
- for (const auto &ArgLoc : TXTCArgs->arguments())
- ArgLoc.getArgument().Profile(XID, X->getASTContext());
- for (const auto &ArgLoc : TYTCArgs->arguments())
- ArgLoc.getArgument().Profile(YID, Y->getASTContext());
- if (XID != YID)
- return false;
- }
- }
- return true;
- }
-
- if (const auto *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) {
- const auto *TY = cast<NonTypeTemplateParmDecl>(Y);
- return TX->isParameterPack() == TY->isParameterPack() &&
- TX->getASTContext().hasSameType(TX->getType(), TY->getType());
- }
-
- const auto *TX = cast<TemplateTemplateParmDecl>(X);
- const auto *TY = cast<TemplateTemplateParmDecl>(Y);
- return TX->isParameterPack() == TY->isParameterPack() &&
- isSameTemplateParameterList(TX->getASTContext(),
- TX->getTemplateParameters(),
- TY->getTemplateParameters());
-}
-
-static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) {
- if (auto *NS = X->getAsNamespace())
- return NS;
- if (auto *NAS = X->getAsNamespaceAlias())
- return NAS->getNamespace();
- return nullptr;
-}
-
-static bool isSameQualifier(const NestedNameSpecifier *X,
- const NestedNameSpecifier *Y) {
- if (auto *NSX = getNamespace(X)) {
- auto *NSY = getNamespace(Y);
- if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl())
- return false;
- } else if (X->getKind() != Y->getKind())
- return false;
-
- // FIXME: For namespaces and types, we're permitted to check that the entity
- // is named via the same tokens. We should probably do so.
- switch (X->getKind()) {
- case NestedNameSpecifier::Identifier:
- if (X->getAsIdentifier() != Y->getAsIdentifier())
- return false;
- break;
- case NestedNameSpecifier::Namespace:
- case NestedNameSpecifier::NamespaceAlias:
- // We've already checked that we named the same namespace.
- break;
- case NestedNameSpecifier::TypeSpec:
- case NestedNameSpecifier::TypeSpecWithTemplate:
- if (X->getAsType()->getCanonicalTypeInternal() !=
- Y->getAsType()->getCanonicalTypeInternal())
- return false;
- break;
- case NestedNameSpecifier::Global:
- case NestedNameSpecifier::Super:
- return true;
- }
-
- // Recurse into earlier portion of NNS, if any.
- auto *PX = X->getPrefix();
- auto *PY = Y->getPrefix();
- if (PX && PY)
- return isSameQualifier(PX, PY);
- return !PX && !PY;
-}
-
-/// Determine whether two template parameter lists are similar enough
-/// that they may be used in declarations of the same template.
-static bool isSameTemplateParameterList(const ASTContext &C,
- const TemplateParameterList *X,
- const TemplateParameterList *Y) {
- if (X->size() != Y->size())
- return false;
-
- for (unsigned I = 0, N = X->size(); I != N; ++I)
- if (!isSameTemplateParameter(X->getParam(I), Y->getParam(I)))
- return false;
-
- const Expr *XRC = X->getRequiresClause();
- const Expr *YRC = Y->getRequiresClause();
- if (!XRC != !YRC)
- return false;
- if (XRC) {
- llvm::FoldingSetNodeID XRCID, YRCID;
- XRC->Profile(XRCID, C, /*Canonical=*/true);
- YRC->Profile(YRCID, C, /*Canonical=*/true);
- if (XRCID != YRCID)
- return false;
- }
-
- return true;
-}
-
-/// Determine whether the attributes we can overload on are identical for A and
-/// B. Will ignore any overloadable attrs represented in the type of A and B.
-static bool hasSameOverloadableAttrs(const FunctionDecl *A,
- const FunctionDecl *B) {
- // Note that pass_object_size attributes are represented in the function's
- // ExtParameterInfo, so we don't need to check them here.
-
- llvm::FoldingSetNodeID Cand1ID, Cand2ID;
- auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>();
- auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>();
-
- for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) {
- Optional<EnableIfAttr *> Cand1A = std::get<0>(Pair);
- Optional<EnableIfAttr *> Cand2A = std::get<1>(Pair);
-
- // Return false if the number of enable_if attributes is different.
- if (!Cand1A || !Cand2A)
- return false;
-
- Cand1ID.clear();
- Cand2ID.clear();
-
- (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true);
- (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true);
-
- // Return false if any of the enable_if expressions of A and B are
- // different.
- if (Cand1ID != Cand2ID)
- return false;
- }
- return true;
-}
-
-/// Determine whether the two declarations refer to the same entity.
-static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
- assert(X->getDeclName() == Y->getDeclName() && "Declaration name mismatch!");
-
- if (X == Y)
- return true;
-
- // Must be in the same context.
- //
- // Note that we can't use DeclContext::Equals here, because the DeclContexts
- // could be two different declarations of the same function. (We will fix the
- // semantic DC to refer to the primary definition after merging.)
- if (!declaresSameEntity(cast<Decl>(X->getDeclContext()->getRedeclContext()),
- cast<Decl>(Y->getDeclContext()->getRedeclContext())))
- return false;
-
- // Two typedefs refer to the same entity if they have the same underlying
- // type.
- if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(X))
- if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Y))
- return X->getASTContext().hasSameType(TypedefX->getUnderlyingType(),
- TypedefY->getUnderlyingType());
-
- // Must have the same kind.
- if (X->getKind() != Y->getKind())
- return false;
-
- // Objective-C classes and protocols with the same name always match.
- if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X))
- return true;
-
- if (isa<ClassTemplateSpecializationDecl>(X)) {
- // No need to handle these here: we merge them when adding them to the
- // template.
- return false;
- }
-
- // Compatible tags match.
- if (const auto *TagX = dyn_cast<TagDecl>(X)) {
- const auto *TagY = cast<TagDecl>(Y);
- return (TagX->getTagKind() == TagY->getTagKind()) ||
- ((TagX->getTagKind() == TTK_Struct || TagX->getTagKind() == TTK_Class ||
- TagX->getTagKind() == TTK_Interface) &&
- (TagY->getTagKind() == TTK_Struct || TagY->getTagKind() == TTK_Class ||
- TagY->getTagKind() == TTK_Interface));
- }
-
- // Functions with the same type and linkage match.
- // FIXME: This needs to cope with merging of prototyped/non-prototyped
- // functions, etc.
- if (const auto *FuncX = dyn_cast<FunctionDecl>(X)) {
- const auto *FuncY = cast<FunctionDecl>(Y);
- if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(X)) {
- const auto *CtorY = cast<CXXConstructorDecl>(Y);
- if (CtorX->getInheritedConstructor() &&
- !isSameEntity(CtorX->getInheritedConstructor().getConstructor(),
- CtorY->getInheritedConstructor().getConstructor()))
- return false;
- }
-
- if (FuncX->isMultiVersion() != FuncY->isMultiVersion())
- return false;
-
- // Multiversioned functions with different feature strings are represented
- // as separate declarations.
- if (FuncX->isMultiVersion()) {
- const auto *TAX = FuncX->getAttr<TargetAttr>();
- const auto *TAY = FuncY->getAttr<TargetAttr>();
- assert(TAX && TAY && "Multiversion Function without target attribute");
-
- if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
- return false;
- }
-
- ASTContext &C = FuncX->getASTContext();
-
- const Expr *XRC = FuncX->getTrailingRequiresClause();
- const Expr *YRC = FuncY->getTrailingRequiresClause();
- if (!XRC != !YRC)
- return false;
- if (XRC) {
- llvm::FoldingSetNodeID XRCID, YRCID;
- XRC->Profile(XRCID, C, /*Canonical=*/true);
- YRC->Profile(YRCID, C, /*Canonical=*/true);
- if (XRCID != YRCID)
- return false;
- }
-
- auto GetTypeAsWritten = [](const FunctionDecl *FD) {
- // Map to the first declaration that we've already merged into this one.
- // The TSI of redeclarations might not match (due to calling conventions
- // being inherited onto the type but not the TSI), but the TSI type of
- // the first declaration of the function should match across modules.
- FD = FD->getCanonicalDecl();
- return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
- : FD->getType();
- };
- QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
- if (!C.hasSameType(XT, YT)) {
- // We can get functions with different types on the redecl chain in C++17
- // if they have differing exception specifications and at least one of
- // the excpetion specs is unresolved.
- auto *XFPT = XT->getAs<FunctionProtoType>();
- auto *YFPT = YT->getAs<FunctionProtoType>();
- if (C.getLangOpts().CPlusPlus17 && XFPT && YFPT &&
- (isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) ||
- isUnresolvedExceptionSpec(YFPT->getExceptionSpecType())) &&
- C.hasSameFunctionTypeIgnoringExceptionSpec(XT, YT))
- return true;
- return false;
- }
-
- return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
- hasSameOverloadableAttrs(FuncX, FuncY);
- }
-
- // Variables with the same type and linkage match.
- if (const auto *VarX = dyn_cast<VarDecl>(X)) {
- const auto *VarY = cast<VarDecl>(Y);
- if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
- ASTContext &C = VarX->getASTContext();
- if (C.hasSameType(VarX->getType(), VarY->getType()))
- return true;
-
- // We can get decls with different types on the redecl chain. Eg.
- // template <typename T> struct S { static T Var[]; }; // #1
- // template <typename T> T S<T>::Var[sizeof(T)]; // #2
- // Only? happens when completing an incomplete array type. In this case
- // when comparing #1 and #2 we should go through their element type.
- const ArrayType *VarXTy = C.getAsArrayType(VarX->getType());
- const ArrayType *VarYTy = C.getAsArrayType(VarY->getType());
- if (!VarXTy || !VarYTy)
- return false;
- if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType())
- return C.hasSameType(VarXTy->getElementType(), VarYTy->getElementType());
- }
- return false;
- }
+CXXRecordDecl *
+ASTDeclReader::getOrFakePrimaryClassDefinition(ASTReader &Reader,
+ CXXRecordDecl *RD) {
+ // Try to dig out the definition.
+ auto *DD = RD->DefinitionData;
+ if (!DD)
+ DD = RD->getCanonicalDecl()->DefinitionData;
- // Namespaces with the same name and inlinedness match.
- if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(X)) {
- const auto *NamespaceY = cast<NamespaceDecl>(Y);
- return NamespaceX->isInline() == NamespaceY->isInline();
- }
-
- // Identical template names and kinds match if their template parameter lists
- // and patterns match.
- if (const auto *TemplateX = dyn_cast<TemplateDecl>(X)) {
- const auto *TemplateY = cast<TemplateDecl>(Y);
- return isSameEntity(TemplateX->getTemplatedDecl(),
- TemplateY->getTemplatedDecl()) &&
- isSameTemplateParameterList(TemplateX->getASTContext(),
- TemplateX->getTemplateParameters(),
- TemplateY->getTemplateParameters());
- }
-
- // Fields with the same name and the same type match.
- if (const auto *FDX = dyn_cast<FieldDecl>(X)) {
- const auto *FDY = cast<FieldDecl>(Y);
- // FIXME: Also check the bitwidth is odr-equivalent, if any.
- return X->getASTContext().hasSameType(FDX->getType(), FDY->getType());
- }
-
- // Indirect fields with the same target field match.
- if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) {
- const auto *IFDY = cast<IndirectFieldDecl>(Y);
- return IFDX->getAnonField()->getCanonicalDecl() ==
- IFDY->getAnonField()->getCanonicalDecl();
- }
-
- // Enumerators with the same name match.
- if (isa<EnumConstantDecl>(X))
- // FIXME: Also check the value is odr-equivalent.
- return true;
+ // If there's no definition yet, then DC's definition is added by an update
+ // record, but we've not yet loaded that update record. In this case, we
+ // commit to DC being the canonical definition now, and will fix this when
+ // we load the update record.
+ if (!DD) {
+ DD = new (Reader.getContext()) struct CXXRecordDecl::DefinitionData(RD);
+ RD->setCompleteDefinition(true);
+ RD->DefinitionData = DD;
+ RD->getCanonicalDecl()->DefinitionData = DD;
- // Using shadow declarations with the same target match.
- if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) {
- const auto *USY = cast<UsingShadowDecl>(Y);
- return USX->getTargetDecl() == USY->getTargetDecl();
+ // Track that we did this horrible thing so that we can fix it later.
+ Reader.PendingFakeDefinitionData.insert(
+ std::make_pair(DD, ASTReader::PendingFakeDefinitionKind::Fake));
}
- // Using declarations with the same qualifier match. (We already know that
- // the name matches.)
- if (const auto *UX = dyn_cast<UsingDecl>(X)) {
- const auto *UY = cast<UsingDecl>(Y);
- return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
- UX->hasTypename() == UY->hasTypename() &&
- UX->isAccessDeclaration() == UY->isAccessDeclaration();
- }
- if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) {
- const auto *UY = cast<UnresolvedUsingValueDecl>(Y);
- return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
- UX->isAccessDeclaration() == UY->isAccessDeclaration();
- }
- if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X)) {
- return isSameQualifier(
- UX->getQualifier(),
- cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier());
- }
-
- // Using-pack declarations are only created by instantiation, and match if
- // they're instantiated from matching UnresolvedUsing...Decls.
- if (const auto *UX = dyn_cast<UsingPackDecl>(X)) {
- return declaresSameEntity(
- UX->getInstantiatedFromUsingDecl(),
- cast<UsingPackDecl>(Y)->getInstantiatedFromUsingDecl());
- }
-
- // Namespace alias definitions with the same target match.
- if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) {
- const auto *NAY = cast<NamespaceAliasDecl>(Y);
- return NAX->getNamespace()->Equals(NAY->getNamespace());
- }
-
- return false;
+ return DD->Definition;
}
/// Find the context in which we should search for previous declarations when
@@ -3291,34 +3294,19 @@ DeclContext *ASTDeclReader::getPrimaryContextForMerging(ASTReader &Reader,
if (auto *ND = dyn_cast<NamespaceDecl>(DC))
return ND->getOriginalNamespace();
- if (auto *RD = dyn_cast<CXXRecordDecl>(DC)) {
- // Try to dig out the definition.
- auto *DD = RD->DefinitionData;
- if (!DD)
- DD = RD->getCanonicalDecl()->DefinitionData;
-
- // If there's no definition yet, then DC's definition is added by an update
- // record, but we've not yet loaded that update record. In this case, we
- // commit to DC being the canonical definition now, and will fix this when
- // we load the update record.
- if (!DD) {
- DD = new (Reader.getContext()) struct CXXRecordDecl::DefinitionData(RD);
- RD->setCompleteDefinition(true);
- RD->DefinitionData = DD;
- RD->getCanonicalDecl()->DefinitionData = DD;
-
- // Track that we did this horrible thing so that we can fix it later.
- Reader.PendingFakeDefinitionData.insert(
- std::make_pair(DD, ASTReader::PendingFakeDefinitionKind::Fake));
- }
+ if (auto *RD = dyn_cast<CXXRecordDecl>(DC))
+ return getOrFakePrimaryClassDefinition(Reader, RD);
- return DD->Definition;
- }
+ if (auto *RD = dyn_cast<RecordDecl>(DC))
+ return RD->getDefinition();
if (auto *ED = dyn_cast<EnumDecl>(DC))
return ED->getASTContext().getLangOpts().CPlusPlus? ED->getDefinition()
: nullptr;
+ if (auto *OID = dyn_cast<ObjCInterfaceDecl>(DC))
+ return OID->getDefinition();
+
// We can see the TU here only if we have no Sema object. In that case,
// there's no TU scope to look in, so using the DC alone is sufficient.
if (auto *TU = dyn_cast<TranslationUnitDecl>(DC))
@@ -3386,6 +3374,8 @@ ASTDeclReader::getPrimaryDCForAnonymousDecl(DeclContext *LexicalDC) {
if (auto *RD = dyn_cast<CXXRecordDecl>(LexicalDC)) {
auto *DD = RD->getCanonicalDecl()->DefinitionData;
return DD ? DD->Definition : nullptr;
+ } else if (auto *OID = dyn_cast<ObjCInterfaceDecl>(LexicalDC)) {
+ return OID->getCanonicalDecl()->getDefinition();
}
// For anything else, walk its merged redeclarations looking for a definition.
@@ -3398,6 +3388,9 @@ ASTDeclReader::getPrimaryDCForAnonymousDecl(DeclContext *LexicalDC) {
if (auto *MD = dyn_cast<ObjCMethodDecl>(D))
if (MD->isThisDeclarationADefinition())
return MD;
+ if (auto *RD = dyn_cast<RecordDecl>(D))
+ if (RD->isThisDeclarationADefinition())
+ return RD;
}
// No merged definition yet.
@@ -3456,12 +3449,13 @@ ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) {
return Result;
}
+ ASTContext &C = Reader.getContext();
DeclContext *DC = D->getDeclContext()->getRedeclContext();
if (TypedefNameForLinkage) {
auto It = Reader.ImportedTypedefNamesForLinkage.find(
std::make_pair(DC, TypedefNameForLinkage));
if (It != Reader.ImportedTypedefNamesForLinkage.end())
- if (isSameEntity(It->second, D))
+ if (C.isSameEntity(It->second, D))
return FindExistingResult(Reader, D, It->second, AnonymousDeclNumber,
TypedefNameForLinkage);
// Go on to check in other places in case an existing typedef name
@@ -3473,7 +3467,7 @@ ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) {
// in its context by number.
if (auto *Existing = getAnonymousDeclForMerging(
Reader, D->getLexicalDeclContext(), AnonymousDeclNumber))
- if (isSameEntity(Existing, D))
+ if (C.isSameEntity(Existing, D))
return FindExistingResult(Reader, D, Existing, AnonymousDeclNumber,
TypedefNameForLinkage);
} else if (DC->isTranslationUnit() &&
@@ -3505,7 +3499,7 @@ ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) {
IEnd = IdResolver.end();
I != IEnd; ++I) {
if (NamedDecl *Existing = getDeclForMerging(*I, TypedefNameForLinkage))
- if (isSameEntity(Existing, D))
+ if (C.isSameEntity(Existing, D))
return FindExistingResult(Reader, D, Existing, AnonymousDeclNumber,
TypedefNameForLinkage);
}
@@ -3513,7 +3507,7 @@ ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) {
DeclContext::lookup_result R = MergeDC->noload_lookup(Name);
for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E; ++I) {
if (NamedDecl *Existing = getDeclForMerging(*I, TypedefNameForLinkage))
- if (isSameEntity(Existing, D))
+ if (C.isSameEntity(Existing, D))
return FindExistingResult(Reader, D, Existing, AnonymousDeclNumber,
TypedefNameForLinkage);
}
@@ -3525,11 +3519,14 @@ ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) {
// If this declaration is from a merged context, make a note that we need to
// check that the canonical definition of that context contains the decl.
//
+ // Note that we don't perform ODR checks for decls from the global module
+ // fragment.
+ //
// FIXME: We should do something similar if we merge two definitions of the
// same template specialization into the same CXXRecordDecl.
auto MergedDCIt = Reader.MergedDeclContexts.find(D->getLexicalDeclContext());
if (MergedDCIt != Reader.MergedDeclContexts.end() &&
- MergedDCIt->second == D->getDeclContext())
+ !D->shouldSkipCheckingODR() && MergedDCIt->second == D->getDeclContext())
Reader.PendingOdrMergeChecks.push_back(D);
return FindExistingResult(Reader, D, /*Existing=*/nullptr,
@@ -3573,6 +3570,13 @@ void ASTDeclReader::mergeInheritableAttributes(ASTReader &Reader, Decl *D,
NewAttr->setInherited(true);
D->addAttr(NewAttr);
}
+
+ const auto *AA = Previous->getAttr<AvailabilityAttr>();
+ if (AA && !D->hasAttr<AvailabilityAttr>()) {
+ NewAttr = AA->clone(Context);
+ NewAttr->setInherited(true);
+ D->addAttr(NewAttr);
+ }
}
template<typename DeclT>
@@ -3817,7 +3821,7 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
Expected<unsigned> MaybeDeclCode = Record.readRecord(DeclsCursor, Code);
if (!MaybeDeclCode)
llvm::report_fatal_error(
- "ASTReader::readDeclRecord failed reading decl code: " +
+ Twine("ASTReader::readDeclRecord failed reading decl code: ") +
toString(MaybeDeclCode.takeError()));
switch ((DeclCode)MaybeDeclCode.get()) {
case DECL_CONTEXT_LEXICAL:
@@ -3928,9 +3932,6 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
case DECL_VAR_TEMPLATE_PARTIAL_SPECIALIZATION:
D = VarTemplatePartialSpecializationDecl::CreateDeserialized(Context, ID);
break;
- case DECL_CLASS_SCOPE_FUNCTION_SPECIALIZATION:
- D = ClassScopeFunctionSpecializationDecl::CreateDeserialized(Context, ID);
- break;
case DECL_FUNCTION_TEMPLATE:
D = FunctionTemplateDecl::CreateDeserialized(Context, ID);
break;
@@ -4029,6 +4030,9 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
case DECL_FILE_SCOPE_ASM:
D = FileScopeAsmDecl::CreateDeserialized(Context, ID);
break;
+ case DECL_TOP_LEVEL_STMT_DECL:
+ D = TopLevelStmtDecl::CreateDeserialized(Context, ID);
+ break;
case DECL_BLOCK:
D = BlockDecl::CreateDeserialized(Context, ID);
break;
@@ -4038,6 +4042,9 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
case DECL_MS_GUID:
D = MSGuidDecl::CreateDeserialized(Context, ID);
break;
+ case DECL_UNNAMED_GLOBAL_CONSTANT:
+ D = UnnamedGlobalConstantDecl::CreateDeserialized(Context, ID);
+ break;
case DECL_TEMPLATE_PARAM_OBJECT:
D = TemplateParamObjectDecl::CreateDeserialized(Context, ID);
break;
@@ -4103,6 +4110,13 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
case DECL_OBJC_TYPE_PARAM:
D = ObjCTypeParamDecl::CreateDeserialized(Context, ID);
break;
+ case DECL_HLSL_BUFFER:
+ D = HLSLBufferDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_IMPLICIT_CONCEPT_SPECIALIZATION:
+ D = ImplicitConceptSpecializationDecl::CreateDeserialized(Context, ID,
+ Record.readInt());
+ break;
}
assert(D && "Unknown declaration reading AST file");
@@ -4156,8 +4170,7 @@ void ASTReader::PassInterestingDeclsToConsumer() {
// Guard variable to avoid recursively redoing the process of passing
// decls to consumer.
- SaveAndRestore<bool> GuardPassingDeclsToConsumer(PassingDeclsToConsumer,
- true);
+ SaveAndRestore GuardPassingDeclsToConsumer(PassingDeclsToConsumer, true);
// Ensure that we've loaded all potentially-interesting declarations
// that need to be eagerly loaded.
@@ -4202,12 +4215,12 @@ void ASTReader::loadDeclUpdateRecords(PendingUpdateRecord &Record) {
if (llvm::Error JumpFailed = Cursor.JumpToBit(Offset))
// FIXME don't do a fatal error.
llvm::report_fatal_error(
- "ASTReader::loadDeclUpdateRecords failed jumping: " +
+ Twine("ASTReader::loadDeclUpdateRecords failed jumping: ") +
toString(std::move(JumpFailed)));
Expected<unsigned> MaybeCode = Cursor.ReadCode();
if (!MaybeCode)
llvm::report_fatal_error(
- "ASTReader::loadDeclUpdateRecords failed reading code: " +
+ Twine("ASTReader::loadDeclUpdateRecords failed reading code: ") +
toString(MaybeCode.takeError()));
unsigned Code = MaybeCode.get();
ASTRecordReader Record(*this, *F);
@@ -4216,7 +4229,7 @@ void ASTReader::loadDeclUpdateRecords(PendingUpdateRecord &Record) {
"Expected DECL_UPDATES record!");
else
llvm::report_fatal_error(
- "ASTReader::loadDeclUpdateRecords failed reading rec code: " +
+ Twine("ASTReader::loadDeclUpdateRecords failed reading rec code: ") +
toString(MaybeCode.takeError()));
ASTDeclReader Reader(*this, Record, RecordLocation(F, Offset), ID,
@@ -4235,7 +4248,7 @@ void ASTReader::loadDeclUpdateRecords(PendingUpdateRecord &Record) {
}
// Add the lazy specializations to the template.
assert((PendingLazySpecializationIDs.empty() || isa<ClassTemplateDecl>(D) ||
- isa<FunctionTemplateDecl>(D) || isa<VarTemplateDecl>(D)) &&
+ isa<FunctionTemplateDecl, VarTemplateDecl>(D)) &&
"Must not have pending specializations");
if (auto *CTD = dyn_cast<ClassTemplateDecl>(D))
ASTDeclReader::AddLazySpecializations(CTD, PendingLazySpecializationIDs);
@@ -4283,14 +4296,14 @@ void ASTReader::loadPendingDeclChain(Decl *FirstLocal, uint64_t LocalOffset) {
SavedStreamPosition SavedPosition(Cursor);
if (llvm::Error JumpFailed = Cursor.JumpToBit(LocalOffset))
llvm::report_fatal_error(
- "ASTReader::loadPendingDeclChain failed jumping: " +
+ Twine("ASTReader::loadPendingDeclChain failed jumping: ") +
toString(std::move(JumpFailed)));
RecordData Record;
Expected<unsigned> MaybeCode = Cursor.ReadCode();
if (!MaybeCode)
llvm::report_fatal_error(
- "ASTReader::loadPendingDeclChain failed reading code: " +
+ Twine("ASTReader::loadPendingDeclChain failed reading code: ") +
toString(MaybeCode.takeError()));
unsigned Code = MaybeCode.get();
if (Expected<unsigned> MaybeRecCode = Cursor.readRecord(Code, Record))
@@ -4298,7 +4311,7 @@ void ASTReader::loadPendingDeclChain(Decl *FirstLocal, uint64_t LocalOffset) {
"expected LOCAL_REDECLARATIONS record!");
else
llvm::report_fatal_error(
- "ASTReader::loadPendingDeclChain failed reading rec code: " +
+ Twine("ASTReader::loadPendingDeclChain failed reading rec code: ") +
toString(MaybeCode.takeError()));
// FIXME: We have several different dispatches on decl kind here; maybe
@@ -4333,23 +4346,22 @@ namespace {
// Check for duplicate categories.
if (Cat->getDeclName()) {
ObjCCategoryDecl *&Existing = NameCategoryMap[Cat->getDeclName()];
- if (Existing &&
- Reader.getOwningModuleFile(Existing)
- != Reader.getOwningModuleFile(Cat)) {
- // FIXME: We should not warn for duplicates in diamond:
- //
- // MT //
- // / \ //
- // ML MR //
- // \ / //
- // MB //
- //
- // If there are duplicates in ML/MR, there will be warning when
- // creating MB *and* when importing MB. We should not warn when
- // importing.
- Reader.Diag(Cat->getLocation(), diag::warn_dup_category_def)
- << Interface->getDeclName() << Cat->getDeclName();
- Reader.Diag(Existing->getLocation(), diag::note_previous_definition);
+ if (Existing && Reader.getOwningModuleFile(Existing) !=
+ Reader.getOwningModuleFile(Cat)) {
+ llvm::DenseSet<std::pair<Decl *, Decl *>> NonEquivalentDecls;
+ StructuralEquivalenceContext Ctx(
+ Cat->getASTContext(), Existing->getASTContext(),
+ NonEquivalentDecls, StructuralEquivalenceKind::Default,
+ /*StrictTypeSpelling =*/false,
+ /*Complain =*/false,
+ /*ErrorOnTagTypeMismatch =*/true);
+ if (!Ctx.IsEquivalent(Cat, Existing)) {
+ // Warn only if the categories with the same name are different.
+ Reader.Diag(Cat->getLocation(), diag::warn_dup_category_def)
+ << Interface->getDeclName() << Cat->getDeclName();
+ Reader.Diag(Existing->getLocation(),
+ diag::note_previous_definition);
+ }
} else if (!Existing) {
// Record this category.
Existing = Cat;
@@ -4458,13 +4470,9 @@ void ASTDeclReader::UpdateDecl(Decl *D,
switch ((DeclUpdateKind)Record.readInt()) {
case UPD_CXX_ADDED_IMPLICIT_MEMBER: {
auto *RD = cast<CXXRecordDecl>(D);
- // FIXME: If we also have an update record for instantiating the
- // definition of D, we need that to happen before we get here.
Decl *MD = Record.readDecl();
assert(MD && "couldn't read decl from update record");
- // FIXME: We should call addHiddenDecl instead, to add the member
- // to its DeclContext.
- RD->addedMember(MD);
+ Reader.PendingAddedClassMembers.push_back({RD, MD});
break;
}
@@ -4492,15 +4500,7 @@ void ASTDeclReader::UpdateDecl(Decl *D,
auto *VD = cast<VarDecl>(D);
VD->NonParmVarDeclBits.IsInline = Record.readInt();
VD->NonParmVarDeclBits.IsInlineSpecified = Record.readInt();
- uint64_t Val = Record.readInt();
- if (Val && !VD->getInit()) {
- VD->setInit(Record.readExpr());
- if (Val != 1) {
- EvaluatedStmt *Eval = VD->ensureEvaluatedStmt();
- Eval->HasConstantInitialization = (Val & 2) != 0;
- Eval->HasConstantDestruction = (Val & 4) != 0;
- }
- }
+ ReadVarDeclInit(VD);
break;
}
@@ -4509,7 +4509,9 @@ void ASTDeclReader::UpdateDecl(Decl *D,
if (auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(D)) {
VTSD->setPointOfInstantiation(POI);
} else if (auto *VD = dyn_cast<VarDecl>(D)) {
- VD->getMemberSpecializationInfo()->setPointOfInstantiation(POI);
+ MemberSpecializationInfo *MSInfo = VD->getMemberSpecializationInfo();
+ assert(MSInfo && "No member specialization information");
+ MSInfo->setPointOfInstantiation(POI);
} else {
auto *FD = cast<FunctionDecl>(D);
if (auto *FTSInfo = FD->TemplateOrSpecialization
@@ -4543,7 +4545,7 @@ void ASTDeclReader::UpdateDecl(Decl *D,
// Only apply the update if the field still has an uninstantiated
// default member initializer.
- if (FD->hasInClassInitializer() && !FD->getInClassInitializer()) {
+ if (FD->hasInClassInitializer() && !FD->hasNonNullInClassInitializer()) {
if (DefaultInit)
FD->setInClassInitializer(DefaultInit);
else
@@ -4584,7 +4586,7 @@ void ASTDeclReader::UpdateDecl(Decl *D,
!Reader.PendingFakeDefinitionData.count(OldDD));
RD->setParamDestroyedInCallee(Record.readInt());
RD->setArgPassingRestrictions(
- (RecordDecl::ArgPassingKind)Record.readInt());
+ static_cast<RecordArgPassingKind>(Record.readInt()));
ReadCXXRecordDefinition(RD, /*Update*/true);
// Visible update is handled separately.
@@ -4621,7 +4623,7 @@ void ASTDeclReader::UpdateDecl(Decl *D,
}
}
- RD->setTagKind((TagTypeKind)Record.readInt());
+ RD->setTagKind(static_cast<TagTypeKind>(Record.readInt()));
RD->setLocation(readSourceLocation());
RD->setLocStart(readSourceLocation());
RD->setBraceRange(readSourceRange());
@@ -4697,19 +4699,18 @@ void ASTDeclReader::UpdateDecl(Decl *D,
break;
case UPD_DECL_MARKED_OPENMP_THREADPRIVATE:
- D->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
- Reader.getContext(), readSourceRange(),
- AttributeCommonInfo::AS_Pragma));
+ D->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(Reader.getContext(),
+ readSourceRange()));
break;
case UPD_DECL_MARKED_OPENMP_ALLOCATE: {
auto AllocatorKind =
static_cast<OMPAllocateDeclAttr::AllocatorTypeTy>(Record.readInt());
Expr *Allocator = Record.readExpr();
+ Expr *Alignment = Record.readExpr();
SourceRange SR = readSourceRange();
D->addAttr(OMPAllocateDeclAttr::CreateImplicit(
- Reader.getContext(), AllocatorKind, Allocator, SR,
- AttributeCommonInfo::AS_Pragma));
+ Reader.getContext(), AllocatorKind, Allocator, Alignment, SR));
break;
}
@@ -4725,10 +4726,12 @@ void ASTDeclReader::UpdateDecl(Decl *D,
case UPD_DECL_MARKED_OPENMP_DECLARETARGET: {
auto MapType = Record.readEnum<OMPDeclareTargetDeclAttr::MapTypeTy>();
auto DevType = Record.readEnum<OMPDeclareTargetDeclAttr::DevTypeTy>();
+ Expr *IndirectE = Record.readExpr();
+ bool Indirect = Record.readBool();
unsigned Level = Record.readInt();
D->addAttr(OMPDeclareTargetDeclAttr::CreateImplicit(
- Reader.getContext(), MapType, DevType, Level, readSourceRange(),
- AttributeCommonInfo::AS_Pragma));
+ Reader.getContext(), MapType, DevType, IndirectE, Indirect, Level,
+ readSourceRange()));
break;
}
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReaderInternals.h b/contrib/llvm-project/clang/lib/Serialization/ASTReaderInternals.h
index 265a77fdb215..25a46ddabcb7 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReaderInternals.h
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReaderInternals.h
@@ -30,7 +30,6 @@ class ASTReader;
class FileEntry;
struct HeaderFileInfo;
class HeaderSearch;
-class IdentifierTable;
class ObjCMethodDecl;
namespace serialization {
@@ -248,7 +247,7 @@ class HeaderFileInfoTrait {
const char *FrameworkStrings;
public:
- using external_key_type = const FileEntry *;
+ using external_key_type = FileEntryRef;
struct internal_key_type {
off_t Size;
@@ -268,7 +267,7 @@ public:
: Reader(Reader), M(M), HS(HS), FrameworkStrings(FrameworkStrings) {}
static hash_value_type ComputeHash(internal_key_ref ikey);
- internal_key_type GetInternalKey(const FileEntry *FE);
+ internal_key_type GetInternalKey(external_key_type ekey);
bool EqualKey(internal_key_ref a, internal_key_ref b);
static std::pair<unsigned, unsigned>
@@ -277,6 +276,9 @@ public:
static internal_key_type ReadKey(const unsigned char *d, unsigned);
data_type ReadData(internal_key_ref,const unsigned char *d, unsigned DataLen);
+
+private:
+ const FileEntry *getFile(const internal_key_type &Key);
};
/// The on-disk hash table used for known header files.
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp
index b100f946f558..85ecfa1a1a0b 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -61,6 +61,7 @@
#include <algorithm>
#include <cassert>
#include <cstdint>
+#include <optional>
#include <string>
using namespace clang;
@@ -72,6 +73,8 @@ namespace clang {
ASTRecordReader &Record;
llvm::BitstreamCursor &DeclsCursor;
+ std::optional<BitsUnpacker> CurrentUnpackingBits;
+
SourceLocation readSourceLocation() {
return Record.readSourceLocation();
}
@@ -107,18 +110,16 @@ namespace clang {
/// The number of record fields required for the Expr class
/// itself.
- static const unsigned NumExprFields =
- NumStmtFields + llvm::BitWidth<ExprDependence> + 3;
+ static const unsigned NumExprFields = NumStmtFields + 2;
+
+ /// The number of bits required for the packing bits for the Expr class.
+ static const unsigned NumExprBits = 10;
/// Read and initialize a ExplicitTemplateArgumentList structure.
void ReadTemplateKWAndArgsInfo(ASTTemplateKWAndArgsInfo &Args,
TemplateArgumentLoc *ArgsLocArray,
unsigned NumTemplateArgs);
- /// Read and initialize a ExplicitTemplateArgumentList structure.
- void ReadExplicitTemplateArgumentList(ASTTemplateArgumentListInfo &ArgList,
- unsigned NumTemplateArgs);
-
void VisitStmt(Stmt *S);
#define STMT(Type, Base) \
void Visit##Type(Type *);
@@ -153,10 +154,15 @@ void ASTStmtReader::VisitCompoundStmt(CompoundStmt *S) {
VisitStmt(S);
SmallVector<Stmt *, 16> Stmts;
unsigned NumStmts = Record.readInt();
+ unsigned HasFPFeatures = Record.readInt();
+ assert(S->hasStoredFPFeatures() == HasFPFeatures);
while (NumStmts--)
Stmts.push_back(Record.readSubStmt());
S->setStmts(Stmts);
- S->CompoundStmtBits.LBraceLoc = readSourceLocation();
+ if (HasFPFeatures)
+ S->setStoredFPFeatures(
+ FPOptionsOverride::getFromOpaqueInt(Record.readInt()));
+ S->LBraceLoc = readSourceLocation();
S->RBraceLoc = readSourceLocation();
}
@@ -213,17 +219,19 @@ void ASTStmtReader::VisitAttributedStmt(AttributedStmt *S) {
void ASTStmtReader::VisitIfStmt(IfStmt *S) {
VisitStmt(S);
- S->setConstexpr(Record.readInt());
- bool HasElse = Record.readInt();
- bool HasVar = Record.readInt();
- bool HasInit = Record.readInt();
+ CurrentUnpackingBits.emplace(Record.readInt());
+ bool HasElse = CurrentUnpackingBits->getNextBit();
+ bool HasVar = CurrentUnpackingBits->getNextBit();
+ bool HasInit = CurrentUnpackingBits->getNextBit();
+
+ S->setStatementKind(static_cast<IfStatementKind>(Record.readInt()));
S->setCond(Record.readSubExpr());
S->setThen(Record.readSubStmt());
if (HasElse)
S->setElse(Record.readSubStmt());
if (HasVar)
- S->setConditionVariable(Record.getContext(), readDeclAs<VarDecl>());
+ S->setConditionVariableDeclStmt(cast<DeclStmt>(Record.readSubStmt()));
if (HasInit)
S->setInit(Record.readSubStmt());
@@ -248,7 +256,7 @@ void ASTStmtReader::VisitSwitchStmt(SwitchStmt *S) {
if (HasInit)
S->setInit(Record.readSubStmt());
if (HasVar)
- S->setConditionVariable(Record.getContext(), readDeclAs<VarDecl>());
+ S->setConditionVariableDeclStmt(cast<DeclStmt>(Record.readSubStmt()));
S->setSwitchLoc(readSourceLocation());
S->setLParenLoc(readSourceLocation());
@@ -274,7 +282,7 @@ void ASTStmtReader::VisitWhileStmt(WhileStmt *S) {
S->setCond(Record.readSubExpr());
S->setBody(Record.readSubStmt());
if (HasVar)
- S->setConditionVariable(Record.getContext(), readDeclAs<VarDecl>());
+ S->setConditionVariableDeclStmt(cast<DeclStmt>(Record.readSubStmt()));
S->setWhileLoc(readSourceLocation());
S->setLParenLoc(readSourceLocation());
@@ -294,7 +302,7 @@ void ASTStmtReader::VisitForStmt(ForStmt *S) {
VisitStmt(S);
S->setInit(Record.readSubStmt());
S->setCond(Record.readSubExpr());
- S->setConditionVariable(Record.getContext(), readDeclAs<VarDecl>());
+ S->setConditionVariableDeclStmt(cast_or_null<DeclStmt>(Record.readSubStmt()));
S->setInc(Record.readSubExpr());
S->setBody(Record.readSubStmt());
S->setForLoc(readSourceLocation());
@@ -395,8 +403,10 @@ void ASTStmtReader::VisitGCCAsmStmt(GCCAsmStmt *S) {
Clobbers.push_back(cast_or_null<StringLiteral>(Record.readSubStmt()));
// Labels
- for (unsigned I = 0, N = NumLabels; I != N; ++I)
+ for (unsigned I = 0, N = NumLabels; I != N; ++I) {
+ Names.push_back(Record.readIdentifier());
Exprs.push_back(Record.readSubStmt());
+ }
S->setOutputsAndInputsAndClobbers(Record.getContext(),
Names.data(), Constraints.data(),
@@ -520,29 +530,15 @@ void ASTStmtReader::VisitCapturedStmt(CapturedStmt *S) {
void ASTStmtReader::VisitExpr(Expr *E) {
VisitStmt(E);
- E->setType(Record.readType());
+ CurrentUnpackingBits.emplace(Record.readInt());
+ E->setDependence(static_cast<ExprDependence>(
+ CurrentUnpackingBits->getNextBits(/*Width=*/5)));
+ E->setValueKind(static_cast<ExprValueKind>(
+ CurrentUnpackingBits->getNextBits(/*Width=*/2)));
+ E->setObjectKind(static_cast<ExprObjectKind>(
+ CurrentUnpackingBits->getNextBits(/*Width=*/3)));
- // FIXME: write and read all DependentFlags with a single call.
- bool TypeDependent = Record.readInt();
- bool ValueDependent = Record.readInt();
- bool InstantiationDependent = Record.readInt();
- bool ContainsUnexpandedTemplateParameters = Record.readInt();
- bool ContainsErrors = Record.readInt();
- auto Deps = ExprDependence::None;
- if (TypeDependent)
- Deps |= ExprDependence::Type;
- if (ValueDependent)
- Deps |= ExprDependence::Value;
- if (InstantiationDependent)
- Deps |= ExprDependence::Instantiation;
- if (ContainsUnexpandedTemplateParameters)
- Deps |= ExprDependence::UnexpandedPack;
- if (ContainsErrors)
- Deps |= ExprDependence::Error;
- E->setDependence(Deps);
-
- E->setValueKind(static_cast<ExprValueKind>(Record.readInt()));
- E->setObjectKind(static_cast<ExprObjectKind>(Record.readInt()));
+ E->setType(Record.readType());
assert(Record.getIdx() == NumExprFields &&
"Incorrect expression field count");
}
@@ -550,8 +546,8 @@ void ASTStmtReader::VisitExpr(Expr *E) {
void ASTStmtReader::VisitConstantExpr(ConstantExpr *E) {
VisitExpr(E);
- auto StorageKind = Record.readInt();
- assert(E->ConstantExprBits.ResultKind == StorageKind && "Wrong ResultKind!");
+ auto StorageKind = static_cast<ConstantResultStorageKind>(Record.readInt());
+ assert(E->getResultStorageKind() == StorageKind && "Wrong ResultKind!");
E->ConstantExprBits.APValueKind = Record.readInt();
E->ConstantExprBits.IsUnsigned = Record.readInt();
@@ -560,22 +556,20 @@ void ASTStmtReader::VisitConstantExpr(ConstantExpr *E) {
E->ConstantExprBits.IsImmediateInvocation = Record.readInt();
switch (StorageKind) {
- case ConstantExpr::RSK_None:
+ case ConstantResultStorageKind::None:
break;
- case ConstantExpr::RSK_Int64:
+ case ConstantResultStorageKind::Int64:
E->Int64Result() = Record.readInt();
break;
- case ConstantExpr::RSK_APValue:
+ case ConstantResultStorageKind::APValue:
E->APValueResult() = Record.readAPValue();
if (E->APValueResult().needsCleanup()) {
E->ConstantExprBits.HasCleanup = true;
Record.getContext().addDestruction(&E->APValueResult());
}
break;
- default:
- llvm_unreachable("unexpected ResultKind!");
}
E->setSubExpr(Record.readSubExpr());
@@ -596,6 +590,7 @@ void ASTStmtReader::VisitPredefinedExpr(PredefinedExpr *E) {
bool HasFunctionName = Record.readInt();
E->PredefinedExprBits.HasFunctionName = HasFunctionName;
E->PredefinedExprBits.Kind = Record.readInt();
+ E->PredefinedExprBits.IsTransparent = Record.readInt();
E->setLocation(readSourceLocation());
if (HasFunctionName)
E->setFunctionName(cast<StringLiteral>(Record.readSubExpr()));
@@ -604,12 +599,18 @@ void ASTStmtReader::VisitPredefinedExpr(PredefinedExpr *E) {
void ASTStmtReader::VisitDeclRefExpr(DeclRefExpr *E) {
VisitExpr(E);
- E->DeclRefExprBits.HasQualifier = Record.readInt();
- E->DeclRefExprBits.HasFoundDecl = Record.readInt();
- E->DeclRefExprBits.HasTemplateKWAndArgsInfo = Record.readInt();
- E->DeclRefExprBits.HadMultipleCandidates = Record.readInt();
- E->DeclRefExprBits.RefersToEnclosingVariableOrCapture = Record.readInt();
- E->DeclRefExprBits.NonOdrUseReason = Record.readInt();
+ CurrentUnpackingBits.emplace(Record.readInt());
+ E->DeclRefExprBits.HadMultipleCandidates = CurrentUnpackingBits->getNextBit();
+ E->DeclRefExprBits.RefersToEnclosingVariableOrCapture =
+ CurrentUnpackingBits->getNextBit();
+ E->DeclRefExprBits.NonOdrUseReason =
+ CurrentUnpackingBits->getNextBits(/*Width=*/2);
+ E->DeclRefExprBits.IsImmediateEscalating = CurrentUnpackingBits->getNextBit();
+ E->DeclRefExprBits.HasFoundDecl = CurrentUnpackingBits->getNextBit();
+ E->DeclRefExprBits.HasQualifier = CurrentUnpackingBits->getNextBit();
+ E->DeclRefExprBits.HasTemplateKWAndArgsInfo =
+ CurrentUnpackingBits->getNextBit();
+ E->DeclRefExprBits.CapturedByCopyInLambdaWithExplicitObjectParameter = false;
unsigned NumTemplateArgs = 0;
if (E->hasTemplateKWAndArgsInfo())
NumTemplateArgs = Record.readInt();
@@ -695,7 +696,7 @@ void ASTStmtReader::VisitCharacterLiteral(CharacterLiteral *E) {
VisitExpr(E);
E->setValue(Record.readInt());
E->setLocation(readSourceLocation());
- E->setKind(static_cast<CharacterLiteral::CharacterKind>(Record.readInt()));
+ E->setKind(static_cast<CharacterLiteralKind>(Record.readInt()));
}
void ASTStmtReader::VisitParenExpr(ParenExpr *E) {
@@ -717,12 +718,13 @@ void ASTStmtReader::VisitParenListExpr(ParenListExpr *E) {
void ASTStmtReader::VisitUnaryOperator(UnaryOperator *E) {
VisitExpr(E);
- bool hasFP_Features = Record.readInt();
+ bool hasFP_Features = CurrentUnpackingBits->getNextBit();
assert(hasFP_Features == E->hasStoredFPFeatures());
E->setSubExpr(Record.readSubExpr());
- E->setOpcode((UnaryOperator::Opcode)Record.readInt());
+ E->setOpcode(
+ (UnaryOperator::Opcode)CurrentUnpackingBits->getNextBits(/*Width=*/5));
E->setOperatorLoc(readSourceLocation());
- E->setCanOverflow(Record.readInt());
+ E->setCanOverflow(CurrentUnpackingBits->getNextBit());
if (hasFP_Features)
E->setStoredFPFeatures(
FPOptionsOverride::getFromOpaqueInt(Record.readInt()));
@@ -787,6 +789,7 @@ static ConstraintSatisfaction
readConstraintSatisfaction(ASTRecordReader &Record) {
ConstraintSatisfaction Satisfaction;
Satisfaction.IsSatisfied = Record.readInt();
+ Satisfaction.ContainsErrors = Record.readInt();
if (!Satisfaction.IsSatisfied) {
unsigned NumDetailRecords = Record.readInt();
for (unsigned i = 0; i != NumDetailRecords; ++i) {
@@ -808,17 +811,9 @@ readConstraintSatisfaction(ASTRecordReader &Record) {
void ASTStmtReader::VisitConceptSpecializationExpr(
ConceptSpecializationExpr *E) {
VisitExpr(E);
- unsigned NumTemplateArgs = Record.readInt();
- E->NestedNameSpec = Record.readNestedNameSpecifierLoc();
- E->TemplateKWLoc = Record.readSourceLocation();
- E->ConceptName = Record.readDeclarationNameInfo();
- E->NamedConcept = readDeclAs<ConceptDecl>();
- E->FoundDecl = Record.readDeclAs<NamedDecl>();
- E->ArgsAsWritten = Record.readASTTemplateArgumentListInfo();
- llvm::SmallVector<TemplateArgument, 4> Args;
- for (unsigned I = 0; I < NumTemplateArgs; ++I)
- Args.push_back(Record.readTemplateArgument());
- E->setTemplateArguments(Args);
+ E->SpecDecl = Record.readDeclAs<ImplicitConceptSpecializationDecl>();
+ if (Record.readBool())
+ E->ConceptRef = Record.readConceptReference();
E->Satisfaction = E->isValueDependent() ? nullptr :
ASTConstraintSatisfaction::Create(Record.getContext(),
readConstraintSatisfaction(Record));
@@ -875,7 +870,7 @@ void ASTStmtReader::VisitRequiresExpr(RequiresExpr *E) {
} else
E = Record.readExpr();
- llvm::Optional<concepts::ExprRequirement::ReturnTypeRequirement> Req;
+ std::optional<concepts::ExprRequirement::ReturnTypeRequirement> Req;
ConceptSpecializationExpr *SubstitutedConstraintExpr = nullptr;
SourceLocation NoexceptLoc;
if (RK == concepts::Requirement::RK_Simple) {
@@ -913,9 +908,17 @@ void ASTStmtReader::VisitRequiresExpr(RequiresExpr *E) {
std::move(*Req));
} break;
case concepts::Requirement::RK_Nested: {
- if (/* IsSubstitutionDiagnostic */Record.readInt()) {
+ bool HasInvalidConstraint = Record.readInt();
+ if (HasInvalidConstraint) {
+ std::string InvalidConstraint = Record.readString();
+ char *InvalidConstraintBuf =
+ new (Record.getContext()) char[InvalidConstraint.size()];
+ std::copy(InvalidConstraint.begin(), InvalidConstraint.end(),
+ InvalidConstraintBuf);
R = new (Record.getContext()) concepts::NestedRequirement(
- readSubstitutionDiagnostic(Record));
+ Record.getContext(),
+ StringRef(InvalidConstraintBuf, InvalidConstraint.size()),
+ readConstraintSatisfaction(Record));
break;
}
Expr *E = Record.readExpr();
@@ -933,6 +936,8 @@ void ASTStmtReader::VisitRequiresExpr(RequiresExpr *E) {
}
std::copy(Requirements.begin(), Requirements.end(),
E->getTrailingObjects<concepts::Requirement *>());
+ E->LParenLoc = Record.readSourceLocation();
+ E->RParenLoc = Record.readSourceLocation();
E->RBraceLoc = Record.readSourceLocation();
}
@@ -1007,14 +1012,18 @@ void ASTStmtReader::VisitOMPIteratorExpr(OMPIteratorExpr *E) {
void ASTStmtReader::VisitCallExpr(CallExpr *E) {
VisitExpr(E);
+
unsigned NumArgs = Record.readInt();
- bool HasFPFeatures = Record.readInt();
+ CurrentUnpackingBits.emplace(Record.readInt());
+ E->setADLCallKind(
+ static_cast<CallExpr::ADLCallKind>(CurrentUnpackingBits->getNextBit()));
+ bool HasFPFeatures = CurrentUnpackingBits->getNextBit();
assert((NumArgs == E->getNumArgs()) && "Wrong NumArgs!");
E->setRParenLoc(readSourceLocation());
E->setCallee(Record.readSubExpr());
for (unsigned I = 0; I != NumArgs; ++I)
E->setArg(I, Record.readSubExpr());
- E->setADLCallKind(static_cast<CallExpr::ADLCallKind>(Record.readInt()));
+
if (HasFPFeatures)
E->setStoredFPFeatures(
FPOptionsOverride::getFromOpaqueInt(Record.readInt()));
@@ -1027,27 +1036,29 @@ void ASTStmtReader::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
void ASTStmtReader::VisitMemberExpr(MemberExpr *E) {
VisitExpr(E);
- bool HasQualifier = Record.readInt();
- bool HasFoundDecl = Record.readInt();
- bool HasTemplateInfo = Record.readInt();
+ CurrentUnpackingBits.emplace(Record.readInt());
+ bool HasQualifier = CurrentUnpackingBits->getNextBit();
+ bool HasFoundDecl = CurrentUnpackingBits->getNextBit();
+ bool HasTemplateInfo = CurrentUnpackingBits->getNextBit();
unsigned NumTemplateArgs = Record.readInt();
E->Base = Record.readSubExpr();
E->MemberDecl = Record.readDeclAs<ValueDecl>();
E->MemberDNLoc = Record.readDeclarationNameLoc(E->MemberDecl->getDeclName());
E->MemberLoc = Record.readSourceLocation();
- E->MemberExprBits.IsArrow = Record.readInt();
+ E->MemberExprBits.IsArrow = CurrentUnpackingBits->getNextBit();
E->MemberExprBits.HasQualifierOrFoundDecl = HasQualifier || HasFoundDecl;
E->MemberExprBits.HasTemplateKWAndArgsInfo = HasTemplateInfo;
- E->MemberExprBits.HadMultipleCandidates = Record.readInt();
- E->MemberExprBits.NonOdrUseReason = Record.readInt();
+ E->MemberExprBits.HadMultipleCandidates = CurrentUnpackingBits->getNextBit();
+ E->MemberExprBits.NonOdrUseReason =
+ CurrentUnpackingBits->getNextBits(/*Width=*/2);
E->MemberExprBits.OperatorLoc = Record.readSourceLocation();
if (HasQualifier || HasFoundDecl) {
DeclAccessPair FoundDecl;
if (HasFoundDecl) {
auto *FoundD = Record.readDeclAs<NamedDecl>();
- auto AS = (AccessSpecifier)Record.readInt();
+ auto AS = (AccessSpecifier)CurrentUnpackingBits->getNextBits(/*Width=*/2);
FoundDecl = DeclAccessPair::make(FoundD, AS);
} else {
FoundDecl = DeclAccessPair::make(E->MemberDecl,
@@ -1094,10 +1105,14 @@ void ASTStmtReader::VisitCastExpr(CastExpr *E) {
VisitExpr(E);
unsigned NumBaseSpecs = Record.readInt();
assert(NumBaseSpecs == E->path_size());
- unsigned HasFPFeatures = Record.readInt();
+
+ CurrentUnpackingBits.emplace(Record.readInt());
+ E->setCastKind((CastKind)CurrentUnpackingBits->getNextBits(/*Width=*/7));
+ unsigned HasFPFeatures = CurrentUnpackingBits->getNextBit();
assert(E->hasStoredFPFeatures() == HasFPFeatures);
+
E->setSubExpr(Record.readSubExpr());
- E->setCastKind((CastKind)Record.readInt());
+
CastExpr::path_iterator BaseI = E->path_begin();
while (NumBaseSpecs--) {
auto *BaseSpec = new (Record.getContext()) CXXBaseSpecifier;
@@ -1110,10 +1125,12 @@ void ASTStmtReader::VisitCastExpr(CastExpr *E) {
}
void ASTStmtReader::VisitBinaryOperator(BinaryOperator *E) {
- bool hasFP_Features;
VisitExpr(E);
- E->setHasStoredFPFeatures(hasFP_Features = Record.readInt());
- E->setOpcode((BinaryOperator::Opcode)Record.readInt());
+ CurrentUnpackingBits.emplace(Record.readInt());
+ E->setOpcode(
+ (BinaryOperator::Opcode)CurrentUnpackingBits->getNextBits(/*Width=*/6));
+ bool hasFP_Features = CurrentUnpackingBits->getNextBit();
+ E->setHasStoredFPFeatures(hasFP_Features);
E->setLHS(Record.readSubExpr());
E->setRHS(Record.readSubExpr());
E->setOperatorLoc(readSourceLocation());
@@ -1151,7 +1168,7 @@ ASTStmtReader::VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
void ASTStmtReader::VisitImplicitCastExpr(ImplicitCastExpr *E) {
VisitCastExpr(E);
- E->setIsPartOfExplicitCast(Record.readInt());
+ E->setIsPartOfExplicitCast(CurrentUnpackingBits->getNextBit());
}
void ASTStmtReader::VisitExplicitCastExpr(ExplicitCastExpr *E) {
@@ -1225,9 +1242,9 @@ void ASTStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
auto *Field = readDeclAs<FieldDecl>();
SourceLocation DotLoc = readSourceLocation();
SourceLocation FieldLoc = readSourceLocation();
- Designators.push_back(Designator(Field->getIdentifier(), DotLoc,
- FieldLoc));
- Designators.back().setField(Field);
+ Designators.push_back(Designator::CreateFieldDesignator(
+ Field->getIdentifier(), DotLoc, FieldLoc));
+ Designators.back().setFieldDecl(Field);
break;
}
@@ -1235,7 +1252,8 @@ void ASTStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
const IdentifierInfo *Name = Record.readIdentifier();
SourceLocation DotLoc = readSourceLocation();
SourceLocation FieldLoc = readSourceLocation();
- Designators.push_back(Designator(Name, DotLoc, FieldLoc));
+ Designators.push_back(Designator::CreateFieldDesignator(Name, DotLoc,
+ FieldLoc));
break;
}
@@ -1243,7 +1261,9 @@ void ASTStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
unsigned Index = Record.readInt();
SourceLocation LBracketLoc = readSourceLocation();
SourceLocation RBracketLoc = readSourceLocation();
- Designators.push_back(Designator(Index, LBracketLoc, RBracketLoc));
+ Designators.push_back(Designator::CreateArrayDesignator(Index,
+ LBracketLoc,
+ RBracketLoc));
break;
}
@@ -1252,8 +1272,8 @@ void ASTStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
SourceLocation LBracketLoc = readSourceLocation();
SourceLocation EllipsisLoc = readSourceLocation();
SourceLocation RBracketLoc = readSourceLocation();
- Designators.push_back(Designator(Index, LBracketLoc, EllipsisLoc,
- RBracketLoc));
+ Designators.push_back(Designator::CreateArrayRangeDesignator(
+ Index, LBracketLoc, EllipsisLoc, RBracketLoc));
break;
}
}
@@ -1300,8 +1320,7 @@ void ASTStmtReader::VisitSourceLocExpr(SourceLocExpr *E) {
E->ParentContext = readDeclAs<DeclContext>();
E->BuiltinLoc = readSourceLocation();
E->RParenLoc = readSourceLocation();
- E->SourceLocExprBits.Kind =
- static_cast<SourceLocExpr::IdentKind>(Record.readInt());
+ E->SourceLocExprBits.Kind = Record.readInt();
}
void ASTStmtReader::VisitAddrLabelExpr(AddrLabelExpr *E) {
@@ -1363,6 +1382,7 @@ void ASTStmtReader::VisitGenericSelectionExpr(GenericSelectionExpr *E) {
unsigned NumAssocs = Record.readInt();
assert(NumAssocs == E->getNumAssocs() && "Wrong NumAssocs!");
+ E->IsExprPredicate = Record.readInt();
E->ResultIndex = Record.readInt();
E->GenericSelectionExprBits.GenericLoc = readSourceLocation();
E->DefaultLoc = readSourceLocation();
@@ -1709,6 +1729,7 @@ void ASTStmtReader::VisitCXXConstructExpr(CXXConstructExpr *E) {
E->CXXConstructExprBits.StdInitListInitialization = Record.readInt();
E->CXXConstructExprBits.ZeroInitialization = Record.readInt();
E->CXXConstructExprBits.ConstructionKind = Record.readInt();
+ E->CXXConstructExprBits.IsImmediateEscalating = Record.readInt();
E->CXXConstructExprBits.Loc = readSourceLocation();
E->Constructor = readDeclAs<CXXConstructorDecl>();
E->ParenOrBraceRange = readSourceRange();
@@ -1763,8 +1784,8 @@ void ASTStmtReader::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
SourceRange R = readSourceRange();
E->Loc = R.getBegin();
E->RParenLoc = R.getEnd();
- R = readSourceRange();
- E->AngleBrackets = R;
+ if (CurrentUnpackingBits->getNextBit())
+ E->AngleBrackets = readSourceRange();
}
void ASTStmtReader::VisitCXXStaticCastExpr(CXXStaticCastExpr *E) {
@@ -1842,13 +1863,19 @@ void ASTStmtReader::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
E->Param = readDeclAs<ParmVarDecl>();
E->UsedContext = readDeclAs<DeclContext>();
E->CXXDefaultArgExprBits.Loc = readSourceLocation();
+ E->CXXDefaultArgExprBits.HasRewrittenInit = Record.readInt();
+ if (E->CXXDefaultArgExprBits.HasRewrittenInit)
+ *E->getTrailingObjects<Expr *>() = Record.readSubExpr();
}
void ASTStmtReader::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E) {
VisitExpr(E);
+ E->CXXDefaultInitExprBits.HasRewrittenInit = Record.readInt();
E->Field = readDeclAs<FieldDecl>();
E->UsedContext = readDeclAs<DeclContext>();
E->CXXDefaultInitExprBits.Loc = readSourceLocation();
+ if (E->CXXDefaultInitExprBits.HasRewrittenInit)
+ *E->getTrailingObjects<Expr *>() = Record.readSubExpr();
}
void ASTStmtReader::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
@@ -1874,6 +1901,7 @@ void ASTStmtReader::VisitCXXNewExpr(CXXNewExpr *E) {
E->CXXNewExprBits.IsGlobalNew = Record.readInt();
E->CXXNewExprBits.ShouldPassAlignment = Record.readInt();
E->CXXNewExprBits.UsualArrayDeleteWantsSize = Record.readInt();
+ E->CXXNewExprBits.HasInitializer = Record.readInt();
E->CXXNewExprBits.StoredInitializationStyle = Record.readInt();
assert((IsArray == E->isArray()) && "Wrong IsArray!");
@@ -1954,9 +1982,10 @@ void ASTStmtReader::VisitCXXDependentScopeMemberExpr(
CXXDependentScopeMemberExpr *E) {
VisitExpr(E);
- bool HasTemplateKWAndArgsInfo = Record.readInt();
unsigned NumTemplateArgs = Record.readInt();
- bool HasFirstQualifierFoundInScope = Record.readInt();
+ CurrentUnpackingBits.emplace(Record.readInt());
+ bool HasTemplateKWAndArgsInfo = CurrentUnpackingBits->getNextBit();
+ bool HasFirstQualifierFoundInScope = CurrentUnpackingBits->getNextBit();
assert((HasTemplateKWAndArgsInfo == E->hasTemplateKWAndArgsInfo()) &&
"Wrong HasTemplateKWAndArgsInfo!");
@@ -1972,11 +2001,18 @@ void ASTStmtReader::VisitCXXDependentScopeMemberExpr(
assert((NumTemplateArgs == E->getNumTemplateArgs()) &&
"Wrong NumTemplateArgs!");
- E->CXXDependentScopeMemberExprBits.IsArrow = Record.readInt();
- E->CXXDependentScopeMemberExprBits.OperatorLoc = readSourceLocation();
+ E->CXXDependentScopeMemberExprBits.IsArrow =
+ CurrentUnpackingBits->getNextBit();
+
E->BaseType = Record.readType();
E->QualifierLoc = Record.readNestedNameSpecifierLoc();
- E->Base = Record.readSubExpr();
+ // not ImplicitAccess
+ if (CurrentUnpackingBits->getNextBit())
+ E->Base = Record.readSubExpr();
+ else
+ E->Base = nullptr;
+
+ E->CXXDependentScopeMemberExprBits.OperatorLoc = readSourceLocation();
if (HasFirstQualifierFoundInScope)
*E->getTrailingObjects<NamedDecl *>() = readDeclAs<NamedDecl>();
@@ -1988,11 +2024,11 @@ void
ASTStmtReader::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
VisitExpr(E);
- if (Record.readInt()) // HasTemplateKWAndArgsInfo
+ if (CurrentUnpackingBits->getNextBit()) // HasTemplateKWAndArgsInfo
ReadTemplateKWAndArgsInfo(
*E->getTrailingObjects<ASTTemplateKWAndArgsInfo>(),
E->getTrailingObjects<TemplateArgumentLoc>(),
- /*NumTemplateArgs=*/Record.readInt());
+ /*NumTemplateArgs=*/CurrentUnpackingBits->getNextBits(/*Width=*/16));
E->QualifierLoc = Record.readNestedNameSpecifierLoc();
E->NameInfo = Record.readDeclarationNameInfo();
@@ -2006,16 +2042,18 @@ ASTStmtReader::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) {
Record.skipInts(1);
for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
E->setArg(I, Record.readSubExpr());
- E->TSI = readTypeSourceInfo();
+ E->TypeAndInitForm.setPointer(readTypeSourceInfo());
E->setLParenLoc(readSourceLocation());
E->setRParenLoc(readSourceLocation());
+ E->TypeAndInitForm.setInt(Record.readInt());
}
void ASTStmtReader::VisitOverloadExpr(OverloadExpr *E) {
VisitExpr(E);
unsigned NumResults = Record.readInt();
- bool HasTemplateKWAndArgsInfo = Record.readInt();
+ CurrentUnpackingBits.emplace(Record.readInt());
+ bool HasTemplateKWAndArgsInfo = CurrentUnpackingBits->getNextBit();
assert((E->getNumDecls() == NumResults) && "Wrong NumResults!");
assert((E->hasTemplateKWAndArgsInfo() == HasTemplateKWAndArgsInfo) &&
"Wrong HasTemplateKWAndArgsInfo!");
@@ -2048,17 +2086,24 @@ void ASTStmtReader::VisitOverloadExpr(OverloadExpr *E) {
void ASTStmtReader::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
VisitOverloadExpr(E);
- E->UnresolvedMemberExprBits.IsArrow = Record.readInt();
- E->UnresolvedMemberExprBits.HasUnresolvedUsing = Record.readInt();
- E->Base = Record.readSubExpr();
- E->BaseType = Record.readType();
+ E->UnresolvedMemberExprBits.IsArrow = CurrentUnpackingBits->getNextBit();
+ E->UnresolvedMemberExprBits.HasUnresolvedUsing =
+ CurrentUnpackingBits->getNextBit();
+
+ if (/*!isImplicitAccess=*/CurrentUnpackingBits->getNextBit())
+ E->Base = Record.readSubExpr();
+ else
+ E->Base = nullptr;
+
E->OperatorLoc = readSourceLocation();
+
+ E->BaseType = Record.readType();
}
void ASTStmtReader::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
VisitOverloadExpr(E);
- E->UnresolvedLookupExprBits.RequiresADL = Record.readInt();
- E->UnresolvedLookupExprBits.Overloaded = Record.readInt();
+ E->UnresolvedLookupExprBits.RequiresADL = CurrentUnpackingBits->getNextBit();
+ E->UnresolvedLookupExprBits.Overloaded = CurrentUnpackingBits->getNextBit();
E->NamingClass = readDeclAs<CXXRecordDecl>();
}
@@ -2132,8 +2177,13 @@ void ASTStmtReader::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
void ASTStmtReader::VisitSubstNonTypeTemplateParmExpr(
SubstNonTypeTemplateParmExpr *E) {
VisitExpr(E);
- E->ParamAndRef.setPointer(readDeclAs<NonTypeTemplateParmDecl>());
- E->ParamAndRef.setInt(Record.readInt());
+ E->AssociatedDeclAndRef.setPointer(readDeclAs<Decl>());
+ E->AssociatedDeclAndRef.setInt(CurrentUnpackingBits->getNextBit());
+ E->Index = CurrentUnpackingBits->getNextBits(/*Width=*/12);
+ if (CurrentUnpackingBits->getNextBit())
+ E->PackIndex = Record.readInt();
+ else
+ E->PackIndex = 0;
E->SubstNonTypeTemplateParmExprBits.NameLoc = readSourceLocation();
E->Replacement = Record.readSubExpr();
}
@@ -2141,7 +2191,8 @@ void ASTStmtReader::VisitSubstNonTypeTemplateParmExpr(
void ASTStmtReader::VisitSubstNonTypeTemplateParmPackExpr(
SubstNonTypeTemplateParmPackExpr *E) {
VisitExpr(E);
- E->Param = readDeclAs<NonTypeTemplateParmDecl>();
+ E->AssociatedDecl = readDeclAs<Decl>();
+ E->Index = Record.readInt();
TemplateArgument ArgPack = Record.readTemplateArgument();
if (ArgPack.getKind() != TemplateArgument::Pack)
return;
@@ -2182,6 +2233,31 @@ void ASTStmtReader::VisitCXXFoldExpr(CXXFoldExpr *E) {
E->Opcode = (BinaryOperatorKind)Record.readInt();
}
+void ASTStmtReader::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) {
+ VisitExpr(E);
+ unsigned ExpectedNumExprs = Record.readInt();
+ assert(E->NumExprs == ExpectedNumExprs &&
+ "expected number of expressions does not equal the actual number of "
+ "serialized expressions.");
+ E->NumUserSpecifiedExprs = Record.readInt();
+ E->InitLoc = readSourceLocation();
+ E->LParenLoc = readSourceLocation();
+ E->RParenLoc = readSourceLocation();
+ for (unsigned I = 0; I < ExpectedNumExprs; I++)
+ E->getTrailingObjects<Expr *>()[I] = Record.readSubExpr();
+
+ bool HasArrayFillerOrUnionDecl = Record.readBool();
+ if (HasArrayFillerOrUnionDecl) {
+ bool HasArrayFiller = Record.readBool();
+ if (HasArrayFiller) {
+ E->setArrayFiller(Record.readSubExpr());
+ } else {
+ E->setInitializedFieldInUnion(readDeclAs<FieldDecl>());
+ }
+ }
+ E->updateDependence();
+}
+
void ASTStmtReader::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
VisitExpr(E);
E->SourceExpr = Record.readSubExpr();
@@ -2294,6 +2370,7 @@ void ASTStmtReader::VisitOMPExecutableDirective(OMPExecutableDirective *E) {
Record.readOMPChildren(E->Data);
E->setLocStart(readSourceLocation());
E->setLocEnd(readSourceLocation());
+ E->setMappedDirective(Record.readEnum<OpenMPDirectiveKind>());
}
void ASTStmtReader::VisitOMPLoopBasedDirective(OMPLoopBasedDirective *D) {
@@ -2307,6 +2384,13 @@ void ASTStmtReader::VisitOMPLoopDirective(OMPLoopDirective *D) {
VisitOMPLoopBasedDirective(D);
}
+void ASTStmtReader::VisitOMPMetaDirective(OMPMetaDirective *D) {
+ VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ Record.skipInts(1);
+ VisitOMPExecutableDirective(D);
+}
+
void ASTStmtReader::VisitOMPParallelDirective(OMPParallelDirective *D) {
VisitStmt(D);
VisitOMPExecutableDirective(D);
@@ -2317,12 +2401,18 @@ void ASTStmtReader::VisitOMPSimdDirective(OMPSimdDirective *D) {
VisitOMPLoopDirective(D);
}
-void ASTStmtReader::VisitOMPTileDirective(OMPTileDirective *D) {
+void ASTStmtReader::VisitOMPLoopTransformationDirective(
+ OMPLoopTransformationDirective *D) {
VisitOMPLoopBasedDirective(D);
+ D->setNumGeneratedLoops(Record.readUInt32());
+}
+
+void ASTStmtReader::VisitOMPTileDirective(OMPTileDirective *D) {
+ VisitOMPLoopTransformationDirective(D);
}
void ASTStmtReader::VisitOMPUnrollDirective(OMPUnrollDirective *D) {
- VisitOMPLoopBasedDirective(D);
+ VisitOMPLoopTransformationDirective(D);
}
void ASTStmtReader::VisitOMPForDirective(OMPForDirective *D) {
@@ -2346,6 +2436,11 @@ void ASTStmtReader::VisitOMPSectionDirective(OMPSectionDirective *D) {
D->setHasCancel(Record.readBool());
}
+void ASTStmtReader::VisitOMPScopeDirective(OMPScopeDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+}
+
void ASTStmtReader::VisitOMPSingleDirective(OMPSingleDirective *D) {
VisitStmt(D);
VisitOMPExecutableDirective(D);
@@ -2378,6 +2473,12 @@ void ASTStmtReader::VisitOMPParallelMasterDirective(
VisitOMPExecutableDirective(D);
}
+void ASTStmtReader::VisitOMPParallelMaskedDirective(
+ OMPParallelMaskedDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+}
+
void ASTStmtReader::VisitOMPParallelSectionsDirective(
OMPParallelSectionsDirective *D) {
VisitStmt(D);
@@ -2403,6 +2504,15 @@ void ASTStmtReader::VisitOMPBarrierDirective(OMPBarrierDirective *D) {
void ASTStmtReader::VisitOMPTaskwaitDirective(OMPTaskwaitDirective *D) {
VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ Record.skipInts(1);
+ VisitOMPExecutableDirective(D);
+}
+
+void ASTStmtReader::VisitOMPErrorDirective(OMPErrorDirective *D) {
+ VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
@@ -2434,8 +2544,9 @@ void ASTStmtReader::VisitOMPOrderedDirective(OMPOrderedDirective *D) {
void ASTStmtReader::VisitOMPAtomicDirective(OMPAtomicDirective *D) {
VisitStmt(D);
VisitOMPExecutableDirective(D);
- D->IsXLHSInRHSPart = Record.readBool();
- D->IsPostfixUpdate = Record.readBool();
+ D->Flags.IsXLHSInRHSPart = Record.readBool() ? 1 : 0;
+ D->Flags.IsPostfixUpdate = Record.readBool() ? 1 : 0;
+ D->Flags.IsFailOnly = Record.readBool() ? 1 : 0;
}
void ASTStmtReader::VisitOMPTargetDirective(OMPTargetDirective *D) {
@@ -2506,22 +2617,44 @@ void ASTStmtReader::VisitOMPMasterTaskLoopDirective(
D->setHasCancel(Record.readBool());
}
+void ASTStmtReader::VisitOMPMaskedTaskLoopDirective(
+ OMPMaskedTaskLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+ D->setHasCancel(Record.readBool());
+}
+
void ASTStmtReader::VisitOMPMasterTaskLoopSimdDirective(
OMPMasterTaskLoopSimdDirective *D) {
VisitOMPLoopDirective(D);
}
+void ASTStmtReader::VisitOMPMaskedTaskLoopSimdDirective(
+ OMPMaskedTaskLoopSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
void ASTStmtReader::VisitOMPParallelMasterTaskLoopDirective(
OMPParallelMasterTaskLoopDirective *D) {
VisitOMPLoopDirective(D);
D->setHasCancel(Record.readBool());
}
+void ASTStmtReader::VisitOMPParallelMaskedTaskLoopDirective(
+ OMPParallelMaskedTaskLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+ D->setHasCancel(Record.readBool());
+}
+
void ASTStmtReader::VisitOMPParallelMasterTaskLoopSimdDirective(
OMPParallelMasterTaskLoopSimdDirective *D) {
VisitOMPLoopDirective(D);
}
+void ASTStmtReader::VisitOMPParallelMaskedTaskLoopSimdDirective(
+ OMPParallelMaskedTaskLoopSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
void ASTStmtReader::VisitOMPDistributeDirective(OMPDistributeDirective *D) {
VisitOMPLoopDirective(D);
}
@@ -2619,6 +2752,30 @@ void ASTStmtReader::VisitOMPMaskedDirective(OMPMaskedDirective *D) {
VisitOMPExecutableDirective(D);
}
+void ASTStmtReader::VisitOMPGenericLoopDirective(OMPGenericLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
+void ASTStmtReader::VisitOMPTeamsGenericLoopDirective(
+ OMPTeamsGenericLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
+void ASTStmtReader::VisitOMPTargetTeamsGenericLoopDirective(
+ OMPTargetTeamsGenericLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
+void ASTStmtReader::VisitOMPParallelGenericLoopDirective(
+ OMPParallelGenericLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
+void ASTStmtReader::VisitOMPTargetParallelGenericLoopDirective(
+ OMPTargetParallelGenericLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
//===----------------------------------------------------------------------===//
// ASTReader Implementation
//===----------------------------------------------------------------------===//
@@ -2705,7 +2862,7 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case STMT_REF_PTR:
IsStmtReference = true;
- assert(StmtEntries.find(Record[0]) != StmtEntries.end() &&
+ assert(StmtEntries.contains(Record[0]) &&
"No stmt was recorded for this offset reference!");
S = StmtEntries[Record.readInt()];
break;
@@ -2718,10 +2875,12 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = new (Context) NullStmt(Empty);
break;
- case STMT_COMPOUND:
- S = CompoundStmt::CreateEmpty(
- Context, /*NumStmts=*/Record[ASTStmtReader::NumStmtFields]);
+ case STMT_COMPOUND: {
+ unsigned NumStmts = Record[ASTStmtReader::NumStmtFields];
+ bool HasFPFeatures = Record[ASTStmtReader::NumStmtFields + 1];
+ S = CompoundStmt::CreateEmpty(Context, NumStmts, HasFPFeatures);
break;
+ }
case STMT_CASE:
S = CaseStmt::CreateEmpty(
@@ -2743,13 +2902,14 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
/*NumAttrs*/Record[ASTStmtReader::NumStmtFields]);
break;
- case STMT_IF:
- S = IfStmt::CreateEmpty(
- Context,
- /* HasElse=*/Record[ASTStmtReader::NumStmtFields + 1],
- /* HasVar=*/Record[ASTStmtReader::NumStmtFields + 2],
- /* HasInit=*/Record[ASTStmtReader::NumStmtFields + 3]);
+ case STMT_IF: {
+ BitsUnpacker IfStmtBits(Record[ASTStmtReader::NumStmtFields]);
+ bool HasElse = IfStmtBits.getNextBit();
+ bool HasVar = IfStmtBits.getNextBit();
+ bool HasInit = IfStmtBits.getNextBit();
+ S = IfStmt::CreateEmpty(Context, HasElse, HasVar, HasInit);
break;
+ }
case STMT_SWITCH:
S = SwitchStmt::CreateEmpty(
@@ -2812,7 +2972,7 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case EXPR_CONSTANT:
S = ConstantExpr::CreateEmpty(
- Context, static_cast<ConstantExpr::ResultStorageKind>(
+ Context, static_cast<ConstantResultStorageKind>(
/*StorageKind=*/Record[ASTStmtReader::NumExprFields]));
break;
@@ -2826,15 +2986,19 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
/*HasFunctionName*/ Record[ASTStmtReader::NumExprFields]);
break;
- case EXPR_DECL_REF:
- S = DeclRefExpr::CreateEmpty(
- Context,
- /*HasQualifier=*/Record[ASTStmtReader::NumExprFields],
- /*HasFoundDecl=*/Record[ASTStmtReader::NumExprFields + 1],
- /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields + 2],
- /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields + 2] ?
- Record[ASTStmtReader::NumExprFields + 6] : 0);
+ case EXPR_DECL_REF: {
+ BitsUnpacker DeclRefExprBits(Record[ASTStmtReader::NumExprFields]);
+ DeclRefExprBits.advance(5);
+ bool HasFoundDecl = DeclRefExprBits.getNextBit();
+ bool HasQualifier = DeclRefExprBits.getNextBit();
+ bool HasTemplateKWAndArgsInfo = DeclRefExprBits.getNextBit();
+ unsigned NumTemplateArgs = HasTemplateKWAndArgsInfo
+ ? Record[ASTStmtReader::NumExprFields + 1]
+ : 0;
+ S = DeclRefExpr::CreateEmpty(Context, HasQualifier, HasFoundDecl,
+ HasTemplateKWAndArgsInfo, NumTemplateArgs);
break;
+ }
case EXPR_INTEGER_LITERAL:
S = IntegerLiteral::Create(Context, Empty);
@@ -2874,10 +3038,13 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
/* NumExprs=*/Record[ASTStmtReader::NumExprFields]);
break;
- case EXPR_UNARY_OPERATOR:
- S = UnaryOperator::CreateEmpty(Context,
- Record[ASTStmtReader::NumExprFields]);
+ case EXPR_UNARY_OPERATOR: {
+ BitsUnpacker UnaryOperatorBits(Record[ASTStmtReader::NumStmtFields]);
+ UnaryOperatorBits.advance(ASTStmtReader::NumExprBits);
+ bool HasFPFeatures = UnaryOperatorBits.getNextBit();
+ S = UnaryOperator::CreateEmpty(Context, HasFPFeatures);
break;
+ }
case EXPR_OFFSETOF:
S = OffsetOfExpr::CreateEmpty(Context,
@@ -2911,33 +3078,46 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
Record[ASTStmtReader::NumExprFields]);
break;
- case EXPR_CALL:
- S = CallExpr::CreateEmpty(
- Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields],
- /*HasFPFeatures=*/Record[ASTStmtReader::NumExprFields + 1], Empty);
+ case EXPR_CALL: {
+ auto NumArgs = Record[ASTStmtReader::NumExprFields];
+ BitsUnpacker CallExprBits(Record[ASTStmtReader::NumExprFields + 1]);
+ CallExprBits.advance(1);
+ auto HasFPFeatures = CallExprBits.getNextBit();
+ S = CallExpr::CreateEmpty(Context, NumArgs, HasFPFeatures, Empty);
break;
+ }
case EXPR_RECOVERY:
S = RecoveryExpr::CreateEmpty(
Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields]);
break;
- case EXPR_MEMBER:
- S = MemberExpr::CreateEmpty(Context, Record[ASTStmtReader::NumExprFields],
- Record[ASTStmtReader::NumExprFields + 1],
- Record[ASTStmtReader::NumExprFields + 2],
- Record[ASTStmtReader::NumExprFields + 3]);
+ case EXPR_MEMBER: {
+ BitsUnpacker ExprMemberBits(Record[ASTStmtReader::NumExprFields]);
+ bool HasQualifier = ExprMemberBits.getNextBit();
+ bool HasFoundDecl = ExprMemberBits.getNextBit();
+ bool HasTemplateInfo = ExprMemberBits.getNextBit();
+ unsigned NumTemplateArgs = Record[ASTStmtReader::NumExprFields + 1];
+ S = MemberExpr::CreateEmpty(Context, HasQualifier, HasFoundDecl,
+ HasTemplateInfo, NumTemplateArgs);
break;
+ }
- case EXPR_BINARY_OPERATOR:
- S = BinaryOperator::CreateEmpty(Context,
- Record[ASTStmtReader::NumExprFields]);
+ case EXPR_BINARY_OPERATOR: {
+ BitsUnpacker BinaryOperatorBits(Record[ASTStmtReader::NumExprFields]);
+ BinaryOperatorBits.advance(/*Size of opcode*/ 6);
+ bool HasFPFeatures = BinaryOperatorBits.getNextBit();
+ S = BinaryOperator::CreateEmpty(Context, HasFPFeatures);
break;
+ }
- case EXPR_COMPOUND_ASSIGN_OPERATOR:
- S = CompoundAssignOperator::CreateEmpty(
- Context, Record[ASTStmtReader::NumExprFields]);
+ case EXPR_COMPOUND_ASSIGN_OPERATOR: {
+ BitsUnpacker BinaryOperatorBits(Record[ASTStmtReader::NumExprFields]);
+ BinaryOperatorBits.advance(/*Size of opcode*/ 6);
+ bool HasFPFeatures = BinaryOperatorBits.getNextBit();
+ S = CompoundAssignOperator::CreateEmpty(Context, HasFPFeatures);
break;
+ }
case EXPR_CONDITIONAL_OPERATOR:
S = new (Context) ConditionalOperator(Empty);
@@ -2947,19 +3127,23 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = new (Context) BinaryConditionalOperator(Empty);
break;
- case EXPR_IMPLICIT_CAST:
- S = ImplicitCastExpr::CreateEmpty(
- Context,
- /*PathSize*/ Record[ASTStmtReader::NumExprFields],
- /*HasFPFeatures*/ Record[ASTStmtReader::NumExprFields + 1]);
+ case EXPR_IMPLICIT_CAST: {
+ unsigned PathSize = Record[ASTStmtReader::NumExprFields];
+ BitsUnpacker CastExprBits(Record[ASTStmtReader::NumExprFields + 1]);
+ CastExprBits.advance(7);
+ bool HasFPFeatures = CastExprBits.getNextBit();
+ S = ImplicitCastExpr::CreateEmpty(Context, PathSize, HasFPFeatures);
break;
+ }
- case EXPR_CSTYLE_CAST:
- S = CStyleCastExpr::CreateEmpty(
- Context,
- /*PathSize*/ Record[ASTStmtReader::NumExprFields],
- /*HasFPFeatures*/ Record[ASTStmtReader::NumExprFields + 1]);
+ case EXPR_CSTYLE_CAST: {
+ unsigned PathSize = Record[ASTStmtReader::NumExprFields];
+ BitsUnpacker CastExprBits(Record[ASTStmtReader::NumExprFields + 1]);
+ CastExprBits.advance(7);
+ bool HasFPFeatures = CastExprBits.getNextBit();
+ S = CStyleCastExpr::CreateEmpty(Context, PathSize, HasFPFeatures);
break;
+ }
case EXPR_COMPOUND_LITERAL:
S = new (Context) CompoundLiteralExpr(Empty);
@@ -3183,6 +3367,11 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = OMPCanonicalLoop::createEmpty(Context);
break;
+ case STMT_OMP_META_DIRECTIVE:
+ S = OMPMetaDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
case STMT_OMP_PARALLEL_DIRECTIVE:
S =
OMPParallelDirective::CreateEmpty(Context,
@@ -3237,6 +3426,11 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = OMPSectionDirective::CreateEmpty(Context, Empty);
break;
+ case STMT_OMP_SCOPE_DIRECTIVE:
+ S = OMPScopeDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
case STMT_OMP_SINGLE_DIRECTIVE:
S = OMPSingleDirective::CreateEmpty(
Context, Record[ASTStmtReader::NumStmtFields], Empty);
@@ -3272,6 +3466,11 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
Context, Record[ASTStmtReader::NumStmtFields], Empty);
break;
+ case STMT_OMP_PARALLEL_MASKED_DIRECTIVE:
+ S = OMPParallelMaskedDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
case STMT_OMP_PARALLEL_SECTIONS_DIRECTIVE:
S = OMPParallelSectionsDirective::CreateEmpty(
Context, Record[ASTStmtReader::NumStmtFields], Empty);
@@ -3291,7 +3490,13 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case STMT_OMP_TASKWAIT_DIRECTIVE:
- S = OMPTaskwaitDirective::CreateEmpty(Context, Empty);
+ S = OMPTaskwaitDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
+ case STMT_OMP_ERROR_DIRECTIVE:
+ S = OMPErrorDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
break;
case STMT_OMP_TASKGROUP_DIRECTIVE:
@@ -3403,6 +3608,14 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
}
+ case STMT_OMP_MASKED_TASKLOOP_DIRECTIVE: {
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPMaskedTaskLoopDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
+ break;
+ }
+
case STMT_OMP_MASTER_TASKLOOP_SIMD_DIRECTIVE: {
unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
@@ -3411,6 +3624,14 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
}
+ case STMT_OMP_MASKED_TASKLOOP_SIMD_DIRECTIVE: {
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPMaskedTaskLoopSimdDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
+ break;
+ }
+
case STMT_OMP_PARALLEL_MASTER_TASKLOOP_DIRECTIVE: {
unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
@@ -3419,6 +3640,14 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
}
+ case STMT_OMP_PARALLEL_MASKED_TASKLOOP_DIRECTIVE: {
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPParallelMaskedTaskLoopDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
+ break;
+ }
+
case STMT_OMP_PARALLEL_MASTER_TASKLOOP_SIMD_DIRECTIVE: {
unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
@@ -3427,6 +3656,14 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
}
+ case STMT_OMP_PARALLEL_MASKED_TASKLOOP_SIMD_DIRECTIVE: {
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPParallelMaskedTaskLoopSimdDirective::CreateEmpty(
+ Context, NumClauses, CollapsedNum, Empty);
+ break;
+ }
+
case STMT_OMP_DISTRIBUTE_DIRECTIVE: {
unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
@@ -3560,17 +3797,65 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
Context, Record[ASTStmtReader::NumStmtFields], Empty);
break;
- case EXPR_CXX_OPERATOR_CALL:
- S = CXXOperatorCallExpr::CreateEmpty(
- Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields],
- /*HasFPFeatures=*/Record[ASTStmtReader::NumExprFields + 1], Empty);
+ case STMT_OMP_GENERIC_LOOP_DIRECTIVE: {
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPGenericLoopDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
break;
+ }
- case EXPR_CXX_MEMBER_CALL:
- S = CXXMemberCallExpr::CreateEmpty(
- Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields],
- /*HasFPFeatures=*/Record[ASTStmtReader::NumExprFields + 1], Empty);
+ case STMT_OMP_TEAMS_GENERIC_LOOP_DIRECTIVE: {
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPTeamsGenericLoopDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
+ break;
+ }
+
+ case STMT_OMP_TARGET_TEAMS_GENERIC_LOOP_DIRECTIVE: {
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPTargetTeamsGenericLoopDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
+ break;
+ }
+
+ case STMT_OMP_PARALLEL_GENERIC_LOOP_DIRECTIVE: {
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPParallelGenericLoopDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
break;
+ }
+
+ case STMT_OMP_TARGET_PARALLEL_GENERIC_LOOP_DIRECTIVE: {
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPTargetParallelGenericLoopDirective::CreateEmpty(
+ Context, NumClauses, CollapsedNum, Empty);
+ break;
+ }
+
+ case EXPR_CXX_OPERATOR_CALL: {
+ auto NumArgs = Record[ASTStmtReader::NumExprFields];
+ BitsUnpacker CallExprBits(Record[ASTStmtReader::NumExprFields + 1]);
+ CallExprBits.advance(1);
+ auto HasFPFeatures = CallExprBits.getNextBit();
+ S = CXXOperatorCallExpr::CreateEmpty(Context, NumArgs, HasFPFeatures,
+ Empty);
+ break;
+ }
+
+ case EXPR_CXX_MEMBER_CALL: {
+ auto NumArgs = Record[ASTStmtReader::NumExprFields];
+ BitsUnpacker CallExprBits(Record[ASTStmtReader::NumExprFields + 1]);
+ CallExprBits.advance(1);
+ auto HasFPFeatures = CallExprBits.getNextBit();
+ S = CXXMemberCallExpr::CreateEmpty(Context, NumArgs, HasFPFeatures,
+ Empty);
+ break;
+ }
case EXPR_CXX_REWRITTEN_BINARY_OPERATOR:
S = new (Context) CXXRewrittenBinaryOperator(Empty);
@@ -3592,22 +3877,26 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
/* NumArgs=*/Record[ASTStmtReader::NumExprFields]);
break;
- case EXPR_CXX_STATIC_CAST:
- S = CXXStaticCastExpr::CreateEmpty(
- Context,
- /*PathSize*/ Record[ASTStmtReader::NumExprFields],
- /*HasFPFeatures*/ Record[ASTStmtReader::NumExprFields + 1]);
+ case EXPR_CXX_STATIC_CAST: {
+ unsigned PathSize = Record[ASTStmtReader::NumExprFields];
+ BitsUnpacker CastExprBits(Record[ASTStmtReader::NumExprFields + 1]);
+ CastExprBits.advance(7);
+ bool HasFPFeatures = CastExprBits.getNextBit();
+ S = CXXStaticCastExpr::CreateEmpty(Context, PathSize, HasFPFeatures);
break;
+ }
- case EXPR_CXX_DYNAMIC_CAST:
- S = CXXDynamicCastExpr::CreateEmpty(Context,
- /*PathSize*/ Record[ASTStmtReader::NumExprFields]);
+ case EXPR_CXX_DYNAMIC_CAST: {
+ unsigned PathSize = Record[ASTStmtReader::NumExprFields];
+ S = CXXDynamicCastExpr::CreateEmpty(Context, PathSize);
break;
+ }
- case EXPR_CXX_REINTERPRET_CAST:
- S = CXXReinterpretCastExpr::CreateEmpty(Context,
- /*PathSize*/ Record[ASTStmtReader::NumExprFields]);
+ case EXPR_CXX_REINTERPRET_CAST: {
+ unsigned PathSize = Record[ASTStmtReader::NumExprFields];
+ S = CXXReinterpretCastExpr::CreateEmpty(Context, PathSize);
break;
+ }
case EXPR_CXX_CONST_CAST:
S = CXXConstCastExpr::CreateEmpty(Context);
@@ -3617,23 +3906,33 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = CXXAddrspaceCastExpr::CreateEmpty(Context);
break;
- case EXPR_CXX_FUNCTIONAL_CAST:
- S = CXXFunctionalCastExpr::CreateEmpty(
- Context,
- /*PathSize*/ Record[ASTStmtReader::NumExprFields],
- /*HasFPFeatures*/ Record[ASTStmtReader::NumExprFields + 1]);
+ case EXPR_CXX_FUNCTIONAL_CAST: {
+ unsigned PathSize = Record[ASTStmtReader::NumExprFields];
+ BitsUnpacker CastExprBits(Record[ASTStmtReader::NumExprFields + 1]);
+ CastExprBits.advance(7);
+ bool HasFPFeatures = CastExprBits.getNextBit();
+ S = CXXFunctionalCastExpr::CreateEmpty(Context, PathSize, HasFPFeatures);
break;
+ }
- case EXPR_BUILTIN_BIT_CAST:
- assert(Record[ASTStmtReader::NumExprFields] == 0 && "Wrong PathSize!");
+ case EXPR_BUILTIN_BIT_CAST: {
+#ifndef NDEBUG
+ unsigned PathSize = Record[ASTStmtReader::NumExprFields];
+ assert(PathSize == 0 && "Wrong PathSize!");
+#endif
S = new (Context) BuiltinBitCastExpr(Empty);
break;
+ }
- case EXPR_USER_DEFINED_LITERAL:
- S = UserDefinedLiteral::CreateEmpty(
- Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields],
- /*HasFPFeatures=*/Record[ASTStmtReader::NumExprFields + 1], Empty);
+ case EXPR_USER_DEFINED_LITERAL: {
+ auto NumArgs = Record[ASTStmtReader::NumExprFields];
+ BitsUnpacker CallExprBits(Record[ASTStmtReader::NumExprFields + 1]);
+ CallExprBits.advance(1);
+ auto HasFPFeatures = CallExprBits.getNextBit();
+ S = UserDefinedLiteral::CreateEmpty(Context, NumArgs, HasFPFeatures,
+ Empty);
break;
+ }
case EXPR_CXX_STD_INITIALIZER_LIST:
S = new (Context) CXXStdInitializerListExpr(Empty);
@@ -3672,7 +3971,7 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_CXX_THIS:
- S = new (Context) CXXThisExpr(Empty);
+ S = CXXThisExpr::CreateEmpty(Context);
break;
case EXPR_CXX_THROW:
@@ -3680,11 +3979,13 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_CXX_DEFAULT_ARG:
- S = new (Context) CXXDefaultArgExpr(Empty);
+ S = CXXDefaultArgExpr::CreateEmpty(
+ Context, /*HasRewrittenInit=*/Record[ASTStmtReader::NumExprFields]);
break;
case EXPR_CXX_DEFAULT_INIT:
- S = new (Context) CXXDefaultInitExpr(Empty);
+ S = CXXDefaultInitExpr::CreateEmpty(
+ Context, /*HasRewrittenInit=*/Record[ASTStmtReader::NumExprFields]);
break;
case EXPR_CXX_BIND_TEMPORARY:
@@ -3717,49 +4018,62 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
Record[ASTStmtReader::NumExprFields]);
break;
- case EXPR_CXX_DEPENDENT_SCOPE_MEMBER:
+ case EXPR_CXX_DEPENDENT_SCOPE_MEMBER: {
+ unsigned NumTemplateArgs = Record[ASTStmtReader::NumExprFields];
+ BitsUnpacker DependentScopeMemberBits(
+ Record[ASTStmtReader::NumExprFields + 1]);
+ bool HasTemplateKWAndArgsInfo = DependentScopeMemberBits.getNextBit();
+
+ bool HasFirstQualifierFoundInScope =
+ DependentScopeMemberBits.getNextBit();
S = CXXDependentScopeMemberExpr::CreateEmpty(
- Context,
- /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields],
- /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields + 1],
- /*HasFirstQualifierFoundInScope=*/
- Record[ASTStmtReader::NumExprFields + 2]);
+ Context, HasTemplateKWAndArgsInfo, NumTemplateArgs,
+ HasFirstQualifierFoundInScope);
break;
+ }
- case EXPR_CXX_DEPENDENT_SCOPE_DECL_REF:
- S = DependentScopeDeclRefExpr::CreateEmpty(Context,
- /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields],
- /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
- ? Record[ASTStmtReader::NumExprFields + 1]
- : 0);
+ case EXPR_CXX_DEPENDENT_SCOPE_DECL_REF: {
+ BitsUnpacker DependentScopeDeclRefBits(
+ Record[ASTStmtReader::NumStmtFields]);
+ DependentScopeDeclRefBits.advance(ASTStmtReader::NumExprBits);
+ bool HasTemplateKWAndArgsInfo = DependentScopeDeclRefBits.getNextBit();
+ unsigned NumTemplateArgs =
+ HasTemplateKWAndArgsInfo
+ ? DependentScopeDeclRefBits.getNextBits(/*Width=*/16)
+ : 0;
+ S = DependentScopeDeclRefExpr::CreateEmpty(
+ Context, HasTemplateKWAndArgsInfo, NumTemplateArgs);
break;
+ }
case EXPR_CXX_UNRESOLVED_CONSTRUCT:
S = CXXUnresolvedConstructExpr::CreateEmpty(Context,
/*NumArgs=*/Record[ASTStmtReader::NumExprFields]);
break;
- case EXPR_CXX_UNRESOLVED_MEMBER:
+ case EXPR_CXX_UNRESOLVED_MEMBER: {
+ auto NumResults = Record[ASTStmtReader::NumExprFields];
+ BitsUnpacker OverloadExprBits(Record[ASTStmtReader::NumExprFields + 1]);
+ auto HasTemplateKWAndArgsInfo = OverloadExprBits.getNextBit();
+ auto NumTemplateArgs = HasTemplateKWAndArgsInfo
+ ? Record[ASTStmtReader::NumExprFields + 2]
+ : 0;
S = UnresolvedMemberExpr::CreateEmpty(
- Context,
- /*NumResults=*/Record[ASTStmtReader::NumExprFields],
- /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields + 1],
- /*NumTemplateArgs=*/
- Record[ASTStmtReader::NumExprFields + 1]
- ? Record[ASTStmtReader::NumExprFields + 2]
- : 0);
+ Context, NumResults, HasTemplateKWAndArgsInfo, NumTemplateArgs);
break;
+ }
- case EXPR_CXX_UNRESOLVED_LOOKUP:
+ case EXPR_CXX_UNRESOLVED_LOOKUP: {
+ auto NumResults = Record[ASTStmtReader::NumExprFields];
+ BitsUnpacker OverloadExprBits(Record[ASTStmtReader::NumExprFields + 1]);
+ auto HasTemplateKWAndArgsInfo = OverloadExprBits.getNextBit();
+ auto NumTemplateArgs = HasTemplateKWAndArgsInfo
+ ? Record[ASTStmtReader::NumExprFields + 2]
+ : 0;
S = UnresolvedLookupExpr::CreateEmpty(
- Context,
- /*NumResults=*/Record[ASTStmtReader::NumExprFields],
- /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields + 1],
- /*NumTemplateArgs=*/
- Record[ASTStmtReader::NumExprFields + 1]
- ? Record[ASTStmtReader::NumExprFields + 2]
- : 0);
+ Context, NumResults, HasTemplateKWAndArgsInfo, NumTemplateArgs);
break;
+ }
case EXPR_TYPE_TRAIT:
S = TypeTraitExpr::CreateDeserialized(Context,
@@ -3809,15 +4123,24 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = new (Context) CXXFoldExpr(Empty);
break;
+ case EXPR_CXX_PAREN_LIST_INIT:
+ S = CXXParenListInitExpr::CreateEmpty(
+ Context, /*numExprs=*/Record[ASTStmtReader::NumExprFields], Empty);
+ break;
+
case EXPR_OPAQUE_VALUE:
S = new (Context) OpaqueValueExpr(Empty);
break;
- case EXPR_CUDA_KERNEL_CALL:
- S = CUDAKernelCallExpr::CreateEmpty(
- Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields],
- /*HasFPFeatures=*/Record[ASTStmtReader::NumExprFields + 1], Empty);
+ case EXPR_CUDA_KERNEL_CALL: {
+ auto NumArgs = Record[ASTStmtReader::NumExprFields];
+ BitsUnpacker CallExprBits(Record[ASTStmtReader::NumExprFields + 1]);
+ CallExprBits.advance(1);
+ auto HasFPFeatures = CallExprBits.getNextBit();
+ S = CUDAKernelCallExpr::CreateEmpty(Context, NumArgs, HasFPFeatures,
+ Empty);
break;
+ }
case EXPR_ASTYPE:
S = new (Context) AsTypeExpr(Empty);
@@ -3862,8 +4185,7 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_CONCEPT_SPECIALIZATION: {
- unsigned numTemplateArgs = Record[ASTStmtReader::NumExprFields];
- S = ConceptSpecializationExpr::Create(Context, Empty, numTemplateArgs);
+ S = new (Context) ConceptSpecializationExpr(Empty);
break;
}
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
index 66c207ad9243..378a1f86bd53 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
@@ -77,7 +77,6 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
@@ -100,6 +99,7 @@
#include "llvm/Support/OnDiskHashTable.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SHA1.h"
+#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/VersionTuple.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
@@ -108,9 +108,9 @@
#include <cstdlib>
#include <cstring>
#include <ctime>
-#include <deque>
#include <limits>
#include <memory>
+#include <optional>
#include <queue>
#include <tuple>
#include <utility>
@@ -132,6 +132,18 @@ static StringRef bytes(const SmallVectorImpl<T> &v) {
sizeof(T) * v.size());
}
+static std::string bytes(const std::vector<bool> &V) {
+ std::string Str;
+ Str.reserve(V.size() / 8);
+ for (unsigned I = 0, E = V.size(); I < E;) {
+ char Byte = 0;
+ for (unsigned Bit = 0; Bit < 8 && I < E; ++Bit, ++I)
+ Byte |= V[I] << Bit;
+ Str += Byte;
+ }
+ return Str;
+}
+
//===----------------------------------------------------------------------===//
// Type serialization
//===----------------------------------------------------------------------===//
@@ -149,6 +161,82 @@ static TypeCode getTypeCodeForTypeClass(Type::TypeClass id) {
namespace {
+std::set<const FileEntry *> GetAffectingModuleMaps(const Preprocessor &PP,
+ Module *RootModule) {
+ SmallVector<const Module *> ModulesToProcess{RootModule};
+
+ const HeaderSearch &HS = PP.getHeaderSearchInfo();
+
+ SmallVector<OptionalFileEntryRef, 16> FilesByUID;
+ HS.getFileMgr().GetUniqueIDMapping(FilesByUID);
+
+ if (FilesByUID.size() > HS.header_file_size())
+ FilesByUID.resize(HS.header_file_size());
+
+ for (unsigned UID = 0, LastUID = FilesByUID.size(); UID != LastUID; ++UID) {
+ OptionalFileEntryRef File = FilesByUID[UID];
+ if (!File)
+ continue;
+
+ const HeaderFileInfo *HFI =
+ HS.getExistingFileInfo(*File, /*WantExternal*/ false);
+ if (!HFI || (HFI->isModuleHeader && !HFI->isCompilingModuleHeader))
+ continue;
+
+ for (const auto &KH : HS.findResolvedModulesForHeader(*File)) {
+ if (!KH.getModule())
+ continue;
+ ModulesToProcess.push_back(KH.getModule());
+ }
+ }
+
+ const ModuleMap &MM = HS.getModuleMap();
+ SourceManager &SourceMgr = PP.getSourceManager();
+
+ std::set<const FileEntry *> ModuleMaps{};
+ auto CollectIncludingModuleMaps = [&](FileEntryRef F) {
+ if (!ModuleMaps.insert(F).second)
+ return;
+ FileID FID = SourceMgr.translateFile(F);
+ SourceLocation Loc = SourceMgr.getIncludeLoc(FID);
+ // The include location of inferred module maps can point into the header
+ // file that triggered the inferring. Cut off the walk if that's the case.
+ while (Loc.isValid() && isModuleMap(SourceMgr.getFileCharacteristic(Loc))) {
+ FID = SourceMgr.getFileID(Loc);
+ if (!ModuleMaps.insert(*SourceMgr.getFileEntryRefForID(FID)).second)
+ break;
+ Loc = SourceMgr.getIncludeLoc(FID);
+ }
+ };
+
+ std::set<const Module *> ProcessedModules;
+ auto CollectIncludingMapsFromAncestors = [&](const Module *M) {
+ for (const Module *Mod = M; Mod; Mod = Mod->Parent) {
+ if (!ProcessedModules.insert(Mod).second)
+ break;
+ // The containing module map is affecting, because it's being pointed
+ // into by Module::DefinitionLoc.
+ if (auto ModuleMapFile = MM.getContainingModuleMapFile(Mod))
+ CollectIncludingModuleMaps(*ModuleMapFile);
+ // For inferred modules, the module map that allowed inferring is not in
+ // the include chain of the virtual containing module map file. It did
+ // affect the compilation, though.
+ if (auto ModuleMapFile = MM.getModuleMapFileForUniquing(Mod))
+ CollectIncludingModuleMaps(*ModuleMapFile);
+ }
+ };
+
+ for (const Module *CurrentModule : ModulesToProcess) {
+ CollectIncludingMapsFromAncestors(CurrentModule);
+ for (const Module *ImportedModule : CurrentModule->Imports)
+ CollectIncludingMapsFromAncestors(ImportedModule);
+ for (const Module *UndeclaredModule : CurrentModule->UndeclaredUses)
+ CollectIncludingMapsFromAncestors(UndeclaredModule);
+ }
+
+ return ModuleMaps;
+}
+
class ASTTypeWriter {
ASTWriter &Writer;
ASTWriter::RecordData Record;
@@ -175,10 +263,19 @@ public:
};
class TypeLocWriter : public TypeLocVisitor<TypeLocWriter> {
+ using LocSeq = SourceLocationSequence;
+
ASTRecordWriter &Record;
+ LocSeq *Seq;
+
+ void addSourceLocation(SourceLocation Loc) {
+ Record.AddSourceLocation(Loc, Seq);
+ }
+ void addSourceRange(SourceRange Range) { Record.AddSourceRange(Range, Seq); }
public:
- TypeLocWriter(ASTRecordWriter &Record) : Record(Record) {}
+ TypeLocWriter(ASTRecordWriter &Record, LocSeq *Seq)
+ : Record(Record), Seq(Seq) {}
#define ABSTRACT_TYPELOC(CLASS, PARENT)
#define TYPELOC(CLASS, PARENT) \
@@ -196,7 +293,7 @@ void TypeLocWriter::VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
}
void TypeLocWriter::VisitBuiltinTypeLoc(BuiltinTypeLoc TL) {
- Record.AddSourceLocation(TL.getBuiltinLoc());
+ addSourceLocation(TL.getBuiltinLoc());
if (TL.needsExtraLocalData()) {
Record.push_back(TL.getWrittenTypeSpec());
Record.push_back(static_cast<uint64_t>(TL.getWrittenSignSpec()));
@@ -206,11 +303,11 @@ void TypeLocWriter::VisitBuiltinTypeLoc(BuiltinTypeLoc TL) {
}
void TypeLocWriter::VisitComplexTypeLoc(ComplexTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitPointerTypeLoc(PointerTypeLoc TL) {
- Record.AddSourceLocation(TL.getStarLoc());
+ addSourceLocation(TL.getStarLoc());
}
void TypeLocWriter::VisitDecayedTypeLoc(DecayedTypeLoc TL) {
@@ -222,25 +319,25 @@ void TypeLocWriter::VisitAdjustedTypeLoc(AdjustedTypeLoc TL) {
}
void TypeLocWriter::VisitBlockPointerTypeLoc(BlockPointerTypeLoc TL) {
- Record.AddSourceLocation(TL.getCaretLoc());
+ addSourceLocation(TL.getCaretLoc());
}
void TypeLocWriter::VisitLValueReferenceTypeLoc(LValueReferenceTypeLoc TL) {
- Record.AddSourceLocation(TL.getAmpLoc());
+ addSourceLocation(TL.getAmpLoc());
}
void TypeLocWriter::VisitRValueReferenceTypeLoc(RValueReferenceTypeLoc TL) {
- Record.AddSourceLocation(TL.getAmpAmpLoc());
+ addSourceLocation(TL.getAmpAmpLoc());
}
void TypeLocWriter::VisitMemberPointerTypeLoc(MemberPointerTypeLoc TL) {
- Record.AddSourceLocation(TL.getStarLoc());
+ addSourceLocation(TL.getStarLoc());
Record.AddTypeSourceInfo(TL.getClassTInfo());
}
void TypeLocWriter::VisitArrayTypeLoc(ArrayTypeLoc TL) {
- Record.AddSourceLocation(TL.getLBracketLoc());
- Record.AddSourceLocation(TL.getRBracketLoc());
+ addSourceLocation(TL.getLBracketLoc());
+ addSourceLocation(TL.getRBracketLoc());
Record.push_back(TL.getSizeExpr() ? 1 : 0);
if (TL.getSizeExpr())
Record.AddStmt(TL.getSizeExpr());
@@ -265,56 +362,56 @@ void TypeLocWriter::VisitDependentSizedArrayTypeLoc(
void TypeLocWriter::VisitDependentAddressSpaceTypeLoc(
DependentAddressSpaceTypeLoc TL) {
- Record.AddSourceLocation(TL.getAttrNameLoc());
+ addSourceLocation(TL.getAttrNameLoc());
SourceRange range = TL.getAttrOperandParensRange();
- Record.AddSourceLocation(range.getBegin());
- Record.AddSourceLocation(range.getEnd());
+ addSourceLocation(range.getBegin());
+ addSourceLocation(range.getEnd());
Record.AddStmt(TL.getAttrExprOperand());
}
void TypeLocWriter::VisitDependentSizedExtVectorTypeLoc(
DependentSizedExtVectorTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitVectorTypeLoc(VectorTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitDependentVectorTypeLoc(
DependentVectorTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitConstantMatrixTypeLoc(ConstantMatrixTypeLoc TL) {
- Record.AddSourceLocation(TL.getAttrNameLoc());
+ addSourceLocation(TL.getAttrNameLoc());
SourceRange range = TL.getAttrOperandParensRange();
- Record.AddSourceLocation(range.getBegin());
- Record.AddSourceLocation(range.getEnd());
+ addSourceLocation(range.getBegin());
+ addSourceLocation(range.getEnd());
Record.AddStmt(TL.getAttrRowOperand());
Record.AddStmt(TL.getAttrColumnOperand());
}
void TypeLocWriter::VisitDependentSizedMatrixTypeLoc(
DependentSizedMatrixTypeLoc TL) {
- Record.AddSourceLocation(TL.getAttrNameLoc());
+ addSourceLocation(TL.getAttrNameLoc());
SourceRange range = TL.getAttrOperandParensRange();
- Record.AddSourceLocation(range.getBegin());
- Record.AddSourceLocation(range.getEnd());
+ addSourceLocation(range.getBegin());
+ addSourceLocation(range.getEnd());
Record.AddStmt(TL.getAttrRowOperand());
Record.AddStmt(TL.getAttrColumnOperand());
}
void TypeLocWriter::VisitFunctionTypeLoc(FunctionTypeLoc TL) {
- Record.AddSourceLocation(TL.getLocalRangeBegin());
- Record.AddSourceLocation(TL.getLParenLoc());
- Record.AddSourceLocation(TL.getRParenLoc());
- Record.AddSourceRange(TL.getExceptionSpecRange());
- Record.AddSourceLocation(TL.getLocalRangeEnd());
+ addSourceLocation(TL.getLocalRangeBegin());
+ addSourceLocation(TL.getLParenLoc());
+ addSourceLocation(TL.getRParenLoc());
+ addSourceRange(TL.getExceptionSpecRange());
+ addSourceLocation(TL.getLocalRangeEnd());
for (unsigned i = 0, e = TL.getNumParams(); i != e; ++i)
Record.AddDeclRef(TL.getParam(i));
}
@@ -328,181 +425,198 @@ void TypeLocWriter::VisitFunctionNoProtoTypeLoc(FunctionNoProtoTypeLoc TL) {
}
void TypeLocWriter::VisitUnresolvedUsingTypeLoc(UnresolvedUsingTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
+}
+
+void TypeLocWriter::VisitUsingTypeLoc(UsingTypeLoc TL) {
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitTypedefTypeLoc(TypedefTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitObjCTypeParamTypeLoc(ObjCTypeParamTypeLoc TL) {
if (TL.getNumProtocols()) {
- Record.AddSourceLocation(TL.getProtocolLAngleLoc());
- Record.AddSourceLocation(TL.getProtocolRAngleLoc());
+ addSourceLocation(TL.getProtocolLAngleLoc());
+ addSourceLocation(TL.getProtocolRAngleLoc());
}
for (unsigned i = 0, e = TL.getNumProtocols(); i != e; ++i)
- Record.AddSourceLocation(TL.getProtocolLoc(i));
+ addSourceLocation(TL.getProtocolLoc(i));
}
void TypeLocWriter::VisitTypeOfExprTypeLoc(TypeOfExprTypeLoc TL) {
- Record.AddSourceLocation(TL.getTypeofLoc());
- Record.AddSourceLocation(TL.getLParenLoc());
- Record.AddSourceLocation(TL.getRParenLoc());
+ addSourceLocation(TL.getTypeofLoc());
+ addSourceLocation(TL.getLParenLoc());
+ addSourceLocation(TL.getRParenLoc());
}
void TypeLocWriter::VisitTypeOfTypeLoc(TypeOfTypeLoc TL) {
- Record.AddSourceLocation(TL.getTypeofLoc());
- Record.AddSourceLocation(TL.getLParenLoc());
- Record.AddSourceLocation(TL.getRParenLoc());
- Record.AddTypeSourceInfo(TL.getUnderlyingTInfo());
+ addSourceLocation(TL.getTypeofLoc());
+ addSourceLocation(TL.getLParenLoc());
+ addSourceLocation(TL.getRParenLoc());
+ Record.AddTypeSourceInfo(TL.getUnmodifiedTInfo());
}
void TypeLocWriter::VisitDecltypeTypeLoc(DecltypeTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getDecltypeLoc());
+ addSourceLocation(TL.getRParenLoc());
}
void TypeLocWriter::VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) {
- Record.AddSourceLocation(TL.getKWLoc());
- Record.AddSourceLocation(TL.getLParenLoc());
- Record.AddSourceLocation(TL.getRParenLoc());
+ addSourceLocation(TL.getKWLoc());
+ addSourceLocation(TL.getLParenLoc());
+ addSourceLocation(TL.getRParenLoc());
Record.AddTypeSourceInfo(TL.getUnderlyingTInfo());
}
+void ASTRecordWriter::AddConceptReference(const ConceptReference *CR) {
+ assert(CR);
+ AddNestedNameSpecifierLoc(CR->getNestedNameSpecifierLoc());
+ AddSourceLocation(CR->getTemplateKWLoc());
+ AddDeclarationNameInfo(CR->getConceptNameInfo());
+ AddDeclRef(CR->getFoundDecl());
+ AddDeclRef(CR->getNamedConcept());
+ push_back(CR->getTemplateArgsAsWritten() != nullptr);
+ if (CR->getTemplateArgsAsWritten())
+ AddASTTemplateArgumentListInfo(CR->getTemplateArgsAsWritten());
+}
+
void TypeLocWriter::VisitAutoTypeLoc(AutoTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
- Record.push_back(TL.isConstrained());
- if (TL.isConstrained()) {
- Record.AddNestedNameSpecifierLoc(TL.getNestedNameSpecifierLoc());
- Record.AddSourceLocation(TL.getTemplateKWLoc());
- Record.AddSourceLocation(TL.getConceptNameLoc());
- Record.AddDeclRef(TL.getFoundDecl());
- Record.AddSourceLocation(TL.getLAngleLoc());
- Record.AddSourceLocation(TL.getRAngleLoc());
- for (unsigned I = 0; I < TL.getNumArgs(); ++I)
- Record.AddTemplateArgumentLocInfo(TL.getTypePtr()->getArg(I).getKind(),
- TL.getArgLocInfo(I));
- }
+ addSourceLocation(TL.getNameLoc());
+ auto *CR = TL.getConceptReference();
+ Record.push_back(TL.isConstrained() && CR);
+ if (TL.isConstrained() && CR)
+ Record.AddConceptReference(CR);
+ Record.push_back(TL.isDecltypeAuto());
+ if (TL.isDecltypeAuto())
+ addSourceLocation(TL.getRParenLoc());
}
void TypeLocWriter::VisitDeducedTemplateSpecializationTypeLoc(
DeducedTemplateSpecializationTypeLoc TL) {
- Record.AddSourceLocation(TL.getTemplateNameLoc());
+ addSourceLocation(TL.getTemplateNameLoc());
}
void TypeLocWriter::VisitRecordTypeLoc(RecordTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitEnumTypeLoc(EnumTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitAttributedTypeLoc(AttributedTypeLoc TL) {
Record.AddAttr(TL.getAttr());
}
+void TypeLocWriter::VisitBTFTagAttributedTypeLoc(BTFTagAttributedTypeLoc TL) {
+ // Nothing to do.
+}
+
void TypeLocWriter::VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitSubstTemplateTypeParmTypeLoc(
SubstTemplateTypeParmTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitSubstTemplateTypeParmPackTypeLoc(
SubstTemplateTypeParmPackTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitTemplateSpecializationTypeLoc(
TemplateSpecializationTypeLoc TL) {
- Record.AddSourceLocation(TL.getTemplateKeywordLoc());
- Record.AddSourceLocation(TL.getTemplateNameLoc());
- Record.AddSourceLocation(TL.getLAngleLoc());
- Record.AddSourceLocation(TL.getRAngleLoc());
+ addSourceLocation(TL.getTemplateKeywordLoc());
+ addSourceLocation(TL.getTemplateNameLoc());
+ addSourceLocation(TL.getLAngleLoc());
+ addSourceLocation(TL.getRAngleLoc());
for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
Record.AddTemplateArgumentLocInfo(TL.getArgLoc(i).getArgument().getKind(),
TL.getArgLoc(i).getLocInfo());
}
void TypeLocWriter::VisitParenTypeLoc(ParenTypeLoc TL) {
- Record.AddSourceLocation(TL.getLParenLoc());
- Record.AddSourceLocation(TL.getRParenLoc());
+ addSourceLocation(TL.getLParenLoc());
+ addSourceLocation(TL.getRParenLoc());
}
void TypeLocWriter::VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc TL) {
- Record.AddSourceLocation(TL.getExpansionLoc());
+ addSourceLocation(TL.getExpansionLoc());
}
void TypeLocWriter::VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
- Record.AddSourceLocation(TL.getElaboratedKeywordLoc());
+ addSourceLocation(TL.getElaboratedKeywordLoc());
Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
}
void TypeLocWriter::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
- Record.AddSourceLocation(TL.getElaboratedKeywordLoc());
+ addSourceLocation(TL.getElaboratedKeywordLoc());
Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitDependentTemplateSpecializationTypeLoc(
DependentTemplateSpecializationTypeLoc TL) {
- Record.AddSourceLocation(TL.getElaboratedKeywordLoc());
+ addSourceLocation(TL.getElaboratedKeywordLoc());
Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
- Record.AddSourceLocation(TL.getTemplateKeywordLoc());
- Record.AddSourceLocation(TL.getTemplateNameLoc());
- Record.AddSourceLocation(TL.getLAngleLoc());
- Record.AddSourceLocation(TL.getRAngleLoc());
+ addSourceLocation(TL.getTemplateKeywordLoc());
+ addSourceLocation(TL.getTemplateNameLoc());
+ addSourceLocation(TL.getLAngleLoc());
+ addSourceLocation(TL.getRAngleLoc());
for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I)
Record.AddTemplateArgumentLocInfo(TL.getArgLoc(I).getArgument().getKind(),
TL.getArgLoc(I).getLocInfo());
}
void TypeLocWriter::VisitPackExpansionTypeLoc(PackExpansionTypeLoc TL) {
- Record.AddSourceLocation(TL.getEllipsisLoc());
+ addSourceLocation(TL.getEllipsisLoc());
}
void TypeLocWriter::VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameEndLoc());
}
void TypeLocWriter::VisitObjCObjectTypeLoc(ObjCObjectTypeLoc TL) {
Record.push_back(TL.hasBaseTypeAsWritten());
- Record.AddSourceLocation(TL.getTypeArgsLAngleLoc());
- Record.AddSourceLocation(TL.getTypeArgsRAngleLoc());
+ addSourceLocation(TL.getTypeArgsLAngleLoc());
+ addSourceLocation(TL.getTypeArgsRAngleLoc());
for (unsigned i = 0, e = TL.getNumTypeArgs(); i != e; ++i)
Record.AddTypeSourceInfo(TL.getTypeArgTInfo(i));
- Record.AddSourceLocation(TL.getProtocolLAngleLoc());
- Record.AddSourceLocation(TL.getProtocolRAngleLoc());
+ addSourceLocation(TL.getProtocolLAngleLoc());
+ addSourceLocation(TL.getProtocolRAngleLoc());
for (unsigned i = 0, e = TL.getNumProtocols(); i != e; ++i)
- Record.AddSourceLocation(TL.getProtocolLoc(i));
+ addSourceLocation(TL.getProtocolLoc(i));
}
void TypeLocWriter::VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc TL) {
- Record.AddSourceLocation(TL.getStarLoc());
+ addSourceLocation(TL.getStarLoc());
}
void TypeLocWriter::VisitAtomicTypeLoc(AtomicTypeLoc TL) {
- Record.AddSourceLocation(TL.getKWLoc());
- Record.AddSourceLocation(TL.getLParenLoc());
- Record.AddSourceLocation(TL.getRParenLoc());
+ addSourceLocation(TL.getKWLoc());
+ addSourceLocation(TL.getLParenLoc());
+ addSourceLocation(TL.getRParenLoc());
}
void TypeLocWriter::VisitPipeTypeLoc(PipeTypeLoc TL) {
- Record.AddSourceLocation(TL.getKWLoc());
+ addSourceLocation(TL.getKWLoc());
}
-void TypeLocWriter::VisitExtIntTypeLoc(clang::ExtIntTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+void TypeLocWriter::VisitBitIntTypeLoc(clang::BitIntTypeLoc TL) {
+ addSourceLocation(TL.getNameLoc());
}
-void TypeLocWriter::VisitDependentExtIntTypeLoc(
- clang::DependentExtIntTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+void TypeLocWriter::VisitDependentBitIntTypeLoc(
+ clang::DependentBitIntTypeLoc TL) {
+ addSourceLocation(TL.getNameLoc());
}
void ASTWriter::WriteTypeAbbrevs() {
@@ -516,30 +630,6 @@ void ASTWriter::WriteTypeAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 3)); // Quals
TypeExtQualAbbrev = Stream.EmitAbbrev(std::move(Abv));
-
- // Abbreviation for TYPE_FUNCTION_PROTO
- Abv = std::make_shared<BitCodeAbbrev>();
- Abv->Add(BitCodeAbbrevOp(serialization::TYPE_FUNCTION_PROTO));
- // FunctionType
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ReturnType
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // NoReturn
- Abv->Add(BitCodeAbbrevOp(0)); // HasRegParm
- Abv->Add(BitCodeAbbrevOp(0)); // RegParm
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // CC
- Abv->Add(BitCodeAbbrevOp(0)); // ProducesResult
- Abv->Add(BitCodeAbbrevOp(0)); // NoCallerSavedRegs
- Abv->Add(BitCodeAbbrevOp(0)); // NoCfCheck
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // CmseNSCall
- // FunctionProtoType
- Abv->Add(BitCodeAbbrevOp(0)); // IsVariadic
- Abv->Add(BitCodeAbbrevOp(0)); // HasTrailingReturn
- Abv->Add(BitCodeAbbrevOp(0)); // TypeQuals
- Abv->Add(BitCodeAbbrevOp(0)); // RefQualifier
- Abv->Add(BitCodeAbbrevOp(EST_None)); // ExceptionSpec
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // NumParams
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Params
- TypeFunctionProtoAbbrev = Stream.EmitAbbrev(std::move(Abv));
}
//===----------------------------------------------------------------------===//
@@ -667,6 +757,7 @@ static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
RECORD(EXPR_USER_DEFINED_LITERAL);
RECORD(EXPR_CXX_STD_INITIALIZER_LIST);
RECORD(EXPR_CXX_BOOL_LITERAL);
+ RECORD(EXPR_CXX_PAREN_LIST_INIT);
RECORD(EXPR_CXX_NULL_PTR_LITERAL);
RECORD(EXPR_CXX_TYPEID_EXPR);
RECORD(EXPR_CXX_TYPEID_TYPE);
@@ -719,7 +810,6 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(MODULE_MAP_FILE);
RECORD(IMPORTS);
RECORD(ORIGINAL_FILE);
- RECORD(ORIGINAL_PCH_DIR);
RECORD(ORIGINAL_FILE_ID);
RECORD(INPUT_FILE_OFFSETS);
@@ -749,7 +839,6 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(METHOD_POOL);
RECORD(PP_COUNTER_VALUE);
RECORD(SOURCE_LOCATION_OFFSETS);
- RECORD(SOURCE_LOCATION_PRELOADS);
RECORD(EXT_VECTOR_DECLS);
RECORD(UNUSED_FILESCOPED_DECLS);
RECORD(PPD_ENTITIES_OFFSETS);
@@ -789,6 +878,7 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(CUDA_PRAGMA_FORCE_HOST_DEVICE_DEPTH);
RECORD(PP_CONDITIONAL_STACK);
RECORD(DECLS_TO_CHECK_FOR_DEFERRED_DIAGS);
+ RECORD(PP_ASSUME_NONNULL_LOC);
// SourceManager Block.
BLOCK(SOURCE_MANAGER_BLOCK);
@@ -815,6 +905,7 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(SUBMODULE_TOPHEADER);
RECORD(SUBMODULE_UMBRELLA_DIR);
RECORD(SUBMODULE_IMPORTS);
+ RECORD(SUBMODULE_AFFECTING_MODULES);
RECORD(SUBMODULE_EXPORTS);
RECORD(SUBMODULE_REQUIRES);
RECORD(SUBMODULE_EXCLUDED_HEADER);
@@ -938,7 +1029,6 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(DECL_INDIRECTFIELD);
RECORD(DECL_EXPANDED_NON_TYPE_TEMPLATE_PARM_PACK);
RECORD(DECL_EXPANDED_TEMPLATE_TEMPLATE_PARM_PACK);
- RECORD(DECL_CLASS_SCOPE_FUNCTION_SPECIALIZATION);
RECORD(DECL_IMPORT);
RECORD(DECL_OMP_THREADPRIVATE);
RECORD(DECL_EMPTY);
@@ -948,6 +1038,7 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(DECL_PRAGMA_DETECT_MISMATCH);
RECORD(DECL_OMP_DECLARE_REDUCTION);
RECORD(DECL_OMP_ALLOCATE);
+ RECORD(DECL_HLSL_BUFFER);
// Statements and Exprs can occur in the Decls and Types block.
AddStmtsExprs(Stream, Record);
@@ -965,6 +1056,7 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(SIGNATURE);
RECORD(AST_BLOCK_HASH);
RECORD(DIAGNOSTIC_OPTIONS);
+ RECORD(HEADER_SEARCH_PATHS);
RECORD(DIAG_PRAGMA_MAPPINGS);
#undef RECORD
@@ -1030,82 +1122,169 @@ adjustFilenameForRelocatableAST(const char *Filename, StringRef BaseDir) {
}
std::pair<ASTFileSignature, ASTFileSignature>
-ASTWriter::createSignature(StringRef AllBytes, StringRef ASTBlockBytes) {
+ASTWriter::createSignature() const {
+ StringRef AllBytes(Buffer.data(), Buffer.size());
+
llvm::SHA1 Hasher;
- Hasher.update(ASTBlockBytes);
- auto Hash = Hasher.result();
- ASTFileSignature ASTBlockHash = ASTFileSignature::create(Hash);
+ Hasher.update(AllBytes.slice(ASTBlockRange.first, ASTBlockRange.second));
+ ASTFileSignature ASTBlockHash = ASTFileSignature::create(Hasher.result());
- // Add the remaining bytes (i.e. bytes before the unhashed control block that
- // are not part of the AST block).
+ // Add the remaining bytes:
+ // 1. Before the unhashed control block.
+ Hasher.update(AllBytes.slice(0, UnhashedControlBlockRange.first));
+ // 2. Between the unhashed control block and the AST block.
Hasher.update(
- AllBytes.take_front(ASTBlockBytes.bytes_end() - AllBytes.bytes_begin()));
- Hasher.update(
- AllBytes.take_back(AllBytes.bytes_end() - ASTBlockBytes.bytes_end()));
- Hash = Hasher.result();
- ASTFileSignature Signature = ASTFileSignature::create(Hash);
+ AllBytes.slice(UnhashedControlBlockRange.second, ASTBlockRange.first));
+ // 3. After the AST block.
+ Hasher.update(AllBytes.slice(ASTBlockRange.second, StringRef::npos));
+ ASTFileSignature Signature = ASTFileSignature::create(Hasher.result());
return std::make_pair(ASTBlockHash, Signature);
}
-ASTFileSignature ASTWriter::writeUnhashedControlBlock(Preprocessor &PP,
- ASTContext &Context) {
+ASTFileSignature ASTWriter::backpatchSignature() {
+ if (!WritingModule ||
+ !PP->getHeaderSearchInfo().getHeaderSearchOpts().ModulesHashContent)
+ return {};
+
+ // For implicit modules, write the hash of the PCM as its signature.
+
+ auto BackpatchSignatureAt = [&](const ASTFileSignature &S, uint64_t BitNo) {
+ for (uint8_t Byte : S) {
+ Stream.BackpatchByte(BitNo, Byte);
+ BitNo += 8;
+ }
+ };
+
+ ASTFileSignature ASTBlockHash;
+ ASTFileSignature Signature;
+ std::tie(ASTBlockHash, Signature) = createSignature();
+
+ BackpatchSignatureAt(ASTBlockHash, ASTBlockHashOffset);
+ BackpatchSignatureAt(Signature, SignatureOffset);
+
+ return Signature;
+}
+
+void ASTWriter::writeUnhashedControlBlock(Preprocessor &PP,
+ ASTContext &Context) {
+ using namespace llvm;
+
// Flush first to prepare the PCM hash (signature).
Stream.FlushToWord();
- auto StartOfUnhashedControl = Stream.GetCurrentBitNo() >> 3;
+ UnhashedControlBlockRange.first = Stream.GetCurrentBitNo() >> 3;
// Enter the block and prepare to write records.
RecordData Record;
Stream.EnterSubblock(UNHASHED_CONTROL_BLOCK_ID, 5);
// For implicit modules, write the hash of the PCM as its signature.
- ASTFileSignature Signature;
if (WritingModule &&
PP.getHeaderSearchInfo().getHeaderSearchOpts().ModulesHashContent) {
- ASTFileSignature ASTBlockHash;
- auto ASTBlockStartByte = ASTBlockRange.first >> 3;
- auto ASTBlockByteLength = (ASTBlockRange.second >> 3) - ASTBlockStartByte;
- std::tie(ASTBlockHash, Signature) = createSignature(
- StringRef(Buffer.begin(), StartOfUnhashedControl),
- StringRef(Buffer.begin() + ASTBlockStartByte, ASTBlockByteLength));
-
- Record.append(ASTBlockHash.begin(), ASTBlockHash.end());
- Stream.EmitRecord(AST_BLOCK_HASH, Record);
+ // At this point, we don't know the actual signature of the file or the AST
+ // block - we're only able to compute those at the end of the serialization
+ // process. Let's store dummy signatures for now, and replace them with the
+ // real ones later on.
+ // The bitstream VBR-encodes record elements, which makes backpatching them
+ // really difficult. Let's store the signatures as blobs instead - they are
+ // guaranteed to be word-aligned, and we control their format/encoding.
+ auto Dummy = ASTFileSignature::createDummy();
+ SmallString<128> Blob{Dummy.begin(), Dummy.end()};
+
+ auto Abbrev = std::make_shared<BitCodeAbbrev>();
+ Abbrev->Add(BitCodeAbbrevOp(AST_BLOCK_HASH));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned ASTBlockHashAbbrev = Stream.EmitAbbrev(std::move(Abbrev));
+
+ Abbrev = std::make_shared<BitCodeAbbrev>();
+ Abbrev->Add(BitCodeAbbrevOp(SIGNATURE));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned SignatureAbbrev = Stream.EmitAbbrev(std::move(Abbrev));
+
+ Record.push_back(AST_BLOCK_HASH);
+ Stream.EmitRecordWithBlob(ASTBlockHashAbbrev, Record, Blob);
+ ASTBlockHashOffset = Stream.GetCurrentBitNo() - Blob.size() * 8;
Record.clear();
- Record.append(Signature.begin(), Signature.end());
- Stream.EmitRecord(SIGNATURE, Record);
+
+ Record.push_back(SIGNATURE);
+ Stream.EmitRecordWithBlob(SignatureAbbrev, Record, Blob);
+ SignatureOffset = Stream.GetCurrentBitNo() - Blob.size() * 8;
Record.clear();
}
+ const auto &HSOpts = PP.getHeaderSearchInfo().getHeaderSearchOpts();
+
// Diagnostic options.
const auto &Diags = Context.getDiagnostics();
const DiagnosticOptions &DiagOpts = Diags.getDiagnosticOptions();
+ if (!HSOpts.ModulesSkipDiagnosticOptions) {
#define DIAGOPT(Name, Bits, Default) Record.push_back(DiagOpts.Name);
#define ENUM_DIAGOPT(Name, Type, Bits, Default) \
Record.push_back(static_cast<unsigned>(DiagOpts.get##Name()));
#include "clang/Basic/DiagnosticOptions.def"
- Record.push_back(DiagOpts.Warnings.size());
- for (unsigned I = 0, N = DiagOpts.Warnings.size(); I != N; ++I)
- AddString(DiagOpts.Warnings[I], Record);
- Record.push_back(DiagOpts.Remarks.size());
- for (unsigned I = 0, N = DiagOpts.Remarks.size(); I != N; ++I)
- AddString(DiagOpts.Remarks[I], Record);
- // Note: we don't serialize the log or serialization file names, because they
- // are generally transient files and will almost always be overridden.
- Stream.EmitRecord(DIAGNOSTIC_OPTIONS, Record);
-
- // Write out the diagnostic/pragma mappings.
- WritePragmaDiagnosticMappings(Diags, /* isModule = */ WritingModule);
+ Record.push_back(DiagOpts.Warnings.size());
+ for (unsigned I = 0, N = DiagOpts.Warnings.size(); I != N; ++I)
+ AddString(DiagOpts.Warnings[I], Record);
+ Record.push_back(DiagOpts.Remarks.size());
+ for (unsigned I = 0, N = DiagOpts.Remarks.size(); I != N; ++I)
+ AddString(DiagOpts.Remarks[I], Record);
+ // Note: we don't serialize the log or serialization file names, because
+ // they are generally transient files and will almost always be overridden.
+ Stream.EmitRecord(DIAGNOSTIC_OPTIONS, Record);
+ Record.clear();
+ }
+
+ // Header search paths.
+ if (!HSOpts.ModulesSkipHeaderSearchPaths) {
+ // Include entries.
+ Record.push_back(HSOpts.UserEntries.size());
+ for (unsigned I = 0, N = HSOpts.UserEntries.size(); I != N; ++I) {
+ const HeaderSearchOptions::Entry &Entry = HSOpts.UserEntries[I];
+ AddString(Entry.Path, Record);
+ Record.push_back(static_cast<unsigned>(Entry.Group));
+ Record.push_back(Entry.IsFramework);
+ Record.push_back(Entry.IgnoreSysRoot);
+ }
+
+ // System header prefixes.
+ Record.push_back(HSOpts.SystemHeaderPrefixes.size());
+ for (unsigned I = 0, N = HSOpts.SystemHeaderPrefixes.size(); I != N; ++I) {
+ AddString(HSOpts.SystemHeaderPrefixes[I].Prefix, Record);
+ Record.push_back(HSOpts.SystemHeaderPrefixes[I].IsSystemHeader);
+ }
+
+ // VFS overlay files.
+ Record.push_back(HSOpts.VFSOverlayFiles.size());
+ for (StringRef VFSOverlayFile : HSOpts.VFSOverlayFiles)
+ AddString(VFSOverlayFile, Record);
+
+ Stream.EmitRecord(HEADER_SEARCH_PATHS, Record);
+ }
+
+ if (!HSOpts.ModulesSkipPragmaDiagnosticMappings)
+ WritePragmaDiagnosticMappings(Diags, /* isModule = */ WritingModule);
+
+ // Header search entry usage.
+ auto HSEntryUsage = PP.getHeaderSearchInfo().computeUserEntryUsage();
+ auto Abbrev = std::make_shared<BitCodeAbbrev>();
+ Abbrev->Add(BitCodeAbbrevOp(HEADER_SEARCH_ENTRY_USAGE));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // Number of bits.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Bit vector.
+ unsigned HSUsageAbbrevCode = Stream.EmitAbbrev(std::move(Abbrev));
+ {
+ RecordData::value_type Record[] = {HEADER_SEARCH_ENTRY_USAGE,
+ HSEntryUsage.size()};
+ Stream.EmitRecordWithBlob(HSUsageAbbrevCode, Record, bytes(HSEntryUsage));
+ }
// Leave the options block.
Stream.ExitBlock();
- return Signature;
+ UnhashedControlBlockRange.second = Stream.GetCurrentBitNo() >> 3;
}
/// Write the control block.
void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
- StringRef isysroot,
- const std::string &OutputFile) {
+ StringRef isysroot) {
using namespace llvm;
Stream.EnterSubblock(CONTROL_BLOCK_ID, 5);
@@ -1119,6 +1298,8 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang maj.
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang min.
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Relocatable
+ // Standard C++ module
+ MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Timestamps
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Errors
MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // SVN branch/tag
@@ -1126,15 +1307,15 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
assert((!WritingModule || isysroot.empty()) &&
"writing module as a relocatable PCH?");
{
- RecordData::value_type Record[] = {
- METADATA,
- VERSION_MAJOR,
- VERSION_MINOR,
- CLANG_VERSION_MAJOR,
- CLANG_VERSION_MINOR,
- !isysroot.empty(),
- IncludeTimestamps,
- ASTHasCompilerErrors};
+ RecordData::value_type Record[] = {METADATA,
+ VERSION_MAJOR,
+ VERSION_MINOR,
+ CLANG_VERSION_MAJOR,
+ CLANG_VERSION_MINOR,
+ !isysroot.empty(),
+ isWritingStdCXXNamedModules(),
+ IncludeTimestamps,
+ ASTHasCompilerErrors};
Stream.EmitRecordWithBlob(MetadataAbbrevCode, Record,
getClangFullRepositoryVersion());
}
@@ -1150,16 +1331,26 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
}
if (WritingModule && WritingModule->Directory) {
- SmallString<128> BaseDir(WritingModule->Directory->getName());
+ SmallString<128> BaseDir;
+ if (PP.getHeaderSearchInfo().getHeaderSearchOpts().ModuleFileHomeIsCwd) {
+ // Use the current working directory as the base path for all inputs.
+ auto CWD =
+ Context.getSourceManager().getFileManager().getOptionalDirectoryRef(
+ ".");
+ BaseDir.assign(CWD->getName());
+ } else {
+ BaseDir.assign(WritingModule->Directory->getName());
+ }
cleanPathForOutput(Context.getSourceManager().getFileManager(), BaseDir);
// If the home of the module is the current working directory, then we
// want to pick up the cwd of the build process loading the module, not
// our cwd, when we load this module.
- if (!PP.getHeaderSearchInfo()
- .getHeaderSearchOpts()
- .ModuleMapFileHomeIsCwd ||
- WritingModule->Directory->getName() != StringRef(".")) {
+ if (!PP.getHeaderSearchInfo().getHeaderSearchOpts().ModuleFileHomeIsCwd &&
+ (!PP.getHeaderSearchInfo()
+ .getHeaderSearchOpts()
+ .ModuleMapFileHomeIsCwd ||
+ WritingModule->Directory->getName() != StringRef("."))) {
// Module directory.
auto Abbrev = std::make_shared<BitCodeAbbrev>();
Abbrev->Add(BitCodeAbbrevOp(MODULE_DIRECTORY));
@@ -1183,7 +1374,8 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
auto &Map = PP.getHeaderSearchInfo().getModuleMap();
AddPath(WritingModule->PresumedModuleMapFile.empty()
- ? Map.getModuleMapFileForUniquing(WritingModule)->getName()
+ ? Map.getModuleMapFileForUniquing(WritingModule)
+ ->getNameAsRequested()
: StringRef(WritingModule->PresumedModuleMapFile),
Record);
@@ -1191,8 +1383,13 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
if (auto *AdditionalModMaps =
Map.getAdditionalModuleMapFiles(WritingModule)) {
Record.push_back(AdditionalModMaps->size());
- for (const FileEntry *F : *AdditionalModMaps)
- AddPath(F->getName(), Record);
+ SmallVector<FileEntryRef, 1> ModMaps(AdditionalModMaps->begin(),
+ AdditionalModMaps->end());
+ llvm::sort(ModMaps, [](FileEntryRef A, FileEntryRef B) {
+ return A.getName() < B.getName();
+ });
+ for (FileEntryRef F : ModMaps)
+ AddPath(F.getName(), Record);
} else {
Record.push_back(0);
}
@@ -1211,18 +1408,23 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
continue;
Record.push_back((unsigned)M.Kind); // FIXME: Stable encoding
+ Record.push_back(M.StandardCXXModule);
AddSourceLocation(M.ImportLoc, Record);
- // If we have calculated signature, there is no need to store
- // the size or timestamp.
- Record.push_back(M.Signature ? 0 : M.File->getSize());
- Record.push_back(M.Signature ? 0 : getTimestampForOutput(M.File));
-
- for (auto I : M.Signature)
- Record.push_back(I);
+ // We don't want to hard code the information about imported modules
+ // in the C++20 named modules.
+ if (!M.StandardCXXModule) {
+ // If we have calculated signature, there is no need to store
+ // the size or timestamp.
+ Record.push_back(M.Signature ? 0 : M.File.getSize());
+ Record.push_back(M.Signature ? 0 : getTimestampForOutput(M.File));
+ llvm::append_range(Record, M.Signature);
+ }
AddString(M.ModuleName, Record);
- AddPath(M.FileName, Record);
+
+ if (!M.StandardCXXModule)
+ AddPath(M.FileName, Record);
}
Stream.EmitRecord(IMPORTS, Record);
}
@@ -1294,27 +1496,10 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
// Header search options.
Record.clear();
- const HeaderSearchOptions &HSOpts
- = PP.getHeaderSearchInfo().getHeaderSearchOpts();
- AddString(HSOpts.Sysroot, Record);
-
- // Include entries.
- Record.push_back(HSOpts.UserEntries.size());
- for (unsigned I = 0, N = HSOpts.UserEntries.size(); I != N; ++I) {
- const HeaderSearchOptions::Entry &Entry = HSOpts.UserEntries[I];
- AddString(Entry.Path, Record);
- Record.push_back(static_cast<unsigned>(Entry.Group));
- Record.push_back(Entry.IsFramework);
- Record.push_back(Entry.IgnoreSysRoot);
- }
-
- // System header prefixes.
- Record.push_back(HSOpts.SystemHeaderPrefixes.size());
- for (unsigned I = 0, N = HSOpts.SystemHeaderPrefixes.size(); I != N; ++I) {
- AddString(HSOpts.SystemHeaderPrefixes[I].Prefix, Record);
- Record.push_back(HSOpts.SystemHeaderPrefixes[I].IsSystemHeader);
- }
+ const HeaderSearchOptions &HSOpts =
+ PP.getHeaderSearchInfo().getHeaderSearchOpts();
+ AddString(HSOpts.Sysroot, Record);
AddString(HSOpts.ResourceDir, Record);
AddString(HSOpts.ModuleCachePath, Record);
AddString(HSOpts.ModuleUserBuildPath, Record);
@@ -1334,11 +1519,19 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
Record.clear();
const PreprocessorOptions &PPOpts = PP.getPreprocessorOpts();
- // Macro definitions.
- Record.push_back(PPOpts.Macros.size());
- for (unsigned I = 0, N = PPOpts.Macros.size(); I != N; ++I) {
- AddString(PPOpts.Macros[I].first, Record);
- Record.push_back(PPOpts.Macros[I].second);
+ // If we're building an implicit module with a context hash, the importer is
+ // guaranteed to have the same macros defined on the command line. Skip
+ // writing them.
+ bool SkipMacros = BuildingImplicitModule && !HSOpts.DisableModuleHash;
+ bool WriteMacros = !SkipMacros;
+ Record.push_back(WriteMacros);
+ if (WriteMacros) {
+ // Macro definitions.
+ Record.push_back(PPOpts.Macros.size());
+ for (unsigned I = 0, N = PPOpts.Macros.size(); I != N; ++I) {
+ AddString(PPOpts.Macros[I].first, Record);
+ Record.push_back(PPOpts.Macros[I].second);
+ }
}
// Includes
@@ -1363,7 +1556,7 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
// Original file name and file ID
SourceManager &SM = Context.getSourceManager();
- if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
+ if (auto MainFile = SM.getFileEntryRefForID(SM.getMainFileID())) {
auto FileAbbrev = std::make_shared<BitCodeAbbrev>();
FileAbbrev->Add(BitCodeAbbrevOp(ORIGINAL_FILE));
FileAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // File ID
@@ -1372,33 +1565,16 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
Record.clear();
Record.push_back(ORIGINAL_FILE);
- Record.push_back(SM.getMainFileID().getOpaqueValue());
+ AddFileID(SM.getMainFileID(), Record);
EmitRecordWithPath(FileAbbrevCode, Record, MainFile->getName());
}
Record.clear();
- Record.push_back(SM.getMainFileID().getOpaqueValue());
+ AddFileID(SM.getMainFileID(), Record);
Stream.EmitRecord(ORIGINAL_FILE_ID, Record);
- // Original PCH directory
- if (!OutputFile.empty() && OutputFile != "-") {
- auto Abbrev = std::make_shared<BitCodeAbbrev>();
- Abbrev->Add(BitCodeAbbrevOp(ORIGINAL_PCH_DIR));
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
- unsigned AbbrevCode = Stream.EmitAbbrev(std::move(Abbrev));
-
- SmallString<128> OutputPath(OutputFile);
-
- SM.getFileManager().makeAbsolutePath(OutputPath);
- StringRef origDir = llvm::sys::path::parent_path(OutputPath);
-
- RecordData::value_type Record[] = {ORIGINAL_PCH_DIR};
- Stream.EmitRecordWithBlob(AbbrevCode, Record, origDir);
- }
-
WriteInputFiles(Context.SourceMgr,
- PP.getHeaderSearchInfo().getHeaderSearchOpts(),
- PP.getLangOpts().Modules);
+ PP.getHeaderSearchInfo().getHeaderSearchOpts());
Stream.ExitBlock();
}
@@ -1406,19 +1582,21 @@ namespace {
/// An input file.
struct InputFileEntry {
- const FileEntry *File;
+ FileEntryRef File;
bool IsSystemFile;
bool IsTransient;
bool BufferOverridden;
- bool IsTopLevelModuleMap;
+ bool IsTopLevel;
+ bool IsModuleMap;
uint32_t ContentHash[2];
+
+ InputFileEntry(FileEntryRef File) : File(File) {}
};
} // namespace
void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
- HeaderSearchOptions &HSOpts,
- bool Modules) {
+ HeaderSearchOptions &HSOpts) {
using namespace llvm;
Stream.EnterSubblock(INPUT_FILES_BLOCK_ID, 4);
@@ -1431,8 +1609,10 @@ void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 32)); // Modification time
IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Overridden
IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Transient
+ IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Top-level
IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Module map
- IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
+ IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 16)); // Name as req. len
+ IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name as req. + name
unsigned IFAbbrevCode = Stream.EmitAbbrev(std::move(IFAbbrev));
// Create input file hash abbreviation.
@@ -1442,9 +1622,11 @@ void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
IFHAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
unsigned IFHAbbrevCode = Stream.EmitAbbrev(std::move(IFHAbbrev));
- // Get all ContentCache objects for files, sorted by whether the file is a
- // system one or not. System files go at the back, users files at the front.
- std::deque<InputFileEntry> SortedFiles;
+ uint64_t InputFilesOffsetBase = Stream.GetCurrentBitNo();
+
+ // Get all ContentCache objects for files.
+ std::vector<InputFileEntry> UserFiles;
+ std::vector<InputFileEntry> SystemFiles;
for (unsigned I = 1, N = SourceMgr.local_sloc_entry_size(); I != N; ++I) {
// Get this source location entry.
const SrcMgr::SLocEntry *SLoc = &SourceMgr.getLocalSLocEntry(I);
@@ -1458,13 +1640,16 @@ void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
if (!Cache->OrigEntry)
continue;
- InputFileEntry Entry;
- Entry.File = Cache->OrigEntry;
+ // Do not emit input files that do not affect current module.
+ if (!IsSLocAffecting[I])
+ continue;
+
+ InputFileEntry Entry(*Cache->OrigEntry);
Entry.IsSystemFile = isSystem(File.getFileCharacteristic());
Entry.IsTransient = Cache->IsTransient;
Entry.BufferOverridden = Cache->BufferOverridden;
- Entry.IsTopLevelModuleMap = isModuleMap(File.getFileCharacteristic()) &&
- File.getIncludeLoc().isInvalid();
+ Entry.IsTopLevel = File.getIncludeLoc().isInvalid();
+ Entry.IsModuleMap = isModuleMap(File.getFileCharacteristic());
auto ContentHash = hash_code(-1);
if (PP->getHeaderSearchInfo()
@@ -1474,9 +1659,8 @@ void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
if (MemBuff)
ContentHash = hash_value(MemBuff->getBuffer());
else
- // FIXME: The path should be taken from the FileEntryRef.
PP->Diag(SourceLocation(), diag::err_module_unable_to_hash_content)
- << Entry.File->getName();
+ << Entry.File.getName();
}
auto CH = llvm::APInt(64, ContentHash);
Entry.ContentHash[0] =
@@ -1485,11 +1669,15 @@ void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
static_cast<uint32_t>(CH.getHiBits(32).getZExtValue());
if (Entry.IsSystemFile)
- SortedFiles.push_back(Entry);
+ SystemFiles.push_back(Entry);
else
- SortedFiles.push_front(Entry);
+ UserFiles.push_back(Entry);
}
+ // User files go at the front, system files at the back.
+ auto SortedFiles = llvm::concat<InputFileEntry>(std::move(UserFiles),
+ std::move(SystemFiles));
+
unsigned UserFilesNum = 0;
// Write out all of the input files.
std::vector<uint64_t> InputFileOffsets;
@@ -1499,7 +1687,7 @@ void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
continue; // already recorded this file.
// Record this entry's offset.
- InputFileOffsets.push_back(Stream.GetCurrentBitNo());
+ InputFileOffsets.push_back(Stream.GetCurrentBitNo() - InputFilesOffsetBase);
InputFileID = InputFileOffsets.size();
@@ -1509,17 +1697,28 @@ void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
// Emit size/modification time for this file.
// And whether this file was overridden.
{
+ SmallString<128> NameAsRequested = Entry.File.getNameAsRequested();
+ SmallString<128> Name = Entry.File.getName();
+
+ PreparePathForOutput(NameAsRequested);
+ PreparePathForOutput(Name);
+
+ if (Name == NameAsRequested)
+ Name.clear();
+
RecordData::value_type Record[] = {
INPUT_FILE,
InputFileOffsets.size(),
- (uint64_t)Entry.File->getSize(),
+ (uint64_t)Entry.File.getSize(),
(uint64_t)getTimestampForOutput(Entry.File),
Entry.BufferOverridden,
Entry.IsTransient,
- Entry.IsTopLevelModuleMap};
+ Entry.IsTopLevel,
+ Entry.IsModuleMap,
+ NameAsRequested.size()};
- // FIXME: The path should be taken from the FileEntryRef.
- EmitRecordWithPath(IFAbbrevCode, Record, Entry.File->getName());
+ Stream.EmitRecordWithBlob(IFAbbrevCode, Record,
+ (NameAsRequested + Name).str());
}
// Emit content hash for this file.
@@ -1609,8 +1808,8 @@ static unsigned CreateSLocExpansionAbbrev(llvm::BitstreamWriter &Stream) {
Abbrev->Add(BitCodeAbbrevOp(SM_SLOC_EXPANSION_ENTRY));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Offset
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Spelling location
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Start location
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // End location
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Start location
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // End location
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Is token range
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Token length
return Stream.EmitAbbrev(std::move(Abbrev));
@@ -1650,6 +1849,7 @@ namespace {
struct data_type {
const HeaderFileInfo &HFI;
+ bool AlreadyIncluded;
ArrayRef<ModuleMap::KnownHeader> KnownHeaders;
UnresolvedModule Unresolved;
};
@@ -1668,7 +1868,7 @@ namespace {
std::pair<unsigned, unsigned>
EmitKeyDataLength(raw_ostream& Out, key_type_ref key, data_type_ref Data) {
unsigned KeyLen = key.Filename.size() + 1 + 8 + 8;
- unsigned DataLen = 1 + 2 + 4 + 4;
+ unsigned DataLen = 1 + 4 + 4;
for (auto ModInfo : Data.KnownHeaders)
if (Writer.getLocalOrImportedSubmoduleID(ModInfo.getModule()))
DataLen += 4;
@@ -1680,7 +1880,7 @@ namespace {
void EmitKey(raw_ostream& Out, key_type_ref key, unsigned KeyLen) {
using namespace llvm::support;
- endian::Writer LE(Out, little);
+ endian::Writer LE(Out, llvm::endianness::little);
LE.write<uint64_t>(key.Size);
KeyLen -= 8;
LE.write<uint64_t>(key.ModTime);
@@ -1692,15 +1892,16 @@ namespace {
data_type_ref Data, unsigned DataLen) {
using namespace llvm::support;
- endian::Writer LE(Out, little);
+ endian::Writer LE(Out, llvm::endianness::little);
uint64_t Start = Out.tell(); (void)Start;
- unsigned char Flags = (Data.HFI.isImport << 5)
- | (Data.HFI.isPragmaOnce << 4)
+ unsigned char Flags = (Data.AlreadyIncluded << 6)
+ | (Data.HFI.isImport << 5)
+ | (Writer.isWritingStdCXXNamedModules() ? 0 :
+ Data.HFI.isPragmaOnce << 4)
| (Data.HFI.DirInfo << 1)
| Data.HFI.IndexHeaderMapHeader;
LE.write<uint8_t>(Flags);
- LE.write<uint16_t>(Data.HFI.NumIncludes);
if (!Data.HFI.ControllingMacro)
LE.write<uint32_t>(Data.HFI.ControllingMacroID);
@@ -1725,14 +1926,12 @@ namespace {
auto EmitModule = [&](Module *M, ModuleMap::ModuleHeaderRole Role) {
if (uint32_t ModID = Writer.getLocalOrImportedSubmoduleID(M)) {
- uint32_t Value = (ModID << 2) | (unsigned)Role;
- assert((Value >> 2) == ModID && "overflow in header module info");
+ uint32_t Value = (ModID << 3) | (unsigned)Role;
+ assert((Value >> 3) == ModID && "overflow in header module info");
LE.write<uint32_t>(Value);
}
};
- // FIXME: If the header is excluded, we should write out some
- // record of that fact.
for (auto ModInfo : Data.KnownHeaders)
EmitModule(ModInfo.getModule(), ModInfo.getRole());
if (Data.Unresolved.getPointer())
@@ -1774,17 +1973,17 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
// headers list when emitting resolved headers in the first loop below.
// FIXME: It'd be preferable to avoid doing this if we were given
// sufficient stat information in the module map.
- HS.getModuleMap().resolveHeaderDirectives(M);
+ HS.getModuleMap().resolveHeaderDirectives(M, /*File=*/std::nullopt);
// If the file didn't exist, we can still create a module if we were given
// enough information in the module map.
- for (auto U : M->MissingHeaders) {
+ for (const auto &U : M->MissingHeaders) {
// Check that we were given enough information to build a module
// without this file existing on disk.
if (!U.Size || (!U.ModTime && IncludeTimestamps)) {
PP->Diag(U.FileNameLoc, diag::err_module_no_size_mtime_for_header)
- << WritingModule->getFullModuleName() << U.Size.hasValue()
- << U.FileName;
+ << WritingModule->getFullModuleName() << U.Size.has_value()
+ << U.FileName;
continue;
}
@@ -1797,29 +1996,27 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
SavedStrings.push_back(FilenameDup.data());
HeaderFileInfoTrait::key_type Key = {
- FilenameDup, *U.Size, IncludeTimestamps ? *U.ModTime : 0
- };
+ FilenameDup, *U.Size, IncludeTimestamps ? *U.ModTime : 0};
HeaderFileInfoTrait::data_type Data = {
- Empty, {}, {M, ModuleMap::headerKindToRole(U.Kind)}
- };
+ Empty, false, {}, {M, ModuleMap::headerKindToRole(U.Kind)}};
// FIXME: Deal with cases where there are multiple unresolved header
// directives in different submodules for the same header.
Generator.insert(Key, Data, GeneratorTrait);
++NumHeaderSearchEntries;
}
-
- Worklist.append(M->submodule_begin(), M->submodule_end());
+ auto SubmodulesRange = M->submodules();
+ Worklist.append(SubmodulesRange.begin(), SubmodulesRange.end());
}
}
- SmallVector<const FileEntry *, 16> FilesByUID;
+ SmallVector<OptionalFileEntryRef, 16> FilesByUID;
HS.getFileMgr().GetUniqueIDMapping(FilesByUID);
if (FilesByUID.size() > HS.header_file_size())
FilesByUID.resize(HS.header_file_size());
for (unsigned UID = 0, LastUID = FilesByUID.size(); UID != LastUID; ++UID) {
- const FileEntry *File = FilesByUID[UID];
+ OptionalFileEntryRef File = FilesByUID[UID];
if (!File)
continue;
@@ -1830,7 +2027,7 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
// from a different module; in that case, we rely on the module(s)
// containing the header to provide this information.
const HeaderFileInfo *HFI =
- HS.getExistingFileInfo(File, /*WantExternal*/!Chain);
+ HS.getExistingFileInfo(*File, /*WantExternal*/!Chain);
if (!HFI || (HFI->isModuleHeader && !HFI->isCompilingModuleHeader))
continue;
@@ -1844,11 +2041,13 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
SavedStrings.push_back(Filename.data());
}
+ bool Included = PP->alreadyIncluded(*File);
+
HeaderFileInfoTrait::key_type Key = {
- Filename, File->getSize(), getTimestampForOutput(File)
+ Filename, File->getSize(), getTimestampForOutput(*File)
};
HeaderFileInfoTrait::data_type Data = {
- *HFI, HS.getModuleMap().findResolvedModulesForHeader(File), {}
+ *HFI, Included, HS.getModuleMap().findResolvedModulesForHeader(*File), {}
};
Generator.insert(Key, Data, GeneratorTrait);
++NumHeaderSearchEntries;
@@ -1862,7 +2061,7 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
llvm::raw_svector_ostream Out(TableData);
// Make sure that no bucket is at offset 0
- endian::write<uint32_t>(Out, 0, little);
+ endian::write<uint32_t>(Out, 0, llvm::endianness::little);
BucketOffset = Generator.Emit(Out, GeneratorTrait);
}
@@ -1895,17 +2094,22 @@ static void emitBlob(llvm::BitstreamWriter &Stream, StringRef Blob,
// Compress the buffer if possible. We expect that almost all PCM
// consumers will not want its contents.
- SmallString<0> CompressedBuffer;
- if (llvm::zlib::isAvailable()) {
- llvm::Error E = llvm::zlib::compress(Blob.drop_back(1), CompressedBuffer);
- if (!E) {
- RecordDataType Record[] = {SM_SLOC_BUFFER_BLOB_COMPRESSED,
- Blob.size() - 1};
- Stream.EmitRecordWithBlob(SLocBufferBlobCompressedAbbrv, Record,
- CompressedBuffer);
- return;
- }
- llvm::consumeError(std::move(E));
+ SmallVector<uint8_t, 0> CompressedBuffer;
+ if (llvm::compression::zstd::isAvailable()) {
+ llvm::compression::zstd::compress(
+ llvm::arrayRefFromStringRef(Blob.drop_back(1)), CompressedBuffer, 9);
+ RecordDataType Record[] = {SM_SLOC_BUFFER_BLOB_COMPRESSED, Blob.size() - 1};
+ Stream.EmitRecordWithBlob(SLocBufferBlobCompressedAbbrv, Record,
+ llvm::toStringRef(CompressedBuffer));
+ return;
+ }
+ if (llvm::compression::zlib::isAvailable()) {
+ llvm::compression::zlib::compress(
+ llvm::arrayRefFromStringRef(Blob.drop_back(1)), CompressedBuffer);
+ RecordDataType Record[] = {SM_SLOC_BUFFER_BLOB_COMPRESSED, Blob.size() - 1};
+ Stream.EmitRecordWithBlob(SLocBufferBlobCompressedAbbrv, Record,
+ llvm::toStringRef(CompressedBuffer));
+ return;
}
RecordDataType Record[] = {SM_SLOC_BUFFER_BLOB};
@@ -1940,7 +2144,6 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
// entry, which is always the same dummy entry.
std::vector<uint32_t> SLocEntryOffsets;
uint64_t SLocEntryOffsetsBase = Stream.GetCurrentBitNo();
- RecordData PreloadSLocs;
SLocEntryOffsets.reserve(SourceMgr.local_sloc_entry_size() - 1);
for (unsigned I = 1, N = SourceMgr.local_sloc_entry_size();
I != N; ++I) {
@@ -1952,7 +2155,6 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
// Record the offset of this source-location entry.
uint64_t Offset = Stream.GetCurrentBitNo() - SLocEntryOffsetsBase;
assert((Offset >> 32) == 0 && "SLocEntry offset too large");
- SLocEntryOffsets.push_back(Offset);
// Figure out which record code to use.
unsigned Code;
@@ -1967,25 +2169,29 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
Record.clear();
Record.push_back(Code);
- // Starting offset of this entry within this module, so skip the dummy.
- Record.push_back(SLoc->getOffset() - 2);
if (SLoc->isFile()) {
const SrcMgr::FileInfo &File = SLoc->getFile();
+ const SrcMgr::ContentCache *Content = &File.getContentCache();
+ // Do not emit files that were not listed as inputs.
+ if (!IsSLocAffecting[I])
+ continue;
+ SLocEntryOffsets.push_back(Offset);
+ // Starting offset of this entry within this module, so skip the dummy.
+ Record.push_back(getAdjustedOffset(SLoc->getOffset()) - 2);
AddSourceLocation(File.getIncludeLoc(), Record);
Record.push_back(File.getFileCharacteristic()); // FIXME: stable encoding
Record.push_back(File.hasLineDirectives());
- const SrcMgr::ContentCache *Content = &File.getContentCache();
bool EmitBlob = false;
if (Content->OrigEntry) {
assert(Content->OrigEntry == Content->ContentsEntry &&
"Writing to AST an overridden file is not supported");
// The source location entry is a file. Emit input file ID.
- assert(InputFileIDs[Content->OrigEntry] != 0 && "Missed file entry");
- Record.push_back(InputFileIDs[Content->OrigEntry]);
+ assert(InputFileIDs[*Content->OrigEntry] != 0 && "Missed file entry");
+ Record.push_back(InputFileIDs[*Content->OrigEntry]);
- Record.push_back(File.NumCreatedFIDs);
+ Record.push_back(getAdjustedNumCreatedFIDs(FID));
FileDeclIDsTy::iterator FDI = FileDeclIDs.find(FID);
if (FDI != FileDeclIDs.end()) {
@@ -2007,21 +2213,18 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
// We add one to the size so that we capture the trailing NULL
// that is required by llvm::MemoryBuffer::getMemBuffer (on
// the reader side).
- llvm::Optional<llvm::MemoryBufferRef> Buffer =
+ std::optional<llvm::MemoryBufferRef> Buffer =
Content->getBufferOrNone(PP.getDiagnostics(), PP.getFileManager());
StringRef Name = Buffer ? Buffer->getBufferIdentifier() : "";
Stream.EmitRecordWithBlob(SLocBufferAbbrv, Record,
StringRef(Name.data(), Name.size() + 1));
EmitBlob = true;
-
- if (Name == "<built-in>")
- PreloadSLocs.push_back(SLocEntryOffsets.size());
}
if (EmitBlob) {
// Include the implicit terminating null character in the on-disk buffer
// if we're writing it uncompressed.
- llvm::Optional<llvm::MemoryBufferRef> Buffer =
+ std::optional<llvm::MemoryBufferRef> Buffer =
Content->getBufferOrNone(PP.getDiagnostics(), PP.getFileManager());
if (!Buffer)
Buffer = llvm::MemoryBufferRef("<<<INVALID BUFFER>>>", "");
@@ -2032,19 +2235,23 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
} else {
// The source location entry is a macro expansion.
const SrcMgr::ExpansionInfo &Expansion = SLoc->getExpansion();
- AddSourceLocation(Expansion.getSpellingLoc(), Record);
- AddSourceLocation(Expansion.getExpansionLocStart(), Record);
+ SLocEntryOffsets.push_back(Offset);
+ // Starting offset of this entry within this module, so skip the dummy.
+ Record.push_back(getAdjustedOffset(SLoc->getOffset()) - 2);
+ LocSeq::State Seq;
+ AddSourceLocation(Expansion.getSpellingLoc(), Record, Seq);
+ AddSourceLocation(Expansion.getExpansionLocStart(), Record, Seq);
AddSourceLocation(Expansion.isMacroArgExpansion()
? SourceLocation()
: Expansion.getExpansionLocEnd(),
- Record);
+ Record, Seq);
Record.push_back(Expansion.isExpansionTokenRange());
// Compute the token length for this macro expansion.
SourceLocation::UIntTy NextOffset = SourceMgr.getNextLocalOffset();
if (I + 1 != N)
NextOffset = SourceMgr.getLocalSLocEntry(I + 1).getOffset();
- Record.push_back(NextOffset - SLoc->getOffset() - 1);
+ Record.push_back(getAdjustedOffset(NextOffset - SLoc->getOffset()) - 1);
Stream.EmitRecordWithAbbrev(SLocExpansionAbbrv, Record);
}
}
@@ -2068,14 +2275,11 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
{
RecordData::value_type Record[] = {
SOURCE_LOCATION_OFFSETS, SLocEntryOffsets.size(),
- SourceMgr.getNextLocalOffset() - 1 /* skip dummy */,
+ getAdjustedOffset(SourceMgr.getNextLocalOffset()) - 1 /* skip dummy */,
SLocEntryOffsetsBase - SourceManagerBlockOffset};
Stream.EmitRecordWithBlob(SLocOffsetsAbbrev, Record,
bytes(SLocEntryOffsets));
}
- // Write the source location entry preloads array, telling the AST
- // reader which source locations entries it should load eagerly.
- Stream.EmitRecord(SOURCE_LOCATION_PRELOADS, PreloadSLocs);
// Write the line table. It depends on remapping working, so it must come
// after the source location offsets.
@@ -2104,8 +2308,7 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
if (L.first.ID < 0)
continue;
- // Emit the file ID
- Record.push_back(L.first.ID);
+ AddFileID(L.first, Record);
// Emit the line entries
Record.push_back(L.second.size());
@@ -2161,10 +2364,21 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
Stream.EmitRecord(PP_COUNTER_VALUE, Record);
}
+ // If we have a recorded #pragma assume_nonnull, remember it so it can be
+ // replayed when the preamble terminates into the main file.
+ SourceLocation AssumeNonNullLoc =
+ PP.getPreambleRecordedPragmaAssumeNonNullLoc();
+ if (AssumeNonNullLoc.isValid()) {
+ assert(PP.isRecordingPreamble());
+ AddSourceLocation(AssumeNonNullLoc, Record);
+ Stream.EmitRecord(PP_ASSUME_NONNULL_LOC, Record);
+ Record.clear();
+ }
+
if (PP.isRecordingPreamble() && PP.hasRecordedPreamble()) {
assert(!IsModule);
auto SkipInfo = PP.getPreambleSkipInfo();
- if (SkipInfo.hasValue()) {
+ if (SkipInfo) {
Record.push_back(true);
AddSourceLocation(SkipInfo->HashTokenLoc, Record);
AddSourceLocation(SkipInfo->IfTokenLoc, Record);
@@ -2198,11 +2412,14 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
// Construct the list of identifiers with macro directives that need to be
// serialized.
SmallVector<const IdentifierInfo *, 128> MacroIdentifiers;
- for (auto &Id : PP.getIdentifierTable())
- if (Id.second->hadMacroDefinition() &&
- (!Id.second->isFromAST() ||
- Id.second->hasChangedSinceDeserialization()))
- MacroIdentifiers.push_back(Id.second);
+ // It is meaningless to emit macros for named modules. It only wastes times
+ // and spaces.
+ if (!isWritingStdCXXNamedModules())
+ for (auto &Id : PP.getIdentifierTable())
+ if (Id.second->hadMacroDefinition() &&
+ (!Id.second->isFromAST() ||
+ Id.second->hasChangedSinceDeserialization()))
+ MacroIdentifiers.push_back(Id.second);
// Sort the set of macro definitions that need to be serialized by the
// name of the macro, to provide a stable ordering.
llvm::sort(MacroIdentifiers, llvm::deref<std::less<>>());
@@ -2214,13 +2431,22 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
uint64_t StartOffset = Stream.GetCurrentBitNo() - MacroOffsetsBase;
assert((StartOffset >> 32) == 0 && "Macro identifiers offset too large");
- // Emit the macro directives in reverse source order.
- for (; MD; MD = MD->getPrevious()) {
- // Once we hit an ignored macro, we're done: the rest of the chain
- // will all be ignored macros.
- if (shouldIgnoreMacro(MD, IsModule, PP))
- break;
-
+ // Write out any exported module macros.
+ bool EmittedModuleMacros = false;
+ // C+=20 Header Units are compiled module interfaces, but they preserve
+ // macros that are live (i.e. have a defined value) at the end of the
+ // compilation. So when writing a header unit, we preserve only the final
+ // value of each macro (and discard any that are undefined). Header units
+ // do not have sub-modules (although they might import other header units).
+ // PCH files, conversely, retain the history of each macro's define/undef
+ // and of leaf macros in sub modules.
+ if (IsModule && WritingModule->isHeaderUnit()) {
+ // This is for the main TU when it is a C++20 header unit.
+ // We preserve the final state of defined macros, and we do not emit ones
+ // that are undefined.
+ if (!MD || shouldIgnoreMacro(MD, IsModule, PP) ||
+ MD->getKind() == MacroDirective::MD_Undefine)
+ continue;
AddSourceLocation(MD->getLocation(), Record);
Record.push_back(MD->getKind());
if (auto *DefMD = dyn_cast<DefMacroDirective>(MD)) {
@@ -2228,35 +2454,51 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
} else if (auto *VisMD = dyn_cast<VisibilityMacroDirective>(MD)) {
Record.push_back(VisMD->isPublic());
}
- }
+ ModuleMacroRecord.push_back(getSubmoduleID(WritingModule));
+ ModuleMacroRecord.push_back(getMacroRef(MD->getMacroInfo(), Name));
+ Stream.EmitRecord(PP_MODULE_MACRO, ModuleMacroRecord);
+ ModuleMacroRecord.clear();
+ EmittedModuleMacros = true;
+ } else {
+ // Emit the macro directives in reverse source order.
+ for (; MD; MD = MD->getPrevious()) {
+ // Once we hit an ignored macro, we're done: the rest of the chain
+ // will all be ignored macros.
+ if (shouldIgnoreMacro(MD, IsModule, PP))
+ break;
+ AddSourceLocation(MD->getLocation(), Record);
+ Record.push_back(MD->getKind());
+ if (auto *DefMD = dyn_cast<DefMacroDirective>(MD)) {
+ Record.push_back(getMacroRef(DefMD->getInfo(), Name));
+ } else if (auto *VisMD = dyn_cast<VisibilityMacroDirective>(MD)) {
+ Record.push_back(VisMD->isPublic());
+ }
+ }
- // Write out any exported module macros.
- bool EmittedModuleMacros = false;
- // We write out exported module macros for PCH as well.
- auto Leafs = PP.getLeafModuleMacros(Name);
- SmallVector<ModuleMacro*, 8> Worklist(Leafs.begin(), Leafs.end());
- llvm::DenseMap<ModuleMacro*, unsigned> Visits;
- while (!Worklist.empty()) {
- auto *Macro = Worklist.pop_back_val();
+ // We write out exported module macros for PCH as well.
+ auto Leafs = PP.getLeafModuleMacros(Name);
+ SmallVector<ModuleMacro *, 8> Worklist(Leafs.begin(), Leafs.end());
+ llvm::DenseMap<ModuleMacro *, unsigned> Visits;
+ while (!Worklist.empty()) {
+ auto *Macro = Worklist.pop_back_val();
- // Emit a record indicating this submodule exports this macro.
- ModuleMacroRecord.push_back(
- getSubmoduleID(Macro->getOwningModule()));
- ModuleMacroRecord.push_back(getMacroRef(Macro->getMacroInfo(), Name));
- for (auto *M : Macro->overrides())
- ModuleMacroRecord.push_back(getSubmoduleID(M->getOwningModule()));
+ // Emit a record indicating this submodule exports this macro.
+ ModuleMacroRecord.push_back(getSubmoduleID(Macro->getOwningModule()));
+ ModuleMacroRecord.push_back(getMacroRef(Macro->getMacroInfo(), Name));
+ for (auto *M : Macro->overrides())
+ ModuleMacroRecord.push_back(getSubmoduleID(M->getOwningModule()));
- Stream.EmitRecord(PP_MODULE_MACRO, ModuleMacroRecord);
- ModuleMacroRecord.clear();
+ Stream.EmitRecord(PP_MODULE_MACRO, ModuleMacroRecord);
+ ModuleMacroRecord.clear();
- // Enqueue overridden macros once we've visited all their ancestors.
- for (auto *M : Macro->overrides())
- if (++Visits[M] == M->getNumOverridingMacros())
- Worklist.push_back(M);
+ // Enqueue overridden macros once we've visited all their ancestors.
+ for (auto *M : Macro->overrides())
+ if (++Visits[M] == M->getNumOverridingMacros())
+ Worklist.push_back(M);
- EmittedModuleMacros = true;
+ EmittedModuleMacros = true;
+ }
}
-
if (Record.empty() && !EmittedModuleMacros)
continue;
@@ -2297,6 +2539,7 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
AddSourceLocation(MI->getDefinitionEndLoc(), Record);
Record.push_back(MI->isUsed());
Record.push_back(MI->isUsedForHeaderGuard());
+ Record.push_back(MI->getNumTokens());
unsigned Code;
if (MI->isObjectLike()) {
Code = PP_MACRO_OBJECT_LIKE;
@@ -2394,7 +2637,7 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec,
uint64_t Offset = Stream.GetCurrentBitNo() - MacroOffsetsBase;
assert((Offset >> 32) == 0 && "Preprocessed entity offset too large");
PreprocessedEntityOffsets.push_back(
- PPEntityOffset((*E)->getSourceRange(), Offset));
+ PPEntityOffset(getAdjustedRange((*E)->getSourceRange()), Offset));
if (auto *MD = dyn_cast<MacroDefinitionRecord>(*E)) {
// Record this macro definition's ID.
@@ -2494,12 +2737,12 @@ unsigned ASTWriter::getLocalOrImportedSubmoduleID(const Module *Mod) {
}
unsigned ASTWriter::getSubmoduleID(Module *Mod) {
+ unsigned ID = getLocalOrImportedSubmoduleID(Mod);
// FIXME: This can easily happen, if we have a reference to a submodule that
// did not result in us loading a module file for that submodule. For
// instance, a cross-top-level-module 'conflict' declaration will hit this.
- unsigned ID = getLocalOrImportedSubmoduleID(Mod);
- assert((ID || !Mod) &&
- "asked for module ID for non-local, non-imported module");
+ // assert((ID || !Mod) &&
+ // "asked for module ID for non-local, non-imported module");
return ID;
}
@@ -2507,9 +2750,8 @@ unsigned ASTWriter::getSubmoduleID(Module *Mod) {
/// given module).
static unsigned getNumberOfModules(Module *Mod) {
unsigned ChildModules = 0;
- for (auto Sub = Mod->submodule_begin(), SubEnd = Mod->submodule_end();
- Sub != SubEnd; ++Sub)
- ChildModules += getNumberOfModules(*Sub);
+ for (auto *Submodule : Mod->submodules())
+ ChildModules += getNumberOfModules(Submodule);
return ChildModules + 1;
}
@@ -2525,7 +2767,8 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_DEFINITION));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ID
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Parent
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // Kind
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // Kind
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Definition location
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsFramework
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsExplicit
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsSystem
@@ -2535,6 +2778,7 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InferExportWild...
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ConfigMacrosExh...
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ModuleMapIsPriv...
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // NamedModuleHasN...
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
unsigned DefinitionAbbrev = Stream.EmitAbbrev(std::move(Abbrev));
@@ -2626,12 +2870,16 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
ParentID = SubmoduleIDs[Mod->Parent];
}
+ uint64_t DefinitionLoc =
+ SourceLocationEncoding::encode(getAdjustedLocation(Mod->DefinitionLoc));
+
// Emit the definition of the block.
{
RecordData::value_type Record[] = {SUBMODULE_DEFINITION,
ID,
ParentID,
(RecordData::value_type)Mod->Kind,
+ DefinitionLoc,
Mod->IsFramework,
Mod->IsExplicit,
Mod->IsSystem,
@@ -2640,7 +2888,8 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
Mod->InferExplicitSubmodules,
Mod->InferExportWildcard,
Mod->ConfigMacrosExhaustive,
- Mod->ModuleMapIsPrivate};
+ Mod->ModuleMapIsPrivate,
+ Mod->NamedModuleHasInit};
Stream.EmitRecordWithBlob(DefinitionAbbrev, Record, Mod->Name);
}
@@ -2651,14 +2900,16 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
}
// Emit the umbrella header, if there is one.
- if (auto UmbrellaHeader = Mod->getUmbrellaHeader()) {
+ if (std::optional<Module::Header> UmbrellaHeader =
+ Mod->getUmbrellaHeaderAsWritten()) {
RecordData::value_type Record[] = {SUBMODULE_UMBRELLA_HEADER};
Stream.EmitRecordWithBlob(UmbrellaAbbrev, Record,
- UmbrellaHeader.NameAsWritten);
- } else if (auto UmbrellaDir = Mod->getUmbrellaDir()) {
+ UmbrellaHeader->NameAsWritten);
+ } else if (std::optional<Module::DirectoryName> UmbrellaDir =
+ Mod->getUmbrellaDirAsWritten()) {
RecordData::value_type Record[] = {SUBMODULE_UMBRELLA_DIR};
Stream.EmitRecordWithBlob(UmbrellaDirAbbrev, Record,
- UmbrellaDir.NameAsWritten);
+ UmbrellaDir->NameAsWritten);
}
// Emit the headers.
@@ -2682,10 +2933,12 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
// Emit the top headers.
{
- auto TopHeaders = Mod->getTopHeaders(PP->getFileManager());
RecordData::value_type Record[] = {SUBMODULE_TOPHEADER};
- for (auto *H : TopHeaders)
- Stream.EmitRecordWithBlob(TopHeaderAbbrev, Record, H->getName());
+ for (FileEntryRef H : Mod->getTopHeaders(PP->getFileManager())) {
+ SmallString<128> HeaderName(H.getName());
+ PreparePathForOutput(HeaderName);
+ Stream.EmitRecordWithBlob(TopHeaderAbbrev, Record, HeaderName);
+ }
}
// Emit the imports.
@@ -2696,6 +2949,14 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
Stream.EmitRecord(SUBMODULE_IMPORTS, Record);
}
+ // Emit the modules affecting compilation that were not imported.
+ if (!Mod->AffectingClangModules.empty()) {
+ RecordData Record;
+ for (auto *I : Mod->AffectingClangModules)
+ Record.push_back(getSubmoduleID(I));
+ Stream.EmitRecord(SUBMODULE_AFFECTING_MODULES, Record);
+ }
+
// Emit the exports.
if (!Mod->Exports.empty()) {
RecordData Record;
@@ -2712,6 +2973,8 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
// Might be unnecessary as use declarations are only used to build the
// module itself.
+ // TODO: Consider serializing undeclared uses of modules.
+
// Emit the link libraries.
for (const auto &LL : Mod->LinkLibraries) {
RecordData::value_type Record[] = {SUBMODULE_LINK_LIBRARY,
@@ -2788,20 +3051,41 @@ void ASTWriter::WritePragmaDiagnosticMappings(const DiagnosticsEngine &Diag,
assert(Flags == EncodeDiagStateFlags(State) &&
"diag state flags vary in single AST file");
+ // If we ever serialize non-pragma mappings outside the initial state, the
+ // code below will need to consider more than getDefaultMapping.
+ assert(!IncludeNonPragmaStates ||
+ State == Diag.DiagStatesByLoc.FirstDiagState);
+
unsigned &DiagStateID = DiagStateIDMap[State];
Record.push_back(DiagStateID);
if (DiagStateID == 0) {
DiagStateID = ++CurrID;
+ SmallVector<std::pair<unsigned, DiagnosticMapping>> Mappings;
// Add a placeholder for the number of mappings.
auto SizeIdx = Record.size();
Record.emplace_back();
for (const auto &I : *State) {
- if (I.second.isPragma() || IncludeNonPragmaStates) {
- Record.push_back(I.first);
- Record.push_back(I.second.serialize());
- }
+ // Maybe skip non-pragmas.
+ if (!I.second.isPragma() && !IncludeNonPragmaStates)
+ continue;
+ // Skip default mappings. We have a mapping for every diagnostic ever
+ // emitted, regardless of whether it was customized.
+ if (!I.second.isPragma() &&
+ I.second == DiagnosticIDs::getDefaultMapping(I.first))
+ continue;
+ Mappings.push_back(I);
+ }
+
+ // Sort by diag::kind for deterministic output.
+ llvm::sort(Mappings, [](const auto &LHS, const auto &RHS) {
+ return LHS.first < RHS.first;
+ });
+
+ for (const auto &I : Mappings) {
+ Record.push_back(I.first);
+ Record.push_back(I.second.serialize());
}
// Update the placeholder.
Record[SizeIdx] = (Record.size() - SizeIdx) / 2;
@@ -2828,7 +3112,7 @@ void ASTWriter::WritePragmaDiagnosticMappings(const DiagnosticsEngine &Diag,
Record.push_back(FileIDAndFile.second.StateTransitions.size());
for (auto &StatePoint : FileIDAndFile.second.StateTransitions) {
- Record.push_back(StatePoint.Offset);
+ Record.push_back(getAdjustedOffset(StatePoint.Offset));
AddDiagState(StatePoint.State, false);
}
}
@@ -2948,6 +3232,7 @@ void ASTWriter::WriteFileDeclIDsMap() {
for (auto &FileDeclEntry : SortedFileDeclIDs) {
DeclIDInFileInfo &Info = *FileDeclEntry.second;
Info.FirstDeclIndex = FileGroupedDeclIDs.size();
+ llvm::stable_sort(Info.DeclIDs);
for (auto &LocDeclEntry : Info.DeclIDs)
FileGroupedDeclIDs.push_back(LocDeclEntry.second);
}
@@ -2967,6 +3252,13 @@ void ASTWriter::WriteComments() {
auto _ = llvm::make_scope_exit([this] { Stream.ExitBlock(); });
if (!PP->getPreprocessorOpts().WriteCommentListToPCH)
return;
+
+ // Don't write comments to BMI to reduce the size of BMI.
+ // If language services (e.g., clangd) want such abilities,
+ // we can offer a special option then.
+ if (isWritingStdCXXNamedModules())
+ return;
+
RecordData Record;
for (const auto &FO : Context->Comments.OrderedComments) {
for (const auto &OC : FO.second) {
@@ -3017,11 +3309,11 @@ public:
unsigned DataLen = 4 + 2 + 2; // 2 bytes for each of the method counts
for (const ObjCMethodList *Method = &Methods.Instance; Method;
Method = Method->getNext())
- if (Method->getMethod())
+ if (ShouldWriteMethodListNode(Method))
DataLen += 4;
for (const ObjCMethodList *Method = &Methods.Factory; Method;
Method = Method->getNext())
- if (Method->getMethod())
+ if (ShouldWriteMethodListNode(Method))
DataLen += 4;
return emitULEBKeyDataLength(KeyLen, DataLen, Out);
}
@@ -3029,7 +3321,7 @@ public:
void EmitKey(raw_ostream& Out, Selector Sel, unsigned) {
using namespace llvm::support;
- endian::Writer LE(Out, little);
+ endian::Writer LE(Out, llvm::endianness::little);
uint64_t Start = Out.tell();
assert((Start >> 32) == 0 && "Selector key offset too large");
Writer.SetSelectorOffset(Sel, Start);
@@ -3046,19 +3338,19 @@ public:
data_type_ref Methods, unsigned DataLen) {
using namespace llvm::support;
- endian::Writer LE(Out, little);
+ endian::Writer LE(Out, llvm::endianness::little);
uint64_t Start = Out.tell(); (void)Start;
LE.write<uint32_t>(Methods.ID);
unsigned NumInstanceMethods = 0;
for (const ObjCMethodList *Method = &Methods.Instance; Method;
Method = Method->getNext())
- if (Method->getMethod())
+ if (ShouldWriteMethodListNode(Method))
++NumInstanceMethods;
unsigned NumFactoryMethods = 0;
for (const ObjCMethodList *Method = &Methods.Factory; Method;
Method = Method->getNext())
- if (Method->getMethod())
+ if (ShouldWriteMethodListNode(Method))
++NumFactoryMethods;
unsigned InstanceBits = Methods.Instance.getBits();
@@ -3079,15 +3371,20 @@ public:
LE.write<uint16_t>(FullFactoryBits);
for (const ObjCMethodList *Method = &Methods.Instance; Method;
Method = Method->getNext())
- if (Method->getMethod())
+ if (ShouldWriteMethodListNode(Method))
LE.write<uint32_t>(Writer.getDeclID(Method->getMethod()));
for (const ObjCMethodList *Method = &Methods.Factory; Method;
Method = Method->getNext())
- if (Method->getMethod())
+ if (ShouldWriteMethodListNode(Method))
LE.write<uint32_t>(Writer.getDeclID(Method->getMethod()));
assert(Out.tell() - Start == DataLen && "Data length is wrong");
}
+
+private:
+ static bool ShouldWriteMethodListNode(const ObjCMethodList *Node) {
+ return (Node->getMethod() && !Node->getMethod()->isFromASTFile());
+ }
};
} // namespace
@@ -3130,15 +3427,21 @@ void ASTWriter::WriteSelectors(Sema &SemaRef) {
if (Chain && ID < FirstSelectorID) {
// Selector already exists. Did it change?
bool changed = false;
- for (ObjCMethodList *M = &Data.Instance;
- !changed && M && M->getMethod(); M = M->getNext()) {
- if (!M->getMethod()->isFromASTFile())
+ for (ObjCMethodList *M = &Data.Instance; M && M->getMethod();
+ M = M->getNext()) {
+ if (!M->getMethod()->isFromASTFile()) {
changed = true;
+ Data.Instance = *M;
+ break;
+ }
}
- for (ObjCMethodList *M = &Data.Factory; !changed && M && M->getMethod();
+ for (ObjCMethodList *M = &Data.Factory; M && M->getMethod();
M = M->getNext()) {
- if (!M->getMethod()->isFromASTFile())
+ if (!M->getMethod()->isFromASTFile()) {
changed = true;
+ Data.Factory = *M;
+ break;
+ }
}
if (!changed)
continue;
@@ -3158,7 +3461,7 @@ void ASTWriter::WriteSelectors(Sema &SemaRef) {
ASTMethodPoolTrait Trait(*this);
llvm::raw_svector_ostream Out(MethodPool);
// Make sure that no bucket is at offset 0
- endian::write<uint32_t>(Out, 0, little);
+ endian::write<uint32_t>(Out, 0, llvm::endianness::little);
BucketOffset = Generator.Emit(Out, Trait);
}
@@ -3324,26 +3627,24 @@ public:
// the mapping from persistent IDs to strings.
Writer.SetIdentifierOffset(II, Out.tell());
+ auto MacroOffset = Writer.getMacroDirectivesOffset(II);
+
// Emit the offset of the key/data length information to the interesting
// identifiers table if necessary.
- if (InterestingIdentifierOffsets && isInterestingIdentifier(II))
+ if (InterestingIdentifierOffsets &&
+ isInterestingIdentifier(II, MacroOffset))
InterestingIdentifierOffsets->push_back(Out.tell());
unsigned KeyLen = II->getLength() + 1;
unsigned DataLen = 4; // 4 bytes for the persistent ID << 1
- auto MacroOffset = Writer.getMacroDirectivesOffset(II);
if (isInterestingIdentifier(II, MacroOffset)) {
DataLen += 2; // 2 bytes for builtin ID
DataLen += 2; // 2 bytes for flags
if (MacroOffset)
DataLen += 4; // MacroDirectives offset.
- if (NeedDecls) {
- for (IdentifierResolver::iterator D = IdResolver.begin(II),
- DEnd = IdResolver.end();
- D != DEnd; ++D)
- DataLen += 4;
- }
+ if (NeedDecls)
+ DataLen += std::distance(IdResolver.begin(II), IdResolver.end()) * 4;
}
return emitULEBKeyDataLength(KeyLen, DataLen, Out);
}
@@ -3357,7 +3658,7 @@ public:
IdentID ID, unsigned) {
using namespace llvm::support;
- endian::Writer LE(Out, little);
+ endian::Writer LE(Out, llvm::endianness::little);
auto MacroOffset = Writer.getMacroDirectivesOffset(II);
if (!isInterestingIdentifier(II, MacroOffset)) {
@@ -3388,13 +3689,10 @@ public:
// "stat"), but the ASTReader adds declarations to the end of the list
// (so we need to see the struct "stat" before the function "stat").
// Only emit declarations that aren't from a chained PCH, though.
- SmallVector<NamedDecl *, 16> Decls(IdResolver.begin(II),
- IdResolver.end());
- for (SmallVectorImpl<NamedDecl *>::reverse_iterator D = Decls.rbegin(),
- DEnd = Decls.rend();
- D != DEnd; ++D)
+ SmallVector<NamedDecl *, 16> Decls(IdResolver.decls(II));
+ for (NamedDecl *D : llvm::reverse(Decls))
LE.write<uint32_t>(
- Writer.getDeclID(getDeclForLocalLookup(PP.getLangOpts(), *D)));
+ Writer.getDeclID(getDeclForLocalLookup(PP.getLangOpts(), D)));
}
}
};
@@ -3417,9 +3715,8 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
// strings.
{
llvm::OnDiskChainedHashTableGenerator<ASTIdentifierTableTrait> Generator;
- ASTIdentifierTableTrait Trait(
- *this, PP, IdResolver, IsModule,
- (getLangOpts().CPlusPlus && IsModule) ? &InterestingIdents : nullptr);
+ ASTIdentifierTableTrait Trait(*this, PP, IdResolver, IsModule,
+ IsModule ? &InterestingIdents : nullptr);
// Look for any identifiers that were named while processing the
// headers, but are otherwise not needed. We add these to the hash
@@ -3428,13 +3725,13 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
// file.
SmallVector<const IdentifierInfo *, 128> IIs;
for (const auto &ID : PP.getIdentifierTable())
- IIs.push_back(ID.second);
- // Sort the identifiers lexicographically before getting them references so
+ if (Trait.isInterestingNonMacroIdentifier(ID.second))
+ IIs.push_back(ID.second);
+ // Sort the identifiers lexicographically before getting the references so
// that their order is stable.
llvm::sort(IIs, llvm::deref<std::less<>>());
for (const IdentifierInfo *II : IIs)
- if (Trait.isInterestingNonMacroIdentifier(II))
- getIdentifierRef(II);
+ getIdentifierRef(II);
// Create the on-disk hash table representation. We only store offsets
// for identifiers that appear here for the first time.
@@ -3460,7 +3757,7 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
llvm::raw_svector_ostream Out(IdentifierTable);
// Make sure that no bucket is at offset 0
- endian::write<uint32_t>(Out, 0, little);
+ endian::write<uint32_t>(Out, 0, llvm::endianness::little);
BucketOffset = Generator.Emit(Out, Trait);
}
@@ -3537,8 +3834,7 @@ public:
data_type ImportData(const reader::ASTDeclContextNameLookupTrait::data_type &FromReader) {
unsigned Start = DeclIDs.size();
- for (auto ID : FromReader)
- DeclIDs.push_back(ID);
+ llvm::append_range(DeclIDs, FromReader);
return std::make_pair(Start, DeclIDs.size());
}
@@ -3556,7 +3852,8 @@ public:
using namespace llvm::support;
- endian::write<uint32_t>(Out, Writer.getChain()->getModuleFileID(F), little);
+ endian::write<uint32_t>(Out, Writer.getChain()->getModuleFileID(F),
+ llvm::endianness::little);
}
std::pair<unsigned, unsigned> EmitKeyDataLength(raw_ostream &Out,
@@ -3591,7 +3888,7 @@ public:
void EmitKey(raw_ostream &Out, DeclarationNameKey Name, unsigned) {
using namespace llvm::support;
- endian::Writer LE(Out, little);
+ endian::Writer LE(Out, llvm::endianness::little);
LE.write<uint8_t>(Name.getKind());
switch (Name.getKind()) {
case DeclarationName::Identifier:
@@ -3623,7 +3920,7 @@ public:
unsigned DataLen) {
using namespace llvm::support;
- endian::Writer LE(Out, little);
+ endian::Writer LE(Out, llvm::endianness::little);
uint64_t Start = Out.tell(); (void)Start;
for (unsigned I = Lookup.first, N = Lookup.second; I != N; ++I)
LE.write<uint32_t>(DeclIDs[I]);
@@ -4036,6 +4333,7 @@ void ASTWriter::WriteLateParsedTemplates(Sema &SemaRef) {
LateParsedTemplate &LPT = *LPTMapEntry.second;
AddDeclRef(FD, Record);
AddDeclRef(LPT.D, Record);
+ Record.push_back(LPT.FPO.getAsOpaqueInt());
Record.push_back(LPT.Toks.size());
for (const auto &Tok : LPT.Toks) {
@@ -4150,8 +4448,13 @@ void ASTWriter::WriteModuleFileExtension(Sema &SemaRef,
void ASTRecordWriter::AddAttr(const Attr *A) {
auto &Record = *this;
- if (!A)
+ // FIXME: Clang can't handle the serialization/deserialization of
+ // preferred_name properly now. See
+ // https://github.com/llvm/llvm-project/issues/56490 for example.
+ if (!A || (isa<PreferredNameAttr>(A) &&
+ Writer->isWritingStdCXXNamedModules()))
return Record.push_back(0);
+
Record.push_back(A->getKind() + 1); // FIXME: stable encoding, target attrs
Record.AddIdentifierRef(A->getAttrName());
@@ -4161,6 +4464,7 @@ void ASTRecordWriter::AddAttr(const Attr *A) {
Record.push_back(A->getParsedKind());
Record.push_back(A->getSyntax());
Record.push_back(A->getAttributeSpellingListIndexRaw());
+ Record.push_back(A->isRegularKeywordAttribute());
#include "clang/Serialization/AttrPCHWrite.inc"
}
@@ -4174,15 +4478,47 @@ void ASTRecordWriter::AddAttributes(ArrayRef<const Attr *> Attrs) {
void ASTWriter::AddToken(const Token &Tok, RecordDataImpl &Record) {
AddSourceLocation(Tok.getLocation(), Record);
- Record.push_back(Tok.getLength());
-
- // FIXME: When reading literal tokens, reconstruct the literal pointer
- // if it is needed.
- AddIdentifierRef(Tok.getIdentifierInfo(), Record);
// FIXME: Should translate token kind to a stable encoding.
Record.push_back(Tok.getKind());
// FIXME: Should translate token flags to a stable encoding.
Record.push_back(Tok.getFlags());
+
+ if (Tok.isAnnotation()) {
+ AddSourceLocation(Tok.getAnnotationEndLoc(), Record);
+ switch (Tok.getKind()) {
+ case tok::annot_pragma_loop_hint: {
+ auto *Info = static_cast<PragmaLoopHintInfo *>(Tok.getAnnotationValue());
+ AddToken(Info->PragmaName, Record);
+ AddToken(Info->Option, Record);
+ Record.push_back(Info->Toks.size());
+ for (const auto &T : Info->Toks)
+ AddToken(T, Record);
+ break;
+ }
+ case tok::annot_pragma_pack: {
+ auto *Info =
+ static_cast<Sema::PragmaPackInfo *>(Tok.getAnnotationValue());
+ Record.push_back(static_cast<unsigned>(Info->Action));
+ AddString(Info->SlotLabel, Record);
+ AddToken(Info->Alignment, Record);
+ break;
+ }
+ // Some annotation tokens do not use the PtrData field.
+ case tok::annot_pragma_openmp:
+ case tok::annot_pragma_openmp_end:
+ case tok::annot_pragma_unused:
+ case tok::annot_pragma_openacc:
+ case tok::annot_pragma_openacc_end:
+ break;
+ default:
+ llvm_unreachable("missing serialization code for annotation token");
+ }
+ } else {
+ Record.push_back(Tok.getLength());
+ // FIXME: When reading literal tokens, reconstruct the literal pointer if it
+ // is needed.
+ AddIdentifierRef(Tok.getIdentifierInfo(), Record);
+ }
}
void ASTWriter::AddString(StringRef Str, RecordDataImpl &Record) {
@@ -4193,6 +4529,11 @@ void ASTWriter::AddString(StringRef Str, RecordDataImpl &Record) {
bool ASTWriter::PreparePathForOutput(SmallVectorImpl<char> &Path) {
assert(Context && "should have context when outputting path");
+ // Leave special file names as they are.
+ StringRef PathStr(Path.data(), Path.size());
+ if (PathStr == "<built-in>" || PathStr == "<command line>")
+ return false;
+
bool Changed =
cleanPathForOutput(Context->getSourceManager().getFileManager(), Path);
@@ -4224,11 +4565,11 @@ void ASTWriter::EmitRecordWithPath(unsigned Abbrev, RecordDataRef Record,
void ASTWriter::AddVersionTuple(const VersionTuple &Version,
RecordDataImpl &Record) {
Record.push_back(Version.getMajor());
- if (Optional<unsigned> Minor = Version.getMinor())
+ if (std::optional<unsigned> Minor = Version.getMinor())
Record.push_back(*Minor + 1);
else
Record.push_back(0);
- if (Optional<unsigned> Subminor = Version.getSubminor())
+ if (std::optional<unsigned> Subminor = Version.getSubminor())
Record.push_back(*Subminor + 1);
else
Record.push_back(0);
@@ -4260,9 +4601,10 @@ ASTWriter::ASTWriter(llvm::BitstreamWriter &Stream,
SmallVectorImpl<char> &Buffer,
InMemoryModuleCache &ModuleCache,
ArrayRef<std::shared_ptr<ModuleFileExtension>> Extensions,
- bool IncludeTimestamps)
+ bool IncludeTimestamps, bool BuildingImplicitModule)
: Stream(Stream), Buffer(Buffer), ModuleCache(ModuleCache),
- IncludeTimestamps(IncludeTimestamps) {
+ IncludeTimestamps(IncludeTimestamps),
+ BuildingImplicitModule(BuildingImplicitModule) {
for (const auto &Ext : Extensions) {
if (auto Writer = Ext->createExtensionWriter(*this))
ModuleFileExtensionWriters.push_back(std::move(Writer));
@@ -4280,14 +4622,14 @@ time_t ASTWriter::getTimestampForOutput(const FileEntry *E) const {
return IncludeTimestamps ? E->getModificationTime() : 0;
}
-ASTFileSignature ASTWriter::WriteAST(Sema &SemaRef,
- const std::string &OutputFile,
+ASTFileSignature ASTWriter::WriteAST(Sema &SemaRef, StringRef OutputFile,
Module *WritingModule, StringRef isysroot,
- bool hasErrors,
bool ShouldCacheASTInMemory) {
+ llvm::TimeTraceScope scope("WriteAST", OutputFile);
WritingAST = true;
- ASTHasCompilerErrors = hasErrors;
+ ASTHasCompilerErrors =
+ SemaRef.PP.getDiagnostics().hasUncompilableErrorOccurred();
// Emit the file header.
Stream.Emit((unsigned)'C', 8);
@@ -4300,8 +4642,7 @@ ASTFileSignature ASTWriter::WriteAST(Sema &SemaRef,
Context = &SemaRef.Context;
PP = &SemaRef.PP;
this->WritingModule = WritingModule;
- ASTFileSignature Signature =
- WriteASTCore(SemaRef, isysroot, OutputFile, WritingModule);
+ ASTFileSignature Signature = WriteASTCore(SemaRef, isysroot, WritingModule);
Context = nullptr;
PP = nullptr;
this->WritingModule = nullptr;
@@ -4326,8 +4667,69 @@ static void AddLazyVectorDecls(ASTWriter &Writer, Vector &Vec,
}
}
+void ASTWriter::collectNonAffectingInputFiles() {
+ SourceManager &SrcMgr = PP->getSourceManager();
+ unsigned N = SrcMgr.local_sloc_entry_size();
+
+ IsSLocAffecting.resize(N, true);
+
+ if (!WritingModule)
+ return;
+
+ auto AffectingModuleMaps = GetAffectingModuleMaps(*PP, WritingModule);
+
+ unsigned FileIDAdjustment = 0;
+ unsigned OffsetAdjustment = 0;
+
+ NonAffectingFileIDAdjustments.reserve(N);
+ NonAffectingOffsetAdjustments.reserve(N);
+
+ NonAffectingFileIDAdjustments.push_back(FileIDAdjustment);
+ NonAffectingOffsetAdjustments.push_back(OffsetAdjustment);
+
+ for (unsigned I = 1; I != N; ++I) {
+ const SrcMgr::SLocEntry *SLoc = &SrcMgr.getLocalSLocEntry(I);
+ FileID FID = FileID::get(I);
+ assert(&SrcMgr.getSLocEntry(FID) == SLoc);
+
+ if (!SLoc->isFile())
+ continue;
+ const SrcMgr::FileInfo &File = SLoc->getFile();
+ const SrcMgr::ContentCache *Cache = &File.getContentCache();
+ if (!Cache->OrigEntry)
+ continue;
+
+ if (!isModuleMap(File.getFileCharacteristic()) ||
+ AffectingModuleMaps.empty() ||
+ llvm::is_contained(AffectingModuleMaps, *Cache->OrigEntry))
+ continue;
+
+ IsSLocAffecting[I] = false;
+
+ FileIDAdjustment += 1;
+ // Even empty files take up one element in the offset table.
+ OffsetAdjustment += SrcMgr.getFileIDSize(FID) + 1;
+
+ // If the previous file was non-affecting as well, just extend its entry
+ // with our information.
+ if (!NonAffectingFileIDs.empty() &&
+ NonAffectingFileIDs.back().ID == FID.ID - 1) {
+ NonAffectingFileIDs.back() = FID;
+ NonAffectingRanges.back().setEnd(SrcMgr.getLocForEndOfFile(FID));
+ NonAffectingFileIDAdjustments.back() = FileIDAdjustment;
+ NonAffectingOffsetAdjustments.back() = OffsetAdjustment;
+ continue;
+ }
+
+ NonAffectingFileIDs.push_back(FID);
+ NonAffectingRanges.emplace_back(SrcMgr.getLocForStartOfFile(FID),
+ SrcMgr.getLocForEndOfFile(FID));
+ NonAffectingFileIDAdjustments.push_back(FileIDAdjustment);
+ NonAffectingOffsetAdjustments.push_back(OffsetAdjustment);
+ }
+}
+
ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
- const std::string &OutputFile,
Module *WritingModule) {
using namespace llvm;
@@ -4340,6 +4742,12 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
ASTContext &Context = SemaRef.Context;
Preprocessor &PP = SemaRef.PP;
+ // This needs to be done very early, since everything that writes
+ // SourceLocations or FileIDs depends on it.
+ collectNonAffectingInputFiles();
+
+ writeUnhashedControlBlock(PP, Context);
+
// Set up predefined declaration IDs.
auto RegisterPredefDecl = [&] (Decl *D, PredefinedDeclIDs ID) {
if (D) {
@@ -4396,13 +4804,14 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
// entire table, since later PCH files in a PCH chain are only interested in
// the results at the end of the chain.
RecordData WeakUndeclaredIdentifiers;
- for (auto &WeakUndeclaredIdentifier : SemaRef.WeakUndeclaredIdentifiers) {
- IdentifierInfo *II = WeakUndeclaredIdentifier.first;
- WeakInfo &WI = WeakUndeclaredIdentifier.second;
- AddIdentifierRef(II, WeakUndeclaredIdentifiers);
- AddIdentifierRef(WI.getAlias(), WeakUndeclaredIdentifiers);
- AddSourceLocation(WI.getLocation(), WeakUndeclaredIdentifiers);
- WeakUndeclaredIdentifiers.push_back(WI.getUsed());
+ for (const auto &WeakUndeclaredIdentifierList :
+ SemaRef.WeakUndeclaredIdentifiers) {
+ const IdentifierInfo *const II = WeakUndeclaredIdentifierList.first;
+ for (const auto &WI : WeakUndeclaredIdentifierList.second) {
+ AddIdentifierRef(II, WeakUndeclaredIdentifiers);
+ AddIdentifierRef(WI.getAlias(), WeakUndeclaredIdentifiers);
+ AddSourceLocation(WI.getLocation(), WeakUndeclaredIdentifiers);
+ }
}
// Build a record containing all of the ext_vector declarations.
@@ -4480,11 +4889,11 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
}
// Write the control block
- WriteControlBlock(PP, Context, isysroot, OutputFile);
+ WriteControlBlock(PP, Context, isysroot);
// Write the remaining AST contents.
Stream.FlushToWord();
- ASTBlockRange.first = Stream.GetCurrentBitNo();
+ ASTBlockRange.first = Stream.GetCurrentBitNo() >> 3;
Stream.EnterSubblock(AST_BLOCK_ID, 5);
ASTBlockStartOffset = Stream.GetCurrentBitNo();
@@ -4567,13 +4976,9 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
}
// Sort the identifiers to visit based on their name.
llvm::sort(IIs, llvm::deref<std::less<>>());
- for (const IdentifierInfo *II : IIs) {
- for (IdentifierResolver::iterator D = SemaRef.IdResolver.begin(II),
- DEnd = SemaRef.IdResolver.end();
- D != DEnd; ++D) {
- GetDeclRef(*D);
- }
- }
+ for (const IdentifierInfo *II : IIs)
+ for (const Decl *D : SemaRef.IdResolver.decls(II))
+ GetDeclRef(D);
}
// For method pool in the module, if it contains an entry for a selector,
@@ -4630,7 +5035,7 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
for (ModuleFile &M : Chain->ModuleMgr) {
using namespace llvm::support;
- endian::Writer LE(Out, little);
+ endian::Writer LE(Out, llvm::endianness::little);
LE.write<uint8_t>(static_cast<uint8_t>(M.Kind));
StringRef Name = M.isModule() ? M.ModuleName : M.FileName;
LE.write<uint16_t>(Name.size());
@@ -4795,7 +5200,7 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
};
llvm::SmallVector<ModuleInfo, 64> Imports;
for (const auto *I : Context.local_imports()) {
- assert(SubmoduleIDs.find(I->getImportedModule()) != SubmoduleIDs.end());
+ assert(SubmoduleIDs.contains(I->getImportedModule()));
Imports.push_back(ModuleInfo(SubmoduleIDs[I->getImportedModule()],
I->getImportedModule()));
}
@@ -4841,13 +5246,13 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
Stream.EmitRecord(STATISTICS, Record);
Stream.ExitBlock();
Stream.FlushToWord();
- ASTBlockRange.second = Stream.GetCurrentBitNo();
+ ASTBlockRange.second = Stream.GetCurrentBitNo() >> 3;
// Write the module file extension blocks.
for (const auto &ExtWriter : ModuleFileExtensionWriters)
WriteModuleFileExtension(SemaRef, *ExtWriter);
- return writeUnhashedControlBlock(PP, Context);
+ return backpatchSignature();
}
void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
@@ -4861,6 +5266,7 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
const Decl *D = DeclUpdate.first;
bool HasUpdatedBody = false;
+ bool HasAddedVarDefinition = false;
RecordData RecordData;
ASTRecordWriter Record(*this, RecordData);
for (auto &Update : DeclUpdate.second) {
@@ -4870,6 +5276,8 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
// to skip over the lazy body to reach statements for other records.
if (Kind == UPD_CXX_ADDED_FUNCTION_DEFINITION)
HasUpdatedBody = true;
+ else if (Kind == UPD_CXX_ADDED_VAR_DEFINITION)
+ HasAddedVarDefinition = true;
else
Record.push_back(Kind);
@@ -4882,6 +5290,7 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
break;
case UPD_CXX_ADDED_FUNCTION_DEFINITION:
+ case UPD_CXX_ADDED_VAR_DEFINITION:
break;
case UPD_CXX_POINT_OF_INSTANTIATION:
@@ -4889,14 +5298,6 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
Record.AddSourceLocation(Update.getLoc());
break;
- case UPD_CXX_ADDED_VAR_DEFINITION: {
- const VarDecl *VD = cast<VarDecl>(D);
- Record.push_back(VD->isInline());
- Record.push_back(VD->isInlineSpecified());
- Record.AddVarDeclInit(VD);
- break;
- }
-
case UPD_CXX_INSTANTIATED_DEFAULT_ARGUMENT:
Record.AddStmt(const_cast<Expr *>(
cast<ParmVarDecl>(Update.getDecl())->getDefaultArg()));
@@ -4911,7 +5312,7 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
auto *RD = cast<CXXRecordDecl>(D);
UpdatedDeclContexts.insert(RD->getPrimaryContext());
Record.push_back(RD->isParamDestroyedInCallee());
- Record.push_back(RD->getArgPassingRestrictions());
+ Record.push_back(llvm::to_underlying(RD->getArgPassingRestrictions()));
Record.AddCXXDefinitionData(RD);
Record.AddOffset(WriteDeclContextLexicalBlock(
*Context, const_cast<CXXRecordDecl *>(RD)));
@@ -4940,7 +5341,7 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
Record.push_back(false);
}
}
- Record.push_back(RD->getTagKind());
+ Record.push_back(llvm::to_underlying(RD->getTagKind()));
Record.AddSourceLocation(RD->getLocation());
Record.AddSourceLocation(RD->getBeginLoc());
Record.AddSourceRange(RD->getBraceRange());
@@ -4987,6 +5388,7 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
auto *A = D->getAttr<OMPAllocateDeclAttr>();
Record.push_back(A->getAllocatorType());
Record.AddStmt(A->getAllocator());
+ Record.AddStmt(A->getAlignment());
Record.AddSourceRange(A->getRange());
break;
}
@@ -5002,17 +5404,25 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
break;
case UPD_ADDED_ATTR_TO_RECORD:
- Record.AddAttributes(llvm::makeArrayRef(Update.getAttr()));
+ Record.AddAttributes(llvm::ArrayRef(Update.getAttr()));
break;
}
}
+ // Add a trailing update record, if any. These must go last because we
+ // lazily load their attached statement.
if (HasUpdatedBody) {
const auto *Def = cast<FunctionDecl>(D);
Record.push_back(UPD_CXX_ADDED_FUNCTION_DEFINITION);
Record.push_back(Def->isInlined());
Record.AddSourceLocation(Def->getInnerLocStart());
Record.AddFunctionDefinition(Def);
+ } else if (HasAddedVarDefinition) {
+ const auto *VD = cast<VarDecl>(D);
+ Record.push_back(UPD_CXX_ADDED_VAR_DEFINITION);
+ Record.push_back(VD->isInline());
+ Record.push_back(VD->isInlineSpecified());
+ Record.AddVarDeclInit(VD);
}
OffsetsRecord.push_back(GetDeclRef(D));
@@ -5026,14 +5436,82 @@ void ASTWriter::AddAlignPackInfo(const Sema::AlignPackInfo &Info,
Record.push_back(Raw);
}
-void ASTWriter::AddSourceLocation(SourceLocation Loc, RecordDataImpl &Record) {
- SourceLocation::UIntTy Raw = Loc.getRawEncoding();
- Record.push_back((Raw << 1) | (Raw >> (8 * sizeof(Raw) - 1)));
+FileID ASTWriter::getAdjustedFileID(FileID FID) const {
+ if (FID.isInvalid() || PP->getSourceManager().isLoadedFileID(FID) ||
+ NonAffectingFileIDs.empty())
+ return FID;
+ auto It = llvm::lower_bound(NonAffectingFileIDs, FID);
+ unsigned Idx = std::distance(NonAffectingFileIDs.begin(), It);
+ unsigned Offset = NonAffectingFileIDAdjustments[Idx];
+ return FileID::get(FID.getOpaqueValue() - Offset);
+}
+
+unsigned ASTWriter::getAdjustedNumCreatedFIDs(FileID FID) const {
+ unsigned NumCreatedFIDs = PP->getSourceManager()
+ .getLocalSLocEntry(FID.ID)
+ .getFile()
+ .NumCreatedFIDs;
+
+ unsigned AdjustedNumCreatedFIDs = 0;
+ for (unsigned I = FID.ID, N = I + NumCreatedFIDs; I != N; ++I)
+ if (IsSLocAffecting[I])
+ ++AdjustedNumCreatedFIDs;
+ return AdjustedNumCreatedFIDs;
+}
+
+SourceLocation ASTWriter::getAdjustedLocation(SourceLocation Loc) const {
+ if (Loc.isInvalid())
+ return Loc;
+ return Loc.getLocWithOffset(-getAdjustment(Loc.getOffset()));
+}
+
+SourceRange ASTWriter::getAdjustedRange(SourceRange Range) const {
+ return SourceRange(getAdjustedLocation(Range.getBegin()),
+ getAdjustedLocation(Range.getEnd()));
+}
+
+SourceLocation::UIntTy
+ASTWriter::getAdjustedOffset(SourceLocation::UIntTy Offset) const {
+ return Offset - getAdjustment(Offset);
+}
+
+SourceLocation::UIntTy
+ASTWriter::getAdjustment(SourceLocation::UIntTy Offset) const {
+ if (NonAffectingRanges.empty())
+ return 0;
+
+ if (PP->getSourceManager().isLoadedOffset(Offset))
+ return 0;
+
+ if (Offset > NonAffectingRanges.back().getEnd().getOffset())
+ return NonAffectingOffsetAdjustments.back();
+
+ if (Offset < NonAffectingRanges.front().getBegin().getOffset())
+ return 0;
+
+ auto Contains = [](const SourceRange &Range, SourceLocation::UIntTy Offset) {
+ return Range.getEnd().getOffset() < Offset;
+ };
+
+ auto It = llvm::lower_bound(NonAffectingRanges, Offset, Contains);
+ unsigned Idx = std::distance(NonAffectingRanges.begin(), It);
+ return NonAffectingOffsetAdjustments[Idx];
+}
+
+void ASTWriter::AddFileID(FileID FID, RecordDataImpl &Record) {
+ Record.push_back(getAdjustedFileID(FID).getOpaqueValue());
+}
+
+void ASTWriter::AddSourceLocation(SourceLocation Loc, RecordDataImpl &Record,
+ SourceLocationSequence *Seq) {
+ Loc = getAdjustedLocation(Loc);
+ Record.push_back(SourceLocationEncoding::encode(Loc, Seq));
}
-void ASTWriter::AddSourceRange(SourceRange Range, RecordDataImpl &Record) {
- AddSourceLocation(Range.getBegin(), Record);
- AddSourceLocation(Range.getEnd(), Record);
+void ASTWriter::AddSourceRange(SourceRange Range, RecordDataImpl &Record,
+ SourceLocationSequence *Seq) {
+ AddSourceLocation(Range.getBegin(), Record, Seq);
+ AddSourceLocation(Range.getEnd(), Record, Seq);
}
void ASTRecordWriter::AddAPFloat(const llvm::APFloat &Value) {
@@ -5074,7 +5552,7 @@ MacroID ASTWriter::getMacroID(MacroInfo *MI) {
if (!MI || MI->isBuiltinMacro())
return 0;
- assert(MacroIDs.find(MI) != MacroIDs.end() && "Macro not emitted!");
+ assert(MacroIDs.contains(MI) && "Macro not emitted!");
return MacroIDs[MI];
}
@@ -5131,6 +5609,7 @@ void ASTRecordWriter::AddTemplateArgumentLocInfo(
case TemplateArgument::Integral:
case TemplateArgument::Declaration:
case TemplateArgument::NullPtr:
+ case TemplateArgument::StructuralValue:
case TemplateArgument::Pack:
// FIXME: Is this right?
break;
@@ -5160,8 +5639,9 @@ void ASTRecordWriter::AddTypeSourceInfo(TypeSourceInfo *TInfo) {
AddTypeLoc(TInfo->getTypeLoc());
}
-void ASTRecordWriter::AddTypeLoc(TypeLoc TL) {
- TypeLocWriter TLW(*this);
+void ASTRecordWriter::AddTypeLoc(TypeLoc TL, LocSeq *OuterSeq) {
+ LocSeq::State Seq(OuterSeq);
+ TypeLocWriter TLW(*this, Seq);
for (; !TL.isNull(); TL = TL.getNextTypeLoc())
TLW.Visit(TL);
}
@@ -5248,7 +5728,7 @@ DeclID ASTWriter::getDeclID(const Decl *D) {
if (D->isFromASTFile())
return D->getGlobalID();
- assert(DeclIDs.find(D) != DeclIDs.end() && "Declaration not emitted!");
+ assert(DeclIDs.contains(D) && "Declaration not emitted!");
return DeclIDs[D];
}
@@ -5267,7 +5747,7 @@ void ASTWriter::associateDeclWithFile(const Decl *D, DeclID ID) {
// a function/objc method, should not have TU as lexical context.
// TemplateTemplateParmDecls that are part of an alias template, should not
// have TU as lexical context.
- if (isa<ParmVarDecl>(D) || isa<TemplateTemplateParmDecl>(D))
+ if (isa<ParmVarDecl, TemplateTemplateParmDecl>(D))
return;
SourceManager &SM = Context->getSourceManager();
@@ -5279,6 +5759,7 @@ void ASTWriter::associateDeclWithFile(const Decl *D, DeclID ID) {
if (FID.isInvalid())
return;
assert(SM.getSLocEntry(FID).isFile());
+ assert(IsSLocAffecting[FID.ID]);
std::unique_ptr<DeclIDInFileInfo> &Info = FileDeclIDs[FID];
if (!Info)
@@ -5286,16 +5767,7 @@ void ASTWriter::associateDeclWithFile(const Decl *D, DeclID ID) {
std::pair<unsigned, serialization::DeclID> LocDecl(Offset, ID);
LocDeclIDsTy &Decls = Info->DeclIDs;
-
- if (Decls.empty() || Decls.back().first <= Offset) {
- Decls.push_back(LocDecl);
- return;
- }
-
- LocDeclIDsTy::iterator I =
- llvm::upper_bound(Decls, LocDecl, llvm::less_first());
-
- Decls.insert(I, LocDecl);
+ Decls.push_back(LocDecl);
}
unsigned ASTWriter::getAnonymousDeclarationNumber(const NamedDecl *D) {
@@ -5533,14 +6005,32 @@ void ASTRecordWriter::AddCXXCtorInitializers(
void ASTRecordWriter::AddCXXDefinitionData(const CXXRecordDecl *D) {
auto &Data = D->data();
+
Record->push_back(Data.IsLambda);
- #define FIELD(Name, Width, Merge) \
- Record->push_back(Data.Name);
- #include "clang/AST/CXXRecordDeclDefinitionBits.def"
+ BitsPacker DefinitionBits;
+
+ bool ShouldSkipCheckingODR = D->shouldSkipCheckingODR();
+ DefinitionBits.addBit(ShouldSkipCheckingODR);
+
+#define FIELD(Name, Width, Merge) \
+ if (!DefinitionBits.canWriteNextNBits(Width)) { \
+ Record->push_back(DefinitionBits); \
+ DefinitionBits.reset(0); \
+ } \
+ DefinitionBits.addBits(Data.Name, Width);
+
+#include "clang/AST/CXXRecordDeclDefinitionBits.def"
+#undef FIELD
+
+ Record->push_back(DefinitionBits);
+
+ // We only perform ODR checks for decls not in GMF.
+ if (!ShouldSkipCheckingODR)
+ // getODRHash will compute the ODRHash if it has not been previously
+ // computed.
+ Record->push_back(D->getODRHash());
- // getODRHash will compute the ODRHash if it has not been previously computed.
- Record->push_back(D->getODRHash());
bool ModulesDebugInfo =
Writer->Context->getLangOpts().ModulesDebugInfo && !D->isDependentType();
Record->push_back(ModulesDebugInfo);
@@ -5549,40 +6039,49 @@ void ASTRecordWriter::AddCXXDefinitionData(const CXXRecordDecl *D) {
// IsLambda bit is already saved.
- Record->push_back(Data.NumBases);
- if (Data.NumBases > 0)
- AddCXXBaseSpecifiers(Data.bases());
-
- // FIXME: Make VBases lazily computed when needed to avoid storing them.
- Record->push_back(Data.NumVBases);
- if (Data.NumVBases > 0)
- AddCXXBaseSpecifiers(Data.vbases());
-
AddUnresolvedSet(Data.Conversions.get(*Writer->Context));
Record->push_back(Data.ComputedVisibleConversions);
if (Data.ComputedVisibleConversions)
AddUnresolvedSet(Data.VisibleConversions.get(*Writer->Context));
// Data.Definition is the owning decl, no need to write it.
- AddDeclRef(D->getFirstFriend());
- // Add lambda-specific data.
- if (Data.IsLambda) {
+ if (!Data.IsLambda) {
+ Record->push_back(Data.NumBases);
+ if (Data.NumBases > 0)
+ AddCXXBaseSpecifiers(Data.bases());
+
+ // FIXME: Make VBases lazily computed when needed to avoid storing them.
+ Record->push_back(Data.NumVBases);
+ if (Data.NumVBases > 0)
+ AddCXXBaseSpecifiers(Data.vbases());
+
+ AddDeclRef(D->getFirstFriend());
+ } else {
auto &Lambda = D->getLambdaData();
- Record->push_back(Lambda.Dependent);
- Record->push_back(Lambda.IsGenericLambda);
- Record->push_back(Lambda.CaptureDefault);
- Record->push_back(Lambda.NumCaptures);
+
+ BitsPacker LambdaBits;
+ LambdaBits.addBits(Lambda.DependencyKind, /*Width=*/2);
+ LambdaBits.addBit(Lambda.IsGenericLambda);
+ LambdaBits.addBits(Lambda.CaptureDefault, /*Width=*/2);
+ LambdaBits.addBits(Lambda.NumCaptures, /*Width=*/15);
+ LambdaBits.addBit(Lambda.HasKnownInternalLinkage);
+ Record->push_back(LambdaBits);
+
Record->push_back(Lambda.NumExplicitCaptures);
- Record->push_back(Lambda.HasKnownInternalLinkage);
Record->push_back(Lambda.ManglingNumber);
Record->push_back(D->getDeviceLambdaManglingNumber());
- AddDeclRef(D->getLambdaContextDecl());
+ // The lambda context declaration and index within the context are provided
+ // separately, so that they can be used for merging.
AddTypeSourceInfo(Lambda.MethodTyInfo);
for (unsigned I = 0, N = Lambda.NumCaptures; I != N; ++I) {
- const LambdaCapture &Capture = Lambda.Captures[I];
+ const LambdaCapture &Capture = Lambda.Captures.front()[I];
AddSourceLocation(Capture.getLocation());
- Record->push_back(Capture.isImplicit());
- Record->push_back(Capture.getCaptureKind());
+
+ BitsPacker CaptureBits;
+ CaptureBits.addBit(Capture.isImplicit());
+ CaptureBits.addBits(Capture.getCaptureKind(), /*Width=*/3);
+ Record->push_back(CaptureBits);
+
switch (Capture.getCaptureKind()) {
case LCK_StarThis:
case LCK_This:
@@ -5590,7 +6089,7 @@ void ASTRecordWriter::AddCXXDefinitionData(const CXXRecordDecl *D) {
break;
case LCK_ByCopy:
case LCK_ByRef:
- VarDecl *Var =
+ ValueDecl *Var =
Capture.capturesVariable() ? Capture.getCapturedVar() : nullptr;
AddDeclRef(Var);
AddSourceLocation(Capture.isPackExpansion() ? Capture.getEllipsisLoc()
@@ -5608,13 +6107,20 @@ void ASTRecordWriter::AddVarDeclInit(const VarDecl *VD) {
return;
}
- unsigned Val = 1;
+ uint64_t Val = 1;
if (EvaluatedStmt *ES = VD->getEvaluatedStmt()) {
Val |= (ES->HasConstantInitialization ? 2 : 0);
Val |= (ES->HasConstantDestruction ? 4 : 0);
- // FIXME: Also emit the constant initializer value.
+ APValue *Evaluated = VD->getEvaluatedValue();
+ // If the evaluated result is constant, emit it.
+ if (Evaluated && (Evaluated->isInt() || Evaluated->isFloat()))
+ Val |= 8;
}
push_back(Val);
+ if (Val & 8) {
+ AddAPValue(*VD->getEvaluatedValue());
+ }
+
writeStmtRef(Init);
}
@@ -5681,12 +6187,12 @@ void ASTWriter::SelectorRead(SelectorID ID, Selector S) {
void ASTWriter::MacroDefinitionRead(serialization::PreprocessedEntityID ID,
MacroDefinitionRecord *MD) {
- assert(MacroDefinitions.find(MD) == MacroDefinitions.end());
+ assert(!MacroDefinitions.contains(MD));
MacroDefinitions[MD] = ID;
}
void ASTWriter::ModuleRead(serialization::SubmoduleID ID, Module *Mod) {
- assert(SubmoduleIDs.find(Mod) == SubmoduleIDs.end());
+ assert(!SubmoduleIDs.contains(Mod));
SubmoduleIDs[Mod] = ID;
}
@@ -5746,8 +6252,7 @@ void ASTWriter::AddedVisibleDecl(const DeclContext *DC, const Decl *D) {
// We're adding a visible declaration to a predefined decl context. Ensure
// that we write out all of its lookup results so we don't get a nasty
// surprise when we try to emit its lookup table.
- for (auto *Child : DC->decls())
- DeclsToEmitEvenIfUnreferenced.push_back(Child);
+ llvm::append_range(DeclsToEmitEvenIfUnreferenced, DC->decls());
}
DeclsToEmitEvenIfUnreferenced.push_back(D);
}
@@ -6135,6 +6640,15 @@ void OMPClauseWriter::VisitOMPUpdateClause(OMPUpdateClause *C) {
void OMPClauseWriter::VisitOMPCaptureClause(OMPCaptureClause *) {}
+void OMPClauseWriter::VisitOMPCompareClause(OMPCompareClause *) {}
+
+// Save the parameter of fail clause.
+void OMPClauseWriter::VisitOMPFailClause(OMPFailClause *C) {
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getFailParameterLoc());
+ Record.writeEnum(C->getFailParameter());
+}
+
void OMPClauseWriter::VisitOMPSeqCstClause(OMPSeqCstClause *) {}
void OMPClauseWriter::VisitOMPAcqRelClause(OMPAcqRelClause *) {}
@@ -6191,6 +6705,11 @@ void OMPClauseWriter::VisitOMPFilterClause(OMPFilterClause *C) {
Record.AddSourceLocation(C->getLParenLoc());
}
+void OMPClauseWriter::VisitOMPAlignClause(OMPAlignClause *C) {
+ Record.AddStmt(C->getAlignment());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
void OMPClauseWriter::VisitOMPPrivateClause(OMPPrivateClause *C) {
Record.push_back(C->varlist_size());
Record.AddSourceLocation(C->getLParenLoc());
@@ -6395,6 +6914,7 @@ void OMPClauseWriter::VisitOMPDependClause(OMPDependClause *C) {
Record.push_back(C->getDependencyKind());
Record.AddSourceLocation(C->getDependencyLoc());
Record.AddSourceLocation(C->getColonLoc());
+ Record.AddSourceLocation(C->getOmpAllMemoryLoc());
for (auto *VE : C->varlists())
Record.AddStmt(VE);
for (unsigned I = 0, E = C->getNumLoops(); I < E; ++I)
@@ -6415,9 +6935,12 @@ void OMPClauseWriter::VisitOMPMapClause(OMPMapClause *C) {
Record.push_back(C->getTotalComponentListNum());
Record.push_back(C->getTotalComponentsNum());
Record.AddSourceLocation(C->getLParenLoc());
+ bool HasIteratorModifier = false;
for (unsigned I = 0; I < NumberOfOMPMapClauseModifiers; ++I) {
Record.push_back(C->getMapTypeModifier(I));
Record.AddSourceLocation(C->getMapTypeModifierLoc(I));
+ if (C->getMapTypeModifier(I) == OMPC_MAP_MODIFIER_iterator)
+ HasIteratorModifier = true;
}
Record.AddNestedNameSpecifierLoc(C->getMapperQualifierLoc());
Record.AddDeclarationNameInfo(C->getMapperIdInfo());
@@ -6428,6 +6951,8 @@ void OMPClauseWriter::VisitOMPMapClause(OMPMapClause *C) {
Record.AddStmt(E);
for (auto *E : C->mapperlists())
Record.AddStmt(E);
+ if (HasIteratorModifier)
+ Record.AddStmt(C->getIteratorModifier());
for (auto *D : C->all_decls())
Record.AddDeclRef(D);
for (auto N : C->all_num_lists())
@@ -6469,13 +6994,17 @@ void OMPClauseWriter::VisitOMPPriorityClause(OMPPriorityClause *C) {
void OMPClauseWriter::VisitOMPGrainsizeClause(OMPGrainsizeClause *C) {
VisitOMPClauseWithPreInit(C);
+ Record.writeEnum(C->getModifier());
Record.AddStmt(C->getGrainsize());
+ Record.AddSourceLocation(C->getModifierLoc());
Record.AddSourceLocation(C->getLParenLoc());
}
void OMPClauseWriter::VisitOMPNumTasksClause(OMPNumTasksClause *C) {
VisitOMPClauseWithPreInit(C);
+ Record.writeEnum(C->getModifier());
Record.AddStmt(C->getNumTasks());
+ Record.AddSourceLocation(C->getModifierLoc());
Record.AddSourceLocation(C->getLParenLoc());
}
@@ -6625,6 +7154,26 @@ void OMPClauseWriter::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
}
}
+void OMPClauseWriter::VisitOMPHasDeviceAddrClause(OMPHasDeviceAddrClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.push_back(C->getUniqueDeclarationsNum());
+ Record.push_back(C->getTotalComponentListNum());
+ Record.push_back(C->getTotalComponentsNum());
+ Record.AddSourceLocation(C->getLParenLoc());
+ for (auto *E : C->varlists())
+ Record.AddStmt(E);
+ for (auto *D : C->all_decls())
+ Record.AddDeclRef(D);
+ for (auto N : C->all_num_lists())
+ Record.push_back(N);
+ for (auto N : C->all_lists_sizes())
+ Record.push_back(N);
+ for (auto &M : C->all_components()) {
+ Record.AddStmt(M.getAssociatedExpression());
+ Record.AddDeclRef(M.getAssociatedDeclaration());
+ }
+}
+
void OMPClauseWriter::VisitOMPUnifiedAddressClause(OMPUnifiedAddressClause *) {}
void OMPClauseWriter::VisitOMPUnifiedSharedMemoryClause(
@@ -6643,6 +7192,23 @@ void OMPClauseWriter::VisitOMPAtomicDefaultMemOrderClause(
Record.AddSourceLocation(C->getAtomicDefaultMemOrderKindKwLoc());
}
+void OMPClauseWriter::VisitOMPAtClause(OMPAtClause *C) {
+ Record.push_back(C->getAtKind());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getAtKindKwLoc());
+}
+
+void OMPClauseWriter::VisitOMPSeverityClause(OMPSeverityClause *C) {
+ Record.push_back(C->getSeverityKind());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getSeverityKindKwLoc());
+}
+
+void OMPClauseWriter::VisitOMPMessageClause(OMPMessageClause *C) {
+ Record.AddStmt(C->getMessageString());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
void OMPClauseWriter::VisitOMPNontemporalClause(OMPNontemporalClause *C) {
Record.push_back(C->varlist_size());
Record.AddSourceLocation(C->getLParenLoc());
@@ -6668,8 +7234,10 @@ void OMPClauseWriter::VisitOMPExclusiveClause(OMPExclusiveClause *C) {
void OMPClauseWriter::VisitOMPOrderClause(OMPOrderClause *C) {
Record.writeEnum(C->getKind());
+ Record.writeEnum(C->getModifier());
Record.AddSourceLocation(C->getLParenLoc());
Record.AddSourceLocation(C->getKindKwLoc());
+ Record.AddSourceLocation(C->getModifierKwLoc());
}
void OMPClauseWriter::VisitOMPUsesAllocatorsClause(OMPUsesAllocatorsClause *C) {
@@ -6693,6 +7261,40 @@ void OMPClauseWriter::VisitOMPAffinityClause(OMPAffinityClause *C) {
Record.AddStmt(E);
}
+void OMPClauseWriter::VisitOMPBindClause(OMPBindClause *C) {
+ Record.writeEnum(C->getBindKind());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getBindKindLoc());
+}
+
+void OMPClauseWriter::VisitOMPXDynCGroupMemClause(OMPXDynCGroupMemClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ Record.AddStmt(C->getSize());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
+void OMPClauseWriter::VisitOMPDoacrossClause(OMPDoacrossClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.push_back(C->getNumLoops());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.push_back(C->getDependenceType());
+ Record.AddSourceLocation(C->getDependenceLoc());
+ Record.AddSourceLocation(C->getColonLoc());
+ for (auto *VE : C->varlists())
+ Record.AddStmt(VE);
+ for (unsigned I = 0, E = C->getNumLoops(); I < E; ++I)
+ Record.AddStmt(C->getLoopData(I));
+}
+
+void OMPClauseWriter::VisitOMPXAttributeClause(OMPXAttributeClause *C) {
+ Record.AddAttributes(C->getAttrs());
+ Record.AddSourceLocation(C->getBeginLoc());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getEndLoc());
+}
+
+void OMPClauseWriter::VisitOMPXBareClause(OMPXBareClause *C) {}
+
void ASTRecordWriter::writeOMPTraitInfo(const OMPTraitInfo *TI) {
writeUInt32(TI->Sets.size());
for (const auto &Set : TI->Sets) {
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp
index e9315f67d553..42583c09f009 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp
@@ -23,6 +23,7 @@
#include "clang/Serialization/ASTRecordWriter.h"
#include "llvm/Bitstream/BitstreamWriter.h"
#include "llvm/Support/ErrorHandling.h"
+#include <optional>
using namespace clang;
using namespace serialization;
@@ -80,8 +81,6 @@ namespace clang {
void VisitVarTemplateSpecializationDecl(VarTemplateSpecializationDecl *D);
void VisitVarTemplatePartialSpecializationDecl(
VarTemplatePartialSpecializationDecl *D);
- void VisitClassScopeFunctionSpecializationDecl(
- ClassScopeFunctionSpecializationDecl *D);
void VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D);
void VisitValueDecl(ValueDecl *D);
void VisitEnumConstantDecl(EnumConstantDecl *D);
@@ -96,6 +95,7 @@ namespace clang {
void VisitFieldDecl(FieldDecl *D);
void VisitMSPropertyDecl(MSPropertyDecl *D);
void VisitMSGuidDecl(MSGuidDecl *D);
+ void VisitUnnamedGlobalConstantDecl(UnnamedGlobalConstantDecl *D);
void VisitTemplateParamObjectDecl(TemplateParamObjectDecl *D);
void VisitIndirectFieldDecl(IndirectFieldDecl *D);
void VisitVarDecl(VarDecl *D);
@@ -106,6 +106,8 @@ namespace clang {
void VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
void VisitTemplateDecl(TemplateDecl *D);
void VisitConceptDecl(ConceptDecl *D);
+ void VisitImplicitConceptSpecializationDecl(
+ ImplicitConceptSpecializationDecl *D);
void VisitRequiresExprBodyDecl(RequiresExprBodyDecl *D);
void VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D);
void VisitClassTemplateDecl(ClassTemplateDecl *D);
@@ -121,6 +123,7 @@ namespace clang {
void VisitLinkageSpecDecl(LinkageSpecDecl *D);
void VisitExportDecl(ExportDecl *D);
void VisitFileScopeAsmDecl(FileScopeAsmDecl *D);
+ void VisitTopLevelStmtDecl(TopLevelStmtDecl *D);
void VisitImportDecl(ImportDecl *D);
void VisitAccessSpecDecl(AccessSpecDecl *D);
void VisitFriendDecl(FriendDecl *D);
@@ -130,10 +133,9 @@ namespace clang {
void VisitCapturedDecl(CapturedDecl *D);
void VisitEmptyDecl(EmptyDecl *D);
void VisitLifetimeExtendedTemporaryDecl(LifetimeExtendedTemporaryDecl *D);
-
void VisitDeclContext(DeclContext *DC);
template <typename T> void VisitRedeclarable(Redeclarable<T> *D);
-
+ void VisitHLSLBufferDecl(HLSLBufferDecl *D);
// FIXME: Put in the same order is DeclNodes.td?
void VisitObjCMethodDecl(ObjCMethodDecl *D);
@@ -166,7 +168,7 @@ namespace clang {
}
Record.push_back(typeParams->size());
- for (auto typeParam : *typeParams) {
+ for (auto *typeParam : *typeParams) {
Record.AddDeclRef(typeParam);
}
Record.AddSourceLocation(typeParams->getLAngleLoc());
@@ -202,7 +204,7 @@ namespace clang {
return Common->PartialSpecializations;
}
ArrayRef<Decl> getPartialSpecializations(FunctionTemplateDecl::Common *) {
- return None;
+ return std::nullopt;
}
template<typename DeclTy>
@@ -220,7 +222,7 @@ namespace clang {
ArrayRef<DeclID> LazySpecializations;
if (auto *LS = Common->LazySpecializations)
- LazySpecializations = llvm::makeArrayRef(LS + 1, LS[0]);
+ LazySpecializations = llvm::ArrayRef(LS + 1, LS[0]);
// Add a slot to the record for the number of specializations.
unsigned I = Record.size();
@@ -274,7 +276,7 @@ void ASTDeclWriter::Visit(Decl *D) {
// Source locations require array (variable-length) abbreviations. The
// abbreviation infrastructure requires that arrays are encoded last, so
// we handle it here in the case of those classes derived from DeclaratorDecl
- if (DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D)) {
+ if (auto *DD = dyn_cast<DeclaratorDecl>(D)) {
if (auto *TInfo = DD->getTypeSourceInfo())
Record.AddTypeLoc(TInfo->getTypeLoc());
}
@@ -282,35 +284,71 @@ void ASTDeclWriter::Visit(Decl *D) {
// Handle FunctionDecl's body here and write it after all other Stmts/Exprs
// have been written. We want it last because we will not read it back when
// retrieving it from the AST, we'll just lazily set the offset.
- if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (auto *FD = dyn_cast<FunctionDecl>(D)) {
Record.push_back(FD->doesThisDeclarationHaveABody());
if (FD->doesThisDeclarationHaveABody())
Record.AddFunctionDefinition(FD);
}
+ // Similar to FunctionDecls, handle VarDecl's initializer here and write it
+ // after all other Stmts/Exprs. We will not read the initializer until after
+ // we have finished recursive deserialization, because it can recursively
+ // refer back to the variable.
+ if (auto *VD = dyn_cast<VarDecl>(D)) {
+ Record.AddVarDeclInit(VD);
+ }
+
+ // And similarly for FieldDecls. We already serialized whether there is a
+ // default member initializer.
+ if (auto *FD = dyn_cast<FieldDecl>(D)) {
+ if (FD->hasInClassInitializer()) {
+ if (Expr *Init = FD->getInClassInitializer()) {
+ Record.push_back(1);
+ Record.AddStmt(Init);
+ } else {
+ Record.push_back(0);
+ // Initializer has not been instantiated yet.
+ }
+ }
+ }
+
// If this declaration is also a DeclContext, write blocks for the
// declarations that lexically stored inside its context and those
// declarations that are visible from its context.
- if (DeclContext *DC = dyn_cast<DeclContext>(D))
+ if (auto *DC = dyn_cast<DeclContext>(D))
VisitDeclContext(DC);
}
void ASTDeclWriter::VisitDecl(Decl *D) {
+ BitsPacker DeclBits;
+
+ // The order matters here. It will be better to put the bit with higher
+ // probability to be 0 in the end of the bits.
+ //
+ // Since we're using VBR6 format to store it.
+ // It will be pretty effient if all the higher bits are 0.
+ // For example, if we need to pack 8 bits into a value and the stored value
+ // is 0xf0, the actual stored value will be 0b000111'110000, which takes 12
+ // bits actually. However, if we changed the order to be 0x0f, then we can
+ // store it as 0b001111, which takes 6 bits only now.
+ DeclBits.addBits((uint64_t)D->getModuleOwnershipKind(), /*BitWidth=*/3);
+ DeclBits.addBit(D->isReferenced());
+ DeclBits.addBit(D->isUsed(false));
+ DeclBits.addBits(D->getAccess(), /*BitWidth=*/2);
+ DeclBits.addBit(D->isImplicit());
+ DeclBits.addBit(D->getDeclContext() != D->getLexicalDeclContext());
+ DeclBits.addBit(D->hasAttrs());
+ DeclBits.addBit(D->isTopLevelDeclInObjCContainer());
+ DeclBits.addBit(D->isInvalidDecl());
+ Record.push_back(DeclBits);
+
Record.AddDeclRef(cast_or_null<Decl>(D->getDeclContext()));
if (D->getDeclContext() != D->getLexicalDeclContext())
Record.AddDeclRef(cast_or_null<Decl>(D->getLexicalDeclContext()));
- else
- Record.push_back(0);
- Record.push_back(D->isInvalidDecl());
- Record.push_back(D->hasAttrs());
+
if (D->hasAttrs())
Record.AddAttributes(D->getAttrs());
- Record.push_back(D->isImplicit());
- Record.push_back(D->isUsed(false));
- Record.push_back(D->isReferenced());
- Record.push_back(D->isTopLevelDeclInObjCContainer());
- Record.push_back(D->getAccess());
- Record.push_back(D->isModulePrivate());
+
Record.push_back(Writer.getSubmoduleID(D->getOwningModule()));
// If this declaration injected a name into a context different from its
@@ -407,41 +445,59 @@ void ASTDeclWriter::VisitTypeAliasDecl(TypeAliasDecl *D) {
}
void ASTDeclWriter::VisitTagDecl(TagDecl *D) {
+ static_assert(DeclContext::NumTagDeclBits == 23,
+ "You need to update the serializer after you change the "
+ "TagDeclBits");
+
VisitRedeclarable(D);
VisitTypeDecl(D);
Record.push_back(D->getIdentifierNamespace());
- Record.push_back((unsigned)D->getTagKind()); // FIXME: stable encoding
- if (!isa<CXXRecordDecl>(D))
- Record.push_back(D->isCompleteDefinition());
- Record.push_back(D->isEmbeddedInDeclarator());
- Record.push_back(D->isFreeStanding());
- Record.push_back(D->isCompleteDefinitionRequired());
+
+ BitsPacker TagDeclBits;
+ TagDeclBits.addBits(llvm::to_underlying(D->getTagKind()), /*BitWidth=*/3);
+ TagDeclBits.addBit(!isa<CXXRecordDecl>(D) ? D->isCompleteDefinition() : 0);
+ TagDeclBits.addBit(D->isEmbeddedInDeclarator());
+ TagDeclBits.addBit(D->isFreeStanding());
+ TagDeclBits.addBit(D->isCompleteDefinitionRequired());
+ TagDeclBits.addBits(
+ D->hasExtInfo() ? 1 : (D->getTypedefNameForAnonDecl() ? 2 : 0),
+ /*BitWidth=*/2);
+ Record.push_back(TagDeclBits);
+
Record.AddSourceRange(D->getBraceRange());
if (D->hasExtInfo()) {
- Record.push_back(1);
Record.AddQualifierInfo(*D->getExtInfo());
} else if (auto *TD = D->getTypedefNameForAnonDecl()) {
- Record.push_back(2);
Record.AddDeclRef(TD);
Record.AddIdentifierRef(TD->getDeclName().getAsIdentifierInfo());
- } else {
- Record.push_back(0);
}
}
void ASTDeclWriter::VisitEnumDecl(EnumDecl *D) {
+ static_assert(DeclContext::NumEnumDeclBits == 43,
+ "You need to update the serializer after you change the "
+ "EnumDeclBits");
+
VisitTagDecl(D);
Record.AddTypeSourceInfo(D->getIntegerTypeSourceInfo());
if (!D->getIntegerTypeSourceInfo())
Record.AddTypeRef(D->getIntegerType());
Record.AddTypeRef(D->getPromotionType());
- Record.push_back(D->getNumPositiveBits());
- Record.push_back(D->getNumNegativeBits());
- Record.push_back(D->isScoped());
- Record.push_back(D->isScopedUsingClassTag());
- Record.push_back(D->isFixed());
- Record.push_back(D->getODRHash());
+
+ BitsPacker EnumDeclBits;
+ EnumDeclBits.addBits(D->getNumPositiveBits(), /*BitWidth=*/8);
+ EnumDeclBits.addBits(D->getNumNegativeBits(), /*BitWidth=*/8);
+ bool ShouldSkipCheckingODR = D->shouldSkipCheckingODR();
+ EnumDeclBits.addBit(ShouldSkipCheckingODR);
+ EnumDeclBits.addBit(D->isScoped());
+ EnumDeclBits.addBit(D->isScopedUsingClassTag());
+ EnumDeclBits.addBit(D->isFixed());
+ Record.push_back(EnumDeclBits);
+
+ // We only perform ODR checks for decls not in GMF.
+ if (!ShouldSkipCheckingODR)
+ Record.push_back(D->getODRHash());
if (MemberSpecializationInfo *MemberInfo = D->getMemberSpecializationInfo()) {
Record.AddDeclRef(MemberInfo->getInstantiatedFrom());
@@ -451,22 +507,14 @@ void ASTDeclWriter::VisitEnumDecl(EnumDecl *D) {
Record.AddDeclRef(nullptr);
}
- if (D->getDeclContext() == D->getLexicalDeclContext() &&
- !D->hasAttrs() &&
- !D->isImplicit() &&
- !D->isUsed(false) &&
- !D->hasExtInfo() &&
+ if (D->getDeclContext() == D->getLexicalDeclContext() && !D->hasAttrs() &&
+ !D->isInvalidDecl() && !D->isImplicit() && !D->hasExtInfo() &&
!D->getTypedefNameForAnonDecl() &&
D->getFirstDecl() == D->getMostRecentDecl() &&
- !D->isInvalidDecl() &&
- !D->isReferenced() &&
!D->isTopLevelDeclInObjCContainer() &&
- D->getAccess() == AS_none &&
- !D->isModulePrivate() &&
!CXXRecordDecl::classofKind(D->getKind()) &&
- !D->getIntegerTypeSourceInfo() &&
- !D->getMemberSpecializationInfo() &&
- !needsAnonymousDeclarationNumber(D) &&
+ !D->getIntegerTypeSourceInfo() && !D->getMemberSpecializationInfo() &&
+ !needsAnonymousDeclarationNumber(D) && !D->shouldSkipCheckingODR() &&
D->getDeclName().getNameKind() == DeclarationName::Identifier)
AbbrevToUse = Writer.getDeclEnumAbbrev();
@@ -474,32 +522,37 @@ void ASTDeclWriter::VisitEnumDecl(EnumDecl *D) {
}
void ASTDeclWriter::VisitRecordDecl(RecordDecl *D) {
+ static_assert(DeclContext::NumRecordDeclBits == 64,
+ "You need to update the serializer after you change the "
+ "RecordDeclBits");
+
VisitTagDecl(D);
- Record.push_back(D->hasFlexibleArrayMember());
- Record.push_back(D->isAnonymousStructOrUnion());
- Record.push_back(D->hasObjectMember());
- Record.push_back(D->hasVolatileMember());
- Record.push_back(D->isNonTrivialToPrimitiveDefaultInitialize());
- Record.push_back(D->isNonTrivialToPrimitiveCopy());
- Record.push_back(D->isNonTrivialToPrimitiveDestroy());
- Record.push_back(D->hasNonTrivialToPrimitiveDefaultInitializeCUnion());
- Record.push_back(D->hasNonTrivialToPrimitiveDestructCUnion());
- Record.push_back(D->hasNonTrivialToPrimitiveCopyCUnion());
- Record.push_back(D->isParamDestroyedInCallee());
- Record.push_back(D->getArgPassingRestrictions());
- if (D->getDeclContext() == D->getLexicalDeclContext() &&
- !D->hasAttrs() &&
- !D->isImplicit() &&
- !D->isUsed(false) &&
- !D->hasExtInfo() &&
+ BitsPacker RecordDeclBits;
+ RecordDeclBits.addBit(D->hasFlexibleArrayMember());
+ RecordDeclBits.addBit(D->isAnonymousStructOrUnion());
+ RecordDeclBits.addBit(D->hasObjectMember());
+ RecordDeclBits.addBit(D->hasVolatileMember());
+ RecordDeclBits.addBit(D->isNonTrivialToPrimitiveDefaultInitialize());
+ RecordDeclBits.addBit(D->isNonTrivialToPrimitiveCopy());
+ RecordDeclBits.addBit(D->isNonTrivialToPrimitiveDestroy());
+ RecordDeclBits.addBit(D->hasNonTrivialToPrimitiveDefaultInitializeCUnion());
+ RecordDeclBits.addBit(D->hasNonTrivialToPrimitiveDestructCUnion());
+ RecordDeclBits.addBit(D->hasNonTrivialToPrimitiveCopyCUnion());
+ RecordDeclBits.addBit(D->isParamDestroyedInCallee());
+ RecordDeclBits.addBits(llvm::to_underlying(D->getArgPassingRestrictions()), 2);
+ Record.push_back(RecordDeclBits);
+
+ // Only compute this for C/Objective-C, in C++ this is computed as part
+ // of CXXRecordDecl.
+ if (!isa<CXXRecordDecl>(D))
+ Record.push_back(D->getODRHash());
+
+ if (D->getDeclContext() == D->getLexicalDeclContext() && !D->hasAttrs() &&
+ !D->isImplicit() && !D->isInvalidDecl() && !D->hasExtInfo() &&
!D->getTypedefNameForAnonDecl() &&
D->getFirstDecl() == D->getMostRecentDecl() &&
- !D->isInvalidDecl() &&
- !D->isReferenced() &&
!D->isTopLevelDeclInObjCContainer() &&
- D->getAccess() == AS_none &&
- !D->isModulePrivate() &&
!CXXRecordDecl::classofKind(D->getKind()) &&
!needsAnonymousDeclarationNumber(D) &&
D->getDeclName().getNameKind() == DeclarationName::Identifier)
@@ -538,52 +591,19 @@ void ASTDeclWriter::VisitDeclaratorDecl(DeclaratorDecl *D) {
}
void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
- VisitRedeclarable(D);
- VisitDeclaratorDecl(D);
- Record.AddDeclarationNameLoc(D->DNLoc, D->getDeclName());
- Record.push_back(D->getIdentifierNamespace());
-
- // FunctionDecl's body is handled last at ASTWriterDecl::Visit,
- // after everything else is written.
- Record.push_back(static_cast<int>(D->getStorageClass())); // FIXME: stable encoding
- Record.push_back(D->isInlineSpecified());
- Record.push_back(D->isInlined());
- Record.push_back(D->isVirtualAsWritten());
- Record.push_back(D->isPure());
- Record.push_back(D->hasInheritedPrototype());
- Record.push_back(D->hasWrittenPrototype());
- Record.push_back(D->isDeletedBit());
- Record.push_back(D->isTrivial());
- Record.push_back(D->isTrivialForCall());
- Record.push_back(D->isDefaulted());
- Record.push_back(D->isExplicitlyDefaulted());
- Record.push_back(D->hasImplicitReturnZero());
- Record.push_back(static_cast<uint64_t>(D->getConstexprKind()));
- Record.push_back(D->usesSEHTry());
- Record.push_back(D->hasSkippedBody());
- Record.push_back(D->isMultiVersion());
- Record.push_back(D->isLateTemplateParsed());
- Record.push_back(D->getLinkageInternal());
- Record.AddSourceLocation(D->getEndLoc());
-
- Record.push_back(D->getODRHash());
+ static_assert(DeclContext::NumFunctionDeclBits == 44,
+ "You need to update the serializer after you change the "
+ "FunctionDeclBits");
- if (D->isDefaulted()) {
- if (auto *FDI = D->getDefaultedFunctionInfo()) {
- Record.push_back(FDI->getUnqualifiedLookups().size());
- for (DeclAccessPair P : FDI->getUnqualifiedLookups()) {
- Record.AddDeclRef(P.getDecl());
- Record.push_back(P.getAccess());
- }
- } else {
- Record.push_back(0);
- }
- }
+ VisitRedeclarable(D);
Record.push_back(D->getTemplatedKind());
switch (D->getTemplatedKind()) {
case FunctionDecl::TK_NonTemplate:
break;
+ case FunctionDecl::TK_DependentNonTemplate:
+ Record.AddDeclRef(D->getInstantiatedFromDecl());
+ break;
case FunctionDecl::TK_FunctionTemplate:
Record.AddDeclRef(D->getDescribedFunctionTemplate());
break;
@@ -608,15 +628,9 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
// Template args as written.
Record.push_back(FTSInfo->TemplateArgumentsAsWritten != nullptr);
- if (FTSInfo->TemplateArgumentsAsWritten) {
- Record.push_back(FTSInfo->TemplateArgumentsAsWritten->NumTemplateArgs);
- for (int i=0, e = FTSInfo->TemplateArgumentsAsWritten->NumTemplateArgs;
- i!=e; ++i)
- Record.AddTemplateArgumentLoc(
- (*FTSInfo->TemplateArgumentsAsWritten)[i]);
- Record.AddSourceLocation(FTSInfo->TemplateArgumentsAsWritten->LAngleLoc);
- Record.AddSourceLocation(FTSInfo->TemplateArgumentsAsWritten->RAngleLoc);
- }
+ if (FTSInfo->TemplateArgumentsAsWritten)
+ Record.AddASTTemplateArgumentListInfo(
+ FTSInfo->TemplateArgumentsAsWritten);
Record.AddSourceLocation(FTSInfo->getPointOfInstantiation());
@@ -641,23 +655,76 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
DependentFunctionTemplateSpecializationInfo *
DFTSInfo = D->getDependentSpecializationInfo();
- // Templates.
- Record.push_back(DFTSInfo->getNumTemplates());
- for (int i=0, e = DFTSInfo->getNumTemplates(); i != e; ++i)
- Record.AddDeclRef(DFTSInfo->getTemplate(i));
+ // Candidates.
+ Record.push_back(DFTSInfo->getCandidates().size());
+ for (FunctionTemplateDecl *FTD : DFTSInfo->getCandidates())
+ Record.AddDeclRef(FTD);
// Templates args.
- Record.push_back(DFTSInfo->getNumTemplateArgs());
- for (int i=0, e = DFTSInfo->getNumTemplateArgs(); i != e; ++i)
- Record.AddTemplateArgumentLoc(DFTSInfo->getTemplateArg(i));
- Record.AddSourceLocation(DFTSInfo->getLAngleLoc());
- Record.AddSourceLocation(DFTSInfo->getRAngleLoc());
+ Record.push_back(DFTSInfo->TemplateArgumentsAsWritten != nullptr);
+ if (DFTSInfo->TemplateArgumentsAsWritten)
+ Record.AddASTTemplateArgumentListInfo(
+ DFTSInfo->TemplateArgumentsAsWritten);
break;
}
}
+ VisitDeclaratorDecl(D);
+ Record.AddDeclarationNameLoc(D->DNLoc, D->getDeclName());
+ Record.push_back(D->getIdentifierNamespace());
+
+ // The order matters here. It will be better to put the bit with higher
+ // probability to be 0 in the end of the bits. See the comments in VisitDecl
+ // for details.
+ BitsPacker FunctionDeclBits;
+ // FIXME: stable encoding
+ FunctionDeclBits.addBits(llvm::to_underlying(D->getLinkageInternal()), 3);
+ FunctionDeclBits.addBits((uint32_t)D->getStorageClass(), /*BitWidth=*/3);
+ bool ShouldSkipCheckingODR = D->shouldSkipCheckingODR();
+ FunctionDeclBits.addBit(ShouldSkipCheckingODR);
+ FunctionDeclBits.addBit(D->isInlineSpecified());
+ FunctionDeclBits.addBit(D->isInlined());
+ FunctionDeclBits.addBit(D->hasSkippedBody());
+ FunctionDeclBits.addBit(D->isVirtualAsWritten());
+ FunctionDeclBits.addBit(D->isPureVirtual());
+ FunctionDeclBits.addBit(D->hasInheritedPrototype());
+ FunctionDeclBits.addBit(D->hasWrittenPrototype());
+ FunctionDeclBits.addBit(D->isDeletedBit());
+ FunctionDeclBits.addBit(D->isTrivial());
+ FunctionDeclBits.addBit(D->isTrivialForCall());
+ FunctionDeclBits.addBit(D->isDefaulted());
+ FunctionDeclBits.addBit(D->isExplicitlyDefaulted());
+ FunctionDeclBits.addBit(D->isIneligibleOrNotSelected());
+ FunctionDeclBits.addBits((uint64_t)(D->getConstexprKind()), /*BitWidth=*/2);
+ FunctionDeclBits.addBit(D->hasImplicitReturnZero());
+ FunctionDeclBits.addBit(D->isMultiVersion());
+ FunctionDeclBits.addBit(D->isLateTemplateParsed());
+ FunctionDeclBits.addBit(D->FriendConstraintRefersToEnclosingTemplate());
+ FunctionDeclBits.addBit(D->usesSEHTry());
+ Record.push_back(FunctionDeclBits);
+
+ Record.AddSourceLocation(D->getEndLoc());
+ if (D->isExplicitlyDefaulted())
+ Record.AddSourceLocation(D->getDefaultLoc());
+
+ // We only perform ODR checks for decls not in GMF.
+ if (!ShouldSkipCheckingODR)
+ Record.push_back(D->getODRHash());
+
+ if (D->isDefaulted()) {
+ if (auto *FDI = D->getDefaultedFunctionInfo()) {
+ Record.push_back(FDI->getUnqualifiedLookups().size());
+ for (DeclAccessPair P : FDI->getUnqualifiedLookups()) {
+ Record.AddDeclRef(P.getDecl());
+ Record.push_back(P.getAccess());
+ }
+ } else {
+ Record.push_back(0);
+ }
+ }
+
Record.push_back(D->param_size());
- for (auto P : D->parameters())
+ for (auto *P : D->parameters())
Record.AddDeclRef(P);
Code = serialization::DECL_FUNCTION;
}
@@ -676,11 +743,15 @@ void ASTDeclWriter::VisitCXXDeductionGuideDecl(CXXDeductionGuideDecl *D) {
addExplicitSpecifier(D->getExplicitSpecifier(), Record);
Record.AddDeclRef(D->Ctor);
VisitFunctionDecl(D);
- Record.push_back(D->isCopyDeductionCandidate());
+ Record.push_back(static_cast<unsigned char>(D->getDeductionCandidateKind()));
Code = serialization::DECL_CXX_DEDUCTION_GUIDE;
}
void ASTDeclWriter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
+ static_assert(DeclContext::NumObjCMethodDeclBits == 37,
+ "You need to update the serializer after you change the "
+ "ObjCMethodDeclBits");
+
VisitNamedDecl(D);
// FIXME: convert to LazyStmtPtr?
// Unlike C/C++, method bodies will never be in header files.
@@ -707,7 +778,7 @@ void ASTDeclWriter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
}
// FIXME: stable encoding for @required/@optional
- Record.push_back(D->getImplementationControl());
+ Record.push_back(llvm::to_underlying(D->getImplementationControl()));
// FIXME: stable encoding for in/out/inout/bycopy/byref/oneway/nullability
Record.push_back(D->getObjCDeclQualifier());
Record.push_back(D->hasRelatedResultType());
@@ -739,6 +810,10 @@ void ASTDeclWriter::VisitObjCTypeParamDecl(ObjCTypeParamDecl *D) {
}
void ASTDeclWriter::VisitObjCContainerDecl(ObjCContainerDecl *D) {
+ static_assert(DeclContext::NumObjCContainerDeclBits == 64,
+ "You need to update the serializer after you change the "
+ "ObjCContainerDeclBits");
+
VisitNamedDecl(D);
Record.AddSourceLocation(D->getAtStartLoc());
Record.AddSourceRange(D->getAtEndRange());
@@ -759,6 +834,7 @@ void ASTDeclWriter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
Record.AddTypeSourceInfo(D->getSuperClassTInfo());
Record.AddSourceLocation(D->getEndOfDefinitionLoc());
Record.push_back(Data.HasDesignatedInitializers);
+ Record.push_back(D->getODRHash());
// Write out the protocols that are directly referenced by the @interface.
Record.push_back(Data.ReferencedProtocols.size());
@@ -821,6 +897,7 @@ void ASTDeclWriter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
Record.AddDeclRef(I);
for (const auto &PL : D->protocol_locs())
Record.AddSourceLocation(PL);
+ Record.push_back(D->getODRHash());
}
Code = serialization::DECL_OBJC_PROTOCOL;
@@ -896,7 +973,7 @@ void ASTDeclWriter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
Record.push_back(D->NumIvarInitializers);
if (D->NumIvarInitializers)
Record.AddCXXCtorInitializers(
- llvm::makeArrayRef(D->init_begin(), D->init_end()));
+ llvm::ArrayRef(D->init_begin(), D->init_end()));
Code = serialization::DECL_OBJC_IMPLEMENTATION;
}
@@ -917,14 +994,11 @@ void ASTDeclWriter::VisitFieldDecl(FieldDecl *D) {
VisitDeclaratorDecl(D);
Record.push_back(D->isMutable());
- FieldDecl::InitStorageKind ISK = D->InitStorage.getInt();
- Record.push_back(ISK);
- if (ISK == FieldDecl::ISK_CapturedVLAType)
+ Record.push_back((D->StorageKind << 1) | D->BitField);
+ if (D->StorageKind == FieldDecl::ISK_CapturedVLAType)
Record.AddTypeRef(QualType(D->getCapturedVLAType(), 0));
- else if (ISK)
- Record.AddStmt(D->getInClassInitializer());
-
- Record.AddStmt(D->getBitWidth());
+ else if (D->BitField)
+ Record.AddStmt(D->getBitWidth());
if (!D->getDeclName())
Record.AddDeclRef(Context.getInstantiatedFromUnnamedFieldDecl(D));
@@ -962,11 +1036,17 @@ void ASTDeclWriter::VisitMSGuidDecl(MSGuidDecl *D) {
Record.push_back(Parts.Part1);
Record.push_back(Parts.Part2);
Record.push_back(Parts.Part3);
- for (auto C : Parts.Part4And5)
- Record.push_back(C);
+ Record.append(std::begin(Parts.Part4And5), std::end(Parts.Part4And5));
Code = serialization::DECL_MS_GUID;
}
+void ASTDeclWriter::VisitUnnamedGlobalConstantDecl(
+ UnnamedGlobalConstantDecl *D) {
+ VisitValueDecl(D);
+ Record.AddAPValue(D->getValue());
+ Code = serialization::DECL_UNNAMED_GLOBAL_CONSTANT;
+}
+
void ASTDeclWriter::VisitTemplateParamObjectDecl(TemplateParamObjectDecl *D) {
VisitValueDecl(D);
Record.AddAPValue(D->getValue());
@@ -985,58 +1065,72 @@ void ASTDeclWriter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
VisitRedeclarable(D);
VisitDeclaratorDecl(D);
- Record.push_back(D->getStorageClass());
- Record.push_back(D->getTSCSpec());
- Record.push_back(D->getInitStyle());
- Record.push_back(D->isARCPseudoStrong());
+
+ // The order matters here. It will be better to put the bit with higher
+ // probability to be 0 in the end of the bits. See the comments in VisitDecl
+ // for details.
+ BitsPacker VarDeclBits;
+ VarDeclBits.addBits(llvm::to_underlying(D->getLinkageInternal()),
+ /*BitWidth=*/3);
+
+ bool ModulesCodegen = false;
+ if (Writer.WritingModule && D->getStorageDuration() == SD_Static &&
+ !D->getDescribedVarTemplate()) {
+ // When building a C++20 module interface unit or a partition unit, a
+ // strong definition in the module interface is provided by the
+ // compilation of that unit, not by its users. (Inline variables are still
+ // emitted in module users.)
+ ModulesCodegen =
+ (Writer.WritingModule->isInterfaceOrPartition() ||
+ (D->hasAttr<DLLExportAttr>() &&
+ Writer.Context->getLangOpts().BuildingPCHWithObjectFile)) &&
+ Writer.Context->GetGVALinkageForVariable(D) >= GVA_StrongExternal;
+ }
+ VarDeclBits.addBit(ModulesCodegen);
+
+ VarDeclBits.addBits(D->getStorageClass(), /*BitWidth=*/3);
+ VarDeclBits.addBits(D->getTSCSpec(), /*BitWidth=*/2);
+ VarDeclBits.addBits(D->getInitStyle(), /*BitWidth=*/2);
+ VarDeclBits.addBit(D->isARCPseudoStrong());
+
+ bool HasDeducedType = false;
if (!isa<ParmVarDecl>(D)) {
- Record.push_back(D->isThisDeclarationADemotedDefinition());
- Record.push_back(D->isExceptionVariable());
- Record.push_back(D->isNRVOVariable());
- Record.push_back(D->isCXXForRangeDecl());
- Record.push_back(D->isObjCForDecl());
- Record.push_back(D->isInline());
- Record.push_back(D->isInlineSpecified());
- Record.push_back(D->isConstexpr());
- Record.push_back(D->isInitCapture());
- Record.push_back(D->isPreviousDeclInSameBlockScope());
+ VarDeclBits.addBit(D->isThisDeclarationADemotedDefinition());
+ VarDeclBits.addBit(D->isExceptionVariable());
+ VarDeclBits.addBit(D->isNRVOVariable());
+ VarDeclBits.addBit(D->isCXXForRangeDecl());
+
+ VarDeclBits.addBit(D->isInline());
+ VarDeclBits.addBit(D->isInlineSpecified());
+ VarDeclBits.addBit(D->isConstexpr());
+ VarDeclBits.addBit(D->isInitCapture());
+ VarDeclBits.addBit(D->isPreviousDeclInSameBlockScope());
+
+ VarDeclBits.addBit(D->isEscapingByref());
+ HasDeducedType = D->getType()->getContainedDeducedType();
+ VarDeclBits.addBit(HasDeducedType);
+
if (const auto *IPD = dyn_cast<ImplicitParamDecl>(D))
- Record.push_back(static_cast<unsigned>(IPD->getParameterKind()));
+ VarDeclBits.addBits(llvm::to_underlying(IPD->getParameterKind()),
+ /*Width=*/3);
else
- Record.push_back(0);
- Record.push_back(D->isEscapingByref());
+ VarDeclBits.addBits(0, /*Width=*/3);
+
+ VarDeclBits.addBit(D->isObjCForDecl());
}
- Record.push_back(D->getLinkageInternal());
- Record.AddVarDeclInit(D);
+ Record.push_back(VarDeclBits);
- if (D->hasAttr<BlocksAttr>() && D->getType()->getAsCXXRecordDecl()) {
+ if (ModulesCodegen)
+ Writer.ModularCodegenDecls.push_back(Writer.GetDeclRef(D));
+
+ if (D->hasAttr<BlocksAttr>()) {
BlockVarCopyInit Init = Writer.Context->getBlockVarCopyInit(D);
Record.AddStmt(Init.getCopyExpr());
if (Init.getCopyExpr())
Record.push_back(Init.canThrow());
}
- if (D->getStorageDuration() == SD_Static) {
- bool ModulesCodegen = false;
- if (Writer.WritingModule &&
- !D->getDescribedVarTemplate() && !D->getMemberSpecializationInfo() &&
- !isa<VarTemplateSpecializationDecl>(D)) {
- // When building a C++ Modules TS module interface unit, a strong
- // definition in the module interface is provided by the compilation of
- // that module interface unit, not by its users. (Inline variables are
- // still emitted in module users.)
- ModulesCodegen =
- (Writer.WritingModule->Kind == Module::ModuleInterfaceUnit ||
- (D->hasAttr<DLLExportAttr>() &&
- Writer.Context->getLangOpts().BuildingPCHWithObjectFile)) &&
- Writer.Context->GetGVALinkageForVariable(D) == GVA_StrongExternal;
- }
- Record.push_back(ModulesCodegen);
- if (ModulesCodegen)
- Writer.ModularCodegenDecls.push_back(Writer.GetDeclRef(D));
- }
-
enum {
VarNotTemplate = 0, VarTemplate, StaticDataMemberSpecialization
};
@@ -1053,28 +1147,17 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
Record.push_back(VarNotTemplate);
}
- if (D->getDeclContext() == D->getLexicalDeclContext() &&
- !D->hasAttrs() &&
- !D->isImplicit() &&
- !D->isUsed(false) &&
- !D->isInvalidDecl() &&
- !D->isReferenced() &&
+ if (D->getDeclContext() == D->getLexicalDeclContext() && !D->hasAttrs() &&
!D->isTopLevelDeclInObjCContainer() &&
- D->getAccess() == AS_none &&
- !D->isModulePrivate() &&
!needsAnonymousDeclarationNumber(D) &&
D->getDeclName().getNameKind() == DeclarationName::Identifier &&
- !D->hasExtInfo() &&
- D->getFirstDecl() == D->getMostRecentDecl() &&
- D->getKind() == Decl::Var &&
- !D->isInline() &&
- !D->isConstexpr() &&
- !D->isInitCapture() &&
- !D->isPreviousDeclInSameBlockScope() &&
- !(D->hasAttr<BlocksAttr>() && D->getType()->getAsCXXRecordDecl()) &&
- !D->isEscapingByref() &&
- D->getStorageDuration() != SD_Static &&
- !D->getMemberSpecializationInfo())
+ !D->hasExtInfo() && D->getFirstDecl() == D->getMostRecentDecl() &&
+ D->getKind() == Decl::Var && !D->isInline() && !D->isConstexpr() &&
+ !D->isInitCapture() && !D->isPreviousDeclInSameBlockScope() &&
+ !D->isEscapingByref() && !HasDeducedType &&
+ D->getStorageDuration() != SD_Static && !D->getDescribedVarTemplate() &&
+ !D->getMemberSpecializationInfo() && !D->isObjCForDecl() &&
+ !isa<ImplicitParamDecl>(D) && !D->isEscapingByref())
AbbrevToUse = Writer.getDeclVarAbbrev();
Code = serialization::DECL_VAR;
@@ -1087,37 +1170,37 @@ void ASTDeclWriter::VisitImplicitParamDecl(ImplicitParamDecl *D) {
void ASTDeclWriter::VisitParmVarDecl(ParmVarDecl *D) {
VisitVarDecl(D);
- Record.push_back(D->isObjCMethodParameter());
- Record.push_back(D->getFunctionScopeDepth());
+
+ // See the implementation of `ParmVarDecl::getParameterIndex()`, which may
+ // exceed the size of the normal bitfield. So it may be better to not pack
+ // these bits.
Record.push_back(D->getFunctionScopeIndex());
- Record.push_back(D->getObjCDeclQualifier()); // FIXME: stable encoding
- Record.push_back(D->isKNRPromoted());
- Record.push_back(D->hasInheritedDefaultArg());
- Record.push_back(D->hasUninstantiatedDefaultArg());
+
+ BitsPacker ParmVarDeclBits;
+ ParmVarDeclBits.addBit(D->isObjCMethodParameter());
+ ParmVarDeclBits.addBits(D->getFunctionScopeDepth(), /*BitsWidth=*/7);
+ // FIXME: stable encoding
+ ParmVarDeclBits.addBits(D->getObjCDeclQualifier(), /*BitsWidth=*/7);
+ ParmVarDeclBits.addBit(D->isKNRPromoted());
+ ParmVarDeclBits.addBit(D->hasInheritedDefaultArg());
+ ParmVarDeclBits.addBit(D->hasUninstantiatedDefaultArg());
+ ParmVarDeclBits.addBit(D->getExplicitObjectParamThisLoc().isValid());
+ Record.push_back(ParmVarDeclBits);
+
if (D->hasUninstantiatedDefaultArg())
Record.AddStmt(D->getUninstantiatedDefaultArg());
+ if (D->getExplicitObjectParamThisLoc().isValid())
+ Record.AddSourceLocation(D->getExplicitObjectParamThisLoc());
Code = serialization::DECL_PARM_VAR;
// If the assumptions about the DECL_PARM_VAR abbrev are true, use it. Here
// we dynamically check for the properties that we optimize for, but don't
// know are true of all PARM_VAR_DECLs.
- if (D->getDeclContext() == D->getLexicalDeclContext() &&
- !D->hasAttrs() &&
- !D->hasExtInfo() &&
- !D->isImplicit() &&
- !D->isUsed(false) &&
- !D->isInvalidDecl() &&
- !D->isReferenced() &&
- D->getAccess() == AS_none &&
- !D->isModulePrivate() &&
- D->getStorageClass() == 0 &&
+ if (D->getDeclContext() == D->getLexicalDeclContext() && !D->hasAttrs() &&
+ !D->hasExtInfo() && D->getStorageClass() == 0 && !D->isInvalidDecl() &&
+ !D->isTopLevelDeclInObjCContainer() &&
D->getInitStyle() == VarDecl::CInit && // Can params have anything else?
- D->getFunctionScopeDepth() == 0 &&
- D->getObjCDeclQualifier() == 0 &&
- !D->isKNRPromoted() &&
- !D->hasInheritedDefaultArg() &&
- D->getInit() == nullptr &&
- !D->hasUninstantiatedDefaultArg()) // No default expr.
+ D->getInit() == nullptr) // No default expr.
AbbrevToUse = Writer.getDeclParmVarAbbrev();
// Check things we know are true of *every* PARM_VAR_DECL, which is more than
@@ -1155,6 +1238,12 @@ void ASTDeclWriter::VisitFileScopeAsmDecl(FileScopeAsmDecl *D) {
Code = serialization::DECL_FILE_SCOPE_ASM;
}
+void ASTDeclWriter::VisitTopLevelStmtDecl(TopLevelStmtDecl *D) {
+ VisitDecl(D);
+ Record.AddStmt(D->getStmt());
+ Code = serialization::DECL_TOP_LEVEL_STMT_DECL;
+}
+
void ASTDeclWriter::VisitEmptyDecl(EmptyDecl *D) {
VisitDecl(D);
Code = serialization::DECL_EMPTY;
@@ -1212,8 +1301,12 @@ void ASTDeclWriter::VisitCapturedDecl(CapturedDecl *CD) {
}
void ASTDeclWriter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
+ static_assert(DeclContext::NumLinkageSpecDeclBits == 17,
+ "You need to update the serializer after you change the"
+ "LinkageSpecDeclBits");
+
VisitDecl(D);
- Record.push_back(D->getLanguage());
+ Record.push_back(llvm::to_underlying(D->getLanguage()));
Record.AddSourceLocation(D->getExternLoc());
Record.AddSourceLocation(D->getRBraceLoc());
Code = serialization::DECL_LINKAGE_SPEC;
@@ -1235,7 +1328,12 @@ void ASTDeclWriter::VisitLabelDecl(LabelDecl *D) {
void ASTDeclWriter::VisitNamespaceDecl(NamespaceDecl *D) {
VisitRedeclarable(D);
VisitNamedDecl(D);
- Record.push_back(D->isInline());
+
+ BitsPacker NamespaceDeclBits;
+ NamespaceDeclBits.addBit(D->isInline());
+ NamespaceDeclBits.addBit(D->isNested());
+ Record.push_back(NamespaceDeclBits);
+
Record.AddSourceLocation(D->getBeginLoc());
Record.AddSourceLocation(D->getRBraceLoc());
@@ -1283,7 +1381,7 @@ void ASTDeclWriter::VisitUsingEnumDecl(UsingEnumDecl *D) {
VisitNamedDecl(D);
Record.AddSourceLocation(D->getUsingLoc());
Record.AddSourceLocation(D->getEnumLoc());
- Record.AddDeclRef(D->getEnumDecl());
+ Record.AddTypeSourceInfo(D->getEnumType());
Record.AddDeclRef(D->FirstUsingShadow.getPointer());
Record.AddDeclRef(Context.getInstantiatedFromUsingEnumDecl(D));
Code = serialization::DECL_USING_ENUM;
@@ -1305,6 +1403,13 @@ void ASTDeclWriter::VisitUsingShadowDecl(UsingShadowDecl *D) {
Record.push_back(D->getIdentifierNamespace());
Record.AddDeclRef(D->UsingOrNextShadow);
Record.AddDeclRef(Context.getInstantiatedFromUsingShadowDecl(D));
+
+ if (D->getDeclContext() == D->getLexicalDeclContext() &&
+ D->getFirstDecl() == D->getMostRecentDecl() && !D->hasAttrs() &&
+ !needsAnonymousDeclarationNumber(D) &&
+ D->getDeclName().getNameKind() == DeclarationName::Identifier)
+ AbbrevToUse = Writer.getDeclUsingShadowAbbrev();
+
Code = serialization::DECL_USING_SHADOW;
}
@@ -1355,7 +1460,10 @@ void ASTDeclWriter::VisitCXXRecordDecl(CXXRecordDecl *D) {
VisitRecordDecl(D);
enum {
- CXXRecNotTemplate = 0, CXXRecTemplate, CXXRecMemberSpecialization
+ CXXRecNotTemplate = 0,
+ CXXRecTemplate,
+ CXXRecMemberSpecialization,
+ CXXLambda
};
if (ClassTemplateDecl *TemplD = D->getDescribedClassTemplate()) {
Record.push_back(CXXRecTemplate);
@@ -1366,6 +1474,15 @@ void ASTDeclWriter::VisitCXXRecordDecl(CXXRecordDecl *D) {
Record.AddDeclRef(MSInfo->getInstantiatedFrom());
Record.push_back(MSInfo->getTemplateSpecializationKind());
Record.AddSourceLocation(MSInfo->getPointOfInstantiation());
+ } else if (D->isLambda()) {
+ // For a lambda, we need some information early for merging.
+ Record.push_back(CXXLambda);
+ if (auto *Context = D->getLambdaContextDecl()) {
+ Record.AddDeclRef(Context);
+ Record.push_back(D->getLambdaIndexInContext());
+ } else {
+ Record.push_back(0);
+ }
} else {
Record.push_back(CXXRecNotTemplate);
}
@@ -1394,20 +1511,45 @@ void ASTDeclWriter::VisitCXXMethodDecl(CXXMethodDecl *D) {
}
if (D->getDeclContext() == D->getLexicalDeclContext() &&
- D->getFirstDecl() == D->getMostRecentDecl() &&
- !D->isInvalidDecl() &&
- !D->hasAttrs() &&
- !D->isTopLevelDeclInObjCContainer() &&
+ D->getFirstDecl() == D->getMostRecentDecl() && !D->isInvalidDecl() &&
+ !D->hasAttrs() && !D->isTopLevelDeclInObjCContainer() &&
D->getDeclName().getNameKind() == DeclarationName::Identifier &&
- !D->hasExtInfo() &&
- !D->hasInheritedPrototype() &&
- D->hasWrittenPrototype())
- AbbrevToUse = Writer.getDeclCXXMethodAbbrev();
+ !D->shouldSkipCheckingODR() && !D->hasExtInfo() &&
+ !D->isExplicitlyDefaulted()) {
+ if (D->getTemplatedKind() == FunctionDecl::TK_NonTemplate ||
+ D->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate ||
+ D->getTemplatedKind() == FunctionDecl::TK_MemberSpecialization ||
+ D->getTemplatedKind() == FunctionDecl::TK_DependentNonTemplate)
+ AbbrevToUse = Writer.getDeclCXXMethodAbbrev(D->getTemplatedKind());
+ else if (D->getTemplatedKind() ==
+ FunctionDecl::TK_FunctionTemplateSpecialization) {
+ FunctionTemplateSpecializationInfo *FTSInfo =
+ D->getTemplateSpecializationInfo();
+
+ if (FTSInfo->TemplateArguments->size() == 1) {
+ const TemplateArgument &TA = FTSInfo->TemplateArguments->get(0);
+ if (TA.getKind() == TemplateArgument::Type &&
+ !FTSInfo->TemplateArgumentsAsWritten &&
+ !FTSInfo->getMemberSpecializationInfo())
+ AbbrevToUse = Writer.getDeclCXXMethodAbbrev(D->getTemplatedKind());
+ }
+ } else if (D->getTemplatedKind() ==
+ FunctionDecl::TK_DependentFunctionTemplateSpecialization) {
+ DependentFunctionTemplateSpecializationInfo *DFTSInfo =
+ D->getDependentSpecializationInfo();
+ if (!DFTSInfo->TemplateArgumentsAsWritten)
+ AbbrevToUse = Writer.getDeclCXXMethodAbbrev(D->getTemplatedKind());
+ }
+ }
Code = serialization::DECL_CXX_METHOD;
}
void ASTDeclWriter::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
+ static_assert(DeclContext::NumCXXConstructorDeclBits == 64,
+ "You need to update the serializer after you change the "
+ "CXXConstructorDeclBits");
+
Record.push_back(D->getTrailingAllocKind());
addExplicitSpecifier(D->getExplicitSpecifier(), Record);
if (auto Inherited = D->getInheritedConstructor()) {
@@ -1495,8 +1637,8 @@ void ASTDeclWriter::VisitFriendTemplateDecl(FriendTemplateDecl *D) {
void ASTDeclWriter::VisitTemplateDecl(TemplateDecl *D) {
VisitNamedDecl(D);
- Record.AddDeclRef(D->getTemplatedDecl());
Record.AddTemplateParameterList(D->getTemplateParameters());
+ Record.AddDeclRef(D->getTemplatedDecl());
}
void ASTDeclWriter::VisitConceptDecl(ConceptDecl *D) {
@@ -1505,6 +1647,15 @@ void ASTDeclWriter::VisitConceptDecl(ConceptDecl *D) {
Code = serialization::DECL_CONCEPT;
}
+void ASTDeclWriter::VisitImplicitConceptSpecializationDecl(
+ ImplicitConceptSpecializationDecl *D) {
+ Record.push_back(D->getTemplateArguments().size());
+ VisitDecl(D);
+ for (const TemplateArgument &Arg : D->getTemplateArguments())
+ Record.AddTemplateArgument(Arg);
+ Code = serialization::DECL_IMPLICIT_CONCEPT_SPECIALIZATION;
+}
+
void ASTDeclWriter::VisitRequiresExprBodyDecl(RequiresExprBodyDecl *D) {
Code = serialization::DECL_REQUIRES_EXPR_BODY;
}
@@ -1597,8 +1748,6 @@ void ASTDeclWriter::VisitVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *D) {
RegisterTemplateSpecialization(D->getSpecializedTemplate(), D);
- VisitVarDecl(D);
-
llvm::PointerUnion<VarTemplateDecl *, VarTemplatePartialSpecializationDecl *>
InstFrom = D->getSpecializedTemplateOrPartial();
if (Decl *InstFromD = InstFrom.dyn_cast<VarTemplateDecl *>()) {
@@ -1619,6 +1768,9 @@ void ASTDeclWriter::VisitVarTemplateSpecializationDecl(
Record.AddSourceLocation(D->getPointOfInstantiation());
Record.push_back(D->getSpecializationKind());
Record.push_back(D->IsCompleteDefinition);
+
+ VisitVarDecl(D);
+
Record.push_back(D->isCanonicalDecl());
if (D->isCanonicalDecl()) {
@@ -1645,17 +1797,6 @@ void ASTDeclWriter::VisitVarTemplatePartialSpecializationDecl(
Code = serialization::DECL_VAR_TEMPLATE_PARTIAL_SPECIALIZATION;
}
-void ASTDeclWriter::VisitClassScopeFunctionSpecializationDecl(
- ClassScopeFunctionSpecializationDecl *D) {
- VisitDecl(D);
- Record.AddDeclRef(D->getSpecialization());
- Record.push_back(D->hasExplicitTemplateArgs());
- if (D->hasExplicitTemplateArgs())
- Record.AddASTTemplateArgumentListInfo(D->getTemplateArgsAsWritten());
- Code = serialization::DECL_CLASS_SCOPE_FUNCTION_SPECIALIZATION;
-}
-
-
void ASTDeclWriter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
VisitRedeclarableTemplateDecl(D);
@@ -1671,14 +1812,12 @@ void ASTDeclWriter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
Record.push_back(D->wasDeclaredWithTypename());
const TypeConstraint *TC = D->getTypeConstraint();
- Record.push_back(TC != nullptr);
+ assert((bool)TC == D->hasTypeConstraint());
if (TC) {
- Record.AddNestedNameSpecifierLoc(TC->getNestedNameSpecifierLoc());
- Record.AddDeclarationNameInfo(TC->getConceptNameInfo());
- Record.AddDeclRef(TC->getNamedConcept());
- Record.push_back(TC->getTemplateArgsAsWritten() != nullptr);
- if (TC->getTemplateArgsAsWritten())
- Record.AddASTTemplateArgumentListInfo(TC->getTemplateArgsAsWritten());
+ auto *CR = TC->getConceptReference();
+ Record.push_back(CR != nullptr);
+ if (CR)
+ Record.AddConceptReference(CR);
Record.AddStmt(TC->getImmediatelyDeclaredConstraint());
Record.push_back(D->isExpandedParameterPack());
if (D->isExpandedParameterPack())
@@ -1691,6 +1830,13 @@ void ASTDeclWriter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
if (OwnsDefaultArg)
Record.AddTypeSourceInfo(D->getDefaultArgumentInfo());
+ if (!TC && !OwnsDefaultArg &&
+ D->getDeclContext() == D->getLexicalDeclContext() &&
+ !D->isInvalidDecl() && !D->hasAttrs() &&
+ !D->isTopLevelDeclInObjCContainer() && !D->isImplicit() &&
+ D->getDeclName().getNameKind() == DeclarationName::Identifier)
+ AbbrevToUse = Writer.getDeclTemplateTypeParmAbbrev();
+
Code = serialization::DECL_TEMPLATE_TYPE_PARM;
}
@@ -1774,6 +1920,10 @@ void ASTDeclWriter::VisitStaticAssertDecl(StaticAssertDecl *D) {
/// Emit the DeclContext part of a declaration context decl.
void ASTDeclWriter::VisitDeclContext(DeclContext *DC) {
+ static_assert(DeclContext::NumDeclContextBits == 13,
+ "You need to update the serializer after you change the "
+ "DeclContextBits");
+
Record.AddOffset(Writer.WriteDeclContextLexicalBlock(Context, DC));
Record.AddOffset(Writer.WriteDeclContextVisibleBlock(Context, DC));
}
@@ -1854,6 +2004,17 @@ void ASTDeclWriter::VisitRedeclarable(Redeclarable<T> *D) {
}
}
+void ASTDeclWriter::VisitHLSLBufferDecl(HLSLBufferDecl *D) {
+ VisitNamedDecl(D);
+ VisitDeclContext(D);
+ Record.push_back(D->isCBuffer());
+ Record.AddSourceLocation(D->getLocStart());
+ Record.AddSourceLocation(D->getLBraceLoc());
+ Record.AddSourceLocation(D->getRBraceLoc());
+
+ Code = serialization::DECL_HLSL_BUFFER;
+}
+
void ASTDeclWriter::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) {
Record.writeOMPChildren(D->Data);
VisitDecl(D);
@@ -1873,6 +2034,10 @@ void ASTDeclWriter::VisitOMPRequiresDecl(OMPRequiresDecl *D) {
}
void ASTDeclWriter::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) {
+ static_assert(DeclContext::NumOMPDeclareReductionDeclBits == 15,
+ "You need to update the serializer after you change the "
+ "NumOMPDeclareReductionDeclBits");
+
VisitValueDecl(D);
Record.AddSourceLocation(D->getBeginLoc());
Record.AddStmt(D->getCombinerIn());
@@ -1881,7 +2046,7 @@ void ASTDeclWriter::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) {
Record.AddStmt(D->getInitOrig());
Record.AddStmt(D->getInitPriv());
Record.AddStmt(D->getInitializer());
- Record.push_back(D->getInitializerKind());
+ Record.push_back(llvm::to_underlying(D->getInitializerKind()));
Record.AddDeclRef(D->getPrevDeclInScope());
Code = serialization::DECL_OMP_DECLARE_REDUCTION;
}
@@ -1903,6 +2068,107 @@ void ASTDeclWriter::VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D) {
// ASTWriter Implementation
//===----------------------------------------------------------------------===//
+namespace {
+template <FunctionDecl::TemplatedKind Kind>
+std::shared_ptr<llvm::BitCodeAbbrev>
+getFunctionDeclAbbrev(serialization::DeclCode Code) {
+ using namespace llvm;
+
+ auto Abv = std::make_shared<BitCodeAbbrev>();
+ Abv->Add(BitCodeAbbrevOp(Code));
+ // RedeclarableDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // CanonicalDecl
+ Abv->Add(BitCodeAbbrevOp(Kind));
+ if constexpr (Kind == FunctionDecl::TK_NonTemplate) {
+
+ } else if constexpr (Kind == FunctionDecl::TK_FunctionTemplate) {
+ // DescribedFunctionTemplate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+ } else if constexpr (Kind == FunctionDecl::TK_DependentNonTemplate) {
+ // Instantiated From Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+ } else if constexpr (Kind == FunctionDecl::TK_MemberSpecialization) {
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InstantiatedFrom
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+ 3)); // TemplateSpecializationKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Specialized Location
+ } else if constexpr (Kind ==
+ FunctionDecl::TK_FunctionTemplateSpecialization) {
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Template
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+ 3)); // TemplateSpecializationKind
+ Abv->Add(BitCodeAbbrevOp(1)); // Template Argument Size
+ Abv->Add(BitCodeAbbrevOp(TemplateArgument::Type)); // Template Argument Kind
+ Abv->Add(
+ BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Template Argument Type
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Is Defaulted
+ Abv->Add(BitCodeAbbrevOp(0)); // TemplateArgumentsAsWritten
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SourceLocation
+ Abv->Add(BitCodeAbbrevOp(0));
+ Abv->Add(
+ BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Canonical Decl of template
+ } else if constexpr (Kind == FunctionDecl::
+ TK_DependentFunctionTemplateSpecialization) {
+ // Candidates of specialization
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abv->Add(BitCodeAbbrevOp(0)); // TemplateArgumentsAsWritten
+ } else {
+ llvm_unreachable("Unknown templated kind?");
+ }
+ // Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+ 8)); // Packed DeclBits: ModuleOwnershipKind,
+ // isUsed, isReferenced, AccessSpecifier,
+ // isImplicit
+ //
+ // The following bits should be 0:
+ // HasStandaloneLexicalDC, HasAttrs,
+ // TopLevelDeclInObjCContainer,
+ // isInvalidDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
+ // NamedDecl
+ Abv->Add(BitCodeAbbrevOp(DeclarationName::Identifier)); // NameKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Identifier
+ Abv->Add(BitCodeAbbrevOp(0)); // AnonDeclNumber
+ // ValueDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ // DeclaratorDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InnerLocStart
+ Abv->Add(BitCodeAbbrevOp(0)); // HasExtInfo
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TSIType
+ // FunctionDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 11)); // IDNS
+ Abv->Add(BitCodeAbbrevOp(
+ BitCodeAbbrevOp::Fixed,
+ 28)); // Packed Function Bits: StorageClass, Inline, InlineSpecified,
+ // VirtualAsWritten, Pure, HasInheritedProto, HasWrittenProto,
+ // Deleted, Trivial, TrivialForCall, Defaulted, ExplicitlyDefaulted,
+ // IsIneligibleOrNotSelected, ImplicitReturnZero, Constexpr,
+ // UsesSEHTry, SkippedBody, MultiVersion, LateParsed,
+ // FriendConstraintRefersToEnclosingTemplate, Linkage,
+ // ShouldSkipCheckingODR
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LocEnd
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // ODRHash
+ // This Array slurps the rest of the record. Fortunately we want to encode
+ // (nearly) all the remaining (variable number of) fields in the same way.
+ //
+ // This is:
+ // NumParams and Params[] from FunctionDecl, and
+ // NumOverriddenMethods, OverriddenMethods[] from CXXMethodDecl.
+ //
+ // Add an AbbrevOp for 'size then elements' and use it here.
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+ return Abv;
+}
+
+template <FunctionDecl::TemplatedKind Kind>
+std::shared_ptr<llvm::BitCodeAbbrev> getCXXMethodAbbrev() {
+ return getFunctionDeclAbbrev<Kind>(serialization::DECL_CXX_METHOD);
+}
+} // namespace
+
void ASTWriter::WriteDeclAbbrevs() {
using namespace llvm;
@@ -1912,16 +2178,15 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv = std::make_shared<BitCodeAbbrev>();
Abv->Add(BitCodeAbbrevOp(serialization::DECL_FIELD));
// Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+ 7)); // Packed DeclBits: ModuleOwnershipKind,
+ // isUsed, isReferenced, AccessSpecifier,
+ //
+ // The following bits should be 0:
+ // isImplicit, HasStandaloneLexicalDC, HasAttrs,
+ // TopLevelDeclInObjCContainer,
+ // isInvalidDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
- Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
- Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
- Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
- Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
- Abv->Add(BitCodeAbbrevOp(0)); // isUsed
- Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
- Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // AccessSpecifier
- Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
@@ -1935,7 +2200,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TSIType
// FieldDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isMutable
- Abv->Add(BitCodeAbbrevOp(0)); // InitStyle
+ Abv->Add(BitCodeAbbrevOp(0)); // StorageKind
// Type Source Info
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypeLoc
@@ -1945,16 +2210,12 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv = std::make_shared<BitCodeAbbrev>();
Abv->Add(BitCodeAbbrevOp(serialization::DECL_OBJC_IVAR));
// Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+ 12)); // Packed DeclBits: HasStandaloneLexicalDC,
+ // isInvalidDecl, HasAttrs, isImplicit, isUsed,
+ // isReferenced, TopLevelDeclInObjCContainer,
+ // AccessSpecifier, ModuleOwnershipKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
- Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
- Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
- Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
- Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
- Abv->Add(BitCodeAbbrevOp(0)); // isUsed
- Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
- Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // AccessSpecifier
- Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
@@ -1983,16 +2244,15 @@ void ASTWriter::WriteDeclAbbrevs() {
// Redeclarable
Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
// Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+ 7)); // Packed DeclBits: ModuleOwnershipKind,
+ // isUsed, isReferenced, AccessSpecifier,
+ //
+ // The following bits should be 0:
+ // isImplicit, HasStandaloneLexicalDC, HasAttrs,
+ // TopLevelDeclInObjCContainer,
+ // isInvalidDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
- Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
- Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
- Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
- Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
- Abv->Add(BitCodeAbbrevOp(0)); // isUsed
- Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
- Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
- Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
- Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
@@ -2003,23 +2263,18 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type Ref
// TagDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // IdentifierNamespace
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getTagKind
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isCompleteDefinition
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // EmbeddedInDeclarator
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsFreeStanding
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsCompleteDefinitionRequired
+ Abv->Add(BitCodeAbbrevOp(
+ BitCodeAbbrevOp::Fixed,
+ 9)); // Packed Tag Decl Bits: getTagKind, isCompleteDefinition,
+ // EmbeddedInDeclarator, IsFreeStanding,
+ // isCompleteDefinitionRequired, ExtInfoKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SourceLocation
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SourceLocation
- Abv->Add(BitCodeAbbrevOp(0)); // ExtInfoKind
// EnumDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // AddTypeRef
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // IntegerType
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getPromotionType
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getNumPositiveBits
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getNumNegativeBits
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isScoped
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isScopedUsingClassTag
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isFixed
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 20)); // Enum Decl Bits
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));// ODRHash
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InstantiatedMembEnum
// DC
@@ -2033,16 +2288,15 @@ void ASTWriter::WriteDeclAbbrevs() {
// Redeclarable
Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
// Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+ 7)); // Packed DeclBits: ModuleOwnershipKind,
+ // isUsed, isReferenced, AccessSpecifier,
+ //
+ // The following bits should be 0:
+ // isImplicit, HasStandaloneLexicalDC, HasAttrs,
+ // TopLevelDeclInObjCContainer,
+ // isInvalidDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
- Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
- Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
- Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
- Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
- Abv->Add(BitCodeAbbrevOp(0)); // isUsed
- Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
- Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
- Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
- Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
@@ -2053,36 +2307,26 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type Ref
// TagDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // IdentifierNamespace
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getTagKind
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isCompleteDefinition
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // EmbeddedInDeclarator
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsFreeStanding
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsCompleteDefinitionRequired
+ Abv->Add(BitCodeAbbrevOp(
+ BitCodeAbbrevOp::Fixed,
+ 9)); // Packed Tag Decl Bits: getTagKind, isCompleteDefinition,
+ // EmbeddedInDeclarator, IsFreeStanding,
+ // isCompleteDefinitionRequired, ExtInfoKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SourceLocation
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SourceLocation
- Abv->Add(BitCodeAbbrevOp(0)); // ExtInfoKind
// RecordDecl
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // FlexibleArrayMember
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // AnonymousStructUnion
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // hasObjectMember
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // hasVolatileMember
-
- // isNonTrivialToPrimitiveDefaultInitialize
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
- // isNonTrivialToPrimitiveCopy
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
- // isNonTrivialToPrimitiveDestroy
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
- // hasNonTrivialToPrimitiveDefaultInitializeCUnion
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
- // hasNonTrivialToPrimitiveDestructCUnion
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
- // hasNonTrivialToPrimitiveCopyCUnion
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
- // isParamDestroyedInCallee
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
- // getArgPassingRestrictions
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2));
+ Abv->Add(BitCodeAbbrevOp(
+ BitCodeAbbrevOp::Fixed,
+ 13)); // Packed Record Decl Bits: FlexibleArrayMember,
+ // AnonymousStructUnion, hasObjectMember, hasVolatileMember,
+ // isNonTrivialToPrimitiveDefaultInitialize,
+ // isNonTrivialToPrimitiveCopy, isNonTrivialToPrimitiveDestroy,
+ // hasNonTrivialToPrimitiveDefaultInitializeCUnion,
+ // hasNonTrivialToPrimitiveDestructCUnion,
+ // hasNonTrivialToPrimitiveCopyCUnion, isParamDestroyedInCallee,
+ // getArgPassingRestrictions
+ // ODRHash
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 26));
// DC
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalOffset
@@ -2095,16 +2339,13 @@ void ASTWriter::WriteDeclAbbrevs() {
// Redeclarable
Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
// Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+ 8)); // Packed DeclBits: ModuleOwnershipKind, isUsed,
+ // isReferenced, AccessSpecifier,
+ // HasStandaloneLexicalDC, HasAttrs, isImplicit,
+ // TopLevelDeclInObjCContainer,
+ // isInvalidDecl,
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
- Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
- Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
- Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
- Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
- Abv->Add(BitCodeAbbrevOp(0)); // isUsed
- Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
- Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
- Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
- Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
@@ -2117,21 +2358,18 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TSIType
// VarDecl
- Abv->Add(BitCodeAbbrevOp(0)); // SClass
- Abv->Add(BitCodeAbbrevOp(0)); // TSCSpec
- Abv->Add(BitCodeAbbrevOp(0)); // InitStyle
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isARCPseudoStrong
- Abv->Add(BitCodeAbbrevOp(0)); // Linkage
- Abv->Add(BitCodeAbbrevOp(0)); // HasInit
- Abv->Add(BitCodeAbbrevOp(0)); // HasMemberSpecializationInfo
+ Abv->Add(
+ BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+ 12)); // Packed Var Decl bits: SClass, TSCSpec, InitStyle,
+ // isARCPseudoStrong, Linkage, ModulesCodegen
+ Abv->Add(BitCodeAbbrevOp(0)); // VarKind (local enum)
// ParmVarDecl
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsObjCMethodParameter
- Abv->Add(BitCodeAbbrevOp(0)); // ScopeDepth
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ScopeIndex
- Abv->Add(BitCodeAbbrevOp(0)); // ObjCDeclQualifier
- Abv->Add(BitCodeAbbrevOp(0)); // KNRPromoted
- Abv->Add(BitCodeAbbrevOp(0)); // HasInheritedDefaultArg
- Abv->Add(BitCodeAbbrevOp(0)); // HasUninstantiatedDefaultArg
+ Abv->Add(BitCodeAbbrevOp(
+ BitCodeAbbrevOp::Fixed,
+ 19)); // Packed Parm Var Decl bits: IsObjCMethodParameter, ScopeDepth,
+ // ObjCDeclQualifier, KNRPromoted,
+ // HasInheritedDefaultArg, HasUninstantiatedDefaultArg
// Type Source Info
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypeLoc
@@ -2143,16 +2381,13 @@ void ASTWriter::WriteDeclAbbrevs() {
// Redeclarable
Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
// Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+ 7)); // Packed DeclBits: ModuleOwnershipKind,
+ // isReferenced, isUsed, AccessSpecifier. Other
+ // higher bits should be 0: isImplicit,
+ // HasStandaloneLexicalDC, HasAttrs,
+ // TopLevelDeclInObjCContainer, isInvalidDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
- Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
- Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
- Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
- Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isUsed
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isReferenced
- Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // C++ AccessSpecifier
- Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
@@ -2172,16 +2407,12 @@ void ASTWriter::WriteDeclAbbrevs() {
// Redeclarable
Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
// Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+ 12)); // Packed DeclBits: HasStandaloneLexicalDC,
+ // isInvalidDecl, HasAttrs, isImplicit, isUsed,
+ // isReferenced, TopLevelDeclInObjCContainer,
+ // AccessSpecifier, ModuleOwnershipKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
- Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
- Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
- Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
- Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
- Abv->Add(BitCodeAbbrevOp(0)); // isUsed
- Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
- Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
- Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
- Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
@@ -2194,113 +2425,100 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TSIType
// VarDecl
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // SClass
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // TSCSpec
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // InitStyle
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isARCPseudoStrong
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsThisDeclarationADemotedDefinition
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isExceptionVariable
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isNRVOVariable
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isCXXForRangeDecl
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isObjCForDecl
- Abv->Add(BitCodeAbbrevOp(0)); // isInline
- Abv->Add(BitCodeAbbrevOp(0)); // isInlineSpecified
- Abv->Add(BitCodeAbbrevOp(0)); // isConstexpr
- Abv->Add(BitCodeAbbrevOp(0)); // isInitCapture
- Abv->Add(BitCodeAbbrevOp(0)); // isPrevDeclInSameScope
- Abv->Add(BitCodeAbbrevOp(0)); // ImplicitParamKind
- Abv->Add(BitCodeAbbrevOp(0)); // EscapingByref
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // Linkage
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // HasConstant*
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // VarKind (local enum)
+ Abv->Add(BitCodeAbbrevOp(
+ BitCodeAbbrevOp::Fixed,
+ 21)); // Packed Var Decl bits: Linkage, ModulesCodegen,
+ // SClass, TSCSpec, InitStyle,
+ // isARCPseudoStrong, IsThisDeclarationADemotedDefinition,
+ // isExceptionVariable, isNRVOVariable, isCXXForRangeDecl,
+ // isInline, isInlineSpecified, isConstexpr,
+ // isInitCapture, isPrevDeclInSameScope,
+ // EscapingByref, HasDeducedType, ImplicitParamKind, isObjCForDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // VarKind (local enum)
// Type Source Info
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypeLoc
DeclVarAbbrev = Stream.EmitAbbrev(std::move(Abv));
// Abbreviation for DECL_CXX_METHOD
+ DeclCXXMethodAbbrev =
+ Stream.EmitAbbrev(getCXXMethodAbbrev<FunctionDecl::TK_NonTemplate>());
+ DeclTemplateCXXMethodAbbrev = Stream.EmitAbbrev(
+ getCXXMethodAbbrev<FunctionDecl::TK_FunctionTemplate>());
+ DeclDependentNonTemplateCXXMethodAbbrev = Stream.EmitAbbrev(
+ getCXXMethodAbbrev<FunctionDecl::TK_DependentNonTemplate>());
+ DeclMemberSpecializedCXXMethodAbbrev = Stream.EmitAbbrev(
+ getCXXMethodAbbrev<FunctionDecl::TK_MemberSpecialization>());
+ DeclTemplateSpecializedCXXMethodAbbrev = Stream.EmitAbbrev(
+ getCXXMethodAbbrev<FunctionDecl::TK_FunctionTemplateSpecialization>());
+ DeclDependentSpecializationCXXMethodAbbrev = Stream.EmitAbbrev(
+ getCXXMethodAbbrev<
+ FunctionDecl::TK_DependentFunctionTemplateSpecialization>());
+
+ // Abbreviation for DECL_TEMPLATE_TYPE_PARM
Abv = std::make_shared<BitCodeAbbrev>();
- Abv->Add(BitCodeAbbrevOp(serialization::DECL_CXX_METHOD));
- // RedeclarableDecl
- Abv->Add(BitCodeAbbrevOp(0)); // CanonicalDecl
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_TEMPLATE_TYPE_PARM));
+ Abv->Add(BitCodeAbbrevOp(0)); // hasTypeConstraint
// Decl
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
- Abv->Add(BitCodeAbbrevOp(0)); // LexicalDeclContext
- Abv->Add(BitCodeAbbrevOp(0)); // Invalid
- Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Implicit
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Used
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Referenced
- Abv->Add(BitCodeAbbrevOp(0)); // InObjCContainer
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // Access
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ModulePrivate
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+ 7)); // Packed DeclBits: ModuleOwnershipKind,
+ // isReferenced, isUsed, AccessSpecifier. Other
+ // higher bits should be 0: isImplicit,
+ // HasStandaloneLexicalDC, HasAttrs,
+ // TopLevelDeclInObjCContainer, isInvalidDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
- Abv->Add(BitCodeAbbrevOp(DeclarationName::Identifier)); // NameKind
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Identifier
- Abv->Add(BitCodeAbbrevOp(0)); // AnonDeclNumber
- // ValueDecl
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
- // DeclaratorDecl
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InnerLocStart
- Abv->Add(BitCodeAbbrevOp(0)); // HasExtInfo
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TSIType
- // FunctionDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
+ Abv->Add(BitCodeAbbrevOp(0));
+ // TypeDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type Ref
+ // TemplateTypeParmDecl
+ Abv->Add(
+ BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // wasDeclaredWithTypename
+ Abv->Add(BitCodeAbbrevOp(0)); // OwnsDefaultArg
+ DeclTemplateTypeParmAbbrev = Stream.EmitAbbrev(std::move(Abv));
+
+ // Abbreviation for DECL_USING_SHADOW
+ Abv = std::make_shared<BitCodeAbbrev>();
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_USING_SHADOW));
+ // Redeclarable
+ Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
+ // Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+ 12)); // Packed DeclBits: HasStandaloneLexicalDC,
+ // isInvalidDecl, HasAttrs, isImplicit, isUsed,
+ // isReferenced, TopLevelDeclInObjCContainer,
+ // AccessSpecifier, ModuleOwnershipKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
+ // NamedDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
+ Abv->Add(BitCodeAbbrevOp(0));
+ // UsingShadowDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TargetDecl
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 11)); // IDNS
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // StorageClass
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Inline
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InlineSpecified
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // VirtualAsWritten
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Pure
- Abv->Add(BitCodeAbbrevOp(0)); // HasInheritedProto
- Abv->Add(BitCodeAbbrevOp(1)); // HasWrittenProto
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Deleted
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Trivial
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // TrivialForCall
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Defaulted
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ExplicitlyDefaulted
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ImplicitReturnZero
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Constexpr
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // UsesSEHTry
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // SkippedBody
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // MultiVersion
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // LateParsed
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // Linkage
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LocEnd
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // ODRHash
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // TemplateKind
- // This Array slurps the rest of the record. Fortunately we want to encode
- // (nearly) all the remaining (variable number of) fields in the same way.
- //
- // This is the function template information if any, then
- // NumParams and Params[] from FunctionDecl, and
- // NumOverriddenMethods, OverriddenMethods[] from CXXMethodDecl.
- //
- // Add an AbbrevOp for 'size then elements' and use it here.
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
- DeclCXXMethodAbbrev = Stream.EmitAbbrev(std::move(Abv));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // UsingOrNextShadow
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR,
+ 6)); // InstantiatedFromUsingShadowDecl
+ DeclUsingShadowAbbrev = Stream.EmitAbbrev(std::move(Abv));
// Abbreviation for EXPR_DECL_REF
Abv = std::make_shared<BitCodeAbbrev>();
Abv->Add(BitCodeAbbrevOp(serialization::EXPR_DECL_REF));
- //Stmt
- // Expr
+ // Stmt
+ // Expr
+ // PackingBits: DependenceKind, ValueKind. ObjectKind should be 0.
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ContainsErrors
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
- //DeclRefExpr
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //HasQualifier
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //GetDeclFound
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ExplicitTemplateArgs
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //HadMultipleCandidates
- Abv->Add(BitCodeAbbrevOp(0)); // RefersToEnclosingVariableOrCapture
- Abv->Add(BitCodeAbbrevOp(0)); // NonOdrUseReason
+ // DeclRefExpr
+ // Packing Bits: , HadMultipleCandidates, RefersToEnclosingVariableOrCapture,
+ // IsImmediateEscalating, NonOdrUseReason.
+ // GetDeclFound, HasQualifier and ExplicitTemplateArgs should be 0.
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 5));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclRef
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location
DeclRefExprAbbrev = Stream.EmitAbbrev(std::move(Abv));
@@ -2310,15 +2528,10 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(serialization::EXPR_INTEGER_LITERAL));
//Stmt
// Expr
+ // DependenceKind, ValueKind, ObjectKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ContainsErrors
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
- //Integer Literal
+ // Integer Literal
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location
Abv->Add(BitCodeAbbrevOp(32)); // Bit Width
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Value
@@ -2329,15 +2542,10 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(serialization::EXPR_CHARACTER_LITERAL));
//Stmt
// Expr
+ // DependenceKind, ValueKind, ObjectKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ContainsErrors
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
- //Character Literal
+ // Character Literal
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getValue
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // getKind
@@ -2348,22 +2556,108 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(serialization::EXPR_IMPLICIT_CAST));
// Stmt
// Expr
+ // Packing Bits: DependenceKind, ValueKind, ObjectKind,
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ContainsErrors
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
// CastExpr
Abv->Add(BitCodeAbbrevOp(0)); // PathSize
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // HasFPFeatures
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 6)); // CastKind
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // PartOfExplicitCast
+ // Packing Bits: CastKind, StoredFPFeatures, isPartOfExplicitCast
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 9));
// ImplicitCastExpr
ExprImplicitCastAbbrev = Stream.EmitAbbrev(std::move(Abv));
+ // Abbreviation for EXPR_BINARY_OPERATOR
+ Abv = std::make_shared<BitCodeAbbrev>();
+ Abv->Add(BitCodeAbbrevOp(serialization::EXPR_BINARY_OPERATOR));
+ // Stmt
+ // Expr
+ // Packing Bits: DependenceKind. ValueKind and ObjectKind should
+ // be 0 in this case.
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 5));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ // BinaryOperator
+ Abv->Add(
+ BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // OpCode and HasFPFeatures
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location
+ BinaryOperatorAbbrev = Stream.EmitAbbrev(std::move(Abv));
+
+ // Abbreviation for EXPR_COMPOUND_ASSIGN_OPERATOR
+ Abv = std::make_shared<BitCodeAbbrev>();
+ Abv->Add(BitCodeAbbrevOp(serialization::EXPR_COMPOUND_ASSIGN_OPERATOR));
+ // Stmt
+ // Expr
+ // Packing Bits: DependenceKind. ValueKind and ObjectKind should
+ // be 0 in this case.
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 5));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ // BinaryOperator
+ // Packing Bits: OpCode. The HasFPFeatures bit should be 0
+ Abv->Add(
+ BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // OpCode and HasFPFeatures
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location
+ // CompoundAssignOperator
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LHSType
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Result Type
+ CompoundAssignOperatorAbbrev = Stream.EmitAbbrev(std::move(Abv));
+
+ // Abbreviation for EXPR_CALL
+ Abv = std::make_shared<BitCodeAbbrev>();
+ Abv->Add(BitCodeAbbrevOp(serialization::EXPR_CALL));
+ // Stmt
+ // Expr
+ // Packing Bits: DependenceKind, ValueKind, ObjectKind,
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ // CallExpr
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // NumArgs
+ Abv->Add(BitCodeAbbrevOp(0)); // ADLCallKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location
+ CallExprAbbrev = Stream.EmitAbbrev(std::move(Abv));
+
+ // Abbreviation for EXPR_CXX_OPERATOR_CALL
+ Abv = std::make_shared<BitCodeAbbrev>();
+ Abv->Add(BitCodeAbbrevOp(serialization::EXPR_CXX_OPERATOR_CALL));
+ // Stmt
+ // Expr
+ // Packing Bits: DependenceKind, ValueKind, ObjectKind,
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ // CallExpr
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // NumArgs
+ Abv->Add(BitCodeAbbrevOp(0)); // ADLCallKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location
+ // CXXOperatorCallExpr
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Operator Kind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location
+ CXXOperatorCallExprAbbrev = Stream.EmitAbbrev(std::move(Abv));
+
+ // Abbreviation for EXPR_CXX_MEMBER_CALL
+ Abv = std::make_shared<BitCodeAbbrev>();
+ Abv->Add(BitCodeAbbrevOp(serialization::EXPR_CXX_MEMBER_CALL));
+ // Stmt
+ // Expr
+ // Packing Bits: DependenceKind, ValueKind, ObjectKind,
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ // CallExpr
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // NumArgs
+ Abv->Add(BitCodeAbbrevOp(0)); // ADLCallKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location
+ // CXXMemberCallExpr
+ CXXMemberCallExprAbbrev = Stream.EmitAbbrev(std::move(Abv));
+
+ // Abbreviation for STMT_COMPOUND
+ Abv = std::make_shared<BitCodeAbbrev>();
+ Abv->Add(BitCodeAbbrevOp(serialization::STMT_COMPOUND));
+ // Stmt
+ // CompoundStmt
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Num Stmts
+ Abv->Add(BitCodeAbbrevOp(0)); // hasStoredFPFeatures
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location
+ CompoundStmtAbbrev = Stream.EmitAbbrev(std::move(Abv));
+
Abv = std::make_shared<BitCodeAbbrev>();
Abv->Add(BitCodeAbbrevOp(serialization::DECL_CONTEXT_LEXICAL));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
@@ -2388,13 +2682,26 @@ void ASTWriter::WriteDeclAbbrevs() {
/// relatively painless since they would presumably only do it for top-level
/// decls.
static bool isRequiredDecl(const Decl *D, ASTContext &Context,
- bool WritingModule) {
+ Module *WritingModule) {
+ // Named modules have different semantics than header modules. Every named
+ // module units owns a translation unit. So the importer of named modules
+ // doesn't need to deserilize everything ahead of time.
+ if (WritingModule && WritingModule->isNamedModule()) {
+ // The PragmaCommentDecl and PragmaDetectMismatchDecl are MSVC's extension.
+ // And the behavior of MSVC for such cases will leak this to the module
+ // users. Given pragma is not a standard thing, the compiler has the space
+ // to do their own decision. Let's follow MSVC here.
+ if (isa<PragmaCommentDecl, PragmaDetectMismatchDecl>(D))
+ return true;
+ return false;
+ }
+
// An ObjCMethodDecl is never considered as "required" because its
// implementation container always is.
// File scoped assembly or obj-c or OMP declare target implementation must be
// seen.
- if (isa<FileScopeAsmDecl>(D) || isa<ObjCImplDecl>(D))
+ if (isa<FileScopeAsmDecl, TopLevelStmtDecl, ObjCImplDecl>(D))
return true;
if (WritingModule && isPartOfPerModuleInitializer(D)) {
@@ -2434,11 +2741,12 @@ void ASTWriter::WriteDecl(ASTContext &Context, Decl *D) {
SourceLocation Loc = D->getLocation();
unsigned Index = ID - FirstDeclID;
if (DeclOffsets.size() == Index)
- DeclOffsets.emplace_back(Loc, Offset, DeclTypesBlockStartOffset);
+ DeclOffsets.emplace_back(getAdjustedLocation(Loc), Offset,
+ DeclTypesBlockStartOffset);
else if (DeclOffsets.size() < Index) {
// FIXME: Can/should this happen?
DeclOffsets.resize(Index+1);
- DeclOffsets[Index].setLocation(Loc);
+ DeclOffsets[Index].setLocation(getAdjustedLocation(Loc));
DeclOffsets[Index].setBitOffset(Offset, DeclTypesBlockStartOffset);
} else {
llvm_unreachable("declarations should be emitted in ID order");
@@ -2461,15 +2769,15 @@ void ASTRecordWriter::AddFunctionDefinition(const FunctionDecl *FD) {
assert(FD->doesThisDeclarationHaveABody());
bool ModulesCodegen = false;
if (!FD->isDependentContext()) {
- Optional<GVALinkage> Linkage;
+ std::optional<GVALinkage> Linkage;
if (Writer->WritingModule &&
- Writer->WritingModule->Kind == Module::ModuleInterfaceUnit) {
- // When building a C++ Modules TS module interface unit, a strong
- // definition in the module interface is provided by the compilation of
- // that module interface unit, not by its users. (Inline functions are
- // still emitted in module users.)
+ Writer->WritingModule->isInterfaceOrPartition()) {
+ // When building a C++20 module interface unit or a partition unit, a
+ // strong definition in the module interface is provided by the
+ // compilation of that unit, not by its users. (Inline functions are still
+ // emitted in module users.)
Linkage = Writer->Context->GetGVALinkageForFunction(FD);
- ModulesCodegen = *Linkage == GVA_StrongExternal;
+ ModulesCodegen = *Linkage >= GVA_StrongExternal;
}
if (Writer->Context->getLangOpts().ModulesCodegen ||
(FD->hasAttr<DLLExportAttr>() &&
@@ -2491,8 +2799,7 @@ void ASTRecordWriter::AddFunctionDefinition(const FunctionDecl *FD) {
if (auto *CD = dyn_cast<CXXConstructorDecl>(FD)) {
Record->push_back(CD->getNumCtorInitializers());
if (CD->getNumCtorInitializers())
- AddCXXCtorInitializers(
- llvm::makeArrayRef(CD->init_begin(), CD->init_end()));
+ AddCXXCtorInitializers(llvm::ArrayRef(CD->init_begin(), CD->init_end()));
}
AddStmt(FD->getBody());
}
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp
index 2bb5e4f3563d..e5836f5dcbe9 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -11,15 +11,16 @@
///
//===----------------------------------------------------------------------===//
-#include "clang/AST/ExprOpenMP.h"
-#include "clang/Serialization/ASTRecordWriter.h"
-#include "clang/Sema/DeclSpec.h"
+#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Lex/Token.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Serialization/ASTRecordWriter.h"
#include "llvm/Bitstream/BitstreamWriter.h"
using namespace clang;
@@ -36,14 +37,70 @@ namespace clang {
serialization::StmtCode Code;
unsigned AbbrevToUse;
+ /// A helper that can help us to write a packed bit across function
+ /// calls. For example, we may write seperate bits in seperate functions:
+ ///
+ /// void VisitA(A* a) {
+ /// Record.push_back(a->isSomething());
+ /// }
+ ///
+ /// void Visitb(B *b) {
+ /// VisitA(b);
+ /// Record.push_back(b->isAnother());
+ /// }
+ ///
+ /// In such cases, it'll be better if we can pack these 2 bits. We achieve
+ /// this by writing a zero value in `VisitA` and recorded that first and add
+ /// the new bit to the recorded value.
+ class PakedBitsWriter {
+ public:
+ PakedBitsWriter(ASTRecordWriter &Record) : RecordRef(Record) {}
+ ~PakedBitsWriter() { assert(!CurrentIndex); }
+
+ void addBit(bool Value) {
+ assert(CurrentIndex && "Writing Bits without recording first!");
+ PackingBits.addBit(Value);
+ }
+ void addBits(uint32_t Value, uint32_t BitsWidth) {
+ assert(CurrentIndex && "Writing Bits without recording first!");
+ PackingBits.addBits(Value, BitsWidth);
+ }
+
+ void writeBits() {
+ if (!CurrentIndex)
+ return;
+
+ RecordRef[*CurrentIndex] = (uint32_t)PackingBits;
+ CurrentIndex = std::nullopt;
+ PackingBits.reset(0);
+ }
+
+ void updateBits() {
+ writeBits();
+
+ CurrentIndex = RecordRef.size();
+ RecordRef.push_back(0);
+ }
+
+ private:
+ BitsPacker PackingBits;
+ ASTRecordWriter &RecordRef;
+ std::optional<unsigned> CurrentIndex;
+ };
+
+ PakedBitsWriter CurrentPackingBits;
+
public:
ASTStmtWriter(ASTWriter &Writer, ASTWriter::RecordData &Record)
: Writer(Writer), Record(Writer, Record),
- Code(serialization::STMT_NULL_PTR), AbbrevToUse(0) {}
+ Code(serialization::STMT_NULL_PTR), AbbrevToUse(0),
+ CurrentPackingBits(this->Record) {}
ASTStmtWriter(const ASTStmtWriter&) = delete;
+ ASTStmtWriter &operator=(const ASTStmtWriter &) = delete;
uint64_t Emit() {
+ CurrentPackingBits.writeBits();
assert(Code != serialization::STMT_NULL_PTR &&
"unhandled sub-statement writing AST file");
return Record.EmitStmt(Code, AbbrevToUse);
@@ -80,11 +137,20 @@ void ASTStmtWriter::VisitNullStmt(NullStmt *S) {
void ASTStmtWriter::VisitCompoundStmt(CompoundStmt *S) {
VisitStmt(S);
+
Record.push_back(S->size());
+ Record.push_back(S->hasStoredFPFeatures());
+
for (auto *CS : S->body())
Record.AddStmt(CS);
+ if (S->hasStoredFPFeatures())
+ Record.push_back(S->getStoredFPFeatures().getAsOpaqueInt());
Record.AddSourceLocation(S->getLBracLoc());
Record.AddSourceLocation(S->getRBracLoc());
+
+ if (!S->hasStoredFPFeatures())
+ AbbrevToUse = Writer.getCompoundStmtAbbrev();
+
Code = serialization::STMT_COMPOUND;
}
@@ -138,17 +204,18 @@ void ASTStmtWriter::VisitIfStmt(IfStmt *S) {
bool HasVar = S->getConditionVariableDeclStmt() != nullptr;
bool HasInit = S->getInit() != nullptr;
- Record.push_back(S->isConstexpr());
- Record.push_back(HasElse);
- Record.push_back(HasVar);
- Record.push_back(HasInit);
+ CurrentPackingBits.updateBits();
+ CurrentPackingBits.addBit(HasElse);
+ CurrentPackingBits.addBit(HasVar);
+ CurrentPackingBits.addBit(HasInit);
+ Record.push_back(static_cast<uint64_t>(S->getStatementKind()));
Record.AddStmt(S->getCond());
Record.AddStmt(S->getThen());
if (HasElse)
Record.AddStmt(S->getElse());
if (HasVar)
- Record.AddDeclRef(S->getConditionVariable());
+ Record.AddStmt(S->getConditionVariableDeclStmt());
if (HasInit)
Record.AddStmt(S->getInit());
@@ -175,7 +242,7 @@ void ASTStmtWriter::VisitSwitchStmt(SwitchStmt *S) {
if (HasInit)
Record.AddStmt(S->getInit());
if (HasVar)
- Record.AddDeclRef(S->getConditionVariable());
+ Record.AddStmt(S->getConditionVariableDeclStmt());
Record.AddSourceLocation(S->getSwitchLoc());
Record.AddSourceLocation(S->getLParenLoc());
@@ -196,7 +263,7 @@ void ASTStmtWriter::VisitWhileStmt(WhileStmt *S) {
Record.AddStmt(S->getCond());
Record.AddStmt(S->getBody());
if (HasVar)
- Record.AddDeclRef(S->getConditionVariable());
+ Record.AddStmt(S->getConditionVariableDeclStmt());
Record.AddSourceLocation(S->getWhileLoc());
Record.AddSourceLocation(S->getLParenLoc());
@@ -218,7 +285,7 @@ void ASTStmtWriter::VisitForStmt(ForStmt *S) {
VisitStmt(S);
Record.AddStmt(S->getInit());
Record.AddStmt(S->getCond());
- Record.AddDeclRef(S->getConditionVariable());
+ Record.AddStmt(S->getConditionVariableDeclStmt());
Record.AddStmt(S->getInc());
Record.AddStmt(S->getBody());
Record.AddSourceLocation(S->getForLoc());
@@ -314,7 +381,10 @@ void ASTStmtWriter::VisitGCCAsmStmt(GCCAsmStmt *S) {
Record.AddStmt(S->getClobberStringLiteral(I));
// Labels
- for (auto *E : S->labels()) Record.AddStmt(E);
+ for (unsigned I = 0, N = S->getNumLabels(); I != N; ++I) {
+ Record.AddIdentifierRef(S->getLabelIdentifier(I));
+ Record.AddStmt(S->getLabelExpr(I));
+ }
Code = serialization::STMT_GCCASM;
}
@@ -400,6 +470,7 @@ static void
addConstraintSatisfaction(ASTRecordWriter &Record,
const ASTConstraintSatisfaction &Satisfaction) {
Record.push_back(Satisfaction.IsSatisfied);
+ Record.push_back(Satisfaction.ContainsErrors);
if (!Satisfaction.IsSatisfied) {
Record.push_back(Satisfaction.NumRecords);
for (const auto &DetailRecord : Satisfaction) {
@@ -430,16 +501,11 @@ addSubstitutionDiagnostic(
void ASTStmtWriter::VisitConceptSpecializationExpr(
ConceptSpecializationExpr *E) {
VisitExpr(E);
- ArrayRef<TemplateArgument> TemplateArgs = E->getTemplateArguments();
- Record.push_back(TemplateArgs.size());
- Record.AddNestedNameSpecifierLoc(E->getNestedNameSpecifierLoc());
- Record.AddSourceLocation(E->getTemplateKWLoc());
- Record.AddDeclarationNameInfo(E->getConceptNameInfo());
- Record.AddDeclRef(E->getNamedConcept());
- Record.AddDeclRef(E->getFoundDecl());
- Record.AddASTTemplateArgumentListInfo(E->getTemplateArgsAsWritten());
- for (const TemplateArgument &Arg : TemplateArgs)
- Record.AddTemplateArgument(Arg);
+ Record.AddDeclRef(E->getSpecializationDecl());
+ const ConceptReference *CR = E->getConceptReference();
+ Record.push_back(CR != nullptr);
+ if (CR)
+ Record.AddConceptReference(CR);
if (!E->isValueDependent())
addConstraintSatisfaction(Record, E->getSatisfaction());
@@ -493,17 +559,19 @@ void ASTStmtWriter::VisitRequiresExpr(RequiresExpr *E) {
} else {
auto *NestedReq = cast<concepts::NestedRequirement>(R);
Record.push_back(concepts::Requirement::RK_Nested);
- Record.push_back(NestedReq->isSubstitutionFailure());
- if (NestedReq->isSubstitutionFailure()){
- addSubstitutionDiagnostic(Record,
- NestedReq->getSubstitutionDiagnostic());
+ Record.push_back(NestedReq->hasInvalidConstraint());
+ if (NestedReq->hasInvalidConstraint()) {
+ Record.AddString(NestedReq->getInvalidConstraintEntity());
+ addConstraintSatisfaction(Record, *NestedReq->Satisfaction);
} else {
- Record.AddStmt(NestedReq->Value.get<Expr *>());
+ Record.AddStmt(NestedReq->getConstraintExpr());
if (!NestedReq->isDependent())
addConstraintSatisfaction(Record, *NestedReq->Satisfaction);
}
}
}
+ Record.AddSourceLocation(E->getLParenLoc());
+ Record.AddSourceLocation(E->getRParenLoc());
Record.AddSourceLocation(E->getEndLoc());
Code = serialization::EXPR_REQUIRES;
@@ -543,14 +611,13 @@ void ASTStmtWriter::VisitCapturedStmt(CapturedStmt *S) {
void ASTStmtWriter::VisitExpr(Expr *E) {
VisitStmt(E);
+
+ CurrentPackingBits.updateBits();
+ CurrentPackingBits.addBits(E->getDependence(), /*BitsWidth=*/5);
+ CurrentPackingBits.addBits(E->getValueKind(), /*BitsWidth=*/2);
+ CurrentPackingBits.addBits(E->getObjectKind(), /*BitsWidth=*/3);
+
Record.AddTypeRef(E->getType());
- Record.push_back(E->isTypeDependent());
- Record.push_back(E->isValueDependent());
- Record.push_back(E->isInstantiationDependent());
- Record.push_back(E->containsUnexpandedParameterPack());
- Record.push_back(E->containsErrors());
- Record.push_back(E->getValueKind());
- Record.push_back(E->getObjectKind());
}
void ASTStmtWriter::VisitConstantExpr(ConstantExpr *E) {
@@ -563,17 +630,15 @@ void ASTStmtWriter::VisitConstantExpr(ConstantExpr *E) {
// HasCleanup not serialized since we can just query the APValue.
Record.push_back(E->ConstantExprBits.IsImmediateInvocation);
- switch (E->ConstantExprBits.ResultKind) {
- case ConstantExpr::RSK_None:
+ switch (E->getResultStorageKind()) {
+ case ConstantResultStorageKind::None:
break;
- case ConstantExpr::RSK_Int64:
+ case ConstantResultStorageKind::Int64:
Record.push_back(E->Int64Result());
break;
- case ConstantExpr::RSK_APValue:
+ case ConstantResultStorageKind::APValue:
Record.AddAPValue(E->APValueResult());
break;
- default:
- llvm_unreachable("unexpected ResultKind!");
}
Record.AddStmt(E->getSubExpr());
@@ -596,7 +661,9 @@ void ASTStmtWriter::VisitPredefinedExpr(PredefinedExpr *E) {
bool HasFunctionName = E->getFunctionName() != nullptr;
Record.push_back(HasFunctionName);
- Record.push_back(E->getIdentKind()); // FIXME: stable encoding
+ Record.push_back(
+ llvm::to_underlying(E->getIdentKind())); // FIXME: stable encoding
+ Record.push_back(E->isTransparent());
Record.AddSourceLocation(E->getLocation());
if (HasFunctionName)
Record.AddStmt(E->getFunctionName());
@@ -606,12 +673,15 @@ void ASTStmtWriter::VisitPredefinedExpr(PredefinedExpr *E) {
void ASTStmtWriter::VisitDeclRefExpr(DeclRefExpr *E) {
VisitExpr(E);
- Record.push_back(E->hasQualifier());
- Record.push_back(E->getDecl() != E->getFoundDecl());
- Record.push_back(E->hasTemplateKWAndArgsInfo());
- Record.push_back(E->hadMultipleCandidates());
- Record.push_back(E->refersToEnclosingVariableOrCapture());
- Record.push_back(E->isNonOdrUse());
+ CurrentPackingBits.updateBits();
+
+ CurrentPackingBits.addBit(E->hadMultipleCandidates());
+ CurrentPackingBits.addBit(E->refersToEnclosingVariableOrCapture());
+ CurrentPackingBits.addBits(E->isNonOdrUse(), /*Width=*/2);
+ CurrentPackingBits.addBit(E->isImmediateEscalating());
+ CurrentPackingBits.addBit(E->getDecl() != E->getFoundDecl());
+ CurrentPackingBits.addBit(E->hasQualifier());
+ CurrentPackingBits.addBit(E->hasTemplateKWAndArgsInfo());
if (E->hasTemplateKWAndArgsInfo()) {
unsigned NumTemplateArgs = E->getNumTemplateArgs();
@@ -622,8 +692,7 @@ void ASTStmtWriter::VisitDeclRefExpr(DeclRefExpr *E) {
if ((!E->hasTemplateKWAndArgsInfo()) && (!E->hasQualifier()) &&
(E->getDecl() == E->getFoundDecl()) &&
- nk == DeclarationName::Identifier &&
- !E->refersToEnclosingVariableOrCapture() && !E->isNonOdrUse()) {
+ nk == DeclarationName::Identifier && E->getObjectKind() == OK_Ordinary) {
AbbrevToUse = Writer.getDeclRefExprAbbrev();
}
@@ -685,7 +754,7 @@ void ASTStmtWriter::VisitStringLiteral(StringLiteral *E) {
Record.push_back(E->getNumConcatenated());
Record.push_back(E->getLength());
Record.push_back(E->getCharByteWidth());
- Record.push_back(E->getKind());
+ Record.push_back(llvm::to_underlying(E->getKind()));
Record.push_back(E->isPascal());
// Store the trailing array of SourceLocation.
@@ -704,7 +773,7 @@ void ASTStmtWriter::VisitCharacterLiteral(CharacterLiteral *E) {
VisitExpr(E);
Record.push_back(E->getValue());
Record.AddSourceLocation(E->getLocation());
- Record.push_back(E->getKind());
+ Record.push_back(llvm::to_underlying(E->getKind()));
AbbrevToUse = Writer.getCharacterLiteralAbbrev();
@@ -734,11 +803,13 @@ void ASTStmtWriter::VisitUnaryOperator(UnaryOperator *E) {
bool HasFPFeatures = E->hasStoredFPFeatures();
// Write this first for easy access when deserializing, as they affect the
// size of the UnaryOperator.
- Record.push_back(HasFPFeatures);
+ CurrentPackingBits.addBit(HasFPFeatures);
Record.AddStmt(E->getSubExpr());
- Record.push_back(E->getOpcode()); // FIXME: stable encoding
+ CurrentPackingBits.addBits(E->getOpcode(),
+ /*Width=*/5); // FIXME: stable encoding
Record.AddSourceLocation(E->getOperatorLoc());
- Record.push_back(E->canOverflow());
+ CurrentPackingBits.addBit(E->canOverflow());
+
if (HasFPFeatures)
Record.push_back(E->getStoredFPFeatures().getAsOpaqueInt());
Code = serialization::EXPR_UNARY_OPERATOR;
@@ -863,16 +934,25 @@ void ASTStmtWriter::VisitOMPIteratorExpr(OMPIteratorExpr *E) {
void ASTStmtWriter::VisitCallExpr(CallExpr *E) {
VisitExpr(E);
+
Record.push_back(E->getNumArgs());
- Record.push_back(E->hasStoredFPFeatures());
+ CurrentPackingBits.updateBits();
+ CurrentPackingBits.addBit(static_cast<bool>(E->getADLCallKind()));
+ CurrentPackingBits.addBit(E->hasStoredFPFeatures());
+
Record.AddSourceLocation(E->getRParenLoc());
Record.AddStmt(E->getCallee());
for (CallExpr::arg_iterator Arg = E->arg_begin(), ArgEnd = E->arg_end();
Arg != ArgEnd; ++Arg)
Record.AddStmt(*Arg);
- Record.push_back(static_cast<unsigned>(E->getADLCallKind()));
+
if (E->hasStoredFPFeatures())
Record.push_back(E->getFPFeatures().getAsOpaqueInt());
+
+ if (!E->hasStoredFPFeatures() && !static_cast<bool>(E->getADLCallKind()) &&
+ E->getStmtClass() == Stmt::CallExprClass)
+ AbbrevToUse = Writer.getCallExprAbbrev();
+
Code = serialization::EXPR_CALL;
}
@@ -899,9 +979,10 @@ void ASTStmtWriter::VisitMemberExpr(MemberExpr *E) {
// Write these first for easy access when deserializing, as they affect the
// size of the MemberExpr.
- Record.push_back(HasQualifier);
- Record.push_back(HasFoundDecl);
- Record.push_back(HasTemplateInfo);
+ CurrentPackingBits.updateBits();
+ CurrentPackingBits.addBit(HasQualifier);
+ CurrentPackingBits.addBit(HasFoundDecl);
+ CurrentPackingBits.addBit(HasTemplateInfo);
Record.push_back(NumTemplateArgs);
Record.AddStmt(E->getBase());
@@ -909,15 +990,15 @@ void ASTStmtWriter::VisitMemberExpr(MemberExpr *E) {
Record.AddDeclarationNameLoc(E->MemberDNLoc,
E->getMemberDecl()->getDeclName());
Record.AddSourceLocation(E->getMemberLoc());
- Record.push_back(E->isArrow());
- Record.push_back(E->hadMultipleCandidates());
- Record.push_back(E->isNonOdrUse());
+ CurrentPackingBits.addBit(E->isArrow());
+ CurrentPackingBits.addBit(E->hadMultipleCandidates());
+ CurrentPackingBits.addBits(E->isNonOdrUse(), /*Width=*/2);
Record.AddSourceLocation(E->getOperatorLoc());
if (HasFoundDecl) {
DeclAccessPair FoundDecl = E->getFoundDecl();
Record.AddDeclRef(FoundDecl.getDecl());
- Record.push_back(FoundDecl.getAccess());
+ CurrentPackingBits.addBits(FoundDecl.getAccess(), /*BitWidth=*/2);
}
if (HasQualifier)
@@ -957,10 +1038,13 @@ void ASTStmtWriter::VisitObjCBridgedCastExpr(ObjCBridgedCastExpr *E) {
void ASTStmtWriter::VisitCastExpr(CastExpr *E) {
VisitExpr(E);
+
Record.push_back(E->path_size());
- Record.push_back(E->hasStoredFPFeatures());
+ CurrentPackingBits.updateBits();
+ // 7 bits should be enough to store the casting kinds.
+ CurrentPackingBits.addBits(E->getCastKind(), /*Width=*/7);
+ CurrentPackingBits.addBit(E->hasStoredFPFeatures());
Record.AddStmt(E->getSubExpr());
- Record.push_back(E->getCastKind()); // FIXME: stable encoding
for (CastExpr::path_iterator
PI = E->path_begin(), PE = E->path_end(); PI != PE; ++PI)
@@ -972,16 +1056,23 @@ void ASTStmtWriter::VisitCastExpr(CastExpr *E) {
void ASTStmtWriter::VisitBinaryOperator(BinaryOperator *E) {
VisitExpr(E);
- bool HasFPFeatures = E->hasStoredFPFeatures();
+
// Write this first for easy access when deserializing, as they affect the
// size of the UnaryOperator.
- Record.push_back(HasFPFeatures);
- Record.push_back(E->getOpcode()); // FIXME: stable encoding
+ CurrentPackingBits.updateBits();
+ CurrentPackingBits.addBits(E->getOpcode(), /*Width=*/6);
+ bool HasFPFeatures = E->hasStoredFPFeatures();
+ CurrentPackingBits.addBit(HasFPFeatures);
Record.AddStmt(E->getLHS());
Record.AddStmt(E->getRHS());
Record.AddSourceLocation(E->getOperatorLoc());
if (HasFPFeatures)
Record.push_back(E->getStoredFPFeatures().getAsOpaqueInt());
+
+ if (!HasFPFeatures && E->getValueKind() == VK_PRValue &&
+ E->getObjectKind() == OK_Ordinary)
+ AbbrevToUse = Writer.getBinaryOperatorAbbrev();
+
Code = serialization::EXPR_BINARY_OPERATOR;
}
@@ -989,6 +1080,11 @@ void ASTStmtWriter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
VisitBinaryOperator(E);
Record.AddTypeRef(E->getComputationLHSType());
Record.AddTypeRef(E->getComputationResultType());
+
+ if (!E->hasStoredFPFeatures() && E->getValueKind() == VK_PRValue &&
+ E->getObjectKind() == OK_Ordinary)
+ AbbrevToUse = Writer.getCompoundAssignOperatorAbbrev();
+
Code = serialization::EXPR_COMPOUND_ASSIGN_OPERATOR;
}
@@ -1017,7 +1113,7 @@ ASTStmtWriter::VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
void ASTStmtWriter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
VisitCastExpr(E);
- Record.push_back(E->isPartOfExplicitCast());
+ CurrentPackingBits.addBit(E->isPartOfExplicitCast());
if (E->path_size() == 0 && !E->hasStoredFPFeatures())
AbbrevToUse = Writer.getExprImplicitCastAbbrev();
@@ -1091,7 +1187,7 @@ void ASTStmtWriter::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
Record.push_back(E->usesGNUSyntax());
for (const DesignatedInitExpr::Designator &D : E->designators()) {
if (D.isFieldDesignator()) {
- if (FieldDecl *Field = D.getField()) {
+ if (FieldDecl *Field = D.getFieldDecl()) {
Record.push_back(serialization::DESIG_FIELD_DECL);
Record.AddDeclRef(Field);
} else {
@@ -1102,13 +1198,13 @@ void ASTStmtWriter::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
Record.AddSourceLocation(D.getFieldLoc());
} else if (D.isArrayDesignator()) {
Record.push_back(serialization::DESIG_ARRAY);
- Record.push_back(D.getFirstExprIndex());
+ Record.push_back(D.getArrayIndex());
Record.AddSourceLocation(D.getLBracketLoc());
Record.AddSourceLocation(D.getRBracketLoc());
} else {
assert(D.isArrayRangeDesignator() && "Unknown designator");
Record.push_back(serialization::DESIG_ARRAY_RANGE);
- Record.push_back(D.getFirstExprIndex());
+ Record.push_back(D.getArrayIndex());
Record.AddSourceLocation(D.getLBracketLoc());
Record.AddSourceLocation(D.getEllipsisLoc());
Record.AddSourceLocation(D.getRBracketLoc());
@@ -1161,7 +1257,7 @@ void ASTStmtWriter::VisitSourceLocExpr(SourceLocExpr *E) {
Record.AddDeclRef(cast_or_null<Decl>(E->getParentContext()));
Record.AddSourceLocation(E->getBeginLoc());
Record.AddSourceLocation(E->getEndLoc());
- Record.push_back(E->getIdentKind());
+ Record.push_back(llvm::to_underlying(E->getIdentKind()));
Code = serialization::EXPR_SOURCE_LOC;
}
@@ -1228,6 +1324,7 @@ void ASTStmtWriter::VisitGenericSelectionExpr(GenericSelectionExpr *E) {
VisitExpr(E);
Record.push_back(E->getNumAssocs());
+ Record.push_back(E->isExprPredicate());
Record.push_back(E->ResultIndex);
Record.AddSourceLocation(E->getGenericLoc());
Record.AddSourceLocation(E->getDefaultLoc());
@@ -1483,8 +1580,8 @@ void ASTStmtWriter::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
Record.push_back(S->getNumCatchStmts());
Record.push_back(S->getFinallyStmt() != nullptr);
Record.AddStmt(S->getTryBody());
- for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I)
- Record.AddStmt(S->getCatchStmt(I));
+ for (ObjCAtCatchStmt *C : S->catch_stmts())
+ Record.AddStmt(C);
if (S->getFinallyStmt())
Record.AddStmt(S->getFinallyStmt());
Record.AddSourceLocation(S->getAtTryLoc());
@@ -1573,11 +1670,19 @@ void ASTStmtWriter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
VisitCallExpr(E);
Record.push_back(E->getOperator());
Record.AddSourceRange(E->Range);
+
+ if (!E->hasStoredFPFeatures() && !static_cast<bool>(E->getADLCallKind()))
+ AbbrevToUse = Writer.getCXXOperatorCallExprAbbrev();
+
Code = serialization::EXPR_CXX_OPERATOR_CALL;
}
void ASTStmtWriter::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
VisitCallExpr(E);
+
+ if (!E->hasStoredFPFeatures() && !static_cast<bool>(E->getADLCallKind()))
+ AbbrevToUse = Writer.getCXXMemberCallExprAbbrev();
+
Code = serialization::EXPR_CXX_MEMBER_CALL;
}
@@ -1598,7 +1703,9 @@ void ASTStmtWriter::VisitCXXConstructExpr(CXXConstructExpr *E) {
Record.push_back(E->isListInitialization());
Record.push_back(E->isStdInitListInitialization());
Record.push_back(E->requiresZeroInitialization());
- Record.push_back(E->getConstructionKind()); // FIXME: stable encoding
+ Record.push_back(
+ llvm::to_underlying(E->getConstructionKind())); // FIXME: stable encoding
+ Record.push_back(E->isImmediateEscalating());
Record.AddSourceLocation(E->getLocation());
Record.AddDeclRef(E->getConstructor());
Record.AddSourceRange(E->getParenOrBraceRange());
@@ -1656,7 +1763,9 @@ void ASTStmtWriter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E)
void ASTStmtWriter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
VisitExplicitCastExpr(E);
Record.AddSourceRange(SourceRange(E->getOperatorLoc(), E->getRParenLoc()));
- Record.AddSourceRange(E->getAngleBrackets());
+ CurrentPackingBits.addBit(E->getAngleBrackets().isValid());
+ if (E->getAngleBrackets().isValid())
+ Record.AddSourceRange(E->getAngleBrackets());
}
void ASTStmtWriter::VisitCXXStaticCastExpr(CXXStaticCastExpr *E) {
@@ -1733,6 +1842,7 @@ void ASTStmtWriter::VisitCXXThisExpr(CXXThisExpr *E) {
VisitExpr(E);
Record.AddSourceLocation(E->getLocation());
Record.push_back(E->isImplicit());
+
Code = serialization::EXPR_CXX_THIS;
}
@@ -1749,14 +1859,20 @@ void ASTStmtWriter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
Record.AddDeclRef(E->getParam());
Record.AddDeclRef(cast_or_null<Decl>(E->getUsedContext()));
Record.AddSourceLocation(E->getUsedLocation());
+ Record.push_back(E->hasRewrittenInit());
+ if (E->hasRewrittenInit())
+ Record.AddStmt(E->getRewrittenExpr());
Code = serialization::EXPR_CXX_DEFAULT_ARG;
}
void ASTStmtWriter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E) {
VisitExpr(E);
+ Record.push_back(E->hasRewrittenInit());
Record.AddDeclRef(E->getField());
Record.AddDeclRef(cast_or_null<Decl>(E->getUsedContext()));
Record.AddSourceLocation(E->getExprLoc());
+ if (E->hasRewrittenInit())
+ Record.AddStmt(E->getRewrittenExpr());
Code = serialization::EXPR_CXX_DEFAULT_INIT;
}
@@ -1785,6 +1901,7 @@ void ASTStmtWriter::VisitCXXNewExpr(CXXNewExpr *E) {
Record.push_back(E->isGlobalNew());
Record.push_back(E->passAlignment());
Record.push_back(E->doesUsualArrayDeleteWantSize());
+ Record.push_back(E->CXXNewExprBits.HasInitializer);
Record.push_back(E->CXXNewExprBits.StoredInitializationStyle);
Record.AddDeclRef(E->getOperatorNew());
@@ -1860,10 +1977,10 @@ void ASTStmtWriter::VisitCXXDependentScopeMemberExpr(
// Don't emit anything here (or if you do you will have to update
// the corresponding deserialization function).
-
- Record.push_back(E->hasTemplateKWAndArgsInfo());
Record.push_back(E->getNumTemplateArgs());
- Record.push_back(E->hasFirstQualifierFoundInScope());
+ CurrentPackingBits.updateBits();
+ CurrentPackingBits.addBit(E->hasTemplateKWAndArgsInfo());
+ CurrentPackingBits.addBit(E->hasFirstQualifierFoundInScope());
if (E->hasTemplateKWAndArgsInfo()) {
const ASTTemplateKWAndArgsInfo &ArgInfo =
@@ -1872,14 +1989,15 @@ void ASTStmtWriter::VisitCXXDependentScopeMemberExpr(
E->getTrailingObjects<TemplateArgumentLoc>());
}
- Record.push_back(E->isArrow());
- Record.AddSourceLocation(E->getOperatorLoc());
+ CurrentPackingBits.addBit(E->isArrow());
+
Record.AddTypeRef(E->getBaseType());
Record.AddNestedNameSpecifierLoc(E->getQualifierLoc());
+ CurrentPackingBits.addBit(!E->isImplicitAccess());
if (!E->isImplicitAccess())
Record.AddStmt(E->getBase());
- else
- Record.AddStmt(nullptr);
+
+ Record.AddSourceLocation(E->getOperatorLoc());
if (E->hasFirstQualifierFoundInScope())
Record.AddDeclRef(E->getFirstQualifierFoundInScope());
@@ -1894,12 +2012,14 @@ ASTStmtWriter::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
// Don't emit anything here, HasTemplateKWAndArgsInfo must be
// emitted first.
+ CurrentPackingBits.addBit(
+ E->DependentScopeDeclRefExprBits.HasTemplateKWAndArgsInfo);
- Record.push_back(E->DependentScopeDeclRefExprBits.HasTemplateKWAndArgsInfo);
if (E->DependentScopeDeclRefExprBits.HasTemplateKWAndArgsInfo) {
const ASTTemplateKWAndArgsInfo &ArgInfo =
*E->getTrailingObjects<ASTTemplateKWAndArgsInfo>();
- Record.push_back(ArgInfo.NumTemplateArgs);
+ // 16 bits should be enought to store the number of args
+ CurrentPackingBits.addBits(ArgInfo.NumTemplateArgs, /*Width=*/16);
AddTemplateKWAndArgsInfo(ArgInfo,
E->getTrailingObjects<TemplateArgumentLoc>());
}
@@ -1919,6 +2039,7 @@ ASTStmtWriter::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) {
Record.AddTypeSourceInfo(E->getTypeSourceInfo());
Record.AddSourceLocation(E->getLParenLoc());
Record.AddSourceLocation(E->getRParenLoc());
+ Record.push_back(E->isListInitialization());
Code = serialization::EXPR_CXX_UNRESOLVED_CONSTRUCT;
}
@@ -1926,7 +2047,9 @@ void ASTStmtWriter::VisitOverloadExpr(OverloadExpr *E) {
VisitExpr(E);
Record.push_back(E->getNumDecls());
- Record.push_back(E->hasTemplateKWAndArgsInfo());
+
+ CurrentPackingBits.updateBits();
+ CurrentPackingBits.addBit(E->hasTemplateKWAndArgsInfo());
if (E->hasTemplateKWAndArgsInfo()) {
const ASTTemplateKWAndArgsInfo &ArgInfo =
*E->getTrailingASTTemplateKWAndArgsInfo();
@@ -1947,18 +2070,22 @@ void ASTStmtWriter::VisitOverloadExpr(OverloadExpr *E) {
void ASTStmtWriter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
VisitOverloadExpr(E);
- Record.push_back(E->isArrow());
- Record.push_back(E->hasUnresolvedUsing());
- Record.AddStmt(!E->isImplicitAccess() ? E->getBase() : nullptr);
- Record.AddTypeRef(E->getBaseType());
+ CurrentPackingBits.addBit(E->isArrow());
+ CurrentPackingBits.addBit(E->hasUnresolvedUsing());
+ CurrentPackingBits.addBit(!E->isImplicitAccess());
+ if (!E->isImplicitAccess())
+ Record.AddStmt(E->getBase());
+
Record.AddSourceLocation(E->getOperatorLoc());
+
+ Record.AddTypeRef(E->getBaseType());
Code = serialization::EXPR_CXX_UNRESOLVED_MEMBER;
}
void ASTStmtWriter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
VisitOverloadExpr(E);
- Record.push_back(E->requiresADL());
- Record.push_back(E->isOverloaded());
+ CurrentPackingBits.addBit(E->requiresADL());
+ CurrentPackingBits.addBit(E->isOverloaded());
Record.AddDeclRef(E->getNamingClass());
Code = serialization::EXPR_CXX_UNRESOLVED_LOOKUP;
}
@@ -2029,8 +2156,13 @@ void ASTStmtWriter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
void ASTStmtWriter::VisitSubstNonTypeTemplateParmExpr(
SubstNonTypeTemplateParmExpr *E) {
VisitExpr(E);
- Record.AddDeclRef(E->getParameter());
- Record.push_back(E->isReferenceParameter());
+ Record.AddDeclRef(E->getAssociatedDecl());
+ CurrentPackingBits.addBit(E->isReferenceParameter());
+ CurrentPackingBits.addBits(E->getIndex(), /*Width=*/12);
+ CurrentPackingBits.addBit((bool)E->getPackIndex());
+ if (auto PackIndex = E->getPackIndex())
+ Record.push_back(*PackIndex + 1);
+
Record.AddSourceLocation(E->getNameLoc());
Record.AddStmt(E->getReplacement());
Code = serialization::EXPR_SUBST_NON_TYPE_TEMPLATE_PARM;
@@ -2039,7 +2171,8 @@ void ASTStmtWriter::VisitSubstNonTypeTemplateParmExpr(
void ASTStmtWriter::VisitSubstNonTypeTemplateParmPackExpr(
SubstNonTypeTemplateParmPackExpr *E) {
VisitExpr(E);
- Record.AddDeclRef(E->getParameterPack());
+ Record.AddDeclRef(E->getAssociatedDecl());
+ Record.push_back(E->getIndex());
Record.AddTemplateArgument(E->getArgumentPack());
Record.AddSourceLocation(E->getParameterPackLocation());
Code = serialization::EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK;
@@ -2079,6 +2212,30 @@ void ASTStmtWriter::VisitCXXFoldExpr(CXXFoldExpr *E) {
Code = serialization::EXPR_CXX_FOLD;
}
+void ASTStmtWriter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) {
+ VisitExpr(E);
+ ArrayRef<Expr *> InitExprs = E->getInitExprs();
+ Record.push_back(InitExprs.size());
+ Record.push_back(E->getUserSpecifiedInitExprs().size());
+ Record.AddSourceLocation(E->getInitLoc());
+ Record.AddSourceLocation(E->getBeginLoc());
+ Record.AddSourceLocation(E->getEndLoc());
+ for (Expr *InitExpr : E->getInitExprs())
+ Record.AddStmt(InitExpr);
+ Expr *ArrayFiller = E->getArrayFiller();
+ FieldDecl *UnionField = E->getInitializedFieldInUnion();
+ bool HasArrayFillerOrUnionDecl = ArrayFiller || UnionField;
+ Record.push_back(HasArrayFillerOrUnionDecl);
+ if (HasArrayFillerOrUnionDecl) {
+ Record.push_back(static_cast<bool>(ArrayFiller));
+ if (ArrayFiller)
+ Record.AddStmt(ArrayFiller);
+ else
+ Record.AddDeclRef(UnionField);
+ }
+ Code = serialization::EXPR_CXX_PAREN_LIST_INIT;
+}
+
void ASTStmtWriter::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
VisitExpr(E);
Record.AddStmt(E->getSourceExpr());
@@ -2193,6 +2350,7 @@ void ASTStmtWriter::VisitOMPExecutableDirective(OMPExecutableDirective *E) {
Record.writeOMPChildren(E->Data);
Record.AddSourceLocation(E->getBeginLoc());
Record.AddSourceLocation(E->getEndLoc());
+ Record.writeEnum(E->getMappedDirective());
}
void ASTStmtWriter::VisitOMPLoopBasedDirective(OMPLoopBasedDirective *D) {
@@ -2205,6 +2363,13 @@ void ASTStmtWriter::VisitOMPLoopDirective(OMPLoopDirective *D) {
VisitOMPLoopBasedDirective(D);
}
+void ASTStmtWriter::VisitOMPMetaDirective(OMPMetaDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_META_DIRECTIVE;
+}
+
void ASTStmtWriter::VisitOMPParallelDirective(OMPParallelDirective *D) {
VisitStmt(D);
VisitOMPExecutableDirective(D);
@@ -2217,13 +2382,19 @@ void ASTStmtWriter::VisitOMPSimdDirective(OMPSimdDirective *D) {
Code = serialization::STMT_OMP_SIMD_DIRECTIVE;
}
-void ASTStmtWriter::VisitOMPTileDirective(OMPTileDirective *D) {
+void ASTStmtWriter::VisitOMPLoopTransformationDirective(
+ OMPLoopTransformationDirective *D) {
VisitOMPLoopBasedDirective(D);
+ Record.writeUInt32(D->getNumGeneratedLoops());
+}
+
+void ASTStmtWriter::VisitOMPTileDirective(OMPTileDirective *D) {
+ VisitOMPLoopTransformationDirective(D);
Code = serialization::STMT_OMP_TILE_DIRECTIVE;
}
void ASTStmtWriter::VisitOMPUnrollDirective(OMPUnrollDirective *D) {
- VisitOMPLoopBasedDirective(D);
+ VisitOMPLoopTransformationDirective(D);
Code = serialization::STMT_OMP_UNROLL_DIRECTIVE;
}
@@ -2252,6 +2423,12 @@ void ASTStmtWriter::VisitOMPSectionDirective(OMPSectionDirective *D) {
Code = serialization::STMT_OMP_SECTION_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPScopeDirective(OMPScopeDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_SCOPE_DIRECTIVE;
+}
+
void ASTStmtWriter::VisitOMPSingleDirective(OMPSingleDirective *D) {
VisitStmt(D);
VisitOMPExecutableDirective(D);
@@ -2290,6 +2467,13 @@ void ASTStmtWriter::VisitOMPParallelMasterDirective(
Code = serialization::STMT_OMP_PARALLEL_MASTER_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPParallelMaskedDirective(
+ OMPParallelMaskedDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_PARALLEL_MASKED_DIRECTIVE;
+}
+
void ASTStmtWriter::VisitOMPParallelSectionsDirective(
OMPParallelSectionsDirective *D) {
VisitStmt(D);
@@ -2310,6 +2494,7 @@ void ASTStmtWriter::VisitOMPAtomicDirective(OMPAtomicDirective *D) {
VisitOMPExecutableDirective(D);
Record.writeBool(D->isXLHSInRHSPart());
Record.writeBool(D->isPostfixUpdate());
+ Record.writeBool(D->isFailOnly());
Code = serialization::STMT_OMP_ATOMIC_DIRECTIVE;
}
@@ -2368,10 +2553,18 @@ void ASTStmtWriter::VisitOMPBarrierDirective(OMPBarrierDirective *D) {
void ASTStmtWriter::VisitOMPTaskwaitDirective(OMPTaskwaitDirective *D) {
VisitStmt(D);
+ Record.push_back(D->getNumClauses());
VisitOMPExecutableDirective(D);
Code = serialization::STMT_OMP_TASKWAIT_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPErrorDirective(OMPErrorDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_ERROR_DIRECTIVE;
+}
+
void ASTStmtWriter::VisitOMPTaskgroupDirective(OMPTaskgroupDirective *D) {
VisitStmt(D);
VisitOMPExecutableDirective(D);
@@ -2441,12 +2634,25 @@ void ASTStmtWriter::VisitOMPMasterTaskLoopDirective(
Code = serialization::STMT_OMP_MASTER_TASKLOOP_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPMaskedTaskLoopDirective(
+ OMPMaskedTaskLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+ Record.writeBool(D->hasCancel());
+ Code = serialization::STMT_OMP_MASKED_TASKLOOP_DIRECTIVE;
+}
+
void ASTStmtWriter::VisitOMPMasterTaskLoopSimdDirective(
OMPMasterTaskLoopSimdDirective *D) {
VisitOMPLoopDirective(D);
Code = serialization::STMT_OMP_MASTER_TASKLOOP_SIMD_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPMaskedTaskLoopSimdDirective(
+ OMPMaskedTaskLoopSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_MASKED_TASKLOOP_SIMD_DIRECTIVE;
+}
+
void ASTStmtWriter::VisitOMPParallelMasterTaskLoopDirective(
OMPParallelMasterTaskLoopDirective *D) {
VisitOMPLoopDirective(D);
@@ -2454,12 +2660,25 @@ void ASTStmtWriter::VisitOMPParallelMasterTaskLoopDirective(
Code = serialization::STMT_OMP_PARALLEL_MASTER_TASKLOOP_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPParallelMaskedTaskLoopDirective(
+ OMPParallelMaskedTaskLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+ Record.writeBool(D->hasCancel());
+ Code = serialization::STMT_OMP_PARALLEL_MASKED_TASKLOOP_DIRECTIVE;
+}
+
void ASTStmtWriter::VisitOMPParallelMasterTaskLoopSimdDirective(
OMPParallelMasterTaskLoopSimdDirective *D) {
VisitOMPLoopDirective(D);
Code = serialization::STMT_OMP_PARALLEL_MASTER_TASKLOOP_SIMD_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPParallelMaskedTaskLoopSimdDirective(
+ OMPParallelMaskedTaskLoopSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_PARALLEL_MASKED_TASKLOOP_SIMD_DIRECTIVE;
+}
+
void ASTStmtWriter::VisitOMPDistributeDirective(OMPDistributeDirective *D) {
VisitOMPLoopDirective(D);
Code = serialization::STMT_OMP_DISTRIBUTE_DIRECTIVE;
@@ -2577,21 +2796,48 @@ void ASTStmtWriter::VisitOMPMaskedDirective(OMPMaskedDirective *D) {
Code = serialization::STMT_OMP_MASKED_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPGenericLoopDirective(OMPGenericLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_GENERIC_LOOP_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTeamsGenericLoopDirective(
+ OMPTeamsGenericLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_TEAMS_GENERIC_LOOP_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTargetTeamsGenericLoopDirective(
+ OMPTargetTeamsGenericLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_TARGET_TEAMS_GENERIC_LOOP_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPParallelGenericLoopDirective(
+ OMPParallelGenericLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_PARALLEL_GENERIC_LOOP_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTargetParallelGenericLoopDirective(
+ OMPTargetParallelGenericLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_TARGET_PARALLEL_GENERIC_LOOP_DIRECTIVE;
+}
+
//===----------------------------------------------------------------------===//
// ASTWriter Implementation
//===----------------------------------------------------------------------===//
unsigned ASTWriter::RecordSwitchCaseID(SwitchCase *S) {
- assert(SwitchCaseIDs.find(S) == SwitchCaseIDs.end() &&
- "SwitchCase recorded twice");
+ assert(!SwitchCaseIDs.contains(S) && "SwitchCase recorded twice");
unsigned NextID = SwitchCaseIDs.size();
SwitchCaseIDs[S] = NextID;
return NextID;
}
unsigned ASTWriter::getSwitchCaseID(SwitchCase *S) {
- assert(SwitchCaseIDs.find(S) != SwitchCaseIDs.end() &&
- "SwitchCase hasn't been seen yet");
+ assert(SwitchCaseIDs.contains(S) && "SwitchCase hasn't been seen yet");
return SwitchCaseIDs[S];
}
diff --git a/contrib/llvm-project/clang/lib/Serialization/GeneratePCH.cpp b/contrib/llvm-project/clang/lib/Serialization/GeneratePCH.cpp
index d869796b82c1..cf8084333811 100644
--- a/contrib/llvm-project/clang/lib/Serialization/GeneratePCH.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/GeneratePCH.cpp
@@ -25,11 +25,11 @@ PCHGenerator::PCHGenerator(
StringRef OutputFile, StringRef isysroot, std::shared_ptr<PCHBuffer> Buffer,
ArrayRef<std::shared_ptr<ModuleFileExtension>> Extensions,
bool AllowASTWithErrors, bool IncludeTimestamps,
- bool ShouldCacheASTInMemory)
+ bool BuildingImplicitModule, bool ShouldCacheASTInMemory)
: PP(PP), OutputFile(OutputFile), isysroot(isysroot.str()),
SemaPtr(nullptr), Buffer(std::move(Buffer)), Stream(this->Buffer->Data),
Writer(Stream, this->Buffer->Data, ModuleCache, Extensions,
- IncludeTimestamps),
+ IncludeTimestamps, BuildingImplicitModule),
AllowASTWithErrors(AllowASTWithErrors),
ShouldCacheASTInMemory(ShouldCacheASTInMemory) {
this->Buffer->IsComplete = false;
@@ -50,7 +50,8 @@ void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) {
Module *Module = nullptr;
if (PP.getLangOpts().isCompilingModule()) {
Module = PP.getHeaderSearchInfo().lookupModule(
- PP.getLangOpts().CurrentModule, /*AllowSearch*/ false);
+ PP.getLangOpts().CurrentModule, SourceLocation(),
+ /*AllowSearch*/ false);
if (!Module) {
assert(hasErrors && "emitting module but current module doesn't exist");
return;
@@ -64,12 +65,8 @@ void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) {
// Emit the PCH file to the Buffer.
assert(SemaPtr && "No Sema?");
- Buffer->Signature =
- Writer.WriteAST(*SemaPtr, OutputFile, Module, isysroot,
- // For serialization we are lenient if the errors were
- // only warn-as-error kind.
- PP.getDiagnostics().hasUncompilableErrorOccurred(),
- ShouldCacheASTInMemory);
+ Buffer->Signature = Writer.WriteAST(*SemaPtr, OutputFile, Module, isysroot,
+ ShouldCacheASTInMemory);
Buffer->IsComplete = true;
}
diff --git a/contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp b/contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp
index 52ce17d984bf..dd4fc3e00905 100644
--- a/contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp
@@ -25,12 +25,12 @@
#include "llvm/Bitstream/BitstreamWriter.h"
#include "llvm/Support/DJB.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/FileUtilities.h"
#include "llvm/Support/LockFileManager.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/OnDiskHashTable.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TimeProfiler.h"
+#include "llvm/Support/raw_ostream.h"
#include <cstdio>
using namespace clang;
using namespace serialization;
@@ -89,8 +89,10 @@ public:
static std::pair<unsigned, unsigned>
ReadKeyDataLength(const unsigned char*& d) {
using namespace llvm::support;
- unsigned KeyLen = endian::readNext<uint16_t, little, unaligned>(d);
- unsigned DataLen = endian::readNext<uint16_t, little, unaligned>(d);
+ unsigned KeyLen =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(d);
+ unsigned DataLen =
+ endian::readNext<uint16_t, llvm::endianness::little, unaligned>(d);
return std::make_pair(KeyLen, DataLen);
}
@@ -111,7 +113,8 @@ public:
data_type Result;
while (DataLen > 0) {
- unsigned ID = endian::readNext<uint32_t, little, unaligned>(d);
+ unsigned ID =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d);
Result.push_back(ID);
DataLen -= 4;
}
@@ -277,19 +280,10 @@ GlobalModuleIndex::readIndex(StringRef Path) {
return std::make_pair(nullptr, Res.takeError());
}
- return std::make_pair(new GlobalModuleIndex(std::move(Buffer), Cursor),
+ return std::make_pair(new GlobalModuleIndex(std::move(Buffer), std::move(Cursor)),
llvm::Error::success());
}
-void
-GlobalModuleIndex::getKnownModules(SmallVectorImpl<ModuleFile *> &ModuleFiles) {
- ModuleFiles.clear();
- for (unsigned I = 0, N = Modules.size(); I != N; ++I) {
- if (ModuleFile *MF = Modules[I].File)
- ModuleFiles.push_back(MF);
- }
-}
-
void GlobalModuleIndex::getModuleDependencies(
ModuleFile *File,
SmallVectorImpl<ModuleFile *> &Dependencies) {
@@ -348,8 +342,8 @@ bool GlobalModuleIndex::loadedModuleFile(ModuleFile *File) {
// If the size and modification time match what we expected, record this
// module file.
bool Failed = true;
- if (File->File->getSize() == Info.Size &&
- File->File->getModificationTime() == Info.ModTime) {
+ if (File->File.getSize() == Info.Size &&
+ File->File.getModificationTime() == Info.ModTime) {
Info.File = File;
ModulesByFile[File] = Known->second;
@@ -414,15 +408,15 @@ namespace {
const PCHContainerReader &PCHContainerRdr;
/// Mapping from files to module file information.
- typedef llvm::MapVector<const FileEntry *, ModuleFileInfo> ModuleFilesMap;
+ using ModuleFilesMap = llvm::MapVector<FileEntryRef, ModuleFileInfo>;
/// Information about each of the known module files.
ModuleFilesMap ModuleFiles;
/// Mapping from the imported module file to the imported
/// information.
- typedef std::multimap<const FileEntry *, ImportedModuleFileInfo>
- ImportedModuleFilesMap;
+ using ImportedModuleFilesMap =
+ std::multimap<FileEntryRef, ImportedModuleFileInfo>;
/// Information about each importing of a module file.
ImportedModuleFilesMap ImportedModuleFiles;
@@ -439,9 +433,8 @@ namespace {
void emitBlockInfoBlock(llvm::BitstreamWriter &Stream);
/// Retrieve the module file information for the given file.
- ModuleFileInfo &getModuleFileInfo(const FileEntry *File) {
- llvm::MapVector<const FileEntry *, ModuleFileInfo>::iterator Known
- = ModuleFiles.find(File);
+ ModuleFileInfo &getModuleFileInfo(FileEntryRef File) {
+ auto Known = ModuleFiles.find(File);
if (Known != ModuleFiles.end())
return Known->second;
@@ -457,7 +450,7 @@ namespace {
: FileMgr(FileMgr), PCHContainerRdr(PCHContainerRdr) {}
/// Load the contents of the given module file into the builder.
- llvm::Error loadModuleFile(const FileEntry *File);
+ llvm::Error loadModuleFile(FileEntryRef File);
/// Write the index to the given bitstream.
/// \returns true if an error occurred, false otherwise.
@@ -521,14 +514,15 @@ namespace {
// The first bit indicates whether this identifier is interesting.
// That's all we care about.
using namespace llvm::support;
- unsigned RawID = endian::readNext<uint32_t, little, unaligned>(d);
+ unsigned RawID =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(d);
bool IsInteresting = RawID & 0x01;
return std::make_pair(k, IsInteresting);
}
};
}
-llvm::Error GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
+llvm::Error GlobalModuleIndexBuilder::loadModuleFile(FileEntryRef File) {
// Open the module file.
auto Buffer = FileMgr.getBufferForFile(File, /*isVolatile=*/true);
@@ -634,6 +628,9 @@ llvm::Error GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
// Skip the imported kind
++Idx;
+ // Skip if it is standard C++ module
+ ++Idx;
+
// Skip the import location
++Idx;
@@ -659,9 +656,9 @@ llvm::Error GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
Idx += Length;
// Find the imported module file.
- auto DependsOnFile
- = FileMgr.getFile(ImportedFile, /*OpenFile=*/false,
- /*CacheFailure=*/false);
+ auto DependsOnFile =
+ FileMgr.getOptionalFileRef(ImportedFile, /*OpenFile=*/false,
+ /*CacheFailure=*/false);
if (!DependsOnFile)
return llvm::createStringError(std::errc::bad_file_descriptor,
@@ -703,9 +700,12 @@ llvm::Error GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
}
// Get Signature.
- if (State == DiagnosticOptionsBlock && Code == SIGNATURE)
- getModuleFileInfo(File).Signature = ASTFileSignature::create(
- Record.begin(), Record.begin() + ASTFileSignature::size);
+ if (State == DiagnosticOptionsBlock && Code == SIGNATURE) {
+ auto Signature = ASTFileSignature::create(Blob.begin(), Blob.end());
+ assert(Signature != ASTFileSignature::createDummy() &&
+ "Dummy AST file signature not backpatched in ASTWriter.");
+ getModuleFileInfo(File).Signature = Signature;
+ }
// We don't care about this record.
}
@@ -733,7 +733,7 @@ public:
std::pair<unsigned,unsigned>
EmitKeyDataLength(raw_ostream& Out, key_type_ref Key, data_type_ref Data) {
using namespace llvm::support;
- endian::Writer LE(Out, little);
+ endian::Writer LE(Out, llvm::endianness::little);
unsigned KeyLen = Key.size();
unsigned DataLen = Data.size() * 4;
LE.write<uint16_t>(KeyLen);
@@ -749,7 +749,7 @@ public:
unsigned DataLen) {
using namespace llvm::support;
for (unsigned I = 0, N = Data.size(); I != N; ++I)
- endian::write<uint32_t>(Out, Data[I], little);
+ endian::write<uint32_t>(Out, Data[I], llvm::endianness::little);
}
};
@@ -757,14 +757,14 @@ public:
bool GlobalModuleIndexBuilder::writeIndex(llvm::BitstreamWriter &Stream) {
for (auto MapEntry : ImportedModuleFiles) {
- auto *File = MapEntry.first;
+ auto File = MapEntry.first;
ImportedModuleFileInfo &Info = MapEntry.second;
if (getModuleFileInfo(File).Signature) {
if (getModuleFileInfo(File).Signature != Info.StoredSignature)
// Verify Signature.
return true;
- } else if (Info.StoredSize != File->getSize() ||
- Info.StoredModTime != File->getModificationTime())
+ } else if (Info.StoredSize != File.getSize() ||
+ Info.StoredModTime != File.getModificationTime())
// Verify Size and ModTime.
return true;
}
@@ -795,11 +795,11 @@ bool GlobalModuleIndexBuilder::writeIndex(llvm::BitstreamWriter &Stream) {
M != MEnd; ++M) {
Record.clear();
Record.push_back(M->second.ID);
- Record.push_back(M->first->getSize());
- Record.push_back(M->first->getModificationTime());
+ Record.push_back(M->first.getSize());
+ Record.push_back(M->first.getModificationTime());
// File name
- StringRef Name(M->first->getName());
+ StringRef Name(M->first.getName());
Record.push_back(Name.size());
Record.append(Name.begin(), Name.end());
@@ -828,7 +828,7 @@ bool GlobalModuleIndexBuilder::writeIndex(llvm::BitstreamWriter &Stream) {
using namespace llvm::support;
llvm::raw_svector_ostream Out(IdentifierTable);
// Make sure that no bucket is at offset 0
- endian::write<uint32_t>(Out, 0, little);
+ endian::write<uint32_t>(Out, 0, llvm::endianness::little);
BucketOffset = Generator.Emit(Out, Trait);
}
@@ -895,7 +895,7 @@ GlobalModuleIndex::writeIndex(FileManager &FileMgr,
}
// If we can't find the module file, skip it.
- auto ModuleFile = FileMgr.getFile(D->path());
+ auto ModuleFile = FileMgr.getOptionalFileRef(D->path());
if (!ModuleFile)
continue;
@@ -913,8 +913,10 @@ GlobalModuleIndex::writeIndex(FileManager &FileMgr,
"failed writing index");
}
- return llvm::writeFileAtomically((IndexPath + "-%%%%%%%%").str(), IndexPath,
- OutputBuffer);
+ return llvm::writeToOutput(IndexPath, [&OutputBuffer](llvm::raw_ostream &OS) {
+ OS << OutputBuffer;
+ return llvm::Error::success();
+ });
}
namespace {
diff --git a/contrib/llvm-project/clang/lib/Serialization/ModuleFileExtension.cpp b/contrib/llvm-project/clang/lib/Serialization/ModuleFileExtension.cpp
index 6b7fd1d54340..95fff41e0d7a 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ModuleFileExtension.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ModuleFileExtension.cpp
@@ -11,12 +11,10 @@ using namespace clang;
char ModuleFileExtension::ID = 0;
-ModuleFileExtension::~ModuleFileExtension() { }
+ModuleFileExtension::~ModuleFileExtension() {}
-llvm::hash_code ModuleFileExtension::hashExtension(llvm::hash_code Code) const {
- return Code;
-}
+void ModuleFileExtension::hashExtension(ExtensionHashBuilder &HBuilder) const {}
-ModuleFileExtensionWriter::~ModuleFileExtensionWriter() { }
+ModuleFileExtensionWriter::~ModuleFileExtensionWriter() {}
-ModuleFileExtensionReader::~ModuleFileExtensionReader() { }
+ModuleFileExtensionReader::~ModuleFileExtensionReader() {}
diff --git a/contrib/llvm-project/clang/lib/Serialization/ModuleManager.cpp b/contrib/llvm-project/clang/lib/Serialization/ModuleManager.cpp
index 40ffa6cfee8f..51b642941296 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ModuleManager.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ModuleManager.cpp
@@ -52,18 +52,14 @@ ModuleFile *ModuleManager::lookupByFileName(StringRef Name) const {
ModuleFile *ModuleManager::lookupByModuleName(StringRef Name) const {
if (const Module *Mod = HeaderSearchInfo.getModuleMap().findModule(Name))
- if (const FileEntry *File = Mod->getASTFile())
- return lookup(File);
+ if (OptionalFileEntryRef File = Mod->getASTFile())
+ return lookup(*File);
return nullptr;
}
ModuleFile *ModuleManager::lookup(const FileEntry *File) const {
- auto Known = Modules.find(File);
- if (Known == Modules.end())
- return nullptr;
-
- return Known->second;
+ return Modules.lookup(File);
}
std::unique_ptr<llvm::MemoryBuffer>
@@ -112,7 +108,7 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
// Look for the file entry. This only fails if the expected size or
// modification time differ.
- OptionalFileEntryRefDegradesToFileEntryPtr Entry;
+ OptionalFileEntryRef Entry;
if (Type == MK_ExplicitModule || Type == MK_PrebuiltModule) {
// If we're not expecting to pull this file out of the module cache, it
// might have a different mtime due to being moved across filesystems in
@@ -127,7 +123,7 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
return OutOfDate;
}
- if (!Entry && FileName != "-") {
+ if (!Entry) {
ErrorStr = "module file not found";
return Missing;
}
@@ -147,15 +143,15 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
// being consistent across operating systems and across subsequent accesses
// to the Modules map.
auto implicitModuleNamesMatch = [](ModuleKind Kind, const ModuleFile *MF,
- const FileEntry *Entry) -> bool {
+ FileEntryRef Entry) -> bool {
if (Kind != MK_ImplicitModule)
return true;
- return Entry->getName() == MF->FileName;
+ return Entry.getName() == MF->FileName;
};
// Check whether we already loaded this module, before
- if (ModuleFile *ModuleEntry = Modules.lookup(Entry)) {
- if (implicitModuleNamesMatch(Type, ModuleEntry, Entry)) {
+ if (ModuleFile *ModuleEntry = Modules.lookup(*Entry)) {
+ if (implicitModuleNamesMatch(Type, ModuleEntry, *Entry)) {
// Check the stored signature.
if (checkSignature(ModuleEntry->Signature, ExpectedSignature, ErrorStr))
return OutOfDate;
@@ -167,10 +163,9 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
}
// Allocate a new module.
- auto NewModule = std::make_unique<ModuleFile>(Type, Generation);
+ auto NewModule = std::make_unique<ModuleFile>(Type, *Entry, Generation);
NewModule->Index = Chain.size();
NewModule->FileName = FileName.str();
- NewModule->File = Entry;
NewModule->ImportLoc = ImportLoc;
NewModule->InputFilesValidationTimestamp = 0;
@@ -202,21 +197,15 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
Entry->closeFile();
return OutOfDate;
} else {
- // Open the AST file.
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Buf((std::error_code()));
- if (FileName == "-") {
- Buf = llvm::MemoryBuffer::getSTDIN();
- } else {
- // Get a buffer of the file and close the file descriptor when done.
- // The file is volatile because in a parallel build we expect multiple
- // compiler processes to use the same module file rebuilding it if needed.
- //
- // RequiresNullTerminator is false because module files don't need it, and
- // this allows the file to still be mmapped.
- Buf = FileMgr.getBufferForFile(NewModule->File,
- /*IsVolatile=*/true,
- /*RequiresNullTerminator=*/false);
- }
+ // Get a buffer of the file and close the file descriptor when done.
+ // The file is volatile because in a parallel build we expect multiple
+ // compiler processes to use the same module file rebuilding it if needed.
+ //
+ // RequiresNullTerminator is false because module files don't need it, and
+ // this allows the file to still be mmapped.
+ auto Buf = FileMgr.getBufferForFile(NewModule->File,
+ /*IsVolatile=*/true,
+ /*RequiresNullTerminator=*/false);
if (!Buf) {
ErrorStr = Buf.getError().message();
@@ -236,7 +225,7 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
return OutOfDate;
// We're keeping this module. Store it everywhere.
- Module = Modules[Entry] = NewModule.get();
+ Module = Modules[*Entry] = NewModule.get();
updateModuleImports(*NewModule, ImportedBy, ImportLoc);
@@ -249,7 +238,7 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
return NewlyLoaded;
}
-void ModuleManager::removeModules(ModuleIterator First, ModuleMap *modMap) {
+void ModuleManager::removeModules(ModuleIterator First) {
auto Last = end();
if (First == Last)
return;
@@ -270,8 +259,7 @@ void ModuleManager::removeModules(ModuleIterator First, ModuleMap *modMap) {
I->Imports.remove_if(IsVictim);
I->ImportedBy.remove_if(IsVictim);
}
- Roots.erase(std::remove_if(Roots.begin(), Roots.end(), IsVictim),
- Roots.end());
+ llvm::erase_if(Roots, IsVictim);
// Remove the modules from the PCH chain.
for (auto I = First; I != Last; ++I) {
@@ -281,19 +269,10 @@ void ModuleManager::removeModules(ModuleIterator First, ModuleMap *modMap) {
}
}
- // Delete the modules and erase them from the various structures.
- for (ModuleIterator victim = First; victim != Last; ++victim) {
+ // Delete the modules.
+ for (ModuleIterator victim = First; victim != Last; ++victim)
Modules.erase(victim->File);
- if (modMap) {
- StringRef ModuleName = victim->ModuleName;
- if (Module *mod = modMap->findModule(ModuleName)) {
- mod->setASTFile(None);
- }
- }
- }
-
- // Delete the modules.
Chain.erase(Chain.begin() + (First - begin()), Chain.end());
}
@@ -305,23 +284,22 @@ ModuleManager::addInMemoryBuffer(StringRef FileName,
InMemoryBuffers[Entry] = std::move(Buffer);
}
-ModuleManager::VisitState *ModuleManager::allocateVisitState() {
+std::unique_ptr<ModuleManager::VisitState> ModuleManager::allocateVisitState() {
// Fast path: if we have a cached state, use it.
if (FirstVisitState) {
- VisitState *Result = FirstVisitState;
- FirstVisitState = FirstVisitState->NextState;
- Result->NextState = nullptr;
+ auto Result = std::move(FirstVisitState);
+ FirstVisitState = std::move(Result->NextState);
return Result;
}
// Allocate and return a new state.
- return new VisitState(size());
+ return std::make_unique<VisitState>(size());
}
-void ModuleManager::returnVisitState(VisitState *State) {
+void ModuleManager::returnVisitState(std::unique_ptr<VisitState> State) {
assert(State->NextState == nullptr && "Visited state is in list?");
- State->NextState = FirstVisitState;
- FirstVisitState = State;
+ State->NextState = std::move(FirstVisitState);
+ FirstVisitState = std::move(State);
}
void ModuleManager::setGlobalIndex(GlobalModuleIndex *Index) {
@@ -352,8 +330,6 @@ ModuleManager::ModuleManager(FileManager &FileMgr,
: FileMgr(FileMgr), ModuleCache(&ModuleCache),
PCHContainerRdr(PCHContainerRdr), HeaderSearchInfo(HeaderSearchInfo) {}
-ModuleManager::~ModuleManager() { delete FirstVisitState; }
-
void ModuleManager::visit(llvm::function_ref<bool(ModuleFile &M)> Visitor,
llvm::SmallPtrSetImpl<ModuleFile *> *ModuleFilesHit) {
// If the visitation order vector is the wrong size, recompute the order.
@@ -384,26 +360,23 @@ void ModuleManager::visit(llvm::function_ref<bool(ModuleFile &M)> Visitor,
// For any module that this module depends on, push it on the
// stack (if it hasn't already been marked as visited).
- for (auto M = CurrentModule->Imports.rbegin(),
- MEnd = CurrentModule->Imports.rend();
- M != MEnd; ++M) {
+ for (ModuleFile *M : llvm::reverse(CurrentModule->Imports)) {
// Remove our current module as an impediment to visiting the
// module we depend on. If we were the last unvisited module
// that depends on this particular module, push it into the
// queue to be visited.
- unsigned &NumUnusedEdges = UnusedIncomingEdges[(*M)->Index];
+ unsigned &NumUnusedEdges = UnusedIncomingEdges[M->Index];
if (NumUnusedEdges && (--NumUnusedEdges == 0))
- Queue.push_back(*M);
+ Queue.push_back(M);
}
}
assert(VisitOrder.size() == N && "Visitation order is wrong?");
- delete FirstVisitState;
FirstVisitState = nullptr;
}
- VisitState *State = allocateVisitState();
+ auto State = allocateVisitState();
unsigned VisitNumber = State->NextVisitNumber++;
// If the caller has provided us with a hit-set that came from the global
@@ -455,28 +428,25 @@ void ModuleManager::visit(llvm::function_ref<bool(ModuleFile &M)> Visitor,
} while (true);
}
- returnVisitState(State);
+ returnVisitState(std::move(State));
}
bool ModuleManager::lookupModuleFile(StringRef FileName, off_t ExpectedSize,
time_t ExpectedModTime,
- Optional<FileEntryRef> &File) {
- File = None;
- if (FileName == "-")
+ OptionalFileEntryRef &File) {
+ if (FileName == "-") {
+ File = expectedToOptional(FileMgr.getSTDIN());
return false;
+ }
// Open the file immediately to ensure there is no race between stat'ing and
// opening the file.
- Optional<FileEntryRef> FileOrErr =
- expectedToOptional(FileMgr.getFileRef(FileName, /*OpenFile=*/true,
- /*CacheFailure=*/false));
- if (!FileOrErr)
- return false;
-
- File = *FileOrErr;
+ File = FileMgr.getOptionalFileRef(FileName, /*OpenFile=*/true,
+ /*CacheFailure=*/false);
- if ((ExpectedSize && ExpectedSize != File->getSize()) ||
- (ExpectedModTime && ExpectedModTime != File->getModificationTime()))
+ if (File &&
+ ((ExpectedSize && ExpectedSize != File->getSize()) ||
+ (ExpectedModTime && ExpectedModTime != File->getModificationTime())))
// Do not destroy File, as it may be referenced. If we need to rebuild it,
// it will be destroyed by removeModules.
return true;
diff --git a/contrib/llvm-project/clang/lib/Serialization/MultiOnDiskHashTable.h b/contrib/llvm-project/clang/lib/Serialization/MultiOnDiskHashTable.h
index adc97d57e0ac..2402a628b512 100644
--- a/contrib/llvm-project/clang/lib/Serialization/MultiOnDiskHashTable.h
+++ b/contrib/llvm-project/clang/lib/Serialization/MultiOnDiskHashTable.h
@@ -199,10 +199,12 @@ public:
storage_type Ptr = Data;
- uint32_t BucketOffset = endian::readNext<uint32_t, little, unaligned>(Ptr);
+ uint32_t BucketOffset =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Ptr);
// Read the list of overridden files.
- uint32_t NumFiles = endian::readNext<uint32_t, little, unaligned>(Ptr);
+ uint32_t NumFiles =
+ endian::readNext<uint32_t, llvm::endianness::little, unaligned>(Ptr);
// FIXME: Add a reserve() to TinyPtrVector so that we don't need to make
// an additional copy.
llvm::SmallVector<file_type, 16> OverriddenFiles;
@@ -311,7 +313,7 @@ public:
// Write our header information.
{
- endian::Writer Writer(OutStream, little);
+ endian::Writer Writer(OutStream, llvm::endianness::little);
// Reserve four bytes for the bucket offset.
Writer.write<uint32_t>(0);
diff --git a/contrib/llvm-project/clang/lib/Serialization/PCHContainerOperations.cpp b/contrib/llvm-project/clang/lib/Serialization/PCHContainerOperations.cpp
index d4990fce2d99..56ca3394385b 100644
--- a/contrib/llvm-project/clang/lib/Serialization/PCHContainerOperations.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/PCHContainerOperations.cpp
@@ -57,6 +57,11 @@ std::unique_ptr<ASTConsumer> RawPCHContainerWriter::CreatePCHContainerGenerator(
return std::make_unique<RawPCHContainerGenerator>(std::move(OS), Buffer);
}
+ArrayRef<llvm::StringRef> RawPCHContainerReader::getFormats() const {
+ static StringRef Raw("raw");
+ return ArrayRef(Raw);
+}
+
StringRef
RawPCHContainerReader::ExtractPCH(llvm::MemoryBufferRef Buffer) const {
return Buffer.getBuffer();
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
index c06604b6cffe..a54f1b1e71d4 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
@@ -7,18 +7,20 @@
//===----------------------------------------------------------------------===//
// This file reports various statistics about analyzer visitation.
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/raw_ostream.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -51,15 +53,14 @@ void AnalyzerStatsChecker::checkEndAnalysis(ExplodedGraph &G,
const Decl *D = LC->getDecl();
// Iterate over the exploded graph.
- for (ExplodedGraph::node_iterator I = G.nodes_begin();
- I != G.nodes_end(); ++I) {
- const ProgramPoint &P = I->getLocation();
+ for (const ExplodedNode &N : G.nodes()) {
+ const ProgramPoint &P = N.getLocation();
// Only check the coverage in the top level function (optimization).
if (D != P.getLocationContext()->getDecl())
continue;
- if (Optional<BlockEntrance> BE = P.getAs<BlockEntrance>()) {
+ if (std::optional<BlockEntrance> BE = P.getAs<BlockEntrance>()) {
const CFGBlock *CB = BE->getBlock();
reachable.insert(CB);
}
@@ -93,11 +94,10 @@ void AnalyzerStatsChecker::checkEndAnalysis(ExplodedGraph &G,
if (!Loc.isValid())
return;
- if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) {
+ if (isa<FunctionDecl, ObjCMethodDecl>(D)) {
const NamedDecl *ND = cast<NamedDecl>(D);
output << *ND;
- }
- else if (isa<BlockDecl>(D)) {
+ } else if (isa<BlockDecl>(D)) {
output << "block(line:" << Loc.getLine() << ":col:" << Loc.getColumn();
}
@@ -115,16 +115,13 @@ void AnalyzerStatsChecker::checkEndAnalysis(ExplodedGraph &G,
output.str(), PathDiagnosticLocation(D, SM));
// Emit warning for each block we bailed out on.
- typedef CoreEngine::BlocksExhausted::const_iterator ExhaustedIterator;
const CoreEngine &CE = Eng.getCoreEngine();
- for (ExhaustedIterator I = CE.blocks_exhausted_begin(),
- E = CE.blocks_exhausted_end(); I != E; ++I) {
- const BlockEdge &BE = I->first;
+ for (const BlockEdge &BE : make_first_range(CE.exhausted_blocks())) {
const CFGBlock *Exit = BE.getDst();
if (Exit->empty())
continue;
const CFGElement &CE = Exit->front();
- if (Optional<CFGStmt> CS = CE.getAs<CFGStmt>()) {
+ if (std::optional<CFGStmt> CS = CE.getAs<CFGStmt>()) {
SmallString<128> bufI;
llvm::raw_svector_ostream outputI(bufI);
outputI << "(" << NameOfRootFunction << ")" <<
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
index 605b11874ef5..c990ad138f89 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
@@ -25,7 +25,7 @@ using namespace ento;
namespace {
class ArrayBoundChecker :
public Checker<check::Location> {
- mutable std::unique_ptr<BuiltinBug> BT;
+ const BugType BT{this, "Out-of-bound array access"};
public:
void checkLocation(SVal l, bool isLoad, const Stmt* S,
@@ -58,25 +58,20 @@ void ArrayBoundChecker::checkLocation(SVal l, bool isLoad, const Stmt* LoadS,
DefinedOrUnknownSVal ElementCount = getDynamicElementCount(
state, ER->getSuperRegion(), C.getSValBuilder(), ER->getValueType());
- ProgramStateRef StInBound = state->assumeInBound(Idx, ElementCount, true);
- ProgramStateRef StOutBound = state->assumeInBound(Idx, ElementCount, false);
+ ProgramStateRef StInBound, StOutBound;
+ std::tie(StInBound, StOutBound) = state->assumeInBoundDual(Idx, ElementCount);
if (StOutBound && !StInBound) {
ExplodedNode *N = C.generateErrorNode(StOutBound);
if (!N)
return;
- if (!BT)
- BT.reset(new BuiltinBug(
- this, "Out-of-bound array access",
- "Access out-of-bound array element (buffer overflow)"));
-
// FIXME: It would be nice to eventually make this diagnostic more clear,
// e.g., by referencing the original declaration or by saying *why* this
// reference is outside the range.
// Generate a report for this bug.
- auto report =
- std::make_unique<PathSensitiveBugReport>(*BT, BT->getDescription(), N);
+ auto report = std::make_unique<PathSensitiveBugReport>(
+ BT, "Access out-of-bound array element (buffer overflow)", N);
report->addRange(LoadS->getSourceRange());
C.emitReport(std::move(report));
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
index 2a5fe9d8ed92..6c7a1601402e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
@@ -11,9 +11,10 @@
//
//===----------------------------------------------------------------------===//
-#include "Taint.h"
#include "clang/AST/CharUnits.h"
+#include "clang/AST/ParentMapContext.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Checkers/Taint.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -22,77 +23,134 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
+#include <optional>
using namespace clang;
using namespace ento;
using namespace taint;
+using llvm::formatv;
namespace {
-class ArrayBoundCheckerV2 :
- public Checker<check::Location> {
- mutable std::unique_ptr<BuiltinBug> BT;
+enum OOB_Kind { OOB_Precedes, OOB_Exceeds, OOB_Taint };
- enum OOB_Kind { OOB_Precedes, OOB_Excedes, OOB_Tainted };
-
- void reportOOB(CheckerContext &C, ProgramStateRef errorState, OOB_Kind kind,
- std::unique_ptr<BugReporterVisitor> Visitor = nullptr) const;
-
-public:
- void checkLocation(SVal l, bool isLoad, const Stmt*S,
- CheckerContext &C) const;
+struct Messages {
+ std::string Short, Full;
};
-// FIXME: Eventually replace RegionRawOffset with this class.
-class RegionRawOffsetV2 {
-private:
- const SubRegion *baseRegion;
- SVal byteOffset;
+// NOTE: The `ArraySubscriptExpr` and `UnaryOperator` callbacks are `PostStmt`
+// instead of `PreStmt` because the current implementation passes the whole
+// expression to `CheckerContext::getSVal()` which only works after the
+// symbolic evaluation of the expression. (To turn them into `PreStmt`
+// callbacks, we'd need to duplicate the logic that evaluates these
+// expressions.) The `MemberExpr` callback would work as `PreStmt` but it's
+// defined as `PostStmt` for the sake of consistency with the other callbacks.
+class ArrayBoundCheckerV2 : public Checker<check::PostStmt<ArraySubscriptExpr>,
+ check::PostStmt<UnaryOperator>,
+ check::PostStmt<MemberExpr>> {
+ BugType BT{this, "Out-of-bound access"};
+ BugType TaintBT{this, "Out-of-bound access", categories::TaintedData};
- RegionRawOffsetV2()
- : baseRegion(nullptr), byteOffset(UnknownVal()) {}
+ void performCheck(const Expr *E, CheckerContext &C) const;
-public:
- RegionRawOffsetV2(const SubRegion* base, SVal offset)
- : baseRegion(base), byteOffset(offset) {}
+ void reportOOB(CheckerContext &C, ProgramStateRef ErrorState, OOB_Kind Kind,
+ NonLoc Offset, Messages Msgs) const;
- NonLoc getByteOffset() const { return byteOffset.castAs<NonLoc>(); }
- const SubRegion *getRegion() const { return baseRegion; }
+ static bool isFromCtypeMacro(const Stmt *S, ASTContext &AC);
- static RegionRawOffsetV2 computeOffset(ProgramStateRef state,
- SValBuilder &svalBuilder,
- SVal location);
+ static bool isInAddressOf(const Stmt *S, ASTContext &AC);
- void dump() const;
- void dumpToStream(raw_ostream &os) const;
+public:
+ void checkPostStmt(const ArraySubscriptExpr *E, CheckerContext &C) const {
+ performCheck(E, C);
+ }
+ void checkPostStmt(const UnaryOperator *E, CheckerContext &C) const {
+ if (E->getOpcode() == UO_Deref)
+ performCheck(E, C);
+ }
+ void checkPostStmt(const MemberExpr *E, CheckerContext &C) const {
+ if (E->isArrow())
+ performCheck(E->getBase(), C);
+ }
};
-}
-static SVal computeExtentBegin(SValBuilder &svalBuilder,
- const MemRegion *region) {
- const MemSpaceRegion *SR = region->getMemorySpace();
- if (SR->getKind() == MemRegion::UnknownSpaceRegionKind)
- return UnknownVal();
- else
- return svalBuilder.makeZeroArrayIndex();
+} // anonymous namespace
+
+/// For a given Location that can be represented as a symbolic expression
+/// Arr[Idx] (or perhaps Arr[Idx1][Idx2] etc.), return the parent memory block
+/// Arr and the distance of Location from the beginning of Arr (expressed in a
+/// NonLoc that specifies the number of CharUnits). Returns nullopt when these
+/// cannot be determined.
+static std::optional<std::pair<const SubRegion *, NonLoc>>
+computeOffset(ProgramStateRef State, SValBuilder &SVB, SVal Location) {
+ QualType T = SVB.getArrayIndexType();
+ auto EvalBinOp = [&SVB, State, T](BinaryOperatorKind Op, NonLoc L, NonLoc R) {
+ // We will use this utility to add and multiply values.
+ return SVB.evalBinOpNN(State, Op, L, R, T).getAs<NonLoc>();
+ };
+
+ const SubRegion *OwnerRegion = nullptr;
+ std::optional<NonLoc> Offset = SVB.makeZeroArrayIndex();
+
+ const ElementRegion *CurRegion =
+ dyn_cast_or_null<ElementRegion>(Location.getAsRegion());
+
+ while (CurRegion) {
+ const auto Index = CurRegion->getIndex().getAs<NonLoc>();
+ if (!Index)
+ return std::nullopt;
+
+ QualType ElemType = CurRegion->getElementType();
+
+ // FIXME: The following early return was presumably added to safeguard the
+ // getTypeSizeInChars() call (which doesn't accept an incomplete type), but
+ // it seems that `ElemType` cannot be incomplete at this point.
+ if (ElemType->isIncompleteType())
+ return std::nullopt;
+
+ // Calculate Delta = Index * sizeof(ElemType).
+ NonLoc Size = SVB.makeArrayIndex(
+ SVB.getContext().getTypeSizeInChars(ElemType).getQuantity());
+ auto Delta = EvalBinOp(BO_Mul, *Index, Size);
+ if (!Delta)
+ return std::nullopt;
+
+ // Perform Offset += Delta.
+ Offset = EvalBinOp(BO_Add, *Offset, *Delta);
+ if (!Offset)
+ return std::nullopt;
+
+ OwnerRegion = CurRegion->getSuperRegion()->getAs<SubRegion>();
+ // When this is just another ElementRegion layer, we need to continue the
+ // offset calculations:
+ CurRegion = dyn_cast_or_null<ElementRegion>(OwnerRegion);
+ }
+
+ if (OwnerRegion)
+ return std::make_pair(OwnerRegion, *Offset);
+
+ return std::nullopt;
}
// TODO: once the constraint manager is smart enough to handle non simplified
// symbolic expressions remove this function. Note that this can not be used in
// the constraint manager as is, since this does not handle overflows. It is
// safe to assume, however, that memory offsets will not overflow.
+// NOTE: callers of this function need to be aware of the effects of overflows
+// and signed<->unsigned conversions!
static std::pair<NonLoc, nonloc::ConcreteInt>
getSimplifiedOffsets(NonLoc offset, nonloc::ConcreteInt extent,
SValBuilder &svalBuilder) {
- Optional<nonloc::SymbolVal> SymVal = offset.getAs<nonloc::SymbolVal>();
+ std::optional<nonloc::SymbolVal> SymVal = offset.getAs<nonloc::SymbolVal>();
if (SymVal && SymVal->isExpression()) {
if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(SymVal->getSymbol())) {
llvm::APSInt constant =
APSIntType(extent.getValue()).convert(SIE->getRHS());
switch (SIE->getOpcode()) {
case BO_Mul:
- // The constant should never be 0 here, since it the result of scaling
- // based on the size of a type which is never 0.
+ // The constant should never be 0 here, becasue multiplication by zero
+ // is simplified by the engine.
if ((extent.getValue() % constant) != 0)
return std::pair<NonLoc, nonloc::ConcreteInt>(offset, extent);
else
@@ -113,10 +171,154 @@ getSimplifiedOffsets(NonLoc offset, nonloc::ConcreteInt extent,
return std::pair<NonLoc, nonloc::ConcreteInt>(offset, extent);
}
-void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
- const Stmt* LoadS,
- CheckerContext &checkerContext) const {
+// Evaluate the comparison Value < Threshold with the help of the custom
+// simplification algorithm defined for this checker. Return a pair of states,
+// where the first one corresponds to "value below threshold" and the second
+// corresponds to "value at or above threshold". Returns {nullptr, nullptr} in
+// the case when the evaluation fails.
+// If the optional argument CheckEquality is true, then use BO_EQ instead of
+// the default BO_LT after consistently applying the same simplification steps.
+static std::pair<ProgramStateRef, ProgramStateRef>
+compareValueToThreshold(ProgramStateRef State, NonLoc Value, NonLoc Threshold,
+ SValBuilder &SVB, bool CheckEquality = false) {
+ if (auto ConcreteThreshold = Threshold.getAs<nonloc::ConcreteInt>()) {
+ std::tie(Value, Threshold) = getSimplifiedOffsets(Value, *ConcreteThreshold, SVB);
+ }
+ if (auto ConcreteThreshold = Threshold.getAs<nonloc::ConcreteInt>()) {
+ QualType T = Value.getType(SVB.getContext());
+ if (T->isUnsignedIntegerType() && ConcreteThreshold->getValue().isNegative()) {
+ // In this case we reduced the bound check to a comparison of the form
+ // (symbol or value with unsigned type) < (negative number)
+ // which is always false. We are handling these cases separately because
+ // evalBinOpNN can perform a signed->unsigned conversion that turns the
+ // negative number into a huge positive value and leads to wildly
+ // inaccurate conclusions.
+ return {nullptr, State};
+ }
+ }
+ const BinaryOperatorKind OpKind = CheckEquality ? BO_EQ : BO_LT;
+ auto BelowThreshold =
+ SVB.evalBinOpNN(State, OpKind, Value, Threshold, SVB.getConditionType())
+ .getAs<NonLoc>();
+
+ if (BelowThreshold)
+ return State->assume(*BelowThreshold);
+
+ return {nullptr, nullptr};
+}
+
+static std::string getRegionName(const SubRegion *Region) {
+ if (std::string RegName = Region->getDescriptiveName(); !RegName.empty())
+ return RegName;
+
+ // Field regions only have descriptive names when their parent has a
+ // descriptive name; so we provide a fallback representation for them:
+ if (const auto *FR = Region->getAs<FieldRegion>()) {
+ if (StringRef Name = FR->getDecl()->getName(); !Name.empty())
+ return formatv("the field '{0}'", Name);
+ return "the unnamed field";
+ }
+
+ if (isa<AllocaRegion>(Region))
+ return "the memory returned by 'alloca'";
+
+ if (isa<SymbolicRegion>(Region) &&
+ isa<HeapSpaceRegion>(Region->getMemorySpace()))
+ return "the heap area";
+
+ if (isa<StringRegion>(Region))
+ return "the string literal";
+
+ return "the region";
+}
+
+static std::optional<int64_t> getConcreteValue(NonLoc SV) {
+ if (auto ConcreteVal = SV.getAs<nonloc::ConcreteInt>()) {
+ return ConcreteVal->getValue().tryExtValue();
+ }
+ return std::nullopt;
+}
+
+static std::string getShortMsg(OOB_Kind Kind, std::string RegName) {
+ static const char *ShortMsgTemplates[] = {
+ "Out of bound access to memory preceding {0}",
+ "Out of bound access to memory after the end of {0}",
+ "Potential out of bound access to {0} with tainted offset"};
+
+ return formatv(ShortMsgTemplates[Kind], RegName);
+}
+
+static Messages getPrecedesMsgs(const SubRegion *Region, NonLoc Offset) {
+ std::string RegName = getRegionName(Region);
+ SmallString<128> Buf;
+ llvm::raw_svector_ostream Out(Buf);
+ Out << "Access of " << RegName << " at negative byte offset";
+ if (auto ConcreteIdx = Offset.getAs<nonloc::ConcreteInt>())
+ Out << ' ' << ConcreteIdx->getValue();
+ return {getShortMsg(OOB_Precedes, RegName), std::string(Buf)};
+}
+
+static Messages getExceedsMsgs(ASTContext &ACtx, const SubRegion *Region,
+ NonLoc Offset, NonLoc Extent, SVal Location) {
+ std::string RegName = getRegionName(Region);
+ const auto *EReg = Location.getAsRegion()->getAs<ElementRegion>();
+ assert(EReg && "this checker only handles element access");
+ QualType ElemType = EReg->getElementType();
+
+ std::optional<int64_t> OffsetN = getConcreteValue(Offset);
+ std::optional<int64_t> ExtentN = getConcreteValue(Extent);
+
+ bool UseByteOffsets = true;
+ if (int64_t ElemSize = ACtx.getTypeSizeInChars(ElemType).getQuantity()) {
+ const bool OffsetHasRemainder = OffsetN && *OffsetN % ElemSize;
+ const bool ExtentHasRemainder = ExtentN && *ExtentN % ElemSize;
+ if (!OffsetHasRemainder && !ExtentHasRemainder) {
+ UseByteOffsets = false;
+ if (OffsetN)
+ *OffsetN /= ElemSize;
+ if (ExtentN)
+ *ExtentN /= ElemSize;
+ }
+ }
+ SmallString<256> Buf;
+ llvm::raw_svector_ostream Out(Buf);
+ Out << "Access of ";
+ if (!ExtentN && !UseByteOffsets)
+ Out << "'" << ElemType.getAsString() << "' element in ";
+ Out << RegName << " at ";
+ if (OffsetN) {
+ Out << (UseByteOffsets ? "byte offset " : "index ") << *OffsetN;
+ } else {
+ Out << "an overflowing " << (UseByteOffsets ? "byte offset" : "index");
+ }
+ if (ExtentN) {
+ Out << ", while it holds only ";
+ if (*ExtentN != 1)
+ Out << *ExtentN;
+ else
+ Out << "a single";
+ if (UseByteOffsets)
+ Out << " byte";
+ else
+ Out << " '" << ElemType.getAsString() << "' element";
+
+ if (*ExtentN > 1)
+ Out << "s";
+ }
+
+ return {getShortMsg(OOB_Exceeds, RegName), std::string(Buf)};
+}
+
+static Messages getTaintMsgs(const SubRegion *Region, const char *OffsetName) {
+ std::string RegName = getRegionName(Region);
+ return {formatv("Potential out of bound access to {0} with tainted {1}",
+ RegName, OffsetName),
+ formatv("Access of {0} with a tainted {1} that may be too large",
+ RegName, OffsetName)};
+}
+
+void ArrayBoundCheckerV2::performCheck(const Expr *E, CheckerContext &C) const {
// NOTE: Instead of using ProgramState::assumeInBound(), we are prototyping
// some new logic here that reasons directly about memory region extents.
// Once that logic is more mature, we can bring it back to assumeInBound()
@@ -126,230 +328,152 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
// memory access is within the extent of the base region. Since we
// have some flexibility in defining the base region, we can achieve
// various levels of conservatism in our buffer overflow checking.
- ProgramStateRef state = checkerContext.getState();
- SValBuilder &svalBuilder = checkerContext.getSValBuilder();
- const RegionRawOffsetV2 &rawOffset =
- RegionRawOffsetV2::computeOffset(state, svalBuilder, location);
+ const SVal Location = C.getSVal(E);
- if (!rawOffset.getRegion())
+ // The header ctype.h (from e.g. glibc) implements the isXXXXX() macros as
+ // #define isXXXXX(arg) (LOOKUP_TABLE[arg] & BITMASK_FOR_XXXXX)
+ // and incomplete analysis of these leads to false positives. As even
+ // accurate reports would be confusing for the users, just disable reports
+ // from these macros:
+ if (isFromCtypeMacro(E, C.getASTContext()))
return;
- NonLoc rawOffsetVal = rawOffset.getByteOffset();
-
- // CHECK LOWER BOUND: Is byteOffset < extent begin?
- // If so, we are doing a load/store
- // before the first valid offset in the memory region.
-
- SVal extentBegin = computeExtentBegin(svalBuilder, rawOffset.getRegion());
+ ProgramStateRef State = C.getState();
+ SValBuilder &SVB = C.getSValBuilder();
- if (Optional<NonLoc> NV = extentBegin.getAs<NonLoc>()) {
- if (NV->getAs<nonloc::ConcreteInt>()) {
- std::pair<NonLoc, nonloc::ConcreteInt> simplifiedOffsets =
- getSimplifiedOffsets(rawOffset.getByteOffset(),
- NV->castAs<nonloc::ConcreteInt>(),
- svalBuilder);
- rawOffsetVal = simplifiedOffsets.first;
- *NV = simplifiedOffsets.second;
- }
-
- SVal lowerBound = svalBuilder.evalBinOpNN(state, BO_LT, rawOffsetVal, *NV,
- svalBuilder.getConditionType());
+ const std::optional<std::pair<const SubRegion *, NonLoc>> &RawOffset =
+ computeOffset(State, SVB, Location);
- Optional<NonLoc> lowerBoundToCheck = lowerBound.getAs<NonLoc>();
- if (!lowerBoundToCheck)
- return;
-
- ProgramStateRef state_precedesLowerBound, state_withinLowerBound;
- std::tie(state_precedesLowerBound, state_withinLowerBound) =
- state->assume(*lowerBoundToCheck);
+ if (!RawOffset)
+ return;
- // Are we constrained enough to definitely precede the lower bound?
- if (state_precedesLowerBound && !state_withinLowerBound) {
- reportOOB(checkerContext, state_precedesLowerBound, OOB_Precedes);
+ auto [Reg, ByteOffset] = *RawOffset;
+
+ // CHECK LOWER BOUND
+ const MemSpaceRegion *Space = Reg->getMemorySpace();
+ if (!(isa<SymbolicRegion>(Reg) && isa<UnknownSpaceRegion>(Space))) {
+ // A symbolic region in unknown space represents an unknown pointer that
+ // may point into the middle of an array, so we don't look for underflows.
+ // Both conditions are significant because we want to check underflows in
+ // symbolic regions on the heap (which may be introduced by checkers like
+ // MallocChecker that call SValBuilder::getConjuredHeapSymbolVal()) and
+ // non-symbolic regions (e.g. a field subregion of a symbolic region) in
+ // unknown space.
+ auto [PrecedesLowerBound, WithinLowerBound] = compareValueToThreshold(
+ State, ByteOffset, SVB.makeZeroArrayIndex(), SVB);
+
+ if (PrecedesLowerBound && !WithinLowerBound) {
+ // We know that the index definitely precedes the lower bound.
+ Messages Msgs = getPrecedesMsgs(Reg, ByteOffset);
+ reportOOB(C, PrecedesLowerBound, OOB_Precedes, ByteOffset, Msgs);
return;
}
- // Otherwise, assume the constraint of the lower bound.
- assert(state_withinLowerBound);
- state = state_withinLowerBound;
+ if (WithinLowerBound)
+ State = WithinLowerBound;
}
- do {
- // CHECK UPPER BOUND: Is byteOffset >= size(baseRegion)? If so,
- // we are doing a load/store after the last valid offset.
- const MemRegion *MR = rawOffset.getRegion();
- DefinedOrUnknownSVal Size = getDynamicExtent(state, MR, svalBuilder);
- if (!Size.getAs<NonLoc>())
- break;
-
- if (Size.getAs<nonloc::ConcreteInt>()) {
- std::pair<NonLoc, nonloc::ConcreteInt> simplifiedOffsets =
- getSimplifiedOffsets(rawOffset.getByteOffset(),
- Size.castAs<nonloc::ConcreteInt>(), svalBuilder);
- rawOffsetVal = simplifiedOffsets.first;
- Size = simplifiedOffsets.second;
- }
-
- SVal upperbound = svalBuilder.evalBinOpNN(state, BO_GE, rawOffsetVal,
- Size.castAs<NonLoc>(),
- svalBuilder.getConditionType());
-
- Optional<NonLoc> upperboundToCheck = upperbound.getAs<NonLoc>();
- if (!upperboundToCheck)
- break;
-
- ProgramStateRef state_exceedsUpperBound, state_withinUpperBound;
- std::tie(state_exceedsUpperBound, state_withinUpperBound) =
- state->assume(*upperboundToCheck);
-
- // If we are under constrained and the index variables are tainted, report.
- if (state_exceedsUpperBound && state_withinUpperBound) {
- SVal ByteOffset = rawOffset.getByteOffset();
- if (isTainted(state, ByteOffset)) {
- reportOOB(checkerContext, state_exceedsUpperBound, OOB_Tainted,
- std::make_unique<TaintBugVisitor>(ByteOffset));
+ // CHECK UPPER BOUND
+ DefinedOrUnknownSVal Size = getDynamicExtent(State, Reg, SVB);
+ if (auto KnownSize = Size.getAs<NonLoc>()) {
+ auto [WithinUpperBound, ExceedsUpperBound] =
+ compareValueToThreshold(State, ByteOffset, *KnownSize, SVB);
+
+ if (ExceedsUpperBound) {
+ if (!WithinUpperBound) {
+ // We know that the index definitely exceeds the upper bound.
+ if (isa<ArraySubscriptExpr>(E) && isInAddressOf(E, C.getASTContext())) {
+ // ...but this is within an addressof expression, so we need to check
+ // for the exceptional case that `&array[size]` is valid.
+ auto [EqualsToThreshold, NotEqualToThreshold] =
+ compareValueToThreshold(ExceedsUpperBound, ByteOffset, *KnownSize,
+ SVB, /*CheckEquality=*/true);
+ if (EqualsToThreshold && !NotEqualToThreshold) {
+ // We are definitely in the exceptional case, so return early
+ // instead of reporting a bug.
+ C.addTransition(EqualsToThreshold);
+ return;
+ }
+ }
+ Messages Msgs = getExceedsMsgs(C.getASTContext(), Reg, ByteOffset,
+ *KnownSize, Location);
+ reportOOB(C, ExceedsUpperBound, OOB_Exceeds, ByteOffset, Msgs);
+ return;
+ }
+ if (isTainted(State, ByteOffset)) {
+ // Both cases are possible, but the offset is tainted, so report.
+ std::string RegName = getRegionName(Reg);
+
+ // Diagnostic detail: "tainted offset" is always correct, but the
+ // common case is that 'idx' is tainted in 'arr[idx]' and then it's
+ // nicer to say "tainted index".
+ const char *OffsetName = "offset";
+ if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(E))
+ if (isTainted(State, ASE->getIdx(), C.getLocationContext()))
+ OffsetName = "index";
+
+ Messages Msgs = getTaintMsgs(Reg, OffsetName);
+ reportOOB(C, ExceedsUpperBound, OOB_Taint, ByteOffset, Msgs);
return;
}
- } else if (state_exceedsUpperBound) {
- // If we are constrained enough to definitely exceed the upper bound,
- // report.
- assert(!state_withinUpperBound);
- reportOOB(checkerContext, state_exceedsUpperBound, OOB_Excedes);
- return;
}
- assert(state_withinUpperBound);
- state = state_withinUpperBound;
+ if (WithinUpperBound)
+ State = WithinUpperBound;
}
- while (false);
- checkerContext.addTransition(state);
+ C.addTransition(State);
}
-void ArrayBoundCheckerV2::reportOOB(
- CheckerContext &checkerContext, ProgramStateRef errorState, OOB_Kind kind,
- std::unique_ptr<BugReporterVisitor> Visitor) const {
+void ArrayBoundCheckerV2::reportOOB(CheckerContext &C,
+ ProgramStateRef ErrorState, OOB_Kind Kind,
+ NonLoc Offset, Messages Msgs) const {
- ExplodedNode *errorNode = checkerContext.generateErrorNode(errorState);
- if (!errorNode)
+ ExplodedNode *ErrorNode = C.generateErrorNode(ErrorState);
+ if (!ErrorNode)
return;
- if (!BT)
- BT.reset(new BuiltinBug(this, "Out-of-bound access"));
-
- // FIXME: This diagnostics are preliminary. We should get far better
- // diagnostics for explaining buffer overruns.
-
- SmallString<256> buf;
- llvm::raw_svector_ostream os(buf);
- os << "Out of bound memory access ";
- switch (kind) {
- case OOB_Precedes:
- os << "(accessed memory precedes memory block)";
- break;
- case OOB_Excedes:
- os << "(access exceeds upper limit of memory block)";
- break;
- case OOB_Tainted:
- os << "(index is tainted)";
- break;
- }
+ auto BR = std::make_unique<PathSensitiveBugReport>(
+ Kind == OOB_Taint ? TaintBT : BT, Msgs.Short, Msgs.Full, ErrorNode);
- auto BR = std::make_unique<PathSensitiveBugReport>(*BT, os.str(), errorNode);
- BR->addVisitor(std::move(Visitor));
- checkerContext.emitReport(std::move(BR));
-}
+ // Track back the propagation of taintedness.
+ if (Kind == OOB_Taint)
+ for (SymbolRef Sym : getTaintedSymbols(ErrorState, Offset))
+ BR->markInteresting(Sym);
-#ifndef NDEBUG
-LLVM_DUMP_METHOD void RegionRawOffsetV2::dump() const {
- dumpToStream(llvm::errs());
+ C.emitReport(std::move(BR));
}
-void RegionRawOffsetV2::dumpToStream(raw_ostream &os) const {
- os << "raw_offset_v2{" << getRegion() << ',' << getByteOffset() << '}';
-}
-#endif
+bool ArrayBoundCheckerV2::isFromCtypeMacro(const Stmt *S, ASTContext &ACtx) {
+ SourceLocation Loc = S->getBeginLoc();
+ if (!Loc.isMacroID())
+ return false;
-// Lazily computes a value to be used by 'computeOffset'. If 'val'
-// is unknown or undefined, we lazily substitute '0'. Otherwise,
-// return 'val'.
-static inline SVal getValue(SVal val, SValBuilder &svalBuilder) {
- return val.getAs<UndefinedVal>() ? svalBuilder.makeArrayIndex(0) : val;
-}
+ StringRef MacroName = Lexer::getImmediateMacroName(
+ Loc, ACtx.getSourceManager(), ACtx.getLangOpts());
-// Scale a base value by a scaling factor, and return the scaled
-// value as an SVal. Used by 'computeOffset'.
-static inline SVal scaleValue(ProgramStateRef state,
- NonLoc baseVal, CharUnits scaling,
- SValBuilder &sb) {
- return sb.evalBinOpNN(state, BO_Mul, baseVal,
- sb.makeArrayIndex(scaling.getQuantity()),
- sb.getArrayIndexType());
-}
+ if (MacroName.size() < 7 || MacroName[0] != 'i' || MacroName[1] != 's')
+ return false;
-// Add an SVal to another, treating unknown and undefined values as
-// summing to UnknownVal. Used by 'computeOffset'.
-static SVal addValue(ProgramStateRef state, SVal x, SVal y,
- SValBuilder &svalBuilder) {
- // We treat UnknownVals and UndefinedVals the same here because we
- // only care about computing offsets.
- if (x.isUnknownOrUndef() || y.isUnknownOrUndef())
- return UnknownVal();
-
- return svalBuilder.evalBinOpNN(state, BO_Add, x.castAs<NonLoc>(),
- y.castAs<NonLoc>(),
- svalBuilder.getArrayIndexType());
+ return ((MacroName == "isalnum") || (MacroName == "isalpha") ||
+ (MacroName == "isblank") || (MacroName == "isdigit") ||
+ (MacroName == "isgraph") || (MacroName == "islower") ||
+ (MacroName == "isnctrl") || (MacroName == "isprint") ||
+ (MacroName == "ispunct") || (MacroName == "isspace") ||
+ (MacroName == "isupper") || (MacroName == "isxdigit"));
}
-/// Compute a raw byte offset from a base region. Used for array bounds
-/// checking.
-RegionRawOffsetV2 RegionRawOffsetV2::computeOffset(ProgramStateRef state,
- SValBuilder &svalBuilder,
- SVal location)
-{
- const MemRegion *region = location.getAsRegion();
- SVal offset = UndefinedVal();
-
- while (region) {
- switch (region->getKind()) {
- default: {
- if (const SubRegion *subReg = dyn_cast<SubRegion>(region)) {
- offset = getValue(offset, svalBuilder);
- if (!offset.isUnknownOrUndef())
- return RegionRawOffsetV2(subReg, offset);
- }
- return RegionRawOffsetV2();
- }
- case MemRegion::ElementRegionKind: {
- const ElementRegion *elemReg = cast<ElementRegion>(region);
- SVal index = elemReg->getIndex();
- if (!index.getAs<NonLoc>())
- return RegionRawOffsetV2();
- QualType elemType = elemReg->getElementType();
- // If the element is an incomplete type, go no further.
- ASTContext &astContext = svalBuilder.getContext();
- if (elemType->isIncompleteType())
- return RegionRawOffsetV2();
-
- // Update the offset.
- offset = addValue(state,
- getValue(offset, svalBuilder),
- scaleValue(state,
- index.castAs<NonLoc>(),
- astContext.getTypeSizeInChars(elemType),
- svalBuilder),
- svalBuilder);
-
- if (offset.isUnknownOrUndef())
- return RegionRawOffsetV2();
-
- region = elemReg->getSuperRegion();
- continue;
- }
- }
- }
- return RegionRawOffsetV2();
+bool ArrayBoundCheckerV2::isInAddressOf(const Stmt *S, ASTContext &ACtx) {
+ ParentMapContext &ParentCtx = ACtx.getParentMapContext();
+ do {
+ const DynTypedNodeList Parents = ParentCtx.getParents(*S);
+ if (Parents.empty())
+ return false;
+ S = Parents[0].get<Stmt>();
+ } while (isa_and_nonnull<ParenExpr, ImplicitCastExpr>(S));
+ const auto *UnaryOp = dyn_cast_or_null<UnaryOperator>(S);
+ return UnaryOp && UnaryOp->getOpcode() == UO_AddrOf;
}
void ento::registerArrayBoundCheckerV2(CheckerManager &mgr) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
index a86a410ebcbc..c72a97cc01e9 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
@@ -20,18 +19,22 @@
#include "clang/AST/StmtObjC.h"
#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
#include "clang/Analysis/SelectorExtras.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/raw_ostream.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -41,7 +44,7 @@ namespace {
class APIMisuse : public BugType {
public:
APIMisuse(const CheckerBase *checker, const char *name)
- : BugType(checker, name, "API Misuse (Apple)") {}
+ : BugType(checker, name, categories::AppleAPIMisuse) {}
};
} // end anonymous namespace
@@ -93,56 +96,64 @@ static FoundationClass findKnownClass(const ObjCInterfaceDecl *ID,
//===----------------------------------------------------------------------===//
namespace {
- class NilArgChecker : public Checker<check::PreObjCMessage,
- check::PostStmt<ObjCDictionaryLiteral>,
- check::PostStmt<ObjCArrayLiteral> > {
- mutable std::unique_ptr<APIMisuse> BT;
-
- mutable llvm::SmallDenseMap<Selector, unsigned, 16> StringSelectors;
- mutable Selector ArrayWithObjectSel;
- mutable Selector AddObjectSel;
- mutable Selector InsertObjectAtIndexSel;
- mutable Selector ReplaceObjectAtIndexWithObjectSel;
- mutable Selector SetObjectAtIndexedSubscriptSel;
- mutable Selector ArrayByAddingObjectSel;
- mutable Selector DictionaryWithObjectForKeySel;
- mutable Selector SetObjectForKeySel;
- mutable Selector SetObjectForKeyedSubscriptSel;
- mutable Selector RemoveObjectForKeySel;
-
- void warnIfNilExpr(const Expr *E,
- const char *Msg,
- CheckerContext &C) const;
-
- void warnIfNilArg(CheckerContext &C,
- const ObjCMethodCall &msg, unsigned Arg,
- FoundationClass Class,
- bool CanBeSubscript = false) const;
-
- void generateBugReport(ExplodedNode *N,
- StringRef Msg,
- SourceRange Range,
- const Expr *Expr,
- CheckerContext &C) const;
-
- public:
- void checkPreObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
- void checkPostStmt(const ObjCDictionaryLiteral *DL,
- CheckerContext &C) const;
- void checkPostStmt(const ObjCArrayLiteral *AL,
- CheckerContext &C) const;
- };
+class NilArgChecker : public Checker<check::PreObjCMessage,
+ check::PostStmt<ObjCDictionaryLiteral>,
+ check::PostStmt<ObjCArrayLiteral>,
+ EventDispatcher<ImplicitNullDerefEvent>> {
+ mutable std::unique_ptr<APIMisuse> BT;
+
+ mutable llvm::SmallDenseMap<Selector, unsigned, 16> StringSelectors;
+ mutable Selector ArrayWithObjectSel;
+ mutable Selector AddObjectSel;
+ mutable Selector InsertObjectAtIndexSel;
+ mutable Selector ReplaceObjectAtIndexWithObjectSel;
+ mutable Selector SetObjectAtIndexedSubscriptSel;
+ mutable Selector ArrayByAddingObjectSel;
+ mutable Selector DictionaryWithObjectForKeySel;
+ mutable Selector SetObjectForKeySel;
+ mutable Selector SetObjectForKeyedSubscriptSel;
+ mutable Selector RemoveObjectForKeySel;
+
+ void warnIfNilExpr(const Expr *E, const char *Msg, CheckerContext &C) const;
+
+ void warnIfNilArg(CheckerContext &C, const ObjCMethodCall &msg, unsigned Arg,
+ FoundationClass Class, bool CanBeSubscript = false) const;
+
+ void generateBugReport(ExplodedNode *N, StringRef Msg, SourceRange Range,
+ const Expr *Expr, CheckerContext &C) const;
+
+public:
+ void checkPreObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
+ void checkPostStmt(const ObjCDictionaryLiteral *DL, CheckerContext &C) const;
+ void checkPostStmt(const ObjCArrayLiteral *AL, CheckerContext &C) const;
+};
} // end anonymous namespace
void NilArgChecker::warnIfNilExpr(const Expr *E,
const char *Msg,
CheckerContext &C) const {
- ProgramStateRef State = C.getState();
- if (State->isNull(C.getSVal(E)).isConstrainedTrue()) {
+ auto Location = C.getSVal(E).getAs<Loc>();
+ if (!Location)
+ return;
+
+ auto [NonNull, Null] = C.getState()->assume(*Location);
+ // If it's known to be null.
+ if (!NonNull && Null) {
if (ExplodedNode *N = C.generateErrorNode()) {
generateBugReport(N, Msg, E->getSourceRange(), E, C);
+ return;
+ }
+ }
+
+ // If it might be null, assume that it cannot after this operation.
+ if (Null) {
+ // One needs to make sure the pointer is non-null to be used here.
+ if (ExplodedNode *N = C.generateSink(Null, C.getPredecessor())) {
+ dispatchEvent({*Location, /*IsLoad=*/false, N, &C.getBugReporter(),
+ /*IsDirectDereference=*/false});
}
+ C.addTransition(NonNull);
}
}
@@ -341,15 +352,11 @@ void NilArgChecker::checkPostStmt(const ObjCDictionaryLiteral *DL,
namespace {
class CFNumberChecker : public Checker< check::PreStmt<CallExpr> > {
mutable std::unique_ptr<APIMisuse> BT;
- mutable IdentifierInfo *ICreate, *IGetValue;
+ mutable IdentifierInfo *ICreate = nullptr, *IGetValue = nullptr;
public:
- CFNumberChecker() : ICreate(nullptr), IGetValue(nullptr) {}
+ CFNumberChecker() = default;
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
-
-private:
- void EmitError(const TypedRegion* R, const Expr *Ex,
- uint64_t SourceSize, uint64_t TargetSize, uint64_t NumberKind);
};
} // end anonymous namespace
@@ -372,7 +379,7 @@ enum CFNumberType {
kCFNumberCGFloatType = 16
};
-static Optional<uint64_t> GetCFNumberSize(ASTContext &Ctx, uint64_t i) {
+static std::optional<uint64_t> GetCFNumberSize(ASTContext &Ctx, uint64_t i) {
static const unsigned char FixedSize[] = { 8, 16, 32, 64, 32, 64 };
if (i < kCFNumberCharType)
@@ -393,7 +400,7 @@ static Optional<uint64_t> GetCFNumberSize(ASTContext &Ctx, uint64_t i) {
case kCFNumberCGFloatType:
// FIXME: We need a way to map from names to Type*.
default:
- return None;
+ return std::nullopt;
}
return Ctx.getTypeSize(T);
@@ -445,12 +452,13 @@ void CFNumberChecker::checkPreStmt(const CallExpr *CE,
// FIXME: We really should allow ranges of valid theType values, and
// bifurcate the state appropriately.
- Optional<nonloc::ConcreteInt> V = TheTypeVal.getAs<nonloc::ConcreteInt>();
+ std::optional<nonloc::ConcreteInt> V =
+ dyn_cast<nonloc::ConcreteInt>(TheTypeVal);
if (!V)
return;
uint64_t NumberKind = V->getValue().getLimitedValue();
- Optional<uint64_t> OptCFNumberSize = GetCFNumberSize(Ctx, NumberKind);
+ std::optional<uint64_t> OptCFNumberSize = GetCFNumberSize(Ctx, NumberKind);
// FIXME: In some cases we can emit an error.
if (!OptCFNumberSize)
@@ -465,7 +473,7 @@ void CFNumberChecker::checkPreStmt(const CallExpr *CE,
// FIXME: Eventually we should handle arbitrary locations. We can do this
// by having an enhanced memory model that does low-level typing.
- Optional<loc::MemRegionVal> LV = TheValueExpr.getAs<loc::MemRegionVal>();
+ std::optional<loc::MemRegionVal> LV = TheValueExpr.getAs<loc::MemRegionVal>();
if (!LV)
return;
@@ -533,10 +541,12 @@ void CFNumberChecker::checkPreStmt(const CallExpr *CE,
namespace {
class CFRetainReleaseChecker : public Checker<check::PreCall> {
mutable APIMisuse BT{this, "null passed to CF memory management function"};
- CallDescription CFRetain{"CFRetain", 1},
- CFRelease{"CFRelease", 1},
- CFMakeCollectable{"CFMakeCollectable", 1},
- CFAutorelease{"CFAutorelease", 1};
+ const CallDescriptionSet ModelledCalls = {
+ {{"CFRetain"}, 1},
+ {{"CFRelease"}, 1},
+ {{"CFMakeCollectable"}, 1},
+ {{"CFAutorelease"}, 1},
+ };
public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
@@ -550,13 +560,12 @@ void CFRetainReleaseChecker::checkPreCall(const CallEvent &Call,
return;
// Check if we called CFRetain/CFRelease/CFMakeCollectable/CFAutorelease.
- if (!(Call.isCalled(CFRetain) || Call.isCalled(CFRelease) ||
- Call.isCalled(CFMakeCollectable) || Call.isCalled(CFAutorelease)))
+ if (!ModelledCalls.contains(Call))
return;
// Get the argument's value.
SVal ArgVal = Call.getArgSVal(0);
- Optional<DefinedSVal> DefArgVal = ArgVal.getAs<DefinedSVal>();
+ std::optional<DefinedSVal> DefArgVal = ArgVal.getAs<DefinedSVal>();
if (!DefArgVal)
return;
@@ -744,7 +753,7 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
return;
// Verify that all arguments have Objective-C types.
- Optional<ExplodedNode*> errorNode;
+ std::optional<ExplodedNode *> errorNode;
for (unsigned I = variadicArgsBegin; I != variadicArgsEnd; ++I) {
QualType ArgTy = msg.getArgExpr(I)->getType();
@@ -756,7 +765,7 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
continue;
// Ignore pointer constants.
- if (msg.getArgSVal(I).getAs<loc::ConcreteInt>())
+ if (isa<loc::ConcreteInt>(msg.getArgSVal(I)))
continue;
// Ignore pointer types annotated with 'NSObject' attribute.
@@ -768,10 +777,10 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
continue;
// Generate only one error node to use for all bug reports.
- if (!errorNode.hasValue())
+ if (!errorNode)
errorNode = C.generateNonFatalErrorNode();
- if (!errorNode.getValue())
+ if (!*errorNode)
continue;
SmallString<128> sbuf;
@@ -788,8 +797,8 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
ArgTy.print(os, C.getLangOpts());
os << "'";
- auto R = std::make_unique<PathSensitiveBugReport>(*BT, os.str(),
- errorNode.getValue());
+ auto R =
+ std::make_unique<PathSensitiveBugReport>(*BT, os.str(), *errorNode);
R->addRange(msg.getArgSourceRange(I));
C.emitReport(std::move(R));
}
@@ -810,13 +819,13 @@ class ObjCLoopChecker
check::PostObjCMessage,
check::DeadSymbols,
check::PointerEscape > {
- mutable IdentifierInfo *CountSelectorII;
+ mutable IdentifierInfo *CountSelectorII = nullptr;
bool isCollectionCountMethod(const ObjCMethodCall &M,
CheckerContext &C) const;
public:
- ObjCLoopChecker() : CountSelectorII(nullptr) {}
+ ObjCLoopChecker() = default;
void checkPostStmt(const ObjCForCollectionStmt *FCS, CheckerContext &C) const;
void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
@@ -859,7 +868,8 @@ static ProgramStateRef checkCollectionNonNil(CheckerContext &C,
return nullptr;
SVal CollectionVal = C.getSVal(FCS->getCollection());
- Optional<DefinedSVal> KnownCollection = CollectionVal.getAs<DefinedSVal>();
+ std::optional<DefinedSVal> KnownCollection =
+ CollectionVal.getAs<DefinedSVal>();
if (!KnownCollection)
return State;
@@ -891,7 +901,7 @@ static ProgramStateRef checkElementNonNil(CheckerContext &C,
const Stmt *Element = FCS->getElement();
// FIXME: Copied from ExprEngineObjC.
- Optional<Loc> ElementLoc;
+ std::optional<Loc> ElementLoc;
if (const DeclStmt *DS = dyn_cast<DeclStmt>(Element)) {
const VarDecl *ElemDecl = cast<VarDecl>(DS->getSingleDecl());
assert(ElemDecl->getInit() == nullptr);
@@ -905,7 +915,7 @@ static ProgramStateRef checkElementNonNil(CheckerContext &C,
// Go ahead and assume the value is non-nil.
SVal Val = State->getSVal(*ElementLoc);
- return State->assume(Val.castAs<DefinedOrUnknownSVal>(), true);
+ return State->assume(cast<DefinedOrUnknownSVal>(Val), true);
}
/// Returns NULL state if the collection is known to contain elements
@@ -930,8 +940,8 @@ assumeCollectionNonEmpty(CheckerContext &C, ProgramStateRef State,
nonloc::SymbolVal(*CountS),
SvalBuilder.makeIntVal(0, (*CountS)->getType()),
SvalBuilder.getConditionType());
- Optional<DefinedSVal> CountGreaterThanZero =
- CountGreaterThanZeroVal.getAs<DefinedSVal>();
+ std::optional<DefinedSVal> CountGreaterThanZero =
+ CountGreaterThanZeroVal.getAs<DefinedSVal>();
if (!CountGreaterThanZero) {
// The SValBuilder cannot construct a valid SVal for this condition.
// This means we cannot properly reason about it.
@@ -959,14 +969,13 @@ static bool alreadyExecutedAtLeastOneLoopIteration(const ExplodedNode *N,
return false;
ProgramPoint P = N->getLocation();
- if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
+ if (std::optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
return BE->getSrc()->getLoopTarget() == FCS;
}
// Keep looking for a block edge.
- for (ExplodedNode::const_pred_iterator I = N->pred_begin(),
- E = N->pred_end(); I != E; ++I) {
- if (alreadyExecutedAtLeastOneLoopIteration(*I, FCS))
+ for (const ExplodedNode *N : N->preds()) {
+ if (alreadyExecutedAtLeastOneLoopIteration(N, FCS))
return true;
}
@@ -1094,12 +1103,8 @@ ObjCLoopChecker::checkPointerEscape(ProgramStateRef State,
PointerEscapeKind Kind) const {
SymbolRef ImmutableReceiver = getMethodReceiverIfKnownImmutable(Call);
- // Remove the invalidated symbols form the collection count map.
- for (InvalidatedSymbols::const_iterator I = Escaped.begin(),
- E = Escaped.end();
- I != E; ++I) {
- SymbolRef Sym = *I;
-
+ // Remove the invalidated symbols from the collection count map.
+ for (SymbolRef Sym : Escaped) {
// Don't invalidate this symbol's count if we know the method being called
// is declared on an immutable class. This isn't completely correct if the
// receiver is also passed as an argument, but in most uses of NSArray,
@@ -1121,9 +1126,7 @@ void ObjCLoopChecker::checkDeadSymbols(SymbolReaper &SymReaper,
// Remove the dead symbols from the collection count map.
ContainerCountMapTy Tracked = State->get<ContainerCountMap>();
- for (ContainerCountMapTy::iterator I = Tracked.begin(),
- E = Tracked.end(); I != E; ++I) {
- SymbolRef Sym = I->first;
+ for (SymbolRef Sym : llvm::make_first_range(Tracked)) {
if (SymReaper.isDead(Sym)) {
State = State->remove<ContainerCountMap>(Sym);
State = State->remove<ContainerNonEmptyMap>(Sym);
@@ -1142,13 +1145,13 @@ class ObjCNonNilReturnValueChecker
check::PostStmt<ObjCArrayLiteral>,
check::PostStmt<ObjCDictionaryLiteral>,
check::PostStmt<ObjCBoxedExpr> > {
- mutable bool Initialized;
+ mutable bool Initialized = false;
mutable Selector ObjectAtIndex;
mutable Selector ObjectAtIndexedSubscript;
mutable Selector NullSelector;
public:
- ObjCNonNilReturnValueChecker() : Initialized(false) {}
+ ObjCNonNilReturnValueChecker() = default;
ProgramStateRef assumeExprIsNonNull(const Expr *NonNullExpr,
ProgramStateRef State,
@@ -1176,7 +1179,8 @@ ObjCNonNilReturnValueChecker::assumeExprIsNonNull(const Expr *NonNullExpr,
ProgramStateRef State,
CheckerContext &C) const {
SVal Val = C.getSVal(NonNullExpr);
- if (Optional<DefinedOrUnknownSVal> DV = Val.getAs<DefinedOrUnknownSVal>())
+ if (std::optional<DefinedOrUnknownSVal> DV =
+ Val.getAs<DefinedOrUnknownSVal>())
return State->assume(*DV, true);
return State;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BitwiseShiftChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BitwiseShiftChecker.cpp
new file mode 100644
index 000000000000..339927c165fe
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BitwiseShiftChecker.cpp
@@ -0,0 +1,370 @@
+//== BitwiseShiftChecker.cpp ------------------------------------*- C++ -*--==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BitwiseShiftChecker, which is a path-sensitive checker
+// that looks for undefined behavior when the operands of the bitwise shift
+// operators '<<' and '>>' are invalid (negative or too large).
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/Support/FormatVariadic.h"
+#include <memory>
+
+using namespace clang;
+using namespace ento;
+using llvm::formatv;
+
+namespace {
+enum class OperandSide { Left, Right };
+
+using BugReportPtr = std::unique_ptr<PathSensitiveBugReport>;
+
+struct NoteTagTemplate {
+ llvm::StringLiteral SignInfo;
+ llvm::StringLiteral UpperBoundIntro;
+};
+
+constexpr NoteTagTemplate NoteTagTemplates[] = {
+ {"", "right operand of bit shift is less than "},
+ {"left operand of bit shift is non-negative", " and right operand is less than "},
+ {"right operand of bit shift is non-negative", " but less than "},
+ {"both operands of bit shift are non-negative", " and right operand is less than "}
+};
+
+/// An implementation detail class which is introduced to split the checker
+/// logic into several methods while maintaining a consistently updated state
+/// and access to other contextual data.
+class BitwiseShiftValidator {
+ CheckerContext &Ctx;
+ ProgramStateRef FoldedState;
+ const BinaryOperator *const Op;
+ const BugType &BT;
+ const bool PedanticFlag;
+
+ // The following data members are only used for note tag creation:
+ enum { NonNegLeft = 1, NonNegRight = 2 };
+ unsigned NonNegOperands = 0;
+
+ std::optional<unsigned> UpperBoundBitCount = std::nullopt;
+
+public:
+ BitwiseShiftValidator(const BinaryOperator *O, CheckerContext &C,
+ const BugType &B, bool P)
+ : Ctx(C), FoldedState(C.getState()), Op(O), BT(B), PedanticFlag(P) {}
+ void run();
+
+private:
+ const Expr *operandExpr(OperandSide Side) const {
+ return Side == OperandSide::Left ? Op->getLHS() : Op->getRHS();
+ }
+
+ bool shouldPerformPedanticChecks() const {
+ // The pedantic flag has no effect under C++20 because the affected issues
+ // are no longer undefined under that version of the standard.
+ return PedanticFlag && !Ctx.getASTContext().getLangOpts().CPlusPlus20;
+ }
+
+ bool assumeRequirement(OperandSide Side, BinaryOperator::Opcode Cmp, unsigned Limit);
+
+ void recordAssumption(OperandSide Side, BinaryOperator::Opcode Cmp, unsigned Limit);
+ const NoteTag *createNoteTag() const;
+
+ BugReportPtr createBugReport(StringRef ShortMsg, StringRef Msg) const;
+
+ BugReportPtr checkOvershift();
+ BugReportPtr checkOperandNegative(OperandSide Side);
+ BugReportPtr checkLeftShiftOverflow();
+
+ bool isLeftShift() const { return Op->getOpcode() == BO_Shl; }
+ StringRef shiftDir() const { return isLeftShift() ? "left" : "right"; }
+ static StringRef pluralSuffix(unsigned n) { return n <= 1 ? "" : "s"; }
+ static StringRef verbSuffix(unsigned n) { return n <= 1 ? "s" : ""; }
+};
+
+void BitwiseShiftValidator::run() {
+ // Report a bug if the right operand is >= the bit width of the type of the
+ // left operand:
+ if (BugReportPtr BR = checkOvershift()) {
+ Ctx.emitReport(std::move(BR));
+ return;
+ }
+
+ // Report a bug if the right operand is negative:
+ if (BugReportPtr BR = checkOperandNegative(OperandSide::Right)) {
+ Ctx.emitReport(std::move(BR));
+ return;
+ }
+
+ if (shouldPerformPedanticChecks()) {
+ // Report a bug if the left operand is negative:
+ if (BugReportPtr BR = checkOperandNegative(OperandSide::Left)) {
+ Ctx.emitReport(std::move(BR));
+ return;
+ }
+
+ // Report a bug when left shift of a concrete signed value overflows:
+ if (BugReportPtr BR = checkLeftShiftOverflow()) {
+ Ctx.emitReport(std::move(BR));
+ return;
+ }
+ }
+
+ // No bugs detected, update the state and add a single note tag which
+ // summarizes the new assumptions.
+ Ctx.addTransition(FoldedState, createNoteTag());
+}
+
+/// This method checks a requirement that must be satisfied by the value on the
+/// given Side of a bitwise shift operator in well-defined code. If the
+/// requirement is incompatible with prior knowledge, this method reports
+/// failure by returning false.
+bool BitwiseShiftValidator::assumeRequirement(OperandSide Side,
+ BinaryOperator::Opcode Comparison,
+ unsigned Limit) {
+ SValBuilder &SVB = Ctx.getSValBuilder();
+
+ const SVal OperandVal = Ctx.getSVal(operandExpr(Side));
+ const auto LimitVal = SVB.makeIntVal(Limit, Ctx.getASTContext().IntTy);
+ // Note that the type of `LimitVal` must be a signed, because otherwise a
+ // negative `Val` could be converted to a large positive value.
+
+ auto ResultVal = SVB.evalBinOp(FoldedState, Comparison, OperandVal, LimitVal,
+ SVB.getConditionType());
+ if (auto DURes = ResultVal.getAs<DefinedOrUnknownSVal>()) {
+ auto [StTrue, StFalse] = FoldedState->assume(DURes.value());
+ if (!StTrue) {
+ // We detected undefined behavior (the caller will report it).
+ FoldedState = StFalse;
+ return false;
+ }
+ // The code may be valid, so let's assume that it's valid:
+ FoldedState = StTrue;
+ if (StFalse) {
+ // Record note tag data for the assumption that we made
+ recordAssumption(Side, Comparison, Limit);
+ }
+ }
+ return true;
+}
+
+BugReportPtr BitwiseShiftValidator::checkOvershift() {
+ const QualType LHSTy = Op->getLHS()->getType();
+ const unsigned LHSBitWidth = Ctx.getASTContext().getIntWidth(LHSTy);
+
+ if (assumeRequirement(OperandSide::Right, BO_LT, LHSBitWidth))
+ return nullptr;
+
+ const SVal Right = Ctx.getSVal(operandExpr(OperandSide::Right));
+
+ std::string RightOpStr = "", LowerBoundStr = "";
+ if (auto ConcreteRight = Right.getAs<nonloc::ConcreteInt>())
+ RightOpStr = formatv(" '{0}'", ConcreteRight->getValue());
+ else {
+ SValBuilder &SVB = Ctx.getSValBuilder();
+ if (const llvm::APSInt *MinRight = SVB.getMinValue(FoldedState, Right)) {
+ LowerBoundStr = formatv(" >= {0},", MinRight->getExtValue());
+ }
+ }
+
+ std::string ShortMsg = formatv(
+ "{0} shift{1}{2} overflows the capacity of '{3}'",
+ isLeftShift() ? "Left" : "Right", RightOpStr.empty() ? "" : " by",
+ RightOpStr, LHSTy.getAsString());
+ std::string Msg = formatv(
+ "The result of {0} shift is undefined because the right "
+ "operand{1} is{2} not smaller than {3}, the capacity of '{4}'",
+ shiftDir(), RightOpStr, LowerBoundStr, LHSBitWidth, LHSTy.getAsString());
+ return createBugReport(ShortMsg, Msg);
+}
+
+// Before C++20, at 5.8 [expr.shift] (N4296, 2014-11-19) the standard says
+// 1. "... The behaviour is undefined if the right operand is negative..."
+// 2. "The value of E1 << E2 ...
+// if E1 has a signed type and non-negative value ...
+// otherwise, the behavior is undefined."
+// 3. "The value of E1 >> E2 ...
+// If E1 has a signed type and a negative value,
+// the resulting value is implementation-defined."
+// However, negative left arguments work in practice and the C++20 standard
+// eliminates conditions 2 and 3.
+BugReportPtr BitwiseShiftValidator::checkOperandNegative(OperandSide Side) {
+ // If the type is unsigned, it cannot be negative
+ if (!operandExpr(Side)->getType()->isSignedIntegerType())
+ return nullptr;
+
+ // Main check: determine whether the operand is constrained to be negative
+ if (assumeRequirement(Side, BO_GE, 0))
+ return nullptr;
+
+ std::string ShortMsg = formatv("{0} operand is negative in {1} shift",
+ Side == OperandSide::Left ? "Left" : "Right",
+ shiftDir())
+ .str();
+ std::string Msg = formatv("The result of {0} shift is undefined "
+ "because the {1} operand is negative",
+ shiftDir(),
+ Side == OperandSide::Left ? "left" : "right")
+ .str();
+
+ return createBugReport(ShortMsg, Msg);
+}
+
+BugReportPtr BitwiseShiftValidator::checkLeftShiftOverflow() {
+ // A right shift cannot be an overflowing left shift...
+ if (!isLeftShift())
+ return nullptr;
+
+ // In C++ it's well-defined to shift to the sign bit. In C however, it's UB.
+ // 5.8.2 [expr.shift] (N4296, 2014-11-19)
+ const bool ShouldPreserveSignBit = !Ctx.getLangOpts().CPlusPlus;
+
+ const Expr *LHS = operandExpr(OperandSide::Left);
+ const QualType LHSTy = LHS->getType();
+ const unsigned LeftBitWidth = Ctx.getASTContext().getIntWidth(LHSTy);
+ assert(LeftBitWidth > 0);
+
+ // Quote "For unsigned lhs, the value of LHS << RHS is the value of LHS *
+ // 2^RHS, reduced modulo maximum value of the return type plus 1."
+ if (LHSTy->isUnsignedIntegerType())
+ return nullptr;
+
+ // We only support concrete integers as left operand.
+ const auto Left = Ctx.getSVal(LHS).getAs<nonloc::ConcreteInt>();
+ if (!Left.has_value())
+ return nullptr;
+
+ // We should have already reported a bug if the left operand of the shift was
+ // negative, so it cannot be negative here.
+ assert(Left->getValue().isNonNegative());
+
+ const unsigned LeftAvailableBitWidth =
+ LeftBitWidth - static_cast<unsigned>(ShouldPreserveSignBit);
+ const unsigned UsedBitsInLeftOperand = Left->getValue().getActiveBits();
+ assert(LeftBitWidth >= UsedBitsInLeftOperand);
+ const unsigned MaximalAllowedShift =
+ LeftAvailableBitWidth - UsedBitsInLeftOperand;
+
+ if (assumeRequirement(OperandSide::Right, BO_LT, MaximalAllowedShift + 1))
+ return nullptr;
+
+ const std::string CapacityMsg =
+ formatv("because '{0}' can hold only {1} bits ({2} the sign bit)",
+ LHSTy.getAsString(), LeftAvailableBitWidth,
+ ShouldPreserveSignBit ? "excluding" : "including");
+
+ const SVal Right = Ctx.getSVal(Op->getRHS());
+
+ std::string ShortMsg, Msg;
+ if (const auto ConcreteRight = Right.getAs<nonloc::ConcreteInt>()) {
+ // Here ConcreteRight must contain a small non-negative integer, because
+ // otherwise one of the earlier checks should've reported a bug.
+ const unsigned RHS = ConcreteRight->getValue().getExtValue();
+ assert(RHS > MaximalAllowedShift);
+ const unsigned OverflownBits = RHS - MaximalAllowedShift;
+ ShortMsg = formatv(
+ "The shift '{0} << {1}' overflows the capacity of '{2}'",
+ Left->getValue(), ConcreteRight->getValue(), LHSTy.getAsString());
+ Msg = formatv(
+ "The shift '{0} << {1}' is undefined {2}, so {3} bit{4} overflow{5}",
+ Left->getValue(), ConcreteRight->getValue(), CapacityMsg, OverflownBits,
+ pluralSuffix(OverflownBits), verbSuffix(OverflownBits));
+ } else {
+ ShortMsg = formatv("Left shift of '{0}' overflows the capacity of '{1}'",
+ Left->getValue(), LHSTy.getAsString());
+ Msg = formatv(
+ "Left shift of '{0}' is undefined {1}, so some bits overflow",
+ Left->getValue(), CapacityMsg);
+ }
+
+ return createBugReport(ShortMsg, Msg);
+}
+
+void BitwiseShiftValidator::recordAssumption(OperandSide Side,
+ BinaryOperator::Opcode Comparison,
+ unsigned Limit) {
+ switch (Comparison) {
+ case BO_GE:
+ assert(Limit == 0);
+ NonNegOperands |= (Side == OperandSide::Left ? NonNegLeft : NonNegRight);
+ break;
+ case BO_LT:
+ assert(Side == OperandSide::Right);
+ if (!UpperBoundBitCount || Limit < UpperBoundBitCount.value())
+ UpperBoundBitCount = Limit;
+ break;
+ default:
+ llvm_unreachable("this checker does not use other comparison operators");
+ }
+}
+
+const NoteTag *BitwiseShiftValidator::createNoteTag() const {
+ if (!NonNegOperands && !UpperBoundBitCount)
+ return nullptr;
+
+ SmallString<128> Buf;
+ llvm::raw_svector_ostream Out(Buf);
+ Out << "Assuming ";
+ NoteTagTemplate Templ = NoteTagTemplates[NonNegOperands];
+ Out << Templ.SignInfo;
+ if (UpperBoundBitCount)
+ Out << Templ.UpperBoundIntro << UpperBoundBitCount.value();
+ const std::string Msg(Out.str());
+
+ return Ctx.getNoteTag(Msg, /*isPrunable=*/true);
+}
+
+std::unique_ptr<PathSensitiveBugReport>
+BitwiseShiftValidator::createBugReport(StringRef ShortMsg, StringRef Msg) const {
+ ProgramStateRef State = Ctx.getState();
+ if (ExplodedNode *ErrNode = Ctx.generateErrorNode(State)) {
+ auto BR =
+ std::make_unique<PathSensitiveBugReport>(BT, ShortMsg, Msg, ErrNode);
+ bugreporter::trackExpressionValue(ErrNode, Op->getLHS(), *BR);
+ bugreporter::trackExpressionValue(ErrNode, Op->getRHS(), *BR);
+ return BR;
+ }
+ return nullptr;
+}
+} // anonymous namespace
+
+class BitwiseShiftChecker : public Checker<check::PreStmt<BinaryOperator>> {
+ BugType BT{this, "Bitwise shift", "Suspicious operation"};
+
+public:
+ void checkPreStmt(const BinaryOperator *B, CheckerContext &Ctx) const {
+ BinaryOperator::Opcode Op = B->getOpcode();
+
+ if (Op != BO_Shl && Op != BO_Shr)
+ return;
+
+ BitwiseShiftValidator(B, Ctx, BT, Pedantic).run();
+ }
+
+ bool Pedantic = false;
+};
+
+void ento::registerBitwiseShiftChecker(CheckerManager &Mgr) {
+ auto *Chk = Mgr.registerChecker<BitwiseShiftChecker>();
+ const AnalyzerOptions &Opts = Mgr.getAnalyzerOptions();
+ Chk->Pedantic = Opts.getCheckerBooleanOption(Chk, "Pedantic");
+}
+
+bool ento::shouldRegisterBitwiseShiftChecker(const CheckerManager &mgr) {
+ return true;
+}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
index 2752b37f9b3f..66e080adb138 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
@@ -17,6 +17,7 @@
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
@@ -24,20 +25,31 @@ using namespace clang;
using namespace ento;
namespace {
-
class BlockInCriticalSectionChecker : public Checker<check::PostCall> {
-
- mutable IdentifierInfo *IILockGuard, *IIUniqueLock;
-
- CallDescription LockFn, UnlockFn, SleepFn, GetcFn, FgetsFn, ReadFn, RecvFn,
- PthreadLockFn, PthreadTryLockFn, PthreadUnlockFn,
- MtxLock, MtxTimedLock, MtxTryLock, MtxUnlock;
-
- StringRef ClassLockGuard, ClassUniqueLock;
-
- mutable bool IdentifierInfoInitialized;
-
- std::unique_ptr<BugType> BlockInCritSectionBugType;
+ mutable IdentifierInfo *IILockGuard = nullptr;
+ mutable IdentifierInfo *IIUniqueLock = nullptr;
+ mutable bool IdentifierInfoInitialized = false;
+
+ const CallDescription LockFn{{"lock"}};
+ const CallDescription UnlockFn{{"unlock"}};
+ const CallDescription SleepFn{{"sleep"}};
+ const CallDescription GetcFn{{"getc"}};
+ const CallDescription FgetsFn{{"fgets"}};
+ const CallDescription ReadFn{{"read"}};
+ const CallDescription RecvFn{{"recv"}};
+ const CallDescription PthreadLockFn{{"pthread_mutex_lock"}};
+ const CallDescription PthreadTryLockFn{{"pthread_mutex_trylock"}};
+ const CallDescription PthreadUnlockFn{{"pthread_mutex_unlock"}};
+ const CallDescription MtxLock{{"mtx_lock"}};
+ const CallDescription MtxTimedLock{{"mtx_timedlock"}};
+ const CallDescription MtxTryLock{{"mtx_trylock"}};
+ const CallDescription MtxUnlock{{"mtx_unlock"}};
+
+ const llvm::StringLiteral ClassLockGuard{"lock_guard"};
+ const llvm::StringLiteral ClassUniqueLock{"unique_lock"};
+
+ const BugType BlockInCritSectionBugType{
+ this, "Call to blocking function in critical section", "Blocking Error"};
void initIdentifierInfo(ASTContext &Ctx) const;
@@ -46,8 +58,6 @@ class BlockInCriticalSectionChecker : public Checker<check::PostCall> {
CheckerContext &C) const;
public:
- BlockInCriticalSectionChecker();
-
bool isBlockingFunction(const CallEvent &Call) const;
bool isLockFunction(const CallEvent &Call) const;
bool isUnlockFunction(const CallEvent &Call) const;
@@ -62,26 +72,6 @@ public:
REGISTER_TRAIT_WITH_PROGRAMSTATE(MutexCounter, unsigned)
-BlockInCriticalSectionChecker::BlockInCriticalSectionChecker()
- : IILockGuard(nullptr), IIUniqueLock(nullptr),
- LockFn("lock"), UnlockFn("unlock"), SleepFn("sleep"), GetcFn("getc"),
- FgetsFn("fgets"), ReadFn("read"), RecvFn("recv"),
- PthreadLockFn("pthread_mutex_lock"),
- PthreadTryLockFn("pthread_mutex_trylock"),
- PthreadUnlockFn("pthread_mutex_unlock"),
- MtxLock("mtx_lock"),
- MtxTimedLock("mtx_timedlock"),
- MtxTryLock("mtx_trylock"),
- MtxUnlock("mtx_unlock"),
- ClassLockGuard("lock_guard"),
- ClassUniqueLock("unique_lock"),
- IdentifierInfoInitialized(false) {
- // Initialize the bug type.
- BlockInCritSectionBugType.reset(
- new BugType(this, "Call to blocking function in critical section",
- "Blocking Error"));
-}
-
void BlockInCriticalSectionChecker::initIdentifierInfo(ASTContext &Ctx) const {
if (!IdentifierInfoInitialized) {
/* In case of checking C code, or when the corresponding headers are not
@@ -96,14 +86,7 @@ void BlockInCriticalSectionChecker::initIdentifierInfo(ASTContext &Ctx) const {
}
bool BlockInCriticalSectionChecker::isBlockingFunction(const CallEvent &Call) const {
- if (Call.isCalled(SleepFn)
- || Call.isCalled(GetcFn)
- || Call.isCalled(FgetsFn)
- || Call.isCalled(ReadFn)
- || Call.isCalled(RecvFn)) {
- return true;
- }
- return false;
+ return matchesAny(Call, SleepFn, GetcFn, FgetsFn, ReadFn, RecvFn);
}
bool BlockInCriticalSectionChecker::isLockFunction(const CallEvent &Call) const {
@@ -113,15 +96,8 @@ bool BlockInCriticalSectionChecker::isLockFunction(const CallEvent &Call) const
return true;
}
- if (Call.isCalled(LockFn)
- || Call.isCalled(PthreadLockFn)
- || Call.isCalled(PthreadTryLockFn)
- || Call.isCalled(MtxLock)
- || Call.isCalled(MtxTimedLock)
- || Call.isCalled(MtxTryLock)) {
- return true;
- }
- return false;
+ return matchesAny(Call, LockFn, PthreadLockFn, PthreadTryLockFn, MtxLock,
+ MtxTimedLock, MtxTryLock);
}
bool BlockInCriticalSectionChecker::isUnlockFunction(const CallEvent &Call) const {
@@ -132,12 +108,7 @@ bool BlockInCriticalSectionChecker::isUnlockFunction(const CallEvent &Call) cons
return true;
}
- if (Call.isCalled(UnlockFn)
- || Call.isCalled(PthreadUnlockFn)
- || Call.isCalled(MtxUnlock)) {
- return true;
- }
- return false;
+ return matchesAny(Call, UnlockFn, PthreadUnlockFn, MtxUnlock);
}
void BlockInCriticalSectionChecker::checkPostCall(const CallEvent &Call,
@@ -173,7 +144,7 @@ void BlockInCriticalSectionChecker::reportBlockInCritSection(
llvm::raw_string_ostream os(msg);
os << "Call to blocking function '" << Call.getCalleeIdentifier()->getName()
<< "' inside of critical section";
- auto R = std::make_unique<PathSensitiveBugReport>(*BlockInCritSectionBugType,
+ auto R = std::make_unique<PathSensitiveBugReport>(BlockInCritSectionBugType,
os.str(), ErrNode);
R->addRange(Call.getSourceRange());
R->markInteresting(BlockDescSym);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
index 6c0caf3c4e78..a09db6d2d0ec 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
@@ -12,31 +12,33 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Checkers/Taint.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include <optional>
using namespace clang;
using namespace ento;
namespace {
class BoolAssignmentChecker : public Checker< check::Bind > {
- mutable std::unique_ptr<BuiltinBug> BT;
- void emitReport(ProgramStateRef state, CheckerContext &C) const;
+ const BugType BT{this, "Assignment of a non-Boolean value"};
+ void emitReport(ProgramStateRef state, CheckerContext &C,
+ bool IsTainted = false) const;
+
public:
void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
};
} // end anonymous namespace
-void BoolAssignmentChecker::emitReport(ProgramStateRef state,
- CheckerContext &C) const {
+void BoolAssignmentChecker::emitReport(ProgramStateRef state, CheckerContext &C,
+ bool IsTainted) const {
if (ExplodedNode *N = C.generateNonFatalErrorNode(state)) {
- if (!BT)
- BT.reset(new BuiltinBug(this, "Assignment of a non-Boolean value"));
-
- C.emitReport(
- std::make_unique<PathSensitiveBugReport>(*BT, BT->getDescription(), N));
+ StringRef Msg = IsTainted ? "Might assign a tainted non-Boolean value"
+ : "Assignment of a non-Boolean value";
+ C.emitReport(std::make_unique<PathSensitiveBugReport>(BT, Msg, N));
}
}
@@ -70,7 +72,7 @@ void BoolAssignmentChecker::checkBind(SVal loc, SVal val, const Stmt *S,
// Get the value of the right-hand side. We only care about values
// that are defined (UnknownVals and UndefinedVals are handled by other
// checkers).
- Optional<NonLoc> NV = val.getAs<NonLoc>();
+ std::optional<NonLoc> NV = val.getAs<NonLoc>();
if (!NV)
return;
@@ -90,6 +92,8 @@ void BoolAssignmentChecker::checkBind(SVal loc, SVal val, const Stmt *S,
if (!StIn)
emitReport(StOut, C);
+ if (StIn && StOut && taint::isTainted(state, *NV))
+ emitReport(StOut, C, /*IsTainted=*/true);
}
void ento::registerBoolAssignmentChecker(CheckerManager &mgr) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
index 13781b336426..61521c259ca9 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
@@ -66,7 +66,8 @@ bool BuiltinFunctionChecker::evalCall(const CallEvent &Call,
case Builtin::BI__builtin_expect:
case Builtin::BI__builtin_expect_with_probability:
case Builtin::BI__builtin_assume_aligned:
- case Builtin::BI__builtin_addressof: {
+ case Builtin::BI__builtin_addressof:
+ case Builtin::BI__builtin_function_start: {
// For __builtin_unpredictable, __builtin_expect,
// __builtin_expect_with_probability and __builtin_assume_aligned,
// just return the value of the subexpression.
@@ -80,22 +81,20 @@ bool BuiltinFunctionChecker::evalCall(const CallEvent &Call,
case Builtin::BI__builtin_alloca_with_align:
case Builtin::BI__builtin_alloca: {
- // FIXME: Refactor into StoreManager itself?
- MemRegionManager& RM = C.getStoreManager().getRegionManager();
- const AllocaRegion* R =
- RM.getAllocaRegion(CE, C.blockCount(), C.getLocationContext());
-
- // Set the extent of the region in bytes. This enables us to use the
- // SVal of the argument directly. If we save the extent in bits, we
- // cannot represent values like symbol*8.
- auto Size = Call.getArgSVal(0);
- if (Size.isUndef())
- return true; // Return true to model purity.
-
- state = setDynamicExtent(state, R, Size.castAs<DefinedOrUnknownSVal>(),
- C.getSValBuilder());
+ SValBuilder &SVB = C.getSValBuilder();
+ const loc::MemRegionVal R =
+ SVB.getAllocaRegionVal(CE, C.getLocationContext(), C.blockCount());
- C.addTransition(state->BindExpr(CE, LCtx, loc::MemRegionVal(R)));
+ // Set the extent of the region in bytes. This enables us to use the SVal
+ // of the argument directly. If we saved the extent in bits, it'd be more
+ // difficult to reason about values like symbol*8.
+ auto Size = Call.getArgSVal(0);
+ if (auto DefSize = Size.getAs<DefinedOrUnknownSVal>()) {
+ // This `getAs()` is mostly paranoia, because core.CallAndMessage reports
+ // undefined function arguments (unless it's disabled somehow).
+ state = setDynamicExtent(state, R.getRegion(), *DefSize, SVB);
+ }
+ C.addTransition(state->BindExpr(CE, LCtx, R));
return true;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index 69b90be9aa7e..b7b64c3da4f6 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -12,11 +12,13 @@
//===----------------------------------------------------------------------===//
#include "InterCheckerAPI.h"
+#include "clang/Basic/Builtins.h"
#include "clang/Basic/CharInfo.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
@@ -25,30 +27,21 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
+#include <functional>
+#include <optional>
using namespace clang;
using namespace ento;
+using namespace std::placeholders;
namespace {
struct AnyArgExpr {
- // FIXME: Remove constructor in C++17 to turn it into an aggregate.
- AnyArgExpr(const Expr *Expression, unsigned ArgumentIndex)
- : Expression{Expression}, ArgumentIndex{ArgumentIndex} {}
const Expr *Expression;
unsigned ArgumentIndex;
};
-
-struct SourceArgExpr : AnyArgExpr {
- using AnyArgExpr::AnyArgExpr; // FIXME: Remove using in C++17.
-};
-
-struct DestinationArgExpr : AnyArgExpr {
- using AnyArgExpr::AnyArgExpr; // FIXME: Same.
-};
-
-struct SizeArgExpr : AnyArgExpr {
- using AnyArgExpr::AnyArgExpr; // FIXME: Same.
-};
+struct SourceArgExpr : AnyArgExpr {};
+struct DestinationArgExpr : AnyArgExpr {};
+struct SizeArgExpr : AnyArgExpr {};
using ErrorMessage = SmallString<128>;
enum class AccessKind { write, read };
@@ -72,6 +65,16 @@ static ErrorMessage createOutOfBoundErrorMsg(StringRef FunctionDescription,
}
enum class ConcatFnKind { none = 0, strcat = 1, strlcat = 2 };
+
+enum class CharKind { Regular = 0, Wide };
+constexpr CharKind CK_Regular = CharKind::Regular;
+constexpr CharKind CK_Wide = CharKind::Wide;
+
+static QualType getCharPtrType(ASTContext &Ctx, CharKind CK) {
+ return Ctx.getPointerType(CK == CharKind::Regular ? Ctx.CharTy
+ : Ctx.WideCharTy);
+}
+
class CStringChecker : public Checker< eval::Call,
check::PreStmt<DeclStmt>,
check::LiveSymbols,
@@ -79,23 +82,25 @@ class CStringChecker : public Checker< eval::Call,
check::RegionChanges
> {
mutable std::unique_ptr<BugType> BT_Null, BT_Bounds, BT_Overlap,
- BT_NotCString, BT_AdditionOverflow;
+ BT_NotCString, BT_AdditionOverflow, BT_UninitRead;
- mutable const char *CurrentFunctionDescription;
+ mutable const char *CurrentFunctionDescription = nullptr;
public:
/// The filter is used to filter out the diagnostics which are not enabled by
/// the user.
struct CStringChecksFilter {
- DefaultBool CheckCStringNullArg;
- DefaultBool CheckCStringOutOfBounds;
- DefaultBool CheckCStringBufferOverlap;
- DefaultBool CheckCStringNotNullTerm;
+ bool CheckCStringNullArg = false;
+ bool CheckCStringOutOfBounds = false;
+ bool CheckCStringBufferOverlap = false;
+ bool CheckCStringNotNullTerm = false;
+ bool CheckCStringUninitializedRead = false;
CheckerNameRef CheckNameCStringNullArg;
CheckerNameRef CheckNameCStringOutOfBounds;
CheckerNameRef CheckNameCStringBufferOverlap;
CheckerNameRef CheckNameCStringNotNullTerm;
+ CheckerNameRef CheckNameCStringUninitializedRead;
};
CStringChecksFilter Filter;
@@ -115,33 +120,52 @@ public:
const LocationContext *LCtx,
const CallEvent *Call) const;
- typedef void (CStringChecker::*FnCheck)(CheckerContext &,
- const CallExpr *) const;
+ using FnCheck = std::function<void(const CStringChecker *, CheckerContext &,
+ const CallEvent &)>;
+
CallDescriptionMap<FnCheck> Callbacks = {
- {{CDF_MaybeBuiltin, "memcpy", 3}, &CStringChecker::evalMemcpy},
- {{CDF_MaybeBuiltin, "mempcpy", 3}, &CStringChecker::evalMempcpy},
- {{CDF_MaybeBuiltin, "memcmp", 3}, &CStringChecker::evalMemcmp},
- {{CDF_MaybeBuiltin, "memmove", 3}, &CStringChecker::evalMemmove},
- {{CDF_MaybeBuiltin, "memset", 3}, &CStringChecker::evalMemset},
- {{CDF_MaybeBuiltin, "explicit_memset", 3}, &CStringChecker::evalMemset},
- {{CDF_MaybeBuiltin, "strcpy", 2}, &CStringChecker::evalStrcpy},
- {{CDF_MaybeBuiltin, "strncpy", 3}, &CStringChecker::evalStrncpy},
- {{CDF_MaybeBuiltin, "stpcpy", 2}, &CStringChecker::evalStpcpy},
- {{CDF_MaybeBuiltin, "strlcpy", 3}, &CStringChecker::evalStrlcpy},
- {{CDF_MaybeBuiltin, "strcat", 2}, &CStringChecker::evalStrcat},
- {{CDF_MaybeBuiltin, "strncat", 3}, &CStringChecker::evalStrncat},
- {{CDF_MaybeBuiltin, "strlcat", 3}, &CStringChecker::evalStrlcat},
- {{CDF_MaybeBuiltin, "strlen", 1}, &CStringChecker::evalstrLength},
- {{CDF_MaybeBuiltin, "strnlen", 2}, &CStringChecker::evalstrnLength},
- {{CDF_MaybeBuiltin, "strcmp", 2}, &CStringChecker::evalStrcmp},
- {{CDF_MaybeBuiltin, "strncmp", 3}, &CStringChecker::evalStrncmp},
- {{CDF_MaybeBuiltin, "strcasecmp", 2}, &CStringChecker::evalStrcasecmp},
- {{CDF_MaybeBuiltin, "strncasecmp", 3}, &CStringChecker::evalStrncasecmp},
- {{CDF_MaybeBuiltin, "strsep", 2}, &CStringChecker::evalStrsep},
- {{CDF_MaybeBuiltin, "bcopy", 3}, &CStringChecker::evalBcopy},
- {{CDF_MaybeBuiltin, "bcmp", 3}, &CStringChecker::evalMemcmp},
- {{CDF_MaybeBuiltin, "bzero", 2}, &CStringChecker::evalBzero},
- {{CDF_MaybeBuiltin, "explicit_bzero", 2}, &CStringChecker::evalBzero},
+ {{CDF_MaybeBuiltin, {"memcpy"}, 3},
+ std::bind(&CStringChecker::evalMemcpy, _1, _2, _3, CK_Regular)},
+ {{CDF_MaybeBuiltin, {"wmemcpy"}, 3},
+ std::bind(&CStringChecker::evalMemcpy, _1, _2, _3, CK_Wide)},
+ {{CDF_MaybeBuiltin, {"mempcpy"}, 3},
+ std::bind(&CStringChecker::evalMempcpy, _1, _2, _3, CK_Regular)},
+ {{CDF_None, {"wmempcpy"}, 3},
+ std::bind(&CStringChecker::evalMempcpy, _1, _2, _3, CK_Wide)},
+ {{CDF_MaybeBuiltin, {"memcmp"}, 3},
+ std::bind(&CStringChecker::evalMemcmp, _1, _2, _3, CK_Regular)},
+ {{CDF_MaybeBuiltin, {"wmemcmp"}, 3},
+ std::bind(&CStringChecker::evalMemcmp, _1, _2, _3, CK_Wide)},
+ {{CDF_MaybeBuiltin, {"memmove"}, 3},
+ std::bind(&CStringChecker::evalMemmove, _1, _2, _3, CK_Regular)},
+ {{CDF_MaybeBuiltin, {"wmemmove"}, 3},
+ std::bind(&CStringChecker::evalMemmove, _1, _2, _3, CK_Wide)},
+ {{CDF_MaybeBuiltin, {"memset"}, 3}, &CStringChecker::evalMemset},
+ {{CDF_MaybeBuiltin, {"explicit_memset"}, 3}, &CStringChecker::evalMemset},
+ {{CDF_MaybeBuiltin, {"strcpy"}, 2}, &CStringChecker::evalStrcpy},
+ {{CDF_MaybeBuiltin, {"strncpy"}, 3}, &CStringChecker::evalStrncpy},
+ {{CDF_MaybeBuiltin, {"stpcpy"}, 2}, &CStringChecker::evalStpcpy},
+ {{CDF_MaybeBuiltin, {"strlcpy"}, 3}, &CStringChecker::evalStrlcpy},
+ {{CDF_MaybeBuiltin, {"strcat"}, 2}, &CStringChecker::evalStrcat},
+ {{CDF_MaybeBuiltin, {"strncat"}, 3}, &CStringChecker::evalStrncat},
+ {{CDF_MaybeBuiltin, {"strlcat"}, 3}, &CStringChecker::evalStrlcat},
+ {{CDF_MaybeBuiltin, {"strlen"}, 1}, &CStringChecker::evalstrLength},
+ {{CDF_MaybeBuiltin, {"wcslen"}, 1}, &CStringChecker::evalstrLength},
+ {{CDF_MaybeBuiltin, {"strnlen"}, 2}, &CStringChecker::evalstrnLength},
+ {{CDF_MaybeBuiltin, {"wcsnlen"}, 2}, &CStringChecker::evalstrnLength},
+ {{CDF_MaybeBuiltin, {"strcmp"}, 2}, &CStringChecker::evalStrcmp},
+ {{CDF_MaybeBuiltin, {"strncmp"}, 3}, &CStringChecker::evalStrncmp},
+ {{CDF_MaybeBuiltin, {"strcasecmp"}, 2}, &CStringChecker::evalStrcasecmp},
+ {{CDF_MaybeBuiltin, {"strncasecmp"}, 3},
+ &CStringChecker::evalStrncasecmp},
+ {{CDF_MaybeBuiltin, {"strsep"}, 2}, &CStringChecker::evalStrsep},
+ {{CDF_MaybeBuiltin, {"bcopy"}, 3}, &CStringChecker::evalBcopy},
+ {{CDF_MaybeBuiltin, {"bcmp"}, 3},
+ std::bind(&CStringChecker::evalMemcmp, _1, _2, _3, CK_Regular)},
+ {{CDF_MaybeBuiltin, {"bzero"}, 2}, &CStringChecker::evalBzero},
+ {{CDF_MaybeBuiltin, {"explicit_bzero"}, 2}, &CStringChecker::evalBzero},
+ {{CDF_MaybeBuiltin, {"sprintf"}, 2}, &CStringChecker::evalSprintf},
+ {{CDF_MaybeBuiltin, {"snprintf"}, 2}, &CStringChecker::evalSnprintf},
};
// These require a bit of special handling.
@@ -149,51 +173,53 @@ public:
StdCopyBackward{{"std", "copy_backward"}, 3};
FnCheck identifyCall(const CallEvent &Call, CheckerContext &C) const;
- void evalMemcpy(CheckerContext &C, const CallExpr *CE) const;
- void evalMempcpy(CheckerContext &C, const CallExpr *CE) const;
- void evalMemmove(CheckerContext &C, const CallExpr *CE) const;
- void evalBcopy(CheckerContext &C, const CallExpr *CE) const;
- void evalCopyCommon(CheckerContext &C, const CallExpr *CE,
+ void evalMemcpy(CheckerContext &C, const CallEvent &Call, CharKind CK) const;
+ void evalMempcpy(CheckerContext &C, const CallEvent &Call, CharKind CK) const;
+ void evalMemmove(CheckerContext &C, const CallEvent &Call, CharKind CK) const;
+ void evalBcopy(CheckerContext &C, const CallEvent &Call) const;
+ void evalCopyCommon(CheckerContext &C, const CallEvent &Call,
ProgramStateRef state, SizeArgExpr Size,
DestinationArgExpr Dest, SourceArgExpr Source,
- bool Restricted, bool IsMempcpy) const;
+ bool Restricted, bool IsMempcpy, CharKind CK) const;
- void evalMemcmp(CheckerContext &C, const CallExpr *CE) const;
+ void evalMemcmp(CheckerContext &C, const CallEvent &Call, CharKind CK) const;
- void evalstrLength(CheckerContext &C, const CallExpr *CE) const;
- void evalstrnLength(CheckerContext &C, const CallExpr *CE) const;
- void evalstrLengthCommon(CheckerContext &C,
- const CallExpr *CE,
+ void evalstrLength(CheckerContext &C, const CallEvent &Call) const;
+ void evalstrnLength(CheckerContext &C, const CallEvent &Call) const;
+ void evalstrLengthCommon(CheckerContext &C, const CallEvent &Call,
bool IsStrnlen = false) const;
- void evalStrcpy(CheckerContext &C, const CallExpr *CE) const;
- void evalStrncpy(CheckerContext &C, const CallExpr *CE) const;
- void evalStpcpy(CheckerContext &C, const CallExpr *CE) const;
- void evalStrlcpy(CheckerContext &C, const CallExpr *CE) const;
- void evalStrcpyCommon(CheckerContext &C, const CallExpr *CE, bool ReturnEnd,
- bool IsBounded, ConcatFnKind appendK,
+ void evalStrcpy(CheckerContext &C, const CallEvent &Call) const;
+ void evalStrncpy(CheckerContext &C, const CallEvent &Call) const;
+ void evalStpcpy(CheckerContext &C, const CallEvent &Call) const;
+ void evalStrlcpy(CheckerContext &C, const CallEvent &Call) const;
+ void evalStrcpyCommon(CheckerContext &C, const CallEvent &Call,
+ bool ReturnEnd, bool IsBounded, ConcatFnKind appendK,
bool returnPtr = true) const;
- void evalStrcat(CheckerContext &C, const CallExpr *CE) const;
- void evalStrncat(CheckerContext &C, const CallExpr *CE) const;
- void evalStrlcat(CheckerContext &C, const CallExpr *CE) const;
+ void evalStrcat(CheckerContext &C, const CallEvent &Call) const;
+ void evalStrncat(CheckerContext &C, const CallEvent &Call) const;
+ void evalStrlcat(CheckerContext &C, const CallEvent &Call) const;
- void evalStrcmp(CheckerContext &C, const CallExpr *CE) const;
- void evalStrncmp(CheckerContext &C, const CallExpr *CE) const;
- void evalStrcasecmp(CheckerContext &C, const CallExpr *CE) const;
- void evalStrncasecmp(CheckerContext &C, const CallExpr *CE) const;
- void evalStrcmpCommon(CheckerContext &C,
- const CallExpr *CE,
- bool IsBounded = false,
- bool IgnoreCase = false) const;
+ void evalStrcmp(CheckerContext &C, const CallEvent &Call) const;
+ void evalStrncmp(CheckerContext &C, const CallEvent &Call) const;
+ void evalStrcasecmp(CheckerContext &C, const CallEvent &Call) const;
+ void evalStrncasecmp(CheckerContext &C, const CallEvent &Call) const;
+ void evalStrcmpCommon(CheckerContext &C, const CallEvent &Call,
+ bool IsBounded = false, bool IgnoreCase = false) const;
- void evalStrsep(CheckerContext &C, const CallExpr *CE) const;
+ void evalStrsep(CheckerContext &C, const CallEvent &Call) const;
- void evalStdCopy(CheckerContext &C, const CallExpr *CE) const;
- void evalStdCopyBackward(CheckerContext &C, const CallExpr *CE) const;
- void evalStdCopyCommon(CheckerContext &C, const CallExpr *CE) const;
- void evalMemset(CheckerContext &C, const CallExpr *CE) const;
- void evalBzero(CheckerContext &C, const CallExpr *CE) const;
+ void evalStdCopy(CheckerContext &C, const CallEvent &Call) const;
+ void evalStdCopyBackward(CheckerContext &C, const CallEvent &Call) const;
+ void evalStdCopyCommon(CheckerContext &C, const CallEvent &Call) const;
+ void evalMemset(CheckerContext &C, const CallEvent &Call) const;
+ void evalBzero(CheckerContext &C, const CallEvent &Call) const;
+
+ void evalSprintf(CheckerContext &C, const CallEvent &Call) const;
+ void evalSnprintf(CheckerContext &C, const CallEvent &Call) const;
+ void evalSprintfCommon(CheckerContext &C, const CallEvent &Call,
+ bool IsBounded, bool IsBuiltin) const;
// Utility methods
std::pair<ProgramStateRef , ProgramStateRef >
@@ -219,11 +245,34 @@ public:
const Expr *expr,
SVal val) const;
- static ProgramStateRef InvalidateBuffer(CheckerContext &C,
- ProgramStateRef state,
- const Expr *Ex, SVal V,
- bool IsSourceBuffer,
- const Expr *Size);
+ /// Invalidate the destination buffer determined by characters copied.
+ static ProgramStateRef
+ invalidateDestinationBufferBySize(CheckerContext &C, ProgramStateRef S,
+ const Expr *BufE, SVal BufV, SVal SizeV,
+ QualType SizeTy);
+
+ /// Operation never overflows, do not invalidate the super region.
+ static ProgramStateRef invalidateDestinationBufferNeverOverflows(
+ CheckerContext &C, ProgramStateRef S, const Expr *BufE, SVal BufV);
+
+ /// We do not know whether the operation can overflow (e.g. size is unknown),
+ /// invalidate the super region and escape related pointers.
+ static ProgramStateRef invalidateDestinationBufferAlwaysEscapeSuperRegion(
+ CheckerContext &C, ProgramStateRef S, const Expr *BufE, SVal BufV);
+
+ /// Invalidate the source buffer for escaping pointers.
+ static ProgramStateRef invalidateSourceBuffer(CheckerContext &C,
+ ProgramStateRef S,
+ const Expr *BufE, SVal BufV);
+
+ /// @param InvalidationTraitOperations Determine how to invlidate the
+ /// MemRegion by setting the invalidation traits. Return true to cause pointer
+ /// escape, or false otherwise.
+ static ProgramStateRef invalidateBufferAux(
+ CheckerContext &C, ProgramStateRef State, const Expr *Ex, SVal V,
+ llvm::function_ref<bool(RegionAndSymbolInvalidationTraits &,
+ const MemRegion *)>
+ InvalidationTraitOperations);
static bool SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
const MemRegion *MR);
@@ -237,13 +286,16 @@ public:
AnyArgExpr Arg, SVal l) const;
ProgramStateRef CheckLocation(CheckerContext &C, ProgramStateRef state,
AnyArgExpr Buffer, SVal Element,
- AccessKind Access) const;
+ AccessKind Access,
+ CharKind CK = CharKind::Regular) const;
ProgramStateRef CheckBufferAccess(CheckerContext &C, ProgramStateRef State,
AnyArgExpr Buffer, SizeArgExpr Size,
- AccessKind Access) const;
+ AccessKind Access,
+ CharKind CK = CharKind::Regular) const;
ProgramStateRef CheckOverlap(CheckerContext &C, ProgramStateRef state,
SizeArgExpr Size, AnyArgExpr First,
- AnyArgExpr Second) const;
+ AnyArgExpr Second,
+ CharKind CK = CharKind::Regular) const;
void emitOverlapBug(CheckerContext &C,
ProgramStateRef state,
const Stmt *First,
@@ -256,7 +308,8 @@ public:
void emitNotCStringBug(CheckerContext &C, ProgramStateRef State,
const Stmt *S, StringRef WarningMsg) const;
void emitAdditionOverflowBug(CheckerContext &C, ProgramStateRef State) const;
-
+ void emitUninitializedReadBug(CheckerContext &C, ProgramStateRef State,
+ const Expr *E) const;
ProgramStateRef checkAdditionOverflow(CheckerContext &C,
ProgramStateRef state,
NonLoc left,
@@ -265,10 +318,9 @@ public:
// Return true if the destination buffer of the copy function may be in bound.
// Expects SVal of Size to be positive and unsigned.
// Expects SVal of FirstBuf to be a FieldRegion.
- static bool IsFirstBufInBound(CheckerContext &C,
- ProgramStateRef state,
- const Expr *FirstBuf,
- const Expr *Size);
+ static bool isFirstBufInBound(CheckerContext &C, ProgramStateRef State,
+ SVal BufVal, QualType BufTy, SVal LengthVal,
+ QualType LengthTy);
};
} //end anonymous namespace
@@ -282,7 +334,7 @@ REGISTER_MAP_WITH_PROGRAMSTATE(CStringLength, const MemRegion *, SVal)
std::pair<ProgramStateRef , ProgramStateRef >
CStringChecker::assumeZero(CheckerContext &C, ProgramStateRef state, SVal V,
QualType Ty) {
- Optional<DefinedSVal> val = V.getAs<DefinedSVal>();
+ std::optional<DefinedSVal> val = V.getAs<DefinedSVal>();
if (!val)
return std::pair<ProgramStateRef , ProgramStateRef >(state, state);
@@ -325,7 +377,8 @@ ProgramStateRef CStringChecker::checkNonNull(CheckerContext &C,
ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
ProgramStateRef state,
AnyArgExpr Buffer, SVal Element,
- AccessKind Access) const {
+ AccessKind Access,
+ CharKind CK) const {
// If a previous check has failed, propagate the failure.
if (!state)
@@ -340,19 +393,38 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
if (!ER)
return state;
- if (ER->getValueType() != C.getASTContext().CharTy)
- return state;
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ ASTContext &Ctx = svalBuilder.getContext();
+
+ // Get the index of the accessed element.
+ NonLoc Idx = ER->getIndex();
+
+ if (CK == CharKind::Regular) {
+ if (ER->getValueType() != Ctx.CharTy)
+ return state;
+ } else {
+ if (ER->getValueType() != Ctx.WideCharTy)
+ return state;
+
+ QualType SizeTy = Ctx.getSizeType();
+ NonLoc WideSize =
+ svalBuilder
+ .makeIntVal(Ctx.getTypeSizeInChars(Ctx.WideCharTy).getQuantity(),
+ SizeTy)
+ .castAs<NonLoc>();
+ SVal Offset = svalBuilder.evalBinOpNN(state, BO_Mul, Idx, WideSize, SizeTy);
+ if (Offset.isUnknown())
+ return state;
+ Idx = Offset.castAs<NonLoc>();
+ }
// Get the size of the array.
const auto *superReg = cast<SubRegion>(ER->getSuperRegion());
DefinedOrUnknownSVal Size =
getDynamicExtent(state, superReg, C.getSValBuilder());
- // Get the index of the accessed element.
- DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
-
- ProgramStateRef StInBound = state->assumeInBound(Idx, Size, true);
- ProgramStateRef StOutBound = state->assumeInBound(Idx, Size, false);
+ ProgramStateRef StInBound, StOutBound;
+ std::tie(StInBound, StOutBound) = state->assumeInBoundDual(Idx, Size);
if (StOutBound && !StInBound) {
// These checks are either enabled by the CString out-of-bounds checker
// explicitly or implicitly by the Malloc checker.
@@ -367,16 +439,24 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
return nullptr;
}
+ // Ensure that we wouldn't read uninitialized value.
+ if (Access == AccessKind::read) {
+ if (Filter.CheckCStringUninitializedRead &&
+ StInBound->getSVal(ER).isUndef()) {
+ emitUninitializedReadBug(C, StInBound, Buffer.Expression);
+ return nullptr;
+ }
+ }
+
// Array bound check succeeded. From this point forward the array bound
// should always succeed.
return StInBound;
}
-ProgramStateRef CStringChecker::CheckBufferAccess(CheckerContext &C,
- ProgramStateRef State,
- AnyArgExpr Buffer,
- SizeArgExpr Size,
- AccessKind Access) const {
+ProgramStateRef
+CStringChecker::CheckBufferAccess(CheckerContext &C, ProgramStateRef State,
+ AnyArgExpr Buffer, SizeArgExpr Size,
+ AccessKind Access, CharKind CK) const {
// If a previous check has failed, propagate the failure.
if (!State)
return nullptr;
@@ -385,7 +465,7 @@ ProgramStateRef CStringChecker::CheckBufferAccess(CheckerContext &C,
ASTContext &Ctx = svalBuilder.getContext();
QualType SizeTy = Size.Expression->getType();
- QualType PtrTy = Ctx.getPointerType(Ctx.CharTy);
+ QualType PtrTy = getCharPtrType(Ctx, CK);
// Check that the first buffer is non-null.
SVal BufVal = C.getSVal(Buffer.Expression);
@@ -397,11 +477,19 @@ ProgramStateRef CStringChecker::CheckBufferAccess(CheckerContext &C,
if (!Filter.CheckCStringOutOfBounds)
return State;
+ SVal BufStart =
+ svalBuilder.evalCast(BufVal, PtrTy, Buffer.Expression->getType());
+
+ // Check if the first byte of the buffer is accessible.
+ State = CheckLocation(C, State, Buffer, BufStart, Access, CK);
+ if (!State)
+ return nullptr;
+
// Get the access length and make sure it is known.
// FIXME: This assumes the caller has already checked that the access length
// is positive. And that it's unsigned.
SVal LengthVal = C.getSVal(Size.Expression);
- Optional<NonLoc> Length = LengthVal.getAs<NonLoc>();
+ std::optional<NonLoc> Length = LengthVal.getAs<NonLoc>();
if (!Length)
return State;
@@ -413,14 +501,11 @@ ProgramStateRef CStringChecker::CheckBufferAccess(CheckerContext &C,
NonLoc LastOffset = Offset.castAs<NonLoc>();
// Check that the first buffer is sufficiently long.
- SVal BufStart =
- svalBuilder.evalCast(BufVal, PtrTy, Buffer.Expression->getType());
- if (Optional<Loc> BufLoc = BufStart.getAs<Loc>()) {
+ if (std::optional<Loc> BufLoc = BufStart.getAs<Loc>()) {
SVal BufEnd =
svalBuilder.evalBinOpLN(State, BO_Add, *BufLoc, LastOffset, PtrTy);
-
- State = CheckLocation(C, State, Buffer, BufEnd, Access);
+ State = CheckLocation(C, State, Buffer, BufEnd, Access, CK);
// If the buffer isn't large enough, abort.
if (!State)
@@ -434,7 +519,8 @@ ProgramStateRef CStringChecker::CheckBufferAccess(CheckerContext &C,
ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
ProgramStateRef state,
SizeArgExpr Size, AnyArgExpr First,
- AnyArgExpr Second) const {
+ AnyArgExpr Second,
+ CharKind CK) const {
if (!Filter.CheckCStringBufferOverlap)
return state;
@@ -448,16 +534,21 @@ ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
ProgramStateRef stateTrue, stateFalse;
+ // Assume different address spaces cannot overlap.
+ if (First.Expression->getType()->getPointeeType().getAddressSpace() !=
+ Second.Expression->getType()->getPointeeType().getAddressSpace())
+ return state;
+
// Get the buffer values and make sure they're known locations.
const LocationContext *LCtx = C.getLocationContext();
SVal firstVal = state->getSVal(First.Expression, LCtx);
SVal secondVal = state->getSVal(Second.Expression, LCtx);
- Optional<Loc> firstLoc = firstVal.getAs<Loc>();
+ std::optional<Loc> firstLoc = firstVal.getAs<Loc>();
if (!firstLoc)
return state;
- Optional<Loc> secondLoc = secondVal.getAs<Loc>();
+ std::optional<Loc> secondLoc = secondVal.getAs<Loc>();
if (!secondLoc)
return state;
@@ -480,7 +571,7 @@ ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
QualType cmpTy = svalBuilder.getConditionType();
SVal reverse =
svalBuilder.evalBinOpLL(state, BO_GT, *firstLoc, *secondLoc, cmpTy);
- Optional<DefinedOrUnknownSVal> reverseTest =
+ std::optional<DefinedOrUnknownSVal> reverseTest =
reverse.getAs<DefinedOrUnknownSVal>();
if (!reverseTest)
return state;
@@ -501,31 +592,31 @@ ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
// Get the length, and make sure it too is known.
SVal LengthVal = state->getSVal(Size.Expression, LCtx);
- Optional<NonLoc> Length = LengthVal.getAs<NonLoc>();
+ std::optional<NonLoc> Length = LengthVal.getAs<NonLoc>();
if (!Length)
return state;
// Convert the first buffer's start address to char*.
// Bail out if the cast fails.
ASTContext &Ctx = svalBuilder.getContext();
- QualType CharPtrTy = Ctx.getPointerType(Ctx.CharTy);
+ QualType CharPtrTy = getCharPtrType(Ctx, CK);
SVal FirstStart =
svalBuilder.evalCast(*firstLoc, CharPtrTy, First.Expression->getType());
- Optional<Loc> FirstStartLoc = FirstStart.getAs<Loc>();
+ std::optional<Loc> FirstStartLoc = FirstStart.getAs<Loc>();
if (!FirstStartLoc)
return state;
// Compute the end of the first buffer. Bail out if THAT fails.
SVal FirstEnd = svalBuilder.evalBinOpLN(state, BO_Add, *FirstStartLoc,
*Length, CharPtrTy);
- Optional<Loc> FirstEndLoc = FirstEnd.getAs<Loc>();
+ std::optional<Loc> FirstEndLoc = FirstEnd.getAs<Loc>();
if (!FirstEndLoc)
return state;
// Is the end of the first buffer past the start of the second buffer?
SVal Overlap =
svalBuilder.evalBinOpLL(state, BO_GT, *FirstEndLoc, *secondLoc, cmpTy);
- Optional<DefinedOrUnknownSVal> OverlapTest =
+ std::optional<DefinedOrUnknownSVal> OverlapTest =
Overlap.getAs<DefinedOrUnknownSVal>();
if (!OverlapTest)
return state;
@@ -565,13 +656,15 @@ void CStringChecker::emitOverlapBug(CheckerContext &C, ProgramStateRef state,
void CStringChecker::emitNullArgBug(CheckerContext &C, ProgramStateRef State,
const Stmt *S, StringRef WarningMsg) const {
if (ExplodedNode *N = C.generateErrorNode(State)) {
- if (!BT_Null)
- BT_Null.reset(new BuiltinBug(
- Filter.CheckNameCStringNullArg, categories::UnixAPI,
- "Null pointer argument in call to byte string function"));
+ if (!BT_Null) {
+ // FIXME: This call uses the string constant 'categories::UnixAPI' as the
+ // description of the bug; it should be replaced by a real description.
+ BT_Null.reset(
+ new BugType(Filter.CheckNameCStringNullArg, categories::UnixAPI));
+ }
- BuiltinBug *BT = static_cast<BuiltinBug *>(BT_Null.get());
- auto Report = std::make_unique<PathSensitiveBugReport>(*BT, WarningMsg, N);
+ auto Report =
+ std::make_unique<PathSensitiveBugReport>(*BT_Null, WarningMsg, N);
Report->addRange(S->getSourceRange());
if (const auto *Ex = dyn_cast<Expr>(S))
bugreporter::trackExpressionValue(N, Ex, *Report);
@@ -579,23 +672,39 @@ void CStringChecker::emitNullArgBug(CheckerContext &C, ProgramStateRef State,
}
}
+void CStringChecker::emitUninitializedReadBug(CheckerContext &C,
+ ProgramStateRef State,
+ const Expr *E) const {
+ if (ExplodedNode *N = C.generateErrorNode(State)) {
+ const char *Msg =
+ "Bytes string function accesses uninitialized/garbage values";
+ if (!BT_UninitRead)
+ BT_UninitRead.reset(new BugType(Filter.CheckNameCStringUninitializedRead,
+ "Accessing unitialized/garbage values"));
+
+ auto Report =
+ std::make_unique<PathSensitiveBugReport>(*BT_UninitRead, Msg, N);
+ Report->addRange(E->getSourceRange());
+ bugreporter::trackExpressionValue(N, E, *Report);
+ C.emitReport(std::move(Report));
+ }
+}
+
void CStringChecker::emitOutOfBoundsBug(CheckerContext &C,
ProgramStateRef State, const Stmt *S,
StringRef WarningMsg) const {
if (ExplodedNode *N = C.generateErrorNode(State)) {
if (!BT_Bounds)
- BT_Bounds.reset(new BuiltinBug(
- Filter.CheckCStringOutOfBounds ? Filter.CheckNameCStringOutOfBounds
- : Filter.CheckNameCStringNullArg,
- "Out-of-bound array access",
- "Byte string function accesses out-of-bound array element"));
-
- BuiltinBug *BT = static_cast<BuiltinBug *>(BT_Bounds.get());
+ BT_Bounds.reset(new BugType(Filter.CheckCStringOutOfBounds
+ ? Filter.CheckNameCStringOutOfBounds
+ : Filter.CheckNameCStringNullArg,
+ "Out-of-bound array access"));
// FIXME: It would be nice to eventually make this diagnostic more clear,
// e.g., by referencing the original declaration or by saying *why* this
// reference is outside the range.
- auto Report = std::make_unique<PathSensitiveBugReport>(*BT, WarningMsg, N);
+ auto Report =
+ std::make_unique<PathSensitiveBugReport>(*BT_Bounds, WarningMsg, N);
Report->addRange(S->getSourceRange());
C.emitReport(std::move(Report));
}
@@ -605,10 +714,12 @@ void CStringChecker::emitNotCStringBug(CheckerContext &C, ProgramStateRef State,
const Stmt *S,
StringRef WarningMsg) const {
if (ExplodedNode *N = C.generateNonFatalErrorNode(State)) {
- if (!BT_NotCString)
- BT_NotCString.reset(new BuiltinBug(
- Filter.CheckNameCStringNotNullTerm, categories::UnixAPI,
- "Argument is not a null-terminated string."));
+ if (!BT_NotCString) {
+ // FIXME: This call uses the string constant 'categories::UnixAPI' as the
+ // description of the bug; it should be replaced by a real description.
+ BT_NotCString.reset(
+ new BugType(Filter.CheckNameCStringNotNullTerm, categories::UnixAPI));
+ }
auto Report =
std::make_unique<PathSensitiveBugReport>(*BT_NotCString, WarningMsg, N);
@@ -621,10 +732,13 @@ void CStringChecker::emitNotCStringBug(CheckerContext &C, ProgramStateRef State,
void CStringChecker::emitAdditionOverflowBug(CheckerContext &C,
ProgramStateRef State) const {
if (ExplodedNode *N = C.generateErrorNode(State)) {
- if (!BT_NotCString)
- BT_NotCString.reset(
- new BuiltinBug(Filter.CheckNameCStringOutOfBounds, "API",
- "Sum of expressions causes overflow."));
+ if (!BT_AdditionOverflow) {
+ // FIXME: This call uses the word "API" as the description of the bug;
+ // it should be replaced by a better error message (if this unlikely
+ // situation continues to exist as a separate bug type).
+ BT_AdditionOverflow.reset(
+ new BugType(Filter.CheckNameCStringOutOfBounds, "API"));
+ }
// This isn't a great error message, but this should never occur in real
// code anyway -- you'd have to create a buffer longer than a size_t can
@@ -633,8 +747,8 @@ void CStringChecker::emitAdditionOverflowBug(CheckerContext &C,
"This expression will create a string whose length is too big to "
"be represented as a size_t";
- auto Report =
- std::make_unique<PathSensitiveBugReport>(*BT_NotCString, WarningMsg, N);
+ auto Report = std::make_unique<PathSensitiveBugReport>(*BT_AdditionOverflow,
+ WarningMsg, N);
C.emitReport(std::move(Report));
}
}
@@ -659,7 +773,7 @@ ProgramStateRef CStringChecker::checkAdditionOverflow(CheckerContext &C,
NonLoc maxVal = svalBuilder.makeIntVal(maxValInt);
SVal maxMinusRight;
- if (right.getAs<nonloc::ConcreteInt>()) {
+ if (isa<nonloc::ConcreteInt>(right)) {
maxMinusRight = svalBuilder.evalBinOpNN(state, BO_Sub, maxVal, right,
sizeTy);
} else {
@@ -670,7 +784,7 @@ ProgramStateRef CStringChecker::checkAdditionOverflow(CheckerContext &C,
left = right;
}
- if (Optional<NonLoc> maxMinusRightNL = maxMinusRight.getAs<NonLoc>()) {
+ if (std::optional<NonLoc> maxMinusRightNL = maxMinusRight.getAs<NonLoc>()) {
QualType cmpTy = svalBuilder.getConditionType();
// If left > max - right, we have an overflow.
SVal willOverflow = svalBuilder.evalBinOpNN(state, BO_GT, left,
@@ -756,7 +870,7 @@ SVal CStringChecker::getCStringLengthForRegion(CheckerContext &C,
C.blockCount());
if (!hypothetical) {
- if (Optional<NonLoc> strLn = strLength.getAs<NonLoc>()) {
+ if (std::optional<NonLoc> strLn = strLength.getAs<NonLoc>()) {
// In case of unbounded calls strlen etc bound the range to SIZE_MAX/4
BasicValueFactory &BVF = svalBuilder.getBasicValueFactory();
const llvm::APSInt &maxValInt = BVF.getMaxValue(sizeTy);
@@ -764,8 +878,8 @@ SVal CStringChecker::getCStringLengthForRegion(CheckerContext &C,
const llvm::APSInt *maxLengthInt = BVF.evalAPSInt(BO_Div, maxValInt,
fourInt);
NonLoc maxLength = svalBuilder.makeIntVal(*maxLengthInt);
- SVal evalLength = svalBuilder.evalBinOpNN(state, BO_LE, *strLn,
- maxLength, sizeTy);
+ SVal evalLength = svalBuilder.evalBinOpNN(state, BO_LE, *strLn, maxLength,
+ svalBuilder.getConditionType());
state = state->assume(evalLength.castAs<DefinedOrUnknownSVal>(), true);
}
state = state->set<CStringLength>(MR, strLength);
@@ -782,7 +896,7 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
// If we can't get a region, see if it's something we /know/ isn't a
// C string. In the context of locations, the only time we can issue such
// a warning is for labels.
- if (Optional<loc::GotoLabel> Label = Buf.getAs<loc::GotoLabel>()) {
+ if (std::optional<loc::GotoLabel> Label = Buf.getAs<loc::GotoLabel>()) {
if (Filter.CheckCStringNotNullTerm) {
SmallString<120> buf;
llvm::raw_svector_ostream os(buf);
@@ -811,11 +925,25 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
SValBuilder &svalBuilder = C.getSValBuilder();
QualType sizeTy = svalBuilder.getContext().getSizeType();
const StringLiteral *strLit = cast<StringRegion>(MR)->getStringLiteral();
- return svalBuilder.makeIntVal(strLit->getByteLength(), sizeTy);
+ return svalBuilder.makeIntVal(strLit->getLength(), sizeTy);
+ }
+ case MemRegion::NonParamVarRegionKind: {
+ // If we have a global constant with a string literal initializer,
+ // compute the initializer's length.
+ const VarDecl *Decl = cast<NonParamVarRegion>(MR)->getDecl();
+ if (Decl->getType().isConstQualified() && Decl->hasGlobalStorage()) {
+ if (const Expr *Init = Decl->getInit()) {
+ if (auto *StrLit = dyn_cast<StringLiteral>(Init)) {
+ SValBuilder &SvalBuilder = C.getSValBuilder();
+ QualType SizeTy = SvalBuilder.getContext().getSizeType();
+ return SvalBuilder.makeIntVal(StrLit->getLength(), SizeTy);
+ }
+ }
+ }
+ [[fallthrough]];
}
case MemRegion::SymbolicRegionKind:
case MemRegion::AllocaRegionKind:
- case MemRegion::NonParamVarRegionKind:
case MemRegion::ParamVarRegionKind:
case MemRegion::FieldRegionKind:
case MemRegion::ObjCIvarRegionKind:
@@ -869,43 +997,40 @@ const StringLiteral *CStringChecker::getCStringLiteral(CheckerContext &C,
return strRegion->getStringLiteral();
}
-bool CStringChecker::IsFirstBufInBound(CheckerContext &C,
- ProgramStateRef state,
- const Expr *FirstBuf,
- const Expr *Size) {
+bool CStringChecker::isFirstBufInBound(CheckerContext &C, ProgramStateRef State,
+ SVal BufVal, QualType BufTy,
+ SVal LengthVal, QualType LengthTy) {
// If we do not know that the buffer is long enough we return 'true'.
// Otherwise the parent region of this field region would also get
// invalidated, which would lead to warnings based on an unknown state.
+ if (LengthVal.isUnknown())
+ return false;
+
// Originally copied from CheckBufferAccess and CheckLocation.
- SValBuilder &svalBuilder = C.getSValBuilder();
- ASTContext &Ctx = svalBuilder.getContext();
- const LocationContext *LCtx = C.getLocationContext();
+ SValBuilder &SB = C.getSValBuilder();
+ ASTContext &Ctx = C.getASTContext();
- QualType sizeTy = Size->getType();
QualType PtrTy = Ctx.getPointerType(Ctx.CharTy);
- SVal BufVal = state->getSVal(FirstBuf, LCtx);
- SVal LengthVal = state->getSVal(Size, LCtx);
- Optional<NonLoc> Length = LengthVal.getAs<NonLoc>();
+ std::optional<NonLoc> Length = LengthVal.getAs<NonLoc>();
if (!Length)
return true; // cf top comment.
// Compute the offset of the last element to be accessed: size-1.
- NonLoc One = svalBuilder.makeIntVal(1, sizeTy).castAs<NonLoc>();
- SVal Offset = svalBuilder.evalBinOpNN(state, BO_Sub, *Length, One, sizeTy);
+ NonLoc One = SB.makeIntVal(1, LengthTy).castAs<NonLoc>();
+ SVal Offset = SB.evalBinOpNN(State, BO_Sub, *Length, One, LengthTy);
if (Offset.isUnknown())
return true; // cf top comment
NonLoc LastOffset = Offset.castAs<NonLoc>();
// Check that the first buffer is sufficiently long.
- SVal BufStart = svalBuilder.evalCast(BufVal, PtrTy, FirstBuf->getType());
- Optional<Loc> BufLoc = BufStart.getAs<Loc>();
+ SVal BufStart = SB.evalCast(BufVal, PtrTy, BufTy);
+ std::optional<Loc> BufLoc = BufStart.getAs<Loc>();
if (!BufLoc)
return true; // cf top comment.
- SVal BufEnd =
- svalBuilder.evalBinOpLN(state, BO_Add, *BufLoc, LastOffset, PtrTy);
+ SVal BufEnd = SB.evalBinOpLN(State, BO_Add, *BufLoc, LastOffset, PtrTy);
// Check for out of bound array element access.
const MemRegion *R = BufEnd.getAsRegion();
@@ -919,33 +1044,95 @@ bool CStringChecker::IsFirstBufInBound(CheckerContext &C,
// FIXME: Does this crash when a non-standard definition
// of a library function is encountered?
assert(ER->getValueType() == C.getASTContext().CharTy &&
- "IsFirstBufInBound should only be called with char* ElementRegions");
+ "isFirstBufInBound should only be called with char* ElementRegions");
// Get the size of the array.
const SubRegion *superReg = cast<SubRegion>(ER->getSuperRegion());
- DefinedOrUnknownSVal SizeDV = getDynamicExtent(state, superReg, svalBuilder);
+ DefinedOrUnknownSVal SizeDV = getDynamicExtent(State, superReg, SB);
// Get the index of the accessed element.
DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
- ProgramStateRef StInBound = state->assumeInBound(Idx, SizeDV, true);
+ ProgramStateRef StInBound = State->assumeInBound(Idx, SizeDV, true);
return static_cast<bool>(StInBound);
}
-ProgramStateRef CStringChecker::InvalidateBuffer(CheckerContext &C,
- ProgramStateRef state,
- const Expr *E, SVal V,
- bool IsSourceBuffer,
- const Expr *Size) {
- Optional<Loc> L = V.getAs<Loc>();
+ProgramStateRef CStringChecker::invalidateDestinationBufferBySize(
+ CheckerContext &C, ProgramStateRef S, const Expr *BufE, SVal BufV,
+ SVal SizeV, QualType SizeTy) {
+ auto InvalidationTraitOperations =
+ [&C, S, BufTy = BufE->getType(), BufV, SizeV,
+ SizeTy](RegionAndSymbolInvalidationTraits &ITraits, const MemRegion *R) {
+ // If destination buffer is a field region and access is in bound, do
+ // not invalidate its super region.
+ if (MemRegion::FieldRegionKind == R->getKind() &&
+ isFirstBufInBound(C, S, BufV, BufTy, SizeV, SizeTy)) {
+ ITraits.setTrait(
+ R,
+ RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
+ }
+ return false;
+ };
+
+ return invalidateBufferAux(C, S, BufE, BufV, InvalidationTraitOperations);
+}
+
+ProgramStateRef
+CStringChecker::invalidateDestinationBufferAlwaysEscapeSuperRegion(
+ CheckerContext &C, ProgramStateRef S, const Expr *BufE, SVal BufV) {
+ auto InvalidationTraitOperations = [](RegionAndSymbolInvalidationTraits &,
+ const MemRegion *R) {
+ return isa<FieldRegion>(R);
+ };
+
+ return invalidateBufferAux(C, S, BufE, BufV, InvalidationTraitOperations);
+}
+
+ProgramStateRef CStringChecker::invalidateDestinationBufferNeverOverflows(
+ CheckerContext &C, ProgramStateRef S, const Expr *BufE, SVal BufV) {
+ auto InvalidationTraitOperations =
+ [](RegionAndSymbolInvalidationTraits &ITraits, const MemRegion *R) {
+ if (MemRegion::FieldRegionKind == R->getKind())
+ ITraits.setTrait(
+ R,
+ RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
+ return false;
+ };
+
+ return invalidateBufferAux(C, S, BufE, BufV, InvalidationTraitOperations);
+}
+
+ProgramStateRef CStringChecker::invalidateSourceBuffer(CheckerContext &C,
+ ProgramStateRef S,
+ const Expr *BufE,
+ SVal BufV) {
+ auto InvalidationTraitOperations =
+ [](RegionAndSymbolInvalidationTraits &ITraits, const MemRegion *R) {
+ ITraits.setTrait(
+ R->getBaseRegion(),
+ RegionAndSymbolInvalidationTraits::TK_PreserveContents);
+ ITraits.setTrait(R,
+ RegionAndSymbolInvalidationTraits::TK_SuppressEscape);
+ return true;
+ };
+
+ return invalidateBufferAux(C, S, BufE, BufV, InvalidationTraitOperations);
+}
+
+ProgramStateRef CStringChecker::invalidateBufferAux(
+ CheckerContext &C, ProgramStateRef State, const Expr *E, SVal V,
+ llvm::function_ref<bool(RegionAndSymbolInvalidationTraits &,
+ const MemRegion *)>
+ InvalidationTraitOperations) {
+ std::optional<Loc> L = V.getAs<Loc>();
if (!L)
- return state;
+ return State;
// FIXME: This is a simplified version of what's in CFRefCount.cpp -- it makes
// some assumptions about the value that CFRefCount can't. Even so, it should
// probably be refactored.
- if (Optional<loc::MemRegionVal> MR = L->getAs<loc::MemRegionVal>()) {
+ if (std::optional<loc::MemRegionVal> MR = L->getAs<loc::MemRegionVal>()) {
const MemRegion *R = MR->getRegion()->StripCasts();
// Are we dealing with an ElementRegion? If so, we should be invalidating
@@ -957,29 +1144,10 @@ ProgramStateRef CStringChecker::InvalidateBuffer(CheckerContext &C,
// Invalidate this region.
const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
-
- bool CausesPointerEscape = false;
RegionAndSymbolInvalidationTraits ITraits;
- // Invalidate and escape only indirect regions accessible through the source
- // buffer.
- if (IsSourceBuffer) {
- ITraits.setTrait(R->getBaseRegion(),
- RegionAndSymbolInvalidationTraits::TK_PreserveContents);
- ITraits.setTrait(R, RegionAndSymbolInvalidationTraits::TK_SuppressEscape);
- CausesPointerEscape = true;
- } else {
- const MemRegion::Kind& K = R->getKind();
- if (K == MemRegion::FieldRegionKind)
- if (Size && IsFirstBufInBound(C, state, E, Size)) {
- // If destination buffer is a field region and access is in bound,
- // do not invalidate its super region.
- ITraits.setTrait(
- R,
- RegionAndSymbolInvalidationTraits::TK_DoNotInvalidateSuperRegion);
- }
- }
+ bool CausesPointerEscape = InvalidationTraitOperations(ITraits, R);
- return state->invalidateRegions(R, E, C.blockCount(), LCtx,
+ return State->invalidateRegions(R, E, C.blockCount(), LCtx,
CausesPointerEscape, nullptr, nullptr,
&ITraits);
}
@@ -987,7 +1155,7 @@ ProgramStateRef CStringChecker::InvalidateBuffer(CheckerContext &C,
// If we have a non-region value by chance, just remove the binding.
// FIXME: is this necessary or correct? This handles the non-Region
// cases. Is it ever valid to store to these?
- return state->killBinding(*L);
+ return State->killBinding(*L);
}
bool CStringChecker::SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
@@ -1009,23 +1177,20 @@ bool CStringChecker::SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
case MemRegion::CXXThisRegionKind:
case MemRegion::CXXTempObjectRegionKind:
os << "a C++ temp object of type "
- << cast<TypedValueRegion>(MR)->getValueType().getAsString();
+ << cast<TypedValueRegion>(MR)->getValueType();
return true;
case MemRegion::NonParamVarRegionKind:
- os << "a variable of type"
- << cast<TypedValueRegion>(MR)->getValueType().getAsString();
+ os << "a variable of type" << cast<TypedValueRegion>(MR)->getValueType();
return true;
case MemRegion::ParamVarRegionKind:
- os << "a parameter of type"
- << cast<TypedValueRegion>(MR)->getValueType().getAsString();
+ os << "a parameter of type" << cast<TypedValueRegion>(MR)->getValueType();
return true;
case MemRegion::FieldRegionKind:
- os << "a field of type "
- << cast<TypedValueRegion>(MR)->getValueType().getAsString();
+ os << "a field of type " << cast<TypedValueRegion>(MR)->getValueType();
return true;
case MemRegion::ObjCIvarRegionKind:
os << "an instance variable of type "
- << cast<TypedValueRegion>(MR)->getValueType().getAsString();
+ << cast<TypedValueRegion>(MR)->getValueType();
return true;
default:
return false;
@@ -1048,7 +1213,7 @@ bool CStringChecker::memsetAux(const Expr *DstBuffer, SVal CharVal,
RegionOffset Offset = MR->getAsOffset();
const MemRegion *BR = Offset.getRegion();
- Optional<NonLoc> SizeNL = SizeVal.getAs<NonLoc>();
+ std::optional<NonLoc> SizeNL = SizeVal.getAs<NonLoc>();
if (!SizeNL)
return false;
@@ -1087,8 +1252,8 @@ bool CStringChecker::memsetAux(const Expr *DstBuffer, SVal CharVal,
} else {
// If the destination buffer's extent is not equal to the value of
// third argument, just invalidate buffer.
- State = InvalidateBuffer(C, State, DstBuffer, MemVal,
- /*IsSourceBuffer*/ false, Size);
+ State = invalidateDestinationBufferBySize(C, State, DstBuffer, MemVal,
+ SizeVal, Size->getType());
}
if (StateNullChar && !StateNonNullChar) {
@@ -1113,8 +1278,8 @@ bool CStringChecker::memsetAux(const Expr *DstBuffer, SVal CharVal,
} else {
// If the offset is not zero and char value is not concrete, we can do
// nothing but invalidate the buffer.
- State = InvalidateBuffer(C, State, DstBuffer, MemVal,
- /*IsSourceBuffer*/ false, Size);
+ State = invalidateDestinationBufferBySize(C, State, DstBuffer, MemVal,
+ SizeVal, Size->getType());
}
return true;
}
@@ -1123,11 +1288,11 @@ bool CStringChecker::memsetAux(const Expr *DstBuffer, SVal CharVal,
// evaluation of individual function calls.
//===----------------------------------------------------------------------===//
-void CStringChecker::evalCopyCommon(CheckerContext &C, const CallExpr *CE,
+void CStringChecker::evalCopyCommon(CheckerContext &C, const CallEvent &Call,
ProgramStateRef state, SizeArgExpr Size,
DestinationArgExpr Dest,
SourceArgExpr Source, bool Restricted,
- bool IsMempcpy) const {
+ bool IsMempcpy, CharKind CK) const {
CurrentFunctionDescription = "memory copy function";
// See if the size argument is zero.
@@ -1145,7 +1310,8 @@ void CStringChecker::evalCopyCommon(CheckerContext &C, const CallExpr *CE,
// If the size is zero, there won't be any actual memory access, so
// just bind the return value to the destination buffer and return.
if (stateZeroSize && !stateNonZeroSize) {
- stateZeroSize = stateZeroSize->BindExpr(CE, LCtx, destVal);
+ stateZeroSize =
+ stateZeroSize->BindExpr(Call.getOriginExpr(), LCtx, destVal);
C.addTransition(stateZeroSize);
return;
}
@@ -1170,11 +1336,11 @@ void CStringChecker::evalCopyCommon(CheckerContext &C, const CallExpr *CE,
return;
// Ensure the accesses are valid and that the buffers do not overlap.
- state = CheckBufferAccess(C, state, Dest, Size, AccessKind::write);
- state = CheckBufferAccess(C, state, Source, Size, AccessKind::read);
+ state = CheckBufferAccess(C, state, Dest, Size, AccessKind::write, CK);
+ state = CheckBufferAccess(C, state, Source, Size, AccessKind::read, CK);
if (Restricted)
- state = CheckOverlap(C, state, Size, Dest, Source);
+ state = CheckOverlap(C, state, Size, Dest, Source, CK);
if (!state)
return;
@@ -1185,7 +1351,7 @@ void CStringChecker::evalCopyCommon(CheckerContext &C, const CallExpr *CE,
// Get the byte after the last byte copied.
SValBuilder &SvalBuilder = C.getSValBuilder();
ASTContext &Ctx = SvalBuilder.getContext();
- QualType CharPtrTy = Ctx.getPointerType(Ctx.CharTy);
+ QualType CharPtrTy = getCharPtrType(Ctx, CK);
SVal DestRegCharVal =
SvalBuilder.evalCast(destVal, CharPtrTy, Dest.Expression->getType());
SVal lastElement = C.getSValBuilder().evalBinOp(
@@ -1193,15 +1359,15 @@ void CStringChecker::evalCopyCommon(CheckerContext &C, const CallExpr *CE,
// If we don't know how much we copied, we can at least
// conjure a return value for later.
if (lastElement.isUnknown())
- lastElement = C.getSValBuilder().conjureSymbolVal(nullptr, CE, LCtx,
- C.blockCount());
+ lastElement = C.getSValBuilder().conjureSymbolVal(
+ nullptr, Call.getOriginExpr(), LCtx, C.blockCount());
// The byte after the last byte copied is the return value.
- state = state->BindExpr(CE, LCtx, lastElement);
+ state = state->BindExpr(Call.getOriginExpr(), LCtx, lastElement);
} else {
// All other copies return the destination buffer.
// (Well, bcopy() has a void return type, but this won't hurt.)
- state = state->BindExpr(CE, LCtx, destVal);
+ state = state->BindExpr(Call.getOriginExpr(), LCtx, destVal);
}
// Invalidate the destination (regular invalidation without pointer-escaping
@@ -1210,76 +1376,82 @@ void CStringChecker::evalCopyCommon(CheckerContext &C, const CallExpr *CE,
// can use LazyCompoundVals to copy the source values into the destination.
// This would probably remove any existing bindings past the end of the
// copied region, but that's still an improvement over blank invalidation.
- state =
- InvalidateBuffer(C, state, Dest.Expression, C.getSVal(Dest.Expression),
- /*IsSourceBuffer*/ false, Size.Expression);
+ state = invalidateDestinationBufferBySize(
+ C, state, Dest.Expression, C.getSVal(Dest.Expression), sizeVal,
+ Size.Expression->getType());
// Invalidate the source (const-invalidation without const-pointer-escaping
// the address of the top-level region).
- state = InvalidateBuffer(C, state, Source.Expression,
- C.getSVal(Source.Expression),
- /*IsSourceBuffer*/ true, nullptr);
+ state = invalidateSourceBuffer(C, state, Source.Expression,
+ C.getSVal(Source.Expression));
C.addTransition(state);
}
}
-void CStringChecker::evalMemcpy(CheckerContext &C, const CallExpr *CE) const {
+void CStringChecker::evalMemcpy(CheckerContext &C, const CallEvent &Call,
+ CharKind CK) const {
// void *memcpy(void *restrict dst, const void *restrict src, size_t n);
// The return value is the address of the destination buffer.
- DestinationArgExpr Dest = {CE->getArg(0), 0};
- SourceArgExpr Src = {CE->getArg(1), 1};
- SizeArgExpr Size = {CE->getArg(2), 2};
+ DestinationArgExpr Dest = {{Call.getArgExpr(0), 0}};
+ SourceArgExpr Src = {{Call.getArgExpr(1), 1}};
+ SizeArgExpr Size = {{Call.getArgExpr(2), 2}};
ProgramStateRef State = C.getState();
constexpr bool IsRestricted = true;
constexpr bool IsMempcpy = false;
- evalCopyCommon(C, CE, State, Size, Dest, Src, IsRestricted, IsMempcpy);
+ evalCopyCommon(C, Call, State, Size, Dest, Src, IsRestricted, IsMempcpy, CK);
}
-void CStringChecker::evalMempcpy(CheckerContext &C, const CallExpr *CE) const {
+void CStringChecker::evalMempcpy(CheckerContext &C, const CallEvent &Call,
+ CharKind CK) const {
// void *mempcpy(void *restrict dst, const void *restrict src, size_t n);
// The return value is a pointer to the byte following the last written byte.
- DestinationArgExpr Dest = {CE->getArg(0), 0};
- SourceArgExpr Src = {CE->getArg(1), 1};
- SizeArgExpr Size = {CE->getArg(2), 2};
+ DestinationArgExpr Dest = {{Call.getArgExpr(0), 0}};
+ SourceArgExpr Src = {{Call.getArgExpr(1), 1}};
+ SizeArgExpr Size = {{Call.getArgExpr(2), 2}};
constexpr bool IsRestricted = true;
constexpr bool IsMempcpy = true;
- evalCopyCommon(C, CE, C.getState(), Size, Dest, Src, IsRestricted, IsMempcpy);
+ evalCopyCommon(C, Call, C.getState(), Size, Dest, Src, IsRestricted,
+ IsMempcpy, CK);
}
-void CStringChecker::evalMemmove(CheckerContext &C, const CallExpr *CE) const {
+void CStringChecker::evalMemmove(CheckerContext &C, const CallEvent &Call,
+ CharKind CK) const {
// void *memmove(void *dst, const void *src, size_t n);
// The return value is the address of the destination buffer.
- DestinationArgExpr Dest = {CE->getArg(0), 0};
- SourceArgExpr Src = {CE->getArg(1), 1};
- SizeArgExpr Size = {CE->getArg(2), 2};
+ DestinationArgExpr Dest = {{Call.getArgExpr(0), 0}};
+ SourceArgExpr Src = {{Call.getArgExpr(1), 1}};
+ SizeArgExpr Size = {{Call.getArgExpr(2), 2}};
constexpr bool IsRestricted = false;
constexpr bool IsMempcpy = false;
- evalCopyCommon(C, CE, C.getState(), Size, Dest, Src, IsRestricted, IsMempcpy);
+ evalCopyCommon(C, Call, C.getState(), Size, Dest, Src, IsRestricted,
+ IsMempcpy, CK);
}
-void CStringChecker::evalBcopy(CheckerContext &C, const CallExpr *CE) const {
+void CStringChecker::evalBcopy(CheckerContext &C, const CallEvent &Call) const {
// void bcopy(const void *src, void *dst, size_t n);
- SourceArgExpr Src(CE->getArg(0), 0);
- DestinationArgExpr Dest = {CE->getArg(1), 1};
- SizeArgExpr Size = {CE->getArg(2), 2};
+ SourceArgExpr Src{{Call.getArgExpr(0), 0}};
+ DestinationArgExpr Dest = {{Call.getArgExpr(1), 1}};
+ SizeArgExpr Size = {{Call.getArgExpr(2), 2}};
constexpr bool IsRestricted = false;
constexpr bool IsMempcpy = false;
- evalCopyCommon(C, CE, C.getState(), Size, Dest, Src, IsRestricted, IsMempcpy);
+ evalCopyCommon(C, Call, C.getState(), Size, Dest, Src, IsRestricted,
+ IsMempcpy, CharKind::Regular);
}
-void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
+void CStringChecker::evalMemcmp(CheckerContext &C, const CallEvent &Call,
+ CharKind CK) const {
// int memcmp(const void *s1, const void *s2, size_t n);
CurrentFunctionDescription = "memory comparison function";
- AnyArgExpr Left = {CE->getArg(0), 0};
- AnyArgExpr Right = {CE->getArg(1), 1};
- SizeArgExpr Size = {CE->getArg(2), 2};
+ AnyArgExpr Left = {Call.getArgExpr(0), 0};
+ AnyArgExpr Right = {Call.getArgExpr(1), 1};
+ SizeArgExpr Size = {{Call.getArgExpr(2), 2}};
ProgramStateRef State = C.getState();
SValBuilder &Builder = C.getSValBuilder();
@@ -1297,7 +1469,8 @@ void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
// have to check either of the buffers.
if (stateZeroSize) {
State = stateZeroSize;
- State = State->BindExpr(CE, LCtx, Builder.makeZeroVal(CE->getType()));
+ State = State->BindExpr(Call.getOriginExpr(), LCtx,
+ Builder.makeZeroVal(Call.getResultType()));
C.addTransition(State);
}
@@ -1323,8 +1496,8 @@ void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
State = SameBuffer;
State = CheckBufferAccess(C, State, Left, Size, AccessKind::read);
if (State) {
- State =
- SameBuffer->BindExpr(CE, LCtx, Builder.makeZeroVal(CE->getType()));
+ State = SameBuffer->BindExpr(Call.getOriginExpr(), LCtx,
+ Builder.makeZeroVal(Call.getResultType()));
C.addTransition(State);
}
return;
@@ -1333,37 +1506,39 @@ void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
// If the two arguments might be different buffers, we have to check
// the size of both of them.
assert(NotSameBuffer);
- State = CheckBufferAccess(C, State, Right, Size, AccessKind::read);
- State = CheckBufferAccess(C, State, Left, Size, AccessKind::read);
+ State = CheckBufferAccess(C, State, Right, Size, AccessKind::read, CK);
+ State = CheckBufferAccess(C, State, Left, Size, AccessKind::read, CK);
if (State) {
// The return value is the comparison result, which we don't know.
- SVal CmpV = Builder.conjureSymbolVal(nullptr, CE, LCtx, C.blockCount());
- State = State->BindExpr(CE, LCtx, CmpV);
+ SVal CmpV = Builder.conjureSymbolVal(nullptr, Call.getOriginExpr(), LCtx,
+ C.blockCount());
+ State = State->BindExpr(Call.getOriginExpr(), LCtx, CmpV);
C.addTransition(State);
}
}
}
void CStringChecker::evalstrLength(CheckerContext &C,
- const CallExpr *CE) const {
+ const CallEvent &Call) const {
// size_t strlen(const char *s);
- evalstrLengthCommon(C, CE, /* IsStrnlen = */ false);
+ evalstrLengthCommon(C, Call, /* IsStrnlen = */ false);
}
void CStringChecker::evalstrnLength(CheckerContext &C,
- const CallExpr *CE) const {
+ const CallEvent &Call) const {
// size_t strnlen(const char *s, size_t maxlen);
- evalstrLengthCommon(C, CE, /* IsStrnlen = */ true);
+ evalstrLengthCommon(C, Call, /* IsStrnlen = */ true);
}
-void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
+void CStringChecker::evalstrLengthCommon(CheckerContext &C,
+ const CallEvent &Call,
bool IsStrnlen) const {
CurrentFunctionDescription = "string length function";
ProgramStateRef state = C.getState();
const LocationContext *LCtx = C.getLocationContext();
if (IsStrnlen) {
- const Expr *maxlenExpr = CE->getArg(1);
+ const Expr *maxlenExpr = Call.getArgExpr(1);
SVal maxlenVal = state->getSVal(maxlenExpr, LCtx);
ProgramStateRef stateZeroSize, stateNonZeroSize;
@@ -1373,8 +1548,8 @@ void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
// If the size can be zero, the result will be 0 in that case, and we don't
// have to check the string itself.
if (stateZeroSize) {
- SVal zero = C.getSValBuilder().makeZeroVal(CE->getType());
- stateZeroSize = stateZeroSize->BindExpr(CE, LCtx, zero);
+ SVal zero = C.getSValBuilder().makeZeroVal(Call.getResultType());
+ stateZeroSize = stateZeroSize->BindExpr(Call.getOriginExpr(), LCtx, zero);
C.addTransition(stateZeroSize);
}
@@ -1387,7 +1562,7 @@ void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
}
// Check that the string argument is non-null.
- AnyArgExpr Arg = {CE->getArg(0), 0};
+ AnyArgExpr Arg = {Call.getArgExpr(0), 0};
SVal ArgVal = state->getSVal(Arg.Expression, LCtx);
state = checkNonNull(C, state, Arg, ArgVal);
@@ -1410,11 +1585,11 @@ void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
// It's a little unfortunate to be getting this again,
// but it's not that expensive...
- const Expr *maxlenExpr = CE->getArg(1);
+ const Expr *maxlenExpr = Call.getArgExpr(1);
SVal maxlenVal = state->getSVal(maxlenExpr, LCtx);
- Optional<NonLoc> strLengthNL = strLength.getAs<NonLoc>();
- Optional<NonLoc> maxlenValNL = maxlenVal.getAs<NonLoc>();
+ std::optional<NonLoc> strLengthNL = strLength.getAs<NonLoc>();
+ std::optional<NonLoc> maxlenValNL = maxlenVal.getAs<NonLoc>();
if (strLengthNL && maxlenValNL) {
ProgramStateRef stateStringTooLong, stateStringNotTooLong;
@@ -1439,8 +1614,8 @@ void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
// no guarantee the full string length will actually be returned.
// All we know is the return value is the min of the string length
// and the limit. This is better than nothing.
- result = C.getSValBuilder().conjureSymbolVal(nullptr, CE, LCtx,
- C.blockCount());
+ result = C.getSValBuilder().conjureSymbolVal(
+ nullptr, Call.getOriginExpr(), LCtx, C.blockCount());
NonLoc resultNL = result.castAs<NonLoc>();
if (strLengthNL) {
@@ -1463,78 +1638,85 @@ void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
// If we don't know the length of the string, conjure a return
// value, so it can be used in constraints, at least.
if (result.isUnknown()) {
- result = C.getSValBuilder().conjureSymbolVal(nullptr, CE, LCtx,
- C.blockCount());
+ result = C.getSValBuilder().conjureSymbolVal(
+ nullptr, Call.getOriginExpr(), LCtx, C.blockCount());
}
}
// Bind the return value.
assert(!result.isUnknown() && "Should have conjured a value by now");
- state = state->BindExpr(CE, LCtx, result);
+ state = state->BindExpr(Call.getOriginExpr(), LCtx, result);
C.addTransition(state);
}
-void CStringChecker::evalStrcpy(CheckerContext &C, const CallExpr *CE) const {
+void CStringChecker::evalStrcpy(CheckerContext &C,
+ const CallEvent &Call) const {
// char *strcpy(char *restrict dst, const char *restrict src);
- evalStrcpyCommon(C, CE,
+ evalStrcpyCommon(C, Call,
/* ReturnEnd = */ false,
/* IsBounded = */ false,
/* appendK = */ ConcatFnKind::none);
}
-void CStringChecker::evalStrncpy(CheckerContext &C, const CallExpr *CE) const {
+void CStringChecker::evalStrncpy(CheckerContext &C,
+ const CallEvent &Call) const {
// char *strncpy(char *restrict dst, const char *restrict src, size_t n);
- evalStrcpyCommon(C, CE,
+ evalStrcpyCommon(C, Call,
/* ReturnEnd = */ false,
/* IsBounded = */ true,
/* appendK = */ ConcatFnKind::none);
}
-void CStringChecker::evalStpcpy(CheckerContext &C, const CallExpr *CE) const {
+void CStringChecker::evalStpcpy(CheckerContext &C,
+ const CallEvent &Call) const {
// char *stpcpy(char *restrict dst, const char *restrict src);
- evalStrcpyCommon(C, CE,
+ evalStrcpyCommon(C, Call,
/* ReturnEnd = */ true,
/* IsBounded = */ false,
/* appendK = */ ConcatFnKind::none);
}
-void CStringChecker::evalStrlcpy(CheckerContext &C, const CallExpr *CE) const {
+void CStringChecker::evalStrlcpy(CheckerContext &C,
+ const CallEvent &Call) const {
// size_t strlcpy(char *dest, const char *src, size_t size);
- evalStrcpyCommon(C, CE,
+ evalStrcpyCommon(C, Call,
/* ReturnEnd = */ true,
/* IsBounded = */ true,
/* appendK = */ ConcatFnKind::none,
/* returnPtr = */ false);
}
-void CStringChecker::evalStrcat(CheckerContext &C, const CallExpr *CE) const {
+void CStringChecker::evalStrcat(CheckerContext &C,
+ const CallEvent &Call) const {
// char *strcat(char *restrict s1, const char *restrict s2);
- evalStrcpyCommon(C, CE,
+ evalStrcpyCommon(C, Call,
/* ReturnEnd = */ false,
/* IsBounded = */ false,
/* appendK = */ ConcatFnKind::strcat);
}
-void CStringChecker::evalStrncat(CheckerContext &C, const CallExpr *CE) const {
- //char *strncat(char *restrict s1, const char *restrict s2, size_t n);
- evalStrcpyCommon(C, CE,
+void CStringChecker::evalStrncat(CheckerContext &C,
+ const CallEvent &Call) const {
+ // char *strncat(char *restrict s1, const char *restrict s2, size_t n);
+ evalStrcpyCommon(C, Call,
/* ReturnEnd = */ false,
/* IsBounded = */ true,
/* appendK = */ ConcatFnKind::strcat);
}
-void CStringChecker::evalStrlcat(CheckerContext &C, const CallExpr *CE) const {
+void CStringChecker::evalStrlcat(CheckerContext &C,
+ const CallEvent &Call) const {
// size_t strlcat(char *dst, const char *src, size_t size);
// It will append at most size - strlen(dst) - 1 bytes,
// NULL-terminating the result.
- evalStrcpyCommon(C, CE,
+ evalStrcpyCommon(C, Call,
/* ReturnEnd = */ false,
/* IsBounded = */ true,
/* appendK = */ ConcatFnKind::strlcat,
/* returnPtr = */ false);
}
-void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
+void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallEvent &Call,
bool ReturnEnd, bool IsBounded,
ConcatFnKind appendK,
bool returnPtr) const {
@@ -1547,14 +1729,14 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
const LocationContext *LCtx = C.getLocationContext();
// Check that the destination is non-null.
- DestinationArgExpr Dst = {CE->getArg(0), 0};
+ DestinationArgExpr Dst = {{Call.getArgExpr(0), 0}};
SVal DstVal = state->getSVal(Dst.Expression, LCtx);
state = checkNonNull(C, state, Dst, DstVal);
if (!state)
return;
// Check that the source is non-null.
- SourceArgExpr srcExpr = {CE->getArg(1), 1};
+ SourceArgExpr srcExpr = {{Call.getArgExpr(1), 1}};
SVal srcVal = state->getSVal(srcExpr.Expression, LCtx);
state = checkNonNull(C, state, srcExpr, srcVal);
if (!state)
@@ -1562,11 +1744,11 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// Get the string length of the source.
SVal strLength = getCStringLength(C, state, srcExpr.Expression, srcVal);
- Optional<NonLoc> strLengthNL = strLength.getAs<NonLoc>();
+ std::optional<NonLoc> strLengthNL = strLength.getAs<NonLoc>();
// Get the string length of the destination buffer.
SVal dstStrLength = getCStringLength(C, state, Dst.Expression, DstVal);
- Optional<NonLoc> dstStrLengthNL = dstStrLength.getAs<NonLoc>();
+ std::optional<NonLoc> dstStrLengthNL = dstStrLength.getAs<NonLoc>();
// If the source isn't a valid C string, give up.
if (strLength.isUndef())
@@ -1585,11 +1767,12 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// FIXME: Why do we choose the srcExpr if the access has no size?
// Note that the 3rd argument of the call would be the size parameter.
- SizeArgExpr SrcExprAsSizeDummy = {srcExpr.Expression, srcExpr.ArgumentIndex};
+ SizeArgExpr SrcExprAsSizeDummy = {
+ {srcExpr.Expression, srcExpr.ArgumentIndex}};
state = CheckOverlap(
C, state,
- (IsBounded ? SizeArgExpr{CE->getArg(2), 2} : SrcExprAsSizeDummy), Dst,
- srcExpr);
+ (IsBounded ? SizeArgExpr{{Call.getArgExpr(2), 2}} : SrcExprAsSizeDummy),
+ Dst, srcExpr);
if (!state)
return;
@@ -1597,14 +1780,14 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// If the function is strncpy, strncat, etc... it is bounded.
if (IsBounded) {
// Get the max number of characters to copy.
- SizeArgExpr lenExpr = {CE->getArg(2), 2};
+ SizeArgExpr lenExpr = {{Call.getArgExpr(2), 2}};
SVal lenVal = state->getSVal(lenExpr.Expression, LCtx);
// Protect against misdeclared strncpy().
lenVal =
svalBuilder.evalCast(lenVal, sizeTy, lenExpr.Expression->getType());
- Optional<NonLoc> lenValNL = lenVal.getAs<NonLoc>();
+ std::optional<NonLoc> lenValNL = lenVal.getAs<NonLoc>();
// If we know both values, we might be able to figure out how much
// we're copying.
@@ -1641,12 +1824,12 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// amountCopied = min (size - dstLen - 1 , srcLen)
SVal freeSpace = svalBuilder.evalBinOpNN(state, BO_Sub, *lenValNL,
*dstStrLengthNL, sizeTy);
- if (!freeSpace.getAs<NonLoc>())
+ if (!isa<NonLoc>(freeSpace))
return;
freeSpace =
svalBuilder.evalBinOp(state, BO_Sub, freeSpace,
svalBuilder.makeIntVal(1, sizeTy), sizeTy);
- Optional<NonLoc> freeSpaceNL = freeSpace.getAs<NonLoc>();
+ std::optional<NonLoc> freeSpaceNL = freeSpace.getAs<NonLoc>();
// While unlikely, it is possible that the subtraction is
// too complex to compute, let's check whether it succeeded.
@@ -1711,16 +1894,19 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// If the size is known to be zero, we're done.
if (StateZeroSize && !StateNonZeroSize) {
if (returnPtr) {
- StateZeroSize = StateZeroSize->BindExpr(CE, LCtx, DstVal);
+ StateZeroSize =
+ StateZeroSize->BindExpr(Call.getOriginExpr(), LCtx, DstVal);
} else {
if (appendK == ConcatFnKind::none) {
// strlcpy returns strlen(src)
- StateZeroSize = StateZeroSize->BindExpr(CE, LCtx, strLength);
+ StateZeroSize = StateZeroSize->BindExpr(Call.getOriginExpr(),
+ LCtx, strLength);
} else {
// strlcat returns strlen(src) + strlen(dst)
SVal retSize = svalBuilder.evalBinOp(
state, BO_Add, strLength, dstStrLength, sizeTy);
- StateZeroSize = StateZeroSize->BindExpr(CE, LCtx, retSize);
+ StateZeroSize =
+ StateZeroSize->BindExpr(Call.getOriginExpr(), LCtx, retSize);
}
}
C.addTransition(StateZeroSize);
@@ -1771,7 +1957,7 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
*dstStrLengthNL, sizeTy);
}
- Optional<NonLoc> amountCopiedNL = amountCopied.getAs<NonLoc>();
+ std::optional<NonLoc> amountCopiedNL = amountCopied.getAs<NonLoc>();
// If we know both string lengths, we might know the final string length.
if (amountCopiedNL && dstStrLengthNL) {
@@ -1789,10 +1975,12 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
if (finalStrLength.isUnknown()) {
// Try to get a "hypothetical" string length symbol, which we can later
// set as a real value if that turns out to be the case.
- finalStrLength = getCStringLength(C, state, CE, DstVal, true);
+ finalStrLength =
+ getCStringLength(C, state, Call.getOriginExpr(), DstVal, true);
assert(!finalStrLength.isUndef());
- if (Optional<NonLoc> finalStrLengthNL = finalStrLength.getAs<NonLoc>()) {
+ if (std::optional<NonLoc> finalStrLengthNL =
+ finalStrLength.getAs<NonLoc>()) {
if (amountCopiedNL && appendK == ConcatFnKind::none) {
// we overwrite dst string with the src
// finalStrLength >= srcStrLength
@@ -1843,28 +2031,38 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// If the destination is a MemRegion, try to check for a buffer overflow and
// record the new string length.
- if (Optional<loc::MemRegionVal> dstRegVal =
- DstVal.getAs<loc::MemRegionVal>()) {
+ if (std::optional<loc::MemRegionVal> dstRegVal =
+ DstVal.getAs<loc::MemRegionVal>()) {
QualType ptrTy = Dst.Expression->getType();
// If we have an exact value on a bounded copy, use that to check for
// overflows, rather than our estimate about how much is actually copied.
- if (Optional<NonLoc> maxLastNL = maxLastElementIndex.getAs<NonLoc>()) {
+ if (std::optional<NonLoc> maxLastNL = maxLastElementIndex.getAs<NonLoc>()) {
SVal maxLastElement =
svalBuilder.evalBinOpLN(state, BO_Add, *dstRegVal, *maxLastNL, ptrTy);
+ // Check if the first byte of the destination is writable.
+ state = CheckLocation(C, state, Dst, DstVal, AccessKind::write);
+ if (!state)
+ return;
+ // Check if the last byte of the destination is writable.
state = CheckLocation(C, state, Dst, maxLastElement, AccessKind::write);
if (!state)
return;
}
// Then, if the final length is known...
- if (Optional<NonLoc> knownStrLength = finalStrLength.getAs<NonLoc>()) {
+ if (std::optional<NonLoc> knownStrLength = finalStrLength.getAs<NonLoc>()) {
SVal lastElement = svalBuilder.evalBinOpLN(state, BO_Add, *dstRegVal,
*knownStrLength, ptrTy);
// ...and we haven't checked the bound, we'll check the actual copy.
if (!boundWarning) {
+ // Check if the first byte of the destination is writable.
+ state = CheckLocation(C, state, Dst, DstVal, AccessKind::write);
+ if (!state)
+ return;
+ // Check if the last byte of the destination is writable.
state = CheckLocation(C, state, Dst, lastElement, AccessKind::write);
if (!state)
return;
@@ -1882,13 +2080,13 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// can use LazyCompoundVals to copy the source values into the destination.
// This would probably remove any existing bindings past the end of the
// string, but that's still an improvement over blank invalidation.
- state = InvalidateBuffer(C, state, Dst.Expression, *dstRegVal,
- /*IsSourceBuffer*/ false, nullptr);
+ state = invalidateDestinationBufferBySize(C, state, Dst.Expression,
+ *dstRegVal, amountCopied,
+ C.getASTContext().getSizeType());
// Invalidate the source (const-invalidation without const-pointer-escaping
// the address of the top-level region).
- state = InvalidateBuffer(C, state, srcExpr.Expression, srcVal,
- /*IsSourceBuffer*/ true, nullptr);
+ state = invalidateSourceBuffer(C, state, srcExpr.Expression, srcVal);
// Set the C string length of the destination, if we know it.
if (IsBounded && (appendK == ConcatFnKind::none)) {
@@ -1908,51 +2106,54 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// If this is a stpcpy-style copy, but we were unable to check for a buffer
// overflow, we still need a result. Conjure a return value.
if (ReturnEnd && Result.isUnknown()) {
- Result = svalBuilder.conjureSymbolVal(nullptr, CE, LCtx, C.blockCount());
+ Result = svalBuilder.conjureSymbolVal(nullptr, Call.getOriginExpr(), LCtx,
+ C.blockCount());
}
}
// Set the return value.
- state = state->BindExpr(CE, LCtx, Result);
+ state = state->BindExpr(Call.getOriginExpr(), LCtx, Result);
C.addTransition(state);
}
-void CStringChecker::evalStrcmp(CheckerContext &C, const CallExpr *CE) const {
+void CStringChecker::evalStrcmp(CheckerContext &C,
+ const CallEvent &Call) const {
//int strcmp(const char *s1, const char *s2);
- evalStrcmpCommon(C, CE, /* IsBounded = */ false, /* IgnoreCase = */ false);
+ evalStrcmpCommon(C, Call, /* IsBounded = */ false, /* IgnoreCase = */ false);
}
-void CStringChecker::evalStrncmp(CheckerContext &C, const CallExpr *CE) const {
+void CStringChecker::evalStrncmp(CheckerContext &C,
+ const CallEvent &Call) const {
//int strncmp(const char *s1, const char *s2, size_t n);
- evalStrcmpCommon(C, CE, /* IsBounded = */ true, /* IgnoreCase = */ false);
+ evalStrcmpCommon(C, Call, /* IsBounded = */ true, /* IgnoreCase = */ false);
}
void CStringChecker::evalStrcasecmp(CheckerContext &C,
- const CallExpr *CE) const {
+ const CallEvent &Call) const {
//int strcasecmp(const char *s1, const char *s2);
- evalStrcmpCommon(C, CE, /* IsBounded = */ false, /* IgnoreCase = */ true);
+ evalStrcmpCommon(C, Call, /* IsBounded = */ false, /* IgnoreCase = */ true);
}
void CStringChecker::evalStrncasecmp(CheckerContext &C,
- const CallExpr *CE) const {
+ const CallEvent &Call) const {
//int strncasecmp(const char *s1, const char *s2, size_t n);
- evalStrcmpCommon(C, CE, /* IsBounded = */ true, /* IgnoreCase = */ true);
+ evalStrcmpCommon(C, Call, /* IsBounded = */ true, /* IgnoreCase = */ true);
}
-void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
- bool IsBounded, bool IgnoreCase) const {
+void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallEvent &Call,
+ bool IsBounded, bool IgnoreCase) const {
CurrentFunctionDescription = "string comparison function";
ProgramStateRef state = C.getState();
const LocationContext *LCtx = C.getLocationContext();
// Check that the first string is non-null
- AnyArgExpr Left = {CE->getArg(0), 0};
+ AnyArgExpr Left = {Call.getArgExpr(0), 0};
SVal LeftVal = state->getSVal(Left.Expression, LCtx);
state = checkNonNull(C, state, Left, LeftVal);
if (!state)
return;
// Check that the second string is non-null.
- AnyArgExpr Right = {CE->getArg(1), 1};
+ AnyArgExpr Right = {Call.getArgExpr(1), 1};
SVal RightVal = state->getSVal(Right.Expression, LCtx);
state = checkNonNull(C, state, Right, RightVal);
if (!state)
@@ -1983,8 +2184,9 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
// If the two arguments might be the same buffer, we know the result is 0,
// and we only need to check one size.
if (StSameBuf) {
- StSameBuf = StSameBuf->BindExpr(CE, LCtx,
- svalBuilder.makeZeroVal(CE->getType()));
+ StSameBuf =
+ StSameBuf->BindExpr(Call.getOriginExpr(), LCtx,
+ svalBuilder.makeZeroVal(Call.getResultType()));
C.addTransition(StSameBuf);
// If the two arguments are GUARANTEED to be the same, we're done!
@@ -2004,8 +2206,8 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
const StringLiteral *RightStrLiteral =
getCStringLiteral(C, state, Right.Expression, RightVal);
bool canComputeResult = false;
- SVal resultVal = svalBuilder.conjureSymbolVal(nullptr, CE, LCtx,
- C.blockCount());
+ SVal resultVal = svalBuilder.conjureSymbolVal(nullptr, Call.getOriginExpr(),
+ LCtx, C.blockCount());
if (LeftStrLiteral && RightStrLiteral) {
StringRef LeftStrRef = LeftStrLiteral->getString();
@@ -2013,7 +2215,7 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
if (IsBounded) {
// Get the max number of characters to compare.
- const Expr *lenExpr = CE->getArg(2);
+ const Expr *lenExpr = Call.getArgExpr(2);
SVal lenVal = state->getSVal(lenExpr, LCtx);
// If the length is known, we can get the right substrings.
@@ -2045,13 +2247,13 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
// The strcmp function returns an integer greater than, equal to, or less
// than zero, [c11, p7.24.4.2].
if (compareRes == 0) {
- resultVal = svalBuilder.makeIntVal(compareRes, CE->getType());
+ resultVal = svalBuilder.makeIntVal(compareRes, Call.getResultType());
}
else {
- DefinedSVal zeroVal = svalBuilder.makeIntVal(0, CE->getType());
+ DefinedSVal zeroVal = svalBuilder.makeIntVal(0, Call.getResultType());
// Constrain strcmp's result range based on the result of StringRef's
// comparison methods.
- BinaryOperatorKind op = (compareRes == 1) ? BO_GT : BO_LT;
+ BinaryOperatorKind op = (compareRes > 0) ? BO_GT : BO_LT;
SVal compareWithZero =
svalBuilder.evalBinOp(state, op, resultVal, zeroVal,
svalBuilder.getConditionType());
@@ -2061,20 +2263,21 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
}
}
- state = state->BindExpr(CE, LCtx, resultVal);
+ state = state->BindExpr(Call.getOriginExpr(), LCtx, resultVal);
// Record this as a possible path.
C.addTransition(state);
}
-void CStringChecker::evalStrsep(CheckerContext &C, const CallExpr *CE) const {
- //char *strsep(char **stringp, const char *delim);
- // Sanity: does the search string parameter match the return type?
- SourceArgExpr SearchStrPtr = {CE->getArg(0), 0};
+void CStringChecker::evalStrsep(CheckerContext &C,
+ const CallEvent &Call) const {
+ // char *strsep(char **stringp, const char *delim);
+ // Verify whether the search string parameter matches the return type.
+ SourceArgExpr SearchStrPtr = {{Call.getArgExpr(0), 0}};
QualType CharPtrTy = SearchStrPtr.Expression->getType()->getPointeeType();
- if (CharPtrTy.isNull() ||
- CE->getType().getUnqualifiedType() != CharPtrTy.getUnqualifiedType())
+ if (CharPtrTy.isNull() || Call.getResultType().getUnqualifiedType() !=
+ CharPtrTy.getUnqualifiedType())
return;
CurrentFunctionDescription = "strsep()";
@@ -2089,7 +2292,7 @@ void CStringChecker::evalStrsep(CheckerContext &C, const CallExpr *CE) const {
return;
// Check that the delimiter string is non-null.
- AnyArgExpr DelimStr = {CE->getArg(1), 1};
+ AnyArgExpr DelimStr = {Call.getArgExpr(1), 1};
SVal DelimStrVal = State->getSVal(DelimStr.Expression, LCtx);
State = checkNonNull(C, State, DelimStr, DelimStrVal);
if (!State)
@@ -2097,48 +2300,49 @@ void CStringChecker::evalStrsep(CheckerContext &C, const CallExpr *CE) const {
SValBuilder &SVB = C.getSValBuilder();
SVal Result;
- if (Optional<Loc> SearchStrLoc = SearchStrVal.getAs<Loc>()) {
+ if (std::optional<Loc> SearchStrLoc = SearchStrVal.getAs<Loc>()) {
// Get the current value of the search string pointer, as a char*.
Result = State->getSVal(*SearchStrLoc, CharPtrTy);
// Invalidate the search string, representing the change of one delimiter
// character to NUL.
- State = InvalidateBuffer(C, State, SearchStrPtr.Expression, Result,
- /*IsSourceBuffer*/ false, nullptr);
+ // As the replacement never overflows, do not invalidate its super region.
+ State = invalidateDestinationBufferNeverOverflows(
+ C, State, SearchStrPtr.Expression, Result);
// Overwrite the search string pointer. The new value is either an address
// further along in the same string, or NULL if there are no more tokens.
- State = State->bindLoc(*SearchStrLoc,
- SVB.conjureSymbolVal(getTag(),
- CE,
- LCtx,
- CharPtrTy,
- C.blockCount()),
- LCtx);
+ State =
+ State->bindLoc(*SearchStrLoc,
+ SVB.conjureSymbolVal(getTag(), Call.getOriginExpr(),
+ LCtx, CharPtrTy, C.blockCount()),
+ LCtx);
} else {
assert(SearchStrVal.isUnknown());
// Conjure a symbolic value. It's the best we can do.
- Result = SVB.conjureSymbolVal(nullptr, CE, LCtx, C.blockCount());
+ Result = SVB.conjureSymbolVal(nullptr, Call.getOriginExpr(), LCtx,
+ C.blockCount());
}
// Set the return value, and finish.
- State = State->BindExpr(CE, LCtx, Result);
+ State = State->BindExpr(Call.getOriginExpr(), LCtx, Result);
C.addTransition(State);
}
// These should probably be moved into a C++ standard library checker.
-void CStringChecker::evalStdCopy(CheckerContext &C, const CallExpr *CE) const {
- evalStdCopyCommon(C, CE);
+void CStringChecker::evalStdCopy(CheckerContext &C,
+ const CallEvent &Call) const {
+ evalStdCopyCommon(C, Call);
}
void CStringChecker::evalStdCopyBackward(CheckerContext &C,
- const CallExpr *CE) const {
- evalStdCopyCommon(C, CE);
+ const CallEvent &Call) const {
+ evalStdCopyCommon(C, Call);
}
void CStringChecker::evalStdCopyCommon(CheckerContext &C,
- const CallExpr *CE) const {
- if (!CE->getArg(2)->getType()->isPointerType())
+ const CallEvent &Call) const {
+ if (!Call.getArgExpr(2)->getType()->isPointerType())
return;
ProgramStateRef State = C.getState();
@@ -2151,26 +2355,30 @@ void CStringChecker::evalStdCopyCommon(CheckerContext &C,
// _OutputIterator __result)
// Invalidate the destination buffer
- const Expr *Dst = CE->getArg(2);
+ const Expr *Dst = Call.getArgExpr(2);
SVal DstVal = State->getSVal(Dst, LCtx);
- State = InvalidateBuffer(C, State, Dst, DstVal, /*IsSource=*/false,
- /*Size=*/nullptr);
+ // FIXME: As we do not know how many items are copied, we also invalidate the
+ // super region containing the target location.
+ State =
+ invalidateDestinationBufferAlwaysEscapeSuperRegion(C, State, Dst, DstVal);
SValBuilder &SVB = C.getSValBuilder();
- SVal ResultVal = SVB.conjureSymbolVal(nullptr, CE, LCtx, C.blockCount());
- State = State->BindExpr(CE, LCtx, ResultVal);
+ SVal ResultVal =
+ SVB.conjureSymbolVal(nullptr, Call.getOriginExpr(), LCtx, C.blockCount());
+ State = State->BindExpr(Call.getOriginExpr(), LCtx, ResultVal);
C.addTransition(State);
}
-void CStringChecker::evalMemset(CheckerContext &C, const CallExpr *CE) const {
+void CStringChecker::evalMemset(CheckerContext &C,
+ const CallEvent &Call) const {
// void *memset(void *s, int c, size_t n);
CurrentFunctionDescription = "memory set function";
- DestinationArgExpr Buffer = {CE->getArg(0), 0};
- AnyArgExpr CharE = {CE->getArg(1), 1};
- SizeArgExpr Size = {CE->getArg(2), 2};
+ DestinationArgExpr Buffer = {{Call.getArgExpr(0), 0}};
+ AnyArgExpr CharE = {Call.getArgExpr(1), 1};
+ SizeArgExpr Size = {{Call.getArgExpr(2), 2}};
ProgramStateRef State = C.getState();
@@ -2188,7 +2396,7 @@ void CStringChecker::evalMemset(CheckerContext &C, const CallExpr *CE) const {
// If the size is zero, there won't be any actual memory access, so
// just bind the return value to the buffer and return.
if (ZeroSize && !NonZeroSize) {
- ZeroSize = ZeroSize->BindExpr(CE, LCtx, BufferPtrVal);
+ ZeroSize = ZeroSize->BindExpr(Call.getOriginExpr(), LCtx, BufferPtrVal);
C.addTransition(ZeroSize);
return;
}
@@ -2210,15 +2418,15 @@ void CStringChecker::evalMemset(CheckerContext &C, const CallExpr *CE) const {
Size.Expression, C, State))
return;
- State = State->BindExpr(CE, LCtx, BufferPtrVal);
+ State = State->BindExpr(Call.getOriginExpr(), LCtx, BufferPtrVal);
C.addTransition(State);
}
-void CStringChecker::evalBzero(CheckerContext &C, const CallExpr *CE) const {
+void CStringChecker::evalBzero(CheckerContext &C, const CallEvent &Call) const {
CurrentFunctionDescription = "memory clearance function";
- DestinationArgExpr Buffer = {CE->getArg(0), 0};
- SizeArgExpr Size = {CE->getArg(1), 1};
+ DestinationArgExpr Buffer = {{Call.getArgExpr(0), 0}};
+ SizeArgExpr Size = {{Call.getArgExpr(1), 1}};
SVal Zero = C.getSValBuilder().makeZeroVal(C.getASTContext().IntTy);
ProgramStateRef State = C.getState();
@@ -2257,6 +2465,57 @@ void CStringChecker::evalBzero(CheckerContext &C, const CallExpr *CE) const {
C.addTransition(State);
}
+void CStringChecker::evalSprintf(CheckerContext &C,
+ const CallEvent &Call) const {
+ CurrentFunctionDescription = "'sprintf'";
+ const auto *CE = cast<CallExpr>(Call.getOriginExpr());
+ bool IsBI = CE->getBuiltinCallee() == Builtin::BI__builtin___sprintf_chk;
+ evalSprintfCommon(C, Call, /* IsBounded */ false, IsBI);
+}
+
+void CStringChecker::evalSnprintf(CheckerContext &C,
+ const CallEvent &Call) const {
+ CurrentFunctionDescription = "'snprintf'";
+ const auto *CE = cast<CallExpr>(Call.getOriginExpr());
+ bool IsBI = CE->getBuiltinCallee() == Builtin::BI__builtin___snprintf_chk;
+ evalSprintfCommon(C, Call, /* IsBounded */ true, IsBI);
+}
+
+void CStringChecker::evalSprintfCommon(CheckerContext &C, const CallEvent &Call,
+ bool IsBounded, bool IsBuiltin) const {
+ ProgramStateRef State = C.getState();
+ const auto *CE = cast<CallExpr>(Call.getOriginExpr());
+ DestinationArgExpr Dest = {{Call.getArgExpr(0), 0}};
+
+ const auto NumParams = Call.parameters().size();
+ assert(CE->getNumArgs() >= NumParams);
+
+ const auto AllArguments =
+ llvm::make_range(CE->getArgs(), CE->getArgs() + CE->getNumArgs());
+ const auto VariadicArguments = drop_begin(enumerate(AllArguments), NumParams);
+
+ for (const auto &[ArgIdx, ArgExpr] : VariadicArguments) {
+ // We consider only string buffers
+ if (const QualType type = ArgExpr->getType();
+ !type->isAnyPointerType() ||
+ !type->getPointeeType()->isAnyCharacterType())
+ continue;
+ SourceArgExpr Source = {{ArgExpr, unsigned(ArgIdx)}};
+
+ // Ensure the buffers do not overlap.
+ SizeArgExpr SrcExprAsSizeDummy = {
+ {Source.Expression, Source.ArgumentIndex}};
+ State = CheckOverlap(
+ C, State,
+ (IsBounded ? SizeArgExpr{{Call.getArgExpr(1), 1}} : SrcExprAsSizeDummy),
+ Dest, Source);
+ if (!State)
+ return;
+ }
+
+ C.addTransition(State);
+}
+
//===----------------------------------------------------------------------===//
// The driver method, and other Checker callbacks.
//===----------------------------------------------------------------------===//
@@ -2271,11 +2530,10 @@ CStringChecker::FnCheck CStringChecker::identifyCall(const CallEvent &Call,
if (!FD)
return nullptr;
- if (Call.isCalled(StdCopy)) {
+ if (StdCopy.matches(Call))
return &CStringChecker::evalStdCopy;
- } else if (Call.isCalled(StdCopyBackward)) {
+ if (StdCopyBackward.matches(Call))
return &CStringChecker::evalStdCopyBackward;
- }
// Pro-actively check that argument types are safe to do arithmetic upon.
// We do not want to crash if someone accidentally passes a structure
@@ -2302,8 +2560,8 @@ bool CStringChecker::evalCall(const CallEvent &Call, CheckerContext &C) const {
return false;
// Check and evaluate the call.
- const auto *CE = cast<CallExpr>(Call.getOriginExpr());
- (this->*Callback)(C, CE);
+ assert(isa<CallExpr>(Call.getOriginExpr()));
+ Callback(this, C, Call);
// If the evaluate call resulted in no change, chain to the next eval call
// handler.
@@ -2364,9 +2622,7 @@ CStringChecker::checkRegionChanges(ProgramStateRef state,
llvm::SmallPtrSet<const MemRegion *, 32> SuperRegions;
// First build sets for the changed regions and their super-regions.
- for (ArrayRef<const MemRegion *>::iterator
- I = Regions.begin(), E = Regions.end(); I != E; ++I) {
- const MemRegion *MR = *I;
+ for (const MemRegion *MR : Regions) {
Invalidated.insert(MR);
SuperRegions.insert(MR);
@@ -2379,10 +2635,7 @@ CStringChecker::checkRegionChanges(ProgramStateRef state,
CStringLengthTy::Factory &F = state->get_context<CStringLength>();
// Then loop over the entries in the current state.
- for (CStringLengthTy::iterator I = Entries.begin(),
- E = Entries.end(); I != E; ++I) {
- const MemRegion *MR = I.getKey();
-
+ for (const MemRegion *MR : llvm::make_first_range(Entries)) {
// Is this entry for a super-region of a changed region?
if (SuperRegions.count(MR)) {
Entries = F.remove(Entries, MR);
@@ -2408,13 +2661,9 @@ void CStringChecker::checkLiveSymbols(ProgramStateRef state,
// Mark all symbols in our string length map as valid.
CStringLengthTy Entries = state->get<CStringLength>();
- for (CStringLengthTy::iterator I = Entries.begin(), E = Entries.end();
- I != E; ++I) {
- SVal Len = I.getData();
-
- for (SymExpr::symbol_iterator si = Len.symbol_begin(),
- se = Len.symbol_end(); si != se; ++si)
- SR.markInUse(*si);
+ for (SVal Len : llvm::make_second_range(Entries)) {
+ for (SymbolRef Sym : Len.symbols())
+ SR.markInUse(Sym);
}
}
@@ -2426,12 +2675,10 @@ void CStringChecker::checkDeadSymbols(SymbolReaper &SR,
return;
CStringLengthTy::Factory &F = state->get_context<CStringLength>();
- for (CStringLengthTy::iterator I = Entries.begin(), E = Entries.end();
- I != E; ++I) {
- SVal Len = I.getData();
+ for (auto [Reg, Len] : Entries) {
if (SymbolRef Sym = Len.getAsSymbol()) {
if (SR.isDead(Sym))
- Entries = F.remove(Entries, I.getKey());
+ Entries = F.remove(Entries, Reg);
}
}
@@ -2460,3 +2707,4 @@ REGISTER_CHECKER(CStringNullArg)
REGISTER_CHECKER(CStringOutOfBounds)
REGISTER_CHECKER(CStringBufferOverlap)
REGISTER_CHECKER(CStringNotNullTerm)
+REGISTER_CHECKER(CStringUninitializedRead)
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CXXDeleteChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CXXDeleteChecker.cpp
new file mode 100644
index 000000000000..b4dee1e300e8
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CXXDeleteChecker.cpp
@@ -0,0 +1,238 @@
+//=== CXXDeleteChecker.cpp -------------------------------------*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the following new checkers for C++ delete expressions:
+//
+// * DeleteWithNonVirtualDtorChecker
+// Defines a checker for the OOP52-CPP CERT rule: Do not delete a
+// polymorphic object without a virtual destructor.
+//
+// Diagnostic flags -Wnon-virtual-dtor and -Wdelete-non-virtual-dtor
+// report if an object with a virtual function but a non-virtual
+// destructor exists or is deleted, respectively.
+//
+// This check exceeds them by comparing the dynamic and static types of
+// the object at the point of destruction and only warns if it happens
+// through a pointer to a base type without a virtual destructor. The
+// check places a note at the last point where the conversion from
+// derived to base happened.
+//
+// * CXXArrayDeleteChecker
+// Defines a checker for the EXP51-CPP CERT rule: Do not delete an array
+// through a pointer of the incorrect type.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class CXXDeleteChecker : public Checker<check::PreStmt<CXXDeleteExpr>> {
+protected:
+ class PtrCastVisitor : public BugReporterVisitor {
+ public:
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ static int X = 0;
+ ID.AddPointer(&X);
+ }
+ PathDiagnosticPieceRef VisitNode(const ExplodedNode *N,
+ BugReporterContext &BRC,
+ PathSensitiveBugReport &BR) override;
+ };
+
+ virtual void
+ checkTypedDeleteExpr(const CXXDeleteExpr *DE, CheckerContext &C,
+ const TypedValueRegion *BaseClassRegion,
+ const SymbolicRegion *DerivedClassRegion) const = 0;
+
+public:
+ void checkPreStmt(const CXXDeleteExpr *DE, CheckerContext &C) const;
+};
+
+class DeleteWithNonVirtualDtorChecker : public CXXDeleteChecker {
+ const BugType BT{
+ this, "Destruction of a polymorphic object with no virtual destructor"};
+
+ void
+ checkTypedDeleteExpr(const CXXDeleteExpr *DE, CheckerContext &C,
+ const TypedValueRegion *BaseClassRegion,
+ const SymbolicRegion *DerivedClassRegion) const override;
+};
+
+class CXXArrayDeleteChecker : public CXXDeleteChecker {
+ const BugType BT{this,
+ "Deleting an array of polymorphic objects is undefined"};
+
+ void
+ checkTypedDeleteExpr(const CXXDeleteExpr *DE, CheckerContext &C,
+ const TypedValueRegion *BaseClassRegion,
+ const SymbolicRegion *DerivedClassRegion) const override;
+};
+} // namespace
+
+void CXXDeleteChecker::checkPreStmt(const CXXDeleteExpr *DE,
+ CheckerContext &C) const {
+ const Expr *DeletedObj = DE->getArgument();
+ const MemRegion *MR = C.getSVal(DeletedObj).getAsRegion();
+ if (!MR)
+ return;
+
+ OverloadedOperatorKind DeleteKind =
+ DE->getOperatorDelete()->getOverloadedOperator();
+
+ if (DeleteKind != OO_Delete && DeleteKind != OO_Array_Delete)
+ return;
+
+ const auto *BaseClassRegion = MR->getAs<TypedValueRegion>();
+ const auto *DerivedClassRegion = MR->getBaseRegion()->getAs<SymbolicRegion>();
+ if (!BaseClassRegion || !DerivedClassRegion)
+ return;
+
+ checkTypedDeleteExpr(DE, C, BaseClassRegion, DerivedClassRegion);
+}
+
+void DeleteWithNonVirtualDtorChecker::checkTypedDeleteExpr(
+ const CXXDeleteExpr *DE, CheckerContext &C,
+ const TypedValueRegion *BaseClassRegion,
+ const SymbolicRegion *DerivedClassRegion) const {
+ const auto *BaseClass = BaseClassRegion->getValueType()->getAsCXXRecordDecl();
+ const auto *DerivedClass =
+ DerivedClassRegion->getSymbol()->getType()->getPointeeCXXRecordDecl();
+ if (!BaseClass || !DerivedClass)
+ return;
+
+ if (!BaseClass->hasDefinition() || !DerivedClass->hasDefinition())
+ return;
+
+ if (BaseClass->getDestructor()->isVirtual())
+ return;
+
+ if (!DerivedClass->isDerivedFrom(BaseClass))
+ return;
+
+ ExplodedNode *N = C.generateNonFatalErrorNode();
+ if (!N)
+ return;
+ auto R = std::make_unique<PathSensitiveBugReport>(BT, BT.getDescription(), N);
+
+ // Mark region of problematic base class for later use in the BugVisitor.
+ R->markInteresting(BaseClassRegion);
+ R->addVisitor<PtrCastVisitor>();
+ C.emitReport(std::move(R));
+}
+
+void CXXArrayDeleteChecker::checkTypedDeleteExpr(
+ const CXXDeleteExpr *DE, CheckerContext &C,
+ const TypedValueRegion *BaseClassRegion,
+ const SymbolicRegion *DerivedClassRegion) const {
+ const auto *BaseClass = BaseClassRegion->getValueType()->getAsCXXRecordDecl();
+ const auto *DerivedClass =
+ DerivedClassRegion->getSymbol()->getType()->getPointeeCXXRecordDecl();
+ if (!BaseClass || !DerivedClass)
+ return;
+
+ if (!BaseClass->hasDefinition() || !DerivedClass->hasDefinition())
+ return;
+
+ if (DE->getOperatorDelete()->getOverloadedOperator() != OO_Array_Delete)
+ return;
+
+ if (!DerivedClass->isDerivedFrom(BaseClass))
+ return;
+
+ ExplodedNode *N = C.generateNonFatalErrorNode();
+ if (!N)
+ return;
+
+ SmallString<256> Buf;
+ llvm::raw_svector_ostream OS(Buf);
+
+ QualType SourceType = BaseClassRegion->getValueType();
+ QualType TargetType =
+ DerivedClassRegion->getSymbol()->getType()->getPointeeType();
+
+ OS << "Deleting an array of '" << TargetType.getAsString()
+ << "' objects as their base class '"
+ << SourceType.getAsString(C.getASTContext().getPrintingPolicy())
+ << "' is undefined";
+
+ auto R = std::make_unique<PathSensitiveBugReport>(BT, OS.str(), N);
+
+ // Mark region of problematic base class for later use in the BugVisitor.
+ R->markInteresting(BaseClassRegion);
+ R->addVisitor<PtrCastVisitor>();
+ C.emitReport(std::move(R));
+}
+
+PathDiagnosticPieceRef
+CXXDeleteChecker::PtrCastVisitor::VisitNode(const ExplodedNode *N,
+ BugReporterContext &BRC,
+ PathSensitiveBugReport &BR) {
+ const Stmt *S = N->getStmtForDiagnostics();
+ if (!S)
+ return nullptr;
+
+ const auto *CastE = dyn_cast<CastExpr>(S);
+ if (!CastE)
+ return nullptr;
+
+ // FIXME: This way of getting base types does not support reference types.
+ QualType SourceType = CastE->getSubExpr()->getType()->getPointeeType();
+ QualType TargetType = CastE->getType()->getPointeeType();
+
+ if (SourceType.isNull() || TargetType.isNull() || SourceType == TargetType)
+ return nullptr;
+
+ // Region associated with the current cast expression.
+ const MemRegion *M = N->getSVal(CastE).getAsRegion();
+ if (!M)
+ return nullptr;
+
+ // Check if target region was marked as problematic previously.
+ if (!BR.isInteresting(M))
+ return nullptr;
+
+ SmallString<256> Buf;
+ llvm::raw_svector_ostream OS(Buf);
+
+ OS << "Casting from '" << SourceType.getAsString() << "' to '"
+ << TargetType.getAsString() << "' here";
+
+ PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
+ N->getLocationContext());
+ return std::make_shared<PathDiagnosticEventPiece>(Pos, OS.str(),
+ /*addPosRange=*/true);
+}
+
+void ento::registerCXXArrayDeleteChecker(CheckerManager &mgr) {
+ mgr.registerChecker<CXXArrayDeleteChecker>();
+}
+
+bool ento::shouldRegisterCXXArrayDeleteChecker(const CheckerManager &mgr) {
+ return true;
+}
+
+void ento::registerDeleteWithNonVirtualDtorChecker(CheckerManager &mgr) {
+ mgr.registerChecker<DeleteWithNonVirtualDtorChecker>();
+}
+
+bool ento::shouldRegisterDeleteWithNonVirtualDtorChecker(
+ const CheckerManager &mgr) {
+ return true;
+}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
index 3e46e2372516..f2e1f69c32cf 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
@@ -73,7 +73,7 @@ public:
CK_NumCheckKinds
};
- DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ bool ChecksEnabled[CK_NumCheckKinds] = {false};
// The original core.CallAndMessage checker name. This should rather be an
// array, as seen in MallocChecker and CStringChecker.
CheckerNameRef OriginalName;
@@ -123,11 +123,10 @@ private:
void LazyInit_BT(const char *desc, std::unique_ptr<BugType> &BT) const {
if (!BT)
- BT.reset(new BuiltinBug(OriginalName, desc));
+ BT.reset(new BugType(OriginalName, desc));
}
- bool uninitRefOrPointer(CheckerContext &C, const SVal &V,
- SourceRange ArgRange, const Expr *ArgEx,
- std::unique_ptr<BugType> &BT,
+ bool uninitRefOrPointer(CheckerContext &C, SVal V, SourceRange ArgRange,
+ const Expr *ArgEx, std::unique_ptr<BugType> &BT,
const ParmVarDecl *ParamDecl, const char *BD,
int ArgumentNumber) const;
};
@@ -185,7 +184,7 @@ static void describeUninitializedArgumentInCall(const CallEvent &Call,
}
bool CallAndMessageChecker::uninitRefOrPointer(
- CheckerContext &C, const SVal &V, SourceRange ArgRange, const Expr *ArgEx,
+ CheckerContext &C, SVal V, SourceRange ArgRange, const Expr *ArgEx,
std::unique_ptr<BugType> &BT, const ParmVarDecl *ParamDecl, const char *BD,
int ArgumentNumber) const {
@@ -263,7 +262,7 @@ public:
if (Find(FR))
return true;
} else {
- const SVal &V = StoreMgr.getBinding(store, loc::MemRegionVal(FR));
+ SVal V = StoreMgr.getBinding(store, loc::MemRegionVal(FR));
if (V.isUndef())
return true;
}
@@ -379,7 +378,7 @@ ProgramStateRef CallAndMessageChecker::checkFunctionPointerCall(
return nullptr;
}
if (!BT_call_undef)
- BT_call_undef.reset(new BuiltinBug(
+ BT_call_undef.reset(new BugType(
OriginalName,
"Called function pointer is an uninitialized pointer value"));
emitBadCall(BT_call_undef.get(), C, Callee);
@@ -395,7 +394,7 @@ ProgramStateRef CallAndMessageChecker::checkFunctionPointerCall(
return nullptr;
}
if (!BT_call_null)
- BT_call_null.reset(new BuiltinBug(
+ BT_call_null.reset(new BugType(
OriginalName, "Called function pointer is null (null dereference)"));
emitBadCall(BT_call_null.get(), C, Callee);
return nullptr;
@@ -450,7 +449,7 @@ ProgramStateRef CallAndMessageChecker::checkCXXMethodCall(
return nullptr;
}
if (!BT_cxx_call_undef)
- BT_cxx_call_undef.reset(new BuiltinBug(
+ BT_cxx_call_undef.reset(new BugType(
OriginalName, "Called C++ object pointer is uninitialized"));
emitBadCall(BT_cxx_call_undef.get(), C, CC->getCXXThisExpr());
return nullptr;
@@ -466,7 +465,7 @@ ProgramStateRef CallAndMessageChecker::checkCXXMethodCall(
}
if (!BT_cxx_call_null)
BT_cxx_call_null.reset(
- new BuiltinBug(OriginalName, "Called C++ object pointer is null"));
+ new BugType(OriginalName, "Called C++ object pointer is null"));
emitBadCall(BT_cxx_call_null.get(), C, CC->getCXXThisExpr());
return nullptr;
}
@@ -495,13 +494,13 @@ CallAndMessageChecker::checkCXXDeallocation(const CXXDeallocatorCall *DC,
return nullptr;
if (!BT_cxx_delete_undef)
BT_cxx_delete_undef.reset(
- new BuiltinBug(OriginalName, "Uninitialized argument value"));
+ new BugType(OriginalName, "Uninitialized argument value"));
if (DE->isArrayFormAsWritten())
Desc = "Argument to 'delete[]' is uninitialized";
else
Desc = "Argument to 'delete' is uninitialized";
- BugType *BT = BT_cxx_delete_undef.get();
- auto R = std::make_unique<PathSensitiveBugReport>(*BT, Desc, N);
+ auto R =
+ std::make_unique<PathSensitiveBugReport>(*BT_cxx_delete_undef, Desc, N);
bugreporter::trackExpressionValue(N, DE, *R);
C.emitReport(std::move(R));
return nullptr;
@@ -585,21 +584,21 @@ void CallAndMessageChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
switch (msg.getMessageKind()) {
case OCM_Message:
if (!BT_msg_undef)
- BT_msg_undef.reset(new BuiltinBug(OriginalName,
- "Receiver in message expression "
- "is an uninitialized value"));
+ BT_msg_undef.reset(new BugType(OriginalName,
+ "Receiver in message expression "
+ "is an uninitialized value"));
BT = BT_msg_undef.get();
break;
case OCM_PropertyAccess:
if (!BT_objc_prop_undef)
- BT_objc_prop_undef.reset(new BuiltinBug(
+ BT_objc_prop_undef.reset(new BugType(
OriginalName,
"Property access on an uninitialized object pointer"));
BT = BT_objc_prop_undef.get();
break;
case OCM_Subscript:
if (!BT_objc_subscript_undef)
- BT_objc_subscript_undef.reset(new BuiltinBug(
+ BT_objc_subscript_undef.reset(new BugType(
OriginalName,
"Subscript access on an uninitialized object pointer"));
BT = BT_objc_subscript_undef.get();
@@ -634,8 +633,8 @@ void CallAndMessageChecker::emitNilReceiverBug(CheckerContext &C,
}
if (!BT_msg_ret)
- BT_msg_ret.reset(new BuiltinBug(OriginalName,
- "Receiver in message expression is 'nil'"));
+ BT_msg_ret.reset(
+ new BugType(OriginalName, "Receiver in message expression is 'nil'"));
const ObjCMessageExpr *ME = msg.getOriginExpr();
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
index 2d2e14de3f2b..a50772f881f7 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
@@ -24,7 +24,7 @@ using namespace ento;
namespace {
class CastSizeChecker : public Checker< check::PreStmt<CastExpr> > {
- mutable std::unique_ptr<BuiltinBug> BT;
+ const BugType BT{this, "Cast region with wrong size."};
public:
void checkPreStmt(const CastExpr *CE, CheckerContext &C) const;
@@ -131,12 +131,10 @@ void CastSizeChecker::checkPreStmt(const CastExpr *CE,CheckerContext &C) const {
return;
if (ExplodedNode *errorNode = C.generateErrorNode()) {
- if (!BT)
- BT.reset(new BuiltinBug(this, "Cast region with wrong size.",
- "Cast a region whose size is not a multiple"
- " of the destination type size."));
- auto R = std::make_unique<PathSensitiveBugReport>(*BT, BT->getDescription(),
- errorNode);
+ constexpr llvm::StringLiteral Msg =
+ "Cast a region whose size is not a multiple of the destination type "
+ "size.";
+ auto R = std::make_unique<PathSensitiveBugReport>(BT, Msg, errorNode);
R->addRange(CE->getSourceRange());
C.emitReport(std::move(R));
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp
index 131c1345af99..f02d20d45678 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp
@@ -20,10 +20,11 @@
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h"
-#include "llvm/ADT/Optional.h"
+#include <optional>
#include <utility>
using namespace clang;
@@ -107,7 +108,7 @@ static const NoteTag *getNoteTag(CheckerContext &C,
bool CastSucceeds, bool IsKnownCast) {
std::string CastToName =
CastInfo ? CastInfo->to()->getAsCXXRecordDecl()->getNameAsString()
- : CastToTy->getPointeeCXXRecordDecl()->getNameAsString();
+ : CastToTy.getAsString();
Object = Object->IgnoreParenImpCasts();
return C.getNoteTag(
@@ -162,9 +163,9 @@ static const NoteTag *getNoteTag(CheckerContext &C,
bool First = true;
for (QualType CastToTy: CastToTyVec) {
std::string CastToName =
- CastToTy->getAsCXXRecordDecl() ?
- CastToTy->getAsCXXRecordDecl()->getNameAsString() :
- CastToTy->getPointeeCXXRecordDecl()->getNameAsString();
+ CastToTy->getAsCXXRecordDecl()
+ ? CastToTy->getAsCXXRecordDecl()->getNameAsString()
+ : CastToTy.getAsString();
Out << ' ' << ((CastToTyVec.size() == 1) ? "not" :
(First ? "neither" : "nor")) << " a '" << CastToName
<< '\'';
@@ -249,7 +250,7 @@ static void addCastTransition(const CallEvent &Call, DefinedOrUnknownSVal DV,
CastSucceeds);
SVal V = CastSucceeds ? C.getSValBuilder().evalCast(DV, CastToTy, CastFromTy)
- : C.getSValBuilder().makeNull();
+ : C.getSValBuilder().makeNullWithType(CastToTy);
C.addTransition(
State->BindExpr(Call.getOriginExpr(), C.getLocationContext(), V, false),
getNoteTag(C, CastInfo, CastToTy, Object, CastSucceeds, IsKnownCast));
@@ -358,7 +359,9 @@ static void evalNullParamNullReturn(const CallEvent &Call,
if (ProgramStateRef State = C.getState()->assume(DV, false))
C.addTransition(State->BindExpr(Call.getOriginExpr(),
C.getLocationContext(),
- C.getSValBuilder().makeNull(), false),
+ C.getSValBuilder().makeNullWithType(
+ Call.getOriginExpr()->getType()),
+ false),
C.getNoteTag("Assuming null pointer is passed into cast",
/*IsPrunable=*/true));
}
@@ -469,7 +472,7 @@ bool CastValueChecker::evalCall(const CallEvent &Call,
const CastCheck &Check = Lookup->first;
CallKind Kind = Lookup->second;
- Optional<DefinedOrUnknownSVal> DV;
+ std::optional<DefinedOrUnknownSVal> DV;
switch (Kind) {
case CallKind::Function: {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index 78b3c209ad6b..978bc0bb082f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -45,6 +45,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "llvm/Support/raw_ostream.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -98,18 +99,23 @@ class ObjCDeallocChecker
check::PointerEscape,
check::PreStmt<ReturnStmt>> {
- mutable IdentifierInfo *NSObjectII, *SenTestCaseII, *XCTestCaseII,
- *Block_releaseII, *CIFilterII;
+ mutable const IdentifierInfo *NSObjectII = nullptr;
+ mutable const IdentifierInfo *SenTestCaseII = nullptr;
+ mutable const IdentifierInfo *XCTestCaseII = nullptr;
+ mutable const IdentifierInfo *Block_releaseII = nullptr;
+ mutable const IdentifierInfo *CIFilterII = nullptr;
- mutable Selector DeallocSel, ReleaseSel;
+ mutable Selector DeallocSel;
+ mutable Selector ReleaseSel;
- std::unique_ptr<BugType> MissingReleaseBugType;
- std::unique_ptr<BugType> ExtraReleaseBugType;
- std::unique_ptr<BugType> MistakenDeallocBugType;
+ const BugType MissingReleaseBugType{this, "Missing ivar release (leak)",
+ categories::MemoryRefCount};
+ const BugType ExtraReleaseBugType{this, "Extra ivar release",
+ categories::MemoryRefCount};
+ const BugType MistakenDeallocBugType{this, "Mistaken dealloc",
+ categories::MemoryRefCount};
public:
- ObjCDeallocChecker();
-
void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& Mgr,
BugReporter &BR) const;
void checkBeginFunction(CheckerContext &Ctx) const;
@@ -282,11 +288,11 @@ void ObjCDeallocChecker::checkBeginFunction(
continue;
SVal LVal = State->getLValue(PropImpl->getPropertyIvarDecl(), SelfVal);
- Optional<Loc> LValLoc = LVal.getAs<Loc>();
+ std::optional<Loc> LValLoc = LVal.getAs<Loc>();
if (!LValLoc)
continue;
- SVal InitialVal = State->getSVal(LValLoc.getValue());
+ SVal InitialVal = State->getSVal(*LValLoc);
SymbolRef Symbol = InitialVal.getAsSymbol();
if (!Symbol || !isa<SymbolRegionValue>(Symbol))
continue;
@@ -320,7 +326,9 @@ ObjCDeallocChecker::getInstanceSymbolFromIvarSymbol(SymbolRef IvarSym) const {
if (!IvarRegion)
return nullptr;
- return IvarRegion->getSymbolicBase()->getSymbol();
+ const SymbolicRegion *SR = IvarRegion->getSymbolicBase();
+ assert(SR && "Symbolic base should not be nullptr");
+ return SR->getSymbol();
}
/// If we are in -dealloc or -dealloc is on the stack, handle the call if it is
@@ -576,7 +584,7 @@ void ObjCDeallocChecker::diagnoseMissingReleases(CheckerContext &C) const {
OS << " by a synthesized property but not released"
" before '[super dealloc]'";
- auto BR = std::make_unique<PathSensitiveBugReport>(*MissingReleaseBugType,
+ auto BR = std::make_unique<PathSensitiveBugReport>(MissingReleaseBugType,
OS.str(), ErrNode);
C.emitReport(std::move(BR));
}
@@ -698,7 +706,7 @@ bool ObjCDeallocChecker::diagnoseExtraRelease(SymbolRef ReleasedValue,
OS << " property but was released in 'dealloc'";
}
- auto BR = std::make_unique<PathSensitiveBugReport>(*ExtraReleaseBugType,
+ auto BR = std::make_unique<PathSensitiveBugReport>(ExtraReleaseBugType,
OS.str(), ErrNode);
BR->addRange(M.getOriginExpr()->getSourceRange());
@@ -740,7 +748,7 @@ bool ObjCDeallocChecker::diagnoseMistakenDealloc(SymbolRef DeallocedValue,
OS << "'" << *PropImpl->getPropertyIvarDecl()
<< "' should be released rather than deallocated";
- auto BR = std::make_unique<PathSensitiveBugReport>(*MistakenDeallocBugType,
+ auto BR = std::make_unique<PathSensitiveBugReport>(MistakenDeallocBugType,
OS.str(), ErrNode);
BR->addRange(M.getOriginExpr()->getSourceRange());
@@ -749,23 +757,6 @@ bool ObjCDeallocChecker::diagnoseMistakenDealloc(SymbolRef DeallocedValue,
return true;
}
-ObjCDeallocChecker::ObjCDeallocChecker()
- : NSObjectII(nullptr), SenTestCaseII(nullptr), XCTestCaseII(nullptr),
- CIFilterII(nullptr) {
-
- MissingReleaseBugType.reset(
- new BugType(this, "Missing ivar release (leak)",
- categories::MemoryRefCount));
-
- ExtraReleaseBugType.reset(
- new BugType(this, "Extra ivar release",
- categories::MemoryRefCount));
-
- MistakenDeallocBugType.reset(
- new BugType(this, "Mistaken dealloc",
- categories::MemoryRefCount));
-}
-
void ObjCDeallocChecker::initIdentifierInfoAndSelectors(
ASTContext &Ctx) const {
if (NSObjectII)
@@ -817,8 +808,8 @@ const ObjCPropertyDecl *ObjCDeallocChecker::findShadowedPropertyDecl(
IdentifierInfo *ID = PropDecl->getIdentifier();
DeclContext::lookup_result R = CatDecl->getClassInterface()->lookup(ID);
- for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E; ++I) {
- auto *ShadowedPropDecl = dyn_cast<ObjCPropertyDecl>(*I);
+ for (const NamedDecl *D : R) {
+ auto *ShadowedPropDecl = dyn_cast<ObjCPropertyDecl>(D);
if (!ShadowedPropDecl)
continue;
@@ -953,11 +944,11 @@ ObjCDeallocChecker::getValueReleasedByNillingOut(const ObjCMethodCall &M,
ProgramStateRef State = C.getState();
SVal LVal = State->getLValue(PropIvarDecl, ReceiverVal);
- Optional<Loc> LValLoc = LVal.getAs<Loc>();
+ std::optional<Loc> LValLoc = LVal.getAs<Loc>();
if (!LValLoc)
return nullptr;
- SVal CurrentValInIvar = State->getSVal(LValLoc.getValue());
+ SVal CurrentValInIvar = State->getSVal(*LValLoc);
return CurrentValInIvar.getAsSymbol();
}
@@ -1004,7 +995,7 @@ bool ObjCDeallocChecker::instanceDeallocIsOnStack(const CheckerContext &C,
return false;
}
-/// Returns true if the ID is a class in which which is known to have
+/// Returns true if the ID is a class in which is known to have
/// a separate teardown lifecycle. In this case, -dealloc warnings
/// about missing releases should be suppressed.
bool ObjCDeallocChecker::classHasSeparateTeardown(
@@ -1042,8 +1033,8 @@ bool ObjCDeallocChecker::isReleasedByCIFilterDealloc(
StringRef IvarName = PropImpl->getPropertyIvarDecl()->getName();
const char *ReleasePrefix = "input";
- if (!(PropName.startswith(ReleasePrefix) ||
- IvarName.startswith(ReleasePrefix))) {
+ if (!(PropName.starts_with(ReleasePrefix) ||
+ IvarName.starts_with(ReleasePrefix))) {
return false;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
index 175dfcef0df4..c8fe5c2ccf38 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines a CheckObjCInstMethSignature, a flow-insenstive check
+// This file defines a CheckObjCInstMethSignature, a flow-insensitive check
// that determines if an Objective-C class interface incorrectly redefines
// the method signature in a subclass.
//
@@ -55,13 +55,11 @@ static void CompareReturnTypes(const ObjCMethodDecl *MethDerived,
<< *MethAncestor->getClassInterface()
<< "', defines the instance method '";
MethDerived->getSelector().print(os);
- os << "' whose return type is '"
- << ResDerived.getAsString()
+ os << "' whose return type is '" << ResDerived
<< "'. A method with the same name (same selector) is also defined in "
"class '"
- << *MethAncestor->getClassInterface()
- << "' and has a return type of '"
- << ResAncestor.getAsString()
+ << *MethAncestor->getClassInterface() << "' and has a return type of '"
+ << ResAncestor
<< "'. These two types are incompatible, and may result in undefined "
"behavior for clients of these classes.";
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
index d06c87631bfb..17af1aebd6d2 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
@@ -27,7 +27,6 @@ using namespace ento;
static bool isArc4RandomAvailable(const ASTContext &Ctx) {
const llvm::Triple &T = Ctx.getTargetInfo().getTriple();
return T.getVendor() == llvm::Triple::Apple ||
- T.getOS() == llvm::Triple::CloudABI ||
T.isOSFreeBSD() ||
T.isOSNetBSD() ||
T.isOSOpenBSD() ||
@@ -36,20 +35,20 @@ static bool isArc4RandomAvailable(const ASTContext &Ctx) {
namespace {
struct ChecksFilter {
- DefaultBool check_bcmp;
- DefaultBool check_bcopy;
- DefaultBool check_bzero;
- DefaultBool check_gets;
- DefaultBool check_getpw;
- DefaultBool check_mktemp;
- DefaultBool check_mkstemp;
- DefaultBool check_strcpy;
- DefaultBool check_DeprecatedOrUnsafeBufferHandling;
- DefaultBool check_rand;
- DefaultBool check_vfork;
- DefaultBool check_FloatLoopCounter;
- DefaultBool check_UncheckedReturn;
- DefaultBool check_decodeValueOfObjCType;
+ bool check_bcmp = false;
+ bool check_bcopy = false;
+ bool check_bzero = false;
+ bool check_gets = false;
+ bool check_getpw = false;
+ bool check_mktemp = false;
+ bool check_mkstemp = false;
+ bool check_strcpy = false;
+ bool check_DeprecatedOrUnsafeBufferHandling = false;
+ bool check_rand = false;
+ bool check_vfork = false;
+ bool check_FloatLoopCounter = false;
+ bool check_UncheckedReturn = false;
+ bool check_decodeValueOfObjCType = false;
CheckerNameRef checkName_bcmp;
CheckerNameRef checkName_bcopy;
@@ -141,42 +140,42 @@ void WalkAST::VisitCallExpr(CallExpr *CE) {
if (!II) // if no identifier, not a simple C function
return;
StringRef Name = II->getName();
- if (Name.startswith("__builtin_"))
- Name = Name.substr(10);
+ Name.consume_front("__builtin_");
// Set the evaluation function by switching on the callee name.
- FnCheck evalFunction = llvm::StringSwitch<FnCheck>(Name)
- .Case("bcmp", &WalkAST::checkCall_bcmp)
- .Case("bcopy", &WalkAST::checkCall_bcopy)
- .Case("bzero", &WalkAST::checkCall_bzero)
- .Case("gets", &WalkAST::checkCall_gets)
- .Case("getpw", &WalkAST::checkCall_getpw)
- .Case("mktemp", &WalkAST::checkCall_mktemp)
- .Case("mkstemp", &WalkAST::checkCall_mkstemp)
- .Case("mkdtemp", &WalkAST::checkCall_mkstemp)
- .Case("mkstemps", &WalkAST::checkCall_mkstemp)
- .Cases("strcpy", "__strcpy_chk", &WalkAST::checkCall_strcpy)
- .Cases("strcat", "__strcat_chk", &WalkAST::checkCall_strcat)
- .Cases("sprintf", "vsprintf", "scanf", "wscanf", "fscanf", "fwscanf",
- "vscanf", "vwscanf", "vfscanf", "vfwscanf",
- &WalkAST::checkDeprecatedOrUnsafeBufferHandling)
- .Cases("sscanf", "swscanf", "vsscanf", "vswscanf", "swprintf",
- "snprintf", "vswprintf", "vsnprintf", "memcpy", "memmove",
- &WalkAST::checkDeprecatedOrUnsafeBufferHandling)
- .Cases("strncpy", "strncat", "memset",
- &WalkAST::checkDeprecatedOrUnsafeBufferHandling)
- .Case("drand48", &WalkAST::checkCall_rand)
- .Case("erand48", &WalkAST::checkCall_rand)
- .Case("jrand48", &WalkAST::checkCall_rand)
- .Case("lrand48", &WalkAST::checkCall_rand)
- .Case("mrand48", &WalkAST::checkCall_rand)
- .Case("nrand48", &WalkAST::checkCall_rand)
- .Case("lcong48", &WalkAST::checkCall_rand)
- .Case("rand", &WalkAST::checkCall_rand)
- .Case("rand_r", &WalkAST::checkCall_rand)
- .Case("random", &WalkAST::checkCall_random)
- .Case("vfork", &WalkAST::checkCall_vfork)
- .Default(nullptr);
+ FnCheck evalFunction =
+ llvm::StringSwitch<FnCheck>(Name)
+ .Case("bcmp", &WalkAST::checkCall_bcmp)
+ .Case("bcopy", &WalkAST::checkCall_bcopy)
+ .Case("bzero", &WalkAST::checkCall_bzero)
+ .Case("gets", &WalkAST::checkCall_gets)
+ .Case("getpw", &WalkAST::checkCall_getpw)
+ .Case("mktemp", &WalkAST::checkCall_mktemp)
+ .Case("mkstemp", &WalkAST::checkCall_mkstemp)
+ .Case("mkdtemp", &WalkAST::checkCall_mkstemp)
+ .Case("mkstemps", &WalkAST::checkCall_mkstemp)
+ .Cases("strcpy", "__strcpy_chk", &WalkAST::checkCall_strcpy)
+ .Cases("strcat", "__strcat_chk", &WalkAST::checkCall_strcat)
+ .Cases("sprintf", "vsprintf", "scanf", "wscanf", "fscanf", "fwscanf",
+ "vscanf", "vwscanf", "vfscanf", "vfwscanf",
+ &WalkAST::checkDeprecatedOrUnsafeBufferHandling)
+ .Cases("sscanf", "swscanf", "vsscanf", "vswscanf", "swprintf",
+ "snprintf", "vswprintf", "vsnprintf", "memcpy", "memmove",
+ &WalkAST::checkDeprecatedOrUnsafeBufferHandling)
+ .Cases("strncpy", "strncat", "memset", "fprintf",
+ &WalkAST::checkDeprecatedOrUnsafeBufferHandling)
+ .Case("drand48", &WalkAST::checkCall_rand)
+ .Case("erand48", &WalkAST::checkCall_rand)
+ .Case("jrand48", &WalkAST::checkCall_rand)
+ .Case("lrand48", &WalkAST::checkCall_rand)
+ .Case("mrand48", &WalkAST::checkCall_rand)
+ .Case("nrand48", &WalkAST::checkCall_rand)
+ .Case("lcong48", &WalkAST::checkCall_rand)
+ .Case("rand", &WalkAST::checkCall_rand)
+ .Case("rand_r", &WalkAST::checkCall_rand)
+ .Case("random", &WalkAST::checkCall_random)
+ .Case("vfork", &WalkAST::checkCall_vfork)
+ .Default(nullptr);
// If the callee isn't defined, it is not of security concern.
// Check and evaluate the call.
@@ -219,7 +218,6 @@ void WalkAST::VisitForStmt(ForStmt *FS) {
//===----------------------------------------------------------------------===//
// Check: floating point variable used as loop counter.
-// Originally: <rdar://problem/6336718>
// Implements: CERT security coding advisory FLP-30.
//===----------------------------------------------------------------------===//
@@ -325,7 +323,7 @@ void WalkAST::checkLoopConditionForFloat(const ForStmt *FS) {
llvm::raw_svector_ostream os(sbuf);
os << "Variable '" << drCond->getDecl()->getName()
- << "' with floating point type '" << drCond->getType().getAsString()
+ << "' with floating point type '" << drCond->getType()
<< "' should not be used as a loop counter";
ranges.push_back(drCond->getSourceRange());
@@ -467,8 +465,8 @@ void WalkAST::checkCall_bzero(const CallExpr *CE, const FunctionDecl *FD) {
//===----------------------------------------------------------------------===//
-// Check: Any use of 'gets' is insecure.
-// Originally: <rdar://problem/6335715>
+// Check: Any use of 'gets' is insecure. Most man pages literally says this.
+//
// Implements (part of): 300-BSI (buildsecurityin.us-cert.gov)
// CWE-242: Use of Inherently Dangerous Function
//===----------------------------------------------------------------------===//
@@ -739,10 +737,10 @@ void WalkAST::checkCall_strcat(const CallExpr *CE, const FunctionDecl *FD) {
// Check: Any use of 'sprintf', 'vsprintf', 'scanf', 'wscanf', 'fscanf',
// 'fwscanf', 'vscanf', 'vwscanf', 'vfscanf', 'vfwscanf', 'sscanf',
// 'swscanf', 'vsscanf', 'vswscanf', 'swprintf', 'snprintf', 'vswprintf',
-// 'vsnprintf', 'memcpy', 'memmove', 'strncpy', 'strncat', 'memset'
-// is deprecated since C11.
+// 'vsnprintf', 'memcpy', 'memmove', 'strncpy', 'strncat', 'memset',
+// 'fprintf' is deprecated since C11.
//
-// Use of 'sprintf', 'vsprintf', 'scanf', 'wscanf','fscanf',
+// Use of 'sprintf', 'fprintf', 'vsprintf', 'scanf', 'wscanf', 'fscanf',
// 'fwscanf', 'vscanf', 'vwscanf', 'vfscanf', 'vfwscanf', 'sscanf',
// 'swscanf', 'vsscanf', 'vswscanf' without buffer limitations
// is insecure.
@@ -764,14 +762,14 @@ void WalkAST::checkDeprecatedOrUnsafeBufferHandling(const CallExpr *CE,
enum { DEPR_ONLY = -1, UNKNOWN_CALL = -2 };
StringRef Name = FD->getIdentifier()->getName();
- if (Name.startswith("__builtin_"))
- Name = Name.substr(10);
+ Name.consume_front("__builtin_");
int ArgIndex =
llvm::StringSwitch<int>(Name)
.Cases("scanf", "wscanf", "vscanf", "vwscanf", 0)
- .Cases("sprintf", "vsprintf", "fscanf", "fwscanf", "vfscanf",
- "vfwscanf", "sscanf", "swscanf", "vsscanf", "vswscanf", 1)
+ .Cases("fscanf", "fwscanf", "vfscanf", "vfwscanf", "sscanf",
+ "swscanf", "vsscanf", "vswscanf", 1)
+ .Cases("sprintf", "vsprintf", "fprintf", 1)
.Cases("swprintf", "snprintf", "vswprintf", "vsnprintf", "memcpy",
"memmove", "memset", "strncpy", "strncat", DEPR_ONLY)
.Default(UNKNOWN_CALL);
@@ -785,9 +783,8 @@ void WalkAST::checkDeprecatedOrUnsafeBufferHandling(const CallExpr *CE,
// real flow analysis.
auto FormatString =
dyn_cast<StringLiteral>(CE->getArg(ArgIndex)->IgnoreParenImpCasts());
- if (FormatString &&
- FormatString->getString().find("%s") == StringRef::npos &&
- FormatString->getString().find("%[") == StringRef::npos)
+ if (FormatString && !FormatString->getString().contains("%s") &&
+ !FormatString->getString().contains("%["))
BoundsProvided = true;
}
@@ -848,8 +845,13 @@ bool WalkAST::checkCall_strCommon(const CallExpr *CE, const FunctionDecl *FD) {
}
//===----------------------------------------------------------------------===//
-// Check: Linear congruent random number generators should not be used
-// Originally: <rdar://problem/63371000>
+// Check: Linear congruent random number generators should not be used,
+// i.e. rand(), random().
+//
+// E. Bach, "Efficient prediction of Marsaglia-Zaman random number generators,"
+// in IEEE Transactions on Information Theory, vol. 44, no. 3, pp. 1253-1257,
+// May 1998, https://doi.org/10.1109/18.669305
+//
// CWE-338: Use of cryptographically weak prng
//===----------------------------------------------------------------------===//
@@ -891,11 +893,7 @@ void WalkAST::checkCall_rand(const CallExpr *CE, const FunctionDecl *FD) {
CE->getCallee()->getSourceRange());
}
-//===----------------------------------------------------------------------===//
-// Check: 'random' should not be used
-// Originally: <rdar://problem/63371000>
-//===----------------------------------------------------------------------===//
-
+// See justification for rand().
void WalkAST::checkCall_random(const CallExpr *CE, const FunctionDecl *FD) {
if (!CheckRand || !filter.check_rand)
return;
@@ -975,6 +973,8 @@ void WalkAST::checkMsg_decodeValueOfObjCType(const ObjCMessageExpr *ME) {
if (VT < VersionTuple(11, 0))
return;
break;
+ case llvm::Triple::XROS:
+ break;
default:
return;
}
@@ -991,8 +991,18 @@ void WalkAST::checkMsg_decodeValueOfObjCType(const ObjCMessageExpr *ME) {
}
//===----------------------------------------------------------------------===//
-// Check: Should check whether privileges are dropped successfully.
-// Originally: <rdar://problem/6337132>
+// Check: The caller should always verify that the privileges
+// were dropped successfully.
+//
+// Some library functions, like setuid() and setgid(), should always be used
+// with a check of the return value to verify that the function completed
+// successfully. If the drop fails, the software will continue to run
+// with the raised privileges, which might provide additional access
+// to unprivileged users.
+//
+// (Note that this check predates __attribute__((warn_unused_result)).
+// Do we still need it now that we have a compiler warning for this?
+// Are these standard functions already annotated this way?)
//===----------------------------------------------------------------------===//
void WalkAST::checkUncheckedReturnValue(CallExpr *CE) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
index fd53c04f4bbf..be7be15022d3 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
@@ -14,6 +14,7 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
@@ -40,9 +41,9 @@ bool isRootChanged(intptr_t k) { return k == ROOT_CHANGED; }
// bug<--foo()-- JAIL_ENTERED<--foo()--
class ChrootChecker : public Checker<eval::Call, check::PreCall> {
// This bug refers to possibly break out of a chroot() jail.
- mutable std::unique_ptr<BuiltinBug> BT_BreakJail;
+ const BugType BT_BreakJail{this, "Break out of jail"};
- const CallDescription Chroot{"chroot", 1}, Chdir{"chdir", 1};
+ const CallDescription Chroot{{"chroot"}, 1}, Chdir{{"chdir"}, 1};
public:
ChrootChecker() {}
@@ -63,11 +64,11 @@ private:
} // end anonymous namespace
bool ChrootChecker::evalCall(const CallEvent &Call, CheckerContext &C) const {
- if (Call.isCalled(Chroot)) {
+ if (Chroot.matches(Call)) {
evalChroot(Call, C);
return true;
}
- if (Call.isCalled(Chdir)) {
+ if (Chdir.matches(Call)) {
evalChdir(Call, C);
return true;
}
@@ -115,7 +116,7 @@ void ChrootChecker::evalChdir(const CallEvent &Call, CheckerContext &C) const {
void ChrootChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
// Ignore chroot and chdir.
- if (Call.isCalled(Chroot) || Call.isCalled(Chdir))
+ if (matchesAny(Call, Chroot, Chdir))
return;
// If jail state is ROOT_CHANGED, generate BugReport.
@@ -123,12 +124,10 @@ void ChrootChecker::checkPreCall(const CallEvent &Call,
if (k)
if (isRootChanged((intptr_t) *k))
if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
- if (!BT_BreakJail)
- BT_BreakJail.reset(new BuiltinBug(
- this, "Break out of jail", "No call of chdir(\"/\") immediately "
- "after chroot"));
- C.emitReport(std::make_unique<PathSensitiveBugReport>(
- *BT_BreakJail, BT_BreakJail->getDescription(), N));
+ constexpr llvm::StringLiteral Msg =
+ "No call of chdir(\"/\") immediately after chroot";
+ C.emitReport(
+ std::make_unique<PathSensitiveBugReport>(BT_BreakJail, Msg, N));
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CloneChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
index 7968aed85e1b..6692a45a09f7 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
@@ -30,12 +30,13 @@ class CloneChecker
public:
// Checker options.
int MinComplexity;
- bool ReportNormalClones;
+ bool ReportNormalClones = false;
StringRef IgnoredFilesPattern;
private:
mutable CloneDetector Detector;
- mutable std::unique_ptr<BugType> BT_Exact, BT_Suspicious;
+ const BugType BT_Exact{this, "Exact code clone", "Code clone"};
+ const BugType BT_Suspicious{this, "Suspicious code clone", "Code clone"};
public:
void checkASTCodeBody(const Decl *D, AnalysisManager &Mgr,
@@ -107,15 +108,11 @@ static PathDiagnosticLocation makeLocation(const StmtSequence &S,
void CloneChecker::reportClones(
BugReporter &BR, AnalysisManager &Mgr,
std::vector<CloneDetector::CloneGroup> &CloneGroups) const {
-
- if (!BT_Exact)
- BT_Exact.reset(new BugType(this, "Exact code clone", "Code clone"));
-
for (const CloneDetector::CloneGroup &Group : CloneGroups) {
// We group the clones by printing the first as a warning and all others
// as a note.
auto R = std::make_unique<BasicBugReport>(
- *BT_Exact, "Duplicate code detected", makeLocation(Group.front(), Mgr));
+ BT_Exact, "Duplicate code detected", makeLocation(Group.front(), Mgr));
R->addRange(Group.front().getSourceRange());
for (unsigned i = 1; i < Group.size(); ++i)
@@ -154,10 +151,6 @@ void CloneChecker::reportSuspiciousClones(
}
}
- if (!BT_Suspicious)
- BT_Suspicious.reset(
- new BugType(this, "Suspicious code clone", "Code clone"));
-
ASTContext &ACtx = BR.getContext();
SourceManager &SM = ACtx.getSourceManager();
AnalysisDeclContext *ADC =
@@ -170,7 +163,7 @@ void CloneChecker::reportSuspiciousClones(
// Think how to perform more accurate suggestions?
auto R = std::make_unique<BasicBugReport>(
- *BT_Suspicious,
+ BT_Suspicious,
"Potential copy-paste error; did you really mean to use '" +
Pair.FirstCloneInfo.Variable->getNameAsString() + "' here?",
PathDiagnosticLocation::createBegin(Pair.FirstCloneInfo.Mention, SM,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp
index 1a7f0d5ab74c..65a2ec4076fd 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp
@@ -10,11 +10,12 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h"
@@ -71,42 +72,27 @@ public:
SVal) const;
CallDescriptionMap<NoItParamFn> NoIterParamFunctions = {
- {{0, "clear", 0},
- &ContainerModeling::handleClear},
- {{0, "assign", 2},
- &ContainerModeling::handleAssign},
- {{0, "push_back", 1},
- &ContainerModeling::handlePushBack},
- {{0, "emplace_back", 1},
- &ContainerModeling::handlePushBack},
- {{0, "pop_back", 0},
- &ContainerModeling::handlePopBack},
- {{0, "push_front", 1},
- &ContainerModeling::handlePushFront},
- {{0, "emplace_front", 1},
- &ContainerModeling::handlePushFront},
- {{0, "pop_front", 0},
- &ContainerModeling::handlePopFront},
+ {{{"clear"}, 0}, &ContainerModeling::handleClear},
+ {{{"assign"}, 2}, &ContainerModeling::handleAssign},
+ {{{"push_back"}, 1}, &ContainerModeling::handlePushBack},
+ {{{"emplace_back"}, 1}, &ContainerModeling::handlePushBack},
+ {{{"pop_back"}, 0}, &ContainerModeling::handlePopBack},
+ {{{"push_front"}, 1}, &ContainerModeling::handlePushFront},
+ {{{"emplace_front"}, 1}, &ContainerModeling::handlePushFront},
+ {{{"pop_front"}, 0}, &ContainerModeling::handlePopFront},
};
-
+
CallDescriptionMap<OneItParamFn> OneIterParamFunctions = {
- {{0, "insert", 2},
- &ContainerModeling::handleInsert},
- {{0, "emplace", 2},
- &ContainerModeling::handleInsert},
- {{0, "erase", 1},
- &ContainerModeling::handleErase},
- {{0, "erase_after", 1},
- &ContainerModeling::handleEraseAfter},
+ {{{"insert"}, 2}, &ContainerModeling::handleInsert},
+ {{{"emplace"}, 2}, &ContainerModeling::handleInsert},
+ {{{"erase"}, 1}, &ContainerModeling::handleErase},
+ {{{"erase_after"}, 1}, &ContainerModeling::handleEraseAfter},
};
-
+
CallDescriptionMap<TwoItParamFn> TwoIterParamFunctions = {
- {{0, "erase", 2},
- &ContainerModeling::handleErase},
- {{0, "erase_after", 2},
- &ContainerModeling::handleEraseAfter},
+ {{{"erase"}, 2}, &ContainerModeling::handleErase},
+ {{{"erase_after"}, 2}, &ContainerModeling::handleEraseAfter},
};
-
};
bool isBeginCall(const FunctionDecl *Func);
@@ -241,7 +227,7 @@ void ContainerModeling::checkDeadSymbols(SymbolReaper &SR,
CheckerContext &C) const {
// Cleanup
auto State = C.getState();
-
+
auto ContMap = State->get<ContainerMap>();
for (const auto &Cont : ContMap) {
if (!SR.isLiveRegion(Cont.first)) {
@@ -763,14 +749,14 @@ bool isBeginCall(const FunctionDecl *Func) {
const auto *IdInfo = Func->getIdentifier();
if (!IdInfo)
return false;
- return IdInfo->getName().endswith_insensitive("begin");
+ return IdInfo->getName().ends_with_insensitive("begin");
}
bool isEndCall(const FunctionDecl *Func) {
const auto *IdInfo = Func->getIdentifier();
if (!IdInfo)
return false;
- return IdInfo->getName().endswith_insensitive("end");
+ return IdInfo->getName().ends_with_insensitive("end");
}
const CXXRecordDecl *getCXXRecordDecl(ProgramStateRef State,
@@ -1035,7 +1021,7 @@ SymbolRef rebaseSymbol(ProgramStateRef State, SValBuilder &SVB,
SymbolRef NewSym) {
auto &SymMgr = SVB.getSymbolManager();
auto Diff = SVB.evalBinOpNN(State, BO_Sub, nonloc::SymbolVal(OrigExpr),
- nonloc::SymbolVal(OldExpr),
+ nonloc::SymbolVal(OldExpr),
SymMgr.getType(OrigExpr));
const auto DiffInt = Diff.getAs<nonloc::ConcreteInt>();
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
index 4216a6883119..eca8d3cc0722 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
@@ -42,22 +42,22 @@ public:
void checkPreStmt(const ImplicitCastExpr *Cast, CheckerContext &C) const;
private:
- mutable std::unique_ptr<BuiltinBug> BT;
+ const BugType BT{this, "Conversion"};
bool isLossOfPrecision(const ImplicitCastExpr *Cast, QualType DestType,
CheckerContext &C) const;
bool isLossOfSign(const ImplicitCastExpr *Cast, CheckerContext &C) const;
- void reportBug(ExplodedNode *N, CheckerContext &C, const char Msg[]) const;
+ void reportBug(ExplodedNode *N, const Expr *E, CheckerContext &C,
+ const char Msg[]) const;
};
}
void ConversionChecker::checkPreStmt(const ImplicitCastExpr *Cast,
CheckerContext &C) const {
- // TODO: For now we only warn about DeclRefExpr, to avoid noise. Warn for
- // calculations also.
- if (!isa<DeclRefExpr>(Cast->IgnoreParenImpCasts()))
+ // Don't warn for implicit conversions to bool
+ if (Cast->getType()->isBooleanType())
return;
// Don't warn for loss of sign/precision in macros.
@@ -69,6 +69,9 @@ void ConversionChecker::checkPreStmt(const ImplicitCastExpr *Cast,
const Stmt *Parent = PM.getParent(Cast);
if (!Parent)
return;
+ // Dont warn if this is part of an explicit cast
+ if (isa<ExplicitCastExpr>(Parent))
+ return;
bool LossOfSign = false;
bool LossOfPrecision = false;
@@ -77,8 +80,10 @@ void ConversionChecker::checkPreStmt(const ImplicitCastExpr *Cast,
if (const auto *B = dyn_cast<BinaryOperator>(Parent)) {
BinaryOperator::Opcode Opc = B->getOpcode();
if (Opc == BO_Assign) {
- LossOfSign = isLossOfSign(Cast, C);
- LossOfPrecision = isLossOfPrecision(Cast, Cast->getType(), C);
+ if (!Cast->IgnoreParenImpCasts()->isEvaluatable(C.getASTContext())) {
+ LossOfSign = isLossOfSign(Cast, C);
+ LossOfPrecision = isLossOfPrecision(Cast, Cast->getType(), C);
+ }
} else if (Opc == BO_AddAssign || Opc == BO_SubAssign) {
// No loss of sign.
LossOfPrecision = isLossOfPrecision(Cast, B->getLHS()->getType(), C);
@@ -97,7 +102,12 @@ void ConversionChecker::checkPreStmt(const ImplicitCastExpr *Cast,
} else if (B->isRelationalOp() || B->isMultiplicativeOp()) {
LossOfSign = isLossOfSign(Cast, C);
}
- } else if (isa<DeclStmt>(Parent)) {
+ } else if (isa<DeclStmt, ReturnStmt>(Parent)) {
+ if (!Cast->IgnoreParenImpCasts()->isEvaluatable(C.getASTContext())) {
+ LossOfSign = isLossOfSign(Cast, C);
+ LossOfPrecision = isLossOfPrecision(Cast, Cast->getType(), C);
+ }
+ } else {
LossOfSign = isLossOfSign(Cast, C);
LossOfPrecision = isLossOfPrecision(Cast, Cast->getType(), C);
}
@@ -108,20 +118,17 @@ void ConversionChecker::checkPreStmt(const ImplicitCastExpr *Cast,
if (!N)
return;
if (LossOfSign)
- reportBug(N, C, "Loss of sign in implicit conversion");
+ reportBug(N, Cast, C, "Loss of sign in implicit conversion");
if (LossOfPrecision)
- reportBug(N, C, "Loss of precision in implicit conversion");
+ reportBug(N, Cast, C, "Loss of precision in implicit conversion");
}
}
-void ConversionChecker::reportBug(ExplodedNode *N, CheckerContext &C,
- const char Msg[]) const {
- if (!BT)
- BT.reset(
- new BuiltinBug(this, "Conversion", "Possible loss of sign/precision."));
-
+void ConversionChecker::reportBug(ExplodedNode *N, const Expr *E,
+ CheckerContext &C, const char Msg[]) const {
// Generate a report for this bug.
- auto R = std::make_unique<PathSensitiveBugReport>(*BT, Msg, N);
+ auto R = std::make_unique<PathSensitiveBugReport>(BT, Msg, N);
+ bugreporter::trackExpressionValue(N, E, *R);
C.emitReport(std::move(R));
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
index 8070d869f678..86f446fc411c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
@@ -38,17 +38,17 @@ public:
llvm::DenseSet<const VarDecl *> &S;
bool TraverseObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
- SaveAndRestore<bool> inFinally(inEH, true);
+ SaveAndRestore inFinally(inEH, true);
return ::RecursiveASTVisitor<EHCodeVisitor>::TraverseObjCAtFinallyStmt(S);
}
bool TraverseObjCAtCatchStmt(ObjCAtCatchStmt *S) {
- SaveAndRestore<bool> inCatch(inEH, true);
+ SaveAndRestore inCatch(inEH, true);
return ::RecursiveASTVisitor<EHCodeVisitor>::TraverseObjCAtCatchStmt(S);
}
bool TraverseCXXCatchStmt(CXXCatchStmt *S) {
- SaveAndRestore<bool> inCatch(inEH, true);
+ SaveAndRestore inCatch(inEH, true);
return TraverseStmt(S->getHandlerBlock());
}
@@ -93,9 +93,9 @@ void ReachableCode::computeReachableBlocks() {
if (isReachable)
continue;
isReachable = true;
- for (CFGBlock::const_succ_iterator i = block->succ_begin(),
- e = block->succ_end(); i != e; ++i)
- if (const CFGBlock *succ = *i)
+
+ for (const CFGBlock *succ : block->succs())
+ if (succ)
worklist.push_back(succ);
}
}
@@ -103,15 +103,12 @@ void ReachableCode::computeReachableBlocks() {
static const Expr *
LookThroughTransitiveAssignmentsAndCommaOperators(const Expr *Ex) {
while (Ex) {
- const BinaryOperator *BO =
- dyn_cast<BinaryOperator>(Ex->IgnoreParenCasts());
+ Ex = Ex->IgnoreParenCasts();
+ const BinaryOperator *BO = dyn_cast<BinaryOperator>(Ex);
if (!BO)
break;
- if (BO->getOpcode() == BO_Assign) {
- Ex = BO->getRHS();
- continue;
- }
- if (BO->getOpcode() == BO_Comma) {
+ BinaryOperatorKind Op = BO->getOpcode();
+ if (Op == BO_Assign || Op == BO_Comma) {
Ex = BO->getRHS();
continue;
}
@@ -186,7 +183,7 @@ public:
// Files autogenerated by DriverKit IIG contain some dead stores that
// we don't want to report.
- if (Data.startswith("/* iig"))
+ if (Data.starts_with("/* iig"))
return true;
return false;
@@ -243,7 +240,7 @@ public:
case DeadIncrement:
BugType = "Dead increment";
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case Standard:
if (!BugType) BugType = "Dead assignment";
os << "Value stored to '" << *V << "' is never read";
@@ -334,8 +331,7 @@ public:
// Special case: check for assigning null to a pointer.
// This is a common form of defensive programming.
const Expr *RHS =
- LookThroughTransitiveAssignmentsAndCommaOperators(B->getRHS());
- RHS = RHS->IgnoreParenCasts();
+ LookThroughTransitiveAssignmentsAndCommaOperators(B->getRHS());
QualType T = VD->getType();
if (T.isVolatileQualified())
@@ -418,8 +414,7 @@ public:
if (isConstant(E))
return;
- if (const DeclRefExpr *DRE =
- dyn_cast<DeclRefExpr>(E->IgnoreParenCasts()))
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
// Special case: check for initialization from constant
// variables.
@@ -441,7 +436,7 @@ public:
PathDiagnosticLocation Loc =
PathDiagnosticLocation::create(V, BR.getSourceManager());
- Report(V, DeadInit, Loc, E->getSourceRange());
+ Report(V, DeadInit, Loc, V->getInit()->getSourceRange());
}
}
}
@@ -453,8 +448,9 @@ private:
bool isConstant(const InitListExpr *Candidate) const {
// We consider init list to be constant if each member of the list can be
// interpreted as constant.
- return llvm::all_of(Candidate->inits(),
- [this](const Expr *Init) { return isConstant(Init); });
+ return llvm::all_of(Candidate->inits(), [this](const Expr *Init) {
+ return isConstant(Init->IgnoreParenCasts());
+ });
}
/// Return true if the given expression can be interpreted as constant
@@ -464,7 +460,7 @@ private:
return true;
// We should also allow defensive initialization of structs, i.e. { 0 }
- if (const auto *ILE = dyn_cast<InitListExpr>(E->IgnoreParenCasts())) {
+ if (const auto *ILE = dyn_cast<InitListExpr>(E)) {
return isConstant(ILE);
}
@@ -507,7 +503,7 @@ public:
// Treat local variables captured by reference in C++ lambdas as escaped.
void findLambdaReferenceCaptures(const LambdaExpr *LE) {
const CXXRecordDecl *LambdaClass = LE->getLambdaClass();
- llvm::DenseMap<const VarDecl *, FieldDecl *> CaptureFields;
+ llvm::DenseMap<const ValueDecl *, FieldDecl *> CaptureFields;
FieldDecl *ThisCaptureField;
LambdaClass->getCaptureFields(CaptureFields, ThisCaptureField);
@@ -515,14 +511,14 @@ public:
if (!C.capturesVariable())
continue;
- VarDecl *VD = C.getCapturedVar();
+ ValueDecl *VD = C.getCapturedVar();
const FieldDecl *FD = CaptureFields[VD];
- if (!FD)
+ if (!FD || !isa<VarDecl>(VD))
continue;
// If the capture field is a reference type, it is capture-by-reference.
if (FD->getType()->isReferenceType())
- Escaped.insert(VD);
+ Escaped.insert(cast<VarDecl>(VD));
}
}
};
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
index 7cdd78b8adfb..04bbe85473c0 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
@@ -271,9 +271,8 @@ public:
const Table &Config = mgr.options.Config;
SmallVector<const Table::MapEntryTy *, 32> Keys;
- for (Table::const_iterator I = Config.begin(), E = Config.end(); I != E;
- ++I)
- Keys.push_back(&*I);
+ for (const auto &Entry : Config)
+ Keys.push_back(&Entry);
llvm::array_pod_sort(Keys.begin(), Keys.end(), compareEntry);
llvm::errs() << "[config]\n";
@@ -302,7 +301,7 @@ class ExplodedGraphViewer : public Checker< check::EndAnalysis > {
public:
ExplodedGraphViewer() {}
void checkEndAnalysis(ExplodedGraph &G, BugReporter &B,ExprEngine &Eng) const {
- Eng.ViewGraph(0);
+ Eng.ViewGraph(false);
}
};
@@ -323,7 +322,7 @@ bool ento::shouldRegisterExplodedGraphViewer(const CheckerManager &mgr) {
namespace {
class ReportStmts : public Checker<check::PreStmt<Stmt>> {
- BuiltinBug BT_stmtLoc{this, "Statement"};
+ BugType BT_stmtLoc{this, "Statement"};
public:
void checkPreStmt(const Stmt *S, CheckerContext &C) const {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugContainerModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugContainerModeling.cpp
index 6fed999ffc80..97f769b1c451 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugContainerModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugContainerModeling.cpp
@@ -13,6 +13,7 @@
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
@@ -27,7 +28,8 @@ namespace {
class DebugContainerModeling
: public Checker<eval::Call> {
- std::unique_ptr<BugType> DebugMsgBugType;
+ const BugType DebugMsgBugType{this, "Checking analyzer assumptions", "debug",
+ /*SuppressOnSink=*/true};
template <typename Getter>
void analyzerContainerDataField(const CallExpr *CE, CheckerContext &C,
@@ -40,26 +42,18 @@ class DebugContainerModeling
CheckerContext &) const;
CallDescriptionMap<FnCheck> Callbacks = {
- {{0, "clang_analyzer_container_begin", 1},
- &DebugContainerModeling::analyzerContainerBegin},
- {{0, "clang_analyzer_container_end", 1},
- &DebugContainerModeling::analyzerContainerEnd},
+ {{{"clang_analyzer_container_begin"}, 1},
+ &DebugContainerModeling::analyzerContainerBegin},
+ {{{"clang_analyzer_container_end"}, 1},
+ &DebugContainerModeling::analyzerContainerEnd},
};
public:
- DebugContainerModeling();
-
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
};
} //namespace
-DebugContainerModeling::DebugContainerModeling() {
- DebugMsgBugType.reset(
- new BugType(this, "Checking analyzer assumptions", "debug",
- /*SuppressOnSink=*/true));
-}
-
bool DebugContainerModeling::evalCall(const CallEvent &Call,
CheckerContext &C) const {
const auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
@@ -136,8 +130,8 @@ ExplodedNode *DebugContainerModeling::reportDebugMsg(llvm::StringRef Msg,
return nullptr;
auto &BR = C.getBugReporter();
- BR.emitReport(std::make_unique<PathSensitiveBugReport>(*DebugMsgBugType,
- Msg, N));
+ BR.emitReport(
+ std::make_unique<PathSensitiveBugReport>(DebugMsgBugType, Msg, N));
return N;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugIteratorModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugIteratorModeling.cpp
index 5833eea56da8..ff479c7b0ac8 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugIteratorModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DebugIteratorModeling.cpp
@@ -13,6 +13,7 @@
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
@@ -27,7 +28,8 @@ namespace {
class DebugIteratorModeling
: public Checker<eval::Call> {
- std::unique_ptr<BugType> DebugMsgBugType;
+ const BugType DebugMsgBugType{this, "Checking analyzer assumptions", "debug",
+ /*SuppressOnSink=*/true};
template <typename Getter>
void analyzerIteratorDataField(const CallExpr *CE, CheckerContext &C,
@@ -41,28 +43,20 @@ class DebugIteratorModeling
CheckerContext &) const;
CallDescriptionMap<FnCheck> Callbacks = {
- {{0, "clang_analyzer_iterator_position", 1},
- &DebugIteratorModeling::analyzerIteratorPosition},
- {{0, "clang_analyzer_iterator_container", 1},
- &DebugIteratorModeling::analyzerIteratorContainer},
- {{0, "clang_analyzer_iterator_validity", 1},
- &DebugIteratorModeling::analyzerIteratorValidity},
+ {{{"clang_analyzer_iterator_position"}, 1},
+ &DebugIteratorModeling::analyzerIteratorPosition},
+ {{{"clang_analyzer_iterator_container"}, 1},
+ &DebugIteratorModeling::analyzerIteratorContainer},
+ {{{"clang_analyzer_iterator_validity"}, 1},
+ &DebugIteratorModeling::analyzerIteratorValidity},
};
public:
- DebugIteratorModeling();
-
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
};
} //namespace
-DebugIteratorModeling::DebugIteratorModeling() {
- DebugMsgBugType.reset(
- new BugType(this, "Checking analyzer assumptions", "debug",
- /*SuppressOnSink=*/true));
-}
-
bool DebugIteratorModeling::evalCall(const CallEvent &Call,
CheckerContext &C) const {
const auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
@@ -130,8 +124,8 @@ ExplodedNode *DebugIteratorModeling::reportDebugMsg(llvm::StringRef Msg,
return nullptr;
auto &BR = C.getBugReporter();
- BR.emitReport(std::make_unique<PathSensitiveBugReport>(*DebugMsgBugType,
- Msg, N));
+ BR.emitReport(
+ std::make_unique<PathSensitiveBugReport>(DebugMsgBugType, Msg, N));
return N;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp
deleted file mode 100644
index 7c5833762008..000000000000
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeleteWithNonVirtualDtorChecker.cpp
+++ /dev/null
@@ -1,155 +0,0 @@
-//===-- DeleteWithNonVirtualDtorChecker.cpp -----------------------*- C++ -*--//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// Defines a checker for the OOP52-CPP CERT rule: Do not delete a polymorphic
-// object without a virtual destructor.
-//
-// Diagnostic flags -Wnon-virtual-dtor and -Wdelete-non-virtual-dtor report if
-// an object with a virtual function but a non-virtual destructor exists or is
-// deleted, respectively.
-//
-// This check exceeds them by comparing the dynamic and static types of the
-// object at the point of destruction and only warns if it happens through a
-// pointer to a base type without a virtual destructor. The check places a note
-// at the last point where the conversion from derived to base happened.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
-#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
-#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
-#include "clang/StaticAnalyzer/Core/Checker.h"
-#include "clang/StaticAnalyzer/Core/CheckerManager.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
-
-using namespace clang;
-using namespace ento;
-
-namespace {
-class DeleteWithNonVirtualDtorChecker
- : public Checker<check::PreStmt<CXXDeleteExpr>> {
- mutable std::unique_ptr<BugType> BT;
-
- class DeleteBugVisitor : public BugReporterVisitor {
- public:
- DeleteBugVisitor() : Satisfied(false) {}
- void Profile(llvm::FoldingSetNodeID &ID) const override {
- static int X = 0;
- ID.AddPointer(&X);
- }
- PathDiagnosticPieceRef VisitNode(const ExplodedNode *N,
- BugReporterContext &BRC,
- PathSensitiveBugReport &BR) override;
-
- private:
- bool Satisfied;
- };
-
-public:
- void checkPreStmt(const CXXDeleteExpr *DE, CheckerContext &C) const;
-};
-} // end anonymous namespace
-
-void DeleteWithNonVirtualDtorChecker::checkPreStmt(const CXXDeleteExpr *DE,
- CheckerContext &C) const {
- const Expr *DeletedObj = DE->getArgument();
- const MemRegion *MR = C.getSVal(DeletedObj).getAsRegion();
- if (!MR)
- return;
-
- const auto *BaseClassRegion = MR->getAs<TypedValueRegion>();
- const auto *DerivedClassRegion = MR->getBaseRegion()->getAs<SymbolicRegion>();
- if (!BaseClassRegion || !DerivedClassRegion)
- return;
-
- const auto *BaseClass = BaseClassRegion->getValueType()->getAsCXXRecordDecl();
- const auto *DerivedClass =
- DerivedClassRegion->getSymbol()->getType()->getPointeeCXXRecordDecl();
- if (!BaseClass || !DerivedClass)
- return;
-
- if (!BaseClass->hasDefinition() || !DerivedClass->hasDefinition())
- return;
-
- if (BaseClass->getDestructor()->isVirtual())
- return;
-
- if (!DerivedClass->isDerivedFrom(BaseClass))
- return;
-
- if (!BT)
- BT.reset(new BugType(this,
- "Destruction of a polymorphic object with no "
- "virtual destructor",
- "Logic error"));
-
- ExplodedNode *N = C.generateNonFatalErrorNode();
- if (!N)
- return;
- auto R = std::make_unique<PathSensitiveBugReport>(*BT, BT->getDescription(), N);
-
- // Mark region of problematic base class for later use in the BugVisitor.
- R->markInteresting(BaseClassRegion);
- R->addVisitor(std::make_unique<DeleteBugVisitor>());
- C.emitReport(std::move(R));
-}
-
-PathDiagnosticPieceRef
-DeleteWithNonVirtualDtorChecker::DeleteBugVisitor::VisitNode(
- const ExplodedNode *N, BugReporterContext &BRC,
- PathSensitiveBugReport &BR) {
- // Stop traversal after the first conversion was found on a path.
- if (Satisfied)
- return nullptr;
-
- const Stmt *S = N->getStmtForDiagnostics();
- if (!S)
- return nullptr;
-
- const auto *CastE = dyn_cast<CastExpr>(S);
- if (!CastE)
- return nullptr;
-
- // Only interested in DerivedToBase implicit casts.
- // Explicit casts can have different CastKinds.
- if (const auto *ImplCastE = dyn_cast<ImplicitCastExpr>(CastE)) {
- if (ImplCastE->getCastKind() != CK_DerivedToBase)
- return nullptr;
- }
-
- // Region associated with the current cast expression.
- const MemRegion *M = N->getSVal(CastE).getAsRegion();
- if (!M)
- return nullptr;
-
- // Check if target region was marked as problematic previously.
- if (!BR.isInteresting(M))
- return nullptr;
-
- // Stop traversal on this path.
- Satisfied = true;
-
- SmallString<256> Buf;
- llvm::raw_svector_ostream OS(Buf);
- OS << "Conversion from derived to base happened here";
- PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
- N->getLocationContext());
- return std::make_shared<PathDiagnosticEventPiece>(Pos, OS.str(), true);
-}
-
-void ento::registerDeleteWithNonVirtualDtorChecker(CheckerManager &mgr) {
- mgr.registerChecker<DeleteWithNonVirtualDtorChecker>();
-}
-
-bool ento::shouldRegisterDeleteWithNonVirtualDtorChecker(
- const CheckerManager &mgr) {
- return true;
-}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
index 4a9c7ce3c66d..a678c3827e7f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -11,9 +11,10 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -39,6 +40,8 @@ class DereferenceChecker
void reportBug(DerefKind K, ProgramStateRef State, const Stmt *S,
CheckerContext &C) const;
+ bool suppressReport(CheckerContext &C, const Expr *E) const;
+
public:
void checkLocation(SVal location, bool isLoad, const Stmt* S,
CheckerContext &C) const;
@@ -49,6 +52,8 @@ public:
const Expr *Ex, const ProgramState *state,
const LocationContext *LCtx,
bool loadedFrom = false);
+
+ bool SuppressAddressSpaces = false;
};
} // end anonymous namespace
@@ -109,9 +114,35 @@ static const Expr *getDereferenceExpr(const Stmt *S, bool IsBind=false){
return E;
}
-static bool suppressReport(const Expr *E) {
- // Do not report dereferences on memory in non-default address spaces.
- return E->getType().hasAddressSpace();
+bool DereferenceChecker::suppressReport(CheckerContext &C,
+ const Expr *E) const {
+ // Do not report dereferences on memory that use address space #256, #257,
+ // and #258. Those address spaces are used when dereferencing address spaces
+ // relative to the GS, FS, and SS segments on x86/x86-64 targets.
+ // Dereferencing a null pointer in these address spaces is not defined
+ // as an error. All other null dereferences in other address spaces
+ // are defined as an error unless explicitly defined.
+ // See https://clang.llvm.org/docs/LanguageExtensions.html, the section
+ // "X86/X86-64 Language Extensions"
+
+ QualType Ty = E->getType();
+ if (!Ty.hasAddressSpace())
+ return false;
+ if (SuppressAddressSpaces)
+ return true;
+
+ const llvm::Triple::ArchType Arch =
+ C.getASTContext().getTargetInfo().getTriple().getArch();
+
+ if ((Arch == llvm::Triple::x86) || (Arch == llvm::Triple::x86_64)) {
+ switch (toTargetAddressSpace(E->getType().getAddressSpace())) {
+ case 256:
+ case 257:
+ case 258:
+ return true;
+ }
+ }
+ return false;
}
static bool isDeclRefExprToReference(const Expr *E) {
@@ -209,7 +240,7 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
// Check for dereference of an undefined value.
if (l.isUndef()) {
const Expr *DerefExpr = getDereferenceExpr(S);
- if (!suppressReport(DerefExpr))
+ if (!suppressReport(C, DerefExpr))
reportBug(DerefKind::UndefinedPointerValue, C.getState(), DerefExpr, C);
return;
}
@@ -217,7 +248,7 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
DefinedOrUnknownSVal location = l.castAs<DefinedOrUnknownSVal>();
// Check for null dereferences.
- if (!location.getAs<Loc>())
+ if (!isa<Loc>(location))
return;
ProgramStateRef state = C.getState();
@@ -230,7 +261,7 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
// We know that 'location' can only be null. This is what
// we call an "explicit" null dereference.
const Expr *expr = getDereferenceExpr(S);
- if (!suppressReport(expr)) {
+ if (!suppressReport(C, expr)) {
reportBug(DerefKind::NullPointer, nullState, expr, C);
return;
}
@@ -272,7 +303,7 @@ void DereferenceChecker::checkBind(SVal L, SVal V, const Stmt *S,
if (StNull) {
if (!StNonNull) {
const Expr *expr = getDereferenceExpr(S, /*IsBind=*/true);
- if (!suppressReport(expr)) {
+ if (!suppressReport(C, expr)) {
reportBug(DerefKind::NullPointer, StNull, expr, C);
return;
}
@@ -308,7 +339,9 @@ void DereferenceChecker::checkBind(SVal L, SVal V, const Stmt *S,
}
void ento::registerDereferenceChecker(CheckerManager &mgr) {
- mgr.registerChecker<DereferenceChecker>();
+ auto *Chk = mgr.registerChecker<DereferenceChecker>();
+ Chk->SuppressAddressSpaces = mgr.getAnalyzerOptions().getCheckerBooleanOption(
+ mgr.getCurrentCheckerName(), "SuppressAddressSpaces");
}
bool ento::shouldRegisterDereferenceChecker(const CheckerManager &mgr) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
index df88b71ff063..49486ea796c2 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
@@ -44,8 +44,8 @@ static bool DefaultMethodFilter(const ObjCMethodDecl *M) {
M->getMethodFamily() == OMF_dealloc ||
M->getMethodFamily() == OMF_copy ||
M->getMethodFamily() == OMF_mutableCopy ||
- M->getSelector().getNameForSlot(0).find("init") != StringRef::npos ||
- M->getSelector().getNameForSlot(0).find("Init") != StringRef::npos;
+ M->getSelector().getNameForSlot(0).contains("init") ||
+ M->getSelector().getNameForSlot(0).contains("Init");
}
class DirectIvarAssignment :
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
index 2b3164ba4a2c..5496f087447f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
@@ -11,12 +11,14 @@
//
//===----------------------------------------------------------------------===//
-#include "Taint.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Checkers/Taint.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -24,9 +26,13 @@ using namespace taint;
namespace {
class DivZeroChecker : public Checker< check::PreStmt<BinaryOperator> > {
- mutable std::unique_ptr<BuiltinBug> BT;
- void reportBug(const char *Msg, ProgramStateRef StateZero, CheckerContext &C,
- std::unique_ptr<BugReporterVisitor> Visitor = nullptr) const;
+ const BugType BT{this, "Division by zero"};
+ const BugType TaintBT{this, "Division by zero", categories::TaintedData};
+ void reportBug(StringRef Msg, ProgramStateRef StateZero,
+ CheckerContext &C) const;
+ void reportTaintBug(StringRef Msg, ProgramStateRef StateZero,
+ CheckerContext &C,
+ llvm::ArrayRef<SymbolRef> TaintedSyms) const;
public:
void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
@@ -40,16 +46,23 @@ static const Expr *getDenomExpr(const ExplodedNode *N) {
return nullptr;
}
-void DivZeroChecker::reportBug(
- const char *Msg, ProgramStateRef StateZero, CheckerContext &C,
- std::unique_ptr<BugReporterVisitor> Visitor) const {
+void DivZeroChecker::reportBug(StringRef Msg, ProgramStateRef StateZero,
+ CheckerContext &C) const {
if (ExplodedNode *N = C.generateErrorNode(StateZero)) {
- if (!BT)
- BT.reset(new BuiltinBug(this, "Division by zero"));
+ auto R = std::make_unique<PathSensitiveBugReport>(BT, Msg, N);
+ bugreporter::trackExpressionValue(N, getDenomExpr(N), *R);
+ C.emitReport(std::move(R));
+ }
+}
- auto R = std::make_unique<PathSensitiveBugReport>(*BT, Msg, N);
- R->addVisitor(std::move(Visitor));
+void DivZeroChecker::reportTaintBug(
+ StringRef Msg, ProgramStateRef StateZero, CheckerContext &C,
+ llvm::ArrayRef<SymbolRef> TaintedSyms) const {
+ if (ExplodedNode *N = C.generateErrorNode(StateZero)) {
+ auto R = std::make_unique<PathSensitiveBugReport>(TaintBT, Msg, N);
bugreporter::trackExpressionValue(N, getDenomExpr(N), *R);
+ for (auto Sym : TaintedSyms)
+ R->markInteresting(Sym);
C.emitReport(std::move(R));
}
}
@@ -67,7 +80,7 @@ void DivZeroChecker::checkPreStmt(const BinaryOperator *B,
return;
SVal Denom = C.getSVal(B->getRHS());
- Optional<DefinedSVal> DV = Denom.getAs<DefinedSVal>();
+ std::optional<DefinedSVal> DV = Denom.getAs<DefinedSVal>();
// Divide-by-undefined handled in the generic checking for uses of
// undefined values.
@@ -85,11 +98,13 @@ void DivZeroChecker::checkPreStmt(const BinaryOperator *B,
return;
}
- bool TaintedD = isTainted(C.getState(), *DV);
- if ((stateNotZero && stateZero && TaintedD)) {
- reportBug("Division by a tainted value, possibly zero", stateZero, C,
- std::make_unique<taint::TaintBugVisitor>(*DV));
- return;
+ if ((stateNotZero && stateZero)) {
+ std::vector<SymbolRef> taintedSyms = getTaintedSymbols(C.getState(), *DV);
+ if (!taintedSyms.empty()) {
+ reportTaintBug("Division by a tainted value, possibly zero", stateZero, C,
+ taintedSyms);
+ return;
+ }
}
// If we get here, then the denom should not be zero. We abandon the implicit
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
index dbc930d7d37b..0ad307d3ebd5 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
@@ -30,12 +30,7 @@ using namespace ento;
namespace {
class DynamicTypeChecker : public Checker<check::PostStmt<ImplicitCastExpr>> {
- mutable std::unique_ptr<BugType> BT;
- void initBugType() const {
- if (!BT)
- BT.reset(
- new BugType(this, "Dynamic and static type mismatch", "Type Error"));
- }
+ const BugType BT{this, "Dynamic and static type mismatch", "Type Error"};
class DynamicTypeBugVisitor : public BugReporterVisitor {
public:
@@ -70,7 +65,6 @@ void DynamicTypeChecker::reportTypeError(QualType DynamicType,
const MemRegion *Reg,
const Stmt *ReportedNode,
CheckerContext &C) const {
- initBugType();
SmallString<192> Buf;
llvm::raw_svector_ostream OS(Buf);
OS << "Object has a dynamic type '";
@@ -81,7 +75,7 @@ void DynamicTypeChecker::reportTypeError(QualType DynamicType,
llvm::Twine());
OS << "'";
auto R = std::make_unique<PathSensitiveBugReport>(
- *BT, OS.str(), C.generateNonFatalErrorNode());
+ BT, OS.str(), C.generateNonFatalErrorNode());
R->markInteresting(Reg);
R->addVisitor(std::make_unique<DynamicTypeBugVisitor>(Reg));
R->addRange(ReportedNode->getSourceRange());
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
index 14ba5d769969..034774a252b1 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
@@ -31,6 +31,8 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/STLExtras.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -56,9 +58,6 @@ class DynamicTypePropagation:
check::PreObjCMessage,
check::PostObjCMessage > {
- const ObjCObjectType *getObjectTypeForAllocAndNew(const ObjCMessageExpr *MsgE,
- CheckerContext &C) const;
-
/// Return a better dynamic type if one can be derived from the cast.
const ObjCObjectPointerType *getBetterObjCType(const Expr *CastE,
CheckerContext &C) const;
@@ -108,7 +107,7 @@ public:
void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
/// This value is set to true, when the Generics checker is turned on.
- DefaultBool CheckGenerics;
+ bool CheckGenerics = false;
CheckerNameRef GenericCheckName;
};
@@ -235,11 +234,9 @@ void DynamicTypePropagation::checkDeadSymbols(SymbolReaper &SR,
MostSpecializedTypeArgsMapTy TyArgMap =
State->get<MostSpecializedTypeArgsMap>();
- for (MostSpecializedTypeArgsMapTy::iterator I = TyArgMap.begin(),
- E = TyArgMap.end();
- I != E; ++I) {
- if (SR.isDead(I->first)) {
- State = State->remove<MostSpecializedTypeArgsMap>(I->first);
+ for (SymbolRef Sym : llvm::make_first_range(TyArgMap)) {
+ if (SR.isDead(Sym)) {
+ State = State->remove<MostSpecializedTypeArgsMap>(Sym);
}
}
@@ -271,12 +268,12 @@ void DynamicTypePropagation::checkPreCall(const CallEvent &Call,
// a more-derived class.
switch (Ctor->getOriginExpr()->getConstructionKind()) {
- case CXXConstructExpr::CK_Complete:
- case CXXConstructExpr::CK_Delegating:
+ case CXXConstructionKind::Complete:
+ case CXXConstructionKind::Delegating:
// No additional type info necessary.
return;
- case CXXConstructExpr::CK_NonVirtualBase:
- case CXXConstructExpr::CK_VirtualBase:
+ case CXXConstructionKind::NonVirtualBase:
+ case CXXConstructionKind::VirtualBase:
if (const MemRegion *Target = Ctor->getCXXThisVal().getAsRegion())
recordFixedType(Target, Ctor->getDecl(), C);
return;
@@ -363,16 +360,16 @@ void DynamicTypePropagation::checkPostCall(const CallEvent &Call,
if (const CXXConstructorCall *Ctor = dyn_cast<CXXConstructorCall>(&Call)) {
// We may need to undo the effects of our pre-call check.
switch (Ctor->getOriginExpr()->getConstructionKind()) {
- case CXXConstructExpr::CK_Complete:
- case CXXConstructExpr::CK_Delegating:
+ case CXXConstructionKind::Complete:
+ case CXXConstructionKind::Delegating:
// No additional work necessary.
// Note: This will leave behind the actual type of the object for
// complete constructors, but arguably that's a good thing, since it
// means the dynamic type info will be correct even for objects
// constructed with operator new.
return;
- case CXXConstructExpr::CK_NonVirtualBase:
- case CXXConstructExpr::CK_VirtualBase:
+ case CXXConstructionKind::NonVirtualBase:
+ case CXXConstructionKind::VirtualBase:
if (const MemRegion *Target = Ctor->getCXXThisVal().getAsRegion()) {
// We just finished a base constructor. Now we can use the subclass's
// type when resolving virtual calls.
@@ -384,7 +381,7 @@ void DynamicTypePropagation::checkPostCall(const CallEvent &Call,
// FIXME: Instead of relying on the ParentMap, we should have the
// trigger-statement (InitListExpr in this case) available in this
// callback, ideally as part of CallEvent.
- if (dyn_cast_or_null<InitListExpr>(
+ if (isa_and_nonnull<InitListExpr>(
LCtx->getParentMap().getParent(Ctor->getOriginExpr())))
return;
@@ -716,7 +713,7 @@ static bool isObjCTypeParamDependent(QualType Type) {
class IsObjCTypeParamDependentTypeVisitor
: public RecursiveASTVisitor<IsObjCTypeParamDependentTypeVisitor> {
public:
- IsObjCTypeParamDependentTypeVisitor() : Result(false) {}
+ IsObjCTypeParamDependentTypeVisitor() = default;
bool VisitObjCTypeParamType(const ObjCTypeParamType *Type) {
if (isa<ObjCTypeParamDecl>(Type->getDecl())) {
Result = true;
@@ -725,7 +722,7 @@ static bool isObjCTypeParamDependent(QualType Type) {
return true;
}
- bool Result;
+ bool Result = false;
};
IsObjCTypeParamDependentTypeVisitor Visitor;
@@ -744,8 +741,6 @@ findMethodDecl(const ObjCMessageExpr *MessageExpr,
const ObjCMethodDecl *Method = nullptr;
QualType ReceiverType = MessageExpr->getReceiverType();
- const auto *ReceiverObjectPtrType =
- ReceiverType->getAs<ObjCObjectPointerType>();
// Do this "devirtualization" on instance and class methods only. Trust the
// static type on super and super class calls.
@@ -755,7 +750,8 @@ findMethodDecl(const ObjCMessageExpr *MessageExpr,
// type, look up the method in the tracked type, not in the receiver type.
// This way we preserve more information.
if (ReceiverType->isObjCIdType() || ReceiverType->isObjCClassType() ||
- ASTCtxt.canAssignObjCInterfaces(ReceiverObjectPtrType, TrackedType)) {
+ ASTCtxt.canAssignObjCInterfaces(
+ ReceiverType->castAs<ObjCObjectPointerType>(), TrackedType)) {
const ObjCInterfaceDecl *InterfaceDecl = TrackedType->getInterfaceDecl();
// The method might not be found.
Selector Sel = MessageExpr->getSelector();
@@ -849,7 +845,7 @@ void DynamicTypePropagation::checkPreObjCMessage(const ObjCMethodCall &M,
return;
}
- Optional<ArrayRef<QualType>> TypeArgs =
+ std::optional<ArrayRef<QualType>> TypeArgs =
(*TrackedType)->getObjCSubstitutions(Method->getDeclContext());
// This case might happen when there is an unspecialized override of a
// specialized method.
@@ -982,7 +978,7 @@ void DynamicTypePropagation::checkPostObjCMessage(const ObjCMethodCall &M,
if (!Method)
return;
- Optional<ArrayRef<QualType>> TypeArgs =
+ std::optional<ArrayRef<QualType>> TypeArgs =
(*TrackedType)->getObjCSubstitutions(Method->getDeclContext());
if (!TypeArgs)
return;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp
index 0e94b915a468..0fa20428c1b5 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/EnumCastOutOfRangeChecker.cpp
@@ -22,9 +22,12 @@
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/Support/FormatVariadic.h"
+#include <optional>
using namespace clang;
using namespace ento;
+using llvm::formatv;
namespace {
// This evaluator checks two SVals for equality. The first SVal is provided via
@@ -57,8 +60,9 @@ public:
// Being conservative, it does not warn if there is slight possibility the
// value can be matching.
class EnumCastOutOfRangeChecker : public Checker<check::PreStmt<CastExpr>> {
- mutable std::unique_ptr<BuiltinBug> EnumValueCastOutOfRange;
- void reportWarning(CheckerContext &C) const;
+ const BugType EnumValueCastOutOfRange{this, "Enum cast out of range"};
+ void reportWarning(CheckerContext &C, const CastExpr *CE,
+ const EnumDecl *E) const;
public:
void checkPreStmt(const CastExpr *CE, CheckerContext &C) const;
@@ -71,21 +75,39 @@ EnumValueVector getDeclValuesForEnum(const EnumDecl *ED) {
EnumValueVector DeclValues(
std::distance(ED->enumerator_begin(), ED->enumerator_end()));
llvm::transform(ED->enumerators(), DeclValues.begin(),
- [](const EnumConstantDecl *D) { return D->getInitVal(); });
+ [](const EnumConstantDecl *D) { return D->getInitVal(); });
return DeclValues;
}
} // namespace
-void EnumCastOutOfRangeChecker::reportWarning(CheckerContext &C) const {
+void EnumCastOutOfRangeChecker::reportWarning(CheckerContext &C,
+ const CastExpr *CE,
+ const EnumDecl *E) const {
+ assert(E && "valid EnumDecl* is expected");
if (const ExplodedNode *N = C.generateNonFatalErrorNode()) {
- if (!EnumValueCastOutOfRange)
- EnumValueCastOutOfRange.reset(
- new BuiltinBug(this, "Enum cast out of range",
- "The value provided to the cast expression is not in "
- "the valid range of values for the enum"));
- C.emitReport(std::make_unique<PathSensitiveBugReport>(
- *EnumValueCastOutOfRange, EnumValueCastOutOfRange->getDescription(),
- N));
+ std::string ValueStr = "", NameStr = "the enum";
+
+ // Try to add details to the message:
+ const auto ConcreteValue =
+ C.getSVal(CE->getSubExpr()).getAs<nonloc::ConcreteInt>();
+ if (ConcreteValue) {
+ ValueStr = formatv(" '{0}'", ConcreteValue->getValue());
+ }
+ if (StringRef EnumName{E->getName()}; !EnumName.empty()) {
+ NameStr = formatv("'{0}'", EnumName);
+ }
+
+ std::string Msg = formatv("The value{0} provided to the cast expression is "
+ "not in the valid range of values for {1}",
+ ValueStr, NameStr);
+
+ auto BR = std::make_unique<PathSensitiveBugReport>(EnumValueCastOutOfRange,
+ Msg, N);
+ bugreporter::trackExpressionValue(N, CE->getSubExpr(), *BR);
+ BR->addNote("enum declared here",
+ PathDiagnosticLocation::create(E, C.getSourceManager()),
+ {E->getSourceRange()});
+ C.emitReport(std::move(BR));
}
}
@@ -94,10 +116,10 @@ void EnumCastOutOfRangeChecker::checkPreStmt(const CastExpr *CE,
// Only perform enum range check on casts where such checks are valid. For
// all other cast kinds (where enum range checks are unnecessary or invalid),
- // just return immediately. TODO: The set of casts whitelisted for enum
- // range checking may be incomplete. Better to add a missing cast kind to
- // enable a missing check than to generate false negatives and have to remove
- // those later.
+ // just return immediately. TODO: The set of casts allowed for enum range
+ // checking may be incomplete. Better to add a missing cast kind to enable a
+ // missing check than to generate false negatives and have to remove those
+ // later.
switch (CE->getCastKind()) {
case CK_IntegralCast:
break;
@@ -108,7 +130,7 @@ void EnumCastOutOfRangeChecker::checkPreStmt(const CastExpr *CE,
}
// Get the value of the expression to cast.
- const llvm::Optional<DefinedOrUnknownSVal> ValueToCast =
+ const std::optional<DefinedOrUnknownSVal> ValueToCast =
C.getSVal(CE->getSubExpr()).getAs<DefinedOrUnknownSVal>();
// If the value cannot be reasoned about (not even a DefinedOrUnknownSVal),
@@ -128,14 +150,25 @@ void EnumCastOutOfRangeChecker::checkPreStmt(const CastExpr *CE,
const EnumDecl *ED = T->castAs<EnumType>()->getDecl();
EnumValueVector DeclValues = getDeclValuesForEnum(ED);
+
+ // If the declarator list is empty, bail out.
+ // Every initialization an enum with a fixed underlying type but without any
+ // enumerators would produce a warning if we were to continue at this point.
+ // The most notable example is std::byte in the C++17 standard library.
+ // TODO: Create heuristics to bail out when the enum type is intended to be
+ // used to store combinations of flag values (to mitigate the limitation
+ // described in the docs).
+ if (DeclValues.size() == 0)
+ return;
+
// Check if any of the enum values possibly match.
- bool PossibleValueMatch = llvm::any_of(
- DeclValues, ConstraintBasedEQEvaluator(C, *ValueToCast));
+ bool PossibleValueMatch =
+ llvm::any_of(DeclValues, ConstraintBasedEQEvaluator(C, *ValueToCast));
// If there is no value that can possibly match any of the enum values, then
// warn.
if (!PossibleValueMatch)
- reportWarning(C);
+ reportWarning(C, CE, ED);
}
void ento::registerEnumCastOutOfRangeChecker(CheckerManager &mgr) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoChecker.cpp
new file mode 100644
index 000000000000..265185e64107
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoChecker.cpp
@@ -0,0 +1,250 @@
+//=== ErrnoChecker.cpp ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines an "errno checker" that can detect some invalid use of the
+// system-defined value 'errno'. This checker works together with the
+// ErrnoModeling checker and other checkers like StdCLibraryFunctions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ErrnoModeling.h"
+#include "clang/AST/ParentMapContext.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/ADT/STLExtras.h"
+#include <optional>
+
+using namespace clang;
+using namespace ento;
+using namespace errno_modeling;
+
+namespace {
+
+class ErrnoChecker
+ : public Checker<check::Location, check::PreCall, check::RegionChanges> {
+public:
+ void checkLocation(SVal Loc, bool IsLoad, const Stmt *S,
+ CheckerContext &) const;
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ ProgramStateRef
+ checkRegionChanges(ProgramStateRef State,
+ const InvalidatedSymbols *Invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const LocationContext *LCtx, const CallEvent *Call) const;
+ void checkBranchCondition(const Stmt *Condition, CheckerContext &Ctx) const;
+
+ /// Indicates if a read (load) of \c errno is allowed in a non-condition part
+ /// of \c if, \c switch, loop and conditional statements when the errno
+ /// value may be undefined.
+ bool AllowErrnoReadOutsideConditions = true;
+
+private:
+ void generateErrnoNotCheckedBug(CheckerContext &C, ProgramStateRef State,
+ const MemRegion *ErrnoRegion,
+ const CallEvent *CallMayChangeErrno) const;
+
+ BugType BT_InvalidErrnoRead{this, "Value of 'errno' could be undefined",
+ "Error handling"};
+ BugType BT_ErrnoNotChecked{this, "Value of 'errno' was not checked",
+ "Error handling"};
+};
+
+} // namespace
+
+static ProgramStateRef setErrnoStateIrrelevant(ProgramStateRef State) {
+ return setErrnoState(State, Irrelevant);
+}
+
+/// Check if a statement (expression) or an ancestor of it is in a condition
+/// part of a (conditional, loop, switch) statement.
+static bool isInCondition(const Stmt *S, CheckerContext &C) {
+ ParentMapContext &ParentCtx = C.getASTContext().getParentMapContext();
+ bool CondFound = false;
+ while (S && !CondFound) {
+ const DynTypedNodeList Parents = ParentCtx.getParents(*S);
+ if (Parents.empty())
+ break;
+ const auto *ParentS = Parents[0].get<Stmt>();
+ if (!ParentS || isa<CallExpr>(ParentS))
+ break;
+ switch (ParentS->getStmtClass()) {
+ case Expr::IfStmtClass:
+ CondFound = (S == cast<IfStmt>(ParentS)->getCond());
+ break;
+ case Expr::ForStmtClass:
+ CondFound = (S == cast<ForStmt>(ParentS)->getCond());
+ break;
+ case Expr::DoStmtClass:
+ CondFound = (S == cast<DoStmt>(ParentS)->getCond());
+ break;
+ case Expr::WhileStmtClass:
+ CondFound = (S == cast<WhileStmt>(ParentS)->getCond());
+ break;
+ case Expr::SwitchStmtClass:
+ CondFound = (S == cast<SwitchStmt>(ParentS)->getCond());
+ break;
+ case Expr::ConditionalOperatorClass:
+ CondFound = (S == cast<ConditionalOperator>(ParentS)->getCond());
+ break;
+ case Expr::BinaryConditionalOperatorClass:
+ CondFound = (S == cast<BinaryConditionalOperator>(ParentS)->getCommon());
+ break;
+ default:
+ break;
+ }
+ S = ParentS;
+ }
+ return CondFound;
+}
+
+void ErrnoChecker::generateErrnoNotCheckedBug(
+ CheckerContext &C, ProgramStateRef State, const MemRegion *ErrnoRegion,
+ const CallEvent *CallMayChangeErrno) const {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(State)) {
+ SmallString<100> StrBuf;
+ llvm::raw_svector_ostream OS(StrBuf);
+ if (CallMayChangeErrno) {
+ OS << "Value of 'errno' was not checked and may be overwritten by "
+ "function '";
+ const auto *CallD =
+ dyn_cast_or_null<FunctionDecl>(CallMayChangeErrno->getDecl());
+ assert(CallD && CallD->getIdentifier());
+ OS << CallD->getIdentifier()->getName() << "'";
+ } else {
+ OS << "Value of 'errno' was not checked and is overwritten here";
+ }
+ auto BR = std::make_unique<PathSensitiveBugReport>(BT_ErrnoNotChecked,
+ OS.str(), N);
+ BR->markInteresting(ErrnoRegion);
+ C.emitReport(std::move(BR));
+ }
+}
+
+void ErrnoChecker::checkLocation(SVal Loc, bool IsLoad, const Stmt *S,
+ CheckerContext &C) const {
+ std::optional<ento::Loc> ErrnoLoc = getErrnoLoc(C.getState());
+ if (!ErrnoLoc)
+ return;
+
+ auto L = Loc.getAs<ento::Loc>();
+ if (!L || *ErrnoLoc != *L)
+ return;
+
+ ProgramStateRef State = C.getState();
+ ErrnoCheckState EState = getErrnoState(State);
+
+ if (IsLoad) {
+ switch (EState) {
+ case MustNotBeChecked:
+ // Read of 'errno' when it may have undefined value.
+ if (!AllowErrnoReadOutsideConditions || isInCondition(S, C)) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
+ auto BR = std::make_unique<PathSensitiveBugReport>(
+ BT_InvalidErrnoRead,
+ "An undefined value may be read from 'errno'", N);
+ BR->markInteresting(ErrnoLoc->getAsRegion());
+ C.emitReport(std::move(BR));
+ }
+ }
+ break;
+ case MustBeChecked:
+ // 'errno' has to be checked. A load is required for this, with no more
+ // information we can assume that it is checked somehow.
+ // After this place 'errno' is allowed to be read and written.
+ State = setErrnoStateIrrelevant(State);
+ C.addTransition(State);
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (EState) {
+ case MustBeChecked:
+ // 'errno' is overwritten without a read before but it should have been
+ // checked.
+ generateErrnoNotCheckedBug(C, setErrnoStateIrrelevant(State),
+ ErrnoLoc->getAsRegion(), nullptr);
+ break;
+ case MustNotBeChecked:
+ // Write to 'errno' when it is not allowed to be read.
+ // After this place 'errno' is allowed to be read and written.
+ State = setErrnoStateIrrelevant(State);
+ C.addTransition(State);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void ErrnoChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ const auto *CallF = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
+ if (!CallF)
+ return;
+
+ CallF = CallF->getCanonicalDecl();
+ // If 'errno' must be checked, it should be done as soon as possible, and
+ // before any other call to a system function (something in a system header).
+ // To avoid use of a long list of functions that may change 'errno'
+ // (which may be different with standard library versions) assume that any
+ // function can change it.
+ // A list of special functions can be used that are allowed here without
+ // generation of diagnostic. For now the only such case is 'errno' itself.
+ // Probably 'strerror'?
+ if (CallF->isExternC() && CallF->isGlobal() &&
+ C.getSourceManager().isInSystemHeader(CallF->getLocation()) &&
+ !isErrno(CallF)) {
+ if (getErrnoState(C.getState()) == MustBeChecked) {
+ std::optional<ento::Loc> ErrnoLoc = getErrnoLoc(C.getState());
+ assert(ErrnoLoc && "ErrnoLoc should exist if an errno state is set.");
+ generateErrnoNotCheckedBug(C, setErrnoStateIrrelevant(C.getState()),
+ ErrnoLoc->getAsRegion(), &Call);
+ }
+ }
+}
+
+ProgramStateRef ErrnoChecker::checkRegionChanges(
+ ProgramStateRef State, const InvalidatedSymbols *Invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions, const LocationContext *LCtx,
+ const CallEvent *Call) const {
+ std::optional<ento::Loc> ErrnoLoc = getErrnoLoc(State);
+ if (!ErrnoLoc)
+ return State;
+ const MemRegion *ErrnoRegion = ErrnoLoc->getAsRegion();
+
+ // If 'errno' is invalidated we can not know if it is checked or written into,
+ // allow read and write without bug reports.
+ if (llvm::is_contained(Regions, ErrnoRegion))
+ return clearErrnoState(State);
+
+ // Always reset errno state when the system memory space is invalidated.
+ // The ErrnoRegion is not always found in the list in this case.
+ if (llvm::is_contained(Regions, ErrnoRegion->getMemorySpace()))
+ return clearErrnoState(State);
+
+ return State;
+}
+
+void ento::registerErrnoChecker(CheckerManager &mgr) {
+ const AnalyzerOptions &Opts = mgr.getAnalyzerOptions();
+ auto *Checker = mgr.registerChecker<ErrnoChecker>();
+ Checker->AllowErrnoReadOutsideConditions = Opts.getCheckerBooleanOption(
+ Checker, "AllowErrnoReadOutsideConditionExpressions");
+}
+
+bool ento::shouldRegisterErrnoChecker(const CheckerManager &mgr) {
+ return true;
+}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp
new file mode 100644
index 000000000000..1b34ea0e056e
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp
@@ -0,0 +1,325 @@
+//=== ErrnoModeling.cpp -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines a checker `ErrnoModeling`, which is used to make the system
+// value 'errno' available to other checkers.
+// The 'errno' value is stored at a special memory region that is accessible
+// through the `errno_modeling` namespace. The memory region is either the
+// region of `errno` itself if it is a variable, otherwise an artifically
+// created region (in the system memory space). If `errno` is defined by using
+// a function which returns the address of it (this is always the case if it is
+// not a variable) this function is recognized and evaluated. In this way
+// `errno` becomes visible to the analysis and checkers can change its value.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ErrnoModeling.h"
+#include "clang/AST/ParentMapContext.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/FormatVariadic.h"
+#include <optional>
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+// Name of the "errno" variable.
+// FIXME: Is there a system where it is not called "errno" but is a variable?
+const char *ErrnoVarName = "errno";
+// Names of functions that return a location of the "errno" value.
+// FIXME: Are there other similar function names?
+const char *ErrnoLocationFuncNames[] = {"__errno_location", "___errno",
+ "__errno", "_errno", "__error"};
+
+class ErrnoModeling
+ : public Checker<check::ASTDecl<TranslationUnitDecl>, check::BeginFunction,
+ check::LiveSymbols, eval::Call> {
+public:
+ void checkASTDecl(const TranslationUnitDecl *D, AnalysisManager &Mgr,
+ BugReporter &BR) const;
+ void checkBeginFunction(CheckerContext &C) const;
+ void checkLiveSymbols(ProgramStateRef State, SymbolReaper &SR) const;
+ bool evalCall(const CallEvent &Call, CheckerContext &C) const;
+
+ // The declaration of an "errno" variable or "errno location" function.
+ mutable const Decl *ErrnoDecl = nullptr;
+
+private:
+ // FIXME: Names from `ErrnoLocationFuncNames` are used to build this set.
+ CallDescriptionSet ErrnoLocationCalls{{{"__errno_location"}, 0, 0},
+ {{"___errno"}, 0, 0},
+ {{"__errno"}, 0, 0},
+ {{"_errno"}, 0, 0},
+ {{"__error"}, 0, 0}};
+};
+
+} // namespace
+
+/// Store a MemRegion that contains the 'errno' integer value.
+/// The value is null if the 'errno' value was not recognized in the AST.
+REGISTER_TRAIT_WITH_PROGRAMSTATE(ErrnoRegion, const MemRegion *)
+
+REGISTER_TRAIT_WITH_PROGRAMSTATE(ErrnoState, errno_modeling::ErrnoCheckState)
+
+/// Search for a variable called "errno" in the AST.
+/// Return nullptr if not found.
+static const VarDecl *getErrnoVar(ASTContext &ACtx) {
+ IdentifierInfo &II = ACtx.Idents.get(ErrnoVarName);
+ auto LookupRes = ACtx.getTranslationUnitDecl()->lookup(&II);
+ auto Found = llvm::find_if(LookupRes, [&ACtx](const Decl *D) {
+ if (auto *VD = dyn_cast<VarDecl>(D))
+ return ACtx.getSourceManager().isInSystemHeader(VD->getLocation()) &&
+ VD->hasExternalStorage() &&
+ VD->getType().getCanonicalType() == ACtx.IntTy;
+ return false;
+ });
+ if (Found == LookupRes.end())
+ return nullptr;
+
+ return cast<VarDecl>(*Found);
+}
+
+/// Search for a function with a specific name that is used to return a pointer
+/// to "errno".
+/// Return nullptr if no such function was found.
+static const FunctionDecl *getErrnoFunc(ASTContext &ACtx) {
+ SmallVector<const Decl *> LookupRes;
+ for (StringRef ErrnoName : ErrnoLocationFuncNames) {
+ IdentifierInfo &II = ACtx.Idents.get(ErrnoName);
+ llvm::append_range(LookupRes, ACtx.getTranslationUnitDecl()->lookup(&II));
+ }
+
+ auto Found = llvm::find_if(LookupRes, [&ACtx](const Decl *D) {
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ return ACtx.getSourceManager().isInSystemHeader(FD->getLocation()) &&
+ FD->isExternC() && FD->getNumParams() == 0 &&
+ FD->getReturnType().getCanonicalType() ==
+ ACtx.getPointerType(ACtx.IntTy);
+ return false;
+ });
+ if (Found == LookupRes.end())
+ return nullptr;
+
+ return cast<FunctionDecl>(*Found);
+}
+
+void ErrnoModeling::checkASTDecl(const TranslationUnitDecl *D,
+ AnalysisManager &Mgr, BugReporter &BR) const {
+ // Try to find an usable `errno` value.
+ // It can be an external variable called "errno" or a function that returns a
+ // pointer to the "errno" value. This function can have different names.
+ // The actual case is dependent on the C library implementation, we
+ // can only search for a match in one of these variations.
+ // We assume that exactly one of these cases might be true.
+ ErrnoDecl = getErrnoVar(Mgr.getASTContext());
+ if (!ErrnoDecl)
+ ErrnoDecl = getErrnoFunc(Mgr.getASTContext());
+}
+
+void ErrnoModeling::checkBeginFunction(CheckerContext &C) const {
+ if (!C.inTopFrame())
+ return;
+
+ ASTContext &ACtx = C.getASTContext();
+ ProgramStateRef State = C.getState();
+
+ if (const auto *ErrnoVar = dyn_cast_or_null<VarDecl>(ErrnoDecl)) {
+ // There is an external 'errno' variable.
+ // Use its memory region.
+ // The memory region for an 'errno'-like variable is allocated in system
+ // space by MemRegionManager.
+ const MemRegion *ErrnoR =
+ State->getRegion(ErrnoVar, C.getLocationContext());
+ assert(ErrnoR && "Memory region should exist for the 'errno' variable.");
+ State = State->set<ErrnoRegion>(ErrnoR);
+ State =
+ errno_modeling::setErrnoValue(State, C, 0, errno_modeling::Irrelevant);
+ C.addTransition(State);
+ } else if (ErrnoDecl) {
+ assert(isa<FunctionDecl>(ErrnoDecl) && "Invalid errno location function.");
+ // There is a function that returns the location of 'errno'.
+ // We must create a memory region for it in system space.
+ // Currently a symbolic region is used with an artifical symbol.
+ // FIXME: It is better to have a custom (new) kind of MemRegion for such
+ // cases.
+ SValBuilder &SVB = C.getSValBuilder();
+ MemRegionManager &RMgr = C.getStateManager().getRegionManager();
+
+ const MemSpaceRegion *GlobalSystemSpace =
+ RMgr.getGlobalsRegion(MemRegion::GlobalSystemSpaceRegionKind);
+
+ // Create an artifical symbol for the region.
+ // It is not possible to associate a statement or expression in this case.
+ const SymbolConjured *Sym = SVB.conjureSymbol(
+ nullptr, C.getLocationContext(),
+ ACtx.getLValueReferenceType(ACtx.IntTy), C.blockCount(), &ErrnoDecl);
+
+ // The symbolic region is untyped, create a typed sub-region in it.
+ // The ElementRegion is used to make the errno region a typed region.
+ const MemRegion *ErrnoR = RMgr.getElementRegion(
+ ACtx.IntTy, SVB.makeZeroArrayIndex(),
+ RMgr.getSymbolicRegion(Sym, GlobalSystemSpace), C.getASTContext());
+ State = State->set<ErrnoRegion>(ErrnoR);
+ State =
+ errno_modeling::setErrnoValue(State, C, 0, errno_modeling::Irrelevant);
+ C.addTransition(State);
+ }
+}
+
+bool ErrnoModeling::evalCall(const CallEvent &Call, CheckerContext &C) const {
+ // Return location of "errno" at a call to an "errno address returning"
+ // function.
+ if (ErrnoLocationCalls.contains(Call)) {
+ ProgramStateRef State = C.getState();
+
+ const MemRegion *ErrnoR = State->get<ErrnoRegion>();
+ if (!ErrnoR)
+ return false;
+
+ State = State->BindExpr(Call.getOriginExpr(), C.getLocationContext(),
+ loc::MemRegionVal{ErrnoR});
+ C.addTransition(State);
+ return true;
+ }
+
+ return false;
+}
+
+void ErrnoModeling::checkLiveSymbols(ProgramStateRef State,
+ SymbolReaper &SR) const {
+ // The special errno region should never garbage collected.
+ if (const auto *ErrnoR = State->get<ErrnoRegion>())
+ SR.markLive(ErrnoR);
+}
+
+namespace clang {
+namespace ento {
+namespace errno_modeling {
+
+std::optional<SVal> getErrnoValue(ProgramStateRef State) {
+ const MemRegion *ErrnoR = State->get<ErrnoRegion>();
+ if (!ErrnoR)
+ return {};
+ QualType IntTy = State->getAnalysisManager().getASTContext().IntTy;
+ return State->getSVal(ErrnoR, IntTy);
+}
+
+ProgramStateRef setErrnoValue(ProgramStateRef State,
+ const LocationContext *LCtx, SVal Value,
+ ErrnoCheckState EState) {
+ const MemRegion *ErrnoR = State->get<ErrnoRegion>();
+ if (!ErrnoR)
+ return State;
+ // First set the errno value, the old state is still available at 'checkBind'
+ // or 'checkLocation' for errno value.
+ State = State->bindLoc(loc::MemRegionVal{ErrnoR}, Value, LCtx);
+ return State->set<ErrnoState>(EState);
+}
+
+ProgramStateRef setErrnoValue(ProgramStateRef State, CheckerContext &C,
+ uint64_t Value, ErrnoCheckState EState) {
+ const MemRegion *ErrnoR = State->get<ErrnoRegion>();
+ if (!ErrnoR)
+ return State;
+ State = State->bindLoc(
+ loc::MemRegionVal{ErrnoR},
+ C.getSValBuilder().makeIntVal(Value, C.getASTContext().IntTy),
+ C.getLocationContext());
+ return State->set<ErrnoState>(EState);
+}
+
+std::optional<Loc> getErrnoLoc(ProgramStateRef State) {
+ const MemRegion *ErrnoR = State->get<ErrnoRegion>();
+ if (!ErrnoR)
+ return {};
+ return loc::MemRegionVal{ErrnoR};
+}
+
+ErrnoCheckState getErrnoState(ProgramStateRef State) {
+ return State->get<ErrnoState>();
+}
+
+ProgramStateRef setErrnoState(ProgramStateRef State, ErrnoCheckState EState) {
+ return State->set<ErrnoState>(EState);
+}
+
+ProgramStateRef clearErrnoState(ProgramStateRef State) {
+ return setErrnoState(State, Irrelevant);
+}
+
+bool isErrno(const Decl *D) {
+ if (const auto *VD = dyn_cast_or_null<VarDecl>(D))
+ if (const IdentifierInfo *II = VD->getIdentifier())
+ return II->getName() == ErrnoVarName;
+ if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
+ if (const IdentifierInfo *II = FD->getIdentifier())
+ return llvm::is_contained(ErrnoLocationFuncNames, II->getName());
+ return false;
+}
+
+const NoteTag *getErrnoNoteTag(CheckerContext &C, const std::string &Message) {
+ return C.getNoteTag([Message](PathSensitiveBugReport &BR) -> std::string {
+ const MemRegion *ErrnoR = BR.getErrorNode()->getState()->get<ErrnoRegion>();
+ if (ErrnoR && BR.isInteresting(ErrnoR)) {
+ BR.markNotInteresting(ErrnoR);
+ return Message;
+ }
+ return "";
+ });
+}
+
+ProgramStateRef setErrnoForStdSuccess(ProgramStateRef State,
+ CheckerContext &C) {
+ return setErrnoState(State, MustNotBeChecked);
+}
+
+ProgramStateRef setErrnoForStdFailure(ProgramStateRef State, CheckerContext &C,
+ NonLoc ErrnoSym) {
+ SValBuilder &SVB = C.getSValBuilder();
+ NonLoc ZeroVal = SVB.makeZeroVal(C.getASTContext().IntTy).castAs<NonLoc>();
+ DefinedOrUnknownSVal Cond =
+ SVB.evalBinOp(State, BO_NE, ErrnoSym, ZeroVal, SVB.getConditionType())
+ .castAs<DefinedOrUnknownSVal>();
+ State = State->assume(Cond, true);
+ if (!State)
+ return nullptr;
+ return setErrnoValue(State, C.getLocationContext(), ErrnoSym, Irrelevant);
+}
+
+ProgramStateRef setErrnoStdMustBeChecked(ProgramStateRef State,
+ CheckerContext &C,
+ const Expr *InvalE) {
+ const MemRegion *ErrnoR = State->get<ErrnoRegion>();
+ if (!ErrnoR)
+ return State;
+ State = State->invalidateRegions(ErrnoR, InvalE, C.blockCount(),
+ C.getLocationContext(), false);
+ if (!State)
+ return nullptr;
+ return setErrnoState(State, MustBeChecked);
+}
+
+} // namespace errno_modeling
+} // namespace ento
+} // namespace clang
+
+void ento::registerErrnoModeling(CheckerManager &mgr) {
+ mgr.registerChecker<ErrnoModeling>();
+}
+
+bool ento::shouldRegisterErrnoModeling(const CheckerManager &mgr) {
+ return true;
+}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h
new file mode 100644
index 000000000000..6b53572fe5e2
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h
@@ -0,0 +1,110 @@
+//=== ErrnoModeling.h - Tracking value of 'errno'. -----------------*- C++ -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines inter-checker API for using the system value 'errno'.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_ERRNOMODELING_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_ERRNOMODELING_H
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include <optional>
+
+namespace clang {
+namespace ento {
+namespace errno_modeling {
+
+/// Describe how reads and writes of \c errno are handled by the checker.
+enum ErrnoCheckState : unsigned {
+ /// We do not know anything about 'errno'.
+ /// Read and write is always allowed.
+ Irrelevant = 0,
+
+ /// Value of 'errno' should be checked to find out if a previous function call
+ /// has failed.
+ /// When this state is set \c errno must be read by the program before a next
+ /// standard function call or other overwrite of \c errno follows, otherwise
+ /// a bug report is emitted.
+ MustBeChecked = 1,
+
+ /// Value of 'errno' is not allowed to be read, it can contain an unspecified
+ /// value.
+ /// When this state is set \c errno is not allowed to be read by the program
+ /// until it is overwritten or invalidated.
+ MustNotBeChecked = 2
+};
+
+/// Returns the value of 'errno', if 'errno' was found in the AST.
+std::optional<SVal> getErrnoValue(ProgramStateRef State);
+
+/// Returns the errno check state, \c Errno_Irrelevant if 'errno' was not found
+/// (this is not the only case for that value).
+ErrnoCheckState getErrnoState(ProgramStateRef State);
+
+/// Returns the location that points to the \c MemoryRegion where the 'errno'
+/// value is stored. Returns \c std::nullopt if 'errno' was not found. Otherwise
+/// it always returns a valid memory region in the system global memory space.
+std::optional<Loc> getErrnoLoc(ProgramStateRef State);
+
+/// Set value of 'errno' to any SVal, if possible.
+/// The errno check state is set always when the 'errno' value is set.
+ProgramStateRef setErrnoValue(ProgramStateRef State,
+ const LocationContext *LCtx, SVal Value,
+ ErrnoCheckState EState);
+
+/// Set value of 'errno' to a concrete (signed) integer, if possible.
+/// The errno check state is set always when the 'errno' value is set.
+ProgramStateRef setErrnoValue(ProgramStateRef State, CheckerContext &C,
+ uint64_t Value, ErrnoCheckState EState);
+
+/// Set the errno check state, do not modify the errno value.
+ProgramStateRef setErrnoState(ProgramStateRef State, ErrnoCheckState EState);
+
+/// Clear state of errno (make it irrelevant).
+ProgramStateRef clearErrnoState(ProgramStateRef State);
+
+/// Determine if a `Decl` node related to 'errno'.
+/// This is true if the declaration is the errno variable or a function
+/// that returns a pointer to the 'errno' value (usually the 'errno' macro is
+/// defined with this function). \p D is not required to be a canonical
+/// declaration.
+bool isErrno(const Decl *D);
+
+/// Create a NoteTag that displays the message if the 'errno' memory region is
+/// marked as interesting, and resets the interestingness.
+const NoteTag *getErrnoNoteTag(CheckerContext &C, const std::string &Message);
+
+/// Set errno state for the common case when a standard function is successful.
+/// Set \c ErrnoCheckState to \c MustNotBeChecked (the \c errno value is not
+/// affected).
+ProgramStateRef setErrnoForStdSuccess(ProgramStateRef State, CheckerContext &C);
+
+/// Set errno state for the common case when a standard function fails.
+/// Set \c errno value to be not equal to zero and \c ErrnoCheckState to
+/// \c Irrelevant . The irrelevant errno state ensures that no related bug
+/// report is emitted later and no note tag is needed.
+/// \arg \c ErrnoSym Value to be used for \c errno and constrained to be
+/// non-zero.
+ProgramStateRef setErrnoForStdFailure(ProgramStateRef State, CheckerContext &C,
+ NonLoc ErrnoSym);
+
+/// Set errno state for the common case when a standard function indicates
+/// failure only by \c errno. Sets \c ErrnoCheckState to \c MustBeChecked, and
+/// invalidates the errno region (clear of previous value).
+/// \arg \c InvalE Expression that causes invalidation of \c errno.
+ProgramStateRef setErrnoStdMustBeChecked(ProgramStateRef State,
+ CheckerContext &C, const Expr *InvalE);
+
+} // namespace errno_modeling
+} // namespace ento
+} // namespace clang
+
+#endif // LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_ERRNOMODELING_H
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoTesterChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoTesterChecker.cpp
new file mode 100644
index 000000000000..c46ebee0c94f
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoTesterChecker.cpp
@@ -0,0 +1,185 @@
+//=== ErrnoTesterChecker.cpp ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines ErrnoTesterChecker, which is used to test functionality of the
+// errno_check API.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ErrnoModeling.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include <optional>
+
+using namespace clang;
+using namespace ento;
+using namespace errno_modeling;
+
+namespace {
+
+class ErrnoTesterChecker : public Checker<eval::Call> {
+public:
+ bool evalCall(const CallEvent &Call, CheckerContext &C) const;
+
+private:
+ /// Evaluate function \code void ErrnoTesterChecker_setErrno(int) \endcode.
+ /// Set value of \c errno to the argument.
+ static void evalSetErrno(CheckerContext &C, const CallEvent &Call);
+ /// Evaluate function \code int ErrnoTesterChecker_getErrno() \endcode.
+ /// Return the value of \c errno.
+ static void evalGetErrno(CheckerContext &C, const CallEvent &Call);
+ /// Evaluate function \code int ErrnoTesterChecker_setErrnoIfError() \endcode.
+ /// Simulate a standard library function tha returns 0 on success and 1 on
+ /// failure. On the success case \c errno is not allowed to be used (may be
+ /// undefined). On the failure case \c errno is set to a fixed value 11 and
+ /// is not needed to be checked.
+ static void evalSetErrnoIfError(CheckerContext &C, const CallEvent &Call);
+ /// Evaluate function \code int ErrnoTesterChecker_setErrnoIfErrorRange()
+ /// \endcode. Same as \c ErrnoTesterChecker_setErrnoIfError but \c errno is
+ /// set to a range (to be nonzero) at the failure case.
+ static void evalSetErrnoIfErrorRange(CheckerContext &C,
+ const CallEvent &Call);
+ /// Evaluate function \code int ErrnoTesterChecker_setErrnoCheckState()
+ /// \endcode. This function simulates the following:
+ /// - Return 0 and leave \c errno with undefined value.
+ /// This is the case of a successful standard function call.
+ /// For example if \c ftell returns not -1.
+ /// - Return 1 and sets \c errno to a specific error code (1).
+ /// This is the case of a failed standard function call.
+ /// The function indicates the failure by a special return value
+ /// that is returned only at failure.
+ /// \c errno can be checked but it is not required.
+ /// For example if \c ftell returns -1.
+ /// - Return 2 and may set errno to a value (actually it does not set it).
+ /// This is the case of a standard function call where the failure can only
+ /// be checked by reading from \c errno. The value of \c errno is changed by
+ /// the function only at failure, the user should set \c errno to 0 before
+ /// the call (\c ErrnoChecker does not check for this rule).
+ /// \c strtol is an example of this case, if it returns \c LONG_MIN (or
+ /// \c LONG_MAX). This case applies only if \c LONG_MIN or \c LONG_MAX is
+ /// returned, otherwise the first case in this list applies.
+ static void evalSetErrnoCheckState(CheckerContext &C, const CallEvent &Call);
+
+ using EvalFn = std::function<void(CheckerContext &, const CallEvent &)>;
+ const CallDescriptionMap<EvalFn> TestCalls{
+ {{{"ErrnoTesterChecker_setErrno"}, 1}, &ErrnoTesterChecker::evalSetErrno},
+ {{{"ErrnoTesterChecker_getErrno"}, 0}, &ErrnoTesterChecker::evalGetErrno},
+ {{{"ErrnoTesterChecker_setErrnoIfError"}, 0},
+ &ErrnoTesterChecker::evalSetErrnoIfError},
+ {{{"ErrnoTesterChecker_setErrnoIfErrorRange"}, 0},
+ &ErrnoTesterChecker::evalSetErrnoIfErrorRange},
+ {{{"ErrnoTesterChecker_setErrnoCheckState"}, 0},
+ &ErrnoTesterChecker::evalSetErrnoCheckState}};
+};
+
+} // namespace
+
+void ErrnoTesterChecker::evalSetErrno(CheckerContext &C,
+ const CallEvent &Call) {
+ C.addTransition(setErrnoValue(C.getState(), C.getLocationContext(),
+ Call.getArgSVal(0), Irrelevant));
+}
+
+void ErrnoTesterChecker::evalGetErrno(CheckerContext &C,
+ const CallEvent &Call) {
+ ProgramStateRef State = C.getState();
+
+ std::optional<SVal> ErrnoVal = getErrnoValue(State);
+ assert(ErrnoVal && "Errno value should be available.");
+ State =
+ State->BindExpr(Call.getOriginExpr(), C.getLocationContext(), *ErrnoVal);
+
+ C.addTransition(State);
+}
+
+void ErrnoTesterChecker::evalSetErrnoIfError(CheckerContext &C,
+ const CallEvent &Call) {
+ ProgramStateRef State = C.getState();
+ SValBuilder &SVB = C.getSValBuilder();
+
+ ProgramStateRef StateSuccess = State->BindExpr(
+ Call.getOriginExpr(), C.getLocationContext(), SVB.makeIntVal(0, true));
+ StateSuccess = setErrnoState(StateSuccess, MustNotBeChecked);
+
+ ProgramStateRef StateFailure = State->BindExpr(
+ Call.getOriginExpr(), C.getLocationContext(), SVB.makeIntVal(1, true));
+ StateFailure = setErrnoValue(StateFailure, C, 11, Irrelevant);
+
+ C.addTransition(StateSuccess);
+ C.addTransition(StateFailure);
+}
+
+void ErrnoTesterChecker::evalSetErrnoIfErrorRange(CheckerContext &C,
+ const CallEvent &Call) {
+ ProgramStateRef State = C.getState();
+ SValBuilder &SVB = C.getSValBuilder();
+
+ ProgramStateRef StateSuccess = State->BindExpr(
+ Call.getOriginExpr(), C.getLocationContext(), SVB.makeIntVal(0, true));
+ StateSuccess = setErrnoState(StateSuccess, MustNotBeChecked);
+
+ ProgramStateRef StateFailure = State->BindExpr(
+ Call.getOriginExpr(), C.getLocationContext(), SVB.makeIntVal(1, true));
+ DefinedOrUnknownSVal ErrnoVal = SVB.conjureSymbolVal(
+ nullptr, Call.getOriginExpr(), C.getLocationContext(), C.blockCount());
+ StateFailure = StateFailure->assume(ErrnoVal, true);
+ assert(StateFailure && "Failed to assume on an initial value.");
+ StateFailure =
+ setErrnoValue(StateFailure, C.getLocationContext(), ErrnoVal, Irrelevant);
+
+ C.addTransition(StateSuccess);
+ C.addTransition(StateFailure);
+}
+
+void ErrnoTesterChecker::evalSetErrnoCheckState(CheckerContext &C,
+ const CallEvent &Call) {
+ ProgramStateRef State = C.getState();
+ SValBuilder &SVB = C.getSValBuilder();
+
+ ProgramStateRef StateSuccess = State->BindExpr(
+ Call.getOriginExpr(), C.getLocationContext(), SVB.makeIntVal(0, true));
+ StateSuccess = setErrnoState(StateSuccess, MustNotBeChecked);
+
+ ProgramStateRef StateFailure1 = State->BindExpr(
+ Call.getOriginExpr(), C.getLocationContext(), SVB.makeIntVal(1, true));
+ StateFailure1 = setErrnoValue(StateFailure1, C, 1, Irrelevant);
+
+ ProgramStateRef StateFailure2 = State->BindExpr(
+ Call.getOriginExpr(), C.getLocationContext(), SVB.makeIntVal(2, true));
+ StateFailure2 = setErrnoValue(StateFailure2, C, 2, MustBeChecked);
+
+ C.addTransition(StateSuccess,
+ getErrnoNoteTag(C, "Assuming that this function succeeds but "
+ "sets 'errno' to an unspecified value."));
+ C.addTransition(StateFailure1);
+ C.addTransition(
+ StateFailure2,
+ getErrnoNoteTag(C, "Assuming that this function returns 2. 'errno' "
+ "should be checked to test for failure."));
+}
+
+bool ErrnoTesterChecker::evalCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ const EvalFn *Fn = TestCalls.lookup(Call);
+ if (Fn) {
+ (*Fn)(C, Call);
+ return C.isDifferent();
+ }
+ return false;
+}
+
+void ento::registerErrnoTesterChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<ErrnoTesterChecker>();
+}
+
+bool ento::shouldRegisterErrnoTesterChecker(const CheckerManager &Mgr) {
+ return true;
+}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
index 2ce1bef6d228..3096999e9fd1 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
@@ -6,10 +6,10 @@
//
//===----------------------------------------------------------------------===//
-#include "Taint.h"
#include "clang/Analysis/IssueHash.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Checkers/SValExplainer.h"
+#include "clang/StaticAnalyzer/Checkers/Taint.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
@@ -17,6 +17,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ScopedPrinter.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -24,7 +25,7 @@ using namespace ento;
namespace {
class ExprInspectionChecker
: public Checker<eval::Call, check::DeadSymbols, check::EndAnalysis> {
- mutable std::unique_ptr<BugType> BT;
+ const BugType BT{this, "Checking analyzer assumptions", "debug"};
// These stats are per-analysis, not per-branch, hence they shouldn't
// stay inside the program state.
@@ -40,6 +41,8 @@ class ExprInspectionChecker
void analyzerNumTimesReached(const CallExpr *CE, CheckerContext &C) const;
void analyzerCrash(const CallExpr *CE, CheckerContext &C) const;
void analyzerWarnOnDeadSymbol(const CallExpr *CE, CheckerContext &C) const;
+ void analyzerValue(const CallExpr *CE, CheckerContext &C) const;
+ void analyzerDumpSValType(const CallExpr *CE, CheckerContext &C) const;
void analyzerDump(const CallExpr *CE, CheckerContext &C) const;
void analyzerExplain(const CallExpr *CE, CheckerContext &C) const;
void analyzerPrintState(const CallExpr *CE, CheckerContext &C) const;
@@ -56,9 +59,10 @@ class ExprInspectionChecker
// Optional parameter `ExprVal` for expression value to be marked interesting.
ExplodedNode *reportBug(llvm::StringRef Msg, CheckerContext &C,
- Optional<SVal> ExprVal = None) const;
+ std::optional<SVal> ExprVal = std::nullopt) const;
ExplodedNode *reportBug(llvm::StringRef Msg, BugReporter &BR, ExplodedNode *N,
- Optional<SVal> ExprVal = None) const;
+ std::optional<SVal> ExprVal = std::nullopt) const;
+ template <typename T> void printAndReport(CheckerContext &C, T What) const;
const Expr *getArgExpr(const CallExpr *CE, CheckerContext &C) const;
const MemRegion *getArgRegion(const CallExpr *CE, CheckerContext &C) const;
@@ -98,6 +102,9 @@ bool ExprInspectionChecker::evalCall(const CallEvent &Call,
&ExprInspectionChecker::analyzerDumpExtent)
.Case("clang_analyzer_dumpElementCount",
&ExprInspectionChecker::analyzerDumpElementCount)
+ .Case("clang_analyzer_value", &ExprInspectionChecker::analyzerValue)
+ .StartsWith("clang_analyzer_dumpSvalType",
+ &ExprInspectionChecker::analyzerDumpSValType)
.StartsWith("clang_analyzer_dump",
&ExprInspectionChecker::analyzerDump)
.Case("clang_analyzer_getExtent",
@@ -109,7 +116,8 @@ bool ExprInspectionChecker::evalCall(const CallEvent &Call,
.Case("clang_analyzer_hashDump",
&ExprInspectionChecker::analyzerHashDump)
.Case("clang_analyzer_denote", &ExprInspectionChecker::analyzerDenote)
- .Case("clang_analyzer_express",
+ .Case("clang_analyzer_express", // This also marks the argument as
+ // interesting.
&ExprInspectionChecker::analyzerExpress)
.StartsWith("clang_analyzer_isTainted",
&ExprInspectionChecker::analyzerIsTainted)
@@ -154,24 +162,21 @@ static const char *getArgumentValueString(const CallExpr *CE,
}
}
-ExplodedNode *ExprInspectionChecker::reportBug(llvm::StringRef Msg,
- CheckerContext &C,
- Optional<SVal> ExprVal) const {
+ExplodedNode *
+ExprInspectionChecker::reportBug(llvm::StringRef Msg, CheckerContext &C,
+ std::optional<SVal> ExprVal) const {
ExplodedNode *N = C.generateNonFatalErrorNode();
reportBug(Msg, C.getBugReporter(), N, ExprVal);
return N;
}
-ExplodedNode *ExprInspectionChecker::reportBug(llvm::StringRef Msg,
- BugReporter &BR, ExplodedNode *N,
- Optional<SVal> ExprVal) const {
+ExplodedNode *
+ExprInspectionChecker::reportBug(llvm::StringRef Msg, BugReporter &BR,
+ ExplodedNode *N,
+ std::optional<SVal> ExprVal) const {
if (!N)
return nullptr;
-
- if (!BT)
- BT.reset(new BugType(this, "Checking analyzer assumptions", "debug"));
-
- auto R = std::make_unique<PathSensitiveBugReport>(*BT, Msg, N);
+ auto R = std::make_unique<PathSensitiveBugReport>(BT, Msg, N);
if (ExprVal) {
R->markInteresting(*ExprVal);
}
@@ -255,6 +260,55 @@ void ExprInspectionChecker::analyzerExplain(const CallExpr *CE,
reportBug(Ex.Visit(V), C);
}
+static void printHelper(llvm::raw_svector_ostream &Out, CheckerContext &C,
+ const llvm::APSInt &I) {
+ Out << I.getBitWidth() << (I.isUnsigned() ? "u:" : "s:");
+ Out << I;
+}
+
+static void printHelper(llvm::raw_svector_ostream &Out, CheckerContext &C,
+ SymbolRef Sym) {
+ C.getConstraintManager().printValue(Out, C.getState(), Sym);
+}
+
+static void printHelper(llvm::raw_svector_ostream &Out, CheckerContext &C,
+ SVal V) {
+ Out << V;
+}
+
+template <typename T>
+void ExprInspectionChecker::printAndReport(CheckerContext &C, T What) const {
+ llvm::SmallString<64> Str;
+ llvm::raw_svector_ostream OS(Str);
+ printHelper(OS, C, What);
+ reportBug(OS.str(), C);
+}
+
+void ExprInspectionChecker::analyzerValue(const CallExpr *CE,
+ CheckerContext &C) const {
+ const Expr *Arg = getArgExpr(CE, C);
+ if (!Arg)
+ return;
+
+ SVal V = C.getSVal(Arg);
+ if (const SymbolRef Sym = V.getAsSymbol())
+ printAndReport(C, Sym);
+ else if (const llvm::APSInt *I = V.getAsInteger())
+ printAndReport(C, *I);
+ else
+ reportBug("n/a", C);
+}
+
+void ExprInspectionChecker::analyzerDumpSValType(const CallExpr *CE,
+ CheckerContext &C) const {
+ const Expr *Arg = getArgExpr(CE, C);
+ if (!Arg)
+ return;
+
+ QualType Ty = C.getSVal(Arg).getType(C.getASTContext());
+ reportBug(Ty.getAsString(), C);
+}
+
void ExprInspectionChecker::analyzerDump(const CallExpr *CE,
CheckerContext &C) const {
const Expr *Arg = getArgExpr(CE, C);
@@ -262,21 +316,17 @@ void ExprInspectionChecker::analyzerDump(const CallExpr *CE,
return;
SVal V = C.getSVal(Arg);
-
- llvm::SmallString<32> Str;
- llvm::raw_svector_ostream OS(Str);
- V.dumpToStream(OS);
- reportBug(OS.str(), C);
+ printAndReport(C, V);
}
void ExprInspectionChecker::analyzerGetExtent(const CallExpr *CE,
CheckerContext &C) const {
- const MemRegion *MR = getArgRegion(CE, C);
- if (!MR)
+ const Expr *Arg = getArgExpr(CE, C);
+ if (!Arg)
return;
ProgramStateRef State = C.getState();
- DefinedOrUnknownSVal Size = getDynamicExtent(State, MR, C.getSValBuilder());
+ SVal Size = getDynamicExtentWithOffset(State, C.getSVal(Arg));
State = State->BindExpr(CE, C.getLocationContext(), Size);
C.addTransition(State);
@@ -284,17 +334,13 @@ void ExprInspectionChecker::analyzerGetExtent(const CallExpr *CE,
void ExprInspectionChecker::analyzerDumpExtent(const CallExpr *CE,
CheckerContext &C) const {
- const MemRegion *MR = getArgRegion(CE, C);
- if (!MR)
+ const Expr *Arg = getArgExpr(CE, C);
+ if (!Arg)
return;
- DefinedOrUnknownSVal Size =
- getDynamicExtent(C.getState(), MR, C.getSValBuilder());
-
- SmallString<64> Msg;
- llvm::raw_svector_ostream Out(Msg);
- Out << Size;
- reportBug(Out.str(), C);
+ ProgramStateRef State = C.getState();
+ SVal Size = getDynamicExtentWithOffset(State, C.getSVal(Arg));
+ printAndReport(C, Size);
}
void ExprInspectionChecker::analyzerDumpElementCount(const CallExpr *CE,
@@ -307,19 +353,14 @@ void ExprInspectionChecker::analyzerDumpElementCount(const CallExpr *CE,
if (const auto *TVR = MR->getAs<TypedValueRegion>()) {
ElementTy = TVR->getValueType();
} else {
- ElementTy =
- MR->castAs<SymbolicRegion>()->getSymbol()->getType()->getPointeeType();
+ ElementTy = MR->castAs<SymbolicRegion>()->getPointeeStaticType();
}
assert(!ElementTy->isPointerType());
- DefinedOrUnknownSVal ElementCount =
- getDynamicElementCount(C.getState(), MR, C.getSValBuilder(), ElementTy);
-
- SmallString<128> Msg;
- llvm::raw_svector_ostream Out(Msg);
- Out << ElementCount;
- reportBug(Out.str(), C);
+ DefinedOrUnknownSVal ElementCount = getDynamicElementCountWithOffset(
+ C.getState(), C.getSVal(getArgExpr(CE, C)), ElementTy);
+ printAndReport(C, ElementCount);
}
void ExprInspectionChecker::analyzerPrintState(const CallExpr *CE,
@@ -348,8 +389,7 @@ void ExprInspectionChecker::checkDeadSymbols(SymbolReaper &SymReaper,
ProgramStateRef State = C.getState();
const MarkedSymbolsTy &Syms = State->get<MarkedSymbols>();
ExplodedNode *N = C.getPredecessor();
- for (auto I = Syms.begin(), E = Syms.end(); I != E; ++I) {
- SymbolRef Sym = *I;
+ for (SymbolRef Sym : Syms) {
if (!SymReaper.isDead(Sym))
continue;
@@ -423,50 +463,60 @@ void ExprInspectionChecker::analyzerDenote(const CallExpr *CE,
namespace {
class SymbolExpressor
- : public SymExprVisitor<SymbolExpressor, Optional<std::string>> {
+ : public SymExprVisitor<SymbolExpressor, std::optional<std::string>> {
ProgramStateRef State;
public:
SymbolExpressor(ProgramStateRef State) : State(State) {}
- Optional<std::string> lookup(const SymExpr *S) {
+ std::optional<std::string> lookup(const SymExpr *S) {
if (const StringLiteral *const *SLPtr = State->get<DenotedSymbols>(S)) {
const StringLiteral *SL = *SLPtr;
return std::string(SL->getBytes());
}
- return None;
+ return std::nullopt;
}
- Optional<std::string> VisitSymExpr(const SymExpr *S) { return lookup(S); }
+ std::optional<std::string> VisitSymExpr(const SymExpr *S) {
+ return lookup(S);
+ }
- Optional<std::string> VisitSymIntExpr(const SymIntExpr *S) {
- if (Optional<std::string> Str = lookup(S))
+ std::optional<std::string> VisitSymIntExpr(const SymIntExpr *S) {
+ if (std::optional<std::string> Str = lookup(S))
return Str;
- if (Optional<std::string> Str = Visit(S->getLHS()))
+ if (std::optional<std::string> Str = Visit(S->getLHS()))
return (*Str + " " + BinaryOperator::getOpcodeStr(S->getOpcode()) + " " +
std::to_string(S->getRHS().getLimitedValue()) +
(S->getRHS().isUnsigned() ? "U" : ""))
.str();
- return None;
+ return std::nullopt;
}
- Optional<std::string> VisitSymSymExpr(const SymSymExpr *S) {
- if (Optional<std::string> Str = lookup(S))
+ std::optional<std::string> VisitSymSymExpr(const SymSymExpr *S) {
+ if (std::optional<std::string> Str = lookup(S))
return Str;
- if (Optional<std::string> Str1 = Visit(S->getLHS()))
- if (Optional<std::string> Str2 = Visit(S->getRHS()))
+ if (std::optional<std::string> Str1 = Visit(S->getLHS()))
+ if (std::optional<std::string> Str2 = Visit(S->getRHS()))
return (*Str1 + " " + BinaryOperator::getOpcodeStr(S->getOpcode()) +
" " + *Str2)
.str();
- return None;
+ return std::nullopt;
}
- Optional<std::string> VisitSymbolCast(const SymbolCast *S) {
- if (Optional<std::string> Str = lookup(S))
+ std::optional<std::string> VisitUnarySymExpr(const UnarySymExpr *S) {
+ if (std::optional<std::string> Str = lookup(S))
return Str;
- if (Optional<std::string> Str = Visit(S->getOperand()))
+ if (std::optional<std::string> Str = Visit(S->getOperand()))
+ return (UnaryOperator::getOpcodeStr(S->getOpcode()) + *Str).str();
+ return std::nullopt;
+ }
+
+ std::optional<std::string> VisitSymbolCast(const SymbolCast *S) {
+ if (std::optional<std::string> Str = lookup(S))
+ return Str;
+ if (std::optional<std::string> Str = Visit(S->getOperand()))
return (Twine("(") + S->getType().getAsString() + ")" + *Str).str();
- return None;
+ return std::nullopt;
}
};
} // namespace
@@ -480,14 +530,14 @@ void ExprInspectionChecker::analyzerExpress(const CallExpr *CE,
SVal ArgVal = C.getSVal(CE->getArg(0));
SymbolRef Sym = ArgVal.getAsSymbol();
if (!Sym) {
- reportBug("Not a symbol", C);
+ reportBug("Not a symbol", C, ArgVal);
return;
}
SymbolExpressor V(C.getState());
auto Str = V.Visit(Sym);
if (!Str) {
- reportBug("Unable to express", C);
+ reportBug("Unable to express", C, ArgVal);
return;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
index 6275e49e51ae..7aefcdc6d358 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
@@ -24,7 +24,7 @@ using namespace ento;
namespace {
class FixedAddressChecker
: public Checker< check::PreStmt<BinaryOperator> > {
- mutable std::unique_ptr<BuiltinBug> BT;
+ const BugType BT{this, "Use fixed address"};
public:
void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
@@ -49,14 +49,11 @@ void FixedAddressChecker::checkPreStmt(const BinaryOperator *B,
return;
if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
- if (!BT)
- BT.reset(
- new BuiltinBug(this, "Use fixed address",
- "Using a fixed address is not portable because that "
- "address will probably not be valid in all "
- "environments or platforms."));
- auto R =
- std::make_unique<PathSensitiveBugReport>(*BT, BT->getDescription(), N);
+ // FIXME: improve grammar in the following strings:
+ constexpr llvm::StringLiteral Msg =
+ "Using a fixed address is not portable because that address will "
+ "probably not be valid in all environments or platforms.";
+ auto R = std::make_unique<PathSensitiveBugReport>(BT, Msg, N);
R->addRange(B->getRHS()->getSourceRange());
C.emitReport(std::move(R));
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp
index e3f4be0726c8..079bc61a87d9 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp
@@ -101,6 +101,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
#include "llvm/ADT/StringExtras.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -254,9 +255,6 @@ static const ExplodedNode *getAcquireSite(const ExplodedNode *N, SymbolRef Sym,
namespace {
class FuchsiaHandleSymbolVisitor final : public SymbolVisitor {
public:
- FuchsiaHandleSymbolVisitor(ProgramStateRef State) : State(std::move(State)) {}
- ProgramStateRef getState() const { return State; }
-
bool VisitSymbol(SymbolRef S) override {
if (const auto *HandleType = S->getType()->getAs<TypedefType>())
if (HandleType->getDecl()->getName() == HandleTypeName)
@@ -268,7 +266,6 @@ public:
private:
SmallVector<SymbolRef, 1024> Symbols;
- ProgramStateRef State;
};
} // end anonymous namespace
@@ -284,7 +281,7 @@ getFuchsiaHandleSymbols(QualType QT, SVal Arg, ProgramStateRef State) {
if (QT->isStructureType()) {
// If we see a structure, see if there is any handle referenced by the
// structure.
- FuchsiaHandleSymbolVisitor Visitor(State);
+ FuchsiaHandleSymbolVisitor Visitor;
State->scanReachableSymbols(Arg, Visitor);
return Visitor.GetSymbols();
}
@@ -304,7 +301,7 @@ getFuchsiaHandleSymbols(QualType QT, SVal Arg, ProgramStateRef State) {
}
} else {
assert(PtrToHandleLevel == 1);
- if (Optional<Loc> ArgLoc = Arg.getAs<Loc>()) {
+ if (std::optional<Loc> ArgLoc = Arg.getAs<Loc>()) {
SymbolRef Sym = State->getSVal(*ArgLoc).getAsSymbol();
if (Sym) {
return {Sym};
@@ -384,12 +381,12 @@ void FuchsiaHandleChecker::checkPostCall(const CallEvent &Call,
SymbolRef RetSym = Call.getReturnValue().getAsSymbol();
Notes.push_back([RetSym, FuncDecl](BugReport &BR) -> std::string {
auto *PathBR = static_cast<PathSensitiveBugReport *>(&BR);
- if (auto IsInteresting = PathBR->getInterestingnessKind(RetSym)) {
+ if (PathBR->getInterestingnessKind(RetSym)) {
std::string SBuf;
llvm::raw_string_ostream OS(SBuf);
OS << "Function '" << FuncDecl->getDeclName()
<< "' returns an open handle";
- return OS.str();
+ return SBuf;
} else
return "";
});
@@ -400,12 +397,12 @@ void FuchsiaHandleChecker::checkPostCall(const CallEvent &Call,
SymbolRef RetSym = Call.getReturnValue().getAsSymbol();
Notes.push_back([RetSym, FuncDecl](BugReport &BR) -> std::string {
auto *PathBR = static_cast<PathSensitiveBugReport *>(&BR);
- if (auto IsInteresting = PathBR->getInterestingnessKind(RetSym)) {
+ if (PathBR->getInterestingnessKind(RetSym)) {
std::string SBuf;
llvm::raw_string_ostream OS(SBuf);
OS << "Function '" << FuncDecl->getDeclName()
<< "' returns an unowned handle";
- return OS.str();
+ return SBuf;
} else
return "";
});
@@ -434,12 +431,12 @@ void FuchsiaHandleChecker::checkPostCall(const CallEvent &Call,
} else {
Notes.push_back([Handle, ParamDiagIdx](BugReport &BR) -> std::string {
auto *PathBR = static_cast<PathSensitiveBugReport *>(&BR);
- if (auto IsInteresting = PathBR->getInterestingnessKind(Handle)) {
+ if (PathBR->getInterestingnessKind(Handle)) {
std::string SBuf;
llvm::raw_string_ostream OS(SBuf);
OS << "Handle released through " << ParamDiagIdx
<< llvm::getOrdinalSuffix(ParamDiagIdx) << " parameter";
- return OS.str();
+ return SBuf;
} else
return "";
});
@@ -448,12 +445,12 @@ void FuchsiaHandleChecker::checkPostCall(const CallEvent &Call,
} else if (hasFuchsiaAttr<AcquireHandleAttr>(PVD)) {
Notes.push_back([Handle, ParamDiagIdx](BugReport &BR) -> std::string {
auto *PathBR = static_cast<PathSensitiveBugReport *>(&BR);
- if (auto IsInteresting = PathBR->getInterestingnessKind(Handle)) {
+ if (PathBR->getInterestingnessKind(Handle)) {
std::string SBuf;
llvm::raw_string_ostream OS(SBuf);
OS << "Handle allocated through " << ParamDiagIdx
<< llvm::getOrdinalSuffix(ParamDiagIdx) << " parameter";
- return OS.str();
+ return SBuf;
} else
return "";
});
@@ -462,12 +459,12 @@ void FuchsiaHandleChecker::checkPostCall(const CallEvent &Call,
} else if (hasFuchsiaUnownedAttr<AcquireHandleAttr>(PVD)) {
Notes.push_back([Handle, ParamDiagIdx](BugReport &BR) -> std::string {
auto *PathBR = static_cast<PathSensitiveBugReport *>(&BR);
- if (auto IsInteresting = PathBR->getInterestingnessKind(Handle)) {
+ if (PathBR->getInterestingnessKind(Handle)) {
std::string SBuf;
llvm::raw_string_ostream OS(SBuf);
OS << "Unowned handle allocated through " << ParamDiagIdx
<< llvm::getOrdinalSuffix(ParamDiagIdx) << " parameter";
- return OS.str();
+ return SBuf;
} else
return "";
});
@@ -656,10 +653,11 @@ void FuchsiaHandleChecker::reportBug(SymbolRef Sym, ExplodedNode *ErrorNode,
if (Type.isSuppressOnSink()) {
const ExplodedNode *AcquireNode = getAcquireSite(ErrorNode, Sym, C);
if (AcquireNode) {
+ const Stmt *S = AcquireNode->getStmtForDiagnostics();
+ assert(S && "Statement cannot be null.");
PathDiagnosticLocation LocUsedForUniqueing =
PathDiagnosticLocation::createBegin(
- AcquireNode->getStmtForDiagnostics(), C.getSourceManager(),
- AcquireNode->getLocationContext());
+ S, C.getSourceManager(), AcquireNode->getLocationContext());
R = std::make_unique<PathSensitiveBugReport>(
Type, Msg, ErrorNode, LocUsedForUniqueing,
@@ -689,11 +687,10 @@ void FuchsiaHandleChecker::printState(raw_ostream &Out, ProgramStateRef State,
if (!StateMap.isEmpty()) {
Out << Sep << "FuchsiaHandleChecker :" << NL;
- for (HStateMapTy::iterator I = StateMap.begin(), E = StateMap.end(); I != E;
- ++I) {
- I.getKey()->dumpToStream(Out);
+ for (const auto &[Sym, HandleState] : StateMap) {
+ Sym->dumpToStream(Out);
Out << " : ";
- I.getData().dump(Out);
+ HandleState.dump(Out);
Out << NL;
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp
index 8e02ef74c668..5637941a58f0 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp
@@ -73,7 +73,7 @@ decltype(auto) bindAssignmentToDecl(const char *DeclName) {
static bool isTest(const Decl *D) {
if (const auto* ND = dyn_cast<NamedDecl>(D)) {
std::string DeclName = ND->getNameAsString();
- if (StringRef(DeclName).startswith("test"))
+ if (StringRef(DeclName).starts_with("test"))
return true;
}
if (const auto *OD = dyn_cast<ObjCMethodDecl>(D)) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
index 8d9afbe88aa8..6c32a8dec844 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
@@ -20,6 +20,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "llvm/Support/raw_ostream.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -91,11 +92,11 @@ using namespace ento;
namespace {
class GTestChecker : public Checker<check::PostCall> {
- mutable IdentifierInfo *AssertionResultII;
- mutable IdentifierInfo *SuccessII;
+ mutable IdentifierInfo *AssertionResultII = nullptr;
+ mutable IdentifierInfo *SuccessII = nullptr;
public:
- GTestChecker();
+ GTestChecker() = default;
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
@@ -119,8 +120,6 @@ private:
};
} // End anonymous namespace.
-GTestChecker::GTestChecker() : AssertionResultII(nullptr), SuccessII(nullptr) {}
-
/// Model a call to an un-inlined AssertionResult(bool) or
/// AssertionResult(bool &, ...).
/// To do so, constrain the value of the newly-constructed instance's 'success_'
@@ -135,7 +134,7 @@ void GTestChecker::modelAssertionResultBoolConstructor(
SVal BooleanArgVal = Call->getArgSVal(0);
if (IsRef) {
// The argument is a reference, so load from it to get the boolean value.
- if (!BooleanArgVal.getAs<Loc>())
+ if (!isa<Loc>(BooleanArgVal))
return;
BooleanArgVal = C.getState()->getSVal(BooleanArgVal.castAs<Loc>());
}
@@ -258,9 +257,9 @@ SVal GTestChecker::getAssertionResultSuccessFieldValue(
if (!SuccessField)
return UnknownVal();
- Optional<Loc> FieldLoc =
+ std::optional<Loc> FieldLoc =
State->getLValue(SuccessField, Instance).getAs<Loc>();
- if (!FieldLoc.hasValue())
+ if (!FieldLoc)
return UnknownVal();
return State->getSVal(*FieldLoc);
@@ -270,20 +269,17 @@ SVal GTestChecker::getAssertionResultSuccessFieldValue(
ProgramStateRef GTestChecker::assumeValuesEqual(SVal Val1, SVal Val2,
ProgramStateRef State,
CheckerContext &C) {
- if (!Val1.getAs<DefinedOrUnknownSVal>() ||
- !Val2.getAs<DefinedOrUnknownSVal>())
+ auto DVal1 = Val1.getAs<DefinedOrUnknownSVal>();
+ auto DVal2 = Val2.getAs<DefinedOrUnknownSVal>();
+ if (!DVal1 || !DVal2)
return State;
auto ValuesEqual =
- C.getSValBuilder().evalEQ(State, Val1.castAs<DefinedOrUnknownSVal>(),
- Val2.castAs<DefinedOrUnknownSVal>());
-
- if (!ValuesEqual.getAs<DefinedSVal>())
+ C.getSValBuilder().evalEQ(State, *DVal1, *DVal2).getAs<DefinedSVal>();
+ if (!ValuesEqual)
return State;
- State = C.getConstraintManager().assume(
- State, ValuesEqual.castAs<DefinedSVal>(), true);
-
+ State = C.getConstraintManager().assume(State, *ValuesEqual, true);
return State;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
index 42c777eb2c52..4ceaf933d0bf 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
@@ -14,838 +14,1065 @@
//
//===----------------------------------------------------------------------===//
-#include "Taint.h"
#include "Yaml.h"
#include "clang/AST/Attr.h"
#include "clang/Basic/Builtins.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Checkers/Taint.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/YAMLTraits.h"
-#include <algorithm>
#include <limits>
#include <memory>
-#include <unordered_map>
+#include <optional>
#include <utility>
+#include <vector>
+
+#define DEBUG_TYPE "taint-checker"
using namespace clang;
using namespace ento;
using namespace taint;
+using llvm::ImmutableSet;
+
namespace {
-class GenericTaintChecker : public Checker<check::PreCall, check::PostCall> {
-public:
- static void *getTag() {
- static int Tag;
- return &Tag;
+
+class GenericTaintChecker;
+
+/// Check for CWE-134: Uncontrolled Format String.
+constexpr llvm::StringLiteral MsgUncontrolledFormatString =
+ "Untrusted data is used as a format string "
+ "(CWE-134: Uncontrolled Format String)";
+
+/// Check for:
+/// CERT/STR02-C. "Sanitize data passed to complex subsystems"
+/// CWE-78, "Failure to Sanitize Data into an OS Command"
+constexpr llvm::StringLiteral MsgSanitizeSystemArgs =
+ "Untrusted data is passed to a system call "
+ "(CERT/STR02-C. Sanitize data passed to complex subsystems)";
+
+/// Check if tainted data is used as a buffer size in strn.. functions,
+/// and allocators.
+constexpr llvm::StringLiteral MsgTaintedBufferSize =
+ "Untrusted data is used to specify the buffer size "
+ "(CERT/STR31-C. Guarantee that storage for strings has sufficient space "
+ "for character data and the null terminator)";
+
+/// Check if tainted data is used as a custom sink's parameter.
+constexpr llvm::StringLiteral MsgCustomSink =
+ "Untrusted data is passed to a user-defined sink";
+
+using ArgIdxTy = int;
+using ArgVecTy = llvm::SmallVector<ArgIdxTy, 2>;
+
+/// Denotes the return value.
+constexpr ArgIdxTy ReturnValueIndex{-1};
+
+static ArgIdxTy fromArgumentCount(unsigned Count) {
+ assert(Count <=
+ static_cast<std::size_t>(std::numeric_limits<ArgIdxTy>::max()) &&
+ "ArgIdxTy is not large enough to represent the number of arguments.");
+ return Count;
+}
+
+/// Check if the region the expression evaluates to is the standard input,
+/// and thus, is tainted.
+/// FIXME: Move this to Taint.cpp.
+bool isStdin(SVal Val, const ASTContext &ACtx) {
+ // FIXME: What if Val is NonParamVarRegion?
+
+ // The region should be symbolic, we do not know it's value.
+ const auto *SymReg = dyn_cast_or_null<SymbolicRegion>(Val.getAsRegion());
+ if (!SymReg)
+ return false;
+
+ // Get it's symbol and find the declaration region it's pointing to.
+ const auto *DeclReg =
+ dyn_cast_or_null<DeclRegion>(SymReg->getSymbol()->getOriginRegion());
+ if (!DeclReg)
+ return false;
+
+ // This region corresponds to a declaration, find out if it's a global/extern
+ // variable named stdin with the proper type.
+ if (const auto *D = dyn_cast_or_null<VarDecl>(DeclReg->getDecl())) {
+ D = D->getCanonicalDecl();
+ if (D->getName() == "stdin" && D->hasExternalStorage() && D->isExternC()) {
+ const QualType FILETy = ACtx.getFILEType().getCanonicalType();
+ const QualType Ty = D->getType().getCanonicalType();
+
+ if (Ty->isPointerType())
+ return Ty->getPointeeType() == FILETy;
+ }
}
+ return false;
+}
- void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
- void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+SVal getPointeeOf(ProgramStateRef State, Loc LValue) {
+ const QualType ArgTy = LValue.getType(State->getStateManager().getContext());
+ if (!ArgTy->isPointerType() || !ArgTy->getPointeeType()->isVoidType())
+ return State->getSVal(LValue);
- void printState(raw_ostream &Out, ProgramStateRef State, const char *NL,
- const char *Sep) const override;
+ // Do not dereference void pointers. Treat them as byte pointers instead.
+ // FIXME: we might want to consider more than just the first byte.
+ return State->getSVal(LValue, State->getStateManager().getContext().CharTy);
+}
- using ArgVector = SmallVector<unsigned, 2>;
- using SignedArgVector = SmallVector<int, 2>;
+/// Given a pointer/reference argument, return the value it refers to.
+std::optional<SVal> getPointeeOf(ProgramStateRef State, SVal Arg) {
+ if (auto LValue = Arg.getAs<Loc>())
+ return getPointeeOf(State, *LValue);
+ return std::nullopt;
+}
- enum class VariadicType { None, Src, Dst };
+/// Given a pointer, return the SVal of its pointee or if it is tainted,
+/// otherwise return the pointer's SVal if tainted.
+/// Also considers stdin as a taint source.
+std::optional<SVal> getTaintedPointeeOrPointer(ProgramStateRef State,
+ SVal Arg) {
+ if (auto Pointee = getPointeeOf(State, Arg))
+ if (isTainted(State, *Pointee)) // FIXME: isTainted(...) ? Pointee : None;
+ return Pointee;
+
+ if (isTainted(State, Arg))
+ return Arg;
+ return std::nullopt;
+}
- /// Used to parse the configuration file.
- struct TaintConfiguration {
- using NameScopeArgs = std::tuple<std::string, std::string, ArgVector>;
-
- struct Propagation {
- std::string Name;
- std::string Scope;
- ArgVector SrcArgs;
- SignedArgVector DstArgs;
- VariadicType VarType;
- unsigned VarIndex;
- };
-
- std::vector<Propagation> Propagations;
- std::vector<NameScopeArgs> Filters;
- std::vector<NameScopeArgs> Sinks;
-
- TaintConfiguration() = default;
- TaintConfiguration(const TaintConfiguration &) = default;
- TaintConfiguration(TaintConfiguration &&) = default;
- TaintConfiguration &operator=(const TaintConfiguration &) = default;
- TaintConfiguration &operator=(TaintConfiguration &&) = default;
- };
+bool isTaintedOrPointsToTainted(ProgramStateRef State, SVal ExprSVal) {
+ return getTaintedPointeeOrPointer(State, ExprSVal).has_value();
+}
- /// Convert SignedArgVector to ArgVector.
- ArgVector convertToArgVector(CheckerManager &Mgr, const std::string &Option,
- const SignedArgVector &Args);
+/// Helps in printing taint diagnostics.
+/// Marks the incoming parameters of a function interesting (to be printed)
+/// when the return value, or the outgoing parameters are tainted.
+const NoteTag *taintOriginTrackerTag(CheckerContext &C,
+ std::vector<SymbolRef> TaintedSymbols,
+ std::vector<ArgIdxTy> TaintedArgs,
+ const LocationContext *CallLocation) {
+ return C.getNoteTag([TaintedSymbols = std::move(TaintedSymbols),
+ TaintedArgs = std::move(TaintedArgs), CallLocation](
+ PathSensitiveBugReport &BR) -> std::string {
+ SmallString<256> Msg;
+ // We give diagnostics only for taint related reports
+ if (!BR.isInteresting(CallLocation) ||
+ BR.getBugType().getCategory() != categories::TaintedData) {
+ return "";
+ }
+ if (TaintedSymbols.empty())
+ return "Taint originated here";
- /// Parse the config.
- void parseConfiguration(CheckerManager &Mgr, const std::string &Option,
- TaintConfiguration &&Config);
+ for (auto Sym : TaintedSymbols) {
+ BR.markInteresting(Sym);
+ }
+ LLVM_DEBUG(for (auto Arg
+ : TaintedArgs) {
+ llvm::dbgs() << "Taint Propagated from argument " << Arg + 1 << "\n";
+ });
+ return "";
+ });
+}
+
+/// Helps in printing taint diagnostics.
+/// Marks the function interesting (to be printed)
+/// when the return value, or the outgoing parameters are tainted.
+const NoteTag *taintPropagationExplainerTag(
+ CheckerContext &C, std::vector<SymbolRef> TaintedSymbols,
+ std::vector<ArgIdxTy> TaintedArgs, const LocationContext *CallLocation) {
+ assert(TaintedSymbols.size() == TaintedArgs.size());
+ return C.getNoteTag([TaintedSymbols = std::move(TaintedSymbols),
+ TaintedArgs = std::move(TaintedArgs), CallLocation](
+ PathSensitiveBugReport &BR) -> std::string {
+ SmallString<256> Msg;
+ llvm::raw_svector_ostream Out(Msg);
+ // We give diagnostics only for taint related reports
+ if (TaintedSymbols.empty() ||
+ BR.getBugType().getCategory() != categories::TaintedData) {
+ return "";
+ }
+ int nofTaintedArgs = 0;
+ for (auto [Idx, Sym] : llvm::enumerate(TaintedSymbols)) {
+ if (BR.isInteresting(Sym)) {
+ BR.markInteresting(CallLocation);
+ if (TaintedArgs[Idx] != ReturnValueIndex) {
+ LLVM_DEBUG(llvm::dbgs() << "Taint Propagated to argument "
+ << TaintedArgs[Idx] + 1 << "\n");
+ if (nofTaintedArgs == 0)
+ Out << "Taint propagated to the ";
+ else
+ Out << ", ";
+ Out << TaintedArgs[Idx] + 1
+ << llvm::getOrdinalSuffix(TaintedArgs[Idx] + 1) << " argument";
+ nofTaintedArgs++;
+ } else {
+ LLVM_DEBUG(llvm::dbgs() << "Taint Propagated to return value.\n");
+ Out << "Taint propagated to the return value";
+ }
+ }
+ }
+ return std::string(Out.str());
+ });
+}
+
+/// ArgSet is used to describe arguments relevant for taint detection or
+/// taint application. A discrete set of argument indexes and a variadic
+/// argument list signified by a starting index are supported.
+class ArgSet {
+public:
+ ArgSet() = default;
+ ArgSet(ArgVecTy &&DiscreteArgs,
+ std::optional<ArgIdxTy> VariadicIndex = std::nullopt)
+ : DiscreteArgs(std::move(DiscreteArgs)),
+ VariadicIndex(std::move(VariadicIndex)) {}
+
+ bool contains(ArgIdxTy ArgIdx) const {
+ if (llvm::is_contained(DiscreteArgs, ArgIdx))
+ return true;
+
+ return VariadicIndex && ArgIdx >= *VariadicIndex;
+ }
- static const unsigned InvalidArgIndex{std::numeric_limits<unsigned>::max()};
- /// Denotes the return vale.
- static const unsigned ReturnValueIndex{std::numeric_limits<unsigned>::max() -
- 1};
+ bool isEmpty() const { return DiscreteArgs.empty() && !VariadicIndex; }
private:
- mutable std::unique_ptr<BugType> BT;
- void initBugType() const {
- if (!BT)
- BT = std::make_unique<BugType>(this, "Use of Untrusted Data",
- "Untrusted Data");
+ ArgVecTy DiscreteArgs;
+ std::optional<ArgIdxTy> VariadicIndex;
+};
+
+/// A struct used to specify taint propagation rules for a function.
+///
+/// If any of the possible taint source arguments is tainted, all of the
+/// destination arguments should also be tainted. If ReturnValueIndex is added
+/// to the dst list, the return value will be tainted.
+class GenericTaintRule {
+ /// Arguments which are taints sinks and should be checked, and a report
+ /// should be emitted if taint reaches these.
+ ArgSet SinkArgs;
+ /// Arguments which should be sanitized on function return.
+ ArgSet FilterArgs;
+ /// Arguments which can participate in taint propagation. If any of the
+ /// arguments in PropSrcArgs is tainted, all arguments in PropDstArgs should
+ /// be tainted.
+ ArgSet PropSrcArgs;
+ ArgSet PropDstArgs;
+
+ /// A message that explains why the call is sensitive to taint.
+ std::optional<StringRef> SinkMsg;
+
+ GenericTaintRule() = default;
+
+ GenericTaintRule(ArgSet &&Sink, ArgSet &&Filter, ArgSet &&Src, ArgSet &&Dst,
+ std::optional<StringRef> SinkMsg = std::nullopt)
+ : SinkArgs(std::move(Sink)), FilterArgs(std::move(Filter)),
+ PropSrcArgs(std::move(Src)), PropDstArgs(std::move(Dst)),
+ SinkMsg(SinkMsg) {}
+
+public:
+ /// Make a rule that reports a warning if taint reaches any of \p FilterArgs
+ /// arguments.
+ static GenericTaintRule Sink(ArgSet &&SinkArgs,
+ std::optional<StringRef> Msg = std::nullopt) {
+ return {std::move(SinkArgs), {}, {}, {}, Msg};
}
- struct FunctionData {
- FunctionData() = delete;
- FunctionData(const FunctionDecl *FDecl, StringRef Name,
- std::string FullName)
- : FDecl(FDecl), Name(Name), FullName(std::move(FullName)) {}
- FunctionData(const FunctionData &) = default;
- FunctionData(FunctionData &&) = default;
- FunctionData &operator=(const FunctionData &) = delete;
- FunctionData &operator=(FunctionData &&) = delete;
-
- static Optional<FunctionData> create(const CallEvent &Call,
- const CheckerContext &C) {
- if (!Call.getDecl())
- return None;
-
- const FunctionDecl *FDecl = Call.getDecl()->getAsFunction();
- if (!FDecl || (FDecl->getKind() != Decl::Function &&
- FDecl->getKind() != Decl::CXXMethod))
- return None;
-
- StringRef Name = C.getCalleeName(FDecl);
- std::string FullName = FDecl->getQualifiedNameAsString();
- if (Name.empty() || FullName.empty())
- return None;
-
- return FunctionData{FDecl, Name, std::move(FullName)};
- }
+ /// Make a rule that sanitizes all FilterArgs arguments.
+ static GenericTaintRule Filter(ArgSet &&FilterArgs) {
+ return {{}, std::move(FilterArgs), {}, {}};
+ }
- bool isInScope(StringRef Scope) const {
- return StringRef(FullName).startswith(Scope);
- }
+ /// Make a rule that unconditionally taints all Args.
+ /// If Func is provided, it must also return true for taint to propagate.
+ static GenericTaintRule Source(ArgSet &&SourceArgs) {
+ return {{}, {}, {}, std::move(SourceArgs)};
+ }
- const FunctionDecl *const FDecl;
- const StringRef Name;
- const std::string FullName;
- };
+ /// Make a rule that taints all PropDstArgs if any of PropSrcArgs is tainted.
+ static GenericTaintRule Prop(ArgSet &&SrcArgs, ArgSet &&DstArgs) {
+ return {{}, {}, std::move(SrcArgs), std::move(DstArgs)};
+ }
- /// Catch taint related bugs. Check if tainted data is passed to a
- /// system call etc. Returns true on matching.
- bool checkPre(const CallEvent &Call, const FunctionData &FData,
- CheckerContext &C) const;
+ /// Make a rule that taints all PropDstArgs if any of PropSrcArgs is tainted.
+ static GenericTaintRule
+ SinkProp(ArgSet &&SinkArgs, ArgSet &&SrcArgs, ArgSet &&DstArgs,
+ std::optional<StringRef> Msg = std::nullopt) {
+ return {
+ std::move(SinkArgs), {}, std::move(SrcArgs), std::move(DstArgs), Msg};
+ }
- /// Add taint sources on a pre-visit. Returns true on matching.
- bool addSourcesPre(const CallEvent &Call, const FunctionData &FData,
- CheckerContext &C) const;
+ /// Process a function which could either be a taint source, a taint sink, a
+ /// taint filter or a taint propagator.
+ void process(const GenericTaintChecker &Checker, const CallEvent &Call,
+ CheckerContext &C) const;
- /// Mark filter's arguments not tainted on a pre-visit. Returns true on
- /// matching.
- bool addFiltersPre(const CallEvent &Call, const FunctionData &FData,
- CheckerContext &C) const;
+ /// Handles the resolution of indexes of type ArgIdxTy to Expr*-s.
+ static const Expr *GetArgExpr(ArgIdxTy ArgIdx, const CallEvent &Call) {
+ return ArgIdx == ReturnValueIndex ? Call.getOriginExpr()
+ : Call.getArgExpr(ArgIdx);
+ };
- /// Propagate taint generated at pre-visit. Returns true on matching.
- static bool propagateFromPre(const CallEvent &Call, CheckerContext &C);
+ /// Functions for custom taintedness propagation.
+ static bool UntrustedEnv(CheckerContext &C);
+};
- /// Check if the region the expression evaluates to is the standard input,
- /// and thus, is tainted.
- static bool isStdin(const Expr *E, CheckerContext &C);
+using RuleLookupTy = CallDescriptionMap<GenericTaintRule>;
- /// Given a pointer argument, return the value it points to.
- static Optional<SVal> getPointeeOf(CheckerContext &C, const Expr *Arg);
+/// Used to parse the configuration file.
+struct TaintConfiguration {
+ using NameScopeArgs = std::tuple<std::string, std::string, ArgVecTy>;
+ enum class VariadicType { None, Src, Dst };
- /// Check for CWE-134: Uncontrolled Format String.
- static constexpr llvm::StringLiteral MsgUncontrolledFormatString =
- "Untrusted data is used as a format string "
- "(CWE-134: Uncontrolled Format String)";
- bool checkUncontrolledFormatString(const CallEvent &Call,
- CheckerContext &C) const;
+ struct Common {
+ std::string Name;
+ std::string Scope;
+ };
- /// Check for:
- /// CERT/STR02-C. "Sanitize data passed to complex subsystems"
- /// CWE-78, "Failure to Sanitize Data into an OS Command"
- static constexpr llvm::StringLiteral MsgSanitizeSystemArgs =
- "Untrusted data is passed to a system call "
- "(CERT/STR02-C. Sanitize data passed to complex subsystems)";
- bool checkSystemCall(const CallEvent &Call, StringRef Name,
- CheckerContext &C) const;
-
- /// Check if tainted data is used as a buffer size ins strn.. functions,
- /// and allocators.
- static constexpr llvm::StringLiteral MsgTaintedBufferSize =
- "Untrusted data is used to specify the buffer size "
- "(CERT/STR31-C. Guarantee that storage for strings has sufficient space "
- "for character data and the null terminator)";
- bool checkTaintedBufferSize(const CallEvent &Call, CheckerContext &C) const;
-
- /// Check if tainted data is used as a custom sink's parameter.
- static constexpr llvm::StringLiteral MsgCustomSink =
- "Untrusted data is passed to a user-defined sink";
- bool checkCustomSinks(const CallEvent &Call, const FunctionData &FData,
- CheckerContext &C) const;
+ struct Sink : Common {
+ ArgVecTy SinkArgs;
+ };
- /// Generate a report if the expression is tainted or points to tainted data.
- bool generateReportIfTainted(const Expr *E, StringRef Msg,
- CheckerContext &C) const;
+ struct Filter : Common {
+ ArgVecTy FilterArgs;
+ };
- struct TaintPropagationRule;
- template <typename T>
- using ConfigDataMap =
- std::unordered_multimap<std::string, std::pair<std::string, T>>;
- using NameRuleMap = ConfigDataMap<TaintPropagationRule>;
- using NameArgMap = ConfigDataMap<ArgVector>;
-
- /// Find a function with the given name and scope. Returns the first match
- /// or the end of the map.
- template <typename T>
- static auto findFunctionInConfig(const ConfigDataMap<T> &Map,
- const FunctionData &FData);
-
- /// A struct used to specify taint propagation rules for a function.
- ///
- /// If any of the possible taint source arguments is tainted, all of the
- /// destination arguments should also be tainted. Use InvalidArgIndex in the
- /// src list to specify that all of the arguments can introduce taint. Use
- /// InvalidArgIndex in the dst arguments to signify that all the non-const
- /// pointer and reference arguments might be tainted on return. If
- /// ReturnValueIndex is added to the dst list, the return value will be
- /// tainted.
- struct TaintPropagationRule {
- using PropagationFuncType = bool (*)(bool IsTainted, const CallEvent &Call,
- CheckerContext &C);
-
- /// List of arguments which can be taint sources and should be checked.
- ArgVector SrcArgs;
- /// List of arguments which should be tainted on function return.
- ArgVector DstArgs;
- /// Index for the first variadic parameter if exist.
- unsigned VariadicIndex;
- /// Show when a function has variadic parameters. If it has, it marks all
- /// of them as source or destination.
+ struct Propagation : Common {
+ ArgVecTy SrcArgs;
+ ArgVecTy DstArgs;
VariadicType VarType;
- /// Special function for tainted source determination. If defined, it can
- /// override the default behavior.
- PropagationFuncType PropagationFunc;
-
- TaintPropagationRule()
- : VariadicIndex(InvalidArgIndex), VarType(VariadicType::None),
- PropagationFunc(nullptr) {}
-
- TaintPropagationRule(ArgVector &&Src, ArgVector &&Dst,
- VariadicType Var = VariadicType::None,
- unsigned VarIndex = InvalidArgIndex,
- PropagationFuncType Func = nullptr)
- : SrcArgs(std::move(Src)), DstArgs(std::move(Dst)),
- VariadicIndex(VarIndex), VarType(Var), PropagationFunc(Func) {}
-
- /// Get the propagation rule for a given function.
- static TaintPropagationRule
- getTaintPropagationRule(const NameRuleMap &CustomPropagations,
- const FunctionData &FData, CheckerContext &C);
-
- void addSrcArg(unsigned A) { SrcArgs.push_back(A); }
- void addDstArg(unsigned A) { DstArgs.push_back(A); }
-
- bool isNull() const {
- return SrcArgs.empty() && DstArgs.empty() &&
- VariadicType::None == VarType;
- }
+ ArgIdxTy VarIndex;
+ };
- bool isDestinationArgument(unsigned ArgNum) const {
- return (llvm::find(DstArgs, ArgNum) != DstArgs.end());
- }
+ std::vector<Propagation> Propagations;
+ std::vector<Filter> Filters;
+ std::vector<Sink> Sinks;
- static bool isTaintedOrPointsToTainted(const Expr *E,
- const ProgramStateRef &State,
- CheckerContext &C) {
- if (isTainted(State, E, C.getLocationContext()) || isStdin(E, C))
- return true;
+ TaintConfiguration() = default;
+ TaintConfiguration(const TaintConfiguration &) = default;
+ TaintConfiguration(TaintConfiguration &&) = default;
+ TaintConfiguration &operator=(const TaintConfiguration &) = default;
+ TaintConfiguration &operator=(TaintConfiguration &&) = default;
+};
- if (!E->getType().getTypePtr()->isPointerType())
- return false;
+struct GenericTaintRuleParser {
+ GenericTaintRuleParser(CheckerManager &Mgr) : Mgr(Mgr) {}
+ /// Container type used to gather call identification objects grouped into
+ /// pairs with their corresponding taint rules. It is temporary as it is used
+ /// to finally initialize RuleLookupTy, which is considered to be immutable.
+ using RulesContTy = std::vector<std::pair<CallDescription, GenericTaintRule>>;
+ RulesContTy parseConfiguration(const std::string &Option,
+ TaintConfiguration &&Config) const;
- Optional<SVal> V = getPointeeOf(C, E);
- return (V && isTainted(State, *V));
- }
+private:
+ using NamePartsTy = llvm::SmallVector<StringRef, 2>;
- /// Pre-process a function which propagates taint according to the
- /// taint rule.
- ProgramStateRef process(const CallEvent &Call, CheckerContext &C) const;
+ /// Validate part of the configuration, which contains a list of argument
+ /// indexes.
+ void validateArgVector(const std::string &Option, const ArgVecTy &Args) const;
- // Functions for custom taintedness propagation.
- static bool postSocket(bool IsTainted, const CallEvent &Call,
- CheckerContext &C);
- };
+ template <typename Config> static NamePartsTy parseNameParts(const Config &C);
- /// Defines a map between the propagation function's name, scope
- /// and TaintPropagationRule.
- NameRuleMap CustomPropagations;
+ // Takes the config and creates a CallDescription for it and associates a Rule
+ // with that.
+ template <typename Config>
+ static void consumeRulesFromConfig(const Config &C, GenericTaintRule &&Rule,
+ RulesContTy &Rules);
- /// Defines a map between the filter function's name, scope and filtering
- /// args.
- NameArgMap CustomFilters;
+ void parseConfig(const std::string &Option, TaintConfiguration::Sink &&P,
+ RulesContTy &Rules) const;
+ void parseConfig(const std::string &Option, TaintConfiguration::Filter &&P,
+ RulesContTy &Rules) const;
+ void parseConfig(const std::string &Option,
+ TaintConfiguration::Propagation &&P,
+ RulesContTy &Rules) const;
- /// Defines a map between the sink function's name, scope and sinking args.
- NameArgMap CustomSinks;
+ CheckerManager &Mgr;
};
-const unsigned GenericTaintChecker::ReturnValueIndex;
-const unsigned GenericTaintChecker::InvalidArgIndex;
+class GenericTaintChecker : public Checker<check::PreCall, check::PostCall> {
+public:
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
-// FIXME: these lines can be removed in C++17
-constexpr llvm::StringLiteral GenericTaintChecker::MsgUncontrolledFormatString;
-constexpr llvm::StringLiteral GenericTaintChecker::MsgSanitizeSystemArgs;
-constexpr llvm::StringLiteral GenericTaintChecker::MsgTaintedBufferSize;
-constexpr llvm::StringLiteral GenericTaintChecker::MsgCustomSink;
-} // end of anonymous namespace
+ void printState(raw_ostream &Out, ProgramStateRef State, const char *NL,
+ const char *Sep) const override;
-using TaintConfig = GenericTaintChecker::TaintConfiguration;
+ /// Generate a report if the expression is tainted or points to tainted data.
+ bool generateReportIfTainted(const Expr *E, StringRef Msg,
+ CheckerContext &C) const;
-LLVM_YAML_IS_SEQUENCE_VECTOR(TaintConfig::Propagation)
-LLVM_YAML_IS_SEQUENCE_VECTOR(TaintConfig::NameScopeArgs)
+private:
+ const BugType BT{this, "Use of Untrusted Data", categories::TaintedData};
+
+ bool checkUncontrolledFormatString(const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void taintUnsafeSocketProtocol(const CallEvent &Call,
+ CheckerContext &C) const;
+
+ /// Default taint rules are initalized with the help of a CheckerContext to
+ /// access the names of built-in functions like memcpy.
+ void initTaintRules(CheckerContext &C) const;
+
+ /// CallDescription currently cannot restrict matches to the global namespace
+ /// only, which is why multiple CallDescriptionMaps are used, as we want to
+ /// disambiguate global C functions from functions inside user-defined
+ /// namespaces.
+ // TODO: Remove separation to simplify matching logic once CallDescriptions
+ // are more expressive.
+
+ mutable std::optional<RuleLookupTy> StaticTaintRules;
+ mutable std::optional<RuleLookupTy> DynamicTaintRules;
+};
+} // end of anonymous namespace
+
+/// YAML serialization mapping.
+LLVM_YAML_IS_SEQUENCE_VECTOR(TaintConfiguration::Sink)
+LLVM_YAML_IS_SEQUENCE_VECTOR(TaintConfiguration::Filter)
+LLVM_YAML_IS_SEQUENCE_VECTOR(TaintConfiguration::Propagation)
namespace llvm {
namespace yaml {
-template <> struct MappingTraits<TaintConfig> {
- static void mapping(IO &IO, TaintConfig &Config) {
+template <> struct MappingTraits<TaintConfiguration> {
+ static void mapping(IO &IO, TaintConfiguration &Config) {
IO.mapOptional("Propagations", Config.Propagations);
IO.mapOptional("Filters", Config.Filters);
IO.mapOptional("Sinks", Config.Sinks);
}
};
-template <> struct MappingTraits<TaintConfig::Propagation> {
- static void mapping(IO &IO, TaintConfig::Propagation &Propagation) {
+template <> struct MappingTraits<TaintConfiguration::Sink> {
+ static void mapping(IO &IO, TaintConfiguration::Sink &Sink) {
+ IO.mapRequired("Name", Sink.Name);
+ IO.mapOptional("Scope", Sink.Scope);
+ IO.mapRequired("Args", Sink.SinkArgs);
+ }
+};
+
+template <> struct MappingTraits<TaintConfiguration::Filter> {
+ static void mapping(IO &IO, TaintConfiguration::Filter &Filter) {
+ IO.mapRequired("Name", Filter.Name);
+ IO.mapOptional("Scope", Filter.Scope);
+ IO.mapRequired("Args", Filter.FilterArgs);
+ }
+};
+
+template <> struct MappingTraits<TaintConfiguration::Propagation> {
+ static void mapping(IO &IO, TaintConfiguration::Propagation &Propagation) {
IO.mapRequired("Name", Propagation.Name);
IO.mapOptional("Scope", Propagation.Scope);
IO.mapOptional("SrcArgs", Propagation.SrcArgs);
IO.mapOptional("DstArgs", Propagation.DstArgs);
- IO.mapOptional("VariadicType", Propagation.VarType,
- GenericTaintChecker::VariadicType::None);
- IO.mapOptional("VariadicIndex", Propagation.VarIndex,
- GenericTaintChecker::InvalidArgIndex);
+ IO.mapOptional("VariadicType", Propagation.VarType);
+ IO.mapOptional("VariadicIndex", Propagation.VarIndex);
}
};
-template <> struct ScalarEnumerationTraits<GenericTaintChecker::VariadicType> {
- static void enumeration(IO &IO, GenericTaintChecker::VariadicType &Value) {
- IO.enumCase(Value, "None", GenericTaintChecker::VariadicType::None);
- IO.enumCase(Value, "Src", GenericTaintChecker::VariadicType::Src);
- IO.enumCase(Value, "Dst", GenericTaintChecker::VariadicType::Dst);
- }
-};
-
-template <> struct MappingTraits<TaintConfig::NameScopeArgs> {
- static void mapping(IO &IO, TaintConfig::NameScopeArgs &NSA) {
- IO.mapRequired("Name", std::get<0>(NSA));
- IO.mapOptional("Scope", std::get<1>(NSA));
- IO.mapRequired("Args", std::get<2>(NSA));
+template <> struct ScalarEnumerationTraits<TaintConfiguration::VariadicType> {
+ static void enumeration(IO &IO, TaintConfiguration::VariadicType &Value) {
+ IO.enumCase(Value, "None", TaintConfiguration::VariadicType::None);
+ IO.enumCase(Value, "Src", TaintConfiguration::VariadicType::Src);
+ IO.enumCase(Value, "Dst", TaintConfiguration::VariadicType::Dst);
}
};
} // namespace yaml
} // namespace llvm
/// A set which is used to pass information from call pre-visit instruction
-/// to the call post-visit. The values are unsigned integers, which are either
+/// to the call post-visit. The values are signed integers, which are either
/// ReturnValueIndex, or indexes of the pointer/reference argument, which
/// points to data, which should be tainted on return.
-REGISTER_SET_WITH_PROGRAMSTATE(TaintArgsOnPostVisit, unsigned)
-
-GenericTaintChecker::ArgVector
-GenericTaintChecker::convertToArgVector(CheckerManager &Mgr,
- const std::string &Option,
- const SignedArgVector &Args) {
- ArgVector Result;
- for (int Arg : Args) {
- if (Arg == -1)
- Result.push_back(ReturnValueIndex);
- else if (Arg < -1) {
- Result.push_back(InvalidArgIndex);
+REGISTER_MAP_WITH_PROGRAMSTATE(TaintArgsOnPostVisit, const LocationContext *,
+ ImmutableSet<ArgIdxTy>)
+REGISTER_SET_FACTORY_WITH_PROGRAMSTATE(ArgIdxFactory, ArgIdxTy)
+
+void GenericTaintRuleParser::validateArgVector(const std::string &Option,
+ const ArgVecTy &Args) const {
+ for (ArgIdxTy Arg : Args) {
+ if (Arg < ReturnValueIndex) {
Mgr.reportInvalidCheckerOptionValue(
- this, Option,
+ Mgr.getChecker<GenericTaintChecker>(), Option,
"an argument number for propagation rules greater or equal to -1");
- } else
- Result.push_back(static_cast<unsigned>(Arg));
+ }
}
- return Result;
}
-void GenericTaintChecker::parseConfiguration(CheckerManager &Mgr,
- const std::string &Option,
- TaintConfiguration &&Config) {
- for (auto &P : Config.Propagations) {
- GenericTaintChecker::CustomPropagations.emplace(
- P.Name,
- std::make_pair(P.Scope, TaintPropagationRule{
- std::move(P.SrcArgs),
- convertToArgVector(Mgr, Option, P.DstArgs),
- P.VarType, P.VarIndex}));
+template <typename Config>
+GenericTaintRuleParser::NamePartsTy
+GenericTaintRuleParser::parseNameParts(const Config &C) {
+ NamePartsTy NameParts;
+ if (!C.Scope.empty()) {
+ // If the Scope argument contains multiple "::" parts, those are considered
+ // namespace identifiers.
+ StringRef{C.Scope}.split(NameParts, "::", /*MaxSplit*/ -1,
+ /*KeepEmpty*/ false);
}
+ NameParts.emplace_back(C.Name);
+ return NameParts;
+}
- for (auto &F : Config.Filters) {
- GenericTaintChecker::CustomFilters.emplace(
- std::get<0>(F),
- std::make_pair(std::move(std::get<1>(F)), std::move(std::get<2>(F))));
- }
+template <typename Config>
+void GenericTaintRuleParser::consumeRulesFromConfig(const Config &C,
+ GenericTaintRule &&Rule,
+ RulesContTy &Rules) {
+ NamePartsTy NameParts = parseNameParts(C);
+ Rules.emplace_back(CallDescription(NameParts), std::move(Rule));
+}
- for (auto &S : Config.Sinks) {
- GenericTaintChecker::CustomSinks.emplace(
- std::get<0>(S),
- std::make_pair(std::move(std::get<1>(S)), std::move(std::get<2>(S))));
- }
+void GenericTaintRuleParser::parseConfig(const std::string &Option,
+ TaintConfiguration::Sink &&S,
+ RulesContTy &Rules) const {
+ validateArgVector(Option, S.SinkArgs);
+ consumeRulesFromConfig(S, GenericTaintRule::Sink(std::move(S.SinkArgs)),
+ Rules);
}
-template <typename T>
-auto GenericTaintChecker::findFunctionInConfig(const ConfigDataMap<T> &Map,
- const FunctionData &FData) {
- auto Range = Map.equal_range(std::string(FData.Name));
- auto It =
- std::find_if(Range.first, Range.second, [&FData](const auto &Entry) {
- const auto &Value = Entry.second;
- StringRef Scope = Value.first;
- return Scope.empty() || FData.isInScope(Scope);
- });
- return It != Range.second ? It : Map.end();
+void GenericTaintRuleParser::parseConfig(const std::string &Option,
+ TaintConfiguration::Filter &&S,
+ RulesContTy &Rules) const {
+ validateArgVector(Option, S.FilterArgs);
+ consumeRulesFromConfig(S, GenericTaintRule::Filter(std::move(S.FilterArgs)),
+ Rules);
}
-GenericTaintChecker::TaintPropagationRule
-GenericTaintChecker::TaintPropagationRule::getTaintPropagationRule(
- const NameRuleMap &CustomPropagations, const FunctionData &FData,
- CheckerContext &C) {
- // TODO: Currently, we might lose precision here: we always mark a return
- // value as tainted even if it's just a pointer, pointing to tainted data.
+void GenericTaintRuleParser::parseConfig(const std::string &Option,
+ TaintConfiguration::Propagation &&P,
+ RulesContTy &Rules) const {
+ validateArgVector(Option, P.SrcArgs);
+ validateArgVector(Option, P.DstArgs);
+ bool IsSrcVariadic = P.VarType == TaintConfiguration::VariadicType::Src;
+ bool IsDstVariadic = P.VarType == TaintConfiguration::VariadicType::Dst;
+ std::optional<ArgIdxTy> JustVarIndex = P.VarIndex;
+
+ ArgSet SrcDesc(std::move(P.SrcArgs),
+ IsSrcVariadic ? JustVarIndex : std::nullopt);
+ ArgSet DstDesc(std::move(P.DstArgs),
+ IsDstVariadic ? JustVarIndex : std::nullopt);
+
+ consumeRulesFromConfig(
+ P, GenericTaintRule::Prop(std::move(SrcDesc), std::move(DstDesc)), Rules);
+}
+
+GenericTaintRuleParser::RulesContTy
+GenericTaintRuleParser::parseConfiguration(const std::string &Option,
+ TaintConfiguration &&Config) const {
+
+ RulesContTy Rules;
+
+ for (auto &F : Config.Filters)
+ parseConfig(Option, std::move(F), Rules);
+
+ for (auto &S : Config.Sinks)
+ parseConfig(Option, std::move(S), Rules);
+ for (auto &P : Config.Propagations)
+ parseConfig(Option, std::move(P), Rules);
+
+ return Rules;
+}
+
+void GenericTaintChecker::initTaintRules(CheckerContext &C) const {
// Check for exact name match for functions without builtin substitutes.
// Use qualified name, because these are C functions without namespace.
- TaintPropagationRule Rule =
- llvm::StringSwitch<TaintPropagationRule>(FData.FullName)
- // Source functions
- // TODO: Add support for vfscanf & family.
- .Case("fdopen", {{}, {ReturnValueIndex}})
- .Case("fopen", {{}, {ReturnValueIndex}})
- .Case("freopen", {{}, {ReturnValueIndex}})
- .Case("getch", {{}, {ReturnValueIndex}})
- .Case("getchar", {{}, {ReturnValueIndex}})
- .Case("getchar_unlocked", {{}, {ReturnValueIndex}})
- .Case("getenv", {{}, {ReturnValueIndex}})
- .Case("gets", {{}, {0, ReturnValueIndex}})
- .Case("scanf", {{}, {}, VariadicType::Dst, 1})
- .Case("socket", {{},
- {ReturnValueIndex},
- VariadicType::None,
- InvalidArgIndex,
- &TaintPropagationRule::postSocket})
- .Case("wgetch", {{}, {ReturnValueIndex}})
- // Propagating functions
- .Case("atoi", {{0}, {ReturnValueIndex}})
- .Case("atol", {{0}, {ReturnValueIndex}})
- .Case("atoll", {{0}, {ReturnValueIndex}})
- .Case("fgetc", {{0}, {ReturnValueIndex}})
- .Case("fgetln", {{0}, {ReturnValueIndex}})
- .Case("fgets", {{2}, {0, ReturnValueIndex}})
- .Case("fscanf", {{0}, {}, VariadicType::Dst, 2})
- .Case("sscanf", {{0}, {}, VariadicType::Dst, 2})
- .Case("getc", {{0}, {ReturnValueIndex}})
- .Case("getc_unlocked", {{0}, {ReturnValueIndex}})
- .Case("getdelim", {{3}, {0}})
- .Case("getline", {{2}, {0}})
- .Case("getw", {{0}, {ReturnValueIndex}})
- .Case("pread", {{0, 1, 2, 3}, {1, ReturnValueIndex}})
- .Case("read", {{0, 2}, {1, ReturnValueIndex}})
- .Case("strchr", {{0}, {ReturnValueIndex}})
- .Case("strrchr", {{0}, {ReturnValueIndex}})
- .Case("tolower", {{0}, {ReturnValueIndex}})
- .Case("toupper", {{0}, {ReturnValueIndex}})
- .Default({});
-
- if (!Rule.isNull())
- return Rule;
- assert(FData.FDecl);
-
- // Check if it's one of the memory setting/copying functions.
- // This check is specialized but faster then calling isCLibraryFunction.
- const FunctionDecl *FDecl = FData.FDecl;
- unsigned BId = 0;
- if ((BId = FDecl->getMemoryFunctionKind())) {
- switch (BId) {
- case Builtin::BImemcpy:
- case Builtin::BImemmove:
- case Builtin::BIstrncpy:
- case Builtin::BIstrncat:
- return {{1, 2}, {0, ReturnValueIndex}};
- case Builtin::BIstrlcpy:
- case Builtin::BIstrlcat:
- return {{1, 2}, {0}};
- case Builtin::BIstrndup:
- return {{0, 1}, {ReturnValueIndex}};
-
- default:
- break;
- }
+
+ if (StaticTaintRules || DynamicTaintRules)
+ return;
+
+ using RulesConstructionTy =
+ std::vector<std::pair<CallDescription, GenericTaintRule>>;
+ using TR = GenericTaintRule;
+
+ const Builtin::Context &BI = C.getASTContext().BuiltinInfo;
+
+ RulesConstructionTy GlobalCRules{
+ // Sources
+ {{{"fdopen"}}, TR::Source({{ReturnValueIndex}})},
+ {{{"fopen"}}, TR::Source({{ReturnValueIndex}})},
+ {{{"freopen"}}, TR::Source({{ReturnValueIndex}})},
+ {{{"getch"}}, TR::Source({{ReturnValueIndex}})},
+ {{{"getchar"}}, TR::Source({{ReturnValueIndex}})},
+ {{{"getchar_unlocked"}}, TR::Source({{ReturnValueIndex}})},
+ {{{"gets"}}, TR::Source({{0}, ReturnValueIndex})},
+ {{{"gets_s"}}, TR::Source({{0}, ReturnValueIndex})},
+ {{{"scanf"}}, TR::Source({{}, 1})},
+ {{{"scanf_s"}}, TR::Source({{}, {1}})},
+ {{{"wgetch"}}, TR::Source({{}, ReturnValueIndex})},
+ // Sometimes the line between taint sources and propagators is blurry.
+ // _IO_getc is choosen to be a source, but could also be a propagator.
+ // This way it is simpler, as modeling it as a propagator would require
+ // to model the possible sources of _IO_FILE * values, which the _IO_getc
+ // function takes as parameters.
+ {{{"_IO_getc"}}, TR::Source({{ReturnValueIndex}})},
+ {{{"getcwd"}}, TR::Source({{0, ReturnValueIndex}})},
+ {{{"getwd"}}, TR::Source({{0, ReturnValueIndex}})},
+ {{{"readlink"}}, TR::Source({{1, ReturnValueIndex}})},
+ {{{"readlinkat"}}, TR::Source({{2, ReturnValueIndex}})},
+ {{{"get_current_dir_name"}}, TR::Source({{ReturnValueIndex}})},
+ {{{"gethostname"}}, TR::Source({{0}})},
+ {{{"getnameinfo"}}, TR::Source({{2, 4}})},
+ {{{"getseuserbyname"}}, TR::Source({{1, 2}})},
+ {{{"getgroups"}}, TR::Source({{1, ReturnValueIndex}})},
+ {{{"getlogin"}}, TR::Source({{ReturnValueIndex}})},
+ {{{"getlogin_r"}}, TR::Source({{0}})},
+
+ // Props
+ {{{"accept"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"atoi"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"atol"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"atoll"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"fgetc"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"fgetln"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"fgets"}}, TR::Prop({{2}}, {{0, ReturnValueIndex}})},
+ {{{"fgetws"}}, TR::Prop({{2}}, {{0, ReturnValueIndex}})},
+ {{{"fscanf"}}, TR::Prop({{0}}, {{}, 2})},
+ {{{"fscanf_s"}}, TR::Prop({{0}}, {{}, {2}})},
+ {{{"sscanf"}}, TR::Prop({{0}}, {{}, 2})},
+
+ {{{"getc"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"getc_unlocked"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"getdelim"}}, TR::Prop({{3}}, {{0}})},
+ {{{"getline"}}, TR::Prop({{2}}, {{0}})},
+ {{{"getw"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"pread"}}, TR::Prop({{0, 1, 2, 3}}, {{1, ReturnValueIndex}})},
+ {{{"read"}}, TR::Prop({{0, 2}}, {{1, ReturnValueIndex}})},
+ {{{"strchr"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"strrchr"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"tolower"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"toupper"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"fread"}}, TR::Prop({{3}}, {{0, ReturnValueIndex}})},
+ {{{"recv"}}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
+ {{{"recvfrom"}}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
+
+ {{{"ttyname"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"ttyname_r"}}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
+
+ {{{"basename"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"dirname"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"fnmatch"}}, TR::Prop({{1}}, {{ReturnValueIndex}})},
+ {{{"memchr"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"memrchr"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"rawmemchr"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+
+ {{{"mbtowc"}}, TR::Prop({{1}}, {{0, ReturnValueIndex}})},
+ {{{"wctomb"}}, TR::Prop({{1}}, {{0, ReturnValueIndex}})},
+ {{{"wcwidth"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+
+ {{{"memcmp"}}, TR::Prop({{0, 1}}, {{ReturnValueIndex}})},
+ {{{"memcpy"}}, TR::Prop({{1}}, {{0, ReturnValueIndex}})},
+ {{{"memmove"}}, TR::Prop({{1}}, {{0, ReturnValueIndex}})},
+ // If memmem was called with a tainted needle and the search was
+ // successful, that would mean that the value pointed by the return value
+ // has the same content as the needle. If we choose to go by the policy of
+ // content equivalence implies taintedness equivalence, that would mean
+ // haystack should be considered a propagation source argument.
+ {{{"memmem"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+
+ // The comment for memmem above also applies to strstr.
+ {{{"strstr"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"strcasestr"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+
+ {{{"strchrnul"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+
+ {{{"index"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"rindex"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+
+ // FIXME: In case of arrays, only the first element of the array gets
+ // tainted.
+ {{{"qsort"}}, TR::Prop({{0}}, {{0}})},
+ {{{"qsort_r"}}, TR::Prop({{0}}, {{0}})},
+
+ {{{"strcmp"}}, TR::Prop({{0, 1}}, {{ReturnValueIndex}})},
+ {{{"strcasecmp"}}, TR::Prop({{0, 1}}, {{ReturnValueIndex}})},
+ {{{"strncmp"}}, TR::Prop({{0, 1, 2}}, {{ReturnValueIndex}})},
+ {{{"strncasecmp"}}, TR::Prop({{0, 1, 2}}, {{ReturnValueIndex}})},
+ {{{"strspn"}}, TR::Prop({{0, 1}}, {{ReturnValueIndex}})},
+ {{{"strcspn"}}, TR::Prop({{0, 1}}, {{ReturnValueIndex}})},
+ {{{"strpbrk"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"strndup"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"strndupa"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+
+ // strlen, wcslen, strnlen and alike intentionally don't propagate taint.
+ // See the details here: https://github.com/llvm/llvm-project/pull/66086
+
+ {{{"strtol"}}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
+ {{{"strtoll"}}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
+ {{{"strtoul"}}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
+ {{{"strtoull"}}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
+
+ {{{"isalnum"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"isalpha"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"isascii"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"isblank"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"iscntrl"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"isdigit"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"isgraph"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"islower"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"isprint"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"ispunct"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"isspace"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"isupper"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{{"isxdigit"}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+
+ {{CDF_MaybeBuiltin, {BI.getName(Builtin::BIstrncat)}},
+ TR::Prop({{1, 2}}, {{0, ReturnValueIndex}})},
+ {{CDF_MaybeBuiltin, {BI.getName(Builtin::BIstrlcpy)}},
+ TR::Prop({{1, 2}}, {{0}})},
+ {{CDF_MaybeBuiltin, {BI.getName(Builtin::BIstrlcat)}},
+ TR::Prop({{1, 2}}, {{0}})},
+ {{CDF_MaybeBuiltin, {{"snprintf"}}},
+ TR::Prop({{1}, 3}, {{0, ReturnValueIndex}})},
+ {{CDF_MaybeBuiltin, {{"sprintf"}}},
+ TR::Prop({{1}, 2}, {{0, ReturnValueIndex}})},
+ {{CDF_MaybeBuiltin, {{"strcpy"}}},
+ TR::Prop({{1}}, {{0, ReturnValueIndex}})},
+ {{CDF_MaybeBuiltin, {{"stpcpy"}}},
+ TR::Prop({{1}}, {{0, ReturnValueIndex}})},
+ {{CDF_MaybeBuiltin, {{"strcat"}}},
+ TR::Prop({{1}}, {{0, ReturnValueIndex}})},
+ {{CDF_MaybeBuiltin, {{"wcsncat"}}},
+ TR::Prop({{1}}, {{0, ReturnValueIndex}})},
+ {{CDF_MaybeBuiltin, {{"strdup"}}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDF_MaybeBuiltin, {{"strdupa"}}},
+ TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{CDF_MaybeBuiltin, {{"wcsdup"}}}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+
+ // Sinks
+ {{{"system"}}, TR::Sink({{0}}, MsgSanitizeSystemArgs)},
+ {{{"popen"}}, TR::Sink({{0}}, MsgSanitizeSystemArgs)},
+ {{{"execl"}}, TR::Sink({{}, {0}}, MsgSanitizeSystemArgs)},
+ {{{"execle"}}, TR::Sink({{}, {0}}, MsgSanitizeSystemArgs)},
+ {{{"execlp"}}, TR::Sink({{}, {0}}, MsgSanitizeSystemArgs)},
+ {{{"execv"}}, TR::Sink({{0, 1}}, MsgSanitizeSystemArgs)},
+ {{{"execve"}}, TR::Sink({{0, 1, 2}}, MsgSanitizeSystemArgs)},
+ {{{"fexecve"}}, TR::Sink({{0, 1, 2}}, MsgSanitizeSystemArgs)},
+ {{{"execvp"}}, TR::Sink({{0, 1}}, MsgSanitizeSystemArgs)},
+ {{{"execvpe"}}, TR::Sink({{0, 1, 2}}, MsgSanitizeSystemArgs)},
+ {{{"dlopen"}}, TR::Sink({{0}}, MsgSanitizeSystemArgs)},
+ {{CDF_MaybeBuiltin, {{"malloc"}}}, TR::Sink({{0}}, MsgTaintedBufferSize)},
+ {{CDF_MaybeBuiltin, {{"calloc"}}}, TR::Sink({{0}}, MsgTaintedBufferSize)},
+ {{CDF_MaybeBuiltin, {{"alloca"}}}, TR::Sink({{0}}, MsgTaintedBufferSize)},
+ {{CDF_MaybeBuiltin, {{"memccpy"}}},
+ TR::Sink({{3}}, MsgTaintedBufferSize)},
+ {{CDF_MaybeBuiltin, {{"realloc"}}},
+ TR::Sink({{1}}, MsgTaintedBufferSize)},
+ {{{{"setproctitle"}}}, TR::Sink({{0}, 1}, MsgUncontrolledFormatString)},
+ {{{{"setproctitle_fast"}}},
+ TR::Sink({{0}, 1}, MsgUncontrolledFormatString)},
+
+ // SinkProps
+ {{CDF_MaybeBuiltin, BI.getName(Builtin::BImemcpy)},
+ TR::SinkProp({{2}}, {{1, 2}}, {{0, ReturnValueIndex}},
+ MsgTaintedBufferSize)},
+ {{CDF_MaybeBuiltin, {BI.getName(Builtin::BImemmove)}},
+ TR::SinkProp({{2}}, {{1, 2}}, {{0, ReturnValueIndex}},
+ MsgTaintedBufferSize)},
+ {{CDF_MaybeBuiltin, {BI.getName(Builtin::BIstrncpy)}},
+ TR::SinkProp({{2}}, {{1, 2}}, {{0, ReturnValueIndex}},
+ MsgTaintedBufferSize)},
+ {{CDF_MaybeBuiltin, {BI.getName(Builtin::BIstrndup)}},
+ TR::SinkProp({{1}}, {{0, 1}}, {{ReturnValueIndex}},
+ MsgTaintedBufferSize)},
+ {{CDF_MaybeBuiltin, {{"bcopy"}}},
+ TR::SinkProp({{2}}, {{0, 2}}, {{1}}, MsgTaintedBufferSize)}};
+
+ // `getenv` returns taint only in untrusted environments.
+ if (TR::UntrustedEnv(C)) {
+ // void setproctitle_init(int argc, char *argv[], char *envp[])
+ GlobalCRules.push_back(
+ {{{"setproctitle_init"}}, TR::Sink({{1, 2}}, MsgCustomSink)});
+ GlobalCRules.push_back({{{"getenv"}}, TR::Source({{ReturnValueIndex}})});
}
- // Process all other functions which could be defined as builtins.
- if (Rule.isNull()) {
- const auto OneOf = [FDecl](const auto &... Name) {
- // FIXME: use fold expression in C++17
- using unused = int[];
- bool ret = false;
- static_cast<void>(unused{
- 0, (ret |= CheckerContext::isCLibraryFunction(FDecl, Name), 0)...});
- return ret;
- };
- if (OneOf("snprintf"))
- return {{1}, {0, ReturnValueIndex}, VariadicType::Src, 3};
- if (OneOf("sprintf"))
- return {{}, {0, ReturnValueIndex}, VariadicType::Src, 2};
- if (OneOf("strcpy", "stpcpy", "strcat"))
- return {{1}, {0, ReturnValueIndex}};
- if (OneOf("bcopy"))
- return {{0, 2}, {1}};
- if (OneOf("strdup", "strdupa", "wcsdup"))
- return {{0}, {ReturnValueIndex}};
+ StaticTaintRules.emplace(std::make_move_iterator(GlobalCRules.begin()),
+ std::make_move_iterator(GlobalCRules.end()));
+
+ // User-provided taint configuration.
+ CheckerManager *Mgr = C.getAnalysisManager().getCheckerManager();
+ assert(Mgr);
+ GenericTaintRuleParser ConfigParser{*Mgr};
+ std::string Option{"Config"};
+ StringRef ConfigFile =
+ Mgr->getAnalyzerOptions().getCheckerStringOption(this, Option);
+ std::optional<TaintConfiguration> Config =
+ getConfiguration<TaintConfiguration>(*Mgr, this, Option, ConfigFile);
+ if (!Config) {
+ // We don't have external taint config, no parsing required.
+ DynamicTaintRules = RuleLookupTy{};
+ return;
}
- // Skipping the following functions, since they might be used for cleansing or
- // smart memory copy:
- // - memccpy - copying until hitting a special character.
+ GenericTaintRuleParser::RulesContTy Rules{
+ ConfigParser.parseConfiguration(Option, std::move(*Config))};
- auto It = findFunctionInConfig(CustomPropagations, FData);
- if (It != CustomPropagations.end())
- return It->second.second;
- return {};
+ DynamicTaintRules.emplace(std::make_move_iterator(Rules.begin()),
+ std::make_move_iterator(Rules.end()));
}
void GenericTaintChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
- Optional<FunctionData> FData = FunctionData::create(Call, C);
- if (!FData)
- return;
-
- // Check for taintedness related errors first: system call, uncontrolled
- // format string, tainted buffer size.
- if (checkPre(Call, *FData, C))
- return;
-
- // Marks the function's arguments and/or return value tainted if it present in
- // the list.
- if (addSourcesPre(Call, *FData, C))
- return;
-
- addFiltersPre(Call, *FData, C);
+ initTaintRules(C);
+
+ // FIXME: this should be much simpler.
+ if (const auto *Rule =
+ Call.isGlobalCFunction() ? StaticTaintRules->lookup(Call) : nullptr)
+ Rule->process(*this, Call, C);
+ else if (const auto *Rule = DynamicTaintRules->lookup(Call))
+ Rule->process(*this, Call, C);
+
+ // FIXME: These edge cases are to be eliminated from here eventually.
+ //
+ // Additional check that is not supported by CallDescription.
+ // TODO: Make CallDescription be able to match attributes such as printf-like
+ // arguments.
+ checkUncontrolledFormatString(Call, C);
+
+ // TODO: Modeling sockets should be done in a specific checker.
+ // Socket is a source, which taints the return value.
+ taintUnsafeSocketProtocol(Call, C);
}
void GenericTaintChecker::checkPostCall(const CallEvent &Call,
CheckerContext &C) const {
// Set the marked values as tainted. The return value only accessible from
// checkPostStmt.
- propagateFromPre(Call, C);
-}
-
-void GenericTaintChecker::printState(raw_ostream &Out, ProgramStateRef State,
- const char *NL, const char *Sep) const {
- printTaint(State, Out, NL, Sep);
-}
-
-bool GenericTaintChecker::addSourcesPre(const CallEvent &Call,
- const FunctionData &FData,
- CheckerContext &C) const {
- // First, try generating a propagation rule for this function.
- TaintPropagationRule Rule = TaintPropagationRule::getTaintPropagationRule(
- this->CustomPropagations, FData, C);
- if (!Rule.isNull()) {
- ProgramStateRef State = Rule.process(Call, C);
- if (State) {
- C.addTransition(State);
- return true;
- }
- }
- return false;
-}
-
-bool GenericTaintChecker::addFiltersPre(const CallEvent &Call,
- const FunctionData &FData,
- CheckerContext &C) const {
- auto It = findFunctionInConfig(CustomFilters, FData);
- if (It == CustomFilters.end())
- return false;
-
- ProgramStateRef State = C.getState();
- const auto &Value = It->second;
- const ArgVector &Args = Value.second;
- for (unsigned ArgNum : Args) {
- if (ArgNum >= Call.getNumArgs())
- continue;
-
- const Expr *Arg = Call.getArgExpr(ArgNum);
- Optional<SVal> V = getPointeeOf(C, Arg);
- if (V)
- State = removeTaint(State, *V);
- }
-
- if (State != C.getState()) {
- C.addTransition(State);
- return true;
- }
- return false;
-}
-
-bool GenericTaintChecker::propagateFromPre(const CallEvent &Call,
- CheckerContext &C) {
ProgramStateRef State = C.getState();
+ const StackFrameContext *CurrentFrame = C.getStackFrame();
// Depending on what was tainted at pre-visit, we determined a set of
// arguments which should be tainted after the function returns. These are
// stored in the state as TaintArgsOnPostVisit set.
- TaintArgsOnPostVisitTy TaintArgs = State->get<TaintArgsOnPostVisit>();
- if (TaintArgs.isEmpty())
- return false;
+ TaintArgsOnPostVisitTy TaintArgsMap = State->get<TaintArgsOnPostVisit>();
- for (unsigned ArgNum : TaintArgs) {
+ const ImmutableSet<ArgIdxTy> *TaintArgs = TaintArgsMap.lookup(CurrentFrame);
+ if (!TaintArgs)
+ return;
+ assert(!TaintArgs->isEmpty());
+
+ LLVM_DEBUG(for (ArgIdxTy I
+ : *TaintArgs) {
+ llvm::dbgs() << "PostCall<";
+ Call.dump(llvm::dbgs());
+ llvm::dbgs() << "> actually wants to taint arg index: " << I << '\n';
+ });
+
+ const NoteTag *InjectionTag = nullptr;
+ std::vector<SymbolRef> TaintedSymbols;
+ std::vector<ArgIdxTy> TaintedIndexes;
+ for (ArgIdxTy ArgNum : *TaintArgs) {
// Special handling for the tainted return value.
if (ArgNum == ReturnValueIndex) {
State = addTaint(State, Call.getReturnValue());
+ std::vector<SymbolRef> TaintedSyms =
+ getTaintedSymbols(State, Call.getReturnValue());
+ if (!TaintedSyms.empty()) {
+ TaintedSymbols.push_back(TaintedSyms[0]);
+ TaintedIndexes.push_back(ArgNum);
+ }
continue;
}
-
// The arguments are pointer arguments. The data they are pointing at is
// tainted after the call.
- if (Call.getNumArgs() < (ArgNum + 1))
- return false;
- const Expr *Arg = Call.getArgExpr(ArgNum);
- Optional<SVal> V = getPointeeOf(C, Arg);
- if (V)
+ if (auto V = getPointeeOf(State, Call.getArgSVal(ArgNum))) {
State = addTaint(State, *V);
+ std::vector<SymbolRef> TaintedSyms = getTaintedSymbols(State, *V);
+ if (!TaintedSyms.empty()) {
+ TaintedSymbols.push_back(TaintedSyms[0]);
+ TaintedIndexes.push_back(ArgNum);
+ }
+ }
}
-
+ // Create a NoteTag callback, which prints to the user where the taintedness
+ // was propagated to.
+ InjectionTag = taintPropagationExplainerTag(C, TaintedSymbols, TaintedIndexes,
+ Call.getCalleeStackFrame(0));
// Clear up the taint info from the state.
- State = State->remove<TaintArgsOnPostVisit>();
-
- if (State != C.getState()) {
- C.addTransition(State);
- return true;
- }
- return false;
-}
-
-bool GenericTaintChecker::checkPre(const CallEvent &Call,
- const FunctionData &FData,
- CheckerContext &C) const {
- if (checkUncontrolledFormatString(Call, C))
- return true;
-
- if (checkSystemCall(Call, FData.Name, C))
- return true;
-
- if (checkTaintedBufferSize(Call, C))
- return true;
-
- return checkCustomSinks(Call, FData, C);
+ State = State->remove<TaintArgsOnPostVisit>(CurrentFrame);
+ C.addTransition(State, InjectionTag);
}
-Optional<SVal> GenericTaintChecker::getPointeeOf(CheckerContext &C,
- const Expr *Arg) {
- ProgramStateRef State = C.getState();
- SVal AddrVal = C.getSVal(Arg->IgnoreParens());
- if (AddrVal.isUnknownOrUndef())
- return None;
-
- Optional<Loc> AddrLoc = AddrVal.getAs<Loc>();
- if (!AddrLoc)
- return None;
-
- QualType ArgTy = Arg->getType().getCanonicalType();
- if (!ArgTy->isPointerType())
- return State->getSVal(*AddrLoc);
-
- QualType ValTy = ArgTy->getPointeeType();
-
- // Do not dereference void pointers. Treat them as byte pointers instead.
- // FIXME: we might want to consider more than just the first byte.
- if (ValTy->isVoidType())
- ValTy = C.getASTContext().CharTy;
-
- return State->getSVal(*AddrLoc, ValTy);
+void GenericTaintChecker::printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const {
+ printTaint(State, Out, NL, Sep);
}
-ProgramStateRef
-GenericTaintChecker::TaintPropagationRule::process(const CallEvent &Call,
- CheckerContext &C) const {
+void GenericTaintRule::process(const GenericTaintChecker &Checker,
+ const CallEvent &Call, CheckerContext &C) const {
ProgramStateRef State = C.getState();
+ const ArgIdxTy CallNumArgs = fromArgumentCount(Call.getNumArgs());
- // Check for taint in arguments.
- bool IsTainted = true;
- for (unsigned ArgNum : SrcArgs) {
- if (ArgNum >= Call.getNumArgs())
- continue;
-
- if ((IsTainted =
- isTaintedOrPointsToTainted(Call.getArgExpr(ArgNum), State, C)))
- break;
- }
-
- // Check for taint in variadic arguments.
- if (!IsTainted && VariadicType::Src == VarType) {
- // Check if any of the arguments is tainted
- for (unsigned i = VariadicIndex; i < Call.getNumArgs(); ++i) {
- if ((IsTainted =
- isTaintedOrPointsToTainted(Call.getArgExpr(i), State, C)))
- break;
+ /// Iterate every call argument, and get their corresponding Expr and SVal.
+ const auto ForEachCallArg = [&C, &Call, CallNumArgs](auto &&Fun) {
+ for (ArgIdxTy I = ReturnValueIndex; I < CallNumArgs; ++I) {
+ const Expr *E = GetArgExpr(I, Call);
+ Fun(I, E, C.getSVal(E));
}
- }
+ };
- if (PropagationFunc)
- IsTainted = PropagationFunc(IsTainted, Call, C);
+ /// Check for taint sinks.
+ ForEachCallArg([this, &Checker, &C, &State](ArgIdxTy I, const Expr *E, SVal) {
+ // Add taintedness to stdin parameters
+ if (isStdin(C.getSVal(E), C.getASTContext())) {
+ State = addTaint(State, C.getSVal(E));
+ }
+ if (SinkArgs.contains(I) && isTaintedOrPointsToTainted(State, C.getSVal(E)))
+ Checker.generateReportIfTainted(E, SinkMsg.value_or(MsgCustomSink), C);
+ });
+
+ /// Check for taint filters.
+ ForEachCallArg([this, &State](ArgIdxTy I, const Expr *E, SVal S) {
+ if (FilterArgs.contains(I)) {
+ State = removeTaint(State, S);
+ if (auto P = getPointeeOf(State, S))
+ State = removeTaint(State, *P);
+ }
+ });
+
+ /// Check for taint propagation sources.
+ /// A rule will make the destination variables tainted if PropSrcArgs
+ /// is empty (taints the destination
+ /// arguments unconditionally), or if any of its signified
+ /// args are tainted in context of the current CallEvent.
+ bool IsMatching = PropSrcArgs.isEmpty();
+ std::vector<SymbolRef> TaintedSymbols;
+ std::vector<ArgIdxTy> TaintedIndexes;
+ ForEachCallArg([this, &C, &IsMatching, &State, &TaintedSymbols,
+ &TaintedIndexes](ArgIdxTy I, const Expr *E, SVal) {
+ std::optional<SVal> TaintedSVal =
+ getTaintedPointeeOrPointer(State, C.getSVal(E));
+ IsMatching =
+ IsMatching || (PropSrcArgs.contains(I) && TaintedSVal.has_value());
+
+ // We track back tainted arguments except for stdin
+ if (TaintedSVal && !isStdin(*TaintedSVal, C.getASTContext())) {
+ std::vector<SymbolRef> TaintedArgSyms =
+ getTaintedSymbols(State, *TaintedSVal);
+ if (!TaintedArgSyms.empty()) {
+ llvm::append_range(TaintedSymbols, TaintedArgSyms);
+ TaintedIndexes.push_back(I);
+ }
+ }
+ });
- if (!IsTainted)
- return State;
+ // Early return for propagation rules which dont match.
+ // Matching propagations, Sinks and Filters will pass this point.
+ if (!IsMatching)
+ return;
- // Mark the arguments which should be tainted after the function returns.
- for (unsigned ArgNum : DstArgs) {
- // Should mark the return value?
- if (ArgNum == ReturnValueIndex) {
- State = State->add<TaintArgsOnPostVisit>(ReturnValueIndex);
- continue;
- }
+ const auto WouldEscape = [](SVal V, QualType Ty) -> bool {
+ if (!isa<Loc>(V))
+ return false;
- if (ArgNum >= Call.getNumArgs())
- continue;
+ const bool IsNonConstRef = Ty->isReferenceType() && !Ty.isConstQualified();
+ const bool IsNonConstPtr =
+ Ty->isPointerType() && !Ty->getPointeeType().isConstQualified();
- // Mark the given argument.
- State = State->add<TaintArgsOnPostVisit>(ArgNum);
- }
+ return IsNonConstRef || IsNonConstPtr;
+ };
- // Mark all variadic arguments tainted if present.
- if (VariadicType::Dst == VarType) {
- // For all pointer and references that were passed in:
- // If they are not pointing to const data, mark data as tainted.
- // TODO: So far we are just going one level down; ideally we'd need to
- // recurse here.
- for (unsigned i = VariadicIndex; i < Call.getNumArgs(); ++i) {
- const Expr *Arg = Call.getArgExpr(i);
- // Process pointer argument.
- const Type *ArgTy = Arg->getType().getTypePtr();
- QualType PType = ArgTy->getPointeeType();
- if ((!PType.isNull() && !PType.isConstQualified()) ||
- (ArgTy->isReferenceType() && !Arg->getType().isConstQualified())) {
- State = State->add<TaintArgsOnPostVisit>(i);
- }
- }
- }
+ /// Propagate taint where it is necessary.
+ auto &F = State->getStateManager().get_context<ArgIdxFactory>();
+ ImmutableSet<ArgIdxTy> Result = F.getEmptySet();
+ ForEachCallArg(
+ [&](ArgIdxTy I, const Expr *E, SVal V) {
+ if (PropDstArgs.contains(I)) {
+ LLVM_DEBUG(llvm::dbgs() << "PreCall<"; Call.dump(llvm::dbgs());
+ llvm::dbgs()
+ << "> prepares tainting arg index: " << I << '\n';);
+ Result = F.add(Result, I);
+ }
+
+ // Taint property gets lost if the variable is passed as a
+ // non-const pointer or reference to a function which is
+ // not inlined. For matching rules we want to preserve the taintedness.
+ // TODO: We should traverse all reachable memory regions via the
+ // escaping parameter. Instead of doing that we simply mark only the
+ // referred memory region as tainted.
+ if (WouldEscape(V, E->getType()) && getTaintedPointeeOrPointer(State, V)) {
+ LLVM_DEBUG(if (!Result.contains(I)) {
+ llvm::dbgs() << "PreCall<";
+ Call.dump(llvm::dbgs());
+ llvm::dbgs() << "> prepares tainting arg index: " << I << '\n';
+ });
+ Result = F.add(Result, I);
+ }
+ });
- return State;
+ if (!Result.isEmpty())
+ State = State->set<TaintArgsOnPostVisit>(C.getStackFrame(), Result);
+ const NoteTag *InjectionTag = taintOriginTrackerTag(
+ C, std::move(TaintedSymbols), std::move(TaintedIndexes),
+ Call.getCalleeStackFrame(0));
+ C.addTransition(State, InjectionTag);
}
-// If argument 0(protocol domain) is network, the return value should get taint.
-bool GenericTaintChecker::TaintPropagationRule::postSocket(
- bool /*IsTainted*/, const CallEvent &Call, CheckerContext &C) {
- SourceLocation DomLoc = Call.getArgExpr(0)->getExprLoc();
- StringRef DomName = C.getMacroNameOrSpelling(DomLoc);
- // White list the internal communication protocols.
- if (DomName.equals("AF_SYSTEM") || DomName.equals("AF_LOCAL") ||
- DomName.equals("AF_UNIX") || DomName.equals("AF_RESERVED_36"))
- return false;
- return true;
+bool GenericTaintRule::UntrustedEnv(CheckerContext &C) {
+ return !C.getAnalysisManager()
+ .getAnalyzerOptions()
+ .ShouldAssumeControlledEnvironment;
}
-bool GenericTaintChecker::isStdin(const Expr *E, CheckerContext &C) {
- ProgramStateRef State = C.getState();
- SVal Val = C.getSVal(E);
-
- // stdin is a pointer, so it would be a region.
- const MemRegion *MemReg = Val.getAsRegion();
-
- // The region should be symbolic, we do not know it's value.
- const auto *SymReg = dyn_cast_or_null<SymbolicRegion>(MemReg);
- if (!SymReg)
- return false;
+bool GenericTaintChecker::generateReportIfTainted(const Expr *E, StringRef Msg,
+ CheckerContext &C) const {
+ assert(E);
+ std::optional<SVal> TaintedSVal =
+ getTaintedPointeeOrPointer(C.getState(), C.getSVal(E));
- // Get it's symbol and find the declaration region it's pointing to.
- const auto *Sm = dyn_cast<SymbolRegionValue>(SymReg->getSymbol());
- if (!Sm)
- return false;
- const auto *DeclReg = dyn_cast_or_null<DeclRegion>(Sm->getRegion());
- if (!DeclReg)
+ if (!TaintedSVal)
return false;
- // This region corresponds to a declaration, find out if it's a global/extern
- // variable named stdin with the proper type.
- if (const auto *D = dyn_cast_or_null<VarDecl>(DeclReg->getDecl())) {
- D = D->getCanonicalDecl();
- if ((D->getName().find("stdin") != StringRef::npos) && D->isExternC()) {
- const auto *PtrTy = dyn_cast<PointerType>(D->getType().getTypePtr());
- if (PtrTy && PtrTy->getPointeeType().getCanonicalType() ==
- C.getASTContext().getFILEType().getCanonicalType())
- return true;
+ // Generate diagnostic.
+ if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
+ auto report = std::make_unique<PathSensitiveBugReport>(BT, Msg, N);
+ report->addRange(E->getSourceRange());
+ for (auto TaintedSym : getTaintedSymbols(C.getState(), *TaintedSVal)) {
+ report->markInteresting(TaintedSym);
}
+
+ C.emitReport(std::move(report));
+ return true;
}
return false;
}
+/// TODO: remove checking for printf format attributes and socket whitelisting
+/// from GenericTaintChecker, and that means the following functions:
+/// getPrintfFormatArgumentNum,
+/// GenericTaintChecker::checkUncontrolledFormatString,
+/// GenericTaintChecker::taintUnsafeSocketProtocol
+
static bool getPrintfFormatArgumentNum(const CallEvent &Call,
const CheckerContext &C,
- unsigned &ArgNum) {
+ ArgIdxTy &ArgNum) {
// Find if the function contains a format string argument.
// Handles: fprintf, printf, sprintf, snprintf, vfprintf, vprintf, vsprintf,
// vsnprintf, syslog, custom annotated functions.
- const FunctionDecl *FDecl = Call.getDecl()->getAsFunction();
+ const Decl *CallDecl = Call.getDecl();
+ if (!CallDecl)
+ return false;
+ const FunctionDecl *FDecl = CallDecl->getAsFunction();
if (!FDecl)
return false;
+
+ const ArgIdxTy CallNumArgs = fromArgumentCount(Call.getNumArgs());
+
for (const auto *Format : FDecl->specific_attrs<FormatAttr>()) {
ArgNum = Format->getFormatIdx() - 1;
- if ((Format->getType()->getName() == "printf") &&
- Call.getNumArgs() > ArgNum)
+ if ((Format->getType()->getName() == "printf") && CallNumArgs > ArgNum)
return true;
}
- // Or if a function is named setproctitle (this is a heuristic).
- if (C.getCalleeName(FDecl).find("setproctitle") != StringRef::npos) {
- ArgNum = 0;
- return true;
- }
-
- return false;
-}
-
-bool GenericTaintChecker::generateReportIfTainted(const Expr *E, StringRef Msg,
- CheckerContext &C) const {
- assert(E);
-
- // Check for taint.
- ProgramStateRef State = C.getState();
- Optional<SVal> PointedToSVal = getPointeeOf(C, E);
- SVal TaintedSVal;
- if (PointedToSVal && isTainted(State, *PointedToSVal))
- TaintedSVal = *PointedToSVal;
- else if (isTainted(State, E, C.getLocationContext()))
- TaintedSVal = C.getSVal(E);
- else
- return false;
-
- // Generate diagnostic.
- if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
- initBugType();
- auto report = std::make_unique<PathSensitiveBugReport>(*BT, Msg, N);
- report->addRange(E->getSourceRange());
- report->addVisitor(std::make_unique<TaintBugVisitor>(TaintedSVal));
- C.emitReport(std::move(report));
- return true;
- }
return false;
}
bool GenericTaintChecker::checkUncontrolledFormatString(
const CallEvent &Call, CheckerContext &C) const {
// Check if the function contains a format string argument.
- unsigned ArgNum = 0;
+ ArgIdxTy ArgNum = 0;
if (!getPrintfFormatArgumentNum(Call, C, ArgNum))
return false;
@@ -855,102 +1082,35 @@ bool GenericTaintChecker::checkUncontrolledFormatString(
MsgUncontrolledFormatString, C);
}
-bool GenericTaintChecker::checkSystemCall(const CallEvent &Call, StringRef Name,
- CheckerContext &C) const {
- // TODO: It might make sense to run this check on demand. In some cases,
- // we should check if the environment has been cleansed here. We also might
- // need to know if the user was reset before these calls(seteuid).
- unsigned ArgNum = llvm::StringSwitch<unsigned>(Name)
- .Case("system", 0)
- .Case("popen", 0)
- .Case("execl", 0)
- .Case("execle", 0)
- .Case("execlp", 0)
- .Case("execv", 0)
- .Case("execvp", 0)
- .Case("execvP", 0)
- .Case("execve", 0)
- .Case("dlopen", 0)
- .Default(InvalidArgIndex);
-
- if (ArgNum == InvalidArgIndex || Call.getNumArgs() < (ArgNum + 1))
- return false;
-
- return generateReportIfTainted(Call.getArgExpr(ArgNum), MsgSanitizeSystemArgs,
- C);
-}
-
-// TODO: Should this check be a part of the CString checker?
-// If yes, should taint be a global setting?
-bool GenericTaintChecker::checkTaintedBufferSize(const CallEvent &Call,
- CheckerContext &C) const {
- const auto *FDecl = Call.getDecl()->getAsFunction();
- // If the function has a buffer size argument, set ArgNum.
- unsigned ArgNum = InvalidArgIndex;
- unsigned BId = 0;
- if ((BId = FDecl->getMemoryFunctionKind())) {
- switch (BId) {
- case Builtin::BImemcpy:
- case Builtin::BImemmove:
- case Builtin::BIstrncpy:
- ArgNum = 2;
- break;
- case Builtin::BIstrndup:
- ArgNum = 1;
- break;
- default:
- break;
- }
- }
-
- if (ArgNum == InvalidArgIndex) {
- using CCtx = CheckerContext;
- if (CCtx::isCLibraryFunction(FDecl, "malloc") ||
- CCtx::isCLibraryFunction(FDecl, "calloc") ||
- CCtx::isCLibraryFunction(FDecl, "alloca"))
- ArgNum = 0;
- else if (CCtx::isCLibraryFunction(FDecl, "memccpy"))
- ArgNum = 3;
- else if (CCtx::isCLibraryFunction(FDecl, "realloc"))
- ArgNum = 1;
- else if (CCtx::isCLibraryFunction(FDecl, "bcopy"))
- ArgNum = 2;
- }
-
- return ArgNum != InvalidArgIndex && Call.getNumArgs() > ArgNum &&
- generateReportIfTainted(Call.getArgExpr(ArgNum), MsgTaintedBufferSize,
- C);
-}
-
-bool GenericTaintChecker::checkCustomSinks(const CallEvent &Call,
- const FunctionData &FData,
- CheckerContext &C) const {
- auto It = findFunctionInConfig(CustomSinks, FData);
- if (It == CustomSinks.end())
- return false;
-
- const auto &Value = It->second;
- const GenericTaintChecker::ArgVector &Args = Value.second;
- for (unsigned ArgNum : Args) {
- if (ArgNum >= Call.getNumArgs())
- continue;
+void GenericTaintChecker::taintUnsafeSocketProtocol(const CallEvent &Call,
+ CheckerContext &C) const {
+ if (Call.getNumArgs() < 1)
+ return;
+ const IdentifierInfo *ID = Call.getCalleeIdentifier();
+ if (!ID)
+ return;
+ if (!ID->getName().equals("socket"))
+ return;
- if (generateReportIfTainted(Call.getArgExpr(ArgNum), MsgCustomSink, C))
- return true;
- }
+ SourceLocation DomLoc = Call.getArgExpr(0)->getExprLoc();
+ StringRef DomName = C.getMacroNameOrSpelling(DomLoc);
+ // Allow internal communication protocols.
+ bool SafeProtocol = DomName.equals("AF_SYSTEM") ||
+ DomName.equals("AF_LOCAL") || DomName.equals("AF_UNIX") ||
+ DomName.equals("AF_RESERVED_36");
+ if (SafeProtocol)
+ return;
- return false;
+ ProgramStateRef State = C.getState();
+ auto &F = State->getStateManager().get_context<ArgIdxFactory>();
+ ImmutableSet<ArgIdxTy> Result = F.add(F.getEmptySet(), ReturnValueIndex);
+ State = State->set<TaintArgsOnPostVisit>(C.getStackFrame(), Result);
+ C.addTransition(State);
}
+/// Checker registration
void ento::registerGenericTaintChecker(CheckerManager &Mgr) {
- auto *Checker = Mgr.registerChecker<GenericTaintChecker>();
- std::string Option{"Config"};
- StringRef ConfigFile =
- Mgr.getAnalyzerOptions().getCheckerStringOption(Checker, Option);
- llvm::Optional<TaintConfig> Config =
- getConfiguration<TaintConfig>(Mgr, Checker, Option, ConfigFile);
- if (Config)
- Checker->parseConfiguration(Mgr, Option, std::move(Config.getValue()));
+ Mgr.registerChecker<GenericTaintChecker>();
}
bool ento::shouldRegisterGenericTaintChecker(const CheckerManager &mgr) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
index bcae73378028..b673b51c4623 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
@@ -13,11 +13,12 @@
//===----------------------------------------------------------------------===//
#include "AllocationState.h"
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "InterCheckerAPI.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
@@ -34,9 +35,9 @@ namespace {
class InnerPointerChecker
: public Checker<check::DeadSymbols, check::PostCall> {
- CallDescription AppendFn, AssignFn, AddressofFn, ClearFn, CStrFn, DataFn,
- DataMemberFn, EraseFn, InsertFn, PopBackFn, PushBackFn, ReplaceFn,
- ReserveFn, ResizeFn, ShrinkToFitFn, SwapFn;
+ CallDescription AppendFn, AssignFn, AddressofFn, AddressofFn_, ClearFn,
+ CStrFn, DataFn, DataMemberFn, EraseFn, InsertFn, PopBackFn, PushBackFn,
+ ReplaceFn, ReserveFn, ResizeFn, ShrinkToFitFn, SwapFn;
public:
class InnerPointerBRVisitor : public BugReporterVisitor {
@@ -54,9 +55,9 @@ public:
ID.AddPointer(getTag());
}
- virtual PathDiagnosticPieceRef
- VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
- PathSensitiveBugReport &BR) override;
+ PathDiagnosticPieceRef VisitNode(const ExplodedNode *N,
+ BugReporterContext &BRC,
+ PathSensitiveBugReport &BR) override;
// FIXME: Scan the map once in the visitor's constructor and do a direct
// lookup by region.
@@ -73,7 +74,7 @@ public:
InnerPointerChecker()
: AppendFn({"std", "basic_string", "append"}),
AssignFn({"std", "basic_string", "assign"}),
- AddressofFn({"std", "addressof"}),
+ AddressofFn({"std", "addressof"}), AddressofFn_({"std", "__addressof"}),
ClearFn({"std", "basic_string", "clear"}),
CStrFn({"std", "basic_string", "c_str"}), DataFn({"std", "data"}, 1),
DataMemberFn({"std", "basic_string", "data"}),
@@ -125,19 +126,15 @@ bool InnerPointerChecker::isInvalidatingMemberFunction(
return true;
return false;
}
- return (isa<CXXDestructorCall>(Call) || Call.isCalled(AppendFn) ||
- Call.isCalled(AssignFn) || Call.isCalled(ClearFn) ||
- Call.isCalled(EraseFn) || Call.isCalled(InsertFn) ||
- Call.isCalled(PopBackFn) || Call.isCalled(PushBackFn) ||
- Call.isCalled(ReplaceFn) || Call.isCalled(ReserveFn) ||
- Call.isCalled(ResizeFn) || Call.isCalled(ShrinkToFitFn) ||
- Call.isCalled(SwapFn));
+ return isa<CXXDestructorCall>(Call) ||
+ matchesAny(Call, AppendFn, AssignFn, ClearFn, EraseFn, InsertFn,
+ PopBackFn, PushBackFn, ReplaceFn, ReserveFn, ResizeFn,
+ ShrinkToFitFn, SwapFn);
}
bool InnerPointerChecker::isInnerPointerAccessFunction(
const CallEvent &Call) const {
- return (Call.isCalled(CStrFn) || Call.isCalled(DataFn) ||
- Call.isCalled(DataMemberFn));
+ return matchesAny(Call, CStrFn, DataFn, DataMemberFn);
}
void InnerPointerChecker::markPtrSymbolsReleased(const CallEvent &Call,
@@ -182,9 +179,9 @@ void InnerPointerChecker::checkFunctionArguments(const CallEvent &Call,
if (!ArgRegion)
continue;
- // std::addressof function accepts a non-const reference as an argument,
+ // std::addressof functions accepts a non-const reference as an argument,
// but doesn't modify it.
- if (Call.isCalled(AddressofFn))
+ if (matchesAny(Call, AddressofFn, AddressofFn_))
continue;
markPtrSymbolsReleased(Call, State, ArgRegion, C);
@@ -323,8 +320,7 @@ PathDiagnosticPieceRef InnerPointerChecker::InnerPointerBRVisitor::VisitNode(
SmallString<256> Buf;
llvm::raw_svector_ostream OS(Buf);
- OS << "Pointer to inner buffer of '" << ObjTy.getAsString()
- << "' obtained here";
+ OS << "Pointer to inner buffer of '" << ObjTy << "' obtained here";
PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
N->getLocationContext());
return std::make_shared<PathDiagnosticEventPiece>(Pos, OS.str(), true);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InvalidatedIteratorChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InvalidatedIteratorChecker.cpp
index 6955ba11a28f..3f5856a3efbe 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InvalidatedIteratorChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InvalidatedIteratorChecker.cpp
@@ -31,14 +31,14 @@ class InvalidatedIteratorChecker
check::PreStmt<ArraySubscriptExpr>,
check::PreStmt<MemberExpr>> {
- std::unique_ptr<BugType> InvalidatedBugType;
+ const BugType InvalidatedBugType{this, "Iterator invalidated",
+ "Misuse of STL APIs"};
- void verifyAccess(CheckerContext &C, const SVal &Val) const;
- void reportBug(const StringRef &Message, const SVal &Val,
- CheckerContext &C, ExplodedNode *ErrNode) const;
-public:
- InvalidatedIteratorChecker();
+ void verifyAccess(CheckerContext &C, SVal Val) const;
+ void reportBug(StringRef Message, SVal Val, CheckerContext &C,
+ ExplodedNode *ErrNode) const;
+public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPreStmt(const UnaryOperator *UO, CheckerContext &C) const;
void checkPreStmt(const BinaryOperator *BO, CheckerContext &C) const;
@@ -49,11 +49,6 @@ public:
} //namespace
-InvalidatedIteratorChecker::InvalidatedIteratorChecker() {
- InvalidatedBugType.reset(
- new BugType(this, "Iterator invalidated", "Misuse of STL APIs"));
-}
-
void InvalidatedIteratorChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
// Check for access of invalidated position
@@ -114,7 +109,8 @@ void InvalidatedIteratorChecker::checkPreStmt(const MemberExpr *ME,
verifyAccess(C, BaseVal);
}
-void InvalidatedIteratorChecker::verifyAccess(CheckerContext &C, const SVal &Val) const {
+void InvalidatedIteratorChecker::verifyAccess(CheckerContext &C,
+ SVal Val) const {
auto State = C.getState();
const auto *Pos = getIteratorPosition(State, Val);
if (Pos && !Pos->isValid()) {
@@ -126,11 +122,11 @@ void InvalidatedIteratorChecker::verifyAccess(CheckerContext &C, const SVal &Val
}
}
-void InvalidatedIteratorChecker::reportBug(const StringRef &Message,
- const SVal &Val, CheckerContext &C,
+void InvalidatedIteratorChecker::reportBug(StringRef Message, SVal Val,
+ CheckerContext &C,
ExplodedNode *ErrNode) const {
- auto R = std::make_unique<PathSensitiveBugReport>(*InvalidatedBugType,
- Message, ErrNode);
+ auto R = std::make_unique<PathSensitiveBugReport>(InvalidatedBugType, Message,
+ ErrNode);
R->markInteresting(Val);
C.emitReport(std::move(R));
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp
index 496190149991..e8d35aac2efd 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp
@@ -29,8 +29,8 @@ bool isIterator(const CXXRecordDecl *CRD) {
return false;
const auto Name = CRD->getName();
- if (!(Name.endswith_insensitive("iterator") ||
- Name.endswith_insensitive("iter") || Name.endswith_insensitive("it")))
+ if (!(Name.ends_with_insensitive("iterator") ||
+ Name.ends_with_insensitive("iter") || Name.ends_with_insensitive("it")))
return false;
bool HasCopyCtor = false, HasCopyAssign = true, HasDtor = false,
@@ -181,8 +181,7 @@ const ContainerData *getContainerData(ProgramStateRef State,
return State->get<ContainerMap>(Cont);
}
-const IteratorPosition *getIteratorPosition(ProgramStateRef State,
- const SVal &Val) {
+const IteratorPosition *getIteratorPosition(ProgramStateRef State, SVal Val) {
if (auto Reg = Val.getAsRegion()) {
Reg = Reg->getMostDerivedObjectRegion();
return State->get<IteratorRegionMap>(Reg);
@@ -194,7 +193,7 @@ const IteratorPosition *getIteratorPosition(ProgramStateRef State,
return nullptr;
}
-ProgramStateRef setIteratorPosition(ProgramStateRef State, const SVal &Val,
+ProgramStateRef setIteratorPosition(ProgramStateRef State, SVal Val,
const IteratorPosition &Pos) {
if (auto Reg = Val.getAsRegion()) {
Reg = Reg->getMostDerivedObjectRegion();
@@ -207,8 +206,8 @@ ProgramStateRef setIteratorPosition(ProgramStateRef State, const SVal &Val,
return nullptr;
}
-ProgramStateRef createIteratorPosition(ProgramStateRef State, const SVal &Val,
- const MemRegion *Cont, const Stmt* S,
+ProgramStateRef createIteratorPosition(ProgramStateRef State, SVal Val,
+ const MemRegion *Cont, const Stmt *S,
const LocationContext *LCtx,
unsigned blockCount) {
auto &StateMgr = State->getStateManager();
@@ -221,9 +220,8 @@ ProgramStateRef createIteratorPosition(ProgramStateRef State, const SVal &Val,
IteratorPosition::getPosition(Cont, Sym));
}
-ProgramStateRef advancePosition(ProgramStateRef State, const SVal &Iter,
- OverloadedOperatorKind Op,
- const SVal &Distance) {
+ProgramStateRef advancePosition(ProgramStateRef State, SVal Iter,
+ OverloadedOperatorKind Op, SVal Distance) {
const auto *Pos = getIteratorPosition(State, Iter);
if (!Pos)
return nullptr;
@@ -308,8 +306,8 @@ bool compare(ProgramStateRef State, NonLoc NL1, NonLoc NL2,
const auto comparison =
SVB.evalBinOp(State, Opc, NL1, NL2, SVB.getConditionType());
- assert(comparison.getAs<DefinedSVal>() &&
- "Symbol comparison must be a `DefinedSVal`");
+ assert(isa<DefinedSVal>(comparison) &&
+ "Symbol comparison must be a `DefinedSVal`");
return !State->assume(comparison.castAs<DefinedSVal>(), false);
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.h
index 37157492fe3e..46de8ea01d77 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.h
@@ -63,9 +63,7 @@ public:
return Cont == X.Cont && Valid == X.Valid && Offset == X.Offset;
}
- bool operator!=(const IteratorPosition &X) const {
- return Cont != X.Cont || Valid != X.Valid || Offset != X.Offset;
- }
+ bool operator!=(const IteratorPosition &X) const { return !(*this == X); }
void Profile(llvm::FoldingSetNodeID &ID) const {
ID.AddPointer(Cont);
@@ -101,9 +99,7 @@ public:
return Begin == X.Begin && End == X.End;
}
- bool operator!=(const ContainerData &X) const {
- return Begin != X.Begin || End != X.End;
- }
+ bool operator!=(const ContainerData &X) const { return !(*this == X); }
void Profile(llvm::FoldingSetNodeID &ID) const {
ID.Add(Begin);
@@ -165,18 +161,15 @@ bool isRandomIncrOrDecrOperator(OverloadedOperatorKind OK);
bool isRandomIncrOrDecrOperator(BinaryOperatorKind OK);
const ContainerData *getContainerData(ProgramStateRef State,
const MemRegion *Cont);
-const IteratorPosition *getIteratorPosition(ProgramStateRef State,
- const SVal &Val);
-ProgramStateRef setIteratorPosition(ProgramStateRef State, const SVal &Val,
+const IteratorPosition *getIteratorPosition(ProgramStateRef State, SVal Val);
+ProgramStateRef setIteratorPosition(ProgramStateRef State, SVal Val,
const IteratorPosition &Pos);
-ProgramStateRef createIteratorPosition(ProgramStateRef State, const SVal &Val,
- const MemRegion *Cont, const Stmt* S,
+ProgramStateRef createIteratorPosition(ProgramStateRef State, SVal Val,
+ const MemRegion *Cont, const Stmt *S,
const LocationContext *LCtx,
unsigned blockCount);
-ProgramStateRef advancePosition(ProgramStateRef State,
- const SVal &Iter,
- OverloadedOperatorKind Op,
- const SVal &Distance);
+ProgramStateRef advancePosition(ProgramStateRef State, SVal Iter,
+ OverloadedOperatorKind Op, SVal Distance);
ProgramStateRef assumeNoOverflow(ProgramStateRef State, SymbolRef Sym,
long Scale);
bool compare(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
index ab5e6a1c9991..a95e811c2a41 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
@@ -64,13 +64,15 @@
// making an assumption e.g. `S1 + n == S2 + m` we store `S1 - S2 == m - n` as
// a constraint which we later retrieve when doing an actual comparison.
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h"
+#include "llvm/ADT/STLExtras.h"
#include "Iterator.h"
@@ -98,18 +100,17 @@ class IteratorModeling
const AdvanceFn *Handler) const;
void handleComparison(CheckerContext &C, const Expr *CE, SVal RetVal,
- const SVal &LVal, const SVal &RVal,
- OverloadedOperatorKind Op) const;
+ SVal LVal, SVal RVal, OverloadedOperatorKind Op) const;
void processComparison(CheckerContext &C, ProgramStateRef State,
- SymbolRef Sym1, SymbolRef Sym2, const SVal &RetVal,
+ SymbolRef Sym1, SymbolRef Sym2, SVal RetVal,
OverloadedOperatorKind Op) const;
- void handleIncrement(CheckerContext &C, const SVal &RetVal, const SVal &Iter,
+ void handleIncrement(CheckerContext &C, SVal RetVal, SVal Iter,
bool Postfix) const;
- void handleDecrement(CheckerContext &C, const SVal &RetVal, const SVal &Iter,
+ void handleDecrement(CheckerContext &C, SVal RetVal, SVal Iter,
bool Postfix) const;
void handleRandomIncrOrDecr(CheckerContext &C, const Expr *CE,
- OverloadedOperatorKind Op, const SVal &RetVal,
- const SVal &Iterator, const SVal &Amount) const;
+ OverloadedOperatorKind Op, SVal RetVal,
+ SVal Iterator, SVal Amount) const;
void handlePtrIncrOrDecr(CheckerContext &C, const Expr *Iterator,
OverloadedOperatorKind OK, SVal Offset) const;
void handleAdvance(CheckerContext &C, const Expr *CE, SVal RetVal, SVal Iter,
@@ -118,7 +119,7 @@ class IteratorModeling
SVal Amount) const;
void handleNext(CheckerContext &C, const Expr *CE, SVal RetVal, SVal Iter,
SVal Amount) const;
- void assignToContainer(CheckerContext &C, const Expr *CE, const SVal &RetVal,
+ void assignToContainer(CheckerContext &C, const Expr *CE, SVal RetVal,
const MemRegion *Cont) const;
bool noChangeInAdvance(CheckerContext &C, SVal Iter, const Expr *CE) const;
void printState(raw_ostream &Out, ProgramStateRef State, const char *NL,
@@ -150,8 +151,6 @@ public:
void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const;
void checkPostStmt(const UnaryOperator *UO, CheckerContext &C) const;
void checkPostStmt(const BinaryOperator *BO, CheckerContext &C) const;
- void checkPostStmt(const CXXConstructExpr *CCE, CheckerContext &C) const;
- void checkPostStmt(const DeclStmt *DS, CheckerContext &C) const;
void checkPostStmt(const MaterializeTemporaryExpr *MTE,
CheckerContext &C) const;
void checkLiveSymbols(ProgramStateRef State, SymbolReaper &SR) const;
@@ -160,7 +159,7 @@ public:
bool isSimpleComparisonOperator(OverloadedOperatorKind OK);
bool isSimpleComparisonOperator(BinaryOperatorKind OK);
-ProgramStateRef removeIteratorPosition(ProgramStateRef State, const SVal &Val);
+ProgramStateRef removeIteratorPosition(ProgramStateRef State, SVal Val);
ProgramStateRef relateSymbols(ProgramStateRef State, SymbolRef Sym1,
SymbolRef Sym2, bool Equal);
bool isBoundThroughLazyCompoundVal(const Environment &Env,
@@ -283,7 +282,7 @@ void IteratorModeling::checkPostStmt(const BinaryOperator *BO,
// The non-iterator side must have an integral or enumeration type.
if (!AmountExpr->getType()->isIntegralOrEnumerationType())
return;
- const SVal &AmountVal = IsIterOnLHS ? RVal : LVal;
+ SVal AmountVal = IsIterOnLHS ? RVal : LVal;
handlePtrIncrOrDecr(C, IterExpr, BinaryOperator::getOverloadedOperator(OK),
AmountVal);
}
@@ -304,21 +303,18 @@ void IteratorModeling::checkLiveSymbols(ProgramStateRef State,
SymbolReaper &SR) const {
// Keep symbolic expressions of iterator positions alive
auto RegionMap = State->get<IteratorRegionMap>();
- for (const auto &Reg : RegionMap) {
- const auto Offset = Reg.second.getOffset();
- for (auto i = Offset->symbol_begin(); i != Offset->symbol_end(); ++i)
- if (isa<SymbolData>(*i))
- SR.markLive(*i);
+ for (const IteratorPosition &Pos : llvm::make_second_range(RegionMap)) {
+ for (SymbolRef Sym : Pos.getOffset()->symbols())
+ if (isa<SymbolData>(Sym))
+ SR.markLive(Sym);
}
auto SymbolMap = State->get<IteratorSymbolMap>();
- for (const auto &Sym : SymbolMap) {
- const auto Offset = Sym.second.getOffset();
- for (auto i = Offset->symbol_begin(); i != Offset->symbol_end(); ++i)
- if (isa<SymbolData>(*i))
- SR.markLive(*i);
+ for (const IteratorPosition &Pos : llvm::make_second_range(SymbolMap)) {
+ for (SymbolRef Sym : Pos.getOffset()->symbols())
+ if (isa<SymbolData>(Sym))
+ SR.markLive(Sym);
}
-
}
void IteratorModeling::checkDeadSymbols(SymbolReaper &SR,
@@ -391,8 +387,8 @@ IteratorModeling::handleOverloadedOperator(CheckerContext &C,
const bool IsIterFirst = FirstType->isStructureOrClassType();
const SVal FirstArg = Call.getArgSVal(0);
const SVal SecondArg = Call.getArgSVal(1);
- const SVal &Iterator = IsIterFirst ? FirstArg : SecondArg;
- const SVal &Amount = IsIterFirst ? SecondArg : FirstArg;
+ SVal Iterator = IsIterFirst ? FirstArg : SecondArg;
+ SVal Amount = IsIterFirst ? SecondArg : FirstArg;
handleRandomIncrOrDecr(C, OrigExpr, Op, Call.getReturnValue(),
Iterator, Amount);
@@ -447,14 +443,13 @@ IteratorModeling::handleAdvanceLikeFunction(CheckerContext &C,
}
void IteratorModeling::handleComparison(CheckerContext &C, const Expr *CE,
- SVal RetVal, const SVal &LVal,
- const SVal &RVal,
- OverloadedOperatorKind Op) const {
+ SVal RetVal, SVal LVal, SVal RVal,
+ OverloadedOperatorKind Op) const {
// Record the operands and the operator of the comparison for the next
// evalAssume, if the result is a symbolic expression. If it is a concrete
// value (only one branch is possible), then transfer the state between
// the operands according to the operator and the result
- auto State = C.getState();
+ auto State = C.getState();
const auto *LPos = getIteratorPosition(State, LVal);
const auto *RPos = getIteratorPosition(State, RVal);
const MemRegion *Cont = nullptr;
@@ -507,7 +502,7 @@ void IteratorModeling::handleComparison(CheckerContext &C, const Expr *CE,
void IteratorModeling::processComparison(CheckerContext &C,
ProgramStateRef State, SymbolRef Sym1,
- SymbolRef Sym2, const SVal &RetVal,
+ SymbolRef Sym2, SVal RetVal,
OverloadedOperatorKind Op) const {
if (const auto TruthVal = RetVal.getAs<nonloc::ConcreteInt>()) {
if ((State = relateSymbols(State, Sym1, Sym2,
@@ -535,8 +530,8 @@ void IteratorModeling::processComparison(CheckerContext &C,
}
}
-void IteratorModeling::handleIncrement(CheckerContext &C, const SVal &RetVal,
- const SVal &Iter, bool Postfix) const {
+void IteratorModeling::handleIncrement(CheckerContext &C, SVal RetVal,
+ SVal Iter, bool Postfix) const {
// Increment the symbolic expressions which represents the position of the
// iterator
auto State = C.getState();
@@ -561,8 +556,8 @@ void IteratorModeling::handleIncrement(CheckerContext &C, const SVal &RetVal,
C.addTransition(State);
}
-void IteratorModeling::handleDecrement(CheckerContext &C, const SVal &RetVal,
- const SVal &Iter, bool Postfix) const {
+void IteratorModeling::handleDecrement(CheckerContext &C, SVal RetVal,
+ SVal Iter, bool Postfix) const {
// Decrement the symbolic expressions which represents the position of the
// iterator
auto State = C.getState();
@@ -589,9 +584,8 @@ void IteratorModeling::handleDecrement(CheckerContext &C, const SVal &RetVal,
void IteratorModeling::handleRandomIncrOrDecr(CheckerContext &C, const Expr *CE,
OverloadedOperatorKind Op,
- const SVal &RetVal,
- const SVal &Iterator,
- const SVal &Amount) const {
+ SVal RetVal, SVal Iterator,
+ SVal Amount) const {
// Increment or decrement the symbolic expressions which represents the
// position of the iterator
auto State = C.getState();
@@ -630,7 +624,7 @@ void IteratorModeling::handlePtrIncrOrDecr(CheckerContext &C,
const Expr *Iterator,
OverloadedOperatorKind OK,
SVal Offset) const {
- if (!Offset.getAs<DefinedSVal>())
+ if (!isa<DefinedSVal>(Offset))
return;
QualType PtrType = Iterator->getType();
@@ -687,7 +681,7 @@ void IteratorModeling::handleNext(CheckerContext &C, const Expr *CE,
}
void IteratorModeling::assignToContainer(CheckerContext &C, const Expr *CE,
- const SVal &RetVal,
+ SVal RetVal,
const MemRegion *Cont) const {
Cont = Cont->getMostDerivedObjectRegion();
@@ -775,7 +769,7 @@ bool isSimpleComparisonOperator(BinaryOperatorKind OK) {
return OK == BO_EQ || OK == BO_NE;
}
-ProgramStateRef removeIteratorPosition(ProgramStateRef State, const SVal &Val) {
+ProgramStateRef removeIteratorPosition(ProgramStateRef State, SVal Val) {
if (auto Reg = Val.getAsRegion()) {
Reg = Reg->getMostDerivedObjectRegion();
return State->remove<IteratorRegionMap>(Reg);
@@ -800,8 +794,8 @@ ProgramStateRef relateSymbols(ProgramStateRef State, SymbolRef Sym1,
SVB.evalBinOp(State, BO_EQ, nonloc::SymbolVal(Sym1),
nonloc::SymbolVal(Sym2), SVB.getConditionType());
- assert(comparison.getAs<DefinedSVal>() &&
- "Symbol comparison must be a `DefinedSVal`");
+ assert(isa<DefinedSVal>(comparison) &&
+ "Symbol comparison must be a `DefinedSVal`");
auto NewState = State->assume(comparison.castAs<DefinedSVal>(), Equal);
if (!NewState)
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp
index a47484497771..c8828219dd73 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp
@@ -14,10 +14,10 @@
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-
#include "Iterator.h"
using namespace clang;
@@ -32,7 +32,8 @@ class IteratorRangeChecker
check::PreStmt<ArraySubscriptExpr>,
check::PreStmt<MemberExpr>> {
- std::unique_ptr<BugType> OutOfRangeBugType;
+ const BugType OutOfRangeBugType{this, "Iterator out of range",
+ "Misuse of STL APIs"};
void verifyDereference(CheckerContext &C, SVal Val) const;
void verifyIncrement(CheckerContext &C, SVal Iter) const;
@@ -42,12 +43,10 @@ class IteratorRangeChecker
void verifyAdvance(CheckerContext &C, SVal LHS, SVal RHS) const;
void verifyPrev(CheckerContext &C, SVal LHS, SVal RHS) const;
void verifyNext(CheckerContext &C, SVal LHS, SVal RHS) const;
- void reportBug(const StringRef &Message, SVal Val, CheckerContext &C,
+ void reportBug(StringRef Message, SVal Val, CheckerContext &C,
ExplodedNode *ErrNode) const;
public:
- IteratorRangeChecker();
-
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPreStmt(const UnaryOperator *UO, CheckerContext &C) const;
void checkPreStmt(const BinaryOperator *BO, CheckerContext &C) const;
@@ -67,15 +66,10 @@ public:
bool isPastTheEnd(ProgramStateRef State, const IteratorPosition &Pos);
bool isAheadOfRange(ProgramStateRef State, const IteratorPosition &Pos);
bool isBehindPastTheEnd(ProgramStateRef State, const IteratorPosition &Pos);
-bool isZero(ProgramStateRef State, const NonLoc &Val);
+bool isZero(ProgramStateRef State, NonLoc Val);
} //namespace
-IteratorRangeChecker::IteratorRangeChecker() {
- OutOfRangeBugType.reset(
- new BugType(this, "Iterator out of range", "Misuse of STL APIs"));
-}
-
void IteratorRangeChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
// Check for out of range access
@@ -228,7 +222,7 @@ void IteratorRangeChecker::verifyRandomIncrOrDecr(CheckerContext &C,
Value = State->getRawSVal(*ValAsLoc);
}
- if (Value.isUnknownOrUndef())
+ if (Value.isUnknownOrUndef() || !isa<NonLoc>(Value))
return;
// Incremention or decremention by 0 is never a bug.
@@ -275,10 +269,10 @@ void IteratorRangeChecker::verifyNext(CheckerContext &C, SVal LHS,
verifyRandomIncrOrDecr(C, OO_Plus, LHS, RHS);
}
-void IteratorRangeChecker::reportBug(const StringRef &Message, SVal Val,
+void IteratorRangeChecker::reportBug(StringRef Message, SVal Val,
CheckerContext &C,
ExplodedNode *ErrNode) const {
- auto R = std::make_unique<PathSensitiveBugReport>(*OutOfRangeBugType, Message,
+ auto R = std::make_unique<PathSensitiveBugReport>(OutOfRangeBugType, Message,
ErrNode);
const auto *Pos = getIteratorPosition(C.getState(), Val);
@@ -295,7 +289,7 @@ bool isLess(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2);
bool isGreater(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2);
bool isEqual(ProgramStateRef State, SymbolRef Sym1, SymbolRef Sym2);
-bool isZero(ProgramStateRef State, const NonLoc &Val) {
+bool isZero(ProgramStateRef State, NonLoc Val) {
auto &BVF = State->getBasicVals();
return compare(State, Val,
nonloc::ConcreteInt(BVF.getValue(llvm::APSInt::get(0))),
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
index 3e6756efe0e6..f0276a57bdf9 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
@@ -27,14 +27,15 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallString.h"
@@ -44,9 +45,9 @@ using namespace ento;
namespace {
struct ChecksFilter {
/// Check for missing invalidation method declarations.
- DefaultBool check_MissingInvalidationMethod;
+ bool check_MissingInvalidationMethod = false;
/// Check that all ivars are invalidated.
- DefaultBool check_InstanceVariableInvalidation;
+ bool check_InstanceVariableInvalidation = false;
CheckerNameRef checkName_MissingInvalidationMethod;
CheckerNameRef checkName_InstanceVariableInvalidation;
@@ -63,12 +64,12 @@ class IvarInvalidationCheckerImpl {
struct InvalidationInfo {
/// Has the ivar been invalidated?
- bool IsInvalidated;
+ bool IsInvalidated = false;
/// The methods which can be used to invalidate the ivar.
MethodSet InvalidationMethods;
- InvalidationInfo() : IsInvalidated(false) {}
+ InvalidationInfo() = default;
void addInvalidationMethod(const ObjCMethodDecl *MD) {
InvalidationMethods.insert(MD);
}
@@ -80,9 +81,8 @@ class IvarInvalidationCheckerImpl {
bool hasMethod(const ObjCMethodDecl *MD) {
if (IsInvalidated)
return true;
- for (MethodSet::iterator I = InvalidationMethods.begin(),
- E = InvalidationMethods.end(); I != E; ++I) {
- if (*I == MD) {
+ for (const ObjCMethodDecl *Curr : InvalidationMethods) {
+ if (Curr == MD) {
IsInvalidated = true;
return true;
}
@@ -318,9 +318,7 @@ const ObjCIvarDecl *IvarInvalidationCheckerImpl::findPropertyBackingIvar(
// Lookup IVars named "_PropName"or "PropName" among the tracked Ivars.
StringRef PropName = Prop->getIdentifier()->getName();
- for (IvarSet::const_iterator I = TrackedIvars.begin(),
- E = TrackedIvars.end(); I != E; ++I) {
- const ObjCIvarDecl *Iv = I->first;
+ for (const ObjCIvarDecl *Iv : llvm::make_first_range(TrackedIvars)) {
StringRef IvarName = Iv->getName();
if (IvarName == PropName)
@@ -379,12 +377,9 @@ visit(const ObjCImplementationDecl *ImplD) const {
IvarToPropMapTy IvarToPopertyMap;
ObjCInterfaceDecl::PropertyMap PropMap;
- ObjCInterfaceDecl::PropertyDeclOrder PropOrder;
- InterfaceD->collectPropertiesToImplement(PropMap, PropOrder);
+ InterfaceD->collectPropertiesToImplement(PropMap);
- for (ObjCInterfaceDecl::PropertyMap::iterator
- I = PropMap.begin(), E = PropMap.end(); I != E; ++I) {
- const ObjCPropertyDecl *PD = I->second;
+ for (const ObjCPropertyDecl *PD : llvm::make_second_range(PropMap)) {
if (PD->isClassProperty())
continue;
@@ -423,11 +418,7 @@ visit(const ObjCImplementationDecl *ImplD) const {
// Remove ivars invalidated by the partial invalidation methods. They do not
// need to be invalidated in the regular invalidation methods.
bool AtImplementationContainsAtLeastOnePartialInvalidationMethod = false;
- for (MethodSet::iterator
- I = PartialInfo.InvalidationMethods.begin(),
- E = PartialInfo.InvalidationMethods.end(); I != E; ++I) {
- const ObjCMethodDecl *InterfD = *I;
-
+ for (const ObjCMethodDecl *InterfD : PartialInfo.InvalidationMethods) {
// Get the corresponding method in the @implementation.
const ObjCMethodDecl *D = ImplD->getMethod(InterfD->getSelector(),
InterfD->isInstanceMethod());
@@ -476,10 +467,7 @@ visit(const ObjCImplementationDecl *ImplD) const {
// Check that all ivars are invalidated by the invalidation methods.
bool AtImplementationContainsAtLeastOneInvalidationMethod = false;
- for (MethodSet::iterator I = Info.InvalidationMethods.begin(),
- E = Info.InvalidationMethods.end(); I != E; ++I) {
- const ObjCMethodDecl *InterfD = *I;
-
+ for (const ObjCMethodDecl *InterfD : Info.InvalidationMethods) {
// Get the corresponding method in the @implementation.
const ObjCMethodDecl *D = ImplD->getMethod(InterfD->getSelector(),
InterfD->isInstanceMethod());
@@ -502,9 +490,8 @@ visit(const ObjCImplementationDecl *ImplD) const {
continue;
// Warn on the ivars that were not invalidated by the method.
- for (IvarSet::const_iterator
- I = IvarsI.begin(), E = IvarsI.end(); I != E; ++I)
- reportIvarNeedsInvalidation(I->first, IvarToPopertyMap, D);
+ for (const ObjCIvarDecl *Ivar : llvm::make_first_range(IvarsI))
+ reportIvarNeedsInvalidation(Ivar, IvarToPopertyMap, D);
}
}
@@ -513,9 +500,8 @@ visit(const ObjCImplementationDecl *ImplD) const {
if (AtImplementationContainsAtLeastOnePartialInvalidationMethod) {
// Warn on the ivars that were not invalidated by the prrtial
// invalidation methods.
- for (IvarSet::const_iterator
- I = Ivars.begin(), E = Ivars.end(); I != E; ++I)
- reportIvarNeedsInvalidation(I->first, IvarToPopertyMap, nullptr);
+ for (const ObjCIvarDecl *Ivar : llvm::make_first_range(Ivars))
+ reportIvarNeedsInvalidation(Ivar, IvarToPopertyMap, nullptr);
} else {
// Otherwise, no invalidation methods were implemented.
reportNoInvalidationMethod(Filter.checkName_InstanceVariableInvalidation,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
index 1f3d8844d330..fa51aa80216b 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
@@ -273,7 +273,7 @@ void ASTFieldVisitor::ReportError(QualType T) {
os << (*I)->getName();
}
}
- os << " (type " << FieldChain.back()->getType().getAsString() << ")";
+ os << " (type " << FieldChain.back()->getType() << ")";
// Note that this will fire for every translation unit that uses this
// class. This is suboptimal, but at least scan-build will merge
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
index 28d3e058fee2..812d787e2e37 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
@@ -14,13 +14,13 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Lex/Lexer.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -28,7 +28,9 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Unicode.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -60,7 +62,8 @@ class NonLocalizedStringChecker
check::PostObjCMessage,
check::PostStmt<ObjCStringLiteral>> {
- mutable std::unique_ptr<BugType> BT;
+ const BugType BT{this, "Unlocalizable string",
+ "Localizability Issue (Apple)"};
// Methods that require a localized string
mutable llvm::DenseMap<const IdentifierInfo *,
@@ -87,12 +90,10 @@ class NonLocalizedStringChecker
Selector S) const;
public:
- NonLocalizedStringChecker();
-
// When this parameter is set to true, the checker assumes all
// methods that return NSStrings are unlocalized. Thus, more false
// positives will be reported.
- DefaultBool IsAggressive;
+ bool IsAggressive = false;
void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
void checkPostObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
@@ -106,11 +107,6 @@ public:
REGISTER_MAP_WITH_PROGRAMSTATE(LocalizedMemMap, const MemRegion *,
LocalizedState)
-NonLocalizedStringChecker::NonLocalizedStringChecker() {
- BT.reset(new BugType(this, "Unlocalizable string",
- "Localizability Issue (Apple)"));
-}
-
namespace {
class NonLocalizedStringBRVisitor final : public BugReporterVisitor {
@@ -716,7 +712,7 @@ void NonLocalizedStringChecker::setNonLocalizedState(const SVal S,
static bool isDebuggingName(std::string name) {
- return StringRef(name).lower().find("debug") != StringRef::npos;
+ return StringRef(name).contains_insensitive("debug");
}
/// Returns true when, heuristically, the analyzer may be analyzing debugging
@@ -762,7 +758,7 @@ void NonLocalizedStringChecker::reportLocalizationError(
// Generate the bug report.
auto R = std::make_unique<PathSensitiveBugReport>(
- *BT, "User-facing text should use localized string macro", ErrNode);
+ BT, "User-facing text should use localized string macro", ErrNode);
if (argumentNumber) {
R->addRange(M.getArgExpr(argumentNumber - 1)->getSourceRange());
} else {
@@ -815,9 +811,9 @@ void NonLocalizedStringChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
// Handle the case where the receiver is an NSString
// These special NSString methods draw to the screen
- if (!(SelectorName.startswith("drawAtPoint") ||
- SelectorName.startswith("drawInRect") ||
- SelectorName.startswith("drawWithRect")))
+ if (!(SelectorName.starts_with("drawAtPoint") ||
+ SelectorName.starts_with("drawInRect") ||
+ SelectorName.starts_with("drawWithRect")))
return;
SVal svTitle = msg.getReceiverSVal();
@@ -846,10 +842,9 @@ void NonLocalizedStringChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
if (argumentNumber < 0) { // There was no match in UIMethods
if (const Decl *D = msg.getDecl()) {
if (const ObjCMethodDecl *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
- auto formals = OMD->parameters();
- for (unsigned i = 0, ei = formals.size(); i != ei; ++i) {
- if (isAnnotatedAsTakingLocalized(formals[i])) {
- argumentNumber = i;
+ for (auto [Idx, FormalParam] : llvm::enumerate(OMD->parameters())) {
+ if (isAnnotatedAsTakingLocalized(FormalParam)) {
+ argumentNumber = Idx;
break;
}
}
@@ -949,7 +944,7 @@ void NonLocalizedStringChecker::checkPostCall(const CallEvent &Call,
const IdentifierInfo *Identifier = Call.getCalleeIdentifier();
SVal sv = Call.getReturnValue();
- if (isAnnotatedAsReturningLocalized(D) || LSF.count(Identifier) != 0) {
+ if (isAnnotatedAsReturningLocalized(D) || LSF.contains(Identifier)) {
setLocalizedState(sv, C);
} else if (isNSStringType(RT, C.getASTContext()) &&
!hasLocalizedState(sv, C)) {
@@ -1004,8 +999,8 @@ NonLocalizedStringBRVisitor::VisitNode(const ExplodedNode *Succ,
if (Satisfied)
return nullptr;
- Optional<StmtPoint> Point = Succ->getLocation().getAs<StmtPoint>();
- if (!Point.hasValue())
+ std::optional<StmtPoint> Point = Succ->getLocation().getAs<StmtPoint>();
+ if (!Point)
return nullptr;
auto *LiteralExpr = dyn_cast<ObjCStringLiteral>(Point->getStmt());
@@ -1141,12 +1136,12 @@ void EmptyLocalizationContextChecker::MethodCrawler::VisitObjCMessageExpr(
SE = Mgr.getSourceManager().getSLocEntry(SLInfo.first);
}
- llvm::Optional<llvm::MemoryBufferRef> BF =
+ std::optional<llvm::MemoryBufferRef> BF =
Mgr.getSourceManager().getBufferOrNone(SLInfo.first, SL);
if (!BF)
return;
-
- Lexer TheLexer(SL, LangOptions(), BF->getBufferStart(),
+ LangOptions LangOpts;
+ Lexer TheLexer(SL, LangOpts, BF->getBufferStart(),
BF->getBufferStart() + SLInfo.second, BF->getBufferEnd());
Token I;
@@ -1253,8 +1248,8 @@ bool PluralMisuseChecker::MethodCrawler::isCheckingPlurality(
BO = B;
}
}
- if (VD->getName().lower().find("plural") != StringRef::npos ||
- VD->getName().lower().find("singular") != StringRef::npos) {
+ if (VD->getName().contains_insensitive("plural") ||
+ VD->getName().contains_insensitive("singular")) {
return true;
}
}
@@ -1339,7 +1334,10 @@ bool PluralMisuseChecker::MethodCrawler::EndVisitIfStmt(IfStmt *I) {
}
bool PluralMisuseChecker::MethodCrawler::VisitIfStmt(const IfStmt *I) {
- const Expr *Condition = I->getCond()->IgnoreParenImpCasts();
+ const Expr *Condition = I->getCond();
+ if (!Condition)
+ return true;
+ Condition = Condition->IgnoreParenImpCasts();
if (isCheckingPlurality(Condition)) {
MatchingStatements.push_back(I);
InMatchingStatement = true;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp
index b72d72580c28..153a0a51e980 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp
@@ -27,8 +27,10 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -85,7 +87,7 @@ class MIGChecker : public Checker<check::PostCall, check::PreStmt<ReturnStmt>,
#undef CALL
};
- CallDescription OsRefRetain{"os_ref_retain", 1};
+ CallDescription OsRefRetain{{"os_ref_retain"}, 1};
void checkReturnAux(const ReturnStmt *RS, CheckerContext &C) const;
@@ -156,10 +158,10 @@ static bool isInMIGCall(CheckerContext &C) {
const Decl *D = SFC->getDecl();
- if (Optional<AnyCall> AC = AnyCall::forDecl(D)) {
+ if (std::optional<AnyCall> AC = AnyCall::forDecl(D)) {
// Even though there's a Sema warning when the return type of an annotated
// function is not a kern_return_t, this warning isn't an error, so we need
- // an extra sanity check here.
+ // an extra check here.
// FIXME: AnyCall doesn't support blocks yet, so they remain unchecked
// for now.
if (!AC->getReturnType(C.getASTContext())
@@ -180,7 +182,7 @@ static bool isInMIGCall(CheckerContext &C) {
}
void MIGChecker::checkPostCall(const CallEvent &Call, CheckerContext &C) const {
- if (Call.isCalled(OsRefRetain)) {
+ if (OsRefRetain.matches(Call)) {
// If the code is doing reference counting over the parameter,
// it opens up an opportunity for safely calling a destructor function.
// TODO: We should still check for over-releases.
@@ -198,7 +200,7 @@ void MIGChecker::checkPostCall(const CallEvent &Call, CheckerContext &C) const {
auto I = llvm::find_if(Deallocators,
[&](const std::pair<CallDescription, unsigned> &Item) {
- return Call.isCalled(Item.first);
+ return Item.first.matches(Call);
});
if (I == Deallocators.end())
return;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.cpp
index bbf2ddec5762..3e374e6c240e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.cpp
@@ -31,7 +31,7 @@ void MPIBugReporter::reportDoubleNonblocking(
RequestRegion->getDescriptiveName() + ". ";
auto Report = std::make_unique<PathSensitiveBugReport>(
- *DoubleNonblockingBugType, ErrorText, ExplNode);
+ DoubleNonblockingBugType, ErrorText, ExplNode);
Report->addRange(MPICallEvent.getSourceRange());
SourceRange Range = RequestRegion->sourceRange();
@@ -53,7 +53,7 @@ void MPIBugReporter::reportMissingWait(
std::string ErrorText{"Request " + RequestRegion->getDescriptiveName() +
" has no matching wait. "};
- auto Report = std::make_unique<PathSensitiveBugReport>(*MissingWaitBugType,
+ auto Report = std::make_unique<PathSensitiveBugReport>(MissingWaitBugType,
ErrorText, ExplNode);
SourceRange Range = RequestRegion->sourceRange();
@@ -73,7 +73,7 @@ void MPIBugReporter::reportUnmatchedWait(
std::string ErrorText{"Request " + RequestRegion->getDescriptiveName() +
" has no matching nonblocking call. "};
- auto Report = std::make_unique<PathSensitiveBugReport>(*UnmatchedWaitBugType,
+ auto Report = std::make_unique<PathSensitiveBugReport>(UnmatchedWaitBugType,
ErrorText, ExplNode);
Report->addRange(CE.getSourceRange());
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h
index 9871da026b04..0222a2120b34 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h
@@ -17,6 +17,7 @@
#include "MPITypes.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "llvm/ADT/StringRef.h"
namespace clang {
namespace ento {
@@ -24,12 +25,10 @@ namespace mpi {
class MPIBugReporter {
public:
- MPIBugReporter(const CheckerBase &CB) {
- UnmatchedWaitBugType.reset(new BugType(&CB, "Unmatched wait", MPIError));
- DoubleNonblockingBugType.reset(
- new BugType(&CB, "Double nonblocking", MPIError));
- MissingWaitBugType.reset(new BugType(&CB, "Missing wait", MPIError));
- }
+ MPIBugReporter(const CheckerBase &CB)
+ : UnmatchedWaitBugType(&CB, "Unmatched wait", MPIError),
+ MissingWaitBugType(&CB, "Missing wait", MPIError),
+ DoubleNonblockingBugType(&CB, "Double nonblocking", MPIError) {}
/// Report duplicate request use by nonblocking calls without intermediate
/// wait.
@@ -68,12 +67,10 @@ public:
BugReporter &BReporter) const;
private:
- const std::string MPIError = "MPI Error";
-
- // path-sensitive bug types
- std::unique_ptr<BugType> UnmatchedWaitBugType;
- std::unique_ptr<BugType> MissingWaitBugType;
- std::unique_ptr<BugType> DoubleNonblockingBugType;
+ const llvm::StringLiteral MPIError = "MPI Error";
+ const BugType UnmatchedWaitBugType;
+ const BugType MissingWaitBugType;
+ const BugType DoubleNonblockingBugType;
/// Bug visitor class to find the node where the request region was previously
/// used in order to include it into the BugReport path.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
index 5d6bd381d3cc..4c0a8ba2c7c0 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
@@ -165,7 +165,7 @@ void MPIChecker::allRegionsUsedByWait(
Ctx.getState(), SuperRegion, Ctx.getSValBuilder(),
CE.getArgExpr(1)->getType()->getPointeeType());
const llvm::APSInt &ArrSize =
- ElementCount.getAs<nonloc::ConcreteInt>()->getValue();
+ ElementCount.castAs<nonloc::ConcreteInt>().getValue();
for (size_t i = 0; i < ArrSize; ++i) {
const NonLoc Idx = Ctx.getSValBuilder().makeArrayIndex(i);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
index a157ee2da5df..12bf12a0b232 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
@@ -19,8 +19,10 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -31,7 +33,8 @@ class MacOSKeychainAPIChecker : public Checker<check::PreStmt<CallExpr>,
check::DeadSymbols,
check::PointerEscape,
eval::Assume> {
- mutable std::unique_ptr<BugType> BT;
+ const BugType BT{this, "Improper use of SecKeychain API",
+ categories::AppleAPIMisuse};
public:
/// AllocationState is a part of the checker specific state together with the
@@ -99,12 +102,6 @@ private:
/// function.
static unsigned getTrackedFunctionIndex(StringRef Name, bool IsAllocator);
- inline void initBugType() const {
- if (!BT)
- BT.reset(new BugType(this, "Improper use of SecKeychain API",
- "API Misuse (Apple)"));
- }
-
void generateDeallocatorMismatchReport(const AllocationPair &AP,
const Expr *ArgExpr,
CheckerContext &C) const;
@@ -160,7 +157,7 @@ static bool isEnclosingFunctionParam(const Expr *E) {
E = E->IgnoreParenCasts();
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
const ValueDecl *VD = DRE->getDecl();
- if (isa<ImplicitParamDecl>(VD) || isa<ParmVarDecl>(VD))
+ if (isa<ImplicitParamDecl, ParmVarDecl>(VD))
return true;
}
return false;
@@ -199,8 +196,7 @@ unsigned MacOSKeychainAPIChecker::getTrackedFunctionIndex(StringRef Name,
static bool isBadDeallocationArgument(const MemRegion *Arg) {
if (!Arg)
return false;
- return isa<AllocaRegion>(Arg) || isa<BlockDataRegion>(Arg) ||
- isa<TypedRegion>(Arg);
+ return isa<AllocaRegion, BlockDataRegion, TypedRegion>(Arg);
}
/// Given the address expression, retrieve the value it's pointing to. Assume
@@ -210,7 +206,7 @@ static SymbolRef getAsPointeeSymbol(const Expr *Expr,
ProgramStateRef State = C.getState();
SVal ArgV = C.getSVal(Expr);
- if (Optional<loc::MemRegionVal> X = ArgV.getAs<loc::MemRegionVal>()) {
+ if (std::optional<loc::MemRegionVal> X = ArgV.getAs<loc::MemRegionVal>()) {
StoreManager& SM = C.getStoreManager();
SymbolRef sym = SM.getBinding(State->getStore(), *X).getAsLocSymbol();
if (sym)
@@ -231,7 +227,6 @@ void MacOSKeychainAPIChecker::
if (!N)
return;
- initBugType();
SmallString<80> sbuf;
llvm::raw_svector_ostream os(sbuf);
unsigned int PDeallocIdx =
@@ -239,7 +234,7 @@ void MacOSKeychainAPIChecker::
os << "Deallocator doesn't match the allocator: '"
<< FunctionsToTrack[PDeallocIdx].Name << "' should be used.";
- auto Report = std::make_unique<PathSensitiveBugReport>(*BT, os.str(), N);
+ auto Report = std::make_unique<PathSensitiveBugReport>(BT, os.str(), N);
Report->addVisitor(std::make_unique<SecKeychainBugVisitor>(AP.first));
Report->addRange(ArgExpr->getSourceRange());
markInteresting(Report.get(), AP);
@@ -275,7 +270,6 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
ExplodedNode *N = C.generateNonFatalErrorNode(State);
if (!N)
return;
- initBugType();
SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
unsigned int DIdx = FunctionsToTrack[AS->AllocatorIdx].DeallocatorIdx;
@@ -283,8 +277,7 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
<< "the allocator: missing a call to '"
<< FunctionsToTrack[DIdx].Name
<< "'.";
- auto Report =
- std::make_unique<PathSensitiveBugReport>(*BT, os.str(), N);
+ auto Report = std::make_unique<PathSensitiveBugReport>(BT, os.str(), N);
Report->addVisitor(std::make_unique<SecKeychainBugVisitor>(V));
Report->addRange(ArgExpr->getSourceRange());
Report->markInteresting(AS->Region);
@@ -337,9 +330,8 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
ExplodedNode *N = C.generateNonFatalErrorNode(State);
if (!N)
return;
- initBugType();
auto Report = std::make_unique<PathSensitiveBugReport>(
- *BT, "Trying to free data which has not been allocated.", N);
+ BT, "Trying to free data which has not been allocated.", N);
Report->addRange(ArgExpr->getSourceRange());
if (AS)
Report->markInteresting(AS->Region);
@@ -473,7 +465,6 @@ std::unique_ptr<PathSensitiveBugReport>
MacOSKeychainAPIChecker::generateAllocatedDataNotReleasedReport(
const AllocationPair &AP, ExplodedNode *N, CheckerContext &C) const {
const ADFunctionInfo &FI = FunctionsToTrack[AP.second->AllocatorIdx];
- initBugType();
SmallString<70> sbuf;
llvm::raw_svector_ostream os(sbuf);
os << "Allocated data is not released: missing a call to '"
@@ -492,7 +483,7 @@ MacOSKeychainAPIChecker::generateAllocatedDataNotReleasedReport(
AllocNode->getLocationContext());
auto Report = std::make_unique<PathSensitiveBugReport>(
- *BT, os.str(), N, LocUsedForUniqueing,
+ BT, os.str(), N, LocUsedForUniqueing,
AllocNode->getLocationContext()->getDecl());
Report->addVisitor(std::make_unique<SecKeychainBugVisitor>(AP.first));
@@ -530,9 +521,9 @@ ProgramStateRef MacOSKeychainAPIChecker::evalAssume(ProgramStateRef State,
}
if (ReturnSymbol)
- for (auto I = AMap.begin(), E = AMap.end(); I != E; ++I) {
- if (ReturnSymbol == I->second.Region)
- State = State->remove<AllocatedData>(I->first);
+ for (auto [Sym, AllocState] : AMap) {
+ if (ReturnSymbol == AllocState.Region)
+ State = State->remove<AllocatedData>(Sym);
}
return State;
@@ -547,18 +538,18 @@ void MacOSKeychainAPIChecker::checkDeadSymbols(SymbolReaper &SR,
bool Changed = false;
AllocationPairVec Errors;
- for (auto I = AMap.begin(), E = AMap.end(); I != E; ++I) {
- if (!SR.isDead(I->first))
+ for (const auto &[Sym, AllocState] : AMap) {
+ if (!SR.isDead(Sym))
continue;
Changed = true;
- State = State->remove<AllocatedData>(I->first);
+ State = State->remove<AllocatedData>(Sym);
// If the allocated symbol is null do not report.
ConstraintManager &CMgr = State->getConstraintManager();
- ConditionTruthVal AllocFailed = CMgr.isNull(State, I.getKey());
+ ConditionTruthVal AllocFailed = CMgr.isNull(State, Sym);
if (AllocFailed.isConstrainedTrue())
continue;
- Errors.push_back(std::make_pair(I->first, &I->second));
+ Errors.push_back(std::make_pair(Sym, &AllocState));
}
if (!Changed) {
// Generate the new, cleaned up state.
@@ -656,8 +647,8 @@ void MacOSKeychainAPIChecker::printState(raw_ostream &Out,
if (!AMap.isEmpty()) {
Out << Sep << "KeychainAPIChecker :" << NL;
- for (auto I = AMap.begin(), E = AMap.end(); I != E; ++I) {
- I.getKey()->dumpToStream(Out);
+ for (SymbolRef Sym : llvm::make_first_range(AMap)) {
+ Sym->dumpToStream(Out);
}
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
index 04e7f8dec8d7..754b16764296 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
@@ -18,6 +18,7 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
@@ -31,7 +32,8 @@ using namespace ento;
namespace {
class MacOSXAPIChecker : public Checker< check::PreStmt<CallExpr> > {
- mutable std::unique_ptr<BugType> BT_dispatchOnce;
+ const BugType BT_dispatchOnce{this, "Improper use of 'dispatch_once'",
+ categories::AppleAPIMisuse};
static const ObjCIvarRegion *getParentIvarRegion(const MemRegion *R);
@@ -136,12 +138,8 @@ void MacOSXAPIChecker::CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
if (!N)
return;
- if (!BT_dispatchOnce)
- BT_dispatchOnce.reset(new BugType(this, "Improper use of 'dispatch_once'",
- "API Misuse (Apple)"));
-
auto report =
- std::make_unique<PathSensitiveBugReport>(*BT_dispatchOnce, os.str(), N);
+ std::make_unique<PathSensitiveBugReport>(BT_dispatchOnce, os.str(), N);
report->addRange(CE->getArg(0)->getSourceRange());
C.emitReport(std::move(report));
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index a6470da09c45..79ab05f2c786 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -48,9 +48,13 @@
#include "InterCheckerAPI.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ParentMap.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/Analysis/ProgramPoint.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
@@ -60,22 +64,29 @@
#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/StoreRef.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
#include <climits>
#include <functional>
+#include <optional>
#include <utility>
using namespace clang;
@@ -210,10 +221,10 @@ static bool isReleased(SymbolRef Sym, CheckerContext &C);
/// Update the RefState to reflect the new memory allocation.
/// The optional \p RetVal parameter specifies the newly allocated pointer
/// value; if unspecified, the value of expression \p E is used.
-static ProgramStateRef MallocUpdateRefState(CheckerContext &C, const Expr *E,
- ProgramStateRef State,
- AllocationFamily Family,
- Optional<SVal> RetVal = None);
+static ProgramStateRef
+MallocUpdateRefState(CheckerContext &C, const Expr *E, ProgramStateRef State,
+ AllocationFamily Family,
+ std::optional<SVal> RetVal = std::nullopt);
//===----------------------------------------------------------------------===//
// The modeling of memory reallocation.
@@ -296,7 +307,9 @@ public:
/// functions might free the memory.
/// In optimistic mode, the checker assumes that all user-defined functions
/// which might free a pointer are annotated.
- DefaultBool ShouldIncludeOwnershipAnnotatedFunctions;
+ bool ShouldIncludeOwnershipAnnotatedFunctions = false;
+
+ bool ShouldRegisterNoOwnershipChangeVisitor = false;
/// Many checkers are essentially built into this one, so enabling them will
/// make MallocChecker perform additional modeling and reporting.
@@ -314,7 +327,7 @@ public:
using LeakInfo = std::pair<const ExplodedNode *, const MemRegion *>;
- DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ bool ChecksEnabled[CK_NumCheckKinds] = {false};
CheckerNameRef CheckNames[CK_NumCheckKinds];
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
@@ -379,70 +392,72 @@ private:
const CallEvent &Call, CheckerContext &C)>;
const CallDescriptionMap<CheckFn> FreeingMemFnMap{
- {{"free", 1}, &MallocChecker::checkFree},
- {{"if_freenameindex", 1}, &MallocChecker::checkIfFreeNameIndex},
- {{"kfree", 1}, &MallocChecker::checkFree},
- {{"g_free", 1}, &MallocChecker::checkFree},
+ {{{"free"}, 1}, &MallocChecker::checkFree},
+ {{{"if_freenameindex"}, 1}, &MallocChecker::checkIfFreeNameIndex},
+ {{{"kfree"}, 1}, &MallocChecker::checkFree},
+ {{{"g_free"}, 1}, &MallocChecker::checkFree},
};
bool isFreeingCall(const CallEvent &Call) const;
+ static bool isFreeingOwnershipAttrCall(const FunctionDecl *Func);
+
+ friend class NoOwnershipChangeVisitor;
CallDescriptionMap<CheckFn> AllocatingMemFnMap{
- {{"alloca", 1}, &MallocChecker::checkAlloca},
- {{"_alloca", 1}, &MallocChecker::checkAlloca},
- {{"malloc", 1}, &MallocChecker::checkBasicAlloc},
- {{"malloc", 3}, &MallocChecker::checkKernelMalloc},
- {{"calloc", 2}, &MallocChecker::checkCalloc},
- {{"valloc", 1}, &MallocChecker::checkBasicAlloc},
- {{CDF_MaybeBuiltin, "strndup", 2}, &MallocChecker::checkStrdup},
- {{CDF_MaybeBuiltin, "strdup", 1}, &MallocChecker::checkStrdup},
- {{"_strdup", 1}, &MallocChecker::checkStrdup},
- {{"kmalloc", 2}, &MallocChecker::checkKernelMalloc},
- {{"if_nameindex", 1}, &MallocChecker::checkIfNameIndex},
- {{CDF_MaybeBuiltin, "wcsdup", 1}, &MallocChecker::checkStrdup},
- {{CDF_MaybeBuiltin, "_wcsdup", 1}, &MallocChecker::checkStrdup},
- {{"g_malloc", 1}, &MallocChecker::checkBasicAlloc},
- {{"g_malloc0", 1}, &MallocChecker::checkGMalloc0},
- {{"g_try_malloc", 1}, &MallocChecker::checkBasicAlloc},
- {{"g_try_malloc0", 1}, &MallocChecker::checkGMalloc0},
- {{"g_memdup", 2}, &MallocChecker::checkGMemdup},
- {{"g_malloc_n", 2}, &MallocChecker::checkGMallocN},
- {{"g_malloc0_n", 2}, &MallocChecker::checkGMallocN0},
- {{"g_try_malloc_n", 2}, &MallocChecker::checkGMallocN},
- {{"g_try_malloc0_n", 2}, &MallocChecker::checkGMallocN0},
+ {{{"alloca"}, 1}, &MallocChecker::checkAlloca},
+ {{{"_alloca"}, 1}, &MallocChecker::checkAlloca},
+ {{{"malloc"}, 1}, &MallocChecker::checkBasicAlloc},
+ {{{"malloc"}, 3}, &MallocChecker::checkKernelMalloc},
+ {{{"calloc"}, 2}, &MallocChecker::checkCalloc},
+ {{{"valloc"}, 1}, &MallocChecker::checkBasicAlloc},
+ {{CDF_MaybeBuiltin, {"strndup"}, 2}, &MallocChecker::checkStrdup},
+ {{CDF_MaybeBuiltin, {"strdup"}, 1}, &MallocChecker::checkStrdup},
+ {{{"_strdup"}, 1}, &MallocChecker::checkStrdup},
+ {{{"kmalloc"}, 2}, &MallocChecker::checkKernelMalloc},
+ {{{"if_nameindex"}, 1}, &MallocChecker::checkIfNameIndex},
+ {{CDF_MaybeBuiltin, {"wcsdup"}, 1}, &MallocChecker::checkStrdup},
+ {{CDF_MaybeBuiltin, {"_wcsdup"}, 1}, &MallocChecker::checkStrdup},
+ {{{"g_malloc"}, 1}, &MallocChecker::checkBasicAlloc},
+ {{{"g_malloc0"}, 1}, &MallocChecker::checkGMalloc0},
+ {{{"g_try_malloc"}, 1}, &MallocChecker::checkBasicAlloc},
+ {{{"g_try_malloc0"}, 1}, &MallocChecker::checkGMalloc0},
+ {{{"g_memdup"}, 2}, &MallocChecker::checkGMemdup},
+ {{{"g_malloc_n"}, 2}, &MallocChecker::checkGMallocN},
+ {{{"g_malloc0_n"}, 2}, &MallocChecker::checkGMallocN0},
+ {{{"g_try_malloc_n"}, 2}, &MallocChecker::checkGMallocN},
+ {{{"g_try_malloc0_n"}, 2}, &MallocChecker::checkGMallocN0},
};
CallDescriptionMap<CheckFn> ReallocatingMemFnMap{
- {{"realloc", 2},
+ {{{"realloc"}, 2},
std::bind(&MallocChecker::checkRealloc, _1, _2, _3, false)},
- {{"reallocf", 2},
+ {{{"reallocf"}, 2},
std::bind(&MallocChecker::checkRealloc, _1, _2, _3, true)},
- {{"g_realloc", 2},
+ {{{"g_realloc"}, 2},
std::bind(&MallocChecker::checkRealloc, _1, _2, _3, false)},
- {{"g_try_realloc", 2},
+ {{{"g_try_realloc"}, 2},
std::bind(&MallocChecker::checkRealloc, _1, _2, _3, false)},
- {{"g_realloc_n", 3}, &MallocChecker::checkReallocN},
- {{"g_try_realloc_n", 3}, &MallocChecker::checkReallocN},
+ {{{"g_realloc_n"}, 3}, &MallocChecker::checkReallocN},
+ {{{"g_try_realloc_n"}, 3}, &MallocChecker::checkReallocN},
};
bool isMemCall(const CallEvent &Call) const;
// TODO: Remove mutable by moving the initializtaion to the registry function.
- mutable Optional<uint64_t> KernelZeroFlagVal;
+ mutable std::optional<uint64_t> KernelZeroFlagVal;
- using KernelZeroSizePtrValueTy = Optional<int>;
+ using KernelZeroSizePtrValueTy = std::optional<int>;
/// Store the value of macro called `ZERO_SIZE_PTR`.
/// The value is initialized at first use, before first use the outer
/// Optional is empty, afterwards it contains another Optional that indicates
/// if the macro value could be determined, and if yes the value itself.
- mutable Optional<KernelZeroSizePtrValueTy> KernelZeroSizePtrValue;
+ mutable std::optional<KernelZeroSizePtrValueTy> KernelZeroSizePtrValue;
/// Process C++ operator new()'s allocation, which is the part of C++
/// new-expression that goes before the constructor.
- LLVM_NODISCARD
- ProgramStateRef processNewAllocation(const CXXAllocatorCall &Call,
- CheckerContext &C,
- AllocationFamily Family) const;
+ [[nodiscard]] ProgramStateRef
+ processNewAllocation(const CXXAllocatorCall &Call, CheckerContext &C,
+ AllocationFamily Family) const;
/// Perform a zero-allocation check.
///
@@ -452,11 +467,10 @@ private:
/// 0.
/// \param [in] RetVal Specifies the newly allocated pointer value;
/// if unspecified, the value of expression \p E is used.
- LLVM_NODISCARD
- static ProgramStateRef ProcessZeroAllocCheck(const CallEvent &Call,
- const unsigned IndexOfSizeArg,
- ProgramStateRef State,
- Optional<SVal> RetVal = None);
+ [[nodiscard]] static ProgramStateRef
+ ProcessZeroAllocCheck(const CallEvent &Call, const unsigned IndexOfSizeArg,
+ ProgramStateRef State,
+ std::optional<SVal> RetVal = std::nullopt);
/// Model functions with the ownership_returns attribute.
///
@@ -474,10 +488,9 @@ private:
/// \param [in] Att The ownership_returns attribute.
/// \param [in] State The \c ProgramState right before allocation.
/// \returns The ProgramState right after allocation.
- LLVM_NODISCARD
- ProgramStateRef MallocMemReturnsAttr(CheckerContext &C, const CallEvent &Call,
- const OwnershipAttr *Att,
- ProgramStateRef State) const;
+ [[nodiscard]] ProgramStateRef
+ MallocMemReturnsAttr(CheckerContext &C, const CallEvent &Call,
+ const OwnershipAttr *Att, ProgramStateRef State) const;
/// Models memory allocation.
///
@@ -488,11 +501,9 @@ private:
/// malloc leaves it undefined.
/// \param [in] State The \c ProgramState right before allocation.
/// \returns The ProgramState right after allocation.
- LLVM_NODISCARD
- static ProgramStateRef MallocMemAux(CheckerContext &C, const CallEvent &Call,
- const Expr *SizeEx, SVal Init,
- ProgramStateRef State,
- AllocationFamily Family);
+ [[nodiscard]] static ProgramStateRef
+ MallocMemAux(CheckerContext &C, const CallEvent &Call, const Expr *SizeEx,
+ SVal Init, ProgramStateRef State, AllocationFamily Family);
/// Models memory allocation.
///
@@ -503,16 +514,13 @@ private:
/// malloc leaves it undefined.
/// \param [in] State The \c ProgramState right before allocation.
/// \returns The ProgramState right after allocation.
- LLVM_NODISCARD
- static ProgramStateRef MallocMemAux(CheckerContext &C, const CallEvent &Call,
- SVal Size, SVal Init,
- ProgramStateRef State,
- AllocationFamily Family);
+ [[nodiscard]] static ProgramStateRef
+ MallocMemAux(CheckerContext &C, const CallEvent &Call, SVal Size, SVal Init,
+ ProgramStateRef State, AllocationFamily Family);
// Check if this malloc() for special flags. At present that means M_ZERO or
// __GFP_ZERO (in which case, treat it like calloc).
- LLVM_NODISCARD
- llvm::Optional<ProgramStateRef>
+ [[nodiscard]] std::optional<ProgramStateRef>
performKernelMalloc(const CallEvent &Call, CheckerContext &C,
const ProgramStateRef &State) const;
@@ -533,10 +541,10 @@ private:
/// \param [in] Att The ownership_takes or ownership_holds attribute.
/// \param [in] State The \c ProgramState right before allocation.
/// \returns The ProgramState right after deallocation.
- LLVM_NODISCARD
- ProgramStateRef FreeMemAttr(CheckerContext &C, const CallEvent &Call,
- const OwnershipAttr *Att,
- ProgramStateRef State) const;
+ [[nodiscard]] ProgramStateRef FreeMemAttr(CheckerContext &C,
+ const CallEvent &Call,
+ const OwnershipAttr *Att,
+ ProgramStateRef State) const;
/// Models memory deallocation.
///
@@ -557,12 +565,10 @@ private:
/// \param [in] ReturnsNullOnFailure Whether the memory deallocation function
/// we're modeling returns with Null on failure.
/// \returns The ProgramState right after deallocation.
- LLVM_NODISCARD
- ProgramStateRef FreeMemAux(CheckerContext &C, const CallEvent &Call,
- ProgramStateRef State, unsigned Num, bool Hold,
- bool &IsKnownToBeAllocated,
- AllocationFamily Family,
- bool ReturnsNullOnFailure = false) const;
+ [[nodiscard]] ProgramStateRef
+ FreeMemAux(CheckerContext &C, const CallEvent &Call, ProgramStateRef State,
+ unsigned Num, bool Hold, bool &IsKnownToBeAllocated,
+ AllocationFamily Family, bool ReturnsNullOnFailure = false) const;
/// Models memory deallocation.
///
@@ -583,12 +589,10 @@ private:
/// \param [in] ReturnsNullOnFailure Whether the memory deallocation function
/// we're modeling returns with Null on failure.
/// \returns The ProgramState right after deallocation.
- LLVM_NODISCARD
- ProgramStateRef FreeMemAux(CheckerContext &C, const Expr *ArgExpr,
- const CallEvent &Call, ProgramStateRef State,
- bool Hold, bool &IsKnownToBeAllocated,
- AllocationFamily Family,
- bool ReturnsNullOnFailure = false) const;
+ [[nodiscard]] ProgramStateRef
+ FreeMemAux(CheckerContext &C, const Expr *ArgExpr, const CallEvent &Call,
+ ProgramStateRef State, bool Hold, bool &IsKnownToBeAllocated,
+ AllocationFamily Family, bool ReturnsNullOnFailure = false) const;
// TODO: Needs some refactoring, as all other deallocation modeling
// functions are suffering from out parameters and messy code due to how
@@ -603,29 +607,27 @@ private:
/// \param [in] SuffixWithN Whether the reallocation function we're modeling
/// has an '_n' suffix, such as g_realloc_n.
/// \returns The ProgramState right after reallocation.
- LLVM_NODISCARD
- ProgramStateRef ReallocMemAux(CheckerContext &C, const CallEvent &Call,
- bool ShouldFreeOnFail, ProgramStateRef State,
- AllocationFamily Family,
- bool SuffixWithN = false) const;
+ [[nodiscard]] ProgramStateRef
+ ReallocMemAux(CheckerContext &C, const CallEvent &Call, bool ShouldFreeOnFail,
+ ProgramStateRef State, AllocationFamily Family,
+ bool SuffixWithN = false) const;
/// Evaluates the buffer size that needs to be allocated.
///
/// \param [in] Blocks The amount of blocks that needs to be allocated.
/// \param [in] BlockBytes The size of a block.
/// \returns The symbolic value of \p Blocks * \p BlockBytes.
- LLVM_NODISCARD
- static SVal evalMulForBufferSize(CheckerContext &C, const Expr *Blocks,
- const Expr *BlockBytes);
+ [[nodiscard]] static SVal evalMulForBufferSize(CheckerContext &C,
+ const Expr *Blocks,
+ const Expr *BlockBytes);
/// Models zero initialized array allocation.
///
/// \param [in] Call The expression that reallocated memory
/// \param [in] State The \c ProgramState right before reallocation.
/// \returns The ProgramState right after allocation.
- LLVM_NODISCARD
- static ProgramStateRef CallocMem(CheckerContext &C, const CallEvent &Call,
- ProgramStateRef State);
+ [[nodiscard]] static ProgramStateRef
+ CallocMem(CheckerContext &C, const CallEvent &Call, ProgramStateRef State);
/// See if deallocation happens in a suspicious context. If so, escape the
/// pointers that otherwise would have been deallocated and return true.
@@ -658,12 +660,11 @@ private:
SymbolRef &EscapingSymbol) const;
/// Implementation of the checkPointerEscape callbacks.
- LLVM_NODISCARD
- ProgramStateRef checkPointerEscapeAux(ProgramStateRef State,
- const InvalidatedSymbols &Escaped,
- const CallEvent *Call,
- PointerEscapeKind Kind,
- bool IsConstPointerEscape) const;
+ [[nodiscard]] ProgramStateRef
+ checkPointerEscapeAux(ProgramStateRef State,
+ const InvalidatedSymbols &Escaped,
+ const CallEvent *Call, PointerEscapeKind Kind,
+ bool IsConstPointerEscape) const;
// Implementation of the checkPreStmt and checkEndFunction callbacks.
void checkEscapeOnReturn(const ReturnStmt *S, CheckerContext &C) const;
@@ -672,11 +673,11 @@ private:
/// Tells if a given family/call/symbol is tracked by the current checker.
/// Sets CheckKind to the kind of the checker responsible for this
/// family/call/symbol.
- Optional<CheckKind> getCheckIfTracked(AllocationFamily Family,
- bool IsALeakCheck = false) const;
+ std::optional<CheckKind> getCheckIfTracked(AllocationFamily Family,
+ bool IsALeakCheck = false) const;
- Optional<CheckKind> getCheckIfTracked(CheckerContext &C, SymbolRef Sym,
- bool IsALeakCheck = false) const;
+ std::optional<CheckKind> getCheckIfTracked(CheckerContext &C, SymbolRef Sym,
+ bool IsALeakCheck = false) const;
///@}
static bool SummarizeValue(raw_ostream &os, SVal V);
static bool SummarizeRegion(raw_ostream &os, const MemRegion *MR);
@@ -722,11 +723,204 @@ private:
bool isArgZERO_SIZE_PTR(ProgramStateRef State, CheckerContext &C,
SVal ArgVal) const;
};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Definition of NoOwnershipChangeVisitor.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class NoOwnershipChangeVisitor final : public NoStateChangeFuncVisitor {
+ // The symbol whose (lack of) ownership change we are interested in.
+ SymbolRef Sym;
+ const MallocChecker &Checker;
+ using OwnerSet = llvm::SmallPtrSet<const MemRegion *, 8>;
+
+ // Collect which entities point to the allocated memory, and could be
+ // responsible for deallocating it.
+ class OwnershipBindingsHandler : public StoreManager::BindingsHandler {
+ SymbolRef Sym;
+ OwnerSet &Owners;
+
+ public:
+ OwnershipBindingsHandler(SymbolRef Sym, OwnerSet &Owners)
+ : Sym(Sym), Owners(Owners) {}
+
+ bool HandleBinding(StoreManager &SMgr, Store Store, const MemRegion *Region,
+ SVal Val) override {
+ if (Val.getAsSymbol() == Sym)
+ Owners.insert(Region);
+ return true;
+ }
+
+ LLVM_DUMP_METHOD void dump() const { dumpToStream(llvm::errs()); }
+ LLVM_DUMP_METHOD void dumpToStream(llvm::raw_ostream &out) const {
+ out << "Owners: {\n";
+ for (const MemRegion *Owner : Owners) {
+ out << " ";
+ Owner->dumpToStream(out);
+ out << ",\n";
+ }
+ out << "}\n";
+ }
+ };
+
+protected:
+ OwnerSet getOwnersAtNode(const ExplodedNode *N) {
+ OwnerSet Ret;
+
+ ProgramStateRef State = N->getState();
+ OwnershipBindingsHandler Handler{Sym, Ret};
+ State->getStateManager().getStoreManager().iterBindings(State->getStore(),
+ Handler);
+ return Ret;
+ }
+
+ LLVM_DUMP_METHOD static std::string
+ getFunctionName(const ExplodedNode *CallEnterN) {
+ if (const CallExpr *CE = llvm::dyn_cast_or_null<CallExpr>(
+ CallEnterN->getLocationAs<CallEnter>()->getCallExpr()))
+ if (const FunctionDecl *FD = CE->getDirectCallee())
+ return FD->getQualifiedNameAsString();
+ return "";
+ }
+
+ /// Syntactically checks whether the callee is a deallocating function. Since
+ /// we have no path-sensitive information on this call (we would need a
+ /// CallEvent instead of a CallExpr for that), its possible that a
+ /// deallocation function was called indirectly through a function pointer,
+ /// but we are not able to tell, so this is a best effort analysis.
+ /// See namespace `memory_passed_to_fn_call_free_through_fn_ptr` in
+ /// clang/test/Analysis/NewDeleteLeaks.cpp.
+ bool isFreeingCallAsWritten(const CallExpr &Call) const {
+ if (Checker.FreeingMemFnMap.lookupAsWritten(Call) ||
+ Checker.ReallocatingMemFnMap.lookupAsWritten(Call))
+ return true;
+
+ if (const auto *Func =
+ llvm::dyn_cast_or_null<FunctionDecl>(Call.getCalleeDecl()))
+ return MallocChecker::isFreeingOwnershipAttrCall(Func);
+
+ return false;
+ }
+
+ /// Heuristically guess whether the callee intended to free memory. This is
+ /// done syntactically, because we are trying to argue about alternative
+ /// paths of execution, and as a consequence we don't have path-sensitive
+ /// information.
+ bool doesFnIntendToHandleOwnership(const Decl *Callee, ASTContext &ACtx) {
+ using namespace clang::ast_matchers;
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(Callee);
+
+ // Given that the stack frame was entered, the body should always be
+ // theoretically obtainable. In case of body farms, the synthesized body
+ // is not attached to declaration, thus triggering the '!FD->hasBody()'
+ // branch. That said, would a synthesized body ever intend to handle
+ // ownership? As of today they don't. And if they did, how would we
+ // put notes inside it, given that it doesn't match any source locations?
+ if (!FD || !FD->hasBody())
+ return false;
+
+ auto Matches = match(findAll(stmt(anyOf(cxxDeleteExpr().bind("delete"),
+ callExpr().bind("call")))),
+ *FD->getBody(), ACtx);
+ for (BoundNodes Match : Matches) {
+ if (Match.getNodeAs<CXXDeleteExpr>("delete"))
+ return true;
+
+ if (const auto *Call = Match.getNodeAs<CallExpr>("call"))
+ if (isFreeingCallAsWritten(*Call))
+ return true;
+ }
+ // TODO: Ownership might change with an attempt to store the allocated
+ // memory, not only through deallocation. Check for attempted stores as
+ // well.
+ return false;
+ }
+
+ bool wasModifiedInFunction(const ExplodedNode *CallEnterN,
+ const ExplodedNode *CallExitEndN) override {
+ if (!doesFnIntendToHandleOwnership(
+ CallExitEndN->getFirstPred()->getLocationContext()->getDecl(),
+ CallExitEndN->getState()->getAnalysisManager().getASTContext()))
+ return true;
+
+ if (CallEnterN->getState()->get<RegionState>(Sym) !=
+ CallExitEndN->getState()->get<RegionState>(Sym))
+ return true;
+
+ OwnerSet CurrOwners = getOwnersAtNode(CallEnterN);
+ OwnerSet ExitOwners = getOwnersAtNode(CallExitEndN);
+
+ // Owners in the current set may be purged from the analyzer later on.
+ // If a variable is dead (is not referenced directly or indirectly after
+ // some point), it will be removed from the Store before the end of its
+ // actual lifetime.
+ // This means that if the ownership status didn't change, CurrOwners
+ // must be a superset of, but not necessarily equal to ExitOwners.
+ return !llvm::set_is_subset(ExitOwners, CurrOwners);
+ }
+
+ static PathDiagnosticPieceRef emitNote(const ExplodedNode *N) {
+ PathDiagnosticLocation L = PathDiagnosticLocation::create(
+ N->getLocation(),
+ N->getState()->getStateManager().getContext().getSourceManager());
+ return std::make_shared<PathDiagnosticEventPiece>(
+ L, "Returning without deallocating memory or storing the pointer for "
+ "later deallocation");
+ }
+
+ PathDiagnosticPieceRef
+ maybeEmitNoteForObjCSelf(PathSensitiveBugReport &R,
+ const ObjCMethodCall &Call,
+ const ExplodedNode *N) override {
+ // TODO: Implement.
+ return nullptr;
+ }
+
+ PathDiagnosticPieceRef
+ maybeEmitNoteForCXXThis(PathSensitiveBugReport &R,
+ const CXXConstructorCall &Call,
+ const ExplodedNode *N) override {
+ // TODO: Implement.
+ return nullptr;
+ }
+
+ PathDiagnosticPieceRef
+ maybeEmitNoteForParameters(PathSensitiveBugReport &R, const CallEvent &Call,
+ const ExplodedNode *N) override {
+ // TODO: Factor the logic of "what constitutes as an entity being passed
+ // into a function call" out by reusing the code in
+ // NoStoreFuncVisitor::maybeEmitNoteForParameters, maybe by incorporating
+ // the printing technology in UninitializedObject's FieldChainInfo.
+ ArrayRef<ParmVarDecl *> Parameters = Call.parameters();
+ for (unsigned I = 0; I < Call.getNumArgs() && I < Parameters.size(); ++I) {
+ SVal V = Call.getArgSVal(I);
+ if (V.getAsSymbol() == Sym)
+ return emitNote(N);
+ }
+ return nullptr;
+ }
+
+public:
+ NoOwnershipChangeVisitor(SymbolRef Sym, const MallocChecker *Checker)
+ : NoStateChangeFuncVisitor(bugreporter::TrackingKind::Thorough), Sym(Sym),
+ Checker(*Checker) {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ static int Tag = 0;
+ ID.AddPointer(&Tag);
+ ID.AddPointer(Sym);
+ }
+};
+
+} // end anonymous namespace
//===----------------------------------------------------------------------===//
// Definition of MallocBugVisitor.
//===----------------------------------------------------------------------===//
+namespace {
/// The bug visitor which allows us to print extra diagnostics along the
/// BugReport path. For example, showing the allocation site of the leaked
/// region.
@@ -767,7 +961,7 @@ public:
/// Did not track -> allocated. Other state (released) -> allocated.
static inline bool isAllocated(const RefState *RSCurr, const RefState *RSPrev,
const Stmt *Stmt) {
- return (Stmt && (isa<CallExpr>(Stmt) || isa<CXXNewExpr>(Stmt)) &&
+ return (isa_and_nonnull<CallExpr, CXXNewExpr>(Stmt) &&
(RSCurr &&
(RSCurr->isAllocated() || RSCurr->isAllocatedOfSizeZero())) &&
(!RSPrev ||
@@ -780,8 +974,7 @@ public:
const Stmt *Stmt) {
bool IsReleased =
(RSCurr && RSCurr->isReleased()) && (!RSPrev || !RSPrev->isReleased());
- assert(!IsReleased ||
- (Stmt && (isa<CallExpr>(Stmt) || isa<CXXDeleteExpr>(Stmt))) ||
+ assert(!IsReleased || (isa_and_nonnull<CallExpr, CXXDeleteExpr>(Stmt)) ||
(!Stmt && RSCurr->getAllocationFamily() == AF_InnerBuffer));
return IsReleased;
}
@@ -789,11 +982,10 @@ public:
/// Did not track -> relinquished. Other state (allocated) -> relinquished.
static inline bool isRelinquished(const RefState *RSCurr,
const RefState *RSPrev, const Stmt *Stmt) {
- return (Stmt &&
- (isa<CallExpr>(Stmt) || isa<ObjCMessageExpr>(Stmt) ||
- isa<ObjCPropertyRefExpr>(Stmt)) &&
- (RSCurr && RSCurr->isRelinquished()) &&
- (!RSPrev || !RSPrev->isRelinquished()));
+ return (
+ isa_and_nonnull<CallExpr, ObjCMessageExpr, ObjCPropertyRefExpr>(Stmt) &&
+ (RSCurr && RSCurr->isRelinquished()) &&
+ (!RSPrev || !RSPrev->isRelinquished()));
}
/// If the expression is not a call, and the state change is
@@ -803,7 +995,7 @@ public:
static inline bool hasReallocFailed(const RefState *RSCurr,
const RefState *RSPrev,
const Stmt *Stmt) {
- return ((!Stmt || !isa<CallExpr>(Stmt)) &&
+ return ((!isa_and_nonnull<CallExpr>(Stmt)) &&
(RSCurr &&
(RSCurr->isAllocated() || RSCurr->isAllocatedOfSizeZero())) &&
(RSPrev &&
@@ -851,7 +1043,6 @@ private:
}
};
};
-
} // end anonymous namespace
// A map from the freed symbol to the symbol representing the return value of
@@ -894,12 +1085,8 @@ static bool isStandardNewDelete(const FunctionDecl *FD) {
// Methods of MallocChecker and MallocBugVisitor.
//===----------------------------------------------------------------------===//
-bool MallocChecker::isFreeingCall(const CallEvent &Call) const {
- if (FreeingMemFnMap.lookup(Call) || ReallocatingMemFnMap.lookup(Call))
- return true;
-
- const auto *Func = dyn_cast<FunctionDecl>(Call.getDecl());
- if (Func && Func->hasAttrs()) {
+bool MallocChecker::isFreeingOwnershipAttrCall(const FunctionDecl *Func) {
+ if (Func->hasAttrs()) {
for (const auto *I : Func->specific_attrs<OwnershipAttr>()) {
OwnershipAttr::OwnershipKind OwnKind = I->getOwnKind();
if (OwnKind == OwnershipAttr::Takes || OwnKind == OwnershipAttr::Holds)
@@ -909,6 +1096,16 @@ bool MallocChecker::isFreeingCall(const CallEvent &Call) const {
return false;
}
+bool MallocChecker::isFreeingCall(const CallEvent &Call) const {
+ if (FreeingMemFnMap.lookup(Call) || ReallocatingMemFnMap.lookup(Call))
+ return true;
+
+ if (const auto *Func = dyn_cast_or_null<FunctionDecl>(Call.getDecl()))
+ return isFreeingOwnershipAttrCall(Func);
+
+ return false;
+}
+
bool MallocChecker::isMemCall(const CallEvent &Call) const {
if (FreeingMemFnMap.lookup(Call) || AllocatingMemFnMap.lookup(Call) ||
ReallocatingMemFnMap.lookup(Call))
@@ -921,7 +1118,7 @@ bool MallocChecker::isMemCall(const CallEvent &Call) const {
return Func && Func->hasAttr<OwnershipAttr>();
}
-llvm::Optional<ProgramStateRef>
+std::optional<ProgramStateRef>
MallocChecker::performKernelMalloc(const CallEvent &Call, CheckerContext &C,
const ProgramStateRef &State) const {
// 3-argument malloc(), as commonly used in {Free,Net,Open}BSD Kernels:
@@ -943,48 +1140,54 @@ MallocChecker::performKernelMalloc(const CallEvent &Call, CheckerContext &C,
ASTContext &Ctx = C.getASTContext();
llvm::Triple::OSType OS = Ctx.getTargetInfo().getTriple().getOS();
- if (!KernelZeroFlagVal.hasValue()) {
- if (OS == llvm::Triple::FreeBSD)
+ if (!KernelZeroFlagVal) {
+ switch (OS) {
+ case llvm::Triple::FreeBSD:
KernelZeroFlagVal = 0x0100;
- else if (OS == llvm::Triple::NetBSD)
+ break;
+ case llvm::Triple::NetBSD:
KernelZeroFlagVal = 0x0002;
- else if (OS == llvm::Triple::OpenBSD)
+ break;
+ case llvm::Triple::OpenBSD:
KernelZeroFlagVal = 0x0008;
- else if (OS == llvm::Triple::Linux)
+ break;
+ case llvm::Triple::Linux:
// __GFP_ZERO
KernelZeroFlagVal = 0x8000;
- else
+ break;
+ default:
// FIXME: We need a more general way of getting the M_ZERO value.
// See also: O_CREAT in UnixAPIChecker.cpp.
// Fall back to normal malloc behavior on platforms where we don't
// know M_ZERO.
- return None;
+ return std::nullopt;
+ }
}
// We treat the last argument as the flags argument, and callers fall-back to
// normal malloc on a None return. This works for the FreeBSD kernel malloc
// as well as Linux kmalloc.
if (Call.getNumArgs() < 2)
- return None;
+ return std::nullopt;
const Expr *FlagsEx = Call.getArgExpr(Call.getNumArgs() - 1);
const SVal V = C.getSVal(FlagsEx);
- if (!V.getAs<NonLoc>()) {
+ if (!isa<NonLoc>(V)) {
// The case where 'V' can be a location can only be due to a bad header,
// so in this case bail out.
- return None;
+ return std::nullopt;
}
NonLoc Flags = V.castAs<NonLoc>();
NonLoc ZeroFlag = C.getSValBuilder()
- .makeIntVal(KernelZeroFlagVal.getValue(), FlagsEx->getType())
- .castAs<NonLoc>();
+ .makeIntVal(*KernelZeroFlagVal, FlagsEx->getType())
+ .castAs<NonLoc>();
SVal MaskedFlagsUC = C.getSValBuilder().evalBinOpNN(State, BO_And,
Flags, ZeroFlag,
FlagsEx->getType());
if (MaskedFlagsUC.isUnknownOrUndef())
- return None;
+ return std::nullopt;
DefinedSVal MaskedFlags = MaskedFlagsUC.castAs<DefinedSVal>();
// Check if maskedFlags is non-zero.
@@ -998,7 +1201,7 @@ MallocChecker::performKernelMalloc(const CallEvent &Call, CheckerContext &C,
AF_Malloc);
}
- return None;
+ return std::nullopt;
}
SVal MallocChecker::evalMulForBufferSize(CheckerContext &C, const Expr *Blocks,
@@ -1024,10 +1227,10 @@ void MallocChecker::checkBasicAlloc(const CallEvent &Call,
void MallocChecker::checkKernelMalloc(const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
- llvm::Optional<ProgramStateRef> MaybeState =
+ std::optional<ProgramStateRef> MaybeState =
performKernelMalloc(Call, C, State);
- if (MaybeState.hasValue())
- State = MaybeState.getValue();
+ if (MaybeState)
+ State = *MaybeState;
else
State = MallocMemAux(C, Call, Call.getArgExpr(0), UndefinedVal(), State,
AF_Malloc);
@@ -1191,8 +1394,8 @@ void MallocChecker::checkGMalloc0(const CallEvent &Call,
void MallocChecker::checkGMemdup(const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
- State = MallocMemAux(C, Call, Call.getArgExpr(1), UndefinedVal(), State,
- AF_Malloc);
+ State =
+ MallocMemAux(C, Call, Call.getArgExpr(1), UnknownVal(), State, AF_Malloc);
State = ProcessZeroAllocCheck(Call, 1, State);
C.addTransition(State);
}
@@ -1294,7 +1497,7 @@ void MallocChecker::checkPostCall(const CallEvent &Call,
// Performs a 0-sized allocations check.
ProgramStateRef MallocChecker::ProcessZeroAllocCheck(
const CallEvent &Call, const unsigned IndexOfSizeArg, ProgramStateRef State,
- Optional<SVal> RetVal) {
+ std::optional<SVal> RetVal) {
if (!State)
return nullptr;
@@ -1446,7 +1649,7 @@ static bool isKnownDeallocObjCMethodName(const ObjCMethodCall &Call) {
FirstSlot == "initWithCharactersNoCopy";
}
-static Optional<bool> getFreeWhenDoneArg(const ObjCMethodCall &Call) {
+static std::optional<bool> getFreeWhenDoneArg(const ObjCMethodCall &Call) {
Selector S = Call.getSelector();
// FIXME: We should not rely on fully-constrained symbols being folded.
@@ -1454,7 +1657,7 @@ static Optional<bool> getFreeWhenDoneArg(const ObjCMethodCall &Call) {
if (S.getNameForSlot(i).equals("freeWhenDone"))
return !Call.getArgSVal(i).isZeroConstant();
- return None;
+ return std::nullopt;
}
void MallocChecker::checkPostObjCMessage(const ObjCMethodCall &Call,
@@ -1465,7 +1668,7 @@ void MallocChecker::checkPostObjCMessage(const ObjCMethodCall &Call,
if (!isKnownDeallocObjCMethodName(Call))
return;
- if (Optional<bool> FreeWhenDone = getFreeWhenDoneArg(Call))
+ if (std::optional<bool> FreeWhenDone = getFreeWhenDoneArg(Call))
if (!*FreeWhenDone)
return;
@@ -1476,7 +1679,7 @@ void MallocChecker::checkPostObjCMessage(const ObjCMethodCall &Call,
ProgramStateRef State =
FreeMemAux(C, Call.getArgExpr(0), Call, C.getState(),
/*Hold=*/true, IsKnownToBeAllocatedMemory, AF_Malloc,
- /*RetNullOnFailure=*/true);
+ /*ReturnsNullOnFailure=*/true);
C.addTransition(State);
}
@@ -1491,9 +1694,9 @@ MallocChecker::MallocMemReturnsAttr(CheckerContext &C, const CallEvent &Call,
if (Att->getModule()->getName() != "malloc")
return nullptr;
- OwnershipAttr::args_iterator I = Att->args_begin(), E = Att->args_end();
- if (I != E) {
- return MallocMemAux(C, Call, Call.getArgExpr(I->getASTIndex()),
+ if (!Att->args().empty()) {
+ return MallocMemAux(C, Call,
+ Call.getArgExpr(Att->args_begin()->getASTIndex()),
UndefinedVal(), State, AF_Malloc);
}
return MallocMemAux(C, Call, UnknownVal(), UndefinedVal(), State, AF_Malloc);
@@ -1525,21 +1728,27 @@ ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
return nullptr;
// Bind the return value to the symbolic value from the heap region.
- // TODO: We could rewrite post visit to eval call; 'malloc' does not have
- // side effects other than what we model here.
+ // TODO: move use of this functions to an EvalCall callback, becasue
+ // BindExpr() should'nt be used elsewhere.
unsigned Count = C.blockCount();
- SValBuilder &svalBuilder = C.getSValBuilder();
+ SValBuilder &SVB = C.getSValBuilder();
const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
- DefinedSVal RetVal = svalBuilder.getConjuredHeapSymbolVal(CE, LCtx, Count)
- .castAs<DefinedSVal>();
+ DefinedSVal RetVal =
+ ((Family == AF_Alloca) ? SVB.getAllocaRegionVal(CE, LCtx, Count)
+ : SVB.getConjuredHeapSymbolVal(CE, LCtx, Count)
+ .castAs<DefinedSVal>());
State = State->BindExpr(CE, C.getLocationContext(), RetVal);
// Fill the region with the initialization value.
State = State->bindDefaultInitial(RetVal, Init, LCtx);
+ // If Size is somehow undefined at this point, this line prevents a crash.
+ if (Size.isUndef())
+ Size = UnknownVal();
+
// Set the region's extent.
State = setDynamicExtent(State, RetVal.getAsRegion(),
- Size.castAs<DefinedOrUnknownSVal>(), svalBuilder);
+ Size.castAs<DefinedOrUnknownSVal>(), SVB);
return MallocUpdateRefState(C, CE, State, Family);
}
@@ -1547,7 +1756,7 @@ ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
static ProgramStateRef MallocUpdateRefState(CheckerContext &C, const Expr *E,
ProgramStateRef State,
AllocationFamily Family,
- Optional<SVal> RetVal) {
+ std::optional<SVal> RetVal) {
if (!State)
return nullptr;
@@ -1695,12 +1904,12 @@ ProgramStateRef MallocChecker::FreeMemAux(
return nullptr;
SVal ArgVal = C.getSVal(ArgExpr);
- if (!ArgVal.getAs<DefinedOrUnknownSVal>())
+ if (!isa<DefinedOrUnknownSVal>(ArgVal))
return nullptr;
DefinedOrUnknownSVal location = ArgVal.castAs<DefinedOrUnknownSVal>();
// Check for null dereferences.
- if (!location.getAs<Loc>())
+ if (!isa<Loc>(location))
return nullptr;
// The explicit NULL case, no operation is performed.
@@ -1753,14 +1962,11 @@ ProgramStateRef MallocChecker::FreeMemAux(
// Parameters, locals, statics, globals, and memory returned by
// __builtin_alloca() shouldn't be freed.
- if (!(isa<UnknownSpaceRegion>(MS) || isa<HeapSpaceRegion>(MS))) {
- // FIXME: at the time this code was written, malloc() regions were
- // represented by conjured symbols, which are all in UnknownSpaceRegion.
- // This means that there isn't actually anything from HeapSpaceRegion
- // that should be freed, even though we allow it here.
- // Of course, free() can work on memory allocated outside the current
- // function, so UnknownSpaceRegion is always a possibility.
- // False negatives are better than false positives.
+ if (!isa<UnknownSpaceRegion, HeapSpaceRegion>(MS)) {
+ // Regions returned by malloc() are represented by SymbolicRegion objects
+ // within HeapSpaceRegion. Of course, free() can work on memory allocated
+ // outside the current function, so UnknownSpaceRegion is also a
+ // possibility here.
if (isa<AllocaRegion>(R))
HandleFreeAlloca(C, ArgVal, ArgExpr->getSourceRange());
@@ -1862,7 +2068,7 @@ ProgramStateRef MallocChecker::FreeMemAux(
RefState::getReleased(Family, ParentExpr));
}
-Optional<MallocChecker::CheckKind>
+std::optional<MallocChecker::CheckKind>
MallocChecker::getCheckIfTracked(AllocationFamily Family,
bool IsALeakCheck) const {
switch (Family) {
@@ -1871,7 +2077,7 @@ MallocChecker::getCheckIfTracked(AllocationFamily Family,
case AF_IfNameIndex: {
if (ChecksEnabled[CK_MallocChecker])
return CK_MallocChecker;
- return None;
+ return std::nullopt;
}
case AF_CXXNew:
case AF_CXXNewArray: {
@@ -1883,12 +2089,12 @@ MallocChecker::getCheckIfTracked(AllocationFamily Family,
if (ChecksEnabled[CK_NewDeleteChecker])
return CK_NewDeleteChecker;
}
- return None;
+ return std::nullopt;
}
case AF_InnerBuffer: {
if (ChecksEnabled[CK_InnerPointerChecker])
return CK_InnerPointerChecker;
- return None;
+ return std::nullopt;
}
case AF_None: {
llvm_unreachable("no family");
@@ -1897,7 +2103,7 @@ MallocChecker::getCheckIfTracked(AllocationFamily Family,
llvm_unreachable("unhandled family");
}
-Optional<MallocChecker::CheckKind>
+std::optional<MallocChecker::CheckKind>
MallocChecker::getCheckIfTracked(CheckerContext &C, SymbolRef Sym,
bool IsALeakCheck) const {
if (C.getState()->contains<ReallocSizeZeroSymbols>(Sym))
@@ -1909,11 +2115,13 @@ MallocChecker::getCheckIfTracked(CheckerContext &C, SymbolRef Sym,
}
bool MallocChecker::SummarizeValue(raw_ostream &os, SVal V) {
- if (Optional<nonloc::ConcreteInt> IntVal = V.getAs<nonloc::ConcreteInt>())
+ if (std::optional<nonloc::ConcreteInt> IntVal =
+ V.getAs<nonloc::ConcreteInt>())
os << "an integer (" << IntVal->getValue() << ")";
- else if (Optional<loc::ConcreteInt> ConstAddr = V.getAs<loc::ConcreteInt>())
+ else if (std::optional<loc::ConcreteInt> ConstAddr =
+ V.getAs<loc::ConcreteInt>())
os << "a constant address (" << ConstAddr->getValue() << ")";
- else if (Optional<loc::GotoLabel> Label = V.getAs<loc::GotoLabel>())
+ else if (std::optional<loc::GotoLabel> Label = V.getAs<loc::GotoLabel>())
os << "the address of the label '" << Label->getLabel()->getName() << "'";
else
return false;
@@ -2005,8 +2213,8 @@ void MallocChecker::HandleNonHeapDealloc(CheckerContext &C, SVal ArgVal,
return;
}
- Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family);
- if (!CheckKind.hasValue())
+ std::optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family);
+ if (!CheckKind)
return;
if (ExplodedNode *N = C.generateErrorNode()) {
@@ -2046,7 +2254,7 @@ void MallocChecker::HandleNonHeapDealloc(CheckerContext &C, SVal ArgVal,
void MallocChecker::HandleFreeAlloca(CheckerContext &C, SVal ArgVal,
SourceRange Range) const {
- Optional<MallocChecker::CheckKind> CheckKind;
+ std::optional<MallocChecker::CheckKind> CheckKind;
if (ChecksEnabled[CK_MallocChecker])
CheckKind = CK_MallocChecker;
@@ -2138,8 +2346,8 @@ void MallocChecker::HandleOffsetFree(CheckerContext &C, SVal ArgVal,
return;
}
- Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family);
- if (!CheckKind.hasValue())
+ std::optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family);
+ if (!CheckKind)
return;
ExplodedNode *N = C.generateErrorNode();
@@ -2195,8 +2403,8 @@ void MallocChecker::HandleUseAfterFree(CheckerContext &C, SourceRange Range,
return;
}
- Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
- if (!CheckKind.hasValue())
+ std::optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
+ if (!CheckKind)
return;
if (ExplodedNode *N = C.generateErrorNode()) {
@@ -2234,8 +2442,8 @@ void MallocChecker::HandleDoubleFree(CheckerContext &C, SourceRange Range,
return;
}
- Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
- if (!CheckKind.hasValue())
+ std::optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
+ if (!CheckKind)
return;
if (ExplodedNode *N = C.generateErrorNode()) {
@@ -2264,8 +2472,8 @@ void MallocChecker::HandleDoubleDelete(CheckerContext &C, SymbolRef Sym) const {
return;
}
- Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
- if (!CheckKind.hasValue())
+ std::optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
+ if (!CheckKind)
return;
if (ExplodedNode *N = C.generateErrorNode()) {
@@ -2291,9 +2499,9 @@ void MallocChecker::HandleUseZeroAlloc(CheckerContext &C, SourceRange Range,
return;
}
- Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
+ std::optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
- if (!CheckKind.hasValue())
+ if (!CheckKind)
return;
if (ExplodedNode *N = C.generateErrorNode()) {
@@ -2303,7 +2511,8 @@ void MallocChecker::HandleUseZeroAlloc(CheckerContext &C, SourceRange Range,
categories::MemoryError));
auto R = std::make_unique<PathSensitiveBugReport>(
- *BT_UseZerroAllocated[*CheckKind], "Use of zero-allocated memory", N);
+ *BT_UseZerroAllocated[*CheckKind],
+ "Use of memory allocated with size zero", N);
R->addRange(Range);
if (Sym) {
@@ -2323,8 +2532,8 @@ void MallocChecker::HandleFunctionPtrFree(CheckerContext &C, SVal ArgVal,
return;
}
- Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family);
- if (!CheckKind.hasValue())
+ std::optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family);
+ if (!CheckKind)
return;
if (ExplodedNode *N = C.generateErrorNode()) {
@@ -2369,14 +2578,14 @@ MallocChecker::ReallocMemAux(CheckerContext &C, const CallEvent &Call,
const Expr *arg0Expr = CE->getArg(0);
SVal Arg0Val = C.getSVal(arg0Expr);
- if (!Arg0Val.getAs<DefinedOrUnknownSVal>())
+ if (!isa<DefinedOrUnknownSVal>(Arg0Val))
return nullptr;
DefinedOrUnknownSVal arg0Val = Arg0Val.castAs<DefinedOrUnknownSVal>();
SValBuilder &svalBuilder = C.getSValBuilder();
- DefinedOrUnknownSVal PtrEQ =
- svalBuilder.evalEQ(State, arg0Val, svalBuilder.makeNull());
+ DefinedOrUnknownSVal PtrEQ = svalBuilder.evalEQ(
+ State, arg0Val, svalBuilder.makeNullWithType(arg0Expr->getType()));
// Get the size argument.
const Expr *Arg1 = CE->getArg(1);
@@ -2385,13 +2594,14 @@ MallocChecker::ReallocMemAux(CheckerContext &C, const CallEvent &Call,
SVal TotalSize = C.getSVal(Arg1);
if (SuffixWithN)
TotalSize = evalMulForBufferSize(C, Arg1, CE->getArg(2));
- if (!TotalSize.getAs<DefinedOrUnknownSVal>())
+ if (!isa<DefinedOrUnknownSVal>(TotalSize))
return nullptr;
// Compare the size argument to 0.
DefinedOrUnknownSVal SizeZero =
- svalBuilder.evalEQ(State, TotalSize.castAs<DefinedOrUnknownSVal>(),
- svalBuilder.makeIntValWithPtrWidth(0, false));
+ svalBuilder.evalEQ(State, TotalSize.castAs<DefinedOrUnknownSVal>(),
+ svalBuilder.makeIntValWithWidth(
+ svalBuilder.getContext().getSizeType(), 0));
ProgramStateRef StatePtrIsNull, StatePtrNotNull;
std::tie(StatePtrIsNull, StatePtrNotNull) = State->assume(PtrEQ);
@@ -2533,10 +2743,10 @@ void MallocChecker::HandleLeak(SymbolRef Sym, ExplodedNode *N,
if (Family == AF_Alloca)
return;
- Optional<MallocChecker::CheckKind>
- CheckKind = getCheckIfTracked(Family, true);
+ std::optional<MallocChecker::CheckKind> CheckKind =
+ getCheckIfTracked(Family, true);
- if (!CheckKind.hasValue())
+ if (!CheckKind)
return;
assert(N);
@@ -2579,6 +2789,8 @@ void MallocChecker::HandleLeak(SymbolRef Sym, ExplodedNode *N,
AllocNode->getLocationContext()->getDecl());
R->markInteresting(Sym);
R->addVisitor<MallocBugVisitor>(Sym, true);
+ if (ShouldRegisterNoOwnershipChangeVisitor)
+ R->addVisitor<NoOwnershipChangeVisitor>(Sym, this);
C.emitReport(std::move(R));
}
@@ -2591,12 +2803,12 @@ void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
RegionStateTy RS = OldRS;
SmallVector<SymbolRef, 2> Errors;
- for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
- if (SymReaper.isDead(I->first)) {
- if (I->second.isAllocated() || I->second.isAllocatedOfSizeZero())
- Errors.push_back(I->first);
+ for (auto [Sym, State] : RS) {
+ if (SymReaper.isDead(Sym)) {
+ if (State.isAllocated() || State.isAllocatedOfSizeZero())
+ Errors.push_back(Sym);
// Remove the dead symbol from the map.
- RS = F.remove(RS, I->first);
+ RS = F.remove(RS, Sym);
}
}
@@ -2611,19 +2823,17 @@ void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
// Cleanup the Realloc Pairs Map.
ReallocPairsTy RP = state->get<ReallocPairs>();
- for (ReallocPairsTy::iterator I = RP.begin(), E = RP.end(); I != E; ++I) {
- if (SymReaper.isDead(I->first) ||
- SymReaper.isDead(I->second.ReallocatedSym)) {
- state = state->remove<ReallocPairs>(I->first);
+ for (auto [Sym, ReallocPair] : RP) {
+ if (SymReaper.isDead(Sym) || SymReaper.isDead(ReallocPair.ReallocatedSym)) {
+ state = state->remove<ReallocPairs>(Sym);
}
}
// Cleanup the FreeReturnValue Map.
FreeReturnValueTy FR = state->get<FreeReturnValue>();
- for (FreeReturnValueTy::iterator I = FR.begin(), E = FR.end(); I != E; ++I) {
- if (SymReaper.isDead(I->first) ||
- SymReaper.isDead(I->second)) {
- state = state->remove<FreeReturnValue>(I->first);
+ for (auto [Sym, RetSym] : FR) {
+ if (SymReaper.isDead(Sym) || SymReaper.isDead(RetSym)) {
+ state = state->remove<FreeReturnValue>(Sym);
}
}
@@ -2633,9 +2843,8 @@ void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
static CheckerProgramPointTag Tag("MallocChecker", "DeadSymbolsLeak");
N = C.generateNonFatalErrorNode(C.getState(), &Tag);
if (N) {
- for (SmallVectorImpl<SymbolRef>::iterator
- I = Errors.begin(), E = Errors.end(); I != E; ++I) {
- HandleLeak(*I, N, C);
+ for (SymbolRef Sym : Errors) {
+ HandleLeak(Sym, N, C);
}
}
}
@@ -2692,7 +2901,7 @@ void MallocChecker::checkPreCall(const CallEvent &Call,
// Check arguments for being used after free.
for (unsigned I = 0, E = Call.getNumArgs(); I != E; ++I) {
SVal ArgSVal = Call.getArgSVal(I);
- if (ArgSVal.getAs<Loc>()) {
+ if (isa<Loc>(ArgSVal)) {
SymbolRef Sym = ArgSVal.getAsSymbol();
if (!Sym)
continue;
@@ -2733,7 +2942,7 @@ void MallocChecker::checkEscapeOnReturn(const ReturnStmt *S,
// the callee could still free the memory.
// TODO: This logic should be a part of generic symbol escape callback.
if (const MemRegion *MR = RetVal.getAsRegion())
- if (isa<FieldRegion>(MR) || isa<ElementRegion>(MR))
+ if (isa<FieldRegion, ElementRegion>(MR))
if (const SymbolicRegion *BMR =
dyn_cast<SymbolicRegion>(MR->getBaseRegion()))
Sym = BMR->getSymbol();
@@ -2758,18 +2967,16 @@ void MallocChecker::checkPostStmt(const BlockExpr *BE,
const BlockDataRegion *R =
cast<BlockDataRegion>(C.getSVal(BE).getAsRegion());
- BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
- E = R->referenced_vars_end();
-
- if (I == E)
+ auto ReferencedVars = R->referenced_vars();
+ if (ReferencedVars.empty())
return;
SmallVector<const MemRegion*, 10> Regions;
const LocationContext *LC = C.getLocationContext();
MemRegionManager &MemMgr = C.getSValBuilder().getRegionManager();
- for ( ; I != E; ++I) {
- const VarRegion *VR = I.getCapturedRegion();
+ for (const auto &Var : ReferencedVars) {
+ const VarRegion *VR = Var.getCapturedRegion();
if (VR->getSuperRegion() == R) {
VR = MemMgr.getVarRegion(VR->getDecl(), LC);
}
@@ -2865,28 +3072,28 @@ ProgramStateRef MallocChecker::evalAssume(ProgramStateRef state,
SVal Cond,
bool Assumption) const {
RegionStateTy RS = state->get<RegionState>();
- for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
+ for (SymbolRef Sym : llvm::make_first_range(RS)) {
// If the symbol is assumed to be NULL, remove it from consideration.
ConstraintManager &CMgr = state->getConstraintManager();
- ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey());
+ ConditionTruthVal AllocFailed = CMgr.isNull(state, Sym);
if (AllocFailed.isConstrainedTrue())
- state = state->remove<RegionState>(I.getKey());
+ state = state->remove<RegionState>(Sym);
}
// Realloc returns 0 when reallocation fails, which means that we should
// restore the state of the pointer being reallocated.
ReallocPairsTy RP = state->get<ReallocPairs>();
- for (ReallocPairsTy::iterator I = RP.begin(), E = RP.end(); I != E; ++I) {
+ for (auto [Sym, ReallocPair] : RP) {
// If the symbol is assumed to be NULL, remove it from consideration.
ConstraintManager &CMgr = state->getConstraintManager();
- ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey());
+ ConditionTruthVal AllocFailed = CMgr.isNull(state, Sym);
if (!AllocFailed.isConstrainedTrue())
continue;
- SymbolRef ReallocSym = I.getData().ReallocatedSym;
+ SymbolRef ReallocSym = ReallocPair.ReallocatedSym;
if (const RefState *RS = state->get<RegionState>(ReallocSym)) {
if (RS->isReleased()) {
- switch (I.getData().Kind) {
+ switch (ReallocPair.Kind) {
case OAR_ToBeFreedAfterFailure:
state = state->set<RegionState>(ReallocSym,
RefState::getAllocated(RS->getAllocationFamily(), RS->getStmt()));
@@ -2895,11 +3102,11 @@ ProgramStateRef MallocChecker::evalAssume(ProgramStateRef state,
state = state->remove<RegionState>(ReallocSym);
break;
default:
- assert(I.getData().Kind == OAR_FreeOnFailure);
+ assert(ReallocPair.Kind == OAR_FreeOnFailure);
}
}
}
- state = state->remove<ReallocPairs>(I.getKey());
+ state = state->remove<ReallocPairs>(Sym);
}
return state;
@@ -2916,7 +3123,7 @@ bool MallocChecker::mayFreeAnyEscapedMemoryOrIsModeledExplicitly(
// TODO: If we want to be more optimistic here, we'll need to make sure that
// regions escape to C++ containers. They seem to do that even now, but for
// mysterious reasons.
- if (!(isa<SimpleFunctionCall>(Call) || isa<ObjCMethodCall>(Call)))
+ if (!isa<SimpleFunctionCall, ObjCMethodCall>(Call))
return true;
// Check Objective-C messages by selector name.
@@ -2935,7 +3142,7 @@ bool MallocChecker::mayFreeAnyEscapedMemoryOrIsModeledExplicitly(
// about, we can't be sure that the object will use free() to deallocate the
// memory, so we can't model it explicitly. The best we can do is use it to
// decide whether the pointer escapes.
- if (Optional<bool> FreeWhenDone = getFreeWhenDoneArg(*Msg))
+ if (std::optional<bool> FreeWhenDone = getFreeWhenDoneArg(*Msg))
return *FreeWhenDone;
// If the first selector piece ends with "NoCopy", and there is no
@@ -2943,16 +3150,16 @@ bool MallocChecker::mayFreeAnyEscapedMemoryOrIsModeledExplicitly(
// transferred. Again, though, we can't be sure that the object will use
// free() to deallocate the memory, so we can't model it explicitly.
StringRef FirstSlot = Msg->getSelector().getNameForSlot(0);
- if (FirstSlot.endswith("NoCopy"))
+ if (FirstSlot.ends_with("NoCopy"))
return true;
// If the first selector starts with addPointer, insertPointer,
// or replacePointer, assume we are dealing with NSPointerArray or similar.
// This is similar to C++ containers (vector); we still might want to check
// that the pointers get freed by following the container itself.
- if (FirstSlot.startswith("addPointer") ||
- FirstSlot.startswith("insertPointer") ||
- FirstSlot.startswith("replacePointer") ||
+ if (FirstSlot.starts_with("addPointer") ||
+ FirstSlot.starts_with("insertPointer") ||
+ FirstSlot.starts_with("replacePointer") ||
FirstSlot.equals("valueWithPointer")) {
return true;
}
@@ -2992,7 +3199,7 @@ bool MallocChecker::mayFreeAnyEscapedMemoryOrIsModeledExplicitly(
// White list the 'XXXNoCopy' CoreFoundation functions.
// We specifically check these before
- if (FName.endswith("NoCopy")) {
+ if (FName.ends_with("NoCopy")) {
// Look for the deallocator argument. We know that the memory ownership
// is not transferred only if the deallocator argument is
// 'kCFAllocatorNull'.
@@ -3024,7 +3231,7 @@ bool MallocChecker::mayFreeAnyEscapedMemoryOrIsModeledExplicitly(
const Expr *ArgE = Call->getArgExpr(0)->IgnoreParenCasts();
if (const DeclRefExpr *ArgDRE = dyn_cast<DeclRefExpr>(ArgE))
if (const VarDecl *D = dyn_cast<VarDecl>(ArgDRE->getDecl()))
- if (D->getCanonicalDecl()->getName().find("std") != StringRef::npos)
+ if (D->getCanonicalDecl()->getName().contains("std"))
return true;
}
}
@@ -3052,6 +3259,11 @@ bool MallocChecker::mayFreeAnyEscapedMemoryOrIsModeledExplicitly(
return true;
}
+ if (FName == "singleShotImpl" &&
+ FD->getQualifiedNameAsString() == "QTimer::singleShotImpl") {
+ return true;
+ }
+
// Handle cases where we know a buffer's /address/ can escape.
// Note that the above checks handle some special cases where we know that
// even though the address escapes, it's still our responsibility to free the
@@ -3100,11 +3312,7 @@ ProgramStateRef MallocChecker::checkPointerEscapeAux(
return State;
}
- for (InvalidatedSymbols::const_iterator I = Escaped.begin(),
- E = Escaped.end();
- I != E; ++I) {
- SymbolRef sym = *I;
-
+ for (SymbolRef sym : Escaped) {
if (EscapingSymbol && EscapingSymbol != sym)
continue;
@@ -3224,7 +3432,7 @@ PathDiagnosticPieceRef MallocBugVisitor::VisitNode(const ExplodedNode *N,
allocation_state::getContainerObjRegion(statePrev, Sym);
const auto *TypedRegion = cast<TypedValueRegion>(ObjRegion);
QualType ObjTy = TypedRegion->getValueType();
- OS << "Inner buffer of '" << ObjTy.getAsString() << "' ";
+ OS << "Inner buffer of '" << ObjTy << "' ";
if (N->getLocation().getKind() == ProgramPoint::PostImplicitCallKind) {
OS << "deallocated by call to destructor";
@@ -3239,7 +3447,8 @@ PathDiagnosticPieceRef MallocBugVisitor::VisitNode(const ExplodedNode *N,
OS << OpCallE->getDirectCallee()->getDeclName();
} else if (const auto *CallE = dyn_cast<CallExpr>(S)) {
auto &CEMgr = BRC.getStateManager().getCallEventManager();
- CallEventRef<> Call = CEMgr.getSimpleCall(CallE, state, CurrentLC);
+ CallEventRef<> Call =
+ CEMgr.getSimpleCall(CallE, state, CurrentLC, {nullptr, 0});
if (const auto *D = dyn_cast_or_null<NamedDecl>(Call->getDecl()))
OS << D->getDeclName();
else
@@ -3351,17 +3560,18 @@ void MallocChecker::printState(raw_ostream &Out, ProgramStateRef State,
if (!RS.isEmpty()) {
Out << Sep << "MallocChecker :" << NL;
- for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
- const RefState *RefS = State->get<RegionState>(I.getKey());
+ for (auto [Sym, Data] : RS) {
+ const RefState *RefS = State->get<RegionState>(Sym);
AllocationFamily Family = RefS->getAllocationFamily();
- Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family);
- if (!CheckKind.hasValue())
- CheckKind = getCheckIfTracked(Family, true);
+ std::optional<MallocChecker::CheckKind> CheckKind =
+ getCheckIfTracked(Family);
+ if (!CheckKind)
+ CheckKind = getCheckIfTracked(Family, true);
- I.getKey()->dumpToStream(Out);
+ Sym->dumpToStream(Out);
Out << " : ";
- I.getData().dump(Out);
- if (CheckKind.hasValue())
+ Data.dump(Out);
+ if (CheckKind)
Out << " (" << CheckNames[*CheckKind].getName() << ")";
Out << NL;
}
@@ -3395,6 +3605,9 @@ void ento::registerDynamicMemoryModeling(CheckerManager &mgr) {
auto *checker = mgr.registerChecker<MallocChecker>();
checker->ShouldIncludeOwnershipAnnotatedFunctions =
mgr.getAnalyzerOptions().getCheckerBooleanOption(checker, "Optimistic");
+ checker->ShouldRegisterNoOwnershipChangeVisitor =
+ mgr.getAnalyzerOptions().getCheckerBooleanOption(
+ checker, "AddNoOwnershipChangeNotes");
}
bool ento::shouldRegisterDynamicMemoryModeling(const CheckerManager &mgr) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
index e31630f63b5a..3c8b38973c6b 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
@@ -24,6 +24,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/SmallVector.h"
+#include <optional>
#include <utility>
using namespace clang;
@@ -32,12 +33,14 @@ using llvm::APSInt;
namespace {
struct MallocOverflowCheck {
+ const CallExpr *call;
const BinaryOperator *mulop;
const Expr *variable;
APSInt maxVal;
- MallocOverflowCheck(const BinaryOperator *m, const Expr *v, APSInt val)
- : mulop(m), variable(v), maxVal(std::move(val)) {}
+ MallocOverflowCheck(const CallExpr *call, const BinaryOperator *m,
+ const Expr *v, APSInt val)
+ : call(call), mulop(m), variable(v), maxVal(std::move(val)) {}
};
class MallocOverflowSecurityChecker : public Checker<check::ASTCodeBody> {
@@ -46,8 +49,8 @@ public:
BugReporter &BR) const;
void CheckMallocArgument(
- SmallVectorImpl<MallocOverflowCheck> &PossibleMallocOverflows,
- const Expr *TheArgument, ASTContext &Context) const;
+ SmallVectorImpl<MallocOverflowCheck> &PossibleMallocOverflows,
+ const CallExpr *TheCall, ASTContext &Context) const;
void OutputPossibleOverflows(
SmallVectorImpl<MallocOverflowCheck> &PossibleMallocOverflows,
@@ -62,16 +65,15 @@ static inline bool EvaluatesToZero(APSInt &Val, BinaryOperatorKind op) {
}
void MallocOverflowSecurityChecker::CheckMallocArgument(
- SmallVectorImpl<MallocOverflowCheck> &PossibleMallocOverflows,
- const Expr *TheArgument,
- ASTContext &Context) const {
+ SmallVectorImpl<MallocOverflowCheck> &PossibleMallocOverflows,
+ const CallExpr *TheCall, ASTContext &Context) const {
/* Look for a linear combination with a single variable, and at least
one multiplication.
Reject anything that applies to the variable: an explicit cast,
conditional expression, an operation that could reduce the range
of the result, or anything too complicated :-). */
- const Expr *e = TheArgument;
+ const Expr *e = TheCall->getArg(0);
const BinaryOperator * mulop = nullptr;
APSInt maxVal;
@@ -101,8 +103,7 @@ void MallocOverflowSecurityChecker::CheckMallocArgument(
e = rhs;
} else
return;
- }
- else if (isa<DeclRefExpr>(e) || isa<MemberExpr>(e))
+ } else if (isa<DeclRefExpr, MemberExpr>(e))
break;
else
return;
@@ -115,9 +116,8 @@ void MallocOverflowSecurityChecker::CheckMallocArgument(
// the data so when the body of the function is completely available
// we can check for comparisons.
- // TODO: Could push this into the innermost scope where 'e' is
- // defined, rather than the whole function.
- PossibleMallocOverflows.push_back(MallocOverflowCheck(mulop, e, maxVal));
+ PossibleMallocOverflows.push_back(
+ MallocOverflowCheck(TheCall, mulop, e, maxVal));
}
namespace {
@@ -153,17 +153,19 @@ private:
return getDecl(CheckDR) == getDecl(DR) && Pred(Check);
return false;
};
- toScanFor.erase(std::remove_if(toScanFor.begin(), toScanFor.end(), P),
- toScanFor.end());
+ llvm::erase_if(toScanFor, P);
}
void CheckExpr(const Expr *E_p) {
- auto PredTrue = [](const MallocOverflowCheck &) { return true; };
const Expr *E = E_p->IgnoreParenImpCasts();
+ const auto PrecedesMalloc = [E, this](const MallocOverflowCheck &c) {
+ return Context.getSourceManager().isBeforeInTranslationUnit(
+ E->getExprLoc(), c.call->getExprLoc());
+ };
if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E))
- Erase<DeclRefExpr>(DR, PredTrue);
+ Erase<DeclRefExpr>(DR, PrecedesMalloc);
else if (const auto *ME = dyn_cast<MemberExpr>(E)) {
- Erase<MemberExpr>(ME, PredTrue);
+ Erase<MemberExpr>(ME, PrecedesMalloc);
}
}
@@ -278,17 +280,13 @@ void MallocOverflowSecurityChecker::OutputPossibleOverflows(
c.Visit(mgr.getAnalysisDeclContext(D)->getBody());
// Output warnings for all overflows that are left.
- for (CheckOverflowOps::theVecType::iterator
- i = PossibleMallocOverflows.begin(),
- e = PossibleMallocOverflows.end();
- i != e;
- ++i) {
+ for (const MallocOverflowCheck &Check : PossibleMallocOverflows) {
BR.EmitBasicReport(
D, this, "malloc() size overflow", categories::UnixAPI,
"the computation of the size of the memory allocation may overflow",
- PathDiagnosticLocation::createOperatorLoc(i->mulop,
+ PathDiagnosticLocation::createOperatorLoc(Check.mulop,
BR.getSourceManager()),
- i->mulop->getSourceRange());
+ Check.mulop->getSourceRange());
}
}
@@ -307,26 +305,27 @@ void MallocOverflowSecurityChecker::checkASTCodeBody(const Decl *D,
CFGBlock *block = *it;
for (CFGBlock::iterator bi = block->begin(), be = block->end();
bi != be; ++bi) {
- if (Optional<CFGStmt> CS = bi->getAs<CFGStmt>()) {
- if (const CallExpr *TheCall = dyn_cast<CallExpr>(CS->getStmt())) {
- // Get the callee.
- const FunctionDecl *FD = TheCall->getDirectCallee();
-
- if (!FD)
- continue;
-
- // Get the name of the callee. If it's a builtin, strip off the prefix.
- IdentifierInfo *FnInfo = FD->getIdentifier();
- if (!FnInfo)
- continue;
-
- if (FnInfo->isStr ("malloc") || FnInfo->isStr ("_MALLOC")) {
- if (TheCall->getNumArgs() == 1)
- CheckMallocArgument(PossibleMallocOverflows, TheCall->getArg(0),
- mgr.getASTContext());
+ if (std::optional<CFGStmt> CS = bi->getAs<CFGStmt>()) {
+ if (const CallExpr *TheCall = dyn_cast<CallExpr>(CS->getStmt())) {
+ // Get the callee.
+ const FunctionDecl *FD = TheCall->getDirectCallee();
+
+ if (!FD)
+ continue;
+
+ // Get the name of the callee. If it's a builtin, strip off the
+ // prefix.
+ IdentifierInfo *FnInfo = FD->getIdentifier();
+ if (!FnInfo)
+ continue;
+
+ if (FnInfo->isStr("malloc") || FnInfo->isStr("_MALLOC")) {
+ if (TheCall->getNumArgs() == 1)
+ CheckMallocArgument(PossibleMallocOverflows, TheCall,
+ mgr.getASTContext());
+ }
}
}
- }
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
index 4b5206a102b8..9e81a6bd19fc 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
@@ -12,14 +12,15 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TypeLoc.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -182,22 +183,20 @@ public:
AnalysisDeclContext *ADC = mgr.getAnalysisDeclContext(D);
CastedAllocFinder Finder(&BR.getContext());
Finder.Visit(D->getBody());
- for (CastedAllocFinder::CallVec::iterator i = Finder.Calls.begin(),
- e = Finder.Calls.end(); i != e; ++i) {
- QualType CastedType = i->CastedExpr->getType();
+ for (const auto &CallRec : Finder.Calls) {
+ QualType CastedType = CallRec.CastedExpr->getType();
if (!CastedType->isPointerType())
continue;
QualType PointeeType = CastedType->getPointeeType();
if (PointeeType->isVoidType())
continue;
- for (CallExpr::const_arg_iterator ai = i->AllocCall->arg_begin(),
- ae = i->AllocCall->arg_end(); ai != ae; ++ai) {
- if (!(*ai)->getType()->isIntegralOrUnscopedEnumerationType())
+ for (const Expr *Arg : CallRec.AllocCall->arguments()) {
+ if (!Arg->getType()->isIntegralOrUnscopedEnumerationType())
continue;
SizeofFinder SFinder;
- SFinder.Visit(*ai);
+ SFinder.Visit(Arg);
if (SFinder.Sizeofs.size() != 1)
continue;
@@ -212,34 +211,33 @@ public:
continue;
const TypeSourceInfo *TSI = nullptr;
- if (i->CastedExprParent.is<const VarDecl *>()) {
- TSI =
- i->CastedExprParent.get<const VarDecl *>()->getTypeSourceInfo();
+ if (CallRec.CastedExprParent.is<const VarDecl *>()) {
+ TSI = CallRec.CastedExprParent.get<const VarDecl *>()
+ ->getTypeSourceInfo();
} else {
- TSI = i->ExplicitCastType;
+ TSI = CallRec.ExplicitCastType;
}
SmallString<64> buf;
llvm::raw_svector_ostream OS(buf);
OS << "Result of ";
- const FunctionDecl *Callee = i->AllocCall->getDirectCallee();
+ const FunctionDecl *Callee = CallRec.AllocCall->getDirectCallee();
if (Callee && Callee->getIdentifier())
OS << '\'' << Callee->getIdentifier()->getName() << '\'';
else
OS << "call";
- OS << " is converted to a pointer of type '"
- << PointeeType.getAsString() << "', which is incompatible with "
- << "sizeof operand type '" << SizeofType.getAsString() << "'";
+ OS << " is converted to a pointer of type '" << PointeeType
+ << "', which is incompatible with "
+ << "sizeof operand type '" << SizeofType << "'";
SmallVector<SourceRange, 4> Ranges;
- Ranges.push_back(i->AllocCall->getCallee()->getSourceRange());
+ Ranges.push_back(CallRec.AllocCall->getCallee()->getSourceRange());
Ranges.push_back(SFinder.Sizeofs[0]->getSourceRange());
if (TSI)
Ranges.push_back(TSI->getTypeLoc().getSourceRange());
- PathDiagnosticLocation L =
- PathDiagnosticLocation::createBegin(i->AllocCall->getCallee(),
- BR.getSourceManager(), ADC);
+ PathDiagnosticLocation L = PathDiagnosticLocation::createBegin(
+ CallRec.AllocCall->getCallee(), BR.getSourceManager(), ADC);
BR.EmitBasicReport(D, this, "Allocator sizeof operand mismatch",
categories::UnixAPI, OS.str(), L, Ranges);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MismatchedIteratorChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MismatchedIteratorChecker.cpp
index 1960873599f7..82a622831817 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MismatchedIteratorChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MismatchedIteratorChecker.cpp
@@ -30,22 +30,18 @@ namespace {
class MismatchedIteratorChecker
: public Checker<check::PreCall, check::PreStmt<BinaryOperator>> {
- std::unique_ptr<BugType> MismatchedBugType;
-
- void verifyMatch(CheckerContext &C, const SVal &Iter,
- const MemRegion *Cont) const;
- void verifyMatch(CheckerContext &C, const SVal &Iter1,
- const SVal &Iter2) const;
- void reportBug(const StringRef &Message, const SVal &Val1,
- const SVal &Val2, CheckerContext &C,
- ExplodedNode *ErrNode) const;
- void reportBug(const StringRef &Message, const SVal &Val,
- const MemRegion *Reg, CheckerContext &C,
+ const BugType MismatchedBugType{this, "Iterator(s) mismatched",
+ "Misuse of STL APIs",
+ /*SuppressOnSink=*/true};
+
+ void verifyMatch(CheckerContext &C, SVal Iter, const MemRegion *Cont) const;
+ void verifyMatch(CheckerContext &C, SVal Iter1, SVal Iter2) const;
+ void reportBug(StringRef Message, SVal Val1, SVal Val2, CheckerContext &C,
ExplodedNode *ErrNode) const;
+ void reportBug(StringRef Message, SVal Val, const MemRegion *Reg,
+ CheckerContext &C, ExplodedNode *ErrNode) const;
public:
- MismatchedIteratorChecker();
-
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPreStmt(const BinaryOperator *BO, CheckerContext &C) const;
@@ -53,12 +49,6 @@ public:
} // namespace
-MismatchedIteratorChecker::MismatchedIteratorChecker() {
- MismatchedBugType.reset(
- new BugType(this, "Iterator(s) mismatched", "Misuse of STL APIs",
- /*SuppressOnSink=*/true));
-}
-
void MismatchedIteratorChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
// Check for iterator mismatches
@@ -176,8 +166,10 @@ void MismatchedIteratorChecker::checkPreCall(const CallEvent &Call,
const auto *Param = Func->getParamDecl(J);
const auto *ParamType =
Param->getType()->getAs<SubstTemplateTypeParmType>();
- if (!ParamType ||
- ParamType->getReplacedParameter()->getDecl() != TPDecl)
+ if (!ParamType)
+ continue;
+ const TemplateTypeParmDecl *D = ParamType->getReplacedParameter();
+ if (D != TPDecl)
continue;
if (LHS.isUndef()) {
LHS = Call.getArgSVal(J);
@@ -200,7 +192,7 @@ void MismatchedIteratorChecker::checkPreStmt(const BinaryOperator *BO,
verifyMatch(C, LVal, RVal);
}
-void MismatchedIteratorChecker::verifyMatch(CheckerContext &C, const SVal &Iter,
+void MismatchedIteratorChecker::verifyMatch(CheckerContext &C, SVal Iter,
const MemRegion *Cont) const {
// Verify match between a container and the container of an iterator
Cont = Cont->getMostDerivedObjectRegion();
@@ -236,9 +228,8 @@ void MismatchedIteratorChecker::verifyMatch(CheckerContext &C, const SVal &Iter,
}
}
-void MismatchedIteratorChecker::verifyMatch(CheckerContext &C,
- const SVal &Iter1,
- const SVal &Iter2) const {
+void MismatchedIteratorChecker::verifyMatch(CheckerContext &C, SVal Iter1,
+ SVal Iter2) const {
// Verify match between the containers of two iterators
auto State = C.getState();
const auto *Pos1 = getIteratorPosition(State, Iter1);
@@ -275,23 +266,21 @@ void MismatchedIteratorChecker::verifyMatch(CheckerContext &C,
}
}
-void MismatchedIteratorChecker::reportBug(const StringRef &Message,
- const SVal &Val1,
- const SVal &Val2,
- CheckerContext &C,
+void MismatchedIteratorChecker::reportBug(StringRef Message, SVal Val1,
+ SVal Val2, CheckerContext &C,
ExplodedNode *ErrNode) const {
- auto R = std::make_unique<PathSensitiveBugReport>(*MismatchedBugType, Message,
+ auto R = std::make_unique<PathSensitiveBugReport>(MismatchedBugType, Message,
ErrNode);
R->markInteresting(Val1);
R->markInteresting(Val2);
C.emitReport(std::move(R));
}
-void MismatchedIteratorChecker::reportBug(const StringRef &Message,
- const SVal &Val, const MemRegion *Reg,
+void MismatchedIteratorChecker::reportBug(StringRef Message, SVal Val,
+ const MemRegion *Reg,
CheckerContext &C,
ExplodedNode *ErrNode) const {
- auto R = std::make_unique<PathSensitiveBugReport>(*MismatchedBugType, Message,
+ auto R = std::make_unique<PathSensitiveBugReport>(MismatchedBugType, Message,
ErrNode);
R->markInteresting(Val);
R->markInteresting(Reg);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
index 5d63d6efd234..2e31c16e457c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
@@ -15,14 +15,15 @@
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
using namespace clang;
using namespace ento;
-using llvm::APSInt;
namespace {
class MmapWriteExecChecker : public Checker<check::PreCall> {
@@ -31,9 +32,11 @@ class MmapWriteExecChecker : public Checker<check::PreCall> {
static int ProtWrite;
static int ProtExec;
static int ProtRead;
- mutable std::unique_ptr<BugType> BT;
+ const BugType BT{this, "W^X check fails, Write Exec prot flags set",
+ "Security"};
+
public:
- MmapWriteExecChecker() : MmapFn("mmap", 6), MprotectFn("mprotect", 3) {}
+ MmapWriteExecChecker() : MmapFn({"mmap"}, 6), MprotectFn({"mprotect"}, 3) {}
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
int ProtExecOv;
int ProtReadOv;
@@ -46,9 +49,11 @@ int MmapWriteExecChecker::ProtRead = 0x01;
void MmapWriteExecChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
- if (Call.isCalled(MmapFn) || Call.isCalled(MprotectFn)) {
+ if (matchesAny(Call, MmapFn, MprotectFn)) {
SVal ProtVal = Call.getArgSVal(2);
- Optional<nonloc::ConcreteInt> ProtLoc = ProtVal.getAs<nonloc::ConcreteInt>();
+ auto ProtLoc = ProtVal.getAs<nonloc::ConcreteInt>();
+ if (!ProtLoc)
+ return;
int64_t Prot = ProtLoc->getValue().getSExtValue();
if (ProtExecOv != ProtExec)
ProtExec = ProtExecOv;
@@ -60,17 +65,16 @@ void MmapWriteExecChecker::checkPreCall(const CallEvent &Call,
return;
if ((Prot & (ProtWrite | ProtExec)) == (ProtWrite | ProtExec)) {
- if (!BT)
- BT.reset(new BugType(this, "W^X check fails, Write Exec prot flags set", "Security"));
-
ExplodedNode *N = C.generateNonFatalErrorNode();
if (!N)
return;
auto Report = std::make_unique<PathSensitiveBugReport>(
- *BT, "Both PROT_WRITE and PROT_EXEC flags are set. This can "
- "lead to exploitable memory regions, which could be overwritten "
- "with malicious code", N);
+ BT,
+ "Both PROT_WRITE and PROT_EXEC flags are set. This can "
+ "lead to exploitable memory regions, which could be overwritten "
+ "with malicious code",
+ N);
Report->addRange(Call.getArgSourceRange(2));
C.emitReport(std::move(Report));
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
index cbe938982000..5240352a9bd2 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
@@ -49,7 +49,6 @@ class MoveChecker
: public Checker<check::PreCall, check::PostCall,
check::DeadSymbols, check::RegionChanges> {
public:
- void checkEndFunction(const ReturnStmt *RS, CheckerContext &C) const;
void checkPreCall(const CallEvent &MC, CheckerContext &C) const;
void checkPostCall(const CallEvent &MC, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
@@ -185,7 +184,7 @@ private:
bool Found;
};
- AggressivenessKind Aggressiveness;
+ AggressivenessKind Aggressiveness = AK_KnownsAndLocals;
public:
void setAggressiveness(StringRef Str, CheckerManager &Mgr) {
@@ -214,8 +213,9 @@ private:
// Returns the exploded node against which the report was emitted.
// The caller *must* add any further transitions against this node.
- ExplodedNode *reportBug(const MemRegion *Region, const CXXRecordDecl *RD,
- CheckerContext &C, MisuseKind MK) const;
+ // Returns nullptr and does not report if such node already exists.
+ ExplodedNode *tryToReportBug(const MemRegion *Region, const CXXRecordDecl *RD,
+ CheckerContext &C, MisuseKind MK) const;
bool isInMoveSafeContext(const LocationContext *LC) const;
bool isStateResetMethod(const CXXMethodDecl *MethodDec) const;
@@ -310,7 +310,7 @@ MoveChecker::MovedBugVisitor::VisitNode(const ExplodedNode *N,
// If it's not a dereference, we don't care if it was reset to null
// or that it is even a smart pointer.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case SK_NonStd:
case SK_Safe:
OS << "Object";
@@ -378,19 +378,20 @@ void MoveChecker::modelUse(ProgramStateRef State, const MemRegion *Region,
return;
}
- ExplodedNode *N = reportBug(Region, RD, C, MK);
+ ExplodedNode *N = tryToReportBug(Region, RD, C, MK);
// If the program has already crashed on this path, don't bother.
- if (N->isSink())
+ if (!N || N->isSink())
return;
State = State->set<TrackedRegionMap>(Region, RegionState::getReported());
C.addTransition(State, N);
}
-ExplodedNode *MoveChecker::reportBug(const MemRegion *Region,
- const CXXRecordDecl *RD, CheckerContext &C,
- MisuseKind MK) const {
+ExplodedNode *MoveChecker::tryToReportBug(const MemRegion *Region,
+ const CXXRecordDecl *RD,
+ CheckerContext &C,
+ MisuseKind MK) const {
if (ExplodedNode *N = misuseCausesCrash(MK) ? C.generateErrorNode()
: C.generateNonFatalErrorNode()) {
// Uniqueing report to the same object.
@@ -554,7 +555,8 @@ MoveChecker::classifyObject(const MemRegion *MR,
// as not-"STL" types, because that's how the checker treats them.
MR = unwrapRValueReferenceIndirection(MR);
bool IsLocal =
- MR && isa<VarRegion>(MR) && isa<StackSpaceRegion>(MR->getMemorySpace());
+ isa_and_nonnull<VarRegion, CXXLifetimeExtendedObjectRegion>(MR) &&
+ isa<StackSpaceRegion>(MR->getMemorySpace());
if (!RD || !RD->getDeclContext()->isStdNamespace())
return { IsLocal, SK_NonStd };
@@ -588,7 +590,7 @@ void MoveChecker::explainObject(llvm::raw_ostream &OS, const MemRegion *MR,
break;
// We only care about the type if it's a dereference.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case SK_Unsafe:
OS << " of type '" << RD->getQualifiedNameAsString() << "'";
break;
@@ -619,10 +621,6 @@ void MoveChecker::checkPreCall(const CallEvent &Call, CheckerContext &C) const {
if (!IC)
return;
- // Calling a destructor on a moved object is fine.
- if (isa<CXXDestructorCall>(IC))
- return;
-
const MemRegion *ThisRegion = IC->getCXXThisVal().getAsRegion();
if (!ThisRegion)
return;
@@ -632,6 +630,10 @@ void MoveChecker::checkPreCall(const CallEvent &Call, CheckerContext &C) const {
if (!MethodDecl)
return;
+ // Calling a destructor on a moved object is fine.
+ if (isa<CXXDestructorDecl>(MethodDecl))
+ return;
+
// We want to investigate the whole object, not only sub-object of a parent
// class in which the encountered method defined.
ThisRegion = ThisRegion->getMostDerivedObjectRegion();
@@ -712,12 +714,9 @@ ProgramStateRef MoveChecker::checkRegionChanges(
// directly, but not all of them end up being invalidated.
// But when they do, they appear in the InvalidatedRegions array as well.
for (const auto *Region : RequestedRegions) {
- if (ThisRegion != Region) {
- if (llvm::find(InvalidatedRegions, Region) !=
- std::end(InvalidatedRegions)) {
- State = removeFromState(State, Region);
- }
- }
+ if (ThisRegion != Region &&
+ llvm::is_contained(InvalidatedRegions, Region))
+ State = removeFromState(State, Region);
}
} else {
// For invalidations that aren't caused by calls, assume nothing. In
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
index be17e401fb53..0648084a7d39 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
@@ -31,7 +31,8 @@ using namespace ento;
namespace {
class NSAutoreleasePoolChecker
: public Checker<check::PreObjCMessage> {
- mutable std::unique_ptr<BugType> BT;
+ const BugType BT{this, "Use -drain instead of -release",
+ "API Upgrade (Apple)"};
mutable Selector releaseS;
public:
@@ -57,10 +58,6 @@ void NSAutoreleasePoolChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
if (msg.getSelector() != releaseS)
return;
- if (!BT)
- BT.reset(new BugType(this, "Use -drain instead of -release",
- "API Upgrade (Apple)"));
-
ExplodedNode *N = C.generateNonFatalErrorNode();
if (!N) {
assert(0);
@@ -68,7 +65,7 @@ void NSAutoreleasePoolChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
}
auto Report = std::make_unique<PathSensitiveBugReport>(
- *BT,
+ BT,
"Use -drain instead of -release when using NSAutoreleasePool and "
"garbage collection",
N);
@@ -80,7 +77,7 @@ void ento::registerNSAutoreleasePoolChecker(CheckerManager &mgr) {
mgr.registerChecker<NSAutoreleasePoolChecker>();
}
-bool ento::shouldRegisterNSAutoreleasePoolChecker(const CheckerManager &mgr) {
+bool ento::shouldRegisterNSAutoreleasePoolChecker(const CheckerManager &mgr) {
const LangOptions &LO = mgr.getLangOpts();
return LO.getGC() != LangOptions::NonGC;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
index 90c5583d8969..54870bcb4bb2 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines a CheckNSError, a flow-insenstive check
+// This file defines a CheckNSError, a flow-insensitive check
// that determines if an Objective-C class interface correctly returns
// a non-void return type.
//
@@ -24,6 +24,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -38,10 +39,10 @@ static bool IsCFError(QualType T, IdentifierInfo *II);
namespace {
class NSErrorMethodChecker
: public Checker< check::ASTDecl<ObjCMethodDecl> > {
- mutable IdentifierInfo *II;
+ mutable IdentifierInfo *II = nullptr;
public:
- NSErrorMethodChecker() : II(nullptr) {}
+ NSErrorMethodChecker() = default;
void checkASTDecl(const ObjCMethodDecl *D,
AnalysisManager &mgr, BugReporter &BR) const;
@@ -118,7 +119,7 @@ void CFErrorFunctionChecker::checkASTDecl(const FunctionDecl *D,
II = &D->getASTContext().Idents.get("CFErrorRef");
bool hasCFError = false;
- for (auto I : D->parameters()) {
+ for (auto *I : D->parameters()) {
if (IsCFError(I->getType(), II)) {
hasCFError = true;
break;
@@ -166,7 +167,7 @@ class NSOrCFErrorDerefChecker
mutable std::unique_ptr<NSErrorDerefBug> NSBT;
mutable std::unique_ptr<CFErrorDerefBug> CFBT;
public:
- DefaultBool ShouldCheckNSError, ShouldCheckCFError;
+ bool ShouldCheckNSError = false, ShouldCheckCFError = false;
CheckerNameRef NSErrorName, CFErrorName;
NSOrCFErrorDerefChecker() : NSErrorII(nullptr), CFErrorII(nullptr) {}
@@ -197,7 +198,7 @@ static void setFlag(ProgramStateRef state, SVal val, CheckerContext &C) {
static QualType parameterTypeFromSVal(SVal val, CheckerContext &C) {
const StackFrameContext * SFC = C.getStackFrame();
- if (Optional<loc::MemRegionVal> X = val.getAs<loc::MemRegionVal>()) {
+ if (std::optional<loc::MemRegionVal> X = val.getAs<loc::MemRegionVal>()) {
const MemRegion* R = X->getRegion();
if (const VarRegion *VR = R->getAs<VarRegion>())
if (const StackArgumentsSpaceRegion *
@@ -214,7 +215,7 @@ void NSOrCFErrorDerefChecker::checkLocation(SVal loc, bool isLoad,
CheckerContext &C) const {
if (!isLoad)
return;
- if (loc.isUndef() || !loc.getAs<Loc>())
+ if (loc.isUndef() || !isa<Loc>(loc))
return;
ASTContext &Ctx = C.getASTContext();
@@ -266,7 +267,7 @@ void NSOrCFErrorDerefChecker::checkEvent(ImplicitNullDerefEvent event) const {
SmallString<128> Buf;
llvm::raw_svector_ostream os(Buf);
- os << "Potential null dereference. According to coding standards ";
+ os << "Potential null dereference. According to coding standards ";
os << (isNSError
? "in 'Creating and Returning NSError Objects' the parameter"
: "documented in CoreFoundation/CFError.h the parameter");
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
index af208e867318..17c3cb4e9e04 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
@@ -44,9 +44,11 @@ void NoReturnFunctionChecker::checkPostCall(const CallEvent &CE,
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CE.getDecl()))
BuildSinks = FD->hasAttr<AnalyzerNoReturnAttr>() || FD->isNoReturn();
- const Expr *Callee = CE.getOriginExpr();
- if (!BuildSinks && Callee)
- BuildSinks = getFunctionExtInfo(Callee->getType()).getNoReturn();
+ if (const CallExpr *CExpr = dyn_cast_or_null<CallExpr>(CE.getOriginExpr());
+ CExpr && !BuildSinks) {
+ if (const Expr *C = CExpr->getCallee())
+ BuildSinks = getFunctionExtInfo(C->getType()).getNoReturn();
+ }
if (!BuildSinks && CE.isGlobalCFunction()) {
if (const IdentifierInfo *II = CE.getCalleeIdentifier()) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
index 534b5d68434f..a9002ee7c966 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
@@ -18,6 +18,7 @@
#include "clang/Analysis/AnyCall.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
@@ -31,8 +32,9 @@ namespace {
class NonNullParamChecker
: public Checker<check::PreCall, check::BeginFunction,
EventDispatcher<ImplicitNullDerefEvent>> {
- mutable std::unique_ptr<BugType> BTAttrNonNull;
- mutable std::unique_ptr<BugType> BTNullRefArg;
+ const BugType BTAttrNonNull{
+ this, "Argument with 'nonnull' attribute passed null", "API"};
+ const BugType BTNullRefArg{this, "Dereference of null pointer"};
public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
@@ -136,10 +138,10 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
if (!DV)
continue;
- assert(!HasRefTypeParam || DV->getAs<Loc>());
+ assert(!HasRefTypeParam || isa<Loc>(*DV));
// Process the case when the argument is not a location.
- if (ExpectedToBeNonNull && !DV->getAs<Loc>()) {
+ if (ExpectedToBeNonNull && !isa<Loc>(*DV)) {
// If the argument is a union type, we want to handle a potential
// transparent_union GCC extension.
if (!ArgE)
@@ -161,7 +163,7 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
assert(++CSV->begin() == CSV->end());
// FIXME: Handle (some_union){ some_other_union_val }, which turns into
// a LazyCompoundVal inside a CompoundVal.
- if (!V.getAs<Loc>())
+ if (!isa<Loc>(V))
continue;
// Retrieve the corresponding expression.
@@ -278,13 +280,6 @@ std::unique_ptr<PathSensitiveBugReport>
NonNullParamChecker::genReportNullAttrNonNull(const ExplodedNode *ErrorNode,
const Expr *ArgE,
unsigned IdxOfArg) const {
- // Lazily allocate the BugType object if it hasn't already been
- // created. Ownership is transferred to the BugReporter object once
- // the BugReport is passed to 'EmitWarning'.
- if (!BTAttrNonNull)
- BTAttrNonNull.reset(new BugType(
- this, "Argument with 'nonnull' attribute passed null", "API"));
-
llvm::SmallString<256> SBuf;
llvm::raw_svector_ostream OS(SBuf);
OS << "Null pointer passed to "
@@ -292,7 +287,7 @@ NonNullParamChecker::genReportNullAttrNonNull(const ExplodedNode *ErrorNode,
<< " parameter expecting 'nonnull'";
auto R =
- std::make_unique<PathSensitiveBugReport>(*BTAttrNonNull, SBuf, ErrorNode);
+ std::make_unique<PathSensitiveBugReport>(BTAttrNonNull, SBuf, ErrorNode);
if (ArgE)
bugreporter::trackExpressionValue(ErrorNode, ArgE, *R);
@@ -302,11 +297,8 @@ NonNullParamChecker::genReportNullAttrNonNull(const ExplodedNode *ErrorNode,
std::unique_ptr<PathSensitiveBugReport>
NonNullParamChecker::genReportReferenceToNullPointer(
const ExplodedNode *ErrorNode, const Expr *ArgE) const {
- if (!BTNullRefArg)
- BTNullRefArg.reset(new BuiltinBug(this, "Dereference of null pointer"));
-
auto R = std::make_unique<PathSensitiveBugReport>(
- *BTNullRefArg, "Forming reference to null pointer", ErrorNode);
+ BTNullRefArg, "Forming reference to null pointer", ErrorNode);
if (ArgE) {
const Expr *ArgEDeref = bugreporter::getDerefExpr(ArgE);
if (!ArgEDeref)
@@ -314,7 +306,6 @@ NonNullParamChecker::genReportReferenceToNullPointer(
bugreporter::trackExpressionValue(ErrorNode, ArgEDeref, *R);
}
return R;
-
}
void ento::registerNonNullParamChecker(CheckerManager &mgr) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
index c5437b16c688..72c6a869d225 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
@@ -26,6 +26,7 @@
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -77,7 +78,8 @@ void NonnullGlobalConstantsChecker::checkLocation(SVal location, bool isLoad,
if (isGlobalConstString(location)) {
SVal V = State->getSVal(location.castAs<Loc>());
- Optional<DefinedOrUnknownSVal> Constr = V.getAs<DefinedOrUnknownSVal>();
+ std::optional<DefinedOrUnknownSVal> Constr =
+ V.getAs<DefinedOrUnknownSVal>();
if (Constr) {
@@ -91,7 +93,7 @@ void NonnullGlobalConstantsChecker::checkLocation(SVal location, bool isLoad,
/// \param V loaded lvalue.
/// \return whether @c val is a string-like const global.
bool NonnullGlobalConstantsChecker::isGlobalConstString(SVal V) const {
- Optional<loc::MemRegionVal> RegionVal = V.getAs<loc::MemRegionVal>();
+ std::optional<loc::MemRegionVal> RegionVal = V.getAs<loc::MemRegionVal>();
if (!RegionVal)
return false;
auto *Region = dyn_cast<VarRegion>(RegionVal->getAsRegion());
@@ -109,17 +111,20 @@ bool NonnullGlobalConstantsChecker::isGlobalConstString(SVal V) const {
// Look through the typedefs.
while (const Type *T = Ty.getTypePtr()) {
- if (const auto *TT = dyn_cast<TypedefType>(T)) {
+ if (const auto *AT = dyn_cast<AttributedType>(T)) {
+ if (AT->getAttrKind() == attr::TypeNonNull)
+ return true;
+ Ty = AT->getModifiedType();
+ } else if (const auto *ET = dyn_cast<ElaboratedType>(T)) {
+ const auto *TT = dyn_cast<TypedefType>(ET->getNamedType());
+ if (!TT)
+ return false;
Ty = TT->getDecl()->getUnderlyingType();
// It is sufficient for any intermediate typedef
// to be classified const.
HasConst = HasConst || Ty.isConstQualified();
if (isNonnullType(Ty) && HasConst)
return true;
- } else if (const auto *AT = dyn_cast<AttributedType>(T)) {
- if (AT->getAttrKind() == attr::TypeNonNull)
- return true;
- Ty = AT->getModifiedType();
} else {
return false;
}
@@ -136,7 +141,7 @@ bool NonnullGlobalConstantsChecker::isNonnullType(QualType Ty) const {
if (auto *T = dyn_cast<ObjCObjectPointerType>(Ty)) {
return T->getInterfaceDecl() &&
T->getInterfaceDecl()->getIdentifier() == NSStringII;
- } else if (auto *T = dyn_cast<TypedefType>(Ty)) {
+ } else if (auto *T = Ty->getAs<TypedefType>()) {
IdentifierInfo* II = T->getDecl()->getIdentifier();
return II == CFStringRefII || II == CFBooleanRefII || II == CFNullRefII;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
index fe8f7e7bf69e..06f1ad00eaf2 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
@@ -26,13 +26,15 @@
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/Analysis/AnyCall.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Path.h"
@@ -80,8 +82,9 @@ enum class ErrorKind : int {
class NullabilityChecker
: public Checker<check::Bind, check::PreCall, check::PreStmt<ReturnStmt>,
check::PostCall, check::PostStmt<ExplicitCastExpr>,
- check::PostObjCMessage, check::DeadSymbols,
- check::Location, check::Event<ImplicitNullDerefEvent>> {
+ check::PostObjCMessage, check::DeadSymbols, eval::Assume,
+ check::Location, check::Event<ImplicitNullDerefEvent>,
+ check::BeginFunction> {
public:
// If true, the checker will not diagnose nullabilility issues for calls
@@ -90,7 +93,7 @@ public:
// find warnings about nullability annotations that they have explicitly
// added themselves higher priority to fix than warnings on calls to system
// libraries.
- DefaultBool NoDiagnoseCallsToSystemHeaders;
+ bool NoDiagnoseCallsToSystemHeaders = false;
void checkBind(SVal L, SVal V, const Stmt *S, CheckerContext &C) const;
void checkPostStmt(const ExplicitCastExpr *CE, CheckerContext &C) const;
@@ -102,6 +105,9 @@ public:
void checkEvent(ImplicitNullDerefEvent Event) const;
void checkLocation(SVal Location, bool IsLoad, const Stmt *S,
CheckerContext &C) const;
+ void checkBeginFunction(CheckerContext &Ctx) const;
+ ProgramStateRef evalAssume(ProgramStateRef State, SVal Cond,
+ bool Assumption) const;
void printState(raw_ostream &Out, ProgramStateRef State, const char *NL,
const char *Sep) const override;
@@ -115,7 +121,7 @@ public:
CK_NumCheckKinds
};
- DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ bool ChecksEnabled[CK_NumCheckKinds] = {false};
CheckerNameRef CheckNames[CK_NumCheckKinds];
mutable std::unique_ptr<BugType> BTs[CK_NumCheckKinds];
@@ -129,8 +135,8 @@ public:
// When set to false no nullability information will be tracked in
// NullabilityMap. It is possible to catch errors like passing a null pointer
// to a callee that expects nonnull argument without the information that is
- // stroed in the NullabilityMap. This is an optimization.
- DefaultBool NeedTracking;
+ // stored in the NullabilityMap. This is an optimization.
+ bool NeedTracking = false;
private:
class NullabilityBugVisitor : public BugReporterVisitor {
@@ -230,10 +236,41 @@ bool operator==(NullabilityState Lhs, NullabilityState Rhs) {
Lhs.getNullabilitySource() == Rhs.getNullabilitySource();
}
+// For the purpose of tracking historical property accesses, the key for lookup
+// is an object pointer (could be an instance or a class) paired with the unique
+// identifier for the property being invoked on that object.
+using ObjectPropPair = std::pair<const MemRegion *, const IdentifierInfo *>;
+
+// Metadata associated with the return value from a recorded property access.
+struct ConstrainedPropertyVal {
+ // This will reference the conjured return SVal for some call
+ // of the form [object property]
+ DefinedOrUnknownSVal Value;
+
+ // If the SVal has been determined to be nonnull, that is recorded here
+ bool isConstrainedNonnull;
+
+ ConstrainedPropertyVal(DefinedOrUnknownSVal SV)
+ : Value(SV), isConstrainedNonnull(false) {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ Value.Profile(ID);
+ ID.AddInteger(isConstrainedNonnull ? 1 : 0);
+ }
+};
+
+bool operator==(const ConstrainedPropertyVal &Lhs,
+ const ConstrainedPropertyVal &Rhs) {
+ return Lhs.Value == Rhs.Value &&
+ Lhs.isConstrainedNonnull == Rhs.isConstrainedNonnull;
+}
+
} // end anonymous namespace
REGISTER_MAP_WITH_PROGRAMSTATE(NullabilityMap, const MemRegion *,
NullabilityState)
+REGISTER_MAP_WITH_PROGRAMSTATE(PropertyAccessesMap, ObjectPropPair,
+ ConstrainedPropertyVal)
// We say "the nullability type invariant is violated" when a location with a
// non-null type contains NULL or a function with a non-null return type returns
@@ -273,6 +310,10 @@ static NullConstraint getNullConstraint(DefinedOrUnknownSVal Val,
return NullConstraint::Unknown;
}
+static bool isValidPointerType(QualType T) {
+ return T->isAnyPointerType() || T->isBlockPointerType();
+}
+
const SymbolicRegion *
NullabilityChecker::getTrackRegion(SVal Val, bool CheckSuperRegion) const {
if (!NeedTracking)
@@ -285,8 +326,11 @@ NullabilityChecker::getTrackRegion(SVal Val, bool CheckSuperRegion) const {
const MemRegion *Region = RegionSVal->getRegion();
if (CheckSuperRegion) {
- if (auto FieldReg = Region->getAs<FieldRegion>())
+ if (const SubRegion *FieldReg = Region->getAs<FieldRegion>()) {
+ if (const auto *ER = dyn_cast<ElementRegion>(FieldReg->getSuperRegion()))
+ FieldReg = ER;
return dyn_cast<SymbolicRegion>(FieldReg->getSuperRegion());
+ }
if (auto ElementReg = Region->getAs<ElementRegion>())
return dyn_cast<SymbolicRegion>(ElementReg->getSuperRegion());
}
@@ -455,15 +499,24 @@ void NullabilityChecker::checkDeadSymbols(SymbolReaper &SR,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
NullabilityMapTy Nullabilities = State->get<NullabilityMap>();
- for (NullabilityMapTy::iterator I = Nullabilities.begin(),
- E = Nullabilities.end();
- I != E; ++I) {
- const auto *Region = I->first->getAs<SymbolicRegion>();
+ for (const MemRegion *Reg : llvm::make_first_range(Nullabilities)) {
+ const auto *Region = Reg->getAs<SymbolicRegion>();
assert(Region && "Non-symbolic region is tracked.");
if (SR.isDead(Region->getSymbol())) {
- State = State->remove<NullabilityMap>(I->first);
+ State = State->remove<NullabilityMap>(Reg);
}
}
+
+ // When an object goes out of scope, we can free the history associated
+ // with any property accesses on that object
+ PropertyAccessesMapTy PropertyAccesses = State->get<PropertyAccessesMap>();
+ for (ObjectPropPair PropKey : llvm::make_first_range(PropertyAccesses)) {
+ const MemRegion *ReceiverRegion = PropKey.first;
+ if (!SR.isLiveRegion(ReceiverRegion)) {
+ State = State->remove<PropertyAccessesMap>(PropKey);
+ }
+ }
+
// When one of the nonnull arguments are constrained to be null, nullability
// preconditions are violated. It is not enough to check this only when we
// actually report an error, because at that time interesting symbols might be
@@ -510,6 +563,37 @@ void NullabilityChecker::checkEvent(ImplicitNullDerefEvent Event) const {
}
}
+void NullabilityChecker::checkBeginFunction(CheckerContext &C) const {
+ if (!C.inTopFrame())
+ return;
+
+ const LocationContext *LCtx = C.getLocationContext();
+ auto AbstractCall = AnyCall::forDecl(LCtx->getDecl());
+ if (!AbstractCall || AbstractCall->parameters().empty())
+ return;
+
+ ProgramStateRef State = C.getState();
+ for (const ParmVarDecl *Param : AbstractCall->parameters()) {
+ if (!isValidPointerType(Param->getType()))
+ continue;
+
+ Nullability RequiredNullability =
+ getNullabilityAnnotation(Param->getType());
+ if (RequiredNullability != Nullability::Nullable)
+ continue;
+
+ const VarRegion *ParamRegion = State->getRegion(Param, LCtx);
+ const MemRegion *ParamPointeeRegion =
+ State->getSVal(ParamRegion).getAsRegion();
+ if (!ParamPointeeRegion)
+ continue;
+
+ State = State->set<NullabilityMap>(ParamPointeeRegion,
+ NullabilityState(RequiredNullability));
+ }
+ C.addTransition(State);
+}
+
// Whenever we see a load from a typed memory region that's been annotated as
// 'nonnull', we want to trust the user on that and assume that it is is indeed
// non-null.
@@ -572,7 +656,7 @@ void NullabilityChecker::checkPreStmt(const ReturnStmt *S,
if (!RetExpr)
return;
- if (!RetExpr->getType()->isAnyPointerType())
+ if (!isValidPointerType(RetExpr->getType()))
return;
ProgramStateRef State = C.getState();
@@ -705,7 +789,7 @@ void NullabilityChecker::checkPreCall(const CallEvent &Call,
if (!ArgSVal)
continue;
- if (!Param->getType()->isAnyPointerType() &&
+ if (!isValidPointerType(Param->getType()) &&
!Param->getType()->isReferenceType())
continue;
@@ -714,7 +798,7 @@ void NullabilityChecker::checkPreCall(const CallEvent &Call,
Nullability RequiredNullability =
getNullabilityAnnotation(Param->getType());
Nullability ArgExprTypeLevelNullability =
- getNullabilityAnnotation(ArgExpr->getType());
+ getNullabilityAnnotation(lookThroughImplicitCasts(ArgExpr)->getType());
unsigned ParamIdx = Param->getFunctionScopeIndex() + 1;
@@ -792,7 +876,7 @@ void NullabilityChecker::checkPostCall(const CallEvent &Call,
if (!FuncType)
return;
QualType ReturnType = FuncType->getReturnType();
- if (!ReturnType->isAnyPointerType())
+ if (!isValidPointerType(ReturnType))
return;
ProgramStateRef State = C.getState();
if (State->get<InvariantViolated>())
@@ -806,7 +890,7 @@ void NullabilityChecker::checkPostCall(const CallEvent &Call,
// of CG calls.
const SourceManager &SM = C.getSourceManager();
StringRef FilePath = SM.getFilename(SM.getSpellingLoc(Decl->getBeginLoc()));
- if (llvm::sys::path::filename(FilePath).startswith("CG")) {
+ if (llvm::sys::path::filename(FilePath).starts_with("CG")) {
State = State->set<NullabilityMap>(Region, Nullability::Contradicted);
C.addTransition(State);
return;
@@ -815,6 +899,14 @@ void NullabilityChecker::checkPostCall(const CallEvent &Call,
const NullabilityState *TrackedNullability =
State->get<NullabilityMap>(Region);
+ // ObjCMessageExpr gets the actual type through
+ // Sema::getMessageSendResultType, instead of using the return type of
+ // MethodDecl directly. The final type is generated by considering the
+ // nullability of receiver and MethodDecl together. Thus, The type of
+ // ObjCMessageExpr is prefer.
+ if (const Expr *E = Call.getOriginExpr())
+ ReturnType = E->getType();
+
if (!TrackedNullability &&
getNullabilityAnnotation(ReturnType) == Nullability::Nullable) {
State = State->set<NullabilityMap>(Region, Nullability::Nullable);
@@ -851,6 +943,30 @@ static Nullability getReceiverNullability(const ObjCMethodCall &M,
return Nullability::Unspecified;
}
+// The return value of a property access is typically a temporary value which
+// will not be tracked in a persistent manner by the analyzer. We use
+// evalAssume() in order to immediately record constraints on those temporaries
+// at the time they are imposed (e.g. by a nil-check conditional).
+ProgramStateRef NullabilityChecker::evalAssume(ProgramStateRef State, SVal Cond,
+ bool Assumption) const {
+ PropertyAccessesMapTy PropertyAccesses = State->get<PropertyAccessesMap>();
+ for (auto [PropKey, PropVal] : PropertyAccesses) {
+ if (!PropVal.isConstrainedNonnull) {
+ ConditionTruthVal IsNonNull = State->isNonNull(PropVal.Value);
+ if (IsNonNull.isConstrainedTrue()) {
+ ConstrainedPropertyVal Replacement = PropVal;
+ Replacement.isConstrainedNonnull = true;
+ State = State->set<PropertyAccessesMap>(PropKey, Replacement);
+ } else if (IsNonNull.isConstrainedFalse()) {
+ // Space optimization: no point in tracking constrained-null cases
+ State = State->remove<PropertyAccessesMap>(PropKey);
+ }
+ }
+ }
+
+ return State;
+}
+
/// Calculate the nullability of the result of a message expr based on the
/// nullability of the receiver, the nullability of the return value, and the
/// constraints.
@@ -860,7 +976,7 @@ void NullabilityChecker::checkPostObjCMessage(const ObjCMethodCall &M,
if (!Decl)
return;
QualType RetType = Decl->getReturnType();
- if (!RetType->isAnyPointerType())
+ if (!isValidPointerType(RetType))
return;
ProgramStateRef State = C.getState();
@@ -876,7 +992,7 @@ void NullabilityChecker::checkPostObjCMessage(const ObjCMethodCall &M,
// In order to reduce the noise in the diagnostics generated by this checker,
// some framework and programming style based heuristics are used. These
// heuristics are for Cocoa APIs which have NS prefix.
- if (Name.startswith("NS")) {
+ if (Name.starts_with("NS")) {
// Developers rely on dynamic invariants such as an item should be available
// in a collection, or a collection is not empty often. Those invariants can
// not be inferred by any static analysis tool. To not to bother the users
@@ -907,7 +1023,7 @@ void NullabilityChecker::checkPostObjCMessage(const ObjCMethodCall &M,
// this class of methods reduced the emitted diagnostics by about 30% on
// some projects (and all of that was false positives).
if (Name.contains("String")) {
- for (auto Param : M.parameters()) {
+ for (auto *Param : M.parameters()) {
if (Param->getName() == "encoding") {
State = State->set<NullabilityMap>(ReturnRegion,
Nullability::Contradicted);
@@ -945,14 +1061,55 @@ void NullabilityChecker::checkPostObjCMessage(const ObjCMethodCall &M,
}
// No tracked information. Use static type information for return value.
- Nullability RetNullability = getNullabilityAnnotation(RetType);
+ Nullability RetNullability = getNullabilityAnnotation(Message->getType());
- // Properties might be computed. For this reason the static analyzer creates a
- // new symbol each time an unknown property is read. To avoid false pozitives
- // do not treat unknown properties as nullable, even when they explicitly
- // marked nullable.
- if (M.getMessageKind() == OCM_PropertyAccess && !C.wasInlined)
- RetNullability = Nullability::Nonnull;
+ // Properties might be computed, which means the property value could
+ // theoretically change between calls even in commonly-observed cases like
+ // this:
+ //
+ // if (foo.prop) { // ok, it's nonnull here...
+ // [bar doStuffWithNonnullVal:foo.prop]; // ...but what about
+ // here?
+ // }
+ //
+ // If the property is nullable-annotated, a naive analysis would lead to many
+ // false positives despite the presence of probably-correct nil-checks. To
+ // reduce the false positive rate, we maintain a history of the most recently
+ // observed property value. For each property access, if the prior value has
+ // been constrained to be not nil then we will conservatively assume that the
+ // next access can be inferred as nonnull.
+ if (RetNullability != Nullability::Nonnull &&
+ M.getMessageKind() == OCM_PropertyAccess && !C.wasInlined) {
+ bool LookupResolved = false;
+ if (const MemRegion *ReceiverRegion = getTrackRegion(M.getReceiverSVal())) {
+ if (IdentifierInfo *Ident = M.getSelector().getIdentifierInfoForSlot(0)) {
+ LookupResolved = true;
+ ObjectPropPair Key = std::make_pair(ReceiverRegion, Ident);
+ const ConstrainedPropertyVal *PrevPropVal =
+ State->get<PropertyAccessesMap>(Key);
+ if (PrevPropVal && PrevPropVal->isConstrainedNonnull) {
+ RetNullability = Nullability::Nonnull;
+ } else {
+ // If a previous property access was constrained as nonnull, we hold
+ // on to that constraint (effectively inferring that all subsequent
+ // accesses on that code path can be inferred as nonnull). If the
+ // previous property access was *not* constrained as nonnull, then
+ // let's throw it away in favor of keeping the SVal associated with
+ // this more recent access.
+ if (auto ReturnSVal =
+ M.getReturnValue().getAs<DefinedOrUnknownSVal>()) {
+ State = State->set<PropertyAccessesMap>(
+ Key, ConstrainedPropertyVal(*ReturnSVal));
+ }
+ }
+ }
+ }
+
+ if (!LookupResolved) {
+ // Fallback: err on the side of suppressing the false positive.
+ RetNullability = Nullability::Nonnull;
+ }
+ }
Nullability ComputedNullab = getMostNullable(RetNullability, SelfNullability);
if (ComputedNullab == Nullability::Nullable) {
@@ -973,9 +1130,9 @@ void NullabilityChecker::checkPostStmt(const ExplicitCastExpr *CE,
CheckerContext &C) const {
QualType OriginType = CE->getSubExpr()->getType();
QualType DestType = CE->getType();
- if (!OriginType->isAnyPointerType())
+ if (!isValidPointerType(OriginType))
return;
- if (!DestType->isAnyPointerType())
+ if (!isValidPointerType(DestType))
return;
ProgramStateRef State = C.getState();
@@ -1099,7 +1256,7 @@ void NullabilityChecker::checkBind(SVal L, SVal V, const Stmt *S,
return;
QualType LocType = TVR->getValueType();
- if (!LocType->isAnyPointerType())
+ if (!isValidPointerType(LocType))
return;
ProgramStateRef State = C.getState();
@@ -1221,9 +1378,9 @@ void NullabilityChecker::printState(raw_ostream &Out, ProgramStateRef State,
if (!State->get<InvariantViolated>())
Out << Sep << NL;
- for (NullabilityMapTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
- Out << I->first << " : ";
- I->second.print(Out);
+ for (auto [Region, State] : B) {
+ Out << Region << " : ";
+ State.print(Out);
Out << NL;
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
index df01cc760e7e..f217520d8f4a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
@@ -143,7 +143,7 @@ void Callback::run(const MatchFinder::MatchResult &Result) {
else
OS << "Converting ";
- OS << "a pointer value of type '" << ObjT.getAsString() << "' to a ";
+ OS << "a pointer value of type '" << ObjT << "' to a ";
std::string EuphemismForPlain = "primitive";
std::string SuggestedApi = IsObjC ? (IsInteger ? "" : "-boolValue")
@@ -196,12 +196,10 @@ void NumberObjectConversionChecker::checkASTCodeBody(const Decl *D,
AnalysisManager &AM,
BugReporter &BR) const {
// Currently this matches CoreFoundation opaque pointer typedefs.
- auto CSuspiciousNumberObjectExprM =
- expr(ignoringParenImpCasts(
- expr(hasType(
- typedefType(hasDeclaration(anyOf(
- typedefDecl(hasName("CFNumberRef")),
- typedefDecl(hasName("CFBooleanRef")))))))
+ auto CSuspiciousNumberObjectExprM = expr(ignoringParenImpCasts(
+ expr(hasType(elaboratedType(namesType(typedefType(
+ hasDeclaration(anyOf(typedefDecl(hasName("CFNumberRef")),
+ typedefDecl(hasName("CFBooleanRef")))))))))
.bind("c_object")));
// Currently this matches XNU kernel number-object pointers.
@@ -240,8 +238,9 @@ void NumberObjectConversionChecker::checkASTCodeBody(const Decl *D,
// The .bind here is in order to compose the error message more accurately.
auto ObjCSuspiciousScalarBooleanTypeM =
- qualType(typedefType(hasDeclaration(
- typedefDecl(hasName("BOOL"))))).bind("objc_bool_type");
+ qualType(elaboratedType(namesType(
+ typedefType(hasDeclaration(typedefDecl(hasName("BOOL")))))))
+ .bind("objc_bool_type");
// The .bind here is in order to compose the error message more accurately.
auto SuspiciousScalarBooleanTypeM =
@@ -253,9 +252,9 @@ void NumberObjectConversionChecker::checkASTCodeBody(const Decl *D,
// for storing pointers.
auto SuspiciousScalarNumberTypeM =
qualType(hasCanonicalType(isInteger()),
- unless(typedefType(hasDeclaration(
- typedefDecl(matchesName("^::u?intptr_t$"))))))
- .bind("int_type");
+ unless(elaboratedType(namesType(typedefType(hasDeclaration(
+ typedefDecl(matchesName("^::u?intptr_t$"))))))))
+ .bind("int_type");
auto SuspiciousScalarTypeM =
qualType(anyOf(SuspiciousScalarBooleanTypeM,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
index 43af4bb14286..552c222a251a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
@@ -25,8 +25,10 @@ using namespace ento;
namespace {
class ObjCAtSyncChecker
: public Checker< check::PreStmt<ObjCAtSynchronizedStmt> > {
- mutable std::unique_ptr<BuiltinBug> BT_null;
- mutable std::unique_ptr<BuiltinBug> BT_undef;
+ const BugType BT_null{this, "Nil value used as mutex for @synchronized() "
+ "(no synchronization will occur)"};
+ const BugType BT_undef{this, "Uninitialized value used as mutex "
+ "for @synchronized"};
public:
void checkPreStmt(const ObjCAtSynchronizedStmt *S, CheckerContext &C) const;
@@ -41,13 +43,10 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
SVal V = C.getSVal(Ex);
// Uninitialized value used for the mutex?
- if (V.getAs<UndefinedVal>()) {
+ if (isa<UndefinedVal>(V)) {
if (ExplodedNode *N = C.generateErrorNode()) {
- if (!BT_undef)
- BT_undef.reset(new BuiltinBug(this, "Uninitialized value used as mutex "
- "for @synchronized"));
auto report = std::make_unique<PathSensitiveBugReport>(
- *BT_undef, BT_undef->getDescription(), N);
+ BT_undef, BT_undef.getDescription(), N);
bugreporter::trackExpressionValue(N, Ex, *report);
C.emitReport(std::move(report));
}
@@ -66,12 +65,8 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
// Generate an error node. This isn't a sink since
// a null mutex just means no synchronization occurs.
if (ExplodedNode *N = C.generateNonFatalErrorNode(nullState)) {
- if (!BT_null)
- BT_null.reset(new BuiltinBug(
- this, "Nil value used as mutex for @synchronized() "
- "(no synchronization will occur)"));
auto report = std::make_unique<PathSensitiveBugReport>(
- *BT_null, BT_null->getDescription(), N);
+ BT_null, BT_null.getDescription(), N);
bugreporter::trackExpressionValue(N, Ex, *report);
C.emitReport(std::move(report));
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
index c8eab3288094..514f53b4804f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
@@ -98,11 +98,13 @@ private:
};
}
-static inline std::vector<llvm::StringRef> toRefs(std::vector<std::string> V) {
+static inline std::vector<llvm::StringRef>
+toRefs(const std::vector<std::string> &V) {
return std::vector<llvm::StringRef>(V.begin(), V.end());
}
-static decltype(auto) callsNames(std::vector<std::string> FunctionNames) {
+static decltype(auto)
+callsNames(const std::vector<std::string> &FunctionNames) {
return callee(functionDecl(hasAnyName(toRefs(FunctionNames))));
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
index 8428b2294ba6..2b008d1c775a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
@@ -72,7 +72,7 @@ class WalkAST : public StmtVisitor<WalkAST> {
public:
WalkAST(BugReporter &br, const CheckerBase *checker, AnalysisDeclContext *ac)
: BR(br), Checker(checker), AC(ac), ASTC(AC->getASTContext()),
- PtrWidth(ASTC.getTargetInfo().getPointerWidth(0)) {}
+ PtrWidth(ASTC.getTargetInfo().getPointerWidth(LangAS::Default)) {}
// Statement visitor methods.
void VisitChildren(Stmt *S);
@@ -135,9 +135,9 @@ void WalkAST::VisitCallExpr(CallExpr *CE) {
llvm::raw_svector_ostream Os(Buf);
// Use "second" and "third" since users will expect 1-based indexing
// for parameter names when mentioned in prose.
- Os << " The "<< ((ArgNum == 1) ? "second" : "third") << " argument to '"
- << Name << "' must be a C array of pointer-sized values, not '"
- << Arg->getType().getAsString() << "'";
+ Os << " The " << ((ArgNum == 1) ? "second" : "third") << " argument to '"
+ << Name << "' must be a C array of pointer-sized values, not '"
+ << Arg->getType() << "'";
PathDiagnosticLocation CELoc =
PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
index 13985af76b00..28e88245ca95 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
@@ -30,12 +30,7 @@ namespace {
class ObjCContainersChecker : public Checker< check::PreStmt<CallExpr>,
check::PostStmt<CallExpr>,
check::PointerEscape> {
- mutable std::unique_ptr<BugType> BT;
- inline void initBugType() const {
- if (!BT)
- BT.reset(new BugType(this, "CFArray API",
- categories::CoreFoundationObjectiveC));
- }
+ const BugType BT{this, "CFArray API", categories::CoreFoundationObjectiveC};
inline SymbolRef getArraySym(const Expr *E, CheckerContext &C) const {
SVal ArrayRef = C.getSVal(E);
@@ -47,9 +42,6 @@ class ObjCContainersChecker : public Checker< check::PreStmt<CallExpr>,
CheckerContext &C) const;
public:
- /// A tag to id this checker.
- static void *getTag() { static int Tag; return &Tag; }
-
void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
ProgramStateRef checkPointerEscape(ProgramStateRef State,
@@ -137,15 +129,15 @@ void ObjCContainersChecker::checkPreStmt(const CallExpr *CE,
// Now, check if 'Idx in [0, Size-1]'.
const QualType T = IdxExpr->getType();
- ProgramStateRef StInBound = State->assumeInBound(Idx, *Size, true, T);
- ProgramStateRef StOutBound = State->assumeInBound(Idx, *Size, false, T);
+ ProgramStateRef StInBound, StOutBound;
+ std::tie(StInBound, StOutBound) = State->assumeInBoundDual(Idx, *Size, T);
if (StOutBound && !StInBound) {
ExplodedNode *N = C.generateErrorNode(StOutBound);
if (!N)
return;
- initBugType();
+
auto R = std::make_unique<PathSensitiveBugReport>(
- *BT, "Index is out of bounds", N);
+ BT, "Index is out of bounds", N);
R->addRange(IdxExpr->getSourceRange());
bugreporter::trackExpressionValue(N, IdxExpr, *R,
{bugreporter::TrackingKind::Thorough,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
index 35a600f2d7b8..598b368e74d4 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
@@ -64,7 +64,7 @@ private:
class ObjCSuperCallChecker : public Checker<
check::ASTDecl<ObjCImplementationDecl> > {
public:
- ObjCSuperCallChecker() : IsInitialized(false) {}
+ ObjCSuperCallChecker() = default;
void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager &Mgr,
BugReporter &BR) const;
@@ -75,7 +75,7 @@ private:
void fillSelectors(ASTContext &Ctx, ArrayRef<SelectorDescriptor> Sel,
StringRef ClassName) const;
mutable llvm::StringMap<llvm::SmallPtrSet<Selector, 16>> SelectorsForClass;
- mutable bool IsInitialized;
+ mutable bool IsInitialized = false;
};
}
@@ -103,9 +103,7 @@ void ObjCSuperCallChecker::fillSelectors(ASTContext &Ctx,
llvm::SmallPtrSet<Selector, 16> &ClassSelectors =
SelectorsForClass[ClassName];
// Fill the Selectors SmallSet with all selectors we want to check.
- for (ArrayRef<SelectorDescriptor>::iterator I = Sel.begin(), E = Sel.end();
- I != E; ++I) {
- SelectorDescriptor Descriptor = *I;
+ for (SelectorDescriptor Descriptor : Sel) {
assert(Descriptor.ArgumentCount <= 1); // No multi-argument selectors yet.
// Get the selector.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp
index 4636fd160511..08ad6877cbe6 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp
@@ -50,7 +50,7 @@ void ObjCPropertyChecker::checkCopyMutable(const ObjCPropertyDecl *D,
const std::string &PropTypeName(T->getPointeeType().getCanonicalType()
.getUnqualifiedType()
.getAsString());
- if (!StringRef(PropTypeName).startswith("NSMutable"))
+ if (!StringRef(PropTypeName).starts_with("NSMutable"))
return;
const ObjCImplDecl *ImplD = nullptr;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
index 17d3c042ac40..217c46451f80 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
@@ -61,13 +61,13 @@ class ObjCSelfInitChecker : public Checker< check::PostObjCMessage,
check::PostCall,
check::Location,
check::Bind > {
- mutable std::unique_ptr<BugType> BT;
+ const BugType BT{this, "Missing \"self = [(super or self) init...]\"",
+ categories::CoreFoundationObjectiveC};
void checkForInvalidSelf(const Expr *E, CheckerContext &C,
const char *errorStr) const;
public:
- ObjCSelfInitChecker() {}
void checkPostObjCMessage(const ObjCMethodCall &Msg, CheckerContext &C) const;
void checkPostStmt(const ObjCIvarRefExpr *E, CheckerContext &C) const;
void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
@@ -94,19 +94,19 @@ enum SelfFlagEnum {
};
}
-REGISTER_MAP_WITH_PROGRAMSTATE(SelfFlag, SymbolRef, unsigned)
+REGISTER_MAP_WITH_PROGRAMSTATE(SelfFlag, SymbolRef, SelfFlagEnum)
REGISTER_TRAIT_WITH_PROGRAMSTATE(CalledInit, bool)
/// A call receiving a reference to 'self' invalidates the object that
/// 'self' contains. This keeps the "self flags" assigned to the 'self'
/// object before the call so we can assign them to the new object that 'self'
/// points to after the call.
-REGISTER_TRAIT_WITH_PROGRAMSTATE(PreCallSelfFlags, unsigned)
+REGISTER_TRAIT_WITH_PROGRAMSTATE(PreCallSelfFlags, SelfFlagEnum)
static SelfFlagEnum getSelfFlags(SVal val, ProgramStateRef state) {
if (SymbolRef sym = val.getAsSymbol())
- if (const unsigned *attachedFlags = state->get<SelfFlag>(sym))
- return (SelfFlagEnum)*attachedFlags;
+ if (const SelfFlagEnum *attachedFlags = state->get<SelfFlag>(sym))
+ return *attachedFlags;
return SelfFlag_None;
}
@@ -118,7 +118,8 @@ static void addSelfFlag(ProgramStateRef state, SVal val,
SelfFlagEnum flag, CheckerContext &C) {
// We tag the symbol that the SVal wraps.
if (SymbolRef sym = val.getAsSymbol()) {
- state = state->set<SelfFlag>(sym, getSelfFlags(val, state) | flag);
+ state = state->set<SelfFlag>(sym,
+ SelfFlagEnum(getSelfFlags(val, state) | flag));
C.addTransition(state);
}
}
@@ -156,10 +157,7 @@ void ObjCSelfInitChecker::checkForInvalidSelf(const Expr *E, CheckerContext &C,
if (!N)
return;
- if (!BT)
- BT.reset(new BugType(this, "Missing \"self = [(super or self) init...]\"",
- categories::CoreFoundationObjectiveC));
- C.emitReport(std::make_unique<PathSensitiveBugReport>(*BT, errorStr, N));
+ C.emitReport(std::make_unique<PathSensitiveBugReport>(BT, errorStr, N));
}
void ObjCSelfInitChecker::checkPostObjCMessage(const ObjCMethodCall &Msg,
@@ -251,11 +249,12 @@ void ObjCSelfInitChecker::checkPreCall(const CallEvent &CE,
for (unsigned i = 0; i < NumArgs; ++i) {
SVal argV = CE.getArgSVal(i);
if (isSelfVar(argV, C)) {
- unsigned selfFlags = getSelfFlags(state->getSVal(argV.castAs<Loc>()), C);
+ SelfFlagEnum selfFlags =
+ getSelfFlags(state->getSVal(argV.castAs<Loc>()), C);
C.addTransition(state->set<PreCallSelfFlags>(selfFlags));
return;
} else if (hasSelfFlag(argV, SelfFlag_Self, C)) {
- unsigned selfFlags = getSelfFlags(argV, C);
+ SelfFlagEnum selfFlags = getSelfFlags(argV, C);
C.addTransition(state->set<PreCallSelfFlags>(selfFlags));
return;
}
@@ -270,7 +269,7 @@ void ObjCSelfInitChecker::checkPostCall(const CallEvent &CE,
return;
ProgramStateRef state = C.getState();
- SelfFlagEnum prevFlags = (SelfFlagEnum)state->get<PreCallSelfFlags>();
+ SelfFlagEnum prevFlags = state->get<PreCallSelfFlags>();
if (!prevFlags)
return;
state = state->remove<PreCallSelfFlags>();
@@ -338,7 +337,7 @@ void ObjCSelfInitChecker::printState(raw_ostream &Out, ProgramStateRef State,
const char *NL, const char *Sep) const {
SelfFlagTy FlagMap = State->get<SelfFlag>();
bool DidCallInit = State->get<CalledInit>();
- SelfFlagEnum PreCallFlags = (SelfFlagEnum)State->get<PreCallSelfFlags>();
+ SelfFlagEnum PreCallFlags = State->get<PreCallSelfFlags>();
if (FlagMap.isEmpty() && !DidCallInit && !PreCallFlags)
return;
@@ -360,18 +359,17 @@ void ObjCSelfInitChecker::printState(raw_ostream &Out, ProgramStateRef State,
}
Out << NL;
- for (SelfFlagTy::iterator I = FlagMap.begin(), E = FlagMap.end();
- I != E; ++I) {
- Out << I->first << " : ";
+ for (auto [Sym, Flag] : FlagMap) {
+ Out << Sym << " : ";
- if (I->second == SelfFlag_None)
+ if (Flag == SelfFlag_None)
Out << "none";
- if (I->second & SelfFlag_Self)
+ if (Flag & SelfFlag_Self)
Out << "self variable";
- if (I->second & SelfFlag_InitRes) {
- if (I->second != SelfFlag_InitRes)
+ if (Flag & SelfFlag_InitRes) {
+ if (Flag != SelfFlag_InitRes)
Out << " | ";
Out << "result of init method";
}
@@ -411,7 +409,7 @@ static bool isSelfVar(SVal location, CheckerContext &C) {
AnalysisDeclContext *analCtx = C.getCurrentAnalysisDeclContext();
if (!analCtx->getSelfDecl())
return false;
- if (!location.getAs<loc::MemRegionVal>())
+ if (!isa<loc::MemRegionVal>(location))
return false;
loc::MemRegionVal MRV = location.castAs<loc::MemRegionVal>();
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
index 3547b7bb61a2..eb40711812e1 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
@@ -26,18 +26,19 @@ namespace {
class ObjCSuperDeallocChecker
: public Checker<check::PostObjCMessage, check::PreObjCMessage,
check::PreCall, check::Location> {
-
- mutable IdentifierInfo *IIdealloc, *IINSObject;
+ mutable IdentifierInfo *IIdealloc = nullptr;
+ mutable IdentifierInfo *IINSObject = nullptr;
mutable Selector SELdealloc;
- std::unique_ptr<BugType> DoubleSuperDeallocBugType;
+ const BugType DoubleSuperDeallocBugType{
+ this, "[super dealloc] should not be called more than once",
+ categories::CoreFoundationObjectiveC};
void initIdentifierInfoAndSelectors(ASTContext &Ctx) const;
bool isSuperDeallocMessage(const ObjCMethodCall &M) const;
public:
- ObjCSuperDeallocChecker();
void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
void checkPreObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
@@ -188,7 +189,7 @@ void ObjCSuperDeallocChecker::reportUseAfterDealloc(SymbolRef Sym,
Desc = "Use of 'self' after it has been deallocated";
// Generate the report.
- auto BR = std::make_unique<PathSensitiveBugReport>(*DoubleSuperDeallocBugType,
+ auto BR = std::make_unique<PathSensitiveBugReport>(DoubleSuperDeallocBugType,
Desc, ErrNode);
BR->addRange(S->getSourceRange());
BR->addVisitor(std::make_unique<SuperDeallocBRVisitor>(Sym));
@@ -213,14 +214,6 @@ void ObjCSuperDeallocChecker::diagnoseCallArguments(const CallEvent &CE,
}
}
-ObjCSuperDeallocChecker::ObjCSuperDeallocChecker()
- : IIdealloc(nullptr), IINSObject(nullptr) {
-
- DoubleSuperDeallocBugType.reset(
- new BugType(this, "[super dealloc] should not be called more than once",
- categories::CoreFoundationObjectiveC));
-}
-
void
ObjCSuperDeallocChecker::initIdentifierInfoAndSelectors(ASTContext &Ctx) const {
if (IIdealloc)
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
index c9828c36a06a..1c2d84254d46 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
@@ -12,16 +12,17 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
-#include "clang/Analysis/PathDiagnostic.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/Analysis/PathDiagnostic.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "llvm/ADT/STLExtras.h"
using namespace clang;
using namespace ento;
@@ -48,9 +49,7 @@ static void Scan(IvarUsageMap& M, const Stmt *S) {
}
if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(S))
- for (PseudoObjectExpr::const_semantics_iterator
- i = POE->semantics_begin(), e = POE->semantics_end(); i != e; ++i) {
- const Expr *sub = *i;
+ for (const Expr *sub : POE->semantics()) {
if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(sub))
sub = OVE->getSourceExpr();
Scan(M, sub);
@@ -134,8 +133,8 @@ static void checkObjCUnusedIvar(const ObjCImplementationDecl *D,
// Any potentially unused ivars?
bool hasUnused = false;
- for (IvarUsageMap::iterator I = M.begin(), E = M.end(); I!=E; ++I)
- if (I->second == Unused) {
+ for (IVarState State : llvm::make_second_range(M))
+ if (State == Unused) {
hasUnused = true;
break;
}
@@ -152,16 +151,16 @@ static void checkObjCUnusedIvar(const ObjCImplementationDecl *D,
Scan(M, D->getDeclContext(), SM.getFileID(D->getLocation()), SM);
// Find ivars that are unused.
- for (IvarUsageMap::iterator I = M.begin(), E = M.end(); I!=E; ++I)
- if (I->second == Unused) {
+ for (auto [Ivar, State] : M)
+ if (State == Unused) {
std::string sbuf;
llvm::raw_string_ostream os(sbuf);
- os << "Instance variable '" << *I->first << "' in class '" << *ID
+ os << "Instance variable '" << *Ivar << "' in class '" << *ID
<< "' is never used by the methods in its @implementation "
"(although it may be used by category methods).";
PathDiagnosticLocation L =
- PathDiagnosticLocation::create(I->first, BR.getSourceManager());
+ PathDiagnosticLocation::create(Ivar, BR.getSourceManager());
BR.EmitBasicReport(D, Checker, "Unused instance variable", "Optimization",
os.str(), L);
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
index 40472ccfe7e6..eee9449f3180 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
@@ -32,7 +32,7 @@ using namespace ento;
namespace {
class PaddingChecker : public Checker<check::ASTDecl<TranslationUnitDecl>> {
private:
- mutable std::unique_ptr<BugType> PaddingBug;
+ const BugType PaddingBug{this, "Excessive Padding", "Performance"};
mutable BugReporter *BR;
public:
@@ -182,7 +182,7 @@ public:
return false;
};
- if (std::any_of(RD->field_begin(), RD->field_end(), IsTrickyField))
+ if (llvm::any_of(RD->fields(), IsTrickyField))
return true;
return false;
}
@@ -273,7 +273,7 @@ public:
SmallVector<const FieldDecl *, 20> OptimalFieldsOrder;
while (!Fields.empty()) {
unsigned TrailingZeros =
- llvm::countTrailingZeros((unsigned long long)NewOffset.getQuantity());
+ llvm::countr_zero((unsigned long long)NewOffset.getQuantity());
// If NewOffset is zero, then countTrailingZeros will be 64. Shifting
// 64 will overflow our unsigned long long. Shifting 63 will turn
// our long long (and CharUnits internal type) negative. So shift 62.
@@ -310,10 +310,6 @@ public:
void reportRecord(
const RecordDecl *RD, CharUnits BaselinePad, CharUnits OptimalPad,
const SmallVector<const FieldDecl *, 20> &OptimalFieldsOrder) const {
- if (!PaddingBug)
- PaddingBug =
- std::make_unique<BugType>(this, "Excessive Padding", "Performance");
-
SmallString<100> Buf;
llvm::raw_svector_ostream Os(Buf);
Os << "Excessive padding in '";
@@ -332,17 +328,16 @@ public:
}
Os << " (" << BaselinePad.getQuantity() << " padding bytes, where "
- << OptimalPad.getQuantity() << " is optimal). \n"
- << "Optimal fields order: \n";
+ << OptimalPad.getQuantity() << " is optimal). "
+ << "Optimal fields order: ";
for (const auto *FD : OptimalFieldsOrder)
- Os << FD->getName() << ", \n";
+ Os << FD->getName() << ", ";
Os << "consider reordering the fields or adding explicit padding "
"members.";
PathDiagnosticLocation CELoc =
PathDiagnosticLocation::create(RD, BR->getSourceManager());
- auto Report =
- std::make_unique<BasicBugReport>(*PaddingBug, Os.str(), CELoc);
+ auto Report = std::make_unique<BasicBugReport>(PaddingBug, Os.str(), CELoc);
Report->setDeclWithIssue(RD);
Report->addRange(RD->getSourceRange());
BR->emitReport(std::move(Report));
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
index d3e2849a0ce6..1141f07428b4 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
@@ -11,13 +11,14 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/StringRef.h"
using namespace clang;
using namespace ento;
@@ -55,8 +56,8 @@ class PointerArithChecker
bool PointedNeeded = false) const;
void initAllocIdentifiers(ASTContext &C) const;
- mutable std::unique_ptr<BuiltinBug> BT_pointerArith;
- mutable std::unique_ptr<BuiltinBug> BT_polyArray;
+ const BugType BT_pointerArith{this, "Dangerous pointer arithmetic"};
+ const BugType BT_polyArray{this, "Dangerous pointer arithmetic"};
mutable llvm::SmallSet<IdentifierInfo *, 8> AllocFunctions;
public:
@@ -79,10 +80,9 @@ void PointerArithChecker::checkDeadSymbols(SymbolReaper &SR,
// see http://reviews.llvm.org/D14203 for further information.
/*ProgramStateRef State = C.getState();
RegionStateTy RegionStates = State->get<RegionState>();
- for (RegionStateTy::iterator I = RegionStates.begin(), E = RegionStates.end();
- I != E; ++I) {
- if (!SR.isLiveRegion(I->first))
- State = State->remove<RegionState>(I->first);
+ for (const MemRegion *Reg: llvm::make_first_range(RegionStates)) {
+ if (!SR.isLiveRegion(Reg))
+ State = State->remove<RegionState>(Reg);
}
C.addTransition(State);*/
}
@@ -168,13 +168,10 @@ void PointerArithChecker::reportPointerArithMisuse(const Expr *E,
if (!IsPolymorphic)
return;
if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
- if (!BT_polyArray)
- BT_polyArray.reset(new BuiltinBug(
- this, "Dangerous pointer arithmetic",
- "Pointer arithmetic on a pointer to base class is dangerous "
- "because derived and base class may have different size."));
- auto R = std::make_unique<PathSensitiveBugReport>(
- *BT_polyArray, BT_polyArray->getDescription(), N);
+ constexpr llvm::StringLiteral Msg =
+ "Pointer arithmetic on a pointer to base class is dangerous "
+ "because derived and base class may have different size.";
+ auto R = std::make_unique<PathSensitiveBugReport>(BT_polyArray, Msg, N);
R->addRange(E->getSourceRange());
R->markInteresting(ArrayRegion);
C.emitReport(std::move(R));
@@ -191,13 +188,10 @@ void PointerArithChecker::reportPointerArithMisuse(const Expr *E,
return;
if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
- if (!BT_pointerArith)
- BT_pointerArith.reset(new BuiltinBug(this, "Dangerous pointer arithmetic",
- "Pointer arithmetic on non-array "
- "variables relies on memory layout, "
- "which is dangerous."));
- auto R = std::make_unique<PathSensitiveBugReport>(
- *BT_pointerArith, BT_pointerArith->getDescription(), N);
+ constexpr llvm::StringLiteral Msg =
+ "Pointer arithmetic on non-array variables relies on memory layout, "
+ "which is dangerous.";
+ auto R = std::make_unique<PathSensitiveBugReport>(BT_pointerArith, Msg, N);
R->addRange(SR);
R->markInteresting(Region);
C.emitReport(std::move(R));
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
index 81c19d9a0940..2438cf30b39b 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
@@ -17,6 +17,7 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/StringRef.h"
using namespace clang;
using namespace ento;
@@ -24,7 +25,7 @@ using namespace ento;
namespace {
class PointerSubChecker
: public Checker< check::PreStmt<BinaryOperator> > {
- mutable std::unique_ptr<BuiltinBug> BT;
+ const BugType BT{this, "Pointer subtraction"};
public:
void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
@@ -58,13 +59,10 @@ void PointerSubChecker::checkPreStmt(const BinaryOperator *B,
return;
if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
- if (!BT)
- BT.reset(
- new BuiltinBug(this, "Pointer subtraction",
- "Subtraction of two pointers that do not point to "
- "the same memory chunk may cause incorrect result."));
- auto R =
- std::make_unique<PathSensitiveBugReport>(*BT, BT->getDescription(), N);
+ constexpr llvm::StringLiteral Msg =
+ "Subtraction of two pointers that do not point to the same memory "
+ "chunk may cause incorrect result.";
+ auto R = std::make_unique<PathSensitiveBugReport>(BT, Msg, N);
R->addRange(B->getSourceRange());
C.emitReport(std::move(R));
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
index ee71b55a39e6..fa8572cf85ed 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
@@ -21,6 +21,7 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
@@ -77,7 +78,7 @@ public:
CK_C11LockChecker,
CK_NumCheckKinds
};
- DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ bool ChecksEnabled[CK_NumCheckKinds] = {false};
CheckerNameRef CheckNames[CK_NumCheckKinds];
private:
@@ -86,7 +87,7 @@ private:
CheckerKind CheckKind) const;
CallDescriptionMap<FnCheck> PThreadCallbacks = {
// Init.
- {{"pthread_mutex_init", 2}, &PthreadLockChecker::InitAnyLock},
+ {{{"pthread_mutex_init"}, 2}, &PthreadLockChecker::InitAnyLock},
// TODO: pthread_rwlock_init(2 arguments).
// TODO: lck_mtx_init(3 arguments).
// TODO: lck_mtx_alloc_init(2 arguments) => returns the mutex.
@@ -94,74 +95,74 @@ private:
// TODO: lck_rw_alloc_init(2 arguments) => returns the mutex.
// Acquire.
- {{"pthread_mutex_lock", 1}, &PthreadLockChecker::AcquirePthreadLock},
- {{"pthread_rwlock_rdlock", 1}, &PthreadLockChecker::AcquirePthreadLock},
- {{"pthread_rwlock_wrlock", 1}, &PthreadLockChecker::AcquirePthreadLock},
- {{"lck_mtx_lock", 1}, &PthreadLockChecker::AcquireXNULock},
- {{"lck_rw_lock_exclusive", 1}, &PthreadLockChecker::AcquireXNULock},
- {{"lck_rw_lock_shared", 1}, &PthreadLockChecker::AcquireXNULock},
+ {{{"pthread_mutex_lock"}, 1}, &PthreadLockChecker::AcquirePthreadLock},
+ {{{"pthread_rwlock_rdlock"}, 1}, &PthreadLockChecker::AcquirePthreadLock},
+ {{{"pthread_rwlock_wrlock"}, 1}, &PthreadLockChecker::AcquirePthreadLock},
+ {{{"lck_mtx_lock"}, 1}, &PthreadLockChecker::AcquireXNULock},
+ {{{"lck_rw_lock_exclusive"}, 1}, &PthreadLockChecker::AcquireXNULock},
+ {{{"lck_rw_lock_shared"}, 1}, &PthreadLockChecker::AcquireXNULock},
// Try.
- {{"pthread_mutex_trylock", 1}, &PthreadLockChecker::TryPthreadLock},
- {{"pthread_rwlock_tryrdlock", 1}, &PthreadLockChecker::TryPthreadLock},
- {{"pthread_rwlock_trywrlock", 1}, &PthreadLockChecker::TryPthreadLock},
- {{"lck_mtx_try_lock", 1}, &PthreadLockChecker::TryXNULock},
- {{"lck_rw_try_lock_exclusive", 1}, &PthreadLockChecker::TryXNULock},
- {{"lck_rw_try_lock_shared", 1}, &PthreadLockChecker::TryXNULock},
+ {{{"pthread_mutex_trylock"}, 1}, &PthreadLockChecker::TryPthreadLock},
+ {{{"pthread_rwlock_tryrdlock"}, 1}, &PthreadLockChecker::TryPthreadLock},
+ {{{"pthread_rwlock_trywrlock"}, 1}, &PthreadLockChecker::TryPthreadLock},
+ {{{"lck_mtx_try_lock"}, 1}, &PthreadLockChecker::TryXNULock},
+ {{{"lck_rw_try_lock_exclusive"}, 1}, &PthreadLockChecker::TryXNULock},
+ {{{"lck_rw_try_lock_shared"}, 1}, &PthreadLockChecker::TryXNULock},
// Release.
- {{"pthread_mutex_unlock", 1}, &PthreadLockChecker::ReleaseAnyLock},
- {{"pthread_rwlock_unlock", 1}, &PthreadLockChecker::ReleaseAnyLock},
- {{"lck_mtx_unlock", 1}, &PthreadLockChecker::ReleaseAnyLock},
- {{"lck_rw_unlock_exclusive", 1}, &PthreadLockChecker::ReleaseAnyLock},
- {{"lck_rw_unlock_shared", 1}, &PthreadLockChecker::ReleaseAnyLock},
- {{"lck_rw_done", 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{{"pthread_mutex_unlock"}, 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{{"pthread_rwlock_unlock"}, 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{{"lck_mtx_unlock"}, 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{{"lck_rw_unlock_exclusive"}, 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{{"lck_rw_unlock_shared"}, 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{{"lck_rw_done"}, 1}, &PthreadLockChecker::ReleaseAnyLock},
// Destroy.
- {{"pthread_mutex_destroy", 1}, &PthreadLockChecker::DestroyPthreadLock},
- {{"lck_mtx_destroy", 2}, &PthreadLockChecker::DestroyXNULock},
+ {{{"pthread_mutex_destroy"}, 1}, &PthreadLockChecker::DestroyPthreadLock},
+ {{{"lck_mtx_destroy"}, 2}, &PthreadLockChecker::DestroyXNULock},
// TODO: pthread_rwlock_destroy(1 argument).
// TODO: lck_rw_destroy(2 arguments).
};
CallDescriptionMap<FnCheck> FuchsiaCallbacks = {
// Init.
- {{"spin_lock_init", 1}, &PthreadLockChecker::InitAnyLock},
+ {{{"spin_lock_init"}, 1}, &PthreadLockChecker::InitAnyLock},
// Acquire.
- {{"spin_lock", 1}, &PthreadLockChecker::AcquirePthreadLock},
- {{"spin_lock_save", 3}, &PthreadLockChecker::AcquirePthreadLock},
- {{"sync_mutex_lock", 1}, &PthreadLockChecker::AcquirePthreadLock},
- {{"sync_mutex_lock_with_waiter", 1},
+ {{{"spin_lock"}, 1}, &PthreadLockChecker::AcquirePthreadLock},
+ {{{"spin_lock_save"}, 3}, &PthreadLockChecker::AcquirePthreadLock},
+ {{{"sync_mutex_lock"}, 1}, &PthreadLockChecker::AcquirePthreadLock},
+ {{{"sync_mutex_lock_with_waiter"}, 1},
&PthreadLockChecker::AcquirePthreadLock},
// Try.
- {{"spin_trylock", 1}, &PthreadLockChecker::TryFuchsiaLock},
- {{"sync_mutex_trylock", 1}, &PthreadLockChecker::TryFuchsiaLock},
- {{"sync_mutex_timedlock", 2}, &PthreadLockChecker::TryFuchsiaLock},
+ {{{"spin_trylock"}, 1}, &PthreadLockChecker::TryFuchsiaLock},
+ {{{"sync_mutex_trylock"}, 1}, &PthreadLockChecker::TryFuchsiaLock},
+ {{{"sync_mutex_timedlock"}, 2}, &PthreadLockChecker::TryFuchsiaLock},
// Release.
- {{"spin_unlock", 1}, &PthreadLockChecker::ReleaseAnyLock},
- {{"spin_unlock_restore", 3}, &PthreadLockChecker::ReleaseAnyLock},
- {{"sync_mutex_unlock", 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{{"spin_unlock"}, 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{{"spin_unlock_restore"}, 3}, &PthreadLockChecker::ReleaseAnyLock},
+ {{{"sync_mutex_unlock"}, 1}, &PthreadLockChecker::ReleaseAnyLock},
};
CallDescriptionMap<FnCheck> C11Callbacks = {
// Init.
- {{"mtx_init", 2}, &PthreadLockChecker::InitAnyLock},
+ {{{"mtx_init"}, 2}, &PthreadLockChecker::InitAnyLock},
// Acquire.
- {{"mtx_lock", 1}, &PthreadLockChecker::AcquirePthreadLock},
+ {{{"mtx_lock"}, 1}, &PthreadLockChecker::AcquirePthreadLock},
// Try.
- {{"mtx_trylock", 1}, &PthreadLockChecker::TryC11Lock},
- {{"mtx_timedlock", 2}, &PthreadLockChecker::TryC11Lock},
+ {{{"mtx_trylock"}, 1}, &PthreadLockChecker::TryC11Lock},
+ {{{"mtx_timedlock"}, 2}, &PthreadLockChecker::TryC11Lock},
// Release.
- {{"mtx_unlock", 1}, &PthreadLockChecker::ReleaseAnyLock},
+ {{{"mtx_unlock"}, 1}, &PthreadLockChecker::ReleaseAnyLock},
// Destroy
- {{"mtx_destroy", 1}, &PthreadLockChecker::DestroyPthreadLock},
+ {{{"mtx_destroy"}, 1}, &PthreadLockChecker::DestroyPthreadLock},
};
ProgramStateRef resolvePossiblyDestroyedMutex(ProgramStateRef state,
@@ -290,6 +291,7 @@ ProgramStateRef PthreadLockChecker::resolvePossiblyDestroyedMutex(
// Existence in DestroyRetVal ensures existence in LockMap.
// Existence in Destroyed also ensures that the lock state for lockR is either
// UntouchedAndPossiblyDestroyed or UnlockedAndPossiblyDestroyed.
+ assert(lstate);
assert(lstate->isUntouchedAndPossiblyDestroyed() ||
lstate->isUnlockedAndPossiblyDestroyed());
@@ -681,9 +683,7 @@ ProgramStateRef PthreadLockChecker::checkRegionChanges(
// We assume that system library function wouldn't touch the mutex unless
// it takes the mutex explicitly as an argument.
// FIXME: This is a bit quadratic.
- if (IsLibraryFunction &&
- std::find(ExplicitRegions.begin(), ExplicitRegions.end(), R) ==
- ExplicitRegions.end())
+ if (IsLibraryFunction && !llvm::is_contained(ExplicitRegions, R))
continue;
State = State->remove<LockMap>(R);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
index 3f3267ff9391..7e74b418b335 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
@@ -14,6 +14,7 @@
#include "RetainCountChecker.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -45,7 +46,7 @@ static ProgramStateRef removeRefBinding(ProgramStateRef State, SymbolRef Sym) {
void RefVal::print(raw_ostream &Out) const {
if (!T.isNull())
- Out << "Tracked " << T.getAsString() << " | ";
+ Out << "Tracked " << T << " | ";
switch (getKind()) {
default: llvm_unreachable("Invalid RefVal kind");
@@ -154,10 +155,8 @@ void RetainCountChecker::checkPostStmt(const BlockExpr *BE,
ProgramStateRef state = C.getState();
auto *R = cast<BlockDataRegion>(C.getSVal(BE).getAsRegion());
- BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
- E = R->referenced_vars_end();
-
- if (I == E)
+ auto ReferencedVars = R->referenced_vars();
+ if (ReferencedVars.empty())
return;
// FIXME: For now we invalidate the tracking of all symbols passed to blocks
@@ -167,8 +166,8 @@ void RetainCountChecker::checkPostStmt(const BlockExpr *BE,
const LocationContext *LC = C.getLocationContext();
MemRegionManager &MemMgr = C.getSValBuilder().getRegionManager();
- for ( ; I != E; ++I) {
- const VarRegion *VR = I.getCapturedRegion();
+ for (auto Var : ReferencedVars) {
+ const VarRegion *VR = Var.getCapturedRegion();
if (VR->getSuperRegion() == R) {
VR = MemMgr.getVarRegion(VR->getDecl(), LC);
}
@@ -284,13 +283,13 @@ void RetainCountChecker::checkPostStmt(const ObjCBoxedExpr *Ex,
void RetainCountChecker::checkPostStmt(const ObjCIvarRefExpr *IRE,
CheckerContext &C) const {
- Optional<Loc> IVarLoc = C.getSVal(IRE).getAs<Loc>();
+ std::optional<Loc> IVarLoc = C.getSVal(IRE).getAs<Loc>();
if (!IVarLoc)
return;
ProgramStateRef State = C.getState();
SymbolRef Sym = State->getSVal(*IVarLoc).getAsSymbol();
- if (!Sym || !dyn_cast_or_null<ObjCIvarRegion>(Sym->getOriginRegion()))
+ if (!Sym || !isa_and_nonnull<ObjCIvarRegion>(Sym->getOriginRegion()))
return;
// Accessing an ivar directly is unusual. If we've done that, be more
@@ -412,15 +411,15 @@ static QualType GetReturnType(const Expr *RetE, ASTContext &Ctx) {
return RetTy;
}
-static Optional<RefVal> refValFromRetEffect(RetEffect RE,
- QualType ResultTy) {
+static std::optional<RefVal> refValFromRetEffect(RetEffect RE,
+ QualType ResultTy) {
if (RE.isOwned()) {
return RefVal::makeOwned(RE.getObjKind(), ResultTy);
} else if (RE.notOwned()) {
return RefVal::makeNotOwned(RE.getObjKind(), ResultTy);
}
- return None;
+ return std::nullopt;
}
static bool isPointerToObject(QualType QT) {
@@ -692,7 +691,7 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ,
assert(Ex);
ResultTy = GetReturnType(Ex, C.getASTContext());
}
- if (Optional<RefVal> updatedRefVal = refValFromRetEffect(RE, ResultTy))
+ if (std::optional<RefVal> updatedRefVal = refValFromRetEffect(RE, ResultTy))
state = setRefBinding(state, Sym, *updatedRefVal);
}
@@ -767,7 +766,7 @@ ProgramStateRef RetainCountChecker::updateSymbol(ProgramStateRef state,
break;
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case DoNothing:
return state;
@@ -907,7 +906,7 @@ bool RetainCountChecker::evalCall(const CallEvent &Call,
const LocationContext *LCtx = C.getLocationContext();
using BehaviorSummary = RetainSummaryManager::BehaviorSummary;
- Optional<BehaviorSummary> BSmr =
+ std::optional<BehaviorSummary> BSmr =
SmrMgr.canEval(CE, FD, hasTrustedImplementationAnnotation);
// See if it's one of the specific functions we know how to eval.
@@ -945,7 +944,8 @@ bool RetainCountChecker::evalCall(const CallEvent &Call,
// Assume that output is zero on the other branch.
NullOutputState = NullOutputState->BindExpr(
- CE, LCtx, C.getSValBuilder().makeNull(), /*Invalidate=*/false);
+ CE, LCtx, C.getSValBuilder().makeNullWithType(ResultTy),
+ /*Invalidate=*/false);
C.addTransition(NullOutputState, &getCastFailTag());
// And on the original branch assume that both input and
@@ -1188,14 +1188,14 @@ ProgramStateRef RetainCountChecker::checkRegionChanges(
if (!invalidated)
return state;
- llvm::SmallPtrSet<SymbolRef, 8> WhitelistedSymbols;
+ llvm::SmallPtrSet<SymbolRef, 8> AllowedSymbols;
for (const MemRegion *I : ExplicitRegions)
if (const SymbolicRegion *SR = I->StripCasts()->getAs<SymbolicRegion>())
- WhitelistedSymbols.insert(SR->getSymbol());
+ AllowedSymbols.insert(SR->getSymbol());
for (SymbolRef sym : *invalidated) {
- if (WhitelistedSymbols.count(sym))
+ if (AllowedSymbols.count(sym))
continue;
// Remove any existing reference-count binding.
state = removeRefBinding(state, sym);
@@ -1335,7 +1335,7 @@ void RetainCountChecker::checkBeginFunction(CheckerContext &Ctx) const {
RetainSummaryManager &SmrMgr = getSummaryManager(Ctx);
const LocationContext *LCtx = Ctx.getLocationContext();
const Decl *D = LCtx->getDecl();
- Optional<AnyCall> C = AnyCall::forDecl(D);
+ std::optional<AnyCall> C = AnyCall::forDecl(D);
if (!C || SmrMgr.isTrustedReferenceCountImplementation(D))
return;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
index 223e28c2c5b8..d4d7c4c74c56 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h
@@ -36,7 +36,6 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableList.h"
-#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
index 64ac6bc4c06b..c3acb73ba717 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
@@ -15,6 +15,7 @@
#include "RetainCountChecker.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -73,11 +74,8 @@ RefCountBug::RefCountBug(CheckerNameRef Checker, RefCountBugKind BT)
static bool isNumericLiteralExpression(const Expr *E) {
// FIXME: This set of cases was copied from SemaExprObjC.
- return isa<IntegerLiteral>(E) ||
- isa<CharacterLiteral>(E) ||
- isa<FloatingLiteral>(E) ||
- isa<ObjCBoolLiteralExpr>(E) ||
- isa<CXXBoolLiteralExpr>(E);
+ return isa<IntegerLiteral, CharacterLiteral, FloatingLiteral,
+ ObjCBoolLiteralExpr, CXXBoolLiteralExpr>(E);
}
/// If type represents a pointer to CXXRecordDecl,
@@ -168,13 +166,12 @@ static bool shouldGenerateNote(llvm::raw_string_ostream &os,
/// Finds argument index of the out paramter in the call @c S
/// corresponding to the symbol @c Sym.
-/// If none found, returns None.
-static Optional<unsigned> findArgIdxOfSymbol(ProgramStateRef CurrSt,
- const LocationContext *LCtx,
- SymbolRef &Sym,
- Optional<CallEventRef<>> CE) {
+/// If none found, returns std::nullopt.
+static std::optional<unsigned>
+findArgIdxOfSymbol(ProgramStateRef CurrSt, const LocationContext *LCtx,
+ SymbolRef &Sym, std::optional<CallEventRef<>> CE) {
if (!CE)
- return None;
+ return std::nullopt;
for (unsigned Idx = 0; Idx < (*CE)->getNumArgs(); Idx++)
if (const MemRegion *MR = (*CE)->getArgSVal(Idx).getAsRegion())
@@ -182,25 +179,25 @@ static Optional<unsigned> findArgIdxOfSymbol(ProgramStateRef CurrSt,
if (CurrSt->getSVal(MR, TR->getValueType()).getAsSymbol() == Sym)
return Idx;
- return None;
+ return std::nullopt;
}
-static Optional<std::string> findMetaClassAlloc(const Expr *Callee) {
+static std::optional<std::string> findMetaClassAlloc(const Expr *Callee) {
if (const auto *ME = dyn_cast<MemberExpr>(Callee)) {
if (ME->getMemberDecl()->getNameAsString() != "alloc")
- return None;
+ return std::nullopt;
const Expr *This = ME->getBase()->IgnoreParenImpCasts();
if (const auto *DRE = dyn_cast<DeclRefExpr>(This)) {
const ValueDecl *VD = DRE->getDecl();
if (VD->getNameAsString() != "metaClass")
- return None;
+ return std::nullopt;
if (const auto *RD = dyn_cast<CXXRecordDecl>(VD->getDeclContext()))
return RD->getNameAsString();
}
}
- return None;
+ return std::nullopt;
}
static std::string findAllocatedObjectName(const Stmt *S, QualType QT) {
@@ -237,8 +234,8 @@ static void generateDiagnosticsForCallLike(ProgramStateRef CurrSt,
os << "Operator 'new'";
} else {
assert(isa<ObjCMessageExpr>(S));
- CallEventRef<ObjCMethodCall> Call =
- Mgr.getObjCMethodCall(cast<ObjCMessageExpr>(S), CurrSt, LCtx);
+ CallEventRef<ObjCMethodCall> Call = Mgr.getObjCMethodCall(
+ cast<ObjCMessageExpr>(S), CurrSt, LCtx, {nullptr, 0});
switch (Call->getMessageKind()) {
case OCM_Message:
@@ -253,7 +250,7 @@ static void generateDiagnosticsForCallLike(ProgramStateRef CurrSt,
}
}
- Optional<CallEventRef<>> CE = Mgr.getCall(S, CurrSt, LCtx);
+ std::optional<CallEventRef<>> CE = Mgr.getCall(S, CurrSt, LCtx, {nullptr, 0});
auto Idx = findArgIdxOfSymbol(CurrSt, LCtx, Sym, CE);
// If index is not found, we assume that the symbol was returned.
@@ -264,14 +261,12 @@ static void generateDiagnosticsForCallLike(ProgramStateRef CurrSt,
}
if (CurrV.getObjKind() == ObjKind::CF) {
- os << "a Core Foundation object of type '"
- << Sym->getType().getAsString() << "' with a ";
+ os << "a Core Foundation object of type '" << Sym->getType() << "' with a ";
} else if (CurrV.getObjKind() == ObjKind::OS) {
os << "an OSObject of type '" << findAllocatedObjectName(S, Sym->getType())
<< "' with a ";
} else if (CurrV.getObjKind() == ObjKind::Generalized) {
- os << "an object of type '" << Sym->getType().getAsString()
- << "' with a ";
+ os << "an object of type '" << Sym->getType() << "' with a ";
} else {
assert(CurrV.getObjKind() == ObjKind::ObjC);
QualType T = Sym->getType();
@@ -279,8 +274,7 @@ static void generateDiagnosticsForCallLike(ProgramStateRef CurrSt,
os << "an Objective-C object with a ";
} else {
const ObjCObjectPointerType *PT = cast<ObjCObjectPointerType>(T);
- os << "an instance of " << PT->getPointeeType().getAsString()
- << " with a ";
+ os << "an instance of " << PT->getPointeeType() << " with a ";
}
}
@@ -608,16 +602,17 @@ RefCountReportVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
return std::move(P);
}
-static Optional<std::string> describeRegion(const MemRegion *MR) {
+static std::optional<std::string> describeRegion(const MemRegion *MR) {
if (const auto *VR = dyn_cast_or_null<VarRegion>(MR))
return std::string(VR->getDecl()->getName());
// Once we support more storage locations for bindings,
// this would need to be improved.
- return None;
+ return std::nullopt;
}
using Bindings = llvm::SmallVector<std::pair<const MemRegion *, SVal>, 4>;
+namespace {
class VarBindingsCollector : public StoreManager::BindingsHandler {
SymbolRef Sym;
Bindings &Result;
@@ -638,6 +633,7 @@ public:
return true;
}
};
+} // namespace
Bindings getAllVarBindingsForSymbol(ProgramStateManager &Manager,
const ExplodedNode *Node, SymbolRef Sym) {
@@ -734,7 +730,7 @@ static AllocationInfo GetAllocationSite(ProgramStateManager &StateMgr,
const LocationContext *InterestingMethodContext = nullptr;
if (InitMethodContext) {
const ProgramPoint AllocPP = AllocationNode->getLocation();
- if (Optional<StmtPoint> SP = AllocPP.getAs<StmtPoint>())
+ if (std::optional<StmtPoint> SP = AllocPP.getAs<StmtPoint>())
if (const ObjCMessageExpr *ME = SP->getStmtAs<ObjCMessageExpr>())
if (ME->getMethodFamily() == OMF_alloc)
InterestingMethodContext = InitMethodContext;
@@ -777,7 +773,7 @@ RefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
os << "Object leaked: ";
- Optional<std::string> RegionDescription = describeRegion(LastBinding);
+ std::optional<std::string> RegionDescription = describeRegion(LastBinding);
if (RegionDescription) {
os << "object allocated and stored into '" << *RegionDescription << '\'';
} else {
@@ -790,9 +786,6 @@ RefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
assert(RV);
if (RV->getKind() == RefVal::ErrorLeakReturned) {
- // FIXME: Per comments in rdar://6320065, "create" only applies to CF
- // objects. Only "copy", "alloc", "retain" and "new" transfer ownership
- // to the caller for NS objects.
const Decl *D = &EndN->getCodeDecl();
os << (isa<ObjCMethodDecl>(D) ? " is returned from a method "
@@ -923,7 +916,7 @@ void RefLeakReport::createDescription(CheckerContext &Ctx) {
llvm::raw_string_ostream os(Description);
os << "Potential leak of an object";
- Optional<std::string> RegionDescription =
+ std::optional<std::string> RegionDescription =
describeRegion(AllocBindingToReport);
if (RegionDescription) {
os << " stored into '" << *RegionDescription << '\'';
@@ -975,7 +968,7 @@ void RefLeakReport::findBindingToReport(CheckerContext &Ctx,
// Let's pick one of them at random (if there is something to pick from).
AllocBindingToReport = AllVarBindings[0].first;
- // Because 'AllocBindingToReport' is not the the same as
+ // Because 'AllocBindingToReport' is not the same as
// 'AllocFirstBinding', we need to explain how the leaking object
// got from one to another.
//
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
index 885750218b9e..09d82ebabd4c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -25,7 +26,9 @@ using namespace ento;
namespace {
class ReturnPointerRangeChecker :
public Checker< check::PreStmt<ReturnStmt> > {
- mutable std::unique_ptr<BuiltinBug> BT;
+ // FIXME: This bug correspond to CWE-466. Eventually we should have bug
+ // types explicitly reference such exploit categories (when applicable).
+ const BugType BT{this, "Buffer overflow"};
public:
void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
@@ -40,6 +43,10 @@ void ReturnPointerRangeChecker::checkPreStmt(const ReturnStmt *RS,
if (!RetE)
return;
+ // Skip "body farmed" functions.
+ if (RetE->getSourceRange().isInvalid())
+ return;
+
SVal V = C.getSVal(RetE);
const MemRegion *R = V.getAsRegion();
@@ -63,32 +70,55 @@ void ReturnPointerRangeChecker::checkPreStmt(const ReturnStmt *RS,
if (Idx == ElementCount)
return;
- ProgramStateRef StInBound = state->assumeInBound(Idx, ElementCount, true);
- ProgramStateRef StOutBound = state->assumeInBound(Idx, ElementCount, false);
+ ProgramStateRef StInBound, StOutBound;
+ std::tie(StInBound, StOutBound) = state->assumeInBoundDual(Idx, ElementCount);
if (StOutBound && !StInBound) {
ExplodedNode *N = C.generateErrorNode(StOutBound);
if (!N)
return;
- // FIXME: This bug correspond to CWE-466. Eventually we should have bug
- // types explicitly reference such exploit categories (when applicable).
- if (!BT)
- BT.reset(new BuiltinBug(
- this, "Buffer overflow",
- "Returned pointer value points outside the original object "
- "(potential buffer overflow)"));
-
- // FIXME: It would be nice to eventually make this diagnostic more clear,
- // e.g., by referencing the original declaration or by saying *why* this
- // reference is outside the range.
+ constexpr llvm::StringLiteral Msg =
+ "Returned pointer value points outside the original object "
+ "(potential buffer overflow)";
// Generate a report for this bug.
- auto report =
- std::make_unique<PathSensitiveBugReport>(*BT, BT->getDescription(), N);
-
- report->addRange(RetE->getSourceRange());
- C.emitReport(std::move(report));
+ auto Report = std::make_unique<PathSensitiveBugReport>(BT, Msg, N);
+ Report->addRange(RetE->getSourceRange());
+
+ const auto ConcreteElementCount = ElementCount.getAs<nonloc::ConcreteInt>();
+ const auto ConcreteIdx = Idx.getAs<nonloc::ConcreteInt>();
+
+ const auto *DeclR = ER->getSuperRegion()->getAs<DeclRegion>();
+
+ if (DeclR)
+ Report->addNote("Original object declared here",
+ {DeclR->getDecl(), C.getSourceManager()});
+
+ if (ConcreteElementCount) {
+ SmallString<128> SBuf;
+ llvm::raw_svector_ostream OS(SBuf);
+ OS << "Original object ";
+ if (DeclR) {
+ OS << "'";
+ DeclR->getDecl()->printName(OS);
+ OS << "' ";
+ }
+ OS << "is an array of " << ConcreteElementCount->getValue() << " '";
+ ER->getValueType().print(OS,
+ PrintingPolicy(C.getASTContext().getLangOpts()));
+ OS << "' objects";
+ if (ConcreteIdx) {
+ OS << ", returned pointer points at index " << ConcreteIdx->getValue();
+ }
+
+ Report->addNote(SBuf,
+ {RetE, C.getSourceManager(), C.getLocationContext()});
+ }
+
+ bugreporter::trackExpressionValue(N, RetE, *Report);
+
+ C.emitReport(std::move(Report));
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
index 5266cbf86b44..efffbf2ee755 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
@@ -24,8 +24,8 @@ using namespace ento;
namespace {
class ReturnUndefChecker : public Checker< check::PreStmt<ReturnStmt> > {
- mutable std::unique_ptr<BuiltinBug> BT_Undef;
- mutable std::unique_ptr<BuiltinBug> BT_NullReference;
+ const BugType BT_Undef{this, "Garbage return value"};
+ const BugType BT_NullReference{this, "Returning null reference"};
void emitUndef(CheckerContext &C, const Expr *RetE) const;
void checkReference(CheckerContext &C, const Expr *RetE,
@@ -77,14 +77,13 @@ void ReturnUndefChecker::checkPreStmt(const ReturnStmt *RS,
}
}
-static void emitBug(CheckerContext &C, BuiltinBug &BT, const Expr *RetE,
- const Expr *TrackingE = nullptr) {
+static void emitBug(CheckerContext &C, const BugType &BT, StringRef Msg,
+ const Expr *RetE, const Expr *TrackingE = nullptr) {
ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
- auto Report =
- std::make_unique<PathSensitiveBugReport>(BT, BT.getDescription(), N);
+ auto Report = std::make_unique<PathSensitiveBugReport>(BT, Msg, N);
Report->addRange(RetE->getSourceRange());
bugreporter::trackExpressionValue(N, TrackingE ? TrackingE : RetE, *Report);
@@ -93,11 +92,7 @@ static void emitBug(CheckerContext &C, BuiltinBug &BT, const Expr *RetE,
}
void ReturnUndefChecker::emitUndef(CheckerContext &C, const Expr *RetE) const {
- if (!BT_Undef)
- BT_Undef.reset(
- new BuiltinBug(this, "Garbage return value",
- "Undefined or garbage value returned to caller"));
- emitBug(C, *BT_Undef, RetE);
+ emitBug(C, BT_Undef, "Undefined or garbage value returned to caller", RetE);
}
void ReturnUndefChecker::checkReference(CheckerContext &C, const Expr *RetE,
@@ -112,10 +107,8 @@ void ReturnUndefChecker::checkReference(CheckerContext &C, const Expr *RetE,
}
// The return value is known to be null. Emit a bug report.
- if (!BT_NullReference)
- BT_NullReference.reset(new BuiltinBug(this, "Returning null reference"));
-
- emitBug(C, *BT_NullReference, RetE, bugreporter::getDerefExpr(RetE));
+ emitBug(C, BT_NullReference, BT_NullReference.getDescription(), RetE,
+ bugreporter::getDerefExpr(RetE));
}
void ento::registerReturnUndefChecker(CheckerManager &mgr) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp
index 14ecede17083..c3112ebe4e79 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnValueChecker.cpp
@@ -14,10 +14,11 @@
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -58,7 +59,7 @@ private:
} // namespace
static std::string getName(const CallEvent &Call) {
- std::string Name = "";
+ std::string Name;
if (const auto *MD = dyn_cast<CXXMethodDecl>(Call.getDecl()))
if (const CXXRecordDecl *RD = MD->getParent())
Name += RD->getNameAsString() + "::";
@@ -69,11 +70,11 @@ static std::string getName(const CallEvent &Call) {
// The predefinitions ('CDM') could break due to the ever growing code base.
// Check for the expected invariants and see whether they apply.
-static Optional<bool> isInvariantBreak(bool ExpectedValue, SVal ReturnV,
- CheckerContext &C) {
+static std::optional<bool> isInvariantBreak(bool ExpectedValue, SVal ReturnV,
+ CheckerContext &C) {
auto ReturnDV = ReturnV.getAs<DefinedOrUnknownSVal>();
if (!ReturnDV)
- return None;
+ return std::nullopt;
if (ExpectedValue)
return C.getState()->isNull(*ReturnDV).isConstrainedTrue();
@@ -89,7 +90,8 @@ void ReturnValueChecker::checkPostCall(const CallEvent &Call,
SVal ReturnV = Call.getReturnValue();
bool ExpectedValue = *RawExpectedValue;
- Optional<bool> IsInvariantBreak = isInvariantBreak(ExpectedValue, ReturnV, C);
+ std::optional<bool> IsInvariantBreak =
+ isInvariantBreak(ExpectedValue, ReturnV, C);
if (!IsInvariantBreak)
return;
@@ -136,7 +138,8 @@ void ReturnValueChecker::checkEndFunction(const ReturnStmt *RS,
SVal ReturnV = State->getSVal(RS->getRetValue(), C.getLocationContext());
bool ExpectedValue = *RawExpectedValue;
- Optional<bool> IsInvariantBreak = isInvariantBreak(ExpectedValue, ReturnV, C);
+ std::optional<bool> IsInvariantBreak =
+ isInvariantBreak(ExpectedValue, ReturnV, C);
if (!IsInvariantBreak)
return;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp
index 933e0146ff59..788f2875863c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp
@@ -12,6 +12,7 @@
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
@@ -60,7 +61,7 @@ class STLAlgorithmModeling : public Checker<eval::Call> {
public:
STLAlgorithmModeling() = default;
- bool AggressiveStdFindModeling;
+ bool AggressiveStdFindModeling = false;
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
}; //
@@ -130,7 +131,7 @@ void STLAlgorithmModeling::Find(CheckerContext &C, const CallExpr *CE,
nonloc::SymbolVal(NewPos->getOffset()),
nonloc::SymbolVal(Pos->getOffset()),
SVB.getConditionType());
- assert(GreaterOrEqual.getAs<DefinedSVal>() &&
+ assert(isa<DefinedSVal>(GreaterOrEqual) &&
"Symbol comparison must be a `DefinedSVal`");
StateFound = StateFound->assume(GreaterOrEqual.castAs<DefinedSVal>(), true);
}
@@ -152,7 +153,7 @@ void STLAlgorithmModeling::Find(CheckerContext &C, const CallExpr *CE,
nonloc::SymbolVal(NewPos->getOffset()),
nonloc::SymbolVal(Pos->getOffset()),
SVB.getConditionType());
- assert(Less.getAs<DefinedSVal>() &&
+ assert(isa<DefinedSVal>(Less) &&
"Symbol comparison must be a `DefinedSVal`");
StateFound = StateFound->assume(Less.castAs<DefinedSVal>(), true);
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
index 8d380ed1b93d..7cbe271dfbf9 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
@@ -17,6 +17,7 @@
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include <utility>
@@ -51,10 +52,13 @@ class SimpleStreamChecker : public Checker<check::PostCall,
check::PreCall,
check::DeadSymbols,
check::PointerEscape> {
- CallDescription OpenFn, CloseFn;
+ const CallDescription OpenFn{{"fopen"}, 2};
+ const CallDescription CloseFn{{"fclose"}, 1};
- std::unique_ptr<BugType> DoubleCloseBugType;
- std::unique_ptr<BugType> LeakBugType;
+ const BugType DoubleCloseBugType{this, "Double fclose",
+ "Unix Stream API Error"};
+ const BugType LeakBugType{this, "Resource Leak", "Unix Stream API Error",
+ /*SuppressOnSink=*/true};
void reportDoubleClose(SymbolRef FileDescSym,
const CallEvent &Call,
@@ -66,8 +70,6 @@ class SimpleStreamChecker : public Checker<check::PostCall,
bool guaranteedNotToCloseFile(const CallEvent &Call) const;
public:
- SimpleStreamChecker();
-
/// Process fopen.
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
/// Process fclose.
@@ -88,38 +90,12 @@ public:
/// state. Let's store it in the ProgramState.
REGISTER_MAP_WITH_PROGRAMSTATE(StreamMap, SymbolRef, StreamState)
-namespace {
-class StopTrackingCallback final : public SymbolVisitor {
- ProgramStateRef state;
-public:
- StopTrackingCallback(ProgramStateRef st) : state(std::move(st)) {}
- ProgramStateRef getState() const { return state; }
-
- bool VisitSymbol(SymbolRef sym) override {
- state = state->remove<StreamMap>(sym);
- return true;
- }
-};
-} // end anonymous namespace
-
-SimpleStreamChecker::SimpleStreamChecker()
- : OpenFn("fopen"), CloseFn("fclose", 1) {
- // Initialize the bug types.
- DoubleCloseBugType.reset(
- new BugType(this, "Double fclose", "Unix Stream API Error"));
-
- // Sinks are higher importance bugs as well as calls to assert() or exit(0).
- LeakBugType.reset(
- new BugType(this, "Resource Leak", "Unix Stream API Error",
- /*SuppressOnSink=*/true));
-}
-
void SimpleStreamChecker::checkPostCall(const CallEvent &Call,
CheckerContext &C) const {
if (!Call.isGlobalCFunction())
return;
- if (!Call.isCalled(OpenFn))
+ if (!OpenFn.matches(Call))
return;
// Get the symbolic value corresponding to the file handle.
@@ -138,7 +114,7 @@ void SimpleStreamChecker::checkPreCall(const CallEvent &Call,
if (!Call.isGlobalCFunction())
return;
- if (!Call.isCalled(CloseFn))
+ if (!CloseFn.matches(Call))
return;
// Get the symbolic value corresponding to the file handle.
@@ -176,13 +152,11 @@ void SimpleStreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
ProgramStateRef State = C.getState();
SymbolVector LeakedStreams;
StreamMapTy TrackedStreams = State->get<StreamMap>();
- for (StreamMapTy::iterator I = TrackedStreams.begin(),
- E = TrackedStreams.end(); I != E; ++I) {
- SymbolRef Sym = I->first;
+ for (auto [Sym, StreamStatus] : TrackedStreams) {
bool IsSymDead = SymReaper.isDead(Sym);
// Collect leaked symbols.
- if (isLeaked(Sym, I->second, IsSymDead, State))
+ if (isLeaked(Sym, StreamStatus, IsSymDead, State))
LeakedStreams.push_back(Sym);
// Remove the dead symbol from the streams map.
@@ -207,7 +181,7 @@ void SimpleStreamChecker::reportDoubleClose(SymbolRef FileDescSym,
// Generate the report.
auto R = std::make_unique<PathSensitiveBugReport>(
- *DoubleCloseBugType, "Closing a previously closed file stream", ErrNode);
+ DoubleCloseBugType, "Closing a previously closed file stream", ErrNode);
R->addRange(Call.getSourceRange());
R->markInteresting(FileDescSym);
C.emitReport(std::move(R));
@@ -220,7 +194,7 @@ void SimpleStreamChecker::reportLeaks(ArrayRef<SymbolRef> LeakedStreams,
// TODO: Identify the leaked file descriptor.
for (SymbolRef LeakedStream : LeakedStreams) {
auto R = std::make_unique<PathSensitiveBugReport>(
- *LeakBugType, "Opened file is never closed; potential resource leak",
+ LeakBugType, "Opened file is never closed; potential resource leak",
ErrNode);
R->markInteresting(LeakedStream);
C.emitReport(std::move(R));
@@ -254,11 +228,7 @@ SimpleStreamChecker::checkPointerEscape(ProgramStateRef State,
return State;
}
- for (InvalidatedSymbols::const_iterator I = Escaped.begin(),
- E = Escaped.end();
- I != E; ++I) {
- SymbolRef Sym = *I;
-
+ for (SymbolRef Sym : Escaped) {
// The symbol escaped. Optimistically, assume that the corresponding file
// handle will be closed somewhere else.
State = State->remove<StreamMap>(Sym);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h
index 6a40f8eda5fa..b4352b450c7f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h
@@ -25,8 +25,6 @@ bool isStdSmartPtrCall(const CallEvent &Call);
bool isStdSmartPtr(const CXXRecordDecl *RD);
bool isStdSmartPtr(const Expr *E);
-bool isStdSmartPtr(const CXXRecordDecl *RD);
-
/// Returns whether the smart pointer is null or not.
bool isNullSmartPtr(const ProgramStateRef State, const MemRegion *ThisRegion);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
index 09e885e8133f..268fc742f050 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
@@ -23,6 +23,7 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
@@ -30,8 +31,9 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
-#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
+#include <optional>
#include <string>
using namespace clang;
@@ -47,9 +49,8 @@ class SmartPtrModeling
public:
// Whether the checker should model for null dereferences of smart pointers.
- DefaultBool ModelSmartPtrDereference;
+ bool ModelSmartPtrDereference = false;
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
- void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
ProgramStateRef
checkRegionChanges(ProgramStateRef State,
@@ -70,7 +71,8 @@ private:
bool handleMoveCtr(const CallEvent &Call, CheckerContext &C,
const MemRegion *ThisRegion) const;
bool updateMovedSmartPointers(CheckerContext &C, const MemRegion *ThisRegion,
- const MemRegion *OtherSmartPtrRegion) const;
+ const MemRegion *OtherSmartPtrRegion,
+ const CallEvent &Call) const;
void handleBoolConversion(const CallEvent &Call, CheckerContext &C) const;
bool handleComparisionOp(const CallEvent &Call, CheckerContext &C) const;
bool handleOstreamOperator(const CallEvent &Call, CheckerContext &C) const;
@@ -84,10 +86,10 @@ private:
using SmartPtrMethodHandlerFn =
void (SmartPtrModeling::*)(const CallEvent &Call, CheckerContext &) const;
CallDescriptionMap<SmartPtrMethodHandlerFn> SmartPtrMethodHandlers{
- {{"reset"}, &SmartPtrModeling::handleReset},
- {{"release"}, &SmartPtrModeling::handleRelease},
- {{"swap", 1}, &SmartPtrModeling::handleSwapMethod},
- {{"get"}, &SmartPtrModeling::handleGet}};
+ {{{"reset"}}, &SmartPtrModeling::handleReset},
+ {{{"release"}}, &SmartPtrModeling::handleRelease},
+ {{{"swap"}, 1}, &SmartPtrModeling::handleSwapMethod},
+ {{{"get"}}, &SmartPtrModeling::handleGet}};
const CallDescription StdSwapCall{{"std", "swap"}, 2};
const CallDescription StdMakeUniqueCall{{"std", "make_unique"}};
const CallDescription StdMakeUniqueForOverwriteCall{
@@ -102,12 +104,8 @@ static bool hasStdClassWithName(const CXXRecordDecl *RD,
ArrayRef<llvm::StringLiteral> Names) {
if (!RD || !RD->getDeclContext()->isStdNamespace())
return false;
- if (RD->getDeclName().isIdentifier()) {
- StringRef Name = RD->getName();
- return llvm::any_of(Names, [&Name](StringRef GivenName) -> bool {
- return Name == GivenName;
- });
- }
+ if (RD->getDeclName().isIdentifier())
+ return llvm::is_contained(Names, RD->getName());
return false;
}
@@ -203,7 +201,7 @@ static QualType getInnerPointerType(CheckerContext C, const CXXRecordDecl *RD) {
static QualType getPointerTypeFromTemplateArg(const CallEvent &Call,
CheckerContext &C) {
const auto *FD = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
- if (!FD || !FD->isFunctionTemplateSpecialization())
+ if (!FD || !FD->getPrimaryTemplate())
return {};
const auto &TemplateArgs = FD->getTemplateSpecializationArgs()->asArray();
if (TemplateArgs.size() == 0)
@@ -289,7 +287,7 @@ bool SmartPtrModeling::evalCall(const CallEvent &Call,
if (ModelSmartPtrDereference && isStdOstreamOperatorCall(Call))
return handleOstreamOperator(Call, C);
- if (Call.isCalled(StdSwapCall)) {
+ if (StdSwapCall.matches(Call)) {
// Check the first arg, if it is of std::unique_ptr type.
assert(Call.getNumArgs() == 2 && "std::swap should have two arguments");
const Expr *FirstArg = Call.getArgExpr(0);
@@ -298,12 +296,12 @@ bool SmartPtrModeling::evalCall(const CallEvent &Call,
return handleSwap(State, Call.getArgSVal(0), Call.getArgSVal(1), C);
}
- if (Call.isCalled(StdMakeUniqueCall) ||
- Call.isCalled(StdMakeUniqueForOverwriteCall)) {
+ if (matchesAny(Call, StdMakeUniqueCall, StdMakeUniqueForOverwriteCall)) {
if (!ModelSmartPtrDereference)
return false;
-
- const Optional<SVal> ThisRegionOpt = Call.getReturnValueUnderConstruction();
+
+ const std::optional<SVal> ThisRegionOpt =
+ Call.getReturnValueUnderConstruction();
if (!ThisRegionOpt)
return false;
@@ -383,11 +381,13 @@ bool SmartPtrModeling::evalCall(const CallEvent &Call,
if (!ThisRegion)
return false;
+ QualType ThisType = cast<CXXMethodDecl>(Call.getDecl())->getThisType();
+
if (CC->getDecl()->isMoveConstructor())
return handleMoveCtr(Call, C, ThisRegion);
if (Call.getNumArgs() == 0) {
- auto NullVal = C.getSValBuilder().makeNull();
+ auto NullVal = C.getSValBuilder().makeNullWithType(ThisType);
State = State->set<TrackedRegionMap>(ThisRegion, NullVal);
C.addTransition(
@@ -588,10 +588,9 @@ void SmartPtrModeling::checkLiveSymbols(ProgramStateRef State,
SymbolReaper &SR) const {
// Marking tracked symbols alive
TrackedRegionMapTy TrackedRegions = State->get<TrackedRegionMap>();
- for (auto I = TrackedRegions.begin(), E = TrackedRegions.end(); I != E; ++I) {
- SVal Val = I->second;
- for (auto si = Val.symbol_begin(), se = Val.symbol_end(); si != se; ++si) {
- SR.markLive(*si);
+ for (SVal Val : llvm::make_second_range(TrackedRegions)) {
+ for (SymbolRef Sym : Val.symbols()) {
+ SR.markLive(Sym);
}
}
}
@@ -644,7 +643,8 @@ void SmartPtrModeling::handleRelease(const CallEvent &Call,
*InnerPointVal);
}
- auto ValueToUpdate = C.getSValBuilder().makeNull();
+ QualType ThisType = cast<CXXMethodDecl>(Call.getDecl())->getThisType();
+ auto ValueToUpdate = C.getSValBuilder().makeNullWithType(ThisType);
State = State->set<TrackedRegionMap>(ThisRegion, ValueToUpdate);
C.addTransition(State, C.getNoteTag([ThisRegion](PathSensitiveBugReport &BR,
@@ -742,13 +742,15 @@ bool SmartPtrModeling::handleAssignOp(const CallEvent &Call,
if (!ThisRegion)
return false;
+ QualType ThisType = cast<CXXMethodDecl>(Call.getDecl())->getThisType();
+
const MemRegion *OtherSmartPtrRegion = OC->getArgSVal(0).getAsRegion();
// In case of 'nullptr' or '0' assigned
if (!OtherSmartPtrRegion) {
bool AssignedNull = Call.getArgSVal(0).isZeroConstant();
if (!AssignedNull)
return false;
- auto NullVal = C.getSValBuilder().makeNull();
+ auto NullVal = C.getSValBuilder().makeNullWithType(ThisType);
State = State->set<TrackedRegionMap>(ThisRegion, NullVal);
C.addTransition(State, C.getNoteTag([ThisRegion](PathSensitiveBugReport &BR,
llvm::raw_ostream &OS) {
@@ -762,7 +764,7 @@ bool SmartPtrModeling::handleAssignOp(const CallEvent &Call,
return true;
}
- return updateMovedSmartPointers(C, ThisRegion, OtherSmartPtrRegion);
+ return updateMovedSmartPointers(C, ThisRegion, OtherSmartPtrRegion, Call);
}
bool SmartPtrModeling::handleMoveCtr(const CallEvent &Call, CheckerContext &C,
@@ -771,17 +773,19 @@ bool SmartPtrModeling::handleMoveCtr(const CallEvent &Call, CheckerContext &C,
if (!OtherSmartPtrRegion)
return false;
- return updateMovedSmartPointers(C, ThisRegion, OtherSmartPtrRegion);
+ return updateMovedSmartPointers(C, ThisRegion, OtherSmartPtrRegion, Call);
}
bool SmartPtrModeling::updateMovedSmartPointers(
CheckerContext &C, const MemRegion *ThisRegion,
- const MemRegion *OtherSmartPtrRegion) const {
+ const MemRegion *OtherSmartPtrRegion, const CallEvent &Call) const {
ProgramStateRef State = C.getState();
+ QualType ThisType = cast<CXXMethodDecl>(Call.getDecl())->getThisType();
const auto *OtherInnerPtr = State->get<TrackedRegionMap>(OtherSmartPtrRegion);
if (OtherInnerPtr) {
State = State->set<TrackedRegionMap>(ThisRegion, *OtherInnerPtr);
- auto NullVal = C.getSValBuilder().makeNull();
+
+ auto NullVal = C.getSValBuilder().makeNullWithType(ThisType);
State = State->set<TrackedRegionMap>(OtherSmartPtrRegion, NullVal);
bool IsArgValNull = OtherInnerPtr->isZeroConstant();
@@ -807,7 +811,8 @@ bool SmartPtrModeling::updateMovedSmartPointers(
} else {
// In case we dont know anything about value we are moving from
// remove the entry from map for which smart pointer got moved to.
- auto NullVal = C.getSValBuilder().makeNull();
+ // For unique_ptr<A>, Ty will be 'A*'.
+ auto NullVal = C.getSValBuilder().makeNullWithType(ThisType);
State = State->remove<TrackedRegionMap>(ThisRegion);
State = State->set<TrackedRegionMap>(OtherSmartPtrRegion, NullVal);
C.addTransition(State, C.getNoteTag([OtherSmartPtrRegion,
@@ -834,6 +839,8 @@ void SmartPtrModeling::handleBoolConversion(const CallEvent &Call,
const MemRegion *ThisRegion =
cast<CXXInstanceCall>(&Call)->getCXXThisVal().getAsRegion();
+ QualType ThisType = cast<CXXMethodDecl>(Call.getDecl())->getThisType();
+
SVal InnerPointerVal;
if (const auto *InnerValPtr = State->get<TrackedRegionMap>(ThisRegion)) {
InnerPointerVal = *InnerValPtr;
@@ -872,7 +879,7 @@ void SmartPtrModeling::handleBoolConversion(const CallEvent &Call,
std::tie(NotNullState, NullState) =
State->assume(InnerPointerVal.castAs<DefinedOrUnknownSVal>());
- auto NullVal = C.getSValBuilder().makeNull();
+ auto NullVal = C.getSValBuilder().makeNullWithType(ThisType);
// Explicitly tracking the region as null.
NullState = NullState->set<TrackedRegionMap>(ThisRegion, NullVal);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
index b5c9356322fc..ea09c43cc5ce 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
@@ -11,9 +11,9 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/ExprCXX.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -29,11 +29,11 @@ namespace {
class StackAddrEscapeChecker
: public Checker<check::PreCall, check::PreStmt<ReturnStmt>,
check::EndFunction> {
- mutable IdentifierInfo *dispatch_semaphore_tII;
- mutable std::unique_ptr<BuiltinBug> BT_stackleak;
- mutable std::unique_ptr<BuiltinBug> BT_returnstack;
- mutable std::unique_ptr<BuiltinBug> BT_capturedstackasync;
- mutable std::unique_ptr<BuiltinBug> BT_capturedstackret;
+ mutable IdentifierInfo *dispatch_semaphore_tII = nullptr;
+ mutable std::unique_ptr<BugType> BT_stackleak;
+ mutable std::unique_ptr<BugType> BT_returnstack;
+ mutable std::unique_ptr<BugType> BT_capturedstackasync;
+ mutable std::unique_ptr<BugType> BT_capturedstackret;
public:
enum CheckKind {
@@ -42,7 +42,7 @@ public:
CK_NumCheckKinds
};
- DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ bool ChecksEnabled[CK_NumCheckKinds] = {false};
CheckerNameRef CheckNames[CK_NumCheckKinds];
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
@@ -61,7 +61,6 @@ private:
ASTContext &Ctx);
static SmallVector<const MemRegion *, 4>
getCapturedStackRegions(const BlockDataRegion &B, CheckerContext &C);
- static bool isArcManagedBlock(const MemRegion *R, CheckerContext &C);
static bool isNotInCurrentFrame(const MemRegion *R, CheckerContext &C);
};
} // namespace
@@ -97,6 +96,14 @@ SourceRange StackAddrEscapeChecker::genName(raw_ostream &os, const MemRegion *R,
os << "stack memory associated with local variable '" << VR->getString()
<< '\'';
range = VR->getDecl()->getSourceRange();
+ } else if (const auto *LER = dyn_cast<CXXLifetimeExtendedObjectRegion>(R)) {
+ QualType Ty = LER->getValueType().getLocalUnqualifiedType();
+ os << "stack memory associated with temporary object of type '";
+ Ty.print(os, Ctx.getPrintingPolicy());
+ os << "' lifetime extended by local variable";
+ if (const IdentifierInfo *ID = LER->getExtendingDecl()->getIdentifier())
+ os << " '" << ID->getName() << '\'';
+ range = LER->getExpr()->getSourceRange();
} else if (const auto *TOR = dyn_cast<CXXTempObjectRegion>(R)) {
QualType Ty = TOR->getValueType().getLocalUnqualifiedType();
os << "stack memory associated with temporary object of type '";
@@ -110,13 +117,6 @@ SourceRange StackAddrEscapeChecker::genName(raw_ostream &os, const MemRegion *R,
return range;
}
-bool StackAddrEscapeChecker::isArcManagedBlock(const MemRegion *R,
- CheckerContext &C) {
- assert(R && "MemRegion should not be null");
- return C.getASTContext().getLangOpts().ObjCAutoRefCount &&
- isa<BlockDataRegion>(R);
-}
-
bool StackAddrEscapeChecker::isNotInCurrentFrame(const MemRegion *R,
CheckerContext &C) {
const StackSpaceRegion *S = cast<StackSpaceRegion>(R->getMemorySpace());
@@ -138,10 +138,8 @@ SmallVector<const MemRegion *, 4>
StackAddrEscapeChecker::getCapturedStackRegions(const BlockDataRegion &B,
CheckerContext &C) {
SmallVector<const MemRegion *, 4> Regions;
- BlockDataRegion::referenced_vars_iterator I = B.referenced_vars_begin();
- BlockDataRegion::referenced_vars_iterator E = B.referenced_vars_end();
- for (; I != E; ++I) {
- SVal Val = C.getState()->getSVal(I.getCapturedRegion());
+ for (auto Var : B.referenced_vars()) {
+ SVal Val = C.getState()->getSVal(Var.getCapturedRegion());
const MemRegion *Region = Val.getAsRegion();
if (Region && isa<StackSpaceRegion>(Region->getMemorySpace()))
Regions.push_back(Region);
@@ -156,7 +154,7 @@ void StackAddrEscapeChecker::EmitStackError(CheckerContext &C,
if (!N)
return;
if (!BT_returnstack)
- BT_returnstack = std::make_unique<BuiltinBug>(
+ BT_returnstack = std::make_unique<BugType>(
CheckNames[CK_StackAddrEscapeChecker],
"Return of address to stack-allocated memory");
// Generate a report for this bug.
@@ -196,7 +194,7 @@ void StackAddrEscapeChecker::checkAsyncExecutedBlockCaptures(
if (!N)
continue;
if (!BT_capturedstackasync)
- BT_capturedstackasync = std::make_unique<BuiltinBug>(
+ BT_capturedstackasync = std::make_unique<BugType>(
CheckNames[CK_StackAddrAsyncEscapeChecker],
"Address of stack-allocated memory is captured");
SmallString<128> Buf;
@@ -214,13 +212,13 @@ void StackAddrEscapeChecker::checkAsyncExecutedBlockCaptures(
void StackAddrEscapeChecker::checkReturnedBlockCaptures(
const BlockDataRegion &B, CheckerContext &C) const {
for (const MemRegion *Region : getCapturedStackRegions(B, C)) {
- if (isArcManagedBlock(Region, C) || isNotInCurrentFrame(Region, C))
+ if (isNotInCurrentFrame(Region, C))
continue;
ExplodedNode *N = C.generateNonFatalErrorNode();
if (!N)
continue;
if (!BT_capturedstackret)
- BT_capturedstackret = std::make_unique<BuiltinBug>(
+ BT_capturedstackret = std::make_unique<BugType>(
CheckNames[CK_StackAddrEscapeChecker],
"Address of stack-allocated memory is captured");
SmallString<128> Buf;
@@ -267,8 +265,7 @@ void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS,
if (const BlockDataRegion *B = dyn_cast<BlockDataRegion>(R))
checkReturnedBlockCaptures(*B, C);
- if (!isa<StackSpaceRegion>(R->getMemorySpace()) ||
- isNotInCurrentFrame(R, C) || isArcManagedBlock(R, C))
+ if (!isa<StackSpaceRegion>(R->getMemorySpace()) || isNotInCurrentFrame(R, C))
return;
// Returning a record by value is fine. (In this case, the returned
@@ -303,21 +300,52 @@ void StackAddrEscapeChecker::checkEndFunction(const ReturnStmt *RS,
class CallBack : public StoreManager::BindingsHandler {
private:
CheckerContext &Ctx;
- const StackFrameContext *CurSFC;
+ const StackFrameContext *PoppedFrame;
+
+ /// Look for stack variables referring to popped stack variables.
+ /// Returns true only if it found some dangling stack variables
+ /// referred by an other stack variable from different stack frame.
+ bool checkForDanglingStackVariable(const MemRegion *Referrer,
+ const MemRegion *Referred) {
+ const auto *ReferrerMemSpace =
+ Referrer->getMemorySpace()->getAs<StackSpaceRegion>();
+ const auto *ReferredMemSpace =
+ Referred->getMemorySpace()->getAs<StackSpaceRegion>();
+
+ if (!ReferrerMemSpace || !ReferredMemSpace)
+ return false;
+
+ const auto *ReferrerFrame = ReferrerMemSpace->getStackFrame();
+ const auto *ReferredFrame = ReferredMemSpace->getStackFrame();
+
+ if (ReferrerMemSpace && ReferredMemSpace) {
+ if (ReferredFrame == PoppedFrame &&
+ ReferrerFrame->isParentOf(PoppedFrame)) {
+ V.emplace_back(Referrer, Referred);
+ return true;
+ }
+ }
+ return false;
+ }
public:
SmallVector<std::pair<const MemRegion *, const MemRegion *>, 10> V;
- CallBack(CheckerContext &CC) : Ctx(CC), CurSFC(CC.getStackFrame()) {}
+ CallBack(CheckerContext &CC) : Ctx(CC), PoppedFrame(CC.getStackFrame()) {}
bool HandleBinding(StoreManager &SMgr, Store S, const MemRegion *Region,
SVal Val) override {
+ const MemRegion *VR = Val.getAsRegion();
+ if (!VR)
+ return true;
+ if (checkForDanglingStackVariable(Region, VR))
+ return true;
+
+ // Check the globals for the same.
if (!isa<GlobalsSpaceRegion>(Region->getMemorySpace()))
return true;
- const MemRegion *VR = Val.getAsRegion();
- if (VR && isa<StackSpaceRegion>(VR->getMemorySpace()) &&
- !isArcManagedBlock(VR, Ctx) && !isNotInCurrentFrame(VR, Ctx))
+ if (VR && VR->hasStackStorage() && !isNotInCurrentFrame(VR, Ctx))
V.emplace_back(Region, VR);
return true;
}
@@ -336,27 +364,54 @@ void StackAddrEscapeChecker::checkEndFunction(const ReturnStmt *RS,
return;
if (!BT_stackleak)
- BT_stackleak = std::make_unique<BuiltinBug>(
- CheckNames[CK_StackAddrEscapeChecker],
- "Stack address stored into global variable",
- "Stack address was saved into a global variable. "
- "This is dangerous because the address will become "
- "invalid after returning from the function");
+ BT_stackleak =
+ std::make_unique<BugType>(CheckNames[CK_StackAddrEscapeChecker],
+ "Stack address stored into global variable");
for (const auto &P : Cb.V) {
+ const MemRegion *Referrer = P.first->getBaseRegion();
+ const MemRegion *Referred = P.second;
+
// Generate a report for this bug.
+ const StringRef CommonSuffix =
+ "upon returning to the caller. This will be a dangling reference";
SmallString<128> Buf;
llvm::raw_svector_ostream Out(Buf);
- SourceRange Range = genName(Out, P.second, Ctx.getASTContext());
- Out << " is still referred to by the ";
- if (isa<StaticGlobalSpaceRegion>(P.first->getMemorySpace()))
- Out << "static";
- else
- Out << "global";
- Out << " variable '";
- const VarRegion *VR = cast<VarRegion>(P.first->getBaseRegion());
- Out << *VR->getDecl()
- << "' upon returning to the caller. This will be a dangling reference";
+ const SourceRange Range = genName(Out, Referred, Ctx.getASTContext());
+
+ if (isa<CXXTempObjectRegion, CXXLifetimeExtendedObjectRegion>(Referrer)) {
+ Out << " is still referred to by a temporary object on the stack "
+ << CommonSuffix;
+ auto Report =
+ std::make_unique<PathSensitiveBugReport>(*BT_stackleak, Out.str(), N);
+ if (Range.isValid())
+ Report->addRange(Range);
+ Ctx.emitReport(std::move(Report));
+ return;
+ }
+
+ const StringRef ReferrerMemorySpace = [](const MemSpaceRegion *Space) {
+ if (isa<StaticGlobalSpaceRegion>(Space))
+ return "static";
+ if (isa<GlobalsSpaceRegion>(Space))
+ return "global";
+ assert(isa<StackSpaceRegion>(Space));
+ return "stack";
+ }(Referrer->getMemorySpace());
+
+ // We should really only have VarRegions here.
+ // Anything else is really surprising, and we should get notified if such
+ // ever happens.
+ const auto *ReferrerVar = dyn_cast<VarRegion>(Referrer);
+ if (!ReferrerVar) {
+ assert(false && "We should have a VarRegion here");
+ continue; // Defensively skip this one.
+ }
+ const std::string ReferrerVarName =
+ ReferrerVar->getDecl()->getDeclName().getAsString();
+
+ Out << " is still referred to by the " << ReferrerMemorySpace
+ << " variable '" << ReferrerVarName << "' " << CommonSuffix;
auto Report =
std::make_unique<PathSensitiveBugReport>(*BT_stackleak, Out.str(), N);
if (Range.isValid())
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
index e758b465af1b..fcd907a9bb0d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
@@ -38,17 +38,9 @@
// Non-pure functions, for which only partial improvement over the default
// behavior is expected, are modeled via check::PostCall, non-intrusively.
//
-// The following standard C functions are currently supported:
-//
-// fgetc getline isdigit isupper toascii
-// fread isalnum isgraph isxdigit
-// fwrite isalpha islower read
-// getc isascii isprint write
-// getchar isblank ispunct toupper
-// getdelim iscntrl isspace tolower
-//
//===----------------------------------------------------------------------===//
+#include "ErrnoModeling.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -57,9 +49,12 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/FormatVariadic.h"
+#include <optional>
#include <string>
using namespace clang;
@@ -72,58 +67,165 @@ class StdLibraryFunctionsChecker
class Summary;
/// Specify how much the analyzer engine should entrust modeling this function
- /// to us. If he doesn't, he performs additional invalidations.
- enum InvalidationKind { NoEvalCall, EvalCallAsPure };
+ /// to us.
+ enum InvalidationKind {
+ /// No \c eval::Call for the function, it can be modeled elsewhere.
+ /// This checker checks only pre and post conditions.
+ NoEvalCall,
+ /// The function is modeled completely in this checker.
+ EvalCallAsPure
+ };
+
+ /// Given a range, should the argument stay inside or outside this range?
+ enum RangeKind { OutOfRange, WithinRange };
- // The universal integral type to use in value range descriptions.
- // Unsigned to make sure overflows are well-defined.
+ static RangeKind negateKind(RangeKind K) {
+ switch (K) {
+ case OutOfRange:
+ return WithinRange;
+ case WithinRange:
+ return OutOfRange;
+ }
+ llvm_unreachable("Unknown range kind");
+ }
+
+ /// The universal integral type to use in value range descriptions.
+ /// Unsigned to make sure overflows are well-defined.
typedef uint64_t RangeInt;
- /// Normally, describes a single range constraint, eg. {{0, 1}, {3, 4}} is
- /// a non-negative integer, which less than 5 and not equal to 2. For
- /// `ComparesToArgument', holds information about how exactly to compare to
- /// the argument.
+ /// Describes a single range constraint. Eg. {{0, 1}, {3, 4}} is
+ /// a non-negative integer, which less than 5 and not equal to 2.
typedef std::vector<std::pair<RangeInt, RangeInt>> IntRangeVector;
/// A reference to an argument or return value by its number.
/// ArgNo in CallExpr and CallEvent is defined as Unsigned, but
/// obviously uint32_t should be enough for all practical purposes.
typedef uint32_t ArgNo;
+ /// Special argument number for specifying the return value.
static const ArgNo Ret;
- /// Returns the string representation of an argument index.
+ /// Get a string representation of an argument index.
/// E.g.: (1) -> '1st arg', (2) - > '2nd arg'
- static SmallString<8> getArgDesc(ArgNo);
+ static void printArgDesc(ArgNo, llvm::raw_ostream &Out);
+ /// Print value X of the argument in form " (which is X)",
+ /// if the value is a fixed known value, otherwise print nothing.
+ /// This is used as simple explanation of values if possible.
+ static void printArgValueInfo(ArgNo ArgN, ProgramStateRef State,
+ const CallEvent &Call, llvm::raw_ostream &Out);
+ /// Append textual description of a numeric range [RMin,RMax] to
+ /// \p Out.
+ static void appendInsideRangeDesc(llvm::APSInt RMin, llvm::APSInt RMax,
+ QualType ArgT, BasicValueFactory &BVF,
+ llvm::raw_ostream &Out);
+ /// Append textual description of a numeric range out of [RMin,RMax] to
+ /// \p Out.
+ static void appendOutOfRangeDesc(llvm::APSInt RMin, llvm::APSInt RMax,
+ QualType ArgT, BasicValueFactory &BVF,
+ llvm::raw_ostream &Out);
class ValueConstraint;
- // Pointer to the ValueConstraint. We need a copyable, polymorphic and
- // default initialize able type (vector needs that). A raw pointer was good,
- // however, we cannot default initialize that. unique_ptr makes the Summary
- // class non-copyable, therefore not an option. Releasing the copyability
- // requirement would render the initialization of the Summary map infeasible.
+ /// Pointer to the ValueConstraint. We need a copyable, polymorphic and
+ /// default initializable type (vector needs that). A raw pointer was good,
+ /// however, we cannot default initialize that. unique_ptr makes the Summary
+ /// class non-copyable, therefore not an option. Releasing the copyability
+ /// requirement would render the initialization of the Summary map infeasible.
+ /// Mind that a pointer to a new value constraint is created when the negate
+ /// function is used.
using ValueConstraintPtr = std::shared_ptr<ValueConstraint>;
/// Polymorphic base class that represents a constraint on a given argument
/// (or return value) of a function. Derived classes implement different kind
/// of constraints, e.g range constraints or correlation between two
/// arguments.
+ /// These are used as argument constraints (preconditions) of functions, in
+ /// which case a bug report may be emitted if the constraint is not satisfied.
+ /// Another use is as conditions for summary cases, to create different
+ /// classes of behavior for a function. In this case no description of the
+ /// constraint is needed because the summary cases have an own (not generated)
+ /// description string.
class ValueConstraint {
public:
ValueConstraint(ArgNo ArgN) : ArgN(ArgN) {}
virtual ~ValueConstraint() {}
+
/// Apply the effects of the constraint on the given program state. If null
/// is returned then the constraint is not feasible.
virtual ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
const Summary &Summary,
CheckerContext &C) const = 0;
+
+ /// Represents that in which context do we require a description of the
+ /// constraint.
+ enum DescriptionKind {
+ /// Describe a constraint that was violated.
+ /// Description should start with something like "should be".
+ Violation,
+ /// Describe a constraint that was assumed to be true.
+ /// This can be used when a precondition is satisfied, or when a summary
+ /// case is applied.
+ /// Description should start with something like "is".
+ Assumption
+ };
+
+ /// Give a description that explains the constraint to the user. Used when
+ /// a bug is reported or when the constraint is applied and displayed as a
+ /// note. The description should not mention the argument (getArgNo).
+ /// See StdLibraryFunctionsChecker::reportBug about how this function is
+ /// used (this function is used not only there).
+ virtual void describe(DescriptionKind DK, const CallEvent &Call,
+ ProgramStateRef State, const Summary &Summary,
+ llvm::raw_ostream &Out) const {
+ // There are some descendant classes that are not used as argument
+ // constraints, e.g. ComparisonConstraint. In that case we can safely
+ // ignore the implementation of this function.
+ llvm_unreachable(
+ "Description not implemented for summary case constraints");
+ }
+
+ /// Give a description that explains the actual argument value (where the
+ /// current ValueConstraint applies to) to the user. This function should be
+ /// called only when the current constraint is satisfied by the argument.
+ /// It should produce a more precise description than the constraint itself.
+ /// The actual value of the argument and the program state can be used to
+ /// make the description more precise. In the most simple case, if the
+ /// argument has a fixed known value this value can be printed into \p Out,
+ /// this is done by default.
+ /// The function should return true if a description was printed to \p Out,
+ /// otherwise false.
+ /// See StdLibraryFunctionsChecker::reportBug about how this function is
+ /// used.
+ virtual bool describeArgumentValue(const CallEvent &Call,
+ ProgramStateRef State,
+ const Summary &Summary,
+ llvm::raw_ostream &Out) const {
+ if (auto N = getArgSVal(Call, getArgNo()).getAs<NonLoc>()) {
+ if (const llvm::APSInt *Int = N->getAsInteger()) {
+ Out << *Int;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /// Return those arguments that should be tracked when we report a bug about
+ /// argument constraint violation. By default it is the argument that is
+ /// constrained, however, in some special cases we need to track other
+ /// arguments as well. E.g. a buffer size might be encoded in another
+ /// argument.
+ /// The "return value" argument number can not occur as returned value.
+ virtual std::vector<ArgNo> getArgsToTrack() const { return {ArgN}; }
+
+ /// Get a constraint that represents exactly the opposite of the current.
virtual ValueConstraintPtr negate() const {
llvm_unreachable("Not implemented");
};
- // Check whether the constraint is malformed or not. It is malformed if the
- // specified argument has a mismatch with the given FunctionDecl (e.g. the
- // arg number is out-of-range of the function's argument list).
+ /// Check whether the constraint is malformed or not. It is malformed if the
+ /// specified argument has a mismatch with the given FunctionDecl (e.g. the
+ /// arg number is out-of-range of the function's argument list).
+ /// This condition can indicate if a probably wrong or unexpected function
+ /// was found where the constraint is to be applied.
bool checkValidity(const FunctionDecl *FD) const {
const bool ValidArg = ArgN == Ret || ArgN < FD->getNumParams();
assert(ValidArg && "Arg out of range!");
@@ -132,95 +234,75 @@ class StdLibraryFunctionsChecker
// Subclasses may further refine the validation.
return checkSpecificValidity(FD);
}
- ArgNo getArgNo() const { return ArgN; }
- // Return those arguments that should be tracked when we report a bug. By
- // default it is the argument that is constrained, however, in some special
- // cases we need to track other arguments as well. E.g. a buffer size might
- // be encoded in another argument.
- virtual std::vector<ArgNo> getArgsToTrack() const { return {ArgN}; }
-
- virtual StringRef getName() const = 0;
-
- // Give a description that explains the constraint to the user. Used when
- // the bug is reported.
- virtual std::string describe(ProgramStateRef State,
- const Summary &Summary) const {
- // There are some descendant classes that are not used as argument
- // constraints, e.g. ComparisonConstraint. In that case we can safely
- // ignore the implementation of this function.
- llvm_unreachable("Not implemented");
- }
+ /// Return the argument number (may be placeholder for "return value").
+ ArgNo getArgNo() const { return ArgN; }
protected:
- ArgNo ArgN; // Argument to which we apply the constraint.
-
- /// Do polymorphic sanity check on the constraint.
+ /// Argument to which to apply the constraint. It can be a real argument of
+ /// the function to check, or a special value to indicate the return value
+ /// of the function.
+ /// Every constraint is assigned to one main argument, even if other
+ /// arguments are involved.
+ ArgNo ArgN;
+
+ /// Do constraint-specific validation check.
virtual bool checkSpecificValidity(const FunctionDecl *FD) const {
return true;
}
};
- /// Given a range, should the argument stay inside or outside this range?
- enum RangeKind { OutOfRange, WithinRange };
-
- /// Encapsulates a range on a single symbol.
+ /// Check if a single argument falls into a specific "range".
+ /// A range is formed as a set of intervals.
+ /// E.g. \code {['A', 'Z'], ['a', 'z'], ['_', '_']} \endcode
+ /// The intervals are closed intervals that contain one or more values.
+ ///
+ /// The default constructed RangeConstraint has an empty range, applying
+ /// such constraint does not involve any assumptions, thus the State remains
+ /// unchanged. This is meaningful, if the range is dependent on a looked up
+ /// type (e.g. [0, Socklen_tMax]). If the type is not found, then the range
+ /// is default initialized to be empty.
class RangeConstraint : public ValueConstraint {
+ /// The constraint can be specified by allowing or disallowing the range.
+ /// WithinRange indicates allowing the range, OutOfRange indicates
+ /// disallowing it (allowing the complementary range).
RangeKind Kind;
- // A range is formed as a set of intervals (sub-ranges).
- // E.g. {['A', 'Z'], ['a', 'z']}
- //
- // The default constructed RangeConstraint has an empty range set, applying
- // such constraint does not involve any assumptions, thus the State remains
- // unchanged. This is meaningful, if the range is dependent on a looked up
- // type (e.g. [0, Socklen_tMax]). If the type is not found, then the range
- // is default initialized to be empty.
+
+ /// A set of intervals.
IntRangeVector Ranges;
- public:
- StringRef getName() const override { return "Range"; }
- RangeConstraint(ArgNo ArgN, RangeKind Kind, const IntRangeVector &Ranges)
- : ValueConstraint(ArgN), Kind(Kind), Ranges(Ranges) {}
+ /// A textual description of this constraint for the specific case where the
+ /// constraint is used. If empty a generated description will be used that
+ /// is built from the range of the constraint.
+ StringRef Description;
- std::string describe(ProgramStateRef State,
- const Summary &Summary) const override;
+ public:
+ RangeConstraint(ArgNo ArgN, RangeKind Kind, const IntRangeVector &Ranges,
+ StringRef Desc = "")
+ : ValueConstraint(ArgN), Kind(Kind), Ranges(Ranges), Description(Desc) {
+ }
const IntRangeVector &getRanges() const { return Ranges; }
- private:
- ProgramStateRef applyAsOutOfRange(ProgramStateRef State,
- const CallEvent &Call,
- const Summary &Summary) const;
- ProgramStateRef applyAsWithinRange(ProgramStateRef State,
- const CallEvent &Call,
- const Summary &Summary) const;
-
- public:
ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
const Summary &Summary,
- CheckerContext &C) const override {
- switch (Kind) {
- case OutOfRange:
- return applyAsOutOfRange(State, Call, Summary);
- case WithinRange:
- return applyAsWithinRange(State, Call, Summary);
- }
- llvm_unreachable("Unknown range kind!");
- }
+ CheckerContext &C) const override;
+
+ void describe(DescriptionKind DK, const CallEvent &Call,
+ ProgramStateRef State, const Summary &Summary,
+ llvm::raw_ostream &Out) const override;
+
+ bool describeArgumentValue(const CallEvent &Call, ProgramStateRef State,
+ const Summary &Summary,
+ llvm::raw_ostream &Out) const override;
ValueConstraintPtr negate() const override {
RangeConstraint Tmp(*this);
- switch (Kind) {
- case OutOfRange:
- Tmp.Kind = WithinRange;
- break;
- case WithinRange:
- Tmp.Kind = OutOfRange;
- break;
- }
+ Tmp.Kind = negateKind(Kind);
return std::make_shared<RangeConstraint>(Tmp);
}
+ protected:
bool checkSpecificValidity(const FunctionDecl *FD) const override {
const bool ValidArg =
getArgType(FD, ArgN)->isIntegralType(FD->getASTContext());
@@ -228,14 +310,52 @@ class StdLibraryFunctionsChecker
"This constraint should be applied on an integral type");
return ValidArg;
}
+
+ private:
+ /// A callback function that is used when iterating over the range
+ /// intervals. It gets the begin and end (inclusive) of one interval.
+ /// This is used to make any kind of task possible that needs an iteration
+ /// over the intervals.
+ using RangeApplyFunction =
+ std::function<bool(const llvm::APSInt &Min, const llvm::APSInt &Max)>;
+
+ /// Call a function on the intervals of the range.
+ /// The function is called with all intervals in the range.
+ void applyOnWithinRange(BasicValueFactory &BVF, QualType ArgT,
+ const RangeApplyFunction &F) const;
+ /// Call a function on all intervals in the complementary range.
+ /// The function is called with all intervals that fall out of the range.
+ /// E.g. consider an interval list [A, B] and [C, D]
+ /// \code
+ /// -------+--------+------------------+------------+----------->
+ /// A B C D
+ /// \endcode
+ /// We get the ranges [-inf, A - 1], [D + 1, +inf], [B + 1, C - 1].
+ /// The \p ArgT is used to determine the min and max of the type that is
+ /// used as "-inf" and "+inf".
+ void applyOnOutOfRange(BasicValueFactory &BVF, QualType ArgT,
+ const RangeApplyFunction &F) const;
+ /// Call a function on the intervals of the range or the complementary
+ /// range.
+ void applyOnRange(RangeKind Kind, BasicValueFactory &BVF, QualType ArgT,
+ const RangeApplyFunction &F) const {
+ switch (Kind) {
+ case OutOfRange:
+ applyOnOutOfRange(BVF, ArgT, F);
+ break;
+ case WithinRange:
+ applyOnWithinRange(BVF, ArgT, F);
+ break;
+ };
+ }
};
+ /// Check relation of an argument to another.
class ComparisonConstraint : public ValueConstraint {
BinaryOperator::Opcode Opcode;
ArgNo OtherArgN;
public:
- virtual StringRef getName() const override { return "Comparison"; };
ComparisonConstraint(ArgNo ArgN, BinaryOperator::Opcode Opcode,
ArgNo OtherArgN)
: ValueConstraint(ArgN), Opcode(Opcode), OtherArgN(OtherArgN) {}
@@ -246,28 +366,27 @@ class StdLibraryFunctionsChecker
CheckerContext &C) const override;
};
+ /// Check null or non-null-ness of an argument that is of pointer type.
class NotNullConstraint : public ValueConstraint {
using ValueConstraint::ValueConstraint;
// This variable has a role when we negate the constraint.
bool CannotBeNull = true;
public:
- std::string describe(ProgramStateRef State,
- const Summary &Summary) const override;
- StringRef getName() const override { return "NonNull"; }
+ NotNullConstraint(ArgNo ArgN, bool CannotBeNull = true)
+ : ValueConstraint(ArgN), CannotBeNull(CannotBeNull) {}
+
ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
const Summary &Summary,
- CheckerContext &C) const override {
- SVal V = getArgSVal(Call, getArgNo());
- if (V.isUndef())
- return State;
+ CheckerContext &C) const override;
- DefinedOrUnknownSVal L = V.castAs<DefinedOrUnknownSVal>();
- if (!L.getAs<Loc>())
- return State;
+ void describe(DescriptionKind DK, const CallEvent &Call,
+ ProgramStateRef State, const Summary &Summary,
+ llvm::raw_ostream &Out) const override;
- return State->assume(L, CannotBeNull);
- }
+ bool describeArgumentValue(const CallEvent &Call, ProgramStateRef State,
+ const Summary &Summary,
+ llvm::raw_ostream &Out) const override;
ValueConstraintPtr negate() const override {
NotNullConstraint Tmp(*this);
@@ -275,6 +394,54 @@ class StdLibraryFunctionsChecker
return std::make_shared<NotNullConstraint>(Tmp);
}
+ protected:
+ bool checkSpecificValidity(const FunctionDecl *FD) const override {
+ const bool ValidArg = getArgType(FD, ArgN)->isPointerType();
+ assert(ValidArg &&
+ "This constraint should be applied only on a pointer type");
+ return ValidArg;
+ }
+ };
+
+ /// Check null or non-null-ness of an argument that is of pointer type.
+ /// The argument is meant to be a buffer that has a size constraint, and it
+ /// is allowed to have a NULL value if the size is 0. The size can depend on
+ /// 1 or 2 additional arguments, if one of these is 0 the buffer is allowed to
+ /// be NULL. This is useful for functions like `fread` which have this special
+ /// property.
+ class NotNullBufferConstraint : public ValueConstraint {
+ using ValueConstraint::ValueConstraint;
+ ArgNo SizeArg1N;
+ std::optional<ArgNo> SizeArg2N;
+ // This variable has a role when we negate the constraint.
+ bool CannotBeNull = true;
+
+ public:
+ NotNullBufferConstraint(ArgNo ArgN, ArgNo SizeArg1N,
+ std::optional<ArgNo> SizeArg2N,
+ bool CannotBeNull = true)
+ : ValueConstraint(ArgN), SizeArg1N(SizeArg1N), SizeArg2N(SizeArg2N),
+ CannotBeNull(CannotBeNull) {}
+
+ ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const Summary &Summary,
+ CheckerContext &C) const override;
+
+ void describe(DescriptionKind DK, const CallEvent &Call,
+ ProgramStateRef State, const Summary &Summary,
+ llvm::raw_ostream &Out) const override;
+
+ bool describeArgumentValue(const CallEvent &Call, ProgramStateRef State,
+ const Summary &Summary,
+ llvm::raw_ostream &Out) const override;
+
+ ValueConstraintPtr negate() const override {
+ NotNullBufferConstraint Tmp(*this);
+ Tmp.CannotBeNull = !this->CannotBeNull;
+ return std::make_shared<NotNullBufferConstraint>(Tmp);
+ }
+
+ protected:
bool checkSpecificValidity(const FunctionDecl *FD) const override {
const bool ValidArg = getArgType(FD, ArgN)->isPointerType();
assert(ValidArg &&
@@ -295,18 +462,17 @@ class StdLibraryFunctionsChecker
// // Here, ptr is the buffer, and its minimum size is `size * nmemb`.
class BufferSizeConstraint : public ValueConstraint {
// The concrete value which is the minimum size for the buffer.
- llvm::Optional<llvm::APSInt> ConcreteSize;
+ std::optional<llvm::APSInt> ConcreteSize;
// The argument which holds the size of the buffer.
- llvm::Optional<ArgNo> SizeArgN;
+ std::optional<ArgNo> SizeArgN;
// The argument which is a multiplier to size. This is set in case of
// `fread` like functions where the size is computed as a multiplication of
// two arguments.
- llvm::Optional<ArgNo> SizeMultiplierArgN;
+ std::optional<ArgNo> SizeMultiplierArgN;
// The operator we use in apply. This is negated in negate().
BinaryOperator::Opcode Op = BO_LE;
public:
- StringRef getName() const override { return "BufferSize"; }
BufferSizeConstraint(ArgNo Buffer, llvm::APSInt BufMinSize)
: ValueConstraint(Buffer), ConcreteSize(BufMinSize) {}
BufferSizeConstraint(ArgNo Buffer, ArgNo BufSize)
@@ -315,6 +481,18 @@ class StdLibraryFunctionsChecker
: ValueConstraint(Buffer), SizeArgN(BufSize),
SizeMultiplierArgN(BufSizeMultiplier) {}
+ ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const Summary &Summary,
+ CheckerContext &C) const override;
+
+ void describe(DescriptionKind DK, const CallEvent &Call,
+ ProgramStateRef State, const Summary &Summary,
+ llvm::raw_ostream &Out) const override;
+
+ bool describeArgumentValue(const CallEvent &Call, ProgramStateRef State,
+ const Summary &Summary,
+ llvm::raw_ostream &Out) const override;
+
std::vector<ArgNo> getArgsToTrack() const override {
std::vector<ArgNo> Result{ArgN};
if (SizeArgN)
@@ -324,57 +502,13 @@ class StdLibraryFunctionsChecker
return Result;
}
- std::string describe(ProgramStateRef State,
- const Summary &Summary) const override;
-
- ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
- const Summary &Summary,
- CheckerContext &C) const override {
- SValBuilder &SvalBuilder = C.getSValBuilder();
- // The buffer argument.
- SVal BufV = getArgSVal(Call, getArgNo());
-
- // Get the size constraint.
- const SVal SizeV = [this, &State, &Call, &Summary, &SvalBuilder]() {
- if (ConcreteSize) {
- return SVal(SvalBuilder.makeIntVal(*ConcreteSize));
- }
- assert(SizeArgN && "The constraint must be either a concrete value or "
- "encoded in an argument.");
- // The size argument.
- SVal SizeV = getArgSVal(Call, *SizeArgN);
- // Multiply with another argument if given.
- if (SizeMultiplierArgN) {
- SVal SizeMulV = getArgSVal(Call, *SizeMultiplierArgN);
- SizeV = SvalBuilder.evalBinOp(State, BO_Mul, SizeV, SizeMulV,
- Summary.getArgType(*SizeArgN));
- }
- return SizeV;
- }();
-
- // The dynamic size of the buffer argument, got from the analyzer engine.
- SVal BufDynSize = getDynamicExtentWithOffset(State, BufV);
-
- SVal Feasible = SvalBuilder.evalBinOp(State, Op, SizeV, BufDynSize,
- SvalBuilder.getContext().BoolTy);
- if (auto F = Feasible.getAs<DefinedOrUnknownSVal>())
- return State->assume(*F, true);
-
- // We can get here only if the size argument or the dynamic size is
- // undefined. But the dynamic size should never be undefined, only
- // unknown. So, here, the size of the argument is undefined, i.e. we
- // cannot apply the constraint. Actually, other checkers like
- // CallAndMessage should catch this situation earlier, because we call a
- // function with an uninitialized argument.
- llvm_unreachable("Size argument or the dynamic size is Undefined");
- }
-
ValueConstraintPtr negate() const override {
BufferSizeConstraint Tmp(*this);
Tmp.Op = BinaryOperator::negateComparisonOp(Op);
return std::make_shared<BufferSizeConstraint>(Tmp);
}
+ protected:
bool checkSpecificValidity(const FunctionDecl *FD) const override {
const bool ValidArg = getArgType(FD, ArgN)->isPointerType();
assert(ValidArg &&
@@ -384,10 +518,162 @@ class StdLibraryFunctionsChecker
};
/// The complete list of constraints that defines a single branch.
- typedef std::vector<ValueConstraintPtr> ConstraintSet;
+ using ConstraintSet = std::vector<ValueConstraintPtr>;
+
+ /// Define how a function affects the system variable 'errno'.
+ /// This works together with the \c ErrnoModeling and \c ErrnoChecker classes.
+ /// Currently 3 use cases exist: success, failure, irrelevant.
+ /// In the future the failure case can be customized to set \c errno to a
+ /// more specific constraint (for example > 0), or new case can be added
+ /// for functions which require check of \c errno in both success and failure
+ /// case.
+ class ErrnoConstraintBase {
+ public:
+ /// Apply specific state changes related to the errno variable.
+ virtual ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const Summary &Summary,
+ CheckerContext &C) const = 0;
+ /// Get a description about what happens with 'errno' here and how it causes
+ /// a later bug report created by ErrnoChecker.
+ /// Empty return value means that 'errno' related bug may not happen from
+ /// the current analyzed function.
+ virtual const std::string describe(CheckerContext &C) const { return ""; }
+
+ virtual ~ErrnoConstraintBase() {}
+
+ protected:
+ ErrnoConstraintBase() = default;
+
+ /// This is used for conjure symbol for errno to differentiate from the
+ /// original call expression (same expression is used for the errno symbol).
+ static int Tag;
+ };
+
+ /// Reset errno constraints to irrelevant.
+ /// This is applicable to functions that may change 'errno' and are not
+ /// modeled elsewhere.
+ class ResetErrnoConstraint : public ErrnoConstraintBase {
+ public:
+ ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const Summary &Summary,
+ CheckerContext &C) const override {
+ return errno_modeling::setErrnoState(State, errno_modeling::Irrelevant);
+ }
+ };
+
+ /// Do not change errno constraints.
+ /// This is applicable to functions that are modeled in another checker
+ /// and the already set errno constraints should not be changed in the
+ /// post-call event.
+ class NoErrnoConstraint : public ErrnoConstraintBase {
+ public:
+ ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const Summary &Summary,
+ CheckerContext &C) const override {
+ return State;
+ }
+ };
+
+ /// Set errno constraint at failure cases of standard functions.
+ /// Failure case: 'errno' becomes not equal to 0 and may or may not be checked
+ /// by the program. \c ErrnoChecker does not emit a bug report after such a
+ /// function call.
+ class FailureErrnoConstraint : public ErrnoConstraintBase {
+ public:
+ ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const Summary &Summary,
+ CheckerContext &C) const override {
+ SValBuilder &SVB = C.getSValBuilder();
+ NonLoc ErrnoSVal =
+ SVB.conjureSymbolVal(&Tag, Call.getOriginExpr(),
+ C.getLocationContext(), C.getASTContext().IntTy,
+ C.blockCount())
+ .castAs<NonLoc>();
+ return errno_modeling::setErrnoForStdFailure(State, C, ErrnoSVal);
+ }
+ };
+
+ /// Set errno constraint at success cases of standard functions.
+ /// Success case: 'errno' is not allowed to be used because the value is
+ /// undefined after successful call.
+ /// \c ErrnoChecker can emit bug report after such a function call if errno
+ /// is used.
+ class SuccessErrnoConstraint : public ErrnoConstraintBase {
+ public:
+ ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const Summary &Summary,
+ CheckerContext &C) const override {
+ return errno_modeling::setErrnoForStdSuccess(State, C);
+ }
+
+ const std::string describe(CheckerContext &C) const override {
+ return "'errno' becomes undefined after the call";
+ }
+ };
+
+ /// Set errno constraint at functions that indicate failure only with 'errno'.
+ /// In this case 'errno' is required to be observed.
+ /// \c ErrnoChecker can emit bug report after such a function call if errno
+ /// is overwritten without a read before.
+ class ErrnoMustBeCheckedConstraint : public ErrnoConstraintBase {
+ public:
+ ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const Summary &Summary,
+ CheckerContext &C) const override {
+ return errno_modeling::setErrnoStdMustBeChecked(State, C,
+ Call.getOriginExpr());
+ }
+
+ const std::string describe(CheckerContext &C) const override {
+ return "reading 'errno' is required to find out if the call has failed";
+ }
+ };
+
+ /// A single branch of a function summary.
+ ///
+ /// A branch is defined by a series of constraints - "assumptions" -
+ /// that together form a single possible outcome of invoking the function.
+ /// When static analyzer considers a branch, it tries to introduce
+ /// a child node in the Exploded Graph. The child node has to include
+ /// constraints that define the branch. If the constraints contradict
+ /// existing constraints in the state, the node is not created and the branch
+ /// is dropped; otherwise it's queued for future exploration.
+ /// The branch is accompanied by a note text that may be displayed
+ /// to the user when a bug is found on a path that takes this branch.
+ ///
+ /// For example, consider the branches in `isalpha(x)`:
+ /// Branch 1)
+ /// x is in range ['A', 'Z'] or in ['a', 'z']
+ /// then the return value is not 0. (I.e. out-of-range [0, 0])
+ /// and the note may say "Assuming the character is alphabetical"
+ /// Branch 2)
+ /// x is out-of-range ['A', 'Z'] and out-of-range ['a', 'z']
+ /// then the return value is 0
+ /// and the note may say "Assuming the character is non-alphabetical".
+ class SummaryCase {
+ ConstraintSet Constraints;
+ const ErrnoConstraintBase &ErrnoConstraint;
+ StringRef Note;
+
+ public:
+ SummaryCase(ConstraintSet &&Constraints, const ErrnoConstraintBase &ErrnoC,
+ StringRef Note)
+ : Constraints(std::move(Constraints)), ErrnoConstraint(ErrnoC),
+ Note(Note) {}
+
+ SummaryCase(const ConstraintSet &Constraints,
+ const ErrnoConstraintBase &ErrnoC, StringRef Note)
+ : Constraints(Constraints), ErrnoConstraint(ErrnoC), Note(Note) {}
+
+ const ConstraintSet &getConstraints() const { return Constraints; }
+ const ErrnoConstraintBase &getErrnoConstraint() const {
+ return ErrnoConstraint;
+ }
+ StringRef getNote() const { return Note; }
+ };
- using ArgTypes = std::vector<Optional<QualType>>;
- using RetType = Optional<QualType>;
+ using ArgTypes = std::vector<std::optional<QualType>>;
+ using RetType = std::optional<QualType>;
// A placeholder type, we use it whenever we do not care about the concrete
// type in a Signature.
@@ -409,7 +695,7 @@ class StdLibraryFunctionsChecker
// Construct a signature from optional types. If any of the optional types
// are not set then the signature will be invalid.
Signature(ArgTypes ArgTys, RetType RetTy) {
- for (Optional<QualType> Arg : ArgTys) {
+ for (std::optional<QualType> Arg : ArgTys) {
if (!Arg) {
Invalid = true;
return;
@@ -451,23 +737,12 @@ class StdLibraryFunctionsChecker
return T;
}
- using Cases = std::vector<ConstraintSet>;
+ using SummaryCases = std::vector<SummaryCase>;
/// A summary includes information about
/// * function prototype (signature)
/// * approach to invalidation,
- /// * a list of branches - a list of list of ranges -
- /// A branch represents a path in the exploded graph of a function (which
- /// is a tree). So, a branch is a series of assumptions. In other words,
- /// branches represent split states and additional assumptions on top of
- /// the splitting assumption.
- /// For example, consider the branches in `isalpha(x)`
- /// Branch 1)
- /// x is in range ['A', 'Z'] or in ['a', 'z']
- /// then the return value is not 0. (I.e. out-of-range [0, 0])
- /// Branch 2)
- /// x is out-of-range ['A', 'Z'] and out-of-range ['a', 'z']
- /// then the return value is 0.
+ /// * a list of branches - so, a list of list of ranges,
/// * a list of argument constraints, that must be true on every branch.
/// If these constraints are not satisfied that means a fatal error
/// usually resulting in undefined behaviour.
@@ -482,7 +757,7 @@ class StdLibraryFunctionsChecker
/// signature is matched.
class Summary {
const InvalidationKind InvalidationKd;
- Cases CaseConstraints;
+ SummaryCases Cases;
ConstraintSet ArgConstraints;
// The function to which the summary applies. This is set after lookup and
@@ -492,12 +767,14 @@ class StdLibraryFunctionsChecker
public:
Summary(InvalidationKind InvalidationKd) : InvalidationKd(InvalidationKd) {}
- Summary &Case(ConstraintSet &&CS) {
- CaseConstraints.push_back(std::move(CS));
+ Summary &Case(ConstraintSet &&CS, const ErrnoConstraintBase &ErrnoC,
+ StringRef Note = "") {
+ Cases.push_back(SummaryCase(std::move(CS), ErrnoC, Note));
return *this;
}
- Summary &Case(const ConstraintSet &CS) {
- CaseConstraints.push_back(CS);
+ Summary &Case(const ConstraintSet &CS, const ErrnoConstraintBase &ErrnoC,
+ StringRef Note = "") {
+ Cases.push_back(SummaryCase(CS, ErrnoC, Note));
return *this;
}
Summary &ArgConstraint(ValueConstraintPtr VC) {
@@ -508,7 +785,7 @@ class StdLibraryFunctionsChecker
}
InvalidationKind getInvalidationKd() const { return InvalidationKd; }
- const Cases &getCaseConstraints() const { return CaseConstraints; }
+ const SummaryCases &getCases() const { return Cases; }
const ConstraintSet &getArgConstraints() const { return ArgConstraints; }
QualType getArgType(ArgNo ArgN) const {
@@ -527,11 +804,11 @@ class StdLibraryFunctionsChecker
}
private:
- // Once we know the exact type of the function then do sanity check on all
- // the given constraints.
+ // Once we know the exact type of the function then do validation check on
+ // all the given constraints.
bool validateByConstraints(const FunctionDecl *FD) const {
- for (const ConstraintSet &Case : CaseConstraints)
- for (const ValueConstraintPtr &Constraint : Case)
+ for (const SummaryCase &Case : Cases)
+ for (const ValueConstraintPtr &Constraint : Case.getConstraints())
if (!Constraint->checkValidity(FD))
return false;
for (const ValueConstraintPtr &Constraint : ArgConstraints)
@@ -546,236 +823,310 @@ class StdLibraryFunctionsChecker
using FunctionSummaryMapType = llvm::DenseMap<const FunctionDecl *, Summary>;
mutable FunctionSummaryMapType FunctionSummaryMap;
- mutable std::unique_ptr<BugType> BT_InvalidArg;
+ const BugType BT_InvalidArg{this, "Function call with invalid argument"};
mutable bool SummariesInitialized = false;
static SVal getArgSVal(const CallEvent &Call, ArgNo ArgN) {
return ArgN == Ret ? Call.getReturnValue() : Call.getArgSVal(ArgN);
}
+ static std::string getFunctionName(const CallEvent &Call) {
+ assert(Call.getDecl() &&
+ "Call was found by a summary, should have declaration");
+ return cast<NamedDecl>(Call.getDecl())->getNameAsString();
+ }
public:
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
- enum CheckKind {
- CK_StdCLibraryFunctionArgsChecker,
- CK_StdCLibraryFunctionsTesterChecker,
- CK_NumCheckKinds
- };
- DefaultBool ChecksEnabled[CK_NumCheckKinds];
- CheckerNameRef CheckNames[CK_NumCheckKinds];
+ CheckerNameRef CheckName;
+ bool AddTestFunctions = false;
bool DisplayLoadedSummaries = false;
bool ModelPOSIX = false;
+ bool ShouldAssumeControlledEnvironment = false;
private:
- Optional<Summary> findFunctionSummary(const FunctionDecl *FD,
- CheckerContext &C) const;
- Optional<Summary> findFunctionSummary(const CallEvent &Call,
- CheckerContext &C) const;
+ std::optional<Summary> findFunctionSummary(const FunctionDecl *FD,
+ CheckerContext &C) const;
+ std::optional<Summary> findFunctionSummary(const CallEvent &Call,
+ CheckerContext &C) const;
void initFunctionSummaries(CheckerContext &C) const;
void reportBug(const CallEvent &Call, ExplodedNode *N,
- const ValueConstraint *VC, const Summary &Summary,
- CheckerContext &C) const {
- if (!ChecksEnabled[CK_StdCLibraryFunctionArgsChecker])
- return;
- std::string Msg =
- (Twine("Function argument constraint is not satisfied, constraint: ") +
- VC->getName().data())
- .str();
- if (!BT_InvalidArg)
- BT_InvalidArg = std::make_unique<BugType>(
- CheckNames[CK_StdCLibraryFunctionArgsChecker],
- "Unsatisfied argument constraints", categories::LogicError);
- auto R = std::make_unique<PathSensitiveBugReport>(*BT_InvalidArg, Msg, N);
-
- for (ArgNo ArgN : VC->getArgsToTrack())
+ const ValueConstraint *VC, const ValueConstraint *NegatedVC,
+ const Summary &Summary, CheckerContext &C) const {
+ assert(Call.getDecl() &&
+ "Function found in summary must have a declaration available");
+ SmallString<256> Msg;
+ llvm::raw_svector_ostream MsgOs(Msg);
+
+ MsgOs << "The ";
+ printArgDesc(VC->getArgNo(), MsgOs);
+ MsgOs << " to '" << getFunctionName(Call) << "' ";
+ bool ValuesPrinted =
+ NegatedVC->describeArgumentValue(Call, N->getState(), Summary, MsgOs);
+ if (ValuesPrinted)
+ MsgOs << " but ";
+ else
+ MsgOs << "is out of the accepted range; It ";
+ VC->describe(ValueConstraint::Violation, Call, C.getState(), Summary,
+ MsgOs);
+ Msg[0] = toupper(Msg[0]);
+ auto R = std::make_unique<PathSensitiveBugReport>(BT_InvalidArg, Msg, N);
+
+ for (ArgNo ArgN : VC->getArgsToTrack()) {
bugreporter::trackExpressionValue(N, Call.getArgExpr(ArgN), *R);
-
- // Highlight the range of the argument that was violated.
- R->addRange(Call.getArgSourceRange(VC->getArgNo()));
-
- // Describe the argument constraint in a note.
- R->addNote(VC->describe(C.getState(), Summary), R->getLocation(),
- Call.getArgSourceRange(VC->getArgNo()));
+ R->markInteresting(Call.getArgSVal(ArgN));
+ // All tracked arguments are important, highlight them.
+ R->addRange(Call.getArgSourceRange(ArgN));
+ }
C.emitReport(std::move(R));
}
+
+ /// These are the errno constraints that can be passed to summary cases.
+ /// One of these should fit for a single summary case.
+ /// Usually if a failure return value exists for function, that function
+ /// needs different cases for success and failure with different errno
+ /// constraints (and different return value constraints).
+ const NoErrnoConstraint ErrnoUnchanged{};
+ const ResetErrnoConstraint ErrnoIrrelevant{};
+ const ErrnoMustBeCheckedConstraint ErrnoMustBeChecked{};
+ const SuccessErrnoConstraint ErrnoMustNotBeChecked{};
+ const FailureErrnoConstraint ErrnoNEZeroIrrelevant{};
};
+int StdLibraryFunctionsChecker::ErrnoConstraintBase::Tag = 0;
+
const StdLibraryFunctionsChecker::ArgNo StdLibraryFunctionsChecker::Ret =
std::numeric_limits<ArgNo>::max();
-} // end of anonymous namespace
-
static BasicValueFactory &getBVF(ProgramStateRef State) {
ProgramStateManager &Mgr = State->getStateManager();
SValBuilder &SVB = Mgr.getSValBuilder();
return SVB.getBasicValueFactory();
}
-std::string StdLibraryFunctionsChecker::NotNullConstraint::describe(
- ProgramStateRef State, const Summary &Summary) const {
- SmallString<48> Result;
- Result += "The ";
- Result += getArgDesc(ArgN);
- Result += " should not be NULL";
- return Result.c_str();
-}
+} // end of anonymous namespace
-std::string StdLibraryFunctionsChecker::RangeConstraint::describe(
- ProgramStateRef State, const Summary &Summary) const {
+void StdLibraryFunctionsChecker::printArgDesc(
+ StdLibraryFunctionsChecker::ArgNo ArgN, llvm::raw_ostream &Out) {
+ Out << std::to_string(ArgN + 1);
+ Out << llvm::getOrdinalSuffix(ArgN + 1);
+ Out << " argument";
+}
- BasicValueFactory &BVF = getBVF(State);
+void StdLibraryFunctionsChecker::printArgValueInfo(ArgNo ArgN,
+ ProgramStateRef State,
+ const CallEvent &Call,
+ llvm::raw_ostream &Out) {
+ if (const llvm::APSInt *Val =
+ State->getStateManager().getSValBuilder().getKnownValue(
+ State, getArgSVal(Call, ArgN)))
+ Out << " (which is " << *Val << ")";
+}
- QualType T = Summary.getArgType(getArgNo());
- SmallString<48> Result;
- Result += "The ";
- Result += getArgDesc(ArgN);
- Result += " should be ";
-
- // Range kind as a string.
- Kind == OutOfRange ? Result += "out of" : Result += "within";
-
- // Get the range values as a string.
- Result += " the range ";
- if (Ranges.size() > 1)
- Result += "[";
- unsigned I = Ranges.size();
- for (const std::pair<RangeInt, RangeInt> &R : Ranges) {
- Result += "[";
- const llvm::APSInt &Min = BVF.getValue(R.first, T);
- const llvm::APSInt &Max = BVF.getValue(R.second, T);
- Min.toString(Result);
- Result += ", ";
- Max.toString(Result);
- Result += "]";
- if (--I > 0)
- Result += ", ";
+void StdLibraryFunctionsChecker::appendInsideRangeDesc(llvm::APSInt RMin,
+ llvm::APSInt RMax,
+ QualType ArgT,
+ BasicValueFactory &BVF,
+ llvm::raw_ostream &Out) {
+ if (RMin.isZero() && RMax.isZero())
+ Out << "zero";
+ else if (RMin == RMax)
+ Out << RMin;
+ else if (RMin == BVF.getMinValue(ArgT)) {
+ if (RMax == -1)
+ Out << "< 0";
+ else
+ Out << "<= " << RMax;
+ } else if (RMax == BVF.getMaxValue(ArgT)) {
+ if (RMin.isOne())
+ Out << "> 0";
+ else
+ Out << ">= " << RMin;
+ } else if (RMin.isNegative() == RMax.isNegative() &&
+ RMin.getLimitedValue() == RMax.getLimitedValue() - 1) {
+ Out << RMin << " or " << RMax;
+ } else {
+ Out << "between " << RMin << " and " << RMax;
}
- if (Ranges.size() > 1)
- Result += "]";
-
- return Result.c_str();
}
-SmallString<8>
-StdLibraryFunctionsChecker::getArgDesc(StdLibraryFunctionsChecker::ArgNo ArgN) {
- SmallString<8> Result;
- Result += std::to_string(ArgN + 1);
- Result += llvm::getOrdinalSuffix(ArgN + 1);
- Result += " arg";
- return Result;
+void StdLibraryFunctionsChecker::appendOutOfRangeDesc(llvm::APSInt RMin,
+ llvm::APSInt RMax,
+ QualType ArgT,
+ BasicValueFactory &BVF,
+ llvm::raw_ostream &Out) {
+ if (RMin.isZero() && RMax.isZero())
+ Out << "nonzero";
+ else if (RMin == RMax) {
+ Out << "not equal to " << RMin;
+ } else if (RMin == BVF.getMinValue(ArgT)) {
+ if (RMax == -1)
+ Out << ">= 0";
+ else
+ Out << "> " << RMax;
+ } else if (RMax == BVF.getMaxValue(ArgT)) {
+ if (RMin.isOne())
+ Out << "<= 0";
+ else
+ Out << "< " << RMin;
+ } else if (RMin.isNegative() == RMax.isNegative() &&
+ RMin.getLimitedValue() == RMax.getLimitedValue() - 1) {
+ Out << "not " << RMin << " and not " << RMax;
+ } else {
+ Out << "not between " << RMin << " and " << RMax;
+ }
}
-std::string StdLibraryFunctionsChecker::BufferSizeConstraint::describe(
- ProgramStateRef State, const Summary &Summary) const {
- SmallString<96> Result;
- Result += "The size of the ";
- Result += getArgDesc(ArgN);
- Result += " should be equal to or less than the value of ";
- if (ConcreteSize) {
- ConcreteSize->toString(Result);
- } else if (SizeArgN) {
- Result += "the ";
- Result += getArgDesc(*SizeArgN);
- if (SizeMultiplierArgN) {
- Result += " times the ";
- Result += getArgDesc(*SizeMultiplierArgN);
- }
+void StdLibraryFunctionsChecker::RangeConstraint::applyOnWithinRange(
+ BasicValueFactory &BVF, QualType ArgT, const RangeApplyFunction &F) const {
+ if (Ranges.empty())
+ return;
+
+ for (auto [Start, End] : getRanges()) {
+ const llvm::APSInt &Min = BVF.getValue(Start, ArgT);
+ const llvm::APSInt &Max = BVF.getValue(End, ArgT);
+ assert(Min <= Max);
+ if (!F(Min, Max))
+ return;
}
- return Result.c_str();
}
-ProgramStateRef StdLibraryFunctionsChecker::RangeConstraint::applyAsOutOfRange(
- ProgramStateRef State, const CallEvent &Call,
- const Summary &Summary) const {
+void StdLibraryFunctionsChecker::RangeConstraint::applyOnOutOfRange(
+ BasicValueFactory &BVF, QualType ArgT, const RangeApplyFunction &F) const {
if (Ranges.empty())
- return State;
+ return;
- ProgramStateManager &Mgr = State->getStateManager();
- SValBuilder &SVB = Mgr.getSValBuilder();
- BasicValueFactory &BVF = SVB.getBasicValueFactory();
- ConstraintManager &CM = Mgr.getConstraintManager();
- QualType T = Summary.getArgType(getArgNo());
+ const IntRangeVector &R = getRanges();
+ size_t E = R.size();
+
+ const llvm::APSInt &MinusInf = BVF.getMinValue(ArgT);
+ const llvm::APSInt &PlusInf = BVF.getMaxValue(ArgT);
+
+ const llvm::APSInt &RangeLeft = BVF.getValue(R[0].first - 1ULL, ArgT);
+ const llvm::APSInt &RangeRight = BVF.getValue(R[E - 1].second + 1ULL, ArgT);
+
+ // Iterate over the "holes" between intervals.
+ for (size_t I = 1; I != E; ++I) {
+ const llvm::APSInt &Min = BVF.getValue(R[I - 1].second + 1ULL, ArgT);
+ const llvm::APSInt &Max = BVF.getValue(R[I].first - 1ULL, ArgT);
+ if (Min <= Max) {
+ if (!F(Min, Max))
+ return;
+ }
+ }
+ // Check the interval [T_MIN, min(R) - 1].
+ if (RangeLeft != PlusInf) {
+ assert(MinusInf <= RangeLeft);
+ if (!F(MinusInf, RangeLeft))
+ return;
+ }
+ // Check the interval [max(R) + 1, T_MAX],
+ if (RangeRight != MinusInf) {
+ assert(RangeRight <= PlusInf);
+ if (!F(RangeRight, PlusInf))
+ return;
+ }
+}
+
+ProgramStateRef StdLibraryFunctionsChecker::RangeConstraint::apply(
+ ProgramStateRef State, const CallEvent &Call, const Summary &Summary,
+ CheckerContext &C) const {
+ ConstraintManager &CM = C.getConstraintManager();
SVal V = getArgSVal(Call, getArgNo());
+ QualType T = Summary.getArgType(getArgNo());
if (auto N = V.getAs<NonLoc>()) {
- const IntRangeVector &R = getRanges();
- size_t E = R.size();
- for (size_t I = 0; I != E; ++I) {
- const llvm::APSInt &Min = BVF.getValue(R[I].first, T);
- const llvm::APSInt &Max = BVF.getValue(R[I].second, T);
- assert(Min <= Max);
+ auto ExcludeRangeFromArg = [&](const llvm::APSInt &Min,
+ const llvm::APSInt &Max) {
State = CM.assumeInclusiveRange(State, *N, Min, Max, false);
- if (!State)
- break;
- }
+ return static_cast<bool>(State);
+ };
+ // "OutOfRange R" is handled by excluding all ranges in R.
+ // "WithinRange R" is treated as "OutOfRange [T_MIN, T_MAX] \ R".
+ applyOnRange(negateKind(Kind), C.getSValBuilder().getBasicValueFactory(), T,
+ ExcludeRangeFromArg);
}
return State;
}
-ProgramStateRef StdLibraryFunctionsChecker::RangeConstraint::applyAsWithinRange(
- ProgramStateRef State, const CallEvent &Call,
- const Summary &Summary) const {
- if (Ranges.empty())
- return State;
+void StdLibraryFunctionsChecker::RangeConstraint::describe(
+ DescriptionKind DK, const CallEvent &Call, ProgramStateRef State,
+ const Summary &Summary, llvm::raw_ostream &Out) const {
+
+ BasicValueFactory &BVF = getBVF(State);
+ QualType T = Summary.getArgType(getArgNo());
+
+ Out << ((DK == Violation) ? "should be " : "is ");
+ if (!Description.empty()) {
+ Out << Description;
+ } else {
+ unsigned I = Ranges.size();
+ if (Kind == WithinRange) {
+ for (const std::pair<RangeInt, RangeInt> &R : Ranges) {
+ appendInsideRangeDesc(BVF.getValue(R.first, T),
+ BVF.getValue(R.second, T), T, BVF, Out);
+ if (--I > 0)
+ Out << " or ";
+ }
+ } else {
+ for (const std::pair<RangeInt, RangeInt> &R : Ranges) {
+ appendOutOfRangeDesc(BVF.getValue(R.first, T),
+ BVF.getValue(R.second, T), T, BVF, Out);
+ if (--I > 0)
+ Out << " and ";
+ }
+ }
+ }
+}
+
+bool StdLibraryFunctionsChecker::RangeConstraint::describeArgumentValue(
+ const CallEvent &Call, ProgramStateRef State, const Summary &Summary,
+ llvm::raw_ostream &Out) const {
+ unsigned int NRanges = 0;
+ bool HaveAllRanges = true;
ProgramStateManager &Mgr = State->getStateManager();
- SValBuilder &SVB = Mgr.getSValBuilder();
- BasicValueFactory &BVF = SVB.getBasicValueFactory();
+ BasicValueFactory &BVF = Mgr.getSValBuilder().getBasicValueFactory();
ConstraintManager &CM = Mgr.getConstraintManager();
- QualType T = Summary.getArgType(getArgNo());
SVal V = getArgSVal(Call, getArgNo());
- // "WithinRange R" is treated as "outside [T_MIN, T_MAX] \ R".
- // We cut off [T_MIN, min(R) - 1] and [max(R) + 1, T_MAX] if necessary,
- // and then cut away all holes in R one by one.
- //
- // E.g. consider a range list R as [A, B] and [C, D]
- // -------+--------+------------------+------------+----------->
- // A B C D
- // Then we assume that the value is not in [-inf, A - 1],
- // then not in [D + 1, +inf], then not in [B + 1, C - 1]
if (auto N = V.getAs<NonLoc>()) {
- const IntRangeVector &R = getRanges();
- size_t E = R.size();
-
- const llvm::APSInt &MinusInf = BVF.getMinValue(T);
- const llvm::APSInt &PlusInf = BVF.getMaxValue(T);
-
- const llvm::APSInt &Left = BVF.getValue(R[0].first - 1ULL, T);
- if (Left != PlusInf) {
- assert(MinusInf <= Left);
- State = CM.assumeInclusiveRange(State, *N, MinusInf, Left, false);
- if (!State)
- return nullptr;
- }
-
- const llvm::APSInt &Right = BVF.getValue(R[E - 1].second + 1ULL, T);
- if (Right != MinusInf) {
- assert(Right <= PlusInf);
- State = CM.assumeInclusiveRange(State, *N, Right, PlusInf, false);
- if (!State)
- return nullptr;
+ if (const llvm::APSInt *Int = N->getAsInteger()) {
+ Out << "is ";
+ Out << *Int;
+ return true;
}
-
- for (size_t I = 1; I != E; ++I) {
- const llvm::APSInt &Min = BVF.getValue(R[I - 1].second + 1ULL, T);
- const llvm::APSInt &Max = BVF.getValue(R[I].first - 1ULL, T);
- if (Min <= Max) {
- State = CM.assumeInclusiveRange(State, *N, Min, Max, false);
- if (!State)
- return nullptr;
+ QualType T = Summary.getArgType(getArgNo());
+ SmallString<128> MoreInfo;
+ llvm::raw_svector_ostream MoreInfoOs(MoreInfo);
+ auto ApplyF = [&](const llvm::APSInt &Min, const llvm::APSInt &Max) {
+ if (CM.assumeInclusiveRange(State, *N, Min, Max, true)) {
+ if (NRanges > 0)
+ MoreInfoOs << " or ";
+ appendInsideRangeDesc(Min, Max, T, BVF, MoreInfoOs);
+ ++NRanges;
+ } else {
+ HaveAllRanges = false;
}
+ return true;
+ };
+
+ applyOnRange(Kind, BVF, T, ApplyF);
+ assert(NRanges > 0);
+ if (!HaveAllRanges || NRanges == 1) {
+ Out << "is ";
+ Out << MoreInfo;
+ return true;
}
}
-
- return State;
+ return false;
}
ProgramStateRef StdLibraryFunctionsChecker::ComparisonConstraint::apply(
@@ -800,9 +1151,165 @@ ProgramStateRef StdLibraryFunctionsChecker::ComparisonConstraint::apply(
return State;
}
+ProgramStateRef StdLibraryFunctionsChecker::NotNullConstraint::apply(
+ ProgramStateRef State, const CallEvent &Call, const Summary &Summary,
+ CheckerContext &C) const {
+ SVal V = getArgSVal(Call, getArgNo());
+ if (V.isUndef())
+ return State;
+
+ DefinedOrUnknownSVal L = V.castAs<DefinedOrUnknownSVal>();
+ if (!isa<Loc>(L))
+ return State;
+
+ return State->assume(L, CannotBeNull);
+}
+
+void StdLibraryFunctionsChecker::NotNullConstraint::describe(
+ DescriptionKind DK, const CallEvent &Call, ProgramStateRef State,
+ const Summary &Summary, llvm::raw_ostream &Out) const {
+ assert(CannotBeNull &&
+ "Describe should not be used when the value must be NULL");
+ if (DK == Violation)
+ Out << "should not be NULL";
+ else
+ Out << "is not NULL";
+}
+
+bool StdLibraryFunctionsChecker::NotNullConstraint::describeArgumentValue(
+ const CallEvent &Call, ProgramStateRef State, const Summary &Summary,
+ llvm::raw_ostream &Out) const {
+ assert(!CannotBeNull && "This function is used when the value is NULL");
+ Out << "is NULL";
+ return true;
+}
+
+ProgramStateRef StdLibraryFunctionsChecker::NotNullBufferConstraint::apply(
+ ProgramStateRef State, const CallEvent &Call, const Summary &Summary,
+ CheckerContext &C) const {
+ SVal V = getArgSVal(Call, getArgNo());
+ if (V.isUndef())
+ return State;
+ DefinedOrUnknownSVal L = V.castAs<DefinedOrUnknownSVal>();
+ if (!isa<Loc>(L))
+ return State;
+
+ std::optional<DefinedOrUnknownSVal> SizeArg1 =
+ getArgSVal(Call, SizeArg1N).getAs<DefinedOrUnknownSVal>();
+ std::optional<DefinedOrUnknownSVal> SizeArg2;
+ if (SizeArg2N)
+ SizeArg2 = getArgSVal(Call, *SizeArg2N).getAs<DefinedOrUnknownSVal>();
+
+ auto IsArgZero = [State](std::optional<DefinedOrUnknownSVal> Val) {
+ if (!Val)
+ return false;
+ auto [IsNonNull, IsNull] = State->assume(*Val);
+ return IsNull && !IsNonNull;
+ };
+
+ if (IsArgZero(SizeArg1) || IsArgZero(SizeArg2))
+ return State;
+
+ return State->assume(L, CannotBeNull);
+}
+
+void StdLibraryFunctionsChecker::NotNullBufferConstraint::describe(
+ DescriptionKind DK, const CallEvent &Call, ProgramStateRef State,
+ const Summary &Summary, llvm::raw_ostream &Out) const {
+ assert(CannotBeNull &&
+ "Describe should not be used when the value must be NULL");
+ if (DK == Violation)
+ Out << "should not be NULL";
+ else
+ Out << "is not NULL";
+}
+
+bool StdLibraryFunctionsChecker::NotNullBufferConstraint::describeArgumentValue(
+ const CallEvent &Call, ProgramStateRef State, const Summary &Summary,
+ llvm::raw_ostream &Out) const {
+ assert(!CannotBeNull && "This function is used when the value is NULL");
+ Out << "is NULL";
+ return true;
+}
+
+ProgramStateRef StdLibraryFunctionsChecker::BufferSizeConstraint::apply(
+ ProgramStateRef State, const CallEvent &Call, const Summary &Summary,
+ CheckerContext &C) const {
+ SValBuilder &SvalBuilder = C.getSValBuilder();
+ // The buffer argument.
+ SVal BufV = getArgSVal(Call, getArgNo());
+
+ // Get the size constraint.
+ const SVal SizeV = [this, &State, &Call, &Summary, &SvalBuilder]() {
+ if (ConcreteSize) {
+ return SVal(SvalBuilder.makeIntVal(*ConcreteSize));
+ }
+ assert(SizeArgN && "The constraint must be either a concrete value or "
+ "encoded in an argument.");
+ // The size argument.
+ SVal SizeV = getArgSVal(Call, *SizeArgN);
+ // Multiply with another argument if given.
+ if (SizeMultiplierArgN) {
+ SVal SizeMulV = getArgSVal(Call, *SizeMultiplierArgN);
+ SizeV = SvalBuilder.evalBinOp(State, BO_Mul, SizeV, SizeMulV,
+ Summary.getArgType(*SizeArgN));
+ }
+ return SizeV;
+ }();
+
+ // The dynamic size of the buffer argument, got from the analyzer engine.
+ SVal BufDynSize = getDynamicExtentWithOffset(State, BufV);
+
+ SVal Feasible = SvalBuilder.evalBinOp(State, Op, SizeV, BufDynSize,
+ SvalBuilder.getContext().BoolTy);
+ if (auto F = Feasible.getAs<DefinedOrUnknownSVal>())
+ return State->assume(*F, true);
+
+ // We can get here only if the size argument or the dynamic size is
+ // undefined. But the dynamic size should never be undefined, only
+ // unknown. So, here, the size of the argument is undefined, i.e. we
+ // cannot apply the constraint. Actually, other checkers like
+ // CallAndMessage should catch this situation earlier, because we call a
+ // function with an uninitialized argument.
+ llvm_unreachable("Size argument or the dynamic size is Undefined");
+}
+
+void StdLibraryFunctionsChecker::BufferSizeConstraint::describe(
+ DescriptionKind DK, const CallEvent &Call, ProgramStateRef State,
+ const Summary &Summary, llvm::raw_ostream &Out) const {
+ Out << ((DK == Violation) ? "should be " : "is ");
+ Out << "a buffer with size equal to or greater than ";
+ if (ConcreteSize) {
+ Out << *ConcreteSize;
+ } else if (SizeArgN) {
+ Out << "the value of the ";
+ printArgDesc(*SizeArgN, Out);
+ printArgValueInfo(*SizeArgN, State, Call, Out);
+ if (SizeMultiplierArgN) {
+ Out << " times the ";
+ printArgDesc(*SizeMultiplierArgN, Out);
+ printArgValueInfo(*SizeMultiplierArgN, State, Call, Out);
+ }
+ }
+}
+
+bool StdLibraryFunctionsChecker::BufferSizeConstraint::describeArgumentValue(
+ const CallEvent &Call, ProgramStateRef State, const Summary &Summary,
+ llvm::raw_ostream &Out) const {
+ SVal BufV = getArgSVal(Call, getArgNo());
+ SVal BufDynSize = getDynamicExtentWithOffset(State, BufV);
+ if (const llvm::APSInt *Val =
+ State->getStateManager().getSValBuilder().getKnownValue(State,
+ BufDynSize)) {
+ Out << "is a buffer with size " << *Val;
+ return true;
+ }
+ return false;
+}
+
void StdLibraryFunctionsChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
- Optional<Summary> FoundSummary = findFunctionSummary(Call, C);
+ std::optional<Summary> FoundSummary = findFunctionSummary(Call, C);
if (!FoundSummary)
return;
@@ -810,55 +1317,155 @@ void StdLibraryFunctionsChecker::checkPreCall(const CallEvent &Call,
ProgramStateRef State = C.getState();
ProgramStateRef NewState = State;
+ ExplodedNode *NewNode = C.getPredecessor();
for (const ValueConstraintPtr &Constraint : Summary.getArgConstraints()) {
+ ValueConstraintPtr NegatedConstraint = Constraint->negate();
ProgramStateRef SuccessSt = Constraint->apply(NewState, Call, Summary, C);
ProgramStateRef FailureSt =
- Constraint->negate()->apply(NewState, Call, Summary, C);
+ NegatedConstraint->apply(NewState, Call, Summary, C);
// The argument constraint is not satisfied.
if (FailureSt && !SuccessSt) {
- if (ExplodedNode *N = C.generateErrorNode(NewState))
- reportBug(Call, N, Constraint.get(), Summary, C);
+ if (ExplodedNode *N = C.generateErrorNode(State, NewNode))
+ reportBug(Call, N, Constraint.get(), NegatedConstraint.get(), Summary,
+ C);
break;
- } else {
- // We will apply the constraint even if we cannot reason about the
- // argument. This means both SuccessSt and FailureSt can be true. If we
- // weren't applying the constraint that would mean that symbolic
- // execution continues on a code whose behaviour is undefined.
- assert(SuccessSt);
- NewState = SuccessSt;
+ }
+ // We will apply the constraint even if we cannot reason about the
+ // argument. This means both SuccessSt and FailureSt can be true. If we
+ // weren't applying the constraint that would mean that symbolic
+ // execution continues on a code whose behaviour is undefined.
+ assert(SuccessSt);
+ NewState = SuccessSt;
+ if (NewState != State) {
+ SmallString<128> Msg;
+ llvm::raw_svector_ostream Os(Msg);
+ Os << "Assuming that the ";
+ printArgDesc(Constraint->getArgNo(), Os);
+ Os << " to '";
+ Os << getFunctionName(Call);
+ Os << "' ";
+ Constraint->describe(ValueConstraint::Assumption, Call, NewState, Summary,
+ Os);
+ const auto ArgSVal = Call.getArgSVal(Constraint->getArgNo());
+ NewNode = C.addTransition(
+ NewState, NewNode,
+ C.getNoteTag([Msg = std::move(Msg), ArgSVal](
+ PathSensitiveBugReport &BR, llvm::raw_ostream &OS) {
+ if (BR.isInteresting(ArgSVal))
+ OS << Msg;
+ }));
}
}
- if (NewState && NewState != State)
- C.addTransition(NewState);
}
void StdLibraryFunctionsChecker::checkPostCall(const CallEvent &Call,
CheckerContext &C) const {
- Optional<Summary> FoundSummary = findFunctionSummary(Call, C);
+ std::optional<Summary> FoundSummary = findFunctionSummary(Call, C);
if (!FoundSummary)
return;
// Now apply the constraints.
const Summary &Summary = *FoundSummary;
ProgramStateRef State = C.getState();
+ ExplodedNode *Node = C.getPredecessor();
// Apply case/branch specifications.
- for (const ConstraintSet &Case : Summary.getCaseConstraints()) {
+ for (const SummaryCase &Case : Summary.getCases()) {
ProgramStateRef NewState = State;
- for (const ValueConstraintPtr &Constraint : Case) {
+ for (const ValueConstraintPtr &Constraint : Case.getConstraints()) {
NewState = Constraint->apply(NewState, Call, Summary, C);
if (!NewState)
break;
}
- if (NewState && NewState != State)
+ if (NewState)
+ NewState = Case.getErrnoConstraint().apply(NewState, Call, Summary, C);
+
+ if (!NewState)
+ continue;
+
+ // Here it's possible that NewState == State, e.g. when other checkers
+ // already applied the same constraints (or stricter ones).
+ // Still add these note tags, the other checker should add only its
+ // specialized note tags. These general note tags are handled always by
+ // StdLibraryFunctionsChecker.
+
+ ExplodedNode *Pred = Node;
+ DeclarationName FunctionName =
+ cast<NamedDecl>(Call.getDecl())->getDeclName();
+
+ std::string ErrnoNote = Case.getErrnoConstraint().describe(C);
+ std::string CaseNote;
+ if (Case.getNote().empty()) {
+ if (!ErrnoNote.empty())
+ ErrnoNote =
+ llvm::formatv("After calling '{0}' {1}", FunctionName, ErrnoNote);
+ } else {
+ CaseNote = llvm::formatv(Case.getNote().str().c_str(), FunctionName);
+ }
+ const SVal RV = Call.getReturnValue();
+
+ if (Summary.getInvalidationKd() == EvalCallAsPure) {
+ // Do not expect that errno is interesting (the "pure" functions do not
+ // affect it).
+ if (!CaseNote.empty()) {
+ const NoteTag *Tag = C.getNoteTag(
+ [Node, CaseNote, RV](PathSensitiveBugReport &BR) -> std::string {
+ // Try to omit the note if we know in advance which branch is
+ // taken (this means, only one branch exists).
+ // This check is performed inside the lambda, after other
+ // (or this) checkers had a chance to add other successors.
+ // Dereferencing the saved node object is valid because it's part
+ // of a bug report call sequence.
+ // FIXME: This check is not exact. We may be here after a state
+ // split that was performed by another checker (and can not find
+ // the successors). This is why this check is only used in the
+ // EvalCallAsPure case.
+ if (BR.isInteresting(RV) && Node->succ_size() > 1)
+ return CaseNote;
+ return "";
+ });
+ Pred = C.addTransition(NewState, Pred, Tag);
+ }
+ } else {
+ if (!CaseNote.empty() || !ErrnoNote.empty()) {
+ const NoteTag *Tag =
+ C.getNoteTag([CaseNote, ErrnoNote,
+ RV](PathSensitiveBugReport &BR) -> std::string {
+ // If 'errno' is interesting, show the user a note about the case
+ // (what happened at the function call) and about how 'errno'
+ // causes the problem. ErrnoChecker sets the errno (but not RV) to
+ // interesting.
+ // If only the return value is interesting, show only the case
+ // note.
+ std::optional<Loc> ErrnoLoc =
+ errno_modeling::getErrnoLoc(BR.getErrorNode()->getState());
+ bool ErrnoImportant = !ErrnoNote.empty() && ErrnoLoc &&
+ BR.isInteresting(ErrnoLoc->getAsRegion());
+ if (ErrnoImportant) {
+ BR.markNotInteresting(ErrnoLoc->getAsRegion());
+ if (CaseNote.empty())
+ return ErrnoNote;
+ return llvm::formatv("{0}; {1}", CaseNote, ErrnoNote);
+ } else {
+ if (BR.isInteresting(RV))
+ return CaseNote;
+ }
+ return "";
+ });
+ Pred = C.addTransition(NewState, Pred, Tag);
+ }
+ }
+
+ // Add the transition if no note tag was added.
+ if (Pred == Node && NewState != State)
C.addTransition(NewState);
}
}
bool StdLibraryFunctionsChecker::evalCall(const CallEvent &Call,
CheckerContext &C) const {
- Optional<Summary> FoundSummary = findFunctionSummary(Call, C);
+ std::optional<Summary> FoundSummary = findFunctionSummary(Call, C);
if (!FoundSummary)
return false;
@@ -871,7 +1478,9 @@ bool StdLibraryFunctionsChecker::evalCall(const CallEvent &Call,
SVal V = C.getSValBuilder().conjureSymbolVal(
CE, LC, CE->getType().getCanonicalType(), C.blockCount());
State = State->BindExpr(CE, LC, V);
+
C.addTransition(State);
+
return true;
}
case NoEvalCall:
@@ -910,12 +1519,11 @@ bool StdLibraryFunctionsChecker::Signature::matches(
}
// Check the argument types.
- for (size_t I = 0, E = ArgTys.size(); I != E; ++I) {
- QualType ArgTy = ArgTys[I];
+ for (auto [Idx, ArgTy] : llvm::enumerate(ArgTys)) {
if (isIrrelevant(ArgTy))
continue;
QualType FDArgTy =
- RemoveRestrict(FD->getParamDecl(I)->getType().getCanonicalType());
+ RemoveRestrict(FD->getParamDecl(Idx)->getType().getCanonicalType());
if (ArgTy != FDArgTy)
return false;
}
@@ -923,26 +1531,26 @@ bool StdLibraryFunctionsChecker::Signature::matches(
return true;
}
-Optional<StdLibraryFunctionsChecker::Summary>
+std::optional<StdLibraryFunctionsChecker::Summary>
StdLibraryFunctionsChecker::findFunctionSummary(const FunctionDecl *FD,
CheckerContext &C) const {
if (!FD)
- return None;
+ return std::nullopt;
initFunctionSummaries(C);
auto FSMI = FunctionSummaryMap.find(FD->getCanonicalDecl());
if (FSMI == FunctionSummaryMap.end())
- return None;
+ return std::nullopt;
return FSMI->second;
}
-Optional<StdLibraryFunctionsChecker::Summary>
+std::optional<StdLibraryFunctionsChecker::Summary>
StdLibraryFunctionsChecker::findFunctionSummary(const CallEvent &Call,
CheckerContext &C) const {
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
if (!FD)
- return None;
+ return std::nullopt;
return findFunctionSummary(FD, C);
}
@@ -950,10 +1558,12 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
CheckerContext &C) const {
if (SummariesInitialized)
return;
+ SummariesInitialized = true;
SValBuilder &SVB = C.getSValBuilder();
BasicValueFactory &BVF = SVB.getBasicValueFactory();
const ASTContext &ACtx = BVF.getContext();
+ Preprocessor &PP = C.getPreprocessor();
// Helper class to lookup a type by its name.
class LookupType {
@@ -963,11 +1573,11 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
LookupType(const ASTContext &ACtx) : ACtx(ACtx) {}
// Find the type. If not found then the optional is not set.
- llvm::Optional<QualType> operator()(StringRef Name) {
+ std::optional<QualType> operator()(StringRef Name) {
IdentifierInfo &II = ACtx.Idents.get(Name);
auto LookupRes = ACtx.getTranslationUnitDecl()->lookup(&II);
if (LookupRes.empty())
- return None;
+ return std::nullopt;
// Prioritze typedef declarations.
// This is needed in case of C struct typedefs. E.g.:
@@ -985,7 +1595,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
for (Decl *D : LookupRes)
if (auto *TD = dyn_cast<TypeDecl>(D))
return ACtx.getTypeDeclType(TD).getCanonicalType();
- return None;
+ return std::nullopt;
}
} lookupTy(ACtx);
@@ -999,10 +1609,10 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
QualType operator()(QualType Ty) {
return ACtx.getLangOpts().C99 ? ACtx.getRestrictType(Ty) : Ty;
}
- Optional<QualType> operator()(Optional<QualType> Ty) {
+ std::optional<QualType> operator()(std::optional<QualType> Ty) {
if (Ty)
return operator()(*Ty);
- return None;
+ return std::nullopt;
}
} getRestrictTy(ACtx);
class GetPointerTy {
@@ -1011,16 +1621,16 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
public:
GetPointerTy(const ASTContext &ACtx) : ACtx(ACtx) {}
QualType operator()(QualType Ty) { return ACtx.getPointerType(Ty); }
- Optional<QualType> operator()(Optional<QualType> Ty) {
+ std::optional<QualType> operator()(std::optional<QualType> Ty) {
if (Ty)
return operator()(*Ty);
- return None;
+ return std::nullopt;
}
} getPointerTy(ACtx);
class {
public:
- Optional<QualType> operator()(Optional<QualType> Ty) {
- return Ty ? Optional<QualType>(Ty->withConst()) : None;
+ std::optional<QualType> operator()(std::optional<QualType> Ty) {
+ return Ty ? std::optional<QualType>(Ty->withConst()) : std::nullopt;
}
QualType operator()(QualType Ty) { return Ty.withConst(); }
} getConstTy;
@@ -1029,14 +1639,14 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
public:
GetMaxValue(BasicValueFactory &BVF) : BVF(BVF) {}
- Optional<RangeInt> operator()(QualType Ty) {
+ std::optional<RangeInt> operator()(QualType Ty) {
return BVF.getMaxValue(Ty).getLimitedValue();
}
- Optional<RangeInt> operator()(Optional<QualType> Ty) {
+ std::optional<RangeInt> operator()(std::optional<QualType> Ty) {
if (Ty) {
return operator()(*Ty);
}
- return None;
+ return std::nullopt;
}
} getMaxValue(BVF);
@@ -1089,14 +1699,11 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
const RangeInt UCharRangeMax =
std::min(BVF.getMaxValue(ACtx.UnsignedCharTy).getLimitedValue(), IntMax);
- // The platform dependent value of EOF.
- // Try our best to parse this from the Preprocessor, otherwise fallback to -1.
- const auto EOFv = [&C]() -> RangeInt {
- if (const llvm::Optional<int> OptInt =
- tryExpandAsInteger("EOF", C.getPreprocessor()))
- return *OptInt;
- return -1;
- }();
+ // Get platform dependent values of some macros.
+ // Try our best to parse this from the Preprocessor, otherwise fallback to a
+ // default value (what is found in a library header).
+ const auto EOFv = tryExpandAsInteger("EOF", PP).value_or(-1);
+ const auto AT_FDCWDv = tryExpandAsInteger("AT_FDCWD", PP).value_or(-100);
// Auxiliary class to aid adding summaries to the summary map.
struct AddToFunctionSummaryMap {
@@ -1146,9 +1753,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
} addToFunctionSummaryMap(ACtx, FunctionSummaryMap, DisplayLoadedSummaries);
// Below are helpers functions to create the summaries.
- auto ArgumentCondition = [](ArgNo ArgN, RangeKind Kind,
- IntRangeVector Ranges) {
- return std::make_shared<RangeConstraint>(ArgN, Kind, Ranges);
+ auto ArgumentCondition = [](ArgNo ArgN, RangeKind Kind, IntRangeVector Ranges,
+ StringRef Desc = "") {
+ return std::make_shared<RangeConstraint>(ArgN, Kind, Ranges, Desc);
};
auto BufferSize = [](auto... Args) {
return std::make_shared<BufferSizeConstraint>(Args...);
@@ -1165,13 +1772,13 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
auto operator()(RangeInt b, RangeInt e) {
return IntRangeVector{std::pair<RangeInt, RangeInt>{b, e}};
}
- auto operator()(RangeInt b, Optional<RangeInt> e) {
+ auto operator()(RangeInt b, std::optional<RangeInt> e) {
if (e)
return IntRangeVector{std::pair<RangeInt, RangeInt>{b, *e}};
return IntRangeVector{};
}
auto operator()(std::pair<RangeInt, RangeInt> i0,
- std::pair<RangeInt, Optional<RangeInt>> i1) {
+ std::pair<RangeInt, std::optional<RangeInt>> i1) {
if (i1.second)
return IntRangeVector{i0, {i1.first, *(i1.second)}};
return IntRangeVector{i0};
@@ -1184,10 +1791,26 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
auto NotNull = [&](ArgNo ArgN) {
return std::make_shared<NotNullConstraint>(ArgN);
};
+ auto IsNull = [&](ArgNo ArgN) {
+ return std::make_shared<NotNullConstraint>(ArgN, false);
+ };
+ auto NotNullBuffer = [&](ArgNo ArgN, ArgNo SizeArg1N, ArgNo SizeArg2N) {
+ return std::make_shared<NotNullBufferConstraint>(ArgN, SizeArg1N,
+ SizeArg2N);
+ };
- Optional<QualType> FileTy = lookupTy("FILE");
- Optional<QualType> FilePtrTy = getPointerTy(FileTy);
- Optional<QualType> FilePtrRestrictTy = getRestrictTy(FilePtrTy);
+ std::optional<QualType> FileTy = lookupTy("FILE");
+ std::optional<QualType> FilePtrTy = getPointerTy(FileTy);
+ std::optional<QualType> FilePtrRestrictTy = getRestrictTy(FilePtrTy);
+
+ std::optional<QualType> FPosTTy = lookupTy("fpos_t");
+ std::optional<QualType> FPosTPtrTy = getPointerTy(FPosTTy);
+ std::optional<QualType> ConstFPosTPtrTy = getPointerTy(getConstTy(FPosTTy));
+ std::optional<QualType> FPosTPtrRestrictTy = getRestrictTy(FPosTPtrTy);
+
+ constexpr llvm::StringLiteral GenericSuccessMsg(
+ "Assuming that '{0}' is successful");
+ constexpr llvm::StringLiteral GenericFailureMsg("Assuming that '{0}' fails");
// We are finally ready to define specifications for all supported functions.
//
@@ -1210,163 +1833,227 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
// Boils down to isupper() or islower() or isdigit().
.Case({ArgumentCondition(0U, WithinRange,
{{'0', '9'}, {'A', 'Z'}, {'a', 'z'}}),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is alphanumeric")
// The locale-specific range.
// No post-condition. We are completely unaware of
// locale-specific return values.
- .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})})
+ .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})},
+ ErrnoIrrelevant)
.Case(
{ArgumentCondition(
0U, OutOfRange,
{{'0', '9'}, {'A', 'Z'}, {'a', 'z'}, {128, UCharRangeMax}}),
- ReturnValueCondition(WithinRange, SingleValue(0))})
- .ArgConstraint(ArgumentCondition(
- 0U, WithinRange, {{EOFv, EOFv}, {0, UCharRangeMax}})));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is non-alphanumeric")
+ .ArgConstraint(ArgumentCondition(0U, WithinRange,
+ {{EOFv, EOFv}, {0, UCharRangeMax}},
+ "an unsigned char value or EOF")));
addToFunctionSummaryMap(
"isalpha", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange, {{'A', 'Z'}, {'a', 'z'}}),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is alphabetical")
// The locale-specific range.
- .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})})
+ .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})},
+ ErrnoIrrelevant)
.Case({ArgumentCondition(
0U, OutOfRange,
{{'A', 'Z'}, {'a', 'z'}, {128, UCharRangeMax}}),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is non-alphabetical"));
addToFunctionSummaryMap(
"isascii", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange, Range(0, 127)),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is an ASCII character")
.Case({ArgumentCondition(0U, OutOfRange, Range(0, 127)),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is not an ASCII character"));
addToFunctionSummaryMap(
"isblank", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange, {{'\t', '\t'}, {' ', ' '}}),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is a blank character")
.Case({ArgumentCondition(0U, OutOfRange, {{'\t', '\t'}, {' ', ' '}}),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is not a blank character"));
addToFunctionSummaryMap(
"iscntrl", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange, {{0, 32}, {127, 127}}),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is a control character")
.Case({ArgumentCondition(0U, OutOfRange, {{0, 32}, {127, 127}}),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is not a control character"));
addToFunctionSummaryMap(
"isdigit", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange, Range('0', '9')),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is a digit")
.Case({ArgumentCondition(0U, OutOfRange, Range('0', '9')),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is not a digit"));
addToFunctionSummaryMap(
"isgraph", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange, Range(33, 126)),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
- .Case({ArgumentCondition(0U, OutOfRange, Range(33, 126)),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character has graphical representation")
+ .Case(
+ {ArgumentCondition(0U, OutOfRange, Range(33, 126)),
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character does not have graphical representation"));
addToFunctionSummaryMap(
"islower", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
// Is certainly lowercase.
.Case({ArgumentCondition(0U, WithinRange, Range('a', 'z')),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is a lowercase letter")
// Is ascii but not lowercase.
.Case({ArgumentCondition(0U, WithinRange, Range(0, 127)),
ArgumentCondition(0U, OutOfRange, Range('a', 'z')),
- ReturnValueCondition(WithinRange, SingleValue(0))})
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is not a lowercase letter")
// The locale-specific range.
- .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})})
+ .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})},
+ ErrnoIrrelevant)
// Is not an unsigned char.
.Case({ArgumentCondition(0U, OutOfRange, Range(0, UCharRangeMax)),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant));
addToFunctionSummaryMap(
"isprint", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange, Range(32, 126)),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is printable")
.Case({ArgumentCondition(0U, OutOfRange, Range(32, 126)),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is non-printable"));
addToFunctionSummaryMap(
"ispunct", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
.Case({ArgumentCondition(
0U, WithinRange,
{{'!', '/'}, {':', '@'}, {'[', '`'}, {'{', '~'}}),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is a punctuation mark")
.Case({ArgumentCondition(
0U, OutOfRange,
{{'!', '/'}, {':', '@'}, {'[', '`'}, {'{', '~'}}),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is not a punctuation mark"));
addToFunctionSummaryMap(
"isspace", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
// Space, '\f', '\n', '\r', '\t', '\v'.
.Case({ArgumentCondition(0U, WithinRange, {{9, 13}, {' ', ' '}}),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is a whitespace character")
// The locale-specific range.
- .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})})
+ .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})},
+ ErrnoIrrelevant)
.Case({ArgumentCondition(0U, OutOfRange,
{{9, 13}, {' ', ' '}, {128, UCharRangeMax}}),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is not a whitespace character"));
addToFunctionSummaryMap(
"isupper", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
// Is certainly uppercase.
.Case({ArgumentCondition(0U, WithinRange, Range('A', 'Z')),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is an uppercase letter")
// The locale-specific range.
- .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})})
+ .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})},
+ ErrnoIrrelevant)
// Other.
.Case({ArgumentCondition(0U, OutOfRange,
{{'A', 'Z'}, {128, UCharRangeMax}}),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is not an uppercase letter"));
addToFunctionSummaryMap(
"isxdigit", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange,
{{'0', '9'}, {'A', 'F'}, {'a', 'f'}}),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is a hexadecimal digit")
.Case({ArgumentCondition(0U, OutOfRange,
{{'0', '9'}, {'A', 'F'}, {'a', 'f'}}),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is not a hexadecimal digit"));
addToFunctionSummaryMap(
"toupper", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
- .ArgConstraint(ArgumentCondition(
- 0U, WithinRange, {{EOFv, EOFv}, {0, UCharRangeMax}})));
+ .ArgConstraint(ArgumentCondition(0U, WithinRange,
+ {{EOFv, EOFv}, {0, UCharRangeMax}},
+ "an unsigned char value or EOF")));
addToFunctionSummaryMap(
"tolower", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
- .ArgConstraint(ArgumentCondition(
- 0U, WithinRange, {{EOFv, EOFv}, {0, UCharRangeMax}})));
+ .ArgConstraint(ArgumentCondition(0U, WithinRange,
+ {{EOFv, EOFv}, {0, UCharRangeMax}},
+ "an unsigned char value or EOF")));
addToFunctionSummaryMap(
"toascii", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
- .ArgConstraint(ArgumentCondition(
- 0U, WithinRange, {{EOFv, EOFv}, {0, UCharRangeMax}})));
+ .ArgConstraint(ArgumentCondition(0U, WithinRange,
+ {{EOFv, EOFv}, {0, UCharRangeMax}},
+ "an unsigned char value or EOF")));
// The getc() family of functions that returns either a char or an EOF.
addToFunctionSummaryMap(
{"getc", "fgetc"}, Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
.Case({ReturnValueCondition(WithinRange,
- {{EOFv, EOFv}, {0, UCharRangeMax}})}));
+ {{EOFv, EOFv}, {0, UCharRangeMax}})},
+ ErrnoIrrelevant));
addToFunctionSummaryMap(
"getchar", Signature(ArgTypes{}, RetType{IntTy}),
Summary(NoEvalCall)
.Case({ReturnValueCondition(WithinRange,
- {{EOFv, EOFv}, {0, UCharRangeMax}})}));
+ {{EOFv, EOFv}, {0, UCharRangeMax}})},
+ ErrnoIrrelevant));
// read()-like functions that never return more than buffer size.
auto FreadSummary =
Summary(NoEvalCall)
- .Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
- ReturnValueCondition(WithinRange, Range(0, SizeMax))})
- .ArgConstraint(NotNull(ArgNo(0)))
+ .Case({ArgumentCondition(1U, WithinRange, Range(1, SizeMax)),
+ ArgumentCondition(2U, WithinRange, Range(1, SizeMax)),
+ ReturnValueCondition(BO_LT, ArgNo(2)),
+ ReturnValueCondition(WithinRange, Range(0, SizeMax))},
+ ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .Case({ArgumentCondition(1U, WithinRange, Range(1, SizeMax)),
+ ReturnValueCondition(BO_EQ, ArgNo(2)),
+ ReturnValueCondition(WithinRange, Range(0, SizeMax))},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({ArgumentCondition(1U, WithinRange, SingleValue(0)),
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoMustNotBeChecked,
+ "Assuming that argument 'size' to '{0}' is 0")
+ .ArgConstraint(NotNullBuffer(ArgNo(0), ArgNo(1), ArgNo(2)))
.ArgConstraint(NotNull(ArgNo(3)))
.ArgConstraint(BufferSize(/*Buffer=*/ArgNo(0), /*BufSize=*/ArgNo(1),
/*BufSizeMultiplier=*/ArgNo(2)));
@@ -1386,13 +2073,14 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
RetType{SizeTy}),
FreadSummary);
- Optional<QualType> Ssize_tTy = lookupTy("ssize_t");
- Optional<RangeInt> Ssize_tMax = getMaxValue(Ssize_tTy);
+ std::optional<QualType> Ssize_tTy = lookupTy("ssize_t");
+ std::optional<RangeInt> Ssize_tMax = getMaxValue(Ssize_tTy);
auto ReadSummary =
Summary(NoEvalCall)
.Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
- ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))});
+ ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))},
+ ErrnoIrrelevant);
// FIXME these are actually defined by POSIX and not by the C standard, we
// should handle them together with the rest of the POSIX functions.
@@ -1409,7 +2097,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
auto GetLineSummary =
Summary(NoEvalCall)
.Case({ReturnValueCondition(WithinRange,
- Range({-1, -1}, {1, Ssize_tMax}))});
+ Range({-1, -1}, {1, Ssize_tMax}))},
+ ErrnoIrrelevant);
QualType CharPtrPtrRestrictTy = getRestrictTy(getPointerTy(CharPtrTy));
@@ -1433,7 +2122,226 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
RetType{Ssize_tTy}),
GetLineSummary);
+ {
+ Summary GetenvSummary =
+ Summary(NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .Case({NotNull(Ret)}, ErrnoIrrelevant,
+ "Assuming the environment variable exists");
+ // In untrusted environments the envvar might not exist.
+ if (!ShouldAssumeControlledEnvironment)
+ GetenvSummary.Case({NotNull(Ret)->negate()}, ErrnoIrrelevant,
+ "Assuming the environment variable does not exist");
+
+ // char *getenv(const char *name);
+ addToFunctionSummaryMap(
+ "getenv", Signature(ArgTypes{ConstCharPtrTy}, RetType{CharPtrTy}),
+ std::move(GetenvSummary));
+ }
+
if (ModelPOSIX) {
+ const auto ReturnsZeroOrMinusOne =
+ ConstraintSet{ReturnValueCondition(WithinRange, Range(-1, 0))};
+ const auto ReturnsZero =
+ ConstraintSet{ReturnValueCondition(WithinRange, SingleValue(0))};
+ const auto ReturnsMinusOne =
+ ConstraintSet{ReturnValueCondition(WithinRange, SingleValue(-1))};
+ const auto ReturnsEOF =
+ ConstraintSet{ReturnValueCondition(WithinRange, SingleValue(EOFv))};
+ const auto ReturnsNonnegative =
+ ConstraintSet{ReturnValueCondition(WithinRange, Range(0, IntMax))};
+ const auto ReturnsNonZero =
+ ConstraintSet{ReturnValueCondition(OutOfRange, SingleValue(0))};
+ const auto ReturnsFileDescriptor =
+ ConstraintSet{ReturnValueCondition(WithinRange, Range(-1, IntMax))};
+ const auto &ReturnsValidFileDescriptor = ReturnsNonnegative;
+
+ auto ValidFileDescriptorOrAtFdcwd = [&](ArgNo ArgN) {
+ return std::make_shared<RangeConstraint>(
+ ArgN, WithinRange, Range({AT_FDCWDv, AT_FDCWDv}, {0, IntMax}),
+ "a valid file descriptor or AT_FDCWD");
+ };
+
+ // FILE *fopen(const char *restrict pathname, const char *restrict mode);
+ addToFunctionSummaryMap(
+ "fopen",
+ Signature(ArgTypes{ConstCharPtrRestrictTy, ConstCharPtrRestrictTy},
+ RetType{FilePtrTy}),
+ Summary(NoEvalCall)
+ .Case({NotNull(Ret)}, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({IsNull(Ret)}, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // FILE *fdopen(int fd, const char *mode);
+ addToFunctionSummaryMap(
+ "fdopen",
+ Signature(ArgTypes{IntTy, ConstCharPtrTy}, RetType{FilePtrTy}),
+ Summary(NoEvalCall)
+ .Case({NotNull(Ret)}, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({IsNull(Ret)}, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // FILE *tmpfile(void);
+ addToFunctionSummaryMap(
+ "tmpfile", Signature(ArgTypes{}, RetType{FilePtrTy}),
+ Summary(NoEvalCall)
+ .Case({NotNull(Ret)}, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({IsNull(Ret)}, ErrnoNEZeroIrrelevant, GenericFailureMsg));
+
+ // FILE *freopen(const char *restrict pathname, const char *restrict mode,
+ // FILE *restrict stream);
+ addToFunctionSummaryMap(
+ "freopen",
+ Signature(ArgTypes{ConstCharPtrRestrictTy, ConstCharPtrRestrictTy,
+ FilePtrRestrictTy},
+ RetType{FilePtrTy}),
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(BO_EQ, ArgNo(2))},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({IsNull(Ret)}, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(NotNull(ArgNo(2))));
+
+ // int fclose(FILE *stream);
+ addToFunctionSummaryMap(
+ "fclose", Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsEOF, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int ungetc(int c, FILE *stream);
+ addToFunctionSummaryMap(
+ "ungetc", Signature(ArgTypes{IntTy, FilePtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(BO_EQ, ArgNo(0)),
+ ArgumentCondition(0, WithinRange, {{0, UCharRangeMax}})},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({ReturnValueCondition(WithinRange, SingleValue(EOFv)),
+ ArgumentCondition(0, WithinRange, SingleValue(EOFv))},
+ ErrnoNEZeroIrrelevant,
+ "Assuming that 'ungetc' fails because EOF was passed as "
+ "character")
+ .Case({ReturnValueCondition(WithinRange, SingleValue(EOFv)),
+ ArgumentCondition(0, WithinRange, {{0, UCharRangeMax}})},
+ ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ArgumentCondition(
+ 0, WithinRange, {{EOFv, EOFv}, {0, UCharRangeMax}}))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ std::optional<QualType> Off_tTy = lookupTy("off_t");
+ std::optional<RangeInt> Off_tMax = getMaxValue(Off_tTy);
+
+ // int fseek(FILE *stream, long offset, int whence);
+ // FIXME: It can be possible to get the 'SEEK_' values (like EOFv) and use
+ // these for condition of arg 2.
+ // Now the range [0,2] is used (the `SEEK_*` constants are usually 0,1,2).
+ addToFunctionSummaryMap(
+ "fseek", Signature(ArgTypes{FilePtrTy, LongTy, IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(ArgumentCondition(2, WithinRange, {{0, 2}})));
+
+ // int fseeko(FILE *stream, off_t offset, int whence);
+ addToFunctionSummaryMap(
+ "fseeko",
+ Signature(ArgTypes{FilePtrTy, Off_tTy, IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(ArgumentCondition(2, WithinRange, {{0, 2}})));
+
+ // int fgetpos(FILE *restrict stream, fpos_t *restrict pos);
+ // From 'The Open Group Base Specifications Issue 7, 2018 edition':
+ // "The fgetpos() function shall not change the setting of errno if
+ // successful."
+ addToFunctionSummaryMap(
+ "fgetpos",
+ Signature(ArgTypes{FilePtrRestrictTy, FPosTPtrRestrictTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoUnchanged, GenericSuccessMsg)
+ .Case(ReturnsNonZero, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int fsetpos(FILE *stream, const fpos_t *pos);
+ // From 'The Open Group Base Specifications Issue 7, 2018 edition':
+ // "The fsetpos() function shall not change the setting of errno if
+ // successful."
+ addToFunctionSummaryMap(
+ "fsetpos",
+ Signature(ArgTypes{FilePtrTy, ConstFPosTPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoUnchanged, GenericSuccessMsg)
+ .Case(ReturnsNonZero, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
+
+ // int fflush(FILE *stream);
+ addToFunctionSummaryMap(
+ "fflush", Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsEOF, ErrnoNEZeroIrrelevant, GenericFailureMsg));
+
+ // long ftell(FILE *stream);
+ // From 'The Open Group Base Specifications Issue 7, 2018 edition':
+ // "The ftell() function shall not change the setting of errno if
+ // successful."
+ addToFunctionSummaryMap(
+ "ftell", Signature(ArgTypes{FilePtrTy}, RetType{LongTy}),
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(WithinRange, Range(0, LongMax))},
+ ErrnoUnchanged, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // off_t ftello(FILE *stream);
+ addToFunctionSummaryMap(
+ "ftello", Signature(ArgTypes{FilePtrTy}, RetType{Off_tTy}),
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(WithinRange, Range(0, Off_tMax))},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int fileno(FILE *stream);
+ addToFunctionSummaryMap(
+ "fileno", Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked,
+ GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // void rewind(FILE *stream);
+ // This function indicates error only by setting of 'errno'.
+ addToFunctionSummaryMap("rewind",
+ Signature(ArgTypes{FilePtrTy}, RetType{VoidTy}),
+ Summary(NoEvalCall)
+ .Case({}, ErrnoMustBeChecked)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // void clearerr(FILE *stream);
+ addToFunctionSummaryMap(
+ "clearerr", Signature(ArgTypes{FilePtrTy}, RetType{VoidTy}),
+ Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
+
+ // int feof(FILE *stream);
+ addToFunctionSummaryMap(
+ "feof", Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
+
+ // int ferror(FILE *stream);
+ addToFunctionSummaryMap(
+ "ferror", Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
// long a64l(const char *str64);
addToFunctionSummaryMap(
@@ -1447,16 +2355,32 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
.ArgConstraint(ArgumentCondition(
0, WithinRange, Range(0, LongMax))));
- const auto ReturnsZeroOrMinusOne =
- ConstraintSet{ReturnValueCondition(WithinRange, Range(-1, 0))};
- const auto ReturnsFileDescriptor =
- ConstraintSet{ReturnValueCondition(WithinRange, Range(-1, IntMax))};
+ // int open(const char *path, int oflag, ...);
+ addToFunctionSummaryMap(
+ "open", Signature(ArgTypes{ConstCharPtrTy, IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked,
+ GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
+
+ // int openat(int fd, const char *path, int oflag, ...);
+ addToFunctionSummaryMap(
+ "openat",
+ Signature(ArgTypes{IntTy, ConstCharPtrTy, IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked,
+ GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(0)))
+ .ArgConstraint(NotNull(ArgNo(1))));
// int access(const char *pathname, int amode);
addToFunctionSummaryMap(
"access", Signature(ArgTypes{ConstCharPtrTy, IntTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// int faccessat(int dirfd, const char *pathname, int mode, int flags);
@@ -1465,57 +2389,66 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstCharPtrTy, IntTy, IntTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
// int dup(int fildes);
- addToFunctionSummaryMap("dup", Signature(ArgTypes{IntTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsFileDescriptor)
- .ArgConstraint(ArgumentCondition(
- 0, WithinRange, Range(0, IntMax))));
+ addToFunctionSummaryMap(
+ "dup", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked,
+ GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
// int dup2(int fildes1, int filedes2);
addToFunctionSummaryMap(
"dup2", Signature(ArgTypes{IntTy, IntTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsFileDescriptor)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked,
+ GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(
ArgumentCondition(1, WithinRange, Range(0, IntMax))));
// int fdatasync(int fildes);
- addToFunctionSummaryMap("fdatasync",
- Signature(ArgTypes{IntTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
- .ArgConstraint(ArgumentCondition(
- 0, WithinRange, Range(0, IntMax))));
+ addToFunctionSummaryMap(
+ "fdatasync", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
// int fnmatch(const char *pattern, const char *string, int flags);
addToFunctionSummaryMap(
"fnmatch",
Signature(ArgTypes{ConstCharPtrTy, ConstCharPtrTy, IntTy},
RetType{IntTy}),
- Summary(EvalCallAsPure)
+ Summary(NoEvalCall)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
// int fsync(int fildes);
- addToFunctionSummaryMap("fsync", Signature(ArgTypes{IntTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
- .ArgConstraint(ArgumentCondition(
- 0, WithinRange, Range(0, IntMax))));
-
- Optional<QualType> Off_tTy = lookupTy("off_t");
+ addToFunctionSummaryMap(
+ "fsync", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
// int truncate(const char *path, off_t length);
addToFunctionSummaryMap(
"truncate",
Signature(ArgTypes{ConstCharPtrTy, Off_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// int symlink(const char *oldpath, const char *newpath);
@@ -1523,7 +2456,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"symlink",
Signature(ArgTypes{ConstCharPtrTy, ConstCharPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
@@ -1533,26 +2467,30 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{ConstCharPtrTy, IntTy, ConstCharPtrTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0)))
- .ArgConstraint(ArgumentCondition(1, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(1)))
.ArgConstraint(NotNull(ArgNo(2))));
// int lockf(int fd, int cmd, off_t len);
addToFunctionSummaryMap(
"lockf", Signature(ArgTypes{IntTy, IntTy, Off_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
- Optional<QualType> Mode_tTy = lookupTy("mode_t");
+ std::optional<QualType> Mode_tTy = lookupTy("mode_t");
// int creat(const char *pathname, mode_t mode);
addToFunctionSummaryMap(
"creat", Signature(ArgTypes{ConstCharPtrTy, Mode_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsFileDescriptor)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked,
+ GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// unsigned int sleep(unsigned int seconds);
@@ -1562,15 +2500,17 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, UnsignedIntMax))));
- Optional<QualType> DirTy = lookupTy("DIR");
- Optional<QualType> DirPtrTy = getPointerTy(DirTy);
+ std::optional<QualType> DirTy = lookupTy("DIR");
+ std::optional<QualType> DirPtrTy = getPointerTy(DirTy);
// int dirfd(DIR *dirp);
- addToFunctionSummaryMap("dirfd",
- Signature(ArgTypes{DirPtrTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsFileDescriptor)
- .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap(
+ "dirfd", Signature(ArgTypes{DirPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked,
+ GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
// unsigned int alarm(unsigned int seconds);
addToFunctionSummaryMap(
@@ -1580,11 +2520,12 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
ArgumentCondition(0, WithinRange, Range(0, UnsignedIntMax))));
// int closedir(DIR *dir);
- addToFunctionSummaryMap("closedir",
- Signature(ArgTypes{DirPtrTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
- .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap(
+ "closedir", Signature(ArgTypes{DirPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
// char *strdup(const char *s);
addToFunctionSummaryMap(
@@ -1606,21 +2547,39 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
// int mkstemp(char *template);
- addToFunctionSummaryMap("mkstemp",
- Signature(ArgTypes{CharPtrTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsFileDescriptor)
- .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap(
+ "mkstemp", Signature(ArgTypes{CharPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked,
+ GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
// char *mkdtemp(char *template);
addToFunctionSummaryMap(
"mkdtemp", Signature(ArgTypes{CharPtrTy}, RetType{CharPtrTy}),
- Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
+ Summary(NoEvalCall)
+ .Case({ReturnValueCondition(BO_EQ, ArgNo(0))},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({IsNull(Ret)}, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
// char *getcwd(char *buf, size_t size);
addToFunctionSummaryMap(
"getcwd", Signature(ArgTypes{CharPtrTy, SizeTy}, RetType{CharPtrTy}),
Summary(NoEvalCall)
+ .Case({ArgumentCondition(1, WithinRange, Range(1, SizeMax)),
+ ReturnValueCondition(BO_EQ, ArgNo(0))},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({ArgumentCondition(1, WithinRange, SingleValue(0)),
+ IsNull(Ret)},
+ ErrnoNEZeroIrrelevant, "Assuming that argument 'size' is 0")
+ .Case({ArgumentCondition(1, WithinRange, Range(1, SizeMax)),
+ IsNull(Ret)},
+ ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .ArgConstraint(
+ BufferSize(/*Buffer*/ ArgNo(0), /*BufSize*/ ArgNo(1)))
.ArgConstraint(
ArgumentCondition(1, WithinRange, Range(0, SizeMax))));
@@ -1628,7 +2587,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap(
"mkdir", Signature(ArgTypes{ConstCharPtrTy, Mode_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// int mkdirat(int dirfd, const char *pathname, mode_t mode);
@@ -1636,17 +2596,20 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"mkdirat",
Signature(ArgTypes{IntTy, ConstCharPtrTy, Mode_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
- Optional<QualType> Dev_tTy = lookupTy("dev_t");
+ std::optional<QualType> Dev_tTy = lookupTy("dev_t");
// int mknod(const char *pathname, mode_t mode, dev_t dev);
addToFunctionSummaryMap(
"mknod",
Signature(ArgTypes{ConstCharPtrTy, Mode_tTy, Dev_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// int mknodat(int dirfd, const char *pathname, mode_t mode, dev_t dev);
@@ -1655,14 +2618,17 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstCharPtrTy, Mode_tTy, Dev_tTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
// int chmod(const char *path, mode_t mode);
addToFunctionSummaryMap(
"chmod", Signature(ArgTypes{ConstCharPtrTy, Mode_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// int fchmodat(int dirfd, const char *pathname, mode_t mode, int flags);
@@ -1671,20 +2637,22 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstCharPtrTy, Mode_tTy, IntTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
- .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
// int fchmod(int fildes, mode_t mode);
addToFunctionSummaryMap(
"fchmod", Signature(ArgTypes{IntTy, Mode_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
- Optional<QualType> Uid_tTy = lookupTy("uid_t");
- Optional<QualType> Gid_tTy = lookupTy("gid_t");
+ std::optional<QualType> Uid_tTy = lookupTy("uid_t");
+ std::optional<QualType> Gid_tTy = lookupTy("gid_t");
// int fchownat(int dirfd, const char *pathname, uid_t owner, gid_t group,
// int flags);
@@ -1693,8 +2661,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstCharPtrTy, Uid_tTy, Gid_tTy, IntTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
- .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
// int chown(const char *path, uid_t owner, gid_t group);
@@ -1702,7 +2671,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"chown",
Signature(ArgTypes{ConstCharPtrTy, Uid_tTy, Gid_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// int lchown(const char *path, uid_t owner, gid_t group);
@@ -1710,37 +2680,42 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"lchown",
Signature(ArgTypes{ConstCharPtrTy, Uid_tTy, Gid_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// int fchown(int fildes, uid_t owner, gid_t group);
addToFunctionSummaryMap(
"fchown", Signature(ArgTypes{IntTy, Uid_tTy, Gid_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
// int rmdir(const char *pathname);
- addToFunctionSummaryMap("rmdir",
- Signature(ArgTypes{ConstCharPtrTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
- .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap(
+ "rmdir", Signature(ArgTypes{ConstCharPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
// int chdir(const char *path);
- addToFunctionSummaryMap("chdir",
- Signature(ArgTypes{ConstCharPtrTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
- .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap(
+ "chdir", Signature(ArgTypes{ConstCharPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
// int link(const char *oldpath, const char *newpath);
addToFunctionSummaryMap(
"link",
Signature(ArgTypes{ConstCharPtrTy, ConstCharPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
@@ -1751,37 +2726,42 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstCharPtrTy, IntTy, ConstCharPtrTy, IntTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
- .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1)))
- .ArgConstraint(ArgumentCondition(2, WithinRange, Range(0, IntMax)))
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(2)))
.ArgConstraint(NotNull(ArgNo(3))));
// int unlink(const char *pathname);
- addToFunctionSummaryMap("unlink",
- Signature(ArgTypes{ConstCharPtrTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
- .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap(
+ "unlink", Signature(ArgTypes{ConstCharPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
// int unlinkat(int fd, const char *path, int flag);
addToFunctionSummaryMap(
"unlinkat",
Signature(ArgTypes{IntTy, ConstCharPtrTy, IntTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
- .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
- Optional<QualType> StructStatTy = lookupTy("stat");
- Optional<QualType> StructStatPtrTy = getPointerTy(StructStatTy);
- Optional<QualType> StructStatPtrRestrictTy = getRestrictTy(StructStatPtrTy);
+ std::optional<QualType> StructStatTy = lookupTy("stat");
+ std::optional<QualType> StructStatPtrTy = getPointerTy(StructStatTy);
+ std::optional<QualType> StructStatPtrRestrictTy =
+ getRestrictTy(StructStatPtrTy);
// int fstat(int fd, struct stat *statbuf);
addToFunctionSummaryMap(
"fstat", Signature(ArgTypes{IntTy, StructStatPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1))));
@@ -1791,7 +2771,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{ConstCharPtrRestrictTy, StructStatPtrRestrictTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
@@ -1801,7 +2782,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{ConstCharPtrRestrictTy, StructStatPtrRestrictTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
@@ -1813,32 +2795,40 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
StructStatPtrRestrictTy, IntTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
- .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1)))
.ArgConstraint(NotNull(ArgNo(2))));
// DIR *opendir(const char *name);
addToFunctionSummaryMap(
"opendir", Signature(ArgTypes{ConstCharPtrTy}, RetType{DirPtrTy}),
- Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
+ Summary(NoEvalCall)
+ .Case({NotNull(Ret)}, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({IsNull(Ret)}, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
// DIR *fdopendir(int fd);
- addToFunctionSummaryMap("fdopendir",
- Signature(ArgTypes{IntTy}, RetType{DirPtrTy}),
- Summary(NoEvalCall)
- .ArgConstraint(ArgumentCondition(
- 0, WithinRange, Range(0, IntMax))));
+ addToFunctionSummaryMap(
+ "fdopendir", Signature(ArgTypes{IntTy}, RetType{DirPtrTy}),
+ Summary(NoEvalCall)
+ .Case({NotNull(Ret)}, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({IsNull(Ret)}, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
// int isatty(int fildes);
addToFunctionSummaryMap(
"isatty", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case({ReturnValueCondition(WithinRange, Range(0, 1))})
+ .Case({ReturnValueCondition(WithinRange, Range(0, 1))},
+ ErrnoIrrelevant)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
// FILE *popen(const char *command, const char *type);
+ // FIXME: Improve for errno modeling.
addToFunctionSummaryMap(
"popen",
Signature(ArgTypes{ConstCharPtrTy, ConstCharPtrTy}, RetType{FilePtrTy}),
@@ -1847,16 +2837,19 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
.ArgConstraint(NotNull(ArgNo(1))));
// int pclose(FILE *stream);
+ // FIXME: Improve for errno modeling.
addToFunctionSummaryMap(
"pclose", Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
// int close(int fildes);
- addToFunctionSummaryMap("close", Signature(ArgTypes{IntTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
- .ArgConstraint(ArgumentCondition(
- 0, WithinRange, Range(-1, IntMax))));
+ addToFunctionSummaryMap(
+ "close", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(-1, IntMax))));
// long fpathconf(int fildes, int name);
addToFunctionSummaryMap("fpathconf",
@@ -1870,14 +2863,6 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"pathconf", Signature(ArgTypes{ConstCharPtrTy, IntTy}, RetType{LongTy}),
Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
- // FILE *fdopen(int fd, const char *mode);
- addToFunctionSummaryMap(
- "fdopen",
- Signature(ArgTypes{IntTy, ConstCharPtrTy}, RetType{FilePtrTy}),
- Summary(NoEvalCall)
- .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
- .ArgConstraint(NotNull(ArgNo(1))));
-
// void rewinddir(DIR *dir);
addToFunctionSummaryMap(
"rewinddir", Signature(ArgTypes{DirPtrTy}, RetType{VoidTy}),
@@ -1893,28 +2878,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"rand_r", Signature(ArgTypes{UnsignedIntPtrTy}, RetType{IntTy}),
Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
- // int fileno(FILE *stream);
- addToFunctionSummaryMap("fileno",
- Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsFileDescriptor)
- .ArgConstraint(NotNull(ArgNo(0))));
-
- // int fseeko(FILE *stream, off_t offset, int whence);
- addToFunctionSummaryMap(
- "fseeko",
- Signature(ArgTypes{FilePtrTy, Off_tTy, IntTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
- .ArgConstraint(NotNull(ArgNo(0))));
-
- // off_t ftello(FILE *stream);
- addToFunctionSummaryMap(
- "ftello", Signature(ArgTypes{FilePtrTy}, RetType{Off_tTy}),
- Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
-
// void *mmap(void *addr, size_t length, int prot, int flags, int fd,
// off_t offset);
+ // FIXME: Improve for errno modeling.
addToFunctionSummaryMap(
"mmap",
Signature(ArgTypes{VoidPtrTy, SizeTy, IntTy, IntTy, IntTy, Off_tTy},
@@ -1924,9 +2890,10 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
.ArgConstraint(
ArgumentCondition(4, WithinRange, Range(-1, IntMax))));
- Optional<QualType> Off64_tTy = lookupTy("off64_t");
+ std::optional<QualType> Off64_tTy = lookupTy("off64_t");
// void *mmap64(void *addr, size_t length, int prot, int flags, int fd,
// off64_t offset);
+ // FIXME: Improve for errno modeling.
addToFunctionSummaryMap(
"mmap64",
Signature(ArgTypes{VoidPtrTy, SizeTy, IntTy, IntTy, IntTy, Off64_tTy},
@@ -1937,16 +2904,23 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
ArgumentCondition(4, WithinRange, Range(-1, IntMax))));
// int pipe(int fildes[2]);
- addToFunctionSummaryMap("pipe",
- Signature(ArgTypes{IntPtrTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
- .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap(
+ "pipe", Signature(ArgTypes{IntPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(0))));
// off_t lseek(int fildes, off_t offset, int whence);
+ // In the first case we can not tell for sure if it failed or not.
+ // A return value different from of the expected offset (that is unknown
+ // here) may indicate failure. For this reason we do not enforce the errno
+ // check (can cause false positive).
addToFunctionSummaryMap(
"lseek", Signature(ArgTypes{IntTy, Off_tTy, IntTy}, RetType{Off_tTy}),
Summary(NoEvalCall)
+ .Case(ReturnsNonnegative, ErrnoIrrelevant)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
@@ -1957,8 +2931,15 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{ConstCharPtrRestrictTy, CharPtrRestrictTy, SizeTy},
RetType{Ssize_tTy}),
Summary(NoEvalCall)
- .Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
- ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ .Case({ArgumentCondition(2, WithinRange, Range(1, IntMax)),
+ ReturnValueCondition(LessThanOrEq, ArgNo(2)),
+ ReturnValueCondition(WithinRange, Range(1, Ssize_tMax))},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({ArgumentCondition(2, WithinRange, SingleValue(0)),
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoMustNotBeChecked,
+ "Assuming that argument 'bufsize' is 0")
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1)))
.ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
@@ -1974,9 +2955,16 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
ArgTypes{IntTy, ConstCharPtrRestrictTy, CharPtrRestrictTy, SizeTy},
RetType{Ssize_tTy}),
Summary(NoEvalCall)
- .Case({ReturnValueCondition(LessThanOrEq, ArgNo(3)),
- ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
- .ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
+ .Case({ArgumentCondition(3, WithinRange, Range(1, IntMax)),
+ ReturnValueCondition(LessThanOrEq, ArgNo(3)),
+ ReturnValueCondition(WithinRange, Range(1, Ssize_tMax))},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({ArgumentCondition(3, WithinRange, SingleValue(0)),
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoMustNotBeChecked,
+ "Assuming that argument 'bufsize' is 0")
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1)))
.ArgConstraint(NotNull(ArgNo(2)))
.ArgConstraint(BufferSize(/*Buffer=*/ArgNo(2),
@@ -1991,12 +2979,16 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstCharPtrTy, IntTy, ConstCharPtrTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1)))
+ .ArgConstraint(ValidFileDescriptorOrAtFdcwd(ArgNo(2)))
.ArgConstraint(NotNull(ArgNo(3))));
// char *realpath(const char *restrict file_name,
// char *restrict resolved_name);
+ // FIXME: Improve for errno modeling.
addToFunctionSummaryMap(
"realpath",
Signature(ArgTypes{ConstCharPtrRestrictTy, CharPtrRestrictTy},
@@ -2010,7 +3002,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"execv",
Signature(ArgTypes{ConstCharPtrTy, CharPtrConstPtr}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case({ReturnValueCondition(WithinRange, SingleValue(-1))})
+ .Case(ReturnsMinusOne, ErrnoIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
// int execvp(const char *file, char *const argv[]);
@@ -2018,7 +3010,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"execvp",
Signature(ArgTypes{ConstCharPtrTy, CharPtrConstPtr}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case({ReturnValueCondition(WithinRange, SingleValue(-1))})
+ .Case(ReturnsMinusOne, ErrnoIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
// int getopt(int argc, char * const argv[], const char *optstring);
@@ -2027,23 +3019,26 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, CharPtrConstPtr, ConstCharPtrTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case({ReturnValueCondition(WithinRange, Range(-1, UCharRangeMax))})
+ .Case({ReturnValueCondition(WithinRange, Range(-1, UCharRangeMax))},
+ ErrnoIrrelevant)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1)))
.ArgConstraint(NotNull(ArgNo(2))));
- Optional<QualType> StructSockaddrTy = lookupTy("sockaddr");
- Optional<QualType> StructSockaddrPtrTy = getPointerTy(StructSockaddrTy);
- Optional<QualType> ConstStructSockaddrPtrTy =
+ std::optional<QualType> StructSockaddrTy = lookupTy("sockaddr");
+ std::optional<QualType> StructSockaddrPtrTy =
+ getPointerTy(StructSockaddrTy);
+ std::optional<QualType> ConstStructSockaddrPtrTy =
getPointerTy(getConstTy(StructSockaddrTy));
- Optional<QualType> StructSockaddrPtrRestrictTy =
+ std::optional<QualType> StructSockaddrPtrRestrictTy =
getRestrictTy(StructSockaddrPtrTy);
- Optional<QualType> ConstStructSockaddrPtrRestrictTy =
+ std::optional<QualType> ConstStructSockaddrPtrRestrictTy =
getRestrictTy(ConstStructSockaddrPtrTy);
- Optional<QualType> Socklen_tTy = lookupTy("socklen_t");
- Optional<QualType> Socklen_tPtrTy = getPointerTy(Socklen_tTy);
- Optional<QualType> Socklen_tPtrRestrictTy = getRestrictTy(Socklen_tPtrTy);
- Optional<RangeInt> Socklen_tMax = getMaxValue(Socklen_tTy);
+ std::optional<QualType> Socklen_tTy = lookupTy("socklen_t");
+ std::optional<QualType> Socklen_tPtrTy = getPointerTy(Socklen_tTy);
+ std::optional<QualType> Socklen_tPtrRestrictTy =
+ getRestrictTy(Socklen_tPtrTy);
+ std::optional<RangeInt> Socklen_tMax = getMaxValue(Socklen_tTy);
// In 'socket.h' of some libc implementations with C99, sockaddr parameter
// is a transparent union of the underlying sockaddr_ family of pointers
@@ -2051,9 +3046,20 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
// standardized signature will not match, thus we try to match with another
// signature that has the joker Irrelevant type. We also remove those
// constraints which require pointer types for the sockaddr param.
+
+ // int socket(int domain, int type, int protocol);
+ addToFunctionSummaryMap(
+ "socket", Signature(ArgTypes{IntTy, IntTy, IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked,
+ GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg));
+
auto Accept =
Summary(NoEvalCall)
- .Case(ReturnsFileDescriptor)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked,
+ GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)));
if (!addToFunctionSummaryMap(
"accept",
@@ -2076,7 +3082,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstStructSockaddrPtrTy, Socklen_tTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1)))
@@ -2089,7 +3096,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"bind",
Signature(ArgTypes{IntTy, Irrelevant, Socklen_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(
@@ -2103,7 +3111,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Socklen_tPtrRestrictTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1)))
@@ -2113,7 +3122,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, Irrelevant, Socklen_tPtrRestrictTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
@@ -2125,7 +3135,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Socklen_tPtrRestrictTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1)))
@@ -2135,7 +3146,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, Irrelevant, Socklen_tPtrRestrictTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
@@ -2146,7 +3158,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstStructSockaddrPtrTy, Socklen_tTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1)))))
@@ -2154,14 +3167,20 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"connect",
Signature(ArgTypes{IntTy, Irrelevant, Socklen_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
auto Recvfrom =
Summary(NoEvalCall)
.Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
- ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ ReturnValueCondition(WithinRange, Range(1, Ssize_tMax))},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({ReturnValueCondition(WithinRange, SingleValue(0)),
+ ArgumentCondition(2, WithinRange, SingleValue(0))},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
/*BufSize=*/ArgNo(2)));
@@ -2186,7 +3205,12 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
auto Sendto =
Summary(NoEvalCall)
.Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
- ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ ReturnValueCondition(WithinRange, Range(1, Ssize_tMax))},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({ReturnValueCondition(WithinRange, SingleValue(0)),
+ ArgumentCondition(2, WithinRange, SingleValue(0))},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
/*BufSize=*/ArgNo(2)));
@@ -2207,12 +3231,13 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Sendto);
// int listen(int sockfd, int backlog);
- addToFunctionSummaryMap("listen",
- Signature(ArgTypes{IntTy, IntTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
- .ArgConstraint(ArgumentCondition(
- 0, WithinRange, Range(0, IntMax))));
+ addToFunctionSummaryMap(
+ "listen", Signature(ArgTypes{IntTy, IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
// ssize_t recv(int sockfd, void *buf, size_t len, int flags);
addToFunctionSummaryMap(
@@ -2221,14 +3246,19 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
RetType{Ssize_tTy}),
Summary(NoEvalCall)
.Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
- ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ ReturnValueCondition(WithinRange, Range(1, Ssize_tMax))},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({ReturnValueCondition(WithinRange, SingleValue(0)),
+ ArgumentCondition(2, WithinRange, SingleValue(0))},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
/*BufSize=*/ArgNo(2))));
- Optional<QualType> StructMsghdrTy = lookupTy("msghdr");
- Optional<QualType> StructMsghdrPtrTy = getPointerTy(StructMsghdrTy);
- Optional<QualType> ConstStructMsghdrPtrTy =
+ std::optional<QualType> StructMsghdrTy = lookupTy("msghdr");
+ std::optional<QualType> StructMsghdrPtrTy = getPointerTy(StructMsghdrTy);
+ std::optional<QualType> ConstStructMsghdrPtrTy =
getPointerTy(getConstTy(StructMsghdrTy));
// ssize_t recvmsg(int sockfd, struct msghdr *msg, int flags);
@@ -2237,7 +3267,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, StructMsghdrPtrTy, IntTy},
RetType{Ssize_tTy}),
Summary(NoEvalCall)
- .Case({ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ .Case({ReturnValueCondition(WithinRange, Range(1, Ssize_tMax))},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
@@ -2247,7 +3279,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstStructMsghdrPtrTy, IntTy},
RetType{Ssize_tTy}),
Summary(NoEvalCall)
- .Case({ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ .Case({ReturnValueCondition(WithinRange, Range(1, Ssize_tMax))},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
@@ -2258,7 +3292,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, IntTy, IntTy, ConstVoidPtrTy, Socklen_tTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(3)))
.ArgConstraint(
BufferSize(/*Buffer=*/ArgNo(3), /*BufSize=*/ArgNo(4)))
@@ -2274,7 +3309,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Socklen_tPtrRestrictTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(3)))
.ArgConstraint(NotNull(ArgNo(4))));
@@ -2285,7 +3321,12 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
RetType{Ssize_tTy}),
Summary(NoEvalCall)
.Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
- ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ ReturnValueCondition(WithinRange, Range(1, Ssize_tMax))},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case({ReturnValueCondition(WithinRange, SingleValue(0)),
+ ArgumentCondition(2, WithinRange, SingleValue(0))},
+ ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
/*BufSize=*/ArgNo(2))));
@@ -2295,9 +3336,19 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"socketpair",
Signature(ArgTypes{IntTy, IntTy, IntTy, IntPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(3))));
+ // int shutdown(int socket, int how);
+ addToFunctionSummaryMap(
+ "shutdown", Signature(ArgTypes{IntTy, IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
+
// int getnameinfo(const struct sockaddr *restrict sa, socklen_t salen,
// char *restrict node, socklen_t nodelen,
// char *restrict service,
@@ -2325,20 +3376,22 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
.ArgConstraint(
ArgumentCondition(5, WithinRange, Range(0, Socklen_tMax))));
- Optional<QualType> StructUtimbufTy = lookupTy("utimbuf");
- Optional<QualType> StructUtimbufPtrTy = getPointerTy(StructUtimbufTy);
+ std::optional<QualType> StructUtimbufTy = lookupTy("utimbuf");
+ std::optional<QualType> StructUtimbufPtrTy = getPointerTy(StructUtimbufTy);
// int utime(const char *filename, struct utimbuf *buf);
addToFunctionSummaryMap(
"utime",
Signature(ArgTypes{ConstCharPtrTy, StructUtimbufPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
- Optional<QualType> StructTimespecTy = lookupTy("timespec");
- Optional<QualType> StructTimespecPtrTy = getPointerTy(StructTimespecTy);
- Optional<QualType> ConstStructTimespecPtrTy =
+ std::optional<QualType> StructTimespecTy = lookupTy("timespec");
+ std::optional<QualType> StructTimespecPtrTy =
+ getPointerTy(StructTimespecTy);
+ std::optional<QualType> ConstStructTimespecPtrTy =
getPointerTy(getConstTy(StructTimespecTy));
// int futimens(int fd, const struct timespec times[2]);
@@ -2346,22 +3399,25 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"futimens",
Signature(ArgTypes{IntTy, ConstStructTimespecPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
// int utimensat(int dirfd, const char *pathname,
// const struct timespec times[2], int flags);
- addToFunctionSummaryMap("utimensat",
- Signature(ArgTypes{IntTy, ConstCharPtrTy,
- ConstStructTimespecPtrTy, IntTy},
- RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
- .ArgConstraint(NotNull(ArgNo(1))));
+ addToFunctionSummaryMap(
+ "utimensat",
+ Signature(
+ ArgTypes{IntTy, ConstCharPtrTy, ConstStructTimespecPtrTy, IntTy},
+ RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
+ .ArgConstraint(NotNull(ArgNo(1))));
- Optional<QualType> StructTimevalTy = lookupTy("timeval");
- Optional<QualType> ConstStructTimevalPtrTy =
+ std::optional<QualType> StructTimevalTy = lookupTy("timeval");
+ std::optional<QualType> ConstStructTimevalPtrTy =
getPointerTy(getConstTy(StructTimevalTy));
// int utimes(const char *filename, const struct timeval times[2]);
@@ -2370,7 +3426,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{ConstCharPtrTy, ConstStructTimevalPtrTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// int nanosleep(const struct timespec *rqtp, struct timespec *rmtp);
@@ -2379,20 +3436,23 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{ConstStructTimespecPtrTy, StructTimespecPtrTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
- Optional<QualType> Time_tTy = lookupTy("time_t");
- Optional<QualType> ConstTime_tPtrTy = getPointerTy(getConstTy(Time_tTy));
- Optional<QualType> ConstTime_tPtrRestrictTy =
+ std::optional<QualType> Time_tTy = lookupTy("time_t");
+ std::optional<QualType> ConstTime_tPtrTy =
+ getPointerTy(getConstTy(Time_tTy));
+ std::optional<QualType> ConstTime_tPtrRestrictTy =
getRestrictTy(ConstTime_tPtrTy);
- Optional<QualType> StructTmTy = lookupTy("tm");
- Optional<QualType> StructTmPtrTy = getPointerTy(StructTmTy);
- Optional<QualType> StructTmPtrRestrictTy = getRestrictTy(StructTmPtrTy);
- Optional<QualType> ConstStructTmPtrTy =
+ std::optional<QualType> StructTmTy = lookupTy("tm");
+ std::optional<QualType> StructTmPtrTy = getPointerTy(StructTmTy);
+ std::optional<QualType> StructTmPtrRestrictTy =
+ getRestrictTy(StructTmPtrTy);
+ std::optional<QualType> ConstStructTmPtrTy =
getPointerTy(getConstTy(StructTmTy));
- Optional<QualType> ConstStructTmPtrRestrictTy =
+ std::optional<QualType> ConstStructTmPtrRestrictTy =
getRestrictTy(ConstStructTmPtrTy);
// struct tm * localtime(const time_t *tp);
@@ -2448,46 +3508,54 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"gmtime", Signature(ArgTypes{ConstTime_tPtrTy}, RetType{StructTmPtrTy}),
Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
- Optional<QualType> Clockid_tTy = lookupTy("clockid_t");
+ std::optional<QualType> Clockid_tTy = lookupTy("clockid_t");
// int clock_gettime(clockid_t clock_id, struct timespec *tp);
addToFunctionSummaryMap(
"clock_gettime",
Signature(ArgTypes{Clockid_tTy, StructTimespecPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(1))));
- Optional<QualType> StructItimervalTy = lookupTy("itimerval");
- Optional<QualType> StructItimervalPtrTy = getPointerTy(StructItimervalTy);
+ std::optional<QualType> StructItimervalTy = lookupTy("itimerval");
+ std::optional<QualType> StructItimervalPtrTy =
+ getPointerTy(StructItimervalTy);
// int getitimer(int which, struct itimerval *curr_value);
addToFunctionSummaryMap(
"getitimer",
Signature(ArgTypes{IntTy, StructItimervalPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked, GenericSuccessMsg)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(1))));
- Optional<QualType> Pthread_cond_tTy = lookupTy("pthread_cond_t");
- Optional<QualType> Pthread_cond_tPtrTy = getPointerTy(Pthread_cond_tTy);
- Optional<QualType> Pthread_tTy = lookupTy("pthread_t");
- Optional<QualType> Pthread_tPtrTy = getPointerTy(Pthread_tTy);
- Optional<QualType> Pthread_tPtrRestrictTy = getRestrictTy(Pthread_tPtrTy);
- Optional<QualType> Pthread_mutex_tTy = lookupTy("pthread_mutex_t");
- Optional<QualType> Pthread_mutex_tPtrTy = getPointerTy(Pthread_mutex_tTy);
- Optional<QualType> Pthread_mutex_tPtrRestrictTy =
+ std::optional<QualType> Pthread_cond_tTy = lookupTy("pthread_cond_t");
+ std::optional<QualType> Pthread_cond_tPtrTy =
+ getPointerTy(Pthread_cond_tTy);
+ std::optional<QualType> Pthread_tTy = lookupTy("pthread_t");
+ std::optional<QualType> Pthread_tPtrTy = getPointerTy(Pthread_tTy);
+ std::optional<QualType> Pthread_tPtrRestrictTy =
+ getRestrictTy(Pthread_tPtrTy);
+ std::optional<QualType> Pthread_mutex_tTy = lookupTy("pthread_mutex_t");
+ std::optional<QualType> Pthread_mutex_tPtrTy =
+ getPointerTy(Pthread_mutex_tTy);
+ std::optional<QualType> Pthread_mutex_tPtrRestrictTy =
getRestrictTy(Pthread_mutex_tPtrTy);
- Optional<QualType> Pthread_attr_tTy = lookupTy("pthread_attr_t");
- Optional<QualType> Pthread_attr_tPtrTy = getPointerTy(Pthread_attr_tTy);
- Optional<QualType> ConstPthread_attr_tPtrTy =
+ std::optional<QualType> Pthread_attr_tTy = lookupTy("pthread_attr_t");
+ std::optional<QualType> Pthread_attr_tPtrTy =
+ getPointerTy(Pthread_attr_tTy);
+ std::optional<QualType> ConstPthread_attr_tPtrTy =
getPointerTy(getConstTy(Pthread_attr_tTy));
- Optional<QualType> ConstPthread_attr_tPtrRestrictTy =
+ std::optional<QualType> ConstPthread_attr_tPtrRestrictTy =
getRestrictTy(ConstPthread_attr_tPtrTy);
- Optional<QualType> Pthread_mutexattr_tTy = lookupTy("pthread_mutexattr_t");
- Optional<QualType> ConstPthread_mutexattr_tPtrTy =
+ std::optional<QualType> Pthread_mutexattr_tTy =
+ lookupTy("pthread_mutexattr_t");
+ std::optional<QualType> ConstPthread_mutexattr_tPtrTy =
getPointerTy(getConstTy(Pthread_mutexattr_tTy));
- Optional<QualType> ConstPthread_mutexattr_tPtrRestrictTy =
+ std::optional<QualType> ConstPthread_mutexattr_tPtrRestrictTy =
getRestrictTy(ConstPthread_mutexattr_tPtrTy);
QualType PthreadStartRoutineTy = getPointerTy(
@@ -2564,12 +3632,24 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
}
// Functions for testing.
- if (ChecksEnabled[CK_StdCLibraryFunctionsTesterChecker]) {
+ if (AddTestFunctions) {
+ const RangeInt IntMin = BVF.getMinValue(IntTy).getLimitedValue();
+
addToFunctionSummaryMap(
"__not_null", Signature(ArgTypes{IntPtrTy}, RetType{IntTy}),
Summary(EvalCallAsPure).ArgConstraint(NotNull(ArgNo(0))));
- // Test range values.
+ addToFunctionSummaryMap(
+ "__not_null_buffer",
+ Signature(ArgTypes{VoidPtrTy, IntTy, IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(NotNullBuffer(ArgNo(0), ArgNo(1), ArgNo(2))));
+
+ // Test inside range constraints.
+ addToFunctionSummaryMap(
+ "__single_val_0", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, WithinRange, SingleValue(0))));
addToFunctionSummaryMap(
"__single_val_1", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
@@ -2578,11 +3658,124 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"__range_1_2", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
.ArgConstraint(ArgumentCondition(0U, WithinRange, Range(1, 2))));
- addToFunctionSummaryMap("__range_1_2__4_5",
+ addToFunctionSummaryMap(
+ "__range_m1_1", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, WithinRange, Range(-1, 1))));
+ addToFunctionSummaryMap(
+ "__range_m2_m1", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, WithinRange, Range(-2, -1))));
+ addToFunctionSummaryMap(
+ "__range_m10_10", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, WithinRange, Range(-10, 10))));
+ addToFunctionSummaryMap("__range_m1_inf",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, WithinRange, Range(-1, IntMax))));
+ addToFunctionSummaryMap("__range_0_inf",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, WithinRange, Range(0, IntMax))));
+ addToFunctionSummaryMap("__range_1_inf",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, WithinRange, Range(1, IntMax))));
+ addToFunctionSummaryMap("__range_minf_m1",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, WithinRange, Range(IntMin, -1))));
+ addToFunctionSummaryMap("__range_minf_0",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, WithinRange, Range(IntMin, 0))));
+ addToFunctionSummaryMap("__range_minf_1",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, WithinRange, Range(IntMin, 1))));
+ addToFunctionSummaryMap("__range_1_2__4_6",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, WithinRange, Range({1, 2}, {4, 6}))));
+ addToFunctionSummaryMap(
+ "__range_1_2__4_inf", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, WithinRange,
+ Range({1, 2}, {4, IntMax}))));
+
+ // Test out of range constraints.
+ addToFunctionSummaryMap(
+ "__single_val_out_0", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, OutOfRange, SingleValue(0))));
+ addToFunctionSummaryMap(
+ "__single_val_out_1", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, OutOfRange, SingleValue(1))));
+ addToFunctionSummaryMap(
+ "__range_out_1_2", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, OutOfRange, Range(1, 2))));
+ addToFunctionSummaryMap(
+ "__range_out_m1_1", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, OutOfRange, Range(-1, 1))));
+ addToFunctionSummaryMap(
+ "__range_out_m2_m1", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, OutOfRange, Range(-2, -1))));
+ addToFunctionSummaryMap(
+ "__range_out_m10_10", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, OutOfRange, Range(-10, 10))));
+ addToFunctionSummaryMap("__range_out_m1_inf",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, OutOfRange, Range(-1, IntMax))));
+ addToFunctionSummaryMap("__range_out_0_inf",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, OutOfRange, Range(0, IntMax))));
+ addToFunctionSummaryMap("__range_out_1_inf",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, OutOfRange, Range(1, IntMax))));
+ addToFunctionSummaryMap("__range_out_minf_m1",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, OutOfRange, Range(IntMin, -1))));
+ addToFunctionSummaryMap("__range_out_minf_0",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, OutOfRange, Range(IntMin, 0))));
+ addToFunctionSummaryMap("__range_out_minf_1",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, OutOfRange, Range(IntMin, 1))));
+ addToFunctionSummaryMap("__range_out_1_2__4_6",
Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
.ArgConstraint(ArgumentCondition(
- 0U, WithinRange, Range({1, 2}, {4, 5}))));
+ 0U, OutOfRange, Range({1, 2}, {4, 6}))));
+ addToFunctionSummaryMap(
+ "__range_out_1_2__4_inf", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(
+ ArgumentCondition(0U, OutOfRange, Range({1, 2}, {4, IntMax}))));
// Test range kind.
addToFunctionSummaryMap(
@@ -2638,18 +3831,48 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"__test_restrict_param_2"},
Signature(ArgTypes{VoidPtrRestrictTy}, RetType{VoidTy}),
Summary(EvalCallAsPure));
- }
- SummariesInitialized = true;
+ // Test the application of cases.
+ addToFunctionSummaryMap(
+ "__test_case_note", Signature(ArgTypes{}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .Case({ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant, "Function returns 0")
+ .Case({ReturnValueCondition(WithinRange, SingleValue(1))},
+ ErrnoIrrelevant, "Function returns 1"));
+ addToFunctionSummaryMap(
+ "__test_case_range_1_2__4_6",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .Case({ArgumentCondition(0U, WithinRange,
+ IntRangeVector{{IntMin, 0}, {3, 3}}),
+ ReturnValueCondition(WithinRange, SingleValue(1))},
+ ErrnoIrrelevant)
+ .Case({ArgumentCondition(0U, WithinRange,
+ IntRangeVector{{3, 3}, {7, IntMax}}),
+ ReturnValueCondition(WithinRange, SingleValue(2))},
+ ErrnoIrrelevant)
+ .Case({ArgumentCondition(0U, WithinRange,
+ IntRangeVector{{IntMin, 0}, {7, IntMax}}),
+ ReturnValueCondition(WithinRange, SingleValue(3))},
+ ErrnoIrrelevant)
+ .Case({ArgumentCondition(
+ 0U, WithinRange,
+ IntRangeVector{{IntMin, 0}, {3, 3}, {7, IntMax}}),
+ ReturnValueCondition(WithinRange, SingleValue(4))},
+ ErrnoIrrelevant));
+ }
}
void ento::registerStdCLibraryFunctionsChecker(CheckerManager &mgr) {
auto *Checker = mgr.registerChecker<StdLibraryFunctionsChecker>();
+ Checker->CheckName = mgr.getCurrentCheckerName();
+ const AnalyzerOptions &Opts = mgr.getAnalyzerOptions();
Checker->DisplayLoadedSummaries =
- mgr.getAnalyzerOptions().getCheckerBooleanOption(
- Checker, "DisplayLoadedSummaries");
- Checker->ModelPOSIX =
- mgr.getAnalyzerOptions().getCheckerBooleanOption(Checker, "ModelPOSIX");
+ Opts.getCheckerBooleanOption(Checker, "DisplayLoadedSummaries");
+ Checker->ModelPOSIX = Opts.getCheckerBooleanOption(Checker, "ModelPOSIX");
+ Checker->ShouldAssumeControlledEnvironment =
+ Opts.ShouldAssumeControlledEnvironment;
}
bool ento::shouldRegisterStdCLibraryFunctionsChecker(
@@ -2657,16 +3880,12 @@ bool ento::shouldRegisterStdCLibraryFunctionsChecker(
return true;
}
-#define REGISTER_CHECKER(name) \
- void ento::register##name(CheckerManager &mgr) { \
- StdLibraryFunctionsChecker *checker = \
- mgr.getChecker<StdLibraryFunctionsChecker>(); \
- checker->ChecksEnabled[StdLibraryFunctionsChecker::CK_##name] = true; \
- checker->CheckNames[StdLibraryFunctionsChecker::CK_##name] = \
- mgr.getCurrentCheckerName(); \
- } \
- \
- bool ento::shouldRegister##name(const CheckerManager &mgr) { return true; }
-
-REGISTER_CHECKER(StdCLibraryFunctionArgsChecker)
-REGISTER_CHECKER(StdCLibraryFunctionsTesterChecker)
+void ento::registerStdCLibraryFunctionsTesterChecker(CheckerManager &mgr) {
+ auto *Checker = mgr.getChecker<StdLibraryFunctionsChecker>();
+ Checker->AddTestFunctions = true;
+}
+
+bool ento::shouldRegisterStdCLibraryFunctionsTesterChecker(
+ const CheckerManager &mgr) {
+ return true;
+}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp
new file mode 100644
index 000000000000..f7b7befe28ee
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp
@@ -0,0 +1,298 @@
+//===- StdVariantChecker.cpp -------------------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Type.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
+#include <optional>
+#include <string_view>
+
+#include "TaggedUnionModeling.h"
+
+using namespace clang;
+using namespace ento;
+using namespace tagged_union_modeling;
+
+REGISTER_MAP_WITH_PROGRAMSTATE(VariantHeldTypeMap, const MemRegion *, QualType)
+
+namespace clang::ento::tagged_union_modeling {
+
+const CXXConstructorDecl *
+getConstructorDeclarationForCall(const CallEvent &Call) {
+ const auto *ConstructorCall = dyn_cast<CXXConstructorCall>(&Call);
+ if (!ConstructorCall)
+ return nullptr;
+
+ return ConstructorCall->getDecl();
+}
+
+bool isCopyConstructorCall(const CallEvent &Call) {
+ if (const CXXConstructorDecl *ConstructorDecl =
+ getConstructorDeclarationForCall(Call))
+ return ConstructorDecl->isCopyConstructor();
+ return false;
+}
+
+bool isCopyAssignmentCall(const CallEvent &Call) {
+ const Decl *CopyAssignmentDecl = Call.getDecl();
+
+ if (const auto *AsMethodDecl =
+ dyn_cast_or_null<CXXMethodDecl>(CopyAssignmentDecl))
+ return AsMethodDecl->isCopyAssignmentOperator();
+ return false;
+}
+
+bool isMoveConstructorCall(const CallEvent &Call) {
+ const CXXConstructorDecl *ConstructorDecl =
+ getConstructorDeclarationForCall(Call);
+ if (!ConstructorDecl)
+ return false;
+
+ return ConstructorDecl->isMoveConstructor();
+}
+
+bool isMoveAssignmentCall(const CallEvent &Call) {
+ const Decl *CopyAssignmentDecl = Call.getDecl();
+
+ const auto *AsMethodDecl =
+ dyn_cast_or_null<CXXMethodDecl>(CopyAssignmentDecl);
+ if (!AsMethodDecl)
+ return false;
+
+ return AsMethodDecl->isMoveAssignmentOperator();
+}
+
+bool isStdType(const Type *Type, llvm::StringRef TypeName) {
+ auto *Decl = Type->getAsRecordDecl();
+ if (!Decl)
+ return false;
+ return (Decl->getName() == TypeName) && Decl->isInStdNamespace();
+}
+
+bool isStdVariant(const Type *Type) {
+ return isStdType(Type, llvm::StringLiteral("variant"));
+}
+
+} // end of namespace clang::ento::tagged_union_modeling
+
+static std::optional<ArrayRef<TemplateArgument>>
+getTemplateArgsFromVariant(const Type *VariantType) {
+ const auto *TempSpecType = VariantType->getAs<TemplateSpecializationType>();
+ if (!TempSpecType)
+ return {};
+
+ return TempSpecType->template_arguments();
+}
+
+static std::optional<QualType>
+getNthTemplateTypeArgFromVariant(const Type *varType, unsigned i) {
+ std::optional<ArrayRef<TemplateArgument>> VariantTemplates =
+ getTemplateArgsFromVariant(varType);
+ if (!VariantTemplates)
+ return {};
+
+ return (*VariantTemplates)[i].getAsType();
+}
+
+static bool isVowel(char a) {
+ switch (a) {
+ case 'a':
+ case 'e':
+ case 'i':
+ case 'o':
+ case 'u':
+ return true;
+ default:
+ return false;
+ }
+}
+
+static llvm::StringRef indefiniteArticleBasedOnVowel(char a) {
+ if (isVowel(a))
+ return "an";
+ return "a";
+}
+
+class StdVariantChecker : public Checker<eval::Call, check::RegionChanges> {
+ // Call descriptors to find relevant calls
+ CallDescription VariantConstructor{{"std", "variant", "variant"}};
+ CallDescription VariantAssignmentOperator{{"std", "variant", "operator="}};
+ CallDescription StdGet{{"std", "get"}, 1, 1};
+
+ BugType BadVariantType{this, "BadVariantType", "BadVariantType"};
+
+public:
+ ProgramStateRef checkRegionChanges(ProgramStateRef State,
+ const InvalidatedSymbols *,
+ ArrayRef<const MemRegion *>,
+ ArrayRef<const MemRegion *> Regions,
+ const LocationContext *,
+ const CallEvent *Call) const {
+ if (!Call)
+ return State;
+
+ return removeInformationStoredForDeadInstances<VariantHeldTypeMap>(
+ *Call, State, Regions);
+ }
+
+ bool evalCall(const CallEvent &Call, CheckerContext &C) const {
+ // Check if the call was not made from a system header. If it was then
+ // we do an early return because it is part of the implementation.
+ if (Call.isCalledFromSystemHeader())
+ return false;
+
+ if (StdGet.matches(Call))
+ return handleStdGetCall(Call, C);
+
+ // First check if a constructor call is happening. If it is a
+ // constructor call, check if it is an std::variant constructor call.
+ bool IsVariantConstructor =
+ isa<CXXConstructorCall>(Call) && VariantConstructor.matches(Call);
+ bool IsVariantAssignmentOperatorCall =
+ isa<CXXMemberOperatorCall>(Call) &&
+ VariantAssignmentOperator.matches(Call);
+
+ if (IsVariantConstructor || IsVariantAssignmentOperatorCall) {
+ if (Call.getNumArgs() == 0 && IsVariantConstructor) {
+ handleDefaultConstructor(cast<CXXConstructorCall>(&Call), C);
+ return true;
+ }
+
+ // FIXME Later this checker should be extended to handle constructors
+ // with multiple arguments.
+ if (Call.getNumArgs() != 1)
+ return false;
+
+ SVal ThisSVal;
+ if (IsVariantConstructor) {
+ const auto &AsConstructorCall = cast<CXXConstructorCall>(Call);
+ ThisSVal = AsConstructorCall.getCXXThisVal();
+ } else if (IsVariantAssignmentOperatorCall) {
+ const auto &AsMemberOpCall = cast<CXXMemberOperatorCall>(Call);
+ ThisSVal = AsMemberOpCall.getCXXThisVal();
+ } else {
+ return false;
+ }
+
+ handleConstructorAndAssignment<VariantHeldTypeMap>(Call, C, ThisSVal);
+ return true;
+ }
+ return false;
+ }
+
+private:
+ // The default constructed std::variant must be handled separately
+ // by default the std::variant is going to hold a default constructed instance
+ // of the first type of the possible types
+ void handleDefaultConstructor(const CXXConstructorCall *ConstructorCall,
+ CheckerContext &C) const {
+ SVal ThisSVal = ConstructorCall->getCXXThisVal();
+
+ const auto *const ThisMemRegion = ThisSVal.getAsRegion();
+ if (!ThisMemRegion)
+ return;
+
+ std::optional<QualType> DefaultType = getNthTemplateTypeArgFromVariant(
+ ThisSVal.getType(C.getASTContext())->getPointeeType().getTypePtr(), 0);
+ if (!DefaultType)
+ return;
+
+ ProgramStateRef State = ConstructorCall->getState();
+ State = State->set<VariantHeldTypeMap>(ThisMemRegion, *DefaultType);
+ C.addTransition(State);
+ }
+
+ bool handleStdGetCall(const CallEvent &Call, CheckerContext &C) const {
+ ProgramStateRef State = Call.getState();
+
+ const auto &ArgType = Call.getArgSVal(0)
+ .getType(C.getASTContext())
+ ->getPointeeType()
+ .getTypePtr();
+ // We have to make sure that the argument is an std::variant.
+ // There is another std::get with std::pair argument
+ if (!isStdVariant(ArgType))
+ return false;
+
+ // Get the mem region of the argument std::variant and look up the type
+ // information that we know about it.
+ const MemRegion *ArgMemRegion = Call.getArgSVal(0).getAsRegion();
+ const QualType *StoredType = State->get<VariantHeldTypeMap>(ArgMemRegion);
+ if (!StoredType)
+ return false;
+
+ const CallExpr *CE = cast<CallExpr>(Call.getOriginExpr());
+ const FunctionDecl *FD = CE->getDirectCallee();
+ if (FD->getTemplateSpecializationArgs()->size() < 1)
+ return false;
+
+ const auto &TypeOut = FD->getTemplateSpecializationArgs()->asArray()[0];
+ // std::get's first template parameter can be the type we want to get
+ // out of the std::variant or a natural number which is the position of
+ // the requested type in the argument type list of the std::variant's
+ // argument.
+ QualType RetrievedType;
+ switch (TypeOut.getKind()) {
+ case TemplateArgument::ArgKind::Type:
+ RetrievedType = TypeOut.getAsType();
+ break;
+ case TemplateArgument::ArgKind::Integral:
+ // In the natural number case we look up which type corresponds to the
+ // number.
+ if (std::optional<QualType> NthTemplate =
+ getNthTemplateTypeArgFromVariant(
+ ArgType, TypeOut.getAsIntegral().getSExtValue())) {
+ RetrievedType = *NthTemplate;
+ break;
+ }
+ [[fallthrough]];
+ default:
+ return false;
+ }
+
+ QualType RetrievedCanonicalType = RetrievedType.getCanonicalType();
+ QualType StoredCanonicalType = StoredType->getCanonicalType();
+ if (RetrievedCanonicalType == StoredCanonicalType)
+ return true;
+
+ ExplodedNode *ErrNode = C.generateNonFatalErrorNode();
+ if (!ErrNode)
+ return false;
+ llvm::SmallString<128> Str;
+ llvm::raw_svector_ostream OS(Str);
+ std::string StoredTypeName = StoredType->getAsString();
+ std::string RetrievedTypeName = RetrievedType.getAsString();
+ OS << "std::variant " << ArgMemRegion->getDescriptiveName() << " held "
+ << indefiniteArticleBasedOnVowel(StoredTypeName[0]) << " \'"
+ << StoredTypeName << "\', not "
+ << indefiniteArticleBasedOnVowel(RetrievedTypeName[0]) << " \'"
+ << RetrievedTypeName << "\'";
+ auto R = std::make_unique<PathSensitiveBugReport>(BadVariantType, OS.str(),
+ ErrNode);
+ C.emitReport(std::move(R));
+ return true;
+ }
+};
+
+bool clang::ento::shouldRegisterStdVariantChecker(
+ clang::ento::CheckerManager const &mgr) {
+ return true;
+}
+
+void clang::ento::registerStdVariantChecker(clang::ento::CheckerManager &mgr) {
+ mgr.registerChecker<StdVariantChecker>();
+} \ No newline at end of file
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index dd65f8c035aa..07727b339d96 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -14,12 +14,15 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include <functional>
+#include <optional>
using namespace clang;
using namespace ento;
@@ -84,10 +87,10 @@ const StreamErrorState ErrorFError{false, false, true};
/// Full state information about a stream pointer.
struct StreamState {
/// The last file operation called in the stream.
+ /// Can be nullptr.
const FnDescription *LastOperation;
/// State of a stream symbol.
- /// FIXME: We need maybe an "escaped" state later.
enum KindTy {
Opened, /// Stream is opened.
Closed, /// Closed stream (an invalid stream pointer after it was closed).
@@ -145,7 +148,7 @@ struct StreamState {
void Profile(llvm::FoldingSetNodeID &ID) const {
ID.AddPointer(LastOperation);
ID.AddInteger(State);
- ID.AddInteger(ErrorState);
+ ErrorState.Profile(ID);
ID.AddBoolean(FilePositionIndeterminate);
}
};
@@ -201,7 +204,7 @@ ProgramStateRef bindAndAssumeTrue(ProgramStateRef State, CheckerContext &C,
ProgramStateRef bindInt(uint64_t Value, ProgramStateRef State,
CheckerContext &C, const CallExpr *CE) {
State = State->BindExpr(CE, C.getLocationContext(),
- C.getSValBuilder().makeIntVal(Value, false));
+ C.getSValBuilder().makeIntVal(Value, CE->getType()));
return State;
}
@@ -235,48 +238,96 @@ public:
private:
CallDescriptionMap<FnDescription> FnDescriptions = {
- {{"fopen"}, {nullptr, &StreamChecker::evalFopen, ArgNone}},
- {{"freopen", 3},
+ {{{"fopen"}, 2}, {nullptr, &StreamChecker::evalFopen, ArgNone}},
+ {{{"fdopen"}, 2}, {nullptr, &StreamChecker::evalFopen, ArgNone}},
+ {{{"freopen"}, 3},
{&StreamChecker::preFreopen, &StreamChecker::evalFreopen, 2}},
- {{"tmpfile"}, {nullptr, &StreamChecker::evalFopen, ArgNone}},
- {{"fclose", 1},
+ {{{"tmpfile"}, 0}, {nullptr, &StreamChecker::evalFopen, ArgNone}},
+ {{{"fclose"}, 1},
{&StreamChecker::preDefault, &StreamChecker::evalFclose, 0}},
- {{"fread", 4},
- {&StreamChecker::preFread,
+ {{{"fread"}, 4},
+ {std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, true),
std::bind(&StreamChecker::evalFreadFwrite, _1, _2, _3, _4, true), 3}},
- {{"fwrite", 4},
- {&StreamChecker::preFwrite,
+ {{{"fwrite"}, 4},
+ {std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, false),
std::bind(&StreamChecker::evalFreadFwrite, _1, _2, _3, _4, false), 3}},
- {{"fseek", 3}, {&StreamChecker::preFseek, &StreamChecker::evalFseek, 0}},
- {{"ftell", 1}, {&StreamChecker::preDefault, nullptr, 0}},
- {{"rewind", 1}, {&StreamChecker::preDefault, nullptr, 0}},
- {{"fgetpos", 2}, {&StreamChecker::preDefault, nullptr, 0}},
- {{"fsetpos", 2}, {&StreamChecker::preDefault, nullptr, 0}},
- {{"clearerr", 1},
+ {{{"fgetc"}, 1},
+ {std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, true),
+ std::bind(&StreamChecker::evalFgetx, _1, _2, _3, _4, true), 0}},
+ {{{"fgets"}, 3},
+ {std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, true),
+ std::bind(&StreamChecker::evalFgetx, _1, _2, _3, _4, false), 2}},
+ {{{"fputc"}, 2},
+ {std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, false),
+ std::bind(&StreamChecker::evalFputx, _1, _2, _3, _4, true), 1}},
+ {{{"fputs"}, 2},
+ {std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, false),
+ std::bind(&StreamChecker::evalFputx, _1, _2, _3, _4, false), 1}},
+ {{{"fprintf"}},
+ {std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, false),
+ std::bind(&StreamChecker::evalFprintf, _1, _2, _3, _4), 0}},
+ {{{"fscanf"}},
+ {std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, true),
+ std::bind(&StreamChecker::evalFscanf, _1, _2, _3, _4), 0}},
+ {{{"ungetc"}, 2},
+ {std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, false),
+ std::bind(&StreamChecker::evalUngetc, _1, _2, _3, _4), 1}},
+ {{{"getdelim"}, 4},
+ {std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, true),
+ std::bind(&StreamChecker::evalGetdelim, _1, _2, _3, _4), 3}},
+ {{{"getline"}, 3},
+ {std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, true),
+ std::bind(&StreamChecker::evalGetdelim, _1, _2, _3, _4), 2}},
+ {{{"fseek"}, 3},
+ {&StreamChecker::preFseek, &StreamChecker::evalFseek, 0}},
+ {{{"fseeko"}, 3},
+ {&StreamChecker::preFseek, &StreamChecker::evalFseek, 0}},
+ {{{"ftell"}, 1},
+ {&StreamChecker::preDefault, &StreamChecker::evalFtell, 0}},
+ {{{"ftello"}, 1},
+ {&StreamChecker::preDefault, &StreamChecker::evalFtell, 0}},
+ {{{"fflush"}, 1},
+ {&StreamChecker::preFflush, &StreamChecker::evalFflush, 0}},
+ {{{"rewind"}, 1},
+ {&StreamChecker::preDefault, &StreamChecker::evalRewind, 0}},
+ {{{"fgetpos"}, 2},
+ {&StreamChecker::preDefault, &StreamChecker::evalFgetpos, 0}},
+ {{{"fsetpos"}, 2},
+ {&StreamChecker::preDefault, &StreamChecker::evalFsetpos, 0}},
+ {{{"clearerr"}, 1},
{&StreamChecker::preDefault, &StreamChecker::evalClearerr, 0}},
- {{"feof", 1},
+ {{{"feof"}, 1},
{&StreamChecker::preDefault,
std::bind(&StreamChecker::evalFeofFerror, _1, _2, _3, _4, ErrorFEof),
0}},
- {{"ferror", 1},
+ {{{"ferror"}, 1},
{&StreamChecker::preDefault,
std::bind(&StreamChecker::evalFeofFerror, _1, _2, _3, _4, ErrorFError),
0}},
- {{"fileno", 1}, {&StreamChecker::preDefault, nullptr, 0}},
+ {{{"fileno"}, 1}, {&StreamChecker::preDefault, nullptr, 0}},
};
CallDescriptionMap<FnDescription> FnTestDescriptions = {
- {{"StreamTesterChecker_make_feof_stream", 1},
+ {{{"StreamTesterChecker_make_feof_stream"}, 1},
{nullptr,
std::bind(&StreamChecker::evalSetFeofFerror, _1, _2, _3, _4, ErrorFEof),
0}},
- {{"StreamTesterChecker_make_ferror_stream", 1},
+ {{{"StreamTesterChecker_make_ferror_stream"}, 1},
{nullptr,
std::bind(&StreamChecker::evalSetFeofFerror, _1, _2, _3, _4,
ErrorFError),
0}},
};
+ /// Expanded value of EOF, empty before initialization.
+ mutable std::optional<int> EofVal;
+ /// Expanded value of SEEK_SET, 0 if not found.
+ mutable int SeekSetVal = 0;
+ /// Expanded value of SEEK_CUR, 1 if not found.
+ mutable int SeekCurVal = 1;
+ /// Expanded value of SEEK_END, 2 if not found.
+ mutable int SeekEndVal = 2;
+
void evalFopen(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const;
@@ -288,20 +339,47 @@ private:
void evalFclose(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const;
- void preFread(const FnDescription *Desc, const CallEvent &Call,
- CheckerContext &C) const;
-
- void preFwrite(const FnDescription *Desc, const CallEvent &Call,
- CheckerContext &C) const;
+ void preReadWrite(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C, bool IsRead) const;
void evalFreadFwrite(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C, bool IsFread) const;
+ void evalFgetx(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C, bool SingleChar) const;
+
+ void evalFputx(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C, bool IsSingleChar) const;
+
+ void evalFprintf(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void evalFscanf(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void evalUngetc(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void evalGetdelim(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
void preFseek(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const;
void evalFseek(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const;
+ void evalFgetpos(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void evalFsetpos(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void evalFtell(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void evalRewind(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
void preDefault(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const;
@@ -316,6 +394,12 @@ private:
CheckerContext &C,
const StreamErrorState &ErrorKind) const;
+ void preFflush(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
+ void evalFflush(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const;
+
/// Check that the stream (in StreamVal) is not NULL.
/// If it can only be NULL a fatal error is emitted and nullptr returned.
/// Otherwise the return value is a new state where the stream is constrained
@@ -367,7 +451,7 @@ private:
// (and matching name) as stream functions.
if (!Call.isGlobalCFunction())
return nullptr;
- for (auto P : Call.parameters()) {
+ for (auto *P : Call.parameters()) {
QualType T = P->getType();
if (!T->isIntegralOrEnumerationType() && !T->isPointerType())
return nullptr;
@@ -378,23 +462,14 @@ private:
/// Generate a message for BugReporterVisitor if the stored symbol is
/// marked as interesting by the actual bug report.
- // FIXME: Use lambda instead.
- struct NoteFn {
- const BugType *BT_ResourceLeak;
- SymbolRef StreamSym;
- std::string Message;
-
- std::string operator()(PathSensitiveBugReport &BR) const {
- if (BR.isInteresting(StreamSym) && &BR.getBugType() == BT_ResourceLeak)
- return Message;
-
- return "";
- }
- };
-
const NoteTag *constructNoteTag(CheckerContext &C, SymbolRef StreamSym,
const std::string &Message) const {
- return C.getNoteTag(NoteFn{&BT_ResourceLeak, StreamSym, Message});
+ return C.getNoteTag([this, StreamSym,
+ Message](PathSensitiveBugReport &BR) -> std::string {
+ if (BR.isInteresting(StreamSym) && &BR.getBugType() == &BT_ResourceLeak)
+ return Message;
+ return "";
+ });
}
const NoteTag *constructSetEofNoteTag(CheckerContext &C,
@@ -410,6 +485,26 @@ private:
});
}
+ void initMacroValues(CheckerContext &C) const {
+ if (EofVal)
+ return;
+
+ if (const std::optional<int> OptInt =
+ tryExpandAsInteger("EOF", C.getPreprocessor()))
+ EofVal = *OptInt;
+ else
+ EofVal = -1;
+ if (const std::optional<int> OptInt =
+ tryExpandAsInteger("SEEK_SET", C.getPreprocessor()))
+ SeekSetVal = *OptInt;
+ if (const std::optional<int> OptInt =
+ tryExpandAsInteger("SEEK_END", C.getPreprocessor()))
+ SeekEndVal = *OptInt;
+ if (const std::optional<int> OptInt =
+ tryExpandAsInteger("SEEK_CUR", C.getPreprocessor()))
+ SeekCurVal = *OptInt;
+ }
+
/// Searches for the ExplodedNode where the file descriptor was acquired for
/// StreamSym.
static const ExplodedNode *getAcquisitionSite(const ExplodedNode *N,
@@ -425,8 +520,7 @@ private:
REGISTER_MAP_WITH_PROGRAMSTATE(StreamMap, SymbolRef, StreamState)
inline void assertStreamStateOpened(const StreamState *SS) {
- assert(SS->isOpened() &&
- "Previous create of error node for non-opened stream failed?");
+ assert(SS->isOpened() && "Stream is expected to be opened");
}
const ExplodedNode *StreamChecker::getAcquisitionSite(const ExplodedNode *N,
@@ -456,6 +550,8 @@ const ExplodedNode *StreamChecker::getAcquisitionSite(const ExplodedNode *N,
void StreamChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
+ initMacroValues(C);
+
const FnDescription *Desc = lookupFn(Call);
if (!Desc || !Desc->PreFn)
return;
@@ -525,7 +621,7 @@ void StreamChecker::evalFreopen(const FnDescription *Desc,
if (!CE)
return;
- Optional<DefinedSVal> StreamVal =
+ std::optional<DefinedSVal> StreamVal =
getStreamArg(Desc, Call).getAs<DefinedSVal>();
if (!StreamVal)
return;
@@ -548,8 +644,9 @@ void StreamChecker::evalFreopen(const FnDescription *Desc,
State->BindExpr(CE, C.getLocationContext(), *StreamVal);
// Generate state for NULL return value.
// Stream switches to OpenFailed state.
- ProgramStateRef StateRetNull = State->BindExpr(CE, C.getLocationContext(),
- C.getSValBuilder().makeNull());
+ ProgramStateRef StateRetNull =
+ State->BindExpr(CE, C.getLocationContext(),
+ C.getSValBuilder().makeNullWithType(CE->getType()));
StateRetNotNull =
StateRetNotNull->set<StreamMap>(StreamSym, StreamState::getOpened(Desc));
@@ -572,6 +669,10 @@ void StreamChecker::evalFclose(const FnDescription *Desc, const CallEvent &Call,
if (!SS)
return;
+ auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+
assertStreamStateOpened(SS);
// Close the File Descriptor.
@@ -579,11 +680,21 @@ void StreamChecker::evalFclose(const FnDescription *Desc, const CallEvent &Call,
// and can not be used any more.
State = State->set<StreamMap>(Sym, StreamState::getClosed(Desc));
- C.addTransition(State);
+ // Return 0 on success, EOF on failure.
+ SValBuilder &SVB = C.getSValBuilder();
+ ProgramStateRef StateSuccess = State->BindExpr(
+ CE, C.getLocationContext(), SVB.makeIntVal(0, C.getASTContext().IntTy));
+ ProgramStateRef StateFailure =
+ State->BindExpr(CE, C.getLocationContext(),
+ SVB.makeIntVal(*EofVal, C.getASTContext().IntTy));
+
+ C.addTransition(StateSuccess);
+ C.addTransition(StateFailure);
}
-void StreamChecker::preFread(const FnDescription *Desc, const CallEvent &Call,
- CheckerContext &C) const {
+void StreamChecker::preReadWrite(const FnDescription *Desc,
+ const CallEvent &Call, CheckerContext &C,
+ bool IsRead) const {
ProgramStateRef State = C.getState();
SVal StreamVal = getStreamArg(Desc, Call);
State = ensureStreamNonNull(StreamVal, Call.getArgExpr(Desc->StreamArgNo), C,
@@ -597,6 +708,11 @@ void StreamChecker::preFread(const FnDescription *Desc, const CallEvent &Call,
if (!State)
return;
+ if (!IsRead) {
+ C.addTransition(State);
+ return;
+ }
+
SymbolRef Sym = StreamVal.getAsSymbol();
if (Sym && State->get<StreamMap>(Sym)) {
const StreamState *SS = State->get<StreamMap>(Sym);
@@ -607,24 +723,6 @@ void StreamChecker::preFread(const FnDescription *Desc, const CallEvent &Call,
}
}
-void StreamChecker::preFwrite(const FnDescription *Desc, const CallEvent &Call,
- CheckerContext &C) const {
- ProgramStateRef State = C.getState();
- SVal StreamVal = getStreamArg(Desc, Call);
- State = ensureStreamNonNull(StreamVal, Call.getArgExpr(Desc->StreamArgNo), C,
- State);
- if (!State)
- return;
- State = ensureStreamOpened(StreamVal, C, State);
- if (!State)
- return;
- State = ensureNoFilePositionIndeterminate(StreamVal, C, State);
- if (!State)
- return;
-
- C.addTransition(State);
-}
-
void StreamChecker::evalFreadFwrite(const FnDescription *Desc,
const CallEvent &Call, CheckerContext &C,
bool IsFread) const {
@@ -637,10 +735,10 @@ void StreamChecker::evalFreadFwrite(const FnDescription *Desc,
if (!CE)
return;
- Optional<NonLoc> SizeVal = Call.getArgSVal(1).getAs<NonLoc>();
+ std::optional<NonLoc> SizeVal = Call.getArgSVal(1).getAs<NonLoc>();
if (!SizeVal)
return;
- Optional<NonLoc> NMembVal = Call.getArgSVal(2).getAs<NonLoc>();
+ std::optional<NonLoc> NMembVal = Call.getArgSVal(2).getAs<NonLoc>();
if (!NMembVal)
return;
@@ -670,24 +768,19 @@ void StreamChecker::evalFreadFwrite(const FnDescription *Desc,
if (!IsFread || (OldSS->ErrorState != ErrorFEof)) {
ProgramStateRef StateNotFailed =
State->BindExpr(CE, C.getLocationContext(), *NMembVal);
- if (StateNotFailed) {
- StateNotFailed = StateNotFailed->set<StreamMap>(
- StreamSym, StreamState::getOpened(Desc));
- C.addTransition(StateNotFailed);
- }
+ StateNotFailed =
+ StateNotFailed->set<StreamMap>(StreamSym, StreamState::getOpened(Desc));
+ C.addTransition(StateNotFailed);
}
// Add transition for the failed state.
- Optional<NonLoc> RetVal = makeRetVal(C, CE).castAs<NonLoc>();
- assert(RetVal && "Value should be NonLoc.");
+ NonLoc RetVal = makeRetVal(C, CE).castAs<NonLoc>();
ProgramStateRef StateFailed =
- State->BindExpr(CE, C.getLocationContext(), *RetVal);
- if (!StateFailed)
- return;
- auto Cond = C.getSValBuilder()
- .evalBinOpNN(State, BO_LT, *RetVal, *NMembVal,
- C.getASTContext().IntTy)
- .getAs<DefinedOrUnknownSVal>();
+ State->BindExpr(CE, C.getLocationContext(), RetVal);
+ SValBuilder &SVB = C.getSValBuilder();
+ auto Cond =
+ SVB.evalBinOpNN(State, BO_LT, RetVal, *NMembVal, SVB.getConditionType())
+ .getAs<DefinedOrUnknownSVal>();
if (!Cond)
return;
StateFailed = StateFailed->assume(*Cond, true);
@@ -710,6 +803,351 @@ void StreamChecker::evalFreadFwrite(const FnDescription *Desc,
C.addTransition(StateFailed);
}
+void StreamChecker::evalFgetx(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C, bool SingleChar) const {
+ ProgramStateRef State = C.getState();
+ SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
+ if (!StreamSym)
+ return;
+
+ const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+
+ const StreamState *OldSS = State->get<StreamMap>(StreamSym);
+ if (!OldSS)
+ return;
+
+ assertStreamStateOpened(OldSS);
+
+ // `fgetc` returns the read character on success, otherwise returns EOF.
+ // `fgets` returns the read buffer address on success, otherwise returns NULL.
+
+ if (OldSS->ErrorState != ErrorFEof) {
+ if (SingleChar) {
+ // Generate a transition for the success state of `fgetc`.
+ NonLoc RetVal = makeRetVal(C, CE).castAs<NonLoc>();
+ ProgramStateRef StateNotFailed =
+ State->BindExpr(CE, C.getLocationContext(), RetVal);
+ SValBuilder &SVB = C.getSValBuilder();
+ ASTContext &ASTC = C.getASTContext();
+ // The returned 'unsigned char' of `fgetc` is converted to 'int',
+ // so we need to check if it is in range [0, 255].
+ auto CondLow =
+ SVB.evalBinOp(State, BO_GE, RetVal, SVB.makeZeroVal(ASTC.IntTy),
+ SVB.getConditionType())
+ .getAs<DefinedOrUnknownSVal>();
+ auto CondHigh =
+ SVB.evalBinOp(State, BO_LE, RetVal,
+ SVB.makeIntVal(SVB.getBasicValueFactory()
+ .getMaxValue(ASTC.UnsignedCharTy)
+ .getLimitedValue(),
+ ASTC.IntTy),
+ SVB.getConditionType())
+ .getAs<DefinedOrUnknownSVal>();
+ if (!CondLow || !CondHigh)
+ return;
+ StateNotFailed = StateNotFailed->assume(*CondLow, true);
+ if (!StateNotFailed)
+ return;
+ StateNotFailed = StateNotFailed->assume(*CondHigh, true);
+ if (!StateNotFailed)
+ return;
+ C.addTransition(StateNotFailed);
+ } else {
+ // Generate a transition for the success state of `fgets`.
+ std::optional<DefinedSVal> GetBuf =
+ Call.getArgSVal(0).getAs<DefinedSVal>();
+ if (!GetBuf)
+ return;
+ ProgramStateRef StateNotFailed =
+ State->BindExpr(CE, C.getLocationContext(), *GetBuf);
+ StateNotFailed = StateNotFailed->set<StreamMap>(
+ StreamSym, StreamState::getOpened(Desc));
+ C.addTransition(StateNotFailed);
+ }
+ }
+
+ // Add transition for the failed state.
+ ProgramStateRef StateFailed;
+ if (SingleChar)
+ StateFailed = bindInt(*EofVal, State, C, CE);
+ else
+ StateFailed =
+ State->BindExpr(CE, C.getLocationContext(),
+ C.getSValBuilder().makeNullWithType(CE->getType()));
+
+ // If a (non-EOF) error occurs, the resulting value of the file position
+ // indicator for the stream is indeterminate.
+ StreamErrorState NewES =
+ OldSS->ErrorState == ErrorFEof ? ErrorFEof : ErrorFEof | ErrorFError;
+ StreamState NewSS = StreamState::getOpened(Desc, NewES, !NewES.isFEof());
+ StateFailed = StateFailed->set<StreamMap>(StreamSym, NewSS);
+ if (OldSS->ErrorState != ErrorFEof)
+ C.addTransition(StateFailed, constructSetEofNoteTag(C, StreamSym));
+ else
+ C.addTransition(StateFailed);
+}
+
+void StreamChecker::evalFputx(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C, bool IsSingleChar) const {
+ ProgramStateRef State = C.getState();
+ SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
+ if (!StreamSym)
+ return;
+
+ const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+
+ const StreamState *OldSS = State->get<StreamMap>(StreamSym);
+ if (!OldSS)
+ return;
+
+ assertStreamStateOpened(OldSS);
+
+ // `fputc` returns the written character on success, otherwise returns EOF.
+ // `fputs` returns a non negative value on sucecess, otherwise returns EOF.
+
+ if (IsSingleChar) {
+ // Generate a transition for the success state of `fputc`.
+ std::optional<NonLoc> PutVal = Call.getArgSVal(0).getAs<NonLoc>();
+ if (!PutVal)
+ return;
+ ProgramStateRef StateNotFailed =
+ State->BindExpr(CE, C.getLocationContext(), *PutVal);
+ StateNotFailed =
+ StateNotFailed->set<StreamMap>(StreamSym, StreamState::getOpened(Desc));
+ C.addTransition(StateNotFailed);
+ } else {
+ // Generate a transition for the success state of `fputs`.
+ NonLoc RetVal = makeRetVal(C, CE).castAs<NonLoc>();
+ ProgramStateRef StateNotFailed =
+ State->BindExpr(CE, C.getLocationContext(), RetVal);
+ SValBuilder &SVB = C.getSValBuilder();
+ auto &ASTC = C.getASTContext();
+ auto Cond = SVB.evalBinOp(State, BO_GE, RetVal, SVB.makeZeroVal(ASTC.IntTy),
+ SVB.getConditionType())
+ .getAs<DefinedOrUnknownSVal>();
+ if (!Cond)
+ return;
+ StateNotFailed = StateNotFailed->assume(*Cond, true);
+ if (!StateNotFailed)
+ return;
+ StateNotFailed =
+ StateNotFailed->set<StreamMap>(StreamSym, StreamState::getOpened(Desc));
+ C.addTransition(StateNotFailed);
+ }
+
+ // Add transition for the failed state. The resulting value of the file
+ // position indicator for the stream is indeterminate.
+ ProgramStateRef StateFailed = bindInt(*EofVal, State, C, CE);
+ StreamState NewSS = StreamState::getOpened(Desc, ErrorFError, true);
+ StateFailed = StateFailed->set<StreamMap>(StreamSym, NewSS);
+ C.addTransition(StateFailed);
+}
+
+void StreamChecker::evalFprintf(const FnDescription *Desc,
+ const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ if (Call.getNumArgs() < 2)
+ return;
+ SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
+ if (!StreamSym)
+ return;
+
+ const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+
+ const StreamState *OldSS = State->get<StreamMap>(StreamSym);
+ if (!OldSS)
+ return;
+
+ assertStreamStateOpened(OldSS);
+
+ NonLoc RetVal = makeRetVal(C, CE).castAs<NonLoc>();
+ State = State->BindExpr(CE, C.getLocationContext(), RetVal);
+ SValBuilder &SVB = C.getSValBuilder();
+ auto &ACtx = C.getASTContext();
+ auto Cond = SVB.evalBinOp(State, BO_GE, RetVal, SVB.makeZeroVal(ACtx.IntTy),
+ SVB.getConditionType())
+ .getAs<DefinedOrUnknownSVal>();
+ if (!Cond)
+ return;
+ ProgramStateRef StateNotFailed, StateFailed;
+ std::tie(StateNotFailed, StateFailed) = State->assume(*Cond);
+
+ StateNotFailed =
+ StateNotFailed->set<StreamMap>(StreamSym, StreamState::getOpened(Desc));
+ C.addTransition(StateNotFailed);
+
+ // Add transition for the failed state. The resulting value of the file
+ // position indicator for the stream is indeterminate.
+ StateFailed = StateFailed->set<StreamMap>(
+ StreamSym, StreamState::getOpened(Desc, ErrorFError, true));
+ C.addTransition(StateFailed);
+}
+
+void StreamChecker::evalFscanf(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ if (Call.getNumArgs() < 2)
+ return;
+ SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
+ if (!StreamSym)
+ return;
+
+ const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+
+ const StreamState *OldSS = State->get<StreamMap>(StreamSym);
+ if (!OldSS)
+ return;
+
+ assertStreamStateOpened(OldSS);
+
+ SValBuilder &SVB = C.getSValBuilder();
+ ASTContext &ACtx = C.getASTContext();
+
+ // Add the success state.
+ // In this context "success" means there is not an EOF or other read error
+ // before any item is matched in 'fscanf'. But there may be match failure,
+ // therefore return value can be 0 or greater.
+ // It is not specified what happens if some items (not all) are matched and
+ // then EOF or read error happens. Now this case is handled like a "success"
+ // case, and no error flags are set on the stream. This is probably not
+ // accurate, and the POSIX documentation does not tell more.
+ if (OldSS->ErrorState != ErrorFEof) {
+ NonLoc RetVal = makeRetVal(C, CE).castAs<NonLoc>();
+ ProgramStateRef StateNotFailed =
+ State->BindExpr(CE, C.getLocationContext(), RetVal);
+ auto RetGeZero =
+ SVB.evalBinOp(StateNotFailed, BO_GE, RetVal,
+ SVB.makeZeroVal(ACtx.IntTy), SVB.getConditionType())
+ .getAs<DefinedOrUnknownSVal>();
+ if (!RetGeZero)
+ return;
+ StateNotFailed = StateNotFailed->assume(*RetGeZero, true);
+
+ C.addTransition(StateNotFailed);
+ }
+
+ // Add transition for the failed state.
+ // Error occurs if nothing is matched yet and reading the input fails.
+ // Error can be EOF, or other error. At "other error" FERROR or 'errno' can
+ // be set but it is not further specified if all are required to be set.
+ // Documentation does not mention, but file position will be set to
+ // indeterminate similarly as at 'fread'.
+ ProgramStateRef StateFailed = bindInt(*EofVal, State, C, CE);
+ StreamErrorState NewES = (OldSS->ErrorState == ErrorFEof)
+ ? ErrorFEof
+ : ErrorNone | ErrorFEof | ErrorFError;
+ StreamState NewSS = StreamState::getOpened(Desc, NewES, !NewES.isFEof());
+ StateFailed = StateFailed->set<StreamMap>(StreamSym, NewSS);
+ if (OldSS->ErrorState != ErrorFEof)
+ C.addTransition(StateFailed, constructSetEofNoteTag(C, StreamSym));
+ else
+ C.addTransition(StateFailed);
+}
+
+void StreamChecker::evalUngetc(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
+ if (!StreamSym)
+ return;
+
+ const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+
+ const StreamState *OldSS = State->get<StreamMap>(StreamSym);
+ if (!OldSS)
+ return;
+
+ assertStreamStateOpened(OldSS);
+
+ // Generate a transition for the success state.
+ std::optional<NonLoc> PutVal = Call.getArgSVal(0).getAs<NonLoc>();
+ if (!PutVal)
+ return;
+ ProgramStateRef StateNotFailed =
+ State->BindExpr(CE, C.getLocationContext(), *PutVal);
+ StateNotFailed =
+ StateNotFailed->set<StreamMap>(StreamSym, StreamState::getOpened(Desc));
+ C.addTransition(StateNotFailed);
+
+ // Add transition for the failed state.
+ // Failure of 'ungetc' does not result in feof or ferror state.
+ // If the PutVal has value of EofVal the function should "fail", but this is
+ // the same transition as the success state.
+ // In this case only one state transition is added by the analyzer (the two
+ // new states may be similar).
+ ProgramStateRef StateFailed = bindInt(*EofVal, State, C, CE);
+ StateFailed =
+ StateFailed->set<StreamMap>(StreamSym, StreamState::getOpened(Desc));
+ C.addTransition(StateFailed);
+}
+
+void StreamChecker::evalGetdelim(const FnDescription *Desc,
+ const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
+ if (!StreamSym)
+ return;
+
+ const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+
+ const StreamState *OldSS = State->get<StreamMap>(StreamSym);
+ if (!OldSS)
+ return;
+
+ assertStreamStateOpened(OldSS);
+
+ // Upon successful completion, the getline() and getdelim() functions shall
+ // return the number of bytes written into the buffer.
+ // If the end-of-file indicator for the stream is set, the function shall
+ // return -1.
+ // If an error occurs, the function shall return -1 and set 'errno'.
+
+ // Add transition for the successful state.
+ if (OldSS->ErrorState != ErrorFEof) {
+ NonLoc RetVal = makeRetVal(C, CE).castAs<NonLoc>();
+ ProgramStateRef StateNotFailed =
+ State->BindExpr(CE, C.getLocationContext(), RetVal);
+ SValBuilder &SVB = C.getSValBuilder();
+ auto Cond =
+ SVB.evalBinOp(State, BO_GE, RetVal, SVB.makeZeroVal(CE->getType()),
+ SVB.getConditionType())
+ .getAs<DefinedOrUnknownSVal>();
+ if (!Cond)
+ return;
+ StateNotFailed = StateNotFailed->assume(*Cond, true);
+ if (!StateNotFailed)
+ return;
+ C.addTransition(StateNotFailed);
+ }
+
+ // Add transition for the failed state.
+ // If a (non-EOF) error occurs, the resulting value of the file position
+ // indicator for the stream is indeterminate.
+ ProgramStateRef StateFailed = bindInt(-1, State, C, CE);
+ StreamErrorState NewES =
+ OldSS->ErrorState == ErrorFEof ? ErrorFEof : ErrorFEof | ErrorFError;
+ StreamState NewSS = StreamState::getOpened(Desc, NewES, !NewES.isFEof());
+ StateFailed = StateFailed->set<StreamMap>(StreamSym, NewSS);
+ if (OldSS->ErrorState != ErrorFEof)
+ C.addTransition(StateFailed, constructSetEofNoteTag(C, StreamSym));
+ else
+ C.addTransition(StateFailed);
+}
+
void StreamChecker::preFseek(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
@@ -743,6 +1181,11 @@ void StreamChecker::evalFseek(const FnDescription *Desc, const CallEvent &Call,
if (!State->get<StreamMap>(StreamSym))
return;
+ const llvm::APSInt *PosV =
+ C.getSValBuilder().getKnownValue(State, Call.getArgSVal(1));
+ const llvm::APSInt *WhenceV =
+ C.getSValBuilder().getKnownValue(State, Call.getArgSVal(2));
+
DefinedSVal RetVal = makeRetVal(C, CE);
// Make expression result.
@@ -761,14 +1204,145 @@ void StreamChecker::evalFseek(const FnDescription *Desc, const CallEvent &Call,
// It is possible that fseek fails but sets none of the error flags.
// If fseek failed, assume that the file position becomes indeterminate in any
// case.
+ StreamErrorState NewErrS = ErrorNone | ErrorFError;
+ // Setting the position to start of file never produces EOF error.
+ if (!(PosV && *PosV == 0 && WhenceV && *WhenceV == SeekSetVal))
+ NewErrS = NewErrS | ErrorFEof;
StateFailed = StateFailed->set<StreamMap>(
- StreamSym,
- StreamState::getOpened(Desc, ErrorNone | ErrorFEof | ErrorFError, true));
+ StreamSym, StreamState::getOpened(Desc, NewErrS, true));
C.addTransition(StateNotFailed);
C.addTransition(StateFailed, constructSetEofNoteTag(C, StreamSym));
}
+void StreamChecker::evalFgetpos(const FnDescription *Desc,
+ const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SymbolRef Sym = getStreamArg(Desc, Call).getAsSymbol();
+ if (!Sym)
+ return;
+
+ // Do not evaluate if stream is not found.
+ if (!State->get<StreamMap>(Sym))
+ return;
+
+ auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+
+ DefinedSVal RetVal = makeRetVal(C, CE);
+ State = State->BindExpr(CE, C.getLocationContext(), RetVal);
+ ProgramStateRef StateNotFailed, StateFailed;
+ std::tie(StateFailed, StateNotFailed) =
+ C.getConstraintManager().assumeDual(State, RetVal);
+
+ // This function does not affect the stream state.
+ // Still we add success and failure state with the appropriate return value.
+ // StdLibraryFunctionsChecker can change these states (set the 'errno' state).
+ C.addTransition(StateNotFailed);
+ C.addTransition(StateFailed);
+}
+
+void StreamChecker::evalFsetpos(const FnDescription *Desc,
+ const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
+ if (!StreamSym)
+ return;
+
+ const StreamState *SS = State->get<StreamMap>(StreamSym);
+ if (!SS)
+ return;
+
+ auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+
+ assertStreamStateOpened(SS);
+
+ DefinedSVal RetVal = makeRetVal(C, CE);
+ State = State->BindExpr(CE, C.getLocationContext(), RetVal);
+ ProgramStateRef StateNotFailed, StateFailed;
+ std::tie(StateFailed, StateNotFailed) =
+ C.getConstraintManager().assumeDual(State, RetVal);
+
+ StateNotFailed = StateNotFailed->set<StreamMap>(
+ StreamSym, StreamState::getOpened(Desc, ErrorNone, false));
+
+ // At failure ferror could be set.
+ // The standards do not tell what happens with the file position at failure.
+ // But we can assume that it is dangerous to make a next I/O operation after
+ // the position was not set correctly (similar to 'fseek').
+ StateFailed = StateFailed->set<StreamMap>(
+ StreamSym, StreamState::getOpened(Desc, ErrorNone | ErrorFError, true));
+
+ C.addTransition(StateNotFailed);
+ C.addTransition(StateFailed);
+}
+
+void StreamChecker::evalFtell(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SymbolRef Sym = getStreamArg(Desc, Call).getAsSymbol();
+ if (!Sym)
+ return;
+
+ if (!State->get<StreamMap>(Sym))
+ return;
+
+ auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+
+ SValBuilder &SVB = C.getSValBuilder();
+ NonLoc RetVal = makeRetVal(C, CE).castAs<NonLoc>();
+ ProgramStateRef StateNotFailed =
+ State->BindExpr(CE, C.getLocationContext(), RetVal);
+ auto Cond =
+ SVB.evalBinOp(State, BO_GE, RetVal, SVB.makeZeroVal(Call.getResultType()),
+ SVB.getConditionType())
+ .getAs<DefinedOrUnknownSVal>();
+ if (!Cond)
+ return;
+ StateNotFailed = StateNotFailed->assume(*Cond, true);
+ if (!StateNotFailed)
+ return;
+
+ ProgramStateRef StateFailed = State->BindExpr(
+ CE, C.getLocationContext(), SVB.makeIntVal(-1, Call.getResultType()));
+
+ // This function does not affect the stream state.
+ // Still we add success and failure state with the appropriate return value.
+ // StdLibraryFunctionsChecker can change these states (set the 'errno' state).
+ C.addTransition(StateNotFailed);
+ C.addTransition(StateFailed);
+}
+
+void StreamChecker::evalRewind(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
+ if (!StreamSym)
+ return;
+
+ const StreamState *SS = State->get<StreamMap>(StreamSym);
+ if (!SS)
+ return;
+
+ auto *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+
+ assertStreamStateOpened(SS);
+
+ State = State->set<StreamMap>(StreamSym,
+ StreamState::getOpened(Desc, ErrorNone, false));
+
+ C.addTransition(State);
+}
+
void StreamChecker::evalClearerr(const FnDescription *Desc,
const CallEvent &Call,
CheckerContext &C) const {
@@ -858,6 +1432,84 @@ void StreamChecker::evalSetFeofFerror(const FnDescription *Desc,
C.addTransition(State);
}
+void StreamChecker::preFflush(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SVal StreamVal = getStreamArg(Desc, Call);
+ std::optional<DefinedSVal> Stream = StreamVal.getAs<DefinedSVal>();
+ if (!Stream)
+ return;
+
+ ProgramStateRef StateNotNull, StateNull;
+ std::tie(StateNotNull, StateNull) =
+ C.getConstraintManager().assumeDual(State, *Stream);
+ if (StateNotNull && !StateNull)
+ ensureStreamOpened(StreamVal, C, StateNotNull);
+}
+
+void StreamChecker::evalFflush(const FnDescription *Desc, const CallEvent &Call,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SVal StreamVal = getStreamArg(Desc, Call);
+ std::optional<DefinedSVal> Stream = StreamVal.getAs<DefinedSVal>();
+ if (!Stream)
+ return;
+
+ // Skip if the stream can be both NULL and non-NULL.
+ ProgramStateRef StateNotNull, StateNull;
+ std::tie(StateNotNull, StateNull) =
+ C.getConstraintManager().assumeDual(State, *Stream);
+ if (StateNotNull && StateNull)
+ return;
+ if (StateNotNull && !StateNull)
+ State = StateNotNull;
+ else
+ State = StateNull;
+
+ const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+
+ // `fflush` returns EOF on failure, otherwise returns 0.
+ ProgramStateRef StateFailed = bindInt(*EofVal, State, C, CE);
+ ProgramStateRef StateNotFailed = bindInt(0, State, C, CE);
+
+ // Clear error states if `fflush` returns 0, but retain their EOF flags.
+ auto ClearErrorInNotFailed = [&StateNotFailed, Desc](SymbolRef Sym,
+ const StreamState *SS) {
+ if (SS->ErrorState & ErrorFError) {
+ StreamErrorState NewES =
+ (SS->ErrorState & ErrorFEof) ? ErrorFEof : ErrorNone;
+ StreamState NewSS = StreamState::getOpened(Desc, NewES, false);
+ StateNotFailed = StateNotFailed->set<StreamMap>(Sym, NewSS);
+ }
+ };
+
+ if (StateNotNull && !StateNull) {
+ // Skip if the input stream's state is unknown, open-failed or closed.
+ if (SymbolRef StreamSym = StreamVal.getAsSymbol()) {
+ const StreamState *SS = State->get<StreamMap>(StreamSym);
+ if (SS) {
+ assert(SS->isOpened() && "Stream is expected to be opened");
+ ClearErrorInNotFailed(StreamSym, SS);
+ } else
+ return;
+ }
+ } else {
+ // Clear error states for all streams.
+ const StreamMapTy &Map = StateNotFailed->get<StreamMap>();
+ for (const auto &I : Map) {
+ SymbolRef Sym = I.first;
+ const StreamState &SS = I.second;
+ if (SS.isOpened())
+ ClearErrorInNotFailed(Sym, &SS);
+ }
+ }
+
+ C.addTransition(StateNotFailed);
+ C.addTransition(StateFailed);
+}
+
ProgramStateRef
StreamChecker::ensureStreamNonNull(SVal StreamVal, const Expr *StreamE,
CheckerContext &C,
@@ -869,7 +1521,7 @@ StreamChecker::ensureStreamNonNull(SVal StreamVal, const Expr *StreamE,
ConstraintManager &CM = C.getConstraintManager();
ProgramStateRef StateNotNull, StateNull;
- std::tie(StateNotNull, StateNull) = CM.assumeDual(C.getState(), *Stream);
+ std::tie(StateNotNull, StateNull) = CM.assumeDual(State, *Stream);
if (!StateNotNull && StateNull) {
if (ExplodedNode *N = C.generateErrorNode(StateNull)) {
@@ -925,7 +1577,6 @@ ProgramStateRef StreamChecker::ensureStreamOpened(SVal StreamVal,
N));
return nullptr;
}
- return State;
}
return State;
@@ -979,12 +1630,13 @@ ProgramStateRef StreamChecker::ensureNoFilePositionIndeterminate(
ProgramStateRef
StreamChecker::ensureFseekWhenceCorrect(SVal WhenceVal, CheckerContext &C,
ProgramStateRef State) const {
- Optional<nonloc::ConcreteInt> CI = WhenceVal.getAs<nonloc::ConcreteInt>();
+ std::optional<nonloc::ConcreteInt> CI =
+ WhenceVal.getAs<nonloc::ConcreteInt>();
if (!CI)
return State;
int64_t X = CI->getValue().getSExtValue();
- if (X >= 0 && X <= 2)
+ if (X == SeekSetVal || X == SeekCurVal || X == SeekEndVal)
return State;
if (ExplodedNode *N = C.generateNonFatalErrorNode(State)) {
@@ -1035,10 +1687,12 @@ StreamChecker::reportLeaks(const SmallVector<SymbolRef, 2> &LeakedSyms,
// FIXME: Add a checker option to turn this uniqueing feature off.
const ExplodedNode *StreamOpenNode = getAcquisitionSite(Err, LeakSym, C);
assert(StreamOpenNode && "Could not find place of stream opening.");
- PathDiagnosticLocation LocUsedForUniqueing =
- PathDiagnosticLocation::createBegin(
- StreamOpenNode->getStmtForDiagnostics(), C.getSourceManager(),
- StreamOpenNode->getLocationContext());
+
+ PathDiagnosticLocation LocUsedForUniqueing;
+ if (const Stmt *StreamStmt = StreamOpenNode->getStmtForDiagnostics())
+ LocUsedForUniqueing = PathDiagnosticLocation::createBegin(
+ StreamStmt, C.getSourceManager(),
+ StreamOpenNode->getLocationContext());
std::unique_ptr<PathSensitiveBugReport> R =
std::make_unique<PathSensitiveBugReport>(
@@ -1118,4 +1772,4 @@ void ento::registerStreamTesterChecker(CheckerManager &Mgr) {
bool ento::shouldRegisterStreamTesterChecker(const CheckerManager &Mgr) {
return true;
-} \ No newline at end of file
+}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StringChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StringChecker.cpp
new file mode 100644
index 000000000000..2dc9e29ca906
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StringChecker.cpp
@@ -0,0 +1,105 @@
+//=== StringChecker.cpp -------------------------------------------*- C++ -*--//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the modeling of the std::basic_string type.
+// This involves checking preconditions of the operations and applying the
+// effects of the operations, e.g. their post-conditions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class StringChecker : public Checker<check::PreCall> {
+ BugType BT_Null{this, "Dereference of null pointer", categories::LogicError};
+ mutable const FunctionDecl *StringConstCharPtrCtor = nullptr;
+ mutable CanQualType SizeTypeTy;
+ const CallDescription TwoParamStdStringCtor = {
+ {"std", "basic_string", "basic_string"}, 2, 2};
+
+ bool isCharToStringCtor(const CallEvent &Call, const ASTContext &ACtx) const;
+
+public:
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+};
+
+bool StringChecker::isCharToStringCtor(const CallEvent &Call,
+ const ASTContext &ACtx) const {
+ if (!TwoParamStdStringCtor.matches(Call))
+ return false;
+ const auto *FD = dyn_cast<FunctionDecl>(Call.getDecl());
+ assert(FD);
+
+ // See if we already cached it.
+ if (StringConstCharPtrCtor && StringConstCharPtrCtor == FD)
+ return true;
+
+ // Verify that the parameters have the expected types:
+ // - arg 1: `const CharT *`
+ // - arg 2: some allocator - which is definately not `size_t`.
+ const QualType Arg1Ty = Call.getArgExpr(0)->getType().getCanonicalType();
+ const QualType Arg2Ty = Call.getArgExpr(1)->getType().getCanonicalType();
+
+ if (!Arg1Ty->isPointerType())
+ return false;
+
+ // It makes sure that we don't select the `string(const char* p, size_t len)`
+ // overload accidentally.
+ if (Arg2Ty.getCanonicalType() == ACtx.getSizeType())
+ return false;
+
+ StringConstCharPtrCtor = FD; // Cache the decl of the right overload.
+ return true;
+}
+
+void StringChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ if (!isCharToStringCtor(Call, C.getASTContext()))
+ return;
+ const auto Param = Call.getArgSVal(0).getAs<Loc>();
+ if (!Param)
+ return;
+
+ // We managed to constrain the parameter to non-null.
+ ProgramStateRef NotNull, Null;
+ std::tie(NotNull, Null) = C.getState()->assume(*Param);
+
+ if (NotNull) {
+ const auto Callback = [Param](PathSensitiveBugReport &BR) -> std::string {
+ return BR.isInteresting(*Param) ? "Assuming the pointer is not null."
+ : "";
+ };
+
+ // Emit note only if this operation constrained the pointer to be null.
+ C.addTransition(NotNull, Null ? C.getNoteTag(Callback) : nullptr);
+ return;
+ }
+
+ // We found a path on which the parameter is NULL.
+ if (ExplodedNode *N = C.generateErrorNode(C.getState())) {
+ auto R = std::make_unique<PathSensitiveBugReport>(
+ BT_Null, "The parameter must not be null", N);
+ bugreporter::trackExpressionValue(N, Call.getArgExpr(0), *R);
+ C.emitReport(std::move(R));
+ }
+}
+
+} // end anonymous namespace
+
+void ento::registerStringChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<StringChecker>();
+}
+
+bool ento::shouldRegisterStringChecker(const CheckerManager &) { return true; }
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h
new file mode 100644
index 000000000000..6de33da107a3
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h
@@ -0,0 +1,99 @@
+//===- TaggedUnionModeling.h -------------------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_TAGGEDUNIONMODELING_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_TAGGEDUNIONMODELING_H
+
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/FoldingSet.h"
+#include <numeric>
+
+namespace clang::ento::tagged_union_modeling {
+
+// The implementation of all these functions can be found in the file
+// StdVariantChecker.cpp under the same directory as this file.
+
+bool isCopyConstructorCall(const CallEvent &Call);
+bool isCopyAssignmentCall(const CallEvent &Call);
+bool isMoveAssignmentCall(const CallEvent &Call);
+bool isMoveConstructorCall(const CallEvent &Call);
+bool isStdType(const Type *Type, const std::string &TypeName);
+bool isStdVariant(const Type *Type);
+
+// When invalidating regions, we also have to follow that by invalidating the
+// corresponding custom data in the program state.
+template <class TypeMap>
+ProgramStateRef
+removeInformationStoredForDeadInstances(const CallEvent &Call,
+ ProgramStateRef State,
+ ArrayRef<const MemRegion *> Regions) {
+ // If we do not know anything about the call we shall not continue.
+ // If the call is happens within a system header it is implementation detail.
+ // We should not take it into consideration.
+ if (Call.isInSystemHeader())
+ return State;
+
+ for (const MemRegion *Region : Regions)
+ State = State->remove<TypeMap>(Region);
+
+ return State;
+}
+
+template <class TypeMap>
+void handleConstructorAndAssignment(const CallEvent &Call, CheckerContext &C,
+ SVal ThisSVal) {
+ ProgramStateRef State = Call.getState();
+
+ if (!State)
+ return;
+
+ auto ArgSVal = Call.getArgSVal(0);
+ const auto *ThisRegion = ThisSVal.getAsRegion();
+ const auto *ArgMemRegion = ArgSVal.getAsRegion();
+
+ // Make changes to the state according to type of constructor/assignment
+ bool IsCopy = isCopyConstructorCall(Call) || isCopyAssignmentCall(Call);
+ bool IsMove = isMoveConstructorCall(Call) || isMoveAssignmentCall(Call);
+ // First we handle copy and move operations
+ if (IsCopy || IsMove) {
+ const QualType *OtherQType = State->get<TypeMap>(ArgMemRegion);
+
+ // If the argument of a copy constructor or assignment is unknown then
+ // we will not know the argument of the copied to object.
+ if (!OtherQType) {
+ State = State->remove<TypeMap>(ThisRegion);
+ } else {
+ // When move semantics is used we can only know that the moved from
+ // object must be in a destructible state. Other usage of the object
+ // than destruction is undefined.
+ if (IsMove)
+ State = State->remove<TypeMap>(ArgMemRegion);
+
+ State = State->set<TypeMap>(ThisRegion, *OtherQType);
+ }
+ } else {
+ // Value constructor
+ auto ArgQType = ArgSVal.getType(C.getASTContext());
+ const Type *ArgTypePtr = ArgQType.getTypePtr();
+
+ QualType WoPointer = ArgTypePtr->getPointeeType();
+ State = State->set<TypeMap>(ThisRegion, WoPointer);
+ }
+
+ C.addTransition(State);
+}
+
+} // namespace clang::ento::tagged_union_modeling
+
+#endif // LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_TAGGEDUNIONMODELING_H \ No newline at end of file
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.cpp
index 71b2ab834a07..4edb671753bf 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.cpp
@@ -10,9 +10,10 @@
//
//===----------------------------------------------------------------------===//
-#include "Taint.h"
+#include "clang/StaticAnalyzer/Checkers/Taint.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -37,7 +38,9 @@ void taint::printTaint(ProgramStateRef State, raw_ostream &Out, const char *NL,
Out << I.first << " : " << I.second << NL;
}
-void dumpTaint(ProgramStateRef State) { printTaint(State, llvm::errs()); }
+void taint::dumpTaint(ProgramStateRef State) {
+ printTaint(State, llvm::errs());
+}
ProgramStateRef taint::addTaint(ProgramStateRef State, const Stmt *S,
const LocationContext *LCtx,
@@ -61,7 +64,7 @@ ProgramStateRef taint::addTaint(ProgramStateRef State, SVal V,
// their parent region, which is a conjured symbol default-bound to the base
// region of the parent region.
if (auto LCV = V.getAs<nonloc::LazyCompoundVal>()) {
- if (Optional<SVal> binding =
+ if (std::optional<SVal> binding =
State->getStateManager().getStoreManager().getDefaultBinding(
*LCV)) {
if (SymbolRef Sym = binding->getAsSymbol())
@@ -143,62 +146,142 @@ ProgramStateRef taint::addPartialTaint(ProgramStateRef State,
bool taint::isTainted(ProgramStateRef State, const Stmt *S,
const LocationContext *LCtx, TaintTagType Kind) {
- SVal val = State->getSVal(S, LCtx);
- return isTainted(State, val, Kind);
+ return !getTaintedSymbolsImpl(State, S, LCtx, Kind, /*ReturnFirstOnly=*/true)
+ .empty();
}
bool taint::isTainted(ProgramStateRef State, SVal V, TaintTagType Kind) {
- if (SymbolRef Sym = V.getAsSymbol())
- return isTainted(State, Sym, Kind);
- if (const MemRegion *Reg = V.getAsRegion())
- return isTainted(State, Reg, Kind);
- return false;
+ return !getTaintedSymbolsImpl(State, V, Kind, /*ReturnFirstOnly=*/true)
+ .empty();
}
bool taint::isTainted(ProgramStateRef State, const MemRegion *Reg,
TaintTagType K) {
- if (!Reg)
- return false;
+ return !getTaintedSymbolsImpl(State, Reg, K, /*ReturnFirstOnly=*/true)
+ .empty();
+}
+
+bool taint::isTainted(ProgramStateRef State, SymbolRef Sym, TaintTagType Kind) {
+ return !getTaintedSymbolsImpl(State, Sym, Kind, /*ReturnFirstOnly=*/true)
+ .empty();
+}
+
+std::vector<SymbolRef> taint::getTaintedSymbols(ProgramStateRef State,
+ const Stmt *S,
+ const LocationContext *LCtx,
+ TaintTagType Kind) {
+ return getTaintedSymbolsImpl(State, S, LCtx, Kind, /*ReturnFirstOnly=*/false);
+}
+
+std::vector<SymbolRef> taint::getTaintedSymbols(ProgramStateRef State, SVal V,
+ TaintTagType Kind) {
+ return getTaintedSymbolsImpl(State, V, Kind, /*ReturnFirstOnly=*/false);
+}
+
+std::vector<SymbolRef> taint::getTaintedSymbols(ProgramStateRef State,
+ SymbolRef Sym,
+ TaintTagType Kind) {
+ return getTaintedSymbolsImpl(State, Sym, Kind, /*ReturnFirstOnly=*/false);
+}
+
+std::vector<SymbolRef> taint::getTaintedSymbols(ProgramStateRef State,
+ const MemRegion *Reg,
+ TaintTagType Kind) {
+ return getTaintedSymbolsImpl(State, Reg, Kind, /*ReturnFirstOnly=*/false);
+}
+
+std::vector<SymbolRef> taint::getTaintedSymbolsImpl(ProgramStateRef State,
+ const Stmt *S,
+ const LocationContext *LCtx,
+ TaintTagType Kind,
+ bool returnFirstOnly) {
+ SVal val = State->getSVal(S, LCtx);
+ return getTaintedSymbolsImpl(State, val, Kind, returnFirstOnly);
+}
+
+std::vector<SymbolRef> taint::getTaintedSymbolsImpl(ProgramStateRef State,
+ SVal V, TaintTagType Kind,
+ bool returnFirstOnly) {
+ if (SymbolRef Sym = V.getAsSymbol())
+ return getTaintedSymbolsImpl(State, Sym, Kind, returnFirstOnly);
+ if (const MemRegion *Reg = V.getAsRegion())
+ return getTaintedSymbolsImpl(State, Reg, Kind, returnFirstOnly);
+ return {};
+}
+std::vector<SymbolRef> taint::getTaintedSymbolsImpl(ProgramStateRef State,
+ const MemRegion *Reg,
+ TaintTagType K,
+ bool returnFirstOnly) {
+ std::vector<SymbolRef> TaintedSymbols;
+ if (!Reg)
+ return TaintedSymbols;
// Element region (array element) is tainted if either the base or the offset
// are tainted.
- if (const ElementRegion *ER = dyn_cast<ElementRegion>(Reg))
- return isTainted(State, ER->getSuperRegion(), K) ||
- isTainted(State, ER->getIndex(), K);
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(Reg)) {
+ std::vector<SymbolRef> TaintedIndex =
+ getTaintedSymbolsImpl(State, ER->getIndex(), K, returnFirstOnly);
+ llvm::append_range(TaintedSymbols, TaintedIndex);
+ if (returnFirstOnly && !TaintedSymbols.empty())
+ return TaintedSymbols; // return early if needed
+ std::vector<SymbolRef> TaintedSuperRegion =
+ getTaintedSymbolsImpl(State, ER->getSuperRegion(), K, returnFirstOnly);
+ llvm::append_range(TaintedSymbols, TaintedSuperRegion);
+ if (returnFirstOnly && !TaintedSymbols.empty())
+ return TaintedSymbols; // return early if needed
+ }
- if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(Reg))
- return isTainted(State, SR->getSymbol(), K);
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(Reg)) {
+ std::vector<SymbolRef> TaintedRegions =
+ getTaintedSymbolsImpl(State, SR->getSymbol(), K, returnFirstOnly);
+ llvm::append_range(TaintedSymbols, TaintedRegions);
+ if (returnFirstOnly && !TaintedSymbols.empty())
+ return TaintedSymbols; // return early if needed
+ }
- if (const SubRegion *ER = dyn_cast<SubRegion>(Reg))
- return isTainted(State, ER->getSuperRegion(), K);
+ if (const SubRegion *ER = dyn_cast<SubRegion>(Reg)) {
+ std::vector<SymbolRef> TaintedSubRegions =
+ getTaintedSymbolsImpl(State, ER->getSuperRegion(), K, returnFirstOnly);
+ llvm::append_range(TaintedSymbols, TaintedSubRegions);
+ if (returnFirstOnly && !TaintedSymbols.empty())
+ return TaintedSymbols; // return early if needed
+ }
- return false;
+ return TaintedSymbols;
}
-bool taint::isTainted(ProgramStateRef State, SymbolRef Sym, TaintTagType Kind) {
+std::vector<SymbolRef> taint::getTaintedSymbolsImpl(ProgramStateRef State,
+ SymbolRef Sym,
+ TaintTagType Kind,
+ bool returnFirstOnly) {
+ std::vector<SymbolRef> TaintedSymbols;
if (!Sym)
- return false;
+ return TaintedSymbols;
// Traverse all the symbols this symbol depends on to see if any are tainted.
- for (SymExpr::symbol_iterator SI = Sym->symbol_begin(),
- SE = Sym->symbol_end();
- SI != SE; ++SI) {
- if (!isa<SymbolData>(*SI))
+ for (SymbolRef SubSym : Sym->symbols()) {
+ if (!isa<SymbolData>(SubSym))
continue;
- if (const TaintTagType *Tag = State->get<TaintMap>(*SI)) {
- if (*Tag == Kind)
- return true;
+ if (const TaintTagType *Tag = State->get<TaintMap>(SubSym)) {
+ if (*Tag == Kind) {
+ TaintedSymbols.push_back(SubSym);
+ if (returnFirstOnly)
+ return TaintedSymbols; // return early if needed
+ }
}
- if (const auto *SD = dyn_cast<SymbolDerived>(*SI)) {
+ if (const auto *SD = dyn_cast<SymbolDerived>(SubSym)) {
// If this is a SymbolDerived with a tainted parent, it's also tainted.
- if (isTainted(State, SD->getParentSymbol(), Kind))
- return true;
+ std::vector<SymbolRef> TaintedParents = getTaintedSymbolsImpl(
+ State, SD->getParentSymbol(), Kind, returnFirstOnly);
+ llvm::append_range(TaintedSymbols, TaintedParents);
+ if (returnFirstOnly && !TaintedSymbols.empty())
+ return TaintedSymbols; // return early if needed
// If this is a SymbolDerived with the same parent symbol as another
- // tainted SymbolDerived and a region that's a sub-region of that tainted
- // symbol, it's also tainted.
+ // tainted SymbolDerived and a region that's a sub-region of that
+ // tainted symbol, it's also tainted.
if (const TaintedSubRegions *Regs =
State->get<DerivedSymTaint>(SD->getParentSymbol())) {
const TypedValueRegion *R = SD->getRegion();
@@ -207,46 +290,32 @@ bool taint::isTainted(ProgramStateRef State, SymbolRef Sym, TaintTagType Kind) {
// complete. For example, this would not currently identify
// overlapping fields in a union as tainted. To identify this we can
// check for overlapping/nested byte offsets.
- if (Kind == I.second && R->isSubRegionOf(I.first))
- return true;
+ if (Kind == I.second && R->isSubRegionOf(I.first)) {
+ TaintedSymbols.push_back(SD->getParentSymbol());
+ if (returnFirstOnly && !TaintedSymbols.empty())
+ return TaintedSymbols; // return early if needed
+ }
}
}
}
// If memory region is tainted, data is also tainted.
- if (const auto *SRV = dyn_cast<SymbolRegionValue>(*SI)) {
- if (isTainted(State, SRV->getRegion(), Kind))
- return true;
+ if (const auto *SRV = dyn_cast<SymbolRegionValue>(SubSym)) {
+ std::vector<SymbolRef> TaintedRegions =
+ getTaintedSymbolsImpl(State, SRV->getRegion(), Kind, returnFirstOnly);
+ llvm::append_range(TaintedSymbols, TaintedRegions);
+ if (returnFirstOnly && !TaintedSymbols.empty())
+ return TaintedSymbols; // return early if needed
}
// If this is a SymbolCast from a tainted value, it's also tainted.
- if (const auto *SC = dyn_cast<SymbolCast>(*SI)) {
- if (isTainted(State, SC->getOperand(), Kind))
- return true;
+ if (const auto *SC = dyn_cast<SymbolCast>(SubSym)) {
+ std::vector<SymbolRef> TaintedCasts =
+ getTaintedSymbolsImpl(State, SC->getOperand(), Kind, returnFirstOnly);
+ llvm::append_range(TaintedSymbols, TaintedCasts);
+ if (returnFirstOnly && !TaintedSymbols.empty())
+ return TaintedSymbols; // return early if needed
}
}
-
- return false;
-}
-
-PathDiagnosticPieceRef TaintBugVisitor::VisitNode(const ExplodedNode *N,
- BugReporterContext &BRC,
- PathSensitiveBugReport &BR) {
-
- // Find the ExplodedNode where the taint was first introduced
- if (!isTainted(N->getState(), V) ||
- isTainted(N->getFirstPred()->getState(), V))
- return nullptr;
-
- const Stmt *S = N->getStmtForDiagnostics();
- if (!S)
- return nullptr;
-
- const LocationContext *NCtx = N->getLocationContext();
- PathDiagnosticLocation L =
- PathDiagnosticLocation::createBegin(S, BRC.getSourceManager(), NCtx);
- if (!L.isValid() || !L.asLocation().isValid())
- return nullptr;
-
- return std::make_shared<PathDiagnosticEventPiece>(L, "Taint originated here");
-}
+ return TaintedSymbols;
+} \ No newline at end of file
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.h
deleted file mode 100644
index 659a3c898d56..000000000000
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.h
+++ /dev/null
@@ -1,106 +0,0 @@
-//=== Taint.h - Taint tracking and basic propagation rules. --------*- C++ -*-//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// Defines basic, non-domain-specific mechanisms for tracking tainted values.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_TAINT_H
-#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_TAINT_H
-
-#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
-
-namespace clang {
-namespace ento {
-namespace taint {
-
-/// The type of taint, which helps to differentiate between different types of
-/// taint.
-using TaintTagType = unsigned;
-
-static constexpr TaintTagType TaintTagGeneric = 0;
-
-/// Create a new state in which the value of the statement is marked as tainted.
-LLVM_NODISCARD ProgramStateRef addTaint(ProgramStateRef State, const Stmt *S,
- const LocationContext *LCtx,
- TaintTagType Kind = TaintTagGeneric);
-
-/// Create a new state in which the value is marked as tainted.
-LLVM_NODISCARD ProgramStateRef addTaint(ProgramStateRef State, SVal V,
- TaintTagType Kind = TaintTagGeneric);
-
-/// Create a new state in which the symbol is marked as tainted.
-LLVM_NODISCARD ProgramStateRef addTaint(ProgramStateRef State, SymbolRef Sym,
- TaintTagType Kind = TaintTagGeneric);
-
-/// Create a new state in which the pointer represented by the region
-/// is marked as tainted.
-LLVM_NODISCARD ProgramStateRef addTaint(ProgramStateRef State,
- const MemRegion *R,
- TaintTagType Kind = TaintTagGeneric);
-
-LLVM_NODISCARD ProgramStateRef removeTaint(ProgramStateRef State, SVal V);
-
-LLVM_NODISCARD ProgramStateRef removeTaint(ProgramStateRef State,
- const MemRegion *R);
-
-LLVM_NODISCARD ProgramStateRef removeTaint(ProgramStateRef State,
- SymbolRef Sym);
-
-/// Create a new state in a which a sub-region of a given symbol is tainted.
-/// This might be necessary when referring to regions that can not have an
-/// individual symbol, e.g. if they are represented by the default binding of
-/// a LazyCompoundVal.
-LLVM_NODISCARD ProgramStateRef addPartialTaint(
- ProgramStateRef State, SymbolRef ParentSym, const SubRegion *SubRegion,
- TaintTagType Kind = TaintTagGeneric);
-
-/// Check if the statement has a tainted value in the given state.
-bool isTainted(ProgramStateRef State, const Stmt *S,
- const LocationContext *LCtx,
- TaintTagType Kind = TaintTagGeneric);
-
-/// Check if the value is tainted in the given state.
-bool isTainted(ProgramStateRef State, SVal V,
- TaintTagType Kind = TaintTagGeneric);
-
-/// Check if the symbol is tainted in the given state.
-bool isTainted(ProgramStateRef State, SymbolRef Sym,
- TaintTagType Kind = TaintTagGeneric);
-
-/// Check if the pointer represented by the region is tainted in the given
-/// state.
-bool isTainted(ProgramStateRef State, const MemRegion *Reg,
- TaintTagType Kind = TaintTagGeneric);
-
-void printTaint(ProgramStateRef State, raw_ostream &Out, const char *nl = "\n",
- const char *sep = "");
-
-LLVM_DUMP_METHOD void dumpTaint(ProgramStateRef State);
-
-/// The bug visitor prints a diagnostic message at the location where a given
-/// variable was tainted.
-class TaintBugVisitor final : public BugReporterVisitor {
-private:
- const SVal V;
-
-public:
- TaintBugVisitor(const SVal V) : V(V) {}
- void Profile(llvm::FoldingSetNodeID &ID) const override { ID.Add(V); }
-
- PathDiagnosticPieceRef VisitNode(const ExplodedNode *N,
- BugReporterContext &BRC,
- PathSensitiveBugReport &BR) override;
-};
-
-} // namespace taint
-} // namespace ento
-} // namespace clang
-
-#endif
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
index 916977c10c0c..acf4e833095b 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#include "Taint.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Checkers/Taint.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -22,27 +22,14 @@ using namespace ento;
using namespace taint;
namespace {
-class TaintTesterChecker : public Checker< check::PostStmt<Expr> > {
-
- mutable std::unique_ptr<BugType> BT;
- void initBugType() const;
-
- /// Given a pointer argument, get the symbol of the value it contains
- /// (points to).
- SymbolRef getPointedToSymbol(CheckerContext &C,
- const Expr* Arg,
- bool IssueWarning = true) const;
+class TaintTesterChecker : public Checker<check::PostStmt<Expr>> {
+ const BugType BT{this, "Tainted data", "General"};
public:
void checkPostStmt(const Expr *E, CheckerContext &C) const;
};
}
-inline void TaintTesterChecker::initBugType() const {
- if (!BT)
- BT.reset(new BugType(this, "Tainted data", "General"));
-}
-
void TaintTesterChecker::checkPostStmt(const Expr *E,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
@@ -51,8 +38,7 @@ void TaintTesterChecker::checkPostStmt(const Expr *E,
if (isTainted(State, E, C.getLocationContext())) {
if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
- initBugType();
- auto report = std::make_unique<PathSensitiveBugReport>(*BT, "tainted", N);
+ auto report = std::make_unique<PathSensitiveBugReport>(BT, "tainted", N);
report->addRange(E->getSourceRange());
C.emitReport(std::move(report));
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
index eeec807ccee4..667b19f8120e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TestAfterDivZeroChecker.cpp
@@ -17,6 +17,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "llvm/ADT/FoldingSet.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -77,7 +78,7 @@ public:
class TestAfterDivZeroChecker
: public Checker<check::PreStmt<BinaryOperator>, check::BranchCondition,
check::EndFunction> {
- mutable std::unique_ptr<BuiltinBug> DivZeroBug;
+ const BugType DivZeroBug{this, "Division by zero"};
void reportBug(SVal Val, CheckerContext &C) const;
public:
@@ -100,7 +101,7 @@ DivisionBRVisitor::VisitNode(const ExplodedNode *Succ, BugReporterContext &BRC,
const Expr *E = nullptr;
- if (Optional<PostStmt> P = Succ->getLocationAs<PostStmt>())
+ if (std::optional<PostStmt> P = Succ->getLocationAs<PostStmt>())
if (const BinaryOperator *BO = P->getStmtAs<BinaryOperator>()) {
BinaryOperator::Opcode Op = BO->getOpcode();
if (Op == BO_Div || Op == BO_Rem || Op == BO_DivAssign ||
@@ -132,7 +133,7 @@ DivisionBRVisitor::VisitNode(const ExplodedNode *Succ, BugReporterContext &BRC,
}
bool TestAfterDivZeroChecker::isZero(SVal S, CheckerContext &C) const {
- Optional<DefinedSVal> DSV = S.getAs<DefinedSVal>();
+ std::optional<DefinedSVal> DSV = S.getAs<DefinedSVal>();
if (!DSV)
return false;
@@ -164,12 +165,10 @@ bool TestAfterDivZeroChecker::hasDivZeroMap(SVal Var,
void TestAfterDivZeroChecker::reportBug(SVal Val, CheckerContext &C) const {
if (ExplodedNode *N = C.generateErrorNode(C.getState())) {
- if (!DivZeroBug)
- DivZeroBug.reset(new BuiltinBug(this, "Division by zero"));
-
auto R = std::make_unique<PathSensitiveBugReport>(
- *DivZeroBug, "Value being compared against zero has already been used "
- "for division",
+ DivZeroBug,
+ "Value being compared against zero has already been used "
+ "for division",
N);
R->addVisitor(std::make_unique<DivisionBRVisitor>(Val.getAsSymbol(),
@@ -187,10 +186,7 @@ void TestAfterDivZeroChecker::checkEndFunction(const ReturnStmt *,
return;
DivZeroMapTy::Factory &F = State->get_context<DivZeroMap>();
- for (llvm::ImmutableSet<ZeroState>::iterator I = DivZeroes.begin(),
- E = DivZeroes.end();
- I != E; ++I) {
- ZeroState ZS = *I;
+ for (const ZeroState &ZS : DivZeroes) {
if (ZS.getStackFrameContext() == C.getStackFrame())
DivZeroes = F.remove(DivZeroes, ZS);
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
index 5cc713172527..e2f8bd541c96 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustNonnullChecker.cpp
@@ -69,8 +69,7 @@ public:
if (!CondS || CondS->computeComplexity() > ComplexityThreshold)
return State;
- for (auto B=CondS->symbol_begin(), E=CondS->symbol_end(); B != E; ++B) {
- const SymbolRef Antecedent = *B;
+ for (SymbolRef Antecedent : CondS->symbols()) {
State = addImplication(Antecedent, State, true);
State = addImplication(Antecedent, State, false);
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustReturnsNonnullChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustReturnsNonnullChecker.cpp
new file mode 100644
index 000000000000..d80559c6a915
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustReturnsNonnullChecker.cpp
@@ -0,0 +1,60 @@
+//== TrustReturnsNonnullChecker.cpp -- API nullability modeling -*- C++ -*--==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker adds nullability-related assumptions to methods annotated with
+// returns_nonnull attribute.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class TrustReturnsNonnullChecker : public Checker<check::PostCall> {
+
+public:
+ TrustReturnsNonnullChecker(ASTContext &Ctx) {}
+
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+
+ if (isNonNullPtr(Call))
+ if (auto L = Call.getReturnValue().getAs<Loc>())
+ State = State->assume(*L, /*assumption=*/true);
+
+ C.addTransition(State);
+ }
+
+private:
+ /// \returns Whether the method declaration has the attribute returns_nonnull.
+ bool isNonNullPtr(const CallEvent &Call) const {
+ QualType ExprRetType = Call.getResultType();
+ const Decl *CallDeclaration = Call.getDecl();
+ if (!ExprRetType->isAnyPointerType() || !CallDeclaration)
+ return false;
+
+ return CallDeclaration->hasAttr<ReturnsNonNullAttr>();
+ }
+};
+
+} // namespace
+
+void ento::registerTrustReturnsNonnullChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<TrustReturnsNonnullChecker>(Mgr.getASTContext());
+}
+
+bool ento::shouldRegisterTrustReturnsNonnullChecker(const CheckerManager &mgr) {
+ return true;
+}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
index ebe5ad53cc30..aa478b69aade 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
@@ -18,6 +18,7 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include <optional>
#include <utility>
using namespace clang;
@@ -26,7 +27,7 @@ using namespace ento;
namespace {
class UndefBranchChecker : public Checker<check::BranchCondition> {
- mutable std::unique_ptr<BuiltinBug> BT;
+ const BugType BT{this, "Branch condition evaluates to a garbage value"};
struct FindUndefExpr {
ProgramStateRef St;
@@ -63,52 +64,47 @@ void UndefBranchChecker::checkBranchCondition(const Stmt *Condition,
// ObjCForCollection is a loop, but has no actual condition.
if (isa<ObjCForCollectionStmt>(Condition))
return;
- SVal X = Ctx.getSVal(Condition);
- if (X.isUndef()) {
- // Generate a sink node, which implicitly marks both outgoing branches as
- // infeasible.
- ExplodedNode *N = Ctx.generateErrorNode();
- if (N) {
- if (!BT)
- BT.reset(new BuiltinBug(
- this, "Branch condition evaluates to a garbage value"));
-
- // What's going on here: we want to highlight the subexpression of the
- // condition that is the most likely source of the "uninitialized
- // branch condition." We do a recursive walk of the condition's
- // subexpressions and roughly look for the most nested subexpression
- // that binds to Undefined. We then highlight that expression's range.
-
- // Get the predecessor node and check if is a PostStmt with the Stmt
- // being the terminator condition. We want to inspect the state
- // of that node instead because it will contain main information about
- // the subexpressions.
-
- // Note: any predecessor will do. They should have identical state,
- // since all the BlockEdge did was act as an error sink since the value
- // had to already be undefined.
- assert (!N->pred_empty());
- const Expr *Ex = cast<Expr>(Condition);
- ExplodedNode *PrevN = *N->pred_begin();
- ProgramPoint P = PrevN->getLocation();
- ProgramStateRef St = N->getState();
-
- if (Optional<PostStmt> PS = P.getAs<PostStmt>())
- if (PS->getStmt() == Ex)
- St = PrevN->getState();
-
- FindUndefExpr FindIt(St, Ctx.getLocationContext());
- Ex = FindIt.FindExpr(Ex);
-
- // Emit the bug report.
- auto R = std::make_unique<PathSensitiveBugReport>(
- *BT, BT->getDescription(), N);
- bugreporter::trackExpressionValue(N, Ex, *R);
- R->addRange(Ex->getSourceRange());
-
- Ctx.emitReport(std::move(R));
- }
- }
+ if (!Ctx.getSVal(Condition).isUndef())
+ return;
+
+ // Generate a sink node, which implicitly marks both outgoing branches as
+ // infeasible.
+ ExplodedNode *N = Ctx.generateErrorNode();
+ if (!N)
+ return;
+ // What's going on here: we want to highlight the subexpression of the
+ // condition that is the most likely source of the "uninitialized
+ // branch condition." We do a recursive walk of the condition's
+ // subexpressions and roughly look for the most nested subexpression
+ // that binds to Undefined. We then highlight that expression's range.
+
+ // Get the predecessor node and check if is a PostStmt with the Stmt
+ // being the terminator condition. We want to inspect the state
+ // of that node instead because it will contain main information about
+ // the subexpressions.
+
+ // Note: any predecessor will do. They should have identical state,
+ // since all the BlockEdge did was act as an error sink since the value
+ // had to already be undefined.
+ assert(!N->pred_empty());
+ const Expr *Ex = cast<Expr>(Condition);
+ ExplodedNode *PrevN = *N->pred_begin();
+ ProgramPoint P = PrevN->getLocation();
+ ProgramStateRef St = N->getState();
+
+ if (std::optional<PostStmt> PS = P.getAs<PostStmt>())
+ if (PS->getStmt() == Ex)
+ St = PrevN->getState();
+
+ FindUndefExpr FindIt(St, Ctx.getLocationContext());
+ Ex = FindIt.FindExpr(Ex);
+
+ // Emit the bug report.
+ auto R = std::make_unique<PathSensitiveBugReport>(BT, BT.getDescription(), N);
+ bugreporter::trackExpressionValue(N, Ex, *R);
+ R->addRange(Ex->getSourceRange());
+
+ Ctx.emitReport(std::move(R));
}
void ento::registerUndefBranchChecker(CheckerManager &mgr) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
index 816a547cadc3..2839ef0b6d2e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
@@ -19,6 +19,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -26,7 +27,7 @@ using namespace ento;
namespace {
class UndefCapturedBlockVarChecker
: public Checker< check::PostStmt<BlockExpr> > {
- mutable std::unique_ptr<BugType> BT;
+ const BugType BT{this, "uninitialized variable captured by block"};
public:
void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
@@ -56,26 +57,19 @@ UndefCapturedBlockVarChecker::checkPostStmt(const BlockExpr *BE,
ProgramStateRef state = C.getState();
auto *R = cast<BlockDataRegion>(C.getSVal(BE).getAsRegion());
- BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
- E = R->referenced_vars_end();
-
- for (; I != E; ++I) {
+ for (auto Var : R->referenced_vars()) {
// This VarRegion is the region associated with the block; we need
// the one associated with the encompassing context.
- const VarRegion *VR = I.getCapturedRegion();
+ const VarRegion *VR = Var.getCapturedRegion();
const VarDecl *VD = VR->getDecl();
if (VD->hasAttr<BlocksAttr>() || !VD->hasLocalStorage())
continue;
// Get the VarRegion associated with VD in the local stack frame.
- if (Optional<UndefinedVal> V =
- state->getSVal(I.getOriginalRegion()).getAs<UndefinedVal>()) {
+ if (std::optional<UndefinedVal> V =
+ state->getSVal(Var.getOriginalRegion()).getAs<UndefinedVal>()) {
if (ExplodedNode *N = C.generateErrorNode()) {
- if (!BT)
- BT.reset(
- new BuiltinBug(this, "uninitialized variable captured by block"));
-
// Generate a bug report.
SmallString<128> buf;
llvm::raw_svector_ostream os(buf);
@@ -83,7 +77,7 @@ UndefCapturedBlockVarChecker::checkPostStmt(const BlockExpr *BE,
os << "Variable '" << VD->getName()
<< "' is uninitialized when captured by block";
- auto R = std::make_unique<PathSensitiveBugReport>(*BT, os.str(), N);
+ auto R = std::make_unique<PathSensitiveBugReport>(BT, os.str(), N);
if (const Expr *Ex = FindBlockDeclRefExpr(BE->getBody(), VD))
R->addRange(Ex->getSourceRange());
bugreporter::trackStoredValue(*V, VR, *R,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
index 477d910bc653..4b845bb3ded2 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
@@ -28,7 +28,7 @@ namespace {
class UndefResultChecker
: public Checker< check::PostStmt<BinaryOperator> > {
- mutable std::unique_ptr<BugType> BT;
+ const BugType BT{this, "Result of operation is garbage or undefined"};
public:
void checkPostStmt(const BinaryOperator *B, CheckerContext &C) const;
@@ -53,33 +53,17 @@ static bool isArrayIndexOutOfBounds(CheckerContext &C, const Expr *Ex) {
DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
DefinedOrUnknownSVal ElementCount = getDynamicElementCount(
state, ER->getSuperRegion(), C.getSValBuilder(), ER->getValueType());
- ProgramStateRef StInBound = state->assumeInBound(Idx, ElementCount, true);
- ProgramStateRef StOutBound = state->assumeInBound(Idx, ElementCount, false);
+ ProgramStateRef StInBound, StOutBound;
+ std::tie(StInBound, StOutBound) = state->assumeInBoundDual(Idx, ElementCount);
return StOutBound && !StInBound;
}
-static bool isShiftOverflow(const BinaryOperator *B, CheckerContext &C) {
- return C.isGreaterOrEqual(
- B->getRHS(), C.getASTContext().getIntWidth(B->getLHS()->getType()));
-}
-
-static bool isLeftShiftResultUnrepresentable(const BinaryOperator *B,
- CheckerContext &C) {
- SValBuilder &SB = C.getSValBuilder();
- ProgramStateRef State = C.getState();
- const llvm::APSInt *LHS = SB.getKnownValue(State, C.getSVal(B->getLHS()));
- const llvm::APSInt *RHS = SB.getKnownValue(State, C.getSVal(B->getRHS()));
- assert(LHS && RHS && "Values unknown, inconsistent state");
- return (unsigned)RHS->getZExtValue() > LHS->countLeadingZeros();
-}
-
void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
CheckerContext &C) const {
if (C.getSVal(B).isUndef()) {
// Do not report assignments of uninitialized values inside swap functions.
// This should allow to swap partially uninitialized structs
- // (radar://14129997)
if (const FunctionDecl *EnclosingFunctionDecl =
dyn_cast<FunctionDecl>(C.getStackFrame()->getDecl()))
if (C.getCalleeName(EnclosingFunctionDecl) == "swap")
@@ -90,10 +74,6 @@ void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
if (!N)
return;
- if (!BT)
- BT.reset(
- new BuiltinBug(this, "Result of operation is garbage or undefined"));
-
SmallString<256> sbuf;
llvm::raw_svector_ostream OS(sbuf);
const Expr *Ex = nullptr;
@@ -116,62 +96,11 @@ void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
OS << " due to array index out of bounds";
} else {
// Neither operand was undefined, but the result is undefined.
- if ((B->getOpcode() == BinaryOperatorKind::BO_Shl ||
- B->getOpcode() == BinaryOperatorKind::BO_Shr) &&
- C.isNegative(B->getRHS())) {
- OS << "The result of the "
- << ((B->getOpcode() == BinaryOperatorKind::BO_Shl) ? "left"
- : "right")
- << " shift is undefined because the right operand is negative";
- Ex = B->getRHS();
- } else if ((B->getOpcode() == BinaryOperatorKind::BO_Shl ||
- B->getOpcode() == BinaryOperatorKind::BO_Shr) &&
- isShiftOverflow(B, C)) {
-
- OS << "The result of the "
- << ((B->getOpcode() == BinaryOperatorKind::BO_Shl) ? "left"
- : "right")
- << " shift is undefined due to shifting by ";
- Ex = B->getRHS();
-
- SValBuilder &SB = C.getSValBuilder();
- const llvm::APSInt *I =
- SB.getKnownValue(C.getState(), C.getSVal(B->getRHS()));
- if (!I)
- OS << "a value that is";
- else if (I->isUnsigned())
- OS << '\'' << I->getZExtValue() << "\', which is";
- else
- OS << '\'' << I->getSExtValue() << "\', which is";
-
- OS << " greater or equal to the width of type '"
- << B->getLHS()->getType().getAsString() << "'.";
- } else if (B->getOpcode() == BinaryOperatorKind::BO_Shl &&
- C.isNegative(B->getLHS())) {
- OS << "The result of the left shift is undefined because the left "
- "operand is negative";
- Ex = B->getLHS();
- } else if (B->getOpcode() == BinaryOperatorKind::BO_Shl &&
- isLeftShiftResultUnrepresentable(B, C)) {
- ProgramStateRef State = C.getState();
- SValBuilder &SB = C.getSValBuilder();
- const llvm::APSInt *LHS =
- SB.getKnownValue(State, C.getSVal(B->getLHS()));
- const llvm::APSInt *RHS =
- SB.getKnownValue(State, C.getSVal(B->getRHS()));
- OS << "The result of the left shift is undefined due to shifting \'"
- << LHS->getSExtValue() << "\' by \'" << RHS->getZExtValue()
- << "\', which is unrepresentable in the unsigned version of "
- << "the return type \'" << B->getLHS()->getType().getAsString()
- << "\'";
- Ex = B->getLHS();
- } else {
- OS << "The result of the '"
- << BinaryOperator::getOpcodeStr(B->getOpcode())
- << "' expression is undefined";
- }
+ OS << "The result of the '"
+ << BinaryOperator::getOpcodeStr(B->getOpcode())
+ << "' expression is undefined";
}
- auto report = std::make_unique<PathSensitiveBugReport>(*BT, OS.str(), N);
+ auto report = std::make_unique<PathSensitiveBugReport>(BT, OS.str(), N);
if (Ex) {
report->addRange(Ex->getSourceRange());
bugreporter::trackExpressionValue(N, Ex, *report);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
index fdefe75e8201..baa07fa66764 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
@@ -24,7 +24,7 @@ using namespace ento;
namespace {
class UndefinedArraySubscriptChecker
: public Checker< check::PreStmt<ArraySubscriptExpr> > {
- mutable std::unique_ptr<BugType> BT;
+ const BugType BT{this, "Array subscript is undefined"};
public:
void checkPreStmt(const ArraySubscriptExpr *A, CheckerContext &C) const;
@@ -48,11 +48,8 @@ UndefinedArraySubscriptChecker::checkPreStmt(const ArraySubscriptExpr *A,
ExplodedNode *N = C.generateErrorNode();
if (!N)
return;
- if (!BT)
- BT.reset(new BuiltinBug(this, "Array subscript is undefined"));
-
// Generate a report for this bug.
- auto R = std::make_unique<PathSensitiveBugReport>(*BT, BT->getDescription(), N);
+ auto R = std::make_unique<PathSensitiveBugReport>(BT, BT.getDescription(), N);
R->addRange(A->getIdx()->getSourceRange());
bugreporter::trackExpressionValue(N, A->getIdx(), *R);
C.emitReport(std::move(R));
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
index 05f8f6084c0b..ddc6cc9e8202 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
@@ -23,7 +23,7 @@ using namespace ento;
namespace {
class UndefinedAssignmentChecker
: public Checker<check::Bind> {
- mutable std::unique_ptr<BugType> BT;
+ const BugType BT{this, "Assigned value is garbage or undefined"};
public:
void checkBind(SVal location, SVal val, const Stmt *S,
@@ -39,7 +39,6 @@ void UndefinedAssignmentChecker::checkBind(SVal location, SVal val,
// Do not report assignments of uninitialized values inside swap functions.
// This should allow to swap partially uninitialized structs
- // (radar://14129997)
if (const FunctionDecl *EnclosingFunctionDecl =
dyn_cast<FunctionDecl>(C.getStackFrame()->getDecl()))
if (C.getCalleeName(EnclosingFunctionDecl) == "swap")
@@ -50,11 +49,6 @@ void UndefinedAssignmentChecker::checkBind(SVal location, SVal val,
if (!N)
return;
- static const char *const DefaultMsg =
- "Assigned value is garbage or undefined";
- if (!BT)
- BT.reset(new BuiltinBug(this, DefaultMsg));
-
// Generate a report for this bug.
llvm::SmallString<128> Str;
llvm::raw_svector_ostream OS(Str);
@@ -92,7 +86,7 @@ void UndefinedAssignmentChecker::checkBind(SVal location, SVal val,
if (const auto *CD =
dyn_cast<CXXConstructorDecl>(C.getStackFrame()->getDecl())) {
if (CD->isImplicit()) {
- for (auto I : CD->inits()) {
+ for (auto *I : CD->inits()) {
if (I->getInit()->IgnoreImpCasts() == StoreE) {
OS << "Value assigned to field '" << I->getMember()->getName()
<< "' in implicit constructor is garbage or undefined";
@@ -106,9 +100,9 @@ void UndefinedAssignmentChecker::checkBind(SVal location, SVal val,
}
if (OS.str().empty())
- OS << DefaultMsg;
+ OS << BT.getDescription();
- auto R = std::make_unique<PathSensitiveBugReport>(*BT, OS.str(), N);
+ auto R = std::make_unique<PathSensitiveBugReport>(BT, OS.str(), N);
if (ex) {
R->addRange(ex->getSourceRange());
bugreporter::trackExpressionValue(N, ex, *R);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedNewArraySizeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedNewArraySizeChecker.cpp
new file mode 100644
index 000000000000..f053ee887a1a
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefinedNewArraySizeChecker.cpp
@@ -0,0 +1,80 @@
+//===--- UndefinedNewArraySizeChecker.cpp -----------------------*- C++ -*--==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines UndefinedNewArraySizeChecker, a builtin check in ExprEngine
+// that checks if the size of the array in a new[] expression is undefined.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class UndefinedNewArraySizeChecker : public Checker<check::PreCall> {
+
+private:
+ BugType BT{this, "Undefined array element count in new[]",
+ categories::LogicError};
+
+public:
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void HandleUndefinedArrayElementCount(CheckerContext &C, SVal ArgVal,
+ const Expr *Init,
+ SourceRange Range) const;
+};
+} // namespace
+
+void UndefinedNewArraySizeChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ if (const auto *AC = dyn_cast<CXXAllocatorCall>(&Call)) {
+ if (!AC->isArray())
+ return;
+
+ auto *SizeEx = *AC->getArraySizeExpr();
+ auto SizeVal = AC->getArraySizeVal();
+
+ if (SizeVal.isUndef())
+ HandleUndefinedArrayElementCount(C, SizeVal, SizeEx,
+ SizeEx->getSourceRange());
+ }
+}
+
+void UndefinedNewArraySizeChecker::HandleUndefinedArrayElementCount(
+ CheckerContext &C, SVal ArgVal, const Expr *Init, SourceRange Range) const {
+
+ if (ExplodedNode *N = C.generateErrorNode()) {
+
+ SmallString<100> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ os << "Element count in new[] is a garbage value";
+
+ auto R = std::make_unique<PathSensitiveBugReport>(BT, os.str(), N);
+ R->markInteresting(ArgVal);
+ R->addRange(Range);
+ bugreporter::trackExpressionValue(N, Init, *R);
+
+ C.emitReport(std::move(R));
+ }
+}
+
+void ento::registerUndefinedNewArraySizeChecker(CheckerManager &mgr) {
+ mgr.registerChecker<UndefinedNewArraySizeChecker>();
+}
+
+bool ento::shouldRegisterUndefinedNewArraySizeChecker(
+ const CheckerManager &mgr) {
+ return mgr.getLangOpts().CPlusPlus;
+}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObject.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObject.h
index 2fcdd6086309..e35778e6480c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObject.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObject.h
@@ -299,7 +299,7 @@ private:
bool isDereferencableUninit(const FieldRegion *FR, FieldChainInfo LocalChain);
/// Returns true if the value of a primitive object is uninitialized.
- bool isPrimitiveUninit(const SVal &V);
+ bool isPrimitiveUninit(SVal V);
// Note that we don't have a method for arrays -- the elements of an array are
// often left uninitialized intentionally even when it is of a C++ record
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
index 4182b51c02b0..6e1222fedad3 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
@@ -38,15 +38,12 @@ namespace {
class UninitializedObjectChecker
: public Checker<check::EndFunction, check::DeadSymbols> {
- std::unique_ptr<BuiltinBug> BT_uninitField;
+ const BugType BT_uninitField{this, "Uninitialized fields"};
public:
// The fields of this struct will be initialized when registering the checker.
UninitObjCheckerOptions Opts;
- UninitializedObjectChecker()
- : BT_uninitField(new BuiltinBug(this, "Uninitialized fields")) {}
-
void checkEndFunction(const ReturnStmt *RS, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
};
@@ -57,19 +54,17 @@ class RegularField final : public FieldNode {
public:
RegularField(const FieldRegion *FR) : FieldNode(FR) {}
- virtual void printNoteMsg(llvm::raw_ostream &Out) const override {
+ void printNoteMsg(llvm::raw_ostream &Out) const override {
Out << "uninitialized field ";
}
- virtual void printPrefix(llvm::raw_ostream &Out) const override {}
+ void printPrefix(llvm::raw_ostream &Out) const override {}
- virtual void printNode(llvm::raw_ostream &Out) const override {
+ void printNode(llvm::raw_ostream &Out) const override {
Out << getVariableName(getDecl());
}
- virtual void printSeparator(llvm::raw_ostream &Out) const override {
- Out << '.';
- }
+ void printSeparator(llvm::raw_ostream &Out) const override { Out << '.'; }
};
/// Represents that the FieldNode that comes after this is declared in a base
@@ -85,20 +80,20 @@ public:
assert(T->getAsCXXRecordDecl());
}
- virtual void printNoteMsg(llvm::raw_ostream &Out) const override {
+ void printNoteMsg(llvm::raw_ostream &Out) const override {
llvm_unreachable("This node can never be the final node in the "
"fieldchain!");
}
- virtual void printPrefix(llvm::raw_ostream &Out) const override {}
+ void printPrefix(llvm::raw_ostream &Out) const override {}
- virtual void printNode(llvm::raw_ostream &Out) const override {
+ void printNode(llvm::raw_ostream &Out) const override {
Out << BaseClassT->getAsCXXRecordDecl()->getName() << "::";
}
- virtual void printSeparator(llvm::raw_ostream &Out) const override {}
+ void printSeparator(llvm::raw_ostream &Out) const override {}
- virtual bool isBase() const override { return true; }
+ bool isBase() const override { return true; }
};
} // end of anonymous namespace
@@ -188,7 +183,7 @@ void UninitializedObjectChecker::checkEndFunction(
for (const auto &Pair : UninitFields) {
auto Report = std::make_unique<PathSensitiveBugReport>(
- *BT_uninitField, Pair.second, Node, LocUsedForUniqueing,
+ BT_uninitField, Pair.second, Node, LocUsedForUniqueing,
Node->getLocationContext()->getDecl());
Context.emitReport(std::move(Report));
}
@@ -202,7 +197,7 @@ void UninitializedObjectChecker::checkEndFunction(
<< " at the end of the constructor call";
auto Report = std::make_unique<PathSensitiveBugReport>(
- *BT_uninitField, WarningOS.str(), Node, LocUsedForUniqueing,
+ BT_uninitField, WarningOS.str(), Node, LocUsedForUniqueing,
Node->getLocationContext()->getDecl());
for (const auto &Pair : UninitFields) {
@@ -330,7 +325,7 @@ bool FindUninitializedFields::isNonUnionUninit(const TypedValueRegion *R,
SVal V = State->getSVal(FieldVal);
- if (isDereferencableType(T) || V.getAs<nonloc::LocAsInteger>()) {
+ if (isDereferencableType(T) || isa<nonloc::LocAsInteger>(V)) {
if (isDereferencableUninit(FR, LocalChain))
ContainsUninitField = true;
continue;
@@ -381,7 +376,7 @@ bool FindUninitializedFields::isUnionUninit(const TypedValueRegion *R) {
return false;
}
-bool FindUninitializedFields::isPrimitiveUninit(const SVal &V) {
+bool FindUninitializedFields::isPrimitiveUninit(SVal V) {
if (V.isUndef())
return true;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp
index f0dd0bf813af..54e1e0e11909 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp
@@ -19,6 +19,7 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h"
+#include <optional>
using namespace clang;
using namespace clang::ento;
@@ -34,20 +35,20 @@ public:
LocField(const FieldRegion *FR, const bool IsDereferenced = true)
: FieldNode(FR), IsDereferenced(IsDereferenced) {}
- virtual void printNoteMsg(llvm::raw_ostream &Out) const override {
+ void printNoteMsg(llvm::raw_ostream &Out) const override {
if (IsDereferenced)
Out << "uninitialized pointee ";
else
Out << "uninitialized pointer ";
}
- virtual void printPrefix(llvm::raw_ostream &Out) const override {}
+ void printPrefix(llvm::raw_ostream &Out) const override {}
- virtual void printNode(llvm::raw_ostream &Out) const override {
+ void printNode(llvm::raw_ostream &Out) const override {
Out << getVariableName(getDecl());
}
- virtual void printSeparator(llvm::raw_ostream &Out) const override {
+ void printSeparator(llvm::raw_ostream &Out) const override {
if (getDecl()->getType()->isPointerType())
Out << "->";
else
@@ -64,11 +65,11 @@ public:
NeedsCastLocField(const FieldRegion *FR, const QualType &T)
: FieldNode(FR), CastBackType(T) {}
- virtual void printNoteMsg(llvm::raw_ostream &Out) const override {
+ void printNoteMsg(llvm::raw_ostream &Out) const override {
Out << "uninitialized pointee ";
}
- virtual void printPrefix(llvm::raw_ostream &Out) const override {
+ void printPrefix(llvm::raw_ostream &Out) const override {
// If this object is a nonloc::LocAsInteger.
if (getDecl()->getType()->isIntegerType())
Out << "reinterpret_cast";
@@ -78,13 +79,11 @@ public:
Out << '<' << CastBackType.getAsString() << ">(";
}
- virtual void printNode(llvm::raw_ostream &Out) const override {
+ void printNode(llvm::raw_ostream &Out) const override {
Out << getVariableName(getDecl()) << ')';
}
- virtual void printSeparator(llvm::raw_ostream &Out) const override {
- Out << "->";
- }
+ void printSeparator(llvm::raw_ostream &Out) const override { Out << "->"; }
};
/// Represents a Loc field that points to itself.
@@ -93,17 +92,17 @@ class CyclicLocField final : public FieldNode {
public:
CyclicLocField(const FieldRegion *FR) : FieldNode(FR) {}
- virtual void printNoteMsg(llvm::raw_ostream &Out) const override {
+ void printNoteMsg(llvm::raw_ostream &Out) const override {
Out << "object references itself ";
}
- virtual void printPrefix(llvm::raw_ostream &Out) const override {}
+ void printPrefix(llvm::raw_ostream &Out) const override {}
- virtual void printNode(llvm::raw_ostream &Out) const override {
+ void printNode(llvm::raw_ostream &Out) const override {
Out << getVariableName(getDecl());
}
- virtual void printSeparator(llvm::raw_ostream &Out) const override {
+ void printSeparator(llvm::raw_ostream &Out) const override {
llvm_unreachable("CyclicLocField objects must be the last node of the "
"fieldchain!");
}
@@ -123,9 +122,9 @@ struct DereferenceInfo {
/// Dereferences \p FR and returns with the pointee's region, and whether it
/// needs to be casted back to it's location type. If for whatever reason
-/// dereferencing fails, returns with None.
-static llvm::Optional<DereferenceInfo> dereference(ProgramStateRef State,
- const FieldRegion *FR);
+/// dereferencing fails, returns std::nullopt.
+static std::optional<DereferenceInfo> dereference(ProgramStateRef State,
+ const FieldRegion *FR);
/// Returns whether \p T can be (transitively) dereferenced to a void pointer
/// type (void*, void**, ...).
@@ -141,10 +140,10 @@ bool FindUninitializedFields::isDereferencableUninit(
SVal V = State->getSVal(FR);
assert((isDereferencableType(FR->getDecl()->getType()) ||
- V.getAs<nonloc::LocAsInteger>()) &&
+ isa<nonloc::LocAsInteger>(V)) &&
"This method only checks dereferenceable objects!");
- if (V.isUnknown() || V.getAs<loc::ConcreteInt>()) {
+ if (V.isUnknown() || isa<loc::ConcreteInt>(V)) {
IsAnyFieldInitialized = true;
return false;
}
@@ -161,7 +160,7 @@ bool FindUninitializedFields::isDereferencableUninit(
// At this point the pointer itself is initialized and points to a valid
// location, we'll now check the pointee.
- llvm::Optional<DereferenceInfo> DerefInfo = dereference(State, FR);
+ std::optional<DereferenceInfo> DerefInfo = dereference(State, FR);
if (!DerefInfo) {
IsAnyFieldInitialized = true;
return false;
@@ -219,8 +218,8 @@ bool FindUninitializedFields::isDereferencableUninit(
// Utility functions.
//===----------------------------------------------------------------------===//
-static llvm::Optional<DereferenceInfo> dereference(ProgramStateRef State,
- const FieldRegion *FR) {
+static std::optional<DereferenceInfo> dereference(ProgramStateRef State,
+ const FieldRegion *FR) {
llvm::SmallSet<const TypedValueRegion *, 5> VisitedRegions;
@@ -230,13 +229,13 @@ static llvm::Optional<DereferenceInfo> dereference(ProgramStateRef State,
// If the static type of the field is a void pointer, or it is a
// nonloc::LocAsInteger, we need to cast it back to the dynamic type before
// dereferencing.
- bool NeedsCastBack = isVoidPointer(FR->getDecl()->getType()) ||
- V.getAs<nonloc::LocAsInteger>();
+ bool NeedsCastBack =
+ isVoidPointer(FR->getDecl()->getType()) || isa<nonloc::LocAsInteger>(V);
// The region we'd like to acquire.
const auto *R = V.getAsRegion()->getAs<TypedValueRegion>();
if (!R)
- return None;
+ return std::nullopt;
VisitedRegions.insert(R);
@@ -247,7 +246,7 @@ static llvm::Optional<DereferenceInfo> dereference(ProgramStateRef State,
R = Tmp->getAs<TypedValueRegion>();
if (!R)
- return None;
+ return std::nullopt;
// We found a cyclic pointer, like int *ptr = (int *)&ptr.
if (!VisitedRegions.insert(R).second)
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
index 381334de068e..b05ce610067c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -11,17 +11,18 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -39,12 +40,12 @@ enum class OpenVariant {
namespace {
class UnixAPIMisuseChecker : public Checker< check::PreStmt<CallExpr> > {
- mutable std::unique_ptr<BugType> BT_open, BT_pthreadOnce;
- mutable Optional<uint64_t> Val_O_CREAT;
+ const BugType BT_open{this, "Improper use of 'open'", categories::UnixAPI};
+ const BugType BT_pthreadOnce{this, "Improper use of 'pthread_once'",
+ categories::UnixAPI};
+ mutable std::optional<uint64_t> Val_O_CREAT;
public:
- DefaultBool CheckMisuse, CheckPortability;
-
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
void CheckOpen(CheckerContext &C, const CallExpr *CE) const;
@@ -66,7 +67,9 @@ public:
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
private:
- mutable std::unique_ptr<BugType> BT_mallocZero;
+ const BugType BT_mallocZero{
+ this, "Undefined allocation of 0 bytes (CERT MEM04-C; CWE-131)",
+ categories::UnixAPI};
void CheckCallocZero(CheckerContext &C, const CallExpr *CE) const;
void CheckMallocZero(CheckerContext &C, const CallExpr *CE) const;
@@ -89,14 +92,6 @@ private:
} //end anonymous namespace
-static void LazyInitialize(const CheckerBase *Checker,
- std::unique_ptr<BugType> &BT,
- const char *name) {
- if (BT)
- return;
- BT.reset(new BugType(Checker, name, categories::UnixAPI));
-}
-
//===----------------------------------------------------------------------===//
// "open" (man 2 open)
//===----------------------------------------------------------------------===/
@@ -110,7 +105,7 @@ void UnixAPIMisuseChecker::checkPreStmt(const CallExpr *CE,
// Don't treat functions in namespaces with the same name a Unix function
// as a call to the Unix function.
const DeclContext *NamespaceCtx = FD->getEnclosingNamespaceContext();
- if (NamespaceCtx && isa<NamespaceDecl>(NamespaceCtx))
+ if (isa_and_nonnull<NamespaceDecl>(NamespaceCtx))
return;
StringRef FName = C.getCalleeName(FD);
@@ -134,9 +129,7 @@ void UnixAPIMisuseChecker::ReportOpenBug(CheckerContext &C,
if (!N)
return;
- LazyInitialize(this, BT_open, "Improper use of 'open'");
-
- auto Report = std::make_unique<PathSensitiveBugReport>(*BT_open, Msg, N);
+ auto Report = std::make_unique<PathSensitiveBugReport>(BT_open, Msg, N);
Report->addRange(SR);
C.emitReport(std::move(Report));
}
@@ -182,8 +175,7 @@ void UnixAPIMisuseChecker::CheckOpenVariant(CheckerContext &C,
ProgramStateRef state = C.getState();
if (CE->getNumArgs() < MinArgCount) {
- // The frontend should issue a warning for this case, so this is a sanity
- // check.
+ // The frontend should issue a warning for this case. Just return.
return;
} else if (CE->getNumArgs() == MaxArgCount) {
const Expr *Arg = CE->getArg(CreateModeArgIndex);
@@ -214,7 +206,7 @@ void UnixAPIMisuseChecker::CheckOpenVariant(CheckerContext &C,
// The definition of O_CREAT is platform specific. We need a better way
// of querying this information from the checking environment.
- if (!Val_O_CREAT.hasValue()) {
+ if (!Val_O_CREAT) {
if (C.getASTContext().getTargetInfo().getTriple().getVendor()
== llvm::Triple::Apple)
Val_O_CREAT = 0x0200;
@@ -230,14 +222,15 @@ void UnixAPIMisuseChecker::CheckOpenVariant(CheckerContext &C,
// Now check if oflags has O_CREAT set.
const Expr *oflagsEx = CE->getArg(FlagsArgIndex);
const SVal V = C.getSVal(oflagsEx);
- if (!V.getAs<NonLoc>()) {
+ if (!isa<NonLoc>(V)) {
// The case where 'V' can be a location can only be due to a bad header,
// so in this case bail out.
return;
}
NonLoc oflags = V.castAs<NonLoc>();
NonLoc ocreateFlag = C.getSValBuilder()
- .makeIntVal(Val_O_CREAT.getValue(), oflagsEx->getType()).castAs<NonLoc>();
+ .makeIntVal(*Val_O_CREAT, oflagsEx->getType())
+ .castAs<NonLoc>();
SVal maskedFlagsUC = C.getSValBuilder().evalBinOpNN(state, BO_And,
oflags, ocreateFlag,
oflagsEx->getType());
@@ -303,10 +296,8 @@ void UnixAPIMisuseChecker::CheckPthreadOnce(CheckerContext &C,
if (isa<VarRegion>(R) && isa<StackLocalsSpaceRegion>(R->getMemorySpace()))
os << " Perhaps you intended to declare the variable as 'static'?";
- LazyInitialize(this, BT_pthreadOnce, "Improper use of 'pthread_once'");
-
auto report =
- std::make_unique<PathSensitiveBugReport>(*BT_pthreadOnce, os.str(), N);
+ std::make_unique<PathSensitiveBugReport>(BT_pthreadOnce, os.str(), N);
report->addRange(CE->getArg(0)->getSourceRange());
C.emitReport(std::move(report));
}
@@ -343,14 +334,11 @@ bool UnixAPIPortabilityChecker::ReportZeroByteAllocation(
if (!N)
return false;
- LazyInitialize(this, BT_mallocZero,
- "Undefined allocation of 0 bytes (CERT MEM04-C; CWE-131)");
-
SmallString<256> S;
llvm::raw_svector_ostream os(S);
os << "Call to '" << fn_name << "' has an allocation size of 0 bytes";
auto report =
- std::make_unique<PathSensitiveBugReport>(*BT_mallocZero, os.str(), N);
+ std::make_unique<PathSensitiveBugReport>(BT_mallocZero, os.str(), N);
report->addRange(arg->getSourceRange());
bugreporter::trackExpressionValue(N, arg, *report);
@@ -366,7 +354,7 @@ void UnixAPIPortabilityChecker::BasicAllocationCheck(CheckerContext &C,
const unsigned numArgs,
const unsigned sizeArg,
const char *fn) const {
- // Sanity check for the correct number of arguments
+ // Check for the correct number of arguments.
if (CE->getNumArgs() != numArgs)
return;
@@ -466,7 +454,7 @@ void UnixAPIPortabilityChecker::checkPreStmt(const CallExpr *CE,
// Don't treat functions in namespaces with the same name a Unix function
// as a call to the Unix function.
const DeclContext *NamespaceCtx = FD->getEnclosingNamespaceContext();
- if (NamespaceCtx && isa<NamespaceDecl>(NamespaceCtx))
+ if (isa_and_nonnull<NamespaceDecl>(NamespaceCtx))
return;
StringRef FName = C.getCalleeName(FD);
@@ -504,7 +492,7 @@ void UnixAPIPortabilityChecker::checkPreStmt(const CallExpr *CE,
mgr.registerChecker<CHECKERNAME>(); \
} \
\
- bool ento::shouldRegister##CHECKERNAME(const CheckerManager &mgr) { \
+ bool ento::shouldRegister##CHECKERNAME(const CheckerManager &mgr) { \
return true; \
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
index d231be64c2e1..d24a124f5ffe 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
@@ -24,6 +24,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "llvm/ADT/SmallSet.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -58,9 +59,8 @@ void UnreachableCodeChecker::checkEndAnalysis(ExplodedGraph &G,
const ParentMap *PM = nullptr;
const LocationContext *LC = nullptr;
// Iterate over ExplodedGraph
- for (ExplodedGraph::node_iterator I = G.nodes_begin(), E = G.nodes_end();
- I != E; ++I) {
- const ProgramPoint &P = I->getLocation();
+ for (const ExplodedNode &N : G.nodes()) {
+ const ProgramPoint &P = N.getLocation();
LC = P.getLocationContext();
if (!LC->inTopFrame())
continue;
@@ -74,7 +74,7 @@ void UnreachableCodeChecker::checkEndAnalysis(ExplodedGraph &G,
if (!PM)
PM = &LC->getParentMap();
- if (Optional<BlockEntrance> BE = P.getAs<BlockEntrance>()) {
+ if (std::optional<BlockEntrance> BE = P.getAs<BlockEntrance>()) {
const CFGBlock *CB = BE->getBlock();
reachable.insert(CB->getBlockID());
}
@@ -92,8 +92,7 @@ void UnreachableCodeChecker::checkEndAnalysis(ExplodedGraph &G,
return;
// Find CFGBlocks that were not covered by any node
- for (CFG::const_iterator I = C->begin(), E = C->end(); I != E; ++I) {
- const CFGBlock *CB = *I;
+ for (const CFGBlock *CB : *C) {
// Check if the block is unreachable
if (reachable.count(CB->getBlockID()))
continue;
@@ -129,7 +128,7 @@ void UnreachableCodeChecker::checkEndAnalysis(ExplodedGraph &G,
bool foundUnreachable = false;
for (CFGBlock::const_iterator ci = CB->begin(), ce = CB->end();
ci != ce; ++ci) {
- if (Optional<CFGStmt> S = (*ci).getAs<CFGStmt>())
+ if (std::optional<CFGStmt> S = (*ci).getAs<CFGStmt>())
if (const CallExpr *CE = dyn_cast<CallExpr>(S->getStmt())) {
if (CE->getBuiltinCallee() == Builtin::BI__builtin_unreachable ||
CE->isBuiltinAssumeFalse(Eng.getContext())) {
@@ -180,34 +179,30 @@ void UnreachableCodeChecker::FindUnreachableEntryPoints(const CFGBlock *CB,
CFGBlocksSet &visited) {
visited.insert(CB->getBlockID());
- for (CFGBlock::const_pred_iterator I = CB->pred_begin(), E = CB->pred_end();
- I != E; ++I) {
- if (!*I)
+ for (const CFGBlock *PredBlock : CB->preds()) {
+ if (!PredBlock)
continue;
- if (!reachable.count((*I)->getBlockID())) {
+ if (!reachable.count(PredBlock->getBlockID())) {
// If we find an unreachable predecessor, mark this block as reachable so
// we don't report this block
reachable.insert(CB->getBlockID());
- if (!visited.count((*I)->getBlockID()))
+ if (!visited.count(PredBlock->getBlockID()))
// If we haven't previously visited the unreachable predecessor, recurse
- FindUnreachableEntryPoints(*I, reachable, visited);
+ FindUnreachableEntryPoints(PredBlock, reachable, visited);
}
}
}
// Find the Stmt* in a CFGBlock for reporting a warning
const Stmt *UnreachableCodeChecker::getUnreachableStmt(const CFGBlock *CB) {
- for (CFGBlock::const_iterator I = CB->begin(), E = CB->end(); I != E; ++I) {
- if (Optional<CFGStmt> S = I->getAs<CFGStmt>()) {
+ for (const CFGElement &Elem : *CB) {
+ if (std::optional<CFGStmt> S = Elem.getAs<CFGStmt>()) {
if (!isa<DeclStmt>(S->getStmt()))
return S->getStmt();
}
}
- if (const Stmt *S = CB->getTerminatorStmt())
- return S;
- else
- return nullptr;
+ return CB->getTerminatorStmt();
}
// Determines if the path to this CFGBlock contained an element that infers this
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
index 96501215c689..d76fe4991869 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
@@ -13,9 +13,9 @@
//
//===----------------------------------------------------------------------===//
-#include "Taint.h"
#include "clang/AST/CharUnits.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Checkers/Taint.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -24,6 +24,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -33,14 +34,11 @@ namespace {
class VLASizeChecker
: public Checker<check::PreStmt<DeclStmt>,
check::PreStmt<UnaryExprOrTypeTraitExpr>> {
- mutable std::unique_ptr<BugType> BT;
- enum VLASize_Kind {
- VLA_Garbage,
- VLA_Zero,
- VLA_Tainted,
- VLA_Negative,
- VLA_Overflow
- };
+ const BugType BT{this, "Dangerous variable-length array (VLA) declaration"};
+ const BugType TaintBT{this,
+ "Dangerous variable-length array (VLA) declaration",
+ categories::TaintedData};
+ enum VLASize_Kind { VLA_Garbage, VLA_Zero, VLA_Negative, VLA_Overflow };
/// Check a VLA for validity.
/// Every dimension of the array and the total size is checked for validity.
@@ -54,8 +52,10 @@ class VLASizeChecker
const Expr *SizeE) const;
void reportBug(VLASize_Kind Kind, const Expr *SizeE, ProgramStateRef State,
- CheckerContext &C,
- std::unique_ptr<BugReporterVisitor> Visitor = nullptr) const;
+ CheckerContext &C) const;
+
+ void reportTaintBug(const Expr *SizeE, ProgramStateRef State,
+ CheckerContext &C, SVal TaintedSVal) const;
public:
void checkPreStmt(const DeclStmt *DS, CheckerContext &C) const;
@@ -166,8 +166,7 @@ ProgramStateRef VLASizeChecker::checkVLAIndexSize(CheckerContext &C,
// Check if the size is tainted.
if (isTainted(State, SizeV)) {
- reportBug(VLA_Tainted, SizeE, nullptr, C,
- std::make_unique<TaintBugVisitor>(SizeV));
+ reportTaintBug(SizeE, State, C, SizeV);
return nullptr;
}
@@ -191,8 +190,9 @@ ProgramStateRef VLASizeChecker::checkVLAIndexSize(CheckerContext &C,
QualType SizeTy = SizeE->getType();
DefinedOrUnknownSVal Zero = SVB.makeZeroVal(SizeTy);
- SVal LessThanZeroVal = SVB.evalBinOp(State, BO_LT, SizeD, Zero, SizeTy);
- if (Optional<DefinedSVal> LessThanZeroDVal =
+ SVal LessThanZeroVal =
+ SVB.evalBinOp(State, BO_LT, SizeD, Zero, SVB.getConditionType());
+ if (std::optional<DefinedSVal> LessThanZeroDVal =
LessThanZeroVal.getAs<DefinedSVal>()) {
ConstraintManager &CM = C.getConstraintManager();
ProgramStateRef StatePos, StateNeg;
@@ -208,17 +208,34 @@ ProgramStateRef VLASizeChecker::checkVLAIndexSize(CheckerContext &C,
return State;
}
-void VLASizeChecker::reportBug(
- VLASize_Kind Kind, const Expr *SizeE, ProgramStateRef State,
- CheckerContext &C, std::unique_ptr<BugReporterVisitor> Visitor) const {
+void VLASizeChecker::reportTaintBug(const Expr *SizeE, ProgramStateRef State,
+ CheckerContext &C, SVal TaintedSVal) const {
// Generate an error node.
ExplodedNode *N = C.generateErrorNode(State);
if (!N)
return;
- if (!BT)
- BT.reset(new BuiltinBug(
- this, "Dangerous variable-length array (VLA) declaration"));
+ SmallString<256> buf;
+ llvm::raw_svector_ostream os(buf);
+ os << "Declared variable-length array (VLA) ";
+ os << "has tainted size";
+
+ auto report = std::make_unique<PathSensitiveBugReport>(TaintBT, os.str(), N);
+ report->addRange(SizeE->getSourceRange());
+ bugreporter::trackExpressionValue(N, SizeE, *report);
+ // The vla size may be a complex expression where multiple memory locations
+ // are tainted.
+ for (auto Sym : getTaintedSymbols(State, TaintedSVal))
+ report->markInteresting(Sym);
+ C.emitReport(std::move(report));
+}
+
+void VLASizeChecker::reportBug(VLASize_Kind Kind, const Expr *SizeE,
+ ProgramStateRef State, CheckerContext &C) const {
+ // Generate an error node.
+ ExplodedNode *N = C.generateErrorNode(State);
+ if (!N)
+ return;
SmallString<256> buf;
llvm::raw_svector_ostream os(buf);
@@ -230,9 +247,6 @@ void VLASizeChecker::reportBug(
case VLA_Zero:
os << "has zero size";
break;
- case VLA_Tainted:
- os << "has tainted size";
- break;
case VLA_Negative:
os << "has negative size";
break;
@@ -241,8 +255,7 @@ void VLASizeChecker::reportBug(
break;
}
- auto report = std::make_unique<PathSensitiveBugReport>(*BT, os.str(), N);
- report->addVisitor(std::move(Visitor));
+ auto report = std::make_unique<PathSensitiveBugReport>(BT, os.str(), N);
report->addRange(SizeE->getSourceRange());
bugreporter::trackExpressionValue(N, SizeE, *report);
C.emitReport(std::move(report));
@@ -278,8 +291,7 @@ void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
if (!State)
return;
- auto ArraySizeNL = ArraySize.getAs<NonLoc>();
- if (!ArraySizeNL) {
+ if (!isa<NonLoc>(ArraySize)) {
// Array size could not be determined but state may contain new assumptions.
C.addTransition(State);
return;
@@ -289,7 +301,7 @@ void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
if (VD) {
State =
setDynamicExtent(State, State->getRegion(VD, C.getLocationContext()),
- ArraySize.castAs<DefinedOrUnknownSVal>(), SVB);
+ ArraySize.castAs<NonLoc>(), SVB);
}
// Remember our assumptions!
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
index dde5912b6d6e..2d1b873abf73 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
@@ -15,6 +15,7 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
@@ -45,7 +46,7 @@ public:
CK_NumCheckKinds
};
- DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ bool ChecksEnabled[CK_NumCheckKinds] = {false};
CheckerNameRef CheckNames[CK_NumCheckKinds];
void checkPreStmt(const VAArgExpr *VAA, CheckerContext &C) const;
@@ -99,42 +100,41 @@ private:
};
const SmallVector<ValistChecker::VAListAccepter, 15>
- ValistChecker::VAListAccepters = {
- {{"vfprintf", 3}, 2},
- {{"vfscanf", 3}, 2},
- {{"vprintf", 2}, 1},
- {{"vscanf", 2}, 1},
- {{"vsnprintf", 4}, 3},
- {{"vsprintf", 3}, 2},
- {{"vsscanf", 3}, 2},
- {{"vfwprintf", 3}, 2},
- {{"vfwscanf", 3}, 2},
- {{"vwprintf", 2}, 1},
- {{"vwscanf", 2}, 1},
- {{"vswprintf", 4}, 3},
- // vswprintf is the wide version of vsnprintf,
- // vsprintf has no wide version
- {{"vswscanf", 3}, 2}};
-
-const CallDescription
- ValistChecker::VaStart("__builtin_va_start", /*Args=*/2, /*Params=*/1),
- ValistChecker::VaCopy("__builtin_va_copy", 2),
- ValistChecker::VaEnd("__builtin_va_end", 1);
+ ValistChecker::VAListAccepters = {{{{"vfprintf"}, 3}, 2},
+ {{{"vfscanf"}, 3}, 2},
+ {{{"vprintf"}, 2}, 1},
+ {{{"vscanf"}, 2}, 1},
+ {{{"vsnprintf"}, 4}, 3},
+ {{{"vsprintf"}, 3}, 2},
+ {{{"vsscanf"}, 3}, 2},
+ {{{"vfwprintf"}, 3}, 2},
+ {{{"vfwscanf"}, 3}, 2},
+ {{{"vwprintf"}, 2}, 1},
+ {{{"vwscanf"}, 2}, 1},
+ {{{"vswprintf"}, 4}, 3},
+ // vswprintf is the wide version of
+ // vsnprintf, vsprintf has no wide version
+ {{{"vswscanf"}, 3}, 2}};
+
+const CallDescription ValistChecker::VaStart({"__builtin_va_start"}, /*Args=*/2,
+ /*Params=*/1),
+ ValistChecker::VaCopy({"__builtin_va_copy"}, 2),
+ ValistChecker::VaEnd({"__builtin_va_end"}, 1);
} // end anonymous namespace
void ValistChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
if (!Call.isGlobalCFunction())
return;
- if (Call.isCalled(VaStart))
+ if (VaStart.matches(Call))
checkVAListStartCall(Call, C, false);
- else if (Call.isCalled(VaCopy))
+ else if (VaCopy.matches(Call))
checkVAListStartCall(Call, C, true);
- else if (Call.isCalled(VaEnd))
+ else if (VaEnd.matches(Call))
checkVAListEndCall(Call, C);
else {
for (auto FuncInfo : VAListAccepters) {
- if (!Call.isCalled(FuncInfo.Func))
+ if (!FuncInfo.Func.matches(Call))
continue;
bool Symbolic;
const MemRegion *VAList =
@@ -177,7 +177,7 @@ const MemRegion *ValistChecker::getVAListAsRegion(SVal SV, const Expr *E,
if (isa<ParmVarDecl>(DeclReg->getDecl()))
Reg = C.getState()->getSVal(SV.castAs<Loc>()).getAsRegion();
}
- IsSymbolic = Reg && Reg->getAs<SymbolicRegion>();
+ IsSymbolic = Reg && Reg->getBaseRegion()->getAs<SymbolicRegion>();
// Some VarRegion based VA lists reach here as ElementRegions.
const auto *EReg = dyn_cast_or_null<ElementRegion>(Reg);
return (EReg && VaListModelledAsArray) ? EReg->getSuperRegion() : Reg;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
index 8f147026ae19..cb73ac68edd1 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VforkChecker.cpp
@@ -9,7 +9,7 @@
// This file defines vfork checker which checks for dangerous uses of vfork.
// Vforked process shares memory (including stack) with parent so it's
// range of actions is significantly limited: can't write variables,
-// can't call functions not in whitelist, etc. For more details, see
+// can't call functions not in the allowed list, etc. For more details, see
// http://man7.org/linux/man-pages/man2/vfork.2.html
//
// This checker checks for prohibited constructs in vforked process.
@@ -35,6 +35,7 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/AST/ParentMap.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -43,20 +44,21 @@ namespace {
class VforkChecker : public Checker<check::PreCall, check::PostCall,
check::Bind, check::PreStmt<ReturnStmt>> {
- mutable std::unique_ptr<BuiltinBug> BT;
- mutable llvm::SmallSet<const IdentifierInfo *, 10> VforkWhitelist;
- mutable const IdentifierInfo *II_vfork;
+ const BugType BT{this, "Dangerous construct in a vforked process"};
+ mutable llvm::SmallSet<const IdentifierInfo *, 10> VforkAllowlist;
+ mutable const IdentifierInfo *II_vfork = nullptr;
static bool isChildProcess(const ProgramStateRef State);
bool isVforkCall(const Decl *D, CheckerContext &C) const;
- bool isCallWhitelisted(const IdentifierInfo *II, CheckerContext &C) const;
+ bool isCallExplicitelyAllowed(const IdentifierInfo *II,
+ CheckerContext &C) const;
void reportBug(const char *What, CheckerContext &C,
const char *Details = nullptr) const;
public:
- VforkChecker() : II_vfork(nullptr) {}
+ VforkChecker() = default;
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
@@ -93,9 +95,9 @@ bool VforkChecker::isVforkCall(const Decl *D, CheckerContext &C) const {
}
// Returns true iff ok to call function after successful vfork.
-bool VforkChecker::isCallWhitelisted(const IdentifierInfo *II,
- CheckerContext &C) const {
- if (VforkWhitelist.empty()) {
+bool VforkChecker::isCallExplicitelyAllowed(const IdentifierInfo *II,
+ CheckerContext &C) const {
+ if (VforkAllowlist.empty()) {
// According to manpage.
const char *ids[] = {
"_Exit",
@@ -112,19 +114,15 @@ bool VforkChecker::isCallWhitelisted(const IdentifierInfo *II,
ASTContext &AC = C.getASTContext();
for (const char **id = ids; *id; ++id)
- VforkWhitelist.insert(&AC.Idents.get(*id));
+ VforkAllowlist.insert(&AC.Idents.get(*id));
}
- return VforkWhitelist.count(II);
+ return VforkAllowlist.count(II);
}
void VforkChecker::reportBug(const char *What, CheckerContext &C,
const char *Details) const {
if (ExplodedNode *N = C.generateErrorNode(C.getState())) {
- if (!BT)
- BT.reset(new BuiltinBug(this,
- "Dangerous construct in a vforked process"));
-
SmallString<256> buf;
llvm::raw_svector_ostream os(buf);
@@ -133,7 +131,7 @@ void VforkChecker::reportBug(const char *What, CheckerContext &C,
if (Details)
os << "; " << Details;
- auto Report = std::make_unique<PathSensitiveBugReport>(*BT, os.str(), N);
+ auto Report = std::make_unique<PathSensitiveBugReport>(BT, os.str(), N);
// TODO: mark vfork call in BugReportVisitor
C.emitReport(std::move(Report));
}
@@ -153,8 +151,8 @@ void VforkChecker::checkPostCall(const CallEvent &Call,
// Get return value of vfork.
SVal VforkRetVal = Call.getReturnValue();
- Optional<DefinedOrUnknownSVal> DVal =
- VforkRetVal.getAs<DefinedOrUnknownSVal>();
+ std::optional<DefinedOrUnknownSVal> DVal =
+ VforkRetVal.getAs<DefinedOrUnknownSVal>();
if (!DVal)
return;
@@ -179,12 +177,13 @@ void VforkChecker::checkPostCall(const CallEvent &Call,
C.addTransition(ChildState);
}
-// Prohibit calls to non-whitelist functions in child process.
+// Prohibit calls to functions in child process which are not explicitly
+// allowed.
void VforkChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
- if (isChildProcess(State)
- && !isCallWhitelisted(Call.getCalleeIdentifier(), C))
+ if (isChildProcess(State) &&
+ !isCallExplicitelyAllowed(Call.getCalleeIdentifier(), C))
reportBug("This function call", C);
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
index 1c589e3468c2..33a9a07f9d32 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
@@ -116,7 +116,7 @@ void VirtualCallChecker::checkPreCall(const CallEvent &Call,
if (!ObState)
return;
- bool IsPure = MD->isPure();
+ bool IsPure = MD->isPureVirtual();
// At this point we're sure that we're calling a virtual method
// during construction or destruction, so we'll emit a report.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp
index 9c7a59971763..64028b277021 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp
@@ -12,8 +12,8 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
+#include <optional>
-using llvm::Optional;
namespace clang {
std::pair<const Expr *, bool>
@@ -34,8 +34,7 @@ tryToFindPtrOrigin(const Expr *E, bool StopAtFirstRefCountedObj) {
}
if (auto *call = dyn_cast<CallExpr>(E)) {
if (auto *memberCall = dyn_cast<CXXMemberCallExpr>(call)) {
- Optional<bool> IsGetterOfRefCt =
- isGetterOfRefCounted(memberCall->getMethodDecl());
+ std::optional<bool> IsGetterOfRefCt = isGetterOfRefCounted(memberCall->getMethodDecl());
if (IsGetterOfRefCt && *IsGetterOfRefCt) {
E = memberCall->getImplicitObjectArgument();
if (StopAtFirstRefCountedObj) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h
index ed4577755457..e35ea4ef05dd 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.h
@@ -17,10 +17,6 @@
#include <utility>
namespace clang {
-class CXXRecordDecl;
-class CXXBaseSpecifier;
-class FunctionDecl;
-class CXXMethodDecl;
class Expr;
/// This function de-facto defines a set of transformations that we consider
@@ -29,7 +25,7 @@ class Expr;
/// values).
///
/// For more context see Static Analyzer checkers documentation - specifically
-/// webkit.UncountedCallArgsChecker checker. Whitelist of transformations:
+/// webkit.UncountedCallArgsChecker checker. Allowed list of transformations:
/// - constructors of ref-counted types (including factory methods)
/// - getters of ref-counted types
/// - member overloaded operators
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp
index 97f75135bf92..c753ed84a700 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/NoUncountedMembersChecker.cpp
@@ -17,8 +17,8 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
-#include "llvm/ADT/DenseSet.h"
#include "llvm/Support/Casting.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -69,7 +69,7 @@ public:
if (shouldSkipDecl(RD))
return;
- for (auto Member : RD->fields()) {
+ for (auto *Member : RD->fields()) {
const Type *MemberType = Member->getType().getTypePtrOrNull();
if (!MemberType)
continue;
@@ -77,9 +77,9 @@ public:
if (auto *MemberCXXRD = MemberType->getPointeeCXXRecordDecl()) {
// If we don't see the definition we just don't know.
if (MemberCXXRD->hasDefinition()) {
- llvm::Optional<bool> isRCAble = isRefCountable(MemberCXXRD);
- if (isRCAble && *isRCAble)
- reportBug(Member, MemberType, MemberCXXRD, RD);
+ std::optional<bool> isRCAble = isRefCountable(MemberCXXRD);
+ if (isRCAble && *isRCAble)
+ reportBug(Member, MemberType, MemberCXXRD, RD);
}
}
}
@@ -103,7 +103,7 @@ public:
const auto Kind = RD->getTagKind();
// FIMXE: Should we check union members too?
- if (Kind != TTK_Struct && Kind != TTK_Class)
+ if (Kind != TagTypeKind::Struct && Kind != TagTypeKind::Class)
return true;
// Ignore CXXRecords that come from system headers.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
index a198943c9433..d2b663410580 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.cpp
@@ -12,31 +12,21 @@
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
-#include "llvm/ADT/Optional.h"
+#include <optional>
-using llvm::Optional;
using namespace clang;
namespace {
-bool hasPublicRefAndDeref(const CXXRecordDecl *R) {
+bool hasPublicMethodInBaseClass(const CXXRecordDecl *R,
+ const char *NameToMatch) {
assert(R);
assert(R->hasDefinition());
- bool hasRef = false;
- bool hasDeref = false;
for (const CXXMethodDecl *MD : R->methods()) {
const auto MethodName = safeGetName(MD);
-
- if (MethodName == "ref" && MD->getAccess() == AS_public) {
- if (hasDeref)
- return true;
- hasRef = true;
- } else if (MethodName == "deref" && MD->getAccess() == AS_public) {
- if (hasRef)
- return true;
- hasDeref = true;
- }
+ if (MethodName == NameToMatch && MD->getAccess() == AS_public)
+ return true;
}
return false;
}
@@ -45,54 +35,70 @@ bool hasPublicRefAndDeref(const CXXRecordDecl *R) {
namespace clang {
-llvm::Optional<const clang::CXXRecordDecl *>
-isRefCountable(const CXXBaseSpecifier *Base) {
+std::optional<const clang::CXXRecordDecl *>
+hasPublicMethodInBase(const CXXBaseSpecifier *Base, const char *NameToMatch) {
assert(Base);
const Type *T = Base->getType().getTypePtrOrNull();
if (!T)
- return llvm::None;
+ return std::nullopt;
const CXXRecordDecl *R = T->getAsCXXRecordDecl();
if (!R)
- return llvm::None;
+ return std::nullopt;
if (!R->hasDefinition())
- return llvm::None;
+ return std::nullopt;
- return hasPublicRefAndDeref(R) ? R : nullptr;
+ return hasPublicMethodInBaseClass(R, NameToMatch) ? R : nullptr;
}
-llvm::Optional<bool> isRefCountable(const CXXRecordDecl *R) {
+std::optional<bool> isRefCountable(const CXXRecordDecl* R)
+{
assert(R);
R = R->getDefinition();
if (!R)
- return llvm::None;
+ return std::nullopt;
- if (hasPublicRefAndDeref(R))
+ bool hasRef = hasPublicMethodInBaseClass(R, "ref");
+ bool hasDeref = hasPublicMethodInBaseClass(R, "deref");
+ if (hasRef && hasDeref)
return true;
CXXBasePaths Paths;
Paths.setOrigin(const_cast<CXXRecordDecl *>(R));
bool AnyInconclusiveBase = false;
- const auto isRefCountableBase =
+ const auto hasPublicRefInBase =
[&AnyInconclusiveBase](const CXXBaseSpecifier *Base, CXXBasePath &) {
- Optional<const clang::CXXRecordDecl *> IsRefCountable =
- clang::isRefCountable(Base);
- if (!IsRefCountable) {
+ auto hasRefInBase = clang::hasPublicMethodInBase(Base, "ref");
+ if (!hasRefInBase) {
AnyInconclusiveBase = true;
return false;
}
- return (*IsRefCountable) != nullptr;
+ return (*hasRefInBase) != nullptr;
};
- bool BasesResult = R->lookupInBases(isRefCountableBase, Paths,
+ hasRef = hasRef || R->lookupInBases(hasPublicRefInBase, Paths,
/*LookupInDependent =*/true);
if (AnyInconclusiveBase)
- return llvm::None;
+ return std::nullopt;
- return BasesResult;
+ const auto hasPublicDerefInBase =
+ [&AnyInconclusiveBase](const CXXBaseSpecifier *Base, CXXBasePath &) {
+ auto hasDerefInBase = clang::hasPublicMethodInBase(Base, "deref");
+ if (!hasDerefInBase) {
+ AnyInconclusiveBase = true;
+ return false;
+ }
+ return (*hasDerefInBase) != nullptr;
+ };
+ hasDeref = hasDeref || R->lookupInBases(hasPublicDerefInBase, Paths,
+ /*LookupInDependent =*/true);
+ if (AnyInconclusiveBase)
+ return std::nullopt;
+
+ return hasRef && hasDeref;
}
bool isCtorOfRefCounted(const clang::FunctionDecl *F) {
@@ -112,19 +118,21 @@ bool isCtorOfRefCounted(const clang::FunctionDecl *F) {
|| FunctionName == "Identifier";
}
-llvm::Optional<bool> isUncounted(const CXXRecordDecl *Class) {
+std::optional<bool> isUncounted(const CXXRecordDecl* Class)
+{
// Keep isRefCounted first as it's cheaper.
if (isRefCounted(Class))
return false;
- llvm::Optional<bool> IsRefCountable = isRefCountable(Class);
+ std::optional<bool> IsRefCountable = isRefCountable(Class);
if (!IsRefCountable)
- return llvm::None;
+ return std::nullopt;
return (*IsRefCountable);
}
-llvm::Optional<bool> isUncountedPtr(const Type *T) {
+std::optional<bool> isUncountedPtr(const Type* T)
+{
assert(T);
if (T->isPointerType() || T->isReferenceType()) {
@@ -135,7 +143,8 @@ llvm::Optional<bool> isUncountedPtr(const Type *T) {
return false;
}
-Optional<bool> isGetterOfRefCounted(const CXXMethodDecl *M) {
+std::optional<bool> isGetterOfRefCounted(const CXXMethodDecl* M)
+{
assert(M);
if (isa<CXXMethodDecl>(M)) {
@@ -183,8 +192,7 @@ bool isPtrConversion(const FunctionDecl *F) {
// FIXME: check # of params == 1
const auto FunctionName = safeGetName(F);
if (FunctionName == "getPtr" || FunctionName == "WeakPtr" ||
- FunctionName == "makeWeakPtr"
-
+ FunctionName == "dynamicDowncast"
|| FunctionName == "downcast" || FunctionName == "bitwise_cast")
return true;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h
index 730a59977175..45b21cc09184 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/PtrTypesSemantics.h
@@ -10,12 +10,12 @@
#define LLVM_CLANG_ANALYZER_WEBKIT_PTRTYPESEMANTICS_H
#include "llvm/ADT/APInt.h"
+#include <optional>
namespace clang {
class CXXBaseSpecifier;
class CXXMethodDecl;
class CXXRecordDecl;
-class Expr;
class FunctionDecl;
class Type;
@@ -26,32 +26,32 @@ class Type;
// In WebKit there are two ref-counted templated smart pointers: RefPtr<T> and
// Ref<T>.
-/// \returns CXXRecordDecl of the base if the type is ref-countable, nullptr if
-/// not, None if inconclusive.
-llvm::Optional<const clang::CXXRecordDecl *>
-isRefCountable(const clang::CXXBaseSpecifier *Base);
+/// \returns CXXRecordDecl of the base if the type has ref as a public method,
+/// nullptr if not, std::nullopt if inconclusive.
+std::optional<const clang::CXXRecordDecl *>
+hasPublicMethodInBase(const CXXBaseSpecifier *Base, const char *NameToMatch);
-/// \returns true if \p Class is ref-countable, false if not, None if
+/// \returns true if \p Class is ref-countable, false if not, std::nullopt if
/// inconclusive.
-llvm::Optional<bool> isRefCountable(const clang::CXXRecordDecl *Class);
+std::optional<bool> isRefCountable(const clang::CXXRecordDecl* Class);
/// \returns true if \p Class is ref-counted, false if not.
bool isRefCounted(const clang::CXXRecordDecl *Class);
/// \returns true if \p Class is ref-countable AND not ref-counted, false if
-/// not, None if inconclusive.
-llvm::Optional<bool> isUncounted(const clang::CXXRecordDecl *Class);
+/// not, std::nullopt if inconclusive.
+std::optional<bool> isUncounted(const clang::CXXRecordDecl* Class);
/// \returns true if \p T is either a raw pointer or reference to an uncounted
-/// class, false if not, None if inconclusive.
-llvm::Optional<bool> isUncountedPtr(const clang::Type *T);
+/// class, false if not, std::nullopt if inconclusive.
+std::optional<bool> isUncountedPtr(const clang::Type* T);
/// \returns true if \p F creates ref-countable object from uncounted parameter,
/// false if not.
bool isCtorOfRefCounted(const clang::FunctionDecl *F);
/// \returns true if \p M is getter of a ref-counted class, false if not.
-llvm::Optional<bool> isGetterOfRefCounted(const clang::CXXMethodDecl *Method);
+std::optional<bool> isGetterOfRefCounted(const clang::CXXMethodDecl* Method);
/// \returns true if \p F is a conversion between ref-countable or ref-counted
/// pointer types.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp
index fa9ece217cc0..d879c110b75d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/RefCntblBaseVirtualDtorChecker.cpp
@@ -14,6 +14,7 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -76,15 +77,53 @@ public:
(AccSpec == AS_none && RD->isClass()))
return false;
- llvm::Optional<const CXXRecordDecl *> RefCntblBaseRD =
- isRefCountable(Base);
- if (!RefCntblBaseRD || !(*RefCntblBaseRD))
+ auto hasRefInBase = clang::hasPublicMethodInBase(Base, "ref");
+ auto hasDerefInBase = clang::hasPublicMethodInBase(Base, "deref");
+
+ bool hasRef = hasRefInBase && *hasRefInBase != nullptr;
+ bool hasDeref = hasDerefInBase && *hasDerefInBase != nullptr;
+
+ QualType T = Base->getType();
+ if (T.isNull())
+ return false;
+
+ const CXXRecordDecl *C = T->getAsCXXRecordDecl();
+ if (!C)
+ return false;
+ bool AnyInconclusiveBase = false;
+ const auto hasPublicRefInBase =
+ [&AnyInconclusiveBase](const CXXBaseSpecifier *Base,
+ CXXBasePath &) {
+ auto hasRefInBase = clang::hasPublicMethodInBase(Base, "ref");
+ if (!hasRefInBase) {
+ AnyInconclusiveBase = true;
+ return false;
+ }
+ return (*hasRefInBase) != nullptr;
+ };
+ const auto hasPublicDerefInBase = [&AnyInconclusiveBase](
+ const CXXBaseSpecifier *Base,
+ CXXBasePath &) {
+ auto hasDerefInBase = clang::hasPublicMethodInBase(Base, "deref");
+ if (!hasDerefInBase) {
+ AnyInconclusiveBase = true;
+ return false;
+ }
+ return (*hasDerefInBase) != nullptr;
+ };
+ CXXBasePaths Paths;
+ Paths.setOrigin(C);
+ hasRef = hasRef || C->lookupInBases(hasPublicRefInBase, Paths,
+ /*LookupInDependent =*/true);
+ hasDeref = hasDeref || C->lookupInBases(hasPublicDerefInBase, Paths,
+ /*LookupInDependent =*/true);
+ if (AnyInconclusiveBase || !hasRef || !hasDeref)
return false;
- const auto *Dtor = (*RefCntblBaseRD)->getDestructor();
+ const auto *Dtor = C->getDestructor();
if (!Dtor || !Dtor->isVirtual()) {
ProblematicBaseSpecifier = Base;
- ProblematicBaseClass = *RefCntblBaseRD;
+ ProblematicBaseClass = C;
return true;
}
@@ -114,7 +153,7 @@ public:
return true;
const auto Kind = RD->getTagKind();
- if (Kind != TTK_Struct && Kind != TTK_Class)
+ if (Kind != TagTypeKind::Struct && Kind != TagTypeKind::Class)
return true;
// Ignore CXXRecords that come from system headers.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp
index d70bd9489d2c..31ccae8b097b 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedCallArgsChecker.cpp
@@ -18,7 +18,7 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
-#include "llvm/ADT/DenseSet.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -68,8 +68,7 @@ public:
if (auto *F = CE->getDirectCallee()) {
// Skip the first argument for overloaded member operators (e. g. lambda
// or std::function call operator).
- unsigned ArgIdx =
- isa<CXXOperatorCallExpr>(CE) && dyn_cast_or_null<CXXMethodDecl>(F);
+ unsigned ArgIdx = isa<CXXOperatorCallExpr>(CE) && isa_and_nonnull<CXXMethodDecl>(F);
for (auto P = F->param_begin();
// FIXME: Also check variadic function parameters.
@@ -86,7 +85,7 @@ public:
continue; // FIXME? Should we bail?
// FIXME: more complex types (arrays, references to raw pointers, etc)
- Optional<bool> IsUncounted = isUncountedPtr(ArgType);
+ std::optional<bool> IsUncounted = isUncountedPtr(ArgType);
if (!IsUncounted || !(*IsUncounted))
continue;
@@ -149,7 +148,7 @@ public:
auto name = safeGetName(Callee);
if (name == "adoptRef" || name == "getPtr" || name == "WeakPtr" ||
- name == "makeWeakPtr" || name == "downcast" || name == "bitwise_cast" ||
+ name == "dynamicDowncast" || name == "downcast" || name == "bitwise_cast" ||
name == "is" || name == "equal" || name == "hash" ||
name == "isType"
// FIXME: Most/all of these should be implemented via attributes.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLambdaCapturesChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLambdaCapturesChecker.cpp
index deebbd603b2c..a226a01ec0a5 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLambdaCapturesChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLambdaCapturesChecker.cpp
@@ -14,6 +14,7 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -24,7 +25,7 @@ class UncountedLambdaCapturesChecker
private:
BugType Bug{this, "Lambda capture of uncounted variable",
"WebKit coding guidelines"};
- mutable BugReporter *BR;
+ mutable BugReporter *BR = nullptr;
public:
void checkASTDecl(const TranslationUnitDecl *TUD, AnalysisManager &MGR,
@@ -57,18 +58,18 @@ public:
void visitLambdaExpr(LambdaExpr *L) const {
for (const LambdaCapture &C : L->captures()) {
if (C.capturesVariable()) {
- VarDecl *CapturedVar = C.getCapturedVar();
+ ValueDecl *CapturedVar = C.getCapturedVar();
if (auto *CapturedVarType = CapturedVar->getType().getTypePtrOrNull()) {
- Optional<bool> IsUncountedPtr = isUncountedPtr(CapturedVarType);
- if (IsUncountedPtr && *IsUncountedPtr) {
- reportBug(C, CapturedVar, CapturedVarType);
- }
+ std::optional<bool> IsUncountedPtr = isUncountedPtr(CapturedVarType);
+ if (IsUncountedPtr && *IsUncountedPtr) {
+ reportBug(C, CapturedVar, CapturedVarType);
+ }
}
}
}
}
- void reportBug(const LambdaCapture &Capture, VarDecl *CapturedVar,
+ void reportBug(const LambdaCapture &Capture, ValueDecl *CapturedVar,
const Type *T) const {
assert(CapturedVar);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLocalVarsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLocalVarsChecker.cpp
index 7e86f28cb70f..5a72f53b12ed 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLocalVarsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/WebKit/UncountedLocalVarsChecker.cpp
@@ -19,7 +19,7 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
-#include "llvm/ADT/DenseSet.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -169,7 +169,7 @@ public:
if (!ArgType)
return;
- Optional<bool> IsUncountedPtr = isUncountedPtr(ArgType);
+ std::optional<bool> IsUncountedPtr = isUncountedPtr(ArgType);
if (IsUncountedPtr && *IsUncountedPtr) {
const Expr *const InitExpr = V->getInit();
if (!InitExpr)
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Yaml.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Yaml.h
index ec612dde3b8b..b2d17420686e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Yaml.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Yaml.h
@@ -17,6 +17,7 @@
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/YAMLTraits.h"
+#include <optional>
namespace clang {
namespace ento {
@@ -25,20 +26,20 @@ namespace ento {
/// template parameter must have a yaml MappingTraits.
/// Emit diagnostic error in case of any failure.
template <class T, class Checker>
-llvm::Optional<T> getConfiguration(CheckerManager &Mgr, Checker *Chk,
- StringRef Option, StringRef ConfigFile) {
+std::optional<T> getConfiguration(CheckerManager &Mgr, Checker *Chk,
+ StringRef Option, StringRef ConfigFile) {
if (ConfigFile.trim().empty())
- return None;
+ return std::nullopt;
llvm::vfs::FileSystem *FS = llvm::vfs::getRealFileSystem().get();
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Buffer =
FS->getBufferForFile(ConfigFile.str());
- if (std::error_code ec = Buffer.getError()) {
+ if (Buffer.getError()) {
Mgr.reportInvalidCheckerOptionValue(Chk, Option,
"a valid filename instead of '" +
std::string(ConfigFile) + "'");
- return None;
+ return std::nullopt;
}
llvm::yaml::Input Input(Buffer.get()->getBuffer());
@@ -48,7 +49,7 @@ llvm::Optional<T> getConfiguration(CheckerManager &Mgr, Checker *Chk,
if (std::error_code ec = Input.error()) {
Mgr.reportInvalidCheckerOptionValue(Chk, Option,
"a valid yaml file: " + ec.message());
- return None;
+ return std::nullopt;
}
return Config;
@@ -57,4 +58,4 @@ llvm::Optional<T> getConfiguration(CheckerManager &Mgr, Checker *Chk,
} // namespace ento
} // namespace clang
-#endif // LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MOVE_H
+#endif // LLVM_CLANG_LIB_STATICANALYZER_CHECKER_YAML_H
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/InvalidPtrChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/InvalidPtrChecker.cpp
new file mode 100644
index 000000000000..e5dd907c660d
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/InvalidPtrChecker.cpp
@@ -0,0 +1,353 @@
+//== InvalidPtrChecker.cpp ------------------------------------- -*- C++ -*--=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines InvalidPtrChecker which finds usages of possibly
+// invalidated pointer.
+// CERT SEI Rules ENV31-C and ENV34-C
+// For more information see:
+// https://wiki.sei.cmu.edu/confluence/x/8tYxBQ
+// https://wiki.sei.cmu.edu/confluence/x/5NUxBQ
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class InvalidPtrChecker
+ : public Checker<check::Location, check::BeginFunction, check::PostCall> {
+private:
+ // For accurate emission of NoteTags, the BugType of this checker should have
+ // a unique address.
+ BugType InvalidPtrBugType{this, "Use of invalidated pointer",
+ categories::MemoryError};
+
+ void EnvpInvalidatingCall(const CallEvent &Call, CheckerContext &C) const;
+
+ using HandlerFn = void (InvalidPtrChecker::*)(const CallEvent &Call,
+ CheckerContext &C) const;
+
+ // SEI CERT ENV31-C
+
+ // If set to true, consider getenv calls as invalidating operations on the
+ // environment variable buffer. This is implied in the standard, but in
+ // practice does not cause problems (in the commonly used environments).
+ bool InvalidatingGetEnv = false;
+
+ // GetEnv can be treated invalidating and non-invalidating as well.
+ const CallDescription GetEnvCall{{"getenv"}, 1};
+
+ const CallDescriptionMap<HandlerFn> EnvpInvalidatingFunctions = {
+ {{{"setenv"}, 3}, &InvalidPtrChecker::EnvpInvalidatingCall},
+ {{{"unsetenv"}, 1}, &InvalidPtrChecker::EnvpInvalidatingCall},
+ {{{"putenv"}, 1}, &InvalidPtrChecker::EnvpInvalidatingCall},
+ {{{"_putenv_s"}, 2}, &InvalidPtrChecker::EnvpInvalidatingCall},
+ {{{"_wputenv_s"}, 2}, &InvalidPtrChecker::EnvpInvalidatingCall},
+ };
+
+ void postPreviousReturnInvalidatingCall(const CallEvent &Call,
+ CheckerContext &C) const;
+
+ // SEI CERT ENV34-C
+ const CallDescriptionMap<HandlerFn> PreviousCallInvalidatingFunctions = {
+ {{{"setlocale"}, 2},
+ &InvalidPtrChecker::postPreviousReturnInvalidatingCall},
+ {{{"strerror"}, 1},
+ &InvalidPtrChecker::postPreviousReturnInvalidatingCall},
+ {{{"localeconv"}, 0},
+ &InvalidPtrChecker::postPreviousReturnInvalidatingCall},
+ {{{"asctime"}, 1},
+ &InvalidPtrChecker::postPreviousReturnInvalidatingCall},
+ };
+
+ // The private members of this checker corresponding to commandline options
+ // are set in this function.
+ friend void ento::registerInvalidPtrChecker(CheckerManager &);
+
+public:
+ // Obtain the environment pointer from 'main()' (if present).
+ void checkBeginFunction(CheckerContext &C) const;
+
+ // Handle functions in EnvpInvalidatingFunctions, that invalidate environment
+ // pointer from 'main()'
+ // Handle functions in PreviousCallInvalidatingFunctions.
+ // Also, check if invalidated region is passed to a
+ // conservatively evaluated function call as an argument.
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+
+ // Check if invalidated region is being dereferenced.
+ void checkLocation(SVal l, bool isLoad, const Stmt *S,
+ CheckerContext &C) const;
+
+private:
+ const NoteTag *createEnvInvalidationNote(CheckerContext &C,
+ ProgramStateRef State,
+ StringRef FunctionName) const;
+};
+
+} // namespace
+
+// Set of memory regions that were invalidated
+REGISTER_SET_WITH_PROGRAMSTATE(InvalidMemoryRegions, const MemRegion *)
+
+// Stores the region of the environment pointer of 'main' (if present).
+REGISTER_TRAIT_WITH_PROGRAMSTATE(MainEnvPtrRegion, const MemRegion *)
+
+// Stores the regions of environments returned by getenv calls.
+REGISTER_SET_WITH_PROGRAMSTATE(GetenvEnvPtrRegions, const MemRegion *)
+
+// Stores key-value pairs, where key is function declaration and value is
+// pointer to memory region returned by previous call of this function
+REGISTER_MAP_WITH_PROGRAMSTATE(PreviousCallResultMap, const FunctionDecl *,
+ const MemRegion *)
+
+const NoteTag *InvalidPtrChecker::createEnvInvalidationNote(
+ CheckerContext &C, ProgramStateRef State, StringRef FunctionName) const {
+
+ const MemRegion *MainRegion = State->get<MainEnvPtrRegion>();
+ const auto GetenvRegions = State->get<GetenvEnvPtrRegions>();
+
+ return C.getNoteTag([this, MainRegion, GetenvRegions,
+ FunctionName = std::string{FunctionName}](
+ PathSensitiveBugReport &BR, llvm::raw_ostream &Out) {
+ // Only handle the BugType of this checker.
+ if (&BR.getBugType() != &InvalidPtrBugType)
+ return;
+
+ // Mark all regions that were interesting before as NOT interesting now
+ // to avoid extra notes coming from invalidation points higher up the
+ // bugpath. This ensures that only the last invalidation point is marked
+ // with a note tag.
+ llvm::SmallVector<std::string, 2> InvalidLocationNames;
+ if (BR.isInteresting(MainRegion)) {
+ BR.markNotInteresting(MainRegion);
+ InvalidLocationNames.push_back("the environment parameter of 'main'");
+ }
+ bool InterestingGetenvFound = false;
+ for (const MemRegion *MR : GetenvRegions) {
+ if (BR.isInteresting(MR)) {
+ BR.markNotInteresting(MR);
+ if (!InterestingGetenvFound) {
+ InterestingGetenvFound = true;
+ InvalidLocationNames.push_back(
+ "the environment returned by 'getenv'");
+ }
+ }
+ }
+
+ // Emit note tag message.
+ if (InvalidLocationNames.size() >= 1)
+ Out << '\'' << FunctionName << "' call may invalidate "
+ << InvalidLocationNames[0];
+ if (InvalidLocationNames.size() == 2)
+ Out << ", and " << InvalidLocationNames[1];
+ });
+}
+
+void InvalidPtrChecker::EnvpInvalidatingCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ // This callevent invalidates all previously generated pointers to the
+ // environment.
+ ProgramStateRef State = C.getState();
+ if (const MemRegion *MainEnvPtr = State->get<MainEnvPtrRegion>())
+ State = State->add<InvalidMemoryRegions>(MainEnvPtr);
+ for (const MemRegion *EnvPtr : State->get<GetenvEnvPtrRegions>())
+ State = State->add<InvalidMemoryRegions>(EnvPtr);
+
+ StringRef FunctionName = Call.getCalleeIdentifier()->getName();
+ const NoteTag *InvalidationNote =
+ createEnvInvalidationNote(C, State, FunctionName);
+
+ C.addTransition(State, InvalidationNote);
+}
+
+void InvalidPtrChecker::postPreviousReturnInvalidatingCall(
+ const CallEvent &Call, CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+
+ const NoteTag *Note = nullptr;
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
+ // Invalidate the region of the previously returned pointer - if there was
+ // one.
+ if (const MemRegion *const *Reg = State->get<PreviousCallResultMap>(FD)) {
+ const MemRegion *PrevReg = *Reg;
+ State = State->add<InvalidMemoryRegions>(PrevReg);
+ Note = C.getNoteTag([this, PrevReg, FD](PathSensitiveBugReport &BR,
+ llvm::raw_ostream &Out) {
+ if (!BR.isInteresting(PrevReg) || &BR.getBugType() != &InvalidPtrBugType)
+ return;
+ Out << '\'';
+ FD->getNameForDiagnostic(Out, FD->getASTContext().getLangOpts(), true);
+ Out << "' call may invalidate the result of the previous " << '\'';
+ FD->getNameForDiagnostic(Out, FD->getASTContext().getLangOpts(), true);
+ Out << '\'';
+ });
+ }
+
+ const LocationContext *LCtx = C.getLocationContext();
+ const auto *CE = cast<CallExpr>(Call.getOriginExpr());
+
+ // Function call will return a pointer to the new symbolic region.
+ DefinedOrUnknownSVal RetVal = C.getSValBuilder().conjureSymbolVal(
+ CE, LCtx, CE->getType(), C.blockCount());
+ State = State->BindExpr(CE, LCtx, RetVal);
+
+ // Remember to this region.
+ const auto *SymRegOfRetVal = cast<SymbolicRegion>(RetVal.getAsRegion());
+ const MemRegion *MR = SymRegOfRetVal->getBaseRegion();
+ State = State->set<PreviousCallResultMap>(FD, MR);
+
+ ExplodedNode *Node = C.addTransition(State, Note);
+ const NoteTag *PreviousCallNote = C.getNoteTag(
+ [this, MR](PathSensitiveBugReport &BR, llvm::raw_ostream &Out) {
+ if (!BR.isInteresting(MR) || &BR.getBugType() != &InvalidPtrBugType)
+ return;
+ Out << "previous function call was here";
+ });
+
+ C.addTransition(State, Node, PreviousCallNote);
+}
+
+// TODO: This seems really ugly. Simplify this.
+static const MemRegion *findInvalidatedSymbolicBase(ProgramStateRef State,
+ const MemRegion *Reg) {
+ while (Reg) {
+ if (State->contains<InvalidMemoryRegions>(Reg))
+ return Reg;
+ const auto *SymBase = Reg->getSymbolicBase();
+ if (!SymBase)
+ break;
+ const auto *SRV = dyn_cast<SymbolRegionValue>(SymBase->getSymbol());
+ if (!SRV)
+ break;
+ Reg = SRV->getRegion();
+ if (const auto *VarReg = dyn_cast<VarRegion>(SRV->getRegion()))
+ Reg = VarReg;
+ }
+ return nullptr;
+}
+
+// Handle functions in EnvpInvalidatingFunctions, that invalidate environment
+// pointer from 'main()' Also, check if invalidated region is passed to a
+// function call as an argument.
+void InvalidPtrChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+
+ ProgramStateRef State = C.getState();
+
+ // Model 'getenv' calls
+ if (GetEnvCall.matches(Call)) {
+ const MemRegion *Region = Call.getReturnValue().getAsRegion();
+ if (Region) {
+ State = State->add<GetenvEnvPtrRegions>(Region);
+ C.addTransition(State);
+ }
+ }
+
+ // Check if function invalidates 'envp' argument of 'main'
+ if (const auto *Handler = EnvpInvalidatingFunctions.lookup(Call))
+ (this->**Handler)(Call, C);
+
+ // Check if function invalidates the result of previous call
+ if (const auto *Handler = PreviousCallInvalidatingFunctions.lookup(Call))
+ (this->**Handler)(Call, C);
+
+ // If pedantic mode is on, regard 'getenv' calls invalidating as well
+ if (InvalidatingGetEnv && GetEnvCall.matches(Call))
+ postPreviousReturnInvalidatingCall(Call, C);
+
+ // Check if one of the arguments of the function call is invalidated
+
+ // If call was inlined, don't report invalidated argument
+ if (C.wasInlined)
+ return;
+
+ for (unsigned I = 0, NumArgs = Call.getNumArgs(); I < NumArgs; ++I) {
+
+ if (const auto *SR = dyn_cast_or_null<SymbolicRegion>(
+ Call.getArgSVal(I).getAsRegion())) {
+ if (const MemRegion *InvalidatedSymbolicBase =
+ findInvalidatedSymbolicBase(State, SR)) {
+ ExplodedNode *ErrorNode = C.generateNonFatalErrorNode();
+ if (!ErrorNode)
+ return;
+
+ SmallString<256> Msg;
+ llvm::raw_svector_ostream Out(Msg);
+ Out << "use of invalidated pointer '";
+ Call.getArgExpr(I)->printPretty(Out, /*Helper=*/nullptr,
+ C.getASTContext().getPrintingPolicy());
+ Out << "' in a function call";
+
+ auto Report = std::make_unique<PathSensitiveBugReport>(
+ InvalidPtrBugType, Out.str(), ErrorNode);
+ Report->markInteresting(InvalidatedSymbolicBase);
+ Report->addRange(Call.getArgSourceRange(I));
+ C.emitReport(std::move(Report));
+ }
+ }
+ }
+}
+
+// Obtain the environment pointer from 'main()', if present.
+void InvalidPtrChecker::checkBeginFunction(CheckerContext &C) const {
+ if (!C.inTopFrame())
+ return;
+
+ const auto *FD = dyn_cast<FunctionDecl>(C.getLocationContext()->getDecl());
+ if (!FD || FD->param_size() != 3 || !FD->isMain())
+ return;
+
+ ProgramStateRef State = C.getState();
+ const MemRegion *EnvpReg =
+ State->getRegion(FD->parameters()[2], C.getLocationContext());
+
+ // Save the memory region pointed by the environment pointer parameter of
+ // 'main'.
+ C.addTransition(State->set<MainEnvPtrRegion>(EnvpReg));
+}
+
+// Check if invalidated region is being dereferenced.
+void InvalidPtrChecker::checkLocation(SVal Loc, bool isLoad, const Stmt *S,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+
+ // Ignore memory operations involving 'non-invalidated' locations.
+ const MemRegion *InvalidatedSymbolicBase =
+ findInvalidatedSymbolicBase(State, Loc.getAsRegion());
+ if (!InvalidatedSymbolicBase)
+ return;
+
+ ExplodedNode *ErrorNode = C.generateNonFatalErrorNode();
+ if (!ErrorNode)
+ return;
+
+ auto Report = std::make_unique<PathSensitiveBugReport>(
+ InvalidPtrBugType, "dereferencing an invalid pointer", ErrorNode);
+ Report->markInteresting(InvalidatedSymbolicBase);
+ C.emitReport(std::move(Report));
+}
+
+void ento::registerInvalidPtrChecker(CheckerManager &Mgr) {
+ auto *Checker = Mgr.registerChecker<InvalidPtrChecker>();
+ Checker->InvalidatingGetEnv =
+ Mgr.getAnalyzerOptions().getCheckerBooleanOption(Checker,
+ "InvalidatingGetEnv");
+}
+
+bool ento::shouldRegisterInvalidPtrChecker(const CheckerManager &) {
+ return true;
+}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/PutenvWithAutoChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/PutenvWithAutoChecker.cpp
index 1c67bbd77ec8..eae162cda693 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/PutenvWithAutoChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/PutenvWithAutoChecker.cpp
@@ -17,6 +17,7 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
@@ -29,7 +30,7 @@ class PutenvWithAutoChecker : public Checker<check::PostCall> {
private:
BugType BT{this, "'putenv' function should not be called with auto variables",
categories::SecurityError};
- const CallDescription Putenv{"putenv", 1};
+ const CallDescription Putenv{{"putenv"}, 1};
public:
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
@@ -38,7 +39,7 @@ public:
void PutenvWithAutoChecker::checkPostCall(const CallEvent &Call,
CheckerContext &C) const {
- if (!Call.isCalled(Putenv))
+ if (!Putenv.matches(Call))
return;
SVal ArgV = Call.getArgSVal(0);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/APSIntType.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/APSIntType.cpp
index a1de10c89ed9..1185cdaa044a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/APSIntType.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/APSIntType.cpp
@@ -23,7 +23,7 @@ APSIntType::testInRange(const llvm::APSInt &Value,
unsigned MinBits;
if (AllowSignConversions) {
if (Value.isSigned() && !IsUnsigned)
- MinBits = Value.getMinSignedBits();
+ MinBits = Value.getSignificantBits();
else
MinBits = Value.getActiveBits();
@@ -33,7 +33,7 @@ APSIntType::testInRange(const llvm::APSInt &Value,
// Unsigned integers can be converted to unsigned integers of the same width
// or signed integers with one more bit.
if (Value.isSigned())
- MinBits = Value.getMinSignedBits() - IsUnsigned;
+ MinBits = Value.getSignificantBits() - IsUnsigned;
else
MinBits = Value.getActiveBits() + !IsUnsigned;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
index ecfc7106560e..f9750db7b501 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
@@ -50,17 +50,14 @@ AnalysisManager::AnalysisManager(ASTContext &ASTCtx, Preprocessor &PP,
AnalysisManager::~AnalysisManager() {
FlushDiagnostics();
- for (PathDiagnosticConsumers::iterator I = PathConsumers.begin(),
- E = PathConsumers.end(); I != E; ++I) {
- delete *I;
+ for (PathDiagnosticConsumer *Consumer : PathConsumers) {
+ delete Consumer;
}
}
void AnalysisManager::FlushDiagnostics() {
PathDiagnosticConsumer::FilesMade filesMade;
- for (PathDiagnosticConsumers::iterator I = PathConsumers.begin(),
- E = PathConsumers.end();
- I != E; ++I) {
- (*I)->FlushDiagnostics(&filesMade);
+ for (PathDiagnosticConsumer *Consumer : PathConsumers) {
+ Consumer->FlushDiagnostics(&filesMade);
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
index 8cd7f75e4e38..86ef4a568665 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
@@ -23,6 +23,7 @@
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstddef>
+#include <optional>
#include <utility>
#include <vector>
@@ -64,34 +65,44 @@ void AnalyzerOptions::printFormattedEntry(
ExplorationStrategyKind
AnalyzerOptions::getExplorationStrategy() const {
auto K =
- llvm::StringSwitch<llvm::Optional<ExplorationStrategyKind>>(
- ExplorationStrategy)
+ llvm::StringSwitch<std::optional<ExplorationStrategyKind>>(
+ ExplorationStrategy)
.Case("dfs", ExplorationStrategyKind::DFS)
.Case("bfs", ExplorationStrategyKind::BFS)
- .Case("unexplored_first",
- ExplorationStrategyKind::UnexploredFirst)
+ .Case("unexplored_first", ExplorationStrategyKind::UnexploredFirst)
.Case("unexplored_first_queue",
ExplorationStrategyKind::UnexploredFirstQueue)
.Case("unexplored_first_location_queue",
ExplorationStrategyKind::UnexploredFirstLocationQueue)
.Case("bfs_block_dfs_contents",
ExplorationStrategyKind::BFSBlockDFSContents)
- .Default(None);
- assert(K.hasValue() && "User mode is invalid.");
- return K.getValue();
+ .Default(std::nullopt);
+ assert(K && "User mode is invalid.");
+ return *K;
+}
+
+CTUPhase1InliningKind AnalyzerOptions::getCTUPhase1Inlining() const {
+ auto K = llvm::StringSwitch<std::optional<CTUPhase1InliningKind>>(
+ CTUPhase1InliningMode)
+ .Case("none", CTUPhase1InliningKind::None)
+ .Case("small", CTUPhase1InliningKind::Small)
+ .Case("all", CTUPhase1InliningKind::All)
+ .Default(std::nullopt);
+ assert(K && "CTU inlining mode is invalid.");
+ return *K;
}
IPAKind AnalyzerOptions::getIPAMode() const {
- auto K = llvm::StringSwitch<llvm::Optional<IPAKind>>(IPAMode)
- .Case("none", IPAK_None)
- .Case("basic-inlining", IPAK_BasicInlining)
- .Case("inlining", IPAK_Inlining)
- .Case("dynamic", IPAK_DynamicDispatch)
- .Case("dynamic-bifurcate", IPAK_DynamicDispatchBifurcate)
- .Default(None);
- assert(K.hasValue() && "IPA Mode is invalid.");
-
- return K.getValue();
+ auto K = llvm::StringSwitch<std::optional<IPAKind>>(IPAMode)
+ .Case("none", IPAK_None)
+ .Case("basic-inlining", IPAK_BasicInlining)
+ .Case("inlining", IPAK_Inlining)
+ .Case("dynamic", IPAK_DynamicDispatch)
+ .Case("dynamic-bifurcate", IPAK_DynamicDispatchBifurcate)
+ .Default(std::nullopt);
+ assert(K && "IPA Mode is invalid.");
+
+ return *K;
}
bool
@@ -100,16 +111,15 @@ AnalyzerOptions::mayInlineCXXMemberFunction(
if (getIPAMode() < IPAK_Inlining)
return false;
- auto K =
- llvm::StringSwitch<llvm::Optional<CXXInlineableMemberKind>>(
- CXXMemberInliningMode)
- .Case("constructors", CIMK_Constructors)
- .Case("destructors", CIMK_Destructors)
- .Case("methods", CIMK_MemberFunctions)
- .Case("none", CIMK_None)
- .Default(None);
+ auto K = llvm::StringSwitch<std::optional<CXXInlineableMemberKind>>(
+ CXXMemberInliningMode)
+ .Case("constructors", CIMK_Constructors)
+ .Case("destructors", CIMK_Destructors)
+ .Case("methods", CIMK_MemberFunctions)
+ .Case("none", CIMK_None)
+ .Default(std::nullopt);
- assert(K.hasValue() && "Invalid c++ member function inlining mode.");
+ assert(K && "Invalid c++ member function inlining mode.");
return *K >= Param;
}
@@ -151,12 +161,12 @@ StringRef AnalyzerOptions::getCheckerStringOption(const ento::CheckerBase *C,
bool AnalyzerOptions::getCheckerBooleanOption(StringRef CheckerName,
StringRef OptionName,
bool SearchInParents) const {
- auto Ret = llvm::StringSwitch<llvm::Optional<bool>>(
- getCheckerStringOption(CheckerName, OptionName,
- SearchInParents))
- .Case("true", true)
- .Case("false", false)
- .Default(None);
+ auto Ret =
+ llvm::StringSwitch<std::optional<bool>>(
+ getCheckerStringOption(CheckerName, OptionName, SearchInParents))
+ .Case("true", true)
+ .Case("false", false)
+ .Default(std::nullopt);
assert(Ret &&
"This option should be either 'true' or 'false', and should've been "
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
index 40cdaef1bfa7..5c10e757244d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
@@ -97,8 +97,7 @@ const llvm::APSInt& BasicValueFactory::getValue(const llvm::APSInt& X) {
FoldNodeTy* P = APSIntSet.FindNodeOrInsertPos(ID, InsertPos);
if (!P) {
- P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
- new (P) FoldNodeTy(X);
+ P = new (BPAlloc) FoldNodeTy(X);
APSIntSet.InsertNode(P, InsertPos);
}
@@ -132,8 +131,7 @@ BasicValueFactory::getCompoundValData(QualType T,
CompoundValData* D = CompoundValDataSet.FindNodeOrInsertPos(ID, InsertPos);
if (!D) {
- D = (CompoundValData*) BPAlloc.Allocate<CompoundValData>();
- new (D) CompoundValData(T, Vals);
+ D = new (BPAlloc) CompoundValData(T, Vals);
CompoundValDataSet.InsertNode(D, InsertPos);
}
@@ -151,8 +149,7 @@ BasicValueFactory::getLazyCompoundValData(const StoreRef &store,
LazyCompoundValDataSet.FindNodeOrInsertPos(ID, InsertPos);
if (!D) {
- D = (LazyCompoundValData*) BPAlloc.Allocate<LazyCompoundValData>();
- new (D) LazyCompoundValData(store, region);
+ D = new (BPAlloc) LazyCompoundValData(store, region);
LazyCompoundValDataSet.InsertNode(D, InsertPos);
}
@@ -169,8 +166,7 @@ const PointerToMemberData *BasicValueFactory::getPointerToMemberData(
PointerToMemberDataSet.FindNodeOrInsertPos(ID, InsertPos);
if (!D) {
- D = (PointerToMemberData *)BPAlloc.Allocate<PointerToMemberData>();
- new (D) PointerToMemberData(ND, L);
+ D = new (BPAlloc) PointerToMemberData(ND, L);
PointerToMemberDataSet.InsertNode(D, InsertPos);
}
@@ -276,7 +272,7 @@ BasicValueFactory::evalAPSInt(BinaryOperator::Opcode Op,
// FIXME: This logic should probably go higher up, where we can
// test these conditions symbolically.
- if (V2.isSigned() && V2.isNegative())
+ if (V2.isNegative() || V2.getBitWidth() > 64)
return nullptr;
uint64_t Amt = V2.getZExtValue();
@@ -284,14 +280,6 @@ BasicValueFactory::evalAPSInt(BinaryOperator::Opcode Op,
if (Amt >= V1.getBitWidth())
return nullptr;
- if (!Ctx.getLangOpts().CPlusPlus20) {
- if (V1.isSigned() && V1.isNegative())
- return nullptr;
-
- if (V1.isSigned() && Amt > V1.countLeadingZeros())
- return nullptr;
- }
-
return &getValue( V1.operator<<( (unsigned) Amt ));
}
@@ -299,7 +287,7 @@ BasicValueFactory::evalAPSInt(BinaryOperator::Opcode Op,
// FIXME: This logic should probably go higher up, where we can
// test these conditions symbolically.
- if (V2.isSigned() && V2.isNegative())
+ if (V2.isNegative() || V2.getBitWidth() > 64)
return nullptr;
uint64_t Amt = V2.getZExtValue();
@@ -358,8 +346,7 @@ BasicValueFactory::getPersistentSValWithData(const SVal& V, uintptr_t Data) {
FoldNodeTy* P = Map.FindNodeOrInsertPos(ID, InsertPos);
if (!P) {
- P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
- new (P) FoldNodeTy(std::make_pair(V, Data));
+ P = new (BPAlloc) FoldNodeTy(std::make_pair(V, Data));
Map.InsertNode(P, InsertPos);
}
@@ -383,8 +370,7 @@ BasicValueFactory::getPersistentSValPair(const SVal& V1, const SVal& V2) {
FoldNodeTy* P = Map.FindNodeOrInsertPos(ID, InsertPos);
if (!P) {
- P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
- new (P) FoldNodeTy(std::make_pair(V1, V2));
+ P = new (BPAlloc) FoldNodeTy(std::make_pair(V1, V2));
Map.InsertNode(P, InsertPos);
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
index d6f69ae03afe..f3e0a5f9f314 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -12,12 +12,15 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/AST/ASTTypeTraits.h"
+#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ParentMap.h"
+#include "clang/AST/ParentMapContext.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
@@ -46,8 +49,6 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
@@ -66,6 +67,7 @@
#include <cstddef>
#include <iterator>
#include <memory>
+#include <optional>
#include <queue>
#include <string>
#include <tuple>
@@ -188,6 +190,9 @@ public:
PathPieces &getMutablePieces() { return PD->getMutablePieces(); }
bool shouldAddPathEdges() const { return Consumer->shouldAddPathEdges(); }
+ bool shouldAddControlNotes() const {
+ return Consumer->shouldAddControlNotes();
+ }
bool shouldGenerateDiagnostics() const {
return Consumer->shouldGenerateDiagnostics();
}
@@ -218,8 +223,8 @@ class PathDiagnosticBuilder : public BugReporterContext {
public:
/// Find a non-invalidated report for a given equivalence class, and returns
/// a PathDiagnosticBuilder able to construct bug reports for different
- /// consumers. Returns None if no valid report is found.
- static Optional<PathDiagnosticBuilder>
+ /// consumers. Returns std::nullopt if no valid report is found.
+ static std::optional<PathDiagnosticBuilder>
findValidReport(ArrayRef<PathSensitiveBugReport *> &bugReports,
PathSensitiveBugReporter &Reporter);
@@ -294,26 +299,24 @@ std::string StackHintGeneratorForSymbol::getMessage(const ExplodedNode *N){
return {};
// Check if one of the parameters are set to the interesting symbol.
- unsigned ArgIndex = 0;
- for (CallExpr::const_arg_iterator I = CE->arg_begin(),
- E = CE->arg_end(); I != E; ++I, ++ArgIndex){
- SVal SV = N->getSVal(*I);
+ for (auto [Idx, ArgExpr] : llvm::enumerate(CE->arguments())) {
+ SVal SV = N->getSVal(ArgExpr);
// Check if the variable corresponding to the symbol is passed by value.
SymbolRef AS = SV.getAsLocSymbol();
if (AS == Sym) {
- return getMessageForArg(*I, ArgIndex);
+ return getMessageForArg(ArgExpr, Idx);
}
// Check if the parameter is a pointer to the symbol.
- if (Optional<loc::MemRegionVal> Reg = SV.getAs<loc::MemRegionVal>()) {
+ if (std::optional<loc::MemRegionVal> Reg = SV.getAs<loc::MemRegionVal>()) {
// Do not attempt to dereference void*.
- if ((*I)->getType()->isVoidPointerType())
+ if (ArgExpr->getType()->isVoidPointerType())
continue;
SVal PSV = N->getState()->getSVal(Reg->getRegion());
SymbolRef AS = PSV.getAsLocSymbol();
if (AS == Sym) {
- return getMessageForArg(*I, ArgIndex);
+ return getMessageForArg(ArgExpr, Idx);
}
}
}
@@ -534,10 +537,10 @@ static void removeEdgesToDefaultInitializers(PathPieces &Pieces) {
if (auto *CF = dyn_cast<PathDiagnosticControlFlowPiece>(I->get())) {
const Stmt *Start = CF->getStartLocation().asStmt();
const Stmt *End = CF->getEndLocation().asStmt();
- if (Start && isa<CXXDefaultInitExpr>(Start)) {
+ if (isa_and_nonnull<CXXDefaultInitExpr>(Start)) {
I = Pieces.erase(I);
continue;
- } else if (End && isa<CXXDefaultInitExpr>(End)) {
+ } else if (isa_and_nonnull<CXXDefaultInitExpr>(End)) {
PathPieces::iterator Next = std::next(I);
if (Next != E) {
if (auto *NextCF =
@@ -764,7 +767,7 @@ PathDiagnosticPieceRef PathDiagnosticBuilder::generateDiagForSwitchOP(
case Stmt::CaseStmtClass: {
os << "Control jumps to 'case ";
const auto *Case = cast<CaseStmt>(S);
- const Expr *LHS = Case->getLHS()->IgnoreParenCasts();
+ const Expr *LHS = Case->getLHS()->IgnoreParenImpCasts();
// Determine if it is an enum.
bool GetRawInt = true;
@@ -1030,7 +1033,7 @@ static bool isContainedByStmt(const ParentMap &PM, const Stmt *S,
static const Stmt *getStmtBeforeCond(const ParentMap &PM, const Stmt *Term,
const ExplodedNode *N) {
while (N) {
- Optional<StmtPoint> SP = N->getLocation().getAs<StmtPoint>();
+ std::optional<StmtPoint> SP = N->getLocation().getAs<StmtPoint>();
if (SP) {
const Stmt *S = SP->getStmt();
if (!isContainedByStmt(PM, Term, S))
@@ -1191,7 +1194,7 @@ void PathDiagnosticBuilder::generatePathDiagnosticsForNode(
"location context associated with the active path!");
// Have we encountered an exit from a function call?
- if (Optional<CallExitEnd> CE = P.getAs<CallExitEnd>()) {
+ if (std::optional<CallExitEnd> CE = P.getAs<CallExitEnd>()) {
// We are descending into a call (backwards). Construct
// a new call piece to contain the path pieces for that call.
@@ -1232,8 +1235,11 @@ void PathDiagnosticBuilder::generatePathDiagnosticsForNode(
} else if (auto BE = P.getAs<BlockEdge>()) {
- if (!C.shouldAddPathEdges()) {
+ if (C.shouldAddControlNotes()) {
generateMinimalDiagForBlockEdge(C, *BE);
+ }
+
+ if (!C.shouldAddPathEdges()) {
return;
}
@@ -1254,12 +1260,14 @@ void PathDiagnosticBuilder::generatePathDiagnosticsForNode(
// do-while statements are explicitly excluded here
auto p = std::make_shared<PathDiagnosticEventPiece>(
- L, "Looping back to the head "
- "of the loop");
+ L, "Looping back to the head of the loop");
p->setPrunable(true);
addEdgeToPath(C.getActivePath(), PrevLoc, p->getLocation());
- C.getActivePath().push_front(std::move(p));
+ // We might've added a very similar control node already
+ if (!C.shouldAddControlNotes()) {
+ C.getActivePath().push_front(std::move(p));
+ }
if (const auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
addEdgeToPath(C.getActivePath(), PrevLoc,
@@ -1300,10 +1308,13 @@ void PathDiagnosticBuilder::generatePathDiagnosticsForNode(
auto PE = std::make_shared<PathDiagnosticEventPiece>(L, str);
PE->setPrunable(true);
addEdgeToPath(C.getActivePath(), PrevLoc, PE->getLocation());
- C.getActivePath().push_front(std::move(PE));
+
+ // We might've added a very similar control node already
+ if (!C.shouldAddControlNotes()) {
+ C.getActivePath().push_front(std::move(PE));
+ }
}
- } else if (isa<BreakStmt>(Term) || isa<ContinueStmt>(Term) ||
- isa<GotoStmt>(Term)) {
+ } else if (isa<BreakStmt, ContinueStmt, GotoStmt>(Term)) {
PathDiagnosticLocation L(Term, SM, C.getCurrLocationContext());
addEdgeToPath(C.getActivePath(), PrevLoc, L);
}
@@ -1342,9 +1353,7 @@ static const Stmt *getStmtParent(const Stmt *S, const ParentMap &PM) {
if (!S)
break;
- if (isa<FullExpr>(S) ||
- isa<CXXBindTemporaryExpr>(S) ||
- isa<SubstNonTypeTemplateParmExpr>(S))
+ if (isa<FullExpr, CXXBindTemporaryExpr, SubstNonTypeTemplateParmExpr>(S))
continue;
break;
@@ -1446,7 +1455,7 @@ static void addContextEdges(PathPieces &pieces, const LocationContext *LC) {
break;
// If the source is in the same context, we're already good.
- if (llvm::find(SrcContexts, DstContext) != SrcContexts.end())
+ if (llvm::is_contained(SrcContexts, DstContext))
break;
// Update the subexpression node to point to the context edge.
@@ -1540,9 +1549,8 @@ static void simplifySimpleBranches(PathPieces &pieces) {
// We only perform this transformation for specific branch kinds.
// We don't want to do this for do..while, for example.
- if (!(isa<ForStmt>(s1Start) || isa<WhileStmt>(s1Start) ||
- isa<IfStmt>(s1Start) || isa<ObjCForCollectionStmt>(s1Start) ||
- isa<CXXForRangeStmt>(s1Start)))
+ if (!isa<ForStmt, WhileStmt, IfStmt, ObjCForCollectionStmt,
+ CXXForRangeStmt>(s1Start))
continue;
// Is s1End the branch condition?
@@ -1558,21 +1566,22 @@ static void simplifySimpleBranches(PathPieces &pieces) {
/// Returns the number of bytes in the given (character-based) SourceRange.
///
-/// If the locations in the range are not on the same line, returns None.
+/// If the locations in the range are not on the same line, returns
+/// std::nullopt.
///
/// Note that this does not do a precise user-visible character or column count.
-static Optional<size_t> getLengthOnSingleLine(const SourceManager &SM,
- SourceRange Range) {
+static std::optional<size_t> getLengthOnSingleLine(const SourceManager &SM,
+ SourceRange Range) {
SourceRange ExpansionRange(SM.getExpansionLoc(Range.getBegin()),
SM.getExpansionRange(Range.getEnd()).getEnd());
FileID FID = SM.getFileID(ExpansionRange.getBegin());
if (FID != SM.getFileID(ExpansionRange.getEnd()))
- return None;
+ return std::nullopt;
- Optional<MemoryBufferRef> Buffer = SM.getBufferOrNone(FID);
+ std::optional<MemoryBufferRef> Buffer = SM.getBufferOrNone(FID);
if (!Buffer)
- return None;
+ return std::nullopt;
unsigned BeginOffset = SM.getFileOffset(ExpansionRange.getBegin());
unsigned EndOffset = SM.getFileOffset(ExpansionRange.getEnd());
@@ -1583,15 +1592,15 @@ static Optional<size_t> getLengthOnSingleLine(const SourceManager &SM,
// SourceRange is covering a large or small amount of space in the user's
// editor.
if (Snippet.find_first_of("\r\n") != StringRef::npos)
- return None;
+ return std::nullopt;
// This isn't Unicode-aware, but it doesn't need to be.
return Snippet.size();
}
/// \sa getLengthOnSingleLine(SourceManager, SourceRange)
-static Optional<size_t> getLengthOnSingleLine(const SourceManager &SM,
- const Stmt *S) {
+static std::optional<size_t> getLengthOnSingleLine(const SourceManager &SM,
+ const Stmt *S) {
return getLengthOnSingleLine(SM, S->getSourceRange());
}
@@ -1650,9 +1659,9 @@ static void removeContextCycles(PathPieces &Path, const SourceManager &SM) {
if (s1Start && s2Start && s1Start == s2End && s2Start == s1End) {
const size_t MAX_SHORT_LINE_LENGTH = 80;
- Optional<size_t> s1Length = getLengthOnSingleLine(SM, s1Start);
+ std::optional<size_t> s1Length = getLengthOnSingleLine(SM, s1Start);
if (s1Length && *s1Length <= MAX_SHORT_LINE_LENGTH) {
- Optional<size_t> s2Length = getLengthOnSingleLine(SM, s2Start);
+ std::optional<size_t> s2Length = getLengthOnSingleLine(SM, s2Start);
if (s2Length && *s2Length <= MAX_SHORT_LINE_LENGTH) {
Path.erase(I);
I = Path.erase(NextI);
@@ -1711,7 +1720,7 @@ static void removePunyEdges(PathPieces &path, const SourceManager &SM,
std::swap(SecondLoc, FirstLoc);
SourceRange EdgeRange(FirstLoc, SecondLoc);
- Optional<size_t> ByteWidth = getLengthOnSingleLine(SM, EdgeRange);
+ std::optional<size_t> ByteWidth = getLengthOnSingleLine(SM, EdgeRange);
// If the statements are on different lines, continue.
if (!ByteWidth)
@@ -1875,7 +1884,7 @@ static bool optimizeEdges(const PathDiagnosticConstruct &C, PathPieces &path,
lexicalContains(PM, s1Start, s1End)) {
SourceRange EdgeRange(PieceI->getEndLocation().asLocation(),
PieceI->getStartLocation().asLocation());
- if (!getLengthOnSingleLine(SM, EdgeRange).hasValue())
+ if (!getLengthOnSingleLine(SM, EdgeRange))
removeEdge = true;
}
}
@@ -2093,8 +2102,6 @@ PathDiagnosticBuilder::generate(const PathDiagnosticConsumer *PDC) const {
void BugType::anchor() {}
-void BuiltinBug::anchor() {}
-
//===----------------------------------------------------------------------===//
// Methods for BugReport and subclasses.
//===----------------------------------------------------------------------===//
@@ -2135,15 +2142,14 @@ PathSensitiveBugReport::PathSensitiveBugReport(
"checkers to emit warnings, because checkers should depend on "
"*modeling*, not *diagnostics*.");
- assert(
- (bt.getCheckerName().startswith("debug") ||
- !isHidden(ErrorNode->getState()
- ->getAnalysisManager()
- .getCheckerManager()
- ->getCheckerRegistryData(),
- bt.getCheckerName())) &&
- "Hidden checkers musn't emit diagnostics as they are by definition "
- "non-user facing!");
+ assert((bt.getCheckerName().starts_with("debug") ||
+ !isHidden(ErrorNode->getState()
+ ->getAnalysisManager()
+ .getCheckerManager()
+ ->getCheckerRegistryData(),
+ bt.getCheckerName())) &&
+ "Hidden checkers musn't emit diagnostics as they are by definition "
+ "non-user facing!");
}
void PathSensitiveBugReport::addVisitor(
@@ -2302,7 +2308,7 @@ void PathSensitiveBugReport::markInteresting(const LocationContext *LC) {
InterestingLocationContexts.insert(LC);
}
-Optional<bugreporter::TrackingKind>
+std::optional<bugreporter::TrackingKind>
PathSensitiveBugReport::getInterestingnessKind(SVal V) const {
auto RKind = getInterestingnessKind(V.getAsRegion());
auto SKind = getInterestingnessKind(V.getAsSymbol());
@@ -2324,25 +2330,25 @@ PathSensitiveBugReport::getInterestingnessKind(SVal V) const {
"BugReport::getInterestingnessKind currently can only handle 2 different "
"tracking kinds! Please define what tracking kind should we return here "
"when the kind of getAsRegion() and getAsSymbol() is different!");
- return None;
+ return std::nullopt;
}
-Optional<bugreporter::TrackingKind>
+std::optional<bugreporter::TrackingKind>
PathSensitiveBugReport::getInterestingnessKind(SymbolRef sym) const {
if (!sym)
- return None;
+ return std::nullopt;
// We don't currently consider metadata symbols to be interesting
// even if we know their region is interesting. Is that correct behavior?
auto It = InterestingSymbols.find(sym);
if (It == InterestingSymbols.end())
- return None;
+ return std::nullopt;
return It->getSecond();
}
-Optional<bugreporter::TrackingKind>
+std::optional<bugreporter::TrackingKind>
PathSensitiveBugReport::getInterestingnessKind(const MemRegion *R) const {
if (!R)
- return None;
+ return std::nullopt;
R = R->getBaseRegion();
auto It = InterestingRegions.find(R);
@@ -2351,19 +2357,19 @@ PathSensitiveBugReport::getInterestingnessKind(const MemRegion *R) const {
if (const auto *SR = dyn_cast<SymbolicRegion>(R))
return getInterestingnessKind(SR->getSymbol());
- return None;
+ return std::nullopt;
}
bool PathSensitiveBugReport::isInteresting(SVal V) const {
- return getInterestingnessKind(V).hasValue();
+ return getInterestingnessKind(V).has_value();
}
bool PathSensitiveBugReport::isInteresting(SymbolRef sym) const {
- return getInterestingnessKind(sym).hasValue();
+ return getInterestingnessKind(sym).has_value();
}
bool PathSensitiveBugReport::isInteresting(const MemRegion *R) const {
- return getInterestingnessKind(R).hasValue();
+ return getInterestingnessKind(R).has_value();
}
bool PathSensitiveBugReport::isInteresting(const LocationContext *LC) const {
@@ -2379,7 +2385,7 @@ const Stmt *PathSensitiveBugReport::getStmt() const {
ProgramPoint ProgP = ErrorNode->getLocation();
const Stmt *S = nullptr;
- if (Optional<BlockEntrance> BE = ProgP.getAs<BlockEntrance>()) {
+ if (std::optional<BlockEntrance> BE = ProgP.getAs<BlockEntrance>()) {
CFGBlock &Exit = ProgP.getLocationContext()->getCFG()->getExit();
if (BE->getBlock() == &Exit)
S = ErrorNode->getPreviousStmtForDiagnostics();
@@ -2411,7 +2417,7 @@ PathSensitiveBugReport::getLocation() const {
if (!S) {
// If this is an implicit call, return the implicit call point location.
- if (Optional<PreImplicitCall> PIE = P.getAs<PreImplicitCall>())
+ if (std::optional<PreImplicitCall> PIE = P.getAs<PreImplicitCall>())
return PathDiagnosticLocation(PIE->getLocation(), SM);
if (auto FE = P.getAs<FunctionExitPoint>()) {
if (const ReturnStmt *RS = FE->getStmt())
@@ -2421,6 +2427,12 @@ PathSensitiveBugReport::getLocation() const {
}
if (S) {
+ // Attributed statements usually have corrupted begin locations,
+ // it's OK to ignore attributes for our purposes and deal with
+ // the actual annotated statement.
+ if (const auto *AS = dyn_cast<AttributedStmt>(S))
+ S = AS->getSubStmt();
+
// For member expressions, return the location of the '.' or '->'.
if (const auto *ME = dyn_cast<MemberExpr>(S))
return PathDiagnosticLocation::createMemberLoc(ME, SM);
@@ -2619,8 +2631,7 @@ BugPathInfo *BugPathGetter::getNextBugPath() {
const ExplodedNode *OrigN;
std::tie(CurrentBugPath.Report, OrigN) = ReportNodes.pop_back_val();
- assert(PriorityMap.find(OrigN) != PriorityMap.end() &&
- "error node not accessible from root");
+ assert(PriorityMap.contains(OrigN) && "error node not accessible from root");
// Create a new graph with a single path. This is the graph that will be
// returned to the caller.
@@ -2813,7 +2824,7 @@ generateVisitorsDiagnostics(PathSensitiveBugReport *R,
return Notes;
}
-Optional<PathDiagnosticBuilder> PathDiagnosticBuilder::findValidReport(
+std::optional<PathDiagnosticBuilder> PathDiagnosticBuilder::findValidReport(
ArrayRef<PathSensitiveBugReport *> &bugReports,
PathSensitiveBugReporter &Reporter) {
@@ -2872,7 +2883,7 @@ PathSensitiveBugReporter::generatePathDiagnostics(
auto Out = std::make_unique<DiagnosticForConsumerMapTy>();
- Optional<PathDiagnosticBuilder> PDB =
+ std::optional<PathDiagnosticBuilder> PDB =
PathDiagnosticBuilder::findValidReport(bugReports, *this);
if (PDB) {
@@ -2894,6 +2905,10 @@ void BugReporter::emitReport(std::unique_ptr<BugReport> R) {
if (!ValidSourceLoc)
return;
+ // If the user asked to suppress this report, we should skip it.
+ if (UserSuppressions.isSuppressed(*R))
+ return;
+
// Compute the bug report's hash to determine its equivalence class.
llvm::FoldingSetNodeID ID;
R->Profile(ID);
@@ -3061,8 +3076,7 @@ void BugReporter::FlushReport(BugReportEquivClass& EQ) {
// See whether we need to silence the checker/package.
for (const std::string &CheckerOrPackage :
getAnalyzerOptions().SilencedCheckersAndPackages) {
- if (report->getBugType().getCheckerName().startswith(
- CheckerOrPackage))
+ if (report->getBugType().getCheckerName().starts_with(CheckerOrPackage))
return;
}
@@ -3089,9 +3103,8 @@ void BugReporter::FlushReport(BugReportEquivClass& EQ) {
if (getAnalyzerOptions().ShouldDisplayNotesAsEvents) {
// For path diagnostic consumers that don't support extra notes,
// we may optionally convert those to path notes.
- for (auto I = report->getNotes().rbegin(),
- E = report->getNotes().rend(); I != E; ++I) {
- PathDiagnosticNotePiece *Piece = I->get();
+ for (const auto &I : llvm::reverse(report->getNotes())) {
+ PathDiagnosticNotePiece *Piece = I.get();
auto ConvertedPiece = std::make_shared<PathDiagnosticEventPiece>(
Piece->getLocation(), Piece->getString());
for (const auto &R: Piece->getRanges())
@@ -3100,9 +3113,8 @@ void BugReporter::FlushReport(BugReportEquivClass& EQ) {
Pieces.push_front(std::move(ConvertedPiece));
}
} else {
- for (auto I = report->getNotes().rbegin(),
- E = report->getNotes().rend(); I != E; ++I)
- Pieces.push_front(*I);
+ for (const auto &I : llvm::reverse(report->getNotes()))
+ Pieces.push_front(I);
}
for (const auto &I : report->getFixits())
@@ -3181,7 +3193,7 @@ findExecutedLines(const SourceManager &SM, const ExplodedNode *N) {
P = N->getParentMap().getParent(RS);
}
- if (P && (isa<SwitchCase>(P) || isa<LabelStmt>(P)))
+ if (isa_and_nonnull<SwitchCase, LabelStmt>(P))
populateExecutedLinesWithStmt(P, SM, *ExecutedLines);
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index d06a2d493303..2f9965036b9e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -46,8 +46,6 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
@@ -60,6 +58,7 @@
#include <cassert>
#include <deque>
#include <memory>
+#include <optional>
#include <string>
#include <utility>
@@ -82,6 +81,10 @@ static const Expr *peelOffPointerArithmetic(const BinaryOperator *B) {
return nullptr;
}
+/// \return A subexpression of @c Ex which represents the
+/// expression-of-interest.
+static const Expr *peelOffOuterExpr(const Expr *Ex, const ExplodedNode *N);
+
/// Given that expression S represents a pointer that would be dereferenced,
/// try to find a sub-expression from which the pointer came from.
/// This is used for tracking down origins of a null or undefined value:
@@ -129,6 +132,16 @@ const Expr *bugreporter::getDerefExpr(const Stmt *S) {
}
// Pattern match for a few useful cases: a[0], p->f, *p etc.
else if (const auto *ME = dyn_cast<MemberExpr>(E)) {
+ // This handles the case when the dereferencing of a member reference
+ // happens. This is needed, because the AST for dereferencing a
+ // member reference looks like the following:
+ // |-MemberExpr
+ // `-DeclRefExpr
+ // Without this special case the notes would refer to the whole object
+ // (struct, class or union variable) instead of just the relevant member.
+
+ if (ME->getMemberDecl()->getType()->isReferenceType())
+ break;
E = ME->getBase();
} else if (const auto *IvarRef = dyn_cast<ObjCIvarRefExpr>(E)) {
E = IvarRef->getBase();
@@ -154,26 +167,42 @@ const Expr *bugreporter::getDerefExpr(const Stmt *S) {
return E;
}
+static const VarDecl *getVarDeclForExpression(const Expr *E) {
+ if (const auto *DR = dyn_cast<DeclRefExpr>(E))
+ return dyn_cast<VarDecl>(DR->getDecl());
+ return nullptr;
+}
+
static const MemRegion *
getLocationRegionIfReference(const Expr *E, const ExplodedNode *N,
bool LookingForReference = true) {
- if (const auto *DR = dyn_cast<DeclRefExpr>(E)) {
- if (const auto *VD = dyn_cast<VarDecl>(DR->getDecl())) {
- if (LookingForReference && !VD->getType()->isReferenceType())
- return nullptr;
- return N->getState()
- ->getLValue(VD, N->getLocationContext())
- .getAsRegion();
+ if (const auto *ME = dyn_cast<MemberExpr>(E)) {
+ // This handles null references from FieldRegions, for example:
+ // struct Wrapper { int &ref; };
+ // Wrapper w = { *(int *)0 };
+ // w.ref = 1;
+ const Expr *Base = ME->getBase();
+ const VarDecl *VD = getVarDeclForExpression(Base);
+ if (!VD)
+ return nullptr;
+
+ const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
+ if (!FD)
+ return nullptr;
+
+ if (FD->getType()->isReferenceType()) {
+ SVal StructSVal = N->getState()->getLValue(VD, N->getLocationContext());
+ return N->getState()->getLValue(FD, StructSVal).getAsRegion();
}
+ return nullptr;
}
- // FIXME: This does not handle other kinds of null references,
- // for example, references from FieldRegions:
- // struct Wrapper { int &ref; };
- // Wrapper w = { *(int *)0 };
- // w.ref = 1;
-
- return nullptr;
+ const VarDecl *VD = getVarDeclForExpression(E);
+ if (!VD)
+ return nullptr;
+ if (LookingForReference && !VD->getType()->isReferenceType())
+ return nullptr;
+ return N->getState()->getLValue(VD, N->getLocationContext()).getAsRegion();
}
/// Comparing internal representations of symbolic values (via
@@ -202,8 +231,8 @@ static bool hasVisibleUpdate(const ExplodedNode *LeftNode, SVal LeftVal,
RLCV->getStore() == RightNode->getState()->getStore();
}
-static Optional<SVal> getSValForVar(const Expr *CondVarExpr,
- const ExplodedNode *N) {
+static std::optional<SVal> getSValForVar(const Expr *CondVarExpr,
+ const ExplodedNode *N) {
ProgramStateRef State = N->getState();
const LocationContext *LCtx = N->getLocationContext();
@@ -223,16 +252,16 @@ static Optional<SVal> getSValForVar(const Expr *CondVarExpr,
if (auto FieldL = State->getSVal(ME, LCtx).getAs<Loc>())
return State->getRawSVal(*FieldL, FD->getType());
- return None;
+ return std::nullopt;
}
-static Optional<const llvm::APSInt *>
+static std::optional<const llvm::APSInt *>
getConcreteIntegerValue(const Expr *CondVarExpr, const ExplodedNode *N) {
- if (Optional<SVal> V = getSValForVar(CondVarExpr, N))
+ if (std::optional<SVal> V = getSValForVar(CondVarExpr, N))
if (auto CI = V->getAs<nonloc::ConcreteInt>())
return &CI->getValue();
- return None;
+ return std::nullopt;
}
static bool isVarAnInterestingCondition(const Expr *CondVarExpr,
@@ -244,8 +273,9 @@ static bool isVarAnInterestingCondition(const Expr *CondVarExpr,
if (!B->getErrorNode()->getStackFrame()->isParentOf(N->getStackFrame()))
return false;
- if (Optional<SVal> V = getSValForVar(CondVarExpr, N))
- if (Optional<bugreporter::TrackingKind> K = B->getInterestingnessKind(*V))
+ if (std::optional<SVal> V = getSValForVar(CondVarExpr, N))
+ if (std::optional<bugreporter::TrackingKind> K =
+ B->getInterestingnessKind(*V))
return *K == bugreporter::TrackingKind::Condition;
return false;
@@ -253,8 +283,8 @@ static bool isVarAnInterestingCondition(const Expr *CondVarExpr,
static bool isInterestingExpr(const Expr *E, const ExplodedNode *N,
const PathSensitiveBugReport *B) {
- if (Optional<SVal> V = getSValForVar(E, N))
- return B->getInterestingnessKind(*V).hasValue();
+ if (std::optional<SVal> V = getSValForVar(E, N))
+ return B->getInterestingnessKind(*V).has_value();
return false;
}
@@ -344,45 +374,178 @@ BugReporterVisitor::getDefaultEndPath(const BugReporterContext &BRC,
}
//===----------------------------------------------------------------------===//
+// Implementation of NoStateChangeFuncVisitor.
+//===----------------------------------------------------------------------===//
+
+bool NoStateChangeFuncVisitor::isModifiedInFrame(const ExplodedNode *N) {
+ const LocationContext *Ctx = N->getLocationContext();
+ const StackFrameContext *SCtx = Ctx->getStackFrame();
+ if (!FramesModifyingCalculated.count(SCtx))
+ findModifyingFrames(N);
+ return FramesModifying.count(SCtx);
+}
+
+void NoStateChangeFuncVisitor::markFrameAsModifying(
+ const StackFrameContext *SCtx) {
+ while (!SCtx->inTopFrame()) {
+ auto p = FramesModifying.insert(SCtx);
+ if (!p.second)
+ break; // Frame and all its parents already inserted.
+
+ SCtx = SCtx->getParent()->getStackFrame();
+ }
+}
+
+static const ExplodedNode *getMatchingCallExitEnd(const ExplodedNode *N) {
+ assert(N->getLocationAs<CallEnter>());
+ // The stackframe of the callee is only found in the nodes succeeding
+ // the CallEnter node. CallEnter's stack frame refers to the caller.
+ const StackFrameContext *OrigSCtx = N->getFirstSucc()->getStackFrame();
+
+ // Similarly, the nodes preceding CallExitEnd refer to the callee's stack
+ // frame.
+ auto IsMatchingCallExitEnd = [OrigSCtx](const ExplodedNode *N) {
+ return N->getLocationAs<CallExitEnd>() &&
+ OrigSCtx == N->getFirstPred()->getStackFrame();
+ };
+ while (N && !IsMatchingCallExitEnd(N)) {
+ assert(N->succ_size() <= 1 &&
+ "This function is to be used on the trimmed ExplodedGraph!");
+ N = N->getFirstSucc();
+ }
+ return N;
+}
+
+void NoStateChangeFuncVisitor::findModifyingFrames(
+ const ExplodedNode *const CallExitBeginN) {
+
+ assert(CallExitBeginN->getLocationAs<CallExitBegin>());
+
+ const StackFrameContext *const OriginalSCtx =
+ CallExitBeginN->getLocationContext()->getStackFrame();
+
+ const ExplodedNode *CurrCallExitBeginN = CallExitBeginN;
+ const StackFrameContext *CurrentSCtx = OriginalSCtx;
+
+ for (const ExplodedNode *CurrN = CallExitBeginN; CurrN;
+ CurrN = CurrN->getFirstPred()) {
+ // Found a new inlined call.
+ if (CurrN->getLocationAs<CallExitBegin>()) {
+ CurrCallExitBeginN = CurrN;
+ CurrentSCtx = CurrN->getStackFrame();
+ FramesModifyingCalculated.insert(CurrentSCtx);
+ // We won't see a change in between two identical exploded nodes: skip.
+ continue;
+ }
+
+ if (auto CE = CurrN->getLocationAs<CallEnter>()) {
+ if (const ExplodedNode *CallExitEndN = getMatchingCallExitEnd(CurrN))
+ if (wasModifiedInFunction(CurrN, CallExitEndN))
+ markFrameAsModifying(CurrentSCtx);
+
+ // We exited this inlined call, lets actualize the stack frame.
+ CurrentSCtx = CurrN->getStackFrame();
+
+ // Stop calculating at the current function, but always regard it as
+ // modifying, so we can avoid notes like this:
+ // void f(Foo &F) {
+ // F.field = 0; // note: 0 assigned to 'F.field'
+ // // note: returning without writing to 'F.field'
+ // }
+ if (CE->getCalleeContext() == OriginalSCtx) {
+ markFrameAsModifying(CurrentSCtx);
+ break;
+ }
+ }
+
+ if (wasModifiedBeforeCallExit(CurrN, CurrCallExitBeginN))
+ markFrameAsModifying(CurrentSCtx);
+ }
+}
+
+PathDiagnosticPieceRef NoStateChangeFuncVisitor::VisitNode(
+ const ExplodedNode *N, BugReporterContext &BR, PathSensitiveBugReport &R) {
+
+ const LocationContext *Ctx = N->getLocationContext();
+ const StackFrameContext *SCtx = Ctx->getStackFrame();
+ ProgramStateRef State = N->getState();
+ auto CallExitLoc = N->getLocationAs<CallExitBegin>();
+
+ // No diagnostic if region was modified inside the frame.
+ if (!CallExitLoc || isModifiedInFrame(N))
+ return nullptr;
+
+ CallEventRef<> Call =
+ BR.getStateManager().getCallEventManager().getCaller(SCtx, State);
+
+ // Optimistically suppress uninitialized value bugs that result
+ // from system headers having a chance to initialize the value
+ // but failing to do so. It's too unlikely a system header's fault.
+ // It's much more likely a situation in which the function has a failure
+ // mode that the user decided not to check. If we want to hunt such
+ // omitted checks, we should provide an explicit function-specific note
+ // describing the precondition under which the function isn't supposed to
+ // initialize its out-parameter, and additionally check that such
+ // precondition can actually be fulfilled on the current path.
+ if (Call->isInSystemHeader()) {
+ // We make an exception for system header functions that have no branches.
+ // Such functions unconditionally fail to initialize the variable.
+ // If they call other functions that have more paths within them,
+ // this suppression would still apply when we visit these inner functions.
+ // One common example of a standard function that doesn't ever initialize
+ // its out parameter is operator placement new; it's up to the follow-up
+ // constructor (if any) to initialize the memory.
+ if (!N->getStackFrame()->getCFG()->isLinear()) {
+ static int i = 0;
+ R.markInvalid(&i, nullptr);
+ }
+ return nullptr;
+ }
+
+ if (const auto *MC = dyn_cast<ObjCMethodCall>(Call)) {
+ // If we failed to construct a piece for self, we still want to check
+ // whether the entity of interest is in a parameter.
+ if (PathDiagnosticPieceRef Piece = maybeEmitNoteForObjCSelf(R, *MC, N))
+ return Piece;
+ }
+
+ if (const auto *CCall = dyn_cast<CXXConstructorCall>(Call)) {
+ // Do not generate diagnostics for not modified parameters in
+ // constructors.
+ return maybeEmitNoteForCXXThis(R, *CCall, N);
+ }
+
+ return maybeEmitNoteForParameters(R, *Call, N);
+}
+
+//===----------------------------------------------------------------------===//
// Implementation of NoStoreFuncVisitor.
//===----------------------------------------------------------------------===//
namespace {
-
/// Put a diagnostic on return statement of all inlined functions
/// for which the region of interest \p RegionOfInterest was passed into,
/// but not written inside, and it has caused an undefined read or a null
/// pointer dereference outside.
-class NoStoreFuncVisitor final : public BugReporterVisitor {
+class NoStoreFuncVisitor final : public NoStateChangeFuncVisitor {
const SubRegion *RegionOfInterest;
MemRegionManager &MmrMgr;
const SourceManager &SM;
const PrintingPolicy &PP;
- bugreporter::TrackingKind TKind;
/// Recursion limit for dereferencing fields when looking for the
/// region of interest.
/// The limit of two indicates that we will dereference fields only once.
static const unsigned DEREFERENCE_LIMIT = 2;
- /// Frames writing into \c RegionOfInterest.
- /// This visitor generates a note only if a function does not write into
- /// a region of interest. This information is not immediately available
- /// by looking at the node associated with the exit from the function
- /// (usually the return statement). To avoid recomputing the same information
- /// many times (going up the path for each node and checking whether the
- /// region was written into) we instead lazily compute the
- /// stack frames along the path which write into the region of interest.
- llvm::SmallPtrSet<const StackFrameContext *, 32> FramesModifyingRegion;
- llvm::SmallPtrSet<const StackFrameContext *, 32> FramesModifyingCalculated;
-
using RegionVector = SmallVector<const MemRegion *, 5>;
public:
NoStoreFuncVisitor(const SubRegion *R, bugreporter::TrackingKind TKind)
- : RegionOfInterest(R), MmrMgr(R->getMemRegionManager()),
+ : NoStateChangeFuncVisitor(TKind), RegionOfInterest(R),
+ MmrMgr(R->getMemRegionManager()),
SM(MmrMgr.getContext().getSourceManager()),
- PP(MmrMgr.getContext().getPrintingPolicy()), TKind(TKind) {}
+ PP(MmrMgr.getContext().getPrintingPolicy()) {}
void Profile(llvm::FoldingSetNodeID &ID) const override {
static int Tag = 0;
@@ -390,41 +553,36 @@ public:
ID.AddPointer(RegionOfInterest);
}
- void *getTag() const {
- static int Tag = 0;
- return static_cast<void *>(&Tag);
- }
-
- PathDiagnosticPieceRef VisitNode(const ExplodedNode *N,
- BugReporterContext &BR,
- PathSensitiveBugReport &R) override;
-
private:
+ /// \return Whether \c RegionOfInterest was modified at \p CurrN compared to
+ /// the value it holds in \p CallExitBeginN.
+ bool wasModifiedBeforeCallExit(const ExplodedNode *CurrN,
+ const ExplodedNode *CallExitBeginN) override;
+
/// Attempts to find the region of interest in a given record decl,
/// by either following the base classes or fields.
/// Dereferences fields up to a given recursion limit.
/// Note that \p Vec is passed by value, leading to quadratic copying cost,
/// but it's OK in practice since its length is limited to DEREFERENCE_LIMIT.
- /// \return A chain fields leading to the region of interest or None.
- const Optional<RegionVector>
+ /// \return A chain fields leading to the region of interest or std::nullopt.
+ const std::optional<RegionVector>
findRegionOfInterestInRecord(const RecordDecl *RD, ProgramStateRef State,
const MemRegion *R, const RegionVector &Vec = {},
int depth = 0);
- /// Check and lazily calculate whether the region of interest is
- /// modified in the stack frame to which \p N belongs.
- /// The calculation is cached in FramesModifyingRegion.
- bool isRegionOfInterestModifiedInFrame(const ExplodedNode *N) {
- const LocationContext *Ctx = N->getLocationContext();
- const StackFrameContext *SCtx = Ctx->getStackFrame();
- if (!FramesModifyingCalculated.count(SCtx))
- findModifyingFrames(N);
- return FramesModifyingRegion.count(SCtx);
- }
+ // Region of interest corresponds to an IVar, exiting a method
+ // which could have written into that IVar, but did not.
+ PathDiagnosticPieceRef maybeEmitNoteForObjCSelf(PathSensitiveBugReport &R,
+ const ObjCMethodCall &Call,
+ const ExplodedNode *N) final;
+
+ PathDiagnosticPieceRef maybeEmitNoteForCXXThis(PathSensitiveBugReport &R,
+ const CXXConstructorCall &Call,
+ const ExplodedNode *N) final;
- /// Write to \c FramesModifyingRegion all stack frames along
- /// the path in the current stack frame which modify \c RegionOfInterest.
- void findModifyingFrames(const ExplodedNode *N);
+ PathDiagnosticPieceRef
+ maybeEmitNoteForParameters(PathSensitiveBugReport &R, const CallEvent &Call,
+ const ExplodedNode *N) final;
/// Consume the information on the no-store stack frame in order to
/// either emit a note or suppress the report enirely.
@@ -436,22 +594,18 @@ private:
const MemRegion *MatchedRegion, StringRef FirstElement,
bool FirstIsReferenceType, unsigned IndirectionLevel);
- /// Pretty-print region \p MatchedRegion to \p os.
- /// \return Whether printing succeeded.
- bool prettyPrintRegionName(StringRef FirstElement, bool FirstIsReferenceType,
+ bool prettyPrintRegionName(const RegionVector &FieldChain,
const MemRegion *MatchedRegion,
- const RegionVector &FieldChain,
- int IndirectionLevel,
+ StringRef FirstElement, bool FirstIsReferenceType,
+ unsigned IndirectionLevel,
llvm::raw_svector_ostream &os);
- /// Print first item in the chain, return new separator.
- static StringRef prettyPrintFirstElement(StringRef FirstElement,
- bool MoreItemsExpected,
- int IndirectionLevel,
- llvm::raw_svector_ostream &os);
+ StringRef prettyPrintFirstElement(StringRef FirstElement,
+ bool MoreItemsExpected,
+ int IndirectionLevel,
+ llvm::raw_svector_ostream &os);
};
-
-} // end of anonymous namespace
+} // namespace
/// \return Whether the method declaration \p Parent
/// syntactically has a binary operation writing into the ivar \p Ivar.
@@ -478,7 +632,7 @@ static bool potentiallyWritesIntoIvar(const Decl *Parent,
if (const auto *DRE = dyn_cast<DeclRefExpr>(Base))
if (const auto *ID = dyn_cast<ImplicitParamDecl>(DRE->getDecl()))
- if (ID->getParameterKind() == ImplicitParamDecl::ObjCSelf)
+ if (ID->getParameterKind() == ImplicitParamKind::ObjCSelf)
return true;
return false;
@@ -486,50 +640,31 @@ static bool potentiallyWritesIntoIvar(const Decl *Parent,
return false;
}
-/// Get parameters associated with runtime definition in order
-/// to get the correct parameter name.
-static ArrayRef<ParmVarDecl *> getCallParameters(CallEventRef<> Call) {
- // Use runtime definition, if available.
- RuntimeDefinition RD = Call->getRuntimeDefinition();
- if (const auto *FD = dyn_cast_or_null<FunctionDecl>(RD.getDecl()))
- return FD->parameters();
- if (const auto *MD = dyn_cast_or_null<ObjCMethodDecl>(RD.getDecl()))
- return MD->parameters();
-
- return Call->parameters();
-}
-
-/// \return whether \p Ty points to a const type, or is a const reference.
-static bool isPointerToConst(QualType Ty) {
- return !Ty->getPointeeType().isNull() &&
- Ty->getPointeeType().getCanonicalType().isConstQualified();
-}
-
/// Attempts to find the region of interest in a given CXX decl,
/// by either following the base classes or fields.
/// Dereferences fields up to a given recursion limit.
/// Note that \p Vec is passed by value, leading to quadratic copying cost,
/// but it's OK in practice since its length is limited to DEREFERENCE_LIMIT.
-/// \return A chain fields leading to the region of interest or None.
-const Optional<NoStoreFuncVisitor::RegionVector>
+/// \return A chain fields leading to the region of interest or std::nullopt.
+const std::optional<NoStoreFuncVisitor::RegionVector>
NoStoreFuncVisitor::findRegionOfInterestInRecord(
const RecordDecl *RD, ProgramStateRef State, const MemRegion *R,
const NoStoreFuncVisitor::RegionVector &Vec /* = {} */,
int depth /* = 0 */) {
if (depth == DEREFERENCE_LIMIT) // Limit the recursion depth.
- return None;
+ return std::nullopt;
if (const auto *RDX = dyn_cast<CXXRecordDecl>(RD))
if (!RDX->hasDefinition())
- return None;
+ return std::nullopt;
// Recursively examine the base classes.
// Note that following base classes does not increase the recursion depth.
if (const auto *RDX = dyn_cast<CXXRecordDecl>(RD))
for (const auto &II : RDX->bases())
if (const RecordDecl *RRD = II.getType()->getAsRecordDecl())
- if (Optional<RegionVector> Out =
+ if (std::optional<RegionVector> Out =
findRegionOfInterestInRecord(RRD, State, R, Vec, depth))
return Out;
@@ -555,77 +690,75 @@ NoStoreFuncVisitor::findRegionOfInterestInRecord(
continue;
if (const RecordDecl *RRD = PT->getAsRecordDecl())
- if (Optional<RegionVector> Out =
+ if (std::optional<RegionVector> Out =
findRegionOfInterestInRecord(RRD, State, VR, VecF, depth + 1))
return Out;
}
- return None;
+ return std::nullopt;
}
PathDiagnosticPieceRef
-NoStoreFuncVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BR,
- PathSensitiveBugReport &R) {
-
- const LocationContext *Ctx = N->getLocationContext();
- const StackFrameContext *SCtx = Ctx->getStackFrame();
- ProgramStateRef State = N->getState();
- auto CallExitLoc = N->getLocationAs<CallExitBegin>();
-
- // No diagnostic if region was modified inside the frame.
- if (!CallExitLoc || isRegionOfInterestModifiedInFrame(N))
- return nullptr;
-
- CallEventRef<> Call =
- BR.getStateManager().getCallEventManager().getCaller(SCtx, State);
-
- // Region of interest corresponds to an IVar, exiting a method
- // which could have written into that IVar, but did not.
- if (const auto *MC = dyn_cast<ObjCMethodCall>(Call)) {
- if (const auto *IvarR = dyn_cast<ObjCIvarRegion>(RegionOfInterest)) {
- const MemRegion *SelfRegion = MC->getReceiverSVal().getAsRegion();
- if (RegionOfInterest->isSubRegionOf(SelfRegion) &&
- potentiallyWritesIntoIvar(Call->getRuntimeDefinition().getDecl(),
- IvarR->getDecl()))
- return maybeEmitNote(R, *Call, N, {}, SelfRegion, "self",
- /*FirstIsReferenceType=*/false, 1);
- }
+NoStoreFuncVisitor::maybeEmitNoteForObjCSelf(PathSensitiveBugReport &R,
+ const ObjCMethodCall &Call,
+ const ExplodedNode *N) {
+ if (const auto *IvarR = dyn_cast<ObjCIvarRegion>(RegionOfInterest)) {
+ const MemRegion *SelfRegion = Call.getReceiverSVal().getAsRegion();
+ if (RegionOfInterest->isSubRegionOf(SelfRegion) &&
+ potentiallyWritesIntoIvar(Call.getRuntimeDefinition().getDecl(),
+ IvarR->getDecl()))
+ return maybeEmitNote(R, Call, N, {}, SelfRegion, "self",
+ /*FirstIsReferenceType=*/false, 1);
}
+ return nullptr;
+}
- if (const auto *CCall = dyn_cast<CXXConstructorCall>(Call)) {
- const MemRegion *ThisR = CCall->getCXXThisVal().getAsRegion();
- if (RegionOfInterest->isSubRegionOf(ThisR) &&
- !CCall->getDecl()->isImplicit())
- return maybeEmitNote(R, *Call, N, {}, ThisR, "this",
- /*FirstIsReferenceType=*/false, 1);
+PathDiagnosticPieceRef
+NoStoreFuncVisitor::maybeEmitNoteForCXXThis(PathSensitiveBugReport &R,
+ const CXXConstructorCall &Call,
+ const ExplodedNode *N) {
+ const MemRegion *ThisR = Call.getCXXThisVal().getAsRegion();
+ if (RegionOfInterest->isSubRegionOf(ThisR) && !Call.getDecl()->isImplicit())
+ return maybeEmitNote(R, Call, N, {}, ThisR, "this",
+ /*FirstIsReferenceType=*/false, 1);
+
+ // Do not generate diagnostics for not modified parameters in
+ // constructors.
+ return nullptr;
+}
- // Do not generate diagnostics for not modified parameters in
- // constructors.
- return nullptr;
- }
+/// \return whether \p Ty points to a const type, or is a const reference.
+static bool isPointerToConst(QualType Ty) {
+ return !Ty->getPointeeType().isNull() &&
+ Ty->getPointeeType().getCanonicalType().isConstQualified();
+}
- ArrayRef<ParmVarDecl *> parameters = getCallParameters(Call);
- for (unsigned I = 0; I < Call->getNumArgs() && I < parameters.size(); ++I) {
- const ParmVarDecl *PVD = parameters[I];
- SVal V = Call->getArgSVal(I);
+PathDiagnosticPieceRef NoStoreFuncVisitor::maybeEmitNoteForParameters(
+ PathSensitiveBugReport &R, const CallEvent &Call, const ExplodedNode *N) {
+ ArrayRef<ParmVarDecl *> Parameters = Call.parameters();
+ for (unsigned I = 0; I < Call.getNumArgs() && I < Parameters.size(); ++I) {
+ const ParmVarDecl *PVD = Parameters[I];
+ SVal V = Call.getArgSVal(I);
bool ParamIsReferenceType = PVD->getType()->isReferenceType();
std::string ParamName = PVD->getNameAsString();
- int IndirectionLevel = 1;
+ unsigned IndirectionLevel = 1;
QualType T = PVD->getType();
while (const MemRegion *MR = V.getAsRegion()) {
if (RegionOfInterest->isSubRegionOf(MR) && !isPointerToConst(T))
- return maybeEmitNote(R, *Call, N, {}, MR, ParamName,
+ return maybeEmitNote(R, Call, N, {}, MR, ParamName,
ParamIsReferenceType, IndirectionLevel);
QualType PT = T->getPointeeType();
if (PT.isNull() || PT->isVoidType())
break;
+ ProgramStateRef State = N->getState();
+
if (const RecordDecl *RD = PT->getAsRecordDecl())
- if (Optional<RegionVector> P =
+ if (std::optional<RegionVector> P =
findRegionOfInterestInRecord(RD, State, MR))
- return maybeEmitNote(R, *Call, N, *P, RegionOfInterest, ParamName,
+ return maybeEmitNote(R, Call, N, *P, RegionOfInterest, ParamName,
ParamIsReferenceType, IndirectionLevel);
V = State->getSVal(MR, PT);
@@ -637,40 +770,11 @@ NoStoreFuncVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BR,
return nullptr;
}
-void NoStoreFuncVisitor::findModifyingFrames(const ExplodedNode *N) {
- assert(N->getLocationAs<CallExitBegin>());
- ProgramStateRef LastReturnState = N->getState();
- SVal ValueAtReturn = LastReturnState->getSVal(RegionOfInterest);
- const LocationContext *Ctx = N->getLocationContext();
- const StackFrameContext *OriginalSCtx = Ctx->getStackFrame();
-
- do {
- ProgramStateRef State = N->getState();
- auto CallExitLoc = N->getLocationAs<CallExitBegin>();
- if (CallExitLoc) {
- LastReturnState = State;
- ValueAtReturn = LastReturnState->getSVal(RegionOfInterest);
- }
-
- FramesModifyingCalculated.insert(N->getLocationContext()->getStackFrame());
-
- if (wasRegionOfInterestModifiedAt(RegionOfInterest, N, ValueAtReturn)) {
- const StackFrameContext *SCtx = N->getStackFrame();
- while (!SCtx->inTopFrame()) {
- auto p = FramesModifyingRegion.insert(SCtx);
- if (!p.second)
- break; // Frame and all its parents already inserted.
- SCtx = SCtx->getParent()->getStackFrame();
- }
- }
-
- // Stop calculation at the call to the current function.
- if (auto CE = N->getLocationAs<CallEnter>())
- if (CE->getCalleeContext() == OriginalSCtx)
- break;
-
- N = N->getFirstPred();
- } while (N);
+bool NoStoreFuncVisitor::wasModifiedBeforeCallExit(
+ const ExplodedNode *CurrN, const ExplodedNode *CallExitBeginN) {
+ return ::wasRegionOfInterestModifiedAt(
+ RegionOfInterest, CurrN,
+ CallExitBeginN->getState()->getSVal(RegionOfInterest));
}
static llvm::StringLiteral WillBeUsedForACondition =
@@ -681,27 +785,6 @@ PathDiagnosticPieceRef NoStoreFuncVisitor::maybeEmitNote(
const RegionVector &FieldChain, const MemRegion *MatchedRegion,
StringRef FirstElement, bool FirstIsReferenceType,
unsigned IndirectionLevel) {
- // Optimistically suppress uninitialized value bugs that result
- // from system headers having a chance to initialize the value
- // but failing to do so. It's too unlikely a system header's fault.
- // It's much more likely a situation in which the function has a failure
- // mode that the user decided not to check. If we want to hunt such
- // omitted checks, we should provide an explicit function-specific note
- // describing the precondition under which the function isn't supposed to
- // initialize its out-parameter, and additionally check that such
- // precondition can actually be fulfilled on the current path.
- if (Call.isInSystemHeader()) {
- // We make an exception for system header functions that have no branches.
- // Such functions unconditionally fail to initialize the variable.
- // If they call other functions that have more paths within them,
- // this suppression would still apply when we visit these inner functions.
- // One common example of a standard function that doesn't ever initialize
- // its out parameter is operator placement new; it's up to the follow-up
- // constructor (if any) to initialize the memory.
- if (!N->getStackFrame()->getCFG()->isLinear())
- R.markInvalid(getTag(), nullptr);
- return nullptr;
- }
PathDiagnosticLocation L =
PathDiagnosticLocation::create(N->getLocation(), SM);
@@ -717,8 +800,8 @@ PathDiagnosticPieceRef NoStoreFuncVisitor::maybeEmitNote(
os << "Returning without writing to '";
// Do not generate the note if failed to pretty-print.
- if (!prettyPrintRegionName(FirstElement, FirstIsReferenceType, MatchedRegion,
- FieldChain, IndirectionLevel, os))
+ if (!prettyPrintRegionName(FieldChain, MatchedRegion, FirstElement,
+ FirstIsReferenceType, IndirectionLevel, os))
return nullptr;
os << "'";
@@ -727,11 +810,11 @@ PathDiagnosticPieceRef NoStoreFuncVisitor::maybeEmitNote(
return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
}
-bool NoStoreFuncVisitor::prettyPrintRegionName(StringRef FirstElement,
- bool FirstIsReferenceType,
+bool NoStoreFuncVisitor::prettyPrintRegionName(const RegionVector &FieldChain,
const MemRegion *MatchedRegion,
- const RegionVector &FieldChain,
- int IndirectionLevel,
+ StringRef FirstElement,
+ bool FirstIsReferenceType,
+ unsigned IndirectionLevel,
llvm::raw_svector_ostream &os) {
if (FirstIsReferenceType)
@@ -754,7 +837,7 @@ bool NoStoreFuncVisitor::prettyPrintRegionName(StringRef FirstElement,
// Just keep going up to the base region.
// Element regions may appear due to casts.
- if (isa<CXXBaseObjectRegion>(R) || isa<CXXTempObjectRegion>(R))
+ if (isa<CXXBaseObjectRegion, CXXTempObjectRegion>(R))
continue;
if (Sep.empty())
@@ -854,7 +937,7 @@ public:
const SVal V) {
AnalyzerOptions &Options = N->getState()->getAnalysisManager().options;
if (EnableNullFPSuppression && Options.ShouldSuppressNullReturnPaths &&
- V.getAs<Loc>())
+ isa<Loc>(V))
BR.addVisitor<MacroNullReturnSuppressionVisitor>(R->getAs<SubRegion>(),
V);
}
@@ -871,12 +954,12 @@ public:
private:
/// \return Source location of right hand side of an assignment
/// into \c RegionOfInterest, empty optional if none found.
- Optional<SourceLocation> matchAssignment(const ExplodedNode *N) {
+ std::optional<SourceLocation> matchAssignment(const ExplodedNode *N) {
const Stmt *S = N->getStmtForDiagnostics();
ProgramStateRef State = N->getState();
auto *LCtx = N->getLocationContext();
if (!S)
- return None;
+ return std::nullopt;
if (const auto *DS = dyn_cast<DeclStmt>(S)) {
if (const auto *VD = dyn_cast<VarDecl>(DS->getSingleDecl()))
@@ -891,7 +974,7 @@ private:
return RHS->getBeginLoc();
}
}
- return None;
+ return std::nullopt;
}
};
@@ -944,7 +1027,7 @@ public:
if (N->getLocationContext() != CalleeSFC)
return nullptr;
- Optional<StmtPoint> SP = N->getLocationAs<StmtPoint>();
+ std::optional<StmtPoint> SP = N->getLocationAs<StmtPoint>();
if (!SP)
return nullptr;
@@ -966,18 +1049,17 @@ public:
assert(RetE && "Tracking a return value for a void function");
// Handle cases where a reference is returned and then immediately used.
- Optional<Loc> LValue;
+ std::optional<Loc> LValue;
if (RetE->isGLValue()) {
if ((LValue = V.getAs<Loc>())) {
SVal RValue = State->getRawSVal(*LValue, RetE->getType());
- if (RValue.getAs<DefinedSVal>())
+ if (isa<DefinedSVal>(RValue))
V = RValue;
}
}
// Ignore aggregate rvalues.
- if (V.getAs<nonloc::LazyCompoundVal>() ||
- V.getAs<nonloc::CompoundVal>())
+ if (isa<nonloc::LazyCompoundVal, nonloc::CompoundVal>(V))
return nullptr;
RetE = RetE->IgnoreParenCasts();
@@ -992,7 +1074,7 @@ public:
bool WouldEventBeMeaningless = false;
if (State->isNull(V).isConstrainedTrue()) {
- if (V.getAs<Loc>()) {
+ if (isa<Loc>(V)) {
// If we have counter-suppression enabled, make sure we keep visiting
// future nodes. We want to emit a path note as well, in case
@@ -1022,10 +1104,7 @@ public:
if (N->getCFG().size() == 3)
WouldEventBeMeaningless = true;
- if (V.getAs<Loc>())
- Out << "Returning pointer";
- else
- Out << "Returning value";
+ Out << (isa<Loc>(V) ? "Returning pointer" : "Returning value");
}
}
@@ -1069,7 +1148,7 @@ public:
assert(Options.ShouldAvoidSuppressingNullArgumentPaths);
// Are we at the entry node for this call?
- Optional<CallEnter> CE = N->getLocationAs<CallEnter>();
+ std::optional<CallEnter> CE = N->getLocationAs<CallEnter>();
if (!CE)
return nullptr;
@@ -1087,7 +1166,7 @@ public:
ProgramStateRef State = N->getState();
CallEventRef<> Call = CallMgr.getCaller(CalleeSFC, State);
for (unsigned I = 0, E = Call->getNumArgs(); I != E; ++I) {
- Optional<Loc> ArgV = Call->getArgSVal(I).getAs<Loc>();
+ std::optional<Loc> ArgV = Call->getArgSVal(I).getAs<Loc>();
if (!ArgV)
continue;
@@ -1134,8 +1213,6 @@ public:
}
};
-} // end of anonymous namespace
-
//===----------------------------------------------------------------------===//
// StoreSiteFinder
//===----------------------------------------------------------------------===//
@@ -1153,7 +1230,7 @@ class StoreSiteFinder final : public TrackingBugReporterVisitor {
public:
/// \param V We're searching for the store where \c R received this value.
/// \param R The region we're tracking.
- /// \param TKind May limit the amount of notes added to the bug report.
+ /// \param Options Tracking behavior options.
/// \param OriginSFC Only adds notes when the last store happened in a
/// different stackframe to this one. Disregarded if the tracking kind
/// is thorough.
@@ -1175,6 +1252,7 @@ public:
BugReporterContext &BRC,
PathSensitiveBugReport &BR) override;
};
+} // namespace
void StoreSiteFinder::Profile(llvm::FoldingSetNodeID &ID) const {
static int tag = 0;
@@ -1188,7 +1266,7 @@ void StoreSiteFinder::Profile(llvm::FoldingSetNodeID &ID) const {
/// Returns true if \p N represents the DeclStmt declaring and initializing
/// \p VR.
static bool isInitializationOfVar(const ExplodedNode *N, const VarRegion *VR) {
- Optional<PostStmt> P = N->getLocationAs<PostStmt>();
+ std::optional<PostStmt> P = N->getLocationAs<PostStmt>();
if (!P)
return false;
@@ -1248,7 +1326,7 @@ static void showBRDiagnostics(llvm::raw_svector_ostream &OS, StoreInfo SI) {
llvm_unreachable("Unexpected store kind");
}
- if (SI.Value.getAs<loc::ConcreteInt>()) {
+ if (isa<loc::ConcreteInt>(SI.Value)) {
OS << Action << (isObjCPointer(SI.Dest) ? "nil" : "a null pointer value");
} else if (auto CVal = SI.Value.getAs<nonloc::ConcreteInt>()) {
@@ -1287,13 +1365,12 @@ static void showBRDiagnostics(llvm::raw_svector_ostream &OS, StoreInfo SI) {
static void showBRParamDiagnostics(llvm::raw_svector_ostream &OS,
StoreInfo SI) {
const auto *VR = cast<VarRegion>(SI.Dest);
- const auto *Param = cast<ParmVarDecl>(VR->getDecl());
+ const auto *D = VR->getDecl();
OS << "Passing ";
- if (SI.Value.getAs<loc::ConcreteInt>()) {
- OS << (isObjCPointer(Param) ? "nil object reference"
- : "null pointer value");
+ if (isa<loc::ConcreteInt>(SI.Value)) {
+ OS << (isObjCPointer(D) ? "nil object reference" : "null pointer value");
} else if (SI.Value.isUndef()) {
OS << "uninitialized value";
@@ -1308,12 +1385,18 @@ static void showBRParamDiagnostics(llvm::raw_svector_ostream &OS,
OS << "value";
}
- // Printed parameter indexes are 1-based, not 0-based.
- unsigned Idx = Param->getFunctionScopeIndex() + 1;
- OS << " via " << Idx << llvm::getOrdinalSuffix(Idx) << " parameter";
- if (VR->canPrintPretty()) {
- OS << " ";
- VR->printPretty(OS);
+ if (const auto *Param = dyn_cast<ParmVarDecl>(VR->getDecl())) {
+ // Printed parameter indexes are 1-based, not 0-based.
+ unsigned Idx = Param->getFunctionScopeIndex() + 1;
+ OS << " via " << Idx << llvm::getOrdinalSuffix(Idx) << " parameter";
+ if (VR->canPrintPretty()) {
+ OS << " ";
+ VR->printPretty(OS);
+ }
+ } else if (const auto *ImplParam = dyn_cast<ImplicitParamDecl>(D)) {
+ if (ImplParam->getParameterKind() == ImplicitParamKind::ObjCSelf) {
+ OS << " via implicit parameter 'self'";
+ }
}
}
@@ -1322,7 +1405,7 @@ static void showBRDefaultDiagnostics(llvm::raw_svector_ostream &OS,
StoreInfo SI) {
const bool HasSuffix = SI.Dest->canPrintPretty();
- if (SI.Value.getAs<loc::ConcreteInt>()) {
+ if (isa<loc::ConcreteInt>(SI.Value)) {
OS << (isObjCPointer(SI.Dest) ? "nil object reference stored"
: (HasSuffix ? "Null pointer value stored"
: "Storing null pointer value"));
@@ -1357,6 +1440,83 @@ static void showBRDefaultDiagnostics(llvm::raw_svector_ostream &OS,
}
}
+static bool isTrivialCopyOrMoveCtor(const CXXConstructExpr *CE) {
+ if (!CE)
+ return false;
+
+ const auto *CtorDecl = CE->getConstructor();
+
+ return CtorDecl->isCopyOrMoveConstructor() && CtorDecl->isTrivial();
+}
+
+static const Expr *tryExtractInitializerFromList(const InitListExpr *ILE,
+ const MemRegion *R) {
+
+ const auto *TVR = dyn_cast_or_null<TypedValueRegion>(R);
+
+ if (!TVR)
+ return nullptr;
+
+ const auto ITy = ILE->getType().getCanonicalType();
+
+ // Push each sub-region onto the stack.
+ std::stack<const TypedValueRegion *> TVRStack;
+ while (isa<FieldRegion>(TVR) || isa<ElementRegion>(TVR)) {
+ // We found a region that matches the type of the init list,
+ // so we assume this is the outer-most region. This can happen
+ // if the initializer list is inside a class. If our assumption
+ // is wrong, we return a nullptr in the end.
+ if (ITy == TVR->getValueType().getCanonicalType())
+ break;
+
+ TVRStack.push(TVR);
+ TVR = cast<TypedValueRegion>(TVR->getSuperRegion());
+ }
+
+ // If the type of the outer most region doesn't match the type
+ // of the ILE, we can't match the ILE and the region.
+ if (ITy != TVR->getValueType().getCanonicalType())
+ return nullptr;
+
+ const Expr *Init = ILE;
+ while (!TVRStack.empty()) {
+ TVR = TVRStack.top();
+ TVRStack.pop();
+
+ // We hit something that's not an init list before
+ // running out of regions, so we most likely failed.
+ if (!isa<InitListExpr>(Init))
+ return nullptr;
+
+ ILE = cast<InitListExpr>(Init);
+ auto NumInits = ILE->getNumInits();
+
+ if (const auto *FR = dyn_cast<FieldRegion>(TVR)) {
+ const auto *FD = FR->getDecl();
+
+ if (FD->getFieldIndex() >= NumInits)
+ return nullptr;
+
+ Init = ILE->getInit(FD->getFieldIndex());
+ } else if (const auto *ER = dyn_cast<ElementRegion>(TVR)) {
+ const auto Ind = ER->getIndex();
+
+ // If index is symbolic, we can't figure out which expression
+ // belongs to the region.
+ if (!Ind.isConstant())
+ return nullptr;
+
+ const auto IndVal = Ind.getAsInteger()->getLimitedValue();
+ if (IndVal >= NumInits)
+ return nullptr;
+
+ Init = ILE->getInit(IndVal);
+ }
+ }
+
+ return Init;
+}
+
PathDiagnosticPieceRef StoreSiteFinder::VisitNode(const ExplodedNode *Succ,
BugReporterContext &BRC,
PathSensitiveBugReport &BR) {
@@ -1378,7 +1538,8 @@ PathDiagnosticPieceRef StoreSiteFinder::VisitNode(const ExplodedNode *Succ,
// If this is a post initializer expression, initializing the region, we
// should track the initializer expression.
- if (Optional<PostInitializer> PIP = Pred->getLocationAs<PostInitializer>()) {
+ if (std::optional<PostInitializer> PIP =
+ Pred->getLocationAs<PostInitializer>()) {
const MemRegion *FieldReg = (const MemRegion *)PIP->getLocationValue();
if (FieldReg == R) {
StoreSite = Pred;
@@ -1396,25 +1557,101 @@ PathDiagnosticPieceRef StoreSiteFinder::VisitNode(const ExplodedNode *Succ,
return nullptr;
if (hasVisibleUpdate(Pred, Pred->getState()->getSVal(R), Succ, V)) {
- Optional<PostStore> PS = Succ->getLocationAs<PostStore>();
+ std::optional<PostStore> PS = Succ->getLocationAs<PostStore>();
if (!PS || PS->getLocationValue() != R)
return nullptr;
}
StoreSite = Succ;
- // If this is an assignment expression, we can track the value
- // being assigned.
- if (Optional<PostStmt> P = Succ->getLocationAs<PostStmt>())
- if (const BinaryOperator *BO = P->getStmtAs<BinaryOperator>())
+ if (std::optional<PostStmt> P = Succ->getLocationAs<PostStmt>()) {
+ // If this is an assignment expression, we can track the value
+ // being assigned.
+ if (const BinaryOperator *BO = P->getStmtAs<BinaryOperator>()) {
if (BO->isAssignmentOp())
InitE = BO->getRHS();
+ }
+ // If we have a declaration like 'S s{1,2}' that needs special
+ // handling, we handle it here.
+ else if (const auto *DS = P->getStmtAs<DeclStmt>()) {
+ const auto *Decl = DS->getSingleDecl();
+ if (isa<VarDecl>(Decl)) {
+ const auto *VD = cast<VarDecl>(Decl);
+
+ // FIXME: Here we only track the inner most region, so we lose
+ // information, but it's still better than a crash or no information
+ // at all.
+ //
+ // E.g.: The region we have is 's.s2.s3.s4.y' and we only track 'y',
+ // and throw away the rest.
+ if (const auto *ILE = dyn_cast<InitListExpr>(VD->getInit()))
+ InitE = tryExtractInitializerFromList(ILE, R);
+ }
+ } else if (const auto *CE = P->getStmtAs<CXXConstructExpr>()) {
+
+ const auto State = Succ->getState();
+
+ if (isTrivialCopyOrMoveCtor(CE) && isa<SubRegion>(R)) {
+ // Migrate the field regions from the current object to
+ // the parent object. If we track 'a.y.e' and encounter
+ // 'S a = b' then we need to track 'b.y.e'.
+
+ // Push the regions to a stack, from last to first, so
+ // considering the example above the stack will look like
+ // (bottom) 'e' -> 'y' (top).
+
+ std::stack<const SubRegion *> SRStack;
+ const SubRegion *SR = cast<SubRegion>(R);
+ while (isa<FieldRegion>(SR) || isa<ElementRegion>(SR)) {
+ SRStack.push(SR);
+ SR = cast<SubRegion>(SR->getSuperRegion());
+ }
+
+ // Get the region for the object we copied/moved from.
+ const auto *OriginEx = CE->getArg(0);
+ const auto OriginVal =
+ State->getSVal(OriginEx, Succ->getLocationContext());
+
+ // Pop the stored field regions and apply them to the origin
+ // object in the same order we had them on the copy.
+ // OriginField will evolve like 'b' -> 'b.y' -> 'b.y.e'.
+ SVal OriginField = OriginVal;
+ while (!SRStack.empty()) {
+ const auto *TopR = SRStack.top();
+ SRStack.pop();
+
+ if (const auto *FR = dyn_cast<FieldRegion>(TopR)) {
+ OriginField = State->getLValue(FR->getDecl(), OriginField);
+ } else if (const auto *ER = dyn_cast<ElementRegion>(TopR)) {
+ OriginField = State->getLValue(ER->getElementType(),
+ ER->getIndex(), OriginField);
+ } else {
+ // FIXME: handle other region type
+ }
+ }
+
+ // Track 'b.y.e'.
+ getParentTracker().track(V, OriginField.getAsRegion(), Options);
+ InitE = OriginEx;
+ }
+ }
+ // This branch can occur in cases like `Ctor() : field{ x, y } {}'.
+ else if (const auto *ILE = P->getStmtAs<InitListExpr>()) {
+ // FIXME: Here we only track the top level region, so we lose
+ // information, but it's still better than a crash or no information
+ // at all.
+ //
+ // E.g.: The region we have is 's.s2.s3.s4.y' and we only track 'y', and
+ // throw away the rest.
+ InitE = tryExtractInitializerFromList(ILE, R);
+ }
+ }
// If this is a call entry, the variable should be a parameter.
// FIXME: Handle CXXThisRegion as well. (This is not a priority because
// 'this' should never be NULL, but this visitor isn't just for NULL and
// UndefinedVal.)
- if (Optional<CallEnter> CE = Succ->getLocationAs<CallEnter>()) {
+ if (std::optional<CallEnter> CE = Succ->getLocationAs<CallEnter>()) {
if (const auto *VR = dyn_cast<VarRegion>(R)) {
if (const auto *Param = dyn_cast<ParmVarDecl>(VR->getDecl())) {
@@ -1537,7 +1774,7 @@ PathDiagnosticPieceRef StoreSiteFinder::VisitNode(const ExplodedNode *Succ,
R,
OldRegion};
- if (Optional<PostStmt> PS = StoreSite->getLocationAs<PostStmt>()) {
+ if (std::optional<PostStmt> PS = StoreSite->getLocationAs<PostStmt>()) {
const Stmt *S = PS->getStmt();
const auto *DS = dyn_cast<DeclStmt>(S);
const auto *VR = dyn_cast<VarRegion>(R);
@@ -1574,6 +1811,7 @@ PathDiagnosticPieceRef StoreSiteFinder::VisitNode(const ExplodedNode *Succ,
void TrackConstraintBRVisitor::Profile(llvm::FoldingSetNodeID &ID) const {
static int tag = 0;
ID.AddPointer(&tag);
+ ID.AddString(Message);
ID.AddBoolean(Assumption);
ID.Add(Constraint);
}
@@ -1584,8 +1822,12 @@ const char *TrackConstraintBRVisitor::getTag() {
return "TrackConstraintBRVisitor";
}
+bool TrackConstraintBRVisitor::isZeroCheck() const {
+ return !Assumption && Constraint.getAs<Loc>();
+}
+
bool TrackConstraintBRVisitor::isUnderconstrained(const ExplodedNode *N) const {
- if (IsZeroCheck)
+ if (isZeroCheck())
return N->getState()->isNull(Constraint).isUnderconstrained();
return (bool)N->getState()->assume(Constraint, !Assumption);
}
@@ -1609,32 +1851,27 @@ PathDiagnosticPieceRef TrackConstraintBRVisitor::VisitNode(
if (isUnderconstrained(PrevN)) {
IsSatisfied = true;
- // As a sanity check, make sure that the negation of the constraint
- // was infeasible in the current state. If it is feasible, we somehow
- // missed the transition point.
+ // At this point, the negation of the constraint should be infeasible. If it
+ // is feasible, make sure that the negation of the constrainti was
+ // infeasible in the current state. If it is feasible, we somehow missed
+ // the transition point.
assert(!isUnderconstrained(N));
- // We found the transition point for the constraint. We now need to
- // pretty-print the constraint. (work-in-progress)
- SmallString<64> sbuf;
- llvm::raw_svector_ostream os(sbuf);
-
- if (Constraint.getAs<Loc>()) {
- os << "Assuming pointer value is ";
- os << (Assumption ? "non-null" : "null");
- }
+ // Construct a new PathDiagnosticPiece.
+ ProgramPoint P = N->getLocation();
- if (os.str().empty())
+ // If this node already have a specialized note, it's probably better
+ // than our generic note.
+ // FIXME: This only looks for note tags, not for other ways to add a note.
+ if (isa_and_nonnull<NoteTag>(P.getTag()))
return nullptr;
- // Construct a new PathDiagnosticPiece.
- ProgramPoint P = N->getLocation();
PathDiagnosticLocation L =
PathDiagnosticLocation::create(P, BRC.getSourceManager());
if (!L.isValid())
return nullptr;
- auto X = std::make_shared<PathDiagnosticEventPiece>(L, os.str());
+ auto X = std::make_shared<PathDiagnosticEventPiece>(L, Message);
X->setTag(getTag());
return std::move(X);
}
@@ -1857,11 +2094,33 @@ TrackControlDependencyCondBRVisitor::VisitNode(const ExplodedNode *N,
return nullptr;
if (const Expr *Condition = NB->getLastCondition()) {
+
+ // If we can't retrieve a sensible condition, just bail out.
+ const Expr *InnerExpr = peelOffOuterExpr(Condition, N);
+ if (!InnerExpr)
+ return nullptr;
+
+ // If the condition was a function call, we likely won't gain much from
+ // tracking it either. Evidence suggests that it will mostly trigger in
+ // scenarios like this:
+ //
+ // void f(int *x) {
+ // x = nullptr;
+ // if (alwaysTrue()) // We don't need a whole lot of explanation
+ // // here, the function name is good enough.
+ // *x = 5;
+ // }
+ //
+ // Its easy to create a counterexample where this heuristic would make us
+ // lose valuable information, but we've never really seen one in practice.
+ if (isa<CallExpr>(InnerExpr))
+ return nullptr;
+
// Keeping track of the already tracked conditions on a visitor level
// isn't sufficient, because a new visitor is created for each tracked
// expression, hence the BugReport level set.
if (BR.addTrackedCondition(N)) {
- getParentTracker().track(Condition, N,
+ getParentTracker().track(InnerExpr, N,
{bugreporter::TrackingKind::Condition,
/*EnableNullFPSuppression=*/false});
return constructDebugPieceForTrackedCondition(Condition, N, BRC);
@@ -1876,10 +2135,8 @@ TrackControlDependencyCondBRVisitor::VisitNode(const ExplodedNode *N,
// Implementation of trackExpressionValue.
//===----------------------------------------------------------------------===//
-/// \return A subexpression of @c Ex which represents the
-/// expression-of-interest.
-static const Expr *peelOffOuterExpr(const Expr *Ex,
- const ExplodedNode *N) {
+static const Expr *peelOffOuterExpr(const Expr *Ex, const ExplodedNode *N) {
+
Ex = Ex->IgnoreParenCasts();
if (const auto *FE = dyn_cast<FullExpr>(Ex))
return peelOffOuterExpr(FE->getSubExpr(), N);
@@ -1902,7 +2159,7 @@ static const Expr *peelOffOuterExpr(const Expr *Ex,
const ExplodedNode *NI = N;
do {
ProgramPoint ProgPoint = NI->getLocation();
- if (Optional<BlockEdge> BE = ProgPoint.getAs<BlockEdge>()) {
+ if (std::optional<BlockEdge> BE = ProgPoint.getAs<BlockEdge>()) {
const CFGBlock *srcBlk = BE->getSrc();
if (const Stmt *term = srcBlk->getTerminatorStmt()) {
if (term == CO) {
@@ -1977,6 +2234,7 @@ PathDiagnosticPieceRef StoreHandler::constructNote(StoreInfo SI,
return std::make_shared<PathDiagnosticEventPiece>(L, NodeText);
}
+namespace {
class DefaultStoreHandler final : public StoreHandler {
public:
using StoreHandler::StoreHandler;
@@ -2125,8 +2383,9 @@ public:
// null.
if (V.getAsLocSymbol(/*IncludeBaseRegions=*/true))
if (LVState->isNull(V).isConstrainedTrue())
- Report.addVisitor<TrackConstraintBRVisitor>(V.castAs<DefinedSVal>(),
- false);
+ Report.addVisitor<TrackConstraintBRVisitor>(
+ V.castAs<DefinedSVal>(),
+ /*Assumption=*/false, "Assuming pointer value is null");
// Add visitor, which will suppress inline defensive checks.
if (auto DV = V.getAs<DefinedSVal>())
@@ -2174,7 +2433,8 @@ class InlinedFunctionCallHandler final : public ExpressionHandler {
do {
// If that is satisfied we found our statement as an inlined call.
- if (Optional<CallExitEnd> CEE = ExprNode->getLocationAs<CallExitEnd>())
+ if (std::optional<CallExitEnd> CEE =
+ ExprNode->getLocationAs<CallExitEnd>())
if (CEE->getCalleeContext()->getCallSite() == E)
break;
@@ -2189,7 +2449,7 @@ class InlinedFunctionCallHandler final : public ExpressionHandler {
// FIXME: This code currently bypasses the call site for the
// conservatively evaluated allocator.
if (!BypassCXXNewExprEval)
- if (Optional<StmtPoint> SP = ExprNode->getLocationAs<StmtPoint>())
+ if (std::optional<StmtPoint> SP = ExprNode->getLocationAs<StmtPoint>())
// See if we do not enter into another context.
if (SP->getStmt() == E && CurrentSFC == PredSFC)
break;
@@ -2204,7 +2464,7 @@ class InlinedFunctionCallHandler final : public ExpressionHandler {
return {};
// Finally, see if we inlined the call.
- Optional<CallExitEnd> CEE = ExprNode->getLocationAs<CallExitEnd>();
+ std::optional<CallExitEnd> CEE = ExprNode->getLocationAs<CallExitEnd>();
if (!CEE)
return {};
@@ -2218,7 +2478,7 @@ class InlinedFunctionCallHandler final : public ExpressionHandler {
// Handle cases where a reference is returned and then immediately used.
if (cast<Expr>(E)->isGLValue())
- if (Optional<Loc> LValue = RetVal.getAs<Loc>())
+ if (std::optional<Loc> LValue = RetVal.getAs<Loc>())
RetVal = State->getSVal(*LValue);
// See if the return value is NULL. If so, suppress the report.
@@ -2226,7 +2486,7 @@ class InlinedFunctionCallHandler final : public ExpressionHandler {
bool EnableNullFPSuppression = false;
if (Opts.EnableNullFPSuppression && Options.ShouldSuppressNullReturnPaths)
- if (Optional<Loc> RetLoc = RetVal.getAs<Loc>())
+ if (std::optional<Loc> RetLoc = RetVal.getAs<Loc>())
EnableNullFPSuppression = State->isNull(*RetLoc).isConstrainedTrue();
PathSensitiveBugReport &Report = getParentTracker().getReport();
@@ -2261,7 +2521,7 @@ public:
// what is written inside the pointer.
bool CanDereference = true;
if (const auto *SR = L->getRegionAs<SymbolicRegion>()) {
- if (SR->getSymbol()->getType()->getPointeeType()->isVoidType())
+ if (SR->getPointeeStaticType()->isVoidType())
CanDereference = false;
} else if (L->getRegionAs<AllocaRegion>())
CanDereference = false;
@@ -2271,7 +2531,7 @@ public:
// well. Try to use the correct type when looking up the value.
SVal RVal;
if (ExplodedGraph::isInterestingLValueExpr(Inner))
- RVal = LVState->getRawSVal(L.getValue(), Inner->getType());
+ RVal = LVState->getRawSVal(*L, Inner->getType());
else if (CanDereference)
RVal = LVState->getSVal(L->getRegion());
@@ -2289,7 +2549,7 @@ public:
Report.markInteresting(RegionRVal, Opts.Kind);
Report.addVisitor<TrackConstraintBRVisitor>(
loc::MemRegionVal(RegionRVal),
- /*assumption=*/false);
+ /*Assumption=*/false, "Assuming pointer value is null");
Result.FoundSomethingToTrack = true;
}
}
@@ -2314,6 +2574,29 @@ public:
if (!RVNode)
return {};
+ Tracker::Result CombinedResult;
+ Tracker &Parent = getParentTracker();
+
+ const auto track = [&CombinedResult, &Parent, ExprNode,
+ Opts](const Expr *Inner) {
+ CombinedResult.combineWith(Parent.track(Inner, ExprNode, Opts));
+ };
+
+ // FIXME: Initializer lists can appear in many different contexts
+ // and most of them needs a special handling. For now let's handle
+ // what we can. If the initializer list only has 1 element, we track
+ // that.
+ // This snippet even handles nesting, e.g.: int *x{{{{{y}}}}};
+ if (const auto *ILE = dyn_cast<InitListExpr>(E)) {
+ if (ILE->getNumInits() == 1) {
+ track(ILE->getInit(0));
+
+ return CombinedResult;
+ }
+
+ return {};
+ }
+
ProgramStateRef RVState = RVNode->getState();
SVal V = RVState->getSValAsScalarOrLoc(E, RVNode->getLocationContext());
const auto *BO = dyn_cast<BinaryOperator>(E);
@@ -2325,13 +2608,6 @@ public:
SVal LHSV = RVState->getSVal(BO->getLHS(), RVNode->getLocationContext());
// Track both LHS and RHS of a multiplication.
- Tracker::Result CombinedResult;
- Tracker &Parent = getParentTracker();
-
- const auto track = [&CombinedResult, &Parent, ExprNode, Opts](Expr *Inner) {
- CombinedResult.combineWith(Parent.track(Inner, ExprNode, Opts));
- };
-
if (BO->getOpcode() == BO_Mul) {
if (LHSV.isZeroConstant())
track(BO->getLHS());
@@ -2345,6 +2621,7 @@ public:
return CombinedResult;
}
};
+} // namespace
Tracker::Tracker(PathSensitiveBugReport &Report) : Report(Report) {
// Default expression handlers.
@@ -2443,7 +2720,7 @@ const Expr *NilReceiverBRVisitor::getNilReceiver(const Stmt *S,
PathDiagnosticPieceRef
NilReceiverBRVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
PathSensitiveBugReport &BR) {
- Optional<PreStmt> P = N->getLocationAs<PreStmt>();
+ std::optional<PreStmt> P = N->getLocationAs<PreStmt>();
if (!P)
return nullptr;
@@ -2507,7 +2784,7 @@ ConditionBRVisitor::VisitNodeImpl(const ExplodedNode *N,
// If an assumption was made on a branch, it should be caught
// here by looking at the state transition.
- if (Optional<BlockEdge> BE = ProgPoint.getAs<BlockEdge>()) {
+ if (std::optional<BlockEdge> BE = ProgPoint.getAs<BlockEdge>()) {
const CFGBlock *SrcBlock = BE->getSrc();
if (const Stmt *Term = SrcBlock->getTerminatorStmt()) {
// If the tag of the previous node is 'Eagerly Assume...' the current
@@ -2524,7 +2801,7 @@ ConditionBRVisitor::VisitNodeImpl(const ExplodedNode *N,
return nullptr;
}
- if (Optional<PostStmt> PS = ProgPoint.getAs<PostStmt>()) {
+ if (std::optional<PostStmt> PS = ProgPoint.getAs<PostStmt>()) {
const ProgramPointTag *CurrentNodeTag = PS->getTag();
if (CurrentNodeTag != Tags.first && CurrentNodeTag != Tags.second)
return nullptr;
@@ -2663,20 +2940,17 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond, BugReporterContext &BRC,
Loc, TookTrue ? GenericTrueMessage : GenericFalseMessage);
}
-bool ConditionBRVisitor::patternMatch(const Expr *Ex,
- const Expr *ParentEx,
- raw_ostream &Out,
- BugReporterContext &BRC,
+bool ConditionBRVisitor::patternMatch(const Expr *Ex, const Expr *ParentEx,
+ raw_ostream &Out, BugReporterContext &BRC,
PathSensitiveBugReport &report,
const ExplodedNode *N,
- Optional<bool> &prunable,
+ std::optional<bool> &prunable,
bool IsSameFieldName) {
const Expr *OriginalExpr = Ex;
Ex = Ex->IgnoreParenCasts();
- if (isa<GNUNullExpr>(Ex) || isa<ObjCBoolLiteralExpr>(Ex) ||
- isa<CXXBoolLiteralExpr>(Ex) || isa<IntegerLiteral>(Ex) ||
- isa<FloatingLiteral>(Ex)) {
+ if (isa<GNUNullExpr, ObjCBoolLiteralExpr, CXXBoolLiteralExpr, IntegerLiteral,
+ FloatingLiteral>(Ex)) {
// Use heuristics to determine if the expression is a macro
// expanding to a literal and if so, use the macro's name.
SourceLocation BeginLoc = OriginalExpr->getBeginLoc();
@@ -2743,7 +3017,8 @@ bool ConditionBRVisitor::patternMatch(const Expr *Ex,
Out << '\''
<< Lexer::getSourceText(
CharSourceRange::getTokenRange(Ex->getSourceRange()),
- BRC.getSourceManager(), BRC.getASTContext().getLangOpts(), 0)
+ BRC.getSourceManager(), BRC.getASTContext().getLangOpts(),
+ nullptr)
<< '\'';
}
@@ -2755,7 +3030,7 @@ PathDiagnosticPieceRef ConditionBRVisitor::VisitTrueTest(
PathSensitiveBugReport &R, const ExplodedNode *N, bool TookTrue,
bool IsAssuming) {
bool shouldInvert = false;
- Optional<bool> shouldPrune;
+ std::optional<bool> shouldPrune;
// Check if the field name of the MemberExprs is ambiguous. Example:
// " 'a.d' is equal to 'h.d' " in 'test/Analysis/null-deref-path-notes.cpp'.
@@ -2865,8 +3140,8 @@ PathDiagnosticPieceRef ConditionBRVisitor::VisitTrueTest(
PathDiagnosticLocation Loc(Cond, SM, LCtx);
auto event = std::make_shared<PathDiagnosticEventPiece>(Loc, Message);
- if (shouldPrune.hasValue())
- event->setPrunable(shouldPrune.getValue());
+ if (shouldPrune)
+ event->setPrunable(*shouldPrune);
return event;
}
@@ -2989,20 +3264,20 @@ bool ConditionBRVisitor::printValue(const Expr *CondVarExpr, raw_ostream &Out,
if (!Ty->isIntegralOrEnumerationType())
return false;
- Optional<const llvm::APSInt *> IntValue;
+ std::optional<const llvm::APSInt *> IntValue;
if (!IsAssuming)
IntValue = getConcreteIntegerValue(CondVarExpr, N);
- if (IsAssuming || !IntValue.hasValue()) {
+ if (IsAssuming || !IntValue) {
if (Ty->isBooleanType())
Out << (TookTrue ? "true" : "false");
else
Out << (TookTrue ? "not equal to 0" : "0");
} else {
if (Ty->isBooleanType())
- Out << (IntValue.getValue()->getBoolValue() ? "true" : "false");
+ Out << ((*IntValue)->getBoolValue() ? "true" : "false");
else
- Out << *IntValue.getValue();
+ Out << **IntValue;
}
return true;
@@ -3097,7 +3372,7 @@ void LikelyFalsePositiveSuppressionBRVisitor::finalizeVisitor(
FullSourceLoc Loc = BR.getLocation().asLocation();
while (Loc.isMacroID()) {
Loc = Loc.getSpellingLoc();
- if (SM.getFilename(Loc).endswith("sys/queue.h")) {
+ if (SM.getFilename(Loc).ends_with("sys/queue.h")) {
BR.markInvalid(getTag(), nullptr);
return;
}
@@ -3115,7 +3390,7 @@ UndefOrNullArgVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
ProgramPoint ProgLoc = N->getLocation();
// We are only interested in visiting CallEnter nodes.
- Optional<CallEnter> CEnter = ProgLoc.getAs<CallEnter>();
+ std::optional<CallEnter> CEnter = ProgLoc.getAs<CallEnter>();
if (!CEnter)
return nullptr;
@@ -3194,11 +3469,11 @@ void FalsePositiveRefutationBRVisitor::finalizeVisitor(
}
// And check for satisfiability
- Optional<bool> IsSAT = RefutationSolver->check();
- if (!IsSAT.hasValue())
+ std::optional<bool> IsSAT = RefutationSolver->check();
+ if (!IsSAT)
return;
- if (!IsSAT.getValue())
+ if (!*IsSAT)
BR.markInvalid("Infeasible constraints", EndPathNode->getLocationContext());
}
@@ -3253,7 +3528,7 @@ PathDiagnosticPieceRef TagVisitor::VisitNode(const ExplodedNode *N,
if (!T)
return nullptr;
- if (Optional<std::string> Msg = T->generateMessage(BRC, R)) {
+ if (std::optional<std::string> Msg = T->generateMessage(BRC, R)) {
PathDiagnosticLocation Loc =
PathDiagnosticLocation::create(PP, BRC.getSourceManager());
auto Piece = std::make_shared<PathDiagnosticEventPiece>(Loc, *Msg);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugSuppression.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugSuppression.cpp
new file mode 100644
index 000000000000..b5991e47a538
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugSuppression.cpp
@@ -0,0 +1,169 @@
+//===- BugSuppression.cpp - Suppression interface -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/BugReporter/BugSuppression.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+using Ranges = llvm::SmallVectorImpl<SourceRange>;
+
+inline bool hasSuppression(const Decl *D) {
+ // FIXME: Implement diagnostic identifier arguments
+ // (checker names, "hashtags").
+ if (const auto *Suppression = D->getAttr<SuppressAttr>())
+ return !Suppression->isGSL() &&
+ (Suppression->diagnosticIdentifiers().empty());
+ return false;
+}
+inline bool hasSuppression(const AttributedStmt *S) {
+ // FIXME: Implement diagnostic identifier arguments
+ // (checker names, "hashtags").
+ return llvm::any_of(S->getAttrs(), [](const Attr *A) {
+ const auto *Suppression = dyn_cast<SuppressAttr>(A);
+ return Suppression && !Suppression->isGSL() &&
+ (Suppression->diagnosticIdentifiers().empty());
+ });
+}
+
+template <class NodeType> inline SourceRange getRange(const NodeType *Node) {
+ return Node->getSourceRange();
+}
+template <> inline SourceRange getRange(const AttributedStmt *S) {
+ // Begin location for attributed statement node seems to be ALWAYS invalid.
+ //
+ // It is unlikely that we ever report any warnings on suppression
+ // attribute itself, but even if we do, we wouldn't want that warning
+ // to be suppressed by that same attribute.
+ //
+ // Long story short, we can use inner statement and it's not going to break
+ // anything.
+ return getRange(S->getSubStmt());
+}
+
+inline bool isLessOrEqual(SourceLocation LHS, SourceLocation RHS,
+ const SourceManager &SM) {
+ // SourceManager::isBeforeInTranslationUnit tests for strict
+ // inequality, when we need a non-strict comparison (bug
+ // can be reported directly on the annotated note).
+ // For this reason, we use the following equivalence:
+ //
+ // A <= B <==> !(B < A)
+ //
+ return !SM.isBeforeInTranslationUnit(RHS, LHS);
+}
+
+inline bool fullyContains(SourceRange Larger, SourceRange Smaller,
+ const SourceManager &SM) {
+ // Essentially this means:
+ //
+ // Larger.fullyContains(Smaller)
+ //
+ // However, that method has a very trivial implementation and couldn't
+ // compare regular locations and locations from macro expansions.
+ // We could've converted everything into regular locations as a solution,
+ // but the following solution seems to be the most bulletproof.
+ return isLessOrEqual(Larger.getBegin(), Smaller.getBegin(), SM) &&
+ isLessOrEqual(Smaller.getEnd(), Larger.getEnd(), SM);
+}
+
+class CacheInitializer : public RecursiveASTVisitor<CacheInitializer> {
+public:
+ static void initialize(const Decl *D, Ranges &ToInit) {
+ CacheInitializer(ToInit).TraverseDecl(const_cast<Decl *>(D));
+ }
+
+ bool VisitVarDecl(VarDecl *VD) {
+ // Bug location could be somewhere in the init value of
+ // a freshly declared variable. Even though it looks like the
+ // user applied attribute to a statement, it will apply to a
+ // variable declaration, and this is where we check for it.
+ return VisitAttributedNode(VD);
+ }
+
+ bool VisitAttributedStmt(AttributedStmt *AS) {
+ // When we apply attributes to statements, it actually creates
+ // a wrapper statement that only contains attributes and the wrapped
+ // statement.
+ return VisitAttributedNode(AS);
+ }
+
+private:
+ template <class NodeType> bool VisitAttributedNode(NodeType *Node) {
+ if (hasSuppression(Node)) {
+ // TODO: In the future, when we come up with good stable IDs for checkers
+ // we can return a list of kinds to ignore, or all if no arguments
+ // were provided.
+ addRange(getRange(Node));
+ }
+ // We should keep traversing AST.
+ return true;
+ }
+
+ void addRange(SourceRange R) {
+ if (R.isValid()) {
+ Result.push_back(R);
+ }
+ }
+
+ CacheInitializer(Ranges &R) : Result(R) {}
+ Ranges &Result;
+};
+
+} // end anonymous namespace
+
+// TODO: Introduce stable IDs for checkers and check for those here
+// to be more specific. Attribute without arguments should still
+// be considered as "suppress all".
+// It is already much finer granularity than what we have now
+// (i.e. removing the whole function from the analysis).
+bool BugSuppression::isSuppressed(const BugReport &R) {
+ PathDiagnosticLocation Location = R.getLocation();
+ PathDiagnosticLocation UniqueingLocation = R.getUniqueingLocation();
+ const Decl *DeclWithIssue = R.getDeclWithIssue();
+
+ return isSuppressed(Location, DeclWithIssue, {}) ||
+ isSuppressed(UniqueingLocation, DeclWithIssue, {});
+}
+
+bool BugSuppression::isSuppressed(const PathDiagnosticLocation &Location,
+ const Decl *DeclWithIssue,
+ DiagnosticIdentifierList Hashtags) {
+ if (!Location.isValid() || DeclWithIssue == nullptr)
+ return false;
+
+ // While some warnings are attached to AST nodes (mostly path-sensitive
+ // checks), others are simply associated with a plain source location
+ // or range. Figuring out the node based on locations can be tricky,
+ // so instead, we traverse the whole body of the declaration and gather
+ // information on ALL suppressions. After that we can simply check if
+ // any of those suppressions affect the warning in question.
+ //
+ // Traversing AST of a function is not a heavy operation, but for
+ // large functions with a lot of bugs it can make a dent in performance.
+ // In order to avoid this scenario, we cache traversal results.
+ auto InsertionResult = CachedSuppressionLocations.insert(
+ std::make_pair(DeclWithIssue, CachedRanges{}));
+ Ranges &SuppressionRanges = InsertionResult.first->second;
+ if (InsertionResult.second) {
+ // We haven't checked this declaration for suppressions yet!
+ CacheInitializer::initialize(DeclWithIssue, SuppressionRanges);
+ }
+
+ SourceRange BugRange = Location.asRange();
+ const SourceManager &SM = Location.getManager();
+
+ return llvm::any_of(SuppressionRanges,
+ [BugRange, &SM](SourceRange Suppression) {
+ return fullyContains(Suppression, BugRange, SM);
+ });
+}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallDescription.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallDescription.cpp
new file mode 100644
index 000000000000..94b2fde0a6f3
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallDescription.cpp
@@ -0,0 +1,169 @@
+//===- CallDescription.cpp - function/method call matching --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file defines a generic mechanism for matching for function and
+/// method calls of C, C++, and Objective-C languages. Instances of these
+/// classes are frequently used together with the CallEvent classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
+#include "clang/AST/Decl.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/ArrayRef.h"
+#include <iterator>
+#include <optional>
+
+using namespace llvm;
+using namespace clang;
+
+using MaybeCount = std::optional<unsigned>;
+
+// A constructor helper.
+static MaybeCount readRequiredParams(MaybeCount RequiredArgs,
+ MaybeCount RequiredParams) {
+ if (RequiredParams)
+ return RequiredParams;
+ if (RequiredArgs)
+ return RequiredArgs;
+ return std::nullopt;
+}
+
+ento::CallDescription::CallDescription(CallDescriptionFlags Flags,
+ ArrayRef<StringRef> QualifiedName,
+ MaybeCount RequiredArgs /*= None*/,
+ MaybeCount RequiredParams /*= None*/)
+ : RequiredArgs(RequiredArgs),
+ RequiredParams(readRequiredParams(RequiredArgs, RequiredParams)),
+ Flags(Flags) {
+ assert(!QualifiedName.empty());
+ this->QualifiedName.reserve(QualifiedName.size());
+ llvm::transform(QualifiedName, std::back_inserter(this->QualifiedName),
+ [](StringRef From) { return From.str(); });
+}
+
+/// Construct a CallDescription with default flags.
+ento::CallDescription::CallDescription(ArrayRef<StringRef> QualifiedName,
+ MaybeCount RequiredArgs /*= None*/,
+ MaybeCount RequiredParams /*= None*/)
+ : CallDescription(CDF_None, QualifiedName, RequiredArgs, RequiredParams) {}
+
+bool ento::CallDescription::matches(const CallEvent &Call) const {
+ // FIXME: Add ObjC Message support.
+ if (Call.getKind() == CE_ObjCMessage)
+ return false;
+
+ const auto *FD = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
+ if (!FD)
+ return false;
+
+ return matchesImpl(FD, Call.getNumArgs(), Call.parameters().size());
+}
+
+bool ento::CallDescription::matchesAsWritten(const CallExpr &CE) const {
+ const auto *FD = dyn_cast_or_null<FunctionDecl>(CE.getCalleeDecl());
+ if (!FD)
+ return false;
+
+ return matchesImpl(FD, CE.getNumArgs(), FD->param_size());
+}
+
+bool ento::CallDescription::matchesImpl(const FunctionDecl *Callee,
+ size_t ArgCount,
+ size_t ParamCount) const {
+ const auto *FD = Callee;
+ if (!FD)
+ return false;
+
+ if (Flags & CDF_MaybeBuiltin) {
+ return CheckerContext::isCLibraryFunction(FD, getFunctionName()) &&
+ (!RequiredArgs || *RequiredArgs <= ArgCount) &&
+ (!RequiredParams || *RequiredParams <= ParamCount);
+ }
+
+ if (!II) {
+ II = &FD->getASTContext().Idents.get(getFunctionName());
+ }
+
+ const auto MatchNameOnly = [](const CallDescription &CD,
+ const NamedDecl *ND) -> bool {
+ DeclarationName Name = ND->getDeclName();
+ if (const auto *II = Name.getAsIdentifierInfo())
+ return II == *CD.II; // Fast case.
+
+ // Fallback to the slow stringification and comparison for:
+ // C++ overloaded operators, constructors, destructors, etc.
+ // FIXME This comparison is way SLOWER than comparing pointers.
+ // At some point in the future, we should compare FunctionDecl pointers.
+ return Name.getAsString() == CD.getFunctionName();
+ };
+
+ const auto ExactMatchArgAndParamCounts =
+ [](size_t ArgCount, size_t ParamCount,
+ const CallDescription &CD) -> bool {
+ const bool ArgsMatch = !CD.RequiredArgs || *CD.RequiredArgs == ArgCount;
+ const bool ParamsMatch =
+ !CD.RequiredParams || *CD.RequiredParams == ParamCount;
+ return ArgsMatch && ParamsMatch;
+ };
+
+ const auto MatchQualifiedNameParts = [](const CallDescription &CD,
+ const Decl *D) -> bool {
+ const auto FindNextNamespaceOrRecord =
+ [](const DeclContext *Ctx) -> const DeclContext * {
+ while (Ctx && !isa<NamespaceDecl, RecordDecl>(Ctx))
+ Ctx = Ctx->getParent();
+ return Ctx;
+ };
+
+ auto QualifierPartsIt = CD.begin_qualified_name_parts();
+ const auto QualifierPartsEndIt = CD.end_qualified_name_parts();
+
+ // Match namespace and record names. Skip unrelated names if they don't
+ // match.
+ const DeclContext *Ctx = FindNextNamespaceOrRecord(D->getDeclContext());
+ for (; Ctx && QualifierPartsIt != QualifierPartsEndIt;
+ Ctx = FindNextNamespaceOrRecord(Ctx->getParent())) {
+ // If not matched just continue and try matching for the next one.
+ if (cast<NamedDecl>(Ctx)->getName() != *QualifierPartsIt)
+ continue;
+ ++QualifierPartsIt;
+ }
+
+ // We matched if we consumed all expected qualifier segments.
+ return QualifierPartsIt == QualifierPartsEndIt;
+ };
+
+ // Let's start matching...
+ if (!ExactMatchArgAndParamCounts(ArgCount, ParamCount, *this))
+ return false;
+
+ if (!MatchNameOnly(*this, FD))
+ return false;
+
+ if (!hasQualifiedNameParts())
+ return true;
+
+ return MatchQualifiedNameParts(*this, FD);
+}
+
+ento::CallDescriptionSet::CallDescriptionSet(
+ std::initializer_list<CallDescription> &&List) {
+ Impl.LinearMap.reserve(List.size());
+ for (const CallDescription &CD : List)
+ Impl.LinearMap.push_back({CD, /*unused*/ true});
+}
+
+bool ento::CallDescriptionSet::contains(const CallEvent &Call) const {
+ return static_cast<bool>(Impl.lookup(Call));
+}
+
+bool ento::CallDescriptionSet::containsAsWritten(const CallExpr &CE) const {
+ return static_cast<bool>(Impl.lookupAsWritten(CE));
+}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
index 3785f498414f..bc14aea27f67 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -36,6 +36,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/CrossTU/CrossTranslationUnit.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h"
@@ -48,8 +49,6 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ImmutableList.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
@@ -61,6 +60,7 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
+#include <optional>
#include <utility>
#define DEBUG_TYPE "static-analyzer-call-event"
@@ -73,26 +73,7 @@ QualType CallEvent::getResultType() const {
const Expr *E = getOriginExpr();
if (!E)
return Ctx.VoidTy;
- assert(E);
-
- QualType ResultTy = E->getType();
-
- // A function that returns a reference to 'int' will have a result type
- // of simply 'int'. Check the origin expr's value kind to recover the
- // proper type.
- switch (E->getValueKind()) {
- case VK_LValue:
- ResultTy = Ctx.getLValueReferenceType(ResultTy);
- break;
- case VK_XValue:
- ResultTy = Ctx.getRValueReferenceType(ResultTy);
- break;
- case VK_PRValue:
- // No adjustment is necessary.
- break;
- }
-
- return ResultTy;
+ return Ctx.getReferenceQualifiedType(E);
}
static bool isCallback(QualType T) {
@@ -306,6 +287,7 @@ ProgramStateRef CallEvent::invalidateRegions(unsigned BlockCount,
ProgramPoint CallEvent::getProgramPoint(bool IsPreVisit,
const ProgramPointTag *Tag) const {
+
if (const Expr *E = getOriginExpr()) {
if (IsPreVisit)
return PreStmt(E, getLocationContext(), Tag);
@@ -314,69 +296,13 @@ ProgramPoint CallEvent::getProgramPoint(bool IsPreVisit,
const Decl *D = getDecl();
assert(D && "Cannot get a program point without a statement or decl");
+ assert(ElemRef.getParent() &&
+ "Cannot get a program point without a CFGElementRef");
SourceLocation Loc = getSourceRange().getBegin();
if (IsPreVisit)
- return PreImplicitCall(D, Loc, getLocationContext(), Tag);
- return PostImplicitCall(D, Loc, getLocationContext(), Tag);
-}
-
-bool CallEvent::isCalled(const CallDescription &CD) const {
- // FIXME: Add ObjC Message support.
- if (getKind() == CE_ObjCMessage)
- return false;
-
- const IdentifierInfo *II = getCalleeIdentifier();
- if (!II)
- return false;
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(getDecl());
- if (!FD)
- return false;
-
- if (CD.Flags & CDF_MaybeBuiltin) {
- return CheckerContext::isCLibraryFunction(FD, CD.getFunctionName()) &&
- (!CD.RequiredArgs || CD.RequiredArgs <= getNumArgs()) &&
- (!CD.RequiredParams || CD.RequiredParams <= parameters().size());
- }
-
- if (!CD.IsLookupDone) {
- CD.IsLookupDone = true;
- CD.II = &getState()->getStateManager().getContext().Idents.get(
- CD.getFunctionName());
- }
-
- if (II != CD.II)
- return false;
-
- // If CallDescription provides prefix names, use them to improve matching
- // accuracy.
- if (CD.QualifiedName.size() > 1 && FD) {
- const DeclContext *Ctx = FD->getDeclContext();
- // See if we'll be able to match them all.
- size_t NumUnmatched = CD.QualifiedName.size() - 1;
- for (; Ctx && isa<NamedDecl>(Ctx); Ctx = Ctx->getParent()) {
- if (NumUnmatched == 0)
- break;
-
- if (const auto *ND = dyn_cast<NamespaceDecl>(Ctx)) {
- if (ND->getName() == CD.QualifiedName[NumUnmatched - 1])
- --NumUnmatched;
- continue;
- }
-
- if (const auto *RD = dyn_cast<RecordDecl>(Ctx)) {
- if (RD->getName() == CD.QualifiedName[NumUnmatched - 1])
- --NumUnmatched;
- continue;
- }
- }
-
- if (NumUnmatched > 0)
- return false;
- }
-
- return (!CD.RequiredArgs || CD.RequiredArgs == getNumArgs()) &&
- (!CD.RequiredParams || CD.RequiredParams == parameters().size());
+ return PreImplicitCall(D, Loc, getLocationContext(), ElemRef, Tag);
+ return PostImplicitCall(D, Loc, getLocationContext(), ElemRef, Tag);
}
SVal CallEvent::getArgSVal(unsigned Index) const {
@@ -406,7 +332,6 @@ void CallEvent::dump(raw_ostream &Out) const {
ASTContext &Ctx = getState()->getStateManager().getContext();
if (const Expr *E = getOriginExpr()) {
E->printPretty(Out, nullptr, Ctx.getPrintingPolicy());
- Out << "\n";
return;
}
@@ -420,9 +345,7 @@ void CallEvent::dump(raw_ostream &Out) const {
}
bool CallEvent::isCallStmt(const Stmt *S) {
- return isa<CallExpr>(S) || isa<ObjCMessageExpr>(S)
- || isa<CXXConstructExpr>(S)
- || isa<CXXNewExpr>(S);
+ return isa<CallExpr, ObjCMessageExpr, CXXConstructExpr, CXXNewExpr>(S);
}
QualType CallEvent::getDeclaredResultType(const Decl *D) {
@@ -503,6 +426,38 @@ static SVal processArgument(SVal Value, const Expr *ArgumentExpr,
return Value;
}
+/// Cast the argument value to the type of the parameter at the function
+/// declaration.
+/// Returns the argument value if it didn't need a cast.
+/// Or returns the cast argument if it needed a cast.
+/// Or returns 'Unknown' if it would need a cast but the callsite and the
+/// runtime definition don't match in terms of argument and parameter count.
+static SVal castArgToParamTypeIfNeeded(const CallEvent &Call, unsigned ArgIdx,
+ SVal ArgVal, SValBuilder &SVB) {
+ const FunctionDecl *RTDecl =
+ Call.getRuntimeDefinition().getDecl()->getAsFunction();
+ const auto *CallExprDecl = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
+
+ if (!RTDecl || !CallExprDecl)
+ return ArgVal;
+
+ // The function decl of the Call (in the AST) will not have any parameter
+ // declarations, if it was 'only' declared without a prototype. However, the
+ // engine will find the appropriate runtime definition - basically a
+ // redeclaration, which has a function body (and a function prototype).
+ if (CallExprDecl->hasPrototype() || !RTDecl->hasPrototype())
+ return ArgVal;
+
+ // Only do this cast if the number arguments at the callsite matches with
+ // the parameters at the runtime definition.
+ if (Call.getNumArgs() != RTDecl->getNumParams())
+ return UnknownVal();
+
+ const Expr *ArgExpr = Call.getArgExpr(ArgIdx);
+ const ParmVarDecl *Param = RTDecl->getParamDecl(ArgIdx);
+ return SVB.evalCast(ArgVal, Param->getType(), ArgExpr->getType());
+}
+
static void addParameterValuesToBindings(const StackFrameContext *CalleeCtx,
CallEvent::BindingsTy &Bindings,
SValBuilder &SVB,
@@ -528,12 +483,18 @@ static void addParameterValuesToBindings(const StackFrameContext *CalleeCtx,
// which makes getArgSVal() fail and return UnknownVal.
SVal ArgVal = Call.getArgSVal(Idx);
const Expr *ArgExpr = Call.getArgExpr(Idx);
- if (!ArgVal.isUnknown()) {
- Loc ParamLoc = SVB.makeLoc(
- MRMgr.getParamVarRegion(Call.getOriginExpr(), Idx, CalleeCtx));
- Bindings.push_back(
- std::make_pair(ParamLoc, processArgument(ArgVal, ArgExpr, *I, SVB)));
- }
+
+ if (ArgVal.isUnknown())
+ continue;
+
+ // Cast the argument value to match the type of the parameter in some
+ // edge-cases.
+ ArgVal = castArgToParamTypeIfNeeded(Call, Idx, ArgVal, SVB);
+
+ Loc ParamLoc = SVB.makeLoc(
+ MRMgr.getParamVarRegion(Call.getOriginExpr(), Idx, CalleeCtx));
+ Bindings.push_back(
+ std::make_pair(ParamLoc, processArgument(ArgVal, ArgExpr, *I, SVB)));
}
// FIXME: Variadic arguments are not handled at all right now.
@@ -556,24 +517,43 @@ const ConstructionContext *CallEvent::getConstructionContext() const {
return nullptr;
}
-Optional<SVal>
-CallEvent::getReturnValueUnderConstruction() const {
+const CallEventRef<> CallEvent::getCaller() const {
+ const auto *CallLocationContext = this->getLocationContext();
+ if (!CallLocationContext || CallLocationContext->inTopFrame())
+ return nullptr;
+
+ const auto *CallStackFrameContext = CallLocationContext->getStackFrame();
+ if (!CallStackFrameContext)
+ return nullptr;
+
+ CallEventManager &CEMgr = State->getStateManager().getCallEventManager();
+ return CEMgr.getCaller(CallStackFrameContext, State);
+}
+
+bool CallEvent::isCalledFromSystemHeader() const {
+ if (const CallEventRef<> Caller = getCaller())
+ return Caller->isInSystemHeader();
+
+ return false;
+}
+
+std::optional<SVal> CallEvent::getReturnValueUnderConstruction() const {
const auto *CC = getConstructionContext();
if (!CC)
- return None;
+ return std::nullopt;
EvalCallOptions CallOpts;
ExprEngine &Engine = getState()->getStateManager().getOwningEngine();
- SVal RetVal =
- Engine.computeObjectUnderConstruction(getOriginExpr(), getState(),
- getLocationContext(), CC, CallOpts);
+ SVal RetVal = Engine.computeObjectUnderConstruction(
+ getOriginExpr(), getState(), &Engine.getBuilderContext(),
+ getLocationContext(), CC, CallOpts);
return RetVal;
}
ArrayRef<ParmVarDecl*> AnyFunctionCall::parameters() const {
const FunctionDecl *D = getDecl();
if (!D)
- return None;
+ return std::nullopt;
return D->parameters();
}
@@ -594,20 +574,28 @@ RuntimeDefinition AnyFunctionCall::getRuntimeDefinition() const {
llvm::dbgs() << "Using autosynthesized body for " << FD->getName()
<< "\n";
});
- if (Body) {
- const Decl* Decl = AD->getDecl();
- return RuntimeDefinition(Decl);
- }
ExprEngine &Engine = getState()->getStateManager().getOwningEngine();
+ cross_tu::CrossTranslationUnitContext &CTUCtx =
+ *Engine.getCrossTranslationUnitContext();
+
AnalyzerOptions &Opts = Engine.getAnalysisManager().options;
+ if (Body) {
+ const Decl* Decl = AD->getDecl();
+ if (Opts.IsNaiveCTUEnabled && CTUCtx.isImportedAsNew(Decl)) {
+ // A newly created definition, but we had error(s) during the import.
+ if (CTUCtx.hasError(Decl))
+ return {};
+ return RuntimeDefinition(Decl, /*Foreign=*/true);
+ }
+ return RuntimeDefinition(Decl, /*Foreign=*/false);
+ }
+
// Try to get CTU definition only if CTUDir is provided.
if (!Opts.IsNaiveCTUEnabled)
return {};
- cross_tu::CrossTranslationUnitContext &CTUCtx =
- *Engine.getCrossTranslationUnitContext();
llvm::Expected<const FunctionDecl *> CTUDeclOrError =
CTUCtx.getCrossTUDefinition(FD, Opts.CTUDir, Opts.CTUIndexName,
Opts.DisplayCTUProgress);
@@ -620,7 +608,7 @@ RuntimeDefinition AnyFunctionCall::getRuntimeDefinition() const {
return {};
}
- return RuntimeDefinition(*CTUDeclOrError);
+ return RuntimeDefinition(*CTUDeclOrError, /*Foreign=*/true);
}
void AnyFunctionCall::getInitialStackFrameContents(
@@ -671,17 +659,17 @@ bool AnyFunctionCall::argumentsMayEscape() const {
// - CoreFoundation functions that end with "NoCopy" can free a passed-in
// buffer even if it is const.
- if (FName.endswith("NoCopy"))
+ if (FName.ends_with("NoCopy"))
return true;
// - NSXXInsertXX, for example NSMapInsertIfAbsent, since they can
// be deallocated by NSMapRemove.
- if (FName.startswith("NS") && (FName.find("Insert") != StringRef::npos))
+ if (FName.starts_with("NS") && FName.contains("Insert"))
return true;
// - Many CF containers allow objects to escape through custom
// allocators/deallocators upon container construction. (PR12101)
- if (FName.startswith("CF") || FName.startswith("CG")) {
+ if (FName.starts_with("CF") || FName.starts_with("CG")) {
return StrInStrNoCase(FName, "InsertValue") != StringRef::npos ||
StrInStrNoCase(FName, "AddValue") != StringRef::npos ||
StrInStrNoCase(FName, "SetValue") != StringRef::npos ||
@@ -747,11 +735,15 @@ void CXXInstanceCall::getExtraInvalidatedValues(
SVal CXXInstanceCall::getCXXThisVal() const {
const Expr *Base = getCXXThisExpr();
// FIXME: This doesn't handle an overloaded ->* operator.
- if (!Base)
- return UnknownVal();
+ SVal ThisVal = Base ? getSVal(Base) : UnknownVal();
- SVal ThisVal = getSVal(Base);
- assert(ThisVal.isUnknownOrUndef() || ThisVal.getAs<Loc>());
+ if (isa<NonLoc>(ThisVal)) {
+ SValBuilder &SVB = getState()->getStateManager().getSValBuilder();
+ QualType OriginalTy = ThisVal.getType(SVB.getContext());
+ return SVB.evalCast(ThisVal, Base->getType(), OriginalTy);
+ }
+
+ assert(ThisVal.isUnknownOrUndef() || isa<Loc>(ThisVal));
return ThisVal;
}
@@ -797,8 +789,9 @@ RuntimeDefinition CXXInstanceCall::getRuntimeDefinition() const {
// the static type. However, because we currently don't update
// DynamicTypeInfo when an object is cast, we can't actually be sure the
// DynamicTypeInfo is up to date. This assert should be re-enabled once
- // this is fixed. <rdar://problem/12287087>
- //assert(!MD->getParent()->isDerivedFrom(RD) && "Bad DynamicTypeInfo");
+ // this is fixed.
+ //
+ // assert(!MD->getParent()->isDerivedFrom(RD) && "Bad DynamicTypeInfo");
return {};
}
@@ -841,9 +834,9 @@ void CXXInstanceCall::getInitialStackFrameContents(
QualType Ty = Ctx.getPointerType(Ctx.getRecordType(Class));
// FIXME: CallEvent maybe shouldn't be directly accessing StoreManager.
- bool Failed;
- ThisVal = StateMgr.getStoreManager().attemptDownCast(ThisVal, Ty, Failed);
- if (Failed) {
+ std::optional<SVal> V =
+ StateMgr.getStoreManager().evalBaseToDerived(ThisVal, Ty);
+ if (!V) {
// We might have suffered some sort of placement new earlier, so
// we're constructing in a completely unexpected storage.
// Fall back to a generic pointer cast for this-value.
@@ -851,7 +844,8 @@ void CXXInstanceCall::getInitialStackFrameContents(
const CXXRecordDecl *StaticClass = StaticMD->getParent();
QualType StaticTy = Ctx.getPointerType(Ctx.getRecordType(StaticClass));
ThisVal = SVB.evalCast(ThisVal, Ty, StaticTy);
- }
+ } else
+ ThisVal = *V;
}
if (!ThisVal.isUnknown())
@@ -889,7 +883,7 @@ const BlockDataRegion *BlockCall::getBlockRegion() const {
ArrayRef<ParmVarDecl*> BlockCall::parameters() const {
const BlockDecl *D = getDecl();
if (!D)
- return None;
+ return std::nullopt;
return D->parameters();
}
@@ -978,7 +972,7 @@ RuntimeDefinition CXXDestructorCall::getRuntimeDefinition() const {
ArrayRef<ParmVarDecl*> ObjCMethodCall::parameters() const {
const ObjCMethodDecl *D = getDecl();
if (!D)
- return None;
+ return std::nullopt;
return D->parameters();
}
@@ -1058,12 +1052,12 @@ const PseudoObjectExpr *ObjCMethodCall::getContainingPseudoObjectExpr() const {
static const Expr *
getSyntacticFromForPseudoObjectExpr(const PseudoObjectExpr *POE) {
- const Expr *Syntactic = POE->getSyntacticForm();
+ const Expr *Syntactic = POE->getSyntacticForm()->IgnoreParens();
// This handles the funny case of assigning to the result of a getter.
// This can happen if the getter returns a non-const reference.
if (const auto *BO = dyn_cast<BinaryOperator>(Syntactic))
- Syntactic = BO->getLHS();
+ Syntactic = BO->getLHS()->IgnoreParens();
return Syntactic;
}
@@ -1194,7 +1188,7 @@ static const ObjCMethodDecl *findDefiningRedecl(const ObjCMethodDecl *MD) {
// Find the redeclaration that defines the method.
if (!MD->hasBody()) {
- for (auto I : MD->redecls())
+ for (auto *I : MD->redecls())
if (I->hasBody())
MD = cast<ObjCMethodDecl>(I);
}
@@ -1255,14 +1249,14 @@ lookupRuntimeDefinition(const ObjCInterfaceDecl *Interface,
// stays around until clang quits, which also may be bad if we
// need to release memory.
using PrivateMethodCache =
- llvm::DenseMap<PrivateMethodKey, Optional<const ObjCMethodDecl *>>;
+ llvm::DenseMap<PrivateMethodKey, std::optional<const ObjCMethodDecl *>>;
static PrivateMethodCache PMC;
- Optional<const ObjCMethodDecl *> &Val =
+ std::optional<const ObjCMethodDecl *> &Val =
PMC[{Interface, LookupSelector, InstanceMethod}];
// Query lookupPrivateMethod() if the cache does not hit.
- if (!Val.hasValue()) {
+ if (!Val) {
Val = Interface->lookupPrivateMethod(LookupSelector, InstanceMethod);
if (!*Val) {
@@ -1271,7 +1265,7 @@ lookupRuntimeDefinition(const ObjCInterfaceDecl *Interface,
}
}
- return Val.getValue();
+ return *Val;
}
RuntimeDefinition ObjCMethodCall::getRuntimeDefinition() const {
@@ -1407,23 +1401,24 @@ void ObjCMethodCall::getInitialStackFrameContents(
CallEventRef<>
CallEventManager::getSimpleCall(const CallExpr *CE, ProgramStateRef State,
- const LocationContext *LCtx) {
+ const LocationContext *LCtx,
+ CFGBlock::ConstCFGElementRef ElemRef) {
if (const auto *MCE = dyn_cast<CXXMemberCallExpr>(CE))
- return create<CXXMemberCall>(MCE, State, LCtx);
+ return create<CXXMemberCall>(MCE, State, LCtx, ElemRef);
if (const auto *OpCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
const FunctionDecl *DirectCallee = OpCE->getDirectCallee();
if (const auto *MD = dyn_cast<CXXMethodDecl>(DirectCallee))
- if (MD->isInstance())
- return create<CXXMemberOperatorCall>(OpCE, State, LCtx);
+ if (MD->isImplicitObjectMemberFunction())
+ return create<CXXMemberOperatorCall>(OpCE, State, LCtx, ElemRef);
} else if (CE->getCallee()->getType()->isBlockPointerType()) {
- return create<BlockCall>(CE, State, LCtx);
+ return create<BlockCall>(CE, State, LCtx, ElemRef);
}
// Otherwise, it's a normal function call, static member function call, or
// something we can't reason about.
- return create<SimpleFunctionCall>(CE, State, LCtx);
+ return create<SimpleFunctionCall>(CE, State, LCtx, ElemRef);
}
CallEventRef<>
@@ -1431,12 +1426,14 @@ CallEventManager::getCaller(const StackFrameContext *CalleeCtx,
ProgramStateRef State) {
const LocationContext *ParentCtx = CalleeCtx->getParent();
const LocationContext *CallerCtx = ParentCtx->getStackFrame();
+ CFGBlock::ConstCFGElementRef ElemRef = {CalleeCtx->getCallSiteBlock(),
+ CalleeCtx->getIndex()};
assert(CallerCtx && "This should not be used for top-level stack frames");
const Stmt *CallSite = CalleeCtx->getCallSite();
if (CallSite) {
- if (CallEventRef<> Out = getCall(CallSite, State, CallerCtx))
+ if (CallEventRef<> Out = getCall(CallSite, State, CallerCtx, ElemRef))
return Out;
SValBuilder &SVB = State->getStateManager().getSValBuilder();
@@ -1445,10 +1442,11 @@ CallEventManager::getCaller(const StackFrameContext *CalleeCtx,
SVal ThisVal = State->getSVal(ThisPtr);
if (const auto *CE = dyn_cast<CXXConstructExpr>(CallSite))
- return getCXXConstructorCall(CE, ThisVal.getAsRegion(), State, CallerCtx);
+ return getCXXConstructorCall(CE, ThisVal.getAsRegion(), State, CallerCtx,
+ ElemRef);
else if (const auto *CIE = dyn_cast<CXXInheritedCtorInitExpr>(CallSite))
return getCXXInheritedConstructorCall(CIE, ThisVal.getAsRegion(), State,
- CallerCtx);
+ CallerCtx, ElemRef);
else {
// All other cases are handled by getCall.
llvm_unreachable("This is not an inlineable statement");
@@ -1468,26 +1466,30 @@ CallEventManager::getCaller(const StackFrameContext *CalleeCtx,
SVal ThisVal = State->getSVal(ThisPtr);
const Stmt *Trigger;
- if (Optional<CFGAutomaticObjDtor> AutoDtor = E.getAs<CFGAutomaticObjDtor>())
+ if (std::optional<CFGAutomaticObjDtor> AutoDtor =
+ E.getAs<CFGAutomaticObjDtor>())
Trigger = AutoDtor->getTriggerStmt();
- else if (Optional<CFGDeleteDtor> DeleteDtor = E.getAs<CFGDeleteDtor>())
+ else if (std::optional<CFGDeleteDtor> DeleteDtor = E.getAs<CFGDeleteDtor>())
Trigger = DeleteDtor->getDeleteExpr();
else
Trigger = Dtor->getBody();
return getCXXDestructorCall(Dtor, Trigger, ThisVal.getAsRegion(),
- E.getAs<CFGBaseDtor>().hasValue(), State,
- CallerCtx);
+ E.getAs<CFGBaseDtor>().has_value(), State,
+ CallerCtx, ElemRef);
}
CallEventRef<> CallEventManager::getCall(const Stmt *S, ProgramStateRef State,
- const LocationContext *LC) {
+ const LocationContext *LC,
+ CFGBlock::ConstCFGElementRef ElemRef) {
if (const auto *CE = dyn_cast<CallExpr>(S)) {
- return getSimpleCall(CE, State, LC);
+ return getSimpleCall(CE, State, LC, ElemRef);
} else if (const auto *NE = dyn_cast<CXXNewExpr>(S)) {
- return getCXXAllocatorCall(NE, State, LC);
+ return getCXXAllocatorCall(NE, State, LC, ElemRef);
+ } else if (const auto *DE = dyn_cast<CXXDeleteExpr>(S)) {
+ return getCXXDeallocatorCall(DE, State, LC, ElemRef);
} else if (const auto *ME = dyn_cast<ObjCMessageExpr>(S)) {
- return getObjCMethodCall(ME, State, LC);
+ return getObjCMethodCall(ME, State, LC, ElemRef);
} else {
return nullptr;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
index 3d64ce453479..d6d4cec9dd3d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
@@ -14,6 +14,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/Basic/Builtins.h"
#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/StringExtras.h"
using namespace clang;
using namespace ento;
@@ -38,7 +39,7 @@ StringRef CheckerContext::getCalleeName(const FunctionDecl *FunDecl) const {
}
StringRef CheckerContext::getDeclDescription(const Decl *D) {
- if (isa<ObjCMethodDecl>(D) || isa<CXXMethodDecl>(D))
+ if (isa<ObjCMethodDecl, CXXMethodDecl>(D))
return "method";
if (isa<BlockDecl>(D))
return "anonymous block";
@@ -55,8 +56,29 @@ bool CheckerContext::isCLibraryFunction(const FunctionDecl *FD,
if (Name.empty())
return true;
StringRef BName = FD->getASTContext().BuiltinInfo.getName(BId);
- if (BName.find(Name) != StringRef::npos)
- return true;
+ size_t start = BName.find(Name);
+ if (start != StringRef::npos) {
+ // Accept exact match.
+ if (BName.size() == Name.size())
+ return true;
+
+ // v-- match starts here
+ // ...xxxxx...
+ // _xxxxx_
+ // ^ ^ lookbehind and lookahead characters
+
+ const auto MatchPredecessor = [=]() -> bool {
+ return start <= 0 || !llvm::isAlpha(BName[start - 1]);
+ };
+ const auto MatchSuccessor = [=]() -> bool {
+ std::size_t LookbehindPlace = start + Name.size();
+ return LookbehindPlace >= BName.size() ||
+ !llvm::isAlpha(BName[LookbehindPlace]);
+ };
+
+ if (MatchPredecessor() && MatchSuccessor())
+ return true;
+ }
}
const IdentifierInfo *II = FD->getIdentifier();
@@ -83,11 +105,11 @@ bool CheckerContext::isCLibraryFunction(const FunctionDecl *FD,
if (FName.equals(Name))
return true;
- if (FName.startswith("__inline") && (FName.find(Name) != StringRef::npos))
+ if (FName.starts_with("__inline") && FName.contains(Name))
return true;
- if (FName.startswith("__") && FName.endswith("_chk") &&
- FName.find(Name) != StringRef::npos)
+ if (FName.starts_with("__") && FName.ends_with("_chk") &&
+ FName.contains(Name))
return true;
return false;
@@ -107,10 +129,10 @@ static bool evalComparison(SVal LHSVal, BinaryOperatorKind ComparisonOp,
if (LHSVal.isUnknownOrUndef())
return false;
ProgramStateManager &Mgr = State->getStateManager();
- if (!LHSVal.getAs<NonLoc>()) {
+ if (!isa<NonLoc>(LHSVal)) {
LHSVal = Mgr.getStoreManager().getBinding(State->getStore(),
LHSVal.castAs<Loc>());
- if (LHSVal.isUnknownOrUndef() || !LHSVal.getAs<NonLoc>())
+ if (LHSVal.isUnknownOrUndef() || !isa<NonLoc>(LHSVal))
return false;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
index 626ae1ae8066..84ad20a54807 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
@@ -14,6 +14,7 @@
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/Lex/Preprocessor.h"
+#include <optional>
namespace clang {
@@ -110,14 +111,13 @@ Nullability getNullabilityAnnotation(QualType Type) {
return Nullability::Unspecified;
}
-llvm::Optional<int> tryExpandAsInteger(StringRef Macro,
- const Preprocessor &PP) {
+std::optional<int> tryExpandAsInteger(StringRef Macro, const Preprocessor &PP) {
const auto *MacroII = PP.getIdentifierInfo(Macro);
if (!MacroII)
- return llvm::None;
+ return std::nullopt;
const MacroInfo *MI = PP.getMacroInfo(MacroII);
if (!MI)
- return llvm::None;
+ return std::nullopt;
// Filter out parens.
std::vector<Token> FilteredTokens;
@@ -131,12 +131,12 @@ llvm::Optional<int> tryExpandAsInteger(StringRef Macro,
// FIXME: EOF macro token coming from a PCH file on macOS while marked as
// literal, doesn't contain any literal data
if (!T.isLiteral() || !T.getLiteralData())
- return llvm::None;
+ return std::nullopt;
StringRef ValueStr = StringRef(T.getLiteralData(), T.getLength());
llvm::APInt IntValue;
constexpr unsigned AutoSenseRadix = 0;
if (ValueStr.getAsInteger(AutoSenseRadix, IntValue))
- return llvm::None;
+ return std::nullopt;
// Parse an optional minus sign.
size_t Size = FilteredTokens.size();
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
index e09399a83589..6fc16223ea82 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -26,7 +26,9 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormatVariadic.h"
#include <cassert>
+#include <optional>
#include <vector>
using namespace clang;
@@ -34,10 +36,7 @@ using namespace ento;
bool CheckerManager::hasPathSensitiveCheckers() const {
const auto IfAnyAreNonEmpty = [](const auto &... Callbacks) -> bool {
- bool Result = false;
- // FIXME: Use fold expressions in C++17.
- LLVM_ATTRIBUTE_UNUSED int Unused[]{0, (Result |= !Callbacks.empty())...};
- return Result;
+ return (!Callbacks.empty() || ...);
};
return IfAnyAreNonEmpty(
StmtCheckers, PreObjCMessageCheckers, ObjCMessageNilCheckers,
@@ -655,7 +654,7 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
ExprEngine &Eng,
const EvalCallOptions &CallOpts) {
for (auto *const Pred : Src) {
- bool anyEvaluated = false;
+ std::optional<CheckerNameRef> evaluatorChecker;
ExplodedNodeSet checkDst;
NodeBuilder B(Pred, checkDst, Eng.getBuilderContext());
@@ -674,10 +673,26 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
CheckerContext C(B, Eng, Pred, L);
evaluated = EvalCallChecker(Call, C);
}
- assert(!(evaluated && anyEvaluated)
- && "There are more than one checkers evaluating the call");
+#ifndef NDEBUG
+ if (evaluated && evaluatorChecker) {
+ const auto toString = [](const CallEvent &Call) -> std::string {
+ std::string Buf;
+ llvm::raw_string_ostream OS(Buf);
+ Call.dump(OS);
+ OS.flush();
+ return Buf;
+ };
+ std::string AssertionMessage = llvm::formatv(
+ "The '{0}' call has been already evaluated by the {1} checker, "
+ "while the {2} checker also tried to evaluate the same call. At "
+ "most one checker supposed to evaluate a call.",
+ toString(Call), evaluatorChecker->getName(),
+ EvalCallChecker.Checker->getCheckerName());
+ llvm_unreachable(AssertionMessage.c_str());
+ }
+#endif
if (evaluated) {
- anyEvaluated = true;
+ evaluatorChecker = EvalCallChecker.Checker->getCheckerName();
Dst.insert(checkDst);
#ifdef NDEBUG
break; // on release don't check that no other checker also evals.
@@ -686,7 +701,7 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
}
// If none of the checkers evaluated the call, ask ExprEngine to handle it.
- if (!anyEvaluated) {
+ if (!evaluatorChecker) {
NodeBuilder B(Pred, Dst, Eng.getBuilderContext());
Eng.defaultEvalCall(B, Pred, Call, CallOpts);
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerRegistryData.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerRegistryData.cpp
index 1b3e8b11549d..b9c6278991f4 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerRegistryData.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerRegistryData.cpp
@@ -82,7 +82,7 @@ static constexpr char PackageSeparator = '.';
static bool isInPackage(const CheckerInfo &Checker, StringRef PackageName) {
// Does the checker's full name have the package as a prefix?
- if (!Checker.FullName.startswith(PackageName))
+ if (!Checker.FullName.starts_with(PackageName))
return false;
// Is the package actually just the name of a specific checker?
@@ -158,7 +158,7 @@ void CheckerRegistryData::printCheckerWithDescList(
continue;
}
- if (Checker.FullName.startswith("alpha")) {
+ if (Checker.FullName.starts_with("alpha")) {
if (AnOpts.ShowCheckerHelpAlpha)
Print(Out, Checker,
("(Enable only for development!) " + Checker.Desc).str());
@@ -228,7 +228,7 @@ void CheckerRegistryData::printCheckerOptionList(const AnalyzerOptions &AnOpts,
}
if (Option.DevelopmentStatus == "alpha" ||
- Entry.first.startswith("alpha")) {
+ Entry.first.starts_with("alpha")) {
if (AnOpts.ShowCheckerOptionAlphaList)
Print(Out, FullOption,
llvm::Twine("(Enable only for development!) " + Desc).str());
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
index d12c35ef156a..66fab523c864 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
@@ -13,6 +13,7 @@ namespace clang {
namespace ento {
namespace categories {
+const char *const AppleAPIMisuse = "API Misuse (Apple)";
const char *const CoreFoundationObjectiveC = "Core Foundation/Objective-C";
const char *const LogicError = "Logic error";
const char *const MemoryRefCount =
@@ -23,6 +24,7 @@ const char *const CXXObjectLifecycle = "C++ object lifecycle";
const char *const CXXMoveSemantics = "C++ move semantics";
const char *const SecurityError = "Security error";
const char *const UnusedCode = "Unused code";
+const char *const TaintedData = "Tainted data used";
} // namespace categories
} // namespace ento
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ConstraintManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ConstraintManager.cpp
index d642c3530268..c0b3f346b654 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ConstraintManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ConstraintManager.cpp
@@ -16,6 +16,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/ADT/ScopeExit.h"
using namespace clang;
using namespace ento;
@@ -41,3 +42,82 @@ ConditionTruthVal ConstraintManager::checkNull(ProgramStateRef State,
return ConditionTruthVal(true);
return {};
}
+
+template <typename AssumeFunction>
+ConstraintManager::ProgramStatePair
+ConstraintManager::assumeDualImpl(ProgramStateRef &State,
+ AssumeFunction &Assume) {
+ if (LLVM_UNLIKELY(State->isPosteriorlyOverconstrained()))
+ return {State, State};
+
+ // Assume functions might recurse (see `reAssume` or `tryRearrange`). During
+ // the recursion the State might not change anymore, that means we reached a
+ // fixpoint.
+ // We avoid infinite recursion of assume calls by checking already visited
+ // States on the stack of assume function calls.
+ const ProgramState *RawSt = State.get();
+ if (LLVM_UNLIKELY(AssumeStack.contains(RawSt)))
+ return {State, State};
+ AssumeStack.push(RawSt);
+ auto AssumeStackBuilder =
+ llvm::make_scope_exit([this]() { AssumeStack.pop(); });
+
+ ProgramStateRef StTrue = Assume(true);
+
+ if (!StTrue) {
+ ProgramStateRef StFalse = Assume(false);
+ if (LLVM_UNLIKELY(!StFalse)) { // both infeasible
+ ProgramStateRef StInfeasible = State->cloneAsPosteriorlyOverconstrained();
+ assert(StInfeasible->isPosteriorlyOverconstrained());
+ // Checkers might rely on the API contract that both returned states
+ // cannot be null. Thus, we return StInfeasible for both branches because
+ // it might happen that a Checker uncoditionally uses one of them if the
+ // other is a nullptr. This may also happen with the non-dual and
+ // adjacent `assume(true)` and `assume(false)` calls. By implementing
+ // assume in therms of assumeDual, we can keep our API contract there as
+ // well.
+ return ProgramStatePair(StInfeasible, StInfeasible);
+ }
+ return ProgramStatePair(nullptr, StFalse);
+ }
+
+ ProgramStateRef StFalse = Assume(false);
+ if (!StFalse) {
+ return ProgramStatePair(StTrue, nullptr);
+ }
+
+ return ProgramStatePair(StTrue, StFalse);
+}
+
+ConstraintManager::ProgramStatePair
+ConstraintManager::assumeDual(ProgramStateRef State, DefinedSVal Cond) {
+ auto AssumeFun = [&, Cond](bool Assumption) {
+ return assumeInternal(State, Cond, Assumption);
+ };
+ return assumeDualImpl(State, AssumeFun);
+}
+
+ConstraintManager::ProgramStatePair
+ConstraintManager::assumeInclusiveRangeDual(ProgramStateRef State, NonLoc Value,
+ const llvm::APSInt &From,
+ const llvm::APSInt &To) {
+ auto AssumeFun = [&](bool Assumption) {
+ return assumeInclusiveRangeInternal(State, Value, From, To, Assumption);
+ };
+ return assumeDualImpl(State, AssumeFun);
+}
+
+ProgramStateRef ConstraintManager::assume(ProgramStateRef State,
+ DefinedSVal Cond, bool Assumption) {
+ ConstraintManager::ProgramStatePair R = assumeDual(State, Cond);
+ return Assumption ? R.first : R.second;
+}
+
+ProgramStateRef
+ConstraintManager::assumeInclusiveRange(ProgramStateRef State, NonLoc Value,
+ const llvm::APSInt &From,
+ const llvm::APSInt &To, bool InBound) {
+ ConstraintManager::ProgramStatePair R =
+ assumeInclusiveRangeDual(State, Value, From, To);
+ return InBound ? R.first : R.second;
+}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
index bc939d252800..d3499e7a917d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
@@ -26,7 +26,6 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/WorkList.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Casting.h"
@@ -34,6 +33,7 @@
#include <algorithm>
#include <cassert>
#include <memory>
+#include <optional>
#include <utility>
using namespace clang;
@@ -43,6 +43,8 @@ using namespace ento;
STATISTIC(NumSteps,
"The # of steps executed.");
+STATISTIC(NumSTUSteps, "The # of STU steps executed.");
+STATISTIC(NumCTUSteps, "The # of CTU steps executed.");
STATISTIC(NumReachedMaxSteps,
"The # of times we reached the max number of steps.");
STATISTIC(NumPathsExplored,
@@ -73,11 +75,18 @@ static std::unique_ptr<WorkList> generateWorkList(AnalyzerOptions &Opts) {
CoreEngine::CoreEngine(ExprEngine &exprengine, FunctionSummariesTy *FS,
AnalyzerOptions &Opts)
: ExprEng(exprengine), WList(generateWorkList(Opts)),
+ CTUWList(Opts.IsNaiveCTUEnabled ? generateWorkList(Opts) : nullptr),
BCounterFactory(G.getAllocator()), FunctionSummaries(FS) {}
+void CoreEngine::setBlockCounter(BlockCounter C) {
+ WList->setBlockCounter(C);
+ if (CTUWList)
+ CTUWList->setBlockCounter(C);
+}
+
/// ExecuteWorkList - Run the worklist algorithm for a maximum number of steps.
-bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
- ProgramStateRef InitState) {
+bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned MaxSteps,
+ ProgramStateRef InitState) {
if (G.num_roots() == 0) { // Initialize the analysis by constructing
// the root if none exists.
@@ -100,7 +109,7 @@ bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
BlockEdge StartLoc(Entry, Succ, L);
// Set the current block counter to being empty.
- WList->setBlockCounter(BCounterFactory.GetEmptyCounter());
+ setBlockCounter(BCounterFactory.GetEmptyCounter());
if (!InitState)
InitState = ExprEng.getInitialState(L);
@@ -118,34 +127,54 @@ bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
}
// Check if we have a steps limit
- bool UnlimitedSteps = Steps == 0;
+ bool UnlimitedSteps = MaxSteps == 0;
+
// Cap our pre-reservation in the event that the user specifies
// a very large number of maximum steps.
const unsigned PreReservationCap = 4000000;
if(!UnlimitedSteps)
- G.reserve(std::min(Steps,PreReservationCap));
-
- while (WList->hasWork()) {
- if (!UnlimitedSteps) {
- if (Steps == 0) {
- NumReachedMaxSteps++;
- break;
+ G.reserve(std::min(MaxSteps, PreReservationCap));
+
+ auto ProcessWList = [this, UnlimitedSteps](unsigned MaxSteps) {
+ unsigned Steps = MaxSteps;
+ while (WList->hasWork()) {
+ if (!UnlimitedSteps) {
+ if (Steps == 0) {
+ NumReachedMaxSteps++;
+ break;
+ }
+ --Steps;
}
- --Steps;
- }
- NumSteps++;
+ NumSteps++;
- const WorkListUnit& WU = WList->dequeue();
+ const WorkListUnit &WU = WList->dequeue();
- // Set the current block counter.
- WList->setBlockCounter(WU.getBlockCounter());
+ // Set the current block counter.
+ setBlockCounter(WU.getBlockCounter());
- // Retrieve the node.
- ExplodedNode *Node = WU.getNode();
+ // Retrieve the node.
+ ExplodedNode *Node = WU.getNode();
- dispatchWorkItem(Node, Node->getLocation(), WU);
+ dispatchWorkItem(Node, Node->getLocation(), WU);
+ }
+ return MaxSteps - Steps;
+ };
+ const unsigned STUSteps = ProcessWList(MaxSteps);
+
+ if (CTUWList) {
+ NumSTUSteps += STUSteps;
+ const unsigned MinCTUSteps =
+ this->ExprEng.getAnalysisManager().options.CTUMaxNodesMin;
+ const unsigned Pct =
+ this->ExprEng.getAnalysisManager().options.CTUMaxNodesPercentage;
+ unsigned MaxCTUSteps = std::max(STUSteps * Pct / 100, MinCTUSteps);
+
+ WList = std::move(CTUWList);
+ const unsigned CTUSteps = ProcessWList(MaxCTUSteps);
+ NumCTUSteps += CTUSteps;
}
+
ExprEng.processEndWorklist();
return WList->hasWork();
}
@@ -244,10 +273,10 @@ void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) {
const ReturnStmt *RS = nullptr;
if (!L.getSrc()->empty()) {
CFGElement LastElement = L.getSrc()->back();
- if (Optional<CFGStmt> LastStmt = LastElement.getAs<CFGStmt>()) {
+ if (std::optional<CFGStmt> LastStmt = LastElement.getAs<CFGStmt>()) {
RS = dyn_cast<ReturnStmt>(LastStmt->getStmt());
- } else if (Optional<CFGAutomaticObjDtor> AutoDtor =
- LastElement.getAs<CFGAutomaticObjDtor>()) {
+ } else if (std::optional<CFGAutomaticObjDtor> AutoDtor =
+ LastElement.getAs<CFGAutomaticObjDtor>()) {
RS = dyn_cast<ReturnStmt>(AutoDtor->getTriggerStmt());
}
}
@@ -282,14 +311,13 @@ void CoreEngine::HandleBlockEntrance(const BlockEntrance &L,
BlockCounter Counter = WList->getBlockCounter();
Counter = BCounterFactory.IncrementCount(Counter, LC->getStackFrame(),
BlockId);
- WList->setBlockCounter(Counter);
+ setBlockCounter(Counter);
// Process the entrance of the block.
- if (Optional<CFGElement> E = L.getFirstElement()) {
+ if (std::optional<CFGElement> E = L.getFirstElement()) {
NodeBuilderContext Ctx(*this, L.getBlock(), Pred);
ExprEng.processCFGElement(*E, Pred, 0, &Ctx);
- }
- else
+ } else
HandleBlockExit(L.getBlock(), Pred);
}
@@ -475,8 +503,8 @@ void CoreEngine::HandleVirtualBaseBranch(const CFGBlock *B,
if (const auto *CallerCtor = dyn_cast_or_null<CXXConstructExpr>(
LCtx->getStackFrame()->getCallSite())) {
switch (CallerCtor->getConstructionKind()) {
- case CXXConstructExpr::CK_NonVirtualBase:
- case CXXConstructExpr::CK_VirtualBase: {
+ case CXXConstructionKind::NonVirtualBase:
+ case CXXConstructionKind::VirtualBase: {
BlockEdge Loc(B, *B->succ_begin(), LCtx);
HandleBlockEdge(Loc, Pred);
return;
@@ -587,7 +615,7 @@ void CoreEngine::enqueue(ExplodedNodeSet &Set,
}
void CoreEngine::enqueueEndOfFunction(ExplodedNodeSet &Set, const ReturnStmt *RS) {
- for (auto I : Set) {
+ for (auto *I : Set) {
// If we are in an inlined call, generate CallExitBegin node.
if (I->getLocationContext()->getParent()) {
I = generateCallExitBeginNode(I, RS);
@@ -686,8 +714,8 @@ SwitchNodeBuilder::generateDefaultCaseNode(ProgramStateRef St,
assert(Src->succ_rbegin() != Src->succ_rend());
CFGBlock *DefaultBlock = *Src->succ_rbegin();
- // Sanity check for default blocks that are unreachable and not caught
- // by earlier stages.
+ // Basic correctness check for default blocks that are unreachable and not
+ // caught by earlier stages.
if (!DefaultBlock)
return nullptr;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicExtent.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicExtent.cpp
index db9698b4086e..6cf06413b537 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicExtent.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicExtent.cpp
@@ -30,7 +30,9 @@ DefinedOrUnknownSVal getDynamicExtent(ProgramStateRef State,
MR = MR->StripCasts();
if (const DefinedOrUnknownSVal *Size = State->get<DynamicExtentMap>(MR))
- return *Size;
+ if (auto SSize =
+ SVB.convertToArrayIndex(*Size).getAs<DefinedOrUnknownSVal>())
+ return *SSize;
return MR->getMemRegionManager().getStaticSize(MR, SVB);
}
@@ -40,23 +42,49 @@ DefinedOrUnknownSVal getElementExtent(QualType Ty, SValBuilder &SVB) {
SVB.getArrayIndexType());
}
+static DefinedOrUnknownSVal getConstantArrayElementCount(SValBuilder &SVB,
+ const MemRegion *MR) {
+ MR = MR->StripCasts();
+
+ const auto *TVR = MR->getAs<TypedValueRegion>();
+ if (!TVR)
+ return UnknownVal();
+
+ if (const ConstantArrayType *CAT =
+ SVB.getContext().getAsConstantArrayType(TVR->getValueType()))
+ return SVB.makeIntVal(CAT->getSize(), /* isUnsigned = */ false);
+
+ return UnknownVal();
+}
+
+static DefinedOrUnknownSVal
+getDynamicElementCount(ProgramStateRef State, SVal Size,
+ DefinedOrUnknownSVal ElementSize) {
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+
+ auto ElementCount =
+ SVB.evalBinOp(State, BO_Div, Size, ElementSize, SVB.getArrayIndexType())
+ .getAs<DefinedOrUnknownSVal>();
+ return ElementCount.value_or(UnknownVal());
+}
+
DefinedOrUnknownSVal getDynamicElementCount(ProgramStateRef State,
const MemRegion *MR,
SValBuilder &SVB,
QualType ElementTy) {
+ assert(MR != nullptr && "Not-null region expected");
MR = MR->StripCasts();
- DefinedOrUnknownSVal Size = getDynamicExtent(State, MR, SVB);
- SVal ElementSize = getElementExtent(ElementTy, SVB);
+ DefinedOrUnknownSVal ElementSize = getElementExtent(ElementTy, SVB);
+ if (ElementSize.isZeroConstant())
+ return getConstantArrayElementCount(SVB, MR);
- SVal ElementCount =
- SVB.evalBinOp(State, BO_Div, Size, ElementSize, SVB.getArrayIndexType());
-
- return ElementCount.castAs<DefinedOrUnknownSVal>();
+ return getDynamicElementCount(State, getDynamicExtent(State, MR, SVB),
+ ElementSize);
}
SVal getDynamicExtentWithOffset(ProgramStateRef State, SVal BufV) {
- SValBuilder &SvalBuilder = State->getStateManager().getSValBuilder();
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
const MemRegion *MRegion = BufV.getAsRegion();
if (!MRegion)
return UnknownVal();
@@ -67,15 +95,28 @@ SVal getDynamicExtentWithOffset(ProgramStateRef State, SVal BufV) {
if (!BaseRegion)
return UnknownVal();
- NonLoc OffsetInBytes = SvalBuilder.makeArrayIndex(
- Offset.getOffset() /
- MRegion->getMemRegionManager().getContext().getCharWidth());
- DefinedOrUnknownSVal ExtentInBytes =
- getDynamicExtent(State, BaseRegion, SvalBuilder);
+ NonLoc OffsetInChars =
+ SVB.makeArrayIndex(Offset.getOffset() / SVB.getContext().getCharWidth());
+ DefinedOrUnknownSVal ExtentInBytes = getDynamicExtent(State, BaseRegion, SVB);
+
+ return SVB.evalBinOp(State, BinaryOperator::Opcode::BO_Sub, ExtentInBytes,
+ OffsetInChars, SVB.getArrayIndexType());
+}
+
+DefinedOrUnknownSVal getDynamicElementCountWithOffset(ProgramStateRef State,
+ SVal BufV,
+ QualType ElementTy) {
+ const MemRegion *MR = BufV.getAsRegion();
+ if (!MR)
+ return UnknownVal();
+
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ DefinedOrUnknownSVal ElementSize = getElementExtent(ElementTy, SVB);
+ if (ElementSize.isZeroConstant())
+ return getConstantArrayElementCount(SVB, MR);
- return SvalBuilder.evalBinOp(State, BinaryOperator::Opcode::BO_Sub,
- ExtentInBytes, OffsetInBytes,
- SvalBuilder.getArrayIndexType());
+ return getDynamicElementCount(State, getDynamicExtentWithOffset(State, BufV),
+ ElementSize);
}
ProgramStateRef setDynamicExtent(ProgramStateRef State, const MemRegion *MR,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicType.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicType.cpp
index 9ed915aafcab..06052cb99fd1 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicType.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicType.cpp
@@ -209,7 +209,7 @@ static raw_ostream &printJson(const DynamicTypeInfo &DTI, raw_ostream &Out,
if (ToPrint->isAnyPointerType())
ToPrint = ToPrint->getPointeeType();
- Out << '\"' << ToPrint.getAsString() << "\", \"sub_classable\": "
+ Out << '\"' << ToPrint << "\", \"sub_classable\": "
<< (DTI.canBeASubClass() ? "true" : "false");
}
return Out;
@@ -217,9 +217,9 @@ static raw_ostream &printJson(const DynamicTypeInfo &DTI, raw_ostream &Out,
static raw_ostream &printJson(const DynamicCastInfo &DCI, raw_ostream &Out,
const char *NL, unsigned int Space, bool IsDot) {
- return Out << "\"from\": \"" << DCI.from().getAsString() << "\", \"to\": \""
- << DCI.to().getAsString() << "\", \"kind\": \""
- << (DCI.succeeds() ? "success" : "fail") << "\"";
+ return Out << "\"from\": \"" << DCI.from() << "\", \"to\": \"" << DCI.to()
+ << "\", \"kind\": \"" << (DCI.succeeds() ? "success" : "fail")
+ << "\"";
}
template <class T, class U>
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Environment.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Environment.cpp
index ee7474592528..427f51109853 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Environment.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Environment.cpp
@@ -17,9 +17,9 @@
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Analysis/AnalysisDeclContext.h"
+#include "clang/Basic/JsonSupport.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
-#include "clang/Basic/JsonSupport.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
@@ -40,8 +40,11 @@ static const Expr *ignoreTransparentExprs(const Expr *E) {
switch (E->getStmtClass()) {
case Stmt::OpaqueValueExprClass:
- E = cast<OpaqueValueExpr>(E)->getSourceExpr();
- break;
+ if (const Expr *SE = cast<OpaqueValueExpr>(E)->getSourceExpr()) {
+ E = SE;
+ break;
+ }
+ return E;
case Stmt::ExprWithCleanupsClass:
E = cast<ExprWithCleanups>(E)->getSubExpr();
break;
@@ -88,7 +91,7 @@ SVal Environment::getSVal(const EnvironmentEntry &Entry,
const Stmt *S = Entry.getStmt();
assert(!isa<ObjCForCollectionStmt>(S) &&
"Use ExprEngine::hasMoreIteration()!");
- assert((isa<Expr>(S) || isa<ReturnStmt>(S)) &&
+ assert((isa<Expr, ReturnStmt>(S)) &&
"Environment can only argue about Exprs, since only they express "
"a value! Any non-expression statement stored in Environment is a "
"result of a hack!");
@@ -98,7 +101,6 @@ SVal Environment::getSVal(const EnvironmentEntry &Entry,
case Stmt::CXXBindTemporaryExprClass:
case Stmt::ExprWithCleanupsClass:
case Stmt::GenericSelectionExprClass:
- case Stmt::OpaqueValueExprClass:
case Stmt::ConstantExprClass:
case Stmt::ParenExprClass:
case Stmt::SubstNonTypeTemplateParmExprClass:
@@ -118,7 +120,7 @@ SVal Environment::getSVal(const EnvironmentEntry &Entry,
case Stmt::SizeOfPackExprClass:
case Stmt::PredefinedExprClass:
// Known constants; defer to SValBuilder.
- return svalBuilder.getConstantVal(cast<Expr>(S)).getValue();
+ return *svalBuilder.getConstantVal(cast<Expr>(S));
case Stmt::ReturnStmtClass: {
const auto *RS = cast<ReturnStmt>(S);
@@ -193,7 +195,7 @@ EnvironmentManager::removeDeadBindings(Environment Env,
// Iterate over the block-expr bindings.
for (Environment::iterator I = Env.begin(), End = Env.end(); I != End; ++I) {
const EnvironmentEntry &BlkExpr = I.getKey();
- const SVal &X = I.getData();
+ SVal X = I.getData();
const Expr *E = dyn_cast<Expr>(BlkExpr.getStmt());
if (!E)
@@ -274,7 +276,8 @@ void Environment::printJson(raw_ostream &Out, const ASTContext &Ctx,
const Stmt *S = I->first.getStmt();
Indent(Out, InnerSpace, IsDot)
- << "{ \"stmt_id\": " << S->getID(Ctx) << ", \"pretty\": ";
+ << "{ \"stmt_id\": " << S->getID(Ctx) << ", \"kind\": \""
+ << S->getStmtClassName() << "\", \"pretty\": ";
S->printJson(Out, nullptr, PP, /*AddQuotes=*/true);
Out << ", \"value\": ";
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
index 635495e9bf60..f84da769d182 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
@@ -25,12 +25,12 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <memory>
+#include <optional>
using namespace clang;
using namespace ento;
@@ -50,8 +50,7 @@ ExplodedGraph::~ExplodedGraph() = default;
bool ExplodedGraph::isInterestingLValueExpr(const Expr *Ex) {
if (!Ex->isLValue())
return false;
- return isa<DeclRefExpr>(Ex) || isa<MemberExpr>(Ex) ||
- isa<ObjCIvarRefExpr>(Ex) || isa<ArraySubscriptExpr>(Ex);
+ return isa<DeclRefExpr, MemberExpr, ObjCIvarRefExpr, ArraySubscriptExpr>(Ex);
}
bool ExplodedGraph::shouldCollect(const ExplodedNode *node) {
@@ -140,7 +139,7 @@ bool ExplodedGraph::shouldCollect(const ExplodedNode *node) {
// Condition 10.
const ProgramPoint SuccLoc = succ->getLocation();
- if (Optional<StmtPoint> SP = SuccLoc.getAs<StmtPoint>())
+ if (std::optional<StmtPoint> SP = SuccLoc.getAs<StmtPoint>())
if (CallEvent::isCallStmt(SP->getStmt()))
return false;
@@ -234,8 +233,7 @@ void ExplodedNode::NodeGroup::addNode(ExplodedNode *N, ExplodedGraph &G) {
ExplodedNode *Old = Storage.get<ExplodedNode *>();
BumpVectorContext &Ctx = G.getNodeAllocator();
- V = G.getAllocator().Allocate<ExplodedNodeVector>();
- new (V) ExplodedNodeVector(Ctx, 4);
+ V = new (G.getAllocator()) ExplodedNodeVector(Ctx, 4);
V->push_back(Old, Ctx);
Storage = V;
@@ -409,7 +407,7 @@ ExplodedNode *ExplodedGraph::getNode(const ProgramPoint &L,
}
else {
// Allocate a new node.
- V = (NodeTy*) getAllocator().Allocate<NodeTy>();
+ V = getAllocator().Allocate<NodeTy>();
}
++NumNodes;
@@ -433,7 +431,7 @@ ExplodedNode *ExplodedGraph::createUncachedNode(const ProgramPoint &L,
ProgramStateRef State,
int64_t Id,
bool IsSink) {
- NodeTy *V = (NodeTy *) getAllocator().Allocate<NodeTy>();
+ NodeTy *V = getAllocator().Allocate<NodeTy>();
new (V) NodeTy(L, State, Id, IsSink);
return V;
}
@@ -489,7 +487,7 @@ ExplodedGraph::trim(ArrayRef<const NodeTy *> Sinks,
const ExplodedNode *N = WL2.pop_back_val();
// Skip this node if we have already processed it.
- if (Pass2.find(N) != Pass2.end())
+ if (Pass2.contains(N))
continue;
// Create the corresponding node in the new graph and record the mapping
@@ -510,9 +508,8 @@ ExplodedGraph::trim(ArrayRef<const NodeTy *> Sinks,
// Walk through the predecessors of 'N' and hook up their corresponding
// nodes in the new graph (if any) to the freshly created node.
- for (ExplodedNode::pred_iterator I = N->Preds.begin(), E = N->Preds.end();
- I != E; ++I) {
- Pass2Ty::iterator PI = Pass2.find(*I);
+ for (const ExplodedNode *Pred : N->Preds) {
+ Pass2Ty::iterator PI = Pass2.find(Pred);
if (PI == Pass2.end())
continue;
@@ -523,17 +520,16 @@ ExplodedGraph::trim(ArrayRef<const NodeTy *> Sinks,
// been created, we should hook them up as successors. Otherwise, enqueue
// the new nodes from the original graph that should have nodes created
// in the new graph.
- for (ExplodedNode::succ_iterator I = N->Succs.begin(), E = N->Succs.end();
- I != E; ++I) {
- Pass2Ty::iterator PI = Pass2.find(*I);
+ for (const ExplodedNode *Succ : N->Succs) {
+ Pass2Ty::iterator PI = Pass2.find(Succ);
if (PI != Pass2.end()) {
const_cast<ExplodedNode *>(PI->second)->addPredecessor(NewN, *G);
continue;
}
// Enqueue nodes to the worklist that were marked during pass 1.
- if (Pass1.count(*I))
- WL2.push_back(*I);
+ if (Pass1.count(Succ))
+ WL2.push_back(Succ);
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
index 66332d3b848c..24e91a22fd68 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
//
// This file defines a meta-engine for path-sensitive dataflow analysis that
-// is built on GREngine, but provides the boilerplate to execute transfer
+// is built on CoreEngine, but provides the boilerplate to execute transfer
// functions and build the ExplodedGraph at the expression level.
//
//===----------------------------------------------------------------------===//
@@ -48,6 +48,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/LoopUnrolling.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/LoopWidening.h"
@@ -64,7 +65,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/ImmutableSet.h"
-#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Casting.h"
@@ -77,6 +78,7 @@
#include <cassert>
#include <cstdint>
#include <memory>
+#include <optional>
#include <string>
#include <tuple>
#include <utility>
@@ -118,18 +120,10 @@ namespace {
/// the construction context was present and contained references to these
/// AST nodes.
class ConstructedObjectKey {
- typedef std::pair<ConstructionContextItem, const LocationContext *>
- ConstructedObjectKeyImpl;
-
+ using ConstructedObjectKeyImpl =
+ std::pair<ConstructionContextItem, const LocationContext *>;
const ConstructedObjectKeyImpl Impl;
- const void *getAnyASTNodePtr() const {
- if (const Stmt *S = getItem().getStmtOrNull())
- return S;
- else
- return getItem().getCXXCtorInitializer();
- }
-
public:
explicit ConstructedObjectKey(const ConstructionContextItem &Item,
const LocationContext *LC)
@@ -193,6 +187,31 @@ typedef llvm::ImmutableMap<ConstructedObjectKey, SVal>
REGISTER_TRAIT_WITH_PROGRAMSTATE(ObjectsUnderConstruction,
ObjectsUnderConstructionMap)
+// This trait is responsible for storing the index of the element that is to be
+// constructed in the next iteration. As a result a CXXConstructExpr is only
+// stored if it is array type. Also the index is the index of the continuous
+// memory region, which is important for multi-dimensional arrays. E.g:: int
+// arr[2][2]; assume arr[1][1] will be the next element under construction, so
+// the index is 3.
+typedef llvm::ImmutableMap<
+ std::pair<const CXXConstructExpr *, const LocationContext *>, unsigned>
+ IndexOfElementToConstructMap;
+REGISTER_TRAIT_WITH_PROGRAMSTATE(IndexOfElementToConstruct,
+ IndexOfElementToConstructMap)
+
+// This trait is responsible for holding our pending ArrayInitLoopExprs.
+// It pairs the LocationContext and the initializer CXXConstructExpr with
+// the size of the array that's being copy initialized.
+typedef llvm::ImmutableMap<
+ std::pair<const CXXConstructExpr *, const LocationContext *>, unsigned>
+ PendingInitLoopMap;
+REGISTER_TRAIT_WITH_PROGRAMSTATE(PendingInitLoop, PendingInitLoopMap)
+
+typedef llvm::ImmutableMap<const LocationContext *, unsigned>
+ PendingArrayDestructionMap;
+REGISTER_TRAIT_WITH_PROGRAMSTATE(PendingArrayDestruction,
+ PendingArrayDestructionMap)
+
//===----------------------------------------------------------------------===//
// Engine construction and deletion.
//===----------------------------------------------------------------------===//
@@ -200,24 +219,17 @@ REGISTER_TRAIT_WITH_PROGRAMSTATE(ObjectsUnderConstruction,
static const char* TagProviderName = "ExprEngine";
ExprEngine::ExprEngine(cross_tu::CrossTranslationUnitContext &CTU,
- AnalysisManager &mgr,
- SetOfConstDecls *VisitedCalleesIn,
- FunctionSummariesTy *FS,
- InliningModes HowToInlineIn)
- : CTU(CTU), AMgr(mgr),
- AnalysisDeclContexts(mgr.getAnalysisDeclContextManager()),
+ AnalysisManager &mgr, SetOfConstDecls *VisitedCalleesIn,
+ FunctionSummariesTy *FS, InliningModes HowToInlineIn)
+ : CTU(CTU), IsCTUEnabled(mgr.getAnalyzerOptions().IsNaiveCTUEnabled),
+ AMgr(mgr), AnalysisDeclContexts(mgr.getAnalysisDeclContextManager()),
Engine(*this, FS, mgr.getAnalyzerOptions()), G(Engine.getGraph()),
StateMgr(getContext(), mgr.getStoreManagerCreator(),
- mgr.getConstraintManagerCreator(), G.getAllocator(),
- this),
- SymMgr(StateMgr.getSymbolManager()),
- MRMgr(StateMgr.getRegionManager()),
- svalBuilder(StateMgr.getSValBuilder()),
- ObjCNoRet(mgr.getASTContext()),
- BR(mgr, *this),
- VisitedCallees(VisitedCalleesIn),
- HowToInline(HowToInlineIn)
- {
+ mgr.getConstraintManagerCreator(), G.getAllocator(), this),
+ SymMgr(StateMgr.getSymbolManager()), MRMgr(StateMgr.getRegionManager()),
+ svalBuilder(StateMgr.getSValBuilder()), ObjCNoRet(mgr.getASTContext()),
+ BR(mgr, *this), VisitedCallees(VisitedCalleesIn),
+ HowToInline(HowToInlineIn) {
unsigned TrimInterval = mgr.options.GraphTrimInterval;
if (TrimInterval != 0) {
// Enable eager node reclamation when constructing the ExplodedGraph.
@@ -259,7 +271,7 @@ ProgramStateRef ExprEngine::getInitialState(const LocationContext *InitLoc) {
svalBuilder.makeZeroVal(T),
svalBuilder.getConditionType());
- Optional<DefinedOrUnknownSVal> Constraint =
+ std::optional<DefinedOrUnknownSVal> Constraint =
Constraint_untested.getAs<DefinedOrUnknownSVal>();
if (!Constraint)
@@ -279,7 +291,7 @@ ProgramStateRef ExprEngine::getInitialState(const LocationContext *InitLoc) {
const MemRegion *R = state->getRegion(SelfD, InitLoc);
SVal V = state->getSVal(loc::MemRegionVal(R));
- if (Optional<Loc> LV = V.getAs<Loc>()) {
+ if (std::optional<Loc> LV = V.getAs<Loc>()) {
// Assume that the pointer value in 'self' is non-null.
state = state->assume(*LV, true);
assert(state && "'self' cannot be null");
@@ -287,7 +299,7 @@ ProgramStateRef ExprEngine::getInitialState(const LocationContext *InitLoc) {
}
if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) {
- if (!MD->isStatic()) {
+ if (MD->isImplicitObjectMemberFunction()) {
// Precondition: 'this' is always non-null upon entry to the
// top-level function. This is our starting assumption for
// analyzing an "open" program.
@@ -295,7 +307,7 @@ ProgramStateRef ExprEngine::getInitialState(const LocationContext *InitLoc) {
if (SFC->getParent() == nullptr) {
loc::MemRegionVal L = svalBuilder.getCXXThis(MD, SFC);
SVal V = state->getSVal(L);
- if (Optional<Loc> LV = V.getAs<Loc>()) {
+ if (std::optional<Loc> LV = V.getAs<Loc>()) {
state = state->assume(*LV, true);
assert(state && "'this' cannot be null");
}
@@ -319,16 +331,16 @@ ProgramStateRef ExprEngine::createTemporaryRegionIfNeeded(
if (!Result) {
// If we don't have an explicit result expression, we're in "if needed"
// mode. Only create a region if the current value is a NonLoc.
- if (!InitValWithAdjustments.getAs<NonLoc>()) {
+ if (!isa<NonLoc>(InitValWithAdjustments)) {
if (OutRegionWithAdjustments)
*OutRegionWithAdjustments = nullptr;
return State;
}
Result = InitWithAdjustments;
} else {
- // We need to create a region no matter what. For sanity, make sure we don't
- // try to stuff a Loc into a non-pointer temporary region.
- assert(!InitValWithAdjustments.getAs<Loc>() ||
+ // We need to create a region no matter what. Make sure we don't try to
+ // stuff a Loc into a non-pointer temporary region.
+ assert(!isa<Loc>(InitValWithAdjustments) ||
Loc::isLocType(Result->getType()) ||
Result->getType()->isMemberPointerType());
}
@@ -371,19 +383,23 @@ ProgramStateRef ExprEngine::createTemporaryRegionIfNeeded(
// into that region. This is not correct, but it is better than nothing.
const TypedValueRegion *TR = nullptr;
if (const auto *MT = dyn_cast<MaterializeTemporaryExpr>(Result)) {
- if (Optional<SVal> V = getObjectUnderConstruction(State, MT, LC)) {
+ if (std::optional<SVal> V = getObjectUnderConstruction(State, MT, LC)) {
State = finishObjectConstruction(State, MT, LC);
State = State->BindExpr(Result, LC, *V);
return State;
- } else {
+ } else if (const ValueDecl *VD = MT->getExtendingDecl()) {
StorageDuration SD = MT->getStorageDuration();
+ assert(SD != SD_FullExpression);
// If this object is bound to a reference with static storage duration, we
// put it in a different region to prevent "address leakage" warnings.
if (SD == SD_Static || SD == SD_Thread) {
- TR = MRMgr.getCXXStaticTempObjectRegion(Init);
+ TR = MRMgr.getCXXStaticLifetimeExtendedObjectRegion(Init, VD);
} else {
- TR = MRMgr.getCXXTempObjectRegion(Init, LC);
+ TR = MRMgr.getCXXLifetimeExtendedObjectRegion(Init, VD, LC);
}
+ } else {
+ assert(MT->getStorageDuration() == SD_FullExpression);
+ TR = MRMgr.getCXXTempObjectRegion(Init, LC);
}
} else {
TR = MRMgr.getCXXTempObjectRegion(Init, LC);
@@ -393,8 +409,7 @@ ProgramStateRef ExprEngine::createTemporaryRegionIfNeeded(
SVal BaseReg = Reg;
// Make the necessary adjustments to obtain the sub-object.
- for (auto I = Adjustments.rbegin(), E = Adjustments.rend(); I != E; ++I) {
- const SubobjectAdjustment &Adj = *I;
+ for (const SubobjectAdjustment &Adj : llvm::reverse(Adjustments)) {
switch (Adj.Kind) {
case SubobjectAdjustment::DerivedToBaseAdjustment:
Reg = StoreMgr.evalDerivedToBase(Reg, Adj.DerivedToBase.BasePath);
@@ -457,25 +472,140 @@ ProgramStateRef ExprEngine::createTemporaryRegionIfNeeded(
return State;
}
+ProgramStateRef ExprEngine::setIndexOfElementToConstruct(
+ ProgramStateRef State, const CXXConstructExpr *E,
+ const LocationContext *LCtx, unsigned Idx) {
+ auto Key = std::make_pair(E, LCtx->getStackFrame());
+
+ assert(!State->contains<IndexOfElementToConstruct>(Key) || Idx > 0);
+
+ return State->set<IndexOfElementToConstruct>(Key, Idx);
+}
+
+std::optional<unsigned>
+ExprEngine::getPendingInitLoop(ProgramStateRef State, const CXXConstructExpr *E,
+ const LocationContext *LCtx) {
+ const unsigned *V = State->get<PendingInitLoop>({E, LCtx->getStackFrame()});
+ return V ? std::make_optional(*V) : std::nullopt;
+}
+
+ProgramStateRef ExprEngine::removePendingInitLoop(ProgramStateRef State,
+ const CXXConstructExpr *E,
+ const LocationContext *LCtx) {
+ auto Key = std::make_pair(E, LCtx->getStackFrame());
+
+ assert(E && State->contains<PendingInitLoop>(Key));
+ return State->remove<PendingInitLoop>(Key);
+}
+
+ProgramStateRef ExprEngine::setPendingInitLoop(ProgramStateRef State,
+ const CXXConstructExpr *E,
+ const LocationContext *LCtx,
+ unsigned Size) {
+ auto Key = std::make_pair(E, LCtx->getStackFrame());
+
+ assert(!State->contains<PendingInitLoop>(Key) && Size > 0);
+
+ return State->set<PendingInitLoop>(Key, Size);
+}
+
+std::optional<unsigned>
+ExprEngine::getIndexOfElementToConstruct(ProgramStateRef State,
+ const CXXConstructExpr *E,
+ const LocationContext *LCtx) {
+ const unsigned *V =
+ State->get<IndexOfElementToConstruct>({E, LCtx->getStackFrame()});
+ return V ? std::make_optional(*V) : std::nullopt;
+}
+
+ProgramStateRef
+ExprEngine::removeIndexOfElementToConstruct(ProgramStateRef State,
+ const CXXConstructExpr *E,
+ const LocationContext *LCtx) {
+ auto Key = std::make_pair(E, LCtx->getStackFrame());
+
+ assert(E && State->contains<IndexOfElementToConstruct>(Key));
+ return State->remove<IndexOfElementToConstruct>(Key);
+}
+
+std::optional<unsigned>
+ExprEngine::getPendingArrayDestruction(ProgramStateRef State,
+ const LocationContext *LCtx) {
+ assert(LCtx && "LocationContext shouldn't be null!");
+
+ const unsigned *V =
+ State->get<PendingArrayDestruction>(LCtx->getStackFrame());
+ return V ? std::make_optional(*V) : std::nullopt;
+}
+
+ProgramStateRef ExprEngine::setPendingArrayDestruction(
+ ProgramStateRef State, const LocationContext *LCtx, unsigned Idx) {
+ assert(LCtx && "LocationContext shouldn't be null!");
+
+ auto Key = LCtx->getStackFrame();
+
+ return State->set<PendingArrayDestruction>(Key, Idx);
+}
+
+ProgramStateRef
+ExprEngine::removePendingArrayDestruction(ProgramStateRef State,
+ const LocationContext *LCtx) {
+ assert(LCtx && "LocationContext shouldn't be null!");
+
+ auto Key = LCtx->getStackFrame();
+
+ assert(LCtx && State->contains<PendingArrayDestruction>(Key));
+ return State->remove<PendingArrayDestruction>(Key);
+}
+
ProgramStateRef
ExprEngine::addObjectUnderConstruction(ProgramStateRef State,
const ConstructionContextItem &Item,
const LocationContext *LC, SVal V) {
ConstructedObjectKey Key(Item, LC->getStackFrame());
+
+ const Expr *Init = nullptr;
+
+ if (auto DS = dyn_cast_or_null<DeclStmt>(Item.getStmtOrNull())) {
+ if (auto VD = dyn_cast_or_null<VarDecl>(DS->getSingleDecl()))
+ Init = VD->getInit();
+ }
+
+ if (auto LE = dyn_cast_or_null<LambdaExpr>(Item.getStmtOrNull()))
+ Init = *(LE->capture_init_begin() + Item.getIndex());
+
+ if (!Init && !Item.getStmtOrNull())
+ Init = Item.getCXXCtorInitializer()->getInit();
+
+ // In an ArrayInitLoopExpr the real initializer is returned by
+ // getSubExpr(). Note that AILEs can be nested in case of
+ // multidimesnional arrays.
+ if (const auto *AILE = dyn_cast_or_null<ArrayInitLoopExpr>(Init))
+ Init = extractElementInitializerFromNestedAILE(AILE);
+
// FIXME: Currently the state might already contain the marker due to
// incorrect handling of temporaries bound to default parameters.
- assert(!State->get<ObjectsUnderConstruction>(Key) ||
- Key.getItem().getKind() ==
- ConstructionContextItem::TemporaryDestructorKind);
+ // The state will already contain the marker if we construct elements
+ // in an array, as we visit the same statement multiple times before
+ // the array declaration. The marker is removed when we exit the
+ // constructor call.
+ assert((!State->get<ObjectsUnderConstruction>(Key) ||
+ Key.getItem().getKind() ==
+ ConstructionContextItem::TemporaryDestructorKind ||
+ State->contains<IndexOfElementToConstruct>(
+ {dyn_cast_or_null<CXXConstructExpr>(Init), LC})) &&
+ "The object is already marked as `UnderConstruction`, when it's not "
+ "supposed to!");
return State->set<ObjectsUnderConstruction>(Key, V);
}
-Optional<SVal>
+std::optional<SVal>
ExprEngine::getObjectUnderConstruction(ProgramStateRef State,
const ConstructionContextItem &Item,
const LocationContext *LC) {
ConstructedObjectKey Key(Item, LC->getStackFrame());
- return Optional<SVal>::create(State->get<ObjectsUnderConstruction>(Key));
+ const SVal *V = State->get<ObjectsUnderConstruction>(Key);
+ return V ? std::make_optional(*V) : std::nullopt;
}
ProgramStateRef
@@ -569,7 +699,7 @@ printObjectsUnderConstructionJson(raw_ostream &Out, ProgramStateRef State,
continue;
if (!HasItem) {
- Out << "[" << NL;
+ Out << '[' << NL;
HasItem = true;
}
@@ -598,29 +728,238 @@ printObjectsUnderConstructionJson(raw_ostream &Out, ProgramStateRef State,
}
}
-void ExprEngine::printJson(raw_ostream &Out, ProgramStateRef State,
- const LocationContext *LCtx, const char *NL,
- unsigned int Space, bool IsDot) const {
- Indent(Out, Space, IsDot) << "\"constructing_objects\": ";
+static void printIndicesOfElementsToConstructJson(
+ raw_ostream &Out, ProgramStateRef State, const char *NL,
+ const LocationContext *LCtx, unsigned int Space = 0, bool IsDot = false) {
+ using KeyT = std::pair<const Expr *, const LocationContext *>;
+
+ const auto &Context = LCtx->getAnalysisDeclContext()->getASTContext();
+ PrintingPolicy PP = Context.getPrintingPolicy();
+
+ ++Space;
+ bool HasItem = false;
+
+ // Store the last key.
+ KeyT LastKey;
+ for (const auto &I : State->get<IndexOfElementToConstruct>()) {
+ const KeyT &Key = I.first;
+ if (Key.second != LCtx)
+ continue;
+
+ if (!HasItem) {
+ Out << '[' << NL;
+ HasItem = true;
+ }
+
+ LastKey = Key;
+ }
+
+ for (const auto &I : State->get<IndexOfElementToConstruct>()) {
+ const KeyT &Key = I.first;
+ unsigned Value = I.second;
+ if (Key.second != LCtx)
+ continue;
+
+ Indent(Out, Space, IsDot) << "{ ";
+
+ // Expr
+ const Expr *E = Key.first;
+ Out << "\"stmt_id\": " << E->getID(Context);
+
+ // Kind
+ Out << ", \"kind\": null";
+
+ // Pretty-print
+ Out << ", \"pretty\": ";
+ Out << "\"" << E->getStmtClassName() << ' '
+ << E->getSourceRange().printToString(Context.getSourceManager()) << " '"
+ << QualType::getAsString(E->getType().split(), PP);
+ Out << "'\"";
+
+ Out << ", \"value\": \"Current index: " << Value - 1 << "\" }";
+
+ if (Key != LastKey)
+ Out << ',';
+ Out << NL;
+ }
+
+ if (HasItem)
+ Indent(Out, --Space, IsDot) << ']'; // End of "location_context".
+ else {
+ Out << "null ";
+ }
+}
+
+static void printPendingInitLoopJson(raw_ostream &Out, ProgramStateRef State,
+ const char *NL,
+ const LocationContext *LCtx,
+ unsigned int Space = 0,
+ bool IsDot = false) {
+ using KeyT = std::pair<const CXXConstructExpr *, const LocationContext *>;
+
+ const auto &Context = LCtx->getAnalysisDeclContext()->getASTContext();
+ PrintingPolicy PP = Context.getPrintingPolicy();
+
+ ++Space;
+ bool HasItem = false;
+
+ // Store the last key.
+ KeyT LastKey;
+ for (const auto &I : State->get<PendingInitLoop>()) {
+ const KeyT &Key = I.first;
+ if (Key.second != LCtx)
+ continue;
+
+ if (!HasItem) {
+ Out << '[' << NL;
+ HasItem = true;
+ }
+
+ LastKey = Key;
+ }
+
+ for (const auto &I : State->get<PendingInitLoop>()) {
+ const KeyT &Key = I.first;
+ unsigned Value = I.second;
+ if (Key.second != LCtx)
+ continue;
+
+ Indent(Out, Space, IsDot) << "{ ";
+
+ const CXXConstructExpr *E = Key.first;
+ Out << "\"stmt_id\": " << E->getID(Context);
+
+ Out << ", \"kind\": null";
+ Out << ", \"pretty\": ";
+ Out << '\"' << E->getStmtClassName() << ' '
+ << E->getSourceRange().printToString(Context.getSourceManager()) << " '"
+ << QualType::getAsString(E->getType().split(), PP);
+ Out << "'\"";
+
+ Out << ", \"value\": \"Flattened size: " << Value << "\"}";
+
+ if (Key != LastKey)
+ Out << ',';
+ Out << NL;
+ }
+
+ if (HasItem)
+ Indent(Out, --Space, IsDot) << ']'; // End of "location_context".
+ else {
+ Out << "null ";
+ }
+}
+
+static void
+printPendingArrayDestructionsJson(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const LocationContext *LCtx,
+ unsigned int Space = 0, bool IsDot = false) {
+ using KeyT = const LocationContext *;
+
+ ++Space;
+ bool HasItem = false;
+
+ // Store the last key.
+ KeyT LastKey = nullptr;
+ for (const auto &I : State->get<PendingArrayDestruction>()) {
+ const KeyT &Key = I.first;
+ if (Key != LCtx)
+ continue;
+
+ if (!HasItem) {
+ Out << '[' << NL;
+ HasItem = true;
+ }
+
+ LastKey = Key;
+ }
+
+ for (const auto &I : State->get<PendingArrayDestruction>()) {
+ const KeyT &Key = I.first;
+ if (Key != LCtx)
+ continue;
+
+ Indent(Out, Space, IsDot) << "{ ";
+
+ Out << "\"stmt_id\": null";
+ Out << ", \"kind\": null";
+ Out << ", \"pretty\": \"Current index: \"";
+ Out << ", \"value\": \"" << I.second << "\" }";
- if (LCtx && !State->get<ObjectsUnderConstruction>().isEmpty()) {
+ if (Key != LastKey)
+ Out << ',';
+ Out << NL;
+ }
+
+ if (HasItem)
+ Indent(Out, --Space, IsDot) << ']'; // End of "location_context".
+ else {
+ Out << "null ";
+ }
+}
+
+/// A helper function to generalize program state trait printing.
+/// The function invokes Printer as 'Printer(Out, State, NL, LC, Space, IsDot,
+/// std::forward<Args>(args)...)'. \n One possible type for Printer is
+/// 'void()(raw_ostream &, ProgramStateRef, const char *, const LocationContext
+/// *, unsigned int, bool, ...)' \n \param Trait The state trait to be printed.
+/// \param Printer A void function that prints Trait.
+/// \param Args An additional parameter pack that is passed to Print upon
+/// invocation.
+template <typename Trait, typename Printer, typename... Args>
+static void printStateTraitWithLocationContextJson(
+ raw_ostream &Out, ProgramStateRef State, const LocationContext *LCtx,
+ const char *NL, unsigned int Space, bool IsDot,
+ const char *jsonPropertyName, Printer printer, Args &&...args) {
+
+ using RequiredType =
+ void (*)(raw_ostream &, ProgramStateRef, const char *,
+ const LocationContext *, unsigned int, bool, Args &&...);
+
+ // Try to do as much compile time checking as possible.
+ // FIXME: check for invocable instead of function?
+ static_assert(std::is_function_v<std::remove_pointer_t<Printer>>,
+ "Printer is not a function!");
+ static_assert(std::is_convertible_v<Printer, RequiredType>,
+ "Printer doesn't have the required type!");
+
+ if (LCtx && !State->get<Trait>().isEmpty()) {
+ Indent(Out, Space, IsDot) << '\"' << jsonPropertyName << "\": ";
++Space;
Out << '[' << NL;
LCtx->printJson(Out, NL, Space, IsDot, [&](const LocationContext *LC) {
- printObjectsUnderConstructionJson(Out, State, NL, LC, Space, IsDot);
+ printer(Out, State, NL, LC, Space, IsDot, std::forward<Args>(args)...);
});
--Space;
- Indent(Out, Space, IsDot) << "]," << NL; // End of "constructing_objects".
- } else {
- Out << "null," << NL;
+ Indent(Out, Space, IsDot) << "]," << NL; // End of "jsonPropertyName".
}
+}
+
+void ExprEngine::printJson(raw_ostream &Out, ProgramStateRef State,
+ const LocationContext *LCtx, const char *NL,
+ unsigned int Space, bool IsDot) const {
+
+ printStateTraitWithLocationContextJson<ObjectsUnderConstruction>(
+ Out, State, LCtx, NL, Space, IsDot, "constructing_objects",
+ printObjectsUnderConstructionJson);
+ printStateTraitWithLocationContextJson<IndexOfElementToConstruct>(
+ Out, State, LCtx, NL, Space, IsDot, "index_of_element",
+ printIndicesOfElementsToConstructJson);
+ printStateTraitWithLocationContextJson<PendingInitLoop>(
+ Out, State, LCtx, NL, Space, IsDot, "pending_init_loops",
+ printPendingInitLoopJson);
+ printStateTraitWithLocationContextJson<PendingArrayDestruction>(
+ Out, State, LCtx, NL, Space, IsDot, "pending_destructors",
+ printPendingArrayDestructionsJson);
getCheckerManager().runCheckersForPrintStateJson(Out, State, NL, Space,
IsDot);
}
void ExprEngine::processEndWorklist() {
+ // This prints the name of the top-level function if we crash.
+ PrettyStackTraceLocationContext CrashInfo(getRootLocationContext());
getCheckerManager().runCheckersForEndAnalysis(G, BR, *this);
}
@@ -654,6 +993,7 @@ void ExprEngine::processCFGElement(const CFGElement E, ExplodedNode *Pred,
ProcessLoopExit(E.castAs<CFGLoopExit>().getLoopStmt(), Pred);
return;
case CFGElement::LifetimeEnds:
+ case CFGElement::CleanupFunction:
case CFGElement::ScopeBegin:
case CFGElement::ScopeEnd:
return;
@@ -865,7 +1205,7 @@ void ExprEngine::ProcessInitializer(const CFGInitializer CFGInit,
SVal LValue = State->getSVal(Init, stackFrame);
if (!Field->getType()->isReferenceType())
- if (Optional<Loc> LValueLoc = LValue.getAs<Loc>())
+ if (std::optional<Loc> LValueLoc = LValue.getAs<Loc>())
InitVal = State->getSVal(*LValueLoc);
// If we fail to get the value for some reason, use a symbolic value.
@@ -882,6 +1222,14 @@ void ExprEngine::ProcessInitializer(const CFGInitializer CFGInit,
PostInitializer PP(BMI, FieldLoc.getAsRegion(), stackFrame);
evalBind(Tmp, Init, Pred, FieldLoc, InitVal, /*isInit=*/true, &PP);
}
+ } else if (BMI->isBaseInitializer() && isa<InitListExpr>(Init)) {
+ // When the base class is initialized with an initialization list and the
+ // base class does not have a ctor, there will not be a CXXConstructExpr to
+ // initialize the base region. Hence, we need to make the bind for it.
+ SVal BaseLoc = getStoreManager().evalDerivedToBase(
+ thisVal, QualType(BMI->getBaseClass(), 0), BMI->isBaseVirtual());
+ SVal InitVal = State->getSVal(Init, stackFrame);
+ evalBind(Tmp, Init, Pred, BaseLoc, InitVal, /*isInit=*/true);
} else {
assert(BMI->isBaseInitializer() || BMI->isDelegatingInitializer());
Tmp.insert(Pred);
@@ -902,6 +1250,43 @@ void ExprEngine::ProcessInitializer(const CFGInitializer CFGInit,
Engine.enqueue(Dst, currBldrCtx->getBlock(), currStmtIdx);
}
+std::pair<ProgramStateRef, uint64_t>
+ExprEngine::prepareStateForArrayDestruction(const ProgramStateRef State,
+ const MemRegion *Region,
+ const QualType &ElementTy,
+ const LocationContext *LCtx,
+ SVal *ElementCountVal) {
+ assert(Region != nullptr && "Not-null region expected");
+
+ QualType Ty = ElementTy.getDesugaredType(getContext());
+ while (const auto *NTy = dyn_cast<ArrayType>(Ty))
+ Ty = NTy->getElementType().getDesugaredType(getContext());
+
+ auto ElementCount = getDynamicElementCount(State, Region, svalBuilder, Ty);
+
+ if (ElementCountVal)
+ *ElementCountVal = ElementCount;
+
+ // Note: the destructors are called in reverse order.
+ unsigned Idx = 0;
+ if (auto OptionalIdx = getPendingArrayDestruction(State, LCtx)) {
+ Idx = *OptionalIdx;
+ } else {
+ // The element count is either unknown, or an SVal that's not an integer.
+ if (!ElementCount.isConstant())
+ return {State, 0};
+
+ Idx = ElementCount.getAsInteger()->getLimitedValue();
+ }
+
+ if (Idx == 0)
+ return {State, 0};
+
+ --Idx;
+
+ return {setPendingArrayDestruction(State, LCtx, Idx), Idx};
+}
+
void ExprEngine::ProcessImplicitDtor(const CFGImplicitDtor D,
ExplodedNode *Pred) {
ExplodedNodeSet Dst;
@@ -942,7 +1327,8 @@ void ExprEngine::ProcessNewAllocator(const CXXNewExpr *NE,
else {
NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
const LocationContext *LCtx = Pred->getLocationContext();
- PostImplicitCall PP(NE->getOperatorNew(), NE->getBeginLoc(), LCtx);
+ PostImplicitCall PP(NE->getOperatorNew(), NE->getBeginLoc(), LCtx,
+ getCFGElementRef());
Bldr.generateNode(PP, Pred->getState(), Pred);
}
Engine.enqueue(Dst, currBldrCtx->getBlock(), currStmtIdx);
@@ -951,11 +1337,14 @@ void ExprEngine::ProcessNewAllocator(const CXXNewExpr *NE,
void ExprEngine::ProcessAutomaticObjDtor(const CFGAutomaticObjDtor Dtor,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
+ const auto *DtorDecl = Dtor.getDestructorDecl(getContext());
const VarDecl *varDecl = Dtor.getVarDecl();
QualType varType = varDecl->getType();
ProgramStateRef state = Pred->getState();
- SVal dest = state->getLValue(varDecl, Pred->getLocationContext());
+ const LocationContext *LCtx = Pred->getLocationContext();
+
+ SVal dest = state->getLValue(varDecl, LCtx);
const MemRegion *Region = dest.castAs<loc::MemRegionVal>().getRegion();
if (varType->isReferenceType()) {
@@ -971,12 +1360,47 @@ void ExprEngine::ProcessAutomaticObjDtor(const CFGAutomaticObjDtor Dtor,
varType = cast<TypedValueRegion>(Region)->getValueType();
}
- // FIXME: We need to run the same destructor on every element of the array.
- // This workaround will just run the first destructor (which will still
- // invalidate the entire array).
+ unsigned Idx = 0;
+ if (isa<ArrayType>(varType)) {
+ SVal ElementCount;
+ std::tie(state, Idx) = prepareStateForArrayDestruction(
+ state, Region, varType, LCtx, &ElementCount);
+
+ if (ElementCount.isConstant()) {
+ uint64_t ArrayLength = ElementCount.getAsInteger()->getLimitedValue();
+ assert(ArrayLength &&
+ "An automatic dtor for a 0 length array shouldn't be triggered!");
+
+ // Still handle this case if we don't have assertions enabled.
+ if (!ArrayLength) {
+ static SimpleProgramPointTag PT(
+ "ExprEngine", "Skipping automatic 0 length array destruction, "
+ "which shouldn't be in the CFG.");
+ PostImplicitCall PP(DtorDecl, varDecl->getLocation(), LCtx,
+ getCFGElementRef(), &PT);
+ NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+ Bldr.generateSink(PP, Pred->getState(), Pred);
+ return;
+ }
+ }
+ }
+
EvalCallOptions CallOpts;
- Region = makeZeroElementRegion(state, loc::MemRegionVal(Region), varType,
- CallOpts.IsArrayCtorOrDtor).getAsRegion();
+ Region = makeElementRegion(state, loc::MemRegionVal(Region), varType,
+ CallOpts.IsArrayCtorOrDtor, Idx)
+ .getAsRegion();
+
+ NodeBuilder Bldr(Pred, Dst, getBuilderContext());
+
+ static SimpleProgramPointTag PT("ExprEngine",
+ "Prepare for object destruction");
+ PreImplicitCall PP(DtorDecl, varDecl->getLocation(), LCtx, getCFGElementRef(),
+ &PT);
+ Pred = Bldr.generateNode(PP, state, Pred);
+
+ if (!Pred)
+ return;
+ Bldr.takeNodes(Pred);
VisitCXXDestructor(varType, Region, Dtor.getTriggerStmt(),
/*IsBase=*/false, Pred, Dst, CallOpts);
@@ -999,26 +1423,62 @@ void ExprEngine::ProcessDeleteDtor(const CFGDeleteDtor Dtor,
const CXXRecordDecl *RD = BTy->getAsCXXRecordDecl();
const CXXDestructorDecl *Dtor = RD->getDestructor();
- PostImplicitCall PP(Dtor, DE->getBeginLoc(), LCtx);
+ PostImplicitCall PP(Dtor, DE->getBeginLoc(), LCtx, getCFGElementRef());
NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
Bldr.generateNode(PP, Pred->getState(), Pred);
return;
}
+ auto getDtorDecl = [](const QualType &DTy) {
+ const CXXRecordDecl *RD = DTy->getAsCXXRecordDecl();
+ return RD->getDestructor();
+ };
+
+ unsigned Idx = 0;
EvalCallOptions CallOpts;
const MemRegion *ArgR = ArgVal.getAsRegion();
+
if (DE->isArrayForm()) {
- // FIXME: We need to run the same destructor on every element of the array.
- // This workaround will just run the first destructor (which will still
- // invalidate the entire array).
CallOpts.IsArrayCtorOrDtor = true;
// Yes, it may even be a multi-dimensional array.
while (const auto *AT = getContext().getAsArrayType(DTy))
DTy = AT->getElementType();
- if (ArgR)
- ArgR = getStoreManager().GetElementZeroRegion(cast<SubRegion>(ArgR), DTy);
+
+ if (ArgR) {
+ SVal ElementCount;
+ std::tie(State, Idx) = prepareStateForArrayDestruction(
+ State, ArgR, DTy, LCtx, &ElementCount);
+
+ // If we're about to destruct a 0 length array, don't run any of the
+ // destructors.
+ if (ElementCount.isConstant() &&
+ ElementCount.getAsInteger()->getLimitedValue() == 0) {
+
+ static SimpleProgramPointTag PT(
+ "ExprEngine", "Skipping 0 length array delete destruction");
+ PostImplicitCall PP(getDtorDecl(DTy), DE->getBeginLoc(), LCtx,
+ getCFGElementRef(), &PT);
+ NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+ Bldr.generateNode(PP, Pred->getState(), Pred);
+ return;
+ }
+
+ ArgR = State->getLValue(DTy, svalBuilder.makeArrayIndex(Idx), ArgVal)
+ .getAsRegion();
+ }
}
+ NodeBuilder Bldr(Pred, Dst, getBuilderContext());
+ static SimpleProgramPointTag PT("ExprEngine",
+ "Prepare for object destruction");
+ PreImplicitCall PP(getDtorDecl(DTy), DE->getBeginLoc(), LCtx,
+ getCFGElementRef(), &PT);
+ Pred = Bldr.generateNode(PP, State, Pred);
+
+ if (!Pred)
+ return;
+ Bldr.takeNodes(Pred);
+
VisitCXXDestructor(DTy, ArgR, DE, /*IsBase=*/false, Pred, Dst, CallOpts);
}
@@ -1044,6 +1504,7 @@ void ExprEngine::ProcessBaseDtor(const CFGBaseDtor D,
void ExprEngine::ProcessMemberDtor(const CFGMemberDtor D,
ExplodedNode *Pred, ExplodedNodeSet &Dst) {
+ const auto *DtorDecl = D.getDestructorDecl(getContext());
const FieldDecl *Member = D.getFieldDecl();
QualType T = Member->getType();
ProgramStateRef State = Pred->getState();
@@ -1055,12 +1516,46 @@ void ExprEngine::ProcessMemberDtor(const CFGMemberDtor D,
Loc ThisLoc = State->getSVal(ThisStorageLoc).castAs<Loc>();
SVal FieldVal = State->getLValue(Member, ThisLoc);
- // FIXME: We need to run the same destructor on every element of the array.
- // This workaround will just run the first destructor (which will still
- // invalidate the entire array).
+ unsigned Idx = 0;
+ if (isa<ArrayType>(T)) {
+ SVal ElementCount;
+ std::tie(State, Idx) = prepareStateForArrayDestruction(
+ State, FieldVal.getAsRegion(), T, LCtx, &ElementCount);
+
+ if (ElementCount.isConstant()) {
+ uint64_t ArrayLength = ElementCount.getAsInteger()->getLimitedValue();
+ assert(ArrayLength &&
+ "A member dtor for a 0 length array shouldn't be triggered!");
+
+ // Still handle this case if we don't have assertions enabled.
+ if (!ArrayLength) {
+ static SimpleProgramPointTag PT(
+ "ExprEngine", "Skipping member 0 length array destruction, which "
+ "shouldn't be in the CFG.");
+ PostImplicitCall PP(DtorDecl, Member->getLocation(), LCtx,
+ getCFGElementRef(), &PT);
+ NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+ Bldr.generateSink(PP, Pred->getState(), Pred);
+ return;
+ }
+ }
+ }
+
EvalCallOptions CallOpts;
- FieldVal = makeZeroElementRegion(State, FieldVal, T,
- CallOpts.IsArrayCtorOrDtor);
+ FieldVal =
+ makeElementRegion(State, FieldVal, T, CallOpts.IsArrayCtorOrDtor, Idx);
+
+ NodeBuilder Bldr(Pred, Dst, getBuilderContext());
+
+ static SimpleProgramPointTag PT("ExprEngine",
+ "Prepare for object destruction");
+ PreImplicitCall PP(DtorDecl, Member->getLocation(), LCtx, getCFGElementRef(),
+ &PT);
+ Pred = Bldr.generateNode(PP, State, Pred);
+
+ if (!Pred)
+ return;
+ Bldr.takeNodes(Pred);
VisitCXXDestructor(T, FieldVal.getAsRegion(), CurDtor->getBody(),
/*IsBase=*/false, Pred, Dst, CallOpts);
@@ -1074,9 +1569,8 @@ void ExprEngine::ProcessTemporaryDtor(const CFGTemporaryDtor D,
const LocationContext *LC = Pred->getLocationContext();
const MemRegion *MR = nullptr;
- if (Optional<SVal> V =
- getObjectUnderConstruction(State, D.getBindTemporaryExpr(),
- Pred->getLocationContext())) {
+ if (std::optional<SVal> V = getObjectUnderConstruction(
+ State, D.getBindTemporaryExpr(), Pred->getLocationContext())) {
// FIXME: Currently we insert temporary destructors for default parameters,
// but we don't insert the constructors, so the entry in
// ObjectsUnderConstruction may be missing.
@@ -1092,7 +1586,7 @@ void ExprEngine::ProcessTemporaryDtor(const CFGTemporaryDtor D,
NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
PostImplicitCall PP(D.getDestructorDecl(getContext()),
D.getBindTemporaryExpr()->getBeginLoc(),
- Pred->getLocationContext());
+ Pred->getLocationContext(), getCFGElementRef());
Bldr.generateNode(PP, State, Pred);
return;
}
@@ -1111,15 +1605,31 @@ void ExprEngine::ProcessTemporaryDtor(const CFGTemporaryDtor D,
EvalCallOptions CallOpts;
CallOpts.IsTemporaryCtorOrDtor = true;
if (!MR) {
- // If we have no MR, we still need to unwrap the array to avoid destroying
- // the whole array at once. Regardless, we'd eventually need to model array
- // destructors properly, element-by-element.
+ // FIXME: If we have no MR, we still need to unwrap the array to avoid
+ // destroying the whole array at once.
+ //
+ // For this case there is no universal solution as there is no way to
+ // directly create an array of temporary objects. There are some expressions
+ // however which can create temporary objects and have an array type.
+ //
+ // E.g.: std::initializer_list<S>{S(), S()};
+ //
+ // The expression above has a type of 'const struct S[2]' but it's a single
+ // 'std::initializer_list<>'. The destructors of the 2 temporary 'S()'
+ // objects will be called anyway, because they are 2 separate objects in 2
+ // separate clusters, i.e.: not an array.
+ //
+ // Now the 'std::initializer_list<>' is not an array either even though it
+ // has the type of an array. The point is, we only want to invoke the
+ // destructor for the initializer list once not twice or so.
while (const ArrayType *AT = getContext().getAsArrayType(T)) {
T = AT->getElementType();
- CallOpts.IsArrayCtorOrDtor = true;
+
+ // FIXME: Enable this flag once we handle this case properly.
+ // CallOpts.IsArrayCtorOrDtor = true;
}
} else {
- // We'd eventually need to makeZeroElementRegion() trick here,
+ // FIXME: We'd eventually need to makeElementRegion() trick here,
// but for now we don't have the respective construction contexts,
// so MR would always be null in this case. Do nothing for now.
}
@@ -1245,6 +1755,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::OMPForSimdDirectiveClass:
case Stmt::OMPSectionsDirectiveClass:
case Stmt::OMPSectionDirectiveClass:
+ case Stmt::OMPScopeDirectiveClass:
case Stmt::OMPSingleDirectiveClass:
case Stmt::OMPMasterDirectiveClass:
case Stmt::OMPCriticalDirectiveClass:
@@ -1252,10 +1763,12 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::OMPParallelForSimdDirectiveClass:
case Stmt::OMPParallelSectionsDirectiveClass:
case Stmt::OMPParallelMasterDirectiveClass:
+ case Stmt::OMPParallelMaskedDirectiveClass:
case Stmt::OMPTaskDirectiveClass:
case Stmt::OMPTaskyieldDirectiveClass:
case Stmt::OMPBarrierDirectiveClass:
case Stmt::OMPTaskwaitDirectiveClass:
+ case Stmt::OMPErrorDirectiveClass:
case Stmt::OMPTaskgroupDirectiveClass:
case Stmt::OMPFlushDirectiveClass:
case Stmt::OMPDepobjDirectiveClass:
@@ -1275,9 +1788,13 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::OMPTaskLoopDirectiveClass:
case Stmt::OMPTaskLoopSimdDirectiveClass:
case Stmt::OMPMasterTaskLoopDirectiveClass:
+ case Stmt::OMPMaskedTaskLoopDirectiveClass:
case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
+ case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
+ case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
+ case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
case Stmt::OMPDistributeDirectiveClass:
case Stmt::OMPDistributeParallelForDirectiveClass:
case Stmt::OMPDistributeParallelForSimdDirectiveClass:
@@ -1297,8 +1814,14 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::OMPInteropDirectiveClass:
case Stmt::OMPDispatchDirectiveClass:
case Stmt::OMPMaskedDirectiveClass:
+ case Stmt::OMPGenericLoopDirectiveClass:
+ case Stmt::OMPTeamsGenericLoopDirectiveClass:
+ case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
+ case Stmt::OMPParallelGenericLoopDirectiveClass:
+ case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
case Stmt::CapturedStmtClass:
- case Stmt::OMPUnrollDirectiveClass: {
+ case Stmt::OMPUnrollDirectiveClass:
+ case Stmt::OMPMetaDirectiveClass: {
const ExplodedNode *node = Bldr.generateSink(S, Pred, Pred->getState());
Engine.addAbortedBlock(node, currBldrCtx->getBlock());
break;
@@ -1341,8 +1864,9 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::GNUNullExprClass: {
// GNU __null is a pointer-width integer, not an actual pointer.
ProgramStateRef state = Pred->getState();
- state = state->BindExpr(S, Pred->getLocationContext(),
- svalBuilder.makeIntValWithPtrWidth(0, false));
+ state = state->BindExpr(
+ S, Pred->getLocationContext(),
+ svalBuilder.makeIntValWithWidth(getContext().VoidPtrTy, 0));
Bldr.generateNode(S, Pred, state);
break;
}
@@ -1369,10 +1893,14 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
break;
}
+ case Stmt::ArrayInitLoopExprClass:
+ Bldr.takeNodes(Pred);
+ VisitArrayInitLoopExpr(cast<ArrayInitLoopExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
// Cases not handled yet; but will handle some day.
case Stmt::DesignatedInitExprClass:
case Stmt::DesignatedInitUpdateExprClass:
- case Stmt::ArrayInitLoopExprClass:
case Stmt::ArrayInitIndexExprClass:
case Stmt::ExtVectorElementExprClass:
case Stmt::ImaginaryLiteralClass:
@@ -1394,6 +1922,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::ConceptSpecializationExprClass:
case Stmt::CXXRewrittenBinaryOperatorClass:
case Stmt::RequiresExprClass:
+ case Expr::CXXParenListInitExprClass:
// Fall through.
// Cases we intentionally don't evaluate, since they don't need
@@ -1453,7 +1982,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
IsTemporary = true;
}
- Optional<SVal> ConstantVal = svalBuilder.getConstantVal(ArgE);
+ std::optional<SVal> ConstantVal = svalBuilder.getConstantVal(ArgE);
if (!ConstantVal)
ConstantVal = UnknownVal();
@@ -1593,7 +2122,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
// valid region.
const Decl *Callee = OCE->getCalleeDecl();
if (const auto *MD = dyn_cast_or_null<CXXMethodDecl>(Callee)) {
- if (MD->isInstance()) {
+ if (MD->isImplicitObjectMemberFunction()) {
ProgramStateRef State = Pred->getState();
const LocationContext *LCtx = Pred->getLocationContext();
ProgramStateRef NewState =
@@ -1607,8 +2136,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
}
}
}
- // FALLTHROUGH
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case Stmt::CallExprClass:
@@ -1919,7 +2447,7 @@ bool ExprEngine::replayWithoutInlining(ExplodedNode *N,
continue;
if (L.getAs<CallEnter>())
continue;
- if (Optional<StmtPoint> SP = L.getAs<StmtPoint>())
+ if (std::optional<StmtPoint> SP = L.getAs<StmtPoint>())
if (SP->getStmt() == CE)
continue;
break;
@@ -1932,8 +2460,9 @@ bool ExprEngine::replayWithoutInlining(ExplodedNode *N,
// Build an Epsilon node from which we will restart the analyzes.
// Note that CE is permitted to be NULL!
- ProgramPoint NewNodeLoc =
- EpsilonPoint(BeforeProcessingCall->getLocationContext(), CE);
+ static SimpleProgramPointTag PT("ExprEngine", "Replay without inlining");
+ ProgramPoint NewNodeLoc = EpsilonPoint(
+ BeforeProcessingCall->getLocationContext(), CE, nullptr, &PT);
// Add the special flag to GDM to signal retrying with no inlining.
// Note, changing the state ensures that we are not going to cache out.
ProgramStateRef NewNodeState = BeforeProcessingCall->getState();
@@ -1988,8 +2517,7 @@ void ExprEngine::processCFGBlockEntrance(const BlockEdge &L,
if (BlockCount == AMgr.options.maxBlockVisitOnPath - 1 &&
AMgr.options.ShouldWidenLoops) {
const Stmt *Term = nodeBuilder.getContext().getBlock()->getTerminatorStmt();
- if (!(Term &&
- (isa<ForStmt>(Term) || isa<WhileStmt>(Term) || isa<DoStmt>(Term))))
+ if (!isa_and_nonnull<ForStmt, WhileStmt, DoStmt, CXXForRangeStmt>(Term))
return;
// Widen.
const LocationContext *LCtx = Pred->getLocationContext();
@@ -2123,10 +2651,8 @@ static const Stmt *ResolveCondition(const Stmt *Condition,
// The invariants are still shifting, but it is possible that the
// last element in a CFGBlock is not a CFGStmt. Look for the last
// CFGStmt as the value of the condition.
- CFGBlock::const_reverse_iterator I = B->rbegin(), E = B->rend();
- for (; I != E; ++I) {
- CFGElement Elem = *I;
- Optional<CFGStmt> CS = Elem.getAs<CFGStmt>();
+ for (CFGElement Elem : llvm::reverse(*B)) {
+ std::optional<CFGStmt> CS = Elem.getAs<CFGStmt>();
if (!CS)
continue;
const Stmt *LastStmt = CS->getStmt();
@@ -2164,9 +2690,9 @@ bool ExprEngine::hasMoreIteration(ProgramStateRef State,
}
/// Split the state on whether there are any more iterations left for this loop.
-/// Returns a (HasMoreIteration, HasNoMoreIteration) pair, or None when the
-/// acquisition of the loop condition value failed.
-static Optional<std::pair<ProgramStateRef, ProgramStateRef>>
+/// Returns a (HasMoreIteration, HasNoMoreIteration) pair, or std::nullopt when
+/// the acquisition of the loop condition value failed.
+static std::optional<std::pair<ProgramStateRef, ProgramStateRef>>
assumeCondition(const Stmt *Condition, ExplodedNode *N) {
ProgramStateRef State = N->getState();
if (const auto *ObjCFor = dyn_cast<ObjCForCollectionStmt>(Condition)) {
@@ -2205,7 +2731,7 @@ assumeCondition(const Stmt *Condition, ExplodedNode *N) {
// If the condition is still unknown, give up.
if (X.isUnknownOrUndef())
- return None;
+ return std::nullopt;
DefinedSVal V = X.castAs<DefinedSVal>();
@@ -2265,7 +2791,7 @@ void ExprEngine::processBranch(const Stmt *Condition,
continue;
}
if (StTrue && StFalse)
- assert(!isa<ObjCForCollectionStmt>(Condition));;
+ assert(!isa<ObjCForCollectionStmt>(Condition));
// Process the true branch.
if (builder.isFeasible(true)) {
@@ -2330,12 +2856,12 @@ void ExprEngine::processIndirectGoto(IndirectGotoNodeBuilder &builder) {
using iterator = IndirectGotoNodeBuilder::iterator;
- if (Optional<loc::GotoLabel> LV = V.getAs<loc::GotoLabel>()) {
+ if (std::optional<loc::GotoLabel> LV = V.getAs<loc::GotoLabel>()) {
const LabelDecl *L = LV->getLabel();
- for (iterator I = builder.begin(), E = builder.end(); I != E; ++I) {
- if (I.getLabel() == L) {
- builder.generateNode(I, state);
+ for (iterator Succ : builder) {
+ if (Succ.getLabel() == L) {
+ builder.generateNode(Succ, state);
return;
}
}
@@ -2343,7 +2869,7 @@ void ExprEngine::processIndirectGoto(IndirectGotoNodeBuilder &builder) {
llvm_unreachable("No block with label.");
}
- if (V.getAs<loc::ConcreteInt>() || V.getAs<UndefinedVal>()) {
+ if (isa<UndefinedVal, loc::ConcreteInt>(V)) {
// Dispatch to the first target and mark it as a sink.
//ExplodedNode* N = builder.generateNode(builder.begin(), state, true);
// FIXME: add checker visit.
@@ -2354,8 +2880,8 @@ void ExprEngine::processIndirectGoto(IndirectGotoNodeBuilder &builder) {
// This is really a catch-all. We don't support symbolics yet.
// FIXME: Implement dispatch for symbolic pointers.
- for (iterator I = builder.begin(), E = builder.end(); I != E; ++I)
- builder.generateNode(I, state);
+ for (iterator Succ : builder)
+ builder.generateNode(Succ, state);
}
void ExprEngine::processBeginOfFunction(NodeBuilderContext &BC,
@@ -2482,7 +3008,7 @@ void ExprEngine::processSwitch(SwitchNodeBuilder& builder) {
V2 = V1;
ProgramStateRef StateCase;
- if (Optional<NonLoc> NL = CondV.getAs<NonLoc>())
+ if (std::optional<NonLoc> NL = CondV.getAs<NonLoc>())
std::tie(StateCase, DefaultSt) =
DefaultSt->assumeInclusiveRange(*NL, V1, V2);
else // UnknownVal
@@ -2541,14 +3067,14 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
const Decl *D = LocCtxt->getDecl();
const auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
const auto *DeclRefEx = dyn_cast<DeclRefExpr>(Ex);
- Optional<std::pair<SVal, QualType>> VInfo;
+ std::optional<std::pair<SVal, QualType>> VInfo;
if (AMgr.options.ShouldInlineLambdas && DeclRefEx &&
DeclRefEx->refersToEnclosingVariableOrCapture() && MD &&
MD->getParent()->isLambda()) {
// Lookup the field of the lambda.
const CXXRecordDecl *CXXRec = MD->getParent();
- llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
+ llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
FieldDecl *LambdaThisCaptureField;
CXXRec->getCaptureFields(LambdaCaptureFields, LambdaThisCaptureField);
@@ -2593,20 +3119,175 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
ProgramPoint::PostLValueKind);
return;
}
- if (isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D)) {
+ if (isa<FieldDecl, IndirectFieldDecl>(D)) {
// Delegate all work related to pointer to members to the surrounding
// operator&.
return;
}
- if (isa<BindingDecl>(D)) {
- // FIXME: proper support for bound declarations.
- // For now, let's just prevent crashing.
+ if (const auto *BD = dyn_cast<BindingDecl>(D)) {
+ const auto *DD = cast<DecompositionDecl>(BD->getDecomposedDecl());
+
+ SVal Base = state->getLValue(DD, LCtx);
+ if (DD->getType()->isReferenceType()) {
+ if (const MemRegion *R = Base.getAsRegion())
+ Base = state->getSVal(R);
+ else
+ Base = UnknownVal();
+ }
+
+ SVal V = UnknownVal();
+
+ // Handle binding to data members
+ if (const auto *ME = dyn_cast<MemberExpr>(BD->getBinding())) {
+ const auto *Field = cast<FieldDecl>(ME->getMemberDecl());
+ V = state->getLValue(Field, Base);
+ }
+ // Handle binding to arrays
+ else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BD->getBinding())) {
+ SVal Idx = state->getSVal(ASE->getIdx(), LCtx);
+
+ // Note: the index of an element in a structured binding is automatically
+ // created and it is a unique identifier of the specific element. Thus it
+ // cannot be a value that varies at runtime.
+ assert(Idx.isConstant() && "BindingDecl array index is not a constant!");
+
+ V = state->getLValue(BD->getType(), Idx, Base);
+ }
+ // Handle binding to tuple-like structures
+ else if (const auto *HV = BD->getHoldingVar()) {
+ V = state->getLValue(HV, LCtx);
+
+ if (HV->getType()->isReferenceType()) {
+ if (const MemRegion *R = V.getAsRegion())
+ V = state->getSVal(R);
+ else
+ V = UnknownVal();
+ }
+ } else
+ llvm_unreachable("An unknown case of structured binding encountered!");
+
+ // In case of tuple-like types the references are already handled, so we
+ // don't want to handle them again.
+ if (BD->getType()->isReferenceType() && !BD->getHoldingVar()) {
+ if (const MemRegion *R = V.getAsRegion())
+ V = state->getSVal(R);
+ else
+ V = UnknownVal();
+ }
+
+ Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), nullptr,
+ ProgramPoint::PostLValueKind);
+
+ return;
+ }
+
+ if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(D)) {
+ // FIXME: We should meaningfully implement this.
+ (void)TPO;
return;
}
llvm_unreachable("Support for this Decl not implemented.");
}
+/// VisitArrayInitLoopExpr - Transfer function for array init loop.
+void ExprEngine::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *Ex,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ ExplodedNodeSet CheckerPreStmt;
+ getCheckerManager().runCheckersForPreStmt(CheckerPreStmt, Pred, Ex, *this);
+
+ ExplodedNodeSet EvalSet;
+ StmtNodeBuilder Bldr(CheckerPreStmt, EvalSet, *currBldrCtx);
+
+ const Expr *Arr = Ex->getCommonExpr()->getSourceExpr();
+
+ for (auto *Node : CheckerPreStmt) {
+
+ // The constructor visitior has already taken care of everything.
+ if (isa<CXXConstructExpr>(Ex->getSubExpr()))
+ break;
+
+ const LocationContext *LCtx = Node->getLocationContext();
+ ProgramStateRef state = Node->getState();
+
+ SVal Base = UnknownVal();
+
+ // As in case of this expression the sub-expressions are not visited by any
+ // other transfer functions, they are handled by matching their AST.
+
+ // Case of implicit copy or move ctor of object with array member
+ //
+ // Note: ExprEngine::VisitMemberExpr is not able to bind the array to the
+ // environment.
+ //
+ // struct S {
+ // int arr[2];
+ // };
+ //
+ //
+ // S a;
+ // S b = a;
+ //
+ // The AST in case of a *copy constructor* looks like this:
+ // ArrayInitLoopExpr
+ // |-OpaqueValueExpr
+ // | `-MemberExpr <-- match this
+ // | `-DeclRefExpr
+ // ` ...
+ //
+ //
+ // S c;
+ // S d = std::move(d);
+ //
+ // In case of a *move constructor* the resulting AST looks like:
+ // ArrayInitLoopExpr
+ // |-OpaqueValueExpr
+ // | `-MemberExpr <-- match this first
+ // | `-CXXStaticCastExpr <-- match this after
+ // | `-DeclRefExpr
+ // ` ...
+ if (const auto *ME = dyn_cast<MemberExpr>(Arr)) {
+ Expr *MEBase = ME->getBase();
+
+ // Move ctor
+ if (auto CXXSCE = dyn_cast<CXXStaticCastExpr>(MEBase)) {
+ MEBase = CXXSCE->getSubExpr();
+ }
+
+ auto ObjDeclExpr = cast<DeclRefExpr>(MEBase);
+ SVal Obj = state->getLValue(cast<VarDecl>(ObjDeclExpr->getDecl()), LCtx);
+
+ Base = state->getLValue(cast<FieldDecl>(ME->getMemberDecl()), Obj);
+ }
+
+ // Case of lambda capture and decomposition declaration
+ //
+ // int arr[2];
+ //
+ // [arr]{ int a = arr[0]; }();
+ // auto[a, b] = arr;
+ //
+ // In both of these cases the AST looks like the following:
+ // ArrayInitLoopExpr
+ // |-OpaqueValueExpr
+ // | `-DeclRefExpr <-- match this
+ // ` ...
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Arr))
+ Base = state->getLValue(cast<VarDecl>(DRE->getDecl()), LCtx);
+
+ // Create a lazy compound value to the original array
+ if (const MemRegion *R = Base.getAsRegion())
+ Base = state->getSVal(R);
+ else
+ Base = UnknownVal();
+
+ Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, Base));
+ }
+
+ getCheckerManager().runCheckersForPostStmt(Dst, EvalSet, Ex, *this);
+}
+
/// VisitArraySubscriptExpr - Transfer function for array accesses
void ExprEngine::VisitArraySubscriptExpr(const ArraySubscriptExpr *A,
ExplodedNode *Pred,
@@ -2670,7 +3351,7 @@ void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
// Handle static member variables and enum constants accessed via
// member syntax.
- if (isa<VarDecl>(Member) || isa<EnumConstantDecl>(Member)) {
+ if (isa<VarDecl, EnumConstantDecl>(Member)) {
for (const auto I : CheckedSet)
VisitCommonDeclRefExpr(M, Member, I, EvalSet);
} else {
@@ -2684,7 +3365,7 @@ void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
// Handle C++ method calls.
if (const auto *MD = dyn_cast<CXXMethodDecl>(Member)) {
- if (MD->isInstance())
+ if (MD->isImplicitObjectMemberFunction())
state = createTemporaryRegionIfNeeded(state, LCtx, BaseExpr);
SVal MDVal = svalBuilder.getFunctionPointer(MD);
@@ -2702,6 +3383,14 @@ void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
SVal baseExprVal =
MR ? loc::MemRegionVal(MR) : state->getSVal(BaseExpr, LCtx);
+ // FIXME: Copied from RegionStoreManager::bind()
+ if (const auto *SR =
+ dyn_cast_or_null<SymbolicRegion>(baseExprVal.getAsRegion())) {
+ QualType T = SR->getPointeeStaticType();
+ baseExprVal =
+ loc::MemRegionVal(getStoreManager().GetElementZeroRegion(SR, T));
+ }
+
const auto *field = cast<FieldDecl>(Member);
SVal L = state->getLValue(field, baseExprVal);
@@ -2791,7 +3480,8 @@ ProgramStateRef ExprEngine::processPointerEscapedOnBind(
for (const std::pair<SVal, SVal> &LocAndVal : LocAndVals) {
// Cases (1) and (2).
const MemRegion *MR = LocAndVal.first.getAsRegion();
- if (!MR || !MR->hasStackStorage()) {
+ if (!MR ||
+ !isa<StackSpaceRegion, StaticGlobalSpaceRegion>(MR->getMemorySpace())) {
Escaped.push_back(LocAndVal.second);
continue;
}
@@ -2894,7 +3584,7 @@ void ExprEngine::evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE,
// If the location is not a 'Loc', it will already be handled by
// the checkers. There is nothing left to do.
- if (!location.getAs<Loc>()) {
+ if (!isa<Loc>(location)) {
const ProgramPoint L = PostStore(StoreE, LC, /*Loc*/nullptr,
/*tag*/nullptr);
ProgramStateRef state = Pred->getState();
@@ -2915,7 +3605,7 @@ void ExprEngine::evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE,
Val, LC, /* notifyChanges = */ !atDeclInit);
const MemRegion *LocReg = nullptr;
- if (Optional<loc::MemRegionVal> LocRegVal =
+ if (std::optional<loc::MemRegionVal> LocRegVal =
location.getAs<loc::MemRegionVal>()) {
LocReg = LocRegVal->getRegion();
}
@@ -2964,7 +3654,7 @@ void ExprEngine::evalLoad(ExplodedNodeSet &Dst,
SVal location,
const ProgramPointTag *tag,
QualType LoadTy) {
- assert(!location.getAs<NonLoc>() && "location cannot be a NonLoc.");
+ assert(!isa<NonLoc>(location) && "location cannot be a NonLoc.");
assert(NodeEx);
assert(BoundEx);
// Evaluate the location (checks for bad dereferences).
@@ -3056,7 +3746,7 @@ void ExprEngine::evalEagerlyAssumeBinOpBifurcation(ExplodedNodeSet &Dst,
ProgramStateRef state = Pred->getState();
SVal V = state->getSVal(Ex, Pred->getLocationContext());
- Optional<nonloc::SymbolVal> SEV = V.getAs<nonloc::SymbolVal>();
+ std::optional<nonloc::SymbolVal> SEV = V.getAs<nonloc::SymbolVal>();
if (SEV && SEV->isExpression()) {
const std::pair<const ProgramPointTag *, const ProgramPointTag*> &tags =
geteagerlyAssumeBinOpBifurcationTags();
@@ -3095,9 +3785,9 @@ void ExprEngine::VisitGCCAsmStmt(const GCCAsmStmt *A, ExplodedNode *Pred,
for (const Expr *O : A->outputs()) {
SVal X = state->getSVal(O, Pred->getLocationContext());
- assert(!X.getAs<NonLoc>()); // Should be an Lval, or unknown, undef.
+ assert(!isa<NonLoc>(X)); // Should be an Lval, or unknown, undef.
- if (Optional<Loc> LV = X.getAs<Loc>())
+ if (std::optional<Loc> LV = X.getAs<Loc>())
state = state->bindLoc(*LV, UnknownVal(), Pred->getLocationContext());
}
@@ -3114,7 +3804,6 @@ void ExprEngine::VisitMSAsmStmt(const MSAsmStmt *A, ExplodedNode *Pred,
// Visualization.
//===----------------------------------------------------------------------===//
-#ifndef NDEBUG
namespace llvm {
template<>
@@ -3125,12 +3814,9 @@ struct DOTGraphTraits<ExplodedGraph*> : public DefaultDOTGraphTraits {
BugReporter &BR = static_cast<ExprEngine &>(
N->getState()->getStateManager().getOwningEngine()).getBugReporter();
- const auto EQClasses =
- llvm::make_range(BR.EQClasses_begin(), BR.EQClasses_end());
-
- for (const auto &EQ : EQClasses) {
- for (const auto &I : EQ.getReports()) {
- const auto *PR = dyn_cast<PathSensitiveBugReport>(I.get());
+ for (const auto &Class : BR.equivalenceClasses()) {
+ for (const auto &Report : Class.getReports()) {
+ const auto *PR = dyn_cast<PathSensitiveBugReport>(Report.get());
if (!PR)
continue;
const ExplodedNode *EN = PR->getErrorNode();
@@ -3190,7 +3876,7 @@ struct DOTGraphTraits<ExplodedGraph*> : public DefaultDOTGraphTraits {
OtherNode->getLocation().printJson(Out, /*NL=*/"\\l");
Out << ", \"tag\": ";
if (const ProgramPointTag *Tag = OtherNode->getLocation().getTag())
- Out << '\"' << Tag->getTagDescription() << "\"";
+ Out << '\"' << Tag->getTagDescription() << '\"';
else
Out << "null";
Out << ", \"node_id\": " << OtherNode->getID() <<
@@ -3212,72 +3898,51 @@ struct DOTGraphTraits<ExplodedGraph*> : public DefaultDOTGraphTraits {
};
} // namespace llvm
-#endif
void ExprEngine::ViewGraph(bool trim) {
-#ifndef NDEBUG
std::string Filename = DumpGraph(trim);
llvm::DisplayGraph(Filename, false, llvm::GraphProgram::DOT);
-#else
- llvm::errs() << "Warning: viewing graph requires assertions" << "\n";
-#endif
}
-
-void ExprEngine::ViewGraph(ArrayRef<const ExplodedNode*> Nodes) {
-#ifndef NDEBUG
+void ExprEngine::ViewGraph(ArrayRef<const ExplodedNode *> Nodes) {
std::string Filename = DumpGraph(Nodes);
llvm::DisplayGraph(Filename, false, llvm::GraphProgram::DOT);
-#else
- llvm::errs() << "Warning: viewing graph requires assertions" << "\n";
-#endif
}
std::string ExprEngine::DumpGraph(bool trim, StringRef Filename) {
-#ifndef NDEBUG
if (trim) {
std::vector<const ExplodedNode *> Src;
// Iterate through the reports and get their nodes.
- for (BugReporter::EQClasses_iterator
- EI = BR.EQClasses_begin(), EE = BR.EQClasses_end(); EI != EE; ++EI) {
+ for (const auto &Class : BR.equivalenceClasses()) {
const auto *R =
- dyn_cast<PathSensitiveBugReport>(EI->getReports()[0].get());
+ dyn_cast<PathSensitiveBugReport>(Class.getReports()[0].get());
if (!R)
continue;
const auto *N = const_cast<ExplodedNode *>(R->getErrorNode());
Src.push_back(N);
}
return DumpGraph(Src, Filename);
- } else {
- return llvm::WriteGraph(&G, "ExprEngine", /*ShortNames=*/false,
- /*Title=*/"Exploded Graph",
- /*Filename=*/std::string(Filename));
}
-#else
- llvm::errs() << "Warning: dumping graph requires assertions" << "\n";
- return "";
-#endif
+
+ return llvm::WriteGraph(&G, "ExprEngine", /*ShortNames=*/false,
+ /*Title=*/"Exploded Graph",
+ /*Filename=*/std::string(Filename));
}
-std::string ExprEngine::DumpGraph(ArrayRef<const ExplodedNode*> Nodes,
+std::string ExprEngine::DumpGraph(ArrayRef<const ExplodedNode *> Nodes,
StringRef Filename) {
-#ifndef NDEBUG
std::unique_ptr<ExplodedGraph> TrimmedG(G.trim(Nodes));
if (!TrimmedG.get()) {
llvm::errs() << "warning: Trimmed ExplodedGraph is empty.\n";
return "";
- } else {
- return llvm::WriteGraph(TrimmedG.get(), "TrimmedExprEngine",
- /*ShortNames=*/false,
- /*Title=*/"Trimmed Exploded Graph",
- /*Filename=*/std::string(Filename));
- }
-#else
- llvm::errs() << "Warning: dumping graph requires assertions" << "\n";
- return "";
-#endif
+ }
+
+ return llvm::WriteGraph(TrimmedG.get(), "TrimmedExprEngine",
+ /*ShortNames=*/false,
+ /*Title=*/"Trimmed Exploded Graph",
+ /*Filename=*/std::string(Filename));
}
void *ProgramStateTrait<ReplayWithoutInlining>::GDMIndex() {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
index 7ad3dca831ac..7e431f7e598c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -10,10 +10,11 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/ExprCXX.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -29,8 +30,7 @@ static SVal conjureOffsetSymbolOnLocation(
SVal Symbol, SVal Other, Expr* Expression, SValBuilder &svalBuilder,
unsigned Count, const LocationContext *LCtx) {
QualType Ty = Expression->getType();
- if (Other.getAs<Loc>() &&
- Ty->isIntegralOrEnumerationType() &&
+ if (isa<Loc>(Other) && Ty->isIntegralOrEnumerationType() &&
Symbol.isUnknown()) {
return svalBuilder.conjureSymbolVal(Expression, LCtx, Ty, Count);
}
@@ -133,11 +133,9 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
SVal location = LeftV;
evalLoad(Tmp, B, LHS, *it, state, location);
- for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E;
- ++I) {
-
- state = (*I)->getState();
- const LocationContext *LCtx = (*I)->getLocationContext();
+ for (ExplodedNode *N : Tmp) {
+ state = N->getState();
+ const LocationContext *LCtx = N->getLocationContext();
SVal V = state->getSVal(LHS, LCtx);
// Get the computation type.
@@ -171,8 +169,7 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
currBldrCtx->blockCount());
// However, we need to convert the symbol to the computation type.
Result = svalBuilder.evalCast(LHSVal, CTy, LTy);
- }
- else {
+ } else {
// The left-hand side may bind to a different value then the
// computation type.
LHSVal = svalBuilder.evalCast(Result, LTy, CTy);
@@ -185,7 +182,7 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
else
state = state->BindExpr(B, LCtx, Result);
- evalStore(Tmp2, B, LHS, *I, state, location, LHSVal);
+ evalStore(Tmp2, B, LHS, N, state, location, LHSVal);
}
}
@@ -211,14 +208,12 @@ void ExprEngine::VisitBlockExpr(const BlockExpr *BE, ExplodedNode *Pred,
if (const BlockDataRegion *BDR =
dyn_cast_or_null<BlockDataRegion>(V.getAsRegion())) {
- BlockDataRegion::referenced_vars_iterator I = BDR->referenced_vars_begin(),
- E = BDR->referenced_vars_end();
-
+ auto ReferencedVars = BDR->referenced_vars();
auto CI = BD->capture_begin();
auto CE = BD->capture_end();
- for (; I != E; ++I) {
- const VarRegion *capturedR = I.getCapturedRegion();
- const TypedValueRegion *originalR = I.getOriginalRegion();
+ for (auto Var : ReferencedVars) {
+ const VarRegion *capturedR = Var.getCapturedRegion();
+ const TypedValueRegion *originalR = Var.getOriginalRegion();
// If the capture had a copy expression, use the result of evaluating
// that expression, otherwise use the original value.
@@ -269,10 +264,12 @@ ProgramStateRef ExprEngine::handleLValueBitCast(
}
// Delegate to SValBuilder to process.
SVal OrigV = state->getSVal(Ex, LCtx);
- SVal V = svalBuilder.evalCast(OrigV, T, ExTy);
+ SVal SimplifiedOrigV = svalBuilder.simplifySVal(state, OrigV);
+ SVal V = svalBuilder.evalCast(SimplifiedOrigV, T, ExTy);
// Negate the result if we're treating the boolean as a signed i1
- if (CastE->getCastKind() == CK_BooleanToSignedIntegral)
- V = evalMinus(V);
+ if (CastE->getCastKind() == CK_BooleanToSignedIntegral && V.isValid())
+ V = svalBuilder.evalMinus(V.castAs<NonLoc>());
+
state = state->BindExpr(CastE, LCtx, V);
if (V.isUnknown() && !OrigV.isUnknown()) {
state = escapeValues(state, OrigV, PSK_EscapeOther);
@@ -290,9 +287,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
if (CastE->getCastKind() == CK_LValueToRValue ||
CastE->getCastKind() == CK_LValueToRValueBitCast) {
- for (ExplodedNodeSet::iterator I = dstPreStmt.begin(), E = dstPreStmt.end();
- I!=E; ++I) {
- ExplodedNode *subExprNode = *I;
+ for (ExplodedNode *subExprNode : dstPreStmt) {
ProgramStateRef state = subExprNode->getState();
const LocationContext *LCtx = subExprNode->getLocationContext();
evalLoad(Dst, CastE, CastE, subExprNode, state, state->getSVal(Ex, LCtx));
@@ -308,10 +303,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
T = ExCast->getTypeAsWritten();
StmtNodeBuilder Bldr(dstPreStmt, Dst, *currBldrCtx);
- for (ExplodedNodeSet::iterator I = dstPreStmt.begin(), E = dstPreStmt.end();
- I != E; ++I) {
-
- Pred = *I;
+ for (ExplodedNode *Pred : dstPreStmt) {
ProgramStateRef state = Pred->getState();
const LocationContext *LCtx = Pred->getLocationContext();
@@ -371,7 +363,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
case CK_IntegralToPointer:
case CK_PointerToIntegral: {
SVal V = state->getSVal(Ex, LCtx);
- if (V.getAs<nonloc::PointerToMember>()) {
+ if (isa<nonloc::PointerToMember>(V)) {
state = state->BindExpr(CastE, LCtx, UnknownVal());
Bldr.generateNode(CastE, Pred, state);
continue;
@@ -416,7 +408,10 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
case CK_IntegralCast: {
// Delegate to SValBuilder to process.
SVal V = state->getSVal(Ex, LCtx);
- V = svalBuilder.evalIntegralCast(state, V, T, ExTy);
+ if (AMgr.options.ShouldSupportSymbolicIntegerCasts)
+ V = svalBuilder.evalCast(V, T, ExTy);
+ else
+ V = svalBuilder.evalIntegralCast(state, V, T, ExTy);
state = state->BindExpr(CastE, LCtx, V);
Bldr.generateNode(CastE, Pred, state);
continue;
@@ -439,14 +434,15 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
if (CastE->isGLValue())
resultType = getContext().getPointerType(resultType);
- bool Failed = false;
+ bool Failed = true;
- // Check if the value being cast evaluates to 0.
- if (val.isZeroConstant())
- Failed = true;
- // Else, evaluate the cast.
- else
- val = getStoreManager().attemptDownCast(val, T, Failed);
+ // Check if the value being cast does not evaluates to 0.
+ if (!val.isZeroConstant())
+ if (std::optional<SVal> V =
+ StateMgr.getStoreManager().evalBaseToDerived(val, T)) {
+ val = *V;
+ Failed = false;
+ }
if (Failed) {
if (T->isReferenceType()) {
@@ -456,7 +452,8 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
continue;
} else {
// If the cast fails on a pointer, bind to 0.
- state = state->BindExpr(CastE, LCtx, svalBuilder.makeNull());
+ state = state->BindExpr(CastE, LCtx,
+ svalBuilder.makeNullWithType(resultType));
}
} else {
// If we don't know if the cast succeeded, conjure a new symbol.
@@ -478,14 +475,13 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
if (CastE->isGLValue())
resultType = getContext().getPointerType(resultType);
- bool Failed = false;
-
if (!val.isConstant()) {
- val = getStoreManager().attemptDownCast(val, T, Failed);
+ std::optional<SVal> V = getStoreManager().evalBaseToDerived(val, T);
+ val = V ? *V : UnknownVal();
}
// Failed to cast or the result is unknown, fall back to conservative.
- if (Failed || val.isUnknown()) {
+ if (val.isUnknown()) {
val =
svalBuilder.conjureSymbolVal(nullptr, CastE, LCtx, resultType,
currBldrCtx->blockCount());
@@ -495,7 +491,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
continue;
}
case CK_NullToPointer: {
- SVal V = svalBuilder.makeNull();
+ SVal V = svalBuilder.makeNullWithType(CastE->getType());
state = state->BindExpr(CastE, LCtx, V);
Bldr.generateNode(CastE, Pred, state);
continue;
@@ -520,7 +516,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
}
// Explicitly proceed with default handler for this case cascade.
}
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
// Various C++ casts that are not handled yet.
case CK_ToUnion:
case CK_MatrixCast:
@@ -550,7 +546,7 @@ void ExprEngine::VisitCompoundLiteralExpr(const CompoundLiteralExpr *CL,
const Expr *Init = CL->getInitializer();
SVal V = State->getSVal(CL->getInitializer(), LCtx);
- if (isa<CXXConstructExpr>(Init) || isa<CXXStdInitializerListExpr>(Init)) {
+ if (isa<CXXConstructExpr, CXXStdInitializerListExpr>(Init)) {
// No work needed. Just pass the value up to this expression.
} else {
assert(isa<InitListExpr>(Init));
@@ -757,9 +753,8 @@ void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
return;
}
- for (InitListExpr::const_reverse_iterator it = IE->rbegin(),
- ei = IE->rend(); it != ei; ++it) {
- SVal V = state->getSVal(cast<Expr>(*it), LCtx);
+ for (const Stmt *S : llvm::reverse(*IE)) {
+ SVal V = state->getSVal(cast<Expr>(S), LCtx);
vals = getBasicVals().prependSVal(V, vals);
}
@@ -820,7 +815,7 @@ void ExprEngine::VisitGuardedExpr(const Expr *Ex,
SVal V;
for (CFGElement CE : llvm::reverse(*SrcBlock)) {
- if (Optional<CFGStmt> CS = CE.getAs<CFGStmt>()) {
+ if (std::optional<CFGStmt> CS = CE.getAs<CFGStmt>()) {
const Expr *ValEx = cast<Expr>(CS->getStmt());
ValEx = ValEx->IgnoreParens();
@@ -879,8 +874,7 @@ VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
QualType T = Ex->getTypeOfArgument();
- for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
- I != E; ++I) {
+ for (ExplodedNode *N : CheckedSet) {
if (Ex->getKind() == UETT_SizeOf) {
if (!T->isIncompleteType() && !T->isConstantSizeType()) {
assert(T->isVariableArrayType() && "Unknown non-constant-sized type.");
@@ -899,18 +893,17 @@ VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
APSInt Value = Ex->EvaluateKnownConstInt(getContext());
CharUnits amt = CharUnits::fromQuantity(Value.getZExtValue());
- ProgramStateRef state = (*I)->getState();
- state = state->BindExpr(Ex, (*I)->getLocationContext(),
- svalBuilder.makeIntVal(amt.getQuantity(),
- Ex->getType()));
- Bldr.generateNode(Ex, *I, state);
+ ProgramStateRef state = N->getState();
+ state = state->BindExpr(
+ Ex, N->getLocationContext(),
+ svalBuilder.makeIntVal(amt.getQuantity(), Ex->getType()));
+ Bldr.generateNode(Ex, N, state);
}
getCheckerManager().runCheckersForPostStmt(Dst, EvalSet, Ex, *this);
}
-void ExprEngine::handleUOExtension(ExplodedNodeSet::iterator I,
- const UnaryOperator *U,
+void ExprEngine::handleUOExtension(ExplodedNode *N, const UnaryOperator *U,
StmtNodeBuilder &Bldr) {
// FIXME: We can probably just have some magic in Environment::getSVal()
// that propagates values, instead of creating a new node here.
@@ -920,10 +913,9 @@ void ExprEngine::handleUOExtension(ExplodedNodeSet::iterator I,
// generate an extra node that just propagates the value of the
// subexpression.
const Expr *Ex = U->getSubExpr()->IgnoreParens();
- ProgramStateRef state = (*I)->getState();
- const LocationContext *LCtx = (*I)->getLocationContext();
- Bldr.generateNode(U, *I, state->BindExpr(U, LCtx,
- state->getSVal(Ex, LCtx)));
+ ProgramStateRef state = N->getState();
+ const LocationContext *LCtx = N->getLocationContext();
+ Bldr.generateNode(U, N, state->BindExpr(U, LCtx, state->getSVal(Ex, LCtx)));
}
void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, ExplodedNode *Pred,
@@ -935,13 +927,12 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, ExplodedNode *Pred,
ExplodedNodeSet EvalSet;
StmtNodeBuilder Bldr(CheckedSet, EvalSet, *currBldrCtx);
- for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
- I != E; ++I) {
+ for (ExplodedNode *N : CheckedSet) {
switch (U->getOpcode()) {
default: {
- Bldr.takeNodes(*I);
+ Bldr.takeNodes(N);
ExplodedNodeSet Tmp;
- VisitIncrementDecrementOperator(U, *I, Tmp);
+ VisitIncrementDecrementOperator(U, N, Tmp);
Bldr.addNodes(Tmp);
break;
}
@@ -956,10 +947,10 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, ExplodedNode *Pred,
// For all other types, UO_Real is an identity operation.
assert (U->getType() == Ex->getType());
- ProgramStateRef state = (*I)->getState();
- const LocationContext *LCtx = (*I)->getLocationContext();
- Bldr.generateNode(U, *I, state->BindExpr(U, LCtx,
- state->getSVal(Ex, LCtx)));
+ ProgramStateRef state = N->getState();
+ const LocationContext *LCtx = N->getLocationContext();
+ Bldr.generateNode(U, N,
+ state->BindExpr(U, LCtx, state->getSVal(Ex, LCtx)));
break;
}
@@ -971,10 +962,10 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, ExplodedNode *Pred,
break;
}
// For all other types, UO_Imag returns 0.
- ProgramStateRef state = (*I)->getState();
- const LocationContext *LCtx = (*I)->getLocationContext();
+ ProgramStateRef state = N->getState();
+ const LocationContext *LCtx = N->getLocationContext();
SVal X = svalBuilder.makeZeroVal(Ex->getType());
- Bldr.generateNode(U, *I, state->BindExpr(U, LCtx, X));
+ Bldr.generateNode(U, N, state->BindExpr(U, LCtx, X));
break;
}
@@ -984,25 +975,24 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, ExplodedNode *Pred,
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Ex)) {
const ValueDecl *VD = DRE->getDecl();
- if (isa<CXXMethodDecl>(VD) || isa<FieldDecl>(VD) ||
- isa<IndirectFieldDecl>(VD)) {
- ProgramStateRef State = (*I)->getState();
- const LocationContext *LCtx = (*I)->getLocationContext();
+ if (isa<CXXMethodDecl, FieldDecl, IndirectFieldDecl>(VD)) {
+ ProgramStateRef State = N->getState();
+ const LocationContext *LCtx = N->getLocationContext();
SVal SV = svalBuilder.getMemberPointer(cast<NamedDecl>(VD));
- Bldr.generateNode(U, *I, State->BindExpr(U, LCtx, SV));
+ Bldr.generateNode(U, N, State->BindExpr(U, LCtx, SV));
break;
}
}
// Explicitly proceed with default handler for this case cascade.
- handleUOExtension(I, U, Bldr);
+ handleUOExtension(N, U, Bldr);
break;
}
case UO_Plus:
assert(!U->isGLValue());
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case UO_Deref:
case UO_Extension: {
- handleUOExtension(I, U, Bldr);
+ handleUOExtension(N, U, Bldr);
break;
}
@@ -1011,14 +1001,14 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, ExplodedNode *Pred,
case UO_Not: {
assert (!U->isGLValue());
const Expr *Ex = U->getSubExpr()->IgnoreParens();
- ProgramStateRef state = (*I)->getState();
- const LocationContext *LCtx = (*I)->getLocationContext();
+ ProgramStateRef state = N->getState();
+ const LocationContext *LCtx = N->getLocationContext();
// Get the value of the subexpression.
SVal V = state->getSVal(Ex, LCtx);
if (V.isUnknownOrUndef()) {
- Bldr.generateNode(U, *I, state->BindExpr(U, LCtx, V));
+ Bldr.generateNode(U, N, state->BindExpr(U, LCtx, V));
break;
}
@@ -1027,11 +1017,13 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, ExplodedNode *Pred,
llvm_unreachable("Invalid Opcode.");
case UO_Not:
// FIXME: Do we need to handle promotions?
- state = state->BindExpr(U, LCtx, evalComplement(V.castAs<NonLoc>()));
+ state = state->BindExpr(
+ U, LCtx, svalBuilder.evalComplement(V.castAs<NonLoc>()));
break;
case UO_Minus:
// FIXME: Do we need to handle promotions?
- state = state->BindExpr(U, LCtx, evalMinus(V.castAs<NonLoc>()));
+ state = state->BindExpr(U, LCtx,
+ svalBuilder.evalMinus(V.castAs<NonLoc>()));
break;
case UO_LNot:
// C99 6.5.3.3: "The expression !E is equivalent to (0==E)."
@@ -1039,22 +1031,21 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, ExplodedNode *Pred,
// Note: technically we do "E == 0", but this is the same in the
// transfer functions as "0 == E".
SVal Result;
- if (Optional<Loc> LV = V.getAs<Loc>()) {
- Loc X = svalBuilder.makeNullWithType(Ex->getType());
- Result = evalBinOp(state, BO_EQ, *LV, X, U->getType());
+ if (std::optional<Loc> LV = V.getAs<Loc>()) {
+ Loc X = svalBuilder.makeNullWithType(Ex->getType());
+ Result = evalBinOp(state, BO_EQ, *LV, X, U->getType());
} else if (Ex->getType()->isFloatingType()) {
- // FIXME: handle floating point types.
- Result = UnknownVal();
+ // FIXME: handle floating point types.
+ Result = UnknownVal();
} else {
- nonloc::ConcreteInt X(getBasicVals().getValue(0, Ex->getType()));
- Result = evalBinOp(state, BO_EQ, V.castAs<NonLoc>(), X,
- U->getType());
+ nonloc::ConcreteInt X(getBasicVals().getValue(0, Ex->getType()));
+ Result = evalBinOp(state, BO_EQ, V.castAs<NonLoc>(), X, U->getType());
}
state = state->BindExpr(U, LCtx, Result);
break;
}
- Bldr.generateNode(U, *I, state);
+ Bldr.generateNode(U, N, state);
break;
}
}
@@ -1080,10 +1071,9 @@ void ExprEngine::VisitIncrementDecrementOperator(const UnaryOperator* U,
ExplodedNodeSet Dst2;
StmtNodeBuilder Bldr(Tmp, Dst2, *currBldrCtx);
- for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end();I!=E;++I) {
-
- state = (*I)->getState();
- assert(LCtx == (*I)->getLocationContext());
+ for (ExplodedNode *N : Tmp) {
+ state = N->getState();
+ assert(LCtx == N->getLocationContext());
SVal V2_untested = state->getSVal(Ex, LCtx);
// Propagate unknown and undefined values.
@@ -1091,9 +1081,9 @@ void ExprEngine::VisitIncrementDecrementOperator(const UnaryOperator* U,
state = state->BindExpr(U, LCtx, V2_untested);
// Perform the store, so that the uninitialized value detection happens.
- Bldr.takeNodes(*I);
+ Bldr.takeNodes(N);
ExplodedNodeSet Dst3;
- evalStore(Dst3, U, Ex, *I, state, loc, V2_untested);
+ evalStore(Dst3, U, Ex, N, state, loc, V2_untested);
Bldr.addNodes(Dst3);
continue;
@@ -1159,9 +1149,9 @@ void ExprEngine::VisitIncrementDecrementOperator(const UnaryOperator* U,
state = state->BindExpr(U, LCtx, U->isPostfix() ? V2 : Result);
// Perform the store.
- Bldr.takeNodes(*I);
+ Bldr.takeNodes(N);
ExplodedNodeSet Dst3;
- evalStore(Dst3, U, Ex, *I, state, loc, Result);
+ evalStore(Dst3, U, Ex, N, state, loc, Result);
Bldr.addNodes(Dst3);
}
Dst.insert(Dst2);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index cab65687444b..504fd7f05e0f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -10,15 +10,19 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
-#include "clang/Analysis/ConstructionContext.h"
#include "clang/AST/DeclCXX.h"
-#include "clang/AST/StmtCXX.h"
#include "clang/AST/ParentMap.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Analysis/ConstructionContext.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Sequence.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -57,61 +61,68 @@ void ExprEngine::performTrivialCopy(NodeBuilder &Bldr, ExplodedNode *Pred,
AlwaysReturnsLValue = true;
}
- assert(ThisRD);
- if (ThisRD->isEmpty()) {
- // Do nothing for empty classes. Otherwise it'd retrieve an UnknownVal
- // and bind it and RegionStore would think that the actual value
- // in this region at this offset is unknown.
- return;
- }
-
const LocationContext *LCtx = Pred->getLocationContext();
+ const Expr *CallExpr = Call.getOriginExpr();
ExplodedNodeSet Dst;
Bldr.takeNodes(Pred);
- SVal V = Call.getArgSVal(0);
-
- // If the value being copied is not unknown, load from its location to get
- // an aggregate rvalue.
- if (Optional<Loc> L = V.getAs<Loc>())
- V = Pred->getState()->getSVal(*L);
- else
- assert(V.isUnknownOrUndef());
+ assert(ThisRD);
+ if (!ThisRD->isEmpty()) {
+ // Load the source value only for non-empty classes.
+ // Otherwise it'd retrieve an UnknownVal
+ // and bind it and RegionStore would think that the actual value
+ // in this region at this offset is unknown.
+ SVal V = Call.getArgSVal(0);
- const Expr *CallExpr = Call.getOriginExpr();
- evalBind(Dst, CallExpr, Pred, ThisVal, V, true);
+ // If the value being copied is not unknown, load from its location to get
+ // an aggregate rvalue.
+ if (std::optional<Loc> L = V.getAs<Loc>())
+ V = Pred->getState()->getSVal(*L);
+ else
+ assert(V.isUnknownOrUndef());
+ evalBind(Dst, CallExpr, Pred, ThisVal, V, true);
+ } else {
+ Dst.Add(Pred);
+ }
PostStmt PS(CallExpr, LCtx);
- for (ExplodedNodeSet::iterator I = Dst.begin(), E = Dst.end();
- I != E; ++I) {
- ProgramStateRef State = (*I)->getState();
+ for (ExplodedNode *N : Dst) {
+ ProgramStateRef State = N->getState();
if (AlwaysReturnsLValue)
State = State->BindExpr(CallExpr, LCtx, ThisVal);
else
State = bindReturnValue(Call, LCtx, State);
- Bldr.generateNode(PS, State, *I);
+ Bldr.generateNode(PS, State, N);
}
}
-
-SVal ExprEngine::makeZeroElementRegion(ProgramStateRef State, SVal LValue,
- QualType &Ty, bool &IsArray) {
+SVal ExprEngine::makeElementRegion(ProgramStateRef State, SVal LValue,
+ QualType &Ty, bool &IsArray, unsigned Idx) {
SValBuilder &SVB = State->getStateManager().getSValBuilder();
ASTContext &Ctx = SVB.getContext();
- while (const ArrayType *AT = Ctx.getAsArrayType(Ty)) {
- Ty = AT->getElementType();
- LValue = State->getLValue(Ty, SVB.makeZeroArrayIndex(), LValue);
+ if (const ArrayType *AT = Ctx.getAsArrayType(Ty)) {
+ while (AT) {
+ Ty = AT->getElementType();
+ AT = dyn_cast<ArrayType>(AT->getElementType());
+ }
+ LValue = State->getLValue(Ty, SVB.makeArrayIndex(Idx), LValue);
IsArray = true;
}
return LValue;
}
+// In case when the prvalue is returned from the function (kind is one of
+// SimpleReturnedValueKind, CXX17ElidedCopyReturnedValueKind), then
+// it's materialization happens in context of the caller.
+// We pass BldrCtx explicitly, as currBldrCtx always refers to callee's context.
SVal ExprEngine::computeObjectUnderConstruction(
- const Expr *E, ProgramStateRef State, const LocationContext *LCtx,
- const ConstructionContext *CC, EvalCallOptions &CallOpts) {
+ const Expr *E, ProgramStateRef State, const NodeBuilderContext *BldrCtx,
+ const LocationContext *LCtx, const ConstructionContext *CC,
+ EvalCallOptions &CallOpts, unsigned Idx) {
+
SValBuilder &SVB = getSValBuilder();
MemRegionManager &MRMgr = SVB.getRegionManager();
ASTContext &ACtx = SVB.getContext();
@@ -125,8 +136,8 @@ SVal ExprEngine::computeObjectUnderConstruction(
const auto *DS = DSCC->getDeclStmt();
const auto *Var = cast<VarDecl>(DS->getSingleDecl());
QualType Ty = Var->getType();
- return makeZeroElementRegion(State, State->getLValue(Var, LCtx), Ty,
- CallOpts.IsArrayCtorOrDtor);
+ return makeElementRegion(State, State->getLValue(Var, LCtx), Ty,
+ CallOpts.IsArrayCtorOrDtor, Idx);
}
case ConstructionContext::CXX17ElidedCopyConstructorInitializerKind:
case ConstructionContext::SimpleConstructorInitializerKind: {
@@ -158,8 +169,8 @@ SVal ExprEngine::computeObjectUnderConstruction(
}
QualType Ty = Field->getType();
- return makeZeroElementRegion(State, FieldVal, Ty,
- CallOpts.IsArrayCtorOrDtor);
+ return makeElementRegion(State, FieldVal, Ty, CallOpts.IsArrayCtorOrDtor,
+ Idx);
}
case ConstructionContext::NewAllocatedObjectKind: {
if (AMgr.getAnalyzerOptions().MayInlineCXXAllocator) {
@@ -169,11 +180,16 @@ SVal ExprEngine::computeObjectUnderConstruction(
if (const SubRegion *MR =
dyn_cast_or_null<SubRegion>(V.getAsRegion())) {
if (NE->isArray()) {
- // TODO: In fact, we need to call the constructor for every
- // allocated element, not just the first one!
CallOpts.IsArrayCtorOrDtor = true;
- return loc::MemRegionVal(getStoreManager().GetElementZeroRegion(
- MR, NE->getType()->getPointeeType()));
+
+ auto Ty = NE->getType()->getPointeeType();
+ while (const auto *AT = getContext().getAsArrayType(Ty))
+ Ty = AT->getElementType();
+
+ auto R = MRMgr.getElementRegion(Ty, svalBuilder.makeArrayIndex(Idx),
+ MR, SVB.getContext());
+
+ return loc::MemRegionVal(R);
}
return V;
}
@@ -203,8 +219,11 @@ SVal ExprEngine::computeObjectUnderConstruction(
CallerLCtx = CallerLCtx->getParent();
assert(!isa<BlockInvocationContext>(CallerLCtx));
}
+
+ NodeBuilderContext CallerBldrCtx(getCoreEngine(),
+ SFC->getCallSiteBlock(), CallerLCtx);
return computeObjectUnderConstruction(
- cast<Expr>(SFC->getCallSite()), State, CallerLCtx,
+ cast<Expr>(SFC->getCallSite()), State, &CallerBldrCtx, CallerLCtx,
RTC->getConstructionContext(), CallOpts);
} else {
// We are on the top frame of the analysis. We do not know where is the
@@ -244,7 +263,7 @@ SVal ExprEngine::computeObjectUnderConstruction(
EvalCallOptions PreElideCallOpts = CallOpts;
SVal V = computeObjectUnderConstruction(
- TCC->getConstructorAfterElision(), State, LCtx,
+ TCC->getConstructorAfterElision(), State, BldrCtx, LCtx,
TCC->getConstructionContextAfterElision(), CallOpts);
// FIXME: This definition of "copy elision has not failed" is unreliable.
@@ -257,7 +276,7 @@ SVal ExprEngine::computeObjectUnderConstruction(
// a simple temporary.
CallOpts = PreElideCallOpts;
CallOpts.IsElidableCtorThatHasNotBeenElided = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case ConstructionContext::SimpleTemporaryObjectKind: {
const auto *TCC = cast<TemporaryObjectConstructionContext>(CC);
@@ -266,7 +285,8 @@ SVal ExprEngine::computeObjectUnderConstruction(
CallOpts.IsTemporaryCtorOrDtor = true;
if (MTE) {
if (const ValueDecl *VD = MTE->getExtendingDecl()) {
- assert(MTE->getStorageDuration() != SD_FullExpression);
+ StorageDuration SD = MTE->getStorageDuration();
+ assert(SD != SD_FullExpression);
if (!VD->getType()->isReferenceType()) {
// We're lifetime-extended by a surrounding aggregate.
// Automatic destructors aren't quite working in this case
@@ -275,15 +295,36 @@ SVal ExprEngine::computeObjectUnderConstruction(
// the MaterializeTemporaryExpr?
CallOpts.IsTemporaryLifetimeExtendedViaAggregate = true;
}
- }
- if (MTE->getStorageDuration() == SD_Static ||
- MTE->getStorageDuration() == SD_Thread)
- return loc::MemRegionVal(MRMgr.getCXXStaticTempObjectRegion(E));
+ if (SD == SD_Static || SD == SD_Thread)
+ return loc::MemRegionVal(
+ MRMgr.getCXXStaticLifetimeExtendedObjectRegion(E, VD));
+
+ return loc::MemRegionVal(
+ MRMgr.getCXXLifetimeExtendedObjectRegion(E, VD, LCtx));
+ }
+ assert(MTE->getStorageDuration() == SD_FullExpression);
}
return loc::MemRegionVal(MRMgr.getCXXTempObjectRegion(E, LCtx));
}
+ case ConstructionContext::LambdaCaptureKind: {
+ CallOpts.IsTemporaryCtorOrDtor = true;
+
+ const auto *LCC = cast<LambdaCaptureConstructionContext>(CC);
+
+ SVal Base = loc::MemRegionVal(
+ MRMgr.getCXXTempObjectRegion(LCC->getInitializer(), LCtx));
+
+ const auto *CE = dyn_cast_or_null<CXXConstructExpr>(E);
+ if (getIndexOfElementToConstruct(State, CE, LCtx)) {
+ CallOpts.IsArrayCtorOrDtor = true;
+ Base = State->getLValue(E->getType(), svalBuilder.makeArrayIndex(Idx),
+ Base);
+ }
+
+ return Base;
+ }
case ConstructionContext::ArgumentKind: {
// Arguments are technically temporaries.
CallOpts.IsTemporaryCtorOrDtor = true;
@@ -293,13 +334,13 @@ SVal ExprEngine::computeObjectUnderConstruction(
unsigned Idx = ACC->getIndex();
CallEventManager &CEMgr = getStateManager().getCallEventManager();
- auto getArgLoc = [&](CallEventRef<> Caller) -> Optional<SVal> {
+ auto getArgLoc = [&](CallEventRef<> Caller) -> std::optional<SVal> {
const LocationContext *FutureSFC =
- Caller->getCalleeStackFrame(currBldrCtx->blockCount());
+ Caller->getCalleeStackFrame(BldrCtx->blockCount());
// Return early if we are unable to reliably foresee
// the future stack frame.
if (!FutureSFC)
- return None;
+ return std::nullopt;
// This should be equivalent to Caller->getDecl() for now, but
// FutureSFC->getDecl() is likely to support better stuff (like
@@ -308,37 +349,39 @@ SVal ExprEngine::computeObjectUnderConstruction(
// FIXME: Support for variadic arguments is not implemented here yet.
if (CallEvent::isVariadic(CalleeD))
- return None;
+ return std::nullopt;
// Operator arguments do not correspond to operator parameters
// because this-argument is implemented as a normal argument in
// operator call expressions but not in operator declarations.
const TypedValueRegion *TVR = Caller->getParameterLocation(
- *Caller->getAdjustedParameterIndex(Idx), currBldrCtx->blockCount());
+ *Caller->getAdjustedParameterIndex(Idx), BldrCtx->blockCount());
if (!TVR)
- return None;
+ return std::nullopt;
return loc::MemRegionVal(TVR);
};
if (const auto *CE = dyn_cast<CallExpr>(E)) {
- CallEventRef<> Caller = CEMgr.getSimpleCall(CE, State, LCtx);
- if (Optional<SVal> V = getArgLoc(Caller))
+ CallEventRef<> Caller =
+ CEMgr.getSimpleCall(CE, State, LCtx, getCFGElementRef());
+ if (std::optional<SVal> V = getArgLoc(Caller))
return *V;
else
break;
} else if (const auto *CCE = dyn_cast<CXXConstructExpr>(E)) {
// Don't bother figuring out the target region for the future
// constructor because we won't need it.
- CallEventRef<> Caller =
- CEMgr.getCXXConstructorCall(CCE, /*Target=*/nullptr, State, LCtx);
- if (Optional<SVal> V = getArgLoc(Caller))
+ CallEventRef<> Caller = CEMgr.getCXXConstructorCall(
+ CCE, /*Target=*/nullptr, State, LCtx, getCFGElementRef());
+ if (std::optional<SVal> V = getArgLoc(Caller))
return *V;
else
break;
} else if (const auto *ME = dyn_cast<ObjCMessageExpr>(E)) {
- CallEventRef<> Caller = CEMgr.getObjCMethodCall(ME, State, LCtx);
- if (Optional<SVal> V = getArgLoc(Caller))
+ CallEventRef<> Caller =
+ CEMgr.getObjCMethodCall(ME, State, LCtx, getCFGElementRef());
+ if (std::optional<SVal> V = getArgLoc(Caller))
return *V;
else
break;
@@ -432,7 +475,7 @@ ProgramStateRef ExprEngine::updateObjectsUnderConstruction(
}
// If we decided not to elide the constructor, proceed as if
// it's a simple temporary.
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
case ConstructionContext::SimpleTemporaryObjectKind: {
const auto *TCC = cast<TemporaryObjectConstructionContext>(CC);
@@ -444,6 +487,17 @@ ProgramStateRef ExprEngine::updateObjectsUnderConstruction(
return State;
}
+ case ConstructionContext::LambdaCaptureKind: {
+ const auto *LCC = cast<LambdaCaptureConstructionContext>(CC);
+
+ // If we capture and array, we want to store the super region, not a
+ // sub-region.
+ if (const auto *EL = dyn_cast_or_null<ElementRegion>(V.getAsRegion()))
+ V = loc::MemRegionVal(EL->getSuperRegion());
+
+ return addObjectUnderConstruction(
+ State, {LCC->getLambdaExpr(), LCC->getIndex()}, LCtx, V);
+ }
case ConstructionContext::ArgumentKind: {
const auto *ACC = cast<ArgumentConstructionContext>(CC);
if (const auto *BTE = ACC->getCXXBindTemporaryExpr())
@@ -456,6 +510,75 @@ ProgramStateRef ExprEngine::updateObjectsUnderConstruction(
llvm_unreachable("Unhandled construction context!");
}
+static ProgramStateRef
+bindRequiredArrayElementToEnvironment(ProgramStateRef State,
+ const ArrayInitLoopExpr *AILE,
+ const LocationContext *LCtx, SVal Idx) {
+ // The ctor in this case is guaranteed to be a copy ctor, otherwise we hit a
+ // compile time error.
+ //
+ // -ArrayInitLoopExpr <-- we're here
+ // |-OpaqueValueExpr
+ // | `-DeclRefExpr <-- match this
+ // `-CXXConstructExpr
+ // `-ImplicitCastExpr
+ // `-ArraySubscriptExpr
+ // |-ImplicitCastExpr
+ // | `-OpaqueValueExpr
+ // | `-DeclRefExpr
+ // `-ArrayInitIndexExpr
+ //
+ // The resulting expression might look like the one below in an implicit
+ // copy/move ctor.
+ //
+ // ArrayInitLoopExpr <-- we're here
+ // |-OpaqueValueExpr
+ // | `-MemberExpr <-- match this
+ // | (`-CXXStaticCastExpr) <-- move ctor only
+ // | `-DeclRefExpr
+ // `-CXXConstructExpr
+ // `-ArraySubscriptExpr
+ // |-ImplicitCastExpr
+ // | `-OpaqueValueExpr
+ // | `-MemberExpr
+ // | `-DeclRefExpr
+ // `-ArrayInitIndexExpr
+ //
+ // The resulting expression for a multidimensional array.
+ // ArrayInitLoopExpr <-- we're here
+ // |-OpaqueValueExpr
+ // | `-DeclRefExpr <-- match this
+ // `-ArrayInitLoopExpr
+ // |-OpaqueValueExpr
+ // | `-ArraySubscriptExpr
+ // | |-ImplicitCastExpr
+ // | | `-OpaqueValueExpr
+ // | | `-DeclRefExpr
+ // | `-ArrayInitIndexExpr
+ // `-CXXConstructExpr <-- extract this
+ // ` ...
+
+ const auto *OVESrc = AILE->getCommonExpr()->getSourceExpr();
+
+ // HACK: There is no way we can put the index of the array element into the
+ // CFG unless we unroll the loop, so we manually select and bind the required
+ // parameter to the environment.
+ const auto *CE =
+ cast<CXXConstructExpr>(extractElementInitializerFromNestedAILE(AILE));
+
+ SVal Base = UnknownVal();
+ if (const auto *ME = dyn_cast<MemberExpr>(OVESrc))
+ Base = State->getSVal(ME, LCtx);
+ else if (const auto *DRE = dyn_cast<DeclRefExpr>(OVESrc))
+ Base = State->getLValue(cast<VarDecl>(DRE->getDecl()), LCtx);
+ else
+ llvm_unreachable("ArrayInitLoopExpr contains unexpected source expression");
+
+ SVal NthElem = State->getLValue(CE->getType(), Idx, Base);
+
+ return State->BindExpr(CE->getArg(0), LCtx, NthElem);
+}
+
void ExprEngine::handleConstructor(const Expr *E,
ExplodedNode *Pred,
ExplodedNodeSet &destNodes) {
@@ -469,57 +592,95 @@ void ExprEngine::handleConstructor(const Expr *E,
SVal Target = UnknownVal();
if (CE) {
- if (Optional<SVal> ElidedTarget =
+ if (std::optional<SVal> ElidedTarget =
getObjectUnderConstruction(State, CE, LCtx)) {
- // We've previously modeled an elidable constructor by pretending that it
- // in fact constructs into the correct target. This constructor can
- // therefore be skipped.
- Target = *ElidedTarget;
- StmtNodeBuilder Bldr(Pred, destNodes, *currBldrCtx);
- State = finishObjectConstruction(State, CE, LCtx);
- if (auto L = Target.getAs<Loc>())
- State = State->BindExpr(CE, LCtx, State->getSVal(*L, CE->getType()));
- Bldr.generateNode(CE, Pred, State);
- return;
+ // We've previously modeled an elidable constructor by pretending that
+ // it in fact constructs into the correct target. This constructor can
+ // therefore be skipped.
+ Target = *ElidedTarget;
+ StmtNodeBuilder Bldr(Pred, destNodes, *currBldrCtx);
+ State = finishObjectConstruction(State, CE, LCtx);
+ if (auto L = Target.getAs<Loc>())
+ State = State->BindExpr(CE, LCtx, State->getSVal(*L, CE->getType()));
+ Bldr.generateNode(CE, Pred, State);
+ return;
}
}
- // FIXME: Handle arrays, which run the same constructor for every element.
- // For now, we just run the first constructor (which should still invalidate
- // the entire array).
-
EvalCallOptions CallOpts;
auto C = getCurrentCFGElement().getAs<CFGConstructor>();
assert(C || getCurrentCFGElement().getAs<CFGStmt>());
const ConstructionContext *CC = C ? C->getConstructionContext() : nullptr;
- const CXXConstructExpr::ConstructionKind CK =
+ const CXXConstructionKind CK =
CE ? CE->getConstructionKind() : CIE->getConstructionKind();
switch (CK) {
- case CXXConstructExpr::CK_Complete: {
+ case CXXConstructionKind::Complete: {
// Inherited constructors are always base class constructors.
assert(CE && !CIE && "A complete constructor is inherited?!");
+ // If the ctor is part of an ArrayInitLoopExpr, we want to handle it
+ // differently.
+ auto *AILE = CC ? CC->getArrayInitLoop() : nullptr;
+
+ unsigned Idx = 0;
+ if (CE->getType()->isArrayType() || AILE) {
+
+ auto isZeroSizeArray = [&] {
+ uint64_t Size = 1;
+
+ if (const auto *CAT = dyn_cast<ConstantArrayType>(CE->getType()))
+ Size = getContext().getConstantArrayElementCount(CAT);
+ else if (AILE)
+ Size = getContext().getArrayInitLoopExprElementCount(AILE);
+
+ return Size == 0;
+ };
+
+ // No element construction will happen in a 0 size array.
+ if (isZeroSizeArray()) {
+ StmtNodeBuilder Bldr(Pred, destNodes, *currBldrCtx);
+ static SimpleProgramPointTag T{"ExprEngine",
+ "Skipping 0 size array construction"};
+ Bldr.generateNode(CE, Pred, State, &T);
+ return;
+ }
+
+ Idx = getIndexOfElementToConstruct(State, CE, LCtx).value_or(0u);
+ State = setIndexOfElementToConstruct(State, CE, LCtx, Idx + 1);
+ }
+
+ if (AILE) {
+ // Only set this once even though we loop through it multiple times.
+ if (!getPendingInitLoop(State, CE, LCtx))
+ State = setPendingInitLoop(
+ State, CE, LCtx,
+ getContext().getArrayInitLoopExprElementCount(AILE));
+
+ State = bindRequiredArrayElementToEnvironment(
+ State, AILE, LCtx, svalBuilder.makeArrayIndex(Idx));
+ }
+
// The target region is found from construction context.
- std::tie(State, Target) =
- handleConstructionContext(CE, State, LCtx, CC, CallOpts);
+ std::tie(State, Target) = handleConstructionContext(
+ CE, State, currBldrCtx, LCtx, CC, CallOpts, Idx);
break;
}
- case CXXConstructExpr::CK_VirtualBase: {
+ case CXXConstructionKind::VirtualBase: {
// Make sure we are not calling virtual base class initializers twice.
// Only the most-derived object should initialize virtual base classes.
const auto *OuterCtor = dyn_cast_or_null<CXXConstructExpr>(
LCtx->getStackFrame()->getCallSite());
assert(
(!OuterCtor ||
- OuterCtor->getConstructionKind() == CXXConstructExpr::CK_Complete ||
- OuterCtor->getConstructionKind() == CXXConstructExpr::CK_Delegating) &&
+ OuterCtor->getConstructionKind() == CXXConstructionKind::Complete ||
+ OuterCtor->getConstructionKind() == CXXConstructionKind::Delegating) &&
("This virtual base should have already been initialized by "
"the most derived class!"));
(void)OuterCtor;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
- case CXXConstructExpr::CK_NonVirtualBase:
+ case CXXConstructionKind::NonVirtualBase:
// In C++17, classes with non-virtual bases may be aggregates, so they would
// be initialized as aggregates without a constructor call, so we may have
// a base class constructed directly into an initializer list without
@@ -531,24 +692,24 @@ void ExprEngine::handleConstructor(const Expr *E,
// FIXME: Instead of relying on the ParentMap, we should have the
// trigger-statement (InitListExpr in this case) passed down from CFG or
// otherwise always available during construction.
- if (dyn_cast_or_null<InitListExpr>(LCtx->getParentMap().getParent(E))) {
+ if (isa_and_nonnull<InitListExpr>(LCtx->getParentMap().getParent(E))) {
MemRegionManager &MRMgr = getSValBuilder().getRegionManager();
Target = loc::MemRegionVal(MRMgr.getCXXTempObjectRegion(E, LCtx));
CallOpts.IsCtorOrDtorWithImproperlyModeledTargetRegion = true;
break;
}
- LLVM_FALLTHROUGH;
- case CXXConstructExpr::CK_Delegating: {
+ [[fallthrough]];
+ case CXXConstructionKind::Delegating: {
const CXXMethodDecl *CurCtor = cast<CXXMethodDecl>(LCtx->getDecl());
Loc ThisPtr = getSValBuilder().getCXXThis(CurCtor,
LCtx->getStackFrame());
SVal ThisVal = State->getSVal(ThisPtr);
- if (CK == CXXConstructExpr::CK_Delegating) {
+ if (CK == CXXConstructionKind::Delegating) {
Target = ThisVal;
} else {
// Cast to the base type.
- bool IsVirtual = (CK == CXXConstructExpr::CK_VirtualBase);
+ bool IsVirtual = (CK == CXXConstructionKind::VirtualBase);
SVal BaseVal =
getStoreManager().evalDerivedToBase(ThisVal, E->getType(), IsVirtual);
Target = BaseVal;
@@ -573,9 +734,9 @@ void ExprEngine::handleConstructor(const Expr *E,
CallEventManager &CEMgr = getStateManager().getCallEventManager();
CallEventRef<> Call =
CIE ? (CallEventRef<>)CEMgr.getCXXInheritedConstructorCall(
- CIE, TargetRegion, State, LCtx)
+ CIE, TargetRegion, State, LCtx, getCFGElementRef())
: (CallEventRef<>)CEMgr.getCXXConstructorCall(
- CE, TargetRegion, State, LCtx);
+ CE, TargetRegion, State, LCtx, getCFGElementRef());
ExplodedNodeSet DstPreVisit;
getCheckerManager().runCheckersForPreStmt(DstPreVisit, Pred, E, *this);
@@ -584,10 +745,8 @@ void ExprEngine::handleConstructor(const Expr *E,
if (CE) {
// FIXME: Is it possible and/or useful to do this before PreStmt?
StmtNodeBuilder Bldr(DstPreVisit, PreInitialized, *currBldrCtx);
- for (ExplodedNodeSet::iterator I = DstPreVisit.begin(),
- E = DstPreVisit.end();
- I != E; ++I) {
- ProgramStateRef State = (*I)->getState();
+ for (ExplodedNode *N : DstPreVisit) {
+ ProgramStateRef State = N->getState();
if (CE->requiresZeroInitialization()) {
// FIXME: Once we properly handle constructors in new-expressions, we'll
// need to invalidate the region before setting a default value, to make
@@ -604,7 +763,7 @@ void ExprEngine::handleConstructor(const Expr *E,
State = State->bindDefaultZero(Target, LCtx);
}
- Bldr.generateNode(CE, *I, State, /*tag=*/nullptr,
+ Bldr.generateNode(CE, N, State, /*tag=*/nullptr,
ProgramPoint::PreStmtKind);
}
} else {
@@ -622,14 +781,12 @@ void ExprEngine::handleConstructor(const Expr *E,
!CallOpts.IsArrayCtorOrDtor) {
StmtNodeBuilder Bldr(DstPreCall, DstEvaluated, *currBldrCtx);
// FIXME: Handle other kinds of trivial constructors as well.
- for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
- I != E; ++I)
- performTrivialCopy(Bldr, *I, *Call);
+ for (ExplodedNode *N : DstPreCall)
+ performTrivialCopy(Bldr, N, *Call);
} else {
- for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
- I != E; ++I)
- getCheckerManager().runCheckersForEvalCall(DstEvaluated, *I, *Call, *this,
+ for (ExplodedNode *N : DstPreCall)
+ getCheckerManager().runCheckersForEvalCall(DstEvaluated, N, *Call, *this,
CallOpts);
}
@@ -644,7 +801,8 @@ void ExprEngine::handleConstructor(const Expr *E,
StmtNodeBuilder Bldr(DstEvaluated, DstEvaluatedPostProcessed, *currBldrCtx);
const AnalysisDeclContext *ADC = LCtx->getAnalysisDeclContext();
if (!ADC->getCFGBuildOptions().AddTemporaryDtors) {
- if (llvm::isa_and_nonnull<CXXTempObjectRegion>(TargetRegion) &&
+ if (llvm::isa_and_nonnull<CXXTempObjectRegion,
+ CXXLifetimeExtendedObjectRegion>(TargetRegion) &&
cast<CXXConstructorDecl>(Call->getDecl())
->getParent()
->isAnyDestructorNoReturn()) {
@@ -716,7 +874,8 @@ void ExprEngine::VisitCXXDestructor(QualType ObjectType,
// it would interrupt the analysis instead.
static SimpleProgramPointTag T("ExprEngine", "SkipInvalidDestructor");
// FIXME: PostImplicitCall with a null decl may crash elsewhere anyway.
- PostImplicitCall PP(/*Decl=*/nullptr, S->getEndLoc(), LCtx, &T);
+ PostImplicitCall PP(/*Decl=*/nullptr, S->getEndLoc(), LCtx,
+ getCFGElementRef(), &T);
NodeBuilder Bldr(Pred, Dst, *currBldrCtx);
Bldr.generateNode(PP, Pred->getState(), Pred);
return;
@@ -741,8 +900,8 @@ void ExprEngine::VisitCXXDestructor(QualType ObjectType,
}
CallEventManager &CEMgr = getStateManager().getCallEventManager();
- CallEventRef<CXXDestructorCall> Call =
- CEMgr.getCXXDestructorCall(DtorDecl, S, Dest, IsBaseDtor, State, LCtx);
+ CallEventRef<CXXDestructorCall> Call = CEMgr.getCXXDestructorCall(
+ DtorDecl, S, Dest, IsBaseDtor, State, LCtx, getCFGElementRef());
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
Call->getSourceRange().getBegin(),
@@ -754,9 +913,8 @@ void ExprEngine::VisitCXXDestructor(QualType ObjectType,
ExplodedNodeSet DstInvalidated;
StmtNodeBuilder Bldr(DstPreCall, DstInvalidated, *currBldrCtx);
- for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
- I != E; ++I)
- defaultEvalCall(Bldr, *I, *Call, CallOpts);
+ for (ExplodedNode *N : DstPreCall)
+ defaultEvalCall(Bldr, N, *Call, CallOpts);
getCheckerManager().runCheckersForPostCall(Dst, DstInvalidated,
*Call, *this);
@@ -772,7 +930,7 @@ void ExprEngine::VisitCXXNewAllocatorCall(const CXXNewExpr *CNE,
"Error evaluating New Allocator Call");
CallEventManager &CEMgr = getStateManager().getCallEventManager();
CallEventRef<CXXAllocatorCall> Call =
- CEMgr.getCXXAllocatorCall(CNE, State, LCtx);
+ CEMgr.getCXXAllocatorCall(CNE, State, LCtx, getCFGElementRef());
ExplodedNodeSet DstPreCall;
getCheckerManager().runCheckersForPreCall(DstPreCall, Pred,
@@ -802,6 +960,11 @@ void ExprEngine::VisitCXXNewAllocatorCall(const CXXNewExpr *CNE,
// skip it for now.
ProgramStateRef State = I->getState();
SVal RetVal = State->getSVal(CNE, LCtx);
+ // [basic.stc.dynamic.allocation] (on the return value of an allocation
+ // function):
+ // "The order, contiguity, and initial value of storage allocated by
+ // successive calls to an allocation function are unspecified."
+ State = State->bindDefaultInitial(RetVal, UndefinedVal{}, LCtx);
// If this allocation function is not declared as non-throwing, failures
// /must/ be signalled by exceptions, and thus the return value will never
@@ -865,7 +1028,7 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
CallEventManager &CEMgr = getStateManager().getCallEventManager();
CallEventRef<CXXAllocatorCall> Call =
- CEMgr.getCXXAllocatorCall(CNE, State, LCtx);
+ CEMgr.getCXXAllocatorCall(CNE, State, LCtx, getCFGElementRef());
if (!AMgr.getAnalyzerOptions().MayInlineCXXAllocator) {
// Invalidate placement args.
@@ -883,13 +1046,10 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
// where new can return NULL. If we end up supporting that option, we can
// consider adding a check for it here.
// C++11 [basic.stc.dynamic.allocation]p3.
- if (FD) {
- QualType Ty = FD->getType();
- if (const auto *ProtoType = Ty->getAs<FunctionProtoType>())
- if (!ProtoType->isNothrow())
- if (auto dSymVal = symVal.getAs<DefinedOrUnknownSVal>())
- State = State->assume(*dSymVal, true);
- }
+ if (const auto *ProtoType = FD->getType()->getAs<FunctionProtoType>())
+ if (!ProtoType->isNothrow())
+ if (auto dSymVal = symVal.getAs<DefinedOrUnknownSVal>())
+ State = State->assume(*dSymVal, true);
}
StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
@@ -897,14 +1057,39 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
SVal Result = symVal;
if (CNE->isArray()) {
- // FIXME: allocating an array requires simulating the constructors.
- // For now, just return a symbolicated region.
+
if (const auto *NewReg = cast_or_null<SubRegion>(symVal.getAsRegion())) {
- QualType ObjTy = CNE->getType()->getPointeeType();
+ // If each element is initialized by their default constructor, the field
+ // values are properly placed inside the required region, however if an
+ // initializer list is used, this doesn't happen automatically.
+ auto *Init = CNE->getInitializer();
+ bool isInitList = isa_and_nonnull<InitListExpr>(Init);
+
+ QualType ObjTy =
+ isInitList ? Init->getType() : CNE->getType()->getPointeeType();
const ElementRegion *EleReg =
- getStoreManager().GetElementZeroRegion(NewReg, ObjTy);
+ MRMgr.getElementRegion(ObjTy, svalBuilder.makeArrayIndex(0), NewReg,
+ svalBuilder.getContext());
Result = loc::MemRegionVal(EleReg);
+
+ // If the array is list initialized, we bind the initializer list to the
+ // memory region here, otherwise we would lose it.
+ if (isInitList) {
+ Bldr.takeNodes(Pred);
+ Pred = Bldr.generateNode(CNE, Pred, State);
+
+ SVal V = State->getSVal(Init, LCtx);
+ ExplodedNodeSet evaluated;
+ evalBind(evaluated, CNE, Pred, Result, V, true);
+
+ Bldr.takeNodes(Pred);
+ Bldr.addNodes(evaluated);
+
+ Pred = *evaluated.begin();
+ State = Pred->getState();
+ }
}
+
State = State->BindExpr(CNE, Pred->getLocationContext(), Result);
Bldr.generateNode(CNE, Pred, State);
return;
@@ -914,7 +1099,7 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
// CXXNewExpr, we need to make sure that the constructed object is not
// immediately invalidated here. (The placement call should happen before
// the constructor call anyway.)
- if (FD && FD->isReservedGlobalPlacementOperator()) {
+ if (FD->isReservedGlobalPlacementOperator()) {
// Non-array placement new should always return the placement location.
SVal PlacementLoc = State->getSVal(CNE->getPlacementArg(0), LCtx);
Result = svalBuilder.evalCast(PlacementLoc, CNE->getType(),
@@ -944,12 +1129,21 @@ void ExprEngine::VisitCXXDeleteExpr(const CXXDeleteExpr *CDE,
CallEventManager &CEMgr = getStateManager().getCallEventManager();
CallEventRef<CXXDeallocatorCall> Call = CEMgr.getCXXDeallocatorCall(
- CDE, Pred->getState(), Pred->getLocationContext());
+ CDE, Pred->getState(), Pred->getLocationContext(), getCFGElementRef());
ExplodedNodeSet DstPreCall;
getCheckerManager().runCheckersForPreCall(DstPreCall, Pred, *Call, *this);
+ ExplodedNodeSet DstPostCall;
- getCheckerManager().runCheckersForPostCall(Dst, DstPreCall, *Call, *this);
+ if (AMgr.getAnalyzerOptions().MayInlineCXXAllocator) {
+ StmtNodeBuilder Bldr(DstPreCall, DstPostCall, *currBldrCtx);
+ for (ExplodedNode *I : DstPreCall) {
+ defaultEvalCall(Bldr, I, *Call);
+ }
+ } else {
+ DstPostCall = DstPreCall;
+ }
+ getCheckerManager().runCheckersForPostCall(Dst, DstPostCall, *Call, *this);
}
void ExprEngine::VisitCXXCatchStmt(const CXXCatchStmt *CS, ExplodedNode *Pred,
@@ -999,19 +1193,41 @@ void ExprEngine::VisitLambdaExpr(const LambdaExpr *LE, ExplodedNode *Pred,
// If we created a new MemRegion for the lambda, we should explicitly bind
// the captures.
- CXXRecordDecl::field_iterator CurField = LE->getLambdaClass()->field_begin();
- for (LambdaExpr::const_capture_init_iterator i = LE->capture_init_begin(),
- e = LE->capture_init_end();
- i != e; ++i, ++CurField) {
- FieldDecl *FieldForCapture = *CurField;
+ for (auto const [Idx, FieldForCapture, InitExpr] :
+ llvm::zip(llvm::seq<unsigned>(0, -1), LE->getLambdaClass()->fields(),
+ LE->capture_inits())) {
SVal FieldLoc = State->getLValue(FieldForCapture, V);
SVal InitVal;
if (!FieldForCapture->hasCapturedVLAType()) {
- Expr *InitExpr = *i;
assert(InitExpr && "Capture missing initialization expression");
- InitVal = State->getSVal(InitExpr, LocCtxt);
+
+ // Capturing a 0 length array is a no-op, so we ignore it to get a more
+ // accurate analysis. If it's not ignored, it would set the default
+ // binding of the lambda to 'Unknown', which can lead to falsely detecting
+ // 'Uninitialized' values as 'Unknown' and not reporting a warning.
+ const auto FTy = FieldForCapture->getType();
+ if (FTy->isConstantArrayType() &&
+ getContext().getConstantArrayElementCount(
+ getContext().getAsConstantArrayType(FTy)) == 0)
+ continue;
+
+ // With C++17 copy elision the InitExpr can be anything, so instead of
+ // pattern matching all cases, we simple check if the current field is
+ // under construction or not, regardless what it's InitExpr is.
+ if (const auto OUC =
+ getObjectUnderConstruction(State, {LE, Idx}, LocCtxt)) {
+ InitVal = State->getSVal(OUC->getAsRegion());
+
+ State = finishObjectConstruction(State, {LE, Idx}, LocCtxt);
+ } else
+ InitVal = State->getSVal(InitExpr, LocCtxt);
+
} else {
+
+ assert(!getObjectUnderConstruction(State, {LE, Idx}, LocCtxt) &&
+ "VLA capture by value is a compile time error!");
+
// The field stores the length of a captured variable-length array.
// These captures don't have initialization expressions; instead we
// get the length from the VLAType size expression.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index e6918e071a4f..4755b6bfa6dc 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -25,6 +25,7 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/SaveAndRestore.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -85,10 +86,10 @@ static std::pair<const Stmt*,
const ProgramPoint &PP = Node->getLocation();
if (PP.getStackFrame() == SF) {
- if (Optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
+ if (std::optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
S = SP->getStmt();
break;
- } else if (Optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) {
+ } else if (std::optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) {
S = CEE->getCalleeContext()->getCallSite();
if (S)
break;
@@ -96,17 +97,17 @@ static std::pair<const Stmt*,
// If there is no statement, this is an implicitly-generated call.
// We'll walk backwards over it and then continue the loop to find
// an actual statement.
- Optional<CallEnter> CE;
+ std::optional<CallEnter> CE;
do {
Node = Node->getFirstPred();
CE = Node->getLocationAs<CallEnter>();
} while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());
// Continue searching the graph.
- } else if (Optional<BlockEdge> BE = PP.getAs<BlockEdge>()) {
+ } else if (std::optional<BlockEdge> BE = PP.getAs<BlockEdge>()) {
Blk = BE->getSrc();
}
- } else if (Optional<CallEnter> CE = PP.getAs<CallEnter>()) {
+ } else if (std::optional<CallEnter> CE = PP.getAs<CallEnter>()) {
// If we reached the CallEnter for this function, it has no statements.
if (CE->getCalleeContext() == SF)
break;
@@ -129,7 +130,7 @@ static std::pair<const Stmt*,
static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
StoreManager &StoreMgr) {
// For now, the only adjustments we handle apply only to locations.
- if (!V.getAs<Loc>())
+ if (!isa<Loc>(V))
return V;
// If the types already match, don't do any unnecessary work.
@@ -195,6 +196,53 @@ static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
}
+// Returns the number of elements in the array currently being destructed.
+// If the element count is not found 0 will be returned.
+static unsigned getElementCountOfArrayBeingDestructed(
+ const CallEvent &Call, const ProgramStateRef State, SValBuilder &SVB) {
+ assert(isa<CXXDestructorCall>(Call) &&
+ "The call event is not a destructor call!");
+
+ const auto &DtorCall = cast<CXXDestructorCall>(Call);
+
+ auto ThisVal = DtorCall.getCXXThisVal();
+
+ if (auto ThisElementRegion = dyn_cast<ElementRegion>(ThisVal.getAsRegion())) {
+ auto ArrayRegion = ThisElementRegion->getAsArrayOffset().getRegion();
+ auto ElementType = ThisElementRegion->getElementType();
+
+ auto ElementCount =
+ getDynamicElementCount(State, ArrayRegion, SVB, ElementType);
+
+ if (!ElementCount.isConstant())
+ return 0;
+
+ return ElementCount.getAsInteger()->getLimitedValue();
+ }
+
+ return 0;
+}
+
+ProgramStateRef ExprEngine::removeStateTraitsUsedForArrayEvaluation(
+ ProgramStateRef State, const CXXConstructExpr *E,
+ const LocationContext *LCtx) {
+
+ assert(LCtx && "Location context must be provided!");
+
+ if (E) {
+ if (getPendingInitLoop(State, E, LCtx))
+ State = removePendingInitLoop(State, E, LCtx);
+
+ if (getIndexOfElementToConstruct(State, E, LCtx))
+ State = removeIndexOfElementToConstruct(State, E, LCtx);
+ }
+
+ if (getPendingArrayDestruction(State, LCtx))
+ State = removePendingArrayDestruction(State, LCtx);
+
+ return State;
+}
+
/// The call exit is simulated with a sequence of nodes, which occur between
/// CallExitBegin and CallExitEnd. The following operations occur between the
/// two program points:
@@ -227,6 +275,23 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
// Step 2: generate node with bound return value: CEBNode -> BindedRetNode.
+ // If this variable is set to 'true' the analyzer will evaluate the call
+ // statement we are about to exit again, instead of continuing the execution
+ // from the statement after the call. This is useful for non-POD type array
+ // construction where the CXXConstructExpr is referenced only once in the CFG,
+ // but we want to evaluate it as many times as many elements the array has.
+ bool ShouldRepeatCall = false;
+
+ if (const auto *DtorDecl =
+ dyn_cast_or_null<CXXDestructorDecl>(Call->getDecl())) {
+ if (auto Idx = getPendingArrayDestruction(state, callerCtx)) {
+ ShouldRepeatCall = *Idx > 0;
+
+ auto ThisVal = svalBuilder.getCXXThis(DtorDecl->getParent(), calleeCtx);
+ state = state->killBinding(ThisVal);
+ }
+ }
+
// If the callee returns an expression, bind its value to CallExpr.
if (CE) {
if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
@@ -255,6 +320,8 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
SVal ThisV = state->getSVal(This);
ThisV = state->getSVal(ThisV.castAs<Loc>());
state = state->BindExpr(CCE, callerCtx, ThisV);
+
+ ShouldRepeatCall = shouldRepeatCtorCall(state, CCE, callerCtx);
}
if (const auto *CNE = dyn_cast<CXXNewExpr>(CE)) {
@@ -273,6 +340,11 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
}
}
+ if (!ShouldRepeatCall) {
+ state = removeStateTraitsUsedForArrayEvaluation(
+ state, dyn_cast_or_null<CXXConstructExpr>(CE), callerCtx);
+ }
+
// Step 3: BindedRetNode -> CleanedNodes
// If we can find a statement and a block in the inlined function, run remove
// dead bindings before returning from the call. This is important to ensure
@@ -302,17 +374,15 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
CleanedNodes.Add(CEBNode);
}
- for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
- E = CleanedNodes.end(); I != E; ++I) {
-
+ for (ExplodedNode *N : CleanedNodes) {
// Step 4: Generate the CallExit and leave the callee's context.
// CleanedNodes -> CEENode
CallExitEnd Loc(calleeCtx, callerCtx);
bool isNew;
- ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();
+ ProgramStateRef CEEState = (N == CEBNode) ? state : N->getState();
ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
- CEENode->addPredecessor(*I, G);
+ CEENode->addPredecessor(N, G);
if (!isNew)
return;
@@ -320,9 +390,8 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
// result onto the work list.
// CEENode -> Dst -> WorkList
NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
- SaveAndRestore<const NodeBuilderContext*> NBCSave(currBldrCtx,
- &Ctx);
- SaveAndRestore<unsigned> CBISave(currStmtIdx, calleeCtx->getIndex());
+ SaveAndRestore<const NodeBuilderContext *> NBCSave(currBldrCtx, &Ctx);
+ SaveAndRestore CBISave(currStmtIdx, calleeCtx->getIndex());
CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);
@@ -358,9 +427,10 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
// Enqueue the next element in the block.
for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
- PSI != PSE; ++PSI) {
- Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(),
- calleeCtx->getIndex()+1);
+ PSI != PSE; ++PSI) {
+ unsigned Idx = calleeCtx->getIndex() + (ShouldRepeatCall ? 0 : 1);
+
+ Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(), Idx);
}
}
}
@@ -427,10 +497,39 @@ namespace {
REGISTER_MAP_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
const MemRegion *, unsigned)
+REGISTER_TRAIT_WITH_PROGRAMSTATE(CTUDispatchBifurcation, bool)
+
+void ExprEngine::ctuBifurcate(const CallEvent &Call, const Decl *D,
+ NodeBuilder &Bldr, ExplodedNode *Pred,
+ ProgramStateRef State) {
+ ProgramStateRef ConservativeEvalState = nullptr;
+ if (Call.isForeign() && !isSecondPhaseCTU()) {
+ const auto IK = AMgr.options.getCTUPhase1Inlining();
+ const bool DoInline = IK == CTUPhase1InliningKind::All ||
+ (IK == CTUPhase1InliningKind::Small &&
+ isSmall(AMgr.getAnalysisDeclContext(D)));
+ if (DoInline) {
+ inlineCall(Engine.getWorkList(), Call, D, Bldr, Pred, State);
+ return;
+ }
+ const bool BState = State->get<CTUDispatchBifurcation>();
+ if (!BState) { // This is the first time we see this foreign function.
+ // Enqueue it to be analyzed in the second (ctu) phase.
+ inlineCall(Engine.getCTUWorkList(), Call, D, Bldr, Pred, State);
+ // Conservatively evaluate in the first phase.
+ ConservativeEvalState = State->set<CTUDispatchBifurcation>(true);
+ conservativeEvalCall(Call, Bldr, Pred, ConservativeEvalState);
+ } else {
+ conservativeEvalCall(Call, Bldr, Pred, State);
+ }
+ return;
+ }
+ inlineCall(Engine.getWorkList(), Call, D, Bldr, Pred, State);
+}
-bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
- NodeBuilder &Bldr, ExplodedNode *Pred,
- ProgramStateRef State) {
+void ExprEngine::inlineCall(WorkList *WList, const CallEvent &Call,
+ const Decl *D, NodeBuilder &Bldr,
+ ExplodedNode *Pred, ProgramStateRef State) {
assert(D);
const LocationContext *CurLC = Pred->getLocationContext();
@@ -465,7 +564,7 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
N->addPredecessor(Pred, G);
if (isNew)
- Engine.getWorkList()->enqueue(N);
+ WList->enqueue(N);
}
// If we decided to inline the call, the successor has been manually
@@ -475,11 +574,17 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
NumInlinedCalls++;
Engine.FunctionSummaries->bumpNumTimesInlined(D);
- // Mark the decl as visited.
- if (VisitedCallees)
- VisitedCallees->insert(D);
-
- return true;
+ // Do not mark as visited in the 2nd run (CTUWList), so the function will
+ // be visited as top-level, this way we won't loose reports in non-ctu
+ // mode. Considering the case when a function in a foreign TU calls back
+ // into the main TU.
+ // Note, during the 1st run, it doesn't matter if we mark the foreign
+ // functions as visited (or not) because they can never appear as a top level
+ // function in the main TU.
+ if (!isSecondPhaseCTU())
+ // Mark the decl as visited.
+ if (VisitedCallees)
+ VisitedCallees->insert(D);
}
static ProgramStateRef getInlineFailedState(ProgramStateRef State,
@@ -503,15 +608,14 @@ void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
// Get the call in its initial state. We use this as a template to perform
// all the checks.
CallEventManager &CEMgr = getStateManager().getCallEventManager();
- CallEventRef<> CallTemplate
- = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext());
+ CallEventRef<> CallTemplate = CEMgr.getSimpleCall(
+ CE, Pred->getState(), Pred->getLocationContext(), getCFGElementRef());
// Evaluate the function call. We try each of the checkers
// to see if the can evaluate the function call.
ExplodedNodeSet dstCallEvaluated;
- for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
- I != E; ++I) {
- evalCall(dstCallEvaluated, *I, *CallTemplate);
+ for (ExplodedNode *N : dstPreVisit) {
+ evalCall(dstCallEvaluated, N, *CallTemplate);
}
// Finally, perform the post-condition check of the CallExpr and store
@@ -533,8 +637,7 @@ ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State,
const LocationContext *LC = Call.getLocationContext();
for (unsigned CallI = 0, CallN = Call.getNumArgs(); CallI != CallN; ++CallI) {
unsigned I = Call.getASTArgumentIndex(CallI);
- if (Optional<SVal> V =
- getObjectUnderConstruction(State, {E, I}, LC)) {
+ if (std::optional<SVal> V = getObjectUnderConstruction(State, {E, I}, LC)) {
SVal VV = *V;
(void)VV;
assert(cast<VarRegion>(VV.castAs<loc::MemRegionVal>().getRegion())
@@ -667,9 +770,9 @@ ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
SVal Target;
assert(RTC->getStmt() == Call.getOriginExpr());
EvalCallOptions CallOpts; // FIXME: We won't really need those.
- std::tie(State, Target) =
- handleConstructionContext(Call.getOriginExpr(), State, LCtx,
- RTC->getConstructionContext(), CallOpts);
+ std::tie(State, Target) = handleConstructionContext(
+ Call.getOriginExpr(), State, currBldrCtx, LCtx,
+ RTC->getConstructionContext(), CallOpts);
const MemRegion *TargetR = Target.getAsRegion();
assert(TargetR);
// Invalidate the region so that it didn't look uninitialized. If this is
@@ -697,7 +800,7 @@ ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
// Store the extent of the allocated object(s).
SVal ElementCount;
- if (const Expr *SizeExpr = CNE->getArraySize().getValueOr(nullptr)) {
+ if (const Expr *SizeExpr = CNE->getArraySize().value_or(nullptr)) {
ElementCount = State->getSVal(SizeExpr, LCtx);
} else {
ElementCount = svalBuilder.makeIntVal(1, /*IsUnsigned=*/true);
@@ -709,6 +812,11 @@ ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
svalBuilder.evalBinOp(State, BO_Mul, ElementCount, ElementSize,
svalBuilder.getArrayIndexType());
+ // FIXME: This line is to prevent a crash. For more details please check
+ // issue #56264.
+ if (Size.isUndef())
+ Size = UnknownVal();
+
State = setDynamicExtent(State, MR, Size.castAs<DefinedOrUnknownSVal>(),
svalBuilder);
} else {
@@ -726,7 +834,8 @@ void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
State = bindReturnValue(Call, Pred->getLocationContext(), State);
// And make the result node.
- Bldr.generateNode(Call.getProgramPoint(), State, Pred);
+ static SimpleProgramPointTag PT("ExprEngine", "Conservative eval call");
+ Bldr.generateNode(Call.getProgramPoint(false, &PT), State, Pred);
}
ExprEngine::CallInlinePolicy
@@ -760,13 +869,10 @@ ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
!Opts.MayInlineCXXAllocator)
return CIP_DisallowedOnce;
- // FIXME: We don't handle constructors or destructors for arrays properly.
- // Even once we do, we still need to be careful about implicitly-generated
- // initializers for array fields in default move/copy constructors.
- // We still allow construction into ElementRegion targets when they don't
- // represent array elements.
- if (CallOpts.IsArrayCtorOrDtor)
- return CIP_DisallowedOnce;
+ if (CallOpts.IsArrayCtorOrDtor) {
+ if (!shouldInlineArrayConstruction(Pred->getState(), CtorExpr, CurLC))
+ return CIP_DisallowedOnce;
+ }
// Inlining constructors requires including initializers in the CFG.
const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
@@ -782,7 +888,7 @@ ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
return CIP_DisallowedAlways;
- if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete) {
+ if (CtorExpr->getConstructionKind() == CXXConstructionKind::Complete) {
// If we don't handle temporary destructors, we shouldn't inline
// their constructors.
if (CallOpts.IsTemporaryCtorOrDtor &&
@@ -817,9 +923,12 @@ ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
(void)ADC;
- // FIXME: We don't handle constructors or destructors for arrays properly.
- if (CallOpts.IsArrayCtorOrDtor)
- return CIP_DisallowedOnce;
+ if (CallOpts.IsArrayCtorOrDtor) {
+ if (!shouldInlineArrayDestruction(getElementCountOfArrayBeingDestructed(
+ Call, Pred->getState(), svalBuilder))) {
+ return CIP_DisallowedOnce;
+ }
+ }
// Allow disabling temporary destructor inlining with a separate option.
if (CallOpts.IsTemporaryCtorOrDtor &&
@@ -834,7 +943,7 @@ ExprEngine::mayInlineCallKind(const CallEvent &Call, const ExplodedNode *Pred,
break;
}
case CE_CXXDeallocator:
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case CE_CXXAllocator:
if (Opts.MayInlineCXXAllocator)
break;
@@ -979,9 +1088,9 @@ bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
return false;
// Check if this function has been marked as non-inlinable.
- Optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
- if (MayInline.hasValue()) {
- if (!MayInline.getValue())
+ std::optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
+ if (MayInline) {
+ if (!*MayInline)
return false;
} else {
@@ -1002,7 +1111,7 @@ bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts, CallOpts);
if (CIP != CIP_Allowed) {
if (CIP == CIP_DisallowedAlways) {
- assert(!MayInline.hasValue() || MayInline.getValue());
+ assert(!MayInline || *MayInline);
Engine.FunctionSummaries->markShouldNotInline(D);
}
return false;
@@ -1030,6 +1139,63 @@ bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
return true;
}
+bool ExprEngine::shouldInlineArrayConstruction(const ProgramStateRef State,
+ const CXXConstructExpr *CE,
+ const LocationContext *LCtx) {
+ if (!CE)
+ return false;
+
+ // FIXME: Handle other arrays types.
+ if (const auto *CAT = dyn_cast<ConstantArrayType>(CE->getType())) {
+ unsigned ArrSize = getContext().getConstantArrayElementCount(CAT);
+
+ // This might seem conter-intuitive at first glance, but the functions are
+ // closely related. Reasoning about destructors depends only on the type
+ // of the expression that initialized the memory region, which is the
+ // CXXConstructExpr. So to avoid code repetition, the work is delegated
+ // to the function that reasons about destructor inlining. Also note that
+ // if the constructors of the array elements are inlined, the destructors
+ // can also be inlined and if the destructors can be inline, it's safe to
+ // inline the constructors.
+ return shouldInlineArrayDestruction(ArrSize);
+ }
+
+ // Check if we're inside an ArrayInitLoopExpr, and it's sufficiently small.
+ if (auto Size = getPendingInitLoop(State, CE, LCtx))
+ return shouldInlineArrayDestruction(*Size);
+
+ return false;
+}
+
+bool ExprEngine::shouldInlineArrayDestruction(uint64_t Size) {
+
+ uint64_t maxAllowedSize = AMgr.options.maxBlockVisitOnPath;
+
+ // Declaring a 0 element array is also possible.
+ return Size <= maxAllowedSize && Size > 0;
+}
+
+bool ExprEngine::shouldRepeatCtorCall(ProgramStateRef State,
+ const CXXConstructExpr *E,
+ const LocationContext *LCtx) {
+
+ if (!E)
+ return false;
+
+ auto Ty = E->getType();
+
+ // FIXME: Handle non constant array types
+ if (const auto *CAT = dyn_cast<ConstantArrayType>(Ty)) {
+ unsigned Size = getContext().getConstantArrayElementCount(CAT);
+ return Size > getIndexOfElementToConstruct(State, E, LCtx);
+ }
+
+ if (auto Size = getPendingInitLoop(State, E, LCtx))
+ return Size > getIndexOfElementToConstruct(State, E, LCtx);
+
+ return false;
+}
+
static bool isTrivialObjectAssignment(const CallEvent &Call) {
const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call);
if (!ICall)
@@ -1068,6 +1234,7 @@ void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
State = InlinedFailedState;
} else {
RuntimeDefinition RD = Call->getRuntimeDefinition();
+ Call->setForeign(RD.isForeign());
const Decl *D = RD.getDecl();
if (shouldInlineCall(*Call, D, Pred, CallOpts)) {
if (RD.mayHaveOtherDefinitions()) {
@@ -1085,14 +1252,17 @@ void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
return;
}
}
-
- // We are not bifurcating and we do have a Decl, so just inline.
- if (inlineCall(*Call, D, Bldr, Pred, State))
- return;
+ ctuBifurcate(*Call, D, Bldr, Pred, State);
+ return;
}
}
- // If we can't inline it, handle the return value and invalidate the regions.
+ // If we can't inline it, clean up the state traits used only if the function
+ // is inlined.
+ State = removeStateTraitsUsedForArrayEvaluation(
+ State, dyn_cast_or_null<CXXConstructExpr>(E), Call->getLocationContext());
+
+ // Also handle the return value and invalidate the regions.
conservativeEvalCall(*Call, Bldr, Pred, State);
}
@@ -1110,8 +1280,7 @@ void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
if (BState) {
// If we are on "inline path", keep inlining if possible.
if (*BState == DynamicDispatchModeInlined)
- if (inlineCall(Call, D, Bldr, Pred, State))
- return;
+ ctuBifurcate(Call, D, Bldr, Pred, State);
// If inline failed, or we are on the path where we assume we
// don't have enough info about the receiver to inline, conjure the
// return value and invalidate the regions.
@@ -1124,7 +1293,7 @@ void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
ProgramStateRef IState =
State->set<DynamicDispatchBifurcationMap>(BifurReg,
DynamicDispatchModeInlined);
- inlineCall(Call, D, Bldr, Pred, IState);
+ ctuBifurcate(Call, D, Bldr, Pred, IState);
ProgramStateRef NoIState =
State->set<DynamicDispatchBifurcationMap>(BifurReg,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
index 5a55e81497b0..f075df3ab5e4 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
@@ -148,8 +148,8 @@ void ExprEngine::VisitObjCMessage(const ObjCMessageExpr *ME,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
CallEventManager &CEMgr = getStateManager().getCallEventManager();
- CallEventRef<ObjCMethodCall> Msg =
- CEMgr.getObjCMethodCall(ME, Pred->getState(), Pred->getLocationContext());
+ CallEventRef<ObjCMethodCall> Msg = CEMgr.getObjCMethodCall(
+ ME, Pred->getState(), Pred->getLocationContext(), getCFGElementRef());
// There are three cases for the receiver:
// (1) it is definitely nil,
@@ -167,19 +167,32 @@ void ExprEngine::VisitObjCMessage(const ObjCMessageExpr *ME,
// intentionally drops coverage in order to prevent false alarms
// in the following scenario:
//
- // id result = [o someMethod]
- // if (result) {
- // if (!o) {
- // // <-- This program point should be unreachable because if o is nil
- // // it must the case that result is nil as well.
+ // id result = [o someMethod]
+ // if (result) {
+ // if (!o) {
+ // // <-- This program point should be unreachable because if o is nil
+ // // it must the case that result is nil as well.
+ // }
// }
- // }
//
- // We could avoid dropping coverage by performing an explicit case split
- // on each method call -- but this would get very expensive. An alternative
- // would be to introduce lazy constraints.
- // FIXME: This ignores many potential bugs (<rdar://problem/11733396>).
- // Revisit once we have lazier constraints.
+ // However, it also loses coverage of the nil path prematurely,
+ // leading to missed reports.
+ //
+ // It's possible to handle this by performing a state split on every call:
+ // explore the state where the receiver is non-nil, and independently
+ // explore the state where it's nil. But this is not only slow, but
+ // completely unwarranted. The mere presence of the message syntax in the code
+ // isn't sufficient evidence that nil is a realistic possibility.
+ //
+ // An ideal solution would be to add the following constraint that captures
+ // both possibilities without splitting the state:
+ //
+ // ($x == 0) => ($y == 0) (1)
+ //
+ // where in our case '$x' is the receiver symbol, '$y' is the returned symbol,
+ // and '=>' is logical implication. But RangeConstraintManager can't handle
+ // such constraints yet, so for now we go with a simpler, more restrictive
+ // constraint: $x != 0, from which (1) follows as a vacuous truth.
if (Msg->isInstanceMessage()) {
SVal recVal = Msg->getReceiverSVal();
if (!recVal.isUndef()) {
@@ -206,7 +219,7 @@ void ExprEngine::VisitObjCMessage(const ObjCMessageExpr *ME,
ExplodedNodeSet dstPostCheckers;
getCheckerManager().runCheckersForObjCMessageNil(dstPostCheckers, Pred,
*Msg, *this);
- for (auto I : dstPostCheckers)
+ for (auto *I : dstPostCheckers)
finishArgumentConstruction(Dst, I, *Msg);
return;
}
@@ -270,7 +283,7 @@ void ExprEngine::VisitObjCMessage(const ObjCMessageExpr *ME,
// If there were constructors called for object-type arguments, clean them up.
ExplodedNodeSet dstArgCleanup;
- for (auto I : dstEval)
+ for (auto *I : dstEval)
finishArgumentConstruction(dstArgCleanup, I, *Msg);
ExplodedNodeSet dstPostvisit;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
index 64fc32ea7554..86947b7929e9 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -27,6 +27,8 @@
#include "clang/Rewrite/Core/Rewriter.h"
#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
@@ -57,6 +59,8 @@ using namespace ento;
namespace {
+class ArrowMap;
+
class HTMLDiagnostics : public PathDiagnosticConsumer {
PathDiagnosticConsumerOptions DiagOpts;
std::string Directory;
@@ -64,6 +68,7 @@ class HTMLDiagnostics : public PathDiagnosticConsumer {
bool noDir = false;
const Preprocessor &PP;
const bool SupportsCrossFileDiagnostics;
+ llvm::StringSet<> EmittedHashes;
public:
HTMLDiagnostics(PathDiagnosticConsumerOptions DiagOpts,
@@ -77,60 +82,93 @@ public:
void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
FilesMade *filesMade) override;
- StringRef getName() const override {
- return "HTMLDiagnostics";
- }
+ StringRef getName() const override { return "HTMLDiagnostics"; }
bool supportsCrossFileDiagnostics() const override {
return SupportsCrossFileDiagnostics;
}
- unsigned ProcessMacroPiece(raw_ostream &os,
- const PathDiagnosticMacroPiece& P,
+ unsigned ProcessMacroPiece(raw_ostream &os, const PathDiagnosticMacroPiece &P,
unsigned num);
+ unsigned ProcessControlFlowPiece(Rewriter &R, FileID BugFileID,
+ const PathDiagnosticControlFlowPiece &P,
+ unsigned Number);
+
void HandlePiece(Rewriter &R, FileID BugFileID, const PathDiagnosticPiece &P,
const std::vector<SourceRange> &PopUpRanges, unsigned num,
unsigned max);
- void HighlightRange(Rewriter& R, FileID BugFileID, SourceRange Range,
+ void HighlightRange(Rewriter &R, FileID BugFileID, SourceRange Range,
const char *HighlightStart = "<span class=\"mrange\">",
const char *HighlightEnd = "</span>");
- void ReportDiag(const PathDiagnostic& D,
- FilesMade *filesMade);
+ void ReportDiag(const PathDiagnostic &D, FilesMade *filesMade);
// Generate the full HTML report
- std::string GenerateHTML(const PathDiagnostic& D, Rewriter &R,
- const SourceManager& SMgr, const PathPieces& path,
+ std::string GenerateHTML(const PathDiagnostic &D, Rewriter &R,
+ const SourceManager &SMgr, const PathPieces &path,
const char *declName);
// Add HTML header/footers to file specified by FID
- void FinalizeHTML(const PathDiagnostic& D, Rewriter &R,
- const SourceManager& SMgr, const PathPieces& path,
- FileID FID, const FileEntry *Entry, const char *declName);
+ void FinalizeHTML(const PathDiagnostic &D, Rewriter &R,
+ const SourceManager &SMgr, const PathPieces &path,
+ FileID FID, FileEntryRef Entry, const char *declName);
// Rewrite the file specified by FID with HTML formatting.
- void RewriteFile(Rewriter &R, const PathPieces& path, FileID FID);
+ void RewriteFile(Rewriter &R, const PathPieces &path, FileID FID);
+ PathGenerationScheme getGenerationScheme() const override {
+ return Everything;
+ }
private:
+ void addArrowSVGs(Rewriter &R, FileID BugFileID,
+ const ArrowMap &ArrowIndices);
+
/// \return Javascript for displaying shortcuts help;
StringRef showHelpJavascript();
/// \return Javascript for navigating the HTML report using j/k keys.
StringRef generateKeyboardNavigationJavascript();
+ /// \return Javascript for drawing control-flow arrows.
+ StringRef generateArrowDrawingJavascript();
+
/// \return JavaScript for an option to only show relevant lines.
- std::string showRelevantLinesJavascript(
- const PathDiagnostic &D, const PathPieces &path);
+ std::string showRelevantLinesJavascript(const PathDiagnostic &D,
+ const PathPieces &path);
/// Write executed lines from \p D in JSON format into \p os.
- void dumpCoverageData(const PathDiagnostic &D,
- const PathPieces &path,
+ void dumpCoverageData(const PathDiagnostic &D, const PathPieces &path,
llvm::raw_string_ostream &os);
};
+bool isArrowPiece(const PathDiagnosticPiece &P) {
+ return isa<PathDiagnosticControlFlowPiece>(P) && P.getString().empty();
+}
+
+unsigned getPathSizeWithoutArrows(const PathPieces &Path) {
+ unsigned TotalPieces = Path.size();
+ unsigned TotalArrowPieces = llvm::count_if(
+ Path, [](const PathDiagnosticPieceRef &P) { return isArrowPiece(*P); });
+ return TotalPieces - TotalArrowPieces;
+}
+
+class ArrowMap : public std::vector<unsigned> {
+ using Base = std::vector<unsigned>;
+
+public:
+ ArrowMap(unsigned Size) : Base(Size, 0) {}
+ unsigned getTotalNumberOfArrows() const { return at(0); }
+};
+
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const ArrowMap &Indices) {
+ OS << "[ ";
+ llvm::interleave(Indices, OS, ",");
+ return OS << " ]";
+}
+
} // namespace
void ento::createHTMLDiagnosticConsumer(
@@ -208,6 +246,18 @@ void HTMLDiagnostics::FlushDiagnosticsImpl(
ReportDiag(*Diag, filesMade);
}
+static llvm::SmallString<32> getIssueHash(const PathDiagnostic &D,
+ const Preprocessor &PP) {
+ SourceManager &SMgr = PP.getSourceManager();
+ PathDiagnosticLocation UPDLoc = D.getUniqueingLoc();
+ FullSourceLoc L(SMgr.getExpansionLoc(UPDLoc.isValid()
+ ? UPDLoc.asLocation()
+ : D.getLocation().asLocation()),
+ SMgr);
+ return getIssueHash(L, D.getCheckerName(), D.getBugType(),
+ D.getDeclWithIssue(), PP.getLangOpts());
+}
+
void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
FilesMade *filesMade) {
// Create the HTML directory if it is missing.
@@ -234,11 +284,6 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
// Create a new rewriter to generate HTML.
Rewriter R(const_cast<SourceManager&>(SMgr), PP.getLangOpts());
- // The file for the first path element is considered the main report file, it
- // will usually be equivalent to SMgr.getMainFileID(); however, it might be a
- // header when -analyzer-opt-analyze-headers is used.
- FileID ReportFile = path.front()->getLocation().asLocation().getExpansionLoc().getFileID();
-
// Get the function/method name
SmallString<128> declName("unknown");
int offsetDecl = 0;
@@ -257,6 +302,17 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
}
}
+ SmallString<32> IssueHash = getIssueHash(D, PP);
+ auto [It, IsNew] = EmittedHashes.insert(IssueHash);
+ if (!IsNew) {
+ // We've already emitted a duplicate issue. It'll get overwritten anyway.
+ return;
+ }
+
+ // FIXME: This causes each file to be re-parsed and syntax-highlighted
+ // and macro-expanded separately for each report. We could cache such rewrites
+ // across all reports and only re-do the part that's actually different:
+ // the warning/note bubbles.
std::string report = GenerateHTML(D, R, SMgr, path, declName.c_str());
if (report.empty()) {
llvm::errs() << "warning: no diagnostics generated for main file.\n";
@@ -265,46 +321,52 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
// Create a path for the target HTML file.
int FD;
- SmallString<128> Model, ResultPath;
-
- if (!DiagOpts.ShouldWriteStableReportFilename) {
- llvm::sys::path::append(Model, Directory, "report-%%%%%%.html");
- if (std::error_code EC =
- llvm::sys::fs::make_absolute(Model)) {
- llvm::errs() << "warning: could not make '" << Model
- << "' absolute: " << EC.message() << '\n';
- return;
- }
- if (std::error_code EC = llvm::sys::fs::createUniqueFile(
- Model, FD, ResultPath, llvm::sys::fs::OF_Text)) {
- llvm::errs() << "warning: could not create file in '" << Directory
- << "': " << EC.message() << '\n';
- return;
- }
- } else {
- int i = 1;
- std::error_code EC;
- do {
- // Find a filename which is not already used
- const FileEntry* Entry = SMgr.getFileEntryForID(ReportFile);
- std::stringstream filename;
- Model = "";
- filename << "report-"
- << llvm::sys::path::filename(Entry->getName()).str()
- << "-" << declName.c_str()
- << "-" << offsetDecl
- << "-" << i << ".html";
- llvm::sys::path::append(Model, Directory,
- filename.str());
- EC = llvm::sys::fs::openFileForReadWrite(
- Model, FD, llvm::sys::fs::CD_CreateNew, llvm::sys::fs::OF_None);
- if (EC && EC != llvm::errc::file_exists) {
- llvm::errs() << "warning: could not create file '" << Model
- << "': " << EC.message() << '\n';
- return;
- }
- i++;
- } while (EC);
+
+ SmallString<128> FileNameStr;
+ llvm::raw_svector_ostream FileName(FileNameStr);
+ FileName << "report-";
+
+ // Historically, neither the stable report filename nor the unstable report
+ // filename were actually stable. That said, the stable report filename
+ // was more stable because it was mostly composed of information
+ // about the bug report instead of being completely random.
+ // Now both stable and unstable report filenames are in fact stable
+ // but the stable report filename is still more verbose.
+ if (DiagOpts.ShouldWriteVerboseReportFilename) {
+ // FIXME: This code relies on knowing what constitutes the issue hash.
+ // Otherwise deduplication won't work correctly.
+ FileID ReportFile =
+ path.back()->getLocation().asLocation().getExpansionLoc().getFileID();
+
+ OptionalFileEntryRef Entry = SMgr.getFileEntryRefForID(ReportFile);
+
+ FileName << llvm::sys::path::filename(Entry->getName()).str() << "-"
+ << declName.c_str() << "-" << offsetDecl << "-";
+ }
+
+ FileName << StringRef(IssueHash).substr(0, 6).str() << ".html";
+
+ SmallString<128> ResultPath;
+ llvm::sys::path::append(ResultPath, Directory, FileName.str());
+ if (std::error_code EC = llvm::sys::fs::make_absolute(ResultPath)) {
+ llvm::errs() << "warning: could not make '" << ResultPath
+ << "' absolute: " << EC.message() << '\n';
+ return;
+ }
+
+ if (std::error_code EC = llvm::sys::fs::openFileForReadWrite(
+ ResultPath, FD, llvm::sys::fs::CD_CreateNew,
+ llvm::sys::fs::OF_Text)) {
+ // Existence of the file corresponds to the situation where a different
+ // Clang instance has emitted a bug report with the same issue hash.
+ // This is an entirely normal situation that does not deserve a warning,
+ // as apart from hash collisions this can happen because the reports
+ // are in fact similar enough to be considered duplicates of each other.
+ if (EC != llvm::errc::file_exists) {
+ llvm::errs() << "warning: could not create file in '" << Directory
+ << "': " << EC.message() << '\n';
+ }
+ return;
}
llvm::raw_fd_ostream os(FD, true);
@@ -346,7 +408,7 @@ std::string HTMLDiagnostics::GenerateHTML(const PathDiagnostic& D, Rewriter &R,
os << "<div class=FileNav><a href=\"#File" << (I - 1)->getHashValue()
<< "\">&#x2190;</a></div>";
- os << "<h4 class=FileName>" << SMgr.getFileEntryForID(*I)->getName()
+ os << "<h4 class=FileName>" << SMgr.getFileEntryRefForID(*I)->getName()
<< "</h4>\n";
// Right nav arrow
@@ -360,7 +422,7 @@ std::string HTMLDiagnostics::GenerateHTML(const PathDiagnostic& D, Rewriter &R,
}
// Append files to the main report file in the order they appear in the path
- for (auto I : llvm::make_range(FileIDs.begin() + 1, FileIDs.end())) {
+ for (auto I : llvm::drop_begin(FileIDs)) {
std::string s;
llvm::raw_string_ostream os(s);
@@ -379,15 +441,15 @@ std::string HTMLDiagnostics::GenerateHTML(const PathDiagnostic& D, Rewriter &R,
// Add CSS, header, and footer.
FileID FID =
path.back()->getLocation().asLocation().getExpansionLoc().getFileID();
- const FileEntry* Entry = SMgr.getFileEntryForID(FID);
- FinalizeHTML(D, R, SMgr, path, FileIDs[0], Entry, declName);
+ OptionalFileEntryRef Entry = SMgr.getFileEntryRefForID(FID);
+ FinalizeHTML(D, R, SMgr, path, FileIDs[0], *Entry, declName);
std::string file;
llvm::raw_string_ostream os(file);
for (auto BI : *Buf)
os << BI;
- return os.str();
+ return file;
}
void HTMLDiagnostics::dumpCoverageData(
@@ -452,10 +514,11 @@ window.addEventListener("keydown", function (event) {
if (event.defaultPrevented) {
return;
}
- if (event.key == "S") {
+ // SHIFT + S
+ if (event.shiftKey && event.keyCode == 83) {
var checked = document.getElementsByName("showCounterexample")[0].checked;
filterCounterexample(!checked);
- document.getElementsByName("showCounterexample")[0].checked = !checked;
+ document.getElementsByName("showCounterexample")[0].click();
} else {
return;
}
@@ -475,22 +538,28 @@ document.addEventListener("DOMContentLoaded", function() {
<label for="showCounterexample">
Show only relevant lines
</label>
+ <input type="checkbox" name="showArrows"
+ id="showArrows" style="margin-left: 10px" />
+ <label for="showArrows">
+ Show control flow arrows
+ </label>
</form>
)<<<";
- return os.str();
+ return s;
}
-void HTMLDiagnostics::FinalizeHTML(const PathDiagnostic& D, Rewriter &R,
- const SourceManager& SMgr, const PathPieces& path, FileID FID,
- const FileEntry *Entry, const char *declName) {
+void HTMLDiagnostics::FinalizeHTML(const PathDiagnostic &D, Rewriter &R,
+ const SourceManager &SMgr,
+ const PathPieces &path, FileID FID,
+ FileEntryRef Entry, const char *declName) {
// This is a cludge; basically we want to append either the full
// working directory if we have no directory information. This is
// a work in progress.
llvm::SmallString<0> DirName;
- if (llvm::sys::path::is_relative(Entry->getName())) {
+ if (llvm::sys::path::is_relative(Entry.getName())) {
llvm::sys::fs::current_path(DirName);
DirName += '/';
}
@@ -503,6 +572,9 @@ void HTMLDiagnostics::FinalizeHTML(const PathDiagnostic& D, Rewriter &R,
R.InsertTextBefore(SMgr.getLocForStartOfFile(FID),
generateKeyboardNavigationJavascript());
+ R.InsertTextBefore(SMgr.getLocForStartOfFile(FID),
+ generateArrowDrawingJavascript());
+
// Checkbox and javascript for filtering the output to the counterexample.
R.InsertTextBefore(SMgr.getLocForStartOfFile(FID),
showRelevantLinesJavascript(D, path));
@@ -516,7 +588,7 @@ void HTMLDiagnostics::FinalizeHTML(const PathDiagnostic& D, Rewriter &R,
<< "<h3>Bug Summary</h3>\n<table class=\"simpletable\">\n"
"<tr><td class=\"rowname\">File:</td><td>"
<< html::EscapeText(DirName)
- << html::EscapeText(Entry->getName())
+ << html::EscapeText(Entry.getName())
<< "</td></tr>\n<tr><td class=\"rowname\">Warning:</td><td>"
"<a href=\"#EndPath\">line "
<< LineNumber
@@ -533,19 +605,19 @@ void HTMLDiagnostics::FinalizeHTML(const PathDiagnostic& D, Rewriter &R,
P->getLocation().asLocation().getExpansionLineNumber();
int ColumnNumber =
P->getLocation().asLocation().getExpansionColumnNumber();
+ ++NumExtraPieces;
os << "<tr><td class=\"rowname\">Note:</td><td>"
<< "<a href=\"#Note" << NumExtraPieces << "\">line "
<< LineNumber << ", column " << ColumnNumber << "</a><br />"
<< P->getString() << "</td></tr>";
- ++NumExtraPieces;
}
}
// Output any other meta data.
- for (PathDiagnostic::meta_iterator I = D.meta_begin(), E = D.meta_end();
- I != E; ++I) {
- os << "<tr><td></td><td>" << html::EscapeText(*I) << "</td></tr>\n";
+ for (const std::string &Metadata :
+ llvm::make_range(D.meta_begin(), D.meta_end())) {
+ os << "<tr><td></td><td>" << html::EscapeText(Metadata) << "</td></tr>\n";
}
os << R"<<<(
@@ -570,6 +642,7 @@ void HTMLDiagnostics::FinalizeHTML(const PathDiagnostic& D, Rewriter &R,
<a href="#" onclick="toggleHelp(); return false;">Close</a>
</div>
)<<<";
+
R.InsertTextBefore(SMgr.getLocForStartOfFile(FID), os.str());
}
@@ -591,21 +664,18 @@ void HTMLDiagnostics::FinalizeHTML(const PathDiagnostic& D, Rewriter &R,
? UPDLoc.asLocation()
: D.getLocation().asLocation()),
SMgr);
- const Decl *DeclWithIssue = D.getDeclWithIssue();
StringRef BugCategory = D.getCategory();
if (!BugCategory.empty())
os << "\n<!-- BUGCATEGORY " << BugCategory << " -->\n";
- os << "\n<!-- BUGFILE " << DirName << Entry->getName() << " -->\n";
+ os << "\n<!-- BUGFILE " << DirName << Entry.getName() << " -->\n";
- os << "\n<!-- FILENAME " << llvm::sys::path::filename(Entry->getName()) << " -->\n";
+ os << "\n<!-- FILENAME " << llvm::sys::path::filename(Entry.getName()) << " -->\n";
os << "\n<!-- FUNCTIONNAME " << declName << " -->\n";
- os << "\n<!-- ISSUEHASHCONTENTOFLINEINCONTEXT "
- << getIssueHash(L, D.getCheckerName(), D.getBugType(), DeclWithIssue,
- PP.getLangOpts())
+ os << "\n<!-- ISSUEHASHCONTENTOFLINEINCONTEXT " << getIssueHash(D, PP)
<< " -->\n";
os << "\n<!-- BUGLINE "
@@ -616,7 +686,7 @@ void HTMLDiagnostics::FinalizeHTML(const PathDiagnostic& D, Rewriter &R,
<< ColumnNumber
<< " -->\n";
- os << "\n<!-- BUGPATHLENGTH " << path.size() << " -->\n";
+ os << "\n<!-- BUGPATHLENGTH " << getPathSizeWithoutArrows(path) << " -->\n";
// Mark the end of the tags.
os << "\n<!-- BUGMETAEND -->\n";
@@ -625,7 +695,7 @@ void HTMLDiagnostics::FinalizeHTML(const PathDiagnostic& D, Rewriter &R,
R.InsertTextBefore(SMgr.getLocForStartOfFile(FID), os.str());
}
- html::AddHeaderFooterInternalBuiltinCSS(R, FID, Entry->getName());
+ html::AddHeaderFooterInternalBuiltinCSS(R, FID, Entry.getName());
}
StringRef HTMLDiagnostics::showHelpJavascript() {
@@ -695,8 +765,7 @@ static void HandlePopUpPieceEndTag(Rewriter &R,
Out << "</div></td><td>" << Piece.getString() << "</td></tr>";
// If no report made at this range mark the variable and add the end tags.
- if (std::find(PopUpRanges.begin(), PopUpRanges.end(), Range) ==
- PopUpRanges.end()) {
+ if (!llvm::is_contained(PopUpRanges, Range)) {
// Store that we create a report at this range.
PopUpRanges.push_back(Range);
@@ -711,30 +780,33 @@ static void HandlePopUpPieceEndTag(Rewriter &R,
}
}
-void HTMLDiagnostics::RewriteFile(Rewriter &R,
- const PathPieces& path, FileID FID) {
+void HTMLDiagnostics::RewriteFile(Rewriter &R, const PathPieces &path,
+ FileID FID) {
+
// Process the path.
// Maintain the counts of extra note pieces separately.
- unsigned TotalPieces = path.size();
- unsigned TotalNotePieces = std::count_if(
- path.begin(), path.end(), [](const PathDiagnosticPieceRef &p) {
+ unsigned TotalPieces = getPathSizeWithoutArrows(path);
+ unsigned TotalNotePieces =
+ llvm::count_if(path, [](const PathDiagnosticPieceRef &p) {
return isa<PathDiagnosticNotePiece>(*p);
});
- unsigned PopUpPieceCount = std::count_if(
- path.begin(), path.end(), [](const PathDiagnosticPieceRef &p) {
+ unsigned PopUpPieceCount =
+ llvm::count_if(path, [](const PathDiagnosticPieceRef &p) {
return isa<PathDiagnosticPopUpPiece>(*p);
});
unsigned TotalRegularPieces = TotalPieces - TotalNotePieces - PopUpPieceCount;
unsigned NumRegularPieces = TotalRegularPieces;
unsigned NumNotePieces = TotalNotePieces;
+ unsigned NumberOfArrows = 0;
// Stores the count of the regular piece indices.
std::map<int, int> IndexMap;
+ ArrowMap ArrowIndices(TotalRegularPieces + 1);
// Stores the different ranges where we have reported something.
std::vector<SourceRange> PopUpRanges;
- for (auto I = path.rbegin(), E = path.rend(); I != E; ++I) {
- const auto &Piece = *I->get();
+ for (const PathDiagnosticPieceRef &I : llvm::reverse(path)) {
+ const auto &Piece = *I.get();
if (isa<PathDiagnosticPopUpPiece>(Piece)) {
++IndexMap[NumRegularPieces];
@@ -744,18 +816,40 @@ void HTMLDiagnostics::RewriteFile(Rewriter &R,
// as a separate pass through the piece list.
HandlePiece(R, FID, Piece, PopUpRanges, NumNotePieces, TotalNotePieces);
--NumNotePieces;
+
+ } else if (isArrowPiece(Piece)) {
+ NumberOfArrows = ProcessControlFlowPiece(
+ R, FID, cast<PathDiagnosticControlFlowPiece>(Piece), NumberOfArrows);
+ ArrowIndices[NumRegularPieces] = NumberOfArrows;
+
} else {
HandlePiece(R, FID, Piece, PopUpRanges, NumRegularPieces,
TotalRegularPieces);
--NumRegularPieces;
+ ArrowIndices[NumRegularPieces] = ArrowIndices[NumRegularPieces + 1];
}
}
+ ArrowIndices[0] = NumberOfArrows;
+
+ // At this point ArrowIndices represent the following data structure:
+ // [a_0, a_1, ..., a_N]
+ // where N is the number of events in the path.
+ //
+ // Then for every event with index i \in [0, N - 1], we can say that
+ // arrows with indices \in [a_(i+1), a_i) correspond to that event.
+ // We can say that because arrows with these indices appeared in the
+ // path in between the i-th and the (i+1)-th events.
+ assert(ArrowIndices.back() == 0 &&
+ "No arrows should be after the last event");
+ // This assertion also guarantees that all indices in are <= NumberOfArrows.
+ assert(llvm::is_sorted(ArrowIndices, std::greater<unsigned>()) &&
+ "Incorrect arrow indices map");
// Secondary indexing if we are having multiple pop-ups between two notes.
// (e.g. [(13) 'a' is 'true']; [(13.1) 'b' is 'false']; [(13.2) 'c' is...)
NumRegularPieces = TotalRegularPieces;
- for (auto I = path.rbegin(), E = path.rend(); I != E; ++I) {
- const auto &Piece = *I->get();
+ for (const PathDiagnosticPieceRef &I : llvm::reverse(path)) {
+ const auto &Piece = *I.get();
if (const auto *PopUpP = dyn_cast<PathDiagnosticPopUpPiece>(&Piece)) {
int PopUpPieceIndex = IndexMap[NumRegularPieces];
@@ -771,7 +865,7 @@ void HTMLDiagnostics::RewriteFile(Rewriter &R,
if (PopUpPieceIndex > 0)
--IndexMap[NumRegularPieces];
- } else if (!isa<PathDiagnosticNotePiece>(Piece)) {
+ } else if (!isa<PathDiagnosticNotePiece>(Piece) && !isArrowPiece(Piece)) {
--NumRegularPieces;
}
}
@@ -783,6 +877,8 @@ void HTMLDiagnostics::RewriteFile(Rewriter &R,
html::EscapeText(R, FID);
html::AddLineNumbers(R, FID);
+ addArrowSVGs(R, FID, ArrowIndices);
+
// If we have a preprocessor, relex the file and syntax highlight.
// We might not have a preprocessor if we come from a deserialized AST file,
// for example.
@@ -1007,8 +1103,7 @@ void HTMLDiagnostics::HandlePiece(Rewriter &R, FileID BugFileID,
ArrayRef<SourceRange> Ranges = P.getRanges();
for (const auto &Range : Ranges) {
// If we have already highlighted the range as a pop-up there is no work.
- if (std::find(PopUpRanges.begin(), PopUpRanges.end(), Range) !=
- PopUpRanges.end())
+ if (llvm::is_contained(PopUpRanges, Range))
continue;
HighlightRange(R, LPosInfo.first, Range);
@@ -1049,6 +1144,104 @@ unsigned HTMLDiagnostics::ProcessMacroPiece(raw_ostream &os,
return num;
}
+void HTMLDiagnostics::addArrowSVGs(Rewriter &R, FileID BugFileID,
+ const ArrowMap &ArrowIndices) {
+ std::string S;
+ llvm::raw_string_ostream OS(S);
+
+ OS << R"<<<(
+<style type="text/css">
+ svg {
+ position:absolute;
+ top:0;
+ left:0;
+ height:100%;
+ width:100%;
+ pointer-events: none;
+ overflow: visible
+ }
+ .arrow {
+ stroke-opacity: 0.2;
+ stroke-width: 1;
+ marker-end: url(#arrowhead);
+ }
+
+ .arrow.selected {
+ stroke-opacity: 0.6;
+ stroke-width: 2;
+ marker-end: url(#arrowheadSelected);
+ }
+
+ .arrowhead {
+ orient: auto;
+ stroke: none;
+ opacity: 0.6;
+ fill: blue;
+ }
+</style>
+<svg xmlns="http://www.w3.org/2000/svg">
+ <defs>
+ <marker id="arrowheadSelected" class="arrowhead" opacity="0.6"
+ viewBox="0 0 10 10" refX="3" refY="5"
+ markerWidth="4" markerHeight="4">
+ <path d="M 0 0 L 10 5 L 0 10 z" />
+ </marker>
+ <marker id="arrowhead" class="arrowhead" opacity="0.2"
+ viewBox="0 0 10 10" refX="3" refY="5"
+ markerWidth="4" markerHeight="4">
+ <path d="M 0 0 L 10 5 L 0 10 z" />
+ </marker>
+ </defs>
+ <g id="arrows" fill="none" stroke="blue" visibility="hidden">
+)<<<";
+
+ for (unsigned Index : llvm::seq(0u, ArrowIndices.getTotalNumberOfArrows())) {
+ OS << " <path class=\"arrow\" id=\"arrow" << Index << "\"/>\n";
+ }
+
+ OS << R"<<<(
+ </g>
+</svg>
+<script type='text/javascript'>
+const arrowIndices = )<<<";
+
+ OS << ArrowIndices << "\n</script>\n";
+
+ R.InsertTextBefore(R.getSourceMgr().getLocForStartOfFile(BugFileID),
+ OS.str());
+}
+
+std::string getSpanBeginForControl(const char *ClassName, unsigned Index) {
+ std::string Result;
+ llvm::raw_string_ostream OS(Result);
+ OS << "<span id=\"" << ClassName << Index << "\">";
+ return Result;
+}
+
+std::string getSpanBeginForControlStart(unsigned Index) {
+ return getSpanBeginForControl("start", Index);
+}
+
+std::string getSpanBeginForControlEnd(unsigned Index) {
+ return getSpanBeginForControl("end", Index);
+}
+
+unsigned HTMLDiagnostics::ProcessControlFlowPiece(
+ Rewriter &R, FileID BugFileID, const PathDiagnosticControlFlowPiece &P,
+ unsigned Number) {
+ for (const PathDiagnosticLocationPair &LPair : P) {
+ std::string Start = getSpanBeginForControlStart(Number),
+ End = getSpanBeginForControlEnd(Number++);
+
+ HighlightRange(R, BugFileID, LPair.getStart().asRange().getBegin(),
+ Start.c_str());
+ HighlightRange(R, BugFileID, LPair.getEnd().asRange().getBegin(),
+ End.c_str());
+ }
+
+ return Number;
+}
+
void HTMLDiagnostics::HighlightRange(Rewriter& R, FileID BugFileID,
SourceRange Range,
const char *HighlightStart,
@@ -1109,7 +1302,7 @@ document.addEventListener("DOMContentLoaded", function() {
});
var findNum = function() {
- var s = document.querySelector(".selected");
+ var s = document.querySelector(".msg.selected");
if (!s || s.id == "EndPath") {
return 0;
}
@@ -1117,14 +1310,32 @@ var findNum = function() {
return out;
};
+var classListAdd = function(el, theClass) {
+ if(!el.className.baseVal)
+ el.className += " " + theClass;
+ else
+ el.className.baseVal += " " + theClass;
+};
+
+var classListRemove = function(el, theClass) {
+ var className = (!el.className.baseVal) ?
+ el.className : el.className.baseVal;
+ className = className.replace(" " + theClass, "");
+ if(!el.className.baseVal)
+ el.className = className;
+ else
+ el.className.baseVal = className;
+};
+
var scrollTo = function(el) {
querySelectorAllArray(".selected").forEach(function(s) {
- s.classList.remove("selected");
+ classListRemove(s, "selected");
});
- el.classList.add("selected");
+ classListAdd(el, "selected");
window.scrollBy(0, el.getBoundingClientRect().top -
(window.innerHeight / 2));
-}
+ highlightArrowsForSelectedEvent();
+};
var move = function(num, up, numItems) {
if (num == 1 && up || num == numItems - 1 && !up) {
@@ -1159,9 +1370,11 @@ window.addEventListener("keydown", function (event) {
if (event.defaultPrevented) {
return;
}
- if (event.key == "j") {
+ // key 'j'
+ if (event.keyCode == 74) {
navigateTo(/*up=*/false);
- } else if (event.key == "k") {
+ // key 'k'
+ } else if (event.keyCode == 75) {
navigateTo(/*up=*/true);
} else {
return;
@@ -1171,3 +1384,258 @@ window.addEventListener("keydown", function (event) {
</script>
)<<<";
}
+
+StringRef HTMLDiagnostics::generateArrowDrawingJavascript() {
+ return R"<<<(
+<script type='text/javascript'>
+// Return range of numbers from a range [lower, upper).
+function range(lower, upper) {
+ var array = [];
+ for (var i = lower; i <= upper; ++i) {
+ array.push(i);
+ }
+ return array;
+}
+
+var getRelatedArrowIndices = function(pathId) {
+ // HTML numeration of events is a bit different than it is in the path.
+ // Everything is rotated one step to the right, so the last element
+ // (error diagnostic) has index 0.
+ if (pathId == 0) {
+ // arrowIndices has at least 2 elements
+ pathId = arrowIndices.length - 1;
+ }
+
+ return range(arrowIndices[pathId], arrowIndices[pathId - 1]);
+}
+
+var highlightArrowsForSelectedEvent = function() {
+ const selectedNum = findNum();
+ const arrowIndicesToHighlight = getRelatedArrowIndices(selectedNum);
+ arrowIndicesToHighlight.forEach((index) => {
+ var arrow = document.querySelector("#arrow" + index);
+ if(arrow) {
+ classListAdd(arrow, "selected")
+ }
+ });
+}
+
+var getAbsoluteBoundingRect = function(element) {
+ const relative = element.getBoundingClientRect();
+ return {
+ left: relative.left + window.pageXOffset,
+ right: relative.right + window.pageXOffset,
+ top: relative.top + window.pageYOffset,
+ bottom: relative.bottom + window.pageYOffset,
+ height: relative.height,
+ width: relative.width
+ };
+}
+
+var drawArrow = function(index) {
+ // This function is based on the great answer from SO:
+ // https://stackoverflow.com/a/39575674/11582326
+ var start = document.querySelector("#start" + index);
+ var end = document.querySelector("#end" + index);
+ var arrow = document.querySelector("#arrow" + index);
+
+ var startRect = getAbsoluteBoundingRect(start);
+ var endRect = getAbsoluteBoundingRect(end);
+
+ // It is an arrow from a token to itself, no need to visualize it.
+ if (startRect.top == endRect.top &&
+ startRect.left == endRect.left)
+ return;
+
+ // Each arrow is a very simple Bézier curve, with two nodes and
+ // two handles. So, we need to calculate four points in the window:
+ // * start node
+ var posStart = { x: 0, y: 0 };
+ // * end node
+ var posEnd = { x: 0, y: 0 };
+ // * handle for the start node
+ var startHandle = { x: 0, y: 0 };
+ // * handle for the end node
+ var endHandle = { x: 0, y: 0 };
+ // One can visualize it as follows:
+ //
+ // start handle
+ // /
+ // X"""_.-""""X
+ // .' \
+ // / start node
+ // |
+ // |
+ // | end node
+ // \ /
+ // `->X
+ // X-'
+ // \
+ // end handle
+ //
+ // NOTE: (0, 0) is the top left corner of the window.
+
+ // We have 3 similar, but still different scenarios to cover:
+ //
+ // 1. Two tokens on different lines.
+ // -xxx
+ // /
+ // \
+ // -> xxx
+ // In this situation, we draw arrow on the left curving to the left.
+ // 2. Two tokens on the same line, and the destination is on the right.
+ // ____
+ // / \
+ // / V
+ // xxx xxx
+ // In this situation, we draw arrow above curving upwards.
+ // 3. Two tokens on the same line, and the destination is on the left.
+ // xxx xxx
+ // ^ /
+ // \____/
+ // In this situation, we draw arrow below curving downwards.
+ const onDifferentLines = startRect.top <= endRect.top - 5 ||
+ startRect.top >= endRect.top + 5;
+ const leftToRight = startRect.left < endRect.left;
+
+ // NOTE: various magic constants are chosen empirically for
+ // better positioning and look
+ if (onDifferentLines) {
+ // Case #1
+ const topToBottom = startRect.top < endRect.top;
+ posStart.x = startRect.left - 1;
+ // We don't want to start it at the top left corner of the token,
+ // it doesn't feel like this is where the arrow comes from.
+ // For this reason, we start it in the middle of the left side
+ // of the token.
+ posStart.y = startRect.top + startRect.height / 2;
+
+ // End node has arrow head and we give it a bit more space.
+ posEnd.x = endRect.left - 4;
+ posEnd.y = endRect.top;
+
+ // Utility object with x and y offsets for handles.
+ var curvature = {
+ // We want bottom-to-top arrow to curve a bit more, so it doesn't
+ // overlap much with top-to-bottom curves (much more frequent).
+ x: topToBottom ? 15 : 25,
+ y: Math.min((posEnd.y - posStart.y) / 3, 10)
+ }
+
+ // When destination is on the different line, we can make a
+ // curvier arrow because we have space for it.
+ // So, instead of using
+ //
+ // startHandle.x = posStart.x - curvature.x
+ // endHandle.x = posEnd.x - curvature.x
+ //
+ // We use the leftmost of these two values for both handles.
+ startHandle.x = Math.min(posStart.x, posEnd.x) - curvature.x;
+ endHandle.x = startHandle.x;
+
+ // Curving downwards from the start node...
+ startHandle.y = posStart.y + curvature.y;
+ // ... and upwards from the end node.
+ endHandle.y = posEnd.y - curvature.y;
+
+ } else if (leftToRight) {
+ // Case #2
+ // Starting from the top right corner...
+ posStart.x = startRect.right - 1;
+ posStart.y = startRect.top;
+
+ // ...and ending at the top left corner of the end token.
+ posEnd.x = endRect.left + 1;
+ posEnd.y = endRect.top - 1;
+
+ // Utility object with x and y offsets for handles.
+ var curvature = {
+ x: Math.min((posEnd.x - posStart.x) / 3, 15),
+ y: 5
+ }
+
+ // Curving to the right...
+ startHandle.x = posStart.x + curvature.x;
+ // ... and upwards from the start node.
+ startHandle.y = posStart.y - curvature.y;
+
+ // And to the left...
+ endHandle.x = posEnd.x - curvature.x;
+ // ... and upwards from the end node.
+ endHandle.y = posEnd.y - curvature.y;
+
+ } else {
+ // Case #3
+ // Starting from the bottom right corner...
+ posStart.x = startRect.right;
+ posStart.y = startRect.bottom;
+
+ // ...and ending also at the bottom right corner, but of the end token.
+ posEnd.x = endRect.right - 1;
+ posEnd.y = endRect.bottom + 1;
+
+ // Utility object with x and y offsets for handles.
+ var curvature = {
+ x: Math.min((posStart.x - posEnd.x) / 3, 15),
+ y: 5
+ }
+
+ // Curving to the left...
+ startHandle.x = posStart.x - curvature.x;
+ // ... and downwards from the start node.
+ startHandle.y = posStart.y + curvature.y;
+
+ // And to the right...
+ endHandle.x = posEnd.x + curvature.x;
+ // ... and downwards from the end node.
+ endHandle.y = posEnd.y + curvature.y;
+ }
+
+ // Put it all together into a path.
+ // More information on the format:
+ // https://developer.mozilla.org/en-US/docs/Web/SVG/Tutorial/Paths
+ var pathStr = "M" + posStart.x + "," + posStart.y + " " +
+ "C" + startHandle.x + "," + startHandle.y + " " +
+ endHandle.x + "," + endHandle.y + " " +
+ posEnd.x + "," + posEnd.y;
+
+ arrow.setAttribute("d", pathStr);
+};
+
+var drawArrows = function() {
+ const numOfArrows = document.querySelectorAll("path[id^=arrow]").length;
+ for (var i = 0; i < numOfArrows; ++i) {
+ drawArrow(i);
+ }
+}
+
+var toggleArrows = function(event) {
+ const arrows = document.querySelector("#arrows");
+ if (event.target.checked) {
+ arrows.setAttribute("visibility", "visible");
+ } else {
+ arrows.setAttribute("visibility", "hidden");
+ }
+}
+
+window.addEventListener("resize", drawArrows);
+document.addEventListener("DOMContentLoaded", function() {
+ // Whenever we show invocation, locations change, i.e. we
+ // need to redraw arrows.
+ document
+ .querySelector('input[id="showinvocation"]')
+ .addEventListener("click", drawArrows);
+ // Hiding irrelevant lines also should cause arrow rerender.
+ document
+ .querySelector('input[name="showCounterexample"]')
+ .addEventListener("change", drawArrows);
+ document
+ .querySelector('input[name="showArrows"]')
+ .addEventListener("change", toggleArrows);
+ drawArrows();
+ // Default highlighting for the last event.
+ highlightArrowsForSelectedEvent();
+});
+</script>
+ )<<<";
+}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
index e5f4e9ea30c9..a80352816be6 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
@@ -17,6 +17,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/LoopUnrolling.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -24,6 +25,7 @@ using namespace clang::ast_matchers;
static const int MAXIMUM_STEP_UNROLLED = 128;
+namespace {
struct LoopState {
private:
enum Kind { Normal, Unrolled } K;
@@ -56,6 +58,7 @@ public:
ID.AddInteger(maxStep);
}
};
+} // namespace
// The tracked stack of loops. The stack indicates that which loops the
// simulated element contained by. The loops are marked depending if we decided
@@ -69,7 +72,7 @@ namespace clang {
namespace ento {
static bool isLoopStmt(const Stmt *S) {
- return S && (isa<ForStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S));
+ return isa_and_nonnull<ForStmt, WhileStmt, DoStmt>(S);
}
ProgramStateRef processLoopEnd(const Stmt *LoopStmt, ProgramStateRef State) {
@@ -175,7 +178,7 @@ static bool isCapturedByReference(ExplodedNode *N, const DeclRefExpr *DR) {
const CXXRecordDecl *LambdaCXXRec = MD->getParent();
// Lookup the fields of the lambda
- llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
+ llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
FieldDecl *LambdaThisCaptureField;
LambdaCXXRec->getCaptureFields(LambdaCaptureFields, LambdaThisCaptureField);
@@ -264,8 +267,8 @@ bool shouldCompletelyUnroll(const Stmt *LoopStmt, ASTContext &ASTCtx,
Matches[0].getNodeAs<IntegerLiteral>("initNum")->getValue();
auto CondOp = Matches[0].getNodeAs<BinaryOperator>("conditionOperator");
if (InitNum.getBitWidth() != BoundNum.getBitWidth()) {
- InitNum = InitNum.zextOrSelf(BoundNum.getBitWidth());
- BoundNum = BoundNum.zextOrSelf(InitNum.getBitWidth());
+ InitNum = InitNum.zext(BoundNum.getBitWidth());
+ BoundNum = BoundNum.zext(InitNum.getBitWidth());
}
if (CondOp->getOpcode() == BO_GE || CondOp->getOpcode() == BO_LE)
@@ -284,7 +287,7 @@ bool madeNewBranch(ExplodedNode *N, const Stmt *LoopStmt) {
return true;
ProgramPoint P = N->getLocation();
- if (Optional<BlockEntrance> BE = P.getAs<BlockEntrance>())
+ if (std::optional<BlockEntrance> BE = P.getAs<BlockEntrance>())
S = BE->getBlock()->getTerminatorStmt();
if (S == LoopStmt)
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopWidening.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopWidening.cpp
index 47e34dd84b9a..9e4280176062 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopWidening.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopWidening.cpp
@@ -35,6 +35,8 @@ static const Expr *getLoopCondition(const Stmt *LoopStmt) {
return cast<WhileStmt>(LoopStmt)->getCond();
case Stmt::DoStmtClass:
return cast<DoStmt>(LoopStmt)->getCond();
+ case Stmt::CXXForRangeStmtClass:
+ return cast<CXXForRangeStmt>(LoopStmt)->getCond();
}
}
@@ -45,8 +47,7 @@ ProgramStateRef getWidenedLoopState(ProgramStateRef PrevState,
const LocationContext *LCtx,
unsigned BlockCount, const Stmt *LoopStmt) {
- assert(isa<ForStmt>(LoopStmt) || isa<WhileStmt>(LoopStmt) ||
- isa<DoStmt>(LoopStmt));
+ assert((isa<ForStmt, WhileStmt, DoStmt, CXXForRangeStmt>(LoopStmt)));
// Invalidate values in the current state.
// TODO Make this more conservative by only invalidating values that might
@@ -85,7 +86,7 @@ ProgramStateRef getWidenedLoopState(ProgramStateRef PrevState,
// pointer should remain unchanged. Ignore static methods, since they do not
// have 'this' pointers.
const CXXMethodDecl *CXXMD = dyn_cast<CXXMethodDecl>(STC->getDecl());
- if (CXXMD && !CXXMD->isStatic()) {
+ if (CXXMD && CXXMD->isImplicitObjectMemberFunction()) {
const CXXThisRegion *ThisR =
MRMgr.getCXXThisRegion(CXXMD->getThisType(), STC);
ITraits.setTrait(ThisR,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
index bd725ee9eaa3..16db6b249dc9 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -28,17 +28,18 @@
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CheckedArithmetic.h"
@@ -50,6 +51,7 @@
#include <cstdint>
#include <functional>
#include <iterator>
+#include <optional>
#include <string>
#include <tuple>
#include <utility>
@@ -72,8 +74,7 @@ RegionTy* MemRegionManager::getSubRegion(const Arg1Ty arg1,
auto *R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID, InsertPos));
if (!R) {
- R = A.Allocate<RegionTy>();
- new (R) RegionTy(arg1, superRegion);
+ R = new (A) RegionTy(arg1, superRegion);
Regions.InsertNode(R, InsertPos);
}
@@ -89,8 +90,7 @@ RegionTy* MemRegionManager::getSubRegion(const Arg1Ty arg1, const Arg2Ty arg2,
auto *R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID, InsertPos));
if (!R) {
- R = A.Allocate<RegionTy>();
- new (R) RegionTy(arg1, arg2, superRegion);
+ R = new (A) RegionTy(arg1, arg2, superRegion);
Regions.InsertNode(R, InsertPos);
}
@@ -108,8 +108,7 @@ RegionTy* MemRegionManager::getSubRegion(const Arg1Ty arg1, const Arg2Ty arg2,
auto *R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID, InsertPos));
if (!R) {
- R = A.Allocate<RegionTy>();
- new (R) RegionTy(arg1, arg2, arg3, superRegion);
+ R = new (A) RegionTy(arg1, arg2, arg3, superRegion);
Regions.InsertNode(R, InsertPos);
}
@@ -160,8 +159,22 @@ const StackFrameContext *VarRegion::getStackFrame() const {
return SSR ? SSR->getStackFrame() : nullptr;
}
+const StackFrameContext *
+CXXLifetimeExtendedObjectRegion::getStackFrame() const {
+ const auto *SSR = dyn_cast<StackSpaceRegion>(getMemorySpace());
+ return SSR ? SSR->getStackFrame() : nullptr;
+}
+
+const StackFrameContext *CXXTempObjectRegion::getStackFrame() const {
+ assert(isa<StackSpaceRegion>(getMemorySpace()) &&
+ "A temporary object can only be allocated on the stack");
+ return cast<StackSpaceRegion>(getMemorySpace())->getStackFrame();
+}
+
ObjCIvarRegion::ObjCIvarRegion(const ObjCIvarDecl *ivd, const SubRegion *sReg)
- : DeclRegion(sReg, ObjCIvarRegionKind), IVD(ivd) {}
+ : DeclRegion(sReg, ObjCIvarRegionKind), IVD(ivd) {
+ assert(IVD);
+}
const ObjCIvarDecl *ObjCIvarRegion::getDecl() const { return IVD; }
@@ -389,6 +402,20 @@ void CXXTempObjectRegion::Profile(llvm::FoldingSetNodeID &ID) const {
ProfileRegion(ID, Ex, getSuperRegion());
}
+void CXXLifetimeExtendedObjectRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
+ const Expr *E,
+ const ValueDecl *D,
+ const MemRegion *sReg) {
+ ID.AddPointer(E);
+ ID.AddPointer(D);
+ ID.AddPointer(sReg);
+}
+
+void CXXLifetimeExtendedObjectRegion::Profile(
+ llvm::FoldingSetNodeID &ID) const {
+ ProfileRegion(ID, Ex, ExD, getSuperRegion());
+}
+
void CXXBaseObjectRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
const CXXRecordDecl *RD,
bool IsVirtual,
@@ -443,7 +470,7 @@ std::string MemRegion::getString() const {
std::string s;
llvm::raw_string_ostream os(s);
dumpToStream(os);
- return os.str();
+ return s;
}
void MemRegion::dumpToStream(raw_ostream &os) const {
@@ -465,11 +492,9 @@ void BlockCodeRegion::dumpToStream(raw_ostream &os) const {
void BlockDataRegion::dumpToStream(raw_ostream &os) const {
os << "block_data{" << BC;
os << "; ";
- for (BlockDataRegion::referenced_vars_iterator
- I = referenced_vars_begin(),
- E = referenced_vars_end(); I != E; ++I)
- os << "(" << I.getCapturedRegion() << "<-" <<
- I.getOriginalRegion() << ") ";
+ for (auto Var : referenced_vars())
+ os << "(" << Var.getCapturedRegion() << "<-" << Var.getOriginalRegion()
+ << ") ";
os << '}';
}
@@ -479,7 +504,17 @@ void CompoundLiteralRegion::dumpToStream(raw_ostream &os) const {
}
void CXXTempObjectRegion::dumpToStream(raw_ostream &os) const {
- os << "temp_object{" << getValueType().getAsString() << ", "
+ os << "temp_object{" << getValueType() << ", "
+ << "S" << Ex->getID(getContext()) << '}';
+}
+
+void CXXLifetimeExtendedObjectRegion::dumpToStream(raw_ostream &os) const {
+ os << "lifetime_extended_object{" << getValueType() << ", ";
+ if (const IdentifierInfo *ID = ExD->getIdentifier())
+ os << ID->getName();
+ else
+ os << "D" << ExD->getID();
+ os << ", "
<< "S" << Ex->getID(getContext()) << '}';
}
@@ -496,8 +531,8 @@ void CXXThisRegion::dumpToStream(raw_ostream &os) const {
}
void ElementRegion::dumpToStream(raw_ostream &os) const {
- os << "Element{" << superRegion << ','
- << Index << ',' << getElementType().getAsString() << '}';
+ os << "Element{" << superRegion << ',' << Index << ',' << getElementType()
+ << '}';
}
void FieldRegion::dumpToStream(raw_ostream &os) const {
@@ -709,21 +744,17 @@ std::string MemRegion::getDescriptiveName(bool UseQuotes) const {
}
SourceRange MemRegion::sourceRange() const {
- const auto *const VR = dyn_cast<VarRegion>(this->getBaseRegion());
- const auto *const FR = dyn_cast<FieldRegion>(this);
-
// Check for more specific regions first.
- // FieldRegion
- if (FR) {
+ if (auto *FR = dyn_cast<FieldRegion>(this)) {
return FR->getDecl()->getSourceRange();
}
- // VarRegion
- else if (VR) {
+
+ if (auto *VR = dyn_cast<VarRegion>(this->getBaseRegion())) {
return VR->getDecl()->getSourceRange();
}
+
// Return invalid source range (can be checked by client).
- else
- return {};
+ return {};
}
//===----------------------------------------------------------------------===//
@@ -747,6 +778,7 @@ DefinedOrUnknownSVal MemRegionManager::getStaticSize(const MemRegion *MR,
case MemRegion::CXXBaseObjectRegionKind:
case MemRegion::CXXDerivedObjectRegionKind:
case MemRegion::CXXTempObjectRegionKind:
+ case MemRegion::CXXLifetimeExtendedObjectRegionKind:
case MemRegion::CXXThisRegionKind:
case MemRegion::ObjCIvarRegionKind:
case MemRegion::NonParamVarRegionKind:
@@ -768,14 +800,52 @@ DefinedOrUnknownSVal MemRegionManager::getStaticSize(const MemRegion *MR,
return UnknownVal();
QualType Ty = cast<TypedValueRegion>(SR)->getDesugaredValueType(Ctx);
- DefinedOrUnknownSVal Size = getElementExtent(Ty, SVB);
-
- // A zero-length array at the end of a struct often stands for dynamically
- // allocated extra memory.
- if (Size.isZeroConstant()) {
- if (isa<ConstantArrayType>(Ty))
- return UnknownVal();
- }
+ const DefinedOrUnknownSVal Size = getElementExtent(Ty, SVB);
+
+ // We currently don't model flexible array members (FAMs), which are:
+ // - int array[]; of IncompleteArrayType
+ // - int array[0]; of ConstantArrayType with size 0
+ // - int array[1]; of ConstantArrayType with size 1
+ // https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html
+ const auto isFlexibleArrayMemberCandidate =
+ [this](const ArrayType *AT) -> bool {
+ if (!AT)
+ return false;
+
+ auto IsIncompleteArray = [](const ArrayType *AT) {
+ return isa<IncompleteArrayType>(AT);
+ };
+ auto IsArrayOfZero = [](const ArrayType *AT) {
+ const auto *CAT = dyn_cast<ConstantArrayType>(AT);
+ return CAT && CAT->getSize() == 0;
+ };
+ auto IsArrayOfOne = [](const ArrayType *AT) {
+ const auto *CAT = dyn_cast<ConstantArrayType>(AT);
+ return CAT && CAT->getSize() == 1;
+ };
+
+ using FAMKind = LangOptions::StrictFlexArraysLevelKind;
+ const FAMKind StrictFlexArraysLevel =
+ Ctx.getLangOpts().getStrictFlexArraysLevel();
+
+ // "Default": Any trailing array member is a FAM.
+ // Since we cannot tell at this point if this array is a trailing member
+ // or not, let's just do the same as for "OneZeroOrIncomplete".
+ if (StrictFlexArraysLevel == FAMKind::Default)
+ return IsArrayOfOne(AT) || IsArrayOfZero(AT) || IsIncompleteArray(AT);
+
+ if (StrictFlexArraysLevel == FAMKind::OneZeroOrIncomplete)
+ return IsArrayOfOne(AT) || IsArrayOfZero(AT) || IsIncompleteArray(AT);
+
+ if (StrictFlexArraysLevel == FAMKind::ZeroOrIncomplete)
+ return IsArrayOfZero(AT) || IsIncompleteArray(AT);
+
+ assert(StrictFlexArraysLevel == FAMKind::IncompleteOnly);
+ return IsIncompleteArray(AT);
+ };
+
+ if (isFlexibleArrayMemberCandidate(Ctx.getAsArrayType(Ty)))
+ return UnknownVal();
return Size;
}
@@ -794,8 +864,7 @@ DefinedOrUnknownSVal MemRegionManager::getStaticSize(const MemRegion *MR,
template <typename REG>
const REG *MemRegionManager::LazyAllocate(REG*& region) {
if (!region) {
- region = A.Allocate<REG>();
- new (region) REG(*this);
+ region = new (A) REG(*this);
}
return region;
@@ -804,8 +873,7 @@ const REG *MemRegionManager::LazyAllocate(REG*& region) {
template <typename REG, typename ARG>
const REG *MemRegionManager::LazyAllocate(REG*& region, ARG a) {
if (!region) {
- region = A.Allocate<REG>();
- new (region) REG(this, a);
+ region = new (A) REG(this, a);
}
return region;
@@ -819,8 +887,7 @@ MemRegionManager::getStackLocalsRegion(const StackFrameContext *STC) {
if (R)
return R;
- R = A.Allocate<StackLocalsSpaceRegion>();
- new (R) StackLocalsSpaceRegion(*this, STC);
+ R = new (A) StackLocalsSpaceRegion(*this, STC);
return R;
}
@@ -832,8 +899,7 @@ MemRegionManager::getStackArgumentsRegion(const StackFrameContext *STC) {
if (R)
return R;
- R = A.Allocate<StackArgumentsSpaceRegion>();
- new (R) StackArgumentsSpaceRegion(*this, STC);
+ R = new (A) StackArgumentsSpaceRegion(*this, STC);
return R;
}
@@ -854,8 +920,7 @@ const GlobalsSpaceRegion
if (R)
return R;
- R = A.Allocate<StaticGlobalSpaceRegion>();
- new (R) StaticGlobalSpaceRegion(*this, CR);
+ R = new (A) StaticGlobalSpaceRegion(*this, CR);
return R;
}
@@ -901,13 +966,11 @@ getStackOrCaptureRegionForDeclContext(const LocationContext *LC,
if (const auto *BC = dyn_cast<BlockInvocationContext>(LC)) {
const auto *BR = static_cast<const BlockDataRegion *>(BC->getData());
// FIXME: This can be made more efficient.
- for (BlockDataRegion::referenced_vars_iterator
- I = BR->referenced_vars_begin(),
- E = BR->referenced_vars_end(); I != E; ++I) {
- const TypedValueRegion *OrigR = I.getOriginalRegion();
+ for (auto Var : BR->referenced_vars()) {
+ const TypedValueRegion *OrigR = Var.getOriginalRegion();
if (const auto *VR = dyn_cast<VarRegion>(OrigR)) {
if (VR->getDecl() == VD)
- return cast<VarRegion>(I.getCapturedRegion());
+ return cast<VarRegion>(Var.getCapturedRegion());
}
}
}
@@ -945,26 +1008,14 @@ const VarRegion *MemRegionManager::getVarRegion(const VarDecl *D,
const MemRegion *sReg = nullptr;
if (D->hasGlobalStorage() && !D->isStaticLocal()) {
-
- // First handle the globals defined in system headers.
- if (Ctx.getSourceManager().isInSystemHeader(D->getLocation())) {
- // Whitelist the system globals which often DO GET modified, assume the
- // rest are immutable.
- if (D->getName().find("errno") != StringRef::npos)
- sReg = getGlobalsRegion(MemRegion::GlobalSystemSpaceRegionKind);
- else
- sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);
-
- // Treat other globals as GlobalInternal unless they are constants.
+ QualType Ty = D->getType();
+ assert(!Ty.isNull());
+ if (Ty.isConstQualified()) {
+ sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);
+ } else if (Ctx.getSourceManager().isInSystemHeader(D->getLocation())) {
+ sReg = getGlobalsRegion(MemRegion::GlobalSystemSpaceRegionKind);
} else {
- QualType GQT = D->getType();
- const Type *GT = GQT.getTypePtrOrNull();
- // TODO: We could walk the complex types here and see if everything is
- // constified.
- if (GT && GQT.isConstQualified() && GT->isArithmeticType())
- sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);
- else
- sReg = getGlobalsRegion();
+ sReg = getGlobalsRegion(MemRegion::GlobalInternalSpaceRegionKind);
}
// Finally handle static locals.
@@ -986,14 +1037,15 @@ const VarRegion *MemRegionManager::getVarRegion(const VarDecl *D,
sReg = getUnknownRegion();
} else {
if (D->hasLocalStorage()) {
- sReg = isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)
- ? static_cast<const MemRegion*>(getStackArgumentsRegion(STC))
- : static_cast<const MemRegion*>(getStackLocalsRegion(STC));
+ sReg =
+ isa<ParmVarDecl, ImplicitParamDecl>(D)
+ ? static_cast<const MemRegion *>(getStackArgumentsRegion(STC))
+ : static_cast<const MemRegion *>(getStackLocalsRegion(STC));
}
else {
assert(D->isStaticLocal());
const Decl *STCD = STC->getDecl();
- if (isa<FunctionDecl>(STCD) || isa<ObjCMethodDecl>(STCD))
+ if (isa<FunctionDecl, ObjCMethodDecl>(STCD))
sReg = getGlobalsRegion(MemRegion::StaticGlobalSpaceRegionKind,
getFunctionCodeRegion(cast<NamedDecl>(STCD)));
else if (const auto *BD = dyn_cast<BlockDecl>(STCD)) {
@@ -1006,8 +1058,10 @@ const VarRegion *MemRegionManager::getVarRegion(const VarDecl *D,
T = TSI->getType();
if (T.isNull())
T = getContext().VoidTy;
- if (!T->getAs<FunctionType>())
- T = getContext().getFunctionNoProtoType(T);
+ if (!T->getAs<FunctionType>()) {
+ FunctionProtoType::ExtProtoInfo Ext;
+ T = getContext().getFunctionType(T, std::nullopt, Ext);
+ }
T = getContext().getBlockPointerType(T);
const BlockCodeRegion *BTR =
@@ -1023,13 +1077,16 @@ const VarRegion *MemRegionManager::getVarRegion(const VarDecl *D,
}
}
- return getSubRegion<NonParamVarRegion>(D, sReg);
+ return getNonParamVarRegion(D, sReg);
}
const NonParamVarRegion *
MemRegionManager::getNonParamVarRegion(const VarDecl *D,
const MemRegion *superR) {
+ // Prefer the definition over the canonical decl as the canonical form.
D = D->getCanonicalDecl();
+ if (const VarDecl *Def = D->getDefinition())
+ D = Def;
return getSubRegion<NonParamVarRegion>(D, superR);
}
@@ -1053,14 +1110,18 @@ MemRegionManager::getBlockDataRegion(const BlockCodeRegion *BC,
sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);
}
else {
- if (LC) {
+ bool IsArcManagedBlock = Ctx.getLangOpts().ObjCAutoRefCount;
+
+ // ARC managed blocks can be initialized on stack or directly in heap
+ // depending on the implementations. So we initialize them with
+ // UnknownRegion.
+ if (!IsArcManagedBlock && LC) {
// FIXME: Once we implement scope handling, we want the parent region
// to be the scope.
const StackFrameContext *STC = LC->getStackFrame();
assert(STC);
sReg = getStackLocalsRegion(STC);
- }
- else {
+ } else {
// We allow 'LC' to be NULL for cases where want BlockDataRegions
// without context-sensitivity.
sReg = getUnknownRegion();
@@ -1070,12 +1131,6 @@ MemRegionManager::getBlockDataRegion(const BlockCodeRegion *BC,
return getSubRegion<BlockDataRegion>(BC, LC, blockCount, sReg);
}
-const CXXTempObjectRegion *
-MemRegionManager::getCXXStaticTempObjectRegion(const Expr *Ex) {
- return getSubRegion<CXXTempObjectRegion>(
- Ex, getGlobalsRegion(MemRegion::GlobalInternalSpaceRegionKind, nullptr));
-}
-
const CompoundLiteralRegion*
MemRegionManager::getCompoundLiteralRegion(const CompoundLiteralExpr *CL,
const LocationContext *LC) {
@@ -1106,8 +1161,7 @@ MemRegionManager::getElementRegion(QualType elementType, NonLoc Idx,
auto *R = cast_or_null<ElementRegion>(data);
if (!R) {
- R = A.Allocate<ElementRegion>();
- new (R) ElementRegion(T, Idx, superRegion);
+ R = new (A) ElementRegion(T, Idx, superRegion);
Regions.InsertNode(R, InsertPos);
}
@@ -1126,9 +1180,12 @@ MemRegionManager::getBlockCodeRegion(const BlockDecl *BD, CanQualType locTy,
return getSubRegion<BlockCodeRegion>(BD, locTy, AC, getCodeRegion());
}
-/// getSymbolicRegion - Retrieve or create a "symbolic" memory region.
-const SymbolicRegion *MemRegionManager::getSymbolicRegion(SymbolRef sym) {
- return getSubRegion<SymbolicRegion>(sym, getUnknownRegion());
+const SymbolicRegion *
+MemRegionManager::getSymbolicRegion(SymbolRef sym,
+ const MemSpaceRegion *MemSpace) {
+ if (MemSpace == nullptr)
+ MemSpace = getUnknownRegion();
+ return getSubRegion<SymbolicRegion>(sym, MemSpace);
}
const SymbolicRegion *MemRegionManager::getSymbolicHeapRegion(SymbolRef Sym) {
@@ -1155,6 +1212,23 @@ MemRegionManager::getCXXTempObjectRegion(Expr const *E,
return getSubRegion<CXXTempObjectRegion>(E, getStackLocalsRegion(SFC));
}
+const CXXLifetimeExtendedObjectRegion *
+MemRegionManager::getCXXLifetimeExtendedObjectRegion(
+ const Expr *Ex, const ValueDecl *VD, const LocationContext *LC) {
+ const StackFrameContext *SFC = LC->getStackFrame();
+ assert(SFC);
+ return getSubRegion<CXXLifetimeExtendedObjectRegion>(
+ Ex, VD, getStackLocalsRegion(SFC));
+}
+
+const CXXLifetimeExtendedObjectRegion *
+MemRegionManager::getCXXStaticLifetimeExtendedObjectRegion(
+ const Expr *Ex, const ValueDecl *VD) {
+ return getSubRegion<CXXLifetimeExtendedObjectRegion>(
+ Ex, VD,
+ getGlobalsRegion(MemRegion::GlobalInternalSpaceRegionKind, nullptr));
+}
+
/// Checks whether \p BaseClass is a valid virtual or direct non-virtual base
/// class of the type of \p Super.
static bool isValidBaseClass(const CXXRecordDecl *BaseClass,
@@ -1241,7 +1315,7 @@ const MemSpaceRegion *MemRegion::getMemorySpace() const {
SR = dyn_cast<SubRegion>(R);
}
- return dyn_cast<MemSpaceRegion>(R);
+ return cast<MemSpaceRegion>(R);
}
bool MemRegion::hasStackStorage() const {
@@ -1256,14 +1330,8 @@ bool MemRegion::hasStackParametersStorage() const {
return isa<StackArgumentsSpaceRegion>(getMemorySpace());
}
-bool MemRegion::hasGlobalsOrParametersStorage() const {
- const MemSpaceRegion *MS = getMemorySpace();
- return isa<StackArgumentsSpaceRegion>(MS) ||
- isa<GlobalsSpaceRegion>(MS);
-}
-
-// getBaseRegion strips away all elements and fields, and get the base region
-// of them.
+// Strips away all elements and fields.
+// Returns the base region of them.
const MemRegion *MemRegion::getBaseRegion() const {
const MemRegion *R = this;
while (true) {
@@ -1283,8 +1351,7 @@ const MemRegion *MemRegion::getBaseRegion() const {
return R;
}
-// getgetMostDerivedObjectRegion gets the region of the root class of a C++
-// class hierarchy.
+// Returns the region of the root class of a C++ class hierarchy.
const MemRegion *MemRegion::getMostDerivedObjectRegion() const {
const MemRegion *R = this;
while (const auto *BR = dyn_cast<CXXBaseObjectRegion>(R))
@@ -1435,6 +1502,7 @@ static RegionOffset calculateOffset(const MemRegion *R) {
case MemRegion::NonParamVarRegionKind:
case MemRegion::ParamVarRegionKind:
case MemRegion::CXXTempObjectRegionKind:
+ case MemRegion::CXXLifetimeExtendedObjectRegionKind:
// Usual base regions.
goto Finish;
@@ -1458,7 +1526,7 @@ static RegionOffset calculateOffset(const MemRegion *R) {
// If our base region is symbolic, we don't know what type it really is.
// Pretend the type of the symbol is the true dynamic type.
// (This will at least be self-consistent for the life of the symbol.)
- Ty = SR->getSymbol()->getType()->getPointeeType();
+ Ty = SR->getPointeeStaticType();
RootIsSymbolic = true;
}
@@ -1515,7 +1583,7 @@ static RegionOffset calculateOffset(const MemRegion *R) {
}
SVal Index = ER->getIndex();
- if (Optional<nonloc::ConcreteInt> CI =
+ if (std::optional<nonloc::ConcreteInt> CI =
Index.getAs<nonloc::ConcreteInt>()) {
// Don't bother calculating precise offsets if we already have a
// symbolic offset somewhere in the chain.
@@ -1625,10 +1693,8 @@ void BlockDataRegion::LazyInitializeReferencedVars() {
using VarVec = BumpVector<const MemRegion *>;
- auto *BV = A.Allocate<VarVec>();
- new (BV) VarVec(BC, NumBlockVars);
- auto *BVOriginal = A.Allocate<VarVec>();
- new (BVOriginal) VarVec(BC, NumBlockVars);
+ auto *BV = new (A) VarVec(BC, NumBlockVars);
+ auto *BVOriginal = new (A) VarVec(BC, NumBlockVars);
for (const auto *VD : ReferencedBlockVars) {
const VarRegion *VR = nullptr;
@@ -1676,10 +1742,13 @@ BlockDataRegion::referenced_vars_end() const {
VecOriginal->end());
}
+llvm::iterator_range<BlockDataRegion::referenced_vars_iterator>
+BlockDataRegion::referenced_vars() const {
+ return llvm::make_range(referenced_vars_begin(), referenced_vars_end());
+}
+
const VarRegion *BlockDataRegion::getOriginalRegion(const VarRegion *R) const {
- for (referenced_vars_iterator I = referenced_vars_begin(),
- E = referenced_vars_end();
- I != E; ++I) {
+ for (const auto &I : referenced_vars()) {
if (I.getCapturedRegion() == R)
return I.getOriginalRegion();
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
index 92104d628711..be19a1c118ea 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -28,6 +28,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Casting.h"
#include <memory>
+#include <optional>
using namespace clang;
using namespace ento;
@@ -165,7 +166,7 @@ static void printCoverage(const PathDiagnostic *D,
FIDMap &FM,
llvm::raw_fd_ostream &o);
-static Optional<StringRef> getExpandedMacro(
+static std::optional<StringRef> getExpandedMacro(
SourceLocation MacroLoc, const cross_tu::CrossTranslationUnitContext &CTU,
const MacroExpansionContext &MacroExpansions, const SourceManager &SM);
@@ -366,10 +367,8 @@ void PlistPrinter::ReportMacroSubPieces(raw_ostream &o,
unsigned indent, unsigned depth) {
MacroPieces.push_back(&P);
- for (PathPieces::const_iterator I = P.subPieces.begin(),
- E = P.subPieces.end();
- I != E; ++I) {
- ReportPiece(o, **I, indent, depth, /*includeControlFlow*/ false);
+ for (const auto &SubPiece : P.subPieces) {
+ ReportPiece(o, *SubPiece, indent, depth, /*includeControlFlow*/ false);
}
assert(P.getFixits().size() == 0 &&
@@ -384,12 +383,12 @@ void PlistPrinter::ReportMacroExpansions(raw_ostream &o, unsigned indent) {
SourceLocation MacroExpansionLoc =
P->getLocation().asLocation().getExpansionLoc();
- const Optional<StringRef> MacroName =
+ const std::optional<StringRef> MacroName =
MacroExpansions.getOriginalText(MacroExpansionLoc);
- const Optional<StringRef> ExpansionText =
+ const std::optional<StringRef> ExpansionText =
getExpandedMacro(MacroExpansionLoc, CTU, MacroExpansions, SM);
- if (!MacroName.hasValue() || !ExpansionText.hasValue())
+ if (!MacroName || !ExpansionText)
continue;
Indent(o, indent) << "<dict>\n";
@@ -407,11 +406,11 @@ void PlistPrinter::ReportMacroExpansions(raw_ostream &o, unsigned indent) {
// Output the macro name.
Indent(o, indent) << "<key>name</key>";
- EmitString(o, MacroName.getValue()) << '\n';
+ EmitString(o, *MacroName) << '\n';
// Output what it expands into.
Indent(o, indent) << "<key>expansion</key>";
- EmitString(o, ExpansionText.getValue()) << '\n';
+ EmitString(o, *ExpansionText) << '\n';
// Finish up.
--indent;
@@ -499,12 +498,12 @@ static void printCoverage(const PathDiagnostic *D,
// Mapping from file IDs to executed lines.
const FilesToLineNumsMap &ExecutedLines = D->getExecutedLines();
- for (auto I = ExecutedLines.begin(), E = ExecutedLines.end(); I != E; ++I) {
- unsigned FileKey = AddFID(FM, Fids, I->first);
+ for (const auto &[FID, Lines] : ExecutedLines) {
+ unsigned FileKey = AddFID(FM, Fids, FID);
Indent(o, IndentLevel) << "<key>" << FileKey << "</key>\n";
Indent(o, IndentLevel) << "<array>\n";
IndentLevel++;
- for (unsigned LineNo : I->second) {
+ for (unsigned LineNo : Lines) {
Indent(o, IndentLevel);
EmitInteger(o, LineNo) << "\n";
}
@@ -596,8 +595,8 @@ void PlistDiagnostics::printBugPath(llvm::raw_ostream &o, const FIDMap &FM,
o << " <array>\n";
- for (PathPieces::const_iterator E = Path.end(); I != E; ++I)
- Printer.ReportDiag(o, **I);
+ for (const auto &Piece : llvm::make_range(I, Path.end()))
+ Printer.ReportDiag(o, *Piece);
o << " </array>\n";
@@ -805,7 +804,7 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
o << " <key>files</key>\n"
" <array>\n";
for (FileID FID : Fids)
- EmitString(o << " ", SM.getFileEntryForID(FID)->getName()) << '\n';
+ EmitString(o << " ", SM.getFileEntryRefForID(FID)->getName()) << '\n';
o << " </array>\n";
if (llvm::AreStatisticsEnabled() && DiagOpts.ShouldSerializeStats) {
@@ -825,7 +824,7 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
// Definitions of helper functions and methods for expanding macros.
//===----------------------------------------------------------------------===//
-static Optional<StringRef>
+static std::optional<StringRef>
getExpandedMacro(SourceLocation MacroExpansionLoc,
const cross_tu::CrossTranslationUnitContext &CTU,
const MacroExpansionContext &MacroExpansions,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
index 1ccb0de92fba..f12f1a5ac970 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -19,6 +19,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "llvm/Support/raw_ostream.h"
+#include <optional>
using namespace clang;
using namespace ento;
@@ -54,12 +55,8 @@ ProgramState::ProgramState(ProgramStateManager *mgr, const Environment& env,
}
ProgramState::ProgramState(const ProgramState &RHS)
- : llvm::FoldingSetNode(),
- stateMgr(RHS.stateMgr),
- Env(RHS.Env),
- store(RHS.store),
- GDM(RHS.GDM),
- refCount(0) {
+ : stateMgr(RHS.stateMgr), Env(RHS.Env), store(RHS.store), GDM(RHS.GDM),
+ PosteriorlyOverconstrained(RHS.PosteriorlyOverconstrained), refCount(0) {
stateMgr->getStoreManager().incrementReferenceCount(store);
}
@@ -159,9 +156,8 @@ ProgramState::invalidateRegions(RegionList Regions,
const CallEvent *Call,
RegionAndSymbolInvalidationTraits *ITraits) const {
SmallVector<SVal, 8> Values;
- for (RegionList::const_iterator I = Regions.begin(),
- End = Regions.end(); I != End; ++I)
- Values.push_back(loc::MemRegionVal(*I));
+ for (const MemRegion *Reg : Regions)
+ Values.push_back(loc::MemRegionVal(Reg));
return invalidateRegionsImpl(Values, E, Count, LCtx, CausedByPointerEscape,
IS, ITraits, Call);
@@ -220,8 +216,6 @@ ProgramState::invalidateRegionsImpl(ValueList Values,
}
ProgramStateRef ProgramState::killBinding(Loc LV) const {
- assert(!LV.getAs<loc::MemRegionVal>() && "Use invalidateRegion instead.");
-
Store OldStore = getStore();
const StoreRef &newStore =
getStateManager().StoreMgr->killBinding(OldStore, LV);
@@ -318,12 +312,12 @@ ProgramStateRef ProgramState::BindExpr(const Stmt *S,
return getStateManager().getPersistentState(NewSt);
}
-ProgramStateRef ProgramState::assumeInBound(DefinedOrUnknownSVal Idx,
- DefinedOrUnknownSVal UpperBound,
- bool Assumption,
- QualType indexTy) const {
+[[nodiscard]] std::pair<ProgramStateRef, ProgramStateRef>
+ProgramState::assumeInBoundDual(DefinedOrUnknownSVal Idx,
+ DefinedOrUnknownSVal UpperBound,
+ QualType indexTy) const {
if (Idx.isUnknown() || UpperBound.isUnknown())
- return this;
+ return {this, this};
// Build an expression for 0 <= Idx < UpperBound.
// This is the same as Idx + MIN < UpperBound + MIN, if overflow is allowed.
@@ -342,7 +336,7 @@ ProgramStateRef ProgramState::assumeInBound(DefinedOrUnknownSVal Idx,
SVal newIdx = svalBuilder.evalBinOpNN(this, BO_Add,
Idx.castAs<NonLoc>(), Min, indexTy);
if (newIdx.isUnknownOrUndef())
- return this;
+ return {this, this};
// Adjust the upper bound.
SVal newBound =
@@ -350,17 +344,26 @@ ProgramStateRef ProgramState::assumeInBound(DefinedOrUnknownSVal Idx,
Min, indexTy);
if (newBound.isUnknownOrUndef())
- return this;
+ return {this, this};
// Build the actual comparison.
SVal inBound = svalBuilder.evalBinOpNN(this, BO_LT, newIdx.castAs<NonLoc>(),
newBound.castAs<NonLoc>(), Ctx.IntTy);
if (inBound.isUnknownOrUndef())
- return this;
+ return {this, this};
// Finally, let the constraint manager take care of it.
ConstraintManager &CM = SM.getConstraintManager();
- return CM.assume(this, inBound.castAs<DefinedSVal>(), Assumption);
+ return CM.assumeDual(this, inBound.castAs<DefinedSVal>());
+}
+
+ProgramStateRef ProgramState::assumeInBound(DefinedOrUnknownSVal Idx,
+ DefinedOrUnknownSVal UpperBound,
+ bool Assumption,
+ QualType indexTy) const {
+ std::pair<ProgramStateRef, ProgramStateRef> R =
+ assumeInBoundDual(Idx, UpperBound, indexTy);
+ return Assumption ? R.first : R.second;
}
ConditionTruthVal ProgramState::isNonNull(SVal V) const {
@@ -420,7 +423,7 @@ ProgramStateRef ProgramStateManager::getPersistentState(ProgramState &State) {
freeStates.pop_back();
}
else {
- newState = (ProgramState*) Alloc.Allocate<ProgramState>();
+ newState = Alloc.Allocate<ProgramState>();
}
new (newState) ProgramState(State);
StateSet.InsertNode(newState, InsertPos);
@@ -433,6 +436,12 @@ ProgramStateRef ProgramState::makeWithStore(const StoreRef &store) const {
return getStateManager().getPersistentState(NewSt);
}
+ProgramStateRef ProgramState::cloneAsPosteriorlyOverconstrained() const {
+ ProgramState NewSt(*this);
+ NewSt.PosteriorlyOverconstrained = true;
+ return getStateManager().getPersistentState(NewSt);
+}
+
void ProgramState::setStore(const StoreRef &newStore) {
Store newStoreStore = newStore.getStore();
if (newStoreStore)
@@ -546,22 +555,20 @@ bool ScanReachableSymbols::scan(nonloc::LazyCompoundVal val) {
}
bool ScanReachableSymbols::scan(nonloc::CompoundVal val) {
- for (nonloc::CompoundVal::iterator I=val.begin(), E=val.end(); I!=E; ++I)
- if (!scan(*I))
+ for (SVal V : val)
+ if (!scan(V))
return false;
return true;
}
bool ScanReachableSymbols::scan(const SymExpr *sym) {
- for (SymExpr::symbol_iterator SI = sym->symbol_begin(),
- SE = sym->symbol_end();
- SI != SE; ++SI) {
- bool wasVisited = !visited.insert(*SI).second;
+ for (SymbolRef SubSym : sym->symbols()) {
+ bool wasVisited = !visited.insert(SubSym).second;
if (wasVisited)
continue;
- if (!visitor.VisitSymbol(*SI))
+ if (!visitor.VisitSymbol(SubSym))
return false;
}
@@ -569,20 +576,20 @@ bool ScanReachableSymbols::scan(const SymExpr *sym) {
}
bool ScanReachableSymbols::scan(SVal val) {
- if (Optional<loc::MemRegionVal> X = val.getAs<loc::MemRegionVal>())
+ if (std::optional<loc::MemRegionVal> X = val.getAs<loc::MemRegionVal>())
return scan(X->getRegion());
- if (Optional<nonloc::LazyCompoundVal> X =
+ if (std::optional<nonloc::LazyCompoundVal> X =
val.getAs<nonloc::LazyCompoundVal>())
return scan(*X);
- if (Optional<nonloc::LocAsInteger> X = val.getAs<nonloc::LocAsInteger>())
+ if (std::optional<nonloc::LocAsInteger> X = val.getAs<nonloc::LocAsInteger>())
return scan(X->getLoc());
if (SymbolRef Sym = val.getAsSymbol())
return scan(Sym);
- if (Optional<nonloc::CompoundVal> X = val.getAs<nonloc::CompoundVal>())
+ if (std::optional<nonloc::CompoundVal> X = val.getAs<nonloc::CompoundVal>())
return scan(*X);
return true;
@@ -620,10 +627,8 @@ bool ScanReachableSymbols::scan(const MemRegion *R) {
// Regions captured by a block are also implicitly reachable.
if (const BlockDataRegion *BDR = dyn_cast<BlockDataRegion>(R)) {
- BlockDataRegion::referenced_vars_iterator I = BDR->referenced_vars_begin(),
- E = BDR->referenced_vars_end();
- for ( ; I != E; ++I) {
- if (!scan(I.getCapturedRegion()))
+ for (auto Var : BDR->referenced_vars()) {
+ if (!scan(Var.getCapturedRegion()))
return false;
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index 69554576bdb2..25d066c4652f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -20,12 +20,13 @@
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableSet.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <iterator>
+#include <optional>
using namespace clang;
using namespace ento;
@@ -110,6 +111,14 @@ public:
RangeSet::ContainerType RangeSet::Factory::EmptySet{};
+RangeSet RangeSet::Factory::add(RangeSet LHS, RangeSet RHS) {
+ ContainerType Result;
+ Result.reserve(LHS.size() + RHS.size());
+ std::merge(LHS.begin(), LHS.end(), RHS.begin(), RHS.end(),
+ std::back_inserter(Result));
+ return makePersistent(std::move(Result));
+}
+
RangeSet RangeSet::Factory::add(RangeSet Original, Range Element) {
ContainerType Result;
Result.reserve(Original.size() + 1);
@@ -126,6 +135,186 @@ RangeSet RangeSet::Factory::add(RangeSet Original, const llvm::APSInt &Point) {
return add(Original, Range(Point));
}
+RangeSet RangeSet::Factory::unite(RangeSet LHS, RangeSet RHS) {
+ ContainerType Result = unite(*LHS.Impl, *RHS.Impl);
+ return makePersistent(std::move(Result));
+}
+
+RangeSet RangeSet::Factory::unite(RangeSet Original, Range R) {
+ ContainerType Result;
+ Result.push_back(R);
+ Result = unite(*Original.Impl, Result);
+ return makePersistent(std::move(Result));
+}
+
+RangeSet RangeSet::Factory::unite(RangeSet Original, llvm::APSInt Point) {
+ return unite(Original, Range(ValueFactory.getValue(Point)));
+}
+
+RangeSet RangeSet::Factory::unite(RangeSet Original, llvm::APSInt From,
+ llvm::APSInt To) {
+ return unite(Original,
+ Range(ValueFactory.getValue(From), ValueFactory.getValue(To)));
+}
+
+template <typename T>
+void swapIterators(T &First, T &FirstEnd, T &Second, T &SecondEnd) {
+ std::swap(First, Second);
+ std::swap(FirstEnd, SecondEnd);
+}
+
+RangeSet::ContainerType RangeSet::Factory::unite(const ContainerType &LHS,
+ const ContainerType &RHS) {
+ if (LHS.empty())
+ return RHS;
+ if (RHS.empty())
+ return LHS;
+
+ using llvm::APSInt;
+ using iterator = ContainerType::const_iterator;
+
+ iterator First = LHS.begin();
+ iterator FirstEnd = LHS.end();
+ iterator Second = RHS.begin();
+ iterator SecondEnd = RHS.end();
+ APSIntType Ty = APSIntType(First->From());
+ const APSInt Min = Ty.getMinValue();
+
+ // Handle a corner case first when both range sets start from MIN.
+ // This helps to avoid complicated conditions below. Specifically, this
+ // particular check for `MIN` is not needed in the loop below every time
+ // when we do `Second->From() - One` operation.
+ if (Min == First->From() && Min == Second->From()) {
+ if (First->To() > Second->To()) {
+ // [ First ]--->
+ // [ Second ]----->
+ // MIN^
+ // The Second range is entirely inside the First one.
+
+ // Check if Second is the last in its RangeSet.
+ if (++Second == SecondEnd)
+ // [ First ]--[ First + 1 ]--->
+ // [ Second ]--------------------->
+ // MIN^
+ // The Union is equal to First's RangeSet.
+ return LHS;
+ } else {
+ // case 1: [ First ]----->
+ // case 2: [ First ]--->
+ // [ Second ]--->
+ // MIN^
+ // The First range is entirely inside or equal to the Second one.
+
+ // Check if First is the last in its RangeSet.
+ if (++First == FirstEnd)
+ // [ First ]----------------------->
+ // [ Second ]--[ Second + 1 ]---->
+ // MIN^
+ // The Union is equal to Second's RangeSet.
+ return RHS;
+ }
+ }
+
+ const APSInt One = Ty.getValue(1);
+ ContainerType Result;
+
+ // This is called when there are no ranges left in one of the ranges.
+ // Append the rest of the ranges from another range set to the Result
+ // and return with that.
+ const auto AppendTheRest = [&Result](iterator I, iterator E) {
+ Result.append(I, E);
+ return Result;
+ };
+
+ while (true) {
+ // We want to keep the following invariant at all times:
+ // ---[ First ------>
+ // -----[ Second --->
+ if (First->From() > Second->From())
+ swapIterators(First, FirstEnd, Second, SecondEnd);
+
+ // The Union definitely starts with First->From().
+ // ----------[ First ------>
+ // ------------[ Second --->
+ // ----------[ Union ------>
+ // UnionStart^
+ const llvm::APSInt &UnionStart = First->From();
+
+ // Loop where the invariant holds.
+ while (true) {
+ // Skip all enclosed ranges.
+ // ---[ First ]--->
+ // -----[ Second ]--[ Second + 1 ]--[ Second + N ]----->
+ while (First->To() >= Second->To()) {
+ // Check if Second is the last in its RangeSet.
+ if (++Second == SecondEnd) {
+ // Append the Union.
+ // ---[ Union ]--->
+ // -----[ Second ]----->
+ // --------[ First ]--->
+ // UnionEnd^
+ Result.emplace_back(UnionStart, First->To());
+ // ---[ Union ]----------------->
+ // --------------[ First + 1]--->
+ // Append all remaining ranges from the First's RangeSet.
+ return AppendTheRest(++First, FirstEnd);
+ }
+ }
+
+ // Check if First and Second are disjoint. It means that we find
+ // the end of the Union. Exit the loop and append the Union.
+ // ---[ First ]=------------->
+ // ------------=[ Second ]--->
+ // ----MinusOne^
+ if (First->To() < Second->From() - One)
+ break;
+
+ // First is entirely inside the Union. Go next.
+ // ---[ Union ----------->
+ // ---- [ First ]-------->
+ // -------[ Second ]----->
+ // Check if First is the last in its RangeSet.
+ if (++First == FirstEnd) {
+ // Append the Union.
+ // ---[ Union ]--->
+ // -----[ First ]------->
+ // --------[ Second ]--->
+ // UnionEnd^
+ Result.emplace_back(UnionStart, Second->To());
+ // ---[ Union ]------------------>
+ // --------------[ Second + 1]--->
+ // Append all remaining ranges from the Second's RangeSet.
+ return AppendTheRest(++Second, SecondEnd);
+ }
+
+ // We know that we are at one of the two cases:
+ // case 1: --[ First ]--------->
+ // case 2: ----[ First ]------->
+ // --------[ Second ]---------->
+ // In both cases First starts after Second->From().
+ // Make sure that the loop invariant holds.
+ swapIterators(First, FirstEnd, Second, SecondEnd);
+ }
+
+ // Here First and Second are disjoint.
+ // Append the Union.
+ // ---[ Union ]--------------->
+ // -----------------[ Second ]--->
+ // ------[ First ]--------------->
+ // UnionEnd^
+ Result.emplace_back(UnionStart, First->To());
+
+ // Check if First is the last in its RangeSet.
+ if (++First == FirstEnd)
+ // ---[ Union ]--------------->
+ // --------------[ Second ]--->
+ // Append all remaining ranges from the Second's RangeSet.
+ return AppendTheRest(Second, SecondEnd);
+ }
+
+ llvm_unreachable("Normally, we should not reach here");
+}
+
RangeSet RangeSet::Factory::getRangeSet(Range From) {
ContainerType Result;
Result.push_back(From);
@@ -155,13 +344,6 @@ RangeSet::ContainerType *RangeSet::Factory::construct(ContainerType &&From) {
return new (Buffer) ContainerType(std::move(From));
}
-RangeSet RangeSet::Factory::add(RangeSet LHS, RangeSet RHS) {
- ContainerType Result;
- std::merge(LHS.begin(), LHS.end(), RHS.begin(), RHS.end(),
- std::back_inserter(Result));
- return makePersistent(std::move(Result));
-}
-
const llvm::APSInt &RangeSet::getMinValue() const {
assert(!isEmpty());
return begin()->From();
@@ -172,6 +354,21 @@ const llvm::APSInt &RangeSet::getMaxValue() const {
return std::prev(end())->To();
}
+bool clang::ento::RangeSet::isUnsigned() const {
+ assert(!isEmpty());
+ return begin()->From().isUnsigned();
+}
+
+uint32_t clang::ento::RangeSet::getBitWidth() const {
+ assert(!isEmpty());
+ return begin()->From().getBitWidth();
+}
+
+APSIntType clang::ento::RangeSet::getAPSIntType() const {
+ assert(!isEmpty());
+ return APSIntType(begin()->From());
+}
+
bool RangeSet::containsImpl(llvm::APSInt &Point) const {
if (isEmpty() || !pin(Point))
return false;
@@ -325,11 +522,6 @@ RangeSet RangeSet::Factory::intersect(const RangeSet::ContainerType &LHS,
const_iterator First = LHS.begin(), Second = RHS.begin(),
FirstEnd = LHS.end(), SecondEnd = RHS.end();
- const auto SwapIterators = [&First, &FirstEnd, &Second, &SecondEnd]() {
- std::swap(First, Second);
- std::swap(FirstEnd, SecondEnd);
- };
-
// If we ran out of ranges in one set, but not in the other,
// it means that those elements are definitely not in the
// intersection.
@@ -339,7 +531,7 @@ RangeSet RangeSet::Factory::intersect(const RangeSet::ContainerType &LHS,
// ----[ First ---------------------->
// --------[ Second ----------------->
if (Second->From() < First->From())
- SwapIterators();
+ swapIterators(First, FirstEnd, Second, SecondEnd);
// Loop where the invariant holds:
do {
@@ -373,7 +565,7 @@ RangeSet RangeSet::Factory::intersect(const RangeSet::ContainerType &LHS,
if (Second->To() > First->To()) {
// Here we make a decision to keep First as the "longer"
// range.
- SwapIterators();
+ swapIterators(First, FirstEnd, Second, SecondEnd);
}
// At this point, we have the following situation:
@@ -479,6 +671,181 @@ RangeSet RangeSet::Factory::negate(RangeSet What) {
return makePersistent(std::move(Result));
}
+// Convert range set to the given integral type using truncation and promotion.
+// This works similar to APSIntType::apply function but for the range set.
+RangeSet RangeSet::Factory::castTo(RangeSet What, APSIntType Ty) {
+ // Set is empty or NOOP (aka cast to the same type).
+ if (What.isEmpty() || What.getAPSIntType() == Ty)
+ return What;
+
+ const bool IsConversion = What.isUnsigned() != Ty.isUnsigned();
+ const bool IsTruncation = What.getBitWidth() > Ty.getBitWidth();
+ const bool IsPromotion = What.getBitWidth() < Ty.getBitWidth();
+
+ if (IsTruncation)
+ return makePersistent(truncateTo(What, Ty));
+
+ // Here we handle 2 cases:
+ // - IsConversion && !IsPromotion.
+ // In this case we handle changing a sign with same bitwidth: char -> uchar,
+ // uint -> int. Here we convert negatives to positives and positives which
+ // is out of range to negatives. We use convertTo function for that.
+ // - IsConversion && IsPromotion && !What.isUnsigned().
+ // In this case we handle changing a sign from signeds to unsigneds with
+ // higher bitwidth: char -> uint, int-> uint64. The point is that we also
+ // need convert negatives to positives and use convertTo function as well.
+ // For example, we don't need such a convertion when converting unsigned to
+ // signed with higher bitwidth, because all the values of unsigned is valid
+ // for the such signed.
+ if (IsConversion && (!IsPromotion || !What.isUnsigned()))
+ return makePersistent(convertTo(What, Ty));
+
+ assert(IsPromotion && "Only promotion operation from unsigneds left.");
+ return makePersistent(promoteTo(What, Ty));
+}
+
+RangeSet RangeSet::Factory::castTo(RangeSet What, QualType T) {
+ assert(T->isIntegralOrEnumerationType() && "T shall be an integral type.");
+ return castTo(What, ValueFactory.getAPSIntType(T));
+}
+
+RangeSet::ContainerType RangeSet::Factory::truncateTo(RangeSet What,
+ APSIntType Ty) {
+ using llvm::APInt;
+ using llvm::APSInt;
+ ContainerType Result;
+ ContainerType Dummy;
+ // CastRangeSize is an amount of all possible values of cast type.
+ // Example: `char` has 256 values; `short` has 65536 values.
+ // But in fact we use `amount of values` - 1, because
+ // we can't keep `amount of values of UINT64` inside uint64_t.
+ // E.g. 256 is an amount of all possible values of `char` and we can't keep
+ // it inside `char`.
+ // And it's OK, it's enough to do correct calculations.
+ uint64_t CastRangeSize = APInt::getMaxValue(Ty.getBitWidth()).getZExtValue();
+ for (const Range &R : What) {
+ // Get bounds of the given range.
+ APSInt FromInt = R.From();
+ APSInt ToInt = R.To();
+ // CurrentRangeSize is an amount of all possible values of the current
+ // range minus one.
+ uint64_t CurrentRangeSize = (ToInt - FromInt).getZExtValue();
+ // This is an optimization for a specific case when this Range covers
+ // the whole range of the target type.
+ Dummy.clear();
+ if (CurrentRangeSize >= CastRangeSize) {
+ Dummy.emplace_back(ValueFactory.getMinValue(Ty),
+ ValueFactory.getMaxValue(Ty));
+ Result = std::move(Dummy);
+ break;
+ }
+ // Cast the bounds.
+ Ty.apply(FromInt);
+ Ty.apply(ToInt);
+ const APSInt &PersistentFrom = ValueFactory.getValue(FromInt);
+ const APSInt &PersistentTo = ValueFactory.getValue(ToInt);
+ if (FromInt > ToInt) {
+ Dummy.emplace_back(ValueFactory.getMinValue(Ty), PersistentTo);
+ Dummy.emplace_back(PersistentFrom, ValueFactory.getMaxValue(Ty));
+ } else
+ Dummy.emplace_back(PersistentFrom, PersistentTo);
+ // Every range retrieved after truncation potentialy has garbage values.
+ // So, we have to unite every next range with the previouses.
+ Result = unite(Result, Dummy);
+ }
+
+ return Result;
+}
+
+// Divide the convertion into two phases (presented as loops here).
+// First phase(loop) works when casted values go in ascending order.
+// E.g. char{1,3,5,127} -> uint{1,3,5,127}
+// Interrupt the first phase and go to second one when casted values start
+// go in descending order. That means that we crossed over the middle of
+// the type value set (aka 0 for signeds and MAX/2+1 for unsigneds).
+// For instance:
+// 1: uchar{1,3,5,128,255} -> char{1,3,5,-128,-1}
+// Here we put {1,3,5} to one array and {-128, -1} to another
+// 2: char{-128,-127,-1,0,1,2} -> uchar{128,129,255,0,1,3}
+// Here we put {128,129,255} to one array and {0,1,3} to another.
+// After that we unite both arrays.
+// NOTE: We don't just concatenate the arrays, because they may have
+// adjacent ranges, e.g.:
+// 1: char(-128, 127) -> uchar -> arr1(128, 255), arr2(0, 127) ->
+// unite -> uchar(0, 255)
+// 2: uchar(0, 1)U(254, 255) -> char -> arr1(0, 1), arr2(-2, -1) ->
+// unite -> uchar(-2, 1)
+RangeSet::ContainerType RangeSet::Factory::convertTo(RangeSet What,
+ APSIntType Ty) {
+ using llvm::APInt;
+ using llvm::APSInt;
+ using Bounds = std::pair<const APSInt &, const APSInt &>;
+ ContainerType AscendArray;
+ ContainerType DescendArray;
+ auto CastRange = [Ty, &VF = ValueFactory](const Range &R) -> Bounds {
+ // Get bounds of the given range.
+ APSInt FromInt = R.From();
+ APSInt ToInt = R.To();
+ // Cast the bounds.
+ Ty.apply(FromInt);
+ Ty.apply(ToInt);
+ return {VF.getValue(FromInt), VF.getValue(ToInt)};
+ };
+ // Phase 1. Fill the first array.
+ APSInt LastConvertedInt = Ty.getMinValue();
+ const auto *It = What.begin();
+ const auto *E = What.end();
+ while (It != E) {
+ Bounds NewBounds = CastRange(*(It++));
+ // If values stop going acsending order, go to the second phase(loop).
+ if (NewBounds.first < LastConvertedInt) {
+ DescendArray.emplace_back(NewBounds.first, NewBounds.second);
+ break;
+ }
+ // If the range contains a midpoint, then split the range.
+ // E.g. char(-5, 5) -> uchar(251, 5)
+ // Here we shall add a range (251, 255) to the first array and (0, 5) to the
+ // second one.
+ if (NewBounds.first > NewBounds.second) {
+ DescendArray.emplace_back(ValueFactory.getMinValue(Ty), NewBounds.second);
+ AscendArray.emplace_back(NewBounds.first, ValueFactory.getMaxValue(Ty));
+ } else
+ // Values are going acsending order.
+ AscendArray.emplace_back(NewBounds.first, NewBounds.second);
+ LastConvertedInt = NewBounds.first;
+ }
+ // Phase 2. Fill the second array.
+ while (It != E) {
+ Bounds NewBounds = CastRange(*(It++));
+ DescendArray.emplace_back(NewBounds.first, NewBounds.second);
+ }
+ // Unite both arrays.
+ return unite(AscendArray, DescendArray);
+}
+
+/// Promotion from unsigneds to signeds/unsigneds left.
+RangeSet::ContainerType RangeSet::Factory::promoteTo(RangeSet What,
+ APSIntType Ty) {
+ ContainerType Result;
+ // We definitely know the size of the result set.
+ Result.reserve(What.size());
+
+ // Each unsigned value fits every larger type without any changes,
+ // whether the larger type is signed or unsigned. So just promote and push
+ // back each range one by one.
+ for (const Range &R : What) {
+ // Get bounds of the given range.
+ llvm::APSInt FromInt = R.From();
+ llvm::APSInt ToInt = R.To();
+ // Cast the bounds.
+ Ty.apply(FromInt);
+ Ty.apply(ToInt);
+ Result.emplace_back(ValueFactory.getValue(FromInt),
+ ValueFactory.getValue(ToInt));
+ }
+ return Result;
+}
+
RangeSet RangeSet::Factory::deletePoint(RangeSet From,
const llvm::APSInt &Point) {
if (!From.contains(Point))
@@ -494,15 +861,17 @@ RangeSet RangeSet::Factory::deletePoint(RangeSet From,
return intersect(From, Upper, Lower);
}
-void Range::dump(raw_ostream &OS) const {
+LLVM_DUMP_METHOD void Range::dump(raw_ostream &OS) const {
OS << '[' << toString(From(), 10) << ", " << toString(To(), 10) << ']';
}
+LLVM_DUMP_METHOD void Range::dump() const { dump(llvm::errs()); }
-void RangeSet::dump(raw_ostream &OS) const {
+LLVM_DUMP_METHOD void RangeSet::dump(raw_ostream &OS) const {
OS << "{ ";
llvm::interleaveComma(*this, OS, [&OS](const Range &R) { R.dump(OS); });
OS << " }";
}
+LLVM_DUMP_METHOD void RangeSet::dump() const { dump(llvm::errs()); }
REGISTER_SET_FACTORY_WITH_PROGRAMSTATE(SymbolSet, SymbolRef)
@@ -545,20 +914,20 @@ namespace {
class EquivalenceClass : public llvm::FoldingSetNode {
public:
/// Find equivalence class for the given symbol in the given state.
- LLVM_NODISCARD static inline EquivalenceClass find(ProgramStateRef State,
- SymbolRef Sym);
+ [[nodiscard]] static inline EquivalenceClass find(ProgramStateRef State,
+ SymbolRef Sym);
/// Merge classes for the given symbols and return a new state.
- LLVM_NODISCARD static inline ProgramStateRef merge(RangeSet::Factory &F,
- ProgramStateRef State,
- SymbolRef First,
- SymbolRef Second);
+ [[nodiscard]] static inline ProgramStateRef merge(RangeSet::Factory &F,
+ ProgramStateRef State,
+ SymbolRef First,
+ SymbolRef Second);
// Merge this class with the given class and return a new state.
- LLVM_NODISCARD inline ProgramStateRef
+ [[nodiscard]] inline ProgramStateRef
merge(RangeSet::Factory &F, ProgramStateRef State, EquivalenceClass Other);
/// Return a set of class members for the given state.
- LLVM_NODISCARD inline SymbolSet getClassMembers(ProgramStateRef State) const;
+ [[nodiscard]] inline SymbolSet getClassMembers(ProgramStateRef State) const;
/// Return true if the current class is trivial in the given state.
/// A class is trivial if and only if there is not any member relations stored
@@ -571,39 +940,42 @@ public:
/// members and then during the removal of dead symbols we remove one of its
/// members. In this case, the class is still non-trivial (it still has the
/// mappings in ClassMembers), even though it has only one member.
- LLVM_NODISCARD inline bool isTrivial(ProgramStateRef State) const;
+ [[nodiscard]] inline bool isTrivial(ProgramStateRef State) const;
/// Return true if the current class is trivial and its only member is dead.
- LLVM_NODISCARD inline bool isTriviallyDead(ProgramStateRef State,
- SymbolReaper &Reaper) const;
+ [[nodiscard]] inline bool isTriviallyDead(ProgramStateRef State,
+ SymbolReaper &Reaper) const;
- LLVM_NODISCARD static inline ProgramStateRef
+ [[nodiscard]] static inline ProgramStateRef
markDisequal(RangeSet::Factory &F, ProgramStateRef State, SymbolRef First,
SymbolRef Second);
- LLVM_NODISCARD static inline ProgramStateRef
+ [[nodiscard]] static inline ProgramStateRef
markDisequal(RangeSet::Factory &F, ProgramStateRef State,
EquivalenceClass First, EquivalenceClass Second);
- LLVM_NODISCARD inline ProgramStateRef
+ [[nodiscard]] inline ProgramStateRef
markDisequal(RangeSet::Factory &F, ProgramStateRef State,
EquivalenceClass Other) const;
- LLVM_NODISCARD static inline ClassSet
- getDisequalClasses(ProgramStateRef State, SymbolRef Sym);
- LLVM_NODISCARD inline ClassSet
- getDisequalClasses(ProgramStateRef State) const;
- LLVM_NODISCARD inline ClassSet
+ [[nodiscard]] static inline ClassSet getDisequalClasses(ProgramStateRef State,
+ SymbolRef Sym);
+ [[nodiscard]] inline ClassSet getDisequalClasses(ProgramStateRef State) const;
+ [[nodiscard]] inline ClassSet
getDisequalClasses(DisequalityMapTy Map, ClassSet::Factory &Factory) const;
- LLVM_NODISCARD static inline Optional<bool> areEqual(ProgramStateRef State,
- EquivalenceClass First,
- EquivalenceClass Second);
- LLVM_NODISCARD static inline Optional<bool>
+ [[nodiscard]] static inline std::optional<bool>
+ areEqual(ProgramStateRef State, EquivalenceClass First,
+ EquivalenceClass Second);
+ [[nodiscard]] static inline std::optional<bool>
areEqual(ProgramStateRef State, SymbolRef First, SymbolRef Second);
+ /// Remove one member from the class.
+ [[nodiscard]] ProgramStateRef removeMember(ProgramStateRef State,
+ const SymbolRef Old);
+
/// Iterate over all symbols and try to simplify them.
- LLVM_NODISCARD static inline ProgramStateRef simplify(SValBuilder &SVB,
- RangeSet::Factory &F,
- ProgramStateRef State,
- EquivalenceClass Class);
+ [[nodiscard]] static inline ProgramStateRef simplify(SValBuilder &SVB,
+ RangeSet::Factory &F,
+ ProgramStateRef State,
+ EquivalenceClass Class);
void dumpToStream(ProgramStateRef State, raw_ostream &os) const;
LLVM_DUMP_METHOD void dump(ProgramStateRef State) const {
@@ -611,10 +983,10 @@ public:
}
/// Check equivalence data for consistency.
- LLVM_NODISCARD LLVM_ATTRIBUTE_UNUSED static bool
+ [[nodiscard]] LLVM_ATTRIBUTE_UNUSED static bool
isClassDataConsistent(ProgramStateRef State);
- LLVM_NODISCARD QualType getType() const {
+ [[nodiscard]] QualType getType() const {
return getRepresentativeSymbol()->getType();
}
@@ -655,6 +1027,7 @@ private:
inline ProgramStateRef mergeImpl(RangeSet::Factory &F, ProgramStateRef State,
SymbolSet Members, EquivalenceClass Other,
SymbolSet OtherMembers);
+
static inline bool
addToDisequalityInfo(DisequalityMapTy &Info, ConstraintRangeTy &Constraints,
RangeSet::Factory &F, ProgramStateRef State,
@@ -668,7 +1041,7 @@ private:
// Constraint functions
//===----------------------------------------------------------------------===//
-LLVM_NODISCARD LLVM_ATTRIBUTE_UNUSED bool
+[[nodiscard]] LLVM_ATTRIBUTE_UNUSED bool
areFeasible(ConstraintRangeTy Constraints) {
return llvm::none_of(
Constraints,
@@ -677,24 +1050,24 @@ areFeasible(ConstraintRangeTy Constraints) {
});
}
-LLVM_NODISCARD inline const RangeSet *getConstraint(ProgramStateRef State,
- EquivalenceClass Class) {
+[[nodiscard]] inline const RangeSet *getConstraint(ProgramStateRef State,
+ EquivalenceClass Class) {
return State->get<ConstraintRange>(Class);
}
-LLVM_NODISCARD inline const RangeSet *getConstraint(ProgramStateRef State,
- SymbolRef Sym) {
+[[nodiscard]] inline const RangeSet *getConstraint(ProgramStateRef State,
+ SymbolRef Sym) {
return getConstraint(State, EquivalenceClass::find(State, Sym));
}
-LLVM_NODISCARD ProgramStateRef setConstraint(ProgramStateRef State,
- EquivalenceClass Class,
- RangeSet Constraint) {
+[[nodiscard]] ProgramStateRef setConstraint(ProgramStateRef State,
+ EquivalenceClass Class,
+ RangeSet Constraint) {
return State->set<ConstraintRange>(Class, Constraint);
}
-LLVM_NODISCARD ProgramStateRef setConstraints(ProgramStateRef State,
- ConstraintRangeTy Constraints) {
+[[nodiscard]] ProgramStateRef setConstraints(ProgramStateRef State,
+ ConstraintRangeTy Constraints) {
return State->set<ConstraintRange>(Constraints);
}
@@ -710,8 +1083,8 @@ LLVM_NODISCARD ProgramStateRef setConstraints(ProgramStateRef State,
///
/// \returns true if assuming this Sym to be true means equality of operands
/// false if it means disequality of operands
-/// None otherwise
-Optional<bool> meansEquality(const SymSymExpr *Sym) {
+/// std::nullopt otherwise
+std::optional<bool> meansEquality(const SymSymExpr *Sym) {
switch (Sym->getOpcode()) {
case BO_Sub:
// This case is: A - B != 0 -> disequality check.
@@ -723,7 +1096,7 @@ Optional<bool> meansEquality(const SymSymExpr *Sym) {
// This case is: A != B != 0 -> diseqiality check.
return false;
default:
- return llvm::None;
+ return std::nullopt;
}
}
@@ -732,8 +1105,8 @@ Optional<bool> meansEquality(const SymSymExpr *Sym) {
//===----------------------------------------------------------------------===//
template <class SecondTy, class... RestTy>
-LLVM_NODISCARD inline RangeSet intersect(RangeSet::Factory &F, RangeSet Head,
- SecondTy Second, RestTy... Tail);
+[[nodiscard]] inline RangeSet intersect(RangeSet::Factory &F, RangeSet Head,
+ SecondTy Second, RestTy... Tail);
template <class... RangeTy> struct IntersectionTraits;
@@ -745,7 +1118,7 @@ template <class... TailTy> struct IntersectionTraits<RangeSet, TailTy...> {
template <> struct IntersectionTraits<> {
// We ran out of types, and we didn't find any RangeSet, so the result should
// be optional.
- using Type = Optional<RangeSet>;
+ using Type = std::optional<RangeSet>;
};
template <class OptionalOrPointer, class... TailTy>
@@ -755,32 +1128,33 @@ struct IntersectionTraits<OptionalOrPointer, TailTy...> {
};
template <class EndTy>
-LLVM_NODISCARD inline EndTy intersect(RangeSet::Factory &F, EndTy End) {
- // If the list contains only RangeSet or Optional<RangeSet>, simply return
- // that range set.
+[[nodiscard]] inline EndTy intersect(RangeSet::Factory &F, EndTy End) {
+ // If the list contains only RangeSet or std::optional<RangeSet>, simply
+ // return that range set.
return End;
}
-LLVM_NODISCARD LLVM_ATTRIBUTE_UNUSED inline Optional<RangeSet>
+[[nodiscard]] LLVM_ATTRIBUTE_UNUSED inline std::optional<RangeSet>
intersect(RangeSet::Factory &F, const RangeSet *End) {
- // This is an extraneous conversion from a raw pointer into Optional<RangeSet>
+ // This is an extraneous conversion from a raw pointer into
+ // std::optional<RangeSet>
if (End) {
return *End;
}
- return llvm::None;
+ return std::nullopt;
}
template <class... RestTy>
-LLVM_NODISCARD inline RangeSet intersect(RangeSet::Factory &F, RangeSet Head,
- RangeSet Second, RestTy... Tail) {
+[[nodiscard]] inline RangeSet intersect(RangeSet::Factory &F, RangeSet Head,
+ RangeSet Second, RestTy... Tail) {
// Here we call either the <RangeSet,RangeSet,...> or <RangeSet,...> version
// of the function and can be sure that the result is RangeSet.
return intersect(F, F.intersect(Head, Second), Tail...);
}
template <class SecondTy, class... RestTy>
-LLVM_NODISCARD inline RangeSet intersect(RangeSet::Factory &F, RangeSet Head,
- SecondTy Second, RestTy... Tail) {
+[[nodiscard]] inline RangeSet intersect(RangeSet::Factory &F, RangeSet Head,
+ SecondTy Second, RestTy... Tail) {
if (Second) {
// Here we call the <RangeSet,RangeSet,...> version of the function...
return intersect(F, Head, *Second, Tail...);
@@ -792,11 +1166,12 @@ LLVM_NODISCARD inline RangeSet intersect(RangeSet::Factory &F, RangeSet Head,
/// Main generic intersect function.
/// It intersects all of the given range sets. If some of the given arguments
-/// don't hold a range set (nullptr or llvm::None), the function will skip them.
+/// don't hold a range set (nullptr or std::nullopt), the function will skip
+/// them.
///
/// Available representations for the arguments are:
/// * RangeSet
-/// * Optional<RangeSet>
+/// * std::optional<RangeSet>
/// * RangeSet *
/// Pointer to a RangeSet is automatically assumed to be nullable and will get
/// checked as well as the optional version. If this behaviour is undesired,
@@ -804,13 +1179,14 @@ LLVM_NODISCARD inline RangeSet intersect(RangeSet::Factory &F, RangeSet Head,
///
/// Return type depends on the arguments' types. If we can be sure in compile
/// time that there will be a range set as a result, the returning type is
-/// simply RangeSet, in other cases we have to back off to Optional<RangeSet>.
+/// simply RangeSet, in other cases we have to back off to
+/// std::optional<RangeSet>.
///
/// Please, prefer optional range sets to raw pointers. If the last argument is
-/// a raw pointer and all previous arguments are None, it will cost one
-/// additional check to convert RangeSet * into Optional<RangeSet>.
+/// a raw pointer and all previous arguments are std::nullopt, it will cost one
+/// additional check to convert RangeSet * into std::optional<RangeSet>.
template <class HeadTy, class SecondTy, class... RestTy>
-LLVM_NODISCARD inline
+[[nodiscard]] inline
typename IntersectionTraits<HeadTy, SecondTy, RestTy...>::Type
intersect(RangeSet::Factory &F, HeadTy Head, SecondTy Second,
RestTy... Tail) {
@@ -840,13 +1216,21 @@ public:
}
RangeSet VisitSymExpr(SymbolRef Sym) {
- // If we got to this function, the actual type of the symbolic
+ if (std::optional<RangeSet> RS = getRangeForNegatedSym(Sym))
+ return *RS;
+ // If we've reached this line, the actual type of the symbolic
// expression is not supported for advanced inference.
// In this case, we simply backoff to the default "let's simply
// infer the range from the expression's type".
return infer(Sym->getType());
}
+ RangeSet VisitUnarySymExpr(const UnarySymExpr *USE) {
+ if (std::optional<RangeSet> RS = getRangeForNegatedUnarySym(USE))
+ return *RS;
+ return infer(USE->getType());
+ }
+
RangeSet VisitSymIntExpr(const SymIntExpr *Sym) {
return VisitBinaryOperator(Sym);
}
@@ -855,14 +1239,25 @@ public:
return VisitBinaryOperator(Sym);
}
- RangeSet VisitSymSymExpr(const SymSymExpr *Sym) {
+ RangeSet VisitSymSymExpr(const SymSymExpr *SSE) {
return intersect(
RangeFactory,
+ // If Sym is a difference of symbols A - B, then maybe we have range
+ // set stored for B - A.
+ //
+ // If we have range set stored for both A - B and B - A then
+ // calculate the effective range set by intersecting the range set
+ // for A - B and the negated range set of B - A.
+ getRangeForNegatedSymSym(SSE),
+ // If Sym is a comparison expression (except <=>),
+ // find any other comparisons with the same operands.
+ // See function description.
+ getRangeForComparisonSymbol(SSE),
// If Sym is (dis)equality, we might have some information
// on that in our equality classes data structure.
- getRangeForEqualities(Sym),
+ getRangeForEqualities(SSE),
// And we should always check what we can get from the operands.
- VisitBinaryOperator(Sym));
+ VisitBinaryOperator(SSE));
}
private:
@@ -891,25 +1286,13 @@ private:
}
RangeSet infer(SymbolRef Sym) {
- return intersect(
- RangeFactory,
- // Of course, we should take the constraint directly associated with
- // this symbol into consideration.
- getConstraint(State, Sym),
- // If Sym is a difference of symbols A - B, then maybe we have range
- // set stored for B - A.
- //
- // If we have range set stored for both A - B and B - A then
- // calculate the effective range set by intersecting the range set
- // for A - B and the negated range set of B - A.
- getRangeForNegatedSub(Sym),
- // If Sym is a comparison expression (except <=>),
- // find any other comparisons with the same operands.
- // See function description.
- getRangeForComparisonSymbol(Sym),
- // Apart from the Sym itself, we can infer quite a lot if we look
- // into subexpressions of Sym.
- Visit(Sym));
+ return intersect(RangeFactory,
+ // Of course, we should take the constraint directly
+ // associated with this symbol into consideration.
+ getConstraint(State, Sym),
+ // Apart from the Sym itself, we can infer quite a lot if
+ // we look into subexpressions of Sym.
+ Visit(Sym));
}
RangeSet infer(EquivalenceClass Class) {
@@ -953,18 +1336,7 @@ private:
}
RangeSet VisitBinaryOperator(RangeSet LHS, BinaryOperator::Opcode Op,
- RangeSet RHS, QualType T) {
- switch (Op) {
- case BO_Or:
- return VisitBinaryOperator<BO_Or>(LHS, RHS, T);
- case BO_And:
- return VisitBinaryOperator<BO_And>(LHS, RHS, T);
- case BO_Rem:
- return VisitBinaryOperator<BO_Rem>(LHS, RHS, T);
- default:
- return infer(T);
- }
- }
+ RangeSet RHS, QualType T);
//===----------------------------------------------------------------------===//
// Ranges and operators
@@ -982,11 +1354,11 @@ private:
/// Try to convert given range into the given type.
///
- /// It will return llvm::None only when the trivial conversion is possible.
- llvm::Optional<Range> convert(const Range &Origin, APSIntType To) {
+ /// It will return std::nullopt only when the trivial conversion is possible.
+ std::optional<Range> convert(const Range &Origin, APSIntType To) {
if (To.testInRange(Origin.From(), false) != APSIntType::RTR_Within ||
To.testInRange(Origin.To(), false) != APSIntType::RTR_Within) {
- return llvm::None;
+ return std::nullopt;
}
return Range(ValueFactory.Convert(To, Origin.From()),
ValueFactory.Convert(To, Origin.To()));
@@ -994,11 +1366,7 @@ private:
template <BinaryOperator::Opcode Op>
RangeSet VisitBinaryOperator(RangeSet LHS, RangeSet RHS, QualType T) {
- // We should propagate information about unfeasbility of one of the
- // operands to the resulting range.
- if (LHS.isEmpty() || RHS.isEmpty()) {
- return RangeFactory.getEmptySet();
- }
+ assert(!LHS.isEmpty() && !RHS.isEmpty());
Range CoarseLHS = fillGaps(LHS);
Range CoarseRHS = fillGaps(RHS);
@@ -1070,31 +1438,51 @@ private:
return RangeFactory.deletePoint(Domain, IntType.getZeroValue());
}
- // FIXME: Once SValBuilder supports unary minus, we should use SValBuilder to
- // obtain the negated symbolic expression instead of constructing the
- // symbol manually. This will allow us to support finding ranges of not
- // only negated SymSymExpr-type expressions, but also of other, simpler
- // expressions which we currently do not know how to negate.
- Optional<RangeSet> getRangeForNegatedSub(SymbolRef Sym) {
- if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(Sym)) {
- if (SSE->getOpcode() == BO_Sub) {
- QualType T = Sym->getType();
+ template <typename ProduceNegatedSymFunc>
+ std::optional<RangeSet> getRangeForNegatedExpr(ProduceNegatedSymFunc F,
+ QualType T) {
+ // Do not negate if the type cannot be meaningfully negated.
+ if (!T->isUnsignedIntegerOrEnumerationType() &&
+ !T->isSignedIntegerOrEnumerationType())
+ return std::nullopt;
+
+ if (SymbolRef NegatedSym = F())
+ if (const RangeSet *NegatedRange = getConstraint(State, NegatedSym))
+ return RangeFactory.negate(*NegatedRange);
- // Do not negate unsigned ranges
- if (!T->isUnsignedIntegerOrEnumerationType() &&
- !T->isSignedIntegerOrEnumerationType())
- return llvm::None;
+ return std::nullopt;
+ }
- SymbolManager &SymMgr = State->getSymbolManager();
- SymbolRef NegatedSym =
- SymMgr.getSymSymExpr(SSE->getRHS(), BO_Sub, SSE->getLHS(), T);
+ std::optional<RangeSet> getRangeForNegatedUnarySym(const UnarySymExpr *USE) {
+ // Just get the operand when we negate a symbol that is already negated.
+ // -(-a) == a
+ return getRangeForNegatedExpr(
+ [USE]() -> SymbolRef {
+ if (USE->getOpcode() == UO_Minus)
+ return USE->getOperand();
+ return nullptr;
+ },
+ USE->getType());
+ }
- if (const RangeSet *NegatedRange = getConstraint(State, NegatedSym)) {
- return RangeFactory.negate(*NegatedRange);
- }
- }
- }
- return llvm::None;
+ std::optional<RangeSet> getRangeForNegatedSymSym(const SymSymExpr *SSE) {
+ return getRangeForNegatedExpr(
+ [SSE, State = this->State]() -> SymbolRef {
+ if (SSE->getOpcode() == BO_Sub)
+ return State->getSymbolManager().getSymSymExpr(
+ SSE->getRHS(), BO_Sub, SSE->getLHS(), SSE->getType());
+ return nullptr;
+ },
+ SSE->getType());
+ }
+
+ std::optional<RangeSet> getRangeForNegatedSym(SymbolRef Sym) {
+ return getRangeForNegatedExpr(
+ [Sym, State = this->State]() {
+ return State->getSymbolManager().getUnarySymExpr(Sym, UO_Minus,
+ Sym->getType());
+ },
+ Sym->getType());
}
// Returns ranges only for binary comparison operators (except <=>)
@@ -1107,16 +1495,12 @@ private:
// It covers all possible combinations (see CmpOpTable description).
// Note that `x` and `y` can also stand for subexpressions,
// not only for actual symbols.
- Optional<RangeSet> getRangeForComparisonSymbol(SymbolRef Sym) {
- const auto *SSE = dyn_cast<SymSymExpr>(Sym);
- if (!SSE)
- return llvm::None;
-
- BinaryOperatorKind CurrentOP = SSE->getOpcode();
+ std::optional<RangeSet> getRangeForComparisonSymbol(const SymSymExpr *SSE) {
+ const BinaryOperatorKind CurrentOP = SSE->getOpcode();
// We currently do not support <=> (C++20).
if (!BinaryOperator::isComparisonOp(CurrentOP) || (CurrentOP == BO_Cmp))
- return llvm::None;
+ return std::nullopt;
static const OperatorRelationsTable CmpOpTable{};
@@ -1126,7 +1510,12 @@ private:
SymbolManager &SymMgr = State->getSymbolManager();
- int UnknownStates = 0;
+ // We use this variable to store the last queried operator (`QueriedOP`)
+ // for which the `getCmpOpState` returned with `Unknown`. If there are two
+ // different OPs that returned `Unknown` then we have to query the special
+ // `UnknownX2` column. We assume that `getCmpOpState(CurrentOP, CurrentOP)`
+ // never returns `Unknown`, so `CurrentOP` is a good initial value.
+ BinaryOperatorKind LastQueriedOpToUnknown = CurrentOP;
// Loop goes through all of the columns exept the last one ('UnknownX2').
// We treat `UnknownX2` column separately at the end of the loop body.
@@ -1163,31 +1552,34 @@ private:
CmpOpTable.getCmpOpState(CurrentOP, QueriedOP);
if (BranchState == OperatorRelationsTable::Unknown) {
- if (++UnknownStates == 2)
- // If we met both Unknown states.
+ if (LastQueriedOpToUnknown != CurrentOP &&
+ LastQueriedOpToUnknown != QueriedOP) {
+ // If we got the Unknown state for both different operators.
// if (x <= y) // assume true
// if (x != y) // assume true
// if (x < y) // would be also true
// Get a state from `UnknownX2` column.
BranchState = CmpOpTable.getCmpOpStateForUnknownX2(CurrentOP);
- else
+ } else {
+ LastQueriedOpToUnknown = QueriedOP;
continue;
+ }
}
return (BranchState == OperatorRelationsTable::True) ? getTrueRange(T)
: getFalseRange(T);
}
- return llvm::None;
+ return std::nullopt;
}
- Optional<RangeSet> getRangeForEqualities(const SymSymExpr *Sym) {
- Optional<bool> Equality = meansEquality(Sym);
+ std::optional<RangeSet> getRangeForEqualities(const SymSymExpr *Sym) {
+ std::optional<bool> Equality = meansEquality(Sym);
if (!Equality)
- return llvm::None;
+ return std::nullopt;
- if (Optional<bool> AreEqual =
+ if (std::optional<bool> AreEqual =
EquivalenceClass::areEqual(State, Sym->getLHS(), Sym->getRHS())) {
// Here we cover two cases at once:
// * if Sym is equality and its operands are known to be equal -> true
@@ -1199,7 +1591,7 @@ private:
return getFalseRange(Sym->getType());
}
- return llvm::None;
+ return std::nullopt;
}
RangeSet getTrueRange(QualType T) {
@@ -1222,6 +1614,57 @@ private:
//===----------------------------------------------------------------------===//
template <>
+RangeSet SymbolicRangeInferrer::VisitBinaryOperator<BO_NE>(RangeSet LHS,
+ RangeSet RHS,
+ QualType T) {
+ assert(!LHS.isEmpty() && !RHS.isEmpty());
+
+ if (LHS.getAPSIntType() == RHS.getAPSIntType()) {
+ if (intersect(RangeFactory, LHS, RHS).isEmpty())
+ return getTrueRange(T);
+
+ } else {
+ // We can only lose information if we are casting smaller signed type to
+ // bigger unsigned type. For e.g.,
+ // LHS (unsigned short): [2, USHRT_MAX]
+ // RHS (signed short): [SHRT_MIN, 0]
+ //
+ // Casting RHS to LHS type will leave us with overlapping values
+ // CastedRHS : [0, 0] U [SHRT_MAX + 1, USHRT_MAX]
+ //
+ // We can avoid this by checking if signed type's maximum value is lesser
+ // than unsigned type's minimum value.
+
+ // If both have different signs then only we can get more information.
+ if (LHS.isUnsigned() != RHS.isUnsigned()) {
+ if (LHS.isUnsigned() && (LHS.getBitWidth() >= RHS.getBitWidth())) {
+ if (RHS.getMaxValue().isNegative() ||
+ LHS.getAPSIntType().convert(RHS.getMaxValue()) < LHS.getMinValue())
+ return getTrueRange(T);
+
+ } else if (RHS.isUnsigned() && (LHS.getBitWidth() <= RHS.getBitWidth())) {
+ if (LHS.getMaxValue().isNegative() ||
+ RHS.getAPSIntType().convert(LHS.getMaxValue()) < RHS.getMinValue())
+ return getTrueRange(T);
+ }
+ }
+
+ // Both RangeSets should be casted to bigger unsigned type.
+ APSIntType CastingType(std::max(LHS.getBitWidth(), RHS.getBitWidth()),
+ LHS.isUnsigned() || RHS.isUnsigned());
+
+ RangeSet CastedLHS = RangeFactory.castTo(LHS, CastingType);
+ RangeSet CastedRHS = RangeFactory.castTo(RHS, CastingType);
+
+ if (intersect(RangeFactory, CastedLHS, CastedRHS).isEmpty())
+ return getTrueRange(T);
+ }
+
+ // In all other cases, the resulting range cannot be deduced.
+ return infer(T);
+}
+
+template <>
RangeSet SymbolicRangeInferrer::VisitBinaryOperator<BO_Or>(Range LHS, Range RHS,
QualType T) {
APSIntType ResultType = ValueFactory.getAPSIntType(T);
@@ -1381,6 +1824,144 @@ RangeSet SymbolicRangeInferrer::VisitBinaryOperator<BO_Rem>(Range LHS,
return {RangeFactory, ValueFactory.getValue(Min), ValueFactory.getValue(Max)};
}
+RangeSet SymbolicRangeInferrer::VisitBinaryOperator(RangeSet LHS,
+ BinaryOperator::Opcode Op,
+ RangeSet RHS, QualType T) {
+ // We should propagate information about unfeasbility of one of the
+ // operands to the resulting range.
+ if (LHS.isEmpty() || RHS.isEmpty()) {
+ return RangeFactory.getEmptySet();
+ }
+
+ switch (Op) {
+ case BO_NE:
+ return VisitBinaryOperator<BO_NE>(LHS, RHS, T);
+ case BO_Or:
+ return VisitBinaryOperator<BO_Or>(LHS, RHS, T);
+ case BO_And:
+ return VisitBinaryOperator<BO_And>(LHS, RHS, T);
+ case BO_Rem:
+ return VisitBinaryOperator<BO_Rem>(LHS, RHS, T);
+ default:
+ return infer(T);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Constraint manager implementation details
+//===----------------------------------------------------------------------===//
+
+class RangeConstraintManager : public RangedConstraintManager {
+public:
+ RangeConstraintManager(ExprEngine *EE, SValBuilder &SVB)
+ : RangedConstraintManager(EE, SVB), F(getBasicVals()) {}
+
+ //===------------------------------------------------------------------===//
+ // Implementation for interface from ConstraintManager.
+ //===------------------------------------------------------------------===//
+
+ bool haveEqualConstraints(ProgramStateRef S1,
+ ProgramStateRef S2) const override {
+ // NOTE: ClassMembers are as simple as back pointers for ClassMap,
+ // so comparing constraint ranges and class maps should be
+ // sufficient.
+ return S1->get<ConstraintRange>() == S2->get<ConstraintRange>() &&
+ S1->get<ClassMap>() == S2->get<ClassMap>();
+ }
+
+ bool canReasonAbout(SVal X) const override;
+
+ ConditionTruthVal checkNull(ProgramStateRef State, SymbolRef Sym) override;
+
+ const llvm::APSInt *getSymVal(ProgramStateRef State,
+ SymbolRef Sym) const override;
+
+ const llvm::APSInt *getSymMinVal(ProgramStateRef State,
+ SymbolRef Sym) const override;
+
+ const llvm::APSInt *getSymMaxVal(ProgramStateRef State,
+ SymbolRef Sym) const override;
+
+ ProgramStateRef removeDeadBindings(ProgramStateRef State,
+ SymbolReaper &SymReaper) override;
+
+ void printJson(raw_ostream &Out, ProgramStateRef State, const char *NL = "\n",
+ unsigned int Space = 0, bool IsDot = false) const override;
+ void printValue(raw_ostream &Out, ProgramStateRef State,
+ SymbolRef Sym) override;
+ void printConstraints(raw_ostream &Out, ProgramStateRef State,
+ const char *NL = "\n", unsigned int Space = 0,
+ bool IsDot = false) const;
+ void printEquivalenceClasses(raw_ostream &Out, ProgramStateRef State,
+ const char *NL = "\n", unsigned int Space = 0,
+ bool IsDot = false) const;
+ void printDisequalities(raw_ostream &Out, ProgramStateRef State,
+ const char *NL = "\n", unsigned int Space = 0,
+ bool IsDot = false) const;
+
+ //===------------------------------------------------------------------===//
+ // Implementation for interface from RangedConstraintManager.
+ //===------------------------------------------------------------------===//
+
+ ProgramStateRef assumeSymNE(ProgramStateRef State, SymbolRef Sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) override;
+
+ ProgramStateRef assumeSymEQ(ProgramStateRef State, SymbolRef Sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) override;
+
+ ProgramStateRef assumeSymLT(ProgramStateRef State, SymbolRef Sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) override;
+
+ ProgramStateRef assumeSymGT(ProgramStateRef State, SymbolRef Sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) override;
+
+ ProgramStateRef assumeSymLE(ProgramStateRef State, SymbolRef Sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) override;
+
+ ProgramStateRef assumeSymGE(ProgramStateRef State, SymbolRef Sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) override;
+
+ ProgramStateRef assumeSymWithinInclusiveRange(
+ ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
+ const llvm::APSInt &To, const llvm::APSInt &Adjustment) override;
+
+ ProgramStateRef assumeSymOutsideInclusiveRange(
+ ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
+ const llvm::APSInt &To, const llvm::APSInt &Adjustment) override;
+
+private:
+ RangeSet::Factory F;
+
+ RangeSet getRange(ProgramStateRef State, SymbolRef Sym);
+ RangeSet getRange(ProgramStateRef State, EquivalenceClass Class);
+ ProgramStateRef setRange(ProgramStateRef State, SymbolRef Sym,
+ RangeSet Range);
+ ProgramStateRef setRange(ProgramStateRef State, EquivalenceClass Class,
+ RangeSet Range);
+
+ RangeSet getSymLTRange(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment);
+ RangeSet getSymGTRange(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment);
+ RangeSet getSymLERange(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment);
+ RangeSet getSymLERange(llvm::function_ref<RangeSet()> RS,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment);
+ RangeSet getSymGERange(ProgramStateRef St, SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment);
+};
+
//===----------------------------------------------------------------------===//
// Constraint assignment logic
//===----------------------------------------------------------------------===//
@@ -1482,7 +2063,7 @@ public:
class ConstraintAssignor : public ConstraintAssignorBase<ConstraintAssignor> {
public:
template <class ClassOrSymbol>
- LLVM_NODISCARD static ProgramStateRef
+ [[nodiscard]] static ProgramStateRef
assign(ProgramStateRef State, SValBuilder &Builder, RangeSet::Factory &F,
ClassOrSymbol CoS, RangeSet NewConstraint) {
if (!State || NewConstraint.isEmpty())
@@ -1492,7 +2073,28 @@ public:
return Assignor.assign(CoS, NewConstraint);
}
+ /// Handle expressions like: a % b != 0.
+ template <typename SymT>
+ bool handleRemainderOp(const SymT *Sym, RangeSet Constraint) {
+ if (Sym->getOpcode() != BO_Rem)
+ return true;
+ // a % b != 0 implies that a != 0.
+ if (!Constraint.containsZero()) {
+ SVal SymSVal = Builder.makeSymbolVal(Sym->getLHS());
+ if (auto NonLocSymSVal = SymSVal.getAs<nonloc::SymbolVal>()) {
+ State = State->assume(*NonLocSymSVal, true);
+ if (!State)
+ return false;
+ }
+ }
+ return true;
+ }
+
inline bool assignSymExprToConst(const SymExpr *Sym, Const Constraint);
+ inline bool assignSymIntExprToRangeSet(const SymIntExpr *Sym,
+ RangeSet Constraint) {
+ return handleRemainderOp(Sym, Constraint);
+ }
inline bool assignSymSymExprToRangeSet(const SymSymExpr *Sym,
RangeSet Constraint);
@@ -1503,7 +2105,7 @@ private:
using Base = ConstraintAssignorBase<ConstraintAssignor>;
/// Base method for handling new constraints for symbols.
- LLVM_NODISCARD ProgramStateRef assign(SymbolRef Sym, RangeSet NewConstraint) {
+ [[nodiscard]] ProgramStateRef assign(SymbolRef Sym, RangeSet NewConstraint) {
// All constraints are actually associated with equivalence classes, and
// that's what we are going to do first.
State = assign(EquivalenceClass::find(State, Sym), NewConstraint);
@@ -1517,8 +2119,8 @@ private:
}
/// Base method for handling new constraints for classes.
- LLVM_NODISCARD ProgramStateRef assign(EquivalenceClass Class,
- RangeSet NewConstraint) {
+ [[nodiscard]] ProgramStateRef assign(EquivalenceClass Class,
+ RangeSet NewConstraint) {
// There is a chance that we might need to update constraints for the
// classes that are known to be disequal to Class.
//
@@ -1564,18 +2166,16 @@ private:
return EquivalenceClass::merge(RangeFactory, State, LHS, RHS);
}
- LLVM_NODISCARD Optional<bool> interpreteAsBool(RangeSet Constraint) {
+ [[nodiscard]] std::optional<bool> interpreteAsBool(RangeSet Constraint) {
assert(!Constraint.isEmpty() && "Empty ranges shouldn't get here");
if (Constraint.getConcreteValue())
- return !Constraint.getConcreteValue()->isNullValue();
+ return !Constraint.getConcreteValue()->isZero();
- APSIntType T{Constraint.getMinValue()};
- Const Zero = T.getZeroValue();
- if (!Constraint.contains(Zero))
+ if (!Constraint.containsZero())
return true;
- return llvm::None;
+ return std::nullopt;
}
ProgramStateRef State;
@@ -1583,113 +2183,6 @@ private:
RangeSet::Factory &RangeFactory;
};
-//===----------------------------------------------------------------------===//
-// Constraint manager implementation details
-//===----------------------------------------------------------------------===//
-
-class RangeConstraintManager : public RangedConstraintManager {
-public:
- RangeConstraintManager(ExprEngine *EE, SValBuilder &SVB)
- : RangedConstraintManager(EE, SVB), F(getBasicVals()) {}
-
- //===------------------------------------------------------------------===//
- // Implementation for interface from ConstraintManager.
- //===------------------------------------------------------------------===//
-
- bool haveEqualConstraints(ProgramStateRef S1,
- ProgramStateRef S2) const override {
- // NOTE: ClassMembers are as simple as back pointers for ClassMap,
- // so comparing constraint ranges and class maps should be
- // sufficient.
- return S1->get<ConstraintRange>() == S2->get<ConstraintRange>() &&
- S1->get<ClassMap>() == S2->get<ClassMap>();
- }
-
- bool canReasonAbout(SVal X) const override;
-
- ConditionTruthVal checkNull(ProgramStateRef State, SymbolRef Sym) override;
-
- const llvm::APSInt *getSymVal(ProgramStateRef State,
- SymbolRef Sym) const override;
-
- ProgramStateRef removeDeadBindings(ProgramStateRef State,
- SymbolReaper &SymReaper) override;
-
- void printJson(raw_ostream &Out, ProgramStateRef State, const char *NL = "\n",
- unsigned int Space = 0, bool IsDot = false) const override;
- void printConstraints(raw_ostream &Out, ProgramStateRef State,
- const char *NL = "\n", unsigned int Space = 0,
- bool IsDot = false) const;
- void printEquivalenceClasses(raw_ostream &Out, ProgramStateRef State,
- const char *NL = "\n", unsigned int Space = 0,
- bool IsDot = false) const;
- void printDisequalities(raw_ostream &Out, ProgramStateRef State,
- const char *NL = "\n", unsigned int Space = 0,
- bool IsDot = false) const;
-
- //===------------------------------------------------------------------===//
- // Implementation for interface from RangedConstraintManager.
- //===------------------------------------------------------------------===//
-
- ProgramStateRef assumeSymNE(ProgramStateRef State, SymbolRef Sym,
- const llvm::APSInt &V,
- const llvm::APSInt &Adjustment) override;
-
- ProgramStateRef assumeSymEQ(ProgramStateRef State, SymbolRef Sym,
- const llvm::APSInt &V,
- const llvm::APSInt &Adjustment) override;
-
- ProgramStateRef assumeSymLT(ProgramStateRef State, SymbolRef Sym,
- const llvm::APSInt &V,
- const llvm::APSInt &Adjustment) override;
-
- ProgramStateRef assumeSymGT(ProgramStateRef State, SymbolRef Sym,
- const llvm::APSInt &V,
- const llvm::APSInt &Adjustment) override;
-
- ProgramStateRef assumeSymLE(ProgramStateRef State, SymbolRef Sym,
- const llvm::APSInt &V,
- const llvm::APSInt &Adjustment) override;
-
- ProgramStateRef assumeSymGE(ProgramStateRef State, SymbolRef Sym,
- const llvm::APSInt &V,
- const llvm::APSInt &Adjustment) override;
-
- ProgramStateRef assumeSymWithinInclusiveRange(
- ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
- const llvm::APSInt &To, const llvm::APSInt &Adjustment) override;
-
- ProgramStateRef assumeSymOutsideInclusiveRange(
- ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
- const llvm::APSInt &To, const llvm::APSInt &Adjustment) override;
-
-private:
- RangeSet::Factory F;
-
- RangeSet getRange(ProgramStateRef State, SymbolRef Sym);
- RangeSet getRange(ProgramStateRef State, EquivalenceClass Class);
- ProgramStateRef setRange(ProgramStateRef State, SymbolRef Sym,
- RangeSet Range);
- ProgramStateRef setRange(ProgramStateRef State, EquivalenceClass Class,
- RangeSet Range);
-
- RangeSet getSymLTRange(ProgramStateRef St, SymbolRef Sym,
- const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment);
- RangeSet getSymGTRange(ProgramStateRef St, SymbolRef Sym,
- const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment);
- RangeSet getSymLERange(ProgramStateRef St, SymbolRef Sym,
- const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment);
- RangeSet getSymLERange(llvm::function_ref<RangeSet()> RS,
- const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment);
- RangeSet getSymGERange(ProgramStateRef St, SymbolRef Sym,
- const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment);
-};
-
bool ConstraintAssignor::assignSymExprToConst(const SymExpr *Sym,
const llvm::APSInt &Constraint) {
llvm::SmallSet<EquivalenceClass, 4> SimplifiedClasses;
@@ -1716,17 +2209,32 @@ bool ConstraintAssignor::assignSymExprToConst(const SymExpr *Sym,
return false;
}
+ // We may have trivial equivalence classes in the disequality info as
+ // well, and we need to simplify them.
+ DisequalityMapTy DisequalityInfo = State->get<DisequalityMap>();
+ for (std::pair<EquivalenceClass, ClassSet> DisequalityEntry :
+ DisequalityInfo) {
+ EquivalenceClass Class = DisequalityEntry.first;
+ ClassSet DisequalClasses = DisequalityEntry.second;
+ State = EquivalenceClass::simplify(Builder, RangeFactory, State, Class);
+ if (!State)
+ return false;
+ }
+
return true;
}
bool ConstraintAssignor::assignSymSymExprToRangeSet(const SymSymExpr *Sym,
RangeSet Constraint) {
- Optional<bool> ConstraintAsBool = interpreteAsBool(Constraint);
+ if (!handleRemainderOp(Sym, Constraint))
+ return false;
+
+ std::optional<bool> ConstraintAsBool = interpreteAsBool(Constraint);
if (!ConstraintAsBool)
return true;
- if (Optional<bool> Equality = meansEquality(Sym)) {
+ if (std::optional<bool> Equality = meansEquality(Sym)) {
// Here we cover two cases:
// * if Sym is equality and the new constraint is true -> Sym's operands
// should be marked as equal
@@ -1864,7 +2372,7 @@ EquivalenceClass::mergeImpl(RangeSet::Factory &RangeFactory,
//
// Intersection here makes perfect sense because both of these constraints
// must hold for the whole new class.
- if (Optional<RangeSet> NewClassConstraint =
+ if (std::optional<RangeSet> NewClassConstraint =
intersect(RangeFactory, getConstraint(State, *this),
getConstraint(State, Other))) {
// NOTE: Essentially, NewClassConstraint should NEVER be infeasible because
@@ -2062,16 +2570,16 @@ inline bool EquivalenceClass::addToDisequalityInfo(
return true;
}
-inline Optional<bool> EquivalenceClass::areEqual(ProgramStateRef State,
- SymbolRef FirstSym,
- SymbolRef SecondSym) {
+inline std::optional<bool> EquivalenceClass::areEqual(ProgramStateRef State,
+ SymbolRef FirstSym,
+ SymbolRef SecondSym) {
return EquivalenceClass::areEqual(State, find(State, FirstSym),
find(State, SecondSym));
}
-inline Optional<bool> EquivalenceClass::areEqual(ProgramStateRef State,
- EquivalenceClass First,
- EquivalenceClass Second) {
+inline std::optional<bool> EquivalenceClass::areEqual(ProgramStateRef State,
+ EquivalenceClass First,
+ EquivalenceClass Second) {
// The same equivalence class => symbols are equal.
if (First == Second)
return true;
@@ -2083,7 +2591,61 @@ inline Optional<bool> EquivalenceClass::areEqual(ProgramStateRef State,
return false;
// It is not clear.
- return llvm::None;
+ return std::nullopt;
+}
+
+[[nodiscard]] ProgramStateRef
+EquivalenceClass::removeMember(ProgramStateRef State, const SymbolRef Old) {
+
+ SymbolSet ClsMembers = getClassMembers(State);
+ assert(ClsMembers.contains(Old));
+
+ // Remove `Old`'s Class->Sym relation.
+ SymbolSet::Factory &F = getMembersFactory(State);
+ ClassMembersTy::Factory &EMFactory = State->get_context<ClassMembers>();
+ ClsMembers = F.remove(ClsMembers, Old);
+ // Ensure another precondition of the removeMember function (we can check
+ // this only with isEmpty, thus we have to do the remove first).
+ assert(!ClsMembers.isEmpty() &&
+ "Class should have had at least two members before member removal");
+ // Overwrite the existing members assigned to this class.
+ ClassMembersTy ClassMembersMap = State->get<ClassMembers>();
+ ClassMembersMap = EMFactory.add(ClassMembersMap, *this, ClsMembers);
+ State = State->set<ClassMembers>(ClassMembersMap);
+
+ // Remove `Old`'s Sym->Class relation.
+ ClassMapTy Classes = State->get<ClassMap>();
+ ClassMapTy::Factory &CMF = State->get_context<ClassMap>();
+ Classes = CMF.remove(Classes, Old);
+ State = State->set<ClassMap>(Classes);
+
+ return State;
+}
+
+// Re-evaluate an SVal with top-level `State->assume` logic.
+[[nodiscard]] ProgramStateRef
+reAssume(ProgramStateRef State, const RangeSet *Constraint, SVal TheValue) {
+ if (!Constraint)
+ return State;
+
+ const auto DefinedVal = TheValue.castAs<DefinedSVal>();
+
+ // If the SVal is 0, we can simply interpret that as `false`.
+ if (Constraint->encodesFalseRange())
+ return State->assume(DefinedVal, false);
+
+ // If the constraint does not encode 0 then we can interpret that as `true`
+ // AND as a Range(Set).
+ if (Constraint->encodesTrueRange()) {
+ State = State->assume(DefinedVal, true);
+ if (!State)
+ return nullptr;
+ // Fall through, re-assume based on the range values as well.
+ }
+ // Overestimate the individual Ranges with the RangeSet' lowest and
+ // highest values.
+ return State->assumeInclusiveRange(DefinedVal, Constraint->getMinValue(),
+ Constraint->getMaxValue(), true);
}
// Iterate over all symbols and try to simplify them. Once a symbol is
@@ -2091,19 +2653,78 @@ inline Optional<bool> EquivalenceClass::areEqual(ProgramStateRef State,
// class to this class. This way, we simplify not just the symbols but the
// classes as well: we strive to keep the number of the classes to be the
// absolute minimum.
-LLVM_NODISCARD ProgramStateRef
+[[nodiscard]] ProgramStateRef
EquivalenceClass::simplify(SValBuilder &SVB, RangeSet::Factory &F,
ProgramStateRef State, EquivalenceClass Class) {
SymbolSet ClassMembers = Class.getClassMembers(State);
for (const SymbolRef &MemberSym : ClassMembers) {
- SymbolRef SimplifiedMemberSym = ento::simplify(State, MemberSym);
+
+ const SVal SimplifiedMemberVal = simplifyToSVal(State, MemberSym);
+ const SymbolRef SimplifiedMemberSym = SimplifiedMemberVal.getAsSymbol();
+
+ // The symbol is collapsed to a constant, check if the current State is
+ // still feasible.
+ if (const auto CI = SimplifiedMemberVal.getAs<nonloc::ConcreteInt>()) {
+ const llvm::APSInt &SV = CI->getValue();
+ const RangeSet *ClassConstraint = getConstraint(State, Class);
+ // We have found a contradiction.
+ if (ClassConstraint && !ClassConstraint->contains(SV))
+ return nullptr;
+ }
+
if (SimplifiedMemberSym && MemberSym != SimplifiedMemberSym) {
// The simplified symbol should be the member of the original Class,
// however, it might be in another existing class at the moment. We
// have to merge these classes.
+ ProgramStateRef OldState = State;
State = merge(F, State, MemberSym, SimplifiedMemberSym);
if (!State)
return nullptr;
+ // No state change, no merge happened actually.
+ if (OldState == State)
+ continue;
+
+ // Be aware that `SimplifiedMemberSym` might refer to an already dead
+ // symbol. In that case, the eqclass of that might not be the same as the
+ // eqclass of `MemberSym`. This is because the dead symbols are not
+ // preserved in the `ClassMap`, hence
+ // `find(State, SimplifiedMemberSym)` will result in a trivial eqclass
+ // compared to the eqclass of `MemberSym`.
+ // These eqclasses should be the same if `SimplifiedMemberSym` is alive.
+ // --> assert(find(State, MemberSym) == find(State, SimplifiedMemberSym))
+ //
+ // Note that `MemberSym` must be alive here since that is from the
+ // `ClassMembers` where all the symbols are alive.
+
+ // Remove the old and more complex symbol.
+ State = find(State, MemberSym).removeMember(State, MemberSym);
+
+ // Query the class constraint again b/c that may have changed during the
+ // merge above.
+ const RangeSet *ClassConstraint = getConstraint(State, Class);
+
+ // Re-evaluate an SVal with top-level `State->assume`, this ignites
+ // a RECURSIVE algorithm that will reach a FIXPOINT.
+ //
+ // About performance and complexity: Let us assume that in a State we
+ // have N non-trivial equivalence classes and that all constraints and
+ // disequality info is related to non-trivial classes. In the worst case,
+ // we can simplify only one symbol of one class in each iteration. The
+ // number of symbols in one class cannot grow b/c we replace the old
+ // symbol with the simplified one. Also, the number of the equivalence
+ // classes can decrease only, b/c the algorithm does a merge operation
+ // optionally. We need N iterations in this case to reach the fixpoint.
+ // Thus, the steps needed to be done in the worst case is proportional to
+ // N*N.
+ //
+ // This worst case scenario can be extended to that case when we have
+ // trivial classes in the constraints and in the disequality map. This
+ // case can be reduced to the case with a State where there are only
+ // non-trivial classes. This is because a merge operation on two trivial
+ // classes results in one non-trivial class.
+ State = reAssume(State, ClassConstraint, SimplifiedMemberVal);
+ if (!State)
+ return nullptr;
}
}
return State;
@@ -2173,7 +2794,7 @@ bool EquivalenceClass::isClassDataConsistent(ProgramStateRef State) {
//===----------------------------------------------------------------------===//
bool RangeConstraintManager::canReasonAbout(SVal X) const {
- Optional<nonloc::SymbolVal> SymVal = X.getAs<nonloc::SymbolVal>();
+ std::optional<nonloc::SymbolVal> SymVal = X.getAs<nonloc::SymbolVal>();
if (SymVal && SymVal->isExpression()) {
const SymExpr *SE = SymVal->getSymbol();
@@ -2248,6 +2869,22 @@ const llvm::APSInt *RangeConstraintManager::getSymVal(ProgramStateRef St,
return T ? T->getConcreteValue() : nullptr;
}
+const llvm::APSInt *RangeConstraintManager::getSymMinVal(ProgramStateRef St,
+ SymbolRef Sym) const {
+ const RangeSet *T = getConstraint(St, Sym);
+ if (!T || T->isEmpty())
+ return nullptr;
+ return &T->getMinValue();
+}
+
+const llvm::APSInt *RangeConstraintManager::getSymMaxVal(ProgramStateRef St,
+ SymbolRef Sym) const {
+ const RangeSet *T = getConstraint(St, Sym);
+ if (!T || T->isEmpty())
+ return nullptr;
+ return &T->getMaxValue();
+}
+
//===----------------------------------------------------------------------===//
// Remove dead symbols from existing constraints
//===----------------------------------------------------------------------===//
@@ -2630,6 +3267,13 @@ void RangeConstraintManager::printJson(raw_ostream &Out, ProgramStateRef State,
printDisequalities(Out, State, NL, Space, IsDot);
}
+void RangeConstraintManager::printValue(raw_ostream &Out, ProgramStateRef State,
+ SymbolRef Sym) {
+ const RangeSet RS = getRange(State, Sym);
+ Out << RS.getBitWidth() << (RS.isUnsigned() ? "u:" : "s:");
+ RS.dump(Out);
+}
+
static std::string toString(const SymbolRef &Sym) {
std::string S;
llvm::raw_string_ostream O(S);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
index d227c025fb20..4bbe933be212 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
@@ -41,49 +41,55 @@ ProgramStateRef RangedConstraintManager::assumeSym(ProgramStateRef State,
return assumeSymRel(State, SIE->getLHS(), op, SIE->getRHS());
}
- } else if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(Sym)) {
+ // Handle adjustment with non-comparison ops.
+ const llvm::APSInt &Zero = getBasicVals().getValue(0, SIE->getType());
+ return assumeSymRel(State, SIE, (Assumption ? BO_NE : BO_EQ), Zero);
+ }
+
+ if (const auto *SSE = dyn_cast<SymSymExpr>(Sym)) {
BinaryOperator::Opcode Op = SSE->getOpcode();
- assert(BinaryOperator::isComparisonOp(Op));
-
- // We convert equality operations for pointers only.
- if (Loc::isLocType(SSE->getLHS()->getType()) &&
- Loc::isLocType(SSE->getRHS()->getType())) {
- // Translate "a != b" to "(b - a) != 0".
- // We invert the order of the operands as a heuristic for how loop
- // conditions are usually written ("begin != end") as compared to length
- // calculations ("end - begin"). The more correct thing to do would be to
- // canonicalize "a - b" and "b - a", which would allow us to treat
- // "a != b" and "b != a" the same.
-
- SymbolManager &SymMgr = getSymbolManager();
- QualType DiffTy = SymMgr.getContext().getPointerDiffType();
- SymbolRef Subtraction =
- SymMgr.getSymSymExpr(SSE->getRHS(), BO_Sub, SSE->getLHS(), DiffTy);
-
- const llvm::APSInt &Zero = getBasicVals().getValue(0, DiffTy);
- Op = BinaryOperator::reverseComparisonOp(Op);
- if (!Assumption)
- Op = BinaryOperator::negateComparisonOp(Op);
- return assumeSymRel(State, Subtraction, Op, Zero);
- }
+ if (BinaryOperator::isComparisonOp(Op)) {
+
+ // We convert equality operations for pointers only.
+ if (Loc::isLocType(SSE->getLHS()->getType()) &&
+ Loc::isLocType(SSE->getRHS()->getType())) {
+ // Translate "a != b" to "(b - a) != 0".
+ // We invert the order of the operands as a heuristic for how loop
+ // conditions are usually written ("begin != end") as compared to length
+ // calculations ("end - begin"). The more correct thing to do would be
+ // to canonicalize "a - b" and "b - a", which would allow us to treat
+ // "a != b" and "b != a" the same.
+
+ SymbolManager &SymMgr = getSymbolManager();
+ QualType DiffTy = SymMgr.getContext().getPointerDiffType();
+ SymbolRef Subtraction =
+ SymMgr.getSymSymExpr(SSE->getRHS(), BO_Sub, SSE->getLHS(), DiffTy);
+
+ const llvm::APSInt &Zero = getBasicVals().getValue(0, DiffTy);
+ Op = BinaryOperator::reverseComparisonOp(Op);
+ if (!Assumption)
+ Op = BinaryOperator::negateComparisonOp(Op);
+ return assumeSymRel(State, Subtraction, Op, Zero);
+ }
- if (BinaryOperator::isEqualityOp(Op)) {
- SymbolManager &SymMgr = getSymbolManager();
+ if (BinaryOperator::isEqualityOp(Op)) {
+ SymbolManager &SymMgr = getSymbolManager();
- QualType ExprType = SSE->getType();
- SymbolRef CanonicalEquality =
- SymMgr.getSymSymExpr(SSE->getLHS(), BO_EQ, SSE->getRHS(), ExprType);
+ QualType ExprType = SSE->getType();
+ SymbolRef CanonicalEquality =
+ SymMgr.getSymSymExpr(SSE->getLHS(), BO_EQ, SSE->getRHS(), ExprType);
- bool WasEqual = SSE->getOpcode() == BO_EQ;
- bool IsExpectedEqual = WasEqual == Assumption;
+ bool WasEqual = SSE->getOpcode() == BO_EQ;
+ bool IsExpectedEqual = WasEqual == Assumption;
- const llvm::APSInt &Zero = getBasicVals().getValue(0, ExprType);
+ const llvm::APSInt &Zero = getBasicVals().getValue(0, ExprType);
- if (IsExpectedEqual) {
- return assumeSymNE(State, CanonicalEquality, Zero, Zero);
- }
+ if (IsExpectedEqual) {
+ return assumeSymNE(State, CanonicalEquality, Zero, Zero);
+ }
- return assumeSymEQ(State, CanonicalEquality, Zero, Zero);
+ return assumeSymEQ(State, CanonicalEquality, Zero, Zero);
+ }
}
}
@@ -226,9 +232,13 @@ void RangedConstraintManager::computeAdjustment(SymbolRef &Sym,
}
}
-SymbolRef simplify(ProgramStateRef State, SymbolRef Sym) {
+SVal simplifyToSVal(ProgramStateRef State, SymbolRef Sym) {
SValBuilder &SVB = State->getStateManager().getSValBuilder();
- SVal SimplifiedVal = SVB.simplifySVal(State, SVB.makeSymbolVal(Sym));
+ return SVB.simplifySVal(State, SVB.makeSymbolVal(Sym));
+}
+
+SymbolRef simplify(ProgramStateRef State, SymbolRef Sym) {
+ SVal SimplifiedVal = simplifyToSVal(State, Sym);
if (SymbolRef SimplifiedSym = SimplifiedVal.getAsSymbol())
return SimplifiedSym;
return Sym;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
index 4ffa1aacb41f..da9a1a1a4d1f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -28,8 +28,9 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "llvm/ADT/ImmutableMap.h"
-#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/raw_ostream.h"
+#include <optional>
#include <utility>
using namespace clang;
@@ -62,8 +63,8 @@ private:
: P(r, k), Data(offset) {
assert(r && "Must have known regions.");
assert(getOffset() == offset && "Failed to store offset");
- assert((r == r->getBaseRegion() || isa<ObjCIvarRegion>(r) ||
- isa <CXXDerivedObjectRegion>(r)) &&
+ assert((r == r->getBaseRegion() ||
+ isa<ObjCIvarRegion, CXXDerivedObjectRegion>(r)) &&
"Not a base");
}
public:
@@ -212,11 +213,11 @@ public:
removeBinding(R, BindingKey::Default);
}
- Optional<SVal> getDirectBinding(const MemRegion *R) const;
+ std::optional<SVal> getDirectBinding(const MemRegion *R) const;
/// getDefaultBinding - Returns an SVal* representing an optional default
/// binding associated with a region and its subregions.
- Optional<SVal> getDefaultBinding(const MemRegion *R) const;
+ std::optional<SVal> getDefaultBinding(const MemRegion *R) const;
/// Return the internal tree as a Store.
Store asStore() const {
@@ -231,7 +232,7 @@ public:
void printJson(raw_ostream &Out, const char *NL = "\n",
unsigned int Space = 0, bool IsDot = false) const {
- for (iterator I = begin(); I != end(); ++I) {
+ for (iterator I = begin(), E = end(); I != E; ++I) {
// TODO: We might need a .printJson for I.getKey() as well.
Indent(Out, Space, IsDot)
<< "{ \"cluster\": \"" << I.getKey() << "\", \"pointer\": \""
@@ -239,18 +240,19 @@ public:
++Space;
const ClusterBindings &CB = I.getData();
- for (ClusterBindings::iterator CI = CB.begin(); CI != CB.end(); ++CI) {
+ for (ClusterBindings::iterator CI = CB.begin(), CE = CB.end(); CI != CE;
+ ++CI) {
Indent(Out, Space, IsDot) << "{ " << CI.getKey() << ", \"value\": ";
CI.getData().printJson(Out, /*AddQuotes=*/true);
Out << " }";
- if (std::next(CI) != CB.end())
+ if (std::next(CI) != CE)
Out << ',';
Out << NL;
}
--Space;
Indent(Out, Space, IsDot) << "]}";
- if (std::next(I) != end())
+ if (std::next(I) != E)
Out << ',';
Out << NL;
}
@@ -262,12 +264,16 @@ public:
typedef const RegionBindingsRef& RegionBindingsConstRef;
-Optional<SVal> RegionBindingsRef::getDirectBinding(const MemRegion *R) const {
- return Optional<SVal>::create(lookup(R, BindingKey::Direct));
+std::optional<SVal>
+RegionBindingsRef::getDirectBinding(const MemRegion *R) const {
+ const SVal *V = lookup(R, BindingKey::Direct);
+ return V ? std::optional<SVal>(*V) : std::nullopt;
}
-Optional<SVal> RegionBindingsRef::getDefaultBinding(const MemRegion *R) const {
- return Optional<SVal>::create(lookup(R, BindingKey::Default));
+std::optional<SVal>
+RegionBindingsRef::getDefaultBinding(const MemRegion *R) const {
+ const SVal *V = lookup(R, BindingKey::Default);
+ return V ? std::optional<SVal>(*V) : std::nullopt;
}
RegionBindingsRef RegionBindingsRef::addBinding(BindingKey K, SVal V) const {
@@ -318,29 +324,6 @@ RegionBindingsRef RegionBindingsRef::removeBinding(const MemRegion *R,
}
//===----------------------------------------------------------------------===//
-// Fine-grained control of RegionStoreManager.
-//===----------------------------------------------------------------------===//
-
-namespace {
-struct minimal_features_tag {};
-struct maximal_features_tag {};
-
-class RegionStoreFeatures {
- bool SupportsFields;
-public:
- RegionStoreFeatures(minimal_features_tag) :
- SupportsFields(false) {}
-
- RegionStoreFeatures(maximal_features_tag) :
- SupportsFields(true) {}
-
- void enableFields(bool t) { SupportsFields = t; }
-
- bool supportsFields() const { return SupportsFields; }
-};
-}
-
-//===----------------------------------------------------------------------===//
// Main RegionStore logic.
//===----------------------------------------------------------------------===//
@@ -349,8 +332,6 @@ class InvalidateRegionsWorker;
class RegionStoreManager : public StoreManager {
public:
- const RegionStoreFeatures Features;
-
RegionBindings::Factory RBFactory;
mutable ClusterBindings::Factory CBFactory;
@@ -370,6 +351,16 @@ private:
/// To disable all small-struct-dependent behavior, set the option to "0".
unsigned SmallStructLimit;
+ /// The largest number of element an array can have and still be
+ /// considered "small".
+ ///
+ /// This is currently used to decide whether or not it is worth "forcing" a
+ /// LazyCompoundVal on bind.
+ ///
+ /// This is controlled by 'region-store-small-struct-limit' option.
+ /// To disable all small-struct-dependent behavior, set the option to "0".
+ unsigned SmallArrayLimit;
+
/// A helper used to populate the work list with the given set of
/// regions.
void populateWorkList(InvalidateRegionsWorker &W,
@@ -377,16 +368,15 @@ private:
InvalidatedRegions *TopLevelRegions);
public:
- RegionStoreManager(ProgramStateManager& mgr, const RegionStoreFeatures &f)
- : StoreManager(mgr), Features(f),
- RBFactory(mgr.getAllocator()), CBFactory(mgr.getAllocator()),
- SmallStructLimit(0) {
+ RegionStoreManager(ProgramStateManager &mgr)
+ : StoreManager(mgr), RBFactory(mgr.getAllocator()),
+ CBFactory(mgr.getAllocator()), SmallStructLimit(0), SmallArrayLimit(0) {
ExprEngine &Eng = StateMgr.getOwningEngine();
AnalyzerOptions &Options = Eng.getAnalysisManager().options;
SmallStructLimit = Options.RegionStoreSmallStructLimit;
+ SmallArrayLimit = Options.RegionStoreSmallArrayLimit;
}
-
/// setImplicitDefaultValue - Set the default binding for the provided
/// MemRegion to the value implicitly defined for compound literals when
/// the value is not specified.
@@ -437,6 +427,15 @@ public:
RegionBindingsRef removeSubRegionBindings(RegionBindingsConstRef B,
const SubRegion *R);
+ std::optional<SVal>
+ getConstantValFromConstArrayInitializer(RegionBindingsConstRef B,
+ const ElementRegion *R);
+ std::optional<SVal>
+ getSValFromInitListExpr(const InitListExpr *ILE,
+ const SmallVector<uint64_t, 2> &ConcreteOffsets,
+ QualType ElemT);
+ SVal getSValFromStringLiteral(const StringLiteral *SL, uint64_t Offset,
+ QualType ElemT);
public: // Part of public interface to class.
@@ -490,12 +489,11 @@ public: // Part of public interface to class.
/// than using a Default binding at the base of the entire region. This is a
/// heuristic attempting to avoid building long chains of LazyCompoundVals.
///
- /// \returns The updated store bindings, or \c None if binding non-lazily
- /// would be too expensive.
- Optional<RegionBindingsRef> tryBindSmallStruct(RegionBindingsConstRef B,
- const TypedValueRegion *R,
- const RecordDecl *RD,
- nonloc::LazyCompoundVal LCV);
+ /// \returns The updated store bindings, or \c std::nullopt if binding
+ /// non-lazily would be too expensive.
+ std::optional<RegionBindingsRef>
+ tryBindSmallStruct(RegionBindingsConstRef B, const TypedValueRegion *R,
+ const RecordDecl *RD, nonloc::LazyCompoundVal LCV);
/// BindStruct - Bind a compound value to a structure.
RegionBindingsRef bindStruct(RegionBindingsConstRef B,
@@ -505,6 +503,10 @@ public: // Part of public interface to class.
RegionBindingsRef bindVector(RegionBindingsConstRef B,
const TypedValueRegion* R, SVal V);
+ std::optional<RegionBindingsRef>
+ tryBindSmallArray(RegionBindingsConstRef B, const TypedValueRegion *R,
+ const ArrayType *AT, nonloc::LazyCompoundVal LCV);
+
RegionBindingsRef bindArray(RegionBindingsConstRef B,
const TypedValueRegion* R,
SVal V);
@@ -550,7 +552,7 @@ public: // Part of public interface to class.
return getBinding(getRegionBindings(S), L, T);
}
- Optional<SVal> getDefaultBinding(Store S, const MemRegion *R) override {
+ std::optional<SVal> getDefaultBinding(Store S, const MemRegion *R) override {
RegionBindingsRef B = getRegionBindings(S);
// Default bindings are always applied over a base region so look up the
// base region's default binding, otherwise the lookup will fail when R
@@ -591,10 +593,10 @@ public: // Part of public interface to class.
///
/// Note that callers may need to specially handle LazyCompoundVals, which
/// are returned as is in case the caller needs to treat them differently.
- Optional<SVal> getBindingForDerivedDefaultValue(RegionBindingsConstRef B,
- const MemRegion *superR,
- const TypedValueRegion *R,
- QualType Ty);
+ std::optional<SVal>
+ getBindingForDerivedDefaultValue(RegionBindingsConstRef B,
+ const MemRegion *superR,
+ const TypedValueRegion *R, QualType Ty);
/// Get the state and region whose binding this region \p R corresponds to.
///
@@ -610,6 +612,10 @@ public: // Part of public interface to class.
/// The precise value of "interesting" is determined for the purposes of
/// RegionStore's internal analysis. It must always contain all regions and
/// symbols, but may omit constants and other kinds of SVal.
+ ///
+ /// In contrast to compound values, LazyCompoundVals are also added
+ /// to the 'interesting values' list in addition to the child interesting
+ /// values.
const SValListTy &getInterestingValues(nonloc::LazyCompoundVal LCV);
//===------------------------------------------------------------------===//
@@ -640,16 +646,13 @@ public: // Part of public interface to class.
void iterBindings(Store store, BindingsHandler& f) override {
RegionBindingsRef B = getRegionBindings(store);
- for (RegionBindingsRef::iterator I = B.begin(), E = B.end(); I != E; ++I) {
- const ClusterBindings &Cluster = I.getData();
- for (ClusterBindings::iterator CI = Cluster.begin(), CE = Cluster.end();
- CI != CE; ++CI) {
- const BindingKey &K = CI.getKey();
- if (!K.isDirect())
+ for (const auto &[Region, Cluster] : B) {
+ for (const auto &[Key, Value] : Cluster) {
+ if (!Key.isDirect())
continue;
- if (const SubRegion *R = dyn_cast<SubRegion>(K.getRegion())) {
+ if (const SubRegion *R = dyn_cast<SubRegion>(Key.getRegion())) {
// FIXME: Possibly incorporate the offset?
- if (!f.HandleBinding(*this, store, R, CI.getData()))
+ if (!f.HandleBinding(*this, store, R, Value))
return;
}
}
@@ -665,18 +668,9 @@ public: // Part of public interface to class.
std::unique_ptr<StoreManager>
ento::CreateRegionStoreManager(ProgramStateManager &StMgr) {
- RegionStoreFeatures F = maximal_features_tag();
- return std::make_unique<RegionStoreManager>(StMgr, F);
-}
-
-std::unique_ptr<StoreManager>
-ento::CreateFieldsOnlyRegionStoreManager(ProgramStateManager &StMgr) {
- RegionStoreFeatures F = minimal_features_tag();
- F.enableFields(true);
- return std::make_unique<RegionStoreManager>(StMgr, F);
+ return std::make_unique<RegionStoreManager>(StMgr);
}
-
//===----------------------------------------------------------------------===//
// Region Cluster analysis.
//===----------------------------------------------------------------------===//
@@ -868,7 +862,7 @@ collectSubRegionBindings(SmallVectorImpl<BindingPair> &Bindings,
// Find the length (in bits) of the region being invalidated.
uint64_t Length = UINT64_MAX;
SVal Extent = Top->getMemRegionManager().getStaticSize(Top, SVB);
- if (Optional<nonloc::ConcreteInt> ExtentCI =
+ if (std::optional<nonloc::ConcreteInt> ExtentCI =
Extent.getAs<nonloc::ConcreteInt>()) {
const llvm::APSInt &ExtentInt = ExtentCI->getValue();
assert(ExtentInt.isNonNegative() || ExtentInt.isUnsigned());
@@ -879,9 +873,8 @@ collectSubRegionBindings(SmallVectorImpl<BindingPair> &Bindings,
Length = FR->getDecl()->getBitWidthValue(SVB.getContext());
}
- for (ClusterBindings::iterator I = Cluster.begin(), E = Cluster.end();
- I != E; ++I) {
- BindingKey NextKey = I.getKey();
+ for (const auto &StoreEntry : Cluster) {
+ BindingKey NextKey = StoreEntry.first;
if (NextKey.getRegion() == TopKey.getRegion()) {
// FIXME: This doesn't catch the case where we're really invalidating a
// region with a symbolic offset. Example:
@@ -892,7 +885,7 @@ collectSubRegionBindings(SmallVectorImpl<BindingPair> &Bindings,
NextKey.getOffset() - TopKey.getOffset() < Length) {
// Case 1: The next binding is inside the region we're invalidating.
// Include it.
- Bindings.push_back(*I);
+ Bindings.push_back(StoreEntry);
} else if (NextKey.getOffset() == TopKey.getOffset()) {
// Case 2: The next binding is at the same offset as the region we're
@@ -902,7 +895,7 @@ collectSubRegionBindings(SmallVectorImpl<BindingPair> &Bindings,
// FIXME: This is probably incorrect; consider invalidating an outer
// struct whose first field is bound to a LazyCompoundVal.
if (IncludeAllDefaultBindings || NextKey.isDirect())
- Bindings.push_back(*I);
+ Bindings.push_back(StoreEntry);
}
} else if (NextKey.hasSymbolicOffset()) {
@@ -913,13 +906,13 @@ collectSubRegionBindings(SmallVectorImpl<BindingPair> &Bindings,
// we'll be conservative and include it.
if (IncludeAllDefaultBindings || NextKey.isDirect())
if (isCompatibleWithFields(NextKey, FieldsInSymbolicSubregions))
- Bindings.push_back(*I);
+ Bindings.push_back(StoreEntry);
} else if (const SubRegion *BaseSR = dyn_cast<SubRegion>(Base)) {
// Case 4: The next key is symbolic, but we changed a known
// super-region. In this case the binding is certainly included.
if (BaseSR->isSubRegionOf(Top))
if (isCompatibleWithFields(NextKey, FieldsInSymbolicSubregions))
- Bindings.push_back(*I);
+ Bindings.push_back(StoreEntry);
}
}
}
@@ -961,10 +954,8 @@ RegionStoreManager::removeSubRegionBindings(RegionBindingsConstRef B,
/*IncludeAllDefaultBindings=*/false);
ClusterBindingsRef Result(*Cluster, CBFactory);
- for (SmallVectorImpl<BindingPair>::const_iterator I = Bindings.begin(),
- E = Bindings.end();
- I != E; ++I)
- Result = Result.remove(I->first);
+ for (BindingKey Key : llvm::make_first_range(Bindings))
+ Result = Result.remove(Key);
// If we're invalidating a region with a symbolic offset, we need to make sure
// we don't treat the base region as uninitialized anymore.
@@ -1040,15 +1031,14 @@ void InvalidateRegionsWorker::VisitBinding(SVal V) {
}
// Is it a LazyCompoundVal? All references get invalidated as well.
- if (Optional<nonloc::LazyCompoundVal> LCS =
+ if (std::optional<nonloc::LazyCompoundVal> LCS =
V.getAs<nonloc::LazyCompoundVal>()) {
- const RegionStoreManager::SValListTy &Vals = RM.getInterestingValues(*LCS);
-
- for (RegionStoreManager::SValListTy::const_iterator I = Vals.begin(),
- E = Vals.end();
- I != E; ++I)
- VisitBinding(*I);
+ // `getInterestingValues()` returns SVals contained within LazyCompoundVals,
+ // so there is no need to visit them.
+ for (SVal V : RM.getInterestingValues(*LCS))
+ if (!isa<nonloc::LazyCompoundVal>(V))
+ VisitBinding(V);
return;
}
@@ -1062,8 +1052,8 @@ void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
RegionAndSymbolInvalidationTraits::TK_PreserveContents);
if (C) {
- for (ClusterBindings::iterator I = C->begin(), E = C->end(); I != E; ++I)
- VisitBinding(I.getData());
+ for (SVal Val : llvm::make_second_range(*C))
+ VisitBinding(Val);
// Invalidate regions contents.
if (!PreserveRegionsContents)
@@ -1099,10 +1089,8 @@ void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
// BlockDataRegion? If so, invalidate captured variables that are passed
// by reference.
if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(baseR)) {
- for (BlockDataRegion::referenced_vars_iterator
- BI = BR->referenced_vars_begin(), BE = BR->referenced_vars_end() ;
- BI != BE; ++BI) {
- const VarRegion *VR = BI.getCapturedRegion();
+ for (auto Var : BR->referenced_vars()) {
+ const VarRegion *VR = Var.getCapturedRegion();
const VarDecl *VD = VR->getDecl();
if (VD->hasAttr<BlocksAttr>() || !VD->hasLocalStorage()) {
AddToWorkList(VR);
@@ -1114,7 +1102,7 @@ void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
// a pointer value, but the thing pointed by that pointer may
// get invalidated.
SVal V = RM.getBinding(B, loc::MemRegionVal(VR));
- if (Optional<Loc> L = V.getAs<Loc>()) {
+ if (std::optional<Loc> L = V.getAs<Loc>()) {
if (const MemRegion *LR = L->getAsRegion())
AddToWorkList(LR);
}
@@ -1135,7 +1123,7 @@ void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
if (Regions)
Regions->push_back(baseR);
- if (isa<AllocaRegion>(baseR) || isa<SymbolicRegion>(baseR)) {
+ if (isa<AllocaRegion, SymbolicRegion>(baseR)) {
// Invalidate the region by setting its default value to
// conjured symbol. The type of the symbol is irrelevant.
DefinedOrUnknownSVal V =
@@ -1174,7 +1162,7 @@ void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
if (doNotInvalidateSuperRegion) {
// We are not doing blank invalidation of the whole array region so we
// have to manually invalidate each elements.
- Optional<uint64_t> NumElements;
+ std::optional<uint64_t> NumElements;
// Compute lower and upper offsets for region within array.
if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
@@ -1206,11 +1194,9 @@ void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
if (!C)
goto conjure_default;
- for (ClusterBindings::iterator I = C->begin(), E = C->end(); I != E;
- ++I) {
- const BindingKey &BK = I.getKey();
- Optional<uint64_t> ROffset =
- BK.hasSymbolicOffset() ? Optional<uint64_t>() : BK.getOffset();
+ for (const auto &[BK, V] : *C) {
+ std::optional<uint64_t> ROffset =
+ BK.hasSymbolicOffset() ? std::optional<uint64_t>() : BK.getOffset();
// Check offset is not symbolic and within array's boundaries.
// Handles arrays of 0 elements and of 0-sized elements as well.
@@ -1219,12 +1205,11 @@ void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
(UpperOverflow &&
(*ROffset >= LowerOffset || *ROffset < UpperOffset)) ||
(LowerOffset == UpperOffset && *ROffset == LowerOffset))) {
- B = B.removeBinding(I.getKey());
+ B = B.removeBinding(BK);
// Bound symbolic regions need to be invalidated for dead symbol
// detection.
- SVal V = I.getData();
const MemRegion *R = V.getAsRegion();
- if (R && isa<SymbolicRegion>(R))
+ if (isa_and_nonnull<SymbolicRegion>(R))
VisitBinding(V);
}
}
@@ -1295,21 +1280,12 @@ RegionStoreManager::invalidateGlobalRegion(MemRegion::Kind K,
void RegionStoreManager::populateWorkList(InvalidateRegionsWorker &W,
ArrayRef<SVal> Values,
InvalidatedRegions *TopLevelRegions) {
- for (ArrayRef<SVal>::iterator I = Values.begin(),
- E = Values.end(); I != E; ++I) {
- SVal V = *I;
- if (Optional<nonloc::LazyCompoundVal> LCS =
- V.getAs<nonloc::LazyCompoundVal>()) {
-
- const SValListTy &Vals = getInterestingValues(*LCS);
-
- for (SValListTy::const_iterator I = Vals.begin(),
- E = Vals.end(); I != E; ++I) {
- // Note: the last argument is false here because these are
- // non-top-level regions.
- if (const MemRegion *R = (*I).getAsRegion())
+ for (SVal V : Values) {
+ if (auto LCS = V.getAs<nonloc::LazyCompoundVal>()) {
+ for (SVal S : getInterestingValues(*LCS))
+ if (const MemRegion *R = S.getAsRegion())
W.AddToWorkList(R);
- }
+
continue;
}
@@ -1365,11 +1341,11 @@ RegionStoreManager::invalidateRegions(Store store,
case GFK_All:
B = invalidateGlobalRegion(MemRegion::GlobalInternalSpaceRegionKind,
Ex, Count, LCtx, B, Invalidated);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case GFK_SystemOnly:
B = invalidateGlobalRegion(MemRegion::GlobalSystemSpaceRegionKind,
Ex, Count, LCtx, B, Invalidated);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case GFK_None:
break;
}
@@ -1388,10 +1364,10 @@ RegionStoreManager::invalidateRegions(Store store,
/// the array). This is called by ExprEngine when evaluating casts
/// from arrays to pointers.
SVal RegionStoreManager::ArrayToPointer(Loc Array, QualType T) {
- if (Array.getAs<loc::ConcreteInt>())
+ if (isa<loc::ConcreteInt>(Array))
return Array;
- if (!Array.getAs<loc::MemRegionVal>())
+ if (!isa<loc::MemRegionVal>(Array))
return UnknownVal();
const SubRegion *R =
@@ -1405,8 +1381,8 @@ SVal RegionStoreManager::ArrayToPointer(Loc Array, QualType T) {
//===----------------------------------------------------------------------===//
SVal RegionStoreManager::getBinding(RegionBindingsConstRef B, Loc L, QualType T) {
- assert(!L.getAs<UnknownVal>() && "location unknown");
- assert(!L.getAs<UndefinedVal>() && "location undefined");
+ assert(!isa<UnknownVal>(L) && "location unknown");
+ assert(!isa<UndefinedVal>(L) && "location undefined");
// For access to concrete addresses, return UnknownVal. Checks
// for null dereferences (and similar errors) are done by checkers, not
@@ -1427,19 +1403,20 @@ SVal RegionStoreManager::getBinding(RegionBindingsConstRef B, Loc L, QualType T)
return UnknownVal();
}
- if (!isa<TypedValueRegion>(MR)) {
- if (T.isNull()) {
- if (const TypedRegion *TR = dyn_cast<TypedRegion>(MR))
- T = TR->getLocationType()->getPointeeType();
- else if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(MR))
- T = SR->getSymbol()->getType()->getPointeeType();
- }
- assert(!T.isNull() && "Unable to auto-detect binding type!");
- assert(!T->isVoidType() && "Attempting to dereference a void pointer!");
- MR = GetElementZeroRegion(cast<SubRegion>(MR), T);
- } else {
- T = cast<TypedValueRegion>(MR)->getValueType();
+ // Auto-detect the binding type.
+ if (T.isNull()) {
+ if (const auto *TVR = dyn_cast<TypedValueRegion>(MR))
+ T = TVR->getValueType();
+ else if (const auto *TR = dyn_cast<TypedRegion>(MR))
+ T = TR->getLocationType()->getPointeeType();
+ else if (const auto *SR = dyn_cast<SymbolicRegion>(MR))
+ T = SR->getPointeeStaticType();
}
+ assert(!T.isNull() && "Unable to auto-detect binding type!");
+ assert(!T->isVoidType() && "Attempting to dereference a void pointer!");
+
+ if (!isa<TypedValueRegion>(MR))
+ MR = GetElementZeroRegion(cast<SubRegion>(MR), T);
// FIXME: Perhaps this method should just take a 'const MemRegion*' argument
// instead of 'Loc', and have the other Loc cases handled at a higher level.
@@ -1548,16 +1525,17 @@ static QualType getUnderlyingType(const SubRegion *R) {
///
/// Note that unlike RegionStoreManager::findLazyBinding, this will not search
/// for lazy bindings for super-regions of \p R.
-static Optional<nonloc::LazyCompoundVal>
+static std::optional<nonloc::LazyCompoundVal>
getExistingLazyBinding(SValBuilder &SVB, RegionBindingsConstRef B,
const SubRegion *R, bool AllowSubregionBindings) {
- Optional<SVal> V = B.getDefaultBinding(R);
+ std::optional<SVal> V = B.getDefaultBinding(R);
if (!V)
- return None;
+ return std::nullopt;
- Optional<nonloc::LazyCompoundVal> LCV = V->getAs<nonloc::LazyCompoundVal>();
+ std::optional<nonloc::LazyCompoundVal> LCV =
+ V->getAs<nonloc::LazyCompoundVal>();
if (!LCV)
- return None;
+ return std::nullopt;
// If the LCV is for a subregion, the types might not match, and we shouldn't
// reuse the binding.
@@ -1566,7 +1544,7 @@ getExistingLazyBinding(SValBuilder &SVB, RegionBindingsConstRef B,
!RegionTy->isVoidPointerType()) {
QualType SourceRegionTy = LCV->getRegion()->getValueType();
if (!SVB.getContext().hasSameUnqualifiedType(RegionTy, SourceRegionTy))
- return None;
+ return std::nullopt;
}
if (!AllowSubregionBindings) {
@@ -1576,20 +1554,19 @@ getExistingLazyBinding(SValBuilder &SVB, RegionBindingsConstRef B,
collectSubRegionBindings(Bindings, SVB, *B.lookup(R->getBaseRegion()), R,
/*IncludeAllDefaultBindings=*/true);
if (Bindings.size() > 1)
- return None;
+ return std::nullopt;
}
return *LCV;
}
-
std::pair<Store, const SubRegion *>
RegionStoreManager::findLazyBinding(RegionBindingsConstRef B,
const SubRegion *R,
const SubRegion *originalRegion) {
if (originalRegion != R) {
- if (Optional<nonloc::LazyCompoundVal> V =
- getExistingLazyBinding(svalBuilder, B, R, true))
+ if (std::optional<nonloc::LazyCompoundVal> V =
+ getExistingLazyBinding(svalBuilder, B, R, true))
return std::make_pair(V->getStore(), V->getRegion());
}
@@ -1625,10 +1602,313 @@ RegionStoreManager::findLazyBinding(RegionBindingsConstRef B,
return Result;
}
+/// This is a helper function for `getConstantValFromConstArrayInitializer`.
+///
+/// Return an array of extents of the declared array type.
+///
+/// E.g. for `int x[1][2][3];` returns { 1, 2, 3 }.
+static SmallVector<uint64_t, 2>
+getConstantArrayExtents(const ConstantArrayType *CAT) {
+ assert(CAT && "ConstantArrayType should not be null");
+ CAT = cast<ConstantArrayType>(CAT->getCanonicalTypeInternal());
+ SmallVector<uint64_t, 2> Extents;
+ do {
+ Extents.push_back(CAT->getSize().getZExtValue());
+ } while ((CAT = dyn_cast<ConstantArrayType>(CAT->getElementType())));
+ return Extents;
+}
+
+/// This is a helper function for `getConstantValFromConstArrayInitializer`.
+///
+/// Return an array of offsets from nested ElementRegions and a root base
+/// region. The array is never empty and a base region is never null.
+///
+/// E.g. for `Element{Element{Element{VarRegion},1},2},3}` returns { 3, 2, 1 }.
+/// This represents an access through indirection: `arr[1][2][3];`
+///
+/// \param ER The given (possibly nested) ElementRegion.
+///
+/// \note The result array is in the reverse order of indirection expression:
+/// arr[1][2][3] -> { 3, 2, 1 }. This helps to provide complexity O(n), where n
+/// is a number of indirections. It may not affect performance in real-life
+/// code, though.
+static std::pair<SmallVector<SVal, 2>, const MemRegion *>
+getElementRegionOffsetsWithBase(const ElementRegion *ER) {
+ assert(ER && "ConstantArrayType should not be null");
+ const MemRegion *Base;
+ SmallVector<SVal, 2> SValOffsets;
+ do {
+ SValOffsets.push_back(ER->getIndex());
+ Base = ER->getSuperRegion();
+ ER = dyn_cast<ElementRegion>(Base);
+ } while (ER);
+ return {SValOffsets, Base};
+}
+
+/// This is a helper function for `getConstantValFromConstArrayInitializer`.
+///
+/// Convert array of offsets from `SVal` to `uint64_t` in consideration of
+/// respective array extents.
+/// \param SrcOffsets [in] The array of offsets of type `SVal` in reversed
+/// order (expectedly received from `getElementRegionOffsetsWithBase`).
+/// \param ArrayExtents [in] The array of extents.
+/// \param DstOffsets [out] The array of offsets of type `uint64_t`.
+/// \returns:
+/// - `std::nullopt` for successful convertion.
+/// - `UndefinedVal` or `UnknownVal` otherwise. It's expected that this SVal
+/// will be returned as a suitable value of the access operation.
+/// which should be returned as a correct
+///
+/// \example:
+/// const int arr[10][20][30] = {}; // ArrayExtents { 10, 20, 30 }
+/// int x1 = arr[4][5][6]; // SrcOffsets { NonLoc(6), NonLoc(5), NonLoc(4) }
+/// // DstOffsets { 4, 5, 6 }
+/// // returns std::nullopt
+/// int x2 = arr[42][5][-6]; // returns UndefinedVal
+/// int x3 = arr[4][5][x2]; // returns UnknownVal
+static std::optional<SVal>
+convertOffsetsFromSvalToUnsigneds(const SmallVector<SVal, 2> &SrcOffsets,
+ const SmallVector<uint64_t, 2> ArrayExtents,
+ SmallVector<uint64_t, 2> &DstOffsets) {
+ // Check offsets for being out of bounds.
+ // C++20 [expr.add] 7.6.6.4 (excerpt):
+ // If P points to an array element i of an array object x with n
+ // elements, where i < 0 or i > n, the behavior is undefined.
+ // Dereferencing is not allowed on the "one past the last
+ // element", when i == n.
+ // Example:
+ // const int arr[3][2] = {{1, 2}, {3, 4}};
+ // arr[0][0]; // 1
+ // arr[0][1]; // 2
+ // arr[0][2]; // UB
+ // arr[1][0]; // 3
+ // arr[1][1]; // 4
+ // arr[1][-1]; // UB
+ // arr[2][0]; // 0
+ // arr[2][1]; // 0
+ // arr[-2][0]; // UB
+ DstOffsets.resize(SrcOffsets.size());
+ auto ExtentIt = ArrayExtents.begin();
+ auto OffsetIt = DstOffsets.begin();
+ // Reverse `SValOffsets` to make it consistent with `ArrayExtents`.
+ for (SVal V : llvm::reverse(SrcOffsets)) {
+ if (auto CI = V.getAs<nonloc::ConcreteInt>()) {
+ // When offset is out of array's bounds, result is UB.
+ const llvm::APSInt &Offset = CI->getValue();
+ if (Offset.isNegative() || Offset.uge(*(ExtentIt++)))
+ return UndefinedVal();
+ // Store index in a reversive order.
+ *(OffsetIt++) = Offset.getZExtValue();
+ continue;
+ }
+ // Symbolic index presented. Return Unknown value.
+ // FIXME: We also need to take ElementRegions with symbolic indexes into
+ // account.
+ return UnknownVal();
+ }
+ return std::nullopt;
+}
+
+std::optional<SVal> RegionStoreManager::getConstantValFromConstArrayInitializer(
+ RegionBindingsConstRef B, const ElementRegion *R) {
+ assert(R && "ElementRegion should not be null");
+
+ // Treat an n-dimensional array.
+ SmallVector<SVal, 2> SValOffsets;
+ const MemRegion *Base;
+ std::tie(SValOffsets, Base) = getElementRegionOffsetsWithBase(R);
+ const VarRegion *VR = dyn_cast<VarRegion>(Base);
+ if (!VR)
+ return std::nullopt;
+
+ assert(!SValOffsets.empty() && "getElementRegionOffsets guarantees the "
+ "offsets vector is not empty.");
+
+ // Check if the containing array has an initialized value that we can trust.
+ // We can trust a const value or a value of a global initializer in main().
+ const VarDecl *VD = VR->getDecl();
+ if (!VD->getType().isConstQualified() &&
+ !R->getElementType().isConstQualified() &&
+ (!B.isMainAnalysis() || !VD->hasGlobalStorage()))
+ return std::nullopt;
+
+ // Array's declaration should have `ConstantArrayType` type, because only this
+ // type contains an array extent. It may happen that array type can be of
+ // `IncompleteArrayType` type. To get the declaration of `ConstantArrayType`
+ // type, we should find the declaration in the redeclarations chain that has
+ // the initialization expression.
+ // NOTE: `getAnyInitializer` has an out-parameter, which returns a new `VD`
+ // from which an initializer is obtained. We replace current `VD` with the new
+ // `VD`. If the return value of the function is null than `VD` won't be
+ // replaced.
+ const Expr *Init = VD->getAnyInitializer(VD);
+ // NOTE: If `Init` is non-null, then a new `VD` is non-null for sure. So check
+ // `Init` for null only and don't worry about the replaced `VD`.
+ if (!Init)
+ return std::nullopt;
+
+ // Array's declaration should have ConstantArrayType type, because only this
+ // type contains an array extent.
+ const ConstantArrayType *CAT = Ctx.getAsConstantArrayType(VD->getType());
+ if (!CAT)
+ return std::nullopt;
+
+ // Get array extents.
+ SmallVector<uint64_t, 2> Extents = getConstantArrayExtents(CAT);
+
+ // The number of offsets should equal to the numbers of extents,
+ // otherwise wrong type punning occurred. For instance:
+ // int arr[1][2][3];
+ // auto ptr = (int(*)[42])arr;
+ // auto x = ptr[4][2]; // UB
+ // FIXME: Should return UndefinedVal.
+ if (SValOffsets.size() != Extents.size())
+ return std::nullopt;
+
+ SmallVector<uint64_t, 2> ConcreteOffsets;
+ if (std::optional<SVal> V = convertOffsetsFromSvalToUnsigneds(
+ SValOffsets, Extents, ConcreteOffsets))
+ return *V;
+
+ // Handle InitListExpr.
+ // Example:
+ // const char arr[4][2] = { { 1, 2 }, { 3 }, 4, 5 };
+ if (const auto *ILE = dyn_cast<InitListExpr>(Init))
+ return getSValFromInitListExpr(ILE, ConcreteOffsets, R->getElementType());
+
+ // Handle StringLiteral.
+ // Example:
+ // const char arr[] = "abc";
+ if (const auto *SL = dyn_cast<StringLiteral>(Init))
+ return getSValFromStringLiteral(SL, ConcreteOffsets.front(),
+ R->getElementType());
+
+ // FIXME: Handle CompoundLiteralExpr.
+
+ return std::nullopt;
+}
+
+/// Returns an SVal, if possible, for the specified position of an
+/// initialization list.
+///
+/// \param ILE The given initialization list.
+/// \param Offsets The array of unsigned offsets. E.g. for the expression
+/// `int x = arr[1][2][3];` an array should be { 1, 2, 3 }.
+/// \param ElemT The type of the result SVal expression.
+/// \return Optional SVal for the particular position in the initialization
+/// list. E.g. for the list `{{1, 2},[3, 4],{5, 6}, {}}` offsets:
+/// - {1, 1} returns SVal{4}, because it's the second position in the second
+/// sublist;
+/// - {3, 0} returns SVal{0}, because there's no explicit value at this
+/// position in the sublist.
+///
+/// NOTE: Inorder to get a valid SVal, a caller shall guarantee valid offsets
+/// for the given initialization list. Otherwise SVal can be an equivalent to 0
+/// or lead to assertion.
+std::optional<SVal> RegionStoreManager::getSValFromInitListExpr(
+ const InitListExpr *ILE, const SmallVector<uint64_t, 2> &Offsets,
+ QualType ElemT) {
+ assert(ILE && "InitListExpr should not be null");
+
+ for (uint64_t Offset : Offsets) {
+ // C++20 [dcl.init.string] 9.4.2.1:
+ // An array of ordinary character type [...] can be initialized by [...]
+ // an appropriately-typed string-literal enclosed in braces.
+ // Example:
+ // const char arr[] = { "abc" };
+ if (ILE->isStringLiteralInit())
+ if (const auto *SL = dyn_cast<StringLiteral>(ILE->getInit(0)))
+ return getSValFromStringLiteral(SL, Offset, ElemT);
+
+ // C++20 [expr.add] 9.4.17.5 (excerpt):
+ // i-th array element is value-initialized for each k < i ≤ n,
+ // where k is an expression-list size and n is an array extent.
+ if (Offset >= ILE->getNumInits())
+ return svalBuilder.makeZeroVal(ElemT);
+
+ const Expr *E = ILE->getInit(Offset);
+ const auto *IL = dyn_cast<InitListExpr>(E);
+ if (!IL)
+ // Return a constant value, if it is presented.
+ // FIXME: Support other SVals.
+ return svalBuilder.getConstantVal(E);
+
+ // Go to the nested initializer list.
+ ILE = IL;
+ }
+
+ assert(ILE);
+
+ // FIXME: Unhandeled InitListExpr sub-expression, possibly constructing an
+ // enum?
+ return std::nullopt;
+}
+
+/// Returns an SVal, if possible, for the specified position in a string
+/// literal.
+///
+/// \param SL The given string literal.
+/// \param Offset The unsigned offset. E.g. for the expression
+/// `char x = str[42];` an offset should be 42.
+/// E.g. for the string "abc" offset:
+/// - 1 returns SVal{b}, because it's the second position in the string.
+/// - 42 returns SVal{0}, because there's no explicit value at this
+/// position in the string.
+/// \param ElemT The type of the result SVal expression.
+///
+/// NOTE: We return `0` for every offset >= the literal length for array
+/// declarations, like:
+/// const char str[42] = "123"; // Literal length is 4.
+/// char c = str[41]; // Offset is 41.
+/// FIXME: Nevertheless, we can't do the same for pointer declaraions, like:
+/// const char * const str = "123"; // Literal length is 4.
+/// char c = str[41]; // Offset is 41. Returns `0`, but Undef
+/// // expected.
+/// It should be properly handled before reaching this point.
+/// The main problem is that we can't distinguish between these declarations,
+/// because in case of array we can get the Decl from VarRegion, but in case
+/// of pointer the region is a StringRegion, which doesn't contain a Decl.
+/// Possible solution could be passing an array extent along with the offset.
+SVal RegionStoreManager::getSValFromStringLiteral(const StringLiteral *SL,
+ uint64_t Offset,
+ QualType ElemT) {
+ assert(SL && "StringLiteral should not be null");
+ // C++20 [dcl.init.string] 9.4.2.3:
+ // If there are fewer initializers than there are array elements, each
+ // element not explicitly initialized shall be zero-initialized [dcl.init].
+ uint32_t Code = (Offset >= SL->getLength()) ? 0 : SL->getCodeUnit(Offset);
+ return svalBuilder.makeIntVal(Code, ElemT);
+}
+
+static std::optional<SVal> getDerivedSymbolForBinding(
+ RegionBindingsConstRef B, const TypedValueRegion *BaseRegion,
+ const TypedValueRegion *SubReg, const ASTContext &Ctx, SValBuilder &SVB) {
+ assert(BaseRegion);
+ QualType BaseTy = BaseRegion->getValueType();
+ QualType Ty = SubReg->getValueType();
+ if (BaseTy->isScalarType() && Ty->isScalarType()) {
+ if (Ctx.getTypeSizeInChars(BaseTy) >= Ctx.getTypeSizeInChars(Ty)) {
+ if (const std::optional<SVal> &ParentValue =
+ B.getDirectBinding(BaseRegion)) {
+ if (SymbolRef ParentValueAsSym = ParentValue->getAsSymbol())
+ return SVB.getDerivedRegionValueSymbolVal(ParentValueAsSym, SubReg);
+
+ if (ParentValue->isUndef())
+ return UndefinedVal();
+
+ // Other cases: give up. We are indexing into a larger object
+ // that has some value, but we don't know how to handle that yet.
+ return UnknownVal();
+ }
+ }
+ }
+ return std::nullopt;
+}
+
SVal RegionStoreManager::getBindingForElement(RegionBindingsConstRef B,
const ElementRegion* R) {
// Check if the region has a binding.
- if (const Optional<SVal> &V = B.getDirectBinding(R))
+ if (const std::optional<SVal> &V = B.getDirectBinding(R))
return *V;
const MemRegion* superR = R->getSuperRegion();
@@ -1636,59 +1916,21 @@ SVal RegionStoreManager::getBindingForElement(RegionBindingsConstRef B,
// Check if the region is an element region of a string literal.
if (const StringRegion *StrR = dyn_cast<StringRegion>(superR)) {
// FIXME: Handle loads from strings where the literal is treated as
- // an integer, e.g., *((unsigned int*)"hello")
+ // an integer, e.g., *((unsigned int*)"hello"). Such loads are UB according
+ // to C++20 7.2.1.11 [basic.lval].
QualType T = Ctx.getAsArrayType(StrR->getValueType())->getElementType();
if (!Ctx.hasSameUnqualifiedType(T, R->getElementType()))
return UnknownVal();
-
- const StringLiteral *Str = StrR->getStringLiteral();
- SVal Idx = R->getIndex();
- if (Optional<nonloc::ConcreteInt> CI = Idx.getAs<nonloc::ConcreteInt>()) {
- int64_t i = CI->getValue().getSExtValue();
- // Abort on string underrun. This can be possible by arbitrary
- // clients of getBindingForElement().
- if (i < 0)
+ if (const auto CI = R->getIndex().getAs<nonloc::ConcreteInt>()) {
+ const llvm::APSInt &Idx = CI->getValue();
+ if (Idx < 0)
return UndefinedVal();
- int64_t length = Str->getLength();
- // Technically, only i == length is guaranteed to be null.
- // However, such overflows should be caught before reaching this point;
- // the only time such an access would be made is if a string literal was
- // used to initialize a larger array.
- char c = (i >= length) ? '\0' : Str->getCodeUnit(i);
- return svalBuilder.makeIntVal(c, T);
- }
- } else if (const VarRegion *VR = dyn_cast<VarRegion>(superR)) {
- // Check if the containing array has an initialized value that we can trust.
- // We can trust a const value or a value of a global initializer in main().
- const VarDecl *VD = VR->getDecl();
- if (VD->getType().isConstQualified() ||
- R->getElementType().isConstQualified() ||
- (B.isMainAnalysis() && VD->hasGlobalStorage())) {
- if (const Expr *Init = VD->getAnyInitializer()) {
- if (const auto *InitList = dyn_cast<InitListExpr>(Init)) {
- // The array index has to be known.
- if (auto CI = R->getIndex().getAs<nonloc::ConcreteInt>()) {
- int64_t i = CI->getValue().getSExtValue();
- // If it is known that the index is out of bounds, we can return
- // an undefined value.
- if (i < 0)
- return UndefinedVal();
-
- if (auto CAT = Ctx.getAsConstantArrayType(VD->getType()))
- if (CAT->getSize().sle(i))
- return UndefinedVal();
-
- // If there is a list, but no init, it must be zero.
- if (i >= InitList->getNumInits())
- return svalBuilder.makeZeroVal(R->getElementType());
-
- if (const Expr *ElemInit = InitList->getInit(i))
- if (Optional<SVal> V = svalBuilder.getConstantVal(ElemInit))
- return *V;
- }
- }
- }
+ const StringLiteral *SL = StrR->getStringLiteral();
+ return getSValFromStringLiteral(SL, Idx.getZExtValue(), T);
}
+ } else if (isa<ElementRegion, VarRegion>(superR)) {
+ if (std::optional<SVal> V = getConstantValFromConstArrayInitializer(B, R))
+ return *V;
}
// Check for loads from a code text region. For such loads, just give up.
@@ -1707,27 +1949,10 @@ SVal RegionStoreManager::getBindingForElement(RegionBindingsConstRef B,
if (!O.getRegion())
return UnknownVal();
- if (const TypedValueRegion *baseR =
- dyn_cast_or_null<TypedValueRegion>(O.getRegion())) {
- QualType baseT = baseR->getValueType();
- if (baseT->isScalarType()) {
- QualType elemT = R->getElementType();
- if (elemT->isScalarType()) {
- if (Ctx.getTypeSizeInChars(baseT) >= Ctx.getTypeSizeInChars(elemT)) {
- if (const Optional<SVal> &V = B.getDirectBinding(superR)) {
- if (SymbolRef parentSym = V->getAsSymbol())
- return svalBuilder.getDerivedRegionValueSymbolVal(parentSym, R);
-
- if (V->isUnknownOrUndef())
- return *V;
- // Other cases: give up. We are indexing into a larger object
- // that has some value, but we don't know how to handle that yet.
- return UnknownVal();
- }
- }
- }
- }
- }
+ if (const TypedValueRegion *baseR = dyn_cast<TypedValueRegion>(O.getRegion()))
+ if (auto V = getDerivedSymbolForBinding(B, baseR, R, Ctx, svalBuilder))
+ return *V;
+
return getBindingForFieldOrElementCommon(B, R, R->getElementType());
}
@@ -1735,18 +1960,12 @@ SVal RegionStoreManager::getBindingForField(RegionBindingsConstRef B,
const FieldRegion* R) {
// Check if the region has a binding.
- if (const Optional<SVal> &V = B.getDirectBinding(R))
+ if (const std::optional<SVal> &V = B.getDirectBinding(R))
return *V;
- // Is the field declared constant and has an in-class initializer?
+ // If the containing record was initialized, try to get its constant value.
const FieldDecl *FD = R->getDecl();
QualType Ty = FD->getType();
- if (Ty.isConstQualified())
- if (const Expr *Init = FD->getInClassInitializer())
- if (Optional<SVal> V = svalBuilder.getConstantVal(Init))
- return *V;
-
- // If the containing record was initialized, try to get its constant value.
const MemRegion* superR = R->getSuperRegion();
if (const auto *VR = dyn_cast<VarRegion>(superR)) {
const VarDecl *VD = VR->getDecl();
@@ -1761,7 +1980,7 @@ SVal RegionStoreManager::getBindingForField(RegionBindingsConstRef B,
if (const auto *InitList = dyn_cast<InitListExpr>(Init)) {
if (Index < InitList->getNumInits()) {
if (const Expr *FieldInit = InitList->getInit(Index))
- if (Optional<SVal> V = svalBuilder.getConstantVal(FieldInit))
+ if (std::optional<SVal> V = svalBuilder.getConstantVal(FieldInit))
return *V;
} else {
return svalBuilder.makeZeroVal(Ty);
@@ -1769,17 +1988,35 @@ SVal RegionStoreManager::getBindingForField(RegionBindingsConstRef B,
}
}
+ // Handle the case where we are accessing into a larger scalar object.
+ // For example, this handles:
+ // struct header {
+ // unsigned a : 1;
+ // unsigned b : 1;
+ // };
+ // struct parse_t {
+ // unsigned bits0 : 1;
+ // unsigned bits2 : 2; // <-- header
+ // unsigned bits4 : 4;
+ // };
+ // int parse(parse_t *p) {
+ // unsigned copy = p->bits2;
+ // header *bits = (header *)&copy;
+ // return bits->b; <-- here
+ // }
+ if (const auto *Base = dyn_cast<TypedValueRegion>(R->getBaseRegion()))
+ if (auto V = getDerivedSymbolForBinding(B, Base, R, Ctx, svalBuilder))
+ return *V;
+
return getBindingForFieldOrElementCommon(B, R, Ty);
}
-Optional<SVal>
-RegionStoreManager::getBindingForDerivedDefaultValue(RegionBindingsConstRef B,
- const MemRegion *superR,
- const TypedValueRegion *R,
- QualType Ty) {
+std::optional<SVal> RegionStoreManager::getBindingForDerivedDefaultValue(
+ RegionBindingsConstRef B, const MemRegion *superR,
+ const TypedValueRegion *R, QualType Ty) {
- if (const Optional<SVal> &D = B.getDefaultBinding(superR)) {
- const SVal &val = D.getValue();
+ if (const std::optional<SVal> &D = B.getDefaultBinding(superR)) {
+ SVal val = *D;
if (SymbolRef parentSym = val.getAsSymbol())
return svalBuilder.getDerivedRegionValueSymbolVal(parentSym, R);
@@ -1791,14 +2028,13 @@ RegionStoreManager::getBindingForDerivedDefaultValue(RegionBindingsConstRef B,
// Lazy bindings are usually handled through getExistingLazyBinding().
// We should unify these two code paths at some point.
- if (val.getAs<nonloc::LazyCompoundVal>() ||
- val.getAs<nonloc::CompoundVal>())
+ if (isa<nonloc::LazyCompoundVal, nonloc::CompoundVal>(val))
return val;
llvm_unreachable("Unknown default value");
}
- return None;
+ return std::nullopt;
}
SVal RegionStoreManager::getLazyBinding(const SubRegion *LazyBindingRegion,
@@ -1869,7 +2105,8 @@ RegionStoreManager::getBindingForFieldOrElementCommon(RegionBindingsConstRef B,
const SubRegion *SR = R;
while (SR) {
const MemRegion *Base = SR->getSuperRegion();
- if (Optional<SVal> D = getBindingForDerivedDefaultValue(B, Base, R, Ty)) {
+ if (std::optional<SVal> D =
+ getBindingForDerivedDefaultValue(B, Base, R, Ty)) {
if (D->getAs<nonloc::LazyCompoundVal>()) {
hasPartialLazyBinding = true;
break;
@@ -1908,8 +2145,13 @@ RegionStoreManager::getBindingForFieldOrElementCommon(RegionBindingsConstRef B,
return UnknownVal();
// Additionally allow introspection of a block's internal layout.
- if (!hasPartialLazyBinding && !isa<BlockDataRegion>(R->getBaseRegion()))
+ // Try to get direct binding if all other attempts failed thus far.
+ // Else, return UndefinedVal()
+ if (!hasPartialLazyBinding && !isa<BlockDataRegion>(R->getBaseRegion())) {
+ if (const std::optional<SVal> &V = B.getDefaultBinding(R))
+ return *V;
return UndefinedVal();
+ }
}
// All other values are symbolic.
@@ -1919,13 +2161,13 @@ RegionStoreManager::getBindingForFieldOrElementCommon(RegionBindingsConstRef B,
SVal RegionStoreManager::getBindingForObjCIvar(RegionBindingsConstRef B,
const ObjCIvarRegion* R) {
// Check if the region has a binding.
- if (const Optional<SVal> &V = B.getDirectBinding(R))
+ if (const std::optional<SVal> &V = B.getDirectBinding(R))
return *V;
const MemRegion *superR = R->getSuperRegion();
// Check if the super region has a default binding.
- if (const Optional<SVal> &V = B.getDefaultBinding(superR)) {
+ if (const std::optional<SVal> &V = B.getDefaultBinding(superR)) {
if (SymbolRef parentSym = V->getAsSymbol())
return svalBuilder.getDerivedRegionValueSymbolVal(parentSym, R);
@@ -1940,10 +2182,10 @@ SVal RegionStoreManager::getBindingForVar(RegionBindingsConstRef B,
const VarRegion *R) {
// Check if the region has a binding.
- if (Optional<SVal> V = B.getDirectBinding(R))
+ if (std::optional<SVal> V = B.getDirectBinding(R))
return *V;
- if (Optional<SVal> V = B.getDefaultBinding(R))
+ if (std::optional<SVal> V = B.getDefaultBinding(R))
return *V;
// Lazily derive a value for the VarRegion.
@@ -1957,7 +2199,7 @@ SVal RegionStoreManager::getBindingForVar(RegionBindingsConstRef B,
// Is 'VD' declared constant? If so, retrieve the constant value.
if (VD->getType().isConstQualified()) {
if (const Expr *Init = VD->getAnyInitializer()) {
- if (Optional<SVal> V = svalBuilder.getConstantVal(Init))
+ if (std::optional<SVal> V = svalBuilder.getConstantVal(Init))
return *V;
// If the variable is const qualified and has an initializer but
@@ -1978,7 +2220,7 @@ SVal RegionStoreManager::getBindingForVar(RegionBindingsConstRef B,
// If we're in main(), then global initializers have not become stale yet.
if (B.isMainAnalysis())
if (const Expr *Init = VD->getAnyInitializer())
- if (Optional<SVal> V = svalBuilder.getConstantVal(Init))
+ if (std::optional<SVal> V = svalBuilder.getConstantVal(Init))
return *V;
// Function-scoped static variables are default-initialized to 0; if they
@@ -1988,9 +2230,9 @@ SVal RegionStoreManager::getBindingForVar(RegionBindingsConstRef B,
if (isa<StaticGlobalSpaceRegion>(MS))
return svalBuilder.makeZeroVal(T);
- if (Optional<SVal> V = getBindingForDerivedDefaultValue(B, MS, R, T)) {
+ if (std::optional<SVal> V = getBindingForDerivedDefaultValue(B, MS, R, T)) {
assert(!V->getAs<nonloc::LazyCompoundVal>());
- return V.getValue();
+ return *V;
}
return svalBuilder.getRegionValueSymbolVal(R);
@@ -2026,18 +2268,13 @@ RegionStoreManager::getInterestingValues(nonloc::LazyCompoundVal LCV) {
SmallVector<BindingPair, 32> Bindings;
collectSubRegionBindings(Bindings, svalBuilder, *Cluster, LazyR,
/*IncludeAllDefaultBindings=*/true);
- for (SmallVectorImpl<BindingPair>::const_iterator I = Bindings.begin(),
- E = Bindings.end();
- I != E; ++I) {
- SVal V = I->second;
+ for (SVal V : llvm::make_second_range(Bindings)) {
if (V.isUnknownOrUndef() || V.isConstant())
continue;
- if (Optional<nonloc::LazyCompoundVal> InnerLCV =
- V.getAs<nonloc::LazyCompoundVal>()) {
+ if (auto InnerLCV = V.getAs<nonloc::LazyCompoundVal>()) {
const SValListTy &InnerList = getInterestingValues(*InnerLCV);
List.insert(List.end(), InnerList.begin(), InnerList.end());
- continue;
}
List.push_back(V);
@@ -2048,8 +2285,8 @@ RegionStoreManager::getInterestingValues(nonloc::LazyCompoundVal LCV) {
NonLoc RegionStoreManager::createLazyBinding(RegionBindingsConstRef B,
const TypedValueRegion *R) {
- if (Optional<nonloc::LazyCompoundVal> V =
- getExistingLazyBinding(svalBuilder, B, R, false))
+ if (std::optional<nonloc::LazyCompoundVal> V =
+ getExistingLazyBinding(svalBuilder, B, R, false))
return *V;
return svalBuilder.makeLazyCompoundVal(StoreRef(B.asStore(), *this), R);
@@ -2094,7 +2331,7 @@ bool RegionStoreManager::includedInBindings(Store store,
const ClusterBindings &Cluster = RI.getData();
for (ClusterBindings::iterator CI = Cluster.begin(), CE = Cluster.end();
CI != CE; ++CI) {
- const SVal &D = CI.getData();
+ SVal D = CI.getData();
if (const MemRegion *R = D.getAsRegion())
if (R->getBaseRegion() == region)
return true;
@@ -2109,7 +2346,7 @@ bool RegionStoreManager::includedInBindings(Store store,
//===----------------------------------------------------------------------===//
StoreRef RegionStoreManager::killBinding(Store ST, Loc L) {
- if (Optional<loc::MemRegionVal> LV = L.getAs<loc::MemRegionVal>())
+ if (std::optional<loc::MemRegionVal> LV = L.getAs<loc::MemRegionVal>())
if (const MemRegion* R = LV->getRegion())
return StoreRef(getRegionBindings(ST).removeBinding(R)
.asImmutableMap()
@@ -2140,22 +2377,21 @@ RegionStoreManager::bind(RegionBindingsConstRef B, Loc L, SVal V) {
return bindAggregate(B, TR, V);
}
- if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
- // Binding directly to a symbolic region should be treated as binding
- // to element 0.
- QualType T = SR->getSymbol()->getType();
- if (T->isAnyPointerType() || T->isReferenceType())
- T = T->getPointeeType();
-
- R = GetElementZeroRegion(SR, T);
- }
+ // Binding directly to a symbolic region should be treated as binding
+ // to element 0.
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
+ R = GetElementZeroRegion(SR, SR->getPointeeStaticType());
assert((!isa<CXXThisRegion>(R) || !B.lookup(R)) &&
"'this' pointer is not an l-value and is not assignable");
// Clear out bindings that may overlap with this binding.
RegionBindingsRef NewB = removeSubRegionBindings(B, cast<SubRegion>(R));
- return NewB.addBinding(BindingKey::Make(R, BindingKey::Direct), V);
+
+ // LazyCompoundVals should be always bound as 'default' bindings.
+ auto KeyKind = isa<nonloc::LazyCompoundVal>(V) ? BindingKey::Default
+ : BindingKey::Direct;
+ return NewB.addBinding(BindingKey::Make(R, KeyKind), V);
}
RegionBindingsRef
@@ -2165,7 +2401,7 @@ RegionStoreManager::setImplicitDefaultValue(RegionBindingsConstRef B,
SVal V;
if (Loc::isLocType(T))
- V = svalBuilder.makeNull();
+ V = svalBuilder.makeNullWithType(T);
else if (T->isIntegralOrEnumerationType())
V = svalBuilder.makeZeroVal(T);
else if (T->isStructureOrClassType() || T->isArrayType()) {
@@ -2185,6 +2421,40 @@ RegionStoreManager::setImplicitDefaultValue(RegionBindingsConstRef B,
return B.addBinding(R, BindingKey::Default, V);
}
+std::optional<RegionBindingsRef> RegionStoreManager::tryBindSmallArray(
+ RegionBindingsConstRef B, const TypedValueRegion *R, const ArrayType *AT,
+ nonloc::LazyCompoundVal LCV) {
+
+ auto CAT = dyn_cast<ConstantArrayType>(AT);
+
+ // If we don't know the size, create a lazyCompoundVal instead.
+ if (!CAT)
+ return std::nullopt;
+
+ QualType Ty = CAT->getElementType();
+ if (!(Ty->isScalarType() || Ty->isReferenceType()))
+ return std::nullopt;
+
+ // If the array is too big, create a LCV instead.
+ uint64_t ArrSize = CAT->getSize().getLimitedValue();
+ if (ArrSize > SmallArrayLimit)
+ return std::nullopt;
+
+ RegionBindingsRef NewB = B;
+
+ for (uint64_t i = 0; i < ArrSize; ++i) {
+ auto Idx = svalBuilder.makeArrayIndex(i);
+ const ElementRegion *SrcER =
+ MRMgr.getElementRegion(Ty, Idx, LCV.getRegion(), Ctx);
+ SVal V = getBindingForElement(getRegionBindings(LCV.getStore()), SrcER);
+
+ const ElementRegion *DstER = MRMgr.getElementRegion(Ty, Idx, R, Ctx);
+ NewB = bind(NewB, loc::MemRegionVal(DstER), V);
+ }
+
+ return NewB;
+}
+
RegionBindingsRef
RegionStoreManager::bindArray(RegionBindingsConstRef B,
const TypedValueRegion* R,
@@ -2192,7 +2462,7 @@ RegionStoreManager::bindArray(RegionBindingsConstRef B,
const ArrayType *AT =cast<ArrayType>(Ctx.getCanonicalType(R->getValueType()));
QualType ElementTy = AT->getElementType();
- Optional<uint64_t> Size;
+ std::optional<uint64_t> Size;
if (const ConstantArrayType* CAT = dyn_cast<ConstantArrayType>(AT))
Size = CAT->getSize().getZExtValue();
@@ -2200,14 +2470,20 @@ RegionStoreManager::bindArray(RegionBindingsConstRef B,
// Check if the init expr is a literal. If so, bind the rvalue instead.
// FIXME: It's not responsibility of the Store to transform this lvalue
// to rvalue. ExprEngine or maybe even CFG should do this before binding.
- if (Optional<loc::MemRegionVal> MRV = Init.getAs<loc::MemRegionVal>()) {
+ if (std::optional<loc::MemRegionVal> MRV = Init.getAs<loc::MemRegionVal>()) {
SVal V = getBinding(B.asStore(), *MRV, R->getValueType());
return bindAggregate(B, R, V);
}
// Handle lazy compound values.
- if (Init.getAs<nonloc::LazyCompoundVal>())
+ if (std::optional<nonloc::LazyCompoundVal> LCV =
+ Init.getAs<nonloc::LazyCompoundVal>()) {
+ if (std::optional<RegionBindingsRef> NewB =
+ tryBindSmallArray(B, R, AT, *LCV))
+ return *NewB;
+
return bindAggregate(B, R, Init);
+ }
if (Init.isUnknown())
return bindAggregate(B, R, UnknownVal());
@@ -2219,12 +2495,12 @@ RegionStoreManager::bindArray(RegionBindingsConstRef B,
RegionBindingsRef NewB(B);
- for (; Size.hasValue() ? i < Size.getValue() : true ; ++i, ++VI) {
+ for (; Size ? i < *Size : true; ++i, ++VI) {
// The init list might be shorter than the array length.
if (VI == VE)
break;
- const NonLoc &Idx = svalBuilder.makeArrayIndex(i);
+ NonLoc Idx = svalBuilder.makeArrayIndex(i);
const ElementRegion *ER = MRMgr.getElementRegion(ElementTy, Idx, R, Ctx);
if (ElementTy->isStructureOrClassType())
@@ -2238,7 +2514,7 @@ RegionStoreManager::bindArray(RegionBindingsConstRef B,
// If the init list is shorter than the array length (or the array has
// variable length), set the array default value. Values that are already set
// are not overwritten.
- if (!Size.hasValue() || i < Size.getValue())
+ if (!Size || i < *Size)
NewB = setImplicitDefaultValue(NewB, R, ElementTy);
return NewB;
@@ -2251,13 +2527,13 @@ RegionBindingsRef RegionStoreManager::bindVector(RegionBindingsConstRef B,
const VectorType *VT = T->castAs<VectorType>(); // Use castAs for typedefs.
// Handle lazy compound values and symbolic values.
- if (V.getAs<nonloc::LazyCompoundVal>() || V.getAs<nonloc::SymbolVal>())
+ if (isa<nonloc::LazyCompoundVal, nonloc::SymbolVal>(V))
return bindAggregate(B, R, V);
// We may get non-CompoundVal accidentally due to imprecise cast logic or
// that we are binding symbolic struct value. Kill the field values, and if
// the value is symbolic go and bind it as a "default" binding.
- if (!V.getAs<nonloc::CompoundVal>()) {
+ if (!isa<nonloc::CompoundVal>(V)) {
return bindAggregate(B, R, UnknownVal());
}
@@ -2284,16 +2560,14 @@ RegionBindingsRef RegionStoreManager::bindVector(RegionBindingsConstRef B,
return NewB;
}
-Optional<RegionBindingsRef>
-RegionStoreManager::tryBindSmallStruct(RegionBindingsConstRef B,
- const TypedValueRegion *R,
- const RecordDecl *RD,
- nonloc::LazyCompoundVal LCV) {
+std::optional<RegionBindingsRef> RegionStoreManager::tryBindSmallStruct(
+ RegionBindingsConstRef B, const TypedValueRegion *R, const RecordDecl *RD,
+ nonloc::LazyCompoundVal LCV) {
FieldVector Fields;
if (const CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(RD))
if (Class->getNumBases() != 0 || Class->getNumVBases() != 0)
- return None;
+ return std::nullopt;
for (const auto *FD : RD->fields()) {
if (FD->isUnnamedBitfield())
@@ -2302,22 +2576,28 @@ RegionStoreManager::tryBindSmallStruct(RegionBindingsConstRef B,
// If there are too many fields, or if any of the fields are aggregates,
// just use the LCV as a default binding.
if (Fields.size() == SmallStructLimit)
- return None;
+ return std::nullopt;
QualType Ty = FD->getType();
+
+ // Zero length arrays are basically no-ops, so we also ignore them here.
+ if (Ty->isConstantArrayType() &&
+ Ctx.getConstantArrayElementCount(Ctx.getAsConstantArrayType(Ty)) == 0)
+ continue;
+
if (!(Ty->isScalarType() || Ty->isReferenceType()))
- return None;
+ return std::nullopt;
Fields.push_back(FD);
}
RegionBindingsRef NewB = B;
- for (FieldVector::iterator I = Fields.begin(), E = Fields.end(); I != E; ++I){
- const FieldRegion *SourceFR = MRMgr.getFieldRegion(*I, LCV.getRegion());
+ for (const FieldDecl *Field : Fields) {
+ const FieldRegion *SourceFR = MRMgr.getFieldRegion(Field, LCV.getRegion());
SVal V = getBindingForField(getRegionBindings(LCV.getStore()), SourceFR);
- const FieldRegion *DestFR = MRMgr.getFieldRegion(*I, R);
+ const FieldRegion *DestFR = MRMgr.getFieldRegion(Field, R);
NewB = bind(NewB, loc::MemRegionVal(DestFR), V);
}
@@ -2325,11 +2605,8 @@ RegionStoreManager::tryBindSmallStruct(RegionBindingsConstRef B,
}
RegionBindingsRef RegionStoreManager::bindStruct(RegionBindingsConstRef B,
- const TypedValueRegion* R,
+ const TypedValueRegion *R,
SVal V) {
- if (!Features.supportsFields())
- return B;
-
QualType T = R->getValueType();
assert(T->isStructureOrClassType());
@@ -2340,19 +2617,20 @@ RegionBindingsRef RegionStoreManager::bindStruct(RegionBindingsConstRef B,
return B;
// Handle lazy compound values and symbolic values.
- if (Optional<nonloc::LazyCompoundVal> LCV =
- V.getAs<nonloc::LazyCompoundVal>()) {
- if (Optional<RegionBindingsRef> NewB = tryBindSmallStruct(B, R, RD, *LCV))
+ if (std::optional<nonloc::LazyCompoundVal> LCV =
+ V.getAs<nonloc::LazyCompoundVal>()) {
+ if (std::optional<RegionBindingsRef> NewB =
+ tryBindSmallStruct(B, R, RD, *LCV))
return *NewB;
return bindAggregate(B, R, V);
}
- if (V.getAs<nonloc::SymbolVal>())
+ if (isa<nonloc::SymbolVal>(V))
return bindAggregate(B, R, V);
// We may get non-CompoundVal accidentally due to imprecise cast logic or
// that we are binding symbolic struct value. Kill the field values, and if
// the value is symbolic go and bind it as a "default" binding.
- if (V.isUnknown() || !V.getAs<nonloc::CompoundVal>())
+ if (V.isUnknown() || !isa<nonloc::CompoundVal>(V))
return bindAggregate(B, R, UnknownVal());
// The raw CompoundVal is essentially a symbolic InitListExpr: an (immutable)
@@ -2535,25 +2813,26 @@ void RemoveDeadBindingsWorker::VisitCluster(const MemRegion *baseR,
if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(baseR))
SymReaper.markLive(SymR->getSymbol());
- for (ClusterBindings::iterator I = C->begin(), E = C->end(); I != E; ++I) {
+ for (const auto &[Key, Val] : *C) {
// Element index of a binding key is live.
- SymReaper.markElementIndicesLive(I.getKey().getRegion());
+ SymReaper.markElementIndicesLive(Key.getRegion());
- VisitBinding(I.getData());
+ VisitBinding(Val);
}
}
void RemoveDeadBindingsWorker::VisitBinding(SVal V) {
- // Is it a LazyCompoundVal? All referenced regions are live as well.
- if (Optional<nonloc::LazyCompoundVal> LCS =
- V.getAs<nonloc::LazyCompoundVal>()) {
-
- const RegionStoreManager::SValListTy &Vals = RM.getInterestingValues(*LCS);
-
- for (RegionStoreManager::SValListTy::const_iterator I = Vals.begin(),
- E = Vals.end();
- I != E; ++I)
- VisitBinding(*I);
+ // Is it a LazyCompoundVal? All referenced regions are live as well.
+ // The LazyCompoundVal itself is not live but should be readable.
+ if (auto LCS = V.getAs<nonloc::LazyCompoundVal>()) {
+ SymReaper.markLazilyCopied(LCS->getRegion());
+
+ for (SVal V : RM.getInterestingValues(*LCS)) {
+ if (auto DepLCS = V.getAs<nonloc::LazyCompoundVal>())
+ SymReaper.markLazilyCopied(DepLCS->getRegion());
+ else
+ VisitBinding(V);
+ }
return;
}
@@ -2565,17 +2844,15 @@ void RemoveDeadBindingsWorker::VisitBinding(SVal V) {
// All regions captured by a block are also live.
if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(R)) {
- BlockDataRegion::referenced_vars_iterator I = BR->referenced_vars_begin(),
- E = BR->referenced_vars_end();
- for ( ; I != E; ++I)
- AddToWorkList(I.getCapturedRegion());
+ for (auto Var : BR->referenced_vars())
+ AddToWorkList(Var.getCapturedRegion());
}
}
// Update the set of live symbols.
- for (auto SI = V.symbol_begin(), SE = V.symbol_end(); SI!=SE; ++SI)
- SymReaper.markLive(*SI);
+ for (SymbolRef Sym : V.symbols())
+ SymReaper.markLive(Sym);
}
bool RemoveDeadBindingsWorker::UpdatePostponed() {
@@ -2583,12 +2860,10 @@ bool RemoveDeadBindingsWorker::UpdatePostponed() {
// having done a scan.
bool Changed = false;
- for (auto I = Postponed.begin(), E = Postponed.end(); I != E; ++I) {
- if (const SymbolicRegion *SR = *I) {
- if (SymReaper.isLive(SR->getSymbol())) {
- Changed |= AddToWorkList(SR);
- *I = nullptr;
- }
+ for (const SymbolicRegion *SR : Postponed) {
+ if (SymReaper.isLive(SR->getSymbol())) {
+ Changed |= AddToWorkList(SR);
+ SR = nullptr;
}
}
@@ -2603,9 +2878,8 @@ StoreRef RegionStoreManager::removeDeadBindings(Store store,
W.GenerateClusters();
// Enqueue the region roots onto the worklist.
- for (SymbolReaper::region_iterator I = SymReaper.region_begin(),
- E = SymReaper.region_end(); I != E; ++I) {
- W.AddToWorkList(*I);
+ for (const MemRegion *Reg : SymReaper.regions()) {
+ W.AddToWorkList(Reg);
}
do W.RunWorkList(); while (W.UpdatePostponed());
@@ -2613,9 +2887,7 @@ StoreRef RegionStoreManager::removeDeadBindings(Store store,
// We have now scanned the store, marking reachable regions and symbols
// as live. We now remove all the regions that are dead from the store
// as well as update DSymbols with the set symbols that are now dead.
- for (RegionBindingsRef::iterator I = B.begin(), E = B.end(); I != E; ++I) {
- const MemRegion *Base = I.getKey();
-
+ for (const MemRegion *Base : llvm::make_first_range(B)) {
// If the cluster has been visited, we know the region has been marked.
// Otherwise, remove the dead entry.
if (!W.isVisited(Base))
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp
index 7395622a659c..04165a443fff 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SMTConstraintManager.cpp
@@ -1,9 +1,8 @@
//== SMTConstraintManager.cpp -----------------------------------*- C++ -*--==//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
index b459b5adb511..eb9cde5c8918 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -19,25 +19,25 @@
#include "clang/AST/ExprObjC.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/Type.h"
-#include "clang/Basic/LLVM.h"
#include "clang/Analysis/AnalysisDeclContext.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/Basic/LLVM.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValVisitor.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "llvm/ADT/APSInt.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include <cassert>
+#include <optional>
#include <tuple>
using namespace clang;
@@ -49,9 +49,19 @@ using namespace ento;
void SValBuilder::anchor() {}
+SValBuilder::SValBuilder(llvm::BumpPtrAllocator &alloc, ASTContext &context,
+ ProgramStateManager &stateMgr)
+ : Context(context), BasicVals(context, alloc),
+ SymMgr(context, BasicVals, alloc), MemMgr(context, alloc),
+ StateMgr(stateMgr),
+ AnOpts(
+ stateMgr.getOwningEngine().getAnalysisManager().getAnalyzerOptions()),
+ ArrayIndexTy(context.LongLongTy),
+ ArrayIndexWidth(context.getTypeSize(ArrayIndexTy)) {}
+
DefinedOrUnknownSVal SValBuilder::makeZeroVal(QualType type) {
if (Loc::isLocType(type))
- return makeNull();
+ return makeNullWithType(type);
if (type->isIntegralOrEnumerationType())
return makeIntVal(0, type);
@@ -64,8 +74,10 @@ DefinedOrUnknownSVal SValBuilder::makeZeroVal(QualType type) {
return UnknownVal();
}
-NonLoc SValBuilder::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
- const llvm::APSInt& rhs, QualType type) {
+nonloc::SymbolVal SValBuilder::makeNonLoc(const SymExpr *lhs,
+ BinaryOperator::Opcode op,
+ const llvm::APSInt &rhs,
+ QualType type) {
// The Environment ensures we always get a persistent APSInt in
// BasicValueFactory, so we don't need to get the APSInt from
// BasicValueFactory again.
@@ -74,25 +86,35 @@ NonLoc SValBuilder::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
return nonloc::SymbolVal(SymMgr.getSymIntExpr(lhs, op, rhs, type));
}
-NonLoc SValBuilder::makeNonLoc(const llvm::APSInt& lhs,
- BinaryOperator::Opcode op, const SymExpr *rhs,
- QualType type) {
+nonloc::SymbolVal SValBuilder::makeNonLoc(const llvm::APSInt &lhs,
+ BinaryOperator::Opcode op,
+ const SymExpr *rhs, QualType type) {
assert(rhs);
assert(!Loc::isLocType(type));
return nonloc::SymbolVal(SymMgr.getIntSymExpr(lhs, op, rhs, type));
}
-NonLoc SValBuilder::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
- const SymExpr *rhs, QualType type) {
+nonloc::SymbolVal SValBuilder::makeNonLoc(const SymExpr *lhs,
+ BinaryOperator::Opcode op,
+ const SymExpr *rhs, QualType type) {
assert(lhs && rhs);
assert(!Loc::isLocType(type));
return nonloc::SymbolVal(SymMgr.getSymSymExpr(lhs, op, rhs, type));
}
-NonLoc SValBuilder::makeNonLoc(const SymExpr *operand,
- QualType fromTy, QualType toTy) {
+NonLoc SValBuilder::makeNonLoc(const SymExpr *operand, UnaryOperator::Opcode op,
+ QualType type) {
+ assert(operand);
+ assert(!Loc::isLocType(type));
+ return nonloc::SymbolVal(SymMgr.getUnarySymExpr(operand, op, type));
+}
+
+nonloc::SymbolVal SValBuilder::makeNonLoc(const SymExpr *operand,
+ QualType fromTy, QualType toTy) {
assert(operand);
assert(!Loc::isLocType(toTy));
+ if (fromTy == toTy)
+ return nonloc::SymbolVal(operand);
return nonloc::SymbolVal(SymMgr.getCastSymbol(operand, fromTy, toTy));
}
@@ -101,7 +123,8 @@ SVal SValBuilder::convertToArrayIndex(SVal val) {
return val;
// Common case: we have an appropriately sized integer.
- if (Optional<nonloc::ConcreteInt> CI = val.getAs<nonloc::ConcreteInt>()) {
+ if (std::optional<nonloc::ConcreteInt> CI =
+ val.getAs<nonloc::ConcreteInt>()) {
const llvm::APSInt& I = CI->getValue();
if (I.getBitWidth() == ArrayIndexWidth && I.isSigned())
return val;
@@ -208,6 +231,14 @@ SValBuilder::getConjuredHeapSymbolVal(const Expr *E,
return loc::MemRegionVal(MemMgr.getSymbolicHeapRegion(sym));
}
+loc::MemRegionVal SValBuilder::getAllocaRegionVal(const Expr *E,
+ const LocationContext *LCtx,
+ unsigned VisitCount) {
+ const AllocaRegion *R =
+ getRegionManager().getAllocaRegion(E, VisitCount, LCtx);
+ return loc::MemRegionVal(R);
+}
+
DefinedSVal SValBuilder::getMetadataSymbolVal(const void *symbolTag,
const MemRegion *region,
const Expr *expr, QualType type,
@@ -244,8 +275,7 @@ SValBuilder::getDerivedRegionValueSymbolVal(SymbolRef parentSymbol,
}
DefinedSVal SValBuilder::getMemberPointer(const NamedDecl *ND) {
- assert(!ND || isa<CXXMethodDecl>(ND) || isa<FieldDecl>(ND) ||
- isa<IndirectFieldDecl>(ND));
+ assert(!ND || (isa<CXXMethodDecl, FieldDecl, IndirectFieldDecl>(ND)));
if (const auto *MD = dyn_cast_or_null<CXXMethodDecl>(ND)) {
// Sema treats pointers to static member functions as have function pointer
@@ -253,7 +283,7 @@ DefinedSVal SValBuilder::getMemberPointer(const NamedDecl *ND) {
// We don't need to play a similar trick for static member fields
// because these are represented as plain VarDecls and not FieldDecls
// in the AST.
- if (MD->isStatic())
+ if (!MD->isImplicitObjectMemberFunction())
return getFunctionPointer(MD);
}
@@ -275,11 +305,11 @@ DefinedSVal SValBuilder::getBlockPointer(const BlockDecl *block,
return loc::MemRegionVal(BD);
}
-Optional<loc::MemRegionVal>
+std::optional<loc::MemRegionVal>
SValBuilder::getCastedMemRegionVal(const MemRegion *R, QualType Ty) {
if (auto OptR = StateMgr.getStoreManager().castRegion(R, Ty))
return loc::MemRegionVal(*OptR);
- return None;
+ return std::nullopt;
}
/// Return a memory region for the 'this' object reference.
@@ -297,7 +327,7 @@ loc::MemRegionVal SValBuilder::getCXXThis(const CXXRecordDecl *D,
return loc::MemRegionVal(getRegionManager().getCXXThisRegion(PT, SFC));
}
-Optional<SVal> SValBuilder::getConstantVal(const Expr *E) {
+std::optional<SVal> SValBuilder::getConstantVal(const Expr *E) {
E = E->IgnoreParens();
switch (E->getStmtClass()) {
@@ -350,7 +380,7 @@ Optional<SVal> SValBuilder::getConstantVal(const Expr *E) {
return makeBoolVal(cast<ObjCBoolLiteralExpr>(E));
case Stmt::CXXNullPtrLiteralExprClass:
- return makeNull();
+ return makeNullWithType(E->getType());
case Stmt::CStyleCastExprClass:
case Stmt::CXXFunctionalCastExprClass:
@@ -367,21 +397,20 @@ Optional<SVal> SValBuilder::getConstantVal(const Expr *E) {
case CK_NoOp:
case CK_BitCast: {
const Expr *SE = CE->getSubExpr();
- Optional<SVal> Val = getConstantVal(SE);
+ std::optional<SVal> Val = getConstantVal(SE);
if (!Val)
- return None;
+ return std::nullopt;
return evalCast(*Val, CE->getType(), SE->getType());
}
}
- // FALLTHROUGH
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
}
// If we don't have a special case, fall back to the AST's constant evaluator.
default: {
// Don't try to come up with a value for materialized temporaries.
if (E->isGLValue())
- return None;
+ return std::nullopt;
ASTContext &Ctx = getContext();
Expr::EvalResult Result;
@@ -390,9 +419,9 @@ Optional<SVal> SValBuilder::getConstantVal(const Expr *E) {
if (Loc::isLocType(E->getType()))
if (E->isNullPointerConstant(Ctx, Expr::NPC_ValueDependentIsNotNull))
- return makeNull();
+ return makeNullWithType(E->getType());
- return None;
+ return std::nullopt;
}
}
}
@@ -405,25 +434,62 @@ SVal SValBuilder::makeSymExprValNN(BinaryOperator::Opcode Op,
// TODO: When the Max Complexity is reached, we should conjure a symbol
// instead of generating an Unknown value and propagate the taint info to it.
- const unsigned MaxComp = StateMgr.getOwningEngine()
- .getAnalysisManager()
- .options.MaxSymbolComplexity;
+ const unsigned MaxComp = AnOpts.MaxSymbolComplexity;
if (symLHS && symRHS &&
(symLHS->computeComplexity() + symRHS->computeComplexity()) < MaxComp)
return makeNonLoc(symLHS, Op, symRHS, ResultTy);
if (symLHS && symLHS->computeComplexity() < MaxComp)
- if (Optional<nonloc::ConcreteInt> rInt = RHS.getAs<nonloc::ConcreteInt>())
+ if (std::optional<nonloc::ConcreteInt> rInt =
+ RHS.getAs<nonloc::ConcreteInt>())
return makeNonLoc(symLHS, Op, rInt->getValue(), ResultTy);
if (symRHS && symRHS->computeComplexity() < MaxComp)
- if (Optional<nonloc::ConcreteInt> lInt = LHS.getAs<nonloc::ConcreteInt>())
+ if (std::optional<nonloc::ConcreteInt> lInt =
+ LHS.getAs<nonloc::ConcreteInt>())
return makeNonLoc(lInt->getValue(), Op, symRHS, ResultTy);
return UnknownVal();
}
+SVal SValBuilder::evalMinus(NonLoc X) {
+ switch (X.getKind()) {
+ case nonloc::ConcreteIntKind:
+ return makeIntVal(-X.castAs<nonloc::ConcreteInt>().getValue());
+ case nonloc::SymbolValKind:
+ return makeNonLoc(X.castAs<nonloc::SymbolVal>().getSymbol(), UO_Minus,
+ X.getType(Context));
+ default:
+ return UnknownVal();
+ }
+}
+
+SVal SValBuilder::evalComplement(NonLoc X) {
+ switch (X.getKind()) {
+ case nonloc::ConcreteIntKind:
+ return makeIntVal(~X.castAs<nonloc::ConcreteInt>().getValue());
+ case nonloc::SymbolValKind:
+ return makeNonLoc(X.castAs<nonloc::SymbolVal>().getSymbol(), UO_Not,
+ X.getType(Context));
+ default:
+ return UnknownVal();
+ }
+}
+
+SVal SValBuilder::evalUnaryOp(ProgramStateRef state, UnaryOperator::Opcode opc,
+ SVal operand, QualType type) {
+ auto OpN = operand.getAs<NonLoc>();
+ if (!OpN)
+ return UnknownVal();
+
+ if (opc == UO_Minus)
+ return evalMinus(*OpN);
+ if (opc == UO_Not)
+ return evalComplement(*OpN);
+ llvm_unreachable("Unexpected unary operator");
+}
+
SVal SValBuilder::evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
SVal lhs, SVal rhs, QualType type) {
if (lhs.isUndef() || rhs.isUndef())
@@ -432,8 +498,7 @@ SVal SValBuilder::evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
if (lhs.isUnknown() || rhs.isUnknown())
return UnknownVal();
- if (lhs.getAs<nonloc::LazyCompoundVal>() ||
- rhs.getAs<nonloc::LazyCompoundVal>()) {
+ if (isa<nonloc::LazyCompoundVal>(lhs) || isa<nonloc::LazyCompoundVal>(rhs)) {
return UnknownVal();
}
@@ -445,20 +510,30 @@ SVal SValBuilder::evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
return UnknownVal();
}
- if (Optional<Loc> LV = lhs.getAs<Loc>()) {
- if (Optional<Loc> RV = rhs.getAs<Loc>())
+ if (std::optional<Loc> LV = lhs.getAs<Loc>()) {
+ if (std::optional<Loc> RV = rhs.getAs<Loc>())
return evalBinOpLL(state, op, *LV, *RV, type);
return evalBinOpLN(state, op, *LV, rhs.castAs<NonLoc>(), type);
}
- if (Optional<Loc> RV = rhs.getAs<Loc>()) {
- // Support pointer arithmetic where the addend is on the left
- // and the pointer on the right.
- assert(op == BO_Add);
+ if (const std::optional<Loc> RV = rhs.getAs<Loc>()) {
+ const auto IsCommutative = [](BinaryOperatorKind Op) {
+ return Op == BO_Mul || Op == BO_Add || Op == BO_And || Op == BO_Xor ||
+ Op == BO_Or;
+ };
+
+ if (IsCommutative(op)) {
+ // Swap operands.
+ return evalBinOpLN(state, op, *RV, lhs.castAs<NonLoc>(), type);
+ }
- // Commute the operands.
- return evalBinOpLN(state, op, *RV, lhs.castAs<NonLoc>(), type);
+ // If the right operand is a concrete int location then we have nothing
+ // better but to treat it as a simple nonloc.
+ if (auto RV = rhs.getAs<loc::ConcreteInt>()) {
+ const nonloc::ConcreteInt RhsAsLoc = makeIntVal(RV->getValue());
+ return evalBinOpNN(state, op, lhs.castAs<NonLoc>(), RhsAsLoc, type);
+ }
}
return evalBinOpNN(state, op, lhs.castAs<NonLoc>(), rhs.castAs<NonLoc>(),
@@ -531,11 +606,9 @@ SVal SValBuilder::evalIntegralCast(ProgramStateRef state, SVal val,
APSIntType ToType(getContext().getTypeSize(castTy),
castTy->isUnsignedIntegerType());
llvm::APSInt ToTypeMax = ToType.getMaxValue();
- NonLoc ToTypeMaxVal =
- makeIntVal(ToTypeMax.isUnsigned() ? ToTypeMax.getZExtValue()
- : ToTypeMax.getSExtValue(),
- castTy)
- .castAs<NonLoc>();
+
+ NonLoc ToTypeMaxVal = makeIntVal(ToTypeMax);
+
// Check the range of the symbol being casted against the maximum value of the
// target type.
NonLoc FromVal = val.castAs<NonLoc>();
@@ -546,448 +619,489 @@ SVal SValBuilder::evalIntegralCast(ProgramStateRef state, SVal val,
std::tie(IsNotTruncated, IsTruncated) = state->assume(CompVal);
if (!IsNotTruncated && IsTruncated) {
// Symbol is truncated so we evaluate it as a cast.
- NonLoc CastVal = makeNonLoc(se, originalTy, castTy);
- return CastVal;
+ return makeNonLoc(se, originalTy, castTy);
}
return evalCast(val, castTy, originalTy);
}
//===----------------------------------------------------------------------===//
-// Cast methods.
-// `evalCast` is the main method
-// `evalCastKind` and `evalCastSubKind` are helpers
+// Cast method.
+// `evalCast` and its helper `EvalCastVisitor`
//===----------------------------------------------------------------------===//
-/// Cast a given SVal to another SVal using given QualType's.
-/// \param V -- SVal that should be casted.
-/// \param CastTy -- QualType that V should be casted according to.
-/// \param OriginalTy -- QualType which is associated to V. It provides
-/// additional information about what type the cast performs from.
-/// \returns the most appropriate casted SVal.
-/// Note: Many cases don't use an exact OriginalTy. It can be extracted
-/// from SVal or the cast can performs unconditionaly. Always pass OriginalTy!
-/// It can be crucial in certain cases and generates different results.
-/// FIXME: If `OriginalTy.isNull()` is true, then cast performs based on CastTy
-/// only. This behavior is uncertain and should be improved.
-SVal SValBuilder::evalCast(SVal V, QualType CastTy, QualType OriginalTy) {
- if (CastTy.isNull())
- return V;
-
- CastTy = Context.getCanonicalType(CastTy);
+namespace {
+class EvalCastVisitor : public SValVisitor<EvalCastVisitor, SVal> {
+private:
+ SValBuilder &VB;
+ ASTContext &Context;
+ QualType CastTy, OriginalTy;
- const bool IsUnknownOriginalType = OriginalTy.isNull();
- if (!IsUnknownOriginalType) {
- OriginalTy = Context.getCanonicalType(OriginalTy);
+public:
+ EvalCastVisitor(SValBuilder &VB, QualType CastTy, QualType OriginalTy)
+ : VB(VB), Context(VB.getContext()), CastTy(CastTy),
+ OriginalTy(OriginalTy) {}
- if (CastTy == OriginalTy)
+ SVal Visit(SVal V) {
+ if (CastTy.isNull())
return V;
- // FIXME: Move this check to the most appropriate
- // evalCastKind/evalCastSubKind function. For const casts, casts to void,
- // just propagate the value.
- if (!CastTy->isVariableArrayType() && !OriginalTy->isVariableArrayType())
- if (shouldBeModeledWithNoOp(Context, Context.getPointerType(CastTy),
- Context.getPointerType(OriginalTy)))
- return V;
- }
-
- // Cast SVal according to kinds.
- switch (V.getBaseKind()) {
- case SVal::UndefinedValKind:
- return evalCastKind(V.castAs<UndefinedVal>(), CastTy, OriginalTy);
- case SVal::UnknownValKind:
- return evalCastKind(V.castAs<UnknownVal>(), CastTy, OriginalTy);
- case SVal::LocKind:
- return evalCastKind(V.castAs<Loc>(), CastTy, OriginalTy);
- case SVal::NonLocKind:
- return evalCastKind(V.castAs<NonLoc>(), CastTy, OriginalTy);
- }
-
- llvm_unreachable("Unknown SVal kind");
-}
+ CastTy = Context.getCanonicalType(CastTy);
-SVal SValBuilder::evalCastKind(UndefinedVal V, QualType CastTy,
- QualType OriginalTy) {
- return V;
-}
+ const bool IsUnknownOriginalType = OriginalTy.isNull();
+ if (!IsUnknownOriginalType) {
+ OriginalTy = Context.getCanonicalType(OriginalTy);
-SVal SValBuilder::evalCastKind(UnknownVal V, QualType CastTy,
- QualType OriginalTy) {
- return V;
-}
+ if (CastTy == OriginalTy)
+ return V;
-SVal SValBuilder::evalCastKind(Loc V, QualType CastTy, QualType OriginalTy) {
- switch (V.getSubKind()) {
- case loc::ConcreteIntKind:
- return evalCastSubKind(V.castAs<loc::ConcreteInt>(), CastTy, OriginalTy);
- case loc::GotoLabelKind:
- return evalCastSubKind(V.castAs<loc::GotoLabel>(), CastTy, OriginalTy);
- case loc::MemRegionValKind:
- return evalCastSubKind(V.castAs<loc::MemRegionVal>(), CastTy, OriginalTy);
+ // FIXME: Move this check to the most appropriate
+ // evalCastKind/evalCastSubKind function. For const casts, casts to void,
+ // just propagate the value.
+ if (!CastTy->isVariableArrayType() && !OriginalTy->isVariableArrayType())
+ if (shouldBeModeledWithNoOp(Context, Context.getPointerType(CastTy),
+ Context.getPointerType(OriginalTy)))
+ return V;
+ }
+ return SValVisitor::Visit(V);
}
+ SVal VisitUndefinedVal(UndefinedVal V) { return V; }
+ SVal VisitUnknownVal(UnknownVal V) { return V; }
+ SVal VisitConcreteInt(loc::ConcreteInt V) {
+ // Pointer to bool.
+ if (CastTy->isBooleanType())
+ return VB.makeTruthVal(V.getValue().getBoolValue(), CastTy);
+
+ // Pointer to integer.
+ if (CastTy->isIntegralOrEnumerationType()) {
+ llvm::APSInt Value = V.getValue();
+ VB.getBasicValueFactory().getAPSIntType(CastTy).apply(Value);
+ return VB.makeIntVal(Value);
+ }
- llvm_unreachable("Unknown SVal kind");
-}
+ // Pointer to any pointer.
+ if (Loc::isLocType(CastTy)) {
+ llvm::APSInt Value = V.getValue();
+ VB.getBasicValueFactory().getAPSIntType(CastTy).apply(Value);
+ return loc::ConcreteInt(VB.getBasicValueFactory().getValue(Value));
+ }
-SVal SValBuilder::evalCastKind(NonLoc V, QualType CastTy, QualType OriginalTy) {
- switch (V.getSubKind()) {
- case nonloc::CompoundValKind:
- return evalCastSubKind(V.castAs<nonloc::CompoundVal>(), CastTy, OriginalTy);
- case nonloc::ConcreteIntKind:
- return evalCastSubKind(V.castAs<nonloc::ConcreteInt>(), CastTy, OriginalTy);
- case nonloc::LazyCompoundValKind:
- return evalCastSubKind(V.castAs<nonloc::LazyCompoundVal>(), CastTy,
- OriginalTy);
- case nonloc::LocAsIntegerKind:
- return evalCastSubKind(V.castAs<nonloc::LocAsInteger>(), CastTy,
- OriginalTy);
- case nonloc::SymbolValKind:
- return evalCastSubKind(V.castAs<nonloc::SymbolVal>(), CastTy, OriginalTy);
- case nonloc::PointerToMemberKind:
- return evalCastSubKind(V.castAs<nonloc::PointerToMember>(), CastTy,
- OriginalTy);
+ // Pointer to whatever else.
+ return UnknownVal();
}
+ SVal VisitGotoLabel(loc::GotoLabel V) {
+ // Pointer to bool.
+ if (CastTy->isBooleanType())
+ // Labels are always true.
+ return VB.makeTruthVal(true, CastTy);
+
+ // Pointer to integer.
+ if (CastTy->isIntegralOrEnumerationType()) {
+ const unsigned BitWidth = Context.getIntWidth(CastTy);
+ return VB.makeLocAsInteger(V, BitWidth);
+ }
- llvm_unreachable("Unknown SVal kind");
-}
+ const bool IsUnknownOriginalType = OriginalTy.isNull();
+ if (!IsUnknownOriginalType) {
+ // Array to pointer.
+ if (isa<ArrayType>(OriginalTy))
+ if (CastTy->isPointerType() || CastTy->isReferenceType())
+ return UnknownVal();
+ }
-SVal SValBuilder::evalCastSubKind(loc::ConcreteInt V, QualType CastTy,
- QualType OriginalTy) {
- // Pointer to bool.
- if (CastTy->isBooleanType())
- return makeTruthVal(V.getValue().getBoolValue(), CastTy);
+ // Pointer to any pointer.
+ if (Loc::isLocType(CastTy))
+ return V;
- // Pointer to integer.
- if (CastTy->isIntegralOrEnumerationType()) {
- llvm::APSInt Value = V.getValue();
- BasicVals.getAPSIntType(CastTy).apply(Value);
- return makeIntVal(Value);
+ // Pointer to whatever else.
+ return UnknownVal();
}
+ SVal VisitMemRegionVal(loc::MemRegionVal V) {
+ // Pointer to bool.
+ if (CastTy->isBooleanType()) {
+ const MemRegion *R = V.getRegion();
+ if (const FunctionCodeRegion *FTR = dyn_cast<FunctionCodeRegion>(R))
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FTR->getDecl()))
+ if (FD->isWeak())
+ // FIXME: Currently we are using an extent symbol here,
+ // because there are no generic region address metadata
+ // symbols to use, only content metadata.
+ return nonloc::SymbolVal(
+ VB.getSymbolManager().getExtentSymbol(FTR));
+
+ if (const SymbolicRegion *SymR = R->getSymbolicBase()) {
+ SymbolRef Sym = SymR->getSymbol();
+ QualType Ty = Sym->getType();
+ // This change is needed for architectures with varying
+ // pointer widths. See the amdgcn opencl reproducer with
+ // this change as an example: solver-sym-simplification-ptr-bool.cl
+ if (!Ty->isReferenceType())
+ return VB.makeNonLoc(
+ Sym, BO_NE, VB.getBasicValueFactory().getZeroWithTypeSize(Ty),
+ CastTy);
+ }
+ // Non-symbolic memory regions are always true.
+ return VB.makeTruthVal(true, CastTy);
+ }
- // Pointer to any pointer.
- if (Loc::isLocType(CastTy))
- return V;
+ const bool IsUnknownOriginalType = OriginalTy.isNull();
+ // Try to cast to array
+ const auto *ArrayTy =
+ IsUnknownOriginalType
+ ? nullptr
+ : dyn_cast<ArrayType>(OriginalTy.getCanonicalType());
+
+ // Pointer to integer.
+ if (CastTy->isIntegralOrEnumerationType()) {
+ SVal Val = V;
+ // Array to integer.
+ if (ArrayTy) {
+ // We will always decay to a pointer.
+ QualType ElemTy = ArrayTy->getElementType();
+ Val = VB.getStateManager().ArrayToPointer(V, ElemTy);
+ // FIXME: Keep these here for now in case we decide soon that we
+ // need the original decayed type.
+ // QualType elemTy = cast<ArrayType>(originalTy)->getElementType();
+ // QualType pointerTy = C.getPointerType(elemTy);
+ }
+ const unsigned BitWidth = Context.getIntWidth(CastTy);
+ return VB.makeLocAsInteger(Val.castAs<Loc>(), BitWidth);
+ }
- // Pointer to whatever else.
- return UnknownVal();
-}
+ // Pointer to pointer.
+ if (Loc::isLocType(CastTy)) {
-SVal SValBuilder::evalCastSubKind(loc::GotoLabel V, QualType CastTy,
- QualType OriginalTy) {
- // Pointer to bool.
- if (CastTy->isBooleanType())
- // Labels are always true.
- return makeTruthVal(true, CastTy);
+ if (IsUnknownOriginalType) {
+ // When retrieving symbolic pointer and expecting a non-void pointer,
+ // wrap them into element regions of the expected type if necessary.
+ // It is necessary to make sure that the retrieved value makes sense,
+ // because there's no other cast in the AST that would tell us to cast
+ // it to the correct pointer type. We might need to do that for non-void
+ // pointers as well.
+ // FIXME: We really need a single good function to perform casts for us
+ // correctly every time we need it.
+ const MemRegion *R = V.getRegion();
+ if (CastTy->isPointerType() && !CastTy->isVoidPointerType()) {
+ if (const auto *SR = dyn_cast<SymbolicRegion>(R)) {
+ QualType SRTy = SR->getSymbol()->getType();
+
+ auto HasSameUnqualifiedPointeeType = [](QualType ty1,
+ QualType ty2) {
+ return ty1->getPointeeType().getCanonicalType().getTypePtr() ==
+ ty2->getPointeeType().getCanonicalType().getTypePtr();
+ };
+ if (!HasSameUnqualifiedPointeeType(SRTy, CastTy)) {
+ if (auto OptMemRegV = VB.getCastedMemRegionVal(SR, CastTy))
+ return *OptMemRegV;
+ }
+ }
+ }
+ // Next fixes pointer dereference using type different from its initial
+ // one. See PR37503 and PR49007 for details.
+ if (const auto *ER = dyn_cast<ElementRegion>(R)) {
+ if (auto OptMemRegV = VB.getCastedMemRegionVal(ER, CastTy))
+ return *OptMemRegV;
+ }
- // Pointer to integer.
- if (CastTy->isIntegralOrEnumerationType()) {
- const unsigned BitWidth = Context.getIntWidth(CastTy);
- return makeLocAsInteger(V, BitWidth);
- }
+ return V;
+ }
- const bool IsUnknownOriginalType = OriginalTy.isNull();
- if (!IsUnknownOriginalType) {
- // Array to pointer.
- if (isa<ArrayType>(OriginalTy))
- if (CastTy->isPointerType() || CastTy->isReferenceType())
- return UnknownVal();
- }
+ if (OriginalTy->isIntegralOrEnumerationType() ||
+ OriginalTy->isBlockPointerType() ||
+ OriginalTy->isFunctionPointerType())
+ return V;
- // Pointer to any pointer.
- if (Loc::isLocType(CastTy))
- return V;
+ // Array to pointer.
+ if (ArrayTy) {
+ // Are we casting from an array to a pointer? If so just pass on
+ // the decayed value.
+ if (CastTy->isPointerType() || CastTy->isReferenceType()) {
+ // We will always decay to a pointer.
+ QualType ElemTy = ArrayTy->getElementType();
+ return VB.getStateManager().ArrayToPointer(V, ElemTy);
+ }
+ // Are we casting from an array to an integer? If so, cast the decayed
+ // pointer value to an integer.
+ assert(CastTy->isIntegralOrEnumerationType());
+ }
- // Pointer to whatever else.
- return UnknownVal();
-}
+ // Other pointer to pointer.
+ assert(Loc::isLocType(OriginalTy) || OriginalTy->isFunctionType() ||
+ CastTy->isReferenceType());
-static bool hasSameUnqualifiedPointeeType(QualType ty1, QualType ty2) {
- return ty1->getPointeeType().getCanonicalType().getTypePtr() ==
- ty2->getPointeeType().getCanonicalType().getTypePtr();
-}
-
-SVal SValBuilder::evalCastSubKind(loc::MemRegionVal V, QualType CastTy,
- QualType OriginalTy) {
- // Pointer to bool.
- if (CastTy->isBooleanType()) {
- const MemRegion *R = V.getRegion();
- if (const FunctionCodeRegion *FTR = dyn_cast<FunctionCodeRegion>(R))
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FTR->getDecl()))
- if (FD->isWeak())
- // FIXME: Currently we are using an extent symbol here,
- // because there are no generic region address metadata
- // symbols to use, only content metadata.
- return nonloc::SymbolVal(SymMgr.getExtentSymbol(FTR));
-
- if (const SymbolicRegion *SymR = R->getSymbolicBase()) {
- SymbolRef Sym = SymR->getSymbol();
- QualType Ty = Sym->getType();
- // This change is needed for architectures with varying
- // pointer widths. See the amdgcn opencl reproducer with
- // this change as an example: solver-sym-simplification-ptr-bool.cl
- // FIXME: We could encounter a reference here,
- // try returning a concrete 'true' since it might
- // be easier on the solver.
- // FIXME: Cleanup remainder of `getZeroWithPtrWidth ()`
- // and `getIntWithPtrWidth()` functions to prevent future
- // confusion
- const llvm::APSInt &Zero = Ty->isReferenceType()
- ? BasicVals.getZeroWithPtrWidth()
- : BasicVals.getZeroWithTypeSize(Ty);
- return makeNonLoc(Sym, BO_NE, Zero, CastTy);
+ // We get a symbolic function pointer for a dereference of a function
+ // pointer, but it is of function type. Example:
+
+ // struct FPRec {
+ // void (*my_func)(int * x);
+ // };
+ //
+ // int bar(int x);
+ //
+ // int f1_a(struct FPRec* foo) {
+ // int x;
+ // (*foo->my_func)(&x);
+ // return bar(x)+1; // no-warning
+ // }
+
+ // Get the result of casting a region to a different type.
+ const MemRegion *R = V.getRegion();
+ if (auto OptMemRegV = VB.getCastedMemRegionVal(R, CastTy))
+ return *OptMemRegV;
}
- // Non-symbolic memory regions are always true.
- return makeTruthVal(true, CastTy);
+
+ // Pointer to whatever else.
+ // FIXME: There can be gross cases where one casts the result of a
+ // function (that returns a pointer) to some other value that happens to
+ // fit within that pointer value. We currently have no good way to model
+ // such operations. When this happens, the underlying operation is that
+ // the caller is reasoning about bits. Conceptually we are layering a
+ // "view" of a location on top of those bits. Perhaps we need to be more
+ // lazy about mutual possible views, even on an SVal? This may be
+ // necessary for bit-level reasoning as well.
+ return UnknownVal();
}
+ SVal VisitCompoundVal(nonloc::CompoundVal V) {
+ // Compound to whatever.
+ return UnknownVal();
+ }
+ SVal VisitConcreteInt(nonloc::ConcreteInt V) {
+ auto CastedValue = [V, this]() {
+ llvm::APSInt Value = V.getValue();
+ VB.getBasicValueFactory().getAPSIntType(CastTy).apply(Value);
+ return Value;
+ };
+
+ // Integer to bool.
+ if (CastTy->isBooleanType())
+ return VB.makeTruthVal(V.getValue().getBoolValue(), CastTy);
+
+ // Integer to pointer.
+ if (CastTy->isIntegralOrEnumerationType())
+ return VB.makeIntVal(CastedValue());
- const bool IsUnknownOriginalType = OriginalTy.isNull();
- // Try to cast to array
- const auto *ArrayTy =
- IsUnknownOriginalType
- ? nullptr
- : dyn_cast<ArrayType>(OriginalTy.getCanonicalType());
-
- // Pointer to integer.
- if (CastTy->isIntegralOrEnumerationType()) {
- SVal Val = V;
- // Array to integer.
- if (ArrayTy) {
- // We will always decay to a pointer.
- QualType ElemTy = ArrayTy->getElementType();
- Val = StateMgr.ArrayToPointer(V, ElemTy);
- // FIXME: Keep these here for now in case we decide soon that we
- // need the original decayed type.
- // QualType elemTy = cast<ArrayType>(originalTy)->getElementType();
- // QualType pointerTy = C.getPointerType(elemTy);
- }
- const unsigned BitWidth = Context.getIntWidth(CastTy);
- return makeLocAsInteger(Val.castAs<Loc>(), BitWidth);
+ // Integer to pointer.
+ if (Loc::isLocType(CastTy))
+ return VB.makeIntLocVal(CastedValue());
+
+ // Pointer to whatever else.
+ return UnknownVal();
}
+ SVal VisitLazyCompoundVal(nonloc::LazyCompoundVal V) {
+ // LazyCompound to whatever.
+ return UnknownVal();
+ }
+ SVal VisitLocAsInteger(nonloc::LocAsInteger V) {
+ Loc L = V.getLoc();
+
+ // Pointer as integer to bool.
+ if (CastTy->isBooleanType())
+ // Pass to Loc function.
+ return Visit(L);
+
+ const bool IsUnknownOriginalType = OriginalTy.isNull();
+ // Pointer as integer to pointer.
+ if (!IsUnknownOriginalType && Loc::isLocType(CastTy) &&
+ OriginalTy->isIntegralOrEnumerationType()) {
+ if (const MemRegion *R = L.getAsRegion())
+ if (auto OptMemRegV = VB.getCastedMemRegionVal(R, CastTy))
+ return *OptMemRegV;
+ return L;
+ }
- // Pointer to pointer.
- if (Loc::isLocType(CastTy)) {
-
- if (IsUnknownOriginalType) {
- // When retrieving symbolic pointer and expecting a non-void pointer,
- // wrap them into element regions of the expected type if necessary.
- // It is necessary to make sure that the retrieved value makes sense,
- // because there's no other cast in the AST that would tell us to cast
- // it to the correct pointer type. We might need to do that for non-void
- // pointers as well.
- // FIXME: We really need a single good function to perform casts for us
- // correctly every time we need it.
- const MemRegion *R = V.getRegion();
- if (CastTy->isPointerType() && !CastTy->isVoidPointerType()) {
- if (const auto *SR = dyn_cast<SymbolicRegion>(R)) {
- QualType SRTy = SR->getSymbol()->getType();
- if (!hasSameUnqualifiedPointeeType(SRTy, CastTy)) {
- if (auto OptMemRegV = getCastedMemRegionVal(SR, CastTy))
- return *OptMemRegV;
- }
- }
- }
- // Next fixes pointer dereference using type different from its initial
- // one. See PR37503 and PR49007 for details.
- if (const auto *ER = dyn_cast<ElementRegion>(R)) {
- if (auto OptMemRegV = getCastedMemRegionVal(ER, CastTy))
+ // Pointer as integer with region to integer/pointer.
+ const MemRegion *R = L.getAsRegion();
+ if (!IsUnknownOriginalType && R) {
+ if (CastTy->isIntegralOrEnumerationType())
+ return VisitMemRegionVal(loc::MemRegionVal(R));
+
+ if (Loc::isLocType(CastTy)) {
+ assert(Loc::isLocType(OriginalTy) || OriginalTy->isFunctionType() ||
+ CastTy->isReferenceType());
+ // Delegate to store manager to get the result of casting a region to a
+ // different type. If the MemRegion* returned is NULL, this expression
+ // Evaluates to UnknownVal.
+ if (auto OptMemRegV = VB.getCastedMemRegionVal(R, CastTy))
return *OptMemRegV;
}
+ } else {
+ if (Loc::isLocType(CastTy)) {
+ if (IsUnknownOriginalType)
+ return VisitMemRegionVal(loc::MemRegionVal(R));
+ return L;
+ }
- return V;
- }
+ SymbolRef SE = nullptr;
+ if (R) {
+ if (const SymbolicRegion *SR =
+ dyn_cast<SymbolicRegion>(R->StripCasts())) {
+ SE = SR->getSymbol();
+ }
+ }
- if (OriginalTy->isIntegralOrEnumerationType() ||
- OriginalTy->isBlockPointerType() || OriginalTy->isFunctionPointerType())
- return V;
+ if (!CastTy->isFloatingType() || !SE || SE->getType()->isFloatingType()) {
+ // FIXME: Correctly support promotions/truncations.
+ const unsigned CastSize = Context.getIntWidth(CastTy);
+ if (CastSize == V.getNumBits())
+ return V;
- // Array to pointer.
- if (ArrayTy) {
- // Are we casting from an array to a pointer? If so just pass on
- // the decayed value.
- if (CastTy->isPointerType() || CastTy->isReferenceType()) {
- // We will always decay to a pointer.
- QualType ElemTy = ArrayTy->getElementType();
- return StateMgr.ArrayToPointer(V, ElemTy);
+ return VB.makeLocAsInteger(L, CastSize);
}
- // Are we casting from an array to an integer? If so, cast the decayed
- // pointer value to an integer.
- assert(CastTy->isIntegralOrEnumerationType());
}
- // Other pointer to pointer.
- assert(Loc::isLocType(OriginalTy) || OriginalTy->isFunctionType() ||
- CastTy->isReferenceType());
-
- // We get a symbolic function pointer for a dereference of a function
- // pointer, but it is of function type. Example:
-
- // struct FPRec {
- // void (*my_func)(int * x);
- // };
- //
- // int bar(int x);
- //
- // int f1_a(struct FPRec* foo) {
- // int x;
- // (*foo->my_func)(&x);
- // return bar(x)+1; // no-warning
- // }
-
- // Get the result of casting a region to a different type.
- const MemRegion *R = V.getRegion();
- if (auto OptMemRegV = getCastedMemRegionVal(R, CastTy))
- return *OptMemRegV;
+ // Pointer as integer to whatever else.
+ return UnknownVal();
}
+ SVal VisitSymbolVal(nonloc::SymbolVal V) {
+ SymbolRef SE = V.getSymbol();
+
+ const bool IsUnknownOriginalType = OriginalTy.isNull();
+ // Symbol to bool.
+ if (!IsUnknownOriginalType && CastTy->isBooleanType()) {
+ // Non-float to bool.
+ if (Loc::isLocType(OriginalTy) ||
+ OriginalTy->isIntegralOrEnumerationType() ||
+ OriginalTy->isMemberPointerType()) {
+ BasicValueFactory &BVF = VB.getBasicValueFactory();
+ return VB.makeNonLoc(SE, BO_NE, BVF.getValue(0, SE->getType()), CastTy);
+ }
+ } else {
+ // Symbol to integer, float.
+ QualType T = Context.getCanonicalType(SE->getType());
+
+ // Produce SymbolCast if CastTy and T are different integers.
+ // NOTE: In the end the type of SymbolCast shall be equal to CastTy.
+ if (T->isIntegralOrUnscopedEnumerationType() &&
+ CastTy->isIntegralOrUnscopedEnumerationType()) {
+ AnalyzerOptions &Opts = VB.getStateManager()
+ .getOwningEngine()
+ .getAnalysisManager()
+ .getAnalyzerOptions();
+ // If appropriate option is disabled, ignore the cast.
+ // NOTE: ShouldSupportSymbolicIntegerCasts is `false` by default.
+ if (!Opts.ShouldSupportSymbolicIntegerCasts)
+ return V;
+ return simplifySymbolCast(V, CastTy);
+ }
+ if (!Loc::isLocType(CastTy))
+ if (!IsUnknownOriginalType || !CastTy->isFloatingType() ||
+ T->isFloatingType())
+ return VB.makeNonLoc(SE, T, CastTy);
+ }
- // Pointer to whatever else.
- // FIXME: There can be gross cases where one casts the result of a
- // function (that returns a pointer) to some other value that happens to
- // fit within that pointer value. We currently have no good way to model
- // such operations. When this happens, the underlying operation is that
- // the caller is reasoning about bits. Conceptually we are layering a
- // "view" of a location on top of those bits. Perhaps we need to be more
- // lazy about mutual possible views, even on an SVal? This may be
- // necessary for bit-level reasoning as well.
- return UnknownVal();
-}
-
-SVal SValBuilder::evalCastSubKind(nonloc::CompoundVal V, QualType CastTy,
- QualType OriginalTy) {
- // Compound to whatever.
- return UnknownVal();
-}
-
-SVal SValBuilder::evalCastSubKind(nonloc::ConcreteInt V, QualType CastTy,
- QualType OriginalTy) {
- auto CastedValue = [V, CastTy, this]() {
- llvm::APSInt Value = V.getValue();
- BasicVals.getAPSIntType(CastTy).apply(Value);
- return Value;
- };
-
- // Integer to bool.
- if (CastTy->isBooleanType())
- return makeTruthVal(V.getValue().getBoolValue(), CastTy);
-
- // Integer to pointer.
- if (CastTy->isIntegralOrEnumerationType())
- return makeIntVal(CastedValue());
-
- // Integer to pointer.
- if (Loc::isLocType(CastTy))
- return makeIntLocVal(CastedValue());
+ // FIXME: We should be able to cast NonLoc -> Loc
+ // (when Loc::isLocType(CastTy) is true)
+ // But it's hard to do as SymbolicRegions can't refer to SymbolCasts holding
+ // generic SymExprs. Check the commit message for the details.
- // Pointer to whatever else.
- return UnknownVal();
-}
+ // Symbol to pointer and whatever else.
+ return UnknownVal();
+ }
+ SVal VisitPointerToMember(nonloc::PointerToMember V) {
+ // Member pointer to whatever.
+ return V;
+ }
-SVal SValBuilder::evalCastSubKind(nonloc::LazyCompoundVal V, QualType CastTy,
- QualType OriginalTy) {
- // Compound to whatever.
- return UnknownVal();
-}
+ /// Reduce cast expression by removing redundant intermediate casts.
+ /// E.g.
+ /// - (char)(short)(int x) -> (char)(int x)
+ /// - (int)(int x) -> int x
+ ///
+ /// \param V -- SymbolVal, which pressumably contains SymbolCast or any symbol
+ /// that is applicable for cast operation.
+ /// \param CastTy -- QualType, which `V` shall be cast to.
+ /// \return SVal with simplified cast expression.
+ /// \note: Currently only support integral casts.
+ nonloc::SymbolVal simplifySymbolCast(nonloc::SymbolVal V, QualType CastTy) {
+ // We use seven conditions to recognize a simplification case.
+ // For the clarity let `CastTy` be `C`, SE->getType() - `T`, root type -
+ // `R`, prefix `u` for unsigned, `s` for signed, no prefix - any sign: E.g.
+ // (char)(short)(uint x)
+ // ( sC )( sT )( uR x)
+ //
+ // C === R (the same type)
+ // (char)(char x) -> (char x)
+ // (long)(long x) -> (long x)
+ // Note: Comparisons operators below are for bit width.
+ // C == T
+ // (short)(short)(int x) -> (short)(int x)
+ // (int)(long)(char x) -> (int)(char x) (sizeof(long) == sizeof(int))
+ // (long)(ullong)(char x) -> (long)(char x) (sizeof(long) ==
+ // sizeof(ullong))
+ // C < T
+ // (short)(int)(char x) -> (short)(char x)
+ // (char)(int)(short x) -> (char)(short x)
+ // (short)(int)(short x) -> (short x)
+ // C > T > uR
+ // (int)(short)(uchar x) -> (int)(uchar x)
+ // (uint)(short)(uchar x) -> (uint)(uchar x)
+ // (int)(ushort)(uchar x) -> (int)(uchar x)
+ // C > sT > sR
+ // (int)(short)(char x) -> (int)(char x)
+ // (uint)(short)(char x) -> (uint)(char x)
+ // C > sT == sR
+ // (int)(char)(char x) -> (int)(char x)
+ // (uint)(short)(short x) -> (uint)(short x)
+ // C > uT == uR
+ // (int)(uchar)(uchar x) -> (int)(uchar x)
+ // (uint)(ushort)(ushort x) -> (uint)(ushort x)
+ // (llong)(ulong)(uint x) -> (llong)(uint x) (sizeof(ulong) ==
+ // sizeof(uint))
+
+ SymbolRef SE = V.getSymbol();
+ QualType T = Context.getCanonicalType(SE->getType());
-SVal SValBuilder::evalCastSubKind(nonloc::LocAsInteger V, QualType CastTy,
- QualType OriginalTy) {
- Loc L = V.getLoc();
+ if (T == CastTy)
+ return V;
- // Pointer as integer to bool.
- if (CastTy->isBooleanType())
- // Pass to Loc function.
- return evalCastKind(L, CastTy, OriginalTy);
+ if (!isa<SymbolCast>(SE))
+ return VB.makeNonLoc(SE, T, CastTy);
- const bool IsUnknownOriginalType = OriginalTy.isNull();
- // Pointer as integer to pointer.
- if (!IsUnknownOriginalType && Loc::isLocType(CastTy) &&
- OriginalTy->isIntegralOrEnumerationType()) {
- if (const MemRegion *R = L.getAsRegion())
- if (auto OptMemRegV = getCastedMemRegionVal(R, CastTy))
- return *OptMemRegV;
- return L;
- }
+ SymbolRef RootSym = cast<SymbolCast>(SE)->getOperand();
+ QualType RT = RootSym->getType().getCanonicalType();
- // Pointer as integer with region to integer/pointer.
- const MemRegion *R = L.getAsRegion();
- if (!IsUnknownOriginalType && R) {
- if (CastTy->isIntegralOrEnumerationType())
- return evalCastSubKind(loc::MemRegionVal(R), CastTy, OriginalTy);
+ // FIXME support simplification from non-integers.
+ if (!RT->isIntegralOrEnumerationType())
+ return VB.makeNonLoc(SE, T, CastTy);
- if (Loc::isLocType(CastTy)) {
- assert(Loc::isLocType(OriginalTy) || OriginalTy->isFunctionType() ||
- CastTy->isReferenceType());
- // Delegate to store manager to get the result of casting a region to a
- // different type. If the MemRegion* returned is NULL, this expression
- // Evaluates to UnknownVal.
- if (auto OptMemRegV = getCastedMemRegionVal(R, CastTy))
- return *OptMemRegV;
- }
- } else {
- if (Loc::isLocType(CastTy)) {
- if (IsUnknownOriginalType)
- return evalCastSubKind(loc::MemRegionVal(R), CastTy, OriginalTy);
- return L;
- }
-
- SymbolRef SE = nullptr;
- if (R) {
- if (const SymbolicRegion *SR =
- dyn_cast<SymbolicRegion>(R->StripCasts())) {
- SE = SR->getSymbol();
- }
- }
+ BasicValueFactory &BVF = VB.getBasicValueFactory();
+ APSIntType CTy = BVF.getAPSIntType(CastTy);
+ APSIntType TTy = BVF.getAPSIntType(T);
- if (!CastTy->isFloatingType() || !SE || SE->getType()->isFloatingType()) {
- // FIXME: Correctly support promotions/truncations.
- const unsigned CastSize = Context.getIntWidth(CastTy);
- if (CastSize == V.getNumBits())
- return V;
+ const auto WC = CTy.getBitWidth();
+ const auto WT = TTy.getBitWidth();
- return makeLocAsInteger(L, CastSize);
+ if (WC <= WT) {
+ const bool isSameType = (RT == CastTy);
+ if (isSameType)
+ return nonloc::SymbolVal(RootSym);
+ return VB.makeNonLoc(RootSym, RT, CastTy);
}
- }
- // Pointer as integer to whatever else.
- return UnknownVal();
-}
+ APSIntType RTy = BVF.getAPSIntType(RT);
+ const auto WR = RTy.getBitWidth();
+ const bool UT = TTy.isUnsigned();
+ const bool UR = RTy.isUnsigned();
-SVal SValBuilder::evalCastSubKind(nonloc::SymbolVal V, QualType CastTy,
- QualType OriginalTy) {
- SymbolRef SE = V.getSymbol();
+ if (((WT > WR) && (UR || !UT)) || ((WT == WR) && (UT == UR)))
+ return VB.makeNonLoc(RootSym, RT, CastTy);
- const bool IsUnknownOriginalType = OriginalTy.isNull();
- // Symbol to bool.
- if (!IsUnknownOriginalType && CastTy->isBooleanType()) {
- // Non-float to bool.
- if (Loc::isLocType(OriginalTy) ||
- OriginalTy->isIntegralOrEnumerationType() ||
- OriginalTy->isMemberPointerType()) {
- BasicValueFactory &BVF = getBasicValueFactory();
- return makeNonLoc(SE, BO_NE, BVF.getValue(0, SE->getType()), CastTy);
- }
- } else {
- // Symbol to integer, float.
- QualType T = Context.getCanonicalType(SE->getType());
- // If types are the same or both are integers, ignore the cast.
- // FIXME: Remove this hack when we support symbolic truncation/extension.
- // HACK: If both castTy and T are integers, ignore the cast. This is
- // not a permanent solution. Eventually we want to precisely handle
- // extension/truncation of symbolic integers. This prevents us from losing
- // precision when we assign 'x = y' and 'y' is symbolic and x and y are
- // different integer types.
- if (haveSameType(T, CastTy))
- return V;
- if (!Loc::isLocType(CastTy))
- if (!IsUnknownOriginalType || !CastTy->isFloatingType() ||
- T->isFloatingType())
- return makeNonLoc(SE, T, CastTy);
+ return VB.makeNonLoc(SE, T, CastTy);
}
+};
+} // end anonymous namespace
- // Symbol to pointer and whatever else.
- return UnknownVal();
-}
-
-SVal SValBuilder::evalCastSubKind(nonloc::PointerToMember V, QualType CastTy,
- QualType OriginalTy) {
- // Member pointer to whatever.
- return V;
+/// Cast a given SVal to another SVal using given QualType's.
+/// \param V -- SVal that should be casted.
+/// \param CastTy -- QualType that V should be casted according to.
+/// \param OriginalTy -- QualType which is associated to V. It provides
+/// additional information about what type the cast performs from.
+/// \returns the most appropriate casted SVal.
+/// Note: Many cases don't use an exact OriginalTy. It can be extracted
+/// from SVal or the cast can performs unconditionaly. Always pass OriginalTy!
+/// It can be crucial in certain cases and generates different results.
+/// FIXME: If `OriginalTy.isNull()` is true, then cast performs based on CastTy
+/// only. This behavior is uncertain and should be improved.
+SVal SValBuilder::evalCast(SVal V, QualType CastTy, QualType OriginalTy) {
+ EvalCastVisitor TRV{*this, CastTy, OriginalTy};
+ return TRV.Visit(V);
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SVals.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SVals.cpp
index 117546e43b1a..0e1351215bb4 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SVals.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SVals.cpp
@@ -25,12 +25,12 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/SValVisitor.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
+#include <optional>
using namespace clang;
using namespace ento;
@@ -43,27 +43,8 @@ using namespace ento;
// Utility methods.
//===----------------------------------------------------------------------===//
-bool SVal::hasConjuredSymbol() const {
- if (Optional<nonloc::SymbolVal> SV = getAs<nonloc::SymbolVal>()) {
- SymbolRef sym = SV->getSymbol();
- if (isa<SymbolConjured>(sym))
- return true;
- }
-
- if (Optional<loc::MemRegionVal> RV = getAs<loc::MemRegionVal>()) {
- const MemRegion *R = RV->getRegion();
- if (const auto *SR = dyn_cast<SymbolicRegion>(R)) {
- SymbolRef sym = SR->getSymbol();
- if (isa<SymbolConjured>(sym))
- return true;
- }
- }
-
- return false;
-}
-
const FunctionDecl *SVal::getAsFunctionDecl() const {
- if (Optional<loc::MemRegionVal> X = getAs<loc::MemRegionVal>()) {
+ if (std::optional<loc::MemRegionVal> X = getAs<loc::MemRegionVal>()) {
const MemRegion* R = X->getRegion();
if (const FunctionCodeRegion *CTR = R->getAs<FunctionCodeRegion>())
if (const auto *FD = dyn_cast<FunctionDecl>(CTR->getDecl()))
@@ -97,7 +78,7 @@ SymbolRef SVal::getAsLocSymbol(bool IncludeBaseRegions) const {
/// Get the symbol in the SVal or its base region.
SymbolRef SVal::getLocSymbolInBase() const {
- Optional<loc::MemRegionVal> X = getAs<loc::MemRegionVal>();
+ std::optional<loc::MemRegionVal> X = getAs<loc::MemRegionVal>();
if (!X)
return nullptr;
@@ -122,17 +103,25 @@ SymbolRef SVal::getLocSymbolInBase() const {
/// should continue to the base regions if the region is not symbolic.
SymbolRef SVal::getAsSymbol(bool IncludeBaseRegions) const {
// FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
- if (Optional<nonloc::SymbolVal> X = getAs<nonloc::SymbolVal>())
+ if (std::optional<nonloc::SymbolVal> X = getAs<nonloc::SymbolVal>())
return X->getSymbol();
return getAsLocSymbol(IncludeBaseRegions);
}
+const llvm::APSInt *SVal::getAsInteger() const {
+ if (auto CI = getAs<nonloc::ConcreteInt>())
+ return &CI->getValue();
+ if (auto CI = getAs<loc::ConcreteInt>())
+ return &CI->getValue();
+ return nullptr;
+}
+
const MemRegion *SVal::getAsRegion() const {
- if (Optional<loc::MemRegionVal> X = getAs<loc::MemRegionVal>())
+ if (std::optional<loc::MemRegionVal> X = getAs<loc::MemRegionVal>())
return X->getRegion();
- if (Optional<nonloc::LocAsInteger> X = getAs<nonloc::LocAsInteger>())
+ if (std::optional<nonloc::LocAsInteger> X = getAs<nonloc::LocAsInteger>())
return X->getLoc().getAsRegion();
return nullptr;
@@ -147,23 +136,19 @@ private:
public:
TypeRetrievingVisitor(const ASTContext &Context) : Context(Context) {}
- QualType VisitLocMemRegionVal(loc::MemRegionVal MRV) {
+ QualType VisitMemRegionVal(loc::MemRegionVal MRV) {
return Visit(MRV.getRegion());
}
- QualType VisitLocGotoLabel(loc::GotoLabel GL) {
+ QualType VisitGotoLabel(loc::GotoLabel GL) {
return QualType{Context.VoidPtrTy};
}
template <class ConcreteInt> QualType VisitConcreteInt(ConcreteInt CI) {
const llvm::APSInt &Value = CI.getValue();
+ if (1 == Value.getBitWidth())
+ return Context.BoolTy;
return Context.getIntTypeForBitwidth(Value.getBitWidth(), Value.isSigned());
}
- QualType VisitLocConcreteInt(loc::ConcreteInt CI) {
- return VisitConcreteInt(CI);
- }
- QualType VisitNonLocConcreteInt(nonloc::ConcreteInt CI) {
- return VisitConcreteInt(CI);
- }
- QualType VisitNonLocLocAsInteger(nonloc::LocAsInteger LI) {
+ QualType VisitLocAsInteger(nonloc::LocAsInteger LI) {
QualType NestedType = Visit(LI.getLoc());
if (NestedType.isNull())
return NestedType;
@@ -171,18 +156,21 @@ public:
return Context.getIntTypeForBitwidth(LI.getNumBits(),
NestedType->isSignedIntegerType());
}
- QualType VisitNonLocCompoundVal(nonloc::CompoundVal CV) {
+ QualType VisitCompoundVal(nonloc::CompoundVal CV) {
return CV.getValue()->getType();
}
- QualType VisitNonLocLazyCompoundVal(nonloc::LazyCompoundVal LCV) {
+ QualType VisitLazyCompoundVal(nonloc::LazyCompoundVal LCV) {
return LCV.getRegion()->getValueType();
}
- QualType VisitNonLocSymbolVal(nonloc::SymbolVal SV) {
+ QualType VisitSymbolVal(nonloc::SymbolVal SV) {
return Visit(SV.getSymbol());
}
QualType VisitSymbolicRegion(const SymbolicRegion *SR) {
return Visit(SR->getSymbol());
}
+ QualType VisitAllocaRegion(const AllocaRegion *) {
+ return QualType{Context.VoidPtrTy};
+ }
QualType VisitTypedRegion(const TypedRegion *TR) {
return TR->getLocationType();
}
@@ -196,8 +184,7 @@ QualType SVal::getType(const ASTContext &Context) const {
}
const MemRegion *loc::MemRegionVal::stripCasts(bool StripBaseCasts) const {
- const MemRegion *R = getRegion();
- return R ? R->StripCasts(StripBaseCasts) : nullptr;
+ return getRegion()->StripCasts(StripBaseCasts);
}
const void *nonloc::LazyCompoundVal::getStore() const {
@@ -261,9 +248,9 @@ bool SVal::isConstant() const {
}
bool SVal::isConstant(int I) const {
- if (Optional<loc::ConcreteInt> LV = getAs<loc::ConcreteInt>())
+ if (std::optional<loc::ConcreteInt> LV = getAs<loc::ConcreteInt>())
return LV->getValue() == I;
- if (Optional<nonloc::ConcreteInt> NV = getAs<nonloc::ConcreteInt>())
+ if (std::optional<nonloc::ConcreteInt> NV = getAs<nonloc::ConcreteInt>())
return NV->getValue() == I;
return false;
}
@@ -273,49 +260,6 @@ bool SVal::isZeroConstant() const {
}
//===----------------------------------------------------------------------===//
-// Transfer function dispatch for Non-Locs.
-//===----------------------------------------------------------------------===//
-
-SVal nonloc::ConcreteInt::evalBinOp(SValBuilder &svalBuilder,
- BinaryOperator::Opcode Op,
- const nonloc::ConcreteInt& R) const {
- const llvm::APSInt* X =
- svalBuilder.getBasicValueFactory().evalAPSInt(Op, getValue(), R.getValue());
-
- if (X)
- return nonloc::ConcreteInt(*X);
- else
- return UndefinedVal();
-}
-
-nonloc::ConcreteInt
-nonloc::ConcreteInt::evalComplement(SValBuilder &svalBuilder) const {
- return svalBuilder.makeIntVal(~getValue());
-}
-
-nonloc::ConcreteInt
-nonloc::ConcreteInt::evalMinus(SValBuilder &svalBuilder) const {
- return svalBuilder.makeIntVal(-getValue());
-}
-
-//===----------------------------------------------------------------------===//
-// Transfer function dispatch for Locs.
-//===----------------------------------------------------------------------===//
-
-SVal loc::ConcreteInt::evalBinOp(BasicValueFactory& BasicVals,
- BinaryOperator::Opcode Op,
- const loc::ConcreteInt& R) const {
- assert(BinaryOperator::isComparisonOp(Op) || Op == BO_Sub);
-
- const llvm::APSInt *X = BasicVals.evalAPSInt(Op, getValue(), R.getValue());
-
- if (X)
- return nonloc::ConcreteInt(*X);
- else
- return UndefinedVal();
-}
-
-//===----------------------------------------------------------------------===//
// Pretty-Printing.
//===----------------------------------------------------------------------===//
@@ -331,30 +275,33 @@ void SVal::printJson(raw_ostream &Out, bool AddQuotes) const {
}
void SVal::dumpToStream(raw_ostream &os) const {
- switch (getBaseKind()) {
- case UnknownValKind:
- os << "Unknown";
- break;
- case NonLocKind:
- castAs<NonLoc>().dumpToStream(os);
- break;
- case LocKind:
- castAs<Loc>().dumpToStream(os);
- break;
- case UndefinedValKind:
- os << "Undefined";
- break;
+ if (isUndef()) {
+ os << "Undefined";
+ return;
}
+ if (isUnknown()) {
+ os << "Unknown";
+ return;
+ }
+ if (NonLoc::classof(*this)) {
+ castAs<NonLoc>().dumpToStream(os);
+ return;
+ }
+ if (Loc::classof(*this)) {
+ castAs<Loc>().dumpToStream(os);
+ return;
+ }
+ llvm_unreachable("Unhandled SVal kind!");
}
void NonLoc::dumpToStream(raw_ostream &os) const {
- switch (getSubKind()) {
- case nonloc::ConcreteIntKind: {
- const auto &Value = castAs<nonloc::ConcreteInt>().getValue();
- os << Value << ' ' << (Value.isSigned() ? 'S' : 'U')
- << Value.getBitWidth() << 'b';
- break;
- }
+ switch (getKind()) {
+ case nonloc::ConcreteIntKind: {
+ const auto &Value = castAs<nonloc::ConcreteInt>().getValue();
+ os << Value << ' ' << (Value.isSigned() ? 'S' : 'U') << Value.getBitWidth()
+ << 'b';
+ break;
+ }
case nonloc::SymbolValKind:
os << castAs<nonloc::SymbolVal>().getSymbol();
break;
@@ -401,7 +348,7 @@ void NonLoc::dumpToStream(raw_ostream &os) const {
else
os << ", ";
- os << (*I).getType().getAsString();
+ os << I->getType();
}
os << '}';
@@ -410,21 +357,21 @@ void NonLoc::dumpToStream(raw_ostream &os) const {
default:
assert(false && "Pretty-printed not implemented for this NonLoc.");
break;
- }
+ }
}
void Loc::dumpToStream(raw_ostream &os) const {
- switch (getSubKind()) {
- case loc::ConcreteIntKind:
- os << castAs<loc::ConcreteInt>().getValue().getZExtValue() << " (Loc)";
- break;
- case loc::GotoLabelKind:
- os << "&&" << castAs<loc::GotoLabel>().getLabel()->getName();
- break;
- case loc::MemRegionValKind:
- os << '&' << castAs<loc::MemRegionVal>().getRegion()->getString();
- break;
- default:
- llvm_unreachable("Pretty-printing not implemented for this Loc.");
+ switch (getKind()) {
+ case loc::ConcreteIntKind:
+ os << castAs<loc::ConcreteInt>().getValue().getZExtValue() << " (Loc)";
+ break;
+ case loc::GotoLabelKind:
+ os << "&&" << castAs<loc::GotoLabel>().getLabel()->getName();
+ break;
+ case loc::MemRegionValKind:
+ os << '&' << castAs<loc::MemRegionVal>().getRegion()->getString();
+ break;
+ default:
+ llvm_unreachable("Pretty-printing not implemented for this Loc.");
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
index e1319a4c2e41..fab520098f13 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
@@ -13,6 +13,8 @@
#include "clang/Analysis/MacroExpansionContext.h"
#include "clang/Analysis/PathDiagnostic.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/Sarif.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Version.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
@@ -30,10 +32,12 @@ namespace {
class SarifDiagnostics : public PathDiagnosticConsumer {
std::string OutputFile;
const LangOptions &LO;
+ SarifDocumentWriter SarifWriter;
public:
- SarifDiagnostics(const std::string &Output, const LangOptions &LO)
- : OutputFile(Output), LO(LO) {}
+ SarifDiagnostics(const std::string &Output, const LangOptions &LO,
+ const SourceManager &SM)
+ : OutputFile(Output), LO(LO), SarifWriter(SM) {}
~SarifDiagnostics() override = default;
void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
@@ -56,250 +60,12 @@ void ento::createSarifDiagnosticConsumer(
if (Output.empty())
return;
- C.push_back(new SarifDiagnostics(Output, PP.getLangOpts()));
+ C.push_back(
+ new SarifDiagnostics(Output, PP.getLangOpts(), PP.getSourceManager()));
createTextMinimalPathDiagnosticConsumer(std::move(DiagOpts), C, Output, PP,
CTU, MacroExpansions);
}
-static StringRef getFileName(const FileEntry &FE) {
- StringRef Filename = FE.tryGetRealPathName();
- if (Filename.empty())
- Filename = FE.getName();
- return Filename;
-}
-
-static std::string percentEncodeURICharacter(char C) {
- // RFC 3986 claims alpha, numeric, and this handful of
- // characters are not reserved for the path component and
- // should be written out directly. Otherwise, percent
- // encode the character and write that out instead of the
- // reserved character.
- if (llvm::isAlnum(C) ||
- StringRef::npos != StringRef("-._~:@!$&'()*+,;=").find(C))
- return std::string(&C, 1);
- return "%" + llvm::toHex(StringRef(&C, 1));
-}
-
-static std::string fileNameToURI(StringRef Filename) {
- llvm::SmallString<32> Ret = StringRef("file://");
-
- // Get the root name to see if it has a URI authority.
- StringRef Root = sys::path::root_name(Filename);
- if (Root.startswith("//")) {
- // There is an authority, so add it to the URI.
- Ret += Root.drop_front(2).str();
- } else if (!Root.empty()) {
- // There is no authority, so end the component and add the root to the URI.
- Ret += Twine("/" + Root).str();
- }
-
- auto Iter = sys::path::begin(Filename), End = sys::path::end(Filename);
- assert(Iter != End && "Expected there to be a non-root path component.");
- // Add the rest of the path components, encoding any reserved characters;
- // we skip past the first path component, as it was handled it above.
- std::for_each(++Iter, End, [&Ret](StringRef Component) {
- // For reasons unknown to me, we may get a backslash with Windows native
- // paths for the initial backslash following the drive component, which
- // we need to ignore as a URI path part.
- if (Component == "\\")
- return;
-
- // Add the separator between the previous path part and the one being
- // currently processed.
- Ret += "/";
-
- // URI encode the part.
- for (char C : Component) {
- Ret += percentEncodeURICharacter(C);
- }
- });
-
- return std::string(Ret);
-}
-
-static json::Object createArtifactLocation(const FileEntry &FE) {
- return json::Object{{"uri", fileNameToURI(getFileName(FE))}};
-}
-
-static json::Object createArtifact(const FileEntry &FE) {
- return json::Object{{"location", createArtifactLocation(FE)},
- {"roles", json::Array{"resultFile"}},
- {"length", FE.getSize()},
- {"mimeType", "text/plain"}};
-}
-
-static json::Object createArtifactLocation(const FileEntry &FE,
- json::Array &Artifacts) {
- std::string FileURI = fileNameToURI(getFileName(FE));
-
- // See if the Artifacts array contains this URI already. If it does not,
- // create a new artifact object to add to the array.
- auto I = llvm::find_if(Artifacts, [&](const json::Value &File) {
- if (const json::Object *Obj = File.getAsObject()) {
- if (const json::Object *FileLoc = Obj->getObject("location")) {
- Optional<StringRef> URI = FileLoc->getString("uri");
- return URI && URI->equals(FileURI);
- }
- }
- return false;
- });
-
- // Calculate the index within the artifact array so it can be stored in
- // the JSON object.
- auto Index = static_cast<unsigned>(std::distance(Artifacts.begin(), I));
- if (I == Artifacts.end())
- Artifacts.push_back(createArtifact(FE));
-
- return json::Object{{"uri", FileURI}, {"index", Index}};
-}
-
-static unsigned int adjustColumnPos(const SourceManager &SM, SourceLocation Loc,
- unsigned int TokenLen = 0) {
- assert(!Loc.isInvalid() && "invalid Loc when adjusting column position");
-
- std::pair<FileID, unsigned> LocInfo = SM.getDecomposedExpansionLoc(Loc);
- assert(LocInfo.second > SM.getExpansionColumnNumber(Loc) &&
- "position in file is before column number?");
-
- Optional<MemoryBufferRef> Buf = SM.getBufferOrNone(LocInfo.first);
- assert(Buf && "got an invalid buffer for the location's file");
- assert(Buf->getBufferSize() >= (LocInfo.second + TokenLen) &&
- "token extends past end of buffer?");
-
- // Adjust the offset to be the start of the line, since we'll be counting
- // Unicode characters from there until our column offset.
- unsigned int Off = LocInfo.second - (SM.getExpansionColumnNumber(Loc) - 1);
- unsigned int Ret = 1;
- while (Off < (LocInfo.second + TokenLen)) {
- Off += getNumBytesForUTF8(Buf->getBuffer()[Off]);
- Ret++;
- }
-
- return Ret;
-}
-
-static json::Object createTextRegion(const LangOptions &LO, SourceRange R,
- const SourceManager &SM) {
- json::Object Region{
- {"startLine", SM.getExpansionLineNumber(R.getBegin())},
- {"startColumn", adjustColumnPos(SM, R.getBegin())},
- };
- if (R.getBegin() == R.getEnd()) {
- Region["endColumn"] = adjustColumnPos(SM, R.getBegin());
- } else {
- Region["endLine"] = SM.getExpansionLineNumber(R.getEnd());
- Region["endColumn"] = adjustColumnPos(
- SM, R.getEnd(),
- Lexer::MeasureTokenLength(R.getEnd(), SM, LO));
- }
- return Region;
-}
-
-static json::Object createPhysicalLocation(const LangOptions &LO,
- SourceRange R, const FileEntry &FE,
- const SourceManager &SMgr,
- json::Array &Artifacts) {
- return json::Object{
- {{"artifactLocation", createArtifactLocation(FE, Artifacts)},
- {"region", createTextRegion(LO, R, SMgr)}}};
-}
-
-enum class Importance { Important, Essential, Unimportant };
-
-static StringRef importanceToStr(Importance I) {
- switch (I) {
- case Importance::Important:
- return "important";
- case Importance::Essential:
- return "essential";
- case Importance::Unimportant:
- return "unimportant";
- }
- llvm_unreachable("Fully covered switch is not so fully covered");
-}
-
-static json::Object createThreadFlowLocation(json::Object &&Location,
- Importance I) {
- return json::Object{{"location", std::move(Location)},
- {"importance", importanceToStr(I)}};
-}
-
-static json::Object createMessage(StringRef Text) {
- return json::Object{{"text", Text.str()}};
-}
-
-static json::Object createLocation(json::Object &&PhysicalLocation,
- StringRef Message = "") {
- json::Object Ret{{"physicalLocation", std::move(PhysicalLocation)}};
- if (!Message.empty())
- Ret.insert({"message", createMessage(Message)});
- return Ret;
-}
-
-static Importance calculateImportance(const PathDiagnosticPiece &Piece) {
- switch (Piece.getKind()) {
- case PathDiagnosticPiece::Call:
- case PathDiagnosticPiece::Macro:
- case PathDiagnosticPiece::Note:
- case PathDiagnosticPiece::PopUp:
- // FIXME: What should be reported here?
- break;
- case PathDiagnosticPiece::Event:
- return Piece.getTagStr() == "ConditionBRVisitor" ? Importance::Important
- : Importance::Essential;
- case PathDiagnosticPiece::ControlFlow:
- return Importance::Unimportant;
- }
- return Importance::Unimportant;
-}
-
-static json::Object createThreadFlow(const LangOptions &LO,
- const PathPieces &Pieces,
- json::Array &Artifacts) {
- const SourceManager &SMgr = Pieces.front()->getLocation().getManager();
- json::Array Locations;
- for (const auto &Piece : Pieces) {
- const PathDiagnosticLocation &P = Piece->getLocation();
- Locations.push_back(createThreadFlowLocation(
- createLocation(createPhysicalLocation(
- LO, P.asRange(),
- *P.asLocation().getExpansionLoc().getFileEntry(),
- SMgr, Artifacts),
- Piece->getString()),
- calculateImportance(*Piece)));
- }
- return json::Object{{"locations", std::move(Locations)}};
-}
-
-static json::Object createCodeFlow(const LangOptions &LO,
- const PathPieces &Pieces,
- json::Array &Artifacts) {
- return json::Object{
- {"threadFlows", json::Array{createThreadFlow(LO, Pieces, Artifacts)}}};
-}
-
-static json::Object createResult(const LangOptions &LO,
- const PathDiagnostic &Diag,
- json::Array &Artifacts,
- const StringMap<unsigned> &RuleMapping) {
- const PathPieces &Path = Diag.path.flatten(false);
- const SourceManager &SMgr = Path.front()->getLocation().getManager();
-
- auto Iter = RuleMapping.find(Diag.getCheckerName());
- assert(Iter != RuleMapping.end() && "Rule ID is not in the array index map?");
-
- return json::Object{
- {"message", createMessage(Diag.getVerboseDescription())},
- {"codeFlows", json::Array{createCodeFlow(LO, Path, Artifacts)}},
- {"locations",
- json::Array{createLocation(createPhysicalLocation(
- LO, Diag.getLocation().asRange(),
- *Diag.getLocation().asLocation().getExpansionLoc().getFileEntry(),
- SMgr, Artifacts))}},
- {"ruleIndex", Iter->getValue()},
- {"ruleId", Diag.getCheckerName()}};
-}
-
static StringRef getRuleDescription(StringRef CheckName) {
return llvm::StringSwitch<StringRef>(CheckName)
#define GET_CHECKERS
@@ -322,61 +88,99 @@ static StringRef getRuleHelpURIStr(StringRef CheckName) {
;
}
-static json::Object createRule(const PathDiagnostic &Diag) {
- StringRef CheckName = Diag.getCheckerName();
- json::Object Ret{
- {"fullDescription", createMessage(getRuleDescription(CheckName))},
- {"name", CheckName},
- {"id", CheckName}};
-
- std::string RuleURI = std::string(getRuleHelpURIStr(CheckName));
- if (!RuleURI.empty())
- Ret["helpUri"] = RuleURI;
-
- return Ret;
+static ThreadFlowImportance
+calculateImportance(const PathDiagnosticPiece &Piece) {
+ switch (Piece.getKind()) {
+ case PathDiagnosticPiece::Call:
+ case PathDiagnosticPiece::Macro:
+ case PathDiagnosticPiece::Note:
+ case PathDiagnosticPiece::PopUp:
+ // FIXME: What should be reported here?
+ break;
+ case PathDiagnosticPiece::Event:
+ return Piece.getTagStr() == "ConditionBRVisitor"
+ ? ThreadFlowImportance::Important
+ : ThreadFlowImportance::Essential;
+ case PathDiagnosticPiece::ControlFlow:
+ return ThreadFlowImportance::Unimportant;
+ }
+ return ThreadFlowImportance::Unimportant;
+}
+
+/// Accepts a SourceRange corresponding to a pair of the first and last tokens
+/// and converts to a Character granular CharSourceRange.
+static CharSourceRange convertTokenRangeToCharRange(const SourceRange &R,
+ const SourceManager &SM,
+ const LangOptions &LO) {
+ // Caret diagnostics have the first and last locations pointed at the same
+ // location, return these as-is.
+ if (R.getBegin() == R.getEnd())
+ return CharSourceRange::getCharRange(R);
+
+ SourceLocation BeginCharLoc = R.getBegin();
+ // For token ranges, the raw end SLoc points at the first character of the
+ // last token in the range. This must be moved to one past the end of the
+ // last character using the lexer.
+ SourceLocation EndCharLoc =
+ Lexer::getLocForEndOfToken(R.getEnd(), /* Offset = */ 0, SM, LO);
+ return CharSourceRange::getCharRange(BeginCharLoc, EndCharLoc);
+}
+
+static SmallVector<ThreadFlow, 8> createThreadFlows(const PathDiagnostic *Diag,
+ const LangOptions &LO) {
+ SmallVector<ThreadFlow, 8> Flows;
+ const PathPieces &Pieces = Diag->path.flatten(false);
+ for (const auto &Piece : Pieces) {
+ auto Range = convertTokenRangeToCharRange(
+ Piece->getLocation().asRange(), Piece->getLocation().getManager(), LO);
+ auto Flow = ThreadFlow::create()
+ .setImportance(calculateImportance(*Piece))
+ .setRange(Range)
+ .setMessage(Piece->getString());
+ Flows.push_back(Flow);
+ }
+ return Flows;
}
-static json::Array createRules(std::vector<const PathDiagnostic *> &Diags,
- StringMap<unsigned> &RuleMapping) {
- json::Array Rules;
+static StringMap<uint32_t>
+createRuleMapping(const std::vector<const PathDiagnostic *> &Diags,
+ SarifDocumentWriter &SarifWriter) {
+ StringMap<uint32_t> RuleMapping;
llvm::StringSet<> Seen;
- llvm::for_each(Diags, [&](const PathDiagnostic *D) {
- StringRef RuleID = D->getCheckerName();
- std::pair<llvm::StringSet<>::iterator, bool> P = Seen.insert(RuleID);
+ for (const PathDiagnostic *D : Diags) {
+ StringRef CheckName = D->getCheckerName();
+ std::pair<llvm::StringSet<>::iterator, bool> P = Seen.insert(CheckName);
if (P.second) {
- RuleMapping[RuleID] = Rules.size(); // Maps RuleID to an Array Index.
- Rules.push_back(createRule(*D));
+ auto Rule = SarifRule::create()
+ .setName(CheckName)
+ .setRuleId(CheckName)
+ .setDescription(getRuleDescription(CheckName))
+ .setHelpURI(getRuleHelpURIStr(CheckName));
+ size_t RuleIdx = SarifWriter.createRule(Rule);
+ RuleMapping[CheckName] = RuleIdx;
}
- });
-
- return Rules;
+ }
+ return RuleMapping;
}
-static json::Object createTool(std::vector<const PathDiagnostic *> &Diags,
- StringMap<unsigned> &RuleMapping) {
- return json::Object{
- {"driver", json::Object{{"name", "clang"},
- {"fullName", "clang static analyzer"},
- {"language", "en-US"},
- {"version", getClangFullVersion()},
- {"rules", createRules(Diags, RuleMapping)}}}};
-}
+static SarifResult createResult(const PathDiagnostic *Diag,
+ const StringMap<uint32_t> &RuleMapping,
+ const LangOptions &LO) {
-static json::Object createRun(const LangOptions &LO,
- std::vector<const PathDiagnostic *> &Diags) {
- json::Array Results, Artifacts;
- StringMap<unsigned> RuleMapping;
- json::Object Tool = createTool(Diags, RuleMapping);
-
- llvm::for_each(Diags, [&](const PathDiagnostic *D) {
- Results.push_back(createResult(LO, *D, Artifacts, RuleMapping));
- });
+ StringRef CheckName = Diag->getCheckerName();
+ uint32_t RuleIdx = RuleMapping.lookup(CheckName);
+ auto Range = convertTokenRangeToCharRange(
+ Diag->getLocation().asRange(), Diag->getLocation().getManager(), LO);
- return json::Object{{"tool", std::move(Tool)},
- {"results", std::move(Results)},
- {"artifacts", std::move(Artifacts)},
- {"columnKind", "unicodeCodePoints"}};
+ SmallVector<ThreadFlow, 8> Flows = createThreadFlows(Diag, LO);
+ auto Result = SarifResult::create(RuleIdx)
+ .setRuleId(CheckName)
+ .setDiagnosticMessage(Diag->getVerboseDescription())
+ .setDiagnosticLevel(SarifResultLevel::Warning)
+ .setLocations({Range})
+ .setThreadFlows(Flows);
+ return Result;
}
void SarifDiagnostics::FlushDiagnosticsImpl(
@@ -392,10 +196,14 @@ void SarifDiagnostics::FlushDiagnosticsImpl(
llvm::errs() << "warning: could not create file: " << EC.message() << '\n';
return;
}
- json::Object Sarif{
- {"$schema",
- "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json"},
- {"version", "2.1.0"},
- {"runs", json::Array{createRun(LO, Diags)}}};
- OS << llvm::formatv("{0:2}\n", json::Value(std::move(Sarif)));
+
+ std::string ToolVersion = getClangFullVersion();
+ SarifWriter.createRun("clang", "clang static analyzer", ToolVersion);
+ StringMap<uint32_t> RuleMapping = createRuleMapping(Diags, SarifWriter);
+ for (const PathDiagnostic *D : Diags) {
+ SarifResult Result = createResult(D, RuleMapping, LO);
+ SarifWriter.appendResult(Result);
+ }
+ auto Document = SarifWriter.createDocument();
+ OS << llvm::formatv("{0:2}\n", json::Value(std::move(Document)));
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
index f96974f97dcc..8ca2cdb9d3ab 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
@@ -15,6 +15,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include <optional>
namespace clang {
@@ -22,11 +23,11 @@ namespace ento {
SimpleConstraintManager::~SimpleConstraintManager() {}
-ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef State,
- DefinedSVal Cond,
- bool Assumption) {
+ProgramStateRef SimpleConstraintManager::assumeInternal(ProgramStateRef State,
+ DefinedSVal Cond,
+ bool Assumption) {
// If we have a Loc value, cast it to a bool NonLoc first.
- if (Optional<Loc> LV = Cond.getAs<Loc>()) {
+ if (std::optional<Loc> LV = Cond.getAs<Loc>()) {
SValBuilder &SVB = State->getStateManager().getSValBuilder();
QualType T;
const MemRegion *MR = LV->getAsRegion();
@@ -44,7 +45,7 @@ ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef State,
ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef State,
NonLoc Cond, bool Assumption) {
State = assumeAux(State, Cond, Assumption);
- if (NotifyAssumeClients && EE)
+ if (EE)
return EE->processAssume(State, Cond, Assumption);
return State;
}
@@ -62,7 +63,7 @@ ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef State,
return assumeSymUnsupported(State, Sym, Assumption);
}
- switch (Cond.getSubKind()) {
+ switch (Cond.getKind()) {
default:
llvm_unreachable("'Assume' not implemented for this NonLoc");
@@ -86,12 +87,12 @@ ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef State,
}
case nonloc::LocAsIntegerKind:
- return assume(State, Cond.castAs<nonloc::LocAsInteger>().getLoc(),
- Assumption);
+ return assumeInternal(State, Cond.castAs<nonloc::LocAsInteger>().getLoc(),
+ Assumption);
} // end switch
}
-ProgramStateRef SimpleConstraintManager::assumeInclusiveRange(
+ProgramStateRef SimpleConstraintManager::assumeInclusiveRangeInternal(
ProgramStateRef State, NonLoc Value, const llvm::APSInt &From,
const llvm::APSInt &To, bool InRange) {
@@ -106,7 +107,7 @@ ProgramStateRef SimpleConstraintManager::assumeInclusiveRange(
return assumeSymInclusiveRange(State, Sym, From, To, InRange);
}
- switch (Value.getSubKind()) {
+ switch (Value.getKind()) {
default:
llvm_unreachable("'assumeInclusiveRange' is not implemented"
"for this NonLoc");
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
index e57d92fbcebb..45e48d435aca 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -11,25 +11,63 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValVisitor.h"
+#include <optional>
using namespace clang;
using namespace ento;
namespace {
class SimpleSValBuilder : public SValBuilder {
+
+ // Query the constraint manager whether the SVal has only one possible
+ // (integer) value. If that is the case, the value is returned. Otherwise,
+ // returns NULL.
+ // This is an implementation detail. Checkers should use `getKnownValue()`
+ // instead.
+ static const llvm::APSInt *getConstValue(ProgramStateRef state, SVal V);
+
+ // Helper function that returns the value stored in a nonloc::ConcreteInt or
+ // loc::ConcreteInt.
+ static const llvm::APSInt *getConcreteValue(SVal V);
+
+ // With one `simplifySValOnce` call, a compound symbols might collapse to
+ // simpler symbol tree that is still possible to further simplify. Thus, we
+ // do the simplification on a new symbol tree until we reach the simplest
+ // form, i.e. the fixpoint.
+ // Consider the following symbol `(b * b) * b * b` which has this tree:
+ // *
+ // / \
+ // * b
+ // / \
+ // / b
+ // (b * b)
+ // Now, if the `b * b == 1` new constraint is added then during the first
+ // iteration we have the following transformations:
+ // * *
+ // / \ / \
+ // * b --> b b
+ // / \
+ // / b
+ // 1
+ // We need another iteration to reach the final result `1`.
+ SVal simplifyUntilFixpoint(ProgramStateRef State, SVal Val);
+
+ // Recursively descends into symbolic expressions and replaces symbols
+ // with their known values (in the sense of the getConstValue() method).
+ // We traverse the symbol tree and query the constraint values for the
+ // sub-trees and if a value is a constant we do the constant folding.
+ SVal simplifySValOnce(ProgramStateRef State, SVal V);
+
public:
SimpleSValBuilder(llvm::BumpPtrAllocator &alloc, ASTContext &context,
ProgramStateManager &stateMgr)
- : SValBuilder(alloc, context, stateMgr) {}
+ : SValBuilder(alloc, context, stateMgr) {}
~SimpleSValBuilder() override {}
- SVal evalMinus(NonLoc val) override;
- SVal evalComplement(NonLoc val) override;
SVal evalBinOpNN(ProgramStateRef state, BinaryOperator::Opcode op,
NonLoc lhs, NonLoc rhs, QualType resultTy) override;
SVal evalBinOpLL(ProgramStateRef state, BinaryOperator::Opcode op,
@@ -37,12 +75,21 @@ public:
SVal evalBinOpLN(ProgramStateRef state, BinaryOperator::Opcode op,
Loc lhs, NonLoc rhs, QualType resultTy) override;
- /// getKnownValue - evaluates a given SVal. If the SVal has only one possible
- /// (integer) value, that value is returned. Otherwise, returns NULL.
+ /// Evaluates a given SVal by recursively evaluating and
+ /// simplifying the children SVals. If the SVal has only one possible
+ /// (integer) value, that value is returned. Otherwise, returns NULL.
const llvm::APSInt *getKnownValue(ProgramStateRef state, SVal V) override;
- /// Recursively descends into symbolic expressions and replaces symbols
- /// with their known values (in the sense of the getKnownValue() method).
+ /// Evaluates a given SVal by recursively evaluating and simplifying the
+ /// children SVals, then returns its minimal possible (integer) value. If the
+ /// constraint manager cannot provide a meaningful answer, this returns NULL.
+ const llvm::APSInt *getMinValue(ProgramStateRef state, SVal V) override;
+
+ /// Evaluates a given SVal by recursively evaluating and simplifying the
+ /// children SVals, then returns its maximal possible (integer) value. If the
+ /// constraint manager cannot provide a meaningful answer, this returns NULL.
+ const llvm::APSInt *getMaxValue(ProgramStateRef state, SVal V) override;
+
SVal simplifySVal(ProgramStateRef State, SVal V) override;
SVal MakeSymIntVal(const SymExpr *LHS, BinaryOperator::Opcode op,
@@ -56,26 +103,21 @@ SValBuilder *ento::createSimpleSValBuilder(llvm::BumpPtrAllocator &alloc,
return new SimpleSValBuilder(alloc, context, stateMgr);
}
-//===----------------------------------------------------------------------===//
-// Transfer function for unary operators.
-//===----------------------------------------------------------------------===//
-
-SVal SimpleSValBuilder::evalMinus(NonLoc val) {
- switch (val.getSubKind()) {
- case nonloc::ConcreteIntKind:
- return val.castAs<nonloc::ConcreteInt>().evalMinus(*this);
- default:
- return UnknownVal();
+// Checks if the negation the value and flipping sign preserve
+// the semantics on the operation in the resultType
+static bool isNegationValuePreserving(const llvm::APSInt &Value,
+ APSIntType ResultType) {
+ const unsigned ValueBits = Value.getSignificantBits();
+ if (ValueBits == ResultType.getBitWidth()) {
+ // The value is the lowest negative value that is representable
+ // in signed integer with bitWith of result type. The
+ // negation is representable if resultType is unsigned.
+ return ResultType.isUnsigned();
}
-}
-SVal SimpleSValBuilder::evalComplement(NonLoc X) {
- switch (X.getSubKind()) {
- case nonloc::ConcreteIntKind:
- return X.castAs<nonloc::ConcreteInt>().evalComplement(*this);
- default:
- return UnknownVal();
- }
+ // If resultType bitWith is higher that number of bits required
+ // to represent RHS, the sign flip produce same value.
+ return ValueBits < ResultType.getBitWidth();
}
//===----------------------------------------------------------------------===//
@@ -129,14 +171,14 @@ SVal SimpleSValBuilder::MakeSymIntVal(const SymExpr *LHS,
// a&0 and a&(~0)
if (RHS == 0)
return makeIntVal(0, resultTy);
- else if (RHS.isAllOnesValue())
+ else if (RHS.isAllOnes())
isIdempotent = true;
break;
case BO_Or:
// a|0 and a|(~0)
if (RHS == 0)
isIdempotent = true;
- else if (RHS.isAllOnesValue()) {
+ else if (RHS.isAllOnes()) {
const llvm::APSInt &Result = BasicVals.Convert(resultTy, RHS);
return nonloc::ConcreteInt(Result);
}
@@ -171,6 +213,17 @@ SVal SimpleSValBuilder::MakeSymIntVal(const SymExpr *LHS,
if (RHS.isSigned() && !SymbolType->isSignedIntegerOrEnumerationType())
ConvertedRHS = &BasicVals.Convert(SymbolType, RHS);
}
+ } else if (BinaryOperator::isAdditiveOp(op) && RHS.isNegative()) {
+ // Change a+(-N) into a-N, and a-(-N) into a+N
+ // Adjust addition/subtraction of negative value, to
+ // subtraction/addition of the negated value.
+ APSIntType resultIntTy = BasicVals.getAPSIntType(resultTy);
+ if (isNegationValuePreserving(RHS, resultIntTy)) {
+ ConvertedRHS = &BasicVals.getValue(-resultIntTy.convert(RHS));
+ op = (op == BO_Add) ? BO_Sub : BO_Add;
+ } else {
+ ConvertedRHS = &BasicVals.Convert(resultTy, RHS);
+ }
} else
ConvertedRHS = &BasicVals.Convert(resultTy, RHS);
@@ -260,7 +313,6 @@ static NonLoc doRearrangeUnchecked(ProgramStateRef State,
else
llvm_unreachable("Operation not suitable for unchecked rearrangement!");
- // FIXME: Can we use assume() without getting into an infinite recursion?
if (LSym == RSym)
return SVB.evalBinOpNN(State, Op, nonloc::ConcreteInt(LInt),
nonloc::ConcreteInt(RInt), ResultTy)
@@ -311,51 +363,48 @@ static bool shouldRearrange(ProgramStateRef State, BinaryOperator::Opcode Op,
isWithinConstantOverflowBounds(Int)));
}
-static Optional<NonLoc> tryRearrange(ProgramStateRef State,
- BinaryOperator::Opcode Op, NonLoc Lhs,
- NonLoc Rhs, QualType ResultTy) {
+static std::optional<NonLoc> tryRearrange(ProgramStateRef State,
+ BinaryOperator::Opcode Op, NonLoc Lhs,
+ NonLoc Rhs, QualType ResultTy) {
ProgramStateManager &StateMgr = State->getStateManager();
SValBuilder &SVB = StateMgr.getSValBuilder();
// We expect everything to be of the same type - this type.
QualType SingleTy;
- auto &Opts =
- StateMgr.getOwningEngine().getAnalysisManager().getAnalyzerOptions();
-
// FIXME: After putting complexity threshold to the symbols we can always
// rearrange additive operations but rearrange comparisons only if
// option is set.
- if(!Opts.ShouldAggressivelySimplifyBinaryOperation)
- return None;
+ if (!SVB.getAnalyzerOptions().ShouldAggressivelySimplifyBinaryOperation)
+ return std::nullopt;
SymbolRef LSym = Lhs.getAsSymbol();
if (!LSym)
- return None;
+ return std::nullopt;
if (BinaryOperator::isComparisonOp(Op)) {
SingleTy = LSym->getType();
if (ResultTy != SVB.getConditionType())
- return None;
+ return std::nullopt;
// Initialize SingleTy later with a symbol's type.
} else if (BinaryOperator::isAdditiveOp(Op)) {
SingleTy = ResultTy;
if (LSym->getType() != SingleTy)
- return None;
+ return std::nullopt;
} else {
// Don't rearrange other operations.
- return None;
+ return std::nullopt;
}
assert(!SingleTy.isNull() && "We should have figured out the type by now!");
// Rearrange signed symbolic expressions only
if (!SingleTy->isSignedIntegerOrEnumerationType())
- return None;
+ return std::nullopt;
SymbolRef RSym = Rhs.getAsSymbol();
if (!RSym || RSym->getType() != SingleTy)
- return None;
+ return std::nullopt;
BasicValueFactory &BV = State->getBasicVals();
llvm::APSInt LInt, RInt;
@@ -363,7 +412,7 @@ static Optional<NonLoc> tryRearrange(ProgramStateRef State,
std::tie(RSym, RInt) = decomposeSymbol(RSym, BV);
if (!shouldRearrange(State, Op, LSym, LInt, SingleTy) ||
!shouldRearrange(State, Op, RSym, RInt, SingleTy))
- return None;
+ return std::nullopt;
// We know that no overflows can occur anymore.
return doRearrangeUnchecked(State, Op, LSym, LInt, RSym, RInt);
@@ -376,6 +425,15 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
NonLoc InputLHS = lhs;
NonLoc InputRHS = rhs;
+ // Constraints may have changed since the creation of a bound SVal. Check if
+ // the values can be simplified based on those new constraints.
+ SVal simplifiedLhs = simplifySVal(state, lhs);
+ SVal simplifiedRhs = simplifySVal(state, rhs);
+ if (auto simplifiedLhsAsNonLoc = simplifiedLhs.getAs<NonLoc>())
+ lhs = *simplifiedLhsAsNonLoc;
+ if (auto simplifiedRhsAsNonLoc = simplifiedRhs.getAs<NonLoc>())
+ rhs = *simplifiedRhsAsNonLoc;
+
// Handle trivial case where left-side and right-side are the same.
if (lhs == rhs)
switch (op) {
@@ -400,12 +458,12 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
return evalCast(lhs, resultTy, QualType{});
}
- while (1) {
- switch (lhs.getSubKind()) {
+ while (true) {
+ switch (lhs.getKind()) {
default:
return makeSymExprValNN(op, lhs, rhs, resultTy);
case nonloc::PointerToMemberKind: {
- assert(rhs.getSubKind() == nonloc::PointerToMemberKind &&
+ assert(rhs.getKind() == nonloc::PointerToMemberKind &&
"Both SVals should have pointer-to-member-type");
auto LPTM = lhs.castAs<nonloc::PointerToMember>(),
RPTM = rhs.castAs<nonloc::PointerToMember>();
@@ -421,36 +479,36 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
}
case nonloc::LocAsIntegerKind: {
Loc lhsL = lhs.castAs<nonloc::LocAsInteger>().getLoc();
- switch (rhs.getSubKind()) {
- case nonloc::LocAsIntegerKind:
- // FIXME: at the moment the implementation
- // of modeling "pointers as integers" is not complete.
- if (!BinaryOperator::isComparisonOp(op))
- return UnknownVal();
- return evalBinOpLL(state, op, lhsL,
- rhs.castAs<nonloc::LocAsInteger>().getLoc(),
- resultTy);
- case nonloc::ConcreteIntKind: {
- // FIXME: at the moment the implementation
- // of modeling "pointers as integers" is not complete.
- if (!BinaryOperator::isComparisonOp(op))
- return UnknownVal();
- // Transform the integer into a location and compare.
- // FIXME: This only makes sense for comparisons. If we want to, say,
- // add 1 to a LocAsInteger, we'd better unpack the Loc and add to it,
- // then pack it back into a LocAsInteger.
- llvm::APSInt i = rhs.castAs<nonloc::ConcreteInt>().getValue();
- // If the region has a symbolic base, pay attention to the type; it
- // might be coming from a non-default address space. For non-symbolic
- // regions it doesn't matter that much because such comparisons would
- // most likely evaluate to concrete false anyway. FIXME: We might
- // still need to handle the non-comparison case.
- if (SymbolRef lSym = lhs.getAsLocSymbol(true))
- BasicVals.getAPSIntType(lSym->getType()).apply(i);
- else
- BasicVals.getAPSIntType(Context.VoidPtrTy).apply(i);
- return evalBinOpLL(state, op, lhsL, makeLoc(i), resultTy);
- }
+ switch (rhs.getKind()) {
+ case nonloc::LocAsIntegerKind:
+ // FIXME: at the moment the implementation
+ // of modeling "pointers as integers" is not complete.
+ if (!BinaryOperator::isComparisonOp(op))
+ return UnknownVal();
+ return evalBinOpLL(state, op, lhsL,
+ rhs.castAs<nonloc::LocAsInteger>().getLoc(),
+ resultTy);
+ case nonloc::ConcreteIntKind: {
+ // FIXME: at the moment the implementation
+ // of modeling "pointers as integers" is not complete.
+ if (!BinaryOperator::isComparisonOp(op))
+ return UnknownVal();
+ // Transform the integer into a location and compare.
+ // FIXME: This only makes sense for comparisons. If we want to, say,
+ // add 1 to a LocAsInteger, we'd better unpack the Loc and add to it,
+ // then pack it back into a LocAsInteger.
+ llvm::APSInt i = rhs.castAs<nonloc::ConcreteInt>().getValue();
+ // If the region has a symbolic base, pay attention to the type; it
+ // might be coming from a non-default address space. For non-symbolic
+ // regions it doesn't matter that much because such comparisons would
+ // most likely evaluate to concrete false anyway. FIXME: We might
+ // still need to handle the non-comparison case.
+ if (SymbolRef lSym = lhs.getAsLocSymbol(true))
+ BasicVals.getAPSIntType(lSym->getType()).apply(i);
+ else
+ BasicVals.getAPSIntType(Context.VoidPtrTy).apply(i);
+ return evalBinOpLL(state, op, lhsL, makeLoc(i), resultTy);
+ }
default:
switch (op) {
case BO_EQ:
@@ -461,13 +519,13 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
// This case also handles pointer arithmetic.
return makeSymExprValNN(op, InputLHS, InputRHS, resultTy);
}
- }
+ }
}
case nonloc::ConcreteIntKind: {
llvm::APSInt LHSValue = lhs.castAs<nonloc::ConcreteInt>().getValue();
// If we're dealing with two known constants, just perform the operation.
- if (const llvm::APSInt *KnownRHSValue = getKnownValue(state, rhs)) {
+ if (const llvm::APSInt *KnownRHSValue = getConstValue(state, rhs)) {
llvm::APSInt RHSValue = *KnownRHSValue;
if (BinaryOperator::isComparisonOp(op)) {
// We're looking for a type big enough to compare the two values.
@@ -485,8 +543,21 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
const llvm::APSInt *Result =
BasicVals.evalAPSInt(op, LHSValue, RHSValue);
- if (!Result)
+ if (!Result) {
+ if (op == BO_Shl || op == BO_Shr) {
+ // FIXME: At this point the constant folding claims that the result
+ // of a bitwise shift is undefined. However, constant folding
+ // relies on the inaccurate type information that is stored in the
+ // bit size of APSInt objects, and if we reached this point, then
+ // the checker core.BitwiseShift already determined that the shift
+ // is valid (in a PreStmt callback, by querying the real type from
+ // the AST node).
+ // To avoid embarrassing false positives, let's just say that we
+ // don't know anything about the result of the shift.
+ return UnknownVal();
+ }
return UndefinedVal();
+ }
return nonloc::ConcreteInt(*Result);
}
@@ -501,7 +572,7 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
case BO_LE:
case BO_GE:
op = BinaryOperator::reverseComparisonOp(op);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case BO_EQ:
case BO_NE:
case BO_Add:
@@ -513,9 +584,9 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
continue;
case BO_Shr:
// (~0)>>a
- if (LHSValue.isAllOnesValue() && LHSValue.isSigned())
+ if (LHSValue.isAllOnes() && LHSValue.isSigned())
return evalCast(lhs, resultTy, QualType{});
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case BO_Shl:
// 0<<a and 0>>a
if (LHSValue == 0)
@@ -527,7 +598,7 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
// 0 % x == 0
if (LHSValue == 0)
return makeZeroVal(resultTy);
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
default:
return makeSymExprValNN(op, InputLHS, InputRHS, resultTy);
}
@@ -587,7 +658,7 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
}
// For now, only handle expressions whose RHS is a constant.
- if (const llvm::APSInt *RHSValue = getKnownValue(state, rhs)) {
+ if (const llvm::APSInt *RHSValue = getConstValue(state, rhs)) {
// If both the LHS and the current expression are additive,
// fold their constants and try again.
if (BinaryOperator::isAdditiveOp(op)) {
@@ -604,16 +675,26 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
const llvm::APSInt &first = IntType.convert(symIntExpr->getRHS());
const llvm::APSInt &second = IntType.convert(*RHSValue);
+ // If the op and lop agrees, then we just need to
+ // sum the constants. Otherwise, we change to operation
+ // type if substraction would produce negative value
+ // (and cause overflow for unsigned integers),
+ // as consequence x+1U-10 produces x-9U, instead
+ // of x+4294967287U, that would be produced without this
+ // additional check.
const llvm::APSInt *newRHS;
- if (lop == op)
+ if (lop == op) {
newRHS = BasicVals.evalAPSInt(BO_Add, first, second);
- else
+ } else if (first >= second) {
newRHS = BasicVals.evalAPSInt(BO_Sub, first, second);
+ op = lop;
+ } else {
+ newRHS = BasicVals.evalAPSInt(BO_Sub, second, first);
+ }
assert(newRHS && "Invalid operation despite common type!");
rhs = nonloc::ConcreteInt(*newRHS);
lhs = nonloc::SymbolVal(symIntExpr->getLHS());
- op = lop;
continue;
}
}
@@ -623,21 +704,11 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
}
}
- // Does the symbolic expression simplify to a constant?
- // If so, "fold" the constant by setting 'lhs' to a ConcreteInt
- // and try again.
- SVal simplifiedLhs = simplifySVal(state, lhs);
- if (simplifiedLhs != lhs)
- if (auto simplifiedLhsAsNonLoc = simplifiedLhs.getAs<NonLoc>()) {
- lhs = *simplifiedLhsAsNonLoc;
- continue;
- }
-
// Is the RHS a constant?
- if (const llvm::APSInt *RHSValue = getKnownValue(state, rhs))
+ if (const llvm::APSInt *RHSValue = getConstValue(state, rhs))
return MakeSymIntVal(Sym, op, *RHSValue, resultTy);
- if (Optional<NonLoc> V = tryRearrange(state, op, lhs, rhs, resultTy))
+ if (std::optional<NonLoc> V = tryRearrange(state, op, lhs, rhs, resultTy))
return *V;
// Give up -- this is not a symbolic expression we can handle.
@@ -693,11 +764,40 @@ static SVal evalBinOpFieldRegionFieldRegion(const FieldRegion *LeftFR,
llvm_unreachable("Fields not found in parent record's definition");
}
+// This is used in debug builds only for now because some downstream users
+// may hit this assert in their subsequent merges.
+// There are still places in the analyzer where equal bitwidth Locs
+// are compared, and need to be found and corrected. Recent previous fixes have
+// addressed the known problems of making NULLs with specific bitwidths
+// for Loc comparisons along with deprecation of APIs for the same purpose.
+//
+static void assertEqualBitWidths(ProgramStateRef State, Loc RhsLoc,
+ Loc LhsLoc) {
+ // Implements a "best effort" check for RhsLoc and LhsLoc bit widths
+ ASTContext &Ctx = State->getStateManager().getContext();
+ uint64_t RhsBitwidth =
+ RhsLoc.getType(Ctx).isNull() ? 0 : Ctx.getTypeSize(RhsLoc.getType(Ctx));
+ uint64_t LhsBitwidth =
+ LhsLoc.getType(Ctx).isNull() ? 0 : Ctx.getTypeSize(LhsLoc.getType(Ctx));
+ if (RhsBitwidth && LhsBitwidth && (LhsLoc.getKind() == RhsLoc.getKind())) {
+ assert(RhsBitwidth == LhsBitwidth &&
+ "RhsLoc and LhsLoc bitwidth must be same!");
+ }
+}
+
// FIXME: all this logic will change if/when we have MemRegion::getLocation().
SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
BinaryOperator::Opcode op,
Loc lhs, Loc rhs,
QualType resultTy) {
+
+ // Assert that bitwidth of lhs and rhs are the same.
+ // This can happen if two different address spaces are used,
+ // and the bitwidths of the address spaces are different.
+ // See LIT case clang/test/Analysis/cstring-checker-addressspace.c
+ // FIXME: See comment above in the function assertEqualBitWidths
+ assertEqualBitWidths(state, rhs, lhs);
+
// Only comparisons and subtractions are valid operations on two pointers.
// See [C99 6.5.5 through 6.5.14] or [C++0x 5.6 through 5.15].
// However, if a pointer is casted to an integer, evalBinOpNN may end up
@@ -725,7 +825,7 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
}
}
- switch (lhs.getSubKind()) {
+ switch (lhs.getKind()) {
default:
llvm_unreachable("Ordering not implemented for this Loc.");
@@ -755,6 +855,8 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
return UnknownVal();
case loc::ConcreteIntKind: {
+ auto L = lhs.castAs<loc::ConcreteInt>();
+
// If one of the operands is a symbol and the other is a constant,
// build an expression for use by the constraint manager.
if (SymbolRef rSym = rhs.getAsLocSymbol()) {
@@ -763,19 +865,17 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
if (!BinaryOperator::isComparisonOp(op) || op == BO_Cmp)
return UnknownVal();
- const llvm::APSInt &lVal = lhs.castAs<loc::ConcreteInt>().getValue();
op = BinaryOperator::reverseComparisonOp(op);
- return makeNonLoc(rSym, op, lVal, resultTy);
+ return makeNonLoc(rSym, op, L.getValue(), resultTy);
}
// If both operands are constants, just perform the operation.
- if (Optional<loc::ConcreteInt> rInt = rhs.getAs<loc::ConcreteInt>()) {
- SVal ResultVal =
- lhs.castAs<loc::ConcreteInt>().evalBinOp(BasicVals, op, *rInt);
- if (Optional<NonLoc> Result = ResultVal.getAs<NonLoc>())
- return evalCast(*Result, resultTy, QualType{});
+ if (std::optional<loc::ConcreteInt> rInt = rhs.getAs<loc::ConcreteInt>()) {
+ assert(BinaryOperator::isComparisonOp(op) || op == BO_Sub);
- assert(!ResultVal.getAs<Loc>() && "Loc-Loc ops should not produce Locs");
+ if (const auto *ResultInt =
+ BasicVals.evalAPSInt(op, L.getValue(), rInt->getValue()))
+ return evalCast(nonloc::ConcreteInt(*ResultInt), resultTy, QualType{});
return UnknownVal();
}
@@ -783,7 +883,7 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
// This must come after the test if the RHS is a symbol, which is used to
// build constraints. The address of any non-symbolic region is guaranteed
// to be non-NULL, as is any label.
- assert(rhs.getAs<loc::MemRegionVal>() || rhs.getAs<loc::GotoLabel>());
+ assert((isa<loc::MemRegionVal, loc::GotoLabel>(rhs)));
if (lhs.isZeroConstant()) {
switch (op) {
default:
@@ -804,7 +904,7 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
return UnknownVal();
}
case loc::MemRegionValKind: {
- if (Optional<loc::ConcreteInt> rInt = rhs.getAs<loc::ConcreteInt>()) {
+ if (std::optional<loc::ConcreteInt> rInt = rhs.getAs<loc::ConcreteInt>()) {
// If one of the operands is a symbol and the other is a constant,
// build an expression for use by the constraint manager.
if (SymbolRef lSym = lhs.getAsLocSymbol(true)) {
@@ -901,7 +1001,7 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
// Get the left index and cast it to the correct type.
// If the index is unknown or undefined, bail out here.
SVal LeftIndexVal = LeftER->getIndex();
- Optional<NonLoc> LeftIndex = LeftIndexVal.getAs<NonLoc>();
+ std::optional<NonLoc> LeftIndex = LeftIndexVal.getAs<NonLoc>();
if (!LeftIndex)
return UnknownVal();
LeftIndexVal = evalCast(*LeftIndex, ArrayIndexTy, QualType{});
@@ -911,7 +1011,7 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
// Do the same for the right index.
SVal RightIndexVal = RightER->getIndex();
- Optional<NonLoc> RightIndex = RightIndexVal.getAs<NonLoc>();
+ std::optional<NonLoc> RightIndex = RightIndexVal.getAs<NonLoc>();
if (!RightIndex)
return UnknownVal();
RightIndexVal = evalCast(*RightIndex, ArrayIndexTy, QualType{});
@@ -1019,8 +1119,10 @@ SVal SimpleSValBuilder::evalBinOpLN(ProgramStateRef state,
// We are dealing with pointer arithmetic.
// Handle pointer arithmetic on constant values.
- if (Optional<nonloc::ConcreteInt> rhsInt = rhs.getAs<nonloc::ConcreteInt>()) {
- if (Optional<loc::ConcreteInt> lhsInt = lhs.getAs<loc::ConcreteInt>()) {
+ if (std::optional<nonloc::ConcreteInt> rhsInt =
+ rhs.getAs<nonloc::ConcreteInt>()) {
+ if (std::optional<loc::ConcreteInt> lhsInt =
+ lhs.getAs<loc::ConcreteInt>()) {
const llvm::APSInt &leftI = lhsInt->getValue();
assert(leftI.isUnsigned());
llvm::APSInt rightI(rhsInt->getValue(), /* isUnsigned */ true);
@@ -1084,7 +1186,7 @@ SVal SimpleSValBuilder::evalBinOpLN(ProgramStateRef state,
if (elementType->isVoidType())
elementType = getContext().CharTy;
- if (Optional<NonLoc> indexV = index.getAs<NonLoc>()) {
+ if (std::optional<NonLoc> indexV = index.getAs<NonLoc>()) {
return loc::MemRegionVal(MemMgr.getElementRegion(elementType, *indexV,
superR, getContext()));
}
@@ -1092,26 +1194,72 @@ SVal SimpleSValBuilder::evalBinOpLN(ProgramStateRef state,
return UnknownVal();
}
+const llvm::APSInt *SimpleSValBuilder::getConstValue(ProgramStateRef state,
+ SVal V) {
+ if (const llvm::APSInt *Res = getConcreteValue(V))
+ return Res;
+
+ if (SymbolRef Sym = V.getAsSymbol())
+ return state->getConstraintManager().getSymVal(state, Sym);
+
+ return nullptr;
+}
+
+const llvm::APSInt *SimpleSValBuilder::getConcreteValue(SVal V) {
+ if (std::optional<loc::ConcreteInt> X = V.getAs<loc::ConcreteInt>())
+ return &X->getValue();
+
+ if (std::optional<nonloc::ConcreteInt> X = V.getAs<nonloc::ConcreteInt>())
+ return &X->getValue();
+
+ return nullptr;
+}
+
const llvm::APSInt *SimpleSValBuilder::getKnownValue(ProgramStateRef state,
+ SVal V) {
+ return getConstValue(state, simplifySVal(state, V));
+}
+
+const llvm::APSInt *SimpleSValBuilder::getMinValue(ProgramStateRef state,
SVal V) {
V = simplifySVal(state, V);
- if (V.isUnknownOrUndef())
- return nullptr;
- if (Optional<loc::ConcreteInt> X = V.getAs<loc::ConcreteInt>())
- return &X->getValue();
+ if (const llvm::APSInt *Res = getConcreteValue(V))
+ return Res;
- if (Optional<nonloc::ConcreteInt> X = V.getAs<nonloc::ConcreteInt>())
- return &X->getValue();
+ if (SymbolRef Sym = V.getAsSymbol())
+ return state->getConstraintManager().getSymMinVal(state, Sym);
+
+ return nullptr;
+}
+
+const llvm::APSInt *SimpleSValBuilder::getMaxValue(ProgramStateRef state,
+ SVal V) {
+ V = simplifySVal(state, V);
+
+ if (const llvm::APSInt *Res = getConcreteValue(V))
+ return Res;
if (SymbolRef Sym = V.getAsSymbol())
- return state->getConstraintManager().getSymVal(state, Sym);
+ return state->getConstraintManager().getSymMaxVal(state, Sym);
- // FIXME: Add support for SymExprs.
return nullptr;
}
+SVal SimpleSValBuilder::simplifyUntilFixpoint(ProgramStateRef State, SVal Val) {
+ SVal SimplifiedVal = simplifySValOnce(State, Val);
+ while (SimplifiedVal != Val) {
+ Val = SimplifiedVal;
+ SimplifiedVal = simplifySValOnce(State, Val);
+ }
+ return SimplifiedVal;
+}
+
SVal SimpleSValBuilder::simplifySVal(ProgramStateRef State, SVal V) {
+ return simplifyUntilFixpoint(State, V);
+}
+
+SVal SimpleSValBuilder::simplifySValOnce(ProgramStateRef State, SVal V) {
// For now, this function tries to constant-fold symbols inside a
// nonloc::SymbolVal, and does nothing else. More simplifications should
// be possible, such as constant-folding an index in an ElementRegion.
@@ -1139,6 +1287,24 @@ SVal SimpleSValBuilder::simplifySVal(ProgramStateRef State, SVal V) {
return cache(Sym, SVB.makeSymbolVal(Sym));
}
+ // Return the known const value for the Sym if available, or return Undef
+ // otherwise.
+ SVal getConst(SymbolRef Sym) {
+ const llvm::APSInt *Const =
+ State->getConstraintManager().getSymVal(State, Sym);
+ if (Const)
+ return Loc::isLocType(Sym->getType()) ? (SVal)SVB.makeIntLocVal(*Const)
+ : (SVal)SVB.makeIntVal(*Const);
+ return UndefinedVal();
+ }
+
+ SVal getConstOrVisit(SymbolRef Sym) {
+ const SVal Ret = getConst(Sym);
+ if (Ret.isUndef())
+ return Visit(Sym);
+ return Ret;
+ }
+
public:
Simplifier(ProgramStateRef State)
: State(State), SVB(State->getStateManager().getSValBuilder()) {}
@@ -1146,21 +1312,18 @@ SVal SimpleSValBuilder::simplifySVal(ProgramStateRef State, SVal V) {
SVal VisitSymbolData(const SymbolData *S) {
// No cache here.
if (const llvm::APSInt *I =
- SVB.getKnownValue(State, SVB.makeSymbolVal(S)))
+ State->getConstraintManager().getSymVal(State, S))
return Loc::isLocType(S->getType()) ? (SVal)SVB.makeIntLocVal(*I)
: (SVal)SVB.makeIntVal(*I);
return SVB.makeSymbolVal(S);
}
- // TODO: Support SymbolCast. Support IntSymExpr when/if we actually
- // start producing them.
-
SVal VisitSymIntExpr(const SymIntExpr *S) {
auto I = Cached.find(S);
if (I != Cached.end())
return I->second;
- SVal LHS = Visit(S->getLHS());
+ SVal LHS = getConstOrVisit(S->getLHS());
if (isUnchanged(S->getLHS(), LHS))
return skip(S);
@@ -1187,6 +1350,20 @@ SVal SimpleSValBuilder::simplifySVal(ProgramStateRef State, SVal V) {
S, SVB.evalBinOp(State, S->getOpcode(), LHS, RHS, S->getType()));
}
+ SVal VisitIntSymExpr(const IntSymExpr *S) {
+ auto I = Cached.find(S);
+ if (I != Cached.end())
+ return I->second;
+
+ SVal RHS = getConstOrVisit(S->getRHS());
+ if (isUnchanged(S->getRHS(), RHS))
+ return skip(S);
+
+ SVal LHS = SVB.makeIntVal(S->getLHS());
+ return cache(
+ S, SVB.evalBinOp(State, S->getOpcode(), LHS, RHS, S->getType()));
+ }
+
SVal VisitSymSymExpr(const SymSymExpr *S) {
auto I = Cached.find(S);
if (I != Cached.end())
@@ -1200,8 +1377,9 @@ SVal SimpleSValBuilder::simplifySVal(ProgramStateRef State, SVal V) {
Loc::isLocType(S->getRHS()->getType()))
return skip(S);
- SVal LHS = Visit(S->getLHS());
- SVal RHS = Visit(S->getRHS());
+ SVal LHS = getConstOrVisit(S->getLHS());
+ SVal RHS = getConstOrVisit(S->getRHS());
+
if (isUnchanged(S->getLHS(), LHS) && isUnchanged(S->getRHS(), RHS))
return skip(S);
@@ -1209,11 +1387,35 @@ SVal SimpleSValBuilder::simplifySVal(ProgramStateRef State, SVal V) {
S, SVB.evalBinOp(State, S->getOpcode(), LHS, RHS, S->getType()));
}
+ SVal VisitSymbolCast(const SymbolCast *S) {
+ auto I = Cached.find(S);
+ if (I != Cached.end())
+ return I->second;
+ const SymExpr *OpSym = S->getOperand();
+ SVal OpVal = getConstOrVisit(OpSym);
+ if (isUnchanged(OpSym, OpVal))
+ return skip(S);
+
+ return cache(S, SVB.evalCast(OpVal, S->getType(), OpSym->getType()));
+ }
+
+ SVal VisitUnarySymExpr(const UnarySymExpr *S) {
+ auto I = Cached.find(S);
+ if (I != Cached.end())
+ return I->second;
+ SVal Op = getConstOrVisit(S->getOperand());
+ if (isUnchanged(S->getOperand(), Op))
+ return skip(S);
+
+ return cache(
+ S, SVB.evalUnaryOp(State, S->getOpcode(), Op, S->getType()));
+ }
+
SVal VisitSymExpr(SymbolRef S) { return nonloc::SymbolVal(S); }
SVal VisitMemRegion(const MemRegion *R) { return loc::MemRegionVal(R); }
- SVal VisitNonLocSymbolVal(nonloc::SymbolVal V) {
+ SVal VisitSymbolVal(nonloc::SymbolVal V) {
// Simplification is much more costly than computing complexity.
// For high complexity, it may be not worth it.
return Visit(V.getSymbol());
@@ -1222,14 +1424,6 @@ SVal SimpleSValBuilder::simplifySVal(ProgramStateRef State, SVal V) {
SVal VisitSVal(SVal V) { return V; }
};
- // A crude way of preventing this function from calling itself from evalBinOp.
- static bool isReentering = false;
- if (isReentering)
- return V;
-
- isReentering = true;
SVal SimplifiedV = Simplifier(State).Visit(V);
- isReentering = false;
-
return SimplifiedV;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp
index b867b0746f90..67ca61bb56ba 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp
@@ -29,12 +29,12 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/StoreRef.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
#include "llvm/ADT/APSInt.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstdint>
+#include <optional>
using namespace clang;
using namespace ento;
@@ -71,8 +71,8 @@ const ElementRegion *StoreManager::GetElementZeroRegion(const SubRegion *R,
return MRMgr.getElementRegion(T, idx, R, Ctx);
}
-Optional<const MemRegion *> StoreManager::castRegion(const MemRegion *R,
- QualType CastToTy) {
+std::optional<const MemRegion *> StoreManager::castRegion(const MemRegion *R,
+ QualType CastToTy) {
ASTContext &Ctx = StateMgr.getContext();
// Handle casts to Objective-C objects.
@@ -84,30 +84,36 @@ Optional<const MemRegion *> StoreManager::castRegion(const MemRegion *R,
// involved. Blocks can be casted to/from 'id', as they can be treated
// as Objective-C objects. This could possibly be handled by enhancing
// our reasoning of downcasts of symbolic objects.
- if (isa<CodeTextRegion>(R) || isa<SymbolicRegion>(R))
+ if (isa<CodeTextRegion, SymbolicRegion>(R))
return R;
// We don't know what to make of it. Return a NULL region, which
// will be interpreted as UnknownVal.
- return None;
+ return std::nullopt;
}
// Now assume we are casting from pointer to pointer. Other cases should
// already be handled.
QualType PointeeTy = CastToTy->getPointeeType();
QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
+ CanonPointeeTy = CanonPointeeTy.getLocalUnqualifiedType();
// Handle casts to void*. We just pass the region through.
- if (CanonPointeeTy.getLocalUnqualifiedType() == Ctx.VoidTy)
+ if (CanonPointeeTy == Ctx.VoidTy)
return R;
- // Handle casts from compatible types.
- if (R->isBoundable())
+ const auto IsSameRegionType = [&Ctx](const MemRegion *R, QualType OtherTy) {
if (const auto *TR = dyn_cast<TypedValueRegion>(R)) {
QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
- if (CanonPointeeTy == ObjTy)
- return R;
+ if (OtherTy == ObjTy.getLocalUnqualifiedType())
+ return true;
}
+ return false;
+ };
+
+ // Handle casts from compatible types.
+ if (R->isBoundable() && IsSameRegionType(R, CanonPointeeTy))
+ return R;
// Process region cast according to the kind of the region being cast.
switch (R->getKind()) {
@@ -138,6 +144,7 @@ Optional<const MemRegion *> StoreManager::castRegion(const MemRegion *R,
case MemRegion::NonParamVarRegionKind:
case MemRegion::ParamVarRegionKind:
case MemRegion::CXXTempObjectRegionKind:
+ case MemRegion::CXXLifetimeExtendedObjectRegionKind:
case MemRegion::CXXBaseObjectRegionKind:
case MemRegion::CXXDerivedObjectRegionKind:
return MakeElementRegion(cast<SubRegion>(R), PointeeTy);
@@ -169,21 +176,16 @@ Optional<const MemRegion *> StoreManager::castRegion(const MemRegion *R,
// If we cannot compute a raw offset, throw up our hands and return
// a NULL MemRegion*.
if (!baseR)
- return None;
+ return std::nullopt;
CharUnits off = rawOff.getOffset();
if (off.isZero()) {
- // Edge case: we are at 0 bytes off the beginning of baseR. We
- // check to see if type we are casting to is the same as the base
- // region. If so, just return the base region.
- if (const auto *TR = dyn_cast<TypedValueRegion>(baseR)) {
- QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
- QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
- if (CanonPointeeTy == ObjTy)
- return baseR;
- }
-
+ // Edge case: we are at 0 bytes off the beginning of baseR. We check to
+ // see if the type we are casting to is the same as the type of the base
+ // region. If so, just return the base region.
+ if (IsSameRegionType(baseR, CanonPointeeTy))
+ return baseR;
// Otherwise, create a new ElementRegion at offset 0.
return MakeElementRegion(cast<SubRegion>(baseR), PointeeTy);
}
@@ -248,17 +250,15 @@ static bool regionMatchesCXXRecordType(SVal V, QualType Ty) {
}
SVal StoreManager::evalDerivedToBase(SVal Derived, const CastExpr *Cast) {
- // Sanity check to avoid doing the wrong thing in the face of
+ // Early return to avoid doing the wrong thing in the face of
// reinterpret_cast.
if (!regionMatchesCXXRecordType(Derived, Cast->getSubExpr()->getType()))
return UnknownVal();
// Walk through the cast path to create nested CXXBaseRegions.
SVal Result = Derived;
- for (CastExpr::path_const_iterator I = Cast->path_begin(),
- E = Cast->path_end();
- I != E; ++I) {
- Result = evalDerivedToBase(Result, (*I)->getType(), (*I)->isVirtual());
+ for (const CXXBaseSpecifier *Base : Cast->path()) {
+ Result = evalDerivedToBase(Result, Base->getType(), Base->isVirtual());
}
return Result;
}
@@ -313,10 +313,8 @@ static const CXXRecordDecl *getCXXRecordType(const MemRegion *MR) {
return nullptr;
}
-SVal StoreManager::attemptDownCast(SVal Base, QualType TargetType,
- bool &Failed) {
- Failed = false;
-
+std::optional<SVal> StoreManager::evalBaseToDerived(SVal Base,
+ QualType TargetType) {
const MemRegion *MR = Base.getAsRegion();
if (!MR)
return UnknownVal();
@@ -391,7 +389,9 @@ SVal StoreManager::attemptDownCast(SVal Base, QualType TargetType,
}
// We failed if the region we ended up with has perfect type info.
- Failed = isa<TypedValueRegion>(MR);
+ if (isa<TypedValueRegion>(MR))
+ return std::nullopt;
+
return UnknownVal();
}
@@ -402,7 +402,7 @@ SVal StoreManager::getLValueFieldOrIvar(const Decl *D, SVal Base) {
Loc BaseL = Base.castAs<Loc>();
const SubRegion* BaseR = nullptr;
- switch (BaseL.getSubKind()) {
+ switch (BaseL.getKind()) {
case loc::MemRegionValKind:
BaseR = cast<SubRegion>(BaseL.castAs<loc::MemRegionVal>().getRegion());
break;
@@ -442,14 +442,27 @@ SVal StoreManager::getLValueIvar(const ObjCIvarDecl *decl, SVal base) {
SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
SVal Base) {
+
+ // Special case, if index is 0, return the same type as if
+ // this was not an array dereference.
+ if (Offset.isZeroConstant()) {
+ QualType BT = Base.getType(this->Ctx);
+ if (!BT.isNull() && !elementType.isNull()) {
+ QualType PointeeTy = BT->getPointeeType();
+ if (!PointeeTy.isNull() &&
+ PointeeTy.getCanonicalType() == elementType.getCanonicalType())
+ return Base;
+ }
+ }
+
// If the base is an unknown or undefined value, just return it back.
// FIXME: For absolute pointer addresses, we just return that value back as
// well, although in reality we should return the offset added to that
// value. See also the similar FIXME in getLValueFieldOrIvar().
- if (Base.isUnknownOrUndef() || Base.getAs<loc::ConcreteInt>())
+ if (Base.isUnknownOrUndef() || isa<loc::ConcreteInt>(Base))
return Base;
- if (Base.getAs<loc::GotoLabel>())
+ if (isa<loc::GotoLabel>(Base))
return UnknownVal();
const SubRegion *BaseRegion =
@@ -475,7 +488,7 @@ SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
SVal BaseIdx = ElemR->getIndex();
- if (!BaseIdx.getAs<nonloc::ConcreteInt>())
+ if (!isa<nonloc::ConcreteInt>(BaseIdx))
return UnknownVal();
const llvm::APSInt &BaseIdxI =
@@ -484,7 +497,7 @@ SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
// Only allow non-integer offsets if the base region has no offset itself.
// FIXME: This is a somewhat arbitrary restriction. We should be using
// SValBuilder here to add the two offsets without checking their types.
- if (!Offset.getAs<nonloc::ConcreteInt>()) {
+ if (!isa<nonloc::ConcreteInt>(Offset)) {
if (isa<ElementRegion>(BaseRegion->StripCasts()))
return UnknownVal();
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
index 79a8eef30576..9025e11a3f51 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -65,14 +65,23 @@ void BinarySymExpr::dumpToStreamImpl(raw_ostream &OS,
}
void SymbolCast::dumpToStream(raw_ostream &os) const {
- os << '(' << ToTy.getAsString() << ") (";
+ os << '(' << ToTy << ") (";
Operand->dumpToStream(os);
os << ')';
}
+void UnarySymExpr::dumpToStream(raw_ostream &os) const {
+ os << UnaryOperator::getOpcodeStr(Op);
+ bool Binary = isa<BinarySymExpr>(Operand);
+ if (Binary)
+ os << '(';
+ Operand->dumpToStream(os);
+ if (Binary)
+ os << ')';
+}
+
void SymbolConjured::dumpToStream(raw_ostream &os) const {
- os << getKindStr() << getSymbolID() << '{' << T.getAsString() << ", LC"
- << LCtx->getID();
+ os << getKindStr() << getSymbolID() << '{' << T << ", LC" << LCtx->getID();
if (S)
os << ", S" << S->getID(LCtx->getDecl()->getASTContext());
else
@@ -90,15 +99,13 @@ void SymbolExtent::dumpToStream(raw_ostream &os) const {
}
void SymbolMetadata::dumpToStream(raw_ostream &os) const {
- os << getKindStr() << getSymbolID() << '{' << getRegion() << ','
- << T.getAsString() << '}';
+ os << getKindStr() << getSymbolID() << '{' << getRegion() << ',' << T << '}';
}
void SymbolData::anchor() {}
void SymbolRegionValue::dumpToStream(raw_ostream &os) const {
- os << getKindStr() << getSymbolID() << '<' << getType().getAsString() << ' '
- << R << '>';
+ os << getKindStr() << getSymbolID() << '<' << getType() << ' ' << R << '>';
}
bool SymExpr::symbol_iterator::operator==(const symbol_iterator &X) const {
@@ -137,6 +144,9 @@ void SymExpr::symbol_iterator::expand() {
case SymExpr::SymbolCastKind:
itr.push_back(cast<SymbolCast>(SE)->getOperand());
return;
+ case SymExpr::UnarySymExprKind:
+ itr.push_back(cast<UnarySymExpr>(SE)->getOperand());
+ return;
case SymExpr::SymIntExprKind:
itr.push_back(cast<SymIntExpr>(SE)->getLHS());
return;
@@ -160,8 +170,7 @@ SymbolManager::getRegionValueSymbol(const TypedValueRegion* R) {
void *InsertPos;
SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
if (!SD) {
- SD = (SymExpr*) BPAlloc.Allocate<SymbolRegionValue>();
- new (SD) SymbolRegionValue(SymbolCounter, R);
+ SD = new (BPAlloc) SymbolRegionValue(SymbolCounter, R);
DataSet.InsertNode(SD, InsertPos);
++SymbolCounter;
}
@@ -179,8 +188,7 @@ const SymbolConjured* SymbolManager::conjureSymbol(const Stmt *E,
void *InsertPos;
SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
if (!SD) {
- SD = (SymExpr*) BPAlloc.Allocate<SymbolConjured>();
- new (SD) SymbolConjured(SymbolCounter, E, LCtx, T, Count, SymbolTag);
+ SD = new (BPAlloc) SymbolConjured(SymbolCounter, E, LCtx, T, Count, SymbolTag);
DataSet.InsertNode(SD, InsertPos);
++SymbolCounter;
}
@@ -196,8 +204,7 @@ SymbolManager::getDerivedSymbol(SymbolRef parentSymbol,
void *InsertPos;
SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
if (!SD) {
- SD = (SymExpr*) BPAlloc.Allocate<SymbolDerived>();
- new (SD) SymbolDerived(SymbolCounter, parentSymbol, R);
+ SD = new (BPAlloc) SymbolDerived(SymbolCounter, parentSymbol, R);
DataSet.InsertNode(SD, InsertPos);
++SymbolCounter;
}
@@ -212,8 +219,7 @@ SymbolManager::getExtentSymbol(const SubRegion *R) {
void *InsertPos;
SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
if (!SD) {
- SD = (SymExpr*) BPAlloc.Allocate<SymbolExtent>();
- new (SD) SymbolExtent(SymbolCounter, R);
+ SD = new (BPAlloc) SymbolExtent(SymbolCounter, R);
DataSet.InsertNode(SD, InsertPos);
++SymbolCounter;
}
@@ -230,8 +236,7 @@ SymbolManager::getMetadataSymbol(const MemRegion* R, const Stmt *S, QualType T,
void *InsertPos;
SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
if (!SD) {
- SD = (SymExpr*) BPAlloc.Allocate<SymbolMetadata>();
- new (SD) SymbolMetadata(SymbolCounter, R, S, T, LCtx, Count, SymbolTag);
+ SD = new (BPAlloc) SymbolMetadata(SymbolCounter, R, S, T, LCtx, Count, SymbolTag);
DataSet.InsertNode(SD, InsertPos);
++SymbolCounter;
}
@@ -247,8 +252,7 @@ SymbolManager::getCastSymbol(const SymExpr *Op,
void *InsertPos;
SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
if (!data) {
- data = (SymbolCast*) BPAlloc.Allocate<SymbolCast>();
- new (data) SymbolCast(Op, From, To);
+ data = new (BPAlloc) SymbolCast(Op, From, To);
DataSet.InsertNode(data, InsertPos);
}
@@ -265,8 +269,7 @@ const SymIntExpr *SymbolManager::getSymIntExpr(const SymExpr *lhs,
SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
if (!data) {
- data = (SymIntExpr*) BPAlloc.Allocate<SymIntExpr>();
- new (data) SymIntExpr(lhs, op, v, t);
+ data = new (BPAlloc) SymIntExpr(lhs, op, v, t);
DataSet.InsertNode(data, InsertPos);
}
@@ -283,8 +286,7 @@ const IntSymExpr *SymbolManager::getIntSymExpr(const llvm::APSInt& lhs,
SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
if (!data) {
- data = (IntSymExpr*) BPAlloc.Allocate<IntSymExpr>();
- new (data) IntSymExpr(lhs, op, rhs, t);
+ data = new (BPAlloc) IntSymExpr(lhs, op, rhs, t);
DataSet.InsertNode(data, InsertPos);
}
@@ -301,14 +303,28 @@ const SymSymExpr *SymbolManager::getSymSymExpr(const SymExpr *lhs,
SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
if (!data) {
- data = (SymSymExpr*) BPAlloc.Allocate<SymSymExpr>();
- new (data) SymSymExpr(lhs, op, rhs, t);
+ data = new (BPAlloc) SymSymExpr(lhs, op, rhs, t);
DataSet.InsertNode(data, InsertPos);
}
return cast<SymSymExpr>(data);
}
+const UnarySymExpr *SymbolManager::getUnarySymExpr(const SymExpr *Operand,
+ UnaryOperator::Opcode Opc,
+ QualType T) {
+ llvm::FoldingSetNodeID ID;
+ UnarySymExpr::Profile(ID, Operand, Opc, T);
+ void *InsertPos;
+ SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
+ if (!data) {
+ data = new (BPAlloc) UnarySymExpr(Operand, Opc, T);
+ DataSet.InsertNode(data, InsertPos);
+ }
+
+ return cast<UnarySymExpr>(data);
+}
+
QualType SymbolConjured::getType() const {
return T;
}
@@ -372,7 +388,7 @@ void SymbolReaper::markDependentsLive(SymbolRef sym) {
if (const SymbolRefSmallVectorTy *Deps = SymMgr.getDependentSymbols(sym)) {
for (const auto I : *Deps) {
- if (TheLiving.find(I) != TheLiving.end())
+ if (TheLiving.contains(I))
continue;
markLive(I);
}
@@ -385,17 +401,21 @@ void SymbolReaper::markLive(SymbolRef sym) {
}
void SymbolReaper::markLive(const MemRegion *region) {
- RegionRoots.insert(region->getBaseRegion());
+ LiveRegionRoots.insert(region->getBaseRegion());
markElementIndicesLive(region);
}
+void SymbolReaper::markLazilyCopied(const clang::ento::MemRegion *region) {
+ LazilyCopiedRegionRoots.insert(region->getBaseRegion());
+}
+
void SymbolReaper::markElementIndicesLive(const MemRegion *region) {
for (auto SR = dyn_cast<SubRegion>(region); SR;
SR = dyn_cast<SubRegion>(SR->getSuperRegion())) {
if (const auto ER = dyn_cast<ElementRegion>(SR)) {
SVal Idx = ER->getIndex();
- for (auto SI = Idx.symbol_begin(), SE = Idx.symbol_end(); SI != SE; ++SI)
- markLive(*SI);
+ for (SymbolRef Sym : Idx.symbols())
+ markLive(Sym);
}
}
}
@@ -411,8 +431,7 @@ bool SymbolReaper::isLiveRegion(const MemRegion *MR) {
// is not used later in the path, we can diagnose a leak of a value within
// that field earlier than, say, the variable that contains the field dies.
MR = MR->getBaseRegion();
-
- if (RegionRoots.count(MR))
+ if (LiveRegionRoots.count(MR))
return true;
if (const auto *SR = dyn_cast<SymbolicRegion>(MR))
@@ -425,19 +444,16 @@ bool SymbolReaper::isLiveRegion(const MemRegion *MR) {
// tell if anything still refers to this region. Unlike SymbolicRegions,
// AllocaRegions don't have associated symbols, though, so we don't actually
// have a way to track their liveness.
- if (isa<AllocaRegion>(MR))
- return true;
-
- if (isa<CXXThisRegion>(MR))
- return true;
-
- if (isa<MemSpaceRegion>(MR))
- return true;
+ return isa<AllocaRegion, CXXThisRegion, MemSpaceRegion, CodeTextRegion>(MR);
+}
- if (isa<CodeTextRegion>(MR))
- return true;
+bool SymbolReaper::isLazilyCopiedRegion(const MemRegion *MR) const {
+ // TODO: See comment in isLiveRegion.
+ return LazilyCopiedRegionRoots.count(MR->getBaseRegion());
+}
- return false;
+bool SymbolReaper::isReadableRegion(const MemRegion *MR) {
+ return isLiveRegion(MR) || isLazilyCopiedRegion(MR);
}
bool SymbolReaper::isLive(SymbolRef sym) {
@@ -450,7 +466,7 @@ bool SymbolReaper::isLive(SymbolRef sym) {
switch (sym->getKind()) {
case SymExpr::SymbolRegionValueKind:
- KnownLive = isLiveRegion(cast<SymbolRegionValue>(sym)->getRegion());
+ KnownLive = isReadableRegion(cast<SymbolRegionValue>(sym)->getRegion());
break;
case SymExpr::SymbolConjuredKind:
KnownLive = false;
@@ -480,6 +496,9 @@ bool SymbolReaper::isLive(SymbolRef sym) {
case SymExpr::SymbolCastKind:
KnownLive = isLive(cast<SymbolCast>(sym)->getOperand());
break;
+ case SymExpr::UnarySymExprKind:
+ KnownLive = isLive(cast<UnarySymExpr>(sym)->getOperand());
+ break;
}
if (KnownLive)
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp
index 4f3be7cae331..71268af22e24 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp
@@ -31,8 +31,8 @@ using namespace ento;
using namespace tooling;
namespace {
-/// Emitsd minimal diagnostics (report message + notes) for the 'none' output
-/// type to the standard error, or to to compliment many others. Emits detailed
+/// Emits minimal diagnostics (report message + notes) for the 'none' output
+/// type to the standard error, or to complement many others. Emits detailed
/// diagnostics in textual format for the 'text' output type.
class TextDiagnostics : public PathDiagnosticConsumer {
PathDiagnosticConsumerOptions DiagOpts;
@@ -86,10 +86,7 @@ public:
}
};
- for (std::vector<const PathDiagnostic *>::iterator I = Diags.begin(),
- E = Diags.end();
- I != E; ++I) {
- const PathDiagnostic *PD = *I;
+ for (const PathDiagnostic *PD : Diags) {
std::string WarningMsg = (DiagOpts.ShouldDisplayDiagnosticName
? " [" + PD->getCheckerName() + "]"
: "")
@@ -129,7 +126,7 @@ public:
Rewriter Rewrite(SM, LO);
if (!applyAllReplacements(Repls, Rewrite)) {
- llvm::errs() << "An error occured during applying fix-it.\n";
+ llvm::errs() << "An error occurred during applying fix-it.\n";
}
Rewrite.overwriteChangedFiles();
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/WorkList.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/WorkList.cpp
index 348552ba73a9..7042a9020837 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/WorkList.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/WorkList.cpp
@@ -205,12 +205,6 @@ class UnexploredFirstPriorityQueue : public WorkList {
using QueuePriority = std::pair<int, unsigned long>;
using QueueItem = std::pair<WorkListUnit, QueuePriority>;
- struct ExplorationComparator {
- bool operator() (const QueueItem &LHS, const QueueItem &RHS) {
- return LHS.second < RHS.second;
- }
- };
-
// Number of inserted nodes, used to emulate DFS ordering in the priority
// queue when insertions are equal.
unsigned long Counter = 0;
@@ -219,7 +213,7 @@ class UnexploredFirstPriorityQueue : public WorkList {
VisitedTimesMap NumReached;
// The top item is the largest one.
- llvm::PriorityQueue<QueueItem, std::vector<QueueItem>, ExplorationComparator>
+ llvm::PriorityQueue<QueueItem, std::vector<QueueItem>, llvm::less_second>
queue;
public:
@@ -267,12 +261,6 @@ class UnexploredFirstPriorityLocationQueue : public WorkList {
using QueuePriority = std::pair<int, unsigned long>;
using QueueItem = std::pair<WorkListUnit, QueuePriority>;
- struct ExplorationComparator {
- bool operator() (const QueueItem &LHS, const QueueItem &RHS) {
- return LHS.second < RHS.second;
- }
- };
-
// Number of inserted nodes, used to emulate DFS ordering in the priority
// queue when insertions are equal.
unsigned long Counter = 0;
@@ -281,7 +269,7 @@ class UnexploredFirstPriorityLocationQueue : public WorkList {
VisitedTimesMap NumReached;
// The top item is the largest one.
- llvm::PriorityQueue<QueueItem, std::vector<QueueItem>, ExplorationComparator>
+ llvm::PriorityQueue<QueueItem, std::vector<QueueItem>, llvm::less_second>
queue;
public:
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index 31de49033ac2..b6ef40595e3c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -27,7 +27,6 @@
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Rewrite/Core/Rewriter.h"
-#include "clang/StaticAnalyzer/Checkers/LocalCheckers.h"
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -35,6 +34,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
@@ -87,7 +87,7 @@ public:
ASTContext *Ctx;
Preprocessor &PP;
const std::string OutDir;
- AnalyzerOptionsRef Opts;
+ AnalyzerOptions &Opts;
ArrayRef<std::string> Plugins;
CodeInjector *Injector;
cross_tu::CrossTranslationUnitContext CTU;
@@ -121,15 +121,15 @@ public:
FunctionSummariesTy FunctionSummaries;
AnalysisConsumer(CompilerInstance &CI, const std::string &outdir,
- AnalyzerOptionsRef opts, ArrayRef<std::string> plugins,
+ AnalyzerOptions &opts, ArrayRef<std::string> plugins,
CodeInjector *injector)
: RecVisitorMode(0), RecVisitorBR(nullptr), Ctx(nullptr),
- PP(CI.getPreprocessor()), OutDir(outdir), Opts(std::move(opts)),
+ PP(CI.getPreprocessor()), OutDir(outdir), Opts(opts),
Plugins(plugins), Injector(injector), CTU(CI),
MacroExpansions(CI.getLangOpts()) {
DigestAnalyzerOptions();
- if (Opts->AnalyzerDisplayProgress || Opts->PrintStats ||
- Opts->ShouldSerializeStats) {
+ if (Opts.AnalyzerDisplayProgress || Opts.PrintStats ||
+ Opts.ShouldSerializeStats) {
AnalyzerTimers = std::make_unique<llvm::TimerGroup>(
"analyzer", "Analyzer timers");
SyntaxCheckTimer = std::make_unique<llvm::Timer>(
@@ -141,27 +141,27 @@ public:
*AnalyzerTimers);
}
- if (Opts->PrintStats || Opts->ShouldSerializeStats) {
- llvm::EnableStatistics(/* PrintOnExit= */ false);
+ if (Opts.PrintStats || Opts.ShouldSerializeStats) {
+ llvm::EnableStatistics(/* DoPrintOnExit= */ false);
}
- if (Opts->ShouldDisplayMacroExpansions)
+ if (Opts.ShouldDisplayMacroExpansions)
MacroExpansions.registerForPreprocessor(PP);
}
~AnalysisConsumer() override {
- if (Opts->PrintStats) {
+ if (Opts.PrintStats) {
llvm::PrintStatistics();
}
}
void DigestAnalyzerOptions() {
- switch (Opts->AnalysisDiagOpt) {
+ switch (Opts.AnalysisDiagOpt) {
case PD_NONE:
break;
#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATEFN) \
case PD_##NAME: \
- CREATEFN(Opts->getDiagOpts(), PathConsumers, OutDir, PP, CTU, \
+ CREATEFN(Opts.getDiagOpts(), PathConsumers, OutDir, PP, CTU, \
MacroExpansions); \
break;
#include "clang/StaticAnalyzer/Core/Analyses.def"
@@ -170,15 +170,9 @@ public:
}
// Create the analyzer component creators.
- switch (Opts->AnalysisStoreOpt) {
- default:
- llvm_unreachable("Unknown store manager.");
-#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATEFN) \
- case NAME##Model: CreateStoreMgr = CREATEFN; break;
-#include "clang/StaticAnalyzer/Core/Analyses.def"
- }
+ CreateStoreMgr = &CreateRegionStoreManager;
- switch (Opts->AnalysisConstraintsOpt) {
+ switch (Opts.AnalysisConstraintsOpt) {
default:
llvm_unreachable("Unknown constraint manager.");
#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATEFN) \
@@ -188,7 +182,7 @@ public:
}
void DisplayTime(llvm::TimeRecord &Time) {
- if (!Opts->AnalyzerDisplayProgress) {
+ if (!Opts.AnalyzerDisplayProgress) {
return;
}
llvm::errs() << " : " << llvm::format("%1.1f", Time.getWallTime() * 1000)
@@ -197,7 +191,7 @@ public:
void DisplayFunction(const Decl *D, AnalysisMode Mode,
ExprEngine::InliningModes IMode) {
- if (!Opts->AnalyzerDisplayProgress)
+ if (!Opts.AnalyzerDisplayProgress)
return;
SourceManager &SM = Mgr->getASTContext().getSourceManager();
@@ -228,12 +222,12 @@ public:
void Initialize(ASTContext &Context) override {
Ctx = &Context;
- checkerMgr = std::make_unique<CheckerManager>(*Ctx, *Opts, PP, Plugins,
+ checkerMgr = std::make_unique<CheckerManager>(*Ctx, Opts, PP, Plugins,
CheckerRegistrationFns);
Mgr = std::make_unique<AnalysisManager>(*Ctx, PP, PathConsumers,
CreateStoreMgr, CreateConstraintMgr,
- checkerMgr.get(), *Opts, Injector);
+ checkerMgr.get(), Opts, Injector);
}
/// Store the top level decls in the set to be processed later on.
@@ -284,11 +278,11 @@ public:
}
bool VisitVarDecl(VarDecl *VD) {
- if (!Opts->IsNaiveCTUEnabled)
+ if (!Opts.IsNaiveCTUEnabled)
return true;
if (VD->hasExternalStorage() || VD->isStaticDataMember()) {
- if (!cross_tu::containsConst(VD, *Ctx))
+ if (!cross_tu::shouldImport(VD, *Ctx))
return true;
} else {
// Cannot be initialized in another TU.
@@ -299,8 +293,8 @@ public:
return true;
llvm::Expected<const VarDecl *> CTUDeclOrError =
- CTU.getCrossTUDefinition(VD, Opts->CTUDir, Opts->CTUIndexName,
- Opts->DisplayCTUProgress);
+ CTU.getCrossTUDefinition(VD, Opts.CTUDir, Opts.CTUIndexName,
+ Opts.DisplayCTUProgress);
if (!CTUDeclOrError) {
handleAllErrors(CTUDeclOrError.takeError(),
@@ -314,7 +308,7 @@ public:
bool VisitFunctionDecl(FunctionDecl *FD) {
IdentifierInfo *II = FD->getIdentifier();
- if (II && II->getName().startswith("__inline"))
+ if (II && II->getName().starts_with("__inline"))
return true;
// We skip function template definitions, as their semantics is
@@ -357,13 +351,12 @@ public:
private:
void storeTopLevelDecls(DeclGroupRef DG);
- std::string getFunctionName(const Decl *D);
/// Check if we should skip (not analyze) the given function.
AnalysisMode getModeForDecl(Decl *D, AnalysisMode Mode);
void runAnalysisOnTranslationUnit(ASTContext &C);
- /// Print \p S to stderr if \c Opts->AnalyzerDisplayProgress is set.
+ /// Print \p S to stderr if \c Opts.AnalyzerDisplayProgress is set.
void reportAnalyzerProgress(StringRef S);
}; // namespace
} // end anonymous namespace
@@ -382,14 +375,14 @@ void AnalysisConsumer::HandleTopLevelDeclInObjCContainer(DeclGroupRef DG) {
}
void AnalysisConsumer::storeTopLevelDecls(DeclGroupRef DG) {
- for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I) {
+ for (auto &I : DG) {
// Skip ObjCMethodDecl, wait for the objc container to avoid
// analyzing twice.
- if (isa<ObjCMethodDecl>(*I))
+ if (isa<ObjCMethodDecl>(I))
continue;
- LocalTUDecls.push_back(*I);
+ LocalTUDecls.push_back(I);
}
}
@@ -461,11 +454,9 @@ void AnalysisConsumer::HandleDeclsCallGraph(const unsigned LocalTUDeclsSize) {
SetOfConstDecls Visited;
SetOfConstDecls VisitedAsTopLevel;
llvm::ReversePostOrderTraversal<clang::CallGraph*> RPOT(&CG);
- for (llvm::ReversePostOrderTraversal<clang::CallGraph*>::rpo_iterator
- I = RPOT.begin(), E = RPOT.end(); I != E; ++I) {
+ for (auto &N : RPOT) {
NumFunctionTopLevel++;
- CallGraphNode *N = *I;
Decl *D = N->getDecl();
// Skip the abstract root node.
@@ -477,6 +468,18 @@ void AnalysisConsumer::HandleDeclsCallGraph(const unsigned LocalTUDeclsSize) {
if (shouldSkipFunction(D, Visited, VisitedAsTopLevel))
continue;
+ // The CallGraph might have declarations as callees. However, during CTU
+ // the declaration might form a declaration chain with the newly imported
+ // definition from another TU. In this case we don't want to analyze the
+ // function definition as toplevel.
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ // Calling 'hasBody' replaces 'FD' in place with the FunctionDecl
+ // that has the body.
+ FD->hasBody(FD);
+ if (CTU.isImportedAsNew(FD))
+ continue;
+ }
+
// Analyze the function.
SetOfConstDecls VisitedCallees;
@@ -493,13 +496,33 @@ void AnalysisConsumer::HandleDeclsCallGraph(const unsigned LocalTUDeclsSize) {
}
}
-static bool isBisonFile(ASTContext &C) {
+static bool fileContainsString(StringRef Substring, ASTContext &C) {
const SourceManager &SM = C.getSourceManager();
FileID FID = SM.getMainFileID();
StringRef Buffer = SM.getBufferOrFake(FID).getBuffer();
- if (Buffer.startswith("/* A Bison parser, made by"))
- return true;
- return false;
+ return Buffer.contains(Substring);
+}
+
+static void reportAnalyzerFunctionMisuse(const AnalyzerOptions &Opts,
+ const ASTContext &Ctx) {
+ llvm::errs() << "Every top-level function was skipped.\n";
+
+ if (!Opts.AnalyzerDisplayProgress)
+ llvm::errs() << "Pass the -analyzer-display-progress for tracking which "
+ "functions are analyzed.\n";
+
+ bool HasBrackets =
+ Opts.AnalyzeSpecificFunction.find("(") != std::string::npos;
+
+ if (Ctx.getLangOpts().CPlusPlus && !HasBrackets) {
+ llvm::errs()
+ << "For analyzing C++ code you need to pass the function parameter "
+ "list: -analyze-function=\"foobar(int, _Bool)\"\n";
+ } else if (!Ctx.getLangOpts().CPlusPlus && HasBrackets) {
+ llvm::errs() << "For analyzing C code you shouldn't pass the function "
+ "parameter list, only the name of the function: "
+ "-analyze-function=foobar\n";
+ }
}
void AnalysisConsumer::runAnalysisOnTranslationUnit(ASTContext &C) {
@@ -538,52 +561,70 @@ void AnalysisConsumer::runAnalysisOnTranslationUnit(ASTContext &C) {
BR.FlushReports();
RecVisitorBR = nullptr;
+
+ // If the user wanted to analyze a specific function and the number of basic
+ // blocks analyzed is zero, than the user might not specified the function
+ // name correctly.
+ // FIXME: The user might have analyzed the requested function in Syntax mode,
+ // but we are unaware of that.
+ if (!Opts.AnalyzeSpecificFunction.empty() && NumFunctionsAnalyzed == 0)
+ reportAnalyzerFunctionMisuse(Opts, *Ctx);
}
void AnalysisConsumer::reportAnalyzerProgress(StringRef S) {
- if (Opts->AnalyzerDisplayProgress)
+ if (Opts.AnalyzerDisplayProgress)
llvm::errs() << S;
}
void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
-
// Don't run the actions if an error has occurred with parsing the file.
DiagnosticsEngine &Diags = PP.getDiagnostics();
if (Diags.hasErrorOccurred() || Diags.hasFatalErrorOccurred())
return;
- if (isBisonFile(C)) {
+ // Explicitly destroy the PathDiagnosticConsumer. This will flush its output.
+ // FIXME: This should be replaced with something that doesn't rely on
+ // side-effects in PathDiagnosticConsumer's destructor. This is required when
+ // used with option -disable-free.
+ const auto DiagFlusherScopeExit =
+ llvm::make_scope_exit([this] { Mgr.reset(); });
+
+ if (Opts.ShouldIgnoreBisonGeneratedFiles &&
+ fileContainsString("/* A Bison parser, made by", C)) {
reportAnalyzerProgress("Skipping bison-generated file\n");
- } else if (Opts->DisableAllCheckers) {
+ return;
+ }
- // Don't analyze if the user explicitly asked for no checks to be performed
- // on this file.
+ if (Opts.ShouldIgnoreFlexGeneratedFiles &&
+ fileContainsString("/* A lexical scanner generated by flex", C)) {
+ reportAnalyzerProgress("Skipping flex-generated file\n");
+ return;
+ }
+
+ // Don't analyze if the user explicitly asked for no checks to be performed
+ // on this file.
+ if (Opts.DisableAllCheckers) {
reportAnalyzerProgress("All checks are disabled using a supplied option\n");
- } else {
- // Otherwise, just run the analysis.
- runAnalysisOnTranslationUnit(C);
+ return;
}
+ // Otherwise, just run the analysis.
+ runAnalysisOnTranslationUnit(C);
+
// Count how many basic blocks we have not covered.
NumBlocksInAnalyzedFunctions = FunctionSummaries.getTotalNumBasicBlocks();
NumVisitedBlocksInAnalyzedFunctions =
FunctionSummaries.getTotalNumVisitedBasicBlocks();
if (NumBlocksInAnalyzedFunctions > 0)
PercentReachableBlocks =
- (FunctionSummaries.getTotalNumVisitedBasicBlocks() * 100) /
+ (FunctionSummaries.getTotalNumVisitedBasicBlocks() * 100) /
NumBlocksInAnalyzedFunctions;
-
- // Explicitly destroy the PathDiagnosticConsumer. This will flush its output.
- // FIXME: This should be replaced with something that doesn't rely on
- // side-effects in PathDiagnosticConsumer's destructor. This is required when
- // used with option -disable-free.
- Mgr.reset();
}
AnalysisConsumer::AnalysisMode
AnalysisConsumer::getModeForDecl(Decl *D, AnalysisMode Mode) {
- if (!Opts->AnalyzeSpecificFunction.empty() &&
- AnalysisDeclContext::getFunctionName(D) != Opts->AnalyzeSpecificFunction)
+ if (!Opts.AnalyzeSpecificFunction.empty() &&
+ AnalysisDeclContext::getFunctionName(D) != Opts.AnalyzeSpecificFunction)
return AM_None;
// Unless -analyze-all is specified, treat decls differently depending on
@@ -591,16 +632,24 @@ AnalysisConsumer::getModeForDecl(Decl *D, AnalysisMode Mode) {
// - Main source file: run both path-sensitive and non-path-sensitive checks.
// - Header files: run non-path-sensitive checks only.
// - System headers: don't run any checks.
- SourceManager &SM = Ctx->getSourceManager();
- const Stmt *Body = D->getBody();
- SourceLocation SL = Body ? Body->getBeginLoc() : D->getLocation();
- SL = SM.getExpansionLoc(SL);
-
- if (!Opts->AnalyzeAll && !Mgr->isInCodeFile(SL)) {
- if (SL.isInvalid() || SM.isInSystemHeader(SL))
- return AM_None;
+ if (Opts.AnalyzeAll)
+ return Mode;
+
+ const SourceManager &SM = Ctx->getSourceManager();
+
+ const SourceLocation Loc = [&SM](Decl *D) -> SourceLocation {
+ const Stmt *Body = D->getBody();
+ SourceLocation SL = Body ? Body->getBeginLoc() : D->getLocation();
+ return SM.getExpansionLoc(SL);
+ }(D);
+
+ // Ignore system headers.
+ if (Loc.isInvalid() || SM.isInSystemHeader(Loc))
+ return AM_None;
+
+ // Disable path sensitive analysis in user-headers.
+ if (!Mgr->isInCodeFile(Loc))
return Mode & ~AM_Path;
- }
return Mode;
}
@@ -708,8 +757,8 @@ ento::CreateAnalysisConsumer(CompilerInstance &CI) {
// Disable the effects of '-Werror' when using the AnalysisConsumer.
CI.getPreprocessor().getDiagnostics().setWarningsAsErrors(false);
- AnalyzerOptionsRef analyzerOpts = CI.getAnalyzerOpts();
- bool hasModelPath = analyzerOpts->Config.count("model-path") > 0;
+ AnalyzerOptions &analyzerOpts = CI.getAnalyzerOpts();
+ bool hasModelPath = analyzerOpts.Config.count("model-path") > 0;
return std::make_unique<AnalysisConsumer>(
CI, CI.getFrontendOpts().OutputFile, analyzerOpts,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalyzerHelpFlags.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalyzerHelpFlags.cpp
index eb6014a0629d..ea75c794f0b7 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalyzerHelpFlags.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalyzerHelpFlags.cpp
@@ -30,18 +30,18 @@ void ento::printCheckerHelp(raw_ostream &out, CompilerInstance &CI) {
out << "USAGE: -analyzer-checker <CHECKER or PACKAGE,...>\n\n";
auto CheckerMgr = std::make_unique<CheckerManager>(
- *CI.getAnalyzerOpts(), CI.getLangOpts(), CI.getDiagnostics(),
+ CI.getAnalyzerOpts(), CI.getLangOpts(), CI.getDiagnostics(),
CI.getFrontendOpts().Plugins);
CheckerMgr->getCheckerRegistryData().printCheckerWithDescList(
- *CI.getAnalyzerOpts(), out);
+ CI.getAnalyzerOpts(), out);
}
void ento::printEnabledCheckerList(raw_ostream &out, CompilerInstance &CI) {
out << "OVERVIEW: Clang Static Analyzer Enabled Checkers List\n\n";
auto CheckerMgr = std::make_unique<CheckerManager>(
- *CI.getAnalyzerOpts(), CI.getLangOpts(), CI.getDiagnostics(),
+ CI.getAnalyzerOpts(), CI.getLangOpts(), CI.getDiagnostics(),
CI.getFrontendOpts().Plugins);
CheckerMgr->getCheckerRegistryData().printEnabledCheckerList(out);
@@ -50,11 +50,11 @@ void ento::printEnabledCheckerList(raw_ostream &out, CompilerInstance &CI) {
void ento::printCheckerConfigList(raw_ostream &out, CompilerInstance &CI) {
auto CheckerMgr = std::make_unique<CheckerManager>(
- *CI.getAnalyzerOpts(), CI.getLangOpts(), CI.getDiagnostics(),
+ CI.getAnalyzerOpts(), CI.getLangOpts(), CI.getDiagnostics(),
CI.getFrontendOpts().Plugins);
CheckerMgr->getCheckerRegistryData().printCheckerOptionList(
- *CI.getAnalyzerOpts(), out);
+ CI.getAnalyzerOpts(), out);
}
void ento::printAnalyzerConfigList(raw_ostream &out) {
@@ -101,10 +101,7 @@ OPTIONS:
#undef ANALYZER_OPTION_DEPENDS_ON_USER_MODE
};
- llvm::sort(PrintableOptions, [](const OptionAndDescriptionTy &LHS,
- const OptionAndDescriptionTy &RHS) {
- return LHS.first < RHS.first;
- });
+ llvm::sort(PrintableOptions, llvm::less_first());
for (const auto &Pair : PrintableOptions) {
AnalyzerOptions::printFormattedEntry(out, Pair, /*InitialPad*/ 2,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp
index 528284ca8985..317df90a7781 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp
@@ -15,7 +15,6 @@
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DynamicLibrary.h"
@@ -234,7 +233,7 @@ void CheckerRegistry::initializeRegistry(const CheckerManager &Mgr) {
// done recursively, its arguably cheaper, but for sure less error prone to
// recalculate from scratch.
auto IsEnabled = [&](const CheckerInfo *Checker) {
- return llvm::is_contained(Tmp, Checker);
+ return Tmp.contains(Checker);
};
for (const CheckerInfo &Checker : Data.Checkers) {
if (!Checker.isEnabled(Mgr))
@@ -311,8 +310,8 @@ template <bool IsWeak> void CheckerRegistry::resolveDependencies() {
"Failed to find the dependency of a checker!");
// We do allow diagnostics from unit test/example dependency checkers.
- assert((DependencyIt->FullName.startswith("test") ||
- DependencyIt->FullName.startswith("example") || IsWeak ||
+ assert((DependencyIt->FullName.starts_with("test") ||
+ DependencyIt->FullName.starts_with("example") || IsWeak ||
DependencyIt->IsHidden) &&
"Strong dependencies are modeling checkers, and as such "
"non-user facing! Mark them hidden in Checkers.td!");
@@ -480,9 +479,7 @@ static void isOptionContainedIn(const CmdLineOptionList &OptionList,
return Opt.OptionName == SuppliedOption;
};
- const auto *OptionIt = llvm::find_if(OptionList, SameOptName);
-
- if (OptionIt == OptionList.end()) {
+ if (llvm::none_of(OptionList, SameOptName)) {
Diags.Report(diag::err_analyzer_checker_option_unknown)
<< SuppliedChecker << SuppliedOption;
return;
@@ -528,4 +525,3 @@ void CheckerRegistry::validateCheckerOptions() const {
<< SuppliedCheckerOrPackage;
}
}
-
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp
index 276f7313b08f..0f1039d81d52 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelConsumer.cpp
@@ -28,11 +28,10 @@ using namespace ento;
ModelConsumer::ModelConsumer(llvm::StringMap<Stmt *> &Bodies)
: Bodies(Bodies) {}
-bool ModelConsumer::HandleTopLevelDecl(DeclGroupRef D) {
- for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
-
+bool ModelConsumer::HandleTopLevelDecl(DeclGroupRef DeclGroup) {
+ for (const Decl *D : DeclGroup) {
// Only interested in definitions.
- const FunctionDecl *func = llvm::dyn_cast<FunctionDecl>(*I);
+ const auto *func = llvm::dyn_cast<FunctionDecl>(D);
if (func && func->hasBody()) {
Bodies.insert(std::make_pair(func->getName(), func->getBody()));
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelInjector.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelInjector.cpp
index 7baae6778ebd..ae11fbbe32b7 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelInjector.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelInjector.cpp
@@ -48,8 +48,7 @@ void ModelInjector::onBodySynthesis(const NamedDecl *D) {
SourceManager &SM = CI.getSourceManager();
FileID mainFileID = SM.getMainFileID();
- AnalyzerOptionsRef analyzerOpts = CI.getAnalyzerOpts();
- llvm::StringRef modelPath = analyzerOpts->ModelPath;
+ llvm::StringRef modelPath = CI.getAnalyzerOpts().ModelPath;
llvm::SmallString<128> fileName;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelInjector.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelInjector.h
index d2016c3b112c..4db26028362f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelInjector.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/ModelInjector.h
@@ -29,10 +29,7 @@
namespace clang {
class CompilerInstance;
-class ASTUnit;
-class ASTReader;
class NamedDecl;
-class Module;
namespace ento {
class ModelInjector : public CodeInjector {
diff --git a/contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp
new file mode 100644
index 000000000000..2de977a3dc72
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -0,0 +1,1238 @@
+//===- RISCVVIntrinsicUtils.cpp - RISC-V Vector Intrinsic Utils -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Support/RISCVVIntrinsicUtils.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <numeric>
+#include <optional>
+
+using namespace llvm;
+
+namespace clang {
+namespace RISCV {
+
+const PrototypeDescriptor PrototypeDescriptor::Mask = PrototypeDescriptor(
+ BaseTypeModifier::Vector, VectorTypeModifier::MaskVector);
+const PrototypeDescriptor PrototypeDescriptor::VL =
+ PrototypeDescriptor(BaseTypeModifier::SizeT);
+const PrototypeDescriptor PrototypeDescriptor::Vector =
+ PrototypeDescriptor(BaseTypeModifier::Vector);
+
+//===----------------------------------------------------------------------===//
+// Type implementation
+//===----------------------------------------------------------------------===//
+
+LMULType::LMULType(int NewLog2LMUL) {
+ // Check Log2LMUL is -3, -2, -1, 0, 1, 2, 3
+ assert(NewLog2LMUL <= 3 && NewLog2LMUL >= -3 && "Bad LMUL number!");
+ Log2LMUL = NewLog2LMUL;
+}
+
+std::string LMULType::str() const {
+ if (Log2LMUL < 0)
+ return "mf" + utostr(1ULL << (-Log2LMUL));
+ return "m" + utostr(1ULL << Log2LMUL);
+}
+
+VScaleVal LMULType::getScale(unsigned ElementBitwidth) const {
+ int Log2ScaleResult = 0;
+ switch (ElementBitwidth) {
+ default:
+ break;
+ case 8:
+ Log2ScaleResult = Log2LMUL + 3;
+ break;
+ case 16:
+ Log2ScaleResult = Log2LMUL + 2;
+ break;
+ case 32:
+ Log2ScaleResult = Log2LMUL + 1;
+ break;
+ case 64:
+ Log2ScaleResult = Log2LMUL;
+ break;
+ }
+ // Illegal vscale result would be less than 1
+ if (Log2ScaleResult < 0)
+ return std::nullopt;
+ return 1 << Log2ScaleResult;
+}
+
+void LMULType::MulLog2LMUL(int log2LMUL) { Log2LMUL += log2LMUL; }
+
+RVVType::RVVType(BasicType BT, int Log2LMUL,
+ const PrototypeDescriptor &prototype)
+ : BT(BT), LMUL(LMULType(Log2LMUL)) {
+ applyBasicType();
+ applyModifier(prototype);
+ Valid = verifyType();
+ if (Valid) {
+ initBuiltinStr();
+ initTypeStr();
+ if (isVector()) {
+ initClangBuiltinStr();
+ }
+ }
+}
+
+// clang-format off
+// boolean type are encoded the ratio of n (SEW/LMUL)
+// SEW/LMUL | 1 | 2 | 4 | 8 | 16 | 32 | 64
+// c type | vbool64_t | vbool32_t | vbool16_t | vbool8_t | vbool4_t | vbool2_t | vbool1_t
+// IR type | nxv1i1 | nxv2i1 | nxv4i1 | nxv8i1 | nxv16i1 | nxv32i1 | nxv64i1
+
+// type\lmul | 1/8 | 1/4 | 1/2 | 1 | 2 | 4 | 8
+// -------- |------ | -------- | ------- | ------- | -------- | -------- | --------
+// i64 | N/A | N/A | N/A | nxv1i64 | nxv2i64 | nxv4i64 | nxv8i64
+// i32 | N/A | N/A | nxv1i32 | nxv2i32 | nxv4i32 | nxv8i32 | nxv16i32
+// i16 | N/A | nxv1i16 | nxv2i16 | nxv4i16 | nxv8i16 | nxv16i16 | nxv32i16
+// i8 | nxv1i8 | nxv2i8 | nxv4i8 | nxv8i8 | nxv16i8 | nxv32i8 | nxv64i8
+// double | N/A | N/A | N/A | nxv1f64 | nxv2f64 | nxv4f64 | nxv8f64
+// float | N/A | N/A | nxv1f32 | nxv2f32 | nxv4f32 | nxv8f32 | nxv16f32
+// half | N/A | nxv1f16 | nxv2f16 | nxv4f16 | nxv8f16 | nxv16f16 | nxv32f16
+// bfloat16 | N/A | nxv1bf16 | nxv2bf16| nxv4bf16| nxv8bf16 | nxv16bf16| nxv32bf16
+// clang-format on
+
+bool RVVType::verifyType() const {
+ if (ScalarType == Invalid)
+ return false;
+ if (isScalar())
+ return true;
+ if (!Scale)
+ return false;
+ if (isFloat() && ElementBitwidth == 8)
+ return false;
+ if (isBFloat() && ElementBitwidth != 16)
+ return false;
+ if (IsTuple && (NF == 1 || NF > 8))
+ return false;
+ if (IsTuple && (1 << std::max(0, LMUL.Log2LMUL)) * NF > 8)
+ return false;
+ unsigned V = *Scale;
+ switch (ElementBitwidth) {
+ case 1:
+ case 8:
+ // Check Scale is 1,2,4,8,16,32,64
+ return (V <= 64 && isPowerOf2_32(V));
+ case 16:
+ // Check Scale is 1,2,4,8,16,32
+ return (V <= 32 && isPowerOf2_32(V));
+ case 32:
+ // Check Scale is 1,2,4,8,16
+ return (V <= 16 && isPowerOf2_32(V));
+ case 64:
+ // Check Scale is 1,2,4,8
+ return (V <= 8 && isPowerOf2_32(V));
+ }
+ return false;
+}
+
+void RVVType::initBuiltinStr() {
+ assert(isValid() && "RVVType is invalid");
+ switch (ScalarType) {
+ case ScalarTypeKind::Void:
+ BuiltinStr = "v";
+ return;
+ case ScalarTypeKind::Size_t:
+ BuiltinStr = "z";
+ if (IsImmediate)
+ BuiltinStr = "I" + BuiltinStr;
+ if (IsPointer)
+ BuiltinStr += "*";
+ return;
+ case ScalarTypeKind::Ptrdiff_t:
+ BuiltinStr = "Y";
+ return;
+ case ScalarTypeKind::UnsignedLong:
+ BuiltinStr = "ULi";
+ return;
+ case ScalarTypeKind::SignedLong:
+ BuiltinStr = "Li";
+ return;
+ case ScalarTypeKind::Boolean:
+ assert(ElementBitwidth == 1);
+ BuiltinStr += "b";
+ break;
+ case ScalarTypeKind::SignedInteger:
+ case ScalarTypeKind::UnsignedInteger:
+ switch (ElementBitwidth) {
+ case 8:
+ BuiltinStr += "c";
+ break;
+ case 16:
+ BuiltinStr += "s";
+ break;
+ case 32:
+ BuiltinStr += "i";
+ break;
+ case 64:
+ BuiltinStr += "Wi";
+ break;
+ default:
+ llvm_unreachable("Unhandled ElementBitwidth!");
+ }
+ if (isSignedInteger())
+ BuiltinStr = "S" + BuiltinStr;
+ else
+ BuiltinStr = "U" + BuiltinStr;
+ break;
+ case ScalarTypeKind::Float:
+ switch (ElementBitwidth) {
+ case 16:
+ BuiltinStr += "x";
+ break;
+ case 32:
+ BuiltinStr += "f";
+ break;
+ case 64:
+ BuiltinStr += "d";
+ break;
+ default:
+ llvm_unreachable("Unhandled ElementBitwidth!");
+ }
+ break;
+ case ScalarTypeKind::BFloat:
+ BuiltinStr += "y";
+ break;
+ default:
+ llvm_unreachable("ScalarType is invalid!");
+ }
+ if (IsImmediate)
+ BuiltinStr = "I" + BuiltinStr;
+ if (isScalar()) {
+ if (IsConstant)
+ BuiltinStr += "C";
+ if (IsPointer)
+ BuiltinStr += "*";
+ return;
+ }
+ BuiltinStr = "q" + utostr(*Scale) + BuiltinStr;
+ // Pointer to vector types. Defined for segment load intrinsics.
+ // segment load intrinsics have pointer type arguments to store the loaded
+ // vector values.
+ if (IsPointer)
+ BuiltinStr += "*";
+
+ if (IsTuple)
+ BuiltinStr = "T" + utostr(NF) + BuiltinStr;
+}
+
+void RVVType::initClangBuiltinStr() {
+ assert(isValid() && "RVVType is invalid");
+ assert(isVector() && "Handle Vector type only");
+
+ ClangBuiltinStr = "__rvv_";
+ switch (ScalarType) {
+ case ScalarTypeKind::Boolean:
+ ClangBuiltinStr += "bool" + utostr(64 / *Scale) + "_t";
+ return;
+ case ScalarTypeKind::Float:
+ ClangBuiltinStr += "float";
+ break;
+ case ScalarTypeKind::BFloat:
+ ClangBuiltinStr += "bfloat";
+ break;
+ case ScalarTypeKind::SignedInteger:
+ ClangBuiltinStr += "int";
+ break;
+ case ScalarTypeKind::UnsignedInteger:
+ ClangBuiltinStr += "uint";
+ break;
+ default:
+ llvm_unreachable("ScalarTypeKind is invalid");
+ }
+ ClangBuiltinStr += utostr(ElementBitwidth) + LMUL.str() +
+ (IsTuple ? "x" + utostr(NF) : "") + "_t";
+}
+
+void RVVType::initTypeStr() {
+ assert(isValid() && "RVVType is invalid");
+
+ if (IsConstant)
+ Str += "const ";
+
+ auto getTypeString = [&](StringRef TypeStr) {
+ if (isScalar())
+ return Twine(TypeStr + Twine(ElementBitwidth) + "_t").str();
+ return Twine("v" + TypeStr + Twine(ElementBitwidth) + LMUL.str() +
+ (IsTuple ? "x" + utostr(NF) : "") + "_t")
+ .str();
+ };
+
+ switch (ScalarType) {
+ case ScalarTypeKind::Void:
+ Str = "void";
+ return;
+ case ScalarTypeKind::Size_t:
+ Str = "size_t";
+ if (IsPointer)
+ Str += " *";
+ return;
+ case ScalarTypeKind::Ptrdiff_t:
+ Str = "ptrdiff_t";
+ return;
+ case ScalarTypeKind::UnsignedLong:
+ Str = "unsigned long";
+ return;
+ case ScalarTypeKind::SignedLong:
+ Str = "long";
+ return;
+ case ScalarTypeKind::Boolean:
+ if (isScalar())
+ Str += "bool";
+ else
+ // Vector bool is special case, the formulate is
+ // `vbool<N>_t = MVT::nxv<64/N>i1` ex. vbool16_t = MVT::4i1
+ Str += "vbool" + utostr(64 / *Scale) + "_t";
+ break;
+ case ScalarTypeKind::Float:
+ if (isScalar()) {
+ if (ElementBitwidth == 64)
+ Str += "double";
+ else if (ElementBitwidth == 32)
+ Str += "float";
+ else if (ElementBitwidth == 16)
+ Str += "_Float16";
+ else
+ llvm_unreachable("Unhandled floating type.");
+ } else
+ Str += getTypeString("float");
+ break;
+ case ScalarTypeKind::BFloat:
+ if (isScalar()) {
+ if (ElementBitwidth == 16)
+ Str += "__bf16";
+ else
+ llvm_unreachable("Unhandled floating type.");
+ } else
+ Str += getTypeString("bfloat");
+ break;
+ case ScalarTypeKind::SignedInteger:
+ Str += getTypeString("int");
+ break;
+ case ScalarTypeKind::UnsignedInteger:
+ Str += getTypeString("uint");
+ break;
+ default:
+ llvm_unreachable("ScalarType is invalid!");
+ }
+ if (IsPointer)
+ Str += " *";
+}
+
+void RVVType::initShortStr() {
+ switch (ScalarType) {
+ case ScalarTypeKind::Boolean:
+ assert(isVector());
+ ShortStr = "b" + utostr(64 / *Scale);
+ return;
+ case ScalarTypeKind::Float:
+ ShortStr = "f" + utostr(ElementBitwidth);
+ break;
+ case ScalarTypeKind::BFloat:
+ ShortStr = "bf" + utostr(ElementBitwidth);
+ break;
+ case ScalarTypeKind::SignedInteger:
+ ShortStr = "i" + utostr(ElementBitwidth);
+ break;
+ case ScalarTypeKind::UnsignedInteger:
+ ShortStr = "u" + utostr(ElementBitwidth);
+ break;
+ default:
+ llvm_unreachable("Unhandled case!");
+ }
+ if (isVector())
+ ShortStr += LMUL.str();
+ if (isTuple())
+ ShortStr += "x" + utostr(NF);
+}
+
+static VectorTypeModifier getTupleVTM(unsigned NF) {
+ assert(2 <= NF && NF <= 8 && "2 <= NF <= 8");
+ return static_cast<VectorTypeModifier>(
+ static_cast<uint8_t>(VectorTypeModifier::Tuple2) + (NF - 2));
+}
+
+void RVVType::applyBasicType() {
+ switch (BT) {
+ case BasicType::Int8:
+ ElementBitwidth = 8;
+ ScalarType = ScalarTypeKind::SignedInteger;
+ break;
+ case BasicType::Int16:
+ ElementBitwidth = 16;
+ ScalarType = ScalarTypeKind::SignedInteger;
+ break;
+ case BasicType::Int32:
+ ElementBitwidth = 32;
+ ScalarType = ScalarTypeKind::SignedInteger;
+ break;
+ case BasicType::Int64:
+ ElementBitwidth = 64;
+ ScalarType = ScalarTypeKind::SignedInteger;
+ break;
+ case BasicType::Float16:
+ ElementBitwidth = 16;
+ ScalarType = ScalarTypeKind::Float;
+ break;
+ case BasicType::Float32:
+ ElementBitwidth = 32;
+ ScalarType = ScalarTypeKind::Float;
+ break;
+ case BasicType::Float64:
+ ElementBitwidth = 64;
+ ScalarType = ScalarTypeKind::Float;
+ break;
+ case BasicType::BFloat16:
+ ElementBitwidth = 16;
+ ScalarType = ScalarTypeKind::BFloat;
+ break;
+ default:
+ llvm_unreachable("Unhandled type code!");
+ }
+ assert(ElementBitwidth != 0 && "Bad element bitwidth!");
+}
+
+std::optional<PrototypeDescriptor>
+PrototypeDescriptor::parsePrototypeDescriptor(
+ llvm::StringRef PrototypeDescriptorStr) {
+ PrototypeDescriptor PD;
+ BaseTypeModifier PT = BaseTypeModifier::Invalid;
+ VectorTypeModifier VTM = VectorTypeModifier::NoModifier;
+
+ if (PrototypeDescriptorStr.empty())
+ return PD;
+
+ // Handle base type modifier
+ auto PType = PrototypeDescriptorStr.back();
+ switch (PType) {
+ case 'e':
+ PT = BaseTypeModifier::Scalar;
+ break;
+ case 'v':
+ PT = BaseTypeModifier::Vector;
+ break;
+ case 'w':
+ PT = BaseTypeModifier::Vector;
+ VTM = VectorTypeModifier::Widening2XVector;
+ break;
+ case 'q':
+ PT = BaseTypeModifier::Vector;
+ VTM = VectorTypeModifier::Widening4XVector;
+ break;
+ case 'o':
+ PT = BaseTypeModifier::Vector;
+ VTM = VectorTypeModifier::Widening8XVector;
+ break;
+ case 'm':
+ PT = BaseTypeModifier::Vector;
+ VTM = VectorTypeModifier::MaskVector;
+ break;
+ case '0':
+ PT = BaseTypeModifier::Void;
+ break;
+ case 'z':
+ PT = BaseTypeModifier::SizeT;
+ break;
+ case 't':
+ PT = BaseTypeModifier::Ptrdiff;
+ break;
+ case 'u':
+ PT = BaseTypeModifier::UnsignedLong;
+ break;
+ case 'l':
+ PT = BaseTypeModifier::SignedLong;
+ break;
+ case 'f':
+ PT = BaseTypeModifier::Float32;
+ break;
+ default:
+ llvm_unreachable("Illegal primitive type transformers!");
+ }
+ PD.PT = static_cast<uint8_t>(PT);
+ PrototypeDescriptorStr = PrototypeDescriptorStr.drop_back();
+
+ // Compute the vector type transformers, it can only appear one time.
+ if (PrototypeDescriptorStr.starts_with("(")) {
+ assert(VTM == VectorTypeModifier::NoModifier &&
+ "VectorTypeModifier should only have one modifier");
+ size_t Idx = PrototypeDescriptorStr.find(')');
+ assert(Idx != StringRef::npos);
+ StringRef ComplexType = PrototypeDescriptorStr.slice(1, Idx);
+ PrototypeDescriptorStr = PrototypeDescriptorStr.drop_front(Idx + 1);
+ assert(!PrototypeDescriptorStr.contains('(') &&
+ "Only allow one vector type modifier");
+
+ auto ComplexTT = ComplexType.split(":");
+ if (ComplexTT.first == "Log2EEW") {
+ uint32_t Log2EEW;
+ if (ComplexTT.second.getAsInteger(10, Log2EEW)) {
+ llvm_unreachable("Invalid Log2EEW value!");
+ return std::nullopt;
+ }
+ switch (Log2EEW) {
+ case 3:
+ VTM = VectorTypeModifier::Log2EEW3;
+ break;
+ case 4:
+ VTM = VectorTypeModifier::Log2EEW4;
+ break;
+ case 5:
+ VTM = VectorTypeModifier::Log2EEW5;
+ break;
+ case 6:
+ VTM = VectorTypeModifier::Log2EEW6;
+ break;
+ default:
+ llvm_unreachable("Invalid Log2EEW value, should be [3-6]");
+ return std::nullopt;
+ }
+ } else if (ComplexTT.first == "FixedSEW") {
+ uint32_t NewSEW;
+ if (ComplexTT.second.getAsInteger(10, NewSEW)) {
+ llvm_unreachable("Invalid FixedSEW value!");
+ return std::nullopt;
+ }
+ switch (NewSEW) {
+ case 8:
+ VTM = VectorTypeModifier::FixedSEW8;
+ break;
+ case 16:
+ VTM = VectorTypeModifier::FixedSEW16;
+ break;
+ case 32:
+ VTM = VectorTypeModifier::FixedSEW32;
+ break;
+ case 64:
+ VTM = VectorTypeModifier::FixedSEW64;
+ break;
+ default:
+ llvm_unreachable("Invalid FixedSEW value, should be 8, 16, 32 or 64");
+ return std::nullopt;
+ }
+ } else if (ComplexTT.first == "LFixedLog2LMUL") {
+ int32_t Log2LMUL;
+ if (ComplexTT.second.getAsInteger(10, Log2LMUL)) {
+ llvm_unreachable("Invalid LFixedLog2LMUL value!");
+ return std::nullopt;
+ }
+ switch (Log2LMUL) {
+ case -3:
+ VTM = VectorTypeModifier::LFixedLog2LMULN3;
+ break;
+ case -2:
+ VTM = VectorTypeModifier::LFixedLog2LMULN2;
+ break;
+ case -1:
+ VTM = VectorTypeModifier::LFixedLog2LMULN1;
+ break;
+ case 0:
+ VTM = VectorTypeModifier::LFixedLog2LMUL0;
+ break;
+ case 1:
+ VTM = VectorTypeModifier::LFixedLog2LMUL1;
+ break;
+ case 2:
+ VTM = VectorTypeModifier::LFixedLog2LMUL2;
+ break;
+ case 3:
+ VTM = VectorTypeModifier::LFixedLog2LMUL3;
+ break;
+ default:
+ llvm_unreachable("Invalid LFixedLog2LMUL value, should be [-3, 3]");
+ return std::nullopt;
+ }
+ } else if (ComplexTT.first == "SFixedLog2LMUL") {
+ int32_t Log2LMUL;
+ if (ComplexTT.second.getAsInteger(10, Log2LMUL)) {
+ llvm_unreachable("Invalid SFixedLog2LMUL value!");
+ return std::nullopt;
+ }
+ switch (Log2LMUL) {
+ case -3:
+ VTM = VectorTypeModifier::SFixedLog2LMULN3;
+ break;
+ case -2:
+ VTM = VectorTypeModifier::SFixedLog2LMULN2;
+ break;
+ case -1:
+ VTM = VectorTypeModifier::SFixedLog2LMULN1;
+ break;
+ case 0:
+ VTM = VectorTypeModifier::SFixedLog2LMUL0;
+ break;
+ case 1:
+ VTM = VectorTypeModifier::SFixedLog2LMUL1;
+ break;
+ case 2:
+ VTM = VectorTypeModifier::SFixedLog2LMUL2;
+ break;
+ case 3:
+ VTM = VectorTypeModifier::SFixedLog2LMUL3;
+ break;
+ default:
+ llvm_unreachable("Invalid LFixedLog2LMUL value, should be [-3, 3]");
+ return std::nullopt;
+ }
+
+ } else if (ComplexTT.first == "SEFixedLog2LMUL") {
+ int32_t Log2LMUL;
+ if (ComplexTT.second.getAsInteger(10, Log2LMUL)) {
+ llvm_unreachable("Invalid SEFixedLog2LMUL value!");
+ return std::nullopt;
+ }
+ switch (Log2LMUL) {
+ case -3:
+ VTM = VectorTypeModifier::SEFixedLog2LMULN3;
+ break;
+ case -2:
+ VTM = VectorTypeModifier::SEFixedLog2LMULN2;
+ break;
+ case -1:
+ VTM = VectorTypeModifier::SEFixedLog2LMULN1;
+ break;
+ case 0:
+ VTM = VectorTypeModifier::SEFixedLog2LMUL0;
+ break;
+ case 1:
+ VTM = VectorTypeModifier::SEFixedLog2LMUL1;
+ break;
+ case 2:
+ VTM = VectorTypeModifier::SEFixedLog2LMUL2;
+ break;
+ case 3:
+ VTM = VectorTypeModifier::SEFixedLog2LMUL3;
+ break;
+ default:
+ llvm_unreachable("Invalid LFixedLog2LMUL value, should be [-3, 3]");
+ return std::nullopt;
+ }
+ } else if (ComplexTT.first == "Tuple") {
+ unsigned NF = 0;
+ if (ComplexTT.second.getAsInteger(10, NF)) {
+ llvm_unreachable("Invalid NF value!");
+ return std::nullopt;
+ }
+ VTM = getTupleVTM(NF);
+ } else {
+ llvm_unreachable("Illegal complex type transformers!");
+ }
+ }
+ PD.VTM = static_cast<uint8_t>(VTM);
+
+ // Compute the remain type transformers
+ TypeModifier TM = TypeModifier::NoModifier;
+ for (char I : PrototypeDescriptorStr) {
+ switch (I) {
+ case 'P':
+ if ((TM & TypeModifier::Const) == TypeModifier::Const)
+ llvm_unreachable("'P' transformer cannot be used after 'C'");
+ if ((TM & TypeModifier::Pointer) == TypeModifier::Pointer)
+ llvm_unreachable("'P' transformer cannot be used twice");
+ TM |= TypeModifier::Pointer;
+ break;
+ case 'C':
+ TM |= TypeModifier::Const;
+ break;
+ case 'K':
+ TM |= TypeModifier::Immediate;
+ break;
+ case 'U':
+ TM |= TypeModifier::UnsignedInteger;
+ break;
+ case 'I':
+ TM |= TypeModifier::SignedInteger;
+ break;
+ case 'F':
+ TM |= TypeModifier::Float;
+ break;
+ case 'S':
+ TM |= TypeModifier::LMUL1;
+ break;
+ default:
+ llvm_unreachable("Illegal non-primitive type transformer!");
+ }
+ }
+ PD.TM = static_cast<uint8_t>(TM);
+
+ return PD;
+}
+
+void RVVType::applyModifier(const PrototypeDescriptor &Transformer) {
+ // Handle primitive type transformer
+ switch (static_cast<BaseTypeModifier>(Transformer.PT)) {
+ case BaseTypeModifier::Scalar:
+ Scale = 0;
+ break;
+ case BaseTypeModifier::Vector:
+ Scale = LMUL.getScale(ElementBitwidth);
+ break;
+ case BaseTypeModifier::Void:
+ ScalarType = ScalarTypeKind::Void;
+ break;
+ case BaseTypeModifier::SizeT:
+ ScalarType = ScalarTypeKind::Size_t;
+ break;
+ case BaseTypeModifier::Ptrdiff:
+ ScalarType = ScalarTypeKind::Ptrdiff_t;
+ break;
+ case BaseTypeModifier::UnsignedLong:
+ ScalarType = ScalarTypeKind::UnsignedLong;
+ break;
+ case BaseTypeModifier::SignedLong:
+ ScalarType = ScalarTypeKind::SignedLong;
+ break;
+ case BaseTypeModifier::Float32:
+ ElementBitwidth = 32;
+ ScalarType = ScalarTypeKind::Float;
+ break;
+ case BaseTypeModifier::Invalid:
+ ScalarType = ScalarTypeKind::Invalid;
+ return;
+ }
+
+ switch (static_cast<VectorTypeModifier>(Transformer.VTM)) {
+ case VectorTypeModifier::Widening2XVector:
+ ElementBitwidth *= 2;
+ LMUL.MulLog2LMUL(1);
+ Scale = LMUL.getScale(ElementBitwidth);
+ break;
+ case VectorTypeModifier::Widening4XVector:
+ ElementBitwidth *= 4;
+ LMUL.MulLog2LMUL(2);
+ Scale = LMUL.getScale(ElementBitwidth);
+ break;
+ case VectorTypeModifier::Widening8XVector:
+ ElementBitwidth *= 8;
+ LMUL.MulLog2LMUL(3);
+ Scale = LMUL.getScale(ElementBitwidth);
+ break;
+ case VectorTypeModifier::MaskVector:
+ ScalarType = ScalarTypeKind::Boolean;
+ Scale = LMUL.getScale(ElementBitwidth);
+ ElementBitwidth = 1;
+ break;
+ case VectorTypeModifier::Log2EEW3:
+ applyLog2EEW(3);
+ break;
+ case VectorTypeModifier::Log2EEW4:
+ applyLog2EEW(4);
+ break;
+ case VectorTypeModifier::Log2EEW5:
+ applyLog2EEW(5);
+ break;
+ case VectorTypeModifier::Log2EEW6:
+ applyLog2EEW(6);
+ break;
+ case VectorTypeModifier::FixedSEW8:
+ applyFixedSEW(8);
+ break;
+ case VectorTypeModifier::FixedSEW16:
+ applyFixedSEW(16);
+ break;
+ case VectorTypeModifier::FixedSEW32:
+ applyFixedSEW(32);
+ break;
+ case VectorTypeModifier::FixedSEW64:
+ applyFixedSEW(64);
+ break;
+ case VectorTypeModifier::LFixedLog2LMULN3:
+ applyFixedLog2LMUL(-3, FixedLMULType::LargerThan);
+ break;
+ case VectorTypeModifier::LFixedLog2LMULN2:
+ applyFixedLog2LMUL(-2, FixedLMULType::LargerThan);
+ break;
+ case VectorTypeModifier::LFixedLog2LMULN1:
+ applyFixedLog2LMUL(-1, FixedLMULType::LargerThan);
+ break;
+ case VectorTypeModifier::LFixedLog2LMUL0:
+ applyFixedLog2LMUL(0, FixedLMULType::LargerThan);
+ break;
+ case VectorTypeModifier::LFixedLog2LMUL1:
+ applyFixedLog2LMUL(1, FixedLMULType::LargerThan);
+ break;
+ case VectorTypeModifier::LFixedLog2LMUL2:
+ applyFixedLog2LMUL(2, FixedLMULType::LargerThan);
+ break;
+ case VectorTypeModifier::LFixedLog2LMUL3:
+ applyFixedLog2LMUL(3, FixedLMULType::LargerThan);
+ break;
+ case VectorTypeModifier::SFixedLog2LMULN3:
+ applyFixedLog2LMUL(-3, FixedLMULType::SmallerThan);
+ break;
+ case VectorTypeModifier::SFixedLog2LMULN2:
+ applyFixedLog2LMUL(-2, FixedLMULType::SmallerThan);
+ break;
+ case VectorTypeModifier::SFixedLog2LMULN1:
+ applyFixedLog2LMUL(-1, FixedLMULType::SmallerThan);
+ break;
+ case VectorTypeModifier::SFixedLog2LMUL0:
+ applyFixedLog2LMUL(0, FixedLMULType::SmallerThan);
+ break;
+ case VectorTypeModifier::SFixedLog2LMUL1:
+ applyFixedLog2LMUL(1, FixedLMULType::SmallerThan);
+ break;
+ case VectorTypeModifier::SFixedLog2LMUL2:
+ applyFixedLog2LMUL(2, FixedLMULType::SmallerThan);
+ break;
+ case VectorTypeModifier::SFixedLog2LMUL3:
+ applyFixedLog2LMUL(3, FixedLMULType::SmallerThan);
+ break;
+ case VectorTypeModifier::SEFixedLog2LMULN3:
+ applyFixedLog2LMUL(-3, FixedLMULType::SmallerOrEqual);
+ break;
+ case VectorTypeModifier::SEFixedLog2LMULN2:
+ applyFixedLog2LMUL(-2, FixedLMULType::SmallerOrEqual);
+ break;
+ case VectorTypeModifier::SEFixedLog2LMULN1:
+ applyFixedLog2LMUL(-1, FixedLMULType::SmallerOrEqual);
+ break;
+ case VectorTypeModifier::SEFixedLog2LMUL0:
+ applyFixedLog2LMUL(0, FixedLMULType::SmallerOrEqual);
+ break;
+ case VectorTypeModifier::SEFixedLog2LMUL1:
+ applyFixedLog2LMUL(1, FixedLMULType::SmallerOrEqual);
+ break;
+ case VectorTypeModifier::SEFixedLog2LMUL2:
+ applyFixedLog2LMUL(2, FixedLMULType::SmallerOrEqual);
+ break;
+ case VectorTypeModifier::SEFixedLog2LMUL3:
+ applyFixedLog2LMUL(3, FixedLMULType::SmallerOrEqual);
+ break;
+ case VectorTypeModifier::Tuple2:
+ case VectorTypeModifier::Tuple3:
+ case VectorTypeModifier::Tuple4:
+ case VectorTypeModifier::Tuple5:
+ case VectorTypeModifier::Tuple6:
+ case VectorTypeModifier::Tuple7:
+ case VectorTypeModifier::Tuple8: {
+ IsTuple = true;
+ NF = 2 + static_cast<uint8_t>(Transformer.VTM) -
+ static_cast<uint8_t>(VectorTypeModifier::Tuple2);
+ break;
+ }
+ case VectorTypeModifier::NoModifier:
+ break;
+ }
+
+ // Early return if the current type modifier is already invalid.
+ if (ScalarType == Invalid)
+ return;
+
+ for (unsigned TypeModifierMaskShift = 0;
+ TypeModifierMaskShift <= static_cast<unsigned>(TypeModifier::MaxOffset);
+ ++TypeModifierMaskShift) {
+ unsigned TypeModifierMask = 1 << TypeModifierMaskShift;
+ if ((static_cast<unsigned>(Transformer.TM) & TypeModifierMask) !=
+ TypeModifierMask)
+ continue;
+ switch (static_cast<TypeModifier>(TypeModifierMask)) {
+ case TypeModifier::Pointer:
+ IsPointer = true;
+ break;
+ case TypeModifier::Const:
+ IsConstant = true;
+ break;
+ case TypeModifier::Immediate:
+ IsImmediate = true;
+ IsConstant = true;
+ break;
+ case TypeModifier::UnsignedInteger:
+ ScalarType = ScalarTypeKind::UnsignedInteger;
+ break;
+ case TypeModifier::SignedInteger:
+ ScalarType = ScalarTypeKind::SignedInteger;
+ break;
+ case TypeModifier::Float:
+ ScalarType = ScalarTypeKind::Float;
+ break;
+ case TypeModifier::BFloat:
+ ScalarType = ScalarTypeKind::BFloat;
+ break;
+ case TypeModifier::LMUL1:
+ LMUL = LMULType(0);
+ // Update ElementBitwidth need to update Scale too.
+ Scale = LMUL.getScale(ElementBitwidth);
+ break;
+ default:
+ llvm_unreachable("Unknown type modifier mask!");
+ }
+ }
+}
+
+void RVVType::applyLog2EEW(unsigned Log2EEW) {
+ // update new elmul = (eew/sew) * lmul
+ LMUL.MulLog2LMUL(Log2EEW - Log2_32(ElementBitwidth));
+ // update new eew
+ ElementBitwidth = 1 << Log2EEW;
+ ScalarType = ScalarTypeKind::SignedInteger;
+ Scale = LMUL.getScale(ElementBitwidth);
+}
+
+void RVVType::applyFixedSEW(unsigned NewSEW) {
+ // Set invalid type if src and dst SEW are same.
+ if (ElementBitwidth == NewSEW) {
+ ScalarType = ScalarTypeKind::Invalid;
+ return;
+ }
+ // Update new SEW
+ ElementBitwidth = NewSEW;
+ Scale = LMUL.getScale(ElementBitwidth);
+}
+
+void RVVType::applyFixedLog2LMUL(int Log2LMUL, enum FixedLMULType Type) {
+ switch (Type) {
+ case FixedLMULType::LargerThan:
+ if (Log2LMUL <= LMUL.Log2LMUL) {
+ ScalarType = ScalarTypeKind::Invalid;
+ return;
+ }
+ break;
+ case FixedLMULType::SmallerThan:
+ if (Log2LMUL >= LMUL.Log2LMUL) {
+ ScalarType = ScalarTypeKind::Invalid;
+ return;
+ }
+ break;
+ case FixedLMULType::SmallerOrEqual:
+ if (Log2LMUL > LMUL.Log2LMUL) {
+ ScalarType = ScalarTypeKind::Invalid;
+ return;
+ }
+ break;
+ }
+
+ // Update new LMUL
+ LMUL = LMULType(Log2LMUL);
+ Scale = LMUL.getScale(ElementBitwidth);
+}
+
+std::optional<RVVTypes>
+RVVTypeCache::computeTypes(BasicType BT, int Log2LMUL, unsigned NF,
+ ArrayRef<PrototypeDescriptor> Prototype) {
+ RVVTypes Types;
+ for (const PrototypeDescriptor &Proto : Prototype) {
+ auto T = computeType(BT, Log2LMUL, Proto);
+ if (!T)
+ return std::nullopt;
+ // Record legal type index
+ Types.push_back(*T);
+ }
+ return Types;
+}
+
+// Compute the hash value of RVVType, used for cache the result of computeType.
+static uint64_t computeRVVTypeHashValue(BasicType BT, int Log2LMUL,
+ PrototypeDescriptor Proto) {
+ // Layout of hash value:
+ // 0 8 16 24 32 40
+ // | Log2LMUL + 3 | BT | Proto.PT | Proto.TM | Proto.VTM |
+ assert(Log2LMUL >= -3 && Log2LMUL <= 3);
+ return (Log2LMUL + 3) | (static_cast<uint64_t>(BT) & 0xff) << 8 |
+ ((uint64_t)(Proto.PT & 0xff) << 16) |
+ ((uint64_t)(Proto.TM & 0xff) << 24) |
+ ((uint64_t)(Proto.VTM & 0xff) << 32);
+}
+
+std::optional<RVVTypePtr> RVVTypeCache::computeType(BasicType BT, int Log2LMUL,
+ PrototypeDescriptor Proto) {
+ uint64_t Idx = computeRVVTypeHashValue(BT, Log2LMUL, Proto);
+ // Search first
+ auto It = LegalTypes.find(Idx);
+ if (It != LegalTypes.end())
+ return &(It->second);
+
+ if (IllegalTypes.count(Idx))
+ return std::nullopt;
+
+ // Compute type and record the result.
+ RVVType T(BT, Log2LMUL, Proto);
+ if (T.isValid()) {
+ // Record legal type index and value.
+ std::pair<std::unordered_map<uint64_t, RVVType>::iterator, bool>
+ InsertResult = LegalTypes.insert({Idx, T});
+ return &(InsertResult.first->second);
+ }
+ // Record illegal type index.
+ IllegalTypes.insert(Idx);
+ return std::nullopt;
+}
+
+//===----------------------------------------------------------------------===//
+// RVVIntrinsic implementation
+//===----------------------------------------------------------------------===//
+RVVIntrinsic::RVVIntrinsic(
+ StringRef NewName, StringRef Suffix, StringRef NewOverloadedName,
+ StringRef OverloadedSuffix, StringRef IRName, bool IsMasked,
+ bool HasMaskedOffOperand, bool HasVL, PolicyScheme Scheme,
+ bool SupportOverloading, bool HasBuiltinAlias, StringRef ManualCodegen,
+ const RVVTypes &OutInTypes, const std::vector<int64_t> &NewIntrinsicTypes,
+ const std::vector<StringRef> &RequiredFeatures, unsigned NF,
+ Policy NewPolicyAttrs, bool HasFRMRoundModeOp)
+ : IRName(IRName), IsMasked(IsMasked),
+ HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL), Scheme(Scheme),
+ SupportOverloading(SupportOverloading), HasBuiltinAlias(HasBuiltinAlias),
+ ManualCodegen(ManualCodegen.str()), NF(NF), PolicyAttrs(NewPolicyAttrs) {
+
+ // Init BuiltinName, Name and OverloadedName
+ BuiltinName = NewName.str();
+ Name = BuiltinName;
+ if (NewOverloadedName.empty())
+ OverloadedName = NewName.split("_").first.str();
+ else
+ OverloadedName = NewOverloadedName.str();
+ if (!Suffix.empty())
+ Name += "_" + Suffix.str();
+ if (!OverloadedSuffix.empty())
+ OverloadedName += "_" + OverloadedSuffix.str();
+
+ updateNamesAndPolicy(IsMasked, hasPolicy(), Name, BuiltinName, OverloadedName,
+ PolicyAttrs, HasFRMRoundModeOp);
+
+ // Init OutputType and InputTypes
+ OutputType = OutInTypes[0];
+ InputTypes.assign(OutInTypes.begin() + 1, OutInTypes.end());
+
+ // IntrinsicTypes is unmasked TA version index. Need to update it
+ // if there is merge operand (It is always in first operand).
+ IntrinsicTypes = NewIntrinsicTypes;
+ if ((IsMasked && hasMaskedOffOperand()) ||
+ (!IsMasked && hasPassthruOperand())) {
+ for (auto &I : IntrinsicTypes) {
+ if (I >= 0)
+ I += NF;
+ }
+ }
+}
+
+std::string RVVIntrinsic::getBuiltinTypeStr() const {
+ std::string S;
+ S += OutputType->getBuiltinStr();
+ for (const auto &T : InputTypes) {
+ S += T->getBuiltinStr();
+ }
+ return S;
+}
+
+std::string RVVIntrinsic::getSuffixStr(
+ RVVTypeCache &TypeCache, BasicType Type, int Log2LMUL,
+ llvm::ArrayRef<PrototypeDescriptor> PrototypeDescriptors) {
+ SmallVector<std::string> SuffixStrs;
+ for (auto PD : PrototypeDescriptors) {
+ auto T = TypeCache.computeType(Type, Log2LMUL, PD);
+ SuffixStrs.push_back((*T)->getShortStr());
+ }
+ return join(SuffixStrs, "_");
+}
+
+llvm::SmallVector<PrototypeDescriptor> RVVIntrinsic::computeBuiltinTypes(
+ llvm::ArrayRef<PrototypeDescriptor> Prototype, bool IsMasked,
+ bool HasMaskedOffOperand, bool HasVL, unsigned NF,
+ PolicyScheme DefaultScheme, Policy PolicyAttrs, bool IsTuple) {
+ SmallVector<PrototypeDescriptor> NewPrototype(Prototype.begin(),
+ Prototype.end());
+ bool HasPassthruOp = DefaultScheme == PolicyScheme::HasPassthruOperand;
+ if (IsMasked) {
+ // If HasMaskedOffOperand, insert result type as first input operand if
+ // need.
+ if (HasMaskedOffOperand && !PolicyAttrs.isTAMAPolicy()) {
+ if (NF == 1) {
+ NewPrototype.insert(NewPrototype.begin() + 1, NewPrototype[0]);
+ } else if (NF > 1) {
+ if (IsTuple) {
+ PrototypeDescriptor BasePtrOperand = Prototype[1];
+ PrototypeDescriptor MaskoffType = PrototypeDescriptor(
+ static_cast<uint8_t>(BaseTypeModifier::Vector),
+ static_cast<uint8_t>(getTupleVTM(NF)),
+ BasePtrOperand.TM & ~static_cast<uint8_t>(TypeModifier::Pointer));
+ NewPrototype.insert(NewPrototype.begin() + 1, MaskoffType);
+ } else {
+ // Convert
+ // (void, op0 address, op1 address, ...)
+ // to
+ // (void, op0 address, op1 address, ..., maskedoff0, maskedoff1, ...)
+ PrototypeDescriptor MaskoffType = NewPrototype[1];
+ MaskoffType.TM &= ~static_cast<uint8_t>(TypeModifier::Pointer);
+ NewPrototype.insert(NewPrototype.begin() + NF + 1, NF, MaskoffType);
+ }
+ }
+ }
+ if (HasMaskedOffOperand && NF > 1) {
+ // Convert
+ // (void, op0 address, op1 address, ..., maskedoff0, maskedoff1, ...)
+ // to
+ // (void, op0 address, op1 address, ..., mask, maskedoff0, maskedoff1,
+ // ...)
+ if (IsTuple)
+ NewPrototype.insert(NewPrototype.begin() + 1,
+ PrototypeDescriptor::Mask);
+ else
+ NewPrototype.insert(NewPrototype.begin() + NF + 1,
+ PrototypeDescriptor::Mask);
+ } else {
+ // If IsMasked, insert PrototypeDescriptor:Mask as first input operand.
+ NewPrototype.insert(NewPrototype.begin() + 1, PrototypeDescriptor::Mask);
+ }
+ } else {
+ if (NF == 1) {
+ if (PolicyAttrs.isTUPolicy() && HasPassthruOp)
+ NewPrototype.insert(NewPrototype.begin(), NewPrototype[0]);
+ } else if (PolicyAttrs.isTUPolicy() && HasPassthruOp) {
+ if (IsTuple) {
+ PrototypeDescriptor BasePtrOperand = Prototype[0];
+ PrototypeDescriptor MaskoffType = PrototypeDescriptor(
+ static_cast<uint8_t>(BaseTypeModifier::Vector),
+ static_cast<uint8_t>(getTupleVTM(NF)),
+ BasePtrOperand.TM & ~static_cast<uint8_t>(TypeModifier::Pointer));
+ NewPrototype.insert(NewPrototype.begin(), MaskoffType);
+ } else {
+ // NF > 1 cases for segment load operations.
+ // Convert
+ // (void, op0 address, op1 address, ...)
+ // to
+ // (void, op0 address, op1 address, maskedoff0, maskedoff1, ...)
+ PrototypeDescriptor MaskoffType = Prototype[1];
+ MaskoffType.TM &= ~static_cast<uint8_t>(TypeModifier::Pointer);
+ NewPrototype.insert(NewPrototype.begin() + NF + 1, NF, MaskoffType);
+ }
+ }
+ }
+
+ // If HasVL, append PrototypeDescriptor:VL to last operand
+ if (HasVL)
+ NewPrototype.push_back(PrototypeDescriptor::VL);
+
+ return NewPrototype;
+}
+
+llvm::SmallVector<Policy> RVVIntrinsic::getSupportedUnMaskedPolicies() {
+ return {Policy(Policy::PolicyType::Undisturbed)}; // TU
+}
+
+llvm::SmallVector<Policy>
+RVVIntrinsic::getSupportedMaskedPolicies(bool HasTailPolicy,
+ bool HasMaskPolicy) {
+ if (HasTailPolicy && HasMaskPolicy)
+ return {Policy(Policy::PolicyType::Undisturbed,
+ Policy::PolicyType::Agnostic), // TUM
+ Policy(Policy::PolicyType::Undisturbed,
+ Policy::PolicyType::Undisturbed), // TUMU
+ Policy(Policy::PolicyType::Agnostic,
+ Policy::PolicyType::Undisturbed)}; // MU
+ if (HasTailPolicy && !HasMaskPolicy)
+ return {Policy(Policy::PolicyType::Undisturbed,
+ Policy::PolicyType::Agnostic)}; // TU
+ if (!HasTailPolicy && HasMaskPolicy)
+ return {Policy(Policy::PolicyType::Agnostic,
+ Policy::PolicyType::Undisturbed)}; // MU
+ llvm_unreachable("An RVV instruction should not be without both tail policy "
+ "and mask policy");
+}
+
+void RVVIntrinsic::updateNamesAndPolicy(
+ bool IsMasked, bool HasPolicy, std::string &Name, std::string &BuiltinName,
+ std::string &OverloadedName, Policy &PolicyAttrs, bool HasFRMRoundModeOp) {
+
+ auto appendPolicySuffix = [&](const std::string &suffix) {
+ Name += suffix;
+ BuiltinName += suffix;
+ OverloadedName += suffix;
+ };
+
+ // This follows the naming guideline under riscv-c-api-doc to add the
+ // `__riscv_` suffix for all RVV intrinsics.
+ Name = "__riscv_" + Name;
+ OverloadedName = "__riscv_" + OverloadedName;
+
+ if (HasFRMRoundModeOp) {
+ Name += "_rm";
+ BuiltinName += "_rm";
+ }
+
+ if (IsMasked) {
+ if (PolicyAttrs.isTUMUPolicy())
+ appendPolicySuffix("_tumu");
+ else if (PolicyAttrs.isTUMAPolicy())
+ appendPolicySuffix("_tum");
+ else if (PolicyAttrs.isTAMUPolicy())
+ appendPolicySuffix("_mu");
+ else if (PolicyAttrs.isTAMAPolicy()) {
+ Name += "_m";
+ BuiltinName += "_m";
+ } else
+ llvm_unreachable("Unhandled policy condition");
+ } else {
+ if (PolicyAttrs.isTUPolicy())
+ appendPolicySuffix("_tu");
+ else if (PolicyAttrs.isTAPolicy()) // no suffix needed
+ return;
+ else
+ llvm_unreachable("Unhandled policy condition");
+ }
+}
+
+SmallVector<PrototypeDescriptor> parsePrototypes(StringRef Prototypes) {
+ SmallVector<PrototypeDescriptor> PrototypeDescriptors;
+ const StringRef Primaries("evwqom0ztulf");
+ while (!Prototypes.empty()) {
+ size_t Idx = 0;
+ // Skip over complex prototype because it could contain primitive type
+ // character.
+ if (Prototypes[0] == '(')
+ Idx = Prototypes.find_first_of(')');
+ Idx = Prototypes.find_first_of(Primaries, Idx);
+ assert(Idx != StringRef::npos);
+ auto PD = PrototypeDescriptor::parsePrototypeDescriptor(
+ Prototypes.slice(0, Idx + 1));
+ if (!PD)
+ llvm_unreachable("Error during parsing prototype.");
+ PrototypeDescriptors.push_back(*PD);
+ Prototypes = Prototypes.drop_front(Idx + 1);
+ }
+ return PrototypeDescriptors;
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const RVVIntrinsicRecord &Record) {
+ OS << "{";
+ OS << "\"" << Record.Name << "\",";
+ if (Record.OverloadedName == nullptr ||
+ StringRef(Record.OverloadedName).empty())
+ OS << "nullptr,";
+ else
+ OS << "\"" << Record.OverloadedName << "\",";
+ OS << Record.PrototypeIndex << ",";
+ OS << Record.SuffixIndex << ",";
+ OS << Record.OverloadedSuffixIndex << ",";
+ OS << (int)Record.PrototypeLength << ",";
+ OS << (int)Record.SuffixLength << ",";
+ OS << (int)Record.OverloadedSuffixSize << ",";
+ OS << Record.RequiredExtensions << ",";
+ OS << (int)Record.TypeRangeMask << ",";
+ OS << (int)Record.Log2LMULMask << ",";
+ OS << (int)Record.NF << ",";
+ OS << (int)Record.HasMasked << ",";
+ OS << (int)Record.HasVL << ",";
+ OS << (int)Record.HasMaskedOffOperand << ",";
+ OS << (int)Record.HasTailPolicy << ",";
+ OS << (int)Record.HasMaskPolicy << ",";
+ OS << (int)Record.HasFRMRoundModeOp << ",";
+ OS << (int)Record.IsTuple << ",";
+ OS << (int)Record.UnMaskedPolicyScheme << ",";
+ OS << (int)Record.MaskedPolicyScheme << ",";
+ OS << "},\n";
+ return OS;
+}
+
+} // end namespace RISCV
+} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp b/contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp
index cd4d8c188da9..0da087c33e3f 100644
--- a/contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Testing/CommandLineArgs.h"
+#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
namespace clang {
@@ -36,6 +37,9 @@ std::vector<std::string> getCommandLineArgsForTesting(TestLanguage Lang) {
case Lang_CXX20:
Args = {"-std=c++20", "-frtti"};
break;
+ case Lang_OBJC:
+ Args = {"-x", "objective-c", "-frtti", "-fobjc-nonfragile-abi"};
+ break;
case Lang_OBJCXX:
Args = {"-x", "objective-c++", "-frtti"};
break;
@@ -45,6 +49,42 @@ std::vector<std::string> getCommandLineArgsForTesting(TestLanguage Lang) {
return Args;
}
+std::vector<std::string> getCC1ArgsForTesting(TestLanguage Lang) {
+ std::vector<std::string> Args;
+ switch (Lang) {
+ case Lang_C89:
+ Args = {"-xc", "-std=c89"};
+ break;
+ case Lang_C99:
+ Args = {"-xc", "-std=c99"};
+ break;
+ case Lang_CXX03:
+ Args = {"-std=c++03"};
+ break;
+ case Lang_CXX11:
+ Args = {"-std=c++11"};
+ break;
+ case Lang_CXX14:
+ Args = {"-std=c++14"};
+ break;
+ case Lang_CXX17:
+ Args = {"-std=c++17"};
+ break;
+ case Lang_CXX20:
+ Args = {"-std=c++20"};
+ break;
+ case Lang_OBJC:
+ Args = {"-xobjective-c"};
+ break;
+ case Lang_OBJCXX:
+ Args = {"-xobjective-c++"};
+ break;
+ case Lang_OpenCL:
+ llvm_unreachable("Not implemented yet!");
+ }
+ return Args;
+}
+
StringRef getFilenameForTesting(TestLanguage Lang) {
switch (Lang) {
case Lang_C89:
@@ -61,10 +101,27 @@ StringRef getFilenameForTesting(TestLanguage Lang) {
case Lang_OpenCL:
return "input.cl";
+ case Lang_OBJC:
+ return "input.m";
+
case Lang_OBJCXX:
return "input.mm";
}
llvm_unreachable("Unhandled TestLanguage enum");
}
+std::string getAnyTargetForTesting() {
+ for (const auto &Target : llvm::TargetRegistry::targets()) {
+ std::string Error;
+ StringRef TargetName(Target.getName());
+ if (TargetName == "x86-64")
+ TargetName = "x86_64";
+ if (llvm::TargetRegistry::lookupTarget(std::string(TargetName), Error) ==
+ &Target) {
+ return std::string(TargetName);
+ }
+ }
+ return "";
+}
+
} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/Testing/TestAST.cpp b/contrib/llvm-project/clang/lib/Testing/TestAST.cpp
new file mode 100644
index 000000000000..3a50c2d9b5d0
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Testing/TestAST.cpp
@@ -0,0 +1,166 @@
+//===--- TestAST.cpp ------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Testing/TestAST.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/Frontend/TextDiagnostic.h"
+#include "clang/Testing/CommandLineArgs.h"
+#include "llvm/ADT/ScopeExit.h"
+#include "llvm/Support/VirtualFileSystem.h"
+
+#include "gtest/gtest.h"
+#include <string>
+
+namespace clang {
+namespace {
+
+// Captures diagnostics into a vector, optionally reporting errors to gtest.
+class StoreDiagnostics : public DiagnosticConsumer {
+ std::vector<StoredDiagnostic> &Out;
+ bool ReportErrors;
+ LangOptions LangOpts;
+
+public:
+ StoreDiagnostics(std::vector<StoredDiagnostic> &Out, bool ReportErrors)
+ : Out(Out), ReportErrors(ReportErrors) {}
+
+ void BeginSourceFile(const LangOptions &LangOpts,
+ const Preprocessor *) override {
+ this->LangOpts = LangOpts;
+ }
+
+ void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info) override {
+ Out.emplace_back(DiagLevel, Info);
+ if (ReportErrors && DiagLevel >= DiagnosticsEngine::Error) {
+ std::string Text;
+ llvm::raw_string_ostream OS(Text);
+ TextDiagnostic Renderer(OS, LangOpts,
+ &Info.getDiags()->getDiagnosticOptions());
+ Renderer.emitStoredDiagnostic(Out.back());
+ ADD_FAILURE() << Text;
+ }
+ }
+};
+
+// Fills in the bits of a CompilerInstance that weren't initialized yet.
+// Provides "empty" ASTContext etc if we fail before parsing gets started.
+void createMissingComponents(CompilerInstance &Clang) {
+ if (!Clang.hasDiagnostics())
+ Clang.createDiagnostics();
+ if (!Clang.hasFileManager())
+ Clang.createFileManager();
+ if (!Clang.hasSourceManager())
+ Clang.createSourceManager(Clang.getFileManager());
+ if (!Clang.hasTarget())
+ Clang.createTarget();
+ if (!Clang.hasPreprocessor())
+ Clang.createPreprocessor(TU_Complete);
+ if (!Clang.hasASTConsumer())
+ Clang.setASTConsumer(std::make_unique<ASTConsumer>());
+ if (!Clang.hasASTContext())
+ Clang.createASTContext();
+ if (!Clang.hasSema())
+ Clang.createSema(TU_Complete, /*CodeCompleteConsumer=*/nullptr);
+}
+
+} // namespace
+
+TestAST::TestAST(const TestInputs &In) {
+ Clang = std::make_unique<CompilerInstance>(
+ std::make_shared<PCHContainerOperations>());
+ // If we don't manage to finish parsing, create CompilerInstance components
+ // anyway so that the test will see an empty AST instead of crashing.
+ auto RecoverFromEarlyExit =
+ llvm::make_scope_exit([&] { createMissingComponents(*Clang); });
+
+ // Extra error conditions are reported through diagnostics, set that up first.
+ bool ErrorOK = In.ErrorOK || llvm::StringRef(In.Code).contains("error-ok");
+ Clang->createDiagnostics(new StoreDiagnostics(Diagnostics, !ErrorOK));
+
+ // Parse cc1 argv, (typically [-std=c++20 input.cc]) into CompilerInvocation.
+ std::vector<const char *> Argv;
+ std::vector<std::string> LangArgs = getCC1ArgsForTesting(In.Language);
+ for (const auto &S : LangArgs)
+ Argv.push_back(S.c_str());
+ for (const auto &S : In.ExtraArgs)
+ Argv.push_back(S.c_str());
+ std::string Filename = In.FileName;
+ if (Filename.empty())
+ Filename = getFilenameForTesting(In.Language).str();
+ Argv.push_back(Filename.c_str());
+ Clang->setInvocation(std::make_unique<CompilerInvocation>());
+ if (!CompilerInvocation::CreateFromArgs(Clang->getInvocation(), Argv,
+ Clang->getDiagnostics(), "clang")) {
+ ADD_FAILURE() << "Failed to create invocation";
+ return;
+ }
+ assert(!Clang->getInvocation().getFrontendOpts().DisableFree);
+
+ // Set up a VFS with only the virtual file visible.
+ auto VFS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
+ VFS->addFile(Filename, /*ModificationTime=*/0,
+ llvm::MemoryBuffer::getMemBufferCopy(In.Code, Filename));
+ for (const auto &Extra : In.ExtraFiles)
+ VFS->addFile(
+ Extra.getKey(), /*ModificationTime=*/0,
+ llvm::MemoryBuffer::getMemBufferCopy(Extra.getValue(), Extra.getKey()));
+ Clang->createFileManager(VFS);
+
+ // Running the FrontendAction creates the other components: SourceManager,
+ // Preprocessor, ASTContext, Sema. Preprocessor needs TargetInfo to be set.
+ EXPECT_TRUE(Clang->createTarget());
+ Action =
+ In.MakeAction ? In.MakeAction() : std::make_unique<SyntaxOnlyAction>();
+ const FrontendInputFile &Main = Clang->getFrontendOpts().Inputs.front();
+ if (!Action->BeginSourceFile(*Clang, Main)) {
+ ADD_FAILURE() << "Failed to BeginSourceFile()";
+ Action.reset(); // Don't call EndSourceFile if BeginSourceFile failed.
+ return;
+ }
+ if (auto Err = Action->Execute())
+ ADD_FAILURE() << "Failed to Execute(): " << llvm::toString(std::move(Err));
+
+ // Action->EndSourceFile() would destroy the ASTContext, we want to keep it.
+ // But notify the preprocessor we're done now.
+ Clang->getPreprocessor().EndSourceFile();
+ // We're done gathering diagnostics, detach the consumer so we can destroy it.
+ Clang->getDiagnosticClient().EndSourceFile();
+ Clang->getDiagnostics().setClient(new DiagnosticConsumer(),
+ /*ShouldOwnClient=*/true);
+}
+
+void TestAST::clear() {
+ if (Action) {
+ // We notified the preprocessor of EOF already, so detach it first.
+ // Sema needs the PP alive until after EndSourceFile() though.
+ auto PP = Clang->getPreprocessorPtr(); // Keep PP alive for now.
+ Clang->setPreprocessor(nullptr); // Detach so we don't send EOF twice.
+ Action->EndSourceFile(); // Destroy ASTContext and Sema.
+ // Now Sema is gone, PP can safely be destroyed.
+ }
+ Action.reset();
+ Clang.reset();
+ Diagnostics.clear();
+}
+
+TestAST &TestAST::operator=(TestAST &&M) {
+ clear();
+ Action = std::move(M.Action);
+ Clang = std::move(M.Clang);
+ Diagnostics = std::move(M.Diagnostics);
+ return *this;
+}
+
+TestAST::TestAST(TestAST &&M) { *this = std::move(M); }
+
+TestAST::~TestAST() { clear(); }
+
+} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/Tooling/ASTDiff/ASTDiff.cpp b/contrib/llvm-project/clang/lib/Tooling/ASTDiff/ASTDiff.cpp
index 0821863adcc6..5f7153cd53ac 100644
--- a/contrib/llvm-project/clang/lib/Tooling/ASTDiff/ASTDiff.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/ASTDiff/ASTDiff.cpp
@@ -19,6 +19,7 @@
#include <limits>
#include <memory>
+#include <optional>
#include <unordered_set>
using namespace llvm;
@@ -117,13 +118,11 @@ public:
Impl(SyntaxTree *Parent, Stmt *N, ASTContext &AST);
template <class T>
Impl(SyntaxTree *Parent,
- std::enable_if_t<std::is_base_of<Stmt, T>::value, T> *Node,
- ASTContext &AST)
+ std::enable_if_t<std::is_base_of_v<Stmt, T>, T> *Node, ASTContext &AST)
: Impl(Parent, dyn_cast<Stmt>(Node), AST) {}
template <class T>
Impl(SyntaxTree *Parent,
- std::enable_if_t<std::is_base_of<Decl, T>::value, T> *Node,
- ASTContext &AST)
+ std::enable_if_t<std::is_base_of_v<Decl, T>, T> *Node, ASTContext &AST)
: Impl(Parent, dyn_cast<Decl>(Node), AST) {}
SyntaxTree *Parent;
@@ -372,7 +371,7 @@ SyntaxTree::Impl::getRelativeName(const NamedDecl *ND,
// Strip the qualifier, if Val refers to something in the current scope.
// But leave one leading ':' in place, so that we know that this is a
// relative path.
- if (!ContextPrefix.empty() && StringRef(Val).startswith(ContextPrefix))
+ if (!ContextPrefix.empty() && StringRef(Val).starts_with(ContextPrefix))
Val = Val.substr(ContextPrefix.size() + 1);
return Val;
}
@@ -454,12 +453,12 @@ std::string SyntaxTree::Impl::getStmtValue(const Stmt *S) const {
if (auto *I = dyn_cast<IntegerLiteral>(S)) {
SmallString<256> Str;
I->getValue().toString(Str, /*Radix=*/10, /*Signed=*/false);
- return std::string(Str.str());
+ return std::string(Str);
}
if (auto *F = dyn_cast<FloatingLiteral>(S)) {
SmallString<256> Str;
F->getValue().toString(Str);
- return std::string(Str.str());
+ return std::string(Str);
}
if (auto *D = dyn_cast<DeclRefExpr>(S))
return getRelativeName(D->getDecl(), getEnclosingDeclContext(AST, S));
@@ -688,20 +687,20 @@ ASTNodeKind Node::getType() const { return ASTNode.getNodeKind(); }
StringRef Node::getTypeLabel() const { return getType().asStringRef(); }
-llvm::Optional<std::string> Node::getQualifiedIdentifier() const {
+std::optional<std::string> Node::getQualifiedIdentifier() const {
if (auto *ND = ASTNode.get<NamedDecl>()) {
if (ND->getDeclName().isIdentifier())
return ND->getQualifiedNameAsString();
}
- return llvm::None;
+ return std::nullopt;
}
-llvm::Optional<StringRef> Node::getIdentifier() const {
+std::optional<StringRef> Node::getIdentifier() const {
if (auto *ND = ASTNode.get<NamedDecl>()) {
if (ND->getDeclName().isIdentifier())
return ND->getName();
}
- return llvm::None;
+ return std::nullopt;
}
namespace {
diff --git a/contrib/llvm-project/clang/lib/Tooling/AllTUsExecution.cpp b/contrib/llvm-project/clang/lib/Tooling/AllTUsExecution.cpp
index 5565da9b548a..f327d0139941 100644
--- a/contrib/llvm-project/clang/lib/Tooling/AllTUsExecution.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/AllTUsExecution.cpp
@@ -121,7 +121,7 @@ llvm::Error AllTUsToolExecutor::execute(
[&](std::string Path) {
Log("[" + std::to_string(Count()) + "/" + TotalNumStr +
"] Processing file " + Path);
- // Each thread gets an indepent copy of a VFS to allow different
+ // Each thread gets an independent copy of a VFS to allow different
// concurrent working directories.
IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS =
llvm::vfs::createPhysicalFileSystem();
diff --git a/contrib/llvm-project/clang/lib/Tooling/ArgumentsAdjusters.cpp b/contrib/llvm-project/clang/lib/Tooling/ArgumentsAdjusters.cpp
index 7f5dc4d62f11..df4c74205b08 100644
--- a/contrib/llvm-project/clang/lib/Tooling/ArgumentsAdjusters.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/ArgumentsAdjusters.cpp
@@ -45,12 +45,12 @@ ArgumentsAdjuster getClangSyntaxOnlyAdjuster() {
StringRef Arg = Args[i];
// Skip output commands.
if (llvm::any_of(OutputCommands, [&Arg](llvm::StringRef OutputCommand) {
- return Arg.startswith(OutputCommand);
+ return Arg.starts_with(OutputCommand);
}))
continue;
- if (!Arg.startswith("-fcolor-diagnostics") &&
- !Arg.startswith("-fdiagnostics-color"))
+ if (!Arg.starts_with("-fcolor-diagnostics") &&
+ !Arg.starts_with("-fdiagnostics-color"))
AdjustedArgs.push_back(Args[i]);
// If we strip a color option, make sure we strip any preceeding `-Xclang`
// option as well.
@@ -73,7 +73,7 @@ ArgumentsAdjuster getClangStripOutputAdjuster() {
CommandLineArguments AdjustedArgs;
for (size_t i = 0, e = Args.size(); i < e; ++i) {
StringRef Arg = Args[i];
- if (!Arg.startswith("-o"))
+ if (!Arg.starts_with("-o"))
AdjustedArgs.push_back(Args[i]);
if (Arg == "-o") {
@@ -102,11 +102,11 @@ ArgumentsAdjuster getClangStripDependencyFileAdjuster() {
// When not using the cl driver mode, dependency file generation options
// begin with -M. These include -MM, -MF, -MG, -MP, -MT, -MQ, -MD, and
// -MMD.
- if (!UsingClDriver && Arg.startswith("-M"))
+ if (!UsingClDriver && Arg.starts_with("-M"))
continue;
// Under MSVC's cl driver mode, dependency file generation is controlled
// using /showIncludes
- if (Arg.startswith("/showIncludes") || Arg.startswith("-showIncludes"))
+ if (Arg.starts_with("/showIncludes") || Arg.starts_with("-showIncludes"))
continue;
AdjustedArgs.push_back(Args[i]);
@@ -122,7 +122,7 @@ ArgumentsAdjuster getInsertArgumentAdjuster(const CommandLineArguments &Extra,
CommandLineArguments::iterator I;
if (Pos == ArgumentInsertPosition::END) {
- I = std::find(Return.begin(), Return.end(), "--");
+ I = llvm::find(Return, "--");
} else {
I = Return.begin();
++I; // To leave the program name in place
@@ -159,7 +159,7 @@ ArgumentsAdjuster getStripPluginsAdjuster() {
// -Xclang <arbitrary-argument>
if (I + 4 < E && Args[I] == "-Xclang" &&
(Args[I + 1] == "-load" || Args[I + 1] == "-plugin" ||
- llvm::StringRef(Args[I + 1]).startswith("-plugin-arg-") ||
+ llvm::StringRef(Args[I + 1]).starts_with("-plugin-arg-") ||
Args[I + 1] == "-add-plugin") &&
Args[I + 2] == "-Xclang") {
I += 3;
diff --git a/contrib/llvm-project/clang/lib/Tooling/CommonOptionsParser.cpp b/contrib/llvm-project/clang/lib/Tooling/CommonOptionsParser.cpp
index 6301544dbb28..59ef47cc0166 100644
--- a/contrib/llvm-project/clang/lib/Tooling/CommonOptionsParser.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/CommonOptionsParser.cpp
@@ -86,21 +86,21 @@ llvm::Error CommonOptionsParser::init(
static cl::opt<std::string> BuildPath("p", cl::desc("Build path"),
cl::Optional, cl::cat(Category),
- cl::sub(*cl::AllSubCommands));
+ cl::sub(cl::SubCommand::getAll()));
static cl::list<std::string> SourcePaths(
cl::Positional, cl::desc("<source0> [... <sourceN>]"), OccurrencesFlag,
- cl::cat(Category), cl::sub(*cl::AllSubCommands));
+ cl::cat(Category), cl::sub(cl::SubCommand::getAll()));
static cl::list<std::string> ArgsAfter(
"extra-arg",
cl::desc("Additional argument to append to the compiler command line"),
- cl::cat(Category), cl::sub(*cl::AllSubCommands));
+ cl::cat(Category), cl::sub(cl::SubCommand::getAll()));
static cl::list<std::string> ArgsBefore(
"extra-arg-before",
cl::desc("Additional argument to prepend to the compiler command line"),
- cl::cat(Category), cl::sub(*cl::AllSubCommands));
+ cl::cat(Category), cl::sub(cl::SubCommand::getAll()));
cl::ResetAllOptionOccurrences();
@@ -170,7 +170,7 @@ CommonOptionsParser::CommonOptionsParser(
llvm::Error Err = init(argc, argv, Category, OccurrencesFlag, Overview);
if (Err) {
llvm::report_fatal_error(
- "CommonOptionsParser: failed to parse command-line arguments. " +
+ Twine("CommonOptionsParser: failed to parse command-line arguments. ") +
llvm::toString(std::move(Err)));
}
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/CompilationDatabase.cpp b/contrib/llvm-project/clang/lib/Tooling/CompilationDatabase.cpp
index 1e19e68633d2..af18194ae0fe 100644
--- a/contrib/llvm-project/clang/lib/Tooling/CompilationDatabase.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/CompilationDatabase.cpp
@@ -37,11 +37,11 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorOr.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/LineIterator.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Host.h"
#include <algorithm>
#include <cassert>
#include <cstring>
@@ -156,6 +156,7 @@ private:
bool CollectChildren = Collect;
switch (A->getKind()) {
case driver::Action::CompileJobClass:
+ case driver::Action::PrecompileJobClass:
CollectChildren = true;
break;
@@ -204,7 +205,7 @@ public:
// which don't support these options.
struct FilterUnusedFlags {
bool operator() (StringRef S) {
- return (S == "-no-integrated-as") || S.startswith("-Wa,");
+ return (S == "-no-integrated-as") || S.starts_with("-Wa,");
}
};
@@ -215,7 +216,7 @@ std::string GetClangToolCommand() {
SmallString<128> ClangToolPath;
ClangToolPath = llvm::sys::path::parent_path(ClangExecutable);
llvm::sys::path::append(ClangToolPath, "clang-tool");
- return std::string(ClangToolPath.str());
+ return std::string(ClangToolPath);
}
} // namespace
@@ -293,7 +294,8 @@ static bool stripPositionalArgs(std::vector<const char *> Args,
// -flto* flags make the BackendJobClass, which still needs analyzer.
if (Cmd.getSource().getKind() == driver::Action::AssembleJobClass ||
Cmd.getSource().getKind() == driver::Action::BackendJobClass ||
- Cmd.getSource().getKind() == driver::Action::CompileJobClass) {
+ Cmd.getSource().getKind() == driver::Action::CompileJobClass ||
+ Cmd.getSource().getKind() == driver::Action::PrecompileJobClass) {
CompileAnalyzer.run(&Cmd.getSource());
}
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Core/Replacement.cpp b/contrib/llvm-project/clang/lib/Tooling/Core/Replacement.cpp
index 30e1923bf1cb..269f17a6db4c 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Core/Replacement.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Core/Replacement.cpp
@@ -67,7 +67,7 @@ bool Replacement::isApplicable() const {
bool Replacement::apply(Rewriter &Rewrite) const {
SourceManager &SM = Rewrite.getSourceMgr();
- auto Entry = SM.getFileManager().getFile(FilePath);
+ auto Entry = SM.getFileManager().getOptionalFileRef(FilePath);
if (!Entry)
return false;
@@ -122,7 +122,8 @@ void Replacement::setFromSourceLocation(const SourceManager &Sources,
StringRef ReplacementText) {
const std::pair<FileID, unsigned> DecomposedLocation =
Sources.getDecomposedLoc(Start);
- const FileEntry *Entry = Sources.getFileEntryForID(DecomposedLocation.first);
+ OptionalFileEntryRef Entry =
+ Sources.getFileEntryRefForID(DecomposedLocation.first);
this->FilePath = std::string(Entry ? Entry->getName() : InvalidLocation);
this->ReplacementRange = Range(DecomposedLocation.second, Length);
this->ReplacementText = std::string(ReplacementText);
@@ -179,9 +180,9 @@ static std::string getReplacementErrString(replacement_error Err) {
std::string ReplacementError::message() const {
std::string Message = getReplacementErrString(Err);
- if (NewReplacement.hasValue())
+ if (NewReplacement)
Message += "\nNew replacement: " + NewReplacement->toString();
- if (ExistingReplacement.hasValue())
+ if (ExistingReplacement)
Message += "\nExisting replacement: " + ExistingReplacement->toString();
return Message;
}
@@ -270,7 +271,7 @@ llvm::Error Replacements::add(const Replacement &R) {
assert(R.getLength() == 0);
// `I` is also an insertion, `R` and `I` conflict.
if (I->getLength() == 0) {
- // Check if two insertions are order-indepedent: if inserting them in
+ // Check if two insertions are order-independent: if inserting them in
// either order produces the same text, they are order-independent.
if ((R.getReplacementText() + I->getReplacementText()).str() !=
(I->getReplacementText() + R.getReplacementText()).str())
@@ -319,7 +320,7 @@ llvm::Error Replacements::add(const Replacement &R) {
Replaces.insert(R);
} else {
// `I` overlaps with `R`. We need to check `R` against all overlapping
- // replacements to see if they are order-indepedent. If they are, merge `R`
+ // replacements to see if they are order-independent. If they are, merge `R`
// with them and replace them with the merged replacements.
auto MergeBegin = I;
auto MergeEnd = std::next(I);
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
index 40e8bd2b8776..6f71650a3982 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
@@ -7,99 +7,82 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h"
-#include "clang/Lex/DependencyDirectivesSourceMinimizer.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SmallVectorMemoryBuffer.h"
#include "llvm/Support/Threading.h"
+#include <optional>
using namespace clang;
using namespace tooling;
using namespace dependencies;
-CachedFileSystemEntry CachedFileSystemEntry::createFileEntry(
- StringRef Filename, llvm::vfs::FileSystem &FS, bool Minimize) {
+llvm::ErrorOr<DependencyScanningWorkerFilesystem::TentativeEntry>
+DependencyScanningWorkerFilesystem::readFile(StringRef Filename) {
// Load the file and its content from the file system.
- llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>> MaybeFile =
- FS.openFileForRead(Filename);
+ auto MaybeFile = getUnderlyingFS().openFileForRead(Filename);
if (!MaybeFile)
return MaybeFile.getError();
- llvm::ErrorOr<llvm::vfs::Status> Stat = (*MaybeFile)->status();
- if (!Stat)
- return Stat.getError();
+ auto File = std::move(*MaybeFile);
- llvm::vfs::File &F = **MaybeFile;
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> MaybeBuffer =
- F.getBuffer(Stat->getName());
+ auto MaybeStat = File->status();
+ if (!MaybeStat)
+ return MaybeStat.getError();
+ auto Stat = std::move(*MaybeStat);
+
+ auto MaybeBuffer = File->getBuffer(Stat.getName());
if (!MaybeBuffer)
return MaybeBuffer.getError();
+ auto Buffer = std::move(*MaybeBuffer);
- llvm::SmallString<1024> MinimizedFileContents;
- // Minimize the file down to directives that might affect the dependencies.
- const auto &Buffer = *MaybeBuffer;
- SmallVector<minimize_source_to_dependency_directives::Token, 64> Tokens;
- if (!Minimize || minimizeSourceToDependencyDirectives(
- Buffer->getBuffer(), MinimizedFileContents, Tokens)) {
- // Use the original file unless requested otherwise, or
- // if the minimization failed.
- // FIXME: Propage the diagnostic if desired by the client.
- CachedFileSystemEntry Result;
- Result.MaybeStat = std::move(*Stat);
- Result.Contents.reserve(Buffer->getBufferSize() + 1);
- Result.Contents.append(Buffer->getBufferStart(), Buffer->getBufferEnd());
- // Implicitly null terminate the contents for Clang's lexer.
- Result.Contents.push_back('\0');
- Result.Contents.pop_back();
- return Result;
- }
-
- CachedFileSystemEntry Result;
- size_t Size = MinimizedFileContents.size();
- Result.MaybeStat = llvm::vfs::Status(Stat->getName(), Stat->getUniqueID(),
- Stat->getLastModificationTime(),
- Stat->getUser(), Stat->getGroup(), Size,
- Stat->getType(), Stat->getPermissions());
- // The contents produced by the minimizer must be null terminated.
- assert(MinimizedFileContents.data()[MinimizedFileContents.size()] == '\0' &&
- "not null terminated contents");
- // Even though there's an implicit null terminator in the minimized contents,
- // we want to temporarily make it explicit. This will ensure that the
- // std::move will preserve it even if it needs to do a copy if the
- // SmallString still has the small capacity.
- MinimizedFileContents.push_back('\0');
- Result.Contents = std::move(MinimizedFileContents);
- // Now make the null terminator implicit again, so that Clang's lexer can find
- // it right where the buffer ends.
- Result.Contents.pop_back();
-
- // Compute the skipped PP ranges that speedup skipping over inactive
- // preprocessor blocks.
- llvm::SmallVector<minimize_source_to_dependency_directives::SkippedRange, 32>
- SkippedRanges;
- minimize_source_to_dependency_directives::computeSkippedRanges(Tokens,
- SkippedRanges);
- PreprocessorSkippedRangeMapping Mapping;
- for (const auto &Range : SkippedRanges) {
- if (Range.Length < 16) {
- // Ignore small ranges as non-profitable.
- // FIXME: This is a heuristic, its worth investigating the tradeoffs
- // when it should be applied.
- continue;
- }
- Mapping[Range.Offset] = Range.Length;
- }
- Result.PPSkippedRangeMapping = std::move(Mapping);
+ // If the file size changed between read and stat, pretend it didn't.
+ if (Stat.getSize() != Buffer->getBufferSize())
+ Stat = llvm::vfs::Status::copyWithNewSize(Stat, Buffer->getBufferSize());
- return Result;
+ return TentativeEntry(Stat, std::move(Buffer));
}
-CachedFileSystemEntry
-CachedFileSystemEntry::createDirectoryEntry(llvm::vfs::Status &&Stat) {
- assert(Stat.isDirectory() && "not a directory!");
- auto Result = CachedFileSystemEntry();
- Result.MaybeStat = std::move(Stat);
- return Result;
+EntryRef DependencyScanningWorkerFilesystem::scanForDirectivesIfNecessary(
+ const CachedFileSystemEntry &Entry, StringRef Filename, bool Disable) {
+ if (Entry.isError() || Entry.isDirectory() || Disable ||
+ !shouldScanForDirectives(Filename))
+ return EntryRef(Filename, Entry);
+
+ CachedFileContents *Contents = Entry.getCachedContents();
+ assert(Contents && "contents not initialized");
+
+ // Double-checked locking.
+ if (Contents->DepDirectives.load())
+ return EntryRef(Filename, Entry);
+
+ std::lock_guard<std::mutex> GuardLock(Contents->ValueLock);
+
+ // Double-checked locking.
+ if (Contents->DepDirectives.load())
+ return EntryRef(Filename, Entry);
+
+ SmallVector<dependency_directives_scan::Directive, 64> Directives;
+ // Scan the file for preprocessor directives that might affect the
+ // dependencies.
+ if (scanSourceForDependencyDirectives(Contents->Original->getBuffer(),
+ Contents->DepDirectiveTokens,
+ Directives)) {
+ Contents->DepDirectiveTokens.clear();
+ // FIXME: Propagate the diagnostic if desired by the client.
+ Contents->DepDirectives.store(new std::optional<DependencyDirectivesTy>());
+ return EntryRef(Filename, Entry);
+ }
+
+ // This function performed double-checked locking using `DepDirectives`.
+ // Assigning it must be the last thing this function does, otherwise other
+ // threads may skip the
+ // critical section (`DepDirectives != nullptr`), leading to a data race.
+ Contents->DepDirectives.store(
+ new std::optional<DependencyDirectivesTy>(std::move(Directives)));
+ return EntryRef(Filename, Entry);
}
-DependencyScanningFilesystemSharedCache::SingleCache::SingleCache() {
+DependencyScanningFilesystemSharedCache::
+ DependencyScanningFilesystemSharedCache() {
// This heuristic was chosen using a empirical testing on a
// reasonably high core machine (iMacPro 18 cores / 36 threads). The cache
// sharding gives a performance edge by reducing the lock contention.
@@ -110,18 +93,72 @@ DependencyScanningFilesystemSharedCache::SingleCache::SingleCache() {
CacheShards = std::make_unique<CacheShard[]>(NumShards);
}
-DependencyScanningFilesystemSharedCache::SharedFileSystemEntry &
-DependencyScanningFilesystemSharedCache::SingleCache::get(StringRef Key) {
- CacheShard &Shard = CacheShards[llvm::hash_value(Key) % NumShards];
- std::unique_lock<std::mutex> LockGuard(Shard.CacheLock);
- auto It = Shard.Cache.try_emplace(Key);
- return It.first->getValue();
+DependencyScanningFilesystemSharedCache::CacheShard &
+DependencyScanningFilesystemSharedCache::getShardForFilename(
+ StringRef Filename) const {
+ assert(llvm::sys::path::is_absolute_gnu(Filename));
+ return CacheShards[llvm::hash_value(Filename) % NumShards];
+}
+
+DependencyScanningFilesystemSharedCache::CacheShard &
+DependencyScanningFilesystemSharedCache::getShardForUID(
+ llvm::sys::fs::UniqueID UID) const {
+ auto Hash = llvm::hash_combine(UID.getDevice(), UID.getFile());
+ return CacheShards[Hash % NumShards];
+}
+
+const CachedFileSystemEntry *
+DependencyScanningFilesystemSharedCache::CacheShard::findEntryByFilename(
+ StringRef Filename) const {
+ assert(llvm::sys::path::is_absolute_gnu(Filename));
+ std::lock_guard<std::mutex> LockGuard(CacheLock);
+ auto It = EntriesByFilename.find(Filename);
+ return It == EntriesByFilename.end() ? nullptr : It->getValue();
}
-DependencyScanningFilesystemSharedCache::SharedFileSystemEntry &
-DependencyScanningFilesystemSharedCache::get(StringRef Key, bool Minimized) {
- SingleCache &Cache = Minimized ? CacheMinimized : CacheOriginal;
- return Cache.get(Key);
+const CachedFileSystemEntry *
+DependencyScanningFilesystemSharedCache::CacheShard::findEntryByUID(
+ llvm::sys::fs::UniqueID UID) const {
+ std::lock_guard<std::mutex> LockGuard(CacheLock);
+ auto It = EntriesByUID.find(UID);
+ return It == EntriesByUID.end() ? nullptr : It->getSecond();
+}
+
+const CachedFileSystemEntry &
+DependencyScanningFilesystemSharedCache::CacheShard::
+ getOrEmplaceEntryForFilename(StringRef Filename,
+ llvm::ErrorOr<llvm::vfs::Status> Stat) {
+ std::lock_guard<std::mutex> LockGuard(CacheLock);
+ auto Insertion = EntriesByFilename.insert({Filename, nullptr});
+ if (Insertion.second)
+ Insertion.first->second =
+ new (EntryStorage.Allocate()) CachedFileSystemEntry(std::move(Stat));
+ return *Insertion.first->second;
+}
+
+const CachedFileSystemEntry &
+DependencyScanningFilesystemSharedCache::CacheShard::getOrEmplaceEntryForUID(
+ llvm::sys::fs::UniqueID UID, llvm::vfs::Status Stat,
+ std::unique_ptr<llvm::MemoryBuffer> Contents) {
+ std::lock_guard<std::mutex> LockGuard(CacheLock);
+ auto Insertion = EntriesByUID.insert({UID, nullptr});
+ if (Insertion.second) {
+ CachedFileContents *StoredContents = nullptr;
+ if (Contents)
+ StoredContents = new (ContentsStorage.Allocate())
+ CachedFileContents(std::move(Contents));
+ Insertion.first->second = new (EntryStorage.Allocate())
+ CachedFileSystemEntry(std::move(Stat), StoredContents);
+ }
+ return *Insertion.first->second;
+}
+
+const CachedFileSystemEntry &
+DependencyScanningFilesystemSharedCache::CacheShard::
+ getOrInsertEntryForFilename(StringRef Filename,
+ const CachedFileSystemEntry &Entry) {
+ std::lock_guard<std::mutex> LockGuard(CacheLock);
+ return *EntriesByFilename.insert({Filename, &Entry}).first->getValue();
}
/// Whitelist file extensions that should be minimized, treating no extension as
@@ -129,110 +166,150 @@ DependencyScanningFilesystemSharedCache::get(StringRef Key, bool Minimized) {
///
/// This is kinda hacky, it would be better if we knew what kind of file Clang
/// was expecting instead.
-static bool shouldMinimize(StringRef Filename) {
+static bool shouldScanForDirectivesBasedOnExtension(StringRef Filename) {
StringRef Ext = llvm::sys::path::extension(Filename);
if (Ext.empty())
return true; // C++ standard library
return llvm::StringSwitch<bool>(Ext)
- .CasesLower(".c", ".cc", ".cpp", ".c++", ".cxx", true)
- .CasesLower(".h", ".hh", ".hpp", ".h++", ".hxx", true)
- .CasesLower(".m", ".mm", true)
- .CasesLower(".i", ".ii", ".mi", ".mmi", true)
- .CasesLower(".def", ".inc", true)
- .Default(false);
+ .CasesLower(".c", ".cc", ".cpp", ".c++", ".cxx", true)
+ .CasesLower(".h", ".hh", ".hpp", ".h++", ".hxx", true)
+ .CasesLower(".m", ".mm", true)
+ .CasesLower(".i", ".ii", ".mi", ".mmi", true)
+ .CasesLower(".def", ".inc", true)
+ .Default(false);
}
-
static bool shouldCacheStatFailures(StringRef Filename) {
StringRef Ext = llvm::sys::path::extension(Filename);
if (Ext.empty())
return false; // This may be the module cache directory.
- return shouldMinimize(Filename); // Only cache stat failures on source files.
+ // Only cache stat failures on files that are not expected to change during
+ // the build.
+ StringRef FName = llvm::sys::path::filename(Filename);
+ if (FName == "module.modulemap" || FName == "module.map")
+ return true;
+ return shouldScanForDirectivesBasedOnExtension(Filename);
}
-void DependencyScanningWorkerFilesystem::ignoreFile(StringRef RawFilename) {
- llvm::SmallString<256> Filename;
- llvm::sys::path::native(RawFilename, Filename);
- IgnoredFiles.insert(Filename);
+DependencyScanningWorkerFilesystem::DependencyScanningWorkerFilesystem(
+ DependencyScanningFilesystemSharedCache &SharedCache,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS)
+ : ProxyFileSystem(std::move(FS)), SharedCache(SharedCache),
+ WorkingDirForCacheLookup(llvm::errc::invalid_argument) {
+ updateWorkingDirForCacheLookup();
}
-bool DependencyScanningWorkerFilesystem::shouldIgnoreFile(
- StringRef RawFilename) {
- llvm::SmallString<256> Filename;
- llvm::sys::path::native(RawFilename, Filename);
- return IgnoredFiles.contains(Filename);
+bool DependencyScanningWorkerFilesystem::shouldScanForDirectives(
+ StringRef Filename) {
+ return shouldScanForDirectivesBasedOnExtension(Filename);
}
-llvm::ErrorOr<const CachedFileSystemEntry *>
-DependencyScanningWorkerFilesystem::getOrCreateFileSystemEntry(
- const StringRef Filename) {
- bool ShouldMinimize = !shouldIgnoreFile(Filename) && shouldMinimize(Filename);
+const CachedFileSystemEntry &
+DependencyScanningWorkerFilesystem::getOrEmplaceSharedEntryForUID(
+ TentativeEntry TEntry) {
+ auto &Shard = SharedCache.getShardForUID(TEntry.Status.getUniqueID());
+ return Shard.getOrEmplaceEntryForUID(TEntry.Status.getUniqueID(),
+ std::move(TEntry.Status),
+ std::move(TEntry.Contents));
+}
- if (const auto *Entry = Cache.getCachedEntry(Filename, ShouldMinimize))
+const CachedFileSystemEntry *
+DependencyScanningWorkerFilesystem::findEntryByFilenameWithWriteThrough(
+ StringRef Filename) {
+ if (const auto *Entry = LocalCache.findEntryByFilename(Filename))
return Entry;
+ auto &Shard = SharedCache.getShardForFilename(Filename);
+ if (const auto *Entry = Shard.findEntryByFilename(Filename))
+ return &LocalCache.insertEntryForFilename(Filename, *Entry);
+ return nullptr;
+}
+
+llvm::ErrorOr<const CachedFileSystemEntry &>
+DependencyScanningWorkerFilesystem::computeAndStoreResult(
+ StringRef OriginalFilename, StringRef FilenameForLookup) {
+ llvm::ErrorOr<llvm::vfs::Status> Stat =
+ getUnderlyingFS().status(OriginalFilename);
+ if (!Stat) {
+ if (!shouldCacheStatFailures(OriginalFilename))
+ return Stat.getError();
+ const auto &Entry =
+ getOrEmplaceSharedEntryForFilename(FilenameForLookup, Stat.getError());
+ return insertLocalEntryForFilename(FilenameForLookup, Entry);
+ }
- // FIXME: Handle PCM/PCH files.
- // FIXME: Handle module map files.
-
- DependencyScanningFilesystemSharedCache::SharedFileSystemEntry
- &SharedCacheEntry = SharedCache.get(Filename, ShouldMinimize);
- const CachedFileSystemEntry *Result;
- {
- std::unique_lock<std::mutex> LockGuard(SharedCacheEntry.ValueLock);
- CachedFileSystemEntry &CacheEntry = SharedCacheEntry.Value;
-
- if (!CacheEntry.isValid()) {
- llvm::vfs::FileSystem &FS = getUnderlyingFS();
- auto MaybeStatus = FS.status(Filename);
- if (!MaybeStatus) {
- if (!shouldCacheStatFailures(Filename))
- // HACK: We need to always restat non source files if the stat fails.
- // This is because Clang first looks up the module cache and module
- // files before building them, and then looks for them again. If we
- // cache the stat failure, it won't see them the second time.
- return MaybeStatus.getError();
- else
- CacheEntry = CachedFileSystemEntry(MaybeStatus.getError());
- } else if (MaybeStatus->isDirectory())
- CacheEntry = CachedFileSystemEntry::createDirectoryEntry(
- std::move(*MaybeStatus));
- else
- CacheEntry = CachedFileSystemEntry::createFileEntry(Filename, FS,
- ShouldMinimize);
+ if (const auto *Entry = findSharedEntryByUID(*Stat))
+ return insertLocalEntryForFilename(FilenameForLookup, *Entry);
+
+ auto TEntry =
+ Stat->isDirectory() ? TentativeEntry(*Stat) : readFile(OriginalFilename);
+
+ const CachedFileSystemEntry *SharedEntry = [&]() {
+ if (TEntry) {
+ const auto &UIDEntry = getOrEmplaceSharedEntryForUID(std::move(*TEntry));
+ return &getOrInsertSharedEntryForFilename(FilenameForLookup, UIDEntry);
}
+ return &getOrEmplaceSharedEntryForFilename(FilenameForLookup,
+ TEntry.getError());
+ }();
- Result = &CacheEntry;
- }
+ return insertLocalEntryForFilename(FilenameForLookup, *SharedEntry);
+}
- // Store the result in the local cache.
- Cache.setCachedEntry(Filename, ShouldMinimize, Result);
- return Result;
+llvm::ErrorOr<EntryRef>
+DependencyScanningWorkerFilesystem::getOrCreateFileSystemEntry(
+ StringRef OriginalFilename, bool DisableDirectivesScanning) {
+ StringRef FilenameForLookup;
+ SmallString<256> PathBuf;
+ if (llvm::sys::path::is_absolute_gnu(OriginalFilename)) {
+ FilenameForLookup = OriginalFilename;
+ } else if (!WorkingDirForCacheLookup) {
+ return WorkingDirForCacheLookup.getError();
+ } else {
+ StringRef RelFilename = OriginalFilename;
+ RelFilename.consume_front("./");
+ PathBuf = *WorkingDirForCacheLookup;
+ llvm::sys::path::append(PathBuf, RelFilename);
+ FilenameForLookup = PathBuf.str();
+ }
+ assert(llvm::sys::path::is_absolute_gnu(FilenameForLookup));
+ if (const auto *Entry =
+ findEntryByFilenameWithWriteThrough(FilenameForLookup))
+ return scanForDirectivesIfNecessary(*Entry, OriginalFilename,
+ DisableDirectivesScanning)
+ .unwrapError();
+ auto MaybeEntry = computeAndStoreResult(OriginalFilename, FilenameForLookup);
+ if (!MaybeEntry)
+ return MaybeEntry.getError();
+ return scanForDirectivesIfNecessary(*MaybeEntry, OriginalFilename,
+ DisableDirectivesScanning)
+ .unwrapError();
}
llvm::ErrorOr<llvm::vfs::Status>
DependencyScanningWorkerFilesystem::status(const Twine &Path) {
SmallString<256> OwnedFilename;
StringRef Filename = Path.toStringRef(OwnedFilename);
- const llvm::ErrorOr<const CachedFileSystemEntry *> Result =
- getOrCreateFileSystemEntry(Filename);
+
+ if (Filename.ends_with(".pcm"))
+ return getUnderlyingFS().status(Path);
+
+ llvm::ErrorOr<EntryRef> Result = getOrCreateFileSystemEntry(Filename);
if (!Result)
return Result.getError();
- return (*Result)->getStatus();
+ return Result->getStatus();
}
namespace {
/// The VFS that is used by clang consumes the \c CachedFileSystemEntry using
/// this subclass.
-class MinimizedVFSFile final : public llvm::vfs::File {
+class DepScanFile final : public llvm::vfs::File {
public:
- MinimizedVFSFile(std::unique_ptr<llvm::MemoryBuffer> Buffer,
- llvm::vfs::Status Stat)
+ DepScanFile(std::unique_ptr<llvm::MemoryBuffer> Buffer,
+ llvm::vfs::Status Stat)
: Buffer(std::move(Buffer)), Stat(std::move(Stat)) {}
- static llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>>
- create(const CachedFileSystemEntry *Entry,
- ExcludedPreprocessorDirectiveSkipMapping *PPSkipMappings);
+ static llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>> create(EntryRef Entry);
llvm::ErrorOr<llvm::vfs::Status> status() override { return Stat; }
@@ -251,22 +328,19 @@ private:
} // end anonymous namespace
-llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>> MinimizedVFSFile::create(
- const CachedFileSystemEntry *Entry,
- ExcludedPreprocessorDirectiveSkipMapping *PPSkipMappings) {
- if (Entry->isDirectory())
- return llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>>(
- std::make_error_code(std::errc::is_a_directory));
- llvm::ErrorOr<StringRef> Contents = Entry->getContents();
- if (!Contents)
- return Contents.getError();
- auto Result = std::make_unique<MinimizedVFSFile>(
- llvm::MemoryBuffer::getMemBuffer(*Contents, Entry->getName(),
+llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>>
+DepScanFile::create(EntryRef Entry) {
+ assert(!Entry.isError() && "error");
+
+ if (Entry.isDirectory())
+ return std::make_error_code(std::errc::is_a_directory);
+
+ auto Result = std::make_unique<DepScanFile>(
+ llvm::MemoryBuffer::getMemBuffer(Entry.getContents(),
+ Entry.getStatus().getName(),
/*RequiresNullTerminator=*/false),
- *Entry->getStatus());
- if (!Entry->getPPSkippedRangeMapping().empty() && PPSkipMappings)
- (*PPSkipMappings)[Result->Buffer->getBufferStart()] =
- &Entry->getPPSkippedRangeMapping();
+ Entry.getStatus());
+
return llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>>(
std::unique_ptr<llvm::vfs::File>(std::move(Result)));
}
@@ -276,9 +350,32 @@ DependencyScanningWorkerFilesystem::openFileForRead(const Twine &Path) {
SmallString<256> OwnedFilename;
StringRef Filename = Path.toStringRef(OwnedFilename);
- const llvm::ErrorOr<const CachedFileSystemEntry *> Result =
- getOrCreateFileSystemEntry(Filename);
+ if (Filename.ends_with(".pcm"))
+ return getUnderlyingFS().openFileForRead(Path);
+
+ llvm::ErrorOr<EntryRef> Result = getOrCreateFileSystemEntry(Filename);
if (!Result)
return Result.getError();
- return MinimizedVFSFile::create(Result.get(), PPSkipMappings);
+ return DepScanFile::create(Result.get());
+}
+
+std::error_code DependencyScanningWorkerFilesystem::setCurrentWorkingDirectory(
+ const Twine &Path) {
+ std::error_code EC = ProxyFileSystem::setCurrentWorkingDirectory(Path);
+ updateWorkingDirForCacheLookup();
+ return EC;
+}
+
+void DependencyScanningWorkerFilesystem::updateWorkingDirForCacheLookup() {
+ llvm::ErrorOr<std::string> CWD =
+ getUnderlyingFS().getCurrentWorkingDirectory();
+ if (!CWD) {
+ WorkingDirForCacheLookup = CWD.getError();
+ } else if (!llvm::sys::path::is_absolute_gnu(*CWD)) {
+ WorkingDirForCacheLookup = llvm::errc::invalid_argument;
+ } else {
+ WorkingDirForCacheLookup = *CWD;
+ }
+ assert(!WorkingDirForCacheLookup ||
+ llvm::sys::path::is_absolute_gnu(*WorkingDirForCacheLookup));
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningService.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningService.cpp
index 4f3e574719d2..7458ef484b16 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningService.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningService.cpp
@@ -14,10 +14,10 @@ using namespace tooling;
using namespace dependencies;
DependencyScanningService::DependencyScanningService(
- ScanningMode Mode, ScanningOutputFormat Format, bool ReuseFileManager,
- bool SkipExcludedPPRanges)
- : Mode(Mode), Format(Format), ReuseFileManager(ReuseFileManager),
- SkipExcludedPPRanges(SkipExcludedPPRanges) {
+ ScanningMode Mode, ScanningOutputFormat Format,
+ ScanningOptimizations OptimizeArgs, bool EagerLoadModules)
+ : Mode(Mode), Format(Format), OptimizeArgs(OptimizeArgs),
+ EagerLoadModules(EagerLoadModules) {
// Initialize targets for object file support.
llvm::InitializeAllTargets();
llvm::InitializeAllTargetMCs();
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp
index 2fd12f7e12b1..4219f6716586 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp
@@ -8,111 +8,74 @@
#include "clang/Tooling/DependencyScanning/DependencyScanningTool.h"
#include "clang/Frontend/Utils.h"
+#include <optional>
-namespace clang{
-namespace tooling{
-namespace dependencies{
-
-std::vector<std::string> FullDependencies::getAdditionalArgs(
- std::function<StringRef(ModuleID)> LookupPCMPath,
- std::function<const ModuleDeps &(ModuleID)> LookupModuleDeps) const {
- std::vector<std::string> Ret = getAdditionalArgsWithoutModulePaths();
-
- std::vector<std::string> PCMPaths;
- std::vector<std::string> ModMapPaths;
- dependencies::detail::collectPCMAndModuleMapPaths(
- ClangModuleDeps, LookupPCMPath, LookupModuleDeps, PCMPaths, ModMapPaths);
- for (const std::string &PCMPath : PCMPaths)
- Ret.push_back("-fmodule-file=" + PCMPath);
- for (const std::string &ModMapPath : ModMapPaths)
- Ret.push_back("-fmodule-map-file=" + ModMapPath);
-
- return Ret;
-}
-
-std::vector<std::string>
-FullDependencies::getAdditionalArgsWithoutModulePaths() const {
- std::vector<std::string> Args{
- "-fno-implicit-modules",
- "-fno-implicit-module-maps",
- };
-
- for (const PrebuiltModuleDep &PMD : PrebuiltModuleDeps) {
- Args.push_back("-fmodule-file=" + PMD.ModuleName + "=" + PMD.PCMFile);
- Args.push_back("-fmodule-map-file=" + PMD.ModuleMapFile);
- }
-
- return Args;
-}
+using namespace clang;
+using namespace tooling;
+using namespace dependencies;
DependencyScanningTool::DependencyScanningTool(
- DependencyScanningService &Service)
- : Worker(Service) {}
-
-llvm::Expected<std::string> DependencyScanningTool::getDependencyFile(
- const tooling::CompilationDatabase &Compilations, StringRef CWD) {
- /// Prints out all of the gathered dependencies into a string.
- class MakeDependencyPrinterConsumer : public DependencyConsumer {
- public:
- void
- handleDependencyOutputOpts(const DependencyOutputOptions &Opts) override {
- this->Opts = std::make_unique<DependencyOutputOptions>(Opts);
- }
-
- void handleFileDependency(StringRef File) override {
- Dependencies.push_back(std::string(File));
- }
-
- void handlePrebuiltModuleDependency(PrebuiltModuleDep PMD) override {
- // Same as `handleModuleDependency`.
- }
-
- void handleModuleDependency(ModuleDeps MD) override {
- // These are ignored for the make format as it can't support the full
- // set of deps, and handleFileDependency handles enough for implicitly
- // built modules to work.
- }
-
- void handleContextHash(std::string Hash) override {}
-
- void printDependencies(std::string &S) {
- assert(Opts && "Handled dependency output options.");
+ DependencyScanningService &Service,
+ llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS)
+ : Worker(Service, std::move(FS)) {}
+
+namespace {
+/// Prints out all of the gathered dependencies into a string.
+class MakeDependencyPrinterConsumer : public DependencyConsumer {
+public:
+ void handleBuildCommand(Command) override {}
+
+ void
+ handleDependencyOutputOpts(const DependencyOutputOptions &Opts) override {
+ this->Opts = std::make_unique<DependencyOutputOptions>(Opts);
+ }
- class DependencyPrinter : public DependencyFileGenerator {
- public:
- DependencyPrinter(DependencyOutputOptions &Opts,
- ArrayRef<std::string> Dependencies)
- : DependencyFileGenerator(Opts) {
- for (const auto &Dep : Dependencies)
- addDependency(Dep);
- }
+ void handleFileDependency(StringRef File) override {
+ Dependencies.push_back(std::string(File));
+ }
- void printDependencies(std::string &S) {
- llvm::raw_string_ostream OS(S);
- outputDependencyFile(OS);
- }
- };
+ // These are ignored for the make format as it can't support the full
+ // set of deps, and handleFileDependency handles enough for implicitly
+ // built modules to work.
+ void handlePrebuiltModuleDependency(PrebuiltModuleDep PMD) override {}
+ void handleModuleDependency(ModuleDeps MD) override {}
+ void handleDirectModuleDependency(ModuleID ID) override {}
+ void handleContextHash(std::string Hash) override {}
+
+ void printDependencies(std::string &S) {
+ assert(Opts && "Handled dependency output options.");
+
+ class DependencyPrinter : public DependencyFileGenerator {
+ public:
+ DependencyPrinter(DependencyOutputOptions &Opts,
+ ArrayRef<std::string> Dependencies)
+ : DependencyFileGenerator(Opts) {
+ for (const auto &Dep : Dependencies)
+ addDependency(Dep);
+ }
- DependencyPrinter Generator(*Opts, Dependencies);
- Generator.printDependencies(S);
- }
+ void printDependencies(std::string &S) {
+ llvm::raw_string_ostream OS(S);
+ outputDependencyFile(OS);
+ }
+ };
- private:
- std::unique_ptr<DependencyOutputOptions> Opts;
- std::vector<std::string> Dependencies;
- };
+ DependencyPrinter Generator(*Opts, Dependencies);
+ Generator.printDependencies(S);
+ }
- // We expect a single command here because if a source file occurs multiple
- // times in the original CDB, then `computeDependencies` would run the
- // `DependencyScanningAction` once for every time the input occured in the
- // CDB. Instead we split up the CDB into single command chunks to avoid this
- // behavior.
- assert(Compilations.getAllCompileCommands().size() == 1 &&
- "Expected a compilation database with a single command!");
- std::string Input = Compilations.getAllCompileCommands().front().Filename;
+protected:
+ std::unique_ptr<DependencyOutputOptions> Opts;
+ std::vector<std::string> Dependencies;
+};
+} // anonymous namespace
+llvm::Expected<std::string> DependencyScanningTool::getDependencyFile(
+ const std::vector<std::string> &CommandLine, StringRef CWD) {
MakeDependencyPrinterConsumer Consumer;
- auto Result = Worker.computeDependencies(Input, CWD, Compilations, Consumer);
+ CallbackActionController Controller(nullptr);
+ auto Result =
+ Worker.computeDependencies(CWD, CommandLine, Consumer, Controller);
if (Result)
return std::move(Result);
std::string Output;
@@ -120,89 +83,122 @@ llvm::Expected<std::string> DependencyScanningTool::getDependencyFile(
return Output;
}
-llvm::Expected<FullDependenciesResult>
-DependencyScanningTool::getFullDependencies(
- const tooling::CompilationDatabase &Compilations, StringRef CWD,
- const llvm::StringSet<> &AlreadySeen) {
- class FullDependencyPrinterConsumer : public DependencyConsumer {
+llvm::Expected<P1689Rule> DependencyScanningTool::getP1689ModuleDependencyFile(
+ const CompileCommand &Command, StringRef CWD, std::string &MakeformatOutput,
+ std::string &MakeformatOutputPath) {
+ class P1689ModuleDependencyPrinterConsumer
+ : public MakeDependencyPrinterConsumer {
public:
- FullDependencyPrinterConsumer(const llvm::StringSet<> &AlreadySeen)
- : AlreadySeen(AlreadySeen) {}
-
- void
- handleDependencyOutputOpts(const DependencyOutputOptions &Opts) override {}
-
- void handleFileDependency(StringRef File) override {
- Dependencies.push_back(std::string(File));
- }
-
- void handlePrebuiltModuleDependency(PrebuiltModuleDep PMD) override {
- PrebuiltModuleDeps.emplace_back(std::move(PMD));
+ P1689ModuleDependencyPrinterConsumer(P1689Rule &Rule,
+ const CompileCommand &Command)
+ : Filename(Command.Filename), Rule(Rule) {
+ Rule.PrimaryOutput = Command.Output;
}
- void handleModuleDependency(ModuleDeps MD) override {
- ClangModuleDeps[MD.ID.ContextHash + MD.ID.ModuleName] = std::move(MD);
+ void handleProvidedAndRequiredStdCXXModules(
+ std::optional<P1689ModuleInfo> Provided,
+ std::vector<P1689ModuleInfo> Requires) override {
+ Rule.Provides = Provided;
+ if (Rule.Provides)
+ Rule.Provides->SourcePath = Filename.str();
+ Rule.Requires = Requires;
}
- void handleContextHash(std::string Hash) override {
- ContextHash = std::move(Hash);
+ StringRef getMakeFormatDependencyOutputPath() {
+ if (Opts->OutputFormat != DependencyOutputFormat::Make)
+ return {};
+ return Opts->OutputFile;
}
- FullDependenciesResult getFullDependencies() const {
- FullDependencies FD;
+ private:
+ StringRef Filename;
+ P1689Rule &Rule;
+ };
- FD.ID.ContextHash = std::move(ContextHash);
+ class P1689ActionController : public DependencyActionController {
+ public:
+ // The lookupModuleOutput is for clang modules. P1689 format don't need it.
+ std::string lookupModuleOutput(const ModuleID &,
+ ModuleOutputKind Kind) override {
+ return "";
+ }
+ };
- FD.FileDeps.assign(Dependencies.begin(), Dependencies.end());
+ P1689Rule Rule;
+ P1689ModuleDependencyPrinterConsumer Consumer(Rule, Command);
+ P1689ActionController Controller;
+ auto Result = Worker.computeDependencies(CWD, Command.CommandLine, Consumer,
+ Controller);
+ if (Result)
+ return std::move(Result);
- for (auto &&M : ClangModuleDeps) {
- auto &MD = M.second;
- if (MD.ImportedByMainFile)
- FD.ClangModuleDeps.push_back(MD.ID);
- }
+ MakeformatOutputPath = Consumer.getMakeFormatDependencyOutputPath();
+ if (!MakeformatOutputPath.empty())
+ Consumer.printDependencies(MakeformatOutput);
+ return Rule;
+}
- FD.PrebuiltModuleDeps = std::move(PrebuiltModuleDeps);
+llvm::Expected<TranslationUnitDeps>
+DependencyScanningTool::getTranslationUnitDependencies(
+ const std::vector<std::string> &CommandLine, StringRef CWD,
+ const llvm::DenseSet<ModuleID> &AlreadySeen,
+ LookupModuleOutputCallback LookupModuleOutput) {
+ FullDependencyConsumer Consumer(AlreadySeen);
+ CallbackActionController Controller(LookupModuleOutput);
+ llvm::Error Result =
+ Worker.computeDependencies(CWD, CommandLine, Consumer, Controller);
+ if (Result)
+ return std::move(Result);
+ return Consumer.takeTranslationUnitDeps();
+}
- FullDependenciesResult FDR;
+llvm::Expected<ModuleDepsGraph> DependencyScanningTool::getModuleDependencies(
+ StringRef ModuleName, const std::vector<std::string> &CommandLine,
+ StringRef CWD, const llvm::DenseSet<ModuleID> &AlreadySeen,
+ LookupModuleOutputCallback LookupModuleOutput) {
+ FullDependencyConsumer Consumer(AlreadySeen);
+ CallbackActionController Controller(LookupModuleOutput);
+ llvm::Error Result = Worker.computeDependencies(CWD, CommandLine, Consumer,
+ Controller, ModuleName);
+ if (Result)
+ return std::move(Result);
+ return Consumer.takeModuleGraphDeps();
+}
- for (auto &&M : ClangModuleDeps) {
- // TODO: Avoid handleModuleDependency even being called for modules
- // we've already seen.
- if (AlreadySeen.count(M.first))
- continue;
- FDR.DiscoveredModules.push_back(std::move(M.second));
- }
+TranslationUnitDeps FullDependencyConsumer::takeTranslationUnitDeps() {
+ TranslationUnitDeps TU;
+
+ TU.ID.ContextHash = std::move(ContextHash);
+ TU.FileDeps = std::move(Dependencies);
+ TU.PrebuiltModuleDeps = std::move(PrebuiltModuleDeps);
+ TU.Commands = std::move(Commands);
+
+ for (auto &&M : ClangModuleDeps) {
+ auto &MD = M.second;
+ // TODO: Avoid handleModuleDependency even being called for modules
+ // we've already seen.
+ if (AlreadySeen.count(M.first))
+ continue;
+ TU.ModuleGraph.push_back(std::move(MD));
+ }
+ TU.ClangModuleDeps = std::move(DirectModuleDeps);
- FDR.FullDeps = std::move(FD);
- return FDR;
- }
+ return TU;
+}
- private:
- std::vector<std::string> Dependencies;
- std::vector<PrebuiltModuleDep> PrebuiltModuleDeps;
- std::unordered_map<std::string, ModuleDeps> ClangModuleDeps;
- std::string ContextHash;
- std::vector<std::string> OutputPaths;
- const llvm::StringSet<> &AlreadySeen;
- };
+ModuleDepsGraph FullDependencyConsumer::takeModuleGraphDeps() {
+ ModuleDepsGraph ModuleGraph;
- // We expect a single command here because if a source file occurs multiple
- // times in the original CDB, then `computeDependencies` would run the
- // `DependencyScanningAction` once for every time the input occured in the
- // CDB. Instead we split up the CDB into single command chunks to avoid this
- // behavior.
- assert(Compilations.getAllCompileCommands().size() == 1 &&
- "Expected a compilation database with a single command!");
- std::string Input = Compilations.getAllCompileCommands().front().Filename;
+ for (auto &&M : ClangModuleDeps) {
+ auto &MD = M.second;
+ // TODO: Avoid handleModuleDependency even being called for modules
+ // we've already seen.
+ if (AlreadySeen.count(M.first))
+ continue;
+ ModuleGraph.push_back(std::move(MD));
+ }
- FullDependencyPrinterConsumer Consumer(AlreadySeen);
- llvm::Error Result =
- Worker.computeDependencies(Input, CWD, Compilations, Consumer);
- if (Result)
- return std::move(Result);
- return Consumer.getFullDependencies();
+ return ModuleGraph;
}
-} // end namespace dependencies
-} // end namespace tooling
-} // end namespace clang
+CallbackActionController::~CallbackActionController() {}
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
index d651ff23b387..7ab4a699af6d 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
@@ -7,7 +7,13 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/DependencyScanning/DependencyScanningWorker.h"
+#include "clang/Basic/DiagnosticDriver.h"
+#include "clang/Basic/DiagnosticFrontend.h"
#include "clang/CodeGen/ObjectFilePCHContainerOperations.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Job.h"
+#include "clang/Driver/Tool.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/FrontendActions.h"
@@ -17,6 +23,10 @@
#include "clang/Tooling/DependencyScanning/DependencyScanningService.h"
#include "clang/Tooling/DependencyScanning/ModuleDepCollector.h"
#include "clang/Tooling/Tooling.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Error.h"
+#include "llvm/TargetParser/Host.h"
+#include <optional>
using namespace clang;
using namespace tooling;
@@ -28,8 +38,9 @@ namespace {
class DependencyConsumerForwarder : public DependencyFileGenerator {
public:
DependencyConsumerForwarder(std::unique_ptr<DependencyOutputOptions> Opts,
- DependencyConsumer &C)
- : DependencyFileGenerator(*Opts), Opts(std::move(Opts)), C(C) {}
+ StringRef WorkingDirectory, DependencyConsumer &C)
+ : DependencyFileGenerator(*Opts), WorkingDirectory(WorkingDirectory),
+ Opts(std::move(Opts)), C(C) {}
void finishedMainFile(DiagnosticsEngine &Diags) override {
C.handleDependencyOutputOpts(*Opts);
@@ -37,87 +48,62 @@ public:
for (const auto &File : getDependencies()) {
CanonPath = File;
llvm::sys::path::remove_dots(CanonPath, /*remove_dot_dot=*/true);
+ llvm::sys::fs::make_absolute(WorkingDirectory, CanonPath);
C.handleFileDependency(CanonPath);
}
}
private:
+ StringRef WorkingDirectory;
std::unique_ptr<DependencyOutputOptions> Opts;
DependencyConsumer &C;
};
+using PrebuiltModuleFilesT = decltype(HeaderSearchOptions::PrebuiltModuleFiles);
+
/// A listener that collects the imported modules and optionally the input
/// files.
class PrebuiltModuleListener : public ASTReaderListener {
public:
- PrebuiltModuleListener(llvm::StringMap<std::string> &PrebuiltModuleFiles,
- llvm::StringSet<> &InputFiles, bool VisitInputFiles)
- : PrebuiltModuleFiles(PrebuiltModuleFiles), InputFiles(InputFiles),
- VisitInputFiles(VisitInputFiles) {}
+ PrebuiltModuleListener(PrebuiltModuleFilesT &PrebuiltModuleFiles,
+ llvm::SmallVector<std::string> &NewModuleFiles)
+ : PrebuiltModuleFiles(PrebuiltModuleFiles),
+ NewModuleFiles(NewModuleFiles) {}
bool needsImportVisitation() const override { return true; }
- bool needsInputFileVisitation() override { return VisitInputFiles; }
- bool needsSystemInputFileVisitation() override { return VisitInputFiles; }
void visitImport(StringRef ModuleName, StringRef Filename) override {
- PrebuiltModuleFiles.insert({ModuleName, Filename.str()});
- }
-
- bool visitInputFile(StringRef Filename, bool isSystem, bool isOverridden,
- bool isExplicitModule) override {
- InputFiles.insert(Filename);
- return true;
+ if (PrebuiltModuleFiles.insert({ModuleName.str(), Filename.str()}).second)
+ NewModuleFiles.push_back(Filename.str());
}
private:
- llvm::StringMap<std::string> &PrebuiltModuleFiles;
- llvm::StringSet<> &InputFiles;
- bool VisitInputFiles;
+ PrebuiltModuleFilesT &PrebuiltModuleFiles;
+ llvm::SmallVector<std::string> &NewModuleFiles;
};
-using PrebuiltModuleFilesT = decltype(HeaderSearchOptions::PrebuiltModuleFiles);
-
/// Visit the given prebuilt module and collect all of the modules it
/// transitively imports and contributing input files.
static void visitPrebuiltModule(StringRef PrebuiltModuleFilename,
CompilerInstance &CI,
- PrebuiltModuleFilesT &ModuleFiles,
- llvm::StringSet<> &InputFiles,
- bool VisitInputFiles) {
- // Maps the names of modules that weren't yet visited to their PCM path.
- llvm::StringMap<std::string> ModuleFilesWorklist;
- // Contains PCM paths of all visited modules.
- llvm::StringSet<> VisitedModuleFiles;
-
- PrebuiltModuleListener Listener(ModuleFilesWorklist, InputFiles,
- VisitInputFiles);
-
- auto GatherModuleFileInfo = [&](StringRef ASTFile) {
+ PrebuiltModuleFilesT &ModuleFiles) {
+ // List of module files to be processed.
+ llvm::SmallVector<std::string> Worklist{PrebuiltModuleFilename.str()};
+ PrebuiltModuleListener Listener(ModuleFiles, Worklist);
+
+ while (!Worklist.empty())
ASTReader::readASTFileControlBlock(
- ASTFile, CI.getFileManager(), CI.getPCHContainerReader(),
+ Worklist.pop_back_val(), CI.getFileManager(), CI.getModuleCache(),
+ CI.getPCHContainerReader(),
/*FindModuleFileExtensions=*/false, Listener,
/*ValidateDiagnosticOptions=*/false);
- };
-
- GatherModuleFileInfo(PrebuiltModuleFilename);
- while (!ModuleFilesWorklist.empty()) {
- auto WorklistItemIt = ModuleFilesWorklist.begin();
-
- if (!VisitedModuleFiles.contains(WorklistItemIt->getValue())) {
- VisitedModuleFiles.insert(WorklistItemIt->getValue());
- GatherModuleFileInfo(WorklistItemIt->getValue());
- ModuleFiles[WorklistItemIt->getKey().str()] = WorklistItemIt->getValue();
- }
-
- ModuleFilesWorklist.erase(WorklistItemIt);
- }
}
/// Transform arbitrary file name into an object-like file name.
static std::string makeObjFileName(StringRef FileName) {
SmallString<128> ObjFileName(FileName);
llvm::sys::path::replace_extension(ObjFileName, "o");
- return std::string(ObjFileName.str());
+ return std::string(ObjFileName);
}
/// Deduce the dependency target based on the output file and input files.
@@ -133,18 +119,31 @@ deduceDepTarget(const std::string &OutputFile,
return makeObjFileName(InputFiles.front().getFile());
}
+/// Sanitize diagnostic options for dependency scan.
+static void sanitizeDiagOpts(DiagnosticOptions &DiagOpts) {
+ // Don't print 'X warnings and Y errors generated'.
+ DiagOpts.ShowCarets = false;
+ // Don't write out diagnostic file.
+ DiagOpts.DiagnosticSerializationFile.clear();
+ // Don't emit warnings as errors (and all other warnings too).
+ DiagOpts.IgnoreWarnings = true;
+}
+
/// A clang tool that runs the preprocessor in a mode that's optimized for
/// dependency scanning for the given compiler invocation.
class DependencyScanningAction : public tooling::ToolAction {
public:
DependencyScanningAction(
StringRef WorkingDirectory, DependencyConsumer &Consumer,
+ DependencyActionController &Controller,
llvm::IntrusiveRefCntPtr<DependencyScanningWorkerFilesystem> DepFS,
- ExcludedPreprocessorDirectiveSkipMapping *PPSkipMappings,
- ScanningOutputFormat Format)
+ ScanningOutputFormat Format, ScanningOptimizations OptimizeArgs,
+ bool EagerLoadModules, bool DisableFree,
+ std::optional<StringRef> ModuleName = std::nullopt)
: WorkingDirectory(WorkingDirectory), Consumer(Consumer),
- DepFS(std::move(DepFS)), PPSkipMappings(PPSkipMappings),
- Format(Format) {}
+ Controller(Controller), DepFS(std::move(DepFS)), Format(Format),
+ OptimizeArgs(OptimizeArgs), EagerLoadModules(EagerLoadModules),
+ DisableFree(DisableFree), ModuleName(ModuleName) {}
bool runInvocation(std::shared_ptr<CompilerInvocation> Invocation,
FileManager *FileMgr,
@@ -152,66 +151,67 @@ public:
DiagnosticConsumer *DiagConsumer) override {
// Make a deep copy of the original Clang invocation.
CompilerInvocation OriginalInvocation(*Invocation);
+ // Restore the value of DisableFree, which may be modified by Tooling.
+ OriginalInvocation.getFrontendOpts().DisableFree = DisableFree;
+
+ if (Scanned) {
+ // Scanning runs once for the first -cc1 invocation in a chain of driver
+ // jobs. For any dependent jobs, reuse the scanning result and just
+ // update the LastCC1Arguments to correspond to the new invocation.
+ // FIXME: to support multi-arch builds, each arch requires a separate scan
+ setLastCC1Arguments(std::move(OriginalInvocation));
+ return true;
+ }
+
+ Scanned = true;
// Create a compiler instance to handle the actual work.
- CompilerInstance Compiler(std::move(PCHContainerOps));
- Compiler.setInvocation(std::move(Invocation));
-
- // Don't print 'X warnings and Y errors generated'.
- Compiler.getDiagnosticOpts().ShowCarets = false;
- // Don't write out diagnostic file.
- Compiler.getDiagnosticOpts().DiagnosticSerializationFile.clear();
- // Don't treat warnings as errors.
- Compiler.getDiagnosticOpts().Warnings.push_back("no-error");
+ ScanInstanceStorage.emplace(std::move(PCHContainerOps));
+ CompilerInstance &ScanInstance = *ScanInstanceStorage;
+ ScanInstance.setInvocation(std::move(Invocation));
+
// Create the compiler's actual diagnostics engine.
- Compiler.createDiagnostics(DiagConsumer, /*ShouldOwnClient=*/false);
- if (!Compiler.hasDiagnostics())
+ sanitizeDiagOpts(ScanInstance.getDiagnosticOpts());
+ ScanInstance.createDiagnostics(DiagConsumer, /*ShouldOwnClient=*/false);
+ if (!ScanInstance.hasDiagnostics())
return false;
- Compiler.getPreprocessorOpts().AllowPCHWithDifferentModulesCachePath = true;
+ ScanInstance.getPreprocessorOpts().AllowPCHWithDifferentModulesCachePath =
+ true;
- FileMgr->getFileSystemOpts().WorkingDir = std::string(WorkingDirectory);
- Compiler.setFileManager(FileMgr);
- Compiler.createSourceManager(*FileMgr);
+ ScanInstance.getFrontendOpts().GenerateGlobalModuleIndex = false;
+ ScanInstance.getFrontendOpts().UseGlobalModuleIndex = false;
+ ScanInstance.getFrontendOpts().ModulesShareFileManager = false;
+ ScanInstance.getHeaderSearchOpts().ModuleFormat = "raw";
+
+ ScanInstance.setFileManager(FileMgr);
+ // Support for virtual file system overlays.
+ FileMgr->setVirtualFileSystem(createVFSFromCompilerInvocation(
+ ScanInstance.getInvocation(), ScanInstance.getDiagnostics(),
+ FileMgr->getVirtualFileSystemPtr()));
+
+ ScanInstance.createSourceManager(*FileMgr);
- llvm::StringSet<> PrebuiltModulesInputFiles;
// Store the list of prebuilt module files into header search options. This
// will prevent the implicit build to create duplicate modules and will
// force reuse of the existing prebuilt module files instead.
- if (!Compiler.getPreprocessorOpts().ImplicitPCHInclude.empty())
+ if (!ScanInstance.getPreprocessorOpts().ImplicitPCHInclude.empty())
visitPrebuiltModule(
- Compiler.getPreprocessorOpts().ImplicitPCHInclude, Compiler,
- Compiler.getHeaderSearchOpts().PrebuiltModuleFiles,
- PrebuiltModulesInputFiles, /*VisitInputFiles=*/DepFS != nullptr);
+ ScanInstance.getPreprocessorOpts().ImplicitPCHInclude, ScanInstance,
+ ScanInstance.getHeaderSearchOpts().PrebuiltModuleFiles);
// Use the dependency scanning optimized file system if requested to do so.
if (DepFS) {
- const CompilerInvocation &CI = Compiler.getInvocation();
- DepFS->clearIgnoredFiles();
- // Ignore any files that contributed to prebuilt modules. The implicit
- // build validates the modules by comparing the reported sizes of their
- // inputs to the current state of the filesystem. Minimization would throw
- // this mechanism off.
- for (const auto &File : PrebuiltModulesInputFiles)
- DepFS->ignoreFile(File.getKey());
- // Add any filenames that were explicity passed in the build settings and
- // that might be opened, as we want to ensure we don't run source
- // minimization on them.
- for (const auto &Entry : CI.getHeaderSearchOpts().UserEntries)
- DepFS->ignoreFile(Entry.Path);
- for (const auto &Entry : CI.getHeaderSearchOpts().VFSOverlayFiles)
- DepFS->ignoreFile(Entry);
-
- // Support for virtual file system overlays on top of the caching
- // filesystem.
- FileMgr->setVirtualFileSystem(createVFSFromCompilerInvocation(
- CI, Compiler.getDiagnostics(), DepFS));
-
- // Pass the skip mappings which should speed up excluded conditional block
- // skipping in the preprocessor.
- if (PPSkipMappings)
- Compiler.getPreprocessorOpts()
- .ExcludedConditionalDirectiveSkipMappings = PPSkipMappings;
+ llvm::IntrusiveRefCntPtr<DependencyScanningWorkerFilesystem> LocalDepFS =
+ DepFS;
+ ScanInstance.getPreprocessorOpts().DependencyDirectivesForFile =
+ [LocalDepFS = std::move(LocalDepFS)](FileEntryRef File)
+ -> std::optional<ArrayRef<dependency_directives_scan::Directive>> {
+ if (llvm::ErrorOr<EntryRef> Entry =
+ LocalDepFS->getOrCreateFileSystemEntry(File.getName()))
+ return Entry->getDirectiveTokens();
+ return std::nullopt;
+ };
}
// Create the dependency collector that will collect the produced
@@ -222,104 +222,302 @@ public:
// which ensures that the compiler won't create new dependency collectors,
// and thus won't write out the extra '.d' files to disk.
auto Opts = std::make_unique<DependencyOutputOptions>();
- std::swap(*Opts, Compiler.getInvocation().getDependencyOutputOpts());
+ std::swap(*Opts, ScanInstance.getInvocation().getDependencyOutputOpts());
// We need at least one -MT equivalent for the generator of make dependency
// files to work.
if (Opts->Targets.empty())
- Opts->Targets = {deduceDepTarget(Compiler.getFrontendOpts().OutputFile,
- Compiler.getFrontendOpts().Inputs)};
+ Opts->Targets = {
+ deduceDepTarget(ScanInstance.getFrontendOpts().OutputFile,
+ ScanInstance.getFrontendOpts().Inputs)};
Opts->IncludeSystemHeaders = true;
switch (Format) {
case ScanningOutputFormat::Make:
- Compiler.addDependencyCollector(
- std::make_shared<DependencyConsumerForwarder>(std::move(Opts),
- Consumer));
+ ScanInstance.addDependencyCollector(
+ std::make_shared<DependencyConsumerForwarder>(
+ std::move(Opts), WorkingDirectory, Consumer));
break;
+ case ScanningOutputFormat::P1689:
case ScanningOutputFormat::Full:
- Compiler.addDependencyCollector(std::make_shared<ModuleDepCollector>(
- std::move(Opts), Compiler, Consumer, std::move(OriginalInvocation)));
+ MDC = std::make_shared<ModuleDepCollector>(
+ std::move(Opts), ScanInstance, Consumer, Controller,
+ OriginalInvocation, OptimizeArgs, EagerLoadModules,
+ Format == ScanningOutputFormat::P1689);
+ ScanInstance.addDependencyCollector(MDC);
break;
}
// Consider different header search and diagnostic options to create
// different modules. This avoids the unsound aliasing of module PCMs.
//
- // TODO: Implement diagnostic bucketing and header search pruning to reduce
- // the impact of strict context hashing.
- Compiler.getHeaderSearchOpts().ModulesStrictContextHash = true;
-
- auto Action = std::make_unique<ReadPCHAndPreprocessAction>();
- const bool Result = Compiler.ExecuteAction(*Action);
- if (!DepFS)
- FileMgr->clearStatCache();
+ // TODO: Implement diagnostic bucketing to reduce the impact of strict
+ // context hashing.
+ ScanInstance.getHeaderSearchOpts().ModulesStrictContextHash = true;
+ ScanInstance.getHeaderSearchOpts().ModulesSkipDiagnosticOptions = true;
+ ScanInstance.getHeaderSearchOpts().ModulesSkipHeaderSearchPaths = true;
+ ScanInstance.getHeaderSearchOpts().ModulesSkipPragmaDiagnosticMappings =
+ true;
+
+ // Avoid some checks and module map parsing when loading PCM files.
+ ScanInstance.getPreprocessorOpts().ModulesCheckRelocated = false;
+
+ std::unique_ptr<FrontendAction> Action;
+
+ if (ModuleName)
+ Action = std::make_unique<GetDependenciesByModuleNameAction>(*ModuleName);
+ else
+ Action = std::make_unique<ReadPCHAndPreprocessAction>();
+
+ const bool Result = ScanInstance.ExecuteAction(*Action);
+
+ if (Result)
+ setLastCC1Arguments(std::move(OriginalInvocation));
+
+ return Result;
+ }
+
+ bool hasScanned() const { return Scanned; }
+
+ /// Take the cc1 arguments corresponding to the most recent invocation used
+ /// with this action. Any modifications implied by the discovered dependencies
+ /// will have already been applied.
+ std::vector<std::string> takeLastCC1Arguments() {
+ std::vector<std::string> Result;
+ std::swap(Result, LastCC1Arguments); // Reset LastCC1Arguments to empty.
return Result;
}
private:
+ void setLastCC1Arguments(CompilerInvocation &&CI) {
+ if (MDC)
+ MDC->applyDiscoveredDependencies(CI);
+ LastCC1Arguments = CI.getCC1CommandLine();
+ }
+
+private:
StringRef WorkingDirectory;
DependencyConsumer &Consumer;
+ DependencyActionController &Controller;
llvm::IntrusiveRefCntPtr<DependencyScanningWorkerFilesystem> DepFS;
- ExcludedPreprocessorDirectiveSkipMapping *PPSkipMappings;
ScanningOutputFormat Format;
+ ScanningOptimizations OptimizeArgs;
+ bool EagerLoadModules;
+ bool DisableFree;
+ std::optional<StringRef> ModuleName;
+ std::optional<CompilerInstance> ScanInstanceStorage;
+ std::shared_ptr<ModuleDepCollector> MDC;
+ std::vector<std::string> LastCC1Arguments;
+ bool Scanned = false;
};
} // end anonymous namespace
DependencyScanningWorker::DependencyScanningWorker(
- DependencyScanningService &Service)
- : Format(Service.getFormat()) {
- DiagOpts = new DiagnosticOptions();
-
+ DependencyScanningService &Service,
+ llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS)
+ : Format(Service.getFormat()), OptimizeArgs(Service.getOptimizeArgs()),
+ EagerLoadModules(Service.shouldEagerLoadModules()) {
PCHContainerOps = std::make_shared<PCHContainerOperations>();
+ // We need to read object files from PCH built outside the scanner.
PCHContainerOps->registerReader(
std::make_unique<ObjectFilePCHContainerReader>());
- // We don't need to write object files, but the current PCH implementation
- // requires the writer to be registered as well.
- PCHContainerOps->registerWriter(
- std::make_unique<ObjectFilePCHContainerWriter>());
-
- RealFS = llvm::vfs::createPhysicalFileSystem();
- if (Service.canSkipExcludedPPRanges())
- PPSkipMappings =
- std::make_unique<ExcludedPreprocessorDirectiveSkipMapping>();
- if (Service.getMode() == ScanningMode::MinimizedSourcePreprocessing)
- DepFS = new DependencyScanningWorkerFilesystem(
- Service.getSharedCache(), RealFS, PPSkipMappings.get());
- if (Service.canReuseFileManager())
- Files = new FileManager(FileSystemOptions(), RealFS);
+ // The scanner itself writes only raw ast files.
+ PCHContainerOps->registerWriter(std::make_unique<RawPCHContainerWriter>());
+
+ switch (Service.getMode()) {
+ case ScanningMode::DependencyDirectivesScan:
+ DepFS =
+ new DependencyScanningWorkerFilesystem(Service.getSharedCache(), FS);
+ BaseFS = DepFS;
+ break;
+ case ScanningMode::CanonicalPreprocessing:
+ DepFS = nullptr;
+ BaseFS = FS;
+ break;
+ }
}
-static llvm::Error runWithDiags(
- DiagnosticOptions *DiagOpts,
- llvm::function_ref<bool(DiagnosticConsumer &DC)> BodyShouldSucceed) {
+llvm::Error DependencyScanningWorker::computeDependencies(
+ StringRef WorkingDirectory, const std::vector<std::string> &CommandLine,
+ DependencyConsumer &Consumer, DependencyActionController &Controller,
+ std::optional<StringRef> ModuleName) {
+ std::vector<const char *> CLI;
+ for (const std::string &Arg : CommandLine)
+ CLI.push_back(Arg.c_str());
+ auto DiagOpts = CreateAndPopulateDiagOpts(CLI);
+ sanitizeDiagOpts(*DiagOpts);
+
// Capture the emitted diagnostics and report them to the client
// in the case of a failure.
std::string DiagnosticOutput;
llvm::raw_string_ostream DiagnosticsOS(DiagnosticOutput);
- TextDiagnosticPrinter DiagPrinter(DiagnosticsOS, DiagOpts);
+ TextDiagnosticPrinter DiagPrinter(DiagnosticsOS, DiagOpts.release());
- if (BodyShouldSucceed(DiagPrinter))
+ if (computeDependencies(WorkingDirectory, CommandLine, Consumer, Controller,
+ DiagPrinter, ModuleName))
return llvm::Error::success();
return llvm::make_error<llvm::StringError>(DiagnosticsOS.str(),
llvm::inconvertibleErrorCode());
}
-llvm::Error DependencyScanningWorker::computeDependencies(
- const std::string &Input, StringRef WorkingDirectory,
- const CompilationDatabase &CDB, DependencyConsumer &Consumer) {
- RealFS->setCurrentWorkingDirectory(WorkingDirectory);
- return runWithDiags(DiagOpts.get(), [&](DiagnosticConsumer &DC) {
- /// Create the tool that uses the underlying file system to ensure that any
- /// file system requests that are made by the driver do not go through the
- /// dependency scanning filesystem.
- tooling::ClangTool Tool(CDB, Input, PCHContainerOps, RealFS, Files);
- Tool.clearArgumentsAdjusters();
- Tool.setRestoreWorkingDir(false);
- Tool.setPrintErrorMessage(false);
- Tool.setDiagnosticConsumer(&DC);
- DependencyScanningAction Action(WorkingDirectory, Consumer, DepFS,
- PPSkipMappings.get(), Format);
- return !Tool.run(&Action);
- });
+static bool forEachDriverJob(
+ ArrayRef<std::string> ArgStrs, DiagnosticsEngine &Diags, FileManager &FM,
+ llvm::function_ref<bool(const driver::Command &Cmd)> Callback) {
+ SmallVector<const char *, 256> Argv;
+ Argv.reserve(ArgStrs.size());
+ for (const std::string &Arg : ArgStrs)
+ Argv.push_back(Arg.c_str());
+
+ llvm::vfs::FileSystem *FS = &FM.getVirtualFileSystem();
+
+ std::unique_ptr<driver::Driver> Driver = std::make_unique<driver::Driver>(
+ Argv[0], llvm::sys::getDefaultTargetTriple(), Diags,
+ "clang LLVM compiler", FS);
+ Driver->setTitle("clang_based_tool");
+
+ llvm::BumpPtrAllocator Alloc;
+ bool CLMode = driver::IsClangCL(
+ driver::getDriverMode(Argv[0], ArrayRef(Argv).slice(1)));
+
+ if (llvm::Error E = driver::expandResponseFiles(Argv, CLMode, Alloc, FS)) {
+ Diags.Report(diag::err_drv_expand_response_file)
+ << llvm::toString(std::move(E));
+ return false;
+ }
+
+ const std::unique_ptr<driver::Compilation> Compilation(
+ Driver->BuildCompilation(llvm::ArrayRef(Argv)));
+ if (!Compilation)
+ return false;
+
+ if (Compilation->containsError())
+ return false;
+
+ for (const driver::Command &Job : Compilation->getJobs()) {
+ if (!Callback(Job))
+ return false;
+ }
+ return true;
}
+
+static bool createAndRunToolInvocation(
+ std::vector<std::string> CommandLine, DependencyScanningAction &Action,
+ FileManager &FM,
+ std::shared_ptr<clang::PCHContainerOperations> &PCHContainerOps,
+ DiagnosticsEngine &Diags, DependencyConsumer &Consumer) {
+
+ // Save executable path before providing CommandLine to ToolInvocation
+ std::string Executable = CommandLine[0];
+ ToolInvocation Invocation(std::move(CommandLine), &Action, &FM,
+ PCHContainerOps);
+ Invocation.setDiagnosticConsumer(Diags.getClient());
+ Invocation.setDiagnosticOptions(&Diags.getDiagnosticOptions());
+ if (!Invocation.run())
+ return false;
+
+ std::vector<std::string> Args = Action.takeLastCC1Arguments();
+ Consumer.handleBuildCommand({std::move(Executable), std::move(Args)});
+ return true;
+}
+
+bool DependencyScanningWorker::computeDependencies(
+ StringRef WorkingDirectory, const std::vector<std::string> &CommandLine,
+ DependencyConsumer &Consumer, DependencyActionController &Controller,
+ DiagnosticConsumer &DC, std::optional<StringRef> ModuleName) {
+ // Reset what might have been modified in the previous worker invocation.
+ BaseFS->setCurrentWorkingDirectory(WorkingDirectory);
+
+ std::optional<std::vector<std::string>> ModifiedCommandLine;
+ llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> ModifiedFS;
+
+ // If we're scanning based on a module name alone, we don't expect the client
+ // to provide us with an input file. However, the driver really wants to have
+ // one. Let's just make it up to make the driver happy.
+ if (ModuleName) {
+ auto OverlayFS =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(BaseFS);
+ auto InMemoryFS =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
+ InMemoryFS->setCurrentWorkingDirectory(WorkingDirectory);
+ OverlayFS->pushOverlay(InMemoryFS);
+ ModifiedFS = OverlayFS;
+
+ SmallString<128> FakeInputPath;
+ // TODO: We should retry the creation if the path already exists.
+ llvm::sys::fs::createUniquePath(*ModuleName + "-%%%%%%%%.input",
+ FakeInputPath,
+ /*MakeAbsolute=*/false);
+ InMemoryFS->addFile(FakeInputPath, 0, llvm::MemoryBuffer::getMemBuffer(""));
+
+ ModifiedCommandLine = CommandLine;
+ ModifiedCommandLine->emplace_back(FakeInputPath);
+ }
+
+ const std::vector<std::string> &FinalCommandLine =
+ ModifiedCommandLine ? *ModifiedCommandLine : CommandLine;
+ auto &FinalFS = ModifiedFS ? ModifiedFS : BaseFS;
+
+ FileSystemOptions FSOpts;
+ FSOpts.WorkingDir = WorkingDirectory.str();
+ auto FileMgr = llvm::makeIntrusiveRefCnt<FileManager>(FSOpts, FinalFS);
+
+ std::vector<const char *> FinalCCommandLine(FinalCommandLine.size(), nullptr);
+ llvm::transform(FinalCommandLine, FinalCCommandLine.begin(),
+ [](const std::string &Str) { return Str.c_str(); });
+
+ auto DiagOpts = CreateAndPopulateDiagOpts(FinalCCommandLine);
+ sanitizeDiagOpts(*DiagOpts);
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags =
+ CompilerInstance::createDiagnostics(DiagOpts.release(), &DC,
+ /*ShouldOwnClient=*/false);
+
+ // Although `Diagnostics` are used only for command-line parsing, the
+ // custom `DiagConsumer` might expect a `SourceManager` to be present.
+ SourceManager SrcMgr(*Diags, *FileMgr);
+ Diags->setSourceManager(&SrcMgr);
+ // DisableFree is modified by Tooling for running
+ // in-process; preserve the original value, which is
+ // always true for a driver invocation.
+ bool DisableFree = true;
+ DependencyScanningAction Action(WorkingDirectory, Consumer, Controller, DepFS,
+ Format, OptimizeArgs, EagerLoadModules,
+ DisableFree, ModuleName);
+
+ bool Success = false;
+ if (FinalCommandLine[1] == "-cc1") {
+ Success = createAndRunToolInvocation(FinalCommandLine, Action, *FileMgr,
+ PCHContainerOps, *Diags, Consumer);
+ } else {
+ Success = forEachDriverJob(
+ FinalCommandLine, *Diags, *FileMgr, [&](const driver::Command &Cmd) {
+ if (StringRef(Cmd.getCreator().getName()) != "clang") {
+ // Non-clang command. Just pass through to the dependency
+ // consumer.
+ Consumer.handleBuildCommand(
+ {Cmd.getExecutable(),
+ {Cmd.getArguments().begin(), Cmd.getArguments().end()}});
+ return true;
+ }
+
+ // Insert -cc1 comand line options into Argv
+ std::vector<std::string> Argv;
+ Argv.push_back(Cmd.getExecutable());
+ Argv.insert(Argv.end(), Cmd.getArguments().begin(),
+ Cmd.getArguments().end());
+
+ // Create an invocation that uses the underlying file
+ // system to ensure that any file system requests that
+ // are made by the driver do not go through the
+ // dependency scanning filesystem.
+ return createAndRunToolInvocation(std::move(Argv), Action, *FileMgr,
+ PCHContainerOps, *Diags, Consumer);
+ });
+ }
+
+ if (Success && !Action.hasScanned())
+ Diags->Report(diag::err_fe_expected_compiler_job)
+ << llvm::join(FinalCommandLine, " ");
+ return Success && Action.hasScanned();
+}
+
+DependencyActionController::~DependencyActionController() {}
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
index 88cee63c98aa..bfaa89785104 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
@@ -1,145 +1,407 @@
//===- ModuleDepCollector.cpp - Callbacks to collect deps -------*- C++ -*-===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "clang/Tooling/DependencyScanning/ModuleDepCollector.h"
+#include "clang/Basic/MakeSupport.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Tooling/DependencyScanning/DependencyScanningWorker.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/BLAKE3.h"
#include "llvm/Support/StringSaver.h"
+#include <optional>
using namespace clang;
using namespace tooling;
using namespace dependencies;
-CompilerInvocation ModuleDepCollector::makeInvocationForModuleBuildWithoutPaths(
- const ModuleDeps &Deps) const {
- // Make a deep copy of the original Clang invocation.
- CompilerInvocation CI(OriginalInvocation);
+const std::vector<std::string> &ModuleDeps::getBuildArguments() {
+ assert(!std::holds_alternative<std::monostate>(BuildInfo) &&
+ "Using uninitialized ModuleDeps");
+ if (const auto *CI = std::get_if<CowCompilerInvocation>(&BuildInfo))
+ BuildInfo = CI->getCC1CommandLine();
+ return std::get<std::vector<std::string>>(BuildInfo);
+}
+
+static void optimizeHeaderSearchOpts(HeaderSearchOptions &Opts,
+ ASTReader &Reader,
+ const serialization::ModuleFile &MF) {
+ // Only preserve search paths that were used during the dependency scan.
+ std::vector<HeaderSearchOptions::Entry> Entries = Opts.UserEntries;
+ Opts.UserEntries.clear();
+
+ llvm::BitVector SearchPathUsage(Entries.size());
+ llvm::DenseSet<const serialization::ModuleFile *> Visited;
+ std::function<void(const serialization::ModuleFile *)> VisitMF =
+ [&](const serialization::ModuleFile *MF) {
+ SearchPathUsage |= MF->SearchPathUsage;
+ Visited.insert(MF);
+ for (const serialization::ModuleFile *Import : MF->Imports)
+ if (!Visited.contains(Import))
+ VisitMF(Import);
+ };
+ VisitMF(&MF);
+
+ for (auto Idx : SearchPathUsage.set_bits())
+ Opts.UserEntries.push_back(Entries[Idx]);
+}
- // Remove options incompatible with explicit module build.
+static void optimizeDiagnosticOpts(DiagnosticOptions &Opts,
+ bool IsSystemModule) {
+ // If this is not a system module or -Wsystem-headers was passed, don't
+ // optimize.
+ if (!IsSystemModule)
+ return;
+ bool Wsystem_headers = false;
+ for (StringRef Opt : Opts.Warnings) {
+ bool isPositive = !Opt.consume_front("no-");
+ if (Opt == "system-headers")
+ Wsystem_headers = isPositive;
+ }
+ if (Wsystem_headers)
+ return;
+
+ // Remove all warning flags. System modules suppress most, but not all,
+ // warnings.
+ Opts.Warnings.clear();
+ Opts.UndefPrefixes.clear();
+ Opts.Remarks.clear();
+}
+
+static std::vector<std::string> splitString(std::string S, char Separator) {
+ SmallVector<StringRef> Segments;
+ StringRef(S).split(Segments, Separator, /*MaxSplit=*/-1, /*KeepEmpty=*/false);
+ std::vector<std::string> Result;
+ Result.reserve(Segments.size());
+ for (StringRef Segment : Segments)
+ Result.push_back(Segment.str());
+ return Result;
+}
+
+void ModuleDepCollector::addOutputPaths(CowCompilerInvocation &CI,
+ ModuleDeps &Deps) {
+ CI.getMutFrontendOpts().OutputFile =
+ Controller.lookupModuleOutput(Deps.ID, ModuleOutputKind::ModuleFile);
+ if (!CI.getDiagnosticOpts().DiagnosticSerializationFile.empty())
+ CI.getMutDiagnosticOpts().DiagnosticSerializationFile =
+ Controller.lookupModuleOutput(
+ Deps.ID, ModuleOutputKind::DiagnosticSerializationFile);
+ if (!CI.getDependencyOutputOpts().OutputFile.empty()) {
+ CI.getMutDependencyOutputOpts().OutputFile = Controller.lookupModuleOutput(
+ Deps.ID, ModuleOutputKind::DependencyFile);
+ CI.getMutDependencyOutputOpts().Targets =
+ splitString(Controller.lookupModuleOutput(
+ Deps.ID, ModuleOutputKind::DependencyTargets),
+ '\0');
+ if (!CI.getDependencyOutputOpts().OutputFile.empty() &&
+ CI.getDependencyOutputOpts().Targets.empty()) {
+ // Fallback to -o as dependency target, as in the driver.
+ SmallString<128> Target;
+ quoteMakeTarget(CI.getFrontendOpts().OutputFile, Target);
+ CI.getMutDependencyOutputOpts().Targets.push_back(std::string(Target));
+ }
+ }
+}
+
+static CowCompilerInvocation
+makeCommonInvocationForModuleBuild(CompilerInvocation CI) {
+ CI.resetNonModularOptions();
+ CI.clearImplicitModuleBuildOptions();
+
+ // Remove options incompatible with explicit module build or are likely to
+ // differ between identical modules discovered from different translation
+ // units.
CI.getFrontendOpts().Inputs.clear();
CI.getFrontendOpts().OutputFile.clear();
+ // LLVM options are not going to affect the AST
+ CI.getFrontendOpts().LLVMArgs.clear();
+
+ // TODO: Figure out better way to set options to their default value.
+ CI.getCodeGenOpts().MainFileName.clear();
+ CI.getCodeGenOpts().DwarfDebugFlags.clear();
+ if (!CI.getLangOpts().ModulesCodegen) {
+ CI.getCodeGenOpts().DebugCompilationDir.clear();
+ CI.getCodeGenOpts().CoverageCompilationDir.clear();
+ CI.getCodeGenOpts().CoverageDataFile.clear();
+ CI.getCodeGenOpts().CoverageNotesFile.clear();
+ }
+
+ // Map output paths that affect behaviour to "-" so their existence is in the
+ // context hash. The final path will be computed in addOutputPaths.
+ if (!CI.getDiagnosticOpts().DiagnosticSerializationFile.empty())
+ CI.getDiagnosticOpts().DiagnosticSerializationFile = "-";
+ if (!CI.getDependencyOutputOpts().OutputFile.empty())
+ CI.getDependencyOutputOpts().OutputFile = "-";
+ CI.getDependencyOutputOpts().Targets.clear();
CI.getFrontendOpts().ProgramAction = frontend::GenerateModule;
- CI.getLangOpts()->ModuleName = Deps.ID.ModuleName;
- CI.getFrontendOpts().IsSystemModule = Deps.IsSystem;
+ CI.getFrontendOpts().ARCMTAction = FrontendOptions::ARCMT_None;
+ CI.getFrontendOpts().ObjCMTAction = FrontendOptions::ObjCMT_None;
+ CI.getFrontendOpts().MTMigrateDir.clear();
+ CI.getLangOpts().ModuleName.clear();
+
+ // Remove any macro definitions that are explicitly ignored.
+ if (!CI.getHeaderSearchOpts().ModulesIgnoreMacros.empty()) {
+ llvm::erase_if(
+ CI.getPreprocessorOpts().Macros,
+ [&CI](const std::pair<std::string, bool> &Def) {
+ StringRef MacroDef = Def.first;
+ return CI.getHeaderSearchOpts().ModulesIgnoreMacros.contains(
+ llvm::CachedHashString(MacroDef.split('=').first));
+ });
+ // Remove the now unused option.
+ CI.getHeaderSearchOpts().ModulesIgnoreMacros.clear();
+ }
+
+ return CI;
+}
+
+CowCompilerInvocation
+ModuleDepCollector::getInvocationAdjustedForModuleBuildWithoutOutputs(
+ const ModuleDeps &Deps,
+ llvm::function_ref<void(CowCompilerInvocation &)> Optimize) const {
+ CowCompilerInvocation CI = CommonInvocation;
+
+ CI.getMutLangOpts().ModuleName = Deps.ID.ModuleName;
+ CI.getMutFrontendOpts().IsSystemModule = Deps.IsSystem;
+
+ // Inputs
+ InputKind ModuleMapInputKind(CI.getFrontendOpts().DashX.getLanguage(),
+ InputKind::Format::ModuleMap);
+ CI.getMutFrontendOpts().Inputs.emplace_back(Deps.ClangModuleMapFile,
+ ModuleMapInputKind);
+
+ auto CurrentModuleMapEntry =
+ ScanInstance.getFileManager().getFile(Deps.ClangModuleMapFile);
+ assert(CurrentModuleMapEntry && "module map file entry not found");
+
+ auto DepModuleMapFiles = collectModuleMapFiles(Deps.ClangModuleDeps);
+ for (StringRef ModuleMapFile : Deps.ModuleMapFileDeps) {
+ // TODO: Track these as `FileEntryRef` to simplify the equality check below.
+ auto ModuleMapEntry = ScanInstance.getFileManager().getFile(ModuleMapFile);
+ assert(ModuleMapEntry && "module map file entry not found");
+
+ // Don't report module maps describing eagerly-loaded dependency. This
+ // information will be deserialized from the PCM.
+ // TODO: Verify this works fine when modulemap for module A is eagerly
+ // loaded from A.pcm, and module map passed on the command line contains
+ // definition of a submodule: "explicit module A.Private { ... }".
+ if (EagerLoadModules && DepModuleMapFiles.contains(*ModuleMapEntry))
+ continue;
+
+ // Don't report module map file of the current module unless it also
+ // describes a dependency (for symmetry).
+ if (*ModuleMapEntry == *CurrentModuleMapEntry &&
+ !DepModuleMapFiles.contains(*ModuleMapEntry))
+ continue;
- CI.getLangOpts()->ImplicitModules = false;
+ CI.getMutFrontendOpts().ModuleMapFiles.emplace_back(ModuleMapFile);
+ }
// Report the prebuilt modules this module uses.
- for (const auto &PrebuiltModule : Deps.PrebuiltModuleDeps) {
- CI.getFrontendOpts().ModuleFiles.push_back(PrebuiltModule.PCMFile);
- CI.getFrontendOpts().ModuleMapFiles.push_back(PrebuiltModule.ModuleMapFile);
+ for (const auto &PrebuiltModule : Deps.PrebuiltModuleDeps)
+ CI.getMutFrontendOpts().ModuleFiles.push_back(PrebuiltModule.PCMFile);
+
+ // Add module file inputs from dependencies.
+ addModuleFiles(CI, Deps.ClangModuleDeps);
+
+ if (!CI.getDiagnosticOpts().SystemHeaderWarningsModules.empty()) {
+ // Apply -Wsystem-headers-in-module for the current module.
+ if (llvm::is_contained(CI.getDiagnosticOpts().SystemHeaderWarningsModules,
+ Deps.ID.ModuleName))
+ CI.getMutDiagnosticOpts().Warnings.push_back("system-headers");
+ // Remove the now unused option(s).
+ CI.getMutDiagnosticOpts().SystemHeaderWarningsModules.clear();
}
- CI.getPreprocessorOpts().ImplicitPCHInclude.clear();
+ Optimize(CI);
return CI;
}
-static std::vector<std::string>
-serializeCompilerInvocation(const CompilerInvocation &CI) {
- // Set up string allocator.
- llvm::BumpPtrAllocator Alloc;
- llvm::StringSaver Strings(Alloc);
- auto SA = [&Strings](const Twine &Arg) { return Strings.save(Arg).data(); };
+llvm::DenseSet<const FileEntry *> ModuleDepCollector::collectModuleMapFiles(
+ ArrayRef<ModuleID> ClangModuleDeps) const {
+ llvm::DenseSet<const FileEntry *> ModuleMapFiles;
+ for (const ModuleID &MID : ClangModuleDeps) {
+ ModuleDeps *MD = ModuleDepsByID.lookup(MID);
+ assert(MD && "Inconsistent dependency info");
+ // TODO: Track ClangModuleMapFile as `FileEntryRef`.
+ auto FE = ScanInstance.getFileManager().getFile(MD->ClangModuleMapFile);
+ assert(FE && "Missing module map file that was previously found");
+ ModuleMapFiles.insert(*FE);
+ }
+ return ModuleMapFiles;
+}
- // Synthesize full command line from the CompilerInvocation, including "-cc1".
- SmallVector<const char *, 32> Args{"-cc1"};
- CI.generateCC1CommandLine(Args, SA);
+void ModuleDepCollector::addModuleMapFiles(
+ CompilerInvocation &CI, ArrayRef<ModuleID> ClangModuleDeps) const {
+ if (EagerLoadModules)
+ return; // Only pcm is needed for eager load.
- // Convert arguments to the return type.
- return std::vector<std::string>{Args.begin(), Args.end()};
+ for (const ModuleID &MID : ClangModuleDeps) {
+ ModuleDeps *MD = ModuleDepsByID.lookup(MID);
+ assert(MD && "Inconsistent dependency info");
+ CI.getFrontendOpts().ModuleMapFiles.push_back(MD->ClangModuleMapFile);
+ }
}
-std::vector<std::string> ModuleDeps::getCanonicalCommandLine(
- std::function<StringRef(ModuleID)> LookupPCMPath,
- std::function<const ModuleDeps &(ModuleID)> LookupModuleDeps) const {
- CompilerInvocation CI(Invocation);
- FrontendOptions &FrontendOpts = CI.getFrontendOpts();
+void ModuleDepCollector::addModuleFiles(
+ CompilerInvocation &CI, ArrayRef<ModuleID> ClangModuleDeps) const {
+ for (const ModuleID &MID : ClangModuleDeps) {
+ std::string PCMPath =
+ Controller.lookupModuleOutput(MID, ModuleOutputKind::ModuleFile);
+ if (EagerLoadModules)
+ CI.getFrontendOpts().ModuleFiles.push_back(std::move(PCMPath));
+ else
+ CI.getHeaderSearchOpts().PrebuiltModuleFiles.insert(
+ {MID.ModuleName, std::move(PCMPath)});
+ }
+}
- InputKind ModuleMapInputKind(FrontendOpts.DashX.getLanguage(),
- InputKind::Format::ModuleMap);
- FrontendOpts.Inputs.emplace_back(ClangModuleMapFile, ModuleMapInputKind);
- FrontendOpts.OutputFile = std::string(LookupPCMPath(ID));
-
- dependencies::detail::collectPCMAndModuleMapPaths(
- ClangModuleDeps, LookupPCMPath, LookupModuleDeps,
- FrontendOpts.ModuleFiles, FrontendOpts.ModuleMapFiles);
-
- return serializeCompilerInvocation(CI);
-}
-
-std::vector<std::string>
-ModuleDeps::getCanonicalCommandLineWithoutModulePaths() const {
- return serializeCompilerInvocation(Invocation);
-}
-
-void dependencies::detail::collectPCMAndModuleMapPaths(
- llvm::ArrayRef<ModuleID> Modules,
- std::function<StringRef(ModuleID)> LookupPCMPath,
- std::function<const ModuleDeps &(ModuleID)> LookupModuleDeps,
- std::vector<std::string> &PCMPaths, std::vector<std::string> &ModMapPaths) {
- llvm::StringSet<> AlreadyAdded;
-
- std::function<void(llvm::ArrayRef<ModuleID>)> AddArgs =
- [&](llvm::ArrayRef<ModuleID> Modules) {
- for (const ModuleID &MID : Modules) {
- if (!AlreadyAdded.insert(MID.ModuleName + MID.ContextHash).second)
- continue;
- const ModuleDeps &M = LookupModuleDeps(MID);
- // Depth first traversal.
- AddArgs(M.ClangModuleDeps);
- PCMPaths.push_back(LookupPCMPath(MID).str());
- if (!M.ClangModuleMapFile.empty())
- ModMapPaths.push_back(M.ClangModuleMapFile);
- }
- };
+void ModuleDepCollector::addModuleFiles(
+ CowCompilerInvocation &CI, ArrayRef<ModuleID> ClangModuleDeps) const {
+ for (const ModuleID &MID : ClangModuleDeps) {
+ std::string PCMPath =
+ Controller.lookupModuleOutput(MID, ModuleOutputKind::ModuleFile);
+ if (EagerLoadModules)
+ CI.getMutFrontendOpts().ModuleFiles.push_back(std::move(PCMPath));
+ else
+ CI.getMutHeaderSearchOpts().PrebuiltModuleFiles.insert(
+ {MID.ModuleName, std::move(PCMPath)});
+ }
+}
+
+static bool needsModules(FrontendInputFile FIF) {
+ switch (FIF.getKind().getLanguage()) {
+ case Language::Unknown:
+ case Language::Asm:
+ case Language::LLVM_IR:
+ return false;
+ default:
+ return true;
+ }
+}
+
+void ModuleDepCollector::applyDiscoveredDependencies(CompilerInvocation &CI) {
+ CI.clearImplicitModuleBuildOptions();
+
+ if (llvm::any_of(CI.getFrontendOpts().Inputs, needsModules)) {
+ Preprocessor &PP = ScanInstance.getPreprocessor();
+ if (Module *CurrentModule = PP.getCurrentModuleImplementation())
+ if (OptionalFileEntryRef CurrentModuleMap =
+ PP.getHeaderSearchInfo()
+ .getModuleMap()
+ .getModuleMapFileForUniquing(CurrentModule))
+ CI.getFrontendOpts().ModuleMapFiles.emplace_back(
+ CurrentModuleMap->getNameAsRequested());
+
+ SmallVector<ModuleID> DirectDeps;
+ for (const auto &KV : ModularDeps)
+ if (DirectModularDeps.contains(KV.first))
+ DirectDeps.push_back(KV.second->ID);
+
+ // TODO: Report module maps the same way it's done for modular dependencies.
+ addModuleMapFiles(CI, DirectDeps);
+
+ addModuleFiles(CI, DirectDeps);
+
+ for (const auto &KV : DirectPrebuiltModularDeps)
+ CI.getFrontendOpts().ModuleFiles.push_back(KV.second.PCMFile);
+ }
+}
+
+static std::string getModuleContextHash(const ModuleDeps &MD,
+ const CowCompilerInvocation &CI,
+ bool EagerLoadModules,
+ llvm::vfs::FileSystem &VFS) {
+ llvm::HashBuilder<llvm::TruncatedBLAKE3<16>, llvm::endianness::native>
+ HashBuilder;
+ SmallString<32> Scratch;
+
+ // Hash the compiler version and serialization version to ensure the module
+ // will be readable.
+ HashBuilder.add(getClangFullRepositoryVersion());
+ HashBuilder.add(serialization::VERSION_MAJOR, serialization::VERSION_MINOR);
+ llvm::ErrorOr<std::string> CWD = VFS.getCurrentWorkingDirectory();
+ if (CWD)
+ HashBuilder.add(*CWD);
+
+ // Hash the BuildInvocation without any input files.
+ SmallString<0> ArgVec;
+ ArgVec.reserve(4096);
+ CI.generateCC1CommandLine([&](const Twine &Arg) {
+ Arg.toVector(ArgVec);
+ ArgVec.push_back('\0');
+ });
+ HashBuilder.add(ArgVec);
+
+ // Hash the module dependencies. These paths may differ even if the invocation
+ // is identical if they depend on the contents of the files in the TU -- for
+ // example, case-insensitive paths to modulemap files. Usually such a case
+ // would indicate a missed optimization to canonicalize, but it may be
+ // difficult to canonicalize all cases when there is a VFS.
+ for (const auto &ID : MD.ClangModuleDeps) {
+ HashBuilder.add(ID.ModuleName);
+ HashBuilder.add(ID.ContextHash);
+ }
+
+ HashBuilder.add(EagerLoadModules);
- AddArgs(Modules);
+ llvm::BLAKE3Result<16> Hash = HashBuilder.final();
+ std::array<uint64_t, 2> Words;
+ static_assert(sizeof(Hash) == sizeof(Words), "Hash must match Words");
+ std::memcpy(Words.data(), Hash.data(), sizeof(Hash));
+ return toString(llvm::APInt(sizeof(Words) * 8, Words), 36, /*Signed=*/false);
}
-void ModuleDepCollectorPP::FileChanged(SourceLocation Loc,
- FileChangeReason Reason,
- SrcMgr::CharacteristicKind FileType,
- FileID PrevFID) {
- if (Reason != PPCallbacks::EnterFile)
+void ModuleDepCollector::associateWithContextHash(
+ const CowCompilerInvocation &CI, ModuleDeps &Deps) {
+ Deps.ID.ContextHash = getModuleContextHash(
+ Deps, CI, EagerLoadModules, ScanInstance.getVirtualFileSystem());
+ bool Inserted = ModuleDepsByID.insert({Deps.ID, &Deps}).second;
+ (void)Inserted;
+ assert(Inserted && "duplicate module mapping");
+}
+
+void ModuleDepCollectorPP::LexedFileChanged(FileID FID,
+ LexedFileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType,
+ FileID PrevFID,
+ SourceLocation Loc) {
+ if (Reason != LexedFileChangeReason::EnterFile)
return;
-
+
// This has to be delayed as the context hash can change at the start of
// `CompilerInstance::ExecuteAction`.
if (MDC.ContextHash.empty()) {
- MDC.ContextHash = Instance.getInvocation().getModuleHash();
+ MDC.ContextHash = MDC.ScanInstance.getInvocation().getModuleHash();
MDC.Consumer.handleContextHash(MDC.ContextHash);
}
- SourceManager &SM = Instance.getSourceManager();
+ SourceManager &SM = MDC.ScanInstance.getSourceManager();
// Dependency generation really does want to go all the way to the
// file entry for a source location to find out what is depended on.
// We do not want #line markers to affect dependency generation!
- if (Optional<StringRef> Filename =
- SM.getNonBuiltinFilenameForID(SM.getFileID(SM.getExpansionLoc(Loc))))
- MDC.FileDeps.push_back(
- std::string(llvm::sys::path::remove_leading_dotslash(*Filename)));
+ if (std::optional<StringRef> Filename = SM.getNonBuiltinFilenameForID(FID))
+ MDC.addFileDep(llvm::sys::path::remove_leading_dotslash(*Filename));
}
void ModuleDepCollectorPP::InclusionDirective(
SourceLocation HashLoc, const Token &IncludeTok, StringRef FileName,
- bool IsAngled, CharSourceRange FilenameRange, const FileEntry *File,
+ bool IsAngled, CharSourceRange FilenameRange, OptionalFileEntryRef File,
StringRef SearchPath, StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) {
if (!File && !Imported) {
// This is a non-modular include that HeaderSearch failed to find. Add it
// here as `FileChanged` will never see it.
- MDC.FileDeps.push_back(std::string(FileName));
+ MDC.addFileDep(FileName);
}
handleImport(Imported);
}
@@ -147,6 +409,14 @@ void ModuleDepCollectorPP::InclusionDirective(
void ModuleDepCollectorPP::moduleImport(SourceLocation ImportLoc,
ModuleIdPath Path,
const Module *Imported) {
+ if (MDC.ScanInstance.getPreprocessor().isInImportingCXXNamedModules()) {
+ P1689ModuleInfo RequiredModule;
+ RequiredModule.ModuleName = Path[0].first->getName().str();
+ RequiredModule.Type = P1689ModuleInfo::ModuleType::NamedCXXModule;
+ MDC.RequiredStdCXXModules.push_back(RequiredModule);
+ return;
+ }
+
handleImport(Imported);
}
@@ -157,98 +427,187 @@ void ModuleDepCollectorPP::handleImport(const Module *Imported) {
const Module *TopLevelModule = Imported->getTopLevelModule();
if (MDC.isPrebuiltModule(TopLevelModule))
- DirectPrebuiltModularDeps.insert(TopLevelModule);
+ MDC.DirectPrebuiltModularDeps.insert(
+ {TopLevelModule, PrebuiltModuleDep{TopLevelModule}});
else
- DirectModularDeps.insert(TopLevelModule);
+ MDC.DirectModularDeps.insert(TopLevelModule);
}
void ModuleDepCollectorPP::EndOfMainFile() {
- FileID MainFileID = Instance.getSourceManager().getMainFileID();
- MDC.MainFile = std::string(
- Instance.getSourceManager().getFileEntryForID(MainFileID)->getName());
-
- if (!Instance.getPreprocessorOpts().ImplicitPCHInclude.empty())
- MDC.FileDeps.push_back(Instance.getPreprocessorOpts().ImplicitPCHInclude);
-
- for (const Module *M : DirectModularDeps) {
- // A top-level module might not be actually imported as a module when
- // -fmodule-name is used to compile a translation unit that imports this
- // module. In that case it can be skipped. The appropriate header
- // dependencies will still be reported as expected.
- if (!M->getASTFile())
- continue;
- handleTopLevelModule(M);
+ FileID MainFileID = MDC.ScanInstance.getSourceManager().getMainFileID();
+ MDC.MainFile = std::string(MDC.ScanInstance.getSourceManager()
+ .getFileEntryRefForID(MainFileID)
+ ->getName());
+
+ auto &PP = MDC.ScanInstance.getPreprocessor();
+ if (PP.isInNamedModule()) {
+ P1689ModuleInfo ProvidedModule;
+ ProvidedModule.ModuleName = PP.getNamedModuleName();
+ ProvidedModule.Type = P1689ModuleInfo::ModuleType::NamedCXXModule;
+ ProvidedModule.IsStdCXXModuleInterface = PP.isInNamedInterfaceUnit();
+ // Don't put implementation (non partition) unit as Provide.
+ // Put the module as required instead. Since the implementation
+ // unit will import the primary module implicitly.
+ if (PP.isInImplementationUnit())
+ MDC.RequiredStdCXXModules.push_back(ProvidedModule);
+ else
+ MDC.ProvidedStdCXXModule = ProvidedModule;
}
+ if (!MDC.ScanInstance.getPreprocessorOpts().ImplicitPCHInclude.empty())
+ MDC.addFileDep(MDC.ScanInstance.getPreprocessorOpts().ImplicitPCHInclude);
+
+ for (const Module *M :
+ MDC.ScanInstance.getPreprocessor().getAffectingClangModules())
+ if (!MDC.isPrebuiltModule(M))
+ MDC.DirectModularDeps.insert(M);
+
+ for (const Module *M : MDC.DirectModularDeps)
+ handleTopLevelModule(M);
+
MDC.Consumer.handleDependencyOutputOpts(*MDC.Opts);
+ if (MDC.IsStdModuleP1689Format)
+ MDC.Consumer.handleProvidedAndRequiredStdCXXModules(
+ MDC.ProvidedStdCXXModule, MDC.RequiredStdCXXModules);
+
for (auto &&I : MDC.ModularDeps)
- MDC.Consumer.handleModuleDependency(I.second);
+ MDC.Consumer.handleModuleDependency(*I.second);
+
+ for (const Module *M : MDC.DirectModularDeps) {
+ auto It = MDC.ModularDeps.find(M);
+ // Only report direct dependencies that were successfully handled.
+ if (It != MDC.ModularDeps.end())
+ MDC.Consumer.handleDirectModuleDependency(MDC.ModularDeps[M]->ID);
+ }
for (auto &&I : MDC.FileDeps)
MDC.Consumer.handleFileDependency(I);
- for (auto &&I : DirectPrebuiltModularDeps)
- MDC.Consumer.handlePrebuiltModuleDependency(PrebuiltModuleDep{I});
+ for (auto &&I : MDC.DirectPrebuiltModularDeps)
+ MDC.Consumer.handlePrebuiltModuleDependency(I.second);
}
-ModuleID ModuleDepCollectorPP::handleTopLevelModule(const Module *M) {
+std::optional<ModuleID>
+ModuleDepCollectorPP::handleTopLevelModule(const Module *M) {
assert(M == M->getTopLevelModule() && "Expected top level module!");
+ // A top-level module might not be actually imported as a module when
+ // -fmodule-name is used to compile a translation unit that imports this
+ // module. In that case it can be skipped. The appropriate header
+ // dependencies will still be reported as expected.
+ if (!M->getASTFile())
+ return {};
+
// If this module has been handled already, just return its ID.
- auto ModI = MDC.ModularDeps.insert({M, ModuleDeps{}});
+ auto ModI = MDC.ModularDeps.insert({M, nullptr});
if (!ModI.second)
- return ModI.first->second.ID;
+ return ModI.first->second->ID;
- ModuleDeps &MD = ModI.first->second;
+ ModI.first->second = std::make_unique<ModuleDeps>();
+ ModuleDeps &MD = *ModI.first->second;
MD.ID.ModuleName = M->getFullModuleName();
- MD.ImportedByMainFile = DirectModularDeps.contains(M);
- MD.ImplicitModulePCMPath = std::string(M->getASTFile()->getName());
MD.IsSystem = M->IsSystem;
- const FileEntry *ModuleMap = Instance.getPreprocessor()
- .getHeaderSearchInfo()
- .getModuleMap()
- .getModuleMapFileForUniquing(M);
- MD.ClangModuleMapFile = std::string(ModuleMap ? ModuleMap->getName() : "");
+ ModuleMap &ModMapInfo =
+ MDC.ScanInstance.getPreprocessor().getHeaderSearchInfo().getModuleMap();
+
+ OptionalFileEntryRef ModuleMap = ModMapInfo.getModuleMapFileForUniquing(M);
+
+ if (ModuleMap) {
+ SmallString<128> Path = ModuleMap->getNameAsRequested();
+ ModMapInfo.canonicalizeModuleMapPath(Path);
+ MD.ClangModuleMapFile = std::string(Path);
+ }
serialization::ModuleFile *MF =
- MDC.Instance.getASTReader()->getModuleManager().lookup(M->getASTFile());
- MDC.Instance.getASTReader()->visitInputFiles(
- *MF, true, true, [&](const serialization::InputFile &IF, bool isSystem) {
+ MDC.ScanInstance.getASTReader()->getModuleManager().lookup(
+ *M->getASTFile());
+ MDC.ScanInstance.getASTReader()->visitInputFileInfos(
+ *MF, /*IncludeSystem=*/true,
+ [&](const serialization::InputFileInfo &IFI, bool IsSystem) {
// __inferred_module.map is the result of the way in which an implicit
// module build handles inferred modules. It adds an overlay VFS with
// this file in the proper directory and relies on the rest of Clang to
// handle it like normal. With explicitly built modules we don't need
// to play VFS tricks, so replace it with the correct module map.
- if (IF.getFile()->getName().endswith("__inferred_module.map")) {
- MD.FileDeps.insert(ModuleMap->getName());
+ if (StringRef(IFI.Filename).ends_with("__inferred_module.map")) {
+ MDC.addFileDep(MD, ModuleMap->getName());
return;
}
- MD.FileDeps.insert(IF.getFile()->getName());
+ MDC.addFileDep(MD, IFI.Filename);
+ });
+
+ llvm::DenseSet<const Module *> SeenDeps;
+ addAllSubmodulePrebuiltDeps(M, MD, SeenDeps);
+ addAllSubmoduleDeps(M, MD, SeenDeps);
+ addAllAffectingClangModules(M, MD, SeenDeps);
+
+ MDC.ScanInstance.getASTReader()->visitInputFileInfos(
+ *MF, /*IncludeSystem=*/true,
+ [&](const serialization::InputFileInfo &IFI, bool IsSystem) {
+ if (!(IFI.TopLevel && IFI.ModuleMap))
+ return;
+ if (StringRef(IFI.FilenameAsRequested)
+ .ends_with("__inferred_module.map"))
+ return;
+ MD.ModuleMapFileDeps.emplace_back(IFI.FilenameAsRequested);
});
- // Add direct prebuilt module dependencies now, so that we can use them when
- // creating a CompilerInvocation and computing context hash for this
- // ModuleDeps instance.
- addDirectPrebuiltModuleDeps(M, MD);
+ CowCompilerInvocation CI =
+ MDC.getInvocationAdjustedForModuleBuildWithoutOutputs(
+ MD, [&](CowCompilerInvocation &BuildInvocation) {
+ if (any(MDC.OptimizeArgs & ScanningOptimizations::HeaderSearch))
+ optimizeHeaderSearchOpts(BuildInvocation.getMutHeaderSearchOpts(),
+ *MDC.ScanInstance.getASTReader(), *MF);
+ if (any(MDC.OptimizeArgs & ScanningOptimizations::SystemWarnings))
+ optimizeDiagnosticOpts(
+ BuildInvocation.getMutDiagnosticOpts(),
+ BuildInvocation.getFrontendOpts().IsSystemModule);
+ });
- MD.Invocation = MDC.makeInvocationForModuleBuildWithoutPaths(MD);
- MD.ID.ContextHash = MD.Invocation.getModuleHash();
+ MDC.associateWithContextHash(CI, MD);
- llvm::DenseSet<const Module *> AddedModules;
- addAllSubmoduleDeps(M, MD, AddedModules);
+ // Finish the compiler invocation. Requires dependencies and the context hash.
+ MDC.addOutputPaths(CI, MD);
+
+ MD.BuildInfo = std::move(CI);
return MD.ID;
}
-void ModuleDepCollectorPP::addDirectPrebuiltModuleDeps(const Module *M,
- ModuleDeps &MD) {
+static void forEachSubmoduleSorted(const Module *M,
+ llvm::function_ref<void(const Module *)> F) {
+ // Submodule order depends on order of header includes for inferred submodules
+ // we don't care about the exact order, so sort so that it's consistent across
+ // TUs to improve sharing.
+ SmallVector<const Module *> Submodules(M->submodules());
+ llvm::stable_sort(Submodules, [](const Module *A, const Module *B) {
+ return A->Name < B->Name;
+ });
+ for (const Module *SubM : Submodules)
+ F(SubM);
+}
+
+void ModuleDepCollectorPP::addAllSubmodulePrebuiltDeps(
+ const Module *M, ModuleDeps &MD,
+ llvm::DenseSet<const Module *> &SeenSubmodules) {
+ addModulePrebuiltDeps(M, MD, SeenSubmodules);
+
+ forEachSubmoduleSorted(M, [&](const Module *SubM) {
+ addAllSubmodulePrebuiltDeps(SubM, MD, SeenSubmodules);
+ });
+}
+
+void ModuleDepCollectorPP::addModulePrebuiltDeps(
+ const Module *M, ModuleDeps &MD,
+ llvm::DenseSet<const Module *> &SeenSubmodules) {
for (const Module *Import : M->Imports)
if (Import->getTopLevelModule() != M->getTopLevelModule())
- if (MDC.isPrebuiltModule(Import))
- MD.PrebuiltModuleDeps.emplace_back(Import);
+ if (MDC.isPrebuiltModule(Import->getTopLevelModule()))
+ if (SeenSubmodules.insert(Import->getTopLevelModule()).second)
+ MD.PrebuiltModuleDeps.emplace_back(Import->getTopLevelModule());
}
void ModuleDepCollectorPP::addAllSubmoduleDeps(
@@ -256,8 +615,9 @@ void ModuleDepCollectorPP::addAllSubmoduleDeps(
llvm::DenseSet<const Module *> &AddedModules) {
addModuleDep(M, MD, AddedModules);
- for (const Module *SubM : M->submodules())
+ forEachSubmoduleSorted(M, [&](const Module *SubM) {
addAllSubmoduleDeps(SubM, MD, AddedModules);
+ });
}
void ModuleDepCollectorPP::addModuleDep(
@@ -266,21 +626,52 @@ void ModuleDepCollectorPP::addModuleDep(
for (const Module *Import : M->Imports) {
if (Import->getTopLevelModule() != M->getTopLevelModule() &&
!MDC.isPrebuiltModule(Import)) {
- ModuleID ImportID = handleTopLevelModule(Import->getTopLevelModule());
- if (AddedModules.insert(Import->getTopLevelModule()).second)
- MD.ClangModuleDeps.push_back(ImportID);
+ if (auto ImportID = handleTopLevelModule(Import->getTopLevelModule()))
+ if (AddedModules.insert(Import->getTopLevelModule()).second)
+ MD.ClangModuleDeps.push_back(*ImportID);
+ }
+ }
+}
+
+void ModuleDepCollectorPP::addAllAffectingClangModules(
+ const Module *M, ModuleDeps &MD,
+ llvm::DenseSet<const Module *> &AddedModules) {
+ addAffectingClangModule(M, MD, AddedModules);
+
+ for (const Module *SubM : M->submodules())
+ addAllAffectingClangModules(SubM, MD, AddedModules);
+}
+
+void ModuleDepCollectorPP::addAffectingClangModule(
+ const Module *M, ModuleDeps &MD,
+ llvm::DenseSet<const Module *> &AddedModules) {
+ for (const Module *Affecting : M->AffectingClangModules) {
+ assert(Affecting == Affecting->getTopLevelModule() &&
+ "Not quite import not top-level module");
+ if (Affecting != M->getTopLevelModule() &&
+ !MDC.isPrebuiltModule(Affecting)) {
+ if (auto ImportID = handleTopLevelModule(Affecting))
+ if (AddedModules.insert(Affecting).second)
+ MD.ClangModuleDeps.push_back(*ImportID);
}
}
}
ModuleDepCollector::ModuleDepCollector(
- std::unique_ptr<DependencyOutputOptions> Opts, CompilerInstance &I,
- DependencyConsumer &C, CompilerInvocation &&OriginalCI)
- : Instance(I), Consumer(C), Opts(std::move(Opts)),
- OriginalInvocation(std::move(OriginalCI)) {}
+ std::unique_ptr<DependencyOutputOptions> Opts,
+ CompilerInstance &ScanInstance, DependencyConsumer &C,
+ DependencyActionController &Controller, CompilerInvocation OriginalCI,
+ ScanningOptimizations OptimizeArgs, bool EagerLoadModules,
+ bool IsStdModuleP1689Format)
+ : ScanInstance(ScanInstance), Consumer(C), Controller(Controller),
+ Opts(std::move(Opts)),
+ CommonInvocation(
+ makeCommonInvocationForModuleBuild(std::move(OriginalCI))),
+ OptimizeArgs(OptimizeArgs), EagerLoadModules(EagerLoadModules),
+ IsStdModuleP1689Format(IsStdModuleP1689Format) {}
void ModuleDepCollector::attachToPreprocessor(Preprocessor &PP) {
- PP.addPPCallbacks(std::make_unique<ModuleDepCollectorPP>(Instance, *this));
+ PP.addPPCallbacks(std::make_unique<ModuleDepCollectorPP>(*this));
}
void ModuleDepCollector::attachToASTReader(ASTReader &R) {}
@@ -288,7 +679,7 @@ void ModuleDepCollector::attachToASTReader(ASTReader &R) {}
bool ModuleDepCollector::isPrebuiltModule(const Module *M) {
std::string Name(M->getTopLevelModuleName());
const auto &PrebuiltModuleFiles =
- Instance.getHeaderSearchOpts().PrebuiltModuleFiles;
+ ScanInstance.getHeaderSearchOpts().PrebuiltModuleFiles;
auto PrebuiltModuleFileIt = PrebuiltModuleFiles.find(Name);
if (PrebuiltModuleFileIt == PrebuiltModuleFiles.end())
return false;
@@ -296,3 +687,38 @@ bool ModuleDepCollector::isPrebuiltModule(const Module *M) {
PrebuiltModuleFileIt->second == M->getASTFile()->getName());
return true;
}
+
+static StringRef makeAbsoluteAndPreferred(CompilerInstance &CI, StringRef Path,
+ SmallVectorImpl<char> &Storage) {
+ if (llvm::sys::path::is_absolute(Path) &&
+ !llvm::sys::path::is_style_windows(llvm::sys::path::Style::native))
+ return Path;
+ Storage.assign(Path.begin(), Path.end());
+ CI.getFileManager().makeAbsolutePath(Storage);
+ llvm::sys::path::make_preferred(Storage);
+ return StringRef(Storage.data(), Storage.size());
+}
+
+void ModuleDepCollector::addFileDep(StringRef Path) {
+ if (IsStdModuleP1689Format) {
+ // Within P1689 format, we don't want all the paths to be absolute path
+ // since it may violate the tranditional make style dependencies info.
+ FileDeps.push_back(std::string(Path));
+ return;
+ }
+
+ llvm::SmallString<256> Storage;
+ Path = makeAbsoluteAndPreferred(ScanInstance, Path, Storage);
+ FileDeps.push_back(std::string(Path));
+}
+
+void ModuleDepCollector::addFileDep(ModuleDeps &MD, StringRef Path) {
+ if (IsStdModuleP1689Format) {
+ MD.FileDeps.insert(Path);
+ return;
+ }
+
+ llvm::SmallString<256> Storage;
+ Path = makeAbsoluteAndPreferred(ScanInstance, Path, Storage);
+ MD.FileDeps.insert(Path);
+}
diff --git a/contrib/llvm-project/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.cpp b/contrib/llvm-project/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.cpp
index 2f97067f6171..42691d556d98 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.cpp
@@ -161,7 +161,7 @@ CaptureMethods(std::string TypeString, const clang::CXXRecordDecl *ASTClass,
optionally(
isDerivedFrom(cxxRecordDecl(hasName("clang::TypeLoc"))
.bind("typeLocBase"))))),
- returns(asString(TypeString)))
+ returns(hasCanonicalType(asString(TypeString))))
.bind("classMethod")),
*ASTClass, *Result.Context);
diff --git a/contrib/llvm-project/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.h b/contrib/llvm-project/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.h
index 05c4f92676e8..5f2b48173f28 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.h
+++ b/contrib/llvm-project/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.h
@@ -35,7 +35,7 @@ public:
private:
void run(const ast_matchers::MatchFinder::MatchResult &Result) override;
- llvm::Optional<TraversalKind> getCheckTraversalKind() const override {
+ std::optional<TraversalKind> getCheckTraversalKind() const override {
return TK_IgnoreUnlessSpelledInSource;
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/DumpTool/ClangSrcLocDump.cpp b/contrib/llvm-project/clang/lib/Tooling/DumpTool/ClangSrcLocDump.cpp
index 8091a467d056..1529bfa75d6d 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DumpTool/ClangSrcLocDump.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DumpTool/ClangSrcLocDump.cpp
@@ -18,8 +18,8 @@
#include "clang/Tooling/Tooling.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/JSON.h"
+#include "llvm/TargetParser/Host.h"
#include "ASTSrcLocProcessor.h"
@@ -91,12 +91,8 @@ int main(int argc, const char **argv) {
llvm::transform(Args, Argv.begin(),
[](const std::string &Arg) { return Arg.c_str(); });
- IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
- unsigned MissingArgIndex, MissingArgCount;
- auto Opts = driver::getDriverOptTable();
- auto ParsedArgs = Opts.ParseArgs(llvm::makeArrayRef(Argv).slice(1),
- MissingArgIndex, MissingArgCount);
- ParseDiagnosticArgs(*DiagOpts, ParsedArgs);
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts =
+ CreateAndPopulateDiagOpts(Argv);
// Don't output diagnostics, because common scenarios such as
// cross-compiling fail with diagnostics. This is not fatal, but
@@ -115,24 +111,24 @@ int main(int argc, const char **argv) {
auto Files = llvm::makeIntrusiveRefCnt<FileManager>(FileSystemOptions(), OFS);
- auto Driver = std::make_unique<driver::Driver>(
+ auto Driver = std::make_unique<clang::driver::Driver>(
"clang", llvm::sys::getDefaultTargetTriple(), Diagnostics,
"ast-api-dump-tool", OFS);
std::unique_ptr<clang::driver::Compilation> Comp(
- Driver->BuildCompilation(llvm::makeArrayRef(Argv)));
+ Driver->BuildCompilation(llvm::ArrayRef(Argv)));
if (!Comp)
return 1;
const auto &Jobs = Comp->getJobs();
- if (Jobs.size() != 1 || !isa<driver::Command>(*Jobs.begin())) {
+ if (Jobs.size() != 1 || !isa<clang::driver::Command>(*Jobs.begin())) {
SmallString<256> error_msg;
llvm::raw_svector_ostream error_stream(error_msg);
Jobs.Print(error_stream, "; ", true);
return 1;
}
- const auto &Cmd = cast<driver::Command>(*Jobs.begin());
+ const auto &Cmd = cast<clang::driver::Command>(*Jobs.begin());
const llvm::opt::ArgStringList &CC1Args = Cmd.getArguments();
auto Invocation = std::make_unique<CompilerInvocation>();
diff --git a/contrib/llvm-project/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp b/contrib/llvm-project/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp
index 29787b8a8894..ebf8aa2a7628 100644
--- a/contrib/llvm-project/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp
@@ -7,15 +7,16 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/CompilationDatabase.h"
+#include "clang/Tooling/Tooling.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorOr.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/StringSaver.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/Triple.h"
namespace clang {
namespace tooling {
@@ -48,25 +49,9 @@ public:
private:
std::vector<CompileCommand> expand(std::vector<CompileCommand> Cmds) const {
- for (auto &Cmd : Cmds) {
- bool SeenRSPFile = false;
- llvm::SmallVector<const char *, 20> Argv;
- Argv.reserve(Cmd.CommandLine.size());
- for (auto &Arg : Cmd.CommandLine) {
- Argv.push_back(Arg.c_str());
- if (!Arg.empty())
- SeenRSPFile |= Arg.front() == '@';
- }
- if (!SeenRSPFile)
- continue;
- llvm::BumpPtrAllocator Alloc;
- llvm::StringSaver Saver(Alloc);
- llvm::cl::ExpandResponseFiles(Saver, Tokenizer, Argv, false, false,
- llvm::StringRef(Cmd.Directory), *FS);
- // Don't assign directly, Argv aliases CommandLine.
- std::vector<std::string> ExpandedArgv(Argv.begin(), Argv.end());
- Cmd.CommandLine = std::move(ExpandedArgv);
- }
+ for (auto &Cmd : Cmds)
+ tooling::addExpandedResponseFiles(Cmd.CommandLine, Cmd.Directory,
+ Tokenizer, *FS);
return Cmds;
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderAnalysis.cpp b/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderAnalysis.cpp
new file mode 100644
index 000000000000..52b634e2e1af
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderAnalysis.cpp
@@ -0,0 +1,118 @@
+//===--- HeaderAnalysis.cpp -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/Inclusions/HeaderAnalysis.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Lex/HeaderSearch.h"
+
+namespace clang::tooling {
+namespace {
+
+// Is Line an #if or #ifdef directive?
+// FIXME: This makes headers with #ifdef LINUX/WINDOWS/MACOS marked as non
+// self-contained and is probably not what we want.
+bool isIf(llvm::StringRef Line) {
+ Line = Line.ltrim();
+ if (!Line.consume_front("#"))
+ return false;
+ Line = Line.ltrim();
+ return Line.starts_with("if");
+}
+
+// Is Line an #error directive mentioning includes?
+bool isErrorAboutInclude(llvm::StringRef Line) {
+ Line = Line.ltrim();
+ if (!Line.consume_front("#"))
+ return false;
+ Line = Line.ltrim();
+ if (!Line.starts_with("error"))
+ return false;
+ return Line.contains_insensitive(
+ "includ"); // Matches "include" or "including".
+}
+
+// Heuristically headers that only want to be included via an umbrella.
+bool isDontIncludeMeHeader(StringRef Content) {
+ llvm::StringRef Line;
+ // Only sniff up to 100 lines or 10KB.
+ Content = Content.take_front(100 * 100);
+ for (unsigned I = 0; I < 100 && !Content.empty(); ++I) {
+ std::tie(Line, Content) = Content.split('\n');
+ if (isIf(Line) && isErrorAboutInclude(Content.split('\n').first))
+ return true;
+ }
+ return false;
+}
+
+bool isImportLine(llvm::StringRef Line) {
+ Line = Line.ltrim();
+ if (!Line.consume_front("#"))
+ return false;
+ Line = Line.ltrim();
+ return Line.starts_with("import");
+}
+
+llvm::StringRef getFileContents(FileEntryRef FE, const SourceManager &SM) {
+ return const_cast<SourceManager &>(SM)
+ .getMemoryBufferForFileOrNone(FE)
+ .value_or(llvm::MemoryBufferRef())
+ .getBuffer();
+}
+
+} // namespace
+
+bool isSelfContainedHeader(FileEntryRef FE, const SourceManager &SM,
+ const HeaderSearch &HeaderInfo) {
+ if (!HeaderInfo.isFileMultipleIncludeGuarded(FE) &&
+ !HeaderInfo.hasFileBeenImported(FE) &&
+ // Any header that contains #imports is supposed to be #import'd so no
+ // need to check for anything but the main-file.
+ (SM.getFileEntryForID(SM.getMainFileID()) != FE ||
+ !codeContainsImports(getFileContents(FE, SM))))
+ return false;
+ // This pattern indicates that a header can't be used without
+ // particular preprocessor state, usually set up by another header.
+ return !isDontIncludeMeHeader(getFileContents(FE, SM));
+}
+
+bool codeContainsImports(llvm::StringRef Code) {
+ // Only sniff up to 100 lines or 10KB.
+ Code = Code.take_front(100 * 100);
+ llvm::StringRef Line;
+ for (unsigned I = 0; I < 100 && !Code.empty(); ++I) {
+ std::tie(Line, Code) = Code.split('\n');
+ if (isImportLine(Line))
+ return true;
+ }
+ return false;
+}
+
+std::optional<StringRef> parseIWYUPragma(const char *Text) {
+ // Skip the comment start, // or /*.
+ if (Text[0] != '/' || (Text[1] != '/' && Text[1] != '*'))
+ return std::nullopt;
+ bool BlockComment = Text[1] == '*';
+ Text += 2;
+
+ // Per spec, direcitves are whitespace- and case-sensitive.
+ constexpr llvm::StringLiteral IWYUPragma = " IWYU pragma: ";
+ if (strncmp(Text, IWYUPragma.data(), IWYUPragma.size()))
+ return std::nullopt;
+ Text += IWYUPragma.size();
+ const char *End = Text;
+ while (*End != 0 && *End != '\n')
+ ++End;
+ StringRef Rest(Text, End - Text);
+ // Strip off whitespace and comment markers to avoid confusion. This isn't
+ // fully-compatible with IWYU, which splits into whitespace-delimited tokens.
+ if (BlockComment)
+ Rest.consume_back("*/");
+ return Rest.trim();
+}
+
+} // namespace clang::tooling
diff --git a/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp b/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
index fbceb26c39c7..d275222ac6b5 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
@@ -10,9 +10,9 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Path.h"
+#include <optional>
namespace clang {
namespace tooling {
@@ -43,8 +43,9 @@ unsigned getOffsetAfterTokenSequence(
GetOffsetAfterSequence) {
SourceManagerForFile VirtualSM(FileName, Code);
SourceManager &SM = VirtualSM.get();
+ LangOptions LangOpts = createLangOpts();
Lexer Lex(SM.getMainFileID(), SM.getBufferOrFake(SM.getMainFileID()), SM,
- createLangOpts());
+ LangOpts);
Token Tok;
// Get the first token.
Lex.LexFromRawLexer(Tok);
@@ -57,7 +58,7 @@ unsigned getOffsetAfterTokenSequence(
// (second) raw_identifier name is checked.
bool checkAndConsumeDirectiveWithName(
Lexer &Lex, StringRef Name, Token &Tok,
- llvm::Optional<StringRef> RawIDName = llvm::None) {
+ std::optional<StringRef> RawIDName = std::nullopt) {
bool Matched = Tok.is(tok::hash) && !Lex.LexFromRawLexer(Tok) &&
Tok.is(tok::raw_identifier) &&
Tok.getRawIdentifier() == Name && !Lex.LexFromRawLexer(Tok) &&
@@ -195,10 +196,10 @@ IncludeCategoryManager::IncludeCategoryManager(const IncludeStyle &Style,
? llvm::Regex::NoFlags
: llvm::Regex::IgnoreCase);
}
- IsMainFile = FileName.endswith(".c") || FileName.endswith(".cc") ||
- FileName.endswith(".cpp") || FileName.endswith(".c++") ||
- FileName.endswith(".cxx") || FileName.endswith(".m") ||
- FileName.endswith(".mm");
+ IsMainFile = FileName.ends_with(".c") || FileName.ends_with(".cc") ||
+ FileName.ends_with(".cpp") || FileName.ends_with(".c++") ||
+ FileName.ends_with(".cxx") || FileName.ends_with(".m") ||
+ FileName.ends_with(".mm");
if (!Style.IncludeIsMainSourceRegex.empty()) {
llvm::Regex MainFileRegex(Style.IncludeIsMainSourceRegex);
IsMainFile |= MainFileRegex.match(FileName);
@@ -233,7 +234,7 @@ int IncludeCategoryManager::getSortIncludePriority(StringRef IncludeName,
return Ret;
}
bool IncludeCategoryManager::isMainHeader(StringRef IncludeName) const {
- if (!IncludeName.startswith("\""))
+ if (!IncludeName.starts_with("\""))
return false;
IncludeName =
@@ -252,7 +253,7 @@ bool IncludeCategoryManager::isMainHeader(StringRef IncludeName) const {
// 1) foo.h => bar.cc
// 2) foo.proto.h => foo.cc
StringRef Matching;
- if (MatchingFileStem.startswith_insensitive(HeaderStem))
+ if (MatchingFileStem.starts_with_insensitive(HeaderStem))
Matching = MatchingFileStem; // example 1), 2)
else if (FileStem.equals_insensitive(HeaderStem))
Matching = FileStem; // example 3)
@@ -265,6 +266,8 @@ bool IncludeCategoryManager::isMainHeader(StringRef IncludeName) const {
return false;
}
+const llvm::Regex HeaderIncludes::IncludeRegex(IncludeRegexPattern);
+
HeaderIncludes::HeaderIncludes(StringRef FileName, StringRef Code,
const IncludeStyle &Style)
: FileName(FileName), Code(Code), FirstIncludeOffset(-1),
@@ -273,8 +276,8 @@ HeaderIncludes::HeaderIncludes(StringRef FileName, StringRef Code,
MaxInsertOffset(MinInsertOffset +
getMaxHeaderInsertionOffset(
FileName, Code.drop_front(MinInsertOffset), Style)),
- Categories(Style, FileName),
- IncludeRegex(llvm::Regex(IncludeRegexPattern)) {
+ MainIncludeFound(false),
+ Categories(Style, FileName) {
// Add 0 for main header and INT_MAX for headers that are not in any
// category.
Priorities = {0, INT_MAX};
@@ -294,7 +297,9 @@ HeaderIncludes::HeaderIncludes(StringRef FileName, StringRef Code,
addExistingInclude(
Include(Matches[2],
tooling::Range(
- Offset, std::min(Line.size() + 1, Code.size() - Offset))),
+ Offset, std::min(Line.size() + 1, Code.size() - Offset)),
+ Matches[1] == "import" ? tooling::IncludeDirective::Import
+ : tooling::IncludeDirective::Include),
NextLineOffset);
}
Offset = NextLineOffset;
@@ -331,7 +336,9 @@ void HeaderIncludes::addExistingInclude(Include IncludeToAdd,
// Only record the offset of current #include if we can insert after it.
if (CurInclude.R.getOffset() <= MaxInsertOffset) {
int Priority = Categories.getIncludePriority(
- CurInclude.Name, /*CheckMainHeader=*/FirstIncludeOffset < 0);
+ CurInclude.Name, /*CheckMainHeader=*/!MainIncludeFound);
+ if (Priority == 0)
+ MainIncludeFound = true;
CategoryEndOffsets[Priority] = NextLineOffset;
IncludesByPriority[Priority].push_back(&CurInclude);
if (FirstIncludeOffset < 0)
@@ -339,23 +346,26 @@ void HeaderIncludes::addExistingInclude(Include IncludeToAdd,
}
}
-llvm::Optional<tooling::Replacement>
-HeaderIncludes::insert(llvm::StringRef IncludeName, bool IsAngled) const {
+std::optional<tooling::Replacement>
+HeaderIncludes::insert(llvm::StringRef IncludeName, bool IsAngled,
+ IncludeDirective Directive) const {
assert(IncludeName == trimInclude(IncludeName));
// If a <header> ("header") already exists in code, "header" (<header>) with
- // different quotation will still be inserted.
+ // different quotation and/or directive will still be inserted.
// FIXME: figure out if this is the best behavior.
auto It = ExistingIncludes.find(IncludeName);
- if (It != ExistingIncludes.end())
+ if (It != ExistingIncludes.end()) {
for (const auto &Inc : It->second)
- if ((IsAngled && StringRef(Inc.Name).startswith("<")) ||
- (!IsAngled && StringRef(Inc.Name).startswith("\"")))
- return llvm::None;
+ if (Inc.Directive == Directive &&
+ ((IsAngled && StringRef(Inc.Name).starts_with("<")) ||
+ (!IsAngled && StringRef(Inc.Name).starts_with("\""))))
+ return std::nullopt;
+ }
std::string Quoted =
std::string(llvm::formatv(IsAngled ? "<{0}>" : "\"{0}\"", IncludeName));
StringRef QuotedName = Quoted;
int Priority = Categories.getIncludePriority(
- QuotedName, /*CheckMainHeader=*/FirstIncludeOffset < 0);
+ QuotedName, /*CheckMainHeader=*/!MainIncludeFound);
auto CatOffset = CategoryEndOffsets.find(Priority);
assert(CatOffset != CategoryEndOffsets.end());
unsigned InsertOffset = CatOffset->second; // Fall back offset
@@ -369,8 +379,10 @@ HeaderIncludes::insert(llvm::StringRef IncludeName, bool IsAngled) const {
}
}
assert(InsertOffset <= Code.size());
+ llvm::StringRef DirectiveSpelling =
+ Directive == IncludeDirective::Include ? "include" : "import";
std::string NewInclude =
- std::string(llvm::formatv("#include {0}\n", QuotedName));
+ llvm::formatv("#{0} {1}\n", DirectiveSpelling, QuotedName);
// When inserting headers at end of the code, also append '\n' to the code
// if it does not end with '\n'.
// FIXME: when inserting multiple #includes at the end of code, only one
@@ -388,8 +400,8 @@ tooling::Replacements HeaderIncludes::remove(llvm::StringRef IncludeName,
if (Iter == ExistingIncludes.end())
return Result;
for (const auto &Inc : Iter->second) {
- if ((IsAngled && StringRef(Inc.Name).startswith("\"")) ||
- (!IsAngled && StringRef(Inc.Name).startswith("<")))
+ if ((IsAngled && StringRef(Inc.Name).starts_with("\"")) ||
+ (!IsAngled && StringRef(Inc.Name).starts_with("<")))
continue;
llvm::Error Err = Result.add(tooling::Replacement(
FileName, Inc.R.getOffset(), Inc.R.getLength(), ""));
diff --git a/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/CSymbolMap.inc b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/CSymbolMap.inc
new file mode 100644
index 000000000000..463ce921f067
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/CSymbolMap.inc
@@ -0,0 +1,945 @@
+//===-- gen_std.py generated file -------------------------------*- C++ -*-===//
+//
+// Used to build a lookup table (qualified names => include headers) for C
+// Standard Library symbols.
+//
+// This file was generated automatically by
+// clang/tools/include-mapping/gen_std.py, DO NOT EDIT!
+//
+// Generated from cppreference offline HTML book (modified on 2018-10-28).
+//===----------------------------------------------------------------------===//
+
+SYMBOL(ATOMIC_BOOL_LOCK_FREE, None, <stdatomic.h>)
+SYMBOL(ATOMIC_CHAR16_T_LOCK_FREE, None, <stdatomic.h>)
+SYMBOL(ATOMIC_CHAR32_T_LOCK_FREE, None, <stdatomic.h>)
+SYMBOL(ATOMIC_CHAR_LOCK_FREE, None, <stdatomic.h>)
+SYMBOL(ATOMIC_FLAG_INIT, None, <stdatomic.h>)
+SYMBOL(ATOMIC_INT_LOCK_FREE, None, <stdatomic.h>)
+SYMBOL(ATOMIC_LLONG_LOCK_FREE, None, <stdatomic.h>)
+SYMBOL(ATOMIC_LONG_LOGK_FREE, None, <stdatomic.h>)
+SYMBOL(ATOMIC_POINTER_LOCK_FREE, None, <stdatomic.h>)
+SYMBOL(ATOMIC_SHORT_LOCK_FREE, None, <stdatomic.h>)
+SYMBOL(ATOMIC_VAR_INIT, None, <stdatomic.h>)
+SYMBOL(ATOMIC_WCHAR_T_LOCK_FREE, None, <stdatomic.h>)
+SYMBOL(BUFSIZ, None, <stdio.h>)
+SYMBOL(CHAR_BIT, None, <limits.h>)
+SYMBOL(CHAR_MAX, None, <limits.h>)
+SYMBOL(CHAR_MIN, None, <limits.h>)
+SYMBOL(CLOCKS_PER_SEC, None, <time.h>)
+SYMBOL(CMPLX, None, <complex.h>)
+SYMBOL(CMPLXF, None, <complex.h>)
+SYMBOL(CMPLXL, None, <complex.h>)
+SYMBOL(DBL_DECIMAL_DIG, None, <float.h>)
+SYMBOL(DBL_DIG, None, <float.h>)
+SYMBOL(DBL_EPSILON, None, <float.h>)
+SYMBOL(DBL_HAS_SUBNORM, None, <float.h>)
+SYMBOL(DBL_MANT_DIG, None, <float.h>)
+SYMBOL(DBL_MAX, None, <float.h>)
+SYMBOL(DBL_MAX_10_EXP, None, <float.h>)
+SYMBOL(DBL_MAX_EXP, None, <float.h>)
+SYMBOL(DBL_MIN, None, <float.h>)
+SYMBOL(DBL_MIN_10_EXP, None, <float.h>)
+SYMBOL(DBL_MIN_EXP, None, <float.h>)
+SYMBOL(DBL_TRUE_MIN, None, <float.h>)
+SYMBOL(DECIMAL_DIG, None, <float.h>)
+SYMBOL(EDOM, None, <errno.h>)
+SYMBOL(EILSEQ, None, <errno.h>)
+SYMBOL(EOF, None, <stdio.h>)
+SYMBOL(ERANGE, None, <errno.h>)
+SYMBOL(EXIT_FAILURE, None, <stdlib.h>)
+SYMBOL(EXIT_SUCCESS, None, <stdlib.h>)
+SYMBOL(FE_ALL_EXCEPT, None, <fenv.h>)
+SYMBOL(FE_DFL_ENV, None, <fenv.h>)
+SYMBOL(FE_DIVBYZERO, None, <fenv.h>)
+SYMBOL(FE_DOWNWARD, None, <fenv.h>)
+SYMBOL(FE_INEXACT, None, <fenv.h>)
+SYMBOL(FE_INVALID, None, <fenv.h>)
+SYMBOL(FE_OVERFLOW, None, <fenv.h>)
+SYMBOL(FE_TONEAREST, None, <fenv.h>)
+SYMBOL(FE_TOWARDZERO, None, <fenv.h>)
+SYMBOL(FE_UNDERFLOW, None, <fenv.h>)
+SYMBOL(FE_UPWARD, None, <fenv.h>)
+SYMBOL(FILE, None, <stdio.h>)
+SYMBOL(FILENAME_MAX, None, <stdio.h>)
+SYMBOL(FLT_DECIMAL_DIG, None, <float.h>)
+SYMBOL(FLT_DIG, None, <float.h>)
+SYMBOL(FLT_EPSILON, None, <float.h>)
+SYMBOL(FLT_EVAL_METHOD, None, <float.h>)
+SYMBOL(FLT_HAS_SUBNORM, None, <float.h>)
+SYMBOL(FLT_MANT_DIG, None, <float.h>)
+SYMBOL(FLT_MAX, None, <float.h>)
+SYMBOL(FLT_MAX_10_EXP, None, <float.h>)
+SYMBOL(FLT_MAX_EXP, None, <float.h>)
+SYMBOL(FLT_MIN, None, <float.h>)
+SYMBOL(FLT_MIN_10_EXP, None, <float.h>)
+SYMBOL(FLT_MIN_EXP, None, <float.h>)
+SYMBOL(FLT_RADIX, None, <float.h>)
+SYMBOL(FLT_ROUNDS, None, <float.h>)
+SYMBOL(FLT_TRUE_MIN, None, <float.h>)
+SYMBOL(FOPEN_MAX, None, <stdio.h>)
+SYMBOL(FP_INFINITE, None, <math.h>)
+SYMBOL(FP_NAN, None, <math.h>)
+SYMBOL(FP_NORNAL, None, <math.h>)
+SYMBOL(FP_SUBNORMAL, None, <math.h>)
+SYMBOL(FP_ZERO, None, <math.h>)
+SYMBOL(HUGE_VAL, None, <math.h>)
+SYMBOL(HUGE_VALF, None, <math.h>)
+SYMBOL(HUGE_VALL, None, <math.h>)
+SYMBOL(I, None, <complex.h>)
+SYMBOL(INFINITY, None, <math.h>)
+SYMBOL(INT16_MAX, None, <stdint.h>)
+SYMBOL(INT16_MIN, None, <stdint.h>)
+SYMBOL(INT32_MAX, None, <stdint.h>)
+SYMBOL(INT32_MIN, None, <stdint.h>)
+SYMBOL(INT64_MAX, None, <stdint.h>)
+SYMBOL(INT64_MIN, None, <stdint.h>)
+SYMBOL(INT8_MAX, None, <stdint.h>)
+SYMBOL(INT8_MIN, None, <stdint.h>)
+SYMBOL(INTMAX_MAX, None, <stdint.h>)
+SYMBOL(INTMAX_MIN, None, <stdint.h>)
+SYMBOL(INTPTR_MAX, None, <stdint.h>)
+SYMBOL(INTPTR_MIN, None, <stdint.h>)
+SYMBOL(INT_FAST16_MAX, None, <stdint.h>)
+SYMBOL(INT_FAST16_MIN, None, <stdint.h>)
+SYMBOL(INT_FAST32_MAX, None, <stdint.h>)
+SYMBOL(INT_FAST32_MIN, None, <stdint.h>)
+SYMBOL(INT_FAST64_MAX, None, <stdint.h>)
+SYMBOL(INT_FAST64_MIN, None, <stdint.h>)
+SYMBOL(INT_FAST8_MAX, None, <stdint.h>)
+SYMBOL(INT_FAST8_MIN, None, <stdint.h>)
+SYMBOL(INT_LEAST16_MAX, None, <stdint.h>)
+SYMBOL(INT_LEAST16_MIN, None, <stdint.h>)
+SYMBOL(INT_LEAST32_MAX, None, <stdint.h>)
+SYMBOL(INT_LEAST32_MIN, None, <stdint.h>)
+SYMBOL(INT_LEAST64_MAX, None, <stdint.h>)
+SYMBOL(INT_LEAST64_MIN, None, <stdint.h>)
+SYMBOL(INT_LEAST8_MAX, None, <stdint.h>)
+SYMBOL(INT_LEAST8_MIN, None, <stdint.h>)
+SYMBOL(INT_MAX, None, <limits.h>)
+SYMBOL(INT_MIN, None, <limits.h>)
+SYMBOL(LC_ALL, None, <locale.h>)
+SYMBOL(LC_COLLATE, None, <locale.h>)
+SYMBOL(LC_CTYPE, None, <locale.h>)
+SYMBOL(LC_MONETARY, None, <locale.h>)
+SYMBOL(LC_NUMERIC, None, <locale.h>)
+SYMBOL(LC_TIME, None, <locale.h>)
+SYMBOL(LDBL_DECIMAL_DIG, None, <float.h>)
+SYMBOL(LDBL_DIG, None, <float.h>)
+SYMBOL(LDBL_EPSILON, None, <float.h>)
+SYMBOL(LDBL_HAS_SUBNORM, None, <float.h>)
+SYMBOL(LDBL_MANT_DIG, None, <float.h>)
+SYMBOL(LDBL_MAX, None, <float.h>)
+SYMBOL(LDBL_MAX_10_EXP, None, <float.h>)
+SYMBOL(LDBL_MAX_EXP, None, <float.h>)
+SYMBOL(LDBL_MIN, None, <float.h>)
+SYMBOL(LDBL_MIN_10_EXP, None, <float.h>)
+SYMBOL(LDBL_MIN_EXP, None, <float.h>)
+SYMBOL(LDBL_TRUE_MIN, None, <float.h>)
+SYMBOL(LLONG_MAX, None, <limits.h>)
+SYMBOL(LLONG_MIN, None, <limits.h>)
+SYMBOL(LONG_MAX, None, <limits.h>)
+SYMBOL(LONG_MIN, None, <limits.h>)
+SYMBOL(L_tmpnam, None, <stdio.h>)
+SYMBOL(L_tmpnam_s, None, <stdio.h>)
+SYMBOL(MATH_ERREXCEPT, None, <math.h>)
+SYMBOL(MATH_ERRNO, None, <math.h>)
+SYMBOL(MB_CUR_MAX, None, <stdlib.h>)
+SYMBOL(MB_LEN_MAX, None, <limits.h>)
+SYMBOL(NAN, None, <math.h>)
+SYMBOL(ONCE_FLAG_INIT, None, <threads.h>)
+SYMBOL(PTRDIFF_MAX, None, <stdint.h>)
+SYMBOL(PTRDIFF_MIN, None, <stdint.h>)
+SYMBOL(RAND_MAX, None, <stdlib.h>)
+SYMBOL(RSIZE_MAX, None, <stdint.h>)
+SYMBOL(SCHAR_MAX, None, <limits.h>)
+SYMBOL(SCHAR_MIN, None, <limits.h>)
+SYMBOL(SEEK_CUR, None, <stdio.h>)
+SYMBOL(SEEK_END, None, <stdio.h>)
+SYMBOL(SEEK_SET, None, <stdio.h>)
+SYMBOL(SHRT_MAX, None, <limits.h>)
+SYMBOL(SHRT_MIN, None, <limits.h>)
+SYMBOL(SIGABRT, None, <signal.h>)
+SYMBOL(SIGFPE, None, <signal.h>)
+SYMBOL(SIGILL, None, <signal.h>)
+SYMBOL(SIGINT, None, <signal.h>)
+SYMBOL(SIGSEGV, None, <signal.h>)
+SYMBOL(SIGTERM, None, <signal.h>)
+SYMBOL(SIG_ATOMIC_MAX, None, <stdint.h>)
+SYMBOL(SIG_ATOMIC_MIN, None, <stdint.h>)
+SYMBOL(SIG_DFL, None, <signal.h>)
+SYMBOL(SIG_ERR, None, <signal.h>)
+SYMBOL(SIG_IGN, None, <signal.h>)
+SYMBOL(SIZE_MAX, None, <stdint.h>)
+SYMBOL(TIME_UTC, None, <time.h>)
+SYMBOL(TMP_MAX, None, <stdio.h>)
+SYMBOL(TMP_MAX_S, None, <stdio.h>)
+SYMBOL(TSS_DTOR_ITERATIONS, None, <threads.h>)
+SYMBOL(UCHAR_MAX, None, <limits.h>)
+SYMBOL(UINT16_MAX, None, <stdint.h>)
+SYMBOL(UINT32_MAX, None, <stdint.h>)
+SYMBOL(UINT64_MAX, None, <stdint.h>)
+SYMBOL(UINT8_MAX, None, <stdint.h>)
+SYMBOL(UINTMAX_MAX, None, <stdint.h>)
+SYMBOL(UINTPTR_MAX, None, <stdint.h>)
+SYMBOL(UINT_FAST16_MAX, None, <stdint.h>)
+SYMBOL(UINT_FAST32_MAX, None, <stdint.h>)
+SYMBOL(UINT_FAST64_MAX, None, <stdint.h>)
+SYMBOL(UINT_FAST8_MAX, None, <stdint.h>)
+SYMBOL(UINT_LEAST16_MAX, None, <stdint.h>)
+SYMBOL(UINT_LEAST32_MAX, None, <stdint.h>)
+SYMBOL(UINT_LEAST64_MAX, None, <stdint.h>)
+SYMBOL(UINT_LEAST8_MAX, None, <stdint.h>)
+SYMBOL(UINT_MAX, None, <limits.h>)
+SYMBOL(ULLONG_MAX, None, <limits.h>)
+SYMBOL(ULONG_MAX, None, <limits.h>)
+SYMBOL(USHRT_MAX, None, <limits.h>)
+SYMBOL(WCHAR_MAX, None, <wchar.h>)
+SYMBOL(WCHAR_MIN, None, <wchar.h>)
+SYMBOL(WEOF, None, <wchar.h>)
+SYMBOL(WINT_MAX, None, <stdint.h>)
+SYMBOL(WINT_MIN, None, <stdint.h>)
+SYMBOL(_Complex_I, None, <complex.h>)
+SYMBOL(_IOFBF, None, <stdio.h>)
+SYMBOL(_IOLBF, None, <stdio.h>)
+SYMBOL(_IONBF, None, <stdio.h>)
+SYMBOL(_Imaginary_I, None, <complex.h>)
+SYMBOL(__alignas_is_defined, None, <stdalign.h>)
+SYMBOL(__alignof_is_defined, None, <stdalign.h>)
+SYMBOL(abort_handler_s, None, <stdlib.h>)
+SYMBOL(abs, None, <stdlib.h>)
+SYMBOL(acos, None, <math.h>)
+SYMBOL(acosf, None, <math.h>)
+SYMBOL(acosh, None, <math.h>)
+SYMBOL(acoshf, None, <math.h>)
+SYMBOL(acoshl, None, <math.h>)
+SYMBOL(acosl, None, <math.h>)
+SYMBOL(alignas, None, <stdalign.h>)
+SYMBOL(aligned_alloc, None, <stdlib.h>)
+SYMBOL(alignof, None, <stdalign.h>)
+SYMBOL(and, None, <iso646.h>)
+SYMBOL(and_eq, None, <iso646.h>)
+SYMBOL(asctime, None, <time.h>)
+SYMBOL(asctime_s, None, <time.h>)
+SYMBOL(asin, None, <math.h>)
+SYMBOL(asinf, None, <math.h>)
+SYMBOL(asinh, None, <math.h>)
+SYMBOL(asinhf, None, <math.h>)
+SYMBOL(asinhl, None, <math.h>)
+SYMBOL(asinl, None, <math.h>)
+SYMBOL(assert, None, <assert.h>)
+SYMBOL(at_quick_exit, None, <stdlib.h>)
+SYMBOL(atan, None, <math.h>)
+SYMBOL(atan2, None, <math.h>)
+SYMBOL(atan2f, None, <math.h>)
+SYMBOL(atan2l, None, <math.h>)
+SYMBOL(atanf, None, <math.h>)
+SYMBOL(atanh, None, <math.h>)
+SYMBOL(atanhf, None, <math.h>)
+SYMBOL(atanhl, None, <math.h>)
+SYMBOL(atanl, None, <math.h>)
+SYMBOL(atexit, None, <stdlib.h>)
+SYMBOL(atof, None, <stdlib.h>)
+SYMBOL(atoi, None, <stdlib.h>)
+SYMBOL(atol, None, <stdlib.h>)
+SYMBOL(atoll, None, <stdlib.h>)
+SYMBOL(atomic_bool, None, <stdatomic.h>)
+SYMBOL(atomic_char, None, <stdatomic.h>)
+SYMBOL(atomic_char16_t, None, <stdatomic.h>)
+SYMBOL(atomic_char32_t, None, <stdatomic.h>)
+SYMBOL(atomic_compare_exchange_strong, None, <stdatomic.h>)
+SYMBOL(atomic_compare_exchange_strong_explicit, None, <stdatomic.h>)
+SYMBOL(atomic_compare_exchange_weak, None, <stdatomic.h>)
+SYMBOL(atomic_compare_exchange_weak_explicit, None, <stdatomic.h>)
+SYMBOL(atomic_exchange, None, <stdatomic.h>)
+SYMBOL(atomic_exchange_explicit, None, <stdatomic.h>)
+SYMBOL(atomic_fetch_add, None, <stdatomic.h>)
+SYMBOL(atomic_fetch_add_explicit, None, <stdatomic.h>)
+SYMBOL(atomic_fetch_and, None, <stdatomic.h>)
+SYMBOL(atomic_fetch_and_explicit, None, <stdatomic.h>)
+SYMBOL(atomic_fetch_or, None, <stdatomic.h>)
+SYMBOL(atomic_fetch_or_explicit, None, <stdatomic.h>)
+SYMBOL(atomic_fetch_sub, None, <stdatomic.h>)
+SYMBOL(atomic_fetch_sub_explicit, None, <stdatomic.h>)
+SYMBOL(atomic_fetch_xor, None, <stdatomic.h>)
+SYMBOL(atomic_fetch_xor_explicit, None, <stdatomic.h>)
+SYMBOL(atomic_flag, None, <stdatomic.h>)
+SYMBOL(atomic_flag_clear, None, <stdatomic.h>)
+SYMBOL(atomic_flag_clear_explicit, None, <stdatomic.h>)
+SYMBOL(atomic_flag_test_and_set, None, <stdatomic.h>)
+SYMBOL(atomic_flag_test_and_set_explicit, None, <stdatomic.h>)
+SYMBOL(atomic_init, None, <stdatomic.h>)
+SYMBOL(atomic_int, None, <stdatomic.h>)
+SYMBOL(atomic_int_fast16_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_fast32_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_fast64_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_fast8_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_least16_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_least32_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_least64_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_least8_t, None, <stdatomic.h>)
+SYMBOL(atomic_intmax_t, None, <stdatomic.h>)
+SYMBOL(atomic_intptr_t, None, <stdatomic.h>)
+SYMBOL(atomic_is_lock_free, None, <stdatomic.h>)
+SYMBOL(atomic_llong, None, <stdatomic.h>)
+SYMBOL(atomic_load, None, <stdatomic.h>)
+SYMBOL(atomic_load_explicit, None, <stdatomic.h>)
+SYMBOL(atomic_long, None, <stdatomic.h>)
+SYMBOL(atomic_ptrdiff_t, None, <stdatomic.h>)
+SYMBOL(atomic_schar, None, <stdatomic.h>)
+SYMBOL(atomic_short, None, <stdatomic.h>)
+SYMBOL(atomic_signal_fence, None, <stdatomic.h>)
+SYMBOL(atomic_size_t, None, <stdatomic.h>)
+SYMBOL(atomic_store, None, <stdatomic.h>)
+SYMBOL(atomic_store_explicit, None, <stdatomic.h>)
+SYMBOL(atomic_thread_fence, None, <stdatomic.h>)
+SYMBOL(atomic_uchar, None, <stdatomic.h>)
+SYMBOL(atomic_uint, None, <stdatomic.h>)
+SYMBOL(atomic_uint_fast16_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_fast32_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_fast64_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_fast8_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_least16_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_least32_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_least64_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_least8_t, None, <stdatomic.h>)
+SYMBOL(atomic_uintmax_t, None, <stdatomic.h>)
+SYMBOL(atomic_uintptr_t, None, <stdatomic.h>)
+SYMBOL(atomic_ullong, None, <stdatomic.h>)
+SYMBOL(atomic_ulong, None, <stdatomic.h>)
+SYMBOL(atomic_ushort, None, <stdatomic.h>)
+SYMBOL(atomic_wchar_t, None, <stdatomic.h>)
+SYMBOL(bitand, None, <iso646.h>)
+SYMBOL(bitor, None, <iso646.h>)
+SYMBOL(bsearch, None, <stdlib.h>)
+SYMBOL(bsearch_s, None, <stdlib.h>)
+SYMBOL(btowc, None, <wchar.h>)
+SYMBOL(c16rtomb, None, <uchar.h>)
+SYMBOL(c32rtomb, None, <uchar.h>)
+SYMBOL(cabs, None, <complex.h>)
+SYMBOL(cabsf, None, <complex.h>)
+SYMBOL(cabsl, None, <complex.h>)
+SYMBOL(cacos, None, <complex.h>)
+SYMBOL(cacosf, None, <complex.h>)
+SYMBOL(cacosh, None, <complex.h>)
+SYMBOL(cacoshf, None, <complex.h>)
+SYMBOL(cacoshl, None, <complex.h>)
+SYMBOL(cacosl, None, <complex.h>)
+SYMBOL(call_once, None, <threads.h>)
+SYMBOL(calloc, None, <stdlib.h>)
+SYMBOL(carg, None, <complex.h>)
+SYMBOL(cargf, None, <complex.h>)
+SYMBOL(cargl, None, <complex.h>)
+SYMBOL(casin, None, <complex.h>)
+SYMBOL(casinf, None, <complex.h>)
+SYMBOL(casinh, None, <complex.h>)
+SYMBOL(casinhf, None, <complex.h>)
+SYMBOL(casinhl, None, <complex.h>)
+SYMBOL(casinl, None, <complex.h>)
+SYMBOL(catan, None, <complex.h>)
+SYMBOL(catanf, None, <complex.h>)
+SYMBOL(catanh, None, <complex.h>)
+SYMBOL(catanhf, None, <complex.h>)
+SYMBOL(catanhl, None, <complex.h>)
+SYMBOL(catanl, None, <complex.h>)
+SYMBOL(cbrt, None, <math.h>)
+SYMBOL(cbrtf, None, <math.h>)
+SYMBOL(cbrtl, None, <math.h>)
+SYMBOL(ccos, None, <complex.h>)
+SYMBOL(ccosf, None, <complex.h>)
+SYMBOL(ccosh, None, <complex.h>)
+SYMBOL(ccoshf, None, <complex.h>)
+SYMBOL(ccoshl, None, <complex.h>)
+SYMBOL(ccosl, None, <complex.h>)
+SYMBOL(ceil, None, <math.h>)
+SYMBOL(ceilf, None, <math.h>)
+SYMBOL(ceill, None, <math.h>)
+SYMBOL(cexp, None, <complex.h>)
+SYMBOL(cexpf, None, <complex.h>)
+SYMBOL(cexpl, None, <complex.h>)
+SYMBOL(char16_t, None, <uchar.h>)
+SYMBOL(char32_t, None, <uchar.h>)
+SYMBOL(cimag, None, <complex.h>)
+SYMBOL(cimagf, None, <complex.h>)
+SYMBOL(cimagl, None, <complex.h>)
+SYMBOL(clearerr, None, <stdio.h>)
+SYMBOL(clock, None, <time.h>)
+SYMBOL(clock_t, None, <time.h>)
+SYMBOL(clog, None, <complex.h>)
+SYMBOL(clogf, None, <complex.h>)
+SYMBOL(clogl, None, <complex.h>)
+SYMBOL(cnd_broadcast, None, <threads.h>)
+SYMBOL(cnd_destroy, None, <threads.h>)
+SYMBOL(cnd_init, None, <threads.h>)
+SYMBOL(cnd_signal, None, <threads.h>)
+SYMBOL(cnd_t, None, <threads.h>)
+SYMBOL(cnd_timedwait, None, <threads.h>)
+SYMBOL(cnd_wait, None, <threads.h>)
+SYMBOL(compl, None, <iso646.h>)
+SYMBOL(complex, None, <complex.h>)
+SYMBOL(conj, None, <complex.h>)
+SYMBOL(conjf, None, <complex.h>)
+SYMBOL(conjl, None, <complex.h>)
+SYMBOL(constraint_handler_t, None, <stdlib.h>)
+SYMBOL(copysign, None, <math.h>)
+SYMBOL(copysignf, None, <math.h>)
+SYMBOL(copysignl, None, <math.h>)
+SYMBOL(cos, None, <math.h>)
+SYMBOL(cosf, None, <math.h>)
+SYMBOL(cosh, None, <math.h>)
+SYMBOL(coshf, None, <math.h>)
+SYMBOL(coshl, None, <math.h>)
+SYMBOL(cosl, None, <math.h>)
+SYMBOL(cpow, None, <complex.h>)
+SYMBOL(cpowf, None, <complex.h>)
+SYMBOL(cpowl, None, <complex.h>)
+SYMBOL(cproj, None, <complex.h>)
+SYMBOL(cprojf, None, <complex.h>)
+SYMBOL(cprojl, None, <complex.h>)
+SYMBOL(creal, None, <complex.h>)
+SYMBOL(crealf, None, <complex.h>)
+SYMBOL(creall, None, <complex.h>)
+SYMBOL(csin, None, <complex.h>)
+SYMBOL(csinf, None, <complex.h>)
+SYMBOL(csinh, None, <complex.h>)
+SYMBOL(csinhf, None, <complex.h>)
+SYMBOL(csinhl, None, <complex.h>)
+SYMBOL(csinl, None, <complex.h>)
+SYMBOL(csqrt, None, <complex.h>)
+SYMBOL(csqrtf, None, <complex.h>)
+SYMBOL(csqrtl, None, <complex.h>)
+SYMBOL(ctan, None, <complex.h>)
+SYMBOL(ctanf, None, <complex.h>)
+SYMBOL(ctanh, None, <complex.h>)
+SYMBOL(ctanhf, None, <complex.h>)
+SYMBOL(ctanhl, None, <complex.h>)
+SYMBOL(ctanl, None, <complex.h>)
+SYMBOL(ctime, None, <time.h>)
+SYMBOL(ctime_s, None, <time.h>)
+SYMBOL(difftime, None, <time.h>)
+SYMBOL(double_t, None, <math.h>)
+SYMBOL(erf, None, <math.h>)
+SYMBOL(erfc, None, <math.h>)
+SYMBOL(erfcf, None, <math.h>)
+SYMBOL(erfcl, None, <math.h>)
+SYMBOL(erff, None, <math.h>)
+SYMBOL(erfl, None, <math.h>)
+SYMBOL(errno, None, <errno.h>)
+SYMBOL(exit, None, <stdlib.h>)
+SYMBOL(exp, None, <math.h>)
+SYMBOL(exp2, None, <math.h>)
+SYMBOL(exp2f, None, <math.h>)
+SYMBOL(exp2l, None, <math.h>)
+SYMBOL(expf, None, <math.h>)
+SYMBOL(expl, None, <math.h>)
+SYMBOL(expm1, None, <math.h>)
+SYMBOL(expm1f, None, <math.h>)
+SYMBOL(expm1l, None, <math.h>)
+SYMBOL(fabs, None, <math.h>)
+SYMBOL(fabsf, None, <math.h>)
+SYMBOL(fabsl, None, <math.h>)
+SYMBOL(fclose, None, <stdio.h>)
+SYMBOL(fdim, None, <math.h>)
+SYMBOL(fdimf, None, <math.h>)
+SYMBOL(fdiml, None, <math.h>)
+SYMBOL(feclearexcept, None, <fenv.h>)
+SYMBOL(fegetenv, None, <fenv.h>)
+SYMBOL(fegetexceptflag, None, <fenv.h>)
+SYMBOL(fegetround, None, <fenv.h>)
+SYMBOL(feholdexcept, None, <fenv.h>)
+SYMBOL(fenv_t, None, <fenv.h>)
+SYMBOL(feof, None, <stdio.h>)
+SYMBOL(feraiseexcept, None, <fenv.h>)
+SYMBOL(ferror, None, <stdio.h>)
+SYMBOL(fesetenv, None, <fenv.h>)
+SYMBOL(fesetexceptflag, None, <fenv.h>)
+SYMBOL(fesetround, None, <fenv.h>)
+SYMBOL(fetestexcept, None, <fenv.h>)
+SYMBOL(feupdateenv, None, <fenv.h>)
+SYMBOL(fexcept_t, None, <fenv.h>)
+SYMBOL(fflush, None, <stdio.h>)
+SYMBOL(fgetc, None, <stdio.h>)
+SYMBOL(fgetpos, None, <stdio.h>)
+SYMBOL(fgets, None, <stdio.h>)
+SYMBOL(fgetwc, None, <wchar.h>)
+SYMBOL(fgetws, None, <wchar.h>)
+SYMBOL(float_t, None, <math.h>)
+SYMBOL(floor, None, <math.h>)
+SYMBOL(floorf, None, <math.h>)
+SYMBOL(floorl, None, <math.h>)
+SYMBOL(fma, None, <math.h>)
+SYMBOL(fmaf, None, <math.h>)
+SYMBOL(fmal, None, <math.h>)
+SYMBOL(fmax, None, <math.h>)
+SYMBOL(fmaxf, None, <math.h>)
+SYMBOL(fmaxl, None, <math.h>)
+SYMBOL(fmin, None, <math.h>)
+SYMBOL(fminf, None, <math.h>)
+SYMBOL(fminl, None, <math.h>)
+SYMBOL(fmod, None, <math.h>)
+SYMBOL(fmodf, None, <math.h>)
+SYMBOL(fmodl, None, <math.h>)
+SYMBOL(fopen, None, <stdio.h>)
+SYMBOL(fopen_s, None, <stdio.h>)
+SYMBOL(fpclassify, None, <math.h>)
+SYMBOL(fpos_t, None, <stdio.h>)
+SYMBOL(fprintf, None, <stdio.h>)
+SYMBOL(fprintf_s, None, <stdio.h>)
+SYMBOL(fputc, None, <stdio.h>)
+SYMBOL(fputs, None, <stdio.h>)
+SYMBOL(fputwc, None, <wchar.h>)
+SYMBOL(fputws, None, <wchar.h>)
+SYMBOL(fread, None, <stdio.h>)
+SYMBOL(free, None, <stdlib.h>)
+SYMBOL(freopen, None, <stdio.h>)
+SYMBOL(freopen_s, None, <stdio.h>)
+SYMBOL(frexp, None, <math.h>)
+SYMBOL(frexpf, None, <math.h>)
+SYMBOL(frexpl, None, <math.h>)
+SYMBOL(fscanf, None, <stdio.h>)
+SYMBOL(fscanf_s, None, <stdio.h>)
+SYMBOL(fseek, None, <stdio.h>)
+SYMBOL(fsetpos, None, <stdio.h>)
+SYMBOL(ftell, None, <stdio.h>)
+SYMBOL(fwide, None, <wchar.h>)
+SYMBOL(fwprintf, None, <wchar.h>)
+SYMBOL(fwprintf_s, None, <wchar.h>)
+SYMBOL(fwrite, None, <stdio.h>)
+SYMBOL(fwscanf, None, <wchar.h>)
+SYMBOL(fwscanf_s, None, <wchar.h>)
+SYMBOL(getc, None, <stdio.h>)
+SYMBOL(getchar, None, <stdio.h>)
+SYMBOL(getenv, None, <stdlib.h>)
+SYMBOL(getenv_s, None, <stdlib.h>)
+SYMBOL(gets, None, <stdio.h>)
+SYMBOL(gets_s, None, <stdio.h>)
+SYMBOL(getwc, None, <wchar.h>)
+SYMBOL(getwchar, None, <wchar.h>)
+SYMBOL(gmtime, None, <time.h>)
+SYMBOL(gmtime_s, None, <time.h>)
+SYMBOL(hypot, None, <math.h>)
+SYMBOL(hypotf, None, <math.h>)
+SYMBOL(hypotl, None, <math.h>)
+SYMBOL(ignore_handler_s, None, <stdlib.h>)
+SYMBOL(ilogb, None, <math.h>)
+SYMBOL(ilogbf, None, <math.h>)
+SYMBOL(ilogbl, None, <math.h>)
+SYMBOL(imaginary, None, <complex.h>)
+SYMBOL(imaxabs, None, <inttypes.h>)
+SYMBOL(int16_t, None, <stdint.h>)
+SYMBOL(int32_t, None, <stdint.h>)
+SYMBOL(int64_t, None, <stdint.h>)
+SYMBOL(int8_t, None, <stdint.h>)
+SYMBOL(int_fast16_t, None, <stdint.h>)
+SYMBOL(int_fast32_t, None, <stdint.h>)
+SYMBOL(int_fast64_t, None, <stdint.h>)
+SYMBOL(int_fast8_t, None, <stdint.h>)
+SYMBOL(int_least16_t, None, <stdint.h>)
+SYMBOL(int_least32_t, None, <stdint.h>)
+SYMBOL(int_least64_t, None, <stdint.h>)
+SYMBOL(int_least8_t, None, <stdint.h>)
+SYMBOL(intmax_t, None, <stdint.h>)
+SYMBOL(intptr_t, None, <stdint.h>)
+SYMBOL(isalnum, None, <ctype.h>)
+SYMBOL(isalpha, None, <ctype.h>)
+SYMBOL(isblank, None, <ctype.h>)
+SYMBOL(iscntrl, None, <ctype.h>)
+SYMBOL(isdigit, None, <ctype.h>)
+SYMBOL(isfinite, None, <math.h>)
+SYMBOL(isgraph, None, <ctype.h>)
+SYMBOL(isgreater, None, <math.h>)
+SYMBOL(isgreaterequal, None, <math.h>)
+SYMBOL(isinf, None, <math.h>)
+SYMBOL(isless, None, <math.h>)
+SYMBOL(islessequal, None, <math.h>)
+SYMBOL(islessgreater, None, <math.h>)
+SYMBOL(islower, None, <ctype.h>)
+SYMBOL(isnan, None, <math.h>)
+SYMBOL(isnormal, None, <math.h>)
+SYMBOL(isprint, None, <ctype.h>)
+SYMBOL(ispunct, None, <ctype.h>)
+SYMBOL(isspace, None, <ctype.h>)
+SYMBOL(isunordered, None, <math.h>)
+SYMBOL(isupper, None, <ctype.h>)
+SYMBOL(iswalnum, None, <wctype.h>)
+SYMBOL(iswalpha, None, <wctype.h>)
+SYMBOL(iswblank, None, <wctype.h>)
+SYMBOL(iswcntrl, None, <wctype.h>)
+SYMBOL(iswctype, None, <wctype.h>)
+SYMBOL(iswdigit, None, <wctype.h>)
+SYMBOL(iswgraph, None, <wctype.h>)
+SYMBOL(iswlower, None, <wctype.h>)
+SYMBOL(iswprint, None, <wctype.h>)
+SYMBOL(iswpunct, None, <wctype.h>)
+SYMBOL(iswspace, None, <wctype.h>)
+SYMBOL(iswupper, None, <wctype.h>)
+SYMBOL(iswxdigit, None, <wctype.h>)
+SYMBOL(isxdigit, None, <ctype.h>)
+SYMBOL(jmp_buf, None, <setjmp.h>)
+SYMBOL(kill_dependency, None, <stdatomic.h>)
+SYMBOL(labs, None, <stdlib.h>)
+SYMBOL(lconv, None, <locale.h>)
+SYMBOL(ldexp, None, <math.h>)
+SYMBOL(ldexpf, None, <math.h>)
+SYMBOL(ldexpl, None, <math.h>)
+SYMBOL(lgamma, None, <math.h>)
+SYMBOL(lgammaf, None, <math.h>)
+SYMBOL(lgammal, None, <math.h>)
+SYMBOL(llabs, None, <stdlib.h>)
+SYMBOL(llrint, None, <math.h>)
+SYMBOL(llrintf, None, <math.h>)
+SYMBOL(llrintl, None, <math.h>)
+SYMBOL(llround, None, <math.h>)
+SYMBOL(llroundf, None, <math.h>)
+SYMBOL(llroundl, None, <math.h>)
+SYMBOL(localeconv, None, <locale.h>)
+SYMBOL(localtime, None, <time.h>)
+SYMBOL(localtime_s, None, <time.h>)
+SYMBOL(log, None, <math.h>)
+SYMBOL(log10, None, <math.h>)
+SYMBOL(log10f, None, <math.h>)
+SYMBOL(log10l, None, <math.h>)
+SYMBOL(log1p, None, <math.h>)
+SYMBOL(log1pf, None, <math.h>)
+SYMBOL(log1pl, None, <math.h>)
+SYMBOL(log2, None, <math.h>)
+SYMBOL(log2f, None, <math.h>)
+SYMBOL(log2l, None, <math.h>)
+SYMBOL(logb, None, <math.h>)
+SYMBOL(logbf, None, <math.h>)
+SYMBOL(logbl, None, <math.h>)
+SYMBOL(logf, None, <math.h>)
+SYMBOL(logl, None, <math.h>)
+SYMBOL(longjmp, None, <setjmp.h>)
+SYMBOL(lrint, None, <math.h>)
+SYMBOL(lrintf, None, <math.h>)
+SYMBOL(lrintl, None, <math.h>)
+SYMBOL(lround, None, <math.h>)
+SYMBOL(lroundf, None, <math.h>)
+SYMBOL(lroundl, None, <math.h>)
+SYMBOL(malloc, None, <stdlib.h>)
+SYMBOL(math_errhandling, None, <math.h>)
+SYMBOL(max_align_t, None, <stddef.h>)
+SYMBOL(mblen, None, <stdlib.h>)
+SYMBOL(mbrlen, None, <wchar.h>)
+SYMBOL(mbrtoc16, None, <uchar.h>)
+SYMBOL(mbrtoc32, None, <uchar.h>)
+SYMBOL(mbrtowc, None, <wchar.h>)
+SYMBOL(mbsinit, None, <wchar.h>)
+SYMBOL(mbsrtowcs, None, <wchar.h>)
+SYMBOL(mbsrtowcs_s, None, <wchar.h>)
+SYMBOL(mbstowcs, None, <stdlib.h>)
+SYMBOL(mbstowcs_s, None, <stdlib.h>)
+SYMBOL(mbtowc, None, <stdlib.h>)
+SYMBOL(memchr, None, <string.h>)
+SYMBOL(memcmp, None, <string.h>)
+SYMBOL(memcpy, None, <string.h>)
+SYMBOL(memcpy_s, None, <string.h>)
+SYMBOL(memmove, None, <string.h>)
+SYMBOL(memmove_s, None, <string.h>)
+SYMBOL(memory_order, None, <stdatomic.h>)
+SYMBOL(memory_order_acq_rel, None, <stdatomic.h>)
+SYMBOL(memory_order_acquire, None, <stdatomic.h>)
+SYMBOL(memory_order_consume, None, <stdatomic.h>)
+SYMBOL(memory_order_relaxed, None, <stdatomic.h>)
+SYMBOL(memory_order_release, None, <stdatomic.h>)
+SYMBOL(memory_order_seq_cst, None, <stdatomic.h>)
+SYMBOL(memset, None, <string.h>)
+SYMBOL(memset_s, None, <string.h>)
+SYMBOL(mktime, None, <time.h>)
+SYMBOL(modf, None, <math.h>)
+SYMBOL(modff, None, <math.h>)
+SYMBOL(modfl, None, <math.h>)
+SYMBOL(mtx_destroy, None, <threads.h>)
+SYMBOL(mtx_init, None, <threads.h>)
+SYMBOL(mtx_lock, None, <threads.h>)
+SYMBOL(mtx_plain, None, <threads.h>)
+SYMBOL(mtx_recursive, None, <threads.h>)
+SYMBOL(mtx_t, None, <threads.h>)
+SYMBOL(mtx_timed, None, <threads.h>)
+SYMBOL(mtx_timedlock, None, <threads.h>)
+SYMBOL(mtx_trylock, None, <threads.h>)
+SYMBOL(mtx_unlock, None, <threads.h>)
+SYMBOL(nan, None, <math.h>)
+SYMBOL(nanf, None, <math.h>)
+SYMBOL(nanl, None, <math.h>)
+SYMBOL(nearbyint, None, <math.h>)
+SYMBOL(nearbyintf, None, <math.h>)
+SYMBOL(nearbyintl, None, <math.h>)
+SYMBOL(nextafter, None, <math.h>)
+SYMBOL(nextafterf, None, <math.h>)
+SYMBOL(nextafterl, None, <math.h>)
+SYMBOL(nexttoward, None, <math.h>)
+SYMBOL(nexttowardf, None, <math.h>)
+SYMBOL(nexttowardl, None, <math.h>)
+SYMBOL(noreturn, None, <stdnoreturn.h>)
+SYMBOL(not, None, <iso646.h>)
+SYMBOL(not_eq, None, <iso646.h>)
+SYMBOL(offsetof, None, <stddef.h>)
+SYMBOL(once_flag, None, <threads.h>)
+SYMBOL(or, None, <iso646.h>)
+SYMBOL(or_eq, None, <iso646.h>)
+SYMBOL(perror, None, <stdio.h>)
+SYMBOL(pow, None, <math.h>)
+SYMBOL(powf, None, <math.h>)
+SYMBOL(powl, None, <math.h>)
+SYMBOL(printf, None, <stdio.h>)
+SYMBOL(printf_s, None, <stdio.h>)
+SYMBOL(ptrdiff_t, None, <stddef.h>)
+SYMBOL(putc, None, <stdio.h>)
+SYMBOL(putchar, None, <stdio.h>)
+SYMBOL(puts, None, <stdio.h>)
+SYMBOL(putwc, None, <wchar.h>)
+SYMBOL(putwchar, None, <wchar.h>)
+SYMBOL(qsort, None, <stdlib.h>)
+SYMBOL(qsort_s, None, <stdlib.h>)
+SYMBOL(quick_exit, None, <stdlib.h>)
+SYMBOL(raise, None, <signal.h>)
+SYMBOL(rand, None, <stdlib.h>)
+SYMBOL(realloc, None, <stdlib.h>)
+SYMBOL(remainder, None, <math.h>)
+SYMBOL(remainderf, None, <math.h>)
+SYMBOL(remainderl, None, <math.h>)
+SYMBOL(remove, None, <stdio.h>)
+SYMBOL(remquo, None, <math.h>)
+SYMBOL(remquof, None, <math.h>)
+SYMBOL(remquol, None, <math.h>)
+SYMBOL(rename, None, <stdio.h>)
+SYMBOL(rewind, None, <stdio.h>)
+SYMBOL(rint, None, <math.h>)
+SYMBOL(rintf, None, <math.h>)
+SYMBOL(rintl, None, <math.h>)
+SYMBOL(round, None, <math.h>)
+SYMBOL(roundf, None, <math.h>)
+SYMBOL(roundl, None, <math.h>)
+SYMBOL(rsize_t, None, <stddef.h>)
+SYMBOL(scalbln, None, <math.h>)
+SYMBOL(scalblnf, None, <math.h>)
+SYMBOL(scalblnl, None, <math.h>)
+SYMBOL(scalbn, None, <math.h>)
+SYMBOL(scalbnf, None, <math.h>)
+SYMBOL(scalbnl, None, <math.h>)
+SYMBOL(scanf, None, <stdio.h>)
+SYMBOL(scanf_s, None, <stdio.h>)
+SYMBOL(set_constraint_handler_s, None, <stdlib.h>)
+SYMBOL(setbuf, None, <stdio.h>)
+SYMBOL(setjmp, None, <setjmp.h>)
+SYMBOL(setlocale, None, <locale.h>)
+SYMBOL(setvbuf, None, <stdio.h>)
+SYMBOL(sig_atomic_t, None, <signal.h>)
+SYMBOL(signal, None, <signal.h>)
+SYMBOL(signbit, None, <math.h>)
+SYMBOL(sin, None, <math.h>)
+SYMBOL(sinf, None, <math.h>)
+SYMBOL(sinh, None, <math.h>)
+SYMBOL(sinhf, None, <math.h>)
+SYMBOL(sinhl, None, <math.h>)
+SYMBOL(sinl, None, <math.h>)
+SYMBOL(snprintf, None, <stdio.h>)
+SYMBOL(snprintf_s, None, <stdio.h>)
+SYMBOL(snwprintf_s, None, <wchar.h>)
+SYMBOL(sprintf, None, <stdio.h>)
+SYMBOL(sprintf_s, None, <stdio.h>)
+SYMBOL(sqrt, None, <math.h>)
+SYMBOL(sqrtf, None, <math.h>)
+SYMBOL(sqrtl, None, <math.h>)
+SYMBOL(srand, None, <stdlib.h>)
+SYMBOL(sscanf, None, <stdio.h>)
+SYMBOL(sscanf_s, None, <stdio.h>)
+SYMBOL(static_assert, None, <assert.h>)
+SYMBOL(stderr, None, <stdio.h>)
+SYMBOL(stdin, None, <stdio.h>)
+SYMBOL(stdout, None, <stdio.h>)
+SYMBOL(strcat, None, <string.h>)
+SYMBOL(strcat_s, None, <string.h>)
+SYMBOL(strchr, None, <string.h>)
+SYMBOL(strcmp, None, <string.h>)
+SYMBOL(strcoll, None, <string.h>)
+SYMBOL(strcpy, None, <string.h>)
+SYMBOL(strcpy_s, None, <string.h>)
+SYMBOL(strcspn, None, <string.h>)
+SYMBOL(strerror, None, <string.h>)
+SYMBOL(strerror_s, None, <string.h>)
+SYMBOL(strerrorlen_s, None, <string.h>)
+SYMBOL(strftime, None, <time.h>)
+SYMBOL(strlen, None, <string.h>)
+SYMBOL(strncat, None, <string.h>)
+SYMBOL(strncat_s, None, <string.h>)
+SYMBOL(strncmp, None, <string.h>)
+SYMBOL(strncpy, None, <string.h>)
+SYMBOL(strncpy_s, None, <string.h>)
+SYMBOL(strnlen_s, None, <string.h>)
+SYMBOL(strpbrk, None, <string.h>)
+SYMBOL(strrchr, None, <string.h>)
+SYMBOL(strspn, None, <string.h>)
+SYMBOL(strstr, None, <string.h>)
+SYMBOL(strtod, None, <stdlib.h>)
+SYMBOL(strtof, None, <stdlib.h>)
+SYMBOL(strtoimax, None, <inttypes.h>)
+SYMBOL(strtok, None, <string.h>)
+SYMBOL(strtok_s, None, <string.h>)
+SYMBOL(strtol, None, <stdlib.h>)
+SYMBOL(strtold, None, <stdlib.h>)
+SYMBOL(strtoll, None, <stdlib.h>)
+SYMBOL(strtoul, None, <stdlib.h>)
+SYMBOL(strtoull, None, <stdlib.h>)
+SYMBOL(strtoumax, None, <inttypes.h>)
+SYMBOL(strxfrm, None, <string.h>)
+SYMBOL(swprintf, None, <wchar.h>)
+SYMBOL(swprintf_s, None, <wchar.h>)
+SYMBOL(swscanf, None, <wchar.h>)
+SYMBOL(swscanf_s, None, <wchar.h>)
+SYMBOL(system, None, <stdlib.h>)
+SYMBOL(tan, None, <math.h>)
+SYMBOL(tanf, None, <math.h>)
+SYMBOL(tanh, None, <math.h>)
+SYMBOL(tanhf, None, <math.h>)
+SYMBOL(tanhl, None, <math.h>)
+SYMBOL(tanl, None, <math.h>)
+SYMBOL(tgamma, None, <math.h>)
+SYMBOL(tgammaf, None, <math.h>)
+SYMBOL(tgammal, None, <math.h>)
+SYMBOL(thrd_busy, None, <threads.h>)
+SYMBOL(thrd_create, None, <threads.h>)
+SYMBOL(thrd_current, None, <threads.h>)
+SYMBOL(thrd_detach, None, <threads.h>)
+SYMBOL(thrd_equal, None, <threads.h>)
+SYMBOL(thrd_error, None, <threads.h>)
+SYMBOL(thrd_join, None, <threads.h>)
+SYMBOL(thrd_nomem, None, <threads.h>)
+SYMBOL(thrd_sleep, None, <threads.h>)
+SYMBOL(thrd_start_t, None, <threads.h>)
+SYMBOL(thrd_success, None, <threads.h>)
+SYMBOL(thrd_t, None, <threads.h>)
+SYMBOL(thrd_timedout, None, <threads.h>)
+SYMBOL(thrd_yield, None, <threads.h>)
+SYMBOL(thread_local, None, <threads.h>)
+SYMBOL(time, None, <time.h>)
+SYMBOL(time_t, None, <time.h>)
+SYMBOL(timespec, None, <time.h>)
+SYMBOL(timespec_get, None, <time.h>)
+SYMBOL(tm, None, <time.h>)
+SYMBOL(tmpfile, None, <stdio.h>)
+SYMBOL(tmpfile_s, None, <stdio.h>)
+SYMBOL(tmpnam, None, <stdio.h>)
+SYMBOL(tmpnam_s, None, <stdio.h>)
+SYMBOL(tolower, None, <ctype.h>)
+SYMBOL(toupper, None, <ctype.h>)
+SYMBOL(towctrans, None, <wctype.h>)
+SYMBOL(towlower, None, <wctype.h>)
+SYMBOL(towupper, None, <wctype.h>)
+SYMBOL(trunc, None, <math.h>)
+SYMBOL(truncf, None, <math.h>)
+SYMBOL(truncl, None, <math.h>)
+SYMBOL(tss_create, None, <threads.h>)
+SYMBOL(tss_delete, None, <threads.h>)
+SYMBOL(tss_dtor_t, None, <threads.h>)
+SYMBOL(tss_get, None, <threads.h>)
+SYMBOL(tss_set, None, <threads.h>)
+SYMBOL(tss_t, None, <threads.h>)
+SYMBOL(uint16_t, None, <stdint.h>)
+SYMBOL(uint32_t, None, <stdint.h>)
+SYMBOL(uint64_t, None, <stdint.h>)
+SYMBOL(uint8_t, None, <stdint.h>)
+SYMBOL(uint_fast16_t, None, <stdint.h>)
+SYMBOL(uint_fast32_t, None, <stdint.h>)
+SYMBOL(uint_fast64_t, None, <stdint.h>)
+SYMBOL(uint_fast8_t, None, <stdint.h>)
+SYMBOL(uint_least16_t, None, <stdint.h>)
+SYMBOL(uint_least32_t, None, <stdint.h>)
+SYMBOL(uint_least64_t, None, <stdint.h>)
+SYMBOL(uint_least8_t, None, <stdint.h>)
+SYMBOL(uintmax_t, None, <stdint.h>)
+SYMBOL(uintptr_t, None, <stdint.h>)
+SYMBOL(ungetc, None, <stdio.h>)
+SYMBOL(ungetwc, None, <wchar.h>)
+SYMBOL(va_arg, None, <stdarg.h>)
+SYMBOL(va_copy, None, <stdarg.h>)
+SYMBOL(va_end, None, <stdarg.h>)
+SYMBOL(va_start, None, <stdarg.h>)
+SYMBOL(vfprintf, None, <stdio.h>)
+SYMBOL(vfprintf_s, None, <stdio.h>)
+SYMBOL(vfscanf, None, <stdio.h>)
+SYMBOL(vfscanf_s, None, <stdio.h>)
+SYMBOL(vfwprintf, None, <wchar.h>)
+SYMBOL(vfwprintf_s, None, <wchar.h>)
+SYMBOL(vfwscanf, None, <wchar.h>)
+SYMBOL(vfwscanf_s, None, <wchar.h>)
+SYMBOL(vprintf, None, <stdio.h>)
+SYMBOL(vprintf_s, None, <stdio.h>)
+SYMBOL(vscanf, None, <stdio.h>)
+SYMBOL(vscanf_s, None, <stdio.h>)
+SYMBOL(vsnprintf, None, <stdio.h>)
+SYMBOL(vsnprintf_s, None, <stdio.h>)
+SYMBOL(vsnwprintf_s, None, <wchar.h>)
+SYMBOL(vsprintf, None, <stdio.h>)
+SYMBOL(vsprintf_s, None, <stdio.h>)
+SYMBOL(vsscanf, None, <stdio.h>)
+SYMBOL(vsscanf_s, None, <stdio.h>)
+SYMBOL(vswprintf, None, <wchar.h>)
+SYMBOL(vswprintf_s, None, <wchar.h>)
+SYMBOL(vswscanf, None, <wchar.h>)
+SYMBOL(vswscanf_s, None, <wchar.h>)
+SYMBOL(vwprintf, None, <wchar.h>)
+SYMBOL(vwprintf_s, None, <wchar.h>)
+SYMBOL(vwscanf, None, <wchar.h>)
+SYMBOL(vwscanf_s, None, <wchar.h>)
+SYMBOL(wchar_t, None, <wchar.h>)
+SYMBOL(wcrtomb, None, <wchar.h>)
+SYMBOL(wcrtomb_s, None, <wchar.h>)
+SYMBOL(wcscat, None, <wchar.h>)
+SYMBOL(wcscat_s, None, <wchar.h>)
+SYMBOL(wcschr, None, <wchar.h>)
+SYMBOL(wcscmp, None, <wchar.h>)
+SYMBOL(wcscoll, None, <wchar.h>)
+SYMBOL(wcscpy, None, <wchar.h>)
+SYMBOL(wcscpy_s, None, <wchar.h>)
+SYMBOL(wcscspn, None, <wchar.h>)
+SYMBOL(wcsftime, None, <wchar.h>)
+SYMBOL(wcslen, None, <wchar.h>)
+SYMBOL(wcsncat, None, <wchar.h>)
+SYMBOL(wcsncat_s, None, <wchar.h>)
+SYMBOL(wcsncmp, None, <wchar.h>)
+SYMBOL(wcsncpy, None, <wchar.h>)
+SYMBOL(wcsncpy_s, None, <wchar.h>)
+SYMBOL(wcsnlen_s, None, <wchar.h>)
+SYMBOL(wcspbrk, None, <wchar.h>)
+SYMBOL(wcsrchr, None, <wchar.h>)
+SYMBOL(wcsrtombs, None, <wchar.h>)
+SYMBOL(wcsrtombs_s, None, <wchar.h>)
+SYMBOL(wcsspn, None, <wchar.h>)
+SYMBOL(wcsstr, None, <wchar.h>)
+SYMBOL(wcstod, None, <wchar.h>)
+SYMBOL(wcstof, None, <wchar.h>)
+SYMBOL(wcstoimax, None, <inttypes.h>)
+SYMBOL(wcstok, None, <wchar.h>)
+SYMBOL(wcstok_s, None, <wchar.h>)
+SYMBOL(wcstol, None, <wchar.h>)
+SYMBOL(wcstold, None, <wchar.h>)
+SYMBOL(wcstoll, None, <wchar.h>)
+SYMBOL(wcstombs, None, <stdlib.h>)
+SYMBOL(wcstombs_s, None, <stdlib.h>)
+SYMBOL(wcstoul, None, <wchar.h>)
+SYMBOL(wcstoull, None, <wchar.h>)
+SYMBOL(wcstoumax, None, <inttypes.h>)
+SYMBOL(wcsxfrm, None, <wchar.h>)
+SYMBOL(wctob, None, <wchar.h>)
+SYMBOL(wctomb, None, <stdlib.h>)
+SYMBOL(wctomb_s, None, <stdlib.h>)
+SYMBOL(wctrans, None, <wctype.h>)
+SYMBOL(wctrans_t, None, <wctype.h>)
+SYMBOL(wctype, None, <wctype.h>)
+SYMBOL(wctype_t, None, <wctype.h>)
+SYMBOL(wint_t, None, <wctype.h>)
+SYMBOL(wmemchr, None, <wchar.h>)
+SYMBOL(wmemcmp, None, <wchar.h>)
+SYMBOL(wmemcpy, None, <wchar.h>)
+SYMBOL(wmemcpy_s, None, <wchar.h>)
+SYMBOL(wmemmove, None, <wchar.h>)
+SYMBOL(wmemmove_s, None, <wchar.h>)
+SYMBOL(wmemset, None, <wchar.h>)
+SYMBOL(wprintf, None, <wchar.h>)
+SYMBOL(wprintf_s, None, <wchar.h>)
+SYMBOL(wscanf, None, <wchar.h>)
+SYMBOL(wscanf_s, None, <wchar.h>)
+SYMBOL(xor, None, <iso646.h>)
+SYMBOL(xor_eq, None, <iso646.h>)
diff --git a/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StandardLibrary.cpp b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StandardLibrary.cpp
new file mode 100644
index 000000000000..adf1b230ff03
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StandardLibrary.cpp
@@ -0,0 +1,323 @@
+//===--- StandardLibrary.cpp ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/Inclusions/StandardLibrary.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
+#include <optional>
+
+namespace clang {
+namespace tooling {
+namespace stdlib {
+
+namespace {
+// Symbol name -> Symbol::ID, within a namespace.
+using NSSymbolMap = llvm::DenseMap<llvm::StringRef, unsigned>;
+
+// A Mapping per language.
+struct SymbolHeaderMapping {
+ llvm::StringRef *HeaderNames = nullptr;
+ // Header name => Header::ID
+ llvm::DenseMap<llvm::StringRef, unsigned> *HeaderIDs;
+
+ unsigned SymbolCount = 0;
+ // Symbol::ID => symbol qualified_name/name/scope
+ struct SymbolName {
+ const char *Data; // std::vector
+ unsigned ScopeLen; // ~~~~~
+ unsigned NameLen; // ~~~~~~
+ StringRef scope() const { return StringRef(Data, ScopeLen); }
+ StringRef name() const { return StringRef(Data + ScopeLen, NameLen); }
+ StringRef qualifiedName() const {
+ return StringRef(Data, ScopeLen + NameLen);
+ }
+ } *SymbolNames = nullptr;
+ // Symbol name -> Symbol::ID, within a namespace.
+ llvm::DenseMap<llvm::StringRef, NSSymbolMap *> *NamespaceSymbols = nullptr;
+ // Symbol::ID => Header::ID
+ llvm::SmallVector<unsigned> *SymbolHeaderIDs = nullptr;
+};
+} // namespace
+static SymbolHeaderMapping
+ *LanguageMappings[static_cast<unsigned>(Lang::LastValue) + 1];
+static const SymbolHeaderMapping *getMappingPerLang(Lang L) {
+ return LanguageMappings[static_cast<unsigned>(L)];
+}
+
+static int countSymbols(Lang Language) {
+ ArrayRef<const char*> Symbols;
+#define SYMBOL(Name, NS, Header) #NS #Name,
+ switch (Language) {
+ case Lang::C: {
+ static constexpr const char *CSymbols[] = {
+#include "CSymbolMap.inc"
+ };
+ Symbols = CSymbols;
+ break;
+ }
+ case Lang::CXX: {
+ static constexpr const char *CXXSymbols[] = {
+#include "StdSpecialSymbolMap.inc"
+#include "StdSymbolMap.inc"
+#include "StdTsSymbolMap.inc"
+ };
+ Symbols = CXXSymbols;
+ break;
+ }
+ }
+#undef SYMBOL
+ return llvm::DenseSet<StringRef>(Symbols.begin(), Symbols.end()).size();
+}
+
+static int initialize(Lang Language) {
+ SymbolHeaderMapping *Mapping = new SymbolHeaderMapping();
+ LanguageMappings[static_cast<unsigned>(Language)] = Mapping;
+
+ unsigned SymCount = countSymbols(Language);
+ Mapping->SymbolCount = SymCount;
+ Mapping->SymbolNames =
+ new std::remove_reference_t<decltype(*Mapping->SymbolNames)>[SymCount];
+ Mapping->SymbolHeaderIDs = new std::remove_reference_t<
+ decltype(*Mapping->SymbolHeaderIDs)>[SymCount];
+ Mapping->NamespaceSymbols =
+ new std::remove_reference_t<decltype(*Mapping->NamespaceSymbols)>;
+ Mapping->HeaderIDs =
+ new std::remove_reference_t<decltype(*Mapping->HeaderIDs)>;
+ auto AddNS = [&](llvm::StringRef NS) -> NSSymbolMap & {
+ auto R = Mapping->NamespaceSymbols->try_emplace(NS, nullptr);
+ if (R.second)
+ R.first->second = new NSSymbolMap();
+ return *R.first->second;
+ };
+
+ auto AddHeader = [&](llvm::StringRef Header) -> unsigned {
+ return Mapping->HeaderIDs->try_emplace(Header, Mapping->HeaderIDs->size())
+ .first->second;
+ };
+
+ auto Add = [&, SymIndex(-1)](llvm::StringRef QName, unsigned NSLen,
+ llvm::StringRef HeaderName) mutable {
+ // Correct "Nonefoo" => foo.
+ // FIXME: get rid of "None" from the generated mapping files.
+ if (QName.take_front(NSLen) == "None") {
+ QName = QName.drop_front(NSLen);
+ NSLen = 0;
+ }
+
+ if (SymIndex >= 0 &&
+ Mapping->SymbolNames[SymIndex].qualifiedName() == QName) {
+ // Not a new symbol, use the same index.
+ assert(llvm::none_of(llvm::ArrayRef(Mapping->SymbolNames, SymIndex),
+ [&QName](const SymbolHeaderMapping::SymbolName &S) {
+ return S.qualifiedName() == QName;
+ }) &&
+ "The symbol has been added before, make sure entries in the .inc "
+ "file are grouped by symbol name!");
+ } else {
+ // First symbol or new symbol, increment next available index.
+ ++SymIndex;
+ }
+ Mapping->SymbolNames[SymIndex] = {
+ QName.data(), NSLen, static_cast<unsigned int>(QName.size() - NSLen)};
+ if (!HeaderName.empty())
+ Mapping->SymbolHeaderIDs[SymIndex].push_back(AddHeader(HeaderName));
+
+ NSSymbolMap &NSSymbols = AddNS(QName.take_front(NSLen));
+ NSSymbols.try_emplace(QName.drop_front(NSLen), SymIndex);
+ };
+
+ struct Symbol {
+ const char *QName;
+ unsigned NSLen;
+ const char *HeaderName;
+ };
+#define SYMBOL(Name, NS, Header) \
+ {#NS #Name, static_cast<decltype(Symbol::NSLen)>(StringRef(#NS).size()), \
+ #Header},
+ switch (Language) {
+ case Lang::C: {
+ static constexpr Symbol CSymbols[] = {
+#include "CSymbolMap.inc"
+ };
+ for (const Symbol &S : CSymbols)
+ Add(S.QName, S.NSLen, S.HeaderName);
+ break;
+ }
+ case Lang::CXX: {
+ static constexpr Symbol CXXSymbols[] = {
+#include "StdSpecialSymbolMap.inc"
+#include "StdSymbolMap.inc"
+#include "StdTsSymbolMap.inc"
+ };
+ for (const Symbol &S : CXXSymbols)
+ Add(S.QName, S.NSLen, S.HeaderName);
+ break;
+ }
+ }
+#undef SYMBOL
+
+ Mapping->HeaderNames = new llvm::StringRef[Mapping->HeaderIDs->size()];
+ for (const auto &E : *Mapping->HeaderIDs)
+ Mapping->HeaderNames[E.second] = E.first;
+
+ return 0;
+}
+
+static void ensureInitialized() {
+ static int Dummy = []() {
+ for (unsigned L = 0; L <= static_cast<unsigned>(Lang::LastValue); ++L)
+ initialize(static_cast<Lang>(L));
+ return 0;
+ }();
+ (void)Dummy;
+}
+
+std::vector<Header> Header::all(Lang L) {
+ ensureInitialized();
+ std::vector<Header> Result;
+ const auto *Mapping = getMappingPerLang(L);
+ Result.reserve(Mapping->HeaderIDs->size());
+ for (unsigned I = 0, E = Mapping->HeaderIDs->size(); I < E; ++I)
+ Result.push_back(Header(I, L));
+ return Result;
+}
+std::optional<Header> Header::named(llvm::StringRef Name, Lang L) {
+ ensureInitialized();
+ const auto *Mapping = getMappingPerLang(L);
+ auto It = Mapping->HeaderIDs->find(Name);
+ if (It == Mapping->HeaderIDs->end())
+ return std::nullopt;
+ return Header(It->second, L);
+}
+llvm::StringRef Header::name() const {
+ return getMappingPerLang(Language)->HeaderNames[ID];
+}
+
+std::vector<Symbol> Symbol::all(Lang L) {
+ ensureInitialized();
+ std::vector<Symbol> Result;
+ const auto *Mapping = getMappingPerLang(L);
+ Result.reserve(Mapping->SymbolCount);
+ for (unsigned I = 0, E = Mapping->SymbolCount; I < E; ++I)
+ Result.push_back(Symbol(I, L));
+ return Result;
+}
+llvm::StringRef Symbol::scope() const {
+ return getMappingPerLang(Language)->SymbolNames[ID].scope();
+}
+llvm::StringRef Symbol::name() const {
+ return getMappingPerLang(Language)->SymbolNames[ID].name();
+}
+llvm::StringRef Symbol::qualifiedName() const {
+ return getMappingPerLang(Language)->SymbolNames[ID].qualifiedName();
+}
+std::optional<Symbol> Symbol::named(llvm::StringRef Scope, llvm::StringRef Name,
+ Lang L) {
+ ensureInitialized();
+
+ if (NSSymbolMap *NSSymbols =
+ getMappingPerLang(L)->NamespaceSymbols->lookup(Scope)) {
+ auto It = NSSymbols->find(Name);
+ if (It != NSSymbols->end())
+ return Symbol(It->second, L);
+ }
+ return std::nullopt;
+}
+std::optional<Header> Symbol::header() const {
+ const auto& Headers = getMappingPerLang(Language)->SymbolHeaderIDs[ID];
+ if (Headers.empty())
+ return std::nullopt;
+ return Header(Headers.front(), Language);
+}
+llvm::SmallVector<Header> Symbol::headers() const {
+ llvm::SmallVector<Header> Results;
+ for (auto HeaderID : getMappingPerLang(Language)->SymbolHeaderIDs[ID])
+ Results.emplace_back(Header(HeaderID, Language));
+ return Results;
+}
+
+Recognizer::Recognizer() { ensureInitialized(); }
+
+NSSymbolMap *Recognizer::namespaceSymbols(const DeclContext *DC, Lang L) {
+ if (DC->isTranslationUnit()) // global scope.
+ return getMappingPerLang(L)->NamespaceSymbols->lookup("");
+
+ auto It = NamespaceCache.find(DC);
+ if (It != NamespaceCache.end())
+ return It->second;
+ const NamespaceDecl *D = llvm::cast<NamespaceDecl>(DC);
+ NSSymbolMap *Result = [&]() -> NSSymbolMap * {
+ if (D->isAnonymousNamespace())
+ return nullptr;
+ // Print the namespace and its parents ommitting inline scopes.
+ std::string Scope;
+ for (const auto *ND = D; ND;
+ ND = llvm::dyn_cast_or_null<NamespaceDecl>(ND->getParent()))
+ if (!ND->isInlineNamespace() && !ND->isAnonymousNamespace())
+ Scope = ND->getName().str() + "::" + Scope;
+ return getMappingPerLang(L)->NamespaceSymbols->lookup(Scope);
+ }();
+ NamespaceCache.try_emplace(D, Result);
+ return Result;
+}
+
+std::optional<Symbol> Recognizer::operator()(const Decl *D) {
+ Lang L;
+ if (D->getLangOpts().CPlusPlus)
+ L = Lang::CXX;
+ else if (D->getLangOpts().C99)
+ L = Lang::C;
+ else
+ return std::nullopt; // not a supported language.
+
+ // If D is std::vector::iterator, `vector` is the outer symbol to look up.
+ // We keep all the candidate DCs as some may turn out to be anon enums.
+ // Do this resolution lazily as we may turn out not to have a std namespace.
+ llvm::SmallVector<const DeclContext *> IntermediateDecl;
+ const DeclContext *DC = D->getDeclContext();
+ if (!DC) // The passed D is a TranslationUnitDecl!
+ return std::nullopt;
+ while (!DC->isNamespace() && !DC->isTranslationUnit()) {
+ if (NamedDecl::classofKind(DC->getDeclKind()))
+ IntermediateDecl.push_back(DC);
+ DC = DC->getParent();
+ }
+ NSSymbolMap *Symbols = namespaceSymbols(DC, L);
+ if (!Symbols)
+ return std::nullopt;
+
+ llvm::StringRef Name = [&]() -> llvm::StringRef {
+ for (const auto *SymDC : llvm::reverse(IntermediateDecl)) {
+ DeclarationName N = cast<NamedDecl>(SymDC)->getDeclName();
+ if (const auto *II = N.getAsIdentifierInfo())
+ return II->getName();
+ if (!N.isEmpty())
+ return ""; // e.g. operator<: give up
+ }
+ if (const auto *ND = llvm::dyn_cast<NamedDecl>(D))
+ if (const auto *II = ND->getIdentifier())
+ return II->getName();
+ return "";
+ }();
+ if (Name.empty())
+ return std::nullopt;
+
+ auto It = Symbols->find(Name);
+ if (It == Symbols->end())
+ return std::nullopt;
+ return Symbol(It->second, L);
+}
+
+} // namespace stdlib
+} // namespace tooling
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdSpecialSymbolMap.inc b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdSpecialSymbolMap.inc
new file mode 100644
index 000000000000..0d351d688a32
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdSpecialSymbolMap.inc
@@ -0,0 +1,739 @@
+//===-- StdSpecialSymbolMap.inc ---------------------------------*- C++ -*-===//
+//
+// This is a hand-curated list for C++ symbols that cannot be parsed/extracted
+// via the include-mapping tool (gen_std.py).
+//
+//===----------------------------------------------------------------------===//
+
+// Symbols that can be provided by any of the headers, ordered by the header
+// preference.
+// cppreference mentions the <locale> header is an alternative for these symbols,
+// but they are not per the standard.
+SYMBOL(consume_header, std::, <codecvt>)
+SYMBOL(generate_header, std::, <codecvt>)
+SYMBOL(little_endian, std::, <codecvt>)
+
+SYMBOL(mbstate_t, std::, <cwchar>)
+SYMBOL(mbstate_t, std::, <cuchar>)
+SYMBOL(size_t, std::, <cstddef>)
+SYMBOL(size_t, std::, <cstdlib>)
+SYMBOL(size_t, std::, <cstring>)
+SYMBOL(size_t, std::, <cwchar>)
+SYMBOL(size_t, std::, <cuchar>)
+SYMBOL(size_t, std::, <ctime>)
+SYMBOL(size_t, std::, <cstdio>)
+SYMBOL(size_t, None, <cstddef>)
+SYMBOL(size_t, None, <cstdlib>)
+SYMBOL(size_t, None, <cstring>)
+SYMBOL(size_t, None, <cwchar>)
+SYMBOL(size_t, None, <cuchar>)
+SYMBOL(size_t, None, <ctime>)
+SYMBOL(size_t, None, <cstdio>)
+SYMBOL(size_t, None, <stddef.h>)
+SYMBOL(size_t, None, <stdlib.h>)
+SYMBOL(size_t, None, <string.h>)
+SYMBOL(size_t, None, <wchar.h>)
+SYMBOL(size_t, None, <uchar.h>)
+SYMBOL(size_t, None, <time.h>)
+SYMBOL(size_t, None, <stdio.h>)
+SYMBOL(unwrap_ref_decay, std::, <type_traits>)
+SYMBOL(unwrap_ref_decay, std::, <functional>)
+SYMBOL(unwrap_reference, std::, <type_traits>)
+SYMBOL(unwrap_reference, std::, <functional>)
+SYMBOL(unwrap_ref_decay_t, std::, <type_traits>)
+SYMBOL(unwrap_ref_decay_t, std::, <functional>)
+SYMBOL(wint_t, std::, <cwctype>)
+SYMBOL(wint_t, std::, <cwchar>)
+SYMBOL(swap, std::, <utility>)
+SYMBOL(swap, std::, <algorithm>) // until C++11
+// C++ [string.view.synop 23.3.2]: The function templates defined in
+// [utility.swap] ... are available when <string_­view> is included.
+SYMBOL(swap, std::, <string_view>) // since C++17
+// C++ [tuple.helper 22.4.7]: In addition to being available via inclusion of
+// the <tuple> header, ... any of the headers <array>, <ranges>, or <utility>
+// are included.
+SYMBOL(tuple_size, std::, <tuple>)
+SYMBOL(tuple_size, std::, <array>)
+SYMBOL(tuple_size, std::, <ranges>)
+SYMBOL(tuple_size, std::, <utility>)
+SYMBOL(tuple_element, std::, <tuple>)
+SYMBOL(tuple_element, std::, <array>)
+SYMBOL(tuple_element, std::, <ranges>)
+SYMBOL(tuple_element, std::, <utility>)
+// C++ [iterator.range 25.7]: In addition to being available via inclusion of
+// the <iterator> header, the function templates in [iterator.range] are
+// available when any of the following headers are included: <array>, <deque>,
+// <forward_­list>, ... and <vector>.
+SYMBOL(begin, std::, <iterator>)
+SYMBOL(begin, std::, <array>)
+SYMBOL(begin, std::, <deque>)
+SYMBOL(begin, std::, <forward_list>)
+SYMBOL(begin, std::, <list>)
+SYMBOL(begin, std::, <map>)
+SYMBOL(begin, std::, <regex>)
+SYMBOL(begin, std::, <set>)
+SYMBOL(begin, std::, <span>)
+SYMBOL(begin, std::, <string>)
+SYMBOL(begin, std::, <string_view>)
+SYMBOL(begin, std::, <unordered_map>)
+SYMBOL(begin, std::, <unordered_set>)
+SYMBOL(begin, std::, <vector>)
+SYMBOL(cbegin, std::, <iterator>)
+SYMBOL(cbegin, std::, <array>)
+SYMBOL(cbegin, std::, <deque>)
+SYMBOL(cbegin, std::, <forward_list>)
+SYMBOL(cbegin, std::, <list>)
+SYMBOL(cbegin, std::, <map>)
+SYMBOL(cbegin, std::, <regex>)
+SYMBOL(cbegin, std::, <set>)
+SYMBOL(cbegin, std::, <span>)
+SYMBOL(cbegin, std::, <string>)
+SYMBOL(cbegin, std::, <string_view>)
+SYMBOL(cbegin, std::, <unordered_map>)
+SYMBOL(cbegin, std::, <unordered_set>)
+SYMBOL(cbegin, std::, <vector>)
+SYMBOL(cend, std::, <iterator>)
+SYMBOL(cend, std::, <array>)
+SYMBOL(cend, std::, <deque>)
+SYMBOL(cend, std::, <forward_list>)
+SYMBOL(cend, std::, <list>)
+SYMBOL(cend, std::, <map>)
+SYMBOL(cend, std::, <regex>)
+SYMBOL(cend, std::, <set>)
+SYMBOL(cend, std::, <span>)
+SYMBOL(cend, std::, <string>)
+SYMBOL(cend, std::, <string_view>)
+SYMBOL(cend, std::, <unordered_map>)
+SYMBOL(cend, std::, <unordered_set>)
+SYMBOL(cend, std::, <vector>)
+SYMBOL(crbegin, std::, <iterator>)
+SYMBOL(crbegin, std::, <array>)
+SYMBOL(crbegin, std::, <deque>)
+SYMBOL(crbegin, std::, <forward_list>)
+SYMBOL(crbegin, std::, <list>)
+SYMBOL(crbegin, std::, <map>)
+SYMBOL(crbegin, std::, <regex>)
+SYMBOL(crbegin, std::, <set>)
+SYMBOL(crbegin, std::, <span>)
+SYMBOL(crbegin, std::, <string>)
+SYMBOL(crbegin, std::, <string_view>)
+SYMBOL(crbegin, std::, <unordered_map>)
+SYMBOL(crbegin, std::, <unordered_set>)
+SYMBOL(crbegin, std::, <vector>)
+SYMBOL(crend, std::, <iterator>)
+SYMBOL(crend, std::, <array>)
+SYMBOL(crend, std::, <deque>)
+SYMBOL(crend, std::, <forward_list>)
+SYMBOL(crend, std::, <list>)
+SYMBOL(crend, std::, <map>)
+SYMBOL(crend, std::, <regex>)
+SYMBOL(crend, std::, <set>)
+SYMBOL(crend, std::, <span>)
+SYMBOL(crend, std::, <string>)
+SYMBOL(crend, std::, <string_view>)
+SYMBOL(crend, std::, <unordered_map>)
+SYMBOL(crend, std::, <unordered_set>)
+SYMBOL(crend, std::, <vector>)
+SYMBOL(data, std::, <iterator>)
+SYMBOL(data, std::, <array>)
+SYMBOL(data, std::, <deque>)
+SYMBOL(data, std::, <forward_list>)
+SYMBOL(data, std::, <list>)
+SYMBOL(data, std::, <map>)
+SYMBOL(data, std::, <regex>)
+SYMBOL(data, std::, <set>)
+SYMBOL(data, std::, <span>)
+SYMBOL(data, std::, <string>)
+SYMBOL(data, std::, <string_view>)
+SYMBOL(data, std::, <unordered_map>)
+SYMBOL(data, std::, <unordered_set>)
+SYMBOL(data, std::, <vector>)
+SYMBOL(empty, std::, <iterator>)
+SYMBOL(empty, std::, <array>)
+SYMBOL(empty, std::, <deque>)
+SYMBOL(empty, std::, <forward_list>)
+SYMBOL(empty, std::, <list>)
+SYMBOL(empty, std::, <map>)
+SYMBOL(empty, std::, <regex>)
+SYMBOL(empty, std::, <set>)
+SYMBOL(empty, std::, <span>)
+SYMBOL(empty, std::, <string>)
+SYMBOL(empty, std::, <string_view>)
+SYMBOL(empty, std::, <unordered_map>)
+SYMBOL(empty, std::, <unordered_set>)
+SYMBOL(empty, std::, <vector>)
+SYMBOL(end, std::, <iterator>)
+SYMBOL(end, std::, <array>)
+SYMBOL(end, std::, <deque>)
+SYMBOL(end, std::, <forward_list>)
+SYMBOL(end, std::, <list>)
+SYMBOL(end, std::, <map>)
+SYMBOL(end, std::, <regex>)
+SYMBOL(end, std::, <set>)
+SYMBOL(end, std::, <span>)
+SYMBOL(end, std::, <string>)
+SYMBOL(end, std::, <string_view>)
+SYMBOL(end, std::, <unordered_map>)
+SYMBOL(end, std::, <unordered_set>)
+SYMBOL(end, std::, <vector>)
+SYMBOL(rbegin, std::, <iterator>)
+SYMBOL(rbegin, std::, <array>)
+SYMBOL(rbegin, std::, <deque>)
+SYMBOL(rbegin, std::, <forward_list>)
+SYMBOL(rbegin, std::, <list>)
+SYMBOL(rbegin, std::, <map>)
+SYMBOL(rbegin, std::, <regex>)
+SYMBOL(rbegin, std::, <set>)
+SYMBOL(rbegin, std::, <span>)
+SYMBOL(rbegin, std::, <string>)
+SYMBOL(rbegin, std::, <string_view>)
+SYMBOL(rbegin, std::, <unordered_map>)
+SYMBOL(rbegin, std::, <unordered_set>)
+SYMBOL(rbegin, std::, <vector>)
+SYMBOL(rend, std::, <iterator>)
+SYMBOL(rend, std::, <array>)
+SYMBOL(rend, std::, <deque>)
+SYMBOL(rend, std::, <forward_list>)
+SYMBOL(rend, std::, <list>)
+SYMBOL(rend, std::, <map>)
+SYMBOL(rend, std::, <regex>)
+SYMBOL(rend, std::, <set>)
+SYMBOL(rend, std::, <span>)
+SYMBOL(rend, std::, <string>)
+SYMBOL(rend, std::, <string_view>)
+SYMBOL(rend, std::, <unordered_map>)
+SYMBOL(rend, std::, <unordered_set>)
+SYMBOL(rend, std::, <vector>)
+SYMBOL(size, std::, <iterator>)
+SYMBOL(size, std::, <array>)
+SYMBOL(size, std::, <deque>)
+SYMBOL(size, std::, <forward_list>)
+SYMBOL(size, std::, <list>)
+SYMBOL(size, std::, <map>)
+SYMBOL(size, std::, <regex>)
+SYMBOL(size, std::, <set>)
+SYMBOL(size, std::, <span>)
+SYMBOL(size, std::, <string>)
+SYMBOL(size, std::, <string_view>)
+SYMBOL(size, std::, <unordered_map>)
+SYMBOL(size, std::, <unordered_set>)
+SYMBOL(size, std::, <vector>)
+SYMBOL(ssize, std::, <iterator>)
+SYMBOL(ssize, std::, <array>)
+SYMBOL(ssize, std::, <deque>)
+SYMBOL(ssize, std::, <forward_list>)
+SYMBOL(ssize, std::, <list>)
+SYMBOL(ssize, std::, <map>)
+SYMBOL(ssize, std::, <regex>)
+SYMBOL(ssize, std::, <set>)
+SYMBOL(ssize, std::, <span>)
+SYMBOL(ssize, std::, <string>)
+SYMBOL(ssize, std::, <string_view>)
+SYMBOL(ssize, std::, <unordered_map>)
+SYMBOL(ssize, std::, <unordered_set>)
+SYMBOL(ssize, std::, <vector>)
+
+// Add headers for generic integer-type abs.
+// Ignore other variants (std::complex, std::valarray, std::intmax_t)
+SYMBOL(abs, std::, <cstdlib>)
+SYMBOL(abs, std::, <cmath>)
+SYMBOL(abs, None, <cstdlib>)
+SYMBOL(abs, None, <stdlib.h>)
+SYMBOL(abs, None, <cmath>)
+SYMBOL(abs, None, <math.h>)
+
+// Only add headers for the generic atomic template.
+// Ignore variants (std::weak_ptr, std::shared_ptr).
+SYMBOL(atomic, std::, <atomic>)
+// atomic_* family symbols. <stdatomic.h> is for C compatibility.
+SYMBOL(atomic_bool, std::, <atomic>)
+SYMBOL(atomic_bool, None, <stdatomic.h>)
+SYMBOL(atomic_char, std::, <atomic>)
+SYMBOL(atomic_char, None, <stdatomic.h>)
+SYMBOL(atomic_char16_t, std::, <atomic>)
+SYMBOL(atomic_char16_t, None, <stdatomic.h>)
+SYMBOL(atomic_char32_t, std::, <atomic>)
+SYMBOL(atomic_char32_t, None, <stdatomic.h>)
+SYMBOL(atomic_char8_t, std::, <atomic>)
+SYMBOL(atomic_char8_t, None, <stdatomic.h>)
+SYMBOL(atomic_int, std::, <atomic>)
+SYMBOL(atomic_int, None, <stdatomic.h>)
+SYMBOL(atomic_int16_t, std::, <atomic>)
+SYMBOL(atomic_int16_t, None, <stdatomic.h>)
+SYMBOL(atomic_int32_t, std::, <atomic>)
+SYMBOL(atomic_int32_t, None, <stdatomic.h>)
+SYMBOL(atomic_int64_t, std::, <atomic>)
+SYMBOL(atomic_int64_t, None, <stdatomic.h>)
+SYMBOL(atomic_int8_t, std::, <atomic>)
+SYMBOL(atomic_int8_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_fast16_t, std::, <atomic>)
+SYMBOL(atomic_int_fast16_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_fast32_t, std::, <atomic>)
+SYMBOL(atomic_int_fast32_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_fast64_t, std::, <atomic>)
+SYMBOL(atomic_int_fast64_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_fast8_t, std::, <atomic>)
+SYMBOL(atomic_int_fast8_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_least16_t, std::, <atomic>)
+SYMBOL(atomic_int_least16_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_least32_t, std::, <atomic>)
+SYMBOL(atomic_int_least32_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_least64_t, std::, <atomic>)
+SYMBOL(atomic_int_least64_t, None, <stdatomic.h>)
+SYMBOL(atomic_int_least8_t, std::, <atomic>)
+SYMBOL(atomic_int_least8_t, None, <stdatomic.h>)
+SYMBOL(atomic_intmax_t, std::, <atomic>)
+SYMBOL(atomic_intmax_t, None, <stdatomic.h>)
+SYMBOL(atomic_intptr_t, std::, <atomic>)
+SYMBOL(atomic_intptr_t, None, <stdatomic.h>)
+SYMBOL(atomic_llong, std::, <atomic>)
+SYMBOL(atomic_llong, None, <stdatomic.h>)
+SYMBOL(atomic_long, std::, <atomic>)
+SYMBOL(atomic_long, None, <stdatomic.h>)
+SYMBOL(atomic_ptrdiff_t, std::, <atomic>)
+SYMBOL(atomic_ptrdiff_t, None, <stdatomic.h>)
+SYMBOL(atomic_schar, std::, <atomic>)
+SYMBOL(atomic_schar, None, <stdatomic.h>)
+SYMBOL(atomic_short, std::, <atomic>)
+SYMBOL(atomic_short, None, <stdatomic.h>)
+SYMBOL(atomic_signed_lock_free, std::, <atomic>)
+SYMBOL(atomic_signed_lock_free, None, <stdatomic.h>)
+SYMBOL(atomic_size_t, std::, <atomic>)
+SYMBOL(atomic_size_t, None, <stdatomic.h>)
+SYMBOL(atomic_uchar, std::, <atomic>)
+SYMBOL(atomic_uchar, None, <stdatomic.h>)
+SYMBOL(atomic_uint, std::, <atomic>)
+SYMBOL(atomic_uint, None, <stdatomic.h>)
+SYMBOL(atomic_uint16_t, std::, <atomic>)
+SYMBOL(atomic_uint16_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint32_t, std::, <atomic>)
+SYMBOL(atomic_uint32_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint64_t, std::, <atomic>)
+SYMBOL(atomic_uint64_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint8_t, std::, <atomic>)
+SYMBOL(atomic_uint8_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_fast16_t, std::, <atomic>)
+SYMBOL(atomic_uint_fast16_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_fast32_t, std::, <atomic>)
+SYMBOL(atomic_uint_fast32_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_fast64_t, std::, <atomic>)
+SYMBOL(atomic_uint_fast64_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_fast8_t, std::, <atomic>)
+SYMBOL(atomic_uint_fast8_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_least16_t, std::, <atomic>)
+SYMBOL(atomic_uint_least16_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_least32_t, std::, <atomic>)
+SYMBOL(atomic_uint_least32_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_least64_t, std::, <atomic>)
+SYMBOL(atomic_uint_least64_t, None, <stdatomic.h>)
+SYMBOL(atomic_uint_least8_t, std::, <atomic>)
+SYMBOL(atomic_uint_least8_t, None, <stdatomic.h>)
+SYMBOL(atomic_uintmax_t, std::, <atomic>)
+SYMBOL(atomic_uintmax_t, None, <stdatomic.h>)
+SYMBOL(atomic_uintptr_t, std::, <atomic>)
+SYMBOL(atomic_uintptr_t, None, <stdatomic.h>)
+SYMBOL(atomic_ullong, std::, <atomic>)
+SYMBOL(atomic_ullong, None, <stdatomic.h>)
+SYMBOL(atomic_ulong, std::, <atomic>)
+SYMBOL(atomic_ulong, None, <stdatomic.h>)
+SYMBOL(atomic_unsigned_lock_free, std::, <atomic>)
+SYMBOL(atomic_unsigned_lock_free, None, <stdatomic.h>)
+SYMBOL(atomic_ushort, std::, <atomic>)
+SYMBOL(atomic_ushort, None, <stdatomic.h>)
+SYMBOL(atomic_wchar_t, std::, <atomic>)
+SYMBOL(atomic_wchar_t, None, <stdatomic.h>)
+
+// std::get has a few variants for different types (tuple, array, pair etc)
+// which is tricky to disambiguate without type information.
+// Don't set any header for it, as it comes with the type header.
+SYMBOL(get, std::, /*no headers*/)
+// Similarly make_error_{code,condition} also have different overloads (errc,
+// io_errc, future_errc) and each of them are provided by relevant headers
+// providing the type.
+SYMBOL(make_error_code, std::, /*no headers*/)
+SYMBOL(make_error_condition, std::, /*no headers*/)
+
+// cppreference symbol index page was missing these symbols.
+// Remove them when the cppreference offline archive catches up.
+SYMBOL(index_sequence, std::, <utility>)
+SYMBOL(index_sequence_for, std::, <utility>)
+SYMBOL(make_index_sequence, std::, <utility>)
+SYMBOL(make_integer_sequence, std::, <utility>)
+
+// Symbols missing from the generated symbol map as reported by users.
+// Remove when the generator starts producing them.
+SYMBOL(make_any, std::, <any>)
+SYMBOL(any_cast, std::, <any>)
+SYMBOL(div, std::, <cstdlib>)
+SYMBOL(abort, std::, <cstdlib>)
+
+// These are C symbols that are not under std namespace.
+SYMBOL(localtime_r, None, <ctime>)
+SYMBOL(localtime_r, None, <time.h>)
+SYMBOL(localtime_s, None, <ctime>)
+SYMBOL(localtime_s, None, <time.h>)
+SYMBOL(gmtime_r, None, <ctime>)
+SYMBOL(gmtime_r, None, <time.h>)
+SYMBOL(gmtime_s, None, <ctime>)
+SYMBOL(gmtime_s, None, <time.h>)
+
+// The std::placeholder symbols (_1, ..., _N) are listed in the cppreference
+// placeholder.html, but the index only contains a single entry with "_1, _2, ..., _N"
+// text, which are not handled by the script.
+// N is an implementation-defined number (10 for libc++; 29 for libstdc++).
+SYMBOL(_1, std::placeholders::, <functional>)
+SYMBOL(_2, std::placeholders::, <functional>)
+SYMBOL(_3, std::placeholders::, <functional>)
+SYMBOL(_4, std::placeholders::, <functional>)
+SYMBOL(_5, std::placeholders::, <functional>)
+SYMBOL(_6, std::placeholders::, <functional>)
+SYMBOL(_7, std::placeholders::, <functional>)
+SYMBOL(_8, std::placeholders::, <functional>)
+SYMBOL(_9, std::placeholders::, <functional>)
+SYMBOL(_10, std::placeholders::, <functional>)
+SYMBOL(_11, std::placeholders::, <functional>)
+SYMBOL(_12, std::placeholders::, <functional>)
+SYMBOL(_13, std::placeholders::, <functional>)
+SYMBOL(_14, std::placeholders::, <functional>)
+SYMBOL(_15, std::placeholders::, <functional>)
+SYMBOL(_16, std::placeholders::, <functional>)
+SYMBOL(_17, std::placeholders::, <functional>)
+SYMBOL(_18, std::placeholders::, <functional>)
+SYMBOL(_19, std::placeholders::, <functional>)
+SYMBOL(_20, std::placeholders::, <functional>)
+SYMBOL(_21, std::placeholders::, <functional>)
+SYMBOL(_22, std::placeholders::, <functional>)
+SYMBOL(_23, std::placeholders::, <functional>)
+SYMBOL(_24, std::placeholders::, <functional>)
+SYMBOL(_25, std::placeholders::, <functional>)
+SYMBOL(_26, std::placeholders::, <functional>)
+SYMBOL(_27, std::placeholders::, <functional>)
+SYMBOL(_28, std::placeholders::, <functional>)
+SYMBOL(_29, std::placeholders::, <functional>)
+
+// Macros
+SYMBOL(NULL, None, <cstddef>)
+SYMBOL(NULL, None, <stddef.h>)
+SYMBOL(NULL, None, <cstdlib>)
+SYMBOL(NULL, None, <stdlib.h>)
+SYMBOL(NULL, None, <cstring>)
+SYMBOL(NULL, None, <string.h>)
+SYMBOL(NULL, None, <cwchar>)
+SYMBOL(NULL, None, <wchar.h>)
+SYMBOL(NULL, None, <ctime>)
+SYMBOL(NULL, None, <time.h>)
+SYMBOL(NULL, None, <clocale>)
+SYMBOL(NULL, None, <locale.h>)
+SYMBOL(NULL, None, <cstdio>)
+SYMBOL(NULL, None, <stdio.h>)
+
+// Theres are macros that not spelled out in page linked from the index.
+// Extracted from https://en.cppreference.com/w/cpp/header/cinttypes
+SYMBOL(PRId8, None, <cinttypes>)
+SYMBOL(PRId8, None, <inttypes.h>)
+SYMBOL(PRId16, None, <cinttypes>)
+SYMBOL(PRId16, None, <inttypes.h>)
+SYMBOL(PRId32, None, <cinttypes>)
+SYMBOL(PRId32, None, <inttypes.h>)
+SYMBOL(PRId64, None, <cinttypes>)
+SYMBOL(PRId64, None, <inttypes.h>)
+SYMBOL(PRIdLEAST8, None, <cinttypes>)
+SYMBOL(PRIdLEAST8, None, <inttypes.h>)
+SYMBOL(PRIdLEAST16, None, <cinttypes>)
+SYMBOL(PRIdLEAST16, None, <inttypes.h>)
+SYMBOL(PRIdLEAST32, None, <cinttypes>)
+SYMBOL(PRIdLEAST32, None, <inttypes.h>)
+SYMBOL(PRIdLEAST64, None, <cinttypes>)
+SYMBOL(PRIdLEAST64, None, <inttypes.h>)
+SYMBOL(PRIdFAST8, None, <cinttypes>)
+SYMBOL(PRIdFAST8, None, <inttypes.h>)
+SYMBOL(PRIdFAST16, None, <cinttypes>)
+SYMBOL(PRIdFAST16, None, <inttypes.h>)
+SYMBOL(PRIdFAST32, None, <cinttypes>)
+SYMBOL(PRIdFAST32, None, <inttypes.h>)
+SYMBOL(PRIdFAST64, None, <cinttypes>)
+SYMBOL(PRIdFAST64, None, <inttypes.h>)
+SYMBOL(PRIdMAX, None, <cinttypes>)
+SYMBOL(PRIdMAX, None, <inttypes.h>)
+SYMBOL(PRIdPTR, None, <cinttypes>)
+SYMBOL(PRIdPTR, None, <inttypes.h>)
+SYMBOL(PRIi8, None, <cinttypes>)
+SYMBOL(PRIi8, None, <inttypes.h>)
+SYMBOL(PRIi16, None, <cinttypes>)
+SYMBOL(PRIi16, None, <inttypes.h>)
+SYMBOL(PRIi32, None, <cinttypes>)
+SYMBOL(PRIi32, None, <inttypes.h>)
+SYMBOL(PRIi64, None, <cinttypes>)
+SYMBOL(PRIi64, None, <inttypes.h>)
+SYMBOL(PRIiLEAST8, None, <cinttypes>)
+SYMBOL(PRIiLEAST8, None, <inttypes.h>)
+SYMBOL(PRIiLEAST16, None, <cinttypes>)
+SYMBOL(PRIiLEAST16, None, <inttypes.h>)
+SYMBOL(PRIiLEAST32, None, <cinttypes>)
+SYMBOL(PRIiLEAST32, None, <inttypes.h>)
+SYMBOL(PRIiLEAST64, None, <cinttypes>)
+SYMBOL(PRIiLEAST64, None, <inttypes.h>)
+SYMBOL(PRIiFAST8, None, <cinttypes>)
+SYMBOL(PRIiFAST8, None, <inttypes.h>)
+SYMBOL(PRIiFAST16, None, <cinttypes>)
+SYMBOL(PRIiFAST16, None, <inttypes.h>)
+SYMBOL(PRIiFAST32, None, <cinttypes>)
+SYMBOL(PRIiFAST32, None, <inttypes.h>)
+SYMBOL(PRIiFAST64, None, <cinttypes>)
+SYMBOL(PRIiFAST64, None, <inttypes.h>)
+SYMBOL(PRIiMAX, None, <cinttypes>)
+SYMBOL(PRIiMAX, None, <inttypes.h>)
+SYMBOL(PRIiPTR, None, <cinttypes>)
+SYMBOL(PRIiPTR, None, <inttypes.h>)
+SYMBOL(PRIu8, None, <cinttypes>)
+SYMBOL(PRIu8, None, <inttypes.h>)
+SYMBOL(PRIu16, None, <cinttypes>)
+SYMBOL(PRIu16, None, <inttypes.h>)
+SYMBOL(PRIu32, None, <cinttypes>)
+SYMBOL(PRIu32, None, <inttypes.h>)
+SYMBOL(PRIu64, None, <cinttypes>)
+SYMBOL(PRIu64, None, <inttypes.h>)
+SYMBOL(PRIuLEAST8, None, <cinttypes>)
+SYMBOL(PRIuLEAST8, None, <inttypes.h>)
+SYMBOL(PRIuLEAST16, None, <cinttypes>)
+SYMBOL(PRIuLEAST16, None, <inttypes.h>)
+SYMBOL(PRIuLEAST32, None, <cinttypes>)
+SYMBOL(PRIuLEAST32, None, <inttypes.h>)
+SYMBOL(PRIuLEAST64, None, <cinttypes>)
+SYMBOL(PRIuLEAST64, None, <inttypes.h>)
+SYMBOL(PRIuFAST8, None, <cinttypes>)
+SYMBOL(PRIuFAST8, None, <inttypes.h>)
+SYMBOL(PRIuFAST16, None, <cinttypes>)
+SYMBOL(PRIuFAST16, None, <inttypes.h>)
+SYMBOL(PRIuFAST32, None, <cinttypes>)
+SYMBOL(PRIuFAST32, None, <inttypes.h>)
+SYMBOL(PRIuFAST64, None, <cinttypes>)
+SYMBOL(PRIuFAST64, None, <inttypes.h>)
+SYMBOL(PRIuMAX, None, <cinttypes>)
+SYMBOL(PRIuMAX, None, <inttypes.h>)
+SYMBOL(PRIuPTR, None, <cinttypes>)
+SYMBOL(PRIuPTR, None, <inttypes.h>)
+SYMBOL(PRIo8, None, <cinttypes>)
+SYMBOL(PRIo8, None, <inttypes.h>)
+SYMBOL(PRIo16, None, <cinttypes>)
+SYMBOL(PRIo16, None, <inttypes.h>)
+SYMBOL(PRIo32, None, <cinttypes>)
+SYMBOL(PRIo32, None, <inttypes.h>)
+SYMBOL(PRIo64, None, <cinttypes>)
+SYMBOL(PRIo64, None, <inttypes.h>)
+SYMBOL(PRIoLEAST8, None, <cinttypes>)
+SYMBOL(PRIoLEAST8, None, <inttypes.h>)
+SYMBOL(PRIoLEAST16, None, <cinttypes>)
+SYMBOL(PRIoLEAST16, None, <inttypes.h>)
+SYMBOL(PRIoLEAST32, None, <cinttypes>)
+SYMBOL(PRIoLEAST32, None, <inttypes.h>)
+SYMBOL(PRIoLEAST64, None, <cinttypes>)
+SYMBOL(PRIoLEAST64, None, <inttypes.h>)
+SYMBOL(PRIoFAST8, None, <cinttypes>)
+SYMBOL(PRIoFAST8, None, <inttypes.h>)
+SYMBOL(PRIoFAST16, None, <cinttypes>)
+SYMBOL(PRIoFAST16, None, <inttypes.h>)
+SYMBOL(PRIoFAST32, None, <cinttypes>)
+SYMBOL(PRIoFAST32, None, <inttypes.h>)
+SYMBOL(PRIoFAST64, None, <cinttypes>)
+SYMBOL(PRIoFAST64, None, <inttypes.h>)
+SYMBOL(PRIoMAX, None, <cinttypes>)
+SYMBOL(PRIoMAX, None, <inttypes.h>)
+SYMBOL(PRIoPTR, None, <cinttypes>)
+SYMBOL(PRIoPTR, None, <inttypes.h>)
+SYMBOL(PRIx8, None, <cinttypes>)
+SYMBOL(PRIx8, None, <inttypes.h>)
+SYMBOL(PRIx16, None, <cinttypes>)
+SYMBOL(PRIx16, None, <inttypes.h>)
+SYMBOL(PRIx32, None, <cinttypes>)
+SYMBOL(PRIx32, None, <inttypes.h>)
+SYMBOL(PRIx64, None, <cinttypes>)
+SYMBOL(PRIx64, None, <inttypes.h>)
+SYMBOL(PRIxLEAST8, None, <cinttypes>)
+SYMBOL(PRIxLEAST8, None, <inttypes.h>)
+SYMBOL(PRIxLEAST16, None, <cinttypes>)
+SYMBOL(PRIxLEAST16, None, <inttypes.h>)
+SYMBOL(PRIxLEAST32, None, <cinttypes>)
+SYMBOL(PRIxLEAST32, None, <inttypes.h>)
+SYMBOL(PRIxLEAST64, None, <cinttypes>)
+SYMBOL(PRIxLEAST64, None, <inttypes.h>)
+SYMBOL(PRIxFAST8, None, <cinttypes>)
+SYMBOL(PRIxFAST8, None, <inttypes.h>)
+SYMBOL(PRIxFAST16, None, <cinttypes>)
+SYMBOL(PRIxFAST16, None, <inttypes.h>)
+SYMBOL(PRIxFAST32, None, <cinttypes>)
+SYMBOL(PRIxFAST32, None, <inttypes.h>)
+SYMBOL(PRIxFAST64, None, <cinttypes>)
+SYMBOL(PRIxFAST64, None, <inttypes.h>)
+SYMBOL(PRIxMAX, None, <cinttypes>)
+SYMBOL(PRIxMAX, None, <inttypes.h>)
+SYMBOL(PRIxPTR, None, <cinttypes>)
+SYMBOL(PRIxPTR, None, <inttypes.h>)
+SYMBOL(PRIX8, None, <cinttypes>)
+SYMBOL(PRIX8, None, <inttypes.h>)
+SYMBOL(PRIX16, None, <cinttypes>)
+SYMBOL(PRIX16, None, <inttypes.h>)
+SYMBOL(PRIX32, None, <cinttypes>)
+SYMBOL(PRIX32, None, <inttypes.h>)
+SYMBOL(PRIX64, None, <cinttypes>)
+SYMBOL(PRIX64, None, <inttypes.h>)
+SYMBOL(PRIXLEAST8, None, <cinttypes>)
+SYMBOL(PRIXLEAST8, None, <inttypes.h>)
+SYMBOL(PRIXLEAST16, None, <cinttypes>)
+SYMBOL(PRIXLEAST16, None, <inttypes.h>)
+SYMBOL(PRIXLEAST32, None, <cinttypes>)
+SYMBOL(PRIXLEAST32, None, <inttypes.h>)
+SYMBOL(PRIXLEAST64, None, <cinttypes>)
+SYMBOL(PRIXLEAST64, None, <inttypes.h>)
+SYMBOL(PRIXFAST8, None, <cinttypes>)
+SYMBOL(PRIXFAST8, None, <inttypes.h>)
+SYMBOL(PRIXFAST16, None, <cinttypes>)
+SYMBOL(PRIXFAST16, None, <inttypes.h>)
+SYMBOL(PRIXFAST32, None, <cinttypes>)
+SYMBOL(PRIXFAST32, None, <inttypes.h>)
+SYMBOL(PRIXFAST64, None, <cinttypes>)
+SYMBOL(PRIXFAST64, None, <inttypes.h>)
+SYMBOL(PRIXMAX, None, <cinttypes>)
+SYMBOL(PRIXMAX, None, <inttypes.h>)
+SYMBOL(PRIXPTR, None, <cinttypes>)
+SYMBOL(PRIXPTR, None, <inttypes.h>)
+SYMBOL(SCNd8, None, <cinttypes>)
+SYMBOL(SCNd8, None, <inttypes.h>)
+SYMBOL(SCNd16, None, <cinttypes>)
+SYMBOL(SCNd16, None, <inttypes.h>)
+SYMBOL(SCNd32, None, <cinttypes>)
+SYMBOL(SCNd32, None, <inttypes.h>)
+SYMBOL(SCNd64, None, <cinttypes>)
+SYMBOL(SCNd64, None, <inttypes.h>)
+SYMBOL(SCNdLEAST8, None, <cinttypes>)
+SYMBOL(SCNdLEAST8, None, <inttypes.h>)
+SYMBOL(SCNdLEAST16, None, <cinttypes>)
+SYMBOL(SCNdLEAST16, None, <inttypes.h>)
+SYMBOL(SCNdLEAST32, None, <cinttypes>)
+SYMBOL(SCNdLEAST32, None, <inttypes.h>)
+SYMBOL(SCNdLEAST64, None, <cinttypes>)
+SYMBOL(SCNdLEAST64, None, <inttypes.h>)
+SYMBOL(SCNdFAST8, None, <cinttypes>)
+SYMBOL(SCNdFAST8, None, <inttypes.h>)
+SYMBOL(SCNdFAST16, None, <cinttypes>)
+SYMBOL(SCNdFAST16, None, <inttypes.h>)
+SYMBOL(SCNdFAST32, None, <cinttypes>)
+SYMBOL(SCNdFAST32, None, <inttypes.h>)
+SYMBOL(SCNdFAST64, None, <cinttypes>)
+SYMBOL(SCNdFAST64, None, <inttypes.h>)
+SYMBOL(SCNdMAX, None, <cinttypes>)
+SYMBOL(SCNdMAX, None, <inttypes.h>)
+SYMBOL(SCNdPTR, None, <cinttypes>)
+SYMBOL(SCNdPTR, None, <inttypes.h>)
+SYMBOL(SCNi8, None, <cinttypes>)
+SYMBOL(SCNi8, None, <inttypes.h>)
+SYMBOL(SCNi16, None, <cinttypes>)
+SYMBOL(SCNi16, None, <inttypes.h>)
+SYMBOL(SCNi32, None, <cinttypes>)
+SYMBOL(SCNi32, None, <inttypes.h>)
+SYMBOL(SCNi64, None, <cinttypes>)
+SYMBOL(SCNi64, None, <inttypes.h>)
+SYMBOL(SCNiLEAST8, None, <cinttypes>)
+SYMBOL(SCNiLEAST8, None, <inttypes.h>)
+SYMBOL(SCNiLEAST16, None, <cinttypes>)
+SYMBOL(SCNiLEAST16, None, <inttypes.h>)
+SYMBOL(SCNiLEAST32, None, <cinttypes>)
+SYMBOL(SCNiLEAST32, None, <inttypes.h>)
+SYMBOL(SCNiLEAST64, None, <cinttypes>)
+SYMBOL(SCNiLEAST64, None, <inttypes.h>)
+SYMBOL(SCNiFAST8, None, <cinttypes>)
+SYMBOL(SCNiFAST8, None, <inttypes.h>)
+SYMBOL(SCNiFAST16, None, <cinttypes>)
+SYMBOL(SCNiFAST16, None, <inttypes.h>)
+SYMBOL(SCNiFAST32, None, <cinttypes>)
+SYMBOL(SCNiFAST32, None, <inttypes.h>)
+SYMBOL(SCNiFAST64, None, <cinttypes>)
+SYMBOL(SCNiFAST64, None, <inttypes.h>)
+SYMBOL(SCNiMAX, None, <cinttypes>)
+SYMBOL(SCNiMAX, None, <inttypes.h>)
+SYMBOL(SCNiPTR, None, <cinttypes>)
+SYMBOL(SCNiPTR, None, <inttypes.h>)
+SYMBOL(SCNu8, None, <cinttypes>)
+SYMBOL(SCNu8, None, <inttypes.h>)
+SYMBOL(SCNu16, None, <cinttypes>)
+SYMBOL(SCNu16, None, <inttypes.h>)
+SYMBOL(SCNu32, None, <cinttypes>)
+SYMBOL(SCNu32, None, <inttypes.h>)
+SYMBOL(SCNu64, None, <cinttypes>)
+SYMBOL(SCNu64, None, <inttypes.h>)
+SYMBOL(SCNuLEAST8, None, <cinttypes>)
+SYMBOL(SCNuLEAST8, None, <inttypes.h>)
+SYMBOL(SCNuLEAST16, None, <cinttypes>)
+SYMBOL(SCNuLEAST16, None, <inttypes.h>)
+SYMBOL(SCNuLEAST32, None, <cinttypes>)
+SYMBOL(SCNuLEAST32, None, <inttypes.h>)
+SYMBOL(SCNuLEAST64, None, <cinttypes>)
+SYMBOL(SCNuLEAST64, None, <inttypes.h>)
+SYMBOL(SCNuFAST8, None, <cinttypes>)
+SYMBOL(SCNuFAST8, None, <inttypes.h>)
+SYMBOL(SCNuFAST16, None, <cinttypes>)
+SYMBOL(SCNuFAST16, None, <inttypes.h>)
+SYMBOL(SCNuFAST32, None, <cinttypes>)
+SYMBOL(SCNuFAST32, None, <inttypes.h>)
+SYMBOL(SCNuFAST64, None, <cinttypes>)
+SYMBOL(SCNuFAST64, None, <inttypes.h>)
+SYMBOL(SCNuMAX, None, <cinttypes>)
+SYMBOL(SCNuMAX, None, <inttypes.h>)
+SYMBOL(SCNuPTR, None, <cinttypes>)
+SYMBOL(SCNuPTR, None, <inttypes.h>)
+SYMBOL(SCNo8, None, <cinttypes>)
+SYMBOL(SCNo8, None, <inttypes.h>)
+SYMBOL(SCNo16, None, <cinttypes>)
+SYMBOL(SCNo16, None, <inttypes.h>)
+SYMBOL(SCNo32, None, <cinttypes>)
+SYMBOL(SCNo32, None, <inttypes.h>)
+SYMBOL(SCNo64, None, <cinttypes>)
+SYMBOL(SCNo64, None, <inttypes.h>)
+SYMBOL(SCNoLEAST8, None, <cinttypes>)
+SYMBOL(SCNoLEAST8, None, <inttypes.h>)
+SYMBOL(SCNoLEAST16, None, <cinttypes>)
+SYMBOL(SCNoLEAST16, None, <inttypes.h>)
+SYMBOL(SCNoLEAST32, None, <cinttypes>)
+SYMBOL(SCNoLEAST32, None, <inttypes.h>)
+SYMBOL(SCNoLEAST64, None, <cinttypes>)
+SYMBOL(SCNoLEAST64, None, <inttypes.h>)
+SYMBOL(SCNoFAST8, None, <cinttypes>)
+SYMBOL(SCNoFAST8, None, <inttypes.h>)
+SYMBOL(SCNoFAST16, None, <cinttypes>)
+SYMBOL(SCNoFAST16, None, <inttypes.h>)
+SYMBOL(SCNoFAST32, None, <cinttypes>)
+SYMBOL(SCNoFAST32, None, <inttypes.h>)
+SYMBOL(SCNoFAST64, None, <cinttypes>)
+SYMBOL(SCNoFAST64, None, <inttypes.h>)
+SYMBOL(SCNoMAX, None, <cinttypes>)
+SYMBOL(SCNoMAX, None, <inttypes.h>)
+SYMBOL(SCNoPTR, None, <cinttypes>)
+SYMBOL(SCNoPTR, None, <inttypes.h>)
+SYMBOL(SCNx8, None, <cinttypes>)
+SYMBOL(SCNx8, None, <inttypes.h>)
+SYMBOL(SCNx16, None, <cinttypes>)
+SYMBOL(SCNx16, None, <inttypes.h>)
+SYMBOL(SCNx32, None, <cinttypes>)
+SYMBOL(SCNx32, None, <inttypes.h>)
+SYMBOL(SCNx64, None, <cinttypes>)
+SYMBOL(SCNx64, None, <inttypes.h>)
+SYMBOL(SCNxLEAST8, None, <cinttypes>)
+SYMBOL(SCNxLEAST8, None, <inttypes.h>)
+SYMBOL(SCNxLEAST16, None, <cinttypes>)
+SYMBOL(SCNxLEAST16, None, <inttypes.h>)
+SYMBOL(SCNxLEAST32, None, <cinttypes>)
+SYMBOL(SCNxLEAST32, None, <inttypes.h>)
+SYMBOL(SCNxLEAST64, None, <cinttypes>)
+SYMBOL(SCNxLEAST64, None, <inttypes.h>)
+SYMBOL(SCNxFAST8, None, <cinttypes>)
+SYMBOL(SCNxFAST8, None, <inttypes.h>)
+SYMBOL(SCNxFAST16, None, <cinttypes>)
+SYMBOL(SCNxFAST16, None, <inttypes.h>)
+SYMBOL(SCNxFAST32, None, <cinttypes>)
+SYMBOL(SCNxFAST32, None, <inttypes.h>)
+SYMBOL(SCNxFAST64, None, <cinttypes>)
+SYMBOL(SCNxFAST64, None, <inttypes.h>)
+SYMBOL(SCNxMAX, None, <cinttypes>)
+SYMBOL(SCNxMAX, None, <inttypes.h>)
+SYMBOL(SCNxPTR, None, <cinttypes>)
+SYMBOL(SCNxPTR, None, <inttypes.h>)
diff --git a/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdSymbolMap.inc b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdSymbolMap.inc
new file mode 100644
index 000000000000..b46bd2e4d7a4
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdSymbolMap.inc
@@ -0,0 +1,3873 @@
+//===-- gen_std.py generated file -------------------------------*- C++ -*-===//
+//
+// Used to build a lookup table (qualified names => include headers) for CPP
+// Standard Library symbols.
+//
+// This file was generated automatically by
+// clang/tools/include-mapping/gen_std.py, DO NOT EDIT!
+//
+// Generated from cppreference offline HTML book (modified on 2022-07-30).
+//===----------------------------------------------------------------------===//
+
+SYMBOL(ATOMIC_BOOL_LOCK_FREE, None, <atomic>)
+SYMBOL(ATOMIC_CHAR16_T_LOCK_FREE, None, <atomic>)
+SYMBOL(ATOMIC_CHAR32_T_LOCK_FREE, None, <atomic>)
+SYMBOL(ATOMIC_CHAR8_T_LOCK_FREE, None, <atomic>)
+SYMBOL(ATOMIC_CHAR_LOCK_FREE, None, <atomic>)
+SYMBOL(ATOMIC_FLAG_INIT, None, <atomic>)
+SYMBOL(ATOMIC_INT_LOCK_FREE, None, <atomic>)
+SYMBOL(ATOMIC_LLONG_LOCK_FREE, None, <atomic>)
+SYMBOL(ATOMIC_LONG_LOCK_FREE, None, <atomic>)
+SYMBOL(ATOMIC_POINTER_LOCK_FREE, None, <atomic>)
+SYMBOL(ATOMIC_SHORT_LOCK_FREE, None, <atomic>)
+SYMBOL(ATOMIC_VAR_INIT, None, <atomic>)
+SYMBOL(ATOMIC_WCHAR_T_LOCK_FREE, None, <atomic>)
+SYMBOL(BUFSIZ, None, <cstdio>)
+SYMBOL(BUFSIZ, None, <stdio.h>)
+SYMBOL(CHAR_BIT, None, <climits>)
+SYMBOL(CHAR_BIT, None, <limits.h>)
+SYMBOL(CHAR_MAX, None, <climits>)
+SYMBOL(CHAR_MAX, None, <limits.h>)
+SYMBOL(CHAR_MIN, None, <climits>)
+SYMBOL(CHAR_MIN, None, <limits.h>)
+SYMBOL(CLOCKS_PER_SEC, None, <ctime>)
+SYMBOL(CLOCKS_PER_SEC, None, <time.h>)
+SYMBOL(DBL_DECIMAL_DIG, None, <cfloat>)
+SYMBOL(DBL_DECIMAL_DIG, None, <float.h>)
+SYMBOL(DBL_DIG, None, <cfloat>)
+SYMBOL(DBL_DIG, None, <float.h>)
+SYMBOL(DBL_EPSILON, None, <cfloat>)
+SYMBOL(DBL_EPSILON, None, <float.h>)
+SYMBOL(DBL_HAS_SUBNORM, None, <cfloat>)
+SYMBOL(DBL_HAS_SUBNORM, None, <float.h>)
+SYMBOL(DBL_MANT_DIG, None, <cfloat>)
+SYMBOL(DBL_MANT_DIG, None, <float.h>)
+SYMBOL(DBL_MAX, None, <cfloat>)
+SYMBOL(DBL_MAX, None, <float.h>)
+SYMBOL(DBL_MAX_10_EXP, None, <cfloat>)
+SYMBOL(DBL_MAX_10_EXP, None, <float.h>)
+SYMBOL(DBL_MAX_EXP, None, <cfloat>)
+SYMBOL(DBL_MAX_EXP, None, <float.h>)
+SYMBOL(DBL_MIN, None, <cfloat>)
+SYMBOL(DBL_MIN, None, <float.h>)
+SYMBOL(DBL_MIN_10_EXP, None, <cfloat>)
+SYMBOL(DBL_MIN_10_EXP, None, <float.h>)
+SYMBOL(DBL_MIN_EXP, None, <cfloat>)
+SYMBOL(DBL_MIN_EXP, None, <float.h>)
+SYMBOL(DBL_TRUE_MIN, None, <cfloat>)
+SYMBOL(DBL_TRUE_MIN, None, <float.h>)
+SYMBOL(DECIMAL_DIG, None, <cfloat>)
+SYMBOL(DECIMAL_DIG, None, <float.h>)
+SYMBOL(E2BIG, None, <cerrno>)
+SYMBOL(E2BIG, None, <errno.h>)
+SYMBOL(EACCES, None, <cerrno>)
+SYMBOL(EACCES, None, <errno.h>)
+SYMBOL(EADDRINUSE, None, <cerrno>)
+SYMBOL(EADDRINUSE, None, <errno.h>)
+SYMBOL(EADDRNOTAVAIL, None, <cerrno>)
+SYMBOL(EADDRNOTAVAIL, None, <errno.h>)
+SYMBOL(EAFNOSUPPORT, None, <cerrno>)
+SYMBOL(EAFNOSUPPORT, None, <errno.h>)
+SYMBOL(EAGAIN, None, <cerrno>)
+SYMBOL(EAGAIN, None, <errno.h>)
+SYMBOL(EALREADY, None, <cerrno>)
+SYMBOL(EALREADY, None, <errno.h>)
+SYMBOL(EBADF, None, <cerrno>)
+SYMBOL(EBADF, None, <errno.h>)
+SYMBOL(EBADMSG, None, <cerrno>)
+SYMBOL(EBADMSG, None, <errno.h>)
+SYMBOL(EBUSY, None, <cerrno>)
+SYMBOL(EBUSY, None, <errno.h>)
+SYMBOL(ECANCELED, None, <cerrno>)
+SYMBOL(ECANCELED, None, <errno.h>)
+SYMBOL(ECHILD, None, <cerrno>)
+SYMBOL(ECHILD, None, <errno.h>)
+SYMBOL(ECONNABORTED, None, <cerrno>)
+SYMBOL(ECONNABORTED, None, <errno.h>)
+SYMBOL(ECONNREFUSED, None, <cerrno>)
+SYMBOL(ECONNREFUSED, None, <errno.h>)
+SYMBOL(ECONNRESET, None, <cerrno>)
+SYMBOL(ECONNRESET, None, <errno.h>)
+SYMBOL(EDEADLK, None, <cerrno>)
+SYMBOL(EDEADLK, None, <errno.h>)
+SYMBOL(EDESTADDRREQ, None, <cerrno>)
+SYMBOL(EDESTADDRREQ, None, <errno.h>)
+SYMBOL(EDOM, None, <cerrno>)
+SYMBOL(EDOM, None, <errno.h>)
+SYMBOL(EEXIST, None, <cerrno>)
+SYMBOL(EEXIST, None, <errno.h>)
+SYMBOL(EFAULT, None, <cerrno>)
+SYMBOL(EFAULT, None, <errno.h>)
+SYMBOL(EFBIG, None, <cerrno>)
+SYMBOL(EFBIG, None, <errno.h>)
+SYMBOL(EHOSTUNREACH, None, <cerrno>)
+SYMBOL(EHOSTUNREACH, None, <errno.h>)
+SYMBOL(EIDRM, None, <cerrno>)
+SYMBOL(EIDRM, None, <errno.h>)
+SYMBOL(EILSEQ, None, <cerrno>)
+SYMBOL(EILSEQ, None, <errno.h>)
+SYMBOL(EINPROGRESS, None, <cerrno>)
+SYMBOL(EINPROGRESS, None, <errno.h>)
+SYMBOL(EINTR, None, <cerrno>)
+SYMBOL(EINTR, None, <errno.h>)
+SYMBOL(EINVAL, None, <cerrno>)
+SYMBOL(EINVAL, None, <errno.h>)
+SYMBOL(EIO, None, <cerrno>)
+SYMBOL(EIO, None, <errno.h>)
+SYMBOL(EISCONN, None, <cerrno>)
+SYMBOL(EISCONN, None, <errno.h>)
+SYMBOL(EISDIR, None, <cerrno>)
+SYMBOL(EISDIR, None, <errno.h>)
+SYMBOL(ELOOP, None, <cerrno>)
+SYMBOL(ELOOP, None, <errno.h>)
+SYMBOL(EMFILE, None, <cerrno>)
+SYMBOL(EMFILE, None, <errno.h>)
+SYMBOL(EMLINK, None, <cerrno>)
+SYMBOL(EMLINK, None, <errno.h>)
+SYMBOL(EMSGSIZE, None, <cerrno>)
+SYMBOL(EMSGSIZE, None, <errno.h>)
+SYMBOL(ENAMETOOLONG, None, <cerrno>)
+SYMBOL(ENAMETOOLONG, None, <errno.h>)
+SYMBOL(ENETDOWN, None, <cerrno>)
+SYMBOL(ENETDOWN, None, <errno.h>)
+SYMBOL(ENETRESET, None, <cerrno>)
+SYMBOL(ENETRESET, None, <errno.h>)
+SYMBOL(ENETUNREACH, None, <cerrno>)
+SYMBOL(ENETUNREACH, None, <errno.h>)
+SYMBOL(ENFILE, None, <cerrno>)
+SYMBOL(ENFILE, None, <errno.h>)
+SYMBOL(ENOBUFS, None, <cerrno>)
+SYMBOL(ENOBUFS, None, <errno.h>)
+SYMBOL(ENODATA, None, <cerrno>)
+SYMBOL(ENODATA, None, <errno.h>)
+SYMBOL(ENODEV, None, <cerrno>)
+SYMBOL(ENODEV, None, <errno.h>)
+SYMBOL(ENOENT, None, <cerrno>)
+SYMBOL(ENOENT, None, <errno.h>)
+SYMBOL(ENOEXEC, None, <cerrno>)
+SYMBOL(ENOEXEC, None, <errno.h>)
+SYMBOL(ENOLCK, None, <cerrno>)
+SYMBOL(ENOLCK, None, <errno.h>)
+SYMBOL(ENOLINK, None, <cerrno>)
+SYMBOL(ENOLINK, None, <errno.h>)
+SYMBOL(ENOMEM, None, <cerrno>)
+SYMBOL(ENOMEM, None, <errno.h>)
+SYMBOL(ENOMSG, None, <cerrno>)
+SYMBOL(ENOMSG, None, <errno.h>)
+SYMBOL(ENOPROTOOPT, None, <cerrno>)
+SYMBOL(ENOPROTOOPT, None, <errno.h>)
+SYMBOL(ENOSPC, None, <cerrno>)
+SYMBOL(ENOSPC, None, <errno.h>)
+SYMBOL(ENOSR, None, <cerrno>)
+SYMBOL(ENOSR, None, <errno.h>)
+SYMBOL(ENOSTR, None, <cerrno>)
+SYMBOL(ENOSTR, None, <errno.h>)
+SYMBOL(ENOSYS, None, <cerrno>)
+SYMBOL(ENOSYS, None, <errno.h>)
+SYMBOL(ENOTCONN, None, <cerrno>)
+SYMBOL(ENOTCONN, None, <errno.h>)
+SYMBOL(ENOTDIR, None, <cerrno>)
+SYMBOL(ENOTDIR, None, <errno.h>)
+SYMBOL(ENOTEMPTY, None, <cerrno>)
+SYMBOL(ENOTEMPTY, None, <errno.h>)
+SYMBOL(ENOTRECOVERABLE, None, <cerrno>)
+SYMBOL(ENOTRECOVERABLE, None, <errno.h>)
+SYMBOL(ENOTSOCK, None, <cerrno>)
+SYMBOL(ENOTSOCK, None, <errno.h>)
+SYMBOL(ENOTSUP, None, <cerrno>)
+SYMBOL(ENOTSUP, None, <errno.h>)
+SYMBOL(ENOTTY, None, <cerrno>)
+SYMBOL(ENOTTY, None, <errno.h>)
+SYMBOL(ENXIO, None, <cerrno>)
+SYMBOL(ENXIO, None, <errno.h>)
+SYMBOL(EOF, None, <cstdio>)
+SYMBOL(EOF, None, <stdio.h>)
+SYMBOL(EOPNOTSUPP, None, <cerrno>)
+SYMBOL(EOPNOTSUPP, None, <errno.h>)
+SYMBOL(EOVERFLOW, None, <cerrno>)
+SYMBOL(EOVERFLOW, None, <errno.h>)
+SYMBOL(EOWNERDEAD, None, <cerrno>)
+SYMBOL(EOWNERDEAD, None, <errno.h>)
+SYMBOL(EPERM, None, <cerrno>)
+SYMBOL(EPERM, None, <errno.h>)
+SYMBOL(EPIPE, None, <cerrno>)
+SYMBOL(EPIPE, None, <errno.h>)
+SYMBOL(EPROTO, None, <cerrno>)
+SYMBOL(EPROTO, None, <errno.h>)
+SYMBOL(EPROTONOSUPPORT, None, <cerrno>)
+SYMBOL(EPROTONOSUPPORT, None, <errno.h>)
+SYMBOL(EPROTOTYPE, None, <cerrno>)
+SYMBOL(EPROTOTYPE, None, <errno.h>)
+SYMBOL(ERANGE, None, <cerrno>)
+SYMBOL(ERANGE, None, <errno.h>)
+SYMBOL(EROFS, None, <cerrno>)
+SYMBOL(EROFS, None, <errno.h>)
+SYMBOL(ESPIPE, None, <cerrno>)
+SYMBOL(ESPIPE, None, <errno.h>)
+SYMBOL(ESRCH, None, <cerrno>)
+SYMBOL(ESRCH, None, <errno.h>)
+SYMBOL(ETIME, None, <cerrno>)
+SYMBOL(ETIME, None, <errno.h>)
+SYMBOL(ETIMEDOUT, None, <cerrno>)
+SYMBOL(ETIMEDOUT, None, <errno.h>)
+SYMBOL(ETXTBSY, None, <cerrno>)
+SYMBOL(ETXTBSY, None, <errno.h>)
+SYMBOL(EWOULDBLOCK, None, <cerrno>)
+SYMBOL(EWOULDBLOCK, None, <errno.h>)
+SYMBOL(EXDEV, None, <cerrno>)
+SYMBOL(EXDEV, None, <errno.h>)
+SYMBOL(EXIT_FAILURE, None, <cstdlib>)
+SYMBOL(EXIT_FAILURE, None, <stdlib.h>)
+SYMBOL(EXIT_SUCCESS, None, <cstdlib>)
+SYMBOL(EXIT_SUCCESS, None, <stdlib.h>)
+SYMBOL(FE_ALL_EXCEPT, None, <cfenv>)
+SYMBOL(FE_ALL_EXCEPT, None, <fenv.h>)
+SYMBOL(FE_DFL_ENV, None, <cfenv>)
+SYMBOL(FE_DFL_ENV, None, <fenv.h>)
+SYMBOL(FE_DIVBYZERO, None, <cfenv>)
+SYMBOL(FE_DIVBYZERO, None, <fenv.h>)
+SYMBOL(FE_DOWNWARD, None, <cfenv>)
+SYMBOL(FE_DOWNWARD, None, <fenv.h>)
+SYMBOL(FE_INEXACT, None, <cfenv>)
+SYMBOL(FE_INEXACT, None, <fenv.h>)
+SYMBOL(FE_INVALID, None, <cfenv>)
+SYMBOL(FE_INVALID, None, <fenv.h>)
+SYMBOL(FE_OVERFLOW, None, <cfenv>)
+SYMBOL(FE_OVERFLOW, None, <fenv.h>)
+SYMBOL(FE_TONEAREST, None, <cfenv>)
+SYMBOL(FE_TONEAREST, None, <fenv.h>)
+SYMBOL(FE_TOWARDZERO, None, <cfenv>)
+SYMBOL(FE_TOWARDZERO, None, <fenv.h>)
+SYMBOL(FE_UNDERFLOW, None, <cfenv>)
+SYMBOL(FE_UNDERFLOW, None, <fenv.h>)
+SYMBOL(FE_UPWARD, None, <cfenv>)
+SYMBOL(FE_UPWARD, None, <fenv.h>)
+SYMBOL(FILENAME_MAX, None, <cstdio>)
+SYMBOL(FILENAME_MAX, None, <stdio.h>)
+SYMBOL(FLT_DECIMAL_DIG, None, <cfloat>)
+SYMBOL(FLT_DECIMAL_DIG, None, <float.h>)
+SYMBOL(FLT_DIG, None, <cfloat>)
+SYMBOL(FLT_DIG, None, <float.h>)
+SYMBOL(FLT_EPSILON, None, <cfloat>)
+SYMBOL(FLT_EPSILON, None, <float.h>)
+SYMBOL(FLT_EVAL_METHOD, None, <cfloat>)
+SYMBOL(FLT_EVAL_METHOD, None, <float.h>)
+SYMBOL(FLT_HAS_SUBNORM, None, <cfloat>)
+SYMBOL(FLT_HAS_SUBNORM, None, <float.h>)
+SYMBOL(FLT_MANT_DIG, None, <cfloat>)
+SYMBOL(FLT_MANT_DIG, None, <float.h>)
+SYMBOL(FLT_MAX, None, <cfloat>)
+SYMBOL(FLT_MAX, None, <float.h>)
+SYMBOL(FLT_MAX_10_EXP, None, <cfloat>)
+SYMBOL(FLT_MAX_10_EXP, None, <float.h>)
+SYMBOL(FLT_MAX_EXP, None, <cfloat>)
+SYMBOL(FLT_MAX_EXP, None, <float.h>)
+SYMBOL(FLT_MIN, None, <cfloat>)
+SYMBOL(FLT_MIN, None, <float.h>)
+SYMBOL(FLT_MIN_10_EXP, None, <cfloat>)
+SYMBOL(FLT_MIN_10_EXP, None, <float.h>)
+SYMBOL(FLT_MIN_EXP, None, <cfloat>)
+SYMBOL(FLT_MIN_EXP, None, <float.h>)
+SYMBOL(FLT_RADIX, None, <cfloat>)
+SYMBOL(FLT_RADIX, None, <float.h>)
+SYMBOL(FLT_ROUNDS, None, <cfloat>)
+SYMBOL(FLT_ROUNDS, None, <float.h>)
+SYMBOL(FLT_TRUE_MIN, None, <cfloat>)
+SYMBOL(FLT_TRUE_MIN, None, <float.h>)
+SYMBOL(FOPEN_MAX, None, <cstdio>)
+SYMBOL(FOPEN_MAX, None, <stdio.h>)
+SYMBOL(FP_FAST_FMA, None, <cmath>)
+SYMBOL(FP_FAST_FMA, None, <math.h>)
+SYMBOL(FP_FAST_FMAF, None, <cmath>)
+SYMBOL(FP_FAST_FMAF, None, <math.h>)
+SYMBOL(FP_FAST_FMAL, None, <cmath>)
+SYMBOL(FP_FAST_FMAL, None, <math.h>)
+SYMBOL(FP_ILOGB0, None, <cmath>)
+SYMBOL(FP_ILOGB0, None, <math.h>)
+SYMBOL(FP_ILOGBNAN, None, <cmath>)
+SYMBOL(FP_ILOGBNAN, None, <math.h>)
+SYMBOL(FP_INFINITE, None, <cmath>)
+SYMBOL(FP_INFINITE, None, <math.h>)
+SYMBOL(FP_NAN, None, <cmath>)
+SYMBOL(FP_NAN, None, <math.h>)
+SYMBOL(FP_NORMAL, None, <cmath>)
+SYMBOL(FP_NORMAL, None, <math.h>)
+SYMBOL(FP_SUBNORMAL, None, <cmath>)
+SYMBOL(FP_SUBNORMAL, None, <math.h>)
+SYMBOL(FP_ZERO, None, <cmath>)
+SYMBOL(FP_ZERO, None, <math.h>)
+SYMBOL(HUGE_VAL, None, <cmath>)
+SYMBOL(HUGE_VAL, None, <math.h>)
+SYMBOL(HUGE_VALF, None, <cmath>)
+SYMBOL(HUGE_VALF, None, <math.h>)
+SYMBOL(HUGE_VALL, None, <cmath>)
+SYMBOL(HUGE_VALL, None, <math.h>)
+SYMBOL(INFINITY, None, <cmath>)
+SYMBOL(INFINITY, None, <math.h>)
+SYMBOL(INT16_MAX, None, <cstdint>)
+SYMBOL(INT16_MAX, None, <stdint.h>)
+SYMBOL(INT16_MIN, None, <cstdint>)
+SYMBOL(INT16_MIN, None, <stdint.h>)
+SYMBOL(INT32_MAX, None, <cstdint>)
+SYMBOL(INT32_MAX, None, <stdint.h>)
+SYMBOL(INT32_MIN, None, <cstdint>)
+SYMBOL(INT32_MIN, None, <stdint.h>)
+SYMBOL(INT64_MAX, None, <cstdint>)
+SYMBOL(INT64_MAX, None, <stdint.h>)
+SYMBOL(INT64_MIN, None, <cstdint>)
+SYMBOL(INT64_MIN, None, <stdint.h>)
+SYMBOL(INT8_MAX, None, <cstdint>)
+SYMBOL(INT8_MAX, None, <stdint.h>)
+SYMBOL(INT8_MIN, None, <cstdint>)
+SYMBOL(INT8_MIN, None, <stdint.h>)
+SYMBOL(INTMAX_MAX, None, <cstdint>)
+SYMBOL(INTMAX_MAX, None, <stdint.h>)
+SYMBOL(INTMAX_MIN, None, <cstdint>)
+SYMBOL(INTMAX_MIN, None, <stdint.h>)
+SYMBOL(INTPTR_MAX, None, <cstdint>)
+SYMBOL(INTPTR_MAX, None, <stdint.h>)
+SYMBOL(INTPTR_MIN, None, <cstdint>)
+SYMBOL(INTPTR_MIN, None, <stdint.h>)
+SYMBOL(INT_FAST16_MAX, None, <cstdint>)
+SYMBOL(INT_FAST16_MAX, None, <stdint.h>)
+SYMBOL(INT_FAST16_MIN, None, <cstdint>)
+SYMBOL(INT_FAST16_MIN, None, <stdint.h>)
+SYMBOL(INT_FAST32_MAX, None, <cstdint>)
+SYMBOL(INT_FAST32_MAX, None, <stdint.h>)
+SYMBOL(INT_FAST32_MIN, None, <cstdint>)
+SYMBOL(INT_FAST32_MIN, None, <stdint.h>)
+SYMBOL(INT_FAST64_MAX, None, <cstdint>)
+SYMBOL(INT_FAST64_MAX, None, <stdint.h>)
+SYMBOL(INT_FAST64_MIN, None, <cstdint>)
+SYMBOL(INT_FAST64_MIN, None, <stdint.h>)
+SYMBOL(INT_FAST8_MAX, None, <cstdint>)
+SYMBOL(INT_FAST8_MAX, None, <stdint.h>)
+SYMBOL(INT_FAST8_MIN, None, <cstdint>)
+SYMBOL(INT_FAST8_MIN, None, <stdint.h>)
+SYMBOL(INT_LEAST16_MAX, None, <cstdint>)
+SYMBOL(INT_LEAST16_MAX, None, <stdint.h>)
+SYMBOL(INT_LEAST16_MIN, None, <cstdint>)
+SYMBOL(INT_LEAST16_MIN, None, <stdint.h>)
+SYMBOL(INT_LEAST32_MAX, None, <cstdint>)
+SYMBOL(INT_LEAST32_MAX, None, <stdint.h>)
+SYMBOL(INT_LEAST32_MIN, None, <cstdint>)
+SYMBOL(INT_LEAST32_MIN, None, <stdint.h>)
+SYMBOL(INT_LEAST64_MAX, None, <cstdint>)
+SYMBOL(INT_LEAST64_MAX, None, <stdint.h>)
+SYMBOL(INT_LEAST64_MIN, None, <cstdint>)
+SYMBOL(INT_LEAST64_MIN, None, <stdint.h>)
+SYMBOL(INT_LEAST8_MAX, None, <cstdint>)
+SYMBOL(INT_LEAST8_MAX, None, <stdint.h>)
+SYMBOL(INT_LEAST8_MIN, None, <cstdint>)
+SYMBOL(INT_LEAST8_MIN, None, <stdint.h>)
+SYMBOL(INT_MAX, None, <climits>)
+SYMBOL(INT_MAX, None, <limits.h>)
+SYMBOL(INT_MIN, None, <climits>)
+SYMBOL(INT_MIN, None, <limits.h>)
+SYMBOL(LC_ALL, None, <clocale>)
+SYMBOL(LC_ALL, None, <locale.h>)
+SYMBOL(LC_COLLATE, None, <clocale>)
+SYMBOL(LC_COLLATE, None, <locale.h>)
+SYMBOL(LC_CTYPE, None, <clocale>)
+SYMBOL(LC_CTYPE, None, <locale.h>)
+SYMBOL(LC_MONETARY, None, <clocale>)
+SYMBOL(LC_MONETARY, None, <locale.h>)
+SYMBOL(LC_NUMERIC, None, <clocale>)
+SYMBOL(LC_NUMERIC, None, <locale.h>)
+SYMBOL(LC_TIME, None, <clocale>)
+SYMBOL(LC_TIME, None, <locale.h>)
+SYMBOL(LDBL_DECIMAL_DIG, None, <cfloat>)
+SYMBOL(LDBL_DECIMAL_DIG, None, <float.h>)
+SYMBOL(LDBL_DIG, None, <cfloat>)
+SYMBOL(LDBL_DIG, None, <float.h>)
+SYMBOL(LDBL_EPSILON, None, <cfloat>)
+SYMBOL(LDBL_EPSILON, None, <float.h>)
+SYMBOL(LDBL_HAS_SUBNORM, None, <cfloat>)
+SYMBOL(LDBL_HAS_SUBNORM, None, <float.h>)
+SYMBOL(LDBL_MANT_DIG, None, <cfloat>)
+SYMBOL(LDBL_MANT_DIG, None, <float.h>)
+SYMBOL(LDBL_MAX, None, <cfloat>)
+SYMBOL(LDBL_MAX, None, <float.h>)
+SYMBOL(LDBL_MAX_10_EXP, None, <cfloat>)
+SYMBOL(LDBL_MAX_10_EXP, None, <float.h>)
+SYMBOL(LDBL_MAX_EXP, None, <cfloat>)
+SYMBOL(LDBL_MAX_EXP, None, <float.h>)
+SYMBOL(LDBL_MIN, None, <cfloat>)
+SYMBOL(LDBL_MIN, None, <float.h>)
+SYMBOL(LDBL_MIN_10_EXP, None, <cfloat>)
+SYMBOL(LDBL_MIN_10_EXP, None, <float.h>)
+SYMBOL(LDBL_MIN_EXP, None, <cfloat>)
+SYMBOL(LDBL_MIN_EXP, None, <float.h>)
+SYMBOL(LDBL_TRUE_MIN, None, <cfloat>)
+SYMBOL(LDBL_TRUE_MIN, None, <float.h>)
+SYMBOL(LLONG_MAX, None, <climits>)
+SYMBOL(LLONG_MAX, None, <limits.h>)
+SYMBOL(LLONG_MIN, None, <climits>)
+SYMBOL(LLONG_MIN, None, <limits.h>)
+SYMBOL(LONG_MAX, None, <climits>)
+SYMBOL(LONG_MAX, None, <limits.h>)
+SYMBOL(LONG_MIN, None, <climits>)
+SYMBOL(LONG_MIN, None, <limits.h>)
+SYMBOL(L_tmpnam, None, <cstdio>)
+SYMBOL(L_tmpnam, None, <stdio.h>)
+SYMBOL(MATH_ERREXCEPT, None, <cmath>)
+SYMBOL(MATH_ERREXCEPT, None, <math.h>)
+SYMBOL(MATH_ERRNO, None, <cmath>)
+SYMBOL(MATH_ERRNO, None, <math.h>)
+SYMBOL(MB_CUR_MAX, None, <cstdlib>)
+SYMBOL(MB_CUR_MAX, None, <stdlib.h>)
+SYMBOL(MB_LEN_MAX, None, <climits>)
+SYMBOL(MB_LEN_MAX, None, <limits.h>)
+SYMBOL(NAN, None, <cmath>)
+SYMBOL(NAN, None, <math.h>)
+SYMBOL(ONCE_FLAG_INIT, None, <mutex>)
+SYMBOL(PTRDIFF_MAX, None, <cstdint>)
+SYMBOL(PTRDIFF_MAX, None, <stdint.h>)
+SYMBOL(PTRDIFF_MIN, None, <cstdint>)
+SYMBOL(PTRDIFF_MIN, None, <stdint.h>)
+SYMBOL(RAND_MAX, None, <cstdlib>)
+SYMBOL(RAND_MAX, None, <stdlib.h>)
+SYMBOL(SCHAR_MAX, None, <climits>)
+SYMBOL(SCHAR_MAX, None, <limits.h>)
+SYMBOL(SCHAR_MIN, None, <climits>)
+SYMBOL(SCHAR_MIN, None, <limits.h>)
+SYMBOL(SEEK_CUR, None, <cstdio>)
+SYMBOL(SEEK_CUR, None, <stdio.h>)
+SYMBOL(SEEK_END, None, <cstdio>)
+SYMBOL(SEEK_END, None, <stdio.h>)
+SYMBOL(SEEK_SET, None, <cstdio>)
+SYMBOL(SEEK_SET, None, <stdio.h>)
+SYMBOL(SHRT_MAX, None, <climits>)
+SYMBOL(SHRT_MAX, None, <limits.h>)
+SYMBOL(SHRT_MIN, None, <climits>)
+SYMBOL(SHRT_MIN, None, <limits.h>)
+SYMBOL(SIGABRT, None, <csignal>)
+SYMBOL(SIGABRT, None, <signal.h>)
+SYMBOL(SIGFPE, None, <csignal>)
+SYMBOL(SIGFPE, None, <signal.h>)
+SYMBOL(SIGILL, None, <csignal>)
+SYMBOL(SIGILL, None, <signal.h>)
+SYMBOL(SIGINT, None, <csignal>)
+SYMBOL(SIGINT, None, <signal.h>)
+SYMBOL(SIGSEGV, None, <csignal>)
+SYMBOL(SIGSEGV, None, <signal.h>)
+SYMBOL(SIGTERM, None, <csignal>)
+SYMBOL(SIGTERM, None, <signal.h>)
+SYMBOL(SIG_ATOMIC_MAX, None, <cstdint>)
+SYMBOL(SIG_ATOMIC_MAX, None, <stdint.h>)
+SYMBOL(SIG_ATOMIC_MIN, None, <cstdint>)
+SYMBOL(SIG_ATOMIC_MIN, None, <stdint.h>)
+SYMBOL(SIG_DFL, None, <csignal>)
+SYMBOL(SIG_DFL, None, <signal.h>)
+SYMBOL(SIG_ERR, None, <csignal>)
+SYMBOL(SIG_ERR, None, <signal.h>)
+SYMBOL(SIG_IGN, None, <csignal>)
+SYMBOL(SIG_IGN, None, <signal.h>)
+SYMBOL(SIZE_MAX, None, <cstdint>)
+SYMBOL(SIZE_MAX, None, <stdint.h>)
+SYMBOL(TIME_UTC, None, <ctime>)
+SYMBOL(TIME_UTC, None, <time.h>)
+SYMBOL(TMP_MAX, None, <cstdio>)
+SYMBOL(TMP_MAX, None, <stdio.h>)
+SYMBOL(UCHAR_MAX, None, <climits>)
+SYMBOL(UCHAR_MAX, None, <limits.h>)
+SYMBOL(UINT16_MAX, None, <cstdint>)
+SYMBOL(UINT16_MAX, None, <stdint.h>)
+SYMBOL(UINT32_MAX, None, <cstdint>)
+SYMBOL(UINT32_MAX, None, <stdint.h>)
+SYMBOL(UINT64_MAX, None, <cstdint>)
+SYMBOL(UINT64_MAX, None, <stdint.h>)
+SYMBOL(UINT8_MAX, None, <cstdint>)
+SYMBOL(UINT8_MAX, None, <stdint.h>)
+SYMBOL(UINTMAX_MAX, None, <cstdint>)
+SYMBOL(UINTMAX_MAX, None, <stdint.h>)
+SYMBOL(UINTPTR_MAX, None, <cstdint>)
+SYMBOL(UINTPTR_MAX, None, <stdint.h>)
+SYMBOL(UINT_FAST16_MAX, None, <cstdint>)
+SYMBOL(UINT_FAST16_MAX, None, <stdint.h>)
+SYMBOL(UINT_FAST32_MAX, None, <cstdint>)
+SYMBOL(UINT_FAST32_MAX, None, <stdint.h>)
+SYMBOL(UINT_FAST64_MAX, None, <cstdint>)
+SYMBOL(UINT_FAST64_MAX, None, <stdint.h>)
+SYMBOL(UINT_FAST8_MAX, None, <cstdint>)
+SYMBOL(UINT_FAST8_MAX, None, <stdint.h>)
+SYMBOL(UINT_LEAST16_MAX, None, <cstdint>)
+SYMBOL(UINT_LEAST16_MAX, None, <stdint.h>)
+SYMBOL(UINT_LEAST32_MAX, None, <cstdint>)
+SYMBOL(UINT_LEAST32_MAX, None, <stdint.h>)
+SYMBOL(UINT_LEAST64_MAX, None, <cstdint>)
+SYMBOL(UINT_LEAST64_MAX, None, <stdint.h>)
+SYMBOL(UINT_LEAST8_MAX, None, <cstdint>)
+SYMBOL(UINT_LEAST8_MAX, None, <stdint.h>)
+SYMBOL(UINT_MAX, None, <climits>)
+SYMBOL(UINT_MAX, None, <limits.h>)
+SYMBOL(ULLONG_MAX, None, <climits>)
+SYMBOL(ULLONG_MAX, None, <limits.h>)
+SYMBOL(ULONG_MAX, None, <climits>)
+SYMBOL(ULONG_MAX, None, <limits.h>)
+SYMBOL(USHRT_MAX, None, <climits>)
+SYMBOL(USHRT_MAX, None, <limits.h>)
+SYMBOL(WEOF, None, <cwchar>)
+SYMBOL(WEOF, None, <wchar.h>)
+SYMBOL(WINT_MAX, None, <cstdint>)
+SYMBOL(WINT_MAX, None, <stdint.h>)
+SYMBOL(WINT_MIN, None, <cstdint>)
+SYMBOL(WINT_MIN, None, <stdint.h>)
+SYMBOL(_IOFBF, None, <cstdio>)
+SYMBOL(_IOFBF, None, <stdio.h>)
+SYMBOL(_IOLBF, None, <cstdio>)
+SYMBOL(_IOLBF, None, <stdio.h>)
+SYMBOL(_IONBF, None, <cstdio>)
+SYMBOL(_IONBF, None, <stdio.h>)
+SYMBOL(assert, None, <cassert>)
+SYMBOL(assert, None, <assert.h>)
+SYMBOL(errno, None, <cerrno>)
+SYMBOL(errno, None, <errno.h>)
+SYMBOL(math_errhandling, None, <cmath>)
+SYMBOL(math_errhandling, None, <math.h>)
+SYMBOL(offsetof, None, <cstddef>)
+SYMBOL(offsetof, None, <stddef.h>)
+SYMBOL(setjmp, None, <csetjmp>)
+SYMBOL(setjmp, None, <setjmp.h>)
+SYMBOL(stderr, None, <cstdio>)
+SYMBOL(stderr, None, <stdio.h>)
+SYMBOL(stdin, None, <cstdio>)
+SYMBOL(stdin, None, <stdio.h>)
+SYMBOL(stdout, None, <cstdio>)
+SYMBOL(stdout, None, <stdio.h>)
+SYMBOL(va_arg, None, <cstdarg>)
+SYMBOL(va_arg, None, <stdarg.h>)
+SYMBOL(va_copy, None, <cstdarg>)
+SYMBOL(va_copy, None, <stdarg.h>)
+SYMBOL(va_end, None, <cstdarg>)
+SYMBOL(va_end, None, <stdarg.h>)
+SYMBOL(va_start, None, <cstdarg>)
+SYMBOL(va_start, None, <stdarg.h>)
+SYMBOL(FILE, std::, <cstdio>)
+SYMBOL(FILE, None, <cstdio>)
+SYMBOL(FILE, None, <stdio.h>)
+SYMBOL(_Exit, std::, <cstdlib>)
+SYMBOL(_Exit, None, <cstdlib>)
+SYMBOL(_Exit, None, <stdlib.h>)
+SYMBOL(accumulate, std::, <numeric>)
+SYMBOL(acos, std::, <cmath>)
+SYMBOL(acos, None, <cmath>)
+SYMBOL(acos, None, <math.h>)
+SYMBOL(acosf, std::, <cmath>)
+SYMBOL(acosf, None, <cmath>)
+SYMBOL(acosf, None, <math.h>)
+SYMBOL(acosh, std::, <cmath>)
+SYMBOL(acosh, None, <cmath>)
+SYMBOL(acosh, None, <math.h>)
+SYMBOL(acoshf, std::, <cmath>)
+SYMBOL(acoshf, None, <cmath>)
+SYMBOL(acoshf, None, <math.h>)
+SYMBOL(acoshl, std::, <cmath>)
+SYMBOL(acoshl, None, <cmath>)
+SYMBOL(acoshl, None, <math.h>)
+SYMBOL(acosl, std::, <cmath>)
+SYMBOL(acosl, None, <cmath>)
+SYMBOL(acosl, None, <math.h>)
+SYMBOL(add_const, std::, <type_traits>)
+SYMBOL(add_const_t, std::, <type_traits>)
+SYMBOL(add_cv, std::, <type_traits>)
+SYMBOL(add_cv_t, std::, <type_traits>)
+SYMBOL(add_lvalue_reference, std::, <type_traits>)
+SYMBOL(add_lvalue_reference_t, std::, <type_traits>)
+SYMBOL(add_pointer, std::, <type_traits>)
+SYMBOL(add_pointer_t, std::, <type_traits>)
+SYMBOL(add_rvalue_reference, std::, <type_traits>)
+SYMBOL(add_rvalue_reference_t, std::, <type_traits>)
+SYMBOL(add_volatile, std::, <type_traits>)
+SYMBOL(add_volatile_t, std::, <type_traits>)
+SYMBOL(addressof, std::, <memory>)
+SYMBOL(adjacent_difference, std::, <numeric>)
+SYMBOL(adjacent_find, std::, <algorithm>)
+SYMBOL(adopt_lock, std::, <mutex>)
+SYMBOL(adopt_lock_t, std::, <mutex>)
+SYMBOL(advance, std::, <iterator>)
+SYMBOL(align, std::, <memory>)
+SYMBOL(align_val_t, std::, <new>)
+SYMBOL(aligned_alloc, std::, <cstdlib>)
+SYMBOL(aligned_alloc, None, <cstdlib>)
+SYMBOL(aligned_alloc, None, <stdlib.h>)
+SYMBOL(aligned_storage, std::, <type_traits>)
+SYMBOL(aligned_storage_t, std::, <type_traits>)
+SYMBOL(aligned_union, std::, <type_traits>)
+SYMBOL(aligned_union_t, std::, <type_traits>)
+SYMBOL(alignment_of, std::, <type_traits>)
+SYMBOL(alignment_of_v, std::, <type_traits>)
+SYMBOL(all_of, std::, <algorithm>)
+SYMBOL(allocate_at_least, std::, <memory>)
+SYMBOL(allocate_shared, std::, <memory>)
+SYMBOL(allocate_shared_for_overwrite, std::, <memory>)
+SYMBOL(allocation_result, std::, <memory>)
+SYMBOL(allocator, std::, <memory>)
+SYMBOL(allocator_arg, std::, <memory>)
+SYMBOL(allocator_arg_t, std::, <memory>)
+SYMBOL(allocator_traits, std::, <memory>)
+SYMBOL(any, std::, <any>)
+SYMBOL(any_of, std::, <algorithm>)
+SYMBOL(apply, std::, <tuple>)
+SYMBOL(arg, std::, <complex>)
+SYMBOL(array, std::, <array>)
+SYMBOL(as_bytes, std::, <span>)
+SYMBOL(as_const, std::, <utility>)
+SYMBOL(as_writable_bytes, std::, <span>)
+SYMBOL(asctime, std::, <ctime>)
+SYMBOL(asctime, None, <ctime>)
+SYMBOL(asctime, None, <time.h>)
+SYMBOL(asin, std::, <cmath>)
+SYMBOL(asin, None, <cmath>)
+SYMBOL(asin, None, <math.h>)
+SYMBOL(asinf, std::, <cmath>)
+SYMBOL(asinf, None, <cmath>)
+SYMBOL(asinf, None, <math.h>)
+SYMBOL(asinh, std::, <cmath>)
+SYMBOL(asinh, None, <cmath>)
+SYMBOL(asinh, None, <math.h>)
+SYMBOL(asinhf, std::, <cmath>)
+SYMBOL(asinhf, None, <cmath>)
+SYMBOL(asinhf, None, <math.h>)
+SYMBOL(asinhl, std::, <cmath>)
+SYMBOL(asinhl, None, <cmath>)
+SYMBOL(asinhl, None, <math.h>)
+SYMBOL(asinl, std::, <cmath>)
+SYMBOL(asinl, None, <cmath>)
+SYMBOL(asinl, None, <math.h>)
+SYMBOL(assignable_from, std::, <concepts>)
+SYMBOL(assoc_laguerre, std::, <cmath>)
+SYMBOL(assoc_laguerref, std::, <cmath>)
+SYMBOL(assoc_laguerrel, std::, <cmath>)
+SYMBOL(assoc_legendre, std::, <cmath>)
+SYMBOL(assoc_legendref, std::, <cmath>)
+SYMBOL(assoc_legendrel, std::, <cmath>)
+SYMBOL(assume_aligned, std::, <memory>)
+SYMBOL(async, std::, <future>)
+SYMBOL(at_quick_exit, std::, <cstdlib>)
+SYMBOL(at_quick_exit, None, <cstdlib>)
+SYMBOL(at_quick_exit, None, <stdlib.h>)
+SYMBOL(atan, std::, <cmath>)
+SYMBOL(atan, None, <cmath>)
+SYMBOL(atan, None, <math.h>)
+SYMBOL(atan2, std::, <cmath>)
+SYMBOL(atan2, None, <cmath>)
+SYMBOL(atan2, None, <math.h>)
+SYMBOL(atan2f, std::, <cmath>)
+SYMBOL(atan2f, None, <cmath>)
+SYMBOL(atan2f, None, <math.h>)
+SYMBOL(atan2l, std::, <cmath>)
+SYMBOL(atan2l, None, <cmath>)
+SYMBOL(atan2l, None, <math.h>)
+SYMBOL(atanf, std::, <cmath>)
+SYMBOL(atanf, None, <cmath>)
+SYMBOL(atanf, None, <math.h>)
+SYMBOL(atanh, std::, <cmath>)
+SYMBOL(atanh, None, <cmath>)
+SYMBOL(atanh, None, <math.h>)
+SYMBOL(atanhf, std::, <cmath>)
+SYMBOL(atanhf, None, <cmath>)
+SYMBOL(atanhf, None, <math.h>)
+SYMBOL(atanhl, std::, <cmath>)
+SYMBOL(atanhl, None, <cmath>)
+SYMBOL(atanhl, None, <math.h>)
+SYMBOL(atanl, std::, <cmath>)
+SYMBOL(atanl, None, <cmath>)
+SYMBOL(atanl, None, <math.h>)
+SYMBOL(atexit, std::, <cstdlib>)
+SYMBOL(atexit, None, <cstdlib>)
+SYMBOL(atexit, None, <stdlib.h>)
+SYMBOL(atof, std::, <cstdlib>)
+SYMBOL(atof, None, <cstdlib>)
+SYMBOL(atof, None, <stdlib.h>)
+SYMBOL(atoi, std::, <cstdlib>)
+SYMBOL(atoi, None, <cstdlib>)
+SYMBOL(atoi, None, <stdlib.h>)
+SYMBOL(atol, std::, <cstdlib>)
+SYMBOL(atol, None, <cstdlib>)
+SYMBOL(atol, None, <stdlib.h>)
+SYMBOL(atoll, std::, <cstdlib>)
+SYMBOL(atoll, None, <cstdlib>)
+SYMBOL(atoll, None, <stdlib.h>)
+SYMBOL(atomic_compare_exchange_strong, std::, <atomic>)
+SYMBOL(atomic_compare_exchange_strong_explicit, std::, <atomic>)
+SYMBOL(atomic_compare_exchange_weak, std::, <atomic>)
+SYMBOL(atomic_compare_exchange_weak_explicit, std::, <atomic>)
+SYMBOL(atomic_exchange, std::, <atomic>)
+SYMBOL(atomic_exchange_explicit, std::, <atomic>)
+SYMBOL(atomic_fetch_add, std::, <atomic>)
+SYMBOL(atomic_fetch_add_explicit, std::, <atomic>)
+SYMBOL(atomic_fetch_and, std::, <atomic>)
+SYMBOL(atomic_fetch_and_explicit, std::, <atomic>)
+SYMBOL(atomic_fetch_or, std::, <atomic>)
+SYMBOL(atomic_fetch_or_explicit, std::, <atomic>)
+SYMBOL(atomic_fetch_sub, std::, <atomic>)
+SYMBOL(atomic_fetch_sub_explicit, std::, <atomic>)
+SYMBOL(atomic_fetch_xor, std::, <atomic>)
+SYMBOL(atomic_fetch_xor_explicit, std::, <atomic>)
+SYMBOL(atomic_flag, std::, <atomic>)
+SYMBOL(atomic_flag_clear, std::, <atomic>)
+SYMBOL(atomic_flag_clear_explicit, std::, <atomic>)
+SYMBOL(atomic_flag_notify_all, std::, <atomic>)
+SYMBOL(atomic_flag_notify_one, std::, <atomic>)
+SYMBOL(atomic_flag_test, std::, <atomic>)
+SYMBOL(atomic_flag_test_and_set, std::, <atomic>)
+SYMBOL(atomic_flag_test_and_set_explicit, std::, <atomic>)
+SYMBOL(atomic_flag_test_explicit, std::, <atomic>)
+SYMBOL(atomic_flag_wait, std::, <atomic>)
+SYMBOL(atomic_flag_wait_explicit, std::, <atomic>)
+SYMBOL(atomic_init, std::, <atomic>)
+SYMBOL(atomic_is_lock_free, std::, <atomic>)
+SYMBOL(atomic_load, std::, <atomic>)
+SYMBOL(atomic_load_explicit, std::, <atomic>)
+SYMBOL(atomic_notify_all, std::, <atomic>)
+SYMBOL(atomic_notify_one, std::, <atomic>)
+SYMBOL(atomic_ref, std::, <atomic>)
+SYMBOL(atomic_signal_fence, std::, <atomic>)
+SYMBOL(atomic_store, std::, <atomic>)
+SYMBOL(atomic_store_explicit, std::, <atomic>)
+SYMBOL(atomic_thread_fence, std::, <atomic>)
+SYMBOL(atomic_wait, std::, <atomic>)
+SYMBOL(atomic_wait_explicit, std::, <atomic>)
+SYMBOL(atto, std::, <ratio>)
+SYMBOL(auto_ptr, std::, <memory>)
+SYMBOL(back_insert_iterator, std::, <iterator>)
+SYMBOL(back_inserter, std::, <iterator>)
+SYMBOL(bad_alloc, std::, <new>)
+SYMBOL(bad_any_cast, std::, <any>)
+SYMBOL(bad_array_new_length, std::, <new>)
+SYMBOL(bad_cast, std::, <typeinfo>)
+SYMBOL(bad_exception, std::, <exception>)
+SYMBOL(bad_function_call, std::, <functional>)
+SYMBOL(bad_optional_access, std::, <optional>)
+SYMBOL(bad_typeid, std::, <typeinfo>)
+SYMBOL(bad_variant_access, std::, <variant>)
+SYMBOL(bad_weak_ptr, std::, <memory>)
+SYMBOL(barrier, std::, <barrier>)
+SYMBOL(basic_common_reference, std::, <type_traits>)
+SYMBOL(basic_filebuf, std::, <fstream>)
+SYMBOL(basic_filebuf, std::, <iosfwd>)
+SYMBOL(basic_format_arg, std::, <format>)
+SYMBOL(basic_format_args, std::, <format>)
+SYMBOL(basic_format_context, std::, <format>)
+SYMBOL(basic_format_parse_context, std::, <format>)
+SYMBOL(basic_fstream, std::, <fstream>)
+SYMBOL(basic_fstream, std::, <iosfwd>)
+SYMBOL(basic_ifstream, std::, <fstream>)
+SYMBOL(basic_ifstream, std::, <iosfwd>)
+SYMBOL(basic_ios, std::, <ios>)
+SYMBOL(basic_ios, std::, <iostream>)
+SYMBOL(basic_ios, std::, <iosfwd>)
+SYMBOL(basic_iostream, std::, <istream>)
+SYMBOL(basic_iostream, std::, <iostream>)
+SYMBOL(basic_iostream, std::, <iosfwd>)
+SYMBOL(basic_ispanstream, std::, <spanstream>)
+SYMBOL(basic_ispanstream, std::, <iosfwd>)
+SYMBOL(basic_istream, std::, <istream>)
+SYMBOL(basic_istream, std::, <iostream>)
+SYMBOL(basic_istream, std::, <iosfwd>)
+SYMBOL(basic_istringstream, std::, <sstream>)
+SYMBOL(basic_istringstream, std::, <iosfwd>)
+SYMBOL(basic_ofstream, std::, <fstream>)
+SYMBOL(basic_ofstream, std::, <iosfwd>)
+SYMBOL(basic_ospanstream, std::, <spanstream>)
+SYMBOL(basic_ospanstream, std::, <iosfwd>)
+SYMBOL(basic_ostream, std::, <ostream>)
+SYMBOL(basic_ostream, std::, <iostream>)
+SYMBOL(basic_ostream, std::, <iosfwd>)
+SYMBOL(basic_ostringstream, std::, <sstream>)
+SYMBOL(basic_ostringstream, std::, <iosfwd>)
+SYMBOL(basic_osyncstream, std::, <syncstream>)
+SYMBOL(basic_osyncstream, std::, <iosfwd>)
+SYMBOL(basic_regex, std::, <regex>)
+SYMBOL(basic_spanbuf, std::, <spanstream>)
+SYMBOL(basic_spanbuf, std::, <iosfwd>)
+SYMBOL(basic_spanstream, std::, <spanstream>)
+SYMBOL(basic_spanstream, std::, <iosfwd>)
+SYMBOL(basic_stacktrace, std::, <stacktrace>)
+SYMBOL(basic_streambuf, std::, <streambuf>)
+SYMBOL(basic_streambuf, std::, <iostream>)
+SYMBOL(basic_streambuf, std::, <iosfwd>)
+SYMBOL(basic_string, std::, <string>)
+SYMBOL(basic_string_view, std::, <string_view>)
+SYMBOL(basic_stringbuf, std::, <sstream>)
+SYMBOL(basic_stringbuf, std::, <iosfwd>)
+SYMBOL(basic_stringstream, std::, <sstream>)
+SYMBOL(basic_stringstream, std::, <iosfwd>)
+SYMBOL(basic_syncbuf, std::, <syncstream>)
+SYMBOL(basic_syncbuf, std::, <iosfwd>)
+SYMBOL(bernoulli_distribution, std::, <random>)
+SYMBOL(beta, std::, <cmath>)
+SYMBOL(betaf, std::, <cmath>)
+SYMBOL(betal, std::, <cmath>)
+SYMBOL(bidirectional_iterator, std::, <iterator>)
+SYMBOL(bidirectional_iterator_tag, std::, <iterator>)
+SYMBOL(binary_function, std::, <functional>)
+SYMBOL(binary_negate, std::, <functional>)
+SYMBOL(binary_search, std::, <algorithm>)
+SYMBOL(binary_semaphore, std::, <semaphore>)
+SYMBOL(bind, std::, <functional>)
+SYMBOL(bind1st, std::, <functional>)
+SYMBOL(bind2nd, std::, <functional>)
+SYMBOL(bind_back, std::, <functional>)
+SYMBOL(bind_front, std::, <functional>)
+SYMBOL(binder1st, std::, <functional>)
+SYMBOL(binder2nd, std::, <functional>)
+SYMBOL(binomial_distribution, std::, <random>)
+SYMBOL(bit_and, std::, <functional>)
+SYMBOL(bit_cast, std::, <bit>)
+SYMBOL(bit_ceil, std::, <bit>)
+SYMBOL(bit_floor, std::, <bit>)
+SYMBOL(bit_not, std::, <functional>)
+SYMBOL(bit_or, std::, <functional>)
+SYMBOL(bit_width, std::, <bit>)
+SYMBOL(bit_xor, std::, <functional>)
+SYMBOL(bitset, std::, <bitset>)
+SYMBOL(bool_constant, std::, <type_traits>)
+SYMBOL(boolalpha, std::, <ios>)
+SYMBOL(boolalpha, std::, <iostream>)
+SYMBOL(boyer_moore_horspool_searcher, std::, <functional>)
+SYMBOL(boyer_moore_searcher, std::, <functional>)
+SYMBOL(bsearch, std::, <cstdlib>)
+SYMBOL(bsearch, None, <cstdlib>)
+SYMBOL(bsearch, None, <stdlib.h>)
+SYMBOL(btowc, std::, <cwchar>)
+SYMBOL(btowc, None, <cwchar>)
+SYMBOL(btowc, None, <wchar.h>)
+SYMBOL(byte, std::, <cstddef>)
+SYMBOL(byteswap, std::, <bit>)
+SYMBOL(c16rtomb, std::, <cuchar>)
+SYMBOL(c16rtomb, None, <cuchar>)
+SYMBOL(c16rtomb, None, <uchar.h>)
+SYMBOL(c32rtomb, std::, <cuchar>)
+SYMBOL(c32rtomb, None, <cuchar>)
+SYMBOL(c32rtomb, None, <uchar.h>)
+SYMBOL(c8rtomb, std::, <cuchar>)
+SYMBOL(c8rtomb, None, <cuchar>)
+SYMBOL(c8rtomb, None, <uchar.h>)
+SYMBOL(call_once, std::, <mutex>)
+SYMBOL(calloc, std::, <cstdlib>)
+SYMBOL(calloc, None, <cstdlib>)
+SYMBOL(calloc, None, <stdlib.h>)
+SYMBOL(cauchy_distribution, std::, <random>)
+SYMBOL(cbrt, std::, <cmath>)
+SYMBOL(cbrt, None, <cmath>)
+SYMBOL(cbrt, None, <math.h>)
+SYMBOL(cbrtf, std::, <cmath>)
+SYMBOL(cbrtf, None, <cmath>)
+SYMBOL(cbrtf, None, <math.h>)
+SYMBOL(cbrtl, std::, <cmath>)
+SYMBOL(cbrtl, None, <cmath>)
+SYMBOL(cbrtl, None, <math.h>)
+SYMBOL(ceil, std::, <cmath>)
+SYMBOL(ceil, None, <cmath>)
+SYMBOL(ceil, None, <math.h>)
+SYMBOL(ceilf, std::, <cmath>)
+SYMBOL(ceilf, None, <cmath>)
+SYMBOL(ceilf, None, <math.h>)
+SYMBOL(ceill, std::, <cmath>)
+SYMBOL(ceill, None, <cmath>)
+SYMBOL(ceill, None, <math.h>)
+SYMBOL(centi, std::, <ratio>)
+SYMBOL(cerr, std::, <iostream>)
+SYMBOL(char_traits, std::, <string>)
+SYMBOL(chars_format, std::, <charconv>)
+SYMBOL(chi_squared_distribution, std::, <random>)
+SYMBOL(cin, std::, <iostream>)
+SYMBOL(clamp, std::, <algorithm>)
+SYMBOL(clearerr, std::, <cstdio>)
+SYMBOL(clearerr, None, <cstdio>)
+SYMBOL(clearerr, None, <stdio.h>)
+SYMBOL(clock, std::, <ctime>)
+SYMBOL(clock, None, <ctime>)
+SYMBOL(clock, None, <time.h>)
+SYMBOL(clock_t, std::, <ctime>)
+SYMBOL(clock_t, None, <ctime>)
+SYMBOL(clock_t, None, <time.h>)
+SYMBOL(clog, std::, <iostream>)
+SYMBOL(cmatch, std::, <regex>)
+SYMBOL(cmp_equal, std::, <utility>)
+SYMBOL(cmp_greater, std::, <utility>)
+SYMBOL(cmp_greater_equal, std::, <utility>)
+SYMBOL(cmp_less, std::, <utility>)
+SYMBOL(cmp_less_equal, std::, <utility>)
+SYMBOL(cmp_not_equal, std::, <utility>)
+SYMBOL(codecvt, std::, <locale>)
+SYMBOL(codecvt_base, std::, <locale>)
+SYMBOL(codecvt_byname, std::, <locale>)
+SYMBOL(codecvt_mode, std::, <codecvt>)
+SYMBOL(codecvt_utf16, std::, <codecvt>)
+SYMBOL(codecvt_utf8, std::, <codecvt>)
+SYMBOL(codecvt_utf8_utf16, std::, <codecvt>)
+SYMBOL(collate, std::, <locale>)
+SYMBOL(collate_byname, std::, <locale>)
+SYMBOL(common_comparison_category, std::, <compare>)
+SYMBOL(common_comparison_category_t, std::, <compare>)
+SYMBOL(common_iterator, std::, <iterator>)
+SYMBOL(common_reference, std::, <type_traits>)
+SYMBOL(common_reference_t, std::, <type_traits>)
+SYMBOL(common_reference_with, std::, <concepts>)
+SYMBOL(common_type, std::, <type_traits>)
+SYMBOL(common_type_t, std::, <type_traits>)
+SYMBOL(common_with, std::, <concepts>)
+SYMBOL(comp_ellint_1, std::, <cmath>)
+SYMBOL(comp_ellint_1f, std::, <cmath>)
+SYMBOL(comp_ellint_1l, std::, <cmath>)
+SYMBOL(comp_ellint_2, std::, <cmath>)
+SYMBOL(comp_ellint_2f, std::, <cmath>)
+SYMBOL(comp_ellint_2l, std::, <cmath>)
+SYMBOL(comp_ellint_3, std::, <cmath>)
+SYMBOL(comp_ellint_3f, std::, <cmath>)
+SYMBOL(comp_ellint_3l, std::, <cmath>)
+SYMBOL(compare_partial_order_fallback, std::, <compare>)
+SYMBOL(compare_strong_order_fallback, std::, <compare>)
+SYMBOL(compare_three_way_result, std::, <compare>)
+SYMBOL(compare_three_way_result_t, std::, <compare>)
+SYMBOL(compare_weak_order_fallback, std::, <compare>)
+SYMBOL(complex, std::, <complex>)
+SYMBOL(condition_variable, std::, <condition_variable>)
+SYMBOL(condition_variable_any, std::, <condition_variable>)
+SYMBOL(conditional, std::, <type_traits>)
+SYMBOL(conditional_t, std::, <type_traits>)
+SYMBOL(conj, std::, <complex>)
+SYMBOL(conjunction, std::, <type_traits>)
+SYMBOL(conjunction_v, std::, <type_traits>)
+SYMBOL(const_mem_fun1_ref_t, std::, <functional>)
+SYMBOL(const_mem_fun1_t, std::, <functional>)
+SYMBOL(const_mem_fun_ref_t, std::, <functional>)
+SYMBOL(const_mem_fun_t, std::, <functional>)
+SYMBOL(const_pointer_cast, std::, <memory>)
+SYMBOL(construct_at, std::, <memory>)
+SYMBOL(constructible_from, std::, <concepts>)
+SYMBOL(contiguous_iterator, std::, <iterator>)
+SYMBOL(contiguous_iterator_tag, std::, <iterator>)
+SYMBOL(convertible_to, std::, <concepts>)
+SYMBOL(copy, std::, <algorithm>)
+SYMBOL(copy_backward, std::, <algorithm>)
+SYMBOL(copy_constructible, std::, <concepts>)
+SYMBOL(copy_if, std::, <algorithm>)
+SYMBOL(copy_n, std::, <algorithm>)
+SYMBOL(copyable, std::, <concepts>)
+SYMBOL(copysign, std::, <cmath>)
+SYMBOL(copysign, None, <cmath>)
+SYMBOL(copysign, None, <math.h>)
+SYMBOL(copysignf, std::, <cmath>)
+SYMBOL(copysignf, None, <cmath>)
+SYMBOL(copysignf, None, <math.h>)
+SYMBOL(copysignl, std::, <cmath>)
+SYMBOL(copysignl, None, <cmath>)
+SYMBOL(copysignl, None, <math.h>)
+SYMBOL(coroutine_handle, std::, <coroutine>)
+SYMBOL(coroutine_traits, std::, <coroutine>)
+SYMBOL(cos, std::, <cmath>)
+SYMBOL(cos, None, <cmath>)
+SYMBOL(cos, None, <math.h>)
+SYMBOL(cosf, std::, <cmath>)
+SYMBOL(cosf, None, <cmath>)
+SYMBOL(cosf, None, <math.h>)
+SYMBOL(cosh, std::, <cmath>)
+SYMBOL(cosh, None, <cmath>)
+SYMBOL(cosh, None, <math.h>)
+SYMBOL(coshf, std::, <cmath>)
+SYMBOL(coshf, None, <cmath>)
+SYMBOL(coshf, None, <math.h>)
+SYMBOL(coshl, std::, <cmath>)
+SYMBOL(coshl, None, <cmath>)
+SYMBOL(coshl, None, <math.h>)
+SYMBOL(cosl, std::, <cmath>)
+SYMBOL(cosl, None, <cmath>)
+SYMBOL(cosl, None, <math.h>)
+SYMBOL(count, std::, <algorithm>)
+SYMBOL(count_if, std::, <algorithm>)
+SYMBOL(counted_iterator, std::, <iterator>)
+SYMBOL(counting_semaphore, std::, <semaphore>)
+SYMBOL(countl_one, std::, <bit>)
+SYMBOL(countl_zero, std::, <bit>)
+SYMBOL(countr_one, std::, <bit>)
+SYMBOL(countr_zero, std::, <bit>)
+SYMBOL(cout, std::, <iostream>)
+SYMBOL(cref, std::, <functional>)
+SYMBOL(cregex_iterator, std::, <regex>)
+SYMBOL(cregex_token_iterator, std::, <regex>)
+SYMBOL(csub_match, std::, <regex>)
+SYMBOL(ctime, std::, <ctime>)
+SYMBOL(ctime, None, <ctime>)
+SYMBOL(ctime, None, <time.h>)
+SYMBOL(ctype, std::, <locale>)
+SYMBOL(ctype_base, std::, <locale>)
+SYMBOL(ctype_byname, std::, <locale>)
+SYMBOL(current_exception, std::, <exception>)
+SYMBOL(cv_status, std::, <condition_variable>)
+SYMBOL(cyl_bessel_i, std::, <cmath>)
+SYMBOL(cyl_bessel_if, std::, <cmath>)
+SYMBOL(cyl_bessel_il, std::, <cmath>)
+SYMBOL(cyl_bessel_j, std::, <cmath>)
+SYMBOL(cyl_bessel_jf, std::, <cmath>)
+SYMBOL(cyl_bessel_jl, std::, <cmath>)
+SYMBOL(cyl_bessel_k, std::, <cmath>)
+SYMBOL(cyl_bessel_kf, std::, <cmath>)
+SYMBOL(cyl_bessel_kl, std::, <cmath>)
+SYMBOL(cyl_neumann, std::, <cmath>)
+SYMBOL(cyl_neumannf, std::, <cmath>)
+SYMBOL(cyl_neumannl, std::, <cmath>)
+SYMBOL(dec, std::, <ios>)
+SYMBOL(dec, std::, <iostream>)
+SYMBOL(deca, std::, <ratio>)
+SYMBOL(decay, std::, <type_traits>)
+SYMBOL(decay_t, std::, <type_traits>)
+SYMBOL(deci, std::, <ratio>)
+SYMBOL(declare_no_pointers, std::, <memory>)
+SYMBOL(declare_reachable, std::, <memory>)
+SYMBOL(declval, std::, <utility>)
+SYMBOL(default_delete, std::, <memory>)
+SYMBOL(default_initializable, std::, <concepts>)
+SYMBOL(default_random_engine, std::, <random>)
+SYMBOL(default_searcher, std::, <functional>)
+SYMBOL(default_sentinel, std::, <iterator>)
+SYMBOL(default_sentinel_t, std::, <iterator>)
+SYMBOL(defaultfloat, std::, <ios>)
+SYMBOL(defaultfloat, std::, <iostream>)
+SYMBOL(defer_lock, std::, <mutex>)
+SYMBOL(defer_lock_t, std::, <mutex>)
+SYMBOL(denorm_absent, std::, <limits>)
+SYMBOL(denorm_indeterminate, std::, <limits>)
+SYMBOL(denorm_present, std::, <limits>)
+SYMBOL(deque, std::, <deque>)
+SYMBOL(derived_from, std::, <concepts>)
+SYMBOL(destroy, std::, <memory>)
+SYMBOL(destroy_at, std::, <memory>)
+SYMBOL(destroy_n, std::, <memory>)
+SYMBOL(destroying_delete, std::, <new>)
+SYMBOL(destroying_delete_t, std::, <new>)
+SYMBOL(destructible, std::, <concepts>)
+SYMBOL(difftime, std::, <ctime>)
+SYMBOL(difftime, None, <ctime>)
+SYMBOL(difftime, None, <time.h>)
+SYMBOL(disable_sized_sentinel_for, std::, <iterator>)
+SYMBOL(discard_block_engine, std::, <random>)
+SYMBOL(discrete_distribution, std::, <random>)
+SYMBOL(disjunction, std::, <type_traits>)
+SYMBOL(disjunction_v, std::, <type_traits>)
+SYMBOL(distance, std::, <iterator>)
+SYMBOL(div_t, std::, <cstdlib>)
+SYMBOL(div_t, None, <cstdlib>)
+SYMBOL(div_t, None, <stdlib.h>)
+SYMBOL(divides, std::, <functional>)
+SYMBOL(domain_error, std::, <stdexcept>)
+SYMBOL(double_t, std::, <cmath>)
+SYMBOL(double_t, None, <cmath>)
+SYMBOL(double_t, None, <math.h>)
+SYMBOL(dynamic_extent, std::, <span>)
+SYMBOL(dynamic_pointer_cast, std::, <memory>)
+SYMBOL(ellint_1, std::, <cmath>)
+SYMBOL(ellint_1f, std::, <cmath>)
+SYMBOL(ellint_1l, std::, <cmath>)
+SYMBOL(ellint_2, std::, <cmath>)
+SYMBOL(ellint_2f, std::, <cmath>)
+SYMBOL(ellint_2l, std::, <cmath>)
+SYMBOL(ellint_3, std::, <cmath>)
+SYMBOL(ellint_3f, std::, <cmath>)
+SYMBOL(ellint_3l, std::, <cmath>)
+SYMBOL(emit_on_flush, std::, <ostream>)
+SYMBOL(emit_on_flush, std::, <iostream>)
+SYMBOL(enable_if, std::, <type_traits>)
+SYMBOL(enable_if_t, std::, <type_traits>)
+SYMBOL(enable_shared_from_this, std::, <memory>)
+SYMBOL(endian, std::, <bit>)
+SYMBOL(endl, std::, <ostream>)
+SYMBOL(endl, std::, <iostream>)
+SYMBOL(ends, std::, <ostream>)
+SYMBOL(ends, std::, <iostream>)
+SYMBOL(equal, std::, <algorithm>)
+SYMBOL(equal_range, std::, <algorithm>)
+SYMBOL(equal_to, std::, <functional>)
+SYMBOL(equality_comparable, std::, <concepts>)
+SYMBOL(equality_comparable_with, std::, <concepts>)
+SYMBOL(equivalence_relation, std::, <concepts>)
+SYMBOL(erase, std::, <vector>)
+SYMBOL(erase_if, std::, <vector>)
+SYMBOL(erf, std::, <cmath>)
+SYMBOL(erf, None, <cmath>)
+SYMBOL(erf, None, <math.h>)
+SYMBOL(erfc, std::, <cmath>)
+SYMBOL(erfc, None, <cmath>)
+SYMBOL(erfc, None, <math.h>)
+SYMBOL(erfcf, std::, <cmath>)
+SYMBOL(erfcf, None, <cmath>)
+SYMBOL(erfcf, None, <math.h>)
+SYMBOL(erfcl, std::, <cmath>)
+SYMBOL(erfcl, None, <cmath>)
+SYMBOL(erfcl, None, <math.h>)
+SYMBOL(erff, std::, <cmath>)
+SYMBOL(erff, None, <cmath>)
+SYMBOL(erff, None, <math.h>)
+SYMBOL(erfl, std::, <cmath>)
+SYMBOL(erfl, None, <cmath>)
+SYMBOL(erfl, None, <math.h>)
+SYMBOL(errc, std::, <system_error>)
+SYMBOL(error_category, std::, <system_error>)
+SYMBOL(error_code, std::, <system_error>)
+SYMBOL(error_condition, std::, <system_error>)
+SYMBOL(exa, std::, <ratio>)
+SYMBOL(exception, std::, <exception>)
+SYMBOL(exception_ptr, std::, <exception>)
+SYMBOL(exchange, std::, <utility>)
+SYMBOL(exclusive_scan, std::, <numeric>)
+SYMBOL(exit, std::, <cstdlib>)
+SYMBOL(exit, None, <cstdlib>)
+SYMBOL(exit, None, <stdlib.h>)
+SYMBOL(exp, std::, <cmath>)
+SYMBOL(exp, None, <cmath>)
+SYMBOL(exp, None, <math.h>)
+SYMBOL(exp2, std::, <cmath>)
+SYMBOL(exp2, None, <cmath>)
+SYMBOL(exp2, None, <math.h>)
+SYMBOL(exp2f, std::, <cmath>)
+SYMBOL(exp2f, None, <cmath>)
+SYMBOL(exp2f, None, <math.h>)
+SYMBOL(exp2l, std::, <cmath>)
+SYMBOL(exp2l, None, <cmath>)
+SYMBOL(exp2l, None, <math.h>)
+SYMBOL(expf, std::, <cmath>)
+SYMBOL(expf, None, <cmath>)
+SYMBOL(expf, None, <math.h>)
+SYMBOL(expint, std::, <cmath>)
+SYMBOL(expintf, std::, <cmath>)
+SYMBOL(expintl, std::, <cmath>)
+SYMBOL(expl, std::, <cmath>)
+SYMBOL(expl, None, <cmath>)
+SYMBOL(expl, None, <math.h>)
+SYMBOL(expm1, std::, <cmath>)
+SYMBOL(expm1, None, <cmath>)
+SYMBOL(expm1, None, <math.h>)
+SYMBOL(expm1f, std::, <cmath>)
+SYMBOL(expm1f, None, <cmath>)
+SYMBOL(expm1f, None, <math.h>)
+SYMBOL(expm1l, std::, <cmath>)
+SYMBOL(expm1l, None, <cmath>)
+SYMBOL(expm1l, None, <math.h>)
+SYMBOL(exponential_distribution, std::, <random>)
+SYMBOL(extent, std::, <type_traits>)
+SYMBOL(extent_v, std::, <type_traits>)
+SYMBOL(extreme_value_distribution, std::, <random>)
+SYMBOL(fabs, std::, <cmath>)
+SYMBOL(fabs, None, <cmath>)
+SYMBOL(fabs, None, <math.h>)
+SYMBOL(fabsf, std::, <cmath>)
+SYMBOL(fabsf, None, <cmath>)
+SYMBOL(fabsf, None, <math.h>)
+SYMBOL(fabsl, std::, <cmath>)
+SYMBOL(fabsl, None, <cmath>)
+SYMBOL(fabsl, None, <math.h>)
+SYMBOL(false_type, std::, <type_traits>)
+SYMBOL(fclose, std::, <cstdio>)
+SYMBOL(fclose, None, <cstdio>)
+SYMBOL(fclose, None, <stdio.h>)
+SYMBOL(fdim, std::, <cmath>)
+SYMBOL(fdim, None, <cmath>)
+SYMBOL(fdim, None, <math.h>)
+SYMBOL(fdimf, std::, <cmath>)
+SYMBOL(fdimf, None, <cmath>)
+SYMBOL(fdimf, None, <math.h>)
+SYMBOL(fdiml, std::, <cmath>)
+SYMBOL(fdiml, None, <cmath>)
+SYMBOL(fdiml, None, <math.h>)
+SYMBOL(feclearexcept, std::, <cfenv>)
+SYMBOL(feclearexcept, None, <cfenv>)
+SYMBOL(feclearexcept, None, <fenv.h>)
+SYMBOL(fegetenv, std::, <cfenv>)
+SYMBOL(fegetenv, None, <cfenv>)
+SYMBOL(fegetenv, None, <fenv.h>)
+SYMBOL(fegetexceptflag, std::, <cfenv>)
+SYMBOL(fegetexceptflag, None, <cfenv>)
+SYMBOL(fegetexceptflag, None, <fenv.h>)
+SYMBOL(fegetround, std::, <cfenv>)
+SYMBOL(fegetround, None, <cfenv>)
+SYMBOL(fegetround, None, <fenv.h>)
+SYMBOL(feholdexcept, std::, <cfenv>)
+SYMBOL(feholdexcept, None, <cfenv>)
+SYMBOL(feholdexcept, None, <fenv.h>)
+SYMBOL(femto, std::, <ratio>)
+SYMBOL(fenv_t, std::, <cfenv>)
+SYMBOL(fenv_t, None, <cfenv>)
+SYMBOL(fenv_t, None, <fenv.h>)
+SYMBOL(feof, std::, <cstdio>)
+SYMBOL(feof, None, <cstdio>)
+SYMBOL(feof, None, <stdio.h>)
+SYMBOL(feraiseexcept, std::, <cfenv>)
+SYMBOL(feraiseexcept, None, <cfenv>)
+SYMBOL(feraiseexcept, None, <fenv.h>)
+SYMBOL(ferror, std::, <cstdio>)
+SYMBOL(ferror, None, <cstdio>)
+SYMBOL(ferror, None, <stdio.h>)
+SYMBOL(fesetenv, std::, <cfenv>)
+SYMBOL(fesetenv, None, <cfenv>)
+SYMBOL(fesetenv, None, <fenv.h>)
+SYMBOL(fesetexceptflag, std::, <cfenv>)
+SYMBOL(fesetexceptflag, None, <cfenv>)
+SYMBOL(fesetexceptflag, None, <fenv.h>)
+SYMBOL(fesetround, std::, <cfenv>)
+SYMBOL(fesetround, None, <cfenv>)
+SYMBOL(fesetround, None, <fenv.h>)
+SYMBOL(fetestexcept, std::, <cfenv>)
+SYMBOL(fetestexcept, None, <cfenv>)
+SYMBOL(fetestexcept, None, <fenv.h>)
+SYMBOL(feupdateenv, std::, <cfenv>)
+SYMBOL(feupdateenv, None, <cfenv>)
+SYMBOL(feupdateenv, None, <fenv.h>)
+SYMBOL(fexcept_t, std::, <cfenv>)
+SYMBOL(fexcept_t, None, <cfenv>)
+SYMBOL(fexcept_t, None, <fenv.h>)
+SYMBOL(fflush, std::, <cstdio>)
+SYMBOL(fflush, None, <cstdio>)
+SYMBOL(fflush, None, <stdio.h>)
+SYMBOL(fgetc, std::, <cstdio>)
+SYMBOL(fgetc, None, <cstdio>)
+SYMBOL(fgetc, None, <stdio.h>)
+SYMBOL(fgetpos, std::, <cstdio>)
+SYMBOL(fgetpos, None, <cstdio>)
+SYMBOL(fgetpos, None, <stdio.h>)
+SYMBOL(fgets, std::, <cstdio>)
+SYMBOL(fgets, None, <cstdio>)
+SYMBOL(fgets, None, <stdio.h>)
+SYMBOL(fgetwc, std::, <cwchar>)
+SYMBOL(fgetwc, None, <cwchar>)
+SYMBOL(fgetwc, None, <wchar.h>)
+SYMBOL(fgetws, std::, <cwchar>)
+SYMBOL(fgetws, None, <cwchar>)
+SYMBOL(fgetws, None, <wchar.h>)
+SYMBOL(filebuf, std::, <streambuf>)
+SYMBOL(filebuf, std::, <iostream>)
+SYMBOL(filebuf, std::, <iosfwd>)
+SYMBOL(fill, std::, <algorithm>)
+SYMBOL(fill_n, std::, <algorithm>)
+SYMBOL(find, std::, <algorithm>)
+SYMBOL(find_end, std::, <algorithm>)
+SYMBOL(find_first_of, std::, <algorithm>)
+SYMBOL(find_if, std::, <algorithm>)
+SYMBOL(find_if_not, std::, <algorithm>)
+SYMBOL(fisher_f_distribution, std::, <random>)
+SYMBOL(fixed, std::, <ios>)
+SYMBOL(fixed, std::, <iostream>)
+SYMBOL(float_denorm_style, std::, <limits>)
+SYMBOL(float_round_style, std::, <limits>)
+SYMBOL(float_t, std::, <cmath>)
+SYMBOL(float_t, None, <cmath>)
+SYMBOL(float_t, None, <math.h>)
+SYMBOL(floating_point, std::, <concepts>)
+SYMBOL(floor, std::, <cmath>)
+SYMBOL(floor, None, <cmath>)
+SYMBOL(floor, None, <math.h>)
+SYMBOL(floorf, std::, <cmath>)
+SYMBOL(floorf, None, <cmath>)
+SYMBOL(floorf, None, <math.h>)
+SYMBOL(floorl, std::, <cmath>)
+SYMBOL(floorl, None, <cmath>)
+SYMBOL(floorl, None, <math.h>)
+SYMBOL(flush, std::, <ostream>)
+SYMBOL(flush, std::, <iostream>)
+SYMBOL(flush_emit, std::, <ostream>)
+SYMBOL(flush_emit, std::, <iostream>)
+SYMBOL(fma, std::, <cmath>)
+SYMBOL(fma, None, <cmath>)
+SYMBOL(fma, None, <math.h>)
+SYMBOL(fmaf, std::, <cmath>)
+SYMBOL(fmaf, None, <cmath>)
+SYMBOL(fmaf, None, <math.h>)
+SYMBOL(fmal, std::, <cmath>)
+SYMBOL(fmal, None, <cmath>)
+SYMBOL(fmal, None, <math.h>)
+SYMBOL(fmax, std::, <cmath>)
+SYMBOL(fmax, None, <cmath>)
+SYMBOL(fmax, None, <math.h>)
+SYMBOL(fmaxf, std::, <cmath>)
+SYMBOL(fmaxf, None, <cmath>)
+SYMBOL(fmaxf, None, <math.h>)
+SYMBOL(fmaxl, std::, <cmath>)
+SYMBOL(fmaxl, None, <cmath>)
+SYMBOL(fmaxl, None, <math.h>)
+SYMBOL(fmin, std::, <cmath>)
+SYMBOL(fmin, None, <cmath>)
+SYMBOL(fmin, None, <math.h>)
+SYMBOL(fminf, std::, <cmath>)
+SYMBOL(fminf, None, <cmath>)
+SYMBOL(fminf, None, <math.h>)
+SYMBOL(fminl, std::, <cmath>)
+SYMBOL(fminl, None, <cmath>)
+SYMBOL(fminl, None, <math.h>)
+SYMBOL(fmod, std::, <cmath>)
+SYMBOL(fmod, None, <cmath>)
+SYMBOL(fmod, None, <math.h>)
+SYMBOL(fmodf, std::, <cmath>)
+SYMBOL(fmodf, None, <cmath>)
+SYMBOL(fmodf, None, <math.h>)
+SYMBOL(fmodl, std::, <cmath>)
+SYMBOL(fmodl, None, <cmath>)
+SYMBOL(fmodl, None, <math.h>)
+SYMBOL(fopen, std::, <cstdio>)
+SYMBOL(fopen, None, <cstdio>)
+SYMBOL(fopen, None, <stdio.h>)
+SYMBOL(for_each, std::, <algorithm>)
+SYMBOL(for_each_n, std::, <algorithm>)
+SYMBOL(format, std::, <format>)
+SYMBOL(format_args, std::, <format>)
+SYMBOL(format_context, std::, <format>)
+SYMBOL(format_error, std::, <format>)
+SYMBOL(format_parse_context, std::, <format>)
+SYMBOL(format_to, std::, <format>)
+SYMBOL(format_to_n, std::, <format>)
+SYMBOL(format_to_n_result, std::, <format>)
+SYMBOL(formatted_size, std::, <format>)
+SYMBOL(formatter, std::, <format>)
+SYMBOL(forward, std::, <utility>)
+SYMBOL(forward_as_tuple, std::, <tuple>)
+SYMBOL(forward_iterator, std::, <iterator>)
+SYMBOL(forward_iterator_tag, std::, <iterator>)
+SYMBOL(forward_like, std::, <utility>)
+SYMBOL(forward_list, std::, <forward_list>)
+SYMBOL(fpclassify, std::, <cmath>)
+SYMBOL(fpclassify, None, <cmath>)
+SYMBOL(fpclassify, None, <math.h>)
+SYMBOL(fpos, std::, <ios>)
+SYMBOL(fpos, std::, <iostream>)
+SYMBOL(fpos, std::, <iosfwd>)
+SYMBOL(fpos_t, std::, <cstdio>)
+SYMBOL(fpos_t, None, <cstdio>)
+SYMBOL(fpos_t, None, <stdio.h>)
+SYMBOL(fprintf, std::, <cstdio>)
+SYMBOL(fprintf, None, <cstdio>)
+SYMBOL(fprintf, None, <stdio.h>)
+SYMBOL(fputc, std::, <cstdio>)
+SYMBOL(fputc, None, <cstdio>)
+SYMBOL(fputc, None, <stdio.h>)
+SYMBOL(fputs, std::, <cstdio>)
+SYMBOL(fputs, None, <cstdio>)
+SYMBOL(fputs, None, <stdio.h>)
+SYMBOL(fputwc, std::, <cwchar>)
+SYMBOL(fputwc, None, <cwchar>)
+SYMBOL(fputwc, None, <wchar.h>)
+SYMBOL(fputws, std::, <cwchar>)
+SYMBOL(fputws, None, <cwchar>)
+SYMBOL(fputws, None, <wchar.h>)
+SYMBOL(fread, std::, <cstdio>)
+SYMBOL(fread, None, <cstdio>)
+SYMBOL(fread, None, <stdio.h>)
+SYMBOL(free, std::, <cstdlib>)
+SYMBOL(free, None, <cstdlib>)
+SYMBOL(free, None, <stdlib.h>)
+SYMBOL(freopen, std::, <cstdio>)
+SYMBOL(freopen, None, <cstdio>)
+SYMBOL(freopen, None, <stdio.h>)
+SYMBOL(frexp, std::, <cmath>)
+SYMBOL(frexp, None, <cmath>)
+SYMBOL(frexp, None, <math.h>)
+SYMBOL(frexpf, std::, <cmath>)
+SYMBOL(frexpf, None, <cmath>)
+SYMBOL(frexpf, None, <math.h>)
+SYMBOL(frexpl, std::, <cmath>)
+SYMBOL(frexpl, None, <cmath>)
+SYMBOL(frexpl, None, <math.h>)
+SYMBOL(from_chars, std::, <charconv>)
+SYMBOL(from_chars_result, std::, <charconv>)
+SYMBOL(from_range, std::, <ranges>)
+SYMBOL(from_range_t, std::, <ranges>)
+SYMBOL(front_insert_iterator, std::, <iterator>)
+SYMBOL(front_inserter, std::, <iterator>)
+SYMBOL(fscanf, std::, <cstdio>)
+SYMBOL(fscanf, None, <cstdio>)
+SYMBOL(fscanf, None, <stdio.h>)
+SYMBOL(fseek, std::, <cstdio>)
+SYMBOL(fseek, None, <cstdio>)
+SYMBOL(fseek, None, <stdio.h>)
+SYMBOL(fsetpos, std::, <cstdio>)
+SYMBOL(fsetpos, None, <cstdio>)
+SYMBOL(fsetpos, None, <stdio.h>)
+SYMBOL(fstream, std::, <fstream>)
+SYMBOL(fstream, std::, <iosfwd>)
+SYMBOL(ftell, std::, <cstdio>)
+SYMBOL(ftell, None, <cstdio>)
+SYMBOL(ftell, None, <stdio.h>)
+SYMBOL(function, std::, <functional>)
+SYMBOL(future, std::, <future>)
+SYMBOL(future_category, std::, <future>)
+SYMBOL(future_errc, std::, <future>)
+SYMBOL(future_error, std::, <future>)
+SYMBOL(future_status, std::, <future>)
+SYMBOL(fwide, std::, <cwchar>)
+SYMBOL(fwide, None, <cwchar>)
+SYMBOL(fwide, None, <wchar.h>)
+SYMBOL(fwprintf, std::, <cwchar>)
+SYMBOL(fwprintf, None, <cwchar>)
+SYMBOL(fwprintf, None, <wchar.h>)
+SYMBOL(fwrite, std::, <cstdio>)
+SYMBOL(fwrite, None, <cstdio>)
+SYMBOL(fwrite, None, <stdio.h>)
+SYMBOL(fwscanf, std::, <cwchar>)
+SYMBOL(fwscanf, None, <cwchar>)
+SYMBOL(fwscanf, None, <wchar.h>)
+SYMBOL(gamma_distribution, std::, <random>)
+SYMBOL(gcd, std::, <numeric>)
+SYMBOL(generate, std::, <algorithm>)
+SYMBOL(generate_canonical, std::, <random>)
+SYMBOL(generate_n, std::, <algorithm>)
+SYMBOL(generic_category, std::, <system_error>)
+SYMBOL(geometric_distribution, std::, <random>)
+SYMBOL(get_deleter, std::, <memory>)
+SYMBOL(get_if, std::, <variant>)
+SYMBOL(get_money, std::, <iomanip>)
+SYMBOL(get_new_handler, std::, <new>)
+SYMBOL(get_pointer_safety, std::, <memory>)
+SYMBOL(get_temporary_buffer, std::, <memory>)
+SYMBOL(get_terminate, std::, <exception>)
+SYMBOL(get_time, std::, <iomanip>)
+SYMBOL(get_unexpected, std::, <exception>)
+SYMBOL(getc, std::, <cstdio>)
+SYMBOL(getc, None, <cstdio>)
+SYMBOL(getc, None, <stdio.h>)
+SYMBOL(getchar, std::, <cstdio>)
+SYMBOL(getchar, None, <cstdio>)
+SYMBOL(getchar, None, <stdio.h>)
+SYMBOL(getenv, std::, <cstdlib>)
+SYMBOL(getenv, None, <cstdlib>)
+SYMBOL(getenv, None, <stdlib.h>)
+SYMBOL(getline, std::, <string>)
+SYMBOL(gets, std::, <cstdio>)
+SYMBOL(gets, None, <cstdio>)
+SYMBOL(gets, None, <stdio.h>)
+SYMBOL(getwc, std::, <cwchar>)
+SYMBOL(getwc, None, <cwchar>)
+SYMBOL(getwc, None, <wchar.h>)
+SYMBOL(getwchar, std::, <cwchar>)
+SYMBOL(getwchar, None, <cwchar>)
+SYMBOL(getwchar, None, <wchar.h>)
+SYMBOL(giga, std::, <ratio>)
+SYMBOL(gmtime, std::, <ctime>)
+SYMBOL(gmtime, None, <ctime>)
+SYMBOL(gmtime, None, <time.h>)
+SYMBOL(greater, std::, <functional>)
+SYMBOL(greater_equal, std::, <functional>)
+SYMBOL(gslice, std::, <valarray>)
+SYMBOL(gslice_array, std::, <valarray>)
+SYMBOL(hardware_constructive_interference_size, std::, <new>)
+SYMBOL(hardware_destructive_interference_size, std::, <new>)
+SYMBOL(has_facet, std::, <locale>)
+SYMBOL(has_single_bit, std::, <bit>)
+SYMBOL(has_unique_object_representations, std::, <type_traits>)
+SYMBOL(has_unique_object_representations_v, std::, <type_traits>)
+SYMBOL(has_virtual_destructor, std::, <type_traits>)
+SYMBOL(has_virtual_destructor_v, std::, <type_traits>)
+SYMBOL(hash, std::, <functional>)
+SYMBOL(hecto, std::, <ratio>)
+SYMBOL(hermite, std::, <cmath>)
+SYMBOL(hermitef, std::, <cmath>)
+SYMBOL(hermitel, std::, <cmath>)
+SYMBOL(hex, std::, <ios>)
+SYMBOL(hex, std::, <iostream>)
+SYMBOL(hexfloat, std::, <ios>)
+SYMBOL(hexfloat, std::, <iostream>)
+SYMBOL(holds_alternative, std::, <variant>)
+SYMBOL(hypot, std::, <cmath>)
+SYMBOL(hypot, None, <cmath>)
+SYMBOL(hypot, None, <math.h>)
+SYMBOL(hypotf, std::, <cmath>)
+SYMBOL(hypotf, None, <cmath>)
+SYMBOL(hypotf, None, <math.h>)
+SYMBOL(hypotl, std::, <cmath>)
+SYMBOL(hypotl, None, <cmath>)
+SYMBOL(hypotl, None, <math.h>)
+SYMBOL(identity, std::, <functional>)
+SYMBOL(ifstream, std::, <fstream>)
+SYMBOL(ifstream, std::, <iosfwd>)
+SYMBOL(ignore, std::, <tuple>)
+SYMBOL(ilogb, std::, <cmath>)
+SYMBOL(ilogb, None, <cmath>)
+SYMBOL(ilogb, None, <math.h>)
+SYMBOL(ilogbf, std::, <cmath>)
+SYMBOL(ilogbf, None, <cmath>)
+SYMBOL(ilogbf, None, <math.h>)
+SYMBOL(ilogbl, std::, <cmath>)
+SYMBOL(ilogbl, None, <cmath>)
+SYMBOL(ilogbl, None, <math.h>)
+SYMBOL(imag, std::, <complex>)
+SYMBOL(imaxabs, std::, <cinttypes>)
+SYMBOL(imaxabs, None, <cinttypes>)
+SYMBOL(imaxabs, None, <inttypes.h>)
+SYMBOL(imaxdiv, std::, <cinttypes>)
+SYMBOL(imaxdiv, None, <cinttypes>)
+SYMBOL(imaxdiv, None, <inttypes.h>)
+SYMBOL(imaxdiv_t, std::, <cinttypes>)
+SYMBOL(imaxdiv_t, None, <cinttypes>)
+SYMBOL(imaxdiv_t, None, <inttypes.h>)
+SYMBOL(in_place, std::, <utility>)
+SYMBOL(in_place_index, std::, <utility>)
+SYMBOL(in_place_index_t, std::, <utility>)
+SYMBOL(in_place_t, std::, <utility>)
+SYMBOL(in_place_type, std::, <utility>)
+SYMBOL(in_place_type_t, std::, <utility>)
+SYMBOL(in_range, std::, <utility>)
+SYMBOL(includes, std::, <algorithm>)
+SYMBOL(inclusive_scan, std::, <numeric>)
+SYMBOL(incrementable, std::, <iterator>)
+SYMBOL(incrementable_traits, std::, <iterator>)
+SYMBOL(independent_bits_engine, std::, <random>)
+SYMBOL(indirect_array, std::, <valarray>)
+SYMBOL(indirect_binary_predicate, std::, <iterator>)
+SYMBOL(indirect_equivalence_relation, std::, <iterator>)
+SYMBOL(indirect_result_t, std::, <iterator>)
+SYMBOL(indirect_strict_weak_order, std::, <iterator>)
+SYMBOL(indirect_unary_predicate, std::, <iterator>)
+SYMBOL(indirectly_comparable, std::, <iterator>)
+SYMBOL(indirectly_copyable, std::, <iterator>)
+SYMBOL(indirectly_copyable_storable, std::, <iterator>)
+SYMBOL(indirectly_movable, std::, <iterator>)
+SYMBOL(indirectly_movable_storable, std::, <iterator>)
+SYMBOL(indirectly_readable, std::, <iterator>)
+SYMBOL(indirectly_readable_traits, std::, <iterator>)
+SYMBOL(indirectly_regular_unary_invocable, std::, <iterator>)
+SYMBOL(indirectly_swappable, std::, <iterator>)
+SYMBOL(indirectly_unary_invocable, std::, <iterator>)
+SYMBOL(indirectly_writable, std::, <iterator>)
+SYMBOL(initializer_list, std::, <initializer_list>)
+SYMBOL(inner_product, std::, <numeric>)
+SYMBOL(inout_ptr, std::, <memory>)
+SYMBOL(inout_ptr_t, std::, <memory>)
+SYMBOL(inplace_merge, std::, <algorithm>)
+SYMBOL(input_iterator, std::, <iterator>)
+SYMBOL(input_iterator_tag, std::, <iterator>)
+SYMBOL(input_or_output_iterator, std::, <iterator>)
+SYMBOL(insert_iterator, std::, <iterator>)
+SYMBOL(inserter, std::, <iterator>)
+SYMBOL(int16_t, std::, <cstdint>)
+SYMBOL(int16_t, None, <cstdint>)
+SYMBOL(int16_t, None, <stdint.h>)
+SYMBOL(int32_t, std::, <cstdint>)
+SYMBOL(int32_t, None, <cstdint>)
+SYMBOL(int32_t, None, <stdint.h>)
+SYMBOL(int64_t, std::, <cstdint>)
+SYMBOL(int64_t, None, <cstdint>)
+SYMBOL(int64_t, None, <stdint.h>)
+SYMBOL(int8_t, std::, <cstdint>)
+SYMBOL(int8_t, None, <cstdint>)
+SYMBOL(int8_t, None, <stdint.h>)
+SYMBOL(int_fast16_t, std::, <cstdint>)
+SYMBOL(int_fast16_t, None, <cstdint>)
+SYMBOL(int_fast16_t, None, <stdint.h>)
+SYMBOL(int_fast32_t, std::, <cstdint>)
+SYMBOL(int_fast32_t, None, <cstdint>)
+SYMBOL(int_fast32_t, None, <stdint.h>)
+SYMBOL(int_fast64_t, std::, <cstdint>)
+SYMBOL(int_fast64_t, None, <cstdint>)
+SYMBOL(int_fast64_t, None, <stdint.h>)
+SYMBOL(int_fast8_t, std::, <cstdint>)
+SYMBOL(int_fast8_t, None, <cstdint>)
+SYMBOL(int_fast8_t, None, <stdint.h>)
+SYMBOL(int_least16_t, std::, <cstdint>)
+SYMBOL(int_least16_t, None, <cstdint>)
+SYMBOL(int_least16_t, None, <stdint.h>)
+SYMBOL(int_least32_t, std::, <cstdint>)
+SYMBOL(int_least32_t, None, <cstdint>)
+SYMBOL(int_least32_t, None, <stdint.h>)
+SYMBOL(int_least64_t, std::, <cstdint>)
+SYMBOL(int_least64_t, None, <cstdint>)
+SYMBOL(int_least64_t, None, <stdint.h>)
+SYMBOL(int_least8_t, std::, <cstdint>)
+SYMBOL(int_least8_t, None, <cstdint>)
+SYMBOL(int_least8_t, None, <stdint.h>)
+SYMBOL(integer_sequence, std::, <utility>)
+SYMBOL(integral, std::, <concepts>)
+SYMBOL(integral_constant, std::, <type_traits>)
+SYMBOL(internal, std::, <ios>)
+SYMBOL(internal, std::, <iostream>)
+SYMBOL(intmax_t, std::, <cstdint>)
+SYMBOL(intmax_t, None, <cstdint>)
+SYMBOL(intmax_t, None, <stdint.h>)
+SYMBOL(intptr_t, std::, <cstdint>)
+SYMBOL(intptr_t, None, <cstdint>)
+SYMBOL(intptr_t, None, <stdint.h>)
+SYMBOL(invalid_argument, std::, <stdexcept>)
+SYMBOL(invocable, std::, <concepts>)
+SYMBOL(invoke, std::, <functional>)
+SYMBOL(invoke_r, std::, <functional>)
+SYMBOL(invoke_result, std::, <type_traits>)
+SYMBOL(invoke_result_t, std::, <type_traits>)
+SYMBOL(io_errc, std::, <ios>)
+SYMBOL(io_errc, std::, <iostream>)
+SYMBOL(io_state, std::, <ios>)
+SYMBOL(io_state, std::, <iostream>)
+SYMBOL(ios, std::, <ios>)
+SYMBOL(ios, std::, <iostream>)
+SYMBOL(ios, std::, <iosfwd>)
+SYMBOL(ios_base, std::, <ios>)
+SYMBOL(ios_base, std::, <iostream>)
+SYMBOL(iostream, std::, <istream>)
+SYMBOL(iostream, std::, <iostream>)
+SYMBOL(iostream, std::, <iosfwd>)
+SYMBOL(iostream_category, std::, <ios>)
+SYMBOL(iostream_category, std::, <iostream>)
+SYMBOL(iota, std::, <numeric>)
+SYMBOL(is_abstract, std::, <type_traits>)
+SYMBOL(is_abstract_v, std::, <type_traits>)
+SYMBOL(is_aggregate, std::, <type_traits>)
+SYMBOL(is_aggregate_v, std::, <type_traits>)
+SYMBOL(is_arithmetic, std::, <type_traits>)
+SYMBOL(is_arithmetic_v, std::, <type_traits>)
+SYMBOL(is_array, std::, <type_traits>)
+SYMBOL(is_array_v, std::, <type_traits>)
+SYMBOL(is_assignable, std::, <type_traits>)
+SYMBOL(is_assignable_v, std::, <type_traits>)
+SYMBOL(is_base_of, std::, <type_traits>)
+SYMBOL(is_base_of_v, std::, <type_traits>)
+SYMBOL(is_bind_expression, std::, <functional>)
+SYMBOL(is_bind_expression_v, std::, <functional>)
+SYMBOL(is_bounded_array, std::, <type_traits>)
+SYMBOL(is_bounded_array_v, std::, <type_traits>)
+SYMBOL(is_class, std::, <type_traits>)
+SYMBOL(is_class_v, std::, <type_traits>)
+SYMBOL(is_compound, std::, <type_traits>)
+SYMBOL(is_compound_v, std::, <type_traits>)
+SYMBOL(is_const, std::, <type_traits>)
+SYMBOL(is_const_v, std::, <type_traits>)
+SYMBOL(is_constant_evaluated, std::, <type_traits>)
+SYMBOL(is_constructible, std::, <type_traits>)
+SYMBOL(is_constructible_v, std::, <type_traits>)
+SYMBOL(is_convertible, std::, <type_traits>)
+SYMBOL(is_convertible_v, std::, <type_traits>)
+SYMBOL(is_copy_assignable, std::, <type_traits>)
+SYMBOL(is_copy_assignable_v, std::, <type_traits>)
+SYMBOL(is_copy_constructible, std::, <type_traits>)
+SYMBOL(is_copy_constructible_v, std::, <type_traits>)
+SYMBOL(is_corresponding_member, std::, <type_traits>)
+SYMBOL(is_default_constructible, std::, <type_traits>)
+SYMBOL(is_default_constructible_v, std::, <type_traits>)
+SYMBOL(is_destructible, std::, <type_traits>)
+SYMBOL(is_destructible_v, std::, <type_traits>)
+SYMBOL(is_empty, std::, <type_traits>)
+SYMBOL(is_empty_v, std::, <type_traits>)
+SYMBOL(is_enum, std::, <type_traits>)
+SYMBOL(is_enum_v, std::, <type_traits>)
+SYMBOL(is_eq, std::, <compare>)
+SYMBOL(is_error_code_enum, std::, <system_error>)
+SYMBOL(is_error_condition_enum, std::, <system_error>)
+SYMBOL(is_error_condition_enum_v, std::, <system_error>)
+SYMBOL(is_execution_policy, std::, <execution>)
+SYMBOL(is_execution_policy_v, std::, <execution>)
+SYMBOL(is_final, std::, <type_traits>)
+SYMBOL(is_final_v, std::, <type_traits>)
+SYMBOL(is_floating_point, std::, <type_traits>)
+SYMBOL(is_floating_point_v, std::, <type_traits>)
+SYMBOL(is_function, std::, <type_traits>)
+SYMBOL(is_function_v, std::, <type_traits>)
+SYMBOL(is_fundamental, std::, <type_traits>)
+SYMBOL(is_fundamental_v, std::, <type_traits>)
+SYMBOL(is_gt, std::, <compare>)
+SYMBOL(is_gteq, std::, <compare>)
+SYMBOL(is_heap, std::, <algorithm>)
+SYMBOL(is_heap_until, std::, <algorithm>)
+SYMBOL(is_integral, std::, <type_traits>)
+SYMBOL(is_integral_v, std::, <type_traits>)
+SYMBOL(is_invocable, std::, <type_traits>)
+SYMBOL(is_invocable_r, std::, <type_traits>)
+SYMBOL(is_invocable_r_v, std::, <type_traits>)
+SYMBOL(is_invocable_v, std::, <type_traits>)
+SYMBOL(is_layout_compatible, std::, <type_traits>)
+SYMBOL(is_layout_compatible_v, std::, <type_traits>)
+SYMBOL(is_literal_type, std::, <type_traits>)
+SYMBOL(is_literal_type_v, std::, <type_traits>)
+SYMBOL(is_lt, std::, <compare>)
+SYMBOL(is_lteq, std::, <compare>)
+SYMBOL(is_lvalue_reference, std::, <type_traits>)
+SYMBOL(is_lvalue_reference_v, std::, <type_traits>)
+SYMBOL(is_member_function_pointer, std::, <type_traits>)
+SYMBOL(is_member_function_pointer_v, std::, <type_traits>)
+SYMBOL(is_member_object_pointer, std::, <type_traits>)
+SYMBOL(is_member_object_pointer_v, std::, <type_traits>)
+SYMBOL(is_member_pointer, std::, <type_traits>)
+SYMBOL(is_member_pointer_v, std::, <type_traits>)
+SYMBOL(is_move_assignable, std::, <type_traits>)
+SYMBOL(is_move_assignable_v, std::, <type_traits>)
+SYMBOL(is_move_constructible, std::, <type_traits>)
+SYMBOL(is_move_constructible_v, std::, <type_traits>)
+SYMBOL(is_neq, std::, <compare>)
+SYMBOL(is_nothrow_assignable, std::, <type_traits>)
+SYMBOL(is_nothrow_assignable_v, std::, <type_traits>)
+SYMBOL(is_nothrow_constructible, std::, <type_traits>)
+SYMBOL(is_nothrow_constructible_v, std::, <type_traits>)
+SYMBOL(is_nothrow_convertible, std::, <type_traits>)
+SYMBOL(is_nothrow_convertible_v, std::, <type_traits>)
+SYMBOL(is_nothrow_copy_assignable, std::, <type_traits>)
+SYMBOL(is_nothrow_copy_assignable_v, std::, <type_traits>)
+SYMBOL(is_nothrow_copy_constructible, std::, <type_traits>)
+SYMBOL(is_nothrow_copy_constructible_v, std::, <type_traits>)
+SYMBOL(is_nothrow_default_constructible, std::, <type_traits>)
+SYMBOL(is_nothrow_default_constructible_v, std::, <type_traits>)
+SYMBOL(is_nothrow_destructible, std::, <type_traits>)
+SYMBOL(is_nothrow_destructible_v, std::, <type_traits>)
+SYMBOL(is_nothrow_invocable, std::, <type_traits>)
+SYMBOL(is_nothrow_invocable_r, std::, <type_traits>)
+SYMBOL(is_nothrow_invocable_r_v, std::, <type_traits>)
+SYMBOL(is_nothrow_invocable_v, std::, <type_traits>)
+SYMBOL(is_nothrow_move_assignable, std::, <type_traits>)
+SYMBOL(is_nothrow_move_assignable_v, std::, <type_traits>)
+SYMBOL(is_nothrow_move_constructible, std::, <type_traits>)
+SYMBOL(is_nothrow_move_constructible_v, std::, <type_traits>)
+SYMBOL(is_nothrow_swappable, std::, <type_traits>)
+SYMBOL(is_nothrow_swappable_v, std::, <type_traits>)
+SYMBOL(is_nothrow_swappable_with, std::, <type_traits>)
+SYMBOL(is_nothrow_swappable_with_v, std::, <type_traits>)
+SYMBOL(is_null_pointer, std::, <type_traits>)
+SYMBOL(is_null_pointer_v, std::, <type_traits>)
+SYMBOL(is_object, std::, <type_traits>)
+SYMBOL(is_object_v, std::, <type_traits>)
+SYMBOL(is_partitioned, std::, <algorithm>)
+SYMBOL(is_permutation, std::, <algorithm>)
+SYMBOL(is_placeholder, std::, <functional>)
+SYMBOL(is_placeholder_v, std::, <functional>)
+SYMBOL(is_pod, std::, <type_traits>)
+SYMBOL(is_pod_v, std::, <type_traits>)
+SYMBOL(is_pointer, std::, <type_traits>)
+SYMBOL(is_pointer_interconvertible_base_of, std::, <type_traits>)
+SYMBOL(is_pointer_interconvertible_base_of_v, std::, <type_traits>)
+SYMBOL(is_pointer_interconvertible_with_class, std::, <type_traits>)
+SYMBOL(is_pointer_v, std::, <type_traits>)
+SYMBOL(is_polymorphic, std::, <type_traits>)
+SYMBOL(is_polymorphic_v, std::, <type_traits>)
+SYMBOL(is_reference, std::, <type_traits>)
+SYMBOL(is_reference_v, std::, <type_traits>)
+SYMBOL(is_rvalue_reference, std::, <type_traits>)
+SYMBOL(is_rvalue_reference_v, std::, <type_traits>)
+SYMBOL(is_same, std::, <type_traits>)
+SYMBOL(is_same_v, std::, <type_traits>)
+SYMBOL(is_scalar, std::, <type_traits>)
+SYMBOL(is_scalar_v, std::, <type_traits>)
+SYMBOL(is_scoped_enum, std::, <type_traits>)
+SYMBOL(is_scoped_enum_v, std::, <type_traits>)
+SYMBOL(is_signed, std::, <type_traits>)
+SYMBOL(is_signed_v, std::, <type_traits>)
+SYMBOL(is_sorted, std::, <algorithm>)
+SYMBOL(is_sorted_until, std::, <algorithm>)
+SYMBOL(is_standard_layout, std::, <type_traits>)
+SYMBOL(is_standard_layout_v, std::, <type_traits>)
+SYMBOL(is_swappable, std::, <type_traits>)
+SYMBOL(is_swappable_v, std::, <type_traits>)
+SYMBOL(is_swappable_with, std::, <type_traits>)
+SYMBOL(is_swappable_with_v, std::, <type_traits>)
+SYMBOL(is_trivial, std::, <type_traits>)
+SYMBOL(is_trivial_v, std::, <type_traits>)
+SYMBOL(is_trivially_assignable, std::, <type_traits>)
+SYMBOL(is_trivially_assignable_v, std::, <type_traits>)
+SYMBOL(is_trivially_constructible, std::, <type_traits>)
+SYMBOL(is_trivially_constructible_v, std::, <type_traits>)
+SYMBOL(is_trivially_copy_assignable, std::, <type_traits>)
+SYMBOL(is_trivially_copy_assignable_v, std::, <type_traits>)
+SYMBOL(is_trivially_copy_constructible, std::, <type_traits>)
+SYMBOL(is_trivially_copy_constructible_v, std::, <type_traits>)
+SYMBOL(is_trivially_copyable, std::, <type_traits>)
+SYMBOL(is_trivially_copyable_v, std::, <type_traits>)
+SYMBOL(is_trivially_default_constructible, std::, <type_traits>)
+SYMBOL(is_trivially_default_constructible_v, std::, <type_traits>)
+SYMBOL(is_trivially_destructible, std::, <type_traits>)
+SYMBOL(is_trivially_destructible_v, std::, <type_traits>)
+SYMBOL(is_trivially_move_assignable, std::, <type_traits>)
+SYMBOL(is_trivially_move_assignable_v, std::, <type_traits>)
+SYMBOL(is_trivially_move_constructible, std::, <type_traits>)
+SYMBOL(is_trivially_move_constructible_v, std::, <type_traits>)
+SYMBOL(is_unbounded_array, std::, <type_traits>)
+SYMBOL(is_unbounded_array_v, std::, <type_traits>)
+SYMBOL(is_union, std::, <type_traits>)
+SYMBOL(is_union_v, std::, <type_traits>)
+SYMBOL(is_unsigned, std::, <type_traits>)
+SYMBOL(is_unsigned_v, std::, <type_traits>)
+SYMBOL(is_void, std::, <type_traits>)
+SYMBOL(is_void_v, std::, <type_traits>)
+SYMBOL(is_volatile, std::, <type_traits>)
+SYMBOL(is_volatile_v, std::, <type_traits>)
+SYMBOL(isalnum, std::, <cctype>)
+SYMBOL(isalnum, None, <cctype>)
+SYMBOL(isalnum, None, <ctype.h>)
+SYMBOL(isalpha, std::, <cctype>)
+SYMBOL(isalpha, None, <cctype>)
+SYMBOL(isalpha, None, <ctype.h>)
+SYMBOL(isblank, std::, <cctype>)
+SYMBOL(isblank, None, <cctype>)
+SYMBOL(isblank, None, <ctype.h>)
+SYMBOL(iscntrl, std::, <cctype>)
+SYMBOL(iscntrl, None, <cctype>)
+SYMBOL(iscntrl, None, <ctype.h>)
+SYMBOL(isdigit, std::, <cctype>)
+SYMBOL(isdigit, None, <cctype>)
+SYMBOL(isdigit, None, <ctype.h>)
+SYMBOL(isfinite, std::, <cmath>)
+SYMBOL(isfinite, None, <cmath>)
+SYMBOL(isfinite, None, <math.h>)
+SYMBOL(isgraph, std::, <cctype>)
+SYMBOL(isgraph, None, <cctype>)
+SYMBOL(isgraph, None, <ctype.h>)
+SYMBOL(isgreater, std::, <cmath>)
+SYMBOL(isgreater, None, <cmath>)
+SYMBOL(isgreater, None, <math.h>)
+SYMBOL(isgreaterequal, std::, <cmath>)
+SYMBOL(isgreaterequal, None, <cmath>)
+SYMBOL(isgreaterequal, None, <math.h>)
+SYMBOL(isinf, std::, <cmath>)
+SYMBOL(isinf, None, <cmath>)
+SYMBOL(isinf, None, <math.h>)
+SYMBOL(isless, std::, <cmath>)
+SYMBOL(isless, None, <cmath>)
+SYMBOL(isless, None, <math.h>)
+SYMBOL(islessequal, std::, <cmath>)
+SYMBOL(islessequal, None, <cmath>)
+SYMBOL(islessequal, None, <math.h>)
+SYMBOL(islessgreater, std::, <cmath>)
+SYMBOL(islessgreater, None, <cmath>)
+SYMBOL(islessgreater, None, <math.h>)
+SYMBOL(islower, std::, <cctype>)
+SYMBOL(islower, None, <cctype>)
+SYMBOL(islower, None, <ctype.h>)
+SYMBOL(isnan, std::, <cmath>)
+SYMBOL(isnan, None, <cmath>)
+SYMBOL(isnan, None, <math.h>)
+SYMBOL(isnormal, std::, <cmath>)
+SYMBOL(isnormal, None, <cmath>)
+SYMBOL(isnormal, None, <math.h>)
+SYMBOL(ispanstream, std::, <spanstream>)
+SYMBOL(ispanstream, std::, <iosfwd>)
+SYMBOL(isprint, std::, <cctype>)
+SYMBOL(isprint, None, <cctype>)
+SYMBOL(isprint, None, <ctype.h>)
+SYMBOL(ispunct, std::, <cctype>)
+SYMBOL(ispunct, None, <cctype>)
+SYMBOL(ispunct, None, <ctype.h>)
+SYMBOL(isspace, std::, <cctype>)
+SYMBOL(isspace, None, <cctype>)
+SYMBOL(isspace, None, <ctype.h>)
+SYMBOL(istream, std::, <istream>)
+SYMBOL(istream, std::, <iostream>)
+SYMBOL(istream, std::, <iosfwd>)
+SYMBOL(istream_iterator, std::, <iterator>)
+SYMBOL(istreambuf_iterator, std::, <iterator>)
+SYMBOL(istreambuf_iterator, std::, <iosfwd>)
+SYMBOL(istringstream, std::, <sstream>)
+SYMBOL(istringstream, std::, <iosfwd>)
+SYMBOL(istrstream, std::, <strstream>)
+SYMBOL(isunordered, std::, <cmath>)
+SYMBOL(isunordered, None, <cmath>)
+SYMBOL(isunordered, None, <math.h>)
+SYMBOL(isupper, std::, <cctype>)
+SYMBOL(isupper, None, <cctype>)
+SYMBOL(isupper, None, <ctype.h>)
+SYMBOL(iswalnum, std::, <cwctype>)
+SYMBOL(iswalnum, None, <cwctype>)
+SYMBOL(iswalnum, None, <wctype.h>)
+SYMBOL(iswalpha, std::, <cwctype>)
+SYMBOL(iswalpha, None, <cwctype>)
+SYMBOL(iswalpha, None, <wctype.h>)
+SYMBOL(iswblank, std::, <cwctype>)
+SYMBOL(iswblank, None, <cwctype>)
+SYMBOL(iswblank, None, <wctype.h>)
+SYMBOL(iswcntrl, std::, <cwctype>)
+SYMBOL(iswcntrl, None, <cwctype>)
+SYMBOL(iswcntrl, None, <wctype.h>)
+SYMBOL(iswctype, std::, <cwctype>)
+SYMBOL(iswctype, None, <cwctype>)
+SYMBOL(iswctype, None, <wctype.h>)
+SYMBOL(iswdigit, std::, <cwctype>)
+SYMBOL(iswdigit, None, <cwctype>)
+SYMBOL(iswdigit, None, <wctype.h>)
+SYMBOL(iswgraph, std::, <cwctype>)
+SYMBOL(iswgraph, None, <cwctype>)
+SYMBOL(iswgraph, None, <wctype.h>)
+SYMBOL(iswlower, std::, <cwctype>)
+SYMBOL(iswlower, None, <cwctype>)
+SYMBOL(iswlower, None, <wctype.h>)
+SYMBOL(iswprint, std::, <cwctype>)
+SYMBOL(iswprint, None, <cwctype>)
+SYMBOL(iswprint, None, <wctype.h>)
+SYMBOL(iswpunct, std::, <cwctype>)
+SYMBOL(iswpunct, None, <cwctype>)
+SYMBOL(iswpunct, None, <wctype.h>)
+SYMBOL(iswspace, std::, <cwctype>)
+SYMBOL(iswspace, None, <cwctype>)
+SYMBOL(iswspace, None, <wctype.h>)
+SYMBOL(iswupper, std::, <cwctype>)
+SYMBOL(iswupper, None, <cwctype>)
+SYMBOL(iswupper, None, <wctype.h>)
+SYMBOL(iswxdigit, std::, <cwctype>)
+SYMBOL(iswxdigit, None, <cwctype>)
+SYMBOL(iswxdigit, None, <wctype.h>)
+SYMBOL(isxdigit, std::, <cctype>)
+SYMBOL(isxdigit, None, <cctype>)
+SYMBOL(isxdigit, None, <ctype.h>)
+SYMBOL(iter_common_reference_t, std::, <iterator>)
+SYMBOL(iter_const_reference_t, std::, <iterator>)
+SYMBOL(iter_difference_t, std::, <iterator>)
+SYMBOL(iter_reference_t, std::, <iterator>)
+SYMBOL(iter_rvalue_reference_t, std::, <iterator>)
+SYMBOL(iter_swap, std::, <algorithm>)
+SYMBOL(iter_value_t, std::, <iterator>)
+SYMBOL(iterator, std::, <iterator>)
+SYMBOL(iterator_traits, std::, <iterator>)
+SYMBOL(jmp_buf, std::, <csetjmp>)
+SYMBOL(jmp_buf, None, <csetjmp>)
+SYMBOL(jmp_buf, None, <setjmp.h>)
+SYMBOL(jthread, std::, <thread>)
+SYMBOL(kill_dependency, std::, <atomic>)
+SYMBOL(kilo, std::, <ratio>)
+SYMBOL(knuth_b, std::, <random>)
+SYMBOL(labs, std::, <cstdlib>)
+SYMBOL(labs, None, <cstdlib>)
+SYMBOL(labs, None, <stdlib.h>)
+SYMBOL(laguerre, std::, <cmath>)
+SYMBOL(laguerref, std::, <cmath>)
+SYMBOL(laguerrel, std::, <cmath>)
+SYMBOL(latch, std::, <latch>)
+SYMBOL(launch, std::, <future>)
+SYMBOL(launder, std::, <new>)
+SYMBOL(lcm, std::, <numeric>)
+SYMBOL(lconv, std::, <clocale>)
+SYMBOL(lconv, None, <clocale>)
+SYMBOL(lconv, None, <locale.h>)
+SYMBOL(ldexp, std::, <cmath>)
+SYMBOL(ldexp, None, <cmath>)
+SYMBOL(ldexp, None, <math.h>)
+SYMBOL(ldexpf, std::, <cmath>)
+SYMBOL(ldexpf, None, <cmath>)
+SYMBOL(ldexpf, None, <math.h>)
+SYMBOL(ldexpl, std::, <cmath>)
+SYMBOL(ldexpl, None, <cmath>)
+SYMBOL(ldexpl, None, <math.h>)
+SYMBOL(ldiv, std::, <cstdlib>)
+SYMBOL(ldiv, None, <cstdlib>)
+SYMBOL(ldiv, None, <stdlib.h>)
+SYMBOL(ldiv_t, std::, <cstdlib>)
+SYMBOL(ldiv_t, None, <cstdlib>)
+SYMBOL(ldiv_t, None, <stdlib.h>)
+SYMBOL(left, std::, <ios>)
+SYMBOL(left, std::, <iostream>)
+SYMBOL(legendre, std::, <cmath>)
+SYMBOL(legendref, std::, <cmath>)
+SYMBOL(legendrel, std::, <cmath>)
+SYMBOL(length_error, std::, <stdexcept>)
+SYMBOL(lerp, std::, <cmath>)
+SYMBOL(less, std::, <functional>)
+SYMBOL(less_equal, std::, <functional>)
+SYMBOL(lexicographical_compare, std::, <algorithm>)
+SYMBOL(lexicographical_compare_three_way, std::, <algorithm>)
+SYMBOL(lgamma, std::, <cmath>)
+SYMBOL(lgamma, None, <cmath>)
+SYMBOL(lgamma, None, <math.h>)
+SYMBOL(lgammaf, std::, <cmath>)
+SYMBOL(lgammaf, None, <cmath>)
+SYMBOL(lgammaf, None, <math.h>)
+SYMBOL(lgammal, std::, <cmath>)
+SYMBOL(lgammal, None, <cmath>)
+SYMBOL(lgammal, None, <math.h>)
+SYMBOL(linear_congruential_engine, std::, <random>)
+SYMBOL(list, std::, <list>)
+SYMBOL(llabs, std::, <cstdlib>)
+SYMBOL(llabs, None, <cstdlib>)
+SYMBOL(llabs, None, <stdlib.h>)
+SYMBOL(lldiv, std::, <cstdlib>)
+SYMBOL(lldiv, None, <cstdlib>)
+SYMBOL(lldiv, None, <stdlib.h>)
+SYMBOL(lldiv_t, std::, <cstdlib>)
+SYMBOL(lldiv_t, None, <cstdlib>)
+SYMBOL(lldiv_t, None, <stdlib.h>)
+SYMBOL(llrint, std::, <cmath>)
+SYMBOL(llrint, None, <cmath>)
+SYMBOL(llrint, None, <math.h>)
+SYMBOL(llrintf, std::, <cmath>)
+SYMBOL(llrintf, None, <cmath>)
+SYMBOL(llrintf, None, <math.h>)
+SYMBOL(llrintl, std::, <cmath>)
+SYMBOL(llrintl, None, <cmath>)
+SYMBOL(llrintl, None, <math.h>)
+SYMBOL(llround, std::, <cmath>)
+SYMBOL(llround, None, <cmath>)
+SYMBOL(llround, None, <math.h>)
+SYMBOL(llroundf, std::, <cmath>)
+SYMBOL(llroundf, None, <cmath>)
+SYMBOL(llroundf, None, <math.h>)
+SYMBOL(llroundl, std::, <cmath>)
+SYMBOL(llroundl, None, <cmath>)
+SYMBOL(llroundl, None, <math.h>)
+SYMBOL(locale, std::, <locale>)
+SYMBOL(localeconv, std::, <clocale>)
+SYMBOL(localeconv, None, <clocale>)
+SYMBOL(localeconv, None, <locale.h>)
+SYMBOL(localtime, std::, <ctime>)
+SYMBOL(localtime, None, <ctime>)
+SYMBOL(localtime, None, <time.h>)
+SYMBOL(lock, std::, <mutex>)
+SYMBOL(lock_guard, std::, <mutex>)
+SYMBOL(log, std::, <cmath>)
+SYMBOL(log, None, <cmath>)
+SYMBOL(log, None, <math.h>)
+SYMBOL(log10, std::, <cmath>)
+SYMBOL(log10, None, <cmath>)
+SYMBOL(log10, None, <math.h>)
+SYMBOL(log10f, std::, <cmath>)
+SYMBOL(log10f, None, <cmath>)
+SYMBOL(log10f, None, <math.h>)
+SYMBOL(log10l, std::, <cmath>)
+SYMBOL(log10l, None, <cmath>)
+SYMBOL(log10l, None, <math.h>)
+SYMBOL(log1p, std::, <cmath>)
+SYMBOL(log1p, None, <cmath>)
+SYMBOL(log1p, None, <math.h>)
+SYMBOL(log1pf, std::, <cmath>)
+SYMBOL(log1pf, None, <cmath>)
+SYMBOL(log1pf, None, <math.h>)
+SYMBOL(log1pl, std::, <cmath>)
+SYMBOL(log1pl, None, <cmath>)
+SYMBOL(log1pl, None, <math.h>)
+SYMBOL(log2, std::, <cmath>)
+SYMBOL(log2, None, <cmath>)
+SYMBOL(log2, None, <math.h>)
+SYMBOL(log2f, std::, <cmath>)
+SYMBOL(log2f, None, <cmath>)
+SYMBOL(log2f, None, <math.h>)
+SYMBOL(log2l, std::, <cmath>)
+SYMBOL(log2l, None, <cmath>)
+SYMBOL(log2l, None, <math.h>)
+SYMBOL(logb, std::, <cmath>)
+SYMBOL(logb, None, <cmath>)
+SYMBOL(logb, None, <math.h>)
+SYMBOL(logbf, std::, <cmath>)
+SYMBOL(logbf, None, <cmath>)
+SYMBOL(logbf, None, <math.h>)
+SYMBOL(logbl, std::, <cmath>)
+SYMBOL(logbl, None, <cmath>)
+SYMBOL(logbl, None, <math.h>)
+SYMBOL(logf, std::, <cmath>)
+SYMBOL(logf, None, <cmath>)
+SYMBOL(logf, None, <math.h>)
+SYMBOL(logic_error, std::, <stdexcept>)
+SYMBOL(logical_and, std::, <functional>)
+SYMBOL(logical_not, std::, <functional>)
+SYMBOL(logical_or, std::, <functional>)
+SYMBOL(logl, std::, <cmath>)
+SYMBOL(logl, None, <cmath>)
+SYMBOL(logl, None, <math.h>)
+SYMBOL(lognormal_distribution, std::, <random>)
+SYMBOL(longjmp, std::, <csetjmp>)
+SYMBOL(longjmp, None, <csetjmp>)
+SYMBOL(longjmp, None, <setjmp.h>)
+SYMBOL(lower_bound, std::, <algorithm>)
+SYMBOL(lrint, std::, <cmath>)
+SYMBOL(lrint, None, <cmath>)
+SYMBOL(lrint, None, <math.h>)
+SYMBOL(lrintf, std::, <cmath>)
+SYMBOL(lrintf, None, <cmath>)
+SYMBOL(lrintf, None, <math.h>)
+SYMBOL(lrintl, std::, <cmath>)
+SYMBOL(lrintl, None, <cmath>)
+SYMBOL(lrintl, None, <math.h>)
+SYMBOL(lround, std::, <cmath>)
+SYMBOL(lround, None, <cmath>)
+SYMBOL(lround, None, <math.h>)
+SYMBOL(lroundf, std::, <cmath>)
+SYMBOL(lroundf, None, <cmath>)
+SYMBOL(lroundf, None, <math.h>)
+SYMBOL(lroundl, std::, <cmath>)
+SYMBOL(lroundl, None, <cmath>)
+SYMBOL(lroundl, None, <math.h>)
+SYMBOL(make_exception_ptr, std::, <exception>)
+SYMBOL(make_format_args, std::, <format>)
+SYMBOL(make_from_tuple, std::, <tuple>)
+SYMBOL(make_heap, std::, <algorithm>)
+SYMBOL(make_move_iterator, std::, <iterator>)
+SYMBOL(make_obj_using_allocator, std::, <memory>)
+SYMBOL(make_optional, std::, <optional>)
+SYMBOL(make_pair, std::, <utility>)
+SYMBOL(make_reverse_iterator, std::, <iterator>)
+SYMBOL(make_shared, std::, <memory>)
+SYMBOL(make_shared_for_overwrite, std::, <memory>)
+SYMBOL(make_signed, std::, <type_traits>)
+SYMBOL(make_signed_t, std::, <type_traits>)
+SYMBOL(make_tuple, std::, <tuple>)
+SYMBOL(make_unique, std::, <memory>)
+SYMBOL(make_unique_for_overwrite, std::, <memory>)
+SYMBOL(make_unsigned, std::, <type_traits>)
+SYMBOL(make_unsigned_t, std::, <type_traits>)
+SYMBOL(make_wformat_args, std::, <format>)
+SYMBOL(malloc, std::, <cstdlib>)
+SYMBOL(malloc, None, <cstdlib>)
+SYMBOL(malloc, None, <stdlib.h>)
+SYMBOL(map, std::, <map>)
+SYMBOL(mask_array, std::, <valarray>)
+SYMBOL(match_results, std::, <regex>)
+SYMBOL(max, std::, <algorithm>)
+SYMBOL(max_align_t, std::, <cstddef>)
+SYMBOL(max_align_t, None, <cstddef>)
+SYMBOL(max_align_t, None, <stddef.h>)
+SYMBOL(max_element, std::, <algorithm>)
+SYMBOL(mblen, std::, <cstdlib>)
+SYMBOL(mblen, None, <cstdlib>)
+SYMBOL(mblen, None, <stdlib.h>)
+SYMBOL(mbrlen, std::, <cwchar>)
+SYMBOL(mbrlen, None, <cwchar>)
+SYMBOL(mbrlen, None, <wchar.h>)
+SYMBOL(mbrtoc16, std::, <cuchar>)
+SYMBOL(mbrtoc16, None, <cuchar>)
+SYMBOL(mbrtoc16, None, <uchar.h>)
+SYMBOL(mbrtoc32, std::, <cuchar>)
+SYMBOL(mbrtoc32, None, <cuchar>)
+SYMBOL(mbrtoc32, None, <uchar.h>)
+SYMBOL(mbrtoc8, std::, <cuchar>)
+SYMBOL(mbrtoc8, None, <cuchar>)
+SYMBOL(mbrtoc8, None, <uchar.h>)
+SYMBOL(mbrtowc, std::, <cwchar>)
+SYMBOL(mbrtowc, None, <cwchar>)
+SYMBOL(mbrtowc, None, <wchar.h>)
+SYMBOL(mbsinit, std::, <cwchar>)
+SYMBOL(mbsinit, None, <cwchar>)
+SYMBOL(mbsinit, None, <wchar.h>)
+SYMBOL(mbsrtowcs, std::, <cwchar>)
+SYMBOL(mbsrtowcs, None, <cwchar>)
+SYMBOL(mbsrtowcs, None, <wchar.h>)
+SYMBOL(mbstowcs, std::, <cstdlib>)
+SYMBOL(mbstowcs, None, <cstdlib>)
+SYMBOL(mbstowcs, None, <stdlib.h>)
+SYMBOL(mbtowc, std::, <cstdlib>)
+SYMBOL(mbtowc, None, <cstdlib>)
+SYMBOL(mbtowc, None, <stdlib.h>)
+SYMBOL(mega, std::, <ratio>)
+SYMBOL(mem_fn, std::, <functional>)
+SYMBOL(mem_fun, std::, <functional>)
+SYMBOL(mem_fun1_ref_t, std::, <functional>)
+SYMBOL(mem_fun1_t, std::, <functional>)
+SYMBOL(mem_fun_ref, std::, <functional>)
+SYMBOL(mem_fun_ref_t, std::, <functional>)
+SYMBOL(mem_fun_t, std::, <functional>)
+SYMBOL(memchr, std::, <cstring>)
+SYMBOL(memchr, None, <cstring>)
+SYMBOL(memchr, None, <string.h>)
+SYMBOL(memcmp, std::, <cstring>)
+SYMBOL(memcmp, None, <cstring>)
+SYMBOL(memcmp, None, <string.h>)
+SYMBOL(memcpy, std::, <cstring>)
+SYMBOL(memcpy, None, <cstring>)
+SYMBOL(memcpy, None, <string.h>)
+SYMBOL(memmove, std::, <cstring>)
+SYMBOL(memmove, None, <cstring>)
+SYMBOL(memmove, None, <string.h>)
+SYMBOL(memory_order, std::, <atomic>)
+SYMBOL(memory_order_acq_rel, std::, <atomic>)
+SYMBOL(memory_order_acquire, std::, <atomic>)
+SYMBOL(memory_order_consume, std::, <atomic>)
+SYMBOL(memory_order_relaxed, std::, <atomic>)
+SYMBOL(memory_order_release, std::, <atomic>)
+SYMBOL(memory_order_seq_cst, std::, <atomic>)
+SYMBOL(memset, std::, <cstring>)
+SYMBOL(memset, None, <cstring>)
+SYMBOL(memset, None, <string.h>)
+SYMBOL(merge, std::, <algorithm>)
+SYMBOL(mergeable, std::, <iterator>)
+SYMBOL(mersenne_twister_engine, std::, <random>)
+SYMBOL(messages, std::, <locale>)
+SYMBOL(messages_base, std::, <locale>)
+SYMBOL(messages_byname, std::, <locale>)
+SYMBOL(micro, std::, <ratio>)
+SYMBOL(midpoint, std::, <numeric>)
+SYMBOL(milli, std::, <ratio>)
+SYMBOL(min, std::, <algorithm>)
+SYMBOL(min_element, std::, <algorithm>)
+SYMBOL(minmax, std::, <algorithm>)
+SYMBOL(minmax_element, std::, <algorithm>)
+SYMBOL(minstd_rand, std::, <random>)
+SYMBOL(minstd_rand0, std::, <random>)
+SYMBOL(minus, std::, <functional>)
+SYMBOL(mismatch, std::, <algorithm>)
+SYMBOL(mktime, std::, <ctime>)
+SYMBOL(mktime, None, <ctime>)
+SYMBOL(mktime, None, <time.h>)
+SYMBOL(modf, std::, <cmath>)
+SYMBOL(modf, None, <cmath>)
+SYMBOL(modf, None, <math.h>)
+SYMBOL(modff, std::, <cmath>)
+SYMBOL(modff, None, <cmath>)
+SYMBOL(modff, None, <math.h>)
+SYMBOL(modfl, std::, <cmath>)
+SYMBOL(modfl, None, <cmath>)
+SYMBOL(modfl, None, <math.h>)
+SYMBOL(modulus, std::, <functional>)
+SYMBOL(money_base, std::, <locale>)
+SYMBOL(money_get, std::, <locale>)
+SYMBOL(money_put, std::, <locale>)
+SYMBOL(moneypunct, std::, <locale>)
+SYMBOL(moneypunct_byname, std::, <locale>)
+SYMBOL(monostate, std::, <variant>)
+SYMBOL(movable, std::, <concepts>)
+SYMBOL(move_backward, std::, <algorithm>)
+SYMBOL(move_constructible, std::, <concepts>)
+SYMBOL(move_if_noexcept, std::, <utility>)
+SYMBOL(move_iterator, std::, <iterator>)
+SYMBOL(move_only_function, std::, <functional>)
+SYMBOL(move_sentinel, std::, <iterator>)
+SYMBOL(mt19937, std::, <random>)
+SYMBOL(mt19937_64, std::, <random>)
+SYMBOL(multimap, std::, <map>)
+SYMBOL(multiplies, std::, <functional>)
+SYMBOL(multiset, std::, <set>)
+SYMBOL(mutex, std::, <mutex>)
+SYMBOL(nan, std::, <cmath>)
+SYMBOL(nan, None, <cmath>)
+SYMBOL(nan, None, <math.h>)
+SYMBOL(nanf, std::, <cmath>)
+SYMBOL(nanf, None, <cmath>)
+SYMBOL(nanf, None, <math.h>)
+SYMBOL(nanl, std::, <cmath>)
+SYMBOL(nanl, None, <cmath>)
+SYMBOL(nanl, None, <math.h>)
+SYMBOL(nano, std::, <ratio>)
+SYMBOL(nearbyint, std::, <cmath>)
+SYMBOL(nearbyint, None, <cmath>)
+SYMBOL(nearbyint, None, <math.h>)
+SYMBOL(nearbyintf, std::, <cmath>)
+SYMBOL(nearbyintf, None, <cmath>)
+SYMBOL(nearbyintf, None, <math.h>)
+SYMBOL(nearbyintl, std::, <cmath>)
+SYMBOL(nearbyintl, None, <cmath>)
+SYMBOL(nearbyintl, None, <math.h>)
+SYMBOL(negate, std::, <functional>)
+SYMBOL(negation, std::, <type_traits>)
+SYMBOL(negation_v, std::, <type_traits>)
+SYMBOL(negative_binomial_distribution, std::, <random>)
+SYMBOL(nested_exception, std::, <exception>)
+SYMBOL(new_handler, std::, <new>)
+SYMBOL(next, std::, <iterator>)
+SYMBOL(next_permutation, std::, <algorithm>)
+SYMBOL(nextafter, std::, <cmath>)
+SYMBOL(nextafter, None, <cmath>)
+SYMBOL(nextafter, None, <math.h>)
+SYMBOL(nextafterf, std::, <cmath>)
+SYMBOL(nextafterf, None, <cmath>)
+SYMBOL(nextafterf, None, <math.h>)
+SYMBOL(nextafterl, std::, <cmath>)
+SYMBOL(nextafterl, None, <cmath>)
+SYMBOL(nextafterl, None, <math.h>)
+SYMBOL(nexttoward, std::, <cmath>)
+SYMBOL(nexttoward, None, <cmath>)
+SYMBOL(nexttoward, None, <math.h>)
+SYMBOL(nexttowardf, std::, <cmath>)
+SYMBOL(nexttowardf, None, <cmath>)
+SYMBOL(nexttowardf, None, <math.h>)
+SYMBOL(nexttowardl, std::, <cmath>)
+SYMBOL(nexttowardl, None, <cmath>)
+SYMBOL(nexttowardl, None, <math.h>)
+SYMBOL(noboolalpha, std::, <ios>)
+SYMBOL(noboolalpha, std::, <iostream>)
+SYMBOL(noemit_on_flush, std::, <ostream>)
+SYMBOL(noemit_on_flush, std::, <iostream>)
+SYMBOL(none_of, std::, <algorithm>)
+SYMBOL(noop_coroutine, std::, <coroutine>)
+SYMBOL(noop_coroutine_handle, std::, <coroutine>)
+SYMBOL(noop_coroutine_promise, std::, <coroutine>)
+SYMBOL(norm, std::, <complex>)
+SYMBOL(normal_distribution, std::, <random>)
+SYMBOL(noshowbase, std::, <ios>)
+SYMBOL(noshowbase, std::, <iostream>)
+SYMBOL(noshowpoint, std::, <ios>)
+SYMBOL(noshowpoint, std::, <iostream>)
+SYMBOL(noshowpos, std::, <ios>)
+SYMBOL(noshowpos, std::, <iostream>)
+SYMBOL(noskipws, std::, <ios>)
+SYMBOL(noskipws, std::, <iostream>)
+SYMBOL(nostopstate, std::, <stop_token>)
+SYMBOL(nostopstate_t, std::, <stop_token>)
+SYMBOL(not1, std::, <functional>)
+SYMBOL(not2, std::, <functional>)
+SYMBOL(not_equal_to, std::, <functional>)
+SYMBOL(not_fn, std::, <functional>)
+SYMBOL(nothrow, std::, <new>)
+SYMBOL(nothrow_t, std::, <new>)
+SYMBOL(notify_all_at_thread_exit, std::, <condition_variable>)
+SYMBOL(nounitbuf, std::, <ios>)
+SYMBOL(nounitbuf, std::, <iostream>)
+SYMBOL(nouppercase, std::, <ios>)
+SYMBOL(nouppercase, std::, <iostream>)
+SYMBOL(nth_element, std::, <algorithm>)
+SYMBOL(nullopt, std::, <optional>)
+SYMBOL(nullopt_t, std::, <optional>)
+SYMBOL(nullptr_t, std::, <cstddef>)
+SYMBOL(nullptr_t, None, <cstddef>)
+SYMBOL(nullptr_t, None, <stddef.h>)
+SYMBOL(num_get, std::, <locale>)
+SYMBOL(num_put, std::, <locale>)
+SYMBOL(numeric_limits, std::, <limits>)
+SYMBOL(numpunct, std::, <locale>)
+SYMBOL(numpunct_byname, std::, <locale>)
+SYMBOL(oct, std::, <ios>)
+SYMBOL(oct, std::, <iostream>)
+SYMBOL(ofstream, std::, <fstream>)
+SYMBOL(ofstream, std::, <iosfwd>)
+SYMBOL(once_flag, std::, <mutex>)
+SYMBOL(open_mode, std::, <ios>)
+SYMBOL(open_mode, std::, <iostream>)
+SYMBOL(optional, std::, <optional>)
+SYMBOL(ospanstream, std::, <spanstream>)
+SYMBOL(ospanstream, std::, <iosfwd>)
+SYMBOL(ostream, std::, <ostream>)
+SYMBOL(ostream, std::, <iostream>)
+SYMBOL(ostream, std::, <iosfwd>)
+SYMBOL(ostream_iterator, std::, <iterator>)
+SYMBOL(ostreambuf_iterator, std::, <iterator>)
+SYMBOL(ostreambuf_iterator, std::, <iosfwd>)
+SYMBOL(ostringstream, std::, <sstream>)
+SYMBOL(ostringstream, std::, <iosfwd>)
+SYMBOL(ostrstream, std::, <strstream>)
+SYMBOL(osyncstream, std::, <syncstream>)
+SYMBOL(osyncstream, std::, <iosfwd>)
+SYMBOL(out_of_range, std::, <stdexcept>)
+SYMBOL(out_ptr, std::, <memory>)
+SYMBOL(out_ptr_t, std::, <memory>)
+SYMBOL(output_iterator, std::, <iterator>)
+SYMBOL(output_iterator_tag, std::, <iterator>)
+SYMBOL(overflow_error, std::, <stdexcept>)
+SYMBOL(owner_less, std::, <memory>)
+SYMBOL(packaged_task, std::, <future>)
+SYMBOL(pair, std::, <utility>)
+SYMBOL(partial_order, std::, <compare>)
+SYMBOL(partial_ordering, std::, <compare>)
+SYMBOL(partial_sort, std::, <algorithm>)
+SYMBOL(partial_sort_copy, std::, <algorithm>)
+SYMBOL(partial_sum, std::, <numeric>)
+SYMBOL(partition, std::, <algorithm>)
+SYMBOL(partition_copy, std::, <algorithm>)
+SYMBOL(partition_point, std::, <algorithm>)
+SYMBOL(permutable, std::, <iterator>)
+SYMBOL(perror, std::, <cstdio>)
+SYMBOL(perror, None, <cstdio>)
+SYMBOL(perror, None, <stdio.h>)
+SYMBOL(peta, std::, <ratio>)
+SYMBOL(pico, std::, <ratio>)
+SYMBOL(piecewise_constant_distribution, std::, <random>)
+SYMBOL(piecewise_construct, std::, <utility>)
+SYMBOL(piecewise_construct_t, std::, <utility>)
+SYMBOL(piecewise_linear_distribution, std::, <random>)
+SYMBOL(plus, std::, <functional>)
+SYMBOL(pointer_safety, std::, <memory>)
+SYMBOL(pointer_traits, std::, <memory>)
+SYMBOL(poisson_distribution, std::, <random>)
+SYMBOL(polar, std::, <complex>)
+SYMBOL(pop_heap, std::, <algorithm>)
+SYMBOL(popcount, std::, <bit>)
+SYMBOL(pow, std::, <cmath>)
+SYMBOL(pow, None, <cmath>)
+SYMBOL(pow, None, <math.h>)
+SYMBOL(powf, std::, <cmath>)
+SYMBOL(powf, None, <cmath>)
+SYMBOL(powf, None, <math.h>)
+SYMBOL(powl, std::, <cmath>)
+SYMBOL(powl, None, <cmath>)
+SYMBOL(powl, None, <math.h>)
+SYMBOL(predicate, std::, <concepts>)
+SYMBOL(preferred, std::, <memory>)
+SYMBOL(prev, std::, <iterator>)
+SYMBOL(prev_permutation, std::, <algorithm>)
+SYMBOL(printf, std::, <cstdio>)
+SYMBOL(printf, None, <cstdio>)
+SYMBOL(printf, None, <stdio.h>)
+SYMBOL(priority_queue, std::, <queue>)
+SYMBOL(proj, std::, <complex>)
+SYMBOL(projected, std::, <iterator>)
+SYMBOL(promise, std::, <future>)
+SYMBOL(ptr_fun, std::, <functional>)
+SYMBOL(ptrdiff_t, std::, <cstddef>)
+SYMBOL(ptrdiff_t, None, <cstddef>)
+SYMBOL(ptrdiff_t, None, <stddef.h>)
+SYMBOL(push_heap, std::, <algorithm>)
+SYMBOL(put_money, std::, <iomanip>)
+SYMBOL(put_time, std::, <iomanip>)
+SYMBOL(putc, std::, <cstdio>)
+SYMBOL(putc, None, <cstdio>)
+SYMBOL(putc, None, <stdio.h>)
+SYMBOL(putchar, std::, <cstdio>)
+SYMBOL(putchar, None, <cstdio>)
+SYMBOL(putchar, None, <stdio.h>)
+SYMBOL(puts, std::, <cstdio>)
+SYMBOL(puts, None, <cstdio>)
+SYMBOL(puts, None, <stdio.h>)
+SYMBOL(putwc, std::, <cwchar>)
+SYMBOL(putwc, None, <cwchar>)
+SYMBOL(putwc, None, <wchar.h>)
+SYMBOL(putwchar, std::, <cwchar>)
+SYMBOL(putwchar, None, <cwchar>)
+SYMBOL(putwchar, None, <wchar.h>)
+SYMBOL(qsort, std::, <cstdlib>)
+SYMBOL(qsort, None, <cstdlib>)
+SYMBOL(qsort, None, <stdlib.h>)
+SYMBOL(queue, std::, <queue>)
+SYMBOL(quick_exit, std::, <cstdlib>)
+SYMBOL(quick_exit, None, <cstdlib>)
+SYMBOL(quick_exit, None, <stdlib.h>)
+SYMBOL(quoted, std::, <iomanip>)
+SYMBOL(raise, std::, <csignal>)
+SYMBOL(raise, None, <csignal>)
+SYMBOL(raise, None, <signal.h>)
+SYMBOL(rand, std::, <cstdlib>)
+SYMBOL(rand, None, <cstdlib>)
+SYMBOL(rand, None, <stdlib.h>)
+SYMBOL(random_access_iterator, std::, <iterator>)
+SYMBOL(random_access_iterator_tag, std::, <iterator>)
+SYMBOL(random_device, std::, <random>)
+SYMBOL(random_shuffle, std::, <algorithm>)
+SYMBOL(range_error, std::, <stdexcept>)
+SYMBOL(rank, std::, <type_traits>)
+SYMBOL(rank_v, std::, <type_traits>)
+SYMBOL(ranlux24, std::, <random>)
+SYMBOL(ranlux24_base, std::, <random>)
+SYMBOL(ranlux48, std::, <random>)
+SYMBOL(ranlux48_base, std::, <random>)
+SYMBOL(ratio, std::, <ratio>)
+SYMBOL(ratio_add, std::, <ratio>)
+SYMBOL(ratio_divide, std::, <ratio>)
+SYMBOL(ratio_equal, std::, <ratio>)
+SYMBOL(ratio_equal_v, std::, <ratio>)
+SYMBOL(ratio_greater, std::, <ratio>)
+SYMBOL(ratio_greater_equal, std::, <ratio>)
+SYMBOL(ratio_greater_equal_v, std::, <ratio>)
+SYMBOL(ratio_greater_v, std::, <ratio>)
+SYMBOL(ratio_less, std::, <ratio>)
+SYMBOL(ratio_less_equal, std::, <ratio>)
+SYMBOL(ratio_less_equal_v, std::, <ratio>)
+SYMBOL(ratio_less_v, std::, <ratio>)
+SYMBOL(ratio_multiply, std::, <ratio>)
+SYMBOL(ratio_not_equal, std::, <ratio>)
+SYMBOL(ratio_not_equal_v, std::, <ratio>)
+SYMBOL(ratio_subtract, std::, <ratio>)
+SYMBOL(raw_storage_iterator, std::, <memory>)
+SYMBOL(real, std::, <complex>)
+SYMBOL(realloc, std::, <cstdlib>)
+SYMBOL(realloc, None, <cstdlib>)
+SYMBOL(realloc, None, <stdlib.h>)
+SYMBOL(recursive_mutex, std::, <mutex>)
+SYMBOL(recursive_timed_mutex, std::, <mutex>)
+SYMBOL(reduce, std::, <numeric>)
+SYMBOL(ref, std::, <functional>)
+SYMBOL(reference_wrapper, std::, <functional>)
+SYMBOL(regex, std::, <regex>)
+SYMBOL(regex_error, std::, <regex>)
+SYMBOL(regex_iterator, std::, <regex>)
+SYMBOL(regex_match, std::, <regex>)
+SYMBOL(regex_replace, std::, <regex>)
+SYMBOL(regex_search, std::, <regex>)
+SYMBOL(regex_token_iterator, std::, <regex>)
+SYMBOL(regex_traits, std::, <regex>)
+SYMBOL(regular, std::, <concepts>)
+SYMBOL(regular_invocable, std::, <concepts>)
+SYMBOL(reinterpret_pointer_cast, std::, <memory>)
+SYMBOL(relation, std::, <concepts>)
+SYMBOL(remainder, std::, <cmath>)
+SYMBOL(remainder, None, <cmath>)
+SYMBOL(remainder, None, <math.h>)
+SYMBOL(remainderf, std::, <cmath>)
+SYMBOL(remainderf, None, <cmath>)
+SYMBOL(remainderf, None, <math.h>)
+SYMBOL(remainderl, std::, <cmath>)
+SYMBOL(remainderl, None, <cmath>)
+SYMBOL(remainderl, None, <math.h>)
+SYMBOL(remove_all_extents, std::, <type_traits>)
+SYMBOL(remove_all_extents_t, std::, <type_traits>)
+SYMBOL(remove_const, std::, <type_traits>)
+SYMBOL(remove_const_t, std::, <type_traits>)
+SYMBOL(remove_copy, std::, <algorithm>)
+SYMBOL(remove_copy_if, std::, <algorithm>)
+SYMBOL(remove_cv, std::, <type_traits>)
+SYMBOL(remove_cv_t, std::, <type_traits>)
+SYMBOL(remove_cvref, std::, <type_traits>)
+SYMBOL(remove_cvref_t, std::, <type_traits>)
+SYMBOL(remove_extent, std::, <type_traits>)
+SYMBOL(remove_extent_t, std::, <type_traits>)
+SYMBOL(remove_if, std::, <algorithm>)
+SYMBOL(remove_pointer, std::, <type_traits>)
+SYMBOL(remove_pointer_t, std::, <type_traits>)
+SYMBOL(remove_reference, std::, <type_traits>)
+SYMBOL(remove_reference_t, std::, <type_traits>)
+SYMBOL(remove_volatile, std::, <type_traits>)
+SYMBOL(remove_volatile_t, std::, <type_traits>)
+SYMBOL(remquo, std::, <cmath>)
+SYMBOL(remquo, None, <cmath>)
+SYMBOL(remquo, None, <math.h>)
+SYMBOL(remquof, std::, <cmath>)
+SYMBOL(remquof, None, <cmath>)
+SYMBOL(remquof, None, <math.h>)
+SYMBOL(remquol, std::, <cmath>)
+SYMBOL(remquol, None, <cmath>)
+SYMBOL(remquol, None, <math.h>)
+SYMBOL(rename, std::, <cstdio>)
+SYMBOL(rename, None, <cstdio>)
+SYMBOL(rename, None, <stdio.h>)
+SYMBOL(replace, std::, <algorithm>)
+SYMBOL(replace_copy, std::, <algorithm>)
+SYMBOL(replace_copy_if, std::, <algorithm>)
+SYMBOL(replace_if, std::, <algorithm>)
+SYMBOL(resetiosflags, std::, <iomanip>)
+SYMBOL(result_of, std::, <type_traits>)
+SYMBOL(result_of_t, std::, <type_traits>)
+SYMBOL(rethrow_exception, std::, <exception>)
+SYMBOL(rethrow_if_nested, std::, <exception>)
+SYMBOL(return_temporary_buffer, std::, <memory>)
+SYMBOL(reverse, std::, <algorithm>)
+SYMBOL(reverse_copy, std::, <algorithm>)
+SYMBOL(reverse_iterator, std::, <iterator>)
+SYMBOL(rewind, std::, <cstdio>)
+SYMBOL(rewind, None, <cstdio>)
+SYMBOL(rewind, None, <stdio.h>)
+SYMBOL(riemann_zeta, std::, <cmath>)
+SYMBOL(riemann_zetaf, std::, <cmath>)
+SYMBOL(riemann_zetal, std::, <cmath>)
+SYMBOL(right, std::, <ios>)
+SYMBOL(right, std::, <iostream>)
+SYMBOL(rint, std::, <cmath>)
+SYMBOL(rint, None, <cmath>)
+SYMBOL(rint, None, <math.h>)
+SYMBOL(rintf, std::, <cmath>)
+SYMBOL(rintf, None, <cmath>)
+SYMBOL(rintf, None, <math.h>)
+SYMBOL(rintl, std::, <cmath>)
+SYMBOL(rintl, None, <cmath>)
+SYMBOL(rintl, None, <math.h>)
+SYMBOL(rotate, std::, <algorithm>)
+SYMBOL(rotate_copy, std::, <algorithm>)
+SYMBOL(rotl, std::, <bit>)
+SYMBOL(rotr, std::, <bit>)
+SYMBOL(round, std::, <cmath>)
+SYMBOL(round, None, <cmath>)
+SYMBOL(round, None, <math.h>)
+SYMBOL(round_indeterminate, std::, <limits>)
+SYMBOL(round_to_nearest, std::, <limits>)
+SYMBOL(round_toward_infinity, std::, <limits>)
+SYMBOL(round_toward_neg_infinity, std::, <limits>)
+SYMBOL(round_toward_zero, std::, <limits>)
+SYMBOL(roundf, std::, <cmath>)
+SYMBOL(roundf, None, <cmath>)
+SYMBOL(roundf, None, <math.h>)
+SYMBOL(roundl, std::, <cmath>)
+SYMBOL(roundl, None, <cmath>)
+SYMBOL(roundl, None, <math.h>)
+SYMBOL(runtime_error, std::, <stdexcept>)
+SYMBOL(same_as, std::, <concepts>)
+SYMBOL(sample, std::, <algorithm>)
+SYMBOL(scalbln, std::, <cmath>)
+SYMBOL(scalbln, None, <cmath>)
+SYMBOL(scalbln, None, <math.h>)
+SYMBOL(scalblnf, std::, <cmath>)
+SYMBOL(scalblnf, None, <cmath>)
+SYMBOL(scalblnf, None, <math.h>)
+SYMBOL(scalblnl, std::, <cmath>)
+SYMBOL(scalblnl, None, <cmath>)
+SYMBOL(scalblnl, None, <math.h>)
+SYMBOL(scalbn, std::, <cmath>)
+SYMBOL(scalbn, None, <cmath>)
+SYMBOL(scalbn, None, <math.h>)
+SYMBOL(scalbnf, std::, <cmath>)
+SYMBOL(scalbnf, None, <cmath>)
+SYMBOL(scalbnf, None, <math.h>)
+SYMBOL(scalbnl, std::, <cmath>)
+SYMBOL(scalbnl, None, <cmath>)
+SYMBOL(scalbnl, None, <math.h>)
+SYMBOL(scanf, std::, <cstdio>)
+SYMBOL(scanf, None, <cstdio>)
+SYMBOL(scanf, None, <stdio.h>)
+SYMBOL(scientific, std::, <ios>)
+SYMBOL(scientific, std::, <iostream>)
+SYMBOL(scoped_allocator_adaptor, std::, <scoped_allocator>)
+SYMBOL(scoped_lock, std::, <mutex>)
+SYMBOL(search, std::, <algorithm>)
+SYMBOL(search_n, std::, <algorithm>)
+SYMBOL(seed_seq, std::, <random>)
+SYMBOL(seek_dir, std::, <ios>)
+SYMBOL(seek_dir, std::, <iostream>)
+SYMBOL(semiregular, std::, <concepts>)
+SYMBOL(sentinel_for, std::, <iterator>)
+SYMBOL(set, std::, <set>)
+SYMBOL(set_difference, std::, <algorithm>)
+SYMBOL(set_intersection, std::, <algorithm>)
+SYMBOL(set_new_handler, std::, <new>)
+SYMBOL(set_symmetric_difference, std::, <algorithm>)
+SYMBOL(set_terminate, std::, <exception>)
+SYMBOL(set_unexpected, std::, <exception>)
+SYMBOL(set_union, std::, <algorithm>)
+SYMBOL(setbase, std::, <iomanip>)
+SYMBOL(setbuf, std::, <cstdio>)
+SYMBOL(setbuf, None, <cstdio>)
+SYMBOL(setbuf, None, <stdio.h>)
+SYMBOL(setfill, std::, <iomanip>)
+SYMBOL(setiosflags, std::, <iomanip>)
+SYMBOL(setlocale, std::, <clocale>)
+SYMBOL(setlocale, None, <clocale>)
+SYMBOL(setlocale, None, <locale.h>)
+SYMBOL(setprecision, std::, <iomanip>)
+SYMBOL(setvbuf, std::, <cstdio>)
+SYMBOL(setvbuf, None, <cstdio>)
+SYMBOL(setvbuf, None, <stdio.h>)
+SYMBOL(setw, std::, <iomanip>)
+SYMBOL(shared_future, std::, <future>)
+SYMBOL(shared_lock, std::, <shared_mutex>)
+SYMBOL(shared_mutex, std::, <shared_mutex>)
+SYMBOL(shared_ptr, std::, <memory>)
+SYMBOL(shared_timed_mutex, std::, <shared_mutex>)
+SYMBOL(shift_left, std::, <algorithm>)
+SYMBOL(shift_right, std::, <algorithm>)
+SYMBOL(showbase, std::, <ios>)
+SYMBOL(showbase, std::, <iostream>)
+SYMBOL(showpoint, std::, <ios>)
+SYMBOL(showpoint, std::, <iostream>)
+SYMBOL(showpos, std::, <ios>)
+SYMBOL(showpos, std::, <iostream>)
+SYMBOL(shuffle, std::, <algorithm>)
+SYMBOL(shuffle_order_engine, std::, <random>)
+SYMBOL(sig_atomic_t, std::, <csignal>)
+SYMBOL(sig_atomic_t, None, <csignal>)
+SYMBOL(sig_atomic_t, None, <signal.h>)
+SYMBOL(signal, std::, <csignal>)
+SYMBOL(signal, None, <csignal>)
+SYMBOL(signal, None, <signal.h>)
+SYMBOL(signbit, std::, <cmath>)
+SYMBOL(signbit, None, <cmath>)
+SYMBOL(signbit, None, <math.h>)
+SYMBOL(signed_integral, std::, <concepts>)
+SYMBOL(sin, std::, <cmath>)
+SYMBOL(sin, None, <cmath>)
+SYMBOL(sin, None, <math.h>)
+SYMBOL(sinf, std::, <cmath>)
+SYMBOL(sinf, None, <cmath>)
+SYMBOL(sinf, None, <math.h>)
+SYMBOL(sinh, std::, <cmath>)
+SYMBOL(sinh, None, <cmath>)
+SYMBOL(sinh, None, <math.h>)
+SYMBOL(sinhf, std::, <cmath>)
+SYMBOL(sinhf, None, <cmath>)
+SYMBOL(sinhf, None, <math.h>)
+SYMBOL(sinhl, std::, <cmath>)
+SYMBOL(sinhl, None, <cmath>)
+SYMBOL(sinhl, None, <math.h>)
+SYMBOL(sinl, std::, <cmath>)
+SYMBOL(sinl, None, <cmath>)
+SYMBOL(sinl, None, <math.h>)
+SYMBOL(sized_sentinel_for, std::, <iterator>)
+SYMBOL(skipws, std::, <ios>)
+SYMBOL(skipws, std::, <iostream>)
+SYMBOL(slice, std::, <valarray>)
+SYMBOL(slice_array, std::, <valarray>)
+SYMBOL(smatch, std::, <regex>)
+SYMBOL(snprintf, std::, <cstdio>)
+SYMBOL(snprintf, None, <cstdio>)
+SYMBOL(snprintf, None, <stdio.h>)
+SYMBOL(sort, std::, <algorithm>)
+SYMBOL(sort_heap, std::, <algorithm>)
+SYMBOL(sortable, std::, <iterator>)
+SYMBOL(source_location, std::, <source_location>)
+SYMBOL(span, std::, <span>)
+SYMBOL(spanbuf, std::, <spanstream>)
+SYMBOL(spanbuf, std::, <iosfwd>)
+SYMBOL(spanstream, std::, <spanstream>)
+SYMBOL(spanstream, std::, <iosfwd>)
+SYMBOL(sph_bessel, std::, <cmath>)
+SYMBOL(sph_bessel, None, <cmath>)
+SYMBOL(sph_bessel, None, <math.h>)
+SYMBOL(sph_besself, std::, <cmath>)
+SYMBOL(sph_besself, None, <cmath>)
+SYMBOL(sph_besself, None, <math.h>)
+SYMBOL(sph_bessell, std::, <cmath>)
+SYMBOL(sph_bessell, None, <cmath>)
+SYMBOL(sph_bessell, None, <math.h>)
+SYMBOL(sph_legendre, std::, <cmath>)
+SYMBOL(sph_legendref, std::, <cmath>)
+SYMBOL(sph_legendrel, std::, <cmath>)
+SYMBOL(sph_neumann, std::, <cmath>)
+SYMBOL(sph_neumannf, std::, <cmath>)
+SYMBOL(sph_neumannl, std::, <cmath>)
+SYMBOL(sprintf, std::, <cstdio>)
+SYMBOL(sprintf, None, <cstdio>)
+SYMBOL(sprintf, None, <stdio.h>)
+SYMBOL(sqrt, std::, <cmath>)
+SYMBOL(sqrt, None, <cmath>)
+SYMBOL(sqrt, None, <math.h>)
+SYMBOL(sqrtf, std::, <cmath>)
+SYMBOL(sqrtf, None, <cmath>)
+SYMBOL(sqrtf, None, <math.h>)
+SYMBOL(sqrtl, std::, <cmath>)
+SYMBOL(sqrtl, None, <cmath>)
+SYMBOL(sqrtl, None, <math.h>)
+SYMBOL(srand, std::, <cstdlib>)
+SYMBOL(srand, None, <cstdlib>)
+SYMBOL(srand, None, <stdlib.h>)
+SYMBOL(sregex_iterator, std::, <regex>)
+SYMBOL(sregex_token_iterator, std::, <regex>)
+SYMBOL(sscanf, std::, <cstdio>)
+SYMBOL(sscanf, None, <cstdio>)
+SYMBOL(sscanf, None, <stdio.h>)
+SYMBOL(ssub_match, std::, <regex>)
+SYMBOL(stable_partition, std::, <algorithm>)
+SYMBOL(stable_sort, std::, <algorithm>)
+SYMBOL(stack, std::, <stack>)
+SYMBOL(stacktrace, std::, <stacktrace>)
+SYMBOL(stacktrace_entry, std::, <stacktrace>)
+SYMBOL(static_pointer_cast, std::, <memory>)
+SYMBOL(stod, std::, <string>)
+SYMBOL(stof, std::, <string>)
+SYMBOL(stoi, std::, <string>)
+SYMBOL(stol, std::, <string>)
+SYMBOL(stold, std::, <string>)
+SYMBOL(stoll, std::, <string>)
+SYMBOL(stop_callback, std::, <stop_token>)
+SYMBOL(stop_source, std::, <stop_token>)
+SYMBOL(stop_token, std::, <stop_token>)
+SYMBOL(stoul, std::, <string>)
+SYMBOL(stoull, std::, <string>)
+SYMBOL(strcat, std::, <cstring>)
+SYMBOL(strcat, None, <cstring>)
+SYMBOL(strcat, None, <string.h>)
+SYMBOL(strchr, std::, <cstring>)
+SYMBOL(strchr, None, <cstring>)
+SYMBOL(strchr, None, <string.h>)
+SYMBOL(strcmp, std::, <cstring>)
+SYMBOL(strcmp, None, <cstring>)
+SYMBOL(strcmp, None, <string.h>)
+SYMBOL(strcoll, std::, <cstring>)
+SYMBOL(strcoll, None, <cstring>)
+SYMBOL(strcoll, None, <string.h>)
+SYMBOL(strcpy, std::, <cstring>)
+SYMBOL(strcpy, None, <cstring>)
+SYMBOL(strcpy, None, <string.h>)
+SYMBOL(strcspn, std::, <cstring>)
+SYMBOL(strcspn, None, <cstring>)
+SYMBOL(strcspn, None, <string.h>)
+SYMBOL(streambuf, std::, <streambuf>)
+SYMBOL(streambuf, std::, <iostream>)
+SYMBOL(streambuf, std::, <iosfwd>)
+SYMBOL(streamoff, std::, <ios>)
+SYMBOL(streamoff, std::, <iostream>)
+SYMBOL(streampos, std::, <iosfwd>)
+SYMBOL(streampos, std::, <iosfwd>)
+SYMBOL(streamsize, std::, <ios>)
+SYMBOL(streamsize, std::, <iostream>)
+SYMBOL(strerror, std::, <cstring>)
+SYMBOL(strerror, None, <cstring>)
+SYMBOL(strerror, None, <string.h>)
+SYMBOL(strftime, std::, <ctime>)
+SYMBOL(strftime, None, <ctime>)
+SYMBOL(strftime, None, <time.h>)
+SYMBOL(strict, std::, <memory>)
+SYMBOL(strict_weak_order, std::, <concepts>)
+SYMBOL(string, std::, <string>)
+SYMBOL(string_view, std::, <string_view>)
+SYMBOL(stringbuf, std::, <sstream>)
+SYMBOL(stringbuf, std::, <iosfwd>)
+SYMBOL(stringstream, std::, <sstream>)
+SYMBOL(stringstream, std::, <iosfwd>)
+SYMBOL(strlen, std::, <cstring>)
+SYMBOL(strlen, None, <cstring>)
+SYMBOL(strlen, None, <string.h>)
+SYMBOL(strncat, std::, <cstring>)
+SYMBOL(strncat, None, <cstring>)
+SYMBOL(strncat, None, <string.h>)
+SYMBOL(strncmp, std::, <cstring>)
+SYMBOL(strncmp, None, <cstring>)
+SYMBOL(strncmp, None, <string.h>)
+SYMBOL(strncpy, std::, <cstring>)
+SYMBOL(strncpy, None, <cstring>)
+SYMBOL(strncpy, None, <string.h>)
+SYMBOL(strong_order, std::, <compare>)
+SYMBOL(strong_ordering, std::, <compare>)
+SYMBOL(strpbrk, std::, <cstring>)
+SYMBOL(strpbrk, None, <cstring>)
+SYMBOL(strpbrk, None, <string.h>)
+SYMBOL(strrchr, std::, <cstring>)
+SYMBOL(strrchr, None, <cstring>)
+SYMBOL(strrchr, None, <string.h>)
+SYMBOL(strspn, std::, <cstring>)
+SYMBOL(strspn, None, <cstring>)
+SYMBOL(strspn, None, <string.h>)
+SYMBOL(strstr, std::, <cstring>)
+SYMBOL(strstr, None, <cstring>)
+SYMBOL(strstr, None, <string.h>)
+SYMBOL(strstream, std::, <strstream>)
+SYMBOL(strstreambuf, std::, <strstream>)
+SYMBOL(strtod, std::, <cstdlib>)
+SYMBOL(strtod, None, <cstdlib>)
+SYMBOL(strtod, None, <stdlib.h>)
+SYMBOL(strtof, std::, <cstdlib>)
+SYMBOL(strtof, None, <cstdlib>)
+SYMBOL(strtof, None, <stdlib.h>)
+SYMBOL(strtoimax, std::, <cinttypes>)
+SYMBOL(strtoimax, None, <cinttypes>)
+SYMBOL(strtoimax, None, <inttypes.h>)
+SYMBOL(strtok, std::, <cstring>)
+SYMBOL(strtok, None, <cstring>)
+SYMBOL(strtok, None, <string.h>)
+SYMBOL(strtol, std::, <cstdlib>)
+SYMBOL(strtol, None, <cstdlib>)
+SYMBOL(strtol, None, <stdlib.h>)
+SYMBOL(strtold, std::, <cstdlib>)
+SYMBOL(strtold, None, <cstdlib>)
+SYMBOL(strtold, None, <stdlib.h>)
+SYMBOL(strtoll, std::, <cstdlib>)
+SYMBOL(strtoll, None, <cstdlib>)
+SYMBOL(strtoll, None, <stdlib.h>)
+SYMBOL(strtoul, std::, <cstdlib>)
+SYMBOL(strtoul, None, <cstdlib>)
+SYMBOL(strtoul, None, <stdlib.h>)
+SYMBOL(strtoull, std::, <cstdlib>)
+SYMBOL(strtoull, None, <cstdlib>)
+SYMBOL(strtoull, None, <stdlib.h>)
+SYMBOL(strtoumax, std::, <cinttypes>)
+SYMBOL(strtoumax, None, <cinttypes>)
+SYMBOL(strtoumax, None, <inttypes.h>)
+SYMBOL(strxfrm, std::, <cstring>)
+SYMBOL(strxfrm, None, <cstring>)
+SYMBOL(strxfrm, None, <string.h>)
+SYMBOL(student_t_distribution, std::, <random>)
+SYMBOL(sub_match, std::, <regex>)
+SYMBOL(subtract_with_carry_engine, std::, <random>)
+SYMBOL(suspend_always, std::, <coroutine>)
+SYMBOL(suspend_never, std::, <coroutine>)
+SYMBOL(swap_ranges, std::, <algorithm>)
+SYMBOL(swappable, std::, <concepts>)
+SYMBOL(swappable_with, std::, <concepts>)
+SYMBOL(swprintf, std::, <cwchar>)
+SYMBOL(swprintf, None, <cwchar>)
+SYMBOL(swprintf, None, <wchar.h>)
+SYMBOL(swscanf, std::, <cwchar>)
+SYMBOL(swscanf, None, <cwchar>)
+SYMBOL(swscanf, None, <wchar.h>)
+SYMBOL(syncbuf, std::, <syncstream>)
+SYMBOL(syncbuf, std::, <iosfwd>)
+SYMBOL(system, std::, <cstdlib>)
+SYMBOL(system, None, <cstdlib>)
+SYMBOL(system, None, <stdlib.h>)
+SYMBOL(system_category, std::, <system_error>)
+SYMBOL(system_error, std::, <system_error>)
+SYMBOL(tan, std::, <cmath>)
+SYMBOL(tan, None, <cmath>)
+SYMBOL(tan, None, <math.h>)
+SYMBOL(tanf, std::, <cmath>)
+SYMBOL(tanf, None, <cmath>)
+SYMBOL(tanf, None, <math.h>)
+SYMBOL(tanh, std::, <cmath>)
+SYMBOL(tanh, None, <cmath>)
+SYMBOL(tanh, None, <math.h>)
+SYMBOL(tanhf, std::, <cmath>)
+SYMBOL(tanhf, None, <cmath>)
+SYMBOL(tanhf, None, <math.h>)
+SYMBOL(tanhl, std::, <cmath>)
+SYMBOL(tanhl, None, <cmath>)
+SYMBOL(tanhl, None, <math.h>)
+SYMBOL(tanl, std::, <cmath>)
+SYMBOL(tanl, None, <cmath>)
+SYMBOL(tanl, None, <math.h>)
+SYMBOL(tera, std::, <ratio>)
+SYMBOL(terminate, std::, <exception>)
+SYMBOL(terminate_handler, std::, <exception>)
+SYMBOL(tgamma, std::, <cmath>)
+SYMBOL(tgamma, None, <cmath>)
+SYMBOL(tgamma, None, <math.h>)
+SYMBOL(tgammaf, std::, <cmath>)
+SYMBOL(tgammaf, None, <cmath>)
+SYMBOL(tgammaf, None, <math.h>)
+SYMBOL(tgammal, std::, <cmath>)
+SYMBOL(tgammal, None, <cmath>)
+SYMBOL(tgammal, None, <math.h>)
+SYMBOL(thread, std::, <thread>)
+SYMBOL(three_way_comparable, std::, <compare>)
+SYMBOL(three_way_comparable_with, std::, <compare>)
+SYMBOL(throw_with_nested, std::, <exception>)
+SYMBOL(tie, std::, <tuple>)
+SYMBOL(time, std::, <ctime>)
+SYMBOL(time, None, <ctime>)
+SYMBOL(time, None, <time.h>)
+SYMBOL(time_base, std::, <locale>)
+SYMBOL(time_get, std::, <locale>)
+SYMBOL(time_get_byname, std::, <locale>)
+SYMBOL(time_put, std::, <locale>)
+SYMBOL(time_put_byname, std::, <locale>)
+SYMBOL(time_t, std::, <ctime>)
+SYMBOL(time_t, None, <ctime>)
+SYMBOL(time_t, None, <time.h>)
+SYMBOL(timed_mutex, std::, <mutex>)
+SYMBOL(timespec, std::, <ctime>)
+SYMBOL(timespec, None, <ctime>)
+SYMBOL(timespec, None, <time.h>)
+SYMBOL(timespec_get, std::, <ctime>)
+SYMBOL(timespec_get, None, <ctime>)
+SYMBOL(timespec_get, None, <time.h>)
+SYMBOL(tm, std::, <ctime>)
+SYMBOL(tm, None, <ctime>)
+SYMBOL(tm, None, <time.h>)
+SYMBOL(tmpfile, std::, <cstdio>)
+SYMBOL(tmpfile, None, <cstdio>)
+SYMBOL(tmpfile, None, <stdio.h>)
+SYMBOL(tmpnam, std::, <cstdio>)
+SYMBOL(tmpnam, None, <cstdio>)
+SYMBOL(tmpnam, None, <stdio.h>)
+SYMBOL(to_address, std::, <memory>)
+SYMBOL(to_array, std::, <array>)
+SYMBOL(to_chars, std::, <charconv>)
+SYMBOL(to_chars_result, std::, <charconv>)
+SYMBOL(to_integer, std::, <cstddef>)
+SYMBOL(to_integer, None, <cstddef>)
+SYMBOL(to_integer, None, <stddef.h>)
+SYMBOL(to_string, std::, <string>)
+SYMBOL(to_underlying, std::, <utility>)
+SYMBOL(to_wstring, std::, <string>)
+SYMBOL(tolower, std::, <cctype>)
+SYMBOL(tolower, None, <cctype>)
+SYMBOL(tolower, None, <ctype.h>)
+SYMBOL(totally_ordered, std::, <concepts>)
+SYMBOL(totally_ordered_with, std::, <concepts>)
+SYMBOL(toupper, std::, <cctype>)
+SYMBOL(toupper, None, <cctype>)
+SYMBOL(toupper, None, <ctype.h>)
+SYMBOL(towctrans, std::, <cwctype>)
+SYMBOL(towctrans, None, <cwctype>)
+SYMBOL(towctrans, None, <wctype.h>)
+SYMBOL(towlower, std::, <cwctype>)
+SYMBOL(towlower, None, <cwctype>)
+SYMBOL(towlower, None, <wctype.h>)
+SYMBOL(towupper, std::, <cwctype>)
+SYMBOL(towupper, None, <cwctype>)
+SYMBOL(towupper, None, <wctype.h>)
+SYMBOL(transform, std::, <algorithm>)
+SYMBOL(transform_exclusive_scan, std::, <numeric>)
+SYMBOL(transform_inclusive_scan, std::, <numeric>)
+SYMBOL(transform_reduce, std::, <numeric>)
+SYMBOL(true_type, std::, <type_traits>)
+SYMBOL(trunc, std::, <cmath>)
+SYMBOL(trunc, None, <cmath>)
+SYMBOL(trunc, None, <math.h>)
+SYMBOL(truncf, std::, <cmath>)
+SYMBOL(truncf, None, <cmath>)
+SYMBOL(truncf, None, <math.h>)
+SYMBOL(truncl, std::, <cmath>)
+SYMBOL(truncl, None, <cmath>)
+SYMBOL(truncl, None, <math.h>)
+SYMBOL(try_lock, std::, <mutex>)
+SYMBOL(try_to_lock, std::, <mutex>)
+SYMBOL(try_to_lock_t, std::, <mutex>)
+SYMBOL(tuple, std::, <tuple>)
+SYMBOL(tuple_cat, std::, <tuple>)
+SYMBOL(tuple_element_t, std::, <tuple>)
+SYMBOL(tuple_size_v, std::, <tuple>)
+SYMBOL(type_identity, std::, <type_traits>)
+SYMBOL(type_identity_t, std::, <type_traits>)
+SYMBOL(type_index, std::, <typeindex>)
+SYMBOL(type_info, std::, <typeinfo>)
+SYMBOL(u16streampos, std::, <iosfwd>)
+SYMBOL(u16streampos, std::, <iosfwd>)
+SYMBOL(u16string, std::, <string>)
+SYMBOL(u16string_view, std::, <string_view>)
+SYMBOL(u32streampos, std::, <iosfwd>)
+SYMBOL(u32streampos, std::, <iosfwd>)
+SYMBOL(u32string, std::, <string>)
+SYMBOL(u32string_view, std::, <string_view>)
+SYMBOL(u8streampos, std::, <iosfwd>)
+SYMBOL(u8streampos, std::, <iosfwd>)
+SYMBOL(u8string, std::, <string>)
+SYMBOL(u8string_view, std::, <string_view>)
+SYMBOL(uint16_t, std::, <cstdint>)
+SYMBOL(uint16_t, None, <cstdint>)
+SYMBOL(uint16_t, None, <stdint.h>)
+SYMBOL(uint32_t, std::, <cstdint>)
+SYMBOL(uint32_t, None, <cstdint>)
+SYMBOL(uint32_t, None, <stdint.h>)
+SYMBOL(uint64_t, std::, <cstdint>)
+SYMBOL(uint64_t, None, <cstdint>)
+SYMBOL(uint64_t, None, <stdint.h>)
+SYMBOL(uint8_t, std::, <cstdint>)
+SYMBOL(uint8_t, None, <cstdint>)
+SYMBOL(uint8_t, None, <stdint.h>)
+SYMBOL(uint_fast16_t, std::, <cstdint>)
+SYMBOL(uint_fast16_t, None, <cstdint>)
+SYMBOL(uint_fast16_t, None, <stdint.h>)
+SYMBOL(uint_fast32_t, std::, <cstdint>)
+SYMBOL(uint_fast32_t, None, <cstdint>)
+SYMBOL(uint_fast32_t, None, <stdint.h>)
+SYMBOL(uint_fast64_t, std::, <cstdint>)
+SYMBOL(uint_fast64_t, None, <cstdint>)
+SYMBOL(uint_fast64_t, None, <stdint.h>)
+SYMBOL(uint_fast8_t, std::, <cstdint>)
+SYMBOL(uint_fast8_t, None, <cstdint>)
+SYMBOL(uint_fast8_t, None, <stdint.h>)
+SYMBOL(uint_least16_t, std::, <cstdint>)
+SYMBOL(uint_least16_t, None, <cstdint>)
+SYMBOL(uint_least16_t, None, <stdint.h>)
+SYMBOL(uint_least32_t, std::, <cstdint>)
+SYMBOL(uint_least32_t, None, <cstdint>)
+SYMBOL(uint_least32_t, None, <stdint.h>)
+SYMBOL(uint_least64_t, std::, <cstdint>)
+SYMBOL(uint_least64_t, None, <cstdint>)
+SYMBOL(uint_least64_t, None, <stdint.h>)
+SYMBOL(uint_least8_t, std::, <cstdint>)
+SYMBOL(uint_least8_t, None, <cstdint>)
+SYMBOL(uint_least8_t, None, <stdint.h>)
+SYMBOL(uintmax_t, std::, <cstdint>)
+SYMBOL(uintmax_t, None, <cstdint>)
+SYMBOL(uintmax_t, None, <stdint.h>)
+SYMBOL(uintptr_t, std::, <cstdint>)
+SYMBOL(uintptr_t, None, <cstdint>)
+SYMBOL(uintptr_t, None, <stdint.h>)
+SYMBOL(unary_function, std::, <functional>)
+SYMBOL(unary_negate, std::, <functional>)
+SYMBOL(uncaught_exception, std::, <exception>)
+SYMBOL(uncaught_exceptions, std::, <exception>)
+SYMBOL(undeclare_no_pointers, std::, <memory>)
+SYMBOL(undeclare_reachable, std::, <memory>)
+SYMBOL(underflow_error, std::, <stdexcept>)
+SYMBOL(underlying_type, std::, <type_traits>)
+SYMBOL(underlying_type_t, std::, <type_traits>)
+SYMBOL(unexpected_handler, std::, <exception>)
+SYMBOL(ungetc, std::, <cstdio>)
+SYMBOL(ungetc, None, <cstdio>)
+SYMBOL(ungetc, None, <stdio.h>)
+SYMBOL(ungetwc, std::, <cwchar>)
+SYMBOL(ungetwc, None, <cwchar>)
+SYMBOL(ungetwc, None, <wchar.h>)
+SYMBOL(uniform_int_distribution, std::, <random>)
+SYMBOL(uniform_random_bit_generator, std::, <random>)
+SYMBOL(uniform_real_distribution, std::, <random>)
+SYMBOL(uninitialized_construct_using_allocator, std::, <memory>)
+SYMBOL(uninitialized_copy, std::, <memory>)
+SYMBOL(uninitialized_copy_n, std::, <memory>)
+SYMBOL(uninitialized_default_construct, std::, <memory>)
+SYMBOL(uninitialized_default_construct_n, std::, <memory>)
+SYMBOL(uninitialized_fill, std::, <memory>)
+SYMBOL(uninitialized_fill_n, std::, <memory>)
+SYMBOL(uninitialized_move, std::, <memory>)
+SYMBOL(uninitialized_move_n, std::, <memory>)
+SYMBOL(uninitialized_value_construct, std::, <memory>)
+SYMBOL(uninitialized_value_construct_n, std::, <memory>)
+SYMBOL(unique, std::, <algorithm>)
+SYMBOL(unique_copy, std::, <algorithm>)
+SYMBOL(unique_lock, std::, <mutex>)
+SYMBOL(unique_ptr, std::, <memory>)
+SYMBOL(unitbuf, std::, <ios>)
+SYMBOL(unitbuf, std::, <iostream>)
+SYMBOL(unordered_map, std::, <unordered_map>)
+SYMBOL(unordered_multimap, std::, <unordered_map>)
+SYMBOL(unordered_multiset, std::, <unordered_set>)
+SYMBOL(unordered_set, std::, <unordered_set>)
+SYMBOL(unreachable, std::, <utility>)
+SYMBOL(unreachable_sentinel, std::, <iterator>)
+SYMBOL(unreachable_sentinel_t, std::, <iterator>)
+SYMBOL(unsigned_integral, std::, <concepts>)
+SYMBOL(upper_bound, std::, <algorithm>)
+SYMBOL(uppercase, std::, <ios>)
+SYMBOL(uppercase, std::, <iostream>)
+SYMBOL(use_facet, std::, <locale>)
+SYMBOL(uses_allocator, std::, <memory>)
+SYMBOL(uses_allocator_construction_args, std::, <memory>)
+SYMBOL(uses_allocator_v, std::, <memory>)
+SYMBOL(va_list, std::, <cstdarg>)
+SYMBOL(va_list, None, <cstdarg>)
+SYMBOL(va_list, None, <stdarg.h>)
+SYMBOL(valarray, std::, <valarray>)
+SYMBOL(variant, std::, <variant>)
+SYMBOL(variant_alternative, std::, <variant>)
+SYMBOL(variant_alternative_t, std::, <variant>)
+SYMBOL(variant_npos, std::, <variant>)
+SYMBOL(variant_size, std::, <variant>)
+SYMBOL(variant_size_v, std::, <variant>)
+SYMBOL(vector, std::, <vector>)
+SYMBOL(vformat, std::, <format>)
+SYMBOL(vformat_to, std::, <format>)
+SYMBOL(vfprintf, std::, <cstdio>)
+SYMBOL(vfprintf, None, <cstdio>)
+SYMBOL(vfprintf, None, <stdio.h>)
+SYMBOL(vfscanf, std::, <cstdio>)
+SYMBOL(vfscanf, None, <cstdio>)
+SYMBOL(vfscanf, None, <stdio.h>)
+SYMBOL(vfwprintf, std::, <cwchar>)
+SYMBOL(vfwprintf, None, <cwchar>)
+SYMBOL(vfwprintf, None, <wchar.h>)
+SYMBOL(vfwscanf, std::, <cwchar>)
+SYMBOL(vfwscanf, None, <cwchar>)
+SYMBOL(vfwscanf, None, <wchar.h>)
+SYMBOL(visit, std::, <variant>)
+SYMBOL(visit_format_arg, std::, <format>)
+SYMBOL(void_t, std::, <type_traits>)
+SYMBOL(vprintf, std::, <cstdio>)
+SYMBOL(vprintf, None, <cstdio>)
+SYMBOL(vprintf, None, <stdio.h>)
+SYMBOL(vscanf, std::, <cstdio>)
+SYMBOL(vscanf, None, <cstdio>)
+SYMBOL(vscanf, None, <stdio.h>)
+SYMBOL(vsnprintf, std::, <cstdio>)
+SYMBOL(vsnprintf, None, <cstdio>)
+SYMBOL(vsnprintf, None, <stdio.h>)
+SYMBOL(vsprintf, std::, <cstdio>)
+SYMBOL(vsprintf, None, <cstdio>)
+SYMBOL(vsprintf, None, <stdio.h>)
+SYMBOL(vsscanf, std::, <cstdio>)
+SYMBOL(vsscanf, None, <cstdio>)
+SYMBOL(vsscanf, None, <stdio.h>)
+SYMBOL(vswprintf, std::, <cwchar>)
+SYMBOL(vswprintf, None, <cwchar>)
+SYMBOL(vswprintf, None, <wchar.h>)
+SYMBOL(vswscanf, std::, <cwchar>)
+SYMBOL(vswscanf, None, <cwchar>)
+SYMBOL(vswscanf, None, <wchar.h>)
+SYMBOL(vwprintf, std::, <cwchar>)
+SYMBOL(vwprintf, None, <cwchar>)
+SYMBOL(vwprintf, None, <wchar.h>)
+SYMBOL(vwscanf, std::, <cwchar>)
+SYMBOL(vwscanf, None, <cwchar>)
+SYMBOL(vwscanf, None, <wchar.h>)
+SYMBOL(wbuffer_convert, std::, <locale>)
+SYMBOL(wcerr, std::, <iostream>)
+SYMBOL(wcin, std::, <iostream>)
+SYMBOL(wclog, std::, <iostream>)
+SYMBOL(wcmatch, std::, <regex>)
+SYMBOL(wcout, std::, <iostream>)
+SYMBOL(wcregex_iterator, std::, <regex>)
+SYMBOL(wcregex_token_iterator, std::, <regex>)
+SYMBOL(wcrtomb, std::, <cwchar>)
+SYMBOL(wcrtomb, None, <cwchar>)
+SYMBOL(wcrtomb, None, <wchar.h>)
+SYMBOL(wcscat, std::, <cwchar>)
+SYMBOL(wcscat, None, <cwchar>)
+SYMBOL(wcscat, None, <wchar.h>)
+SYMBOL(wcschr, std::, <cwchar>)
+SYMBOL(wcschr, None, <cwchar>)
+SYMBOL(wcschr, None, <wchar.h>)
+SYMBOL(wcscmp, std::, <cwchar>)
+SYMBOL(wcscmp, None, <cwchar>)
+SYMBOL(wcscmp, None, <wchar.h>)
+SYMBOL(wcscoll, std::, <cwchar>)
+SYMBOL(wcscoll, None, <cwchar>)
+SYMBOL(wcscoll, None, <wchar.h>)
+SYMBOL(wcscpy, std::, <cwchar>)
+SYMBOL(wcscpy, None, <cwchar>)
+SYMBOL(wcscpy, None, <wchar.h>)
+SYMBOL(wcscspn, std::, <cwchar>)
+SYMBOL(wcscspn, None, <cwchar>)
+SYMBOL(wcscspn, None, <wchar.h>)
+SYMBOL(wcsftime, std::, <cwchar>)
+SYMBOL(wcsftime, None, <cwchar>)
+SYMBOL(wcsftime, None, <wchar.h>)
+SYMBOL(wcslen, std::, <cwchar>)
+SYMBOL(wcslen, None, <cwchar>)
+SYMBOL(wcslen, None, <wchar.h>)
+SYMBOL(wcsncat, std::, <cwchar>)
+SYMBOL(wcsncat, None, <cwchar>)
+SYMBOL(wcsncat, None, <wchar.h>)
+SYMBOL(wcsncmp, std::, <cwchar>)
+SYMBOL(wcsncmp, None, <cwchar>)
+SYMBOL(wcsncmp, None, <wchar.h>)
+SYMBOL(wcsncpy, std::, <cwchar>)
+SYMBOL(wcsncpy, None, <cwchar>)
+SYMBOL(wcsncpy, None, <wchar.h>)
+SYMBOL(wcspbrk, std::, <cwchar>)
+SYMBOL(wcspbrk, None, <cwchar>)
+SYMBOL(wcspbrk, None, <wchar.h>)
+SYMBOL(wcsrchr, std::, <cwchar>)
+SYMBOL(wcsrchr, None, <cwchar>)
+SYMBOL(wcsrchr, None, <wchar.h>)
+SYMBOL(wcsrtombs, std::, <cwchar>)
+SYMBOL(wcsrtombs, None, <cwchar>)
+SYMBOL(wcsrtombs, None, <wchar.h>)
+SYMBOL(wcsspn, std::, <cwchar>)
+SYMBOL(wcsspn, None, <cwchar>)
+SYMBOL(wcsspn, None, <wchar.h>)
+SYMBOL(wcsstr, std::, <cwchar>)
+SYMBOL(wcsstr, None, <cwchar>)
+SYMBOL(wcsstr, None, <wchar.h>)
+SYMBOL(wcstod, std::, <cwchar>)
+SYMBOL(wcstod, None, <cwchar>)
+SYMBOL(wcstod, None, <wchar.h>)
+SYMBOL(wcstof, std::, <cwchar>)
+SYMBOL(wcstof, None, <cwchar>)
+SYMBOL(wcstof, None, <wchar.h>)
+SYMBOL(wcstoimax, std::, <cinttypes>)
+SYMBOL(wcstoimax, None, <cinttypes>)
+SYMBOL(wcstoimax, None, <inttypes.h>)
+SYMBOL(wcstok, std::, <cwchar>)
+SYMBOL(wcstok, None, <cwchar>)
+SYMBOL(wcstok, None, <wchar.h>)
+SYMBOL(wcstol, std::, <cwchar>)
+SYMBOL(wcstol, None, <cwchar>)
+SYMBOL(wcstol, None, <wchar.h>)
+SYMBOL(wcstold, std::, <cwchar>)
+SYMBOL(wcstold, None, <cwchar>)
+SYMBOL(wcstold, None, <wchar.h>)
+SYMBOL(wcstoll, std::, <cwchar>)
+SYMBOL(wcstoll, None, <cwchar>)
+SYMBOL(wcstoll, None, <wchar.h>)
+SYMBOL(wcstombs, std::, <cstdlib>)
+SYMBOL(wcstombs, None, <cstdlib>)
+SYMBOL(wcstombs, None, <stdlib.h>)
+SYMBOL(wcstoul, std::, <cwchar>)
+SYMBOL(wcstoul, None, <cwchar>)
+SYMBOL(wcstoul, None, <wchar.h>)
+SYMBOL(wcstoull, std::, <cwchar>)
+SYMBOL(wcstoull, None, <cwchar>)
+SYMBOL(wcstoull, None, <wchar.h>)
+SYMBOL(wcstoumax, std::, <cinttypes>)
+SYMBOL(wcstoumax, None, <cinttypes>)
+SYMBOL(wcstoumax, None, <inttypes.h>)
+SYMBOL(wcsub_match, std::, <regex>)
+SYMBOL(wcsxfrm, std::, <cwchar>)
+SYMBOL(wcsxfrm, None, <cwchar>)
+SYMBOL(wcsxfrm, None, <wchar.h>)
+SYMBOL(wctob, std::, <cwchar>)
+SYMBOL(wctob, None, <cwchar>)
+SYMBOL(wctob, None, <wchar.h>)
+SYMBOL(wctomb, std::, <cstdlib>)
+SYMBOL(wctomb, None, <cstdlib>)
+SYMBOL(wctomb, None, <stdlib.h>)
+SYMBOL(wctrans, std::, <cwctype>)
+SYMBOL(wctrans, None, <cwctype>)
+SYMBOL(wctrans, None, <wctype.h>)
+SYMBOL(wctrans_t, std::, <cwctype>)
+SYMBOL(wctrans_t, None, <cwctype>)
+SYMBOL(wctrans_t, None, <wctype.h>)
+SYMBOL(wctype, std::, <cwctype>)
+SYMBOL(wctype, None, <cwctype>)
+SYMBOL(wctype, None, <wctype.h>)
+SYMBOL(wctype_t, std::, <cwctype>)
+SYMBOL(wctype_t, None, <cwctype>)
+SYMBOL(wctype_t, None, <wctype.h>)
+SYMBOL(weak_order, std::, <compare>)
+SYMBOL(weak_ordering, std::, <compare>)
+SYMBOL(weak_ptr, std::, <memory>)
+SYMBOL(weakly_incrementable, std::, <iterator>)
+SYMBOL(weibull_distribution, std::, <random>)
+SYMBOL(wfilebuf, std::, <streambuf>)
+SYMBOL(wfilebuf, std::, <iostream>)
+SYMBOL(wfilebuf, std::, <iosfwd>)
+SYMBOL(wformat_args, std::, <format>)
+SYMBOL(wformat_context, std::, <format>)
+SYMBOL(wformat_parse_context, std::, <format>)
+SYMBOL(wfstream, std::, <fstream>)
+SYMBOL(wfstream, std::, <iosfwd>)
+SYMBOL(wifstream, std::, <fstream>)
+SYMBOL(wifstream, std::, <iosfwd>)
+SYMBOL(wios, std::, <ios>)
+SYMBOL(wios, std::, <iostream>)
+SYMBOL(wios, std::, <iosfwd>)
+SYMBOL(wiostream, std::, <istream>)
+SYMBOL(wiostream, std::, <iostream>)
+SYMBOL(wiostream, std::, <iosfwd>)
+SYMBOL(wispanstream, std::, <spanstream>)
+SYMBOL(wispanstream, std::, <iosfwd>)
+SYMBOL(wistream, std::, <istream>)
+SYMBOL(wistream, std::, <iostream>)
+SYMBOL(wistream, std::, <iosfwd>)
+SYMBOL(wistringstream, std::, <sstream>)
+SYMBOL(wistringstream, std::, <iosfwd>)
+SYMBOL(wmemchr, std::, <cwchar>)
+SYMBOL(wmemchr, None, <cwchar>)
+SYMBOL(wmemchr, None, <wchar.h>)
+SYMBOL(wmemcmp, std::, <cwchar>)
+SYMBOL(wmemcmp, None, <cwchar>)
+SYMBOL(wmemcmp, None, <wchar.h>)
+SYMBOL(wmemcpy, std::, <cwchar>)
+SYMBOL(wmemcpy, None, <cwchar>)
+SYMBOL(wmemcpy, None, <wchar.h>)
+SYMBOL(wmemmove, std::, <cwchar>)
+SYMBOL(wmemmove, None, <cwchar>)
+SYMBOL(wmemmove, None, <wchar.h>)
+SYMBOL(wmemset, std::, <cwchar>)
+SYMBOL(wmemset, None, <cwchar>)
+SYMBOL(wmemset, None, <wchar.h>)
+SYMBOL(wofstream, std::, <fstream>)
+SYMBOL(wofstream, std::, <iosfwd>)
+SYMBOL(wospanstream, std::, <spanstream>)
+SYMBOL(wospanstream, std::, <iosfwd>)
+SYMBOL(wostream, std::, <ostream>)
+SYMBOL(wostream, std::, <iostream>)
+SYMBOL(wostream, std::, <iosfwd>)
+SYMBOL(wostringstream, std::, <sstream>)
+SYMBOL(wostringstream, std::, <iosfwd>)
+SYMBOL(wosyncstream, std::, <syncstream>)
+SYMBOL(wosyncstream, std::, <iosfwd>)
+SYMBOL(wprintf, std::, <cwchar>)
+SYMBOL(wprintf, None, <cwchar>)
+SYMBOL(wprintf, None, <wchar.h>)
+SYMBOL(wregex, std::, <regex>)
+SYMBOL(ws, std::, <istream>)
+SYMBOL(ws, std::, <iostream>)
+SYMBOL(wscanf, std::, <cwchar>)
+SYMBOL(wscanf, None, <cwchar>)
+SYMBOL(wscanf, None, <wchar.h>)
+SYMBOL(wsmatch, std::, <regex>)
+SYMBOL(wspanbuf, std::, <spanstream>)
+SYMBOL(wspanbuf, std::, <iosfwd>)
+SYMBOL(wspanstream, std::, <spanstream>)
+SYMBOL(wspanstream, std::, <iosfwd>)
+SYMBOL(wsregex_iterator, std::, <regex>)
+SYMBOL(wsregex_token_iterator, std::, <regex>)
+SYMBOL(wssub_match, std::, <regex>)
+SYMBOL(wstreambuf, std::, <streambuf>)
+SYMBOL(wstreambuf, std::, <iostream>)
+SYMBOL(wstreambuf, std::, <iosfwd>)
+SYMBOL(wstreampos, std::, <iosfwd>)
+SYMBOL(wstreampos, std::, <iosfwd>)
+SYMBOL(wstring, std::, <string>)
+SYMBOL(wstring_convert, std::, <locale>)
+SYMBOL(wstring_view, std::, <string_view>)
+SYMBOL(wstringbuf, std::, <sstream>)
+SYMBOL(wstringbuf, std::, <iosfwd>)
+SYMBOL(wstringstream, std::, <sstream>)
+SYMBOL(wstringstream, std::, <iosfwd>)
+SYMBOL(wsyncbuf, std::, <syncstream>)
+SYMBOL(wsyncbuf, std::, <iosfwd>)
+SYMBOL(yocto, std::, <ratio>)
+SYMBOL(yotta, std::, <ratio>)
+SYMBOL(zepto, std::, <ratio>)
+SYMBOL(zetta, std::, <ratio>)
+SYMBOL(April, std::chrono::, <chrono>)
+SYMBOL(August, std::chrono::, <chrono>)
+SYMBOL(December, std::chrono::, <chrono>)
+SYMBOL(February, std::chrono::, <chrono>)
+SYMBOL(Friday, std::chrono::, <chrono>)
+SYMBOL(January, std::chrono::, <chrono>)
+SYMBOL(July, std::chrono::, <chrono>)
+SYMBOL(June, std::chrono::, <chrono>)
+SYMBOL(March, std::chrono::, <chrono>)
+SYMBOL(May, std::chrono::, <chrono>)
+SYMBOL(Monday, std::chrono::, <chrono>)
+SYMBOL(November, std::chrono::, <chrono>)
+SYMBOL(October, std::chrono::, <chrono>)
+SYMBOL(Saturday, std::chrono::, <chrono>)
+SYMBOL(September, std::chrono::, <chrono>)
+SYMBOL(Sunday, std::chrono::, <chrono>)
+SYMBOL(Thursday, std::chrono::, <chrono>)
+SYMBOL(Tuesday, std::chrono::, <chrono>)
+SYMBOL(Wednesday, std::chrono::, <chrono>)
+SYMBOL(abs, std::chrono::, <chrono>)
+SYMBOL(ambiguous_local_time, std::chrono::, <chrono>)
+SYMBOL(choose, std::chrono::, <chrono>)
+SYMBOL(clock_cast, std::chrono::, <chrono>)
+SYMBOL(clock_time_conversion, std::chrono::, <chrono>)
+SYMBOL(current_zone, std::chrono::, <chrono>)
+SYMBOL(day, std::chrono::, <chrono>)
+SYMBOL(duration, std::chrono::, <chrono>)
+SYMBOL(duration_cast, std::chrono::, <chrono>)
+SYMBOL(duration_values, std::chrono::, <chrono>)
+SYMBOL(file_clock, std::chrono::, <chrono>)
+SYMBOL(file_seconds, std::chrono::, <chrono>)
+SYMBOL(file_time, std::chrono::, <chrono>)
+SYMBOL(get_leap_second_info, std::chrono::, <chrono>)
+SYMBOL(gps_clock, std::chrono::, <chrono>)
+SYMBOL(gps_seconds, std::chrono::, <chrono>)
+SYMBOL(gps_time, std::chrono::, <chrono>)
+SYMBOL(hh_mm_ss, std::chrono::, <chrono>)
+SYMBOL(high_resolution_clock, std::chrono::, <chrono>)
+SYMBOL(hours, std::chrono::, <chrono>)
+SYMBOL(is_am, std::chrono::, <chrono>)
+SYMBOL(is_clock, std::chrono::, <chrono>)
+SYMBOL(is_clock_v, std::chrono::, <chrono>)
+SYMBOL(is_pm, std::chrono::, <chrono>)
+SYMBOL(last, std::chrono::, <chrono>)
+SYMBOL(last_spec, std::chrono::, <chrono>)
+SYMBOL(leap_second, std::chrono::, <chrono>)
+SYMBOL(leap_second_info, std::chrono::, <chrono>)
+SYMBOL(local_info, std::chrono::, <chrono>)
+SYMBOL(local_seconds, std::chrono::, <chrono>)
+SYMBOL(local_t, std::chrono::, <chrono>)
+SYMBOL(local_time, std::chrono::, <chrono>)
+SYMBOL(local_time_format, std::chrono::, <chrono>)
+SYMBOL(locate_zone, std::chrono::, <chrono>)
+SYMBOL(make12, std::chrono::, <chrono>)
+SYMBOL(make24, std::chrono::, <chrono>)
+SYMBOL(microseconds, std::chrono::, <chrono>)
+SYMBOL(milliseconds, std::chrono::, <chrono>)
+SYMBOL(minutes, std::chrono::, <chrono>)
+SYMBOL(month, std::chrono::, <chrono>)
+SYMBOL(month_day, std::chrono::, <chrono>)
+SYMBOL(month_day_last, std::chrono::, <chrono>)
+SYMBOL(month_weekday, std::chrono::, <chrono>)
+SYMBOL(month_weekday_last, std::chrono::, <chrono>)
+SYMBOL(nanoseconds, std::chrono::, <chrono>)
+SYMBOL(nonexistent_local_time, std::chrono::, <chrono>)
+SYMBOL(parse, std::chrono::, <chrono>)
+SYMBOL(seconds, std::chrono::, <chrono>)
+SYMBOL(steady_clock, std::chrono::, <chrono>)
+SYMBOL(sys_days, std::chrono::, <chrono>)
+SYMBOL(sys_info, std::chrono::, <chrono>)
+SYMBOL(sys_seconds, std::chrono::, <chrono>)
+SYMBOL(sys_time, std::chrono::, <chrono>)
+SYMBOL(system_clock, std::chrono::, <chrono>)
+SYMBOL(tai_clock, std::chrono::, <chrono>)
+SYMBOL(tai_seconds, std::chrono::, <chrono>)
+SYMBOL(tai_time, std::chrono::, <chrono>)
+SYMBOL(time_point, std::chrono::, <chrono>)
+SYMBOL(time_point_cast, std::chrono::, <chrono>)
+SYMBOL(time_zone, std::chrono::, <chrono>)
+SYMBOL(time_zone_link, std::chrono::, <chrono>)
+SYMBOL(treat_as_floating_point, std::chrono::, <chrono>)
+SYMBOL(treat_as_floating_point_v, std::chrono::, <chrono>)
+SYMBOL(tzdb, std::chrono::, <chrono>)
+SYMBOL(tzdb_list, std::chrono::, <chrono>)
+SYMBOL(utc_clock, std::chrono::, <chrono>)
+SYMBOL(utc_seconds, std::chrono::, <chrono>)
+SYMBOL(utc_time, std::chrono::, <chrono>)
+SYMBOL(weekday, std::chrono::, <chrono>)
+SYMBOL(weekday_indexed, std::chrono::, <chrono>)
+SYMBOL(weekday_last, std::chrono::, <chrono>)
+SYMBOL(year, std::chrono::, <chrono>)
+SYMBOL(year_month, std::chrono::, <chrono>)
+SYMBOL(year_month_day, std::chrono::, <chrono>)
+SYMBOL(year_month_day_last, std::chrono::, <chrono>)
+SYMBOL(year_month_weekday, std::chrono::, <chrono>)
+SYMBOL(year_month_weekday_last, std::chrono::, <chrono>)
+SYMBOL(zoned_seconds, std::chrono::, <chrono>)
+SYMBOL(zoned_time, std::chrono::, <chrono>)
+SYMBOL(zoned_traits, std::chrono::, <chrono>)
+SYMBOL(par, std::execution::, <execution>)
+SYMBOL(par_unseq, std::execution::, <execution>)
+SYMBOL(parallel_policy, std::execution::, <execution>)
+SYMBOL(parallel_unsequenced_policy, std::execution::, <execution>)
+SYMBOL(seq, std::execution::, <execution>)
+SYMBOL(sequenced_policy, std::execution::, <execution>)
+SYMBOL(unseq, std::execution::, <execution>)
+SYMBOL(unsequenced_policy, std::execution::, <execution>)
+SYMBOL(absolute, std::filesystem::, <filesystem>)
+SYMBOL(canonical, std::filesystem::, <filesystem>)
+SYMBOL(copy, std::filesystem::, <filesystem>)
+SYMBOL(copy_file, std::filesystem::, <filesystem>)
+SYMBOL(copy_options, std::filesystem::, <filesystem>)
+SYMBOL(copy_symlink, std::filesystem::, <filesystem>)
+SYMBOL(create_directories, std::filesystem::, <filesystem>)
+SYMBOL(create_directory, std::filesystem::, <filesystem>)
+SYMBOL(create_directory_symlink, std::filesystem::, <filesystem>)
+SYMBOL(create_hard_link, std::filesystem::, <filesystem>)
+SYMBOL(create_symlink, std::filesystem::, <filesystem>)
+SYMBOL(current_path, std::filesystem::, <filesystem>)
+SYMBOL(directory_entry, std::filesystem::, <filesystem>)
+SYMBOL(directory_iterator, std::filesystem::, <filesystem>)
+SYMBOL(directory_options, std::filesystem::, <filesystem>)
+SYMBOL(equivalent, std::filesystem::, <filesystem>)
+SYMBOL(exists, std::filesystem::, <filesystem>)
+SYMBOL(file_size, std::filesystem::, <filesystem>)
+SYMBOL(file_status, std::filesystem::, <filesystem>)
+SYMBOL(file_time_type, std::filesystem::, <filesystem>)
+SYMBOL(file_type, std::filesystem::, <filesystem>)
+SYMBOL(filesystem_error, std::filesystem::, <filesystem>)
+SYMBOL(hard_link_count, std::filesystem::, <filesystem>)
+SYMBOL(hash_value, std::filesystem::, <filesystem>)
+SYMBOL(is_block_file, std::filesystem::, <filesystem>)
+SYMBOL(is_character_file, std::filesystem::, <filesystem>)
+SYMBOL(is_directory, std::filesystem::, <filesystem>)
+SYMBOL(is_empty, std::filesystem::, <filesystem>)
+SYMBOL(is_fifo, std::filesystem::, <filesystem>)
+SYMBOL(is_other, std::filesystem::, <filesystem>)
+SYMBOL(is_regular_file, std::filesystem::, <filesystem>)
+SYMBOL(is_socket, std::filesystem::, <filesystem>)
+SYMBOL(is_symlink, std::filesystem::, <filesystem>)
+SYMBOL(last_write_time, std::filesystem::, <filesystem>)
+SYMBOL(path, std::filesystem::, <filesystem>)
+SYMBOL(perm_options, std::filesystem::, <filesystem>)
+SYMBOL(permissions, std::filesystem::, <filesystem>)
+SYMBOL(perms, std::filesystem::, <filesystem>)
+SYMBOL(proximate, std::filesystem::, <filesystem>)
+SYMBOL(read_symlink, std::filesystem::, <filesystem>)
+SYMBOL(recursive_directory_iterator, std::filesystem::, <filesystem>)
+SYMBOL(relative, std::filesystem::, <filesystem>)
+SYMBOL(remove, std::filesystem::, <filesystem>)
+SYMBOL(remove_all, std::filesystem::, <filesystem>)
+SYMBOL(rename, std::filesystem::, <filesystem>)
+SYMBOL(resize_file, std::filesystem::, <filesystem>)
+SYMBOL(space, std::filesystem::, <filesystem>)
+SYMBOL(space_info, std::filesystem::, <filesystem>)
+SYMBOL(status, std::filesystem::, <filesystem>)
+SYMBOL(status_known, std::filesystem::, <filesystem>)
+SYMBOL(symlink_status, std::filesystem::, <filesystem>)
+SYMBOL(temp_directory_path, std::filesystem::, <filesystem>)
+SYMBOL(u8path, std::filesystem::, <filesystem>)
+SYMBOL(weakly_canonical, std::filesystem::, <filesystem>)
+SYMBOL(e, std::numbers::, <numbers>)
+SYMBOL(e_v, std::numbers::, <numbers>)
+SYMBOL(egamma, std::numbers::, <numbers>)
+SYMBOL(egamma_v, std::numbers::, <numbers>)
+SYMBOL(inv_pi, std::numbers::, <numbers>)
+SYMBOL(inv_pi_v, std::numbers::, <numbers>)
+SYMBOL(inv_sqrt3, std::numbers::, <numbers>)
+SYMBOL(inv_sqrt3_v, std::numbers::, <numbers>)
+SYMBOL(inv_sqrtpi, std::numbers::, <numbers>)
+SYMBOL(inv_sqrtpi_v, std::numbers::, <numbers>)
+SYMBOL(ln10, std::numbers::, <numbers>)
+SYMBOL(ln10_v, std::numbers::, <numbers>)
+SYMBOL(ln2, std::numbers::, <numbers>)
+SYMBOL(ln2_v, std::numbers::, <numbers>)
+SYMBOL(log10e, std::numbers::, <numbers>)
+SYMBOL(log10e_v, std::numbers::, <numbers>)
+SYMBOL(log2e, std::numbers::, <numbers>)
+SYMBOL(log2e_v, std::numbers::, <numbers>)
+SYMBOL(phi, std::numbers::, <numbers>)
+SYMBOL(phi_v, std::numbers::, <numbers>)
+SYMBOL(pi, std::numbers::, <numbers>)
+SYMBOL(pi_v, std::numbers::, <numbers>)
+SYMBOL(sqrt2, std::numbers::, <numbers>)
+SYMBOL(sqrt2_v, std::numbers::, <numbers>)
+SYMBOL(sqrt3, std::numbers::, <numbers>)
+SYMBOL(sqrt3_v, std::numbers::, <numbers>)
+SYMBOL(basic_string, std::pmr::, <string>)
+SYMBOL(cmatch, std::pmr::, <regex>)
+SYMBOL(deque, std::pmr::, <deque>)
+SYMBOL(forward_list, std::pmr::, <forward_list>)
+SYMBOL(get_default_resource, std::pmr::, <memory_resource>)
+SYMBOL(list, std::pmr::, <list>)
+SYMBOL(map, std::pmr::, <map>)
+SYMBOL(match_results, std::pmr::, <regex>)
+SYMBOL(memory_resource, std::pmr::, <memory_resource>)
+SYMBOL(monotonic_buffer_resource, std::pmr::, <memory_resource>)
+SYMBOL(multimap, std::pmr::, <map>)
+SYMBOL(multiset, std::pmr::, <set>)
+SYMBOL(new_delete_resource, std::pmr::, <memory_resource>)
+SYMBOL(null_memory_resource, std::pmr::, <memory_resource>)
+SYMBOL(polymorphic_allocator, std::pmr::, <memory_resource>)
+SYMBOL(pool_options, std::pmr::, <memory_resource>)
+SYMBOL(set, std::pmr::, <set>)
+SYMBOL(set_default_resource, std::pmr::, <memory_resource>)
+SYMBOL(smatch, std::pmr::, <regex>)
+SYMBOL(stacktrace, std::pmr::, <stacktrace>)
+SYMBOL(string, std::pmr::, <string>)
+SYMBOL(synchronized_pool_resource, std::pmr::, <memory_resource>)
+SYMBOL(u16string, std::pmr::, <string>)
+SYMBOL(u32string, std::pmr::, <string>)
+SYMBOL(u8string, std::pmr::, <string>)
+SYMBOL(unordered_map, std::pmr::, <unordered_map>)
+SYMBOL(unordered_multimap, std::pmr::, <unordered_map>)
+SYMBOL(unordered_multiset, std::pmr::, <unordered_set>)
+SYMBOL(unordered_set, std::pmr::, <unordered_set>)
+SYMBOL(unsynchronized_pool_resource, std::pmr::, <memory_resource>)
+SYMBOL(vector, std::pmr::, <vector>)
+SYMBOL(wcmatch, std::pmr::, <regex>)
+SYMBOL(wsmatch, std::pmr::, <regex>)
+SYMBOL(wstring, std::pmr::, <string>)
+SYMBOL(adjacent_find, std::ranges::, <algorithm>)
+SYMBOL(advance, std::ranges::, <iterator>)
+SYMBOL(all_of, std::ranges::, <algorithm>)
+SYMBOL(any_of, std::ranges::, <algorithm>)
+SYMBOL(as_const_view, std::ranges::, <ranges>)
+SYMBOL(as_rvalue_view, std::ranges::, <ranges>)
+SYMBOL(basic_istream_view, std::ranges::, <ranges>)
+SYMBOL(begin, std::ranges::, <ranges>)
+SYMBOL(bidirectional_range, std::ranges::, <ranges>)
+SYMBOL(binary_transform_result, std::ranges::, <algorithm>)
+SYMBOL(borrowed_iterator_t, std::ranges::, <ranges>)
+SYMBOL(borrowed_range, std::ranges::, <ranges>)
+SYMBOL(borrowed_subrange_t, std::ranges::, <ranges>)
+SYMBOL(cbegin, std::ranges::, <ranges>)
+SYMBOL(cdata, std::ranges::, <ranges>)
+SYMBOL(cend, std::ranges::, <ranges>)
+SYMBOL(clamp, std::ranges::, <algorithm>)
+SYMBOL(common_range, std::ranges::, <ranges>)
+SYMBOL(common_view, std::ranges::, <ranges>)
+SYMBOL(const_iterator_t, std::ranges::, <ranges>)
+SYMBOL(constant_range, std::ranges::, <ranges>)
+SYMBOL(construct_at, std::ranges::, <memory>)
+SYMBOL(contains, std::ranges::, <algorithm>)
+SYMBOL(contains_subrange, std::ranges::, <algorithm>)
+SYMBOL(contiguous_range, std::ranges::, <ranges>)
+SYMBOL(copy, std::ranges::, <algorithm>)
+SYMBOL(copy_backward, std::ranges::, <algorithm>)
+SYMBOL(copy_backward_result, std::ranges::, <algorithm>)
+SYMBOL(copy_if, std::ranges::, <algorithm>)
+SYMBOL(copy_if_result, std::ranges::, <algorithm>)
+SYMBOL(copy_n, std::ranges::, <algorithm>)
+SYMBOL(copy_n_result, std::ranges::, <algorithm>)
+SYMBOL(copy_result, std::ranges::, <algorithm>)
+SYMBOL(count, std::ranges::, <algorithm>)
+SYMBOL(count_if, std::ranges::, <algorithm>)
+SYMBOL(crbegin, std::ranges::, <ranges>)
+SYMBOL(crend, std::ranges::, <ranges>)
+SYMBOL(dangling, std::ranges::, <ranges>)
+SYMBOL(data, std::ranges::, <ranges>)
+SYMBOL(destroy, std::ranges::, <memory>)
+SYMBOL(destroy_at, std::ranges::, <memory>)
+SYMBOL(destroy_n, std::ranges::, <memory>)
+SYMBOL(disable_sized_range, std::ranges::, <ranges>)
+SYMBOL(distance, std::ranges::, <iterator>)
+SYMBOL(drop_view, std::ranges::, <ranges>)
+SYMBOL(drop_while_view, std::ranges::, <ranges>)
+SYMBOL(elements_view, std::ranges::, <ranges>)
+SYMBOL(empty, std::ranges::, <ranges>)
+SYMBOL(empty_view, std::ranges::, <ranges>)
+SYMBOL(enable_borrowed_range, std::ranges::, <ranges>)
+SYMBOL(enable_view, std::ranges::, <ranges>)
+SYMBOL(end, std::ranges::, <ranges>)
+SYMBOL(ends_with, std::ranges::, <algorithm>)
+SYMBOL(equal, std::ranges::, <algorithm>)
+SYMBOL(equal_to, std::ranges::, <functional>)
+SYMBOL(fill, std::ranges::, <algorithm>)
+SYMBOL(fill_n, std::ranges::, <algorithm>)
+SYMBOL(filter_view, std::ranges::, <ranges>)
+SYMBOL(find, std::ranges::, <algorithm>)
+SYMBOL(find_end, std::ranges::, <algorithm>)
+SYMBOL(find_first_of, std::ranges::, <algorithm>)
+SYMBOL(find_if, std::ranges::, <algorithm>)
+SYMBOL(find_if_not, std::ranges::, <algorithm>)
+SYMBOL(find_last, std::ranges::, <algorithm>)
+SYMBOL(find_last_if, std::ranges::, <algorithm>)
+SYMBOL(find_last_if_not, std::ranges::, <algorithm>)
+SYMBOL(for_each, std::ranges::, <algorithm>)
+SYMBOL(for_each_n, std::ranges::, <algorithm>)
+SYMBOL(for_each_n_result, std::ranges::, <algorithm>)
+SYMBOL(for_each_result, std::ranges::, <algorithm>)
+SYMBOL(forward_range, std::ranges::, <ranges>)
+SYMBOL(generate, std::ranges::, <algorithm>)
+SYMBOL(generate_n, std::ranges::, <algorithm>)
+SYMBOL(greater, std::ranges::, <functional>)
+SYMBOL(greater_equal, std::ranges::, <functional>)
+SYMBOL(in_found_result, std::ranges::, <algorithm>)
+SYMBOL(in_fun_result, std::ranges::, <algorithm>)
+SYMBOL(in_in_out_result, std::ranges::, <algorithm>)
+SYMBOL(in_in_result, std::ranges::, <algorithm>)
+SYMBOL(in_out_out_result, std::ranges::, <algorithm>)
+SYMBOL(in_out_result, std::ranges::, <algorithm>)
+SYMBOL(in_value_result, std::ranges::, <algorithm>)
+SYMBOL(includes, std::ranges::, <algorithm>)
+SYMBOL(inplace_merge, std::ranges::, <algorithm>)
+SYMBOL(input_range, std::ranges::, <ranges>)
+SYMBOL(iota, std::ranges::, <numeric>)
+SYMBOL(iota_result, std::ranges::, <numeric>)
+SYMBOL(iota_view, std::ranges::, <ranges>)
+SYMBOL(is_heap, std::ranges::, <algorithm>)
+SYMBOL(is_heap_until, std::ranges::, <algorithm>)
+SYMBOL(is_partitioned, std::ranges::, <algorithm>)
+SYMBOL(is_permutation, std::ranges::, <algorithm>)
+SYMBOL(is_sorted, std::ranges::, <algorithm>)
+SYMBOL(is_sorted_until, std::ranges::, <algorithm>)
+SYMBOL(istream_view, std::ranges::, <ranges>)
+SYMBOL(iter_move, std::ranges::, <iterator>)
+SYMBOL(iter_swap, std::ranges::, <iterator>)
+SYMBOL(iterator_t, std::ranges::, <ranges>)
+SYMBOL(join_view, std::ranges::, <ranges>)
+SYMBOL(join_with_view, std::ranges::, <ranges>)
+SYMBOL(keys_view, std::ranges::, <ranges>)
+SYMBOL(lazy_split_view, std::ranges::, <ranges>)
+SYMBOL(less, std::ranges::, <functional>)
+SYMBOL(less_equal, std::ranges::, <functional>)
+SYMBOL(lexicographical_compare, std::ranges::, <algorithm>)
+SYMBOL(make_heap, std::ranges::, <algorithm>)
+SYMBOL(max, std::ranges::, <algorithm>)
+SYMBOL(max_element, std::ranges::, <algorithm>)
+SYMBOL(merge, std::ranges::, <algorithm>)
+SYMBOL(merge_result, std::ranges::, <algorithm>)
+SYMBOL(min, std::ranges::, <algorithm>)
+SYMBOL(min_element, std::ranges::, <algorithm>)
+SYMBOL(min_max_result, std::ranges::, <algorithm>)
+SYMBOL(minmax, std::ranges::, <algorithm>)
+SYMBOL(minmax_element, std::ranges::, <algorithm>)
+SYMBOL(minmax_element_result, std::ranges::, <algorithm>)
+SYMBOL(minmax_result, std::ranges::, <algorithm>)
+SYMBOL(mismatch, std::ranges::, <algorithm>)
+SYMBOL(mismatch_result, std::ranges::, <algorithm>)
+SYMBOL(move, std::ranges::, <algorithm>)
+SYMBOL(move_backward, std::ranges::, <algorithm>)
+SYMBOL(move_backward_result, std::ranges::, <algorithm>)
+SYMBOL(move_result, std::ranges::, <algorithm>)
+SYMBOL(next, std::ranges::, <iterator>)
+SYMBOL(next_permutation, std::ranges::, <algorithm>)
+SYMBOL(next_permutation_result, std::ranges::, <algorithm>)
+SYMBOL(none_of, std::ranges::, <algorithm>)
+SYMBOL(not_equal_to, std::ranges::, <functional>)
+SYMBOL(nth_element, std::ranges::, <algorithm>)
+SYMBOL(out_value_result, std::ranges::, <algorithm>)
+SYMBOL(output_range, std::ranges::, <ranges>)
+SYMBOL(owning_view, std::ranges::, <ranges>)
+SYMBOL(partial_sort, std::ranges::, <algorithm>)
+SYMBOL(partial_sort_copy, std::ranges::, <algorithm>)
+SYMBOL(partial_sort_copy_result, std::ranges::, <algorithm>)
+SYMBOL(partition, std::ranges::, <algorithm>)
+SYMBOL(partition_copy, std::ranges::, <algorithm>)
+SYMBOL(partition_copy_result, std::ranges::, <algorithm>)
+SYMBOL(partition_point, std::ranges::, <algorithm>)
+SYMBOL(pop_heap, std::ranges::, <algorithm>)
+SYMBOL(prev, std::ranges::, <iterator>)
+SYMBOL(prev_permutation, std::ranges::, <algorithm>)
+SYMBOL(prev_permutation_result, std::ranges::, <algorithm>)
+SYMBOL(push_heap, std::ranges::, <algorithm>)
+SYMBOL(random_access_range, std::ranges::, <ranges>)
+SYMBOL(range, std::ranges::, <ranges>)
+SYMBOL(range_const_reference_t, std::ranges::, <ranges>)
+SYMBOL(range_difference_t, std::ranges::, <ranges>)
+SYMBOL(range_reference_t, std::ranges::, <ranges>)
+SYMBOL(range_rvalue_reference_t, std::ranges::, <ranges>)
+SYMBOL(range_size_t, std::ranges::, <ranges>)
+SYMBOL(range_value_t, std::ranges::, <ranges>)
+SYMBOL(rbegin, std::ranges::, <ranges>)
+SYMBOL(ref_view, std::ranges::, <ranges>)
+SYMBOL(remove, std::ranges::, <algorithm>)
+SYMBOL(remove_copy, std::ranges::, <algorithm>)
+SYMBOL(remove_copy_if, std::ranges::, <algorithm>)
+SYMBOL(remove_copy_if_result, std::ranges::, <algorithm>)
+SYMBOL(remove_copy_result, std::ranges::, <algorithm>)
+SYMBOL(remove_if, std::ranges::, <algorithm>)
+SYMBOL(rend, std::ranges::, <ranges>)
+SYMBOL(replace, std::ranges::, <algorithm>)
+SYMBOL(replace_copy, std::ranges::, <algorithm>)
+SYMBOL(replace_copy_if, std::ranges::, <algorithm>)
+SYMBOL(replace_copy_if_result, std::ranges::, <algorithm>)
+SYMBOL(replace_copy_result, std::ranges::, <algorithm>)
+SYMBOL(replace_if, std::ranges::, <algorithm>)
+SYMBOL(reverse, std::ranges::, <algorithm>)
+SYMBOL(reverse_copy, std::ranges::, <algorithm>)
+SYMBOL(reverse_copy_result, std::ranges::, <algorithm>)
+SYMBOL(reverse_view, std::ranges::, <ranges>)
+SYMBOL(rotate, std::ranges::, <algorithm>)
+SYMBOL(rotate_copy, std::ranges::, <algorithm>)
+SYMBOL(rotate_copy_result, std::ranges::, <algorithm>)
+SYMBOL(sample, std::ranges::, <algorithm>)
+SYMBOL(search, std::ranges::, <algorithm>)
+SYMBOL(search_n, std::ranges::, <algorithm>)
+SYMBOL(sentinel_t, std::ranges::, <ranges>)
+SYMBOL(set_difference, std::ranges::, <algorithm>)
+SYMBOL(set_difference_result, std::ranges::, <algorithm>)
+SYMBOL(set_intersection, std::ranges::, <algorithm>)
+SYMBOL(set_intersection_result, std::ranges::, <algorithm>)
+SYMBOL(set_symmetric_difference, std::ranges::, <algorithm>)
+SYMBOL(set_symmetric_difference_result, std::ranges::, <algorithm>)
+SYMBOL(set_union, std::ranges::, <algorithm>)
+SYMBOL(set_union_result, std::ranges::, <algorithm>)
+SYMBOL(shift_left, std::ranges::, <algorithm>)
+SYMBOL(shift_right, std::ranges::, <algorithm>)
+SYMBOL(shuffle, std::ranges::, <algorithm>)
+SYMBOL(single_view, std::ranges::, <ranges>)
+SYMBOL(size, std::ranges::, <ranges>)
+SYMBOL(sized_range, std::ranges::, <ranges>)
+SYMBOL(sort, std::ranges::, <algorithm>)
+SYMBOL(sort_heap, std::ranges::, <algorithm>)
+SYMBOL(split_view, std::ranges::, <ranges>)
+SYMBOL(ssize, std::ranges::, <ranges>)
+SYMBOL(stable_partition, std::ranges::, <algorithm>)
+SYMBOL(stable_sort, std::ranges::, <algorithm>)
+SYMBOL(starts_with, std::ranges::, <algorithm>)
+SYMBOL(subrange, std::ranges::, <ranges>)
+SYMBOL(subrange_kind, std::ranges::, <ranges>)
+SYMBOL(swap, std::ranges::, <concepts>)
+SYMBOL(swap_ranges, std::ranges::, <algorithm>)
+SYMBOL(swap_ranges_result, std::ranges::, <algorithm>)
+SYMBOL(take_view, std::ranges::, <ranges>)
+SYMBOL(take_while_view, std::ranges::, <ranges>)
+SYMBOL(to, std::ranges::, <ranges>)
+SYMBOL(transform, std::ranges::, <algorithm>)
+SYMBOL(transform_view, std::ranges::, <ranges>)
+SYMBOL(unary_transform_result, std::ranges::, <algorithm>)
+SYMBOL(uninitialized_copy, std::ranges::, <memory>)
+SYMBOL(uninitialized_copy_n, std::ranges::, <memory>)
+SYMBOL(uninitialized_copy_n_result, std::ranges::, <memory>)
+SYMBOL(uninitialized_copy_result, std::ranges::, <memory>)
+SYMBOL(uninitialized_default_construct, std::ranges::, <memory>)
+SYMBOL(uninitialized_default_construct_n, std::ranges::, <memory>)
+SYMBOL(uninitialized_fill, std::ranges::, <memory>)
+SYMBOL(uninitialized_fill_n, std::ranges::, <memory>)
+SYMBOL(uninitialized_move, std::ranges::, <memory>)
+SYMBOL(uninitialized_move_n, std::ranges::, <memory>)
+SYMBOL(uninitialized_move_n_result, std::ranges::, <memory>)
+SYMBOL(uninitialized_move_result, std::ranges::, <memory>)
+SYMBOL(uninitialized_value_construct, std::ranges::, <memory>)
+SYMBOL(uninitialized_value_construct_n, std::ranges::, <memory>)
+SYMBOL(unique, std::ranges::, <algorithm>)
+SYMBOL(unique_copy, std::ranges::, <algorithm>)
+SYMBOL(unique_copy_result, std::ranges::, <algorithm>)
+SYMBOL(values_view, std::ranges::, <ranges>)
+SYMBOL(view, std::ranges::, <ranges>)
+SYMBOL(view_base, std::ranges::, <ranges>)
+SYMBOL(view_interface, std::ranges::, <ranges>)
+SYMBOL(viewable_range, std::ranges::, <ranges>)
+SYMBOL(wistream_view, std::ranges::, <ranges>)
+SYMBOL(zip_transform_view, std::ranges::, <ranges>)
+SYMBOL(zip_view, std::ranges::, <ranges>)
+SYMBOL(all, std::ranges::views::, <ranges>)
+SYMBOL(all_t, std::ranges::views::, <ranges>)
+SYMBOL(as_const, std::ranges::views::, <ranges>)
+SYMBOL(as_rvalue, std::ranges::views::, <ranges>)
+SYMBOL(common, std::ranges::views::, <ranges>)
+SYMBOL(counted, std::ranges::views::, <ranges>)
+SYMBOL(drop, std::ranges::views::, <ranges>)
+SYMBOL(drop_while, std::ranges::views::, <ranges>)
+SYMBOL(elements, std::ranges::views::, <ranges>)
+SYMBOL(empty, std::ranges::views::, <ranges>)
+SYMBOL(filter, std::ranges::views::, <ranges>)
+SYMBOL(iota, std::ranges::views::, <ranges>)
+SYMBOL(istream, std::ranges::views::, <ranges>)
+SYMBOL(istream, std::ranges::views::, <iosfwd>)
+SYMBOL(join, std::ranges::views::, <ranges>)
+SYMBOL(join_with, std::ranges::views::, <ranges>)
+SYMBOL(keys, std::ranges::views::, <ranges>)
+SYMBOL(lazy_split, std::ranges::views::, <ranges>)
+SYMBOL(reverse, std::ranges::views::, <ranges>)
+SYMBOL(single, std::ranges::views::, <ranges>)
+SYMBOL(split, std::ranges::views::, <ranges>)
+SYMBOL(take, std::ranges::views::, <ranges>)
+SYMBOL(take_while, std::ranges::views::, <ranges>)
+SYMBOL(transform, std::ranges::views::, <ranges>)
+SYMBOL(values, std::ranges::views::, <ranges>)
+SYMBOL(zip, std::ranges::views::, <ranges>)
+SYMBOL(zip_transform, std::ranges::views::, <ranges>)
+SYMBOL(ECMAScript, std::regex_constants::, <regex>)
+SYMBOL(awk, std::regex_constants::, <regex>)
+SYMBOL(basic, std::regex_constants::, <regex>)
+SYMBOL(collate, std::regex_constants::, <regex>)
+SYMBOL(egrep, std::regex_constants::, <regex>)
+SYMBOL(error_backref, std::regex_constants::, <regex>)
+SYMBOL(error_badbrace, std::regex_constants::, <regex>)
+SYMBOL(error_badrepeat, std::regex_constants::, <regex>)
+SYMBOL(error_brace, std::regex_constants::, <regex>)
+SYMBOL(error_brack, std::regex_constants::, <regex>)
+SYMBOL(error_collate, std::regex_constants::, <regex>)
+SYMBOL(error_complexity, std::regex_constants::, <regex>)
+SYMBOL(error_ctype, std::regex_constants::, <regex>)
+SYMBOL(error_escape, std::regex_constants::, <regex>)
+SYMBOL(error_paren, std::regex_constants::, <regex>)
+SYMBOL(error_range, std::regex_constants::, <regex>)
+SYMBOL(error_space, std::regex_constants::, <regex>)
+SYMBOL(error_stack, std::regex_constants::, <regex>)
+SYMBOL(error_type, std::regex_constants::, <regex>)
+SYMBOL(extended, std::regex_constants::, <regex>)
+SYMBOL(format_default, std::regex_constants::, <regex>)
+SYMBOL(format_first_only, std::regex_constants::, <regex>)
+SYMBOL(format_no_copy, std::regex_constants::, <regex>)
+SYMBOL(format_sed, std::regex_constants::, <regex>)
+SYMBOL(grep, std::regex_constants::, <regex>)
+SYMBOL(icase, std::regex_constants::, <regex>)
+SYMBOL(match_any, std::regex_constants::, <regex>)
+SYMBOL(match_continuous, std::regex_constants::, <regex>)
+SYMBOL(match_default, std::regex_constants::, <regex>)
+SYMBOL(match_flag_type, std::regex_constants::, <regex>)
+SYMBOL(match_not_bol, std::regex_constants::, <regex>)
+SYMBOL(match_not_bow, std::regex_constants::, <regex>)
+SYMBOL(match_not_eol, std::regex_constants::, <regex>)
+SYMBOL(match_not_eow, std::regex_constants::, <regex>)
+SYMBOL(match_not_null, std::regex_constants::, <regex>)
+SYMBOL(match_prev_avail, std::regex_constants::, <regex>)
+SYMBOL(multiline, std::regex_constants::, <regex>)
+SYMBOL(nosubs, std::regex_constants::, <regex>)
+SYMBOL(optimize, std::regex_constants::, <regex>)
+SYMBOL(syntax_option_type, std::regex_constants::, <regex>)
+SYMBOL(get_id, std::this_thread::, <thread>)
+SYMBOL(sleep_for, std::this_thread::, <thread>)
+SYMBOL(sleep_until, std::this_thread::, <thread>)
+SYMBOL(yield, std::this_thread::, <thread>)
+SYMBOL(all, std::views::, <ranges>)
+SYMBOL(all_t, std::views::, <ranges>)
+SYMBOL(as_const, std::views::, <ranges>)
+SYMBOL(as_rvalue, std::views::, <ranges>)
+SYMBOL(common, std::views::, <ranges>)
+SYMBOL(counted, std::views::, <ranges>)
+SYMBOL(drop, std::views::, <ranges>)
+SYMBOL(drop_while, std::views::, <ranges>)
+SYMBOL(elements, std::views::, <ranges>)
+SYMBOL(empty, std::views::, <ranges>)
+SYMBOL(filter, std::views::, <ranges>)
+SYMBOL(iota, std::views::, <ranges>)
+SYMBOL(istream, std::views::, <ranges>)
+SYMBOL(istream, std::views::, <iosfwd>)
+SYMBOL(join, std::views::, <ranges>)
+SYMBOL(join_with, std::views::, <ranges>)
+SYMBOL(keys, std::views::, <ranges>)
+SYMBOL(lazy_split, std::views::, <ranges>)
+SYMBOL(reverse, std::views::, <ranges>)
+SYMBOL(single, std::views::, <ranges>)
+SYMBOL(split, std::views::, <ranges>)
+SYMBOL(take, std::views::, <ranges>)
+SYMBOL(take_while, std::views::, <ranges>)
+SYMBOL(transform, std::views::, <ranges>)
+SYMBOL(values, std::views::, <ranges>)
+SYMBOL(zip, std::views::, <ranges>)
+SYMBOL(zip_transform, std::views::, <ranges>)
diff --git a/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdTsSymbolMap.inc b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdTsSymbolMap.inc
new file mode 100644
index 000000000000..2733cb3f2ec4
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Tooling/Inclusions/Stdlib/StdTsSymbolMap.inc
@@ -0,0 +1,52 @@
+// These are derived from N4100[fs.filesystem.synopsis], final draft for
+// experimental filesystem.
+SYMBOL(absolute, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(canonical, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(copy, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(copy_file, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(copy_options, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(copy_symlink, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(create_directories, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(create_directory, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(create_directory_symlink, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(create_hard_link, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(create_symlink, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(current_path, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(directory_entry, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(directory_iterator, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(directory_options, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(equivalent, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(exists, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(file_size, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(file_status, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(file_time_type, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(file_type, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(filesystem_error, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(hard_link_count, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(is_block_file, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(is_character_file, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(is_directory, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(is_empty, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(is_fifo, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(is_other, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(is_regular_file, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(is_socket, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(is_symlink, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(last_write_time, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(path, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(permissions, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(perms, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(read_symlink, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(recursive_directory_iterator, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(remove, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(remove_all, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(rename, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(resize_file, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(space, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(space_info, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(status, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(status_known, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(symlink_status, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(system_complete, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(temp_directory_path, std::experimental::filesystem::, <experimental/filesystem>)
+SYMBOL(u8path, std::experimental::filesystem::, <experimental/filesystem>)
diff --git a/contrib/llvm-project/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp b/contrib/llvm-project/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp
index c1e25c41f719..995019ca5a4d 100644
--- a/contrib/llvm-project/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp
@@ -49,9 +49,7 @@
#include "clang/Tooling/CompilationDatabase.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Option/OptTable.h"
#include "llvm/Support/Debug.h"
@@ -59,6 +57,7 @@
#include "llvm/Support/StringSaver.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
+#include <optional>
namespace clang {
namespace tooling {
@@ -129,7 +128,7 @@ struct TransferableCommand {
// Flags that should not apply to all files are stripped from CommandLine.
CompileCommand Cmd;
// Language detected from -x or the filename. Never TY_INVALID.
- Optional<types::ID> Type;
+ std::optional<types::ID> Type;
// Standard specified by -std.
LangStandard::Kind Std = LangStandard::lang_unspecified;
// Whether the command line is for the cl-compatible driver.
@@ -148,7 +147,7 @@ struct TransferableCommand {
TmpArgv.push_back(S.c_str());
ClangCLMode = !TmpArgv.empty() &&
driver::IsClangCL(driver::getDriverMode(
- TmpArgv.front(), llvm::makeArrayRef(TmpArgv).slice(1)));
+ TmpArgv.front(), llvm::ArrayRef(TmpArgv).slice(1)));
ArgList = {TmpArgv.begin(), TmpArgv.end()};
}
@@ -165,8 +164,7 @@ struct TransferableCommand {
const unsigned OldPos = Pos;
std::unique_ptr<llvm::opt::Arg> Arg(OptTable.ParseOneArg(
ArgList, Pos,
- /* Include */ ClangCLMode ? CoreOption | CLOption : 0,
- /* Exclude */ ClangCLMode ? 0 : CLOption));
+ llvm::opt::Visibility(ClangCLMode ? CLOption : ClangOption)));
if (!Arg)
continue;
@@ -208,7 +206,7 @@ struct TransferableCommand {
Type = foldType(*Type);
// The contract is to store None instead of TY_INVALID.
if (Type == types::TY_INVALID)
- Type = llvm::None;
+ Type = std::nullopt;
}
// Produce a CompileCommand for \p filename, based on this one.
@@ -243,8 +241,7 @@ struct TransferableCommand {
llvm::Twine(ClangCLMode ? "/std:" : "-std=") +
LangStandard::getLangStandardForKind(Std).getName()).str());
}
- if (Filename.startswith("-") || (ClangCLMode && Filename.startswith("/")))
- Result.CommandLine.push_back("--");
+ Result.CommandLine.push_back("--");
Result.CommandLine.push_back(std::string(Filename));
return Result;
}
@@ -281,7 +278,7 @@ private:
}
// Try to interpret the argument as a type specifier, e.g. '-x'.
- Optional<types::ID> tryParseTypeArg(const llvm::opt::Arg &Arg) {
+ std::optional<types::ID> tryParseTypeArg(const llvm::opt::Arg &Arg) {
const llvm::opt::Option &Opt = Arg.getOption();
using namespace driver::options;
if (ClangCLMode) {
@@ -293,15 +290,15 @@ private:
if (Opt.matches(driver::options::OPT_x))
return types::lookupTypeForTypeSpecifier(Arg.getValue());
}
- return None;
+ return std::nullopt;
}
// Try to interpret the argument as '-std='.
- Optional<LangStandard::Kind> tryParseStdArg(const llvm::opt::Arg &Arg) {
+ std::optional<LangStandard::Kind> tryParseStdArg(const llvm::opt::Arg &Arg) {
using namespace driver::options;
if (Arg.getOption().matches(ClangCLMode ? OPT__SLASH_std : OPT_std_EQ))
return LangStandard::getLangKind(Arg.getValue());
- return None;
+ return std::nullopt;
}
};
@@ -329,7 +326,7 @@ public:
StringRef Path = Strings.save(StringRef(OriginalPaths[I]).lower());
Paths.emplace_back(Path, I);
- Types.push_back(foldType(guessType(Path)));
+ Types.push_back(foldType(guessType(OriginalPaths[I])));
Stems.emplace_back(sys::path::stem(Path), I);
auto Dir = ++sys::path::rbegin(Path), DirEnd = sys::path::rend(Path);
for (int J = 0; J < DirectorySegmentsIndexed && Dir != DirEnd; ++J, ++Dir)
diff --git a/contrib/llvm-project/clang/lib/Tooling/JSONCompilationDatabase.cpp b/contrib/llvm-project/clang/lib/Tooling/JSONCompilationDatabase.cpp
index 97ba7e411fbb..a77686996879 100644
--- a/contrib/llvm-project/clang/lib/Tooling/JSONCompilationDatabase.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/JSONCompilationDatabase.cpp
@@ -15,25 +15,25 @@
#include "clang/Tooling/CompilationDatabase.h"
#include "clang/Tooling/CompilationDatabasePluginRegistry.h"
#include "clang/Tooling/Tooling.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorOr.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/StringSaver.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/YAMLParser.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/Triple.h"
#include <cassert>
#include <memory>
+#include <optional>
#include <string>
#include <system_error>
#include <tuple>
@@ -135,15 +135,12 @@ class CommandLineArgumentParser {
std::vector<std::string> unescapeCommandLine(JSONCommandLineSyntax Syntax,
StringRef EscapedCommandLine) {
if (Syntax == JSONCommandLineSyntax::AutoDetect) {
+#ifdef _WIN32
+ // Assume Windows command line parsing on Win32
+ Syntax = JSONCommandLineSyntax::Windows;
+#else
Syntax = JSONCommandLineSyntax::Gnu;
- llvm::Triple Triple(llvm::sys::getProcessTriple());
- if (Triple.getOS() == llvm::Triple::OSType::Win32) {
- // Assume Windows command line parsing on Win32 unless the triple
- // explicitly tells us otherwise.
- if (!Triple.hasEnvironment() ||
- Triple.getEnvironment() == llvm::Triple::EnvironmentType::MSVC)
- Syntax = JSONCommandLineSyntax::Windows;
- }
+#endif
}
if (Syntax == JSONCommandLineSyntax::Windows) {
@@ -352,7 +349,7 @@ bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
return false;
}
llvm::yaml::ScalarNode *Directory = nullptr;
- llvm::Optional<std::vector<llvm::yaml::ScalarNode *>> Command;
+ std::optional<std::vector<llvm::yaml::ScalarNode *>> Command;
llvm::yaml::ScalarNode *File = nullptr;
llvm::yaml::ScalarNode *Output = nullptr;
for (auto& NextKeyValue : *Object) {
@@ -422,14 +419,13 @@ bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
SmallString<128> NativeFilePath;
if (llvm::sys::path::is_relative(FileName)) {
SmallString<8> DirectoryStorage;
- SmallString<128> AbsolutePath(
- Directory->getValue(DirectoryStorage));
+ SmallString<128> AbsolutePath(Directory->getValue(DirectoryStorage));
llvm::sys::path::append(AbsolutePath, FileName);
- llvm::sys::path::remove_dots(AbsolutePath, /*remove_dot_dot=*/ true);
llvm::sys::path::native(AbsolutePath, NativeFilePath);
} else {
llvm::sys::path::native(FileName, NativeFilePath);
}
+ llvm::sys::path::remove_dots(NativeFilePath, /*remove_dot_dot=*/true);
auto Cmd = CompileCommandRef(Directory, File, *Command, Output);
IndexByFile[NativeFilePath].push_back(Cmd);
AllCommands.push_back(Cmd);
diff --git a/contrib/llvm-project/clang/lib/Tooling/Refactoring.cpp b/contrib/llvm-project/clang/lib/Tooling/Refactoring.cpp
index d45cd8c57f10..961fc1c18015 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Refactoring.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Refactoring.cpp
@@ -78,10 +78,7 @@ bool formatAndApplyAllReplacements(
const std::string &FilePath = FileAndReplaces.first;
auto &CurReplaces = FileAndReplaces.second;
- const FileEntry *Entry = nullptr;
- if (auto File = Files.getFile(FilePath))
- Entry = *File;
-
+ FileEntryRef Entry = llvm::cantFail(Files.getFileRef(FilePath));
FileID ID = SM.getOrCreateFileID(Entry, SrcMgr::C_User);
StringRef Code = SM.getBufferData(ID);
diff --git a/contrib/llvm-project/clang/lib/Tooling/Refactoring/ASTSelection.cpp b/contrib/llvm-project/clang/lib/Tooling/Refactoring/ASTSelection.cpp
index 9485c8bc04ad..058574d8ec1a 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Refactoring/ASTSelection.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Refactoring/ASTSelection.cpp
@@ -10,6 +10,7 @@
#include "clang/AST/LexicallyOrderedRecursiveASTVisitor.h"
#include "clang/Lex/Lexer.h"
#include "llvm/Support/SaveAndRestore.h"
+#include <optional>
using namespace clang;
using namespace tooling;
@@ -50,12 +51,12 @@ public:
SourceSelectionKind::None));
}
- Optional<SelectedASTNode> getSelectedASTNode() {
+ std::optional<SelectedASTNode> getSelectedASTNode() {
assert(SelectionStack.size() == 1 && "stack was not popped");
SelectedASTNode Result = std::move(SelectionStack.back());
SelectionStack.pop_back();
if (Result.Children.empty())
- return None;
+ return std::nullopt;
return std::move(Result);
}
@@ -63,14 +64,14 @@ public:
// Avoid traversing the semantic expressions. They should be handled by
// looking through the appropriate opaque expressions in order to build
// a meaningful selection tree.
- llvm::SaveAndRestore<bool> LookThrough(LookThroughOpaqueValueExprs, true);
+ llvm::SaveAndRestore LookThrough(LookThroughOpaqueValueExprs, true);
return TraverseStmt(E->getSyntacticForm());
}
bool TraverseOpaqueValueExpr(OpaqueValueExpr *E) {
if (!LookThroughOpaqueValueExprs)
return true;
- llvm::SaveAndRestore<bool> LookThrough(LookThroughOpaqueValueExprs, false);
+ llvm::SaveAndRestore LookThrough(LookThroughOpaqueValueExprs, false);
return TraverseStmt(E->getSourceExpr());
}
@@ -178,7 +179,7 @@ private:
} // end anonymous namespace
-Optional<SelectedASTNode>
+std::optional<SelectedASTNode>
clang::tooling::findSelectedASTNodes(const ASTContext &Context,
SourceRange SelectionRange) {
assert(SelectionRange.isValid() &&
@@ -375,22 +376,22 @@ static void findDeepestWithKind(
findDeepestWithKind(ASTSelection, MatchingNodes, Kind, ParentStack);
}
-Optional<CodeRangeASTSelection>
+std::optional<CodeRangeASTSelection>
CodeRangeASTSelection::create(SourceRange SelectionRange,
const SelectedASTNode &ASTSelection) {
// Code range is selected when the selection range is not empty.
if (SelectionRange.getBegin() == SelectionRange.getEnd())
- return None;
+ return std::nullopt;
llvm::SmallVector<SelectedNodeWithParents, 4> ContainSelection;
findDeepestWithKind(ASTSelection, ContainSelection,
SourceSelectionKind::ContainsSelection);
// We are looking for a selection in one body of code, so let's focus on
// one matching result.
if (ContainSelection.size() != 1)
- return None;
+ return std::nullopt;
SelectedNodeWithParents &Selected = ContainSelection[0];
if (!Selected.Node.get().Node.get<Stmt>())
- return None;
+ return std::nullopt;
const Stmt *CodeRangeStmt = Selected.Node.get().Node.get<Stmt>();
if (!isa<CompoundStmt>(CodeRangeStmt)) {
Selected.canonicalize();
diff --git a/contrib/llvm-project/clang/lib/Tooling/Refactoring/ASTSelectionRequirements.cpp b/contrib/llvm-project/clang/lib/Tooling/Refactoring/ASTSelectionRequirements.cpp
index 70a4df07ea67..0e052bb19768 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Refactoring/ASTSelectionRequirements.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Refactoring/ASTSelectionRequirements.cpp
@@ -8,6 +8,7 @@
#include "clang/Tooling/Refactoring/RefactoringActionRuleRequirements.h"
#include "clang/AST/Attr.h"
+#include <optional>
using namespace clang;
using namespace tooling;
@@ -20,7 +21,7 @@ ASTSelectionRequirement::evaluate(RefactoringRuleContext &Context) const {
if (!Range)
return Range.takeError();
- Optional<SelectedASTNode> Selection =
+ std::optional<SelectedASTNode> Selection =
findSelectedASTNodes(Context.getASTContext(), *Range);
if (!Selection)
return Context.createDiagnosticError(
@@ -37,8 +38,9 @@ Expected<CodeRangeASTSelection> CodeRangeASTSelectionRequirement::evaluate(
return ASTSelection.takeError();
std::unique_ptr<SelectedASTNode> StoredSelection =
std::make_unique<SelectedASTNode>(std::move(*ASTSelection));
- Optional<CodeRangeASTSelection> CodeRange = CodeRangeASTSelection::create(
- Context.getSelectionRange(), *StoredSelection);
+ std::optional<CodeRangeASTSelection> CodeRange =
+ CodeRangeASTSelection::create(Context.getSelectionRange(),
+ *StoredSelection);
if (!CodeRange)
return Context.createDiagnosticError(
Context.getSelectionRange().getBegin(),
diff --git a/contrib/llvm-project/clang/lib/Tooling/Refactoring/AtomicChange.cpp b/contrib/llvm-project/clang/lib/Tooling/Refactoring/AtomicChange.cpp
index 069e9c1eb36e..3d5ae2fed014 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Refactoring/AtomicChange.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Refactoring/AtomicChange.cpp
@@ -150,7 +150,7 @@ createReplacementsForHeaders(llvm::StringRef FilePath, llvm::StringRef Code,
for (const auto &Change : Changes) {
for (llvm::StringRef Header : Change.getInsertedHeaders()) {
std::string EscapedHeader =
- Header.startswith("<") || Header.startswith("\"")
+ Header.starts_with("<") || Header.starts_with("\"")
? Header.str()
: ("\"" + Header + "\"").str();
std::string ReplacementText = "#include " + EscapedHeader;
@@ -198,7 +198,7 @@ AtomicChange::AtomicChange(const SourceManager &SM,
const FullSourceLoc FullKeyPosition(KeyPosition, SM);
std::pair<FileID, unsigned> FileIDAndOffset =
FullKeyPosition.getSpellingLoc().getDecomposedLoc();
- const FileEntry *FE = SM.getFileEntryForID(FileIDAndOffset.first);
+ OptionalFileEntryRef FE = SM.getFileEntryRefForID(FileIDAndOffset.first);
assert(FE && "Cannot create AtomicChange with invalid location.");
FilePath = std::string(FE->getName());
Key = FilePath + ":" + std::to_string(FileIDAndOffset.second);
diff --git a/contrib/llvm-project/clang/lib/Tooling/Refactoring/Extract/Extract.cpp b/contrib/llvm-project/clang/lib/Tooling/Refactoring/Extract/Extract.cpp
index 402b56109052..d437f4c21f47 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Refactoring/Extract/Extract.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Refactoring/Extract/Extract.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/ExprObjC.h"
#include "clang/Rewrite/Core/Rewriter.h"
#include "clang/Tooling/Refactoring/Extract/SourceExtraction.h"
+#include <optional>
namespace clang {
namespace tooling {
@@ -68,7 +69,7 @@ const RefactoringDescriptor &ExtractFunction::describe() {
Expected<ExtractFunction>
ExtractFunction::initiate(RefactoringRuleContext &Context,
CodeRangeASTSelection Code,
- Optional<std::string> DeclName) {
+ std::optional<std::string> DeclName) {
// We would like to extract code out of functions/methods/blocks.
// Prohibit extraction from things like global variable / field
// initializers and other top-level expressions.
diff --git a/contrib/llvm-project/clang/lib/Tooling/Refactoring/Extract/SourceExtraction.cpp b/contrib/llvm-project/clang/lib/Tooling/Refactoring/Extract/SourceExtraction.cpp
index 5d57ecf90a96..5e69fb805150 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Refactoring/Extract/SourceExtraction.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Refactoring/Extract/SourceExtraction.cpp
@@ -12,6 +12,7 @@
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
+#include <optional>
using namespace clang;
@@ -100,7 +101,7 @@ ExtractionSemicolonPolicy::compute(const Stmt *S, SourceRange &ExtractedRange,
/// Other statements should generally have a trailing ';'. We can try to find
/// it and move it together it with the extracted code.
- Optional<Token> NextToken = Lexer::findNextToken(End, SM, LangOpts);
+ std::optional<Token> NextToken = Lexer::findNextToken(End, SM, LangOpts);
if (NextToken && NextToken->is(tok::semi) &&
areOnSameLine(NextToken->getLocation(), End, SM)) {
ExtractedRange.setEnd(NextToken->getLocation());
diff --git a/contrib/llvm-project/clang/lib/Tooling/Refactoring/Lookup.cpp b/contrib/llvm-project/clang/lib/Tooling/Refactoring/Lookup.cpp
index 9468d4d032a7..757fba0404e6 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Refactoring/Lookup.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Refactoring/Lookup.cpp
@@ -98,8 +98,8 @@ static StringRef getBestNamespaceSubstr(const DeclContext *DeclA,
// from NewName if it has an identical prefix.
std::string NS =
"::" + cast<NamespaceDecl>(DeclA)->getQualifiedNameAsString() + "::";
- if (NewName.startswith(NS))
- return NewName.substr(NS.size());
+ if (NewName.consume_front(NS))
+ return NewName;
// No match yet. Strip of a namespace from the end of the chain and try
// again. This allows to get optimal qualifications even if the old and new
@@ -128,9 +128,9 @@ static std::string disambiguateSpellingInScope(StringRef Spelling,
StringRef QName,
const DeclContext &UseContext,
SourceLocation UseLoc) {
- assert(QName.startswith("::"));
- assert(QName.endswith(Spelling));
- if (Spelling.startswith("::"))
+ assert(QName.starts_with("::"));
+ assert(QName.ends_with(Spelling));
+ if (Spelling.starts_with("::"))
return std::string(Spelling);
auto UnspelledSpecifier = QName.drop_back(Spelling.size());
@@ -146,7 +146,7 @@ static std::string disambiguateSpellingInScope(StringRef Spelling,
UseLoc = SM.getSpellingLoc(UseLoc);
auto IsAmbiguousSpelling = [&](const llvm::StringRef CurSpelling) {
- if (CurSpelling.startswith("::"))
+ if (CurSpelling.starts_with("::"))
return false;
// Lookup the first component of Spelling in all enclosing namespaces
// and check if there is any existing symbols with the same name but in
@@ -160,7 +160,7 @@ static std::string disambiguateSpellingInScope(StringRef Spelling,
// ambiguous. For example, a reference in a header file should not be
// affected by a potentially ambiguous name in some file that includes
// the header.
- if (!TrimmedQName.startswith(Res->getQualifiedNameAsString()) &&
+ if (!TrimmedQName.starts_with(Res->getQualifiedNameAsString()) &&
SM.isBeforeInTranslationUnit(
SM.getSpellingLoc(Res->getLocation()), UseLoc))
return true;
@@ -187,7 +187,7 @@ std::string tooling::replaceNestedName(const NestedNameSpecifier *Use,
const DeclContext *UseContext,
const NamedDecl *FromDecl,
StringRef ReplacementString) {
- assert(ReplacementString.startswith("::") &&
+ assert(ReplacementString.starts_with("::") &&
"Expected fully-qualified name!");
// We can do a raw name replacement when we are not inside the namespace for
diff --git a/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp b/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp
index a69b76a3c971..7708fea53d01 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp
@@ -145,14 +145,12 @@ private:
void handleVarTemplateDecl(const VarTemplateDecl *VTD) {
USRSet.insert(getUSRForDecl(VTD));
USRSet.insert(getUSRForDecl(VTD->getTemplatedDecl()));
- llvm::for_each(VTD->specializations(), [&](const auto *Spec) {
+ for (const auto *Spec : VTD->specializations())
USRSet.insert(getUSRForDecl(Spec));
- });
SmallVector<VarTemplatePartialSpecializationDecl *, 4> PartialSpecs;
VTD->getPartialSpecializations(PartialSpecs);
- llvm::for_each(PartialSpecs, [&](const auto *Spec) {
+ for (const auto *Spec : PartialSpecs)
USRSet.insert(getUSRForDecl(Spec));
- });
}
void addUSRsOfCtorDtors(const CXXRecordDecl *RD) {
@@ -245,7 +243,8 @@ private:
DiagnosticsEngine::Error,
"SourceLocation in file %0 at offset %1 is invalid");
Engine.Report(SourceLocation(), InvalidOffset)
- << SourceMgr.getFileEntryForID(MainFileID)->getName() << SymbolOffset;
+ << SourceMgr.getFileEntryRefForID(MainFileID)->getName()
+ << SymbolOffset;
return false;
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp b/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
index aecfffcbef1f..c18f9290471f 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
@@ -228,16 +228,17 @@ public:
bool VisitDesignatedInitExpr(const DesignatedInitExpr *E) {
for (const DesignatedInitExpr::Designator &D : E->designators()) {
- if (D.isFieldDesignator() && D.getField()) {
- const FieldDecl *Decl = D.getField();
- if (isInUSRSet(Decl)) {
- auto StartLoc = D.getFieldLoc();
- auto EndLoc = D.getFieldLoc();
- RenameInfos.push_back({StartLoc, EndLoc,
- /*FromDecl=*/nullptr,
- /*Context=*/nullptr,
- /*Specifier=*/nullptr,
- /*IgnorePrefixQualifiers=*/true});
+ if (D.isFieldDesignator()) {
+ if (const FieldDecl *Decl = D.getFieldDecl()) {
+ if (isInUSRSet(Decl)) {
+ auto StartLoc = D.getFieldLoc();
+ auto EndLoc = D.getFieldLoc();
+ RenameInfos.push_back({StartLoc, EndLoc,
+ /*FromDecl=*/nullptr,
+ /*Context=*/nullptr,
+ /*Specifier=*/nullptr,
+ /*IgnorePrefixQualifiers=*/true});
+ }
}
}
}
@@ -561,8 +562,8 @@ createRenameAtomicChanges(llvm::ArrayRef<std::string> USRs,
ReplacedName = tooling::replaceNestedName(
RenameInfo.Specifier, RenameInfo.Begin,
RenameInfo.Context->getDeclContext(), RenameInfo.FromDecl,
- NewName.startswith("::") ? NewName.str()
- : ("::" + NewName).str());
+ NewName.starts_with("::") ? NewName.str()
+ : ("::" + NewName).str());
} else {
// This fixes the case where type `T` is a parameter inside a function
// type (e.g. `std::function<void(T)>`) and the DeclContext of `T`
@@ -577,13 +578,13 @@ createRenameAtomicChanges(llvm::ArrayRef<std::string> USRs,
SM, TranslationUnitDecl->getASTContext().getLangOpts());
// Add the leading "::" back if the name written in the code contains
// it.
- if (ActualName.startswith("::") && !NewName.startswith("::")) {
+ if (ActualName.starts_with("::") && !NewName.starts_with("::")) {
ReplacedName = "::" + NewName.str();
}
}
}
// If the NewName contains leading "::", add it back.
- if (NewName.startswith("::") && NewName.substr(2) == ReplacedName)
+ if (NewName.starts_with("::") && NewName.substr(2) == ReplacedName)
ReplacedName = NewName.str();
}
Replace(RenameInfo.Begin, RenameInfo.End, ReplacedName);
diff --git a/contrib/llvm-project/clang/lib/Tooling/Syntax/BuildTree.cpp b/contrib/llvm-project/clang/lib/Tooling/Syntax/BuildTree.cpp
index 07888b5c32fa..cd0261989495 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Syntax/BuildTree.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Syntax/BuildTree.cpp
@@ -27,6 +27,7 @@
#include "clang/Lex/Lexer.h"
#include "clang/Lex/LiteralSupport.h"
#include "clang/Tooling/Syntax/Nodes.h"
+#include "clang/Tooling/Syntax/TokenBufferTokenManager.h"
#include "clang/Tooling/Syntax/Tokens.h"
#include "clang/Tooling/Syntax/Tree.h"
#include "llvm/ADT/ArrayRef.h"
@@ -155,9 +156,8 @@ private:
} // namespace
static CallExpr::arg_range dropDefaultArgs(CallExpr::arg_range Args) {
- auto FirstDefaultArg = std::find_if(Args.begin(), Args.end(), [](auto It) {
- return isa<CXXDefaultArgExpr>(It);
- });
+ auto FirstDefaultArg =
+ llvm::find_if(Args, [](auto It) { return isa<CXXDefaultArgExpr>(It); });
return llvm::make_range(Args.begin(), FirstDefaultArg);
}
@@ -366,21 +366,24 @@ private:
/// Call finalize() to finish building the tree and consume the root node.
class syntax::TreeBuilder {
public:
- TreeBuilder(syntax::Arena &Arena) : Arena(Arena), Pending(Arena) {
- for (const auto &T : Arena.getTokenBuffer().expandedTokens())
+ TreeBuilder(syntax::Arena &Arena, TokenBufferTokenManager& TBTM)
+ : Arena(Arena),
+ TBTM(TBTM),
+ Pending(Arena, TBTM.tokenBuffer()) {
+ for (const auto &T : TBTM.tokenBuffer().expandedTokens())
LocationToToken.insert({T.location(), &T});
}
llvm::BumpPtrAllocator &allocator() { return Arena.getAllocator(); }
const SourceManager &sourceManager() const {
- return Arena.getSourceManager();
+ return TBTM.sourceManager();
}
/// Populate children for \p New node, assuming it covers tokens from \p
/// Range.
void foldNode(ArrayRef<syntax::Token> Range, syntax::Tree *New, ASTPtr From) {
assert(New);
- Pending.foldChildren(Arena, Range, New);
+ Pending.foldChildren(TBTM.tokenBuffer(), Range, New);
if (From)
Mapping.add(From, New);
}
@@ -393,7 +396,7 @@ public:
void foldNode(llvm::ArrayRef<syntax::Token> Range, syntax::Tree *New,
NestedNameSpecifierLoc From) {
assert(New);
- Pending.foldChildren(Arena, Range, New);
+ Pending.foldChildren(TBTM.tokenBuffer(), Range, New);
if (From)
Mapping.add(From, New);
}
@@ -404,7 +407,7 @@ public:
ASTPtr From) {
assert(New);
auto ListRange = Pending.shrinkToFitList(SuperRange);
- Pending.foldChildren(Arena, ListRange, New);
+ Pending.foldChildren(TBTM.tokenBuffer(), ListRange, New);
if (From)
Mapping.add(From, New);
}
@@ -435,12 +438,12 @@ public:
/// Finish building the tree and consume the root node.
syntax::TranslationUnit *finalize() && {
- auto Tokens = Arena.getTokenBuffer().expandedTokens();
+ auto Tokens = TBTM.tokenBuffer().expandedTokens();
assert(!Tokens.empty());
assert(Tokens.back().kind() == tok::eof);
// Build the root of the tree, consuming all the children.
- Pending.foldChildren(Arena, Tokens.drop_back(),
+ Pending.foldChildren(TBTM.tokenBuffer(), Tokens.drop_back(),
new (Arena.getAllocator()) syntax::TranslationUnit);
auto *TU = cast<syntax::TranslationUnit>(std::move(Pending).finalize());
@@ -465,8 +468,8 @@ public:
assert(First.isValid());
assert(Last.isValid());
assert(First == Last ||
- Arena.getSourceManager().isBeforeInTranslationUnit(First, Last));
- return llvm::makeArrayRef(findToken(First), std::next(findToken(Last)));
+ TBTM.sourceManager().isBeforeInTranslationUnit(First, Last));
+ return llvm::ArrayRef(findToken(First), std::next(findToken(Last)));
}
ArrayRef<syntax::Token>
@@ -549,7 +552,7 @@ private:
assert(Tokens.back().kind() != tok::eof);
// We never consume 'eof', so looking at the next token is ok.
if (Tokens.back().kind() != tok::semi && Tokens.end()->kind() == tok::semi)
- return llvm::makeArrayRef(Tokens.begin(), Tokens.end() + 1);
+ return llvm::ArrayRef(Tokens.begin(), Tokens.end() + 1);
return Tokens;
}
@@ -565,15 +568,16 @@ private:
///
/// Ensures that added nodes properly nest and cover the whole token stream.
struct Forest {
- Forest(syntax::Arena &A) {
- assert(!A.getTokenBuffer().expandedTokens().empty());
- assert(A.getTokenBuffer().expandedTokens().back().kind() == tok::eof);
+ Forest(syntax::Arena &A, const syntax::TokenBuffer &TB) {
+ assert(!TB.expandedTokens().empty());
+ assert(TB.expandedTokens().back().kind() == tok::eof);
// Create all leaf nodes.
// Note that we do not have 'eof' in the tree.
- for (const auto &T : A.getTokenBuffer().expandedTokens().drop_back()) {
- auto *L = new (A.getAllocator()) syntax::Leaf(&T);
+ for (const auto &T : TB.expandedTokens().drop_back()) {
+ auto *L = new (A.getAllocator())
+ syntax::Leaf(reinterpret_cast<TokenManager::Key>(&T));
L->Original = true;
- L->CanModify = A.getTokenBuffer().spelledForExpanded(T).hasValue();
+ L->CanModify = TB.spelledForExpanded(T).has_value();
Trees.insert(Trees.end(), {&T, L});
}
}
@@ -621,8 +625,8 @@ private:
}
/// Add \p Node to the forest and attach child nodes based on \p Tokens.
- void foldChildren(const syntax::Arena &A, ArrayRef<syntax::Token> Tokens,
- syntax::Tree *Node) {
+ void foldChildren(const syntax::TokenBuffer &TB,
+ ArrayRef<syntax::Token> Tokens, syntax::Tree *Node) {
// Attach children to `Node`.
assert(Node->getFirstChild() == nullptr && "node already has children");
@@ -647,7 +651,7 @@ private:
// Mark that this node came from the AST and is backed by the source code.
Node->Original = true;
Node->CanModify =
- A.getTokenBuffer().spelledForExpanded(Tokens).hasValue();
+ TB.spelledForExpanded(Tokens).has_value();
Trees.erase(BeginChildren, EndChildren);
Trees.insert({FirstToken, Node});
@@ -661,18 +665,18 @@ private:
return Root;
}
- std::string str(const syntax::Arena &A) const {
+ std::string str(const syntax::TokenBufferTokenManager &STM) const {
std::string R;
for (auto It = Trees.begin(); It != Trees.end(); ++It) {
unsigned CoveredTokens =
It != Trees.end()
? (std::next(It)->first - It->first)
- : A.getTokenBuffer().expandedTokens().end() - It->first;
+ : STM.tokenBuffer().expandedTokens().end() - It->first;
R += std::string(
formatv("- '{0}' covers '{1}'+{2} tokens\n", It->second->getKind(),
- It->first->text(A.getSourceManager()), CoveredTokens));
- R += It->second->dump(A.getSourceManager());
+ It->first->text(STM.sourceManager()), CoveredTokens));
+ R += It->second->dump(STM);
}
return R;
}
@@ -685,9 +689,10 @@ private:
};
/// For debugging purposes.
- std::string str() { return Pending.str(Arena); }
+ std::string str() { return Pending.str(TBTM); }
syntax::Arena &Arena;
+ TokenBufferTokenManager& TBTM;
/// To quickly find tokens by their start location.
llvm::DenseMap<SourceLocation, const syntax::Token *> LocationToToken;
Forest Pending;
@@ -763,7 +768,7 @@ public:
// Build TemplateDeclaration nodes if we had template parameters.
auto ConsumeTemplateParameters = [&](const TemplateParameterList &L) {
const auto *TemplateKW = Builder.findToken(L.getTemplateLoc());
- auto R = llvm::makeArrayRef(TemplateKW, DeclarationRange.end());
+ auto R = llvm::ArrayRef(TemplateKW, DeclarationRange.end());
Result =
foldTemplateDeclaration(R, TemplateKW, DeclarationRange, nullptr);
DeclarationRange = R;
@@ -1634,7 +1639,7 @@ private:
auto Return = Builder.getRange(ReturnedType.getSourceRange());
const auto *Arrow = Return.begin() - 1;
assert(Arrow->kind() == tok::arrow);
- auto Tokens = llvm::makeArrayRef(Arrow, Return.end());
+ auto Tokens = llvm::ArrayRef(Arrow, Return.end());
Builder.markChildToken(Arrow, syntax::NodeRole::ArrowToken);
if (ReturnDeclarator)
Builder.markChild(ReturnDeclarator, syntax::NodeRole::Declarator);
@@ -1719,7 +1724,7 @@ void syntax::TreeBuilder::markStmtChild(Stmt *Child, NodeRole Role) {
markExprChild(ChildExpr, NodeRole::Expression);
ChildNode = new (allocator()) syntax::ExpressionStatement;
// (!) 'getStmtRange()' ensures this covers a trailing semicolon.
- Pending.foldChildren(Arena, getStmtRange(Child), ChildNode);
+ Pending.foldChildren(TBTM.tokenBuffer(), getStmtRange(Child), ChildNode);
} else {
ChildNode = Mapping.find(Child);
}
@@ -1746,8 +1751,9 @@ const syntax::Token *syntax::TreeBuilder::findToken(SourceLocation L) const {
}
syntax::TranslationUnit *syntax::buildSyntaxTree(Arena &A,
+ TokenBufferTokenManager& TBTM,
ASTContext &Context) {
- TreeBuilder Builder(A);
+ TreeBuilder Builder(A, TBTM);
BuildTreeVisitor(Context, Builder).TraverseAST(Context);
return std::move(Builder).finalize();
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Syntax/ComputeReplacements.cpp b/contrib/llvm-project/clang/lib/Tooling/Syntax/ComputeReplacements.cpp
index 31e1a40c74b6..fe9a9df73cb3 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Syntax/ComputeReplacements.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Syntax/ComputeReplacements.cpp
@@ -7,7 +7,9 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/Core/Replacement.h"
#include "clang/Tooling/Syntax/Mutations.h"
+#include "clang/Tooling/Syntax/TokenBufferTokenManager.h"
#include "clang/Tooling/Syntax/Tokens.h"
+#include "clang/Tooling/Syntax/Tree.h"
#include "llvm/Support/Error.h"
using namespace clang;
@@ -16,17 +18,20 @@ namespace {
using ProcessTokensFn = llvm::function_ref<void(llvm::ArrayRef<syntax::Token>,
bool /*IsOriginal*/)>;
/// Enumerates spans of tokens from the tree consecutively laid out in memory.
-void enumerateTokenSpans(const syntax::Tree *Root, ProcessTokensFn Callback) {
+void enumerateTokenSpans(const syntax::Tree *Root,
+ const syntax::TokenBufferTokenManager &STM,
+ ProcessTokensFn Callback) {
struct Enumerator {
- Enumerator(ProcessTokensFn Callback)
- : SpanBegin(nullptr), SpanEnd(nullptr), SpanIsOriginal(false),
+ Enumerator(const syntax::TokenBufferTokenManager &STM,
+ ProcessTokensFn Callback)
+ : STM(STM), SpanBegin(nullptr), SpanEnd(nullptr), SpanIsOriginal(false),
Callback(Callback) {}
void run(const syntax::Tree *Root) {
process(Root);
// Report the last span to the user.
if (SpanBegin)
- Callback(llvm::makeArrayRef(SpanBegin, SpanEnd), SpanIsOriginal);
+ Callback(llvm::ArrayRef(SpanBegin, SpanEnd), SpanIsOriginal);
}
private:
@@ -39,33 +44,35 @@ void enumerateTokenSpans(const syntax::Tree *Root, ProcessTokensFn Callback) {
}
auto *L = cast<syntax::Leaf>(N);
- if (SpanEnd == L->getToken() && SpanIsOriginal == L->isOriginal()) {
+ if (SpanEnd == STM.getToken(L->getTokenKey()) &&
+ SpanIsOriginal == L->isOriginal()) {
// Extend the current span.
++SpanEnd;
return;
}
// Report the current span to the user.
if (SpanBegin)
- Callback(llvm::makeArrayRef(SpanBegin, SpanEnd), SpanIsOriginal);
+ Callback(llvm::ArrayRef(SpanBegin, SpanEnd), SpanIsOriginal);
// Start recording a new span.
- SpanBegin = L->getToken();
+ SpanBegin = STM.getToken(L->getTokenKey());
SpanEnd = SpanBegin + 1;
SpanIsOriginal = L->isOriginal();
}
+ const syntax::TokenBufferTokenManager &STM;
const syntax::Token *SpanBegin;
const syntax::Token *SpanEnd;
bool SpanIsOriginal;
ProcessTokensFn Callback;
};
- return Enumerator(Callback).run(Root);
+ return Enumerator(STM, Callback).run(Root);
}
-syntax::FileRange rangeOfExpanded(const syntax::Arena &A,
+syntax::FileRange rangeOfExpanded(const syntax::TokenBufferTokenManager &STM,
llvm::ArrayRef<syntax::Token> Expanded) {
- const auto &Buffer = A.getTokenBuffer();
- const auto &SM = A.getSourceManager();
+ const auto &Buffer = STM.tokenBuffer();
+ const auto &SM = STM.sourceManager();
// Check that \p Expanded actually points into expanded tokens.
assert(Buffer.expandedTokens().begin() <= Expanded.begin());
@@ -83,10 +90,10 @@ syntax::FileRange rangeOfExpanded(const syntax::Arena &A,
} // namespace
tooling::Replacements
-syntax::computeReplacements(const syntax::Arena &A,
+syntax::computeReplacements(const TokenBufferTokenManager &TBTM,
const syntax::TranslationUnit &TU) {
- const auto &Buffer = A.getTokenBuffer();
- const auto &SM = A.getSourceManager();
+ const auto &Buffer = TBTM.tokenBuffer();
+ const auto &SM = TBTM.sourceManager();
tooling::Replacements Replacements;
// Text inserted by the replacement we are building now.
@@ -95,13 +102,13 @@ syntax::computeReplacements(const syntax::Arena &A,
if (ReplacedRange.empty() && Replacement.empty())
return;
llvm::cantFail(Replacements.add(tooling::Replacement(
- SM, rangeOfExpanded(A, ReplacedRange).toCharRange(SM), Replacement)));
+ SM, rangeOfExpanded(TBTM, ReplacedRange).toCharRange(SM),
+ Replacement)));
Replacement = "";
};
-
const syntax::Token *NextOriginal = Buffer.expandedTokens().begin();
enumerateTokenSpans(
- &TU, [&](llvm::ArrayRef<syntax::Token> Tokens, bool IsOriginal) {
+ &TU, TBTM, [&](llvm::ArrayRef<syntax::Token> Tokens, bool IsOriginal) {
if (!IsOriginal) {
Replacement +=
syntax::Token::range(SM, Tokens.front(), Tokens.back()).text(SM);
@@ -111,17 +118,17 @@ syntax::computeReplacements(const syntax::Arena &A,
// We are looking at a span of original tokens.
if (NextOriginal != Tokens.begin()) {
// There is a gap, record a replacement or deletion.
- emitReplacement(llvm::makeArrayRef(NextOriginal, Tokens.begin()));
+ emitReplacement(llvm::ArrayRef(NextOriginal, Tokens.begin()));
} else {
// No gap, but we may have pending insertions. Emit them now.
- emitReplacement(llvm::makeArrayRef(NextOriginal, /*Length=*/0));
+ emitReplacement(llvm::ArrayRef(NextOriginal, /*Length=*/(size_t)0));
}
NextOriginal = Tokens.end();
});
// We might have pending replacements at the end of file. If so, emit them.
- emitReplacement(llvm::makeArrayRef(
- NextOriginal, Buffer.expandedTokens().drop_back().end()));
+ emitReplacement(
+ llvm::ArrayRef(NextOriginal, Buffer.expandedTokens().drop_back().end()));
return Replacements;
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Syntax/Mutations.cpp b/contrib/llvm-project/clang/lib/Tooling/Syntax/Mutations.cpp
index f8a652219b22..0f04acea3d04 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Syntax/Mutations.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Syntax/Mutations.cpp
@@ -15,7 +15,6 @@
#include "clang/Tooling/Syntax/Tokens.h"
#include "clang/Tooling/Syntax/Tree.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Casting.h"
#include <cassert>
@@ -77,7 +76,8 @@ public:
}
};
-void syntax::removeStatement(syntax::Arena &A, syntax::Statement *S) {
+void syntax::removeStatement(syntax::Arena &A, TokenBufferTokenManager &TBTM,
+ syntax::Statement *S) {
assert(S);
assert(S->canModify());
@@ -90,5 +90,5 @@ void syntax::removeStatement(syntax::Arena &A, syntax::Statement *S) {
if (isa<EmptyStatement>(S))
return; // already an empty statement, nothing to do.
- MutationsImpl::replace(S, createEmptyStatement(A));
+ MutationsImpl::replace(S, createEmptyStatement(A, TBTM));
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Syntax/Nodes.cpp b/contrib/llvm-project/clang/lib/Tooling/Syntax/Nodes.cpp
index fc6f8ef1a82c..d0c1e9297cfa 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Syntax/Nodes.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Syntax/Nodes.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
#include "clang/Tooling/Syntax/Nodes.h"
-#include "clang/Basic/TokenKinds.h"
+#include "llvm/Support/raw_ostream.h"
using namespace clang;
diff --git a/contrib/llvm-project/clang/lib/Tooling/Syntax/Synthesis.cpp b/contrib/llvm-project/clang/lib/Tooling/Syntax/Synthesis.cpp
index ef6492882be6..39c19951ae76 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Syntax/Synthesis.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Syntax/Synthesis.cpp
@@ -8,6 +8,8 @@
#include "clang/Basic/TokenKinds.h"
#include "clang/Tooling/Syntax/BuildTree.h"
#include "clang/Tooling/Syntax/Tree.h"
+#include "clang/Tooling/Syntax/Tokens.h"
+#include "clang/Tooling/Syntax/TokenBufferTokenManager.h"
using namespace clang;
@@ -27,35 +29,40 @@ public:
}
static std::pair<FileID, ArrayRef<Token>>
- lexBuffer(syntax::Arena &A, std::unique_ptr<llvm::MemoryBuffer> Buffer) {
- return A.lexBuffer(std::move(Buffer));
+ lexBuffer(TokenBufferTokenManager &TBTM,
+ std::unique_ptr<llvm::MemoryBuffer> Buffer) {
+ return TBTM.lexBuffer(std::move(Buffer));
}
};
// FIXME: `createLeaf` is based on `syntax::tokenize` internally, as such it
// doesn't support digraphs or line continuations.
-syntax::Leaf *clang::syntax::createLeaf(syntax::Arena &A, tok::TokenKind K,
- StringRef Spelling) {
+syntax::Leaf *clang::syntax::createLeaf(syntax::Arena &A,
+ TokenBufferTokenManager &TBTM,
+ tok::TokenKind K, StringRef Spelling) {
auto Tokens =
- FactoryImpl::lexBuffer(A, llvm::MemoryBuffer::getMemBufferCopy(Spelling))
+ FactoryImpl::lexBuffer(TBTM, llvm::MemoryBuffer::getMemBufferCopy(Spelling))
.second;
assert(Tokens.size() == 1);
assert(Tokens.front().kind() == K &&
"spelling is not lexed into the expected kind of token");
- auto *Leaf = new (A.getAllocator()) syntax::Leaf(Tokens.begin());
+ auto *Leaf = new (A.getAllocator()) syntax::Leaf(
+ reinterpret_cast<TokenManager::Key>(Tokens.begin()));
syntax::FactoryImpl::setCanModify(Leaf);
Leaf->assertInvariants();
return Leaf;
}
-syntax::Leaf *clang::syntax::createLeaf(syntax::Arena &A, tok::TokenKind K) {
+syntax::Leaf *clang::syntax::createLeaf(syntax::Arena &A,
+ TokenBufferTokenManager &TBTM,
+ tok::TokenKind K) {
const auto *Spelling = tok::getPunctuatorSpelling(K);
if (!Spelling)
Spelling = tok::getKeywordSpelling(K);
assert(Spelling &&
"Cannot infer the spelling of the token from its token kind.");
- return createLeaf(A, K, Spelling);
+ return createLeaf(A, TBTM, K, Spelling);
}
namespace {
@@ -208,24 +215,25 @@ syntax::Tree *clang::syntax::createTree(
}
syntax::Node *clang::syntax::deepCopyExpandingMacros(syntax::Arena &A,
+ TokenBufferTokenManager &TBTM,
const syntax::Node *N) {
if (const auto *L = dyn_cast<syntax::Leaf>(N))
// `L->getToken()` gives us the expanded token, thus we implicitly expand
// any macros here.
- return createLeaf(A, L->getToken()->kind(),
- L->getToken()->text(A.getSourceManager()));
+ return createLeaf(A, TBTM, TBTM.getToken(L->getTokenKey())->kind(),
+ TBTM.getText(L->getTokenKey()));
const auto *T = cast<syntax::Tree>(N);
std::vector<std::pair<syntax::Node *, syntax::NodeRole>> Children;
for (const auto *Child = T->getFirstChild(); Child;
Child = Child->getNextSibling())
- Children.push_back({deepCopyExpandingMacros(A, Child), Child->getRole()});
+ Children.push_back({deepCopyExpandingMacros(A, TBTM, Child), Child->getRole()});
return createTree(A, Children, N->getKind());
}
-syntax::EmptyStatement *clang::syntax::createEmptyStatement(syntax::Arena &A) {
+syntax::EmptyStatement *clang::syntax::createEmptyStatement(syntax::Arena &A, TokenBufferTokenManager &TBTM) {
return cast<EmptyStatement>(
- createTree(A, {{createLeaf(A, tok::semi), NodeRole::Unknown}},
+ createTree(A, {{createLeaf(A, TBTM, tok::semi), NodeRole::Unknown}},
NodeKind::EmptyStatement));
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Syntax/TokenBufferTokenManager.cpp b/contrib/llvm-project/clang/lib/Tooling/Syntax/TokenBufferTokenManager.cpp
new file mode 100644
index 000000000000..a06f7e2900d4
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Tooling/Syntax/TokenBufferTokenManager.cpp
@@ -0,0 +1,25 @@
+//===- TokenBufferTokenManager.cpp ----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/Syntax/TokenBufferTokenManager.h"
+
+namespace clang {
+namespace syntax {
+constexpr llvm::StringLiteral syntax::TokenBufferTokenManager::Kind;
+
+std::pair<FileID, ArrayRef<syntax::Token>>
+syntax::TokenBufferTokenManager::lexBuffer(
+ std::unique_ptr<llvm::MemoryBuffer> Input) {
+ auto FID = SM.createFileID(std::move(Input));
+ auto It = ExtraTokens.try_emplace(FID, tokenize(FID, SM, LangOpts));
+ assert(It.second && "duplicate FileID");
+ return {FID, It.first->second};
+}
+
+} // namespace syntax
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp b/contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp
index 8a31e776d030..8d32c45a4a70 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Syntax/Tokens.cpp
@@ -18,8 +18,6 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/Token.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@@ -28,6 +26,7 @@
#include <algorithm>
#include <cassert>
#include <iterator>
+#include <optional>
#include <string>
#include <utility>
#include <vector>
@@ -55,45 +54,88 @@ getTokensCovering(llvm::ArrayRef<syntax::Token> Toks, SourceRange R,
return {Begin, End};
}
-// Finds the smallest expansion range that contains expanded tokens First and
-// Last, e.g.:
+// Finds the range within FID corresponding to expanded tokens [First, Last].
+// Prev precedes First and Next follows Last, these must *not* be included.
+// If no range satisfies the criteria, returns an invalid range.
+//
// #define ID(x) x
// ID(ID(ID(a1) a2))
// ~~ -> a1
// ~~ -> a2
// ~~~~~~~~~ -> a1 a2
-SourceRange findCommonRangeForMacroArgs(const syntax::Token &First,
- const syntax::Token &Last,
- const SourceManager &SM) {
- SourceRange Res;
- auto FirstLoc = First.location(), LastLoc = Last.location();
- // Keep traversing up the spelling chain as longs as tokens are part of the
- // same expansion.
- while (!FirstLoc.isFileID() && !LastLoc.isFileID()) {
- auto ExpInfoFirst = SM.getSLocEntry(SM.getFileID(FirstLoc)).getExpansion();
- auto ExpInfoLast = SM.getSLocEntry(SM.getFileID(LastLoc)).getExpansion();
- // Stop if expansions have diverged.
- if (ExpInfoFirst.getExpansionLocStart() !=
- ExpInfoLast.getExpansionLocStart())
+SourceRange spelledForExpandedSlow(SourceLocation First, SourceLocation Last,
+ SourceLocation Prev, SourceLocation Next,
+ FileID TargetFile,
+ const SourceManager &SM) {
+ // There are two main parts to this algorithm:
+ // - identifying which spelled range covers the expanded tokens
+ // - validating that this range doesn't cover any extra tokens (First/Last)
+ //
+ // We do these in order. However as we transform the expanded range into the
+ // spelled one, we adjust First/Last so the validation remains simple.
+
+ assert(SM.getSLocEntry(TargetFile).isFile());
+ // In most cases, to select First and Last we must return their expansion
+ // range, i.e. the whole of any macros they are included in.
+ //
+ // When First and Last are part of the *same macro arg* of a macro written
+ // in TargetFile, we that slice of the arg, i.e. their spelling range.
+ //
+ // Unwrap such macro calls. If the target file has A(B(C)), the
+ // SourceLocation stack of a token inside C shows us the expansion of A first,
+ // then B, then any macros inside C's body, then C itself.
+ // (This is the reverse of the order the PP applies the expansions in).
+ while (First.isMacroID() && Last.isMacroID()) {
+ auto DecFirst = SM.getDecomposedLoc(First);
+ auto DecLast = SM.getDecomposedLoc(Last);
+ auto &ExpFirst = SM.getSLocEntry(DecFirst.first).getExpansion();
+ auto &ExpLast = SM.getSLocEntry(DecLast.first).getExpansion();
+
+ if (!ExpFirst.isMacroArgExpansion() || !ExpLast.isMacroArgExpansion())
+ break;
+ // Locations are in the same macro arg if they expand to the same place.
+ // (They may still have different FileIDs - an arg can have >1 chunks!)
+ if (ExpFirst.getExpansionLocStart() != ExpLast.getExpansionLocStart())
break;
- // Do not continue into macro bodies.
- if (!ExpInfoFirst.isMacroArgExpansion() ||
- !ExpInfoLast.isMacroArgExpansion())
+ // Careful, given:
+ // #define HIDE ID(ID(a))
+ // ID(ID(HIDE))
+ // The token `a` is wrapped in 4 arg-expansions, we only want to unwrap 2.
+ // We distinguish them by whether the macro expands into the target file.
+ // Fortunately, the target file ones will always appear first.
+ auto ExpFileID = SM.getFileID(ExpFirst.getExpansionLocStart());
+ if (ExpFileID == TargetFile)
break;
- FirstLoc = SM.getImmediateSpellingLoc(FirstLoc);
- LastLoc = SM.getImmediateSpellingLoc(LastLoc);
- // Update the result afterwards, as we want the tokens that triggered the
- // expansion.
- Res = {FirstLoc, LastLoc};
+ // Replace each endpoint with its spelling inside the macro arg.
+ // (This is getImmediateSpellingLoc without repeating lookups).
+ First = ExpFirst.getSpellingLoc().getLocWithOffset(DecFirst.second);
+ Last = ExpLast.getSpellingLoc().getLocWithOffset(DecLast.second);
+ }
+
+ // In all remaining cases we need the full containing macros.
+ // If this overlaps Prev or Next, then no range is possible.
+ SourceRange Candidate =
+ SM.getExpansionRange(SourceRange(First, Last)).getAsRange();
+ auto DecFirst = SM.getDecomposedExpansionLoc(Candidate.getBegin());
+ auto DecLast = SM.getDecomposedExpansionLoc(Candidate.getEnd());
+ // Can end up in the wrong file due to bad input or token-pasting shenanigans.
+ if (Candidate.isInvalid() || DecFirst.first != TargetFile ||
+ DecLast.first != TargetFile)
+ return SourceRange();
+ // Check bounds, which may still be inside macros.
+ if (Prev.isValid()) {
+ auto Dec = SM.getDecomposedLoc(SM.getExpansionRange(Prev).getBegin());
+ if (Dec.first != DecFirst.first || Dec.second >= DecFirst.second)
+ return SourceRange();
}
- // Normally mapping back to expansion location here only changes FileID, as
- // we've already found some tokens expanded from the same macro argument, and
- // they should map to a consecutive subset of spelled tokens. Unfortunately
- // SourceManager::isBeforeInTranslationUnit discriminates sourcelocations
- // based on their FileID in addition to offsets. So even though we are
- // referring to same tokens, SourceManager might tell us that one is before
- // the other if they've got different FileIDs.
- return SM.getExpansionRange(CharSourceRange(Res, true)).getAsRange();
+ if (Next.isValid()) {
+ auto Dec = SM.getDecomposedLoc(SM.getExpansionRange(Next).getEnd());
+ if (Dec.first != DecLast.first || Dec.second <= DecLast.second)
+ return SourceRange();
+ }
+ // Now we know that Candidate is a file range that covers [First, Last]
+ // without encroaching on {Prev, Next}. Ship it!
+ return Candidate;
}
} // namespace
@@ -331,8 +373,8 @@ TokenBuffer::expandedForSpelled(llvm::ArrayRef<syntax::Token> Spelled) const {
// Avoid returning empty ranges.
if (ExpandedBegin == ExpandedEnd)
return {};
- return {llvm::makeArrayRef(ExpandedTokens.data() + ExpandedBegin,
- ExpandedTokens.data() + ExpandedEnd)};
+ return {llvm::ArrayRef(ExpandedTokens.data() + ExpandedBegin,
+ ExpandedTokens.data() + ExpandedEnd)};
}
llvm::ArrayRef<syntax::Token> TokenBuffer::spelledTokens(FileID FID) const {
@@ -357,57 +399,60 @@ std::string TokenBuffer::Mapping::str() const {
BeginSpelled, EndSpelled, BeginExpanded, EndExpanded));
}
-llvm::Optional<llvm::ArrayRef<syntax::Token>>
+std::optional<llvm::ArrayRef<syntax::Token>>
TokenBuffer::spelledForExpanded(llvm::ArrayRef<syntax::Token> Expanded) const {
+ // In cases of invalid code, AST nodes can have source ranges that include
+ // the `eof` token. As there's no spelling for this token, exclude it from
+ // the range.
+ if (!Expanded.empty() && Expanded.back().kind() == tok::eof) {
+ Expanded = Expanded.drop_back();
+ }
// Mapping an empty range is ambiguous in case of empty mappings at either end
// of the range, bail out in that case.
if (Expanded.empty())
- return llvm::None;
-
- const syntax::Token *BeginSpelled;
- const Mapping *BeginMapping;
- std::tie(BeginSpelled, BeginMapping) =
- spelledForExpandedToken(&Expanded.front());
-
- const syntax::Token *LastSpelled;
- const Mapping *LastMapping;
- std::tie(LastSpelled, LastMapping) =
- spelledForExpandedToken(&Expanded.back());
+ return std::nullopt;
+ const syntax::Token *First = &Expanded.front();
+ const syntax::Token *Last = &Expanded.back();
+ auto [FirstSpelled, FirstMapping] = spelledForExpandedToken(First);
+ auto [LastSpelled, LastMapping] = spelledForExpandedToken(Last);
- FileID FID = SourceMgr->getFileID(BeginSpelled->location());
+ FileID FID = SourceMgr->getFileID(FirstSpelled->location());
// FIXME: Handle multi-file changes by trying to map onto a common root.
if (FID != SourceMgr->getFileID(LastSpelled->location()))
- return llvm::None;
+ return std::nullopt;
const MarkedFile &File = Files.find(FID)->second;
- // If both tokens are coming from a macro argument expansion, try and map to
- // smallest part of the macro argument. BeginMapping && LastMapping check is
- // only for performance, they are a prerequisite for Expanded.front() and
- // Expanded.back() being part of a macro arg expansion.
- if (BeginMapping && LastMapping &&
- SourceMgr->isMacroArgExpansion(Expanded.front().location()) &&
- SourceMgr->isMacroArgExpansion(Expanded.back().location())) {
- auto CommonRange = findCommonRangeForMacroArgs(Expanded.front(),
- Expanded.back(), *SourceMgr);
- // It might be the case that tokens are arguments of different macro calls,
- // in that case we should continue with the logic below instead of returning
- // an empty range.
- if (CommonRange.isValid())
- return getTokensCovering(File.SpelledTokens, CommonRange, *SourceMgr);
+ // If the range is within one macro argument, the result may be only part of a
+ // Mapping. We must use the general (SourceManager-based) algorithm.
+ if (FirstMapping && FirstMapping == LastMapping &&
+ SourceMgr->isMacroArgExpansion(First->location()) &&
+ SourceMgr->isMacroArgExpansion(Last->location())) {
+ // We use excluded Prev/Next token for bounds checking.
+ SourceLocation Prev = (First == &ExpandedTokens.front())
+ ? SourceLocation()
+ : (First - 1)->location();
+ SourceLocation Next = (Last == &ExpandedTokens.back())
+ ? SourceLocation()
+ : (Last + 1)->location();
+ SourceRange Range = spelledForExpandedSlow(
+ First->location(), Last->location(), Prev, Next, FID, *SourceMgr);
+ if (Range.isInvalid())
+ return std::nullopt;
+ return getTokensCovering(File.SpelledTokens, Range, *SourceMgr);
}
+ // Otherwise, use the fast version based on Mappings.
// Do not allow changes that doesn't cover full expansion.
- unsigned BeginExpanded = Expanded.begin() - ExpandedTokens.data();
- unsigned EndExpanded = Expanded.end() - ExpandedTokens.data();
- if (BeginMapping && BeginExpanded != BeginMapping->BeginExpanded)
- return llvm::None;
- if (LastMapping && LastMapping->EndExpanded != EndExpanded)
- return llvm::None;
- // All is good, return the result.
- return llvm::makeArrayRef(
- BeginMapping ? File.SpelledTokens.data() + BeginMapping->BeginSpelled
- : BeginSpelled,
+ unsigned FirstExpanded = Expanded.begin() - ExpandedTokens.data();
+ unsigned LastExpanded = Expanded.end() - ExpandedTokens.data();
+ if (FirstMapping && FirstExpanded != FirstMapping->BeginExpanded)
+ return std::nullopt;
+ if (LastMapping && LastMapping->EndExpanded != LastExpanded)
+ return std::nullopt;
+ return llvm::ArrayRef(
+ FirstMapping ? File.SpelledTokens.data() + FirstMapping->BeginSpelled
+ : FirstSpelled,
LastMapping ? File.SpelledTokens.data() + LastMapping->EndSpelled
: LastSpelled + 1);
}
@@ -415,10 +460,10 @@ TokenBuffer::spelledForExpanded(llvm::ArrayRef<syntax::Token> Expanded) const {
TokenBuffer::Expansion TokenBuffer::makeExpansion(const MarkedFile &F,
const Mapping &M) const {
Expansion E;
- E.Spelled = llvm::makeArrayRef(F.SpelledTokens.data() + M.BeginSpelled,
- F.SpelledTokens.data() + M.EndSpelled);
- E.Expanded = llvm::makeArrayRef(ExpandedTokens.data() + M.BeginExpanded,
- ExpandedTokens.data() + M.EndExpanded);
+ E.Spelled = llvm::ArrayRef(F.SpelledTokens.data() + M.BeginSpelled,
+ F.SpelledTokens.data() + M.EndSpelled);
+ E.Expanded = llvm::ArrayRef(ExpandedTokens.data() + M.BeginExpanded,
+ ExpandedTokens.data() + M.EndExpanded);
return E;
}
@@ -441,7 +486,7 @@ TokenBuffer::fileForSpelled(llvm::ArrayRef<syntax::Token> Spelled) const {
return File;
}
-llvm::Optional<TokenBuffer::Expansion>
+std::optional<TokenBuffer::Expansion>
TokenBuffer::expansionStartingAt(const syntax::Token *Spelled) const {
assert(Spelled);
const auto &File = fileForSpelled(*Spelled);
@@ -451,7 +496,7 @@ TokenBuffer::expansionStartingAt(const syntax::Token *Spelled) const {
return M.BeginSpelled < SpelledIndex;
});
if (M == File.Mappings.end() || M->BeginSpelled != SpelledIndex)
- return llvm::None;
+ return std::nullopt;
return makeExpansion(File, *M);
}
@@ -483,8 +528,8 @@ syntax::spelledTokensTouching(SourceLocation Loc,
bool AcceptRight = Right != Tokens.end() && Right->location() <= Loc;
bool AcceptLeft =
Right != Tokens.begin() && (Right - 1)->endLocation() >= Loc;
- return llvm::makeArrayRef(Right - (AcceptLeft ? 1 : 0),
- Right + (AcceptRight ? 1 : 0));
+ return llvm::ArrayRef(Right - (AcceptLeft ? 1 : 0),
+ Right + (AcceptRight ? 1 : 0));
}
llvm::ArrayRef<syntax::Token>
@@ -714,7 +759,7 @@ private:
// In the simplest case, skips spelled tokens until finding one that produced
// the NextExpanded token, and creates an empty mapping for them.
// If Drain is provided, skips remaining tokens from that file instead.
- void discard(llvm::Optional<FileID> Drain = llvm::None) {
+ void discard(std::optional<FileID> Drain = std::nullopt) {
SourceLocation Target =
Drain ? SM.getLocForEndOfFile(*Drain)
: SM.getExpansionLoc(
@@ -751,7 +796,7 @@ private:
SpelledTokens[NextSpelled].location() <= KnownEnd)
++NextSpelled;
FlushMapping(); // Emits [NextSpelled, KnownEnd]
- // Now the loop contitues and will emit (KnownEnd, Target).
+ // Now the loop continues and will emit (KnownEnd, Target).
} else {
++NextSpelled;
}
@@ -891,21 +936,21 @@ std::string TokenBuffer::dumpForTests() const {
OS << "expanded tokens:\n"
<< " ";
// (!) we do not show '<eof>'.
- DumpTokens(OS, llvm::makeArrayRef(ExpandedTokens).drop_back());
+ DumpTokens(OS, llvm::ArrayRef(ExpandedTokens).drop_back());
OS << "\n";
std::vector<FileID> Keys;
- for (auto F : Files)
+ for (const auto &F : Files)
Keys.push_back(F.first);
llvm::sort(Keys);
for (FileID ID : Keys) {
const MarkedFile &File = Files.find(ID)->second;
- auto *Entry = SourceMgr->getFileEntryForID(ID);
+ auto Entry = SourceMgr->getFileEntryRefForID(ID);
if (!Entry)
continue; // Skip builtin files.
- OS << llvm::formatv("file '{0}'\n", Entry->getName())
- << " spelled tokens:\n"
+ std::string Path = llvm::sys::path::convert_to_slash(Entry->getName());
+ OS << llvm::formatv("file '{0}'\n", Path) << " spelled tokens:\n"
<< " ";
DumpTokens(OS, File.SpelledTokens);
OS << "\n";
@@ -927,5 +972,5 @@ std::string TokenBuffer::dumpForTests() const {
M.EndExpanded);
}
}
- return OS.str();
+ return Dump;
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Syntax/Tree.cpp b/contrib/llvm-project/clang/lib/Tooling/Syntax/Tree.cpp
index 07ee13e313f5..20f7bd087aa0 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Syntax/Tree.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Syntax/Tree.cpp
@@ -8,8 +8,8 @@
#include "clang/Tooling/Syntax/Tree.h"
#include "clang/Basic/TokenKinds.h"
#include "clang/Tooling/Syntax/Nodes.h"
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Casting.h"
#include <cassert>
@@ -32,25 +32,7 @@ static void traverse(syntax::Node *N,
}
} // namespace
-syntax::Arena::Arena(SourceManager &SourceMgr, const LangOptions &LangOpts,
- const TokenBuffer &Tokens)
- : SourceMgr(SourceMgr), LangOpts(LangOpts), Tokens(Tokens) {}
-
-const syntax::TokenBuffer &syntax::Arena::getTokenBuffer() const {
- return Tokens;
-}
-
-std::pair<FileID, ArrayRef<syntax::Token>>
-syntax::Arena::lexBuffer(std::unique_ptr<llvm::MemoryBuffer> Input) {
- auto FID = SourceMgr.createFileID(std::move(Input));
- auto It = ExtraTokens.try_emplace(FID, tokenize(FID, SourceMgr, LangOpts));
- assert(It.second && "duplicate FileID");
- return {FID, It.first->second};
-}
-
-syntax::Leaf::Leaf(const syntax::Token *Tok) : Node(NodeKind::Leaf), Tok(Tok) {
- assert(Tok != nullptr);
-}
+syntax::Leaf::Leaf(syntax::TokenManager::Key K) : Node(NodeKind::Leaf), K(K) {}
syntax::Node::Node(NodeKind Kind)
: Parent(nullptr), NextSibling(nullptr), PreviousSibling(nullptr),
@@ -126,7 +108,7 @@ void syntax::Tree::replaceChildRangeLowLevel(Node *Begin, Node *End,
for (auto *N = New; N; N = N->NextSibling) {
assert(N->Parent == nullptr);
assert(N->getRole() != NodeRole::Detached && "Roles must be set");
- // FIXME: sanity-check the role.
+ // FIXME: validate the role.
}
auto Reachable = [](Node *From, Node *N) {
@@ -189,20 +171,8 @@ void syntax::Tree::replaceChildRangeLowLevel(Node *Begin, Node *End,
}
namespace {
-static void dumpLeaf(raw_ostream &OS, const syntax::Leaf *L,
- const SourceManager &SM) {
- assert(L);
- const auto *Token = L->getToken();
- assert(Token);
- // Handle 'eof' separately, calling text() on it produces an empty string.
- if (Token->kind() == tok::eof)
- OS << "<eof>";
- else
- OS << Token->text(SM);
-}
-
static void dumpNode(raw_ostream &OS, const syntax::Node *N,
- const SourceManager &SM, std::vector<bool> IndentMask) {
+ const syntax::TokenManager &TM, llvm::BitVector IndentMask) {
auto DumpExtraInfo = [&OS](const syntax::Node *N) {
if (N->getRole() != syntax::NodeRole::Unknown)
OS << " " << N->getRole();
@@ -215,7 +185,7 @@ static void dumpNode(raw_ostream &OS, const syntax::Node *N,
assert(N);
if (const auto *L = dyn_cast<syntax::Leaf>(N)) {
OS << "'";
- dumpLeaf(OS, L, SM);
+ OS << TM.getText(L->getTokenKey());
OS << "'";
DumpExtraInfo(N);
OS << "\n";
@@ -228,8 +198,8 @@ static void dumpNode(raw_ostream &OS, const syntax::Node *N,
OS << "\n";
for (const syntax::Node &It : T->getChildren()) {
- for (bool Filled : IndentMask) {
- if (Filled)
+ for (unsigned Idx = 0; Idx < IndentMask.size(); ++Idx) {
+ if (IndentMask[Idx])
OS << "| ";
else
OS << " ";
@@ -241,29 +211,29 @@ static void dumpNode(raw_ostream &OS, const syntax::Node *N,
OS << "|-";
IndentMask.push_back(true);
}
- dumpNode(OS, &It, SM, IndentMask);
+ dumpNode(OS, &It, TM, IndentMask);
IndentMask.pop_back();
}
}
} // namespace
-std::string syntax::Node::dump(const SourceManager &SM) const {
+std::string syntax::Node::dump(const TokenManager &TM) const {
std::string Str;
llvm::raw_string_ostream OS(Str);
- dumpNode(OS, this, SM, /*IndentMask=*/{});
+ dumpNode(OS, this, TM, /*IndentMask=*/{});
return std::move(OS.str());
}
-std::string syntax::Node::dumpTokens(const SourceManager &SM) const {
+std::string syntax::Node::dumpTokens(const TokenManager &TM) const {
std::string Storage;
llvm::raw_string_ostream OS(Storage);
traverse(this, [&](const syntax::Node *N) {
if (const auto *L = dyn_cast<syntax::Leaf>(N)) {
- dumpLeaf(OS, L, SM);
+ OS << TM.getText(L->getTokenKey());
OS << " ";
}
});
- return OS.str();
+ return Storage;
}
void syntax::Node::assertInvariants() const {
@@ -296,7 +266,8 @@ void syntax::Node::assertInvariants() const {
C.getRole() == NodeRole::ListDelimiter);
if (C.getRole() == NodeRole::ListDelimiter) {
assert(isa<Leaf>(C));
- assert(cast<Leaf>(C).getToken()->kind() == L->getDelimiterTokenKind());
+ // FIXME: re-enable it when there is way to retrieve token kind in Leaf.
+ // assert(cast<Leaf>(C).getToken()->kind() == L->getDelimiterTokenKind());
}
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Tooling.cpp b/contrib/llvm-project/clang/lib/Tooling/Tooling.cpp
index 5242134097da..c5c3cdb47e92 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Tooling.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Tooling.cpp
@@ -43,14 +43,15 @@
#include "llvm/Option/OptTable.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Host.h"
#include <cassert>
#include <cstring>
#include <memory>
@@ -98,7 +99,7 @@ static bool ignoreExtraCC1Commands(const driver::Compilation *Compilation) {
OffloadCompilation = true;
if (Jobs.size() > 1) {
- for (auto A : Actions){
+ for (auto *A : Actions){
// On MacOSX real actions may end up being wrapped in BindArchAction
if (isa<driver::BindArchAction>(A))
A = *A->input_begin();
@@ -146,6 +147,13 @@ getCC1Arguments(DiagnosticsEngine *Diagnostics,
if (IsCC1Command(Job) && llvm::all_of(Job.getInputInfos(), IsSrcFile))
CC1Jobs.push_back(&Job);
+ // If there are no jobs for source files, try checking again for a single job
+ // with any file type. This accepts a preprocessed file as input.
+ if (CC1Jobs.empty())
+ for (const driver::Command &Job : Jobs)
+ if (IsCC1Command(Job))
+ CC1Jobs.push_back(&Job);
+
if (CC1Jobs.empty() ||
(CC1Jobs.size() > 1 && !ignoreExtraCC1Commands(Compilation))) {
SmallString<256> error_msg;
@@ -161,7 +169,7 @@ getCC1Arguments(DiagnosticsEngine *Diagnostics,
/// Returns a clang build invocation initialized from the CC1 flags.
CompilerInvocation *newInvocation(DiagnosticsEngine *Diagnostics,
- const llvm::opt::ArgStringList &CC1Args,
+ ArrayRef<const char *> CC1Args,
const char *const BinaryName) {
assert(!CC1Args.empty() && "Must at least contain the program name!");
CompilerInvocation *Invocation = new CompilerInvocation;
@@ -247,15 +255,13 @@ llvm::Expected<std::string> getAbsolutePath(llvm::vfs::FileSystem &FS,
StringRef File) {
StringRef RelativePath(File);
// FIXME: Should '.\\' be accepted on Win32?
- if (RelativePath.startswith("./")) {
- RelativePath = RelativePath.substr(strlen("./"));
- }
+ RelativePath.consume_front("./");
SmallString<1024> AbsolutePath = RelativePath;
if (auto EC = FS.makeAbsolute(AbsolutePath))
return llvm::errorCodeToError(EC);
llvm::sys::path::native(AbsolutePath);
- return std::string(AbsolutePath.str());
+ return std::string(AbsolutePath);
}
std::string getAbsolutePath(StringRef File) {
@@ -268,14 +274,14 @@ void addTargetAndModeForProgramName(std::vector<std::string> &CommandLine,
return;
const auto &Table = driver::getDriverOptTable();
// --target=X
- const std::string TargetOPT =
+ StringRef TargetOPT =
Table.getOption(driver::options::OPT_target).getPrefixedName();
// -target X
- const std::string TargetOPTLegacy =
+ StringRef TargetOPTLegacy =
Table.getOption(driver::options::OPT_target_legacy_spelling)
.getPrefixedName();
// --driver-mode=X
- const std::string DriverModeOPT =
+ StringRef DriverModeOPT =
Table.getOption(driver::options::OPT_driver_mode).getPrefixedName();
auto TargetMode =
driver::ToolChain::getTargetAndModeFromProgramName(InvokedAs);
@@ -286,17 +292,42 @@ void addTargetAndModeForProgramName(std::vector<std::string> &CommandLine,
for (auto Token = ++CommandLine.begin(); Token != CommandLine.end();
++Token) {
StringRef TokenRef(*Token);
- ShouldAddTarget = ShouldAddTarget && !TokenRef.startswith(TargetOPT) &&
+ ShouldAddTarget = ShouldAddTarget && !TokenRef.starts_with(TargetOPT) &&
!TokenRef.equals(TargetOPTLegacy);
- ShouldAddMode = ShouldAddMode && !TokenRef.startswith(DriverModeOPT);
+ ShouldAddMode = ShouldAddMode && !TokenRef.starts_with(DriverModeOPT);
}
if (ShouldAddMode) {
CommandLine.insert(++CommandLine.begin(), TargetMode.DriverMode);
}
if (ShouldAddTarget) {
CommandLine.insert(++CommandLine.begin(),
- TargetOPT + TargetMode.TargetPrefix);
+ (TargetOPT + TargetMode.TargetPrefix).str());
+ }
+}
+
+void addExpandedResponseFiles(std::vector<std::string> &CommandLine,
+ llvm::StringRef WorkingDir,
+ llvm::cl::TokenizerCallback Tokenizer,
+ llvm::vfs::FileSystem &FS) {
+ bool SeenRSPFile = false;
+ llvm::SmallVector<const char *, 20> Argv;
+ Argv.reserve(CommandLine.size());
+ for (auto &Arg : CommandLine) {
+ Argv.push_back(Arg.c_str());
+ if (!Arg.empty())
+ SeenRSPFile |= Arg.front() == '@';
}
+ if (!SeenRSPFile)
+ return;
+ llvm::BumpPtrAllocator Alloc;
+ llvm::cl::ExpansionContext ECtx(Alloc, Tokenizer);
+ llvm::Error Err =
+ ECtx.setVFS(&FS).setCurrentDir(WorkingDir).expandResponseFiles(Argv);
+ if (Err)
+ llvm::errs() << Err;
+ // Don't assign directly, Argv aliases CommandLine.
+ std::vector<std::string> ExpandedArgv(Argv.begin(), Argv.end());
+ CommandLine = std::move(ExpandedArgv);
}
} // namespace tooling
@@ -339,27 +370,42 @@ ToolInvocation::~ToolInvocation() {
}
bool ToolInvocation::run() {
- std::vector<const char*> Argv;
+ llvm::opt::ArgStringList Argv;
for (const std::string &Str : CommandLine)
Argv.push_back(Str.c_str());
const char *const BinaryName = Argv[0];
- IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
- unsigned MissingArgIndex, MissingArgCount;
- llvm::opt::InputArgList ParsedArgs = driver::getDriverOptTable().ParseArgs(
- ArrayRef<const char *>(Argv).slice(1), MissingArgIndex, MissingArgCount);
- ParseDiagnosticArgs(*DiagOpts, ParsedArgs);
- TextDiagnosticPrinter DiagnosticPrinter(
- llvm::errs(), &*DiagOpts);
- DiagnosticsEngine Diagnostics(
- IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs()), &*DiagOpts,
- DiagConsumer ? DiagConsumer : &DiagnosticPrinter, false);
+
+ // Parse diagnostic options from the driver command-line only if none were
+ // explicitly set.
+ IntrusiveRefCntPtr<DiagnosticOptions> ParsedDiagOpts;
+ DiagnosticOptions *DiagOpts = this->DiagOpts;
+ if (!DiagOpts) {
+ ParsedDiagOpts = CreateAndPopulateDiagOpts(Argv);
+ DiagOpts = &*ParsedDiagOpts;
+ }
+
+ TextDiagnosticPrinter DiagnosticPrinter(llvm::errs(), DiagOpts);
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diagnostics =
+ CompilerInstance::createDiagnostics(
+ &*DiagOpts, DiagConsumer ? DiagConsumer : &DiagnosticPrinter, false);
// Although `Diagnostics` are used only for command-line parsing, the custom
// `DiagConsumer` might expect a `SourceManager` to be present.
- SourceManager SrcMgr(Diagnostics, *Files);
- Diagnostics.setSourceManager(&SrcMgr);
+ SourceManager SrcMgr(*Diagnostics, *Files);
+ Diagnostics->setSourceManager(&SrcMgr);
+
+ // We already have a cc1, just create an invocation.
+ if (CommandLine.size() >= 2 && CommandLine[1] == "-cc1") {
+ ArrayRef<const char *> CC1Args = ArrayRef(Argv).drop_front();
+ std::unique_ptr<CompilerInvocation> Invocation(
+ newInvocation(&*Diagnostics, CC1Args, BinaryName));
+ if (Diagnostics->hasErrorOccurred())
+ return false;
+ return Action->runInvocation(std::move(Invocation), Files,
+ std::move(PCHContainerOps), DiagConsumer);
+ }
const std::unique_ptr<driver::Driver> Driver(
- newDriver(&Diagnostics, BinaryName, &Files->getVirtualFileSystem()));
+ newDriver(&*Diagnostics, BinaryName, &Files->getVirtualFileSystem()));
// The "input file not found" diagnostics from the driver are useful.
// The driver is only aware of the VFS working directory, but some clients
// change this at the FileManager level instead.
@@ -367,15 +413,15 @@ bool ToolInvocation::run() {
if (!Files->getFileSystemOpts().WorkingDir.empty())
Driver->setCheckInputsExist(false);
const std::unique_ptr<driver::Compilation> Compilation(
- Driver->BuildCompilation(llvm::makeArrayRef(Argv)));
+ Driver->BuildCompilation(llvm::ArrayRef(Argv)));
if (!Compilation)
return false;
const llvm::opt::ArgStringList *const CC1Args = getCC1Arguments(
- &Diagnostics, Compilation.get());
+ &*Diagnostics, Compilation.get());
if (!CC1Args)
return false;
std::unique_ptr<CompilerInvocation> Invocation(
- newInvocation(&Diagnostics, *CC1Args, BinaryName));
+ newInvocation(&*Diagnostics, *CC1Args, BinaryName));
return runInvocation(BinaryName, Compilation.get(), std::move(Invocation),
std::move(PCHContainerOps));
}
@@ -459,7 +505,7 @@ static void injectResourceDir(CommandLineArguments &Args, const char *Argv0,
void *MainAddr) {
// Allow users to override the resource dir.
for (StringRef Arg : Args)
- if (Arg.startswith("-resource-dir"))
+ if (Arg.starts_with("-resource-dir"))
return;
// If there's no override in place add our resource dir.
@@ -501,15 +547,15 @@ int ClangTool::run(ToolAction *Action) {
// Remember the working directory in case we need to restore it.
std::string InitialWorkingDir;
- if (RestoreCWD) {
- if (auto CWD = OverlayFileSystem->getCurrentWorkingDirectory()) {
- InitialWorkingDir = std::move(*CWD);
- } else {
- llvm::errs() << "Could not get working directory: "
- << CWD.getError().message() << "\n";
- }
+ if (auto CWD = OverlayFileSystem->getCurrentWorkingDirectory()) {
+ InitialWorkingDir = std::move(*CWD);
+ } else {
+ llvm::errs() << "Could not get working directory: "
+ << CWD.getError().message() << "\n";
}
+ size_t NumOfTotalFiles = AbsolutePaths.size();
+ unsigned ProcessedFileCounter = 0;
for (llvm::StringRef File : AbsolutePaths) {
// Currently implementations of CompilationDatabase::getCompileCommands can
// change the state of the file system (e.g. prepare generated headers), so
@@ -565,7 +611,11 @@ int ClangTool::run(ToolAction *Action) {
// FIXME: We need a callback mechanism for the tool writer to output a
// customized message for each file.
- LLVM_DEBUG({ llvm::dbgs() << "Processing: " << File << ".\n"; });
+ if (NumOfTotalFiles > 1)
+ llvm::errs() << "[" + std::to_string(++ProcessedFileCounter) + "/" +
+ std::to_string(NumOfTotalFiles) +
+ "] Processing file " + File
+ << ".\n";
ToolInvocation Invocation(std::move(CommandLine), Action, Files.get(),
PCHContainerOps);
Invocation.setDiagnosticConsumer(DiagConsumer);
@@ -621,10 +671,6 @@ int ClangTool::buildASTs(std::vector<std::unique_ptr<ASTUnit>> &ASTs) {
return run(&Action);
}
-void ClangTool::setRestoreWorkingDir(bool RestoreCWD) {
- this->RestoreCWD = RestoreCWD;
-}
-
void ClangTool::setPrintErrorMessage(bool PrintErrorMessage) {
this->PrintErrorMessage = PrintErrorMessage;
}
@@ -669,7 +715,7 @@ std::unique_ptr<ASTUnit> buildASTFromCodeWithArgs(
if (!Invocation.run())
return nullptr;
-
+
assert(ASTs.size() == 1);
return std::move(ASTs[0]);
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/Parsing.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/Parsing.cpp
index 66fa04a15594..53a78e8df22a 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Transformer/Parsing.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/Parsing.cpp
@@ -14,11 +14,11 @@
#include "clang/Lex/Lexer.h"
#include "clang/Tooling/Transformer/RangeSelector.h"
#include "clang/Tooling/Transformer/SourceCode.h"
-#include "llvm/ADT/None.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/Error.h"
+#include <optional>
#include <string>
#include <utility>
#include <vector>
@@ -33,7 +33,6 @@ using namespace transformer;
// much as possible with the AST Matchers parsing.
namespace {
-using llvm::Error;
using llvm::Expected;
template <typename... Ts> using RangeSelectorOp = RangeSelector (*)(Ts...);
@@ -47,7 +46,7 @@ struct ParseState {
};
// Represents an intermediate result returned by a parsing function. Functions
-// that don't generate values should use `llvm::None`
+// that don't generate values should use `std::nullopt`
template <typename ResultType> struct ParseProgress {
ParseState State;
// Intermediate result generated by the Parser.
@@ -121,11 +120,11 @@ getBinaryRangeSelectors() {
}
template <typename Element>
-llvm::Optional<Element> findOptional(const llvm::StringMap<Element> &Map,
- llvm::StringRef Key) {
+std::optional<Element> findOptional(const llvm::StringMap<Element> &Map,
+ llvm::StringRef Key) {
auto it = Map.find(Key);
if (it == Map.end())
- return llvm::None;
+ return std::nullopt;
return it->second;
}
@@ -153,19 +152,19 @@ static StringRef consumeWhitespace(StringRef S) {
// Parses a single expected character \c c from \c State, skipping preceding
// whitespace. Error if the expected character isn't found.
-static ExpectedProgress<llvm::NoneType> parseChar(char c, ParseState State) {
+static ExpectedProgress<std::nullopt_t> parseChar(char c, ParseState State) {
State.Input = consumeWhitespace(State.Input);
if (State.Input.empty() || State.Input.front() != c)
return makeParseError(State,
("expected char not found: " + llvm::Twine(c)).str());
- return makeParseProgress(advance(State, 1), llvm::None);
+ return makeParseProgress(advance(State, 1), std::nullopt);
}
// Parses an identitifer "token" -- handles preceding whitespace.
static ExpectedProgress<std::string> parseId(ParseState State) {
State.Input = consumeWhitespace(State.Input);
auto Id = State.Input.take_while(
- [](char c) { return isASCII(c) && isIdentifierBody(c); });
+ [](char c) { return isASCII(c) && isAsciiIdentifierContinue(c); });
if (Id.empty())
return makeParseError(State, "failed to parse name");
return makeParseProgress(advance(State, Id.size()), Id.str());
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/RangeSelector.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/RangeSelector.cpp
index 753e89e0e1f3..7370baf01083 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Transformer/RangeSelector.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/RangeSelector.cpp
@@ -232,9 +232,11 @@ RangeSelector transformer::name(std::string ID) {
if (const auto *T = Node.get<TypeLoc>()) {
TypeLoc Loc = *T;
auto ET = Loc.getAs<ElaboratedTypeLoc>();
- if (!ET.isNull()) {
+ if (!ET.isNull())
Loc = ET.getNamedTypeLoc();
- }
+ if (auto SpecLoc = Loc.getAs<TemplateSpecializationTypeLoc>();
+ !SpecLoc.isNull())
+ return CharSourceRange::getTokenRange(SpecLoc.getTemplateNameLoc());
return CharSourceRange::getTokenRange(Loc.getSourceRange());
}
return typeError(ID, Node.getNodeKind(),
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/RewriteRule.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/RewriteRule.cpp
index 93bd7e91dba7..eefddc349404 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Transformer/RewriteRule.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/RewriteRule.cpp
@@ -13,7 +13,6 @@
#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Tooling/Transformer/SourceCode.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/Error.h"
@@ -39,8 +38,8 @@ translateEdits(const MatchResult &Result, ArrayRef<ASTEdit> ASTEdits) {
Expected<CharSourceRange> Range = E.TargetRange(Result);
if (!Range)
return Range.takeError();
- llvm::Optional<CharSourceRange> EditRange =
- tooling::getRangeForEdit(*Range, *Result.Context);
+ std::optional<CharSourceRange> EditRange =
+ tooling::getFileRangeForEdit(*Range, *Result.Context);
// FIXME: let user specify whether to treat this case as an error or ignore
// it as is currently done. This behavior is problematic in that it hides
// failures from bad ranges. Also, the behavior here differs from
@@ -50,17 +49,27 @@ translateEdits(const MatchResult &Result, ArrayRef<ASTEdit> ASTEdits) {
// produces a bad range, whereas the latter will simply ignore A.
if (!EditRange)
return SmallVector<Edit, 0>();
- auto Replacement = E.Replacement->eval(Result);
- if (!Replacement)
- return Replacement.takeError();
- auto Metadata = E.Metadata(Result);
- if (!Metadata)
- return Metadata.takeError();
transformer::Edit T;
T.Kind = E.Kind;
T.Range = *EditRange;
- T.Replacement = std::move(*Replacement);
- T.Metadata = std::move(*Metadata);
+ if (E.Replacement) {
+ auto Replacement = E.Replacement->eval(Result);
+ if (!Replacement)
+ return Replacement.takeError();
+ T.Replacement = std::move(*Replacement);
+ }
+ if (E.Note) {
+ auto Note = E.Note->eval(Result);
+ if (!Note)
+ return Note.takeError();
+ T.Note = std::move(*Note);
+ }
+ if (E.Metadata) {
+ auto Metadata = E.Metadata(Result);
+ if (!Metadata)
+ return Metadata.takeError();
+ T.Metadata = std::move(*Metadata);
+ }
Edits.push_back(std::move(T));
}
return Edits;
@@ -121,6 +130,13 @@ ASTEdit transformer::changeTo(RangeSelector Target, TextGenerator Replacement) {
return E;
}
+ASTEdit transformer::note(RangeSelector Anchor, TextGenerator Note) {
+ ASTEdit E;
+ E.TargetRange = transformer::before(Anchor);
+ E.Note = std::move(Note);
+ return E;
+}
+
namespace {
/// A \c TextGenerator that always returns a fixed string.
class SimpleTextGenerator : public MatchComputation<std::string> {
@@ -166,10 +182,26 @@ ASTEdit transformer::addInclude(RangeSelector Target, StringRef Header,
return E;
}
-RewriteRule transformer::makeRule(DynTypedMatcher M, EditGenerator Edits,
- TextGenerator Explanation) {
- return RewriteRule{{RewriteRule::Case{std::move(M), std::move(Edits),
- std::move(Explanation)}}};
+EditGenerator
+transformer::detail::makeEditGenerator(llvm::SmallVector<ASTEdit, 1> Edits) {
+ return editList(std::move(Edits));
+}
+
+EditGenerator transformer::detail::makeEditGenerator(ASTEdit Edit) {
+ return edit(std::move(Edit));
+}
+
+RewriteRule transformer::detail::makeRule(DynTypedMatcher M,
+ EditGenerator Edits) {
+ RewriteRule R;
+ R.Cases = {{std::move(M), std::move(Edits)}};
+ return R;
+}
+
+RewriteRule transformer::makeRule(ast_matchers::internal::DynTypedMatcher M,
+ std::initializer_list<ASTEdit> Edits) {
+ return detail::makeRule(std::move(M),
+ detail::makeEditGenerator(std::move(Edits)));
}
namespace {
@@ -247,9 +279,8 @@ public:
void run(const MatchFinder::MatchResult &Result) override {
if (!Edits)
return;
- transformer::RewriteRule::Case Case =
- transformer::detail::findSelectedCase(Result, Rule);
- auto Transformations = Case.Edits(Result);
+ size_t I = transformer::detail::findSelectedCase(Result, Rule);
+ auto Transformations = Rule.Cases[I].Edits(Result);
if (!Transformations) {
Edits = Transformations.takeError();
return;
@@ -325,7 +356,7 @@ EditGenerator transformer::rewriteDescendants(std::string NodeId,
};
}
-void transformer::addInclude(RewriteRule &Rule, StringRef Header,
+void transformer::addInclude(RewriteRuleBase &Rule, StringRef Header,
IncludeFormat Format) {
for (auto &Case : Rule.Cases)
Case.Edits = flatten(std::move(Case.Edits), addInclude(Header, Format));
@@ -366,7 +397,9 @@ static std::vector<DynTypedMatcher> taggedMatchers(
// Simply gathers the contents of the various rules into a single rule. The
// actual work to combine these into an ordered choice is deferred to matcher
// registration.
-RewriteRule transformer::applyFirst(ArrayRef<RewriteRule> Rules) {
+template <>
+RewriteRuleWith<void>
+transformer::applyFirst(ArrayRef<RewriteRuleWith<void>> Rules) {
RewriteRule R;
for (auto &Rule : Rules)
R.Cases.append(Rule.Cases.begin(), Rule.Cases.end());
@@ -374,12 +407,13 @@ RewriteRule transformer::applyFirst(ArrayRef<RewriteRule> Rules) {
}
std::vector<DynTypedMatcher>
-transformer::detail::buildMatchers(const RewriteRule &Rule) {
+transformer::detail::buildMatchers(const RewriteRuleBase &Rule) {
// Map the cases into buckets of matchers -- one for each "root" AST kind,
// which guarantees that they can be combined in a single anyOf matcher. Each
// case is paired with an identifying number that is converted to a string id
// in `taggedMatchers`.
- std::map<ASTNodeKind, SmallVector<std::pair<size_t, RewriteRule::Case>, 1>>
+ std::map<ASTNodeKind,
+ SmallVector<std::pair<size_t, RewriteRuleBase::Case>, 1>>
Buckets;
const SmallVectorImpl<RewriteRule::Case> &Cases = Rule.Cases;
for (int I = 0, N = Cases.size(); I < N; ++I) {
@@ -405,7 +439,7 @@ transformer::detail::buildMatchers(const RewriteRule &Rule) {
return Matchers;
}
-DynTypedMatcher transformer::detail::buildMatcher(const RewriteRule &Rule) {
+DynTypedMatcher transformer::detail::buildMatcher(const RewriteRuleBase &Rule) {
std::vector<DynTypedMatcher> Ms = buildMatchers(Rule);
assert(Ms.size() == 1 && "Cases must have compatible matchers.");
return Ms[0];
@@ -415,7 +449,7 @@ SourceLocation transformer::detail::getRuleMatchLoc(const MatchResult &Result) {
auto &NodesMap = Result.Nodes.getMap();
auto Root = NodesMap.find(RootID);
assert(Root != NodesMap.end() && "Transformation failed: missing root node.");
- llvm::Optional<CharSourceRange> RootRange = tooling::getRangeForEdit(
+ std::optional<CharSourceRange> RootRange = tooling::getFileRangeForEdit(
CharSourceRange::getTokenRange(Root->second.getSourceRange()),
*Result.Context);
if (RootRange)
@@ -428,19 +462,16 @@ SourceLocation transformer::detail::getRuleMatchLoc(const MatchResult &Result) {
// Finds the case that was "selected" -- that is, whose matcher triggered the
// `MatchResult`.
-const RewriteRule::Case &
-transformer::detail::findSelectedCase(const MatchResult &Result,
- const RewriteRule &Rule) {
+size_t transformer::detail::findSelectedCase(const MatchResult &Result,
+ const RewriteRuleBase &Rule) {
if (Rule.Cases.size() == 1)
- return Rule.Cases[0];
+ return 0;
auto &NodesMap = Result.Nodes.getMap();
for (size_t i = 0, N = Rule.Cases.size(); i < N; ++i) {
std::string Tag = ("Tag" + Twine(i)).str();
if (NodesMap.find(Tag) != NodesMap.end())
- return Rule.Cases[i];
+ return i;
}
llvm_unreachable("No tag found for this rule.");
}
-
-const llvm::StringRef RewriteRule::RootID = ::clang::transformer::RootID;
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCode.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCode.cpp
index 26b204851f05..6aae834b0db5 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCode.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCode.cpp
@@ -50,8 +50,9 @@ CharSourceRange clang::tooling::maybeExtendRange(CharSourceRange Range,
return CharSourceRange::getTokenRange(Range.getBegin(), Tok.getLocation());
}
-llvm::Error clang::tooling::validateEditRange(const CharSourceRange &Range,
- const SourceManager &SM) {
+llvm::Error clang::tooling::validateRange(const CharSourceRange &Range,
+ const SourceManager &SM,
+ bool AllowSystemHeaders) {
if (Range.isInvalid())
return llvm::make_error<StringError>(errc::invalid_argument,
"Invalid range");
@@ -60,10 +61,12 @@ llvm::Error clang::tooling::validateEditRange(const CharSourceRange &Range,
return llvm::make_error<StringError>(
errc::invalid_argument, "Range starts or ends in a macro expansion");
- if (SM.isInSystemHeader(Range.getBegin()) ||
- SM.isInSystemHeader(Range.getEnd()))
- return llvm::make_error<StringError>(errc::invalid_argument,
- "Range is in system header");
+ if (!AllowSystemHeaders) {
+ if (SM.isInSystemHeader(Range.getBegin()) ||
+ SM.isInSystemHeader(Range.getEnd()))
+ return llvm::make_error<StringError>(errc::invalid_argument,
+ "Range is in system header");
+ }
std::pair<FileID, unsigned> BeginInfo = SM.getDecomposedLoc(Range.getBegin());
std::pair<FileID, unsigned> EndInfo = SM.getDecomposedLoc(Range.getEnd());
@@ -72,33 +75,74 @@ llvm::Error clang::tooling::validateEditRange(const CharSourceRange &Range,
errc::invalid_argument, "Range begins and ends in different files");
if (BeginInfo.second > EndInfo.second)
- return llvm::make_error<StringError>(
- errc::invalid_argument, "Range's begin is past its end");
+ return llvm::make_error<StringError>(errc::invalid_argument,
+ "Range's begin is past its end");
return llvm::Error::success();
}
-llvm::Optional<CharSourceRange>
-clang::tooling::getRangeForEdit(const CharSourceRange &EditRange,
+llvm::Error clang::tooling::validateEditRange(const CharSourceRange &Range,
+ const SourceManager &SM) {
+ return validateRange(Range, SM, /*AllowSystemHeaders=*/false);
+}
+
+static bool spelledInMacroDefinition(SourceLocation Loc,
+ const SourceManager &SM) {
+ while (Loc.isMacroID()) {
+ const auto &Expansion = SM.getSLocEntry(SM.getFileID(Loc)).getExpansion();
+ if (Expansion.isMacroArgExpansion()) {
+ // Check the spelling location of the macro arg, in case the arg itself is
+ // in a macro expansion.
+ Loc = Expansion.getSpellingLoc();
+ } else {
+ return true;
+ }
+ }
+ return false;
+}
+
+static CharSourceRange getRange(const CharSourceRange &EditRange,
const SourceManager &SM,
- const LangOptions &LangOpts) {
- // FIXME: makeFileCharRange() has the disadvantage of stripping off "identity"
- // macros. For example, if we're looking to rewrite the int literal 3 to 6,
- // and we have the following definition:
- // #define DO_NOTHING(x) x
- // then
- // foo(DO_NOTHING(3))
- // will be rewritten to
- // foo(6)
- // rather than the arguably better
- // foo(DO_NOTHING(6))
- // Decide whether the current behavior is desirable and modify if not.
- CharSourceRange Range = Lexer::makeFileCharRange(EditRange, SM, LangOpts);
+ const LangOptions &LangOpts,
+ bool IncludeMacroExpansion) {
+ CharSourceRange Range;
+ if (IncludeMacroExpansion) {
+ Range = Lexer::makeFileCharRange(EditRange, SM, LangOpts);
+ } else {
+ if (spelledInMacroDefinition(EditRange.getBegin(), SM) ||
+ spelledInMacroDefinition(EditRange.getEnd(), SM))
+ return {};
+
+ auto B = SM.getSpellingLoc(EditRange.getBegin());
+ auto E = SM.getSpellingLoc(EditRange.getEnd());
+ if (EditRange.isTokenRange())
+ E = Lexer::getLocForEndOfToken(E, 0, SM, LangOpts);
+ Range = CharSourceRange::getCharRange(B, E);
+ }
+ return Range;
+}
+
+std::optional<CharSourceRange> clang::tooling::getFileRangeForEdit(
+ const CharSourceRange &EditRange, const SourceManager &SM,
+ const LangOptions &LangOpts, bool IncludeMacroExpansion) {
+ CharSourceRange Range =
+ getRange(EditRange, SM, LangOpts, IncludeMacroExpansion);
bool IsInvalid = llvm::errorToBool(validateEditRange(Range, SM));
if (IsInvalid)
- return llvm::None;
+ return std::nullopt;
return Range;
+}
+std::optional<CharSourceRange> clang::tooling::getFileRange(
+ const CharSourceRange &EditRange, const SourceManager &SM,
+ const LangOptions &LangOpts, bool IncludeMacroExpansion) {
+ CharSourceRange Range =
+ getRange(EditRange, SM, LangOpts, IncludeMacroExpansion);
+ bool IsInvalid =
+ llvm::errorToBool(validateRange(Range, SM, /*AllowSystemHeaders=*/true));
+ if (IsInvalid)
+ return std::nullopt;
+ return Range;
}
static bool startsWithNewline(const SourceManager &SM, const Token &Tok) {
@@ -381,7 +425,7 @@ CharSourceRange tooling::getAssociatedRange(const Decl &Decl,
for (llvm::StringRef Prefix : {"[[", "__attribute__(("}) {
// Handle whitespace between attribute prefix and attribute value.
- if (BeforeAttrStripped.endswith(Prefix)) {
+ if (BeforeAttrStripped.ends_with(Prefix)) {
// Move start to start position of prefix, which is
// length(BeforeAttr) - length(BeforeAttrStripped) + length(Prefix)
// positions to the left.
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCodeBuilders.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCodeBuilders.cpp
index a1c99b60216b..10588a383da0 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCodeBuilders.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/SourceCodeBuilders.cpp
@@ -10,6 +10,8 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/Tooling/Transformer/SourceCode.h"
#include "llvm/ADT/Twine.h"
#include <string>
@@ -60,17 +62,27 @@ bool tooling::needParensAfterUnaryOperator(const Expr &E) {
return false;
}
-llvm::Optional<std::string> tooling::buildParens(const Expr &E,
- const ASTContext &Context) {
+bool tooling::isKnownPointerLikeType(QualType Ty, ASTContext &Context) {
+ using namespace ast_matchers;
+ const auto PointerLikeTy = type(hasUnqualifiedDesugaredType(
+ recordType(hasDeclaration(cxxRecordDecl(hasAnyName(
+ "::std::unique_ptr", "::std::shared_ptr", "::std::weak_ptr",
+ "::std::optional", "::absl::optional", "::llvm::Optional",
+ "absl::StatusOr", "::llvm::Expected"))))));
+ return match(PointerLikeTy, Ty, Context).size() > 0;
+}
+
+std::optional<std::string> tooling::buildParens(const Expr &E,
+ const ASTContext &Context) {
StringRef Text = getText(E, Context);
if (Text.empty())
- return llvm::None;
+ return std::nullopt;
if (mayEverNeedParens(E))
return ("(" + Text + ")").str();
return Text.str();
}
-llvm::Optional<std::string>
+std::optional<std::string>
tooling::buildDereference(const Expr &E, const ASTContext &Context) {
if (const auto *Op = dyn_cast<UnaryOperator>(&E))
if (Op->getOpcode() == UO_AddrOf) {
@@ -78,21 +90,21 @@ tooling::buildDereference(const Expr &E, const ASTContext &Context) {
StringRef Text =
getText(*Op->getSubExpr()->IgnoreParenImpCasts(), Context);
if (Text.empty())
- return llvm::None;
+ return std::nullopt;
return Text.str();
}
StringRef Text = getText(E, Context);
if (Text.empty())
- return llvm::None;
+ return std::nullopt;
// Add leading '*'.
if (needParensAfterUnaryOperator(E))
return ("*(" + Text + ")").str();
return ("*" + Text).str();
}
-llvm::Optional<std::string> tooling::buildAddressOf(const Expr &E,
- const ASTContext &Context) {
+std::optional<std::string> tooling::buildAddressOf(const Expr &E,
+ const ASTContext &Context) {
if (E.isImplicitCXXThis())
return std::string("this");
if (const auto *Op = dyn_cast<UnaryOperator>(&E))
@@ -101,28 +113,30 @@ llvm::Optional<std::string> tooling::buildAddressOf(const Expr &E,
StringRef Text =
getText(*Op->getSubExpr()->IgnoreParenImpCasts(), Context);
if (Text.empty())
- return llvm::None;
+ return std::nullopt;
return Text.str();
}
// Add leading '&'.
StringRef Text = getText(E, Context);
if (Text.empty())
- return llvm::None;
+ return std::nullopt;
if (needParensAfterUnaryOperator(E)) {
return ("&(" + Text + ")").str();
}
return ("&" + Text).str();
}
-llvm::Optional<std::string> tooling::buildDot(const Expr &E,
- const ASTContext &Context) {
+// Append the appropriate access operation (syntactically) to `E`, assuming `E`
+// is a non-pointer value.
+static std::optional<std::string>
+buildAccessForValue(const Expr &E, const ASTContext &Context) {
if (const auto *Op = llvm::dyn_cast<UnaryOperator>(&E))
if (Op->getOpcode() == UO_Deref) {
// Strip leading '*', add following '->'.
const Expr *SubExpr = Op->getSubExpr()->IgnoreParenImpCasts();
StringRef DerefText = getText(*SubExpr, Context);
if (DerefText.empty())
- return llvm::None;
+ return std::nullopt;
if (needParensBeforeDotOrArrow(*SubExpr))
return ("(" + DerefText + ")->").str();
return (DerefText + "->").str();
@@ -131,22 +145,24 @@ llvm::Optional<std::string> tooling::buildDot(const Expr &E,
// Add following '.'.
StringRef Text = getText(E, Context);
if (Text.empty())
- return llvm::None;
+ return std::nullopt;
if (needParensBeforeDotOrArrow(E)) {
return ("(" + Text + ").").str();
}
return (Text + ".").str();
}
-llvm::Optional<std::string> tooling::buildArrow(const Expr &E,
- const ASTContext &Context) {
+// Append the appropriate access operation (syntactically) to `E`, assuming `E`
+// is a pointer value.
+static std::optional<std::string>
+buildAccessForPointer(const Expr &E, const ASTContext &Context) {
if (const auto *Op = llvm::dyn_cast<UnaryOperator>(&E))
if (Op->getOpcode() == UO_AddrOf) {
// Strip leading '&', add following '.'.
const Expr *SubExpr = Op->getSubExpr()->IgnoreParenImpCasts();
StringRef DerefText = getText(*SubExpr, Context);
if (DerefText.empty())
- return llvm::None;
+ return std::nullopt;
if (needParensBeforeDotOrArrow(*SubExpr))
return ("(" + DerefText + ").").str();
return (DerefText + ".").str();
@@ -155,8 +171,69 @@ llvm::Optional<std::string> tooling::buildArrow(const Expr &E,
// Add following '->'.
StringRef Text = getText(E, Context);
if (Text.empty())
- return llvm::None;
+ return std::nullopt;
if (needParensBeforeDotOrArrow(E))
return ("(" + Text + ")->").str();
return (Text + "->").str();
}
+
+std::optional<std::string> tooling::buildDot(const Expr &E,
+ const ASTContext &Context) {
+ return buildAccessForValue(E, Context);
+}
+
+std::optional<std::string> tooling::buildArrow(const Expr &E,
+ const ASTContext &Context) {
+ return buildAccessForPointer(E, Context);
+}
+
+// If `E` is an overloaded-operator call of kind `K` on an object `O`, returns
+// `O`. Otherwise, returns `nullptr`.
+static const Expr *maybeGetOperatorObjectArg(const Expr &E,
+ OverloadedOperatorKind K) {
+ if (const auto *OpCall = dyn_cast<clang::CXXOperatorCallExpr>(&E)) {
+ if (OpCall->getOperator() == K && OpCall->getNumArgs() == 1)
+ return OpCall->getArg(0);
+ }
+ return nullptr;
+}
+
+static bool treatLikePointer(QualType Ty, PLTClass C, ASTContext &Context) {
+ switch (C) {
+ case PLTClass::Value:
+ return false;
+ case PLTClass::Pointer:
+ return isKnownPointerLikeType(Ty, Context);
+ }
+ llvm_unreachable("Unknown PLTClass enum");
+}
+
+// FIXME: move over the other `maybe` functionality from Stencil. Should all be
+// in one place.
+std::optional<std::string> tooling::buildAccess(const Expr &RawExpression,
+ ASTContext &Context,
+ PLTClass Classification) {
+ if (RawExpression.isImplicitCXXThis())
+ // Return the empty string, because `std::nullopt` signifies some sort of
+ // failure.
+ return std::string();
+
+ const Expr *E = RawExpression.IgnoreImplicitAsWritten();
+
+ if (E->getType()->isAnyPointerType() ||
+ treatLikePointer(E->getType(), Classification, Context)) {
+ // Strip off operator-> calls. They can only occur inside an actual arrow
+ // member access, so we treat them as equivalent to an actual object
+ // expression.
+ if (const auto *Obj = maybeGetOperatorObjectArg(*E, clang::OO_Arrow))
+ E = Obj;
+ return buildAccessForPointer(*E, Context);
+ }
+
+ if (const auto *Obj = maybeGetOperatorObjectArg(*E, clang::OO_Star)) {
+ if (treatLikePointer(Obj->getType(), Classification, Context))
+ return buildAccessForPointer(*Obj, Context);
+ };
+
+ return buildAccessForValue(*E, Context);
+}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp
index 4dc3544bb06d..d91c9e0a20cc 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp
@@ -11,7 +11,6 @@
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Expr.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
-#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Lex/Lexer.h"
#include "clang/Tooling/Transformer/SourceCode.h"
@@ -27,14 +26,15 @@
using namespace clang;
using namespace transformer;
+using ast_matchers::BoundNodes;
using ast_matchers::MatchFinder;
using llvm::errc;
using llvm::Error;
using llvm::Expected;
using llvm::StringError;
-static llvm::Expected<DynTypedNode>
-getNode(const ast_matchers::BoundNodes &Nodes, StringRef Id) {
+static llvm::Expected<DynTypedNode> getNode(const BoundNodes &Nodes,
+ StringRef Id) {
auto &NodesMap = Nodes.getMap();
auto It = NodesMap.find(Id);
if (It == NodesMap.end())
@@ -55,39 +55,6 @@ static Error printNode(StringRef Id, const MatchFinder::MatchResult &Match,
return Error::success();
}
-// FIXME: Consider memoizing this function using the `ASTContext`.
-static bool isSmartPointerType(QualType Ty, ASTContext &Context) {
- using namespace ::clang::ast_matchers;
-
- // Optimization: hard-code common smart-pointer types. This can/should be
- // removed if we start caching the results of this function.
- auto KnownSmartPointer =
- cxxRecordDecl(hasAnyName("::std::unique_ptr", "::std::shared_ptr"));
- const auto QuacksLikeASmartPointer = cxxRecordDecl(
- hasMethod(cxxMethodDecl(hasOverloadedOperatorName("->"),
- returns(qualType(pointsTo(type()))))),
- hasMethod(cxxMethodDecl(hasOverloadedOperatorName("*"),
- returns(qualType(references(type()))))));
- const auto SmartPointer = qualType(hasDeclaration(
- cxxRecordDecl(anyOf(KnownSmartPointer, QuacksLikeASmartPointer))));
- return match(SmartPointer, Ty, Context).size() > 0;
-}
-
-// Identifies use of `operator*` on smart pointers, and returns the underlying
-// smart-pointer expression; otherwise, returns null.
-static const Expr *isSmartDereference(const Expr &E, ASTContext &Context) {
- using namespace ::clang::ast_matchers;
-
- const auto HasOverloadedArrow = cxxRecordDecl(hasMethod(cxxMethodDecl(
- hasOverloadedOperatorName("->"), returns(qualType(pointsTo(type()))))));
- // Verify it is a smart pointer by finding `operator->` in the class
- // declaration.
- auto Deref = cxxOperatorCallExpr(
- hasOverloadedOperatorName("*"), hasUnaryOperand(expr().bind("arg")),
- callee(cxxMethodDecl(ofClass(HasOverloadedArrow))));
- return selectFirst<Expr>("arg", match(Deref, E, Context));
-}
-
namespace {
// An arbitrary fragment of code within a stencil.
class RawTextStencil : public StencilInterface {
@@ -185,7 +152,7 @@ public:
if (E == nullptr)
return llvm::make_error<StringError>(errc::invalid_argument,
"Id not bound or not Expr: " + Id);
- llvm::Optional<std::string> Source;
+ std::optional<std::string> Source;
switch (Op) {
case UnaryNodeOperator::Parens:
Source = tooling::buildParens(*E, *Match.Context);
@@ -195,7 +162,7 @@ public:
break;
case UnaryNodeOperator::MaybeDeref:
if (E->getType()->isAnyPointerType() ||
- isSmartPointerType(E->getType(), *Match.Context)) {
+ tooling::isKnownPointerLikeType(E->getType(), *Match.Context)) {
// Strip off any operator->. This can only occur inside an actual arrow
// member access, so we treat it as equivalent to an actual object
// expression.
@@ -215,7 +182,7 @@ public:
break;
case UnaryNodeOperator::MaybeAddressOf:
if (E->getType()->isAnyPointerType() ||
- isSmartPointerType(E->getType(), *Match.Context)) {
+ tooling::isKnownPointerLikeType(E->getType(), *Match.Context)) {
// Strip off any operator->. This can only occur inside an actual arrow
// member access, so we treat it as equivalent to an actual object
// expression.
@@ -262,8 +229,8 @@ public:
// Validate the original range to attempt to get a meaningful error
// message. If it's valid, then something else is the cause and we just
// return the generic failure message.
- if (auto Err =
- tooling::validateEditRange(*RawRange, *Match.SourceManager))
+ if (auto Err = tooling::validateRange(*RawRange, *Match.SourceManager,
+ /*AllowSystemHeaders=*/true))
return handleErrors(std::move(Err), [](std::unique_ptr<StringError> E) {
assert(E->convertToErrorCode() ==
llvm::make_error_code(errc::invalid_argument) &&
@@ -278,8 +245,9 @@ public:
"selected range could not be resolved to a valid source range");
}
// Validate `Range`, because `makeFileCharRange` accepts some ranges that
- // `validateEditRange` rejects.
- if (auto Err = tooling::validateEditRange(Range, *Match.SourceManager))
+ // `validateRange` rejects.
+ if (auto Err = tooling::validateRange(Range, *Match.SourceManager,
+ /*AllowSystemHeaders=*/true))
return joinErrors(
llvm::createStringError(errc::invalid_argument,
"selected range is not valid for editing"),
@@ -310,34 +278,12 @@ public:
if (E == nullptr)
return llvm::make_error<StringError>(errc::invalid_argument,
"Id not bound: " + BaseId);
- if (!E->isImplicitCXXThis()) {
- llvm::Optional<std::string> S;
- if (E->getType()->isAnyPointerType() ||
- isSmartPointerType(E->getType(), *Match.Context)) {
- // Strip off any operator->. This can only occur inside an actual arrow
- // member access, so we treat it as equivalent to an actual object
- // expression.
- if (const auto *OpCall = dyn_cast<clang::CXXOperatorCallExpr>(E)) {
- if (OpCall->getOperator() == clang::OO_Arrow &&
- OpCall->getNumArgs() == 1) {
- E = OpCall->getArg(0);
- }
- }
- S = tooling::buildArrow(*E, *Match.Context);
- } else if (const auto *Operand = isSmartDereference(*E, *Match.Context)) {
- // `buildDot` already handles the built-in dereference operator, so we
- // only need to catch overloaded `operator*`.
- S = tooling::buildArrow(*Operand, *Match.Context);
- } else {
- S = tooling::buildDot(*E, *Match.Context);
- }
- if (S.hasValue())
- *Result += *S;
- else
- return llvm::make_error<StringError>(
- errc::invalid_argument,
- "Could not construct object text from ID: " + BaseId);
- }
+ std::optional<std::string> S = tooling::buildAccess(*E, *Match.Context);
+ if (!S)
+ return llvm::make_error<StringError>(
+ errc::invalid_argument,
+ "Could not construct object text from ID: " + BaseId);
+ *Result += *S;
return Member->eval(Match, Result);
}
};
@@ -366,6 +312,73 @@ public:
}
};
+class SelectBoundStencil : public clang::transformer::StencilInterface {
+ static bool containsNoNullStencils(
+ const std::vector<std::pair<std::string, Stencil>> &Cases) {
+ for (const auto &S : Cases)
+ if (S.second == nullptr)
+ return false;
+ return true;
+ }
+
+public:
+ SelectBoundStencil(std::vector<std::pair<std::string, Stencil>> Cases,
+ Stencil Default)
+ : CaseStencils(std::move(Cases)), DefaultStencil(std::move(Default)) {
+ assert(containsNoNullStencils(CaseStencils) &&
+ "cases of selectBound may not be null");
+ }
+ ~SelectBoundStencil() override {}
+
+ llvm::Error eval(const MatchFinder::MatchResult &match,
+ std::string *result) const override {
+ const BoundNodes::IDToNodeMap &NodeMap = match.Nodes.getMap();
+ for (const auto &S : CaseStencils) {
+ if (NodeMap.count(S.first) > 0) {
+ return S.second->eval(match, result);
+ }
+ }
+
+ if (DefaultStencil != nullptr) {
+ return DefaultStencil->eval(match, result);
+ }
+
+ llvm::SmallVector<llvm::StringRef, 2> CaseIDs;
+ CaseIDs.reserve(CaseStencils.size());
+ for (const auto &S : CaseStencils)
+ CaseIDs.emplace_back(S.first);
+
+ return llvm::createStringError(
+ errc::result_out_of_range,
+ llvm::Twine("selectBound failed: no cases bound and no default: {") +
+ llvm::join(CaseIDs, ", ") + "}");
+ }
+
+ std::string toString() const override {
+ std::string Buffer;
+ llvm::raw_string_ostream Stream(Buffer);
+ Stream << "selectBound({";
+ bool First = true;
+ for (const auto &S : CaseStencils) {
+ if (First)
+ First = false;
+ else
+ Stream << "}, ";
+ Stream << "{\"" << S.first << "\", " << S.second->toString();
+ }
+ Stream << "}}";
+ if (DefaultStencil != nullptr) {
+ Stream << ", " << DefaultStencil->toString();
+ }
+ Stream << ")";
+ return Stream.str();
+ }
+
+private:
+ std::vector<std::pair<std::string, Stencil>> CaseStencils;
+ Stencil DefaultStencil;
+};
+
class SequenceStencil : public StencilInterface {
std::vector<Stencil> Stencils;
@@ -462,6 +475,13 @@ Stencil transformer::ifBound(StringRef Id, Stencil TrueStencil,
std::move(FalseStencil));
}
+Stencil transformer::selectBound(
+ std::vector<std::pair<std::string, Stencil>> CaseStencils,
+ Stencil DefaultStencil) {
+ return std::make_shared<SelectBoundStencil>(std::move(CaseStencils),
+ std::move(DefaultStencil));
+}
+
Stencil transformer::run(MatchConsumer<std::string> Fn) {
return std::make_shared<RunStencil>(std::move(Fn));
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/Transformer.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/Transformer.cpp
index 7a4d8b45f189..f95f2ab7d954 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Transformer/Transformer.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/Transformer.cpp
@@ -16,35 +16,29 @@
#include <utility>
#include <vector>
-using namespace clang;
-using namespace tooling;
+namespace clang {
+namespace tooling {
-using ast_matchers::MatchFinder;
+using ::clang::ast_matchers::MatchFinder;
-void Transformer::registerMatchers(MatchFinder *MatchFinder) {
- for (auto &Matcher : transformer::detail::buildMatchers(Rule))
- MatchFinder->addDynamicMatcher(Matcher, this);
-}
+namespace detail {
-void Transformer::run(const MatchFinder::MatchResult &Result) {
+void TransformerImpl::onMatch(
+ const ast_matchers::MatchFinder::MatchResult &Result) {
if (Result.Context->getDiagnostics().hasErrorOccurred())
return;
- transformer::RewriteRule::Case Case =
- transformer::detail::findSelectedCase(Result, Rule);
- auto Transformations = Case.Edits(Result);
- if (!Transformations) {
- Consumer(Transformations.takeError());
- return;
- }
-
- if (Transformations->empty())
- return;
+ onMatchImpl(Result);
+}
+llvm::Expected<llvm::SmallVector<AtomicChange, 1>>
+TransformerImpl::convertToAtomicChanges(
+ const llvm::SmallVectorImpl<transformer::Edit> &Edits,
+ const MatchFinder::MatchResult &Result) {
// Group the transformations, by file, into AtomicChanges, each anchored by
// the location of the first change in that file.
std::map<FileID, AtomicChange> ChangesByFileID;
- for (const auto &T : *Transformations) {
+ for (const auto &T : Edits) {
auto ID = Result.SourceManager->getFileID(T.Range.getBegin());
auto Iter = ChangesByFileID
.emplace(ID, AtomicChange(*Result.SourceManager,
@@ -55,8 +49,7 @@ void Transformer::run(const MatchFinder::MatchResult &Result) {
case transformer::EditKind::Range:
if (auto Err =
AC.replace(*Result.SourceManager, T.Range, T.Replacement)) {
- Consumer(std::move(Err));
- return;
+ return std::move(Err);
}
break;
case transformer::EditKind::AddInclude:
@@ -65,6 +58,27 @@ void Transformer::run(const MatchFinder::MatchResult &Result) {
}
}
+ llvm::SmallVector<AtomicChange, 1> Changes;
+ Changes.reserve(ChangesByFileID.size());
for (auto &IDChangePair : ChangesByFileID)
- Consumer(std::move(IDChangePair.second));
+ Changes.push_back(std::move(IDChangePair.second));
+
+ return Changes;
+}
+
+} // namespace detail
+
+void Transformer::registerMatchers(MatchFinder *MatchFinder) {
+ for (auto &Matcher : Impl->buildMatchers())
+ MatchFinder->addDynamicMatcher(Matcher, this);
}
+
+void Transformer::run(const MatchFinder::MatchResult &Result) {
+ if (Result.Context->getDiagnostics().hasErrorOccurred())
+ return;
+
+ Impl->onMatch(Result);
+}
+
+} // namespace tooling
+} // namespace clang
diff --git a/contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArch.cpp b/contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArch.cpp
index 4fae78b4f121..7ae57b7877e1 100644
--- a/contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArch.cpp
+++ b/contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArch.cpp
@@ -6,73 +6,48 @@
//
//===----------------------------------------------------------------------===//
//
-// This file implements a tool for detecting name of AMDGPU installed in system
-// using HSA. This tool is used by AMDGPU OpenMP driver.
+// This file implements a tool for detecting name of AMDGPU installed in system.
+// This tool is used by AMDGPU OpenMP and HIP driver.
//
//===----------------------------------------------------------------------===//
-#if defined(__has_include)
-#if __has_include("hsa.h")
-#define HSA_HEADER_FOUND 1
-#include "hsa.h"
-#elif __has_include("hsa/hsa.h")
-#define HSA_HEADER_FOUND 1
-#include "hsa/hsa.h"
-#else
-#define HSA_HEADER_FOUND 0
-#endif
-#else
-#define HSA_HEADER_FOUND 0
-#endif
-
-#if !HSA_HEADER_FOUND
-int main() { return 1; }
-#else
+#include "clang/Basic/Version.h"
+#include "llvm/Support/CommandLine.h"
-#include <string>
-#include <vector>
+using namespace llvm;
-static hsa_status_t iterateAgentsCallback(hsa_agent_t Agent, void *Data) {
- hsa_device_type_t DeviceType;
- hsa_status_t Status =
- hsa_agent_get_info(Agent, HSA_AGENT_INFO_DEVICE, &DeviceType);
+static cl::opt<bool> Help("h", cl::desc("Alias for -help"), cl::Hidden);
- // continue only if device type if GPU
- if (Status != HSA_STATUS_SUCCESS || DeviceType != HSA_DEVICE_TYPE_GPU) {
- return Status;
- }
+// Mark all our options with this category.
+static cl::OptionCategory AMDGPUArchCategory("amdgpu-arch options");
- std::vector<std::string> *GPUs =
- static_cast<std::vector<std::string> *>(Data);
- char GPUName[64];
- Status = hsa_agent_get_info(Agent, HSA_AGENT_INFO_NAME, GPUName);
- if (Status != HSA_STATUS_SUCCESS) {
- return Status;
- }
- GPUs->push_back(GPUName);
- return HSA_STATUS_SUCCESS;
+static void PrintVersion(raw_ostream &OS) {
+ OS << clang::getClangToolFullVersion("amdgpu-arch") << '\n';
}
-int main() {
- hsa_status_t Status = hsa_init();
- if (Status != HSA_STATUS_SUCCESS) {
- return 1;
- }
-
- std::vector<std::string> GPUs;
- Status = hsa_iterate_agents(iterateAgentsCallback, &GPUs);
- if (Status != HSA_STATUS_SUCCESS) {
- return 1;
- }
+int printGPUsByHSA();
+int printGPUsByHIP();
- for (const auto &GPU : GPUs)
- printf("%s\n", GPU.c_str());
+int main(int argc, char *argv[]) {
+ cl::HideUnrelatedOptions(AMDGPUArchCategory);
- if (GPUs.size() < 1)
- return 1;
+ cl::SetVersionPrinter(PrintVersion);
+ cl::ParseCommandLineOptions(
+ argc, argv,
+ "A tool to detect the presence of AMDGPU devices on the system. \n\n"
+ "The tool will output each detected GPU architecture separated by a\n"
+ "newline character. If multiple GPUs of the same architecture are found\n"
+ "a string will be printed for each\n");
- hsa_shut_down();
- return 0;
-}
+ if (Help) {
+ cl::PrintHelpMessage();
+ return 0;
+ }
+#ifndef _WIN32
+ if (!printGPUsByHSA())
+ return 0;
#endif
+
+ return printGPUsByHIP();
+}
diff --git a/contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArchByHIP.cpp b/contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArchByHIP.cpp
new file mode 100644
index 000000000000..7c9071be0918
--- /dev/null
+++ b/contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArchByHIP.cpp
@@ -0,0 +1,96 @@
+//===- AMDGPUArch.cpp - list AMDGPU installed ----------*- C++ -*---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a tool for detecting name of AMDGPU installed in system
+// using HIP runtime. This tool is used by AMDGPU OpenMP and HIP driver.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+typedef struct {
+ char padding[396];
+ char gcnArchName[256];
+ char padding2[1024];
+} hipDeviceProp_t;
+
+typedef enum {
+ hipSuccess = 0,
+} hipError_t;
+
+typedef hipError_t (*hipGetDeviceCount_t)(int *);
+typedef hipError_t (*hipDeviceGet_t)(int *, int);
+typedef hipError_t (*hipGetDeviceProperties_t)(hipDeviceProp_t *, int);
+
+int printGPUsByHIP() {
+#ifdef _WIN32
+ constexpr const char *DynamicHIPPath = "amdhip64.dll";
+#else
+ constexpr const char *DynamicHIPPath = "libamdhip64.so";
+#endif
+
+ std::string ErrMsg;
+ auto DynlibHandle = std::make_unique<llvm::sys::DynamicLibrary>(
+ llvm::sys::DynamicLibrary::getPermanentLibrary(DynamicHIPPath, &ErrMsg));
+ if (!DynlibHandle->isValid()) {
+ llvm::errs() << "Failed to load " << DynamicHIPPath << ": " << ErrMsg
+ << '\n';
+ return 1;
+ }
+
+#define DYNAMIC_INIT_HIP(SYMBOL) \
+ { \
+ void *SymbolPtr = DynlibHandle->getAddressOfSymbol(#SYMBOL); \
+ if (!SymbolPtr) { \
+ llvm::errs() << "Failed to find symbol " << #SYMBOL << '\n'; \
+ return 1; \
+ } \
+ SYMBOL = reinterpret_cast<decltype(SYMBOL)>(SymbolPtr); \
+ }
+
+ hipGetDeviceCount_t hipGetDeviceCount;
+ hipDeviceGet_t hipDeviceGet;
+ hipGetDeviceProperties_t hipGetDeviceProperties;
+
+ DYNAMIC_INIT_HIP(hipGetDeviceCount);
+ DYNAMIC_INIT_HIP(hipDeviceGet);
+ DYNAMIC_INIT_HIP(hipGetDeviceProperties);
+
+#undef DYNAMIC_INIT_HIP
+
+ int deviceCount;
+ hipError_t err = hipGetDeviceCount(&deviceCount);
+ if (err != hipSuccess) {
+ llvm::errs() << "Failed to get device count\n";
+ return 1;
+ }
+
+ for (int i = 0; i < deviceCount; ++i) {
+ int deviceId;
+ err = hipDeviceGet(&deviceId, i);
+ if (err != hipSuccess) {
+ llvm::errs() << "Failed to get device id for ordinal " << i << '\n';
+ return 1;
+ }
+
+ hipDeviceProp_t prop;
+ err = hipGetDeviceProperties(&prop, deviceId);
+ if (err != hipSuccess) {
+ llvm::errs() << "Failed to get device properties for device " << deviceId
+ << '\n';
+ return 1;
+ }
+ llvm::outs() << prop.gcnArchName << '\n';
+ }
+
+ return 0;
+}
diff --git a/contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArchByHSA.cpp b/contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArchByHSA.cpp
new file mode 100644
index 000000000000..f82a4890f465
--- /dev/null
+++ b/contrib/llvm-project/clang/tools/amdgpu-arch/AMDGPUArchByHSA.cpp
@@ -0,0 +1,122 @@
+//===- AMDGPUArchLinux.cpp - list AMDGPU installed ------*- C++ -*---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a tool for detecting name of AMDGPU installed in system
+// using HSA on Linux. This tool is used by AMDGPU OpenMP and HIP driver.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/Version.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/raw_ostream.h"
+#include <memory>
+#include <string>
+#include <vector>
+
+using namespace llvm;
+
+typedef enum {
+ HSA_STATUS_SUCCESS = 0x0,
+} hsa_status_t;
+
+typedef enum {
+ HSA_DEVICE_TYPE_CPU = 0,
+ HSA_DEVICE_TYPE_GPU = 1,
+} hsa_device_type_t;
+
+typedef enum {
+ HSA_AGENT_INFO_NAME = 0,
+ HSA_AGENT_INFO_DEVICE = 17,
+} hsa_agent_info_t;
+
+typedef struct hsa_agent_s {
+ uint64_t handle;
+} hsa_agent_t;
+
+hsa_status_t (*hsa_init)();
+hsa_status_t (*hsa_shut_down)();
+hsa_status_t (*hsa_agent_get_info)(hsa_agent_t, hsa_agent_info_t, void *);
+hsa_status_t (*hsa_iterate_agents)(hsa_status_t (*)(hsa_agent_t, void *),
+ void *);
+
+constexpr const char *DynamicHSAPath = "libhsa-runtime64.so";
+
+llvm::Error loadHSA() {
+ std::string ErrMsg;
+ auto DynlibHandle = std::make_unique<llvm::sys::DynamicLibrary>(
+ llvm::sys::DynamicLibrary::getPermanentLibrary(DynamicHSAPath, &ErrMsg));
+ if (!DynlibHandle->isValid()) {
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "Failed to 'dlopen' %s", DynamicHSAPath);
+ }
+#define DYNAMIC_INIT(SYMBOL) \
+ { \
+ void *SymbolPtr = DynlibHandle->getAddressOfSymbol(#SYMBOL); \
+ if (!SymbolPtr) \
+ return llvm::createStringError(llvm::inconvertibleErrorCode(), \
+ "Failed to 'dlsym' " #SYMBOL); \
+ SYMBOL = reinterpret_cast<decltype(SYMBOL)>(SymbolPtr); \
+ }
+ DYNAMIC_INIT(hsa_init);
+ DYNAMIC_INIT(hsa_shut_down);
+ DYNAMIC_INIT(hsa_agent_get_info);
+ DYNAMIC_INIT(hsa_iterate_agents);
+#undef DYNAMIC_INIT
+ return llvm::Error::success();
+}
+
+static hsa_status_t iterateAgentsCallback(hsa_agent_t Agent, void *Data) {
+ hsa_device_type_t DeviceType;
+ hsa_status_t Status =
+ hsa_agent_get_info(Agent, HSA_AGENT_INFO_DEVICE, &DeviceType);
+
+ // continue only if device type if GPU
+ if (Status != HSA_STATUS_SUCCESS || DeviceType != HSA_DEVICE_TYPE_GPU) {
+ return Status;
+ }
+
+ std::vector<std::string> *GPUs =
+ static_cast<std::vector<std::string> *>(Data);
+ char GPUName[64];
+ Status = hsa_agent_get_info(Agent, HSA_AGENT_INFO_NAME, GPUName);
+ if (Status != HSA_STATUS_SUCCESS) {
+ return Status;
+ }
+ GPUs->push_back(GPUName);
+ return HSA_STATUS_SUCCESS;
+}
+
+int printGPUsByHSA() {
+ // Attempt to load the HSA runtime.
+ if (llvm::Error Err = loadHSA()) {
+ logAllUnhandledErrors(std::move(Err), llvm::errs());
+ return 1;
+ }
+
+ hsa_status_t Status = hsa_init();
+ if (Status != HSA_STATUS_SUCCESS) {
+ return 1;
+ }
+
+ std::vector<std::string> GPUs;
+ Status = hsa_iterate_agents(iterateAgentsCallback, &GPUs);
+ if (Status != HSA_STATUS_SUCCESS) {
+ return 1;
+ }
+
+ for (const auto &GPU : GPUs)
+ llvm::outs() << GPU << '\n';
+
+ if (GPUs.size() < 1)
+ return 1;
+
+ hsa_shut_down();
+ return 0;
+}
diff --git a/contrib/llvm-project/clang/tools/clang-format/ClangFormat.cpp b/contrib/llvm-project/clang/tools/clang-format/ClangFormat.cpp
index 144e87f78c64..e122cea50f72 100644
--- a/contrib/llvm-project/clang/tools/clang-format/ClangFormat.cpp
+++ b/contrib/llvm-project/clang/tools/clang-format/ClangFormat.cpp
@@ -12,6 +12,7 @@
///
//===----------------------------------------------------------------------===//
+#include "../../lib/Format/MatchFilePath.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/FileManager.h"
@@ -19,10 +20,12 @@
#include "clang/Basic/Version.h"
#include "clang/Format/Format.h"
#include "clang/Rewrite/Core/Rewriter.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/InitLLVM.h"
#include "llvm/Support/Process.h"
+#include <fstream>
using namespace llvm;
using clang::tooling::Replacements;
@@ -68,16 +71,29 @@ static cl::opt<std::string>
cl::desc("The name of the predefined style used as a\n"
"fallback in case clang-format is invoked with\n"
"-style=file, but can not find the .clang-format\n"
- "file to use.\n"
+ "file to use. Defaults to 'LLVM'.\n"
"Use -fallback-style=none to skip formatting."),
cl::init(clang::format::DefaultFallbackStyle),
cl::cat(ClangFormatCategory));
static cl::opt<std::string> AssumeFileName(
"assume-filename",
- cl::desc("Override filename used to determine the language.\n"
- "When reading from stdin, clang-format assumes this\n"
- "filename to determine the language."),
+ cl::desc("Set filename used to determine the language and to find\n"
+ ".clang-format file.\n"
+ "Only used when reading from stdin.\n"
+ "If this is not passed, the .clang-format file is searched\n"
+ "relative to the current working directory when reading stdin.\n"
+ "Unrecognized filenames are treated as C++.\n"
+ "supported:\n"
+ " CSharp: .cs\n"
+ " Java: .java\n"
+ " JavaScript: .mjs .js .ts\n"
+ " Json: .json\n"
+ " Objective-C: .m .mm\n"
+ " Proto: .proto .protodevel\n"
+ " TableGen: .td\n"
+ " TextProto: .textpb .pb.txt .textproto .asciipb\n"
+ " Verilog: .sv .svh .v .vh"),
cl::init("<stdin>"), cl::cat(ClangFormatCategory));
static cl::opt<bool> Inplace("i",
@@ -98,11 +114,22 @@ static cl::opt<unsigned>
"clang-format from an editor integration"),
cl::init(0), cl::cat(ClangFormatCategory));
-static cl::opt<bool> SortIncludes(
- "sort-includes",
- cl::desc("If set, overrides the include sorting behavior determined by the "
- "SortIncludes style flag"),
- cl::cat(ClangFormatCategory));
+static cl::opt<bool>
+ SortIncludes("sort-includes",
+ cl::desc("If set, overrides the include sorting behavior\n"
+ "determined by the SortIncludes style flag"),
+ cl::cat(ClangFormatCategory));
+
+static cl::opt<std::string> QualifierAlignment(
+ "qualifier-alignment",
+ cl::desc("If set, overrides the qualifier alignment style\n"
+ "determined by the QualifierAlignment style flag"),
+ cl::init(""), cl::cat(ClangFormatCategory));
+
+static cl::opt<std::string> Files(
+ "files",
+ cl::desc("A file containing a list of files to process, one per line."),
+ cl::value_desc("filename"), cl::init(""), cl::cat(ClangFormatCategory));
static cl::opt<bool>
Verbose("verbose", cl::desc("If set, shows the list of processed files"),
@@ -135,8 +162,9 @@ static cl::opt<bool>
static cl::opt<unsigned> ErrorLimit(
"ferror-limit",
- cl::desc("Set the maximum number of clang-format errors to emit before "
- "stopping (0 = no limit). Used only with --dry-run or -n"),
+ cl::desc("Set the maximum number of clang-format errors to emit\n"
+ "before stopping (0 = no limit).\n"
+ "Used only with --dry-run or -n"),
cl::init(0), cl::cat(ClangFormatCategory));
static cl::opt<bool>
@@ -173,7 +201,8 @@ static cl::opt<bool>
"whether or not to print diagnostics in color"),
cl::init(false), cl::cat(ClangFormatCategory), cl::Hidden);
-static cl::list<std::string> FileNames(cl::Positional, cl::desc("[<file> ...]"),
+static cl::list<std::string> FileNames(cl::Positional,
+ cl::desc("[@<file>] [<file> ...]"),
cl::cat(ClangFormatCategory));
namespace clang {
@@ -220,8 +249,12 @@ static bool fillRanges(MemoryBuffer *Code,
errs() << "error: invalid <start line>:<end line> pair\n";
return true;
}
+ if (FromLine < 1) {
+ errs() << "error: start line should be at least 1\n";
+ return true;
+ }
if (FromLine > ToLine) {
- errs() << "error: start line should be less than end line\n";
+ errs() << "error: start line should not exceed end line\n";
return true;
}
SourceLocation Start = Sources.translateLineCol(ID, FromLine, 1);
@@ -298,8 +331,7 @@ static void outputReplacementXML(StringRef Text) {
static void outputReplacementsXML(const Replacements &Replaces) {
for (const auto &R : Replaces) {
- outs() << "<replacement "
- << "offset='" << R.getOffset() << "' "
+ outs() << "<replacement " << "offset='" << R.getOffset() << "' "
<< "length='" << R.getLength() << "'>";
outputReplacementXML(R.getReplacementText());
outs() << "</replacement>\n";
@@ -345,17 +377,31 @@ static void outputXML(const Replacements &Replaces,
if (!Status.FormatComplete)
outs() << " line='" << Status.Line << "'";
outs() << ">\n";
- if (Cursor.getNumOccurrences() != 0)
+ if (Cursor.getNumOccurrences() != 0) {
outs() << "<cursor>" << FormatChanges.getShiftedCodePosition(CursorPosition)
<< "</cursor>\n";
+ }
outputReplacementsXML(Replaces);
outs() << "</replacements>\n";
}
+class ClangFormatDiagConsumer : public DiagnosticConsumer {
+ virtual void anchor() {}
+
+ void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info) override {
+
+ SmallVector<char, 16> vec;
+ Info.FormatDiagnostic(vec);
+ errs() << "clang-format error:" << vec << "\n";
+ }
+};
+
// Returns true on error.
static bool format(StringRef FileName) {
- if (!OutputXML && Inplace && FileName == "-") {
+ const bool IsSTDIN = FileName == "-";
+ if (!OutputXML && Inplace && IsSTDIN) {
errs() << "error: cannot use -i when reading from stdin.\n";
return false;
}
@@ -379,7 +425,7 @@ static bool format(StringRef FileName) {
if (InvalidBOM) {
errs() << "error: encoding with unsupported byte order mark \""
<< InvalidBOM << "\" detected";
- if (FileName != "-")
+ if (!IsSTDIN)
errs() << " in file '" << FileName << "'";
errs() << ".\n";
return true;
@@ -388,7 +434,7 @@ static bool format(StringRef FileName) {
std::vector<tooling::Range> Ranges;
if (fillRanges(Code.get(), Ranges))
return true;
- StringRef AssumedFileName = (FileName == "-") ? AssumeFileName : FileName;
+ StringRef AssumedFileName = IsSTDIN ? AssumeFileName : FileName;
if (AssumedFileName.empty()) {
llvm::errs() << "error: empty filenames are not allowed\n";
return true;
@@ -402,6 +448,27 @@ static bool format(StringRef FileName) {
return true;
}
+ StringRef QualifierAlignmentOrder = QualifierAlignment;
+
+ FormatStyle->QualifierAlignment =
+ StringSwitch<FormatStyle::QualifierAlignmentStyle>(
+ QualifierAlignmentOrder.lower())
+ .Case("right", FormatStyle::QAS_Right)
+ .Case("left", FormatStyle::QAS_Left)
+ .Default(FormatStyle->QualifierAlignment);
+
+ if (FormatStyle->QualifierAlignment == FormatStyle::QAS_Left) {
+ FormatStyle->QualifierOrder = {"const", "volatile", "type"};
+ } else if (FormatStyle->QualifierAlignment == FormatStyle::QAS_Right) {
+ FormatStyle->QualifierOrder = {"type", "const", "volatile"};
+ } else if (QualifierAlignmentOrder.contains("type")) {
+ FormatStyle->QualifierAlignment = FormatStyle::QAS_Custom;
+ SmallVector<StringRef> Qualifiers;
+ QualifierAlignmentOrder.split(Qualifiers, " ", /*MaxSplit=*/-1,
+ /*KeepEmpty=*/false);
+ FormatStyle->QualifierOrder = {Qualifiers.begin(), Qualifiers.end()};
+ }
+
if (SortIncludes.getNumOccurrences() != 0) {
if (SortIncludes)
FormatStyle->SortIncludes = FormatStyle::SI_CaseSensitive;
@@ -414,12 +481,11 @@ static bool format(StringRef FileName) {
// To format JSON insert a variable to trick the code into thinking its
// JavaScript.
- if (FormatStyle->isJson()) {
+ if (FormatStyle->isJson() && !FormatStyle->DisableFormat) {
auto Err = Replaces.add(tooling::Replacement(
tooling::Replacement(AssumedFileName, 0, 0, "x = ")));
- if (Err) {
+ if (Err)
llvm::errs() << "Bad Json variable insertion\n";
- }
}
auto ChangedCode = tooling::applyAllReplacements(Code->getBuffer(), Replaces);
@@ -434,18 +500,20 @@ static bool format(StringRef FileName) {
reformat(*FormatStyle, *ChangedCode, Ranges, AssumedFileName, &Status);
Replaces = Replaces.merge(FormatChanges);
if (OutputXML || DryRun) {
- if (DryRun) {
+ if (DryRun)
return emitReplacementWarnings(Replaces, AssumedFileName, Code);
- } else {
+ else
outputXML(Replaces, FormatChanges, Status, Cursor, CursorPosition);
- }
} else {
IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
new llvm::vfs::InMemoryFileSystem);
FileManager Files(FileSystemOptions(), InMemoryFileSystem);
+
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts(new DiagnosticOptions());
+ ClangFormatDiagConsumer IgnoreDiagnostics;
DiagnosticsEngine Diagnostics(
- IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs),
- new DiagnosticOptions);
+ IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs), &*DiagOpts,
+ &IgnoreDiagnostics, false);
SourceManager Sources(Diagnostics, Files);
FileID ID = createInMemoryFile(AssumedFileName, *Code, Sources, Files,
InMemoryFileSystem.get());
@@ -479,27 +547,25 @@ static void PrintVersion(raw_ostream &OS) {
// Dump the configuration.
static int dumpConfig() {
- StringRef FileName;
std::unique_ptr<llvm::MemoryBuffer> Code;
- if (FileNames.empty()) {
- // We can't read the code to detect the language if there's no
- // file name, so leave Code empty here.
- FileName = AssumeFileName;
- } else {
- // Read in the code in case the filename alone isn't enough to
- // detect the language.
+ // We can't read the code to detect the language if there's no file name.
+ if (!FileNames.empty()) {
+ // Read in the code in case the filename alone isn't enough to detect the
+ // language.
ErrorOr<std::unique_ptr<MemoryBuffer>> CodeOrErr =
MemoryBuffer::getFileOrSTDIN(FileNames[0]);
if (std::error_code EC = CodeOrErr.getError()) {
llvm::errs() << EC.message() << "\n";
return 1;
}
- FileName = (FileNames[0] == "-") ? AssumeFileName : FileNames[0];
Code = std::move(CodeOrErr.get());
}
llvm::Expected<clang::format::FormatStyle> FormatStyle =
- clang::format::getStyle(Style, FileName, FallbackStyle,
- Code ? Code->getBuffer() : "");
+ clang::format::getStyle(Style,
+ FileNames.empty() || FileNames[0] == "-"
+ ? AssumeFileName
+ : FileNames[0],
+ FallbackStyle, Code ? Code->getBuffer() : "");
if (!FormatStyle) {
llvm::errs() << llvm::toString(FormatStyle.takeError()) << "\n";
return 1;
@@ -509,6 +575,94 @@ static int dumpConfig() {
return 0;
}
+using String = SmallString<128>;
+static String IgnoreDir; // Directory of .clang-format-ignore file.
+static String PrevDir; // Directory of previous `FilePath`.
+static SmallVector<String> Patterns; // Patterns in .clang-format-ignore file.
+
+// Check whether `FilePath` is ignored according to the nearest
+// .clang-format-ignore file based on the rules below:
+// - A blank line is skipped.
+// - Leading and trailing spaces of a line are trimmed.
+// - A line starting with a hash (`#`) is a comment.
+// - A non-comment line is a single pattern.
+// - The slash (`/`) is used as the directory separator.
+// - A pattern is relative to the directory of the .clang-format-ignore file (or
+// the root directory if the pattern starts with a slash).
+// - A pattern is negated if it starts with a bang (`!`).
+static bool isIgnored(StringRef FilePath) {
+ using namespace llvm::sys::fs;
+ if (!is_regular_file(FilePath))
+ return false;
+
+ String Path;
+ String AbsPath{FilePath};
+
+ using namespace llvm::sys::path;
+ make_absolute(AbsPath);
+ remove_dots(AbsPath, /*remove_dot_dot=*/true);
+
+ if (StringRef Dir{parent_path(AbsPath)}; PrevDir != Dir) {
+ PrevDir = Dir;
+
+ for (;;) {
+ Path = Dir;
+ append(Path, ".clang-format-ignore");
+ if (is_regular_file(Path))
+ break;
+ Dir = parent_path(Dir);
+ if (Dir.empty())
+ return false;
+ }
+
+ IgnoreDir = convert_to_slash(Dir);
+
+ std::ifstream IgnoreFile{Path.c_str()};
+ if (!IgnoreFile.good())
+ return false;
+
+ Patterns.clear();
+
+ for (std::string Line; std::getline(IgnoreFile, Line);) {
+ if (const auto Pattern{StringRef{Line}.trim()};
+ // Skip empty and comment lines.
+ !Pattern.empty() && Pattern[0] != '#') {
+ Patterns.push_back(Pattern);
+ }
+ }
+ }
+
+ if (IgnoreDir.empty())
+ return false;
+
+ const auto Pathname{convert_to_slash(AbsPath)};
+ for (const auto &Pat : Patterns) {
+ const bool IsNegated = Pat[0] == '!';
+ StringRef Pattern{Pat};
+ if (IsNegated)
+ Pattern = Pattern.drop_front();
+
+ if (Pattern.empty())
+ continue;
+
+ Pattern = Pattern.ltrim();
+
+ // `Pattern` is relative to `IgnoreDir` unless it starts with a slash.
+ // This doesn't support patterns containing drive names (e.g. `C:`).
+ if (Pattern[0] != '/') {
+ Path = IgnoreDir;
+ append(Path, Style::posix, Pattern);
+ remove_dots(Path, /*remove_dot_dot=*/true, Style::posix);
+ Pattern = Path;
+ }
+
+ if (clang::format::matchFilePath(Pattern, Pathname) == !IsNegated)
+ return true;
+ }
+
+ return false;
+}
+
int main(int argc, const char **argv) {
llvm::InitLLVM X(argc, argv);
@@ -530,24 +684,39 @@ int main(int argc, const char **argv) {
return 0;
}
- if (DumpConfig) {
+ if (DumpConfig)
return dumpConfig();
- }
- bool Error = false;
- if (FileNames.empty()) {
- Error = clang::format::format("-");
- return Error ? 1 : 0;
+ if (!Files.empty()) {
+ std::ifstream ExternalFileOfFiles{std::string(Files)};
+ std::string Line;
+ unsigned LineNo = 1;
+ while (std::getline(ExternalFileOfFiles, Line)) {
+ FileNames.push_back(Line);
+ LineNo++;
+ }
+ errs() << "Clang-formating " << LineNo << " files\n";
}
- if (FileNames.size() != 1 &&
+
+ if (FileNames.empty())
+ return clang::format::format("-");
+
+ if (FileNames.size() > 1 &&
(!Offsets.empty() || !Lengths.empty() || !LineRanges.empty())) {
errs() << "error: -offset, -length and -lines can only be used for "
"single file.\n";
return 1;
}
+
+ unsigned FileNo = 1;
+ bool Error = false;
for (const auto &FileName : FileNames) {
- if (Verbose)
- errs() << "Formatting " << FileName << "\n";
+ if (isIgnored(FileName))
+ continue;
+ if (Verbose) {
+ errs() << "Formatting [" << FileNo++ << "/" << FileNames.size() << "] "
+ << FileName << "\n";
+ }
Error |= clang::format::format(FileName);
}
return Error ? 1 : 0;
diff --git a/contrib/llvm-project/clang/tools/clang-repl/ClangRepl.cpp b/contrib/llvm-project/clang/tools/clang-repl/ClangRepl.cpp
index ba6bb11abc86..5bad8145324d 100644
--- a/contrib/llvm-project/clang/tools/clang-repl/ClangRepl.cpp
+++ b/contrib/llvm-project/clang/tools/clang-repl/ClangRepl.cpp
@@ -13,26 +13,41 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Interpreter/CodeCompletion.h"
#include "clang/Interpreter/Interpreter.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Sema.h"
#include "llvm/ExecutionEngine/Orc/LLJIT.h"
#include "llvm/LineEditor/LineEditor.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ManagedStatic.h" // llvm_shutdown
#include "llvm/Support/Signals.h"
-#include "llvm/Support/TargetSelect.h" // llvm::Initialize*
+#include "llvm/Support/TargetSelect.h"
+#include <optional>
+
+// Disable LSan for this test.
+// FIXME: Re-enable once we can assume GCC 13.2 or higher.
+// https://llvm.org/github.com/llvm/llvm-project/issues/67586.
+#if LLVM_ADDRESS_SANITIZER_BUILD || LLVM_HWADDRESS_SANITIZER_BUILD
+#include <sanitizer/lsan_interface.h>
+LLVM_ATTRIBUTE_USED int __lsan_is_turned_off() { return 1; }
+#endif
+
+static llvm::cl::opt<bool> CudaEnabled("cuda", llvm::cl::Hidden);
+static llvm::cl::opt<std::string> CudaPath("cuda-path", llvm::cl::Hidden);
+static llvm::cl::opt<std::string> OffloadArch("offload-arch", llvm::cl::Hidden);
static llvm::cl::list<std::string>
- ClangArgs("Xcc", llvm::cl::ZeroOrMore,
+ ClangArgs("Xcc",
llvm::cl::desc("Argument to pass to the CompilerInvocation"),
llvm::cl::CommaSeparated);
static llvm::cl::opt<bool> OptHostSupportsJit("host-supports-jit",
llvm::cl::Hidden);
static llvm::cl::list<std::string> OptInputs(llvm::cl::Positional,
- llvm::cl::ZeroOrMore,
llvm::cl::desc("[code to run]"));
-static void LLVMErrorHandler(void *UserData, const std::string &Message,
+static void LLVMErrorHandler(void *UserData, const char *Message,
bool GenCrashDiag) {
auto &Diags = *static_cast<clang::DiagnosticsEngine *>(UserData);
@@ -49,16 +64,94 @@ static void LLVMErrorHandler(void *UserData, const std::string &Message,
exit(GenCrashDiag ? 70 : 1);
}
+// If we are running with -verify a reported has to be returned as unsuccess.
+// This is relevant especially for the test suite.
+static int checkDiagErrors(const clang::CompilerInstance *CI, bool HasError) {
+ unsigned Errs = CI->getDiagnostics().getClient()->getNumErrors();
+ if (CI->getDiagnosticOpts().VerifyDiagnostics) {
+ // If there was an error that came from the verifier we must return 1 as
+ // an exit code for the process. This will make the test fail as expected.
+ clang::DiagnosticConsumer *Client = CI->getDiagnostics().getClient();
+ Client->EndSourceFile();
+ Errs = Client->getNumErrors();
+
+ // The interpreter expects BeginSourceFile/EndSourceFiles to be balanced.
+ Client->BeginSourceFile(CI->getLangOpts(), &CI->getPreprocessor());
+ }
+ return (Errs || HasError) ? EXIT_FAILURE : EXIT_SUCCESS;
+}
+
+struct ReplListCompleter {
+ clang::IncrementalCompilerBuilder &CB;
+ clang::Interpreter &MainInterp;
+ ReplListCompleter(clang::IncrementalCompilerBuilder &CB,
+ clang::Interpreter &Interp)
+ : CB(CB), MainInterp(Interp){};
+
+ std::vector<llvm::LineEditor::Completion> operator()(llvm::StringRef Buffer,
+ size_t Pos) const;
+ std::vector<llvm::LineEditor::Completion>
+ operator()(llvm::StringRef Buffer, size_t Pos, llvm::Error &ErrRes) const;
+};
+
+std::vector<llvm::LineEditor::Completion>
+ReplListCompleter::operator()(llvm::StringRef Buffer, size_t Pos) const {
+ auto Err = llvm::Error::success();
+ auto res = (*this)(Buffer, Pos, Err);
+ if (Err)
+ llvm::logAllUnhandledErrors(std::move(Err), llvm::errs(), "error: ");
+ return res;
+}
+
+std::vector<llvm::LineEditor::Completion>
+ReplListCompleter::operator()(llvm::StringRef Buffer, size_t Pos,
+ llvm::Error &ErrRes) const {
+ std::vector<llvm::LineEditor::Completion> Comps;
+ std::vector<std::string> Results;
+
+ auto CI = CB.CreateCpp();
+ if (auto Err = CI.takeError()) {
+ ErrRes = std::move(Err);
+ return {};
+ }
+
+ size_t Lines =
+ std::count(Buffer.begin(), std::next(Buffer.begin(), Pos), '\n') + 1;
+ auto Interp = clang::Interpreter::create(std::move(*CI));
+
+ if (auto Err = Interp.takeError()) {
+ // log the error and returns an empty vector;
+ ErrRes = std::move(Err);
+
+ return {};
+ }
+ auto *MainCI = (*Interp)->getCompilerInstance();
+ auto CC = clang::ReplCodeCompleter();
+ CC.codeComplete(MainCI, Buffer, Lines, Pos + 1,
+ MainInterp.getCompilerInstance(), Results);
+ for (auto c : Results) {
+ if (c.find(CC.Prefix) == 0)
+ Comps.push_back(
+ llvm::LineEditor::Completion(c.substr(CC.Prefix.size()), c));
+ }
+ return Comps;
+}
+
llvm::ExitOnError ExitOnErr;
int main(int argc, const char **argv) {
ExitOnErr.setBanner("clang-repl: ");
llvm::cl::ParseCommandLineOptions(argc, argv);
+ llvm::llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
+
std::vector<const char *> ClangArgv(ClangArgs.size());
std::transform(ClangArgs.begin(), ClangArgs.end(), ClangArgv.begin(),
[](const std::string &s) -> const char * { return s.data(); });
- llvm::InitializeNativeTarget();
- llvm::InitializeNativeTargetAsmPrinter();
+ // Initialize all targets (required for device offloading)
+ llvm::InitializeAllTargetInfos();
+ llvm::InitializeAllTargets();
+ llvm::InitializeAllTargetMCs();
+ llvm::InitializeAllAsmPrinters();
if (OptHostSupportsJit) {
auto J = llvm::orc::LLJITBuilder().create();
@@ -71,29 +164,98 @@ int main(int argc, const char **argv) {
return 0;
}
+ clang::IncrementalCompilerBuilder CB;
+ CB.SetCompilerArgs(ClangArgv);
+
+ std::unique_ptr<clang::CompilerInstance> DeviceCI;
+ if (CudaEnabled) {
+ if (!CudaPath.empty())
+ CB.SetCudaSDK(CudaPath);
+
+ if (OffloadArch.empty()) {
+ OffloadArch = "sm_35";
+ }
+ CB.SetOffloadArch(OffloadArch);
+
+ DeviceCI = ExitOnErr(CB.CreateCudaDevice());
+ }
+
// FIXME: Investigate if we could use runToolOnCodeWithArgs from tooling. It
// can replace the boilerplate code for creation of the compiler instance.
- auto CI = ExitOnErr(clang::IncrementalCompilerBuilder::create(ClangArgv));
+ std::unique_ptr<clang::CompilerInstance> CI;
+ if (CudaEnabled) {
+ CI = ExitOnErr(CB.CreateCudaHost());
+ } else {
+ CI = ExitOnErr(CB.CreateCpp());
+ }
// Set an error handler, so that any LLVM backend diagnostics go through our
// error handler.
llvm::install_fatal_error_handler(LLVMErrorHandler,
static_cast<void *>(&CI->getDiagnostics()));
- auto Interp = ExitOnErr(clang::Interpreter::create(std::move(CI)));
+ // Load any requested plugins.
+ CI->LoadRequestedPlugins();
+ if (CudaEnabled)
+ DeviceCI->LoadRequestedPlugins();
+
+ std::unique_ptr<clang::Interpreter> Interp;
+
+ if (CudaEnabled) {
+ Interp = ExitOnErr(
+ clang::Interpreter::createWithCUDA(std::move(CI), std::move(DeviceCI)));
+
+ if (CudaPath.empty()) {
+ ExitOnErr(Interp->LoadDynamicLibrary("libcudart.so"));
+ } else {
+ auto CudaRuntimeLibPath = CudaPath + "/lib/libcudart.so";
+ ExitOnErr(Interp->LoadDynamicLibrary(CudaRuntimeLibPath.c_str()));
+ }
+ } else
+ Interp = ExitOnErr(clang::Interpreter::create(std::move(CI)));
+
for (const std::string &input : OptInputs) {
if (auto Err = Interp->ParseAndExecute(input))
llvm::logAllUnhandledErrors(std::move(Err), llvm::errs(), "error: ");
}
+ bool HasError = false;
+
if (OptInputs.empty()) {
llvm::LineEditor LE("clang-repl");
- // FIXME: Add LE.setListCompleter
- while (llvm::Optional<std::string> Line = LE.readLine()) {
- if (*Line == "quit")
+ std::string Input;
+ LE.setListCompleter(ReplListCompleter(CB, *Interp));
+ while (std::optional<std::string> Line = LE.readLine()) {
+ llvm::StringRef L = *Line;
+ L = L.trim();
+ if (L.ends_with("\\")) {
+ // FIXME: Support #ifdef X \ ...
+ Input += L.drop_back(1);
+ LE.setPrompt("clang-repl... ");
+ continue;
+ }
+
+ Input += L;
+ if (Input == R"(%quit)") {
break;
- if (auto Err = Interp->ParseAndExecute(*Line))
+ }
+ if (Input == R"(%undo)") {
+ if (auto Err = Interp->Undo()) {
+ llvm::logAllUnhandledErrors(std::move(Err), llvm::errs(), "error: ");
+ HasError = true;
+ }
+ } else if (Input.rfind("%lib ", 0) == 0) {
+ if (auto Err = Interp->LoadDynamicLibrary(Input.data() + 5)) {
+ llvm::logAllUnhandledErrors(std::move(Err), llvm::errs(), "error: ");
+ HasError = true;
+ }
+ } else if (auto Err = Interp->ParseAndExecute(Input)) {
llvm::logAllUnhandledErrors(std::move(Err), llvm::errs(), "error: ");
+ HasError = true;
+ }
+
+ Input = "";
+ LE.setPrompt("clang-repl> ");
}
}
@@ -102,7 +264,5 @@ int main(int argc, const char **argv) {
// later errors use the default handling behavior instead.
llvm::remove_fatal_error_handler();
- llvm::llvm_shutdown();
-
- return 0;
+ return checkDiagErrors(Interp->getCompilerInstance(), HasError);
}
diff --git a/contrib/llvm-project/clang/tools/driver/cc1_main.cpp b/contrib/llvm-project/clang/tools/driver/cc1_main.cpp
index 396d6ff529f3..e9d2c6aad371 100644
--- a/contrib/llvm-project/clang/tools/driver/cc1_main.cpp
+++ b/contrib/llvm-project/clang/tools/driver/cc1_main.cpp
@@ -28,6 +28,8 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/LinkAllPasses.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/TargetRegistry.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Option/OptTable.h"
@@ -37,13 +39,15 @@
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
+#include "llvm/Support/RISCVISAInfo.h"
#include "llvm/Support/Signals.h"
-#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/TimeProfiler.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/TargetParser/AArch64TargetParser.h"
+#include "llvm/TargetParser/ARMTargetParser.h"
#include <cstdio>
#ifdef CLANG_HAVE_RLIMITS
@@ -57,7 +61,7 @@ using namespace llvm::opt;
// Main driver
//===----------------------------------------------------------------------===//
-static void LLVMErrorHandler(void *UserData, const std::string &Message,
+static void LLVMErrorHandler(void *UserData, const char *Message,
bool GenCrashDiag) {
DiagnosticsEngine &Diags = *static_cast<DiagnosticsEngine*>(UserData);
@@ -177,7 +181,45 @@ static int PrintSupportedCPUs(std::string TargetStr) {
// the target machine will handle the mcpu printing
llvm::TargetOptions Options;
std::unique_ptr<llvm::TargetMachine> TheTargetMachine(
- TheTarget->createTargetMachine(TargetStr, "", "+cpuhelp", Options, None));
+ TheTarget->createTargetMachine(TargetStr, "", "+cpuhelp", Options,
+ std::nullopt));
+ return 0;
+}
+
+static int PrintSupportedExtensions(std::string TargetStr) {
+ std::string Error;
+ const llvm::Target *TheTarget =
+ llvm::TargetRegistry::lookupTarget(TargetStr, Error);
+ if (!TheTarget) {
+ llvm::errs() << Error;
+ return 1;
+ }
+
+ llvm::TargetOptions Options;
+ std::unique_ptr<llvm::TargetMachine> TheTargetMachine(
+ TheTarget->createTargetMachine(TargetStr, "", "", Options, std::nullopt));
+ const llvm::Triple &MachineTriple = TheTargetMachine->getTargetTriple();
+ const llvm::MCSubtargetInfo *MCInfo = TheTargetMachine->getMCSubtargetInfo();
+ const llvm::ArrayRef<llvm::SubtargetFeatureKV> Features =
+ MCInfo->getAllProcessorFeatures();
+
+ llvm::StringMap<llvm::StringRef> DescMap;
+ for (const llvm::SubtargetFeatureKV &feature : Features)
+ DescMap.insert({feature.Key, feature.Desc});
+
+ if (MachineTriple.isRISCV())
+ llvm::riscvExtensionsHelp(DescMap);
+ else if (MachineTriple.isAArch64())
+ llvm::AArch64::PrintSupportedExtensions(DescMap);
+ else if (MachineTriple.isARM())
+ llvm::ARM::PrintSupportedExtensions(DescMap);
+ else {
+ // The option was already checked in Driver::HandleImmediateArgs,
+ // so we do not expect to get here if we are not a supported architecture.
+ assert(0 && "Unhandled triple for --print-supported-extensions option.");
+ return 1;
+ }
+
return 0;
}
@@ -212,7 +254,7 @@ int cc1_main(ArrayRef<const char *> Argv, const char *Argv0, void *MainAddr) {
bool Success = CompilerInvocation::CreateFromArgs(Clang->getInvocation(),
Argv, Diags, Argv0);
- if (Clang->getFrontendOpts().TimeTrace) {
+ if (!Clang->getFrontendOpts().TimeTracePath.empty()) {
llvm::timeTraceProfilerInitialize(
Clang->getFrontendOpts().TimeTraceGranularity, Argv0);
}
@@ -220,6 +262,10 @@ int cc1_main(ArrayRef<const char *> Argv, const char *Argv0, void *MainAddr) {
if (Clang->getFrontendOpts().PrintSupportedCPUs)
return PrintSupportedCPUs(Clang->getTargetOpts().Triple);
+ // --print-supported-extensions takes priority over the actual compilation.
+ if (Clang->getFrontendOpts().PrintSupportedExtensions)
+ return PrintSupportedExtensions(Clang->getTargetOpts().Triple);
+
// Infer the builtin include path if unspecified.
if (Clang->getHeaderSearchOpts().UseBuiltinIncludes &&
Clang->getHeaderSearchOpts().ResourceDir.empty())
@@ -237,8 +283,10 @@ int cc1_main(ArrayRef<const char *> Argv, const char *Argv0, void *MainAddr) {
static_cast<void*>(&Clang->getDiagnostics()));
DiagsBuffer->FlushDiagnostics(Clang->getDiagnostics());
- if (!Success)
+ if (!Success) {
+ Clang->getDiagnosticClient().finish();
return 1;
+ }
// Execute the frontend actions.
{
@@ -252,14 +300,24 @@ int cc1_main(ArrayRef<const char *> Argv, const char *Argv0, void *MainAddr) {
llvm::TimerGroup::clearAll();
if (llvm::timeTraceProfilerEnabled()) {
- SmallString<128> Path(Clang->getFrontendOpts().OutputFile);
- llvm::sys::path::replace_extension(Path, "json");
+ // It is possible that the compiler instance doesn't own a file manager here
+ // if we're compiling a module unit. Since the file manager are owned by AST
+ // when we're compiling a module unit. So the file manager may be invalid
+ // here.
+ //
+ // It should be fine to create file manager here since the file system
+ // options are stored in the compiler invocation and we can recreate the VFS
+ // from the compiler invocation.
+ if (!Clang->hasFileManager())
+ Clang->createFileManager(createVFSFromCompilerInvocation(
+ Clang->getInvocation(), Clang->getDiagnostics()));
+
if (auto profilerOutput = Clang->createOutputFile(
- Path.str(), /*Binary=*/false, /*RemoveFileOnSignal=*/false,
+ Clang->getFrontendOpts().TimeTracePath, /*Binary=*/false,
+ /*RemoveFileOnSignal=*/false,
/*useTemporary=*/false)) {
llvm::timeTraceProfilerWrite(*profilerOutput);
- // FIXME(ibiryukov): make profilerOutput flush in destructor instead.
- profilerOutput->flush();
+ profilerOutput.reset();
llvm::timeTraceProfilerCleanup();
Clang->clearOutputFiles(false);
}
diff --git a/contrib/llvm-project/clang/tools/driver/cc1as_main.cpp b/contrib/llvm-project/clang/tools/driver/cc1as_main.cpp
index 086ce0ea7787..bc398fa0731f 100644
--- a/contrib/llvm-project/clang/tools/driver/cc1as_main.cpp
+++ b/contrib/llvm-project/clang/tools/driver/cc1as_main.cpp
@@ -19,8 +19,8 @@
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Frontend/Utils.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -36,6 +36,7 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetOptions.h"
+#include "llvm/MC/TargetRegistry.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Option/OptTable.h"
@@ -43,17 +44,18 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FormattedStream.h"
-#include "llvm/Support/Host.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Signals.h"
#include "llvm/Support/SourceMgr.h"
-#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Host.h"
+#include "llvm/TargetParser/Triple.h"
#include <memory>
+#include <optional>
#include <system_error>
using namespace clang;
using namespace clang::driver;
@@ -96,7 +98,7 @@ struct AssemblerInvocation {
std::string DwarfDebugFlags;
std::string DwarfDebugProducer;
std::string DebugCompilationDir;
- std::map<const std::string, const std::string> DebugPrefixMap;
+ llvm::SmallVector<std::pair<std::string, std::string>, 0> DebugPrefixMap;
llvm::DebugCompressionType CompressDebugSections =
llvm::DebugCompressionType::None;
std::string MainFileName;
@@ -134,9 +136,17 @@ struct AssemblerInvocation {
unsigned NoExecStack : 1;
unsigned FatalWarnings : 1;
unsigned NoWarn : 1;
+ unsigned NoTypeCheck : 1;
unsigned IncrementalLinkerCompatible : 1;
unsigned EmbedBitcode : 1;
+ /// Whether to emit DWARF unwind info.
+ EmitDwarfUnwindType EmitDwarfUnwind;
+
+ // Whether to emit compact-unwind for non-canonical entries.
+ // Note: maybe overriden by other constraints.
+ unsigned EmitCompactUnwindNonCanonical : 1;
+
/// The name of the relocation model to use.
std::string RelocationModel;
@@ -144,6 +154,16 @@ struct AssemblerInvocation {
/// otherwise.
std::string TargetABI;
+ /// Darwin target variant triple, the variant of the deployment target
+ /// for which the code is being compiled.
+ std::optional<llvm::Triple> DarwinTargetVariantTriple;
+
+ /// The version of the darwin target variant SDK which was used during the
+ /// compilation
+ llvm::VersionTuple DarwinTargetVariantSDKVersion;
+
+ /// The name of a file to use with \c .secure_log_unique directives.
+ std::string AsSecureLogFile;
/// @}
public:
@@ -160,10 +180,13 @@ public:
NoExecStack = 0;
FatalWarnings = 0;
NoWarn = 0;
+ NoTypeCheck = 0;
IncrementalLinkerCompatible = 0;
Dwarf64 = 0;
DwarfVersion = 0;
EmbedBitcode = 0;
+ EmitDwarfUnwind = EmitDwarfUnwindType::Default;
+ EmitCompactUnwindNonCanonical = false;
}
static bool CreateFromArgs(AssemblerInvocation &Res,
@@ -181,10 +204,10 @@ bool AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
// Parse the arguments.
const OptTable &OptTbl = getDriverOptTable();
- const unsigned IncludedFlagsBitmask = options::CC1AsOption;
+ llvm::opt::Visibility VisibilityMask(options::CC1AsOption);
unsigned MissingArgIndex, MissingArgCount;
- InputArgList Args = OptTbl.ParseArgs(Argv, MissingArgIndex, MissingArgCount,
- IncludedFlagsBitmask);
+ InputArgList Args =
+ OptTbl.ParseArgs(Argv, MissingArgIndex, MissingArgCount, VisibilityMask);
// Check for missing argument error.
if (MissingArgCount) {
@@ -197,7 +220,7 @@ bool AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
for (const Arg *A : Args.filtered(OPT_UNKNOWN)) {
auto ArgString = A->getAsString(Args);
std::string Nearest;
- if (OptTbl.findNearest(ArgString, Nearest, IncludedFlagsBitmask) > 1)
+ if (OptTbl.findNearest(ArgString, Nearest, VisibilityMask) > 1)
Diags.Report(diag::err_drv_unknown_argument) << ArgString;
else
Diags.Report(diag::err_drv_unknown_argument_with_suggestion)
@@ -209,6 +232,17 @@ bool AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
// Target Options
Opts.Triple = llvm::Triple::normalize(Args.getLastArgValue(OPT_triple));
+ if (Arg *A = Args.getLastArg(options::OPT_darwin_target_variant_triple))
+ Opts.DarwinTargetVariantTriple = llvm::Triple(A->getValue());
+ if (Arg *A = Args.getLastArg(OPT_darwin_target_variant_sdk_version_EQ)) {
+ VersionTuple Version;
+ if (Version.tryParse(A->getValue()))
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ else
+ Opts.DarwinTargetVariantSDKVersion = Version;
+ }
+
Opts.CPU = std::string(Args.getLastArgValue(OPT_target_cpu));
Opts.Features = Args.getAllArgValues(OPT_target_feature);
@@ -227,12 +261,12 @@ bool AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
Opts.CompressDebugSections =
llvm::StringSwitch<llvm::DebugCompressionType>(A->getValue())
.Case("none", llvm::DebugCompressionType::None)
- .Case("zlib", llvm::DebugCompressionType::Z)
- .Case("zlib-gnu", llvm::DebugCompressionType::GNU)
+ .Case("zlib", llvm::DebugCompressionType::Zlib)
+ .Case("zstd", llvm::DebugCompressionType::Zstd)
.Default(llvm::DebugCompressionType::None);
}
- Opts.RelaxELFRelocations = Args.hasArg(OPT_mrelax_relocations);
+ Opts.RelaxELFRelocations = !Args.hasArg(OPT_mrelax_relocations_no);
if (auto *DwarfFormatArg = Args.getLastArg(OPT_gdwarf64, OPT_gdwarf32))
Opts.Dwarf64 = DwarfFormatArg->getOption().matches(OPT_gdwarf64);
Opts.DwarfVersion = getLastArgIntValue(Args, OPT_dwarf_version_EQ, 2, Diags);
@@ -247,8 +281,7 @@ bool AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
for (const auto &Arg : Args.getAllArgValues(OPT_fdebug_prefix_map_EQ)) {
auto Split = StringRef(Arg).split('=');
- Opts.DebugPrefixMap.insert(
- {std::string(Split.first), std::string(Split.second)});
+ Opts.DebugPrefixMap.emplace_back(Split.first, Split.second);
}
// Frontend Options
@@ -295,6 +328,7 @@ bool AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
Opts.NoExecStack = Args.hasArg(OPT_mno_exec_stack);
Opts.FatalWarnings = Args.hasArg(OPT_massembler_fatal_warnings);
Opts.NoWarn = Args.hasArg(OPT_massembler_no_warn);
+ Opts.NoTypeCheck = Args.hasArg(OPT_mno_type_check);
Opts.RelocationModel =
std::string(Args.getLastArgValue(OPT_mrelocation_model, "pic"));
Opts.TargetABI = std::string(Args.getLastArgValue(OPT_target_abi));
@@ -312,6 +346,19 @@ bool AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
.Default(0);
}
+ if (auto *A = Args.getLastArg(OPT_femit_dwarf_unwind_EQ)) {
+ Opts.EmitDwarfUnwind =
+ llvm::StringSwitch<EmitDwarfUnwindType>(A->getValue())
+ .Case("always", EmitDwarfUnwindType::Always)
+ .Case("no-compact-unwind", EmitDwarfUnwindType::NoCompactUnwind)
+ .Case("default", EmitDwarfUnwindType::Default);
+ }
+
+ Opts.EmitCompactUnwindNonCanonical =
+ Args.hasArg(OPT_femit_compact_unwind_non_canonical);
+
+ Opts.AsSecureLogFile = Args.getLastArgValue(OPT_as_secure_log_file);
+
return Success;
}
@@ -345,8 +392,8 @@ static bool ExecuteAssemblerImpl(AssemblerInvocation &Opts,
MemoryBuffer::getFileOrSTDIN(Opts.InputFile, /*IsText=*/true);
if (std::error_code EC = Buffer.getError()) {
- Error = EC.message();
- return Diags.Report(diag::err_fe_error_reading) << Opts.InputFile;
+ return Diags.Report(diag::err_fe_error_reading)
+ << Opts.InputFile << EC.message();
}
SourceMgr SrcMgr;
@@ -362,6 +409,10 @@ static bool ExecuteAssemblerImpl(AssemblerInvocation &Opts,
assert(MRI && "Unable to create target register info!");
MCTargetOptions MCOptions;
+ MCOptions.EmitDwarfUnwind = Opts.EmitDwarfUnwind;
+ MCOptions.EmitCompactUnwindNonCanonical = Opts.EmitCompactUnwindNonCanonical;
+ MCOptions.AsSecureLogFile = Opts.AsSecureLogFile;
+
std::unique_ptr<MCAsmInfo> MAI(
TheTarget->createMCAsmInfo(*MRI, Opts.Triple, MCOptions));
assert(MAI && "Unable to create target asm info!");
@@ -408,6 +459,10 @@ static bool ExecuteAssemblerImpl(AssemblerInvocation &Opts,
// MCObjectFileInfo needs a MCContext reference in order to initialize itself.
std::unique_ptr<MCObjectFileInfo> MOFI(
TheTarget->createMCObjectFileInfo(Ctx, PIC));
+ if (Opts.DarwinTargetVariantTriple)
+ MOFI->setDarwinTargetVariantTriple(*Opts.DarwinTargetVariantTriple);
+ if (!Opts.DarwinTargetVariantSDKVersion.empty())
+ MOFI->setDarwinTargetVariantSDKVersion(Opts.DarwinTargetVariantSDKVersion);
Ctx.setObjectFileInfo(MOFI.get());
if (Opts.SaveTemporaryLabels)
@@ -447,6 +502,7 @@ static bool ExecuteAssemblerImpl(AssemblerInvocation &Opts,
MCOptions.MCNoWarn = Opts.NoWarn;
MCOptions.MCFatalWarnings = Opts.FatalWarnings;
+ MCOptions.MCNoTypeCheck = Opts.NoTypeCheck;
MCOptions.ABIName = Opts.TargetABI;
// FIXME: There is a bit of code duplication with addPassesToEmitFile.
@@ -456,7 +512,7 @@ static bool ExecuteAssemblerImpl(AssemblerInvocation &Opts,
std::unique_ptr<MCCodeEmitter> CE;
if (Opts.ShowEncoding)
- CE.reset(TheTarget->createMCCodeEmitter(*MCII, *MRI, Ctx));
+ CE.reset(TheTarget->createMCCodeEmitter(*MCII, Ctx));
std::unique_ptr<MCAsmBackend> MAB(
TheTarget->createMCAsmBackend(*STI, *MRI, MCOptions));
@@ -476,7 +532,7 @@ static bool ExecuteAssemblerImpl(AssemblerInvocation &Opts,
}
std::unique_ptr<MCCodeEmitter> CE(
- TheTarget->createMCCodeEmitter(*MCII, *MRI, Ctx));
+ TheTarget->createMCCodeEmitter(*MCII, Ctx));
std::unique_ptr<MCAsmBackend> MAB(
TheTarget->createMCAsmBackend(*STI, *MRI, MCOptions));
assert(MAB && "Unable to create asm backend!");
@@ -490,7 +546,7 @@ static bool ExecuteAssemblerImpl(AssemblerInvocation &Opts,
T, Ctx, std::move(MAB), std::move(OW), std::move(CE), *STI,
Opts.RelaxAll, Opts.IncrementalLinkerCompatible,
/*DWARFMustBeAtTheEnd*/ true));
- Str.get()->InitSections(Opts.NoExecStack);
+ Str.get()->initSections(Opts.NoExecStack, *STI);
}
// When -fembed-bitcode is passed to clang_as, a 1-byte marker
@@ -498,7 +554,7 @@ static bool ExecuteAssemblerImpl(AssemblerInvocation &Opts,
if (Opts.EmbedBitcode && Ctx.getObjectFileType() == MCContext::IsMachO) {
MCSection *AsmLabel = Ctx.getMachOSection(
"__LLVM", "__asm", MachO::S_REGULAR, 4, SectionKind::getReadOnly());
- Str.get()->SwitchSection(AsmLabel);
+ Str.get()->switchSection(AsmLabel);
Str.get()->emitZeros(1);
}
@@ -550,7 +606,7 @@ static bool ExecuteAssembler(AssemblerInvocation &Opts,
return Failed;
}
-static void LLVMErrorHandler(void *UserData, const std::string &Message,
+static void LLVMErrorHandler(void *UserData, const char *Message,
bool GenCrashDiag) {
DiagnosticsEngine &Diags = *static_cast<DiagnosticsEngine*>(UserData);
@@ -587,9 +643,10 @@ int cc1as_main(ArrayRef<const char *> Argv, const char *Argv0, void *MainAddr) {
if (Asm.ShowHelp) {
getDriverOptTable().printHelp(
llvm::outs(), "clang -cc1as [options] file...",
- "Clang Integrated Assembler",
- /*Include=*/driver::options::CC1AsOption, /*Exclude=*/0,
- /*ShowAllAliases=*/false);
+ "Clang Integrated Assembler", /*ShowHidden=*/false,
+ /*ShowAllAliases=*/false,
+ llvm::opt::Visibility(driver::options::CC1AsOption));
+
return 0;
}
diff --git a/contrib/llvm-project/clang/tools/driver/cc1gen_reproducer_main.cpp b/contrib/llvm-project/clang/tools/driver/cc1gen_reproducer_main.cpp
index 89b7227fdb17..e97fa3d27756 100644
--- a/contrib/llvm-project/clang/tools/driver/cc1gen_reproducer_main.cpp
+++ b/contrib/llvm-project/clang/tools/driver/cc1gen_reproducer_main.cpp
@@ -18,11 +18,13 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/Host.h"
+#include "llvm/Support/LLVMDriver.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Support/YAMLTraits.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Host.h"
+#include <optional>
using namespace clang;
@@ -108,9 +110,10 @@ static std::string generateReproducerMetaInfo(const ClangInvocationInfo &Info) {
}
/// Generates a reproducer for a set of arguments from a specific invocation.
-static llvm::Optional<driver::Driver::CompilationDiagnosticReport>
+static std::optional<driver::Driver::CompilationDiagnosticReport>
generateReproducerForInvocationArguments(ArrayRef<const char *> Argv,
- const ClangInvocationInfo &Info) {
+ const ClangInvocationInfo &Info,
+ const llvm::ToolContext &ToolContext) {
using namespace driver;
auto TargetAndMode = ToolChain::getTargetAndModeFromProgramName(Argv[0]);
@@ -119,8 +122,11 @@ generateReproducerForInvocationArguments(ArrayRef<const char *> Argv,
IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
DiagnosticsEngine Diags(DiagID, &*DiagOpts, new IgnoringDiagConsumer());
ProcessWarningOptions(Diags, *DiagOpts, /*ReportDiags=*/false);
- Driver TheDriver(Argv[0], llvm::sys::getDefaultTargetTriple(), Diags);
+ Driver TheDriver(ToolContext.Path, llvm::sys::getDefaultTargetTriple(),
+ Diags);
TheDriver.setTargetAndMode(TargetAndMode);
+ if (ToolContext.NeedsPrependArg)
+ TheDriver.setPrependArg(ToolContext.PrependArg);
std::unique_ptr<Compilation> C(TheDriver.BuildCompilation(Argv));
if (C && !C->containsError()) {
@@ -134,7 +140,7 @@ generateReproducerForInvocationArguments(ArrayRef<const char *> Argv,
}
}
- return None;
+ return std::nullopt;
}
std::string GetExecutablePath(const char *Argv0, bool CanonicalPrefixes);
@@ -154,7 +160,8 @@ static void printReproducerInformation(
}
int cc1gen_reproducer_main(ArrayRef<const char *> Argv, const char *Argv0,
- void *MainAddr) {
+ void *MainAddr,
+ const llvm::ToolContext &ToolContext) {
if (Argv.size() < 1) {
llvm::errs() << "error: missing invocation file\n";
return 1;
@@ -180,8 +187,9 @@ int cc1gen_reproducer_main(ArrayRef<const char *> Argv, const char *Argv0,
DriverArgs.push_back(Arg.c_str());
std::string Path = GetExecutablePath(Argv0, /*CanonicalPrefixes=*/true);
DriverArgs[0] = Path.c_str();
- llvm::Optional<driver::Driver::CompilationDiagnosticReport> Report =
- generateReproducerForInvocationArguments(DriverArgs, InvocationInfo);
+ std::optional<driver::Driver::CompilationDiagnosticReport> Report =
+ generateReproducerForInvocationArguments(DriverArgs, InvocationInfo,
+ ToolContext);
// Emit the information about the reproduce files to stdout.
int Result = 1;
diff --git a/contrib/llvm-project/clang/tools/driver/driver.cpp b/contrib/llvm-project/clang/tools/driver/driver.cpp
index 5a453429e79b..1407c7fcdab7 100644
--- a/contrib/llvm-project/clang/tools/driver/driver.cpp
+++ b/contrib/llvm-project/clang/tools/driver/driver.cpp
@@ -13,6 +13,7 @@
#include "clang/Driver/Driver.h"
#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/HeaderInclude.h"
#include "clang/Basic/Stack.h"
#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
@@ -35,8 +36,7 @@
#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/Host.h"
-#include "llvm/Support/InitLLVM.h"
+#include "llvm/Support/LLVMDriver.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/Process.h"
@@ -47,7 +47,9 @@
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetParser/Host.h"
#include <memory>
+#include <optional>
#include <set>
#include <system_error>
using namespace clang;
@@ -62,7 +64,7 @@ std::string GetExecutablePath(const char *Argv0, bool CanonicalPrefixes) {
if (llvm::ErrorOr<std::string> P =
llvm::sys::findProgramByName(ExecutablePath))
ExecutablePath = *P;
- return std::string(ExecutablePath.str());
+ return std::string(ExecutablePath);
}
// This just needs to be some symbol in the binary; C++ doesn't
@@ -76,9 +78,9 @@ static const char *GetStableCStr(std::set<std::string> &SavedStrings,
return SavedStrings.insert(std::string(S)).first->c_str();
}
-/// ApplyQAOverride - Apply a list of edits to the input argument lists.
+/// ApplyOneQAOverride - Apply a list of edits to the input argument lists.
///
-/// The input string is a space separate list of edits to perform,
+/// The input string is a space separated list of edits to perform,
/// they are applied in order to the input argument lists. Edits
/// should be one of the following forms:
///
@@ -119,8 +121,8 @@ static void ApplyOneQAOverride(raw_ostream &OS,
GetStableCStr(SavedStrings, Edit.substr(1));
OS << "### Adding argument " << Str << " at end\n";
Args.push_back(Str);
- } else if (Edit[0] == 's' && Edit[1] == '/' && Edit.endswith("/") &&
- Edit.slice(2, Edit.size()-1).find('/') != StringRef::npos) {
+ } else if (Edit[0] == 's' && Edit[1] == '/' && Edit.ends_with("/") &&
+ Edit.slice(2, Edit.size() - 1).contains('/')) {
StringRef MatchPattern = Edit.substr(2).split('/').first;
StringRef ReplPattern = Edit.substr(2).split('/').second;
ReplPattern = ReplPattern.slice(0, ReplPattern.size()-1);
@@ -174,7 +176,7 @@ static void ApplyOneQAOverride(raw_ostream &OS,
}
}
-/// ApplyQAOverride - Apply a comma separate list of edits to the
+/// ApplyQAOverride - Apply a space separated list of edits to the
/// input argument lists. See ApplyOneQAOverride.
static void ApplyQAOverride(SmallVectorImpl<const char*> &Args,
const char *OverrideStr,
@@ -208,7 +210,8 @@ extern int cc1_main(ArrayRef<const char *> Argv, const char *Argv0,
extern int cc1as_main(ArrayRef<const char *> Argv, const char *Argv0,
void *MainAddr);
extern int cc1gen_reproducer_main(ArrayRef<const char *> Argv,
- const char *Argv0, void *MainAddr);
+ const char *Argv0, void *MainAddr,
+ const llvm::ToolContext &);
static void insertTargetAndModeArgs(const ParsedClangName &NameParts,
SmallVectorImpl<const char *> &ArgVector,
@@ -243,29 +246,71 @@ static void getCLEnvVarOptions(std::string &EnvValue, llvm::StringSaver &Saver,
*NumberSignPtr = '=';
}
-static void SetBackdoorDriverOutputsFromEnvVars(Driver &TheDriver) {
- auto CheckEnvVar = [](const char *EnvOptSet, const char *EnvOptFile,
- std::string &OptFile) {
- bool OptSet = !!::getenv(EnvOptSet);
- if (OptSet) {
- if (const char *Var = ::getenv(EnvOptFile))
- OptFile = Var;
- }
- return OptSet;
- };
+template <class T>
+static T checkEnvVar(const char *EnvOptSet, const char *EnvOptFile,
+ std::string &OptFile) {
+ const char *Str = ::getenv(EnvOptSet);
+ if (!Str)
+ return T{};
+
+ T OptVal = Str;
+ if (const char *Var = ::getenv(EnvOptFile))
+ OptFile = Var;
+ return OptVal;
+}
+static bool SetBackdoorDriverOutputsFromEnvVars(Driver &TheDriver) {
TheDriver.CCPrintOptions =
- CheckEnvVar("CC_PRINT_OPTIONS", "CC_PRINT_OPTIONS_FILE",
- TheDriver.CCPrintOptionsFilename);
- TheDriver.CCPrintHeaders =
- CheckEnvVar("CC_PRINT_HEADERS", "CC_PRINT_HEADERS_FILE",
- TheDriver.CCPrintHeadersFilename);
+ checkEnvVar<bool>("CC_PRINT_OPTIONS", "CC_PRINT_OPTIONS_FILE",
+ TheDriver.CCPrintOptionsFilename);
+ if (checkEnvVar<bool>("CC_PRINT_HEADERS", "CC_PRINT_HEADERS_FILE",
+ TheDriver.CCPrintHeadersFilename)) {
+ TheDriver.CCPrintHeadersFormat = HIFMT_Textual;
+ TheDriver.CCPrintHeadersFiltering = HIFIL_None;
+ } else {
+ std::string EnvVar = checkEnvVar<std::string>(
+ "CC_PRINT_HEADERS_FORMAT", "CC_PRINT_HEADERS_FILE",
+ TheDriver.CCPrintHeadersFilename);
+ if (!EnvVar.empty()) {
+ TheDriver.CCPrintHeadersFormat =
+ stringToHeaderIncludeFormatKind(EnvVar.c_str());
+ if (!TheDriver.CCPrintHeadersFormat) {
+ TheDriver.Diag(clang::diag::err_drv_print_header_env_var)
+ << 0 << EnvVar;
+ return false;
+ }
+
+ const char *FilteringStr = ::getenv("CC_PRINT_HEADERS_FILTERING");
+ HeaderIncludeFilteringKind Filtering;
+ if (!stringToHeaderIncludeFiltering(FilteringStr, Filtering)) {
+ TheDriver.Diag(clang::diag::err_drv_print_header_env_var)
+ << 1 << FilteringStr;
+ return false;
+ }
+
+ if ((TheDriver.CCPrintHeadersFormat == HIFMT_Textual &&
+ Filtering != HIFIL_None) ||
+ (TheDriver.CCPrintHeadersFormat == HIFMT_JSON &&
+ Filtering != HIFIL_Only_Direct_System)) {
+ TheDriver.Diag(clang::diag::err_drv_print_header_env_var_combination)
+ << EnvVar << FilteringStr;
+ return false;
+ }
+ TheDriver.CCPrintHeadersFiltering = Filtering;
+ }
+ }
+
TheDriver.CCLogDiagnostics =
- CheckEnvVar("CC_LOG_DIAGNOSTICS", "CC_LOG_DIAGNOSTICS_FILE",
- TheDriver.CCLogDiagnosticsFilename);
+ checkEnvVar<bool>("CC_LOG_DIAGNOSTICS", "CC_LOG_DIAGNOSTICS_FILE",
+ TheDriver.CCLogDiagnosticsFilename);
TheDriver.CCPrintProcessStats =
- CheckEnvVar("CC_PRINT_PROC_STAT", "CC_PRINT_PROC_STAT_FILE",
- TheDriver.CCPrintStatReportFilename);
+ checkEnvVar<bool>("CC_PRINT_PROC_STAT", "CC_PRINT_PROC_STAT_FILE",
+ TheDriver.CCPrintStatReportFilename);
+ TheDriver.CCPrintInternalStats =
+ checkEnvVar<bool>("CC_PRINT_INTERNAL_STAT", "CC_PRINT_INTERNAL_STAT_FILE",
+ TheDriver.CCPrintInternalStatReportFilename);
+
+ return true;
}
static void FixupDiagPrefixExeName(TextDiagnosticPrinter *DiagClient,
@@ -278,27 +323,6 @@ static void FixupDiagPrefixExeName(TextDiagnosticPrinter *DiagClient,
DiagClient->setPrefix(std::string(ExeBasename));
}
-// This lets us create the DiagnosticsEngine with a properly-filled-out
-// DiagnosticOptions instance.
-static DiagnosticOptions *
-CreateAndPopulateDiagOpts(ArrayRef<const char *> argv, bool &UseNewCC1Process) {
- auto *DiagOpts = new DiagnosticOptions;
- unsigned MissingArgIndex, MissingArgCount;
- InputArgList Args = getDriverOptTable().ParseArgs(
- argv.slice(1), MissingArgIndex, MissingArgCount);
- // We ignore MissingArgCount and the return value of ParseDiagnosticArgs.
- // Any errors that would be diagnosed here will also be diagnosed later,
- // when the DiagnosticsEngine actually exists.
- (void)ParseDiagnosticArgs(*DiagOpts, Args);
-
- UseNewCC1Process =
- Args.hasFlag(clang::driver::options::OPT_fno_integrated_cc1,
- clang::driver::options::OPT_fintegrated_cc1,
- /*Default=*/CLANG_SPAWN_CC1);
-
- return DiagOpts;
-}
-
static void SetInstallDir(SmallVectorImpl<const char *> &argv,
Driver &TheDriver, bool CanonicalPrefixes) {
// Attempt to find the original path used to invoke the driver, to determine
@@ -321,7 +345,8 @@ static void SetInstallDir(SmallVectorImpl<const char *> &argv,
TheDriver.setInstalledDir(InstalledPathParent);
}
-static int ExecuteCC1Tool(SmallVectorImpl<const char *> &ArgV) {
+static int ExecuteCC1Tool(SmallVectorImpl<const char *> &ArgV,
+ const llvm::ToolContext &ToolContext) {
// If we call the cc1 tool from the clangDriver library (through
// Driver::CC1Main), we need to clean up the options usage count. The options
// are currently global, and they might have been used previously by the
@@ -329,28 +354,28 @@ static int ExecuteCC1Tool(SmallVectorImpl<const char *> &ArgV) {
llvm::cl::ResetAllOptionOccurrences();
llvm::BumpPtrAllocator A;
- llvm::StringSaver Saver(A);
- llvm::cl::ExpandResponseFiles(Saver, &llvm::cl::TokenizeGNUCommandLine, ArgV,
- /*MarkEOLs=*/false);
+ llvm::cl::ExpansionContext ECtx(A, llvm::cl::TokenizeGNUCommandLine);
+ if (llvm::Error Err = ECtx.expandResponseFiles(ArgV)) {
+ llvm::errs() << toString(std::move(Err)) << '\n';
+ return 1;
+ }
StringRef Tool = ArgV[1];
void *GetExecutablePathVP = (void *)(intptr_t)GetExecutablePath;
if (Tool == "-cc1")
- return cc1_main(makeArrayRef(ArgV).slice(1), ArgV[0], GetExecutablePathVP);
+ return cc1_main(ArrayRef(ArgV).slice(1), ArgV[0], GetExecutablePathVP);
if (Tool == "-cc1as")
- return cc1as_main(makeArrayRef(ArgV).slice(2), ArgV[0],
- GetExecutablePathVP);
+ return cc1as_main(ArrayRef(ArgV).slice(2), ArgV[0], GetExecutablePathVP);
if (Tool == "-cc1gen-reproducer")
- return cc1gen_reproducer_main(makeArrayRef(ArgV).slice(2), ArgV[0],
- GetExecutablePathVP);
+ return cc1gen_reproducer_main(ArrayRef(ArgV).slice(2), ArgV[0],
+ GetExecutablePathVP, ToolContext);
// Reject unknown tools.
llvm::errs() << "error: unknown integrated tool '" << Tool << "'. "
<< "Valid tools include '-cc1' and '-cc1as'.\n";
return 1;
}
-int main(int Argc, const char **Argv) {
+int clang_main(int Argc, char **Argv, const llvm::ToolContext &ToolContext) {
noteBottomOfStack();
- llvm::InitLLVM X(Argc, Argv);
llvm::setBugReportMsg("PLEASE submit a bug report to " BUG_REPORT_URL
" and include the crash backtrace, preprocessed "
"source, and associated run script.\n");
@@ -364,51 +389,21 @@ int main(int Argc, const char **Argv) {
llvm::BumpPtrAllocator A;
llvm::StringSaver Saver(A);
- // Parse response files using the GNU syntax, unless we're in CL mode. There
- // are two ways to put clang in CL compatibility mode: Args[0] is either
- // clang-cl or cl, or --driver-mode=cl is on the command line. The normal
- // command line parsing can't happen until after response file parsing, so we
- // have to manually search for a --driver-mode=cl argument the hard way.
- // Finally, our -cc1 tools don't care which tokenization mode we use because
- // response files written by clang will tokenize the same way in either mode.
+ const char *ProgName =
+ ToolContext.NeedsPrependArg ? ToolContext.PrependArg : ToolContext.Path;
+
bool ClangCLMode =
- IsClangCL(getDriverMode(Args[0], llvm::makeArrayRef(Args).slice(1)));
- enum { Default, POSIX, Windows } RSPQuoting = Default;
- for (const char *F : Args) {
- if (strcmp(F, "--rsp-quoting=posix") == 0)
- RSPQuoting = POSIX;
- else if (strcmp(F, "--rsp-quoting=windows") == 0)
- RSPQuoting = Windows;
- }
+ IsClangCL(getDriverMode(ProgName, llvm::ArrayRef(Args).slice(1)));
- // Determines whether we want nullptr markers in Args to indicate response
- // files end-of-lines. We only use this for the /LINK driver argument with
- // clang-cl.exe on Windows.
- bool MarkEOLs = ClangCLMode;
-
- llvm::cl::TokenizerCallback Tokenizer;
- if (RSPQuoting == Windows || (RSPQuoting == Default && ClangCLMode))
- Tokenizer = &llvm::cl::TokenizeWindowsCommandLine;
- else
- Tokenizer = &llvm::cl::TokenizeGNUCommandLine;
-
- if (MarkEOLs && Args.size() > 1 && StringRef(Args[1]).startswith("-cc1"))
- MarkEOLs = false;
- llvm::cl::ExpandResponseFiles(Saver, Tokenizer, Args, MarkEOLs);
-
- // Handle -cc1 integrated tools, even if -cc1 was expanded from a response
- // file.
- auto FirstArg = std::find_if(Args.begin() + 1, Args.end(),
- [](const char *A) { return A != nullptr; });
- if (FirstArg != Args.end() && StringRef(*FirstArg).startswith("-cc1")) {
- // If -cc1 came from a response file, remove the EOL sentinels.
- if (MarkEOLs) {
- auto newEnd = std::remove(Args.begin(), Args.end(), nullptr);
- Args.resize(newEnd - Args.begin());
- }
- return ExecuteCC1Tool(Args);
+ if (llvm::Error Err = expandResponseFiles(Args, ClangCLMode, A)) {
+ llvm::errs() << toString(std::move(Err)) << '\n';
+ return 1;
}
+ // Handle -cc1 integrated tools.
+ if (Args.size() >= 2 && StringRef(Args[1]).starts_with("-cc1"))
+ return ExecuteCC1Tool(Args, ToolContext);
+
// Handle options that need handling before the real command line parsing in
// Driver::BuildCompilation()
bool CanonicalPrefixes = true;
@@ -416,29 +411,29 @@ int main(int Argc, const char **Argv) {
// Skip end-of-line response file markers
if (Args[i] == nullptr)
continue;
- if (StringRef(Args[i]) == "-no-canonical-prefixes") {
+ if (StringRef(Args[i]) == "-canonical-prefixes")
+ CanonicalPrefixes = true;
+ else if (StringRef(Args[i]) == "-no-canonical-prefixes")
CanonicalPrefixes = false;
- break;
- }
}
// Handle CL and _CL_ which permits additional command line options to be
// prepended or appended.
if (ClangCLMode) {
// Arguments in "CL" are prepended.
- llvm::Optional<std::string> OptCL = llvm::sys::Process::GetEnv("CL");
- if (OptCL.hasValue()) {
+ std::optional<std::string> OptCL = llvm::sys::Process::GetEnv("CL");
+ if (OptCL) {
SmallVector<const char *, 8> PrependedOpts;
- getCLEnvVarOptions(OptCL.getValue(), Saver, PrependedOpts);
+ getCLEnvVarOptions(*OptCL, Saver, PrependedOpts);
// Insert right after the program name to prepend to the argument list.
Args.insert(Args.begin() + 1, PrependedOpts.begin(), PrependedOpts.end());
}
// Arguments in "_CL_" are appended.
- llvm::Optional<std::string> Opt_CL_ = llvm::sys::Process::GetEnv("_CL_");
- if (Opt_CL_.hasValue()) {
+ std::optional<std::string> Opt_CL_ = llvm::sys::Process::GetEnv("_CL_");
+ if (Opt_CL_) {
SmallVector<const char *, 8> AppendedOpts;
- getCLEnvVarOptions(Opt_CL_.getValue(), Saver, AppendedOpts);
+ getCLEnvVarOptions(*Opt_CL_, Saver, AppendedOpts);
// Insert at the end of the argument list to append.
Args.append(AppendedOpts.begin(), AppendedOpts.end());
@@ -453,20 +448,25 @@ int main(int Argc, const char **Argv) {
ApplyQAOverride(Args, OverrideStr, SavedStrings);
}
- std::string Path = GetExecutablePath(Args[0], CanonicalPrefixes);
+ std::string Path = GetExecutablePath(ToolContext.Path, CanonicalPrefixes);
// Whether the cc1 tool should be called inside the current process, or if we
// should spawn a new clang subprocess (old behavior).
// Not having an additional process saves some execution time of Windows,
// and makes debugging and profiling easier.
- bool UseNewCC1Process;
+ bool UseNewCC1Process = CLANG_SPAWN_CC1;
+ for (const char *Arg : Args)
+ UseNewCC1Process = llvm::StringSwitch<bool>(Arg)
+ .Case("-fno-integrated-cc1", true)
+ .Case("-fintegrated-cc1", false)
+ .Default(UseNewCC1Process);
IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts =
- CreateAndPopulateDiagOpts(Args, UseNewCC1Process);
+ CreateAndPopulateDiagOpts(Args);
TextDiagnosticPrinter *DiagClient
= new TextDiagnosticPrinter(llvm::errs(), &*DiagOpts);
- FixupDiagPrefixExeName(DiagClient, Path);
+ FixupDiagPrefixExeName(DiagClient, ProgName);
IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
@@ -484,46 +484,64 @@ int main(int Argc, const char **Argv) {
Driver TheDriver(Path, llvm::sys::getDefaultTargetTriple(), Diags);
SetInstallDir(Args, TheDriver, CanonicalPrefixes);
- auto TargetAndMode = ToolChain::getTargetAndModeFromProgramName(Args[0]);
+ auto TargetAndMode = ToolChain::getTargetAndModeFromProgramName(ProgName);
TheDriver.setTargetAndMode(TargetAndMode);
+ // If -canonical-prefixes is set, GetExecutablePath will have resolved Path
+ // to the llvm driver binary, not clang. In this case, we need to use
+ // PrependArg which should be clang-*. Checking just CanonicalPrefixes is
+ // safe even in the normal case because PrependArg will be null so
+ // setPrependArg will be a no-op.
+ if (ToolContext.NeedsPrependArg || CanonicalPrefixes)
+ TheDriver.setPrependArg(ToolContext.PrependArg);
insertTargetAndModeArgs(TargetAndMode, Args, SavedStrings);
- SetBackdoorDriverOutputsFromEnvVars(TheDriver);
+ if (!SetBackdoorDriverOutputsFromEnvVars(TheDriver))
+ return 1;
if (!UseNewCC1Process) {
- TheDriver.CC1Main = &ExecuteCC1Tool;
+ TheDriver.CC1Main = [ToolContext](SmallVectorImpl<const char *> &ArgV) {
+ return ExecuteCC1Tool(ArgV, ToolContext);
+ };
// Ensure the CC1Command actually catches cc1 crashes
llvm::CrashRecoveryContext::Enable();
}
std::unique_ptr<Compilation> C(TheDriver.BuildCompilation(Args));
+
+ Driver::ReproLevel ReproLevel = Driver::ReproLevel::OnCrash;
+ if (Arg *A = C->getArgs().getLastArg(options::OPT_gen_reproducer_eq)) {
+ auto Level =
+ llvm::StringSwitch<std::optional<Driver::ReproLevel>>(A->getValue())
+ .Case("off", Driver::ReproLevel::Off)
+ .Case("crash", Driver::ReproLevel::OnCrash)
+ .Case("error", Driver::ReproLevel::OnError)
+ .Case("always", Driver::ReproLevel::Always)
+ .Default(std::nullopt);
+ if (!Level) {
+ llvm::errs() << "Unknown value for " << A->getSpelling() << ": '"
+ << A->getValue() << "'\n";
+ return 1;
+ }
+ ReproLevel = *Level;
+ }
+ if (!!::getenv("FORCE_CLANG_DIAGNOSTICS_CRASH"))
+ ReproLevel = Driver::ReproLevel::Always;
+
int Res = 1;
bool IsCrash = false;
+ Driver::CommandStatus CommandStatus = Driver::CommandStatus::Ok;
+ // Pretend the first command failed if ReproStatus is Always.
+ const Command *FailingCommand = nullptr;
+ if (!C->getJobs().empty())
+ FailingCommand = &*C->getJobs().begin();
if (C && !C->containsError()) {
SmallVector<std::pair<int, const Command *>, 4> FailingCommands;
Res = TheDriver.ExecuteCompilation(*C, FailingCommands);
- // Force a crash to test the diagnostics.
- if (TheDriver.GenReproducer) {
- Diags.Report(diag::err_drv_force_crash)
- << !::getenv("FORCE_CLANG_DIAGNOSTICS_CRASH");
-
- // Pretend that every command failed.
- FailingCommands.clear();
- for (const auto &J : C->getJobs())
- if (const Command *C = dyn_cast<Command>(&J))
- FailingCommands.push_back(std::make_pair(-1, C));
-
- // Print the bug report message that would be printed if we did actually
- // crash, but only if we're crashing due to FORCE_CLANG_DIAGNOSTICS_CRASH.
- if (::getenv("FORCE_CLANG_DIAGNOSTICS_CRASH"))
- llvm::dbgs() << llvm::getBugReportMsg();
- }
-
for (const auto &P : FailingCommands) {
int CommandRes = P.first;
- const Command *FailingCommand = P.second;
+ FailingCommand = P.second;
if (!Res)
Res = CommandRes;
@@ -542,13 +560,22 @@ int main(int Argc, const char **Argv) {
// https://pubs.opengroup.org/onlinepubs/9699919799/xrat/V4_xcu_chap02.html
IsCrash |= CommandRes > 128;
#endif
- if (IsCrash) {
- TheDriver.generateCompilationDiagnostics(*C, *FailingCommand);
+ CommandStatus =
+ IsCrash ? Driver::CommandStatus::Crash : Driver::CommandStatus::Error;
+ if (IsCrash)
break;
- }
}
}
+ // Print the bug report message that would be printed if we did actually
+ // crash, but only if we're crashing due to FORCE_CLANG_DIAGNOSTICS_CRASH.
+ if (::getenv("FORCE_CLANG_DIAGNOSTICS_CRASH"))
+ llvm::dbgs() << llvm::getBugReportMsg();
+ if (FailingCommand != nullptr &&
+ TheDriver.maybeGenerateCompilationDiagnostics(CommandStatus, ReproLevel,
+ *C, *FailingCommand))
+ Res = 1;
+
Diags.getClient()->finish();
if (!UseNewCC1Process && IsCrash) {
diff --git a/contrib/llvm-project/clang/tools/nvptx-arch/NVPTXArch.cpp b/contrib/llvm-project/clang/tools/nvptx-arch/NVPTXArch.cpp
new file mode 100644
index 000000000000..71a48657576e
--- /dev/null
+++ b/contrib/llvm-project/clang/tools/nvptx-arch/NVPTXArch.cpp
@@ -0,0 +1,137 @@
+//===- NVPTXArch.cpp - list installed NVPTX devies ------*- C++ -*---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a tool for detecting name of CUDA gpus installed in the
+// system.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/Version.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <cstdio>
+#include <memory>
+
+using namespace llvm;
+
+static cl::opt<bool> Help("h", cl::desc("Alias for -help"), cl::Hidden);
+
+static void PrintVersion(raw_ostream &OS) {
+ OS << clang::getClangToolFullVersion("nvptx-arch") << '\n';
+}
+// Mark all our options with this category, everything else (except for -version
+// and -help) will be hidden.
+static cl::OptionCategory NVPTXArchCategory("nvptx-arch options");
+
+typedef enum cudaError_enum {
+ CUDA_SUCCESS = 0,
+ CUDA_ERROR_NO_DEVICE = 100,
+} CUresult;
+
+typedef enum CUdevice_attribute_enum {
+ CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR = 75,
+ CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR = 76,
+} CUdevice_attribute;
+
+typedef uint32_t CUdevice;
+
+CUresult (*cuInit)(unsigned int);
+CUresult (*cuDeviceGetCount)(int *);
+CUresult (*cuGetErrorString)(CUresult, const char **);
+CUresult (*cuDeviceGet)(CUdevice *, int);
+CUresult (*cuDeviceGetAttribute)(int *, CUdevice_attribute, CUdevice);
+
+constexpr const char *DynamicCudaPath = "libcuda.so.1";
+
+llvm::Error loadCUDA() {
+ std::string ErrMsg;
+ auto DynlibHandle = std::make_unique<llvm::sys::DynamicLibrary>(
+ llvm::sys::DynamicLibrary::getPermanentLibrary(DynamicCudaPath, &ErrMsg));
+ if (!DynlibHandle->isValid()) {
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "Failed to 'dlopen' %s", DynamicCudaPath);
+ }
+#define DYNAMIC_INIT(SYMBOL) \
+ { \
+ void *SymbolPtr = DynlibHandle->getAddressOfSymbol(#SYMBOL); \
+ if (!SymbolPtr) \
+ return llvm::createStringError(llvm::inconvertibleErrorCode(), \
+ "Failed to 'dlsym' " #SYMBOL); \
+ SYMBOL = reinterpret_cast<decltype(SYMBOL)>(SymbolPtr); \
+ }
+ DYNAMIC_INIT(cuInit);
+ DYNAMIC_INIT(cuDeviceGetCount);
+ DYNAMIC_INIT(cuGetErrorString);
+ DYNAMIC_INIT(cuDeviceGet);
+ DYNAMIC_INIT(cuDeviceGetAttribute);
+#undef DYNAMIC_INIT
+ return llvm::Error::success();
+}
+
+static int handleError(CUresult Err) {
+ const char *ErrStr = nullptr;
+ CUresult Result = cuGetErrorString(Err, &ErrStr);
+ if (Result != CUDA_SUCCESS)
+ return 1;
+ fprintf(stderr, "CUDA error: %s\n", ErrStr);
+ return 1;
+}
+
+int main(int argc, char *argv[]) {
+ cl::HideUnrelatedOptions(NVPTXArchCategory);
+
+ cl::SetVersionPrinter(PrintVersion);
+ cl::ParseCommandLineOptions(
+ argc, argv,
+ "A tool to detect the presence of NVIDIA devices on the system. \n\n"
+ "The tool will output each detected GPU architecture separated by a\n"
+ "newline character. If multiple GPUs of the same architecture are found\n"
+ "a string will be printed for each\n");
+
+ if (Help) {
+ cl::PrintHelpMessage();
+ return 0;
+ }
+
+ // Attempt to load the NVPTX driver runtime.
+ if (llvm::Error Err = loadCUDA()) {
+ logAllUnhandledErrors(std::move(Err), llvm::errs());
+ return 1;
+ }
+
+ if (CUresult Err = cuInit(0)) {
+ if (Err == CUDA_ERROR_NO_DEVICE)
+ return 0;
+ else
+ return handleError(Err);
+ }
+
+ int Count = 0;
+ if (CUresult Err = cuDeviceGetCount(&Count))
+ return handleError(Err);
+ if (Count == 0)
+ return 0;
+ for (int DeviceId = 0; DeviceId < Count; ++DeviceId) {
+ CUdevice Device;
+ if (CUresult Err = cuDeviceGet(&Device, DeviceId))
+ return handleError(Err);
+
+ int32_t Major, Minor;
+ if (CUresult Err = cuDeviceGetAttribute(
+ &Major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, Device))
+ return handleError(Err);
+ if (CUresult Err = cuDeviceGetAttribute(
+ &Minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, Device))
+ return handleError(Err);
+
+ printf("sm_%d%d\n", Major, Minor);
+ }
+ return 0;
+}
diff --git a/contrib/llvm-project/clang/utils/TableGen/ASTTableGen.cpp b/contrib/llvm-project/clang/utils/TableGen/ASTTableGen.cpp
index 3f6da40964e0..54288ff6a03b 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ASTTableGen.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ASTTableGen.cpp
@@ -15,6 +15,7 @@
#include "ASTTableGen.h"
#include "llvm/TableGen/Record.h"
#include "llvm/TableGen/Error.h"
+#include <optional>
using namespace llvm;
using namespace clang;
@@ -32,7 +33,7 @@ llvm::StringRef clang::tblgen::HasProperties::getName() const {
static StringRef removeExpectedNodeNameSuffix(Record *node, StringRef suffix) {
StringRef nodeName = node->getName();
- if (!nodeName.endswith(suffix)) {
+ if (!nodeName.ends_with(suffix)) {
PrintFatalError(node->getLoc(),
Twine("name of node doesn't end in ") + suffix);
}
@@ -81,7 +82,7 @@ void PropertyType::emitCXXValueTypeName(bool forRead, raw_ostream &out) const {
elementType.emitCXXValueTypeName(forRead, out);
out << ">";
} else if (auto valueType = getOptionalElementType()) {
- out << "llvm::Optional<";
+ out << "std::optional<";
valueType.emitCXXValueTypeName(forRead, out);
out << ">";
} else {
@@ -107,7 +108,7 @@ static void visitASTNodeRecursive(ASTNode node, ASTNode base,
static void visitHierarchy(RecordKeeper &records,
StringRef nodeClassName,
ASTNodeHierarchyVisitor<ASTNode> visit) {
- // Check for the node class, just as a sanity check.
+ // Check for the node class, just as a basic correctness check.
if (!records.getClass(nodeClassName)) {
PrintFatalError(Twine("cannot find definition for node class ")
+ nodeClassName);
diff --git a/contrib/llvm-project/clang/utils/TableGen/ASTTableGen.h b/contrib/llvm-project/clang/utils/TableGen/ASTTableGen.h
index ab9429f3feee..41f78a6a3bbc 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ASTTableGen.h
+++ b/contrib/llvm-project/clang/utils/TableGen/ASTTableGen.h
@@ -11,6 +11,7 @@
#include "llvm/TableGen/Record.h"
#include "llvm/ADT/STLExtras.h"
+#include <optional>
// These are spellings in the tblgen files.
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangASTNodesEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangASTNodesEmitter.cpp
index 2b8d7a9efdf1..07ddafce3291 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangASTNodesEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangASTNodesEmitter.cpp
@@ -33,6 +33,7 @@ class ClangASTNodesEmitter {
typedef std::multimap<ASTNode, ASTNode> ChildMap;
typedef ChildMap::const_iterator ChildIterator;
+ std::set<ASTNode> PrioritizedClasses;
RecordKeeper &Records;
ASTNode Root;
const std::string &NodeClassName;
@@ -70,8 +71,16 @@ class ClangASTNodesEmitter {
std::pair<ASTNode, ASTNode> EmitNode(raw_ostream& OS, ASTNode Base);
public:
explicit ClangASTNodesEmitter(RecordKeeper &R, const std::string &N,
- const std::string &S)
- : Records(R), NodeClassName(N), BaseSuffix(S) {}
+ const std::string &S,
+ std::string_view PriorizeIfSubclassOf)
+ : Records(R), NodeClassName(N), BaseSuffix(S) {
+ auto vecPrioritized =
+ PriorizeIfSubclassOf.empty()
+ ? std::vector<Record *>{}
+ : R.getAllDerivedDefinitions(PriorizeIfSubclassOf);
+ PrioritizedClasses =
+ std::set<ASTNode>(vecPrioritized.begin(), vecPrioritized.end());
+ }
// run - Output the .inc file contents
void run(raw_ostream &OS);
@@ -95,8 +104,23 @@ std::pair<ASTNode, ASTNode> ClangASTNodesEmitter::EmitNode(raw_ostream &OS,
if (!Base.isAbstract())
First = Last = Base;
+ auto comp = [this](ASTNode LHS, ASTNode RHS) {
+ auto LHSPrioritized = PrioritizedClasses.count(LHS) > 0;
+ auto RHSPrioritized = PrioritizedClasses.count(RHS) > 0;
+ if (LHSPrioritized && !RHSPrioritized)
+ return true;
+ if (!LHSPrioritized && RHSPrioritized)
+ return false;
+
+ return LHS.getName() > RHS.getName();
+ };
+ auto SortedChildren = std::set<ASTNode, decltype(comp)>(comp);
+
for (; i != e; ++i) {
- ASTNode Child = i->second;
+ SortedChildren.insert(i->second);
+ }
+
+ for (const auto &Child : SortedChildren) {
bool Abstract = Child.isAbstract();
std::string NodeName = macroName(std::string(Child.getName()));
@@ -148,9 +172,7 @@ void ClangASTNodesEmitter::deriveChildTree() {
const std::vector<Record*> Stmts
= Records.getAllDerivedDefinitions(NodeClassName);
- for (unsigned i = 0, e = Stmts.size(); i != e; ++i) {
- Record *R = Stmts[i];
-
+ for (auto *R : Stmts) {
if (auto B = R->getValueAsOptionalDef(BaseFieldName))
Tree.insert(std::make_pair(B, R));
else if (Root)
@@ -169,7 +191,7 @@ void ClangASTNodesEmitter::deriveChildTree() {
void ClangASTNodesEmitter::run(raw_ostream &OS) {
deriveChildTree();
- emitSourceFileHeader("List of AST nodes of a particular kind", OS);
+ emitSourceFileHeader("List of AST nodes of a particular kind", OS, Records);
// Write the preamble
OS << "#ifndef ABSTRACT_" << macroHierarchyName() << "\n";
@@ -182,9 +204,9 @@ void ClangASTNodesEmitter::run(raw_ostream &OS) {
OS << "#endif\n\n";
OS << "#ifndef LAST_" << macroHierarchyName() << "_RANGE\n";
- OS << "# define LAST_"
- << macroHierarchyName() << "_RANGE(Base, First, Last) "
- << macroHierarchyName() << "_RANGE(Base, First, Last)\n";
+ OS << "# define LAST_" << macroHierarchyName()
+ << "_RANGE(Base, First, Last) " << macroHierarchyName()
+ << "_RANGE(Base, First, Last)\n";
OS << "#endif\n\n";
EmitNode(OS, Root);
@@ -196,8 +218,20 @@ void ClangASTNodesEmitter::run(raw_ostream &OS) {
}
void clang::EmitClangASTNodes(RecordKeeper &RK, raw_ostream &OS,
- const std::string &N, const std::string &S) {
- ClangASTNodesEmitter(RK, N, S).run(OS);
+ const std::string &N, const std::string &S,
+ std::string_view PriorizeIfSubclassOf) {
+ ClangASTNodesEmitter(RK, N, S, PriorizeIfSubclassOf).run(OS);
+}
+
+void printDeclContext(const std::multimap<Record *, Record *> &Tree,
+ Record *DeclContext, raw_ostream &OS) {
+ if (!DeclContext->getValueAsBit(AbstractFieldName))
+ OS << "DECL_CONTEXT(" << DeclContext->getName() << ")\n";
+ auto i = Tree.lower_bound(DeclContext);
+ auto end = Tree.upper_bound(DeclContext);
+ for (; i != end; ++i) {
+ printDeclContext(Tree, i->second, OS);
+ }
}
// Emits and addendum to a .inc file to enumerate the clang declaration
@@ -205,43 +239,30 @@ void clang::EmitClangASTNodes(RecordKeeper &RK, raw_ostream &OS,
void clang::EmitClangDeclContext(RecordKeeper &Records, raw_ostream &OS) {
// FIXME: Find a .td file format to allow for this to be represented better.
- emitSourceFileHeader("List of AST Decl nodes", OS);
+ emitSourceFileHeader("List of AST Decl nodes", OS, Records);
OS << "#ifndef DECL_CONTEXT\n";
OS << "# define DECL_CONTEXT(DECL)\n";
OS << "#endif\n";
-
- OS << "#ifndef DECL_CONTEXT_BASE\n";
- OS << "# define DECL_CONTEXT_BASE(DECL) DECL_CONTEXT(DECL)\n";
- OS << "#endif\n";
-
- typedef std::set<Record*> RecordSet;
- typedef std::vector<Record*> RecordVector;
-
- RecordVector DeclContextsVector
- = Records.getAllDerivedDefinitions(DeclContextNodeClassName);
- RecordVector Decls = Records.getAllDerivedDefinitions(DeclNodeClassName);
- RecordSet DeclContexts (DeclContextsVector.begin(), DeclContextsVector.end());
-
- for (RecordVector::iterator i = Decls.begin(), e = Decls.end(); i != e; ++i) {
- Record *R = *i;
-
- if (Record *B = R->getValueAsOptionalDef(BaseFieldName)) {
- if (DeclContexts.find(B) != DeclContexts.end()) {
- OS << "DECL_CONTEXT_BASE(" << B->getName() << ")\n";
- DeclContexts.erase(B);
- }
- }
+
+ std::vector<Record *> DeclContextsVector =
+ Records.getAllDerivedDefinitions(DeclContextNodeClassName);
+ std::vector<Record *> Decls =
+ Records.getAllDerivedDefinitions(DeclNodeClassName);
+
+ std::multimap<Record *, Record *> Tree;
+
+ const std::vector<Record *> Stmts =
+ Records.getAllDerivedDefinitions(DeclNodeClassName);
+
+ for (auto *R : Stmts) {
+ if (auto *B = R->getValueAsOptionalDef(BaseFieldName))
+ Tree.insert(std::make_pair(B, R));
}
- // To keep identical order, RecordVector may be used
- // instead of RecordSet.
- for (RecordVector::iterator
- i = DeclContextsVector.begin(), e = DeclContextsVector.end();
- i != e; ++i)
- if (DeclContexts.find(*i) != DeclContexts.end())
- OS << "DECL_CONTEXT(" << (*i)->getName() << ")\n";
+ for (auto *DeclContext : DeclContextsVector) {
+ printDeclContext(Tree, DeclContext, OS);
+ }
OS << "#undef DECL_CONTEXT\n";
- OS << "#undef DECL_CONTEXT_BASE\n";
}
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangASTPropertiesEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangASTPropertiesEmitter.cpp
index caced02e1e11..de8dda60681f 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangASTPropertiesEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangASTPropertiesEmitter.cpp
@@ -20,6 +20,7 @@
#include "llvm/TableGen/TableGenBackend.h"
#include <cctype>
#include <map>
+#include <optional>
#include <set>
#include <string>
using namespace llvm;
@@ -455,7 +456,7 @@ void ASTPropsEmitter::emitPropertiedReaderWriterBody(HasProperties node,
// Emit code to read all the properties.
visitAllProperties(node, nodeInfo, [&](Property prop) {
// Verify that the creation code refers to this property.
- if (info.IsReader && creationCode.find(prop.getName()) == StringRef::npos)
+ if (info.IsReader && !creationCode.contains(prop.getName()))
PrintFatalError(nodeInfo.Creator.getLoc(),
"creation code for " + node.getName()
+ " doesn't refer to property \""
@@ -525,7 +526,8 @@ void ASTPropsEmitter::emitReadOfProperty(StringRef readerName,
// get a pr-value back from read(), and we should be able to forward
// that in the creation rule.
Out << " ";
- if (!condition.empty()) Out << "llvm::Optional<";
+ if (!condition.empty())
+ Out << "std::optional<";
type.emitCXXValueTypeName(true, Out);
if (!condition.empty()) Out << ">";
Out << " " << name;
@@ -591,7 +593,7 @@ void ASTPropsEmitter::emitWriteOfProperty(StringRef writerName,
template <class NodeClass>
static void emitASTReader(RecordKeeper &records, raw_ostream &out,
StringRef description) {
- emitSourceFileHeader(description, out);
+ emitSourceFileHeader(description, out, records);
ASTPropsEmitter(records, out).emitNodeReaderClass<NodeClass>();
}
@@ -605,7 +607,7 @@ void clang::EmitClangTypeReader(RecordKeeper &records, raw_ostream &out) {
template <class NodeClass>
static void emitASTWriter(RecordKeeper &records, raw_ostream &out,
StringRef description) {
- emitSourceFileHeader(description, out);
+ emitSourceFileHeader(description, out, records);
ASTPropsEmitter(records, out).emitNodeWriterClass<NodeClass>();
}
@@ -662,9 +664,7 @@ ASTPropsEmitter::emitDispatcherTemplate(const ReaderWriterInfo &info) {
declareSpecialization("<class T>",
"llvm::ArrayRef<T>",
"Array");
- declareSpecialization("<class T>",
- "llvm::Optional<T>",
- "Optional");
+ declareSpecialization("<class T>", "std::optional<T>", "Optional");
Out << "\n";
}
@@ -677,15 +677,20 @@ ASTPropsEmitter::emitPackUnpackOptionalTemplate(const ReaderWriterInfo &info) {
Out << "template <class ValueType>\n"
"struct " << classPrefix << "OptionalValue;\n";
- auto declareSpecialization = [&](const Twine &typeName,
- StringRef code) {
+ auto declareSpecialization = [&](const Twine &typeName, StringRef code) {
Out << "template <>\n"
- "struct " << classPrefix << "OptionalValue<" << typeName << "> {\n"
- " static " << (info.IsReader ? "Optional<" : "") << typeName
- << (info.IsReader ? "> " : " ") << methodName << "("
- << (info.IsReader ? "" : "Optional<") << typeName
- << (info.IsReader ? "" : ">") << " value) {\n"
- " return " << code << ";\n"
+ "struct "
+ << classPrefix << "OptionalValue<" << typeName
+ << "> {\n"
+ " static "
+ << (info.IsReader ? "std::optional<" : "") << typeName
+ << (info.IsReader ? "> " : " ") << methodName << "("
+ << (info.IsReader ? "" : "std::optional<") << typeName
+ << (info.IsReader ? "" : ">")
+ << " value) {\n"
+ " return "
+ << code
+ << ";\n"
" }\n"
"};\n";
};
@@ -847,7 +852,7 @@ void ASTPropsEmitter::emitBasicReaderWriterFile(const ReaderWriterInfo &info) {
/// Emit an .inc file that defines some helper classes for reading
/// basic values.
void clang::EmitClangBasicReader(RecordKeeper &records, raw_ostream &out) {
- emitSourceFileHeader("Helper classes for BasicReaders", out);
+ emitSourceFileHeader("Helper classes for BasicReaders", out, records);
// Use any property, we won't be using those properties.
auto info = ReaderWriterInfo::forReader<TypeNode>();
@@ -857,7 +862,7 @@ void clang::EmitClangBasicReader(RecordKeeper &records, raw_ostream &out) {
/// Emit an .inc file that defines some helper classes for writing
/// basic values.
void clang::EmitClangBasicWriter(RecordKeeper &records, raw_ostream &out) {
- emitSourceFileHeader("Helper classes for BasicWriters", out);
+ emitSourceFileHeader("Helper classes for BasicWriters", out, records);
// Use any property, we won't be using those properties.
auto info = ReaderWriterInfo::forWriter<TypeNode>();
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangAttrEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangAttrEmitter.cpp
index d679d58aaef1..89b88e386f25 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangAttrEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangAttrEmitter.cpp
@@ -16,6 +16,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
@@ -36,6 +37,7 @@
#include <cstdint>
#include <map>
#include <memory>
+#include <optional>
#include <set>
#include <sstream>
#include <string>
@@ -49,17 +51,21 @@ namespace {
class FlattenedSpelling {
std::string V, N, NS;
bool K = false;
+ const Record &OriginalSpelling;
public:
FlattenedSpelling(const std::string &Variety, const std::string &Name,
- const std::string &Namespace, bool KnownToGCC) :
- V(Variety), N(Name), NS(Namespace), K(KnownToGCC) {}
+ const std::string &Namespace, bool KnownToGCC,
+ const Record &OriginalSpelling)
+ : V(Variety), N(Name), NS(Namespace), K(KnownToGCC),
+ OriginalSpelling(OriginalSpelling) {}
explicit FlattenedSpelling(const Record &Spelling)
: V(std::string(Spelling.getValueAsString("Variety"))),
- N(std::string(Spelling.getValueAsString("Name"))) {
+ N(std::string(Spelling.getValueAsString("Name"))),
+ OriginalSpelling(Spelling) {
assert(V != "GCC" && V != "Clang" &&
"Given a GCC spelling, which means this hasn't been flattened!");
- if (V == "CXX11" || V == "C2x" || V == "Pragma")
+ if (V == "CXX11" || V == "C23" || V == "Pragma")
NS = std::string(Spelling.getValueAsString("Namespace"));
}
@@ -67,6 +73,7 @@ public:
const std::string &name() const { return N; }
const std::string &nameSpace() const { return NS; }
bool knownToGCC() const { return K; }
+ const Record &getSpellingRecord() const { return OriginalSpelling; }
};
} // end anonymous namespace
@@ -80,15 +87,15 @@ GetFlattenedSpellings(const Record &Attr) {
StringRef Variety = Spelling->getValueAsString("Variety");
StringRef Name = Spelling->getValueAsString("Name");
if (Variety == "GCC") {
- Ret.emplace_back("GNU", std::string(Name), "", true);
- Ret.emplace_back("CXX11", std::string(Name), "gnu", true);
+ Ret.emplace_back("GNU", std::string(Name), "", true, *Spelling);
+ Ret.emplace_back("CXX11", std::string(Name), "gnu", true, *Spelling);
if (Spelling->getValueAsBit("AllowInC"))
- Ret.emplace_back("C2x", std::string(Name), "gnu", true);
+ Ret.emplace_back("C23", std::string(Name), "gnu", true, *Spelling);
} else if (Variety == "Clang") {
- Ret.emplace_back("GNU", std::string(Name), "", false);
- Ret.emplace_back("CXX11", std::string(Name), "clang", false);
+ Ret.emplace_back("GNU", std::string(Name), "", false, *Spelling);
+ Ret.emplace_back("CXX11", std::string(Name), "clang", false, *Spelling);
if (Spelling->getValueAsBit("AllowInC"))
- Ret.emplace_back("C2x", std::string(Name), "clang", false);
+ Ret.emplace_back("C23", std::string(Name), "clang", false, *Spelling);
} else
Ret.push_back(FlattenedSpelling(*Spelling));
}
@@ -154,7 +161,7 @@ static StringRef NormalizeNameForSpellingComparison(StringRef Name) {
// Normalize the spelling of a GNU attribute (i.e. "x" in "__attribute__((x))"),
// removing "__" if it appears at the beginning and end of the attribute's name.
static StringRef NormalizeGNUAttrSpelling(StringRef AttrSpelling) {
- if (AttrSpelling.startswith("__") && AttrSpelling.endswith("__")) {
+ if (AttrSpelling.starts_with("__") && AttrSpelling.ends_with("__")) {
AttrSpelling = AttrSpelling.substr(2, AttrSpelling.size() - 4);
}
@@ -201,9 +208,9 @@ namespace {
bool Fake;
public:
- Argument(const Record &Arg, StringRef Attr)
- : lowerName(std::string(Arg.getValueAsString("Name"))),
- upperName(lowerName), attrName(Attr), isOpt(false), Fake(false) {
+ Argument(StringRef Arg, StringRef Attr)
+ : lowerName(std::string(Arg)), upperName(lowerName), attrName(Attr),
+ isOpt(false), Fake(false) {
if (!lowerName.empty()) {
lowerName[0] = std::tolower(lowerName[0]);
upperName[0] = std::toupper(upperName[0]);
@@ -214,6 +221,8 @@ namespace {
if (lowerName == "interface")
lowerName = "interface_";
}
+ Argument(const Record &Arg, StringRef Attr)
+ : Argument(Arg.getValueAsString("Name"), Attr) {}
virtual ~Argument() = default;
StringRef getLowerName() const { return lowerName; }
@@ -311,12 +320,19 @@ namespace {
}
std::string getIsOmitted() const override {
- if (type == "IdentifierInfo *")
+ auto IsOneOf = [](StringRef subject, auto... list) {
+ return ((subject == list) || ...);
+ };
+
+ if (IsOneOf(type, "IdentifierInfo *", "Expr *"))
return "!get" + getUpperName().str() + "()";
- if (type == "TypeSourceInfo *")
+ if (IsOneOf(type, "TypeSourceInfo *"))
return "!get" + getUpperName().str() + "Loc()";
- if (type == "ParamIdx")
+ if (IsOneOf(type, "ParamIdx"))
return "!get" + getUpperName().str() + "().isValid()";
+
+ assert(IsOneOf(type, "unsigned", "int", "bool", "FunctionDecl *",
+ "VarDecl *"));
return "false";
}
@@ -340,7 +356,7 @@ namespace {
}
void writeDump(raw_ostream &OS) const override {
- if (StringRef(type).endswith("Decl *")) {
+ if (StringRef(type).ends_with("Decl *")) {
OS << " OS << \" \";\n";
OS << " dumpBareDeclRef(SA->get" << getUpperName() << "());\n";
} else if (type == "IdentifierInfo *") {
@@ -499,6 +515,16 @@ namespace {
OS << " assert(!is" << getLowerName() << "Expr);\n";
OS << " return " << getLowerName() << "Type;\n";
OS << " }";
+
+ OS << " std::optional<unsigned> getCached" << getUpperName()
+ << "Value() const {\n";
+ OS << " return " << getLowerName() << "Cache;\n";
+ OS << " }";
+
+ OS << " void setCached" << getUpperName()
+ << "Value(unsigned AlignVal) {\n";
+ OS << " " << getLowerName() << "Cache = AlignVal;\n";
+ OS << " }";
}
void writeAccessorDefinitions(raw_ostream &OS) const override {
@@ -521,21 +547,6 @@ namespace {
OS << " return " << getLowerName()
<< "Type->getType()->containsErrors();\n";
OS << "}\n";
-
- // FIXME: Do not do the calculation here
- // FIXME: Handle types correctly
- // A null pointer means maximum alignment
- OS << "unsigned " << getAttrName() << "Attr::get" << getUpperName()
- << "(ASTContext &Ctx) const {\n";
- OS << " assert(!is" << getUpperName() << "Dependent());\n";
- OS << " if (is" << getLowerName() << "Expr)\n";
- OS << " return " << getLowerName() << "Expr ? " << getLowerName()
- << "Expr->EvaluateKnownConstInt(Ctx).getZExtValue()"
- << " * Ctx.getCharWidth() : "
- << "Ctx.getTargetDefaultAlignForAttributeAligned();\n";
- OS << " else\n";
- OS << " return 0; // FIXME\n";
- OS << "}\n";
}
void writeASTVisitorTraversal(raw_ostream &OS) const override {
@@ -592,7 +603,8 @@ namespace {
OS << "union {\n";
OS << "Expr *" << getLowerName() << "Expr;\n";
OS << "TypeSourceInfo *" << getLowerName() << "Type;\n";
- OS << "};";
+ OS << "};\n";
+ OS << "std::optional<unsigned> " << getLowerName() << "Cache;\n";
}
void writePCHReadArgs(raw_ostream &OS) const override {
@@ -619,14 +631,21 @@ namespace {
}
std::string getIsOmitted() const override {
- return "!is" + getLowerName().str() + "Expr || !" + getLowerName().str()
- + "Expr";
+ return "!((is" + getLowerName().str() + "Expr && " +
+ getLowerName().str() + "Expr) || (!is" + getLowerName().str() +
+ "Expr && " + getLowerName().str() + "Type))";
}
void writeValue(raw_ostream &OS) const override {
OS << "\";\n";
- OS << " " << getLowerName()
+ OS << " if (is" << getLowerName() << "Expr && " << getLowerName()
+ << "Expr)";
+ OS << " " << getLowerName()
<< "Expr->printPretty(OS, nullptr, Policy);\n";
+ OS << " if (!is" << getLowerName() << "Expr && " << getLowerName()
+ << "Type)";
+ OS << " " << getLowerName()
+ << "Type->getType().print(OS, Policy);\n";
OS << " OS << \"";
}
@@ -665,6 +684,11 @@ namespace {
ArgName(getLowerName().str() + "_"), ArgSizeName(ArgName + "Size"),
RangeName(std::string(getLowerName())) {}
+ VariadicArgument(StringRef Arg, StringRef Attr, std::string T)
+ : Argument(Arg, Attr), Type(std::move(T)),
+ ArgName(getLowerName().str() + "_"), ArgSizeName(ArgName + "Size"),
+ RangeName(std::string(getLowerName())) {}
+
const std::string &getType() const { return Type; }
const std::string &getArgName() const { return ArgName; }
const std::string &getArgSizeName() const { return ArgSizeName; }
@@ -687,6 +711,18 @@ namespace {
<< "); }\n";
}
+ void writeSetter(raw_ostream &OS) const {
+ OS << " void set" << getUpperName() << "(ASTContext &Ctx, ";
+ writeCtorParameters(OS);
+ OS << ") {\n";
+ OS << " " << ArgSizeName << " = " << getUpperName() << "Size;\n";
+ OS << " " << ArgName << " = new (Ctx, 16) " << getType() << "["
+ << ArgSizeName << "];\n";
+ OS << " ";
+ writeCtorBody(OS);
+ OS << " }\n";
+ }
+
void writeCloneArgs(raw_ostream &OS) const override {
OS << ArgName << ", " << ArgSizeName;
}
@@ -786,6 +822,49 @@ namespace {
}
};
+ class VariadicOMPInteropInfoArgument : public VariadicArgument {
+ public:
+ VariadicOMPInteropInfoArgument(const Record &Arg, StringRef Attr)
+ : VariadicArgument(Arg, Attr, "OMPInteropInfo") {}
+
+ void writeDump(raw_ostream &OS) const override {
+ OS << " for (" << getAttrName() << "Attr::" << getLowerName()
+ << "_iterator I = SA->" << getLowerName() << "_begin(), E = SA->"
+ << getLowerName() << "_end(); I != E; ++I) {\n";
+ OS << " if (I->IsTarget && I->IsTargetSync)\n";
+ OS << " OS << \" Target_TargetSync\";\n";
+ OS << " else if (I->IsTarget)\n";
+ OS << " OS << \" Target\";\n";
+ OS << " else\n";
+ OS << " OS << \" TargetSync\";\n";
+ OS << " }\n";
+ }
+
+ void writePCHReadDecls(raw_ostream &OS) const override {
+ OS << " unsigned " << getLowerName() << "Size = Record.readInt();\n";
+ OS << " SmallVector<OMPInteropInfo, 4> " << getLowerName() << ";\n";
+ OS << " " << getLowerName() << ".reserve(" << getLowerName()
+ << "Size);\n";
+ OS << " for (unsigned I = 0, E = " << getLowerName() << "Size; ";
+ OS << "I != E; ++I) {\n";
+ OS << " bool IsTarget = Record.readBool();\n";
+ OS << " bool IsTargetSync = Record.readBool();\n";
+ OS << " " << getLowerName()
+ << ".emplace_back(IsTarget, IsTargetSync);\n";
+ OS << " }\n";
+ }
+
+ void writePCHWrite(raw_ostream &OS) const override {
+ OS << " Record.push_back(SA->" << getLowerName() << "_size());\n";
+ OS << " for (" << getAttrName() << "Attr::" << getLowerName()
+ << "_iterator I = SA->" << getLowerName() << "_begin(), E = SA->"
+ << getLowerName() << "_end(); I != E; ++I) {\n";
+ OS << " Record.writeBool(I->IsTarget);\n";
+ OS << " Record.writeBool(I->IsTargetSync);\n";
+ OS << " }\n";
+ }
+ };
+
class VariadicParamIdxArgument : public VariadicArgument {
public:
VariadicParamIdxArgument(const Record &Arg, StringRef Attr)
@@ -819,15 +898,25 @@ namespace {
}
class EnumArgument : public Argument {
- std::string type;
+ std::string fullType;
+ StringRef shortType;
std::vector<StringRef> values, enums, uniques;
+ bool isExternal;
public:
EnumArgument(const Record &Arg, StringRef Attr)
- : Argument(Arg, Attr), type(std::string(Arg.getValueAsString("Type"))),
- values(Arg.getValueAsListOfStrings("Values")),
+ : Argument(Arg, Attr), values(Arg.getValueAsListOfStrings("Values")),
enums(Arg.getValueAsListOfStrings("Enums")),
- uniques(uniqueEnumsInOrder(enums)) {
+ uniques(uniqueEnumsInOrder(enums)),
+ isExternal(Arg.getValueAsBit("IsExternalType")) {
+ StringRef Type = Arg.getValueAsString("Type");
+ shortType = isExternal ? Type.rsplit("::").second : Type;
+ // If shortType didn't contain :: at all rsplit will give us an empty
+ // string.
+ if (shortType.empty())
+ shortType = Type;
+ fullType = isExternal ? Type : (getAttrName() + "Attr::" + Type).str();
+
// FIXME: Emit a proper error
assert(!uniques.empty());
}
@@ -835,7 +924,7 @@ namespace {
bool isEnumArg() const override { return true; }
void writeAccessors(raw_ostream &OS) const override {
- OS << " " << type << " get" << getUpperName() << "() const {\n";
+ OS << " " << fullType << " get" << getUpperName() << "() const {\n";
OS << " return " << getLowerName() << ";\n";
OS << " }";
}
@@ -851,30 +940,32 @@ namespace {
OS << getLowerName() << "(" << getUpperName() << ")";
}
void writeCtorDefaultInitializers(raw_ostream &OS) const override {
- OS << getLowerName() << "(" << type << "(0))";
+ OS << getLowerName() << "(" << fullType << "(0))";
}
void writeCtorParameters(raw_ostream &OS) const override {
- OS << type << " " << getUpperName();
+ OS << fullType << " " << getUpperName();
}
void writeDeclarations(raw_ostream &OS) const override {
- auto i = uniques.cbegin(), e = uniques.cend();
- // The last one needs to not have a comma.
- --e;
+ if (!isExternal) {
+ auto i = uniques.cbegin(), e = uniques.cend();
+ // The last one needs to not have a comma.
+ --e;
+
+ OS << "public:\n";
+ OS << " enum " << shortType << " {\n";
+ for (; i != e; ++i)
+ OS << " " << *i << ",\n";
+ OS << " " << *e << "\n";
+ OS << " };\n";
+ }
- OS << "public:\n";
- OS << " enum " << type << " {\n";
- for (; i != e; ++i)
- OS << " " << *i << ",\n";
- OS << " " << *e << "\n";
- OS << " };\n";
OS << "private:\n";
- OS << " " << type << " " << getLowerName() << ";";
+ OS << " " << fullType << " " << getLowerName() << ";";
}
void writePCHReadDecls(raw_ostream &OS) const override {
- OS << " " << getAttrName() << "Attr::" << type << " " << getLowerName()
- << "(static_cast<" << getAttrName() << "Attr::" << type
- << ">(Record.readInt()));\n";
+ OS << " " << fullType << " " << getLowerName() << "(static_cast<"
+ << fullType << ">(Record.readInt()));\n";
}
void writePCHReadArgs(raw_ostream &OS) const override {
@@ -882,45 +973,50 @@ namespace {
}
void writePCHWrite(raw_ostream &OS) const override {
- OS << "Record.push_back(SA->get" << getUpperName() << "());\n";
+ OS << "Record.push_back(static_cast<uint64_t>(SA->get" << getUpperName()
+ << "()));\n";
}
void writeValue(raw_ostream &OS) const override {
// FIXME: this isn't 100% correct -- some enum arguments require printing
// as a string literal, while others require printing as an identifier.
// Tablegen currently does not distinguish between the two forms.
- OS << "\\\"\" << " << getAttrName() << "Attr::Convert" << type << "ToStr(get"
- << getUpperName() << "()) << \"\\\"";
+ OS << "\\\"\" << " << getAttrName() << "Attr::Convert" << shortType
+ << "ToStr(get" << getUpperName() << "()) << \"\\\"";
}
void writeDump(raw_ostream &OS) const override {
OS << " switch(SA->get" << getUpperName() << "()) {\n";
for (const auto &I : uniques) {
- OS << " case " << getAttrName() << "Attr::" << I << ":\n";
+ OS << " case " << fullType << "::" << I << ":\n";
OS << " OS << \" " << I << "\";\n";
OS << " break;\n";
}
+ if (isExternal) {
+ OS << " default:\n";
+ OS << " llvm_unreachable(\"Invalid attribute value\");\n";
+ }
OS << " }\n";
}
void writeConversion(raw_ostream &OS, bool Header) const {
if (Header) {
- OS << " static bool ConvertStrTo" << type << "(StringRef Val, " << type
- << " &Out);\n";
- OS << " static const char *Convert" << type << "ToStr(" << type
- << " Val);\n";
+ OS << " static bool ConvertStrTo" << shortType << "(StringRef Val, "
+ << fullType << " &Out);\n";
+ OS << " static const char *Convert" << shortType << "ToStr("
+ << fullType << " Val);\n";
return;
}
- OS << "bool " << getAttrName() << "Attr::ConvertStrTo" << type
- << "(StringRef Val, " << type << " &Out) {\n";
- OS << " Optional<" << type << "> R = llvm::StringSwitch<Optional<";
- OS << type << ">>(Val)\n";
+ OS << "bool " << getAttrName() << "Attr::ConvertStrTo" << shortType
+ << "(StringRef Val, " << fullType << " &Out) {\n";
+ OS << " std::optional<" << fullType << "> "
+ << "R = llvm::StringSwitch<std::optional<" << fullType << ">>(Val)\n";
for (size_t I = 0; I < enums.size(); ++I) {
OS << " .Case(\"" << values[I] << "\", ";
- OS << getAttrName() << "Attr::" << enums[I] << ")\n";
+ OS << fullType << "::" << enums[I] << ")\n";
}
- OS << " .Default(Optional<" << type << ">());\n";
+ OS << " .Default(std::optional<" << fullType << ">());\n";
OS << " if (R) {\n";
OS << " Out = *R;\n return true;\n }\n";
OS << " return false;\n";
@@ -930,14 +1026,17 @@ namespace {
// trivial because some enumeration values have multiple named
// enumerators, such as type_visibility(internal) and
// type_visibility(hidden) both mapping to TypeVisibilityAttr::Hidden.
- OS << "const char *" << getAttrName() << "Attr::Convert" << type
- << "ToStr(" << type << " Val) {\n"
+ OS << "const char *" << getAttrName() << "Attr::Convert" << shortType
+ << "ToStr(" << fullType << " Val) {\n"
<< " switch(Val) {\n";
SmallDenseSet<StringRef, 8> Uniques;
for (size_t I = 0; I < enums.size(); ++I) {
if (Uniques.insert(enums[I]).second)
- OS << " case " << getAttrName() << "Attr::" << enums[I]
- << ": return \"" << values[I] << "\";\n";
+ OS << " case " << fullType << "::" << enums[I] << ": return \""
+ << values[I] << "\";\n";
+ }
+ if (isExternal) {
+ OS << " default: llvm_unreachable(\"Invalid attribute value\");\n";
}
OS << " }\n"
<< " llvm_unreachable(\"No enumerator with that value\");\n"
@@ -946,27 +1045,36 @@ namespace {
};
class VariadicEnumArgument: public VariadicArgument {
- std::string type, QualifiedTypeName;
+ std::string fullType;
+ StringRef shortType;
std::vector<StringRef> values, enums, uniques;
+ bool isExternal;
protected:
void writeValueImpl(raw_ostream &OS) const override {
// FIXME: this isn't 100% correct -- some enum arguments require printing
// as a string literal, while others require printing as an identifier.
// Tablegen currently does not distinguish between the two forms.
- OS << " OS << \"\\\"\" << " << getAttrName() << "Attr::Convert" << type
- << "ToStr(Val)" << "<< \"\\\"\";\n";
+ OS << " OS << \"\\\"\" << " << getAttrName() << "Attr::Convert"
+ << shortType << "ToStr(Val)"
+ << "<< \"\\\"\";\n";
}
public:
VariadicEnumArgument(const Record &Arg, StringRef Attr)
: VariadicArgument(Arg, Attr,
std::string(Arg.getValueAsString("Type"))),
- type(std::string(Arg.getValueAsString("Type"))),
values(Arg.getValueAsListOfStrings("Values")),
enums(Arg.getValueAsListOfStrings("Enums")),
- uniques(uniqueEnumsInOrder(enums)) {
- QualifiedTypeName = getAttrName().str() + "Attr::" + type;
+ uniques(uniqueEnumsInOrder(enums)),
+ isExternal(Arg.getValueAsBit("IsExternalType")) {
+ StringRef Type = Arg.getValueAsString("Type");
+ shortType = isExternal ? Type.rsplit("::").second : Type;
+ // If shortType didn't contain :: at all rsplit will give us an empty
+ // string.
+ if (shortType.empty())
+ shortType = Type;
+ fullType = isExternal ? Type : (getAttrName() + "Attr::" + Type).str();
// FIXME: Emit a proper error
assert(!uniques.empty());
@@ -975,16 +1083,18 @@ namespace {
bool isVariadicEnumArg() const override { return true; }
void writeDeclarations(raw_ostream &OS) const override {
- auto i = uniques.cbegin(), e = uniques.cend();
- // The last one needs to not have a comma.
- --e;
-
- OS << "public:\n";
- OS << " enum " << type << " {\n";
- for (; i != e; ++i)
- OS << " " << *i << ",\n";
- OS << " " << *e << "\n";
- OS << " };\n";
+ if (!isExternal) {
+ auto i = uniques.cbegin(), e = uniques.cend();
+ // The last one needs to not have a comma.
+ --e;
+
+ OS << "public:\n";
+ OS << " enum " << shortType << " {\n";
+ for (; i != e; ++i)
+ OS << " " << *i << ",\n";
+ OS << " " << *e << "\n";
+ OS << " };\n";
+ }
OS << "private:\n";
VariadicArgument::writeDeclarations(OS);
@@ -996,7 +1106,7 @@ namespace {
<< getLowerName() << "_end(); I != E; ++I) {\n";
OS << " switch(*I) {\n";
for (const auto &UI : uniques) {
- OS << " case " << getAttrName() << "Attr::" << UI << ":\n";
+ OS << " case " << fullType << "::" << UI << ":\n";
OS << " OS << \" " << UI << "\";\n";
OS << " break;\n";
}
@@ -1006,13 +1116,13 @@ namespace {
void writePCHReadDecls(raw_ostream &OS) const override {
OS << " unsigned " << getLowerName() << "Size = Record.readInt();\n";
- OS << " SmallVector<" << QualifiedTypeName << ", 4> " << getLowerName()
+ OS << " SmallVector<" << fullType << ", 4> " << getLowerName()
<< ";\n";
OS << " " << getLowerName() << ".reserve(" << getLowerName()
<< "Size);\n";
OS << " for (unsigned i = " << getLowerName() << "Size; i; --i)\n";
- OS << " " << getLowerName() << ".push_back(" << "static_cast<"
- << QualifiedTypeName << ">(Record.readInt()));\n";
+ OS << " " << getLowerName() << ".push_back("
+ << "static_cast<" << fullType << ">(Record.readInt()));\n";
}
void writePCHWrite(raw_ostream &OS) const override {
@@ -1020,41 +1130,42 @@ namespace {
OS << " for (" << getAttrName() << "Attr::" << getLowerName()
<< "_iterator i = SA->" << getLowerName() << "_begin(), e = SA->"
<< getLowerName() << "_end(); i != e; ++i)\n";
- OS << " " << WritePCHRecord(QualifiedTypeName, "(*i)");
+ OS << " " << WritePCHRecord(fullType, "(*i)");
}
void writeConversion(raw_ostream &OS, bool Header) const {
if (Header) {
- OS << " static bool ConvertStrTo" << type << "(StringRef Val, " << type
- << " &Out);\n";
- OS << " static const char *Convert" << type << "ToStr(" << type
- << " Val);\n";
+ OS << " static bool ConvertStrTo" << shortType << "(StringRef Val, "
+ << fullType << " &Out);\n";
+ OS << " static const char *Convert" << shortType << "ToStr("
+ << fullType << " Val);\n";
return;
}
- OS << "bool " << getAttrName() << "Attr::ConvertStrTo" << type
+ OS << "bool " << getAttrName() << "Attr::ConvertStrTo" << shortType
<< "(StringRef Val, ";
- OS << type << " &Out) {\n";
- OS << " Optional<" << type << "> R = llvm::StringSwitch<Optional<";
- OS << type << ">>(Val)\n";
+ OS << fullType << " &Out) {\n";
+ OS << " std::optional<" << fullType
+ << "> R = llvm::StringSwitch<std::optional<";
+ OS << fullType << ">>(Val)\n";
for (size_t I = 0; I < enums.size(); ++I) {
OS << " .Case(\"" << values[I] << "\", ";
- OS << getAttrName() << "Attr::" << enums[I] << ")\n";
+ OS << fullType << "::" << enums[I] << ")\n";
}
- OS << " .Default(Optional<" << type << ">());\n";
+ OS << " .Default(std::optional<" << fullType << ">());\n";
OS << " if (R) {\n";
OS << " Out = *R;\n return true;\n }\n";
OS << " return false;\n";
OS << "}\n\n";
- OS << "const char *" << getAttrName() << "Attr::Convert" << type
- << "ToStr(" << type << " Val) {\n"
+ OS << "const char *" << getAttrName() << "Attr::Convert" << shortType
+ << "ToStr(" << fullType << " Val) {\n"
<< " switch(Val) {\n";
SmallDenseSet<StringRef, 8> Uniques;
for (size_t I = 0; I < enums.size(); ++I) {
if (Uniques.insert(enums[I]).second)
- OS << " case " << getAttrName() << "Attr::" << enums[I]
- << ": return \"" << values[I] << "\";\n";
+ OS << " case " << fullType << "::" << enums[I] << ": return \""
+ << values[I] << "\";\n";
}
OS << " }\n"
<< " llvm_unreachable(\"No enumerator with that value\");\n"
@@ -1153,6 +1264,13 @@ namespace {
OS << " }\n";
}
+ void writeValue(raw_ostream &OS) const override {
+ OS << "\";\n";
+ OS << " get" << getUpperName()
+ << "()->printPretty(OS, nullptr, Policy);\n";
+ OS << " OS << \"";
+ }
+
void writeDump(raw_ostream &OS) const override {}
void writeDumpChildren(raw_ostream &OS) const override {
@@ -1168,6 +1286,9 @@ namespace {
: VariadicArgument(Arg, Attr, "Expr *")
{}
+ VariadicExprArgument(StringRef ArgName, StringRef Attr)
+ : VariadicArgument(ArgName, Attr, "Expr *") {}
+
void writeASTVisitorTraversal(raw_ostream &OS) const override {
OS << " {\n";
OS << " " << getType() << " *I = A->" << getLowerName()
@@ -1293,7 +1414,29 @@ namespace {
}
};
-} // end anonymous namespace
+ class WrappedAttr : public SimpleArgument {
+ public:
+ WrappedAttr(const Record &Arg, StringRef Attr)
+ : SimpleArgument(Arg, Attr, "Attr *") {}
+
+ void writePCHReadDecls(raw_ostream &OS) const override {
+ OS << " Attr *" << getLowerName() << " = Record.readAttr();";
+ }
+
+ void writePCHWrite(raw_ostream &OS) const override {
+ OS << " AddAttr(SA->get" << getUpperName() << "());";
+ }
+
+ void writeDump(raw_ostream &OS) const override {}
+
+ void writeDumpChildren(raw_ostream &OS) const override {
+ OS << " Visit(SA->get" << getUpperName() << "());\n";
+ }
+
+ void writeHasChildren(raw_ostream &OS) const override { OS << "true"; }
+ };
+
+ } // end anonymous namespace
static std::unique_ptr<Argument>
createArgument(const Record &Arg, StringRef Attr,
@@ -1349,8 +1492,12 @@ createArgument(const Record &Arg, StringRef Attr,
Ptr = std::make_unique<VariadicIdentifierArgument>(Arg, Attr);
else if (ArgName == "VersionArgument")
Ptr = std::make_unique<VersionArgument>(Arg, Attr);
+ else if (ArgName == "WrappedAttr")
+ Ptr = std::make_unique<WrappedAttr>(Arg, Attr);
else if (ArgName == "OMPTraitInfoArgument")
Ptr = std::make_unique<SimpleArgument>(Arg, Attr, "OMPTraitInfo *");
+ else if (ArgName == "VariadicOMPInteropInfoArgument")
+ Ptr = std::make_unique<VariadicOMPInteropInfoArgument>(Arg, Attr);
if (!Ptr) {
// Search in reverse order so that the most-derived type is handled first.
@@ -1445,7 +1592,7 @@ writePrettyPrintFunction(const Record &R,
if (Variety == "GNU") {
Prefix = " __attribute__((";
Suffix = "))";
- } else if (Variety == "CXX11" || Variety == "C2x") {
+ } else if (Variety == "CXX11" || Variety == "C23") {
Prefix = " [[";
Suffix = "]]";
std::string Namespace = Spellings[I].nameSpace();
@@ -1470,6 +1617,9 @@ writePrettyPrintFunction(const Record &R,
Spelling += Namespace;
Spelling += " ";
}
+ } else if (Variety == "HLSLSemantic") {
+ Prefix = ":";
+ Suffix = "";
} else {
llvm_unreachable("Unknown attribute syntax variety!");
}
@@ -1499,12 +1649,10 @@ writePrettyPrintFunction(const Record &R,
// To avoid printing parentheses around an empty argument list or
// printing spurious commas at the end of an argument list, we need to
// determine where the last provided non-fake argument is.
- unsigned NonFakeArgs = 0;
bool FoundNonOptArg = false;
for (const auto &arg : llvm::reverse(Args)) {
if (arg->isFake())
continue;
- ++NonFakeArgs;
if (FoundNonOptArg)
continue;
// FIXME: arg->getIsOmitted() == "false" means we haven't implemented
@@ -1598,8 +1746,7 @@ SpellingNamesAreCommon(const std::vector<FlattenedSpelling>& Spellings) {
assert(!Spellings.empty() && "An empty list of spellings was provided");
std::string FirstName =
std::string(NormalizeNameForSpellingComparison(Spellings.front().name()));
- for (const auto &Spelling :
- llvm::make_range(std::next(Spellings.begin()), Spellings.end())) {
+ for (const auto &Spelling : llvm::drop_begin(Spellings)) {
std::string Name =
std::string(NormalizeNameForSpellingComparison(Spelling.name()));
if (Name != FirstName)
@@ -1772,7 +1919,7 @@ struct AttributeSubjectMatchRule {
}
if (isAbstractRule())
Result += "_abstract";
- return std::string(Result.str());
+ return std::string(Result);
}
std::string getEnumValue() const { return "attr::" + getEnumValueName(); }
@@ -1969,7 +2116,7 @@ bool PragmaClangAttributeSupport::isAttributedSupported(
for (const auto *Subject : Subjects) {
if (!isSupportedPragmaClangAttributeSubject(*Subject))
continue;
- if (SubjectsToRules.find(Subject) == SubjectsToRules.end())
+ if (!SubjectsToRules.contains(Subject))
return false;
HasAtLeastOneValidSubject = true;
}
@@ -2038,12 +2185,12 @@ PragmaClangAttributeSupport::generateStrictConformsTo(const Record &Attr,
void PragmaClangAttributeSupport::generateParsingHelpers(raw_ostream &OS) {
// Generate routines that check the names of sub-rules.
- OS << "Optional<attr::SubjectMatchRule> "
+ OS << "std::optional<attr::SubjectMatchRule> "
"defaultIsAttributeSubjectMatchSubRuleFor(StringRef, bool) {\n";
- OS << " return None;\n";
+ OS << " return std::nullopt;\n";
OS << "}\n\n";
- std::map<const Record *, std::vector<AttributeSubjectMatchRule>>
+ llvm::MapVector<const Record *, std::vector<AttributeSubjectMatchRule>>
SubMatchRules;
for (const auto &Rule : Rules) {
if (!Rule.isSubRule())
@@ -2052,36 +2199,37 @@ void PragmaClangAttributeSupport::generateParsingHelpers(raw_ostream &OS) {
}
for (const auto &SubMatchRule : SubMatchRules) {
- OS << "Optional<attr::SubjectMatchRule> isAttributeSubjectMatchSubRuleFor_"
+ OS << "std::optional<attr::SubjectMatchRule> "
+ "isAttributeSubjectMatchSubRuleFor_"
<< SubMatchRule.first->getValueAsString("Name")
<< "(StringRef Name, bool IsUnless) {\n";
OS << " if (IsUnless)\n";
OS << " return "
- "llvm::StringSwitch<Optional<attr::SubjectMatchRule>>(Name).\n";
+ "llvm::StringSwitch<std::optional<attr::SubjectMatchRule>>(Name).\n";
for (const auto &Rule : SubMatchRule.second) {
if (Rule.isNegatedSubRule())
OS << " Case(\"" << Rule.getName() << "\", " << Rule.getEnumValue()
<< ").\n";
}
- OS << " Default(None);\n";
+ OS << " Default(std::nullopt);\n";
OS << " return "
- "llvm::StringSwitch<Optional<attr::SubjectMatchRule>>(Name).\n";
+ "llvm::StringSwitch<std::optional<attr::SubjectMatchRule>>(Name).\n";
for (const auto &Rule : SubMatchRule.second) {
if (!Rule.isNegatedSubRule())
OS << " Case(\"" << Rule.getName() << "\", " << Rule.getEnumValue()
<< ").\n";
}
- OS << " Default(None);\n";
+ OS << " Default(std::nullopt);\n";
OS << "}\n\n";
}
// Generate the function that checks for the top-level rules.
- OS << "std::pair<Optional<attr::SubjectMatchRule>, "
- "Optional<attr::SubjectMatchRule> (*)(StringRef, "
+ OS << "std::pair<std::optional<attr::SubjectMatchRule>, "
+ "std::optional<attr::SubjectMatchRule> (*)(StringRef, "
"bool)> isAttributeSubjectMatchRule(StringRef Name) {\n";
OS << " return "
- "llvm::StringSwitch<std::pair<Optional<attr::SubjectMatchRule>, "
- "Optional<attr::SubjectMatchRule> (*) (StringRef, "
+ "llvm::StringSwitch<std::pair<std::optional<attr::SubjectMatchRule>, "
+ "std::optional<attr::SubjectMatchRule> (*) (StringRef, "
"bool)>>(Name).\n";
for (const auto &Rule : Rules) {
if (Rule.isSubRule())
@@ -2095,7 +2243,7 @@ void PragmaClangAttributeSupport::generateParsingHelpers(raw_ostream &OS) {
OS << " Case(\"" << Rule.getName() << "\", std::make_pair("
<< Rule.getEnumValue() << ", " << SubRuleFunction << ")).\n";
}
- OS << " Default(std::make_pair(None, "
+ OS << " Default(std::make_pair(std::nullopt, "
"defaultIsAttributeSubjectMatchSubRuleFor));\n";
OS << "}\n\n";
@@ -2137,6 +2285,11 @@ static void forEachUniqueSpelling(const Record &Attr, Fn &&F) {
}
}
+static bool isTypeArgument(const Record *Arg) {
+ return !Arg->getSuperClasses().empty() &&
+ Arg->getSuperClasses().back().first->getName() == "TypeArgument";
+}
+
/// Emits the first-argument-is-type property for attributes.
static void emitClangAttrTypeArgList(RecordKeeper &Records, raw_ostream &OS) {
OS << "#if defined(CLANG_ATTR_TYPE_ARG_LIST)\n";
@@ -2148,7 +2301,7 @@ static void emitClangAttrTypeArgList(RecordKeeper &Records, raw_ostream &OS) {
if (Args.empty())
continue;
- if (Args[0]->getSuperClasses().back().first->getName() != "TypeArgument")
+ if (!isTypeArgument(Args[0]))
continue;
// All these spellings take a single type argument.
@@ -2178,7 +2331,7 @@ static void emitClangAttrArgContextList(RecordKeeper &Records, raw_ostream &OS)
OS << "#endif // CLANG_ATTR_ARG_CONTEXT_LIST\n\n";
}
-static bool isIdentifierArgument(Record *Arg) {
+static bool isIdentifierArgument(const Record *Arg) {
return !Arg->getSuperClasses().empty() &&
llvm::StringSwitch<bool>(Arg->getSuperClasses().back().first->getName())
.Case("IdentifierArgument", true)
@@ -2187,7 +2340,7 @@ static bool isIdentifierArgument(Record *Arg) {
.Default(false);
}
-static bool isVariadicIdentifierArgument(Record *Arg) {
+static bool isVariadicIdentifierArgument(const Record *Arg) {
return !Arg->getSuperClasses().empty() &&
llvm::StringSwitch<bool>(
Arg->getSuperClasses().back().first->getName())
@@ -2196,6 +2349,30 @@ static bool isVariadicIdentifierArgument(Record *Arg) {
.Default(false);
}
+static bool isVariadicExprArgument(const Record *Arg) {
+ return !Arg->getSuperClasses().empty() &&
+ llvm::StringSwitch<bool>(
+ Arg->getSuperClasses().back().first->getName())
+ .Case("VariadicExprArgument", true)
+ .Default(false);
+}
+
+static bool isStringLiteralArgument(const Record *Arg) {
+ return !Arg->getSuperClasses().empty() &&
+ llvm::StringSwitch<bool>(
+ Arg->getSuperClasses().back().first->getName())
+ .Case("StringArgument", true)
+ .Default(false);
+}
+
+static bool isVariadicStringLiteralArgument(const Record *Arg) {
+ return !Arg->getSuperClasses().empty() &&
+ llvm::StringSwitch<bool>(
+ Arg->getSuperClasses().back().first->getName())
+ .Case("VariadicStringArgument", true)
+ .Default(false);
+}
+
static void emitClangAttrVariadicIdentifierArgList(RecordKeeper &Records,
raw_ostream &OS) {
OS << "#if defined(CLANG_ATTR_VARIADIC_IDENTIFIER_ARG_LIST)\n";
@@ -2216,6 +2393,34 @@ static void emitClangAttrVariadicIdentifierArgList(RecordKeeper &Records,
OS << "#endif // CLANG_ATTR_VARIADIC_IDENTIFIER_ARG_LIST\n\n";
}
+// Emits the list of arguments that should be parsed as unevaluated string
+// literals for each attribute.
+static void emitClangAttrUnevaluatedStringLiteralList(RecordKeeper &Records,
+ raw_ostream &OS) {
+ OS << "#if defined(CLANG_ATTR_STRING_LITERAL_ARG_LIST)\n";
+ std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr");
+ for (const auto *Attr : Attrs) {
+ std::vector<Record *> Args = Attr->getValueAsListOfDefs("Args");
+ uint32_t Bits = 0;
+ assert(Args.size() <= 32 && "unsupported number of arguments in attribute");
+ for (uint32_t N = 0; N < Args.size(); ++N) {
+ Bits |= (isStringLiteralArgument(Args[N]) << N);
+ // If we have a variadic string argument, set all the remaining bits to 1
+ if (isVariadicStringLiteralArgument(Args[N])) {
+ Bits |= maskTrailingZeros<decltype(Bits)>(N);
+ break;
+ }
+ }
+ if (!Bits)
+ continue;
+ // All these spellings have at least one string literal has argument.
+ forEachUniqueSpelling(*Attr, [&](const FlattenedSpelling &S) {
+ OS << ".Case(\"" << S.name() << "\", " << Bits << ")\n";
+ });
+ }
+ OS << "#endif // CLANG_ATTR_STRING_LITERAL_ARG_LIST\n\n";
+}
+
// Emits the first-argument-is-identifier property for attributes.
static void emitClangAttrIdentifierArgList(RecordKeeper &Records, raw_ostream &OS) {
OS << "#if defined(CLANG_ATTR_IDENTIFIER_ARG_LIST)\n";
@@ -2263,6 +2468,40 @@ static void emitClangAttrThisIsaIdentifierArgList(RecordKeeper &Records,
OS << "#endif // CLANG_ATTR_THIS_ISA_IDENTIFIER_ARG_LIST\n\n";
}
+static void emitClangAttrAcceptsExprPack(RecordKeeper &Records,
+ raw_ostream &OS) {
+ OS << "#if defined(CLANG_ATTR_ACCEPTS_EXPR_PACK)\n";
+ ParsedAttrMap Attrs = getParsedAttrList(Records);
+ for (const auto &I : Attrs) {
+ const Record &Attr = *I.second;
+
+ if (!Attr.getValueAsBit("AcceptsExprPack"))
+ continue;
+
+ forEachUniqueSpelling(Attr, [&](const FlattenedSpelling &S) {
+ OS << ".Case(\"" << S.name() << "\", true)\n";
+ });
+ }
+ OS << "#endif // CLANG_ATTR_ACCEPTS_EXPR_PACK\n\n";
+}
+
+static bool isRegularKeywordAttribute(const FlattenedSpelling &S) {
+ return (S.variety() == "Keyword" &&
+ !S.getSpellingRecord().getValueAsBit("HasOwnParseRules"));
+}
+
+static void emitFormInitializer(raw_ostream &OS,
+ const FlattenedSpelling &Spelling,
+ StringRef SpellingIndex) {
+ bool IsAlignas =
+ (Spelling.variety() == "Keyword" && Spelling.name() == "alignas");
+ OS << "{AttributeCommonInfo::AS_" << Spelling.variety() << ", "
+ << SpellingIndex << ", " << (IsAlignas ? "true" : "false")
+ << " /*IsAlignas*/, "
+ << (isRegularKeywordAttribute(Spelling) ? "true" : "false")
+ << " /*IsRegularKeywordAttribute*/}";
+}
+
static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
bool Header) {
std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
@@ -2319,6 +2558,25 @@ static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
std::vector<std::unique_ptr<Argument>> Args;
Args.reserve(ArgRecords.size());
+ bool AttrAcceptsExprPack = Attr->getValueAsBit("AcceptsExprPack");
+ if (AttrAcceptsExprPack) {
+ for (size_t I = 0; I < ArgRecords.size(); ++I) {
+ const Record *ArgR = ArgRecords[I];
+ if (isIdentifierArgument(ArgR) || isVariadicIdentifierArgument(ArgR) ||
+ isTypeArgument(ArgR))
+ PrintFatalError(Attr->getLoc(),
+ "Attributes accepting packs cannot also "
+ "have identifier or type arguments.");
+ // When trying to determine if value-dependent expressions can populate
+ // the attribute without prior instantiation, the decision is made based
+ // on the assumption that only the last argument is ever variadic.
+ if (I < (ArgRecords.size() - 1) && isVariadicExprArgument(ArgR))
+ PrintFatalError(Attr->getLoc(),
+ "Attributes accepting packs can only have the last "
+ "argument be variadic.");
+ }
+ }
+
bool HasOptArg = false;
bool HasFakeArg = false;
for (const auto *ArgRecord : ArgRecords) {
@@ -2336,6 +2594,16 @@ static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
}
}
+ std::unique_ptr<VariadicExprArgument> DelayedArgs = nullptr;
+ if (AttrAcceptsExprPack) {
+ DelayedArgs =
+ std::make_unique<VariadicExprArgument>("DelayedArgs", R.getName());
+ if (Header) {
+ DelayedArgs->writeDeclarations(OS);
+ OS << "\n\n";
+ }
+ }
+
if (Header)
OS << "public:\n";
@@ -2362,7 +2630,7 @@ static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
});
// Emit CreateImplicit factory methods.
- auto emitCreate = [&](bool Implicit, bool emitFake) {
+ auto emitCreate = [&](bool Implicit, bool DelayedArgsOnly, bool emitFake) {
if (Header)
OS << " static ";
OS << R.getName() << "Attr *";
@@ -2371,16 +2639,22 @@ static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
OS << "Create";
if (Implicit)
OS << "Implicit";
+ if (DelayedArgsOnly)
+ OS << "WithDelayedArgs";
OS << "(";
OS << "ASTContext &Ctx";
- for (auto const &ai : Args) {
- if (ai->isFake() && !emitFake) continue;
+ if (!DelayedArgsOnly) {
+ for (auto const &ai : Args) {
+ if (ai->isFake() && !emitFake)
+ continue;
+ OS << ", ";
+ ai->writeCtorParameters(OS);
+ }
+ } else {
OS << ", ";
- ai->writeCtorParameters(OS);
+ DelayedArgs->writeCtorParameters(OS);
}
OS << ", const AttributeCommonInfo &CommonInfo";
- if (Header && Implicit)
- OS << " = {SourceRange{}}";
OS << ")";
if (Header) {
OS << ";\n";
@@ -2390,10 +2664,14 @@ static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
OS << " {\n";
OS << " auto *A = new (Ctx) " << R.getName();
OS << "Attr(Ctx, CommonInfo";
- for (auto const &ai : Args) {
- if (ai->isFake() && !emitFake) continue;
- OS << ", ";
- ai->writeImplicitCtorArgs(OS);
+
+ if (!DelayedArgsOnly) {
+ for (auto const &ai : Args) {
+ if (ai->isFake() && !emitFake)
+ continue;
+ OS << ", ";
+ ai->writeImplicitCtorArgs(OS);
+ }
}
OS << ");\n";
if (Implicit) {
@@ -2404,10 +2682,16 @@ static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
"!A->getAttrName())\n";
OS << " A->setAttributeSpellingListIndex(0);\n";
}
+ if (DelayedArgsOnly) {
+ OS << " A->setDelayedArgs(Ctx, ";
+ DelayedArgs->writeImplicitCtorArgs(OS);
+ OS << ");\n";
+ }
OS << " return A;\n}\n\n";
};
- auto emitCreateNoCI = [&](bool Implicit, bool emitFake) {
+ auto emitCreateNoCI = [&](bool Implicit, bool DelayedArgsOnly,
+ bool emitFake) {
if (Header)
OS << " static ";
OS << R.getName() << "Attr *";
@@ -2416,18 +2700,28 @@ static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
OS << "Create";
if (Implicit)
OS << "Implicit";
+ if (DelayedArgsOnly)
+ OS << "WithDelayedArgs";
OS << "(";
OS << "ASTContext &Ctx";
- for (auto const &ai : Args) {
- if (ai->isFake() && !emitFake) continue;
+ if (!DelayedArgsOnly) {
+ for (auto const &ai : Args) {
+ if (ai->isFake() && !emitFake)
+ continue;
+ OS << ", ";
+ ai->writeCtorParameters(OS);
+ }
+ } else {
OS << ", ";
- ai->writeCtorParameters(OS);
+ DelayedArgs->writeCtorParameters(OS);
}
- OS << ", SourceRange Range, AttributeCommonInfo::Syntax Syntax";
- if (!ElideSpelling) {
- OS << ", " << R.getName() << "Attr::Spelling S";
+ OS << ", SourceRange Range";
+ if (Header)
+ OS << " = {}";
+ if (Spellings.size() > 1) {
+ OS << ", Spelling S";
if (Header)
- OS << " = static_cast<Spelling>(SpellingNotCalculated)";
+ OS << " = " << SemanticToSyntacticMap[0];
}
OS << ")";
if (Header) {
@@ -2443,45 +2737,89 @@ static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
else
OS << "NoSemaHandlerAttribute";
- OS << ", Syntax";
- if (!ElideSpelling)
- OS << ", S";
+ if (Spellings.size() == 0) {
+ OS << ", AttributeCommonInfo::Form::Implicit()";
+ } else if (Spellings.size() == 1) {
+ OS << ", ";
+ emitFormInitializer(OS, Spellings[0], "0");
+ } else {
+ OS << ", [&]() {\n";
+ OS << " switch (S) {\n";
+ std::set<std::string> Uniques;
+ unsigned Idx = 0;
+ for (auto I = Spellings.begin(), E = Spellings.end(); I != E;
+ ++I, ++Idx) {
+ const FlattenedSpelling &S = *I;
+ const auto &Name = SemanticToSyntacticMap[Idx];
+ if (Uniques.insert(Name).second) {
+ OS << " case " << Name << ":\n";
+ OS << " return AttributeCommonInfo::Form";
+ emitFormInitializer(OS, S, Name);
+ OS << ";\n";
+ }
+ }
+ OS << " default:\n";
+ OS << " llvm_unreachable(\"Unknown attribute spelling!\");\n"
+ << " return AttributeCommonInfo::Form";
+ emitFormInitializer(OS, Spellings[0], "0");
+ OS << ";\n"
+ << " }\n"
+ << " }()";
+ }
+
OS << ");\n";
OS << " return Create";
if (Implicit)
OS << "Implicit";
+ if (DelayedArgsOnly)
+ OS << "WithDelayedArgs";
OS << "(Ctx";
- for (auto const &ai : Args) {
- if (ai->isFake() && !emitFake) continue;
+ if (!DelayedArgsOnly) {
+ for (auto const &ai : Args) {
+ if (ai->isFake() && !emitFake)
+ continue;
+ OS << ", ";
+ ai->writeImplicitCtorArgs(OS);
+ }
+ } else {
OS << ", ";
- ai->writeImplicitCtorArgs(OS);
+ DelayedArgs->writeImplicitCtorArgs(OS);
}
OS << ", I);\n";
OS << "}\n\n";
};
- auto emitCreates = [&](bool emitFake) {
- emitCreate(true, emitFake);
- emitCreate(false, emitFake);
- emitCreateNoCI(true, emitFake);
- emitCreateNoCI(false, emitFake);
+ auto emitCreates = [&](bool DelayedArgsOnly, bool emitFake) {
+ emitCreate(true, DelayedArgsOnly, emitFake);
+ emitCreate(false, DelayedArgsOnly, emitFake);
+ emitCreateNoCI(true, DelayedArgsOnly, emitFake);
+ emitCreateNoCI(false, DelayedArgsOnly, emitFake);
};
if (Header)
OS << " // Factory methods\n";
// Emit a CreateImplicit that takes all the arguments.
- emitCreates(true);
+ emitCreates(false, true);
// Emit a CreateImplicit that takes all the non-fake arguments.
if (HasFakeArg)
- emitCreates(false);
+ emitCreates(false, false);
+
+ // Emit a CreateWithDelayedArgs that takes only the dependent argument
+ // expressions.
+ if (DelayedArgs)
+ emitCreates(true, false);
// Emit constructors.
- auto emitCtor = [&](bool emitOpt, bool emitFake) {
+ auto emitCtor = [&](bool emitOpt, bool emitFake, bool emitNoArgs) {
auto shouldEmitArg = [=](const std::unique_ptr<Argument> &arg) {
- if (arg->isFake()) return emitFake;
- if (arg->isOptional()) return emitOpt;
+ if (emitNoArgs)
+ return false;
+ if (arg->isFake())
+ return emitFake;
+ if (arg->isOptional())
+ return emitOpt;
return true;
};
if (Header)
@@ -2492,7 +2830,8 @@ static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
<< "Attr(ASTContext &Ctx, const AttributeCommonInfo &CommonInfo";
OS << '\n';
for (auto const &ai : Args) {
- if (!shouldEmitArg(ai)) continue;
+ if (!shouldEmitArg(ai))
+ continue;
OS << " , ";
ai->writeCtorParameters(OS);
OS << "\n";
@@ -2522,11 +2861,17 @@ static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
}
OS << "\n";
}
+ if (DelayedArgs) {
+ OS << " , ";
+ DelayedArgs->writeCtorDefaultInitializers(OS);
+ OS << "\n";
+ }
OS << " {\n";
for (auto const &ai : Args) {
- if (!shouldEmitArg(ai)) continue;
+ if (!shouldEmitArg(ai))
+ continue;
ai->writeCtorBody(OS);
}
OS << "}\n\n";
@@ -2537,15 +2882,24 @@ static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
// Emit a constructor that includes all the arguments.
// This is necessary for cloning.
- emitCtor(true, true);
+ emitCtor(true, true, false);
// Emit a constructor that takes all the non-fake arguments.
if (HasFakeArg)
- emitCtor(true, false);
+ emitCtor(true, false, false);
// Emit a constructor that takes all the non-fake, non-optional arguments.
if (HasOptArg)
- emitCtor(false, false);
+ emitCtor(false, false, false);
+
+ // Emit constructors that takes no arguments if none already exists.
+ // This is used for delaying arguments.
+ bool HasRequiredArgs =
+ llvm::count_if(Args, [=](const std::unique_ptr<Argument> &arg) {
+ return !arg->isFake() && !arg->isOptional();
+ });
+ if (DelayedArgs && HasRequiredArgs)
+ emitCtor(false, false, true);
if (Header) {
OS << '\n';
@@ -2591,6 +2945,11 @@ static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
}
if (Header) {
+ if (DelayedArgs) {
+ DelayedArgs->writeAccessors(OS);
+ DelayedArgs->writeSetter(OS);
+ }
+
OS << R.getValueAsString("AdditionalMembers");
OS << "\n\n";
@@ -2599,6 +2958,9 @@ static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
OS << "};\n\n";
} else {
+ if (DelayedArgs)
+ DelayedArgs->writeAccessorDefinitions(OS);
+
OS << R.getName() << "Attr *" << R.getName()
<< "Attr::clone(ASTContext &C) const {\n";
OS << " auto *A = new (C) " << R.getName() << "Attr(C, *this";
@@ -2610,6 +2972,11 @@ static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
OS << " A->Inherited = Inherited;\n";
OS << " A->IsPackExpansion = IsPackExpansion;\n";
OS << " A->setImplicit(Implicit);\n";
+ if (DelayedArgs) {
+ OS << " A->setDelayedArgs(C, ";
+ DelayedArgs->writeCloneArgs(OS);
+ OS << ");\n";
+ }
OS << " return A;\n}\n\n";
writePrettyPrintFunction(R, Args, OS);
@@ -2619,7 +2986,7 @@ static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
}
// Emits the class definitions for attributes.
void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
- emitSourceFileHeader("Attribute classes' definitions", OS);
+ emitSourceFileHeader("Attribute classes' definitions", OS, Records);
OS << "#ifndef LLVM_CLANG_ATTR_CLASSES_INC\n";
OS << "#define LLVM_CLANG_ATTR_CLASSES_INC\n\n";
@@ -2631,7 +2998,8 @@ void clang::EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) {
// Emits the class method definitions for attributes.
void clang::EmitClangAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
- emitSourceFileHeader("Attribute classes' member function definitions", OS);
+ emitSourceFileHeader("Attribute classes' member function definitions", OS,
+ Records);
emitAttributes(Records, OS, false);
@@ -2676,9 +3044,9 @@ static void emitAttrList(raw_ostream &OS, StringRef Class,
// Determines if an attribute has a Pragma spelling.
static bool AttrHasPragmaSpelling(const Record *R) {
std::vector<FlattenedSpelling> Spellings = GetFlattenedSpellings(*R);
- return llvm::find_if(Spellings, [](const FlattenedSpelling &S) {
- return S.variety() == "Pragma";
- }) != Spellings.end();
+ return llvm::any_of(Spellings, [](const FlattenedSpelling &S) {
+ return S.variety() == "Pragma";
+ });
}
namespace {
@@ -2698,7 +3066,8 @@ static const AttrClassDescriptor AttrClassDescriptors[] = {
{ "INHERITABLE_ATTR", "InheritableAttr" },
{ "DECL_OR_TYPE_ATTR", "DeclOrTypeAttr" },
{ "INHERITABLE_PARAM_ATTR", "InheritableParamAttr" },
- { "PARAMETER_ABI_ATTR", "ParameterABIAttr" }
+ { "PARAMETER_ABI_ATTR", "ParameterABIAttr" },
+ { "HLSL_ANNOTATION_ATTR", "HLSLAnnotationAttr"}
};
static void emitDefaultDefine(raw_ostream &OS, StringRef name,
@@ -2866,7 +3235,8 @@ namespace clang {
// Emits the enumeration list for attributes.
void EmitClangAttrList(RecordKeeper &Records, raw_ostream &OS) {
- emitSourceFileHeader("List of all attributes that Clang recognizes", OS);
+ emitSourceFileHeader("List of all attributes that Clang recognizes", OS,
+ Records);
AttrClassHierarchy Hierarchy(Records);
@@ -2905,9 +3275,41 @@ void EmitClangAttrList(RecordKeeper &Records, raw_ostream &OS) {
}
// Emits the enumeration list for attributes.
+void EmitClangAttrPrintList(const std::string &FieldName, RecordKeeper &Records,
+ raw_ostream &OS) {
+ emitSourceFileHeader(
+ "List of attributes that can be print on the left side of a decl", OS,
+ Records);
+
+ AttrClassHierarchy Hierarchy(Records);
+
+ std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr");
+ std::vector<Record *> PragmaAttrs;
+ bool first = false;
+
+ for (auto *Attr : Attrs) {
+ if (!Attr->getValueAsBit("ASTNode"))
+ continue;
+
+ if (!Attr->getValueAsBit(FieldName))
+ continue;
+
+ if (!first) {
+ first = true;
+ OS << "#define CLANG_ATTR_LIST_" << FieldName;
+ }
+
+ OS << " \\\n case attr::" << Attr->getName() << ":";
+ }
+
+ OS << '\n';
+}
+
+// Emits the enumeration list for attributes.
void EmitClangAttrSubjectMatchRuleList(RecordKeeper &Records, raw_ostream &OS) {
emitSourceFileHeader(
- "List of all attribute subject matching rules that Clang recognizes", OS);
+ "List of all attribute subject matching rules that Clang recognizes", OS,
+ Records);
PragmaClangAttributeSupport &PragmaAttributeSupport =
getPragmaAttributeSupport(Records);
emitDefaultDefine(OS, "ATTR_MATCH_RULE", nullptr);
@@ -2917,12 +3319,13 @@ void EmitClangAttrSubjectMatchRuleList(RecordKeeper &Records, raw_ostream &OS) {
// Emits the code to read an attribute from a precompiled header.
void EmitClangAttrPCHRead(RecordKeeper &Records, raw_ostream &OS) {
- emitSourceFileHeader("Attribute deserialization code", OS);
+ emitSourceFileHeader("Attribute deserialization code", OS, Records);
Record *InhClass = Records.getClass("InheritableAttr");
std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr"),
ArgRecords;
std::vector<std::unique_ptr<Argument>> Args;
+ std::unique_ptr<VariadicExprArgument> DelayedArgs;
OS << " switch (Kind) {\n";
for (const auto *Attr : Attrs) {
@@ -2935,6 +3338,12 @@ void EmitClangAttrPCHRead(RecordKeeper &Records, raw_ostream &OS) {
OS << " bool isInherited = Record.readInt();\n";
OS << " bool isImplicit = Record.readInt();\n";
OS << " bool isPackExpansion = Record.readInt();\n";
+ DelayedArgs = nullptr;
+ if (Attr->getValueAsBit("AcceptsExprPack")) {
+ DelayedArgs =
+ std::make_unique<VariadicExprArgument>("DelayedArgs", R.getName());
+ DelayedArgs->writePCHReadDecls(OS);
+ }
ArgRecords = R.getValueAsListOfDefs("Args");
Args.clear();
for (const auto *Arg : ArgRecords) {
@@ -2951,6 +3360,12 @@ void EmitClangAttrPCHRead(RecordKeeper &Records, raw_ostream &OS) {
OS << " cast<InheritableAttr>(New)->setInherited(isInherited);\n";
OS << " New->setImplicit(isImplicit);\n";
OS << " New->setPackExpansion(isPackExpansion);\n";
+ if (DelayedArgs) {
+ OS << " cast<" << R.getName()
+ << "Attr>(New)->setDelayedArgs(Context, ";
+ DelayedArgs->writePCHReadArgs(OS);
+ OS << ");\n";
+ }
OS << " break;\n";
OS << " }\n";
}
@@ -2959,7 +3374,7 @@ void EmitClangAttrPCHRead(RecordKeeper &Records, raw_ostream &OS) {
// Emits the code to write an attribute to a precompiled header.
void EmitClangAttrPCHWrite(RecordKeeper &Records, raw_ostream &OS) {
- emitSourceFileHeader("Attribute serialization code", OS);
+ emitSourceFileHeader("Attribute serialization code", OS, Records);
Record *InhClass = Records.getClass("InheritableAttr");
std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr"), Args;
@@ -2978,6 +3393,8 @@ void EmitClangAttrPCHWrite(RecordKeeper &Records, raw_ostream &OS) {
OS << " Record.push_back(SA->isInherited());\n";
OS << " Record.push_back(A->isImplicit());\n";
OS << " Record.push_back(A->isPackExpansion());\n";
+ if (Attr->getValueAsBit("AcceptsExprPack"))
+ VariadicExprArgument("DelayedArgs", R.getName()).writePCHWrite(OS);
for (const auto *Arg : Args)
createArgument(*Arg, R.getName())->writePCHWrite(OS);
@@ -3070,9 +3487,10 @@ static bool GenerateTargetSpecificAttrChecks(const Record *R,
}
static void GenerateHasAttrSpellingStringSwitch(
- const std::vector<Record *> &Attrs, raw_ostream &OS,
- const std::string &Variety = "", const std::string &Scope = "") {
- for (const auto *Attr : Attrs) {
+ const std::vector<std::pair<const Record *, FlattenedSpelling>> &Attrs,
+ raw_ostream &OS, const std::string &Variety,
+ const std::string &Scope = "") {
+ for (const auto &[Attr, Spelling] : Attrs) {
// C++11-style attributes have specific version information associated with
// them. If the attribute has no scope, the version information must not
// have the default value (1), as that's incorrect. Instead, the unscoped
@@ -3080,22 +3498,33 @@ static void GenerateHasAttrSpellingStringSwitch(
// document, which can be found at:
// https://isocpp.org/std/standing-documents/sd-6-sg10-feature-test-recommendations
//
- // C2x-style attributes have the same kind of version information
+ // C23-style attributes have the same kind of version information
// associated with them. The unscoped attribute version information should
// be taken from the specification of the attribute in the C Standard.
+ //
+ // Clang-specific attributes have the same kind of version information
+ // associated with them. This version is typically the default value (1).
+ // These version values are clang-specific and should typically be
+ // incremented once the attribute changes its syntax and/or semantics in a
+ // a way that is impactful to the end user.
int Version = 1;
- if (Variety == "CXX11" || Variety == "C2x") {
- std::vector<Record *> Spellings = Attr->getValueAsListOfDefs("Spellings");
- for (const auto &Spelling : Spellings) {
- if (Spelling->getValueAsString("Variety") == Variety) {
- Version = static_cast<int>(Spelling->getValueAsInt("Version"));
- if (Scope.empty() && Version == 1)
- PrintError(Spelling->getLoc(), "Standard attributes must have "
- "valid version information.");
- break;
- }
- }
+ assert(Spelling.variety() == Variety);
+ std::string Name = "";
+ if (Spelling.nameSpace().empty() || Scope == Spelling.nameSpace()) {
+ Name = Spelling.name();
+ Version = static_cast<int>(
+ Spelling.getSpellingRecord().getValueAsInt("Version"));
+ // Verify that explicitly specified CXX11 and C23 spellings (i.e.
+ // not inferred from Clang/GCC spellings) have a version that's
+ // different from the default (1).
+ bool RequiresValidVersion =
+ (Variety == "CXX11" || Variety == "C23") &&
+ Spelling.getSpellingRecord().getValueAsString("Variety") == Variety;
+ if (RequiresValidVersion && Scope.empty() && Version == 1)
+ PrintError(Spelling.getSpellingRecord().getLoc(),
+ "Standard attributes must have "
+ "valid version information.");
}
std::string Test;
@@ -3107,35 +3536,77 @@ static void GenerateHasAttrSpellingStringSwitch(
// If this is the C++11 variety, also add in the LangOpts test.
if (Variety == "CXX11")
Test += " && LangOpts.CPlusPlus11";
- else if (Variety == "C2x")
- Test += " && LangOpts.DoubleSquareBracketAttributes";
+ } else if (!Attr->getValueAsListOfDefs("TargetSpecificSpellings").empty()) {
+ // Add target checks if this spelling is target-specific.
+ const std::vector<Record *> TargetSpellings =
+ Attr->getValueAsListOfDefs("TargetSpecificSpellings");
+ for (const auto &TargetSpelling : TargetSpellings) {
+ // Find spelling that matches current scope and name.
+ for (const auto &Spelling : GetFlattenedSpellings(*TargetSpelling)) {
+ if (Scope == Spelling.nameSpace() && Name == Spelling.name()) {
+ const Record *Target = TargetSpelling->getValueAsDef("Target");
+ std::vector<StringRef> Arches =
+ Target->getValueAsListOfStrings("Arches");
+ GenerateTargetSpecificAttrChecks(Target, Arches, Test,
+ /*FnName=*/nullptr);
+ break;
+ }
+ }
+ }
+
+ if (Variety == "CXX11")
+ Test += " && LangOpts.CPlusPlus11";
} else if (Variety == "CXX11")
// C++11 mode should be checked against LangOpts, which is presumed to be
// present in the caller.
Test = "LangOpts.CPlusPlus11";
- else if (Variety == "C2x")
- Test = "LangOpts.DoubleSquareBracketAttributes";
-
- std::string TestStr =
- !Test.empty() ? Test + " ? " + llvm::itostr(Version) + " : 0" : "1";
- std::vector<FlattenedSpelling> Spellings = GetFlattenedSpellings(*Attr);
- for (const auto &S : Spellings)
- if (Variety.empty() || (Variety == S.variety() &&
- (Scope.empty() || Scope == S.nameSpace())))
- OS << " .Case(\"" << S.name() << "\", " << TestStr << ")\n";
+
+ std::string TestStr = !Test.empty()
+ ? Test + " ? " + llvm::itostr(Version) + " : 0"
+ : llvm::itostr(Version);
+ if (Scope.empty() || Scope == Spelling.nameSpace())
+ OS << " .Case(\"" << Spelling.name() << "\", " << TestStr << ")\n";
}
OS << " .Default(0);\n";
}
+// Emits list of regular keyword attributes with info about their arguments.
+void EmitClangRegularKeywordAttributeInfo(RecordKeeper &Records,
+ raw_ostream &OS) {
+ emitSourceFileHeader(
+ "A list of regular keyword attributes generated from the attribute"
+ " definitions",
+ OS);
+ // Assume for now that the same token is not used in multiple regular
+ // keyword attributes.
+ for (auto *R : Records.getAllDerivedDefinitions("Attr"))
+ for (const auto &S : GetFlattenedSpellings(*R)) {
+ if (!isRegularKeywordAttribute(S))
+ continue;
+ std::vector<Record *> Args = R->getValueAsListOfDefs("Args");
+ bool HasArgs = llvm::any_of(
+ Args, [](const Record *Arg) { return !Arg->getValueAsBit("Fake"); });
+
+ OS << "KEYWORD_ATTRIBUTE("
+ << S.getSpellingRecord().getValueAsString("Name") << ", "
+ << (HasArgs ? "true" : "false") << ", )\n";
+ }
+ OS << "#undef KEYWORD_ATTRIBUTE\n";
+}
+
// Emits the list of spellings for attributes.
void EmitClangAttrHasAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
- emitSourceFileHeader("Code to implement the __has_attribute logic", OS);
+ emitSourceFileHeader("Code to implement the __has_attribute logic", OS,
+ Records);
// Separate all of the attributes out into four group: generic, C++11, GNU,
// and declspecs. Then generate a big switch statement for each of them.
std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr");
- std::vector<Record *> Declspec, Microsoft, GNU, Pragma;
- std::map<std::string, std::vector<Record *>> CXX, C2x;
+ std::vector<std::pair<const Record *, FlattenedSpelling>> Declspec, Microsoft,
+ GNU, Pragma, HLSLSemantic;
+ std::map<std::string,
+ std::vector<std::pair<const Record *, FlattenedSpelling>>>
+ CXX, C23;
// Walk over the list of all attributes, and split them out based on the
// spelling variety.
@@ -3144,37 +3615,45 @@ void EmitClangAttrHasAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
for (const auto &SI : Spellings) {
const std::string &Variety = SI.variety();
if (Variety == "GNU")
- GNU.push_back(R);
+ GNU.emplace_back(R, SI);
else if (Variety == "Declspec")
- Declspec.push_back(R);
+ Declspec.emplace_back(R, SI);
else if (Variety == "Microsoft")
- Microsoft.push_back(R);
+ Microsoft.emplace_back(R, SI);
else if (Variety == "CXX11")
- CXX[SI.nameSpace()].push_back(R);
- else if (Variety == "C2x")
- C2x[SI.nameSpace()].push_back(R);
+ CXX[SI.nameSpace()].emplace_back(R, SI);
+ else if (Variety == "C23")
+ C23[SI.nameSpace()].emplace_back(R, SI);
else if (Variety == "Pragma")
- Pragma.push_back(R);
+ Pragma.emplace_back(R, SI);
+ else if (Variety == "HLSLSemantic")
+ HLSLSemantic.emplace_back(R, SI);
}
}
OS << "const llvm::Triple &T = Target.getTriple();\n";
OS << "switch (Syntax) {\n";
- OS << "case AttrSyntax::GNU:\n";
+ OS << "case AttributeCommonInfo::Syntax::AS_GNU:\n";
OS << " return llvm::StringSwitch<int>(Name)\n";
GenerateHasAttrSpellingStringSwitch(GNU, OS, "GNU");
- OS << "case AttrSyntax::Declspec:\n";
+ OS << "case AttributeCommonInfo::Syntax::AS_Declspec:\n";
OS << " return llvm::StringSwitch<int>(Name)\n";
GenerateHasAttrSpellingStringSwitch(Declspec, OS, "Declspec");
- OS << "case AttrSyntax::Microsoft:\n";
+ OS << "case AttributeCommonInfo::Syntax::AS_Microsoft:\n";
OS << " return llvm::StringSwitch<int>(Name)\n";
GenerateHasAttrSpellingStringSwitch(Microsoft, OS, "Microsoft");
- OS << "case AttrSyntax::Pragma:\n";
+ OS << "case AttributeCommonInfo::Syntax::AS_Pragma:\n";
OS << " return llvm::StringSwitch<int>(Name)\n";
GenerateHasAttrSpellingStringSwitch(Pragma, OS, "Pragma");
- auto fn = [&OS](const char *Spelling, const char *Variety,
- const std::map<std::string, std::vector<Record *>> &List) {
- OS << "case AttrSyntax::" << Variety << ": {\n";
+ OS << "case AttributeCommonInfo::Syntax::AS_HLSLSemantic:\n";
+ OS << " return llvm::StringSwitch<int>(Name)\n";
+ GenerateHasAttrSpellingStringSwitch(HLSLSemantic, OS, "HLSLSemantic");
+ auto fn = [&OS](const char *Spelling,
+ const std::map<
+ std::string,
+ std::vector<std::pair<const Record *, FlattenedSpelling>>>
+ &List) {
+ OS << "case AttributeCommonInfo::Syntax::AS_" << Spelling << ": {\n";
// C++11-style attributes are further split out based on the Scope.
for (auto I = List.cbegin(), E = List.cend(); I != E; ++I) {
if (I != List.cbegin())
@@ -3189,14 +3668,24 @@ void EmitClangAttrHasAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
}
OS << "\n} break;\n";
};
- fn("CXX11", "CXX", CXX);
- fn("C2x", "C", C2x);
+ fn("CXX11", CXX);
+ fn("C23", C23);
+ OS << "case AttributeCommonInfo::Syntax::AS_Keyword:\n";
+ OS << "case AttributeCommonInfo::Syntax::AS_ContextSensitiveKeyword:\n";
+ OS << " llvm_unreachable(\"hasAttribute not supported for keyword\");\n";
+ OS << " return 0;\n";
+ OS << "case AttributeCommonInfo::Syntax::AS_Implicit:\n";
+ OS << " llvm_unreachable (\"hasAttribute not supported for "
+ "AS_Implicit\");\n";
+ OS << " return 0;\n";
+
OS << "}\n";
}
void EmitClangAttrSpellingListIndex(RecordKeeper &Records, raw_ostream &OS) {
- emitSourceFileHeader("Code to translate different attribute spellings "
- "into internal identifiers", OS);
+ emitSourceFileHeader("Code to translate different attribute spellings into "
+ "internal identifiers",
+ OS, Records);
OS << " switch (getParsedKind()) {\n";
OS << " case IgnoredAttribute:\n";
@@ -3226,7 +3715,8 @@ void EmitClangAttrSpellingListIndex(RecordKeeper &Records, raw_ostream &OS) {
// Emits code used by RecursiveASTVisitor to visit attributes
void EmitClangAttrASTVisitor(RecordKeeper &Records, raw_ostream &OS) {
- emitSourceFileHeader("Used by RecursiveASTVisitor to visit attributes.", OS);
+ emitSourceFileHeader("Used by RecursiveASTVisitor to visit attributes.", OS,
+ Records);
std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
@@ -3265,6 +3755,10 @@ void EmitClangAttrASTVisitor(RecordKeeper &Records, raw_ostream &OS) {
for (const auto *Arg : ArgRecords)
createArgument(*Arg, R.getName())->writeASTVisitorTraversal(OS);
+ if (Attr->getValueAsBit("AcceptsExprPack"))
+ VariadicExprArgument("DelayedArgs", R.getName())
+ .writeASTVisitorTraversal(OS);
+
OS << " return true;\n";
OS << "}\n\n";
}
@@ -3347,7 +3841,8 @@ void EmitClangAttrTemplateInstantiateHelper(const std::vector<Record *> &Attrs,
// Emits code to instantiate dependent attributes on templates.
void EmitClangAttrTemplateInstantiate(RecordKeeper &Records, raw_ostream &OS) {
- emitSourceFileHeader("Template instantiation code for attributes", OS);
+ emitSourceFileHeader("Template instantiation code for attributes", OS,
+ Records);
std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
@@ -3369,7 +3864,8 @@ void EmitClangAttrTemplateInstantiate(RecordKeeper &Records, raw_ostream &OS) {
// Emits the list of parsed attributes.
void EmitClangAttrParsedAttrList(RecordKeeper &Records, raw_ostream &OS) {
- emitSourceFileHeader("List of all attributes that Clang recognizes", OS);
+ emitSourceFileHeader("List of all attributes that Clang recognizes", OS,
+ Records);
OS << "#ifndef PARSED_ATTR\n";
OS << "#define PARSED_ATTR(NAME) NAME\n";
@@ -3390,7 +3886,7 @@ static void emitArgInfo(const Record &R, raw_ostream &OS) {
// attribute and emit the number of required arguments followed by the
// number of optional arguments.
std::vector<Record *> Args = R.getValueAsListOfDefs("Args");
- unsigned ArgCount = 0, OptCount = 0;
+ unsigned ArgCount = 0, OptCount = 0, ArgMemberCount = 0;
bool HasVariadic = false;
for (const auto *Arg : Args) {
// If the arg is fake, it's the user's job to supply it: general parsing
@@ -3398,14 +3894,16 @@ static void emitArgInfo(const Record &R, raw_ostream &OS) {
if (Arg->getValueAsBit("Fake"))
continue;
Arg->getValueAsBit("Optional") ? ++OptCount : ++ArgCount;
+ ++ArgMemberCount;
if (!HasVariadic && isArgVariadic(*Arg, R.getName()))
HasVariadic = true;
}
// If there is a variadic argument, we will set the optional argument count
// to its largest value. Since it's currently a 4-bit number, we set it to 15.
- OS << " NumArgs = " << ArgCount << ";\n";
- OS << " OptArgs = " << (HasVariadic ? 15 : OptCount) << ";\n";
+ OS << " /*NumArgs=*/" << ArgCount << ",\n";
+ OS << " /*OptArgs=*/" << (HasVariadic ? 15 : OptCount) << ",\n";
+ OS << " /*NumArgMembers=*/" << ArgMemberCount << ",\n";
}
static std::string GetDiagnosticSpelling(const Record &R) {
@@ -3554,8 +4052,9 @@ static void GenerateAppertainsTo(const Record &Attr, raw_ostream &OS) {
if (!StmtSubjects.empty()) {
OS << "bool diagAppertainsToDecl(Sema &S, const ParsedAttr &AL, ";
OS << "const Decl *D) const override {\n";
- OS << " S.Diag(AL.getLoc(), diag::err_stmt_attribute_invalid_on_decl)\n";
- OS << " << AL << D->getLocation();\n";
+ OS << " S.Diag(AL.getLoc(), diag::err_attribute_invalid_on_decl)\n";
+ OS << " << AL << AL.isRegularKeywordAttribute() << "
+ "D->getLocation();\n";
OS << " return false;\n";
OS << "}\n\n";
}
@@ -3584,7 +4083,7 @@ static void GenerateAppertainsTo(const Record &Attr, raw_ostream &OS) {
OS << (Warn ? "warn_attribute_wrong_decl_type_str"
: "err_attribute_wrong_decl_type_str");
OS << ")\n";
- OS << " << Attr << ";
+ OS << " << Attr << Attr.isRegularKeywordAttribute() << ";
OS << CalculateDiagnostic(*SubjectObj) << ";\n";
OS << " return false;\n";
OS << " }\n";
@@ -3599,7 +4098,8 @@ static void GenerateAppertainsTo(const Record &Attr, raw_ostream &OS) {
OS << "bool diagAppertainsToStmt(Sema &S, const ParsedAttr &AL, ";
OS << "const Stmt *St) const override {\n";
OS << " S.Diag(AL.getLoc(), diag::err_decl_attribute_invalid_on_stmt)\n";
- OS << " << AL << St->getBeginLoc();\n";
+ OS << " << AL << AL.isRegularKeywordAttribute() << "
+ "St->getBeginLoc();\n";
OS << " return false;\n";
OS << "}\n\n";
}
@@ -3618,7 +4118,7 @@ static void GenerateAppertainsTo(const Record &Attr, raw_ostream &OS) {
OS << (Warn ? "warn_attribute_wrong_decl_type_str"
: "err_attribute_wrong_decl_type_str");
OS << ")\n";
- OS << " << Attr << ";
+ OS << " << Attr << Attr.isRegularKeywordAttribute() << ";
OS << CalculateDiagnostic(*SubjectObj) << ";\n";
OS << " return false;\n";
OS << " }\n";
@@ -3689,7 +4189,8 @@ static void GenerateMutualExclusionsChecks(const Record &Attr,
for (const std::string &A : DeclAttrs) {
OS << " if (const auto *A = D->getAttr<" << A << ">()) {\n";
OS << " S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible)"
- << " << AL << A;\n";
+ << " << AL << A << (AL.isRegularKeywordAttribute() ||"
+ << " A->isRegularKeywordAttribute());\n";
OS << " S.Diag(A->getLocation(), diag::note_conflicting_attribute);";
OS << " \nreturn false;\n";
OS << " }\n";
@@ -3710,7 +4211,8 @@ static void GenerateMutualExclusionsChecks(const Record &Attr,
<< ">()) {\n";
MergeDeclOS << " S.Diag(First->getLocation(), "
<< "diag::err_attributes_are_not_compatible) << First << "
- << "Second;\n";
+ << "Second << (First->isRegularKeywordAttribute() || "
+ << "Second->isRegularKeywordAttribute());\n";
MergeDeclOS << " S.Diag(Second->getLocation(), "
<< "diag::note_conflicting_attribute);\n";
MergeDeclOS << " return false;\n";
@@ -3750,7 +4252,8 @@ static void GenerateMutualExclusionsChecks(const Record &Attr,
MergeStmtOS << " if (Iter != C.end()) {\n";
MergeStmtOS << " S.Diag((*Iter)->getLocation(), "
<< "diag::err_attributes_are_not_compatible) << *Iter << "
- << "Second;\n";
+ << "Second << ((*Iter)->isRegularKeywordAttribute() || "
+ << "Second->isRegularKeywordAttribute());\n";
MergeStmtOS << " S.Diag(Second->getLocation(), "
<< "diag::note_conflicting_attribute);\n";
MergeStmtOS << " return false;\n";
@@ -3803,14 +4306,8 @@ static void GenerateLangOptRequirements(const Record &R,
if (LangOpts.empty())
return;
- OS << "bool diagLangOpts(Sema &S, const ParsedAttr &Attr) ";
- OS << "const override {\n";
- OS << " auto &LangOpts = S.LangOpts;\n";
- OS << " if (" << GenerateTestExpression(LangOpts) << ")\n";
- OS << " return true;\n\n";
- OS << " S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) ";
- OS << "<< Attr;\n";
- OS << " return false;\n";
+ OS << "bool acceptsLangOpts(const LangOptions &LangOpts) const override {\n";
+ OS << " return " << GenerateTestExpression(LangOpts) << ";\n";
OS << "}\n\n";
}
@@ -3855,6 +4352,51 @@ static void GenerateTargetRequirements(const Record &Attr,
OS << "}\n\n";
}
+static void
+GenerateSpellingTargetRequirements(const Record &Attr,
+ const std::vector<Record *> &TargetSpellings,
+ raw_ostream &OS) {
+ // If there are no target specific spellings, use the default target handler.
+ if (TargetSpellings.empty())
+ return;
+
+ std::string Test;
+ bool UsesT = false;
+ const std::vector<FlattenedSpelling> SpellingList =
+ GetFlattenedSpellings(Attr);
+ for (unsigned TargetIndex = 0; TargetIndex < TargetSpellings.size();
+ ++TargetIndex) {
+ const auto &TargetSpelling = TargetSpellings[TargetIndex];
+ std::vector<FlattenedSpelling> Spellings =
+ GetFlattenedSpellings(*TargetSpelling);
+
+ Test += "((SpellingListIndex == ";
+ for (unsigned Index = 0; Index < Spellings.size(); ++Index) {
+ Test +=
+ llvm::itostr(getSpellingListIndex(SpellingList, Spellings[Index]));
+ if (Index != Spellings.size() - 1)
+ Test += " ||\n SpellingListIndex == ";
+ else
+ Test += ") && ";
+ }
+
+ const Record *Target = TargetSpelling->getValueAsDef("Target");
+ std::vector<StringRef> Arches = Target->getValueAsListOfStrings("Arches");
+ std::string FnName = "isTargetSpelling";
+ UsesT |= GenerateTargetSpecificAttrChecks(Target, Arches, Test, &FnName);
+ Test += ")";
+ if (TargetIndex != TargetSpellings.size() - 1)
+ Test += " || ";
+ }
+
+ OS << "bool spellingExistsInTarget(const TargetInfo &Target,\n";
+ OS << " const unsigned SpellingListIndex) const "
+ "override {\n";
+ if (UsesT)
+ OS << " const llvm::Triple &T = Target.getTriple(); (void)T;\n";
+ OS << " return " << Test << ";\n", OS << "}\n\n";
+}
+
static void GenerateSpellingIndexToSemanticSpelling(const Record &Attr,
raw_ostream &OS) {
// If the attribute does not have a semantic form, we can bail out early.
@@ -3895,6 +4437,55 @@ static void GenerateHandleDeclAttribute(const Record &Attr, raw_ostream &OS) {
OS << "}\n\n";
}
+static bool isParamExpr(const Record *Arg) {
+ return !Arg->getSuperClasses().empty() &&
+ llvm::StringSwitch<bool>(
+ Arg->getSuperClasses().back().first->getName())
+ .Case("ExprArgument", true)
+ .Case("VariadicExprArgument", true)
+ .Default(false);
+}
+
+void GenerateIsParamExpr(const Record &Attr, raw_ostream &OS) {
+ OS << "bool isParamExpr(size_t N) const override {\n";
+ OS << " return ";
+ auto Args = Attr.getValueAsListOfDefs("Args");
+ for (size_t I = 0; I < Args.size(); ++I)
+ if (isParamExpr(Args[I]))
+ OS << "(N == " << I << ") || ";
+ OS << "false;\n";
+ OS << "}\n\n";
+}
+
+void GenerateHandleAttrWithDelayedArgs(RecordKeeper &Records, raw_ostream &OS) {
+ OS << "static void handleAttrWithDelayedArgs(Sema &S, Decl *D, ";
+ OS << "const ParsedAttr &Attr) {\n";
+ OS << " SmallVector<Expr *, 4> ArgExprs;\n";
+ OS << " ArgExprs.reserve(Attr.getNumArgs());\n";
+ OS << " for (unsigned I = 0; I < Attr.getNumArgs(); ++I) {\n";
+ OS << " assert(!Attr.isArgIdent(I));\n";
+ OS << " ArgExprs.push_back(Attr.getArgAsExpr(I));\n";
+ OS << " }\n";
+ OS << " clang::Attr *CreatedAttr = nullptr;\n";
+ OS << " switch (Attr.getKind()) {\n";
+ OS << " default:\n";
+ OS << " llvm_unreachable(\"Attribute cannot hold delayed arguments.\");\n";
+ ParsedAttrMap Attrs = getParsedAttrList(Records);
+ for (const auto &I : Attrs) {
+ const Record &R = *I.second;
+ if (!R.getValueAsBit("AcceptsExprPack"))
+ continue;
+ OS << " case ParsedAttr::AT_" << I.first << ": {\n";
+ OS << " CreatedAttr = " << R.getName() << "Attr::CreateWithDelayedArgs";
+ OS << "(S.Context, ArgExprs.data(), ArgExprs.size(), Attr);\n";
+ OS << " break;\n";
+ OS << " }\n";
+ }
+ OS << " }\n";
+ OS << " D->addAttr(CreatedAttr);\n";
+ OS << "}\n\n";
+}
+
static bool IsKnownToGCC(const Record &Attr) {
// Look at the spellings for this subject; if there are any spellings which
// claim to be known to GCC, the attribute is known to GCC.
@@ -3905,7 +4496,7 @@ static bool IsKnownToGCC(const Record &Attr) {
/// Emits the parsed attribute helpers
void EmitClangAttrParsedAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
- emitSourceFileHeader("Parsed attribute helpers", OS);
+ emitSourceFileHeader("Parsed attribute helpers", OS, Records);
OS << "#if !defined(WANT_DECL_MERGE_LOGIC) && "
<< "!defined(WANT_STMT_MERGE_LOGIC)\n";
@@ -3919,7 +4510,7 @@ void EmitClangAttrParsedAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
// Generate all of the custom appertainsTo functions that the attributes
// will be using.
- for (auto I : Attrs) {
+ for (const auto &I : Attrs) {
const Record &Attr = *I.second;
if (Attr.isValueUnset("Subjects"))
continue;
@@ -3965,35 +4556,67 @@ void EmitClangAttrParsedAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
}
OS << "};\n";
}
+
+ std::vector<std::string> ArgNames;
+ for (const auto &Arg : Attr.getValueAsListOfDefs("Args")) {
+ bool UnusedUnset;
+ if (Arg->getValueAsBitOrUnset("Fake", UnusedUnset))
+ continue;
+ ArgNames.push_back(Arg->getValueAsString("Name").str());
+ for (const auto &Class : Arg->getSuperClasses()) {
+ if (Class.first->getName().starts_with("Variadic")) {
+ ArgNames.back().append("...");
+ break;
+ }
+ }
+ }
+ if (!ArgNames.empty()) {
+ OS << "static constexpr const char *" << I->first << "ArgNames[] = {\n";
+ for (const auto &N : ArgNames)
+ OS << '"' << N << "\",";
+ OS << "};\n";
+ }
+
OS << "struct ParsedAttrInfo" << I->first
<< " final : public ParsedAttrInfo {\n";
- OS << " ParsedAttrInfo" << I->first << "() {\n";
- OS << " AttrKind = ParsedAttr::AT_" << AttrName << ";\n";
+ OS << " constexpr ParsedAttrInfo" << I->first << "() : ParsedAttrInfo(\n";
+ OS << " /*AttrKind=*/ParsedAttr::AT_" << AttrName << ",\n";
emitArgInfo(Attr, OS);
- OS << " HasCustomParsing = ";
- OS << Attr.getValueAsBit("HasCustomParsing") << ";\n";
- OS << " IsTargetSpecific = ";
- OS << Attr.isSubClassOf("TargetSpecificAttr") << ";\n";
- OS << " IsType = ";
- OS << (Attr.isSubClassOf("TypeAttr") ||
- Attr.isSubClassOf("DeclOrTypeAttr")) << ";\n";
- OS << " IsStmt = ";
+ OS << " /*HasCustomParsing=*/";
+ OS << Attr.getValueAsBit("HasCustomParsing") << ",\n";
+ OS << " /*AcceptsExprPack=*/";
+ OS << Attr.getValueAsBit("AcceptsExprPack") << ",\n";
+ OS << " /*IsTargetSpecific=*/";
+ OS << Attr.isSubClassOf("TargetSpecificAttr") << ",\n";
+ OS << " /*IsType=*/";
+ OS << (Attr.isSubClassOf("TypeAttr") || Attr.isSubClassOf("DeclOrTypeAttr"))
+ << ",\n";
+ OS << " /*IsStmt=*/";
OS << (Attr.isSubClassOf("StmtAttr") || Attr.isSubClassOf("DeclOrStmtAttr"))
- << ";\n";
- OS << " IsKnownToGCC = ";
- OS << IsKnownToGCC(Attr) << ";\n";
- OS << " IsSupportedByPragmaAttribute = ";
- OS << PragmaAttributeSupport.isAttributedSupported(*I->second) << ";\n";
+ << ",\n";
+ OS << " /*IsKnownToGCC=*/";
+ OS << IsKnownToGCC(Attr) << ",\n";
+ OS << " /*IsSupportedByPragmaAttribute=*/";
+ OS << PragmaAttributeSupport.isAttributedSupported(*I->second) << ",\n";
if (!Spellings.empty())
- OS << " Spellings = " << I->first << "Spellings;\n";
- OS << " }\n";
+ OS << " /*Spellings=*/" << I->first << "Spellings,\n";
+ else
+ OS << " /*Spellings=*/{},\n";
+ if (!ArgNames.empty())
+ OS << " /*ArgNames=*/" << I->first << "ArgNames";
+ else
+ OS << " /*ArgNames=*/{}";
+ OS << ") {}\n";
GenerateAppertainsTo(Attr, OS);
GenerateMutualExclusionsChecks(Attr, Records, OS, MergeDeclOS, MergeStmtOS);
GenerateLangOptRequirements(Attr, OS);
GenerateTargetRequirements(Attr, Dupes, OS);
+ GenerateSpellingTargetRequirements(
+ Attr, Attr.getValueAsListOfDefs("TargetSpecificSpellings"), OS);
GenerateSpellingIndexToSemanticSpelling(Attr, OS);
PragmaAttributeSupport.generateStrictConformsTo(*I->second, OS);
GenerateHandleDeclAttribute(Attr, OS);
+ GenerateIsParamExpr(Attr, OS);
OS << "static const ParsedAttrInfo" << I->first << " Instance;\n";
OS << "};\n";
OS << "const ParsedAttrInfo" << I->first << " ParsedAttrInfo" << I->first
@@ -4006,6 +4629,9 @@ void EmitClangAttrParsedAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
}
OS << "};\n\n";
+ // Generate function for handling attributes with delayed arguments
+ GenerateHandleAttrWithDelayedArgs(Records, OS);
+
// Generate the attribute match rules.
emitAttributeMatchRules(PragmaAttributeSupport, OS);
@@ -4034,11 +4660,11 @@ void EmitClangAttrParsedAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
// Emits the kind list of parsed attributes
void EmitClangAttrParsedAttrKinds(RecordKeeper &Records, raw_ostream &OS) {
- emitSourceFileHeader("Attribute name matcher", OS);
+ emitSourceFileHeader("Attribute name matcher", OS, Records);
std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr");
std::vector<StringMatcher::StringPair> GNU, Declspec, Microsoft, CXX11,
- Keywords, Pragma, C2x;
+ Keywords, Pragma, C23, HLSLSemantic;
std::set<std::string> Seen;
for (const auto *A : Attrs) {
const Record &Attr = *A;
@@ -4060,9 +4686,8 @@ void EmitClangAttrParsedAttrKinds(RecordKeeper &Records, raw_ostream &OS) {
if (Attr.isSubClassOf("TargetSpecificAttr") &&
!Attr.isValueUnset("ParseKind")) {
AttrName = std::string(Attr.getValueAsString("ParseKind"));
- if (Seen.find(AttrName) != Seen.end())
+ if (!Seen.insert(AttrName).second)
continue;
- Seen.insert(AttrName);
} else
AttrName = NormalizeAttrName(StringRef(Attr.getName())).str();
@@ -4076,8 +4701,8 @@ void EmitClangAttrParsedAttrKinds(RecordKeeper &Records, raw_ostream &OS) {
Matches = &CXX11;
if (!S.nameSpace().empty())
Spelling += S.nameSpace() + "::";
- } else if (Variety == "C2x") {
- Matches = &C2x;
+ } else if (Variety == "C23") {
+ Matches = &C23;
if (!S.nameSpace().empty())
Spelling += S.nameSpace() + "::";
} else if (Variety == "GNU")
@@ -4090,6 +4715,8 @@ void EmitClangAttrParsedAttrKinds(RecordKeeper &Records, raw_ostream &OS) {
Matches = &Keywords;
else if (Variety == "Pragma")
Matches = &Pragma;
+ else if (Variety == "HLSLSemantic")
+ Matches = &HLSLSemantic;
assert(Matches && "Unsupported spelling variety found");
@@ -4118,13 +4745,15 @@ void EmitClangAttrParsedAttrKinds(RecordKeeper &Records, raw_ostream &OS) {
StringMatcher("Name", Microsoft, OS).Emit();
OS << " } else if (AttributeCommonInfo::AS_CXX11 == Syntax) {\n";
StringMatcher("Name", CXX11, OS).Emit();
- OS << " } else if (AttributeCommonInfo::AS_C2x == Syntax) {\n";
- StringMatcher("Name", C2x, OS).Emit();
+ OS << " } else if (AttributeCommonInfo::AS_C23 == Syntax) {\n";
+ StringMatcher("Name", C23, OS).Emit();
OS << " } else if (AttributeCommonInfo::AS_Keyword == Syntax || ";
OS << "AttributeCommonInfo::AS_ContextSensitiveKeyword == Syntax) {\n";
StringMatcher("Name", Keywords, OS).Emit();
OS << " } else if (AttributeCommonInfo::AS_Pragma == Syntax) {\n";
StringMatcher("Name", Pragma, OS).Emit();
+ OS << " } else if (AttributeCommonInfo::AS_HLSLSemantic == Syntax) {\n";
+ StringMatcher("Name", HLSLSemantic, OS).Emit();
OS << " }\n";
OS << " return AttributeCommonInfo::UnknownAttribute;\n"
<< "}\n";
@@ -4132,7 +4761,7 @@ void EmitClangAttrParsedAttrKinds(RecordKeeper &Records, raw_ostream &OS) {
// Emits the code to dump an attribute.
void EmitClangAttrTextNodeDump(RecordKeeper &Records, raw_ostream &OS) {
- emitSourceFileHeader("Attribute text node dumper", OS);
+ emitSourceFileHeader("Attribute text node dumper", OS, Records);
std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr"), Args;
for (const auto *Attr : Attrs) {
@@ -4155,6 +4784,9 @@ void EmitClangAttrTextNodeDump(RecordKeeper &Records, raw_ostream &OS) {
for (const auto *Arg : Args)
createArgument(*Arg, R.getName())->writeDump(SS);
+ if (Attr->getValueAsBit("AcceptsExprPack"))
+ VariadicExprArgument("DelayedArgs", R.getName()).writeDump(OS);
+
if (SS.tell()) {
OS << " void Visit" << R.getName() << "Attr(const " << R.getName()
<< "Attr *A) {\n";
@@ -4168,7 +4800,7 @@ void EmitClangAttrTextNodeDump(RecordKeeper &Records, raw_ostream &OS) {
}
void EmitClangAttrNodeTraverse(RecordKeeper &Records, raw_ostream &OS) {
- emitSourceFileHeader("Attribute text node traverser", OS);
+ emitSourceFileHeader("Attribute text node traverser", OS, Records);
std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr"), Args;
for (const auto *Attr : Attrs) {
@@ -4182,6 +4814,8 @@ void EmitClangAttrNodeTraverse(RecordKeeper &Records, raw_ostream &OS) {
Args = R.getValueAsListOfDefs("Args");
for (const auto *Arg : Args)
createArgument(*Arg, R.getName())->writeDumpChildren(SS);
+ if (Attr->getValueAsBit("AcceptsExprPack"))
+ VariadicExprArgument("DelayedArgs", R.getName()).writeDumpChildren(SS);
if (SS.tell()) {
OS << " void Visit" << R.getName() << "Attr(const " << R.getName()
<< "Attr *A) {\n";
@@ -4194,13 +4828,14 @@ void EmitClangAttrNodeTraverse(RecordKeeper &Records, raw_ostream &OS) {
}
}
-void EmitClangAttrParserStringSwitches(RecordKeeper &Records,
- raw_ostream &OS) {
- emitSourceFileHeader("Parser-related llvm::StringSwitch cases", OS);
+void EmitClangAttrParserStringSwitches(RecordKeeper &Records, raw_ostream &OS) {
+ emitSourceFileHeader("Parser-related llvm::StringSwitch cases", OS, Records);
emitClangAttrArgContextList(Records, OS);
emitClangAttrIdentifierArgList(Records, OS);
+ emitClangAttrUnevaluatedStringLiteralList(Records, OS);
emitClangAttrVariadicIdentifierArgList(Records, OS);
emitClangAttrThisIsaIdentifierArgList(Records, OS);
+ emitClangAttrAcceptsExprPack(Records, OS);
emitClangAttrTypeArgList(Records, OS);
emitClangAttrLateParsedList(Records, OS);
}
@@ -4210,16 +4845,36 @@ void EmitClangAttrSubjectMatchRulesParserStringSwitches(RecordKeeper &Records,
getPragmaAttributeSupport(Records).generateParsingHelpers(OS);
}
-enum class SpellingKind {
+void EmitClangAttrDocTable(RecordKeeper &Records, raw_ostream &OS) {
+ emitSourceFileHeader("Clang attribute documentation", OS, Records);
+
+ std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr");
+ for (const auto *A : Attrs) {
+ if (!A->getValueAsBit("ASTNode"))
+ continue;
+ std::vector<Record *> Docs = A->getValueAsListOfDefs("Documentation");
+ assert(!Docs.empty());
+ // Only look at the first documentation if there are several.
+ // (Currently there's only one such attr, revisit if this becomes common).
+ StringRef Text =
+ Docs.front()->getValueAsOptionalString("Content").value_or("");
+ OS << "\nstatic const char AttrDoc_" << A->getName() << "[] = "
+ << "R\"reST(" << Text.trim() << ")reST\";\n";
+ }
+}
+
+enum class SpellingKind : size_t {
GNU,
CXX11,
- C2x,
+ C23,
Declspec,
Microsoft,
Keyword,
Pragma,
+ HLSLSemantic,
+ NumSpellingKinds
};
-static const size_t NumSpellingKinds = (size_t)SpellingKind::Pragma + 1;
+static const size_t NumSpellingKinds = (size_t)SpellingKind::NumSpellingKinds;
class SpellingList {
std::vector<std::string> Spellings[NumSpellingKinds];
@@ -4233,16 +4888,17 @@ public:
SpellingKind Kind = StringSwitch<SpellingKind>(Spelling.variety())
.Case("GNU", SpellingKind::GNU)
.Case("CXX11", SpellingKind::CXX11)
- .Case("C2x", SpellingKind::C2x)
+ .Case("C23", SpellingKind::C23)
.Case("Declspec", SpellingKind::Declspec)
.Case("Microsoft", SpellingKind::Microsoft)
.Case("Keyword", SpellingKind::Keyword)
- .Case("Pragma", SpellingKind::Pragma);
+ .Case("Pragma", SpellingKind::Pragma)
+ .Case("HLSLSemantic", SpellingKind::HLSLSemantic);
std::string Name;
if (!Spelling.nameSpace().empty()) {
switch (Kind) {
case SpellingKind::CXX11:
- case SpellingKind::C2x:
+ case SpellingKind::C23:
Name = Spelling.nameSpace() + "::";
break;
case SpellingKind::Pragma:
@@ -4287,7 +4943,8 @@ static void WriteCategoryHeader(const Record *DocCategory,
static std::pair<std::string, SpellingList>
GetAttributeHeadingAndSpellings(const Record &Documentation,
- const Record &Attribute) {
+ const Record &Attribute,
+ StringRef Cat) {
// FIXME: there is no way to have a per-spelling category for the attribute
// documentation. This may not be a limiting factor since the spellings
// should generally be consistently applied across the category.
@@ -4307,7 +4964,7 @@ GetAttributeHeadingAndSpellings(const Record &Documentation,
else {
std::set<std::string> Uniques;
for (auto I = Spellings.begin(), E = Spellings.end();
- I != E && Uniques.size() <= 1; ++I) {
+ I != E; ++I) {
std::string Spelling =
std::string(NormalizeNameForSpellingComparison(I->name()));
Uniques.insert(Spelling);
@@ -4316,6 +4973,11 @@ GetAttributeHeadingAndSpellings(const Record &Documentation,
// needs.
if (Uniques.size() == 1)
Heading = *Uniques.begin();
+ // If it's in the undocumented category, just construct a header by
+ // concatenating all the spellings. Might not be great, but better than
+ // nothing.
+ else if (Cat == "Undocumented")
+ Heading = llvm::join(Uniques.begin(), Uniques.end(), ", ");
}
}
@@ -4336,10 +4998,12 @@ static void WriteDocumentation(RecordKeeper &Records,
OS << Doc.Heading << "\n" << std::string(Doc.Heading.length(), '-') << "\n";
// List what spelling syntaxes the attribute supports.
+ // Note: "#pragma clang attribute" is handled outside the spelling kinds loop
+ // so it must be last.
OS << ".. csv-table:: Supported Syntaxes\n";
- OS << " :header: \"GNU\", \"C++11\", \"C2x\", \"``__declspec``\",";
- OS << " \"Keyword\", \"``#pragma``\", \"``#pragma clang attribute``\"\n\n";
- OS << " \"";
+ OS << " :header: \"GNU\", \"C++11\", \"C23\", \"``__declspec``\",";
+ OS << " \"Keyword\", \"``#pragma``\", \"HLSL Semantic\", \"``#pragma clang ";
+ OS << "attribute``\"\n\n \"";
for (size_t Kind = 0; Kind != NumSpellingKinds; ++Kind) {
SpellingKind K = (SpellingKind)Kind;
// TODO: List Microsoft (IDL-style attribute) spellings once we fully
@@ -4397,26 +5061,32 @@ void EmitClangAttrDocs(RecordKeeper &Records, raw_ostream &OS) {
// Gather the Documentation lists from each of the attributes, based on the
// category provided.
std::vector<Record *> Attrs = Records.getAllDerivedDefinitions("Attr");
- std::map<const Record *, std::vector<DocumentationData>> SplitDocs;
+ struct CategoryLess {
+ bool operator()(const Record *L, const Record *R) const {
+ return L->getValueAsString("Name") < R->getValueAsString("Name");
+ }
+ };
+ std::map<const Record *, std::vector<DocumentationData>, CategoryLess>
+ SplitDocs;
for (const auto *A : Attrs) {
const Record &Attr = *A;
std::vector<Record *> Docs = Attr.getValueAsListOfDefs("Documentation");
for (const auto *D : Docs) {
const Record &Doc = *D;
const Record *Category = Doc.getValueAsDef("Category");
- // If the category is "undocumented", then there cannot be any other
- // documentation categories (otherwise, the attribute would become
- // documented).
+ // If the category is "InternalOnly", then there cannot be any other
+ // documentation categories (otherwise, the attribute would be
+ // emitted into the docs).
const StringRef Cat = Category->getValueAsString("Name");
- bool Undocumented = Cat == "Undocumented";
- if (Undocumented && Docs.size() > 1)
+ bool InternalOnly = Cat == "InternalOnly";
+ if (InternalOnly && Docs.size() > 1)
PrintFatalError(Doc.getLoc(),
- "Attribute is \"Undocumented\", but has multiple "
+ "Attribute is \"InternalOnly\", but has multiple "
"documentation categories");
- if (!Undocumented)
+ if (!InternalOnly)
SplitDocs[Category].push_back(DocumentationData(
- Doc, Attr, GetAttributeHeadingAndSpellings(Doc, Attr)));
+ Doc, Attr, GetAttributeHeadingAndSpellings(Doc, Attr, Cat)));
}
}
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangCommentCommandInfoEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangCommentCommandInfoEmitter.cpp
index eb2f23191c55..a113b02e1999 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangCommentCommandInfoEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangCommentCommandInfoEmitter.cpp
@@ -20,9 +20,10 @@
using namespace llvm;
-void clang::EmitClangCommentCommandInfo(RecordKeeper &Records, raw_ostream &OS) {
- emitSourceFileHeader("A list of commands useable in documentation "
- "comments", OS);
+void clang::EmitClangCommentCommandInfo(RecordKeeper &Records,
+ raw_ostream &OS) {
+ emitSourceFileHeader("A list of commands useable in documentation comments",
+ OS, Records);
OS << "namespace {\n"
"const CommandInfo Commands[] = {\n";
@@ -83,6 +84,12 @@ static std::string MangleName(StringRef Str) {
default:
Mangled += Str[i];
break;
+ case '(':
+ Mangled += "lparen";
+ break;
+ case ')':
+ Mangled += "rparen";
+ break;
case '[':
Mangled += "lsquare";
break;
@@ -106,9 +113,10 @@ static std::string MangleName(StringRef Str) {
return Mangled;
}
-void clang::EmitClangCommentCommandList(RecordKeeper &Records, raw_ostream &OS) {
- emitSourceFileHeader("A list of commands useable in documentation "
- "comments", OS);
+void clang::EmitClangCommentCommandList(RecordKeeper &Records,
+ raw_ostream &OS) {
+ emitSourceFileHeader("A list of commands useable in documentation comments",
+ OS, Records);
OS << "#ifndef COMMENT_COMMAND\n"
<< "# define COMMENT_COMMAND(NAME)\n"
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangCommentHTMLNamedCharacterReferenceEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangCommentHTMLNamedCharacterReferenceEmitter.cpp
index 15671a99a3fc..f1cd9af0519d 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangCommentHTMLNamedCharacterReferenceEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangCommentHTMLNamedCharacterReferenceEmitter.cpp
@@ -66,12 +66,12 @@ void clang::EmitClangCommentHTMLNamedCharacterReferences(RecordKeeper &Records,
}
CLiteral.append(";");
- StringMatcher::StringPair Match(Spelling, std::string(CLiteral.str()));
+ StringMatcher::StringPair Match(Spelling, std::string(CLiteral));
NameToUTF8.push_back(Match);
}
- emitSourceFileHeader("HTML named character reference to UTF-8 "
- "translation", OS);
+ emitSourceFileHeader("HTML named character reference to UTF-8 translation",
+ OS, Records);
OS << "StringRef translateHTMLNamedCharacterReferenceToUTF8(\n"
" StringRef Name) {\n";
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangCommentHTMLTagsEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangCommentHTMLTagsEmitter.cpp
index 78bbbd1cba57..3dc1098753e0 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangCommentHTMLTagsEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangCommentHTMLTagsEmitter.cpp
@@ -27,7 +27,7 @@ void clang::EmitClangCommentHTMLTags(RecordKeeper &Records, raw_ostream &OS) {
"return true;");
}
- emitSourceFileHeader("HTML tag name matcher", OS);
+ emitSourceFileHeader("HTML tag name matcher", OS, Records);
OS << "bool isHTMLTagName(StringRef Name) {\n";
StringMatcher("Name", Matches, OS).Emit();
@@ -49,7 +49,7 @@ void clang::EmitClangCommentHTMLTagsProperties(RecordKeeper &Records,
MatchesEndTagForbidden.push_back(Match);
}
- emitSourceFileHeader("HTML tag properties", OS);
+ emitSourceFileHeader("HTML tag properties", OS, Records);
OS << "bool isHTMLEndTagOptional(StringRef Name) {\n";
StringMatcher("Name", MatchesEndTagOptional, OS).Emit();
@@ -61,4 +61,3 @@ void clang::EmitClangCommentHTMLTagsProperties(RecordKeeper &Records,
OS << " return false;\n"
<< "}\n\n";
}
-
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
index 014c1adcd809..480c7c83f5f8 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
@@ -12,7 +12,6 @@
#include "TableGenBackends.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -30,6 +29,7 @@
#include <cctype>
#include <functional>
#include <map>
+#include <optional>
#include <set>
using namespace llvm;
@@ -129,13 +129,14 @@ namespace {
};
struct GroupInfo {
+ llvm::StringRef GroupName;
std::vector<const Record*> DiagsInGroup;
std::vector<std::string> SubGroups;
- unsigned IDNo;
+ unsigned IDNo = 0;
llvm::SmallVector<const Record *, 1> Defs;
- GroupInfo() : IDNo(0) {}
+ GroupInfo() = default;
};
} // end anonymous namespace.
@@ -174,6 +175,7 @@ static void groupDiagnostics(const std::vector<Record*> &Diags,
Record *Group = DiagGroups[i];
GroupInfo &GI =
DiagsInGroup[std::string(Group->getValueAsString("GroupName"))];
+ GI.GroupName = Group->getName();
GI.Defs.push_back(Group);
std::vector<Record*> SubGroups = Group->getValueAsListOfDefs("SubGroups");
@@ -248,8 +250,9 @@ typedef llvm::PointerUnion<RecordVec*, RecordSet*> VecOrSet;
namespace {
class InferPedantic {
- typedef llvm::DenseMap<const Record*,
- std::pair<unsigned, Optional<unsigned> > > GMap;
+ typedef llvm::DenseMap<const Record *,
+ std::pair<unsigned, std::optional<unsigned>>>
+ GMap;
DiagGroupParentMap &DiagGroupParents;
const std::vector<Record*> &Diags;
@@ -323,7 +326,7 @@ bool InferPedantic::isOffByDefault(const Record *Diag) {
bool InferPedantic::groupInPedantic(const Record *Group, bool increment) {
GMap::mapped_type &V = GroupCount[Group];
// Lazily compute the threshold value for the group count.
- if (!V.second.hasValue()) {
+ if (!V.second) {
const GroupInfo &GI =
DiagsInGroup[std::string(Group->getValueAsString("GroupName"))];
V.second = GI.SubGroups.size() + GI.DiagsInGroup.size();
@@ -335,7 +338,7 @@ bool InferPedantic::groupInPedantic(const Record *Group, bool increment) {
// Consider a group in -Wpendatic IFF if has at least one diagnostic
// or subgroup AND all of those diagnostics and subgroups are covered
// by -Wpedantic via our computation.
- return V.first != 0 && V.first == V.second.getValue();
+ return V.first != 0 && V.first == *V.second;
}
void InferPedantic::markGroup(const Record *Group) {
@@ -402,17 +405,14 @@ void InferPedantic::compute(VecOrSet DiagsInPedantic,
if (!groupInPedantic(Group))
continue;
- unsigned ParentsInPedantic = 0;
const std::vector<Record*> &Parents = DiagGroupParents.getParents(Group);
- for (unsigned j = 0, ej = Parents.size(); j != ej; ++j) {
- if (groupInPedantic(Parents[j]))
- ++ParentsInPedantic;
- }
+ bool AllParentsInPedantic =
+ llvm::all_of(Parents, [&](Record *R) { return groupInPedantic(R); });
// If all the parents are in -Wpedantic, this means that this diagnostic
// group will be indirectly included by -Wpedantic already. In that
// case, do not add it directly to -Wpedantic. If the group has no
// parents, obviously it should go into -Wpedantic.
- if (Parents.size() > 0 && ParentsInPedantic == Parents.size())
+ if (Parents.size() > 0 && AllParentsInPedantic)
continue;
if (RecordVec *V = GroupsInPedantic.dyn_cast<RecordVec*>())
@@ -614,7 +614,7 @@ struct DiagnosticTextBuilder {
return It->second.Root;
}
- LLVM_ATTRIBUTE_NORETURN void PrintFatalError(llvm::Twine const &Msg) const {
+ [[noreturn]] void PrintFatalError(llvm::Twine const &Msg) const {
assert(EvaluatingRecord && "not evaluating a record?");
llvm::PrintFatalError(EvaluatingRecord->getLoc(), Msg);
}
@@ -653,6 +653,14 @@ private:
Root(O.Root) {
O.Root = nullptr;
}
+ // The move assignment operator is defined as deleted pending further
+ // motivation.
+ DiagText &operator=(DiagText &&) = delete;
+
+ // The copy constrcutor and copy assignment operator is defined as deleted
+ // pending further motivation.
+ DiagText(const DiagText &) = delete;
+ DiagText &operator=(const DiagText &) = delete;
~DiagText() {
for (Piece *P : AllocatedPieces)
@@ -676,7 +684,7 @@ private:
};
template <class Derived> struct DiagTextVisitor {
- using ModifierMappingsType = Optional<std::vector<int>>;
+ using ModifierMappingsType = std::optional<std::vector<int>>;
private:
Derived &getDerived() { return static_cast<Derived &>(*this); }
@@ -707,7 +715,7 @@ public:
private:
DiagTextVisitor &Visitor;
- Optional<std::vector<int>> OldMappings;
+ std::optional<std::vector<int>> OldMappings;
public:
Piece *Substitution;
@@ -1166,7 +1174,7 @@ std::vector<std::string>
DiagnosticTextBuilder::buildForDocumentation(StringRef Severity,
const Record *R) {
EvaluatingRecordGuard Guard(&EvaluatingRecord, R);
- StringRef Text = R->getValueAsString("Text");
+ StringRef Text = R->getValueAsString("Summary");
DiagText D(*this, Text);
TextPiece *Prefix = D.New<TextPiece>(Severity, Severity);
@@ -1185,7 +1193,7 @@ DiagnosticTextBuilder::buildForDocumentation(StringRef Severity,
std::string DiagnosticTextBuilder::buildForDefinition(const Record *R) {
EvaluatingRecordGuard Guard(&EvaluatingRecord, R);
- StringRef Text = R->getValueAsString("Text");
+ StringRef Text = R->getValueAsString("Summary");
DiagText D(*this, Text);
std::string Result;
DiagTextPrinter{*this, Result}.Visit(D.Root);
@@ -1279,8 +1287,8 @@ void clang::EmitClangDiagsDefs(RecordKeeper &Records, raw_ostream &OS,
OS << ", \"";
OS.write_escaped(DiagTextBuilder.buildForDefinition(&R)) << '"';
- // Warning associated with the diagnostic. This is stored as an index into
- // the alphabetically sorted warning table.
+ // Warning group associated with the diagnostic. This is stored as an index
+ // into the alphabetically sorted warning group table.
if (DefInit *DI = dyn_cast<DefInit>(R.getValueInit("Group"))) {
std::map<std::string, GroupInfo>::iterator I = DiagsInGroup.find(
std::string(DI->getDef()->getValueAsString("GroupName")));
@@ -1309,6 +1317,11 @@ void clang::EmitClangDiagsDefs(RecordKeeper &Records, raw_ostream &OS,
else
OS << ", false";
+ if (R.getValueAsBit("ShowInSystemMacro"))
+ OS << ", true";
+ else
+ OS << ", false";
+
if (R.getValueAsBit("Deferrable"))
OS << ", true";
else
@@ -1330,7 +1343,7 @@ static std::string getDiagCategoryEnum(llvm::StringRef name) {
SmallString<256> enumName = llvm::StringRef("DiagCat_");
for (llvm::StringRef::iterator I = name.begin(), E = name.end(); I != E; ++I)
enumName += isalnum(*I) ? *I : '_';
- return std::string(enumName.str());
+ return std::string(enumName);
}
/// Emit the array of diagnostic subgroups.
@@ -1487,18 +1500,20 @@ static void emitDiagTable(std::map<std::string, GroupInfo> &DiagsInGroup,
for (auto const &I: DiagsInGroup)
MaxLen = std::max(MaxLen, (unsigned)I.first.size());
- OS << "\n#ifdef GET_DIAG_TABLE\n";
+ OS << "\n#ifdef DIAG_ENTRY\n";
unsigned SubGroupIndex = 1, DiagArrayIndex = 1;
for (auto const &I: DiagsInGroup) {
// Group option string.
- OS << " { /* ";
+ OS << "DIAG_ENTRY(";
+ OS << I.second.GroupName << " /* ";
+
if (I.first.find_first_not_of("abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789!@#$%^*-+=:?") !=
std::string::npos)
PrintFatalError("Invalid character in diagnostic group '" + I.first +
"'");
- OS << I.first << " */ " << std::string(MaxLen - I.first.size(), ' ');
+ OS << I.first << " */, ";
// Store a pascal-style length byte at the beginning of the string.
std::string Name = char(I.first.size()) + I.first;
OS << GroupNames.GetOrAddStringOffset(Name, false) << ", ";
@@ -1517,7 +1532,7 @@ static void emitDiagTable(std::map<std::string, GroupInfo> &DiagsInGroup,
DiagArrayIndex += DiagsInPedantic.size();
DiagArrayIndex += V.size() + 1;
} else {
- OS << "/* Empty */ 0, ";
+ OS << "0, ";
}
// Subgroups.
@@ -1525,17 +1540,25 @@ static void emitDiagTable(std::map<std::string, GroupInfo> &DiagsInGroup,
const bool hasSubGroups =
!SubGroups.empty() || (IsPedantic && !GroupsInPedantic.empty());
if (hasSubGroups) {
- OS << "/* DiagSubGroup" << I.second.IDNo << " */ " << SubGroupIndex;
+ OS << "/* DiagSubGroup" << I.second.IDNo << " */ " << SubGroupIndex
+ << ", ";
if (IsPedantic)
SubGroupIndex += GroupsInPedantic.size();
SubGroupIndex += SubGroups.size() + 1;
} else {
- OS << "/* Empty */ 0";
+ OS << "0, ";
}
- OS << " },\n";
+ std::string Documentation = I.second.Defs.back()
+ ->getValue("Documentation")
+ ->getValue()
+ ->getAsUnquotedString();
+
+ OS << "R\"(" << StringRef(Documentation).trim() << ")\"";
+
+ OS << ")\n";
}
- OS << "#endif // GET_DIAG_TABLE\n\n";
+ OS << "#endif // DIAG_ENTRY\n\n";
}
/// Emit the table of diagnostic categories.
@@ -1688,7 +1711,7 @@ void writeHeader(StringRef Str, raw_ostream &OS, char Kind = '-') {
void writeDiagnosticText(DiagnosticTextBuilder &Builder, const Record *R,
StringRef Role, raw_ostream &OS) {
- StringRef Text = R->getValueAsString("Text");
+ StringRef Text = R->getValueAsString("Summary");
if (Text == "%0")
OS << "The text of this diagnostic is not controlled by Clang.\n\n";
else {
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangOpcodesEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangOpcodesEmitter.cpp
index ffeedcdf0ee2..db88c990d5f9 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangOpcodesEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangOpcodesEmitter.cpp
@@ -21,7 +21,7 @@ using namespace llvm;
namespace {
class ClangOpcodesEmitter {
RecordKeeper &Records;
- Record Root;
+ const Record Root;
unsigned NumTypes;
public:
@@ -34,33 +34,32 @@ public:
private:
/// Emits the opcode name for the opcode enum.
/// The name is obtained by concatenating the name with the list of types.
- void EmitEnum(raw_ostream &OS, StringRef N, Record *R);
+ void EmitEnum(raw_ostream &OS, StringRef N, const Record *R);
/// Emits the switch case and the invocation in the interpreter.
- void EmitInterp(raw_ostream &OS, StringRef N, Record *R);
+ void EmitInterp(raw_ostream &OS, StringRef N, const Record *R);
/// Emits the disassembler.
- void EmitDisasm(raw_ostream &OS, StringRef N, Record *R);
+ void EmitDisasm(raw_ostream &OS, StringRef N, const Record *R);
/// Emits the byte code emitter method.
- void EmitEmitter(raw_ostream &OS, StringRef N, Record *R);
+ void EmitEmitter(raw_ostream &OS, StringRef N, const Record *R);
/// Emits the prototype.
- void EmitProto(raw_ostream &OS, StringRef N, Record *R);
+ void EmitProto(raw_ostream &OS, StringRef N, const Record *R);
/// Emits the prototype to dispatch from a type.
- void EmitGroup(raw_ostream &OS, StringRef N, Record *R);
+ void EmitGroup(raw_ostream &OS, StringRef N, const Record *R);
/// Emits the evaluator method.
- void EmitEval(raw_ostream &OS, StringRef N, Record *R);
+ void EmitEval(raw_ostream &OS, StringRef N, const Record *R);
- void PrintTypes(raw_ostream &OS, ArrayRef<Record *> Types);
+ void PrintTypes(raw_ostream &OS, ArrayRef<const Record *> Types);
};
-void Enumerate(const Record *R,
- StringRef N,
- std::function<void(ArrayRef<Record *>, Twine)> &&F) {
- llvm::SmallVector<Record *, 2> TypePath;
+void Enumerate(const Record *R, StringRef N,
+ std::function<void(ArrayRef<const Record *>, Twine)> &&F) {
+ llvm::SmallVector<const Record *, 2> TypePath;
auto *Types = R->getValueAsListInit("Types");
std::function<void(size_t, const Twine &)> Rec;
@@ -102,67 +101,80 @@ void ClangOpcodesEmitter::run(raw_ostream &OS) {
}
}
-void ClangOpcodesEmitter::EmitEnum(raw_ostream &OS, StringRef N, Record *R) {
+void ClangOpcodesEmitter::EmitEnum(raw_ostream &OS, StringRef N,
+ const Record *R) {
OS << "#ifdef GET_OPCODE_NAMES\n";
- Enumerate(R, N, [&OS](ArrayRef<Record *>, const Twine &ID) {
+ Enumerate(R, N, [&OS](ArrayRef<const Record *>, const Twine &ID) {
OS << "OP_" << ID << ",\n";
});
OS << "#endif\n";
}
-void ClangOpcodesEmitter::EmitInterp(raw_ostream &OS, StringRef N, Record *R) {
+void ClangOpcodesEmitter::EmitInterp(raw_ostream &OS, StringRef N,
+ const Record *R) {
OS << "#ifdef GET_INTERP\n";
- Enumerate(R, N, [this, R, &OS, &N](ArrayRef<Record *> TS, const Twine &ID) {
- bool CanReturn = R->getValueAsBit("CanReturn");
- bool ChangesPC = R->getValueAsBit("ChangesPC");
- auto Args = R->getValueAsListOfDefs("Args");
-
- OS << "case OP_" << ID << ": {\n";
-
- // Emit calls to read arguments.
- for (size_t I = 0, N = Args.size(); I < N; ++I) {
- OS << " auto V" << I;
- OS << " = ";
- OS << "PC.read<" << Args[I]->getValueAsString("Name") << ">();\n";
- }
-
- // Emit a call to the template method and pass arguments.
- OS << " if (!" << N;
- PrintTypes(OS, TS);
- OS << "(S";
- if (ChangesPC)
- OS << ", PC";
- else
- OS << ", OpPC";
- if (CanReturn)
- OS << ", Result";
- for (size_t I = 0, N = Args.size(); I < N; ++I)
- OS << ", V" << I;
- OS << "))\n";
- OS << " return false;\n";
-
- // Bail out if interpreter returned.
- if (CanReturn) {
- OS << " if (!S.Current || S.Current->isRoot())\n";
- OS << " return true;\n";
- }
-
- OS << " continue;\n";
- OS << "}\n";
- });
+ Enumerate(R, N,
+ [this, R, &OS, &N](ArrayRef<const Record *> TS, const Twine &ID) {
+ bool CanReturn = R->getValueAsBit("CanReturn");
+ bool ChangesPC = R->getValueAsBit("ChangesPC");
+ auto Args = R->getValueAsListOfDefs("Args");
+
+ OS << "case OP_" << ID << ": {\n";
+
+ if (CanReturn)
+ OS << " bool DoReturn = (S.Current == StartFrame);\n";
+
+ // Emit calls to read arguments.
+ for (size_t I = 0, N = Args.size(); I < N; ++I) {
+ OS << " auto V" << I;
+ OS << " = ";
+ OS << "ReadArg<" << Args[I]->getValueAsString("Name")
+ << ">(S, PC);\n";
+ }
+
+ // Emit a call to the template method and pass arguments.
+ OS << " if (!" << N;
+ PrintTypes(OS, TS);
+ OS << "(S";
+ if (ChangesPC)
+ OS << ", PC";
+ else
+ OS << ", OpPC";
+ if (CanReturn)
+ OS << ", Result";
+ for (size_t I = 0, N = Args.size(); I < N; ++I)
+ OS << ", V" << I;
+ OS << "))\n";
+ OS << " return false;\n";
+
+ // Bail out if interpreter returned.
+ if (CanReturn) {
+ OS << " if (!S.Current || S.Current->isRoot())\n";
+ OS << " return true;\n";
+
+ OS << " if (DoReturn)\n";
+ OS << " return true;\n";
+ }
+
+ OS << " continue;\n";
+ OS << "}\n";
+ });
OS << "#endif\n";
}
-void ClangOpcodesEmitter::EmitDisasm(raw_ostream &OS, StringRef N, Record *R) {
+void ClangOpcodesEmitter::EmitDisasm(raw_ostream &OS, StringRef N,
+ const Record *R) {
OS << "#ifdef GET_DISASM\n";
- Enumerate(R, N, [R, &OS](ArrayRef<Record *>, const Twine &ID) {
+ Enumerate(R, N, [R, &OS](ArrayRef<const Record *>, const Twine &ID) {
OS << "case OP_" << ID << ":\n";
OS << " PrintName(\"" << ID << "\");\n";
OS << " OS << \"\\t\"";
- for (auto *Arg : R->getValueAsListOfDefs("Args"))
- OS << " << PC.read<" << Arg->getValueAsString("Name") << ">() << \" \"";
+ for (auto *Arg : R->getValueAsListOfDefs("Args")) {
+ OS << " << ReadArg<" << Arg->getValueAsString("Name") << ">(P, PC)";
+ OS << " << \" \"";
+ }
OS << " << \"\\n\";\n";
OS << " continue;\n";
@@ -170,12 +182,13 @@ void ClangOpcodesEmitter::EmitDisasm(raw_ostream &OS, StringRef N, Record *R) {
OS << "#endif\n";
}
-void ClangOpcodesEmitter::EmitEmitter(raw_ostream &OS, StringRef N, Record *R) {
+void ClangOpcodesEmitter::EmitEmitter(raw_ostream &OS, StringRef N,
+ const Record *R) {
if (R->getValueAsBit("HasCustomLink"))
return;
OS << "#ifdef GET_LINK_IMPL\n";
- Enumerate(R, N, [R, &OS](ArrayRef<Record *>, const Twine &ID) {
+ Enumerate(R, N, [R, &OS](ArrayRef<const Record *>, const Twine &ID) {
auto Args = R->getValueAsListOfDefs("Args");
// Emit the list of arguments.
@@ -200,10 +213,11 @@ void ClangOpcodesEmitter::EmitEmitter(raw_ostream &OS, StringRef N, Record *R) {
OS << "#endif\n";
}
-void ClangOpcodesEmitter::EmitProto(raw_ostream &OS, StringRef N, Record *R) {
+void ClangOpcodesEmitter::EmitProto(raw_ostream &OS, StringRef N,
+ const Record *R) {
OS << "#if defined(GET_EVAL_PROTO) || defined(GET_LINK_PROTO)\n";
auto Args = R->getValueAsListOfDefs("Args");
- Enumerate(R, N, [&OS, &Args](ArrayRef<Record *> TS, const Twine &ID) {
+ Enumerate(R, N, [&OS, &Args](ArrayRef<const Record *> TS, const Twine &ID) {
OS << "bool emit" << ID << "(";
for (auto *Arg : Args)
OS << Arg->getValueAsString("Name") << ", ";
@@ -231,16 +245,19 @@ void ClangOpcodesEmitter::EmitProto(raw_ostream &OS, StringRef N, Record *R) {
OS << "#endif\n";
}
-void ClangOpcodesEmitter::EmitGroup(raw_ostream &OS, StringRef N, Record *R) {
+void ClangOpcodesEmitter::EmitGroup(raw_ostream &OS, StringRef N,
+ const Record *R) {
if (!R->getValueAsBit("HasGroup"))
return;
auto *Types = R->getValueAsListInit("Types");
auto Args = R->getValueAsListOfDefs("Args");
+ Twine EmitFuncName = "emit" + N;
+
// Emit the prototype of the group emitter in the header.
OS << "#if defined(GET_EVAL_PROTO) || defined(GET_LINK_PROTO)\n";
- OS << "bool emit" << N << "(";
+ OS << "bool " << EmitFuncName << "(";
for (size_t I = 0, N = Types->size(); I < N; ++I)
OS << "PrimType, ";
for (auto *Arg : Args)
@@ -256,7 +273,7 @@ void ClangOpcodesEmitter::EmitGroup(raw_ostream &OS, StringRef N, Record *R) {
OS << "#else\n";
OS << "ByteCodeEmitter\n";
OS << "#endif\n";
- OS << "::emit" << N << "(";
+ OS << "::" << EmitFuncName << "(";
for (size_t I = 0, N = Types->size(); I < N; ++I)
OS << "PrimType T" << I << ", ";
for (size_t I = 0, N = Args.size(); I < N; ++I)
@@ -264,8 +281,9 @@ void ClangOpcodesEmitter::EmitGroup(raw_ostream &OS, StringRef N, Record *R) {
OS << "const SourceInfo &I) {\n";
std::function<void(size_t, const Twine &)> Rec;
- llvm::SmallVector<Record *, 2> TS;
- Rec = [this, &Rec, &OS, Types, &Args, R, &TS, N](size_t I, const Twine &ID) {
+ llvm::SmallVector<const Record *, 2> TS;
+ Rec = [this, &Rec, &OS, Types, &Args, R, &TS, N,
+ EmitFuncName](size_t I, const Twine &ID) {
if (I >= Types->size()) {
// Print a call to the emitter method.
// Custom evaluator methods dispatch to template methods.
@@ -301,7 +319,8 @@ void ClangOpcodesEmitter::EmitGroup(raw_ostream &OS, StringRef N, Record *R) {
}
// Emit a default case if not all types are present.
if (Cases.size() < NumTypes)
- OS << " default: llvm_unreachable(\"invalid type\");\n";
+ OS << " default: llvm_unreachable(\"invalid type: " << EmitFuncName
+ << "\");\n";
OS << " }\n";
OS << " llvm_unreachable(\"invalid enum value\");\n";
} else {
@@ -314,34 +333,37 @@ void ClangOpcodesEmitter::EmitGroup(raw_ostream &OS, StringRef N, Record *R) {
OS << "#endif\n";
}
-void ClangOpcodesEmitter::EmitEval(raw_ostream &OS, StringRef N, Record *R) {
+void ClangOpcodesEmitter::EmitEval(raw_ostream &OS, StringRef N,
+ const Record *R) {
if (R->getValueAsBit("HasCustomEval"))
return;
OS << "#ifdef GET_EVAL_IMPL\n";
- Enumerate(R, N, [this, R, &N, &OS](ArrayRef<Record *> TS, const Twine &ID) {
- auto Args = R->getValueAsListOfDefs("Args");
-
- OS << "bool EvalEmitter::emit" << ID << "(";
- for (size_t I = 0, N = Args.size(); I < N; ++I)
- OS << Args[I]->getValueAsString("Name") << " A" << I << ", ";
- OS << "const SourceInfo &L) {\n";
- OS << " if (!isActive()) return true;\n";
- OS << " CurrentSource = L;\n";
-
- OS << " return " << N;
- PrintTypes(OS, TS);
- OS << "(S, OpPC";
- for (size_t I = 0, N = Args.size(); I < N; ++I)
- OS << ", A" << I;
- OS << ");\n";
- OS << "}\n";
- });
+ Enumerate(R, N,
+ [this, R, &N, &OS](ArrayRef<const Record *> TS, const Twine &ID) {
+ auto Args = R->getValueAsListOfDefs("Args");
+
+ OS << "bool EvalEmitter::emit" << ID << "(";
+ for (size_t I = 0, N = Args.size(); I < N; ++I)
+ OS << Args[I]->getValueAsString("Name") << " A" << I << ", ";
+ OS << "const SourceInfo &L) {\n";
+ OS << " if (!isActive()) return true;\n";
+ OS << " CurrentSource = L;\n";
+
+ OS << " return " << N;
+ PrintTypes(OS, TS);
+ OS << "(S, OpPC";
+ for (size_t I = 0, N = Args.size(); I < N; ++I)
+ OS << ", A" << I;
+ OS << ");\n";
+ OS << "}\n";
+ });
OS << "#endif\n";
}
-void ClangOpcodesEmitter::PrintTypes(raw_ostream &OS, ArrayRef<Record *> Types) {
+void ClangOpcodesEmitter::PrintTypes(raw_ostream &OS,
+ ArrayRef<const Record *> Types) {
if (Types.empty())
return;
OS << "<";
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp
index a4cb5b7cacd9..968b3e0661a8 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp
@@ -17,6 +17,7 @@
#include "TableGenBackends.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
@@ -233,19 +234,18 @@ private:
MapVector<BuiltinIndexListTy *, BuiltinTableEntries> SignatureListMap;
};
-// OpenCL builtin test generator. This class processes the same TableGen input
-// as BuiltinNameEmitter, but generates a .cl file that contains a call to each
-// builtin function described in the .td input.
-class OpenCLBuiltinTestEmitter {
+/// Base class for emitting a file (e.g. header or test) from OpenCLBuiltins.td
+class OpenCLBuiltinFileEmitterBase {
public:
- OpenCLBuiltinTestEmitter(RecordKeeper &Records, raw_ostream &OS)
+ OpenCLBuiltinFileEmitterBase(RecordKeeper &Records, raw_ostream &OS)
: Records(Records), OS(OS) {}
+ virtual ~OpenCLBuiltinFileEmitterBase() = default;
// Entrypoint to generate the functions for testing all OpenCL builtin
// functions.
- void emit();
+ virtual void emit() = 0;
-private:
+protected:
struct TypeFlags {
TypeFlags() : IsConst(false), IsVolatile(false), IsPointer(false) {}
bool IsConst : 1;
@@ -282,6 +282,27 @@ private:
expandTypesInSignature(const std::vector<Record *> &Signature,
SmallVectorImpl<SmallVector<std::string, 2>> &Types);
+ // Emit extension enabling pragmas.
+ void emitExtensionSetup();
+
+ // Emit an #if guard for a Builtin's extension. Return the corresponding
+ // closing #endif, or an empty string if no extension #if guard was emitted.
+ std::string emitExtensionGuard(const Record *Builtin);
+
+ // Emit an #if guard for a Builtin's language version. Return the
+ // corresponding closing #endif, or an empty string if no version #if guard
+ // was emitted.
+ std::string emitVersionGuard(const Record *Builtin);
+
+ // Emit an #if guard for all type extensions required for the given type
+ // strings. Return the corresponding closing #endif, or an empty string
+ // if no extension #if guard was emitted.
+ StringRef
+ emitTypeExtensionGuards(const SmallVectorImpl<std::string> &Signature);
+
+ // Map type strings to type extensions (e.g. "half2" -> "cl_khr_fp16").
+ StringMap<StringRef> TypeExtMap;
+
// Contains OpenCL builtin functions and related information, stored as
// Record instances. They are coming from the associated TableGen file.
RecordKeeper &Records;
@@ -290,10 +311,35 @@ private:
raw_ostream &OS;
};
+// OpenCL builtin test generator. This class processes the same TableGen input
+// as BuiltinNameEmitter, but generates a .cl file that contains a call to each
+// builtin function described in the .td input.
+class OpenCLBuiltinTestEmitter : public OpenCLBuiltinFileEmitterBase {
+public:
+ OpenCLBuiltinTestEmitter(RecordKeeper &Records, raw_ostream &OS)
+ : OpenCLBuiltinFileEmitterBase(Records, OS) {}
+
+ // Entrypoint to generate the functions for testing all OpenCL builtin
+ // functions.
+ void emit() override;
+};
+
+// OpenCL builtin header generator. This class processes the same TableGen
+// input as BuiltinNameEmitter, but generates a .h file that contains a
+// prototype for each builtin function described in the .td input.
+class OpenCLBuiltinHeaderEmitter : public OpenCLBuiltinFileEmitterBase {
+public:
+ OpenCLBuiltinHeaderEmitter(RecordKeeper &Records, raw_ostream &OS)
+ : OpenCLBuiltinFileEmitterBase(Records, OS) {}
+
+ // Entrypoint to generate the header.
+ void emit() override;
+};
+
} // namespace
void BuiltinNameEmitter::Emit() {
- emitSourceFileHeader("OpenCL Builtin handling", OS);
+ emitSourceFileHeader("OpenCL Builtin handling", OS, Records);
OS << "#include \"llvm/ADT/StringRef.h\"\n";
OS << "using namespace clang;\n\n";
@@ -323,7 +369,7 @@ void BuiltinNameEmitter::ExtractEnumTypes(std::vector<Record *> &Types,
raw_string_ostream SS(Output);
for (const auto *T : Types) {
- if (TypesSeen.find(T->getValueAsString("Name")) == TypesSeen.end()) {
+ if (!TypesSeen.contains(T->getValueAsString("Name"))) {
SS << " OCLT_" + T->getValueAsString("Name") << ",\n";
// Save the type names in the same order as their enum value. Note that
// the Record can be a VectorType or something else, only the name is
@@ -464,7 +510,7 @@ void BuiltinNameEmitter::GetOverloads() {
std::vector<Record *> Builtins = Records.getAllDerivedDefinitions("Builtin");
for (const auto *B : Builtins) {
StringRef BName = B->getValueAsString("Name");
- if (FctOverloadMap.find(BName) == FctOverloadMap.end()) {
+ if (!FctOverloadMap.contains(BName)) {
FctOverloadMap.insert(std::make_pair(
BName, std::vector<std::pair<const Record *, unsigned>>{}));
}
@@ -472,10 +518,10 @@ void BuiltinNameEmitter::GetOverloads() {
auto Signature = B->getValueAsListOfDefs("Signature");
// Reuse signatures to avoid unnecessary duplicates.
auto it =
- std::find_if(SignaturesList.begin(), SignaturesList.end(),
- [&](const std::pair<std::vector<Record *>, unsigned> &a) {
- return a.first == Signature;
- });
+ llvm::find_if(SignaturesList,
+ [&](const std::pair<std::vector<Record *>, unsigned> &a) {
+ return a.first == Signature;
+ });
unsigned SignIndex;
if (it == SignaturesList.end()) {
VerifySignature(Signature, B);
@@ -564,7 +610,7 @@ static unsigned short EncodeVersions(unsigned int MinVersion,
}
unsigned VersionIDs[] = {100, 110, 120, 200, 300};
- for (unsigned I = 0; I < sizeof(VersionIDs) / sizeof(VersionIDs[0]); I++) {
+ for (unsigned I = 0; I < std::size(VersionIDs); I++) {
if (VersionIDs[I] >= MinVersion && VersionIDs[I] < MaxVersion) {
Encoded |= 1 << I;
}
@@ -709,6 +755,20 @@ static std::pair<unsigned, unsigned> isOpenCLBuiltin(llvm::StringRef Name) {
OS << "} // isOpenCLBuiltin\n";
}
+// Emit an if-statement with an isMacroDefined call for each extension in
+// the space-separated list of extensions.
+static void EmitMacroChecks(raw_ostream &OS, StringRef Extensions) {
+ SmallVector<StringRef, 2> ExtVec;
+ Extensions.split(ExtVec, " ");
+ OS << " if (";
+ for (StringRef Ext : ExtVec) {
+ if (Ext != ExtVec.front())
+ OS << " && ";
+ OS << "S.getPreprocessor().isMacroDefined(\"" << Ext << "\")";
+ }
+ OS << ") {\n ";
+}
+
void BuiltinNameEmitter::EmitQualTypeFinder() {
OS << R"(
@@ -774,15 +834,24 @@ static void OCL2Qual(Sema &S, const OpenCLTypeStruct &Ty,
<< " case OCLAQ_None:\n"
<< " llvm_unreachable(\"Image without access qualifier\");\n";
for (const auto &Image : ITE.getValue()) {
+ StringRef Exts =
+ Image->getValueAsDef("Extension")->getValueAsString("ExtName");
OS << StringSwitch<const char *>(
Image->getValueAsString("AccessQualifier"))
.Case("RO", " case OCLAQ_ReadOnly:\n")
.Case("WO", " case OCLAQ_WriteOnly:\n")
- .Case("RW", " case OCLAQ_ReadWrite:\n")
- << " QT.push_back("
+ .Case("RW", " case OCLAQ_ReadWrite:\n");
+ if (!Exts.empty()) {
+ OS << " ";
+ EmitMacroChecks(OS, Exts);
+ }
+ OS << " QT.push_back("
<< Image->getValueAsDef("QTExpr")->getValueAsString("TypeExpr")
- << ");\n"
- << " break;\n";
+ << ");\n";
+ if (!Exts.empty()) {
+ OS << " }\n";
+ }
+ OS << " break;\n";
}
OS << " }\n"
<< " break;\n";
@@ -801,15 +870,14 @@ static void OCL2Qual(Sema &S, const OpenCLTypeStruct &Ty,
// Collect all QualTypes for a single vector size into TypeList.
OS << " SmallVector<QualType, " << BaseTypes.size() << "> TypeList;\n";
for (const auto *T : BaseTypes) {
- StringRef Ext =
+ StringRef Exts =
T->getValueAsDef("Extension")->getValueAsString("ExtName");
- if (!Ext.empty()) {
- OS << " if (S.getPreprocessor().isMacroDefined(\"" << Ext
- << "\")) {\n ";
+ if (!Exts.empty()) {
+ EmitMacroChecks(OS, Exts);
}
OS << " TypeList.push_back("
<< T->getValueAsDef("QTExpr")->getValueAsString("TypeExpr") << ");\n";
- if (!Ext.empty()) {
+ if (!Exts.empty()) {
OS << " }\n";
}
}
@@ -839,10 +907,10 @@ static void OCL2Qual(Sema &S, const OpenCLTypeStruct &Ty,
for (const auto *T : Types) {
// Check this is not an image type
- if (ImageTypesMap.find(T->getValueAsString("Name")) != ImageTypesMap.end())
+ if (ImageTypesMap.contains(T->getValueAsString("Name")))
continue;
// Check we have not seen this Type
- if (TypesSeen.find(T->getValueAsString("Name")) != TypesSeen.end())
+ if (TypesSeen.contains(T->getValueAsString("Name")))
continue;
TypesSeen.insert(std::make_pair(T->getValueAsString("Name"), true));
@@ -853,15 +921,14 @@ static void OCL2Qual(Sema &S, const OpenCLTypeStruct &Ty,
// Emit the cases for non generic, non image types.
OS << " case OCLT_" << T->getValueAsString("Name") << ":\n";
- StringRef Ext = T->getValueAsDef("Extension")->getValueAsString("ExtName");
- // If this type depends on an extension, ensure the extension macro is
+ StringRef Exts = T->getValueAsDef("Extension")->getValueAsString("ExtName");
+ // If this type depends on an extension, ensure the extension macros are
// defined.
- if (!Ext.empty()) {
- OS << " if (S.getPreprocessor().isMacroDefined(\"" << Ext
- << "\")) {\n ";
+ if (!Exts.empty()) {
+ EmitMacroChecks(OS, Exts);
}
OS << " QT.push_back(" << QT->getValueAsString("TypeExpr") << ");\n";
- if (!Ext.empty()) {
+ if (!Exts.empty()) {
OS << " }\n";
}
OS << " break;\n";
@@ -923,9 +990,9 @@ static void OCL2Qual(Sema &S, const OpenCLTypeStruct &Ty,
OS << "\n} // OCL2Qual\n";
}
-std::string OpenCLBuiltinTestEmitter::getTypeString(const Record *Type,
- TypeFlags Flags,
- int VectorSize) const {
+std::string OpenCLBuiltinFileEmitterBase::getTypeString(const Record *Type,
+ TypeFlags Flags,
+ int VectorSize) const {
std::string S;
if (Type->getValueAsBit("IsConst") || Flags.IsConst) {
S += "const ";
@@ -970,7 +1037,7 @@ std::string OpenCLBuiltinTestEmitter::getTypeString(const Record *Type,
return S;
}
-void OpenCLBuiltinTestEmitter::getTypeLists(
+void OpenCLBuiltinFileEmitterBase::getTypeLists(
Record *Type, TypeFlags &Flags, std::vector<Record *> &TypeList,
std::vector<int64_t> &VectorList) const {
bool isGenType = Type->isSubClassOf("GenericType");
@@ -1003,7 +1070,7 @@ void OpenCLBuiltinTestEmitter::getTypeLists(
VectorList.push_back(Type->getValueAsInt("VecWidth"));
}
-void OpenCLBuiltinTestEmitter::expandTypesInSignature(
+void OpenCLBuiltinFileEmitterBase::expandTypesInSignature(
const std::vector<Record *> &Signature,
SmallVectorImpl<SmallVector<std::string, 2>> &Types) {
// Find out if there are any GenTypes in this signature, and if so, calculate
@@ -1021,7 +1088,16 @@ void OpenCLBuiltinTestEmitter::expandTypesInSignature(
// Insert the Cartesian product of the types and vector sizes.
for (const auto &Vector : VectorList) {
for (const auto &Type : TypeList) {
- ExpandedArg.push_back(getTypeString(Type, Flags, Vector));
+ std::string FullType = getTypeString(Type, Flags, Vector);
+ ExpandedArg.push_back(FullType);
+
+ // If the type requires an extension, add a TypeExtMap entry mapping
+ // the full type name to the extension.
+ StringRef Ext =
+ Type->getValueAsDef("Extension")->getValueAsString("ExtName");
+ if (!Ext.empty() && !TypeExtMap.contains(FullType)) {
+ TypeExtMap.insert({FullType, Ext});
+ }
}
}
NumSignatures = std::max<unsigned>(NumSignatures, ExpandedArg.size());
@@ -1044,10 +1120,7 @@ void OpenCLBuiltinTestEmitter::expandTypesInSignature(
}
}
-void OpenCLBuiltinTestEmitter::emit() {
- emitSourceFileHeader("OpenCL Builtin exhaustive testing", OS);
-
- // Enable some extensions for testing.
+void OpenCLBuiltinFileEmitterBase::emitExtensionSetup() {
OS << R"(
#pragma OPENCL EXTENSION cl_khr_fp16 : enable
#pragma OPENCL EXTENSION cl_khr_fp64 : enable
@@ -1058,6 +1131,93 @@ void OpenCLBuiltinTestEmitter::emit() {
#pragma OPENCL EXTENSION cl_khr_3d_image_writes : enable
)";
+}
+
+std::string
+OpenCLBuiltinFileEmitterBase::emitExtensionGuard(const Record *Builtin) {
+ StringRef Extensions =
+ Builtin->getValueAsDef("Extension")->getValueAsString("ExtName");
+ if (Extensions.empty())
+ return "";
+
+ OS << "#if";
+
+ SmallVector<StringRef, 2> ExtVec;
+ Extensions.split(ExtVec, " ");
+ bool isFirst = true;
+ for (StringRef Ext : ExtVec) {
+ if (!isFirst) {
+ OS << " &&";
+ }
+ OS << " defined(" << Ext << ")";
+ isFirst = false;
+ }
+ OS << "\n";
+
+ return "#endif // Extension\n";
+}
+
+std::string
+OpenCLBuiltinFileEmitterBase::emitVersionGuard(const Record *Builtin) {
+ std::string OptionalEndif;
+ auto PrintOpenCLVersion = [this](int Version) {
+ OS << "CL_VERSION_" << (Version / 100) << "_" << ((Version % 100) / 10);
+ };
+ int MinVersion = Builtin->getValueAsDef("MinVersion")->getValueAsInt("ID");
+ if (MinVersion != 100) {
+ // OpenCL 1.0 is the default minimum version.
+ OS << "#if __OPENCL_C_VERSION__ >= ";
+ PrintOpenCLVersion(MinVersion);
+ OS << "\n";
+ OptionalEndif = "#endif // MinVersion\n" + OptionalEndif;
+ }
+ int MaxVersion = Builtin->getValueAsDef("MaxVersion")->getValueAsInt("ID");
+ if (MaxVersion) {
+ OS << "#if __OPENCL_C_VERSION__ < ";
+ PrintOpenCLVersion(MaxVersion);
+ OS << "\n";
+ OptionalEndif = "#endif // MaxVersion\n" + OptionalEndif;
+ }
+ return OptionalEndif;
+}
+
+StringRef OpenCLBuiltinFileEmitterBase::emitTypeExtensionGuards(
+ const SmallVectorImpl<std::string> &Signature) {
+ SmallSet<StringRef, 2> ExtSet;
+
+ // Iterate over all types to gather the set of required TypeExtensions.
+ for (const auto &Ty : Signature) {
+ StringRef TypeExt = TypeExtMap.lookup(Ty);
+ if (!TypeExt.empty()) {
+ // The TypeExtensions are space-separated in the .td file.
+ SmallVector<StringRef, 2> ExtVec;
+ TypeExt.split(ExtVec, " ");
+ for (const auto Ext : ExtVec) {
+ ExtSet.insert(Ext);
+ }
+ }
+ }
+
+ // Emit the #if only when at least one extension is required.
+ if (ExtSet.empty())
+ return "";
+
+ OS << "#if ";
+ bool isFirst = true;
+ for (const auto Ext : ExtSet) {
+ if (!isFirst)
+ OS << " && ";
+ OS << "defined(" << Ext << ")";
+ isFirst = false;
+ }
+ OS << "\n";
+ return "#endif // TypeExtension\n";
+}
+
+void OpenCLBuiltinTestEmitter::emit() {
+ emitSourceFileHeader("OpenCL Builtin exhaustive testing", OS, Records);
+
+ emitExtensionSetup();
// Ensure each test has a unique name by numbering them.
unsigned TestID = 0;
@@ -1071,44 +1231,13 @@ void OpenCLBuiltinTestEmitter::emit() {
expandTypesInSignature(B->getValueAsListOfDefs("Signature"), FTypes);
OS << "// Test " << Name << "\n";
- std::string OptionalEndif;
- StringRef Extensions =
- B->getValueAsDef("Extension")->getValueAsString("ExtName");
- if (!Extensions.empty()) {
- OS << "#if";
- OptionalEndif = "#endif // Extension\n";
- SmallVector<StringRef, 2> ExtVec;
- Extensions.split(ExtVec, " ");
- bool isFirst = true;
- for (StringRef Ext : ExtVec) {
- if (!isFirst) {
- OS << " &&";
- }
- OS << " defined(" << Ext << ")";
- isFirst = false;
- }
- OS << "\n";
- }
- auto PrintOpenCLVersion = [this](int Version) {
- OS << "CL_VERSION_" << (Version / 100) << "_" << ((Version % 100) / 10);
- };
- int MinVersion = B->getValueAsDef("MinVersion")->getValueAsInt("ID");
- if (MinVersion != 100) {
- // OpenCL 1.0 is the default minimum version.
- OS << "#if __OPENCL_C_VERSION__ >= ";
- PrintOpenCLVersion(MinVersion);
- OS << "\n";
- OptionalEndif = "#endif // MinVersion\n" + OptionalEndif;
- }
- int MaxVersion = B->getValueAsDef("MaxVersion")->getValueAsInt("ID");
- if (MaxVersion) {
- OS << "#if __OPENCL_C_VERSION__ < ";
- PrintOpenCLVersion(MaxVersion);
- OS << "\n";
- OptionalEndif = "#endif // MaxVersion\n" + OptionalEndif;
- }
+ std::string OptionalExtensionEndif = emitExtensionGuard(B);
+ std::string OptionalVersionEndif = emitVersionGuard(B);
+
for (const auto &Signature : FTypes) {
+ StringRef OptionalTypeExtEndif = emitTypeExtensionGuards(Signature);
+
// Emit function declaration.
OS << Signature[0] << " test" << TestID++ << "_" << Name << "(";
if (Signature.size() > 1) {
@@ -1135,16 +1264,84 @@ void OpenCLBuiltinTestEmitter::emit() {
// End of function body.
OS << "}\n";
+ OS << OptionalTypeExtEndif;
}
- OS << OptionalEndif << "\n";
+
+ OS << OptionalVersionEndif;
+ OS << OptionalExtensionEndif;
}
}
+void OpenCLBuiltinHeaderEmitter::emit() {
+ emitSourceFileHeader("OpenCL Builtin declarations", OS, Records);
+
+ emitExtensionSetup();
+
+ OS << R"(
+#define __ovld __attribute__((overloadable))
+#define __conv __attribute__((convergent))
+#define __purefn __attribute__((pure))
+#define __cnfn __attribute__((const))
+
+)";
+
+ // Iterate over all builtins; sort to follow order of definition in .td file.
+ std::vector<Record *> Builtins = Records.getAllDerivedDefinitions("Builtin");
+ llvm::sort(Builtins, LessRecord());
+
+ for (const auto *B : Builtins) {
+ StringRef Name = B->getValueAsString("Name");
+
+ std::string OptionalExtensionEndif = emitExtensionGuard(B);
+ std::string OptionalVersionEndif = emitVersionGuard(B);
+
+ SmallVector<SmallVector<std::string, 2>, 4> FTypes;
+ expandTypesInSignature(B->getValueAsListOfDefs("Signature"), FTypes);
+
+ for (const auto &Signature : FTypes) {
+ StringRef OptionalTypeExtEndif = emitTypeExtensionGuards(Signature);
+
+ // Emit function declaration.
+ OS << Signature[0] << " __ovld ";
+ if (B->getValueAsBit("IsConst"))
+ OS << "__cnfn ";
+ if (B->getValueAsBit("IsPure"))
+ OS << "__purefn ";
+ if (B->getValueAsBit("IsConv"))
+ OS << "__conv ";
+
+ OS << Name << "(";
+ if (Signature.size() > 1) {
+ for (unsigned I = 1; I < Signature.size(); I++) {
+ if (I != 1)
+ OS << ", ";
+ OS << Signature[I];
+ }
+ }
+ OS << ");\n";
+
+ OS << OptionalTypeExtEndif;
+ }
+
+ OS << OptionalVersionEndif;
+ OS << OptionalExtensionEndif;
+ }
+
+ OS << "\n// Disable any extensions we may have enabled previously.\n"
+ "#pragma OPENCL EXTENSION all : disable\n";
+}
+
void clang::EmitClangOpenCLBuiltins(RecordKeeper &Records, raw_ostream &OS) {
BuiltinNameEmitter NameChecker(Records, OS);
NameChecker.Emit();
}
+void clang::EmitClangOpenCLBuiltinHeader(RecordKeeper &Records,
+ raw_ostream &OS) {
+ OpenCLBuiltinHeaderEmitter HeaderFileGenerator(Records, OS);
+ HeaderFileGenerator.emit();
+}
+
void clang::EmitClangOpenCLBuiltinTests(RecordKeeper &Records,
raw_ostream &OS) {
OpenCLBuiltinTestEmitter TestFileGenerator(Records, OS);
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangOptionDocEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangOptionDocEmitter.cpp
index 0e079b6b505a..a4095950ca97 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangOptionDocEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangOptionDocEmitter.cpp
@@ -31,13 +31,40 @@ struct DocumentedGroup;
struct Documentation {
std::vector<DocumentedGroup> Groups;
std::vector<DocumentedOption> Options;
+
+ bool empty() {
+ return Groups.empty() && Options.empty();
+ }
};
struct DocumentedGroup : Documentation {
Record *Group;
};
+static bool hasFlag(const Record *Option, StringRef OptionFlag,
+ StringRef FlagsField) {
+ for (const Record *Flag : Option->getValueAsListOfDefs(FlagsField))
+ if (Flag->getName() == OptionFlag)
+ return true;
+ if (const DefInit *DI = dyn_cast<DefInit>(Option->getValueInit("Group")))
+ for (const Record *Flag : DI->getDef()->getValueAsListOfDefs(FlagsField))
+ if (Flag->getName() == OptionFlag)
+ return true;
+ return false;
+}
+
+static bool isOptionVisible(const Record *Option, const Record *DocInfo) {
+ for (StringRef IgnoredFlag : DocInfo->getValueAsListOfStrings("IgnoreFlags"))
+ if (hasFlag(Option, IgnoredFlag, "Flags"))
+ return false;
+ for (StringRef Mask : DocInfo->getValueAsListOfStrings("VisibilityMask"))
+ if (hasFlag(Option, Mask, "Visibility"))
+ return true;
+ return false;
+}
+
// Reorganize the records into a suitable form for emitting documentation.
-Documentation extractDocumentation(RecordKeeper &Records) {
+Documentation extractDocumentation(RecordKeeper &Records,
+ const Record *DocInfo) {
Documentation Result;
// Build the tree of groups. The root in the tree is the fake option group
@@ -124,12 +151,15 @@ Documentation extractDocumentation(RecordKeeper &Records) {
D.Groups.back().Group = G;
Documentation &Base = D.Groups.back();
Base = DocumentationForGroup(G);
+ if (Base.empty())
+ D.Groups.pop_back();
}
auto &Options = OptionsInGroup[R];
llvm::sort(Options, CompareByName);
for (Record *O : Options)
- D.Options.push_back(DocumentationForOption(O));
+ if (isOptionVisible(O, DocInfo))
+ D.Options.push_back(DocumentationForOption(O));
return D;
};
@@ -161,25 +191,10 @@ unsigned getNumArgsForKind(Record *OptionKind, const Record *Option) {
.Default(0);
}
-bool hasFlag(const Record *OptionOrGroup, StringRef OptionFlag) {
- for (const Record *Flag : OptionOrGroup->getValueAsListOfDefs("Flags"))
- if (Flag->getName() == OptionFlag)
- return true;
- return false;
-}
-
-bool isExcluded(const Record *OptionOrGroup, const Record *DocInfo) {
- // FIXME: Provide a flag to specify the set of exclusions.
- for (StringRef Exclusion : DocInfo->getValueAsListOfStrings("ExcludedFlags"))
- if (hasFlag(OptionOrGroup, Exclusion))
- return true;
- return false;
-}
-
std::string escapeRST(StringRef Str) {
std::string Out;
for (auto K : Str) {
- if (StringRef("`*|_[]\\").count(K))
+ if (StringRef("`*|[]\\").count(K))
Out.push_back('\\');
Out.push_back(K);
}
@@ -238,6 +253,8 @@ void emitOptionWithArgs(StringRef Prefix, const Record *Option,
}
}
+constexpr StringLiteral DefaultMetaVarName = "<arg>";
+
void emitOptionName(StringRef Prefix, const Record *Option, raw_ostream &OS) {
// Find the arguments to list after the option.
unsigned NumArgs = getNumArgsForKind(Option->getValueAsDef("Kind"), Option);
@@ -247,7 +264,7 @@ void emitOptionName(StringRef Prefix, const Record *Option, raw_ostream &OS) {
if (HasMetaVarName)
Args.push_back(std::string(Option->getValueAsString("MetaVarName")));
else if (NumArgs == 1)
- Args.push_back("<arg>");
+ Args.push_back(DefaultMetaVarName.str());
// Fill up arguments if this option didn't provide a meta var name or it
// supports an unlimited number of arguments. We can't see how many arguments
@@ -294,14 +311,13 @@ void forEachOptionName(const DocumentedOption &Option, const Record *DocInfo,
F(Option.Option);
for (auto *Alias : Option.Aliases)
- if (!isExcluded(Alias, DocInfo) && canSphinxCopeWithOption(Option.Option))
+ if (isOptionVisible(Alias, DocInfo) &&
+ canSphinxCopeWithOption(Option.Option))
F(Alias);
}
void emitOption(const DocumentedOption &Option, const Record *DocInfo,
raw_ostream &OS) {
- if (isExcluded(Option.Option, DocInfo))
- return;
if (Option.Option->getValueAsDef("Kind")->getName() == "KIND_UNKNOWN" ||
Option.Option->getValueAsDef("Kind")->getName() == "KIND_INPUT")
return;
@@ -341,8 +357,30 @@ void emitOption(const DocumentedOption &Option, const Record *DocInfo,
OS << "\n\n";
// Emit the description, if we have one.
+ const Record *R = Option.Option;
std::string Description =
- getRSTStringWithTextFallback(Option.Option, "DocBrief", "HelpText");
+ getRSTStringWithTextFallback(R, "DocBrief", "HelpText");
+
+ if (!isa<UnsetInit>(R->getValueInit("Values"))) {
+ if (!Description.empty() && Description.back() != '.')
+ Description.push_back('.');
+
+ StringRef MetaVarName;
+ if (!isa<UnsetInit>(R->getValueInit("MetaVarName")))
+ MetaVarName = R->getValueAsString("MetaVarName");
+ else
+ MetaVarName = DefaultMetaVarName;
+
+ SmallVector<StringRef> Values;
+ SplitString(R->getValueAsString("Values"), Values, ",");
+ Description += (" " + MetaVarName + " must be '").str();
+ if (Values.size() > 1) {
+ Description += join(Values.begin(), Values.end() - 1, "', '");
+ Description += "' or '";
+ }
+ Description += (Values.back() + "'.").str();
+ }
+
if (!Description.empty())
OS << Description << "\n\n";
}
@@ -352,9 +390,6 @@ void emitDocumentation(int Depth, const Documentation &Doc,
void emitGroup(int Depth, const DocumentedGroup &Group, const Record *DocInfo,
raw_ostream &OS) {
- if (isExcluded(Group.Group, DocInfo))
- return;
-
emitHeading(Depth,
getRSTStringWithTextFallback(Group.Group, "DocName", "Name"), OS);
@@ -388,5 +423,5 @@ void clang::EmitClangOptDocs(RecordKeeper &Records, raw_ostream &OS) {
OS << DocInfo->getValueAsString("Intro") << "\n";
OS << ".. program:: " << DocInfo->getValueAsString("Program") << "\n";
- emitDocumentation(0, extractDocumentation(Records), DocInfo, OS);
+ emitDocumentation(0, extractDocumentation(Records, DocInfo), DocInfo, OS);
}
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangSACheckersEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangSACheckersEmitter.cpp
index 00d88274fc38..2a2e466ae197 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangSACheckersEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangSACheckersEmitter.cpp
@@ -24,28 +24,29 @@ using namespace llvm;
// Static Analyzer Checkers Tables generation
//===----------------------------------------------------------------------===//
-static std::string getPackageFullName(const Record *R);
+static std::string getPackageFullName(const Record *R, StringRef Sep = ".");
-static std::string getParentPackageFullName(const Record *R) {
+static std::string getParentPackageFullName(const Record *R,
+ StringRef Sep = ".") {
std::string name;
if (DefInit *DI = dyn_cast<DefInit>(R->getValueInit("ParentPackage")))
- name = getPackageFullName(DI->getDef());
+ name = getPackageFullName(DI->getDef(), Sep);
return name;
}
-static std::string getPackageFullName(const Record *R) {
- std::string name = getParentPackageFullName(R);
+static std::string getPackageFullName(const Record *R, StringRef Sep) {
+ std::string name = getParentPackageFullName(R, Sep);
if (!name.empty())
- name += ".";
+ name += Sep;
assert(!R->getValueAsString("PackageName").empty());
name += R->getValueAsString("PackageName");
return name;
}
-static std::string getCheckerFullName(const Record *R) {
- std::string name = getParentPackageFullName(R);
+static std::string getCheckerFullName(const Record *R, StringRef Sep = ".") {
+ std::string name = getParentPackageFullName(R, Sep);
if (!name.empty())
- name += ".";
+ name += Sep;
assert(!R->getValueAsString("CheckerName").empty());
name += R->getValueAsString("CheckerName");
return name;
@@ -74,20 +75,18 @@ static inline uint64_t getValueFromBitsInit(const BitsInit *B, const Record &R)
}
static std::string getCheckerDocs(const Record &R) {
- StringRef LandingPage;
- if (BitsInit *BI = R.getValueAsBitsInit("Documentation")) {
- uint64_t V = getValueFromBitsInit(BI, R);
- if (V == 1)
- LandingPage = "available_checks.html";
- else if (V == 2)
- LandingPage = "alpha_checks.html";
- }
-
- if (LandingPage.empty())
+ const BitsInit *BI = R.getValueAsBitsInit("Documentation");
+ if (!BI)
+ PrintFatalError(R.getLoc(), "missing Documentation<...> member for " +
+ getCheckerFullName(&R));
+
+ // Ignore 'Documentation<NotDocumented>' checkers.
+ if (getValueFromBitsInit(BI, R) == 0)
return "";
- return (llvm::Twine("https://clang-analyzer.llvm.org/") + LandingPage + "#" +
- getCheckerFullName(&R))
+ std::string CheckerFullName = StringRef(getCheckerFullName(&R, "-")).lower();
+ return (llvm::Twine("https://clang.llvm.org/docs/analyzer/checkers.html#") +
+ CheckerFullName)
.str();
}
@@ -220,7 +219,7 @@ void clang::EmitClangSACheckers(RecordKeeper &Records, raw_ostream &OS) {
// - DESCRIPTION
// - DEFAULT: The default value for this option.
//
- // The full option can be specified in the command like like this:
+ // The full option can be specified in the command like this:
// -analyzer-config PACKAGENAME:OPTIONNAME=VALUE
OS << "\n"
"#ifdef GET_PACKAGE_OPTIONS\n";
@@ -320,7 +319,7 @@ void clang::EmitClangSACheckers(RecordKeeper &Records, raw_ostream &OS) {
// - DESCRIPTION
// - DEFAULT: The default value for this option.
//
- // The full option can be specified in the command like like this:
+ // The full option can be specified in the command like this:
// -analyzer-config CHECKERNAME:OPTIONNAME=VALUE
OS << "\n"
"#ifdef GET_CHECKER_OPTIONS\n";
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangSyntaxEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangSyntaxEmitter.cpp
index a940edbb1d24..9720d5873184 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangSyntaxEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangSyntaxEmitter.cpp
@@ -129,7 +129,7 @@ struct SyntaxConstraint {
void clang::EmitClangSyntaxNodeList(llvm::RecordKeeper &Records,
llvm::raw_ostream &OS) {
- llvm::emitSourceFileHeader("Syntax tree node list", OS);
+ llvm::emitSourceFileHeader("Syntax tree node list", OS, Records);
Hierarchy H(Records);
OS << R"cpp(
#ifndef NODE
@@ -188,7 +188,7 @@ static void printDoc(llvm::StringRef Doc, llvm::raw_ostream &OS) {
void clang::EmitClangSyntaxNodeClasses(llvm::RecordKeeper &Records,
llvm::raw_ostream &OS) {
- llvm::emitSourceFileHeader("Syntax tree node list", OS);
+ llvm::emitSourceFileHeader("Syntax tree node list", OS, Records);
Hierarchy H(Records);
OS << "\n// Forward-declare node types so we don't have to carefully "
diff --git a/contrib/llvm-project/clang/utils/TableGen/ClangTypeNodesEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/ClangTypeNodesEmitter.cpp
index 690042f3200e..66bdf5e67602 100644
--- a/contrib/llvm-project/clang/utils/TableGen/ClangTypeNodesEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/ClangTypeNodesEmitter.cpp
@@ -104,7 +104,7 @@ void TypeNodeEmitter::emit() {
if (Types.empty())
PrintFatalError("no Type records in input!");
- emitSourceFileHeader("An x-macro database of Clang type nodes", Out);
+ emitSourceFileHeader("An x-macro database of Clang type nodes", Out, Records);
// Preamble
addMacroToUndef(TypeMacroName);
diff --git a/contrib/llvm-project/clang/utils/TableGen/MveEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/MveEmitter.cpp
index 091af2dc52a1..496cb10d14f2 100644
--- a/contrib/llvm-project/clang/utils/TableGen/MveEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/MveEmitter.cpp
@@ -212,6 +212,7 @@ public:
std::string llvmName() const override {
return "llvm::PointerType::getUnqual(" + Pointee->llvmName() + ")";
}
+ const Type *getPointeeType() const { return Pointee; }
static bool classof(const Type *T) {
return T->typeKind() == TypeKind::Pointer;
@@ -349,13 +350,8 @@ public:
bool requiresFloat() const override { return false; };
bool requiresMVE() const override { return true; }
std::string llvmName() const override {
- // Use <4 x i1> instead of <2 x i1> for two-lane vector types. See
- // the comment in llvm/lib/Target/ARM/ARMInstrMVE.td for further
- // explanation.
- unsigned ModifiedLanes = (Lanes == 2 ? 4 : Lanes);
-
- return "llvm::FixedVectorType::get(Builder.getInt1Ty(), " +
- utostr(ModifiedLanes) + ")";
+ return "llvm::FixedVectorType::get(Builder.getInt1Ty(), " + utostr(Lanes) +
+ ")";
}
static bool classof(const Type *T) {
@@ -707,12 +703,14 @@ public:
class AddressResult : public Result {
public:
Ptr Arg;
+ const Type *Ty;
unsigned Align;
- AddressResult(Ptr Arg, unsigned Align) : Arg(Arg), Align(Align) {}
+ AddressResult(Ptr Arg, const Type *Ty, unsigned Align)
+ : Arg(Arg), Ty(Ty), Align(Align) {}
void genCode(raw_ostream &OS,
CodeGenParamAllocator &ParamAlloc) const override {
- OS << "Address(" << Arg->varname() << ", CharUnits::fromQuantity("
- << Align << "))";
+ OS << "Address(" << Arg->varname() << ", " << Ty->llvmName()
+ << ", CharUnits::fromQuantity(" << Align << "))";
}
std::string typeName() const override {
return "Address";
@@ -884,7 +882,7 @@ public:
} else if (V->varnameUsed()) {
std::string Type = V->typeName();
OS << V->typeName();
- if (!StringRef(Type).endswith("*"))
+ if (!StringRef(Type).ends_with("*"))
OS << " ";
OS << V->varname() << " = ";
}
@@ -898,7 +896,7 @@ public:
llvm::APInt i = iOrig.trunc(64);
SmallString<40> s;
i.toString(s, 16, true, true);
- return std::string(s.str());
+ return std::string(s);
}
std::string genSema() const {
@@ -1194,13 +1192,21 @@ Result::Ptr EmitterBase::getCodeForDag(DagInit *D, const Result::Scope &Scope,
if (D->getNumArgs() != 2)
PrintFatalError("'address' should have two arguments");
Result::Ptr Arg = getCodeForDagArg(D, 0, Scope, Param);
+
+ const Type *Ty = nullptr;
+ if (auto *DI = dyn_cast<DagInit>(D->getArg(0)))
+ if (auto *PTy = dyn_cast<PointerType>(getType(DI->getOperator(), Param)))
+ Ty = PTy->getPointeeType();
+ if (!Ty)
+ PrintFatalError("'address' pointer argument should be a pointer");
+
unsigned Alignment;
if (auto *II = dyn_cast<IntInit>(D->getArg(1))) {
Alignment = II->getValue();
} else {
PrintFatalError("'address' alignment argument should be an integer");
}
- return std::make_shared<AddressResult>(Arg, Alignment);
+ return std::make_shared<AddressResult>(Arg, Ty, Alignment);
} else if (Op->getName() == "unsignedflag") {
if (D->getNumArgs() != 1)
PrintFatalError("unsignedflag should have exactly one argument");
@@ -1494,8 +1500,7 @@ protected:
class raw_self_contained_string_ostream : private string_holder,
public raw_string_ostream {
public:
- raw_self_contained_string_ostream()
- : string_holder(), raw_string_ostream(S) {}
+ raw_self_contained_string_ostream() : raw_string_ostream(S) {}
};
const char LLVMLicenseHeader[] =
@@ -1675,7 +1680,7 @@ void EmitterBase::EmitBuiltinCG(raw_ostream &OS) {
for (size_t i = 0, e = MG.ParamTypes.size(); i < e; ++i) {
StringRef Type = MG.ParamTypes[i];
OS << " " << Type;
- if (!Type.endswith("*"))
+ if (!Type.ends_with("*"))
OS << " ";
OS << " Param" << utostr(i) << ";\n";
}
@@ -1828,7 +1833,7 @@ void MveEmitter::EmitHeader(raw_ostream &OS) {
// prototype.
std::string RetTypeName = Int.returnType()->cName();
- if (!StringRef(RetTypeName).endswith("*"))
+ if (!StringRef(RetTypeName).ends_with("*"))
RetTypeName += " ";
std::vector<std::string> ArgTypeNames;
@@ -1941,8 +1946,8 @@ void MveEmitter::EmitHeader(raw_ostream &OS) {
void MveEmitter::EmitBuiltinDef(raw_ostream &OS) {
for (const auto &kv : ACLEIntrinsics) {
const ACLEIntrinsic &Int = *kv.second;
- OS << "TARGET_HEADER_BUILTIN(__builtin_arm_mve_" << Int.fullName()
- << ", \"\", \"n\", \"arm_mve.h\", ALL_LANGUAGES, \"\")\n";
+ OS << "BUILTIN(__builtin_arm_mve_" << Int.fullName()
+ << ", \"\", \"n\")\n";
}
std::set<std::string> ShortNamesSeen;
@@ -2073,7 +2078,7 @@ void CdeEmitter::EmitHeader(raw_ostream &OS) {
// Make strings for the types involved in the function's
// prototype.
std::string RetTypeName = Int.returnType()->cName();
- if (!StringRef(RetTypeName).endswith("*"))
+ if (!StringRef(RetTypeName).ends_with("*"))
RetTypeName += " ";
std::vector<std::string> ArgTypeNames;
@@ -2151,8 +2156,8 @@ void CdeEmitter::EmitBuiltinDef(raw_ostream &OS) {
if (kv.second->headerOnly())
continue;
const ACLEIntrinsic &Int = *kv.second;
- OS << "TARGET_HEADER_BUILTIN(__builtin_arm_cde_" << Int.fullName()
- << ", \"\", \"ncU\", \"arm_cde.h\", ALL_LANGUAGES, \"\")\n";
+ OS << "BUILTIN(__builtin_arm_cde_" << Int.fullName()
+ << ", \"\", \"ncU\")\n";
}
}
diff --git a/contrib/llvm-project/clang/utils/TableGen/NeonEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/NeonEmitter.cpp
index f0da1a7d2f4e..53334016c180 100644
--- a/contrib/llvm-project/clang/utils/TableGen/NeonEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/NeonEmitter.cpp
@@ -26,8 +26,6 @@
#include "TableGenBackends.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/None.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
@@ -45,6 +43,7 @@
#include <cstdint>
#include <deque>
#include <map>
+#include <optional>
#include <set>
#include <sstream>
#include <string>
@@ -292,7 +291,7 @@ class Variable {
std::string N;
public:
- Variable() : T(Type::getVoid()), N("") {}
+ Variable() : T(Type::getVoid()) {}
Variable(Type T, std::string N) : T(std::move(T)), N(std::move(N)) {}
Type getType() const { return T; }
@@ -321,8 +320,10 @@ class Intrinsic {
/// The list of DAGs for the body. May be empty, in which case we should
/// emit a builtin call.
ListInit *Body;
- /// The architectural #ifdef guard.
- std::string Guard;
+ /// The architectural ifdef guard.
+ std::string ArchGuard;
+ /// The architectural target() guard.
+ std::string TargetGuard;
/// Set if the Unavailable bit is 1. This means we don't generate a body,
/// just an "unavailable" attribute on a declaration.
bool IsUnavailable;
@@ -368,9 +369,9 @@ class Intrinsic {
public:
Intrinsic(Record *R, StringRef Name, StringRef Proto, TypeSpec OutTS,
TypeSpec InTS, ClassKind CK, ListInit *Body, NeonEmitter &Emitter,
- StringRef Guard, bool IsUnavailable, bool BigEndianSafe)
+ StringRef ArchGuard, StringRef TargetGuard, bool IsUnavailable, bool BigEndianSafe)
: R(R), Name(Name.str()), OutTS(OutTS), InTS(InTS), CK(CK), Body(Body),
- Guard(Guard.str()), IsUnavailable(IsUnavailable),
+ ArchGuard(ArchGuard.str()), TargetGuard(TargetGuard.str()), IsUnavailable(IsUnavailable),
BigEndianSafe(BigEndianSafe), PolymorphicKeyType(0), NeededEarly(false),
UseMacro(false), BaseType(OutTS, "."), InBaseType(InTS, "."),
Emitter(Emitter) {
@@ -382,13 +383,13 @@ public:
StringRef Mods = getNextModifiers(Proto, Pos);
while (!Mods.empty()) {
Types.emplace_back(InTS, Mods);
- if (Mods.find('!') != StringRef::npos)
+ if (Mods.contains('!'))
PolymorphicKeyType = Types.size() - 1;
Mods = getNextModifiers(Proto, Pos);
}
- for (auto Type : Types) {
+ for (const auto &Type : Types) {
// If this builtin takes an immediate argument, we need to #define it rather
// than use a standard declaration, so that SemaChecking can range check
// the immediate passed by the user.
@@ -411,14 +412,14 @@ public:
/// transitive closure.
const std::set<Intrinsic *> &getDependencies() const { return Dependencies; }
/// Get the architectural guard string (#ifdef).
- std::string getGuard() const { return Guard; }
+ std::string getArchGuard() const { return ArchGuard; }
+ std::string getTargetGuard() const { return TargetGuard; }
/// Get the non-mangled name.
std::string getName() const { return Name; }
/// Return true if the intrinsic takes an immediate operand.
bool hasImmediate() const {
- return std::any_of(Types.begin(), Types.end(),
- [](const Type &T) { return T.isImmediate(); });
+ return llvm::any_of(Types, [](const Type &T) { return T.isImmediate(); });
}
/// Return the parameter index of the immediate operand.
@@ -442,7 +443,7 @@ public:
/// Return the index that parameter PIndex will sit at
/// in a generated function call. This is often just PIndex,
/// but may not be as things such as multiple-vector operands
- /// and sret parameters need to be taken into accont.
+ /// and sret parameters need to be taken into account.
unsigned getGeneratedParamIdx(unsigned PIndex) {
unsigned Idx = 0;
if (getReturnType().getNumVectors() > 1)
@@ -460,9 +461,11 @@ public:
void setNeededEarly() { NeededEarly = true; }
bool operator<(const Intrinsic &Other) const {
- // Sort lexicographically on a two-tuple (Guard, Name)
- if (Guard != Other.Guard)
- return Guard < Other.Guard;
+ // Sort lexicographically on a three-tuple (ArchGuard, TargetGuard, Name)
+ if (ArchGuard != Other.ArchGuard)
+ return ArchGuard < Other.ArchGuard;
+ if (TargetGuard != Other.TargetGuard)
+ return TargetGuard < Other.TargetGuard;
return Name < Other.Name;
}
@@ -503,6 +506,7 @@ private:
void emitBody(StringRef CallPrefix);
void emitShadowedArgs();
void emitArgumentReversal();
+ void emitReturnVarDecl();
void emitReturnReversal();
void emitReverseVariable(Variable &Dest, Variable &Src);
void emitNewLine();
@@ -546,6 +550,8 @@ class NeonEmitter {
void createIntrinsic(Record *R, SmallVectorImpl<Intrinsic *> &Out);
void genBuiltinsDef(raw_ostream &OS, SmallVectorImpl<Intrinsic *> &Defs);
+ void genStreamingSVECompatibleList(raw_ostream &OS,
+ SmallVectorImpl<Intrinsic *> &Defs);
void genOverloadTypeCheckCode(raw_ostream &OS,
SmallVectorImpl<Intrinsic *> &Defs);
void genIntrinsicRangeCheckCode(raw_ostream &OS,
@@ -555,7 +561,7 @@ public:
/// Called by Intrinsic - this attempts to get an intrinsic that takes
/// the given types as arguments.
Intrinsic &getIntrinsic(StringRef Name, ArrayRef<Type> Types,
- Optional<std::string> MangledName);
+ std::optional<std::string> MangledName);
/// Called by Intrinsic - returns a globally-unique number.
unsigned getUniqueNumber() { return UniqueNumber++; }
@@ -589,6 +595,8 @@ public:
// Emit arm_bf16.h.inc
void runBF16(raw_ostream &o);
+ void runVectorTypes(raw_ostream &o);
+
// Emit all the __builtin prototypes used in arm_neon.h, arm_fp16.h and
// arm_bf16.h
void runHeader(raw_ostream &o);
@@ -732,17 +740,17 @@ Type Type::fromTypedefName(StringRef Name) {
Name = Name.drop_front();
}
- if (Name.startswith("float")) {
+ if (Name.starts_with("float")) {
T.Kind = Float;
Name = Name.drop_front(5);
- } else if (Name.startswith("poly")) {
+ } else if (Name.starts_with("poly")) {
T.Kind = Poly;
Name = Name.drop_front(4);
- } else if (Name.startswith("bfloat")) {
+ } else if (Name.starts_with("bfloat")) {
T.Kind = BFloat16;
Name = Name.drop_front(6);
} else {
- assert(Name.startswith("int"));
+ assert(Name.starts_with("int"));
Name = Name.drop_front(3);
}
@@ -783,7 +791,7 @@ Type Type::fromTypedefName(StringRef Name) {
Name = Name.drop_front(I);
}
- assert(Name.startswith("_t") && "Malformed typedef!");
+ assert(Name.starts_with("_t") && "Malformed typedef!");
return T;
}
@@ -817,19 +825,19 @@ void Type::applyTypespec(bool &Quad) {
break;
case 'h':
Kind = Float;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 's':
ElementBitwidth = 16;
break;
case 'f':
Kind = Float;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 'i':
ElementBitwidth = 32;
break;
case 'd':
Kind = Float;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 'l':
ElementBitwidth = 64;
break;
@@ -951,7 +959,7 @@ std::string Intrinsic::getInstTypeCode(Type T, ClassKind CK) const {
char typeCode = '\0';
bool printNumber = true;
- if (CK == ClassB)
+ if (CK == ClassB && TargetGuard == "")
return "";
if (T.isBFloat16())
@@ -975,7 +983,7 @@ std::string Intrinsic::getInstTypeCode(Type T, ClassKind CK) const {
break;
}
}
- if (CK == ClassB) {
+ if (CK == ClassB && TargetGuard == "") {
typeCode = '\0';
}
@@ -1077,7 +1085,7 @@ std::string Intrinsic::mangleName(std::string Name, ClassKind LocalCK) const {
S += "_" + getInstTypeCode(InBaseType, LocalCK);
}
- if (LocalCK == ClassB)
+ if (LocalCK == ClassB && TargetGuard == "")
S += "_v";
// Insert a 'q' before the first '_' character so that it ends up before
@@ -1137,10 +1145,14 @@ void Intrinsic::initVariables() {
}
void Intrinsic::emitPrototype(StringRef NamePrefix) {
- if (UseMacro)
+ if (UseMacro) {
OS << "#define ";
- else
- OS << "__ai " << Types[0].str() << " ";
+ } else {
+ OS << "__ai ";
+ if (TargetGuard != "")
+ OS << "__attribute__((target(\"" << TargetGuard << "\"))) ";
+ OS << Types[0].str() << " ";
+ }
OS << NamePrefix.str() << mangleName(Name, ClassS) << "(";
@@ -1229,6 +1241,15 @@ void Intrinsic::emitArgumentReversal() {
}
}
+void Intrinsic::emitReturnVarDecl() {
+ assert(RetVar.getType() == Types[0]);
+ // Create a return variable, if we're not void.
+ if (!RetVar.getType().isVoid()) {
+ OS << " " << RetVar.getType().str() << " " << RetVar.getName() << ";";
+ emitNewLine();
+ }
+}
+
void Intrinsic::emitReturnReversal() {
if (isBigEndianSafe())
return;
@@ -1271,9 +1292,8 @@ void Intrinsic::emitShadowedArgs() {
}
bool Intrinsic::protoHasScalar() const {
- return std::any_of(Types.begin(), Types.end(), [](const Type &T) {
- return T.isScalar() && !T.isImmediate();
- });
+ return llvm::any_of(
+ Types, [](const Type &T) { return T.isScalar() && !T.isImmediate(); });
}
void Intrinsic::emitBodyAsBuiltinCall() {
@@ -1308,7 +1328,7 @@ void Intrinsic::emitBodyAsBuiltinCall() {
if (LocalCK == ClassB) {
Type T2 = T;
T2.makeOneVector();
- T2.makeInteger(8, /*Signed=*/true);
+ T2.makeInteger(8, /*Sign=*/true);
Cast = "(" + T2.str() + ")";
}
@@ -1355,13 +1375,6 @@ void Intrinsic::emitBodyAsBuiltinCall() {
void Intrinsic::emitBody(StringRef CallPrefix) {
std::vector<std::string> Lines;
- assert(RetVar.getType() == Types[0]);
- // Create a return variable, if we're not void.
- if (!RetVar.getType().isVoid()) {
- OS << " " << RetVar.getType().str() << " " << RetVar.getName() << ";";
- emitNewLine();
- }
-
if (!Body || Body->getValues().empty()) {
// Nothing specific to output - must output a builtin.
emitBodyAsBuiltinCall();
@@ -1462,7 +1475,7 @@ Intrinsic::DagEmitter::emitDagCall(DagInit *DI, bool MatchMangledName) {
N = SI->getAsUnquotedString();
else
N = emitDagArg(DI->getArg(0), "").second;
- Optional<std::string> MangledName;
+ std::optional<std::string> MangledName;
if (MatchMangledName) {
if (Intr.getRecord()->getValueAsBit("isLaneQ"))
N += "q";
@@ -1475,7 +1488,7 @@ Intrinsic::DagEmitter::emitDagCall(DagInit *DI, bool MatchMangledName) {
Intr.Dependencies.insert(&Callee);
// Now create the call itself.
- std::string S = "";
+ std::string S;
if (!Callee.isBigEndianSafe())
S += CallPrefix.str();
S += Callee.getMangledName(true) + "(";
@@ -1641,12 +1654,12 @@ std::pair<Type, std::string> Intrinsic::DagEmitter::emitDagShuffle(DagInit *DI){
std::make_unique<Rev>(Arg1.first.getElementSizeInBits()));
ST.addExpander("MaskExpand",
std::make_unique<MaskExpander>(Arg1.first.getNumElements()));
- ST.evaluate(DI->getArg(2), Elts, None);
+ ST.evaluate(DI->getArg(2), Elts, std::nullopt);
std::string S = "__builtin_shufflevector(" + Arg1.second + ", " + Arg2.second;
for (auto &E : Elts) {
StringRef Name = E->getName();
- assert_with_loc(Name.startswith("sv"),
+ assert_with_loc(Name.starts_with("sv"),
"Incorrect element kind in shuffle mask!");
S += ", " + Name.drop_front(2).str();
}
@@ -1851,6 +1864,9 @@ void Intrinsic::generateImpl(bool ReverseArguments,
OS << " __attribute__((unavailable));";
} else {
emitOpeningBrace();
+ // Emit return variable declaration first as to not trigger
+ // -Wdeclaration-after-statement.
+ emitReturnVarDecl();
emitShadowedArgs();
if (ReverseArguments)
emitArgumentReversal();
@@ -1869,6 +1885,9 @@ void Intrinsic::indexBody() {
CurrentRecord = R;
initVariables();
+ // Emit return variable declaration first as to not trigger
+ // -Wdeclaration-after-statement.
+ emitReturnVarDecl();
emitBody("");
OS.str("");
@@ -1880,7 +1899,7 @@ void Intrinsic::indexBody() {
//===----------------------------------------------------------------------===//
Intrinsic &NeonEmitter::getIntrinsic(StringRef Name, ArrayRef<Type> Types,
- Optional<std::string> MangledName) {
+ std::optional<std::string> MangledName) {
// First, look up the name in the intrinsic map.
assert_with_loc(IntrinsicMap.find(Name.str()) != IntrinsicMap.end(),
("Intrinsic '" + Name + "' not found!").str());
@@ -1916,10 +1935,9 @@ Intrinsic &NeonEmitter::getIntrinsic(StringRef Name, ArrayRef<Type> Types,
continue;
unsigned ArgNum = 0;
- bool MatchingArgumentTypes =
- std::all_of(Types.begin(), Types.end(), [&](const auto &Type) {
- return Type == I.getParamType(ArgNum++);
- });
+ bool MatchingArgumentTypes = llvm::all_of(Types, [&](const auto &Type) {
+ return Type == I.getParamType(ArgNum++);
+ });
if (MatchingArgumentTypes)
GoodVec.push_back(&I);
@@ -1939,7 +1957,8 @@ void NeonEmitter::createIntrinsic(Record *R,
std::string Types = std::string(R->getValueAsString("Types"));
Record *OperationRec = R->getValueAsDef("Operation");
bool BigEndianSafe = R->getValueAsBit("BigEndianSafe");
- std::string Guard = std::string(R->getValueAsString("ArchGuard"));
+ std::string ArchGuard = std::string(R->getValueAsString("ArchGuard"));
+ std::string TargetGuard = std::string(R->getValueAsString("TargetGuard"));
bool IsUnavailable = OperationRec->getValueAsBit("Unavailable");
std::string CartesianProductWith = std::string(R->getValueAsString("CartesianProductWith"));
@@ -1981,7 +2000,7 @@ void NeonEmitter::createIntrinsic(Record *R,
for (auto &I : NewTypeSpecs) {
Entry.emplace_back(R, Name, Proto, I.first, I.second, CK, Body, *this,
- Guard, IsUnavailable, BigEndianSafe);
+ ArchGuard, TargetGuard, IsUnavailable, BigEndianSafe);
Out.push_back(&Entry.back());
}
@@ -1996,22 +2015,55 @@ void NeonEmitter::genBuiltinsDef(raw_ostream &OS,
// We only want to emit a builtin once, and we want to emit them in
// alphabetical order, so use a std::set.
- std::set<std::string> Builtins;
+ std::set<std::pair<std::string, std::string>> Builtins;
for (auto *Def : Defs) {
if (Def->hasBody())
continue;
- std::string S = "BUILTIN(__builtin_neon_" + Def->getMangledName() + ", \"";
-
+ std::string S = "__builtin_neon_" + Def->getMangledName() + ", \"";
S += Def->getBuiltinTypeStr();
- S += "\", \"n\")";
+ S += "\", \"n\"";
- Builtins.insert(S);
+ Builtins.emplace(S, Def->getTargetGuard());
+ }
+
+ for (auto &S : Builtins) {
+ if (S.second == "")
+ OS << "BUILTIN(";
+ else
+ OS << "TARGET_BUILTIN(";
+ OS << S.first;
+ if (S.second == "")
+ OS << ")\n";
+ else
+ OS << ", \"" << S.second << "\")\n";
}
- for (auto &S : Builtins)
- OS << S << "\n";
+ OS << "#endif\n\n";
+}
+
+void NeonEmitter::genStreamingSVECompatibleList(
+ raw_ostream &OS, SmallVectorImpl<Intrinsic *> &Defs) {
+ OS << "#ifdef GET_NEON_STREAMING_COMPAT_FLAG\n";
+
+ std::set<std::string> Emitted;
+ for (auto *Def : Defs) {
+ // If the def has a body (that is, it has Operation DAGs), it won't call
+ // __builtin_neon_* so we don't need to generate a definition for it.
+ if (Def->hasBody())
+ continue;
+
+ std::string Name = Def->getMangledName();
+ if (Emitted.find(Name) != Emitted.end())
+ continue;
+
+ // FIXME: We should make exceptions here for some NEON builtins that are
+ // permitted in streaming mode.
+ OS << "case NEON::BI__builtin_neon_" << Name
+ << ": BuiltinType = ArmNonStreaming; break;\n";
+ Emitted.insert(Name);
+ }
OS << "#endif\n\n";
}
@@ -2025,10 +2077,10 @@ void NeonEmitter::genOverloadTypeCheckCode(raw_ostream &OS,
// definitions may extend the number of permitted types (i.e. augment the
// Mask). Use std::map to avoid sorting the table by hash number.
struct OverloadInfo {
- uint64_t Mask;
- int PtrArgNum;
- bool HasConstPtr;
- OverloadInfo() : Mask(0ULL), PtrArgNum(0), HasConstPtr(false) {}
+ uint64_t Mask = 0ULL;
+ int PtrArgNum = 0;
+ bool HasConstPtr = false;
+ OverloadInfo() = default;
};
std::map<std::string, OverloadInfo> OverloadMap;
@@ -2062,12 +2114,13 @@ void NeonEmitter::genOverloadTypeCheckCode(raw_ostream &OS,
std::string Name = Def->getName();
// Omit type checking for the pointer arguments of vld1_lane, vld1_dup,
- // and vst1_lane intrinsics. Using a pointer to the vector element
- // type with one of those operations causes codegen to select an aligned
- // load/store instruction. If you want an unaligned operation,
- // the pointer argument needs to have less alignment than element type,
- // so just accept any pointer type.
- if (Name == "vld1_lane" || Name == "vld1_dup" || Name == "vst1_lane") {
+ // vst1_lane, vldap1_lane, and vstl1_lane intrinsics. Using a pointer to
+ // the vector element type with one of those operations causes codegen to
+ // select an aligned load/store instruction. If you want an unaligned
+ // operation, the pointer argument needs to have less alignment than element
+ // type, so just accept any pointer type.
+ if (Name == "vld1_lane" || Name == "vld1_dup" || Name == "vst1_lane" ||
+ Name == "vldap1_lane" || Name == "vstl1_lane") {
PtrArgNum = -1;
HasConstPtr = false;
}
@@ -2197,6 +2250,8 @@ void NeonEmitter::runHeader(raw_ostream &OS) {
// Generate ARM overloaded type checking code for SemaChecking.cpp
genOverloadTypeCheckCode(OS, Defs);
+ genStreamingSVECompatibleList(OS, Defs);
+
// Generate ARM range checking code for shift/lane immediates.
genIntrinsicRangeCheckCode(OS, Defs);
}
@@ -2328,18 +2383,9 @@ void NeonEmitter::run(raw_ostream &OS) {
OS << "#include <stdint.h>\n\n";
- OS << "#ifdef __ARM_FEATURE_BF16\n";
OS << "#include <arm_bf16.h>\n";
- OS << "typedef __bf16 bfloat16_t;\n";
- OS << "#endif\n\n";
- // Emit NEON-specific scalar typedefs.
- OS << "typedef float float32_t;\n";
- OS << "typedef __fp16 float16_t;\n";
-
- OS << "#ifdef __aarch64__\n";
- OS << "typedef double float64_t;\n";
- OS << "#endif\n\n";
+ OS << "#include <arm_vector_types.h>\n";
// For now, signedness of polynomial types depends on target
OS << "#ifdef __aarch64__\n";
@@ -2352,12 +2398,7 @@ void NeonEmitter::run(raw_ostream &OS) {
OS << "typedef int16_t poly16_t;\n";
OS << "typedef int64_t poly64_t;\n";
OS << "#endif\n";
-
- emitNeonTypeDefs("cQcsQsiQilQlUcQUcUsQUsUiQUiUlQUlhQhfQfdQdPcQPcPsQPsPlQPl", OS);
-
- OS << "#ifdef __ARM_FEATURE_BF16\n";
- emitNeonTypeDefs("bQb", OS);
- OS << "#endif\n\n";
+ emitNeonTypeDefs("PcQPcPsQPsPlQPl", OS);
OS << "#define __ai static __inline__ __attribute__((__always_inline__, "
"__nodebug__))\n\n";
@@ -2393,10 +2434,10 @@ void NeonEmitter::run(raw_ostream &OS) {
}
// Emit #endif/#if pair if needed.
- if ((*I)->getGuard() != InGuard) {
+ if ((*I)->getArchGuard() != InGuard) {
if (!InGuard.empty())
OS << "#endif\n";
- InGuard = (*I)->getGuard();
+ InGuard = (*I)->getArchGuard();
if (!InGuard.empty())
OS << "#if " << InGuard << "\n";
}
@@ -2502,10 +2543,10 @@ void NeonEmitter::runFP16(raw_ostream &OS) {
}
// Emit #endif/#if pair if needed.
- if ((*I)->getGuard() != InGuard) {
+ if ((*I)->getArchGuard() != InGuard) {
if (!InGuard.empty())
OS << "#endif\n";
- InGuard = (*I)->getGuard();
+ InGuard = (*I)->getArchGuard();
if (!InGuard.empty())
OS << "#if " << InGuard << "\n";
}
@@ -2526,6 +2567,38 @@ void NeonEmitter::runFP16(raw_ostream &OS) {
OS << "#endif /* __ARM_FP16_H */\n";
}
+void NeonEmitter::runVectorTypes(raw_ostream &OS) {
+ OS << "/*===---- arm_vector_types - ARM vector type "
+ "------===\n"
+ " *\n"
+ " *\n"
+ " * Part of the LLVM Project, under the Apache License v2.0 with LLVM "
+ "Exceptions.\n"
+ " * See https://llvm.org/LICENSE.txt for license information.\n"
+ " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n"
+ " *\n"
+ " *===-----------------------------------------------------------------"
+ "------===\n"
+ " */\n\n";
+ OS << "#if !defined(__ARM_NEON_H) && !defined(__ARM_SVE_H)\n";
+ OS << "#error \"This file should not be used standalone. Please include"
+ " arm_neon.h or arm_sve.h instead\"\n\n";
+ OS << "#endif\n";
+ OS << "#ifndef __ARM_NEON_TYPES_H\n";
+ OS << "#define __ARM_NEON_TYPES_H\n";
+ OS << "typedef float float32_t;\n";
+ OS << "typedef __fp16 float16_t;\n";
+
+ OS << "#ifdef __aarch64__\n";
+ OS << "typedef double float64_t;\n";
+ OS << "#endif\n\n";
+
+ emitNeonTypeDefs("cQcsQsiQilQlUcQUcUsQUsUiQUiUlQUlhQhfQfdQd", OS);
+
+ emitNeonTypeDefs("bQb", OS);
+ OS << "#endif // __ARM_NEON_TYPES_H\n";
+}
+
void NeonEmitter::runBF16(raw_ostream &OS) {
OS << "/*===---- arm_bf16.h - ARM BF16 intrinsics "
"-----------------------------------===\n"
@@ -2579,10 +2652,10 @@ void NeonEmitter::runBF16(raw_ostream &OS) {
}
// Emit #endif/#if pair if needed.
- if ((*I)->getGuard() != InGuard) {
+ if ((*I)->getArchGuard() != InGuard) {
if (!InGuard.empty())
OS << "#endif\n";
- InGuard = (*I)->getGuard();
+ InGuard = (*I)->getArchGuard();
if (!InGuard.empty())
OS << "#if " << InGuard << "\n";
}
@@ -2620,6 +2693,10 @@ void clang::EmitNeonSema(RecordKeeper &Records, raw_ostream &OS) {
NeonEmitter(Records).runHeader(OS);
}
+void clang::EmitVectorTypes(RecordKeeper &Records, raw_ostream &OS) {
+ NeonEmitter(Records).runVectorTypes(OS);
+}
+
void clang::EmitNeonTest(RecordKeeper &Records, raw_ostream &OS) {
llvm_unreachable("Neon test generation no longer implemented!");
}
diff --git a/contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp
index 24f2250c9ae0..2ca47f1ba59f 100644
--- a/contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -14,205 +14,87 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Support/RISCVVIntrinsicUtils.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/Record.h"
#include <numeric>
+#include <optional>
using namespace llvm;
-using BasicType = char;
-using VScaleVal = Optional<unsigned>;
+using namespace clang::RISCV;
namespace {
+struct SemaRecord {
+ // Intrinsic name, e.g. vadd_vv
+ std::string Name;
-// Exponential LMUL
-struct LMULType {
- int Log2LMUL;
- LMULType(int Log2LMUL);
- // Return the C/C++ string representation of LMUL
- std::string str() const;
- Optional<unsigned> getScale(unsigned ElementBitwidth) const;
- void MulLog2LMUL(int Log2LMUL);
- LMULType &operator*=(uint32_t RHS);
-};
+ // Overloaded intrinsic name, could be empty if can be computed from Name
+ // e.g. vadd
+ std::string OverloadedName;
-// This class is compact representation of a valid and invalid RVVType.
-class RVVType {
- enum ScalarTypeKind : uint32_t {
- Void,
- Size_t,
- Ptrdiff_t,
- UnsignedLong,
- SignedLong,
- Boolean,
- SignedInteger,
- UnsignedInteger,
- Float,
- Invalid,
- };
- BasicType BT;
- ScalarTypeKind ScalarType = Invalid;
- LMULType LMUL;
- bool IsPointer = false;
- // IsConstant indices are "int", but have the constant expression.
- bool IsImmediate = false;
- // Const qualifier for pointer to const object or object of const type.
- bool IsConstant = false;
- unsigned ElementBitwidth = 0;
- VScaleVal Scale = 0;
- bool Valid;
-
- std::string BuiltinStr;
- std::string ClangBuiltinStr;
- std::string Str;
- std::string ShortStr;
+ // Supported type, mask of BasicType.
+ unsigned TypeRangeMask;
-public:
- RVVType() : RVVType(BasicType(), 0, StringRef()) {}
- RVVType(BasicType BT, int Log2LMUL, StringRef prototype);
-
- // Return the string representation of a type, which is an encoded string for
- // passing to the BUILTIN() macro in Builtins.def.
- const std::string &getBuiltinStr() const { return BuiltinStr; }
-
- // Return the clang buitlin type for RVV vector type which are used in the
- // riscv_vector.h header file.
- const std::string &getClangBuiltinStr() const { return ClangBuiltinStr; }
-
- // Return the C/C++ string representation of a type for use in the
- // riscv_vector.h header file.
- const std::string &getTypeStr() const { return Str; }
-
- // Return the short name of a type for C/C++ name suffix.
- const std::string &getShortStr() {
- // Not all types are used in short name, so compute the short name by
- // demanded.
- if (ShortStr.empty())
- initShortStr();
- return ShortStr;
- }
+ // Supported LMUL.
+ unsigned Log2LMULMask;
- bool isValid() const { return Valid; }
- bool isScalar() const { return Scale.hasValue() && Scale.getValue() == 0; }
- bool isVector() const { return Scale.hasValue() && Scale.getValue() != 0; }
- bool isFloat() const { return ScalarType == ScalarTypeKind::Float; }
- bool isSignedInteger() const {
- return ScalarType == ScalarTypeKind::SignedInteger;
- }
- bool isFloatVector(unsigned Width) const {
- return isVector() && isFloat() && ElementBitwidth == Width;
- }
- bool isFloat(unsigned Width) const {
- return isFloat() && ElementBitwidth == Width;
- }
+ // Required extensions for this intrinsic.
+ uint32_t RequiredExtensions;
-private:
- // Verify RVV vector type and set Valid.
- bool verifyType() const;
-
- // Creates a type based on basic types of TypeRange
- void applyBasicType();
-
- // Applies a prototype modifier to the current type. The result maybe an
- // invalid type.
- void applyModifier(StringRef prototype);
-
- // Compute and record a string for legal type.
- void initBuiltinStr();
- // Compute and record a builtin RVV vector type string.
- void initClangBuiltinStr();
- // Compute and record a type string for used in the header.
- void initTypeStr();
- // Compute and record a short name of a type for C/C++ name suffix.
- void initShortStr();
-};
+ // Prototype for this intrinsic.
+ SmallVector<PrototypeDescriptor> Prototype;
-using RVVTypePtr = RVVType *;
-using RVVTypes = std::vector<RVVTypePtr>;
+ // Suffix of intrinsic name.
+ SmallVector<PrototypeDescriptor> Suffix;
-enum RISCVExtension : uint8_t {
- Basic = 0,
- F = 1 << 1,
- D = 1 << 2,
- Zfh = 1 << 3,
- Zvamo = 1 << 4,
- Zvlsseg = 1 << 5,
-};
+ // Suffix of overloaded intrinsic name.
+ SmallVector<PrototypeDescriptor> OverloadedSuffix;
-// TODO refactor RVVIntrinsic class design after support all intrinsic
-// combination. This represents an instantiation of an intrinsic with a
-// particular type and prototype
-class RVVIntrinsic {
+ // Number of field, large than 1 if it's segment load/store.
+ unsigned NF;
+ bool HasMasked :1;
+ bool HasVL :1;
+ bool HasMaskedOffOperand :1;
+ bool HasTailPolicy : 1;
+ bool HasMaskPolicy : 1;
+ bool HasFRMRoundModeOp : 1;
+ bool IsTuple : 1;
+ uint8_t UnMaskedPolicyScheme : 2;
+ uint8_t MaskedPolicyScheme : 2;
+};
+
+// Compressed function signature table.
+class SemaSignatureTable {
private:
- std::string Name; // Builtin name
- std::string MangledName;
- std::string IRName;
- bool HasSideEffects;
- bool IsMask;
- bool HasMaskedOffOperand;
- bool HasVL;
- bool HasNoMaskedOverloaded;
- bool HasAutoDef; // There is automiatic definition in header
- std::string ManualCodegen;
- RVVTypePtr OutputType; // Builtin output type
- RVVTypes InputTypes; // Builtin input types
- // The types we use to obtain the specific LLVM intrinsic. They are index of
- // InputTypes. -1 means the return type.
- std::vector<int64_t> IntrinsicTypes;
- uint8_t RISCVExtensions = 0;
- unsigned NF = 1;
+ std::vector<PrototypeDescriptor> SignatureTable;
+
+ void insert(ArrayRef<PrototypeDescriptor> Signature);
public:
- RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName,
- StringRef MangledSuffix, StringRef IRName, bool HasSideEffects,
- bool IsMask, bool HasMaskedOffOperand, bool HasVL,
- bool HasNoMaskedOverloaded, bool HasAutoDef,
- StringRef ManualCodegen, const RVVTypes &Types,
- const std::vector<int64_t> &IntrinsicTypes,
- StringRef RequiredExtension, unsigned NF);
- ~RVVIntrinsic() = default;
-
- StringRef getName() const { return Name; }
- StringRef getMangledName() const { return MangledName; }
- bool hasSideEffects() const { return HasSideEffects; }
- bool hasMaskedOffOperand() const { return HasMaskedOffOperand; }
- bool hasVL() const { return HasVL; }
- bool hasNoMaskedOverloaded() const { return HasNoMaskedOverloaded; }
- bool hasManualCodegen() const { return !ManualCodegen.empty(); }
- bool hasAutoDef() const { return HasAutoDef; }
- bool isMask() const { return IsMask; }
- StringRef getIRName() const { return IRName; }
- StringRef getManualCodegen() const { return ManualCodegen; }
- uint8_t getRISCVExtensions() const { return RISCVExtensions; }
- unsigned getNF() const { return NF; }
-
- // Return the type string for a BUILTIN() macro in Builtins.def.
- std::string getBuiltinTypeStr() const;
-
- // Emit the code block for switch body in EmitRISCVBuiltinExpr, it should
- // init the RVVIntrinsic ID and IntrinsicTypes.
- void emitCodeGenSwitchBody(raw_ostream &o) const;
-
- // Emit the macros for mapping C/C++ intrinsic function to builtin functions.
- void emitIntrinsicMacro(raw_ostream &o) const;
-
- // Emit the mangled function definition.
- void emitMangledFuncDef(raw_ostream &o) const;
+ static constexpr unsigned INVALID_INDEX = ~0U;
+
+ // Create compressed signature table from SemaRecords.
+ void init(ArrayRef<SemaRecord> SemaRecords);
+
+ // Query the Signature, return INVALID_INDEX if not found.
+ unsigned getIndex(ArrayRef<PrototypeDescriptor> Signature);
+
+ /// Print signature table in RVVHeader Record to \p OS
+ void print(raw_ostream &OS);
};
class RVVEmitter {
private:
RecordKeeper &Records;
- std::string HeaderCode;
- // Concat BasicType, LMUL and Proto as key
- StringMap<RVVType> LegalTypes;
- StringSet<> IllegalTypes;
+ RVVTypeCache TypeCache;
public:
RVVEmitter(RecordKeeper &R) : Records(R) {}
@@ -226,619 +108,113 @@ public:
/// Emit all the information needed to map builtin -> LLVM IR intrinsic.
void createCodeGen(raw_ostream &o);
- std::string getSuffixStr(char Type, int Log2LMUL, StringRef Prototypes);
+ /// Emit all the information needed by SemaRISCVVectorLookup.cpp.
+ /// We've large number of intrinsic function for RVV, creating a customized
+ /// could speed up the compilation time.
+ void createSema(raw_ostream &o);
private:
- /// Create all intrinsics and add them to \p Out
- void createRVVIntrinsics(std::vector<std::unique_ptr<RVVIntrinsic>> &Out);
- /// Compute output and input types by applying different config (basic type
- /// and LMUL with type transformers). It also record result of type in legal
- /// or illegal set to avoid compute the same config again. The result maybe
- /// have illegal RVVType.
- Optional<RVVTypes> computeTypes(BasicType BT, int Log2LMUL, unsigned NF,
- ArrayRef<std::string> PrototypeSeq);
- Optional<RVVTypePtr> computeType(BasicType BT, int Log2LMUL, StringRef Proto);
-
- /// Emit Acrh predecessor definitions and body, assume the element of Defs are
- /// sorted by extension.
- void emitArchMacroAndBody(
- std::vector<std::unique_ptr<RVVIntrinsic>> &Defs, raw_ostream &o,
- std::function<void(raw_ostream &, const RVVIntrinsic &)>);
-
- // Emit the architecture preprocessor definitions. Return true when emits
- // non-empty string.
- bool emitExtDefStr(uint8_t Extensions, raw_ostream &o);
- // Slice Prototypes string into sub prototype string and process each sub
- // prototype string individually in the Handler.
- void parsePrototypes(StringRef Prototypes,
- std::function<void(StringRef)> Handler);
+ /// Create all intrinsics and add them to \p Out and SemaRecords.
+ void createRVVIntrinsics(std::vector<std::unique_ptr<RVVIntrinsic>> &Out,
+ std::vector<SemaRecord> *SemaRecords = nullptr);
+ /// Create all intrinsic records and SemaSignatureTable from SemaRecords.
+ void createRVVIntrinsicRecords(std::vector<RVVIntrinsicRecord> &Out,
+ SemaSignatureTable &SST,
+ ArrayRef<SemaRecord> SemaRecords);
+
+ /// Print HeaderCode in RVVHeader Record to \p Out
+ void printHeaderCode(raw_ostream &OS);
};
} // namespace
-//===----------------------------------------------------------------------===//
-// Type implementation
-//===----------------------------------------------------------------------===//
-
-LMULType::LMULType(int NewLog2LMUL) {
- // Check Log2LMUL is -3, -2, -1, 0, 1, 2, 3
- assert(NewLog2LMUL <= 3 && NewLog2LMUL >= -3 && "Bad LMUL number!");
- Log2LMUL = NewLog2LMUL;
-}
-
-std::string LMULType::str() const {
- if (Log2LMUL < 0)
- return "mf" + utostr(1ULL << (-Log2LMUL));
- return "m" + utostr(1ULL << Log2LMUL);
-}
-
-VScaleVal LMULType::getScale(unsigned ElementBitwidth) const {
- int Log2ScaleResult = 0;
- switch (ElementBitwidth) {
- default:
- break;
- case 8:
- Log2ScaleResult = Log2LMUL + 3;
- break;
- case 16:
- Log2ScaleResult = Log2LMUL + 2;
- break;
- case 32:
- Log2ScaleResult = Log2LMUL + 1;
- break;
- case 64:
- Log2ScaleResult = Log2LMUL;
- break;
- }
- // Illegal vscale result would be less than 1
- if (Log2ScaleResult < 0)
- return None;
- return 1 << Log2ScaleResult;
-}
-
-void LMULType::MulLog2LMUL(int log2LMUL) { Log2LMUL += log2LMUL; }
-
-LMULType &LMULType::operator*=(uint32_t RHS) {
- assert(isPowerOf2_32(RHS));
- this->Log2LMUL = this->Log2LMUL + Log2_32(RHS);
- return *this;
-}
-
-RVVType::RVVType(BasicType BT, int Log2LMUL, StringRef prototype)
- : BT(BT), LMUL(LMULType(Log2LMUL)) {
- applyBasicType();
- applyModifier(prototype);
- Valid = verifyType();
- if (Valid) {
- initBuiltinStr();
- initTypeStr();
- if (isVector()) {
- initClangBuiltinStr();
- }
- }
-}
-
-// clang-format off
-// boolean type are encoded the ratio of n (SEW/LMUL)
-// SEW/LMUL | 1 | 2 | 4 | 8 | 16 | 32 | 64
-// c type | vbool64_t | vbool32_t | vbool16_t | vbool8_t | vbool4_t | vbool2_t | vbool1_t
-// IR type | nxv1i1 | nxv2i1 | nxv4i1 | nxv8i1 | nxv16i1 | nxv32i1 | nxv64i1
-
-// type\lmul | 1/8 | 1/4 | 1/2 | 1 | 2 | 4 | 8
-// -------- |------ | -------- | ------- | ------- | -------- | -------- | --------
-// i64 | N/A | N/A | N/A | nxv1i64 | nxv2i64 | nxv4i64 | nxv8i64
-// i32 | N/A | N/A | nxv1i32 | nxv2i32 | nxv4i32 | nxv8i32 | nxv16i32
-// i16 | N/A | nxv1i16 | nxv2i16 | nxv4i16 | nxv8i16 | nxv16i16 | nxv32i16
-// i8 | nxv1i8 | nxv2i8 | nxv4i8 | nxv8i8 | nxv16i8 | nxv32i8 | nxv64i8
-// double | N/A | N/A | N/A | nxv1f64 | nxv2f64 | nxv4f64 | nxv8f64
-// float | N/A | N/A | nxv1f32 | nxv2f32 | nxv4f32 | nxv8f32 | nxv16f32
-// half | N/A | nxv1f16 | nxv2f16 | nxv4f16 | nxv8f16 | nxv16f16 | nxv32f16
-// clang-format on
-
-bool RVVType::verifyType() const {
- if (ScalarType == Invalid)
- return false;
- if (isScalar())
- return true;
- if (!Scale.hasValue())
- return false;
- if (isFloat() && ElementBitwidth == 8)
- return false;
- unsigned V = Scale.getValue();
- switch (ElementBitwidth) {
- case 1:
- case 8:
- // Check Scale is 1,2,4,8,16,32,64
- return (V <= 64 && isPowerOf2_32(V));
- case 16:
- // Check Scale is 1,2,4,8,16,32
- return (V <= 32 && isPowerOf2_32(V));
- case 32:
- // Check Scale is 1,2,4,8,16
- return (V <= 16 && isPowerOf2_32(V));
- case 64:
- // Check Scale is 1,2,4,8
- return (V <= 8 && isPowerOf2_32(V));
- }
- return false;
-}
-
-void RVVType::initBuiltinStr() {
- assert(isValid() && "RVVType is invalid");
- switch (ScalarType) {
- case ScalarTypeKind::Void:
- BuiltinStr = "v";
- return;
- case ScalarTypeKind::Size_t:
- BuiltinStr = "z";
- if (IsImmediate)
- BuiltinStr = "I" + BuiltinStr;
- if (IsPointer)
- BuiltinStr += "*";
- return;
- case ScalarTypeKind::Ptrdiff_t:
- BuiltinStr = "Y";
- return;
- case ScalarTypeKind::UnsignedLong:
- BuiltinStr = "ULi";
- return;
- case ScalarTypeKind::SignedLong:
- BuiltinStr = "Li";
- return;
- case ScalarTypeKind::Boolean:
- assert(ElementBitwidth == 1);
- BuiltinStr += "b";
- break;
- case ScalarTypeKind::SignedInteger:
- case ScalarTypeKind::UnsignedInteger:
- switch (ElementBitwidth) {
- case 8:
- BuiltinStr += "c";
- break;
- case 16:
- BuiltinStr += "s";
- break;
- case 32:
- BuiltinStr += "i";
- break;
- case 64:
- BuiltinStr += "Wi";
- break;
- default:
- llvm_unreachable("Unhandled ElementBitwidth!");
- }
- if (isSignedInteger())
- BuiltinStr = "S" + BuiltinStr;
- else
- BuiltinStr = "U" + BuiltinStr;
- break;
- case ScalarTypeKind::Float:
- switch (ElementBitwidth) {
- case 16:
- BuiltinStr += "x";
- break;
- case 32:
- BuiltinStr += "f";
- break;
- case 64:
- BuiltinStr += "d";
- break;
- default:
- llvm_unreachable("Unhandled ElementBitwidth!");
- }
- break;
- default:
- llvm_unreachable("ScalarType is invalid!");
- }
- if (IsImmediate)
- BuiltinStr = "I" + BuiltinStr;
- if (isScalar()) {
- if (IsConstant)
- BuiltinStr += "C";
- if (IsPointer)
- BuiltinStr += "*";
- return;
- }
- BuiltinStr = "q" + utostr(Scale.getValue()) + BuiltinStr;
- // Pointer to vector types. Defined for Zvlsseg load intrinsics.
- // Zvlsseg load intrinsics have pointer type arguments to store the loaded
- // vector values.
- if (IsPointer)
- BuiltinStr += "*";
-}
-
-void RVVType::initClangBuiltinStr() {
- assert(isValid() && "RVVType is invalid");
- assert(isVector() && "Handle Vector type only");
-
- ClangBuiltinStr = "__rvv_";
- switch (ScalarType) {
- case ScalarTypeKind::Boolean:
- ClangBuiltinStr += "bool" + utostr(64 / Scale.getValue()) + "_t";
- return;
- case ScalarTypeKind::Float:
- ClangBuiltinStr += "float";
- break;
- case ScalarTypeKind::SignedInteger:
- ClangBuiltinStr += "int";
- break;
- case ScalarTypeKind::UnsignedInteger:
- ClangBuiltinStr += "uint";
- break;
- default:
- llvm_unreachable("ScalarTypeKind is invalid");
- }
- ClangBuiltinStr += utostr(ElementBitwidth) + LMUL.str() + "_t";
-}
-
-void RVVType::initTypeStr() {
- assert(isValid() && "RVVType is invalid");
-
- if (IsConstant)
- Str += "const ";
-
- auto getTypeString = [&](StringRef TypeStr) {
- if (isScalar())
- return Twine(TypeStr + Twine(ElementBitwidth) + "_t").str();
- return Twine("v" + TypeStr + Twine(ElementBitwidth) + LMUL.str() + "_t")
- .str();
- };
-
- switch (ScalarType) {
- case ScalarTypeKind::Void:
- Str = "void";
- return;
- case ScalarTypeKind::Size_t:
- Str = "size_t";
- if (IsPointer)
- Str += " *";
- return;
- case ScalarTypeKind::Ptrdiff_t:
- Str = "ptrdiff_t";
- return;
- case ScalarTypeKind::UnsignedLong:
- Str = "unsigned long";
- return;
- case ScalarTypeKind::SignedLong:
- Str = "long";
- return;
- case ScalarTypeKind::Boolean:
- if (isScalar())
- Str += "bool";
- else
- // Vector bool is special case, the formulate is
- // `vbool<N>_t = MVT::nxv<64/N>i1` ex. vbool16_t = MVT::4i1
- Str += "vbool" + utostr(64 / Scale.getValue()) + "_t";
- break;
- case ScalarTypeKind::Float:
- if (isScalar()) {
- if (ElementBitwidth == 64)
- Str += "double";
- else if (ElementBitwidth == 32)
- Str += "float";
- else if (ElementBitwidth == 16)
- Str += "_Float16";
- else
- llvm_unreachable("Unhandled floating type.");
- } else
- Str += getTypeString("float");
- break;
- case ScalarTypeKind::SignedInteger:
- Str += getTypeString("int");
- break;
- case ScalarTypeKind::UnsignedInteger:
- Str += getTypeString("uint");
- break;
- default:
- llvm_unreachable("ScalarType is invalid!");
- }
- if (IsPointer)
- Str += " *";
-}
-
-void RVVType::initShortStr() {
- switch (ScalarType) {
- case ScalarTypeKind::Boolean:
- assert(isVector());
- ShortStr = "b" + utostr(64 / Scale.getValue());
- return;
- case ScalarTypeKind::Float:
- ShortStr = "f" + utostr(ElementBitwidth);
- break;
- case ScalarTypeKind::SignedInteger:
- ShortStr = "i" + utostr(ElementBitwidth);
- break;
- case ScalarTypeKind::UnsignedInteger:
- ShortStr = "u" + utostr(ElementBitwidth);
- break;
- default:
- PrintFatalError("Unhandled case!");
- }
- if (isVector())
- ShortStr += LMUL.str();
-}
-
-void RVVType::applyBasicType() {
- switch (BT) {
+static BasicType ParseBasicType(char c) {
+ switch (c) {
case 'c':
- ElementBitwidth = 8;
- ScalarType = ScalarTypeKind::SignedInteger;
+ return BasicType::Int8;
break;
case 's':
- ElementBitwidth = 16;
- ScalarType = ScalarTypeKind::SignedInteger;
+ return BasicType::Int16;
break;
case 'i':
- ElementBitwidth = 32;
- ScalarType = ScalarTypeKind::SignedInteger;
+ return BasicType::Int32;
break;
case 'l':
- ElementBitwidth = 64;
- ScalarType = ScalarTypeKind::SignedInteger;
+ return BasicType::Int64;
break;
case 'x':
- ElementBitwidth = 16;
- ScalarType = ScalarTypeKind::Float;
+ return BasicType::Float16;
break;
case 'f':
- ElementBitwidth = 32;
- ScalarType = ScalarTypeKind::Float;
+ return BasicType::Float32;
break;
case 'd':
- ElementBitwidth = 64;
- ScalarType = ScalarTypeKind::Float;
- break;
- default:
- PrintFatalError("Unhandled type code!");
- }
- assert(ElementBitwidth != 0 && "Bad element bitwidth!");
-}
-
-void RVVType::applyModifier(StringRef Transformer) {
- if (Transformer.empty())
- return;
- // Handle primitive type transformer
- auto PType = Transformer.back();
- switch (PType) {
- case 'e':
- Scale = 0;
- break;
- case 'v':
- Scale = LMUL.getScale(ElementBitwidth);
- break;
- case 'w':
- ElementBitwidth *= 2;
- LMUL *= 2;
- Scale = LMUL.getScale(ElementBitwidth);
- break;
- case 'q':
- ElementBitwidth *= 4;
- LMUL *= 4;
- Scale = LMUL.getScale(ElementBitwidth);
- break;
- case 'o':
- ElementBitwidth *= 8;
- LMUL *= 8;
- Scale = LMUL.getScale(ElementBitwidth);
- break;
- case 'm':
- ScalarType = ScalarTypeKind::Boolean;
- Scale = LMUL.getScale(ElementBitwidth);
- ElementBitwidth = 1;
- break;
- case '0':
- ScalarType = ScalarTypeKind::Void;
- break;
- case 'z':
- ScalarType = ScalarTypeKind::Size_t;
- break;
- case 't':
- ScalarType = ScalarTypeKind::Ptrdiff_t;
+ return BasicType::Float64;
break;
- case 'u':
- ScalarType = ScalarTypeKind::UnsignedLong;
- break;
- case 'l':
- ScalarType = ScalarTypeKind::SignedLong;
+ case 'y':
+ return BasicType::BFloat16;
break;
default:
- PrintFatalError("Illegal primitive type transformers!");
- }
- Transformer = Transformer.drop_back();
-
- // Extract and compute complex type transformer. It can only appear one time.
- if (Transformer.startswith("(")) {
- size_t Idx = Transformer.find(')');
- assert(Idx != StringRef::npos);
- StringRef ComplexType = Transformer.slice(1, Idx);
- Transformer = Transformer.drop_front(Idx + 1);
- assert(Transformer.find('(') == StringRef::npos &&
- "Only allow one complex type transformer");
-
- auto UpdateAndCheckComplexProto = [&]() {
- Scale = LMUL.getScale(ElementBitwidth);
- const StringRef VectorPrototypes("vwqom");
- if (!VectorPrototypes.contains(PType))
- PrintFatalError("Complex type transformer only supports vector type!");
- if (Transformer.find_first_of("PCKWS") != StringRef::npos)
- PrintFatalError(
- "Illegal type transformer for Complex type transformer");
- };
- auto ComputeFixedLog2LMUL =
- [&](StringRef Value,
- std::function<bool(const int32_t &, const int32_t &)> Compare) {
- int32_t Log2LMUL;
- Value.getAsInteger(10, Log2LMUL);
- if (!Compare(Log2LMUL, LMUL.Log2LMUL)) {
- ScalarType = Invalid;
- return false;
- }
- // Update new LMUL
- LMUL = LMULType(Log2LMUL);
- UpdateAndCheckComplexProto();
- return true;
- };
- auto ComplexTT = ComplexType.split(":");
- if (ComplexTT.first == "Log2EEW") {
- uint32_t Log2EEW;
- ComplexTT.second.getAsInteger(10, Log2EEW);
- // update new elmul = (eew/sew) * lmul
- LMUL.MulLog2LMUL(Log2EEW - Log2_32(ElementBitwidth));
- // update new eew
- ElementBitwidth = 1 << Log2EEW;
- ScalarType = ScalarTypeKind::SignedInteger;
- UpdateAndCheckComplexProto();
- } else if (ComplexTT.first == "FixedSEW") {
- uint32_t NewSEW;
- ComplexTT.second.getAsInteger(10, NewSEW);
- // Set invalid type if src and dst SEW are same.
- if (ElementBitwidth == NewSEW) {
- ScalarType = Invalid;
- return;
- }
- // Update new SEW
- ElementBitwidth = NewSEW;
- UpdateAndCheckComplexProto();
- } else if (ComplexTT.first == "LFixedLog2LMUL") {
- // New LMUL should be larger than old
- if (!ComputeFixedLog2LMUL(ComplexTT.second, std::greater<int32_t>()))
- return;
- } else if (ComplexTT.first == "SFixedLog2LMUL") {
- // New LMUL should be smaller than old
- if (!ComputeFixedLog2LMUL(ComplexTT.second, std::less<int32_t>()))
- return;
- } else {
- PrintFatalError("Illegal complex type transformers!");
- }
- }
-
- // Compute the remain type transformers
- for (char I : Transformer) {
- switch (I) {
- case 'P':
- if (IsConstant)
- PrintFatalError("'P' transformer cannot be used after 'C'");
- if (IsPointer)
- PrintFatalError("'P' transformer cannot be used twice");
- IsPointer = true;
- break;
- case 'C':
- if (IsConstant)
- PrintFatalError("'C' transformer cannot be used twice");
- IsConstant = true;
- break;
- case 'K':
- IsImmediate = true;
- break;
- case 'U':
- ScalarType = ScalarTypeKind::UnsignedInteger;
- break;
- case 'I':
- ScalarType = ScalarTypeKind::SignedInteger;
- break;
- case 'F':
- ScalarType = ScalarTypeKind::Float;
- break;
- case 'S':
- LMUL = LMULType(0);
- // Update ElementBitwidth need to update Scale too.
- Scale = LMUL.getScale(ElementBitwidth);
- break;
- default:
- PrintFatalError("Illegal non-primitive type transformer!");
- }
+ return BasicType::Unknown;
}
}
-//===----------------------------------------------------------------------===//
-// RVVIntrinsic implementation
-//===----------------------------------------------------------------------===//
-RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
- StringRef NewMangledName, StringRef MangledSuffix,
- StringRef IRName, bool HasSideEffects, bool IsMask,
- bool HasMaskedOffOperand, bool HasVL,
- bool HasNoMaskedOverloaded, bool HasAutoDef,
- StringRef ManualCodegen, const RVVTypes &OutInTypes,
- const std::vector<int64_t> &NewIntrinsicTypes,
- StringRef RequiredExtension, unsigned NF)
- : IRName(IRName), HasSideEffects(HasSideEffects), IsMask(IsMask),
- HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL),
- HasNoMaskedOverloaded(HasNoMaskedOverloaded), HasAutoDef(HasAutoDef),
- ManualCodegen(ManualCodegen.str()), NF(NF) {
-
- // Init Name and MangledName
- Name = NewName.str();
- if (NewMangledName.empty())
- MangledName = NewName.split("_").first.str();
- else
- MangledName = NewMangledName.str();
- if (!Suffix.empty())
- Name += "_" + Suffix.str();
- if (!MangledSuffix.empty())
- MangledName += "_" + MangledSuffix.str();
- if (IsMask) {
- Name += "_m";
- }
- // Init RISC-V extensions
- for (const auto &T : OutInTypes) {
- if (T->isFloatVector(16) || T->isFloat(16))
- RISCVExtensions |= RISCVExtension::Zfh;
- else if (T->isFloatVector(32) || T->isFloat(32))
- RISCVExtensions |= RISCVExtension::F;
- else if (T->isFloatVector(64) || T->isFloat(64))
- RISCVExtensions |= RISCVExtension::D;
- }
- if (RequiredExtension == "Zvamo")
- RISCVExtensions |= RISCVExtension::Zvamo;
- if (RequiredExtension == "Zvlsseg")
- RISCVExtensions |= RISCVExtension::Zvlsseg;
-
- // Init OutputType and InputTypes
- OutputType = OutInTypes[0];
- InputTypes.assign(OutInTypes.begin() + 1, OutInTypes.end());
-
- // IntrinsicTypes is nonmasked version index. Need to update it
- // if there is maskedoff operand (It is always in first operand).
- IntrinsicTypes = NewIntrinsicTypes;
- if (IsMask && HasMaskedOffOperand) {
- for (auto &I : IntrinsicTypes) {
- if (I >= 0)
- I += NF;
- }
- }
+static VectorTypeModifier getTupleVTM(unsigned NF) {
+ assert(2 <= NF && NF <= 8 && "2 <= NF <= 8");
+ return static_cast<VectorTypeModifier>(
+ static_cast<uint8_t>(VectorTypeModifier::Tuple2) + (NF - 2));
}
-std::string RVVIntrinsic::getBuiltinTypeStr() const {
- std::string S;
- S += OutputType->getBuiltinStr();
- for (const auto &T : InputTypes) {
- S += T->getBuiltinStr();
- }
- return S;
-}
+void emitCodeGenSwitchBody(const RVVIntrinsic *RVVI, raw_ostream &OS) {
+ if (!RVVI->getIRName().empty())
+ OS << " ID = Intrinsic::riscv_" + RVVI->getIRName() + ";\n";
+ if (RVVI->getNF() >= 2)
+ OS << " NF = " + utostr(RVVI->getNF()) + ";\n";
-void RVVIntrinsic::emitCodeGenSwitchBody(raw_ostream &OS) const {
- if (!getIRName().empty())
- OS << " ID = Intrinsic::riscv_" + getIRName() + ";\n";
- if (NF >= 2)
- OS << " NF = " + utostr(getNF()) + ";\n";
- if (hasManualCodegen()) {
- OS << ManualCodegen;
+ OS << " PolicyAttrs = " << RVVI->getPolicyAttrsBits() << ";\n";
+
+ if (RVVI->hasManualCodegen()) {
+ OS << "IsMasked = " << (RVVI->isMasked() ? "true" : "false") << ";\n";
+ OS << RVVI->getManualCodegen();
OS << "break;\n";
return;
}
- if (isMask()) {
- if (hasVL()) {
+ for (const auto &I : enumerate(RVVI->getInputTypes())) {
+ if (I.value()->isPointer()) {
+ assert(RVVI->getIntrinsicTypes().front() == -1 &&
+ "RVVI should be vector load intrinsic.");
+ }
+ }
+
+ if (RVVI->isMasked()) {
+ if (RVVI->hasVL()) {
OS << " std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);\n";
+ if (RVVI->hasPolicyOperand())
+ OS << " Ops.push_back(ConstantInt::get(Ops.back()->getType(),"
+ " PolicyAttrs));\n";
+ if (RVVI->hasMaskedOffOperand() && RVVI->getPolicyAttrs().isTAMAPolicy())
+ OS << " Ops.insert(Ops.begin(), "
+ "llvm::PoisonValue::get(ResultType));\n";
+ // Masked reduction cases.
+ if (!RVVI->hasMaskedOffOperand() && RVVI->hasPassthruOperand() &&
+ RVVI->getPolicyAttrs().isTAMAPolicy())
+ OS << " Ops.insert(Ops.begin(), "
+ "llvm::PoisonValue::get(ResultType));\n";
} else {
OS << " std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());\n";
}
+ } else {
+ if (RVVI->hasPolicyOperand())
+ OS << " Ops.push_back(ConstantInt::get(Ops.back()->getType(), "
+ "PolicyAttrs));\n";
+ else if (RVVI->hasPassthruOperand() && RVVI->getPolicyAttrs().isTAPolicy())
+ OS << " Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));\n";
}
OS << " IntrinsicTypes = {";
ListSeparator LS;
- for (const auto &Idx : IntrinsicTypes) {
+ for (const auto &Idx : RVVI->getIntrinsicTypes()) {
if (Idx == -1)
OS << LS << "ResultType";
else
@@ -847,40 +223,89 @@ void RVVIntrinsic::emitCodeGenSwitchBody(raw_ostream &OS) const {
// VL could be i64 or i32, need to encode it in IntrinsicTypes. VL is
// always last operand.
- if (hasVL())
+ if (RVVI->hasVL())
OS << ", Ops.back()->getType()";
OS << "};\n";
OS << " break;\n";
}
-void RVVIntrinsic::emitIntrinsicMacro(raw_ostream &OS) const {
- OS << "#define " << getName() << "(";
- if (!InputTypes.empty()) {
- ListSeparator LS;
- for (unsigned i = 0, e = InputTypes.size(); i != e; ++i)
- OS << LS << "op" << i;
- }
- OS << ") \\\n";
- OS << "__builtin_rvv_" << getName() << "(";
- if (!InputTypes.empty()) {
- ListSeparator LS;
- for (unsigned i = 0, e = InputTypes.size(); i != e; ++i)
- OS << LS << "(" << InputTypes[i]->getTypeStr() << ")(op" << i << ")";
+//===----------------------------------------------------------------------===//
+// SemaSignatureTable implementation
+//===----------------------------------------------------------------------===//
+void SemaSignatureTable::init(ArrayRef<SemaRecord> SemaRecords) {
+ // Sort signature entries by length, let longer signature insert first, to
+ // make it more possible to reuse table entries, that can reduce ~10% table
+ // size.
+ struct Compare {
+ bool operator()(const SmallVector<PrototypeDescriptor> &A,
+ const SmallVector<PrototypeDescriptor> &B) const {
+ if (A.size() != B.size())
+ return A.size() > B.size();
+
+ size_t Len = A.size();
+ for (size_t i = 0; i < Len; ++i) {
+ if (A[i] != B[i])
+ return A[i] < B[i];
+ }
+
+ return false;
+ }
+ };
+
+ std::set<SmallVector<PrototypeDescriptor>, Compare> Signatures;
+ auto InsertToSignatureSet =
+ [&](const SmallVector<PrototypeDescriptor> &Signature) {
+ if (Signature.empty())
+ return;
+
+ Signatures.insert(Signature);
+ };
+
+ assert(!SemaRecords.empty());
+
+ for (const SemaRecord &SR : SemaRecords) {
+ InsertToSignatureSet(SR.Prototype);
+ InsertToSignatureSet(SR.Suffix);
+ InsertToSignatureSet(SR.OverloadedSuffix);
}
- OS << ")\n";
+
+ for (auto &Sig : Signatures)
+ insert(Sig);
}
-void RVVIntrinsic::emitMangledFuncDef(raw_ostream &OS) const {
- OS << "__attribute__((clang_builtin_alias(";
- OS << "__builtin_rvv_" << getName() << ")))\n";
- OS << OutputType->getTypeStr() << " " << getMangledName() << "(";
- // Emit function arguments
- if (!InputTypes.empty()) {
- ListSeparator LS;
- for (unsigned i = 0; i < InputTypes.size(); ++i)
- OS << LS << InputTypes[i]->getTypeStr() << " op" << i;
+void SemaSignatureTable::insert(ArrayRef<PrototypeDescriptor> Signature) {
+ if (getIndex(Signature) != INVALID_INDEX)
+ return;
+
+ // Insert Signature into SignatureTable if not found in the table.
+ SignatureTable.insert(SignatureTable.begin(), Signature.begin(),
+ Signature.end());
+}
+
+unsigned SemaSignatureTable::getIndex(ArrayRef<PrototypeDescriptor> Signature) {
+ // Empty signature could be point into any index since there is length
+ // field when we use, so just always point it to 0.
+ if (Signature.empty())
+ return 0;
+
+ // Checking Signature already in table or not.
+ if (Signature.size() <= SignatureTable.size()) {
+ size_t Bound = SignatureTable.size() - Signature.size() + 1;
+ for (size_t Index = 0; Index < Bound; ++Index) {
+ if (equal(Signature.begin(), Signature.end(),
+ SignatureTable.begin() + Index))
+ return Index;
+ }
}
- OS << ");\n\n";
+
+ return INVALID_INDEX;
+}
+
+void SemaSignatureTable::print(raw_ostream &OS) {
+ for (const auto &Sig : SignatureTable)
+ OS << "PrototypeDescriptor(" << static_cast<int>(Sig.PT) << ", "
+ << static_cast<int>(Sig.VTM) << ", " << static_cast<int>(Sig.TM)
+ << "),\n";
}
//===----------------------------------------------------------------------===//
@@ -915,13 +340,9 @@ void RVVEmitter::createHeader(raw_ostream &OS) {
OS << "extern \"C\" {\n";
OS << "#endif\n\n";
- std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
- createRVVIntrinsics(Defs);
+ OS << "#pragma clang riscv intrinsic vector\n\n";
- // Print header code
- if (!HeaderCode.empty()) {
- OS << HeaderCode;
- }
+ printHeaderCode(OS);
auto printType = [&](auto T) {
OS << "typedef " << T->getClangBuiltinStr() << " " << T->getTypeStr()
@@ -931,73 +352,66 @@ void RVVEmitter::createHeader(raw_ostream &OS) {
constexpr int Log2LMULs[] = {-3, -2, -1, 0, 1, 2, 3};
// Print RVV boolean types.
for (int Log2LMUL : Log2LMULs) {
- auto T = computeType('c', Log2LMUL, "m");
- if (T.hasValue())
- printType(T.getValue());
+ auto T = TypeCache.computeType(BasicType::Int8, Log2LMUL,
+ PrototypeDescriptor::Mask);
+ if (T)
+ printType(*T);
}
// Print RVV int/float types.
for (char I : StringRef("csil")) {
+ BasicType BT = ParseBasicType(I);
for (int Log2LMUL : Log2LMULs) {
- auto T = computeType(I, Log2LMUL, "v");
- if (T.hasValue()) {
- printType(T.getValue());
- auto UT = computeType(I, Log2LMUL, "Uv");
- printType(UT.getValue());
+ auto T = TypeCache.computeType(BT, Log2LMUL, PrototypeDescriptor::Vector);
+ if (T) {
+ printType(*T);
+ auto UT = TypeCache.computeType(
+ BT, Log2LMUL,
+ PrototypeDescriptor(BaseTypeModifier::Vector,
+ VectorTypeModifier::NoModifier,
+ TypeModifier::UnsignedInteger));
+ printType(*UT);
+ }
+ for (int NF = 2; NF <= 8; ++NF) {
+ auto TupleT = TypeCache.computeType(
+ BT, Log2LMUL,
+ PrototypeDescriptor(BaseTypeModifier::Vector, getTupleVTM(NF),
+ TypeModifier::SignedInteger));
+ auto TupleUT = TypeCache.computeType(
+ BT, Log2LMUL,
+ PrototypeDescriptor(BaseTypeModifier::Vector, getTupleVTM(NF),
+ TypeModifier::UnsignedInteger));
+ if (TupleT)
+ printType(*TupleT);
+ if (TupleUT)
+ printType(*TupleUT);
}
}
}
- OS << "#if defined(__riscv_zfh)\n";
- for (int Log2LMUL : Log2LMULs) {
- auto T = computeType('x', Log2LMUL, "v");
- if (T.hasValue())
- printType(T.getValue());
- }
- OS << "#endif\n";
-
- OS << "#if defined(__riscv_f)\n";
- for (int Log2LMUL : Log2LMULs) {
- auto T = computeType('f', Log2LMUL, "v");
- if (T.hasValue())
- printType(T.getValue());
- }
- OS << "#endif\n";
- OS << "#if defined(__riscv_d)\n";
- for (int Log2LMUL : Log2LMULs) {
- auto T = computeType('d', Log2LMUL, "v");
- if (T.hasValue())
- printType(T.getValue());
+ for (BasicType BT : {BasicType::Float16, BasicType::Float32,
+ BasicType::Float64, BasicType::BFloat16}) {
+ for (int Log2LMUL : Log2LMULs) {
+ auto T = TypeCache.computeType(BT, Log2LMUL, PrototypeDescriptor::Vector);
+ if (T)
+ printType(*T);
+ for (int NF = 2; NF <= 8; ++NF) {
+ auto TupleT = TypeCache.computeType(
+ BT, Log2LMUL,
+ PrototypeDescriptor(BaseTypeModifier::Vector, getTupleVTM(NF),
+ (BT == BasicType::BFloat16
+ ? TypeModifier::BFloat
+ : TypeModifier::Float)));
+ if (TupleT)
+ printType(*TupleT);
+ }
+ }
}
- OS << "#endif\n\n";
-
- // The same extension include in the same arch guard marco.
- std::stable_sort(Defs.begin(), Defs.end(),
- [](const std::unique_ptr<RVVIntrinsic> &A,
- const std::unique_ptr<RVVIntrinsic> &B) {
- return A->getRISCVExtensions() < B->getRISCVExtensions();
- });
-
- // Print intrinsic functions with macro
- emitArchMacroAndBody(Defs, OS, [](raw_ostream &OS, const RVVIntrinsic &Inst) {
- Inst.emitIntrinsicMacro(OS);
- });
OS << "#define __riscv_v_intrinsic_overloading 1\n";
- // Print Overloaded APIs
- OS << "#define __rvv_overloaded static inline "
- "__attribute__((__always_inline__, __nodebug__, __overloadable__))\n";
-
- emitArchMacroAndBody(Defs, OS, [](raw_ostream &OS, const RVVIntrinsic &Inst) {
- if (!Inst.isMask() && !Inst.hasNoMaskedOverloaded())
- return;
- OS << "__rvv_overloaded ";
- Inst.emitMangledFuncDef(OS);
- });
-
OS << "\n#ifdef __cplusplus\n";
OS << "}\n";
- OS << "#endif // __riscv_vector\n";
+ OS << "#endif // __cplusplus\n";
OS << "#endif // __RISCV_VECTOR_H\n";
}
@@ -1005,17 +419,29 @@ void RVVEmitter::createBuiltins(raw_ostream &OS) {
std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
createRVVIntrinsics(Defs);
+ // Map to keep track of which builtin names have already been emitted.
+ StringMap<RVVIntrinsic *> BuiltinMap;
+
OS << "#if defined(TARGET_BUILTIN) && !defined(RISCVV_BUILTIN)\n";
OS << "#define RISCVV_BUILTIN(ID, TYPE, ATTRS) TARGET_BUILTIN(ID, TYPE, "
- "ATTRS, \"experimental-v\")\n";
+ "ATTRS, \"zve32x\")\n";
OS << "#endif\n";
for (auto &Def : Defs) {
- OS << "RISCVV_BUILTIN(__builtin_rvv_" << Def->getName() << ",\""
- << Def->getBuiltinTypeStr() << "\", ";
- if (!Def->hasSideEffects())
- OS << "\"n\")\n";
- else
- OS << "\"\")\n";
+ auto P =
+ BuiltinMap.insert(std::make_pair(Def->getBuiltinName(), Def.get()));
+ if (!P.second) {
+ // Verf that this would have produced the same builtin definition.
+ if (P.first->second->hasBuiltinAlias() != Def->hasBuiltinAlias())
+ PrintFatalError("Builtin with same name has different hasAutoDef");
+ else if (!Def->hasBuiltinAlias() &&
+ P.first->second->getBuiltinTypeStr() != Def->getBuiltinTypeStr())
+ PrintFatalError("Builtin with same name has different type string");
+ continue;
+ }
+ OS << "RISCVV_BUILTIN(__builtin_rvv_" << Def->getBuiltinName() << ",\"";
+ if (!Def->hasBuiltinAlias())
+ OS << Def->getBuiltinTypeStr();
+ OS << "\", \"n\")\n";
}
OS << "#undef RISCVV_BUILTIN\n";
}
@@ -1024,233 +450,319 @@ void RVVEmitter::createCodeGen(raw_ostream &OS) {
std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
createRVVIntrinsics(Defs);
// IR name could be empty, use the stable sort preserves the relative order.
- std::stable_sort(Defs.begin(), Defs.end(),
- [](const std::unique_ptr<RVVIntrinsic> &A,
- const std::unique_ptr<RVVIntrinsic> &B) {
- return A->getIRName() < B->getIRName();
- });
- // Print switch body when the ir name or ManualCodegen changes from previous
- // iteration.
+ llvm::stable_sort(Defs, [](const std::unique_ptr<RVVIntrinsic> &A,
+ const std::unique_ptr<RVVIntrinsic> &B) {
+ if (A->getIRName() == B->getIRName())
+ return (A->getPolicyAttrs() < B->getPolicyAttrs());
+ return (A->getIRName() < B->getIRName());
+ });
+
+ // Map to keep track of which builtin names have already been emitted.
+ StringMap<RVVIntrinsic *> BuiltinMap;
+
+ // Print switch body when the ir name, ManualCodegen or policy changes from
+ // previous iteration.
RVVIntrinsic *PrevDef = Defs.begin()->get();
for (auto &Def : Defs) {
StringRef CurIRName = Def->getIRName();
if (CurIRName != PrevDef->getIRName() ||
- (Def->getManualCodegen() != PrevDef->getManualCodegen())) {
- PrevDef->emitCodeGenSwitchBody(OS);
+ (Def->getManualCodegen() != PrevDef->getManualCodegen()) ||
+ (Def->getPolicyAttrs() != PrevDef->getPolicyAttrs())) {
+ emitCodeGenSwitchBody(PrevDef, OS);
}
PrevDef = Def.get();
- OS << "case RISCV::BI__builtin_rvv_" << Def->getName() << ":\n";
- }
- Defs.back()->emitCodeGenSwitchBody(OS);
- OS << "\n";
-}
-void RVVEmitter::parsePrototypes(StringRef Prototypes,
- std::function<void(StringRef)> Handler) {
- const StringRef Primaries("evwqom0ztul");
- while (!Prototypes.empty()) {
- size_t Idx = 0;
- // Skip over complex prototype because it could contain primitive type
- // character.
- if (Prototypes[0] == '(')
- Idx = Prototypes.find_first_of(')');
- Idx = Prototypes.find_first_of(Primaries, Idx);
- assert(Idx != StringRef::npos);
- Handler(Prototypes.slice(0, Idx + 1));
- Prototypes = Prototypes.drop_front(Idx + 1);
- }
-}
+ auto P =
+ BuiltinMap.insert(std::make_pair(Def->getBuiltinName(), Def.get()));
+ if (P.second) {
+ OS << "case RISCVVector::BI__builtin_rvv_" << Def->getBuiltinName()
+ << ":\n";
+ continue;
+ }
-std::string RVVEmitter::getSuffixStr(char Type, int Log2LMUL,
- StringRef Prototypes) {
- SmallVector<std::string> SuffixStrs;
- parsePrototypes(Prototypes, [&](StringRef Proto) {
- auto T = computeType(Type, Log2LMUL, Proto);
- SuffixStrs.push_back(T.getValue()->getShortStr());
- });
- return join(SuffixStrs, "_");
+ if (P.first->second->getIRName() != Def->getIRName())
+ PrintFatalError("Builtin with same name has different IRName");
+ else if (P.first->second->getManualCodegen() != Def->getManualCodegen())
+ PrintFatalError("Builtin with same name has different ManualCodegen");
+ else if (P.first->second->isMasked() != Def->isMasked())
+ PrintFatalError("Builtin with same name has different isMasked");
+ else if (P.first->second->hasVL() != Def->hasVL())
+ PrintFatalError("Builtin with same name has different hasVL");
+ else if (P.first->second->getPolicyScheme() != Def->getPolicyScheme())
+ PrintFatalError("Builtin with same name has different getPolicyScheme");
+ else if (P.first->second->getIntrinsicTypes() != Def->getIntrinsicTypes())
+ PrintFatalError("Builtin with same name has different IntrinsicTypes");
+ }
+ emitCodeGenSwitchBody(Defs.back().get(), OS);
+ OS << "\n";
}
void RVVEmitter::createRVVIntrinsics(
- std::vector<std::unique_ptr<RVVIntrinsic>> &Out) {
+ std::vector<std::unique_ptr<RVVIntrinsic>> &Out,
+ std::vector<SemaRecord> *SemaRecords) {
std::vector<Record *> RV = Records.getAllDerivedDefinitions("RVVBuiltin");
for (auto *R : RV) {
StringRef Name = R->getValueAsString("Name");
StringRef SuffixProto = R->getValueAsString("Suffix");
- StringRef MangledName = R->getValueAsString("MangledName");
- StringRef MangledSuffixProto = R->getValueAsString("MangledSuffix");
+ StringRef OverloadedName = R->getValueAsString("OverloadedName");
+ StringRef OverloadedSuffixProto = R->getValueAsString("OverloadedSuffix");
StringRef Prototypes = R->getValueAsString("Prototype");
StringRef TypeRange = R->getValueAsString("TypeRange");
- bool HasMask = R->getValueAsBit("HasMask");
+ bool HasMasked = R->getValueAsBit("HasMasked");
bool HasMaskedOffOperand = R->getValueAsBit("HasMaskedOffOperand");
bool HasVL = R->getValueAsBit("HasVL");
- bool HasNoMaskedOverloaded = R->getValueAsBit("HasNoMaskedOverloaded");
- bool HasSideEffects = R->getValueAsBit("HasSideEffects");
+ Record *MPSRecord = R->getValueAsDef("MaskedPolicyScheme");
+ auto MaskedPolicyScheme =
+ static_cast<PolicyScheme>(MPSRecord->getValueAsInt("Value"));
+ Record *UMPSRecord = R->getValueAsDef("UnMaskedPolicyScheme");
+ auto UnMaskedPolicyScheme =
+ static_cast<PolicyScheme>(UMPSRecord->getValueAsInt("Value"));
std::vector<int64_t> Log2LMULList = R->getValueAsListOfInts("Log2LMUL");
+ bool HasTailPolicy = R->getValueAsBit("HasTailPolicy");
+ bool HasMaskPolicy = R->getValueAsBit("HasMaskPolicy");
+ bool SupportOverloading = R->getValueAsBit("SupportOverloading");
+ bool HasBuiltinAlias = R->getValueAsBit("HasBuiltinAlias");
StringRef ManualCodegen = R->getValueAsString("ManualCodegen");
- StringRef ManualCodegenMask = R->getValueAsString("ManualCodegenMask");
std::vector<int64_t> IntrinsicTypes =
R->getValueAsListOfInts("IntrinsicTypes");
- StringRef RequiredExtension = R->getValueAsString("RequiredExtension");
+ std::vector<StringRef> RequiredFeatures =
+ R->getValueAsListOfStrings("RequiredFeatures");
StringRef IRName = R->getValueAsString("IRName");
- StringRef IRNameMask = R->getValueAsString("IRNameMask");
+ StringRef MaskedIRName = R->getValueAsString("MaskedIRName");
unsigned NF = R->getValueAsInt("NF");
+ bool IsTuple = R->getValueAsBit("IsTuple");
+ bool HasFRMRoundModeOp = R->getValueAsBit("HasFRMRoundModeOp");
+
+ const Policy DefaultPolicy;
+ SmallVector<Policy> SupportedUnMaskedPolicies =
+ RVVIntrinsic::getSupportedUnMaskedPolicies();
+ SmallVector<Policy> SupportedMaskedPolicies =
+ RVVIntrinsic::getSupportedMaskedPolicies(HasTailPolicy, HasMaskPolicy);
- StringRef HeaderCodeStr = R->getValueAsString("HeaderCode");
- bool HasAutoDef = HeaderCodeStr.empty();
- if (!HeaderCodeStr.empty()) {
- HeaderCode += HeaderCodeStr.str();
- }
// Parse prototype and create a list of primitive type with transformers
- // (operand) in ProtoSeq. ProtoSeq[0] is output operand.
- SmallVector<std::string> ProtoSeq;
- parsePrototypes(Prototypes, [&ProtoSeq](StringRef Proto) {
- ProtoSeq.push_back(Proto.str());
- });
+ // (operand) in Prototype. Prototype[0] is output operand.
+ SmallVector<PrototypeDescriptor> BasicPrototype =
+ parsePrototypes(Prototypes);
+
+ SmallVector<PrototypeDescriptor> SuffixDesc = parsePrototypes(SuffixProto);
+ SmallVector<PrototypeDescriptor> OverloadedSuffixDesc =
+ parsePrototypes(OverloadedSuffixProto);
// Compute Builtin types
- SmallVector<std::string> ProtoMaskSeq = ProtoSeq;
- if (HasMask) {
- // If HasMaskedOffOperand, insert result type as first input operand.
- if (HasMaskedOffOperand) {
- if (NF == 1) {
- ProtoMaskSeq.insert(ProtoMaskSeq.begin() + 1, ProtoSeq[0]);
- } else {
- // Convert
- // (void, op0 address, op1 address, ...)
- // to
- // (void, op0 address, op1 address, ..., maskedoff0, maskedoff1, ...)
- for (unsigned I = 0; I < NF; ++I)
- ProtoMaskSeq.insert(
- ProtoMaskSeq.begin() + NF + 1,
- ProtoSeq[1].substr(1)); // Use substr(1) to skip '*'
- }
- }
- if (HasMaskedOffOperand && NF > 1) {
- // Convert
- // (void, op0 address, op1 address, ..., maskedoff0, maskedoff1, ...)
- // to
- // (void, op0 address, op1 address, ..., mask, maskedoff0, maskedoff1,
- // ...)
- ProtoMaskSeq.insert(ProtoMaskSeq.begin() + NF + 1, "m");
- } else {
- // If HasMask, insert 'm' as first input operand.
- ProtoMaskSeq.insert(ProtoMaskSeq.begin() + 1, "m");
- }
- }
- // If HasVL, append 'z' to last operand
- if (HasVL) {
- ProtoSeq.push_back("z");
- ProtoMaskSeq.push_back("z");
- }
+ auto Prototype = RVVIntrinsic::computeBuiltinTypes(
+ BasicPrototype, /*IsMasked=*/false,
+ /*HasMaskedOffOperand=*/false, HasVL, NF, UnMaskedPolicyScheme,
+ DefaultPolicy, IsTuple);
+ llvm::SmallVector<PrototypeDescriptor> MaskedPrototype;
+ if (HasMasked)
+ MaskedPrototype = RVVIntrinsic::computeBuiltinTypes(
+ BasicPrototype, /*IsMasked=*/true, HasMaskedOffOperand, HasVL, NF,
+ MaskedPolicyScheme, DefaultPolicy, IsTuple);
// Create Intrinsics for each type and LMUL.
for (char I : TypeRange) {
for (int Log2LMUL : Log2LMULList) {
- Optional<RVVTypes> Types = computeTypes(I, Log2LMUL, NF, ProtoSeq);
+ BasicType BT = ParseBasicType(I);
+ std::optional<RVVTypes> Types =
+ TypeCache.computeTypes(BT, Log2LMUL, NF, Prototype);
// Ignored to create new intrinsic if there are any illegal types.
- if (!Types.hasValue())
+ if (!Types)
continue;
- auto SuffixStr = getSuffixStr(I, Log2LMUL, SuffixProto);
- auto MangledSuffixStr = getSuffixStr(I, Log2LMUL, MangledSuffixProto);
- // Create a non-mask intrinsic
+ auto SuffixStr =
+ RVVIntrinsic::getSuffixStr(TypeCache, BT, Log2LMUL, SuffixDesc);
+ auto OverloadedSuffixStr = RVVIntrinsic::getSuffixStr(
+ TypeCache, BT, Log2LMUL, OverloadedSuffixDesc);
+ // Create a unmasked intrinsic
Out.push_back(std::make_unique<RVVIntrinsic>(
- Name, SuffixStr, MangledName, MangledSuffixStr, IRName,
- HasSideEffects, /*IsMask=*/false, /*HasMaskedOffOperand=*/false,
- HasVL, HasNoMaskedOverloaded, HasAutoDef, ManualCodegen,
- Types.getValue(), IntrinsicTypes, RequiredExtension, NF));
- if (HasMask) {
- // Create a mask intrinsic
- Optional<RVVTypes> MaskTypes =
- computeTypes(I, Log2LMUL, NF, ProtoMaskSeq);
+ Name, SuffixStr, OverloadedName, OverloadedSuffixStr, IRName,
+ /*IsMasked=*/false, /*HasMaskedOffOperand=*/false, HasVL,
+ UnMaskedPolicyScheme, SupportOverloading, HasBuiltinAlias,
+ ManualCodegen, *Types, IntrinsicTypes, RequiredFeatures, NF,
+ DefaultPolicy, HasFRMRoundModeOp));
+ if (UnMaskedPolicyScheme != PolicyScheme::SchemeNone)
+ for (auto P : SupportedUnMaskedPolicies) {
+ SmallVector<PrototypeDescriptor> PolicyPrototype =
+ RVVIntrinsic::computeBuiltinTypes(
+ BasicPrototype, /*IsMasked=*/false,
+ /*HasMaskedOffOperand=*/false, HasVL, NF,
+ UnMaskedPolicyScheme, P, IsTuple);
+ std::optional<RVVTypes> PolicyTypes =
+ TypeCache.computeTypes(BT, Log2LMUL, NF, PolicyPrototype);
+ Out.push_back(std::make_unique<RVVIntrinsic>(
+ Name, SuffixStr, OverloadedName, OverloadedSuffixStr, IRName,
+ /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL,
+ UnMaskedPolicyScheme, SupportOverloading, HasBuiltinAlias,
+ ManualCodegen, *PolicyTypes, IntrinsicTypes, RequiredFeatures,
+ NF, P, HasFRMRoundModeOp));
+ }
+ if (!HasMasked)
+ continue;
+ // Create a masked intrinsic
+ std::optional<RVVTypes> MaskTypes =
+ TypeCache.computeTypes(BT, Log2LMUL, NF, MaskedPrototype);
+ Out.push_back(std::make_unique<RVVIntrinsic>(
+ Name, SuffixStr, OverloadedName, OverloadedSuffixStr, MaskedIRName,
+ /*IsMasked=*/true, HasMaskedOffOperand, HasVL, MaskedPolicyScheme,
+ SupportOverloading, HasBuiltinAlias, ManualCodegen, *MaskTypes,
+ IntrinsicTypes, RequiredFeatures, NF, DefaultPolicy,
+ HasFRMRoundModeOp));
+ if (MaskedPolicyScheme == PolicyScheme::SchemeNone)
+ continue;
+ for (auto P : SupportedMaskedPolicies) {
+ SmallVector<PrototypeDescriptor> PolicyPrototype =
+ RVVIntrinsic::computeBuiltinTypes(
+ BasicPrototype, /*IsMasked=*/true, HasMaskedOffOperand, HasVL,
+ NF, MaskedPolicyScheme, P, IsTuple);
+ std::optional<RVVTypes> PolicyTypes =
+ TypeCache.computeTypes(BT, Log2LMUL, NF, PolicyPrototype);
Out.push_back(std::make_unique<RVVIntrinsic>(
- Name, SuffixStr, MangledName, MangledSuffixStr, IRNameMask,
- HasSideEffects, /*IsMask=*/true, HasMaskedOffOperand, HasVL,
- HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask,
- MaskTypes.getValue(), IntrinsicTypes, RequiredExtension, NF));
+ Name, SuffixStr, OverloadedName, OverloadedSuffixStr,
+ MaskedIRName, /*IsMasked=*/true, HasMaskedOffOperand, HasVL,
+ MaskedPolicyScheme, SupportOverloading, HasBuiltinAlias,
+ ManualCodegen, *PolicyTypes, IntrinsicTypes, RequiredFeatures, NF,
+ P, HasFRMRoundModeOp));
}
- } // end for Log2LMULList
- } // end for TypeRange
- }
-}
+ } // End for Log2LMULList
+ } // End for TypeRange
+
+ // We don't emit vsetvli and vsetvlimax for SemaRecord.
+ // They are written in riscv_vector.td and will emit those marco define in
+ // riscv_vector.h
+ if (Name == "vsetvli" || Name == "vsetvlimax")
+ continue;
+
+ if (!SemaRecords)
+ continue;
+
+ // Create SemaRecord
+ SemaRecord SR;
+ SR.Name = Name.str();
+ SR.OverloadedName = OverloadedName.str();
+ BasicType TypeRangeMask = BasicType::Unknown;
+ for (char I : TypeRange)
+ TypeRangeMask |= ParseBasicType(I);
+
+ SR.TypeRangeMask = static_cast<unsigned>(TypeRangeMask);
+
+ unsigned Log2LMULMask = 0;
+ for (int Log2LMUL : Log2LMULList)
+ Log2LMULMask |= 1 << (Log2LMUL + 3);
+
+ SR.Log2LMULMask = Log2LMULMask;
+
+ SR.RequiredExtensions = 0;
+ for (auto RequiredFeature : RequiredFeatures) {
+ RVVRequire RequireExt =
+ StringSwitch<RVVRequire>(RequiredFeature)
+ .Case("RV64", RVV_REQ_RV64)
+ .Case("Zvfhmin", RVV_REQ_Zvfhmin)
+ .Case("Xsfvcp", RVV_REQ_Xsfvcp)
+ .Case("Xsfvfnrclipxfqf", RVV_REQ_Xsfvfnrclipxfqf)
+ .Case("Xsfvfwmaccqqq", RVV_REQ_Xsfvfwmaccqqq)
+ .Case("Xsfvqmaccdod", RVV_REQ_Xsfvqmaccdod)
+ .Case("Xsfvqmaccqoq", RVV_REQ_Xsfvqmaccqoq)
+ .Case("Zvbb", RVV_REQ_Zvbb)
+ .Case("Zvbc", RVV_REQ_Zvbc)
+ .Case("Zvkb", RVV_REQ_Zvkb)
+ .Case("Zvkg", RVV_REQ_Zvkg)
+ .Case("Zvkned", RVV_REQ_Zvkned)
+ .Case("Zvknha", RVV_REQ_Zvknha)
+ .Case("Zvknhb", RVV_REQ_Zvknhb)
+ .Case("Zvksed", RVV_REQ_Zvksed)
+ .Case("Zvksh", RVV_REQ_Zvksh)
+ .Case("Experimental", RVV_REQ_Experimental)
+ .Default(RVV_REQ_None);
+ assert(RequireExt != RVV_REQ_None && "Unrecognized required feature?");
+ SR.RequiredExtensions |= RequireExt;
+ }
-Optional<RVVTypes>
-RVVEmitter::computeTypes(BasicType BT, int Log2LMUL, unsigned NF,
- ArrayRef<std::string> PrototypeSeq) {
- // LMUL x NF must be less than or equal to 8.
- if ((Log2LMUL >= 1) && (1 << Log2LMUL) * NF > 8)
- return llvm::None;
-
- RVVTypes Types;
- for (const std::string &Proto : PrototypeSeq) {
- auto T = computeType(BT, Log2LMUL, Proto);
- if (!T.hasValue())
- return llvm::None;
- // Record legal type index
- Types.push_back(T.getValue());
+ SR.NF = NF;
+ SR.HasMasked = HasMasked;
+ SR.HasVL = HasVL;
+ SR.HasMaskedOffOperand = HasMaskedOffOperand;
+ SR.HasTailPolicy = HasTailPolicy;
+ SR.HasMaskPolicy = HasMaskPolicy;
+ SR.UnMaskedPolicyScheme = static_cast<uint8_t>(UnMaskedPolicyScheme);
+ SR.MaskedPolicyScheme = static_cast<uint8_t>(MaskedPolicyScheme);
+ SR.Prototype = std::move(BasicPrototype);
+ SR.Suffix = parsePrototypes(SuffixProto);
+ SR.OverloadedSuffix = parsePrototypes(OverloadedSuffixProto);
+ SR.IsTuple = IsTuple;
+ SR.HasFRMRoundModeOp = HasFRMRoundModeOp;
+
+ SemaRecords->push_back(SR);
}
- return Types;
}
-Optional<RVVTypePtr> RVVEmitter::computeType(BasicType BT, int Log2LMUL,
- StringRef Proto) {
- std::string Idx = Twine(Twine(BT) + Twine(Log2LMUL) + Proto).str();
- // Search first
- auto It = LegalTypes.find(Idx);
- if (It != LegalTypes.end())
- return &(It->second);
- if (IllegalTypes.count(Idx))
- return llvm::None;
- // Compute type and record the result.
- RVVType T(BT, Log2LMUL, Proto);
- if (T.isValid()) {
- // Record legal type index and value.
- LegalTypes.insert({Idx, T});
- return &(LegalTypes[Idx]);
+void RVVEmitter::printHeaderCode(raw_ostream &OS) {
+ std::vector<Record *> RVVHeaders =
+ Records.getAllDerivedDefinitions("RVVHeader");
+ for (auto *R : RVVHeaders) {
+ StringRef HeaderCodeStr = R->getValueAsString("HeaderCode");
+ OS << HeaderCodeStr.str();
}
- // Record illegal type index.
- IllegalTypes.insert(Idx);
- return llvm::None;
}
-void RVVEmitter::emitArchMacroAndBody(
- std::vector<std::unique_ptr<RVVIntrinsic>> &Defs, raw_ostream &OS,
- std::function<void(raw_ostream &, const RVVIntrinsic &)> PrintBody) {
- uint8_t PrevExt = (*Defs.begin())->getRISCVExtensions();
- bool NeedEndif = emitExtDefStr(PrevExt, OS);
- for (auto &Def : Defs) {
- uint8_t CurExt = Def->getRISCVExtensions();
- if (CurExt != PrevExt) {
- if (NeedEndif)
- OS << "#endif\n\n";
- NeedEndif = emitExtDefStr(CurExt, OS);
- PrevExt = CurExt;
- }
- if (Def->hasAutoDef())
- PrintBody(OS, *Def);
+void RVVEmitter::createRVVIntrinsicRecords(std::vector<RVVIntrinsicRecord> &Out,
+ SemaSignatureTable &SST,
+ ArrayRef<SemaRecord> SemaRecords) {
+ SST.init(SemaRecords);
+
+ for (const auto &SR : SemaRecords) {
+ Out.emplace_back(RVVIntrinsicRecord());
+ RVVIntrinsicRecord &R = Out.back();
+ R.Name = SR.Name.c_str();
+ R.OverloadedName = SR.OverloadedName.c_str();
+ R.PrototypeIndex = SST.getIndex(SR.Prototype);
+ R.SuffixIndex = SST.getIndex(SR.Suffix);
+ R.OverloadedSuffixIndex = SST.getIndex(SR.OverloadedSuffix);
+ R.PrototypeLength = SR.Prototype.size();
+ R.SuffixLength = SR.Suffix.size();
+ R.OverloadedSuffixSize = SR.OverloadedSuffix.size();
+ R.RequiredExtensions = SR.RequiredExtensions;
+ R.TypeRangeMask = SR.TypeRangeMask;
+ R.Log2LMULMask = SR.Log2LMULMask;
+ R.NF = SR.NF;
+ R.HasMasked = SR.HasMasked;
+ R.HasVL = SR.HasVL;
+ R.HasMaskedOffOperand = SR.HasMaskedOffOperand;
+ R.HasTailPolicy = SR.HasTailPolicy;
+ R.HasMaskPolicy = SR.HasMaskPolicy;
+ R.UnMaskedPolicyScheme = SR.UnMaskedPolicyScheme;
+ R.MaskedPolicyScheme = SR.MaskedPolicyScheme;
+ R.IsTuple = SR.IsTuple;
+ R.HasFRMRoundModeOp = SR.HasFRMRoundModeOp;
+
+ assert(R.PrototypeIndex !=
+ static_cast<uint16_t>(SemaSignatureTable::INVALID_INDEX));
+ assert(R.SuffixIndex !=
+ static_cast<uint16_t>(SemaSignatureTable::INVALID_INDEX));
+ assert(R.OverloadedSuffixIndex !=
+ static_cast<uint16_t>(SemaSignatureTable::INVALID_INDEX));
}
- if (NeedEndif)
- OS << "#endif\n\n";
}
-bool RVVEmitter::emitExtDefStr(uint8_t Extents, raw_ostream &OS) {
- if (Extents == RISCVExtension::Basic)
- return false;
- OS << "#if ";
- ListSeparator LS(" && ");
- if (Extents & RISCVExtension::F)
- OS << LS << "defined(__riscv_f)";
- if (Extents & RISCVExtension::D)
- OS << LS << "defined(__riscv_d)";
- if (Extents & RISCVExtension::Zfh)
- OS << LS << "defined(__riscv_zfh)";
- if (Extents & RISCVExtension::Zvamo)
- OS << LS << "defined(__riscv_zvamo)";
- if (Extents & RISCVExtension::Zvlsseg)
- OS << LS << "defined(__riscv_zvlsseg)";
- OS << "\n";
- return true;
+void RVVEmitter::createSema(raw_ostream &OS) {
+ std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
+ std::vector<RVVIntrinsicRecord> RVVIntrinsicRecords;
+ SemaSignatureTable SST;
+ std::vector<SemaRecord> SemaRecords;
+
+ createRVVIntrinsics(Defs, &SemaRecords);
+
+ createRVVIntrinsicRecords(RVVIntrinsicRecords, SST, SemaRecords);
+
+ // Emit signature table for SemaRISCVVectorLookup.cpp.
+ OS << "#ifdef DECL_SIGNATURE_TABLE\n";
+ SST.print(OS);
+ OS << "#endif\n";
+
+ // Emit RVVIntrinsicRecords for SemaRISCVVectorLookup.cpp.
+ OS << "#ifdef DECL_INTRINSIC_RECORDS\n";
+ for (const RVVIntrinsicRecord &Record : RVVIntrinsicRecords)
+ OS << Record;
+ OS << "#endif\n";
}
namespace clang {
@@ -1266,4 +778,8 @@ void EmitRVVBuiltinCG(RecordKeeper &Records, raw_ostream &OS) {
RVVEmitter(Records).createCodeGen(OS);
}
+void EmitRVVBuiltinSema(RecordKeeper &Records, raw_ostream &OS) {
+ RVVEmitter(Records).createSema(OS);
+}
+
} // End namespace clang
diff --git a/contrib/llvm-project/clang/utils/TableGen/SveEmitter.cpp b/contrib/llvm-project/clang/utils/TableGen/SveEmitter.cpp
index b2f6ede56522..174304f09007 100644
--- a/contrib/llvm-project/clang/utils/TableGen/SveEmitter.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/SveEmitter.cpp
@@ -23,16 +23,17 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/TableGen/Record.h"
+#include "llvm/ADT/StringMap.h"
#include "llvm/TableGen/Error.h"
-#include <string>
-#include <sstream>
-#include <set>
+#include "llvm/TableGen/Record.h"
+#include <array>
#include <cctype>
+#include <set>
+#include <sstream>
+#include <string>
#include <tuple>
using namespace llvm;
@@ -43,6 +44,8 @@ enum ClassKind {
ClassG, // Overloaded name without type suffix
};
+enum class ACLEKind { SVE, SME };
+
using TypeSpec = std::string;
namespace {
@@ -64,24 +67,29 @@ public:
};
class SVEType {
- TypeSpec TS;
bool Float, Signed, Immediate, Void, Constant, Pointer, BFloat;
- bool DefaultType, IsScalable, Predicate, PredicatePattern, PrefetchOp;
+ bool DefaultType, IsScalable, Predicate, PredicatePattern, PrefetchOp,
+ Svcount;
unsigned Bitwidth, ElementBitwidth, NumVectors;
public:
- SVEType() : SVEType(TypeSpec(), 'v') {}
+ SVEType() : SVEType("", 'v') {}
- SVEType(TypeSpec TS, char CharMod)
- : TS(TS), Float(false), Signed(true), Immediate(false), Void(false),
+ SVEType(StringRef TS, char CharMod, unsigned NumVectors = 1)
+ : Float(false), Signed(true), Immediate(false), Void(false),
Constant(false), Pointer(false), BFloat(false), DefaultType(false),
IsScalable(true), Predicate(false), PredicatePattern(false),
- PrefetchOp(false), Bitwidth(128), ElementBitwidth(~0U), NumVectors(1) {
+ PrefetchOp(false), Svcount(false), Bitwidth(128), ElementBitwidth(~0U),
+ NumVectors(NumVectors) {
if (!TS.empty())
- applyTypespec();
+ applyTypespec(TS);
applyModifier(CharMod);
}
+ SVEType(const SVEType &Base, unsigned NumV) : SVEType(Base) {
+ NumVectors = NumV;
+ }
+
bool isPointer() const { return Pointer; }
bool isVoidPointer() const { return Pointer && Void; }
bool isSigned() const { return Signed; }
@@ -89,19 +97,23 @@ public:
bool isScalar() const { return NumVectors == 0; }
bool isVector() const { return NumVectors > 0; }
bool isScalableVector() const { return isVector() && IsScalable; }
+ bool isFixedLengthVector() const { return isVector() && !IsScalable; }
bool isChar() const { return ElementBitwidth == 8; }
bool isVoid() const { return Void & !Pointer; }
bool isDefault() const { return DefaultType; }
bool isFloat() const { return Float && !BFloat; }
bool isBFloat() const { return BFloat && !Float; }
bool isFloatingPoint() const { return Float || BFloat; }
- bool isInteger() const { return !isFloatingPoint() && !Predicate; }
+ bool isInteger() const {
+ return !isFloatingPoint() && !Predicate && !Svcount;
+ }
bool isScalarPredicate() const {
return !isFloatingPoint() && Predicate && NumVectors == 0;
}
bool isPredicateVector() const { return Predicate; }
bool isPredicatePattern() const { return PredicatePattern; }
bool isPrefetchOp() const { return PrefetchOp; }
+ bool isSvcount() const { return Svcount; }
bool isConstant() const { return Constant; }
unsigned getElementSizeInBits() const { return ElementBitwidth; }
unsigned getNumVectors() const { return NumVectors; }
@@ -124,13 +136,12 @@ public:
private:
/// Creates the type based on the typespec string in TS.
- void applyTypespec();
+ void applyTypespec(StringRef TS);
/// Applies a prototype modifier to the type.
void applyModifier(char Mod);
};
-
class SVEEmitter;
/// The main grunt class. This represents an instantiation of an intrinsic with
@@ -189,7 +200,9 @@ public:
SVEType getReturnType() const { return Types[0]; }
ArrayRef<SVEType> getTypes() const { return Types; }
SVEType getParamType(unsigned I) const { return Types[I + 1]; }
- unsigned getNumParams() const { return Proto.size() - 1; }
+ unsigned getNumParams() const {
+ return Proto.size() - (2 * llvm::count(Proto, '.')) - 1;
+ }
uint64_t getFlags() const { return Flags; }
bool isFlagSet(uint64_t Flag) const { return Flags & Flag;}
@@ -203,6 +216,9 @@ public:
/// ClassS, so will add type suffixes such as _u32/_s32.
std::string getMangledName() const { return mangleName(ClassS); }
+ /// As above, but mangles the LLVM name instead.
+ std::string getMangledLLVMName() const { return mangleLLVMName(); }
+
/// Returns true if the intrinsic is overloaded, in that it should also generate
/// a short form without the type-specifiers, e.g. 'svld1(..)' instead of
/// 'svld1_u32(..)'.
@@ -220,19 +236,28 @@ public:
/// Return the parameter index of the splat operand.
unsigned getSplatIdx() const {
- // These prototype modifiers are described in arm_sve.td.
- auto Idx = Proto.find_first_of("ajfrKLR@");
- assert(Idx != std::string::npos && Idx > 0 &&
- "Prototype has no splat operand");
- return Idx - 1;
+ unsigned I = 1, Param = 0;
+ for (; I < Proto.size(); ++I, ++Param) {
+ if (Proto[I] == 'a' || Proto[I] == 'j' || Proto[I] == 'f' ||
+ Proto[I] == 'r' || Proto[I] == 'K' || Proto[I] == 'L' ||
+ Proto[I] == 'R' || Proto[I] == '@')
+ break;
+
+ // Multivector modifier can be skipped
+ if (Proto[I] == '.')
+ I += 2;
+ }
+ assert(I != Proto.size() && "Prototype has no splat operand");
+ return Param;
}
/// Emits the intrinsic declaration to the ostream.
- void emitIntrinsic(raw_ostream &OS) const;
+ void emitIntrinsic(raw_ostream &OS, SVEEmitter &Emitter, ACLEKind Kind) const;
private:
std::string getMergeSuffix() const { return MergeSuffix; }
std::string mangleName(ClassKind LocalCK) const;
+ std::string mangleLLVMName() const;
std::string replaceTemplatedArgs(std::string Name, TypeSpec TS,
std::string Proto) const;
};
@@ -244,17 +269,11 @@ private:
// which is inconvenient to specify in the arm_sve.td file or
// generate in CGBuiltin.cpp.
struct ReinterpretTypeInfo {
+ SVEType BaseType;
const char *Suffix;
- const char *Type;
- const char *BuiltinType;
};
- SmallVector<ReinterpretTypeInfo, 12> Reinterprets = {
- {"s8", "svint8_t", "q16Sc"}, {"s16", "svint16_t", "q8Ss"},
- {"s32", "svint32_t", "q4Si"}, {"s64", "svint64_t", "q2SWi"},
- {"u8", "svuint8_t", "q16Uc"}, {"u16", "svuint16_t", "q8Us"},
- {"u32", "svuint32_t", "q4Ui"}, {"u64", "svuint64_t", "q2UWi"},
- {"f16", "svfloat16_t", "q8h"}, {"bf16", "svbfloat16_t", "q8y"},
- {"f32", "svfloat32_t", "q4f"}, {"f64", "svfloat64_t", "q2d"}};
+
+ static const std::array<ReinterpretTypeInfo, 12> Reinterprets;
RecordKeeper &Records;
llvm::StringMap<uint64_t> EltTypes;
@@ -298,7 +317,8 @@ public:
auto It = FlagTypes.find(MaskName);
if (It != FlagTypes.end()) {
uint64_t Mask = It->getValue();
- unsigned Shift = llvm::countTrailingZeros(Mask);
+ unsigned Shift = llvm::countr_zero(Mask);
+ assert(Shift < 64 && "Mask value produced an invalid shift value");
return (V << Shift) & Mask;
}
llvm_unreachable("Unsupported flag");
@@ -334,6 +354,10 @@ public:
/// Emit arm_sve.h.
void createHeader(raw_ostream &o);
+ // Emits core intrinsics in both arm_sme.h and arm_sve.h
+ void createCoreHeaderIntrinsics(raw_ostream &o, SVEEmitter &Emitter,
+ ACLEKind Kind);
+
/// Emit all the __builtin prototypes and code needed by Sema.
void createBuiltins(raw_ostream &o);
@@ -346,10 +370,43 @@ public:
/// Create the SVETypeFlags used in CGBuiltins
void createTypeFlags(raw_ostream &o);
+ /// Emit arm_sme.h.
+ void createSMEHeader(raw_ostream &o);
+
+ /// Emit all the SME __builtin prototypes and code needed by Sema.
+ void createSMEBuiltins(raw_ostream &o);
+
+ /// Emit all the information needed to map builtin -> LLVM IR intrinsic.
+ void createSMECodeGenMap(raw_ostream &o);
+
+ /// Create a table for a builtin's requirement for PSTATE.SM.
+ void createStreamingAttrs(raw_ostream &o, ACLEKind Kind);
+
+ /// Emit all the range checks for the immediates.
+ void createSMERangeChecks(raw_ostream &o);
+
+ /// Create a table for a builtin's requirement for PSTATE.ZA.
+ void createBuiltinZAState(raw_ostream &OS);
+
/// Create intrinsic and add it to \p Out
- void createIntrinsic(Record *R, SmallVectorImpl<std::unique_ptr<Intrinsic>> &Out);
+ void createIntrinsic(Record *R,
+ SmallVectorImpl<std::unique_ptr<Intrinsic>> &Out);
};
+const std::array<SVEEmitter::ReinterpretTypeInfo, 12> SVEEmitter::Reinterprets =
+ {{{SVEType("c", 'd'), "s8"},
+ {SVEType("Uc", 'd'), "u8"},
+ {SVEType("s", 'd'), "s16"},
+ {SVEType("Us", 'd'), "u16"},
+ {SVEType("i", 'd'), "s32"},
+ {SVEType("Ui", 'd'), "u32"},
+ {SVEType("l", 'd'), "s64"},
+ {SVEType("Ul", 'd'), "u64"},
+ {SVEType("h", 'd'), "f16"},
+ {SVEType("b", 'd'), "bf16"},
+ {SVEType("f", 'd'), "f32"},
+ {SVEType("d", 'd'), "f64"}}};
+
} // end anonymous namespace
@@ -365,6 +422,9 @@ std::string SVEType::builtin_str() const {
if (isScalarPredicate())
return "b";
+ if (isSvcount())
+ return "Qa";
+
if (isVoidPointer())
S += "v";
else if (!isFloatingPoint())
@@ -413,7 +473,8 @@ std::string SVEType::builtin_str() const {
return S;
}
- assert(isScalableVector() && "Unsupported type");
+ if (isFixedLengthVector())
+ return "V" + utostr(getNumElements() * NumVectors) + S;
return "q" + utostr(getNumElements() * NumVectors) + S;
}
@@ -428,13 +489,15 @@ std::string SVEType::str() const {
if (Void)
S += "void";
else {
- if (isScalableVector())
+ if (isScalableVector() || isSvcount())
S += "sv";
if (!Signed && !isFloatingPoint())
S += "u";
if (Float)
S += "float";
+ else if (isSvcount())
+ S += "count";
else if (isScalarPredicate() || isPredicateVector())
S += "bool";
else if (isBFloat())
@@ -442,9 +505,9 @@ std::string SVEType::str() const {
else
S += "int";
- if (!isScalarPredicate() && !isPredicateVector())
+ if (!isScalarPredicate() && !isPredicateVector() && !isSvcount())
S += utostr(ElementBitwidth);
- if (!isScalableVector() && isVector())
+ if (isFixedLengthVector())
S += "x" + utostr(getNumElements());
if (NumVectors > 1)
S += "x" + utostr(NumVectors);
@@ -459,9 +522,13 @@ std::string SVEType::str() const {
return S;
}
-void SVEType::applyTypespec() {
+
+void SVEType::applyTypespec(StringRef TS) {
for (char I : TS) {
switch (I) {
+ case 'Q':
+ Svcount = true;
+ break;
case 'P':
Predicate = true;
break;
@@ -480,6 +547,9 @@ void SVEType::applyTypespec() {
case 'l':
ElementBitwidth = 64;
break;
+ case 'q':
+ ElementBitwidth = 128;
+ break;
case 'h':
Float = true;
ElementBitwidth = 16;
@@ -506,15 +576,6 @@ void SVEType::applyTypespec() {
void SVEType::applyModifier(char Mod) {
switch (Mod) {
- case '2':
- NumVectors = 2;
- break;
- case '3':
- NumVectors = 3;
- break;
- case '4':
- NumVectors = 4;
- break;
case 'v':
Void = true;
break;
@@ -523,7 +584,7 @@ void SVEType::applyModifier(char Mod) {
break;
case 'c':
Constant = true;
- LLVM_FALLTHROUGH;
+ [[fallthrough]];
case 'p':
Pointer = true;
Bitwidth = ElementBitwidth;
@@ -553,9 +614,15 @@ void SVEType::applyModifier(char Mod) {
Float = false;
BFloat = false;
Predicate = true;
+ Svcount = false;
Bitwidth = 16;
ElementBitwidth = 1;
break;
+ case '{':
+ IsScalable = false;
+ Bitwidth = 128;
+ NumVectors = 1;
+ break;
case 's':
case 'a':
Bitwidth = ElementBitwidth;
@@ -592,18 +659,21 @@ void SVEType::applyModifier(char Mod) {
break;
case 'u':
Predicate = false;
+ Svcount = false;
Signed = false;
Float = false;
BFloat = false;
break;
case 'x':
Predicate = false;
+ Svcount = false;
Signed = true;
Float = false;
BFloat = false;
break;
case 'i':
Predicate = false;
+ Svcount = false;
Float = false;
BFloat = false;
ElementBitwidth = Bitwidth = 64;
@@ -613,6 +683,7 @@ void SVEType::applyModifier(char Mod) {
break;
case 'I':
Predicate = false;
+ Svcount = false;
Float = false;
BFloat = false;
ElementBitwidth = Bitwidth = 32;
@@ -623,6 +694,7 @@ void SVEType::applyModifier(char Mod) {
break;
case 'J':
Predicate = false;
+ Svcount = false;
Float = false;
BFloat = false;
ElementBitwidth = Bitwidth = 32;
@@ -633,6 +705,7 @@ void SVEType::applyModifier(char Mod) {
break;
case 'k':
Predicate = false;
+ Svcount = false;
Signed = true;
Float = false;
BFloat = false;
@@ -641,6 +714,7 @@ void SVEType::applyModifier(char Mod) {
break;
case 'l':
Predicate = false;
+ Svcount = false;
Signed = true;
Float = false;
BFloat = false;
@@ -649,6 +723,7 @@ void SVEType::applyModifier(char Mod) {
break;
case 'm':
Predicate = false;
+ Svcount = false;
Signed = false;
Float = false;
BFloat = false;
@@ -657,6 +732,7 @@ void SVEType::applyModifier(char Mod) {
break;
case 'n':
Predicate = false;
+ Svcount = false;
Signed = false;
Float = false;
BFloat = false;
@@ -681,6 +757,12 @@ void SVEType::applyModifier(char Mod) {
BFloat = false;
ElementBitwidth = 64;
break;
+ case '[':
+ Signed = false;
+ Float = false;
+ BFloat = false;
+ ElementBitwidth = 8;
+ break;
case 't':
Signed = true;
Float = false;
@@ -695,17 +777,20 @@ void SVEType::applyModifier(char Mod) {
break;
case 'O':
Predicate = false;
+ Svcount = false;
Float = true;
ElementBitwidth = 16;
break;
case 'M':
Predicate = false;
+ Svcount = false;
Float = true;
BFloat = false;
ElementBitwidth = 32;
break;
case 'N':
Predicate = false;
+ Svcount = false;
Float = true;
ElementBitwidth = 64;
break;
@@ -757,6 +842,11 @@ void SVEType::applyModifier(char Mod) {
NumVectors = 0;
Signed = true;
break;
+ case '%':
+ Pointer = true;
+ Void = true;
+ NumVectors = 0;
+ break;
case 'A':
Pointer = true;
ElementBitwidth = Bitwidth = 8;
@@ -799,11 +889,51 @@ void SVEType::applyModifier(char Mod) {
NumVectors = 0;
Signed = false;
break;
+ case '$':
+ Predicate = false;
+ Svcount = false;
+ Float = false;
+ BFloat = true;
+ ElementBitwidth = 16;
+ break;
+ case '}':
+ Predicate = false;
+ Signed = true;
+ Svcount = true;
+ NumVectors = 0;
+ Float = false;
+ BFloat = false;
+ break;
+ case '.':
+ llvm_unreachable(". is never a type in itself");
+ break;
default:
llvm_unreachable("Unhandled character!");
}
}
+/// Returns the modifier and number of vectors for the given operand \p Op.
+std::pair<char, unsigned> getProtoModifier(StringRef Proto, unsigned Op) {
+ for (unsigned P = 0; !Proto.empty(); ++P) {
+ unsigned NumVectors = 1;
+ unsigned CharsToSkip = 1;
+ char Mod = Proto[0];
+ if (Mod == '2' || Mod == '3' || Mod == '4') {
+ NumVectors = Mod - '0';
+ Mod = 'd';
+ if (Proto.size() > 1 && Proto[1] == '.') {
+ Mod = Proto[2];
+ CharsToSkip = 3;
+ }
+ }
+
+ if (P == Op)
+ return {Mod, NumVectors};
+
+ Proto = Proto.drop_front(CharsToSkip);
+ }
+ llvm_unreachable("Unexpected Op");
+}
//===----------------------------------------------------------------------===//
// Intrinsic implementation
@@ -819,8 +949,11 @@ Intrinsic::Intrinsic(StringRef Name, StringRef Proto, uint64_t MergeTy,
MergeSuffix(MergeSuffix.str()), BaseType(BT, 'd'), Flags(Flags),
ImmChecks(Checks.begin(), Checks.end()) {
// Types[0] is the return value.
- for (unsigned I = 0; I < Proto.size(); ++I) {
- SVEType T(BaseTypeSpec, Proto[I]);
+ for (unsigned I = 0; I < (getNumParams() + 1); ++I) {
+ char Mod;
+ unsigned NumVectors;
+ std::tie(Mod, NumVectors) = getProtoModifier(Proto, I);
+ SVEType T(BaseTypeSpec, Mod, NumVectors);
Types.push_back(T);
// Add range checks for immediates
@@ -879,6 +1012,8 @@ std::string Intrinsic::replaceTemplatedArgs(std::string Name, TypeSpec TS,
std::string TypeCode;
if (T.isInteger())
TypeCode = T.isSigned() ? 's' : 'u';
+ else if (T.isSvcount())
+ TypeCode = 'c';
else if (T.isPredicateVector())
TypeCode = 'b';
else if (T.isBFloat())
@@ -891,6 +1026,13 @@ std::string Intrinsic::replaceTemplatedArgs(std::string Name, TypeSpec TS,
return Ret;
}
+std::string Intrinsic::mangleLLVMName() const {
+ std::string S = getLLVMName();
+
+ // Replace all {d} like expressions with e.g. 'u32'
+ return replaceTemplatedArgs(S, getBaseTypeSpec(), getProto());
+}
+
std::string Intrinsic::mangleName(ClassKind LocalCK) const {
std::string S = getName();
@@ -918,15 +1060,25 @@ std::string Intrinsic::mangleName(ClassKind LocalCK) const {
getMergeSuffix();
}
-void Intrinsic::emitIntrinsic(raw_ostream &OS) const {
+void Intrinsic::emitIntrinsic(raw_ostream &OS, SVEEmitter &Emitter,
+ ACLEKind Kind) const {
bool IsOverloaded = getClassKind() == ClassG && getProto().size() > 1;
std::string FullName = mangleName(ClassS);
std::string ProtoName = mangleName(getClassKind());
-
OS << (IsOverloaded ? "__aio " : "__ai ")
- << "__attribute__((__clang_arm_builtin_alias("
- << "__builtin_sve_" << FullName << ")))\n";
+ << "__attribute__((__clang_arm_builtin_alias(";
+
+ switch (Kind) {
+ case ACLEKind::SME:
+ OS << "__builtin_sme_" << FullName << ")";
+ break;
+ case ACLEKind::SVE:
+ OS << "__builtin_sve_" << FullName << ")";
+ break;
+ }
+
+ OS << "))\n";
OS << getTypes()[0].str() << " " << ProtoName << "(";
for (unsigned I = 0; I < getTypes().size() - 1; ++I) {
@@ -959,7 +1111,7 @@ uint64_t SVEEmitter::encodeTypeFlags(const SVEType &T) {
return encodeEltType("EltTyBFloat16");
}
- if (T.isPredicateVector()) {
+ if (T.isPredicateVector() || T.isSvcount()) {
switch (T.getElementSizeInBits()) {
case 8:
return encodeEltType("EltTyBool8");
@@ -983,6 +1135,8 @@ uint64_t SVEEmitter::encodeTypeFlags(const SVEType &T) {
return encodeEltType("EltTyInt32");
case 64:
return encodeEltType("EltTyInt64");
+ case 128:
+ return encodeEltType("EltTyInt128");
default:
llvm_unreachable("Unhandled integer element bitwidth!");
}
@@ -993,7 +1147,7 @@ void SVEEmitter::createIntrinsic(
StringRef Name = R->getValueAsString("Name");
StringRef Proto = R->getValueAsString("Prototype");
StringRef Types = R->getValueAsString("Types");
- StringRef Guard = R->getValueAsString("ArchGuard");
+ StringRef Guard = R->getValueAsString("TargetGuard");
StringRef LLVMName = R->getValueAsString("LLVMIntrinsic");
uint64_t Merge = R->getValueAsInt("Merge");
StringRef MergeSuffix = R->getValueAsString("MergeSuffix");
@@ -1039,10 +1193,11 @@ void SVEEmitter::createIntrinsic(
assert(Arg >= 0 && Kind >= 0 && "Arg and Kind must be nonnegative");
unsigned ElementSizeInBits = 0;
+ char Mod;
+ unsigned NumVectors;
+ std::tie(Mod, NumVectors) = getProtoModifier(Proto, EltSizeArg + 1);
if (EltSizeArg >= 0)
- ElementSizeInBits =
- SVEType(TS, Proto[EltSizeArg + /* offset by return arg */ 1])
- .getElementSizeInBits();
+ ElementSizeInBits = SVEType(TS, Mod, NumVectors).getElementSizeInBits();
ImmChecks.push_back(ImmCheck(Arg, Kind, ElementSizeInBits));
}
@@ -1058,6 +1213,34 @@ void SVEEmitter::createIntrinsic(
}
}
+void SVEEmitter::createCoreHeaderIntrinsics(raw_ostream &OS,
+ SVEEmitter &Emitter,
+ ACLEKind Kind) {
+ SmallVector<std::unique_ptr<Intrinsic>, 128> Defs;
+ std::vector<Record *> RV = Records.getAllDerivedDefinitions("Inst");
+ for (auto *R : RV)
+ createIntrinsic(R, Defs);
+
+ // Sort intrinsics in header file by following order/priority:
+ // - Architectural guard (i.e. does it require SVE2 or SVE2_AES)
+ // - Class (is intrinsic overloaded or not)
+ // - Intrinsic name
+ std::stable_sort(Defs.begin(), Defs.end(),
+ [](const std::unique_ptr<Intrinsic> &A,
+ const std::unique_ptr<Intrinsic> &B) {
+ auto ToTuple = [](const std::unique_ptr<Intrinsic> &I) {
+ return std::make_tuple(I->getGuard(),
+ (unsigned)I->getClassKind(),
+ I->getName());
+ };
+ return ToTuple(A) < ToTuple(B);
+ });
+
+ // Actually emit the intrinsic declarations.
+ for (auto &I : Defs)
+ I->emitIntrinsic(OS, Emitter, Kind);
+}
+
void SVEEmitter::createHeader(raw_ostream &OS) {
OS << "/*===---- arm_sve.h - ARM SVE intrinsics "
"-----------------------------------===\n"
@@ -1075,10 +1258,6 @@ void SVEEmitter::createHeader(raw_ostream &OS) {
OS << "#ifndef __ARM_SVE_H\n";
OS << "#define __ARM_SVE_H\n\n";
- OS << "#if !defined(__ARM_FEATURE_SVE)\n";
- OS << "#error \"SVE support not enabled\"\n";
- OS << "#else\n\n";
-
OS << "#if !defined(__LITTLE_ENDIAN__)\n";
OS << "#error \"Big endian is currently not supported for arm_sve.h\"\n";
OS << "#endif\n";
@@ -1104,20 +1283,10 @@ void SVEEmitter::createHeader(raw_ostream &OS) {
OS << "typedef __SVUint64_t svuint64_t;\n";
OS << "typedef __SVFloat16_t svfloat16_t;\n\n";
- OS << "#if defined(__ARM_FEATURE_SVE_BF16) && "
- "!defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC)\n";
- OS << "#error \"__ARM_FEATURE_BF16_SCALAR_ARITHMETIC must be defined when "
- "__ARM_FEATURE_SVE_BF16 is defined\"\n";
- OS << "#endif\n\n";
-
- OS << "#if defined(__ARM_FEATURE_SVE_BF16)\n";
- OS << "typedef __SVBFloat16_t svbfloat16_t;\n";
- OS << "#endif\n\n";
+ OS << "typedef __SVBfloat16_t svbfloat16_t;\n";
- OS << "#if defined(__ARM_FEATURE_BF16_SCALAR_ARITHMETIC)\n";
OS << "#include <arm_bf16.h>\n";
- OS << "typedef __bf16 bfloat16_t;\n";
- OS << "#endif\n\n";
+ OS << "#include <arm_vector_types.h>\n";
OS << "typedef __SVFloat32_t svfloat32_t;\n";
OS << "typedef __SVFloat64_t svfloat64_t;\n";
@@ -1154,13 +1323,15 @@ void SVEEmitter::createHeader(raw_ostream &OS) {
OS << "typedef __clang_svfloat16x4_t svfloat16x4_t;\n";
OS << "typedef __clang_svfloat32x4_t svfloat32x4_t;\n";
OS << "typedef __clang_svfloat64x4_t svfloat64x4_t;\n";
- OS << "typedef __SVBool_t svbool_t;\n\n";
+ OS << "typedef __SVBool_t svbool_t;\n";
+ OS << "typedef __clang_svboolx2_t svboolx2_t;\n";
+ OS << "typedef __clang_svboolx4_t svboolx4_t;\n\n";
- OS << "#ifdef __ARM_FEATURE_SVE_BF16\n";
OS << "typedef __clang_svbfloat16x2_t svbfloat16x2_t;\n";
OS << "typedef __clang_svbfloat16x3_t svbfloat16x3_t;\n";
OS << "typedef __clang_svbfloat16x4_t svbfloat16x4_t;\n";
- OS << "#endif\n";
+
+ OS << "typedef __SVCount_t svcount_t;\n\n";
OS << "enum svpattern\n";
OS << "{\n";
@@ -1206,69 +1377,34 @@ void SVEEmitter::createHeader(raw_ostream &OS) {
"__nodebug__, __overloadable__))\n\n";
// Add reinterpret functions.
- for (auto ShortForm : { false, true } )
- for (const ReinterpretTypeInfo &From : Reinterprets)
+ for (auto [N, Suffix] :
+ std::initializer_list<std::pair<unsigned, const char *>>{
+ {1, ""}, {2, "_x2"}, {3, "_x3"}, {4, "_x4"}}) {
+ for (auto ShortForm : {false, true})
for (const ReinterpretTypeInfo &To : Reinterprets) {
- const bool IsBFloat = StringRef(From.Suffix).equals("bf16") ||
- StringRef(To.Suffix).equals("bf16");
- if (IsBFloat)
- OS << "#if defined(__ARM_FEATURE_SVE_BF16)\n";
- if (ShortForm) {
- OS << "__aio " << From.Type << " svreinterpret_" << From.Suffix;
- OS << "(" << To.Type << " op) {\n";
- OS << " return __builtin_sve_reinterpret_" << From.Suffix << "_"
- << To.Suffix << "(op);\n";
- OS << "}\n\n";
- } else
- OS << "#define svreinterpret_" << From.Suffix << "_" << To.Suffix
- << "(...) __builtin_sve_reinterpret_" << From.Suffix << "_"
- << To.Suffix << "(__VA_ARGS__)\n";
- if (IsBFloat)
- OS << "#endif /* #if defined(__ARM_FEATURE_SVE_BF16) */\n";
+ SVEType ToV(To.BaseType, N);
+ for (const ReinterpretTypeInfo &From : Reinterprets) {
+ SVEType FromV(From.BaseType, N);
+ if (ShortForm) {
+ OS << "__aio __attribute__((target(\"sve\"))) " << ToV.str()
+ << " svreinterpret_" << To.Suffix;
+ OS << "(" << FromV.str() << " op) __arm_streaming_compatible {\n";
+ OS << " return __builtin_sve_reinterpret_" << To.Suffix << "_"
+ << From.Suffix << Suffix << "(op);\n";
+ OS << "}\n\n";
+ } else
+ OS << "#define svreinterpret_" << To.Suffix << "_" << From.Suffix
+ << Suffix << "(...) __builtin_sve_reinterpret_" << To.Suffix
+ << "_" << From.Suffix << Suffix << "(__VA_ARGS__)\n";
+ }
}
-
- SmallVector<std::unique_ptr<Intrinsic>, 128> Defs;
- std::vector<Record *> RV = Records.getAllDerivedDefinitions("Inst");
- for (auto *R : RV)
- createIntrinsic(R, Defs);
-
- // Sort intrinsics in header file by following order/priority:
- // - Architectural guard (i.e. does it require SVE2 or SVE2_AES)
- // - Class (is intrinsic overloaded or not)
- // - Intrinsic name
- std::stable_sort(
- Defs.begin(), Defs.end(), [](const std::unique_ptr<Intrinsic> &A,
- const std::unique_ptr<Intrinsic> &B) {
- auto ToTuple = [](const std::unique_ptr<Intrinsic> &I) {
- return std::make_tuple(I->getGuard(), (unsigned)I->getClassKind(), I->getName());
- };
- return ToTuple(A) < ToTuple(B);
- });
-
- StringRef InGuard = "";
- for (auto &I : Defs) {
- // Emit #endif/#if pair if needed.
- if (I->getGuard() != InGuard) {
- if (!InGuard.empty())
- OS << "#endif //" << InGuard << "\n";
- InGuard = I->getGuard();
- if (!InGuard.empty())
- OS << "\n#if " << InGuard << "\n";
- }
-
- // Actually emit the intrinsic declaration.
- I->emitIntrinsic(OS);
}
- if (!InGuard.empty())
- OS << "#endif //" << InGuard << "\n";
+ createCoreHeaderIntrinsics(OS, *this, ACLEKind::SVE);
- OS << "#if defined(__ARM_FEATURE_SVE_BF16)\n";
OS << "#define svcvtnt_bf16_x svcvtnt_bf16_m\n";
OS << "#define svcvtnt_bf16_f32_x svcvtnt_bf16_f32_m\n";
- OS << "#endif /*__ARM_FEATURE_SVE_BF16 */\n\n";
- OS << "#if defined(__ARM_FEATURE_SVE2)\n";
OS << "#define svcvtnt_f16_x svcvtnt_f16_m\n";
OS << "#define svcvtnt_f16_f32_x svcvtnt_f16_f32_m\n";
OS << "#define svcvtnt_f32_x svcvtnt_f32_m\n";
@@ -1277,12 +1413,11 @@ void SVEEmitter::createHeader(raw_ostream &OS) {
OS << "#define svcvtxnt_f32_x svcvtxnt_f32_m\n";
OS << "#define svcvtxnt_f32_f64_x svcvtxnt_f32_f64_m\n\n";
- OS << "#endif /*__ARM_FEATURE_SVE2 */\n\n";
-
OS << "#ifdef __cplusplus\n";
OS << "} // extern \"C\"\n";
OS << "#endif\n\n";
- OS << "#endif /*__ARM_FEATURE_SVE */\n\n";
+ OS << "#undef __ai\n\n";
+ OS << "#undef __aio\n\n";
OS << "#endif /* __ARM_SVE_H */\n";
}
@@ -1303,19 +1438,28 @@ void SVEEmitter::createBuiltins(raw_ostream &OS) {
// Only create BUILTINs for non-overloaded intrinsics, as overloaded
// declarations only live in the header file.
if (Def->getClassKind() != ClassG)
- OS << "BUILTIN(__builtin_sve_" << Def->getMangledName() << ", \""
- << Def->getBuiltinTypeStr() << "\", \"n\")\n";
+ OS << "TARGET_BUILTIN(__builtin_sve_" << Def->getMangledName() << ", \""
+ << Def->getBuiltinTypeStr() << "\", \"n\", \"" << Def->getGuard()
+ << "\")\n";
}
- // Add reinterpret builtins
- for (const ReinterpretTypeInfo &From : Reinterprets)
- for (const ReinterpretTypeInfo &To : Reinterprets)
- OS << "BUILTIN(__builtin_sve_reinterpret_" << From.Suffix << "_"
- << To.Suffix << +", \"" << From.BuiltinType << To.BuiltinType
- << "\", \"n\")\n";
+ // Add reinterpret functions.
+ for (auto [N, Suffix] :
+ std::initializer_list<std::pair<unsigned, const char *>>{
+ {1, ""}, {2, "_x2"}, {3, "_x3"}, {4, "_x4"}}) {
+ for (const ReinterpretTypeInfo &To : Reinterprets) {
+ SVEType ToV(To.BaseType, N);
+ for (const ReinterpretTypeInfo &From : Reinterprets) {
+ SVEType FromV(From.BaseType, N);
+ OS << "TARGET_BUILTIN(__builtin_sve_reinterpret_" << To.Suffix << "_"
+ << From.Suffix << Suffix << +", \"" << ToV.builtin_str()
+ << FromV.builtin_str() << "\", \"n\", \"sve\")\n";
+ }
+ }
+ }
OS << "#endif\n\n";
- }
+}
void SVEEmitter::createCodeGenMap(raw_ostream &OS) {
std::vector<Record *> RV = Records.getAllDerivedDefinitions("Inst");
@@ -1339,7 +1483,7 @@ void SVEEmitter::createCodeGenMap(raw_ostream &OS) {
uint64_t Flags = Def->getFlags();
auto FlagString = std::to_string(Flags);
- std::string LLVMName = Def->getLLVMName();
+ std::string LLVMName = Def->getMangledLLVMName();
std::string Builtin = Def->getMangledName();
if (!LLVMName.empty())
OS << "SVEMAP1(" << Builtin << ", " << LLVMName << ", " << FlagString
@@ -1413,6 +1557,251 @@ void SVEEmitter::createTypeFlags(raw_ostream &OS) {
OS << "#endif\n\n";
}
+void SVEEmitter::createSMEHeader(raw_ostream &OS) {
+ OS << "/*===---- arm_sme.h - ARM SME intrinsics "
+ "------===\n"
+ " *\n"
+ " *\n"
+ " * Part of the LLVM Project, under the Apache License v2.0 with LLVM "
+ "Exceptions.\n"
+ " * See https://llvm.org/LICENSE.txt for license information.\n"
+ " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n"
+ " *\n"
+ " *===-----------------------------------------------------------------"
+ "------===\n"
+ " */\n\n";
+
+ OS << "#ifndef __ARM_SME_H\n";
+ OS << "#define __ARM_SME_H\n\n";
+
+ OS << "#if !defined(__LITTLE_ENDIAN__)\n";
+ OS << "#error \"Big endian is currently not supported for arm_sme.h\"\n";
+ OS << "#endif\n";
+
+ OS << "#include <arm_sve.h>\n\n";
+
+ OS << "/* Function attributes */\n";
+ OS << "#define __ai static __inline__ __attribute__((__always_inline__, "
+ "__nodebug__))\n\n";
+ OS << "#define __aio static __inline__ __attribute__((__always_inline__, "
+ "__nodebug__, __overloadable__))\n\n";
+
+ OS << "#ifdef __cplusplus\n";
+ OS << "extern \"C\" {\n";
+ OS << "#endif\n\n";
+
+ OS << "void __arm_za_disable(void) __arm_streaming_compatible;\n\n";
+
+ OS << "__ai bool __arm_has_sme(void) __arm_streaming_compatible {\n";
+ OS << " uint64_t x0, x1;\n";
+ OS << " __builtin_arm_get_sme_state(&x0, &x1);\n";
+ OS << " return x0 & (1ULL << 63);\n";
+ OS << "}\n\n";
+
+ OS << "__ai bool __arm_in_streaming_mode(void) __arm_streaming_compatible "
+ "{\n";
+ OS << " uint64_t x0, x1;\n";
+ OS << " __builtin_arm_get_sme_state(&x0, &x1);\n";
+ OS << " return x0 & 1;\n";
+ OS << "}\n\n";
+
+ OS << "__ai __attribute__((target(\"sme\"))) void svundef_za(void) "
+ "__arm_streaming_compatible __arm_out(\"za\") "
+ "{ }\n\n";
+
+ createCoreHeaderIntrinsics(OS, *this, ACLEKind::SME);
+
+ OS << "#ifdef __cplusplus\n";
+ OS << "} // extern \"C\"\n";
+ OS << "#endif\n\n";
+ OS << "#undef __ai\n\n";
+ OS << "#endif /* __ARM_SME_H */\n";
+}
+
+void SVEEmitter::createSMEBuiltins(raw_ostream &OS) {
+ std::vector<Record *> RV = Records.getAllDerivedDefinitions("Inst");
+ SmallVector<std::unique_ptr<Intrinsic>, 128> Defs;
+ for (auto *R : RV) {
+ createIntrinsic(R, Defs);
+ }
+
+ // The mappings must be sorted based on BuiltinID.
+ llvm::sort(Defs, [](const std::unique_ptr<Intrinsic> &A,
+ const std::unique_ptr<Intrinsic> &B) {
+ return A->getMangledName() < B->getMangledName();
+ });
+
+ OS << "#ifdef GET_SME_BUILTINS\n";
+ for (auto &Def : Defs) {
+ // Only create BUILTINs for non-overloaded intrinsics, as overloaded
+ // declarations only live in the header file.
+ if (Def->getClassKind() != ClassG)
+ OS << "TARGET_BUILTIN(__builtin_sme_" << Def->getMangledName() << ", \""
+ << Def->getBuiltinTypeStr() << "\", \"n\", \"" << Def->getGuard()
+ << "\")\n";
+ }
+
+ OS << "#endif\n\n";
+}
+
+void SVEEmitter::createSMECodeGenMap(raw_ostream &OS) {
+ std::vector<Record *> RV = Records.getAllDerivedDefinitions("Inst");
+ SmallVector<std::unique_ptr<Intrinsic>, 128> Defs;
+ for (auto *R : RV) {
+ createIntrinsic(R, Defs);
+ }
+
+ // The mappings must be sorted based on BuiltinID.
+ llvm::sort(Defs, [](const std::unique_ptr<Intrinsic> &A,
+ const std::unique_ptr<Intrinsic> &B) {
+ return A->getMangledName() < B->getMangledName();
+ });
+
+ OS << "#ifdef GET_SME_LLVM_INTRINSIC_MAP\n";
+ for (auto &Def : Defs) {
+ // Builtins only exist for non-overloaded intrinsics, overloaded
+ // declarations only live in the header file.
+ if (Def->getClassKind() == ClassG)
+ continue;
+
+ uint64_t Flags = Def->getFlags();
+ auto FlagString = std::to_string(Flags);
+
+ std::string LLVMName = Def->getLLVMName();
+ std::string Builtin = Def->getMangledName();
+ if (!LLVMName.empty())
+ OS << "SMEMAP1(" << Builtin << ", " << LLVMName << ", " << FlagString
+ << "),\n";
+ else
+ OS << "SMEMAP2(" << Builtin << ", " << FlagString << "),\n";
+ }
+ OS << "#endif\n\n";
+}
+
+void SVEEmitter::createSMERangeChecks(raw_ostream &OS) {
+ std::vector<Record *> RV = Records.getAllDerivedDefinitions("Inst");
+ SmallVector<std::unique_ptr<Intrinsic>, 128> Defs;
+ for (auto *R : RV) {
+ createIntrinsic(R, Defs);
+ }
+
+ // The mappings must be sorted based on BuiltinID.
+ llvm::sort(Defs, [](const std::unique_ptr<Intrinsic> &A,
+ const std::unique_ptr<Intrinsic> &B) {
+ return A->getMangledName() < B->getMangledName();
+ });
+
+
+ OS << "#ifdef GET_SME_IMMEDIATE_CHECK\n";
+
+ // Ensure these are only emitted once.
+ std::set<std::string> Emitted;
+
+ for (auto &Def : Defs) {
+ if (Emitted.find(Def->getMangledName()) != Emitted.end() ||
+ Def->getImmChecks().empty())
+ continue;
+
+ OS << "case SME::BI__builtin_sme_" << Def->getMangledName() << ":\n";
+ for (auto &Check : Def->getImmChecks())
+ OS << "ImmChecks.push_back(std::make_tuple(" << Check.getArg() << ", "
+ << Check.getKind() << ", " << Check.getElementSizeInBits() << "));\n";
+ OS << " break;\n";
+
+ Emitted.insert(Def->getMangledName());
+ }
+
+ OS << "#endif\n\n";
+}
+
+void SVEEmitter::createBuiltinZAState(raw_ostream &OS) {
+ std::vector<Record *> RV = Records.getAllDerivedDefinitions("Inst");
+ SmallVector<std::unique_ptr<Intrinsic>, 128> Defs;
+ for (auto *R : RV)
+ createIntrinsic(R, Defs);
+
+ std::map<std::string, std::set<std::string>> IntrinsicsPerState;
+ for (auto &Def : Defs) {
+ std::string Key;
+ auto AddToKey = [&Key](const std::string &S) -> void {
+ Key = Key.empty() ? S : (Key + " | " + S);
+ };
+
+ if (Def->isFlagSet(getEnumValueForFlag("IsInZA")))
+ AddToKey("ArmInZA");
+ else if (Def->isFlagSet(getEnumValueForFlag("IsOutZA")))
+ AddToKey("ArmOutZA");
+ else if (Def->isFlagSet(getEnumValueForFlag("IsInOutZA")))
+ AddToKey("ArmInOutZA");
+
+ if (Def->isFlagSet(getEnumValueForFlag("IsInZT0")))
+ AddToKey("ArmInZT0");
+ else if (Def->isFlagSet(getEnumValueForFlag("IsOutZT0")))
+ AddToKey("ArmOutZT0");
+ else if (Def->isFlagSet(getEnumValueForFlag("IsInOutZT0")))
+ AddToKey("ArmInOutZT0");
+
+ if (!Key.empty())
+ IntrinsicsPerState[Key].insert(Def->getMangledName());
+ }
+
+ OS << "#ifdef GET_SME_BUILTIN_GET_STATE\n";
+ for (auto &KV : IntrinsicsPerState) {
+ for (StringRef Name : KV.second)
+ OS << "case SME::BI__builtin_sme_" << Name << ":\n";
+ OS << " return " << KV.first << ";\n";
+ }
+ OS << "#endif\n\n";
+}
+
+void SVEEmitter::createStreamingAttrs(raw_ostream &OS, ACLEKind Kind) {
+ std::vector<Record *> RV = Records.getAllDerivedDefinitions("Inst");
+ SmallVector<std::unique_ptr<Intrinsic>, 128> Defs;
+ for (auto *R : RV)
+ createIntrinsic(R, Defs);
+
+ StringRef ExtensionKind;
+ switch (Kind) {
+ case ACLEKind::SME:
+ ExtensionKind = "SME";
+ break;
+ case ACLEKind::SVE:
+ ExtensionKind = "SVE";
+ break;
+ }
+
+ OS << "#ifdef GET_" << ExtensionKind << "_STREAMING_ATTRS\n";
+
+ llvm::StringMap<std::set<std::string>> StreamingMap;
+
+ uint64_t IsStreamingFlag = getEnumValueForFlag("IsStreaming");
+ uint64_t IsStreamingOrSVE2p1Flag = getEnumValueForFlag("IsStreamingOrSVE2p1");
+ uint64_t IsStreamingCompatibleFlag =
+ getEnumValueForFlag("IsStreamingCompatible");
+ for (auto &Def : Defs) {
+ if (Def->isFlagSet(IsStreamingFlag))
+ StreamingMap["ArmStreaming"].insert(Def->getMangledName());
+ else if (Def->isFlagSet(IsStreamingOrSVE2p1Flag))
+ StreamingMap["ArmStreamingOrSVE2p1"].insert(Def->getMangledName());
+ else if (Def->isFlagSet(IsStreamingCompatibleFlag))
+ StreamingMap["ArmStreamingCompatible"].insert(Def->getMangledName());
+ else
+ StreamingMap["ArmNonStreaming"].insert(Def->getMangledName());
+ }
+
+ for (auto BuiltinType : StreamingMap.keys()) {
+ for (auto Name : StreamingMap[BuiltinType]) {
+ OS << "case " << ExtensionKind << "::BI__builtin_"
+ << ExtensionKind.lower() << "_";
+ OS << Name << ":\n";
+ }
+ OS << " BuiltinType = " << BuiltinType << ";\n";
+ OS << " break;\n";
+ }
+
+ OS << "#endif\n\n";
+}
+
namespace clang {
void EmitSveHeader(RecordKeeper &Records, raw_ostream &OS) {
SVEEmitter(Records).createHeader(OS);
@@ -1434,4 +1823,31 @@ void EmitSveTypeFlags(RecordKeeper &Records, raw_ostream &OS) {
SVEEmitter(Records).createTypeFlags(OS);
}
+void EmitSveStreamingAttrs(RecordKeeper &Records, raw_ostream &OS) {
+ SVEEmitter(Records).createStreamingAttrs(OS, ACLEKind::SVE);
+}
+
+void EmitSmeHeader(RecordKeeper &Records, raw_ostream &OS) {
+ SVEEmitter(Records).createSMEHeader(OS);
+}
+
+void EmitSmeBuiltins(RecordKeeper &Records, raw_ostream &OS) {
+ SVEEmitter(Records).createSMEBuiltins(OS);
+}
+
+void EmitSmeBuiltinCG(RecordKeeper &Records, raw_ostream &OS) {
+ SVEEmitter(Records).createSMECodeGenMap(OS);
+}
+
+void EmitSmeRangeChecks(RecordKeeper &Records, raw_ostream &OS) {
+ SVEEmitter(Records).createSMERangeChecks(OS);
+}
+
+void EmitSmeStreamingAttrs(RecordKeeper &Records, raw_ostream &OS) {
+ SVEEmitter(Records).createStreamingAttrs(OS, ACLEKind::SME);
+}
+
+void EmitSmeBuiltinZAState(RecordKeeper &Records, raw_ostream &OS) {
+ SVEEmitter(Records).createBuiltinZAState(OS);
+}
} // End namespace clang
diff --git a/contrib/llvm-project/clang/utils/TableGen/TableGen.cpp b/contrib/llvm-project/clang/utils/TableGen/TableGen.cpp
index 7fb5d0acc6f3..158d10e2b3d6 100644
--- a/contrib/llvm-project/clang/utils/TableGen/TableGen.cpp
+++ b/contrib/llvm-project/clang/utils/TableGen/TableGen.cpp
@@ -13,6 +13,7 @@
#include "TableGenBackends.h" // Declares all backends.
#include "ASTTableGen.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/Signals.h"
#include "llvm/TableGen/Error.h"
@@ -30,9 +31,13 @@ enum ActionType {
GenClangAttrSubjectMatchRulesParserStringSwitches,
GenClangAttrImpl,
GenClangAttrList,
+ GenClangAttrCanPrintLeftList,
+ GenClangAttrMustPrintLeftList,
+ GenClangAttrDocTable,
GenClangAttrSubjectMatchRuleList,
GenClangAttrPCHRead,
GenClangAttrPCHWrite,
+ GenClangRegularKeywordAttributeInfo,
GenClangAttrHasAttributeImpl,
GenClangAttrSpellingListIndex,
GenClangAttrASTVisitor,
@@ -63,10 +68,12 @@ enum ActionType {
GenClangCommentCommandInfo,
GenClangCommentCommandList,
GenClangOpenCLBuiltins,
+ GenClangOpenCLBuiltinHeader,
GenClangOpenCLBuiltinTests,
GenArmNeon,
GenArmFP16,
GenArmBF16,
+ GenArmVectorType,
GenArmNeonSema,
GenArmNeonTest,
GenArmMveHeader,
@@ -79,6 +86,13 @@ enum ActionType {
GenArmSveBuiltinCG,
GenArmSveTypeFlags,
GenArmSveRangeChecks,
+ GenArmSveStreamingAttrs,
+ GenArmSmeHeader,
+ GenArmSmeBuiltins,
+ GenArmSmeBuiltinCG,
+ GenArmSmeRangeChecks,
+ GenArmSmeStreamingAttrs,
+ GenArmSmeBuiltinZAState,
GenArmCdeHeader,
GenArmCdeBuiltinDef,
GenArmCdeBuiltinSema,
@@ -87,6 +101,10 @@ enum ActionType {
GenRISCVVectorHeader,
GenRISCVVectorBuiltins,
GenRISCVVectorBuiltinCG,
+ GenRISCVVectorBuiltinSema,
+ GenRISCVSiFiveVectorBuiltins,
+ GenRISCVSiFiveVectorBuiltinCG,
+ GenRISCVSiFiveVectorBuiltinSema,
GenAttrDocs,
GenDiagDocs,
GenOptDocs,
@@ -115,6 +133,16 @@ cl::opt<ActionType> Action(
"Generate clang attribute implementations"),
clEnumValN(GenClangAttrList, "gen-clang-attr-list",
"Generate a clang attribute list"),
+ clEnumValN(GenClangAttrCanPrintLeftList,
+ "gen-clang-attr-can-print-left-list",
+ "Generate list of attributes that can be printed on left "
+ "side of a decl"),
+ clEnumValN(GenClangAttrMustPrintLeftList,
+ "gen-clang-attr-must-print-left-list",
+ "Generate list of attributes that must be printed on left "
+ "side of a decl"),
+ clEnumValN(GenClangAttrDocTable, "gen-clang-attr-doc-table",
+ "Generate a table of attribute documentation"),
clEnumValN(GenClangAttrSubjectMatchRuleList,
"gen-clang-attr-subject-match-rule-list",
"Generate a clang attribute subject match rule list"),
@@ -122,6 +150,10 @@ cl::opt<ActionType> Action(
"Generate clang PCH attribute reader"),
clEnumValN(GenClangAttrPCHWrite, "gen-clang-attr-pch-write",
"Generate clang PCH attribute writer"),
+ clEnumValN(GenClangRegularKeywordAttributeInfo,
+ "gen-clang-regular-keyword-attr-info",
+ "Generate a list of regular keyword attributes with info "
+ "about their arguments"),
clEnumValN(GenClangAttrHasAttributeImpl,
"gen-clang-attr-has-attribute-impl",
"Generate a clang attribute spelling list"),
@@ -195,11 +227,16 @@ cl::opt<ActionType> Action(
"documentation comments"),
clEnumValN(GenClangOpenCLBuiltins, "gen-clang-opencl-builtins",
"Generate OpenCL builtin declaration handlers"),
+ clEnumValN(GenClangOpenCLBuiltinHeader,
+ "gen-clang-opencl-builtin-header",
+ "Generate OpenCL builtin header"),
clEnumValN(GenClangOpenCLBuiltinTests, "gen-clang-opencl-builtin-tests",
"Generate OpenCL builtin declaration tests"),
clEnumValN(GenArmNeon, "gen-arm-neon", "Generate arm_neon.h for clang"),
clEnumValN(GenArmFP16, "gen-arm-fp16", "Generate arm_fp16.h for clang"),
clEnumValN(GenArmBF16, "gen-arm-bf16", "Generate arm_bf16.h for clang"),
+ clEnumValN(GenArmVectorType, "gen-arm-vector-type",
+ "Generate arm_vector_types.h for clang"),
clEnumValN(GenArmNeonSema, "gen-arm-neon-sema",
"Generate ARM NEON sema support for clang"),
clEnumValN(GenArmNeonTest, "gen-arm-neon-test",
@@ -214,6 +251,20 @@ cl::opt<ActionType> Action(
"Generate arm_sve_typeflags.inc for clang"),
clEnumValN(GenArmSveRangeChecks, "gen-arm-sve-sema-rangechecks",
"Generate arm_sve_sema_rangechecks.inc for clang"),
+ clEnumValN(GenArmSveStreamingAttrs, "gen-arm-sve-streaming-attrs",
+ "Generate arm_sve_streaming_attrs.inc for clang"),
+ clEnumValN(GenArmSmeHeader, "gen-arm-sme-header",
+ "Generate arm_sme.h for clang"),
+ clEnumValN(GenArmSmeBuiltins, "gen-arm-sme-builtins",
+ "Generate arm_sme_builtins.inc for clang"),
+ clEnumValN(GenArmSmeBuiltinCG, "gen-arm-sme-builtin-codegen",
+ "Generate arm_sme_builtin_cg_map.inc for clang"),
+ clEnumValN(GenArmSmeRangeChecks, "gen-arm-sme-sema-rangechecks",
+ "Generate arm_sme_sema_rangechecks.inc for clang"),
+ clEnumValN(GenArmSmeStreamingAttrs, "gen-arm-sme-streaming-attrs",
+ "Generate arm_sme_streaming_attrs.inc for clang"),
+ clEnumValN(GenArmSmeBuiltinZAState, "gen-arm-sme-builtin-za-state",
+ "Generate arm_sme_builtins_za_state.inc for clang"),
clEnumValN(GenArmMveHeader, "gen-arm-mve-header",
"Generate arm_mve.h for clang"),
clEnumValN(GenArmMveBuiltinDef, "gen-arm-mve-builtin-def",
@@ -240,6 +291,17 @@ cl::opt<ActionType> Action(
"Generate riscv_vector_builtins.inc for clang"),
clEnumValN(GenRISCVVectorBuiltinCG, "gen-riscv-vector-builtin-codegen",
"Generate riscv_vector_builtin_cg.inc for clang"),
+ clEnumValN(GenRISCVVectorBuiltinSema, "gen-riscv-vector-builtin-sema",
+ "Generate riscv_vector_builtin_sema.inc for clang"),
+ clEnumValN(GenRISCVSiFiveVectorBuiltins,
+ "gen-riscv-sifive-vector-builtins",
+ "Generate riscv_sifive_vector_builtins.inc for clang"),
+ clEnumValN(GenRISCVSiFiveVectorBuiltinCG,
+ "gen-riscv-sifive-vector-builtin-codegen",
+ "Generate riscv_sifive_vector_builtin_cg.inc for clang"),
+ clEnumValN(GenRISCVSiFiveVectorBuiltinSema,
+ "gen-riscv-sifive-vector-builtin-sema",
+ "Generate riscv_sifive_vector_builtin_sema.inc for clang"),
clEnumValN(GenAttrDocs, "gen-attr-docs",
"Generate attribute documentation"),
clEnumValN(GenDiagDocs, "gen-diag-docs",
@@ -280,6 +342,15 @@ bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
case GenClangAttrList:
EmitClangAttrList(Records, OS);
break;
+ case GenClangAttrCanPrintLeftList:
+ EmitClangAttrPrintList("CanPrintOnLeft", Records, OS);
+ break;
+ case GenClangAttrMustPrintLeftList:
+ EmitClangAttrPrintList("PrintOnLeft", Records, OS);
+ break;
+ case GenClangAttrDocTable:
+ EmitClangAttrDocTable(Records, OS);
+ break;
case GenClangAttrSubjectMatchRuleList:
EmitClangAttrSubjectMatchRuleList(Records, OS);
break;
@@ -289,6 +360,9 @@ bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
case GenClangAttrPCHWrite:
EmitClangAttrPCHWrite(Records, OS);
break;
+ case GenClangRegularKeywordAttributeInfo:
+ EmitClangRegularKeywordAttributeInfo(Records, OS);
+ break;
case GenClangAttrHasAttributeImpl:
EmitClangAttrHasAttrImpl(Records, OS);
break;
@@ -329,7 +403,8 @@ bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
EmitClangASTNodes(Records, OS, CommentNodeClassName, "");
break;
case GenClangDeclNodes:
- EmitClangASTNodes(Records, OS, DeclNodeClassName, "Decl");
+ EmitClangASTNodes(Records, OS, DeclNodeClassName, "Decl",
+ DeclContextNodeClassName);
EmitClangDeclContext(Records, OS);
break;
case GenClangStmtNodes:
@@ -374,6 +449,9 @@ bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
case GenClangOpenCLBuiltins:
EmitClangOpenCLBuiltins(Records, OS);
break;
+ case GenClangOpenCLBuiltinHeader:
+ EmitClangOpenCLBuiltinHeader(Records, OS);
+ break;
case GenClangOpenCLBuiltinTests:
EmitClangOpenCLBuiltinTests(Records, OS);
break;
@@ -389,6 +467,9 @@ bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
case GenArmFP16:
EmitFP16(Records, OS);
break;
+ case GenArmVectorType:
+ EmitVectorTypes(Records, OS);
+ break;
case GenArmBF16:
EmitBF16(Records, OS);
break;
@@ -428,6 +509,27 @@ bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
case GenArmSveRangeChecks:
EmitSveRangeChecks(Records, OS);
break;
+ case GenArmSveStreamingAttrs:
+ EmitSveStreamingAttrs(Records, OS);
+ break;
+ case GenArmSmeHeader:
+ EmitSmeHeader(Records, OS);
+ break;
+ case GenArmSmeBuiltins:
+ EmitSmeBuiltins(Records, OS);
+ break;
+ case GenArmSmeBuiltinCG:
+ EmitSmeBuiltinCG(Records, OS);
+ break;
+ case GenArmSmeRangeChecks:
+ EmitSmeRangeChecks(Records, OS);
+ break;
+ case GenArmSmeStreamingAttrs:
+ EmitSmeStreamingAttrs(Records, OS);
+ break;
+ case GenArmSmeBuiltinZAState:
+ EmitSmeBuiltinZAState(Records, OS);
+ break;
case GenArmCdeHeader:
EmitCdeHeader(Records, OS);
break;
@@ -452,6 +554,18 @@ bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
case GenRISCVVectorBuiltinCG:
EmitRVVBuiltinCG(Records, OS);
break;
+ case GenRISCVVectorBuiltinSema:
+ EmitRVVBuiltinSema(Records, OS);
+ break;
+ case GenRISCVSiFiveVectorBuiltins:
+ EmitRVVBuiltins(Records, OS);
+ break;
+ case GenRISCVSiFiveVectorBuiltinCG:
+ EmitRVVBuiltinCG(Records, OS);
+ break;
+ case GenRISCVSiFiveVectorBuiltinSema:
+ EmitRVVBuiltinSema(Records, OS);
+ break;
case GenAttrDocs:
EmitClangAttrDocs(Records, OS);
break;
diff --git a/contrib/llvm-project/clang/utils/TableGen/TableGenBackends.h b/contrib/llvm-project/clang/utils/TableGen/TableGenBackends.h
index bf40c7b1d18f..58a4af3c23a6 100644
--- a/contrib/llvm-project/clang/utils/TableGen/TableGenBackends.h
+++ b/contrib/llvm-project/clang/utils/TableGen/TableGenBackends.h
@@ -25,8 +25,16 @@ class RecordKeeper;
namespace clang {
void EmitClangDeclContext(llvm::RecordKeeper &RK, llvm::raw_ostream &OS);
+/**
+ @param PriorizeIfSubclassOf These classes should be prioritized in the output.
+ This is useful to force enum generation/jump tables/lookup tables to be more
+ compact in both size and surrounding code in hot functions. An example use is
+ in Decl for classes that inherit from DeclContext, for functions like
+ castFromDeclContext.
+ */
void EmitClangASTNodes(llvm::RecordKeeper &RK, llvm::raw_ostream &OS,
- const std::string &N, const std::string &S);
+ const std::string &N, const std::string &S,
+ std::string_view PriorizeIfSubclassOf = "");
void EmitClangBasicReader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitClangBasicWriter(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitClangTypeNodes(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
@@ -39,10 +47,14 @@ void EmitClangAttrSubjectMatchRulesParserStringSwitches(
void EmitClangAttrClass(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitClangAttrImpl(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitClangAttrList(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitClangAttrPrintList(const std::string &FieldName,
+ llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitClangAttrSubjectMatchRuleList(llvm::RecordKeeper &Records,
llvm::raw_ostream &OS);
void EmitClangAttrPCHRead(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitClangAttrPCHWrite(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitClangRegularKeywordAttributeInfo(llvm::RecordKeeper &Records,
+ llvm::raw_ostream &OS);
void EmitClangAttrHasAttrImpl(llvm::RecordKeeper &Records,
llvm::raw_ostream &OS);
void EmitClangAttrSpellingListIndex(llvm::RecordKeeper &Records,
@@ -61,6 +73,7 @@ void EmitClangAttrTextNodeDump(llvm::RecordKeeper &Records,
llvm::raw_ostream &OS);
void EmitClangAttrNodeTraverse(llvm::RecordKeeper &Records,
llvm::raw_ostream &OS);
+void EmitClangAttrDocTable(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitClangDiagsDefs(llvm::RecordKeeper &Records, llvm::raw_ostream &OS,
const std::string &Component);
@@ -92,6 +105,7 @@ void EmitNeon(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitFP16(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitBF16(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitNeonSema(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitVectorTypes(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitNeonTest(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitSveHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
@@ -99,6 +113,14 @@ void EmitSveBuiltins(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitSveBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitSveTypeFlags(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitSveRangeChecks(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitSveStreamingAttrs(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+
+void EmitSmeHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitSmeBuiltins(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitSmeBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitSmeRangeChecks(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitSmeStreamingAttrs(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitSmeBuiltinZAState(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitMveHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitMveBuiltinDef(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
@@ -109,6 +131,7 @@ void EmitMveBuiltinAliases(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitRVVHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitRVVBuiltins(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitRVVBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitRVVBuiltinSema(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitCdeHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitCdeBuiltinDef(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
@@ -122,6 +145,8 @@ void EmitClangOptDocs(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitClangOpenCLBuiltins(llvm::RecordKeeper &Records,
llvm::raw_ostream &OS);
+void EmitClangOpenCLBuiltinHeader(llvm::RecordKeeper &Records,
+ llvm::raw_ostream &OS);
void EmitClangOpenCLBuiltinTests(llvm::RecordKeeper &Records,
llvm::raw_ostream &OS);